# export-to-sqlite.py: export perf data to a sqlite3 database
# Copyright (c) 2017, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
# more details.

from __future__ import print_function

import os
import sys
import struct
import datetime

# To use this script you will need to have installed the python-pyside package,
# which provides LGPL-licensed Python bindings for Qt.  You will also need the
# libqt4-sql-sqlite package for Qt sqlite3 support.
#
# Examples of installing pyside:
#
# ubuntu:
#
#	$ sudo apt-get install python-pyside.qtsql libqt4-sql-sqlite
#
#	Alternatively, to use Python3 and/or pyside 2, one of the following:
#
#		$ sudo apt-get install python3-pyside.qtsql libqt4-sql-sqlite
#		$ sudo apt-get install python-pyside2.qtsql libqt5sql5-sqlite
#		$ sudo apt-get install python3-pyside2.qtsql libqt5sql5-sqlite
# fedora:
#
#	$ sudo yum install python-pyside
#
#	Alternatively, to use Python3 and/or pyside 2, one of the following:
#		$ sudo yum install python3-pyside
#		$ pip install --user PySide2
#		$ pip3 install --user PySide2
#
# An example of using this script with Intel PT:
#
#	$ perf record -e intel_pt//u ls
#	$ perf script -s ~/libexec/perf-core/scripts/python/export-to-sqlite.py pt_example branches calls
#	2017-07-31 14:26:07.326913 Creating database...
#	2017-07-31 14:26:07.538097 Writing records...
#	2017-07-31 14:26:09.889292 Adding indexes
#	2017-07-31 14:26:09.958746 Done
#
# To browse the database, sqlite3 can be used, e.g.
#
#	$ sqlite3 pt_example
#	sqlite> .header on
#	sqlite> select * from samples_view where id < 10;
#	sqlite> .mode column
#	sqlite> select * from samples_view where id < 10;
#	sqlite> .tables
#	sqlite> .schema samples_view
#	sqlite> .quit
#
# An example of using the database is provided by the script
# exported-sql-viewer.py.  Refer to that script for details.
#
# The database structure is practically the same as that created by the script
# export-to-postgresql.py.  Refer to that script for details.  A notable
# difference is the 'transaction' column of the 'samples' table, which is
# renamed 'transaction_' in sqlite because 'transaction' is a reserved word.
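#
# For example, when all columns are exported, the renamed column can be
# queried as:
#
#	sqlite> select id, time, transaction_ from samples limit 10;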

pyside_version_1 = True
if not "pyside-version-1" in sys.argv:
	try:
		from PySide2.QtSql import *
		pyside_version_1 = False
	except:
		pass

if pyside_version_1:
	from PySide.QtSql import *

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
	'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

# These perf imports are not used at present
#from perf_trace_context import *
#from Core import *

perf_db_export_mode = True
perf_db_export_calls = False
perf_db_export_callchains = False
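# perf's python scripting engine checks perf_db_export_mode to decide whether
# to use the database export API; calls and callchains export is enabled by
# the command-line arguments parsed below.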

def printerr(*args, **keyword_args):
	print(*args, file=sys.stderr, **keyword_args)

def printdate(*args, **kw_args):
	print(datetime.datetime.today(), *args, sep=' ', **kw_args)

def usage():
	printerr("Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>] [<pyside-version-1>]");
	printerr("where:  columns            'all' or 'branches'");
	printerr("        calls              'calls' => create calls and call_paths table");
	printerr("        callchains         'callchains' => create call_paths table");
	printerr("        pyside-version-1   'pyside-version-1' => use pyside version 1");
	raise Exception("Too few or bad arguments")

if len(sys.argv) < 2:
	usage()

dbname = sys.argv[1]

if len(sys.argv) >= 3:
	columns = sys.argv[2]
else:
	columns = "all"

if columns not in ("all", "branches"):
	usage()

branches = (columns == "branches")

for i in range(3, len(sys.argv)):
	if sys.argv[i] == "calls":
		perf_db_export_calls = True
	elif sys.argv[i] == "callchains":
		perf_db_export_callchains = True
	elif sys.argv[i] == "pyside-version-1":
		pass
	else:
		usage()

# Execute an SQL statement string (do_query) or an already-prepared and bound
# query (do_query_), raising an exception on failure.
def do_query(q, s):
	if q.exec_(s):
		return
	raise Exception("Query failed: " + q.lastError().text())

def do_query_(q):
	if q.exec_():
		return
	raise Exception("Query failed: " + q.lastError().text())

printdate("Creating database ...")

if os.path.exists(dbname):
	raise Exception(dbname + " already exists")

db = QSqlDatabase.addDatabase('QSQLITE')
db.setDatabaseName(dbname)
if not db.open():
	raise Exception("Failed to open database " + dbname + " : " + db.lastError().text())

query = QSqlQuery(db)

do_query(query, 'PRAGMA journal_mode = OFF')
do_query(query, 'BEGIN TRANSACTION')

do_query(query, 'CREATE TABLE selected_events ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'name		varchar(80))')
do_query(query, 'CREATE TABLE machines ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'pid		integer,'
		'root_dir 	varchar(4096))')
do_query(query, 'CREATE TABLE threads ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'machine_id	bigint,'
		'process_id	bigint,'
		'pid		integer,'
		'tid		integer)')
do_query(query, 'CREATE TABLE comms ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'comm		varchar(16),'
		'c_thread_id	bigint,'
		'c_time		bigint,'
		'exec_flag	boolean)')
do_query(query, 'CREATE TABLE comm_threads ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'comm_id	bigint,'
		'thread_id	bigint)')
do_query(query, 'CREATE TABLE dsos ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'machine_id	bigint,'
		'short_name	varchar(256),'
		'long_name	varchar(4096),'
		'build_id	varchar(64))')
do_query(query, 'CREATE TABLE symbols ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'dso_id		bigint,'
		'sym_start	bigint,'
		'sym_end	bigint,'
		'binding	integer,'
		'name		varchar(2048))')
do_query(query, 'CREATE TABLE branch_types ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'name		varchar(80))')

if branches:
	do_query(query, 'CREATE TABLE samples ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'evsel_id	bigint,'
		'machine_id	bigint,'
		'thread_id	bigint,'
		'comm_id	bigint,'
		'dso_id		bigint,'
		'symbol_id	bigint,'
		'sym_offset	bigint,'
		'ip		bigint,'
		'time		bigint,'
		'cpu		integer,'
		'to_dso_id	bigint,'
		'to_symbol_id	bigint,'
		'to_sym_offset	bigint,'
		'to_ip		bigint,'
		'branch_type	integer,'
		'in_tx		boolean,'
		'call_path_id	bigint,'
		'insn_count	bigint,'
		'cyc_count	bigint)')
else:
	do_query(query, 'CREATE TABLE samples ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'evsel_id	bigint,'
		'machine_id	bigint,'
		'thread_id	bigint,'
		'comm_id	bigint,'
		'dso_id		bigint,'
		'symbol_id	bigint,'
		'sym_offset	bigint,'
		'ip		bigint,'
		'time		bigint,'
		'cpu		integer,'
		'to_dso_id	bigint,'
		'to_symbol_id	bigint,'
		'to_sym_offset	bigint,'
		'to_ip		bigint,'
		'period		bigint,'
		'weight		bigint,'
		'transaction_	bigint,'
		'data_src	bigint,'
		'branch_type	integer,'
		'in_tx		boolean,'
		'call_path_id	bigint,'
		'insn_count	bigint,'
		'cyc_count	bigint)')

if perf_db_export_calls or perf_db_export_callchains:
	do_query(query, 'CREATE TABLE call_paths ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'parent_id	bigint,'
		'symbol_id	bigint,'
		'ip		bigint)')
if perf_db_export_calls:
	do_query(query, 'CREATE TABLE calls ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'thread_id	bigint,'
		'comm_id	bigint,'
		'call_path_id	bigint,'
		'call_time	bigint,'
		'return_time	bigint,'
		'branch_count	bigint,'
		'call_id	bigint,'
		'return_id	bigint,'
		'parent_call_path_id	bigint,'
		'flags		integer,'
		'parent_id	bigint,'
		'insn_count	bigint,'
		'cyc_count	bigint)')

do_query(query, 'CREATE TABLE ptwrite ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'payload	bigint,'
		'exact_ip	integer)')

do_query(query, 'CREATE TABLE cbr ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'cbr		integer,'
		'mhz		integer,'
		'percent	integer)')

do_query(query, 'CREATE TABLE mwait ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'hints		integer,'
		'extensions	integer)')

do_query(query, 'CREATE TABLE pwre ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'cstate		integer,'
		'subcstate	integer,'
		'hw		integer)')

do_query(query, 'CREATE TABLE exstop ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'exact_ip	integer)')

do_query(query, 'CREATE TABLE pwrx ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'deepest_cstate	integer,'
		'last_cstate	integer,'
		'wake_reason	integer)')

do_query(query, 'CREATE TABLE context_switches ('
		'id		integer		NOT NULL	PRIMARY KEY,'
		'machine_id	bigint,'
		'time		bigint,'
		'cpu		integer,'
		'thread_out_id	bigint,'
		'comm_out_id	bigint,'
		'thread_in_id	bigint,'
		'comm_in_id	bigint,'
		'flags		integer)')

# printf was added to sqlite in version 3.8.3
sqlite_has_printf = False
try:
	do_query(query, 'SELECT printf("") FROM machines')
	sqlite_has_printf = True
except Exception:
	pass

def emit_to_hex(x):
	if sqlite_has_printf:
		return 'printf("%x", ' + x + ')'
	else:
		return x
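
# For example, emit_to_hex('ip') expands to 'printf("%x", ip)' when printf is
# available, and falls back to the plain decimal column value otherwise.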

do_query(query, 'CREATE VIEW machines_view AS '
	'SELECT '
		'id,'
		'pid,'
		'root_dir,'
		'CASE WHEN id=0 THEN \'unknown\' WHEN pid=-1 THEN \'host\' ELSE \'guest\' END AS host_or_guest'
	' FROM machines')

do_query(query, 'CREATE VIEW dsos_view AS '
	'SELECT '
		'id,'
		'machine_id,'
		'(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,'
		'short_name,'
		'long_name,'
		'build_id'
	' FROM dsos')

do_query(query, 'CREATE VIEW symbols_view AS '
	'SELECT '
		'id,'
		'name,'
		'(SELECT short_name FROM dsos WHERE id=dso_id) AS dso,'
		'dso_id,'
		'sym_start,'
		'sym_end,'
		'CASE WHEN binding=0 THEN \'local\' WHEN binding=1 THEN \'global\' ELSE \'weak\' END AS binding'
	' FROM symbols')

do_query(query, 'CREATE VIEW threads_view AS '
	'SELECT '
		'id,'
		'machine_id,'
		'(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,'
		'process_id,'
		'pid,'
		'tid'
	' FROM threads')

do_query(query, 'CREATE VIEW comm_threads_view AS '
	'SELECT '
		'comm_id,'
		'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
		'thread_id,'
		'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
		'(SELECT tid FROM threads WHERE id = thread_id) AS tid'
	' FROM comm_threads')

if perf_db_export_calls or perf_db_export_callchains:
	do_query(query, 'CREATE VIEW call_paths_view AS '
		'SELECT '
			'c.id,'
			+ emit_to_hex('c.ip') + ' AS ip,'
			'c.symbol_id,'
			'(SELECT name FROM symbols WHERE id = c.symbol_id) AS symbol,'
			'(SELECT dso_id FROM symbols WHERE id = c.symbol_id) AS dso_id,'
			'(SELECT dso FROM symbols_view  WHERE id = c.symbol_id) AS dso_short_name,'
			'c.parent_id,'
			+ emit_to_hex('p.ip') + ' AS parent_ip,'
			'p.symbol_id AS parent_symbol_id,'
			'(SELECT name FROM symbols WHERE id = p.symbol_id) AS parent_symbol,'
			'(SELECT dso_id FROM symbols WHERE id = p.symbol_id) AS parent_dso_id,'
			'(SELECT dso FROM symbols_view  WHERE id = p.symbol_id) AS parent_dso_short_name'
		' FROM call_paths c INNER JOIN call_paths p ON p.id = c.parent_id')
if perf_db_export_calls:
	do_query(query, 'CREATE VIEW calls_view AS '
		'SELECT '
			'calls.id,'
			'thread_id,'
			'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
			'(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
			'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
			'call_path_id,'
			+ emit_to_hex('ip') + ' AS ip,'
			'symbol_id,'
			'(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
			'call_time,'
			'return_time,'
			'return_time - call_time AS elapsed_time,'
			'branch_count,'
			'insn_count,'
			'cyc_count,'
			'CASE WHEN cyc_count=0 THEN CAST(0 AS FLOAT) ELSE ROUND(CAST(insn_count AS FLOAT) / cyc_count, 2) END AS IPC,'
			'call_id,'
			'return_id,'
			'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE flags END AS flags,'
			'parent_call_path_id,'
			'calls.parent_id'
		' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id')

do_query(query, 'CREATE VIEW samples_view AS '
	'SELECT '
		'id,'
		'time,'
		'cpu,'
		'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
		'(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
		'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
		'(SELECT name FROM selected_events WHERE id = evsel_id) AS event,'
		+ emit_to_hex('ip') + ' AS ip_hex,'
		'(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
		'sym_offset,'
		'(SELECT short_name FROM dsos WHERE id = dso_id) AS dso_short_name,'
		+ emit_to_hex('to_ip') + ' AS to_ip_hex,'
		'(SELECT name FROM symbols WHERE id = to_symbol_id) AS to_symbol,'
		'to_sym_offset,'
		'(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,'
		'(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,'
		'in_tx,'
		'insn_count,'
		'cyc_count,'
		'CASE WHEN cyc_count=0 THEN CAST(0 AS FLOAT) ELSE ROUND(CAST(insn_count AS FLOAT) / cyc_count, 2) END AS IPC'
	' FROM samples')

do_query(query, 'CREATE VIEW ptwrite_view AS '
	'SELECT '
		'ptwrite.id,'
		'time,'
		'cpu,'
		+ emit_to_hex('payload') + ' AS payload_hex,'
		'CASE WHEN exact_ip=0 THEN \'False\' ELSE \'True\' END AS exact_ip'
	' FROM ptwrite'
	' INNER JOIN samples ON samples.id = ptwrite.id')

do_query(query, 'CREATE VIEW cbr_view AS '
	'SELECT '
		'cbr.id,'
		'time,'
		'cpu,'
		'cbr,'
		'mhz,'
		'percent'
	' FROM cbr'
	' INNER JOIN samples ON samples.id = cbr.id')

do_query(query, 'CREATE VIEW mwait_view AS '
	'SELECT '
		'mwait.id,'
		'time,'
		'cpu,'
		+ emit_to_hex('hints') + ' AS hints_hex,'
		+ emit_to_hex('extensions') + ' AS extensions_hex'
	' FROM mwait'
	' INNER JOIN samples ON samples.id = mwait.id')

do_query(query, 'CREATE VIEW pwre_view AS '
	'SELECT '
		'pwre.id,'
		'time,'
		'cpu,'
		'cstate,'
		'subcstate,'
		'CASE WHEN hw=0 THEN \'False\' ELSE \'True\' END AS hw'
	' FROM pwre'
	' INNER JOIN samples ON samples.id = pwre.id')

do_query(query, 'CREATE VIEW exstop_view AS '
	'SELECT '
		'exstop.id,'
		'time,'
		'cpu,'
		'CASE WHEN exact_ip=0 THEN \'False\' ELSE \'True\' END AS exact_ip'
	' FROM exstop'
	' INNER JOIN samples ON samples.id = exstop.id')

do_query(query, 'CREATE VIEW pwrx_view AS '
	'SELECT '
		'pwrx.id,'
		'time,'
		'cpu,'
		'deepest_cstate,'
		'last_cstate,'
		'CASE     WHEN wake_reason=1 THEN \'Interrupt\''
			' WHEN wake_reason=2 THEN \'Timer Deadline\''
			' WHEN wake_reason=4 THEN \'Monitored Address\''
			' WHEN wake_reason=8 THEN \'HW\''
			' ELSE wake_reason '
		'END AS wake_reason'
	' FROM pwrx'
	' INNER JOIN samples ON samples.id = pwrx.id')

do_query(query, 'CREATE VIEW power_events_view AS '
	'SELECT '
		'samples.id,'
		'time,'
		'cpu,'
		'selected_events.name AS event,'
		'CASE WHEN selected_events.name=\'cbr\' THEN (SELECT cbr FROM cbr WHERE cbr.id = samples.id) ELSE "" END AS cbr,'
		'CASE WHEN selected_events.name=\'cbr\' THEN (SELECT mhz FROM cbr WHERE cbr.id = samples.id) ELSE "" END AS mhz,'
		'CASE WHEN selected_events.name=\'cbr\' THEN (SELECT percent FROM cbr WHERE cbr.id = samples.id) ELSE "" END AS percent,'
		'CASE WHEN selected_events.name=\'mwait\' THEN (SELECT ' + emit_to_hex('hints') + ' FROM mwait WHERE mwait.id = samples.id) ELSE "" END AS hints_hex,'
		'CASE WHEN selected_events.name=\'mwait\' THEN (SELECT ' + emit_to_hex('extensions') + ' FROM mwait WHERE mwait.id = samples.id) ELSE "" END AS extensions_hex,'
		'CASE WHEN selected_events.name=\'pwre\' THEN (SELECT cstate FROM pwre WHERE pwre.id = samples.id) ELSE "" END AS cstate,'
		'CASE WHEN selected_events.name=\'pwre\' THEN (SELECT subcstate FROM pwre WHERE pwre.id = samples.id) ELSE "" END AS subcstate,'
		'CASE WHEN selected_events.name=\'pwre\' THEN (SELECT hw FROM pwre WHERE pwre.id = samples.id) ELSE "" END AS hw,'
		'CASE WHEN selected_events.name=\'exstop\' THEN (SELECT exact_ip FROM exstop WHERE exstop.id = samples.id) ELSE "" END AS exact_ip,'
		'CASE WHEN selected_events.name=\'pwrx\' THEN (SELECT deepest_cstate FROM pwrx WHERE pwrx.id = samples.id) ELSE "" END AS deepest_cstate,'
		'CASE WHEN selected_events.name=\'pwrx\' THEN (SELECT last_cstate FROM pwrx WHERE pwrx.id = samples.id) ELSE "" END AS last_cstate,'
		'CASE WHEN selected_events.name=\'pwrx\' THEN (SELECT '
			'CASE     WHEN wake_reason=1 THEN \'Interrupt\''
				' WHEN wake_reason=2 THEN \'Timer Deadline\''
				' WHEN wake_reason=4 THEN \'Monitored Address\''
				' WHEN wake_reason=8 THEN \'HW\''
				' ELSE wake_reason '
			'END'
		' FROM pwrx WHERE pwrx.id = samples.id) ELSE "" END AS wake_reason'
	' FROM samples'
	' INNER JOIN selected_events ON selected_events.id = evsel_id'
	' WHERE selected_events.name IN (\'cbr\',\'mwait\',\'exstop\',\'pwre\',\'pwrx\')')

do_query(query, 'CREATE VIEW context_switches_view AS '
	'SELECT '
		'context_switches.id,'
		'context_switches.machine_id,'
		'context_switches.time,'
		'context_switches.cpu,'
		'th_out.pid AS pid_out,'
		'th_out.tid AS tid_out,'
		'comm_out.comm AS comm_out,'
		'th_in.pid AS pid_in,'
		'th_in.tid AS tid_in,'
		'comm_in.comm AS comm_in,'
		'CASE	  WHEN context_switches.flags = 0 THEN \'in\''
			' WHEN context_switches.flags = 1 THEN \'out\''
			' WHEN context_switches.flags = 3 THEN \'out preempt\''
			' ELSE context_switches.flags '
		'END AS flags'
	' FROM context_switches'
	' INNER JOIN threads AS th_out ON th_out.id   = context_switches.thread_out_id'
	' INNER JOIN threads AS th_in  ON th_in.id    = context_switches.thread_in_id'
	' INNER JOIN comms AS comm_out ON comm_out.id = context_switches.comm_out_id'
	' INNER JOIN comms AS comm_in  ON comm_in.id  = context_switches.comm_in_id')

do_query(query, 'END TRANSACTION')

evsel_query = QSqlQuery(db)
evsel_query.prepare("INSERT INTO selected_events VALUES (?, ?)")
machine_query = QSqlQuery(db)
machine_query.prepare("INSERT INTO machines VALUES (?, ?, ?)")
thread_query = QSqlQuery(db)
thread_query.prepare("INSERT INTO threads VALUES (?, ?, ?, ?, ?)")
comm_query = QSqlQuery(db)
comm_query.prepare("INSERT INTO comms VALUES (?, ?, ?, ?, ?)")
comm_thread_query = QSqlQuery(db)
comm_thread_query.prepare("INSERT INTO comm_threads VALUES (?, ?, ?)")
dso_query = QSqlQuery(db)
dso_query.prepare("INSERT INTO dsos VALUES (?, ?, ?, ?, ?)")
symbol_query = QSqlQuery(db)
symbol_query.prepare("INSERT INTO symbols VALUES (?, ?, ?, ?, ?, ?)")
branch_type_query = QSqlQuery(db)
branch_type_query.prepare("INSERT INTO branch_types VALUES (?, ?)")
sample_query = QSqlQuery(db)
if branches:
	sample_query.prepare("INSERT INTO samples VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
else:
	sample_query.prepare("INSERT INTO samples VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
if perf_db_export_calls or perf_db_export_callchains:
	call_path_query = QSqlQuery(db)
	call_path_query.prepare("INSERT INTO call_paths VALUES (?, ?, ?, ?)")
if perf_db_export_calls:
	call_query = QSqlQuery(db)
	call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
ptwrite_query = QSqlQuery(db)
ptwrite_query.prepare("INSERT INTO ptwrite VALUES (?, ?, ?)")
cbr_query = QSqlQuery(db)
cbr_query.prepare("INSERT INTO cbr VALUES (?, ?, ?, ?)")
mwait_query = QSqlQuery(db)
mwait_query.prepare("INSERT INTO mwait VALUES (?, ?, ?)")
pwre_query = QSqlQuery(db)
pwre_query.prepare("INSERT INTO pwre VALUES (?, ?, ?, ?)")
exstop_query = QSqlQuery(db)
exstop_query.prepare("INSERT INTO exstop VALUES (?, ?)")
pwrx_query = QSqlQuery(db)
pwrx_query.prepare("INSERT INTO pwrx VALUES (?, ?, ?, ?)")
context_switch_query = QSqlQuery(db)
context_switch_query.prepare("INSERT INTO context_switches VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)")

def trace_begin():
	printdate("Writing records...")
	do_query(query, 'BEGIN TRANSACTION')
	# id == 0 means unknown.  It is easier to create records for them than to replace the zeroes with NULLs
	evsel_table(0, "unknown")
	machine_table(0, 0, "unknown")
	thread_table(0, 0, 0, -1, -1)
	comm_table(0, "unknown", 0, 0, 0)
	dso_table(0, 0, "unknown", "unknown", "")
	symbol_table(0, 0, 0, 0, 0, "unknown")
	sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
	if perf_db_export_calls or perf_db_export_callchains:
		call_path_table(0, 0, 0, 0)
		call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)

unhandled_count = 0

def is_table_empty(table_name):
	do_query(query, 'SELECT * FROM ' + table_name + ' LIMIT 1')
	if query.next():
		return False
	return True

def drop(table_name):
	do_query(query, 'DROP VIEW ' + table_name + '_view')
	do_query(query, 'DROP TABLE ' + table_name)

def trace_end():
	do_query(query, 'END TRANSACTION')

	printdate("Adding indexes")
	if perf_db_export_calls:
		do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
		do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')
		do_query(query, 'ALTER TABLE comms ADD has_calls boolean')
		do_query(query, 'UPDATE comms SET has_calls = 1 WHERE comms.id IN (SELECT DISTINCT comm_id FROM calls)')

	printdate("Dropping unused tables")
	if is_table_empty("ptwrite"):
		drop("ptwrite")
	if is_table_empty("mwait") and is_table_empty("pwre") and is_table_empty("exstop") and is_table_empty("pwrx"):
		do_query(query, 'DROP VIEW power_events_view')
		drop("mwait")
		drop("pwre")
		drop("exstop")
		drop("pwrx")
		if is_table_empty("cbr"):
			drop("cbr")
	if is_table_empty("context_switches"):
		drop("context_switches")

	if unhandled_count:
		printdate("Warning:", unhandled_count, "unhandled events")
	printdate("Done")

def trace_unhandled(event_name, context, event_fields_dict):
	global unhandled_count
	unhandled_count += 1

def sched__sched_switch(*x):
	pass

def bind_exec(q, n, x):
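	# Bind the first n values of x to the prepared query q and execute it.
	# Values are bound as strings; sqlite's type affinity converts them back
	# to numbers in typed columns.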
	for xx in x[0:n]:
		q.addBindValue(str(xx))
	do_query_(q)

def evsel_table(*x):
	bind_exec(evsel_query, 2, x)

def machine_table(*x):
	bind_exec(machine_query, 3, x)

def thread_table(*x):
	bind_exec(thread_query, 5, x)

def comm_table(*x):
	bind_exec(comm_query, 5, x)

def comm_thread_table(*x):
	bind_exec(comm_thread_query, 3, x)

def dso_table(*x):
	bind_exec(dso_query, 5, x)

def symbol_table(*x):
	bind_exec(symbol_query, 6, x)

def branch_type_table(*x):
	bind_exec(branch_type_query, 2, x)

def sample_table(*x):
	if branches:
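		# The branches table omits period, weight, transaction_ and data_src,
		# so skip fields 15-18 of the sample tuple.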
		for xx in x[0:15]:
			sample_query.addBindValue(str(xx))
		for xx in x[19:24]:
			sample_query.addBindValue(str(xx))
		do_query_(sample_query)
	else:
		bind_exec(sample_query, 24, x)

def call_path_table(*x):
	bind_exec(call_path_query, 4, x)

def call_return_table(*x):
	bind_exec(call_query, 14, x)

def ptwrite(id, raw_buf):
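	# raw_buf is a 32-bit flags field followed by a 64-bit payload; bit 0 of
	# flags indicates whether the IP is exact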
	data = struct.unpack_from("<IQ", raw_buf)
	flags = data[0]
	payload = data[1]
	exact_ip = flags & 1
	ptwrite_query.addBindValue(str(id))
	ptwrite_query.addBindValue(str(payload))
	ptwrite_query.addBindValue(str(exact_ip))
	do_query_(ptwrite_query)

def cbr(id, raw_buf):
	data = struct.unpack_from("<BBBBII", raw_buf)
	cbr = data[0]
	# use integer division so the stored values stay integers under Python 3
	MHz = (data[4] + 500) // 1000
	percent = ((cbr * 1000 // data[2]) + 5) // 10
	cbr_query.addBindValue(str(id))
	cbr_query.addBindValue(str(cbr))
	cbr_query.addBindValue(str(MHz))
	cbr_query.addBindValue(str(percent))
	do_query_(cbr_query)

def mwait(id, raw_buf):
	data = struct.unpack_from("<IQ", raw_buf)
	payload = data[1]
	hints = payload & 0xff
	extensions = (payload >> 32) & 0x3
	mwait_query.addBindValue(str(id))
	mwait_query.addBindValue(str(hints))
	mwait_query.addBindValue(str(extensions))
	do_query_(mwait_query)

def pwre(id, raw_buf):
	data = struct.unpack_from("<IQ", raw_buf)
	payload = data[1]
	hw = (payload >> 7) & 1
	cstate = (payload >> 12) & 0xf
	subcstate = (payload >> 8) & 0xf
	pwre_query.addBindValue(str(id))
	pwre_query.addBindValue(str(cstate))
	pwre_query.addBindValue(str(subcstate))
	pwre_query.addBindValue(str(hw))
	do_query_(pwre_query)

def exstop(id, raw_buf):
	data = struct.unpack_from("<I", raw_buf)
	flags = data[0]
	exact_ip = flags & 1
	exstop_query.addBindValue(str(id))
	exstop_query.addBindValue(str(exact_ip))
	do_query_(exstop_query)

def pwrx(id, raw_buf):
	data = struct.unpack_from("<IQ", raw_buf)
	payload = data[1]
	deepest_cstate = payload & 0xf
	last_cstate = (payload >> 4) & 0xf
	wake_reason = (payload >> 8) & 0xf
	pwrx_query.addBindValue(str(id))
	pwrx_query.addBindValue(str(deepest_cstate))
	pwrx_query.addBindValue(str(last_cstate))
	pwrx_query.addBindValue(str(wake_reason))
	do_query_(pwrx_query)

def synth_data(id, config, raw_buf, *x):
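	# Dispatch a synthesized event by its config value:
	# 0=ptwrite, 1=mwait, 2=pwre, 3=exstop, 4=pwrx, 5=cbr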
	if config == 0:
		ptwrite(id, raw_buf)
	elif config == 1:
		mwait(id, raw_buf)
	elif config == 2:
		pwre(id, raw_buf)
	elif config == 3:
		exstop(id, raw_buf)
	elif config == 4:
		pwrx(id, raw_buf)
	elif config == 5:
		cbr(id, raw_buf)

def context_switch_table(*x):
	bind_exec(context_switch_query, 9, x)
-rw-r--r--drivers/gpio/gpio-rda.c35
-rw-r--r--drivers/gpio/gpio-rdc321x.c2
-rw-r--r--drivers/gpio/gpio-realtek-otto.c41
-rw-r--r--drivers/gpio/gpio-reg.c6
-rw-r--r--drivers/gpio/gpio-regmap.c36
-rw-r--r--drivers/gpio/gpio-rockchip.c4
-rw-r--r--drivers/gpio/gpio-rtd.c2
-rw-r--r--drivers/gpio/gpio-sa1100.c2
-rw-r--r--drivers/gpio/gpio-sama5d2-piobu.c2
-rw-r--r--drivers/gpio/gpio-sch.c2
-rw-r--r--drivers/gpio/gpio-sch311x.c2
-rw-r--r--drivers/gpio/gpio-sifive.c74
-rw-r--r--drivers/gpio/gpio-sim.c7
-rw-r--r--drivers/gpio/gpio-siox.c2
-rw-r--r--drivers/gpio/gpio-sodaville.c20
-rw-r--r--drivers/gpio/gpio-spacemit-k1.c29
-rw-r--r--drivers/gpio/gpio-spear-spics.c2
-rw-r--r--drivers/gpio/gpio-sprd.c2
-rw-r--r--drivers/gpio/gpio-stmpe.c36
-rw-r--r--drivers/gpio/gpio-stp-xway.c2
-rw-r--r--drivers/gpio/gpio-syscon.c4
-rw-r--r--drivers/gpio/gpio-tangier.c2
-rw-r--r--drivers/gpio/gpio-tb10x.c74
-rw-r--r--drivers/gpio/gpio-tc3589x.c2
-rw-r--r--drivers/gpio/gpio-tegra.c2
-rw-r--r--drivers/gpio/gpio-tegra186.c30
-rw-r--r--drivers/gpio/gpio-thunderx.c4
-rw-r--r--drivers/gpio/gpio-timberdale.c4
-rw-r--r--drivers/gpio/gpio-tpic2810.c4
-rw-r--r--drivers/gpio/gpio-tps65086.c2
-rw-r--r--drivers/gpio/gpio-tps65218.c2
-rw-r--r--drivers/gpio/gpio-tps65219.c4
-rw-r--r--drivers/gpio/gpio-tps6586x.c2
-rw-r--r--drivers/gpio/gpio-tps65910.c2
-rw-r--r--drivers/gpio/gpio-tps65912.c2
-rw-r--r--drivers/gpio/gpio-tps68470.c2
-rw-r--r--drivers/gpio/gpio-tqmx86.c2
-rw-r--r--drivers/gpio/gpio-ts4800.c39
-rw-r--r--drivers/gpio/gpio-ts4900.c2
-rw-r--r--drivers/gpio/gpio-ts5500.c2
-rw-r--r--drivers/gpio/gpio-twl4030.c6
-rw-r--r--drivers/gpio/gpio-twl6040.c2
-rw-r--r--drivers/gpio/gpio-uniphier.c4
-rw-r--r--drivers/gpio/gpio-usbio.c248
-rw-r--r--drivers/gpio/gpio-vf610.c31
-rw-r--r--drivers/gpio/gpio-viperboard.c4
-rw-r--r--drivers/gpio/gpio-virtio.c2
-rw-r--r--drivers/gpio/gpio-visconti.c25
-rw-r--r--drivers/gpio/gpio-vx855.c2
-rw-r--r--drivers/gpio/gpio-wcd934x.c4
-rw-r--r--drivers/gpio/gpio-wcove.c2
-rw-r--r--drivers/gpio/gpio-winbond.c2
-rw-r--r--drivers/gpio/gpio-wm831x.c7
-rw-r--r--drivers/gpio/gpio-wm8350.c2
-rw-r--r--drivers/gpio/gpio-wm8994.c8
-rw-r--r--drivers/gpio/gpio-xgene-sb.c58
-rw-r--r--drivers/gpio/gpio-xgene.c2
-rw-r--r--drivers/gpio/gpio-xgs-iproc.c34
-rw-r--r--drivers/gpio/gpio-xilinx.c4
-rw-r--r--drivers/gpio/gpio-xlp.c2
-rw-r--r--drivers/gpio/gpio-xra1403.c5
-rw-r--r--drivers/gpio/gpio-xtensa.c2
-rw-r--r--drivers/gpio/gpio-zevio.c2
-rw-r--r--drivers/gpio/gpio-zynq.c2
-rw-r--r--drivers/gpio/gpio-zynqmp-modepin.c2
-rw-r--r--drivers/gpio/gpiolib-acpi-core.c11
-rw-r--r--drivers/gpio/gpiolib-acpi-quirks.c26
-rw-r--r--drivers/gpio/gpiolib-cdev.c90
-rw-r--r--drivers/gpio/gpiolib-of.c2
-rw-r--r--drivers/gpio/gpiolib-sysfs.c46
-rw-r--r--drivers/gpio/gpiolib.c247
-rw-r--r--drivers/gpio/gpiolib.h36
-rw-r--r--drivers/gpu/drm/Kconfig4
-rw-r--r--drivers/gpu/drm/Makefile1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c53
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c85
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c107
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c106
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c337
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c111
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c227
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c66
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.h19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c76
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c65
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c348
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c138
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c359
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h91
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c204
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c55
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c183
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h50
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c90
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atom.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atom.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c3823
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c60
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c60
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c65
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c69
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v6_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v6_1.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v7_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c58
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_userqueue.c144
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v12_0.c112
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c105
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c47
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v12_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c103
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c134
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c117
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c178
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c139
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c108
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c115
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c171
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c31
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c56
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c93
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_module.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c61
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h11
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c20
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c42
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c5
-rw-r--r--drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c56
-rw-r--r--drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h1
-rw-r--r--drivers/gpu/drm/amd/display/Makefile1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c572
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h33
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c90
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c52
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c35
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c15
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c64
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c6
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c3
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c5
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile1
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/vector.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c42
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c144
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c123
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c34
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c130
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c225
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c51
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stat.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_state.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h91
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c66
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_helper.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_spl_translate.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c98
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c57
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c417
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c41
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c140
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c50
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c74
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c117
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c63
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h24
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h21
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h26
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link_service.h (renamed from drivers/gpu/drm/amd/display/dc/inc/link.h)11
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/soc_and_ip_translator.h24
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_dpms.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_dpms.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_factory.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_factory.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_resource.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_validation.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_validation.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c51
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c65
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/pg/dcn35/dcn35_pg_cntl.c78
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c98
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c60
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile19
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c304
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h22
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c37
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c7
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h4
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h209
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c1
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c53
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h8
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c7
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c8
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h5
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c4
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c5
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c3
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h1
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h98
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h7
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h30
-rw-r--r--drivers/gpu/drm/amd/include/dm_pp_interface.h1
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h85
-rw-r--r--drivers/gpu/drm/amd/include/mes_v11_api_def.h3
-rw-r--r--drivers/gpu/drm/amd/include/mes_v12_api_def.h33
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm.c79
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c86
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c388
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h9
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h6
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c4
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c9
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c126
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c26
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c11
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c5
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c5
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c5
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c113
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h129
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h82
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h7
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_0_pptable.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c21
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c19
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c19
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c17
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c19
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c368
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c215
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h11
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c49
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c14
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h28
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/Makefile0
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_core_status.h (renamed from drivers/gpu/drm/amd/amdgpu/dce_v11_0.h)21
-rw-r--r--drivers/gpu/drm/ast/ast_2100.c46
-rw-r--r--drivers/gpu/drm/ast/ast_dp.c2
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h20
-rw-r--r--drivers/gpu/drm/ast/ast_main.c126
-rw-r--r--drivers/gpu/drm/bridge/Kconfig29
-rw-r--r--drivers/gpu/drm/bridge/Makefile2
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511.h6
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_audio.c23
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c63
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c4
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.c7
-rw-r--r--drivers/gpu/drm/bridge/aux-bridge.c2
-rw-r--r--drivers/gpu/drm/bridge/cadence/Kconfig1
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c211
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c6
-rw-r--r--drivers/gpu/drm/bridge/display-connector.c7
-rw-r--r--drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c12
-rw-r--r--drivers/gpu/drm/bridge/ite-it6263.c64
-rw-r--r--drivers/gpu/drm/bridge/ite-it6505.c33
-rw-r--r--drivers/gpu/drm/bridge/samsung-dsim.c353
-rw-r--r--drivers/gpu/drm/bridge/simple-bridge.c10
-rw-r--r--drivers/gpu/drm/bridge/ssd2825.c775
-rw-r--r--drivers/gpu/drm/bridge/synopsys/Kconfig7
-rw-r--r--drivers/gpu/drm/bridge/synopsys/Makefile1
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-dp.c2095
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c13
-rw-r--r--drivers/gpu/drm/bridge/waveshare-dsi.c203
-rw-r--r--drivers/gpu/drm/display/drm_bridge_connector.c20
-rw-r--r--drivers/gpu/drm/display/drm_dp_cec.c2
-rw-r--r--drivers/gpu/drm/display/drm_dp_helper.c4
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c7
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c23
-rw-r--r--drivers/gpu/drm/drm_bridge.c8
-rw-r--r--drivers/gpu/drm/drm_color_mgmt.c34
-rw-r--r--drivers/gpu/drm/drm_drv.c4
-rw-r--r--drivers/gpu/drm/drm_format_helper.c111
-rw-r--r--drivers/gpu/drm/drm_gem.c92
-rw-r--r--drivers/gpu/drm/drm_gpusvm.c439
-rw-r--r--drivers/gpu/drm/drm_gpuvm.c397
-rw-r--r--drivers/gpu/drm/drm_internal.h4
-rw-r--r--drivers/gpu/drm/drm_ioctl.c1
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c119
-rw-r--r--drivers/gpu/drm/drm_of.c7
-rw-r--r--drivers/gpu/drm/drm_pagemap.c138
-rw-r--r--drivers/gpu/drm/drm_panel.c73
-rw-r--r--drivers/gpu/drm/drm_panel_backlight_quirks.c113
-rw-r--r--drivers/gpu/drm/drm_panic_qr.rs24
-rw-r--r--drivers/gpu/drm/drm_prime.c6
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c1
-rw-r--r--drivers/gpu/drm/drm_sysfs.c41
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c36
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c9
-rw-r--r--drivers/gpu/drm/gma500/fbdev.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c2
-rw-r--r--drivers/gpu/drm/gud/gud_connector.c25
-rw-r--r--drivers/gpu/drm/gud/gud_drv.c54
-rw-r--r--drivers/gpu/drm/gud/gud_internal.h13
-rw-r--r--drivers/gpu/drm/gud/gud_pipe.c64
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c14
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c22
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h1
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c5
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c11
-rw-r--r--drivers/gpu/drm/i915/Kconfig.debug2
-rw-r--r--drivers/gpu/drm/i915/Makefile2
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.c51
-rw-r--r--drivers/gpu/drm/i915/display/g4x_hdmi.c15
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_plane.c62
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_plane.h1
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm.c32
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c59
-rw-r--r--drivers/gpu/drm/i915/display/intel_alpm.c133
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c46
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.h176
-rw-r--r--drivers/gpu/drm/i915/display/intel_bo.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_bo.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c56
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_cursor.c28
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.c21
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c147
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_conversion.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_conversion.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs_params.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.c20
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_driver.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_irq.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_params.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_params.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c30
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_map.c57
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.c52
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_regs.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h18
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_wa.c35
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_wa.h11
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c137
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c156
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c11
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_test.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c20
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt_defs.h197
-rw-r--r--drivers/gpu/drm/i915/display/intel_encoder.c41
-rw-r--r--drivers/gpu/drm/i915/display/intel_encoder.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c24
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.c39
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c27
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.c28
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_global_state.c32
-rw-r--r--drivers/gpu/drm/i915/display/intel_global_state.h36
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.c53
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c33
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c24
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.c11
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug_irq.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_link_bw.c34
-rw-r--r--drivers/gpu/drm/i915/display/intel_link_bw.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_lpe_audio.c11
-rw-r--r--drivers/gpu/drm/i915/display/intel_lspcon.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.c14
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_panic.c27
-rw-r--r--drivers/gpu/drm/i915/display/intel_panic.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_refclk.c14
-rw-r--r--drivers/gpu/drm/i915/display/intel_pfit.c11
-rw-r--r--drivers/gpu/drm/i915/display/intel_pfit.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane_initial.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c204
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.c51
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c238
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.h72
-rw-r--r--drivers/gpu/drm/i915/display/intel_vblank.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_vbt_defs.h20
-rw-r--r--drivers/gpu/drm/i915/display/intel_wm.c9
-rw-r--r--drivers/gpu/drm/i915/display/skl_scaler.c53
-rw-r--r--drivers/gpu/drm/i915/display/skl_scaler.h13
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c64
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.c20
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c4
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.c32
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c12
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c59
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h11
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c42
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c7
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_wait.c8
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gemfs.c9
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c5
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c70
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_types.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.c6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_debugfs.c5
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_mcr.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset_types.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c13
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c49
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_tlb.c6
-rw-r--r--drivers/gpu/drm/i915/gt/sysfs_engines.c1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c6
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c14
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c12
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.c12
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.h8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c13
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c10
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/debugfs.c12
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c6
-rw-r--r--drivers/gpu/drm/i915/i915_active.c5
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c23
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs_params.c4
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c18
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h8
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c108
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h1
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c19
-rw-r--r--drivers/gpu/drm/i915/i915_list_util.h23
-rw-r--r--drivers/gpu/drm/i915/i915_ptr_util.h66
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h9
-rw-r--r--drivers/gpu/drm/i915/i915_request.h5
-rw-r--r--drivers/gpu/drm/i915/i915_switcheroo.c9
-rw-r--r--drivers/gpu/drm/i915/i915_timer_util.c36
-rw-r--r--drivers/gpu/drm/i915/i915_timer_util.h23
-rw-r--r--drivers/gpu/drm/i915/i915_utils.c30
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h210
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h6
-rw-r--r--drivers/gpu/drm/i915/i915_wait_util.h119
-rw-r--r--drivers/gpu/drm/i915/intel_clock_gating.c35
-rw-r--r--drivers/gpu/drm/i915/intel_gvt_mmio_table.c266
-rw-r--r--drivers/gpu/drm/i915/intel_pcode.c1
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c10
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp.c4
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c8
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c5
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_selftest.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.c5
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_uncore.c8
-rw-r--r--drivers/gpu/drm/i915/soc/intel_dram.c97
-rw-r--r--drivers/gpu/drm/i915/soc/intel_dram.h13
-rw-r--r--drivers/gpu/drm/i915/soc/intel_gmch.c3
-rw-r--r--drivers/gpu/drm/i915/vlv_suspend.c5
-rw-r--r--drivers/gpu/drm/imagination/Kconfig3
-rw-r--r--drivers/gpu/drm/imagination/pvr_device.c22
-rw-r--r--drivers/gpu/drm/imagination/pvr_device.h17
-rw-r--r--drivers/gpu/drm/imagination/pvr_drv.c23
-rw-r--r--drivers/gpu/drm/imagination/pvr_power.c158
-rw-r--r--drivers/gpu/drm/imagination/pvr_power.h15
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm.c15
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c6
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c8
-rw-r--r--drivers/gpu/drm/mediatek/mtk_plane.c3
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_catalog.c92
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c108
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.h14
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c242
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.h3
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c57
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h38
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.c34
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_preempt.c44
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c13
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h19
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h10
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h34
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c21
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c35
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c19
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c23
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c10
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c29
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c47
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c59
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c16
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c34
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c21
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c32
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c95
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c16
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c16
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c12
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c11
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c1
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h2
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c34
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h8
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c72
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c127
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c22
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h9
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_trace.h12
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c24
-rw-r--r--drivers/gpu/drm/msm/msm_kms.c24
-rw-r--r--drivers/gpu/drm/msm/msm_mdss.c5
-rw-r--r--drivers/gpu/drm/msm/msm_submitqueue.c4
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx.xml730
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx_descriptors.xml40
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml50
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml11
-rw-r--r--drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml179
-rw-r--r--drivers/gpu/drm/msm/registers/display/dsi.xml28
-rw-r--r--drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml11
-rw-r--r--drivers/gpu/drm/msm/registers/gen_header.py201
-rw-r--r--drivers/gpu/drm/mxsfb/lcdif_kms.c4
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c4
-rw-r--r--drivers/gpu/drm/nouveau/gv100_fence.c7
-rw-r--r--drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h85
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_exec.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c15
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sched.c35
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sched.h9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_uvmm.c19
diffstat (cgit table; columns were: mode, path, lines changed)

[1371 per-file entries, all regular files (-rw-r--r--), each followed by its
changed-line count. The entries span drivers/gpu/drm (nouveau, nova, omapdrm,
panel, panfrost, panthor, radeon, renesas, rockchip, scheduler, sitronix,
solomon, sti, stm, sysfb, tegra, tests, tidss, tiny, ttm, tyr, v3d, virtio,
vkms, vmwgfx, xe), drivers/gpu/nova-core, drivers/greybus, drivers/hid,
drivers/hsi, drivers/hv, drivers/hwmon, drivers/hwtracing/coresight,
drivers/i2c, drivers/i3c, drivers/idle, drivers/iio, drivers/infiniband,
drivers/input, drivers/interconnect, drivers/iommu, drivers/irqchip,
drivers/isdn, drivers/leds, drivers/mailbox, drivers/md and drivers/media.
Four entries record renames: drivers/media/i2c/gc0310.c (renamed from
drivers/staging/media/atomisp/i2c/atomisp-gc0310.c),
drivers/media/platform/qcom/camss/camss-csid-gen3.c and
camss-csid-gen3.h (renamed from camss-csid-780.c/.h), and
drivers/media/platform/qcom/camss/camss-vfe-gen3.c (renamed from
camss-vfe-780.c).]
-rw-r--r--drivers/media/platform/qcom/iris/iris_vb2.c58
-rw-r--r--drivers/media/platform/qcom/iris/iris_vdec.c251
-rw-r--r--drivers/media/platform/qcom/iris/iris_vdec.h13
-rw-r--r--drivers/media/platform/qcom/iris/iris_venc.c579
-rw-r--r--drivers/media/platform/qcom/iris/iris_venc.h27
-rw-r--r--drivers/media/platform/qcom/iris/iris_vidc.c335
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu2.c2
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu3x.c202
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_buffer.c922
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_buffer.h24
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_common.c14
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_common.h6
-rw-r--r--drivers/media/platform/qcom/venus/core.c113
-rw-r--r--drivers/media/platform/qcom/venus/core.h22
-rw-r--r--drivers/media/platform/qcom/venus/firmware.c42
-rw-r--r--drivers/media/platform/qcom/venus/firmware.h2
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c12
-rw-r--r--drivers/media/platform/qcom/venus/hfi_msgs.c11
-rw-r--r--drivers/media/platform/qcom/venus/hfi_parser.c2
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform.c23
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform.h34
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform_v4.c188
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform_v6.c33
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c25
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus_io.h4
-rw-r--r--drivers/media/platform/qcom/venus/pm_helpers.c11
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c5
-rw-r--r--drivers/media/platform/qcom/venus/venc.c5
-rw-r--r--drivers/media/platform/raspberrypi/pisp_be/pisp_be.c2
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/csi2.c2
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-core.c8
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c2
-rw-r--r--drivers/media/platform/renesas/rcar_drif.c12
-rw-r--r--drivers/media/platform/renesas/rcar_fdp1.c27
-rw-r--r--drivers/media/platform/renesas/rcar_jpu.c29
-rw-r--r--drivers/media/platform/renesas/renesas-ceu.c10
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c2
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h9
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c31
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_drv.c14
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_histo.c6
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_video.c18
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_vspx.c1
-rw-r--r--drivers/media/platform/rockchip/rga/rga.c30
-rw-r--r--drivers/media/platform/rockchip/rga/rga.h5
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-common.h17
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c123
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec.c38
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec.h4
-rw-r--r--drivers/media/platform/samsung/exynos-gsc/gsc-core.h6
-rw-r--r--drivers/media/platform/samsung/exynos-gsc/gsc-m2m.c37
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-core.h5
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-m2m.c19
-rw-r--r--drivers/media/platform/samsung/s3c-camif/camif-capture.c26
-rw-r--r--drivers/media/platform/samsung/s5p-g2d/g2d.c40
-rw-r--r--drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c33
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c17
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c35
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h6
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c34
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c38
-rw-r--r--drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c30
-rw-r--r--drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c20
-rw-r--r--drivers/media/platform/st/sti/delta/delta-v4l2.c41
-rw-r--r--drivers/media/platform/st/sti/hva/hva-v4l2.c38
-rw-r--r--drivers/media/platform/st/sti/hva/hva.h2
-rw-r--r--drivers/media/platform/st/stm32/dma2d/dma2d.c28
-rw-r--r--drivers/media/platform/st/stm32/stm32-csi.c4
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmi.c4
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_capture.c16
-rw-r--r--drivers/media/platform/sunxi/sun8i-di/sun8i-di.c10
-rw-r--r--drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c10
-rw-r--r--drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c8
-rw-r--r--drivers/media/platform/synopsys/hdmirx/snps_hdmirx.h6
-rw-r--r--drivers/media/platform/ti/Kconfig3
-rw-r--r--drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c67
-rw-r--r--drivers/media/platform/ti/omap/omap_vout.c6
-rw-r--r--drivers/media/platform/ti/omap3isp/ispccdc.c8
-rw-r--r--drivers/media/platform/ti/omap3isp/isph3a_aewb.c2
-rw-r--r--drivers/media/platform/ti/omap3isp/isph3a_af.c2
-rw-r--r--drivers/media/platform/ti/omap3isp/isphist.c2
-rw-r--r--drivers/media/platform/ti/omap3isp/ispstat.c7
-rw-r--r--drivers/media/platform/ti/omap3isp/ispstat.h3
-rw-r--r--drivers/media/platform/ti/omap3isp/ispvideo.c36
-rw-r--r--drivers/media/platform/ti/omap3isp/ispvideo.h6
-rw-r--r--drivers/media/platform/ti/vpe/vpe.c21
-rw-r--r--drivers/media/platform/verisilicon/hantro.h4
-rw-r--r--drivers/media/platform/verisilicon/hantro_drv.c13
-rw-r--r--drivers/media/platform/verisilicon/hantro_v4l2.c28
-rw-r--r--drivers/media/platform/verisilicon/imx8m_vpu_hw.c20
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.c10
-rw-r--r--drivers/media/radio/Kconfig17
-rw-r--r--drivers/media/radio/Makefile1
-rw-r--r--drivers/media/radio/radio-aimslab.c2
-rw-r--r--drivers/media/radio/radio-aztech.c2
-rw-r--r--drivers/media/radio/radio-gemtek.c2
-rw-r--r--drivers/media/radio/radio-isa.c2
-rw-r--r--drivers/media/radio/radio-isa.h2
-rw-r--r--drivers/media/radio/radio-keene.c4
-rw-r--r--drivers/media/radio/radio-miropcm20.c2
-rw-r--r--drivers/media/radio/radio-raremono.c4
-rw-r--r--drivers/media/radio/radio-rtrack2.c2
-rw-r--r--drivers/media/radio/radio-terratec.c2
-rw-r--r--drivers/media/radio/radio-wl1273.c2159
-rw-r--r--drivers/media/radio/radio-zoltrix.c2
-rw-r--r--drivers/media/radio/si4713/radio-platform-si4713.c10
-rw-r--r--drivers/media/rc/gpio-ir-recv.c4
-rw-r--r--drivers/media/rc/imon.c99
-rw-r--r--drivers/media/rc/lirc_dev.c9
-rw-r--r--drivers/media/rc/pwm-ir-tx.c5
-rw-r--r--drivers/media/rc/redrat3.c2
-rw-r--r--drivers/media/test-drivers/vicodec/vicodec-core.c23
-rw-r--r--drivers/media/test-drivers/vim2m.c23
-rw-r--r--drivers/media/test-drivers/vimc/vimc-capture.c4
-rw-r--r--drivers/media/test-drivers/vimc/vimc-core.c2
-rw-r--r--drivers/media/test-drivers/visl/visl-core.c5
-rw-r--r--drivers/media/test-drivers/visl/visl.h7
-rw-r--r--drivers/media/test-drivers/vivid/vivid-cec.c12
-rw-r--r--drivers/media/test-drivers/vivid/vivid-core.c100
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-rx.c12
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-rx.h8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-tx.c8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-tx.h4
-rw-r--r--drivers/media/test-drivers/vivid/vivid-sdr-cap.c18
-rw-r--r--drivers/media/test-drivers/vivid/vivid-sdr-cap.h18
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-cap.c10
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-cap.h8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-out.c8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-out.h6
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-cap.c24
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-cap.h24
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-common.c8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-common.h8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-out.c16
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-out.h16
-rw-r--r--drivers/media/tuners/xc4000.c8
-rw-r--r--drivers/media/tuners/xc5000.c14
-rw-r--r--drivers/media/usb/au0828/au0828-video.c5
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c2
-rw-r--r--drivers/media/usb/em28xx/Kconfig1
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c4
-rw-r--r--drivers/media/usb/gspca/gspca.c18
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-video.c69
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c69
-rw-r--r--drivers/media/usb/stk1160/stk1160-core.c3
-rw-r--r--drivers/media/usb/stk1160/stk1160-video.c7
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c56
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c119
-rw-r--r--drivers/media/usb/uvc/uvc_entity.c4
-rw-r--r--drivers/media/usb/uvc/uvc_metadata.c71
-rw-r--r--drivers/media/usb/uvc/uvc_status.c7
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c128
-rw-r--r--drivers/media/usb/uvc/uvc_video.c34
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h25
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c90
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c11
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-api.c13
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-core.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-defs.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-priv.h2
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-request.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c45
-rw-r--r--drivers/media/v4l2-core/v4l2-device.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-dv-timings.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-fh.c16
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c456
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c50
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c40
-rw-r--r--drivers/memory/samsung/exynos-srom.c10
-rw-r--r--drivers/memory/stm32_omm.c2
-rw-r--r--drivers/memory/tegra/tegra210.c146
-rw-r--r--drivers/memstick/core/memstick.c9
-rw-r--r--drivers/memstick/core/ms_block.c4
-rw-r--r--drivers/memstick/core/mspro_block.c7
-rw-r--r--drivers/memstick/host/jmb38x_ms.c3
-rw-r--r--drivers/memstick/host/rtsx_usb_ms.c6
-rw-r--r--drivers/memstick/host/tifm_ms.c3
-rw-r--r--drivers/message/fusion/mptscsih.c2
-rw-r--r--drivers/message/fusion/mptscsih.h2
-rw-r--r--drivers/mfd/88pm886.c1
-rw-r--r--drivers/mfd/Kconfig93
-rw-r--r--drivers/mfd/Makefile8
-rw-r--r--drivers/mfd/adp5585.c1
-rw-r--r--drivers/mfd/arizona-irq.c5
-rw-r--r--drivers/mfd/bq257xx.c99
-rw-r--r--drivers/mfd/cs42l43.c32
-rw-r--r--drivers/mfd/da9063-i2c.c27
-rw-r--r--drivers/mfd/exynos-lpass.c1
-rw-r--r--drivers/mfd/fsl-imx25-tsadc.c1
-rw-r--r--drivers/mfd/intel-lpss-pci.c13
-rw-r--r--drivers/mfd/intel_soc_pmic_chtdc_ti.c2
-rw-r--r--drivers/mfd/kempld-core.c36
-rw-r--r--drivers/mfd/loongson-se.c253
-rw-r--r--drivers/mfd/ls2k-bmc-core.c528
-rw-r--r--drivers/mfd/macsmc.c5
-rw-r--r--drivers/mfd/madera-core.c4
-rw-r--r--drivers/mfd/max7360.c171
-rw-r--r--drivers/mfd/max77705.c38
-rw-r--r--drivers/mfd/max8997.c4
-rw-r--r--drivers/mfd/max8998.c4
-rw-r--r--drivers/mfd/mfd-core.c1
-rw-r--r--drivers/mfd/nct6694.c388
-rw-r--r--drivers/mfd/qnap-mcu.c39
-rw-r--r--drivers/mfd/rohm-bd71828.c44
-rw-r--r--drivers/mfd/rz-mtu3.c2
-rw-r--r--drivers/mfd/simple-mfd-i2c.c22
-rw-r--r--drivers/mfd/sm501.c2
-rw-r--r--drivers/mfd/stm32-lptimer.c1
-rw-r--r--drivers/mfd/stmpe-i2c.c14
-rw-r--r--drivers/mfd/stmpe-spi.c14
-rw-r--r--drivers/mfd/stmpe.c9
-rw-r--r--drivers/mfd/sun4i-gpadc.c1
-rw-r--r--drivers/mfd/tps65010.c2
-rw-r--r--drivers/mfd/tps6594-core.c59
-rw-r--r--drivers/mfd/ucb1x00-core.c2
-rw-r--r--drivers/mfd/vexpress-sysreg.c25
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/ad525x_dpot.c7
-rw-r--r--drivers/misc/amd-sbi/Kconfig1
-rw-r--r--drivers/misc/apds990x.c1
-rw-r--r--drivers/misc/cardreader/rts5227.c13
-rw-r--r--drivers/misc/cardreader/rts5228.c12
-rw-r--r--drivers/misc/cardreader/rts5249.c16
-rw-r--r--drivers/misc/cardreader/rts5264.c20
-rw-r--r--drivers/misc/cardreader/rts5264.h1
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.h2
-rw-r--r--drivers/misc/cardreader/rtsx_usb.c7
-rw-r--r--drivers/misc/dw-xdata-pcie.c5
-rw-r--r--drivers/misc/eeprom/Kconfig18
-rw-r--r--drivers/misc/eeprom/Makefile1
-rw-r--r--drivers/misc/eeprom/at25.c67
-rw-r--r--drivers/misc/eeprom/m24lr.c606
-rw-r--r--drivers/misc/fastrpc.c143
-rw-r--r--drivers/misc/genwqe/card_ddcb.c2
-rw-r--r--drivers/misc/hisi_hikey_usb.c3
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c14
-rw-r--r--drivers/misc/lis3lv02d/Kconfig4
-rw-r--r--drivers/misc/lkdtm/cfi.c2
-rw-r--r--drivers/misc/lkdtm/fortify.c6
-rw-r--r--drivers/misc/lkdtm/perms.c5
-rw-r--r--drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c2
-rw-r--r--drivers/misc/mei/Kconfig13
-rw-r--r--drivers/misc/mei/Makefile1
-rw-r--r--drivers/misc/mei/bus-fixup.c6
-rw-r--r--drivers/misc/mei/bus.c39
-rw-r--r--drivers/misc/mei/client.c82
-rw-r--r--drivers/misc/mei/client.h6
-rw-r--r--drivers/misc/mei/dma-ring.c8
-rw-r--r--drivers/misc/mei/gsc-me.c20
-rw-r--r--drivers/misc/mei/hbm.c121
-rw-r--r--drivers/misc/mei/hw-me.c153
-rw-r--r--drivers/misc/mei/hw-txe.c60
-rw-r--r--drivers/misc/mei/hw.h2
-rw-r--r--drivers/misc/mei/init.c66
-rw-r--r--drivers/misc/mei/interrupt.c45
-rw-r--r--drivers/misc/mei/main.c137
-rw-r--r--drivers/misc/mei/mei_dev.h24
-rw-r--r--drivers/misc/mei/mei_lb.c312
-rw-r--r--drivers/misc/mei/pci-me.c20
-rw-r--r--drivers/misc/mei/pci-txe.c4
-rw-r--r--drivers/misc/mei/platform-vsc.c26
-rw-r--r--drivers/misc/pci_endpoint_test.c16
-rw-r--r--drivers/misc/ti_fpc202.c2
-rw-r--r--drivers/misc/vmw_balloon.c4
-rw-r--r--drivers/mmc/core/block.c50
-rw-r--r--drivers/mmc/core/bus.c12
-rw-r--r--drivers/mmc/core/card.h9
-rw-r--r--drivers/mmc/core/core.c32
-rw-r--r--drivers/mmc/core/core.h6
-rw-r--r--drivers/mmc/core/host.c4
-rw-r--r--drivers/mmc/core/mmc.c70
-rw-r--r--drivers/mmc/core/mmc_ops.c72
-rw-r--r--drivers/mmc/core/mmc_test.c10
-rw-r--r--drivers/mmc/core/regulator.c77
-rw-r--r--drivers/mmc/core/sd.c2
-rw-r--r--drivers/mmc/core/sdio.c6
-rw-r--r--drivers/mmc/core/sdio_bus.c3
-rw-r--r--drivers/mmc/host/Kconfig14
-rw-r--r--drivers/mmc/host/alcor.c8
-rw-r--r--drivers/mmc/host/atmel-mci.c9
-rw-r--r--drivers/mmc/host/au1xmmc.c18
-rw-r--r--drivers/mmc/host/cb710-mmc.c19
-rw-r--r--drivers/mmc/host/davinci_mmc.c16
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c13
-rw-r--r--drivers/mmc/host/dw_mmc-k3.c9
-rw-r--r--drivers/mmc/host/dw_mmc-pci.c9
-rw-r--r--drivers/mmc/host/dw_mmc-rockchip.c18
-rw-r--r--drivers/mmc/host/dw_mmc.h3
-rw-r--r--drivers/mmc/host/meson-mx-sdhc-clkc.c4
-rw-r--r--drivers/mmc/host/mmc_spi.c4
-rw-r--r--drivers/mmc/host/mmci.c9
-rw-r--r--drivers/mmc/host/mtk-sd.c14
-rw-r--r--drivers/mmc/host/mvsdio.c2
-rw-r--r--drivers/mmc/host/mxs-mmc.c6
-rw-r--r--drivers/mmc/host/omap_hsmmc.c13
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c6
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c3
-rw-r--r--drivers/mmc/host/rtsx_usb_sdmmc.c40
-rw-r--r--drivers/mmc/host/sdhci-acpi.c18
-rw-r--r--drivers/mmc/host/sdhci-brcmstb.c8
-rw-r--r--drivers/mmc/host/sdhci-cadence.c70
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c13
-rw-r--r--drivers/mmc/host/sdhci-msm.c36
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c41
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c12
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c13
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c8
-rw-r--r--drivers/mmc/host/sdhci-omap.c18
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c15
-rw-r--r--drivers/mmc/host/sdhci-pci-gli.c105
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c52
-rw-r--r--drivers/mmc/host/sdhci-s3c.c11
-rw-r--r--drivers/mmc/host/sdhci-spear.c6
-rw-r--r--drivers/mmc/host/sdhci-sprd.c10
-rw-r--r--drivers/mmc/host/sdhci-st.c6
-rw-r--r--drivers/mmc/host/sdhci-tegra.c13
-rw-r--r--drivers/mmc/host/sdhci-uhs2.c3
-rw-r--r--drivers/mmc/host/sdhci-xenon.c13
-rw-r--r--drivers/mmc/host/sdhci.c34
-rw-r--r--drivers/mmc/host/sdhci.h7
-rw-r--r--drivers/mmc/host/sdhci_am654.c29
-rw-r--r--drivers/mmc/host/sh_mmcif.c13
-rw-r--r--drivers/mmc/host/sunxi-mmc.c11
-rw-r--r--drivers/mmc/host/tifm_sd.c4
-rw-r--r--drivers/mmc/host/tmio_mmc.h15
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c33
-rw-r--r--drivers/mmc/host/toshsd.c8
-rw-r--r--drivers/mmc/host/usdhi6rol0.c4
-rw-r--r--drivers/mmc/host/via-sdmmc.c10
-rw-r--r--drivers/mmc/host/wmt-sdmmc.c16
-rw-r--r--drivers/most/core.c2
-rw-r--r--drivers/mtd/chips/cfi_probe.c2
-rw-r--r--drivers/mtd/chips/jedec_probe.c4
-rw-r--r--drivers/mtd/devices/Kconfig4
-rw-r--r--drivers/mtd/ftl.c2
-rw-r--r--drivers/mtd/hyperbus/hbmc-am654.c1
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c10
-rw-r--r--drivers/mtd/lpddr/qinfo_probe.c4
-rw-r--r--drivers/mtd/mtd_blkdevs.c4
-rw-r--r--drivers/mtd/mtdcore.c61
-rw-r--r--drivers/mtd/mtdoops.c5
-rw-r--r--drivers/mtd/mtdswap.c4
-rw-r--r--drivers/mtd/nand/Kconfig8
-rw-r--r--drivers/mtd/nand/Makefile1
-rw-r--r--drivers/mtd/nand/core.c131
-rw-r--r--drivers/mtd/nand/ecc-mxic.c14
-rw-r--r--drivers/mtd/nand/ecc-realtek.c464
-rw-r--r--drivers/mtd/nand/ecc.c2
-rw-r--r--drivers/mtd/nand/onenand/onenand_omap2.c1
-rw-r--r--drivers/mtd/nand/qpic_common.c6
-rw-r--r--drivers/mtd/nand/raw/Kconfig34
-rw-r--r--drivers/mtd/nand/raw/Makefile3
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c33
-rw-r--r--drivers/mtd/nand/raw/atmel/pmecc.c1
-rw-r--r--drivers/mtd/nand/raw/fsmc_nand.c6
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c14
-rw-r--r--drivers/mtd/nand/raw/loongson-nand-controller.c1024
-rw-r--r--drivers/mtd/nand/raw/loongson1-nand-controller.c836
-rw-r--r--drivers/mtd/nand/raw/nand_base.c131
-rw-r--r--drivers/mtd/nand/raw/nandsim.c7
-rw-r--r--drivers/mtd/nand/raw/nuvoton-ma35d1-nand-controller.c4
-rw-r--r--drivers/mtd/nand/raw/omap2.c27
-rw-r--r--drivers/mtd/nand/raw/pl35x-nand-controller.c3
-rw-r--r--drivers/mtd/nand/raw/rockchip-nand-controller.c1
-rw-r--r--drivers/mtd/nand/raw/s3c2410.c1230
-rw-r--r--drivers/mtd/nand/raw/stm32_fmc2_nand.c47
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c1
-rw-r--r--drivers/mtd/nand/spi/Makefile2
-rw-r--r--drivers/mtd/nand/spi/core.c75
-rw-r--r--drivers/mtd/nand/spi/fmsh.c74
-rw-r--r--drivers/mtd/nand/spi/gigadevice.c107
-rw-r--r--drivers/mtd/nand/spi/winbond.c37
-rw-r--r--drivers/mtd/rfd_ftl.c4
-rw-r--r--drivers/mtd/spi-nor/core.c145
-rw-r--r--drivers/mtd/ubi/block.c4
-rw-r--r--drivers/net/Kconfig15
-rw-r--r--drivers/net/Space.c3
-rw-r--r--drivers/net/amt.c6
-rw-r--r--drivers/net/bonding/bond_3ad.c98
-rw-r--r--drivers/net/bonding/bond_main.c117
-rw-r--r--drivers/net/bonding/bond_netlink.c46
-rw-r--r--drivers/net/bonding/bond_options.c55
-rw-r--r--drivers/net/bonding/bond_sysfs.c6
-rw-r--r--drivers/net/can/dev/calc_bittiming.c10
-rw-r--r--drivers/net/can/dev/dev.c80
-rw-r--r--drivers/net/can/dev/netlink.c592
-rw-r--r--drivers/net/can/m_can/m_can.c6
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd.c4
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd_user.h4
-rw-r--r--drivers/net/can/peak_canfd/peak_pciefd_main.c6
-rw-r--r--drivers/net/can/rcar/rcar_can.c300
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c91
-rw-r--r--drivers/net/can/sja1000/peak_pci.c6
-rw-r--r--drivers/net/can/sja1000/peak_pcmcia.c8
-rw-r--r--drivers/net/can/spi/hi311x.c35
-rw-r--r--drivers/net/can/spi/mcp251x.c7
-rw-r--r--drivers/net/can/sun4i_can.c1
-rw-r--r--drivers/net/can/usb/Kconfig11
-rw-r--r--drivers/net/can/usb/Makefile1
-rw-r--r--drivers/net/can/usb/esd_usb.c64
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_core.c3
-rw-r--r--drivers/net/can/usb/mcba_usb.c1
-rw-r--r--drivers/net/can/usb/nct6694_canfd.c832
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c6
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c8
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.h4
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c3
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c4
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.h4
-rw-r--r--drivers/net/can/vcan.c2
-rw-r--r--drivers/net/can/vxcan.c2
-rw-r--r--drivers/net/can/xilinx_can.c16
-rw-r--r--drivers/net/dsa/Kconfig16
-rw-r--r--drivers/net/dsa/Makefile6
-rw-r--r--drivers/net/dsa/b53/b53_common.c19
-rw-r--r--drivers/net/dsa/b53/b53_mmap.c35
-rw-r--r--drivers/net/dsa/dsa_loop.c77
-rw-r--r--drivers/net/dsa/dsa_loop.h20
-rw-r--r--drivers/net/dsa/dsa_loop_bdinfo.c36
-rw-r--r--drivers/net/dsa/ks8995.c (renamed from drivers/net/phy/spi_ks8995.c)453
-rw-r--r--drivers/net/dsa/lantiq/Kconfig7
-rw-r--r--drivers/net/dsa/lantiq/Makefile1
-rw-r--r--drivers/net/dsa/lantiq/lantiq_gswip.c (renamed from drivers/net/dsa/lantiq_gswip.c)490
-rw-r--r--drivers/net/dsa/lantiq/lantiq_gswip.h276
-rw-r--r--drivers/net/dsa/lantiq/lantiq_pce.h (renamed from drivers/net/dsa/lantiq_pce.h)9
-rw-r--r--drivers/net/dsa/microchip/ksz8.c20
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c52
-rw-r--r--drivers/net/dsa/mt7530.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c17
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h2
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.h1
-rw-r--r--drivers/net/dsa/mv88e6xxx/leds.c17
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.c70
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.h133
-rw-r--r--drivers/net/dsa/ocelot/felix.c4
-rw-r--r--drivers/net/dsa/ocelot/felix.h3
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c3
-rw-r--r--drivers/net/dsa/realtek/realtek.h3
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-core.c2
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/airoha/airoha_eth.c11
-rw-r--r--drivers/net/ethernet/airoha/airoha_eth.h27
-rw-r--r--drivers/net/ethernet/airoha/airoha_npu.c200
-rw-r--r--drivers/net/ethernet/airoha/airoha_npu.h36
-rw-r--r--drivers/net/ethernet/airoha/airoha_ppe.c264
-rw-r--r--drivers/net/ethernet/airoha/airoha_regs.h7
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c5
-rw-r--r--drivers/net/ethernet/amd/pds_core/main.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/Makefile2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h22
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c15
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c30
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-i2c.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pps.c74
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ptp.c26
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h17
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig1
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge.h27
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_core.c16
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_db.h34
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c482
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h31
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_netdev.c2217
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_netdev.h250
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_resc.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_resc.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_rmem.c67
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_rmem.h14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c146
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h15
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c15
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c152
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c35
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h7
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c21
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c7
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c3
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c66
-rw-r--r--drivers/net/ethernet/cadence/macb.h71
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c483
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c8
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c3
-rw-r--r--drivers/net/ethernet/cavium/liquidio/request_manager.c4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/response_manager.c3
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c20
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c20
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h7
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c2
-rw-r--r--drivers/net/ethernet/dlink/Kconfig20
-rw-r--r--drivers/net/ethernet/dlink/Makefile1
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c9
-rw-r--r--drivers/net/ethernet/dlink/sundance.c1990
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c2
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c7
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c4
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/Kconfig3
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c209
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h24
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_hw.h6
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_pf.c8
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c86
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h1
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c14
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ptp.c5
-rw-r--r--drivers/net/ethernet/freescale/enetc/ntmp.c15
-rw-r--r--drivers/net/ethernet/freescale/fec.h11
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c71
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c2
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c4
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_ethtool.c3
-rw-r--r--drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c5
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c2
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c35
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c14
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c15
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c36
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c7
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_devlink.c10
-rw-r--r--drivers/net/ethernet/huawei/hinic3/Makefile6
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c915
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.h156
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_common.c23
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_common.h27
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_csr.h79
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c776
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_eqs.h122
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c211
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h4
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c394
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h34
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h151
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c541
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c417
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h32
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_irq.c138
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_lld.c9
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_main.c69
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c848
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h126
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c21
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h119
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c426
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c152
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h20
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h19
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c870
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h39
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_pci_id_tbl.h9
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rss.c336
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rss.h14
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rx.c226
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rx.h38
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_tx.c190
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_tx.h30
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_wq.c109
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_wq.h19
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c59
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h6
-rw-r--r--drivers/net/ethernet/intel/Kconfig2
-rw-r--r--drivers/net/ethernet/intel/Makefile2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c4
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c3
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c12
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c4
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_common.c5
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_common.h2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.c2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_vf.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c34
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c123
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c46
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c18
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c110
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h3
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c2
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile9
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/health.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h41
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adapter.c59
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adapter.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h117
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c390
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c143
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.c44
-rw-r--r--drivers/net/ethernet/intel/ice/ice_debugfs.c633
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fwlog.c474
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fwlog.h79
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_idc.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c1008
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.h22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h41
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c182
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_trace.h10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c270
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h16
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.h14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c153
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.h22
-rw-r--r--drivers/net/ethernet/intel/ice/virt/allowlist.c (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c)2
-rw-r--r--drivers/net/ethernet/intel/ice/virt/allowlist.h (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.h)0
-rw-r--r--drivers/net/ethernet/intel/ice/virt/fdir.c (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c)0
-rw-r--r--drivers/net/ethernet/intel/ice/virt/fdir.h (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h)0
-rw-r--r--drivers/net/ethernet/intel/ice/virt/queues.c973
-rw-r--r--drivers/net/ethernet/intel/ice/virt/queues.h20
-rw-r--r--drivers/net/ethernet/intel/ice/virt/rss.c719
-rw-r--r--drivers/net/ethernet/intel/ice/virt/rss.h18
-rw-r--r--drivers/net/ethernet/intel/ice/virt/virtchnl.c (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl.c)1683
-rw-r--r--drivers/net/ethernet/intel/ice/virt/virtchnl.h (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl.h)0
-rw-r--r--drivers/net/ethernet/intel/idpf/Kconfig2
-rw-r--r--drivers/net/ethernet/intel/idpf/Makefile3
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf.h57
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_dev.c11
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ethtool.c64
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_idc.c4
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h6
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lib.c188
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_main.c1
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ptp.c11
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c171
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c1693
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.h287
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_vf_dev.c11
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.c1245
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.h33
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c4
-rw-r--r--drivers/net/ethernet/intel/idpf/xdp.c486
-rw-r--r--drivers/net/ethernet/intel/idpf/xdp.h175
-rw-r--r--drivers/net/ethernet/intel/idpf/xsk.c633
-rw-r--r--drivers/net/ethernet/intel/idpf/xsk.h33
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.c4
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c13
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c6
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c5
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/igc/igc_i225.c2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c26
-rw-r--r--drivers/net/ethernet/intel/igc/igc_nvm.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/devlink/devlink.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c130
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c66
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c14
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c2
-rw-r--r--drivers/net/ethernet/intel/libie/Kconfig9
-rw-r--r--drivers/net/ethernet/intel/libie/Makefile4
-rw-r--r--drivers/net/ethernet/intel/libie/adminq.c2
-rw-r--r--drivers/net/ethernet/intel/libie/fwlog.c1115
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c15
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c19
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c10
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.c16
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c3
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c12
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h15
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c32
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/rep.c13
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/rep.h1
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c2
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_pci.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c10
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_offload.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c34
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c147
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c79
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.c91
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.h30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c79
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c952
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.h61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c200
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.h121
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c93
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c128
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/adj_vport.c209
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c199
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c238
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h62
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c177
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c204
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c150
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c45
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c131
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c395
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c567
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c118
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c1862
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h60
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c89
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_cmd.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c58
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wc.c42
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h1
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic.h14
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_csr.h37
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_devlink.c249
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c215
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.c482
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.h92
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw_log.c2
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw_log.h2
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c66
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h28
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.c65
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.h6
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.c186
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.h14
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_pci.c76
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_rpc.c145
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_rpc.h4
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.c1023
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.h46
-rw-r--r--drivers/net/ethernet/microchip/lan865x/lan865x.c30
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Kconfig2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c18
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c5
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c12
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c10
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c7
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_bpf.c46
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c161
-rw-r--r--drivers/net/ethernet/mscc/ocelot_stats.c2
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c13
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/tls.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/dp.c16
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/dp.c16
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c2
-rw-r--r--drivers/net/ethernet/oa_tc6.c3
-rw-r--r--drivers/net/ethernet/pensando/Kconfig1
-rw-r--r--drivers/net/ethernet/pensando/ionic/Makefile2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h7
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_api.h131
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_aux.c102
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_aux.h10
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c7
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c270
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h28
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h118
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c47
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h3
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c7
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_devlink.c9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ooo.c9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c2
-rw-r--r--drivers/net/ethernet/qualcomm/Kconfig15
-rw-r--r--drivers/net/ethernet/qualcomm/Makefile1
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/Makefile7
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe.c239
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe.h39
-rw-r--r-- drivers/net/ethernet/qualcomm/ppe/ppe_config.c | 2034
-rw-r--r-- drivers/net/ethernet/qualcomm/ppe/ppe_config.h | 317
-rw-r--r-- drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c | 847
-rw-r--r-- drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.h | 16
-rw-r--r-- drivers/net/ethernet/qualcomm/ppe/ppe_regs.h | 591
-rw-r--r-- drivers/net/ethernet/realtek/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/realtek/r8169_main.c | 12
-rw-r--r-- drivers/net/ethernet/realtek/rtase/rtase.h | 2
-rw-r--r-- drivers/net/ethernet/renesas/Makefile | 1
-rw-r--r-- drivers/net/ethernet/renesas/ravb_main.c | 3
-rw-r--r-- drivers/net/ethernet/renesas/rcar_gen4_ptp.c | 76
-rw-r--r-- drivers/net/ethernet/renesas/rcar_gen4_ptp.h | 33
-rw-r--r-- drivers/net/ethernet/renesas/rswitch.h | 43
-rw-r--r-- drivers/net/ethernet/renesas/rswitch_l2.c | 316
-rw-r--r-- drivers/net/ethernet/renesas/rswitch_l2.h | 15
-rw-r--r-- drivers/net/ethernet/renesas/rswitch_main.c (renamed from drivers/net/ethernet/renesas/rswitch.c) | 97
-rw-r--r-- drivers/net/ethernet/renesas/rtsn.c | 3
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.c | 34
-rw-r--r-- drivers/net/ethernet/sfc/ef100_tx.c | 17
-rw-r--r-- drivers/net/ethernet/sfc/efx_channels.c | 6
-rw-r--r-- drivers/net/ethernet/sfc/efx_common.c | 3
-rw-r--r-- drivers/net/ethernet/sfc/ethtool.c | 3
-rw-r--r-- drivers/net/ethernet/sfc/falcon/efx.c | 8
-rw-r--r-- drivers/net/ethernet/sfc/siena/efx_channels.c | 6
-rw-r--r-- drivers/net/ethernet/sfc/siena/efx_common.c | 3
-rw-r--r-- drivers/net/ethernet/sfc/siena/ethtool.c | 3
-rw-r--r-- drivers/net/ethernet/sfc/tc_encap_actions.c | 6
-rw-r--r-- drivers/net/ethernet/smsc/smsc911x.c | 14
-rw-r--r-- drivers/net/ethernet/spacemit/Kconfig | 29
-rw-r--r-- drivers/net/ethernet/spacemit/Makefile | 6
-rw-r--r-- drivers/net/ethernet/spacemit/k1_emac.c | 2159
-rw-r--r-- drivers/net/ethernet/spacemit/k1_emac.h | 416
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/Kconfig | 24
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/Makefile | 1
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/common.h | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c | 16
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c | 30
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c | 25
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c | 86
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c | 85
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c | 1
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c | 10
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c | 108
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 67
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c | 6
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c | 94
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-sun55i.c | 159
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 51
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c | 47
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 1
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 8
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c | 13
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c | 9
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/hwif.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 17
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_est.c | 9
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_est.h | 1
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 31
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c | 28
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 344
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 391
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 78
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 94
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 54
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 7
-rw-r--r-- drivers/net/ethernet/ti/Kconfig | 12
-rw-r--r-- drivers/net/ethernet/ti/Makefile | 3
-rw-r--r-- drivers/net/ethernet/ti/am65-cpsw-ethtool.c | 27
-rw-r--r-- drivers/net/ethernet/ti/am65-cpsw-nuss.c | 11
-rw-r--r-- drivers/net/ethernet/ti/icssg/icss_iep.c | 127
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_common.c | 15
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_prueth.c | 98
-rw-r--r-- drivers/net/ethernet/ti/icssm/icssm_prueth.c | 1746
-rw-r--r-- drivers/net/ethernet/ti/icssm/icssm_prueth.h | 262
-rw-r--r-- drivers/net/ethernet/ti/icssm/icssm_prueth_ptp.h | 85
-rw-r--r-- drivers/net/ethernet/ti/icssm/icssm_switch.h | 257
-rw-r--r-- drivers/net/ethernet/wangxun/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_ethtool.c | 224
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_ethtool.h | 13
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_hw.c | 137
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_hw.h | 5
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_lib.c | 113
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_sriov.c | 22
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_type.h | 28
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_vf.h | 72
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c | 4
-rw-r--r-- drivers/net/ethernet/wangxun/libwx/wx_vf_lib.h | 1
-rw-r--r-- drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c | 9
-rw-r--r-- drivers/net/ethernet/wangxun/ngbe/ngbe_main.c | 6
-rw-r--r-- drivers/net/ethernet/wangxun/ngbevf/ngbevf_main.c | 5
-rw-r--r-- drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c | 9
-rw-r--r-- drivers/net/ethernet/wangxun/txgbe/txgbe_main.c | 1
-rw-r--r-- drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c | 5
-rw-r--r-- drivers/net/ethernet/wiznet/w5100.c | 2
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 28
-rw-r--r-- drivers/net/ethernet/xircom/xirc2ps_cs.c | 2
-rw-r--r-- drivers/net/fjes/fjes_main.c | 5
-rw-r--r-- drivers/net/geneve.c | 4
-rw-r--r-- drivers/net/gtp.c | 7
-rw-r--r-- drivers/net/hamradio/6pack.c | 57
-rw-r--r-- drivers/net/hamradio/bpqether.c | 2
-rw-r--r-- drivers/net/hyperv/Kconfig | 2
-rw-r--r-- drivers/net/hyperv/hyperv_net.h | 3
-rw-r--r-- drivers/net/hyperv/netvsc.c | 17
-rw-r--r-- drivers/net/hyperv/netvsc_drv.c | 29
-rw-r--r-- drivers/net/hyperv/rndis_filter.c | 23
-rw-r--r-- drivers/net/ipa/Kconfig | 2
-rw-r--r-- drivers/net/ipa/ipa_sysfs.c | 6
-rw-r--r-- drivers/net/ipvlan/ipvlan_core.c | 4
-rw-r--r-- drivers/net/macsec.c | 182
-rw-r--r-- drivers/net/macvlan.c | 2
-rw-r--r-- drivers/net/mctp/mctp-usb.c | 1
-rw-r--r-- drivers/net/mdio/Kconfig | 5
-rw-r--r-- drivers/net/mdio/mdio-bcm-unimac.c | 9
-rw-r--r-- drivers/net/mdio/mdio-i2c.c | 39
-rw-r--r-- drivers/net/mdio/of_mdio.c | 3
-rw-r--r-- drivers/net/netconsole.c | 91
-rw-r--r-- drivers/net/netdevsim/Makefile | 4
-rw-r--r-- drivers/net/netdevsim/dev.c | 6
-rw-r--r-- drivers/net/netdevsim/ethtool.c | 25
-rw-r--r-- drivers/net/netdevsim/health.c | 4
-rw-r--r-- drivers/net/netdevsim/netdev.c | 53
-rw-r--r-- drivers/net/netdevsim/netdevsim.h | 27
-rw-r--r-- drivers/net/netdevsim/psp.c | 225
-rw-r--r-- drivers/net/pcs/Kconfig | 11
-rw-r--r-- drivers/net/pcs/pcs-lynx.c | 11
-rw-r--r-- drivers/net/pcs/pcs-rzn1-miic.c | 319
-rw-r--r-- drivers/net/phy/Kconfig | 13
-rw-r--r-- drivers/net/phy/Makefile | 3
-rw-r--r-- drivers/net/phy/aquantia/aquantia.h | 52
-rw-r--r-- drivers/net/phy/aquantia/aquantia_main.c | 702
-rw-r--r-- drivers/net/phy/as21xxx.c | 7
-rw-r--r-- drivers/net/phy/ax88796b.c | 5
-rw-r--r-- drivers/net/phy/bcm-phy-ptp.c | 6
-rw-r--r-- drivers/net/phy/broadcom.c | 147
-rw-r--r-- drivers/net/phy/dp83640.c | 58
-rw-r--r-- drivers/net/phy/fixed_phy.c | 217
-rw-r--r-- drivers/net/phy/marvell-88x2222.c | 13
-rw-r--r-- drivers/net/phy/marvell.c | 47
-rw-r--r-- drivers/net/phy/marvell10g.c | 7
-rw-r--r-- drivers/net/phy/mdio-boardinfo.c | 79
-rw-r--r-- drivers/net/phy/mdio-boardinfo.h | 18
-rw-r--r-- drivers/net/phy/mdio_bus.c | 1
-rw-r--r-- drivers/net/phy/mdio_bus_provider.c | 36
-rw-r--r-- drivers/net/phy/mediatek/mtk-2p5ge.c | 104
-rw-r--r-- drivers/net/phy/micrel.c | 1004
-rw-r--r-- drivers/net/phy/motorcomm.c | 117
-rw-r--r-- drivers/net/phy/mscc/mscc.h | 19
-rw-r--r-- drivers/net/phy/mscc/mscc_main.c | 50
-rw-r--r-- drivers/net/phy/mscc/mscc_ptp.c | 102
-rw-r--r-- drivers/net/phy/mscc/mscc_ptp.h | 1
-rw-r--r-- drivers/net/phy/mxl-86110.c | 392
-rw-r--r-- drivers/net/phy/nxp-c45-tja11xx-macsec.c | 8
-rw-r--r-- drivers/net/phy/nxp-c45-tja11xx.c | 23
-rw-r--r-- drivers/net/phy/phy-caps.h | 2
-rw-r--r-- drivers/net/phy/phy.c | 27
-rw-r--r-- drivers/net/phy/phy_caps.c | 2
-rw-r--r-- drivers/net/phy/phy_device.c | 36
-rw-r--r-- drivers/net/phy/phylink.c | 145
-rw-r--r-- drivers/net/phy/qcom/at803x.c | 9
-rw-r--r-- drivers/net/phy/qcom/qca807x.c | 9
-rw-r--r-- drivers/net/phy/realtek/realtek_main.c | 263
-rw-r--r-- drivers/net/phy/sfp-bus.c | 107
-rw-r--r-- drivers/net/phy/sfp.c | 88
-rw-r--r-- drivers/net/phy/sfp.h | 4
-rw-r--r-- drivers/net/phy/smsc.c | 1
-rw-r--r-- drivers/net/ppp/Kconfig | 3
-rw-r--r-- drivers/net/ppp/bsd_comp.c | 4
-rw-r--r-- drivers/net/ppp/ppp_generic.c | 143
-rw-r--r-- drivers/net/ppp/ppp_mppe.c | 108
-rw-r--r-- drivers/net/ppp/pppoe.c | 129
-rw-r--r-- drivers/net/ppp/pptp.c | 18
-rw-r--r-- drivers/net/pse-pd/Kconfig | 11
-rw-r--r-- drivers/net/pse-pd/Makefile | 1
-rw-r--r-- drivers/net/pse-pd/pd692x0.c | 63
-rw-r--r-- drivers/net/pse-pd/si3474.c | 578
-rw-r--r-- drivers/net/pse-pd/tps23881.c | 2
-rw-r--r-- drivers/net/tun.c | 7
-rw-r--r-- drivers/net/usb/Kconfig | 1
-rw-r--r-- drivers/net/usb/asix_devices.c | 30
-rw-r--r-- drivers/net/usb/cdc_ncm.c | 7
-rw-r--r-- drivers/net/usb/lan78xx.c | 17
-rw-r--r-- drivers/net/usb/qmi_wwan.c | 4
-rw-r--r-- drivers/net/usb/rtl8150.c | 2
-rw-r--r-- drivers/net/virtio_net.c | 57
-rw-r--r-- drivers/net/vrf.c | 4
-rw-r--r-- drivers/net/vxlan/vxlan_core.c | 25
-rw-r--r-- drivers/net/vxlan/vxlan_private.h | 4
-rw-r--r-- drivers/net/wan/framer/pef2256/pef2256.c | 28
-rw-r--r-- drivers/net/wan/lapbether.c | 2
-rw-r--r-- drivers/net/wireguard/device.c | 6
-rw-r--r-- drivers/net/wireguard/queueing.h | 13
-rw-r--r-- drivers/net/wireless/ath/ath10k/leds.c | 3
-rw-r--r-- drivers/net/wireless/ath/ath10k/mac.c | 12
-rw-r--r-- drivers/net/wireless/ath/ath10k/snoc.c | 14
-rw-r--r-- drivers/net/wireless/ath/ath10k/wmi.c | 39
-rw-r--r-- drivers/net/wireless/ath/ath11k/ahb.c | 17
-rw-r--r-- drivers/net/wireless/ath/ath11k/ce.c | 3
-rw-r--r-- drivers/net/wireless/ath/ath11k/core.c | 6
-rw-r--r-- drivers/net/wireless/ath/ath11k/core.h | 2
-rw-r--r-- drivers/net/wireless/ath/ath11k/dp_rx.c | 1
-rw-r--r-- drivers/net/wireless/ath/ath11k/hal.c | 16
-rw-r--r-- drivers/net/wireless/ath/ath11k/hal.h | 1
-rw-r--r-- drivers/net/wireless/ath/ath11k/mac.c | 111
-rw-r--r-- drivers/net/wireless/ath/ath11k/qmi.c | 19
-rw-r--r-- drivers/net/wireless/ath/ath12k/ahb.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath12k/ce.c | 5
-rw-r--r-- drivers/net/wireless/ath/ath12k/core.h | 7
-rw-r--r-- drivers/net/wireless/ath/ath12k/debug.h | 1
-rw-r--r-- drivers/net/wireless/ath/ath12k/dp.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath12k/dp.h | 12
-rw-r--r-- drivers/net/wireless/ath/ath12k/dp_mon.c | 56
-rw-r--r-- drivers/net/wireless/ath/ath12k/dp_rx.c | 352
-rw-r--r-- drivers/net/wireless/ath/ath12k/dp_rx.h | 18
-rw-r--r-- drivers/net/wireless/ath/ath12k/hal.h | 1
-rw-r--r-- drivers/net/wireless/ath/ath12k/hal_desc.h | 1
-rw-r--r-- drivers/net/wireless/ath/ath12k/hal_rx.c | 3
-rw-r--r-- drivers/net/wireless/ath/ath12k/hal_rx.h | 12
-rw-r--r-- drivers/net/wireless/ath/ath12k/mac.c | 239
-rw-r--r-- drivers/net/wireless/ath/ath12k/mac.h | 3
-rw-r--r-- drivers/net/wireless/ath/ath12k/qmi.c | 24
-rw-r--r-- drivers/net/wireless/ath/ath12k/qmi.h | 16
-rw-r--r-- drivers/net/wireless/ath/ath12k/wmi.c | 161
-rw-r--r-- drivers/net/wireless/ath/ath12k/wmi.h | 33
-rw-r--r-- drivers/net/wireless/ath/carl9170/rx.c | 2
-rw-r--r-- drivers/net/wireless/ath/wil6210/cfg80211.c | 1
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c | 6
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 23
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c | 4
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c | 14
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 8
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h | 1
-rw-r--r-- drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h | 24
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/cfg/bz.c | 18
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/cfg/dr.c | 13
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c | 22
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/cfg/sc.c | 18
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c | 10
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/dvm/power.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/acpi.c | 31
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/acpi.h | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/api/d3.h | 113
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/api/offload.h | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/api/power.h | 34
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/api/rs.h | 35
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/dbg.c | 43
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/dump.c | 54
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/error-dump.h | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/pnvm.c | 81
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/regulatory.c | 53
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/regulatory.h | 1
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/runtime.h | 10
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/uefi.c | 7
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-config.h | 40
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 47
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-io.c | 95
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-io.h | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 80
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h | 74
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-trans.c | 71
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-trans.h | 81
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mei/sap.h | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mld/d3.c | 553
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mld/debugfs.c | 8
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mld/iface.c | 39
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mld/iface.h | 5
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mld/key.c | 38
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mld/key.h | 7
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mld/link.c | 26
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mld/link.h | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mld/mac80211.c | 19
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mld/mld.c | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mld/mlo.c | 34
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mld/notif.c | 1
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mld/regulatory.c | 28
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mld/roc.c | 10
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mld/rx.c | 26
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mld/scan.c | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mld/sta.c | 8
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mld/stats.c | 11
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mld/tlc.c | 75
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/coex.c | 131
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/constants.h | 20
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 384
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c | 94
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/link.c | 809
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 38
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 124
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c | 138
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 136
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 53
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/rx.c | 133
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 23
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 101
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 89
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 24
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c | 433
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/time-event.c | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 8
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 10
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 57
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h | 53
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans-gen2.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c | 237
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c | 8
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/tests/Makefile | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/tests/nvm_parse.c | 72
-rw-r--r-- drivers/net/wireless/intersil/p54/txrx.c | 2
-rw-r--r-- drivers/net/wireless/marvell/libertas/cfg.c | 9
-rw-r--r-- drivers/net/wireless/marvell/libertas/if_sdio.c | 3
-rw-r--r-- drivers/net/wireless/marvell/libertas/if_spi.c | 3
-rw-r--r-- drivers/net/wireless/marvell/libertas_tf/main.c | 2
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 12
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/main.c | 9
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/main.h | 3
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sta_cmd.c | 113
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sta_ioctl.c | 58
-rw-r--r-- drivers/net/wireless/mediatek/mt76/agg-rx.c | 2
-rw-r--r-- drivers/net/wireless/mediatek/mt76/channel.c | 13
-rw-r--r-- drivers/net/wireless/mediatek/mt76/dma.c | 231
-rw-r--r-- drivers/net/wireless/mediatek/mt76/dma.h | 29
-rw-r--r-- drivers/net/wireless/mediatek/mt76/eeprom.c | 9
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mac80211.c | 102
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76.h | 76
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c | 3
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7603/soc.c | 2
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c | 4
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7615/init.c | 5
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h | 7
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c | 25
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c | 6
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c | 4
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7915/dma.c | 4
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c | 4
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h | 6
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7915/init.c | 4
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7915/mac.c | 12
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7915/mcu.c | 29
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7915/mmio.c | 6
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7921/init.c | 4
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7921/main.c | 7
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7921/usb.c | 3
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7925/init.c | 4
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7925/mac.c | 7
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7925/main.c | 74
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7925/mcu.c | 40
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7925/pci.c | 26
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7925/usb.c | 3
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt792x.h | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt792x_core.c | 6
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt792x_dma.c | 6
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7996/dma.c | 326
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c | 3
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7996/init.c | 356
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7996/mac.c | 841
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7996/main.c | 512
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7996/mcu.c | 327
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7996/mcu.h | 17
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7996/mmio.c | 97
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h | 107
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7996/pci.c | 3
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7996/regs.h | 32
-rw-r--r-- drivers/net/wireless/mediatek/mt76/scan.c | 13
-rw-r--r-- drivers/net/wireless/mediatek/mt76/tx.c | 15
-rw-r--r-- drivers/net/wireless/mediatek/mt76/wed.c | 8
-rw-r--r-- drivers/net/wireless/microchip/wilc1000/cfg80211.c | 7
-rw-r--r-- drivers/net/wireless/microchip/wilc1000/wlan_cfg.c | 37
-rw-r--r-- drivers/net/wireless/microchip/wilc1000/wlan_cfg.h | 5
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/Kconfig | 4
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00dev.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/core.c | 27
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/ps.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c | 1
-rw-r--r-- drivers/net/wireless/realtek/rtw88/led.c | 13
-rw-r--r-- drivers/net/wireless/realtek/rtw88/sdio.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtw89/chan.c | 11
-rw-r--r-- drivers/net/wireless/realtek/rtw89/chan.h | 10
-rw-r--r-- drivers/net/wireless/realtek/rtw89/coex.c | 5
-rw-r--r-- drivers/net/wireless/realtek/rtw89/core.c | 684
-rw-r--r-- drivers/net/wireless/realtek/rtw89/core.h | 148
-rw-r--r-- drivers/net/wireless/realtek/rtw89/debug.c | 125
-rw-r--r-- drivers/net/wireless/realtek/rtw89/debug.h | 1
-rw-r--r-- drivers/net/wireless/realtek/rtw89/fw.c | 177
-rw-r--r-- drivers/net/wireless/realtek/rtw89/fw.h | 77
-rw-r--r-- drivers/net/wireless/realtek/rtw89/mac.c | 72
-rw-r--r-- drivers/net/wireless/realtek/rtw89/mac.h | 1
-rw-r--r-- drivers/net/wireless/realtek/rtw89/mac80211.c | 35
-rw-r--r-- drivers/net/wireless/realtek/rtw89/mac_be.c | 1
-rw-r--r-- drivers/net/wireless/realtek/rtw89/pci.c | 462
-rw-r--r-- drivers/net/wireless/realtek/rtw89/pci.h | 128
-rw-r--r-- drivers/net/wireless/realtek/rtw89/pci_be.c | 18
-rw-r--r-- drivers/net/wireless/realtek/rtw89/phy.c | 476
-rw-r--r-- drivers/net/wireless/realtek/rtw89/phy.h | 24
-rw-r--r-- drivers/net/wireless/realtek/rtw89/phy_be.c | 9
-rw-r--r-- drivers/net/wireless/realtek/rtw89/ps.c | 3
-rw-r--r-- drivers/net/wireless/realtek/rtw89/reg.h | 56
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8851b.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c | 159
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8851be.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8851bu.c | 3
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852a.c | 46
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852ae.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852b.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852be.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852bt.c | 1
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c | 14
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852bte.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852bu.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852c.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852ce.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8922a.c | 11
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8922ae.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtw89/sar.c | 15
-rw-r--r-- drivers/net/wireless/realtek/rtw89/sar.h | 1
-rw-r--r-- drivers/net/wireless/realtek/rtw89/ser.c | 5
-rw-r--r-- drivers/net/wireless/realtek/rtw89/txrx.h | 38
-rw-r--r-- drivers/net/wireless/realtek/rtw89/wow.c | 79
-rw-r--r-- drivers/net/wireless/realtek/rtw89/wow.h | 6
-rw-r--r-- drivers/net/wireless/st/cw1200/sta.c | 2
-rw-r--r-- drivers/net/wireless/virtual/mac80211_hwsim.c | 259
-rw-r--r-- drivers/net/wireless/virtual/mac80211_hwsim.h | 4
-rw-r--r-- drivers/net/wireless/virtual/virt_wifi.c | 4
-rw-r--r-- drivers/net/wwan/iosm/iosm_ipc_pcie.c | 2
-rw-r--r-- drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c | 3
-rw-r--r-- drivers/net/wwan/t7xx/t7xx_pci.c | 1
-rw-r--r-- drivers/net/wwan/wwan_hwsim.c | 2
-rw-r--r-- drivers/nfc/pn533/pn533.c | 12
-rw-r--r-- drivers/nfc/s3fwrn5/Kconfig | 3
-rw-r--r-- drivers/nfc/s3fwrn5/firmware.c | 17
-rw-r--r-- drivers/ntb/hw/amd/ntb_hw_amd.c | 18
-rw-r--r-- drivers/ntb/hw/amd/ntb_hw_amd.h | 1
-rw-r--r-- drivers/ntb/hw/epf/ntb_hw_epf.c | 118
-rw-r--r-- drivers/ntb/ntb_transport.c | 7
-rw-r--r-- drivers/nvdimm/badrange.c | 3
-rw-r--r-- drivers/nvdimm/btt.c | 4
-rw-r--r-- drivers/nvdimm/btt_devs.c | 24
-rw-r--r-- drivers/nvdimm/bus.c | 72
-rw-r--r-- drivers/nvdimm/claim.c | 7
-rw-r--r-- drivers/nvdimm/core.c | 17
-rw-r--r-- drivers/nvdimm/dax_devs.c | 12
-rw-r--r-- drivers/nvdimm/dimm.c | 5
-rw-r--r-- drivers/nvdimm/dimm_devs.c | 48
-rw-r--r-- drivers/nvdimm/namespace_devs.c | 113
-rw-r--r-- drivers/nvdimm/nd.h | 3
-rw-r--r-- drivers/nvdimm/pfn_devs.c | 63
-rw-r--r-- drivers/nvdimm/region.c | 16
-rw-r--r-- drivers/nvdimm/region_devs.c | 118
-rw-r--r-- drivers/nvdimm/security.c | 10
-rw-r--r-- drivers/nvme/common/auth.c | 86
-rw-r--r-- drivers/nvme/host/apple.c | 197
-rw-r--r-- drivers/nvme/host/auth.c | 9
-rw-r--r-- drivers/nvme/host/core.c | 57
-rw-r--r-- drivers/nvme/host/fc.c | 14
-rw-r--r-- drivers/nvme/host/ioctl.c | 7
-rw-r--r-- drivers/nvme/host/nvme.h | 2
-rw-r--r-- drivers/nvme/host/pci.c | 186
-rw-r--r-- drivers/nvme/host/tcp.c | 5
-rw-r--r-- drivers/nvme/target/core.c | 29
-rw-r--r-- drivers/nvme/target/fc.c | 41
-rw-r--r-- drivers/nvme/target/fcloop.c | 8
-rw-r--r-- drivers/nvme/target/passthru.c | 2
-rw-r--r-- drivers/nvme/target/rdma.c | 6
-rw-r--r-- drivers/nvmem/Kconfig | 21
-rw-r--r-- drivers/nvmem/Makefile | 4
-rw-r--r-- drivers/nvmem/an8855-efuse.c | 68
-rw-r--r-- drivers/nvmem/layouts.c | 13
-rw-r--r-- drivers/nvmem/s32g-ocotp-nvmem.c | 100
-rw-r--r-- drivers/of/device.c | 4
-rw-r--r-- drivers/of/dynamic.c | 9
-rw-r--r-- drivers/of/irq.c | 28
-rw-r--r-- drivers/of/of_numa.c | 5
-rw-r--r-- drivers/of/of_reserved_mem.c | 17
-rw-r--r-- drivers/of/overlay.c | 2
-rw-r--r-- drivers/of/unittest.c | 1
-rw-r--r-- drivers/opp/core.c | 99
-rw-r--r-- drivers/parisc/eisa_eeprom.c | 2
-rw-r--r-- drivers/pci/Kconfig | 2
-rw-r--r-- drivers/pci/bus.c | 17
-rw-r--r-- drivers/pci/controller/cadence/Kconfig | 10
-rw-r--r-- drivers/pci/controller/cadence/Makefile | 1
-rw-r--r-- drivers/pci/controller/cadence/pci-j721e.c | 28
-rw-r--r-- drivers/pci/controller/cadence/pcie-cadence-ep.c | 40
-rw-r--r-- drivers/pci/controller/cadence/pcie-cadence-host.c | 2
-rw-r--r-- drivers/pci/controller/cadence/pcie-cadence.c | 18
-rw-r--r-- drivers/pci/controller/cadence/pcie-cadence.h | 45
-rw-r--r-- drivers/pci/controller/cadence/pcie-sg2042.c | 134
-rw-r--r-- drivers/pci/controller/dwc/Kconfig | 26
-rw-r--r-- drivers/pci/controller/dwc/Makefile | 2
-rw-r--r-- drivers/pci/controller/dwc/pci-dra7xx.c | 1
-rw-r--r-- drivers/pci/controller/dwc/pci-exynos.c | 62
-rw-r--r-- drivers/pci/controller/dwc/pci-imx6.c | 8
-rw-r--r-- drivers/pci/controller/dwc/pci-keystone.c | 9
-rw-r--r-- drivers/pci/controller/dwc/pcie-al.c | 1
-rw-r--r-- drivers/pci/controller/dwc/pcie-amd-mdb.c | 52
-rw-r--r-- drivers/pci/controller/dwc/pcie-artpec6.c | 2
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware-ep.c | 31
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware-host.c | 148
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware-plat.c | 1
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware.c | 94
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware.h | 55
-rw-r--r-- drivers/pci/controller/dwc/pcie-dw-rockchip.c | 44
-rw-r--r-- drivers/pci/controller/dwc/pcie-keembay.c | 1
-rw-r--r-- drivers/pci/controller/dwc/pcie-qcom-common.c | 58
-rw-r--r-- drivers/pci/controller/dwc/pcie-qcom-common.h | 2
-rw-r--r-- drivers/pci/controller/dwc/pcie-qcom-ep.c | 23
-rw-r--r-- drivers/pci/controller/dwc/pcie-qcom.c | 211
-rw-r--r-- drivers/pci/controller/dwc/pcie-rcar-gen4.c | 30
-rw-r--r-- drivers/pci/controller/dwc/pcie-stm32-ep.c | 364
-rw-r--r-- drivers/pci/controller/dwc/pcie-stm32.c | 358
-rw-r--r-- drivers/pci/controller/dwc/pcie-stm32.h | 16
-rw-r--r-- drivers/pci/controller/dwc/pcie-tegra194.c | 51
-rw-r--r-- drivers/pci/controller/pci-hyperv.c | 8
-rw-r--r-- drivers/pci/controller/pci-mvebu.c | 21
-rw-r--r-- drivers/pci/controller/pci-tegra.c | 29
-rw-r--r-- drivers/pci/controller/pci-xgene-msi.c | 2
-rw-r--r-- drivers/pci/controller/pcie-mediatek-gen3.c | 23
-rw-r--r-- drivers/pci/controller/pcie-rcar-ep.c | 2
-rw-r--r-- drivers/pci/controller/pcie-rcar-host.c | 42
-rw-r--r-- drivers/pci/controller/pcie-rockchip-ep.c | 1
-rw-r--r-- drivers/pci/controller/pcie-rockchip.h | 35
-rw-r--r-- drivers/pci/controller/pcie-xilinx-nwl.c | 7
-rw-r--r-- drivers/pci/controller/pcie-xilinx.c | 2
-rw-r--r-- drivers/pci/controller/plda/pcie-plda-host.c | 3
-rw-r--r-- drivers/pci/controller/vmd.c | 7
-rw-r--r-- drivers/pci/endpoint/functions/pci-epf-test.c | 38
-rw-r--r-- drivers/pci/endpoint/pci-ep-msi.c | 2
-rw-r--r-- drivers/pci/hotplug/cpqphp_pci.c | 8
-rw-r--r-- drivers/pci/hotplug/ibmphp_hpc.c | 6
-rw-r--r-- drivers/pci/iov.c | 5
-rw-r--r-- drivers/pci/msi/irqdomain.c | 55
-rw-r--r-- drivers/pci/of_property.c | 22
-rw-r--r-- drivers/pci/p2pdma.c | 5
-rw-r--r-- drivers/pci/pci-acpi.c | 6
-rw-r--r-- drivers/pci/pci-driver.c | 3
-rw-r--r-- drivers/pci/pci-sysfs.c | 73
-rw-r--r-- drivers/pci/pci.c | 87
-rw-r--r-- drivers/pci/pci.h | 96
-rw-r--r-- drivers/pci/pcie/aer.c | 49
-rw-r--r-- drivers/pci/pcie/aspm.c | 45
-rw-r--r-- drivers/pci/pcie/err.c | 40
-rw-r--r-- drivers/pci/probe.c | 88
-rw-r--r-- drivers/pci/pwrctrl/slot.c | 12
-rw-r--r-- drivers/pci/quirks.c | 3
-rw-r--r-- drivers/pci/remove.c | 3
-rw-r--r-- drivers/pci/setup-bus.c | 848
-rw-r--r-- drivers/pci/setup-res.c | 46
-rw-r--r-- drivers/pci/switch/switchtec.c | 25
-rw-r--r-- drivers/pci/vgaarb.c | 31
-rw-r--r-- drivers/pcmcia/Kconfig | 3
-rw-r--r-- drivers/pcmcia/Makefile | 1
-rw-r--r-- drivers/pcmcia/cs.c | 17
-rw-r--r-- drivers/pcmcia/cs_internal.h | 1
-rw-r--r-- drivers/pcmcia/ds.c | 2
-rw-r--r-- drivers/pcmcia/omap_cf.c | 10
-rw-r--r-- drivers/pcmcia/rsrc_iodyn.c | 168
-rw-r--r-- drivers/pcmcia/rsrc_nonstatic.c | 4
-rw-r--r-- drivers/pcmcia/socket_sysfs.c | 5
-rw-r--r-- drivers/peci/controller/peci-npcm.c | 1
-rw-r--r-- drivers/perf/Kconfig | 9
-rw-r--r-- drivers/perf/Makefile | 1
-rw-r--r-- drivers/perf/arm-ccn.c | 2
-rw-r--r-- drivers/perf/arm-cmn.c | 9
-rw-r--r-- drivers/perf/arm_pmuv3.c | 29
-rw-r--r-- drivers/perf/arm_spe_pmu.c | 114
-rw-r--r-- drivers/perf/dwc_pcie_pmu.c | 161
-rw-r--r-- drivers/perf/fsl_imx9_ddr_perf.c | 6
-rw-r--r-- drivers/perf/fujitsu_uncore_pmu.c | 613
-rw-r--r-- drivers/perf/hisilicon/Makefile | 3
-rw-r--r-- drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c | 557
-rw-r--r-- drivers/perf/hisilicon/hisi_uncore_mn_pmu.c | 411
-rw-r--r-- drivers/perf/hisilicon/hisi_uncore_noc_pmu.c | 443
-rw-r--r-- drivers/perf/hisilicon/hisi_uncore_pmu.c | 5
-rw-r--r-- drivers/perf/hisilicon/hisi_uncore_pmu.h | 6
-rw-r--r-- drivers/perf/riscv_pmu_sbi.c | 199
-rw-r--r-- drivers/phy/Kconfig | 1
-rw-r--r-- drivers/phy/Makefile | 1
-rw-r--r-- drivers/phy/allwinner/phy-sun4i-usb.c | 38
-rw-r--r-- drivers/phy/broadcom/phy-brcm-sata.c | 1
-rw-r--r-- drivers/phy/broadcom/phy-brcm-usb.c | 1
-rw-r--r-- drivers/phy/cadence/cdns-dphy-rx.c | 3
-rw-r--r-- drivers/phy/cadence/cdns-dphy.c | 154
-rw-r--r-- drivers/phy/cadence/phy-cadence-sierra.c | 1
-rw-r--r-- drivers/phy/freescale/phy-fsl-lynx-28g.c | 16
-rw-r--r-- drivers/phy/hisilicon/phy-hi6220-usb.c | 1
-rw-r--r-- drivers/phy/hisilicon/phy-histb-combphy.c | 2
-rw-r--r-- drivers/phy/ingenic/phy-ingenic-usb.c | 8
-rw-r--r-- drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c | 19
-rw-r--r-- drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c | 1
-rw-r--r-- drivers/phy/qualcomm/phy-qcom-m31-eusb2.c | 2
-rw-r--r-- drivers/phy/qualcomm/phy-qcom-qmp-combo.c | 179
-rw-r--r-- drivers/phy/qualcomm/phy-qcom-qmp-pcie.c | 174
-rw-r--r-- drivers/phy/qualcomm/phy-qcom-qmp-pcs-v7.h | 2
-rw-r--r-- drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v7.h | 4
-rw-r--r-- drivers/phy/qualcomm/phy-qcom-qmp-ufs.c | 159
-rw-r--r-- drivers/phy/renesas/phy-rcar-gen3-usb2.c | 134
-rw-r--r-- drivers/phy/renesas/r8a779f0-ether-serdes.c | 97
-rw-r--r-- drivers/phy/rockchip/phy-rockchip-emmc.c | 3
-rw-r--r-- drivers/phy/rockchip/phy-rockchip-inno-csidphy.c | 67
-rw-r--r-- drivers/phy/rockchip/phy-rockchip-naneng-combphy.c | 761
-rw-r--r-- drivers/phy/rockchip/phy-rockchip-pcie.c | 70
-rw-r--r-- drivers/phy/rockchip/phy-rockchip-samsung-dcphy.c | 11
-rw-r--r-- drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c | 1
-rw-r--r-- drivers/phy/rockchip/phy-rockchip-usb.c | 51
-rw-r--r-- drivers/phy/rockchip/phy-rockchip-usbdp.c | 3
-rw-r--r-- drivers/phy/samsung/phy-exynos5-usbdrd.c | 1
-rw-r--r-- drivers/phy/samsung/phy-samsung-usb2.c | 1
-rw-r--r-- drivers/phy/sophgo/Kconfig | 19
-rw-r--r-- drivers/phy/sophgo/Makefile | 2
-rw-r--r-- drivers/phy/sophgo/phy-cv1800-usb2.c | 170
-rw-r--r-- drivers/phy/tegra/xusb-tegra210.c | 6
-rw-r--r-- drivers/phy/ti/Kconfig | 2
-rw-r--r-- drivers/phy/ti/phy-am654-serdes.c | 1
-rw-r--r-- drivers/phy/ti/phy-dm816x-usb.c | 1
-rw-r--r-- drivers/phy/ti/phy-gmii-sel.c | 47
-rw-r--r-- drivers/phy/ti/phy-j721e-wiz.c | 1
-rw-r--r-- drivers/phy/ti/phy-omap-control.c | 1
-rw-r--r-- drivers/phy/ti/phy-omap-usb2.c | 14
-rw-r--r-- drivers/phy/ti/phy-ti-pipe3.c | 14
-rw-r--r-- drivers/pinctrl/Kconfig | 35
-rw-r--r-- drivers/pinctrl/Makefile | 2
-rw-r--r-- drivers/pinctrl/actions/pinctrl-owl.c | 2
-rw-r--r-- drivers/pinctrl/bcm/Kconfig | 12
-rw-r--r-- drivers/pinctrl/bcm/Kconfig.stb | 10
-rw-r--r-- drivers/pinctrl/bcm/Makefile | 2
-rw-r--r-- drivers/pinctrl/bcm/pinctrl-bcm2835.c | 10
-rw-r--r-- drivers/pinctrl/bcm/pinctrl-bcm6358.c | 4
-rw-r--r-- drivers/pinctrl/bcm/pinctrl-brcmstb-bcm2712.c | 747
-rw-r--r-- drivers/pinctrl/bcm/pinctrl-brcmstb.c | 442
-rw-r--r-- drivers/pinctrl/bcm/pinctrl-brcmstb.h | 93
-rw-r--r-- drivers/pinctrl/bcm/pinctrl-iproc-gpio.c | 2
-rw-r--r-- drivers/pinctrl/bcm/pinctrl-nsp-gpio.c | 2
-rw-r--r-- drivers/pinctrl/cirrus/pinctrl-cs42l43.c | 2
-rw-r--r-- drivers/pinctrl/cirrus/pinctrl-lochnagar.c | 2
-rw-r--r-- drivers/pinctrl/cirrus/pinctrl-madera-core.c | 4
-rw-r--r-- drivers/pinctrl/core.c | 13
-rw-r--r-- drivers/pinctrl/freescale/pinctrl-imx.c | 45
-rw-r--r-- drivers/pinctrl/intel/pinctrl-baytrail.c | 2
-rw-r--r-- drivers/pinctrl/intel/pinctrl-cherryview.c | 2
-rw-r--r-- drivers/pinctrl/intel/pinctrl-intel.c | 2
-rw-r--r-- drivers/pinctrl/intel/pinctrl-lynxpoint.c | 2
-rw-r--r-- drivers/pinctrl/mediatek/pinctrl-airoha.c | 64
-rw-r--r-- drivers/pinctrl/mediatek/pinctrl-moore.c | 14
-rw-r--r-- drivers/pinctrl/mediatek/pinctrl-moore.h | 7
-rw-r--r-- drivers/pinctrl/mediatek/pinctrl-mt7622.c | 2
-rw-r--r-- drivers/pinctrl/mediatek/pinctrl-mt7623.c | 2
-rw-r--r-- drivers/pinctrl/mediatek/pinctrl-mt7629.c | 2
-rw-r--r-- drivers/pinctrl/mediatek/pinctrl-mt7981.c | 2
-rw-r--r-- drivers/pinctrl/mediatek/pinctrl-mt7986.c | 2
-rw-r--r-- drivers/pinctrl/mediatek/pinctrl-mt7988.c | 44
-rw-r--r-- drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h | 2
-rw-r--r-- drivers/pinctrl/mediatek/pinctrl-mtk-common.c | 4
-rw-r--r-- drivers/pinctrl/mediatek/pinctrl-paris.c | 6
-rw-r--r-- drivers/pinctrl/meson/pinctrl-amlogic-a4.c | 10
-rw-r--r-- drivers/pinctrl/meson/pinctrl-meson-g12a.c | 8
-rw-r--r-- drivers/pinctrl/meson/pinctrl-meson-gxl.c | 10
-rw-r--r-- drivers/pinctrl/meson/pinctrl-meson.c | 8
-rw-r--r-- drivers/pinctrl/mvebu/pinctrl-armada-37xx.c | 8
-rw-r--r-- drivers/pinctrl/nomadik/pinctrl-abx500.c | 8
-rw-r--r-- drivers/pinctrl/nomadik/pinctrl-nomadik.c | 2
-rw-r--r-- drivers/pinctrl/nuvoton/pinctrl-ma35.c | 5
-rw-r--r-- drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c | 187
-rw-r--r-- drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c | 160
-rw-r--r-- drivers/pinctrl/nuvoton/pinctrl-wpcm450.c | 46
-rw-r--r-- drivers/pinctrl/pinconf-generic.c | 6
-rw-r--r-- drivers/pinctrl/pinctrl-amd.c | 43
-rw-r--r-- drivers/pinctrl/pinctrl-amdisp.c | 2
-rw-r--r-- drivers/pinctrl/pinctrl-apple-gpio.c | 3
-rw-r--r-- drivers/pinctrl/pinctrl-as3722.c | 2
-rw-r--r-- drivers/pinctrl/pinctrl-at91-pio4.c | 6
-rw-r--r-- drivers/pinctrl/pinctrl-at91.c | 4
-rw-r--r-- drivers/pinctrl/pinctrl-aw9523.c | 10
-rw-r--r-- drivers/pinctrl/pinctrl-axp209.c | 4
-rw-r--r-- drivers/pinctrl/pinctrl-cy8c95x0.c | 6
-rw-r--r-- drivers/pinctrl/pinctrl-da9062.c | 2
-rw-r--r-- drivers/pinctrl/pinctrl-digicolor.c | 2
-rw-r--r-- drivers/pinctrl/pinctrl-eic7700.c | 2
-rw-r--r-- drivers/pinctrl/pinctrl-equilibrium.c | 30
-rw-r--r-- drivers/pinctrl/pinctrl-equilibrium.h | 2
-rw-r--r-- drivers/pinctrl/pinctrl-ingenic.c | 55
-rw-r--r-- drivers/pinctrl/pinctrl-k210.c | 2
-rw-r--r-- drivers/pinctrl/pinctrl-keembay.c | 32
-rw-r--r-- drivers/pinctrl/pinctrl-max7360.c | 215
-rw-r--r-- drivers/pinctrl/pinctrl-mcp23s08.c | 4
-rw-r--r-- drivers/pinctrl/pinctrl-microchip-sgpio.c | 8
-rw-r--r-- drivers/pinctrl/pinctrl-ocelot.c | 6
-rw-r--r-- drivers/pinctrl/pinctrl-pic32.c | 6
-rw-r--r-- drivers/pinctrl/pinctrl-pistachio.c | 2
-rw-r--r-- drivers/pinctrl/pinctrl-rk805.c | 6
-rw-r--r-- drivers/pinctrl/pinctrl-rockchip.c | 6
-rw-r--r-- drivers/pinctrl/pinctrl-rp1.c | 98
-rw-r--r-- drivers/pinctrl/pinctrl-scmi.c | 2
-rw-r--r-- drivers/pinctrl/pinctrl-single.c | 8
-rw-r--r-- drivers/pinctrl/pinctrl-st.c | 2
-rw-r--r-- drivers/pinctrl/pinctrl-stmfx.c | 6
-rw-r--r-- drivers/pinctrl/pinctrl-sx150x.c | 16
-rw-r--r-- drivers/pinctrl/pinctrl-upboard.c | 1070
-rw-r--r-- drivers/pinctrl/pinctrl-xway.c | 2
-rw-r--r-- drivers/pinctrl/pinctrl-zynqmp.c | 2
-rw-r--r-- drivers/pinctrl/pinmux.c | 70
-rw-r--r-- drivers/pinctrl/pinmux.h | 9
-rw-r--r-- drivers/pinctrl/qcom/Kconfig | 11
-rw-r--r-- drivers/pinctrl/qcom/Kconfig.msm | 10
-rw-r--r-- drivers/pinctrl/qcom/Makefile | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-glymur.c | 1777
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-ipq5018.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-ipq5332.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-ipq5424.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-ipq6018.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-ipq8074.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-ipq9574.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-lpass-lpi.c | 28
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-lpass-lpi.h | 18
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-mdm9607.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-mdm9615.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-milos.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-msm.c | 53
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-msm.h | 5
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-msm8226.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-msm8660.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-msm8909.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-msm8916.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-msm8917.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-msm8953.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-msm8960.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-msm8976.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-msm8994.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-msm8996.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-msm8998.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-msm8x74.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-qcm2290.c | 4
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-qcs404.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-qcs615.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-qcs8300.c | 4
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-qdu1000.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sa8775p.c | 4
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sar2130p.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sc7180.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sc7280.c | 4
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sc8180x.c | 4
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sc8280xp.c | 4
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sdm660-lpass-lpi.c | 160
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sdm660.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sdm670.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sdm845.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sdx55.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sdx65.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sdx75.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sm4450.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sm6115.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sm6125.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sm6350.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sm6375.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sm7150.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sm8150.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sm8250.c | 83
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sm8350.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sm8450.c | 4
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sm8550.c | 2
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sm8650.c | 4
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-sm8750.c | 4
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-spmi-gpio.c | 10
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-spmi-mpp.c | 10
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c | 6
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c | 6
-rw-r--r-- drivers/pinctrl/qcom/pinctrl-x1e80100.c | 2
-rw-r--r-- drivers/pinctrl/renesas/Kconfig | 13
-rw-r--r-- drivers/pinctrl/renesas/Makefile | 1
-rw-r--r-- drivers/pinctrl/renesas/gpio.c | 2
-rw-r--r-- drivers/pinctrl/renesas/pfc-r8a779g0.c | 2
-rw-r--r-- drivers/pinctrl/renesas/pinctrl-rza1.c | 6
-rw-r--r-- drivers/pinctrl/renesas/pinctrl-rza2.c | 4
-rw-r--r-- drivers/pinctrl/renesas/pinctrl-rzg2l.c | 222
-rw-r--r-- drivers/pinctrl/renesas/pinctrl-rzt2h.c | 813
-rw-r--r-- drivers/pinctrl/renesas/pinctrl-rzv2m.c | 4
-rw-r--r-- drivers/pinctrl/renesas/pinctrl.c | 3
-rw-r--r-- drivers/pinctrl/samsung/pinctrl-exynos-arm64.c | 50
-rw-r--r-- drivers/pinctrl/samsung/pinctrl-exynos.h | 10
-rw-r--r-- drivers/pinctrl/samsung/pinctrl-samsung.c | 4
-rw-r--r-- drivers/pinctrl/samsung/pinctrl-samsung.h | 5
-rw-r--r-- drivers/pinctrl/spacemit/pinctrl-k1.c | 4
-rw-r--r-- drivers/pinctrl/spear/pinctrl-plgpio.c | 2
-rw-r--r-- drivers/pinctrl/sprd/pinctrl-sprd.c | 9
-rw-r--r-- drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c | 2
-rw-r--r-- drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c | 2
-rw-r--r-- drivers/pinctrl/stm32/pinctrl-stm32-hdp.c | 36
-rw-r--r-- drivers/pinctrl/stm32/pinctrl-stm32.c | 4
-rw-r--r-- drivers/pinctrl/sunplus/sppctl.c | 6
-rw-r--r-- drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c | 11
-rw-r--r-- drivers/pinctrl/sunxi/pinctrl-sunxi.c | 2
-rw-r--r-- drivers/pinctrl/tegra/Kconfig | 4
-rw-r--r-- drivers/pinctrl/tegra/Makefile | 1
-rw-r--r-- drivers/pinctrl/tegra/pinctrl-tegra186.c | 1979
-rw-r--r-- drivers/pinctrl/vt8500/pinctrl-wmt.c | 2
-rw-r--r-- drivers/platform/arm64/Kconfig | 20
-rw-r--r-- drivers/platform/arm64/Makefile | 1
-rw-r--r-- drivers/platform/arm64/lenovo-thinkpad-t14s.c | 616
-rw-r--r-- drivers/platform/chrome/cros_ec.c | 90
-rw-r--r-- drivers/platform/chrome/cros_ec.h | 3
-rw-r--r-- drivers/platform/chrome/cros_ec_chardev.c | 72
-rw-r--r-- drivers/platform/chrome/cros_ec_i2c.c | 9
-rw-r--r-- drivers/platform/chrome/cros_ec_ishtp.c | 6
-rw-r--r-- drivers/platform/chrome/cros_ec_lpc.c | 6
-rw-r--r-- drivers/platform/chrome/cros_ec_proto.c | 15
-rw-r--r-- drivers/platform/chrome/cros_ec_rpmsg.c | 6
-rw-r--r-- drivers/platform/chrome/cros_ec_spi.c | 7
-rw-r--r-- drivers/platform/chrome/cros_ec_uart.c | 6
-rw-r--r-- drivers/platform/chrome/wilco_ec/telemetry.c | 2
-rw-r--r-- drivers/platform/cznic/turris-omnia-mcu-gpio.c | 4
-rw-r--r-- drivers/platform/x86/Kconfig | 12
-rw-r--r-- drivers/platform/x86/Makefile | 1
-rw-r--r-- drivers/platform/x86/acer-wmi.c | 71
-rw-r--r-- drivers/platform/x86/amd/hfi/hfi.c | 14
-rw-r--r-- drivers/platform/x86/amd/hsmp/acpi.c | 6
-rw-r--r-- drivers/platform/x86/amd/hsmp/hsmp.c | 5
-rw-r--r-- drivers/platform/x86/amd/hsmp/plat.c | 4
-rw-r--r-- drivers/platform/x86/amd/pmc/pmc-quirks.c | 83
-rw-r--r-- drivers/platform/x86/amd/pmc/pmc.c | 13
-rw-r--r-- drivers/platform/x86/amd/pmf/acpi.c | 87
-rw-r--r-- drivers/platform/x86/amd/pmf/core.c | 1
-rw-r--r-- drivers/platform/x86/amd/pmf/pmf.h | 77
-rw-r--r-- drivers/platform/x86/amd/pmf/spc.c | 80
-rw-r--r-- drivers/platform/x86/amd/pmf/sps.c | 2
-rw-r--r-- drivers/platform/x86/amd/pmf/tee-if.c | 22
-rw-r--r-- drivers/platform/x86/asus-nb-wmi.c | 28
-rw-r--r-- drivers/platform/x86/asus-wmi.c | 9
-rw-r--r-- drivers/platform/x86/asus-wmi.h | 3
-rw-r--r-- drivers/platform/x86/barco-p50-gpio.c | 106
-rw-r--r-- drivers/platform/x86/dell/dell-lis3lv02d.c | 1
-rw-r--r-- drivers/platform/x86/dell/dell-pc.c | 9
-rw-r--r-- drivers/platform/x86/dell/dell-smbios-base.c | 19
-rw-r--r-- drivers/platform/x86/dell/dell-smbios-smm.c | 3
-rw-r--r-- drivers/platform/x86/dell/dell-smbios-wmi.c | 4
-rw-r--r-- drivers/platform/x86/dell/dell-smbios.h | 2
-rw-r--r-- drivers/platform/x86/dell/dell_rbu.c | 8
-rw-r--r-- drivers/platform/x86/hp/hp-wmi.c | 8
-rw-r--r-- drivers/platform/x86/intel/int0002_vgpio.c | 2
-rw-r--r-- drivers/platform/x86/intel/int3472/discrete.c | 64
-rw-r--r-- drivers/platform/x86/intel/pmc/Makefile | 2
-rw-r--r-- drivers/platform/x86/intel/pmc/arl.c | 4
-rw-r--r-- drivers/platform/x86/intel/pmc/core.c | 186
-rw-r--r-- drivers/platform/x86/intel/pmc/core.h | 27
-rw-r--r-- drivers/platform/x86/intel/pmc/lnl.c | 18
-rw-r--r-- drivers/platform/x86/intel/pmc/mtl.c | 2
-rw-r--r-- drivers/platform/x86/intel/pmc/ptl.c | 37
-rw-r--r-- drivers/platform/x86/intel/pmc/ssram_telemetry.c | 1
-rw-r--r-- drivers/platform/x86/intel/pmc/tgl.c | 4
-rw-r--r-- drivers/platform/x86/intel/pmc/wcl.c | 486
-rw-r--r-- drivers/platform/x86/intel/speed_select_if/isst_if_common.c | 2
-rw-r--r-- drivers/platform/x86/intel/tpmi_power_domains.c | 4
-rw-r--r-- drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c | 81
-rw-r--r-- drivers/platform/x86/lenovo/think-lmi.c | 94
-rw-r--r-- drivers/platform/x86/lenovo/think-lmi.h | 14
-rw-r--r-- drivers/platform/x86/lenovo/wmi-capdata01.c | 2
-rw-r--r-- drivers/platform/x86/lenovo/yoga-tab2-pro-1380-fastcharger.c | 5
-rw-r--r-- drivers/platform/x86/lg-laptop.c | 34
-rw-r--r-- drivers/platform/x86/meraki-mx100.c | 404
-rw-r--r-- drivers/platform/x86/oxpec.c | 14
-rw-r--r-- drivers/platform/x86/pcengines-apuv2.c | 192
-rw-r--r-- drivers/platform/x86/portwell-ec.c | 198
-rw-r--r-- drivers/platform/x86/quickstart.c | 10
-rw-r--r-- drivers/platform/x86/redmi-wmi.c | 130
-rw-r--r-- drivers/platform/x86/silicom-platform.c | 2
-rw-r--r-- drivers/platform/x86/x86-android-tablets/Makefile | 2
-rw-r--r-- drivers/platform/x86/x86-android-tablets/acer.c | 247
-rw-r--r-- drivers/platform/x86/x86-android-tablets/asus.c | 108
-rw-r--r-- drivers/platform/x86/x86-android-tablets/core.c | 121
-rw-r--r-- drivers/platform/x86/x86-android-tablets/dmi.c | 12
-rw-r--r-- drivers/platform/x86/x86-android-tablets/lenovo.c | 291
-rw-r--r-- drivers/platform/x86/x86-android-tablets/other.c | 334
-rw-r--r-- drivers/platform/x86/x86-android-tablets/shared-psy-info.c | 34
-rw-r--r-- drivers/platform/x86/x86-android-tablets/shared-psy-info.h | 8
-rw-r--r-- drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c | 2
-rw-r--r-- drivers/platform/x86/x86-android-tablets/x86-android-tablets.h | 28
-rw-r--r-- drivers/platform/x86/xiaomi-wmi.c | 10
-rw-r--r-- drivers/pmdomain/Kconfig | 1
-rw-r--r-- drivers/pmdomain/Makefile | 1
-rw-r--r-- drivers/pmdomain/amlogic/meson-secure-pwrc.c | 95
-rw-r--r-- drivers/pmdomain/apple/pmgr-pwrstate.c | 1
-rw-r--r-- drivers/pmdomain/core.c | 20
-rw-r--r-- drivers/pmdomain/imx/gpc.c | 1
-rw-r--r-- drivers/pmdomain/imx/imx93-blk-ctrl.c | 23
-rw-r--r-- drivers/pmdomain/marvell/Kconfig | 18
-rw-r--r-- drivers/pmdomain/marvell/Makefile | 3
-rw-r--r-- drivers/pmdomain/marvell/pxa1908-power-controller.c | 274
-rw-r--r-- drivers/pmdomain/mediatek/airoha-cpu-pmdomain.c | 8
-rw-r--r-- drivers/pmdomain/mediatek/mt6795-pm-domains.h | 5
-rw-r--r-- drivers/pmdomain/mediatek/mt8167-pm-domains.h | 5
-rw-r--r-- drivers/pmdomain/mediatek/mt8173-pm-domains.h | 5
-rw-r--r-- drivers/pmdomain/mediatek/mt8183-pm-domains.h | 5
-rw-r--r-- drivers/pmdomain/mediatek/mt8186-pm-domains.h | 5
-rw-r--r-- drivers/pmdomain/mediatek/mt8188-pm-domains.h | 6
-rw-r--r-- drivers/pmdomain/mediatek/mt8192-pm-domains.h | 5
-rw-r--r-- drivers/pmdomain/mediatek/mt8195-pm-domains.h | 6
-rw-r--r-- drivers/pmdomain/mediatek/mt8365-pm-domains.h | 14
-rw-r--r-- drivers/pmdomain/mediatek/mtk-pm-domains.c | 399
-rw-r--r-- drivers/pmdomain/mediatek/mtk-pm-domains.h | 74
-rw-r--r-- drivers/pmdomain/qcom/rpmpd.c | 112
-rw-r--r-- drivers/pmdomain/renesas/rcar-gen4-sysc.c | 1
-rw-r--r-- drivers/pmdomain/renesas/rcar-sysc.c | 3
-rw-r--r-- drivers/pmdomain/renesas/rmobile-sysc.c | 3
-rw-r--r-- drivers/pmdomain/rockchip/Kconfig | 1
-rw-r--r-- drivers/pmdomain/rockchip/pm-domains.c | 2
-rw-r--r-- drivers/pmdomain/thead/th1520-pm-domains.c | 16
-rw-r--r-- drivers/pmdomain/ti/ti_sci_pm_domains.c | 24
-rw-r--r-- drivers/pnp/isapnp/core.c | 3
-rw-r--r-- drivers/power/reset/Kconfig | 7
-rw-r--r-- drivers/power/reset/Makefile | 1
-rw-r--r-- drivers/power/reset/th1520-aon-reboot.c | 98
-rw-r--r-- drivers/power/supply/88pm860x_charger.c | 8
-rw-r--r-- drivers/power/supply/Kconfig | 23
-rw-r--r-- drivers/power/supply/Makefile | 3
-rw-r--r-- drivers/power/supply/ab8500_btemp.c | 3
-rw-r--r-- drivers/power/supply/adc-battery-helper.c | 327
-rw-r--r-- drivers/power/supply/adc-battery-helper.h | 62
-rw-r--r-- drivers/power/supply/bq2415x_charger.c | 4
-rw-r--r-- drivers/power/supply/bq24190_charger.c | 2
-rw-r--r-- drivers/power/supply/bq257xx_charger.c | 755
-rw-r--r-- drivers/power/supply/bq27xxx_battery.c | 21
-rw-r--r-- drivers/power/supply/cw2015_battery.c | 8
-rw-r--r-- drivers/power/supply/gpio-charger.c | 7
-rw-r--r-- drivers/power/supply/intel_dc_ti_battery.c | 389
-rw-r--r-- drivers/power/supply/ipaq_micro_battery.c | 3
-rw-r--r-- drivers/power/supply/max77705_charger.c | 332
-rw-r--r-- drivers/power/supply/max77976_charger.c | 12
-rw-r--r-- drivers/power/supply/mt6370-charger.c | 18
-rw-r--r-- drivers/power/supply/power_supply_sysfs.c | 2
-rw-r--r-- drivers/power/supply/qcom_battmgr.c | 324
-rw-r--r-- drivers/power/supply/rk817_charger.c | 6
-rw-r--r-- drivers/power/supply/rt9467-charger.c | 47
-rw-r--r-- drivers/power/supply/rx51_battery.c | 2
-rw-r--r-- drivers/power/supply/sbs-charger.c | 16
-rw-r--r-- drivers/power/supply/sbs-manager.c | 2
-rw-r--r-- drivers/power/supply/ucs1002_power.c | 2
-rw-r--r-- drivers/power/supply/ug3105_battery.c | 346
-rw-r--r-- drivers/powercap/idle_inject.c | 5
-rw-r--r-- drivers/pps/kapi.c | 5
-rw-r--r-- drivers/pps/pps.c | 5
-rw-r--r-- drivers/ps3/ps3stor_lib.c | 3
-rw-r--r-- drivers/ptp/Kconfig | 13
-rw-r--r-- drivers/ptp/Makefile | 5
-rw-r--r-- drivers/ptp/ptp_chardev.c | 62
-rw-r--r-- drivers/ptp/ptp_clock.c | 150
-rw-r--r-- drivers/ptp/ptp_clockmatrix.c | 2
-rw-r--r-- drivers/ptp/ptp_netc.c | 1043
-rw-r--r-- drivers/ptp/ptp_ocp.c | 9
-rw-r--r-- drivers/ptp/ptp_private.h | 8
-rw-r--r-- drivers/ptp/ptp_qoriq.c | 24
-rw-r--r-- drivers/ptp/ptp_qoriq_debugfs.c | 101
-rw-r--r-- drivers/ptp/ptp_sysfs.c | 2
-rw-r--r-- drivers/ptp/ptp_vclock.c | 7
-rw-r--r-- drivers/pwm/Kconfig | 19
-rw-r--r-- drivers/pwm/Makefile | 1
-rw-r--r-- drivers/pwm/core.c | 108
-rw-r--r-- drivers/pwm/pwm-berlin.c | 4
-rw-r--r-- drivers/pwm/pwm-cros-ec.c | 10
-rw-r--r-- drivers/pwm/pwm-fsl-ftm.c | 35
-rw-r--r-- drivers/pwm/pwm-loongson.c | 2
-rw-r--r-- drivers/pwm/pwm-max7360.c | 209
-rw-r--r-- drivers/pwm/pwm-mediatek.c | 308
-rw-r--r-- drivers/pwm/pwm-pca9685.c | 515
-rw-r--r-- drivers/pwm/pwm-tiecap.c | 4
-rw-r--r-- drivers/pwm/pwm-tiehrpwm.c | 154
-rw-r--r-- drivers/ras/ras.c | 1
-rw-r--r-- drivers/regulator/Kconfig | 70
-rw-r--r-- drivers/regulator/Makefile | 7
-rw-r--r-- drivers/regulator/bd718x7-regulator.c | 2
-rw-r--r-- drivers/regulator/bq257xx-regulator.c | 186
-rw-r--r-- drivers/regulator/core.c | 6
-rw-r--r-- drivers/regulator/max77838-regulator.c | 221
-rw-r--r-- drivers/regulator/pca9450-regulator.c | 13
-rw-r--r-- drivers/regulator/pf0900-regulator.c | 975
-rw-r--r-- drivers/regulator/pf530x-regulator.c | 375
-rw-r--r-- drivers/regulator/qcom-pm8008-regulator.c | 2
-rw-r--r-- drivers/regulator/qcom-refgen-regulator.c | 1
-rw-r--r-- drivers/regulator/rpi-panel-attiny-regulator.c | 2
-rw-r--r-- drivers/regulator/rt5133-regulator.c | 642
-rw-r--r-- drivers/regulator/s2dos05-regulator.c | 165
-rw-r--r-- drivers/regulator/scmi-regulator.c | 3
-rw-r--r-- drivers/regulator/spacemit-p1.c | 157
-rw-r--r-- drivers/regulator/sy7636a-regulator.c | 7
-rw-r--r-- drivers/regulator/tps65219-regulator.c | 12
-rw-r--r-- drivers/regulator/tps6524x-regulator.c | 1
-rw-r--r-- drivers/regulator/tps6594-regulator.c | 2
-rw-r--r-- drivers/remoteproc/da8xx_remoteproc.c | 57
-rw-r--r-- drivers/remoteproc/imx_dsp_rproc.c | 45
-rw-r--r-- drivers/remoteproc/imx_rproc.c | 449
-rw-r--r-- drivers/remoteproc/imx_rproc.h | 7
-rw-r--r-- drivers/remoteproc/keystone_remoteproc.c | 95
-rw-r--r-- drivers/remoteproc/pru_rproc.c | 3
-rw-r--r-- drivers/remoteproc/qcom_q6v5.c | 8
-rw-r--r-- drivers/remoteproc/qcom_q6v5_adsp.c | 2
-rw-r--r-- drivers/remoteproc/qcom_q6v5_mss.c | 11
-rw-r--r-- drivers/remoteproc/qcom_q6v5_pas.c | 39
-rw-r--r-- drivers/remoteproc/qcom_q6v5_wcss.c | 2
-rw-r--r-- drivers/remoteproc/ti_k3_common.c | 21
-rw-r--r-- drivers/remoteproc/ti_k3_dsp_remoteproc.c | 2
-rw-r--r-- drivers/remoteproc/ti_k3_r5_remoteproc.c | 2
-rw-r--r-- drivers/remoteproc/wkup_m3_rproc.c | 69
-rw-r--r-- drivers/reset/Kconfig | 7
-rw-r--r-- drivers/reset/Makefile | 1
-rw-r--r-- drivers/reset/reset-aspeed.c | 253
-rw-r--r-- drivers/reset/reset-bcm6345.c | 1
-rw-r--r-- drivers/reset/reset-eyeq.c | 11
-rw-r--r-- drivers/reset/reset-intel-gw.c | 1
-rw-r--r-- drivers/reset/reset-qcom-pdc.c | 1
-rw-r--r-- drivers/reset/reset-th1520.c | 41
-rw-r--r-- drivers/rpmsg/qcom_glink_native.c | 2
-rw-r--r-- drivers/rpmsg/qcom_smd.c | 4
-rw-r--r-- drivers/rpmsg/rpmsg_char.c | 3
-rw-r--r-- drivers/rpmsg/rpmsg_core.c | 5
-rw-r--r-- drivers/rtc/Kconfig | 48
-rw-r--r-- drivers/rtc/Makefile | 2
-rw-r--r-- drivers/rtc/interface.c | 27
-rw-r--r-- drivers/rtc/rtc-amlogic-a4.c | 14
-rw-r--r-- drivers/rtc/rtc-cpcap.c | 1
-rw-r--r-- drivers/rtc/rtc-efi.c | 76
-rw-r--r-- drivers/rtc/rtc-isl12022.c | 1
-rw-r--r-- drivers/rtc/rtc-mc13xxx.c | 13
-rw-r--r-- drivers/rtc/rtc-meson.c | 1
-rw-r--r-- drivers/rtc/rtc-nct6694.c | 297
-rw-r--r-- drivers/rtc/rtc-optee.c | 465
-rw-r--r-- drivers/rtc/rtc-pcf2127.c | 19
-rw-r--r-- drivers/rtc/rtc-s3c.c | 49
-rw-r--r-- drivers/rtc/rtc-s3c.h | 19
-rw-r--r-- drivers/rtc/rtc-sd2405al.c | 4
-rw-r--r-- drivers/rtc/rtc-spacemit-p1.c | 167
-rw-r--r-- drivers/rtc/rtc-tps6586x.c | 1
-rw-r--r-- drivers/rtc/rtc-x1205.c | 2
-rw-r--r-- drivers/rtc/rtc-zynqmp.c | 19
-rw-r--r-- drivers/s390/block/Kconfig | 12
-rw-r--r-- drivers/s390/block/dasd.c | 24
-rw-r--r-- drivers/s390/block/dcssblk.c | 35
-rw-r--r-- drivers/s390/char/Makefile | 1
-rw-r--r-- drivers/s390/char/con3270.c | 18
-rw-r--r-- drivers/s390/char/hmcdrv_dev.c | 19
-rw-r--r-- drivers/s390/char/sclp.c | 11
-rw-r--r-- drivers/s390/char/sclp_cmd.c | 478
-rw-r--r-- drivers/s390/char/sclp_early_core.c | 2
-rw-r--r-- drivers/s390/char/sclp_mem.c | 399
-rw-r--r-- drivers/s390/char/tape_3590.c | 2
-rw-r--r-- drivers/s390/cio/cmf.c | 2
-rw-r--r-- drivers/s390/cio/device.c | 37
-rw-r--r-- drivers/s390/cio/ioasm.c | 7
-rw-r--r-- drivers/s390/crypto/ap_bus.h | 2
-rw-r--r-- drivers/s390/crypto/vfio_ap_ops.c | 2
-rw-r--r-- drivers/s390/crypto/zcrypt_ep11misc.c | 4
-rw-r--r-- drivers/s390/net/Kconfig | 3
-rw-r--r-- drivers/s390/net/ism.h | 53
-rw-r--r-- drivers/s390/net/ism_drv.c | 573
-rw-r--r-- drivers/scsi/3w-9xxx.c | 2
-rw-r--r-- drivers/scsi/3w-sas.c | 2
-rw-r--r-- drivers/scsi/3w-xxxx.c | 2
-rw-r--r-- drivers/scsi/BusLogic.c | 8
-rw-r--r-- drivers/scsi/BusLogic.h | 2
-rw-r--r-- drivers/scsi/Kconfig | 2
-rw-r--r-- drivers/scsi/aacraid/linit.c | 6
-rw-r--r-- drivers/scsi/advansys.c | 2
-rw-r--r-- drivers/scsi/aha152x.c | 4
-rw-r--r-- drivers/scsi/aha1542.c | 2
-rw-r--r-- drivers/scsi/aha1740.c | 2
-rw-r--r-- drivers/scsi/aic7xxx/aic79xx_osm.c | 4
-rw-r--r-- drivers/scsi/aic7xxx/aic7xxx_osm.c | 4
-rw-r--r-- drivers/scsi/aic94xx/aic94xx_task.c | 1
-rw-r--r-- drivers/scsi/arcmsr/arcmsr_hba.c | 6
-rw-r--r-- drivers/scsi/atp870u.c | 2
-rw-r--r-- drivers/scsi/bfa/bfa_core.c | 1
-rw-r--r-- drivers/scsi/csiostor/csio_wr.c | 4
-rw-r--r-- drivers/scsi/fdomain.c | 4
-rw-r--r-- drivers/scsi/fnic/fnic.h | 2
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_main.c | 2
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 6
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 6
-rw-r--r-- drivers/scsi/hpsa.c | 53
-rw-r--r-- drivers/scsi/imm.c | 2
-rw-r--r-- drivers/scsi/initio.c | 4
-rw-r--r-- drivers/scsi/ipr.c | 16
-rw-r--r-- drivers/scsi/ips.c | 2
-rw-r--r-- drivers/scsi/ips.h | 2
-rw-r--r-- drivers/scsi/isci/remote_device.c | 2
-rw-r--r-- drivers/scsi/libfc/fc_encode.h | 2
-rw-r--r-- drivers/scsi/libsas/sas_expander.c | 5
-rw-r--r-- drivers/scsi/libsas/sas_scsi_host.c | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc.h | 52
-rw-r--r-- drivers/scsi/lpfc/lpfc_debugfs.c | 633
-rw-r--r-- drivers/scsi/lpfc/lpfc_debugfs.h | 5
-rw-r--r-- drivers/scsi/lpfc/lpfc_els.c | 23
-rw-r--r-- drivers/scsi/lpfc/lpfc_hw.h | 3
-rw-r--r-- drivers/scsi/lpfc/lpfc_hw4.h | 6
-rw-r--r-- drivers/scsi/lpfc/lpfc_init.c | 14
-rw-r--r-- drivers/scsi/lpfc/lpfc_nportdisc.c | 25
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvme.c | 8
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvmet.c | 10
-rw-r--r-- drivers/scsi/lpfc/lpfc_scsi.c | 14
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.c | 21
-rw-r--r-- drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_vport.c | 2
-rw-r--r-- drivers/scsi/megaraid.c | 4
-rw-r--r-- drivers/scsi/megaraid.h | 2
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_base.c | 4
-rw-r--r-- drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h | 38
-rw-r--r-- drivers/scsi/mpi3mr/mpi/mpi30_pci.h | 2
-rw-r--r-- drivers/scsi/mpi3mr/mpi/mpi30_sas.h | 1
-rw-r--r-- drivers/scsi/mpi3mr/mpi/mpi30_transport.h | 2
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr.h | 8
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr_fw.c | 13
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr_os.c | 32
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr_transport.c | 11
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_base.c | 8
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_base.h | 4
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 4
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_transport.c | 11
-rw-r--r-- drivers/scsi/mvsas/mv_init.c | 2
-rw-r--r-- drivers/scsi/mvsas/mv_sas.c | 2
-rw-r--r-- drivers/scsi/mvumi.c | 2
-rw-r--r-- drivers/scsi/myrb.c | 2
-rw-r--r-- drivers/scsi/myrs.c | 8
-rw-r--r-- drivers/scsi/pcmcia/sym53c500_cs.c | 2
-rw-r--r-- drivers/scsi/pm8001/pm8001_ctl.c | 24
-rw-r--r-- drivers/scsi/pm8001/pm8001_hwi.c | 11
-rw-r--r-- drivers/scsi/pm8001/pm8001_hwi.h | 4
-rw-r--r-- drivers/scsi/pm8001/pm8001_init.c | 1
-rw-r--r-- drivers/scsi/pm8001/pm8001_sas.c | 34
-rw-r--r-- drivers/scsi/pm8001/pm8001_sas.h | 5
-rw-r--r-- drivers/scsi/pm8001/pm80xx_hwi.c | 10
-rw-r--r-- drivers/scsi/pm8001/pm80xx_hwi.h | 4
-rw-r--r-- drivers/scsi/ppa.c | 2
-rw-r--r-- drivers/scsi/qla1280.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_bsg.c | 4
-rw-r--r-- drivers/scsi/qla2xxx/qla_edif.c | 4
-rw-r--r-- drivers/scsi/qla2xxx/qla_init.c | 4
-rw-r--r-- drivers/scsi/qla2xxx/qla_nvme.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 13
-rw-r--r-- drivers/scsi/qla4xxx/ql4_os.c | 2
-rw-r--r-- drivers/scsi/qlogicfas408.c | 2
-rw-r--r-- drivers/scsi/qlogicfas408.h | 2
-rw-r--r-- drivers/scsi/scsi_debug.c | 108
-rw-r--r-- drivers/scsi/scsi_lib.c | 3
-rw-r--r-- drivers/scsi/scsi_sysfs.c | 4
-rw-r--r-- drivers/scsi/scsicam.c | 16
-rw-r--r-- drivers/scsi/sd.c | 66
-rw-r--r-- drivers/scsi/sg.c | 3
-rw-r--r-- drivers/scsi/smartpqi/smartpqi_init.c | 17
-rw-r--r-- drivers/scsi/sr.c | 16
-rw-r--r-- drivers/scsi/stex.c | 2
-rw-r--r-- drivers/scsi/storvsc_drv.c | 6
-rw-r--r-- drivers/scsi/wd719x.c | 2
-rw-r--r-- drivers/siox/siox-bus-gpio.c | 3
-rw-r--r-- drivers/slimbus/Kconfig | 7
-rw-r--r-- drivers/slimbus/Makefile | 3
-rw-r--r-- drivers/slimbus/messaging.c | 4
-rw-r--r-- drivers/slimbus/qcom-ctrl.c | 735
-rw-r--r-- drivers/soc/apple/Kconfig | 3
-rw-r--r-- drivers/soc/apple/mailbox.c | 19
-rw-r--r-- drivers/soc/apple/sart.c | 60
-rw-r--r-- drivers/soc/aspeed/aspeed-lpc-ctrl.c | 14
-rw-r--r-- drivers/soc/aspeed/aspeed-p2a-ctrl.c | 14
-rw-r--r-- drivers/soc/aspeed/aspeed-socinfo.c | 4
-rw-r--r-- drivers/soc/bcm/brcmstb/pm/pm.h | 2
-rw-r--r-- drivers/soc/fsl/qbman/qman_test_stash.c | 2
-rw-r--r-- drivers/soc/fsl/qe/gpio.c | 139
-rw-r--r-- drivers/soc/fsl/qe/qmc.c | 44
-rw-r--r-- drivers/soc/hisilicon/kunpeng_hccs.c | 2
-rw-r--r-- drivers/soc/mediatek/mtk-svs.c | 23
-rw-r--r-- drivers/soc/qcom/icc-bwmon.c | 3
-rw-r--r-- drivers/soc/qcom/llcc-qcom.c | 1
-rw-r--r-- drivers/soc/qcom/mdt_loader.c | 32
-rw-r--r-- drivers/soc/qcom/qcom-geni-se.c | 506
-rw-r--r-- drivers/soc/qcom/qcom_pd_mapper.c | 1
-rw-r--r-- drivers/soc/qcom/ramp_controller.c | 1
-rw-r--r-- drivers/soc/qcom/rpm_master_stats.c | 2
-rw-r--r-- drivers/soc/qcom/rpmh-rsc.c | 7
-rw-r--r-- drivers/soc/qcom/smem.c | 2
-rw-r--r-- drivers/soc/qcom/ubwc_config.c | 23
-rw-r--r-- drivers/soc/renesas/Kconfig | 13
-rw-r--r-- drivers/soc/renesas/pwc-rzv2m.c | 2
-rw-r--r-- drivers/soc/renesas/r9a08g045-sysc.c | 1
-rw-r--r-- drivers/soc/renesas/r9a09g047-sys.c | 1
-rw-r--r-- drivers/soc/renesas/r9a09g057-sys.c | 1
-rw-r--r-- drivers/soc/renesas/renesas-soc.c | 12
-rw-r--r-- drivers/soc/renesas/rz-sysc.c | 30
-rw-r--r-- drivers/soc/renesas/rz-sysc.h | 2
-rw-r--r-- drivers/soc/rockchip/grf.c | 35
-rw-r--r-- drivers/soc/samsung/exynos-pmu.c | 276
-rw-r--r-- drivers/soc/sunxi/sunxi_sram.c | 14
-rw-r--r-- drivers/soc/tegra/Kconfig | 1
-rw-r--r-- drivers/soc/tegra/fuse/fuse-tegra30.c | 122
-rw-r--r-- drivers/soc/tegra/pmc.c | 51
-rw-r--r-- drivers/soc/ti/k3-socinfo.c | 10
-rw-r--r-- drivers/soc/ti/pruss.c | 2
-rw-r--r-- drivers/soundwire/bus.c | 12
-rw-r--r-- drivers/soundwire/bus_type.c | 3
-rw-r--r-- drivers/soundwire/debugfs.c | 2
-rw-r--r-- drivers/soundwire/qcom.c | 5
-rw-r--r-- drivers/soundwire/slave.c | 6
-rw-r--r-- drivers/spi/Kconfig | 26
-rw-r--r-- drivers/spi/Makefile | 2
-rw-r--r-- drivers/spi/atmel-quadspi.c | 134
-rw-r--r-- drivers/spi/spi-altera-platform.c | 1
-rw-r--r-- drivers/spi/spi-amd-pci.c | 5
-rw-r--r-- drivers/spi/spi-amd.c | 2
-rw-r--r-- drivers/spi/spi-amlogic-spifc-a4.c | 1222
-rw-r--r-- drivers/spi/spi-amlogic-spisg.c | 4
-rw-r--r-- drivers/spi/spi-apple.c | 1
-rw-r--r-- drivers/spi/spi-atmel.c | 78
-rw-r--r-- drivers/spi/spi-axi-spi-engine.c | 17
-rw-r--r-- drivers/spi/spi-bcm2835.c | 2
-rw-r--r-- drivers/spi/spi-cadence-quadspi.c | 122
-rw-r--r-- drivers/spi/spi-cs42l43.c | 2
-rw-r--r-- drivers/spi/spi-fsl-dspi.c | 232
-rw-r--r-- drivers/spi/spi-fsl-lpspi.c | 63
-rw-r--r-- drivers/spi/spi-geni-qcom.c | 6
-rw-r--r-- drivers/spi/spi-ljca.c | 2
-rw-r--r-- drivers/spi/spi-loopback-test.c | 12
-rw-r--r-- drivers/spi/spi-mem.c | 4
-rw-r--r-- drivers/spi/spi-microchip-core-qspi.c | 15
-rw-r--r-- drivers/spi/spi-microchip-core.c | 3
-rw-r--r-- drivers/spi/spi-mt65xx.c | 30
-rw-r--r-- drivers/spi/spi-mtk-snfi.c | 1
-rw-r--r-- drivers/spi/spi-mxs.c | 2
-rw-r--r-- drivers/spi/spi-npcm-fiu.c | 6
-rw-r--r-- drivers/spi/spi-nxp-fspi.c | 117
-rw-r--r-- drivers/spi/spi-offload-trigger-adi-util-sigma-delta.c | 5
-rw-r--r-- drivers/spi/spi-omap2-mcspi.c | 1
-rw-r--r-- drivers/spi/spi-pl022.c | 13
-rw-r--r-- drivers/spi/spi-pxa2xx.c | 2
-rw-r--r-- drivers/spi/spi-qpic-snand.c | 86
-rw-r--r-- drivers/spi/spi-rb4xx.c | 36
-rw-r--r-- drivers/spi/spi-rpc-if.c | 12
-rw-r--r-- drivers/spi/spi-s3c64xx.c | 19
-rw-r--r-- drivers/spi/spi-st-ssc4.c | 10
-rw-r--r-- drivers/spi/spi-sunplus-sp7021.c | 6
-rw-r--r-- drivers/spi/spi-virtio.c | 431
-rw-r--r-- drivers/spi/spi-xcomm.c | 2
-rw-r--r-- drivers/spi/spi.c | 85
-rw-r--r-- drivers/ssb/driver_gpio.c | 4
-rw-r--r-- drivers/staging/axis-fifo/axis-fifo.c | 105
-rw-r--r-- drivers/staging/gpib/agilent_82357a/agilent_82357a.c | 18
-rw-r--r-- drivers/staging/gpib/agilent_82357a/agilent_82357a.h | 10
-rw-r--r-- drivers/staging/gpib/cb7210/cb7210.h | 4
-rw-r--r-- drivers/staging/gpib/cec/cec_gpib.c | 2
-rw-r--r-- drivers/staging/gpib/common/gpib_os.c | 2
-rw-r--r-- drivers/staging/gpib/common/iblib.c | 2
-rw-r--r-- drivers/staging/gpib/eastwood/fluke_gpib.c | 2
-rw-r--r-- drivers/staging/gpib/fmh_gpib/fmh_gpib.c | 2
-rw-r--r-- drivers/staging/gpib/gpio/gpib_bitbang.c | 16
-rw-r--r-- drivers/staging/gpib/hp_82341/hp_82341.c | 12
-rw-r--r-- drivers/staging/gpib/hp_82341/hp_82341.h | 40
-rw-r--r-- drivers/staging/gpib/include/amccs5933.h | 4
-rw-r--r-- drivers/staging/gpib/include/gpib_types.h | 3
-rw-r--r-- drivers/staging/gpib/include/nec7210.h | 26
-rw-r--r-- drivers/staging/gpib/include/nec7210_registers.h | 4
-rw-r--r-- drivers/staging/gpib/include/plx9050.h | 8
-rw-r--r-- drivers/staging/gpib/include/tms9914.h | 90
-rw-r--r-- drivers/staging/gpib/include/tnt4882_registers.h | 22
-rw-r--r-- drivers/staging/gpib/ines/ines.h | 12
-rw-r--r-- drivers/staging/gpib/ines/ines_gpib.c | 4
-rw-r--r-- drivers/staging/gpib/nec7210/nec7210.c | 6
-rw-r--r-- drivers/staging/gpib/ni_usb/ni_usb_gpib.c | 10
-rw-r--r-- drivers/staging/gpib/ni_usb/ni_usb_gpib.h | 10
-rw-r--r-- drivers/staging/gpib/pc2/pc2_gpib.c | 4
-rw-r--r-- drivers/staging/gpib/tms9914/tms9914.c | 10
-rw-r--r-- drivers/staging/gpib/tnt4882/mite.h | 10
-rw-r--r-- drivers/staging/gpib/tnt4882/tnt4882_gpib.c | 5
-rw-r--r-- drivers/staging/greybus/gpio.c | 2
-rw-r--r-- drivers/staging/iio/adc/ad7816.c | 2
-rw-r--r-- drivers/staging/media/atomisp/i2c/Kconfig | 9
-rw-r--r-- drivers/staging/media/atomisp/i2c/Makefile | 1
-rw-r--r-- drivers/staging/media/atomisp/pci/atomisp_subdev.c | 9
-rw-r--r-- drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c | 4
-rw-r--r-- drivers/staging/media/imx/imx-media-csc-scaler.c | 26
-rw-r--r-- drivers/staging/media/imx/imx-media-csi.c | 8
-rw-r--r-- drivers/staging/media/ipu3/ipu3-css.c | 3
-rw-r--r-- drivers/staging/media/ipu3/ipu3-v4l2.c | 5
-rw-r--r-- drivers/staging/media/ipu7/ipu7-isys-csi2.c | 2
-rw-r--r-- drivers/staging/media/ipu7/ipu7-isys-queue.c | 3
-rw-r--r-- drivers/staging/media/ipu7/ipu7-isys-subdev.c | 35
-rw-r--r-- drivers/staging/media/ipu7/ipu7-isys-subdev.h | 1
-rw-r--r-- drivers/staging/media/ipu7/ipu7-isys-video.c | 37
-rw-r--r-- drivers/staging/media/ipu7/ipu7.c | 29
-rw-r--r-- drivers/staging/media/meson/vdec/vdec.c | 29
-rw-r--r-- drivers/staging/media/meson/vdec/vdec.h | 5
-rw-r--r-- drivers/staging/media/sunxi/cedrus/cedrus.c | 8
-rw-r--r-- drivers/staging/media/sunxi/cedrus/cedrus.h | 5
-rw-r--r-- drivers/staging/media/sunxi/cedrus/cedrus_video.c | 5
-rw-r--r-- drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_capture.c | 16
-rw-r--r-- drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_params.c | 6
-rw-r--r-- drivers/staging/media/tegra-video/tegra20.c | 4
-rw-r--r-- drivers/staging/most/video/video.c | 19
-rw-r--r-- drivers/staging/octeon/ethernet-tx.c | 43
-rw-r--r-- drivers/staging/octeon/octeon-stubs.h | 134
-rw-r--r-- drivers/staging/rtl8723bs/Makefile | 2
-rw-r--r-- drivers/staging/rtl8723bs/core/rtw_ap.c | 8
-rw-r--r-- drivers/staging/rtl8723bs/core/rtw_efuse.c | 169
-rw-r--r-- drivers/staging/rtl8723bs/core/rtw_mlme.c | 231
-rw-r--r-- drivers/staging/rtl8723bs/core/rtw_mlme_ext.c | 130
-rw-r--r-- drivers/staging/rtl8723bs/core/rtw_pwrctrl.c | 10
-rw-r--r-- drivers/staging/rtl8723bs/core/rtw_recv.c | 194
-rw-r--r-- drivers/staging/rtl8723bs/core/rtw_security.c | 78
-rw-r--r-- drivers/staging/rtl8723bs/core/rtw_sta_mgt.c | 6
-rw-r--r-- drivers/staging/rtl8723bs/core/rtw_wlan_util.c | 60
-rw-r--r-- drivers/staging/rtl8723bs/core/rtw_xmit.c | 2
-rw-r--r-- drivers/staging/rtl8723bs/hal/hal_com_phycfg.c | 5
-rw-r--r-- drivers/staging/rtl8723bs/hal/hal_pwr_seq.c | 2
-rw-r--r-- drivers/staging/rtl8723bs/hal/odm.c | 152
-rw-r--r-- drivers/staging/rtl8723bs/hal/odm.h | 6
-rw-r--r-- drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c | 368
-rw-r--r-- drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c | 6
-rw-r--r-- drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c | 2
-rw-r--r-- drivers/staging/rtl8723bs/hal/sdio_halinit.c | 1
-rw-r--r-- drivers/staging/rtl8723bs/include/basic_types.h | 41
-rw-r--r--drivers/staging/rtl8723bs/include/drv_types.h2
-rw-r--r--drivers/staging/rtl8723bs/include/hal_intf.h7
-rw-r--r--drivers/staging/rtl8723bs/include/mlme_osdep.h19
-rw-r--r--drivers/staging/rtl8723bs/include/recv_osdep.h40
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_hal.h2
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_efuse.h15
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme.h1
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme_ext.h2
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_recv.h4
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c8
-rw-r--r--drivers/staging/rtl8723bs/os_dep/mlme_linux.c179
-rw-r--r--drivers/staging/rtl8723bs/os_dep/recv_linux.c225
-rw-r--r--drivers/staging/sm750fb/sm750.h6
-rw-r--r--drivers/staging/sm750fb/sm750_accel.c8
-rw-r--r--drivers/staging/sm750fb/sm750_hw.c4
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-msg.h2
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c2
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.h2
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.c3
-rw-r--r--drivers/target/target_core_configfs.c2
-rw-r--r--drivers/target/target_core_pscsi.c2
-rw-r--r--drivers/tee/Kconfig9
-rw-r--r--drivers/tee/Makefile2
-rw-r--r--drivers/tee/optee/Kconfig5
-rw-r--r--drivers/tee/optee/Makefile1
-rw-r--r--drivers/tee/optee/core.c9
-rw-r--r--drivers/tee/optee/ffa_abi.c150
-rw-r--r--drivers/tee/optee/optee_ffa.h27
-rw-r--r--drivers/tee/optee/optee_msg.h84
-rw-r--r--drivers/tee/optee/optee_private.h15
-rw-r--r--drivers/tee/optee/optee_smc.h37
-rw-r--r--drivers/tee/optee/protmem.c335
-rw-r--r--drivers/tee/optee/smc_abi.c141
-rw-r--r--drivers/tee/qcomtee/Kconfig12
-rw-r--r--drivers/tee/qcomtee/Makefile9
-rw-r--r--drivers/tee/qcomtee/async.c182
-rw-r--r--drivers/tee/qcomtee/call.c820
-rw-r--r--drivers/tee/qcomtee/core.c915
-rw-r--r--drivers/tee/qcomtee/mem_obj.c169
-rw-r--r--drivers/tee/qcomtee/primordial_obj.c113
-rw-r--r--drivers/tee/qcomtee/qcomtee.h185
-rw-r--r--drivers/tee/qcomtee/qcomtee_msg.h304
-rw-r--r--drivers/tee/qcomtee/qcomtee_object.h316
-rw-r--r--drivers/tee/qcomtee/shm.c150
-rw-r--r--drivers/tee/qcomtee/user_obj.c692
-rw-r--r--drivers/tee/tee_core.c342
-rw-r--r--drivers/tee/tee_heap.c500
-rw-r--r--drivers/tee/tee_private.h20
-rw-r--r--drivers/tee/tee_shm.c179
-rw-r--r--drivers/thermal/gov_step_wise.c25
-rw-r--r--drivers/thermal/intel/int340x_thermal/Kconfig1
-rw-r--r--drivers/thermal/intel/int340x_thermal/Makefile1
-rw-r--r--drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c3
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.c20
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.h6
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c3
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_soc_slider.c284
-rw-r--r--drivers/thermal/k3_j72xx_bandgap.c4
-rw-r--r--drivers/thermal/mediatek/lvts_thermal.c2
-rw-r--r--drivers/thermal/qcom/Kconfig3
-rw-r--r--drivers/thermal/qcom/lmh.c4
-rw-r--r--drivers/thermal/renesas/Kconfig21
-rw-r--r--drivers/thermal/renesas/Makefile2
-rw-r--r--drivers/thermal/renesas/rcar_gen3_thermal.c63
-rw-r--r--drivers/thermal/renesas/rzg3e_thermal.c547
-rw-r--r--drivers/thermal/renesas/rzg3s_thermal.c272
-rw-r--r--drivers/thermal/rockchip_thermal.c50
-rw-r--r--drivers/thermal/tegra/Makefile1
-rw-r--r--drivers/thermal/tegra/soctherm-fuse.c18
-rw-r--r--drivers/thermal/tegra/soctherm.c13
-rw-r--r--drivers/thermal/tegra/soctherm.h11
-rw-r--r--drivers/thermal/tegra/tegra114-soctherm.c209
-rw-r--r--drivers/thermal/tegra/tegra124-soctherm.c4
-rw-r--r--drivers/thermal/tegra/tegra132-soctherm.c4
-rw-r--r--drivers/thermal/tegra/tegra210-soctherm.c4
-rw-r--r--drivers/thermal/testing/zone.c31
-rw-r--r--drivers/thermal/thermal-generic-adc.c55
-rw-r--r--drivers/thermal/thermal_hwmon.c2
-rw-r--r--drivers/thunderbolt/Kconfig4
-rw-r--r--drivers/thunderbolt/acpi.c28
-rw-r--r--drivers/thunderbolt/cap.c49
-rw-r--r--drivers/thunderbolt/clx.c12
-rw-r--r--drivers/thunderbolt/ctl.c33
-rw-r--r--drivers/thunderbolt/ctl.h1
-rw-r--r--drivers/thunderbolt/debugfs.c3
-rw-r--r--drivers/thunderbolt/dma_port.c21
-rw-r--r--drivers/thunderbolt/domain.c73
-rw-r--r--drivers/thunderbolt/eeprom.c4
-rw-r--r--drivers/thunderbolt/lc.c58
-rw-r--r--drivers/thunderbolt/nhi.c20
-rw-r--r--drivers/thunderbolt/nhi_regs.h6
-rw-r--r--drivers/thunderbolt/nvm.c42
-rw-r--r--drivers/thunderbolt/path.c14
-rw-r--r--drivers/thunderbolt/property.c38
-rw-r--r--drivers/thunderbolt/retimer.c7
-rw-r--r--drivers/thunderbolt/switch.c140
-rw-r--r--drivers/thunderbolt/tb.c32
-rw-r--r--drivers/thunderbolt/tb.h45
-rw-r--r--drivers/thunderbolt/tmu.c16
-rw-r--r--drivers/thunderbolt/tunnel.c90
-rw-r--r--drivers/thunderbolt/tunnel.h9
-rw-r--r--drivers/thunderbolt/usb4.c346
-rw-r--r--drivers/thunderbolt/usb4_port.c7
-rw-r--r--drivers/thunderbolt/xdomain.c53
-rw-r--r--drivers/tty/hvc/hvc_console.c8
-rw-r--r--drivers/tty/mxser.c259
-rw-r--r--drivers/tty/n_gsm.c25
-rw-r--r--drivers/tty/serdev/core.c11
-rw-r--r--drivers/tty/serial/8250/8250.h5
-rw-r--r--drivers/tty/serial/8250/8250_core.c93
-rw-r--r--drivers/tty/serial/8250/8250_omap.c181
-rw-r--r--drivers/tty/serial/8250/8250_platform.c87
-rw-r--r--drivers/tty/serial/8250/8250_port.c298
-rw-r--r--drivers/tty/serial/8250/8250_rsa.c15
-rw-r--r--drivers/tty/serial/Kconfig14
-rw-r--r--drivers/tty/serial/ip22zilog.c352
-rw-r--r--drivers/tty/serial/max3100.c2
-rw-r--r--drivers/tty/serial/max310x.c30
-rw-r--r--drivers/tty/serial/msm_serial.c2
-rw-r--r--drivers/tty/serial/mvebu-uart.c10
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c331
-rw-r--r--drivers/tty/serial/sc16is7xx.c18
-rw-r--r--drivers/tty/serial/serial_core.c143
-rw-r--r--drivers/tty/serial/xilinx_uartps.c10
-rw-r--r--drivers/tty/sysrq.c3
-rw-r--r--drivers/tty/tty_port.c168
-rw-r--r--drivers/tty/vt/consolemap.c116
-rw-r--r--drivers/tty/vt/selection.c20
-rw-r--r--drivers/tty/vt/vc_screen.c74
-rw-r--r--drivers/tty/vt/vt.c247
-rw-r--r--drivers/tty/vt/vt_ioctl.c194
-rw-r--r--drivers/ufs/core/ufs-mcq.c15
-rw-r--r--drivers/ufs/core/ufs-sysfs.c4
-rw-r--r--drivers/ufs/core/ufs-sysfs.h1
-rw-r--r--drivers/ufs/core/ufs_trace.h1
-rw-r--r--drivers/ufs/core/ufs_trace_types.h24
-rw-r--r--drivers/ufs/core/ufshcd.c158
-rw-r--r--drivers/ufs/host/ufs-exynos.c10
-rw-r--r--drivers/ufs/host/ufs-mediatek.c354
-rw-r--r--drivers/ufs/host/ufs-mediatek.h1
-rw-r--r--drivers/ufs/host/ufs-qcom.c265
-rw-r--r--drivers/ufs/host/ufs-qcom.h28
-rw-r--r--drivers/ufs/host/ufshcd-pci.c1
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.c33
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.h1
-rw-r--r--drivers/uio/Kconfig2
-rw-r--r--drivers/uio/uio_aec.c2
-rw-r--r--drivers/uio/uio_cif.c2
-rw-r--r--drivers/uio/uio_dmem_genirq.c23
-rw-r--r--drivers/uio/uio_hv_generic.c7
-rw-r--r--drivers/uio/uio_netx.c2
-rw-r--r--drivers/uio/uio_pdrv_genirq.c24
-rw-r--r--drivers/uio/uio_sercos3.c2
-rw-r--r--drivers/usb/cdns3/cdns3-trace.h61
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.c8
-rw-r--r--drivers/usb/cdns3/cdnsp-pci.c5
-rw-r--r--drivers/usb/cdns3/cdnsp-trace.h25
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c3
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c23
-rw-r--r--drivers/usb/class/usblp.c3
-rw-r--r--drivers/usb/core/Makefile1
-rw-r--r--drivers/usb/core/config.c4
-rw-r--r--drivers/usb/core/driver.c62
-rw-r--r--drivers/usb/core/generic.c2
-rw-r--r--drivers/usb/core/hcd.c28
-rw-r--r--drivers/usb/core/offload.c136
-rw-r--r--drivers/usb/core/quirks.c1
-rw-r--r--drivers/usb/core/urb.c14
-rw-r--r--drivers/usb/core/usb.c51
-rw-r--r--drivers/usb/dwc2/params.c26
-rw-r--r--drivers/usb/dwc3/Kconfig11
-rw-r--r--drivers/usb/dwc3/Makefile1
-rw-r--r--drivers/usb/dwc3/core.c2
-rw-r--r--drivers/usb/dwc3/core.h26
-rw-r--r--drivers/usb/dwc3/debug.h18
-rw-r--r--drivers/usb/dwc3/debugfs.c12
-rw-r--r--drivers/usb/dwc3/drd.c1
-rw-r--r--drivers/usb/dwc3/dwc3-generic-plat.c166
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c2
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c174
-rw-r--r--drivers/usb/dwc3/ep0.c20
-rw-r--r--drivers/usb/dwc3/gadget.c20
-rw-r--r--drivers/usb/dwc3/trace.h17
-rw-r--r--drivers/usb/gadget/configfs.c2
-rw-r--r--drivers/usb/gadget/function/f_acm.c42
-rw-r--r--drivers/usb/gadget/function/f_ecm.c48
-rw-r--r--drivers/usb/gadget/function/f_fs.c10
-rw-r--r--drivers/usb/gadget/function/f_hid.c4
-rw-r--r--drivers/usb/gadget/function/f_midi2.c11
-rw-r--r--drivers/usb/gadget/function/f_ncm.c81
-rw-r--r--drivers/usb/gadget/function/f_rndis.c85
-rw-r--r--drivers/usb/gadget/function/uvc.h5
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c8
-rw-r--r--drivers/usb/gadget/legacy/inode.c2
-rw-r--r--drivers/usb/gadget/udc/cdns2/cdns2-trace.h69
-rw-r--r--drivers/usb/gadget/udc/core.c4
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c8
-rw-r--r--drivers/usb/gadget/udc/tegra-xudc.c12
-rw-r--r--drivers/usb/gadget/udc/trace.h5
-rw-r--r--drivers/usb/host/Kconfig2
-rw-r--r--drivers/usb/host/max3421-hcd.c2
-rw-r--r--drivers/usb/host/ohci-s3c2410.c8
-rw-r--r--drivers/usb/host/xhci-caps.h2
-rw-r--r--drivers/usb/host/xhci-dbgcap.c94
-rw-r--r--drivers/usb/host/xhci-hub.c3
-rw-r--r--drivers/usb/host/xhci-mem.c107
-rw-r--r--drivers/usb/host/xhci-pci-renesas.c7
-rw-r--r--drivers/usb/host/xhci-pci.c42
-rw-r--r--drivers/usb/host/xhci-plat.c57
-rw-r--r--drivers/usb/host/xhci-plat.h2
-rw-r--r--drivers/usb/host/xhci-rcar-regs.h49
-rw-r--r--drivers/usb/host/xhci-rcar.c100
-rw-r--r--drivers/usb/host/xhci-ring.c39
-rw-r--r--drivers/usb/host/xhci-rzg3e-regs.h12
-rw-r--r--drivers/usb/host/xhci-sideband.c36
-rw-r--r--drivers/usb/host/xhci-tegra.c84
-rw-r--r--drivers/usb/host/xhci-trace.h34
-rw-r--r--drivers/usb/host/xhci.c39
-rw-r--r--drivers/usb/host/xhci.h8
-rw-r--r--drivers/usb/misc/Kconfig19
-rw-r--r--drivers/usb/misc/Makefile1
-rw-r--r--drivers/usb/misc/qcom_eud.c36
-rw-r--r--drivers/usb/misc/usb251xb.c108
-rw-r--r--drivers/usb/misc/usbio.c749
-rw-r--r--drivers/usb/mon/mon_bin.c14
-rw-r--r--drivers/usb/musb/musb_dsps.c2
-rw-r--r--drivers/usb/phy/phy-twl6030-usb.c3
-rw-r--r--drivers/usb/renesas_usbhs/common.c4
-rw-r--r--drivers/usb/serial/cp210x.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.c4
-rw-r--r--drivers/usb/serial/option.c23
-rw-r--r--drivers/usb/serial/oti6858.c2
-rw-r--r--drivers/usb/storage/realtek_cr.c6
-rw-r--r--drivers/usb/storage/unusual_devs.h29
-rw-r--r--drivers/usb/typec/mux/tusb1046.c2
-rw-r--r--drivers/usb/typec/tcpm/fusb302.c12
-rw-r--r--drivers/usb/typec/tcpm/maxim_contaminant.c58
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c2
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c2
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c33
-rw-r--r--drivers/usb/typec/tcpm/tcpci_maxim.h1
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c12
-rw-r--r--drivers/usb/typec/tipd/core.c557
-rw-r--r--drivers/usb/typec/tipd/tps6598x.h5
-rw-r--r--drivers/usb/typec/tipd/trace.h39
-rw-r--r--drivers/usb/typec/ucsi/debugfs.c31
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c17
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h13
-rw-r--r--drivers/usb/typec/ucsi/ucsi_stm32g0.c7
-rw-r--r--drivers/usb/usbip/vhci_hcd.c22
-rw-r--r--drivers/vdpa/Kconfig8
-rw-r--r--drivers/vdpa/alibaba/eni_vdpa.c5
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_main.c5
-rw-r--r--drivers/vdpa/mlx5/core/mr.c4
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c15
-rw-r--r--drivers/vdpa/octeon_ep/octep_vdpa_main.c6
-rw-r--r--drivers/vdpa/pds/vdpa_dev.c5
-rw-r--r--drivers/vdpa/solidrun/snet_main.c8
-rw-r--r--drivers/vdpa/vdpa.c5
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c4
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.c134
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.h7
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c79
-rw-r--r--drivers/vdpa/virtio_pci/vp_vdpa.c5
-rw-r--r--drivers/vfio/cdx/Makefile6
-rw-r--r--drivers/vfio/cdx/private.h14
-rw-r--r--drivers/vfio/debugfs.c19
-rw-r--r--drivers/vfio/fsl-mc/Kconfig5
-rw-r--r--drivers/vfio/fsl-mc/vfio_fsl_mc.c2
-rw-r--r--drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c6
-rw-r--r--drivers/vfio/pci/nvgrace-gpu/main.c4
-rw-r--r--drivers/vfio/pci/pds/dirty.c2
-rw-r--r--drivers/vfio/pci/pds/lm.c3
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c9
-rw-r--r--drivers/vfio/pci/virtio/migrate.c3
-rw-r--r--drivers/vfio/platform/Kconfig5
-rw-r--r--drivers/vfio/platform/reset/Kconfig6
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_amdxgbe.c2
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c2
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c2
-rw-r--r--drivers/vfio/platform/vfio_amba.c2
-rw-r--r--drivers/vfio/vfio_iommu_type1.c112
-rw-r--r--drivers/vfio/vfio_main.c22
-rw-r--r--drivers/vhost/net.c49
-rw-r--r--drivers/vhost/scsi.c2
-rw-r--r--drivers/vhost/vdpa.c6
-rw-r--r--drivers/vhost/vhost.c1
-rw-r--r--drivers/vhost/vringh.c14
-rw-r--r--drivers/video/backlight/apple_dwi_bl.c1
-rw-r--r--drivers/video/backlight/as3711_bl.c1
-rw-r--r--drivers/video/backlight/backlight.c1
-rw-r--r--drivers/video/backlight/da9052_bl.c1
-rw-r--r--drivers/video/backlight/jornada720_bl.c1
-rw-r--r--drivers/video/backlight/ktd2801-backlight.c1
-rw-r--r--drivers/video/backlight/led_bl.c5
-rw-r--r--drivers/video/backlight/lp855x_bl.c2
-rw-r--r--drivers/video/backlight/mp3309c.c14
-rw-r--r--drivers/video/backlight/rave-sp-backlight.c2
-rw-r--r--drivers/video/backlight/rt4831-backlight.c1
-rw-r--r--drivers/video/console/vgacon.c2
-rw-r--r--drivers/video/fbdev/Kconfig15
-rw-r--r--drivers/video/fbdev/core/bitblit.c17
-rw-r--r--drivers/video/fbdev/core/fb_cmdline.c2
-rw-r--r--drivers/video/fbdev/core/fb_fillrect.h3
-rw-r--r--drivers/video/fbdev/core/fbcon.c31
-rw-r--r--drivers/video/fbdev/core/fbmon.c7
-rw-r--r--drivers/video/fbdev/hyperv_fb.c2
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xxfbdrv.c2
-rw-r--r--drivers/video/fbdev/nvidia/nvidia.c3
-rw-r--r--drivers/video/fbdev/pxafb.c3
-rw-r--r--drivers/video/fbdev/s3fb.c177
-rw-r--r--drivers/video/fbdev/simplefb.c31
-rw-r--r--drivers/video/fbdev/via/via-gpio.c2
-rw-r--r--drivers/video/fbdev/xen-fbfront.c2
-rw-r--r--drivers/video/screen_info_generic.c55
-rw-r--r--drivers/virt/coco/efi_secret/Kconfig2
-rw-r--r--drivers/virt/coco/sev-guest/sev-guest.c27
-rw-r--r--drivers/virtio/virtio_balloon.c12
-rw-r--r--drivers/virtio/virtio_input.c4
-rw-r--r--drivers/virtio/virtio_pci_legacy_dev.c4
-rw-r--r--drivers/virtio/virtio_pci_modern_dev.c4
-rw-r--r--drivers/virtio/virtio_ring.c463
-rw-r--r--drivers/virtio/virtio_vdpa.c22
-rw-r--r--drivers/w1/masters/matrox_w1.c10
-rw-r--r--drivers/watchdog/Kconfig11
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/intel_oc_wdt.c8
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c2
-rw-r--r--drivers/watchdog/nct6694_wdt.c307
-rw-r--r--drivers/watchdog/rzg2l_wdt.c4
-rw-r--r--drivers/watchdog/rzv2h_wdt.c150
-rw-r--r--drivers/watchdog/s3c2410_wdt.c46
-rw-r--r--drivers/watchdog/visconti_wdt.c5
-rw-r--r--drivers/xen/Kconfig1
-rw-r--r--drivers/xen/balloon.c4
-rw-r--r--drivers/xen/events/events_base.c37
-rw-r--r--drivers/xen/gntdev-dmabuf.c7
-rw-r--r--drivers/xen/gntdev-dmabuf.h2
-rw-r--r--drivers/xen/gntdev.c38
-rw-r--r--drivers/xen/grant-table.c6
-rw-r--r--drivers/xen/manage.c14
-rw-r--r--drivers/xen/privcmd.c14
-rw-r--r--drivers/xen/swiotlb-xen.c21
-rw-r--r--drivers/xen/unpopulated-alloc.c4
-rw-r--r--drivers/xen/xenbus/xenbus_client.c2
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c23
-rw-r--r--drivers/zorro/names.c12
5715 files changed, 291294 insertions, 90338 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index b5749cf67044..8e1ffa4358d5 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -161,7 +161,7 @@ obj-$(CONFIG_SOUNDWIRE) += soundwire/
# Virtualization drivers
obj-$(CONFIG_VIRT_DRIVERS) += virt/
-obj-$(subst m,y,$(CONFIG_HYPERV)) += hv/
+obj-$(CONFIG_HYPERV) += hv/
obj-$(CONFIG_PM_DEVFREQ) += devfreq/
obj-$(CONFIG_EXTCON) += extcon/
@@ -195,4 +195,5 @@ obj-$(CONFIG_DRM_ACCEL) += accel/
obj-$(CONFIG_CDX_BUS) += cdx/
obj-$(CONFIG_DPLL) += dpll/
+obj-$(CONFIG_DIBS) += dibs/
obj-$(CONFIG_S390) += s390/
diff --git a/drivers/accel/Kconfig b/drivers/accel/Kconfig
index 5b9490367a39..bb01cebc42bf 100644
--- a/drivers/accel/Kconfig
+++ b/drivers/accel/Kconfig
@@ -28,5 +28,6 @@ source "drivers/accel/amdxdna/Kconfig"
source "drivers/accel/habanalabs/Kconfig"
source "drivers/accel/ivpu/Kconfig"
source "drivers/accel/qaic/Kconfig"
+source "drivers/accel/rocket/Kconfig"
endif
diff --git a/drivers/accel/Makefile b/drivers/accel/Makefile
index a301fb6089d4..ffc3fa588666 100644
--- a/drivers/accel/Makefile
+++ b/drivers/accel/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_DRM_ACCEL_AMDXDNA) += amdxdna/
obj-$(CONFIG_DRM_ACCEL_HABANALABS) += habanalabs/
obj-$(CONFIG_DRM_ACCEL_IVPU) += ivpu/
obj-$(CONFIG_DRM_ACCEL_QAIC) += qaic/
+obj-$(CONFIG_DRM_ACCEL_ROCKET) += rocket/
\ No newline at end of file
diff --git a/drivers/accel/amdxdna/Makefile b/drivers/accel/amdxdna/Makefile
index 0e9adf6890a0..6797dac65efa 100644
--- a/drivers/accel/amdxdna/Makefile
+++ b/drivers/accel/amdxdna/Makefile
@@ -15,6 +15,7 @@ amdxdna-y := \
amdxdna_mailbox_helper.o \
amdxdna_pci_drv.o \
amdxdna_sysfs.o \
+ amdxdna_ubuf.o \
npu1_regs.o \
npu2_regs.o \
npu4_regs.o \
diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c
index 2cff5419bd2f..e9f9b1fa5dc1 100644
--- a/drivers/accel/amdxdna/aie2_ctx.c
+++ b/drivers/accel/amdxdna/aie2_ctx.c
@@ -46,6 +46,17 @@ static void aie2_job_put(struct amdxdna_sched_job *job)
kref_put(&job->refcnt, aie2_job_release);
}
+static void aie2_hwctx_status_shift_stop(struct amdxdna_hwctx *hwctx)
+{
+ hwctx->old_status = hwctx->status;
+ hwctx->status = HWCTX_STAT_STOP;
+}
+
+static void aie2_hwctx_status_restore(struct amdxdna_hwctx *hwctx)
+{
+ hwctx->status = hwctx->old_status;
+}
+
/* bad_job is only used by aie2_sched_job_timedout(); all other callers pass NULL */
static void aie2_hwctx_stop(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hwctx,
struct drm_sched_job *bad_job)
@@ -89,25 +100,6 @@ out:
return ret;
}
-void aie2_restart_ctx(struct amdxdna_client *client)
-{
- struct amdxdna_dev *xdna = client->xdna;
- struct amdxdna_hwctx *hwctx;
- unsigned long hwctx_id;
-
- drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
- mutex_lock(&client->hwctx_lock);
- amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
- if (hwctx->status != HWCTX_STAT_STOP)
- continue;
-
- hwctx->status = hwctx->old_status;
- XDNA_DBG(xdna, "Resetting %s", hwctx->name);
- aie2_hwctx_restart(xdna, hwctx);
- }
- mutex_unlock(&client->hwctx_lock);
-}
-
static struct dma_fence *aie2_cmd_get_out_fence(struct amdxdna_hwctx *hwctx, u64 seq)
{
struct dma_fence *fence, *out_fence = NULL;
@@ -141,34 +133,49 @@ static void aie2_hwctx_wait_for_idle(struct amdxdna_hwctx *hwctx)
dma_fence_put(fence);
}
-void aie2_hwctx_suspend(struct amdxdna_hwctx *hwctx)
+static int aie2_hwctx_suspend_cb(struct amdxdna_hwctx *hwctx, void *arg)
{
struct amdxdna_dev *xdna = hwctx->client->xdna;
+ aie2_hwctx_wait_for_idle(hwctx);
+ aie2_hwctx_stop(xdna, hwctx, NULL);
+ aie2_hwctx_status_shift_stop(hwctx);
+
+ return 0;
+}
+
+void aie2_hwctx_suspend(struct amdxdna_client *client)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+
/*
* A command timeout is unlikely. But if it happens, it doesn't
* break the system: aie2_hwctx_stop() will destroy the mailbox
* and abort all commands.
*/
drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
- aie2_hwctx_wait_for_idle(hwctx);
- aie2_hwctx_stop(xdna, hwctx, NULL);
- hwctx->old_status = hwctx->status;
- hwctx->status = HWCTX_STAT_STOP;
+ amdxdna_hwctx_walk(client, NULL, aie2_hwctx_suspend_cb);
}
-void aie2_hwctx_resume(struct amdxdna_hwctx *hwctx)
+static int aie2_hwctx_resume_cb(struct amdxdna_hwctx *hwctx, void *arg)
{
struct amdxdna_dev *xdna = hwctx->client->xdna;
+ aie2_hwctx_status_restore(hwctx);
+ return aie2_hwctx_restart(xdna, hwctx);
+}
+
+int aie2_hwctx_resume(struct amdxdna_client *client)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+
/*
* The resume path cannot guarantee that the mailbox channel can be
* regenerated. If this happens, submitting a message to this
* mailbox channel will return an error.
*/
drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
- hwctx->status = hwctx->old_status;
- aie2_hwctx_restart(xdna, hwctx);
+ return amdxdna_hwctx_walk(client, NULL, aie2_hwctx_resume_cb);
}
static void
@@ -192,7 +199,7 @@ aie2_sched_resp_handler(void *handle, void __iomem *data, size_t size)
{
struct amdxdna_sched_job *job = handle;
struct amdxdna_gem_obj *cmd_abo;
- u32 ret = 0;
+ int ret = 0;
u32 status;
cmd_abo = job->cmd_bo;
@@ -222,7 +229,7 @@ static int
aie2_sched_nocmd_resp_handler(void *handle, void __iomem *data, size_t size)
{
struct amdxdna_sched_job *job = handle;
- u32 ret = 0;
+ int ret = 0;
u32 status;
if (unlikely(!data))
@@ -250,7 +257,7 @@ aie2_sched_cmdlist_resp_handler(void *handle, void __iomem *data, size_t size)
u32 fail_cmd_status;
u32 fail_cmd_idx;
u32 cmd_status;
- u32 ret = 0;
+ int ret = 0;
cmd_abo = job->cmd_bo;
if (unlikely(!data) || unlikely(size != sizeof(u32) * 3)) {
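
The hunks above fold the per-context suspend/resume entry points into per-client walks: aie2_hwctx_suspend() and aie2_hwctx_resume() now hand a small callback to amdxdna_hwctx_walk(), which visits each context and stops at the first error. A minimal, self-contained sketch of that walk-with-callback shape (the types and names below are invented for the demo, not the kernel's):

#include <stdio.h>

struct ctx { int id; int status; };

/* stands in for amdxdna_hwctx_walk(): visit every context, stop on error */
static int walk(struct ctx *ctxs, int n, void *arg,
                int (*cb)(struct ctx *c, void *arg))
{
        int ret = 0;

        for (int i = 0; i < n; i++) {
                ret = cb(&ctxs[i], arg);
                if (ret)
                        break;          /* first failing callback aborts the walk */
        }
        return ret;
}

/* stands in for aie2_hwctx_suspend_cb(): all per-context work lives here */
static int suspend_cb(struct ctx *c, void *arg)
{
        c->status = 0;                  /* cf. HWCTX_STAT_STOP */
        printf("suspended ctx %d\n", c->id);
        return 0;
}

int main(void)
{
        struct ctx ctxs[] = { { 1, 1 }, { 2, 1 } };

        return walk(ctxs, 2, NULL, suspend_cb);
}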
diff --git a/drivers/accel/amdxdna/aie2_message.c b/drivers/accel/amdxdna/aie2_message.c
index 82412eec9a4b..9caad083543d 100644
--- a/drivers/accel/amdxdna/aie2_message.c
+++ b/drivers/accel/amdxdna/aie2_message.c
@@ -290,18 +290,25 @@ int aie2_map_host_buf(struct amdxdna_dev_hdl *ndev, u32 context_id, u64 addr, u6
return 0;
}
+static int amdxdna_hwctx_col_map(struct amdxdna_hwctx *hwctx, void *arg)
+{
+ u32 *bitmap = arg;
+
+ *bitmap |= GENMASK(hwctx->start_col + hwctx->num_col - 1, hwctx->start_col);
+
+ return 0;
+}
+
int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf,
u32 size, u32 *cols_filled)
{
DECLARE_AIE2_MSG(aie_column_info, MSG_OP_QUERY_COL_STATUS);
struct amdxdna_dev *xdna = ndev->xdna;
struct amdxdna_client *client;
- struct amdxdna_hwctx *hwctx;
- unsigned long hwctx_id;
dma_addr_t dma_addr;
u32 aie_bitmap = 0;
u8 *buff_addr;
- int ret, idx;
+ int ret;
buff_addr = dma_alloc_noncoherent(xdna->ddev.dev, size, &dma_addr,
DMA_FROM_DEVICE, GFP_KERNEL);
@@ -309,12 +316,8 @@ int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf,
return -ENOMEM;
/* Go through each hardware context and mark the AIE columns that are active */
- list_for_each_entry(client, &xdna->client_list, node) {
- idx = srcu_read_lock(&client->hwctx_srcu);
- amdxdna_for_each_hwctx(client, hwctx_id, hwctx)
- aie_bitmap |= amdxdna_hwctx_col_map(hwctx);
- srcu_read_unlock(&client->hwctx_srcu, idx);
- }
+ list_for_each_entry(client, &xdna->client_list, node)
+ amdxdna_hwctx_walk(client, &aie_bitmap, amdxdna_hwctx_col_map);
*cols_filled = 0;
req.dump_buff_addr = dma_addr;
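
aie2_query_status() now builds its active-column bitmap through the amdxdna_hwctx_col_map() callback above, which ORs GENMASK(start_col + num_col - 1, start_col) into the accumulator for each context. A compilable demo of that accumulation; GENMASK is re-defined locally (32-bit form) so the snippet builds outside the kernel:

#include <stdio.h>
#include <stdint.h>

/* 32-bit variant of the kernel's GENMASK(h, l): bits l..h set */
#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

int main(void)
{
        uint32_t bitmap = 0;

        /* hwctx A: start_col 0, num_col 2 -> bits 0..1 */
        bitmap |= GENMASK(0 + 2 - 1, 0);
        /* hwctx B: start_col 4, num_col 3 -> bits 4..6 */
        bitmap |= GENMASK(4 + 3 - 1, 4);
        printf("aie_bitmap = 0x%x\n", bitmap);  /* prints 0x73 */
        return 0;
}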
diff --git a/drivers/accel/amdxdna/aie2_pci.c b/drivers/accel/amdxdna/aie2_pci.c
index c6cf7068d23c..87c425e3d2b9 100644
--- a/drivers/accel/amdxdna/aie2_pci.c
+++ b/drivers/accel/amdxdna/aie2_pci.c
@@ -10,6 +10,7 @@
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
+#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/iommu.h>
@@ -440,6 +441,40 @@ disable_dev:
return ret;
}
+static int aie2_hw_suspend(struct amdxdna_dev *xdna)
+{
+ struct amdxdna_client *client;
+
+ guard(mutex)(&xdna->dev_lock);
+ list_for_each_entry(client, &xdna->client_list, node)
+ aie2_hwctx_suspend(client);
+
+ aie2_hw_stop(xdna);
+
+ return 0;
+}
+
+static int aie2_hw_resume(struct amdxdna_dev *xdna)
+{
+ struct amdxdna_client *client;
+ int ret;
+
+ guard(mutex)(&xdna->dev_lock);
+ ret = aie2_hw_start(xdna);
+ if (ret) {
+ XDNA_ERR(xdna, "Start hardware failed, %d", ret);
+ return ret;
+ }
+
+ list_for_each_entry(client, &xdna->client_list, node) {
+ ret = aie2_hwctx_resume(client);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
static int aie2_init(struct amdxdna_dev *xdna)
{
struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
@@ -520,14 +555,14 @@ static int aie2_init(struct amdxdna_dev *xdna)
if (!ndev->psp_hdl) {
XDNA_ERR(xdna, "failed to create psp");
ret = -ENOMEM;
- goto free_irq;
+ goto release_fw;
}
xdna->dev_handle = ndev;
ret = aie2_hw_start(xdna);
if (ret) {
XDNA_ERR(xdna, "start npu failed, ret %d", ret);
- goto free_irq;
+ goto release_fw;
}
ret = aie2_mgmt_fw_query(ndev);
@@ -578,8 +613,6 @@ async_event_free:
aie2_error_async_events_free(ndev);
stop_hw:
aie2_hw_stop(xdna);
-free_irq:
- pci_free_irq_vectors(pdev);
release_fw:
release_firmware(fw);
@@ -588,12 +621,10 @@ release_fw:
static void aie2_fini(struct amdxdna_dev *xdna)
{
- struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
struct amdxdna_dev_hdl *ndev = xdna->dev_handle;
aie2_hw_stop(xdna);
aie2_error_async_events_free(ndev);
- pci_free_irq_vectors(pdev);
}
static int aie2_get_aie_status(struct amdxdna_client *client,
@@ -752,65 +783,68 @@ static int aie2_get_clock_metadata(struct amdxdna_client *client,
return ret;
}
+static int aie2_hwctx_status_cb(struct amdxdna_hwctx *hwctx, void *arg)
+{
+ struct amdxdna_drm_hwctx_entry *tmp __free(kfree) = NULL;
+ struct amdxdna_drm_get_array *array_args = arg;
+ struct amdxdna_drm_hwctx_entry __user *buf;
+ u32 size;
+
+ if (!array_args->num_element)
+ return -EINVAL;
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ tmp->pid = hwctx->client->pid;
+ tmp->context_id = hwctx->id;
+ tmp->start_col = hwctx->start_col;
+ tmp->num_col = hwctx->num_col;
+ tmp->command_submissions = hwctx->priv->seq;
+ tmp->command_completions = hwctx->priv->completed;
+ tmp->pasid = hwctx->client->pasid;
+ tmp->priority = hwctx->qos.priority;
+ tmp->gops = hwctx->qos.gops;
+ tmp->fps = hwctx->qos.fps;
+ tmp->dma_bandwidth = hwctx->qos.dma_bandwidth;
+ tmp->latency = hwctx->qos.latency;
+ tmp->frame_exec_time = hwctx->qos.frame_exec_time;
+ tmp->state = AMDXDNA_HWCTX_STATE_ACTIVE;
+
+ buf = u64_to_user_ptr(array_args->buffer);
+ size = min(sizeof(*tmp), array_args->element_size);
+
+ if (copy_to_user(buf, tmp, size))
+ return -EFAULT;
+
+ array_args->buffer += size;
+ array_args->num_element--;
+
+ return 0;
+}
+
static int aie2_get_hwctx_status(struct amdxdna_client *client,
struct amdxdna_drm_get_info *args)
{
- struct amdxdna_drm_query_hwctx __user *buf;
+ struct amdxdna_drm_get_array array_args;
struct amdxdna_dev *xdna = client->xdna;
- struct amdxdna_drm_query_hwctx *tmp;
struct amdxdna_client *tmp_client;
- struct amdxdna_hwctx *hwctx;
- unsigned long hwctx_id;
- bool overflow = false;
- u32 req_bytes = 0;
- u32 hw_i = 0;
- int ret = 0;
- int idx;
+ int ret;
drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
- tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
-
- buf = u64_to_user_ptr(args->buffer);
+ array_args.element_size = sizeof(struct amdxdna_drm_query_hwctx);
+ array_args.buffer = args->buffer;
+ array_args.num_element = args->buffer_size / array_args.element_size;
list_for_each_entry(tmp_client, &xdna->client_list, node) {
- idx = srcu_read_lock(&tmp_client->hwctx_srcu);
- amdxdna_for_each_hwctx(tmp_client, hwctx_id, hwctx) {
- req_bytes += sizeof(*tmp);
- if (args->buffer_size < req_bytes) {
- /* Continue iterating to get the required size */
- overflow = true;
- continue;
- }
-
- memset(tmp, 0, sizeof(*tmp));
- tmp->pid = tmp_client->pid;
- tmp->context_id = hwctx->id;
- tmp->start_col = hwctx->start_col;
- tmp->num_col = hwctx->num_col;
- tmp->command_submissions = hwctx->priv->seq;
- tmp->command_completions = hwctx->priv->completed;
-
- if (copy_to_user(&buf[hw_i], tmp, sizeof(*tmp))) {
- ret = -EFAULT;
- srcu_read_unlock(&tmp_client->hwctx_srcu, idx);
- goto out;
- }
- hw_i++;
- }
- srcu_read_unlock(&tmp_client->hwctx_srcu, idx);
- }
-
- if (overflow) {
- XDNA_ERR(xdna, "Invalid buffer size. Given: %u Need: %u.",
- args->buffer_size, req_bytes);
- ret = -EINVAL;
+ ret = amdxdna_hwctx_walk(tmp_client, &array_args,
+ aie2_hwctx_status_cb);
+ if (ret)
+ break;
}
-out:
- kfree(tmp);
- args->buffer_size = req_bytes;
+ args->buffer_size -= (u32)(array_args.buffer - args->buffer);
return ret;
}
@@ -854,6 +888,58 @@ static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_i
return ret;
}
+static int aie2_query_ctx_status_array(struct amdxdna_client *client,
+ struct amdxdna_drm_get_array *args)
+{
+ struct amdxdna_drm_get_array array_args;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_client *tmp_client;
+ int ret;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+
+ array_args.element_size = min(args->element_size,
+ sizeof(struct amdxdna_drm_hwctx_entry));
+ array_args.buffer = args->buffer;
+ array_args.num_element = args->num_element * args->element_size /
+ array_args.element_size;
+ list_for_each_entry(tmp_client, &xdna->client_list, node) {
+ ret = amdxdna_hwctx_walk(tmp_client, &array_args,
+ aie2_hwctx_status_cb);
+ if (ret)
+ break;
+ }
+
+ args->element_size = array_args.element_size;
+ args->num_element = (u32)((array_args.buffer - args->buffer) /
+ args->element_size);
+
+ return ret;
+}
+
+static int aie2_get_array(struct amdxdna_client *client,
+ struct amdxdna_drm_get_array *args)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ int ret, idx;
+
+ if (!drm_dev_enter(&xdna->ddev, &idx))
+ return -ENODEV;
+
+ switch (args->param) {
+ case DRM_AMDXDNA_HW_CONTEXT_ALL:
+ ret = aie2_query_ctx_status_array(client, args);
+ break;
+ default:
+ XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
+ ret = -EOPNOTSUPP;
+ }
+ XDNA_DBG(xdna, "Got param %d", args->param);
+
+ drm_dev_exit(idx);
+ return ret;
+}
+
static int aie2_set_power_mode(struct amdxdna_client *client,
struct amdxdna_drm_set_state *args)
{
@@ -903,17 +989,16 @@ static int aie2_set_state(struct amdxdna_client *client,
}
const struct amdxdna_dev_ops aie2_ops = {
- .init = aie2_init,
- .fini = aie2_fini,
- .resume = aie2_hw_start,
- .suspend = aie2_hw_stop,
- .get_aie_info = aie2_get_info,
- .set_aie_state = aie2_set_state,
- .hwctx_init = aie2_hwctx_init,
- .hwctx_fini = aie2_hwctx_fini,
- .hwctx_config = aie2_hwctx_config,
- .cmd_submit = aie2_cmd_submit,
+ .init = aie2_init,
+ .fini = aie2_fini,
+ .resume = aie2_hw_resume,
+ .suspend = aie2_hw_suspend,
+ .get_aie_info = aie2_get_info,
+ .set_aie_state = aie2_set_state,
+ .hwctx_init = aie2_hwctx_init,
+ .hwctx_fini = aie2_hwctx_fini,
+ .hwctx_config = aie2_hwctx_config,
+ .cmd_submit = aie2_cmd_submit,
.hmm_invalidate = aie2_hmm_invalidate,
- .hwctx_suspend = aie2_hwctx_suspend,
- .hwctx_resume = aie2_hwctx_resume,
+ .get_array = aie2_get_array,
};
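
The new aie2_hw_suspend()/aie2_hw_resume() above rely on guard(mutex) from <linux/cleanup.h> (hence the added include), which drops dev_lock automatically on every return path. A rough userspace approximation of the mechanism, built on the GCC/Clang cleanup attribute; the guard_mutex macro and other names below are invented for this demo:

#include <pthread.h>
#include <stdio.h>

static void unlock_cb(pthread_mutex_t **m)
{
        pthread_mutex_unlock(*m);
}

/* lock now, unlock automatically when the enclosing scope exits */
#define guard_mutex(m) \
        pthread_mutex_t *_guard __attribute__((cleanup(unlock_cb))) = (m); \
        pthread_mutex_lock(_guard)

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int critical(int fail)
{
        guard_mutex(&lock);
        if (fail)
                return -1;      /* early return: lock still released on exit */
        puts("did work under lock");
        return 0;
}

int main(void)
{
        critical(1);            /* error path */
        return critical(0);     /* success path; both unlock automatically */
}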
diff --git a/drivers/accel/amdxdna/aie2_pci.h b/drivers/accel/amdxdna/aie2_pci.h
index 385914840eaa..91a8e948f82a 100644
--- a/drivers/accel/amdxdna/aie2_pci.h
+++ b/drivers/accel/amdxdna/aie2_pci.h
@@ -288,10 +288,9 @@ int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
int aie2_hwctx_init(struct amdxdna_hwctx *hwctx);
void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx);
int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size);
-void aie2_hwctx_suspend(struct amdxdna_hwctx *hwctx);
-void aie2_hwctx_resume(struct amdxdna_hwctx *hwctx);
+void aie2_hwctx_suspend(struct amdxdna_client *client);
+int aie2_hwctx_resume(struct amdxdna_client *client);
int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq);
void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo, unsigned long cur_seq);
-void aie2_restart_ctx(struct amdxdna_client *client);
#endif /* _AIE2_PCI_H_ */
diff --git a/drivers/accel/amdxdna/amdxdna_ctx.c b/drivers/accel/amdxdna/amdxdna_ctx.c
index be073224bd69..4bfe4ef20550 100644
--- a/drivers/accel/amdxdna/amdxdna_ctx.c
+++ b/drivers/accel/amdxdna/amdxdna_ctx.c
@@ -60,32 +60,6 @@ static struct dma_fence *amdxdna_fence_create(struct amdxdna_hwctx *hwctx)
return &fence->base;
}
-void amdxdna_hwctx_suspend(struct amdxdna_client *client)
-{
- struct amdxdna_dev *xdna = client->xdna;
- struct amdxdna_hwctx *hwctx;
- unsigned long hwctx_id;
-
- drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
- mutex_lock(&client->hwctx_lock);
- amdxdna_for_each_hwctx(client, hwctx_id, hwctx)
- xdna->dev_info->ops->hwctx_suspend(hwctx);
- mutex_unlock(&client->hwctx_lock);
-}
-
-void amdxdna_hwctx_resume(struct amdxdna_client *client)
-{
- struct amdxdna_dev *xdna = client->xdna;
- struct amdxdna_hwctx *hwctx;
- unsigned long hwctx_id;
-
- drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
- mutex_lock(&client->hwctx_lock);
- amdxdna_for_each_hwctx(client, hwctx_id, hwctx)
- xdna->dev_info->ops->hwctx_resume(hwctx);
- mutex_unlock(&client->hwctx_lock);
-}
-
static void amdxdna_hwctx_destroy_rcu(struct amdxdna_hwctx *hwctx,
struct srcu_struct *ss)
{
@@ -94,14 +68,30 @@ static void amdxdna_hwctx_destroy_rcu(struct amdxdna_hwctx *hwctx,
synchronize_srcu(ss);
/* At this point, the user can no longer submit new commands */
- mutex_lock(&xdna->dev_lock);
xdna->dev_info->ops->hwctx_fini(hwctx);
- mutex_unlock(&xdna->dev_lock);
kfree(hwctx->name);
kfree(hwctx);
}
+int amdxdna_hwctx_walk(struct amdxdna_client *client, void *arg,
+ int (*walk)(struct amdxdna_hwctx *hwctx, void *arg))
+{
+ struct amdxdna_hwctx *hwctx;
+ unsigned long hwctx_id;
+ int ret = 0, idx;
+
+ idx = srcu_read_lock(&client->hwctx_srcu);
+ amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
+ ret = walk(hwctx, arg);
+ if (ret)
+ break;
+ }
+ srcu_read_unlock(&client->hwctx_srcu, idx);
+
+ return ret;
+}
+
void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size)
{
struct amdxdna_cmd *cmd = abo->mem.kva;
@@ -152,16 +142,12 @@ void amdxdna_hwctx_remove_all(struct amdxdna_client *client)
struct amdxdna_hwctx *hwctx;
unsigned long hwctx_id;
- mutex_lock(&client->hwctx_lock);
amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
XDNA_DBG(client->xdna, "PID %d close HW context %d",
client->pid, hwctx->id);
xa_erase(&client->hwctx_xa, hwctx->id);
- mutex_unlock(&client->hwctx_lock);
amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu);
- mutex_lock(&client->hwctx_lock);
}
- mutex_unlock(&client->hwctx_lock);
}
int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
@@ -251,6 +237,7 @@ int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct d
if (!drm_dev_enter(dev, &idx))
return -ENODEV;
+ mutex_lock(&xdna->dev_lock);
hwctx = xa_erase(&client->hwctx_xa, args->handle);
if (!hwctx) {
ret = -EINVAL;
@@ -267,6 +254,7 @@ int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct d
XDNA_DBG(xdna, "PID %d destroyed HW context %d", client->pid, args->handle);
out:
+ mutex_unlock(&xdna->dev_lock);
drm_dev_exit(idx);
return ret;
}
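
amdxdna_hwctx_destroy_rcu() above keeps the erase, synchronize_srcu(), then free ordering, now with hwctx_fini() called while the caller already holds dev_lock. The ordering itself, reduced to a runnable stand-in where pthread_join() plays the role of the SRCU grace period (the snapshot load is left non-atomic for brevity):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static int *shared;     /* the "published" object (cf. the hwctx xarray slot) */

static void *reader(void *arg)
{
        int *p = shared;        /* a reader may still hold an old snapshot */

        if (p)
                printf("reader sees %d\n", *p);
        return NULL;
}

int main(void)
{
        pthread_t t;
        int *obj = malloc(sizeof(*obj));

        *obj = 42;
        shared = obj;                   /* publish */
        pthread_create(&t, NULL, reader, NULL);
        shared = NULL;                  /* 1. unpublish (cf. xa_erase) */
        pthread_join(t, NULL);          /* 2. grace period (cf. synchronize_srcu) */
        free(obj);                      /* 3. only now is it safe to free */
        return 0;
}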
diff --git a/drivers/accel/amdxdna/amdxdna_ctx.h b/drivers/accel/amdxdna/amdxdna_ctx.h
index f0a4a8586d85..7cd7a55936f0 100644
--- a/drivers/accel/amdxdna/amdxdna_ctx.h
+++ b/drivers/accel/amdxdna/amdxdna_ctx.h
@@ -139,16 +139,10 @@ amdxdna_cmd_get_state(struct amdxdna_gem_obj *abo)
void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size);
int amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo);
-static inline u32 amdxdna_hwctx_col_map(struct amdxdna_hwctx *hwctx)
-{
- return GENMASK(hwctx->start_col + hwctx->num_col - 1,
- hwctx->start_col);
-}
-
void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job);
void amdxdna_hwctx_remove_all(struct amdxdna_client *client);
-void amdxdna_hwctx_suspend(struct amdxdna_client *client);
-void amdxdna_hwctx_resume(struct amdxdna_client *client);
+int amdxdna_hwctx_walk(struct amdxdna_client *client, void *arg,
+ int (*walk)(struct amdxdna_hwctx *hwctx, void *arg));
int amdxdna_cmd_submit(struct amdxdna_client *client,
u32 cmd_bo_hdls, u32 *arg_bo_hdls, u32 arg_bo_cnt,
diff --git a/drivers/accel/amdxdna/amdxdna_gem.c b/drivers/accel/amdxdna/amdxdna_gem.c
index 0f85a0105178..d407a36eb412 100644
--- a/drivers/accel/amdxdna/amdxdna_gem.c
+++ b/drivers/accel/amdxdna/amdxdna_gem.c
@@ -18,6 +18,7 @@
#include "amdxdna_ctx.h"
#include "amdxdna_gem.h"
#include "amdxdna_pci_drv.h"
+#include "amdxdna_ubuf.h"
#define XDNA_MAX_CMD_BO_SIZE SZ_32K
@@ -296,7 +297,7 @@ static int amdxdna_insert_pages(struct amdxdna_gem_obj *abo,
vma->vm_private_data = NULL;
vma->vm_ops = NULL;
- ret = dma_buf_mmap(to_gobj(abo)->dma_buf, vma, 0);
+ ret = dma_buf_mmap(abo->dma_buf, vma, 0);
if (ret) {
XDNA_ERR(xdna, "Failed to mmap dma buf %d", ret);
return ret;
@@ -391,10 +392,47 @@ static const struct dma_buf_ops amdxdna_dmabuf_ops = {
.vunmap = drm_gem_dmabuf_vunmap,
};
+static int amdxdna_gem_obj_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+{
+ struct amdxdna_gem_obj *abo = to_xdna_obj(obj);
+
+ iosys_map_clear(map);
+
+ dma_resv_assert_held(obj->resv);
+
+ if (is_import_bo(abo))
+ dma_buf_vmap(abo->dma_buf, map);
+ else
+ drm_gem_shmem_object_vmap(obj, map);
+
+ if (!map->vaddr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void amdxdna_gem_obj_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
+{
+ struct amdxdna_gem_obj *abo = to_xdna_obj(obj);
+
+ dma_resv_assert_held(obj->resv);
+
+ if (is_import_bo(abo))
+ dma_buf_vunmap(abo->dma_buf, map);
+ else
+ drm_gem_shmem_object_vunmap(obj, map);
+}
+
static struct dma_buf *amdxdna_gem_prime_export(struct drm_gem_object *gobj, int flags)
{
+ struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ if (abo->dma_buf) {
+ get_dma_buf(abo->dma_buf);
+ return abo->dma_buf;
+ }
+
exp_info.ops = &amdxdna_dmabuf_ops;
exp_info.size = gobj->size;
exp_info.flags = flags;
@@ -451,8 +489,8 @@ static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
.pin = drm_gem_shmem_object_pin,
.unpin = drm_gem_shmem_object_unpin,
.get_sg_table = drm_gem_shmem_object_get_sg_table,
- .vmap = drm_gem_shmem_object_vmap,
- .vunmap = drm_gem_shmem_object_vunmap,
+ .vmap = amdxdna_gem_obj_vmap,
+ .vunmap = amdxdna_gem_obj_vunmap,
.mmap = amdxdna_gem_obj_mmap,
.vm_ops = &drm_gem_shmem_vm_ops,
.export = amdxdna_gem_prime_export,
@@ -494,6 +532,68 @@ amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size)
return to_gobj(abo);
}
+static struct amdxdna_gem_obj *
+amdxdna_gem_create_shmem_object(struct drm_device *dev, size_t size)
+{
+ struct drm_gem_shmem_object *shmem = drm_gem_shmem_create(dev, size);
+
+ if (IS_ERR(shmem))
+ return ERR_CAST(shmem);
+
+ shmem->map_wc = false;
+ return to_xdna_obj(&shmem->base);
+}
+
+static struct amdxdna_gem_obj *
+amdxdna_gem_create_ubuf_object(struct drm_device *dev, struct amdxdna_drm_create_bo *args)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ enum amdxdna_ubuf_flag flags = 0;
+ struct amdxdna_drm_va_tbl va_tbl;
+ struct drm_gem_object *gobj;
+ struct dma_buf *dma_buf;
+
+ if (copy_from_user(&va_tbl, u64_to_user_ptr(args->vaddr), sizeof(va_tbl))) {
+ XDNA_DBG(xdna, "Access va table failed");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (va_tbl.num_entries) {
+ if (args->type == AMDXDNA_BO_CMD)
+ flags |= AMDXDNA_UBUF_FLAG_MAP_DMA;
+
+ dma_buf = amdxdna_get_ubuf(dev, flags, va_tbl.num_entries,
+ u64_to_user_ptr(args->vaddr + sizeof(va_tbl)));
+ } else {
+ dma_buf = dma_buf_get(va_tbl.dmabuf_fd);
+ }
+
+ if (IS_ERR(dma_buf))
+ return ERR_CAST(dma_buf);
+
+ gobj = amdxdna_gem_prime_import(dev, dma_buf);
+ if (IS_ERR(gobj)) {
+ dma_buf_put(dma_buf);
+ return ERR_CAST(gobj);
+ }
+
+ dma_buf_put(dma_buf);
+
+ return to_xdna_obj(gobj);
+}
+
+static struct amdxdna_gem_obj *
+amdxdna_gem_create_object(struct drm_device *dev,
+ struct amdxdna_drm_create_bo *args)
+{
+ size_t aligned_sz = PAGE_ALIGN(args->size);
+
+ if (args->vaddr)
+ return amdxdna_gem_create_ubuf_object(dev, args);
+
+ return amdxdna_gem_create_shmem_object(dev, aligned_sz);
+}
+
struct drm_gem_object *
amdxdna_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
{
@@ -545,16 +645,12 @@ amdxdna_drm_alloc_shmem(struct drm_device *dev,
struct drm_file *filp)
{
struct amdxdna_client *client = filp->driver_priv;
- struct drm_gem_shmem_object *shmem;
struct amdxdna_gem_obj *abo;
- shmem = drm_gem_shmem_create(dev, args->size);
- if (IS_ERR(shmem))
- return ERR_CAST(shmem);
-
- shmem->map_wc = false;
+ abo = amdxdna_gem_create_object(dev, args);
+ if (IS_ERR(abo))
+ return ERR_CAST(abo);
- abo = to_xdna_obj(&shmem->base);
abo->client = client;
abo->type = AMDXDNA_BO_SHMEM;
@@ -569,7 +665,6 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev,
struct amdxdna_client *client = filp->driver_priv;
struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
struct amdxdna_dev *xdna = to_xdna_dev(dev);
- struct drm_gem_shmem_object *shmem;
struct amdxdna_gem_obj *abo;
int ret;
@@ -586,14 +681,12 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev,
goto mm_unlock;
}
- shmem = drm_gem_shmem_create(dev, args->size);
- if (IS_ERR(shmem)) {
- ret = PTR_ERR(shmem);
+ abo = amdxdna_gem_create_object(dev, args);
+ if (IS_ERR(abo)) {
+ ret = PTR_ERR(abo);
goto mm_unlock;
}
- shmem->map_wc = false;
- abo = to_xdna_obj(&shmem->base);
abo->type = AMDXDNA_BO_DEV_HEAP;
abo->client = client;
abo->mem.dev_addr = client->xdna->dev_info->dev_mem_base;
@@ -657,7 +750,6 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
{
struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
struct amdxdna_dev *xdna = to_xdna_dev(dev);
- struct drm_gem_shmem_object *shmem;
struct amdxdna_gem_obj *abo;
int ret;
@@ -671,12 +763,9 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
return ERR_PTR(-EINVAL);
}
- shmem = drm_gem_shmem_create(dev, args->size);
- if (IS_ERR(shmem))
- return ERR_CAST(shmem);
-
- shmem->map_wc = false;
- abo = to_xdna_obj(&shmem->base);
+ abo = amdxdna_gem_create_object(dev, args);
+ if (IS_ERR(abo))
+ return ERR_CAST(abo);
abo->type = AMDXDNA_BO_CMD;
abo->client = filp->driver_priv;
@@ -691,7 +780,7 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
return abo;
release_obj:
- drm_gem_shmem_free(shmem);
+ drm_gem_object_put(to_gobj(abo));
return ERR_PTR(ret);
}
@@ -702,7 +791,7 @@ int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_f
struct amdxdna_gem_obj *abo;
int ret;
- if (args->flags || args->vaddr || !args->size)
+ if (args->flags)
return -EINVAL;
XDNA_DBG(xdna, "BO arg type %d vaddr 0x%llx size 0x%llx flags 0x%llx",
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.c b/drivers/accel/amdxdna/amdxdna_pci_drv.c
index f2bf1d374cc7..569cd703729d 100644
--- a/drivers/accel/amdxdna/amdxdna_pci_drv.c
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.c
@@ -27,6 +27,13 @@ MODULE_FIRMWARE("amdnpu/17f0_11/npu.sbin");
MODULE_FIRMWARE("amdnpu/17f0_20/npu.sbin");
/*
+ * 0.0: Initial version
+ * 0.1: Support getting all hardware contexts by DRM_IOCTL_AMDXDNA_GET_ARRAY
+ */
+#define AMDXDNA_DRIVER_MAJOR 0
+#define AMDXDNA_DRIVER_MINOR 1
+
+/*
 * Bind the driver based on the (vendor_id, device_id) pair and later use the
 * (device_id, rev_id) pair as a key to select the devices. Devices with the
 * same device_id present a very similar interface to the host driver.
@@ -81,7 +88,6 @@ static int amdxdna_drm_open(struct drm_device *ddev, struct drm_file *filp)
ret = -ENODEV;
goto unbind_sva;
}
- mutex_init(&client->hwctx_lock);
init_srcu_struct(&client->hwctx_srcu);
xa_init_flags(&client->hwctx_xa, XA_FLAGS_ALLOC);
mutex_init(&client->mm_lock);
@@ -116,7 +122,6 @@ static void amdxdna_drm_close(struct drm_device *ddev, struct drm_file *filp)
xa_destroy(&client->hwctx_xa);
cleanup_srcu_struct(&client->hwctx_srcu);
- mutex_destroy(&client->hwctx_lock);
mutex_destroy(&client->mm_lock);
if (client->dev_heap)
drm_gem_object_put(to_gobj(client->dev_heap));
@@ -142,8 +147,8 @@ static int amdxdna_flush(struct file *f, fl_owner_t id)
mutex_lock(&xdna->dev_lock);
list_del_init(&client->node);
- mutex_unlock(&xdna->dev_lock);
amdxdna_hwctx_remove_all(client);
+ mutex_unlock(&xdna->dev_lock);
drm_dev_exit(idx);
return 0;
@@ -166,6 +171,23 @@ static int amdxdna_drm_get_info_ioctl(struct drm_device *dev, void *data, struct
return ret;
}
+static int amdxdna_drm_get_array_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_drm_get_array *args = data;
+
+ if (!xdna->dev_info->ops->get_array)
+ return -EOPNOTSUPP;
+
+ if (args->pad || !args->num_element || !args->element_size)
+ return -EINVAL;
+
+ guard(mutex)(&xdna->dev_lock);
+ return xdna->dev_info->ops->get_array(client, args);
+}
+
static int amdxdna_drm_set_state_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct amdxdna_client *client = filp->driver_priv;
@@ -197,6 +219,7 @@ static const struct drm_ioctl_desc amdxdna_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(AMDXDNA_EXEC_CMD, amdxdna_drm_submit_cmd_ioctl, 0),
/* AIE hardware */
DRM_IOCTL_DEF_DRV(AMDXDNA_GET_INFO, amdxdna_drm_get_info_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(AMDXDNA_GET_ARRAY, amdxdna_drm_get_array_ioctl, 0),
DRM_IOCTL_DEF_DRV(AMDXDNA_SET_STATE, amdxdna_drm_set_state_ioctl, DRM_ROOT_ONLY),
};
@@ -220,6 +243,8 @@ const struct drm_driver amdxdna_drm_drv = {
.fops = &amdxdna_fops,
.name = "amdxdna_accel_driver",
.desc = "AMD XDNA DRM implementation",
+ .major = AMDXDNA_DRIVER_MAJOR,
+ .minor = AMDXDNA_DRIVER_MINOR,
.open = amdxdna_drm_open,
.postclose = amdxdna_drm_close,
.ioctls = amdxdna_drm_ioctls,
@@ -330,11 +355,8 @@ static void amdxdna_remove(struct pci_dev *pdev)
struct amdxdna_client, node);
while (client) {
list_del_init(&client->node);
- mutex_unlock(&xdna->dev_lock);
-
amdxdna_hwctx_remove_all(client);
- mutex_lock(&xdna->dev_lock);
client = list_first_entry_or_null(&xdna->client_list,
struct amdxdna_client, node);
}
@@ -343,89 +365,29 @@ static void amdxdna_remove(struct pci_dev *pdev)
mutex_unlock(&xdna->dev_lock);
}
-static int amdxdna_dev_suspend_nolock(struct amdxdna_dev *xdna)
-{
- if (xdna->dev_info->ops->suspend)
- xdna->dev_info->ops->suspend(xdna);
-
- return 0;
-}
-
-static int amdxdna_dev_resume_nolock(struct amdxdna_dev *xdna)
-{
- if (xdna->dev_info->ops->resume)
- return xdna->dev_info->ops->resume(xdna);
-
- return 0;
-}
-
static int amdxdna_pmops_suspend(struct device *dev)
{
struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
- struct amdxdna_client *client;
-
- mutex_lock(&xdna->dev_lock);
- list_for_each_entry(client, &xdna->client_list, node)
- amdxdna_hwctx_suspend(client);
- amdxdna_dev_suspend_nolock(xdna);
- mutex_unlock(&xdna->dev_lock);
+ if (!xdna->dev_info->ops->suspend)
+ return -EOPNOTSUPP;
- return 0;
+ return xdna->dev_info->ops->suspend(xdna);
}
static int amdxdna_pmops_resume(struct device *dev)
{
struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
- struct amdxdna_client *client;
- int ret;
-
- XDNA_INFO(xdna, "firmware resuming...");
- mutex_lock(&xdna->dev_lock);
- ret = amdxdna_dev_resume_nolock(xdna);
- if (ret) {
- XDNA_ERR(xdna, "resume NPU firmware failed");
- mutex_unlock(&xdna->dev_lock);
- return ret;
- }
-
- XDNA_INFO(xdna, "hardware context resuming...");
- list_for_each_entry(client, &xdna->client_list, node)
- amdxdna_hwctx_resume(client);
- mutex_unlock(&xdna->dev_lock);
-
- return 0;
-}
-
-static int amdxdna_rpmops_suspend(struct device *dev)
-{
- struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
- int ret;
-
- mutex_lock(&xdna->dev_lock);
- ret = amdxdna_dev_suspend_nolock(xdna);
- mutex_unlock(&xdna->dev_lock);
- XDNA_DBG(xdna, "Runtime suspend done ret: %d", ret);
- return ret;
-}
-
-static int amdxdna_rpmops_resume(struct device *dev)
-{
- struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
- int ret;
-
- mutex_lock(&xdna->dev_lock);
- ret = amdxdna_dev_resume_nolock(xdna);
- mutex_unlock(&xdna->dev_lock);
+ if (!xdna->dev_info->ops->resume)
+ return -EOPNOTSUPP;
- XDNA_DBG(xdna, "Runtime resume done ret: %d", ret);
- return ret;
+ return xdna->dev_info->ops->resume(xdna);
}
static const struct dev_pm_ops amdxdna_pm_ops = {
SYSTEM_SLEEP_PM_OPS(amdxdna_pmops_suspend, amdxdna_pmops_resume)
- RUNTIME_PM_OPS(amdxdna_rpmops_suspend, amdxdna_rpmops_resume, NULL)
+ RUNTIME_PM_OPS(amdxdna_pmops_suspend, amdxdna_pmops_resume, NULL)
};
static struct pci_driver amdxdna_pci_driver = {
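
With the separate rpmops wrappers removed, SYSTEM_SLEEP_PM_OPS() and RUNTIME_PM_OPS() above now point at the same suspend/resume pair, and the per-device ops table decides whether a transition is supported. The resulting shape, reduced to a standalone model (the struct and names are hypothetical):

#include <stdio.h>

struct pm_ops {
        int (*suspend)(void);
        int (*resume)(void);
        int (*runtime_suspend)(void);
        int (*runtime_resume)(void);
};

static int dev_suspend(void) { puts("suspend"); return 0; }
static int dev_resume(void)  { puts("resume");  return 0; }

/* one implementation serves both the system-sleep and runtime entry points */
static const struct pm_ops pm = {
        .suspend         = dev_suspend,
        .resume          = dev_resume,
        .runtime_suspend = dev_suspend,
        .runtime_resume  = dev_resume,
};

int main(void)
{
        return pm.runtime_suspend() || pm.resume();
}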
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.h b/drivers/accel/amdxdna/amdxdna_pci_drv.h
index ab79600911aa..72d6696d49da 100644
--- a/drivers/accel/amdxdna/amdxdna_pci_drv.h
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.h
@@ -50,16 +50,15 @@ struct amdxdna_dev_ops {
int (*init)(struct amdxdna_dev *xdna);
void (*fini)(struct amdxdna_dev *xdna);
int (*resume)(struct amdxdna_dev *xdna);
- void (*suspend)(struct amdxdna_dev *xdna);
+ int (*suspend)(struct amdxdna_dev *xdna);
int (*hwctx_init)(struct amdxdna_hwctx *hwctx);
void (*hwctx_fini)(struct amdxdna_hwctx *hwctx);
int (*hwctx_config)(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size);
void (*hmm_invalidate)(struct amdxdna_gem_obj *abo, unsigned long cur_seq);
- void (*hwctx_suspend)(struct amdxdna_hwctx *hwctx);
- void (*hwctx_resume)(struct amdxdna_hwctx *hwctx);
int (*cmd_submit)(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq);
int (*get_aie_info)(struct amdxdna_client *client, struct amdxdna_drm_get_info *args);
int (*set_aie_state)(struct amdxdna_client *client, struct amdxdna_drm_set_state *args);
+ int (*get_array)(struct amdxdna_client *client, struct amdxdna_drm_get_array *args);
};
/*
@@ -118,8 +117,6 @@ struct amdxdna_device_id {
struct amdxdna_client {
struct list_head node;
pid_t pid;
- struct mutex hwctx_lock; /* protect hwctx */
- /* do NOT wait this srcu when hwctx_lock is held */
struct srcu_struct hwctx_srcu;
struct xarray hwctx_xa;
u32 next_hwctxid;
diff --git a/drivers/accel/amdxdna/amdxdna_ubuf.c b/drivers/accel/amdxdna/amdxdna_ubuf.c
new file mode 100644
index 000000000000..077b2261cf2a
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_ubuf.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include <linux/dma-buf.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+
+#include "amdxdna_pci_drv.h"
+#include "amdxdna_ubuf.h"
+
+struct amdxdna_ubuf_priv {
+ struct page **pages;
+ u64 nr_pages;
+ enum amdxdna_ubuf_flag flags;
+ struct mm_struct *mm;
+};
+
+static struct sg_table *amdxdna_ubuf_map(struct dma_buf_attachment *attach,
+ enum dma_data_direction direction)
+{
+ struct amdxdna_ubuf_priv *ubuf = attach->dmabuf->priv;
+ struct sg_table *sg;
+ int ret;
+
+ sg = kzalloc(sizeof(*sg), GFP_KERNEL);
+ if (!sg)
+ return ERR_PTR(-ENOMEM);
+
+ ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->nr_pages, 0,
+ ubuf->nr_pages << PAGE_SHIFT, GFP_KERNEL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (ubuf->flags & AMDXDNA_UBUF_FLAG_MAP_DMA) {
+ ret = dma_map_sgtable(attach->dev, sg, direction, 0);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ return sg;
+}
+
+static void amdxdna_ubuf_unmap(struct dma_buf_attachment *attach,
+ struct sg_table *sg,
+ enum dma_data_direction direction)
+{
+ struct amdxdna_ubuf_priv *ubuf = attach->dmabuf->priv;
+
+ if (ubuf->flags & AMDXDNA_UBUF_FLAG_MAP_DMA)
+ dma_unmap_sgtable(attach->dev, sg, direction, 0);
+
+ sg_free_table(sg);
+ kfree(sg);
+}
+
+static void amdxdna_ubuf_release(struct dma_buf *dbuf)
+{
+ struct amdxdna_ubuf_priv *ubuf = dbuf->priv;
+
+ unpin_user_pages(ubuf->pages, ubuf->nr_pages);
+ kvfree(ubuf->pages);
+ atomic64_sub(ubuf->nr_pages, &ubuf->mm->pinned_vm);
+ mmdrop(ubuf->mm);
+ kfree(ubuf);
+}
+
+static vm_fault_t amdxdna_ubuf_vm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct amdxdna_ubuf_priv *ubuf;
+ unsigned long pfn;
+ pgoff_t pgoff;
+
+ ubuf = vma->vm_private_data;
+ pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
+
+ pfn = page_to_pfn(ubuf->pages[pgoff]);
+ return vmf_insert_pfn(vma, vmf->address, pfn);
+}
+
+static const struct vm_operations_struct amdxdna_ubuf_vm_ops = {
+ .fault = amdxdna_ubuf_vm_fault,
+};
+
+static int amdxdna_ubuf_mmap(struct dma_buf *dbuf, struct vm_area_struct *vma)
+{
+ struct amdxdna_ubuf_priv *ubuf = dbuf->priv;
+
+ vma->vm_ops = &amdxdna_ubuf_vm_ops;
+ vma->vm_private_data = ubuf;
+ vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
+
+ return 0;
+}
+
+static int amdxdna_ubuf_vmap(struct dma_buf *dbuf, struct iosys_map *map)
+{
+ struct amdxdna_ubuf_priv *ubuf = dbuf->priv;
+ void *kva;
+
+ kva = vmap(ubuf->pages, ubuf->nr_pages, VM_MAP, PAGE_KERNEL);
+ if (!kva)
+ return -EINVAL;
+
+ iosys_map_set_vaddr(map, kva);
+ return 0;
+}
+
+static void amdxdna_ubuf_vunmap(struct dma_buf *dbuf, struct iosys_map *map)
+{
+ vunmap(map->vaddr);
+}
+
+static const struct dma_buf_ops amdxdna_ubuf_dmabuf_ops = {
+ .map_dma_buf = amdxdna_ubuf_map,
+ .unmap_dma_buf = amdxdna_ubuf_unmap,
+ .release = amdxdna_ubuf_release,
+ .mmap = amdxdna_ubuf_mmap,
+ .vmap = amdxdna_ubuf_vmap,
+ .vunmap = amdxdna_ubuf_vunmap,
+};
+
+struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
+ enum amdxdna_ubuf_flag flags,
+ u32 num_entries, void __user *va_entries)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ unsigned long lock_limit, new_pinned;
+ struct amdxdna_drm_va_entry *va_ent;
+ struct amdxdna_ubuf_priv *ubuf;
+ u32 npages, start = 0;
+ struct dma_buf *dbuf;
+ int i, ret;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ if (!can_do_mlock())
+ return ERR_PTR(-EPERM);
+
+ ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
+ if (!ubuf)
+ return ERR_PTR(-ENOMEM);
+
+ ubuf->flags = flags;
+ ubuf->mm = current->mm;
+ mmgrab(ubuf->mm);
+
+ va_ent = kvcalloc(num_entries, sizeof(*va_ent), GFP_KERNEL);
+ if (!va_ent) {
+ ret = -ENOMEM;
+ goto free_ubuf;
+ }
+
+ if (copy_from_user(va_ent, va_entries, sizeof(*va_ent) * num_entries)) {
+ XDNA_DBG(xdna, "Failed to copy va entries from user");
+ ret = -EFAULT;
+ goto free_ent;
+ }
+
+ for (i = 0, exp_info.size = 0; i < num_entries; i++) {
+ if (!IS_ALIGNED(va_ent[i].vaddr, PAGE_SIZE) ||
+ !IS_ALIGNED(va_ent[i].len, PAGE_SIZE)) {
+ XDNA_ERR(xdna, "Invalid address or len %llx, %llx",
+ va_ent[i].vaddr, va_ent[i].len);
+ ret = -EINVAL;
+ goto free_ent;
+ }
+
+ exp_info.size += va_ent[i].len;
+ }
+
+ ubuf->nr_pages = exp_info.size >> PAGE_SHIFT;
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ new_pinned = atomic64_add_return(ubuf->nr_pages, &ubuf->mm->pinned_vm);
+ if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
+ XDNA_DBG(xdna, "New pin %ld, limit %ld, cap %d",
+ new_pinned, lock_limit, capable(CAP_IPC_LOCK));
+ ret = -ENOMEM;
+ goto sub_pin_cnt;
+ }
+
+ ubuf->pages = kvmalloc_array(ubuf->nr_pages, sizeof(*ubuf->pages), GFP_KERNEL);
+ if (!ubuf->pages) {
+ ret = -ENOMEM;
+ goto sub_pin_cnt;
+ }
+
+ for (i = 0; i < num_entries; i++) {
+ npages = va_ent[i].len >> PAGE_SHIFT;
+
+ ret = pin_user_pages_fast(va_ent[i].vaddr, npages,
+ FOLL_WRITE | FOLL_LONGTERM,
+ &ubuf->pages[start]);
+ if (ret < 0 || ret != npages) {
+ XDNA_ERR(xdna, "Failed to pin pages ret %d", ret);
+ /* Make sure a partially pinned chunk is unpinned as well */
+ if (ret > 0)
+ start += ret;
+ ret = -ENOMEM;
+ goto destroy_pages;
+ }
+
+ start += ret;
+ }
+
+ exp_info.ops = &amdxdna_ubuf_dmabuf_ops;
+ exp_info.priv = ubuf;
+ exp_info.flags = O_RDWR | O_CLOEXEC;
+
+ dbuf = dma_buf_export(&exp_info);
+ if (IS_ERR(dbuf)) {
+ ret = PTR_ERR(dbuf);
+ goto destroy_pages;
+ }
+ kvfree(va_ent);
+
+ return dbuf;
+
+destroy_pages:
+ if (start)
+ unpin_user_pages(ubuf->pages, start);
+ kvfree(ubuf->pages);
+sub_pin_cnt:
+ atomic64_sub(ubuf->nr_pages, &ubuf->mm->pinned_vm);
+free_ent:
+ kvfree(va_ent);
+free_ubuf:
+ mmdrop(ubuf->mm);
+ kfree(ubuf);
+ return ERR_PTR(ret);
+}
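For reference, a minimal sketch of how an ioctl handler might consume this export helper; the args layout and the fd policy here are assumptions for illustration, not part of this patch:

	struct dma_buf *dbuf;
	int fd;

	dbuf = amdxdna_get_ubuf(dev, args->flags, args->num_entries,
				u64_to_user_ptr(args->va_entries));
	if (IS_ERR(dbuf))
		return PTR_ERR(dbuf);

	fd = dma_buf_fd(dbuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dbuf); /* drop the export reference on failure */
	return fd;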
diff --git a/drivers/accel/amdxdna/amdxdna_ubuf.h b/drivers/accel/amdxdna/amdxdna_ubuf.h
new file mode 100644
index 000000000000..e5cb3bdb3ec9
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_ubuf.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ */
+#ifndef _AMDXDNA_UBUF_H_
+#define _AMDXDNA_UBUF_H_
+
+#include <drm/drm_device.h>
+#include <linux/dma-buf.h>
+
+enum amdxdna_ubuf_flag {
+ AMDXDNA_UBUF_FLAG_MAP_DMA = 1,
+};
+
+struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
+ enum amdxdna_ubuf_flag flags,
+ u32 num_entries, void __user *va_entries);
+
+#endif /* _AMDXDNA_UBUF_H_ */
diff --git a/drivers/accel/habanalabs/Kconfig b/drivers/accel/habanalabs/Kconfig
index 1919fbb169c7..6d1506acbd72 100644
--- a/drivers/accel/habanalabs/Kconfig
+++ b/drivers/accel/habanalabs/Kconfig
@@ -27,3 +27,26 @@ config DRM_ACCEL_HABANALABS
To compile this driver as a module, choose M here: the
module will be called habanalabs.
+
+if DRM_ACCEL_HABANALABS
+
+config HL_HLDIO
+ bool "Habanalabs NVMe Direct I/O (HLDIO)"
+ depends on PCI_P2PDMA
+ depends on BLOCK
+ help
+ Enable NVMe peer-to-peer direct I/O support for Habanalabs AI
+ accelerators.
+
+ This allows direct data transfers between NVMe storage devices
+ and Habanalabs accelerators without involving system memory,
+ using PCI peer-to-peer DMA capabilities.
+
+ Requirements:
+ - CONFIG_PCI_P2PDMA=y
+ - NVMe device and Habanalabs accelerator under the same PCI root complex
+ - IOMMU disabled or in passthrough mode
+ - Hardware supporting PCI P2P DMA
+
+ If unsure, say N.
+
+endif # DRM_ACCEL_HABANALABS
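As a reference point, a .config fragment that satisfies the dependencies above might look like the following (illustrative only; the exact options depend on the rest of the configuration):

	CONFIG_BLOCK=y
	CONFIG_PCI_P2PDMA=y
	CONFIG_DRM_ACCEL_HABANALABS=m
	CONFIG_HL_HLDIO=y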
diff --git a/drivers/accel/habanalabs/common/Makefile b/drivers/accel/habanalabs/common/Makefile
index e6abffea9f87..b6d00de09db5 100644
--- a/drivers/accel/habanalabs/common/Makefile
+++ b/drivers/accel/habanalabs/common/Makefile
@@ -13,3 +13,8 @@ HL_COMMON_FILES := common/habanalabs_drv.o common/device.o common/context.o \
common/command_submission.o common/firmware_if.o \
common/security.o common/state_dump.o \
common/memory_mgr.o common/decoder.o
+
+# Conditionally add HLDIO support
+ifdef CONFIG_HL_HLDIO
+HL_COMMON_FILES += common/hldio.o
+endif
\ No newline at end of file
diff --git a/drivers/accel/habanalabs/common/debugfs.c b/drivers/accel/habanalabs/common/debugfs.c
index 4b391807e5f2..5f0820b19ccb 100644
--- a/drivers/accel/habanalabs/common/debugfs.c
+++ b/drivers/accel/habanalabs/common/debugfs.c
@@ -6,6 +6,7 @@
*/
#include "habanalabs.h"
+#include "hldio.h"
#include "../include/hw_ip/mmu/mmu_general.h"
#include <linux/pci.h>
@@ -602,6 +603,198 @@ static int engines_show(struct seq_file *s, void *data)
return 0;
}
+#ifdef CONFIG_HL_HLDIO
+/* DIO debugfs functions following the standard pattern */
+static int dio_ssd2hl_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+
+ if (!hdev->asic_prop.supports_nvme) {
+ seq_puts(s, "NVMe Direct I/O not supported\\n");
+ return 0;
+ }
+
+ seq_puts(s, "Usage: echo \"fd=N va=0xADDR off=N len=N\" > dio_ssd2hl\n");
+ seq_printf(s, "Last transfer: %zu bytes\\n", dev_entry->dio_stats.last_len_read);
+ seq_puts(s, "Note: All parameters must be page-aligned (4KB)\\n");
+
+ return 0;
+}
+
+static ssize_t dio_ssd2hl_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct seq_file *s = file->private_data;
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+ struct hl_ctx *ctx = hdev->kernel_ctx;
+ char kbuf[128];
+ u64 device_va = 0, off_bytes = 0, len_bytes = 0;
+ u32 fd = 0;
+ size_t len_read = 0;
+ int rc, parsed;
+
+ if (!hdev->asic_prop.supports_nvme)
+ return -EOPNOTSUPP;
+
+ if (count >= sizeof(kbuf))
+ return -EINVAL;
+
+ if (copy_from_user(kbuf, buf, count))
+ return -EFAULT;
+
+ kbuf[count] = 0;
+
+ /* Parse: fd=N va=0xADDR off=N len=N */
+ parsed = sscanf(kbuf, "fd=%u va=0x%llx off=%llu len=%llu",
+ &fd, &device_va, &off_bytes, &len_bytes);
+ if (parsed != 4) {
+ dev_err(hdev->dev, "Invalid format. Expected: fd=N va=0xADDR off=N len=N\\n");
+ return -EINVAL;
+ }
+
+ /* Validate file descriptor */
+ if (fd == 0) {
+ dev_err(hdev->dev, "Invalid file descriptor: %u\\n", fd);
+ return -EINVAL;
+ }
+
+ /* Validate alignment requirements */
+ if (!IS_ALIGNED(device_va, PAGE_SIZE) ||
+ !IS_ALIGNED(off_bytes, PAGE_SIZE) ||
+ !IS_ALIGNED(len_bytes, PAGE_SIZE)) {
+ dev_err(hdev->dev,
+ "All parameters must be page-aligned (4KB)\\n");
+ return -EINVAL;
+ }
+
+ /* Validate transfer size */
+ if (len_bytes == 0 || len_bytes > SZ_1G) {
+ dev_err(hdev->dev, "Invalid length: %llu (max 1GB)\\n",
+ len_bytes);
+ return -EINVAL;
+ }
+
+ dev_dbg(hdev->dev, "DIO SSD2HL: fd=%u va=0x%llx off=%llu len=%llu\\n",
+ fd, device_va, off_bytes, len_bytes);
+
+ rc = hl_dio_ssd2hl(hdev, ctx, fd, device_va, off_bytes, len_bytes, &len_read);
+ if (rc < 0) {
+ dev_entry->dio_stats.failed_ops++;
+ dev_err(hdev->dev, "SSD2HL operation failed: %d\\n", rc);
+ return rc;
+ }
+
+ /* Update statistics */
+ dev_entry->dio_stats.total_ops++;
+ dev_entry->dio_stats.successful_ops++;
+ dev_entry->dio_stats.bytes_transferred += len_read;
+ dev_entry->dio_stats.last_len_read = len_read;
+
+ dev_dbg(hdev->dev, "DIO SSD2HL completed: %zu bytes transferred\\n", len_read);
+
+ return count;
+}
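+
+/*
+ * Illustrative flow (values are examples only): from a process that has a
+ * file open with O_DIRECT as fd N, write "fd=N va=0x... off=0 len=1048576"
+ * to this node; the fd is resolved in the context of the writing process,
+ * and all three numeric parameters must be page-aligned.
+ */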
+
+static int dio_hl2ssd_show(struct seq_file *s, void *data)
+{
+ seq_puts(s, "HL2SSD (device-to-SSD) transfers not implemented\\n");
+ return 0;
+}
+
+static ssize_t dio_hl2ssd_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct seq_file *s = file->private_data;
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+
+ if (!hdev->asic_prop.supports_nvme)
+ return -EOPNOTSUPP;
+
+ dev_dbg(hdev->dev, "HL2SSD operation not implemented\\n");
+ return -EOPNOTSUPP;
+}
+
+static int dio_stats_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+ struct hl_dio_stats *stats = &dev_entry->dio_stats;
+ u64 avg_bytes_per_op = 0, success_rate = 0;
+
+ if (!hdev->asic_prop.supports_nvme) {
+ seq_puts(s, "NVMe Direct I/O not supported\\n");
+ return 0;
+ }
+
+ if (stats->successful_ops > 0)
+ avg_bytes_per_op = stats->bytes_transferred / stats->successful_ops;
+
+ if (stats->total_ops > 0)
+ success_rate = (stats->successful_ops * 100) / stats->total_ops;
+
+ seq_puts(s, "=== Habanalabs Direct I/O Statistics ===\\n");
+ seq_printf(s, "Total operations: %llu\\n", stats->total_ops);
+ seq_printf(s, "Successful ops: %llu\\n", stats->successful_ops);
+ seq_printf(s, "Failed ops: %llu\\n", stats->failed_ops);
+ seq_printf(s, "Success rate: %llu%%\\n", success_rate);
+ seq_printf(s, "Total bytes: %llu\\n", stats->bytes_transferred);
+ seq_printf(s, "Avg bytes per op: %llu\\n", avg_bytes_per_op);
+ seq_printf(s, "Last transfer: %zu bytes\\n", stats->last_len_read);
+
+ return 0;
+}
+
+static int dio_reset_show(struct seq_file *s, void *data)
+{
+ seq_puts(s, "Write '1' to reset DIO statistics\\n");
+ return 0;
+}
+
+static ssize_t dio_reset_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct seq_file *s = file->private_data;
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+ char kbuf[8];
+ unsigned long val;
+ int rc;
+
+ if (!hdev->asic_prop.supports_nvme)
+ return -EOPNOTSUPP;
+
+ if (count >= sizeof(kbuf))
+ return -EINVAL;
+
+ if (copy_from_user(kbuf, buf, count))
+ return -EFAULT;
+
+ kbuf[count] = 0;
+
+ rc = kstrtoul(kbuf, 0, &val);
+ if (rc)
+ return rc;
+
+ if (val == 1) {
+ memset(&dev_entry->dio_stats, 0, sizeof(dev_entry->dio_stats));
+ dev_dbg(hdev->dev, "DIO statistics reset\\n");
+ } else {
+ dev_err(hdev->dev, "Write '1' to reset statistics\\n");
+ return -EINVAL;
+ }
+
+ return count;
+}
+#endif
+
static ssize_t hl_memory_scrub(struct file *f, const char __user *buf,
size_t count, loff_t *ppos)
{
@@ -788,6 +981,113 @@ static void hl_access_host_mem(struct hl_device *hdev, u64 addr, u64 *val,
}
}
+static void dump_cfg_access_entry(struct hl_device *hdev,
+ struct hl_debugfs_cfg_access_entry *entry)
+{
+ char *access_type = "";
+ struct tm tm;
+
+ switch (entry->debugfs_type) {
+ case DEBUGFS_READ32:
+ access_type = "READ32 from";
+ break;
+ case DEBUGFS_WRITE32:
+ access_type = "WRITE32 to";
+ break;
+ case DEBUGFS_READ64:
+ access_type = "READ64 from";
+ break;
+ case DEBUGFS_WRITE64:
+ access_type = "WRITE64 to";
+ break;
+ default:
+ dev_err(hdev->dev, "Invalid DEBUGFS access type (%u)\n", entry->debugfs_type);
+ return;
+ }
+
+ time64_to_tm(entry->seconds_since_epoch, 0, &tm);
+ dev_info(hdev->dev,
+ "%ld-%02d-%02d %02d:%02d:%02d (UTC): %s %#llx\n", tm.tm_year + 1900, tm.tm_mon + 1,
+ tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec, access_type, entry->addr);
+}
+
+void hl_debugfs_cfg_access_history_dump(struct hl_device *hdev)
+{
+ struct hl_debugfs_cfg_access *dbgfs = &hdev->debugfs_cfg_accesses;
+ u32 i, head, count = 0;
+ time64_t entry_time, now;
+ unsigned long flags;
+
+ now = ktime_get_real_seconds();
+
+ spin_lock_irqsave(&dbgfs->lock, flags);
+ head = dbgfs->head;
+ if (head == 0)
+ i = HL_DBGFS_CFG_ACCESS_HIST_LEN - 1;
+ else
+ i = head - 1;
+
+ /* Walk back until timeout or invalid entry */
+ while (dbgfs->cfg_access_list[i].valid) {
+ entry_time = dbgfs->cfg_access_list[i].seconds_since_epoch;
+ /* Stop when entry is older than timeout */
+ if (now - entry_time > HL_DBGFS_CFG_ACCESS_HIST_TIMEOUT_SEC)
+ break;
+
+ /* print single entry under lock */
+ {
+ struct hl_debugfs_cfg_access_entry entry = dbgfs->cfg_access_list[i];
+ /*
+ * We copy the entry out under lock and then print after
+ * releasing the lock to minimize time under lock.
+ */
+ spin_unlock_irqrestore(&dbgfs->lock, flags);
+ dump_cfg_access_entry(hdev, &entry);
+ spin_lock_irqsave(&dbgfs->lock, flags);
+ }
+
+ /* mark consumed */
+ dbgfs->cfg_access_list[i].valid = false;
+
+ if (i == 0)
+ i = HL_DBGFS_CFG_ACCESS_HIST_LEN - 1;
+ else
+ i--;
+ count++;
+ if (count >= HL_DBGFS_CFG_ACCESS_HIST_LEN)
+ break;
+ }
+ spin_unlock_irqrestore(&dbgfs->lock, flags);
+}
+
+static void check_if_cfg_access_and_log(struct hl_device *hdev, u64 addr, size_t access_size,
+ enum debugfs_access_type access_type)
+{
+ struct hl_debugfs_cfg_access *dbgfs_cfg_accesses = &hdev->debugfs_cfg_accesses;
+ struct pci_mem_region *mem_reg = &hdev->pci_mem_region[PCI_REGION_CFG];
+ struct hl_debugfs_cfg_access_entry *new_entry;
+ unsigned long flags;
+
+ /* Check if address is in config memory */
+ if (addr >= mem_reg->region_base &&
+ mem_reg->region_size >= access_size &&
+ addr <= mem_reg->region_base + mem_reg->region_size - access_size) {
+
+ spin_lock_irqsave(&dbgfs_cfg_accesses->lock, flags);
+
+ new_entry = &dbgfs_cfg_accesses->cfg_access_list[dbgfs_cfg_accesses->head];
+ new_entry->seconds_since_epoch = ktime_get_real_seconds();
+ new_entry->addr = addr;
+ new_entry->debugfs_type = access_type;
+ new_entry->valid = true;
+ dbgfs_cfg_accesses->head = (dbgfs_cfg_accesses->head + 1)
+ % HL_DBGFS_CFG_ACCESS_HIST_LEN;
+
+ spin_unlock_irqrestore(&dbgfs_cfg_accesses->lock, flags);
+
+ }
+}
+
static int hl_access_mem(struct hl_device *hdev, u64 addr, u64 *val,
enum debugfs_access_type acc_type)
{
@@ -805,6 +1105,7 @@ static int hl_access_mem(struct hl_device *hdev, u64 addr, u64 *val,
return rc;
}
+ check_if_cfg_access_and_log(hdev, addr, acc_size, acc_type);
rc = hl_access_dev_mem_by_region(hdev, addr, val, acc_type, &found);
if (rc) {
dev_err(hdev->dev,
@@ -1525,6 +1826,13 @@ static const struct hl_info_list hl_debugfs_list[] = {
{"mmu", mmu_show, mmu_asid_va_write},
{"mmu_error", mmu_ack_error, mmu_ack_error_value_write},
{"engines", engines_show, NULL},
+#ifdef CONFIG_HL_HLDIO
+ /* DIO entries - only created if NVMe is supported */
+ {"dio_ssd2hl", dio_ssd2hl_show, dio_ssd2hl_write},
+ {"dio_stats", dio_stats_show, NULL},
+ {"dio_reset", dio_reset_show, dio_reset_write},
+ {"dio_hl2ssd", dio_hl2ssd_show, dio_hl2ssd_write},
+#endif
};
static int hl_debugfs_open(struct inode *inode, struct file *file)
@@ -1723,6 +2031,11 @@ static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_ent
&hdev->asic_prop.server_type);
for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
+ /* Skip DIO entries if NVMe is not supported */
+ if (strncmp(hl_debugfs_list[i].name, "dio_", 4) == 0 &&
+ !hdev->asic_prop.supports_nvme)
+ continue;
+
debugfs_create_file(hl_debugfs_list[i].name,
0644,
root,
@@ -1762,6 +2075,14 @@ int hl_debugfs_device_init(struct hl_device *hdev)
spin_lock_init(&dev_entry->userptr_spinlock);
mutex_init(&dev_entry->ctx_mem_hash_mutex);
+ spin_lock_init(&hdev->debugfs_cfg_accesses.lock);
+ hdev->debugfs_cfg_accesses.head = 0; /* already zero by alloc but explicit init is fine */
+
+#ifdef CONFIG_HL_HLDIO
+ /* Initialize DIO statistics */
+ memset(&dev_entry->dio_stats, 0, sizeof(dev_entry->dio_stats));
+#endif
+
return 0;
}
diff --git a/drivers/accel/habanalabs/common/device.c b/drivers/accel/habanalabs/common/device.c
index 80fa08bf57bd..999c92d7036e 100644
--- a/drivers/accel/habanalabs/common/device.c
+++ b/drivers/accel/habanalabs/common/device.c
@@ -1630,6 +1630,11 @@ int hl_device_reset(struct hl_device *hdev, u32 flags)
from_watchdog_thread = !!(flags & HL_DRV_RESET_FROM_WD_THR);
reset_upon_device_release = hdev->reset_upon_device_release && from_dev_release;
+ if (hdev->cpld_shutdown) {
+ dev_err(hdev->dev, "Cannot reset device, cpld is shutdown! Device is NOT usable\n");
+ return -EIO;
+ }
+
if (!hard_reset && (hl_device_status(hdev) == HL_DEVICE_STATUS_MALFUNCTION)) {
dev_dbg(hdev->dev, "soft-reset isn't supported on a malfunctioning device\n");
return 0;
@@ -2576,6 +2581,14 @@ void hl_device_fini(struct hl_device *hdev)
if (rc)
dev_err(hdev->dev, "hw_fini failed in device fini while removing device %d\n", rc);
+ /* Reset the H/W (if it is accessible). It will be in idle state after this returns */
+ if (!hdev->cpld_shutdown) {
+ rc = hdev->asic_funcs->hw_fini(hdev, true, false);
+ if (rc)
+ dev_err(hdev->dev,
+ "hw_fini failed in device fini while removing device %d\n", rc);
+ }
+
hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;
/* Release kernel context */
@@ -2943,3 +2956,13 @@ void hl_handle_clk_change_event(struct hl_device *hdev, u16 event_type, u64 *eve
mutex_unlock(&clk_throttle->lock);
}
+
+void hl_eq_cpld_shutdown_event_handle(struct hl_device *hdev, u16 event_id, u64 *event_mask)
+{
+ hl_handle_critical_hw_err(hdev, event_id, event_mask);
+ *event_mask |= HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE;
+
+ /* Avoid any new accesses to the H/W */
+ hdev->disabled = true;
+ hdev->cpld_shutdown = true;
+}
diff --git a/drivers/accel/habanalabs/common/habanalabs.h b/drivers/accel/habanalabs/common/habanalabs.h
index 6f27ce4fa01b..d94c2ba22a6a 100644
--- a/drivers/accel/habanalabs/common/habanalabs.h
+++ b/drivers/accel/habanalabs/common/habanalabs.h
@@ -90,7 +90,9 @@ struct hl_fpriv;
#define HL_COMMON_USER_CQ_INTERRUPT_ID 0xFFF
#define HL_COMMON_DEC_INTERRUPT_ID 0xFFE
-#define HL_STATE_DUMP_HIST_LEN 5
+#define HL_STATE_DUMP_HIST_LEN 5
+#define HL_DBGFS_CFG_ACCESS_HIST_LEN 20
+#define HL_DBGFS_CFG_ACCESS_HIST_TIMEOUT_SEC 2 /* 2s */
/* Default value for device reset trigger , an invalid value */
#define HL_RESET_TRIGGER_DEFAULT 0xFF
@@ -702,6 +704,7 @@ struct hl_hints_range {
* @supports_advanced_cpucp_rc: true if new cpucp opcodes are supported.
* @supports_engine_modes: true if changing engines/engine_cores modes is supported.
* @support_dynamic_resereved_fw_size: true if we support dynamic reserved size for fw.
+ * @supports_nvme: indicates whether the asic supports NVMe P2P DMA.
*/
struct asic_fixed_properties {
struct hw_queue_properties *hw_queues_props;
@@ -822,6 +825,7 @@ struct asic_fixed_properties {
u8 supports_advanced_cpucp_rc;
u8 supports_engine_modes;
u8 support_dynamic_resereved_fw_size;
+ u8 supports_nvme;
};
/**
@@ -2274,6 +2278,9 @@ struct hl_vm {
u8 init_done;
};
+#ifdef CONFIG_HL_HLDIO
+#include "hldio.h"
+#endif
/*
* DEBUG, PROFILING STRUCTURE
@@ -2403,6 +2410,7 @@ struct hl_debugfs_entry {
* @i2c_addr: generic u8 debugfs file for address value to use in i2c_data_read.
* @i2c_reg: generic u8 debugfs file for register value to use in i2c_data_read.
* @i2c_len: generic u8 debugfs file for length value to use in i2c_data_read.
+ * @dio_stats: Direct I/O statistics
*/
struct hl_dbg_device_entry {
struct dentry *root;
@@ -2434,6 +2442,35 @@ struct hl_dbg_device_entry {
u8 i2c_addr;
u8 i2c_reg;
u8 i2c_len;
+#ifdef CONFIG_HL_HLDIO
+ struct hl_dio_stats dio_stats;
+#endif
+};
+
+/**
+ * struct hl_debugfs_cfg_access_entry - single debugfs config access object, member of
+ * hl_debugfs_cfg_access.
+ * @seconds_since_epoch: seconds since January 1, 1970, used for time comparisons.
+ * @debugfs_type: the debugfs operation requested, can be READ32, WRITE32, READ64 or WRITE64.
+ * @addr: the requested address to access.
+ * @valid: if set, this entry has valid data for dumping at interrupt time.
+ */
+struct hl_debugfs_cfg_access_entry {
+ time64_t seconds_since_epoch;
+ enum debugfs_access_type debugfs_type;
+ u64 addr;
+ bool valid;
+};
+
+/**
+ * struct hl_debugfs_cfg_access - saves debugfs config region access requests history.
+ * @cfg_access_list: list of objects describing config region access requests.
+ * @head: next valid index to add new entry to in cfg_access_list.
+ */
+struct hl_debugfs_cfg_access {
+ struct hl_debugfs_cfg_access_entry cfg_access_list[HL_DBGFS_CFG_ACCESS_HIST_LEN];
+ u32 head;
+ spinlock_t lock; /* protects head and entries */
};
/**
@@ -3281,6 +3318,7 @@ struct eq_heartbeat_debug_info {
* @hl_chip_info: ASIC's sensors information.
* @device_status_description: device status description.
* @hl_debugfs: device's debugfs manager.
+ * @debugfs_cfg_accesses: list of last debugfs config region accesses.
* @cb_pool: list of pre allocated CBs.
* @cb_pool_lock: protects the CB pool.
* @internal_cb_pool_virt_addr: internal command buffer pool virtual address.
@@ -3305,6 +3343,7 @@ struct eq_heartbeat_debug_info {
* @captured_err_info: holds information about errors.
* @reset_info: holds current device reset information.
* @heartbeat_debug_info: counters used to debug heartbeat failures.
+ * @hldio: describes habanalabs direct storage interaction interface.
* @irq_affinity_mask: mask of available CPU cores for user and decoder interrupt handling.
* @stream_master_qid_arr: pointer to array with QIDs of master streams.
* @fw_inner_major_ver: the major of current loaded preboot inner version.
@@ -3357,6 +3396,7 @@ struct eq_heartbeat_debug_info {
* addresses.
* @is_in_dram_scrub: true if dram scrub operation is on going.
* @disabled: is device disabled.
+ * @cpld_shutdown: is cpld shutdown.
* @late_init_done: is late init stage was done during initialization.
* @hwmon_initialized: is H/W monitor sensors was initialized.
* @reset_on_lockup: true if a reset should be done in case of stuck CS, false
@@ -3461,6 +3501,7 @@ struct hl_device {
struct hwmon_chip_info *hl_chip_info;
struct hl_dbg_device_entry hl_debugfs;
+ struct hl_debugfs_cfg_access debugfs_cfg_accesses;
struct list_head cb_pool;
spinlock_t cb_pool_lock;
@@ -3496,7 +3537,9 @@ struct hl_device {
struct hl_reset_info reset_info;
struct eq_heartbeat_debug_info heartbeat_debug_info;
-
+#ifdef CONFIG_HL_HLDIO
+ struct hl_dio hldio;
+#endif
cpumask_t irq_affinity_mask;
u32 *stream_master_qid_arr;
@@ -3532,6 +3575,7 @@ struct hl_device {
u16 cpu_pci_msb_addr;
u8 is_in_dram_scrub;
u8 disabled;
+ u8 cpld_shutdown;
u8 late_init_done;
u8 hwmon_initialized;
u8 reset_on_lockup;
@@ -4089,6 +4133,7 @@ void hl_init_cpu_for_irq(struct hl_device *hdev);
void hl_set_irq_affinity(struct hl_device *hdev, int irq);
void hl_eq_heartbeat_event_handle(struct hl_device *hdev);
void hl_handle_clk_change_event(struct hl_device *hdev, u16 event_type, u64 *event_mask);
+void hl_eq_cpld_shutdown_event_handle(struct hl_device *hdev, u16 event_id, u64 *event_mask);
#ifdef CONFIG_DEBUG_FS
@@ -4110,6 +4155,7 @@ void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
void hl_debugfs_set_state_dump(struct hl_device *hdev, char *data,
unsigned long length);
+void hl_debugfs_cfg_access_history_dump(struct hl_device *hdev);
#else
@@ -4185,6 +4231,10 @@ static inline void hl_debugfs_set_state_dump(struct hl_device *hdev,
{
}
+static inline void hl_debugfs_cfg_access_history_dump(struct hl_device *hdev)
+{
+}
+
#endif
/* Security */
diff --git a/drivers/accel/habanalabs/common/habanalabs_ioctl.c b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
index dc80ca921d90..fdfdabc85e54 100644
--- a/drivers/accel/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
@@ -961,6 +961,12 @@ static int send_fw_generic_request(struct hl_device *hdev, struct hl_info_args *
case HL_PASSTHROUGH_VERSIONS:
need_input_buff = false;
break;
+ case HL_GET_ERR_COUNTERS_CMD:
+ need_input_buff = true;
+ break;
+ case HL_GET_P_STATE:
+ need_input_buff = false;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/accel/habanalabs/common/hldio.c b/drivers/accel/habanalabs/common/hldio.c
new file mode 100644
index 000000000000..083ae5610875
--- /dev/null
+++ b/drivers/accel/habanalabs/common/hldio.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2024 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+#include "hldio.h"
+#include <generated/uapi/linux/version.h>
+#include <linux/pci-p2pdma.h>
+#include <linux/blkdev.h>
+#include <linux/vmalloc.h>
+
+/*
+ * NVMe Direct I/O implementation for habanalabs driver
+ *
+ * ASSUMPTIONS
+ * ===========
+ * 1. No IOMMU (well, technically it can work with an IOMMU, but then it is
+ * almost useless).
+ * 2. Only READ operations (can extend in the future).
+ * 3. No sparse files (can overcome this in the future).
+ * 4. Kernel version >= 6.9
+ * 5. Requiring page alignment is OK (I don't see a solution to this one
+ * right now; how do we read partial pages?)
+ * 6. Kernel compiled with CONFIG_PCI_P2PDMA. This requires a CUSTOM kernel.
+ * Theoretically I have a slight idea on how this could be solvable, but it
+ * is probably unacceptable for the upstream. Also it may not work in the end.
+ * 7. Either make sure our cards and disks are under the same PCI bridge, or
+ * compile a custom kernel to hack around this.
+ */
+
+#define IO_STABILIZE_TIMEOUT 10000000 /* 10 seconds in microseconds */
+
+/*
+ * This struct contains all the useful data I could milk out of the file handle
+ * provided by the user.
+ * @TODO: right now it is retrieved on each IO, but this could be done once
+ * via a dedicated IOCTL (for example, HL_REGISTER_HANDLE).
+ */
+struct hl_dio_fd {
+ /* Back pointer in case we need it in async completion */
+ struct hl_ctx *ctx;
+ /* Associated fd struct */
+ struct file *filp;
+};
+
+/*
+ * This is a single IO descriptor
+ */
+struct hl_direct_io {
+ struct hl_dio_fd f;
+ struct kiocb kio;
+ struct bio_vec *bv;
+ struct iov_iter iter;
+ u64 device_va;
+ u64 off_bytes;
+ u64 len_bytes;
+ u32 type;
+};
+
+bool hl_device_supports_nvme(struct hl_device *hdev)
+{
+ return hdev->asic_prop.supports_nvme;
+}
+
+static int hl_dio_fd_register(struct hl_ctx *ctx, int fd, struct hl_dio_fd *f)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct block_device *bd;
+ struct super_block *sb;
+ struct inode *inode;
+ struct gendisk *gd;
+ struct device *disk_dev;
+ int rc;
+
+ f->filp = fget(fd);
+ if (!f->filp) {
+ rc = -ENOENT;
+ goto out;
+ }
+
+ if (!(f->filp->f_flags & O_DIRECT)) {
+ dev_err(hdev->dev, "file is not in the direct mode\n");
+ rc = -EINVAL;
+ goto fput;
+ }
+
+ if (!f->filp->f_op->read_iter) {
+ dev_err(hdev->dev, "read iter is not supported, need to fall back to legacy\n");
+ rc = -EINVAL;
+ goto fput;
+ }
+
+ inode = file_inode(f->filp);
+ sb = inode->i_sb;
+ bd = sb->s_bdev;
+ gd = bd ? bd->bd_disk : NULL;
+
+ /* Validate the block device before it is dereferenced any further */
+ if (!bd || !gd) {
+ dev_err(hdev->dev, "invalid block device\n");
+ rc = -ENODEV;
+ goto fput;
+ }
+
+ if (inode->i_blocks << sb->s_blocksize_bits < i_size_read(inode)) {
+ dev_err(hdev->dev, "sparse files are not currently supported\n");
+ rc = -EINVAL;
+ goto fput;
+ }
+ /* Get the underlying device from the block device */
+ disk_dev = disk_to_dev(gd);
+ if (!dma_pci_p2pdma_supported(disk_dev)) {
+ dev_err(hdev->dev, "device does not support PCI P2P DMA\n");
+ rc = -EOPNOTSUPP;
+ goto fput;
+ }
+
+ /*
+ * @TODO: Maybe we need additional checks here
+ */
+
+ f->ctx = ctx;
+ rc = 0;
+
+ goto out;
+fput:
+ fput(f->filp);
+out:
+ return rc;
+}
+
+static void hl_dio_fd_unregister(struct hl_dio_fd *f)
+{
+ fput(f->filp);
+}
+
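+/*
+ * Sum the per-cpu inflight IO counters; the total is only stable once
+ * io_enabled has been cleared, as new IOs may otherwise still race in.
+ */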
+static long hl_dio_count_io(struct hl_device *hdev)
+{
+ s64 sum = 0;
+ int i;
+
+ for_each_possible_cpu(i)
+ sum += per_cpu(*hdev->hldio.inflight_ios, i);
+
+ return sum;
+}
+
+static bool hl_dio_get_iopath(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+
+ if (hdev->hldio.io_enabled) {
+ this_cpu_inc(*hdev->hldio.inflight_ios);
+
+ /* Re-check after the increment to close the race with hl_dio_stop() */
+ if (!hdev->hldio.io_enabled) {
+ this_cpu_dec(*hdev->hldio.inflight_ios);
+ return false;
+ }
+
+ hl_ctx_get(ctx);
+
+ return true;
+ }
+
+ return false;
+}
+
+static void hl_dio_put_iopath(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+
+ hl_ctx_put(ctx);
+ this_cpu_dec(*hdev->hldio.inflight_ios);
+}
+
+static void hl_dio_set_io_enabled(struct hl_device *hdev, bool enabled)
+{
+ hdev->hldio.io_enabled = enabled;
+}
+
+static bool hl_dio_validate_io(struct hl_device *hdev, struct hl_direct_io *io)
+{
+ if ((u64)io->device_va & ~PAGE_MASK) {
+ dev_dbg(hdev->dev, "device address must be 4K aligned\n");
+ return false;
+ }
+
+ if (io->len_bytes & ~PAGE_MASK) {
+ dev_dbg(hdev->dev, "IO length must be 4K aligned\n");
+ return false;
+ }
+
+ if (io->off_bytes & ~PAGE_MASK) {
+ dev_dbg(hdev->dev, "IO offset must be 4K aligned\n");
+ return false;
+ }
+
+ return true;
+}
+
+static struct page *hl_dio_va2page(struct hl_device *hdev, struct hl_ctx *ctx, u64 device_va)
+{
+ struct hl_dio *hldio = &hdev->hldio;
+ u64 device_pa;
+ int rc, i;
+
+ rc = hl_mmu_va_to_pa(ctx, device_va, &device_pa);
+ if (rc) {
+ dev_err(hdev->dev, "device virtual address translation error: %#llx (%d)",
+ device_va, rc);
+ return NULL;
+ }
+
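+ /*
+ * Linear scan over the published P2P regions; within a region, the page
+ * index is the physical-address offset from the region base, in pages.
+ */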
+ for (i = 0 ; i < hldio->np2prs ; ++i) {
+ if (device_pa >= hldio->p2prs[i].device_pa &&
+ device_pa < hldio->p2prs[i].device_pa + hldio->p2prs[i].size)
+ return hldio->p2prs[i].p2ppages[(device_pa - hldio->p2prs[i].device_pa) >>
+ PAGE_SHIFT];
+ }
+
+ return NULL;
+}
+
+static ssize_t hl_direct_io(struct hl_device *hdev, struct hl_direct_io *io)
+{
+ u64 npages, device_va;
+ ssize_t rc;
+ int i;
+
+ if (!hl_dio_validate_io(hdev, io))
+ return -EINVAL;
+
+ if (!hl_dio_get_iopath(io->f.ctx)) {
+ dev_info(hdev->dev, "can't schedule a new IO, IO is disabled\n");
+ return -ESHUTDOWN;
+ }
+
+ init_sync_kiocb(&io->kio, io->f.filp);
+ io->kio.ki_pos = io->off_bytes;
+
+ npages = (io->len_bytes >> PAGE_SHIFT);
+
+ /* @TODO: this can be implemented smarter, vmalloc in the iopath is not
+ * ideal. Maybe some variation of genpool. The number of pages may differ
+ * greatly, so maybe even use pools of different sizes and choose the
+ * closest one.
+ */
+ io->bv = vzalloc(npages * sizeof(struct bio_vec));
+ if (!io->bv) {
+ hl_dio_put_iopath(io->f.ctx);
+ return -ENOMEM;
+ }
+
+ for (i = 0, device_va = io->device_va; i < npages ; ++i, device_va += PAGE_SIZE) {
+ io->bv[i].bv_page = hl_dio_va2page(hdev, io->f.ctx, device_va);
+ if (!io->bv[i].bv_page) {
+ dev_err(hdev->dev, "error getting page struct for device va %#llx",
+ device_va);
+ rc = -EFAULT;
+ goto cleanup;
+ }
+ io->bv[i].bv_offset = 0;
+ io->bv[i].bv_len = PAGE_SIZE;
+ }
+
+ iov_iter_bvec(&io->iter, io->type, io->bv, npages, io->len_bytes);
+ if (io->f.filp->f_op && io->f.filp->f_op->read_iter)
+ rc = io->f.filp->f_op->read_iter(&io->kio, &io->iter);
+ else
+ rc = -EINVAL;
+
+cleanup:
+ vfree(io->bv);
+ hl_dio_put_iopath(io->f.ctx);
+
+ dev_dbg(hdev->dev, "IO ended with %ld\n", rc);
+
+ return rc;
+}
+
+/*
+ * @TODO: This function can be used as a callback for io completion under
+ * kio->ki_complete in order to implement async IO.
+ * Note that on more recent kernels there is no ret2.
+ */
+__maybe_unused static void hl_direct_io_complete(struct kiocb *kio, long ret, long ret2)
+{
+ struct hl_direct_io *io = container_of(kio, struct hl_direct_io, kio);
+
+ dev_dbg(io->f.ctx->hdev->dev, "IO completed with %ld\n", ret);
+
+ /* Do something to copy result to user / notify completion */
+
+ hl_dio_put_iopath(io->f.ctx);
+
+ hl_dio_fd_unregister(&io->f);
+}
+
+/*
+ * DMA disk to ASIC, wait for results. Must be invoked from the user context
+ */
+int hl_dio_ssd2hl(struct hl_device *hdev, struct hl_ctx *ctx, int fd,
+ u64 device_va, off_t off_bytes, size_t len_bytes,
+ size_t *len_read)
+{
+ struct hl_direct_io *io;
+ ssize_t rc;
+
+ dev_dbg(hdev->dev, "SSD2HL fd=%d va=%#llx len=%#lx\n", fd, device_va, len_bytes);
+
+ io = kzalloc(sizeof(*io), GFP_KERNEL);
+ if (!io) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ *io = (struct hl_direct_io){
+ .device_va = device_va,
+ .len_bytes = len_bytes,
+ .off_bytes = off_bytes,
+ .type = READ,
+ };
+
+ rc = hl_dio_fd_register(ctx, fd, &io->f);
+ if (rc)
+ goto kfree_io;
+
+ rc = hl_direct_io(hdev, io);
+ if (rc >= 0) {
+ *len_read = rc;
+ rc = 0;
+ }
+
+ /* This shall be called only in the case of a sync IO */
+ hl_dio_fd_unregister(&io->f);
+kfree_io:
+ kfree(io);
+out:
+ return rc;
+}
+
+static void hl_p2p_region_fini(struct hl_device *hdev, struct hl_p2p_region *p2pr)
+{
+ if (p2pr->p2ppages) {
+ vfree(p2pr->p2ppages);
+ p2pr->p2ppages = NULL;
+ }
+
+ if (p2pr->p2pmem) {
+ dev_dbg(hdev->dev, "freeing P2P mem from %p, size=%#llx\n",
+ p2pr->p2pmem, p2pr->size);
+ pci_free_p2pmem(hdev->pdev, p2pr->p2pmem, p2pr->size);
+ p2pr->p2pmem = NULL;
+ }
+}
+
+void hl_p2p_region_fini_all(struct hl_device *hdev)
+{
+ int i;
+
+ for (i = 0 ; i < hdev->hldio.np2prs ; ++i)
+ hl_p2p_region_fini(hdev, &hdev->hldio.p2prs[i]);
+
+ kvfree(hdev->hldio.p2prs);
+ hdev->hldio.p2prs = NULL;
+ hdev->hldio.np2prs = 0;
+}
+
+int hl_p2p_region_init(struct hl_device *hdev, struct hl_p2p_region *p2pr)
+{
+ void *addr;
+ int rc, i;
+
+ /* Start by publishing our p2p memory */
+ rc = pci_p2pdma_add_resource(hdev->pdev, p2pr->bar, p2pr->size, p2pr->bar_offset);
+ if (rc) {
+ dev_err(hdev->dev, "error adding p2p resource: %d\n", rc);
+ goto err;
+ }
+
+ /* Alloc all p2p mem */
+ p2pr->p2pmem = pci_alloc_p2pmem(hdev->pdev, p2pr->size);
+ if (!p2pr->p2pmem) {
+ dev_err(hdev->dev, "error allocating p2p memory\n");
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ p2pr->p2ppages = vmalloc((p2pr->size >> PAGE_SHIFT) * sizeof(struct page *));
+ if (!p2pr->p2ppages) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0, addr = p2pr->p2pmem ; i < (p2pr->size >> PAGE_SHIFT) ; ++i, addr += PAGE_SIZE) {
+ p2pr->p2ppages[i] = virt_to_page(addr);
+ if (!p2pr->p2ppages[i]) {
+ rc = -EFAULT;
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ hl_p2p_region_fini(hdev, p2pr);
+ return rc;
+}
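+
+/*
+ * Illustrative only (not part of this patch): an ASIC init flow could
+ * publish a BAR-backed region like so, with the bar/size/device_pa values
+ * being hypothetical:
+ *
+ *	struct hl_p2p_region *p2pr = &hdev->hldio.p2prs[0];
+ *
+ *	p2pr->bar = 4;
+ *	p2pr->bar_offset = 0;
+ *	p2pr->size = SZ_256M;
+ *	p2pr->device_pa = hdev->asic_prop.dram_base_address;
+ *	rc = hl_p2p_region_init(hdev, p2pr);
+ */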
+
+int hl_dio_start(struct hl_device *hdev)
+{
+ dev_dbg(hdev->dev, "initializing HLDIO\n");
+
+ /* Initialize the IO counter and enable IO */
+ hdev->hldio.inflight_ios = alloc_percpu(s64);
+ if (!hdev->hldio.inflight_ios)
+ return -ENOMEM;
+
+ hl_dio_set_io_enabled(hdev, true);
+
+ return 0;
+}
+
+void hl_dio_stop(struct hl_device *hdev)
+{
+ dev_dbg(hdev->dev, "deinitializing HLDIO\n");
+
+ if (hdev->hldio.io_enabled) {
+ /* Wait for all the IO to finish */
+ hl_dio_set_io_enabled(hdev, false);
+ if (hl_poll_timeout_condition(hdev, !hl_dio_count_io(hdev), 1000,
+ IO_STABILIZE_TIMEOUT))
+ dev_warn(hdev->dev, "timed out waiting for inflight IO to drain\n");
+ }
+
+ if (hdev->hldio.inflight_ios) {
+ free_percpu(hdev->hldio.inflight_ios);
+ hdev->hldio.inflight_ios = NULL;
+ }
+}
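As a sketch of the end-to-end call (function names are from this patch; the surrounding ioctl/debugfs caller and the values are assumptions), a synchronous 1 MiB read from file offset 0 into device virtual address va comes down to:

	size_t len_read = 0;
	int rc;

	rc = hl_dio_ssd2hl(hdev, ctx, fd, va, 0, SZ_1M, &len_read);
	if (rc)
		return rc;
	/* on success, len_read holds the byte count actually read */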
diff --git a/drivers/accel/habanalabs/common/hldio.h b/drivers/accel/habanalabs/common/hldio.h
new file mode 100644
index 000000000000..2874388f2851
--- /dev/null
+++ b/drivers/accel/habanalabs/common/hldio.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * hldio.h - NVMe Direct I/O (HLDIO) infrastructure for Habana Labs Driver
+ *
+ * This feature requires specific hardware setup and must not be built
+ * under COMPILE_TEST.
+ */
+
+#ifndef __HL_HLDIO_H__
+#define __HL_HLDIO_H__
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <linux/ktime.h> /* ktime functions */
+#include <linux/delay.h> /* usleep_range */
+#include <linux/kernel.h> /* might_sleep_if */
+#include <linux/errno.h> /* error codes */
+
+/* Forward declarations */
+struct hl_device;
+struct file;
+
+/* Enable only if Kconfig selected */
+#ifdef CONFIG_HL_HLDIO
+/**
+ * struct hl_p2p_region - describes a single P2P memory region
+ * @p2ppages: array of page structs for the P2P memory
+ * @p2pmem: virtual address of the P2P memory region
+ * @device_pa: physical address on the device
+ * @bar_offset: offset within the BAR
+ * @size: size of the region in bytes
+ * @bar: BAR number containing this region
+ */
+struct hl_p2p_region {
+ struct page **p2ppages;
+ void *p2pmem;
+ u64 device_pa;
+ u64 bar_offset;
+ u64 size;
+ int bar;
+};
+
+/**
+ * struct hl_dio_stats - Direct I/O statistics
+ * @total_ops: total number of operations attempted
+ * @successful_ops: number of successful operations
+ * @failed_ops: number of failed operations
+ * @bytes_transferred: total bytes successfully transferred
+ * @last_len_read: length of the last read operation
+ */
+struct hl_dio_stats {
+ u64 total_ops;
+ u64 successful_ops;
+ u64 failed_ops;
+ u64 bytes_transferred;
+ size_t last_len_read;
+};
+
+/**
+ * struct hl_dio - describes habanalabs direct storage interaction interface
+ * @p2prs: array of p2p regions
+ * @inflight_ios: percpu counter for inflight ios
+ * @np2prs: number of elements in p2prs
+ * @io_enabled: 1 if io is enabled 0 otherwise
+ */
+struct hl_dio {
+ struct hl_p2p_region *p2prs;
+ s64 __percpu *inflight_ios;
+ u8 np2prs;
+ u8 io_enabled;
+};
+
+int hl_dio_ssd2hl(struct hl_device *hdev, struct hl_ctx *ctx, int fd,
+ u64 device_va, off_t off_bytes, size_t len_bytes,
+ size_t *len_read);
+void hl_p2p_region_fini_all(struct hl_device *hdev);
+int hl_p2p_region_init(struct hl_device *hdev, struct hl_p2p_region *p2pr);
+int hl_dio_start(struct hl_device *hdev);
+void hl_dio_stop(struct hl_device *hdev);
+
+/* Init/teardown */
+int hl_hldio_init(struct hl_device *hdev);
+void hl_hldio_fini(struct hl_device *hdev);
+
+/* File operations */
+long hl_hldio_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
+
+/* DebugFS hooks */
+#ifdef CONFIG_DEBUG_FS
+void hl_hldio_debugfs_init(struct hl_device *hdev);
+void hl_hldio_debugfs_fini(struct hl_device *hdev);
+#else
+static inline void hl_hldio_debugfs_init(struct hl_device *hdev) { }
+static inline void hl_hldio_debugfs_fini(struct hl_device *hdev) { }
+#endif
+
+#else /* !CONFIG_HL_HLDIO */
+
+struct hl_p2p_region;
+/* Stubs when HLDIO is disabled */
+static inline int hl_dio_ssd2hl(struct hl_device *hdev, struct hl_ctx *ctx, int fd,
+ u64 device_va, off_t off_bytes, size_t len_bytes,
+ size_t *len_read)
+{ return -EOPNOTSUPP; }
+static inline void hl_p2p_region_fini_all(struct hl_device *hdev) {}
+static inline int hl_p2p_region_init(struct hl_device *hdev, struct hl_p2p_region *p2pr)
+{ return -EOPNOTSUPP; }
+static inline int hl_dio_start(struct hl_device *hdev) { return -EOPNOTSUPP; }
+static inline void hl_dio_stop(struct hl_device *hdev) {}
+
+static inline int hl_hldio_init(struct hl_device *hdev) { return 0; }
+static inline void hl_hldio_fini(struct hl_device *hdev) { }
+static inline long hl_hldio_ioctl(struct file *f, unsigned int c,
+ unsigned long a)
+{ return -ENOTTY; }
+static inline void hl_hldio_debugfs_init(struct hl_device *hdev) { }
+static inline void hl_hldio_debugfs_fini(struct hl_device *hdev) { }
+
+#endif /* CONFIG_HL_HLDIO */
+
+/* Simplified polling macro for HLDIO (no simulator support) */
+#define hl_poll_timeout_condition(hdev, cond, sleep_us, timeout_us) \
+({ \
+ ktime_t __timeout = ktime_add_us(ktime_get(), timeout_us); \
+ might_sleep_if(sleep_us); \
+ (void)(hdev); /* keep signature consistent, hdev unused */ \
+ for (;;) { \
+ mb(); /* ensure ordering of memory operations */ \
+ if (cond) \
+ break; \
+ if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) \
+ break; \
+ if (sleep_us) \
+ usleep_range((sleep_us >> 2) + 1, sleep_us); \
+ } \
+ (cond) ? 0 : -ETIMEDOUT; \
+})
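+
+/*
+ * Example usage (illustrative): wait up to one second for inflight IO to
+ * drain, polling roughly every millisecond:
+ *
+ *	rc = hl_poll_timeout_condition(hdev, !hl_dio_count_io(hdev),
+ *				       1000, USEC_PER_SEC);
+ */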
+
+#ifdef CONFIG_HL_HLDIO
+bool hl_device_supports_nvme(struct hl_device *hdev);
+#else
+static inline bool hl_device_supports_nvme(struct hl_device *hdev) { return false; }
+#endif
+
+#endif /* __HL_HLDIO_H__ */
diff --git a/drivers/accel/habanalabs/common/memory.c b/drivers/accel/habanalabs/common/memory.c
index 601fdbe70179..633db4bff46f 100644
--- a/drivers/accel/habanalabs/common/memory.c
+++ b/drivers/accel/habanalabs/common/memory.c
@@ -1829,9 +1829,6 @@ static void hl_release_dmabuf(struct dma_buf *dmabuf)
struct hl_dmabuf_priv *hl_dmabuf = dmabuf->priv;
struct hl_ctx *ctx;
- if (!hl_dmabuf)
- return;
-
ctx = hl_dmabuf->ctx;
if (hl_dmabuf->memhash_hnode)
@@ -1840,7 +1837,12 @@ static void hl_release_dmabuf(struct dma_buf *dmabuf)
atomic_dec(&ctx->hdev->dmabuf_export_cnt);
hl_ctx_put(ctx);
- /* Paired with get_file() in export_dmabuf() */
+ /*
+ * Paired with get_file() in export_dmabuf().
+ * 'ctx' can still be used here to get the file pointer, even after hl_ctx_put() was called,
+ * because releasing the compute device file involves another reference decrement, which
+ * can happen only after the fput() below.
+ */
fput(ctx->hpriv->file_priv->filp);
kfree(hl_dmabuf);
@@ -1859,7 +1861,12 @@ static int export_dmabuf(struct hl_ctx *ctx,
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct hl_device *hdev = ctx->hdev;
- int rc, fd;
+ CLASS(get_unused_fd, fd)(flags);
+
+ if (fd < 0) {
+ dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf, %d\n", fd);
+ return fd;
+ }
exp_info.ops = &habanalabs_dmabuf_ops;
exp_info.size = total_size;
@@ -1872,13 +1879,6 @@ static int export_dmabuf(struct hl_ctx *ctx,
return PTR_ERR(hl_dmabuf->dmabuf);
}
- fd = dma_buf_fd(hl_dmabuf->dmabuf, flags);
- if (fd < 0) {
- dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf, %d\n", fd);
- rc = fd;
- goto err_dma_buf_put;
- }
-
hl_dmabuf->ctx = ctx;
hl_ctx_get(hl_dmabuf->ctx);
atomic_inc(&ctx->hdev->dmabuf_export_cnt);
@@ -1890,13 +1890,9 @@ static int export_dmabuf(struct hl_ctx *ctx,
get_file(ctx->hpriv->file_priv->filp);
*dmabuf_fd = fd;
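+ /* take_fd() transfers ownership of the fd, disarming the CLASS(get_unused_fd) cleanup */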
+ fd_install(take_fd(fd), hl_dmabuf->dmabuf->file);
return 0;
-
-err_dma_buf_put:
- hl_dmabuf->dmabuf->priv = NULL;
- dma_buf_put(hl_dmabuf->dmabuf);
- return rc;
}
static int validate_export_params_common(struct hl_device *hdev, u64 addr, u64 size, u64 offset)
@@ -2341,7 +2337,7 @@ static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
if (rc < 0)
goto destroy_pages;
npages = rc;
- rc = -EFAULT;
+ rc = -ENOMEM;
goto put_pages;
}
userptr->npages = npages;
diff --git a/drivers/accel/habanalabs/common/memory_mgr.c b/drivers/accel/habanalabs/common/memory_mgr.c
index 99cd83139d46..4401beb99e42 100644
--- a/drivers/accel/habanalabs/common/memory_mgr.c
+++ b/drivers/accel/habanalabs/common/memory_mgr.c
@@ -259,13 +259,8 @@ int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
goto put_mem;
}
-#ifdef _HAS_TYPE_ARG_IN_ACCESS_OK
- if (!access_ok(VERIFY_WRITE, (void __user *)(uintptr_t)vma->vm_start,
- user_mem_size)) {
-#else
if (!access_ok((void __user *)(uintptr_t)vma->vm_start,
user_mem_size)) {
-#endif
dev_err(mmg->dev, "%s: User pointer is invalid - 0x%lx\n",
buf->behavior->topic, vma->vm_start);
diff --git a/drivers/accel/habanalabs/common/sysfs.c b/drivers/accel/habanalabs/common/sysfs.c
index 82f66520ec18..8f55ba3b4e73 100644
--- a/drivers/accel/habanalabs/common/sysfs.c
+++ b/drivers/accel/habanalabs/common/sysfs.c
@@ -96,14 +96,21 @@ static ssize_t vrm_ver_show(struct device *dev, struct device_attribute *attr, c
infineon_second_stage_third_instance =
(infineon_second_stage_version >> 16) & mask;
- if (cpucp_info->infineon_second_stage_version)
+ if (cpucp_info->infineon_version && cpucp_info->infineon_second_stage_version)
return sprintf(buf, "%#04x %#04x:%#04x:%#04x\n",
le32_to_cpu(cpucp_info->infineon_version),
infineon_second_stage_first_instance,
infineon_second_stage_second_instance,
infineon_second_stage_third_instance);
- else
+ else if (cpucp_info->infineon_second_stage_version)
+ return sprintf(buf, "%#04x:%#04x:%#04x\n",
+ infineon_second_stage_first_instance,
+ infineon_second_stage_second_instance,
+ infineon_second_stage_third_instance);
+ else if (cpucp_info->infineon_version)
return sprintf(buf, "%#04x\n", le32_to_cpu(cpucp_info->infineon_version));
+
+ return 0;
}
static DEVICE_ATTR_RO(vrm_ver);
diff --git a/drivers/accel/habanalabs/gaudi/gaudi.c b/drivers/accel/habanalabs/gaudi/gaudi.c
index fa893a9b826e..34771d75da9d 100644
--- a/drivers/accel/habanalabs/gaudi/gaudi.c
+++ b/drivers/accel/habanalabs/gaudi/gaudi.c
@@ -4168,10 +4168,29 @@ static int gaudi_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
VM_DONTCOPY | VM_NORESERVE);
+#ifdef _HAS_DMA_MMAP_COHERENT
+ /*
+ * If dma_alloc_coherent() returns a vmalloc address, set VM_MIXEDMAP
+ * so vm_insert_page() can handle it safely. Without this, the kernel
+ * may BUG_ON due to VM_PFNMAP.
+ */
+ if (is_vmalloc_addr(cpu_addr))
+ vm_flags_set(vma, VM_MIXEDMAP);
+
rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
(dma_addr - HOST_PHYS_BASE), size);
if (rc)
dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
+#else
+
+ rc = remap_pfn_range(vma, vma->vm_start,
+ virt_to_phys(cpu_addr) >> PAGE_SHIFT,
+ size, vma->vm_page_prot);
+ if (rc)
+ dev_err(hdev->dev, "remap_pfn_range error %d", rc);
+
+#endif
+
return rc;
}
diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2.c b/drivers/accel/habanalabs/gaudi2/gaudi2.c
index a38b88baadf2..b8c0689dba64 100644
--- a/drivers/accel/habanalabs/gaudi2/gaudi2.c
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2.c
@@ -728,6 +728,354 @@ static const int gaudi2_dma_core_async_event_id[] = {
[DMA_CORE_ID_KDMA] = GAUDI2_EVENT_KDMA0_CORE,
};
+const char *gaudi2_engine_id_str[] = {
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_EDMA_0),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_EDMA_1),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_MME),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_0),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_1),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_2),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_3),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_4),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_5),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_DEC_0),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_DEC_1),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_EDMA_0),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_EDMA_1),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_MME),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_0),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_1),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_2),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_3),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_4),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_5),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_DEC_0),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_DEC_1),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_EDMA_0),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_EDMA_1),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_MME),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_0),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_1),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_2),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_3),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_4),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_5),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_DEC_0),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_DEC_1),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_EDMA_0),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_EDMA_1),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_MME),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_0),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_1),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_2),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_3),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_4),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_5),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_DEC_0),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_DEC_1),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_6),
+ __stringify(GAUDI2_ENGINE_ID_PDMA_0),
+ __stringify(GAUDI2_ENGINE_ID_PDMA_1),
+ __stringify(GAUDI2_ENGINE_ID_ROT_0),
+ __stringify(GAUDI2_ENGINE_ID_ROT_1),
+ __stringify(GAUDI2_PCIE_ENGINE_ID_DEC_0),
+ __stringify(GAUDI2_PCIE_ENGINE_ID_DEC_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC0_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC0_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC1_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC1_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC2_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC2_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC3_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC3_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC4_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC4_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC5_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC5_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC6_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC6_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC7_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC7_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC8_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC8_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC9_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC9_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC10_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC10_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC11_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC11_1),
+ __stringify(GAUDI2_ENGINE_ID_PCIE),
+ __stringify(GAUDI2_ENGINE_ID_PSOC),
+ __stringify(GAUDI2_ENGINE_ID_ARC_FARM),
+ __stringify(GAUDI2_ENGINE_ID_KDMA),
+ __stringify(GAUDI2_ENGINE_ID_SIZE),
+};
+
+const char *gaudi2_queue_id_str[] = {
+ __stringify(GAUDI2_QUEUE_ID_PDMA_0_0),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_0_1),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_0_2),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_0_3),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_1_0),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_1_1),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_1_2),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_MME_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_MME_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_MME_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_MME_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_2_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_2_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_2_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_2_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_3_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_3_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_3_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_3_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_4_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_4_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_4_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_4_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_5_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_5_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_5_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_5_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_6_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_6_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_6_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_6_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_MME_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_MME_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_MME_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_MME_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_2_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_2_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_2_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_2_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_3_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_3_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_3_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_3_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_4_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_4_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_4_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_4_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_5_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_5_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_5_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_5_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_MME_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_MME_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_MME_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_MME_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_2_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_2_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_2_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_2_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_3_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_3_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_3_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_3_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_4_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_4_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_4_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_4_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_5_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_5_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_5_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_5_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_MME_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_MME_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_MME_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_MME_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_2_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_2_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_2_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_2_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_3_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_3_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_3_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_3_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_4_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_4_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_4_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_4_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_5_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_5_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_5_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_5_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_0_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_0_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_0_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_0_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_1_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_1_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_1_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_1_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_2_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_2_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_2_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_2_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_3_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_3_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_3_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_3_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_4_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_4_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_4_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_4_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_5_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_5_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_5_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_5_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_6_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_6_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_6_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_6_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_7_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_7_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_7_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_7_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_8_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_8_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_8_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_8_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_9_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_9_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_9_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_9_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_10_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_10_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_10_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_10_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_11_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_11_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_11_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_11_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_12_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_12_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_12_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_12_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_13_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_13_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_13_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_13_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_14_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_14_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_14_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_14_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_15_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_15_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_15_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_15_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_16_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_16_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_16_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_16_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_17_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_17_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_17_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_17_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_18_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_18_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_18_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_18_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_19_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_19_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_19_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_19_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_20_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_20_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_20_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_20_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_21_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_21_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_21_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_21_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_22_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_22_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_22_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_22_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_23_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_23_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_23_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_23_3),
+ __stringify(GAUDI2_QUEUE_ID_ROT_0_0),
+ __stringify(GAUDI2_QUEUE_ID_ROT_0_1),
+ __stringify(GAUDI2_QUEUE_ID_ROT_0_2),
+ __stringify(GAUDI2_QUEUE_ID_ROT_0_3),
+ __stringify(GAUDI2_QUEUE_ID_ROT_1_0),
+ __stringify(GAUDI2_QUEUE_ID_ROT_1_1),
+ __stringify(GAUDI2_QUEUE_ID_ROT_1_2),
+ __stringify(GAUDI2_QUEUE_ID_ROT_1_3),
+ __stringify(GAUDI2_QUEUE_ID_CPU_PQ),
+ __stringify(GAUDI2_QUEUE_ID_SIZE),
+};
+
static const char * const gaudi2_qm_sei_error_cause[GAUDI2_NUM_OF_QM_SEI_ERR_CAUSE] = {
"qman sei intr",
"arc sei intr"
@@ -3150,7 +3498,6 @@ static int gaudi2_early_init(struct hl_device *hdev)
rc = hl_fw_read_preboot_status(hdev);
if (rc) {
if (hdev->reset_on_preboot_fail)
- /* we are already on failure flow, so don't check if hw_fini fails. */
hdev->asic_funcs->hw_fini(hdev, true, false);
goto pci_fini;
}
@@ -3162,6 +3509,13 @@ static int gaudi2_early_init(struct hl_device *hdev)
dev_err(hdev->dev, "failed to reset HW in dirty state (%d)\n", rc);
goto pci_fini;
}
+
+ rc = hl_fw_read_preboot_status(hdev);
+ if (rc) {
+ if (hdev->reset_on_preboot_fail)
+ hdev->asic_funcs->hw_fini(hdev, true, false);
+ goto pci_fini;
+ }
}
return 0;
@@ -4836,7 +5190,7 @@ static void gaudi2_halt_engines(struct hl_device *hdev, bool hard_reset, bool fw
else
wait_timeout_ms = GAUDI2_RESET_WAIT_MSEC;
- if (fw_reset)
+ if (fw_reset || hdev->cpld_shutdown)
goto skip_engines;
gaudi2_stop_dma_qmans(hdev);
@@ -6484,6 +6838,13 @@ static int gaudi2_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
VM_DONTCOPY | VM_NORESERVE);
#ifdef _HAS_DMA_MMAP_COHERENT
+ /*
+ * If dma_alloc_coherent() returns a vmalloc address, set VM_MIXEDMAP
+ * so vm_insert_page() can handle it safely. Without this, the kernel
+ * may BUG_ON due to VM_PFNMAP.
+ */
+ if (is_vmalloc_addr(cpu_addr))
+ vm_flags_set(vma, VM_MIXEDMAP);
rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size);
if (rc)
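
For context, the pattern this hunk applies can be sketched standalone. A minimal, hedged example, assuming only the standard kernel APIs already used in the hunk (is_vmalloc_addr(), vm_flags_set(), dma_mmap_coherent()); the example_mmap() wrapper itself is hypothetical:

static int example_mmap(struct device *dev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	/*
	 * dma_alloc_coherent() may hand back a vmalloc address on some
	 * configurations. vm_insert_page() BUGs on VM_PFNMAP vmas for such
	 * pages, so flag the vma as VM_MIXEDMAP before mapping it.
	 */
	if (is_vmalloc_addr(cpu_addr))
		vm_flags_set(vma, VM_MIXEDMAP);

	return dma_mmap_coherent(dev, vma, cpu_addr, dma_addr, size);
}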
@@ -6774,7 +7135,8 @@ static int gaudi2_validate_cb_address(struct hl_device *hdev, struct hl_cs_parse
struct gaudi2_device *gaudi2 = hdev->asic_specific;
if (!gaudi2_is_queue_enabled(hdev, parser->hw_queue_id)) {
- dev_err(hdev->dev, "h/w queue %d is disabled\n", parser->hw_queue_id);
+ dev_err(hdev->dev, "h/w queue %s is disabled\n",
+ GAUDI2_QUEUE_ID_TO_STR(parser->hw_queue_id));
return -EINVAL;
}
@@ -7026,7 +7388,8 @@ static int gaudi2_test_queue_send_msg_short(struct hl_device *hdev, u32 hw_queue
rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, pkt_size, msg_info->dma_addr);
if (rc)
dev_err(hdev->dev,
- "Failed to send msg_short packet to H/W queue %d\n", hw_queue_id);
+ "Failed to send msg_short packet to H/W queue %s\n",
+ GAUDI2_QUEUE_ID_TO_STR(hw_queue_id));
return rc;
}
@@ -7052,8 +7415,8 @@ static int gaudi2_test_queue_wait_completion(struct hl_device *hdev, u32 hw_queu
timeout_usec);
if (rc == -ETIMEDOUT) {
- dev_err(hdev->dev, "H/W queue %d test failed (SOB_OBJ_0 == 0x%x)\n",
- hw_queue_id, tmp);
+ dev_err(hdev->dev, "H/W queue %s test failed (SOB_OBJ_0 == 0x%x)\n",
+ GAUDI2_QUEUE_ID_TO_STR(hw_queue_id), tmp);
rc = -EIO;
}
@@ -9603,8 +9966,8 @@ static int hl_arc_event_handle(struct hl_device *hdev, u16 event_type,
q = (struct hl_engine_arc_dccm_queue_full_irq *) &payload;
gaudi2_print_event(hdev, event_type, true,
- "ARC DCCM Full event: EngId: %u, Intr_type: %u, Qidx: %u",
- engine_id, intr_type, q->queue_index);
+ "ARC DCCM Full event: Eng: %s, Intr_type: %u, Qidx: %u",
+ GAUDI2_ENG_ID_TO_STR(engine_id), intr_type, q->queue_index);
return 1;
default:
gaudi2_print_event(hdev, event_type, true, "Unknown ARC event type");
@@ -10172,7 +10535,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
dev_err(hdev->dev, "CPLD shutdown event, reset reason: 0x%llx\n",
le64_to_cpu(eq_entry->data[0]));
error_count = GAUDI2_NA_EVENT_CAUSE;
- event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
+ hl_eq_cpld_shutdown_event_handle(hdev, event_type, &event_mask);
break;
case GAUDI2_EVENT_CPU_PKT_SANITY_FAILED:
@@ -10260,6 +10623,7 @@ reset_device:
if (event_mask & HL_NOTIFIER_EVENT_GENERAL_HW_ERR)
hl_handle_critical_hw_err(hdev, event_type, &event_mask);
+ hl_debugfs_cfg_access_history_dump(hdev);
event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET;
hl_device_cond_reset(hdev, reset_flags, event_mask);
}
@@ -10296,8 +10660,8 @@ static int gaudi2_memset_memory_chunk_using_edma_qm(struct hl_device *hdev,
rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, pkt_size, phys_addr);
if (rc)
- dev_err(hdev->dev, "Failed to send lin_dma packet to H/W queue %d\n",
- hw_queue_id);
+ dev_err(hdev->dev, "Failed to send lin_dma packet to H/W queue %s\n",
+ GAUDI2_QUEUE_ID_TO_STR(hw_queue_id));
return rc;
}
@@ -10437,7 +10801,7 @@ end:
(u64 *)(lin_dma_pkts_arr), DEBUGFS_WRITE64);
WREG32(sob_addr, 0);
- kfree(lin_dma_pkts_arr);
+ kvfree(lin_dma_pkts_arr);
return rc;
}
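
The kfree() to kvfree() change matters because the packet array is, per the surrounding function, presumably allocated with a kvmalloc-family call, which may return either slab or vmalloc memory. A short sketch of the pairing rule (names hypothetical):

static int example_alloc(size_t count)
{
	u64 *pkts = kvmalloc_array(count, sizeof(*pkts), GFP_KERNEL);

	if (!pkts)
		return -ENOMEM;
	/* ... fill and send the packets ... */
	kvfree(pkts);	/* picks kfree() or vfree(); a bare kfree() would be wrong */
	return 0;
}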
diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2P.h b/drivers/accel/habanalabs/gaudi2/gaudi2P.h
index 05117272cac7..bdf5c1bd2d63 100644
--- a/drivers/accel/habanalabs/gaudi2/gaudi2P.h
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2P.h
@@ -240,6 +240,15 @@
#define GAUDI2_NUM_TESTED_QS (GAUDI2_QUEUE_ID_CPU_PQ - GAUDI2_QUEUE_ID_PDMA_0_0)
+extern const char *gaudi2_engine_id_str[];
+extern const char *gaudi2_queue_id_str[];
+
+#define GAUDI2_ENG_ID_TO_STR(initiator) ((initiator) >= GAUDI2_ENGINE_ID_SIZE ? "not found" : \
+ gaudi2_engine_id_str[initiator])
+
+#define GAUDI2_QUEUE_ID_TO_STR(initiator) ((initiator) >= GAUDI2_QUEUE_ID_SIZE ? "not found" : \
+ gaudi2_queue_id_str[initiator])
+
enum gaudi2_reserved_sob_id {
GAUDI2_RESERVED_SOB_CS_COMPLETION_FIRST,
GAUDI2_RESERVED_SOB_CS_COMPLETION_LAST =
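
A brief, hedged usage sketch of the new helpers: any out-of-range ID degrades to "not found" rather than indexing past the string tables. The caller here is hypothetical; note the macro argument is evaluated twice, so a plain variable should be passed:

static void example_report(struct hl_device *hdev, u32 hw_queue_id)
{
	/* Safe even for a corrupted ID >= GAUDI2_QUEUE_ID_SIZE. */
	dev_err(hdev->dev, "h/w queue %s is disabled\n",
		GAUDI2_QUEUE_ID_TO_STR(hw_queue_id));
}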
diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c b/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c
index 2423620ff358..bc3c57bda5cd 100644
--- a/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c
@@ -2426,7 +2426,7 @@ static int gaudi2_config_bmon(struct hl_device *hdev, struct hl_debug_params *pa
WREG32(base_reg + mmBMON_ADDRH_E3_OFFSET, 0);
WREG32(base_reg + mmBMON_REDUCTION_OFFSET, 0);
WREG32(base_reg + mmBMON_STM_TRC_OFFSET, 0x7 | (0xA << 8));
- WREG32(base_reg + mmBMON_CR_OFFSET, 0x77 | 0xf << 24);
+ WREG32(base_reg + mmBMON_CR_OFFSET, 0x41);
}
return 0;
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 3d6d52492536..3289751b4757 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -677,7 +677,7 @@ static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
static void ivpu_dev_fini(struct ivpu_device *vdev)
{
ivpu_jobs_abort_all(vdev);
- ivpu_pm_cancel_recovery(vdev);
+ ivpu_pm_disable_recovery(vdev);
ivpu_pm_disable(vdev);
ivpu_prepare_for_reset(vdev);
ivpu_shutdown(vdev);
diff --git a/drivers/accel/ivpu/ivpu_fw.h b/drivers/accel/ivpu/ivpu_fw.h
index 9a3935be1c05..7081913fb0dd 100644
--- a/drivers/accel/ivpu/ivpu_fw.h
+++ b/drivers/accel/ivpu/ivpu_fw.h
@@ -45,7 +45,7 @@ struct ivpu_fw_info {
int ivpu_fw_init(struct ivpu_device *vdev);
void ivpu_fw_fini(struct ivpu_device *vdev);
void ivpu_fw_load(struct ivpu_device *vdev);
-void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *bp);
+void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params);
static inline bool ivpu_fw_is_cold_boot(struct ivpu_device *vdev)
{
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.c b/drivers/accel/ivpu/ivpu_hw_btrs.c
index b236c7234daa..afdb3b2aa72a 100644
--- a/drivers/accel/ivpu/ivpu_hw_btrs.c
+++ b/drivers/accel/ivpu/ivpu_hw_btrs.c
@@ -33,7 +33,6 @@
#define PLL_CDYN_DEFAULT 0x80
#define PLL_EPP_DEFAULT 0x80
-#define PLL_CONFIG_DEFAULT 0x0
#define PLL_REF_CLK_FREQ 50000000ull
#define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ)
@@ -303,7 +302,7 @@ static void prepare_wp_request(struct ivpu_device *vdev, struct wp_request *wp,
wp->epp = 0;
} else {
wp->target = hw->pll.pn_ratio;
- wp->cfg = enable ? PLL_CONFIG_DEFAULT : 0;
+ wp->cfg = 0;
wp->cdyn = enable ? PLL_CDYN_DEFAULT : 0;
wp->epp = enable ? PLL_EPP_DEFAULT : 0;
}
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.h b/drivers/accel/ivpu/ivpu_hw_btrs.h
index d2d82651976d..032c384ac3d4 100644
--- a/drivers/accel/ivpu/ivpu_hw_btrs.h
+++ b/drivers/accel/ivpu/ivpu_hw_btrs.h
@@ -36,7 +36,7 @@ u32 ivpu_hw_btrs_dpu_freq_get(struct ivpu_device *vdev);
bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq);
bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq);
int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable);
-void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 dct_percent);
+void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 active_percent);
u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev);
u32 ivpu_hw_btrs_telemetry_size_get(struct ivpu_device *vdev);
u32 ivpu_hw_btrs_telemetry_enable_get(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
index 39f83225c181..5f00809d448a 100644
--- a/drivers/accel/ivpu/ivpu_ipc.c
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -141,7 +141,6 @@ ivpu_ipc_rx_msg_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
struct ivpu_ipc_rx_msg *rx_msg;
lockdep_assert_held(&ipc->cons_lock);
- lockdep_assert_irqs_disabled();
rx_msg = kzalloc(sizeof(*rx_msg), GFP_ATOMIC);
if (!rx_msg) {
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index eacda1dbe840..475ddc94f1cf 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -417,10 +417,10 @@ void ivpu_pm_init(struct ivpu_device *vdev)
ivpu_dbg(vdev, PM, "Autosuspend delay = %d\n", delay);
}
-void ivpu_pm_cancel_recovery(struct ivpu_device *vdev)
+void ivpu_pm_disable_recovery(struct ivpu_device *vdev)
{
drm_WARN_ON(&vdev->drm, delayed_work_pending(&vdev->pm->job_timeout_work));
- cancel_work_sync(&vdev->pm->recovery_work);
+ disable_work_sync(&vdev->pm->recovery_work);
}
void ivpu_pm_enable(struct ivpu_device *vdev)
diff --git a/drivers/accel/ivpu/ivpu_pm.h b/drivers/accel/ivpu/ivpu_pm.h
index 89b264cc0e3e..a2aa7a27f32e 100644
--- a/drivers/accel/ivpu/ivpu_pm.h
+++ b/drivers/accel/ivpu/ivpu_pm.h
@@ -25,7 +25,7 @@ struct ivpu_pm_info {
void ivpu_pm_init(struct ivpu_device *vdev);
void ivpu_pm_enable(struct ivpu_device *vdev);
void ivpu_pm_disable(struct ivpu_device *vdev);
-void ivpu_pm_cancel_recovery(struct ivpu_device *vdev);
+void ivpu_pm_disable_recovery(struct ivpu_device *vdev);
int ivpu_pm_suspend_cb(struct device *dev);
int ivpu_pm_resume_cb(struct device *dev);
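
The rename tracks the semantic change: cancel_work_sync() only cancels a pending or running work item, while disable_work_sync() also prevents it from being queued again, which is what teardown wants when concurrent paths may still trigger recovery. A minimal sketch (the wrapper is hypothetical):

#include <linux/workqueue.h>

static void example_fini(struct work_struct *recovery_work)
{
	/* Cancel any in-flight recovery and block future queueing attempts. */
	disable_work_sync(recovery_work);
}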
diff --git a/drivers/accel/rocket/Kconfig b/drivers/accel/rocket/Kconfig
new file mode 100644
index 000000000000..16465abe0660
--- /dev/null
+++ b/drivers/accel/rocket/Kconfig
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config DRM_ACCEL_ROCKET
+ tristate "Rocket (support for Rockchip NPUs)"
+ depends on DRM_ACCEL
+ depends on (ARCH_ROCKCHIP && ARM64) || COMPILE_TEST
+ depends on ROCKCHIP_IOMMU || COMPILE_TEST
+ depends on MMU
+ select DRM_SCHED
+ select DRM_GEM_SHMEM_HELPER
+ help
+ Choose this option if you have a Rockchip SoC that contains a
+ compatible Neural Processing Unit (NPU), such as the RK3588.
+ Rockchip calls it either RKNN or RKNPU; it accelerates inference
+ of neural networks.
+
+ The interface exposed to userspace is described in
+ include/uapi/drm/rocket_accel.h and is used by the Rocket userspace
+ driver in Mesa3D.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rocket.
diff --git a/drivers/accel/rocket/Makefile b/drivers/accel/rocket/Makefile
new file mode 100644
index 000000000000..3713dfe223d6
--- /dev/null
+++ b/drivers/accel/rocket/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_DRM_ACCEL_ROCKET) := rocket.o
+
+rocket-y := \
+ rocket_core.o \
+ rocket_device.o \
+ rocket_drv.o \
+ rocket_gem.o \
+ rocket_job.o
diff --git a/drivers/accel/rocket/rocket_core.c b/drivers/accel/rocket/rocket_core.c
new file mode 100644
index 000000000000..abe7719c1db4
--- /dev/null
+++ b/drivers/accel/rocket/rocket_core.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include "rocket_core.h"
+#include "rocket_job.h"
+
+int rocket_core_init(struct rocket_core *core)
+{
+ struct device *dev = core->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ u32 version;
+ int err = 0;
+
+ core->resets[0].id = "srst_a";
+ core->resets[1].id = "srst_h";
+ err = devm_reset_control_bulk_get_exclusive(&pdev->dev, ARRAY_SIZE(core->resets),
+ core->resets);
+ if (err)
+ return dev_err_probe(dev, err, "failed to get resets for core %d\n", core->index);
+
+ err = devm_clk_bulk_get(dev, ARRAY_SIZE(core->clks), core->clks);
+ if (err)
+ return dev_err_probe(dev, err, "failed to get clocks for core %d\n", core->index);
+
+ core->pc_iomem = devm_platform_ioremap_resource_byname(pdev, "pc");
+ if (IS_ERR(core->pc_iomem)) {
+ dev_err(dev, "couldn't find PC registers %ld\n", PTR_ERR(core->pc_iomem));
+ return PTR_ERR(core->pc_iomem);
+ }
+
+ core->cna_iomem = devm_platform_ioremap_resource_byname(pdev, "cna");
+ if (IS_ERR(core->cna_iomem)) {
+ dev_err(dev, "couldn't find CNA registers %ld\n", PTR_ERR(core->cna_iomem));
+ return PTR_ERR(core->cna_iomem);
+ }
+
+ core->core_iomem = devm_platform_ioremap_resource_byname(pdev, "core");
+ if (IS_ERR(core->core_iomem)) {
+ dev_err(dev, "couldn't find CORE registers %ld\n", PTR_ERR(core->core_iomem));
+ return PTR_ERR(core->core_iomem);
+ }
+
+ dma_set_max_seg_size(dev, UINT_MAX);
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ if (err)
+ return err;
+
+ core->iommu_group = iommu_group_get(dev);
+
+ err = rocket_job_init(core);
+ if (err)
+ return err;
+
+ pm_runtime_use_autosuspend(dev);
+
+ /*
+ * As this NPU will most often be used as part of a media pipeline that
+ * ends with presentation on a display, choose 50 ms (~3 frames at
+ * 60 Hz) as the autosuspend delay: that keeps the device powered up
+ * while the pipeline is running.
+ */
+ pm_runtime_set_autosuspend_delay(dev, 50);
+
+ pm_runtime_enable(dev);
+
+ err = pm_runtime_resume_and_get(dev);
+ if (err) {
+ rocket_job_fini(core);
+ return err;
+ }
+
+ version = rocket_pc_readl(core, VERSION);
+ version += rocket_pc_readl(core, VERSION_NUM) & 0xffff;
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ dev_info(dev, "Rockchip NPU core %d version: %d\n", core->index, version);
+
+ return 0;
+}
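
The autosuspend setup above pairs with a get/put bracket at the job boundaries elsewhere in the driver; a hedged sketch of that pattern (the example_* call-sites are hypothetical, the pm_runtime_* APIs are the standard ones):

static int example_begin_job(struct rocket_core *core)
{
	int ret = pm_runtime_get_sync(core->dev);	/* power up, take a usage ref */

	if (ret < 0) {
		pm_runtime_put_noidle(core->dev);	/* rebalance the ref on failure */
		return ret;
	}
	return 0;
}

static void example_end_job(struct rocket_core *core)
{
	pm_runtime_mark_last_busy(core->dev);		/* restart the 50 ms idle window */
	pm_runtime_put_autosuspend(core->dev);		/* drop ref; suspend after delay */
}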
+
+void rocket_core_fini(struct rocket_core *core)
+{
+ pm_runtime_dont_use_autosuspend(core->dev);
+ pm_runtime_disable(core->dev);
+ iommu_group_put(core->iommu_group);
+ core->iommu_group = NULL;
+ rocket_job_fini(core);
+}
+
+void rocket_core_reset(struct rocket_core *core)
+{
+ reset_control_bulk_assert(ARRAY_SIZE(core->resets), core->resets);
+
+ udelay(10);
+
+ reset_control_bulk_deassert(ARRAY_SIZE(core->resets), core->resets);
+}
diff --git a/drivers/accel/rocket/rocket_core.h b/drivers/accel/rocket/rocket_core.h
new file mode 100644
index 000000000000..f6d7382854ca
--- /dev/null
+++ b/drivers/accel/rocket/rocket_core.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#ifndef __ROCKET_CORE_H__
+#define __ROCKET_CORE_H__
+
+#include <drm/gpu_scheduler.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mutex_types.h>
+#include <linux/reset.h>
+
+#include "rocket_registers.h"
+
+#define rocket_pc_readl(core, reg) \
+ readl((core)->pc_iomem + (REG_PC_##reg))
+#define rocket_pc_writel(core, reg, value) \
+ writel(value, (core)->pc_iomem + (REG_PC_##reg))
+
+#define rocket_cna_readl(core, reg) \
+ readl((core)->cna_iomem + (REG_CNA_##reg) - REG_CNA_S_STATUS)
+#define rocket_cna_writel(core, reg, value) \
+ writel(value, (core)->cna_iomem + (REG_CNA_##reg) - REG_CNA_S_STATUS)
+
+#define rocket_core_readl(core, reg) \
+ readl((core)->core_iomem + (REG_CORE_##reg) - REG_CORE_S_STATUS)
+#define rocket_core_writel(core, reg, value) \
+ writel(value, (core)->core_iomem + (REG_CORE_##reg) - REG_CORE_S_STATUS)
+
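
One detail worth calling out: register offsets in rocket_registers.h are absolute, while the "cna" and "core" MMIO resources evidently start at their respective S_STATUS registers, hence the subtraction in the accessors. A sketch of the literal expansion (the wrapper function is hypothetical):

static u32 example_read_pointer(struct rocket_core *core)
{
	/*
	 * Expands to:
	 *	readl(core->cna_iomem + REG_CNA_S_POINTER - REG_CNA_S_STATUS)
	 */
	return rocket_cna_readl(core, S_POINTER);
}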
+struct rocket_core {
+ struct device *dev;
+ struct rocket_device *rdev;
+ unsigned int index;
+
+ int irq;
+ void __iomem *pc_iomem;
+ void __iomem *cna_iomem;
+ void __iomem *core_iomem;
+ struct clk_bulk_data clks[4];
+ struct reset_control_bulk_data resets[2];
+
+ struct iommu_group *iommu_group;
+
+ struct mutex job_lock;
+ struct rocket_job *in_flight_job;
+
+ spinlock_t fence_lock;
+
+ struct {
+ struct workqueue_struct *wq;
+ struct work_struct work;
+ atomic_t pending;
+ } reset;
+
+ struct drm_gpu_scheduler sched;
+ u64 fence_context;
+ u64 emit_seqno;
+};
+
+int rocket_core_init(struct rocket_core *core);
+void rocket_core_fini(struct rocket_core *core);
+void rocket_core_reset(struct rocket_core *core);
+
+#endif
diff --git a/drivers/accel/rocket/rocket_device.c b/drivers/accel/rocket/rocket_device.c
new file mode 100644
index 000000000000..46e6ee1e72c5
--- /dev/null
+++ b/drivers/accel/rocket/rocket_device.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#include <drm/drm_drv.h>
+#include <linux/array_size.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+#include "rocket_device.h"
+
+struct rocket_device *rocket_device_init(struct platform_device *pdev,
+ const struct drm_driver *rocket_drm_driver)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *core_node;
+ struct rocket_device *rdev;
+ struct drm_device *ddev;
+ unsigned int num_cores = 0;
+ int err;
+
+ rdev = devm_drm_dev_alloc(dev, rocket_drm_driver, struct rocket_device, ddev);
+ if (IS_ERR(rdev))
+ return rdev;
+
+ ddev = &rdev->ddev;
+ dev_set_drvdata(dev, rdev);
+
+ for_each_compatible_node(core_node, NULL, "rockchip,rk3588-rknn-core")
+ if (of_device_is_available(core_node))
+ num_cores++;
+
+ rdev->cores = devm_kcalloc(dev, num_cores, sizeof(*rdev->cores), GFP_KERNEL);
+ if (!rdev->cores)
+ return ERR_PTR(-ENOMEM);
+
+ dma_set_max_seg_size(dev, UINT_MAX);
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ if (err)
+ return ERR_PTR(err);
+
+ err = devm_mutex_init(dev, &rdev->sched_lock);
+ if (err)
+ return ERR_PTR(err);
+
+ err = drm_dev_register(ddev, 0);
+ if (err)
+ return ERR_PTR(err);
+
+ return rdev;
+}
+
+void rocket_device_fini(struct rocket_device *rdev)
+{
+ WARN_ON(rdev->num_cores > 0);
+
+ drm_dev_unregister(&rdev->ddev);
+}
diff --git a/drivers/accel/rocket/rocket_device.h b/drivers/accel/rocket/rocket_device.h
new file mode 100644
index 000000000000..ce662abc01d3
--- /dev/null
+++ b/drivers/accel/rocket/rocket_device.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#ifndef __ROCKET_DEVICE_H__
+#define __ROCKET_DEVICE_H__
+
+#include <drm/drm_device.h>
+#include <linux/clk.h>
+#include <linux/container_of.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+
+#include "rocket_core.h"
+
+struct rocket_device {
+ struct drm_device ddev;
+
+ struct mutex sched_lock;
+
+ struct rocket_core *cores;
+ unsigned int num_cores;
+};
+
+struct rocket_device *rocket_device_init(struct platform_device *pdev,
+ const struct drm_driver *rocket_drm_driver);
+void rocket_device_fini(struct rocket_device *rdev);
+#define to_rocket_device(drm_dev) \
+ ((struct rocket_device *)(container_of((drm_dev), struct rocket_device, ddev)))
+
+#endif /* __ROCKET_DEVICE_H__ */
diff --git a/drivers/accel/rocket/rocket_drv.c b/drivers/accel/rocket/rocket_drv.c
new file mode 100644
index 000000000000..5c0b63f0a8f0
--- /dev/null
+++ b/drivers/accel/rocket/rocket_drv.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#include <drm/drm_accel.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_ioctl.h>
+#include <drm/rocket_accel.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/iommu.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "rocket_drv.h"
+#include "rocket_gem.h"
+#include "rocket_job.h"
+
+/*
+ * Facade device that exposes a single DRM device to userspace and
+ * schedules jobs to any of the RKNN cores in the system.
+ */
+static struct platform_device *drm_dev;
+static struct rocket_device *rdev;
+
+static void
+rocket_iommu_domain_destroy(struct kref *kref)
+{
+ struct rocket_iommu_domain *domain = container_of(kref, struct rocket_iommu_domain, kref);
+
+ iommu_domain_free(domain->domain);
+ domain->domain = NULL;
+ kfree(domain);
+}
+
+static struct rocket_iommu_domain*
+rocket_iommu_domain_create(struct device *dev)
+{
+ struct rocket_iommu_domain *domain = kmalloc(sizeof(*domain), GFP_KERNEL);
+ void *err;
+
+ if (!domain)
+ return ERR_PTR(-ENOMEM);
+
+ domain->domain = iommu_paging_domain_alloc(dev);
+ if (IS_ERR(domain->domain)) {
+ err = ERR_CAST(domain->domain);
+ kfree(domain);
+ return err;
+ }
+ kref_init(&domain->kref);
+
+ return domain;
+}
+
+struct rocket_iommu_domain *
+rocket_iommu_domain_get(struct rocket_file_priv *rocket_priv)
+{
+ kref_get(&rocket_priv->domain->kref);
+ return rocket_priv->domain;
+}
+
+void
+rocket_iommu_domain_put(struct rocket_iommu_domain *domain)
+{
+ kref_put(&domain->kref, rocket_iommu_domain_destroy);
+}
+
+static int
+rocket_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct rocket_device *rdev = to_rocket_device(dev);
+ struct rocket_file_priv *rocket_priv;
+ u64 start, end;
+ int ret;
+
+ if (!try_module_get(THIS_MODULE))
+ return -EINVAL;
+
+ rocket_priv = kzalloc(sizeof(*rocket_priv), GFP_KERNEL);
+ if (!rocket_priv) {
+ ret = -ENOMEM;
+ goto err_put_mod;
+ }
+
+ rocket_priv->rdev = rdev;
+ rocket_priv->domain = rocket_iommu_domain_create(rdev->cores[0].dev);
+ if (IS_ERR(rocket_priv->domain)) {
+ ret = PTR_ERR(rocket_priv->domain);
+ goto err_free;
+ }
+
+ file->driver_priv = rocket_priv;
+
+ start = rocket_priv->domain->domain->geometry.aperture_start;
+ end = rocket_priv->domain->domain->geometry.aperture_end;
+ drm_mm_init(&rocket_priv->mm, start, end - start + 1);
+ mutex_init(&rocket_priv->mm_lock);
+
+ ret = rocket_job_open(rocket_priv);
+ if (ret)
+ goto err_mm_takedown;
+
+ return 0;
+
+err_mm_takedown:
+ mutex_destroy(&rocket_priv->mm_lock);
+ drm_mm_takedown(&rocket_priv->mm);
+ rocket_iommu_domain_put(rocket_priv->domain);
+err_free:
+ kfree(rocket_priv);
+err_put_mod:
+ module_put(THIS_MODULE);
+ return ret;
+}
+
+static void
+rocket_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct rocket_file_priv *rocket_priv = file->driver_priv;
+
+ rocket_job_close(rocket_priv);
+ mutex_destroy(&rocket_priv->mm_lock);
+ drm_mm_takedown(&rocket_priv->mm);
+ rocket_iommu_domain_put(rocket_priv->domain);
+ kfree(rocket_priv);
+ module_put(THIS_MODULE);
+}
+
+static const struct drm_ioctl_desc rocket_drm_driver_ioctls[] = {
+#define ROCKET_IOCTL(n, func) \
+ DRM_IOCTL_DEF_DRV(ROCKET_##n, rocket_ioctl_##func, 0)
+
+ ROCKET_IOCTL(CREATE_BO, create_bo),
+ ROCKET_IOCTL(SUBMIT, submit),
+ ROCKET_IOCTL(PREP_BO, prep_bo),
+ ROCKET_IOCTL(FINI_BO, fini_bo),
+};
+
+DEFINE_DRM_ACCEL_FOPS(rocket_accel_driver_fops);
+
+/*
+ * Rocket driver version:
+ * - 1.0 - initial interface
+ */
+static const struct drm_driver rocket_drm_driver = {
+ .driver_features = DRIVER_COMPUTE_ACCEL | DRIVER_GEM,
+ .open = rocket_open,
+ .postclose = rocket_postclose,
+ .gem_create_object = rocket_gem_create_object,
+ .ioctls = rocket_drm_driver_ioctls,
+ .num_ioctls = ARRAY_SIZE(rocket_drm_driver_ioctls),
+ .fops = &rocket_accel_driver_fops,
+ .name = "rocket",
+ .desc = "rocket DRM",
+};
+
+static int rocket_probe(struct platform_device *pdev)
+{
+ if (rdev == NULL) {
+ /* First core probing, initialize DRM device. */
+ rdev = rocket_device_init(drm_dev, &rocket_drm_driver);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to initialize rocket device\n");
+ return PTR_ERR(rdev);
+ }
+ }
+
+ unsigned int core = rdev->num_cores;
+
+ dev_set_drvdata(&pdev->dev, rdev);
+
+ rdev->cores[core].rdev = rdev;
+ rdev->cores[core].dev = &pdev->dev;
+ rdev->cores[core].index = core;
+
+ rdev->num_cores++;
+
+ return rocket_core_init(&rdev->cores[core]);
+}
+
+static void rocket_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ for (unsigned int core = 0; core < rdev->num_cores; core++) {
+ if (rdev->cores[core].dev == dev) {
+ rocket_core_fini(&rdev->cores[core]);
+ rdev->num_cores--;
+ break;
+ }
+ }
+
+ if (rdev->num_cores == 0) {
+ /* Last core removed, deinitialize DRM device. */
+ rocket_device_fini(rdev);
+ rdev = NULL;
+ }
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "rockchip,rk3588-rknn-core" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
+static int find_core_for_dev(struct device *dev)
+{
+ struct rocket_device *rdev = dev_get_drvdata(dev);
+
+ for (unsigned int core = 0; core < rdev->num_cores; core++) {
+ if (dev == rdev->cores[core].dev)
+ return core;
+ }
+
+ return -1;
+}
+
+static int rocket_device_runtime_resume(struct device *dev)
+{
+ struct rocket_device *rdev = dev_get_drvdata(dev);
+ int core = find_core_for_dev(dev);
+ int err = 0;
+
+ if (core < 0)
+ return -ENODEV;
+
+ err = clk_bulk_prepare_enable(ARRAY_SIZE(rdev->cores[core].clks), rdev->cores[core].clks);
+ if (err) {
+ dev_err(dev, "failed to enable clocks for core %d (%d)\n", core, err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int rocket_device_runtime_suspend(struct device *dev)
+{
+ struct rocket_device *rdev = dev_get_drvdata(dev);
+ int core = find_core_for_dev(dev);
+
+ if (core < 0)
+ return -ENODEV;
+
+ if (!rocket_job_is_idle(&rdev->cores[core]))
+ return -EBUSY;
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(rdev->cores[core].clks), rdev->cores[core].clks);
+
+ return 0;
+}
+
+EXPORT_GPL_DEV_PM_OPS(rocket_pm_ops) = {
+ RUNTIME_PM_OPS(rocket_device_runtime_suspend, rocket_device_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+};
+
+static struct platform_driver rocket_driver = {
+ .probe = rocket_probe,
+ .remove = rocket_remove,
+ .driver = {
+ .name = "rocket",
+ .pm = pm_ptr(&rocket_pm_ops),
+ .of_match_table = dt_match,
+ },
+};
+
+static int __init rocket_register(void)
+{
+ drm_dev = platform_device_register_simple("rknn", -1, NULL, 0);
+ if (IS_ERR(drm_dev))
+ return PTR_ERR(drm_dev);
+
+ return platform_driver_register(&rocket_driver);
+}
+
+static void __exit rocket_unregister(void)
+{
+ platform_driver_unregister(&rocket_driver);
+
+ platform_device_unregister(drm_dev);
+}
+
+module_init(rocket_register);
+module_exit(rocket_unregister);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DRM driver for the Rockchip NPU IP");
+MODULE_AUTHOR("Tomeu Vizoso");
diff --git a/drivers/accel/rocket/rocket_drv.h b/drivers/accel/rocket/rocket_drv.h
new file mode 100644
index 000000000000..2c673bb99ccc
--- /dev/null
+++ b/drivers/accel/rocket/rocket_drv.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#ifndef __ROCKET_DRV_H__
+#define __ROCKET_DRV_H__
+
+#include <drm/drm_mm.h>
+#include <drm/gpu_scheduler.h>
+
+#include "rocket_device.h"
+
+extern const struct dev_pm_ops rocket_pm_ops;
+
+struct rocket_iommu_domain {
+ struct iommu_domain *domain;
+ struct kref kref;
+};
+
+struct rocket_file_priv {
+ struct rocket_device *rdev;
+
+ struct rocket_iommu_domain *domain;
+ struct drm_mm mm;
+ struct mutex mm_lock;
+
+ struct drm_sched_entity sched_entity;
+};
+
+struct rocket_iommu_domain *rocket_iommu_domain_get(struct rocket_file_priv *rocket_priv);
+void rocket_iommu_domain_put(struct rocket_iommu_domain *domain);
+
+#endif
diff --git a/drivers/accel/rocket/rocket_gem.c b/drivers/accel/rocket/rocket_gem.c
new file mode 100644
index 000000000000..0551e11cc184
--- /dev/null
+++ b/drivers/accel/rocket/rocket_gem.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#include <drm/drm_device.h>
+#include <drm/drm_utils.h>
+#include <drm/rocket_accel.h>
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+
+#include "rocket_drv.h"
+#include "rocket_gem.h"
+
+static void rocket_gem_bo_free(struct drm_gem_object *obj)
+{
+ struct rocket_gem_object *bo = to_rocket_bo(obj);
+ struct rocket_file_priv *rocket_priv = bo->driver_priv;
+ size_t unmapped;
+
+ drm_WARN_ON(obj->dev, refcount_read(&bo->base.pages_use_count) > 1);
+
+ unmapped = iommu_unmap(bo->domain->domain, bo->mm.start, bo->size);
+ drm_WARN_ON(obj->dev, unmapped != bo->size);
+
+ mutex_lock(&rocket_priv->mm_lock);
+ drm_mm_remove_node(&bo->mm);
+ mutex_unlock(&rocket_priv->mm_lock);
+
+ rocket_iommu_domain_put(bo->domain);
+ bo->domain = NULL;
+
+ drm_gem_shmem_free(&bo->base);
+}
+
+static const struct drm_gem_object_funcs rocket_gem_funcs = {
+ .free = rocket_gem_bo_free,
+ .print_info = drm_gem_shmem_object_print_info,
+ .pin = drm_gem_shmem_object_pin,
+ .unpin = drm_gem_shmem_object_unpin,
+ .get_sg_table = drm_gem_shmem_object_get_sg_table,
+ .vmap = drm_gem_shmem_object_vmap,
+ .vunmap = drm_gem_shmem_object_vunmap,
+ .mmap = drm_gem_shmem_object_mmap,
+ .vm_ops = &drm_gem_shmem_vm_ops,
+};
+
+struct drm_gem_object *rocket_gem_create_object(struct drm_device *dev, size_t size)
+{
+ struct rocket_gem_object *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ obj->base.base.funcs = &rocket_gem_funcs;
+
+ return &obj->base.base;
+}
+
+int rocket_ioctl_create_bo(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct rocket_file_priv *rocket_priv = file->driver_priv;
+ struct drm_rocket_create_bo *args = data;
+ struct drm_gem_shmem_object *shmem_obj;
+ struct rocket_gem_object *rkt_obj;
+ struct drm_gem_object *gem_obj;
+ struct sg_table *sgt;
+ int ret;
+
+ shmem_obj = drm_gem_shmem_create(dev, args->size);
+ if (IS_ERR(shmem_obj))
+ return PTR_ERR(shmem_obj);
+
+ gem_obj = &shmem_obj->base;
+ rkt_obj = to_rocket_bo(gem_obj);
+
+ rkt_obj->driver_priv = rocket_priv;
+ rkt_obj->domain = rocket_iommu_domain_get(rocket_priv);
+ rkt_obj->size = args->size;
+ rkt_obj->offset = 0;
+
+ ret = drm_gem_handle_create(file, gem_obj, &args->handle);
+ drm_gem_object_put(gem_obj);
+ if (ret)
+ goto err;
+
+ sgt = drm_gem_shmem_get_pages_sgt(shmem_obj);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto err;
+ }
+
+ mutex_lock(&rocket_priv->mm_lock);
+ ret = drm_mm_insert_node_generic(&rocket_priv->mm, &rkt_obj->mm,
+ rkt_obj->size, PAGE_SIZE,
+ 0, 0);
+ mutex_unlock(&rocket_priv->mm_lock);
+ if (ret)
+ goto err;
+
+ ret = iommu_map_sgtable(rocket_priv->domain->domain,
+ rkt_obj->mm.start,
+ shmem_obj->sgt,
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret < 0 || ret < args->size) {
+ drm_err(dev, "failed to map buffer: size=%d request_size=%u\n",
+ ret, args->size);
+ ret = -ENOMEM;
+ goto err_remove_node;
+ }
+
+ /* iommu_map_sgtable might have aligned the size */
+ rkt_obj->size = ret;
+ args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
+ args->dma_address = rkt_obj->mm.start;
+
+ return 0;
+
+err_remove_node:
+ mutex_lock(&rocket_priv->mm_lock);
+ drm_mm_remove_node(&rkt_obj->mm);
+ mutex_unlock(&rocket_priv->mm_lock);
+
+err:
+ drm_gem_shmem_object_free(gem_obj);
+
+ return ret;
+}
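
From userspace (normally via the Mesa rocket driver), this ioctl can be sketched as below. The DRM_IOCTL_ROCKET_CREATE_BO macro name and header path are assumptions taken from the uapi header named in the Kconfig help; the fields match those filled in above:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/rocket_accel.h>	/* assumed uapi header location */

static int example_create_bo(int fd, __u32 size)
{
	struct drm_rocket_create_bo args;

	memset(&args, 0, sizeof(args));
	args.size = size;

	/* On success the kernel fills in handle, offset and dma_address. */
	if (ioctl(fd, DRM_IOCTL_ROCKET_CREATE_BO, &args))	/* assumed macro name */
		return -1;

	/* args.offset feeds mmap(); args.dma_address is the NPU-visible address. */
	return (int)args.handle;
}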
+
+int rocket_ioctl_prep_bo(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_rocket_prep_bo *args = data;
+ unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
+ struct drm_gem_object *gem_obj;
+ struct drm_gem_shmem_object *shmem_obj;
+ long ret = 0;
+
+ if (args->reserved != 0) {
+ drm_dbg(dev, "Reserved field in drm_rocket_prep_bo struct should be 0.\n");
+ return -EINVAL;
+ }
+
+ gem_obj = drm_gem_object_lookup(file, args->handle);
+ if (!gem_obj)
+ return -ENOENT;
+
+ ret = dma_resv_wait_timeout(gem_obj->resv, DMA_RESV_USAGE_WRITE, true, timeout);
+ if (!ret)
+ ret = timeout ? -ETIMEDOUT : -EBUSY;
+
+ shmem_obj = &to_rocket_bo(gem_obj)->base;
+
+ dma_sync_sgtable_for_cpu(dev->dev, shmem_obj->sgt, DMA_BIDIRECTIONAL);
+
+ drm_gem_object_put(gem_obj);
+
+ return ret;
+}
+
+int rocket_ioctl_fini_bo(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_rocket_fini_bo *args = data;
+ struct drm_gem_shmem_object *shmem_obj;
+ struct rocket_gem_object *rkt_obj;
+ struct drm_gem_object *gem_obj;
+
+ if (args->reserved != 0) {
+ drm_dbg(dev, "Reserved field in drm_rocket_fini_bo struct should be 0.\n");
+ return -EINVAL;
+ }
+
+ gem_obj = drm_gem_object_lookup(file, args->handle);
+ if (!gem_obj)
+ return -ENOENT;
+
+ rkt_obj = to_rocket_bo(gem_obj);
+ shmem_obj = &rkt_obj->base;
+
+ dma_sync_sgtable_for_device(dev->dev, shmem_obj->sgt, DMA_BIDIRECTIONAL);
+
+ drm_gem_object_put(gem_obj);
+
+ return 0;
+}
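
PREP_BO and FINI_BO bracket CPU access to a buffer: prep waits for pending device writes and syncs the pages toward the CPU, fini syncs them back toward the device. A hedged userspace sketch (ioctl macro names assumed from the uapi header):

static int example_cpu_access(int fd, __u32 handle)
{
	struct drm_rocket_prep_bo prep = {
		.handle = handle,
		.timeout_ns = 1000000000,	/* wait up to 1 s for the NPU */
	};
	struct drm_rocket_fini_bo fini = { .handle = handle };

	if (ioctl(fd, DRM_IOCTL_ROCKET_PREP_BO, &prep))		/* assumed macro name */
		return -1;

	/* ... read or modify the mmap()ed contents here ... */

	return ioctl(fd, DRM_IOCTL_ROCKET_FINI_BO, &fini);	/* assumed macro name */
}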
diff --git a/drivers/accel/rocket/rocket_gem.h b/drivers/accel/rocket/rocket_gem.h
new file mode 100644
index 000000000000..240430334509
--- /dev/null
+++ b/drivers/accel/rocket/rocket_gem.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#ifndef __ROCKET_GEM_H__
+#define __ROCKET_GEM_H__
+
+#include <drm/drm_gem_shmem_helper.h>
+
+struct rocket_gem_object {
+ struct drm_gem_shmem_object base;
+
+ struct rocket_file_priv *driver_priv;
+
+ struct rocket_iommu_domain *domain;
+ struct drm_mm_node mm;
+ size_t size;
+ u32 offset;
+};
+
+struct drm_gem_object *rocket_gem_create_object(struct drm_device *dev, size_t size);
+
+int rocket_ioctl_create_bo(struct drm_device *dev, void *data, struct drm_file *file);
+
+int rocket_ioctl_prep_bo(struct drm_device *dev, void *data, struct drm_file *file);
+
+int rocket_ioctl_fini_bo(struct drm_device *dev, void *data, struct drm_file *file);
+
+static inline
+struct rocket_gem_object *to_rocket_bo(struct drm_gem_object *obj)
+{
+ return container_of(to_drm_gem_shmem_obj(obj), struct rocket_gem_object, base);
+}
+
+#endif
diff --git a/drivers/accel/rocket/rocket_job.c b/drivers/accel/rocket/rocket_job.c
new file mode 100644
index 000000000000..acd606160dc9
--- /dev/null
+++ b/drivers/accel/rocket/rocket_job.c
@@ -0,0 +1,637 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+/* Copyright 2019 Collabora ltd. */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#include <drm/drm_print.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/rocket_accel.h>
+#include <linux/interrupt.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "rocket_core.h"
+#include "rocket_device.h"
+#include "rocket_drv.h"
+#include "rocket_job.h"
+#include "rocket_registers.h"
+
+#define JOB_TIMEOUT_MS 500
+
+static struct rocket_job *
+to_rocket_job(struct drm_sched_job *sched_job)
+{
+ return container_of(sched_job, struct rocket_job, base);
+}
+
+static const char *rocket_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "rocket";
+}
+
+static const char *rocket_fence_get_timeline_name(struct dma_fence *fence)
+{
+ return "rockchip-npu";
+}
+
+static const struct dma_fence_ops rocket_fence_ops = {
+ .get_driver_name = rocket_fence_get_driver_name,
+ .get_timeline_name = rocket_fence_get_timeline_name,
+};
+
+static struct dma_fence *rocket_fence_create(struct rocket_core *core)
+{
+ struct dma_fence *fence;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return ERR_PTR(-ENOMEM);
+
+ dma_fence_init(fence, &rocket_fence_ops, &core->fence_lock,
+ core->fence_context, ++core->emit_seqno);
+
+ return fence;
+}
+
+static int
+rocket_copy_tasks(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_rocket_job *job,
+ struct rocket_job *rjob)
+{
+ int ret = 0;
+
+ if (job->task_struct_size < sizeof(struct drm_rocket_task))
+ return -EINVAL;
+
+ rjob->task_count = job->task_count;
+
+ if (!rjob->task_count)
+ return 0;
+
+ rjob->tasks = kvmalloc_array(job->task_count, sizeof(*rjob->tasks), GFP_KERNEL);
+ if (!rjob->tasks) {
+ drm_dbg(dev, "Failed to allocate task array\n");
+ return -ENOMEM;
+ }
+
+ for (int i = 0; i < rjob->task_count; i++) {
+ struct drm_rocket_task task = {0};
+
+ if (copy_from_user(&task,
+ u64_to_user_ptr(job->tasks) + i * job->task_struct_size,
+ sizeof(task))) {
+ drm_dbg(dev, "Failed to copy incoming tasks\n");
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ if (task.regcmd_count == 0) {
+ drm_dbg(dev, "regcmd_count field in drm_rocket_task should be > 0.\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ rjob->tasks[i].regcmd = task.regcmd;
+ rjob->tasks[i].regcmd_count = task.regcmd_count;
+ }
+
+ return 0;
+
+fail:
+ kvfree(rjob->tasks);
+ return ret;
+}
+
+static void rocket_job_hw_submit(struct rocket_core *core, struct rocket_job *job)
+{
+ struct rocket_task *task;
+ unsigned int extra_bit;
+
+ /* Don't queue the job if a reset is in progress */
+ if (atomic_read(&core->reset.pending))
+ return;
+
+ /* GO ! */
+
+ task = &job->tasks[job->next_task_idx];
+ job->next_task_idx++;
+
+ rocket_pc_writel(core, BASE_ADDRESS, 0x1);
+
+ /* Taken from the downstream rknpu driver; the TRM marks this bit as reserved */
+ extra_bit = 0x10000000 * core->index;
+ rocket_cna_writel(core, S_POINTER, CNA_S_POINTER_POINTER_PP_EN(1) |
+ CNA_S_POINTER_EXECUTER_PP_EN(1) |
+ CNA_S_POINTER_POINTER_PP_MODE(1) |
+ extra_bit);
+
+ rocket_core_writel(core, S_POINTER, CORE_S_POINTER_POINTER_PP_EN(1) |
+ CORE_S_POINTER_EXECUTER_PP_EN(1) |
+ CORE_S_POINTER_POINTER_PP_MODE(1) |
+ extra_bit);
+
+ rocket_pc_writel(core, BASE_ADDRESS, task->regcmd);
+ rocket_pc_writel(core, REGISTER_AMOUNTS,
+ PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT((task->regcmd_count + 1) / 2 - 1));
+
+ rocket_pc_writel(core, INTERRUPT_MASK, PC_INTERRUPT_MASK_DPU_0 | PC_INTERRUPT_MASK_DPU_1);
+ rocket_pc_writel(core, INTERRUPT_CLEAR, PC_INTERRUPT_CLEAR_DPU_0 | PC_INTERRUPT_CLEAR_DPU_1);
+
+ rocket_pc_writel(core, TASK_CON, PC_TASK_CON_RESERVED_0(1) |
+ PC_TASK_CON_TASK_COUNT_CLEAR(1) |
+ PC_TASK_CON_TASK_NUMBER(1) |
+ PC_TASK_CON_TASK_PP_EN(1));
+
+ rocket_pc_writel(core, TASK_DMA_BASE_ADDR, PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR(0x0));
+
+ rocket_pc_writel(core, OPERATION_ENABLE, PC_OPERATION_ENABLE_OP_EN(1));
+
+ dev_dbg(core->dev, "Submitted regcmd at 0x%llx to core %d\n", task->regcmd, core->index);
+}
+
+static int rocket_acquire_object_fences(struct drm_gem_object **bos,
+ int bo_count,
+ struct drm_sched_job *job,
+ bool is_write)
+{
+ int i, ret;
+
+ for (i = 0; i < bo_count; i++) {
+ ret = dma_resv_reserve_fences(bos[i]->resv, 1);
+ if (ret)
+ return ret;
+
+ ret = drm_sched_job_add_implicit_dependencies(job, bos[i],
+ is_write);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void rocket_attach_object_fences(struct drm_gem_object **bos,
+ int bo_count,
+ struct dma_fence *fence)
+{
+ int i;
+
+ for (i = 0; i < bo_count; i++)
+ dma_resv_add_fence(bos[i]->resv, fence, DMA_RESV_USAGE_WRITE);
+}
+
+static int rocket_job_push(struct rocket_job *job)
+{
+ struct rocket_device *rdev = job->rdev;
+ struct drm_gem_object **bos;
+ struct ww_acquire_ctx acquire_ctx;
+ int ret = 0;
+
+ bos = kvmalloc_array(job->in_bo_count + job->out_bo_count, sizeof(void *),
+ GFP_KERNEL);
+ if (!bos)
+ return -ENOMEM;
+
+ memcpy(bos, job->in_bos, job->in_bo_count * sizeof(void *));
+ memcpy(&bos[job->in_bo_count], job->out_bos, job->out_bo_count * sizeof(void *));
+
+ ret = drm_gem_lock_reservations(bos, job->in_bo_count + job->out_bo_count, &acquire_ctx);
+ if (ret)
+ goto err;
+
+ scoped_guard(mutex, &rdev->sched_lock) {
+ drm_sched_job_arm(&job->base);
+
+ job->inference_done_fence = dma_fence_get(&job->base.s_fence->finished);
+
+ ret = rocket_acquire_object_fences(job->in_bos, job->in_bo_count, &job->base, false);
+ if (ret)
+ goto err_unlock;
+
+ ret = rocket_acquire_object_fences(job->out_bos, job->out_bo_count, &job->base, true);
+ if (ret)
+ goto err_unlock;
+
+ kref_get(&job->refcount); /* put by scheduler job completion */
+
+ drm_sched_entity_push_job(&job->base);
+ }
+
+ rocket_attach_object_fences(job->out_bos, job->out_bo_count, job->inference_done_fence);
+
+err_unlock:
+ drm_gem_unlock_reservations(bos, job->in_bo_count + job->out_bo_count, &acquire_ctx);
+err:
+ kvfree(bos);
+
+ return ret;
+}
+
+static void rocket_job_cleanup(struct kref *ref)
+{
+ struct rocket_job *job = container_of(ref, struct rocket_job,
+ refcount);
+ unsigned int i;
+
+ rocket_iommu_domain_put(job->domain);
+
+ dma_fence_put(job->done_fence);
+ dma_fence_put(job->inference_done_fence);
+
+ if (job->in_bos) {
+ for (i = 0; i < job->in_bo_count; i++)
+ drm_gem_object_put(job->in_bos[i]);
+
+ kvfree(job->in_bos);
+ }
+
+ if (job->out_bos) {
+ for (i = 0; i < job->out_bo_count; i++)
+ drm_gem_object_put(job->out_bos[i]);
+
+ kvfree(job->out_bos);
+ }
+
+ kvfree(job->tasks);
+
+ kfree(job);
+}
+
+static void rocket_job_put(struct rocket_job *job)
+{
+ kref_put(&job->refcount, rocket_job_cleanup);
+}
+
+static void rocket_job_free(struct drm_sched_job *sched_job)
+{
+ struct rocket_job *job = to_rocket_job(sched_job);
+
+ drm_sched_job_cleanup(sched_job);
+
+ rocket_job_put(job);
+}
+
+static struct rocket_core *sched_to_core(struct rocket_device *rdev,
+ struct drm_gpu_scheduler *sched)
+{
+ unsigned int core;
+
+ for (core = 0; core < rdev->num_cores; core++) {
+ if (&rdev->cores[core].sched == sched)
+ return &rdev->cores[core];
+ }
+
+ return NULL;
+}
+
+static struct dma_fence *rocket_job_run(struct drm_sched_job *sched_job)
+{
+ struct rocket_job *job = to_rocket_job(sched_job);
+ struct rocket_device *rdev = job->rdev;
+ struct rocket_core *core = sched_to_core(rdev, sched_job->sched);
+ struct dma_fence *fence = NULL;
+ int ret;
+
+ if (unlikely(job->base.s_fence->finished.error))
+ return NULL;
+
+ /*
+ * Nothing to execute: can happen if the job has finished while
+ * we were resetting the NPU.
+ */
+ if (job->next_task_idx == job->task_count)
+ return NULL;
+
+ fence = rocket_fence_create(core);
+ if (IS_ERR(fence))
+ return fence;
+
+ if (job->done_fence)
+ dma_fence_put(job->done_fence);
+ job->done_fence = dma_fence_get(fence);
+
+ ret = pm_runtime_get_sync(core->dev);
+ if (ret < 0)
+ return fence;
+
+ ret = iommu_attach_group(job->domain->domain, core->iommu_group);
+ if (ret < 0)
+ return fence;
+
+ scoped_guard(mutex, &core->job_lock) {
+ core->in_flight_job = job;
+ rocket_job_hw_submit(core, job);
+ }
+
+ return fence;
+}
+
+static void rocket_job_handle_irq(struct rocket_core *core)
+{
+ pm_runtime_mark_last_busy(core->dev);
+
+ rocket_pc_writel(core, OPERATION_ENABLE, 0x0);
+ rocket_pc_writel(core, INTERRUPT_CLEAR, 0x1ffff);
+
+ scoped_guard(mutex, &core->job_lock)
+ if (core->in_flight_job) {
+ if (core->in_flight_job->next_task_idx < core->in_flight_job->task_count) {
+ rocket_job_hw_submit(core, core->in_flight_job);
+ return;
+ }
+
+ iommu_detach_group(NULL, core->iommu_group);
+ dma_fence_signal(core->in_flight_job->done_fence);
+ pm_runtime_put_autosuspend(core->dev);
+ core->in_flight_job = NULL;
+ }
+}
+
+static void
+rocket_reset(struct rocket_core *core, struct drm_sched_job *bad)
+{
+ if (!atomic_read(&core->reset.pending))
+ return;
+
+ drm_sched_stop(&core->sched, bad);
+
+ /*
+ * Remaining interrupts have been handled, but we might still have
+ * stuck jobs. Let's make sure the PM counters stay balanced by
+ * manually calling pm_runtime_put_noidle().
+ */
+ scoped_guard(mutex, &core->job_lock) {
+ if (core->in_flight_job)
+ pm_runtime_put_noidle(core->dev);
+
+ iommu_detach_group(NULL, core->iommu_group);
+
+ core->in_flight_job = NULL;
+ }
+
+ /* Proceed with reset now. */
+ rocket_core_reset(core);
+
+ /* NPU has been reset, we can clear the reset pending bit. */
+ atomic_set(&core->reset.pending, 0);
+
+ /* Restart the scheduler */
+ drm_sched_start(&core->sched, 0);
+}
+
+static enum drm_gpu_sched_stat rocket_job_timedout(struct drm_sched_job *sched_job)
+{
+ struct rocket_job *job = to_rocket_job(sched_job);
+ struct rocket_device *rdev = job->rdev;
+ struct rocket_core *core = sched_to_core(rdev, sched_job->sched);
+
+ dev_err(core->dev, "NPU job timed out\n");
+
+ atomic_set(&core->reset.pending, 1);
+ rocket_reset(core, sched_job);
+
+ return DRM_GPU_SCHED_STAT_RESET;
+}
+
+static void rocket_reset_work(struct work_struct *work)
+{
+ struct rocket_core *core;
+
+ core = container_of(work, struct rocket_core, reset.work);
+ rocket_reset(core, NULL);
+}
+
+static const struct drm_sched_backend_ops rocket_sched_ops = {
+ .run_job = rocket_job_run,
+ .timedout_job = rocket_job_timedout,
+ .free_job = rocket_job_free
+};
+
+static irqreturn_t rocket_job_irq_handler_thread(int irq, void *data)
+{
+ struct rocket_core *core = data;
+
+ rocket_job_handle_irq(core);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rocket_job_irq_handler(int irq, void *data)
+{
+ struct rocket_core *core = data;
+ u32 raw_status = rocket_pc_readl(core, INTERRUPT_RAW_STATUS);
+
+ WARN_ON(raw_status & PC_INTERRUPT_RAW_STATUS_DMA_READ_ERROR);
+ WARN_ON(raw_status & PC_INTERRUPT_RAW_STATUS_DMA_WRITE_ERROR);
+
+ if (!(raw_status & PC_INTERRUPT_RAW_STATUS_DPU_0 ||
+ raw_status & PC_INTERRUPT_RAW_STATUS_DPU_1))
+ return IRQ_NONE;
+
+ rocket_pc_writel(core, INTERRUPT_MASK, 0x0);
+
+ return IRQ_WAKE_THREAD;
+}
+
+int rocket_job_init(struct rocket_core *core)
+{
+ struct drm_sched_init_args args = {
+ .ops = &rocket_sched_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .credit_limit = 1,
+ .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
+ .name = dev_name(core->dev),
+ .dev = core->dev,
+ };
+ int ret;
+
+ INIT_WORK(&core->reset.work, rocket_reset_work);
+ spin_lock_init(&core->fence_lock);
+ mutex_init(&core->job_lock);
+
+ core->irq = platform_get_irq(to_platform_device(core->dev), 0);
+ if (core->irq < 0)
+ return core->irq;
+
+ ret = devm_request_threaded_irq(core->dev, core->irq,
+ rocket_job_irq_handler,
+ rocket_job_irq_handler_thread,
+ IRQF_SHARED, dev_name(core->dev),
+ core);
+ if (ret) {
+ dev_err(core->dev, "failed to request job irq\n");
+ return ret;
+ }
+
+ core->reset.wq = alloc_ordered_workqueue("rocket-reset-%d", 0, core->index);
+ if (!core->reset.wq)
+ return -ENOMEM;
+
+ core->fence_context = dma_fence_context_alloc(1);
+
+ args.timeout_wq = core->reset.wq;
+ ret = drm_sched_init(&core->sched, &args);
+ if (ret) {
+ dev_err(core->dev, "Failed to create scheduler: %d\n", ret);
+ goto err_sched;
+ }
+
+ return 0;
+
+err_sched:
+ /* drm_sched_init() failed, so there is no scheduler to tear down. */
+ destroy_workqueue(core->reset.wq);
+ return ret;
+}
+
+void rocket_job_fini(struct rocket_core *core)
+{
+ drm_sched_fini(&core->sched);
+
+ cancel_work_sync(&core->reset.work);
+ destroy_workqueue(core->reset.wq);
+}
+
+int rocket_job_open(struct rocket_file_priv *rocket_priv)
+{
+ struct rocket_device *rdev = rocket_priv->rdev;
+ struct drm_gpu_scheduler **scheds = kmalloc_array(rdev->num_cores,
+ sizeof(*scheds),
+ GFP_KERNEL);
+ unsigned int core;
+ int ret;
+
+ if (!scheds)
+ return -ENOMEM;
+
+ for (core = 0; core < rdev->num_cores; core++)
+ scheds[core] = &rdev->cores[core].sched;
+
+ ret = drm_sched_entity_init(&rocket_priv->sched_entity,
+ DRM_SCHED_PRIORITY_NORMAL,
+ scheds,
+ rdev->num_cores, NULL);
+ if (WARN_ON(ret)) {
+ kfree(scheds);
+ return ret;
+ }
+
+ return 0;
+}
+
+void rocket_job_close(struct rocket_file_priv *rocket_priv)
+{
+ struct drm_sched_entity *entity = &rocket_priv->sched_entity;
+
+ kfree(entity->sched_list);
+ drm_sched_entity_destroy(entity);
+}
+
+int rocket_job_is_idle(struct rocket_core *core)
+{
+ /* If there are any jobs in this HW queue, we're not idle */
+ if (atomic_read(&core->sched.credit_count))
+ return false;
+
+ return true;
+}
+
+static int rocket_ioctl_submit_job(struct drm_device *dev, struct drm_file *file,
+ struct drm_rocket_job *job)
+{
+ struct rocket_device *rdev = to_rocket_device(dev);
+ struct rocket_file_priv *file_priv = file->driver_priv;
+ struct rocket_job *rjob = NULL;
+ int ret = 0;
+
+ if (job->task_count == 0)
+ return -EINVAL;
+
+ rjob = kzalloc(sizeof(*rjob), GFP_KERNEL);
+ if (!rjob)
+ return -ENOMEM;
+
+ kref_init(&rjob->refcount);
+
+ rjob->rdev = rdev;
+
+ ret = drm_sched_job_init(&rjob->base,
+ &file_priv->sched_entity,
+ 1, NULL, file->client_id);
+ if (ret)
+ goto out_put_job;
+
+ ret = rocket_copy_tasks(dev, file, job, rjob);
+ if (ret)
+ goto out_cleanup_job;
+
+ ret = drm_gem_objects_lookup(file, u64_to_user_ptr(job->in_bo_handles),
+ job->in_bo_handle_count, &rjob->in_bos);
+ if (ret)
+ goto out_cleanup_job;
+
+ rjob->in_bo_count = job->in_bo_handle_count;
+
+ ret = drm_gem_objects_lookup(file, u64_to_user_ptr(job->out_bo_handles),
+ job->out_bo_handle_count, &rjob->out_bos);
+ if (ret)
+ goto out_cleanup_job;
+
+ rjob->out_bo_count = job->out_bo_handle_count;
+
+ rjob->domain = rocket_iommu_domain_get(file_priv);
+
+ ret = rocket_job_push(rjob);
+ if (ret)
+ goto out_cleanup_job;
+
+out_cleanup_job:
+ if (ret)
+ drm_sched_job_cleanup(&rjob->base);
+out_put_job:
+ rocket_job_put(rjob);
+
+ return ret;
+}
+
+int rocket_ioctl_submit(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_rocket_submit *args = data;
+ struct drm_rocket_job *jobs;
+ int ret = 0;
+ unsigned int i = 0;
+
+ if (args->job_count == 0)
+ return 0;
+
+ if (args->job_struct_size < sizeof(struct drm_rocket_job)) {
+ drm_dbg(dev, "job_struct_size field in drm_rocket_submit struct is too small.\n");
+ return -EINVAL;
+ }
+
+ if (args->reserved != 0) {
+ drm_dbg(dev, "Reserved field in drm_rocket_submit struct should be 0.\n");
+ return -EINVAL;
+ }
+
+ jobs = kvmalloc_array(args->job_count, sizeof(*jobs), GFP_KERNEL);
+ if (!jobs) {
+ drm_dbg(dev, "Failed to allocate incoming job array\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < args->job_count; i++) {
+ if (copy_from_user(&jobs[i],
+ u64_to_user_ptr(args->jobs) + i * args->job_struct_size,
+ sizeof(*jobs))) {
+ ret = -EFAULT;
+ drm_dbg(dev, "Failed to copy incoming job array\n");
+ goto exit;
+ }
+ }
+
+ for (i = 0; i < args->job_count; i++) {
+ ret = rocket_ioctl_submit_job(dev, file, &jobs[i]);
+ if (ret)
+ break;
+ }
+
+exit:
+ kvfree(jobs);
+
+ return ret;
+}
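
Tying the ioctl together, a hedged userspace sketch of a single-task submission; the struct fields mirror the ones read by rocket_copy_tasks() and rocket_ioctl_submit_job() above, while the ioctl macro name is an assumption from the uapi header:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/rocket_accel.h>	/* assumed uapi header location */

static int example_submit(int fd, __u64 regcmd_dma, __u32 regcmd_count,
			  __u32 *out_handles, __u32 out_count)
{
	struct drm_rocket_task task = {
		.regcmd = regcmd_dma,		/* NPU address of the command stream */
		.regcmd_count = regcmd_count,	/* must be > 0 */
	};
	struct drm_rocket_job job = {
		.tasks = (__u64)(uintptr_t)&task,
		.task_count = 1,
		.task_struct_size = sizeof(task),
		.out_bo_handles = (__u64)(uintptr_t)out_handles,
		.out_bo_handle_count = out_count,
	};
	struct drm_rocket_submit args = {
		.jobs = (__u64)(uintptr_t)&job,
		.job_count = 1,
		.job_struct_size = sizeof(job),
	};

	return ioctl(fd, DRM_IOCTL_ROCKET_SUBMIT, &args);	/* assumed macro name */
}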
diff --git a/drivers/accel/rocket/rocket_job.h b/drivers/accel/rocket/rocket_job.h
new file mode 100644
index 000000000000..4ae00feec3b9
--- /dev/null
+++ b/drivers/accel/rocket/rocket_job.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#ifndef __ROCKET_JOB_H__
+#define __ROCKET_JOB_H__
+
+#include <drm/drm_drv.h>
+#include <drm/gpu_scheduler.h>
+
+#include "rocket_core.h"
+#include "rocket_drv.h"
+
+struct rocket_task {
+ u64 regcmd;
+ u32 regcmd_count;
+};
+
+struct rocket_job {
+ struct drm_sched_job base;
+
+ struct rocket_device *rdev;
+
+ struct drm_gem_object **in_bos;
+ struct drm_gem_object **out_bos;
+
+ u32 in_bo_count;
+ u32 out_bo_count;
+
+ struct rocket_task *tasks;
+ u32 task_count;
+ u32 next_task_idx;
+
+ /* Fence to be signaled by drm_sched once it's done with the job */
+ struct dma_fence *inference_done_fence;
+
+ /* Fence to be signaled by IRQ handler when the job is complete. */
+ struct dma_fence *done_fence;
+
+ struct rocket_iommu_domain *domain;
+
+ struct kref refcount;
+};
+
+int rocket_ioctl_submit(struct drm_device *dev, void *data, struct drm_file *file);
+
+int rocket_job_init(struct rocket_core *core);
+void rocket_job_fini(struct rocket_core *core);
+int rocket_job_open(struct rocket_file_priv *rocket_priv);
+void rocket_job_close(struct rocket_file_priv *rocket_priv);
+int rocket_job_is_idle(struct rocket_core *core);
+
+#endif
diff --git a/drivers/accel/rocket/rocket_registers.h b/drivers/accel/rocket/rocket_registers.h
new file mode 100644
index 000000000000..9aef614c3470
--- /dev/null
+++ b/drivers/accel/rocket/rocket_registers.h
@@ -0,0 +1,4404 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+
+#ifndef __ROCKET_REGISTERS_XML__
+#define __ROCKET_REGISTERS_XML__
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng gen_header.py tool in this git repository:
+http://gitlab.freedesktop.org/mesa/mesa/
+git clone https://gitlab.freedesktop.org/mesa/mesa.git
+
+The rules-ng-ng source files this header was generated from are:
+
+- /home/tomeu/src/mesa/src/gallium/drivers/rocket/registers.xml ( 60076 bytes, from Wed Jun 12 10:02:25 2024)
+
+Copyright (C) 2024-2025 by the following authors:
+- Tomeu Vizoso <tomeu@tomeuvizoso.net>
+*/
+
+#define REG_PC_VERSION 0x00000000
+#define PC_VERSION_VERSION__MASK 0xffffffff
+#define PC_VERSION_VERSION__SHIFT 0
+static inline uint32_t PC_VERSION_VERSION(uint32_t val)
+{
+ return ((val) << PC_VERSION_VERSION__SHIFT) & PC_VERSION_VERSION__MASK;
+}
+
+#define REG_PC_VERSION_NUM 0x00000004
+#define PC_VERSION_NUM_VERSION_NUM__MASK 0xffffffff
+#define PC_VERSION_NUM_VERSION_NUM__SHIFT 0
+static inline uint32_t PC_VERSION_NUM_VERSION_NUM(uint32_t val)
+{
+ return ((val) << PC_VERSION_NUM_VERSION_NUM__SHIFT) & PC_VERSION_NUM_VERSION_NUM__MASK;
+}
+
+#define REG_PC_OPERATION_ENABLE 0x00000008
+#define PC_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define PC_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t PC_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_OPERATION_ENABLE_RESERVED_0__SHIFT) & PC_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define PC_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define PC_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t PC_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << PC_OPERATION_ENABLE_OP_EN__SHIFT) & PC_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_PC_BASE_ADDRESS 0x00000010
+#define PC_BASE_ADDRESS_PC_SOURCE_ADDR__MASK 0xfffffff0
+#define PC_BASE_ADDRESS_PC_SOURCE_ADDR__SHIFT 4
+static inline uint32_t PC_BASE_ADDRESS_PC_SOURCE_ADDR(uint32_t val)
+{
+ return ((val) << PC_BASE_ADDRESS_PC_SOURCE_ADDR__SHIFT) & PC_BASE_ADDRESS_PC_SOURCE_ADDR__MASK;
+}
+#define PC_BASE_ADDRESS_RESERVED_0__MASK 0x0000000e
+#define PC_BASE_ADDRESS_RESERVED_0__SHIFT 1
+static inline uint32_t PC_BASE_ADDRESS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_BASE_ADDRESS_RESERVED_0__SHIFT) & PC_BASE_ADDRESS_RESERVED_0__MASK;
+}
+#define PC_BASE_ADDRESS_PC_SEL__MASK 0x00000001
+#define PC_BASE_ADDRESS_PC_SEL__SHIFT 0
+static inline uint32_t PC_BASE_ADDRESS_PC_SEL(uint32_t val)
+{
+ return ((val) << PC_BASE_ADDRESS_PC_SEL__SHIFT) & PC_BASE_ADDRESS_PC_SEL__MASK;
+}
+
+#define REG_PC_REGISTER_AMOUNTS 0x00000014
+#define PC_REGISTER_AMOUNTS_RESERVED_0__MASK 0xffff0000
+#define PC_REGISTER_AMOUNTS_RESERVED_0__SHIFT 16
+static inline uint32_t PC_REGISTER_AMOUNTS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_REGISTER_AMOUNTS_RESERVED_0__SHIFT) & PC_REGISTER_AMOUNTS_RESERVED_0__MASK;
+}
+#define PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT__MASK 0x0000ffff
+#define PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT__SHIFT 0
+static inline uint32_t PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT(uint32_t val)
+{
+ return ((val) << PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT__SHIFT) & PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT__MASK;
+}
+
+#define REG_PC_INTERRUPT_MASK 0x00000020
+#define PC_INTERRUPT_MASK_RESERVED_0__MASK 0xffffc000
+#define PC_INTERRUPT_MASK_RESERVED_0__SHIFT 14
+static inline uint32_t PC_INTERRUPT_MASK_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_INTERRUPT_MASK_RESERVED_0__SHIFT) & PC_INTERRUPT_MASK_RESERVED_0__MASK;
+}
+#define PC_INTERRUPT_MASK_DMA_WRITE_ERROR 0x00002000
+#define PC_INTERRUPT_MASK_DMA_READ_ERROR 0x00001000
+#define PC_INTERRUPT_MASK_PPU_1 0x00000800
+#define PC_INTERRUPT_MASK_PPU_0 0x00000400
+#define PC_INTERRUPT_MASK_DPU_1 0x00000200
+#define PC_INTERRUPT_MASK_DPU_0 0x00000100
+#define PC_INTERRUPT_MASK_CORE_1 0x00000080
+#define PC_INTERRUPT_MASK_CORE_0 0x00000040
+#define PC_INTERRUPT_MASK_CNA_CSC_1 0x00000020
+#define PC_INTERRUPT_MASK_CNA_CSC_0 0x00000010
+#define PC_INTERRUPT_MASK_CNA_WEIGHT_1 0x00000008
+#define PC_INTERRUPT_MASK_CNA_WEIGHT_0 0x00000004
+#define PC_INTERRUPT_MASK_CNA_FEATURE_1 0x00000002
+#define PC_INTERRUPT_MASK_CNA_FEATURE_0 0x00000001
+
+#define REG_PC_INTERRUPT_CLEAR 0x00000024
+#define PC_INTERRUPT_CLEAR_RESERVED_0__MASK 0xffffc000
+#define PC_INTERRUPT_CLEAR_RESERVED_0__SHIFT 14
+static inline uint32_t PC_INTERRUPT_CLEAR_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_INTERRUPT_CLEAR_RESERVED_0__SHIFT) & PC_INTERRUPT_CLEAR_RESERVED_0__MASK;
+}
+#define PC_INTERRUPT_CLEAR_DMA_WRITE_ERROR 0x00002000
+#define PC_INTERRUPT_CLEAR_DMA_READ_ERROR 0x00001000
+#define PC_INTERRUPT_CLEAR_PPU_1 0x00000800
+#define PC_INTERRUPT_CLEAR_PPU_0 0x00000400
+#define PC_INTERRUPT_CLEAR_DPU_1 0x00000200
+#define PC_INTERRUPT_CLEAR_DPU_0 0x00000100
+#define PC_INTERRUPT_CLEAR_CORE_1 0x00000080
+#define PC_INTERRUPT_CLEAR_CORE_0 0x00000040
+#define PC_INTERRUPT_CLEAR_CNA_CSC_1 0x00000020
+#define PC_INTERRUPT_CLEAR_CNA_CSC_0 0x00000010
+#define PC_INTERRUPT_CLEAR_CNA_WEIGHT_1 0x00000008
+#define PC_INTERRUPT_CLEAR_CNA_WEIGHT_0 0x00000004
+#define PC_INTERRUPT_CLEAR_CNA_FEATURE_1 0x00000002
+#define PC_INTERRUPT_CLEAR_CNA_FEATURE_0 0x00000001
+
+#define REG_PC_INTERRUPT_STATUS 0x00000028
+#define PC_INTERRUPT_STATUS_RESERVED_0__MASK 0xffffc000
+#define PC_INTERRUPT_STATUS_RESERVED_0__SHIFT 14
+static inline uint32_t PC_INTERRUPT_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_INTERRUPT_STATUS_RESERVED_0__SHIFT) & PC_INTERRUPT_STATUS_RESERVED_0__MASK;
+}
+#define PC_INTERRUPT_STATUS_DMA_WRITE_ERROR 0x00002000
+#define PC_INTERRUPT_STATUS_DMA_READ_ERROR 0x00001000
+#define PC_INTERRUPT_STATUS_PPU_1 0x00000800
+#define PC_INTERRUPT_STATUS_PPU_0 0x00000400
+#define PC_INTERRUPT_STATUS_DPU_1 0x00000200
+#define PC_INTERRUPT_STATUS_DPU_0 0x00000100
+#define PC_INTERRUPT_STATUS_CORE_1 0x00000080
+#define PC_INTERRUPT_STATUS_CORE_0 0x00000040
+#define PC_INTERRUPT_STATUS_CNA_CSC_1 0x00000020
+#define PC_INTERRUPT_STATUS_CNA_CSC_0 0x00000010
+#define PC_INTERRUPT_STATUS_CNA_WEIGHT_1 0x00000008
+#define PC_INTERRUPT_STATUS_CNA_WEIGHT_0 0x00000004
+#define PC_INTERRUPT_STATUS_CNA_FEATURE_1 0x00000002
+#define PC_INTERRUPT_STATUS_CNA_FEATURE_0 0x00000001
+
+#define REG_PC_INTERRUPT_RAW_STATUS 0x0000002c
+#define PC_INTERRUPT_RAW_STATUS_RESERVED_0__MASK 0xffffc000
+#define PC_INTERRUPT_RAW_STATUS_RESERVED_0__SHIFT 14
+static inline uint32_t PC_INTERRUPT_RAW_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_INTERRUPT_RAW_STATUS_RESERVED_0__SHIFT) & PC_INTERRUPT_RAW_STATUS_RESERVED_0__MASK;
+}
+#define PC_INTERRUPT_RAW_STATUS_DMA_WRITE_ERROR 0x00002000
+#define PC_INTERRUPT_RAW_STATUS_DMA_READ_ERROR 0x00001000
+#define PC_INTERRUPT_RAW_STATUS_PPU_1 0x00000800
+#define PC_INTERRUPT_RAW_STATUS_PPU_0 0x00000400
+#define PC_INTERRUPT_RAW_STATUS_DPU_1 0x00000200
+#define PC_INTERRUPT_RAW_STATUS_DPU_0 0x00000100
+#define PC_INTERRUPT_RAW_STATUS_CORE_1 0x00000080
+#define PC_INTERRUPT_RAW_STATUS_CORE_0 0x00000040
+#define PC_INTERRUPT_RAW_STATUS_CNA_CSC_1 0x00000020
+#define PC_INTERRUPT_RAW_STATUS_CNA_CSC_0 0x00000010
+#define PC_INTERRUPT_RAW_STATUS_CNA_WEIGHT_1 0x00000008
+#define PC_INTERRUPT_RAW_STATUS_CNA_WEIGHT_0 0x00000004
+#define PC_INTERRUPT_RAW_STATUS_CNA_FEATURE_1 0x00000002
+#define PC_INTERRUPT_RAW_STATUS_CNA_FEATURE_0 0x00000001
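+
The MASK/CLEAR/STATUS/RAW_STATUS quartet above presumably follows the usual masked-interrupt convention: RAW_STATUS reports all pending sources, STATUS is RAW_STATUS gated by MASK, and writing a set bit to CLEAR acknowledges it. Under that assumption (not confirmed by this header alone), a handler might consume these registers roughly as follows, where "base" stands for an already ioremapped register base:

/* Illustrative only: assumes write-1-to-clear acknowledge semantics. */
u32 status = readl(base + REG_PC_INTERRUPT_STATUS);

if (status & PC_INTERRUPT_STATUS_DMA_READ_ERROR)
	pr_warn("rocket: DMA read error\n");

/* Acknowledge everything we observed. */
writel(status, base + REG_PC_INTERRUPT_CLEAR);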
+
+#define REG_PC_TASK_CON 0x00000030
+#define PC_TASK_CON_RESERVED_0__MASK 0xffffc000
+#define PC_TASK_CON_RESERVED_0__SHIFT 14
+static inline uint32_t PC_TASK_CON_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_TASK_CON_RESERVED_0__SHIFT) & PC_TASK_CON_RESERVED_0__MASK;
+}
+#define PC_TASK_CON_TASK_COUNT_CLEAR__MASK 0x00002000
+#define PC_TASK_CON_TASK_COUNT_CLEAR__SHIFT 13
+static inline uint32_t PC_TASK_CON_TASK_COUNT_CLEAR(uint32_t val)
+{
+ return ((val) << PC_TASK_CON_TASK_COUNT_CLEAR__SHIFT) & PC_TASK_CON_TASK_COUNT_CLEAR__MASK;
+}
+#define PC_TASK_CON_TASK_PP_EN__MASK 0x00001000
+#define PC_TASK_CON_TASK_PP_EN__SHIFT 12
+static inline uint32_t PC_TASK_CON_TASK_PP_EN(uint32_t val)
+{
+ return ((val) << PC_TASK_CON_TASK_PP_EN__SHIFT) & PC_TASK_CON_TASK_PP_EN__MASK;
+}
+#define PC_TASK_CON_TASK_NUMBER__MASK 0x00000fff
+#define PC_TASK_CON_TASK_NUMBER__SHIFT 0
+static inline uint32_t PC_TASK_CON_TASK_NUMBER(uint32_t val)
+{
+ return ((val) << PC_TASK_CON_TASK_NUMBER__SHIFT) & PC_TASK_CON_TASK_NUMBER__MASK;
+}
+
+#define REG_PC_TASK_DMA_BASE_ADDR 0x00000034
+#define PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR__MASK 0xfffffff0
+#define PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR__SHIFT 4
+static inline uint32_t PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR(uint32_t val)
+{
+ return ((val) << PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR__SHIFT) & PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR__MASK;
+}
+#define PC_TASK_DMA_BASE_ADDR_RESERVED_0__MASK 0x0000000f
+#define PC_TASK_DMA_BASE_ADDR_RESERVED_0__SHIFT 0
+static inline uint32_t PC_TASK_DMA_BASE_ADDR_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_TASK_DMA_BASE_ADDR_RESERVED_0__SHIFT) & PC_TASK_DMA_BASE_ADDR_RESERVED_0__MASK;
+}
+
+#define REG_PC_TASK_STATUS 0x0000003c
+#define PC_TASK_STATUS_RESERVED_0__MASK 0xf0000000
+#define PC_TASK_STATUS_RESERVED_0__SHIFT 28
+static inline uint32_t PC_TASK_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_TASK_STATUS_RESERVED_0__SHIFT) & PC_TASK_STATUS_RESERVED_0__MASK;
+}
+#define PC_TASK_STATUS_TASK_STATUS__MASK 0x0fffffff
+#define PC_TASK_STATUS_TASK_STATUS__SHIFT 0
+static inline uint32_t PC_TASK_STATUS_TASK_STATUS(uint32_t val)
+{
+ return ((val) << PC_TASK_STATUS_TASK_STATUS__SHIFT) & PC_TASK_STATUS_TASK_STATUS__MASK;
+}
+
+#define REG_CNA_S_STATUS 0x00001000
+#define CNA_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define CNA_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t CNA_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_S_STATUS_RESERVED_0__SHIFT) & CNA_S_STATUS_RESERVED_0__MASK;
+}
+#define CNA_S_STATUS_STATUS_1__MASK 0x00030000
+#define CNA_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t CNA_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << CNA_S_STATUS_STATUS_1__SHIFT) & CNA_S_STATUS_STATUS_1__MASK;
+}
+#define CNA_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define CNA_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t CNA_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_S_STATUS_RESERVED_1__SHIFT) & CNA_S_STATUS_RESERVED_1__MASK;
+}
+#define CNA_S_STATUS_STATUS_0__MASK 0x00000003
+#define CNA_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t CNA_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << CNA_S_STATUS_STATUS_0__SHIFT) & CNA_S_STATUS_STATUS_0__MASK;
+}
+
+#define REG_CNA_S_POINTER 0x00001004
+#define CNA_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define CNA_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t CNA_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_RESERVED_0__SHIFT) & CNA_S_POINTER_RESERVED_0__MASK;
+}
+#define CNA_S_POINTER_EXECUTER__MASK 0x00010000
+#define CNA_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t CNA_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_EXECUTER__SHIFT) & CNA_S_POINTER_EXECUTER__MASK;
+}
+#define CNA_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define CNA_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t CNA_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_RESERVED_1__SHIFT) & CNA_S_POINTER_RESERVED_1__MASK;
+}
+#define CNA_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define CNA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t CNA_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & CNA_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define CNA_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define CNA_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t CNA_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_POINTER_PP_CLEAR__SHIFT) & CNA_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define CNA_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define CNA_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t CNA_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_POINTER_PP_MODE__SHIFT) & CNA_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define CNA_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define CNA_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t CNA_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_EXECUTER_PP_EN__SHIFT) & CNA_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define CNA_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define CNA_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t CNA_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_POINTER_PP_EN__SHIFT) & CNA_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define CNA_S_POINTER_POINTER__MASK 0x00000001
+#define CNA_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t CNA_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_POINTER__SHIFT) & CNA_S_POINTER_POINTER__MASK;
+}
+
+#define REG_CNA_OPERATION_ENABLE 0x00001008
+#define CNA_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define CNA_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t CNA_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_OPERATION_ENABLE_RESERVED_0__SHIFT) & CNA_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define CNA_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define CNA_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t CNA_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << CNA_OPERATION_ENABLE_OP_EN__SHIFT) & CNA_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_CNA_CONV_CON1 0x0000100c
+#define CNA_CONV_CON1_RESERVED_0__MASK 0x80000000
+#define CNA_CONV_CON1_RESERVED_0__SHIFT 31
+static inline uint32_t CNA_CONV_CON1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_RESERVED_0__SHIFT) & CNA_CONV_CON1_RESERVED_0__MASK;
+}
+#define CNA_CONV_CON1_NONALIGN_DMA__MASK 0x40000000
+#define CNA_CONV_CON1_NONALIGN_DMA__SHIFT 30
+static inline uint32_t CNA_CONV_CON1_NONALIGN_DMA(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_NONALIGN_DMA__SHIFT) & CNA_CONV_CON1_NONALIGN_DMA__MASK;
+}
+#define CNA_CONV_CON1_GROUP_LINE_OFF__MASK 0x20000000
+#define CNA_CONV_CON1_GROUP_LINE_OFF__SHIFT 29
+static inline uint32_t CNA_CONV_CON1_GROUP_LINE_OFF(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_GROUP_LINE_OFF__SHIFT) & CNA_CONV_CON1_GROUP_LINE_OFF__MASK;
+}
+#define CNA_CONV_CON1_RESERVED_1__MASK 0x1ffe0000
+#define CNA_CONV_CON1_RESERVED_1__SHIFT 17
+static inline uint32_t CNA_CONV_CON1_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_RESERVED_1__SHIFT) & CNA_CONV_CON1_RESERVED_1__MASK;
+}
+#define CNA_CONV_CON1_DECONV__MASK 0x00010000
+#define CNA_CONV_CON1_DECONV__SHIFT 16
+static inline uint32_t CNA_CONV_CON1_DECONV(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_DECONV__SHIFT) & CNA_CONV_CON1_DECONV__MASK;
+}
+#define CNA_CONV_CON1_ARGB_IN__MASK 0x0000f000
+#define CNA_CONV_CON1_ARGB_IN__SHIFT 12
+static inline uint32_t CNA_CONV_CON1_ARGB_IN(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_ARGB_IN__SHIFT) & CNA_CONV_CON1_ARGB_IN__MASK;
+}
+#define CNA_CONV_CON1_RESERVED_2__MASK 0x00000c00
+#define CNA_CONV_CON1_RESERVED_2__SHIFT 10
+static inline uint32_t CNA_CONV_CON1_RESERVED_2(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_RESERVED_2__SHIFT) & CNA_CONV_CON1_RESERVED_2__MASK;
+}
+#define CNA_CONV_CON1_PROC_PRECISION__MASK 0x00000380
+#define CNA_CONV_CON1_PROC_PRECISION__SHIFT 7
+static inline uint32_t CNA_CONV_CON1_PROC_PRECISION(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_PROC_PRECISION__SHIFT) & CNA_CONV_CON1_PROC_PRECISION__MASK;
+}
+#define CNA_CONV_CON1_IN_PRECISION__MASK 0x00000070
+#define CNA_CONV_CON1_IN_PRECISION__SHIFT 4
+static inline uint32_t CNA_CONV_CON1_IN_PRECISION(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_IN_PRECISION__SHIFT) & CNA_CONV_CON1_IN_PRECISION__MASK;
+}
+#define CNA_CONV_CON1_CONV_MODE__MASK 0x0000000f
+#define CNA_CONV_CON1_CONV_MODE__SHIFT 0
+static inline uint32_t CNA_CONV_CON1_CONV_MODE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_CONV_MODE__SHIFT) & CNA_CONV_CON1_CONV_MODE__MASK;
+}
+
+#define REG_CNA_CONV_CON2 0x00001010
+#define CNA_CONV_CON2_RESERVED_0__MASK 0xff000000
+#define CNA_CONV_CON2_RESERVED_0__SHIFT 24
+static inline uint32_t CNA_CONV_CON2_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_RESERVED_0__SHIFT) & CNA_CONV_CON2_RESERVED_0__MASK;
+}
+#define CNA_CONV_CON2_KERNEL_GROUP__MASK 0x00ff0000
+#define CNA_CONV_CON2_KERNEL_GROUP__SHIFT 16
+static inline uint32_t CNA_CONV_CON2_KERNEL_GROUP(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_KERNEL_GROUP__SHIFT) & CNA_CONV_CON2_KERNEL_GROUP__MASK;
+}
+#define CNA_CONV_CON2_RESERVED_1__MASK 0x0000c000
+#define CNA_CONV_CON2_RESERVED_1__SHIFT 14
+static inline uint32_t CNA_CONV_CON2_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_RESERVED_1__SHIFT) & CNA_CONV_CON2_RESERVED_1__MASK;
+}
+#define CNA_CONV_CON2_FEATURE_GRAINS__MASK 0x00003ff0
+#define CNA_CONV_CON2_FEATURE_GRAINS__SHIFT 4
+static inline uint32_t CNA_CONV_CON2_FEATURE_GRAINS(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_FEATURE_GRAINS__SHIFT) & CNA_CONV_CON2_FEATURE_GRAINS__MASK;
+}
+#define CNA_CONV_CON2_RESERVED_2__MASK 0x00000008
+#define CNA_CONV_CON2_RESERVED_2__SHIFT 3
+static inline uint32_t CNA_CONV_CON2_RESERVED_2(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_RESERVED_2__SHIFT) & CNA_CONV_CON2_RESERVED_2__MASK;
+}
+#define CNA_CONV_CON2_CSC_WO_EN__MASK 0x00000004
+#define CNA_CONV_CON2_CSC_WO_EN__SHIFT 2
+static inline uint32_t CNA_CONV_CON2_CSC_WO_EN(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_CSC_WO_EN__SHIFT) & CNA_CONV_CON2_CSC_WO_EN__MASK;
+}
+#define CNA_CONV_CON2_CSC_DO_EN__MASK 0x00000002
+#define CNA_CONV_CON2_CSC_DO_EN__SHIFT 1
+static inline uint32_t CNA_CONV_CON2_CSC_DO_EN(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_CSC_DO_EN__SHIFT) & CNA_CONV_CON2_CSC_DO_EN__MASK;
+}
+#define CNA_CONV_CON2_CMD_FIFO_SRST__MASK 0x00000001
+#define CNA_CONV_CON2_CMD_FIFO_SRST__SHIFT 0
+static inline uint32_t CNA_CONV_CON2_CMD_FIFO_SRST(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_CMD_FIFO_SRST__SHIFT) & CNA_CONV_CON2_CMD_FIFO_SRST__MASK;
+}
+
+#define REG_CNA_CONV_CON3 0x00001014
+#define CNA_CONV_CON3_RESERVED_0__MASK 0x80000000
+#define CNA_CONV_CON3_RESERVED_0__SHIFT 31
+static inline uint32_t CNA_CONV_CON3_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_RESERVED_0__SHIFT) & CNA_CONV_CON3_RESERVED_0__MASK;
+}
+#define CNA_CONV_CON3_NN_MODE__MASK 0x70000000
+#define CNA_CONV_CON3_NN_MODE__SHIFT 28
+static inline uint32_t CNA_CONV_CON3_NN_MODE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_NN_MODE__SHIFT) & CNA_CONV_CON3_NN_MODE__MASK;
+}
+#define CNA_CONV_CON3_RESERVED_1__MASK 0x0c000000
+#define CNA_CONV_CON3_RESERVED_1__SHIFT 26
+static inline uint32_t CNA_CONV_CON3_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_RESERVED_1__SHIFT) & CNA_CONV_CON3_RESERVED_1__MASK;
+}
+#define CNA_CONV_CON3_ATROUS_Y_DILATION__MASK 0x03e00000
+#define CNA_CONV_CON3_ATROUS_Y_DILATION__SHIFT 21
+static inline uint32_t CNA_CONV_CON3_ATROUS_Y_DILATION(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_ATROUS_Y_DILATION__SHIFT) & CNA_CONV_CON3_ATROUS_Y_DILATION__MASK;
+}
+#define CNA_CONV_CON3_ATROUS_X_DILATION__MASK 0x001f0000
+#define CNA_CONV_CON3_ATROUS_X_DILATION__SHIFT 16
+static inline uint32_t CNA_CONV_CON3_ATROUS_X_DILATION(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_ATROUS_X_DILATION__SHIFT) & CNA_CONV_CON3_ATROUS_X_DILATION__MASK;
+}
+#define CNA_CONV_CON3_RESERVED_2__MASK 0x0000c000
+#define CNA_CONV_CON3_RESERVED_2__SHIFT 14
+static inline uint32_t CNA_CONV_CON3_RESERVED_2(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_RESERVED_2__SHIFT) & CNA_CONV_CON3_RESERVED_2__MASK;
+}
+#define CNA_CONV_CON3_DECONV_Y_STRIDE__MASK 0x00003800
+#define CNA_CONV_CON3_DECONV_Y_STRIDE__SHIFT 11
+static inline uint32_t CNA_CONV_CON3_DECONV_Y_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_DECONV_Y_STRIDE__SHIFT) & CNA_CONV_CON3_DECONV_Y_STRIDE__MASK;
+}
+#define CNA_CONV_CON3_DECONV_X_STRIDE__MASK 0x00000700
+#define CNA_CONV_CON3_DECONV_X_STRIDE__SHIFT 8
+static inline uint32_t CNA_CONV_CON3_DECONV_X_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_DECONV_X_STRIDE__SHIFT) & CNA_CONV_CON3_DECONV_X_STRIDE__MASK;
+}
+#define CNA_CONV_CON3_RESERVED_3__MASK 0x000000c0
+#define CNA_CONV_CON3_RESERVED_3__SHIFT 6
+static inline uint32_t CNA_CONV_CON3_RESERVED_3(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_RESERVED_3__SHIFT) & CNA_CONV_CON3_RESERVED_3__MASK;
+}
+#define CNA_CONV_CON3_CONV_Y_STRIDE__MASK 0x00000038
+#define CNA_CONV_CON3_CONV_Y_STRIDE__SHIFT 3
+static inline uint32_t CNA_CONV_CON3_CONV_Y_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_CONV_Y_STRIDE__SHIFT) & CNA_CONV_CON3_CONV_Y_STRIDE__MASK;
+}
+#define CNA_CONV_CON3_CONV_X_STRIDE__MASK 0x00000007
+#define CNA_CONV_CON3_CONV_X_STRIDE__SHIFT 0
+static inline uint32_t CNA_CONV_CON3_CONV_X_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_CONV_X_STRIDE__SHIFT) & CNA_CONV_CON3_CONV_X_STRIDE__MASK;
+}
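+
Every field in this generated header follows the same offset/__MASK/__SHIFT/packer pattern, so multi-field registers are programmed by OR-ing the inline helpers together; each helper shifts its value into place and masks off anything out of range. A small sketch packing a 2x2 convolution stride and 2x dilation into CNA_CONV_CON3 (the macros are the ones defined above; the values and the writel() itself are illustrative only):

/* Illustrative packing of CONV_CON3; field values are made up. */
u32 con3 = CNA_CONV_CON3_CONV_X_STRIDE(2) |
	   CNA_CONV_CON3_CONV_Y_STRIDE(2) |
	   CNA_CONV_CON3_ATROUS_X_DILATION(1) |
	   CNA_CONV_CON3_ATROUS_Y_DILATION(1);

writel(con3, base + REG_CNA_CONV_CON3);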
+
+#define REG_CNA_DATA_SIZE0 0x00001020
+#define CNA_DATA_SIZE0_RESERVED_0__MASK 0xf8000000
+#define CNA_DATA_SIZE0_RESERVED_0__SHIFT 27
+static inline uint32_t CNA_DATA_SIZE0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE0_RESERVED_0__SHIFT) & CNA_DATA_SIZE0_RESERVED_0__MASK;
+}
+#define CNA_DATA_SIZE0_DATAIN_WIDTH__MASK 0x07ff0000
+#define CNA_DATA_SIZE0_DATAIN_WIDTH__SHIFT 16
+static inline uint32_t CNA_DATA_SIZE0_DATAIN_WIDTH(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE0_DATAIN_WIDTH__SHIFT) & CNA_DATA_SIZE0_DATAIN_WIDTH__MASK;
+}
+#define CNA_DATA_SIZE0_RESERVED_1__MASK 0x0000f800
+#define CNA_DATA_SIZE0_RESERVED_1__SHIFT 11
+static inline uint32_t CNA_DATA_SIZE0_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE0_RESERVED_1__SHIFT) & CNA_DATA_SIZE0_RESERVED_1__MASK;
+}
+#define CNA_DATA_SIZE0_DATAIN_HEIGHT__MASK 0x000007ff
+#define CNA_DATA_SIZE0_DATAIN_HEIGHT__SHIFT 0
+static inline uint32_t CNA_DATA_SIZE0_DATAIN_HEIGHT(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE0_DATAIN_HEIGHT__SHIFT) & CNA_DATA_SIZE0_DATAIN_HEIGHT__MASK;
+}
+
+#define REG_CNA_DATA_SIZE1 0x00001024
+#define CNA_DATA_SIZE1_RESERVED_0__MASK 0xc0000000
+#define CNA_DATA_SIZE1_RESERVED_0__SHIFT 30
+static inline uint32_t CNA_DATA_SIZE1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE1_RESERVED_0__SHIFT) & CNA_DATA_SIZE1_RESERVED_0__MASK;
+}
+#define CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL__MASK 0x3fff0000
+#define CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL__SHIFT 16
+static inline uint32_t CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL__SHIFT) & CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL__MASK;
+}
+#define CNA_DATA_SIZE1_DATAIN_CHANNEL__MASK 0x0000ffff
+#define CNA_DATA_SIZE1_DATAIN_CHANNEL__SHIFT 0
+static inline uint32_t CNA_DATA_SIZE1_DATAIN_CHANNEL(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE1_DATAIN_CHANNEL__SHIFT) & CNA_DATA_SIZE1_DATAIN_CHANNEL__MASK;
+}
+
+#define REG_CNA_DATA_SIZE2 0x00001028
+#define CNA_DATA_SIZE2_RESERVED_0__MASK 0xfffff800
+#define CNA_DATA_SIZE2_RESERVED_0__SHIFT 11
+static inline uint32_t CNA_DATA_SIZE2_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE2_RESERVED_0__SHIFT) & CNA_DATA_SIZE2_RESERVED_0__MASK;
+}
+#define CNA_DATA_SIZE2_DATAOUT_WIDTH__MASK 0x000007ff
+#define CNA_DATA_SIZE2_DATAOUT_WIDTH__SHIFT 0
+static inline uint32_t CNA_DATA_SIZE2_DATAOUT_WIDTH(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE2_DATAOUT_WIDTH__SHIFT) & CNA_DATA_SIZE2_DATAOUT_WIDTH__MASK;
+}
+
+#define REG_CNA_DATA_SIZE3 0x0000102c
+#define CNA_DATA_SIZE3_RESERVED_0__MASK 0xff000000
+#define CNA_DATA_SIZE3_RESERVED_0__SHIFT 24
+static inline uint32_t CNA_DATA_SIZE3_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE3_RESERVED_0__SHIFT) & CNA_DATA_SIZE3_RESERVED_0__MASK;
+}
+#define CNA_DATA_SIZE3_SURF_MODE__MASK 0x00c00000
+#define CNA_DATA_SIZE3_SURF_MODE__SHIFT 22
+static inline uint32_t CNA_DATA_SIZE3_SURF_MODE(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE3_SURF_MODE__SHIFT) & CNA_DATA_SIZE3_SURF_MODE__MASK;
+}
+#define CNA_DATA_SIZE3_DATAOUT_ATOMICS__MASK 0x003fffff
+#define CNA_DATA_SIZE3_DATAOUT_ATOMICS__SHIFT 0
+static inline uint32_t CNA_DATA_SIZE3_DATAOUT_ATOMICS(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE3_DATAOUT_ATOMICS__SHIFT) & CNA_DATA_SIZE3_DATAOUT_ATOMICS__MASK;
+}
+
+#define REG_CNA_WEIGHT_SIZE0 0x00001030
+#define CNA_WEIGHT_SIZE0_WEIGHT_BYTES__MASK 0xffffffff
+#define CNA_WEIGHT_SIZE0_WEIGHT_BYTES__SHIFT 0
+static inline uint32_t CNA_WEIGHT_SIZE0_WEIGHT_BYTES(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE0_WEIGHT_BYTES__SHIFT) & CNA_WEIGHT_SIZE0_WEIGHT_BYTES__MASK;
+}
+
+#define REG_CNA_WEIGHT_SIZE1 0x00001034
+#define CNA_WEIGHT_SIZE1_RESERVED_0__MASK 0xfff80000
+#define CNA_WEIGHT_SIZE1_RESERVED_0__SHIFT 19
+static inline uint32_t CNA_WEIGHT_SIZE1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE1_RESERVED_0__SHIFT) & CNA_WEIGHT_SIZE1_RESERVED_0__MASK;
+}
+#define CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL__MASK 0x0007ffff
+#define CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL__SHIFT 0
+static inline uint32_t CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL__SHIFT) & CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL__MASK;
+}
+
+#define REG_CNA_WEIGHT_SIZE2 0x00001038
+#define CNA_WEIGHT_SIZE2_RESERVED_0__MASK 0xe0000000
+#define CNA_WEIGHT_SIZE2_RESERVED_0__SHIFT 29
+static inline uint32_t CNA_WEIGHT_SIZE2_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_RESERVED_0__SHIFT) & CNA_WEIGHT_SIZE2_RESERVED_0__MASK;
+}
+#define CNA_WEIGHT_SIZE2_WEIGHT_WIDTH__MASK 0x1f000000
+#define CNA_WEIGHT_SIZE2_WEIGHT_WIDTH__SHIFT 24
+static inline uint32_t CNA_WEIGHT_SIZE2_WEIGHT_WIDTH(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_WEIGHT_WIDTH__SHIFT) & CNA_WEIGHT_SIZE2_WEIGHT_WIDTH__MASK;
+}
+#define CNA_WEIGHT_SIZE2_RESERVED_1__MASK 0x00e00000
+#define CNA_WEIGHT_SIZE2_RESERVED_1__SHIFT 21
+static inline uint32_t CNA_WEIGHT_SIZE2_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_RESERVED_1__SHIFT) & CNA_WEIGHT_SIZE2_RESERVED_1__MASK;
+}
+#define CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT__MASK 0x001f0000
+#define CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT__SHIFT 16
+static inline uint32_t CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT__SHIFT) & CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT__MASK;
+}
+#define CNA_WEIGHT_SIZE2_RESERVED_2__MASK 0x0000c000
+#define CNA_WEIGHT_SIZE2_RESERVED_2__SHIFT 14
+static inline uint32_t CNA_WEIGHT_SIZE2_RESERVED_2(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_RESERVED_2__SHIFT) & CNA_WEIGHT_SIZE2_RESERVED_2__MASK;
+}
+#define CNA_WEIGHT_SIZE2_WEIGHT_KERNELS__MASK 0x00003fff
+#define CNA_WEIGHT_SIZE2_WEIGHT_KERNELS__SHIFT 0
+static inline uint32_t CNA_WEIGHT_SIZE2_WEIGHT_KERNELS(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_WEIGHT_KERNELS__SHIFT) & CNA_WEIGHT_SIZE2_WEIGHT_KERNELS__MASK;
+}
+
+#define REG_CNA_CBUF_CON0 0x00001040
+#define CNA_CBUF_CON0_RESERVED_0__MASK 0xffffc000
+#define CNA_CBUF_CON0_RESERVED_0__SHIFT 14
+static inline uint32_t CNA_CBUF_CON0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_RESERVED_0__SHIFT) & CNA_CBUF_CON0_RESERVED_0__MASK;
+}
+#define CNA_CBUF_CON0_WEIGHT_REUSE__MASK 0x00002000
+#define CNA_CBUF_CON0_WEIGHT_REUSE__SHIFT 13
+static inline uint32_t CNA_CBUF_CON0_WEIGHT_REUSE(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_WEIGHT_REUSE__SHIFT) & CNA_CBUF_CON0_WEIGHT_REUSE__MASK;
+}
+#define CNA_CBUF_CON0_DATA_REUSE__MASK 0x00001000
+#define CNA_CBUF_CON0_DATA_REUSE__SHIFT 12
+static inline uint32_t CNA_CBUF_CON0_DATA_REUSE(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_DATA_REUSE__SHIFT) & CNA_CBUF_CON0_DATA_REUSE__MASK;
+}
+#define CNA_CBUF_CON0_RESERVED_1__MASK 0x00000800
+#define CNA_CBUF_CON0_RESERVED_1__SHIFT 11
+static inline uint32_t CNA_CBUF_CON0_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_RESERVED_1__SHIFT) & CNA_CBUF_CON0_RESERVED_1__MASK;
+}
+#define CNA_CBUF_CON0_FC_DATA_BANK__MASK 0x00000700
+#define CNA_CBUF_CON0_FC_DATA_BANK__SHIFT 8
+static inline uint32_t CNA_CBUF_CON0_FC_DATA_BANK(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_FC_DATA_BANK__SHIFT) & CNA_CBUF_CON0_FC_DATA_BANK__MASK;
+}
+#define CNA_CBUF_CON0_WEIGHT_BANK__MASK 0x000000f0
+#define CNA_CBUF_CON0_WEIGHT_BANK__SHIFT 4
+static inline uint32_t CNA_CBUF_CON0_WEIGHT_BANK(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_WEIGHT_BANK__SHIFT) & CNA_CBUF_CON0_WEIGHT_BANK__MASK;
+}
+#define CNA_CBUF_CON0_DATA_BANK__MASK 0x0000000f
+#define CNA_CBUF_CON0_DATA_BANK__SHIFT 0
+static inline uint32_t CNA_CBUF_CON0_DATA_BANK(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_DATA_BANK__SHIFT) & CNA_CBUF_CON0_DATA_BANK__MASK;
+}
+
+#define REG_CNA_CBUF_CON1 0x00001044
+#define CNA_CBUF_CON1_RESERVED_0__MASK 0xffffc000
+#define CNA_CBUF_CON1_RESERVED_0__SHIFT 14
+static inline uint32_t CNA_CBUF_CON1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON1_RESERVED_0__SHIFT) & CNA_CBUF_CON1_RESERVED_0__MASK;
+}
+#define CNA_CBUF_CON1_DATA_ENTRIES__MASK 0x00003fff
+#define CNA_CBUF_CON1_DATA_ENTRIES__SHIFT 0
+static inline uint32_t CNA_CBUF_CON1_DATA_ENTRIES(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON1_DATA_ENTRIES__SHIFT) & CNA_CBUF_CON1_DATA_ENTRIES__MASK;
+}
+
+#define REG_CNA_CVT_CON0 0x0000104c
+#define CNA_CVT_CON0_RESERVED_0__MASK 0xf0000000
+#define CNA_CVT_CON0_RESERVED_0__SHIFT 28
+static inline uint32_t CNA_CVT_CON0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_RESERVED_0__SHIFT) & CNA_CVT_CON0_RESERVED_0__MASK;
+}
+#define CNA_CVT_CON0_CVT_TRUNCATE_3__MASK 0x0fc00000
+#define CNA_CVT_CON0_CVT_TRUNCATE_3__SHIFT 22
+static inline uint32_t CNA_CVT_CON0_CVT_TRUNCATE_3(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_TRUNCATE_3__SHIFT) & CNA_CVT_CON0_CVT_TRUNCATE_3__MASK;
+}
+#define CNA_CVT_CON0_CVT_TRUNCATE_2__MASK 0x003f0000
+#define CNA_CVT_CON0_CVT_TRUNCATE_2__SHIFT 16
+static inline uint32_t CNA_CVT_CON0_CVT_TRUNCATE_2(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_TRUNCATE_2__SHIFT) & CNA_CVT_CON0_CVT_TRUNCATE_2__MASK;
+}
+#define CNA_CVT_CON0_CVT_TRUNCATE_1__MASK 0x0000fc00
+#define CNA_CVT_CON0_CVT_TRUNCATE_1__SHIFT 10
+static inline uint32_t CNA_CVT_CON0_CVT_TRUNCATE_1(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_TRUNCATE_1__SHIFT) & CNA_CVT_CON0_CVT_TRUNCATE_1__MASK;
+}
+#define CNA_CVT_CON0_CVT_TRUNCATE_0__MASK 0x000003f0
+#define CNA_CVT_CON0_CVT_TRUNCATE_0__SHIFT 4
+static inline uint32_t CNA_CVT_CON0_CVT_TRUNCATE_0(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_TRUNCATE_0__SHIFT) & CNA_CVT_CON0_CVT_TRUNCATE_0__MASK;
+}
+#define CNA_CVT_CON0_DATA_SIGN__MASK 0x00000008
+#define CNA_CVT_CON0_DATA_SIGN__SHIFT 3
+static inline uint32_t CNA_CVT_CON0_DATA_SIGN(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_DATA_SIGN__SHIFT) & CNA_CVT_CON0_DATA_SIGN__MASK;
+}
+#define CNA_CVT_CON0_ROUND_TYPE__MASK 0x00000004
+#define CNA_CVT_CON0_ROUND_TYPE__SHIFT 2
+static inline uint32_t CNA_CVT_CON0_ROUND_TYPE(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_ROUND_TYPE__SHIFT) & CNA_CVT_CON0_ROUND_TYPE__MASK;
+}
+#define CNA_CVT_CON0_CVT_TYPE__MASK 0x00000002
+#define CNA_CVT_CON0_CVT_TYPE__SHIFT 1
+static inline uint32_t CNA_CVT_CON0_CVT_TYPE(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_TYPE__SHIFT) & CNA_CVT_CON0_CVT_TYPE__MASK;
+}
+#define CNA_CVT_CON0_CVT_BYPASS__MASK 0x00000001
+#define CNA_CVT_CON0_CVT_BYPASS__SHIFT 0
+static inline uint32_t CNA_CVT_CON0_CVT_BYPASS(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_BYPASS__SHIFT) & CNA_CVT_CON0_CVT_BYPASS__MASK;
+}
+
+#define REG_CNA_CVT_CON1 0x00001050
+#define CNA_CVT_CON1_CVT_SCALE0__MASK 0xffff0000
+#define CNA_CVT_CON1_CVT_SCALE0__SHIFT 16
+static inline uint32_t CNA_CVT_CON1_CVT_SCALE0(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON1_CVT_SCALE0__SHIFT) & CNA_CVT_CON1_CVT_SCALE0__MASK;
+}
+#define CNA_CVT_CON1_CVT_OFFSET0__MASK 0x0000ffff
+#define CNA_CVT_CON1_CVT_OFFSET0__SHIFT 0
+static inline uint32_t CNA_CVT_CON1_CVT_OFFSET0(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON1_CVT_OFFSET0__SHIFT) & CNA_CVT_CON1_CVT_OFFSET0__MASK;
+}
+
+#define REG_CNA_CVT_CON2 0x00001054
+#define CNA_CVT_CON2_CVT_SCALE1__MASK 0xffff0000
+#define CNA_CVT_CON2_CVT_SCALE1__SHIFT 16
+static inline uint32_t CNA_CVT_CON2_CVT_SCALE1(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON2_CVT_SCALE1__SHIFT) & CNA_CVT_CON2_CVT_SCALE1__MASK;
+}
+#define CNA_CVT_CON2_CVT_OFFSET1__MASK 0x0000ffff
+#define CNA_CVT_CON2_CVT_OFFSET1__SHIFT 0
+static inline uint32_t CNA_CVT_CON2_CVT_OFFSET1(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON2_CVT_OFFSET1__SHIFT) & CNA_CVT_CON2_CVT_OFFSET1__MASK;
+}
+
+#define REG_CNA_CVT_CON3 0x00001058
+#define CNA_CVT_CON3_CVT_SCALE2__MASK 0xffff0000
+#define CNA_CVT_CON3_CVT_SCALE2__SHIFT 16
+static inline uint32_t CNA_CVT_CON3_CVT_SCALE2(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON3_CVT_SCALE2__SHIFT) & CNA_CVT_CON3_CVT_SCALE2__MASK;
+}
+#define CNA_CVT_CON3_CVT_OFFSET2__MASK 0x0000ffff
+#define CNA_CVT_CON3_CVT_OFFSET2__SHIFT 0
+static inline uint32_t CNA_CVT_CON3_CVT_OFFSET2(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON3_CVT_OFFSET2__SHIFT) & CNA_CVT_CON3_CVT_OFFSET2__MASK;
+}
+
+#define REG_CNA_CVT_CON4 0x0000105c
+#define CNA_CVT_CON4_CVT_SCALE3__MASK 0xffff0000
+#define CNA_CVT_CON4_CVT_SCALE3__SHIFT 16
+static inline uint32_t CNA_CVT_CON4_CVT_SCALE3(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON4_CVT_SCALE3__SHIFT) & CNA_CVT_CON4_CVT_SCALE3__MASK;
+}
+#define CNA_CVT_CON4_CVT_OFFSET3__MASK 0x0000ffff
+#define CNA_CVT_CON4_CVT_OFFSET3__SHIFT 0
+static inline uint32_t CNA_CVT_CON4_CVT_OFFSET3(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON4_CVT_OFFSET3__SHIFT) & CNA_CVT_CON4_CVT_OFFSET3__MASK;
+}
+
+#define REG_CNA_FC_CON0 0x00001060
+#define CNA_FC_CON0_FC_SKIP_DATA__MASK 0xffff0000
+#define CNA_FC_CON0_FC_SKIP_DATA__SHIFT 16
+static inline uint32_t CNA_FC_CON0_FC_SKIP_DATA(uint32_t val)
+{
+ return ((val) << CNA_FC_CON0_FC_SKIP_DATA__SHIFT) & CNA_FC_CON0_FC_SKIP_DATA__MASK;
+}
+#define CNA_FC_CON0_RESERVED_0__MASK 0x0000fffe
+#define CNA_FC_CON0_RESERVED_0__SHIFT 1
+static inline uint32_t CNA_FC_CON0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_FC_CON0_RESERVED_0__SHIFT) & CNA_FC_CON0_RESERVED_0__MASK;
+}
+#define CNA_FC_CON0_FC_SKIP_EN__MASK 0x00000001
+#define CNA_FC_CON0_FC_SKIP_EN__SHIFT 0
+static inline uint32_t CNA_FC_CON0_FC_SKIP_EN(uint32_t val)
+{
+ return ((val) << CNA_FC_CON0_FC_SKIP_EN__SHIFT) & CNA_FC_CON0_FC_SKIP_EN__MASK;
+}
+
+#define REG_CNA_FC_CON1 0x00001064
+#define CNA_FC_CON1_RESERVED_0__MASK 0xfffe0000
+#define CNA_FC_CON1_RESERVED_0__SHIFT 17
+static inline uint32_t CNA_FC_CON1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_FC_CON1_RESERVED_0__SHIFT) & CNA_FC_CON1_RESERVED_0__MASK;
+}
+#define CNA_FC_CON1_DATA_OFFSET__MASK 0x0001ffff
+#define CNA_FC_CON1_DATA_OFFSET__SHIFT 0
+static inline uint32_t CNA_FC_CON1_DATA_OFFSET(uint32_t val)
+{
+ return ((val) << CNA_FC_CON1_DATA_OFFSET__SHIFT) & CNA_FC_CON1_DATA_OFFSET__MASK;
+}
+
+#define REG_CNA_PAD_CON0 0x00001068
+#define CNA_PAD_CON0_RESERVED_0__MASK 0xffffff00
+#define CNA_PAD_CON0_RESERVED_0__SHIFT 8
+static inline uint32_t CNA_PAD_CON0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_PAD_CON0_RESERVED_0__SHIFT) & CNA_PAD_CON0_RESERVED_0__MASK;
+}
+#define CNA_PAD_CON0_PAD_LEFT__MASK 0x000000f0
+#define CNA_PAD_CON0_PAD_LEFT__SHIFT 4
+static inline uint32_t CNA_PAD_CON0_PAD_LEFT(uint32_t val)
+{
+ return ((val) << CNA_PAD_CON0_PAD_LEFT__SHIFT) & CNA_PAD_CON0_PAD_LEFT__MASK;
+}
+#define CNA_PAD_CON0_PAD_TOP__MASK 0x0000000f
+#define CNA_PAD_CON0_PAD_TOP__SHIFT 0
+static inline uint32_t CNA_PAD_CON0_PAD_TOP(uint32_t val)
+{
+ return ((val) << CNA_PAD_CON0_PAD_TOP__SHIFT) & CNA_PAD_CON0_PAD_TOP__MASK;
+}
+
+#define REG_CNA_FEATURE_DATA_ADDR 0x00001070
+#define CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR__MASK 0xffffffff
+#define CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR__SHIFT 0
+static inline uint32_t CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR(uint32_t val)
+{
+ return ((val) << CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR__SHIFT) & CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR__MASK;
+}
+
+#define REG_CNA_FC_CON2 0x00001074
+#define CNA_FC_CON2_RESERVED_0__MASK 0xfffe0000
+#define CNA_FC_CON2_RESERVED_0__SHIFT 17
+static inline uint32_t CNA_FC_CON2_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_FC_CON2_RESERVED_0__SHIFT) & CNA_FC_CON2_RESERVED_0__MASK;
+}
+#define CNA_FC_CON2_WEIGHT_OFFSET__MASK 0x0001ffff
+#define CNA_FC_CON2_WEIGHT_OFFSET__SHIFT 0
+static inline uint32_t CNA_FC_CON2_WEIGHT_OFFSET(uint32_t val)
+{
+ return ((val) << CNA_FC_CON2_WEIGHT_OFFSET__SHIFT) & CNA_FC_CON2_WEIGHT_OFFSET__MASK;
+}
+
+#define REG_CNA_DMA_CON0 0x00001078
+#define CNA_DMA_CON0_OV4K_BYPASS__MASK 0x80000000
+#define CNA_DMA_CON0_OV4K_BYPASS__SHIFT 31
+static inline uint32_t CNA_DMA_CON0_OV4K_BYPASS(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON0_OV4K_BYPASS__SHIFT) & CNA_DMA_CON0_OV4K_BYPASS__MASK;
+}
+#define CNA_DMA_CON0_RESERVED_0__MASK 0x7ff00000
+#define CNA_DMA_CON0_RESERVED_0__SHIFT 20
+static inline uint32_t CNA_DMA_CON0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON0_RESERVED_0__SHIFT) & CNA_DMA_CON0_RESERVED_0__MASK;
+}
+#define CNA_DMA_CON0_WEIGHT_BURST_LEN__MASK 0x000f0000
+#define CNA_DMA_CON0_WEIGHT_BURST_LEN__SHIFT 16
+static inline uint32_t CNA_DMA_CON0_WEIGHT_BURST_LEN(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON0_WEIGHT_BURST_LEN__SHIFT) & CNA_DMA_CON0_WEIGHT_BURST_LEN__MASK;
+}
+#define CNA_DMA_CON0_RESERVED_1__MASK 0x0000fff0
+#define CNA_DMA_CON0_RESERVED_1__SHIFT 4
+static inline uint32_t CNA_DMA_CON0_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON0_RESERVED_1__SHIFT) & CNA_DMA_CON0_RESERVED_1__MASK;
+}
+#define CNA_DMA_CON0_DATA_BURST_LEN__MASK 0x0000000f
+#define CNA_DMA_CON0_DATA_BURST_LEN__SHIFT 0
+static inline uint32_t CNA_DMA_CON0_DATA_BURST_LEN(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON0_DATA_BURST_LEN__SHIFT) & CNA_DMA_CON0_DATA_BURST_LEN__MASK;
+}
+
+#define REG_CNA_DMA_CON1 0x0000107c
+#define CNA_DMA_CON1_RESERVED_0__MASK 0xf0000000
+#define CNA_DMA_CON1_RESERVED_0__SHIFT 28
+static inline uint32_t CNA_DMA_CON1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON1_RESERVED_0__SHIFT) & CNA_DMA_CON1_RESERVED_0__MASK;
+}
+#define CNA_DMA_CON1_LINE_STRIDE__MASK 0x0fffffff
+#define CNA_DMA_CON1_LINE_STRIDE__SHIFT 0
+static inline uint32_t CNA_DMA_CON1_LINE_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON1_LINE_STRIDE__SHIFT) & CNA_DMA_CON1_LINE_STRIDE__MASK;
+}
+
+#define REG_CNA_DMA_CON2 0x00001080
+#define CNA_DMA_CON2_RESERVED_0__MASK 0xf0000000
+#define CNA_DMA_CON2_RESERVED_0__SHIFT 28
+static inline uint32_t CNA_DMA_CON2_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON2_RESERVED_0__SHIFT) & CNA_DMA_CON2_RESERVED_0__MASK;
+}
+#define CNA_DMA_CON2_SURF_STRIDE__MASK 0x0fffffff
+#define CNA_DMA_CON2_SURF_STRIDE__SHIFT 0
+static inline uint32_t CNA_DMA_CON2_SURF_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON2_SURF_STRIDE__SHIFT) & CNA_DMA_CON2_SURF_STRIDE__MASK;
+}
+
+#define REG_CNA_FC_DATA_SIZE0 0x00001084
+#define CNA_FC_DATA_SIZE0_RESERVED_0__MASK 0xc0000000
+#define CNA_FC_DATA_SIZE0_RESERVED_0__SHIFT 30
+static inline uint32_t CNA_FC_DATA_SIZE0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE0_RESERVED_0__SHIFT) & CNA_FC_DATA_SIZE0_RESERVED_0__MASK;
+}
+#define CNA_FC_DATA_SIZE0_DMA_WIDTH__MASK 0x3fff0000
+#define CNA_FC_DATA_SIZE0_DMA_WIDTH__SHIFT 16
+static inline uint32_t CNA_FC_DATA_SIZE0_DMA_WIDTH(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE0_DMA_WIDTH__SHIFT) & CNA_FC_DATA_SIZE0_DMA_WIDTH__MASK;
+}
+#define CNA_FC_DATA_SIZE0_RESERVED_1__MASK 0x0000f800
+#define CNA_FC_DATA_SIZE0_RESERVED_1__SHIFT 11
+static inline uint32_t CNA_FC_DATA_SIZE0_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE0_RESERVED_1__SHIFT) & CNA_FC_DATA_SIZE0_RESERVED_1__MASK;
+}
+#define CNA_FC_DATA_SIZE0_DMA_HEIGHT__MASK 0x000007ff
+#define CNA_FC_DATA_SIZE0_DMA_HEIGHT__SHIFT 0
+static inline uint32_t CNA_FC_DATA_SIZE0_DMA_HEIGHT(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE0_DMA_HEIGHT__SHIFT) & CNA_FC_DATA_SIZE0_DMA_HEIGHT__MASK;
+}
+
+#define REG_CNA_FC_DATA_SIZE1 0x00001088
+#define CNA_FC_DATA_SIZE1_RESERVED_0__MASK 0xffff0000
+#define CNA_FC_DATA_SIZE1_RESERVED_0__SHIFT 16
+static inline uint32_t CNA_FC_DATA_SIZE1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE1_RESERVED_0__SHIFT) & CNA_FC_DATA_SIZE1_RESERVED_0__MASK;
+}
+#define CNA_FC_DATA_SIZE1_DMA_CHANNEL__MASK 0x0000ffff
+#define CNA_FC_DATA_SIZE1_DMA_CHANNEL__SHIFT 0
+static inline uint32_t CNA_FC_DATA_SIZE1_DMA_CHANNEL(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE1_DMA_CHANNEL__SHIFT) & CNA_FC_DATA_SIZE1_DMA_CHANNEL__MASK;
+}
+
+#define REG_CNA_CLK_GATE 0x00001090
+#define CNA_CLK_GATE_RESERVED_0__MASK 0xffffffe0
+#define CNA_CLK_GATE_RESERVED_0__SHIFT 5
+static inline uint32_t CNA_CLK_GATE_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_RESERVED_0__SHIFT) & CNA_CLK_GATE_RESERVED_0__MASK;
+}
+#define CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE__MASK 0x00000010
+#define CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE__SHIFT 4
+static inline uint32_t CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE__SHIFT) & CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE__MASK;
+}
+#define CNA_CLK_GATE_RESERVED_1__MASK 0x00000008
+#define CNA_CLK_GATE_RESERVED_1__SHIFT 3
+static inline uint32_t CNA_CLK_GATE_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_RESERVED_1__SHIFT) & CNA_CLK_GATE_RESERVED_1__MASK;
+}
+#define CNA_CLK_GATE_CSC_DISABLE_CLKGATE__MASK 0x00000004
+#define CNA_CLK_GATE_CSC_DISABLE_CLKGATE__SHIFT 2
+static inline uint32_t CNA_CLK_GATE_CSC_DISABLE_CLKGATE(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_CSC_DISABLE_CLKGATE__SHIFT) & CNA_CLK_GATE_CSC_DISABLE_CLKGATE__MASK;
+}
+#define CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE__MASK 0x00000002
+#define CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE__SHIFT 1
+static inline uint32_t CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE__SHIFT) & CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE__MASK;
+}
+#define CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE__MASK 0x00000001
+#define CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE__SHIFT 0
+static inline uint32_t CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE__SHIFT) & CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE__MASK;
+}
+
+#define REG_CNA_DCOMP_CTRL 0x00001100
+#define CNA_DCOMP_CTRL_RESERVED_0__MASK 0xfffffff0
+#define CNA_DCOMP_CTRL_RESERVED_0__SHIFT 4
+static inline uint32_t CNA_DCOMP_CTRL_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_CTRL_RESERVED_0__SHIFT) & CNA_DCOMP_CTRL_RESERVED_0__MASK;
+}
+#define CNA_DCOMP_CTRL_WT_DEC_BYPASS__MASK 0x00000008
+#define CNA_DCOMP_CTRL_WT_DEC_BYPASS__SHIFT 3
+static inline uint32_t CNA_DCOMP_CTRL_WT_DEC_BYPASS(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_CTRL_WT_DEC_BYPASS__SHIFT) & CNA_DCOMP_CTRL_WT_DEC_BYPASS__MASK;
+}
+#define CNA_DCOMP_CTRL_DECOMP_CONTROL__MASK 0x00000007
+#define CNA_DCOMP_CTRL_DECOMP_CONTROL__SHIFT 0
+static inline uint32_t CNA_DCOMP_CTRL_DECOMP_CONTROL(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_CTRL_DECOMP_CONTROL__SHIFT) & CNA_DCOMP_CTRL_DECOMP_CONTROL__MASK;
+}
+
+#define REG_CNA_DCOMP_REGNUM 0x00001104
+#define CNA_DCOMP_REGNUM_DCOMP_REGNUM__MASK 0xffffffff
+#define CNA_DCOMP_REGNUM_DCOMP_REGNUM__SHIFT 0
+static inline uint32_t CNA_DCOMP_REGNUM_DCOMP_REGNUM(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_REGNUM_DCOMP_REGNUM__SHIFT) & CNA_DCOMP_REGNUM_DCOMP_REGNUM__MASK;
+}
+
+#define REG_CNA_DCOMP_ADDR0 0x00001110
+#define CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0__MASK 0xffffffff
+#define CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0__SHIFT 0
+static inline uint32_t CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0__SHIFT) & CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT0 0x00001140
+#define CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0__SHIFT) & CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT1 0x00001144
+#define CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1__SHIFT) & CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT2 0x00001148
+#define CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2__SHIFT) & CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT3 0x0000114c
+#define CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3__SHIFT) & CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT4 0x00001150
+#define CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4__SHIFT) & CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT5 0x00001154
+#define CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5__SHIFT) & CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT6 0x00001158
+#define CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6__SHIFT) & CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT7 0x0000115c
+#define CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7__SHIFT) & CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT8 0x00001160
+#define CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8__SHIFT) & CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT9 0x00001164
+#define CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9__SHIFT) & CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT10 0x00001168
+#define CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10__SHIFT) & CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT11 0x0000116c
+#define CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11__SHIFT) & CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT12 0x00001170
+#define CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12__SHIFT) & CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT13 0x00001174
+#define CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13__SHIFT) & CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT14 0x00001178
+#define CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14__SHIFT) & CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT15 0x0000117c
+#define CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15__SHIFT) & CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15__MASK;
+}
+
+#define REG_CNA_CVT_CON5 0x00001180
+#define CNA_CVT_CON5_PER_CHANNEL_CVT_EN__MASK 0xffffffff
+#define CNA_CVT_CON5_PER_CHANNEL_CVT_EN__SHIFT 0
+static inline uint32_t CNA_CVT_CON5_PER_CHANNEL_CVT_EN(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON5_PER_CHANNEL_CVT_EN__SHIFT) & CNA_CVT_CON5_PER_CHANNEL_CVT_EN__MASK;
+}
+
+#define REG_CNA_PAD_CON1 0x00001184
+#define CNA_PAD_CON1_PAD_VALUE__MASK 0xffffffff
+#define CNA_PAD_CON1_PAD_VALUE__SHIFT 0
+static inline uint32_t CNA_PAD_CON1_PAD_VALUE(uint32_t val)
+{
+ return ((val) << CNA_PAD_CON1_PAD_VALUE__SHIFT) & CNA_PAD_CON1_PAD_VALUE__MASK;
+}
+
+#define REG_CORE_S_STATUS 0x00003000
+#define CORE_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define CORE_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t CORE_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_S_STATUS_RESERVED_0__SHIFT) & CORE_S_STATUS_RESERVED_0__MASK;
+}
+#define CORE_S_STATUS_STATUS_1__MASK 0x00030000
+#define CORE_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t CORE_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << CORE_S_STATUS_STATUS_1__SHIFT) & CORE_S_STATUS_STATUS_1__MASK;
+}
+#define CORE_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define CORE_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t CORE_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << CORE_S_STATUS_RESERVED_1__SHIFT) & CORE_S_STATUS_RESERVED_1__MASK;
+}
+#define CORE_S_STATUS_STATUS_0__MASK 0x00000003
+#define CORE_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t CORE_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << CORE_S_STATUS_STATUS_0__SHIFT) & CORE_S_STATUS_STATUS_0__MASK;
+}
+
+#define REG_CORE_S_POINTER 0x00003004
+#define CORE_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define CORE_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t CORE_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_RESERVED_0__SHIFT) & CORE_S_POINTER_RESERVED_0__MASK;
+}
+#define CORE_S_POINTER_EXECUTER__MASK 0x00010000
+#define CORE_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t CORE_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_EXECUTER__SHIFT) & CORE_S_POINTER_EXECUTER__MASK;
+}
+#define CORE_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define CORE_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t CORE_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_RESERVED_1__SHIFT) & CORE_S_POINTER_RESERVED_1__MASK;
+}
+#define CORE_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define CORE_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t CORE_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & CORE_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define CORE_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define CORE_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t CORE_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_POINTER_PP_CLEAR__SHIFT) & CORE_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define CORE_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define CORE_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t CORE_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_POINTER_PP_MODE__SHIFT) & CORE_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define CORE_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define CORE_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t CORE_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_EXECUTER_PP_EN__SHIFT) & CORE_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define CORE_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define CORE_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t CORE_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_POINTER_PP_EN__SHIFT) & CORE_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define CORE_S_POINTER_POINTER__MASK 0x00000001
+#define CORE_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t CORE_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_POINTER__SHIFT) & CORE_S_POINTER_POINTER__MASK;
+}
+
+#define REG_CORE_OPERATION_ENABLE 0x00003008
+#define CORE_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define CORE_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t CORE_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_OPERATION_ENABLE_RESERVED_0__SHIFT) & CORE_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define CORE_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define CORE_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t CORE_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << CORE_OPERATION_ENABLE_OP_EN__SHIFT) & CORE_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_CORE_MAC_GATING 0x0000300c
+#define CORE_MAC_GATING_RESERVED_0__MASK 0xf8000000
+#define CORE_MAC_GATING_RESERVED_0__SHIFT 27
+static inline uint32_t CORE_MAC_GATING_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_MAC_GATING_RESERVED_0__SHIFT) & CORE_MAC_GATING_RESERVED_0__MASK;
+}
+#define CORE_MAC_GATING_SLCG_OP_EN__MASK 0x07ffffff
+#define CORE_MAC_GATING_SLCG_OP_EN__SHIFT 0
+static inline uint32_t CORE_MAC_GATING_SLCG_OP_EN(uint32_t val)
+{
+ return ((val) << CORE_MAC_GATING_SLCG_OP_EN__SHIFT) & CORE_MAC_GATING_SLCG_OP_EN__MASK;
+}
+
+#define REG_CORE_MISC_CFG 0x00003010
+#define CORE_MISC_CFG_RESERVED_0__MASK 0xfff00000
+#define CORE_MISC_CFG_RESERVED_0__SHIFT 20
+static inline uint32_t CORE_MISC_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_RESERVED_0__SHIFT) & CORE_MISC_CFG_RESERVED_0__MASK;
+}
+#define CORE_MISC_CFG_SOFT_GATING__MASK 0x000fc000
+#define CORE_MISC_CFG_SOFT_GATING__SHIFT 14
+static inline uint32_t CORE_MISC_CFG_SOFT_GATING(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_SOFT_GATING__SHIFT) & CORE_MISC_CFG_SOFT_GATING__MASK;
+}
+#define CORE_MISC_CFG_RESERVED_1__MASK 0x00003800
+#define CORE_MISC_CFG_RESERVED_1__SHIFT 11
+static inline uint32_t CORE_MISC_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_RESERVED_1__SHIFT) & CORE_MISC_CFG_RESERVED_1__MASK;
+}
+#define CORE_MISC_CFG_PROC_PRECISION__MASK 0x00000700
+#define CORE_MISC_CFG_PROC_PRECISION__SHIFT 8
+static inline uint32_t CORE_MISC_CFG_PROC_PRECISION(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_PROC_PRECISION__SHIFT) & CORE_MISC_CFG_PROC_PRECISION__MASK;
+}
+#define CORE_MISC_CFG_RESERVED_2__MASK 0x000000fc
+#define CORE_MISC_CFG_RESERVED_2__SHIFT 2
+static inline uint32_t CORE_MISC_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_RESERVED_2__SHIFT) & CORE_MISC_CFG_RESERVED_2__MASK;
+}
+#define CORE_MISC_CFG_DW_EN__MASK 0x00000002
+#define CORE_MISC_CFG_DW_EN__SHIFT 1
+static inline uint32_t CORE_MISC_CFG_DW_EN(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_DW_EN__SHIFT) & CORE_MISC_CFG_DW_EN__MASK;
+}
+#define CORE_MISC_CFG_QD_EN__MASK 0x00000001
+#define CORE_MISC_CFG_QD_EN__SHIFT 0
+static inline uint32_t CORE_MISC_CFG_QD_EN(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_QD_EN__SHIFT) & CORE_MISC_CFG_QD_EN__MASK;
+}
+
+#define REG_CORE_DATAOUT_SIZE_0 0x00003014
+#define CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT__MASK 0xffff0000
+#define CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT__SHIFT 16
+static inline uint32_t CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT(uint32_t val)
+{
+ return ((val) << CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT__SHIFT) & CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT__MASK;
+}
+#define CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH__MASK 0x0000ffff
+#define CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH__SHIFT 0
+static inline uint32_t CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH(uint32_t val)
+{
+ return ((val) << CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH__SHIFT) & CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH__MASK;
+}
+
+#define REG_CORE_DATAOUT_SIZE_1 0x00003018
+#define CORE_DATAOUT_SIZE_1_RESERVED_0__MASK 0xffff0000
+#define CORE_DATAOUT_SIZE_1_RESERVED_0__SHIFT 16
+static inline uint32_t CORE_DATAOUT_SIZE_1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_DATAOUT_SIZE_1_RESERVED_0__SHIFT) & CORE_DATAOUT_SIZE_1_RESERVED_0__MASK;
+}
+#define CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL__MASK 0x0000ffff
+#define CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL__SHIFT 0
+static inline uint32_t CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL(uint32_t val)
+{
+ return ((val) << CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL__SHIFT) & CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL__MASK;
+}
+
+#define REG_CORE_CLIP_TRUNCATE 0x0000301c
+#define CORE_CLIP_TRUNCATE_RESERVED_0__MASK 0xffffff80
+#define CORE_CLIP_TRUNCATE_RESERVED_0__SHIFT 7
+static inline uint32_t CORE_CLIP_TRUNCATE_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_CLIP_TRUNCATE_RESERVED_0__SHIFT) & CORE_CLIP_TRUNCATE_RESERVED_0__MASK;
+}
+#define CORE_CLIP_TRUNCATE_ROUND_TYPE__MASK 0x00000040
+#define CORE_CLIP_TRUNCATE_ROUND_TYPE__SHIFT 6
+static inline uint32_t CORE_CLIP_TRUNCATE_ROUND_TYPE(uint32_t val)
+{
+ return ((val) << CORE_CLIP_TRUNCATE_ROUND_TYPE__SHIFT) & CORE_CLIP_TRUNCATE_ROUND_TYPE__MASK;
+}
+#define CORE_CLIP_TRUNCATE_RESERVED_1__MASK 0x00000020
+#define CORE_CLIP_TRUNCATE_RESERVED_1__SHIFT 5
+static inline uint32_t CORE_CLIP_TRUNCATE_RESERVED_1(uint32_t val)
+{
+ return ((val) << CORE_CLIP_TRUNCATE_RESERVED_1__SHIFT) & CORE_CLIP_TRUNCATE_RESERVED_1__MASK;
+}
+#define CORE_CLIP_TRUNCATE_CLIP_TRUNCATE__MASK 0x0000001f
+#define CORE_CLIP_TRUNCATE_CLIP_TRUNCATE__SHIFT 0
+static inline uint32_t CORE_CLIP_TRUNCATE_CLIP_TRUNCATE(uint32_t val)
+{
+ return ((val) << CORE_CLIP_TRUNCATE_CLIP_TRUNCATE__SHIFT) & CORE_CLIP_TRUNCATE_CLIP_TRUNCATE__MASK;
+}
+
+#define REG_DPU_S_STATUS 0x00004000
+#define DPU_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define DPU_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t DPU_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_S_STATUS_RESERVED_0__SHIFT) & DPU_S_STATUS_RESERVED_0__MASK;
+}
+#define DPU_S_STATUS_STATUS_1__MASK 0x00030000
+#define DPU_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t DPU_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << DPU_S_STATUS_STATUS_1__SHIFT) & DPU_S_STATUS_STATUS_1__MASK;
+}
+#define DPU_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define DPU_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t DPU_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_S_STATUS_RESERVED_1__SHIFT) & DPU_S_STATUS_RESERVED_1__MASK;
+}
+#define DPU_S_STATUS_STATUS_0__MASK 0x00000003
+#define DPU_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t DPU_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << DPU_S_STATUS_STATUS_0__SHIFT) & DPU_S_STATUS_STATUS_0__MASK;
+}
+
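+/*
+ * Editorial sketch: the generated helpers only pack fields; reading a
+ * field back out of a value (e.g. the status word above) is the inverse
+ * operation using the same mask/shift pair.  readl() and 'base' are
+ * assumed here, as in the note near the top of this header:
+ *
+ *   uint32_t sts = readl(base + REG_DPU_S_STATUS);
+ *   uint32_t s0 = (sts & DPU_S_STATUS_STATUS_0__MASK) >>
+ *                 DPU_S_STATUS_STATUS_0__SHIFT;
+ */
+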
+#define REG_DPU_S_POINTER 0x00004004
+#define DPU_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define DPU_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t DPU_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_RESERVED_0__SHIFT) & DPU_S_POINTER_RESERVED_0__MASK;
+}
+#define DPU_S_POINTER_EXECUTER__MASK 0x00010000
+#define DPU_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t DPU_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_EXECUTER__SHIFT) & DPU_S_POINTER_EXECUTER__MASK;
+}
+#define DPU_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define DPU_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t DPU_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_RESERVED_1__SHIFT) & DPU_S_POINTER_RESERVED_1__MASK;
+}
+#define DPU_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define DPU_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t DPU_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & DPU_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define DPU_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define DPU_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t DPU_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_POINTER_PP_CLEAR__SHIFT) & DPU_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define DPU_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define DPU_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t DPU_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_POINTER_PP_MODE__SHIFT) & DPU_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define DPU_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define DPU_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t DPU_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_EXECUTER_PP_EN__SHIFT) & DPU_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define DPU_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define DPU_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t DPU_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_POINTER_PP_EN__SHIFT) & DPU_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define DPU_S_POINTER_POINTER__MASK 0x00000001
+#define DPU_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t DPU_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_POINTER__SHIFT) & DPU_S_POINTER_POINTER__MASK;
+}
+
+#define REG_DPU_OPERATION_ENABLE 0x00004008
+#define DPU_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define DPU_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t DPU_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_OPERATION_ENABLE_RESERVED_0__SHIFT) & DPU_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define DPU_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define DPU_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t DPU_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << DPU_OPERATION_ENABLE_OP_EN__SHIFT) & DPU_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_DPU_FEATURE_MODE_CFG 0x0000400c
+#define DPU_FEATURE_MODE_CFG_COMB_USE__MASK 0x80000000
+#define DPU_FEATURE_MODE_CFG_COMB_USE__SHIFT 31
+static inline uint32_t DPU_FEATURE_MODE_CFG_COMB_USE(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_COMB_USE__SHIFT) & DPU_FEATURE_MODE_CFG_COMB_USE__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_TP_EN__MASK 0x40000000
+#define DPU_FEATURE_MODE_CFG_TP_EN__SHIFT 30
+static inline uint32_t DPU_FEATURE_MODE_CFG_TP_EN(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_TP_EN__SHIFT) & DPU_FEATURE_MODE_CFG_TP_EN__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_RGP_TYPE__MASK 0x3c000000
+#define DPU_FEATURE_MODE_CFG_RGP_TYPE__SHIFT 26
+static inline uint32_t DPU_FEATURE_MODE_CFG_RGP_TYPE(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_RGP_TYPE__SHIFT) & DPU_FEATURE_MODE_CFG_RGP_TYPE__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_NONALIGN__MASK 0x02000000
+#define DPU_FEATURE_MODE_CFG_NONALIGN__SHIFT 25
+static inline uint32_t DPU_FEATURE_MODE_CFG_NONALIGN(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_NONALIGN__SHIFT) & DPU_FEATURE_MODE_CFG_NONALIGN__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_SURF_LEN__MASK 0x01fffe00
+#define DPU_FEATURE_MODE_CFG_SURF_LEN__SHIFT 9
+static inline uint32_t DPU_FEATURE_MODE_CFG_SURF_LEN(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_SURF_LEN__SHIFT) & DPU_FEATURE_MODE_CFG_SURF_LEN__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_BURST_LEN__MASK 0x000001e0
+#define DPU_FEATURE_MODE_CFG_BURST_LEN__SHIFT 5
+static inline uint32_t DPU_FEATURE_MODE_CFG_BURST_LEN(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_BURST_LEN__SHIFT) & DPU_FEATURE_MODE_CFG_BURST_LEN__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_CONV_MODE__MASK 0x00000018
+#define DPU_FEATURE_MODE_CFG_CONV_MODE__SHIFT 3
+static inline uint32_t DPU_FEATURE_MODE_CFG_CONV_MODE(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_CONV_MODE__SHIFT) & DPU_FEATURE_MODE_CFG_CONV_MODE__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_OUTPUT_MODE__MASK 0x00000006
+#define DPU_FEATURE_MODE_CFG_OUTPUT_MODE__SHIFT 1
+static inline uint32_t DPU_FEATURE_MODE_CFG_OUTPUT_MODE(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_OUTPUT_MODE__SHIFT) & DPU_FEATURE_MODE_CFG_OUTPUT_MODE__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_FLYING_MODE__MASK 0x00000001
+#define DPU_FEATURE_MODE_CFG_FLYING_MODE__SHIFT 0
+static inline uint32_t DPU_FEATURE_MODE_CFG_FLYING_MODE(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_FLYING_MODE__SHIFT) & DPU_FEATURE_MODE_CFG_FLYING_MODE__MASK;
+}
+
+#define REG_DPU_DATA_FORMAT 0x00004010
+#define DPU_DATA_FORMAT_OUT_PRECISION__MASK 0xe0000000
+#define DPU_DATA_FORMAT_OUT_PRECISION__SHIFT 29
+static inline uint32_t DPU_DATA_FORMAT_OUT_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_OUT_PRECISION__SHIFT) & DPU_DATA_FORMAT_OUT_PRECISION__MASK;
+}
+#define DPU_DATA_FORMAT_IN_PRECISION__MASK 0x1c000000
+#define DPU_DATA_FORMAT_IN_PRECISION__SHIFT 26
+static inline uint32_t DPU_DATA_FORMAT_IN_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_IN_PRECISION__SHIFT) & DPU_DATA_FORMAT_IN_PRECISION__MASK;
+}
+#define DPU_DATA_FORMAT_EW_TRUNCATE_NEG__MASK 0x03ff0000
+#define DPU_DATA_FORMAT_EW_TRUNCATE_NEG__SHIFT 16
+static inline uint32_t DPU_DATA_FORMAT_EW_TRUNCATE_NEG(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_EW_TRUNCATE_NEG__SHIFT) & DPU_DATA_FORMAT_EW_TRUNCATE_NEG__MASK;
+}
+#define DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG__MASK 0x0000fc00
+#define DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG__SHIFT 10
+static inline uint32_t DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG__SHIFT) & DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG__MASK;
+}
+#define DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG__MASK 0x000003f0
+#define DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG__SHIFT 4
+static inline uint32_t DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG__SHIFT) & DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG__MASK;
+}
+#define DPU_DATA_FORMAT_MC_SURF_OUT__MASK 0x00000008
+#define DPU_DATA_FORMAT_MC_SURF_OUT__SHIFT 3
+static inline uint32_t DPU_DATA_FORMAT_MC_SURF_OUT(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_MC_SURF_OUT__SHIFT) & DPU_DATA_FORMAT_MC_SURF_OUT__MASK;
+}
+#define DPU_DATA_FORMAT_PROC_PRECISION__MASK 0x00000007
+#define DPU_DATA_FORMAT_PROC_PRECISION__SHIFT 0
+static inline uint32_t DPU_DATA_FORMAT_PROC_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_PROC_PRECISION__SHIFT) & DPU_DATA_FORMAT_PROC_PRECISION__MASK;
+}
+
+#define REG_DPU_OFFSET_PEND 0x00004014
+#define DPU_OFFSET_PEND_RESERVED_0__MASK 0xffff0000
+#define DPU_OFFSET_PEND_RESERVED_0__SHIFT 16
+static inline uint32_t DPU_OFFSET_PEND_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_OFFSET_PEND_RESERVED_0__SHIFT) & DPU_OFFSET_PEND_RESERVED_0__MASK;
+}
+#define DPU_OFFSET_PEND_OFFSET_PEND__MASK 0x0000ffff
+#define DPU_OFFSET_PEND_OFFSET_PEND__SHIFT 0
+static inline uint32_t DPU_OFFSET_PEND_OFFSET_PEND(uint32_t val)
+{
+ return ((val) << DPU_OFFSET_PEND_OFFSET_PEND__SHIFT) & DPU_OFFSET_PEND_OFFSET_PEND__MASK;
+}
+
+#define REG_DPU_DST_BASE_ADDR 0x00004020
+#define DPU_DST_BASE_ADDR_DST_BASE_ADDR__MASK 0xffffffff
+#define DPU_DST_BASE_ADDR_DST_BASE_ADDR__SHIFT 0
+static inline uint32_t DPU_DST_BASE_ADDR_DST_BASE_ADDR(uint32_t val)
+{
+ return ((val) << DPU_DST_BASE_ADDR_DST_BASE_ADDR__SHIFT) & DPU_DST_BASE_ADDR_DST_BASE_ADDR__MASK;
+}
+
+#define REG_DPU_DST_SURF_STRIDE 0x00004024
+#define DPU_DST_SURF_STRIDE_DST_SURF_STRIDE__MASK 0xfffffff0
+#define DPU_DST_SURF_STRIDE_DST_SURF_STRIDE__SHIFT 4
+static inline uint32_t DPU_DST_SURF_STRIDE_DST_SURF_STRIDE(uint32_t val)
+{
+ return ((val) << DPU_DST_SURF_STRIDE_DST_SURF_STRIDE__SHIFT) & DPU_DST_SURF_STRIDE_DST_SURF_STRIDE__MASK;
+}
+#define DPU_DST_SURF_STRIDE_RESERVED_0__MASK 0x0000000f
+#define DPU_DST_SURF_STRIDE_RESERVED_0__SHIFT 0
+static inline uint32_t DPU_DST_SURF_STRIDE_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_DST_SURF_STRIDE_RESERVED_0__SHIFT) & DPU_DST_SURF_STRIDE_RESERVED_0__MASK;
+}
+
+#define REG_DPU_DATA_CUBE_WIDTH 0x00004030
+#define DPU_DATA_CUBE_WIDTH_RESERVED_0__MASK 0xffffe000
+#define DPU_DATA_CUBE_WIDTH_RESERVED_0__SHIFT 13
+static inline uint32_t DPU_DATA_CUBE_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_WIDTH_RESERVED_0__SHIFT) & DPU_DATA_CUBE_WIDTH_RESERVED_0__MASK;
+}
+#define DPU_DATA_CUBE_WIDTH_WIDTH__MASK 0x00001fff
+#define DPU_DATA_CUBE_WIDTH_WIDTH__SHIFT 0
+static inline uint32_t DPU_DATA_CUBE_WIDTH_WIDTH(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_WIDTH_WIDTH__SHIFT) & DPU_DATA_CUBE_WIDTH_WIDTH__MASK;
+}
+
+#define REG_DPU_DATA_CUBE_HEIGHT 0x00004034
+#define DPU_DATA_CUBE_HEIGHT_RESERVED_0__MASK 0xfe000000
+#define DPU_DATA_CUBE_HEIGHT_RESERVED_0__SHIFT 25
+static inline uint32_t DPU_DATA_CUBE_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_HEIGHT_RESERVED_0__SHIFT) & DPU_DATA_CUBE_HEIGHT_RESERVED_0__MASK;
+}
+#define DPU_DATA_CUBE_HEIGHT_MINMAX_CTL__MASK 0x01c00000
+#define DPU_DATA_CUBE_HEIGHT_MINMAX_CTL__SHIFT 22
+static inline uint32_t DPU_DATA_CUBE_HEIGHT_MINMAX_CTL(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_HEIGHT_MINMAX_CTL__SHIFT) & DPU_DATA_CUBE_HEIGHT_MINMAX_CTL__MASK;
+}
+#define DPU_DATA_CUBE_HEIGHT_RESERVED_1__MASK 0x003fe000
+#define DPU_DATA_CUBE_HEIGHT_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_DATA_CUBE_HEIGHT_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_HEIGHT_RESERVED_1__SHIFT) & DPU_DATA_CUBE_HEIGHT_RESERVED_1__MASK;
+}
+#define DPU_DATA_CUBE_HEIGHT_HEIGHT__MASK 0x00001fff
+#define DPU_DATA_CUBE_HEIGHT_HEIGHT__SHIFT 0
+static inline uint32_t DPU_DATA_CUBE_HEIGHT_HEIGHT(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_HEIGHT_HEIGHT__SHIFT) & DPU_DATA_CUBE_HEIGHT_HEIGHT__MASK;
+}
+
+#define REG_DPU_DATA_CUBE_NOTCH_ADDR 0x00004038
+#define DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0__MASK 0xe0000000
+#define DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0__SHIFT 29
+static inline uint32_t DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0__SHIFT) & DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0__MASK;
+}
+#define DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1__MASK 0x1fff0000
+#define DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1__SHIFT 16
+static inline uint32_t DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1__SHIFT) & DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1__MASK;
+}
+#define DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1__MASK 0x0000e000
+#define DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1__SHIFT) & DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1__MASK;
+}
+#define DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0__MASK 0x00001fff
+#define DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0__SHIFT 0
+static inline uint32_t DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0__SHIFT) & DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0__MASK;
+}
+
+#define REG_DPU_DATA_CUBE_CHANNEL 0x0000403c
+#define DPU_DATA_CUBE_CHANNEL_RESERVED_0__MASK 0xe0000000
+#define DPU_DATA_CUBE_CHANNEL_RESERVED_0__SHIFT 29
+static inline uint32_t DPU_DATA_CUBE_CHANNEL_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_CHANNEL_RESERVED_0__SHIFT) & DPU_DATA_CUBE_CHANNEL_RESERVED_0__MASK;
+}
+#define DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL__MASK 0x1fff0000
+#define DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL__SHIFT 16
+static inline uint32_t DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL__SHIFT) & DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL__MASK;
+}
+#define DPU_DATA_CUBE_CHANNEL_RESERVED_1__MASK 0x0000e000
+#define DPU_DATA_CUBE_CHANNEL_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_DATA_CUBE_CHANNEL_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_CHANNEL_RESERVED_1__SHIFT) & DPU_DATA_CUBE_CHANNEL_RESERVED_1__MASK;
+}
+#define DPU_DATA_CUBE_CHANNEL_CHANNEL__MASK 0x00001fff
+#define DPU_DATA_CUBE_CHANNEL_CHANNEL__SHIFT 0
+static inline uint32_t DPU_DATA_CUBE_CHANNEL_CHANNEL(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_CHANNEL_CHANNEL__SHIFT) & DPU_DATA_CUBE_CHANNEL_CHANNEL__MASK;
+}
+
+#define REG_DPU_BS_CFG 0x00004040
+#define DPU_BS_CFG_RESERVED_0__MASK 0xfff00000
+#define DPU_BS_CFG_RESERVED_0__SHIFT 20
+static inline uint32_t DPU_BS_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_RESERVED_0__SHIFT) & DPU_BS_CFG_RESERVED_0__MASK;
+}
+#define DPU_BS_CFG_BS_ALU_ALGO__MASK 0x000f0000
+#define DPU_BS_CFG_BS_ALU_ALGO__SHIFT 16
+static inline uint32_t DPU_BS_CFG_BS_ALU_ALGO(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_ALU_ALGO__SHIFT) & DPU_BS_CFG_BS_ALU_ALGO__MASK;
+}
+#define DPU_BS_CFG_RESERVED_1__MASK 0x0000fe00
+#define DPU_BS_CFG_RESERVED_1__SHIFT 9
+static inline uint32_t DPU_BS_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_RESERVED_1__SHIFT) & DPU_BS_CFG_RESERVED_1__MASK;
+}
+#define DPU_BS_CFG_BS_ALU_SRC__MASK 0x00000100
+#define DPU_BS_CFG_BS_ALU_SRC__SHIFT 8
+static inline uint32_t DPU_BS_CFG_BS_ALU_SRC(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_ALU_SRC__SHIFT) & DPU_BS_CFG_BS_ALU_SRC__MASK;
+}
+#define DPU_BS_CFG_BS_RELUX_EN__MASK 0x00000080
+#define DPU_BS_CFG_BS_RELUX_EN__SHIFT 7
+static inline uint32_t DPU_BS_CFG_BS_RELUX_EN(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_RELUX_EN__SHIFT) & DPU_BS_CFG_BS_RELUX_EN__MASK;
+}
+#define DPU_BS_CFG_BS_RELU_BYPASS__MASK 0x00000040
+#define DPU_BS_CFG_BS_RELU_BYPASS__SHIFT 6
+static inline uint32_t DPU_BS_CFG_BS_RELU_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_RELU_BYPASS__SHIFT) & DPU_BS_CFG_BS_RELU_BYPASS__MASK;
+}
+#define DPU_BS_CFG_BS_MUL_PRELU__MASK 0x00000020
+#define DPU_BS_CFG_BS_MUL_PRELU__SHIFT 5
+static inline uint32_t DPU_BS_CFG_BS_MUL_PRELU(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_MUL_PRELU__SHIFT) & DPU_BS_CFG_BS_MUL_PRELU__MASK;
+}
+#define DPU_BS_CFG_BS_MUL_BYPASS__MASK 0x00000010
+#define DPU_BS_CFG_BS_MUL_BYPASS__SHIFT 4
+static inline uint32_t DPU_BS_CFG_BS_MUL_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_MUL_BYPASS__SHIFT) & DPU_BS_CFG_BS_MUL_BYPASS__MASK;
+}
+#define DPU_BS_CFG_RESERVED_2__MASK 0x0000000c
+#define DPU_BS_CFG_RESERVED_2__SHIFT 2
+static inline uint32_t DPU_BS_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_RESERVED_2__SHIFT) & DPU_BS_CFG_RESERVED_2__MASK;
+}
+#define DPU_BS_CFG_BS_ALU_BYPASS__MASK 0x00000002
+#define DPU_BS_CFG_BS_ALU_BYPASS__SHIFT 1
+static inline uint32_t DPU_BS_CFG_BS_ALU_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_ALU_BYPASS__SHIFT) & DPU_BS_CFG_BS_ALU_BYPASS__MASK;
+}
+#define DPU_BS_CFG_BS_BYPASS__MASK 0x00000001
+#define DPU_BS_CFG_BS_BYPASS__SHIFT 0
+static inline uint32_t DPU_BS_CFG_BS_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_BYPASS__SHIFT) & DPU_BS_CFG_BS_BYPASS__MASK;
+}
+
+#define REG_DPU_BS_ALU_CFG 0x00004044
+#define DPU_BS_ALU_CFG_BS_ALU_OPERAND__MASK 0xffffffff
+#define DPU_BS_ALU_CFG_BS_ALU_OPERAND__SHIFT 0
+static inline uint32_t DPU_BS_ALU_CFG_BS_ALU_OPERAND(uint32_t val)
+{
+ return ((val) << DPU_BS_ALU_CFG_BS_ALU_OPERAND__SHIFT) & DPU_BS_ALU_CFG_BS_ALU_OPERAND__MASK;
+}
+
+#define REG_DPU_BS_MUL_CFG 0x00004048
+#define DPU_BS_MUL_CFG_BS_MUL_OPERAND__MASK 0xffff0000
+#define DPU_BS_MUL_CFG_BS_MUL_OPERAND__SHIFT 16
+static inline uint32_t DPU_BS_MUL_CFG_BS_MUL_OPERAND(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_BS_MUL_OPERAND__SHIFT) & DPU_BS_MUL_CFG_BS_MUL_OPERAND__MASK;
+}
+#define DPU_BS_MUL_CFG_RESERVED_0__MASK 0x0000c000
+#define DPU_BS_MUL_CFG_RESERVED_0__SHIFT 14
+static inline uint32_t DPU_BS_MUL_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_RESERVED_0__SHIFT) & DPU_BS_MUL_CFG_RESERVED_0__MASK;
+}
+#define DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE__MASK 0x00003f00
+#define DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE__SHIFT 8
+static inline uint32_t DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE__SHIFT) & DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE__MASK;
+}
+#define DPU_BS_MUL_CFG_RESERVED_1__MASK 0x000000fc
+#define DPU_BS_MUL_CFG_RESERVED_1__SHIFT 2
+static inline uint32_t DPU_BS_MUL_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_RESERVED_1__SHIFT) & DPU_BS_MUL_CFG_RESERVED_1__MASK;
+}
+#define DPU_BS_MUL_CFG_BS_TRUNCATE_SRC__MASK 0x00000002
+#define DPU_BS_MUL_CFG_BS_TRUNCATE_SRC__SHIFT 1
+static inline uint32_t DPU_BS_MUL_CFG_BS_TRUNCATE_SRC(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_BS_TRUNCATE_SRC__SHIFT) & DPU_BS_MUL_CFG_BS_TRUNCATE_SRC__MASK;
+}
+#define DPU_BS_MUL_CFG_BS_MUL_SRC__MASK 0x00000001
+#define DPU_BS_MUL_CFG_BS_MUL_SRC__SHIFT 0
+static inline uint32_t DPU_BS_MUL_CFG_BS_MUL_SRC(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_BS_MUL_SRC__SHIFT) & DPU_BS_MUL_CFG_BS_MUL_SRC__MASK;
+}
+
+#define REG_DPU_BS_RELUX_CMP_VALUE 0x0000404c
+#define DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT__MASK 0xffffffff
+#define DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT__SHIFT 0
+static inline uint32_t DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT(uint32_t val)
+{
+ return ((val) << DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT__SHIFT) & DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT__MASK;
+}
+
+#define REG_DPU_BS_OW_CFG 0x00004050
+#define DPU_BS_OW_CFG_RGP_CNTER__MASK 0xf0000000
+#define DPU_BS_OW_CFG_RGP_CNTER__SHIFT 28
+static inline uint32_t DPU_BS_OW_CFG_RGP_CNTER(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_RGP_CNTER__SHIFT) & DPU_BS_OW_CFG_RGP_CNTER__MASK;
+}
+#define DPU_BS_OW_CFG_TP_ORG_EN__MASK 0x08000000
+#define DPU_BS_OW_CFG_TP_ORG_EN__SHIFT 27
+static inline uint32_t DPU_BS_OW_CFG_TP_ORG_EN(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_TP_ORG_EN__SHIFT) & DPU_BS_OW_CFG_TP_ORG_EN__MASK;
+}
+#define DPU_BS_OW_CFG_RESERVED_0__MASK 0x07fff800
+#define DPU_BS_OW_CFG_RESERVED_0__SHIFT 11
+static inline uint32_t DPU_BS_OW_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_RESERVED_0__SHIFT) & DPU_BS_OW_CFG_RESERVED_0__MASK;
+}
+#define DPU_BS_OW_CFG_SIZE_E_2__MASK 0x00000700
+#define DPU_BS_OW_CFG_SIZE_E_2__SHIFT 8
+static inline uint32_t DPU_BS_OW_CFG_SIZE_E_2(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_SIZE_E_2__SHIFT) & DPU_BS_OW_CFG_SIZE_E_2__MASK;
+}
+#define DPU_BS_OW_CFG_SIZE_E_1__MASK 0x000000e0
+#define DPU_BS_OW_CFG_SIZE_E_1__SHIFT 5
+static inline uint32_t DPU_BS_OW_CFG_SIZE_E_1(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_SIZE_E_1__SHIFT) & DPU_BS_OW_CFG_SIZE_E_1__MASK;
+}
+#define DPU_BS_OW_CFG_SIZE_E_0__MASK 0x0000001c
+#define DPU_BS_OW_CFG_SIZE_E_0__SHIFT 2
+static inline uint32_t DPU_BS_OW_CFG_SIZE_E_0(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_SIZE_E_0__SHIFT) & DPU_BS_OW_CFG_SIZE_E_0__MASK;
+}
+#define DPU_BS_OW_CFG_OD_BYPASS__MASK 0x00000002
+#define DPU_BS_OW_CFG_OD_BYPASS__SHIFT 1
+static inline uint32_t DPU_BS_OW_CFG_OD_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_OD_BYPASS__SHIFT) & DPU_BS_OW_CFG_OD_BYPASS__MASK;
+}
+#define DPU_BS_OW_CFG_OW_SRC__MASK 0x00000001
+#define DPU_BS_OW_CFG_OW_SRC__SHIFT 0
+static inline uint32_t DPU_BS_OW_CFG_OW_SRC(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_OW_SRC__SHIFT) & DPU_BS_OW_CFG_OW_SRC__MASK;
+}
+
+#define REG_DPU_BS_OW_OP 0x00004054
+#define DPU_BS_OW_OP_RESERVED_0__MASK 0xffff0000
+#define DPU_BS_OW_OP_RESERVED_0__SHIFT 16
+static inline uint32_t DPU_BS_OW_OP_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_OP_RESERVED_0__SHIFT) & DPU_BS_OW_OP_RESERVED_0__MASK;
+}
+#define DPU_BS_OW_OP_OW_OP__MASK 0x0000ffff
+#define DPU_BS_OW_OP_OW_OP__SHIFT 0
+static inline uint32_t DPU_BS_OW_OP_OW_OP(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_OP_OW_OP__SHIFT) & DPU_BS_OW_OP_OW_OP__MASK;
+}
+
+#define REG_DPU_WDMA_SIZE_0 0x00004058
+#define DPU_WDMA_SIZE_0_RESERVED_0__MASK 0xf0000000
+#define DPU_WDMA_SIZE_0_RESERVED_0__SHIFT 28
+static inline uint32_t DPU_WDMA_SIZE_0_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_0_RESERVED_0__SHIFT) & DPU_WDMA_SIZE_0_RESERVED_0__MASK;
+}
+#define DPU_WDMA_SIZE_0_TP_PRECISION__MASK 0x08000000
+#define DPU_WDMA_SIZE_0_TP_PRECISION__SHIFT 27
+static inline uint32_t DPU_WDMA_SIZE_0_TP_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_0_TP_PRECISION__SHIFT) & DPU_WDMA_SIZE_0_TP_PRECISION__MASK;
+}
+#define DPU_WDMA_SIZE_0_SIZE_C_WDMA__MASK 0x07ff0000
+#define DPU_WDMA_SIZE_0_SIZE_C_WDMA__SHIFT 16
+static inline uint32_t DPU_WDMA_SIZE_0_SIZE_C_WDMA(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_0_SIZE_C_WDMA__SHIFT) & DPU_WDMA_SIZE_0_SIZE_C_WDMA__MASK;
+}
+#define DPU_WDMA_SIZE_0_RESERVED_1__MASK 0x0000e000
+#define DPU_WDMA_SIZE_0_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_WDMA_SIZE_0_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_0_RESERVED_1__SHIFT) & DPU_WDMA_SIZE_0_RESERVED_1__MASK;
+}
+#define DPU_WDMA_SIZE_0_CHANNEL_WDMA__MASK 0x00001fff
+#define DPU_WDMA_SIZE_0_CHANNEL_WDMA__SHIFT 0
+static inline uint32_t DPU_WDMA_SIZE_0_CHANNEL_WDMA(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_0_CHANNEL_WDMA__SHIFT) & DPU_WDMA_SIZE_0_CHANNEL_WDMA__MASK;
+}
+
+#define REG_DPU_WDMA_SIZE_1 0x0000405c
+#define DPU_WDMA_SIZE_1_RESERVED_0__MASK 0xe0000000
+#define DPU_WDMA_SIZE_1_RESERVED_0__SHIFT 29
+static inline uint32_t DPU_WDMA_SIZE_1_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_1_RESERVED_0__SHIFT) & DPU_WDMA_SIZE_1_RESERVED_0__MASK;
+}
+#define DPU_WDMA_SIZE_1_HEIGHT_WDMA__MASK 0x1fff0000
+#define DPU_WDMA_SIZE_1_HEIGHT_WDMA__SHIFT 16
+static inline uint32_t DPU_WDMA_SIZE_1_HEIGHT_WDMA(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_1_HEIGHT_WDMA__SHIFT) & DPU_WDMA_SIZE_1_HEIGHT_WDMA__MASK;
+}
+#define DPU_WDMA_SIZE_1_RESERVED_1__MASK 0x0000e000
+#define DPU_WDMA_SIZE_1_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_WDMA_SIZE_1_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_1_RESERVED_1__SHIFT) & DPU_WDMA_SIZE_1_RESERVED_1__MASK;
+}
+#define DPU_WDMA_SIZE_1_WIDTH_WDMA__MASK 0x00001fff
+#define DPU_WDMA_SIZE_1_WIDTH_WDMA__SHIFT 0
+static inline uint32_t DPU_WDMA_SIZE_1_WIDTH_WDMA(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_1_WIDTH_WDMA__SHIFT) & DPU_WDMA_SIZE_1_WIDTH_WDMA__MASK;
+}
+
+#define REG_DPU_BN_CFG 0x00004060
+#define DPU_BN_CFG_RESERVED_0__MASK 0xfff00000
+#define DPU_BN_CFG_RESERVED_0__SHIFT 20
+static inline uint32_t DPU_BN_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_RESERVED_0__SHIFT) & DPU_BN_CFG_RESERVED_0__MASK;
+}
+#define DPU_BN_CFG_BN_ALU_ALGO__MASK 0x000f0000
+#define DPU_BN_CFG_BN_ALU_ALGO__SHIFT 16
+static inline uint32_t DPU_BN_CFG_BN_ALU_ALGO(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_ALU_ALGO__SHIFT) & DPU_BN_CFG_BN_ALU_ALGO__MASK;
+}
+#define DPU_BN_CFG_RESERVED_1__MASK 0x0000fe00
+#define DPU_BN_CFG_RESERVED_1__SHIFT 9
+static inline uint32_t DPU_BN_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_RESERVED_1__SHIFT) & DPU_BN_CFG_RESERVED_1__MASK;
+}
+#define DPU_BN_CFG_BN_ALU_SRC__MASK 0x00000100
+#define DPU_BN_CFG_BN_ALU_SRC__SHIFT 8
+static inline uint32_t DPU_BN_CFG_BN_ALU_SRC(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_ALU_SRC__SHIFT) & DPU_BN_CFG_BN_ALU_SRC__MASK;
+}
+#define DPU_BN_CFG_BN_RELUX_EN__MASK 0x00000080
+#define DPU_BN_CFG_BN_RELUX_EN__SHIFT 7
+static inline uint32_t DPU_BN_CFG_BN_RELUX_EN(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_RELUX_EN__SHIFT) & DPU_BN_CFG_BN_RELUX_EN__MASK;
+}
+#define DPU_BN_CFG_BN_RELU_BYPASS__MASK 0x00000040
+#define DPU_BN_CFG_BN_RELU_BYPASS__SHIFT 6
+static inline uint32_t DPU_BN_CFG_BN_RELU_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_RELU_BYPASS__SHIFT) & DPU_BN_CFG_BN_RELU_BYPASS__MASK;
+}
+#define DPU_BN_CFG_BN_MUL_PRELU__MASK 0x00000020
+#define DPU_BN_CFG_BN_MUL_PRELU__SHIFT 5
+static inline uint32_t DPU_BN_CFG_BN_MUL_PRELU(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_MUL_PRELU__SHIFT) & DPU_BN_CFG_BN_MUL_PRELU__MASK;
+}
+#define DPU_BN_CFG_BN_MUL_BYPASS__MASK 0x00000010
+#define DPU_BN_CFG_BN_MUL_BYPASS__SHIFT 4
+static inline uint32_t DPU_BN_CFG_BN_MUL_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_MUL_BYPASS__SHIFT) & DPU_BN_CFG_BN_MUL_BYPASS__MASK;
+}
+#define DPU_BN_CFG_RESERVED_2__MASK 0x0000000c
+#define DPU_BN_CFG_RESERVED_2__SHIFT 2
+static inline uint32_t DPU_BN_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_RESERVED_2__SHIFT) & DPU_BN_CFG_RESERVED_2__MASK;
+}
+#define DPU_BN_CFG_BN_ALU_BYPASS__MASK 0x00000002
+#define DPU_BN_CFG_BN_ALU_BYPASS__SHIFT 1
+static inline uint32_t DPU_BN_CFG_BN_ALU_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_ALU_BYPASS__SHIFT) & DPU_BN_CFG_BN_ALU_BYPASS__MASK;
+}
+#define DPU_BN_CFG_BN_BYPASS__MASK 0x00000001
+#define DPU_BN_CFG_BN_BYPASS__SHIFT 0
+static inline uint32_t DPU_BN_CFG_BN_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_BYPASS__SHIFT) & DPU_BN_CFG_BN_BYPASS__MASK;
+}
+
+#define REG_DPU_BN_ALU_CFG 0x00004064
+#define DPU_BN_ALU_CFG_BN_ALU_OPERAND__MASK 0xffffffff
+#define DPU_BN_ALU_CFG_BN_ALU_OPERAND__SHIFT 0
+static inline uint32_t DPU_BN_ALU_CFG_BN_ALU_OPERAND(uint32_t val)
+{
+ return ((val) << DPU_BN_ALU_CFG_BN_ALU_OPERAND__SHIFT) & DPU_BN_ALU_CFG_BN_ALU_OPERAND__MASK;
+}
+
+#define REG_DPU_BN_MUL_CFG 0x00004068
+#define DPU_BN_MUL_CFG_BN_MUL_OPERAND__MASK 0xffff0000
+#define DPU_BN_MUL_CFG_BN_MUL_OPERAND__SHIFT 16
+static inline uint32_t DPU_BN_MUL_CFG_BN_MUL_OPERAND(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_BN_MUL_OPERAND__SHIFT) & DPU_BN_MUL_CFG_BN_MUL_OPERAND__MASK;
+}
+#define DPU_BN_MUL_CFG_RESERVED_0__MASK 0x0000c000
+#define DPU_BN_MUL_CFG_RESERVED_0__SHIFT 14
+static inline uint32_t DPU_BN_MUL_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_RESERVED_0__SHIFT) & DPU_BN_MUL_CFG_RESERVED_0__MASK;
+}
+#define DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE__MASK 0x00003f00
+#define DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE__SHIFT 8
+static inline uint32_t DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE__SHIFT) & DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE__MASK;
+}
+#define DPU_BN_MUL_CFG_RESERVED_1__MASK 0x000000fc
+#define DPU_BN_MUL_CFG_RESERVED_1__SHIFT 2
+static inline uint32_t DPU_BN_MUL_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_RESERVED_1__SHIFT) & DPU_BN_MUL_CFG_RESERVED_1__MASK;
+}
+#define DPU_BN_MUL_CFG_BN_TRUNCATE_SRC__MASK 0x00000002
+#define DPU_BN_MUL_CFG_BN_TRUNCATE_SRC__SHIFT 1
+static inline uint32_t DPU_BN_MUL_CFG_BN_TRUNCATE_SRC(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_BN_TRUNCATE_SRC__SHIFT) & DPU_BN_MUL_CFG_BN_TRUNCATE_SRC__MASK;
+}
+#define DPU_BN_MUL_CFG_BN_MUL_SRC__MASK 0x00000001
+#define DPU_BN_MUL_CFG_BN_MUL_SRC__SHIFT 0
+static inline uint32_t DPU_BN_MUL_CFG_BN_MUL_SRC(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_BN_MUL_SRC__SHIFT) & DPU_BN_MUL_CFG_BN_MUL_SRC__MASK;
+}
+
+#define REG_DPU_BN_RELUX_CMP_VALUE 0x0000406c
+#define DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT__MASK 0xffffffff
+#define DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT__SHIFT 0
+static inline uint32_t DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT(uint32_t val)
+{
+ return ((val) << DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT__SHIFT) & DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT__MASK;
+}
+
+#define REG_DPU_EW_CFG 0x00004070
+#define DPU_EW_CFG_EW_CVT_TYPE__MASK 0x80000000
+#define DPU_EW_CFG_EW_CVT_TYPE__SHIFT 31
+static inline uint32_t DPU_EW_CFG_EW_CVT_TYPE(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_CVT_TYPE__SHIFT) & DPU_EW_CFG_EW_CVT_TYPE__MASK;
+}
+#define DPU_EW_CFG_EW_CVT_ROUND__MASK 0x40000000
+#define DPU_EW_CFG_EW_CVT_ROUND__SHIFT 30
+static inline uint32_t DPU_EW_CFG_EW_CVT_ROUND(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_CVT_ROUND__SHIFT) & DPU_EW_CFG_EW_CVT_ROUND__MASK;
+}
+#define DPU_EW_CFG_EW_DATA_MODE__MASK 0x30000000
+#define DPU_EW_CFG_EW_DATA_MODE__SHIFT 28
+static inline uint32_t DPU_EW_CFG_EW_DATA_MODE(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_DATA_MODE__SHIFT) & DPU_EW_CFG_EW_DATA_MODE__MASK;
+}
+#define DPU_EW_CFG_RESERVED_0__MASK 0x0f000000
+#define DPU_EW_CFG_RESERVED_0__SHIFT 24
+static inline uint32_t DPU_EW_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_RESERVED_0__SHIFT) & DPU_EW_CFG_RESERVED_0__MASK;
+}
+#define DPU_EW_CFG_EDATA_SIZE__MASK 0x00c00000
+#define DPU_EW_CFG_EDATA_SIZE__SHIFT 22
+static inline uint32_t DPU_EW_CFG_EDATA_SIZE(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EDATA_SIZE__SHIFT) & DPU_EW_CFG_EDATA_SIZE__MASK;
+}
+#define DPU_EW_CFG_EW_EQUAL_EN__MASK 0x00200000
+#define DPU_EW_CFG_EW_EQUAL_EN__SHIFT 21
+static inline uint32_t DPU_EW_CFG_EW_EQUAL_EN(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_EQUAL_EN__SHIFT) & DPU_EW_CFG_EW_EQUAL_EN__MASK;
+}
+#define DPU_EW_CFG_EW_BINARY_EN__MASK 0x00100000
+#define DPU_EW_CFG_EW_BINARY_EN__SHIFT 20
+static inline uint32_t DPU_EW_CFG_EW_BINARY_EN(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_BINARY_EN__SHIFT) & DPU_EW_CFG_EW_BINARY_EN__MASK;
+}
+#define DPU_EW_CFG_EW_ALU_ALGO__MASK 0x000f0000
+#define DPU_EW_CFG_EW_ALU_ALGO__SHIFT 16
+static inline uint32_t DPU_EW_CFG_EW_ALU_ALGO(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_ALU_ALGO__SHIFT) & DPU_EW_CFG_EW_ALU_ALGO__MASK;
+}
+#define DPU_EW_CFG_RESERVED_1__MASK 0x0000f800
+#define DPU_EW_CFG_RESERVED_1__SHIFT 11
+static inline uint32_t DPU_EW_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_RESERVED_1__SHIFT) & DPU_EW_CFG_RESERVED_1__MASK;
+}
+#define DPU_EW_CFG_EW_RELUX_EN__MASK 0x00000400
+#define DPU_EW_CFG_EW_RELUX_EN__SHIFT 10
+static inline uint32_t DPU_EW_CFG_EW_RELUX_EN(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_RELUX_EN__SHIFT) & DPU_EW_CFG_EW_RELUX_EN__MASK;
+}
+#define DPU_EW_CFG_EW_RELU_BYPASS__MASK 0x00000200
+#define DPU_EW_CFG_EW_RELU_BYPASS__SHIFT 9
+static inline uint32_t DPU_EW_CFG_EW_RELU_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_RELU_BYPASS__SHIFT) & DPU_EW_CFG_EW_RELU_BYPASS__MASK;
+}
+#define DPU_EW_CFG_EW_OP_CVT_BYPASS__MASK 0x00000100
+#define DPU_EW_CFG_EW_OP_CVT_BYPASS__SHIFT 8
+static inline uint32_t DPU_EW_CFG_EW_OP_CVT_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_OP_CVT_BYPASS__SHIFT) & DPU_EW_CFG_EW_OP_CVT_BYPASS__MASK;
+}
+#define DPU_EW_CFG_EW_LUT_BYPASS__MASK 0x00000080
+#define DPU_EW_CFG_EW_LUT_BYPASS__SHIFT 7
+static inline uint32_t DPU_EW_CFG_EW_LUT_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_LUT_BYPASS__SHIFT) & DPU_EW_CFG_EW_LUT_BYPASS__MASK;
+}
+#define DPU_EW_CFG_EW_OP_SRC__MASK 0x00000040
+#define DPU_EW_CFG_EW_OP_SRC__SHIFT 6
+static inline uint32_t DPU_EW_CFG_EW_OP_SRC(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_OP_SRC__SHIFT) & DPU_EW_CFG_EW_OP_SRC__MASK;
+}
+#define DPU_EW_CFG_EW_MUL_PRELU__MASK 0x00000020
+#define DPU_EW_CFG_EW_MUL_PRELU__SHIFT 5
+static inline uint32_t DPU_EW_CFG_EW_MUL_PRELU(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_MUL_PRELU__SHIFT) & DPU_EW_CFG_EW_MUL_PRELU__MASK;
+}
+#define DPU_EW_CFG_RESERVED_2__MASK 0x00000018
+#define DPU_EW_CFG_RESERVED_2__SHIFT 3
+static inline uint32_t DPU_EW_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_RESERVED_2__SHIFT) & DPU_EW_CFG_RESERVED_2__MASK;
+}
+#define DPU_EW_CFG_EW_OP_TYPE__MASK 0x00000004
+#define DPU_EW_CFG_EW_OP_TYPE__SHIFT 2
+static inline uint32_t DPU_EW_CFG_EW_OP_TYPE(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_OP_TYPE__SHIFT) & DPU_EW_CFG_EW_OP_TYPE__MASK;
+}
+#define DPU_EW_CFG_EW_OP_BYPASS__MASK 0x00000002
+#define DPU_EW_CFG_EW_OP_BYPASS__SHIFT 1
+static inline uint32_t DPU_EW_CFG_EW_OP_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_OP_BYPASS__SHIFT) & DPU_EW_CFG_EW_OP_BYPASS__MASK;
+}
+#define DPU_EW_CFG_EW_BYPASS__MASK 0x00000001
+#define DPU_EW_CFG_EW_BYPASS__SHIFT 0
+static inline uint32_t DPU_EW_CFG_EW_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_BYPASS__SHIFT) & DPU_EW_CFG_EW_BYPASS__MASK;
+}
+
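+/*
+ * Editorial sketch: the *_BYPASS fields above appear to be active-high
+ * (1 = skip that sub-unit), mirroring DPU_BS_CFG and DPU_BN_CFG earlier;
+ * the hardware semantics are an assumption inferred from the names.  On
+ * that assumption, parking the whole element-wise stage could be
+ * composed as (illustrative only; writel()/'base' assumed):
+ *
+ *   writel(DPU_EW_CFG_EW_BYPASS(1) |
+ *          DPU_EW_CFG_EW_OP_BYPASS(1) |
+ *          DPU_EW_CFG_EW_LUT_BYPASS(1) |
+ *          DPU_EW_CFG_EW_RELU_BYPASS(1),
+ *          base + REG_DPU_EW_CFG);
+ */
+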
+#define REG_DPU_EW_CVT_OFFSET_VALUE 0x00004074
+#define DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET__MASK 0xffffffff
+#define DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET__SHIFT 0
+static inline uint32_t DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET(uint32_t val)
+{
+ return ((val) << DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET__SHIFT) & DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET__MASK;
+}
+
+#define REG_DPU_EW_CVT_SCALE_VALUE 0x00004078
+#define DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE__MASK 0xffc00000
+#define DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE__SHIFT 22
+static inline uint32_t DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE(uint32_t val)
+{
+ return ((val) << DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE__SHIFT) & DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE__MASK;
+}
+#define DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT__MASK 0x003f0000
+#define DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT__SHIFT 16
+static inline uint32_t DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT__SHIFT) & DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT__MASK;
+}
+#define DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE__MASK 0x0000ffff
+#define DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE__SHIFT 0
+static inline uint32_t DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE(uint32_t val)
+{
+ return ((val) << DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE__SHIFT) & DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE__MASK;
+}
+
+#define REG_DPU_EW_RELUX_CMP_VALUE 0x0000407c
+#define DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT__MASK 0xffffffff
+#define DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT__SHIFT 0
+static inline uint32_t DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT(uint32_t val)
+{
+ return ((val) << DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT__SHIFT) & DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT__MASK;
+}
+
+#define REG_DPU_OUT_CVT_OFFSET 0x00004080
+#define DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET__MASK 0xffffffff
+#define DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET__SHIFT 0
+static inline uint32_t DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET__SHIFT) & DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET__MASK;
+}
+
+#define REG_DPU_OUT_CVT_SCALE 0x00004084
+#define DPU_OUT_CVT_SCALE_RESERVED_0__MASK 0xfffe0000
+#define DPU_OUT_CVT_SCALE_RESERVED_0__SHIFT 17
+static inline uint32_t DPU_OUT_CVT_SCALE_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SCALE_RESERVED_0__SHIFT) & DPU_OUT_CVT_SCALE_RESERVED_0__MASK;
+}
+#define DPU_OUT_CVT_SCALE_FP32TOFP16_EN__MASK 0x00010000
+#define DPU_OUT_CVT_SCALE_FP32TOFP16_EN__SHIFT 16
+static inline uint32_t DPU_OUT_CVT_SCALE_FP32TOFP16_EN(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SCALE_FP32TOFP16_EN__SHIFT) & DPU_OUT_CVT_SCALE_FP32TOFP16_EN__MASK;
+}
+#define DPU_OUT_CVT_SCALE_OUT_CVT_SCALE__MASK 0x0000ffff
+#define DPU_OUT_CVT_SCALE_OUT_CVT_SCALE__SHIFT 0
+static inline uint32_t DPU_OUT_CVT_SCALE_OUT_CVT_SCALE(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SCALE_OUT_CVT_SCALE__SHIFT) & DPU_OUT_CVT_SCALE_OUT_CVT_SCALE__MASK;
+}
+
+#define REG_DPU_OUT_CVT_SHIFT 0x00004088
+#define DPU_OUT_CVT_SHIFT_CVT_TYPE__MASK 0x80000000
+#define DPU_OUT_CVT_SHIFT_CVT_TYPE__SHIFT 31
+static inline uint32_t DPU_OUT_CVT_SHIFT_CVT_TYPE(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SHIFT_CVT_TYPE__SHIFT) & DPU_OUT_CVT_SHIFT_CVT_TYPE__MASK;
+}
+#define DPU_OUT_CVT_SHIFT_CVT_ROUND__MASK 0x40000000
+#define DPU_OUT_CVT_SHIFT_CVT_ROUND__SHIFT 30
+static inline uint32_t DPU_OUT_CVT_SHIFT_CVT_ROUND(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SHIFT_CVT_ROUND__SHIFT) & DPU_OUT_CVT_SHIFT_CVT_ROUND__MASK;
+}
+#define DPU_OUT_CVT_SHIFT_RESERVED_0__MASK 0x3ff00000
+#define DPU_OUT_CVT_SHIFT_RESERVED_0__SHIFT 20
+static inline uint32_t DPU_OUT_CVT_SHIFT_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SHIFT_RESERVED_0__SHIFT) & DPU_OUT_CVT_SHIFT_RESERVED_0__MASK;
+}
+#define DPU_OUT_CVT_SHIFT_MINUS_EXP__MASK 0x000ff000
+#define DPU_OUT_CVT_SHIFT_MINUS_EXP__SHIFT 12
+static inline uint32_t DPU_OUT_CVT_SHIFT_MINUS_EXP(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SHIFT_MINUS_EXP__SHIFT) & DPU_OUT_CVT_SHIFT_MINUS_EXP__MASK;
+}
+#define DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT__MASK 0x00000fff
+#define DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT__SHIFT 0
+static inline uint32_t DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT__SHIFT) & DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_0 0x00004090
+#define DPU_EW_OP_VALUE_0_EW_OPERAND_0__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_0_EW_OPERAND_0__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_0_EW_OPERAND_0(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_0_EW_OPERAND_0__SHIFT) & DPU_EW_OP_VALUE_0_EW_OPERAND_0__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_1 0x00004094
+#define DPU_EW_OP_VALUE_1_EW_OPERAND_1__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_1_EW_OPERAND_1__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_1_EW_OPERAND_1(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_1_EW_OPERAND_1__SHIFT) & DPU_EW_OP_VALUE_1_EW_OPERAND_1__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_2 0x00004098
+#define DPU_EW_OP_VALUE_2_EW_OPERAND_2__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_2_EW_OPERAND_2__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_2_EW_OPERAND_2(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_2_EW_OPERAND_2__SHIFT) & DPU_EW_OP_VALUE_2_EW_OPERAND_2__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_3 0x0000409c
+#define DPU_EW_OP_VALUE_3_EW_OPERAND_3__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_3_EW_OPERAND_3__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_3_EW_OPERAND_3(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_3_EW_OPERAND_3__SHIFT) & DPU_EW_OP_VALUE_3_EW_OPERAND_3__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_4 0x000040a0
+#define DPU_EW_OP_VALUE_4_EW_OPERAND_4__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_4_EW_OPERAND_4__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_4_EW_OPERAND_4(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_4_EW_OPERAND_4__SHIFT) & DPU_EW_OP_VALUE_4_EW_OPERAND_4__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_5 0x000040a4
+#define DPU_EW_OP_VALUE_5_EW_OPERAND_5__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_5_EW_OPERAND_5__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_5_EW_OPERAND_5(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_5_EW_OPERAND_5__SHIFT) & DPU_EW_OP_VALUE_5_EW_OPERAND_5__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_6 0x000040a8
+#define DPU_EW_OP_VALUE_6_EW_OPERAND_6__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_6_EW_OPERAND_6__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_6_EW_OPERAND_6(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_6_EW_OPERAND_6__SHIFT) & DPU_EW_OP_VALUE_6_EW_OPERAND_6__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_7 0x000040ac
+#define DPU_EW_OP_VALUE_7_EW_OPERAND_7__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_7_EW_OPERAND_7__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_7_EW_OPERAND_7(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_7_EW_OPERAND_7__SHIFT) & DPU_EW_OP_VALUE_7_EW_OPERAND_7__MASK;
+}
+
+#define REG_DPU_SURFACE_ADD 0x000040c0
+#define DPU_SURFACE_ADD_SURF_ADD__MASK 0xfffffff0
+#define DPU_SURFACE_ADD_SURF_ADD__SHIFT 4
+static inline uint32_t DPU_SURFACE_ADD_SURF_ADD(uint32_t val)
+{
+ return ((val) << DPU_SURFACE_ADD_SURF_ADD__SHIFT) & DPU_SURFACE_ADD_SURF_ADD__MASK;
+}
+#define DPU_SURFACE_ADD_RESERVED_0__MASK 0x0000000f
+#define DPU_SURFACE_ADD_RESERVED_0__SHIFT 0
+static inline uint32_t DPU_SURFACE_ADD_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_SURFACE_ADD_RESERVED_0__SHIFT) & DPU_SURFACE_ADD_RESERVED_0__MASK;
+}
+
+#define REG_DPU_LUT_ACCESS_CFG 0x00004100
+#define DPU_LUT_ACCESS_CFG_RESERVED_0__MASK 0xfffc0000
+#define DPU_LUT_ACCESS_CFG_RESERVED_0__SHIFT 18
+static inline uint32_t DPU_LUT_ACCESS_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_CFG_RESERVED_0__SHIFT) & DPU_LUT_ACCESS_CFG_RESERVED_0__MASK;
+}
+#define DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE__MASK 0x00020000
+#define DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE__SHIFT 17
+static inline uint32_t DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE__SHIFT) & DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE__MASK;
+}
+#define DPU_LUT_ACCESS_CFG_LUT_TABLE_ID__MASK 0x00010000
+#define DPU_LUT_ACCESS_CFG_LUT_TABLE_ID__SHIFT 16
+static inline uint32_t DPU_LUT_ACCESS_CFG_LUT_TABLE_ID(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_CFG_LUT_TABLE_ID__SHIFT) & DPU_LUT_ACCESS_CFG_LUT_TABLE_ID__MASK;
+}
+#define DPU_LUT_ACCESS_CFG_RESERVED_1__MASK 0x0000fc00
+#define DPU_LUT_ACCESS_CFG_RESERVED_1__SHIFT 10
+static inline uint32_t DPU_LUT_ACCESS_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_CFG_RESERVED_1__SHIFT) & DPU_LUT_ACCESS_CFG_RESERVED_1__MASK;
+}
+#define DPU_LUT_ACCESS_CFG_LUT_ADDR__MASK 0x000003ff
+#define DPU_LUT_ACCESS_CFG_LUT_ADDR__SHIFT 0
+static inline uint32_t DPU_LUT_ACCESS_CFG_LUT_ADDR(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_CFG_LUT_ADDR__SHIFT) & DPU_LUT_ACCESS_CFG_LUT_ADDR__MASK;
+}
+
+#define REG_DPU_LUT_ACCESS_DATA 0x00004104
+#define DPU_LUT_ACCESS_DATA_RESERVED_0__MASK 0xffff0000
+#define DPU_LUT_ACCESS_DATA_RESERVED_0__SHIFT 16
+static inline uint32_t DPU_LUT_ACCESS_DATA_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_DATA_RESERVED_0__SHIFT) & DPU_LUT_ACCESS_DATA_RESERVED_0__MASK;
+}
+#define DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA__MASK 0x0000ffff
+#define DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA__SHIFT 0
+static inline uint32_t DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA__SHIFT) & DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA__MASK;
+}
+
+#define REG_DPU_LUT_CFG 0x00004108
+#define DPU_LUT_CFG_RESERVED_0__MASK 0xffffff00
+#define DPU_LUT_CFG_RESERVED_0__SHIFT 8
+static inline uint32_t DPU_LUT_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_RESERVED_0__SHIFT) & DPU_LUT_CFG_RESERVED_0__MASK;
+}
+#define DPU_LUT_CFG_LUT_CAL_SEL__MASK 0x00000080
+#define DPU_LUT_CFG_LUT_CAL_SEL__SHIFT 7
+static inline uint32_t DPU_LUT_CFG_LUT_CAL_SEL(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_CAL_SEL__SHIFT) & DPU_LUT_CFG_LUT_CAL_SEL__MASK;
+}
+#define DPU_LUT_CFG_LUT_HYBRID_PRIORITY__MASK 0x00000040
+#define DPU_LUT_CFG_LUT_HYBRID_PRIORITY__SHIFT 6
+static inline uint32_t DPU_LUT_CFG_LUT_HYBRID_PRIORITY(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_HYBRID_PRIORITY__SHIFT) & DPU_LUT_CFG_LUT_HYBRID_PRIORITY__MASK;
+}
+#define DPU_LUT_CFG_LUT_OFLOW_PRIORITY__MASK 0x00000020
+#define DPU_LUT_CFG_LUT_OFLOW_PRIORITY__SHIFT 5
+static inline uint32_t DPU_LUT_CFG_LUT_OFLOW_PRIORITY(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_OFLOW_PRIORITY__SHIFT) & DPU_LUT_CFG_LUT_OFLOW_PRIORITY__MASK;
+}
+#define DPU_LUT_CFG_LUT_UFLOW_PRIORITY__MASK 0x00000010
+#define DPU_LUT_CFG_LUT_UFLOW_PRIORITY__SHIFT 4
+static inline uint32_t DPU_LUT_CFG_LUT_UFLOW_PRIORITY(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_UFLOW_PRIORITY__SHIFT) & DPU_LUT_CFG_LUT_UFLOW_PRIORITY__MASK;
+}
+#define DPU_LUT_CFG_LUT_LO_LE_MUX__MASK 0x0000000c
+#define DPU_LUT_CFG_LUT_LO_LE_MUX__SHIFT 2
+static inline uint32_t DPU_LUT_CFG_LUT_LO_LE_MUX(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_LO_LE_MUX__SHIFT) & DPU_LUT_CFG_LUT_LO_LE_MUX__MASK;
+}
+#define DPU_LUT_CFG_LUT_EXPAND_EN__MASK 0x00000002
+#define DPU_LUT_CFG_LUT_EXPAND_EN__SHIFT 1
+static inline uint32_t DPU_LUT_CFG_LUT_EXPAND_EN(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_EXPAND_EN__SHIFT) & DPU_LUT_CFG_LUT_EXPAND_EN__MASK;
+}
+#define DPU_LUT_CFG_LUT_ROAD_SEL__MASK 0x00000001
+#define DPU_LUT_CFG_LUT_ROAD_SEL__SHIFT 0
+static inline uint32_t DPU_LUT_CFG_LUT_ROAD_SEL(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_ROAD_SEL__SHIFT) & DPU_LUT_CFG_LUT_ROAD_SEL__MASK;
+}
+
+#define REG_DPU_LUT_INFO 0x0000410c
+#define DPU_LUT_INFO_RESERVED_0__MASK 0xff000000
+#define DPU_LUT_INFO_RESERVED_0__SHIFT 24
+static inline uint32_t DPU_LUT_INFO_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_INFO_RESERVED_0__SHIFT) & DPU_LUT_INFO_RESERVED_0__MASK;
+}
+#define DPU_LUT_INFO_LUT_LO_INDEX_SELECT__MASK 0x00ff0000
+#define DPU_LUT_INFO_LUT_LO_INDEX_SELECT__SHIFT 16
+static inline uint32_t DPU_LUT_INFO_LUT_LO_INDEX_SELECT(uint32_t val)
+{
+ return ((val) << DPU_LUT_INFO_LUT_LO_INDEX_SELECT__SHIFT) & DPU_LUT_INFO_LUT_LO_INDEX_SELECT__MASK;
+}
+#define DPU_LUT_INFO_LUT_LE_INDEX_SELECT__MASK 0x0000ff00
+#define DPU_LUT_INFO_LUT_LE_INDEX_SELECT__SHIFT 8
+static inline uint32_t DPU_LUT_INFO_LUT_LE_INDEX_SELECT(uint32_t val)
+{
+ return ((val) << DPU_LUT_INFO_LUT_LE_INDEX_SELECT__SHIFT) & DPU_LUT_INFO_LUT_LE_INDEX_SELECT__MASK;
+}
+#define DPU_LUT_INFO_RESERVED_1__MASK 0x000000ff
+#define DPU_LUT_INFO_RESERVED_1__SHIFT 0
+static inline uint32_t DPU_LUT_INFO_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_LUT_INFO_RESERVED_1__SHIFT) & DPU_LUT_INFO_RESERVED_1__MASK;
+}
+
+#define REG_DPU_LUT_LE_START 0x00004110
+#define DPU_LUT_LE_START_LUT_LE_START__MASK 0xffffffff
+#define DPU_LUT_LE_START_LUT_LE_START__SHIFT 0
+static inline uint32_t DPU_LUT_LE_START_LUT_LE_START(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_START_LUT_LE_START__SHIFT) & DPU_LUT_LE_START_LUT_LE_START__MASK;
+}
+
+#define REG_DPU_LUT_LE_END 0x00004114
+#define DPU_LUT_LE_END_LUT_LE_END__MASK 0xffffffff
+#define DPU_LUT_LE_END_LUT_LE_END__SHIFT 0
+static inline uint32_t DPU_LUT_LE_END_LUT_LE_END(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_END_LUT_LE_END__SHIFT) & DPU_LUT_LE_END_LUT_LE_END__MASK;
+}
+
+#define REG_DPU_LUT_LO_START 0x00004118
+#define DPU_LUT_LO_START_LUT_LO_START__MASK 0xffffffff
+#define DPU_LUT_LO_START_LUT_LO_START__SHIFT 0
+static inline uint32_t DPU_LUT_LO_START_LUT_LO_START(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_START_LUT_LO_START__SHIFT) & DPU_LUT_LO_START_LUT_LO_START__MASK;
+}
+
+#define REG_DPU_LUT_LO_END 0x0000411c
+#define DPU_LUT_LO_END_LUT_LO_END__MASK 0xffffffff
+#define DPU_LUT_LO_END_LUT_LO_END__SHIFT 0
+static inline uint32_t DPU_LUT_LO_END_LUT_LO_END(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_END_LUT_LO_END__SHIFT) & DPU_LUT_LO_END_LUT_LO_END__MASK;
+}
+
+#define REG_DPU_LUT_LE_SLOPE_SCALE 0x00004120
+#define DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE__MASK 0xffff0000
+#define DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE__SHIFT 16
+static inline uint32_t DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE__SHIFT) & DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE__MASK;
+}
+#define DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE__MASK 0x0000ffff
+#define DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE__SHIFT 0
+static inline uint32_t DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE__SHIFT) & DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE__MASK;
+}
+
+#define REG_DPU_LUT_LE_SLOPE_SHIFT 0x00004124
+#define DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0__MASK 0xfffffc00
+#define DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0__SHIFT 10
+static inline uint32_t DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0__SHIFT) & DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0__MASK;
+}
+#define DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT__MASK 0x000003e0
+#define DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT__SHIFT 5
+static inline uint32_t DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT__SHIFT) & DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT__MASK;
+}
+#define DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT__MASK 0x0000001f
+#define DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT__SHIFT 0
+static inline uint32_t DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT__SHIFT) & DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT__MASK;
+}
+
+#define REG_DPU_LUT_LO_SLOPE_SCALE 0x00004128
+#define DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE__MASK 0xffff0000
+#define DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE__SHIFT 16
+static inline uint32_t DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE__SHIFT) & DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE__MASK;
+}
+#define DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE__MASK 0x0000ffff
+#define DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE__SHIFT 0
+static inline uint32_t DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE__SHIFT) & DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE__MASK;
+}
+
+#define REG_DPU_LUT_LO_SLOPE_SHIFT 0x0000412c
+#define DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0__MASK 0xfffffc00
+#define DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0__SHIFT 10
+static inline uint32_t DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0__SHIFT) & DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0__MASK;
+}
+#define DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT__MASK 0x000003e0
+#define DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT__SHIFT 5
+static inline uint32_t DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT__SHIFT) & DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT__MASK;
+}
+#define DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT__MASK 0x0000001f
+#define DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT__SHIFT 0
+static inline uint32_t DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT__SHIFT) & DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_S_STATUS 0x00005000
+#define DPU_RDMA_RDMA_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define DPU_RDMA_RDMA_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t DPU_RDMA_RDMA_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_STATUS_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_S_STATUS_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_S_STATUS_STATUS_1__MASK 0x00030000
+#define DPU_RDMA_RDMA_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t DPU_RDMA_RDMA_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_STATUS_STATUS_1__SHIFT) & DPU_RDMA_RDMA_S_STATUS_STATUS_1__MASK;
+}
+#define DPU_RDMA_RDMA_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define DPU_RDMA_RDMA_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t DPU_RDMA_RDMA_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_STATUS_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_S_STATUS_RESERVED_1__MASK;
+}
+#define DPU_RDMA_RDMA_S_STATUS_STATUS_0__MASK 0x00000003
+#define DPU_RDMA_RDMA_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_STATUS_STATUS_0__SHIFT) & DPU_RDMA_RDMA_S_STATUS_STATUS_0__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_S_POINTER 0x00005004
+#define DPU_RDMA_RDMA_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define DPU_RDMA_RDMA_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_S_POINTER_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER__MASK 0x00010000
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_EXECUTER__SHIFT) & DPU_RDMA_RDMA_S_POINTER_EXECUTER__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define DPU_RDMA_RDMA_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_S_POINTER_RESERVED_1__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__SHIFT) & DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__SHIFT) & DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__SHIFT) & DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__SHIFT) & DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_POINTER__MASK 0x00000001
+#define DPU_RDMA_RDMA_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_POINTER__SHIFT) & DPU_RDMA_RDMA_S_POINTER_POINTER__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_OPERATION_ENABLE 0x00005008
+#define DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__SHIFT) & DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_DATA_CUBE_WIDTH 0x0000500c
+#define DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0__MASK 0xffffe000
+#define DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0__SHIFT 13
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH__MASK 0x00001fff
+#define DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_DATA_CUBE_HEIGHT 0x00005010
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0__MASK 0xe0000000
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0__SHIFT 29
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR__MASK 0x1fff0000
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR__SHIFT 16
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR__MASK;
+}
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1__MASK 0x0000e000
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1__MASK;
+}
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT__MASK 0x00001fff
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_DATA_CUBE_CHANNEL 0x00005014
+#define DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0__MASK 0xffffe000
+#define DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0__SHIFT 13
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL__MASK 0x00001fff
+#define DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_SRC_BASE_ADDR 0x00005018
+#define DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__MASK 0xffffffff
+#define DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__SHIFT) & DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_BRDMA_CFG 0x0000501c
+#define DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0__MASK 0xffffffe0
+#define DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0__SHIFT 5
+static inline uint32_t DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE__MASK 0x0000001e
+#define DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE__SHIFT) & DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE__MASK;
+}
+#define DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1__MASK 0x00000001
+#define DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_BS_BASE_ADDR 0x00005020
+#define DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR__MASK 0xffffffff
+#define DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR__SHIFT) & DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_NRDMA_CFG 0x00005028
+#define DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0__MASK 0xffffffe0
+#define DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0__SHIFT 5
+static inline uint32_t DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE__MASK 0x0000001e
+#define DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE__SHIFT) & DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE__MASK;
+}
+#define DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1__MASK 0x00000001
+#define DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_BN_BASE_ADDR 0x0000502c
+#define DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR__MASK 0xffffffff
+#define DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR__SHIFT) & DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_ERDMA_CFG 0x00005034
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE__MASK 0xc0000000
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE__SHIFT 30
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE__MASK 0x20000000
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE__SHIFT 29
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN__MASK 0x10000000
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN__SHIFT 28
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0__MASK 0x0ffffff0
+#define DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE__MASK 0x0000000c
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE__SHIFT 2
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS__MASK 0x00000002
+#define DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE__MASK 0x00000001
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_EW_BASE_ADDR 0x00005038
+#define DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR__MASK 0xffffffff
+#define DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR__SHIFT) & DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_EW_SURF_STRIDE 0x00005040
+#define DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE__MASK 0xfffffff0
+#define DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE__SHIFT) & DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE__MASK;
+}
+#define DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0__MASK 0x0000000f
+#define DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_FEATURE_MODE_CFG 0x00005044
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0__MASK 0xfffc0000
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0__SHIFT 18
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION__MASK 0x00038000
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION__SHIFT 15
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN__MASK 0x00007800
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN__SHIFT 11
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE__MASK 0x00000700
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE__SHIFT 8
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION__MASK 0x000000e0
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION__SHIFT 5
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE__MASK 0x00000010
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN__MASK 0x00000008
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN__SHIFT 3
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE__MASK 0x00000006
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE__MASK 0x00000001
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_SRC_DMA_CFG 0x00005048
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR__MASK 0xfff80000
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR__SHIFT 19
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0__MASK 0x0007c000
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0__SHIFT 14
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD__MASK 0x00002000
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD__SHIFT 13
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN__MASK 0x00001000
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN__SHIFT 12
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT__MASK 0x00000e00
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT__SHIFT 9
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH__MASK 0x000001c0
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH__SHIFT 6
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT__MASK 0x00000038
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT__SHIFT 3
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH__MASK 0x00000007
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_SURF_NOTCH 0x0000504c
+#define DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR__MASK 0xfffffff0
+#define DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR__SHIFT) & DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR__MASK;
+}
+#define DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0__MASK 0x0000000f
+#define DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_PAD_CFG 0x00005064
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE__MASK 0xffff0000
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE__SHIFT 16
+static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE__MASK;
+}
+#define DPU_RDMA_RDMA_PAD_CFG_RESERVED_0__MASK 0x0000ff80
+#define DPU_RDMA_RDMA_PAD_CFG_RESERVED_0__SHIFT 7
+static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_PAD_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_TOP__MASK 0x00000070
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_TOP__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_PAD_TOP(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_PAD_CFG_PAD_TOP__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_PAD_TOP__MASK;
+}
+#define DPU_RDMA_RDMA_PAD_CFG_RESERVED_1__MASK 0x00000008
+#define DPU_RDMA_RDMA_PAD_CFG_RESERVED_1__SHIFT 3
+static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_PAD_CFG_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_RESERVED_1__MASK;
+}
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT__MASK 0x00000007
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_WEIGHT 0x00005068
+#define DPU_RDMA_RDMA_WEIGHT_E_WEIGHT__MASK 0xff000000
+#define DPU_RDMA_RDMA_WEIGHT_E_WEIGHT__SHIFT 24
+static inline uint32_t DPU_RDMA_RDMA_WEIGHT_E_WEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_WEIGHT_E_WEIGHT__SHIFT) & DPU_RDMA_RDMA_WEIGHT_E_WEIGHT__MASK;
+}
+#define DPU_RDMA_RDMA_WEIGHT_N_WEIGHT__MASK 0x00ff0000
+#define DPU_RDMA_RDMA_WEIGHT_N_WEIGHT__SHIFT 16
+static inline uint32_t DPU_RDMA_RDMA_WEIGHT_N_WEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_WEIGHT_N_WEIGHT__SHIFT) & DPU_RDMA_RDMA_WEIGHT_N_WEIGHT__MASK;
+}
+#define DPU_RDMA_RDMA_WEIGHT_B_WEIGHT__MASK 0x0000ff00
+#define DPU_RDMA_RDMA_WEIGHT_B_WEIGHT__SHIFT 8
+static inline uint32_t DPU_RDMA_RDMA_WEIGHT_B_WEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_WEIGHT_B_WEIGHT__SHIFT) & DPU_RDMA_RDMA_WEIGHT_B_WEIGHT__MASK;
+}
+#define DPU_RDMA_RDMA_WEIGHT_M_WEIGHT__MASK 0x000000ff
+#define DPU_RDMA_RDMA_WEIGHT_M_WEIGHT__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_WEIGHT_M_WEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_WEIGHT_M_WEIGHT__SHIFT) & DPU_RDMA_RDMA_WEIGHT_M_WEIGHT__MASK;
+}
+
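+/* Illustrative sketch, not part of the generated header: each helper above
+ * packs one field, so a full register value is the OR of its fields. For
+ * RDMA_WEIGHT, the four byte-wide weights (values here are placeholders)
+ * would combine as:
+ *
+ *   uint32_t w = DPU_RDMA_RDMA_WEIGHT_E_WEIGHT(0x10) |
+ *                DPU_RDMA_RDMA_WEIGHT_N_WEIGHT(0x10) |
+ *                DPU_RDMA_RDMA_WEIGHT_B_WEIGHT(0x10) |
+ *                DPU_RDMA_RDMA_WEIGHT_M_WEIGHT(0x10);
+ */
+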
+#define REG_DPU_RDMA_RDMA_EW_SURF_NOTCH 0x0000506c
+#define DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH__MASK 0xfffffff0
+#define DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH__SHIFT) & DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH__MASK;
+}
+#define DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0__MASK 0x0000000f
+#define DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0__MASK;
+}
+
+#define REG_PPU_S_STATUS 0x00006000
+#define PPU_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define PPU_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t PPU_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_S_STATUS_RESERVED_0__SHIFT) & PPU_S_STATUS_RESERVED_0__MASK;
+}
+#define PPU_S_STATUS_STATUS_1__MASK 0x00030000
+#define PPU_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t PPU_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << PPU_S_STATUS_STATUS_1__SHIFT) & PPU_S_STATUS_STATUS_1__MASK;
+}
+#define PPU_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define PPU_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t PPU_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_S_STATUS_RESERVED_1__SHIFT) & PPU_S_STATUS_RESERVED_1__MASK;
+}
+#define PPU_S_STATUS_STATUS_0__MASK 0x00000003
+#define PPU_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t PPU_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << PPU_S_STATUS_STATUS_0__SHIFT) & PPU_S_STATUS_STATUS_0__MASK;
+}
+
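+/* Sketch under an assumption, not from this patch: the generated helpers
+ * only encode field values for writes. Decoding a read-back word is the
+ * mirror operation (mask first, then shift down), e.g. for STATUS_0:
+ *
+ *   static inline uint32_t ppu_s_status_status_0(uint32_t reg)
+ *   {
+ *           return (reg & PPU_S_STATUS_STATUS_0__MASK) >>
+ *                  PPU_S_STATUS_STATUS_0__SHIFT;
+ *   }
+ */
+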
+#define REG_PPU_S_POINTER 0x00006004
+#define PPU_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define PPU_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t PPU_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_RESERVED_0__SHIFT) & PPU_S_POINTER_RESERVED_0__MASK;
+}
+#define PPU_S_POINTER_EXECUTER__MASK 0x00010000
+#define PPU_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t PPU_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_EXECUTER__SHIFT) & PPU_S_POINTER_EXECUTER__MASK;
+}
+#define PPU_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define PPU_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t PPU_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_RESERVED_1__SHIFT) & PPU_S_POINTER_RESERVED_1__MASK;
+}
+#define PPU_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define PPU_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t PPU_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & PPU_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define PPU_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define PPU_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t PPU_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_POINTER_PP_CLEAR__SHIFT) & PPU_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define PPU_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define PPU_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t PPU_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_POINTER_PP_MODE__SHIFT) & PPU_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define PPU_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define PPU_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t PPU_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_EXECUTER_PP_EN__SHIFT) & PPU_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define PPU_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define PPU_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t PPU_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_POINTER_PP_EN__SHIFT) & PPU_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define PPU_S_POINTER_POINTER__MASK 0x00000001
+#define PPU_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t PPU_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_POINTER__SHIFT) & PPU_S_POINTER_POINTER__MASK;
+}
+
+#define REG_PPU_OPERATION_ENABLE 0x00006008
+#define PPU_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define PPU_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t PPU_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_ENABLE_RESERVED_0__SHIFT) & PPU_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define PPU_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define PPU_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t PPU_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_ENABLE_OP_EN__SHIFT) & PPU_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_IN_WIDTH 0x0000600c
+#define PPU_DATA_CUBE_IN_WIDTH_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_IN_WIDTH_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_IN_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_WIDTH_RESERVED_0__SHIFT) & PPU_DATA_CUBE_IN_WIDTH_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__MASK 0x00001fff
+#define PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__SHIFT) & PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_IN_HEIGHT 0x00006010
+#define PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0__SHIFT) & PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__MASK 0x00001fff
+#define PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__SHIFT) & PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_IN_CHANNEL 0x00006014
+#define PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0__SHIFT) & PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__MASK 0x00001fff
+#define PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__SHIFT) & PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_OUT_WIDTH 0x00006018
+#define PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0__SHIFT) & PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH__MASK 0x00001fff
+#define PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH__SHIFT) & PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_OUT_HEIGHT 0x0000601c
+#define PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0__SHIFT) & PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT__MASK 0x00001fff
+#define PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT__SHIFT) & PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_OUT_CHANNEL 0x00006020
+#define PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0__SHIFT) & PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL__MASK 0x00001fff
+#define PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL__SHIFT) & PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL__MASK;
+}
+
+#define REG_PPU_OPERATION_MODE_CFG 0x00006024
+#define PPU_OPERATION_MODE_CFG_RESERVED_0__MASK 0x80000000
+#define PPU_OPERATION_MODE_CFG_RESERVED_0__SHIFT 31
+static inline uint32_t PPU_OPERATION_MODE_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_RESERVED_0__SHIFT) & PPU_OPERATION_MODE_CFG_RESERVED_0__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_INDEX_EN__MASK 0x40000000
+#define PPU_OPERATION_MODE_CFG_INDEX_EN__SHIFT 30
+static inline uint32_t PPU_OPERATION_MODE_CFG_INDEX_EN(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_INDEX_EN__SHIFT) & PPU_OPERATION_MODE_CFG_INDEX_EN__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_RESERVED_1__MASK 0x20000000
+#define PPU_OPERATION_MODE_CFG_RESERVED_1__SHIFT 29
+static inline uint32_t PPU_OPERATION_MODE_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_RESERVED_1__SHIFT) & PPU_OPERATION_MODE_CFG_RESERVED_1__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_NOTCH_ADDR__MASK 0x1fff0000
+#define PPU_OPERATION_MODE_CFG_NOTCH_ADDR__SHIFT 16
+static inline uint32_t PPU_OPERATION_MODE_CFG_NOTCH_ADDR(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_NOTCH_ADDR__SHIFT) & PPU_OPERATION_MODE_CFG_NOTCH_ADDR__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_RESERVED_2__MASK 0x0000ff00
+#define PPU_OPERATION_MODE_CFG_RESERVED_2__SHIFT 8
+static inline uint32_t PPU_OPERATION_MODE_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_RESERVED_2__SHIFT) & PPU_OPERATION_MODE_CFG_RESERVED_2__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_USE_CNT__MASK 0x000000e0
+#define PPU_OPERATION_MODE_CFG_USE_CNT__SHIFT 5
+static inline uint32_t PPU_OPERATION_MODE_CFG_USE_CNT(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_USE_CNT__SHIFT) & PPU_OPERATION_MODE_CFG_USE_CNT__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_FLYING_MODE__MASK 0x00000010
+#define PPU_OPERATION_MODE_CFG_FLYING_MODE__SHIFT 4
+static inline uint32_t PPU_OPERATION_MODE_CFG_FLYING_MODE(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_FLYING_MODE__SHIFT) & PPU_OPERATION_MODE_CFG_FLYING_MODE__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_RESERVED_3__MASK 0x0000000c
+#define PPU_OPERATION_MODE_CFG_RESERVED_3__SHIFT 2
+static inline uint32_t PPU_OPERATION_MODE_CFG_RESERVED_3(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_RESERVED_3__SHIFT) & PPU_OPERATION_MODE_CFG_RESERVED_3__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_POOLING_METHOD__MASK 0x00000003
+#define PPU_OPERATION_MODE_CFG_POOLING_METHOD__SHIFT 0
+static inline uint32_t PPU_OPERATION_MODE_CFG_POOLING_METHOD(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_POOLING_METHOD__SHIFT) & PPU_OPERATION_MODE_CFG_POOLING_METHOD__MASK;
+}
+
+#define REG_PPU_POOLING_KERNEL_CFG 0x00006034
+#define PPU_POOLING_KERNEL_CFG_RESERVED_0__MASK 0xff000000
+#define PPU_POOLING_KERNEL_CFG_RESERVED_0__SHIFT 24
+static inline uint32_t PPU_POOLING_KERNEL_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_RESERVED_0__SHIFT) & PPU_POOLING_KERNEL_CFG_RESERVED_0__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT__MASK 0x00f00000
+#define PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT__SHIFT 20
+static inline uint32_t PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT__SHIFT) & PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH__MASK 0x000f0000
+#define PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH__SHIFT 16
+static inline uint32_t PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH__SHIFT) & PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_RESERVED_1__MASK 0x0000f000
+#define PPU_POOLING_KERNEL_CFG_RESERVED_1__SHIFT 12
+static inline uint32_t PPU_POOLING_KERNEL_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_RESERVED_1__SHIFT) & PPU_POOLING_KERNEL_CFG_RESERVED_1__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT__MASK 0x00000f00
+#define PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT__SHIFT 8
+static inline uint32_t PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT__SHIFT) & PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_RESERVED_2__MASK 0x000000f0
+#define PPU_POOLING_KERNEL_CFG_RESERVED_2__SHIFT 4
+static inline uint32_t PPU_POOLING_KERNEL_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_RESERVED_2__SHIFT) & PPU_POOLING_KERNEL_CFG_RESERVED_2__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH__MASK 0x0000000f
+#define PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH__SHIFT 0
+static inline uint32_t PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH__SHIFT) & PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH__MASK;
+}
+
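+/* Illustrative example only (hypothetical parameter values, not part of the
+ * generated header): composing and writing a pooling kernel configuration.
+ * A 3x3 kernel with stride 2 in both dimensions would be programmed as:
+ *
+ *   uint32_t cfg = PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH(3) |
+ *                  PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT(3) |
+ *                  PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH(2) |
+ *                  PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT(2);
+ *   writel(cfg, base + REG_PPU_POOLING_KERNEL_CFG);
+ */
+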
+#define REG_PPU_RECIP_KERNEL_WIDTH 0x00006038
+#define PPU_RECIP_KERNEL_WIDTH_RESERVED_0__MASK 0xfffe0000
+#define PPU_RECIP_KERNEL_WIDTH_RESERVED_0__SHIFT 17
+static inline uint32_t PPU_RECIP_KERNEL_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RECIP_KERNEL_WIDTH_RESERVED_0__SHIFT) & PPU_RECIP_KERNEL_WIDTH_RESERVED_0__MASK;
+}
+#define PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH__MASK 0x0001ffff
+#define PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH__SHIFT 0
+static inline uint32_t PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH__SHIFT) & PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH__MASK;
+}
+
+#define REG_PPU_RECIP_KERNEL_HEIGHT 0x0000603c
+#define PPU_RECIP_KERNEL_HEIGHT_RESERVED_0__MASK 0xfffe0000
+#define PPU_RECIP_KERNEL_HEIGHT_RESERVED_0__SHIFT 17
+static inline uint32_t PPU_RECIP_KERNEL_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RECIP_KERNEL_HEIGHT_RESERVED_0__SHIFT) & PPU_RECIP_KERNEL_HEIGHT_RESERVED_0__MASK;
+}
+#define PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT__MASK 0x0001ffff
+#define PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT__SHIFT 0
+static inline uint32_t PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT__SHIFT) & PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT__MASK;
+}
+
+#define REG_PPU_POOLING_PADDING_CFG 0x00006040
+#define PPU_POOLING_PADDING_CFG_RESERVED_0__MASK 0xffff8000
+#define PPU_POOLING_PADDING_CFG_RESERVED_0__SHIFT 15
+static inline uint32_t PPU_POOLING_PADDING_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_RESERVED_0__SHIFT) & PPU_POOLING_PADDING_CFG_RESERVED_0__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_PAD_BOTTOM__MASK 0x00007000
+#define PPU_POOLING_PADDING_CFG_PAD_BOTTOM__SHIFT 12
+static inline uint32_t PPU_POOLING_PADDING_CFG_PAD_BOTTOM(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_PAD_BOTTOM__SHIFT) & PPU_POOLING_PADDING_CFG_PAD_BOTTOM__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_RESERVED_1__MASK 0x00000800
+#define PPU_POOLING_PADDING_CFG_RESERVED_1__SHIFT 11
+static inline uint32_t PPU_POOLING_PADDING_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_RESERVED_1__SHIFT) & PPU_POOLING_PADDING_CFG_RESERVED_1__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_PAD_RIGHT__MASK 0x00000700
+#define PPU_POOLING_PADDING_CFG_PAD_RIGHT__SHIFT 8
+static inline uint32_t PPU_POOLING_PADDING_CFG_PAD_RIGHT(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_PAD_RIGHT__SHIFT) & PPU_POOLING_PADDING_CFG_PAD_RIGHT__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_RESERVED_2__MASK 0x00000080
+#define PPU_POOLING_PADDING_CFG_RESERVED_2__SHIFT 7
+static inline uint32_t PPU_POOLING_PADDING_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_RESERVED_2__SHIFT) & PPU_POOLING_PADDING_CFG_RESERVED_2__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_PAD_TOP__MASK 0x00000070
+#define PPU_POOLING_PADDING_CFG_PAD_TOP__SHIFT 4
+static inline uint32_t PPU_POOLING_PADDING_CFG_PAD_TOP(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_PAD_TOP__SHIFT) & PPU_POOLING_PADDING_CFG_PAD_TOP__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_RESERVED_3__MASK 0x00000008
+#define PPU_POOLING_PADDING_CFG_RESERVED_3__SHIFT 3
+static inline uint32_t PPU_POOLING_PADDING_CFG_RESERVED_3(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_RESERVED_3__SHIFT) & PPU_POOLING_PADDING_CFG_RESERVED_3__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_PAD_LEFT__MASK 0x00000007
+#define PPU_POOLING_PADDING_CFG_PAD_LEFT__SHIFT 0
+static inline uint32_t PPU_POOLING_PADDING_CFG_PAD_LEFT(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_PAD_LEFT__SHIFT) & PPU_POOLING_PADDING_CFG_PAD_LEFT__MASK;
+}
+
+#define REG_PPU_PADDING_VALUE_1_CFG 0x00006044
+#define PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0__MASK 0xffffffff
+#define PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0__SHIFT 0
+static inline uint32_t PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0(uint32_t val)
+{
+ return ((val) << PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0__SHIFT) & PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0__MASK;
+}
+
+#define REG_PPU_PADDING_VALUE_2_CFG 0x00006048
+#define PPU_PADDING_VALUE_2_CFG_RESERVED_0__MASK 0xfffffff8
+#define PPU_PADDING_VALUE_2_CFG_RESERVED_0__SHIFT 3
+static inline uint32_t PPU_PADDING_VALUE_2_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_PADDING_VALUE_2_CFG_RESERVED_0__SHIFT) & PPU_PADDING_VALUE_2_CFG_RESERVED_0__MASK;
+}
+#define PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1__MASK 0x00000007
+#define PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1__SHIFT 0
+static inline uint32_t PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1(uint32_t val)
+{
+ return ((val) << PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1__SHIFT) & PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1__MASK;
+}
+
+#define REG_PPU_DST_BASE_ADDR 0x00006070
+#define PPU_DST_BASE_ADDR_DST_BASE_ADDR__MASK 0xfffffff0
+#define PPU_DST_BASE_ADDR_DST_BASE_ADDR__SHIFT 4
+static inline uint32_t PPU_DST_BASE_ADDR_DST_BASE_ADDR(uint32_t val)
+{
+ return ((val) << PPU_DST_BASE_ADDR_DST_BASE_ADDR__SHIFT) & PPU_DST_BASE_ADDR_DST_BASE_ADDR__MASK;
+}
+#define PPU_DST_BASE_ADDR_RESERVED_0__MASK 0x0000000f
+#define PPU_DST_BASE_ADDR_RESERVED_0__SHIFT 0
+static inline uint32_t PPU_DST_BASE_ADDR_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DST_BASE_ADDR_RESERVED_0__SHIFT) & PPU_DST_BASE_ADDR_RESERVED_0__MASK;
+}
+
+#define REG_PPU_DST_SURF_STRIDE 0x0000607c
+#define PPU_DST_SURF_STRIDE_DST_SURF_STRIDE__MASK 0xfffffff0
+#define PPU_DST_SURF_STRIDE_DST_SURF_STRIDE__SHIFT 4
+static inline uint32_t PPU_DST_SURF_STRIDE_DST_SURF_STRIDE(uint32_t val)
+{
+ return ((val) << PPU_DST_SURF_STRIDE_DST_SURF_STRIDE__SHIFT) & PPU_DST_SURF_STRIDE_DST_SURF_STRIDE__MASK;
+}
+#define PPU_DST_SURF_STRIDE_RESERVED_0__MASK 0x0000000f
+#define PPU_DST_SURF_STRIDE_RESERVED_0__SHIFT 0
+static inline uint32_t PPU_DST_SURF_STRIDE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DST_SURF_STRIDE_RESERVED_0__SHIFT) & PPU_DST_SURF_STRIDE_RESERVED_0__MASK;
+}
+
+#define REG_PPU_DATA_FORMAT 0x00006084
+#define PPU_DATA_FORMAT_INDEX_ADD__MASK 0xfffffff0
+#define PPU_DATA_FORMAT_INDEX_ADD__SHIFT 4
+static inline uint32_t PPU_DATA_FORMAT_INDEX_ADD(uint32_t val)
+{
+ return ((val) << PPU_DATA_FORMAT_INDEX_ADD__SHIFT) & PPU_DATA_FORMAT_INDEX_ADD__MASK;
+}
+#define PPU_DATA_FORMAT_DPU_FLYIN__MASK 0x00000008
+#define PPU_DATA_FORMAT_DPU_FLYIN__SHIFT 3
+static inline uint32_t PPU_DATA_FORMAT_DPU_FLYIN(uint32_t val)
+{
+ return ((val) << PPU_DATA_FORMAT_DPU_FLYIN__SHIFT) & PPU_DATA_FORMAT_DPU_FLYIN__MASK;
+}
+#define PPU_DATA_FORMAT_PROC_PRECISION__MASK 0x00000007
+#define PPU_DATA_FORMAT_PROC_PRECISION__SHIFT 0
+static inline uint32_t PPU_DATA_FORMAT_PROC_PRECISION(uint32_t val)
+{
+ return ((val) << PPU_DATA_FORMAT_PROC_PRECISION__SHIFT) & PPU_DATA_FORMAT_PROC_PRECISION__MASK;
+}
+
+#define REG_PPU_MISC_CTRL 0x000060dc
+#define PPU_MISC_CTRL_SURF_LEN__MASK 0xffff0000
+#define PPU_MISC_CTRL_SURF_LEN__SHIFT 16
+static inline uint32_t PPU_MISC_CTRL_SURF_LEN(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_SURF_LEN__SHIFT) & PPU_MISC_CTRL_SURF_LEN__MASK;
+}
+#define PPU_MISC_CTRL_RESERVED_0__MASK 0x0000fe00
+#define PPU_MISC_CTRL_RESERVED_0__SHIFT 9
+static inline uint32_t PPU_MISC_CTRL_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_RESERVED_0__SHIFT) & PPU_MISC_CTRL_RESERVED_0__MASK;
+}
+#define PPU_MISC_CTRL_MC_SURF_OUT__MASK 0x00000100
+#define PPU_MISC_CTRL_MC_SURF_OUT__SHIFT 8
+static inline uint32_t PPU_MISC_CTRL_MC_SURF_OUT(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_MC_SURF_OUT__SHIFT) & PPU_MISC_CTRL_MC_SURF_OUT__MASK;
+}
+#define PPU_MISC_CTRL_NONALIGN__MASK 0x00000080
+#define PPU_MISC_CTRL_NONALIGN__SHIFT 7
+static inline uint32_t PPU_MISC_CTRL_NONALIGN(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_NONALIGN__SHIFT) & PPU_MISC_CTRL_NONALIGN__MASK;
+}
+#define PPU_MISC_CTRL_RESERVED_1__MASK 0x00000070
+#define PPU_MISC_CTRL_RESERVED_1__SHIFT 4
+static inline uint32_t PPU_MISC_CTRL_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_RESERVED_1__SHIFT) & PPU_MISC_CTRL_RESERVED_1__MASK;
+}
+#define PPU_MISC_CTRL_BURST_LEN__MASK 0x0000000f
+#define PPU_MISC_CTRL_BURST_LEN__SHIFT 0
+static inline uint32_t PPU_MISC_CTRL_BURST_LEN(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_BURST_LEN__SHIFT) & PPU_MISC_CTRL_BURST_LEN__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_S_STATUS 0x00007000
+#define PPU_RDMA_RDMA_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define PPU_RDMA_RDMA_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t PPU_RDMA_RDMA_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_STATUS_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_S_STATUS_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_S_STATUS_STATUS_1__MASK 0x00030000
+#define PPU_RDMA_RDMA_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t PPU_RDMA_RDMA_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_STATUS_STATUS_1__SHIFT) & PPU_RDMA_RDMA_S_STATUS_STATUS_1__MASK;
+}
+#define PPU_RDMA_RDMA_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define PPU_RDMA_RDMA_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t PPU_RDMA_RDMA_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_STATUS_RESERVED_1__SHIFT) & PPU_RDMA_RDMA_S_STATUS_RESERVED_1__MASK;
+}
+#define PPU_RDMA_RDMA_S_STATUS_STATUS_0__MASK 0x00000003
+#define PPU_RDMA_RDMA_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_STATUS_STATUS_0__SHIFT) & PPU_RDMA_RDMA_S_STATUS_STATUS_0__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_S_POINTER 0x00007004
+#define PPU_RDMA_RDMA_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define PPU_RDMA_RDMA_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_S_POINTER_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER__MASK 0x00010000
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_EXECUTER__SHIFT) & PPU_RDMA_RDMA_S_POINTER_EXECUTER__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define PPU_RDMA_RDMA_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_RESERVED_1__SHIFT) & PPU_RDMA_RDMA_S_POINTER_RESERVED_1__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__SHIFT) & PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__SHIFT) & PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__SHIFT) & PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__SHIFT) & PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_POINTER__MASK 0x00000001
+#define PPU_RDMA_RDMA_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_POINTER__SHIFT) & PPU_RDMA_RDMA_S_POINTER_POINTER__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_OPERATION_ENABLE 0x00007008
+#define PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__SHIFT) & PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_CUBE_IN_WIDTH 0x0000700c
+#define PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0__MASK 0xffffe000
+#define PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__MASK 0x00001fff
+#define PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_CUBE_IN_HEIGHT 0x00007010
+#define PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0__MASK 0xffffe000
+#define PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__MASK 0x00001fff
+#define PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_CUBE_IN_CHANNEL 0x00007014
+#define PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0__MASK 0xffffe000
+#define PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__MASK 0x00001fff
+#define PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_SRC_BASE_ADDR 0x0000701c
+#define PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__MASK 0xffffffff
+#define PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__SHIFT) & PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_SRC_LINE_STRIDE 0x00007024
+#define PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE__MASK 0xfffffff0
+#define PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE__SHIFT 4
+static inline uint32_t PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE__SHIFT) & PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE__MASK;
+}
+#define PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0__MASK 0x0000000f
+#define PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_SRC_SURF_STRIDE 0x00007028
+#define PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE__MASK 0xfffffff0
+#define PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE__SHIFT 4
+static inline uint32_t PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE__SHIFT) & PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE__MASK;
+}
+#define PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0__MASK 0x0000000f
+#define PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_DATA_FORMAT 0x00007030
+#define PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0__MASK 0xfffffffc
+#define PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0__SHIFT 2
+static inline uint32_t PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION__MASK 0x00000003
+#define PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION__SHIFT) & PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION__MASK;
+}
+
+#define REG_DDMA_CFG_OUTSTANDING 0x00008000
+#define DDMA_CFG_OUTSTANDING_RESERVED_0__MASK 0xffff0000
+#define DDMA_CFG_OUTSTANDING_RESERVED_0__SHIFT 16
+static inline uint32_t DDMA_CFG_OUTSTANDING_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_OUTSTANDING_RESERVED_0__SHIFT) & DDMA_CFG_OUTSTANDING_RESERVED_0__MASK;
+}
+#define DDMA_CFG_OUTSTANDING_WR_OS_CNT__MASK 0x0000ff00
+#define DDMA_CFG_OUTSTANDING_WR_OS_CNT__SHIFT 8
+static inline uint32_t DDMA_CFG_OUTSTANDING_WR_OS_CNT(uint32_t val)
+{
+ return ((val) << DDMA_CFG_OUTSTANDING_WR_OS_CNT__SHIFT) & DDMA_CFG_OUTSTANDING_WR_OS_CNT__MASK;
+}
+#define DDMA_CFG_OUTSTANDING_RD_OS_CNT__MASK 0x000000ff
+#define DDMA_CFG_OUTSTANDING_RD_OS_CNT__SHIFT 0
+static inline uint32_t DDMA_CFG_OUTSTANDING_RD_OS_CNT(uint32_t val)
+{
+ return ((val) << DDMA_CFG_OUTSTANDING_RD_OS_CNT__SHIFT) & DDMA_CFG_OUTSTANDING_RD_OS_CNT__MASK;
+}
+
+#define REG_DDMA_RD_WEIGHT_0 0x00008004
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__MASK 0xff000000
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__SHIFT 24
+static inline uint32_t DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__SHIFT) & DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__MASK;
+}
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__MASK 0x00ff0000
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__SHIFT 16
+static inline uint32_t DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__SHIFT) & DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__MASK;
+}
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__MASK 0x0000ff00
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__SHIFT 8
+static inline uint32_t DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__SHIFT) & DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__MASK;
+}
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__MASK 0x000000ff
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__SHIFT 0
+static inline uint32_t DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__SHIFT) & DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__MASK;
+}
+
+#define REG_DDMA_WR_WEIGHT_0 0x00008008
+#define DDMA_WR_WEIGHT_0_RESERVED_0__MASK 0xffff0000
+#define DDMA_WR_WEIGHT_0_RESERVED_0__SHIFT 16
+static inline uint32_t DDMA_WR_WEIGHT_0_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_WR_WEIGHT_0_RESERVED_0__SHIFT) & DDMA_WR_WEIGHT_0_RESERVED_0__MASK;
+}
+#define DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__MASK 0x0000ff00
+#define DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__SHIFT 8
+static inline uint32_t DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP(uint32_t val)
+{
+ return ((val) << DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__SHIFT) & DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__MASK;
+}
+#define DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__MASK 0x000000ff
+#define DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__SHIFT 0
+static inline uint32_t DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU(uint32_t val)
+{
+ return ((val) << DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__SHIFT) & DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__MASK;
+}
+
+#define REG_DDMA_CFG_ID_ERROR 0x0000800c
+#define DDMA_CFG_ID_ERROR_RESERVED_0__MASK 0xfffffc00
+#define DDMA_CFG_ID_ERROR_RESERVED_0__SHIFT 10
+static inline uint32_t DDMA_CFG_ID_ERROR_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_ID_ERROR_RESERVED_0__SHIFT) & DDMA_CFG_ID_ERROR_RESERVED_0__MASK;
+}
+#define DDMA_CFG_ID_ERROR_WR_RESP_ID__MASK 0x000003c0
+#define DDMA_CFG_ID_ERROR_WR_RESP_ID__SHIFT 6
+static inline uint32_t DDMA_CFG_ID_ERROR_WR_RESP_ID(uint32_t val)
+{
+ return ((val) << DDMA_CFG_ID_ERROR_WR_RESP_ID__SHIFT) & DDMA_CFG_ID_ERROR_WR_RESP_ID__MASK;
+}
+#define DDMA_CFG_ID_ERROR_RESERVED_1__MASK 0x00000020
+#define DDMA_CFG_ID_ERROR_RESERVED_1__SHIFT 5
+static inline uint32_t DDMA_CFG_ID_ERROR_RESERVED_1(uint32_t val)
+{
+ return ((val) << DDMA_CFG_ID_ERROR_RESERVED_1__SHIFT) & DDMA_CFG_ID_ERROR_RESERVED_1__MASK;
+}
+#define DDMA_CFG_ID_ERROR_RD_RESP_ID__MASK 0x0000001f
+#define DDMA_CFG_ID_ERROR_RD_RESP_ID__SHIFT 0
+static inline uint32_t DDMA_CFG_ID_ERROR_RD_RESP_ID(uint32_t val)
+{
+ return ((val) << DDMA_CFG_ID_ERROR_RD_RESP_ID__SHIFT) & DDMA_CFG_ID_ERROR_RD_RESP_ID__MASK;
+}
+
+#define REG_DDMA_RD_WEIGHT_1 0x00008010
+#define DDMA_RD_WEIGHT_1_RESERVED_0__MASK 0xffffff00
+#define DDMA_RD_WEIGHT_1_RESERVED_0__SHIFT 8
+static inline uint32_t DDMA_RD_WEIGHT_1_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_1_RESERVED_0__SHIFT) & DDMA_RD_WEIGHT_1_RESERVED_0__MASK;
+}
+#define DDMA_RD_WEIGHT_1_RD_WEIGHT_PC__MASK 0x000000ff
+#define DDMA_RD_WEIGHT_1_RD_WEIGHT_PC__SHIFT 0
+static inline uint32_t DDMA_RD_WEIGHT_1_RD_WEIGHT_PC(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_1_RD_WEIGHT_PC__SHIFT) & DDMA_RD_WEIGHT_1_RD_WEIGHT_PC__MASK;
+}
+
+#define REG_DDMA_CFG_DMA_FIFO_CLR 0x00008014
+#define DDMA_CFG_DMA_FIFO_CLR_RESERVED_0__MASK 0xfffffffe
+#define DDMA_CFG_DMA_FIFO_CLR_RESERVED_0__SHIFT 1
+static inline uint32_t DDMA_CFG_DMA_FIFO_CLR_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_FIFO_CLR_RESERVED_0__SHIFT) & DDMA_CFG_DMA_FIFO_CLR_RESERVED_0__MASK;
+}
+#define DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__MASK 0x00000001
+#define DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__SHIFT) & DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__MASK;
+}
+
+#define REG_DDMA_CFG_DMA_ARB 0x00008018
+#define DDMA_CFG_DMA_ARB_RESERVED_0__MASK 0xfffffc00
+#define DDMA_CFG_DMA_ARB_RESERVED_0__SHIFT 10
+static inline uint32_t DDMA_CFG_DMA_ARB_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_RESERVED_0__SHIFT) & DDMA_CFG_DMA_ARB_RESERVED_0__MASK;
+}
+#define DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__MASK 0x00000200
+#define DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__SHIFT 9
+static inline uint32_t DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__SHIFT) & DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__MASK;
+}
+#define DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__MASK 0x00000100
+#define DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__SHIFT 8
+static inline uint32_t DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__SHIFT) & DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__MASK;
+}
+#define DDMA_CFG_DMA_ARB_RESERVED_1__MASK 0x00000080
+#define DDMA_CFG_DMA_ARB_RESERVED_1__SHIFT 7
+static inline uint32_t DDMA_CFG_DMA_ARB_RESERVED_1(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_RESERVED_1__SHIFT) & DDMA_CFG_DMA_ARB_RESERVED_1__MASK;
+}
+#define DDMA_CFG_DMA_ARB_WR_FIX_ARB__MASK 0x00000070
+#define DDMA_CFG_DMA_ARB_WR_FIX_ARB__SHIFT 4
+static inline uint32_t DDMA_CFG_DMA_ARB_WR_FIX_ARB(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_WR_FIX_ARB__SHIFT) & DDMA_CFG_DMA_ARB_WR_FIX_ARB__MASK;
+}
+#define DDMA_CFG_DMA_ARB_RESERVED_2__MASK 0x00000008
+#define DDMA_CFG_DMA_ARB_RESERVED_2__SHIFT 3
+static inline uint32_t DDMA_CFG_DMA_ARB_RESERVED_2(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_RESERVED_2__SHIFT) & DDMA_CFG_DMA_ARB_RESERVED_2__MASK;
+}
+#define DDMA_CFG_DMA_ARB_RD_FIX_ARB__MASK 0x00000007
+#define DDMA_CFG_DMA_ARB_RD_FIX_ARB__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_ARB_RD_FIX_ARB(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_RD_FIX_ARB__SHIFT) & DDMA_CFG_DMA_ARB_RD_FIX_ARB__MASK;
+}
+
+#define REG_DDMA_CFG_DMA_RD_QOS 0x00008020
+#define DDMA_CFG_DMA_RD_QOS_RESERVED_0__MASK 0xfffffc00
+#define DDMA_CFG_DMA_RD_QOS_RESERVED_0__SHIFT 10
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RESERVED_0__SHIFT) & DDMA_CFG_DMA_RD_QOS_RESERVED_0__MASK;
+}
+#define DDMA_CFG_DMA_RD_QOS_RD_PC_QOS__MASK 0x00000300
+#define DDMA_CFG_DMA_RD_QOS_RD_PC_QOS__SHIFT 8
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_PC_QOS(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RD_PC_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_PC_QOS__MASK;
+}
+#define DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__MASK 0x000000c0
+#define DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__SHIFT 6
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__MASK;
+}
+#define DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__MASK 0x00000030
+#define DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__SHIFT 4
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__MASK;
+}
+#define DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__MASK 0x0000000c
+#define DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__SHIFT 2
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__MASK;
+}
+#define DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__MASK 0x00000003
+#define DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__MASK;
+}
+
+#define REG_DDMA_CFG_DMA_RD_CFG 0x00008024
+#define DDMA_CFG_DMA_RD_CFG_RESERVED_0__MASK 0xffffe000
+#define DDMA_CFG_DMA_RD_CFG_RESERVED_0__SHIFT 13
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RESERVED_0__SHIFT) & DDMA_CFG_DMA_RD_CFG_RESERVED_0__MASK;
+}
+#define DDMA_CFG_DMA_RD_CFG_RD_ARLOCK__MASK 0x00001000
+#define DDMA_CFG_DMA_RD_CFG_RD_ARLOCK__SHIFT 12
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARLOCK(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARLOCK__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARLOCK__MASK;
+}
+#define DDMA_CFG_DMA_RD_CFG_RD_ARCACHE__MASK 0x00000f00
+#define DDMA_CFG_DMA_RD_CFG_RD_ARCACHE__SHIFT 8
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARCACHE(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARCACHE__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARCACHE__MASK;
+}
+#define DDMA_CFG_DMA_RD_CFG_RD_ARPROT__MASK 0x000000e0
+#define DDMA_CFG_DMA_RD_CFG_RD_ARPROT__SHIFT 5
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARPROT(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARPROT__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARPROT__MASK;
+}
+#define DDMA_CFG_DMA_RD_CFG_RD_ARBURST__MASK 0x00000018
+#define DDMA_CFG_DMA_RD_CFG_RD_ARBURST__SHIFT 3
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARBURST(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARBURST__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARBURST__MASK;
+}
+#define DDMA_CFG_DMA_RD_CFG_RD_ARSIZE__MASK 0x00000007
+#define DDMA_CFG_DMA_RD_CFG_RD_ARSIZE__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARSIZE(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARSIZE__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARSIZE__MASK;
+}
+
+#define REG_DDMA_CFG_DMA_WR_CFG 0x00008028
+#define DDMA_CFG_DMA_WR_CFG_RESERVED_0__MASK 0xffffe000
+#define DDMA_CFG_DMA_WR_CFG_RESERVED_0__SHIFT 13
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_RESERVED_0__SHIFT) & DDMA_CFG_DMA_WR_CFG_RESERVED_0__MASK;
+}
+#define DDMA_CFG_DMA_WR_CFG_WR_AWLOCK__MASK 0x00001000
+#define DDMA_CFG_DMA_WR_CFG_WR_AWLOCK__SHIFT 12
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWLOCK(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWLOCK__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWLOCK__MASK;
+}
+#define DDMA_CFG_DMA_WR_CFG_WR_AWCACHE__MASK 0x00000f00
+#define DDMA_CFG_DMA_WR_CFG_WR_AWCACHE__SHIFT 8
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWCACHE(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWCACHE__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWCACHE__MASK;
+}
+#define DDMA_CFG_DMA_WR_CFG_WR_AWPROT__MASK 0x000000e0
+#define DDMA_CFG_DMA_WR_CFG_WR_AWPROT__SHIFT 5
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWPROT(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWPROT__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWPROT__MASK;
+}
+#define DDMA_CFG_DMA_WR_CFG_WR_AWBURST__MASK 0x00000018
+#define DDMA_CFG_DMA_WR_CFG_WR_AWBURST__SHIFT 3
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWBURST(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWBURST__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWBURST__MASK;
+}
+#define DDMA_CFG_DMA_WR_CFG_WR_AWSIZE__MASK 0x00000007
+#define DDMA_CFG_DMA_WR_CFG_WR_AWSIZE__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWSIZE(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWSIZE__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWSIZE__MASK;
+}
+
+#define REG_DDMA_CFG_DMA_WSTRB 0x0000802c
+#define DDMA_CFG_DMA_WSTRB_WR_WSTRB__MASK 0xffffffff
+#define DDMA_CFG_DMA_WSTRB_WR_WSTRB__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_WSTRB_WR_WSTRB(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WSTRB_WR_WSTRB__SHIFT) & DDMA_CFG_DMA_WSTRB_WR_WSTRB__MASK;
+}
+
+#define REG_DDMA_CFG_STATUS 0x00008030
+#define DDMA_CFG_STATUS_RESERVED_0__MASK 0xfffffe00
+#define DDMA_CFG_STATUS_RESERVED_0__SHIFT 9
+static inline uint32_t DDMA_CFG_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_STATUS_RESERVED_0__SHIFT) & DDMA_CFG_STATUS_RESERVED_0__MASK;
+}
+#define DDMA_CFG_STATUS_IDEL__MASK 0x00000100
+#define DDMA_CFG_STATUS_IDEL__SHIFT 8
+static inline uint32_t DDMA_CFG_STATUS_IDEL(uint32_t val)
+{
+ return ((val) << DDMA_CFG_STATUS_IDEL__SHIFT) & DDMA_CFG_STATUS_IDEL__MASK;
+}
+#define DDMA_CFG_STATUS_RESERVED_1__MASK 0x000000ff
+#define DDMA_CFG_STATUS_RESERVED_1__SHIFT 0
+static inline uint32_t DDMA_CFG_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << DDMA_CFG_STATUS_RESERVED_1__SHIFT) & DDMA_CFG_STATUS_RESERVED_1__MASK;
+}
+
+#define REG_SDMA_CFG_OUTSTANDING 0x00009000
+#define SDMA_CFG_OUTSTANDING_RESERVED_0__MASK 0xffff0000
+#define SDMA_CFG_OUTSTANDING_RESERVED_0__SHIFT 16
+static inline uint32_t SDMA_CFG_OUTSTANDING_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_OUTSTANDING_RESERVED_0__SHIFT) & SDMA_CFG_OUTSTANDING_RESERVED_0__MASK;
+}
+#define SDMA_CFG_OUTSTANDING_WR_OS_CNT__MASK 0x0000ff00
+#define SDMA_CFG_OUTSTANDING_WR_OS_CNT__SHIFT 8
+static inline uint32_t SDMA_CFG_OUTSTANDING_WR_OS_CNT(uint32_t val)
+{
+ return ((val) << SDMA_CFG_OUTSTANDING_WR_OS_CNT__SHIFT) & SDMA_CFG_OUTSTANDING_WR_OS_CNT__MASK;
+}
+#define SDMA_CFG_OUTSTANDING_RD_OS_CNT__MASK 0x000000ff
+#define SDMA_CFG_OUTSTANDING_RD_OS_CNT__SHIFT 0
+static inline uint32_t SDMA_CFG_OUTSTANDING_RD_OS_CNT(uint32_t val)
+{
+ return ((val) << SDMA_CFG_OUTSTANDING_RD_OS_CNT__SHIFT) & SDMA_CFG_OUTSTANDING_RD_OS_CNT__MASK;
+}
+
+#define REG_SDMA_RD_WEIGHT_0 0x00009004
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__MASK 0xff000000
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__SHIFT 24
+static inline uint32_t SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__SHIFT) & SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__MASK;
+}
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__MASK 0x00ff0000
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__SHIFT 16
+static inline uint32_t SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__SHIFT) & SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__MASK;
+}
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__MASK 0x0000ff00
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__SHIFT 8
+static inline uint32_t SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__SHIFT) & SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__MASK;
+}
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__MASK 0x000000ff
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__SHIFT 0
+static inline uint32_t SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__SHIFT) & SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__MASK;
+}
+
+#define REG_SDMA_WR_WEIGHT_0 0x00009008
+#define SDMA_WR_WEIGHT_0_RESERVED_0__MASK 0xffff0000
+#define SDMA_WR_WEIGHT_0_RESERVED_0__SHIFT 16
+static inline uint32_t SDMA_WR_WEIGHT_0_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_WR_WEIGHT_0_RESERVED_0__SHIFT) & SDMA_WR_WEIGHT_0_RESERVED_0__MASK;
+}
+#define SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__MASK 0x0000ff00
+#define SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__SHIFT 8
+static inline uint32_t SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP(uint32_t val)
+{
+ return ((val) << SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__SHIFT) & SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__MASK;
+}
+#define SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__MASK 0x000000ff
+#define SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__SHIFT 0
+static inline uint32_t SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU(uint32_t val)
+{
+ return ((val) << SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__SHIFT) & SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__MASK;
+}
+
+#define REG_SDMA_CFG_ID_ERROR 0x0000900c
+#define SDMA_CFG_ID_ERROR_RESERVED_0__MASK 0xfffffc00
+#define SDMA_CFG_ID_ERROR_RESERVED_0__SHIFT 10
+static inline uint32_t SDMA_CFG_ID_ERROR_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_ID_ERROR_RESERVED_0__SHIFT) & SDMA_CFG_ID_ERROR_RESERVED_0__MASK;
+}
+#define SDMA_CFG_ID_ERROR_WR_RESP_ID__MASK 0x000003c0
+#define SDMA_CFG_ID_ERROR_WR_RESP_ID__SHIFT 6
+static inline uint32_t SDMA_CFG_ID_ERROR_WR_RESP_ID(uint32_t val)
+{
+ return ((val) << SDMA_CFG_ID_ERROR_WR_RESP_ID__SHIFT) & SDMA_CFG_ID_ERROR_WR_RESP_ID__MASK;
+}
+#define SDMA_CFG_ID_ERROR_RESERVED_1__MASK 0x00000020
+#define SDMA_CFG_ID_ERROR_RESERVED_1__SHIFT 5
+static inline uint32_t SDMA_CFG_ID_ERROR_RESERVED_1(uint32_t val)
+{
+ return ((val) << SDMA_CFG_ID_ERROR_RESERVED_1__SHIFT) & SDMA_CFG_ID_ERROR_RESERVED_1__MASK;
+}
+#define SDMA_CFG_ID_ERROR_RD_RESP_ID__MASK 0x0000001f
+#define SDMA_CFG_ID_ERROR_RD_RESP_ID__SHIFT 0
+static inline uint32_t SDMA_CFG_ID_ERROR_RD_RESP_ID(uint32_t val)
+{
+ return ((val) << SDMA_CFG_ID_ERROR_RD_RESP_ID__SHIFT) & SDMA_CFG_ID_ERROR_RD_RESP_ID__MASK;
+}
+
+#define REG_SDMA_RD_WEIGHT_1 0x00009010
+#define SDMA_RD_WEIGHT_1_RESERVED_0__MASK 0xffffff00
+#define SDMA_RD_WEIGHT_1_RESERVED_0__SHIFT 8
+static inline uint32_t SDMA_RD_WEIGHT_1_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_1_RESERVED_0__SHIFT) & SDMA_RD_WEIGHT_1_RESERVED_0__MASK;
+}
+#define SDMA_RD_WEIGHT_1_RD_WEIGHT_PC__MASK 0x000000ff
+#define SDMA_RD_WEIGHT_1_RD_WEIGHT_PC__SHIFT 0
+static inline uint32_t SDMA_RD_WEIGHT_1_RD_WEIGHT_PC(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_1_RD_WEIGHT_PC__SHIFT) & SDMA_RD_WEIGHT_1_RD_WEIGHT_PC__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_FIFO_CLR 0x00009014
+#define SDMA_CFG_DMA_FIFO_CLR_RESERVED_0__MASK 0xfffffffe
+#define SDMA_CFG_DMA_FIFO_CLR_RESERVED_0__SHIFT 1
+static inline uint32_t SDMA_CFG_DMA_FIFO_CLR_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_FIFO_CLR_RESERVED_0__SHIFT) & SDMA_CFG_DMA_FIFO_CLR_RESERVED_0__MASK;
+}
+#define SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__MASK 0x00000001
+#define SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__SHIFT) & SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_ARB 0x00009018
+#define SDMA_CFG_DMA_ARB_RESERVED_0__MASK 0xfffffc00
+#define SDMA_CFG_DMA_ARB_RESERVED_0__SHIFT 10
+static inline uint32_t SDMA_CFG_DMA_ARB_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_RESERVED_0__SHIFT) & SDMA_CFG_DMA_ARB_RESERVED_0__MASK;
+}
+#define SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__MASK 0x00000200
+#define SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__SHIFT 9
+static inline uint32_t SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__SHIFT) & SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__MASK;
+}
+#define SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__MASK 0x00000100
+#define SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__SHIFT 8
+static inline uint32_t SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__SHIFT) & SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__MASK;
+}
+#define SDMA_CFG_DMA_ARB_RESERVED_1__MASK 0x00000080
+#define SDMA_CFG_DMA_ARB_RESERVED_1__SHIFT 7
+static inline uint32_t SDMA_CFG_DMA_ARB_RESERVED_1(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_RESERVED_1__SHIFT) & SDMA_CFG_DMA_ARB_RESERVED_1__MASK;
+}
+#define SDMA_CFG_DMA_ARB_WR_FIX_ARB__MASK 0x00000070
+#define SDMA_CFG_DMA_ARB_WR_FIX_ARB__SHIFT 4
+static inline uint32_t SDMA_CFG_DMA_ARB_WR_FIX_ARB(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_WR_FIX_ARB__SHIFT) & SDMA_CFG_DMA_ARB_WR_FIX_ARB__MASK;
+}
+#define SDMA_CFG_DMA_ARB_RESERVED_2__MASK 0x00000008
+#define SDMA_CFG_DMA_ARB_RESERVED_2__SHIFT 3
+static inline uint32_t SDMA_CFG_DMA_ARB_RESERVED_2(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_RESERVED_2__SHIFT) & SDMA_CFG_DMA_ARB_RESERVED_2__MASK;
+}
+#define SDMA_CFG_DMA_ARB_RD_FIX_ARB__MASK 0x00000007
+#define SDMA_CFG_DMA_ARB_RD_FIX_ARB__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_ARB_RD_FIX_ARB(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_RD_FIX_ARB__SHIFT) & SDMA_CFG_DMA_ARB_RD_FIX_ARB__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_RD_QOS 0x00009020
+#define SDMA_CFG_DMA_RD_QOS_RESERVED_0__MASK 0xfffffc00
+#define SDMA_CFG_DMA_RD_QOS_RESERVED_0__SHIFT 10
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RESERVED_0__SHIFT) & SDMA_CFG_DMA_RD_QOS_RESERVED_0__MASK;
+}
+#define SDMA_CFG_DMA_RD_QOS_RD_PC_QOS__MASK 0x00000300
+#define SDMA_CFG_DMA_RD_QOS_RD_PC_QOS__SHIFT 8
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_PC_QOS(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RD_PC_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_PC_QOS__MASK;
+}
+#define SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__MASK 0x000000c0
+#define SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__SHIFT 6
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__MASK;
+}
+#define SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__MASK 0x00000030
+#define SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__SHIFT 4
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__MASK;
+}
+#define SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__MASK 0x0000000c
+#define SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__SHIFT 2
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__MASK;
+}
+#define SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__MASK 0x00000003
+#define SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_RD_CFG 0x00009024
+#define SDMA_CFG_DMA_RD_CFG_RESERVED_0__MASK 0xffffe000
+#define SDMA_CFG_DMA_RD_CFG_RESERVED_0__SHIFT 13
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RESERVED_0__SHIFT) & SDMA_CFG_DMA_RD_CFG_RESERVED_0__MASK;
+}
+#define SDMA_CFG_DMA_RD_CFG_RD_ARLOCK__MASK 0x00001000
+#define SDMA_CFG_DMA_RD_CFG_RD_ARLOCK__SHIFT 12
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARLOCK(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARLOCK__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARLOCK__MASK;
+}
+#define SDMA_CFG_DMA_RD_CFG_RD_ARCACHE__MASK 0x00000f00
+#define SDMA_CFG_DMA_RD_CFG_RD_ARCACHE__SHIFT 8
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARCACHE(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARCACHE__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARCACHE__MASK;
+}
+#define SDMA_CFG_DMA_RD_CFG_RD_ARPROT__MASK 0x000000e0
+#define SDMA_CFG_DMA_RD_CFG_RD_ARPROT__SHIFT 5
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARPROT(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARPROT__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARPROT__MASK;
+}
+#define SDMA_CFG_DMA_RD_CFG_RD_ARBURST__MASK 0x00000018
+#define SDMA_CFG_DMA_RD_CFG_RD_ARBURST__SHIFT 3
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARBURST(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARBURST__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARBURST__MASK;
+}
+#define SDMA_CFG_DMA_RD_CFG_RD_ARSIZE__MASK 0x00000007
+#define SDMA_CFG_DMA_RD_CFG_RD_ARSIZE__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARSIZE(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARSIZE__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARSIZE__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_WR_CFG 0x00009028
+#define SDMA_CFG_DMA_WR_CFG_RESERVED_0__MASK 0xffffe000
+#define SDMA_CFG_DMA_WR_CFG_RESERVED_0__SHIFT 13
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_RESERVED_0__SHIFT) & SDMA_CFG_DMA_WR_CFG_RESERVED_0__MASK;
+}
+#define SDMA_CFG_DMA_WR_CFG_WR_AWLOCK__MASK 0x00001000
+#define SDMA_CFG_DMA_WR_CFG_WR_AWLOCK__SHIFT 12
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWLOCK(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWLOCK__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWLOCK__MASK;
+}
+#define SDMA_CFG_DMA_WR_CFG_WR_AWCACHE__MASK 0x00000f00
+#define SDMA_CFG_DMA_WR_CFG_WR_AWCACHE__SHIFT 8
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWCACHE(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWCACHE__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWCACHE__MASK;
+}
+#define SDMA_CFG_DMA_WR_CFG_WR_AWPROT__MASK 0x000000e0
+#define SDMA_CFG_DMA_WR_CFG_WR_AWPROT__SHIFT 5
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWPROT(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWPROT__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWPROT__MASK;
+}
+#define SDMA_CFG_DMA_WR_CFG_WR_AWBURST__MASK 0x00000018
+#define SDMA_CFG_DMA_WR_CFG_WR_AWBURST__SHIFT 3
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWBURST(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWBURST__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWBURST__MASK;
+}
+#define SDMA_CFG_DMA_WR_CFG_WR_AWSIZE__MASK 0x00000007
+#define SDMA_CFG_DMA_WR_CFG_WR_AWSIZE__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWSIZE(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWSIZE__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWSIZE__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_WSTRB 0x0000902c
+#define SDMA_CFG_DMA_WSTRB_WR_WSTRB__MASK 0xffffffff
+#define SDMA_CFG_DMA_WSTRB_WR_WSTRB__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_WSTRB_WR_WSTRB(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WSTRB_WR_WSTRB__SHIFT) & SDMA_CFG_DMA_WSTRB_WR_WSTRB__MASK;
+}
+
+#define REG_SDMA_CFG_STATUS 0x00009030
+#define SDMA_CFG_STATUS_RESERVED_0__MASK 0xfffffe00
+#define SDMA_CFG_STATUS_RESERVED_0__SHIFT 9
+static inline uint32_t SDMA_CFG_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_STATUS_RESERVED_0__SHIFT) & SDMA_CFG_STATUS_RESERVED_0__MASK;
+}
+#define SDMA_CFG_STATUS_IDEL__MASK 0x00000100
+#define SDMA_CFG_STATUS_IDEL__SHIFT 8
+static inline uint32_t SDMA_CFG_STATUS_IDEL(uint32_t val)
+{
+ return ((val) << SDMA_CFG_STATUS_IDEL__SHIFT) & SDMA_CFG_STATUS_IDEL__MASK;
+}
+#define SDMA_CFG_STATUS_RESERVED_1__MASK 0x000000ff
+#define SDMA_CFG_STATUS_RESERVED_1__SHIFT 0
+static inline uint32_t SDMA_CFG_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << SDMA_CFG_STATUS_RESERVED_1__SHIFT) & SDMA_CFG_STATUS_RESERVED_1__MASK;
+}
+
+#define REG_GLOBAL_OPERATION_ENABLE 0x0000f008
+#define GLOBAL_OPERATION_ENABLE_RESERVED_0__MASK 0xffffff80
+#define GLOBAL_OPERATION_ENABLE_RESERVED_0__SHIFT 7
+static inline uint32_t GLOBAL_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_RESERVED_0__SHIFT) & GLOBAL_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN__MASK 0x00000040
+#define GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN__SHIFT 6
+static inline uint32_t GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_PPU_OP_EN__MASK 0x00000020
+#define GLOBAL_OPERATION_ENABLE_PPU_OP_EN__SHIFT 5
+static inline uint32_t GLOBAL_OPERATION_ENABLE_PPU_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_PPU_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_PPU_OP_EN__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN__MASK 0x00000010
+#define GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN__SHIFT 4
+static inline uint32_t GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_DPU_OP_EN__MASK 0x00000008
+#define GLOBAL_OPERATION_ENABLE_DPU_OP_EN__SHIFT 3
+static inline uint32_t GLOBAL_OPERATION_ENABLE_DPU_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_DPU_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_DPU_OP_EN__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_CORE_OP_EN__MASK 0x00000004
+#define GLOBAL_OPERATION_ENABLE_CORE_OP_EN__SHIFT 2
+static inline uint32_t GLOBAL_OPERATION_ENABLE_CORE_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_CORE_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_CORE_OP_EN__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_RESERVED_1__MASK 0x00000002
+#define GLOBAL_OPERATION_ENABLE_RESERVED_1__SHIFT 1
+static inline uint32_t GLOBAL_OPERATION_ENABLE_RESERVED_1(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_RESERVED_1__SHIFT) & GLOBAL_OPERATION_ENABLE_RESERVED_1__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_CNA_OP_EN__MASK 0x00000001
+#define GLOBAL_OPERATION_ENABLE_CNA_OP_EN__SHIFT 0
+static inline uint32_t GLOBAL_OPERATION_ENABLE_CNA_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_CNA_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_CNA_OP_EN__MASK;
+}
+
+#endif /* __ROCKET_REGISTERS_XML__ */
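
Every accessor in this generated header follows the same shape: shift the field value into position, then mask it so an out-of-range value cannot spill into neighbouring fields. A minimal, self-contained sketch of how such helpers are typically combined into a full register word (the DEMO_* names are illustrative, not part of the register file above):

#include <stdint.h>
#include <stdio.h>

#define DEMO_FIELD_HI__MASK  0x0000ff00
#define DEMO_FIELD_HI__SHIFT 8
#define DEMO_FIELD_LO__MASK  0x000000ff
#define DEMO_FIELD_LO__SHIFT 0

static inline uint32_t DEMO_FIELD_HI(uint32_t val)
{
	return (val << DEMO_FIELD_HI__SHIFT) & DEMO_FIELD_HI__MASK;
}

static inline uint32_t DEMO_FIELD_LO(uint32_t val)
{
	return (val << DEMO_FIELD_LO__SHIFT) & DEMO_FIELD_LO__MASK;
}

int main(void)
{
	/* OR the per-field helpers together to build the register value. */
	uint32_t reg = DEMO_FIELD_HI(0x12) | DEMO_FIELD_LO(0x34);

	printf("reg = 0x%08x\n", reg);	/* prints reg = 0x00001234 */
	return 0;
}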
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index b594780a57d7..ca00a5dbcf75 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -461,7 +461,7 @@ config ACPI_HED
config ACPI_BGRT
bool "Boottime Graphics Resource Table support"
- depends on EFI && (X86 || ARM64 || LOONGARCH)
+ depends on EFI
help
This driver adds support for exposing the ACPI Boottime Graphics
Resource Table, which allows the operating system to obtain
@@ -547,6 +547,10 @@ if ARM64
source "drivers/acpi/arm64/Kconfig"
endif
+if RISCV
+source "drivers/acpi/riscv/Kconfig"
+endif
+
config ACPI_PPTT
bool
diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c
index d50261d05f3a..515b20d0b698 100644
--- a/drivers/acpi/acpi_dbg.c
+++ b/drivers/acpi/acpi_dbg.c
@@ -569,11 +569,11 @@ static int acpi_aml_release(struct inode *inode, struct file *file)
return 0;
}
-static int acpi_aml_read_user(char __user *buf, int len)
+static ssize_t acpi_aml_read_user(char __user *buf, size_t len)
{
- int ret;
struct circ_buf *crc = &acpi_aml_io.out_crc;
- int n;
+ ssize_t ret;
+ size_t n;
char *p;
ret = acpi_aml_lock_read(crc, ACPI_AML_OUT_USER);
@@ -582,7 +582,7 @@ static int acpi_aml_read_user(char __user *buf, int len)
/* sync head before removing logs */
smp_rmb();
p = &crc->buf[crc->tail];
- n = min(len, circ_count_to_end(crc));
+ n = min_t(size_t, len, circ_count_to_end(crc));
if (copy_to_user(buf, p, n)) {
ret = -EFAULT;
goto out;
@@ -599,8 +599,8 @@ out:
static ssize_t acpi_aml_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- int ret = 0;
- int size = 0;
+ ssize_t ret = 0;
+ ssize_t size = 0;
if (!count)
return 0;
@@ -639,11 +639,11 @@ again:
return size > 0 ? size : ret;
}
-static int acpi_aml_write_user(const char __user *buf, int len)
+static ssize_t acpi_aml_write_user(const char __user *buf, size_t len)
{
- int ret;
struct circ_buf *crc = &acpi_aml_io.in_crc;
- int n;
+ ssize_t ret;
+ size_t n;
char *p;
ret = acpi_aml_lock_write(crc, ACPI_AML_IN_USER);
@@ -652,7 +652,7 @@ static int acpi_aml_write_user(const char __user *buf, int len)
/* sync tail before inserting cmds */
smp_mb();
p = &crc->buf[crc->head];
- n = min(len, circ_space_to_end(crc));
+ n = min_t(size_t, len, circ_space_to_end(crc));
if (copy_from_user(p, buf, n)) {
ret = -EFAULT;
goto out;
@@ -663,14 +663,14 @@ static int acpi_aml_write_user(const char __user *buf, int len)
ret = n;
out:
acpi_aml_unlock_fifo(ACPI_AML_IN_USER, ret >= 0);
- return n;
+ return ret;
}
static ssize_t acpi_aml_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- int ret = 0;
- int size = 0;
+ ssize_t ret = 0;
+ ssize_t size = 0;
if (!count)
return 0;
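
The int-to-size_t/ssize_t conversion above is not cosmetic: the kernel's min() rejects mixed signed/unsigned operands at build time, and min_t(size_t, ...) resolves that by naming the comparison type explicitly. A small userspace sketch of the pitfall the typed minimum guards against (demo code, not from the driver):

#include <stddef.h>
#include <stdio.h>

#define min_naive(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	size_t len = 32;	/* unsigned length, as in the reworked paths */
	int space = -1;		/* a signed count that has gone negative */

	/*
	 * In the comparison, space is promoted to size_t, so -1 becomes
	 * SIZE_MAX and the "minimum" silently comes out as 32 instead of
	 * flagging the negative value.
	 */
	printf("%zu\n", (size_t)min_naive(len, space));
	return 0;
}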
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 2a99f5eb6962..7ec1dc04fd11 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -815,7 +815,7 @@ bool acpi_processor_claim_cst_control(void)
cst_control_claimed = true;
return true;
}
-EXPORT_SYMBOL_GPL(acpi_processor_claim_cst_control);
+EXPORT_SYMBOL_NS_GPL(acpi_processor_claim_cst_control, "ACPI_PROCESSOR_IDLE");
/**
* acpi_processor_evaluate_cst - Evaluate the processor _CST control method.
@@ -994,5 +994,5 @@ end:
return ret;
}
-EXPORT_SYMBOL_GPL(acpi_processor_evaluate_cst);
+EXPORT_SYMBOL_NS_GPL(acpi_processor_evaluate_cst, "ACPI_PROCESSOR_IDLE");
#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */
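
With these exports moved into the "ACPI_PROCESSOR_IDLE" symbol namespace, any module calling them must declare the dependency or modpost will fail the build with a message along the lines of "uses symbol ... from namespace ACPI_PROCESSOR_IDLE, but does not import it". A minimal sketch of the consumer side (hypothetical demo module; the macro itself is the real one):

#include <linux/module.h>
#include <acpi/processor.h>

/* Required once per module that calls into the namespaced exports. */
MODULE_IMPORT_NS("ACPI_PROCESSOR_IDLE");

static int __init demo_init(void)
{
	/* With the import in place, the call resolves as before. */
	if (!acpi_processor_claim_cst_control())
		return -EBUSY;
	return 0;
}
module_init(demo_init);

MODULE_DESCRIPTION("Namespace import sketch");
MODULE_LICENSE("GPL");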
diff --git a/drivers/acpi/acpi_tad.c b/drivers/acpi/acpi_tad.c
index 91d7d90c47da..33418dd6768a 100644
--- a/drivers/acpi/acpi_tad.c
+++ b/drivers/acpi/acpi_tad.c
@@ -565,6 +565,9 @@ static void acpi_tad_remove(struct platform_device *pdev)
pm_runtime_get_sync(dev);
+ if (dd->capabilities & ACPI_TAD_RT)
+ sysfs_remove_group(&dev->kobj, &acpi_tad_time_attr_group);
+
if (dd->capabilities & ACPI_TAD_DC_WAKE)
sysfs_remove_group(&dev->kobj, &acpi_tad_dc_attr_group);
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index fe6d38b43c9a..91241bd6917a 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -37,7 +37,7 @@ struct acpi_db_argument_info {
struct acpi_db_execute_walk {
u32 count;
u32 max_count;
- char name_seg[ACPI_NAMESEG_SIZE + 1] ACPI_NONSTRING;
+ char name_seg[ACPI_NAMESEG_SIZE + 1];
};
#define PARAM_LIST(pl) pl
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 0c41f0097e8d..f98640086f4e 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -1141,7 +1141,7 @@ struct acpi_port_info {
#define ACPI_RESOURCE_NAME_PIN_GROUP_FUNCTION 0x91
#define ACPI_RESOURCE_NAME_PIN_GROUP_CONFIG 0x92
#define ACPI_RESOURCE_NAME_CLOCK_INPUT 0x93
-#define ACPI_RESOURCE_NAME_LARGE_MAX 0x94
+#define ACPI_RESOURCE_NAME_LARGE_MAX 0x93
/*****************************************************************************
*
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 76c5ed02e916..da2c45880cc7 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -450,7 +450,8 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
{{"_DSM",
METHOD_4ARGS(ACPI_TYPE_BUFFER, ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER,
- ACPI_TYPE_ANY) | ARG_COUNT_IS_MINIMUM,
+ ACPI_TYPE_ANY | ACPI_TYPE_PACKAGE) |
+ ARG_COUNT_IS_MINIMUM,
METHOD_RETURNS(ACPI_RTYPE_ALL)}}, /* Must return a value, but it can be of any type */
{{"_DSS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index fef6fb29ece4..45ec32e81903 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -462,7 +462,6 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
struct acpi_walk_state *next_walk_state = NULL;
union acpi_operand_object *obj_desc;
struct acpi_evaluate_info *info;
- u32 i;
ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);
@@ -484,10 +483,17 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
}
if (this_walk_state->num_operands < obj_desc->method.param_count) {
- ACPI_ERROR((AE_INFO, "Missing argument for method [%4.4s]",
+ ACPI_ERROR((AE_INFO, "Missing argument(s) for method [%4.4s]",
acpi_ut_get_node_name(method_node)));
- return_ACPI_STATUS(AE_AML_UNINITIALIZED_ARG);
+ return_ACPI_STATUS(AE_AML_TOO_FEW_ARGUMENTS);
+ }
+
+ else if (this_walk_state->num_operands > obj_desc->method.param_count) {
+ ACPI_ERROR((AE_INFO, "Too many arguments for method [%4.4s]",
+ acpi_ut_get_node_name(method_node)));
+
+ return_ACPI_STATUS(AE_AML_TOO_MANY_ARGUMENTS);
}
/* Init for new method, possibly wait on method mutex */
@@ -546,14 +552,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
* Delete the operands on the previous walkstate operand stack
* (they were copied to new objects)
*/
- for (i = 0; i < obj_desc->method.param_count; i++) {
- acpi_ut_remove_reference(this_walk_state->operands[i]);
- this_walk_state->operands[i] = NULL;
- }
-
- /* Clear the operand stack */
-
- this_walk_state->num_operands = 0;
+ acpi_ds_clear_operands(this_walk_state);
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"**** Begin nested execution of [%4.4s] **** WalkState=%p\n",
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index fa3e0d00d1ca..df2a4ab0e0da 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -42,6 +42,10 @@ acpi_status acpi_ev_init_global_lock_handler(void)
return_ACPI_STATUS(AE_OK);
}
+ if (!acpi_gbl_use_global_lock) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
/* Attempt installation of the global lock handler */
status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
index 1c8044ffcb97..532ea307a675 100644
--- a/drivers/acpi/acpica/psopinfo.c
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -34,7 +34,7 @@ static const u8 acpi_gbl_argument_count[] =
const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
{
-#ifdef ACPI_DEBUG_OUTPUT
+#if defined ACPI_ASL_COMPILER && defined ACPI_DEBUG_OUTPUT
const char *opcode_name = "Unknown AML opcode";
#endif
@@ -102,11 +102,11 @@ const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
default:
break;
}
-#endif
/* Unknown AML opcode */
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%s [%4.4X]\n", opcode_name, opcode));
+#endif
return (&acpi_gbl_aml_op_info[_UNK]);
}
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index fd64460a2e26..049f6c2f1e32 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -121,6 +121,14 @@ acpi_tb_print_table_header(acpi_physical_address address,
ACPI_CAST_PTR(struct acpi_table_rsdp,
header)->revision,
local_header.oem_id));
+ } else if (acpi_gbl_CDAT && !acpi_ut_valid_nameseg(header->signature)) {
+
+ /* CDAT does not use the common ACPI table header */
+
+ ACPI_INFO(("%-4.4s 0x%8.8X%8.8X %06X",
+ ACPI_SIG_CDAT, ACPI_FORMAT_UINT64(address),
+ ACPI_CAST_PTR(struct acpi_table_cdat,
+ header)->length));
} else {
/* Standard ACPI table with full common header */
diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c
index bf8dc92a373a..3c87953dbd19 100644
--- a/drivers/acpi/apei/einj-core.c
+++ b/drivers/acpi/apei/einj-core.c
@@ -315,7 +315,7 @@ static void __iomem *einj_get_parameter_address(void)
memcpy_fromio(&v5param, p, v5param_size);
acpi5 = 1;
check_vendor_extension(pa_v5, &v5param);
- if (available_error_type & ACPI65_EINJV2_SUPP) {
+ if (is_v2 && available_error_type & ACPI65_EINJV2_SUPP) {
len = v5param.einjv2_struct.length;
offset = offsetof(struct einjv2_extension_struct, component_arr);
max_nr_components = (len - offset) /
@@ -540,6 +540,9 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
struct set_error_type_with_address *v5param;
v5param = kmalloc(v5param_size, GFP_KERNEL);
+ if (!v5param)
+ return -ENOMEM;
+
memcpy_fromio(v5param, einj_param, v5param_size);
v5param->type = type;
if (type & ACPI5_VENDOR_BIT) {
@@ -653,6 +656,43 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
return rc;
}
+/* Allow almost all types of addresses except MMIO. */
+static bool is_allowed_range(u64 base_addr, u64 size)
+{
+ int i;
+ /*
+ * MMIO region is usually claimed with IORESOURCE_MEM + IORES_DESC_NONE.
+ * However, IORES_DESC_NONE is treated like a wildcard when we check if
+ * region intersects with known resource. So do an allow list check for
+ * IORES_DESCs that definitely or most likely not MMIO.
+ */
+ int non_mmio_desc[] = {
+ IORES_DESC_CRASH_KERNEL,
+ IORES_DESC_ACPI_TABLES,
+ IORES_DESC_ACPI_NV_STORAGE,
+ IORES_DESC_PERSISTENT_MEMORY,
+ IORES_DESC_PERSISTENT_MEMORY_LEGACY,
+ /* Treat IORES_DESC_DEVICE_PRIVATE_MEMORY as MMIO. */
+ IORES_DESC_RESERVED,
+ IORES_DESC_SOFT_RESERVED,
+ };
+
+ if (region_intersects(base_addr, size, IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
+ == REGION_INTERSECTS)
+ return true;
+
+ for (i = 0; i < ARRAY_SIZE(non_mmio_desc); ++i) {
+ if (region_intersects(base_addr, size, IORESOURCE_MEM, non_mmio_desc[i])
+ == REGION_INTERSECTS)
+ return true;
+ }
+
+ if (arch_is_platform_page(base_addr))
+ return true;
+
+ return false;
+}
+
/* Inject the specified hardware error */
int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, u64 param3,
u64 param4)
@@ -699,19 +739,15 @@ int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, u64 param3,
* Disallow crazy address masks that give BIOS leeway to pick
* injection address almost anywhere. Insist on page or
* better granularity and that target address is normal RAM or
- * NVDIMM.
+ * anything that is not MMIO.
*/
base_addr = param1 & param2;
size = ~param2 + 1;
- if (((param2 & PAGE_MASK) != PAGE_MASK) ||
- ((region_intersects(base_addr, size, IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
- != REGION_INTERSECTS) &&
- (region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY)
- != REGION_INTERSECTS) &&
- (region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_SOFT_RESERVED)
- != REGION_INTERSECTS) &&
- !arch_is_platform_page(base_addr)))
+ if ((param2 & PAGE_MASK) != PAGE_MASK)
+ return -EINVAL;
+
+ if (!is_allowed_range(base_addr, size))
return -EINVAL;
if (is_zero_pfn(base_addr >> PAGE_SHIFT))
@@ -1091,7 +1127,7 @@ err_put_table:
return rc;
}
-static void __exit einj_remove(struct faux_device *fdev)
+static void einj_remove(struct faux_device *fdev)
{
struct apei_exec_context ctx;
@@ -1114,15 +1150,9 @@ static void __exit einj_remove(struct faux_device *fdev)
}
static struct faux_device *einj_dev;
-/*
- * einj_remove() lives in .exit.text. For drivers registered via
- * platform_driver_probe() this is ok because they cannot get unbound at
- * runtime. So mark the driver struct with __refdata to prevent modpost
- * triggering a section mismatch warning.
- */
-static struct faux_device_ops einj_device_ops __refdata = {
+static struct faux_device_ops einj_device_ops = {
.probe = einj_probe,
- .remove = __exit_p(einj_remove),
+ .remove = einj_remove,
};
static int __init einj_init(void)
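
The param2 mask convention in the simplified check is worth a worked example: the injection base is param1 & param2, the covered size is ~param2 + 1, and any mask that leaves bits above the page offset as wildcards fails the PAGE_MASK granularity test. A standalone sketch with made-up values (4 KiB pages assumed):

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_MASK 0xfffffffffffff000ULL	/* ~(4096 - 1) */

static void check(uint64_t param1, uint64_t param2)
{
	uint64_t base = param1 & param2;
	uint64_t size = ~param2 + 1;
	int ok = (param2 & DEMO_PAGE_MASK) == DEMO_PAGE_MASK;

	printf("mask=0x%016llx base=0x%llx size=0x%llx -> %s\n",
	       (unsigned long long)param2, (unsigned long long)base,
	       (unsigned long long)size, ok ? "accepted" : "rejected");
}

int main(void)
{
	/* Page-granular mask: every bit above bit 11 is fixed. */
	check(0x412345678ULL, 0xfffffffffffff000ULL);	/* accepted */
	/* Bits 12-15 left as wildcards: 64 KiB of BIOS leeway. */
	check(0x412345678ULL, 0xffffffffffff0000ULL);	/* rejected */
	return 0;
}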
diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c
index 246076341e8c..ff0e8bf8e97a 100644
--- a/drivers/acpi/apei/erst-dbg.c
+++ b/drivers/acpi/apei/erst-dbg.c
@@ -60,9 +60,8 @@ static long erst_dbg_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
switch (cmd) {
case APEI_ERST_CLEAR_RECORD:
- rc = copy_from_user(&record_id, (void __user *)arg,
- sizeof(record_id));
- if (rc)
+ if (copy_from_user(&record_id, (void __user *)arg,
+ sizeof(record_id)))
return -EFAULT;
return erst_clear(record_id);
case APEI_ERST_GET_RECORD_COUNT:
@@ -175,8 +174,7 @@ static ssize_t erst_dbg_write(struct file *filp, const char __user *ubuf,
erst_dbg_buf = p;
erst_dbg_buf_len = usize;
}
- rc = copy_from_user(erst_dbg_buf, ubuf, usize);
- if (rc) {
+ if (copy_from_user(erst_dbg_buf, ubuf, usize)) {
rc = -EFAULT;
goto out;
}
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index a0d54993edb3..97ee19f2cae0 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -1207,12 +1207,10 @@ static int ghes_notify_hed(struct notifier_block *this, unsigned long event,
int ret = NOTIFY_DONE;
spin_lock_irqsave(&ghes_notify_lock_irq, flags);
- rcu_read_lock();
list_for_each_entry_rcu(ghes, &ghes_hed, list) {
if (!ghes_proc(ghes))
ret = NOTIFY_OK;
}
- rcu_read_unlock();
spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
return ret;
diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c
index 70f8290b659d..fd995a1d3d24 100644
--- a/drivers/acpi/arm64/gtdt.c
+++ b/drivers/acpi/arm64/gtdt.c
@@ -388,11 +388,11 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
return 0;
}
-static int __init gtdt_sbsa_gwdt_init(void)
+static int __init gtdt_platform_timer_init(void)
{
void *platform_timer;
struct acpi_table_header *table;
- int ret, timer_count, gwdt_count = 0;
+ int ret, timer_count, gwdt_count = 0, mmio_timer_count = 0;
if (acpi_disabled)
return 0;
@@ -414,20 +414,41 @@ static int __init gtdt_sbsa_gwdt_init(void)
goto out_put_gtdt;
for_each_platform_timer(platform_timer) {
+ ret = 0;
+
if (is_non_secure_watchdog(platform_timer)) {
ret = gtdt_import_sbsa_gwdt(platform_timer, gwdt_count);
if (ret)
- break;
+ continue;
gwdt_count++;
+ } else if (is_timer_block(platform_timer)) {
+ struct arch_timer_mem atm = {};
+ struct platform_device *pdev;
+
+ ret = gtdt_parse_timer_block(platform_timer, &atm);
+ if (ret)
+ continue;
+
+ pdev = platform_device_register_data(NULL, "gtdt-arm-mmio-timer",
+ gwdt_count, &atm,
+ sizeof(atm));
+ if (IS_ERR(pdev)) {
+ pr_err("Can't register timer %d\n", gwdt_count);
+ continue;
+ }
+
+ mmio_timer_count++;
}
}
if (gwdt_count)
pr_info("found %d SBSA generic Watchdog(s).\n", gwdt_count);
+ if (mmio_timer_count)
+ pr_info("found %d Generic MMIO timer(s).\n", mmio_timer_count);
out_put_gtdt:
acpi_put_table(table);
return ret;
}
-device_initcall(gtdt_sbsa_gwdt_init);
+device_initcall(gtdt_platform_timer_init);
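
platform_device_register_data() copies the arch_timer_mem structure and hands it to the matching driver as platform data, so the registration above only has to describe the timer frames once. A hypothetical consumer-side sketch of how a "gtdt-arm-mmio-timer" driver would retrieve it (not the actual clocksource driver):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <clocksource/arm_arch_timer.h>

static int demo_mmio_timer_probe(struct platform_device *pdev)
{
	struct arch_timer_mem *atm = dev_get_platdata(&pdev->dev);

	if (!atm)
		return -ENODEV;

	/* ... map and register the timer frames described by *atm ... */
	return 0;
}

static struct platform_driver demo_mmio_timer_driver = {
	.probe = demo_mmio_timer_probe,
	.driver = {
		.name = "gtdt-arm-mmio-timer",
	},
};
module_platform_driver(demo_mmio_timer_driver);

MODULE_DESCRIPTION("Platform-data retrieval sketch");
MODULE_LICENSE("GPL");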
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 98759d6199d3..65f0f56ad753 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -937,8 +937,10 @@ static u32 *iort_rmr_alloc_sids(u32 *sids, u32 count, u32 id_start,
new_sids = krealloc_array(sids, count + new_count,
sizeof(*new_sids), GFP_KERNEL);
- if (!new_sids)
+ if (!new_sids) {
+ kfree(sids);
return NULL;
+ }
for (i = count; i < total_count; i++)
new_sids[i] = id_start++;
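
The iort_rmr_alloc_sids() change fixes the canonical realloc leak: on allocation failure, the old buffer was the only remaining reference to the SID array, and returning NULL without freeing it leaked it. The same discipline applies to plain realloc(); a userspace sketch of the pattern:

#include <stdlib.h>

/* Grow an int array; on failure, free the original (as the fix above
 * does with kfree(sids)) rather than leaking it. */
static int *grow(int *arr, size_t new_count)
{
	int *tmp = realloc(arr, new_count * sizeof(*tmp));

	if (!tmp) {
		free(arr);
		return NULL;
	}
	return tmp;
}

int main(void)
{
	int *a = grow(NULL, 8);	/* realloc(NULL, n) acts like malloc(n) */

	free(a);
	return 0;
}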
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 6905b56bf3e4..67b76492c839 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -92,7 +92,7 @@ enum {
struct acpi_battery {
struct mutex lock;
- struct mutex sysfs_lock;
+ struct mutex update_lock;
struct power_supply *bat;
struct power_supply_desc bat_desc;
struct acpi_device *device;
@@ -904,15 +904,12 @@ static int sysfs_add_battery(struct acpi_battery *battery)
static void sysfs_remove_battery(struct acpi_battery *battery)
{
- mutex_lock(&battery->sysfs_lock);
- if (!battery->bat) {
- mutex_unlock(&battery->sysfs_lock);
+ if (!battery->bat)
return;
- }
+
battery_hook_remove_battery(battery);
power_supply_unregister(battery->bat);
battery->bat = NULL;
- mutex_unlock(&battery->sysfs_lock);
}
static void find_battery(const struct dmi_header *dm, void *private)
@@ -1072,6 +1069,9 @@ static void acpi_battery_notify(acpi_handle handle, u32 event, void *data)
if (!battery)
return;
+
+ guard(mutex)(&battery->update_lock);
+
old = battery->bat;
/*
* On Acer Aspire V5-573G notifications are sometimes triggered too
@@ -1094,21 +1094,22 @@ static void acpi_battery_notify(acpi_handle handle, u32 event, void *data)
}
static int battery_notify(struct notifier_block *nb,
- unsigned long mode, void *_unused)
+ unsigned long mode, void *_unused)
{
struct acpi_battery *battery = container_of(nb, struct acpi_battery,
pm_nb);
- int result;
- switch (mode) {
- case PM_POST_HIBERNATION:
- case PM_POST_SUSPEND:
+ if (mode == PM_POST_SUSPEND || mode == PM_POST_HIBERNATION) {
+ guard(mutex)(&battery->update_lock);
+
if (!acpi_battery_present(battery))
return 0;
if (battery->bat) {
acpi_battery_refresh(battery);
} else {
+ int result;
+
result = acpi_battery_get_info(battery);
if (result)
return result;
@@ -1120,7 +1121,6 @@ static int battery_notify(struct notifier_block *nb,
acpi_battery_init_alarm(battery);
acpi_battery_get_state(battery);
- break;
}
return 0;
@@ -1198,6 +1198,8 @@ static int acpi_battery_update_retry(struct acpi_battery *battery)
{
int retry, ret;
+ guard(mutex)(&battery->update_lock);
+
for (retry = 5; retry; retry--) {
ret = acpi_battery_update(battery, false);
if (!ret)
@@ -1208,6 +1210,13 @@ static int acpi_battery_update_retry(struct acpi_battery *battery)
return ret;
}
+static void sysfs_battery_cleanup(struct acpi_battery *battery)
+{
+ guard(mutex)(&battery->update_lock);
+
+ sysfs_remove_battery(battery);
+}
+
static int acpi_battery_add(struct acpi_device *device)
{
int result = 0;
@@ -1230,7 +1239,7 @@ static int acpi_battery_add(struct acpi_device *device)
if (result)
return result;
- result = devm_mutex_init(&device->dev, &battery->sysfs_lock);
+ result = devm_mutex_init(&device->dev, &battery->update_lock);
if (result)
return result;
@@ -1262,7 +1271,7 @@ fail_pm:
device_init_wakeup(&device->dev, 0);
unregister_pm_notifier(&battery->pm_nb);
fail:
- sysfs_remove_battery(battery);
+ sysfs_battery_cleanup(battery);
return result;
}
@@ -1281,6 +1290,9 @@ static void acpi_battery_remove(struct acpi_device *device)
device_init_wakeup(&device->dev, 0);
unregister_pm_notifier(&battery->pm_nb);
+
+ guard(mutex)(&battery->update_lock);
+
sysfs_remove_battery(battery);
}
@@ -1297,6 +1309,9 @@ static int acpi_battery_resume(struct device *dev)
return -EINVAL;
battery->update_time = 0;
+
+ guard(mutex)(&battery->update_lock);
+
acpi_battery_update(battery, true);
return 0;
}
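
Several paths above replace explicit lock/unlock pairs with guard(mutex)(...), the scope-based guard from <linux/cleanup.h>: the mutex is released automatically when the guard variable goes out of scope, which is why sysfs_remove_battery() can shed its own locking and early returns need no unlock. A minimal sketch of the pattern (demo function; the macro is the real one):

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static int demo_state;

static int demo_update(int v)
{
	guard(mutex)(&demo_lock);	/* dropped automatically on return */

	if (v < 0)
		return -EINVAL;		/* no unlock to forget here */

	demo_state = v;
	return 0;
}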
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 6b649031808f..ab4651205e8a 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -1876,7 +1876,7 @@ EXPORT_SYMBOL_GPL(cppc_set_perf);
* If desired_reg is in the SystemMemory or SystemIo ACPI address space,
* then assume there is no latency.
*/
-unsigned int cppc_get_transition_latency(int cpu_num)
+int cppc_get_transition_latency(int cpu_num)
{
/*
* Expected transition latency is based on the PCCT timing values
@@ -1889,31 +1889,29 @@ unsigned int cppc_get_transition_latency(int cpu_num)
* completion of a command before issuing the next command,
* in microseconds.
*/
- unsigned int latency_ns = 0;
struct cpc_desc *cpc_desc;
struct cpc_register_resource *desired_reg;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
struct cppc_pcc_data *pcc_ss_data;
+ int latency_ns = 0;
cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
if (!cpc_desc)
- return CPUFREQ_ETERNAL;
+ return -ENODATA;
desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
if (CPC_IN_SYSTEM_MEMORY(desired_reg) || CPC_IN_SYSTEM_IO(desired_reg))
return 0;
- else if (!CPC_IN_PCC(desired_reg))
- return CPUFREQ_ETERNAL;
- if (pcc_ss_id < 0)
- return CPUFREQ_ETERNAL;
+ if (!CPC_IN_PCC(desired_reg) || pcc_ss_id < 0)
+ return -ENODATA;
pcc_ss_data = pcc_data[pcc_ss_id];
if (pcc_ss_data->pcc_mpar)
latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);
- latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
- latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);
+ latency_ns = max_t(int, latency_ns, pcc_ss_data->pcc_nominal * 1000);
+ latency_ns = max_t(int, latency_ns, pcc_ss_data->pcc_mrtt * 1000);
return latency_ns;
}
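
The retyped latency computation keeps the same arithmetic: with pcc_mpar commands allowed per minute, the per-command spacing is 60 * (10^9 / mpar) ns, then raised to at least the PCCT nominal and MRTT timings (given in microseconds, hence the * 1000). A worked check with hypothetical PCCT values:

#include <stdio.h>

int main(void)
{
	int mpar = 1000;	/* hypothetical: 1000 commands per minute */
	int nominal_us = 500;	/* hypothetical PCC nominal latency */
	int mrtt_us = 100;	/* hypothetical min request turnaround */

	int latency_ns = 60 * (1000 * 1000 * 1000 / mpar);

	if (latency_ns < nominal_us * 1000)
		latency_ns = nominal_us * 1000;
	if (latency_ns < mrtt_us * 1000)
		latency_ns = mrtt_us * 1000;

	/* 60000000 ns: one command per 60 ms, which dominates here. */
	printf("%d ns\n", latency_ns);
	return 0;
}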
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index 3961fc47152c..cd199fbe4dc9 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -464,7 +464,7 @@ static ssize_t description_show(struct device *dev,
buf[result++] = '\n';
- kfree(str_obj);
+ ACPI_FREE(str_obj);
return result;
}
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 75c7db8b156a..7855bbf752b1 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -2033,7 +2033,7 @@ void __init acpi_ec_ecdt_probe(void)
goto out;
}
- if (!strstarts(ecdt_ptr->id, "\\")) {
+ if (!strlen(ecdt_ptr->id)) {
/*
* The ECDT table on some MSI notebooks contains invalid data, together
* with an empty ID string ("").
@@ -2042,9 +2042,13 @@ void __init acpi_ec_ecdt_probe(void)
* a "fully qualified reference to the (...) embedded controller device",
* so this string always has to start with a backslash.
*
- * By verifying this we can avoid such faulty ECDT tables in a safe way.
+ * However, some ThinkBook machines have an ECDT table with a valid EC
+ * description but an invalid ID string ("_SB.PC00.LPCB.EC0").
+ *
+ * Because of this we only check if the ID string is empty in order to
+ * avoid the obvious cases.
*/
- pr_err(FW_BUG "Ignoring ECDT due to invalid ID string \"%s\"\n", ecdt_ptr->id);
+ pr_err(FW_BUG "Ignoring ECDT due to empty ID string\n");
goto out;
}
diff --git a/drivers/acpi/fan_core.c b/drivers/acpi/fan_core.c
index 095502086b41..04ff608f2ff0 100644
--- a/drivers/acpi/fan_core.c
+++ b/drivers/acpi/fan_core.c
@@ -203,18 +203,6 @@ static const struct thermal_cooling_device_ops fan_cooling_ops = {
* --------------------------------------------------------------------------
*/
-static bool acpi_fan_has_fst(struct acpi_device *device)
-{
- return acpi_has_method(device->handle, "_FST");
-}
-
-static bool acpi_fan_is_acpi4(struct acpi_device *device)
-{
- return acpi_has_method(device->handle, "_FIF") &&
- acpi_has_method(device->handle, "_FPS") &&
- acpi_has_method(device->handle, "_FSL");
-}
-
static int acpi_fan_get_fif(struct acpi_device *device)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -331,9 +319,11 @@ static int acpi_fan_probe(struct platform_device *pdev)
device->driver_data = fan;
platform_set_drvdata(pdev, fan);
- if (acpi_fan_has_fst(device)) {
+ if (acpi_has_method(device->handle, "_FST")) {
fan->has_fst = true;
- fan->acpi4 = acpi_fan_is_acpi4(device);
+ fan->acpi4 = acpi_has_method(device->handle, "_FIF") &&
+ acpi_has_method(device->handle, "_FPS") &&
+ acpi_has_method(device->handle, "_FSL");
}
if (fan->acpi4) {
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index e2781864fdce..63354972ab0b 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -140,6 +140,7 @@ int __acpi_device_uevent_modalias(const struct acpi_device *adev,
/* --------------------------------------------------------------------------
Power Resource
-------------------------------------------------------------------------- */
+void acpi_power_resources_init(void);
void acpi_power_resources_list_free(struct list_head *list);
int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
struct list_head *list);
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index ae035b93da08..3eb56b77cb6d 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -2637,7 +2637,7 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
if (ndr_desc->target_node == NUMA_NO_NODE) {
ndr_desc->target_node = phys_to_target_node(spa->address);
dev_info(acpi_desc->dev, "changing target node from %d to %d for nfit region [%pa-%pa]",
- NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);
+ NUMA_NO_NODE, ndr_desc->target_node, &res.start, &res.end);
}
/*
diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
index 4958301f5417..5a36d57289b4 100644
--- a/drivers/acpi/numa/hmat.c
+++ b/drivers/acpi/numa/hmat.c
@@ -74,7 +74,6 @@ struct memory_target {
struct node_cache_attrs cache_attrs;
u8 gen_port_device_handle[ACPI_SRAT_DEVICE_HANDLE_SIZE];
bool registered;
- bool ext_updated; /* externally updated */
};
struct memory_initiator {
@@ -368,35 +367,6 @@ static void hmat_update_target_access(struct memory_target *target,
}
}
-int hmat_update_target_coordinates(int nid, struct access_coordinate *coord,
- enum access_coordinate_class access)
-{
- struct memory_target *target;
- int pxm;
-
- if (nid == NUMA_NO_NODE)
- return -EINVAL;
-
- pxm = node_to_pxm(nid);
- guard(mutex)(&target_lock);
- target = find_mem_target(pxm);
- if (!target)
- return -ENODEV;
-
- hmat_update_target_access(target, ACPI_HMAT_READ_LATENCY,
- coord->read_latency, access);
- hmat_update_target_access(target, ACPI_HMAT_WRITE_LATENCY,
- coord->write_latency, access);
- hmat_update_target_access(target, ACPI_HMAT_READ_BANDWIDTH,
- coord->read_bandwidth, access);
- hmat_update_target_access(target, ACPI_HMAT_WRITE_BANDWIDTH,
- coord->write_bandwidth, access);
- target->ext_updated = true;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(hmat_update_target_coordinates);
-
static __init void hmat_add_locality(struct acpi_hmat_locality *hmat_loc)
{
struct memory_locality *loc;
@@ -773,10 +743,6 @@ static void hmat_update_target_attrs(struct memory_target *target,
u32 best = 0;
int i;
- /* Don't update if an external agent has changed the data. */
- if (target->ext_updated)
- return;
-
/* Don't update for generic port if there's no device handle */
if ((access == NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL ||
access == NODE_ACCESS_CLASS_GENPORT_SINK_CPU) &&
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 630fe0a34bc6..ad81aa03fe2f 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -22,6 +22,7 @@
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/string_choices.h>
struct acpi_prt_entry {
struct acpi_pci_id id;
@@ -468,7 +469,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
dev_dbg(&dev->dev, "PCI INT %c%s -> GSI %u (%s, %s) -> IRQ %d\n",
pin_name(pin), link_desc, gsi,
(triggering == ACPI_LEVEL_SENSITIVE) ? "level" : "edge",
- (polarity == ACPI_ACTIVE_LOW) ? "low" : "high", dev->irq);
+ str_low_high(polarity == ACPI_ACTIVE_LOW), dev->irq);
kfree(entry);
return 0;
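
str_low_high() is one of the <linux/string_choices.h> helpers that replace open-coded string ternaries; it returns "low" when its argument is true and "high" otherwise, matching the expression it displaces above. The helper pair has roughly this shape (demo reimplementation for illustration):

#include <stdbool.h>
#include <stdio.h>

static inline const char *demo_str_high_low(bool v)
{
	return v ? "high" : "low";
}
#define demo_str_low_high(v) demo_str_high_low(!(v))

int main(void)
{
	bool active_low = true;

	printf("polarity: %s\n", demo_str_low_high(active_low)); /* low */
	return 0;
}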
diff --git a/drivers/acpi/pfr_update.c b/drivers/acpi/pfr_update.c
index 318683744ed1..11b1c2828005 100644
--- a/drivers/acpi/pfr_update.c
+++ b/drivers/acpi/pfr_update.c
@@ -329,7 +329,7 @@ static bool applicable_image(const void *data, struct pfru_update_cap_info *cap,
if (type == PFRU_CODE_INJECT_TYPE)
return payload_hdr->rt_ver >= cap->code_rt_version;
- return payload_hdr->rt_ver >= cap->drv_rt_version;
+ return payload_hdr->svn_ver >= cap->drv_svn;
}
static void print_update_debug_info(struct pfru_updated_result *result,
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index b7243d7563b1..361a7721a6a8 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -23,6 +23,7 @@
#define pr_fmt(fmt) "ACPI: PM: " fmt
+#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -63,6 +64,9 @@ struct acpi_power_resource_entry {
struct acpi_power_resource *resource;
};
+static bool hp_eb_gp12pxp_quirk;
+static bool unused_power_resources_quirk;
+
static LIST_HEAD(acpi_power_resource_list);
static DEFINE_MUTEX(power_resource_list_lock);
@@ -992,6 +996,38 @@ struct acpi_device *acpi_add_power_resource(acpi_handle handle)
}
#ifdef CONFIG_ACPI_SLEEP
+static bool resource_is_gp12pxp(acpi_handle handle)
+{
+ const char *path;
+ bool ret;
+
+ path = acpi_handle_path(handle);
+ ret = path && strcmp(path, "\\_SB_.PCI0.GP12.PXP_") == 0;
+ kfree(path);
+
+ return ret;
+}
+
+static void acpi_resume_on_eb_gp12pxp(struct acpi_power_resource *resource)
+{
+ acpi_handle_notice(resource->device.handle,
+ "HP EB quirk - turning OFF then ON\n");
+
+ __acpi_power_off(resource);
+ __acpi_power_on(resource);
+
+ /*
+ * Use the same delay as DSDT uses in modem _RST method.
+ *
+ * Otherwise we get "Unable to change power state from unknown to D0,
+ * device inaccessible" error for the modem PCI device after thaw.
+ *
+ * This power resource is normally enabled only during thaw (once),
+ * so this wait is not a performance issue.
+ */
+ msleep(200);
+}
+
void acpi_resume_power_resources(void)
{
struct acpi_power_resource *resource;
@@ -1013,8 +1049,14 @@ void acpi_resume_power_resources(void)
if (state == ACPI_POWER_RESOURCE_STATE_OFF
&& resource->ref_count) {
- acpi_handle_debug(resource->device.handle, "Turning ON\n");
- __acpi_power_on(resource);
+ if (hp_eb_gp12pxp_quirk &&
+ resource_is_gp12pxp(resource->device.handle)) {
+ acpi_resume_on_eb_gp12pxp(resource);
+ } else {
+ acpi_handle_debug(resource->device.handle,
+ "Turning ON\n");
+ __acpi_power_on(resource);
+ }
}
mutex_unlock(&resource->resource_lock);
@@ -1024,6 +1066,41 @@ void acpi_resume_power_resources(void)
}
#endif
+static const struct dmi_system_id dmi_hp_elitebook_gp12pxp_quirk[] = {
+/*
+ * This laptop (and possibly similar models too) has a power resource called
+ * "GP12.PXP_" for its WWAN modem.
+ *
+ * For this power resource to turn ON power for the modem, it needs a certain
+ * internal flag called "ONEN" to be set.
+ * This flag only gets set from this power resource's "_OFF" method, while the
+ * actual modem power gets turned off during suspend by the "GP12.PTS" method
+ * called from the global "_PTS" (Prepare To Sleep) method.
+ * On the other hand, this power resource's "_OFF" method implementation just
+ * sets the aforementioned flag without actually doing anything else (it
+ * doesn't contain any code to actually turn off power).
+ *
+ * The above means that when, upon finishing hibernation, we try to turn this
+ * power resource back ON because its "_STA" method returns 0 (while the
+ * resource is still considered in use), its "_ON" method won't do anything
+ * since that "ONEN" flag is not set.
+ * Overall, this means the modem is dead until the laptop is rebooted, since
+ * its power has been cut by "_PTS" and its PCI configuration was lost and
+ * could not be restored.
+ *
+ * The easiest way to work around the issue is to call this power resource's
+ * "_OFF" method before calling the "_ON" method, to make sure the "ONEN"
+ * flag gets properly set.
+ */
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 855 G7 Notebook PC"),
+ },
+ },
+ {}
+};
+
static const struct dmi_system_id dmi_leave_unused_power_resources_on[] = {
{
/*
@@ -1046,7 +1123,7 @@ void acpi_turn_off_unused_power_resources(void)
{
struct acpi_power_resource *resource;
- if (dmi_check_system(dmi_leave_unused_power_resources_on))
+ if (unused_power_resources_quirk)
return;
mutex_lock(&power_resource_list_lock);
@@ -1065,3 +1142,10 @@ void acpi_turn_off_unused_power_resources(void)
mutex_unlock(&power_resource_list_lock);
}
+
+void __init acpi_power_resources_init(void)
+{
+ hp_eb_gp12pxp_quirk = dmi_check_system(dmi_hp_elitebook_gp12pxp_quirk);
+ unused_power_resources_quirk =
+ dmi_check_system(dmi_leave_unused_power_resources_on);
+}
diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c
index be033bbb126a..6792d4385eee 100644
--- a/drivers/acpi/prmt.c
+++ b/drivers/acpi/prmt.c
@@ -150,15 +150,28 @@ acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end)
th = &tm->handlers[cur_handler];
guid_copy(&th->guid, (guid_t *)handler_info->handler_guid);
+
+ /*
+ * Print an info message if handler_address is NULL; the VA lookup can
+ * then be skipped as well.
+ */
+ if (unlikely(!handler_info->handler_address)) {
+ pr_info("Skipping handler with NULL address for GUID: %pUL",
+ (guid_t *)handler_info->handler_guid);
+ continue;
+ }
+
th->handler_addr =
(void *)efi_pa_va_lookup(&th->guid, handler_info->handler_address);
/*
- * Print a warning message if handler_addr is zero which is not expected to
- * ever happen.
+ * Print a warning message and skip the VA lookup if handler_addr is zero,
+ * which is not expected to ever happen.
*/
- if (unlikely(!th->handler_addr))
+ if (unlikely(!th->handler_addr)) {
pr_warn("Failed to find VA of handler for GUID: %pUL, PA: 0x%llx",
&th->guid, handler_info->handler_address);
+ continue;
+ }
th->static_data_buffer_addr =
efi_pa_va_lookup(&th->guid, handler_info->static_data_buffer_address);
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 65e779be64ff..5d824435b26b 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -166,8 +166,7 @@ static int __acpi_processor_start(struct acpi_device *device)
if (result && !IS_ENABLED(CONFIG_ACPI_CPU_FREQ_PSS))
dev_dbg(&device->dev, "CPPC data invalid or not present\n");
- if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
- acpi_processor_power_init(pr);
+ acpi_processor_power_init(pr);
acpi_pss_perf_init(pr);
@@ -263,6 +262,8 @@ static int __init acpi_processor_driver_init(void)
if (result < 0)
return result;
+ acpi_processor_register_idle_driver();
+
result = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"acpi/cpu-drv:online",
acpi_soft_cpu_online, NULL);
@@ -301,6 +302,7 @@ static void __exit acpi_processor_driver_exit(void)
cpuhp_remove_state_nocalls(hp_online);
cpuhp_remove_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD);
+ acpi_processor_unregister_idle_driver();
driver_unregister(&acpi_processor_driver);
}
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 2c2dc559e0f8..22b051b94a86 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -51,7 +51,7 @@ module_param(latency_factor, uint, 0644);
static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);
-struct cpuidle_driver acpi_idle_driver = {
+static struct cpuidle_driver acpi_idle_driver = {
.name = "acpi_idle",
.owner = THIS_MODULE,
};
@@ -998,11 +998,6 @@ end:
return ret;
}
-/*
- * flat_state_cnt - the number of composite LPI states after the process of flattening
- */
-static int flat_state_cnt;
-
/**
* combine_lpi_states - combine local and parent LPI states to form a composite LPI state
*
@@ -1045,9 +1040,10 @@ static void stash_composite_state(struct acpi_lpi_states_array *curr_level,
curr_level->composite_states[curr_level->composite_states_size++] = t;
}
-static int flatten_lpi_states(struct acpi_processor *pr,
- struct acpi_lpi_states_array *curr_level,
- struct acpi_lpi_states_array *prev_level)
+static unsigned int flatten_lpi_states(struct acpi_processor *pr,
+ unsigned int flat_state_cnt,
+ struct acpi_lpi_states_array *curr_level,
+ struct acpi_lpi_states_array *prev_level)
{
int i, j, state_count = curr_level->size;
struct acpi_lpi_state *p, *t = curr_level->entries;
@@ -1087,7 +1083,7 @@ static int flatten_lpi_states(struct acpi_processor *pr,
}
kfree(curr_level->entries);
- return 0;
+ return flat_state_cnt;
}
int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
@@ -1102,6 +1098,7 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
acpi_handle handle = pr->handle, pr_ahandle;
struct acpi_device *d = NULL;
struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;
+ unsigned int state_count;
/* make sure our architecture has support */
ret = acpi_processor_ffh_lpi_probe(pr->id);
@@ -1114,14 +1111,13 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
if (!acpi_has_method(handle, "_LPI"))
return -EINVAL;
- flat_state_cnt = 0;
prev = &info[0];
curr = &info[1];
handle = pr->handle;
ret = acpi_processor_evaluate_lpi(handle, prev);
if (ret)
return ret;
- flatten_lpi_states(pr, prev, NULL);
+ state_count = flatten_lpi_states(pr, 0, prev, NULL);
status = acpi_get_parent(handle, &pr_ahandle);
while (ACPI_SUCCESS(status)) {
@@ -1143,18 +1139,19 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
break;
/* flatten all the LPI states in this level of hierarchy */
- flatten_lpi_states(pr, curr, prev);
+ state_count = flatten_lpi_states(pr, state_count, curr, prev);
tmp = prev, prev = curr, curr = tmp;
status = acpi_get_parent(handle, &pr_ahandle);
}
- pr->power.count = flat_state_cnt;
/* reset the index after flattening */
- for (i = 0; i < pr->power.count; i++)
+ for (i = 0; i < state_count; i++)
pr->power.lpi_states[i].index = i;
+ pr->power.count = state_count;
+
/* Tell driver that _LPI is supported. */
pr->flags.has_lpi = 1;
pr->flags.power = 1;
@@ -1360,74 +1357,102 @@ int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
return 0;
}
-static int acpi_processor_registered;
+void acpi_processor_register_idle_driver(void)
+{
+ struct acpi_processor *pr;
+ int ret = -ENODEV;
+ int cpu;
+
+ /*
+ * The ACPI idle driver is used by all possible CPUs, so set up the
+ * cpuidle states from the processor power information of one of them.
+ * Note that the previously set idle handler will be used on platforms
+ * that only support C1.
+ */
+ for_each_cpu(cpu, (struct cpumask *)cpu_possible_mask) {
+ pr = per_cpu(processors, cpu);
+ if (!pr)
+ continue;
+
+ ret = acpi_processor_get_power_info(pr);
+ if (!ret) {
+ pr->flags.power_setup_done = 1;
+ acpi_processor_setup_cpuidle_states(pr);
+ break;
+ }
+ }
+
+ if (ret) {
+ pr_debug("No ACPI power information from any CPUs.\n");
+ return;
+ }
+
+ ret = cpuidle_register_driver(&acpi_idle_driver);
+ if (ret) {
+ pr_debug("register %s failed.\n", acpi_idle_driver.name);
+ return;
+ }
+ pr_debug("%s registered with cpuidle.\n", acpi_idle_driver.name);
+}
+
+void acpi_processor_unregister_idle_driver(void)
+{
+ cpuidle_unregister_driver(&acpi_idle_driver);
+}
-int acpi_processor_power_init(struct acpi_processor *pr)
+void acpi_processor_power_init(struct acpi_processor *pr)
{
- int retval;
struct cpuidle_device *dev;
+ /*
+ * The code below only works if the current cpuidle driver is the ACPI
+ * idle driver.
+ */
+ if (cpuidle_get_driver() != &acpi_idle_driver)
+ return;
+
if (disabled_by_idle_boot_param())
- return 0;
+ return;
acpi_processor_cstate_first_run_checks();
if (!acpi_processor_get_power_info(pr))
pr->flags.power_setup_done = 1;
- /*
- * Install the idle handler if processor power management is supported.
- * Note that we use previously set idle handler will be used on
- * platforms that only support C1.
- */
- if (pr->flags.power) {
- /* Register acpi_idle_driver if not already registered */
- if (!acpi_processor_registered) {
- acpi_processor_setup_cpuidle_states(pr);
- retval = cpuidle_register_driver(&acpi_idle_driver);
- if (retval)
- return retval;
- pr_debug("%s registered with cpuidle\n",
- acpi_idle_driver.name);
- }
+ if (!pr->flags.power)
+ return;
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
- per_cpu(acpi_cpuidle_device, pr->id) = dev;
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return;
- acpi_processor_setup_cpuidle_dev(pr, dev);
+ per_cpu(acpi_cpuidle_device, pr->id) = dev;
- /* Register per-cpu cpuidle_device. Cpuidle driver
- * must already be registered before registering device
- */
- retval = cpuidle_register_device(dev);
- if (retval) {
- if (acpi_processor_registered == 0)
- cpuidle_unregister_driver(&acpi_idle_driver);
- return retval;
- }
- acpi_processor_registered++;
+ acpi_processor_setup_cpuidle_dev(pr, dev);
+
+ /*
+ * Register a cpuidle device for this CPU. The cpuidle driver that will
+ * use this device is expected to have been registered already.
+ */
+ if (cpuidle_register_device(dev)) {
+ per_cpu(acpi_cpuidle_device, pr->id) = NULL;
+ kfree(dev);
}
- return 0;
}
-int acpi_processor_power_exit(struct acpi_processor *pr)
+void acpi_processor_power_exit(struct acpi_processor *pr)
{
struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
if (disabled_by_idle_boot_param())
- return 0;
+ return;
if (pr->flags.power) {
cpuidle_unregister_device(dev);
- acpi_processor_registered--;
- if (acpi_processor_registered == 0)
- cpuidle_unregister_driver(&acpi_idle_driver);
-
kfree(dev);
}
pr->flags.power_setup_done = 0;
- return 0;
}
+
+MODULE_IMPORT_NS("ACPI_PROCESSOR_IDLE");
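
The flatten_lpi_states() change above replaces a file-scope counter (flat_state_cnt) with a value threaded through the calls: each invocation takes the running count and returns the updated one, so the state lives entirely in the caller. A minimal sketch of that accumulator-threading style, with hypothetical names:

/* One flattening step: fold this level's states into the running count. */
static unsigned int example_flatten_level(unsigned int count, int level_states)
{
	return count + level_states;	/* the real code also merges state data */
}

/* The caller owns the counter; no file-scope state is needed. */
static unsigned int example_flatten_all(const int *levels, int n_levels)
{
	unsigned int count = 0;
	int i;

	for (i = 0; i < n_levels; i++)
		count = example_flatten_level(count, levels[i]);

	return count;
}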
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 755003bf3a45..8972446b7162 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -180,7 +180,7 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy)
struct acpi_processor *pr = per_cpu(processors, cpu);
int ret;
- if (!pr || !pr->performance)
+ if (!pr)
continue;
/*
@@ -197,6 +197,9 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy)
pr_err("Failed to add freq constraint for CPU%d (%d)\n",
cpu, ret);
+ if (!pr->performance)
+ continue;
+
ret = acpi_processor_get_platform_limit(pr);
if (ret)
pr_err("Failed to update freq constraint for CPU%d (%d)\n",
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 1219adb11ab9..c7b1dc5687ec 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -62,19 +62,14 @@ static int phys_package_first_cpu(int cpu)
return 0;
}
-static int cpu_has_cpufreq(unsigned int cpu)
+static bool cpu_has_cpufreq(unsigned int cpu)
{
- struct cpufreq_policy *policy;
-
if (!acpi_processor_cpufreq_init)
return 0;
- policy = cpufreq_cpu_get(cpu);
- if (policy) {
- cpufreq_cpu_put(policy);
- return 1;
- }
- return 0;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+
+ return policy != NULL;
}
static int cpufreq_get_max_state(unsigned int cpu)
@@ -93,12 +88,31 @@ static int cpufreq_get_cur_state(unsigned int cpu)
return reduction_step(cpu);
}
+static bool cpufreq_update_thermal_limit(unsigned int cpu, struct acpi_processor *pr)
+{
+ unsigned long max_freq;
+ int ret;
+
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return false;
+
+ max_freq = (policy->cpuinfo.max_freq *
+ (100 - reduction_step(cpu) * cpufreq_thermal_reduction_pctg)) / 100;
+
+ ret = freq_qos_update_request(&pr->thermal_req, max_freq);
+ if (ret < 0) {
+ pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n",
+ pr->id, ret);
+ }
+
+ return true;
+}
+
static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
- struct cpufreq_policy *policy;
struct acpi_processor *pr;
- unsigned long max_freq;
- int i, ret;
+ int i;
if (!cpu_has_cpufreq(cpu))
return 0;
@@ -120,20 +134,8 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
if (unlikely(!freq_qos_request_active(&pr->thermal_req)))
continue;
- policy = cpufreq_cpu_get(i);
- if (!policy)
+ if (!cpufreq_update_thermal_limit(i, pr))
return -EINVAL;
-
- max_freq = (policy->cpuinfo.max_freq *
- (100 - reduction_step(i) * cpufreq_thermal_reduction_pctg)) / 100;
-
- cpufreq_cpu_put(policy);
-
- ret = freq_qos_update_request(&pr->thermal_req, max_freq);
- if (ret < 0) {
- pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n",
- pr->id, ret);
- }
}
return 0;
}
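
The processor_thermal.c hunks above switch to scope-based cleanup: __free() from <linux/cleanup.h> attaches a destructor that runs when the variable leaves scope, which removes the manual cpufreq_cpu_put() calls on every exit path. A generic sketch of the mechanism with a hypothetical destructor (the put_cpufreq_policy one used above already exists in <linux/cpufreq.h>):

#include <linux/cleanup.h>
#include <linux/slab.h>

/* Hypothetical destructor; declared once, then usable with __free(). */
DEFINE_FREE(example_kfree, void *, if (_T) kfree(_T))

static int example_use(size_t len)
{
	/* Freed automatically when buf goes out of scope, on every path. */
	void *buf __free(example_kfree) = kmalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* ... use buf ... */
	return 0;
}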
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 436019d96027..1b997a5497e7 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -83,6 +83,7 @@ static bool acpi_nondev_subnode_extract(union acpi_object *desc,
struct fwnode_handle *parent)
{
struct acpi_data_node *dn;
+ acpi_handle scope = NULL;
bool result;
if (acpi_graph_ignore_port(handle))
@@ -98,59 +99,45 @@ static bool acpi_nondev_subnode_extract(union acpi_object *desc,
INIT_LIST_HEAD(&dn->data.properties);
INIT_LIST_HEAD(&dn->data.subnodes);
- result = acpi_extract_properties(handle, desc, &dn->data);
-
- if (handle) {
- acpi_handle scope;
- acpi_status status;
+ /*
+ * The scope for the completion of relative pathname segments and
+ * subnode object lookup is the one of the namespace node (device)
+ * containing the object that has returned the package. That is, it's
+ * the scope of that object's parent device.
+ */
+ if (handle)
+ acpi_get_parent(handle, &scope);
- /*
- * The scope for the subnode object lookup is the one of the
- * namespace node (device) containing the object that has
- * returned the package. That is, it's the scope of that
- * object's parent.
- */
- status = acpi_get_parent(handle, &scope);
- if (ACPI_SUCCESS(status)
- && acpi_enumerate_nondev_subnodes(scope, desc, &dn->data,
- &dn->fwnode))
- result = true;
- } else if (acpi_enumerate_nondev_subnodes(NULL, desc, &dn->data,
- &dn->fwnode)) {
+ /*
+ * Extract properties from the _DSD-equivalent package pointed to by
+ * desc and use scope (if not NULL) for the completion of relative
+ * pathname segments.
+ *
+ * The extracted properties will be held in the new data node dn.
+ */
+ result = acpi_extract_properties(scope, desc, &dn->data);
+ /*
+ * Look for subnodes in the _DSD-equivalent package pointed to by desc
+ * and create child nodes of dn if there are any.
+ */
+ if (acpi_enumerate_nondev_subnodes(scope, desc, &dn->data, &dn->fwnode))
result = true;
- }
-
- if (result) {
- dn->handle = handle;
- dn->data.pointer = desc;
- list_add_tail(&dn->sibling, list);
- return true;
- }
-
- kfree(dn);
- acpi_handle_debug(handle, "Invalid properties/subnodes data, skipping\n");
- return false;
-}
-
-static bool acpi_nondev_subnode_data_ok(acpi_handle handle,
- const union acpi_object *link,
- struct list_head *list,
- struct fwnode_handle *parent)
-{
- struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
- acpi_status status;
- status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf,
- ACPI_TYPE_PACKAGE);
- if (ACPI_FAILURE(status))
+ if (!result) {
+ kfree(dn);
+ acpi_handle_debug(handle, "Invalid properties/subnodes data, skipping\n");
return false;
+ }
- if (acpi_nondev_subnode_extract(buf.pointer, handle, link, list,
- parent))
- return true;
+ /*
+ * The handle will be NULL if the desc package is embedded in an outer
+ * _DSD-equivalent package and its scope cannot be determined.
+ */
+ dn->handle = handle;
+ dn->data.pointer = desc;
+ list_add_tail(&dn->sibling, list);
- ACPI_FREE(buf.pointer);
- return false;
+ return true;
}
static bool acpi_nondev_subnode_ok(acpi_handle scope,
@@ -158,9 +145,16 @@ static bool acpi_nondev_subnode_ok(acpi_handle scope,
struct list_head *list,
struct fwnode_handle *parent)
{
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
acpi_handle handle;
acpi_status status;
+ /*
+ * If the scope is unknown, the _DSD-equivalent package being parsed
+ * was embedded in an outer _DSD-equivalent package as a result of
+ * direct evaluation of an object pointed to by a reference. In that
+ * case, using a pathname as the target object pointer is invalid.
+ */
if (!scope)
return false;
@@ -169,7 +163,17 @@ static bool acpi_nondev_subnode_ok(acpi_handle scope,
if (ACPI_FAILURE(status))
return false;
- return acpi_nondev_subnode_data_ok(handle, link, list, parent);
+ status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf,
+ ACPI_TYPE_PACKAGE);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ if (acpi_nondev_subnode_extract(buf.pointer, handle, link, list,
+ parent))
+ return true;
+
+ ACPI_FREE(buf.pointer);
+ return false;
}
static bool acpi_add_nondev_subnodes(acpi_handle scope,
@@ -180,9 +184,12 @@ static bool acpi_add_nondev_subnodes(acpi_handle scope,
bool ret = false;
int i;
+ /*
+ * Every element in the links package is expected to represent a link
+ * to a non-device node in a tree containing device-specific data.
+ */
for (i = 0; i < links->package.count; i++) {
union acpi_object *link, *desc;
- acpi_handle handle;
bool result;
link = &links->package.elements[i];
@@ -190,26 +197,53 @@ static bool acpi_add_nondev_subnodes(acpi_handle scope,
if (link->package.count != 2)
continue;
- /* The first one must be a string. */
+ /* The first one (the key) must be a string. */
if (link->package.elements[0].type != ACPI_TYPE_STRING)
continue;
- /* The second one may be a string, a reference or a package. */
+ /* The second one (the target) may be a string or a package. */
switch (link->package.elements[1].type) {
case ACPI_TYPE_STRING:
+ /*
+ * The string is expected to be a full pathname or a
+ * pathname segment relative to the given scope. That
+ * pathname is expected to point to an object returning
+ * a package that contains _DSD-equivalent information.
+ */
result = acpi_nondev_subnode_ok(scope, link, list,
parent);
break;
- case ACPI_TYPE_LOCAL_REFERENCE:
- handle = link->package.elements[1].reference.handle;
- result = acpi_nondev_subnode_data_ok(handle, link, list,
- parent);
- break;
case ACPI_TYPE_PACKAGE:
+ /*
+ * This happens when a reference is used in AML to
+ * point to the target. Since the target is expected
+ * to be a named object, a reference to it will cause it
+ * to be evaluated in place and its return package will
+ * be embedded in the links package at the location of
+ * the reference.
+ *
+ * The target package is expected to contain _DSD-
+ * equivalent information, but the scope in which it
+ * is located in the original AML is unknown. Thus
+ * it cannot contain pathname segments represented as
+ * strings because there is no way to build full
+ * pathnames out of them.
+ */
+ acpi_handle_debug(scope, "subnode %s: Unknown scope\n",
+ link->package.elements[0].string.pointer);
desc = &link->package.elements[1];
result = acpi_nondev_subnode_extract(desc, NULL, link,
list, parent);
break;
+ case ACPI_TYPE_LOCAL_REFERENCE:
+ /*
+ * It is not expected to see any local references in
+ * the links package because referencing a named object
+ * should cause it to be evaluated in place.
+ */
+ acpi_handle_info(scope, "subnode %s: Unexpected reference\n",
+ link->package.elements[0].string.pointer);
+ fallthrough;
default:
result = false;
break;
@@ -369,6 +403,9 @@ static void acpi_untie_nondev_subnodes(struct acpi_device_data *data)
struct acpi_data_node *dn;
list_for_each_entry(dn, &data->subnodes, sibling) {
+ if (!dn->handle)
+ continue;
+
acpi_detach_data(dn->handle, acpi_nondev_subnode_tag);
acpi_untie_nondev_subnodes(&dn->data);
@@ -383,6 +420,9 @@ static bool acpi_tie_nondev_subnodes(struct acpi_device_data *data)
acpi_status status;
bool ret;
+ if (!dn->handle)
+ continue;
+
status = acpi_attach_data(dn->handle, acpi_nondev_subnode_tag, dn);
if (ACPI_FAILURE(status) && status != AE_ALREADY_EXISTS) {
acpi_handle_err(dn->handle, "Can't tag data node\n");
@@ -804,13 +844,35 @@ acpi_fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
return NULL;
}
+static unsigned int acpi_fwnode_get_args_count(struct fwnode_handle *fwnode,
+ const char *nargs_prop)
+{
+ const struct acpi_device_data *data;
+ const union acpi_object *obj;
+ int ret;
+
+ data = acpi_device_data_of_node(fwnode);
+ if (!data)
+ return 0;
+
+ ret = acpi_data_get_property(data, nargs_prop, ACPI_TYPE_INTEGER, &obj);
+ if (ret)
+ return 0;
+
+ return obj->integer.value;
+}
+
static int acpi_get_ref_args(struct fwnode_reference_args *args,
struct fwnode_handle *ref_fwnode,
+ const char *nargs_prop,
const union acpi_object **element,
const union acpi_object *end, size_t num_args)
{
u32 nargs = 0, i;
+ if (nargs_prop)
+ num_args = acpi_fwnode_get_args_count(ref_fwnode, nargs_prop);
+
/*
* Assume the following integer elements are all args. Stop counting on
* the first reference (possibly represented as a string) or end of the
@@ -882,45 +944,10 @@ static struct fwnode_handle *acpi_parse_string_ref(const struct fwnode_handle *f
return &dn->fwnode;
}
-/**
- * __acpi_node_get_property_reference - returns handle to the referenced object
- * @fwnode: Firmware node to get the property from
- * @propname: Name of the property
- * @index: Index of the reference to return
- * @num_args: Maximum number of arguments after each reference
- * @args: Location to store the returned reference with optional arguments
- * (may be NULL)
- *
- * Find property with @name, verifify that it is a package containing at least
- * one object reference and if so, store the ACPI device object pointer to the
- * target object in @args->adev. If the reference includes arguments, store
- * them in the @args->args[] array.
- *
- * If there's more than one reference in the property value package, @index is
- * used to select the one to return.
- *
- * It is possible to leave holes in the property value set like in the
- * example below:
- *
- * Package () {
- * "cs-gpios",
- * Package () {
- * ^GPIO, 19, 0, 0,
- * ^GPIO, 20, 0, 0,
- * 0,
- * ^GPIO, 21, 0, 0,
- * }
- * }
- *
- * Calling this function with index %2 or index %3 return %-ENOENT. If the
- * property does not contain any more values %-ENOENT is returned. The NULL
- * entry must be single integer and preferably contain value %0.
- *
- * Return: %0 on success, negative error code on failure.
- */
-int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
- const char *propname, size_t index, size_t num_args,
- struct fwnode_reference_args *args)
+static int acpi_fwnode_get_reference_args(const struct fwnode_handle *fwnode,
+ const char *propname, const char *nargs_prop,
+ unsigned int args_count, unsigned int index,
+ struct fwnode_reference_args *args)
{
const union acpi_object *element, *end;
const union acpi_object *obj;
@@ -996,10 +1023,10 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
return -EINVAL;
element++;
-
ret = acpi_get_ref_args(idx == index ? args : NULL,
acpi_fwnode_handle(device),
- &element, end, num_args);
+ nargs_prop, &element, end,
+ args_count);
if (ret < 0)
return ret;
@@ -1014,10 +1041,9 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
return -EINVAL;
element++;
-
ret = acpi_get_ref_args(idx == index ? args : NULL,
- ref_fwnode, &element, end,
- num_args);
+ ref_fwnode, nargs_prop, &element, end,
+ args_count);
if (ret < 0)
return ret;
@@ -1039,6 +1065,50 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
return -ENOENT;
}
+
+/**
+ * __acpi_node_get_property_reference - returns handle to the referenced object
+ * @fwnode: Firmware node to get the property from
+ * @propname: Name of the property
+ * @index: Index of the reference to return
+ * @num_args: Maximum number of arguments after each reference
+ * @args: Location to store the returned reference with optional arguments
+ * (may be NULL)
+ *
+ * Find property with @propname, verify that it is a package containing at least
+ * one object reference and if so, store the ACPI device object pointer to the
+ * target object in @args->adev. If the reference includes arguments, store
+ * them in the @args->args[] array.
+ *
+ * If there's more than one reference in the property value package, @index is
+ * used to select the one to return.
+ *
+ * It is possible to leave holes in the property value set like in the
+ * example below:
+ *
+ * Package () {
+ * "cs-gpios",
+ * Package () {
+ * ^GPIO, 19, 0, 0,
+ * ^GPIO, 20, 0, 0,
+ * 0,
+ * ^GPIO, 21, 0, 0,
+ * }
+ * }
+ *
+ * Calling this function with index %2 or index %3 returns %-ENOENT. If the
+ * property does not contain any more values, %-ENOENT is returned. The NULL
+ * entry must be a single integer and preferably contain the value %0.
+ *
+ * Return: %0 on success, negative error code on failure.
+ */
+int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
+ const char *propname, size_t index,
+ size_t num_args,
+ struct fwnode_reference_args *args)
+{
+ return acpi_fwnode_get_reference_args(fwnode, propname, NULL, index, num_args, args);
+}
EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference);
static int acpi_data_prop_read_single(const struct acpi_device_data *data,
@@ -1318,6 +1388,28 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
return NULL;
}
+/**
+ * acpi_get_next_present_subnode - Return the next present child node handle
+ * @fwnode: Firmware node to find the next child node for.
+ * @child: Handle to one of the device's child nodes or a null handle.
+ *
+ * Like acpi_get_next_subnode(), but the device nodes returned by
+ * acpi_get_next_present_subnode() are guaranteed to be present.
+ *
+ * Returns: The fwnode handle of the next present sub-node.
+ */
+static struct fwnode_handle *
+acpi_get_next_present_subnode(const struct fwnode_handle *fwnode,
+ struct fwnode_handle *child)
+{
+ do {
+ child = acpi_get_next_subnode(fwnode, child);
+ } while (is_acpi_device_node(child) &&
+ !acpi_device_is_present(to_acpi_device_node(child)));
+
+ return child;
+}
+
/**
* acpi_node_get_parent - Return parent fwnode of this fwnode
* @fwnode: Firmware node whose parent to get
@@ -1558,16 +1650,6 @@ acpi_fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
val, nval);
}
-static int
-acpi_fwnode_get_reference_args(const struct fwnode_handle *fwnode,
- const char *prop, const char *nargs_prop,
- unsigned int args_count, unsigned int index,
- struct fwnode_reference_args *args)
-{
- return __acpi_node_get_property_reference(fwnode, prop, index,
- args_count, args);
-}
-
static const char *acpi_fwnode_get_name(const struct fwnode_handle *fwnode)
{
const struct acpi_device *adev;
@@ -1662,7 +1744,7 @@ static int acpi_fwnode_irq_get(const struct fwnode_handle *fwnode,
.property_read_string_array = \
acpi_fwnode_property_read_string_array, \
.get_parent = acpi_node_get_parent, \
- .get_next_child_node = acpi_get_next_subnode, \
+ .get_next_child_node = acpi_get_next_present_subnode, \
.get_named_child_node = acpi_fwnode_get_named_child_node, \
.get_name = acpi_fwnode_get_name, \
.get_name_prefix = acpi_fwnode_get_name_prefix, \
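
acpi_get_next_present_subnode() above is a filtered iterator: it wraps the plain acpi_get_next_subnode() and keeps advancing while the current node fails the presence check, so callers transparently see only qualifying nodes. A generic sketch of that shape, with hypothetical types:

#include <linux/types.h>

struct example_node;					/* hypothetical */
struct example_node *example_next(struct example_node *cur);
bool example_is_present(const struct example_node *node);

static struct example_node *example_next_present(struct example_node *cur)
{
	do {
		cur = example_next(cur);	/* returns NULL at the end */
	} while (cur && !example_is_present(cur));

	return cur;
}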
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index b1ab192d7a08..d16906f46484 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/dmi.h>
+#include <linux/string_choices.h>
#ifdef CONFIG_X86
#define valid_IRQ(i) (((i) != 0) && ((i) != 2))
@@ -511,6 +512,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
},
},
{
+ /* Asus Vivobook Pro N6506CU* */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "N6506CU"),
+ },
+ },
+ {
/* LG Electronics 17U70P */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
@@ -773,7 +781,7 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
pr_warn("ACPI: IRQ %d override to %s%s, %s%s\n", gsi,
t ? "level" : "edge",
trig == triggering ? "" : "(!)",
- p ? "low" : "high",
+ str_low_high(p),
pol == polarity ? "" : "(!)");
triggering = trig;
polarity = pol;
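
str_low_high() is one of the helpers from <linux/string_choices.h> that replace open-coded ternaries in log messages; it returns "low" when its argument is true. A minimal usage sketch:

#include <linux/printk.h>
#include <linux/string_choices.h>

static void example_report_polarity(bool active_low)
{
	/* prints "polarity: low" when active_low is true */
	pr_info("polarity: %s\n", str_low_high(active_low));
}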
diff --git a/drivers/acpi/riscv/Kconfig b/drivers/acpi/riscv/Kconfig
new file mode 100644
index 000000000000..046296a18d00
--- /dev/null
+++ b/drivers/acpi/riscv/Kconfig
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# ACPI Configuration for RISC-V
+#
+
+config ACPI_RIMT
+ bool
diff --git a/drivers/acpi/riscv/Makefile b/drivers/acpi/riscv/Makefile
index a96fdf1e2cb8..1284a076fa88 100644
--- a/drivers/acpi/riscv/Makefile
+++ b/drivers/acpi/riscv/Makefile
@@ -2,3 +2,4 @@
obj-y += rhct.o init.o irq.o
obj-$(CONFIG_ACPI_PROCESSOR_IDLE) += cpuidle.o
obj-$(CONFIG_ACPI_CPPC_LIB) += cppc.o
+obj-$(CONFIG_ACPI_RIMT) += rimt.o
diff --git a/drivers/acpi/riscv/cppc.c b/drivers/acpi/riscv/cppc.c
index 440cf9fb91aa..42c1a9052470 100644
--- a/drivers/acpi/riscv/cppc.c
+++ b/drivers/acpi/riscv/cppc.c
@@ -119,7 +119,7 @@ int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val)
*val = data.ret.value;
- return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0;
+ return data.ret.error;
}
return -EINVAL;
@@ -148,7 +148,7 @@ int cpc_write_ffh(int cpu, struct cpc_reg *reg, u64 val)
smp_call_function_single(cpu, cppc_ffh_csr_write, &data, 1);
- return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0;
+ return data.ret.error;
}
return -EINVAL;
diff --git a/drivers/acpi/riscv/init.c b/drivers/acpi/riscv/init.c
index 673e4d5dd752..7c00f7995e86 100644
--- a/drivers/acpi/riscv/init.c
+++ b/drivers/acpi/riscv/init.c
@@ -10,4 +10,6 @@
void __init acpi_arch_init(void)
{
riscv_acpi_init_gsi_mapping();
+ if (IS_ENABLED(CONFIG_ACPI_RIMT))
+ riscv_acpi_rimt_init();
}
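
A note on the IS_ENABLED() guard above: unlike an #ifdef, the call stays visible to the compiler in every configuration and is folded away as dead code when CONFIG_ACPI_RIMT is off, which is why the bare declaration of riscv_acpi_rimt_init() in init.h below suffices even when rimt.o is not built. A sketch with a hypothetical option:

#include <linux/kconfig.h>

void example_optional_init(void);	/* defined only when the option is on */

void example_arch_init(void)
{
	/* Compiles in all configurations; dead-code-eliminated when off. */
	if (IS_ENABLED(CONFIG_EXAMPLE_FEATURE))
		example_optional_init();
}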
diff --git a/drivers/acpi/riscv/init.h b/drivers/acpi/riscv/init.h
index 0b9a07e4031f..1680aa2aaf23 100644
--- a/drivers/acpi/riscv/init.h
+++ b/drivers/acpi/riscv/init.h
@@ -2,3 +2,4 @@
#include <linux/init.h>
void __init riscv_acpi_init_gsi_mapping(void);
+void __init riscv_acpi_rimt_init(void);
diff --git a/drivers/acpi/riscv/irq.c b/drivers/acpi/riscv/irq.c
index cced960c2aef..d9a2154d6c6a 100644
--- a/drivers/acpi/riscv/irq.c
+++ b/drivers/acpi/riscv/irq.c
@@ -10,6 +10,8 @@
#include "init.h"
+#define RISCV_ACPI_INTC_FLAG_PENDING BIT(0)
+
struct riscv_ext_intc_list {
acpi_handle handle;
u32 gsi_base;
@@ -17,6 +19,7 @@ struct riscv_ext_intc_list {
u32 nr_idcs;
u32 id;
u32 type;
+ u32 flag;
struct list_head list;
};
@@ -69,6 +72,22 @@ static acpi_status riscv_acpi_update_gsi_handle(u32 gsi_base, acpi_handle handle
return AE_NOT_FOUND;
}
+int riscv_acpi_update_gsi_range(u32 gsi_base, u32 nr_irqs)
+{
+ struct riscv_ext_intc_list *ext_intc_element;
+
+ list_for_each_entry(ext_intc_element, &ext_intc_list, list) {
+ if (gsi_base == ext_intc_element->gsi_base &&
+ (ext_intc_element->flag & RISCV_ACPI_INTC_FLAG_PENDING)) {
+ ext_intc_element->nr_irqs = nr_irqs;
+ ext_intc_element->flag &= ~RISCV_ACPI_INTC_FLAG_PENDING;
+ return 0;
+ }
+ }
+
+ return -ENODEV;
+}
+
int riscv_acpi_get_gsi_info(struct fwnode_handle *fwnode, u32 *gsi_base,
u32 *id, u32 *nr_irqs, u32 *nr_idcs)
{
@@ -115,20 +134,67 @@ struct fwnode_handle *riscv_acpi_get_gsi_domain_id(u32 gsi)
static int __init riscv_acpi_register_ext_intc(u32 gsi_base, u32 nr_irqs, u32 nr_idcs,
u32 id, u32 type)
{
- struct riscv_ext_intc_list *ext_intc_element;
+ struct riscv_ext_intc_list *ext_intc_element, *node, *prev;
ext_intc_element = kzalloc(sizeof(*ext_intc_element), GFP_KERNEL);
if (!ext_intc_element)
return -ENOMEM;
ext_intc_element->gsi_base = gsi_base;
- ext_intc_element->nr_irqs = nr_irqs;
+
+ /* If nr_irqs is zero, mark the entry as pending and use the maximum possible range */
+ if (nr_irqs) {
+ ext_intc_element->nr_irqs = nr_irqs;
+ } else {
+ ext_intc_element->flag |= RISCV_ACPI_INTC_FLAG_PENDING;
+ ext_intc_element->nr_irqs = U32_MAX - ext_intc_element->gsi_base;
+ }
+
ext_intc_element->nr_idcs = nr_idcs;
ext_intc_element->id = id;
- list_add_tail(&ext_intc_element->list, &ext_intc_list);
+ list_for_each_entry(node, &ext_intc_list, list) {
+ if (node->gsi_base < ext_intc_element->gsi_base)
+ break;
+ }
+
+ /* Adjust the previous node's GSI range if it has a pending registration */
+ prev = list_prev_entry(node, list);
+ if (!list_entry_is_head(prev, &ext_intc_list, list)) {
+ if (prev->flag & RISCV_ACPI_INTC_FLAG_PENDING)
+ prev->nr_irqs = ext_intc_element->gsi_base - prev->gsi_base;
+ }
+
+ list_add_tail(&ext_intc_element->list, &node->list);
return 0;
}
+static acpi_status __init riscv_acpi_create_gsi_map_smsi(acpi_handle handle, u32 level,
+ void *context, void **return_value)
+{
+ acpi_status status;
+ u64 gbase;
+
+ if (!acpi_has_method(handle, "_GSB")) {
+ acpi_handle_err(handle, "_GSB method not found\n");
+ return AE_ERROR;
+ }
+
+ status = acpi_evaluate_integer(handle, "_GSB", NULL, &gbase);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_err(handle, "failed to evaluate _GSB method\n");
+ return status;
+ }
+
+ riscv_acpi_register_ext_intc(gbase, 0, 0, 0, ACPI_RISCV_IRQCHIP_SMSI);
+ status = riscv_acpi_update_gsi_handle((u32)gbase, handle);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_err(handle, "failed to find the GSI mapping entry\n");
+ return status;
+ }
+
+ return AE_OK;
+}
+
static acpi_status __init riscv_acpi_create_gsi_map(acpi_handle handle, u32 level,
void *context, void **return_value)
{
@@ -183,6 +249,9 @@ void __init riscv_acpi_init_gsi_mapping(void)
if (acpi_table_parse_madt(ACPI_MADT_TYPE_APLIC, riscv_acpi_aplic_parse_madt, 0) > 0)
acpi_get_devices("RSCV0002", riscv_acpi_create_gsi_map, NULL, NULL);
+
+ /* Unlike PLIC/APLIC, SYSMSI doesn't have a MADT entry */
+ acpi_get_devices("RSCV0006", riscv_acpi_create_gsi_map_smsi, NULL, NULL);
}
static acpi_handle riscv_acpi_get_gsi_handle(u32 gsi)
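
The irq.c hunks above register an interrupt controller whose GSI range size is unknown at parse time: the entry is flagged as pending, given the widest possible span, and shrunk later when the real size arrives. A standalone sketch of the pending-range bookkeeping, with hypothetical types:

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/types.h>

#define EXAMPLE_FLAG_PENDING	BIT(0)

struct example_range {			/* hypothetical */
	u32 base;
	u32 len;
	u32 flags;
	struct list_head list;
};

static LIST_HEAD(example_ranges);

/* Later call that supplies the real size and clears the pending flag. */
static int example_update_range(u32 base, u32 len)
{
	struct example_range *r;

	list_for_each_entry(r, &example_ranges, list) {
		if (r->base == base && (r->flags & EXAMPLE_FLAG_PENDING)) {
			r->len = len;
			r->flags &= ~EXAMPLE_FLAG_PENDING;
			return 0;
		}
	}

	return -ENODEV;
}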
diff --git a/drivers/acpi/riscv/rimt.c b/drivers/acpi/riscv/rimt.c
new file mode 100644
index 000000000000..683fcfe35c31
--- /dev/null
+++ b/drivers/acpi/riscv/rimt.c
@@ -0,0 +1,520 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024-2025, Ventana Micro Systems Inc
+ * Author: Sunil V L <sunilvl@ventanamicro.com>
+ *
+ */
+
+#define pr_fmt(fmt) "ACPI: RIMT: " fmt
+
+#include <linux/acpi.h>
+#include <linux/acpi_rimt.h>
+#include <linux/iommu.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include "init.h"
+
+struct rimt_fwnode {
+ struct list_head list;
+ struct acpi_rimt_node *rimt_node;
+ struct fwnode_handle *fwnode;
+};
+
+static LIST_HEAD(rimt_fwnode_list);
+static DEFINE_SPINLOCK(rimt_fwnode_lock);
+
+#define RIMT_TYPE_MASK(type) (1 << (type))
+#define RIMT_IOMMU_TYPE BIT(0)
+
+/* Root pointer to the mapped RIMT table */
+static struct acpi_table_header *rimt_table;
+
+/**
+ * rimt_set_fwnode() - Create rimt_fwnode and use it to register
+ * iommu data in the rimt_fwnode_list
+ *
+ * @rimt_node: RIMT table node associated with the IOMMU
+ * @fwnode: fwnode associated with the RIMT node
+ *
+ * Returns: 0 on success
+ * <0 on failure
+ */
+static int rimt_set_fwnode(struct acpi_rimt_node *rimt_node,
+ struct fwnode_handle *fwnode)
+{
+ struct rimt_fwnode *np;
+
+ np = kzalloc(sizeof(*np), GFP_ATOMIC);
+
+ if (WARN_ON(!np))
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&np->list);
+ np->rimt_node = rimt_node;
+ np->fwnode = fwnode;
+
+ spin_lock(&rimt_fwnode_lock);
+ list_add_tail(&np->list, &rimt_fwnode_list);
+ spin_unlock(&rimt_fwnode_lock);
+
+ return 0;
+}
+
+/**
+ * rimt_get_fwnode() - Retrieve fwnode associated with an RIMT node
+ *
+ * @node: RIMT table node to be looked-up
+ *
+ * Returns: fwnode_handle pointer on success, NULL on failure
+ */
+static struct fwnode_handle *rimt_get_fwnode(struct acpi_rimt_node *node)
+{
+ struct fwnode_handle *fwnode = NULL;
+ struct rimt_fwnode *curr;
+
+ spin_lock(&rimt_fwnode_lock);
+ list_for_each_entry(curr, &rimt_fwnode_list, list) {
+ if (curr->rimt_node == node) {
+ fwnode = curr->fwnode;
+ break;
+ }
+ }
+ spin_unlock(&rimt_fwnode_lock);
+
+ return fwnode;
+}
+
+static acpi_status rimt_match_node_callback(struct acpi_rimt_node *node,
+ void *context)
+{
+ acpi_status status = AE_NOT_FOUND;
+ struct device *dev = context;
+
+ if (node->type == ACPI_RIMT_NODE_TYPE_IOMMU) {
+ struct acpi_rimt_iommu *iommu_node = (struct acpi_rimt_iommu *)&node->node_data;
+
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev;
+ u16 bdf;
+
+ pdev = to_pci_dev(dev);
+ bdf = PCI_DEVID(pdev->bus->number, pdev->devfn);
+ if ((pci_domain_nr(pdev->bus) == iommu_node->pcie_segment_number) &&
+ bdf == iommu_node->pcie_bdf) {
+ status = AE_OK;
+ } else {
+ status = AE_NOT_FOUND;
+ }
+ } else {
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res && res->start == iommu_node->base_address)
+ status = AE_OK;
+ else
+ status = AE_NOT_FOUND;
+ }
+ } else if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
+ struct acpi_rimt_pcie_rc *pci_rc;
+ struct pci_bus *bus;
+
+ bus = to_pci_bus(dev);
+ pci_rc = (struct acpi_rimt_pcie_rc *)node->node_data;
+
+ /*
+ * It is assumed that PCI segment numbers map one-to-one
+ * with root complexes. Each segment number can represent only
+ * one root complex.
+ */
+ status = pci_rc->pcie_segment_number == pci_domain_nr(bus) ?
+ AE_OK : AE_NOT_FOUND;
+ } else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) {
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_rimt_platform_device *ncomp;
+ struct device *plat_dev = dev;
+ struct acpi_device *adev;
+
+ /*
+ * Walk the device tree to find a device with an
+ * ACPI companion; there is no point in scanning
+ * RIMT for a device matching a platform device if
+ * the device does not have an ACPI companion to
+ * start with.
+ */
+ do {
+ adev = ACPI_COMPANION(plat_dev);
+ if (adev)
+ break;
+
+ plat_dev = plat_dev->parent;
+ } while (plat_dev);
+
+ if (!adev)
+ return status;
+
+ status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
+ if (ACPI_FAILURE(status)) {
+ dev_warn(plat_dev, "Can't get device full path name\n");
+ return status;
+ }
+
+ ncomp = (struct acpi_rimt_platform_device *)node->node_data;
+ status = !strcmp(ncomp->device_name, buf.pointer) ?
+ AE_OK : AE_NOT_FOUND;
+ acpi_os_free(buf.pointer);
+ }
+
+ return status;
+}
+
+static struct acpi_rimt_node *rimt_scan_node(enum acpi_rimt_node_type type,
+ void *context)
+{
+ struct acpi_rimt_node *rimt_node, *rimt_end;
+ struct acpi_table_rimt *rimt;
+ int i;
+
+ if (!rimt_table)
+ return NULL;
+
+ /* Get the first RIMT node */
+ rimt = (struct acpi_table_rimt *)rimt_table;
+ rimt_node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt,
+ rimt->node_offset);
+ rimt_end = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table,
+ rimt_table->length);
+
+ for (i = 0; i < rimt->num_nodes; i++) {
+ if (WARN_TAINT(rimt_node >= rimt_end, TAINT_FIRMWARE_WORKAROUND,
+ "RIMT node pointer overflows, bad table!\n"))
+ return NULL;
+
+ if (rimt_node->type == type &&
+ ACPI_SUCCESS(rimt_match_node_callback(rimt_node, context)))
+ return rimt_node;
+
+ rimt_node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_node,
+ rimt_node->length);
+ }
+
+ return NULL;
+}
+
+static bool rimt_pcie_rc_supports_ats(struct acpi_rimt_node *node)
+{
+ struct acpi_rimt_pcie_rc *pci_rc;
+
+ pci_rc = (struct acpi_rimt_pcie_rc *)node->node_data;
+ return pci_rc->flags & ACPI_RIMT_PCIE_ATS_SUPPORTED;
+}
+
+static int rimt_iommu_xlate(struct device *dev, struct acpi_rimt_node *node, u32 deviceid)
+{
+ struct fwnode_handle *rimt_fwnode;
+
+ if (!node)
+ return -ENODEV;
+
+ rimt_fwnode = rimt_get_fwnode(node);
+
+ /*
+ * The IOMMU drivers may not be probed yet.
+ * Defer the IOMMU configuration
+ */
+ if (!rimt_fwnode)
+ return -EPROBE_DEFER;
+
+ return acpi_iommu_fwspec_init(dev, deviceid, rimt_fwnode);
+}
+
+struct rimt_pci_alias_info {
+ struct device *dev;
+ struct acpi_rimt_node *node;
+ const struct iommu_ops *ops;
+};
+
+static int rimt_id_map(struct acpi_rimt_id_mapping *map, u8 type, u32 rid_in, u32 *rid_out)
+{
+ if (rid_in < map->source_id_base ||
+ (rid_in > map->source_id_base + map->num_ids))
+ return -ENXIO;
+
+ *rid_out = map->dest_id_base + (rid_in - map->source_id_base);
+ return 0;
+}
+
+static struct acpi_rimt_node *rimt_node_get_id(struct acpi_rimt_node *node,
+ u32 *id_out, int index)
+{
+ struct acpi_rimt_platform_device *plat_node;
+ u32 id_mapping_offset, num_id_mapping;
+ struct acpi_rimt_pcie_rc *pci_node;
+ struct acpi_rimt_id_mapping *map;
+ struct acpi_rimt_node *parent;
+
+ if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
+ pci_node = (struct acpi_rimt_pcie_rc *)&node->node_data;
+ id_mapping_offset = pci_node->id_mapping_offset;
+ num_id_mapping = pci_node->num_id_mappings;
+ } else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) {
+ plat_node = (struct acpi_rimt_platform_device *)&node->node_data;
+ id_mapping_offset = plat_node->id_mapping_offset;
+ num_id_mapping = plat_node->num_id_mappings;
+ } else {
+ return NULL;
+ }
+
+ if (!id_mapping_offset || !num_id_mapping || index >= num_id_mapping)
+ return NULL;
+
+ map = ACPI_ADD_PTR(struct acpi_rimt_id_mapping, node,
+ id_mapping_offset + index * sizeof(*map));
+
+ /* Firmware bug! */
+ if (!map->dest_offset) {
+ pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
+ node, node->type);
+ return NULL;
+ }
+
+ parent = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table, map->dest_offset);
+
+ if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE ||
+ node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
+ *id_out = map->dest_id_base;
+ return parent;
+ }
+
+ return NULL;
+}
+
+/*
+ * RISC-V supports an IOMMU either as a PCI device or as a platform device.
+ * When it is a platform device, there should be a namespace device along
+ * with the RIMT. To create the link between the RIMT information and the
+ * platform device, the IOMMU driver should register itself with the RIMT
+ * module. This is true for a PCI-based IOMMU as well.
+ */
+int rimt_iommu_register(struct device *dev)
+{
+ struct fwnode_handle *rimt_fwnode;
+ struct acpi_rimt_node *node;
+
+ node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_IOMMU, dev);
+ if (!node) {
+ pr_err("Could not find IOMMU node in RIMT\n");
+ return -ENODEV;
+ }
+
+ if (dev_is_pci(dev)) {
+ rimt_fwnode = acpi_alloc_fwnode_static();
+ if (!rimt_fwnode)
+ return -ENOMEM;
+
+ rimt_fwnode->dev = dev;
+ if (!dev->fwnode)
+ dev->fwnode = rimt_fwnode;
+
+ rimt_set_fwnode(node, rimt_fwnode);
+ } else {
+ rimt_set_fwnode(node, dev->fwnode);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_IOMMU_API
+
+static struct acpi_rimt_node *rimt_node_map_id(struct acpi_rimt_node *node,
+ u32 id_in, u32 *id_out,
+ u8 type_mask)
+{
+ struct acpi_rimt_platform_device *plat_node;
+ u32 id_mapping_offset, num_id_mapping;
+ struct acpi_rimt_pcie_rc *pci_node;
+ u32 id = id_in;
+
+ /* Parse the ID mapping tree to find specified node type */
+ while (node) {
+ struct acpi_rimt_id_mapping *map;
+ int i, rc = 0;
+ u32 map_id = id;
+
+ if (RIMT_TYPE_MASK(node->type) & type_mask) {
+ if (id_out)
+ *id_out = id;
+ return node;
+ }
+
+ if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
+ pci_node = (struct acpi_rimt_pcie_rc *)&node->node_data;
+ id_mapping_offset = pci_node->id_mapping_offset;
+ num_id_mapping = pci_node->num_id_mappings;
+ } else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) {
+ plat_node = (struct acpi_rimt_platform_device *)&node->node_data;
+ id_mapping_offset = plat_node->id_mapping_offset;
+ num_id_mapping = plat_node->num_id_mappings;
+ } else {
+ goto fail_map;
+ }
+
+ if (!id_mapping_offset || !num_id_mapping)
+ goto fail_map;
+
+ map = ACPI_ADD_PTR(struct acpi_rimt_id_mapping, node,
+ id_mapping_offset);
+
+ /* Firmware bug! */
+ if (!map->dest_offset) {
+ pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
+ node, node->type);
+ goto fail_map;
+ }
+
+ /* Do the ID translation */
+ for (i = 0; i < num_id_mapping; i++, map++) {
+ rc = rimt_id_map(map, node->type, map_id, &id);
+ if (!rc)
+ break;
+ }
+
+ if (i == num_id_mapping)
+ goto fail_map;
+
+ node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table,
+ rc ? 0 : map->dest_offset);
+ }
+
+fail_map:
+ /* Map input ID to output ID unchanged on mapping failure */
+ if (id_out)
+ *id_out = id_in;
+
+ return NULL;
+}
+
+static struct acpi_rimt_node *rimt_node_map_platform_id(struct acpi_rimt_node *node, u32 *id_out,
+ u8 type_mask, int index)
+{
+ struct acpi_rimt_node *parent;
+ u32 id;
+
+ parent = rimt_node_get_id(node, &id, index);
+ if (!parent)
+ return NULL;
+
+ if (!(RIMT_TYPE_MASK(parent->type) & type_mask))
+ parent = rimt_node_map_id(parent, id, id_out, type_mask);
+ else
+ if (id_out)
+ *id_out = id;
+
+ return parent;
+}
+
+static int rimt_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct rimt_pci_alias_info *info = data;
+ struct acpi_rimt_node *parent;
+ u32 deviceid;
+
+ parent = rimt_node_map_id(info->node, alias, &deviceid, RIMT_IOMMU_TYPE);
+ return rimt_iommu_xlate(info->dev, parent, deviceid);
+}
+
+static int rimt_plat_iommu_map(struct device *dev, struct acpi_rimt_node *node)
+{
+ struct acpi_rimt_node *parent;
+ int err = -ENODEV, i = 0;
+ u32 deviceid = 0;
+
+ do {
+ parent = rimt_node_map_platform_id(node, &deviceid,
+ RIMT_IOMMU_TYPE,
+ i++);
+
+ if (parent)
+ err = rimt_iommu_xlate(dev, parent, deviceid);
+ } while (parent && !err);
+
+ return err;
+}
+
+static int rimt_plat_iommu_map_id(struct device *dev,
+ struct acpi_rimt_node *node,
+ const u32 *in_id)
+{
+ struct acpi_rimt_node *parent;
+ u32 deviceid;
+
+ parent = rimt_node_map_id(node, *in_id, &deviceid, RIMT_IOMMU_TYPE);
+ if (parent)
+ return rimt_iommu_xlate(dev, parent, deviceid);
+
+ return -ENODEV;
+}
+
+/**
+ * rimt_iommu_configure_id - Set-up IOMMU configuration for a device.
+ *
+ * @dev: device to configure
+ * @id_in: optional input id const value pointer
+ *
+ * Returns: 0 on success, <0 on failure
+ */
+int rimt_iommu_configure_id(struct device *dev, const u32 *id_in)
+{
+ struct acpi_rimt_node *node;
+ int err = -ENODEV;
+
+ if (dev_is_pci(dev)) {
+ struct iommu_fwspec *fwspec;
+ struct pci_bus *bus = to_pci_dev(dev)->bus;
+ struct rimt_pci_alias_info info = { .dev = dev };
+
+ node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX, &bus->dev);
+ if (!node)
+ return -ENODEV;
+
+ info.node = node;
+ err = pci_for_each_dma_alias(to_pci_dev(dev),
+ rimt_pci_iommu_init, &info);
+
+ fwspec = dev_iommu_fwspec_get(dev);
+ if (fwspec && rimt_pcie_rc_supports_ats(node))
+ fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
+ } else {
+ node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_PLAT_DEVICE, dev);
+ if (!node)
+ return -ENODEV;
+
+ err = id_in ? rimt_plat_iommu_map_id(dev, node, id_in) :
+ rimt_plat_iommu_map(dev, node);
+ }
+
+ return err;
+}
+
+#endif
+
+void __init riscv_acpi_rimt_init(void)
+{
+ acpi_status status;
+
+ /*
+ * rimt_table will be used at runtime after the RIMT init, so we
+ * don't need to call acpi_put_table() to release the RIMT table
+ * mapping.
+ */
+ status = acpi_get_table(ACPI_SIG_RIMT, 0, &rimt_table);
+ if (ACPI_FAILURE(status)) {
+ if (status != AE_NOT_FOUND) {
+ const char *msg = acpi_format_exception(status);
+
+ pr_err("Failed to get table, %s\n", msg);
+ }
+
+ return;
+ }
+}
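
The core of rimt_node_map_id() above is rimt_id_map(), a windowed ID remap: an input ID inside the source window is shifted into the destination window by the same offset. A standalone sketch mirroring that arithmetic (including the inclusive upper bound used by the patch), with hypothetical names:

#include <linux/errno.h>
#include <linux/types.h>

struct example_id_map {			/* hypothetical */
	u32 source_id_base;
	u32 dest_id_base;
	u32 num_ids;
};

static int example_id_map(const struct example_id_map *map,
			  u32 rid_in, u32 *rid_out)
{
	if (rid_in < map->source_id_base ||
	    rid_in > map->source_id_base + map->num_ids)
		return -ENXIO;

	*rid_out = map->dest_id_base + (rid_in - map->source_id_base);
	return 0;
}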
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index fb1fe9f3b1a3..ef16d58b2949 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/acpi_iort.h>
+#include <linux/acpi_rimt.h>
#include <linux/acpi_viot.h>
#include <linux/iommu.h>
#include <linux/signal.h>
@@ -845,6 +846,8 @@ static bool acpi_info_matches_ids(struct acpi_device_info *info,
static const char * const acpi_ignore_dep_ids[] = {
"PNP0D80", /* Windows-compatible System Power Management Controller */
"INT33BD", /* Intel Baytrail Mailbox Device */
+ "INTC10DE", /* Intel CVS LNL */
+ "INTC10E0", /* Intel CVS ARL */
"LATT2021", /* Lattice FW Update Client Driver */
NULL
};
@@ -858,6 +861,8 @@ static const char * const acpi_honor_dep_ids[] = {
"INTC10CF", /* IVSC (MTL) driver must be loaded to allow i2c access to camera sensors */
"RSCV0001", /* RISC-V PLIC */
"RSCV0002", /* RISC-V APLIC */
+ "RSCV0005", /* RISC-V SBI MPXY MBOX */
+ "RSCV0006", /* RISC-V RPMI SYSMSI */
"PNP0C0F", /* PCI Link Device */
NULL
};
@@ -1629,7 +1634,10 @@ static int acpi_iommu_configure_id(struct device *dev, const u32 *id_in)
err = iort_iommu_configure_id(dev, id_in);
if (err && err != -EPROBE_DEFER)
+ err = rimt_iommu_configure_id(dev, id_in);
+ if (err && err != -EPROBE_DEFER)
err = viot_iommu_configure(dev);
+
mutex_unlock(&iommu_probe_device_lock);
return err;
@@ -2702,6 +2710,7 @@ void __init acpi_scan_init(void)
acpi_memory_hotplug_init();
acpi_watchdog_init();
acpi_pnp_init();
+ acpi_power_resources_init();
acpi_int340x_thermal_init();
acpi_init_lpit();
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index cd36a97b0ea2..d4d52d5e9016 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -141,12 +141,23 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
case ACPI_DBG2_16550_NVIDIA:
uart = "uart";
break;
+ case ACPI_DBG2_RISCV_SBI_CON:
+ uart = "sbi";
+ break;
default:
err = -ENOENT;
goto done;
}
- switch (table->baud_rate) {
+ /*
+ * SPCR 1.09 defines the Precise Baud Rate field, which contains a
+ * specific non-zero baud rate that overrides the value of the Configured
+ * Baud Rate field. If this field is zero or not present, the Configured
+ * Baud Rate is used.
+ */
+ if (table->precise_baudrate)
+ baud_rate = table->precise_baudrate;
+ else switch (table->baud_rate) {
case 0:
/*
* SPCR 1.04 defines 0 as a preconfigured state of UART.
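
The resulting precedence is simple: a non-zero precise_baudrate wins, otherwise the coarse Configured Baud Rate enumeration is decoded. A sketch of that rule (the enumeration values shown are illustrative):

#include <linux/types.h>

static u32 example_pick_baud(u32 precise, u8 configured)
{
	if (precise)
		return precise;		/* SPCR 1.09 precise value wins */

	switch (configured) {		/* illustrative subset of the table */
	case 3:
		return 9600;
	case 7:
		return 115200;
	default:
		return 0;		/* 0: keep the firmware-configured rate */
	}
}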
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index fa9bb8c8ce95..57fc8bc56166 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -408,7 +408,7 @@ static const char table_sigs[][ACPI_NAMESEG_SIZE] __nonstring_array __initconst
ACPI_SIG_PSDT, ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT,
ACPI_SIG_IORT, ACPI_SIG_NFIT, ACPI_SIG_HMAT, ACPI_SIG_PPTT,
ACPI_SIG_NHLT, ACPI_SIG_AEST, ACPI_SIG_CEDT, ACPI_SIG_AGDI,
- ACPI_SIG_NBFT };
+ ACPI_SIG_NBFT, ACPI_SIG_SWFT};
#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 5c2defe55898..8537395b417b 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -924,7 +924,7 @@ static int acpi_thermal_suspend(struct device *dev)
static int acpi_thermal_resume(struct device *dev)
{
struct acpi_thermal *tz;
- int i, j, power_state;
+ int i, j;
if (!dev)
return -EINVAL;
@@ -939,10 +939,8 @@ static int acpi_thermal_resume(struct device *dev)
if (!acpi_thermal_trip_valid(acpi_trip))
break;
- for (j = 0; j < acpi_trip->devices.count; j++) {
- acpi_bus_update_power(acpi_trip->devices.handles[j],
- &power_state);
- }
+ for (j = 0; j < acpi_trip->devices.count; j++)
+ acpi_bus_update_power(acpi_trip->devices.handles[j], NULL);
}
acpi_queue_thermal_check(tz);
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index d507d5e08435..4cf74f173c78 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -948,6 +948,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Mipad2"),
},
},
+ /* https://gitlab.freedesktop.org/drm/amd/-/issues/4512 */
+ {
+ .callback = video_detect_force_native,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82K8"),
+ },
+ },
{ },
};
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index 5b3b8041f827..e2e402c9d175 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -4,6 +4,7 @@ menu "Android"
config ANDROID_BINDER_IPC
bool "Android Binder IPC Driver"
depends on MMU
+ depends on NET
default n
help
Binder is used in Android for both communication between processes,
@@ -13,6 +14,19 @@ config ANDROID_BINDER_IPC
Android process, using Binder to identify, invoke and pass arguments
between said processes.
+config ANDROID_BINDER_IPC_RUST
+ bool "Rust version of Android Binder IPC Driver"
+ depends on RUST && MMU && !ANDROID_BINDER_IPC
+ help
+ This enables the Rust implementation of the Binder driver.
+
+ Binder is used in Android for both communication between processes,
+ and remote method invocation.
+
+ This means one Android process can call a method/routine in another
+ Android process, using Binder to identify, invoke and pass arguments
+ between said processes.
+
config ANDROID_BINDERFS
bool "Android Binderfs filesystem"
depends on ANDROID_BINDER_IPC
@@ -27,7 +41,7 @@ config ANDROID_BINDERFS
config ANDROID_BINDER_DEVICES
string "Android Binder devices"
- depends on ANDROID_BINDER_IPC
+ depends on ANDROID_BINDER_IPC || ANDROID_BINDER_IPC_RUST
default "binder,hwbinder,vndbinder"
help
Default value for the binder.devices parameter.
diff --git a/drivers/android/Makefile b/drivers/android/Makefile
index c5d47be0276c..e0c650d3898e 100644
--- a/drivers/android/Makefile
+++ b/drivers/android/Makefile
@@ -2,5 +2,6 @@
ccflags-y += -I$(src) # needed for trace events
obj-$(CONFIG_ANDROID_BINDERFS) += binderfs.o
-obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o
+obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o binder_netlink.o
obj-$(CONFIG_ANDROID_BINDER_ALLOC_KUNIT_TEST) += tests/
+obj-$(CONFIG_ANDROID_BINDER_IPC_RUST) += binder/
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 312b462e349d..8c99ceaa303b 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -74,6 +74,7 @@
#include <linux/cacheflush.h>
+#include "binder_netlink.h"
#include "binder_internal.h"
#include "binder_trace.h"
@@ -2993,6 +2994,69 @@ static void binder_set_txn_from_error(struct binder_transaction *t, int id,
binder_thread_dec_tmpref(from);
}
+/**
+ * binder_netlink_report() - report a transaction failure via netlink
+ * @proc: the binder proc sending the transaction
+ * @t: the binder transaction that failed
+ * @data_size: the user provided data size for the transaction
+ * @error: enum binder_driver_return_protocol returned to sender
+ */
+static void binder_netlink_report(struct binder_proc *proc,
+ struct binder_transaction *t,
+ u32 data_size,
+ u32 error)
+{
+ const char *context = proc->context->name;
+ struct sk_buff *skb;
+ void *hdr;
+
+ if (!genl_has_listeners(&binder_nl_family, &init_net,
+ BINDER_NLGRP_REPORT))
+ return;
+
+ trace_binder_netlink_report(context, t, data_size, error);
+
+ skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return;
+
+ hdr = genlmsg_put(skb, 0, 0, &binder_nl_family, 0, BINDER_CMD_REPORT);
+ if (!hdr)
+ goto free_skb;
+
+ if (nla_put_u32(skb, BINDER_A_REPORT_ERROR, error) ||
+ nla_put_string(skb, BINDER_A_REPORT_CONTEXT, context) ||
+ nla_put_u32(skb, BINDER_A_REPORT_FROM_PID, t->from_pid) ||
+ nla_put_u32(skb, BINDER_A_REPORT_FROM_TID, t->from_tid))
+ goto cancel_skb;
+
+ if (t->to_proc &&
+ nla_put_u32(skb, BINDER_A_REPORT_TO_PID, t->to_proc->pid))
+ goto cancel_skb;
+
+ if (t->to_thread &&
+ nla_put_u32(skb, BINDER_A_REPORT_TO_TID, t->to_thread->pid))
+ goto cancel_skb;
+
+ if (t->is_reply && nla_put_flag(skb, BINDER_A_REPORT_IS_REPLY))
+ goto cancel_skb;
+
+ if (nla_put_u32(skb, BINDER_A_REPORT_FLAGS, t->flags) ||
+ nla_put_u32(skb, BINDER_A_REPORT_CODE, t->code) ||
+ nla_put_u32(skb, BINDER_A_REPORT_DATA_SIZE, data_size))
+ goto cancel_skb;
+
+ genlmsg_end(skb, hdr);
+ genlmsg_multicast(&binder_nl_family, skb, 0, BINDER_NLGRP_REPORT,
+ GFP_KERNEL);
+ return;
+
+cancel_skb:
+ genlmsg_cancel(skb, hdr);
+free_skb:
+ nlmsg_free(skb);
+}
+
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
@@ -3042,6 +3106,32 @@ static void binder_transaction(struct binder_proc *proc,
binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
binder_inner_proc_unlock(proc);
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (!t) {
+ binder_txn_error("%d:%d cannot allocate transaction\n",
+ thread->pid, proc->pid);
+ return_error = BR_FAILED_REPLY;
+ return_error_param = -ENOMEM;
+ return_error_line = __LINE__;
+ goto err_alloc_t_failed;
+ }
+ INIT_LIST_HEAD(&t->fd_fixups);
+ binder_stats_created(BINDER_STAT_TRANSACTION);
+ spin_lock_init(&t->lock);
+ t->debug_id = t_debug_id;
+ t->start_time = t_start_time;
+ t->from_pid = proc->pid;
+ t->from_tid = thread->pid;
+ t->sender_euid = task_euid(proc->tsk);
+ t->code = tr->code;
+ t->flags = tr->flags;
+ t->priority = task_nice(current);
+ t->work.type = BINDER_WORK_TRANSACTION;
+ t->is_async = !reply && (tr->flags & TF_ONE_WAY);
+ t->is_reply = reply;
+ if (!reply && !(tr->flags & TF_ONE_WAY))
+ t->from = thread;
+
if (reply) {
binder_inner_proc_lock(proc);
in_reply_to = thread->transaction_stack;
@@ -3228,24 +3318,13 @@ static void binder_transaction(struct binder_proc *proc,
}
binder_inner_proc_unlock(proc);
}
+
+ t->to_proc = target_proc;
+ t->to_thread = target_thread;
if (target_thread)
e->to_thread = target_thread->pid;
e->to_proc = target_proc->pid;
- /* TODO: reuse incoming transaction for reply */
- t = kzalloc(sizeof(*t), GFP_KERNEL);
- if (t == NULL) {
- binder_txn_error("%d:%d cannot allocate transaction\n",
- thread->pid, proc->pid);
- return_error = BR_FAILED_REPLY;
- return_error_param = -ENOMEM;
- return_error_line = __LINE__;
- goto err_alloc_t_failed;
- }
- INIT_LIST_HEAD(&t->fd_fixups);
- binder_stats_created(BINDER_STAT_TRANSACTION);
- spin_lock_init(&t->lock);
-
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (tcomplete == NULL) {
binder_txn_error("%d:%d cannot allocate work for transaction\n",
@@ -3257,9 +3336,6 @@ static void binder_transaction(struct binder_proc *proc,
}
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
- t->debug_id = t_debug_id;
- t->start_time = t_start_time;
-
if (reply)
binder_debug(BINDER_DEBUG_TRANSACTION,
"%d:%d BC_REPLY %d -> %d:%d, data size %lld-%lld-%lld\n",
@@ -3275,19 +3351,6 @@ static void binder_transaction(struct binder_proc *proc,
(u64)tr->data_size, (u64)tr->offsets_size,
(u64)extra_buffers_size);
- if (!reply && !(tr->flags & TF_ONE_WAY))
- t->from = thread;
- else
- t->from = NULL;
- t->from_pid = proc->pid;
- t->from_tid = thread->pid;
- t->sender_euid = task_euid(proc->tsk);
- t->to_proc = target_proc;
- t->to_thread = target_thread;
- t->code = tr->code;
- t->flags = tr->flags;
- t->priority = task_nice(current);
-
if (target_node && target_node->txn_security_ctx) {
u32 secid;
size_t added_size;
@@ -3680,11 +3743,13 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_copy_data_failed;
}
- if (t->buffer->oneway_spam_suspect)
+ if (t->buffer->oneway_spam_suspect) {
tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
- else
+ binder_netlink_report(proc, t, tr->data_size,
+ BR_ONEWAY_SPAM_SUSPECT);
+ } else {
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
- t->work.type = BINDER_WORK_TRANSACTION;
+ }
if (reply) {
binder_enqueue_thread_work(thread, tcomplete);
@@ -3712,7 +3777,6 @@ static void binder_transaction(struct binder_proc *proc,
* the target replies (or there is an error).
*/
binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
- t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
binder_inner_proc_unlock(proc);
@@ -3733,8 +3797,11 @@ static void binder_transaction(struct binder_proc *proc,
* process and is put in a pending queue, waiting for the target
* process to be unfrozen.
*/
- if (return_error == BR_TRANSACTION_PENDING_FROZEN)
+ if (return_error == BR_TRANSACTION_PENDING_FROZEN) {
tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
+ binder_netlink_report(proc, t, tr->data_size,
+ return_error);
+ }
binder_enqueue_thread_work(thread, tcomplete);
if (return_error &&
return_error != BR_TRANSACTION_PENDING_FROZEN)
@@ -3783,9 +3850,6 @@ err_get_secctx_failed:
err_alloc_tcomplete_failed:
if (trace_binder_txn_latency_free_enabled())
binder_txn_latency_free(t);
- kfree(t);
- binder_stats_deleted(BINDER_STAT_TRANSACTION);
-err_alloc_t_failed:
err_bad_todo_list:
err_bad_call_stack:
err_empty_call_stack:
@@ -3796,6 +3860,11 @@ err_invalid_target_handle:
binder_dec_node_tmpref(target_node);
}
+ binder_netlink_report(proc, t, tr->data_size, return_error);
+ kfree(t);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION);
+err_alloc_t_failed:
+
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
"%d:%d transaction %s to %d:%d failed %d/%d/%d, code %u size %lld-%lld line %d\n",
proc->pid, thread->pid, reply ? "reply" :
@@ -6324,13 +6393,13 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
spin_lock(&t->lock);
to_proc = t->to_proc;
seq_printf(m,
- "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d elapsed %lldms",
+ "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld a%d r%d elapsed %lldms",
prefix, t->debug_id, t,
t->from_pid,
t->from_tid,
to_proc ? to_proc->pid : 0,
t->to_thread ? t->to_thread->pid : 0,
- t->code, t->flags, t->priority, t->need_reply,
+ t->code, t->flags, t->priority, t->is_async, t->is_reply,
ktime_ms_delta(current_time, t->start_time));
spin_unlock(&t->lock);
@@ -7062,12 +7131,19 @@ static int __init binder_init(void)
}
}
- ret = init_binderfs();
+ ret = genl_register_family(&binder_nl_family);
if (ret)
goto err_init_binder_device_failed;
+ ret = init_binderfs();
+ if (ret)
+ goto err_init_binderfs_failed;
+
return ret;
+err_init_binderfs_failed:
+ genl_unregister_family(&binder_nl_family);
+
err_init_binder_device_failed:
hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
misc_deregister(&device->miscdev);
@@ -7088,5 +7164,3 @@ device_initcall(binder_init);
#define CREATE_TRACE_POINTS
#include "binder_trace.h"
-
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/android/binder/Makefile b/drivers/android/binder/Makefile
new file mode 100644
index 000000000000..09eabb527fa0
--- /dev/null
+++ b/drivers/android/binder/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y += -I$(src) # needed for trace events
+
+obj-$(CONFIG_ANDROID_BINDER_IPC_RUST) += rust_binder.o
+rust_binder-y := \
+ rust_binder_main.o \
+ rust_binderfs.o \
+ rust_binder_events.o \
+ page_range_helper.o
diff --git a/drivers/android/binder/allocation.rs b/drivers/android/binder/allocation.rs
new file mode 100644
index 000000000000..7f65a9c3a0e5
--- /dev/null
+++ b/drivers/android/binder/allocation.rs
@@ -0,0 +1,602 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use core::mem::{size_of, size_of_val, MaybeUninit};
+use core::ops::Range;
+
+use kernel::{
+ bindings,
+ fs::file::{File, FileDescriptorReservation},
+ prelude::*,
+ sync::{aref::ARef, Arc},
+ transmute::{AsBytes, FromBytes},
+ uaccess::UserSliceReader,
+ uapi,
+};
+
+use crate::{
+ deferred_close::DeferredFdCloser,
+ defs::*,
+ node::{Node, NodeRef},
+ process::Process,
+ DArc,
+};
+
+#[derive(Default)]
+pub(crate) struct AllocationInfo {
+ /// Range within the allocation where we can find the offsets to the object descriptors.
+ pub(crate) offsets: Option<Range<usize>>,
+ /// The target node of the transaction this allocation is associated to.
+ /// Not set for replies.
+ pub(crate) target_node: Option<NodeRef>,
+ /// When this allocation is dropped, call `pending_oneway_finished` on the node.
+ ///
+ /// This is used to serialize oneway transactions on the same node. Binder guarantees that
+ /// oneway transactions to the same node are delivered sequentially in the order they are sent.
+ pub(crate) oneway_node: Option<DArc<Node>>,
+ /// Zero the data in the buffer on free.
+ pub(crate) clear_on_free: bool,
+ /// List of files embedded in this transaction.
+ file_list: FileList,
+}
+
+/// Represents an allocation that the kernel is currently using.
+///
+/// When allocations are idle, the range allocator holds the data related to them.
+///
+/// # Invariants
+///
+/// This allocation corresponds to an allocation in the range allocator, so the relevant pages are
+/// marked in use in the page range.
+pub(crate) struct Allocation {
+ pub(crate) offset: usize,
+ size: usize,
+ pub(crate) ptr: usize,
+ pub(crate) process: Arc<Process>,
+ allocation_info: Option<AllocationInfo>,
+ free_on_drop: bool,
+ pub(crate) oneway_spam_detected: bool,
+ #[allow(dead_code)]
+ pub(crate) debug_id: usize,
+}
+
+impl Allocation {
+ pub(crate) fn new(
+ process: Arc<Process>,
+ debug_id: usize,
+ offset: usize,
+ size: usize,
+ ptr: usize,
+ oneway_spam_detected: bool,
+ ) -> Self {
+ Self {
+ process,
+ offset,
+ size,
+ ptr,
+ debug_id,
+ oneway_spam_detected,
+ allocation_info: None,
+ free_on_drop: true,
+ }
+ }
+
+ fn size_check(&self, offset: usize, size: usize) -> Result {
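+ // Both checks below are needed: for example, with `self.size == 100`,
+ // the pair (offset = usize::MAX, size = 2) wraps around to 1 and would
+ // pass the range comparison alone, so the overflow check rejects it.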
+ let overflow_fail = offset.checked_add(size).is_none();
+ let cmp_size_fail = offset.wrapping_add(size) > self.size;
+ if overflow_fail || cmp_size_fail {
+ return Err(EFAULT);
+ }
+ Ok(())
+ }
+
+ pub(crate) fn copy_into(
+ &self,
+ reader: &mut UserSliceReader,
+ offset: usize,
+ size: usize,
+ ) -> Result {
+ self.size_check(offset, size)?;
+
+ // SAFETY: While this object exists, the range allocator will keep the range allocated, and
+ // in turn, the pages will be marked as in use.
+ unsafe {
+ self.process
+ .pages
+ .copy_from_user_slice(reader, self.offset + offset, size)
+ }
+ }
+
+ pub(crate) fn read<T: FromBytes>(&self, offset: usize) -> Result<T> {
+ self.size_check(offset, size_of::<T>())?;
+
+ // SAFETY: While this object exists, the range allocator will keep the range allocated, and
+ // in turn, the pages will be marked as in use.
+ unsafe { self.process.pages.read(self.offset + offset) }
+ }
+
+ pub(crate) fn write<T: ?Sized>(&self, offset: usize, obj: &T) -> Result {
+ self.size_check(offset, size_of_val::<T>(obj))?;
+
+ // SAFETY: While this object exists, the range allocator will keep the range allocated, and
+ // in turn, the pages will be marked as in use.
+ unsafe { self.process.pages.write(self.offset + offset, obj) }
+ }
+
+ pub(crate) fn fill_zero(&self) -> Result {
+ // SAFETY: While this object exists, the range allocator will keep the range allocated, and
+ // in turn, the pages will be marked as in use.
+ unsafe { self.process.pages.fill_zero(self.offset, self.size) }
+ }
+
+ pub(crate) fn keep_alive(mut self) {
+ self.process
+ .buffer_make_freeable(self.offset, self.allocation_info.take());
+ self.free_on_drop = false;
+ }
+
+ pub(crate) fn set_info(&mut self, info: AllocationInfo) {
+ self.allocation_info = Some(info);
+ }
+
+ pub(crate) fn get_or_init_info(&mut self) -> &mut AllocationInfo {
+ self.allocation_info.get_or_insert_with(Default::default)
+ }
+
+ pub(crate) fn set_info_offsets(&mut self, offsets: Range<usize>) {
+ self.get_or_init_info().offsets = Some(offsets);
+ }
+
+ pub(crate) fn set_info_oneway_node(&mut self, oneway_node: DArc<Node>) {
+ self.get_or_init_info().oneway_node = Some(oneway_node);
+ }
+
+ pub(crate) fn set_info_clear_on_drop(&mut self) {
+ self.get_or_init_info().clear_on_free = true;
+ }
+
+ pub(crate) fn set_info_target_node(&mut self, target_node: NodeRef) {
+ self.get_or_init_info().target_node = Some(target_node);
+ }
+
+ /// Reserve enough space to push at least `num_fds` fds.
+ pub(crate) fn info_add_fd_reserve(&mut self, num_fds: usize) -> Result {
+ self.get_or_init_info()
+ .file_list
+ .files_to_translate
+ .reserve(num_fds, GFP_KERNEL)?;
+
+ Ok(())
+ }
+
+ pub(crate) fn info_add_fd(
+ &mut self,
+ file: ARef<File>,
+ buffer_offset: usize,
+ close_on_free: bool,
+ ) -> Result {
+ self.get_or_init_info().file_list.files_to_translate.push(
+ FileEntry {
+ file,
+ buffer_offset,
+ close_on_free,
+ },
+ GFP_KERNEL,
+ )?;
+
+ Ok(())
+ }
+
+ pub(crate) fn set_info_close_on_free(&mut self, cof: FdsCloseOnFree) {
+ self.get_or_init_info().file_list.close_on_free = cof.0;
+ }
+
+ pub(crate) fn translate_fds(&mut self) -> Result<TranslatedFds> {
+ let file_list = match self.allocation_info.as_mut() {
+ Some(info) => &mut info.file_list,
+ None => return Ok(TranslatedFds::new()),
+ };
+
+ let files = core::mem::take(&mut file_list.files_to_translate);
+
+ let num_close_on_free = files.iter().filter(|entry| entry.close_on_free).count();
+ let mut close_on_free = KVec::with_capacity(num_close_on_free, GFP_KERNEL)?;
+
+ let mut reservations = KVec::with_capacity(files.len(), GFP_KERNEL)?;
+ for file_info in files {
+ let res = FileDescriptorReservation::get_unused_fd_flags(bindings::O_CLOEXEC)?;
+ let fd = res.reserved_fd();
+ self.write::<u32>(file_info.buffer_offset, &fd)?;
+
+ reservations.push(
+ Reservation {
+ res,
+ file: file_info.file,
+ },
+ GFP_KERNEL,
+ )?;
+ if file_info.close_on_free {
+ close_on_free.push(fd, GFP_KERNEL)?;
+ }
+ }
+
+ Ok(TranslatedFds {
+ reservations,
+ close_on_free: FdsCloseOnFree(close_on_free),
+ })
+ }
+
+ /// Should the looper return to userspace when freeing this allocation?
+ pub(crate) fn looper_need_return_on_free(&self) -> bool {
+ // Closing fds involves pushing task_work for execution when we return to userspace. Hence,
+ // we should return to userspace asap if we are closing fds.
+ match self.allocation_info {
+ Some(ref info) => !info.file_list.close_on_free.is_empty(),
+ None => false,
+ }
+ }
+}
+
+impl Drop for Allocation {
+ fn drop(&mut self) {
+ if !self.free_on_drop {
+ return;
+ }
+
+ if let Some(mut info) = self.allocation_info.take() {
+ if let Some(oneway_node) = info.oneway_node.as_ref() {
+ oneway_node.pending_oneway_finished();
+ }
+
+ info.target_node = None;
+
+ if let Some(offsets) = info.offsets.clone() {
+ let view = AllocationView::new(self, offsets.start);
+ for i in offsets.step_by(size_of::<usize>()) {
+ if view.cleanup_object(i).is_err() {
+ pr_warn!("Error cleaning up object at offset {}\n", i)
+ }
+ }
+ }
+
+ for &fd in &info.file_list.close_on_free {
+ let closer = match DeferredFdCloser::new(GFP_KERNEL) {
+ Ok(closer) => closer,
+ Err(kernel::alloc::AllocError) => {
+ // Ignore allocation failures.
+ break;
+ }
+ };
+
+ // Here, we ignore errors. The operation can fail if the fd is not valid, or if the
+ // method is called from a kthread. However, this is always called from a syscall,
+ // so the latter case cannot happen, and we don't care about the first case.
+ let _ = closer.close_fd(fd);
+ }
+
+ if info.clear_on_free {
+ if let Err(e) = self.fill_zero() {
+ pr_warn!("Failed to clear data on free: {:?}", e);
+ }
+ }
+ }
+
+ self.process.buffer_raw_free(self.ptr);
+ }
+}
+
+/// A wrapper around `Allocation` that is being created.
+///
+/// If the allocation is destroyed while wrapped in this wrapper, then the allocation will be
+/// considered to be part of a failed transaction. Successful transactions avoid that by calling
+/// `success`, which skips the destructor.
+#[repr(transparent)]
+pub(crate) struct NewAllocation(pub(crate) Allocation);
+
+impl NewAllocation {
+ pub(crate) fn success(self) -> Allocation {
+ // This skips the destructor.
+ //
+ // SAFETY: This type is `#[repr(transparent)]`, so the layout matches.
+ unsafe { core::mem::transmute(self) }
+ }
+}
+
+impl core::ops::Deref for NewAllocation {
+ type Target = Allocation;
+ fn deref(&self) -> &Allocation {
+ &self.0
+ }
+}
+
+impl core::ops::DerefMut for NewAllocation {
+ fn deref_mut(&mut self) -> &mut Allocation {
+ &mut self.0
+ }
+}
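+
+// Usage sketch (hypothetical caller; `allocate_buffer` is an assumed name, not
+// an API shown here): the transaction code fills in a `NewAllocation` and only
+// calls `success()` once delivery is committed, so any early `?` return still
+// runs the failed-transaction cleanup in `Allocation::drop`:
+//
+//     let mut alloc: NewAllocation = proc.allocate_buffer(size)?;
+//     alloc.copy_into(&mut reader, 0, size)?; // DerefMut to Allocation
+//     let alloc: Allocation = alloc.success(); // commit: skips the destructor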
+
+/// A view into the beginning of an allocation.
+///
+/// All attempts to read or write outside of the view will fail. To intentionally access outside of
+/// this view, use the `alloc` field of this struct directly.
+pub(crate) struct AllocationView<'a> {
+ pub(crate) alloc: &'a mut Allocation,
+ limit: usize,
+}
+
+impl<'a> AllocationView<'a> {
+ pub(crate) fn new(alloc: &'a mut Allocation, limit: usize) -> Self {
+ AllocationView { alloc, limit }
+ }
+
+ pub(crate) fn read<T: FromBytes>(&self, offset: usize) -> Result<T> {
+ if offset.checked_add(size_of::<T>()).ok_or(EINVAL)? > self.limit {
+ return Err(EINVAL);
+ }
+ self.alloc.read(offset)
+ }
+
+ pub(crate) fn write<T: AsBytes>(&self, offset: usize, obj: &T) -> Result {
+ if offset.checked_add(size_of::<T>()).ok_or(EINVAL)? > self.limit {
+ return Err(EINVAL);
+ }
+ self.alloc.write(offset, obj)
+ }
+
+ pub(crate) fn copy_into(
+ &self,
+ reader: &mut UserSliceReader,
+ offset: usize,
+ size: usize,
+ ) -> Result {
+ if offset.checked_add(size).ok_or(EINVAL)? > self.limit {
+ return Err(EINVAL);
+ }
+ self.alloc.copy_into(reader, offset, size)
+ }
+
+ pub(crate) fn transfer_binder_object(
+ &self,
+ offset: usize,
+ obj: &uapi::flat_binder_object,
+ strong: bool,
+ node_ref: NodeRef,
+ ) -> Result {
+ let mut newobj = FlatBinderObject::default();
+ let node = node_ref.node.clone();
+ if Arc::ptr_eq(&node_ref.node.owner, &self.alloc.process) {
+ // The receiving process is the owner of the node, so send it a binder object (instead
+ // of a handle).
+ let (ptr, cookie) = node.get_id();
+ newobj.hdr.type_ = if strong {
+ BINDER_TYPE_BINDER
+ } else {
+ BINDER_TYPE_WEAK_BINDER
+ };
+ newobj.flags = obj.flags;
+ newobj.__bindgen_anon_1.binder = ptr as _;
+ newobj.cookie = cookie as _;
+ self.write(offset, &newobj)?;
+ // Increment the user ref count on the node. It will be decremented as part of the
+ // destruction of the buffer, when we see a binder or weak-binder object.
+ node.update_refcount(true, 1, strong);
+ } else {
+ // The receiving process is different from the owner, so we need to insert a handle to
+ // the binder object.
+ let handle = self
+ .alloc
+ .process
+ .as_arc_borrow()
+ .insert_or_update_handle(node_ref, false)?;
+ newobj.hdr.type_ = if strong {
+ BINDER_TYPE_HANDLE
+ } else {
+ BINDER_TYPE_WEAK_HANDLE
+ };
+ newobj.flags = obj.flags;
+ newobj.__bindgen_anon_1.handle = handle;
+ if self.write(offset, &newobj).is_err() {
+ // Decrement ref count on the handle we just created.
+ let _ = self
+ .alloc
+ .process
+ .as_arc_borrow()
+ .update_ref(handle, false, strong);
+ return Err(EINVAL);
+ }
+ }
+
+ Ok(())
+ }
+
+ fn cleanup_object(&self, index_offset: usize) -> Result {
+ let offset = self.alloc.read(index_offset)?;
+ let header = self.read::<BinderObjectHeader>(offset)?;
+ match header.type_ {
+ BINDER_TYPE_WEAK_BINDER | BINDER_TYPE_BINDER => {
+ let obj = self.read::<FlatBinderObject>(offset)?;
+ let strong = header.type_ == BINDER_TYPE_BINDER;
+ // SAFETY: The type is `BINDER_TYPE_{WEAK_}BINDER`, so the `binder` field is
+ // populated.
+ let ptr = unsafe { obj.__bindgen_anon_1.binder };
+ let cookie = obj.cookie;
+ self.alloc.process.update_node(ptr, cookie, strong);
+ Ok(())
+ }
+ BINDER_TYPE_WEAK_HANDLE | BINDER_TYPE_HANDLE => {
+ let obj = self.read::<FlatBinderObject>(offset)?;
+ let strong = header.type_ == BINDER_TYPE_HANDLE;
+ // SAFETY: The type is `BINDER_TYPE_{WEAK_}HANDLE`, so the `handle` field is
+ // populated.
+ let handle = unsafe { obj.__bindgen_anon_1.handle };
+ self.alloc
+ .process
+ .as_arc_borrow()
+ .update_ref(handle, false, strong)
+ }
+ _ => Ok(()),
+ }
+ }
+}
+
+/// A binder object as it is serialized.
+///
+/// # Invariants
+///
+/// All bytes must be initialized, and the value of `self.hdr.type_` must be one of the allowed
+/// types.
+#[repr(C)]
+pub(crate) union BinderObject {
+ hdr: uapi::binder_object_header,
+ fbo: uapi::flat_binder_object,
+ fdo: uapi::binder_fd_object,
+ bbo: uapi::binder_buffer_object,
+ fdao: uapi::binder_fd_array_object,
+}
+
+/// A view into a `BinderObject` that can be used in a match statement.
+pub(crate) enum BinderObjectRef<'a> {
+ Binder(&'a mut uapi::flat_binder_object),
+ Handle(&'a mut uapi::flat_binder_object),
+ Fd(&'a mut uapi::binder_fd_object),
+ Ptr(&'a mut uapi::binder_buffer_object),
+ Fda(&'a mut uapi::binder_fd_array_object),
+}
+
+impl BinderObject {
+ pub(crate) fn read_from(reader: &mut UserSliceReader) -> Result<BinderObject> {
+ let object = Self::read_from_inner(|slice| {
+ let read_len = usize::min(slice.len(), reader.len());
+ reader.clone_reader().read_slice(&mut slice[..read_len])?;
+ Ok(())
+ })?;
+
+ // If we used an object type smaller than the largest object size, then we've read more
+ // bytes than we needed to. However, we used `.clone_reader()` to avoid advancing the
+ // original reader. Now, we call `skip` so that the caller's reader is advanced by the
+ // right amount.
+ //
+ // The `skip` call fails if the reader doesn't have `size` bytes available. This could
+ // happen if the type header corresponds to an object type that is larger than the rest of
+ // the reader.
+ //
+ // Any extra bytes beyond the size of the object are inaccessible after this call, so
+ // reading them again from the `reader` later does not result in TOCTOU bugs.
+ reader.skip(object.size())?;
+
+ Ok(object)
+ }
+
+ /// Use the provided reader closure to construct a `BinderObject`.
+ ///
+ /// The closure should write the bytes for the object into the provided slice.
+ pub(crate) fn read_from_inner<R>(reader: R) -> Result<BinderObject>
+ where
+ R: FnOnce(&mut [u8; size_of::<BinderObject>()]) -> Result<()>,
+ {
+ let mut obj = MaybeUninit::<BinderObject>::zeroed();
+
+ // SAFETY: The lengths of `BinderObject` and `[u8; size_of::<BinderObject>()]` are equal,
+ // and the byte array has an alignment requirement of one, so the pointer cast is okay.
+ // Additionally, `obj` was initialized to zeros, so the byte array will not be
+ // uninitialized.
+ (reader)(unsafe { &mut *obj.as_mut_ptr().cast() })?;
+
+ // SAFETY: The entire object is initialized, so accessing this field is safe.
+ let type_ = unsafe { obj.assume_init_ref().hdr.type_ };
+ if Self::type_to_size(type_).is_none() {
+ // The value of `obj.hdr.type_` was invalid.
+ return Err(EINVAL);
+ }
+
+ // SAFETY: All bytes are initialized (since we zeroed them at the start) and we checked
+ // that `self.hdr.type_` is one of the allowed types, so the type invariants are satisfied.
+ unsafe { Ok(obj.assume_init()) }
+ }
+
+ pub(crate) fn as_ref(&mut self) -> BinderObjectRef<'_> {
+ use BinderObjectRef::*;
+ // SAFETY: The constructor ensures that all bytes of `self` are initialized, and all
+ // variants of this union accept all initialized bit patterns.
+ unsafe {
+ match self.hdr.type_ {
+ BINDER_TYPE_WEAK_BINDER | BINDER_TYPE_BINDER => Binder(&mut self.fbo),
+ BINDER_TYPE_WEAK_HANDLE | BINDER_TYPE_HANDLE => Handle(&mut self.fbo),
+ BINDER_TYPE_FD => Fd(&mut self.fdo),
+ BINDER_TYPE_PTR => Ptr(&mut self.bbo),
+ BINDER_TYPE_FDA => Fda(&mut self.fdao),
+ // SAFETY: By the type invariant, the value of `self.hdr.type_` cannot have any
+ // other value than the ones checked above.
+ _ => core::hint::unreachable_unchecked(),
+ }
+ }
+ }
+
+ pub(crate) fn size(&self) -> usize {
+ // SAFETY: The entire object is initialized, so accessing this field is safe.
+ let type_ = unsafe { self.hdr.type_ };
+
+ // SAFETY: The type invariants guarantee that the type field is correct.
+ unsafe { Self::type_to_size(type_).unwrap_unchecked() }
+ }
+
+ fn type_to_size(type_: u32) -> Option<usize> {
+ match type_ {
+ BINDER_TYPE_WEAK_BINDER => Some(size_of::<uapi::flat_binder_object>()),
+ BINDER_TYPE_BINDER => Some(size_of::<uapi::flat_binder_object>()),
+ BINDER_TYPE_WEAK_HANDLE => Some(size_of::<uapi::flat_binder_object>()),
+ BINDER_TYPE_HANDLE => Some(size_of::<uapi::flat_binder_object>()),
+ BINDER_TYPE_FD => Some(size_of::<uapi::binder_fd_object>()),
+ BINDER_TYPE_PTR => Some(size_of::<uapi::binder_buffer_object>()),
+ BINDER_TYPE_FDA => Some(size_of::<uapi::binder_fd_array_object>()),
+ _ => None,
+ }
+ }
+}
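+
+// Consumption sketch: `read_from` validates the header and advances the reader
+// by exactly `size()` bytes, after which the caller dispatches on the variant:
+//
+//     let mut obj = BinderObject::read_from(reader)?;
+//     match obj.as_ref() {
+//         BinderObjectRef::Fd(fdo) => { /* translate the fd for the target */ }
+//         BinderObjectRef::Binder(fbo) => { /* turn the node into a handle */ }
+//         _ => { /* remaining object types */ }
+//     }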
+
+#[derive(Default)]
+struct FileList {
+ files_to_translate: KVec<FileEntry>,
+ close_on_free: KVec<u32>,
+}
+
+struct FileEntry {
+ /// The file for which a descriptor will be created in the recipient process.
+ file: ARef<File>,
+ /// The offset in the buffer where the file descriptor is stored.
+ buffer_offset: usize,
+ /// Whether this fd should be closed when the allocation is freed.
+ close_on_free: bool,
+}
+
+pub(crate) struct TranslatedFds {
+ reservations: KVec<Reservation>,
+ /// If commit is called, then these fds should be closed. (If commit is not called, then they
+ /// shouldn't be closed.)
+ close_on_free: FdsCloseOnFree,
+}
+
+struct Reservation {
+ res: FileDescriptorReservation,
+ file: ARef<File>,
+}
+
+impl TranslatedFds {
+ pub(crate) fn new() -> Self {
+ Self {
+ reservations: KVec::new(),
+ close_on_free: FdsCloseOnFree(KVec::new()),
+ }
+ }
+
+ pub(crate) fn commit(self) -> FdsCloseOnFree {
+ for entry in self.reservations {
+ entry.res.fd_install(entry.file);
+ }
+
+ self.close_on_free
+ }
+}
+
+pub(crate) struct FdsCloseOnFree(KVec<u32>);
diff --git a/drivers/android/binder/context.rs b/drivers/android/binder/context.rs
new file mode 100644
index 000000000000..3d135ec03ca7
--- /dev/null
+++ b/drivers/android/binder/context.rs
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{
+ error::Error,
+ list::{List, ListArc, ListLinks},
+ prelude::*,
+ security,
+ str::{CStr, CString},
+ sync::{Arc, Mutex},
+ task::Kuid,
+};
+
+use crate::{error::BinderError, node::NodeRef, process::Process};
+
+kernel::sync::global_lock! {
+ // SAFETY: We call `init` in the module initializer, so it's initialized before first use.
+ pub(crate) unsafe(uninit) static CONTEXTS: Mutex<ContextList> = ContextList {
+ list: List::new(),
+ };
+}
+
+pub(crate) struct ContextList {
+ list: List<Context>,
+}
+
+pub(crate) fn get_all_contexts() -> Result<KVec<Arc<Context>>> {
+ let lock = CONTEXTS.lock();
+
+ let count = lock.list.iter().count();
+
+ let mut ctxs = KVec::with_capacity(count, GFP_KERNEL)?;
+ for ctx in &lock.list {
+ ctxs.push(Arc::from(ctx), GFP_KERNEL)?;
+ }
+ Ok(ctxs)
+}
+
+/// This struct keeps track of the processes using this context, and which process is the context
+/// manager.
+struct Manager {
+ node: Option<NodeRef>,
+ uid: Option<Kuid>,
+ all_procs: List<Process>,
+}
+
+/// There is one context per binder file (/dev/binder, /dev/hwbinder, etc.).
+#[pin_data]
+pub(crate) struct Context {
+ #[pin]
+ manager: Mutex<Manager>,
+ pub(crate) name: CString,
+ #[pin]
+ links: ListLinks,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for Context { untracked; }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<0> for Context {
+ using ListLinks { self.links };
+ }
+}
+
+impl Context {
+ pub(crate) fn new(name: &CStr) -> Result<Arc<Self>> {
+ let name = CString::try_from(name)?;
+ let list_ctx = ListArc::pin_init::<Error>(
+ try_pin_init!(Context {
+ name,
+ links <- ListLinks::new(),
+ manager <- kernel::new_mutex!(Manager {
+ all_procs: List::new(),
+ node: None,
+ uid: None,
+ }, "Context::manager"),
+ }),
+ GFP_KERNEL,
+ )?;
+
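+ // One reference (the ListArc) goes into the global CONTEXTS list,
+ // while a plain Arc clone is returned to the caller; the context
+ // stays reachable from CONTEXTS until deregister() unlinks it.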
+ let ctx = list_ctx.clone_arc();
+ CONTEXTS.lock().list.push_back(list_ctx);
+
+ Ok(ctx)
+ }
+
+ /// Called when the file for this context is unlinked.
+ ///
+ /// No-op if called twice.
+ pub(crate) fn deregister(&self) {
+ // SAFETY: We never add the context to any other linked list than this one, so it is either
+ // in this list, or not in any list.
+ unsafe { CONTEXTS.lock().list.remove(self) };
+ }
+
+ pub(crate) fn register_process(self: &Arc<Self>, proc: ListArc<Process>) {
+ if !Arc::ptr_eq(self, &proc.ctx) {
+ pr_err!("Context::register_process called on the wrong context.");
+ return;
+ }
+ self.manager.lock().all_procs.push_back(proc);
+ }
+
+ pub(crate) fn deregister_process(self: &Arc<Self>, proc: &Process) {
+ if !Arc::ptr_eq(self, &proc.ctx) {
+ pr_err!("Context::deregister_process called on the wrong context.");
+ return;
+ }
+ // SAFETY: We just checked that this is the right list.
+ unsafe { self.manager.lock().all_procs.remove(proc) };
+ }
+
+ pub(crate) fn set_manager_node(&self, node_ref: NodeRef) -> Result {
+ let mut manager = self.manager.lock();
+ if manager.node.is_some() {
+ pr_warn!("BINDER_SET_CONTEXT_MGR already set");
+ return Err(EBUSY);
+ }
+ security::binder_set_context_mgr(&node_ref.node.owner.cred)?;
+
+ // If the context manager has been set before, ensure that we use the same euid.
+ let caller_uid = Kuid::current_euid();
+ if let Some(ref uid) = manager.uid {
+ if *uid != caller_uid {
+ return Err(EPERM);
+ }
+ }
+
+ manager.node = Some(node_ref);
+ manager.uid = Some(caller_uid);
+ Ok(())
+ }
+
+ pub(crate) fn unset_manager_node(&self) {
+ let node_ref = self.manager.lock().node.take();
+ drop(node_ref);
+ }
+
+ pub(crate) fn get_manager_node(&self, strong: bool) -> Result<NodeRef, BinderError> {
+ self.manager
+ .lock()
+ .node
+ .as_ref()
+ .ok_or_else(BinderError::new_dead)?
+ .clone(strong)
+ .map_err(BinderError::from)
+ }
+
+ pub(crate) fn for_each_proc<F>(&self, mut func: F)
+ where
+ F: FnMut(&Process),
+ {
+ let lock = self.manager.lock();
+ for proc in &lock.all_procs {
+ func(&proc);
+ }
+ }
+
+ pub(crate) fn get_all_procs(&self) -> Result<KVec<Arc<Process>>> {
+ let lock = self.manager.lock();
+ let count = lock.all_procs.iter().count();
+
+ let mut procs = KVec::with_capacity(count, GFP_KERNEL)?;
+ for proc in &lock.all_procs {
+ procs.push(Arc::from(proc), GFP_KERNEL)?;
+ }
+ Ok(procs)
+ }
+
+ pub(crate) fn get_procs_with_pid(&self, pid: i32) -> Result<KVec<Arc<Process>>> {
+ let orig = self.get_all_procs()?;
+ let mut backing = KVec::with_capacity(orig.len(), GFP_KERNEL)?;
+ for proc in orig.into_iter().filter(|proc| proc.task.pid() == pid) {
+ backing.push(proc, GFP_KERNEL)?;
+ }
+ Ok(backing)
+ }
+}
diff --git a/drivers/android/binder/deferred_close.rs b/drivers/android/binder/deferred_close.rs
new file mode 100644
index 000000000000..ac895c04d0cb
--- /dev/null
+++ b/drivers/android/binder/deferred_close.rs
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! Logic for closing files in a deferred manner.
+//!
+//! This file could make sense to have in `kernel::fs`, but it was rejected for being too
+//! Binder-specific.
+
+use core::mem::MaybeUninit;
+use kernel::{
+ alloc::{AllocError, Flags},
+ bindings,
+ prelude::*,
+};
+
+/// Helper used for closing file descriptors in a way that is safe even if the file is currently
+/// held using `fdget`.
+///
+/// Additional motivation can be found in commit 80cd795630d6 ("binder: fix use-after-free due to
+/// ksys_close() during fdget()") and in the comments on `binder_do_fd_close`.
+pub(crate) struct DeferredFdCloser {
+ inner: KBox<DeferredFdCloserInner>,
+}
+
+/// SAFETY: This just holds an allocation with no real content, so there's no safety issue with
+/// moving it across threads.
+unsafe impl Send for DeferredFdCloser {}
+/// SAFETY: This just holds an allocation with no real content, so there's no safety issue with
+/// sharing references to it across threads.
+unsafe impl Sync for DeferredFdCloser {}
+
+/// # Invariants
+///
+/// If the `file` pointer is non-null, then it points at a `struct file` and owns a refcount to
+/// that file.
+#[repr(C)]
+struct DeferredFdCloserInner {
+ twork: MaybeUninit<bindings::callback_head>,
+ file: *mut bindings::file,
+}
+
+impl DeferredFdCloser {
+ /// Create a new [`DeferredFdCloser`].
+ pub(crate) fn new(flags: Flags) -> Result<Self, AllocError> {
+ Ok(Self {
+ // INVARIANT: The `file` pointer is null, so the type invariant does not apply.
+ inner: KBox::new(
+ DeferredFdCloserInner {
+ twork: MaybeUninit::uninit(),
+ file: core::ptr::null_mut(),
+ },
+ flags,
+ )?,
+ })
+ }
+
+ /// Schedule a task work that closes the file descriptor when this task returns to userspace.
+ ///
+ /// Fails if this is called from a context where we cannot run work when returning to
+ /// userspace. (E.g., from a kthread.)
+ pub(crate) fn close_fd(self, fd: u32) -> Result<(), DeferredFdCloseError> {
+ use bindings::task_work_notify_mode_TWA_RESUME as TWA_RESUME;
+
+ // In this method, we schedule the task work before closing the file. This is because
+ // scheduling a task work is fallible, and we need to know whether it will fail before we
+ // attempt to close the file.
+
+ // Task works are not available on kthreads.
+ let current = kernel::current!();
+
+ // Check if this is a kthread.
+ // SAFETY: Reading `flags` from a task is always okay.
+ if unsafe { ((*current.as_ptr()).flags & bindings::PF_KTHREAD) != 0 } {
+ return Err(DeferredFdCloseError::TaskWorkUnavailable);
+ }
+
+ // Transfer ownership of the box's allocation to a raw pointer. This disables the
+ // destructor, so we must manually convert it back to a KBox to drop it.
+ //
+ // Until we convert it back to a `KBox`, there are no aliasing requirements on this
+ // pointer.
+ let inner = KBox::into_raw(self.inner);
+
+ // The `callback_head` field is first in the struct, so this cast correctly gives us a
+ // pointer to the field.
+ let callback_head = inner.cast::<bindings::callback_head>();
+ // SAFETY: This pointer offset operation does not go out-of-bounds.
+ let file_field = unsafe { core::ptr::addr_of_mut!((*inner).file) };
+
+ let current = current.as_ptr();
+
+ // SAFETY: This function currently has exclusive access to the `DeferredFdCloserInner`, so
+ // it is okay for us to perform unsynchronized writes to its `callback_head` field.
+ unsafe { bindings::init_task_work(callback_head, Some(Self::do_close_fd)) };
+
+ // SAFETY: This inserts the `DeferredFdCloserInner` into the task workqueue for the current
+ // task. If this operation is successful, then this transfers exclusive ownership of the
+ // `callback_head` field to the C side until it calls `do_close_fd`, and we don't touch or
+ // invalidate the field during that time.
+ //
+ // When the C side calls `do_close_fd`, the safety requirements of that method are
+ // satisfied because when a task work is executed, the callback is given ownership of the
+ // pointer.
+ //
+ // The file pointer is currently null. If it is changed to be non-null before `do_close_fd`
+ // is called, then that change happens due to the write at the end of this function, and
+ // that write has a safety comment that explains why the refcount can be dropped when
+ // `do_close_fd` runs.
+ let res = unsafe { bindings::task_work_add(current, callback_head, TWA_RESUME) };
+
+ if res != 0 {
+ // SAFETY: Scheduling the task work failed, so we still have ownership of the box, so
+ // we may destroy it.
+ unsafe { drop(KBox::from_raw(inner)) };
+
+ return Err(DeferredFdCloseError::TaskWorkUnavailable);
+ }
+
+ // This removes the fd from the fd table in `current`. The file is not fully closed until
+ // `filp_close` is called. We are given ownership of one refcount to the file.
+ //
+ // SAFETY: This is safe no matter what `fd` is. If the `fd` is valid (that is, if the
+ // pointer is non-null), then we call `filp_close` on the returned pointer as required by
+ // `file_close_fd`.
+ let file = unsafe { bindings::file_close_fd(fd) };
+ if file.is_null() {
+ // We don't clean up the task work since that might be expensive if the task work queue
+ // is long. Just let it execute and let it clean up for itself.
+ return Err(DeferredFdCloseError::BadFd);
+ }
+
+ // Acquire a second refcount to the file.
+ //
+ // SAFETY: The `file` pointer points at a file with a non-zero refcount.
+ unsafe { bindings::get_file(file) };
+
+ // This method closes the fd, consuming one of our two refcounts. There could be active
+ // light refcounts created from that fd, so we must ensure that the file has a positive
+ // refcount for the duration of those active light refcounts. We do that by holding on to
+ // the second refcount until the current task returns to userspace.
+ //
+ // SAFETY: The `file` pointer is valid. Passing `current->files` as the file table to close
+ // it in is correct, since we just got the `fd` from `file_close_fd` which also uses
+ // `current->files`.
+ //
+ // Note: fl_owner_t is currently a void pointer.
+ unsafe { bindings::filp_close(file, (*current).files as bindings::fl_owner_t) };
+
+ // We update the file pointer that the task work is supposed to fput. This transfers
+ // ownership of our last refcount.
+ //
+ // INVARIANT: This changes the `file` field of a `DeferredFdCloserInner` from null to
+ // non-null. This doesn't break the type invariant for `DeferredFdCloserInner` because we
+ // still own a refcount to the file, so we can pass ownership of that refcount to the
+ // `DeferredFdCloserInner`.
+ //
+ // When `do_close_fd` runs, it must be safe for it to `fput` the refcount. However, this is
+ // the case because all light refcounts that are associated with the fd we closed
+ // previously must have been dropped by the time `do_close_fd` runs, since light
+ // refcounts must be dropped before returning to userspace.
+ //
+ // SAFETY: Task works are executed on the current thread right before we return to
+ // userspace, so this write is guaranteed to happen before `do_close_fd` is called, which
+ // means that a race is not possible here.
+ unsafe { *file_field = file };
+
+ Ok(())
+ }
+
+ /// # Safety
+ ///
+ /// The provided pointer must point at the `twork` field of a `DeferredFdCloserInner` stored in
+ /// a `KBox`, and the caller must pass exclusive ownership of that `KBox`. Furthermore, if the
+ /// file pointer is non-null, then it must be okay to release the refcount by calling `fput`.
+ unsafe extern "C" fn do_close_fd(inner: *mut bindings::callback_head) {
+ // SAFETY: The caller just passed us ownership of this box.
+ let inner = unsafe { KBox::from_raw(inner.cast::<DeferredFdCloserInner>()) };
+ if !inner.file.is_null() {
+ // SAFETY: By the type invariants, we own a refcount to this file, and the caller
+ // guarantees that dropping the refcount now is okay.
+ unsafe { bindings::fput(inner.file) };
+ }
+ // The allocation is freed when `inner` goes out of scope.
+ }
+}
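+
+// Usage sketch (mirrors the teardown path in allocation.rs): create the closer
+// first so the only fallible step is the allocation, then hand it the fd; the
+// actual fput runs from task work when the task returns to userspace.
+//
+//     let closer = DeferredFdCloser::new(GFP_KERNEL)?;
+//     let _ = closer.close_fd(fd);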
+
+/// Represents a failure to close an fd in a deferred manner.
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub(crate) enum DeferredFdCloseError {
+ /// Closing the fd failed because we were unable to schedule a task work.
+ TaskWorkUnavailable,
+ /// Closing the fd failed because the fd does not exist.
+ BadFd,
+}
+
+impl From<DeferredFdCloseError> for Error {
+ fn from(err: DeferredFdCloseError) -> Error {
+ match err {
+ DeferredFdCloseError::TaskWorkUnavailable => ESRCH,
+ DeferredFdCloseError::BadFd => EBADF,
+ }
+ }
+}
diff --git a/drivers/android/binder/defs.rs b/drivers/android/binder/defs.rs
new file mode 100644
index 000000000000..33f51b4139c7
--- /dev/null
+++ b/drivers/android/binder/defs.rs
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use core::mem::MaybeUninit;
+use core::ops::{Deref, DerefMut};
+use kernel::{
+ transmute::{AsBytes, FromBytes},
+ uapi::{self, *},
+};
+
+macro_rules! pub_no_prefix {
+ ($prefix:ident, $($newname:ident),+ $(,)?) => {
+ $(pub(crate) const $newname: u32 = kernel::macros::concat_idents!($prefix, $newname);)+
+ };
+}
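+
+// For illustration, `pub_no_prefix!(binder_driver_return_protocol_, BR_OK)`
+// expands (via `concat_idents!`) to:
+//
+//     pub(crate) const BR_OK: u32 = binder_driver_return_protocol_BR_OK;
+//
+// i.e. it re-exports the bindgen-generated UAPI constants without their long
+// enum prefix.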
+
+pub_no_prefix!(
+ binder_driver_return_protocol_,
+ BR_TRANSACTION,
+ BR_TRANSACTION_SEC_CTX,
+ BR_REPLY,
+ BR_DEAD_REPLY,
+ BR_FAILED_REPLY,
+ BR_FROZEN_REPLY,
+ BR_NOOP,
+ BR_SPAWN_LOOPER,
+ BR_TRANSACTION_COMPLETE,
+ BR_TRANSACTION_PENDING_FROZEN,
+ BR_ONEWAY_SPAM_SUSPECT,
+ BR_OK,
+ BR_ERROR,
+ BR_INCREFS,
+ BR_ACQUIRE,
+ BR_RELEASE,
+ BR_DECREFS,
+ BR_DEAD_BINDER,
+ BR_CLEAR_DEATH_NOTIFICATION_DONE,
+ BR_FROZEN_BINDER,
+ BR_CLEAR_FREEZE_NOTIFICATION_DONE,
+);
+
+pub_no_prefix!(
+ binder_driver_command_protocol_,
+ BC_TRANSACTION,
+ BC_TRANSACTION_SG,
+ BC_REPLY,
+ BC_REPLY_SG,
+ BC_FREE_BUFFER,
+ BC_ENTER_LOOPER,
+ BC_EXIT_LOOPER,
+ BC_REGISTER_LOOPER,
+ BC_INCREFS,
+ BC_ACQUIRE,
+ BC_RELEASE,
+ BC_DECREFS,
+ BC_INCREFS_DONE,
+ BC_ACQUIRE_DONE,
+ BC_REQUEST_DEATH_NOTIFICATION,
+ BC_CLEAR_DEATH_NOTIFICATION,
+ BC_DEAD_BINDER_DONE,
+ BC_REQUEST_FREEZE_NOTIFICATION,
+ BC_CLEAR_FREEZE_NOTIFICATION,
+ BC_FREEZE_NOTIFICATION_DONE,
+);
+
+pub_no_prefix!(
+ flat_binder_object_flags_,
+ FLAT_BINDER_FLAG_ACCEPTS_FDS,
+ FLAT_BINDER_FLAG_TXN_SECURITY_CTX
+);
+
+pub_no_prefix!(
+ transaction_flags_,
+ TF_ONE_WAY,
+ TF_ACCEPT_FDS,
+ TF_CLEAR_BUF,
+ TF_UPDATE_TXN
+);
+
+pub(crate) use uapi::{
+ BINDER_TYPE_BINDER, BINDER_TYPE_FD, BINDER_TYPE_FDA, BINDER_TYPE_HANDLE, BINDER_TYPE_PTR,
+ BINDER_TYPE_WEAK_BINDER, BINDER_TYPE_WEAK_HANDLE,
+};
+
+macro_rules! decl_wrapper {
+ ($newname:ident, $wrapped:ty) => {
+ // Define a wrapper around the C type. Use `MaybeUninit` to enforce that the value of
+ // padding bytes must be preserved.
+ #[derive(Copy, Clone)]
+ #[repr(transparent)]
+ pub(crate) struct $newname(MaybeUninit<$wrapped>);
+
+ // SAFETY: This macro is only used with types where this is ok.
+ unsafe impl FromBytes for $newname {}
+ // SAFETY: This macro is only used with types where this is ok.
+ unsafe impl AsBytes for $newname {}
+
+ impl Deref for $newname {
+ type Target = $wrapped;
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: We use `MaybeUninit` only to preserve padding. The value must still
+ // always be valid.
+ unsafe { self.0.assume_init_ref() }
+ }
+ }
+
+ impl DerefMut for $newname {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ // SAFETY: We use `MaybeUninit` only to preserve padding. The value must still
+ // always be valid.
+ unsafe { self.0.assume_init_mut() }
+ }
+ }
+
+ impl Default for $newname {
+ fn default() -> Self {
+ // Create a new value of this type where all bytes (including padding) are zeroed.
+ Self(MaybeUninit::zeroed())
+ }
+ }
+ };
+}
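+
+// For illustration, `decl_wrapper!(BinderVersion, uapi::binder_version)` below
+// defines a `#[repr(transparent)]` newtype that can be copied to and from
+// userspace byte-for-byte (padding included) and derefs to the C struct:
+//
+//     let mut v = BinderVersion::default(); // all bytes zeroed, padding too
+//     v.protocol_version = 8;               // field access via DerefMut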
+
+decl_wrapper!(BinderNodeDebugInfo, uapi::binder_node_debug_info);
+decl_wrapper!(BinderNodeInfoForRef, uapi::binder_node_info_for_ref);
+decl_wrapper!(FlatBinderObject, uapi::flat_binder_object);
+decl_wrapper!(BinderFdObject, uapi::binder_fd_object);
+decl_wrapper!(BinderFdArrayObject, uapi::binder_fd_array_object);
+decl_wrapper!(BinderObjectHeader, uapi::binder_object_header);
+decl_wrapper!(BinderBufferObject, uapi::binder_buffer_object);
+decl_wrapper!(BinderTransactionData, uapi::binder_transaction_data);
+decl_wrapper!(
+ BinderTransactionDataSecctx,
+ uapi::binder_transaction_data_secctx
+);
+decl_wrapper!(BinderTransactionDataSg, uapi::binder_transaction_data_sg);
+decl_wrapper!(BinderWriteRead, uapi::binder_write_read);
+decl_wrapper!(BinderVersion, uapi::binder_version);
+decl_wrapper!(BinderFrozenStatusInfo, uapi::binder_frozen_status_info);
+decl_wrapper!(BinderFreezeInfo, uapi::binder_freeze_info);
+decl_wrapper!(BinderFrozenStateInfo, uapi::binder_frozen_state_info);
+decl_wrapper!(BinderHandleCookie, uapi::binder_handle_cookie);
+decl_wrapper!(ExtendedError, uapi::binder_extended_error);
+
+impl BinderVersion {
+ pub(crate) fn current() -> Self {
+ Self(MaybeUninit::new(uapi::binder_version {
+ protocol_version: BINDER_CURRENT_PROTOCOL_VERSION as _,
+ }))
+ }
+}
+
+impl BinderTransactionData {
+ pub(crate) fn with_buffers_size(self, buffers_size: u64) -> BinderTransactionDataSg {
+ BinderTransactionDataSg(MaybeUninit::new(uapi::binder_transaction_data_sg {
+ transaction_data: *self,
+ buffers_size,
+ }))
+ }
+}
+
+impl BinderTransactionDataSecctx {
+ /// View the inner data as wrapped in `BinderTransactionData`.
+ pub(crate) fn tr_data(&mut self) -> &mut BinderTransactionData {
+ // SAFETY: Transparent wrapper is safe to transmute.
+ unsafe {
+ &mut *(&mut self.transaction_data as *mut uapi::binder_transaction_data
+ as *mut BinderTransactionData)
+ }
+ }
+}
+
+impl ExtendedError {
+ pub(crate) fn new(id: u32, command: u32, param: i32) -> Self {
+ Self(MaybeUninit::new(uapi::binder_extended_error {
+ id,
+ command,
+ param,
+ }))
+ }
+}
diff --git a/drivers/android/binder/error.rs b/drivers/android/binder/error.rs
new file mode 100644
index 000000000000..9921827267d0
--- /dev/null
+++ b/drivers/android/binder/error.rs
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::prelude::*;
+
+use crate::defs::*;
+
+pub(crate) type BinderResult<T = ()> = core::result::Result<T, BinderError>;
+
+/// An error that will be returned to userspace via the `BINDER_WRITE_READ` ioctl rather than via
+/// errno.
+pub(crate) struct BinderError {
+ pub(crate) reply: u32,
+ source: Option<Error>,
+}
+
+impl BinderError {
+ pub(crate) fn new_dead() -> Self {
+ Self {
+ reply: BR_DEAD_REPLY,
+ source: None,
+ }
+ }
+
+ pub(crate) fn new_frozen() -> Self {
+ Self {
+ reply: BR_FROZEN_REPLY,
+ source: None,
+ }
+ }
+
+ pub(crate) fn new_frozen_oneway() -> Self {
+ Self {
+ reply: BR_TRANSACTION_PENDING_FROZEN,
+ source: None,
+ }
+ }
+
+ pub(crate) fn is_dead(&self) -> bool {
+ self.reply == BR_DEAD_REPLY
+ }
+
+ pub(crate) fn as_errno(&self) -> kernel::ffi::c_int {
+ self.source.unwrap_or(EINVAL).to_errno()
+ }
+
+ pub(crate) fn should_pr_warn(&self) -> bool {
+ self.source.is_some()
+ }
+}
+
+/// Convert an errno into a `BinderError` and store the errno used to construct it. The errno
+/// should be stored as the thread's extended error when given to userspace.
+impl From<Error> for BinderError {
+ fn from(source: Error) -> Self {
+ Self {
+ reply: BR_FAILED_REPLY,
+ source: Some(source),
+ }
+ }
+}
+
+impl From<kernel::fs::file::BadFdError> for BinderError {
+ fn from(source: kernel::fs::file::BadFdError) -> Self {
+ BinderError::from(Error::from(source))
+ }
+}
+
+impl From<kernel::alloc::AllocError> for BinderError {
+ fn from(_: kernel::alloc::AllocError) -> Self {
+ Self {
+ reply: BR_FAILED_REPLY,
+ source: Some(ENOMEM),
+ }
+ }
+}
+
+impl core::fmt::Debug for BinderError {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ match self.reply {
+ BR_FAILED_REPLY => match self.source.as_ref() {
+ Some(source) => f
+ .debug_struct("BR_FAILED_REPLY")
+ .field("source", source)
+ .finish(),
+ None => f.pad("BR_FAILED_REPLY"),
+ },
+ BR_DEAD_REPLY => f.pad("BR_DEAD_REPLY"),
+ BR_FROZEN_REPLY => f.pad("BR_FROZEN_REPLY"),
+ BR_TRANSACTION_PENDING_FROZEN => f.pad("BR_TRANSACTION_PENDING_FROZEN"),
+ BR_TRANSACTION_COMPLETE => f.pad("BR_TRANSACTION_COMPLETE"),
+ _ => f
+ .debug_struct("BinderError")
+ .field("reply", &self.reply)
+ .finish(),
+ }
+ }
+}
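+
+// Example (sketch; `find_node` is a hypothetical helper): `?` on a plain errno
+// inside a transaction path becomes a BR_FAILED_REPLY for userspace while the
+// errno is kept for the extended-error report:
+//
+//     fn target_of(handle: u32) -> BinderResult<NodeRef> {
+//         Ok(find_node(handle).ok_or(ENOENT)?) // -> BR_FAILED_REPLY / ENOENT
+//     }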
diff --git a/drivers/android/binder/freeze.rs b/drivers/android/binder/freeze.rs
new file mode 100644
index 000000000000..e68c3c8bc55a
--- /dev/null
+++ b/drivers/android/binder/freeze.rs
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{
+ alloc::AllocError,
+ list::ListArc,
+ prelude::*,
+ rbtree::{self, RBTreeNodeReservation},
+ seq_file::SeqFile,
+ seq_print,
+ sync::{Arc, UniqueArc},
+ uaccess::UserSliceReader,
+};
+
+use crate::{
+ defs::*, node::Node, process::Process, thread::Thread, BinderReturnWriter, DArc, DLArc,
+ DTRWrap, DeliverToRead,
+};
+
+#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd)]
+pub(crate) struct FreezeCookie(u64);
+
+/// Represents a listener for changes to the frozen state of a process.
+pub(crate) struct FreezeListener {
+ /// The node we are listening for.
+ pub(crate) node: DArc<Node>,
+ /// The cookie of this freeze listener.
+ cookie: FreezeCookie,
+ /// What value of `is_frozen` did we most recently tell userspace about?
+ last_is_frozen: Option<bool>,
+ /// We sent a `BR_FROZEN_BINDER` and we are waiting for `BC_FREEZE_NOTIFICATION_DONE` before
+ /// sending any other commands.
+ is_pending: bool,
+ /// Userspace sent `BC_CLEAR_FREEZE_NOTIFICATION` and we need to reply with
+ /// `BR_CLEAR_FREEZE_NOTIFICATION_DONE` as soon as possible. If `is_pending` is set, then we
+ /// must wait for it to be unset before we can reply.
+ is_clearing: bool,
+ /// Number of cleared duplicates that can't be deleted until userspace sends
+ /// `BC_FREEZE_NOTIFICATION_DONE`.
+ num_pending_duplicates: u64,
+ /// Number of cleared duplicates that can be deleted.
+ num_cleared_duplicates: u64,
+}
+
+impl FreezeListener {
+ /// Is it okay to create a new listener with the same cookie as this one for the provided node?
+ ///
+ /// Under some scenarios, userspace may delete a freeze listener and immediately recreate it
+ /// with the same cookie. This results in duplicate listeners. To avoid issues with ambiguity,
+ /// we allow this only if the new listener is for the same node, and we also require that the
+ /// old listener has already been cleared.
+ fn allow_duplicate(&self, node: &DArc<Node>) -> bool {
+ Arc::ptr_eq(&self.node, node) && self.is_clearing
+ }
+}
+
+type UninitFM = UniqueArc<core::mem::MaybeUninit<DTRWrap<FreezeMessage>>>;
+
+/// Represents a notification that the freeze state has changed.
+pub(crate) struct FreezeMessage {
+ cookie: FreezeCookie,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for FreezeMessage {
+ untracked;
+ }
+}
+
+impl FreezeMessage {
+ fn new(flags: kernel::alloc::Flags) -> Result<UninitFM, AllocError> {
+ UniqueArc::new_uninit(flags)
+ }
+
+ fn init(ua: UninitFM, cookie: FreezeCookie) -> DLArc<FreezeMessage> {
+ match ua.pin_init_with(DTRWrap::new(FreezeMessage { cookie })) {
+ Ok(msg) => ListArc::from(msg),
+ Err(err) => match err {},
+ }
+ }
+}
+
+impl DeliverToRead for FreezeMessage {
+ fn do_work(
+ self: DArc<Self>,
+ thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ let _removed_listener;
+ let mut node_refs = thread.process.node_refs.lock();
+ let Some(mut freeze_entry) = node_refs.freeze_listeners.find_mut(&self.cookie) else {
+ return Ok(true);
+ };
+ let freeze = freeze_entry.get_mut();
+
+ if freeze.num_cleared_duplicates > 0 {
+ freeze.num_cleared_duplicates -= 1;
+ drop(node_refs);
+ writer.write_code(BR_CLEAR_FREEZE_NOTIFICATION_DONE)?;
+ writer.write_payload(&self.cookie.0)?;
+ return Ok(true);
+ }
+
+ if freeze.is_pending {
+ return Ok(true);
+ }
+ if freeze.is_clearing {
+ _removed_listener = freeze_entry.remove_node();
+ drop(node_refs);
+ writer.write_code(BR_CLEAR_FREEZE_NOTIFICATION_DONE)?;
+ writer.write_payload(&self.cookie.0)?;
+ Ok(true)
+ } else {
+ let is_frozen = freeze.node.owner.inner.lock().is_frozen;
+ if freeze.last_is_frozen == Some(is_frozen) {
+ return Ok(true);
+ }
+
+ let mut state_info = BinderFrozenStateInfo::default();
+ state_info.is_frozen = is_frozen as u32;
+ state_info.cookie = freeze.cookie.0;
+ freeze.is_pending = true;
+ freeze.last_is_frozen = Some(is_frozen);
+ drop(node_refs);
+
+ writer.write_code(BR_FROZEN_BINDER)?;
+ writer.write_payload(&state_info)?;
+ // BR_FROZEN_BINDER notifications can cause transactions
+ Ok(false)
+ }
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ #[inline(never)]
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ seq_print!(m, "{}has frozen binder\n", prefix);
+ Ok(())
+ }
+}
+
+impl FreezeListener {
+ pub(crate) fn on_process_exit(&self, proc: &Arc<Process>) {
+ if !self.is_clearing {
+ self.node.remove_freeze_listener(proc);
+ }
+ }
+}
+
+impl Process {
+ pub(crate) fn request_freeze_notif(
+ self: &Arc<Self>,
+ reader: &mut UserSliceReader,
+ ) -> Result<()> {
+ let hc = reader.read::<BinderHandleCookie>()?;
+ let handle = hc.handle;
+ let cookie = FreezeCookie(hc.cookie);
+
+ let msg = FreezeMessage::new(GFP_KERNEL)?;
+ let alloc = RBTreeNodeReservation::new(GFP_KERNEL)?;
+
+ let mut node_refs_guard = self.node_refs.lock();
+ let node_refs = &mut *node_refs_guard;
+ let Some(info) = node_refs.by_handle.get_mut(&handle) else {
+ pr_warn!("BC_REQUEST_FREEZE_NOTIFICATION invalid ref {}\n", handle);
+ return Err(EINVAL);
+ };
+ if info.freeze().is_some() {
+ pr_warn!("BC_REQUEST_FREEZE_NOTIFICATION already set\n");
+ return Err(EINVAL);
+ }
+ let node_ref = info.node_ref();
+ let freeze_entry = node_refs.freeze_listeners.entry(cookie);
+
+ if let rbtree::Entry::Occupied(ref dupe) = freeze_entry {
+ if !dupe.get().allow_duplicate(&node_ref.node) {
+ pr_warn!("BC_REQUEST_FREEZE_NOTIFICATION duplicate cookie\n");
+ return Err(EINVAL);
+ }
+ }
+
+ // All failure paths must come before this call, and all modifications must come after this
+ // call.
+ node_ref.node.add_freeze_listener(self, GFP_KERNEL)?;
+
+ match freeze_entry {
+ rbtree::Entry::Vacant(entry) => {
+ entry.insert(
+ FreezeListener {
+ cookie,
+ node: node_ref.node.clone(),
+ last_is_frozen: None,
+ is_pending: false,
+ is_clearing: false,
+ num_pending_duplicates: 0,
+ num_cleared_duplicates: 0,
+ },
+ alloc,
+ );
+ }
+ rbtree::Entry::Occupied(mut dupe) => {
+ let dupe = dupe.get_mut();
+ if dupe.is_pending {
+ dupe.num_pending_duplicates += 1;
+ } else {
+ dupe.num_cleared_duplicates += 1;
+ }
+ dupe.last_is_frozen = None;
+ dupe.is_pending = false;
+ dupe.is_clearing = false;
+ }
+ }
+
+ *info.freeze() = Some(cookie);
+ let msg = FreezeMessage::init(msg, cookie);
+ drop(node_refs_guard);
+ let _ = self.push_work(msg);
+ Ok(())
+ }
+
+ pub(crate) fn freeze_notif_done(self: &Arc<Self>, reader: &mut UserSliceReader) -> Result<()> {
+ let cookie = FreezeCookie(reader.read()?);
+ let alloc = FreezeMessage::new(GFP_KERNEL)?;
+ let mut node_refs_guard = self.node_refs.lock();
+ let node_refs = &mut *node_refs_guard;
+ let Some(freeze) = node_refs.freeze_listeners.get_mut(&cookie) else {
+ pr_warn!("BC_FREEZE_NOTIFICATION_DONE {:016x} not found\n", cookie.0);
+ return Err(EINVAL);
+ };
+ let mut clear_msg = None;
+ if freeze.num_pending_duplicates > 0 {
+ clear_msg = Some(FreezeMessage::init(alloc, cookie));
+ freeze.num_pending_duplicates -= 1;
+ freeze.num_cleared_duplicates += 1;
+ } else {
+ if !freeze.is_pending {
+ pr_warn!(
+ "BC_FREEZE_NOTIFICATION_DONE {:016x} not pending\n",
+ cookie.0
+ );
+ return Err(EINVAL);
+ }
+ if freeze.is_clearing {
+ // Immediately send another FreezeMessage for BR_CLEAR_FREEZE_NOTIFICATION_DONE.
+ clear_msg = Some(FreezeMessage::init(alloc, cookie));
+ }
+ freeze.is_pending = false;
+ }
+ drop(node_refs_guard);
+ if let Some(clear_msg) = clear_msg {
+ let _ = self.push_work(clear_msg);
+ }
+ Ok(())
+ }
+
+ pub(crate) fn clear_freeze_notif(self: &Arc<Self>, reader: &mut UserSliceReader) -> Result<()> {
+ let hc = reader.read::<BinderHandleCookie>()?;
+ let handle = hc.handle;
+ let cookie = FreezeCookie(hc.cookie);
+
+ let alloc = FreezeMessage::new(GFP_KERNEL)?;
+ let mut node_refs_guard = self.node_refs.lock();
+ let node_refs = &mut *node_refs_guard;
+ let Some(info) = node_refs.by_handle.get_mut(&handle) else {
+ pr_warn!("BC_CLEAR_FREEZE_NOTIFICATION invalid ref {}\n", handle);
+ return Err(EINVAL);
+ };
+ let Some(info_cookie) = info.freeze() else {
+ pr_warn!("BC_CLEAR_FREEZE_NOTIFICATION freeze notification not active\n");
+ return Err(EINVAL);
+ };
+ if *info_cookie != cookie {
+ pr_warn!("BC_CLEAR_FREEZE_NOTIFICATION freeze notification cookie mismatch\n");
+ return Err(EINVAL);
+ }
+ let Some(listener) = node_refs.freeze_listeners.get_mut(&cookie) else {
+ pr_warn!("BC_CLEAR_FREEZE_NOTIFICATION invalid cookie {}\n", handle);
+ return Err(EINVAL);
+ };
+ listener.is_clearing = true;
+ listener.node.remove_freeze_listener(self);
+ *info.freeze() = None;
+ let mut msg = None;
+ if !listener.is_pending {
+ msg = Some(FreezeMessage::init(alloc, cookie));
+ }
+ drop(node_refs_guard);
+
+ if let Some(msg) = msg {
+ let _ = self.push_work(msg);
+ }
+ Ok(())
+ }
+
+ fn get_freeze_cookie(&self, node: &DArc<Node>) -> Option<FreezeCookie> {
+ let node_refs = &mut *self.node_refs.lock();
+ let handle = node_refs.by_node.get(&node.global_id())?;
+ let node_ref = node_refs.by_handle.get_mut(handle)?;
+ *node_ref.freeze()
+ }
+
+ /// Creates a vector of every freeze listener on this process.
+ ///
+ /// Returns pairs of the remote process listening for notifications and the local node it is
+ /// listening on.
+ #[expect(clippy::type_complexity)]
+ fn find_freeze_recipients(&self) -> Result<KVVec<(DArc<Node>, Arc<Process>)>, AllocError> {
+ // Defined before `inner` to drop after releasing spinlock if `push_within_capacity` fails.
+ let mut node_proc_pair;
+
+ // We pre-allocate space for up to 8 recipients before we take the spinlock. However, if
+ // the allocation fails, use a vector with a capacity of zero instead of failing. After
+ // all, there might not be any freeze listeners, in which case this operation could still
+ // succeed.
+ let mut recipients =
+ KVVec::with_capacity(8, GFP_KERNEL).unwrap_or_else(|_err| KVVec::new());
+
+ let mut inner = self.lock_with_nodes();
+ let mut curr = inner.nodes.cursor_front();
+ while let Some(cursor) = curr {
+ let (key, node) = cursor.current();
+ let key = *key;
+ let list = node.freeze_list(&inner.inner);
+ let len = list.len();
+
+ if recipients.spare_capacity_mut().len() < len {
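+ // The process lock is a spinlock, so the GFP_KERNEL reservation
+ // below (which may sleep) must happen with the lock dropped.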
+ drop(inner);
+ recipients.reserve(len, GFP_KERNEL)?;
+ inner = self.lock_with_nodes();
+ // Find the node we were looking at and try again. If the set of nodes was changed,
+ // then just proceed to the next node. This is ok because we don't guarantee the
+ // inclusion of nodes that are added or removed in parallel with this operation.
+ curr = inner.nodes.cursor_lower_bound(&key);
+ continue;
+ }
+
+ for proc in list {
+ node_proc_pair = (node.clone(), proc.clone());
+ recipients
+ .push_within_capacity(node_proc_pair)
+ .map_err(|_| {
+ pr_err!(
+ "push_within_capacity failed even though we checked the capacity\n"
+ );
+ AllocError
+ })?;
+ }
+
+ curr = cursor.move_next();
+ }
+ Ok(recipients)
+ }
+
+ /// Prepare allocations for sending freeze messages.
+ pub(crate) fn prepare_freeze_messages(&self) -> Result<FreezeMessages, AllocError> {
+ let recipients = self.find_freeze_recipients()?;
+ let mut batch = KVVec::with_capacity(recipients.len(), GFP_KERNEL)?;
+ for (node, proc) in recipients {
+ let Some(cookie) = proc.get_freeze_cookie(&node) else {
+ // If the freeze listener was removed in the meantime, just discard the
+ // notification.
+ continue;
+ };
+ let msg_alloc = FreezeMessage::new(GFP_KERNEL)?;
+ let msg = FreezeMessage::init(msg_alloc, cookie);
+ batch.push((proc, msg), GFP_KERNEL)?;
+ }
+
+ Ok(FreezeMessages { batch })
+ }
+}
+
+pub(crate) struct FreezeMessages {
+ batch: KVVec<(Arc<Process>, DLArc<FreezeMessage>)>,
+}
+
+impl FreezeMessages {
+ pub(crate) fn send_messages(self) {
+ for (proc, msg) in self.batch {
+ let _ = proc.push_work(msg);
+ }
+ }
+}
diff --git a/drivers/android/binder/node.rs b/drivers/android/binder/node.rs
new file mode 100644
index 000000000000..ade895ef791e
--- /dev/null
+++ b/drivers/android/binder/node.rs
@@ -0,0 +1,1131 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{
+ list::{AtomicTracker, List, ListArc, ListLinks, TryNewListArc},
+ prelude::*,
+ seq_file::SeqFile,
+ seq_print,
+ sync::lock::{spinlock::SpinLockBackend, Guard},
+ sync::{Arc, LockedBy, SpinLock},
+};
+
+use crate::{
+ defs::*,
+ error::BinderError,
+ process::{NodeRefInfo, Process, ProcessInner},
+ thread::Thread,
+ transaction::Transaction,
+ BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverToRead,
+};
+
+use core::mem;
+
+mod wrapper;
+pub(crate) use self::wrapper::CritIncrWrapper;
+
+#[derive(Debug)]
+pub(crate) struct CouldNotDeliverCriticalIncrement;
+
+/// Keeps track of how this node is scheduled.
+///
+/// There are two ways to schedule a node to a work list. Just schedule the node itself, or
+/// allocate a wrapper that references the node and schedule the wrapper. These wrappers exist to
+/// make it possible to "move" a node from one list to another - when `do_work` is called directly
+/// on the `Node`, then it's a no-op if there's also a pending wrapper.
+///
+/// Wrappers are generally only needed for zero-to-one refcount increments, and there are two cases
+/// of this: weak increments and strong increments. We call such increments "critical" because it
+/// is critical that they are delivered to the thread doing the increment. Some examples:
+///
+/// * One thread makes a zero-to-one strong increment, and another thread makes a zero-to-one weak
+/// increment. Delivering the node to the thread doing the weak increment is wrong, since the
+/// thread doing the strong increment may have ended a long time ago when the command is actually
+/// processed by userspace.
+///
+/// * We have a weak reference and are about to drop it on one thread. But then another thread does
+/// a zero-to-one strong increment. If the strong increment gets sent to the thread that was
+/// about to drop the weak reference, then the strong increment could be processed after the
+/// other thread has already exited, which would be too late.
+///
+/// Note that trying to create a `ListArc` to the node can succeed even if `has_normal_push` is
+/// set. This is because another thread might just have popped the node from a todo list, but not
+/// yet called `do_work`. However, if `has_normal_push` is false, then creating a `ListArc` should
+/// always succeed.
+///
+/// Like the other fields in `NodeInner`, the delivery state is protected by the process lock.
+struct DeliveryState {
+ /// Is the `Node` currently scheduled?
+ has_pushed_node: bool,
+
+ /// Is a wrapper currently scheduled?
+ ///
+ /// The wrapper is used only for strong zero2one increments.
+ has_pushed_wrapper: bool,
+
+ /// Is the currently scheduled `Node` scheduled due to a weak zero2one increment?
+ ///
+ /// Weak zero2one operations are always scheduled using the `Node`.
+ has_weak_zero2one: bool,
+
+ /// Is the currently scheduled wrapper/`Node` scheduled due to a strong zero2one increment?
+ ///
+ /// If `has_pushed_wrapper` is set, then the strong zero2one increment was scheduled using the
+ /// wrapper. Otherwise, `has_pushed_node` must be set and it was scheduled using the `Node`.
+ has_strong_zero2one: bool,
+}
+
+impl DeliveryState {
+ fn should_normal_push(&self) -> bool {
+ !self.has_pushed_node && !self.has_pushed_wrapper
+ }
+
+ fn did_normal_push(&mut self) {
+ assert!(self.should_normal_push());
+ self.has_pushed_node = true;
+ }
+
+ fn should_push_weak_zero2one(&self) -> bool {
+ !self.has_weak_zero2one && !self.has_strong_zero2one
+ }
+
+ fn can_push_weak_zero2one_normally(&self) -> bool {
+ !self.has_pushed_node
+ }
+
+ fn did_push_weak_zero2one(&mut self) {
+ assert!(self.should_push_weak_zero2one());
+ assert!(self.can_push_weak_zero2one_normally());
+ self.has_pushed_node = true;
+ self.has_weak_zero2one = true;
+ }
+
+ fn should_push_strong_zero2one(&self) -> bool {
+ !self.has_strong_zero2one
+ }
+
+ fn can_push_strong_zero2one_normally(&self) -> bool {
+ !self.has_pushed_node
+ }
+
+ fn did_push_strong_zero2one(&mut self) {
+ assert!(self.should_push_strong_zero2one());
+ assert!(self.can_push_strong_zero2one_normally());
+ self.has_pushed_node = true;
+ self.has_strong_zero2one = true;
+ }
+
+ fn did_push_strong_zero2one_wrapper(&mut self) {
+ assert!(self.should_push_strong_zero2one());
+ assert!(!self.can_push_strong_zero2one_normally());
+ self.has_pushed_wrapper = true;
+ self.has_strong_zero2one = true;
+ }
+}
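+
+// Editor's sketch (illustrative only) of how a strong zero-to-one increment consults the state
+// machine above: first check whether a push is needed at all, then whether the `Node` itself can
+// carry it, falling back to a wrapper when the node is already scheduled:
+//
+//     if ds.should_push_strong_zero2one() {
+//         if ds.can_push_strong_zero2one_normally() {
+//             ds.did_push_strong_zero2one();          // schedule the Node itself
+//         } else {
+//             ds.did_push_strong_zero2one_wrapper();  // schedule a CritIncrWrapper
+//         }
+//     }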
+
+struct CountState {
+ /// The reference count.
+ count: usize,
+ /// Whether the process that owns this node thinks that we hold a refcount on it. (Note that
+ /// even if count is greater than one, we only increment it once in the owning process.)
+ has_count: bool,
+}
+
+impl CountState {
+ fn new() -> Self {
+ Self {
+ count: 0,
+ has_count: false,
+ }
+ }
+}
+
+struct NodeInner {
+ /// Strong refcounts held on this node by `NodeRef` objects.
+ strong: CountState,
+ /// Weak refcounts held on this node by `NodeRef` objects.
+ weak: CountState,
+ delivery_state: DeliveryState,
+ /// The binder driver guarantees that oneway transactions sent to the same node are serialized,
+ /// that is, userspace will not be given the next one until it has finished processing the
+ /// previous oneway transaction. This is done to avoid the case where two oneway transactions
+ /// arrive in opposite order from the order in which they were sent. (E.g., they could be
+ /// delivered to two different threads, which could appear as-if they were sent in opposite
+ /// order.)
+ ///
+ /// To fix that, we store pending oneway transactions in a separate list in the node, and don't
+ /// deliver the next oneway transaction until userspace signals that it has finished processing
+ /// the previous oneway transaction by calling the `BC_FREE_BUFFER` ioctl.
+ oneway_todo: List<DTRWrap<Transaction>>,
+ /// Keeps track of whether this node has a pending oneway transaction.
+ ///
+ /// When this is true, incoming oneway transactions are stored in `oneway_todo`, instead of
+ /// being delivered directly to the process.
+ has_oneway_transaction: bool,
+ /// List of processes to deliver a notification to when this node is destroyed (usually due to
+ /// the process dying).
+ death_list: List<DTRWrap<NodeDeath>, 1>,
+ /// List of processes to deliver freeze notifications to.
+ freeze_list: KVVec<Arc<Process>>,
+ /// The number of active BR_INCREFS or BR_ACQUIRE operations. (There should be at most two.)
+ ///
+ /// If this is non-zero, then we postpone any BR_RELEASE or BR_DECREFS notifications until the
+ /// active operations have ended. This avoids the situation where an increment and a decrement get
+ /// reordered from userspace's perspective.
+ active_inc_refs: u8,
+ /// List of `NodeRefInfo` objects that reference this node.
+ refs: List<NodeRefInfo, { NodeRefInfo::LIST_NODE }>,
+}
+
+#[pin_data]
+pub(crate) struct Node {
+ pub(crate) debug_id: usize,
+ ptr: u64,
+ pub(crate) cookie: u64,
+ pub(crate) flags: u32,
+ pub(crate) owner: Arc<Process>,
+ inner: LockedBy<NodeInner, ProcessInner>,
+ #[pin]
+ links_track: AtomicTracker,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for Node {
+ tracked_by links_track: AtomicTracker;
+ }
+}
+
+// Make `oneway_todo` work.
+kernel::list::impl_list_item! {
+ impl ListItem<0> for DTRWrap<Transaction> {
+ using ListLinks { self.links.inner };
+ }
+}
+
+impl Node {
+ pub(crate) fn new(
+ ptr: u64,
+ cookie: u64,
+ flags: u32,
+ owner: Arc<Process>,
+ ) -> impl PinInit<Self> {
+ pin_init!(Self {
+ inner: LockedBy::new(
+ &owner.inner,
+ NodeInner {
+ strong: CountState::new(),
+ weak: CountState::new(),
+ delivery_state: DeliveryState {
+ has_pushed_node: false,
+ has_pushed_wrapper: false,
+ has_weak_zero2one: false,
+ has_strong_zero2one: false,
+ },
+ death_list: List::new(),
+ oneway_todo: List::new(),
+ freeze_list: KVVec::new(),
+ has_oneway_transaction: false,
+ active_inc_refs: 0,
+ refs: List::new(),
+ },
+ ),
+ debug_id: super::next_debug_id(),
+ ptr,
+ cookie,
+ flags,
+ owner,
+ links_track <- AtomicTracker::new(),
+ })
+ }
+
+ pub(crate) fn has_oneway_transaction(&self, owner_inner: &mut ProcessInner) -> bool {
+ let inner = self.inner.access_mut(owner_inner);
+ inner.has_oneway_transaction
+ }
+
+ #[inline(never)]
+ pub(crate) fn full_debug_print(
+ &self,
+ m: &SeqFile,
+ owner_inner: &mut ProcessInner,
+ ) -> Result<()> {
+ let inner = self.inner.access_mut(owner_inner);
+ seq_print!(
+ m,
+ " node {}: u{:016x} c{:016x} hs {} hw {} cs {} cw {}",
+ self.debug_id,
+ self.ptr,
+ self.cookie,
+ inner.strong.has_count,
+ inner.weak.has_count,
+ inner.strong.count,
+ inner.weak.count,
+ );
+ if !inner.refs.is_empty() {
+ seq_print!(m, " proc");
+ for node_ref in &inner.refs {
+ seq_print!(m, " {}", node_ref.process.task.pid());
+ }
+ }
+ seq_print!(m, "\n");
+ for t in &inner.oneway_todo {
+ t.debug_print_inner(m, " pending async transaction ");
+ }
+ Ok(())
+ }
+
+ /// Insert the `NodeRefInfo` into this node's `refs` list.
+ ///
+ /// # Safety
+ ///
+ /// It must be the case that `info.node_ref.node` is this node.
+ pub(crate) unsafe fn insert_node_info(
+ &self,
+ info: ListArc<NodeRefInfo, { NodeRefInfo::LIST_NODE }>,
+ ) {
+ self.inner
+ .access_mut(&mut self.owner.inner.lock())
+ .refs
+ .push_front(info);
+ }
+
+ /// Remove the `NodeRefInfo` from this node's `refs` list.
+ ///
+ /// # Safety
+ ///
+ /// It must be the case that `info.node_ref.node` is this node.
+ pub(crate) unsafe fn remove_node_info(
+ &self,
+ info: &NodeRefInfo,
+ ) -> Option<ListArc<NodeRefInfo, { NodeRefInfo::LIST_NODE }>> {
+ // SAFETY: We always insert `NodeRefInfo` objects into the `refs` list of the node that it
+ // references in `info.node_ref.node`. That is this node, so `info` cannot possibly be in
+ // the `refs` list of another node.
+ unsafe {
+ self.inner
+ .access_mut(&mut self.owner.inner.lock())
+ .refs
+ .remove(info)
+ }
+ }
+
+ /// An id that is unique across all binder nodes on the system. Used as the key in the
+ /// `by_node` map.
+ pub(crate) fn global_id(&self) -> usize {
+ self as *const Node as usize
+ }
+
+ pub(crate) fn get_id(&self) -> (u64, u64) {
+ (self.ptr, self.cookie)
+ }
+
+ pub(crate) fn add_death(
+ &self,
+ death: ListArc<DTRWrap<NodeDeath>, 1>,
+ guard: &mut Guard<'_, ProcessInner, SpinLockBackend>,
+ ) {
+ self.inner.access_mut(guard).death_list.push_back(death);
+ }
+
+ pub(crate) fn inc_ref_done_locked(
+ self: &DArc<Node>,
+ _strong: bool,
+ owner_inner: &mut ProcessInner,
+ ) -> Option<DLArc<Node>> {
+ let inner = self.inner.access_mut(owner_inner);
+ if inner.active_inc_refs == 0 {
+ pr_err!("inc_ref_done called when no active inc_refs");
+ return None;
+ }
+
+ inner.active_inc_refs -= 1;
+ if inner.active_inc_refs == 0 {
+ // Having active inc_refs can inhibit dropping of ref-counts. Calculate whether we
+ // would send a refcount decrement, and if so, tell the caller to schedule us.
+ let strong = inner.strong.count > 0;
+ let has_strong = inner.strong.has_count;
+ let weak = strong || inner.weak.count > 0;
+ let has_weak = inner.weak.has_count;
+
+ let should_drop_weak = !weak && has_weak;
+ let should_drop_strong = !strong && has_strong;
+
+ // If we want to drop the ref-count again, tell the caller to schedule a work node for
+ // that.
+ let need_push = should_drop_weak || should_drop_strong;
+
+ if need_push && inner.delivery_state.should_normal_push() {
+ let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
+ inner.delivery_state.did_normal_push();
+ Some(list_arc)
+ } else {
+ None
+ }
+ } else {
+ None
+ }
+ }
+
+ pub(crate) fn update_refcount_locked(
+ self: &DArc<Node>,
+ inc: bool,
+ strong: bool,
+ count: usize,
+ owner_inner: &mut ProcessInner,
+ ) -> Option<DLArc<Node>> {
+ let is_dead = owner_inner.is_dead;
+ let inner = self.inner.access_mut(owner_inner);
+
+ // Get a reference to the state we'll update.
+ let state = if strong {
+ &mut inner.strong
+ } else {
+ &mut inner.weak
+ };
+
+ // Update the count and determine whether we need to push work.
+ let need_push = if inc {
+ state.count += count;
+ // TODO: This method shouldn't be used for zero-to-one increments.
+ !is_dead && !state.has_count
+ } else {
+ if state.count < count {
+ pr_err!("Failure: refcount underflow!");
+ return None;
+ }
+ state.count -= count;
+ !is_dead && state.count == 0 && state.has_count
+ };
+
+ if need_push && inner.delivery_state.should_normal_push() {
+ let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
+ inner.delivery_state.did_normal_push();
+ Some(list_arc)
+ } else {
+ None
+ }
+ }
+
+ pub(crate) fn incr_refcount_allow_zero2one(
+ self: &DArc<Self>,
+ strong: bool,
+ owner_inner: &mut ProcessInner,
+ ) -> Result<Option<DLArc<Node>>, CouldNotDeliverCriticalIncrement> {
+ let is_dead = owner_inner.is_dead;
+ let inner = self.inner.access_mut(owner_inner);
+
+ // Get a reference to the state we'll update.
+ let state = if strong {
+ &mut inner.strong
+ } else {
+ &mut inner.weak
+ };
+
+ // Update the count and determine whether we need to push work.
+ state.count += 1;
+ if is_dead || state.has_count {
+ return Ok(None);
+ }
+
+ // Userspace needs to be notified of this.
+ if !strong && inner.delivery_state.should_push_weak_zero2one() {
+ assert!(inner.delivery_state.can_push_weak_zero2one_normally());
+ let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
+ inner.delivery_state.did_push_weak_zero2one();
+ Ok(Some(list_arc))
+ } else if strong && inner.delivery_state.should_push_strong_zero2one() {
+ if inner.delivery_state.can_push_strong_zero2one_normally() {
+ let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
+ inner.delivery_state.did_push_strong_zero2one();
+ Ok(Some(list_arc))
+ } else {
+ state.count -= 1;
+ Err(CouldNotDeliverCriticalIncrement)
+ }
+ } else {
+ // Work is already pushed, and we don't need to push again.
+ Ok(None)
+ }
+ }
+
+ pub(crate) fn incr_refcount_allow_zero2one_with_wrapper(
+ self: &DArc<Self>,
+ strong: bool,
+ wrapper: CritIncrWrapper,
+ owner_inner: &mut ProcessInner,
+ ) -> Option<DLArc<dyn DeliverToRead>> {
+ match self.incr_refcount_allow_zero2one(strong, owner_inner) {
+ Ok(Some(node)) => Some(node as _),
+ Ok(None) => None,
+ Err(CouldNotDeliverCriticalIncrement) => {
+ assert!(strong);
+ let inner = self.inner.access_mut(owner_inner);
+ inner.strong.count += 1;
+ inner.delivery_state.did_push_strong_zero2one_wrapper();
+ Some(wrapper.init(self.clone()))
+ }
+ }
+ }
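+
+    // Editor's sketch (not part of the patch) of the expected caller pattern: try the increment
+    // without a wrapper first, and only on `CouldNotDeliverCriticalIncrement` drop the lock,
+    // allocate a `CritIncrWrapper`, and retry. The locking helpers named here are assumptions:
+    //
+    //     let work = match node.incr_refcount_allow_zero2one(strong, &mut inner) {
+    //         Ok(work) => work.map(|w| w as _),
+    //         Err(CouldNotDeliverCriticalIncrement) => {
+    //             drop(inner); // cannot allocate while holding the spinlock
+    //             let wrapper = CritIncrWrapper::new()?;
+    //             let mut inner = node.owner.inner.lock();
+    //             node.incr_refcount_allow_zero2one_with_wrapper(strong, wrapper, &mut inner)
+    //         }
+    //     };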
+
+ pub(crate) fn update_refcount(self: &DArc<Self>, inc: bool, count: usize, strong: bool) {
+ self.owner
+ .inner
+ .lock()
+ .update_node_refcount(self, inc, strong, count, None);
+ }
+
+ pub(crate) fn populate_counts(
+ &self,
+ out: &mut BinderNodeInfoForRef,
+ guard: &Guard<'_, ProcessInner, SpinLockBackend>,
+ ) {
+ let inner = self.inner.access(guard);
+ out.strong_count = inner.strong.count as _;
+ out.weak_count = inner.weak.count as _;
+ }
+
+ pub(crate) fn populate_debug_info(
+ &self,
+ out: &mut BinderNodeDebugInfo,
+ guard: &Guard<'_, ProcessInner, SpinLockBackend>,
+ ) {
+ out.ptr = self.ptr as _;
+ out.cookie = self.cookie as _;
+ let inner = self.inner.access(guard);
+ if inner.strong.has_count {
+ out.has_strong_ref = 1;
+ }
+ if inner.weak.has_count {
+ out.has_weak_ref = 1;
+ }
+ }
+
+ pub(crate) fn force_has_count(&self, guard: &mut Guard<'_, ProcessInner, SpinLockBackend>) {
+ let inner = self.inner.access_mut(guard);
+ inner.strong.has_count = true;
+ inner.weak.has_count = true;
+ }
+
+ fn write(&self, writer: &mut BinderReturnWriter<'_>, code: u32) -> Result {
+ writer.write_code(code)?;
+ writer.write_payload(&self.ptr)?;
+ writer.write_payload(&self.cookie)?;
+ Ok(())
+ }
+
+ pub(crate) fn submit_oneway(
+ &self,
+ transaction: DLArc<Transaction>,
+ guard: &mut Guard<'_, ProcessInner, SpinLockBackend>,
+ ) -> Result<(), (BinderError, DLArc<dyn DeliverToRead>)> {
+ if guard.is_dead {
+ return Err((BinderError::new_dead(), transaction));
+ }
+
+ let inner = self.inner.access_mut(guard);
+ if inner.has_oneway_transaction {
+ inner.oneway_todo.push_back(transaction);
+ } else {
+ inner.has_oneway_transaction = true;
+ guard.push_work(transaction)?;
+ }
+ Ok(())
+ }
+
+ pub(crate) fn release(&self) {
+ let mut guard = self.owner.inner.lock();
+ while let Some(work) = self.inner.access_mut(&mut guard).oneway_todo.pop_front() {
+ drop(guard);
+ work.into_arc().cancel();
+ guard = self.owner.inner.lock();
+ }
+
+ let death_list = core::mem::take(&mut self.inner.access_mut(&mut guard).death_list);
+ drop(guard);
+ for death in death_list {
+ death.into_arc().set_dead();
+ }
+ }
+
+ pub(crate) fn pending_oneway_finished(&self) {
+ let mut guard = self.owner.inner.lock();
+ if guard.is_dead {
+ // Cleanup will happen in `Process::deferred_release`.
+ return;
+ }
+
+ let inner = self.inner.access_mut(&mut guard);
+
+ let transaction = inner.oneway_todo.pop_front();
+ inner.has_oneway_transaction = transaction.is_some();
+ if let Some(transaction) = transaction {
+ match guard.push_work(transaction) {
+ Ok(()) => {}
+ Err((_err, work)) => {
+ // Process is dead.
+ // This shouldn't happen due to the `is_dead` check, but if it does, just drop
+ // the transaction and return.
+ drop(guard);
+ drop(work);
+ }
+ }
+ }
+ }
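+
+    // Editor's trace (illustrative) of the serialization implemented by `submit_oneway` and
+    // `pending_oneway_finished`:
+    //
+    //     submit_oneway(t1)   -> delivered now; has_oneway_transaction = true
+    //     submit_oneway(t2)   -> queued on `oneway_todo`
+    //     BC_FREE_BUFFER(t1)  -> pending_oneway_finished() delivers t2
+    //     BC_FREE_BUFFER(t2)  -> pending_oneway_finished(); queue empty, flag cleared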
+
+ /// Finds an outdated transaction that the given transaction can replace.
+ ///
+ /// If one is found, it is removed from the list and returned.
+ pub(crate) fn take_outdated_transaction(
+ &self,
+ new: &Transaction,
+ guard: &mut Guard<'_, ProcessInner, SpinLockBackend>,
+ ) -> Option<DLArc<Transaction>> {
+ let inner = self.inner.access_mut(guard);
+ let mut cursor = inner.oneway_todo.cursor_front();
+ while let Some(next) = cursor.peek_next() {
+ if new.can_replace(&next) {
+ return Some(next.remove());
+ }
+ cursor.move_next();
+ }
+ None
+ }
+
+ /// This is split into a separate function since it's called by both `Node::do_work` and
+ /// `NodeWrapper::do_work`.
+ fn do_work_locked(
+ &self,
+ writer: &mut BinderReturnWriter<'_>,
+ mut guard: Guard<'_, ProcessInner, SpinLockBackend>,
+ ) -> Result<bool> {
+ let inner = self.inner.access_mut(&mut guard);
+ let strong = inner.strong.count > 0;
+ let has_strong = inner.strong.has_count;
+ let weak = strong || inner.weak.count > 0;
+ let has_weak = inner.weak.has_count;
+
+ if weak && !has_weak {
+ inner.weak.has_count = true;
+ inner.active_inc_refs += 1;
+ }
+
+ if strong && !has_strong {
+ inner.strong.has_count = true;
+ inner.active_inc_refs += 1;
+ }
+
+ let no_active_inc_refs = inner.active_inc_refs == 0;
+ let should_drop_weak = no_active_inc_refs && (!weak && has_weak);
+ let should_drop_strong = no_active_inc_refs && (!strong && has_strong);
+ if should_drop_weak {
+ inner.weak.has_count = false;
+ }
+ if should_drop_strong {
+ inner.strong.has_count = false;
+ }
+ if no_active_inc_refs && !weak {
+ // Remove the node if there are no references to it.
+ guard.remove_node(self.ptr);
+ }
+ drop(guard);
+
+ if weak && !has_weak {
+ self.write(writer, BR_INCREFS)?;
+ }
+ if strong && !has_strong {
+ self.write(writer, BR_ACQUIRE)?;
+ }
+ if should_drop_strong {
+ self.write(writer, BR_RELEASE)?;
+ }
+ if should_drop_weak {
+ self.write(writer, BR_DECREFS)?;
+ }
+
+ Ok(true)
+ }
+
+ pub(crate) fn add_freeze_listener(
+ &self,
+ process: &Arc<Process>,
+ flags: kernel::alloc::Flags,
+ ) -> Result {
+ let mut vec_alloc = KVVec::<Arc<Process>>::new();
+ loop {
+ let mut guard = self.owner.inner.lock();
+ // Do not check `guard.is_dead` here. The `is_dead` flag that matters is that of the
+ // listener's owner, not the target.
+ let inner = self.inner.access_mut(&mut guard);
+ let len = inner.freeze_list.len();
+ if len >= inner.freeze_list.capacity() {
+ if len >= vec_alloc.capacity() {
+ drop(guard);
+ vec_alloc = KVVec::with_capacity((1 + len).next_power_of_two(), flags)?;
+ continue;
+ }
+ mem::swap(&mut inner.freeze_list, &mut vec_alloc);
+ for elem in vec_alloc.drain_all() {
+ inner.freeze_list.push_within_capacity(elem)?;
+ }
+ }
+ inner.freeze_list.push_within_capacity(process.clone())?;
+ return Ok(());
+ }
+ }
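+
+    // Editor's note: the loop above is the usual "allocate outside the spinlock" retry pattern.
+    // A distilled sketch of its shape (names assumed):
+    //
+    //     loop {
+    //         let guard = lock.lock();
+    //         if has_capacity(&guard) {
+    //             return push_within_capacity(guard, elem);
+    //         }
+    //         drop(guard);                   // never allocate under a spinlock
+    //         grow_spare_capacity(flags)?;   // may sleep with GFP_KERNEL
+    //     }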
+
+ pub(crate) fn remove_freeze_listener(&self, p: &Arc<Process>) {
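+        // Declared before `guard` so that, with locals dropped in reverse declaration order, the
+        // vector's buffer is freed only after the spinlock has been released.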
+ let _unused_capacity;
+ let mut guard = self.owner.inner.lock();
+ let inner = self.inner.access_mut(&mut guard);
+ let len = inner.freeze_list.len();
+ inner.freeze_list.retain(|proc| !Arc::ptr_eq(proc, p));
+ if len == inner.freeze_list.len() {
+ pr_warn!(
+ "Could not remove freeze listener for {}\n",
+ p.pid_in_current_ns()
+ );
+ }
+ if inner.freeze_list.is_empty() {
+ _unused_capacity = mem::replace(&mut inner.freeze_list, KVVec::new());
+ }
+ }
+
+ pub(crate) fn freeze_list<'a>(&'a self, guard: &'a ProcessInner) -> &'a [Arc<Process>] {
+ &self.inner.access(guard).freeze_list
+ }
+}
+
+impl DeliverToRead for Node {
+ fn do_work(
+ self: DArc<Self>,
+ _thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ let mut owner_inner = self.owner.inner.lock();
+ let inner = self.inner.access_mut(&mut owner_inner);
+
+ assert!(inner.delivery_state.has_pushed_node);
+ if inner.delivery_state.has_pushed_wrapper {
+ // If the wrapper is scheduled, then we are either a normal push or weak zero2one
+ // increment, and the wrapper is a strong zero2one increment, so the wrapper always
+ // takes precedence over us.
+ assert!(inner.delivery_state.has_strong_zero2one);
+ inner.delivery_state.has_pushed_node = false;
+ inner.delivery_state.has_weak_zero2one = false;
+ return Ok(true);
+ }
+
+ inner.delivery_state.has_pushed_node = false;
+ inner.delivery_state.has_weak_zero2one = false;
+ inner.delivery_state.has_strong_zero2one = false;
+
+ self.do_work_locked(writer, owner_inner)
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ #[inline(never)]
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ seq_print!(
+ m,
+ "{}node work {}: u{:016x} c{:016x}\n",
+ prefix,
+ self.debug_id,
+ self.ptr,
+ self.cookie,
+ );
+ Ok(())
+ }
+}
+
+/// Represents something that holds one or more ref-counts to a `Node`.
+///
+/// Whenever process A holds a refcount to a node owned by a different process B, then process A
+/// will store a `NodeRef` that refers to the `Node` in process B. When process A releases the
+/// refcount, we destroy the NodeRef, which decrements the ref-count in process A.
+///
+/// This type is also used for some other cases. For example, a transaction allocation holds a
+/// refcount on the target node, and this is implemented by storing a `NodeRef` in the allocation
+/// so that the destructor of the allocation will drop a refcount on the `Node`.
+pub(crate) struct NodeRef {
+ pub(crate) node: DArc<Node>,
+ /// How many times does this NodeRef hold a refcount on the Node?
+ strong_node_count: usize,
+ weak_node_count: usize,
+ /// How many times does userspace hold a refcount on this NodeRef?
+ strong_count: usize,
+ weak_count: usize,
+}
+
+impl NodeRef {
+ pub(crate) fn new(node: DArc<Node>, strong_count: usize, weak_count: usize) -> Self {
+ Self {
+ node,
+ strong_node_count: strong_count,
+ weak_node_count: weak_count,
+ strong_count,
+ weak_count,
+ }
+ }
+
+ pub(crate) fn absorb(&mut self, mut other: Self) {
+ assert!(
+ Arc::ptr_eq(&self.node, &other.node),
+ "absorb called with differing nodes"
+ );
+ self.strong_node_count += other.strong_node_count;
+ self.weak_node_count += other.weak_node_count;
+ self.strong_count += other.strong_count;
+ self.weak_count += other.weak_count;
+ other.strong_count = 0;
+ other.weak_count = 0;
+ other.strong_node_count = 0;
+ other.weak_node_count = 0;
+
+ if self.strong_node_count >= 2 || self.weak_node_count >= 2 {
+ let mut guard = self.node.owner.inner.lock();
+ let inner = self.node.inner.access_mut(&mut guard);
+
+ if self.strong_node_count >= 2 {
+ inner.strong.count -= self.strong_node_count - 1;
+ self.strong_node_count = 1;
+ assert_ne!(inner.strong.count, 0);
+ }
+ if self.weak_node_count >= 2 {
+ inner.weak.count -= self.weak_node_count - 1;
+ self.weak_node_count = 1;
+ assert_ne!(inner.weak.count, 0);
+ }
+ }
+ }
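+
+    // Editor's worked example for `absorb` (illustrative numbers): if `self` and `other` each
+    // hold strong_node_count == 1, the merged NodeRef briefly holds two refcounts on the Node;
+    // the block above returns the surplus, decrementing the node's `strong.count` by one and
+    // collapsing `strong_node_count` back to 1.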
+
+ pub(crate) fn get_count(&self) -> (usize, usize) {
+ (self.strong_count, self.weak_count)
+ }
+
+ pub(crate) fn clone(&self, strong: bool) -> Result<NodeRef> {
+ if strong && self.strong_count == 0 {
+ return Err(EINVAL);
+ }
+ Ok(self
+ .node
+ .owner
+ .inner
+ .lock()
+ .new_node_ref(self.node.clone(), strong, None))
+ }
+
+ /// Updates (increments or decrements) the number of references held against the node. If the
+ /// count being updated transitions from 0 to 1 or from 1 to 0, the node is notified by having
+ /// its `update_refcount` function called.
+ ///
+ /// Returns whether `self` should be removed (when both counts are zero).
+ pub(crate) fn update(&mut self, inc: bool, strong: bool) -> bool {
+ if strong && self.strong_count == 0 {
+ return false;
+ }
+ let (count, node_count, other_count) = if strong {
+ (
+ &mut self.strong_count,
+ &mut self.strong_node_count,
+ self.weak_count,
+ )
+ } else {
+ (
+ &mut self.weak_count,
+ &mut self.weak_node_count,
+ self.strong_count,
+ )
+ };
+ if inc {
+ if *count == 0 {
+ *node_count = 1;
+ self.node.update_refcount(true, 1, strong);
+ }
+ *count += 1;
+ } else {
+ if *count == 0 {
+ pr_warn!(
+ "pid {} performed invalid decrement on ref\n",
+ kernel::current!().pid()
+ );
+ return false;
+ }
+ *count -= 1;
+ if *count == 0 {
+ self.node.update_refcount(false, *node_count, strong);
+ *node_count = 0;
+ return other_count == 0;
+ }
+ }
+ false
+ }
+}
+
+impl Drop for NodeRef {
+ // This destructor is called conditionally from `Allocation::drop`. That branch is often
+ // mispredicted. Inlining this method call reduces the cost of those branch mispredictions.
+ #[inline(always)]
+ fn drop(&mut self) {
+ if self.strong_node_count > 0 {
+ self.node
+ .update_refcount(false, self.strong_node_count, true);
+ }
+ if self.weak_node_count > 0 {
+ self.node
+ .update_refcount(false, self.weak_node_count, false);
+ }
+ }
+}
+
+struct NodeDeathInner {
+ dead: bool,
+ cleared: bool,
+ notification_done: bool,
+ /// Indicates whether the normal flow was interrupted by removing the handle. In this case, we
+ /// need to behave as if the death notification didn't exist (i.e., we don't deliver anything
+ /// to the user).
+ aborted: bool,
+}
+
+/// Used to deliver notifications when a process dies.
+///
+/// A process can request to be notified when a process dies using `BC_REQUEST_DEATH_NOTIFICATION`.
+/// This will make the driver send a `BR_DEAD_BINDER` to userspace when the process dies (or
+/// immediately if it is already dead). Userspace is supposed to respond with `BC_DEAD_BINDER_DONE`
+/// once it has processed the notification.
+///
+/// Userspace can unregister from death notifications using the `BC_CLEAR_DEATH_NOTIFICATION`
+/// command. In this case, the kernel will respond with `BR_CLEAR_DEATH_NOTIFICATION_DONE` once the
+/// notification has been removed. Note that if the remote process dies before the kernel has
+/// responded with `BR_CLEAR_DEATH_NOTIFICATION_DONE`, then the kernel will still send a
+/// `BR_DEAD_BINDER`, which userspace must be able to process. In this case, the kernel will wait
+/// for the `BC_DEAD_BINDER_DONE` command before it sends `BR_CLEAR_DEATH_NOTIFICATION_DONE`.
+///
+/// Note that even if the kernel sends a `BR_DEAD_BINDER`, this does not remove the death
+/// notification. Userspace must still remove it manually using `BC_CLEAR_DEATH_NOTIFICATION`.
+///
+/// If a process uses `BC_RELEASE` to destroy its last refcount on a node that has an active death
+/// registration, then the death registration is immediately deleted (we implement this using the
+/// `aborted` field). However, userspace is not supposed to delete a `NodeRef` without first
+/// deregistering death notifications, so this codepath is not executed under normal circumstances.
+#[pin_data]
+pub(crate) struct NodeDeath {
+ node: DArc<Node>,
+ process: Arc<Process>,
+ pub(crate) cookie: u64,
+ #[pin]
+ links_track: AtomicTracker<0>,
+ /// Used by the owner `Node` to store a list of registered death notifications.
+ ///
+ /// # Invariants
+ ///
+ /// Only ever used with the `death_list` list of `self.node`.
+ #[pin]
+ death_links: ListLinks<1>,
+ /// Used by the process to keep track of the death notifications for which we have sent a
+ /// `BR_DEAD_BINDER` but not yet received a `BC_DEAD_BINDER_DONE`.
+ ///
+ /// # Invariants
+ ///
+ /// Only ever used with the `delivered_deaths` list of `self.process`.
+ #[pin]
+ delivered_links: ListLinks<2>,
+ #[pin]
+ delivered_links_track: AtomicTracker<2>,
+ #[pin]
+ inner: SpinLock<NodeDeathInner>,
+}
+
+impl NodeDeath {
+ /// Constructs a new node death notification object.
+ pub(crate) fn new(
+ node: DArc<Node>,
+ process: Arc<Process>,
+ cookie: u64,
+ ) -> impl PinInit<DTRWrap<Self>> {
+ DTRWrap::new(pin_init!(
+ Self {
+ node,
+ process,
+ cookie,
+ links_track <- AtomicTracker::new(),
+ death_links <- ListLinks::new(),
+ delivered_links <- ListLinks::new(),
+ delivered_links_track <- AtomicTracker::new(),
+ inner <- kernel::new_spinlock!(NodeDeathInner {
+ dead: false,
+ cleared: false,
+ notification_done: false,
+ aborted: false,
+ }, "NodeDeath::inner"),
+ }
+ ))
+ }
+
+ /// Sets the cleared flag to `true`.
+ ///
+ /// It removes `self` from the node's death notification list if needed.
+ ///
+ /// Returns whether it needs to be queued.
+ pub(crate) fn set_cleared(self: &DArc<Self>, abort: bool) -> bool {
+ let (needs_removal, needs_queueing) = {
+ // Update state and determine if we need to queue a work item. We only need to do it
+ // when the node is not dead or if the user already completed the death notification.
+ let mut inner = self.inner.lock();
+ if abort {
+ inner.aborted = true;
+ }
+ if inner.cleared {
+ // Already cleared.
+ return false;
+ }
+ inner.cleared = true;
+ (!inner.dead, !inner.dead || inner.notification_done)
+ };
+
+ // Remove death notification from node.
+ if needs_removal {
+ let mut owner_inner = self.node.owner.inner.lock();
+ let node_inner = self.node.inner.access_mut(&mut owner_inner);
+ // SAFETY: A `NodeDeath` is never inserted into the death list of any node other than
+ // its owner, so it is either in this death list or in no death list.
+ unsafe { node_inner.death_list.remove(self) };
+ }
+ needs_queueing
+ }
+
+ /// Sets the 'notification done' flag to `true`.
+ pub(crate) fn set_notification_done(self: DArc<Self>, thread: &Thread) {
+ let needs_queueing = {
+ let mut inner = self.inner.lock();
+ inner.notification_done = true;
+ inner.cleared
+ };
+ if needs_queueing {
+ if let Some(death) = ListArc::try_from_arc_or_drop(self) {
+ let _ = thread.push_work_if_looper(death);
+ }
+ }
+ }
+
+ /// Sets the 'dead' flag to `true` and queues work item if needed.
+ pub(crate) fn set_dead(self: DArc<Self>) {
+ let needs_queueing = {
+ let mut inner = self.inner.lock();
+ if inner.cleared {
+ false
+ } else {
+ inner.dead = true;
+ true
+ }
+ };
+ if needs_queueing {
+ // Push the death notification to the target process. There is nothing else to do if
+ // it's already dead.
+ if let Some(death) = ListArc::try_from_arc_or_drop(self) {
+ let process = death.process.clone();
+ let _ = process.push_work(death);
+ }
+ }
+ }
+}
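+
+// Editor's sketch of the command flow that the flags above implement (normal case):
+//
+//     BC_REQUEST_DEATH_NOTIFICATION
+//     ... remote process dies ...    -> set_dead(): BR_DEAD_BINDER
+//     BC_DEAD_BINDER_DONE            -> set_notification_done()
+//     BC_CLEAR_DEATH_NOTIFICATION    -> set_cleared(): BR_CLEAR_DEATH_NOTIFICATION_DONE
+//
+// If userspace clears before the death, set_cleared() removes the registration and queues
+// BR_CLEAR_DEATH_NOTIFICATION_DONE immediately.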
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for NodeDeath {
+ tracked_by links_track: AtomicTracker;
+ }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<1> for DTRWrap<NodeDeath> { untracked; }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<1> for DTRWrap<NodeDeath> {
+ using ListLinks { self.wrapped.death_links };
+ }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<2> for DTRWrap<NodeDeath> {
+ tracked_by wrapped: NodeDeath;
+ }
+}
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<2> for NodeDeath {
+ tracked_by delivered_links_track: AtomicTracker<2>;
+ }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<2> for DTRWrap<NodeDeath> {
+ using ListLinks { self.wrapped.delivered_links };
+ }
+}
+
+impl DeliverToRead for NodeDeath {
+ fn do_work(
+ self: DArc<Self>,
+ _thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ let done = {
+ let inner = self.inner.lock();
+ if inner.aborted {
+ return Ok(true);
+ }
+ inner.cleared && (!inner.dead || inner.notification_done)
+ };
+
+ let cookie = self.cookie;
+ let cmd = if done {
+ BR_CLEAR_DEATH_NOTIFICATION_DONE
+ } else {
+ let process = self.process.clone();
+ let mut process_inner = process.inner.lock();
+ let inner = self.inner.lock();
+ if inner.aborted {
+ return Ok(true);
+ }
+ // We're still holding the inner lock, so it cannot be aborted while we insert it into
+ // the delivered list.
+ process_inner.death_delivered(self.clone());
+ BR_DEAD_BINDER
+ };
+
+ writer.write_code(cmd)?;
+ writer.write_payload(&cookie)?;
+ // DEAD_BINDER notifications can cause transactions, so stop processing work items when we
+ // get to a death notification.
+ Ok(cmd != BR_DEAD_BINDER)
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ #[inline(never)]
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ let inner = self.inner.lock();
+
+ let dead_binder = inner.dead && !inner.notification_done;
+
+ if dead_binder {
+ if inner.cleared {
+ seq_print!(m, "{}has cleared dead binder\n", prefix);
+ } else {
+ seq_print!(m, "{}has dead binder\n", prefix);
+ }
+ } else {
+ seq_print!(m, "{}has cleared death notification\n", prefix);
+ }
+
+ Ok(())
+ }
+}
diff --git a/drivers/android/binder/node/wrapper.rs b/drivers/android/binder/node/wrapper.rs
new file mode 100644
index 000000000000..43294c050502
--- /dev/null
+++ b/drivers/android/binder/node/wrapper.rs
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{list::ListArc, prelude::*, seq_file::SeqFile, seq_print, sync::UniqueArc};
+
+use crate::{node::Node, thread::Thread, BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverToRead};
+
+use core::mem::MaybeUninit;
+
+pub(crate) struct CritIncrWrapper {
+ inner: UniqueArc<MaybeUninit<DTRWrap<NodeWrapper>>>,
+}
+
+impl CritIncrWrapper {
+ pub(crate) fn new() -> Result<Self> {
+ Ok(CritIncrWrapper {
+ inner: UniqueArc::new_uninit(GFP_KERNEL)?,
+ })
+ }
+
+ pub(super) fn init(self, node: DArc<Node>) -> DLArc<dyn DeliverToRead> {
+ match self.inner.pin_init_with(DTRWrap::new(NodeWrapper { node })) {
+ Ok(initialized) => ListArc::from(initialized) as _,
+ Err(err) => match err {},
+ }
+ }
+}
+
+struct NodeWrapper {
+ node: DArc<Node>,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for NodeWrapper {
+ untracked;
+ }
+}
+
+impl DeliverToRead for NodeWrapper {
+ fn do_work(
+ self: DArc<Self>,
+ _thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ let node = &self.node;
+ let mut owner_inner = node.owner.inner.lock();
+ let inner = node.inner.access_mut(&mut owner_inner);
+
+ let ds = &mut inner.delivery_state;
+
+ assert!(ds.has_pushed_wrapper);
+ assert!(ds.has_strong_zero2one);
+ ds.has_pushed_wrapper = false;
+ ds.has_strong_zero2one = false;
+
+ node.do_work_locked(writer, owner_inner)
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ #[inline(never)]
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ seq_print!(
+ m,
+ "{}node work {}: u{:016x} c{:016x}\n",
+ prefix,
+ self.node.debug_id,
+ self.node.ptr,
+ self.node.cookie,
+ );
+ Ok(())
+ }
+}
diff --git a/drivers/android/binder/page_range.rs b/drivers/android/binder/page_range.rs
new file mode 100644
index 000000000000..9379038f61f5
--- /dev/null
+++ b/drivers/android/binder/page_range.rs
@@ -0,0 +1,734 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! This module has utilities for managing a page range where unused pages may be reclaimed by a
+//! vma shrinker.
+
+// To avoid deadlocks, locks are taken in the order:
+//
+// 1. mmap lock
+// 2. spinlock
+// 3. lru spinlock
+//
+// The shrinker will use trylock methods because it locks them in a different order.
+
+use core::{
+ marker::PhantomPinned,
+ mem::{size_of, size_of_val, MaybeUninit},
+ ptr,
+};
+
+use kernel::{
+ bindings,
+ error::Result,
+ ffi::{c_ulong, c_void},
+ mm::{virt, Mm, MmWithUser},
+ new_mutex, new_spinlock,
+ page::{Page, PAGE_SHIFT, PAGE_SIZE},
+ prelude::*,
+ str::CStr,
+ sync::{aref::ARef, Mutex, SpinLock},
+ task::Pid,
+ transmute::FromBytes,
+ types::Opaque,
+ uaccess::UserSliceReader,
+};
+
+/// Represents a shrinker that can be registered with the kernel.
+///
+/// Each shrinker can be used by many `ShrinkablePageRange` objects.
+#[repr(C)]
+pub(crate) struct Shrinker {
+ inner: Opaque<*mut bindings::shrinker>,
+ list_lru: Opaque<bindings::list_lru>,
+}
+
+// SAFETY: The shrinker and list_lru are thread safe.
+unsafe impl Send for Shrinker {}
+// SAFETY: The shrinker and list_lru are thread safe.
+unsafe impl Sync for Shrinker {}
+
+impl Shrinker {
+ /// Create a new shrinker.
+ ///
+ /// # Safety
+ ///
+ /// Before using this shrinker with a `ShrinkablePageRange`, the `register` method must have
+ /// been called exactly once, and it must not have returned an error.
+ pub(crate) const unsafe fn new() -> Self {
+ Self {
+ inner: Opaque::uninit(),
+ list_lru: Opaque::uninit(),
+ }
+ }
+
+ /// Register this shrinker with the kernel.
+ pub(crate) fn register(&'static self, name: &CStr) -> Result<()> {
+ // SAFETY: These fields are not yet used, so it's okay to zero them.
+ unsafe {
+ self.inner.get().write(ptr::null_mut());
+ self.list_lru.get().write_bytes(0, 1);
+ }
+
+ // SAFETY: The field is not yet used, so we can initialize it.
+ let ret = unsafe { bindings::__list_lru_init(self.list_lru.get(), false, ptr::null_mut()) };
+ if ret != 0 {
+ return Err(Error::from_errno(ret));
+ }
+
+ // SAFETY: The `name` points at a valid c string.
+ let shrinker = unsafe { bindings::shrinker_alloc(0, name.as_char_ptr()) };
+ if shrinker.is_null() {
+ // SAFETY: We initialized it, so it's okay to destroy it.
+ unsafe { bindings::list_lru_destroy(self.list_lru.get()) };
+ // `ret` is zero at this point, so report the allocation failure explicitly.
+ return Err(ENOMEM);
+ }
+
+ // SAFETY: We're about to register the shrinker, and these are the fields we need to
+ // initialize. (All other fields are already zeroed.)
+ unsafe {
+ (&raw mut (*shrinker).count_objects).write(Some(rust_shrink_count));
+ (&raw mut (*shrinker).scan_objects).write(Some(rust_shrink_scan));
+ (&raw mut (*shrinker).private_data).write(self.list_lru.get().cast());
+ }
+
+ // SAFETY: The new shrinker has been fully initialized, so we can register it.
+ unsafe { bindings::shrinker_register(shrinker) };
+
+ // SAFETY: This initializes the pointer to the shrinker so that we can use it.
+ unsafe { self.inner.get().write(shrinker) };
+
+ Ok(())
+ }
+}
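+
+// Editor's sketch (not part of the patch) of the intended one-time setup; the static's name and
+// the shrinker name are assumptions:
+//
+//     static BINDER_SHRINKER: Shrinker = unsafe { Shrinker::new() };
+//
+//     fn init() -> Result<()> {
+//         // Must run exactly once, before any `ShrinkablePageRange` uses the shrinker.
+//         BINDER_SHRINKER.register(kernel::c_str!("android-binder"))
+//     }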
+
+/// A container that manages a page range in a vma.
+///
+/// The pages can be thought of as an array of booleans of whether the pages are usable. The
+/// methods `use_range` and `stop_using_range` set all booleans in a range to true or false
+/// respectively. Initially, no pages are allocated. When a page is not used, it is not freed
+/// immediately. Instead, it is made available to the memory shrinker to free it if the device is
+/// under memory pressure.
+///
+/// It's okay for `use_range` and `stop_using_range` to race with each other, although there's no
+/// way to know whether an index ends up with true or false if a call to `use_range` races with
+/// another call to `stop_using_range` on a given index.
+///
+/// It's also okay for the two methods to race with themselves, e.g. if two threads call
+/// `use_range` on the same index, then that's fine and neither call will return until the page is
+/// allocated and mapped.
+///
+/// The methods that read or write to a range require that the page is marked as in use. So it is
+/// _not_ okay to call `stop_using_range` on a page that is in use by the methods that read or
+/// write to the page.
+#[pin_data(PinnedDrop)]
+pub(crate) struct ShrinkablePageRange {
+ /// Shrinker object registered with the kernel.
+ shrinker: &'static Shrinker,
+ /// Pid using this page range. Only used as debugging information.
+ pid: Pid,
+ /// The mm for the relevant process.
+ mm: ARef<Mm>,
+ /// Used to synchronize calls to `vm_insert_page` and `zap_page_range_single`.
+ #[pin]
+ mm_lock: Mutex<()>,
+ /// Spinlock protecting changes to pages.
+ #[pin]
+ lock: SpinLock<Inner>,
+
+ /// Must not move, since page info has pointers back.
+ #[pin]
+ _pin: PhantomPinned,
+}
+
+struct Inner {
+ /// Array of pages.
+ ///
+ /// Since this is also accessed by the shrinker, we can't use a `Box`, which asserts exclusive
+ /// ownership. To deal with that, we manage it using raw pointers.
+ pages: *mut PageInfo,
+ /// Length of the `pages` array.
+ size: usize,
+ /// The address of the vma to insert the pages into.
+ vma_addr: usize,
+}
+
+// SAFETY: proper locking is in place for `Inner`
+unsafe impl Send for Inner {}
+
+type StableMmGuard =
+ kernel::sync::lock::Guard<'static, (), kernel::sync::lock::mutex::MutexBackend>;
+
+/// An array element that describes the current state of a page.
+///
+/// There are three states:
+///
+/// * Free. The page is None. The `lru` element is not queued.
+/// * Available. The page is Some. The `lru` element is queued to the shrinker's lru.
+/// * Used. The page is Some. The `lru` element is not queued.
+///
+/// When an element is available, the shrinker is able to free the page.
+#[repr(C)]
+struct PageInfo {
+ lru: bindings::list_head,
+ page: Option<Page>,
+ range: *const ShrinkablePageRange,
+}
+
+impl PageInfo {
+ /// # Safety
+ ///
+ /// The caller ensures that writing to `me.page` is ok, and that the page is not currently set.
+ unsafe fn set_page(me: *mut PageInfo, page: Page) {
+ // SAFETY: This pointer offset is in bounds.
+ let ptr = unsafe { &raw mut (*me).page };
+
+ // SAFETY: The pointer is valid for writing, so also valid for reading.
+ if unsafe { (*ptr).is_some() } {
+ pr_err!("set_page called when there is already a page");
+ // SAFETY: We will initialize the page again below.
+ unsafe { ptr::drop_in_place(ptr) };
+ }
+
+ // SAFETY: The pointer is valid for writing.
+ unsafe { ptr::write(ptr, Some(page)) };
+ }
+
+ /// # Safety
+ ///
+ /// The caller ensures that reading from `me.page` is ok for the duration of 'a.
+ unsafe fn get_page<'a>(me: *const PageInfo) -> Option<&'a Page> {
+ // SAFETY: This pointer offset is in bounds.
+ let ptr = unsafe { &raw const (*me).page };
+
+ // SAFETY: The pointer is valid for reading.
+ unsafe { (*ptr).as_ref() }
+ }
+
+ /// # Safety
+ ///
+ /// The caller ensures that writing to `me.page` is ok for the duration of 'a.
+ unsafe fn take_page(me: *mut PageInfo) -> Option<Page> {
+ // SAFETY: This pointer offset is in bounds.
+ let ptr = unsafe { &raw mut (*me).page };
+
+ // SAFETY: The pointer is valid for reading and writing.
+ unsafe { (*ptr).take() }
+ }
+
+ /// Add this page to the lru list, if not already in the list.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must be valid, and it must be the right shrinker and nid.
+ unsafe fn list_lru_add(me: *mut PageInfo, nid: i32, shrinker: &'static Shrinker) {
+ // SAFETY: This pointer offset is in bounds.
+ let lru_ptr = unsafe { &raw mut (*me).lru };
+ // SAFETY: The lru pointer is valid, and we're not using it with any other lru list.
+ unsafe { bindings::list_lru_add(shrinker.list_lru.get(), lru_ptr, nid, ptr::null_mut()) };
+ }
+
+ /// Remove this page from the lru list, if it is in the list.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must be valid, and it must be the right shrinker and nid.
+ unsafe fn list_lru_del(me: *mut PageInfo, nid: i32, shrinker: &'static Shrinker) {
+ // SAFETY: This pointer offset is in bounds.
+ let lru_ptr = unsafe { &raw mut (*me).lru };
+ // SAFETY: The lru pointer is valid, and we're not using it with any other lru list.
+ unsafe { bindings::list_lru_del(shrinker.list_lru.get(), lru_ptr, nid, ptr::null_mut()) };
+ }
+}
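+
+// Editor's summary (illustrative) of the transitions between the three states above:
+//
+//     Free      { page: None,    off lru }  --use_page_slow-------->  Used
+//     Used      { page: Some(_), off lru }  --stop_using_range----->  Available
+//     Available { page: Some(_), on lru  }  --use_range------------>  Used
+//     Available                             --shrinker frees page-->  Free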
+
+impl ShrinkablePageRange {
+ /// Create a new `ShrinkablePageRange` using the given shrinker.
+ pub(crate) fn new(shrinker: &'static Shrinker) -> impl PinInit<Self, Error> {
+ try_pin_init!(Self {
+ shrinker,
+ pid: kernel::current!().pid(),
+ mm: ARef::from(&**kernel::current!().mm().ok_or(ESRCH)?),
+ mm_lock <- new_mutex!((), "ShrinkablePageRange::mm"),
+ lock <- new_spinlock!(Inner {
+ pages: ptr::null_mut(),
+ size: 0,
+ vma_addr: 0,
+ }, "ShrinkablePageRange"),
+ _pin: PhantomPinned,
+ })
+ }
+
+ pub(crate) fn stable_trylock_mm(&self) -> Option<StableMmGuard> {
+ // SAFETY: This extends the duration of the reference. Since this call happens before
+ // `mm_lock` is taken in the destructor of `ShrinkablePageRange`, the destructor will block
+ // until the returned guard is dropped. This ensures that the guard is valid until dropped.
+ let mm_lock = unsafe { &*ptr::from_ref(&self.mm_lock) };
+
+ mm_lock.try_lock()
+ }
+
+ /// Register a vma with this page range. Returns the size of the region.
+ pub(crate) fn register_with_vma(&self, vma: &virt::VmaNew) -> Result<usize> {
+ let num_bytes = usize::min(vma.end() - vma.start(), bindings::SZ_4M as usize);
+ let num_pages = num_bytes >> PAGE_SHIFT;
+
+ if !ptr::eq::<Mm>(&*self.mm, &**vma.mm()) {
+ pr_debug!("Failed to register with vma: invalid vma->vm_mm");
+ return Err(EINVAL);
+ }
+ if num_pages == 0 {
+ pr_debug!("Failed to register with vma: size zero");
+ return Err(EINVAL);
+ }
+
+ let mut pages = KVVec::<PageInfo>::with_capacity(num_pages, GFP_KERNEL)?;
+
+ // SAFETY: This just initializes the pages array.
+ unsafe {
+ let self_ptr = self as *const ShrinkablePageRange;
+ for i in 0..num_pages {
+ let info = pages.as_mut_ptr().add(i);
+ (&raw mut (*info).range).write(self_ptr);
+ (&raw mut (*info).page).write(None);
+ let lru = &raw mut (*info).lru;
+ (&raw mut (*lru).next).write(lru);
+ (&raw mut (*lru).prev).write(lru);
+ }
+ }
+
+ let mut inner = self.lock.lock();
+ if inner.size > 0 {
+ pr_debug!("Failed to register with vma: already registered");
+ drop(inner);
+ return Err(EBUSY);
+ }
+
+ inner.pages = pages.into_raw_parts().0;
+ inner.size = num_pages;
+ inner.vma_addr = vma.start();
+
+ Ok(num_pages)
+ }
+
+ /// Make sure that the given pages are allocated and mapped.
+ ///
+ /// Must not be called from an atomic context.
+ pub(crate) fn use_range(&self, start: usize, end: usize) -> Result<()> {
+ if start >= end {
+ return Ok(());
+ }
+ let mut inner = self.lock.lock();
+ assert!(end <= inner.size);
+
+ for i in start..end {
+ // SAFETY: This pointer offset is in bounds.
+ let page_info = unsafe { inner.pages.add(i) };
+
+ // SAFETY: The pointer is valid, and we hold the lock so reading from the page is okay.
+ if let Some(page) = unsafe { PageInfo::get_page(page_info) } {
+ // Since we're going to use the page, we should remove it from the lru list so that
+ // the shrinker will not free it.
+ //
+ // SAFETY: The pointer is valid, and this is the right shrinker.
+ //
+ // The shrinker can't free the page between the check and this call to
+ // `list_lru_del` because we hold the lock.
+ unsafe { PageInfo::list_lru_del(page_info, page.nid(), self.shrinker) };
+ } else {
+ // We have to allocate a new page. Use the slow path.
+ drop(inner);
+ // SAFETY: `i < end <= inner.size` so `i` is in bounds.
+ match unsafe { self.use_page_slow(i) } {
+ Ok(()) => {}
+ Err(err) => {
+ pr_warn!("Error in use_page_slow: {:?}", err);
+ return Err(err);
+ }
+ }
+ inner = self.lock.lock();
+ }
+ }
+ Ok(())
+ }
+
+ /// Mark the given page as in use, slow path.
+ ///
+ /// Must not be called from an atomic context.
+ ///
+ /// # Safety
+ ///
+ /// Assumes that `i` is in bounds.
+ #[cold]
+ unsafe fn use_page_slow(&self, i: usize) -> Result<()> {
+ let new_page = Page::alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO)?;
+
+ let mm_mutex = self.mm_lock.lock();
+ let inner = self.lock.lock();
+
+ // SAFETY: This pointer offset is in bounds.
+ let page_info = unsafe { inner.pages.add(i) };
+
+ // SAFETY: The pointer is valid, and we hold the lock so reading from the page is okay.
+ if let Some(page) = unsafe { PageInfo::get_page(page_info) } {
+ // The page was already there, or someone else added the page while we didn't hold the
+ // spinlock.
+ //
+ // SAFETY: The pointer is valid, and this is the right shrinker.
+ //
+ // The shrinker can't free the page between the check and this call to
+ // `list_lru_del` because we hold the lock.
+ unsafe { PageInfo::list_lru_del(page_info, page.nid(), self.shrinker) };
+ return Ok(());
+ }
+
+ let vma_addr = inner.vma_addr;
+ // Release the spinlock while we insert the page into the vma.
+ drop(inner);
+
+ // No overflow since we stay in bounds of the vma.
+ let user_page_addr = vma_addr + (i << PAGE_SHIFT);
+
+ // We use `mmput_async` when dropping the `mm` because `use_page_slow` is usually used from
+ // a remote process. If the call to `mmput` races with the process shutting down, then the
+ // caller of `use_page_slow` becomes responsible for cleaning up the `mm`, which doesn't
+ // happen until it returns to userspace. However, the caller might instead go to sleep and
+ // wait for the owner of the `mm` to wake it up, which doesn't happen because it's in the
+ // middle of a shutdown process that won't complete until the `mm` is dropped. This can
+ // amount to a deadlock.
+ //
+ // Using `mmput_async` avoids this, because then the `mm` cleanup is instead queued to a
+ // workqueue.
+ MmWithUser::into_mmput_async(self.mm.mmget_not_zero().ok_or(ESRCH)?)
+ .mmap_read_lock()
+ .vma_lookup(vma_addr)
+ .ok_or(ESRCH)?
+ .as_mixedmap_vma()
+ .ok_or(ESRCH)?
+ .vm_insert_page(user_page_addr, &new_page)
+ .inspect_err(|err| {
+ pr_warn!(
+ "Failed to vm_insert_page({}): vma_addr:{} i:{} err:{:?}",
+ user_page_addr,
+ vma_addr,
+ i,
+ err
+ )
+ })?;
+
+ let inner = self.lock.lock();
+
+ // SAFETY: The `page_info` pointer is valid and currently does not have a page. The page
+ // can be written to since we hold the lock.
+ //
+ // We released and reacquired the spinlock since we checked that the page is null, but we
+ // always hold the mm_lock mutex when setting the page to a non-null value, so it's not
+ // possible for someone else to have changed it since our check.
+ unsafe { PageInfo::set_page(page_info, new_page) };
+
+ drop(inner);
+ drop(mm_mutex);
+
+ Ok(())
+ }
+
+ /// If the given page is in use, then mark it as available so that the shrinker can free it.
+ ///
+ /// May be called from an atomic context.
+ pub(crate) fn stop_using_range(&self, start: usize, end: usize) {
+ if start >= end {
+ return;
+ }
+ let inner = self.lock.lock();
+ assert!(end <= inner.size);
+
+ for i in (start..end).rev() {
+ // SAFETY: The pointer is in bounds.
+ let page_info = unsafe { inner.pages.add(i) };
+
+ // SAFETY: Okay for reading since we have the lock.
+ if let Some(page) = unsafe { PageInfo::get_page(page_info) } {
+ // SAFETY: The pointer is valid, and it's the right shrinker.
+ unsafe { PageInfo::list_lru_add(page_info, page.nid(), self.shrinker) };
+ }
+ }
+ }
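+
+    // Editor's sketch of the caller contract (the type read here is hypothetical): a range must
+    // be marked in-use across any access, and released afterwards so the shrinker can reclaim it:
+    //
+    //     range.use_range(start, end)?;
+    //     // SAFETY: the pages in [start, end) stay in use for the whole call.
+    //     let hdr: SomeHeader = unsafe { range.read(offset)? };
+    //     range.stop_using_range(start, end);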
+
+ /// Helper for reading or writing to a range of bytes that may overlap with several pages.
+ ///
+ /// # Safety
+ ///
+ /// All pages touched by this operation must be in use for the duration of this call.
+ unsafe fn iterate<T>(&self, mut offset: usize, mut size: usize, mut cb: T) -> Result
+ where
+ T: FnMut(&Page, usize, usize) -> Result,
+ {
+ if size == 0 {
+ return Ok(());
+ }
+
+ let (pages, num_pages) = {
+ let inner = self.lock.lock();
+ (inner.pages, inner.size)
+ };
+ let num_bytes = num_pages << PAGE_SHIFT;
+
+ // Check that the request is within the buffer.
+ if offset.checked_add(size).ok_or(EFAULT)? > num_bytes {
+ return Err(EFAULT);
+ }
+
+ let mut page_index = offset >> PAGE_SHIFT;
+ offset &= PAGE_SIZE - 1;
+ while size > 0 {
+ let available = usize::min(size, PAGE_SIZE - offset);
+ // SAFETY: The pointer is in bounds.
+ let page_info = unsafe { pages.add(page_index) };
+ // SAFETY: The caller guarantees that this page is in the "in use" state for the
+ // duration of this call to `iterate`, so nobody will change the page.
+ let page = unsafe { PageInfo::get_page(page_info) };
+ if page.is_none() {
+ pr_warn!("Page is null!");
+ }
+ let page = page.ok_or(EFAULT)?;
+ cb(page, offset, available)?;
+ size -= available;
+ page_index += 1;
+ offset = 0;
+ }
+ Ok(())
+ }
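+
+    // Editor's worked example for the chunking above, assuming PAGE_SIZE == 4096: a request at
+    // offset 4000 of size 200 visits page 0 at in-page offset 4000 for 96 bytes, then page 1 at
+    // offset 0 for the remaining 104 bytes.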
+
+ /// Copy from userspace into this page range.
+ ///
+ /// # Safety
+ ///
+ /// All pages touched by this operation must be in use for the duration of this call.
+ pub(crate) unsafe fn copy_from_user_slice(
+ &self,
+ reader: &mut UserSliceReader,
+ offset: usize,
+ size: usize,
+ ) -> Result {
+ // SAFETY: `self.iterate` has the same safety requirements as `copy_from_user_slice`.
+ unsafe {
+ self.iterate(offset, size, |page, offset, to_copy| {
+ page.copy_from_user_slice_raw(reader, offset, to_copy)
+ })
+ }
+ }
+
+ /// Copy from this page range into kernel space.
+ ///
+ /// # Safety
+ ///
+ /// All pages touched by this operation must be in use for the duration of this call.
+ pub(crate) unsafe fn read<T: FromBytes>(&self, offset: usize) -> Result<T> {
+ let mut out = MaybeUninit::<T>::uninit();
+ let mut out_offset = 0;
+ // SAFETY: `self.iterate` has the same safety requirements as `read`.
+ unsafe {
+ self.iterate(offset, size_of::<T>(), |page, offset, to_copy| {
+ // SAFETY: The sum of `offset` and `to_copy` is bounded by the size of T.
+ let obj_ptr = (out.as_mut_ptr() as *mut u8).add(out_offset);
+ // SAFETY: The pointer is in-bounds of the `out` variable, so it is valid.
+ page.read_raw(obj_ptr, offset, to_copy)?;
+ out_offset += to_copy;
+ Ok(())
+ })?;
+ }
+ // SAFETY: We just initialised the data.
+ Ok(unsafe { out.assume_init() })
+ }
+
+ /// Copy from kernel space into this page range.
+ ///
+ /// # Safety
+ ///
+ /// All pages touched by this operation must be in use for the duration of this call.
+ pub(crate) unsafe fn write<T: ?Sized>(&self, offset: usize, obj: &T) -> Result {
+ let mut obj_offset = 0;
+ // SAFETY: `self.iterate` has the same safety requirements as `write`.
+ unsafe {
+ self.iterate(offset, size_of_val(obj), |page, offset, to_copy| {
+ // SAFETY: The sum of `offset` and `to_copy` is bounded by the size of T.
+ let obj_ptr = (obj as *const T as *const u8).add(obj_offset);
+ // SAFETY: We have a reference to the object, so the pointer is valid.
+ page.write_raw(obj_ptr, offset, to_copy)?;
+ obj_offset += to_copy;
+ Ok(())
+ })
+ }
+ }
+
+ /// Write zeroes to the given range.
+ ///
+ /// # Safety
+ ///
+ /// All pages touched by this operation must be in use for the duration of this call.
+ pub(crate) unsafe fn fill_zero(&self, offset: usize, size: usize) -> Result {
+ // SAFETY: `self.iterate` has the same safety requirements as `fill_zero`.
+ unsafe {
+ self.iterate(offset, size, |page, offset, len| {
+ page.fill_zero_raw(offset, len)
+ })
+ }
+ }
+}
+
+#[pinned_drop]
+impl PinnedDrop for ShrinkablePageRange {
+ fn drop(self: Pin<&mut Self>) {
+ let (pages, size) = {
+ let lock = self.lock.lock();
+ (lock.pages, lock.size)
+ };
+
+ if size == 0 {
+ return;
+ }
+
+ // Note: This call is also necessary for the safety of `stable_trylock_mm`.
+ let mm_lock = self.mm_lock.lock();
+
+ // This is the destructor, so unlike the other methods, we only need to worry about races
+ // with the shrinker here. Since we hold the `mm_lock`, we also can't race with the
+ // shrinker, and after this loop, the shrinker will not access any of our pages since we
+ // removed them from the lru list.
+ for i in 0..size {
+ // SAFETY: Loop is in-bounds of the size.
+ let p_ptr = unsafe { pages.add(i) };
+ // SAFETY: No other readers, so we can read.
+ if let Some(p) = unsafe { PageInfo::get_page(p_ptr) } {
+ // SAFETY: The pointer is valid and it's the right shrinker.
+ unsafe { PageInfo::list_lru_del(p_ptr, p.nid(), self.shrinker) };
+ }
+ }
+
+ drop(mm_lock);
+
+ // SAFETY: `pages` was allocated as an `KVVec<PageInfo>` with capacity `size`. Furthermore,
+ // all `size` elements are initialized. Also, the array is no longer shared with the
+ // shrinker due to the above loop.
+ drop(unsafe { KVVec::from_raw_parts(pages, size, size) });
+ }
+}
+
+/// # Safety
+/// Called by the shrinker.
+#[no_mangle]
+unsafe extern "C" fn rust_shrink_count(
+ shrink: *mut bindings::shrinker,
+ _sc: *mut bindings::shrink_control,
+) -> c_ulong {
+ // SAFETY: We can access our own private data.
+ let list_lru = unsafe { (*shrink).private_data.cast::<bindings::list_lru>() };
+ // SAFETY: Accessing the lru list is okay. Just an FFI call.
+ unsafe { bindings::list_lru_count(list_lru) }
+}
+
+/// # Safety
+/// Called by the shrinker.
+#[no_mangle]
+unsafe extern "C" fn rust_shrink_scan(
+ shrink: *mut bindings::shrinker,
+ sc: *mut bindings::shrink_control,
+) -> c_ulong {
+ // SAFETY: We can access our own private data.
+ let list_lru = unsafe { (*shrink).private_data.cast::<bindings::list_lru>() };
+ // SAFETY: Caller guarantees that it is safe to read this field.
+ let nr_to_scan = unsafe { (*sc).nr_to_scan };
+ // SAFETY: Accessing the lru list is okay. Just an FFI call.
+ unsafe {
+ bindings::list_lru_walk(
+ list_lru,
+ Some(bindings::rust_shrink_free_page_wrap),
+ ptr::null_mut(),
+ nr_to_scan,
+ )
+ }
+}
+
+const LRU_SKIP: bindings::lru_status = bindings::lru_status_LRU_SKIP;
+const LRU_REMOVED_ENTRY: bindings::lru_status = bindings::lru_status_LRU_REMOVED_RETRY;
+
+/// # Safety
+/// Called by the shrinker.
+#[no_mangle]
+unsafe extern "C" fn rust_shrink_free_page(
+ item: *mut bindings::list_head,
+ lru: *mut bindings::list_lru_one,
+ _cb_arg: *mut c_void,
+) -> bindings::lru_status {
+ // Fields that should survive after unlocking the lru lock.
+ let page;
+ let page_index;
+ let mm;
+ let mmap_read;
+ let mm_mutex;
+ let vma_addr;
+
+ {
+ // CAST: The `list_head` field is first in `PageInfo`.
+ let info = item as *mut PageInfo;
+ // SAFETY: The `range` field of `PageInfo` is immutable.
+ let range = unsafe { &*((*info).range) };
+
+ mm = match range.mm.mmget_not_zero() {
+ Some(mm) => MmWithUser::into_mmput_async(mm),
+ None => return LRU_SKIP,
+ };
+
+ mm_mutex = match range.stable_trylock_mm() {
+ Some(guard) => guard,
+ None => return LRU_SKIP,
+ };
+
+ mmap_read = match mm.mmap_read_trylock() {
+ Some(guard) => guard,
+ None => return LRU_SKIP,
+ };
+
+ // We can't lock it normally here, since we hold the lru lock.
+ let inner = match range.lock.try_lock() {
+ Some(inner) => inner,
+ None => return LRU_SKIP,
+ };
+
+ // SAFETY: The item is in this lru list, so it's okay to remove it.
+ unsafe { bindings::list_lru_isolate(lru, item) };
+
+ // SAFETY: Both pointers are in bounds of the same allocation.
+ page_index = unsafe { info.offset_from(inner.pages) } as usize;
+
+ // SAFETY: We hold the spinlock, so we can take the page.
+ //
+ // This sets the page pointer to zero before we unmap it from the vma. However, we call
+ // `zap_page_range` before we release the mmap lock, so `use_page_slow` will not be able to
+ // insert a new page until after our call to `zap_page_range`.
+ page = unsafe { PageInfo::take_page(info) };
+ vma_addr = inner.vma_addr;
+
+ // From this point on, we don't access this PageInfo or ShrinkablePageRange again, because
+ // they can be freed at any point after we unlock `lru_lock`. This is with the exception of
+ // `mm_mutex` which is kept alive by holding the lock.
+ }
+
+ // SAFETY: The lru lock is locked when this method is called.
+ unsafe { bindings::spin_unlock(&raw mut (*lru).lock) };
+
+ if let Some(vma) = mmap_read.vma_lookup(vma_addr) {
+ let user_page_addr = vma_addr + (page_index << PAGE_SHIFT);
+ vma.zap_page_range_single(user_page_addr, PAGE_SIZE);
+ }
+
+ drop(mmap_read);
+ drop(mm_mutex);
+ drop(mm);
+ drop(page);
+
+ // SAFETY: We just unlocked the lru lock, but it should be locked when we return.
+ unsafe { bindings::spin_lock(&raw mut (*lru).lock) };
+
+ LRU_REMOVED_ENTRY
+}
diff --git a/drivers/android/binder/page_range_helper.c b/drivers/android/binder/page_range_helper.c
new file mode 100644
index 000000000000..496887723ee0
--- /dev/null
+++ b/drivers/android/binder/page_range_helper.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* C helper for page_range.rs to work around a CFI violation.
+ *
+ * Bindgen currently pretends that `enum lru_status` is the same as an integer.
+ * This assumption is fine ABI-wise, but once you add CFI to the mix, it
+ * triggers a CFI violation because `enum lru_status` gets a different CFI tag.
+ *
+ * This file contains a workaround until bindgen can be fixed.
+ *
+ * Copyright (C) 2025 Google LLC.
+ */
+#include "page_range_helper.h"
+
+unsigned int rust_shrink_free_page(struct list_head *item,
+ struct list_lru_one *list,
+ void *cb_arg);
+
+enum lru_status
+rust_shrink_free_page_wrap(struct list_head *item, struct list_lru_one *list,
+ void *cb_arg)
+{
+ return rust_shrink_free_page(item, list, cb_arg);
+}
diff --git a/drivers/android/binder/page_range_helper.h b/drivers/android/binder/page_range_helper.h
new file mode 100644
index 000000000000..18dd2dd117b2
--- /dev/null
+++ b/drivers/android/binder/page_range_helper.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025 Google, Inc.
+ */
+
+#ifndef _LINUX_PAGE_RANGE_HELPER_H
+#define _LINUX_PAGE_RANGE_HELPER_H
+
+#include <linux/list_lru.h>
+
+enum lru_status
+rust_shrink_free_page_wrap(struct list_head *item, struct list_lru_one *list,
+ void *cb_arg);
+
+#endif /* _LINUX_PAGE_RANGE_HELPER_H */
diff --git a/drivers/android/binder/process.rs b/drivers/android/binder/process.rs
new file mode 100644
index 000000000000..f13a747e784c
--- /dev/null
+++ b/drivers/android/binder/process.rs
@@ -0,0 +1,1696 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! This module defines the `Process` type, which represents a process using a particular binder
+//! context.
+//!
+//! The `Process` object keeps track of all of the resources that this process owns in the binder
+//! context.
+//!
+//! There is one `Process` object for each binder fd that a process has opened, so processes using
+//! several binder contexts have several `Process` objects. This ensures that the contexts are
+//! fully separated.
+
+use core::mem::take;
+
+use kernel::{
+ bindings,
+ cred::Credential,
+ error::Error,
+ fs::file::{self, File},
+ list::{List, ListArc, ListArcField, ListLinks},
+ mm,
+ prelude::*,
+ rbtree::{self, RBTree, RBTreeNode, RBTreeNodeReservation},
+ seq_file::SeqFile,
+ seq_print,
+ sync::poll::PollTable,
+ sync::{
+ lock::{spinlock::SpinLockBackend, Guard},
+ Arc, ArcBorrow, CondVar, CondVarTimeoutResult, Mutex, SpinLock, UniqueArc,
+ },
+ task::Task,
+ types::ARef,
+ uaccess::{UserPtr, UserSlice, UserSliceReader},
+ uapi,
+ workqueue::{self, Work},
+};
+
+use crate::{
+ allocation::{Allocation, AllocationInfo, NewAllocation},
+ context::Context,
+ defs::*,
+ error::{BinderError, BinderResult},
+ node::{CouldNotDeliverCriticalIncrement, CritIncrWrapper, Node, NodeDeath, NodeRef},
+ page_range::ShrinkablePageRange,
+ range_alloc::{RangeAllocator, ReserveNew, ReserveNewArgs},
+ stats::BinderStats,
+ thread::{PushWorkRes, Thread},
+ BinderfsProcFile, DArc, DLArc, DTRWrap, DeliverToRead,
+};
+
+#[path = "freeze.rs"]
+mod freeze;
+use self::freeze::{FreezeCookie, FreezeListener};
+
+struct Mapping {
+ address: usize,
+ alloc: RangeAllocator<AllocationInfo>,
+}
+
+impl Mapping {
+ fn new(address: usize, size: usize) -> Self {
+ Self {
+ address,
+ alloc: RangeAllocator::new(size),
+ }
+ }
+}
+
+// Bitflags for `defer_work`.
+const PROC_DEFER_FLUSH: u8 = 1;
+const PROC_DEFER_RELEASE: u8 = 2;
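+// Both flags may be set at the same time; `WorkItem::run` below copies and
+// clears the bitmap while holding the lock, then runs each deferred action.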
+
+/// The fields of `Process` protected by the spinlock.
+pub(crate) struct ProcessInner {
+ is_manager: bool,
+ pub(crate) is_dead: bool,
+ threads: RBTree<i32, Arc<Thread>>,
+ /// INVARIANT: Threads pushed to this list must be owned by this process.
+ ready_threads: List<Thread>,
+ nodes: RBTree<u64, DArc<Node>>,
+ mapping: Option<Mapping>,
+ work: List<DTRWrap<dyn DeliverToRead>>,
+ delivered_deaths: List<DTRWrap<NodeDeath>, 2>,
+
+ /// The number of requested threads that haven't registered yet.
+ requested_thread_count: u32,
+ /// The maximum number of threads used by the process thread pool.
+ max_threads: u32,
+ /// The number of threads that have started and registered with the thread pool.
+ started_thread_count: u32,
+
+ /// Bitmap of deferred work to do.
+ defer_work: u8,
+
+ /// Number of transactions to be transmitted before processes in freeze_wait
+ /// are woken up.
+ outstanding_txns: u32,
+ /// Process is frozen and unable to service binder transactions.
+ pub(crate) is_frozen: bool,
+ /// Process received sync transactions since last frozen.
+ pub(crate) sync_recv: bool,
+ /// Process received async transactions since last frozen.
+ pub(crate) async_recv: bool,
+ pub(crate) binderfs_file: Option<BinderfsProcFile>,
+ /// Check for oneway spam
+ oneway_spam_detection_enabled: bool,
+}
+
+impl ProcessInner {
+ fn new() -> Self {
+ Self {
+ is_manager: false,
+ is_dead: false,
+ threads: RBTree::new(),
+ ready_threads: List::new(),
+ mapping: None,
+ nodes: RBTree::new(),
+ work: List::new(),
+ delivered_deaths: List::new(),
+ requested_thread_count: 0,
+ max_threads: 0,
+ started_thread_count: 0,
+ defer_work: 0,
+ outstanding_txns: 0,
+ is_frozen: false,
+ sync_recv: false,
+ async_recv: false,
+ binderfs_file: None,
+ oneway_spam_detection_enabled: false,
+ }
+ }
+
+ /// Schedule the work item for execution on this process.
+ ///
+ /// If any threads are ready for work, then the work item is given directly to that thread and
+ /// it is woken up. Otherwise, it is pushed to the process work list.
+ ///
+ /// This call can fail only if the process is dead. In this case, the work item is returned to
+ /// the caller so that the caller can drop it after releasing the inner process lock. This is
+ /// necessary since the destructor of `Transaction` will take locks that can't necessarily be
+ /// taken while holding the inner process lock.
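+ ///
+ /// Illustrative caller sketch (not part of the driver), showing the intended
+ /// pattern of dropping a rejected work item only after releasing the lock:
+ ///
+ /// ```ignore
+ /// let res = process.inner.lock().push_work(work); // guard dropped at end of statement
+ /// if let Err((err, work)) = res {
+ ///     drop(work); // no locks held here, so `Transaction`'s destructor may run
+ ///     return Err(err);
+ /// }
+ /// ```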
+ pub(crate) fn push_work(
+ &mut self,
+ work: DLArc<dyn DeliverToRead>,
+ ) -> Result<(), (BinderError, DLArc<dyn DeliverToRead>)> {
+ // Try to find a ready thread to which to push the work.
+ if let Some(thread) = self.ready_threads.pop_front() {
+ // Push to thread while holding state lock. This prevents the thread from giving up
+ // (for example, because of a signal) when we're about to deliver work.
+ match thread.push_work(work) {
+ PushWorkRes::Ok => Ok(()),
+ PushWorkRes::FailedDead(work) => Err((BinderError::new_dead(), work)),
+ }
+ } else if self.is_dead {
+ Err((BinderError::new_dead(), work))
+ } else {
+ let sync = work.should_sync_wakeup();
+
+ // Didn't find a thread waiting for proc work; this can happen
+ // in two scenarios:
+ // 1. All threads are busy handling transactions
+ // In that case, one of those threads should call back into
+ // the kernel driver soon and pick up this work.
+ // 2. Threads are using the (e)poll interface, in which case
+ // they may be blocked on the waitqueue without having been
+ // added to waiting_threads. For this case, we just iterate
+ // over all threads not handling transaction work, and
+ // wake them all up. We wake all because we don't know whether
+ // a thread that called into (e)poll is handling non-binder
+ // work currently.
+ self.work.push_back(work);
+
+ // Wake up polling threads, if any.
+ for thread in self.threads.values() {
+ thread.notify_if_poll_ready(sync);
+ }
+
+ Ok(())
+ }
+ }
+
+ pub(crate) fn remove_node(&mut self, ptr: u64) {
+ self.nodes.remove(&ptr);
+ }
+
+ /// Updates the reference count on the given node.
+ pub(crate) fn update_node_refcount(
+ &mut self,
+ node: &DArc<Node>,
+ inc: bool,
+ strong: bool,
+ count: usize,
+ othread: Option<&Thread>,
+ ) {
+ let push = node.update_refcount_locked(inc, strong, count, self);
+
+ // If we decided that we need to push work, push either to the process or to a thread if
+ // one is specified.
+ if let Some(node) = push {
+ if let Some(thread) = othread {
+ thread.push_work_deferred(node);
+ } else {
+ let _ = self.push_work(node);
+ // `push_work` may fail if the process is dead, but that's okay: a dead
+ // process doesn't care about the notification.
+ }
+ }
+ }
+
+ pub(crate) fn new_node_ref(
+ &mut self,
+ node: DArc<Node>,
+ strong: bool,
+ thread: Option<&Thread>,
+ ) -> NodeRef {
+ self.update_node_refcount(&node, true, strong, 1, thread);
+ let strong_count = if strong { 1 } else { 0 };
+ NodeRef::new(node, strong_count, 1 - strong_count)
+ }
+
+ pub(crate) fn new_node_ref_with_thread(
+ &mut self,
+ node: DArc<Node>,
+ strong: bool,
+ thread: &Thread,
+ wrapper: Option<CritIncrWrapper>,
+ ) -> Result<NodeRef, CouldNotDeliverCriticalIncrement> {
+ let push = match wrapper {
+ None => node
+ .incr_refcount_allow_zero2one(strong, self)?
+ .map(|node| node as _),
+ Some(wrapper) => node.incr_refcount_allow_zero2one_with_wrapper(strong, wrapper, self),
+ };
+ if let Some(node) = push {
+ thread.push_work_deferred(node);
+ }
+ let strong_count = if strong { 1 } else { 0 };
+ Ok(NodeRef::new(node, strong_count, 1 - strong_count))
+ }
+
+ /// Returns an existing node with the given pointer and cookie, if one exists.
+ ///
+ /// Returns an error if a node with the given pointer but a different cookie exists.
+ fn get_existing_node(&self, ptr: u64, cookie: u64) -> Result<Option<DArc<Node>>> {
+ match self.nodes.get(&ptr) {
+ None => Ok(None),
+ Some(node) => {
+ let (_, node_cookie) = node.get_id();
+ if node_cookie == cookie {
+ Ok(Some(node.clone()))
+ } else {
+ Err(EINVAL)
+ }
+ }
+ }
+ }
+
+ fn register_thread(&mut self) -> bool {
+ if self.requested_thread_count == 0 {
+ return false;
+ }
+
+ self.requested_thread_count -= 1;
+ self.started_thread_count += 1;
+ true
+ }
+
+ /// Finds a delivered death notification with the given cookie, removes it from the
+ /// process's delivered list, and returns it.
+ fn pull_delivered_death(&mut self, cookie: u64) -> Option<DArc<NodeDeath>> {
+ let mut cursor = self.delivered_deaths.cursor_front();
+ while let Some(next) = cursor.peek_next() {
+ if next.cookie == cookie {
+ return Some(next.remove().into_arc());
+ }
+ cursor.move_next();
+ }
+ None
+ }
+
+ pub(crate) fn death_delivered(&mut self, death: DArc<NodeDeath>) {
+ if let Some(death) = ListArc::try_from_arc_or_drop(death) {
+ self.delivered_deaths.push_back(death);
+ } else {
+ pr_warn!("Notification added to `delivered_deaths` twice.");
+ }
+ }
+
+ pub(crate) fn add_outstanding_txn(&mut self) {
+ self.outstanding_txns += 1;
+ }
+
+ fn txns_pending_locked(&self) -> bool {
+ if self.outstanding_txns > 0 {
+ return true;
+ }
+ for thread in self.threads.values() {
+ if thread.has_current_transaction() {
+ return true;
+ }
+ }
+ false
+ }
+}
+
+/// Used to keep track of a node that this process has a handle to.
+#[pin_data]
+pub(crate) struct NodeRefInfo {
+ debug_id: usize,
+ /// The refcount that this process owns to the node.
+ node_ref: ListArcField<NodeRef, { Self::LIST_PROC }>,
+ death: ListArcField<Option<DArc<NodeDeath>>, { Self::LIST_PROC }>,
+ /// Cookie of the active freeze listener for this node.
+ freeze: ListArcField<Option<FreezeCookie>, { Self::LIST_PROC }>,
+ /// Used to store this `NodeRefInfo` in the node's `refs` list.
+ #[pin]
+ links: ListLinks<{ Self::LIST_NODE }>,
+ /// The handle for this `NodeRefInfo`.
+ handle: u32,
+ /// The process that has a handle to the node.
+ pub(crate) process: Arc<Process>,
+}
+
+impl NodeRefInfo {
+ /// The id used for the `Node::refs` list.
+ pub(crate) const LIST_NODE: u64 = 0x2da16350fb724a10;
+ /// The id used for the `ListArc` in `ProcessNodeRefs`.
+ const LIST_PROC: u64 = 0xd703a5263dcc8650;
+
+ fn new(node_ref: NodeRef, handle: u32, process: Arc<Process>) -> impl PinInit<Self> {
+ pin_init!(Self {
+ debug_id: super::next_debug_id(),
+ node_ref: ListArcField::new(node_ref),
+ death: ListArcField::new(None),
+ freeze: ListArcField::new(None),
+ links <- ListLinks::new(),
+ handle,
+ process,
+ })
+ }
+
+ kernel::list::define_list_arc_field_getter! {
+ pub(crate) fn death(&mut self<{Self::LIST_PROC}>) -> &mut Option<DArc<NodeDeath>> { death }
+ pub(crate) fn freeze(&mut self<{Self::LIST_PROC}>) -> &mut Option<FreezeCookie> { freeze }
+ pub(crate) fn node_ref(&mut self<{Self::LIST_PROC}>) -> &mut NodeRef { node_ref }
+ pub(crate) fn node_ref2(&self<{Self::LIST_PROC}>) -> &NodeRef { node_ref }
+ }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<{Self::LIST_NODE}> for NodeRefInfo { untracked; }
+ impl ListArcSafe<{Self::LIST_PROC}> for NodeRefInfo { untracked; }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<{Self::LIST_NODE}> for NodeRefInfo {
+ using ListLinks { self.links };
+ }
+}
+
+/// Keeps track of references this process has to nodes owned by other processes.
+///
+/// TODO: Currently, the rbtree requires two allocations per node reference, and two tree
+/// traversals to look up a node by `Node::global_id`. Once the rbtree is more powerful, these
+/// extra costs should be eliminated.
+struct ProcessNodeRefs {
+ /// Used to look up nodes using the 32-bit id that this process knows it by.
+ by_handle: RBTree<u32, ListArc<NodeRefInfo, { NodeRefInfo::LIST_PROC }>>,
+ /// Used to look up nodes without knowing their local 32-bit id. The usize is the address of
+ /// the underlying `Node` struct as returned by `Node::global_id`.
+ by_node: RBTree<usize, u32>,
+ /// Used to look up a `FreezeListener` by cookie.
+ ///
+ /// There might be multiple freeze listeners for the same node, but at most one of them is
+ /// active.
+ freeze_listeners: RBTree<FreezeCookie, FreezeListener>,
+}
+
+impl ProcessNodeRefs {
+ fn new() -> Self {
+ Self {
+ by_handle: RBTree::new(),
+ by_node: RBTree::new(),
+ freeze_listeners: RBTree::new(),
+ }
+ }
+}
+
+/// A process using binder.
+///
+/// Strictly speaking, there can be multiple of these per process. There is one for each binder fd
+/// that a process has opened, so processes using several binder contexts have several `Process`
+/// objects. This ensures that the contexts are fully separated.
+#[pin_data]
+pub(crate) struct Process {
+ pub(crate) ctx: Arc<Context>,
+
+ // The task leader (process).
+ pub(crate) task: ARef<Task>,
+
+ // Credential associated with file when `Process` is created.
+ pub(crate) cred: ARef<Credential>,
+
+ #[pin]
+ pub(crate) inner: SpinLock<ProcessInner>,
+
+ #[pin]
+ pub(crate) pages: ShrinkablePageRange,
+
+ // Waitqueue of processes waiting for all outstanding transactions to be
+ // processed.
+ #[pin]
+ freeze_wait: CondVar,
+
+ // Node references are in a different lock to avoid recursive acquisition when
+ // incrementing/decrementing a node in another process.
+ #[pin]
+ node_refs: Mutex<ProcessNodeRefs>,
+
+ // Work node for deferred work item.
+ #[pin]
+ defer_work: Work<Process>,
+
+ // Links for process list in Context.
+ #[pin]
+ links: ListLinks,
+
+ pub(crate) stats: BinderStats,
+}
+
+kernel::impl_has_work! {
+ impl HasWork<Process> for Process { self.defer_work }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for Process { untracked; }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<0> for Process {
+ using ListLinks { self.links };
+ }
+}
+
+impl workqueue::WorkItem for Process {
+ type Pointer = Arc<Process>;
+
+ fn run(me: Arc<Self>) {
+ let defer;
+ {
+ let mut inner = me.inner.lock();
+ defer = inner.defer_work;
+ inner.defer_work = 0;
+ }
+
+ if defer & PROC_DEFER_FLUSH != 0 {
+ me.deferred_flush();
+ }
+ if defer & PROC_DEFER_RELEASE != 0 {
+ me.deferred_release();
+ }
+ }
+}
+
+impl Process {
+ fn new(ctx: Arc<Context>, cred: ARef<Credential>) -> Result<Arc<Self>> {
+ let current = kernel::current!();
+ let list_process = ListArc::pin_init::<Error>(
+ try_pin_init!(Process {
+ ctx,
+ cred,
+ inner <- kernel::new_spinlock!(ProcessInner::new(), "Process::inner"),
+ pages <- ShrinkablePageRange::new(&super::BINDER_SHRINKER),
+ node_refs <- kernel::new_mutex!(ProcessNodeRefs::new(), "Process::node_refs"),
+ freeze_wait <- kernel::new_condvar!("Process::freeze_wait"),
+ task: current.group_leader().into(),
+ defer_work <- kernel::new_work!("Process::defer_work"),
+ links <- ListLinks::new(),
+ stats: BinderStats::new(),
+ }),
+ GFP_KERNEL,
+ )?;
+
+ let process = list_process.clone_arc();
+ process.ctx.register_process(list_process);
+
+ Ok(process)
+ }
+
+ pub(crate) fn pid_in_current_ns(&self) -> kernel::task::Pid {
+ self.task.tgid_nr_ns(None)
+ }
+
+ #[inline(never)]
+ pub(crate) fn debug_print_stats(&self, m: &SeqFile, ctx: &Context) -> Result<()> {
+ seq_print!(m, "proc {}\n", self.pid_in_current_ns());
+ seq_print!(m, "context {}\n", &*ctx.name);
+
+ let inner = self.inner.lock();
+ seq_print!(m, " threads: {}\n", inner.threads.iter().count());
+ seq_print!(
+ m,
+ " requested threads: {}+{}/{}\n",
+ inner.requested_thread_count,
+ inner.started_thread_count,
+ inner.max_threads,
+ );
+ if let Some(mapping) = &inner.mapping {
+ seq_print!(
+ m,
+ " free oneway space: {}\n",
+ mapping.alloc.free_oneway_space()
+ );
+ seq_print!(m, " buffers: {}\n", mapping.alloc.count_buffers());
+ }
+ seq_print!(
+ m,
+ " outstanding transactions: {}\n",
+ inner.outstanding_txns
+ );
+ seq_print!(m, " nodes: {}\n", inner.nodes.iter().count());
+ drop(inner);
+
+ {
+ let mut refs = self.node_refs.lock();
+ let (mut count, mut weak, mut strong) = (0, 0, 0);
+ for r in refs.by_handle.values_mut() {
+ let node_ref = r.node_ref();
+ let (nstrong, nweak) = node_ref.get_count();
+ count += 1;
+ weak += nweak;
+ strong += nstrong;
+ }
+ seq_print!(m, " refs: {count} s {strong} w {weak}\n");
+ }
+
+ self.stats.debug_print(" ", m);
+
+ Ok(())
+ }
+
+ #[inline(never)]
+ pub(crate) fn debug_print(&self, m: &SeqFile, ctx: &Context, print_all: bool) -> Result<()> {
+ seq_print!(m, "proc {}\n", self.pid_in_current_ns());
+ seq_print!(m, "context {}\n", &*ctx.name);
+
+ let mut all_threads = KVec::new();
+ let mut all_nodes = KVec::new();
+ loop {
+ let inner = self.inner.lock();
+ let num_threads = inner.threads.iter().count();
+ let num_nodes = inner.nodes.iter().count();
+
+ if all_threads.capacity() < num_threads || all_nodes.capacity() < num_nodes {
+ drop(inner);
+ all_threads.reserve(num_threads, GFP_KERNEL)?;
+ all_nodes.reserve(num_nodes, GFP_KERNEL)?;
+ continue;
+ }
+
+ for thread in inner.threads.values() {
+ assert!(all_threads.len() < all_threads.capacity());
+ let _ = all_threads.push(thread.clone(), GFP_ATOMIC);
+ }
+
+ for node in inner.nodes.values() {
+ assert!(all_nodes.len() < all_nodes.capacity());
+ let _ = all_nodes.push(node.clone(), GFP_ATOMIC);
+ }
+
+ break;
+ }
+
+ for thread in all_threads {
+ thread.debug_print(m, print_all)?;
+ }
+
+ let mut inner = self.inner.lock();
+ for node in all_nodes {
+ if print_all || node.has_oneway_transaction(&mut inner) {
+ node.full_debug_print(m, &mut inner)?;
+ }
+ }
+ drop(inner);
+
+ if print_all {
+ let mut refs = self.node_refs.lock();
+ for r in refs.by_handle.values_mut() {
+ let node_ref = r.node_ref();
+ let dead = node_ref.node.owner.inner.lock().is_dead;
+ let (strong, weak) = node_ref.get_count();
+ let debug_id = node_ref.node.debug_id;
+
+ seq_print!(
+ m,
+ " ref {}: desc {} {}node {debug_id} s {strong} w {weak}",
+ r.debug_id,
+ r.handle,
+ if dead { "dead " } else { "" },
+ );
+ }
+ }
+
+ let inner = self.inner.lock();
+ for work in &inner.work {
+ work.debug_print(m, " ", " pending transaction ")?;
+ }
+ for _death in &inner.delivered_deaths {
+ seq_print!(m, " has delivered dead binder\n");
+ }
+ if let Some(mapping) = &inner.mapping {
+ mapping.alloc.debug_print(m)?;
+ }
+ drop(inner);
+
+ Ok(())
+ }
+
+ /// Attempts to fetch a work item from the process queue.
+ pub(crate) fn get_work(&self) -> Option<DLArc<dyn DeliverToRead>> {
+ self.inner.lock().work.pop_front()
+ }
+
+ /// Attempts to fetch a work item from the process queue. If none is available, it registers the
+ /// given thread as ready to receive work directly.
+ ///
+ /// This must only be called when the thread is not participating in a transaction chain; when
+ /// it is, work will always be delivered directly to the thread (and not through the process
+ /// queue).
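+ ///
+ /// Illustrative caller sketch; `deliver` and `wait_for_work` are hypothetical
+ /// helpers, not part of this driver:
+ ///
+ /// ```ignore
+ /// match proc.get_work_or_register(&thread) {
+ ///     GetWorkOrRegister::Work(work) => deliver(work),
+ ///     // Dropping the `Registration` unregisters the thread from the
+ ///     // `ready_threads` list.
+ ///     GetWorkOrRegister::Register(registration) => wait_for_work(registration),
+ /// }
+ /// ```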
+ pub(crate) fn get_work_or_register<'a>(
+ &'a self,
+ thread: &'a Arc<Thread>,
+ ) -> GetWorkOrRegister<'a> {
+ let mut inner = self.inner.lock();
+ // Try to get work from the process queue.
+ if let Some(work) = inner.work.pop_front() {
+ return GetWorkOrRegister::Work(work);
+ }
+
+ // Register the thread as ready.
+ GetWorkOrRegister::Register(Registration::new(thread, &mut inner))
+ }
+
+ fn get_current_thread(self: ArcBorrow<'_, Self>) -> Result<Arc<Thread>> {
+ let id = {
+ let current = kernel::current!();
+ if !core::ptr::eq(current.group_leader(), &*self.task) {
+ pr_err!("get_current_thread was called from the wrong process.");
+ return Err(EINVAL);
+ }
+ current.pid()
+ };
+
+ {
+ let inner = self.inner.lock();
+ if let Some(thread) = inner.threads.get(&id) {
+ return Ok(thread.clone());
+ }
+ }
+
+ // Allocate a new `Thread` without holding any locks.
+ let reservation = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ let ta: Arc<Thread> = Thread::new(id, self.into())?;
+
+ let mut inner = self.inner.lock();
+ match inner.threads.entry(id) {
+ rbtree::Entry::Vacant(entry) => {
+ entry.insert(ta.clone(), reservation);
+ Ok(ta)
+ }
+ rbtree::Entry::Occupied(_entry) => {
+ pr_err!("Cannot create two threads with the same id.");
+ Err(EINVAL)
+ }
+ }
+ }
+
+ pub(crate) fn push_work(&self, work: DLArc<dyn DeliverToRead>) -> BinderResult {
+ // If push_work fails, drop the work item outside the lock.
+ let res = self.inner.lock().push_work(work);
+ match res {
+ Ok(()) => Ok(()),
+ Err((err, work)) => {
+ drop(work);
+ Err(err)
+ }
+ }
+ }
+
+ fn set_as_manager(
+ self: ArcBorrow<'_, Self>,
+ info: Option<FlatBinderObject>,
+ thread: &Thread,
+ ) -> Result {
+ let (ptr, cookie, flags) = if let Some(obj) = info {
+ (
+ // SAFETY: The object type for this ioctl is implicitly `BINDER_TYPE_BINDER`, so it
+ // is safe to access the `binder` field.
+ unsafe { obj.__bindgen_anon_1.binder },
+ obj.cookie,
+ obj.flags,
+ )
+ } else {
+ (0, 0, 0)
+ };
+ let node_ref = self.get_node(ptr, cookie, flags as _, true, thread)?;
+ let node = node_ref.node.clone();
+ self.ctx.set_manager_node(node_ref)?;
+ self.inner.lock().is_manager = true;
+
+ // Force the state of the node to prevent the delivery of acquire/increfs.
+ let mut owner_inner = node.owner.inner.lock();
+ node.force_has_count(&mut owner_inner);
+ Ok(())
+ }
+
+ fn get_node_inner(
+ self: ArcBorrow<'_, Self>,
+ ptr: u64,
+ cookie: u64,
+ flags: u32,
+ strong: bool,
+ thread: &Thread,
+ wrapper: Option<CritIncrWrapper>,
+ ) -> Result<Result<NodeRef, CouldNotDeliverCriticalIncrement>> {
+ // Try to find an existing node.
+ {
+ let mut inner = self.inner.lock();
+ if let Some(node) = inner.get_existing_node(ptr, cookie)? {
+ return Ok(inner.new_node_ref_with_thread(node, strong, thread, wrapper));
+ }
+ }
+
+ // Allocate the node before reacquiring the lock.
+ let node = DTRWrap::arc_pin_init(Node::new(ptr, cookie, flags, self.into()))?.into_arc();
+ let rbnode = RBTreeNode::new(ptr, node.clone(), GFP_KERNEL)?;
+ let mut inner = self.inner.lock();
+ if let Some(node) = inner.get_existing_node(ptr, cookie)? {
+ return Ok(inner.new_node_ref_with_thread(node, strong, thread, wrapper));
+ }
+
+ inner.nodes.insert(rbnode);
+ // This can only fail if someone has already pushed the node to a list, but we just created
+ // it and still hold the lock, so it can't fail right now.
+ let node_ref = inner
+ .new_node_ref_with_thread(node, strong, thread, wrapper)
+ .unwrap();
+
+ Ok(Ok(node_ref))
+ }
+
+ pub(crate) fn get_node(
+ self: ArcBorrow<'_, Self>,
+ ptr: u64,
+ cookie: u64,
+ flags: u32,
+ strong: bool,
+ thread: &Thread,
+ ) -> Result<NodeRef> {
+ let mut wrapper = None;
+ for _ in 0..2 {
+ match self.get_node_inner(ptr, cookie, flags, strong, thread, wrapper) {
+ Err(err) => return Err(err),
+ Ok(Ok(node_ref)) => return Ok(node_ref),
+ Ok(Err(CouldNotDeliverCriticalIncrement)) => {
+ wrapper = Some(CritIncrWrapper::new()?);
+ }
+ }
+ }
+ // We only get a `CouldNotDeliverCriticalIncrement` error if `wrapper` is `None`, so the
+ // loop should run at most twice.
+ unreachable!()
+ }
+
+ pub(crate) fn insert_or_update_handle(
+ self: ArcBorrow<'_, Process>,
+ node_ref: NodeRef,
+ is_manager: bool,
+ ) -> Result<u32> {
+ {
+ let mut refs = self.node_refs.lock();
+
+ // Do a lookup before inserting.
+ if let Some(handle_ref) = refs.by_node.get(&node_ref.node.global_id()) {
+ let handle = *handle_ref;
+ let info = refs.by_handle.get_mut(&handle).unwrap();
+ info.node_ref().absorb(node_ref);
+ return Ok(handle);
+ }
+ }
+
+ // Reserve memory for tree nodes.
+ let reserve1 = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ let reserve2 = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ let info = UniqueArc::new_uninit(GFP_KERNEL)?;
+
+ let mut refs = self.node_refs.lock();
+
+ // Do a lookup again as node may have been inserted before the lock was reacquired.
+ if let Some(handle_ref) = refs.by_node.get(&node_ref.node.global_id()) {
+ let handle = *handle_ref;
+ let info = refs.by_handle.get_mut(&handle).unwrap();
+ info.node_ref().absorb(node_ref);
+ return Ok(handle);
+ }
+
+ // Find the smallest unused handle. Handle 0 is reserved for the context manager.
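+ // For example (illustrative values): if handles {0, 1, 2, 4} are in use, a
+ // non-manager caller starts at 1 and is assigned 3, the first unused handle.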
+ let mut target: u32 = if is_manager { 0 } else { 1 };
+ for handle in refs.by_handle.keys() {
+ if *handle > target {
+ break;
+ }
+ if *handle == target {
+ target = target.checked_add(1).ok_or(ENOMEM)?;
+ }
+ }
+
+ let gid = node_ref.node.global_id();
+ let (info_proc, info_node) = {
+ let info_init = NodeRefInfo::new(node_ref, target, self.into());
+ match info.pin_init_with(info_init) {
+ Ok(info) => ListArc::pair_from_pin_unique(info),
+ // This cannot fail; the error type is `Infallible`.
+ Err(err) => match err {},
+ }
+ };
+
+ // Ensure the process is still alive while we insert a new reference.
+ //
+ // This releases the lock before inserting the nodes, but since `is_dead` is set as the
+ // first thing in `deferred_release`, process cleanup will not miss the items inserted into
+ // `refs` below.
+ if self.inner.lock().is_dead {
+ return Err(ESRCH);
+ }
+
+ // SAFETY: `info_proc` and `info_node` reference the same node, so we are inserting
+ // `info_node` into the right node's `refs` list.
+ unsafe { info_proc.node_ref2().node.insert_node_info(info_node) };
+
+ refs.by_node.insert(reserve1.into_node(gid, target));
+ refs.by_handle.insert(reserve2.into_node(target, info_proc));
+ Ok(target)
+ }
+
+ pub(crate) fn get_transaction_node(&self, handle: u32) -> BinderResult<NodeRef> {
+ // When handle is zero, try to get the context manager.
+ if handle == 0 {
+ Ok(self.ctx.get_manager_node(true)?)
+ } else {
+ Ok(self.get_node_from_handle(handle, true)?)
+ }
+ }
+
+ pub(crate) fn get_node_from_handle(&self, handle: u32, strong: bool) -> Result<NodeRef> {
+ self.node_refs
+ .lock()
+ .by_handle
+ .get_mut(&handle)
+ .ok_or(ENOENT)?
+ .node_ref()
+ .clone(strong)
+ }
+
+ pub(crate) fn remove_from_delivered_deaths(&self, death: &DArc<NodeDeath>) {
+ let mut inner = self.inner.lock();
+ // SAFETY: By the invariant on the `delivered_links` field, this is the right linked list.
+ let removed = unsafe { inner.delivered_deaths.remove(death) };
+ drop(inner);
+ drop(removed);
+ }
+
+ pub(crate) fn update_ref(
+ self: ArcBorrow<'_, Process>,
+ handle: u32,
+ inc: bool,
+ strong: bool,
+ ) -> Result {
+ if inc && handle == 0 {
+ if let Ok(node_ref) = self.ctx.get_manager_node(strong) {
+ if core::ptr::eq(&*self, &*node_ref.node.owner) {
+ return Err(EINVAL);
+ }
+ let _ = self.insert_or_update_handle(node_ref, true);
+ return Ok(());
+ }
+ }
+
+ // To preserve original binder behaviour, we only fail requests where the manager tries to
+ // increment references on itself.
+ let mut refs = self.node_refs.lock();
+ if let Some(info) = refs.by_handle.get_mut(&handle) {
+ if info.node_ref().update(inc, strong) {
+ // Clean up death if there is one attached to this node reference.
+ if let Some(death) = info.death().take() {
+ death.set_cleared(true);
+ self.remove_from_delivered_deaths(&death);
+ }
+
+ // Remove reference from process tables, and from the node's `refs` list.
+
+ // SAFETY: We are removing the `NodeRefInfo` from the right node.
+ unsafe { info.node_ref2().node.remove_node_info(info) };
+
+ let id = info.node_ref().node.global_id();
+ refs.by_handle.remove(&handle);
+ refs.by_node.remove(&id);
+ }
+ } else {
+ // All refs are cleared on process exit, so a missing ref is expected then; only
+ // warn if the process is still alive.
+ if !self.inner.lock().is_dead {
+ pr_warn!("{}: no such ref {handle}\n", self.pid_in_current_ns());
+ }
+ }
+ Ok(())
+ }
+
+ /// Decrements the refcount of the given node, if one exists.
+ pub(crate) fn update_node(&self, ptr: u64, cookie: u64, strong: bool) {
+ let mut inner = self.inner.lock();
+ if let Ok(Some(node)) = inner.get_existing_node(ptr, cookie) {
+ inner.update_node_refcount(&node, false, strong, 1, None);
+ }
+ }
+
+ pub(crate) fn inc_ref_done(&self, reader: &mut UserSliceReader, strong: bool) -> Result {
+ let ptr = reader.read::<u64>()?;
+ let cookie = reader.read::<u64>()?;
+ let mut inner = self.inner.lock();
+ if let Ok(Some(node)) = inner.get_existing_node(ptr, cookie) {
+ if let Some(node) = node.inc_ref_done_locked(strong, &mut inner) {
+ // This only fails if the process is dead.
+ let _ = inner.push_work(node);
+ }
+ }
+ Ok(())
+ }
+
+ pub(crate) fn buffer_alloc(
+ self: &Arc<Self>,
+ debug_id: usize,
+ size: usize,
+ is_oneway: bool,
+ from_pid: i32,
+ ) -> BinderResult<NewAllocation> {
+ use kernel::page::PAGE_SIZE;
+
+ let mut reserve_new_args = ReserveNewArgs {
+ debug_id,
+ size,
+ is_oneway,
+ pid: from_pid,
+ ..ReserveNewArgs::default()
+ };
+
+ let (new_alloc, addr) = loop {
+ let mut inner = self.inner.lock();
+ let mapping = inner.mapping.as_mut().ok_or_else(BinderError::new_dead)?;
+ let alloc_request = match mapping.alloc.reserve_new(reserve_new_args)? {
+ ReserveNew::Success(new_alloc) => break (new_alloc, mapping.address),
+ ReserveNew::NeedAlloc(request) => request,
+ };
+ drop(inner);
+ // We need to allocate memory and then call `reserve_new` again.
+ reserve_new_args = alloc_request.make_alloc()?;
+ };
+
+ let res = Allocation::new(
+ self.clone(),
+ debug_id,
+ new_alloc.offset,
+ size,
+ addr + new_alloc.offset,
+ new_alloc.oneway_spam_detected,
+ );
+
+ // This allocation will be marked as in use until the `Allocation` is used to free it.
+ //
+ // This method can't be called while holding a lock, so we release the lock first. It's
+ // okay for several threads to use the method on the same index at the same time. In that
+ // case, one of the calls will allocate the given page (if missing), and the other call
+ // will wait for the other call to finish allocating the page.
+ //
+ // We will not call `stop_using_range` in parallel with this on the same page, because the
+ // allocation can only be removed via the destructor of the `Allocation` object that we
+ // currently own.
+ match self.pages.use_range(
+ new_alloc.offset / PAGE_SIZE,
+ (new_alloc.offset + size).div_ceil(PAGE_SIZE),
+ ) {
+ Ok(()) => {}
+ Err(err) => {
+ pr_warn!("use_range failure {:?}", err);
+ return Err(err.into());
+ }
+ }
+
+ Ok(NewAllocation(res))
+ }
+
+ pub(crate) fn buffer_get(self: &Arc<Self>, ptr: usize) -> Option<Allocation> {
+ let mut inner = self.inner.lock();
+ let mapping = inner.mapping.as_mut()?;
+ let offset = ptr.checked_sub(mapping.address)?;
+ let (size, debug_id, odata) = mapping.alloc.reserve_existing(offset).ok()?;
+ let mut alloc = Allocation::new(self.clone(), debug_id, offset, size, ptr, false);
+ if let Some(data) = odata {
+ alloc.set_info(data);
+ }
+ Some(alloc)
+ }
+
+ pub(crate) fn buffer_raw_free(&self, ptr: usize) {
+ let mut inner = self.inner.lock();
+ if let Some(ref mut mapping) = &mut inner.mapping {
+ let offset = match ptr.checked_sub(mapping.address) {
+ Some(offset) => offset,
+ None => return,
+ };
+
+ let freed_range = match mapping.alloc.reservation_abort(offset) {
+ Ok(freed_range) => freed_range,
+ Err(_) => {
+ pr_warn!(
+ "Pointer {:x} failed to free, base = {:x}\n",
+ ptr,
+ mapping.address
+ );
+ return;
+ }
+ };
+
+ // No more allocations in this range. Mark them as not in use.
+ //
+ // Must be done before we release the lock so that `use_range` is not used on these
+ // indices until `stop_using_range` returns.
+ self.pages
+ .stop_using_range(freed_range.start_page_idx, freed_range.end_page_idx);
+ }
+ }
+
+ pub(crate) fn buffer_make_freeable(&self, offset: usize, mut data: Option<AllocationInfo>) {
+ let mut inner = self.inner.lock();
+ if let Some(ref mut mapping) = &mut inner.mapping {
+ if mapping.alloc.reservation_commit(offset, &mut data).is_err() {
+ pr_warn!("Offset {} failed to be marked freeable\n", offset);
+ }
+ }
+ }
+
+ fn create_mapping(&self, vma: &mm::virt::VmaNew) -> Result {
+ use kernel::page::PAGE_SIZE;
+ let size = usize::min(vma.end() - vma.start(), bindings::SZ_4M as usize);
+ let mapping = Mapping::new(vma.start(), size);
+ let page_count = self.pages.register_with_vma(vma)?;
+ if page_count * PAGE_SIZE != size {
+ return Err(EINVAL);
+ }
+
+ // Save range allocator for later.
+ self.inner.lock().mapping = Some(mapping);
+
+ Ok(())
+ }
+
+ fn version(&self, data: UserSlice) -> Result {
+ data.writer().write(&BinderVersion::current())
+ }
+
+ pub(crate) fn register_thread(&self) -> bool {
+ self.inner.lock().register_thread()
+ }
+
+ fn remove_thread(&self, thread: Arc<Thread>) {
+ self.inner.lock().threads.remove(&thread.id);
+ thread.release();
+ }
+
+ fn set_max_threads(&self, max: u32) {
+ self.inner.lock().max_threads = max;
+ }
+
+ fn set_oneway_spam_detection_enabled(&self, enabled: u32) {
+ self.inner.lock().oneway_spam_detection_enabled = enabled != 0;
+ }
+
+ pub(crate) fn is_oneway_spam_detection_enabled(&self) -> bool {
+ self.inner.lock().oneway_spam_detection_enabled
+ }
+
+ fn get_node_debug_info(&self, data: UserSlice) -> Result {
+ let (mut reader, mut writer) = data.reader_writer();
+
+ // Read the starting point.
+ let ptr = reader.read::<BinderNodeDebugInfo>()?.ptr;
+ let mut out = BinderNodeDebugInfo::default();
+
+ {
+ let inner = self.inner.lock();
+ for (node_ptr, node) in &inner.nodes {
+ if *node_ptr > ptr {
+ node.populate_debug_info(&mut out, &inner);
+ break;
+ }
+ }
+ }
+
+ writer.write(&out)
+ }
+
+ fn get_node_info_from_ref(&self, data: UserSlice) -> Result {
+ let (mut reader, mut writer) = data.reader_writer();
+ let mut out = reader.read::<BinderNodeInfoForRef>()?;
+
+ if out.strong_count != 0
+ || out.weak_count != 0
+ || out.reserved1 != 0
+ || out.reserved2 != 0
+ || out.reserved3 != 0
+ {
+ return Err(EINVAL);
+ }
+
+ // Only the context manager is allowed to use this ioctl.
+ if !self.inner.lock().is_manager {
+ return Err(EPERM);
+ }
+
+ {
+ let mut node_refs = self.node_refs.lock();
+ let node_info = node_refs.by_handle.get_mut(&out.handle).ok_or(ENOENT)?;
+ let node_ref = node_info.node_ref();
+ let owner_inner = node_ref.node.owner.inner.lock();
+ node_ref.node.populate_counts(&mut out, &owner_inner);
+ }
+
+ // Write the result back.
+ writer.write(&out)
+ }
+
+ pub(crate) fn needs_thread(&self) -> bool {
+ let mut inner = self.inner.lock();
+ let ret = inner.requested_thread_count == 0
+ && inner.ready_threads.is_empty()
+ && inner.started_thread_count < inner.max_threads;
+ if ret {
+ inner.requested_thread_count += 1
+ }
+ ret
+ }
+
+ pub(crate) fn request_death(
+ self: &Arc<Self>,
+ reader: &mut UserSliceReader,
+ thread: &Thread,
+ ) -> Result {
+ let handle: u32 = reader.read()?;
+ let cookie: u64 = reader.read()?;
+
+ // Queue BR_ERROR if we can't allocate memory for the death notification.
+ let death = UniqueArc::new_uninit(GFP_KERNEL).inspect_err(|_| {
+ thread.push_return_work(BR_ERROR);
+ })?;
+ let mut refs = self.node_refs.lock();
+ let Some(info) = refs.by_handle.get_mut(&handle) else {
+ pr_warn!("BC_REQUEST_DEATH_NOTIFICATION invalid ref {handle}\n");
+ return Ok(());
+ };
+
+ // Nothing to do if there is already a death notification request for this handle.
+ if info.death().is_some() {
+ pr_warn!("BC_REQUEST_DEATH_NOTIFICATION death notification already set\n");
+ return Ok(());
+ }
+
+ let death = {
+ let death_init = NodeDeath::new(info.node_ref().node.clone(), self.clone(), cookie);
+ match death.pin_init_with(death_init) {
+ Ok(death) => death,
+ // This cannot fail; the error type is `Infallible`.
+ Err(err) => match err {},
+ }
+ };
+
+ // Register the death notification.
+ {
+ let owner = info.node_ref2().node.owner.clone();
+ let mut owner_inner = owner.inner.lock();
+ if owner_inner.is_dead {
+ let death = Arc::from(death);
+ *info.death() = Some(death.clone());
+ drop(owner_inner);
+ death.set_dead();
+ } else {
+ let death = ListArc::from(death);
+ *info.death() = Some(death.clone_arc());
+ info.node_ref().node.add_death(death, &mut owner_inner);
+ }
+ }
+ Ok(())
+ }
+
+ pub(crate) fn clear_death(&self, reader: &mut UserSliceReader, thread: &Thread) -> Result {
+ let handle: u32 = reader.read()?;
+ let cookie: u64 = reader.read()?;
+
+ let mut refs = self.node_refs.lock();
+ let Some(info) = refs.by_handle.get_mut(&handle) else {
+ pr_warn!("BC_CLEAR_DEATH_NOTIFICATION invalid ref {handle}\n");
+ return Ok(());
+ };
+
+ let Some(death) = info.death().take() else {
+ pr_warn!("BC_CLEAR_DEATH_NOTIFICATION death notification not active\n");
+ return Ok(());
+ };
+ if death.cookie != cookie {
+ *info.death() = Some(death);
+ pr_warn!("BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch\n");
+ return Ok(());
+ }
+
+ // Update state and determine if we need to queue a work item. We only need to do it when
+ // the node is not dead or if the user already completed the death notification.
+ if death.set_cleared(false) {
+ if let Some(death) = ListArc::try_from_arc_or_drop(death) {
+ let _ = thread.push_work_if_looper(death);
+ }
+ }
+
+ Ok(())
+ }
+
+ pub(crate) fn dead_binder_done(&self, cookie: u64, thread: &Thread) {
+ if let Some(death) = self.inner.lock().pull_delivered_death(cookie) {
+ death.set_notification_done(thread);
+ }
+ }
+
+ /// Locks the spinlock and moves the `nodes` rbtree out.
+ ///
+ /// This allows you to iterate through `nodes` while also allowing you to give other parts of
+ /// the codebase exclusive access to `ProcessInner`.
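+ ///
+ /// Illustrative usage sketch (not part of the driver):
+ ///
+ /// ```ignore
+ /// let mut guard = process.lock_with_nodes();
+ /// for node in guard.nodes.values() {
+ ///     node.full_debug_print(m, &mut guard.inner)?;
+ /// }
+ /// drop(guard); // the destructor moves the rbtree back into `ProcessInner`
+ /// ```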
+ pub(crate) fn lock_with_nodes(&self) -> WithNodes<'_> {
+ let mut inner = self.inner.lock();
+ WithNodes {
+ nodes: take(&mut inner.nodes),
+ inner,
+ }
+ }
+
+ fn deferred_flush(&self) {
+ let inner = self.inner.lock();
+ for thread in inner.threads.values() {
+ thread.exit_looper();
+ }
+ }
+
+ fn deferred_release(self: Arc<Self>) {
+ let is_manager = {
+ let mut inner = self.inner.lock();
+ inner.is_dead = true;
+ inner.is_frozen = false;
+ inner.sync_recv = false;
+ inner.async_recv = false;
+ inner.is_manager
+ };
+
+ if is_manager {
+ self.ctx.unset_manager_node();
+ }
+
+ self.ctx.deregister_process(&self);
+
+ let binderfs_file = self.inner.lock().binderfs_file.take();
+ drop(binderfs_file);
+
+ // Release threads.
+ let threads = {
+ let mut inner = self.inner.lock();
+ let threads = take(&mut inner.threads);
+ let ready = take(&mut inner.ready_threads);
+ drop(inner);
+ drop(ready);
+
+ for thread in threads.values() {
+ thread.release();
+ }
+ threads
+ };
+
+ // Release nodes.
+ {
+ while let Some(node) = {
+ let mut lock = self.inner.lock();
+ lock.nodes.cursor_front().map(|c| c.remove_current().1)
+ } {
+ node.to_key_value().1.release();
+ }
+ }
+
+ // Clean up death listeners and remove nodes from external node info lists.
+ for info in self.node_refs.lock().by_handle.values_mut() {
+ // SAFETY: We are removing the `NodeRefInfo` from the right node.
+ unsafe { info.node_ref2().node.remove_node_info(info) };
+
+ // Remove all death notifications from the nodes (that belong to a different process).
+ let death = if let Some(existing) = info.death().take() {
+ existing
+ } else {
+ continue;
+ };
+ death.set_cleared(false);
+ }
+
+ // Clean up freeze listeners.
+ let freeze_listeners = take(&mut self.node_refs.lock().freeze_listeners);
+ for listener in freeze_listeners.values() {
+ listener.on_process_exit(&self);
+ }
+ drop(freeze_listeners);
+
+ // Release refs on foreign nodes.
+ {
+ let mut refs = self.node_refs.lock();
+ let by_handle = take(&mut refs.by_handle);
+ let by_node = take(&mut refs.by_node);
+ drop(refs);
+ drop(by_node);
+ drop(by_handle);
+ }
+
+ // Cancel all pending work items.
+ while let Some(work) = self.get_work() {
+ work.into_arc().cancel();
+ }
+
+ let delivered_deaths = take(&mut self.inner.lock().delivered_deaths);
+ drop(delivered_deaths);
+
+ // Free any resources kept alive by allocated buffers.
+ let omapping = self.inner.lock().mapping.take();
+ if let Some(mut mapping) = omapping {
+ let address = mapping.address;
+ mapping
+ .alloc
+ .take_for_each(|offset, size, debug_id, odata| {
+ let ptr = offset + address;
+ pr_warn!(
+ "{}: removing orphan mapping {offset}:{size}\n",
+ self.pid_in_current_ns()
+ );
+ let mut alloc =
+ Allocation::new(self.clone(), debug_id, offset, size, ptr, false);
+ if let Some(data) = odata {
+ alloc.set_info(data);
+ }
+ drop(alloc)
+ });
+ }
+
+ // Calls to `synchronize_rcu()` in the destructor of `Thread` happen here.
+ drop(threads);
+ }
+
+ pub(crate) fn drop_outstanding_txn(&self) {
+ let wake = {
+ let mut inner = self.inner.lock();
+ if inner.outstanding_txns == 0 {
+ pr_err!("outstanding_txns underflow");
+ return;
+ }
+ inner.outstanding_txns -= 1;
+ inner.is_frozen && inner.outstanding_txns == 0
+ };
+
+ if wake {
+ self.freeze_wait.notify_all();
+ }
+ }
+
+ pub(crate) fn ioctl_freeze(&self, info: &BinderFreezeInfo) -> Result {
+ if info.enable == 0 {
+ let msgs = self.prepare_freeze_messages()?;
+ let mut inner = self.inner.lock();
+ inner.sync_recv = false;
+ inner.async_recv = false;
+ inner.is_frozen = false;
+ drop(inner);
+ msgs.send_messages();
+ return Ok(());
+ }
+
+ let mut inner = self.inner.lock();
+ inner.sync_recv = false;
+ inner.async_recv = false;
+ inner.is_frozen = true;
+
+ if info.timeout_ms > 0 {
+ let mut jiffies = kernel::time::msecs_to_jiffies(info.timeout_ms);
+ while jiffies > 0 {
+ if inner.outstanding_txns == 0 {
+ break;
+ }
+
+ match self
+ .freeze_wait
+ .wait_interruptible_timeout(&mut inner, jiffies)
+ {
+ CondVarTimeoutResult::Signal { .. } => {
+ inner.is_frozen = false;
+ return Err(ERESTARTSYS);
+ }
+ CondVarTimeoutResult::Woken { jiffies: remaining } => {
+ jiffies = remaining;
+ }
+ CondVarTimeoutResult::Timeout => {
+ jiffies = 0;
+ }
+ }
+ }
+ }
+
+ if inner.txns_pending_locked() {
+ inner.is_frozen = false;
+ Err(EAGAIN)
+ } else {
+ drop(inner);
+ match self.prepare_freeze_messages() {
+ Ok(batch) => {
+ batch.send_messages();
+ Ok(())
+ }
+ Err(kernel::alloc::AllocError) => {
+ self.inner.lock().is_frozen = false;
+ Err(ENOMEM)
+ }
+ }
+ }
+ }
+}
+
+fn get_frozen_status(data: UserSlice) -> Result {
+ let (mut reader, mut writer) = data.reader_writer();
+
+ let mut info = reader.read::<BinderFrozenStatusInfo>()?;
+ info.sync_recv = 0;
+ info.async_recv = 0;
+ let mut found = false;
+
+ for ctx in crate::context::get_all_contexts()? {
+ ctx.for_each_proc(|proc| {
+ if proc.task.pid() == info.pid as _ {
+ found = true;
+ let inner = proc.inner.lock();
+ let txns_pending = inner.txns_pending_locked();
+ info.async_recv |= inner.async_recv as u32;
+ info.sync_recv |= inner.sync_recv as u32;
+ info.sync_recv |= (txns_pending as u32) << 1;
+ }
+ });
+ }
+
+ if found {
+ writer.write(&info)?;
+ Ok(())
+ } else {
+ Err(EINVAL)
+ }
+}
+
+fn ioctl_freeze(reader: &mut UserSliceReader) -> Result {
+ let info = reader.read::<BinderFreezeInfo>()?;
+
+ // It is very unlikely for there to be more than 3, since a process normally uses at
+ // most the binder and hwbinder contexts.
+ let mut procs = KVec::with_capacity(3, GFP_KERNEL)?;
+
+ let ctxs = crate::context::get_all_contexts()?;
+ for ctx in ctxs {
+ for proc in ctx.get_procs_with_pid(info.pid as i32)? {
+ procs.push(proc, GFP_KERNEL)?;
+ }
+ }
+
+ for proc in procs {
+ proc.ioctl_freeze(&info)?;
+ }
+ Ok(())
+}
+
+/// The ioctl handler.
+impl Process {
+ /// Ioctls that are write-only from the perspective of userspace.
+ ///
+ /// The kernel will only read from the pointer that userspace provided to us.
+ fn ioctl_write_only(
+ this: ArcBorrow<'_, Process>,
+ _file: &File,
+ cmd: u32,
+ reader: &mut UserSliceReader,
+ ) -> Result {
+ let thread = this.get_current_thread()?;
+ match cmd {
+ uapi::BINDER_SET_MAX_THREADS => this.set_max_threads(reader.read()?),
+ uapi::BINDER_THREAD_EXIT => this.remove_thread(thread),
+ uapi::BINDER_SET_CONTEXT_MGR => this.set_as_manager(None, &thread)?,
+ uapi::BINDER_SET_CONTEXT_MGR_EXT => {
+ this.set_as_manager(Some(reader.read()?), &thread)?
+ }
+ uapi::BINDER_ENABLE_ONEWAY_SPAM_DETECTION => {
+ this.set_oneway_spam_detection_enabled(reader.read()?)
+ }
+ uapi::BINDER_FREEZE => ioctl_freeze(reader)?,
+ _ => return Err(EINVAL),
+ }
+ Ok(())
+ }
+
+ /// Ioctls that are read/write from the perspective of userspace.
+ ///
+ /// The kernel will both read from and write to the pointer that userspace provided to us.
+ fn ioctl_write_read(
+ this: ArcBorrow<'_, Process>,
+ file: &File,
+ cmd: u32,
+ data: UserSlice,
+ ) -> Result {
+ let thread = this.get_current_thread()?;
+ let blocking = (file.flags() & file::flags::O_NONBLOCK) == 0;
+ match cmd {
+ uapi::BINDER_WRITE_READ => thread.write_read(data, blocking)?,
+ uapi::BINDER_GET_NODE_DEBUG_INFO => this.get_node_debug_info(data)?,
+ uapi::BINDER_GET_NODE_INFO_FOR_REF => this.get_node_info_from_ref(data)?,
+ uapi::BINDER_VERSION => this.version(data)?,
+ uapi::BINDER_GET_FROZEN_INFO => get_frozen_status(data)?,
+ uapi::BINDER_GET_EXTENDED_ERROR => thread.get_extended_error(data)?,
+ _ => return Err(EINVAL),
+ }
+ Ok(())
+ }
+}
+
+/// The file operations supported by `Process`.
+impl Process {
+ pub(crate) fn open(ctx: ArcBorrow<'_, Context>, file: &File) -> Result<Arc<Process>> {
+ Self::new(ctx.into(), ARef::from(file.cred()))
+ }
+
+ pub(crate) fn release(this: Arc<Process>, _file: &File) {
+ let binderfs_file;
+ let should_schedule;
+ {
+ let mut inner = this.inner.lock();
+ should_schedule = inner.defer_work == 0;
+ inner.defer_work |= PROC_DEFER_RELEASE;
+ binderfs_file = inner.binderfs_file.take();
+ }
+
+ if should_schedule {
+ // Ignore failures to schedule to the workqueue. Those just mean that we're already
+ // scheduled for execution.
+ let _ = workqueue::system().enqueue(this);
+ }
+
+ drop(binderfs_file);
+ }
+
+ pub(crate) fn flush(this: ArcBorrow<'_, Process>) -> Result {
+ let should_schedule;
+ {
+ let mut inner = this.inner.lock();
+ should_schedule = inner.defer_work == 0;
+ inner.defer_work |= PROC_DEFER_FLUSH;
+ }
+
+ if should_schedule {
+ // Ignore failures to schedule to the workqueue. Those just mean that we're already
+ // scheduled for execution.
+ let _ = workqueue::system().enqueue(Arc::from(this));
+ }
+ Ok(())
+ }
+
+ pub(crate) fn ioctl(this: ArcBorrow<'_, Process>, file: &File, cmd: u32, arg: usize) -> Result {
+ use kernel::ioctl::{_IOC_DIR, _IOC_SIZE};
+ use kernel::uapi::{_IOC_READ, _IOC_WRITE};
+
+ crate::trace::trace_ioctl(cmd, arg);
+
+ let user_slice = UserSlice::new(UserPtr::from_addr(arg), _IOC_SIZE(cmd));
+
+ const _IOC_READ_WRITE: u32 = _IOC_READ | _IOC_WRITE;
+
+ match _IOC_DIR(cmd) {
+ _IOC_WRITE => Self::ioctl_write_only(this, file, cmd, &mut user_slice.reader()),
+ _IOC_READ_WRITE => Self::ioctl_write_read(this, file, cmd, user_slice),
+ _ => Err(EINVAL),
+ }
+ }
+
+ pub(crate) fn compat_ioctl(
+ this: ArcBorrow<'_, Process>,
+ file: &File,
+ cmd: u32,
+ arg: usize,
+ ) -> Result {
+ Self::ioctl(this, file, cmd, arg)
+ }
+
+ pub(crate) fn mmap(
+ this: ArcBorrow<'_, Process>,
+ _file: &File,
+ vma: &mm::virt::VmaNew,
+ ) -> Result {
+ // We don't allow mmap to be used in a different process.
+ if !core::ptr::eq(kernel::current!().group_leader(), &*this.task) {
+ return Err(EINVAL);
+ }
+ if vma.start() == 0 {
+ return Err(EINVAL);
+ }
+
+ vma.try_clear_maywrite().map_err(|_| EPERM)?;
+ vma.set_dontcopy();
+ vma.set_mixedmap();
+
+ // TODO: Set ops. We need to learn when the user unmaps so that we can stop using it.
+ this.create_mapping(vma)
+ }
+
+ pub(crate) fn poll(
+ this: ArcBorrow<'_, Process>,
+ file: &File,
+ table: PollTable<'_>,
+ ) -> Result<u32> {
+ let thread = this.get_current_thread()?;
+ let (from_proc, mut mask) = thread.poll(file, table);
+ if mask == 0 && from_proc && !this.inner.lock().work.is_empty() {
+ mask |= bindings::POLLIN;
+ }
+ Ok(mask)
+ }
+}
+
+/// Represents that a thread has registered with the `ready_threads` list of its process.
+///
+/// The destructor of this type will unregister the thread from the list of ready threads.
+pub(crate) struct Registration<'a> {
+ thread: &'a Arc<Thread>,
+}
+
+impl<'a> Registration<'a> {
+ fn new(thread: &'a Arc<Thread>, guard: &mut Guard<'_, ProcessInner, SpinLockBackend>) -> Self {
+ assert!(core::ptr::eq(&thread.process.inner, guard.lock_ref()));
+ // INVARIANT: We are pushing this thread to the right `ready_threads` list.
+ if let Ok(list_arc) = ListArc::try_from_arc(thread.clone()) {
+ guard.ready_threads.push_front(list_arc);
+ } else {
+ // It is an error to hit this branch, and it should not be reachable. We try to do
+ // something reasonable when the failure path happens. Most likely, the thread in
+ // question will sleep forever.
+ pr_err!("Same thread registered with `ready_threads` twice.");
+ }
+ Self { thread }
+ }
+}
+
+impl Drop for Registration<'_> {
+ fn drop(&mut self) {
+ let mut inner = self.thread.process.inner.lock();
+ // SAFETY: The thread has the invariant that we never push it to any other linked list than
+ // the `ready_threads` list of its parent process. Therefore, the thread is either in that
+ // list, or in no list.
+ unsafe { inner.ready_threads.remove(self.thread) };
+ }
+}
+
+pub(crate) struct WithNodes<'a> {
+ pub(crate) inner: Guard<'a, ProcessInner, SpinLockBackend>,
+ pub(crate) nodes: RBTree<u64, DArc<Node>>,
+}
+
+impl Drop for WithNodes<'_> {
+ fn drop(&mut self) {
+ core::mem::swap(&mut self.nodes, &mut self.inner.nodes);
+ if self.nodes.iter().next().is_some() {
+ pr_err!("nodes array was modified while using lock_with_nodes\n");
+ }
+ }
+}
+
+pub(crate) enum GetWorkOrRegister<'a> {
+ Work(DLArc<dyn DeliverToRead>),
+ Register(Registration<'a>),
+}
diff --git a/drivers/android/binder/range_alloc/array.rs b/drivers/android/binder/range_alloc/array.rs
new file mode 100644
index 000000000000..07e1dec2ce63
--- /dev/null
+++ b/drivers/android/binder/range_alloc/array.rs
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{
+ page::{PAGE_MASK, PAGE_SIZE},
+ prelude::*,
+ seq_file::SeqFile,
+ seq_print,
+ task::Pid,
+};
+
+use crate::range_alloc::{DescriptorState, FreedRange, Range};
+
+/// Keeps track of allocations in a process' mmap.
+///
+/// Each process has an mmap where the data for incoming transactions will be placed. This struct
+/// keeps track of allocations made in the mmap. For each allocation, we store a descriptor that
+/// has metadata related to the allocation. We also keep track of available free space.
+pub(super) struct ArrayRangeAllocator<T> {
+ /// This stores all ranges that are allocated. Unlike the tree based allocator, we do *not*
+ /// store the free ranges.
+ ///
+ /// Sorted by offset.
+ pub(super) ranges: KVec<Range<T>>,
+ size: usize,
+ free_oneway_space: usize,
+}
+
+struct FindEmptyRes {
+ /// Which index in `ranges` should we insert the new range at?
+ ///
+ /// Inserting the new range at this index keeps `ranges` sorted.
+ insert_at_idx: usize,
+ /// Which offset should we insert the new range at?
+ insert_at_offset: usize,
+}
+
+impl<T> ArrayRangeAllocator<T> {
+ pub(crate) fn new(size: usize, alloc: EmptyArrayAlloc<T>) -> Self {
+ Self {
+ ranges: alloc.ranges,
+ size,
+ free_oneway_space: size / 2,
+ }
+ }
+
+ pub(crate) fn free_oneway_space(&self) -> usize {
+ self.free_oneway_space
+ }
+
+ pub(crate) fn count_buffers(&self) -> usize {
+ self.ranges.len()
+ }
+
+ pub(crate) fn total_size(&self) -> usize {
+ self.size
+ }
+
+ pub(crate) fn is_full(&self) -> bool {
+ self.ranges.len() == self.ranges.capacity()
+ }
+
+ pub(crate) fn debug_print(&self, m: &SeqFile) -> Result<()> {
+ for range in &self.ranges {
+ seq_print!(
+ m,
+ " buffer {}: {} size {} pid {} oneway {}",
+ 0,
+ range.offset,
+ range.size,
+ range.state.pid(),
+ range.state.is_oneway(),
+ );
+ if let DescriptorState::Reserved(_) = range.state {
+ seq_print!(m, " reserved\n");
+ } else {
+ seq_print!(m, " allocated\n");
+ }
+ }
+ Ok(())
+ }
+
+ /// Find somewhere to put a new range.
+ ///
+ /// Unlike the tree implementation, we do not bother to find the smallest gap. The idea is that
+ /// fragmentation isn't a big issue when we don't have many ranges.
+ ///
+ /// Returns the index that the new range should have in `self.ranges` after insertion.
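+ ///
+ /// Worked example (illustrative values): with `total_size = 100` and a single
+ /// existing range covering `40..90`, a request of size 10 fits after the last
+ /// range and is placed at offset 90. A request of size 20 does not fit in the
+ /// remaining tail, so the gaps are scanned and it is placed at offset 0 with
+ /// `insert_at_idx = 0`.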
+ fn find_empty_range(&self, size: usize) -> Option<FindEmptyRes> {
+ let after_last_range = self.ranges.last().map(Range::endpoint).unwrap_or(0);
+
+ if size <= self.total_size() - after_last_range {
+ // We can put the range at the end, so just do that.
+ Some(FindEmptyRes {
+ insert_at_idx: self.ranges.len(),
+ insert_at_offset: after_last_range,
+ })
+ } else {
+ let mut end_of_prev = 0;
+ for (i, range) in self.ranges.iter().enumerate() {
+ // Does it fit before the i'th range?
+ if size <= range.offset - end_of_prev {
+ return Some(FindEmptyRes {
+ insert_at_idx: i,
+ insert_at_offset: end_of_prev,
+ });
+ }
+ end_of_prev = range.endpoint();
+ }
+ None
+ }
+ }
+
+ pub(crate) fn reserve_new(
+ &mut self,
+ debug_id: usize,
+ size: usize,
+ is_oneway: bool,
+ pid: Pid,
+ ) -> Result<usize> {
+ // Compute new value of free_oneway_space, which is set only on success.
+ let new_oneway_space = if is_oneway {
+ match self.free_oneway_space.checked_sub(size) {
+ Some(new_oneway_space) => new_oneway_space,
+ None => return Err(ENOSPC),
+ }
+ } else {
+ self.free_oneway_space
+ };
+
+ let FindEmptyRes {
+ insert_at_idx,
+ insert_at_offset,
+ } = self.find_empty_range(size).ok_or(ENOSPC)?;
+ self.free_oneway_space = new_oneway_space;
+
+ let new_range = Range {
+ offset: insert_at_offset,
+ size,
+ state: DescriptorState::new(is_oneway, debug_id, pid),
+ };
+ // Insert the value at the given index to keep the array sorted.
+ self.ranges
+ .insert_within_capacity(insert_at_idx, new_range)
+ .ok()
+ .unwrap();
+
+ Ok(insert_at_offset)
+ }
+
+ pub(crate) fn reservation_abort(&mut self, offset: usize) -> Result<FreedRange> {
+ // This could use a binary search, but linear scans are usually faster for small arrays.
+ let i = self
+ .ranges
+ .iter()
+ .position(|range| range.offset == offset)
+ .ok_or(EINVAL)?;
+ let range = &self.ranges[i];
+
+ if let DescriptorState::Allocated(_) = range.state {
+ return Err(EPERM);
+ }
+
+ let size = range.size;
+ let offset = range.offset;
+
+ if range.state.is_oneway() {
+ self.free_oneway_space += size;
+ }
+
+ // This computes the range of pages that are no longer used by *any* allocated range. The
+ // caller will mark them as unused, which means that they can be freed if the system comes
+ // under memory pressure.
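+ //
+ // Worked example (illustrative, assuming PAGE_SIZE == 4096): freeing the
+ // range 6000..12000 has no interior pages, but page 1 is freed if the
+ // previous range ends at or before byte 4096, and page 2 is freed if the
+ // next range starts at or after byte 12288.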
+ let mut freed_range = FreedRange::interior_pages(offset, size);
+ #[expect(clippy::collapsible_if)] // reads better like this
+ if offset % PAGE_SIZE != 0 {
+ if i == 0 || self.ranges[i - 1].endpoint() <= (offset & PAGE_MASK) {
+ freed_range.start_page_idx -= 1;
+ }
+ }
+ if range.endpoint() % PAGE_SIZE != 0 {
+ let page_after = (range.endpoint() & PAGE_MASK) + PAGE_SIZE;
+ if i + 1 == self.ranges.len() || page_after <= self.ranges[i + 1].offset {
+ freed_range.end_page_idx += 1;
+ }
+ }
+
+ self.ranges.remove(i)?;
+ Ok(freed_range)
+ }
+
+ pub(crate) fn reservation_commit(&mut self, offset: usize, data: &mut Option<T>) -> Result {
+ // This could use a binary search, but linear scans are usually faster for small arrays.
+ let range = self
+ .ranges
+ .iter_mut()
+ .find(|range| range.offset == offset)
+ .ok_or(ENOENT)?;
+
+ let DescriptorState::Reserved(reservation) = &range.state else {
+ return Err(ENOENT);
+ };
+
+ range.state = DescriptorState::Allocated(reservation.clone().allocate(data.take()));
+ Ok(())
+ }
+
+ pub(crate) fn reserve_existing(&mut self, offset: usize) -> Result<(usize, usize, Option<T>)> {
+ // This could use a binary search, but linear scans are usually faster for small arrays.
+ let range = self
+ .ranges
+ .iter_mut()
+ .find(|range| range.offset == offset)
+ .ok_or(ENOENT)?;
+
+ let DescriptorState::Allocated(allocation) = &mut range.state else {
+ return Err(ENOENT);
+ };
+
+ let data = allocation.take();
+ let debug_id = allocation.reservation.debug_id;
+ range.state = DescriptorState::Reserved(allocation.reservation.clone());
+ Ok((range.size, debug_id, data))
+ }
+
+ pub(crate) fn take_for_each<F: Fn(usize, usize, usize, Option<T>)>(&mut self, callback: F) {
+ for range in self.ranges.iter_mut() {
+ if let DescriptorState::Allocated(allocation) = &mut range.state {
+ callback(
+ range.offset,
+ range.size,
+ allocation.reservation.debug_id,
+ allocation.data.take(),
+ );
+ }
+ }
+ }
+}
+
+pub(crate) struct EmptyArrayAlloc<T> {
+ ranges: KVec<Range<T>>,
+}
+
+impl<T> EmptyArrayAlloc<T> {
+ pub(crate) fn try_new(capacity: usize) -> Result<Self> {
+ Ok(Self {
+ ranges: KVec::with_capacity(capacity, GFP_KERNEL)?,
+ })
+ }
+}
diff --git a/drivers/android/binder/range_alloc/mod.rs b/drivers/android/binder/range_alloc/mod.rs
new file mode 100644
index 000000000000..2301e2bc1a1f
--- /dev/null
+++ b/drivers/android/binder/range_alloc/mod.rs
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{page::PAGE_SIZE, prelude::*, seq_file::SeqFile, task::Pid};
+
+mod tree;
+use self::tree::{FromArrayAllocs, ReserveNewTreeAlloc, TreeRangeAllocator};
+
+mod array;
+use self::array::{ArrayRangeAllocator, EmptyArrayAlloc};
+
+enum DescriptorState<T> {
+ Reserved(Reservation),
+ Allocated(Allocation<T>),
+}
+
+impl<T> DescriptorState<T> {
+ fn new(is_oneway: bool, debug_id: usize, pid: Pid) -> Self {
+ DescriptorState::Reserved(Reservation {
+ debug_id,
+ is_oneway,
+ pid,
+ })
+ }
+
+ fn pid(&self) -> Pid {
+ match self {
+ DescriptorState::Reserved(inner) => inner.pid,
+ DescriptorState::Allocated(inner) => inner.reservation.pid,
+ }
+ }
+
+ fn is_oneway(&self) -> bool {
+ match self {
+ DescriptorState::Reserved(inner) => inner.is_oneway,
+ DescriptorState::Allocated(inner) => inner.reservation.is_oneway,
+ }
+ }
+}
+
+#[derive(Clone)]
+struct Reservation {
+ debug_id: usize,
+ is_oneway: bool,
+ pid: Pid,
+}
+
+impl Reservation {
+ fn allocate<T>(self, data: Option<T>) -> Allocation<T> {
+ Allocation {
+ data,
+ reservation: self,
+ }
+ }
+}
+
+struct Allocation<T> {
+ reservation: Reservation,
+ data: Option<T>,
+}
+
+impl<T> Allocation<T> {
+ fn deallocate(self) -> (Reservation, Option<T>) {
+ (self.reservation, self.data)
+ }
+
+ fn debug_id(&self) -> usize {
+ self.reservation.debug_id
+ }
+
+ fn take(&mut self) -> Option<T> {
+ self.data.take()
+ }
+}
+
+/// The array implementation must switch to the tree if it wants to go beyond this number of
+/// ranges.
+const TREE_THRESHOLD: usize = 8;
+
+/// Represents a range of pages that have just become completely free.
+#[derive(Copy, Clone)]
+pub(crate) struct FreedRange {
+ pub(crate) start_page_idx: usize,
+ pub(crate) end_page_idx: usize,
+}
+
+impl FreedRange {
+ fn interior_pages(offset: usize, size: usize) -> FreedRange {
+ FreedRange {
+            // Divide, rounding up.
+            start_page_idx: offset.div_ceil(PAGE_SIZE),
+            // Divide, rounding down.
+            end_page_idx: (offset + size) / PAGE_SIZE,
+ }
+ }
+}
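+
+// Illustrative (assuming PAGE_SIZE == 0x1000): `interior_pages(0x800, 0x2000)`
+// covers bytes [0x800, 0x2800) and yields `start_page_idx == 1` and
+// `end_page_idx == 2`, i.e. only page 1 lies entirely inside the freed bytes.
+// The `reservation_abort` callers widen this range when the adjacent partial
+// pages turn out to be unused as well.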
+
+struct Range<T> {
+ offset: usize,
+ size: usize,
+ state: DescriptorState<T>,
+}
+
+impl<T> Range<T> {
+ fn endpoint(&self) -> usize {
+ self.offset + self.size
+ }
+}
+
+pub(crate) struct RangeAllocator<T> {
+ inner: Impl<T>,
+}
+
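+/// The allocator starts `Empty`, becomes `Array` on the first reservation, is
+/// promoted to `Tree` when the array is full and another reservation arrives,
+/// and falls back to `Empty` when the tree becomes empty again.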
+enum Impl<T> {
+ Empty(usize),
+ Array(ArrayRangeAllocator<T>),
+ Tree(TreeRangeAllocator<T>),
+}
+
+impl<T> RangeAllocator<T> {
+ pub(crate) fn new(size: usize) -> Self {
+ Self {
+ inner: Impl::Empty(size),
+ }
+ }
+
+ pub(crate) fn free_oneway_space(&self) -> usize {
+ match &self.inner {
+ Impl::Empty(size) => size / 2,
+ Impl::Array(array) => array.free_oneway_space(),
+ Impl::Tree(tree) => tree.free_oneway_space(),
+ }
+ }
+
+ pub(crate) fn count_buffers(&self) -> usize {
+ match &self.inner {
+ Impl::Empty(_size) => 0,
+ Impl::Array(array) => array.count_buffers(),
+ Impl::Tree(tree) => tree.count_buffers(),
+ }
+ }
+
+ pub(crate) fn debug_print(&self, m: &SeqFile) -> Result<()> {
+ match &self.inner {
+ Impl::Empty(_size) => Ok(()),
+ Impl::Array(array) => array.debug_print(m),
+ Impl::Tree(tree) => tree.debug_print(m),
+ }
+ }
+
+ /// Try to reserve a new buffer, using the provided allocation if necessary.
+ pub(crate) fn reserve_new(&mut self, mut args: ReserveNewArgs<T>) -> Result<ReserveNew<T>> {
+ match &mut self.inner {
+ Impl::Empty(size) => {
+ let empty_array = match args.empty_array_alloc.take() {
+ Some(empty_array) => ArrayRangeAllocator::new(*size, empty_array),
+ None => {
+ return Ok(ReserveNew::NeedAlloc(ReserveNewNeedAlloc {
+ args,
+ need_empty_array_alloc: true,
+ need_new_tree_alloc: false,
+ need_tree_alloc: false,
+ }))
+ }
+ };
+
+ self.inner = Impl::Array(empty_array);
+ self.reserve_new(args)
+ }
+ Impl::Array(array) if array.is_full() => {
+ let allocs = match args.new_tree_alloc {
+ Some(ref mut allocs) => allocs,
+ None => {
+ return Ok(ReserveNew::NeedAlloc(ReserveNewNeedAlloc {
+ args,
+ need_empty_array_alloc: false,
+ need_new_tree_alloc: true,
+ need_tree_alloc: true,
+ }))
+ }
+ };
+
+ let new_tree =
+ TreeRangeAllocator::from_array(array.total_size(), &mut array.ranges, allocs);
+
+ self.inner = Impl::Tree(new_tree);
+ self.reserve_new(args)
+ }
+ Impl::Array(array) => {
+ let offset =
+ array.reserve_new(args.debug_id, args.size, args.is_oneway, args.pid)?;
+ Ok(ReserveNew::Success(ReserveNewSuccess {
+ offset,
+ oneway_spam_detected: false,
+ _empty_array_alloc: args.empty_array_alloc,
+ _new_tree_alloc: args.new_tree_alloc,
+ _tree_alloc: args.tree_alloc,
+ }))
+ }
+ Impl::Tree(tree) => {
+ let alloc = match args.tree_alloc {
+ Some(alloc) => alloc,
+ None => {
+ return Ok(ReserveNew::NeedAlloc(ReserveNewNeedAlloc {
+ args,
+ need_empty_array_alloc: false,
+ need_new_tree_alloc: false,
+ need_tree_alloc: true,
+ }));
+ }
+ };
+ let (offset, oneway_spam_detected) =
+ tree.reserve_new(args.debug_id, args.size, args.is_oneway, args.pid, alloc)?;
+ Ok(ReserveNew::Success(ReserveNewSuccess {
+ offset,
+ oneway_spam_detected,
+ _empty_array_alloc: args.empty_array_alloc,
+ _new_tree_alloc: args.new_tree_alloc,
+ _tree_alloc: None,
+ }))
+ }
+ }
+ }
+
+ /// Deletes the allocations at `offset`.
+ pub(crate) fn reservation_abort(&mut self, offset: usize) -> Result<FreedRange> {
+ match &mut self.inner {
+ Impl::Empty(_size) => Err(EINVAL),
+ Impl::Array(array) => array.reservation_abort(offset),
+ Impl::Tree(tree) => {
+ let freed_range = tree.reservation_abort(offset)?;
+ if tree.is_empty() {
+ self.inner = Impl::Empty(tree.total_size());
+ }
+ Ok(freed_range)
+ }
+ }
+ }
+
+ /// Called when an allocation is no longer in use by the kernel.
+ ///
+    /// The value in `data`, if any, will be stored. A mutable reference is used to avoid dropping
+ /// the `T` when an error is returned.
+ pub(crate) fn reservation_commit(&mut self, offset: usize, data: &mut Option<T>) -> Result {
+ match &mut self.inner {
+ Impl::Empty(_size) => Err(EINVAL),
+ Impl::Array(array) => array.reservation_commit(offset, data),
+ Impl::Tree(tree) => tree.reservation_commit(offset, data),
+ }
+ }
+
+ /// Called when the kernel starts using an allocation.
+ ///
+ /// Returns the size of the existing entry and the data associated with it.
+ pub(crate) fn reserve_existing(&mut self, offset: usize) -> Result<(usize, usize, Option<T>)> {
+ match &mut self.inner {
+ Impl::Empty(_size) => Err(EINVAL),
+ Impl::Array(array) => array.reserve_existing(offset),
+ Impl::Tree(tree) => tree.reserve_existing(offset),
+ }
+ }
+
+    /// Call the provided callback for every allocated region.
+ ///
+ /// This destroys the range allocator. Used only during shutdown.
+ pub(crate) fn take_for_each<F: Fn(usize, usize, usize, Option<T>)>(&mut self, callback: F) {
+ match &mut self.inner {
+ Impl::Empty(_size) => {}
+ Impl::Array(array) => array.take_for_each(callback),
+ Impl::Tree(tree) => tree.take_for_each(callback),
+ }
+ }
+}
+
+/// The arguments for `reserve_new`.
+#[derive(Default)]
+pub(crate) struct ReserveNewArgs<T> {
+ pub(crate) size: usize,
+ pub(crate) is_oneway: bool,
+ pub(crate) debug_id: usize,
+ pub(crate) pid: Pid,
+ pub(crate) empty_array_alloc: Option<EmptyArrayAlloc<T>>,
+ pub(crate) new_tree_alloc: Option<FromArrayAllocs<T>>,
+ pub(crate) tree_alloc: Option<ReserveNewTreeAlloc<T>>,
+}
+
+/// The return type of `reserve_new`.
+pub(crate) enum ReserveNew<T> {
+ Success(ReserveNewSuccess<T>),
+ NeedAlloc(ReserveNewNeedAlloc<T>),
+}
+
+/// Returned by `reserve_new` when the reservation was successful.
+pub(crate) struct ReserveNewSuccess<T> {
+ pub(crate) offset: usize,
+ pub(crate) oneway_spam_detected: bool,
+
+ // If the user supplied an allocation that we did not end up using, then we return it here.
+ // The caller will kfree it outside of the lock.
+ _empty_array_alloc: Option<EmptyArrayAlloc<T>>,
+ _new_tree_alloc: Option<FromArrayAllocs<T>>,
+ _tree_alloc: Option<ReserveNewTreeAlloc<T>>,
+}
+
+/// Returned by `reserve_new` to request the caller to make an allocation before calling the method
+/// again.
+pub(crate) struct ReserveNewNeedAlloc<T> {
+ args: ReserveNewArgs<T>,
+ need_empty_array_alloc: bool,
+ need_new_tree_alloc: bool,
+ need_tree_alloc: bool,
+}
+
+impl<T> ReserveNewNeedAlloc<T> {
+ /// Make the necessary allocations for another call to `reserve_new`.
+ pub(crate) fn make_alloc(mut self) -> Result<ReserveNewArgs<T>> {
+ if self.need_empty_array_alloc && self.args.empty_array_alloc.is_none() {
+ self.args.empty_array_alloc = Some(EmptyArrayAlloc::try_new(TREE_THRESHOLD)?);
+ }
+ if self.need_new_tree_alloc && self.args.new_tree_alloc.is_none() {
+ self.args.new_tree_alloc = Some(FromArrayAllocs::try_new(TREE_THRESHOLD)?);
+ }
+ if self.need_tree_alloc && self.args.tree_alloc.is_none() {
+ self.args.tree_alloc = Some(ReserveNewTreeAlloc::try_new()?);
+ }
+ Ok(self.args)
+ }
+}
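+
+// A minimal caller-side sketch of the `reserve_new` protocol (illustrative
+// only; `alloc_lock` and the argument values are hypothetical). The point of
+// the two-phase design is that `make_alloc` performs its GFP_KERNEL
+// allocations while the lock is not held:
+//
+//     let mut args = ReserveNewArgs { size, is_oneway, debug_id, pid, ..Default::default() };
+//     let offset = loop {
+//         match alloc_lock.lock().reserve_new(args)? {
+//             ReserveNew::Success(success) => break success.offset,
+//             ReserveNew::NeedAlloc(need) => args = need.make_alloc()?,
+//         }
+//     };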
diff --git a/drivers/android/binder/range_alloc/tree.rs b/drivers/android/binder/range_alloc/tree.rs
new file mode 100644
index 000000000000..7b1a248fcb02
--- /dev/null
+++ b/drivers/android/binder/range_alloc/tree.rs
@@ -0,0 +1,488 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{
+ page::PAGE_SIZE,
+ prelude::*,
+ rbtree::{RBTree, RBTreeNode, RBTreeNodeReservation},
+ seq_file::SeqFile,
+ seq_print,
+ task::Pid,
+};
+
+use crate::range_alloc::{DescriptorState, FreedRange, Range};
+
+/// Keeps track of allocations in a process' mmap.
+///
+/// Each process has an mmap where the data for incoming transactions will be placed. This struct
+/// keeps track of allocations made in the mmap. For each allocation, we store a descriptor that
+/// has metadata related to the allocation. We also keep track of available free space.
+pub(super) struct TreeRangeAllocator<T> {
+ /// This collection contains descriptors for *both* ranges containing an allocation, *and* free
+ /// ranges between allocations. The free ranges get merged, so there are never two free ranges
+ /// next to each other.
+ tree: RBTree<usize, Descriptor<T>>,
+ /// Contains an entry for every free range in `self.tree`. This tree sorts the ranges by size,
+ /// letting us look up the smallest range whose size is at least some lower bound.
+ free_tree: RBTree<FreeKey, ()>,
+ size: usize,
+ free_oneway_space: usize,
+}
+
+impl<T> TreeRangeAllocator<T> {
+ pub(crate) fn from_array(
+ size: usize,
+ ranges: &mut KVec<Range<T>>,
+ alloc: &mut FromArrayAllocs<T>,
+ ) -> Self {
+ let mut tree = TreeRangeAllocator {
+ tree: RBTree::new(),
+ free_tree: RBTree::new(),
+ size,
+ free_oneway_space: size / 2,
+ };
+
+ let mut free_offset = 0;
+ for range in ranges.drain_all() {
+ let free_size = range.offset - free_offset;
+ if free_size > 0 {
+ let free_node = alloc.free_tree.pop().unwrap();
+ tree.free_tree
+ .insert(free_node.into_node((free_size, free_offset), ()));
+ let tree_node = alloc.tree.pop().unwrap();
+ tree.tree.insert(
+ tree_node.into_node(free_offset, Descriptor::new(free_offset, free_size)),
+ );
+ }
+ free_offset = range.endpoint();
+
+ if range.state.is_oneway() {
+ tree.free_oneway_space = tree.free_oneway_space.saturating_sub(range.size);
+ }
+
+ let free_res = alloc.free_tree.pop().unwrap();
+ let tree_node = alloc.tree.pop().unwrap();
+ let mut desc = Descriptor::new(range.offset, range.size);
+ desc.state = Some((range.state, free_res));
+ tree.tree.insert(tree_node.into_node(range.offset, desc));
+ }
+
+ // After the last range, we may need a free range.
+ if free_offset < size {
+ let free_size = size - free_offset;
+ let free_node = alloc.free_tree.pop().unwrap();
+ tree.free_tree
+ .insert(free_node.into_node((free_size, free_offset), ()));
+ let tree_node = alloc.tree.pop().unwrap();
+ tree.tree
+ .insert(tree_node.into_node(free_offset, Descriptor::new(free_offset, free_size)));
+ }
+
+ tree
+ }
+
+ pub(crate) fn is_empty(&self) -> bool {
+ let mut tree_iter = self.tree.values();
+        // There's always at least one descriptor, because offset zero is the start of either a
+        // free or an allocated range.
+ let first_value = tree_iter.next().unwrap();
+ if tree_iter.next().is_some() {
+ // There are never two free ranges next to each other, so if there is more than one
+ // descriptor, then at least one of them must hold an allocated range.
+ return false;
+ }
+ // There is only one descriptor. Return true if it is for a free range.
+ first_value.state.is_none()
+ }
+
+ pub(crate) fn total_size(&self) -> usize {
+ self.size
+ }
+
+ pub(crate) fn free_oneway_space(&self) -> usize {
+ self.free_oneway_space
+ }
+
+ pub(crate) fn count_buffers(&self) -> usize {
+ self.tree
+ .values()
+ .filter(|desc| desc.state.is_some())
+ .count()
+ }
+
+ pub(crate) fn debug_print(&self, m: &SeqFile) -> Result<()> {
+ for desc in self.tree.values() {
+ let state = match &desc.state {
+ Some(state) => &state.0,
+ None => continue,
+ };
+ seq_print!(
+ m,
+ " buffer: {} size {} pid {}",
+ desc.offset,
+ desc.size,
+ state.pid(),
+ );
+ if state.is_oneway() {
+ seq_print!(m, " oneway");
+ }
+ match state {
+ DescriptorState::Reserved(_res) => {
+ seq_print!(m, " reserved\n");
+ }
+ DescriptorState::Allocated(_alloc) => {
+ seq_print!(m, " allocated\n");
+ }
+ }
+ }
+ Ok(())
+ }
+
+ fn find_best_match(&mut self, size: usize) -> Option<&mut Descriptor<T>> {
+ let free_cursor = self.free_tree.cursor_lower_bound(&(size, 0))?;
+ let ((_, offset), ()) = free_cursor.current();
+ self.tree.get_mut(offset)
+ }
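+
+    // Illustrative: `FreeKey` is `(size, offset)` and tuples compare
+    // lexicographically, so `cursor_lower_bound(&(size, 0))` lands on the
+    // smallest free range whose size is at least `size`. With free keys
+    // {(8, 96), (16, 32), (64, 0)}, a request for 12 bytes picks (16, 32).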
+
+ /// Try to reserve a new buffer, using the provided allocation if necessary.
+ pub(crate) fn reserve_new(
+ &mut self,
+ debug_id: usize,
+ size: usize,
+ is_oneway: bool,
+ pid: Pid,
+ alloc: ReserveNewTreeAlloc<T>,
+ ) -> Result<(usize, bool)> {
+ // Compute new value of free_oneway_space, which is set only on success.
+ let new_oneway_space = if is_oneway {
+ match self.free_oneway_space.checked_sub(size) {
+ Some(new_oneway_space) => new_oneway_space,
+ None => return Err(ENOSPC),
+ }
+ } else {
+ self.free_oneway_space
+ };
+
+ // Start detecting spammers once we have less than 20%
+ // of async space left (which is less than 10% of total
+ // buffer size).
+ //
+        // (This will short-circuit, so `low_oneway_space` is
+        // only called when necessary.)
+ let oneway_spam_detected =
+ is_oneway && new_oneway_space < self.size / 10 && self.low_oneway_space(pid);
+
+ let (found_size, found_off, tree_node, free_tree_node) = match self.find_best_match(size) {
+ None => {
+ pr_warn!("ENOSPC from range_alloc.reserve_new - size: {}", size);
+ return Err(ENOSPC);
+ }
+ Some(desc) => {
+ let found_size = desc.size;
+ let found_offset = desc.offset;
+
+ // In case we need to break up the descriptor
+ let new_desc = Descriptor::new(found_offset + size, found_size - size);
+ let (tree_node, free_tree_node, desc_node_res) = alloc.initialize(new_desc);
+
+ desc.state = Some((
+ DescriptorState::new(is_oneway, debug_id, pid),
+ desc_node_res,
+ ));
+ desc.size = size;
+
+ (found_size, found_offset, tree_node, free_tree_node)
+ }
+ };
+ self.free_oneway_space = new_oneway_space;
+ self.free_tree.remove(&(found_size, found_off));
+
+ if found_size != size {
+ self.tree.insert(tree_node);
+ self.free_tree.insert(free_tree_node);
+ }
+
+ Ok((found_off, oneway_spam_detected))
+ }
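+
+    // Illustrative: if the best match is a free descriptor (offset 128, size 64)
+    // and `size == 16`, that descriptor becomes the allocated (128, 16), key
+    // (64, 128) leaves `free_tree`, and the preallocated nodes insert a new free
+    // descriptor (offset 144, size 48) into both trees.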
+
+ pub(crate) fn reservation_abort(&mut self, offset: usize) -> Result<FreedRange> {
+ let mut cursor = self.tree.cursor_lower_bound(&offset).ok_or_else(|| {
+ pr_warn!(
+ "EINVAL from range_alloc.reservation_abort - offset: {}",
+ offset
+ );
+ EINVAL
+ })?;
+
+ let (_, desc) = cursor.current_mut();
+
+ if desc.offset != offset {
+ pr_warn!(
+ "EINVAL from range_alloc.reservation_abort - offset: {}",
+ offset
+ );
+ return Err(EINVAL);
+ }
+
+ let (reservation, free_node_res) = desc.try_change_state(|state| match state {
+ Some((DescriptorState::Reserved(reservation), free_node_res)) => {
+ (None, Ok((reservation, free_node_res)))
+ }
+ None => {
+ pr_warn!(
+ "EINVAL from range_alloc.reservation_abort - offset: {}",
+ offset
+ );
+ (None, Err(EINVAL))
+ }
+ allocated => {
+ pr_warn!(
+ "EPERM from range_alloc.reservation_abort - offset: {}",
+ offset
+ );
+ (allocated, Err(EPERM))
+ }
+ })?;
+
+ let mut size = desc.size;
+ let mut offset = desc.offset;
+ let free_oneway_space_add = if reservation.is_oneway { size } else { 0 };
+
+ self.free_oneway_space += free_oneway_space_add;
+
+ let mut freed_range = FreedRange::interior_pages(offset, size);
+ // Compute how large the next free region needs to be to include one more page in
+ // the newly freed range.
+ let add_next_page_needed = match (offset + size) % PAGE_SIZE {
+ 0 => usize::MAX,
+ unalign => PAGE_SIZE - unalign,
+ };
+ // Compute how large the previous free region needs to be to include one more page
+ // in the newly freed range.
+ let add_prev_page_needed = match offset % PAGE_SIZE {
+ 0 => usize::MAX,
+ unalign => unalign,
+ };
+
+ // Merge next into current if next is free
+ let remove_next = match cursor.peek_next() {
+ Some((_, next)) if next.state.is_none() => {
+ if next.size >= add_next_page_needed {
+ freed_range.end_page_idx += 1;
+ }
+ self.free_tree.remove(&(next.size, next.offset));
+ size += next.size;
+ true
+ }
+ _ => false,
+ };
+
+ if remove_next {
+ let (_, desc) = cursor.current_mut();
+ desc.size = size;
+ cursor.remove_next();
+ }
+
+ // Merge current into prev if prev is free
+ match cursor.peek_prev_mut() {
+ Some((_, prev)) if prev.state.is_none() => {
+ if prev.size >= add_prev_page_needed {
+ freed_range.start_page_idx -= 1;
+ }
+ // merge previous with current, remove current
+ self.free_tree.remove(&(prev.size, prev.offset));
+ offset = prev.offset;
+ size += prev.size;
+ prev.size = size;
+ cursor.remove_current();
+ }
+ _ => {}
+ };
+
+ self.free_tree
+ .insert(free_node_res.into_node((size, offset), ()));
+
+ Ok(freed_range)
+ }
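+
+    // Illustrative: aborting a reserved range [32..48) whose next neighbour
+    // [48..64) is free (and whose previous neighbour is allocated) merges only
+    // forwards: key (16, 48) leaves `free_tree`, the freed descriptor grows to
+    // (offset 32, size 32), and key (32, 32) is inserted.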
+
+ pub(crate) fn reservation_commit(&mut self, offset: usize, data: &mut Option<T>) -> Result {
+ let desc = self.tree.get_mut(&offset).ok_or(ENOENT)?;
+
+ desc.try_change_state(|state| match state {
+ Some((DescriptorState::Reserved(reservation), free_node_res)) => (
+ Some((
+ DescriptorState::Allocated(reservation.allocate(data.take())),
+ free_node_res,
+ )),
+ Ok(()),
+ ),
+ other => (other, Err(ENOENT)),
+ })
+ }
+
+ /// Takes an entry at the given offset from [`DescriptorState::Allocated`] to
+ /// [`DescriptorState::Reserved`].
+ ///
+ /// Returns the size of the existing entry and the data associated with it.
+ pub(crate) fn reserve_existing(&mut self, offset: usize) -> Result<(usize, usize, Option<T>)> {
+ let desc = self.tree.get_mut(&offset).ok_or_else(|| {
+ pr_warn!(
+ "ENOENT from range_alloc.reserve_existing - offset: {}",
+ offset
+ );
+ ENOENT
+ })?;
+
+ let (debug_id, data) = desc.try_change_state(|state| match state {
+ Some((DescriptorState::Allocated(allocation), free_node_res)) => {
+ let (reservation, data) = allocation.deallocate();
+ let debug_id = reservation.debug_id;
+ (
+ Some((DescriptorState::Reserved(reservation), free_node_res)),
+ Ok((debug_id, data)),
+ )
+ }
+ other => {
+ pr_warn!(
+ "ENOENT from range_alloc.reserve_existing - offset: {}",
+ offset
+ );
+ (other, Err(ENOENT))
+ }
+ })?;
+
+ Ok((desc.size, debug_id, data))
+ }
+
+    /// Call the provided callback for every allocated region.
+ ///
+ /// This destroys the range allocator. Used only during shutdown.
+ pub(crate) fn take_for_each<F: Fn(usize, usize, usize, Option<T>)>(&mut self, callback: F) {
+ for (_, desc) in self.tree.iter_mut() {
+ if let Some((DescriptorState::Allocated(allocation), _)) = &mut desc.state {
+ callback(
+ desc.offset,
+ desc.size,
+ allocation.debug_id(),
+ allocation.take(),
+ );
+ }
+ }
+ }
+
+    /// Find the number and total size of buffers allocated by the current caller.
+ ///
+ /// The idea is that once we cross the threshold, whoever is responsible
+ /// for the low async space is likely to try to send another async transaction,
+ /// and at some point we'll catch them in the act. This is more efficient
+ /// than keeping a map per pid.
+ fn low_oneway_space(&self, calling_pid: Pid) -> bool {
+ let mut total_alloc_size = 0;
+ let mut num_buffers = 0;
+ for (_, desc) in self.tree.iter() {
+ if let Some((state, _)) = &desc.state {
+ if state.is_oneway() && state.pid() == calling_pid {
+ total_alloc_size += desc.size;
+ num_buffers += 1;
+ }
+ }
+ }
+
+ // Warn if this pid has more than 50 transactions, or more than 50% of
+ // async space (which is 25% of total buffer size). Oneway spam is only
+ // detected when the threshold is exceeded.
+ num_buffers > 50 || total_alloc_size > self.size / 4
+ }
+}
+
+type TreeDescriptorState<T> = (DescriptorState<T>, FreeNodeRes);
+struct Descriptor<T> {
+ size: usize,
+ offset: usize,
+ state: Option<TreeDescriptorState<T>>,
+}
+
+impl<T> Descriptor<T> {
+ fn new(offset: usize, size: usize) -> Self {
+ Self {
+ size,
+ offset,
+ state: None,
+ }
+ }
+
+ fn try_change_state<F, Data>(&mut self, f: F) -> Result<Data>
+ where
+ F: FnOnce(Option<TreeDescriptorState<T>>) -> (Option<TreeDescriptorState<T>>, Result<Data>),
+ {
+ let (new_state, result) = f(self.state.take());
+ self.state = new_state;
+ result
+ }
+}
+
+// (Descriptor.size, Descriptor.offset)
+type FreeKey = (usize, usize);
+type FreeNodeRes = RBTreeNodeReservation<FreeKey, ()>;
+
+/// An allocation for use by `reserve_new`.
+pub(crate) struct ReserveNewTreeAlloc<T> {
+ tree_node_res: RBTreeNodeReservation<usize, Descriptor<T>>,
+ free_tree_node_res: FreeNodeRes,
+ desc_node_res: FreeNodeRes,
+}
+
+impl<T> ReserveNewTreeAlloc<T> {
+ pub(crate) fn try_new() -> Result<Self> {
+ let tree_node_res = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ let free_tree_node_res = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ let desc_node_res = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ Ok(Self {
+ tree_node_res,
+ free_tree_node_res,
+ desc_node_res,
+ })
+ }
+
+ fn initialize(
+ self,
+ desc: Descriptor<T>,
+ ) -> (
+ RBTreeNode<usize, Descriptor<T>>,
+ RBTreeNode<FreeKey, ()>,
+ FreeNodeRes,
+ ) {
+ let size = desc.size;
+ let offset = desc.offset;
+ (
+ self.tree_node_res.into_node(offset, desc),
+ self.free_tree_node_res.into_node((size, offset), ()),
+ self.desc_node_res,
+ )
+ }
+}
+
+/// An allocation for creating a tree from an `ArrayRangeAllocator`.
+pub(crate) struct FromArrayAllocs<T> {
+ tree: KVec<RBTreeNodeReservation<usize, Descriptor<T>>>,
+ free_tree: KVec<RBTreeNodeReservation<FreeKey, ()>>,
+}
+
+impl<T> FromArrayAllocs<T> {
+ pub(crate) fn try_new(len: usize) -> Result<Self> {
+ let num_descriptors = 2 * len + 1;
+
+ let mut tree = KVec::with_capacity(num_descriptors, GFP_KERNEL)?;
+ for _ in 0..num_descriptors {
+ tree.push(RBTreeNodeReservation::new(GFP_KERNEL)?, GFP_KERNEL)?;
+ }
+
+ let mut free_tree = KVec::with_capacity(num_descriptors, GFP_KERNEL)?;
+ for _ in 0..num_descriptors {
+ free_tree.push(RBTreeNodeReservation::new(GFP_KERNEL)?, GFP_KERNEL)?;
+ }
+
+ Ok(Self { tree, free_tree })
+ }
+}
diff --git a/drivers/android/binder/rust_binder.h b/drivers/android/binder/rust_binder.h
new file mode 100644
index 000000000000..31806890ed1a
--- /dev/null
+++ b/drivers/android/binder/rust_binder.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025 Google, Inc.
+ */
+
+#ifndef _LINUX_RUST_BINDER_H
+#define _LINUX_RUST_BINDER_H
+
+#include <uapi/linux/android/binder.h>
+#include <uapi/linux/android/binderfs.h>
+
+/*
+ * These symbols are exposed by `rust_binderfs.c` and exist here so that Rust
+ * Binder can call them.
+ */
+int init_rust_binderfs(void);
+
+struct dentry;
+struct inode;
+struct dentry *rust_binderfs_create_proc_file(struct inode *nodp, int pid);
+void rust_binderfs_remove_file(struct dentry *dentry);
+
+#endif
diff --git a/drivers/android/binder/rust_binder_events.c b/drivers/android/binder/rust_binder_events.c
new file mode 100644
index 000000000000..488b1470060c
--- /dev/null
+++ b/drivers/android/binder/rust_binder_events.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* rust_binder_events.c
+ *
+ * Rust Binder tracepoints.
+ *
+ * Copyright 2025 Google LLC
+ */
+
+#include "rust_binder.h"
+
+const char * const binder_command_strings[] = {
+ "BC_TRANSACTION",
+ "BC_REPLY",
+ "BC_ACQUIRE_RESULT",
+ "BC_FREE_BUFFER",
+ "BC_INCREFS",
+ "BC_ACQUIRE",
+ "BC_RELEASE",
+ "BC_DECREFS",
+ "BC_INCREFS_DONE",
+ "BC_ACQUIRE_DONE",
+ "BC_ATTEMPT_ACQUIRE",
+ "BC_REGISTER_LOOPER",
+ "BC_ENTER_LOOPER",
+ "BC_EXIT_LOOPER",
+ "BC_REQUEST_DEATH_NOTIFICATION",
+ "BC_CLEAR_DEATH_NOTIFICATION",
+ "BC_DEAD_BINDER_DONE",
+ "BC_TRANSACTION_SG",
+ "BC_REPLY_SG",
+};
+
+const char * const binder_return_strings[] = {
+ "BR_ERROR",
+ "BR_OK",
+ "BR_TRANSACTION",
+ "BR_REPLY",
+ "BR_ACQUIRE_RESULT",
+ "BR_DEAD_REPLY",
+ "BR_TRANSACTION_COMPLETE",
+ "BR_INCREFS",
+ "BR_ACQUIRE",
+ "BR_RELEASE",
+ "BR_DECREFS",
+ "BR_ATTEMPT_ACQUIRE",
+ "BR_NOOP",
+ "BR_SPAWN_LOOPER",
+ "BR_FINISHED",
+ "BR_DEAD_BINDER",
+ "BR_CLEAR_DEATH_NOTIFICATION_DONE",
+ "BR_FAILED_REPLY",
+ "BR_FROZEN_REPLY",
+ "BR_ONEWAY_SPAM_SUSPECT",
+ "BR_TRANSACTION_PENDING_FROZEN"
+};
+
+#define CREATE_TRACE_POINTS
+#define CREATE_RUST_TRACE_POINTS
+#include "rust_binder_events.h"
diff --git a/drivers/android/binder/rust_binder_events.h b/drivers/android/binder/rust_binder_events.h
new file mode 100644
index 000000000000..2f3efbf9dba6
--- /dev/null
+++ b/drivers/android/binder/rust_binder_events.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2025 Google, Inc.
+ */
+
+#undef TRACE_SYSTEM
+#undef TRACE_INCLUDE_FILE
+#undef TRACE_INCLUDE_PATH
+#define TRACE_SYSTEM rust_binder
+#define TRACE_INCLUDE_FILE rust_binder_events
+#define TRACE_INCLUDE_PATH ../drivers/android/binder
+
+#if !defined(_RUST_BINDER_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _RUST_BINDER_TRACE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(rust_binder_ioctl,
+ TP_PROTO(unsigned int cmd, unsigned long arg),
+ TP_ARGS(cmd, arg),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, cmd)
+ __field(unsigned long, arg)
+ ),
+ TP_fast_assign(
+ __entry->cmd = cmd;
+ __entry->arg = arg;
+ ),
+ TP_printk("cmd=0x%x arg=0x%lx", __entry->cmd, __entry->arg)
+);
+
+#endif /* _RUST_BINDER_TRACE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/android/binder/rust_binder_internal.h b/drivers/android/binder/rust_binder_internal.h
new file mode 100644
index 000000000000..78288fe7964d
--- /dev/null
+++ b/drivers/android/binder/rust_binder_internal.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* rust_binder_internal.h
+ *
+ * This file contains internal data structures used by Rust Binder. Mostly,
+ * these are type definitions used only by binderfs or things that Rust Binder
+ * defines and exports to binderfs.
+ *
+ * It does not include things exported by binderfs to Rust Binder since this
+ * file is not included as input to bindgen.
+ *
+ * Copyright (C) 2025 Google LLC.
+ */
+
+#ifndef _LINUX_RUST_BINDER_INTERNAL_H
+#define _LINUX_RUST_BINDER_INTERNAL_H
+
+#define RUST_BINDERFS_SUPER_MAGIC 0x6c6f6f71
+
+#include <linux/seq_file.h>
+#include <uapi/linux/android/binder.h>
+#include <uapi/linux/android/binderfs.h>
+
+/*
+ * The internal data types in the Rust Binder driver are opaque to C, so we use
+ * void pointer typedefs for these types.
+ */
+typedef void *rust_binder_context;
+
+/**
+ * struct binder_device - information about a binder device node
+ * @minor: the minor number used by this device
+ * @ctx: the Rust Context used by this device, or null for binder-control
+ *
+ * This is used as the private data for files directly in binderfs, but not
+ * files in the binder_logs subdirectory. This struct owns a refcount on `ctx`
+ * and the entry for `minor` in `binderfs_minors`. For binder-control `ctx` is
+ * null.
+ */
+struct binder_device {
+ int minor;
+ rust_binder_context ctx;
+};
+
+int rust_binder_stats_show(struct seq_file *m, void *unused);
+int rust_binder_state_show(struct seq_file *m, void *unused);
+int rust_binder_transactions_show(struct seq_file *m, void *unused);
+int rust_binder_proc_show(struct seq_file *m, void *pid);
+
+extern const struct file_operations rust_binder_fops;
+rust_binder_context rust_binder_new_context(char *name);
+void rust_binder_remove_context(rust_binder_context device);
+
+/**
+ * binderfs_mount_opts - mount options for binderfs
+ * @max: maximum number of allocatable binderfs binder devices
+ * @stats_mode: enable binder stats in binderfs.
+ */
+struct binderfs_mount_opts {
+ int max;
+ int stats_mode;
+};
+
+/**
+ * binderfs_info - information about a binderfs mount
+ * @ipc_ns: The ipc namespace the binderfs mount belongs to.
+ * @control_dentry: This records the dentry of this binderfs mount
+ * binder-control device.
+ * @root_uid: uid that needs to be used when a new binder device is
+ * created.
+ * @root_gid: gid that needs to be used when a new binder device is
+ * created.
+ * @mount_opts: The mount options in use.
+ * @device_count: The current number of allocated binder devices.
+ * @proc_log_dir: Pointer to the directory dentry containing process-specific
+ * logs.
+ */
+struct binderfs_info {
+ struct ipc_namespace *ipc_ns;
+ struct dentry *control_dentry;
+ kuid_t root_uid;
+ kgid_t root_gid;
+ struct binderfs_mount_opts mount_opts;
+ int device_count;
+ struct dentry *proc_log_dir;
+};
+
+#endif /* _LINUX_RUST_BINDER_INTERNAL_H */
diff --git a/drivers/android/binder/rust_binder_main.rs b/drivers/android/binder/rust_binder_main.rs
new file mode 100644
index 000000000000..6773b7c273ec
--- /dev/null
+++ b/drivers/android/binder/rust_binder_main.rs
@@ -0,0 +1,627 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! Binder -- the Android IPC mechanism.
+#![recursion_limit = "256"]
+#![allow(
+ clippy::as_underscore,
+ clippy::ref_as_ptr,
+ clippy::ptr_as_ptr,
+ clippy::cast_lossless
+)]
+
+use kernel::{
+ bindings::{self, seq_file},
+ fs::File,
+ list::{ListArc, ListArcSafe, ListLinksSelfPtr, TryNewListArc},
+ prelude::*,
+ seq_file::SeqFile,
+ seq_print,
+ sync::poll::PollTable,
+ sync::Arc,
+ task::Pid,
+ transmute::AsBytes,
+ types::ForeignOwnable,
+ uaccess::UserSliceWriter,
+};
+
+use crate::{context::Context, page_range::Shrinker, process::Process, thread::Thread};
+
+use core::{
+ ptr::NonNull,
+ sync::atomic::{AtomicBool, AtomicUsize, Ordering},
+};
+
+mod allocation;
+mod context;
+mod deferred_close;
+mod defs;
+mod error;
+mod node;
+mod page_range;
+mod process;
+mod range_alloc;
+mod stats;
+mod thread;
+mod trace;
+mod transaction;
+
+#[allow(warnings)] // generated bindgen code
+mod binderfs {
+ use kernel::bindings::{dentry, inode};
+
+ extern "C" {
+ pub fn init_rust_binderfs() -> kernel::ffi::c_int;
+ }
+ extern "C" {
+ pub fn rust_binderfs_create_proc_file(
+ nodp: *mut inode,
+ pid: kernel::ffi::c_int,
+ ) -> *mut dentry;
+ }
+ extern "C" {
+ pub fn rust_binderfs_remove_file(dentry: *mut dentry);
+ }
+ pub type rust_binder_context = *mut kernel::ffi::c_void;
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ pub struct binder_device {
+ pub minor: kernel::ffi::c_int,
+ pub ctx: rust_binder_context,
+ }
+ impl Default for binder_device {
+ fn default() -> Self {
+ let mut s = ::core::mem::MaybeUninit::<Self>::uninit();
+ unsafe {
+ ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
+ s.assume_init()
+ }
+ }
+ }
+}
+
+module! {
+ type: BinderModule,
+ name: "rust_binder",
+ authors: ["Wedson Almeida Filho", "Alice Ryhl"],
+ description: "Android Binder",
+ license: "GPL",
+}
+
+fn next_debug_id() -> usize {
+ static NEXT_DEBUG_ID: AtomicUsize = AtomicUsize::new(0);
+
+ NEXT_DEBUG_ID.fetch_add(1, Ordering::Relaxed)
+}
+
+/// Provides a single place to write Binder return values via the
+/// supplied `UserSliceWriter`.
+pub(crate) struct BinderReturnWriter<'a> {
+ writer: UserSliceWriter,
+ thread: &'a Thread,
+}
+
+impl<'a> BinderReturnWriter<'a> {
+ fn new(writer: UserSliceWriter, thread: &'a Thread) -> Self {
+ BinderReturnWriter { writer, thread }
+ }
+
+ /// Write a return code back to user space.
+    /// Should be a `BR_` constant from [`defs`], e.g. [`defs::BR_TRANSACTION_COMPLETE`].
+ fn write_code(&mut self, code: u32) -> Result {
+ stats::GLOBAL_STATS.inc_br(code);
+ self.thread.process.stats.inc_br(code);
+ self.writer.write(&code)
+ }
+
+ /// Write something *other than* a return code to user space.
+ fn write_payload<T: AsBytes>(&mut self, payload: &T) -> Result {
+ self.writer.write(payload)
+ }
+
+ fn len(&self) -> usize {
+ self.writer.len()
+ }
+}
+
+/// Specifies how a type should be delivered to the read part of a BINDER_WRITE_READ ioctl.
+///
+/// When a value is pushed to the todo list for a process or thread, it is stored as a trait object
+/// with the type `Arc<dyn DeliverToRead>`. Trait objects are the Rust mechanism for dynamic
+/// dispatch, which lets a single todo list hold work items of many different types.
+trait DeliverToRead: ListArcSafe + Send + Sync {
+ /// Performs work. Returns true if remaining work items in the queue should be processed
+    /// immediately, or false if it should return to the caller before processing additional work
+ /// items.
+ fn do_work(
+ self: DArc<Self>,
+ thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool>;
+
+ /// Cancels the given work item. This is called instead of [`DeliverToRead::do_work`] when work
+ /// won't be delivered.
+ fn cancel(self: DArc<Self>);
+
+ /// Should we use `wake_up_interruptible_sync` or `wake_up_interruptible` when scheduling this
+ /// work item?
+ ///
+ /// Generally only set to true for non-oneway transactions.
+ fn should_sync_wakeup(&self) -> bool;
+
+ fn debug_print(&self, m: &SeqFile, prefix: &str, transaction_prefix: &str) -> Result<()>;
+}
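+
+// The simplest implementor of this trait is `DeliverCode` below: it carries a
+// single `BR_` return code and writes it to the reader in `do_work`, unless it
+// has been skipped.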
+
+// Wrapper around a `DeliverToRead` with linked list links.
+#[pin_data]
+struct DTRWrap<T: ?Sized> {
+ #[pin]
+ links: ListLinksSelfPtr<DTRWrap<dyn DeliverToRead>>,
+ #[pin]
+ wrapped: T,
+}
+kernel::list::impl_list_arc_safe! {
+ impl{T: ListArcSafe + ?Sized} ListArcSafe<0> for DTRWrap<T> {
+ tracked_by wrapped: T;
+ }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<0> for DTRWrap<dyn DeliverToRead> {
+ using ListLinksSelfPtr { self.links };
+ }
+}
+
+impl<T: ?Sized> core::ops::Deref for DTRWrap<T> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ &self.wrapped
+ }
+}
+
+type DArc<T> = kernel::sync::Arc<DTRWrap<T>>;
+type DLArc<T> = kernel::list::ListArc<DTRWrap<T>>;
+
+impl<T: ListArcSafe> DTRWrap<T> {
+ fn new(val: impl PinInit<T>) -> impl PinInit<Self> {
+ pin_init!(Self {
+ links <- ListLinksSelfPtr::new(),
+ wrapped <- val,
+ })
+ }
+
+ fn arc_try_new(val: T) -> Result<DLArc<T>, kernel::alloc::AllocError> {
+ ListArc::pin_init(
+ try_pin_init!(Self {
+ links <- ListLinksSelfPtr::new(),
+ wrapped: val,
+ }),
+ GFP_KERNEL,
+ )
+ .map_err(|_| kernel::alloc::AllocError)
+ }
+
+ fn arc_pin_init(init: impl PinInit<T>) -> Result<DLArc<T>, kernel::error::Error> {
+ ListArc::pin_init(
+ try_pin_init!(Self {
+ links <- ListLinksSelfPtr::new(),
+ wrapped <- init,
+ }),
+ GFP_KERNEL,
+ )
+ }
+}
+
+struct DeliverCode {
+ code: u32,
+ skip: AtomicBool,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for DeliverCode { untracked; }
+}
+
+impl DeliverCode {
+ fn new(code: u32) -> Self {
+ Self {
+ code,
+ skip: AtomicBool::new(false),
+ }
+ }
+
+ /// Disable this DeliverCode and make it do nothing.
+ ///
+ /// This is used instead of removing it from the work list, since `LinkedList::remove` is
+ /// unsafe, whereas this method is not.
+ fn skip(&self) {
+ self.skip.store(true, Ordering::Relaxed);
+ }
+}
+
+impl DeliverToRead for DeliverCode {
+ fn do_work(
+ self: DArc<Self>,
+ _thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ if !self.skip.load(Ordering::Relaxed) {
+ writer.write_code(self.code)?;
+ }
+ Ok(true)
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ seq_print!(m, "{}", prefix);
+ if self.skip.load(Ordering::Relaxed) {
+ seq_print!(m, "(skipped) ");
+ }
+ if self.code == defs::BR_TRANSACTION_COMPLETE {
+ seq_print!(m, "transaction complete\n");
+ } else {
+ seq_print!(m, "transaction error: {}\n", self.code);
+ }
+ Ok(())
+ }
+}
+
+fn ptr_align(value: usize) -> Option<usize> {
+ let size = core::mem::size_of::<usize>() - 1;
+ Some(value.checked_add(size)? & !size)
+}
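+
+// Illustrative: on a 64-bit target, `ptr_align(13) == Some(16)` and
+// `ptr_align(16) == Some(16)`; `None` is returned only when the checked add
+// overflows.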
+
+// SAFETY: We call register in `init`.
+static BINDER_SHRINKER: Shrinker = unsafe { Shrinker::new() };
+
+struct BinderModule {}
+
+impl kernel::Module for BinderModule {
+ fn init(_module: &'static kernel::ThisModule) -> Result<Self> {
+ // SAFETY: The module initializer never runs twice, so we only call this once.
+ unsafe { crate::context::CONTEXTS.init() };
+
+ pr_warn!("Loaded Rust Binder.");
+
+ BINDER_SHRINKER.register(kernel::c_str!("android-binder"))?;
+
+ // SAFETY: The module is being loaded, so we can initialize binderfs.
+ unsafe { kernel::error::to_result(binderfs::init_rust_binderfs())? };
+
+ Ok(Self {})
+ }
+}
+
+/// Makes the inner type Sync.
+#[repr(transparent)]
+pub struct AssertSync<T>(T);
+// SAFETY: Used only to insert `file_operations` into a global, which is safe.
+unsafe impl<T> Sync for AssertSync<T> {}
+
+/// File operations that rust_binderfs.c can use.
+#[no_mangle]
+#[used]
+pub static rust_binder_fops: AssertSync<kernel::bindings::file_operations> = {
+ // SAFETY: All zeroes is safe for the `file_operations` type.
+ let zeroed_ops = unsafe { core::mem::MaybeUninit::zeroed().assume_init() };
+
+ let ops = kernel::bindings::file_operations {
+ owner: THIS_MODULE.as_ptr(),
+ poll: Some(rust_binder_poll),
+ unlocked_ioctl: Some(rust_binder_unlocked_ioctl),
+ compat_ioctl: Some(rust_binder_compat_ioctl),
+ mmap: Some(rust_binder_mmap),
+ open: Some(rust_binder_open),
+ release: Some(rust_binder_release),
+ flush: Some(rust_binder_flush),
+ ..zeroed_ops
+ };
+ AssertSync(ops)
+};
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_new_context(
+ name: *const kernel::ffi::c_char,
+) -> *mut kernel::ffi::c_void {
+ // SAFETY: The caller will always provide a valid c string here.
+ let name = unsafe { kernel::str::CStr::from_char_ptr(name) };
+ match Context::new(name) {
+ Ok(ctx) => Arc::into_foreign(ctx),
+ Err(_err) => core::ptr::null_mut(),
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_remove_context(device: *mut kernel::ffi::c_void) {
+ if !device.is_null() {
+ // SAFETY: The caller ensures that the `device` pointer came from a previous call to
+        // `rust_binder_new_context`.
+ let ctx = unsafe { Arc::<Context>::from_foreign(device) };
+ ctx.deregister();
+ drop(ctx);
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_open(
+ inode: *mut bindings::inode,
+ file_ptr: *mut bindings::file,
+) -> kernel::ffi::c_int {
+ // SAFETY: The `rust_binderfs.c` file ensures that `i_private` is set to a
+ // `struct binder_device`.
+ let device = unsafe { (*inode).i_private } as *const binderfs::binder_device;
+
+ assert!(!device.is_null());
+
+ // SAFETY: The `rust_binderfs.c` file ensures that `device->ctx` holds a binder context when
+ // using the rust binder fops.
+ let ctx = unsafe { Arc::<Context>::borrow((*device).ctx) };
+
+ // SAFETY: The caller provides a valid file pointer to a new `struct file`.
+ let file = unsafe { File::from_raw_file(file_ptr) };
+ let process = match Process::open(ctx, file) {
+ Ok(process) => process,
+ Err(err) => return err.to_errno(),
+ };
+
+ // SAFETY: This is an `inode` for a newly created binder file.
+ match unsafe { BinderfsProcFile::new(inode, process.task.pid()) } {
+ Ok(Some(file)) => process.inner.lock().binderfs_file = Some(file),
+ Ok(None) => { /* pid already exists */ }
+ Err(err) => return err.to_errno(),
+ }
+
+ // SAFETY: This file is associated with Rust binder, so we own the `private_data` field.
+ unsafe { (*file_ptr).private_data = process.into_foreign() };
+ 0
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_release(
+ _inode: *mut bindings::inode,
+ file: *mut bindings::file,
+) -> kernel::ffi::c_int {
+ // SAFETY: We previously set `private_data` in `rust_binder_open`.
+ let process = unsafe { Arc::<Process>::from_foreign((*file).private_data) };
+ // SAFETY: The caller ensures that the file is valid.
+ let file = unsafe { File::from_raw_file(file) };
+ Process::release(process, file);
+ 0
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_compat_ioctl(
+ file: *mut bindings::file,
+ cmd: kernel::ffi::c_uint,
+ arg: kernel::ffi::c_ulong,
+) -> kernel::ffi::c_long {
+ // SAFETY: We previously set `private_data` in `rust_binder_open`.
+ let f = unsafe { Arc::<Process>::borrow((*file).private_data) };
+ // SAFETY: The caller ensures that the file is valid.
+ match Process::compat_ioctl(f, unsafe { File::from_raw_file(file) }, cmd as _, arg as _) {
+ Ok(()) => 0,
+ Err(err) => err.to_errno() as isize,
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_unlocked_ioctl(
+ file: *mut bindings::file,
+ cmd: kernel::ffi::c_uint,
+ arg: kernel::ffi::c_ulong,
+) -> kernel::ffi::c_long {
+ // SAFETY: We previously set `private_data` in `rust_binder_open`.
+ let f = unsafe { Arc::<Process>::borrow((*file).private_data) };
+ // SAFETY: The caller ensures that the file is valid.
+ match Process::ioctl(f, unsafe { File::from_raw_file(file) }, cmd as _, arg as _) {
+ Ok(()) => 0,
+ Err(err) => err.to_errno() as isize,
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_mmap(
+ file: *mut bindings::file,
+ vma: *mut bindings::vm_area_struct,
+) -> kernel::ffi::c_int {
+ // SAFETY: We previously set `private_data` in `rust_binder_open`.
+ let f = unsafe { Arc::<Process>::borrow((*file).private_data) };
+ // SAFETY: The caller ensures that the vma is valid.
+ let area = unsafe { kernel::mm::virt::VmaNew::from_raw(vma) };
+ // SAFETY: The caller ensures that the file is valid.
+ match Process::mmap(f, unsafe { File::from_raw_file(file) }, area) {
+ Ok(()) => 0,
+ Err(err) => err.to_errno(),
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_poll(
+ file: *mut bindings::file,
+ wait: *mut bindings::poll_table_struct,
+) -> bindings::__poll_t {
+ // SAFETY: We previously set `private_data` in `rust_binder_open`.
+ let f = unsafe { Arc::<Process>::borrow((*file).private_data) };
+ // SAFETY: The caller ensures that the file is valid.
+ let fileref = unsafe { File::from_raw_file(file) };
+ // SAFETY: The caller ensures that the `PollTable` is valid.
+ match Process::poll(f, fileref, unsafe { PollTable::from_raw(wait) }) {
+ Ok(v) => v,
+ Err(_) => bindings::POLLERR,
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_flush(
+ file: *mut bindings::file,
+ _id: bindings::fl_owner_t,
+) -> kernel::ffi::c_int {
+ // SAFETY: We previously set `private_data` in `rust_binder_open`.
+ let f = unsafe { Arc::<Process>::borrow((*file).private_data) };
+ match Process::flush(f) {
+ Ok(()) => 0,
+ Err(err) => err.to_errno(),
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_stats_show(
+ ptr: *mut seq_file,
+ _: *mut kernel::ffi::c_void,
+) -> kernel::ffi::c_int {
+ // SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in which
+ // this method is called.
+ let m = unsafe { SeqFile::from_raw(ptr) };
+ if let Err(err) = rust_binder_stats_show_impl(m) {
+ seq_print!(m, "failed to generate state: {:?}\n", err);
+ }
+ 0
+}
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_state_show(
+ ptr: *mut seq_file,
+ _: *mut kernel::ffi::c_void,
+) -> kernel::ffi::c_int {
+ // SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in which
+ // this method is called.
+ let m = unsafe { SeqFile::from_raw(ptr) };
+ if let Err(err) = rust_binder_state_show_impl(m) {
+ seq_print!(m, "failed to generate state: {:?}\n", err);
+ }
+ 0
+}
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_proc_show(
+ ptr: *mut seq_file,
+ _: *mut kernel::ffi::c_void,
+) -> kernel::ffi::c_int {
+ // SAFETY: Accessing the private field of `seq_file` is okay.
+ let pid = (unsafe { (*ptr).private }) as usize as Pid;
+ // SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in which
+ // this method is called.
+ let m = unsafe { SeqFile::from_raw(ptr) };
+ if let Err(err) = rust_binder_proc_show_impl(m, pid) {
+ seq_print!(m, "failed to generate state: {:?}\n", err);
+ }
+ 0
+}
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_transactions_show(
+ ptr: *mut seq_file,
+ _: *mut kernel::ffi::c_void,
+) -> kernel::ffi::c_int {
+ // SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in which
+ // this method is called.
+ let m = unsafe { SeqFile::from_raw(ptr) };
+ if let Err(err) = rust_binder_transactions_show_impl(m) {
+ seq_print!(m, "failed to generate state: {:?}\n", err);
+ }
+ 0
+}
+
+fn rust_binder_transactions_show_impl(m: &SeqFile) -> Result<()> {
+ seq_print!(m, "binder transactions:\n");
+ let contexts = context::get_all_contexts()?;
+ for ctx in contexts {
+ let procs = ctx.get_all_procs()?;
+ for proc in procs {
+ proc.debug_print(m, &ctx, false)?;
+ seq_print!(m, "\n");
+ }
+ }
+ Ok(())
+}
+
+fn rust_binder_stats_show_impl(m: &SeqFile) -> Result<()> {
+ seq_print!(m, "binder stats:\n");
+ stats::GLOBAL_STATS.debug_print("", m);
+ let contexts = context::get_all_contexts()?;
+ for ctx in contexts {
+ let procs = ctx.get_all_procs()?;
+ for proc in procs {
+ proc.debug_print_stats(m, &ctx)?;
+ seq_print!(m, "\n");
+ }
+ }
+ Ok(())
+}
+
+fn rust_binder_state_show_impl(m: &SeqFile) -> Result<()> {
+ seq_print!(m, "binder state:\n");
+ let contexts = context::get_all_contexts()?;
+ for ctx in contexts {
+ let procs = ctx.get_all_procs()?;
+ for proc in procs {
+ proc.debug_print(m, &ctx, true)?;
+ seq_print!(m, "\n");
+ }
+ }
+ Ok(())
+}
+
+fn rust_binder_proc_show_impl(m: &SeqFile, pid: Pid) -> Result<()> {
+ seq_print!(m, "binder proc state:\n");
+ let contexts = context::get_all_contexts()?;
+ for ctx in contexts {
+ let procs = ctx.get_procs_with_pid(pid)?;
+ for proc in procs {
+ proc.debug_print(m, &ctx, true)?;
+ seq_print!(m, "\n");
+ }
+ }
+ Ok(())
+}
+
+struct BinderfsProcFile(NonNull<bindings::dentry>);
+
+// SAFETY: It is safe to drop this value from any thread.
+unsafe impl Send for BinderfsProcFile {}
+
+impl BinderfsProcFile {
+ /// # Safety
+ ///
+    /// `nodp` must be the inode of a newly created binder file.
+ unsafe fn new(nodp: *mut bindings::inode, pid: i32) -> Result<Option<Self>> {
+ // SAFETY: The caller passes an `inode` for a newly created binder file.
+ let dentry = unsafe { binderfs::rust_binderfs_create_proc_file(nodp, pid) };
+ match kernel::error::from_err_ptr(dentry) {
+ Ok(dentry) => Ok(NonNull::new(dentry).map(Self)),
+ Err(err) if err == EEXIST => Ok(None),
+ Err(err) => Err(err),
+ }
+ }
+}
+
+impl Drop for BinderfsProcFile {
+ fn drop(&mut self) {
+        // SAFETY: This is a dentry from `rust_binderfs_create_proc_file` that has not been removed
+        // yet.
+ unsafe { binderfs::rust_binderfs_remove_file(self.0.as_ptr()) };
+ }
+}
diff --git a/drivers/android/binder/rust_binderfs.c b/drivers/android/binder/rust_binderfs.c
new file mode 100644
index 000000000000..6b497146b698
--- /dev/null
+++ b/drivers/android/binder/rust_binderfs.c
@@ -0,0 +1,850 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/compiler_types.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/fsnotify.h>
+#include <linux/gfp.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/ipc_namespace.h>
+#include <linux/kdev_t.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/namei.h>
+#include <linux/magic.h>
+#include <linux/major.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/mount.h>
+#include <linux/fs_parser.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock_types.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/user_namespace.h>
+#include <linux/xarray.h>
+#include <uapi/asm-generic/errno-base.h>
+#include <uapi/linux/android/binder.h>
+#include <uapi/linux/android/binderfs.h>
+
+#include "rust_binder.h"
+#include "rust_binder_internal.h"
+
+#define FIRST_INODE 1
+#define SECOND_INODE 2
+#define INODE_OFFSET 3
+#define BINDERFS_MAX_MINOR (1U << MINORBITS)
+/* Ensure that the initial ipc namespace always has devices available. */
+#define BINDERFS_MAX_MINOR_CAPPED (BINDERFS_MAX_MINOR - 4)
+
+DEFINE_SHOW_ATTRIBUTE(rust_binder_stats);
+DEFINE_SHOW_ATTRIBUTE(rust_binder_state);
+DEFINE_SHOW_ATTRIBUTE(rust_binder_transactions);
+DEFINE_SHOW_ATTRIBUTE(rust_binder_proc);
+
+char *rust_binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
+module_param_named(rust_devices, rust_binder_devices_param, charp, 0444);
+
+static dev_t binderfs_dev;
+static DEFINE_MUTEX(binderfs_minors_mutex);
+static DEFINE_IDA(binderfs_minors);
+
+enum binderfs_param {
+ Opt_max,
+ Opt_stats_mode,
+};
+
+enum binderfs_stats_mode {
+ binderfs_stats_mode_unset,
+ binderfs_stats_mode_global,
+};
+
+struct binder_features {
+ bool oneway_spam_detection;
+ bool extended_error;
+ bool freeze_notification;
+};
+
+static const struct constant_table binderfs_param_stats[] = {
+ { "global", binderfs_stats_mode_global },
+ {}
+};
+
+static const struct fs_parameter_spec binderfs_fs_parameters[] = {
+ fsparam_u32("max", Opt_max),
+ fsparam_enum("stats", Opt_stats_mode, binderfs_param_stats),
+ {}
+};
+
+static struct binder_features binder_features = {
+ .oneway_spam_detection = true,
+ .extended_error = true,
+ .freeze_notification = true,
+};
+
+static inline struct binderfs_info *BINDERFS_SB(const struct super_block *sb)
+{
+ return sb->s_fs_info;
+}
+
+/**
+ * binderfs_binder_device_create - allocate inode from super block of a
+ * binderfs mount
+ * @ref_inode: inode from which the super block will be taken
+ * @userp: buffer to copy information about new device for userspace to
+ * @req: struct binderfs_device as copied from userspace
+ *
+ * This function allocates a new binder_device and reserves a new minor
+ * number for it.
+ * Minor numbers are limited and tracked globally in binderfs_minors. The
+ * function allocates a new inode from the super block of the filesystem
+ * mount, stashes a struct binder_device for the specific binder device in
+ * the inode's i_private field and attaches a dentry to that inode.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+static int binderfs_binder_device_create(struct inode *ref_inode,
+ struct binderfs_device __user *userp,
+ struct binderfs_device *req)
+{
+ int minor, ret;
+ struct dentry *dentry, *root;
+ struct binder_device *device = NULL;
+ rust_binder_context ctx = NULL;
+ struct inode *inode = NULL;
+ struct super_block *sb = ref_inode->i_sb;
+ struct binderfs_info *info = sb->s_fs_info;
+#if defined(CONFIG_IPC_NS)
+ bool use_reserve = (info->ipc_ns == &init_ipc_ns);
+#else
+ bool use_reserve = true;
+#endif
+
+ /* Reserve new minor number for the new device. */
+ mutex_lock(&binderfs_minors_mutex);
+ if (++info->device_count <= info->mount_opts.max)
+ minor = ida_alloc_max(&binderfs_minors,
+ use_reserve ? BINDERFS_MAX_MINOR :
+ BINDERFS_MAX_MINOR_CAPPED,
+ GFP_KERNEL);
+ else
+ minor = -ENOSPC;
+ if (minor < 0) {
+ --info->device_count;
+ mutex_unlock(&binderfs_minors_mutex);
+ return minor;
+ }
+ mutex_unlock(&binderfs_minors_mutex);
+
+ ret = -ENOMEM;
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device)
+ goto err;
+
+ req->name[BINDERFS_MAX_NAME] = '\0'; /* NUL-terminate */
+
+ ctx = rust_binder_new_context(req->name);
+ if (!ctx)
+ goto err;
+
+ inode = new_inode(sb);
+ if (!inode)
+ goto err;
+
+ inode->i_ino = minor + INODE_OFFSET;
+ simple_inode_init_ts(inode);
+ init_special_inode(inode, S_IFCHR | 0600,
+ MKDEV(MAJOR(binderfs_dev), minor));
+ inode->i_fop = &rust_binder_fops;
+ inode->i_uid = info->root_uid;
+ inode->i_gid = info->root_gid;
+
+ req->major = MAJOR(binderfs_dev);
+ req->minor = minor;
+ device->ctx = ctx;
+ device->minor = minor;
+
+ if (userp && copy_to_user(userp, req, sizeof(*req))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ root = sb->s_root;
+ inode_lock(d_inode(root));
+
+ /* look it up */
+ dentry = lookup_noperm(&QSTR(req->name), root);
+ if (IS_ERR(dentry)) {
+ inode_unlock(d_inode(root));
+ ret = PTR_ERR(dentry);
+ goto err;
+ }
+
+ if (d_really_is_positive(dentry)) {
+ /* already exists */
+ dput(dentry);
+ inode_unlock(d_inode(root));
+ ret = -EEXIST;
+ goto err;
+ }
+
+ inode->i_private = device;
+ d_instantiate(dentry, inode);
+ fsnotify_create(root->d_inode, dentry);
+ inode_unlock(d_inode(root));
+
+ return 0;
+
+err:
+ kfree(device);
+ rust_binder_remove_context(ctx);
+ mutex_lock(&binderfs_minors_mutex);
+ --info->device_count;
+ ida_free(&binderfs_minors, minor);
+ mutex_unlock(&binderfs_minors_mutex);
+ iput(inode);
+
+ return ret;
+}
+
+/**
+ * binder_ctl_ioctl - handle binder device node allocation requests
+ *
+ * The request handler for the binder-control device. All requests operate on
+ * the binderfs mount the binder-control device resides in:
+ * - BINDER_CTL_ADD
+ * Allocate a new binder device.
+ *
+ * Return: %0 on success, negative errno on failure.
+ */
+static long binder_ctl_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = -EINVAL;
+ struct inode *inode = file_inode(file);
+ struct binderfs_device __user *device = (struct binderfs_device __user *)arg;
+ struct binderfs_device device_req;
+
+ switch (cmd) {
+ case BINDER_CTL_ADD:
+ ret = copy_from_user(&device_req, device, sizeof(device_req));
+ if (ret) {
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = binderfs_binder_device_create(inode, device, &device_req);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static void binderfs_evict_inode(struct inode *inode)
+{
+ struct binder_device *device = inode->i_private;
+ struct binderfs_info *info = BINDERFS_SB(inode->i_sb);
+
+ clear_inode(inode);
+
+ if (!S_ISCHR(inode->i_mode) || !device)
+ return;
+
+ mutex_lock(&binderfs_minors_mutex);
+ --info->device_count;
+ ida_free(&binderfs_minors, device->minor);
+ mutex_unlock(&binderfs_minors_mutex);
+
+ /* ctx is null for binder-control, but this function ignores null pointers */
+ rust_binder_remove_context(device->ctx);
+
+ kfree(device);
+}
+
+static int binderfs_fs_context_parse_param(struct fs_context *fc,
+ struct fs_parameter *param)
+{
+ int opt;
+ struct binderfs_mount_opts *ctx = fc->fs_private;
+ struct fs_parse_result result;
+
+ opt = fs_parse(fc, binderfs_fs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_max:
+ if (result.uint_32 > BINDERFS_MAX_MINOR)
+ return invalfc(fc, "Bad value for '%s'", param->key);
+
+ ctx->max = result.uint_32;
+ break;
+ case Opt_stats_mode:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ctx->stats_mode = result.uint_32;
+ break;
+ default:
+ return invalfc(fc, "Unsupported parameter '%s'", param->key);
+ }
+
+ return 0;
+}
+
+static int binderfs_fs_context_reconfigure(struct fs_context *fc)
+{
+ struct binderfs_mount_opts *ctx = fc->fs_private;
+ struct binderfs_info *info = BINDERFS_SB(fc->root->d_sb);
+
+ if (info->mount_opts.stats_mode != ctx->stats_mode)
+ return invalfc(fc, "Binderfs stats mode cannot be changed during a remount");
+
+ info->mount_opts.stats_mode = ctx->stats_mode;
+ info->mount_opts.max = ctx->max;
+ return 0;
+}
+
+static int binderfs_show_options(struct seq_file *seq, struct dentry *root)
+{
+ struct binderfs_info *info = BINDERFS_SB(root->d_sb);
+
+ if (info->mount_opts.max <= BINDERFS_MAX_MINOR)
+ seq_printf(seq, ",max=%d", info->mount_opts.max);
+
+ switch (info->mount_opts.stats_mode) {
+ case binderfs_stats_mode_unset:
+ break;
+ case binderfs_stats_mode_global:
+ seq_puts(seq, ",stats=global");
+ break;
+ }
+
+ return 0;
+}
+
+static const struct super_operations binderfs_super_ops = {
+ .evict_inode = binderfs_evict_inode,
+ .show_options = binderfs_show_options,
+ .statfs = simple_statfs,
+};
+
+static inline bool is_binderfs_control_device(const struct dentry *dentry)
+{
+ struct binderfs_info *info = dentry->d_sb->s_fs_info;
+
+ return info->control_dentry == dentry;
+}
+
+static int binderfs_rename(struct mnt_idmap *idmap,
+ struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags)
+{
+ if (is_binderfs_control_device(old_dentry) ||
+ is_binderfs_control_device(new_dentry))
+ return -EPERM;
+
+ return simple_rename(idmap, old_dir, old_dentry, new_dir,
+ new_dentry, flags);
+}
+
+static int binderfs_unlink(struct inode *dir, struct dentry *dentry)
+{
+ if (is_binderfs_control_device(dentry))
+ return -EPERM;
+
+ return simple_unlink(dir, dentry);
+}
+
+static const struct file_operations binder_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = nonseekable_open,
+ .unlocked_ioctl = binder_ctl_ioctl,
+ .compat_ioctl = binder_ctl_ioctl,
+ .llseek = noop_llseek,
+};
+
+/**
+ * binderfs_binder_ctl_create - create a new binder-control device
+ * @sb: super block of the binderfs mount
+ *
+ * This function creates a new binder-control device node in the binderfs mount
+ * referred to by @sb.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+static int binderfs_binder_ctl_create(struct super_block *sb)
+{
+ int minor, ret;
+ struct dentry *dentry;
+ struct binder_device *device;
+ struct inode *inode = NULL;
+ struct dentry *root = sb->s_root;
+ struct binderfs_info *info = sb->s_fs_info;
+#if defined(CONFIG_IPC_NS)
+ bool use_reserve = (info->ipc_ns == &init_ipc_ns);
+#else
+ bool use_reserve = true;
+#endif
+
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device)
+ return -ENOMEM;
+
+ /* If we have already created a binder-control node, return. */
+ if (info->control_dentry) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = -ENOMEM;
+ inode = new_inode(sb);
+ if (!inode)
+ goto out;
+
+ /* Reserve a new minor number for the new device. */
+ mutex_lock(&binderfs_minors_mutex);
+ minor = ida_alloc_max(&binderfs_minors,
+ use_reserve ? BINDERFS_MAX_MINOR :
+ BINDERFS_MAX_MINOR_CAPPED,
+ GFP_KERNEL);
+ mutex_unlock(&binderfs_minors_mutex);
+ if (minor < 0) {
+ ret = minor;
+ goto out;
+ }
+
+ inode->i_ino = SECOND_INODE;
+ simple_inode_init_ts(inode);
+ init_special_inode(inode, S_IFCHR | 0600,
+ MKDEV(MAJOR(binderfs_dev), minor));
+ inode->i_fop = &binder_ctl_fops;
+ inode->i_uid = info->root_uid;
+ inode->i_gid = info->root_gid;
+
+ device->minor = minor;
+ device->ctx = NULL;
+
+ dentry = d_alloc_name(root, "binder-control");
+ if (!dentry)
+ goto out;
+
+ inode->i_private = device;
+ info->control_dentry = dentry;
+ d_add(dentry, inode);
+
+ return 0;
+
+out:
+ kfree(device);
+ iput(inode);
+
+ return ret;
+}
+
+static const struct inode_operations binderfs_dir_inode_operations = {
+ .lookup = simple_lookup,
+ .rename = binderfs_rename,
+ .unlink = binderfs_unlink,
+};
+
+static struct inode *binderfs_make_inode(struct super_block *sb, int mode)
+{
+ struct inode *ret;
+
+ ret = new_inode(sb);
+ if (ret) {
+ ret->i_ino = iunique(sb, BINDERFS_MAX_MINOR + INODE_OFFSET);
+ ret->i_mode = mode;
+ simple_inode_init_ts(ret);
+ }
+ return ret;
+}
+
+static struct dentry *binderfs_create_dentry(struct dentry *parent,
+ const char *name)
+{
+ struct dentry *dentry;
+
+ dentry = lookup_noperm(&QSTR(name), parent);
+ if (IS_ERR(dentry))
+ return dentry;
+
+ /* Return error if the file/dir already exists. */
+ if (d_really_is_positive(dentry)) {
+ dput(dentry);
+ return ERR_PTR(-EEXIST);
+ }
+
+ return dentry;
+}
+
+void rust_binderfs_remove_file(struct dentry *dentry)
+{
+ struct inode *parent_inode;
+
+ parent_inode = d_inode(dentry->d_parent);
+ inode_lock(parent_inode);
+ if (simple_positive(dentry)) {
+ dget(dentry);
+ simple_unlink(parent_inode, dentry);
+ d_delete(dentry);
+ dput(dentry);
+ }
+ inode_unlock(parent_inode);
+}
+
+static struct dentry *rust_binderfs_create_file(struct dentry *parent, const char *name,
+ const struct file_operations *fops,
+ void *data)
+{
+ struct dentry *dentry;
+ struct inode *new_inode, *parent_inode;
+ struct super_block *sb;
+
+ parent_inode = d_inode(parent);
+ inode_lock(parent_inode);
+
+ dentry = binderfs_create_dentry(parent, name);
+ if (IS_ERR(dentry))
+ goto out;
+
+ sb = parent_inode->i_sb;
+ new_inode = binderfs_make_inode(sb, S_IFREG | 0444);
+ if (!new_inode) {
+ dput(dentry);
+ dentry = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ new_inode->i_fop = fops;
+ new_inode->i_private = data;
+ d_instantiate(dentry, new_inode);
+ fsnotify_create(parent_inode, dentry);
+
+out:
+ inode_unlock(parent_inode);
+ return dentry;
+}
+
+struct dentry *rust_binderfs_create_proc_file(struct inode *nodp, int pid)
+{
+ struct binderfs_info *info = nodp->i_sb->s_fs_info;
+ struct dentry *dir = info->proc_log_dir;
+ char strbuf[20 + 1];
+ void *data = (void *)(unsigned long) pid;
+
+ if (!dir)
+ return NULL;
+
+ snprintf(strbuf, sizeof(strbuf), "%u", pid);
+ return rust_binderfs_create_file(dir, strbuf, &rust_binder_proc_fops, data);
+}
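+
+/*
+ * For example, a process with pid 123 gets a "123" entry under the mount's
+ * binder_logs/proc directory; this assumes the mount was created with
+ * stats=global so that proc_log_dir is set.
+ */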
+
+static struct dentry *binderfs_create_dir(struct dentry *parent,
+ const char *name)
+{
+ struct dentry *dentry;
+ struct inode *new_inode, *parent_inode;
+ struct super_block *sb;
+
+ parent_inode = d_inode(parent);
+ inode_lock(parent_inode);
+
+ dentry = binderfs_create_dentry(parent, name);
+ if (IS_ERR(dentry))
+ goto out;
+
+ sb = parent_inode->i_sb;
+ new_inode = binderfs_make_inode(sb, S_IFDIR | 0755);
+ if (!new_inode) {
+ dput(dentry);
+ dentry = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ new_inode->i_fop = &simple_dir_operations;
+ new_inode->i_op = &simple_dir_inode_operations;
+
+ set_nlink(new_inode, 2);
+ d_instantiate(dentry, new_inode);
+ inc_nlink(parent_inode);
+ fsnotify_mkdir(parent_inode, dentry);
+
+out:
+ inode_unlock(parent_inode);
+ return dentry;
+}
+
+static int binder_features_show(struct seq_file *m, void *unused)
+{
+ bool *feature = m->private;
+
+ seq_printf(m, "%d\n", *feature);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(binder_features);
+
+static int init_binder_features(struct super_block *sb)
+{
+ struct dentry *dentry, *dir;
+
+ dir = binderfs_create_dir(sb->s_root, "features");
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+
+ dentry = rust_binderfs_create_file(dir, "oneway_spam_detection",
+ &binder_features_fops,
+ &binder_features.oneway_spam_detection);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ dentry = rust_binderfs_create_file(dir, "extended_error",
+ &binder_features_fops,
+ &binder_features.extended_error);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ dentry = rust_binderfs_create_file(dir, "freeze_notification",
+ &binder_features_fops,
+ &binder_features.freeze_notification);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ return 0;
+}
+
+static int init_binder_logs(struct super_block *sb)
+{
+ struct dentry *binder_logs_root_dir, *dentry, *proc_log_dir;
+ struct binderfs_info *info;
+ int ret = 0;
+
+ binder_logs_root_dir = binderfs_create_dir(sb->s_root,
+ "binder_logs");
+ if (IS_ERR(binder_logs_root_dir)) {
+ ret = PTR_ERR(binder_logs_root_dir);
+ goto out;
+ }
+
+ dentry = rust_binderfs_create_file(binder_logs_root_dir, "stats",
+ &rust_binder_stats_fops, NULL);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ dentry = rust_binderfs_create_file(binder_logs_root_dir, "state",
+ &rust_binder_state_fops, NULL);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ dentry = rust_binderfs_create_file(binder_logs_root_dir, "transactions",
+ &rust_binder_transactions_fops, NULL);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ proc_log_dir = binderfs_create_dir(binder_logs_root_dir, "proc");
+ if (IS_ERR(proc_log_dir)) {
+ ret = PTR_ERR(proc_log_dir);
+ goto out;
+ }
+ info = sb->s_fs_info;
+ info->proc_log_dir = proc_log_dir;
+
+out:
+ return ret;
+}
+
+static int binderfs_fill_super(struct super_block *sb, struct fs_context *fc)
+{
+ int ret;
+ struct binderfs_info *info;
+ struct binderfs_mount_opts *ctx = fc->fs_private;
+ struct inode *inode = NULL;
+ struct binderfs_device device_info = {};
+ const char *name;
+ size_t len;
+
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
+
+ /*
+ * The binderfs filesystem can be mounted by userns root in a
+ * non-initial userns. By default such mounts have the SB_I_NODEV flag
+ * set in s_iflags to prevent security issues where userns root can
+ * just create random device nodes via mknod() since it owns the
+ * filesystem mount. But binderfs does not allow the creation of any
+ * files, including device nodes. The only way to create binder device
+ * nodes is through the binder-control device, which userns root is
+ * explicitly allowed to use. So removing the SB_I_NODEV flag from
+ * s_iflags is both necessary and safe.
+ */
+ sb->s_iflags &= ~SB_I_NODEV;
+ sb->s_iflags |= SB_I_NOEXEC;
+ sb->s_magic = RUST_BINDERFS_SUPER_MAGIC;
+ sb->s_op = &binderfs_super_ops;
+ sb->s_time_gran = 1;
+
+ sb->s_fs_info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL);
+ if (!sb->s_fs_info)
+ return -ENOMEM;
+ info = sb->s_fs_info;
+
+ info->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
+
+ info->root_gid = make_kgid(sb->s_user_ns, 0);
+ if (!gid_valid(info->root_gid))
+ info->root_gid = GLOBAL_ROOT_GID;
+ info->root_uid = make_kuid(sb->s_user_ns, 0);
+ if (!uid_valid(info->root_uid))
+ info->root_uid = GLOBAL_ROOT_UID;
+ info->mount_opts.max = ctx->max;
+ info->mount_opts.stats_mode = ctx->stats_mode;
+
+ inode = new_inode(sb);
+ if (!inode)
+ return -ENOMEM;
+
+ inode->i_ino = FIRST_INODE;
+ inode->i_fop = &simple_dir_operations;
+ inode->i_mode = S_IFDIR | 0755;
+ simple_inode_init_ts(inode);
+ inode->i_op = &binderfs_dir_inode_operations;
+ set_nlink(inode, 2);
+
+ sb->s_root = d_make_root(inode);
+ if (!sb->s_root)
+ return -ENOMEM;
+
+ ret = binderfs_binder_ctl_create(sb);
+ if (ret)
+ return ret;
+
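+	/*
+	 * For example, with rust_binder_devices_param set to something like
+	 * "binder,hwbinder,vndbinder" (the usual CONFIG_ANDROID_BINDER_DEVICES
+	 * default), this loop creates one binder device node per
+	 * comma-separated name in the new mount.
+	 */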
+ name = rust_binder_devices_param;
+ for (len = strcspn(name, ","); len > 0; len = strcspn(name, ",")) {
+ strscpy(device_info.name, name, len + 1);
+ ret = binderfs_binder_device_create(inode, NULL, &device_info);
+ if (ret)
+ return ret;
+ name += len;
+ if (*name == ',')
+ name++;
+ }
+
+ ret = init_binder_features(sb);
+ if (ret)
+ return ret;
+
+ if (info->mount_opts.stats_mode == binderfs_stats_mode_global)
+ return init_binder_logs(sb);
+
+ return 0;
+}
+
+static int binderfs_fs_context_get_tree(struct fs_context *fc)
+{
+ return get_tree_nodev(fc, binderfs_fill_super);
+}
+
+static void binderfs_fs_context_free(struct fs_context *fc)
+{
+ struct binderfs_mount_opts *ctx = fc->fs_private;
+
+ kfree(ctx);
+}
+
+static const struct fs_context_operations binderfs_fs_context_ops = {
+ .free = binderfs_fs_context_free,
+ .get_tree = binderfs_fs_context_get_tree,
+ .parse_param = binderfs_fs_context_parse_param,
+ .reconfigure = binderfs_fs_context_reconfigure,
+};
+
+static int binderfs_init_fs_context(struct fs_context *fc)
+{
+ struct binderfs_mount_opts *ctx;
+
+ ctx = kzalloc(sizeof(struct binderfs_mount_opts), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->max = BINDERFS_MAX_MINOR;
+ ctx->stats_mode = binderfs_stats_mode_unset;
+
+ fc->fs_private = ctx;
+ fc->ops = &binderfs_fs_context_ops;
+
+ return 0;
+}
+
+static void binderfs_kill_super(struct super_block *sb)
+{
+ struct binderfs_info *info = sb->s_fs_info;
+
+ /*
+ * struct binderfs_info is still needed during inode eviction, so wipe
+ * the super_block first and free struct binderfs_info afterwards.
+ */
+ kill_litter_super(sb);
+
+ if (info && info->ipc_ns)
+ put_ipc_ns(info->ipc_ns);
+
+ kfree(info);
+}
+
+static struct file_system_type binder_fs_type = {
+ .name = "binder",
+ .init_fs_context = binderfs_init_fs_context,
+ .parameters = binderfs_fs_parameters,
+ .kill_sb = binderfs_kill_super,
+ .fs_flags = FS_USERNS_MOUNT,
+};
+
+int init_rust_binderfs(void)
+{
+ int ret;
+ const char *name;
+ size_t len;
+
+ /* Verify that the default binderfs device names are valid. */
+ name = rust_binder_devices_param;
+ for (len = strcspn(name, ","); len > 0; len = strcspn(name, ",")) {
+ if (len > BINDERFS_MAX_NAME)
+ return -E2BIG;
+ name += len;
+ if (*name == ',')
+ name++;
+ }
+
+ /* Allocate new major number for binderfs. */
+ ret = alloc_chrdev_region(&binderfs_dev, 0, BINDERFS_MAX_MINOR,
+ "rust_binder");
+ if (ret)
+ return ret;
+
+ ret = register_filesystem(&binder_fs_type);
+ if (ret) {
+ unregister_chrdev_region(binderfs_dev, BINDERFS_MAX_MINOR);
+ return ret;
+ }
+
+ return ret;
+}
diff --git a/drivers/android/binder/stats.rs b/drivers/android/binder/stats.rs
new file mode 100644
index 000000000000..a83ec111d2cb
--- /dev/null
+++ b/drivers/android/binder/stats.rs
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! Keep track of statistics for binder_logs.
+
+use crate::defs::*;
+use core::sync::atomic::{AtomicU32, Ordering::Relaxed};
+use kernel::{ioctl::_IOC_NR, seq_file::SeqFile, seq_print};
+
+const BC_COUNT: usize = _IOC_NR(BC_REPLY_SG) as usize + 1;
+const BR_COUNT: usize = _IOC_NR(BR_TRANSACTION_PENDING_FROZEN) as usize + 1;
+
+pub(crate) static GLOBAL_STATS: BinderStats = BinderStats::new();
+
+pub(crate) struct BinderStats {
+ bc: [AtomicU32; BC_COUNT],
+ br: [AtomicU32; BR_COUNT],
+}
+
+impl BinderStats {
+ pub(crate) const fn new() -> Self {
+ #[expect(clippy::declare_interior_mutable_const)]
+ const ZERO: AtomicU32 = AtomicU32::new(0);
+
+ Self {
+ bc: [ZERO; BC_COUNT],
+ br: [ZERO; BR_COUNT],
+ }
+ }
+
+ pub(crate) fn inc_bc(&self, bc: u32) {
+ let idx = _IOC_NR(bc) as usize;
+ if let Some(bc_ref) = self.bc.get(idx) {
+ bc_ref.fetch_add(1, Relaxed);
+ }
+ }
+
+ pub(crate) fn inc_br(&self, br: u32) {
+ let idx = _IOC_NR(br) as usize;
+ if let Some(br_ref) = self.br.get(idx) {
+ br_ref.fetch_add(1, Relaxed);
+ }
+ }
+
+ pub(crate) fn debug_print(&self, prefix: &str, m: &SeqFile) {
+ for (i, cnt) in self.bc.iter().enumerate() {
+ let cnt = cnt.load(Relaxed);
+ if cnt > 0 {
+ seq_print!(m, "{}{}: {}\n", prefix, command_string(i), cnt);
+ }
+ }
+ for (i, cnt) in self.br.iter().enumerate() {
+ let cnt = cnt.load(Relaxed);
+ if cnt > 0 {
+ seq_print!(m, "{}{}: {}\n", prefix, return_string(i), cnt);
+ }
+ }
+ }
+}
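+
+// Example output (illustrative): with `prefix = "  "`, a process that issued
+// 42 BC_TRANSACTION commands would get a line like `  BC_TRANSACTION: 42`;
+// counters that are still zero are omitted entirely.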
+
+mod strings {
+ use core::str::from_utf8_unchecked;
+ use kernel::str::CStr;
+
+ extern "C" {
+ static binder_command_strings: [*const u8; super::BC_COUNT];
+ static binder_return_strings: [*const u8; super::BR_COUNT];
+ }
+
+ pub(super) fn command_string(i: usize) -> &'static str {
+ // SAFETY: Accessing `binder_command_strings` is always safe.
+ let c_str_ptr = unsafe { binder_command_strings[i] };
+ // SAFETY: The `binder_command_strings` array only contains nul-terminated strings.
+ let bytes = unsafe { CStr::from_char_ptr(c_str_ptr) }.as_bytes();
+ // SAFETY: The `binder_command_strings` array only contains strings with ascii-chars.
+ unsafe { from_utf8_unchecked(bytes) }
+ }
+
+ pub(super) fn return_string(i: usize) -> &'static str {
+ // SAFETY: Accessing `binder_return_strings` is always safe.
+ let c_str_ptr = unsafe { binder_return_strings[i] };
+ // SAFETY: The `binder_return_strings` array only contains nul-terminated strings.
+ let bytes = unsafe { CStr::from_char_ptr(c_str_ptr) }.as_bytes();
+ // SAFETY: The `binder_return_strings` array only contains strings with ascii-chars.
+ unsafe { from_utf8_unchecked(bytes) }
+ }
+}
+use strings::{command_string, return_string};
diff --git a/drivers/android/binder/thread.rs b/drivers/android/binder/thread.rs
new file mode 100644
index 000000000000..7e34ccd394f8
--- /dev/null
+++ b/drivers/android/binder/thread.rs
@@ -0,0 +1,1596 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! This module defines the `Thread` type, which represents a userspace thread that is using
+//! binder.
+//!
+//! The `Process` object stores all of the threads in an rb tree.
+
+use kernel::{
+ bindings,
+ fs::{File, LocalFile},
+ list::{AtomicTracker, List, ListArc, ListLinks, TryNewListArc},
+ prelude::*,
+ security,
+ seq_file::SeqFile,
+ seq_print,
+ sync::poll::{PollCondVar, PollTable},
+ sync::{Arc, SpinLock},
+ task::Task,
+ types::ARef,
+ uaccess::{UserPtr, UserSlice},
+ uapi,
+};
+
+use crate::{
+ allocation::{Allocation, AllocationView, BinderObject, BinderObjectRef, NewAllocation},
+ defs::*,
+ error::BinderResult,
+ process::{GetWorkOrRegister, Process},
+ ptr_align,
+ stats::GLOBAL_STATS,
+ transaction::Transaction,
+ BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverCode, DeliverToRead,
+};
+
+use core::{
+ mem::size_of,
+ sync::atomic::{AtomicU32, Ordering},
+};
+
+/// Stores the layout of the scatter-gather entries. This is used during the `translate_objects`
+/// call and is discarded when it returns.
+struct ScatterGatherState {
+ /// A struct that tracks the amount of unused buffer space.
+ unused_buffer_space: UnusedBufferSpace,
+ /// Scatter-gather entries to copy.
+ sg_entries: KVec<ScatterGatherEntry>,
+ /// Indexes into `sg_entries` corresponding to the last binder_buffer_object that
+ /// was processed and all of its ancestors. The array is in sorted order.
+ ancestors: KVec<usize>,
+}
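+
+// E.g. (illustrative): in the "allowed" example from the doc comment of
+// `validate_parent_fixup` below, `ancestors` holds the `sg_entries` indices
+// of [A, C, D] right after D has been processed.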
+
+/// This entry specifies an additional buffer that should be copied using the scatter-gather
+/// mechanism.
+struct ScatterGatherEntry {
+ /// The index in the offset array of the BINDER_TYPE_PTR that this entry originates from.
+ obj_index: usize,
+ /// Offset in target buffer.
+ offset: usize,
+ /// User address in source buffer.
+ sender_uaddr: usize,
+ /// Number of bytes to copy.
+ length: usize,
+ /// The minimum offset of the next fixup in this buffer.
+ fixup_min_offset: usize,
+ /// The offsets within this buffer that contain pointers which should be translated.
+ pointer_fixups: KVec<PointerFixupEntry>,
+}
+
+/// This entry specifies that a fixup should happen at `target_offset` of the
+/// buffer. If `skip` is nonzero, then the fixup is a `binder_fd_array_object`
+ /// and is applied later. Otherwise, if `skip` is zero, then the size of the
+ /// fixup is `size_of::<u64>()` and `pointer_value` is written to the buffer.
+struct PointerFixupEntry {
+ /// The number of bytes to skip, or zero for a `binder_buffer_object` fixup.
+ skip: usize,
+ /// The translated pointer to write when `skip` is zero.
+ pointer_value: u64,
+ /// The offset at which the value should be written. The offset is relative
+ /// to the original buffer.
+ target_offset: usize,
+}
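+
+// Worked example (values are illustrative): a `binder_buffer_object` fixup is
+// recorded with `skip: 0`, so the 8-byte `pointer_value` is written at
+// `target_offset`, while a `binder_fd_array_object` covering four fds is
+// recorded with `skip: 16` (4 * size_of::<u32>()), leaving those bytes for
+// the fd fixups applied later.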
+
+/// Return type of `apply_and_validate_fixup_in_parent`.
+struct ParentFixupInfo {
+ /// The index of the parent buffer in `sg_entries`.
+ parent_sg_index: usize,
+ /// The number of ancestors of the buffer.
+ ///
+ /// The buffer is considered an ancestor of itself, so this is always at
+ /// least one.
+ num_ancestors: usize,
+ /// New value of `fixup_min_offset` if this fixup is applied.
+ new_min_offset: usize,
+ /// The offset of the fixup in the target buffer.
+ target_offset: usize,
+}
+
+impl ScatterGatherState {
+ /// Called when a `binder_buffer_object` or `binder_fd_array_object` tries
+ /// to access a region in its parent buffer. These accesses have various
+ /// restrictions, which this method verifies.
+ ///
+ /// The `parent_offset` and `length` arguments describe the offset and
+ /// length of the access in the parent buffer.
+ ///
+ /// # Detailed restrictions
+ ///
+ /// Obviously the fixup must be in-bounds for the parent buffer.
+ ///
+ /// For safety reasons, we only allow fixups inside a buffer to happen
+ /// at increasing offsets; additionally, we only allow fixup on the last
+ /// buffer object that was verified, or one of its parents.
+ ///
+ /// Example of what is allowed:
+ ///
+ /// A
+ /// B (parent = A, offset = 0)
+ /// C (parent = A, offset = 16)
+ /// D (parent = C, offset = 0)
+ /// E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
+ ///
+ /// Examples of what is not allowed:
+ ///
+ /// Decreasing offsets within the same parent:
+ /// A
+ /// C (parent = A, offset = 16)
+ /// B (parent = A, offset = 0) // decreasing offset within A
+ ///
+ /// Referring to a parent that wasn't the last object or any of its parents:
+ /// A
+ /// B (parent = A, offset = 0)
+ /// C (parent = A, offset = 0)
+ /// C (parent = A, offset = 16)
+ /// D (parent = B, offset = 0) // B is not A or any of A's parents
+ fn validate_parent_fixup(
+ &self,
+ parent: usize,
+ parent_offset: usize,
+ length: usize,
+ ) -> Result<ParentFixupInfo> {
+ // Using `position` would also be correct, but `rposition` avoids
+ // quadratic running times.
+ let ancestors_i = self
+ .ancestors
+ .iter()
+ .copied()
+ .rposition(|sg_idx| self.sg_entries[sg_idx].obj_index == parent)
+ .ok_or(EINVAL)?;
+ let sg_idx = self.ancestors[ancestors_i];
+ let sg_entry = match self.sg_entries.get(sg_idx) {
+ Some(sg_entry) => sg_entry,
+ None => {
+ pr_err!(
+ "self.ancestors[{}] is {}, but self.sg_entries.len() is {}",
+ ancestors_i,
+ sg_idx,
+ self.sg_entries.len()
+ );
+ return Err(EINVAL);
+ }
+ };
+ if sg_entry.fixup_min_offset > parent_offset {
+ pr_warn!(
+ "validate_parent_fixup: fixup_min_offset={}, parent_offset={}",
+ sg_entry.fixup_min_offset,
+ parent_offset
+ );
+ return Err(EINVAL);
+ }
+ let new_min_offset = parent_offset.checked_add(length).ok_or(EINVAL)?;
+ if new_min_offset > sg_entry.length {
+ pr_warn!(
+ "validate_parent_fixup: new_min_offset={}, sg_entry.length={}",
+ new_min_offset,
+ sg_entry.length
+ );
+ return Err(EINVAL);
+ }
+ let target_offset = sg_entry.offset.checked_add(parent_offset).ok_or(EINVAL)?;
+ // The `ancestors_i + 1` operation can't overflow since the output of the addition is at
+ // most `self.ancestors.len()`, which also fits in a usize.
+ Ok(ParentFixupInfo {
+ parent_sg_index: sg_idx,
+ num_ancestors: ancestors_i + 1,
+ new_min_offset,
+ target_offset,
+ })
+ }
+}
+
+/// Keeps track of how much unused buffer space is left. The initial amount is the number of bytes
+/// requested by the user using the `buffers_size` field of `binder_transaction_data_sg`. Each time
+/// we translate an object of type `BINDER_TYPE_PTR`, some of the unused buffer space is consumed.
+struct UnusedBufferSpace {
+ /// The start of the remaining space.
+ offset: usize,
+ /// The end of the remaining space.
+ limit: usize,
+}
+impl UnusedBufferSpace {
+ /// Claim the next `size` bytes from the unused buffer space. The offset for the claimed chunk
+ /// into the buffer is returned.
+ fn claim_next(&mut self, size: usize) -> Result<usize> {
+ // We require every chunk to be aligned.
+ let size = ptr_align(size).ok_or(EINVAL)?;
+ let new_offset = self.offset.checked_add(size).ok_or(EINVAL)?;
+
+ if new_offset <= self.limit {
+ let offset = self.offset;
+ self.offset = new_offset;
+ Ok(offset)
+ } else {
+ Err(EINVAL)
+ }
+ }
+}
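+
+// A short worked example (illustrative; assumes a 64-bit target where chunks
+// are aligned to 8 bytes): starting from `offset = 0` and `limit = 24`,
+// `claim_next(5)` returns offset 0 and advances `offset` to 8, and a
+// subsequent `claim_next(24)` fails with EINVAL since 8 + 24 exceeds 24.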
+
+pub(crate) enum PushWorkRes {
+ Ok,
+ FailedDead(DLArc<dyn DeliverToRead>),
+}
+
+impl PushWorkRes {
+ fn is_ok(&self) -> bool {
+ match self {
+ PushWorkRes::Ok => true,
+ PushWorkRes::FailedDead(_) => false,
+ }
+ }
+}
+
+/// The fields of `Thread` protected by the spinlock.
+struct InnerThread {
+ /// Determines the looper state of the thread. It is a bit-wise combination of the constants
+ /// prefixed with `LOOPER_`.
+ looper_flags: u32,
+
+ /// Determines whether the looper should return.
+ looper_need_return: bool,
+
+ /// Determines if thread is dead.
+ is_dead: bool,
+
+ /// Work item used to deliver error codes to the thread that started a transaction. Stored here
+ /// so that it can be reused.
+ reply_work: DArc<ThreadError>,
+
+ /// Work item used to deliver error codes to the current thread. Stored here so that it can be
+ /// reused.
+ return_work: DArc<ThreadError>,
+
+ /// Determines whether the work list below should be processed. When set to false, `work_list`
+ /// is treated as if it were empty.
+ process_work_list: bool,
+ /// List of work items to deliver to userspace.
+ work_list: List<DTRWrap<dyn DeliverToRead>>,
+ current_transaction: Option<DArc<Transaction>>,
+
+ /// Extended error information for this thread.
+ extended_error: ExtendedError,
+}
+
+const LOOPER_REGISTERED: u32 = 0x01;
+const LOOPER_ENTERED: u32 = 0x02;
+const LOOPER_EXITED: u32 = 0x04;
+const LOOPER_INVALID: u32 = 0x08;
+const LOOPER_WAITING: u32 = 0x10;
+const LOOPER_WAITING_PROC: u32 = 0x20;
+const LOOPER_POLL: u32 = 0x40;
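+
+// For example, a thread that issues BC_ENTER_LOOPER and later also
+// BC_REGISTER_LOOPER ends up with LOOPER_ENTERED, LOOPER_REGISTERED and
+// LOOPER_INVALID all set: `looper_enter` and `looper_register` below mark
+// the looper invalid when the two registration modes are combined.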
+
+impl InnerThread {
+ fn new() -> Result<Self> {
+ fn next_err_id() -> u32 {
+ static EE_ID: AtomicU32 = AtomicU32::new(0);
+ EE_ID.fetch_add(1, Ordering::Relaxed)
+ }
+
+ Ok(Self {
+ looper_flags: 0,
+ looper_need_return: false,
+ is_dead: false,
+ process_work_list: false,
+ reply_work: ThreadError::try_new()?,
+ return_work: ThreadError::try_new()?,
+ work_list: List::new(),
+ current_transaction: None,
+ extended_error: ExtendedError::new(next_err_id(), BR_OK, 0),
+ })
+ }
+
+ fn pop_work(&mut self) -> Option<DLArc<dyn DeliverToRead>> {
+ if !self.process_work_list {
+ return None;
+ }
+
+ let ret = self.work_list.pop_front();
+ self.process_work_list = !self.work_list.is_empty();
+ ret
+ }
+
+ fn push_work(&mut self, work: DLArc<dyn DeliverToRead>) -> PushWorkRes {
+ if self.is_dead {
+ PushWorkRes::FailedDead(work)
+ } else {
+ self.work_list.push_back(work);
+ self.process_work_list = true;
+ PushWorkRes::Ok
+ }
+ }
+
+ fn push_reply_work(&mut self, code: u32) {
+ if let Ok(work) = ListArc::try_from_arc(self.reply_work.clone()) {
+ work.set_error_code(code);
+ self.push_work(work);
+ } else {
+ pr_warn!("Thread reply work is already in use.");
+ }
+ }
+
+ fn push_return_work(&mut self, reply: u32) {
+ if let Ok(work) = ListArc::try_from_arc(self.return_work.clone()) {
+ work.set_error_code(reply);
+ self.push_work(work);
+ } else {
+ pr_warn!("Thread return work is already in use.");
+ }
+ }
+
+ /// Used to push work items that do not need to be processed immediately and can wait until the
+ /// thread gets another work item.
+ fn push_work_deferred(&mut self, work: DLArc<dyn DeliverToRead>) {
+ self.work_list.push_back(work);
+ }
+
+ /// Fetches the transaction this thread can reply to. If the thread has a pending transaction
+ /// (that it could respond to) but it has also issued a transaction, it must first wait for the
+ /// previously-issued transaction to complete.
+ ///
+ /// The `thread` parameter should be the thread containing this `InnerThread`.
+ fn pop_transaction_to_reply(&mut self, thread: &Thread) -> Result<DArc<Transaction>> {
+ let transaction = self.current_transaction.take().ok_or(EINVAL)?;
+ if core::ptr::eq(thread, transaction.from.as_ref()) {
+ self.current_transaction = Some(transaction);
+ return Err(EINVAL);
+ }
+ // Find a new current transaction for this thread.
+ self.current_transaction = transaction.find_from(thread).cloned();
+ Ok(transaction)
+ }
+
+ fn pop_transaction_replied(&mut self, transaction: &DArc<Transaction>) -> bool {
+ match self.current_transaction.take() {
+ None => false,
+ Some(old) => {
+ if !Arc::ptr_eq(transaction, &old) {
+ self.current_transaction = Some(old);
+ return false;
+ }
+ self.current_transaction = old.clone_next();
+ true
+ }
+ }
+ }
+
+ fn looper_enter(&mut self) {
+ self.looper_flags |= LOOPER_ENTERED;
+ if self.looper_flags & LOOPER_REGISTERED != 0 {
+ self.looper_flags |= LOOPER_INVALID;
+ }
+ }
+
+ fn looper_register(&mut self, valid: bool) {
+ self.looper_flags |= LOOPER_REGISTERED;
+ if !valid || self.looper_flags & LOOPER_ENTERED != 0 {
+ self.looper_flags |= LOOPER_INVALID;
+ }
+ }
+
+ fn looper_exit(&mut self) {
+ self.looper_flags |= LOOPER_EXITED;
+ }
+
+ /// Determines whether the thread is part of a pool, i.e., if it is a looper.
+ fn is_looper(&self) -> bool {
+ self.looper_flags & (LOOPER_ENTERED | LOOPER_REGISTERED) != 0
+ }
+
+ /// Determines whether the thread should attempt to fetch work items from the process queue.
+ /// This is generally the case when the thread is registered as a looper and not part of a
+ /// transaction stack. But if there is local work, we want to return to userspace before we
+ /// deliver any remote work.
+ fn should_use_process_work_queue(&self) -> bool {
+ self.current_transaction.is_none() && !self.process_work_list && self.is_looper()
+ }
+
+ fn poll(&mut self) -> u32 {
+ self.looper_flags |= LOOPER_POLL;
+ if self.process_work_list || self.looper_need_return {
+ bindings::POLLIN
+ } else {
+ 0
+ }
+ }
+}
+
+/// This represents a thread that's used with binder.
+#[pin_data]
+pub(crate) struct Thread {
+ pub(crate) id: i32,
+ pub(crate) process: Arc<Process>,
+ pub(crate) task: ARef<Task>,
+ #[pin]
+ inner: SpinLock<InnerThread>,
+ #[pin]
+ work_condvar: PollCondVar,
+ /// Used to insert this thread into the process' `ready_threads` list.
+ ///
+ /// INVARIANT: May never be used for any other list than the `self.process.ready_threads`.
+ #[pin]
+ links: ListLinks,
+ #[pin]
+ links_track: AtomicTracker,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for Thread {
+ tracked_by links_track: AtomicTracker;
+ }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<0> for Thread {
+ using ListLinks { self.links };
+ }
+}
+
+impl Thread {
+ pub(crate) fn new(id: i32, process: Arc<Process>) -> Result<Arc<Self>> {
+ let inner = InnerThread::new()?;
+
+ Arc::pin_init(
+ try_pin_init!(Thread {
+ id,
+ process,
+ task: ARef::from(&**kernel::current!()),
+ inner <- kernel::new_spinlock!(inner, "Thread::inner"),
+ work_condvar <- kernel::new_poll_condvar!("Thread::work_condvar"),
+ links <- ListLinks::new(),
+ links_track <- AtomicTracker::new(),
+ }),
+ GFP_KERNEL,
+ )
+ }
+
+ #[inline(never)]
+ pub(crate) fn debug_print(self: &Arc<Self>, m: &SeqFile, print_all: bool) -> Result<()> {
+ let inner = self.inner.lock();
+
+ if print_all || inner.current_transaction.is_some() || !inner.work_list.is_empty() {
+ seq_print!(
+ m,
+ " thread {}: l {:02x} need_return {}\n",
+ self.id,
+ inner.looper_flags,
+ inner.looper_need_return,
+ );
+ }
+
+ let mut t_opt = inner.current_transaction.as_ref();
+ while let Some(t) = t_opt {
+ if Arc::ptr_eq(&t.from, self) {
+ t.debug_print_inner(m, " outgoing transaction ");
+ t_opt = t.from_parent.as_ref();
+ } else if Arc::ptr_eq(&t.to, &self.process) {
+ t.debug_print_inner(m, " incoming transaction ");
+ t_opt = t.find_from(self);
+ } else {
+ t.debug_print_inner(m, " bad transaction ");
+ t_opt = None;
+ }
+ }
+
+ for work in &inner.work_list {
+ work.debug_print(m, " ", " pending transaction ")?;
+ }
+ Ok(())
+ }
+
+ pub(crate) fn get_extended_error(&self, data: UserSlice) -> Result {
+ let mut writer = data.writer();
+ let ee = self.inner.lock().extended_error;
+ writer.write(&ee)?;
+ Ok(())
+ }
+
+ pub(crate) fn set_current_transaction(&self, transaction: DArc<Transaction>) {
+ self.inner.lock().current_transaction = Some(transaction);
+ }
+
+ pub(crate) fn has_current_transaction(&self) -> bool {
+ self.inner.lock().current_transaction.is_some()
+ }
+
+ /// Attempts to fetch a work item from the thread-local queue. The behaviour if the queue is
+ /// empty depends on `wait`: if it is true, the function waits for some work to be queued (or a
+ /// signal); otherwise it returns indicating that none is available.
+ fn get_work_local(self: &Arc<Self>, wait: bool) -> Result<Option<DLArc<dyn DeliverToRead>>> {
+ {
+ let mut inner = self.inner.lock();
+ if inner.looper_need_return {
+ return Ok(inner.pop_work());
+ }
+ }
+
+ // Try once if the caller does not want to wait.
+ if !wait {
+ return self.inner.lock().pop_work().ok_or(EAGAIN).map(Some);
+ }
+
+ // Loop waiting only on the local queue (i.e., not registering with the process queue).
+ let mut inner = self.inner.lock();
+ loop {
+ if let Some(work) = inner.pop_work() {
+ return Ok(Some(work));
+ }
+
+ inner.looper_flags |= LOOPER_WAITING;
+ let signal_pending = self.work_condvar.wait_interruptible_freezable(&mut inner);
+ inner.looper_flags &= !LOOPER_WAITING;
+
+ if signal_pending {
+ return Err(EINTR);
+ }
+ if inner.looper_need_return {
+ return Ok(None);
+ }
+ }
+ }
+
+ /// Attempts to fetch a work item from the thread-local queue, falling back to the process-wide
+ /// queue if none is available locally.
+ ///
+ /// This must only be called when the thread is not participating in a transaction chain. If it
+ /// is, the local version (`get_work_local`) should be used instead.
+ fn get_work(self: &Arc<Self>, wait: bool) -> Result<Option<DLArc<dyn DeliverToRead>>> {
+ // Try to get work from the thread's work queue, using only a local lock.
+ {
+ let mut inner = self.inner.lock();
+ if let Some(work) = inner.pop_work() {
+ return Ok(Some(work));
+ }
+ if inner.looper_need_return {
+ drop(inner);
+ return Ok(self.process.get_work());
+ }
+ }
+
+ // If the caller doesn't want to wait, try to grab work from the process queue.
+ //
+ // We know nothing will have been queued directly to the thread queue because it is not in
+ // a transaction and it is not in the process' ready list.
+ if !wait {
+ return self.process.get_work().ok_or(EAGAIN).map(Some);
+ }
+
+ // Get work from the process queue. If none is available, atomically register as ready.
+ let reg = match self.process.get_work_or_register(self) {
+ GetWorkOrRegister::Work(work) => return Ok(Some(work)),
+ GetWorkOrRegister::Register(reg) => reg,
+ };
+
+ let mut inner = self.inner.lock();
+ loop {
+ if let Some(work) = inner.pop_work() {
+ return Ok(Some(work));
+ }
+
+ inner.looper_flags |= LOOPER_WAITING | LOOPER_WAITING_PROC;
+ let signal_pending = self.work_condvar.wait_interruptible_freezable(&mut inner);
+ inner.looper_flags &= !(LOOPER_WAITING | LOOPER_WAITING_PROC);
+
+ if signal_pending || inner.looper_need_return {
+ // We need to return now. We need to pull the thread off the list of ready threads
+ // (by dropping `reg`), then check the state again after it's off the list to
+ // ensure that something was not queued in the meantime. If something has been
+ // queued, we just return it (instead of the error).
+ drop(inner);
+ drop(reg);
+
+ let res = match self.inner.lock().pop_work() {
+ Some(work) => Ok(Some(work)),
+ None if signal_pending => Err(EINTR),
+ None => Ok(None),
+ };
+ return res;
+ }
+ }
+ }
+
+ /// Push the provided work item to be delivered to user space via this thread.
+ ///
+ /// Returns whether the item was successfully pushed. This can only fail if the thread is dead.
+ pub(crate) fn push_work(&self, work: DLArc<dyn DeliverToRead>) -> PushWorkRes {
+ let sync = work.should_sync_wakeup();
+
+ let res = self.inner.lock().push_work(work);
+
+ if res.is_ok() {
+ if sync {
+ self.work_condvar.notify_sync();
+ } else {
+ self.work_condvar.notify_one();
+ }
+ }
+
+ res
+ }
+
+ /// Attempts to push the given work item to the thread if it's a looper thread (i.e., if it's
+ /// part of a thread pool) and is alive. Otherwise, push the work item to the process instead.
+ pub(crate) fn push_work_if_looper(&self, work: DLArc<dyn DeliverToRead>) -> BinderResult {
+ let mut inner = self.inner.lock();
+ if inner.is_looper() && !inner.is_dead {
+ inner.push_work(work);
+ Ok(())
+ } else {
+ drop(inner);
+ self.process.push_work(work)
+ }
+ }
+
+ pub(crate) fn push_work_deferred(&self, work: DLArc<dyn DeliverToRead>) {
+ self.inner.lock().push_work_deferred(work);
+ }
+
+ pub(crate) fn push_return_work(&self, reply: u32) {
+ self.inner.lock().push_return_work(reply);
+ }
+
+ fn translate_object(
+ &self,
+ obj_index: usize,
+ offset: usize,
+ object: BinderObjectRef<'_>,
+ view: &mut AllocationView<'_>,
+ allow_fds: bool,
+ sg_state: &mut ScatterGatherState,
+ ) -> BinderResult {
+ match object {
+ BinderObjectRef::Binder(obj) => {
+ let strong = obj.hdr.type_ == BINDER_TYPE_BINDER;
+ // SAFETY: `binder` is a `binder_uintptr_t`; any bit pattern is a valid
+ // representation.
+ let ptr = unsafe { obj.__bindgen_anon_1.binder } as _;
+ let cookie = obj.cookie as _;
+ let flags = obj.flags as _;
+ let node = self
+ .process
+ .as_arc_borrow()
+ .get_node(ptr, cookie, flags, strong, self)?;
+ security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
+ view.transfer_binder_object(offset, obj, strong, node)?;
+ }
+ BinderObjectRef::Handle(obj) => {
+ let strong = obj.hdr.type_ == BINDER_TYPE_HANDLE;
+ // SAFETY: `handle` is a `u32`; any bit pattern is a valid representation.
+ let handle = unsafe { obj.__bindgen_anon_1.handle } as _;
+ let node = self.process.get_node_from_handle(handle, strong)?;
+ security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
+ view.transfer_binder_object(offset, obj, strong, node)?;
+ }
+ BinderObjectRef::Fd(obj) => {
+ if !allow_fds {
+ return Err(EPERM.into());
+ }
+
+ // SAFETY: `fd` is a `u32`; any bit pattern is a valid representation.
+ let fd = unsafe { obj.__bindgen_anon_1.fd };
+ let file = LocalFile::fget(fd)?;
+ // SAFETY: The binder driver never calls `fdget_pos` and this code runs from an
+ // ioctl, so there are no active calls to `fdget_pos` on this thread.
+ let file = unsafe { LocalFile::assume_no_fdget_pos(file) };
+ security::binder_transfer_file(
+ &self.process.cred,
+ &view.alloc.process.cred,
+ &file,
+ )?;
+
+ let mut obj_write = BinderFdObject::default();
+ obj_write.hdr.type_ = BINDER_TYPE_FD;
+ // This will be overwritten with the actual fd when the transaction is received.
+ obj_write.__bindgen_anon_1.fd = u32::MAX;
+ obj_write.cookie = obj.cookie;
+ view.write::<BinderFdObject>(offset, &obj_write)?;
+
+ const FD_FIELD_OFFSET: usize =
+ core::mem::offset_of!(uapi::binder_fd_object, __bindgen_anon_1.fd);
+
+ let field_offset = offset + FD_FIELD_OFFSET;
+
+ view.alloc.info_add_fd(file, field_offset, false)?;
+ }
+ BinderObjectRef::Ptr(obj) => {
+ let obj_length = obj.length.try_into().map_err(|_| EINVAL)?;
+ let alloc_offset = match sg_state.unused_buffer_space.claim_next(obj_length) {
+ Ok(alloc_offset) => alloc_offset,
+ Err(err) => {
+ pr_warn!(
+ "Failed to claim space for a BINDER_TYPE_PTR. (offset: {}, limit: {}, size: {})",
+ sg_state.unused_buffer_space.offset,
+ sg_state.unused_buffer_space.limit,
+ obj_length,
+ );
+ return Err(err.into());
+ }
+ };
+
+ let sg_state_idx = sg_state.sg_entries.len();
+ sg_state.sg_entries.push(
+ ScatterGatherEntry {
+ obj_index,
+ offset: alloc_offset,
+ sender_uaddr: obj.buffer as _,
+ length: obj_length,
+ pointer_fixups: KVec::new(),
+ fixup_min_offset: 0,
+ },
+ GFP_KERNEL,
+ )?;
+
+ let buffer_ptr_in_user_space = (view.alloc.ptr + alloc_offset) as u64;
+
+ if obj.flags & uapi::BINDER_BUFFER_FLAG_HAS_PARENT == 0 {
+ sg_state.ancestors.clear();
+ sg_state.ancestors.push(sg_state_idx, GFP_KERNEL)?;
+ } else {
+ // Another buffer also has a pointer to this buffer, and we need to fixup that
+ // pointer too.
+
+ let parent_index = usize::try_from(obj.parent).map_err(|_| EINVAL)?;
+ let parent_offset = usize::try_from(obj.parent_offset).map_err(|_| EINVAL)?;
+
+ let info = sg_state.validate_parent_fixup(
+ parent_index,
+ parent_offset,
+ size_of::<u64>(),
+ )?;
+
+ sg_state.ancestors.truncate(info.num_ancestors);
+ sg_state.ancestors.push(sg_state_idx, GFP_KERNEL)?;
+
+ let parent_entry = match sg_state.sg_entries.get_mut(info.parent_sg_index) {
+ Some(parent_entry) => parent_entry,
+ None => {
+ pr_err!(
+ "validate_parent_fixup returned index out of bounds for sg.entries"
+ );
+ return Err(EINVAL.into());
+ }
+ };
+
+ parent_entry.fixup_min_offset = info.new_min_offset;
+ parent_entry.pointer_fixups.push(
+ PointerFixupEntry {
+ skip: 0,
+ pointer_value: buffer_ptr_in_user_space,
+ target_offset: info.target_offset,
+ },
+ GFP_KERNEL,
+ )?;
+ }
+
+ let mut obj_write = BinderBufferObject::default();
+ obj_write.hdr.type_ = BINDER_TYPE_PTR;
+ obj_write.flags = obj.flags;
+ obj_write.buffer = buffer_ptr_in_user_space;
+ obj_write.length = obj.length;
+ obj_write.parent = obj.parent;
+ obj_write.parent_offset = obj.parent_offset;
+ view.write::<BinderBufferObject>(offset, &obj_write)?;
+ }
+ BinderObjectRef::Fda(obj) => {
+ if !allow_fds {
+ return Err(EPERM.into());
+ }
+ let parent_index = usize::try_from(obj.parent).map_err(|_| EINVAL)?;
+ let parent_offset = usize::try_from(obj.parent_offset).map_err(|_| EINVAL)?;
+ let num_fds = usize::try_from(obj.num_fds).map_err(|_| EINVAL)?;
+ let fds_len = num_fds.checked_mul(size_of::<u32>()).ok_or(EINVAL)?;
+
+ let info = sg_state.validate_parent_fixup(parent_index, parent_offset, fds_len)?;
+ view.alloc.info_add_fd_reserve(num_fds)?;
+
+ sg_state.ancestors.truncate(info.num_ancestors);
+ let parent_entry = match sg_state.sg_entries.get_mut(info.parent_sg_index) {
+ Some(parent_entry) => parent_entry,
+ None => {
+ pr_err!(
+ "validate_parent_fixup returned index out of bounds for sg.entries"
+ );
+ return Err(EINVAL.into());
+ }
+ };
+
+ parent_entry.fixup_min_offset = info.new_min_offset;
+ parent_entry
+ .pointer_fixups
+ .push(
+ PointerFixupEntry {
+ skip: fds_len,
+ pointer_value: 0,
+ target_offset: info.target_offset,
+ },
+ GFP_KERNEL,
+ )
+ .map_err(|_| ENOMEM)?;
+
+ let fda_uaddr = parent_entry
+ .sender_uaddr
+ .checked_add(parent_offset)
+ .ok_or(EINVAL)?;
+ let mut fda_bytes = KVec::new();
+ UserSlice::new(UserPtr::from_addr(fda_uaddr as _), fds_len)
+ .read_all(&mut fda_bytes, GFP_KERNEL)?;
+
+ if fds_len != fda_bytes.len() {
+ pr_err!("UserSlice::read_all returned wrong length in BINDER_TYPE_FDA");
+ return Err(EINVAL.into());
+ }
+
+ for i in (0..fds_len).step_by(size_of::<u32>()) {
+ let fd = {
+ let mut fd_bytes = [0u8; size_of::<u32>()];
+ fd_bytes.copy_from_slice(&fda_bytes[i..i + size_of::<u32>()]);
+ u32::from_ne_bytes(fd_bytes)
+ };
+
+ let file = LocalFile::fget(fd)?;
+ // SAFETY: The binder driver never calls `fdget_pos` and this code runs from an
+ // ioctl, so there are no active calls to `fdget_pos` on this thread.
+ let file = unsafe { LocalFile::assume_no_fdget_pos(file) };
+ security::binder_transfer_file(
+ &self.process.cred,
+ &view.alloc.process.cred,
+ &file,
+ )?;
+
+ // The `validate_parent_fixup` call ensures that this addition will not
+ // overflow.
+ view.alloc.info_add_fd(file, info.target_offset + i, true)?;
+ }
+ drop(fda_bytes);
+
+ let mut obj_write = BinderFdArrayObject::default();
+ obj_write.hdr.type_ = BINDER_TYPE_FDA;
+ obj_write.num_fds = obj.num_fds;
+ obj_write.parent = obj.parent;
+ obj_write.parent_offset = obj.parent_offset;
+ view.write::<BinderFdArrayObject>(offset, &obj_write)?;
+ }
+ }
+ Ok(())
+ }
+
+ fn apply_sg(&self, alloc: &mut Allocation, sg_state: &mut ScatterGatherState) -> BinderResult {
+ for sg_entry in &mut sg_state.sg_entries {
+ let mut end_of_previous_fixup = sg_entry.offset;
+ let offset_end = sg_entry.offset.checked_add(sg_entry.length).ok_or(EINVAL)?;
+
+ let mut reader =
+ UserSlice::new(UserPtr::from_addr(sg_entry.sender_uaddr), sg_entry.length).reader();
+ for fixup in &mut sg_entry.pointer_fixups {
+ let fixup_len = if fixup.skip == 0 {
+ size_of::<u64>()
+ } else {
+ fixup.skip
+ };
+
+ let target_offset_end = fixup.target_offset.checked_add(fixup_len).ok_or(EINVAL)?;
+ if fixup.target_offset < end_of_previous_fixup || offset_end < target_offset_end {
+ pr_warn!(
+ "Fixups oob {} {} {} {}",
+ fixup.target_offset,
+ end_of_previous_fixup,
+ offset_end,
+ target_offset_end
+ );
+ return Err(EINVAL.into());
+ }
+
+ let copy_off = end_of_previous_fixup;
+ let copy_len = fixup.target_offset - end_of_previous_fixup;
+ if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
+ pr_warn!("Failed copying into alloc: {:?}", err);
+ return Err(err.into());
+ }
+ if fixup.skip == 0 {
+ let res = alloc.write::<u64>(fixup.target_offset, &fixup.pointer_value);
+ if let Err(err) = res {
+ pr_warn!("Failed copying ptr into alloc: {:?}", err);
+ return Err(err.into());
+ }
+ }
+ if let Err(err) = reader.skip(fixup_len) {
+ pr_warn!("Failed skipping {} from reader: {:?}", fixup_len, err);
+ return Err(err.into());
+ }
+ end_of_previous_fixup = target_offset_end;
+ }
+ let copy_off = end_of_previous_fixup;
+ let copy_len = offset_end - end_of_previous_fixup;
+ if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
+ pr_warn!("Failed copying remainder into alloc: {:?}", err);
+ return Err(err.into());
+ }
+ }
+ Ok(())
+ }
+
+ /// This method copies the payload of a transaction into the target process.
+ ///
+ /// The resulting payload will have several different components, which will be stored next to
+ /// each other in the allocation. Furthermore, various objects can be embedded in the payload,
+ /// and those objects have to be translated so that they make sense to the target transaction.
+ pub(crate) fn copy_transaction_data(
+ &self,
+ to_process: Arc<Process>,
+ tr: &BinderTransactionDataSg,
+ debug_id: usize,
+ allow_fds: bool,
+ txn_security_ctx_offset: Option<&mut usize>,
+ ) -> BinderResult<NewAllocation> {
+ let trd = &tr.transaction_data;
+ let is_oneway = trd.flags & TF_ONE_WAY != 0;
+ let mut secctx = if let Some(offset) = txn_security_ctx_offset {
+ let secid = self.process.cred.get_secid();
+ let ctx = match security::SecurityCtx::from_secid(secid) {
+ Ok(ctx) => ctx,
+ Err(err) => {
+ pr_warn!("Failed to get security ctx for id {}: {:?}", secid, err);
+ return Err(err.into());
+ }
+ };
+ Some((offset, ctx))
+ } else {
+ None
+ };
+
+ let data_size = trd.data_size.try_into().map_err(|_| EINVAL)?;
+ let aligned_data_size = ptr_align(data_size).ok_or(EINVAL)?;
+ let offsets_size = trd.offsets_size.try_into().map_err(|_| EINVAL)?;
+ let aligned_offsets_size = ptr_align(offsets_size).ok_or(EINVAL)?;
+ let buffers_size = tr.buffers_size.try_into().map_err(|_| EINVAL)?;
+ let aligned_buffers_size = ptr_align(buffers_size).ok_or(EINVAL)?;
+ let aligned_secctx_size = match secctx.as_ref() {
+ Some((_offset, ctx)) => ptr_align(ctx.len()).ok_or(EINVAL)?,
+ None => 0,
+ };
+
+ // This guarantees that at least `size_of::<usize>()` bytes will be allocated.
+ let len = usize::max(
+ aligned_data_size
+ .checked_add(aligned_offsets_size)
+ .and_then(|sum| sum.checked_add(aligned_buffers_size))
+ .and_then(|sum| sum.checked_add(aligned_secctx_size))
+ .ok_or(ENOMEM)?,
+ size_of::<usize>(),
+ );
+ let secctx_off = aligned_data_size + aligned_offsets_size + aligned_buffers_size;
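+        // Layout sketch (illustrative numbers; 8-byte alignment assumed): with
+        // data_size = 20, offsets_size = 8 and buffers_size = 0, the aligned
+        // sizes are 24 and 8, so the offset array is copied to [24, 32) and a
+        // security context, if present, starts at secctx_off = 32.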
+ let mut alloc =
+ match to_process.buffer_alloc(debug_id, len, is_oneway, self.process.task.pid()) {
+ Ok(alloc) => alloc,
+ Err(err) => {
+ pr_warn!(
+ "Failed to allocate buffer. len:{}, is_oneway:{}",
+ len,
+ is_oneway
+ );
+ return Err(err);
+ }
+ };
+
+ // SAFETY: This accesses a union field, but it's okay because the field's type is valid for
+ // all bit-patterns.
+ let trd_data_ptr = unsafe { &trd.data.ptr };
+ let mut buffer_reader =
+ UserSlice::new(UserPtr::from_addr(trd_data_ptr.buffer as _), data_size).reader();
+ let mut end_of_previous_object = 0;
+ let mut sg_state = None;
+
+ // Copy offsets if there are any.
+ if offsets_size > 0 {
+ {
+ let mut reader =
+ UserSlice::new(UserPtr::from_addr(trd_data_ptr.offsets as _), offsets_size)
+ .reader();
+ alloc.copy_into(&mut reader, aligned_data_size, offsets_size)?;
+ }
+
+ let offsets_start = aligned_data_size;
+ let offsets_end = aligned_data_size + aligned_offsets_size;
+
+ // This state is used for BINDER_TYPE_PTR objects.
+ let sg_state = sg_state.insert(ScatterGatherState {
+ unused_buffer_space: UnusedBufferSpace {
+ offset: offsets_end,
+ limit: len,
+ },
+ sg_entries: KVec::new(),
+ ancestors: KVec::new(),
+ });
+
+ // Traverse the objects specified.
+ let mut view = AllocationView::new(&mut alloc, data_size);
+ for (index, index_offset) in (offsets_start..offsets_end)
+ .step_by(size_of::<usize>())
+ .enumerate()
+ {
+ let offset = view.alloc.read(index_offset)?;
+
+ if offset < end_of_previous_object {
+ pr_warn!("Got transaction with invalid offset.");
+ return Err(EINVAL.into());
+ }
+
+ // Copy data between two objects.
+ if end_of_previous_object < offset {
+ view.copy_into(
+ &mut buffer_reader,
+ end_of_previous_object,
+ offset - end_of_previous_object,
+ )?;
+ }
+
+ let mut object = BinderObject::read_from(&mut buffer_reader)?;
+
+ match self.translate_object(
+ index,
+ offset,
+ object.as_ref(),
+ &mut view,
+ allow_fds,
+ sg_state,
+ ) {
+ Ok(()) => end_of_previous_object = offset + object.size(),
+ Err(err) => {
+ pr_warn!("Error while translating object.");
+ return Err(err);
+ }
+ }
+
+ // Update the indexes containing objects to clean up.
+ let offset_after_object = index_offset + size_of::<usize>();
+ view.alloc
+ .set_info_offsets(offsets_start..offset_after_object);
+ }
+ }
+
+ // Copy remaining raw data.
+ alloc.copy_into(
+ &mut buffer_reader,
+ end_of_previous_object,
+ data_size - end_of_previous_object,
+ )?;
+
+ if let Some(sg_state) = sg_state.as_mut() {
+ if let Err(err) = self.apply_sg(&mut alloc, sg_state) {
+ pr_warn!("Failure in apply_sg: {:?}", err);
+ return Err(err);
+ }
+ }
+
+ if let Some((off_out, secctx)) = secctx.as_mut() {
+ if let Err(err) = alloc.write(secctx_off, secctx.as_bytes()) {
+ pr_warn!("Failed to write security context: {:?}", err);
+ return Err(err.into());
+ }
+ **off_out = secctx_off;
+ }
+ Ok(alloc)
+ }
+
+ fn unwind_transaction_stack(self: &Arc<Self>) {
+ let mut thread = self.clone();
+ while let Ok(transaction) = {
+ let mut inner = thread.inner.lock();
+ inner.pop_transaction_to_reply(thread.as_ref())
+ } {
+ let reply = Err(BR_DEAD_REPLY);
+ if !transaction.from.deliver_single_reply(reply, &transaction) {
+ break;
+ }
+
+ thread = transaction.from.clone();
+ }
+ }
+
+ pub(crate) fn deliver_reply(
+ &self,
+ reply: Result<DLArc<Transaction>, u32>,
+ transaction: &DArc<Transaction>,
+ ) {
+ if self.deliver_single_reply(reply, transaction) {
+ transaction.from.unwind_transaction_stack();
+ }
+ }
+
+ /// Delivers a reply to the thread that started a transaction. The reply can either be a
+ /// reply-transaction or an error code to be delivered instead.
+ ///
+ /// Returns whether the thread is dead. If it is, the caller is expected to unwind the
+ /// transaction stack by completing transactions for threads that are dead.
+ fn deliver_single_reply(
+ &self,
+ reply: Result<DLArc<Transaction>, u32>,
+ transaction: &DArc<Transaction>,
+ ) -> bool {
+ if let Ok(transaction) = &reply {
+ transaction.set_outstanding(&mut self.process.inner.lock());
+ }
+
+ {
+ let mut inner = self.inner.lock();
+ if !inner.pop_transaction_replied(transaction) {
+ return false;
+ }
+
+ if inner.is_dead {
+ return true;
+ }
+
+ match reply {
+ Ok(work) => {
+ inner.push_work(work);
+ }
+ Err(code) => inner.push_reply_work(code),
+ }
+ }
+
+ // Notify the thread now that we've released the inner lock.
+ self.work_condvar.notify_sync();
+ false
+ }
+
+ /// Determines if the given transaction is the current transaction for this thread.
+ fn is_current_transaction(&self, transaction: &DArc<Transaction>) -> bool {
+ let inner = self.inner.lock();
+ match &inner.current_transaction {
+ None => false,
+ Some(current) => Arc::ptr_eq(current, transaction),
+ }
+ }
+
+ /// Determines the current top of the transaction stack. It fails if the top is in another
+ /// thread (i.e., this thread belongs to a stack but it has called another thread). The top is
+ /// [`None`] if the thread is not currently participating in a transaction stack.
+ fn top_of_transaction_stack(&self) -> Result<Option<DArc<Transaction>>> {
+ let inner = self.inner.lock();
+ if let Some(cur) = &inner.current_transaction {
+ if core::ptr::eq(self, cur.from.as_ref()) {
+ pr_warn!("got new transaction with bad transaction stack");
+ return Err(EINVAL);
+ }
+ Ok(Some(cur.clone()))
+ } else {
+ Ok(None)
+ }
+ }
+
+ fn transaction<T>(self: &Arc<Self>, tr: &BinderTransactionDataSg, inner: T)
+ where
+ T: FnOnce(&Arc<Self>, &BinderTransactionDataSg) -> BinderResult,
+ {
+ if let Err(err) = inner(self, tr) {
+ if err.should_pr_warn() {
+ let mut ee = self.inner.lock().extended_error;
+ ee.command = err.reply;
+ ee.param = err.as_errno();
+ pr_warn!(
+ "Transaction failed: {:?} my_pid:{}",
+ err,
+ self.process.pid_in_current_ns()
+ );
+ }
+
+ self.push_return_work(err.reply);
+ }
+ }
+
+ fn transaction_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
+ // SAFETY: Handle's type has no invalid bit patterns.
+ let handle = unsafe { tr.transaction_data.target.handle };
+ let node_ref = self.process.get_transaction_node(handle)?;
+ security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
+ // TODO: We need to ensure that there isn't a pending transaction in the work queue. How
+ // could this happen?
+ let top = self.top_of_transaction_stack()?;
+ let list_completion = DTRWrap::arc_try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
+ let completion = list_completion.clone_arc();
+ let transaction = Transaction::new(node_ref, top, self, tr)?;
+
+ // Check that the transaction stack hasn't changed while the lock was released, then update
+ // it with the new transaction.
+ {
+ let mut inner = self.inner.lock();
+ if !transaction.is_stacked_on(&inner.current_transaction) {
+ pr_warn!("Transaction stack changed during transaction!");
+ return Err(EINVAL.into());
+ }
+ inner.current_transaction = Some(transaction.clone_arc());
+ // We push the completion as a deferred work so that we wait for the reply before
+ // returning to userland.
+ inner.push_work_deferred(list_completion);
+ }
+
+ if let Err(e) = transaction.submit() {
+ completion.skip();
+ // Define `transaction` first to drop it after `inner`.
+ let transaction;
+ let mut inner = self.inner.lock();
+ transaction = inner.current_transaction.take().unwrap();
+ inner.current_transaction = transaction.clone_next();
+ Err(e)
+ } else {
+ Ok(())
+ }
+ }
+
+ fn reply_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
+ let orig = self.inner.lock().pop_transaction_to_reply(self)?;
+ if !orig.from.is_current_transaction(&orig) {
+ return Err(EINVAL.into());
+ }
+
+ // We need to complete the transaction even if we cannot complete building the reply.
+ let out = (|| -> BinderResult<_> {
+ let completion = DTRWrap::arc_try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
+ let process = orig.from.process.clone();
+ let allow_fds = orig.flags & TF_ACCEPT_FDS != 0;
+ let reply = Transaction::new_reply(self, process, tr, allow_fds)?;
+ self.inner.lock().push_work(completion);
+ orig.from.deliver_reply(Ok(reply), &orig);
+ Ok(())
+ })()
+ .map_err(|mut err| {
+ // At this point we only return `BR_TRANSACTION_COMPLETE` to the caller, and we must let
+ // the sender know that the transaction has completed (with an error in this case).
+ pr_warn!(
+ "Failure {:?} during reply - delivering BR_FAILED_REPLY to sender.",
+ err
+ );
+ let reply = Err(BR_FAILED_REPLY);
+ orig.from.deliver_reply(reply, &orig);
+ err.reply = BR_TRANSACTION_COMPLETE;
+ err
+ });
+
+ out
+ }
+
+ fn oneway_transaction_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
+ // SAFETY: The `handle` field is valid for all possible byte values, so reading from the
+ // union is okay.
+ let handle = unsafe { tr.transaction_data.target.handle };
+ let node_ref = self.process.get_transaction_node(handle)?;
+ security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
+ let transaction = Transaction::new(node_ref, None, self, tr)?;
+ let code = if self.process.is_oneway_spam_detection_enabled()
+ && transaction.oneway_spam_detected
+ {
+ BR_ONEWAY_SPAM_SUSPECT
+ } else {
+ BR_TRANSACTION_COMPLETE
+ };
+ let list_completion = DTRWrap::arc_try_new(DeliverCode::new(code))?;
+ let completion = list_completion.clone_arc();
+ self.inner.lock().push_work(list_completion);
+ match transaction.submit() {
+ Ok(()) => Ok(()),
+ Err(err) => {
+ completion.skip();
+ Err(err)
+ }
+ }
+ }
+
+ fn write(self: &Arc<Self>, req: &mut BinderWriteRead) -> Result {
+ let write_start = req.write_buffer.wrapping_add(req.write_consumed);
+ let write_len = req.write_size.saturating_sub(req.write_consumed);
+ let mut reader =
+ UserSlice::new(UserPtr::from_addr(write_start as _), write_len as _).reader();
+
+ while reader.len() >= size_of::<u32>() && self.inner.lock().return_work.is_unused() {
+ let before = reader.len();
+ let cmd = reader.read::<u32>()?;
+ GLOBAL_STATS.inc_bc(cmd);
+ self.process.stats.inc_bc(cmd);
+ match cmd {
+ BC_TRANSACTION => {
+ let tr = reader.read::<BinderTransactionData>()?.with_buffers_size(0);
+ if tr.transaction_data.flags & TF_ONE_WAY != 0 {
+ self.transaction(&tr, Self::oneway_transaction_inner);
+ } else {
+ self.transaction(&tr, Self::transaction_inner);
+ }
+ }
+ BC_TRANSACTION_SG => {
+ let tr = reader.read::<BinderTransactionDataSg>()?;
+ if tr.transaction_data.flags & TF_ONE_WAY != 0 {
+ self.transaction(&tr, Self::oneway_transaction_inner);
+ } else {
+ self.transaction(&tr, Self::transaction_inner);
+ }
+ }
+ BC_REPLY => {
+ let tr = reader.read::<BinderTransactionData>()?.with_buffers_size(0);
+ self.transaction(&tr, Self::reply_inner)
+ }
+ BC_REPLY_SG => {
+ let tr = reader.read::<BinderTransactionDataSg>()?;
+ self.transaction(&tr, Self::reply_inner)
+ }
+ BC_FREE_BUFFER => {
+ let buffer = self.process.buffer_get(reader.read()?);
+ if let Some(buffer) = &buffer {
+ if buffer.looper_need_return_on_free() {
+ self.inner.lock().looper_need_return = true;
+ }
+ }
+ drop(buffer);
+ }
+ BC_INCREFS => {
+ self.process
+ .as_arc_borrow()
+ .update_ref(reader.read()?, true, false)?
+ }
+ BC_ACQUIRE => {
+ self.process
+ .as_arc_borrow()
+ .update_ref(reader.read()?, true, true)?
+ }
+ BC_RELEASE => {
+ self.process
+ .as_arc_borrow()
+ .update_ref(reader.read()?, false, true)?
+ }
+ BC_DECREFS => {
+ self.process
+ .as_arc_borrow()
+ .update_ref(reader.read()?, false, false)?
+ }
+ BC_INCREFS_DONE => self.process.inc_ref_done(&mut reader, false)?,
+ BC_ACQUIRE_DONE => self.process.inc_ref_done(&mut reader, true)?,
+ BC_REQUEST_DEATH_NOTIFICATION => self.process.request_death(&mut reader, self)?,
+ BC_CLEAR_DEATH_NOTIFICATION => self.process.clear_death(&mut reader, self)?,
+ BC_DEAD_BINDER_DONE => self.process.dead_binder_done(reader.read()?, self),
+ BC_REGISTER_LOOPER => {
+ let valid = self.process.register_thread();
+ self.inner.lock().looper_register(valid);
+ }
+ BC_ENTER_LOOPER => self.inner.lock().looper_enter(),
+ BC_EXIT_LOOPER => self.inner.lock().looper_exit(),
+ BC_REQUEST_FREEZE_NOTIFICATION => self.process.request_freeze_notif(&mut reader)?,
+ BC_CLEAR_FREEZE_NOTIFICATION => self.process.clear_freeze_notif(&mut reader)?,
+ BC_FREEZE_NOTIFICATION_DONE => self.process.freeze_notif_done(&mut reader)?,
+
+ // Fail if given an unknown command code.
+ // BC_ATTEMPT_ACQUIRE and BC_ACQUIRE_RESULT are no longer supported.
+ _ => return Err(EINVAL),
+ }
+ // Update the number of write bytes consumed.
+ req.write_consumed += (before - reader.len()) as u64;
+ }
+
+ Ok(())
+ }
+
+ fn read(self: &Arc<Self>, req: &mut BinderWriteRead, wait: bool) -> Result {
+ let read_start = req.read_buffer.wrapping_add(req.read_consumed);
+ let read_len = req.read_size.saturating_sub(req.read_consumed);
+ let mut writer = BinderReturnWriter::new(
+ UserSlice::new(UserPtr::from_addr(read_start as _), read_len as _).writer(),
+ self,
+ );
+ let (in_pool, use_proc_queue) = {
+ let inner = self.inner.lock();
+ (inner.is_looper(), inner.should_use_process_work_queue())
+ };
+
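+ // Threads allowed to service the process-wide work queue use `get_work`;
+ // otherwise only the thread-local queue is drained via `get_work_local`.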
+ let getter = if use_proc_queue {
+ Self::get_work
+ } else {
+ Self::get_work_local
+ };
+
+ // Reserve some room at the beginning of the read buffer so that we can send a
+ // BR_SPAWN_LOOPER if we need to.
+ let mut has_noop_placeholder = false;
+ if req.read_consumed == 0 {
+ if let Err(err) = writer.write_code(BR_NOOP) {
+ pr_warn!("Failure when writing BR_NOOP at beginning of buffer.");
+ return Err(err);
+ }
+ has_noop_placeholder = true;
+ }
+
+ // Loop doing work while there is room in the buffer.
+ let initial_len = writer.len();
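+ // Ensure there is room for the largest item we may write: a secctx transaction
+ // record plus its `u32` return code.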
+ while writer.len() >= size_of::<uapi::binder_transaction_data_secctx>() + 4 {
+ match getter(self, wait && initial_len == writer.len()) {
+ Ok(Some(work)) => match work.into_arc().do_work(self, &mut writer) {
+ Ok(true) => {}
+ Ok(false) => break,
+ Err(err) => {
+ return Err(err);
+ }
+ },
+ Ok(None) => {
+ break;
+ }
+ Err(err) => {
+ if err != EINTR && err != EAGAIN {
+ pr_warn!("Failure in work getter: {:?}", err);
+ }
+ // Propagate the error only if nothing has been written since the BR_NOOP
+ // placeholder.
+ if initial_len == writer.len() {
+ return Err(err);
+ } else {
+ break;
+ }
+ }
+ }
+ }
+
+ req.read_consumed += read_len - writer.len() as u64;
+
+ // Write BR_SPAWN_LOOPER if the process needs more threads for its pool.
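+ // This overwrites the `BR_NOOP` placeholder reserved at the start of the buffer.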
+ if has_noop_placeholder && in_pool && self.process.needs_thread() {
+ let mut writer =
+ UserSlice::new(UserPtr::from_addr(req.read_buffer as _), req.read_size as _)
+ .writer();
+ writer.write(&BR_SPAWN_LOOPER)?;
+ }
+ Ok(())
+ }
+
+ pub(crate) fn write_read(self: &Arc<Self>, data: UserSlice, wait: bool) -> Result {
+ let (mut reader, mut writer) = data.reader_writer();
+ let mut req = reader.read::<BinderWriteRead>()?;
+
+ // Go through the write buffer.
+ let mut ret = Ok(());
+ if req.write_size > 0 {
+ ret = self.write(&mut req);
+ if let Err(err) = ret {
+ pr_warn!(
+ "Write failure {:?} in pid:{}",
+ err,
+ self.process.pid_in_current_ns()
+ );
+ req.read_consumed = 0;
+ writer.write(&req)?;
+ self.inner.lock().looper_need_return = false;
+ return ret;
+ }
+ }
+
+ // Go through the work queue.
+ if req.read_size > 0 {
+ ret = self.read(&mut req, wait);
+ if ret.is_err() && ret != Err(EINTR) {
+ pr_warn!(
+ "Read failure {:?} in pid:{}",
+ ret,
+ self.process.pid_in_current_ns()
+ );
+ }
+ }
+
+ // Write the request back so that the consumed fields are visible to the caller.
+ writer.write(&req)?;
+
+ self.inner.lock().looper_need_return = false;
+
+ ret
+ }
+
+ pub(crate) fn poll(&self, file: &File, table: PollTable<'_>) -> (bool, u32) {
+ table.register_wait(file, &self.work_condvar);
+ let mut inner = self.inner.lock();
+ (inner.should_use_process_work_queue(), inner.poll())
+ }
+
+ /// Make any in-progress call to `get_work` or `get_work_local` return immediately.
+ pub(crate) fn exit_looper(&self) {
+ let mut inner = self.inner.lock();
+ let should_notify = inner.looper_flags & LOOPER_WAITING != 0;
+ if should_notify {
+ inner.looper_need_return = true;
+ }
+ drop(inner);
+
+ if should_notify {
+ self.work_condvar.notify_one();
+ }
+ }
+
+ pub(crate) fn notify_if_poll_ready(&self, sync: bool) {
+ // Determine if we need to notify. This requires the lock.
+ let inner = self.inner.lock();
+ let notify = inner.looper_flags & LOOPER_POLL != 0 && inner.should_use_process_work_queue();
+ drop(inner);
+
+ // Now that the lock is no longer held, notify the waiters if we have to.
+ if notify {
+ if sync {
+ self.work_condvar.notify_sync();
+ } else {
+ self.work_condvar.notify_one();
+ }
+ }
+ }
+
+ pub(crate) fn release(self: &Arc<Self>) {
+ self.inner.lock().is_dead = true;
+
+ self.unwind_transaction_stack();
+
+ // Cancel all pending work items.
+ while let Ok(Some(work)) = self.get_work_local(false) {
+ work.into_arc().cancel();
+ }
+ }
+}
+
+#[pin_data]
+struct ThreadError {
+ error_code: AtomicU32,
+ #[pin]
+ links_track: AtomicTracker,
+}
+
+impl ThreadError {
+ fn try_new() -> Result<DArc<Self>> {
+ DTRWrap::arc_pin_init(pin_init!(Self {
+ error_code: AtomicU32::new(BR_OK),
+ links_track <- AtomicTracker::new(),
+ }))
+ .map(ListArc::into_arc)
+ }
+
+ fn set_error_code(&self, code: u32) {
+ self.error_code.store(code, Ordering::Relaxed);
+ }
+
+ fn is_unused(&self) -> bool {
+ self.error_code.load(Ordering::Relaxed) == BR_OK
+ }
+}
+
+impl DeliverToRead for ThreadError {
+ fn do_work(
+ self: DArc<Self>,
+ _thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
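+ // Deliver the pending error code, then reset the slot to `BR_OK` so it can be
+ // reused for a later error.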
+ let code = self.error_code.load(Ordering::Relaxed);
+ self.error_code.store(BR_OK, Ordering::Relaxed);
+ writer.write_code(code)?;
+ Ok(true)
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ seq_print!(
+ m,
+ "{}transaction error: {}\n",
+ prefix,
+ self.error_code.load(Ordering::Relaxed)
+ );
+ Ok(())
+ }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for ThreadError {
+ tracked_by links_track: AtomicTracker;
+ }
+}
diff --git a/drivers/android/binder/trace.rs b/drivers/android/binder/trace.rs
new file mode 100644
index 000000000000..af0e4392805e
--- /dev/null
+++ b/drivers/android/binder/trace.rs
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::ffi::{c_uint, c_ulong};
+use kernel::tracepoint::declare_trace;
+
+declare_trace! {
+ unsafe fn rust_binder_ioctl(cmd: c_uint, arg: c_ulong);
+}
+
+#[inline]
+pub(crate) fn trace_ioctl(cmd: u32, arg: usize) {
+ // SAFETY: Always safe to call.
+ unsafe { rust_binder_ioctl(cmd, arg as c_ulong) }
+}
diff --git a/drivers/android/binder/transaction.rs b/drivers/android/binder/transaction.rs
new file mode 100644
index 000000000000..02512175d622
--- /dev/null
+++ b/drivers/android/binder/transaction.rs
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use core::sync::atomic::{AtomicBool, Ordering};
+use kernel::{
+ prelude::*,
+ seq_file::SeqFile,
+ seq_print,
+ sync::{Arc, SpinLock},
+ task::Kuid,
+ time::{Instant, Monotonic},
+ types::ScopeGuard,
+};
+
+use crate::{
+ allocation::{Allocation, TranslatedFds},
+ defs::*,
+ error::{BinderError, BinderResult},
+ node::{Node, NodeRef},
+ process::{Process, ProcessInner},
+ ptr_align,
+ thread::{PushWorkRes, Thread},
+ BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverToRead,
+};
+
+#[pin_data(PinnedDrop)]
+pub(crate) struct Transaction {
+ pub(crate) debug_id: usize,
+ target_node: Option<DArc<Node>>,
+ pub(crate) from_parent: Option<DArc<Transaction>>,
+ pub(crate) from: Arc<Thread>,
+ pub(crate) to: Arc<Process>,
+ #[pin]
+ allocation: SpinLock<Option<Allocation>>,
+ is_outstanding: AtomicBool,
+ code: u32,
+ pub(crate) flags: u32,
+ data_size: usize,
+ offsets_size: usize,
+ data_address: usize,
+ sender_euid: Kuid,
+ txn_security_ctx_off: Option<usize>,
+ pub(crate) oneway_spam_detected: bool,
+ start_time: Instant<Monotonic>,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for Transaction { untracked; }
+}
+
+impl Transaction {
+ pub(crate) fn new(
+ node_ref: NodeRef,
+ from_parent: Option<DArc<Transaction>>,
+ from: &Arc<Thread>,
+ tr: &BinderTransactionDataSg,
+ ) -> BinderResult<DLArc<Self>> {
+ let debug_id = super::next_debug_id();
+ let trd = &tr.transaction_data;
+ let allow_fds = node_ref.node.flags & FLAT_BINDER_FLAG_ACCEPTS_FDS != 0;
+ let txn_security_ctx = node_ref.node.flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX != 0;
+ let mut txn_security_ctx_off = if txn_security_ctx { Some(0) } else { None };
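+ // When the target node requests a security context, the actual offset is filled
+ // in by `copy_transaction_data` below.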
+ let to = node_ref.node.owner.clone();
+ let mut alloc = match from.copy_transaction_data(
+ to.clone(),
+ tr,
+ debug_id,
+ allow_fds,
+ txn_security_ctx_off.as_mut(),
+ ) {
+ Ok(alloc) => alloc,
+ Err(err) => {
+ if !err.is_dead() {
+ pr_warn!("Failure in copy_transaction_data: {:?}", err);
+ }
+ return Err(err);
+ }
+ };
+ let oneway_spam_detected = alloc.oneway_spam_detected;
+ if trd.flags & TF_ONE_WAY != 0 {
+ if from_parent.is_some() {
+ pr_warn!("Oneway transaction should not be in a transaction stack.");
+ return Err(EINVAL.into());
+ }
+ alloc.set_info_oneway_node(node_ref.node.clone());
+ }
+ if trd.flags & TF_CLEAR_BUF != 0 {
+ alloc.set_info_clear_on_drop();
+ }
+ let target_node = node_ref.node.clone();
+ alloc.set_info_target_node(node_ref);
+ let data_address = alloc.ptr;
+
+ Ok(DTRWrap::arc_pin_init(pin_init!(Transaction {
+ debug_id,
+ target_node: Some(target_node),
+ from_parent,
+ sender_euid: from.process.task.euid(),
+ from: from.clone(),
+ to,
+ code: trd.code,
+ flags: trd.flags,
+ data_size: trd.data_size as _,
+ offsets_size: trd.offsets_size as _,
+ data_address,
+ allocation <- kernel::new_spinlock!(Some(alloc.success()), "Transaction::new"),
+ is_outstanding: AtomicBool::new(false),
+ txn_security_ctx_off,
+ oneway_spam_detected,
+ start_time: Instant::now(),
+ }))?)
+ }
+
+ pub(crate) fn new_reply(
+ from: &Arc<Thread>,
+ to: Arc<Process>,
+ tr: &BinderTransactionDataSg,
+ allow_fds: bool,
+ ) -> BinderResult<DLArc<Self>> {
+ let debug_id = super::next_debug_id();
+ let trd = &tr.transaction_data;
+ let mut alloc = match from.copy_transaction_data(to.clone(), tr, debug_id, allow_fds, None)
+ {
+ Ok(alloc) => alloc,
+ Err(err) => {
+ pr_warn!("Failure in copy_transaction_data: {:?}", err);
+ return Err(err);
+ }
+ };
+ let oneway_spam_detected = alloc.oneway_spam_detected;
+ if trd.flags & TF_CLEAR_BUF != 0 {
+ alloc.set_info_clear_on_drop();
+ }
+ Ok(DTRWrap::arc_pin_init(pin_init!(Transaction {
+ debug_id,
+ target_node: None,
+ from_parent: None,
+ sender_euid: from.process.task.euid(),
+ from: from.clone(),
+ to,
+ code: trd.code,
+ flags: trd.flags,
+ data_size: trd.data_size as _,
+ offsets_size: trd.offsets_size as _,
+ data_address: alloc.ptr,
+ allocation <- kernel::new_spinlock!(Some(alloc.success()), "Transaction::new"),
+ is_outstanding: AtomicBool::new(false),
+ txn_security_ctx_off: None,
+ oneway_spam_detected,
+ start_time: Instant::now(),
+ }))?)
+ }
+
+ #[inline(never)]
+ pub(crate) fn debug_print_inner(&self, m: &SeqFile, prefix: &str) {
+ seq_print!(
+ m,
+ "{}{}: from {}:{} to {} code {:x} flags {:x} elapsed {}ms",
+ prefix,
+ self.debug_id,
+ self.from.process.task.pid(),
+ self.from.id,
+ self.to.task.pid(),
+ self.code,
+ self.flags,
+ self.start_time.elapsed().as_millis(),
+ );
+ if let Some(target_node) = &self.target_node {
+ seq_print!(m, " node {}", target_node.debug_id);
+ }
+ seq_print!(m, " size {}:{}\n", self.data_size, self.offsets_size);
+ }
+
+ /// Determines if the transaction is stacked on top of the given transaction.
+ pub(crate) fn is_stacked_on(&self, onext: &Option<DArc<Self>>) -> bool {
+ match (&self.from_parent, onext) {
+ (None, None) => true,
+ (Some(from_parent), Some(next)) => Arc::ptr_eq(from_parent, next),
+ _ => false,
+ }
+ }
+
+ /// Returns a pointer to the next transaction on the transaction stack, if there is one.
+ pub(crate) fn clone_next(&self) -> Option<DArc<Self>> {
+ Some(self.from_parent.as_ref()?.clone())
+ }
+
+ /// Searches in the transaction stack for a thread that belongs to the target process. This is
+ /// useful when finding a target for a new transaction: if the node belongs to a process that
+ /// is already part of the transaction stack, we reuse the thread.
+ fn find_target_thread(&self) -> Option<Arc<Thread>> {
+ let mut it = &self.from_parent;
+ while let Some(transaction) = it {
+ if Arc::ptr_eq(&transaction.from.process, &self.to) {
+ return Some(transaction.from.clone());
+ }
+ it = &transaction.from_parent;
+ }
+ None
+ }
+
+ /// Searches in the transaction stack for a transaction originating at the given thread.
+ pub(crate) fn find_from(&self, thread: &Thread) -> Option<&DArc<Transaction>> {
+ let mut it = &self.from_parent;
+ while let Some(transaction) = it {
+ if core::ptr::eq(thread, transaction.from.as_ref()) {
+ return Some(transaction);
+ }
+
+ it = &transaction.from_parent;
+ }
+ None
+ }
+
+ pub(crate) fn set_outstanding(&self, to_process: &mut ProcessInner) {
+ // No race because this method is only called once.
+ if !self.is_outstanding.load(Ordering::Relaxed) {
+ self.is_outstanding.store(true, Ordering::Relaxed);
+ to_process.add_outstanding_txn();
+ }
+ }
+
+ /// Decrement `outstanding_txns` in `to` if it hasn't already been decremented.
+ fn drop_outstanding_txn(&self) {
+ // No race because this is called at most twice, and one of the calls is in the
+ // destructor, which is guaranteed to not race with any other operations on the
+ // transaction. It also cannot race with `set_outstanding`, since submission happens
+ // before delivery.
+ if self.is_outstanding.load(Ordering::Relaxed) {
+ self.is_outstanding.store(false, Ordering::Relaxed);
+ self.to.drop_outstanding_txn();
+ }
+ }
+
+ /// Submits the transaction to a work queue. Uses a thread if there is one in the transaction
+ /// stack, otherwise uses the destination process.
+ ///
+ /// Not used for replies.
+ pub(crate) fn submit(self: DLArc<Self>) -> BinderResult {
+ // Defined before `process_inner` so that the destructor runs after releasing the lock.
+ let mut _t_outdated;
+
+ let oneway = self.flags & TF_ONE_WAY != 0;
+ let process = self.to.clone();
+ let mut process_inner = process.inner.lock();
+
+ self.set_outstanding(&mut process_inner);
+
+ if oneway {
+ if let Some(target_node) = self.target_node.clone() {
+ if process_inner.is_frozen {
+ process_inner.async_recv = true;
+ if self.flags & TF_UPDATE_TXN != 0 {
+ if let Some(t_outdated) =
+ target_node.take_outdated_transaction(&self, &mut process_inner)
+ {
+ // Save the transaction to be dropped after locks are released.
+ _t_outdated = t_outdated;
+ }
+ }
+ }
+ match target_node.submit_oneway(self, &mut process_inner) {
+ Ok(()) => {}
+ Err((err, work)) => {
+ drop(process_inner);
+ // Drop work after releasing process lock.
+ drop(work);
+ return Err(err);
+ }
+ }
+
+ if process_inner.is_frozen {
+ return Err(BinderError::new_frozen_oneway());
+ } else {
+ return Ok(());
+ }
+ } else {
+ pr_err!("Failed to submit oneway transaction to node.");
+ }
+ }
+
+ if process_inner.is_frozen {
+ process_inner.sync_recv = true;
+ return Err(BinderError::new_frozen());
+ }
+
+ let res = if let Some(thread) = self.find_target_thread() {
+ match thread.push_work(self) {
+ PushWorkRes::Ok => Ok(()),
+ PushWorkRes::FailedDead(me) => Err((BinderError::new_dead(), me)),
+ }
+ } else {
+ process_inner.push_work(self)
+ };
+ drop(process_inner);
+
+ match res {
+ Ok(()) => Ok(()),
+ Err((err, work)) => {
+ // Drop work after releasing process lock.
+ drop(work);
+ Err(err)
+ }
+ }
+ }
+
+ /// Check whether one oneway transaction can supersede another.
+ pub(crate) fn can_replace(&self, old: &Transaction) -> bool {
+ if self.from.process.task.pid() != old.from.process.task.pid() {
+ return false;
+ }
+
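+ // Replacement requires that both transactions are oneway and flagged as updates
+ // (`TF_ONE_WAY | TF_UPDATE_TXN`).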
+ if self.flags & old.flags & (TF_ONE_WAY | TF_UPDATE_TXN) != (TF_ONE_WAY | TF_UPDATE_TXN) {
+ return false;
+ }
+
+ let target_node_match = match (self.target_node.as_ref(), old.target_node.as_ref()) {
+ (None, None) => true,
+ (Some(tn1), Some(tn2)) => Arc::ptr_eq(tn1, tn2),
+ _ => false,
+ };
+
+ self.code == old.code && self.flags == old.flags && target_node_match
+ }
+
+ fn prepare_file_list(&self) -> Result<TranslatedFds> {
+ let mut alloc = self.allocation.lock().take().ok_or(ESRCH)?;
+
+ match alloc.translate_fds() {
+ Ok(translated) => {
+ *self.allocation.lock() = Some(alloc);
+ Ok(translated)
+ }
+ Err(err) => {
+ // Free the allocation eagerly.
+ drop(alloc);
+ Err(err)
+ }
+ }
+ }
+}
+
+impl DeliverToRead for Transaction {
+ fn do_work(
+ self: DArc<Self>,
+ thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ let send_failed_reply = ScopeGuard::new(|| {
+ if self.target_node.is_some() && self.flags & TF_ONE_WAY == 0 {
+ let reply = Err(BR_FAILED_REPLY);
+ self.from.deliver_reply(reply, &self);
+ }
+ self.drop_outstanding_txn();
+ });
+
+ let files = if let Ok(list) = self.prepare_file_list() {
+ list
+ } else {
+ // On failure to process the list, the `send_failed_reply` guard sends a failed
+ // reply back to the sender and the recipient ignores the transaction.
+ return Ok(true);
+ };
+
+ let mut tr_sec = BinderTransactionDataSecctx::default();
+ let tr = tr_sec.tr_data();
+ if let Some(target_node) = &self.target_node {
+ let (ptr, cookie) = target_node.get_id();
+ tr.target.ptr = ptr as _;
+ tr.cookie = cookie as _;
+ };
+ tr.code = self.code;
+ tr.flags = self.flags;
+ tr.data_size = self.data_size as _;
+ tr.data.ptr.buffer = self.data_address as _;
+ tr.offsets_size = self.offsets_size as _;
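+ // The offsets array is stored after the payload data, aligned to the pointer
+ // size via `ptr_align`.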
+ if tr.offsets_size > 0 {
+ tr.data.ptr.offsets = (self.data_address + ptr_align(self.data_size).unwrap()) as _;
+ }
+ tr.sender_euid = self.sender_euid.into_uid_in_current_ns();
+ tr.sender_pid = 0;
+ if self.target_node.is_some() && self.flags & TF_ONE_WAY == 0 {
+ // Not a reply and not one-way.
+ tr.sender_pid = self.from.process.pid_in_current_ns();
+ }
+ let code = if self.target_node.is_none() {
+ BR_REPLY
+ } else if self.txn_security_ctx_off.is_some() {
+ BR_TRANSACTION_SEC_CTX
+ } else {
+ BR_TRANSACTION
+ };
+
+ // Write the transaction code and data to the user buffer.
+ writer.write_code(code)?;
+ if let Some(off) = self.txn_security_ctx_off {
+ tr_sec.secctx = (self.data_address + off) as u64;
+ writer.write_payload(&tr_sec)?;
+ } else {
+ writer.write_payload(&*tr)?;
+ }
+
+ let mut alloc = self.allocation.lock().take().ok_or(ESRCH)?;
+
+ // Dismiss the guard that would report the transaction as failed. No failure
+ // paths are allowed from here on out.
+ send_failed_reply.dismiss();
+
+ // Commit files, and set FDs in FDA to be closed on buffer free.
+ let close_on_free = files.commit();
+ alloc.set_info_close_on_free(close_on_free);
+
+ // It is now the user's responsibility to clear the allocation.
+ alloc.keep_alive();
+
+ self.drop_outstanding_txn();
+
+ // When this is not a reply and not a oneway transaction, update `current_transaction`. If
+ // it's a reply, `current_transaction` has already been updated appropriately.
+ if self.target_node.is_some() && tr_sec.transaction_data.flags & TF_ONE_WAY == 0 {
+ thread.set_current_transaction(self);
+ }
+
+ Ok(false)
+ }
+
+ fn cancel(self: DArc<Self>) {
+ let allocation = self.allocation.lock().take();
+ drop(allocation);
+
+ // If this is not a reply or oneway transaction, then send a dead reply.
+ if self.target_node.is_some() && self.flags & TF_ONE_WAY == 0 {
+ let reply = Err(BR_DEAD_REPLY);
+ self.from.deliver_reply(reply, &self);
+ }
+
+ self.drop_outstanding_txn();
+ }
+
+ fn should_sync_wakeup(&self) -> bool {
+ self.flags & TF_ONE_WAY == 0
+ }
+
+ fn debug_print(&self, m: &SeqFile, _prefix: &str, tprefix: &str) -> Result<()> {
+ self.debug_print_inner(m, tprefix);
+ Ok(())
+ }
+}
+
+#[pinned_drop]
+impl PinnedDrop for Transaction {
+ fn drop(self: Pin<&mut Self>) {
+ self.drop_outstanding_txn();
+ }
+}
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index 8b08976146ba..342574bfd28a 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -537,8 +537,8 @@ struct binder_transaction {
struct binder_proc *to_proc;
struct binder_thread *to_thread;
struct binder_transaction *to_parent;
- unsigned need_reply:1;
- /* unsigned is_dead:1; */ /* not used at the moment */
+ unsigned is_async:1;
+ unsigned is_reply:1;
struct binder_buffer *buffer;
unsigned int code;
diff --git a/drivers/android/binder_netlink.c b/drivers/android/binder_netlink.c
new file mode 100644
index 000000000000..d05397a50ca6
--- /dev/null
+++ b/drivers/android/binder_netlink.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/binder.yaml */
+/* YNL-GEN kernel source */
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include "binder_netlink.h"
+
+#include <uapi/linux/android/binder_netlink.h>
+
+/* Ops table for binder */
+static const struct genl_split_ops binder_nl_ops[] = {
+};
+
+static const struct genl_multicast_group binder_nl_mcgrps[] = {
+ [BINDER_NLGRP_REPORT] = { "report", },
+};
+
+struct genl_family binder_nl_family __ro_after_init = {
+ .name = BINDER_FAMILY_NAME,
+ .version = BINDER_FAMILY_VERSION,
+ .netnsok = true,
+ .parallel_ops = true,
+ .module = THIS_MODULE,
+ .split_ops = binder_nl_ops,
+ .n_split_ops = ARRAY_SIZE(binder_nl_ops),
+ .mcgrps = binder_nl_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(binder_nl_mcgrps),
+};
diff --git a/drivers/android/binder_netlink.h b/drivers/android/binder_netlink.h
new file mode 100644
index 000000000000..882c7a6b537e
--- /dev/null
+++ b/drivers/android/binder_netlink.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/binder.yaml */
+/* YNL-GEN kernel header */
+
+#ifndef _LINUX_BINDER_GEN_H
+#define _LINUX_BINDER_GEN_H
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include <uapi/linux/android/binder_netlink.h>
+
+enum {
+ BINDER_NLGRP_REPORT,
+};
+
+extern struct genl_family binder_nl_family;
+
+#endif /* _LINUX_BINDER_GEN_H */
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 97a78e5623db..fa5eb61cf580 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -402,6 +402,43 @@ TRACE_EVENT(binder_return,
"unknown")
);
+TRACE_EVENT(binder_netlink_report,
+ TP_PROTO(const char *context,
+ struct binder_transaction *t,
+ u32 data_size,
+ u32 error),
+ TP_ARGS(context, t, data_size, error),
+ TP_STRUCT__entry(
+ __field(const char *, context)
+ __field(u32, error)
+ __field(int, from_pid)
+ __field(int, from_tid)
+ __field(int, to_pid)
+ __field(int, to_tid)
+ __field(bool, is_reply)
+ __field(unsigned int, flags)
+ __field(unsigned int, code)
+ __field(size_t, data_size)
+ ),
+ TP_fast_assign(
+ __entry->context = context;
+ __entry->error = error;
+ __entry->from_pid = t->from_pid;
+ __entry->from_tid = t->from_tid;
+ __entry->to_pid = t->to_proc ? t->to_proc->pid : 0;
+ __entry->to_tid = t->to_thread ? t->to_thread->pid : 0;
+ __entry->is_reply = t->is_reply;
+ __entry->flags = t->flags;
+ __entry->code = t->code;
+ __entry->data_size = data_size;
+ ),
+ TP_printk("from %d:%d to %d:%d context=%s error=%d is_reply=%d flags=0x%x code=0x%x size=%zu",
+ __entry->from_pid, __entry->from_tid,
+ __entry->to_pid, __entry->to_tid,
+ __entry->context, __entry->error, __entry->is_reply,
+ __entry->flags, __entry->code, __entry->data_size)
+);
+
#endif /* _BINDER_TRACE_H */
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 0d9d95a7fb60..be8e64eb39ec 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -59,6 +59,7 @@ struct binder_features {
bool oneway_spam_detection;
bool extended_error;
bool freeze_notification;
+ bool transaction_report;
};
static const struct constant_table binderfs_param_stats[] = {
@@ -76,6 +77,7 @@ static struct binder_features binder_features = {
.oneway_spam_detection = true,
.extended_error = true,
.freeze_notification = true,
+ .transaction_report = true,
};
static inline struct binderfs_info *BINDERFS_SB(const struct super_block *sb)
@@ -601,6 +603,12 @@ static int init_binder_features(struct super_block *sb)
if (IS_ERR(dentry))
return PTR_ERR(dentry);
+ dentry = binderfs_create_file(dir, "transaction_report",
+ &binder_features_fops,
+ &binder_features.transaction_report);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
return 0;
}
diff --git a/drivers/android/dbitmap.h b/drivers/android/dbitmap.h
index 956f1bd087d1..c7299ce8b374 100644
--- a/drivers/android/dbitmap.h
+++ b/drivers/android/dbitmap.h
@@ -37,6 +37,7 @@ static inline void dbitmap_free(struct dbitmap *dmap)
{
dmap->nbits = 0;
kfree(dmap->map);
+ dmap->map = NULL;
}
/* Returns the nbits that a dbitmap can shrink to, 0 if not possible. */
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index e1c24bbacf64..7a7f88b3fa2b 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -689,40 +689,50 @@ MODULE_PARM_DESC(mask_port_map,
"where <pci_dev> is the PCI ID of an AHCI controller in the "
"form \"domain:bus:dev.func\"");
-static void ahci_apply_port_map_mask(struct device *dev,
- struct ahci_host_priv *hpriv, char *mask_s)
+static char *ahci_mask_port_ext;
+module_param_named(mask_port_ext, ahci_mask_port_ext, charp, 0444);
+MODULE_PARM_DESC(mask_port_ext,
+ "32-bits mask to ignore the external/hotplug capability of ports. "
+ "Valid values are: "
+ "\"<mask>\" to apply the same mask to all AHCI controller "
+ "devices, and \"<pci_dev>=<mask>,<pci_dev>=<mask>,...\" to "
+ "specify different masks for the controllers specified, "
+ "where <pci_dev> is the PCI ID of an AHCI controller in the "
+ "form \"domain:bus:dev.func\"");
+
+static u32 ahci_port_mask(struct device *dev, char *mask_s)
{
unsigned int mask;
if (kstrtouint(mask_s, 0, &mask)) {
dev_err(dev, "Invalid port map mask\n");
- return;
+ return 0;
}
- hpriv->mask_port_map = mask;
+ return mask;
}
-static void ahci_get_port_map_mask(struct device *dev,
- struct ahci_host_priv *hpriv)
+static u32 ahci_get_port_mask(struct device *dev, char *mask_p)
{
char *param, *end, *str, *mask_s;
char *name;
+ u32 mask = 0;
- if (!strlen(ahci_mask_port_map))
- return;
+ if (!mask_p || !strlen(mask_p))
+ return 0;
- str = kstrdup(ahci_mask_port_map, GFP_KERNEL);
+ str = kstrdup(mask_p, GFP_KERNEL);
if (!str)
- return;
+ return 0;
/* Handle single mask case */
if (!strchr(str, '=')) {
- ahci_apply_port_map_mask(dev, hpriv, str);
+ mask = ahci_port_mask(dev, str);
goto free;
}
/*
- * Mask list case: parse the parameter to apply the mask only if
+ * Mask list case: parse the parameter to get the mask only if
* the device name matches.
*/
param = str;
@@ -752,11 +762,13 @@ static void ahci_get_port_map_mask(struct device *dev,
param++;
}
- ahci_apply_port_map_mask(dev, hpriv, mask_s);
+ mask = ahci_port_mask(dev, mask_s);
}
free:
kfree(str);
+
+ return mask;
}
static void ahci_pci_save_initial_config(struct pci_dev *pdev,
@@ -782,8 +794,10 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
}
/* Handle port map masks passed as module parameter. */
- if (ahci_mask_port_map)
- ahci_get_port_map_mask(&pdev->dev, hpriv);
+ hpriv->mask_port_map =
+ ahci_get_port_mask(&pdev->dev, ahci_mask_port_map);
+ hpriv->mask_port_ext =
+ ahci_get_port_mask(&pdev->dev, ahci_mask_port_ext);
ahci_save_initial_config(&pdev->dev, hpriv);
}
@@ -1757,11 +1771,20 @@ static void ahci_mark_external_port(struct ata_port *ap)
void __iomem *port_mmio = ahci_port_base(ap);
u32 tmp;
- /* mark external ports (hotplug-capable, eSATA) */
+ /*
+ * Mark external ports (hotplug-capable, eSATA), unless we were asked to
+ * ignore this feature.
+ */
tmp = readl(port_mmio + PORT_CMD);
if (((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS)) ||
- (tmp & PORT_CMD_HPCP))
+ (tmp & PORT_CMD_HPCP)) {
+ if (hpriv->mask_port_ext & (1U << ap->port_no)) {
+ ata_port_info(ap,
+ "Ignoring external/hotplug capability\n");
+ return;
+ }
ap->pflags |= ATA_PFLAG_EXTERNAL;
+ }
}
static void ahci_update_initial_lpm_policy(struct ata_port *ap)
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 2c10c8f440d1..293b7fb216b5 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -330,6 +330,7 @@ struct ahci_host_priv {
/* Input fields */
unsigned int flags; /* AHCI_HFLAG_* */
u32 mask_port_map; /* Mask of valid ports */
+ u32 mask_port_ext; /* Mask of ports ext capability */
void __iomem * mmio; /* bus-independent mem map */
u32 cap; /* cap to use */
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index 5d5a51a77f5d..6b8844646fcd 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -450,7 +450,6 @@ static int xgene_ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
{
int pmp = sata_srst_pmp(link);
struct ata_port *ap = link->ap;
- u32 rc;
void __iomem *port_mmio = ahci_port_base(ap);
u32 port_fbs;
@@ -463,9 +462,7 @@ static int xgene_ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
port_fbs |= pmp << PORT_FBS_DEV_OFFSET;
writel(port_fbs, port_mmio + PORT_FBS);
- rc = ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
-
- return rc;
+ return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
}
/**
@@ -500,7 +497,7 @@ static int xgene_ahci_softreset(struct ata_link *link, unsigned int *class,
u32 port_fbs;
u32 port_fbs_save;
u32 retry = 1;
- u32 rc;
+ int rc;
port_fbs_save = readl(port_mmio + PORT_FBS);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 2946ae6d4b2c..2586e77ebf45 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2075,7 +2075,7 @@ out:
* Check if a link is established. This is a relaxed version of
* ata_phys_link_online() which accounts for the fact that this is potentially
* called after changing the link power management policy, which may not be
- * reflected immediately in the SSTAUS register (e.g., we may still be seeing
+ * reflected immediately in the SStatus register (e.g., we may still be seeing
* the PHY in partial, slumber or devsleep power management state).
* So check that:
* - A device is still present, that is, DET is 1h (Device presence detected
@@ -2089,8 +2089,13 @@ static bool ata_eh_link_established(struct ata_link *link)
u32 sstatus;
u8 det, ipm;
+ /*
+ * For old IDE/PATA adapters that do not have a valid scr_read method,
+ * or if reading the SStatus register fails, assume that the device is
+ * present. Device probe will determine if that is really the case.
+ */
if (sata_scr_read(link, SCR_STATUS, &sstatus))
- return false;
+ return true;
det = sstatus & 0x0f;
ipm = (sstatus >> 8) & 0x0f;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 57f674f51b0c..b43a3196e2be 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -351,7 +351,7 @@ EXPORT_SYMBOL_GPL(ata_common_sdev_groups);
/**
* ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
* @sdev: SCSI device for which BIOS geometry is to be determined
- * @bdev: block device associated with @sdev
+ * @unused: gendisk associated with @sdev
* @capacity: capacity of SCSI device
* @geom: location to which geometry will be output
*
@@ -366,7 +366,7 @@ EXPORT_SYMBOL_GPL(ata_common_sdev_groups);
* RETURNS:
* Zero.
*/
-int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
+int ata_std_bios_param(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int geom[])
{
geom[0] = 255;
@@ -3904,21 +3904,16 @@ static int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
/* Check cdl_ctrl */
switch (buf[0] & 0x03) {
case 0:
- /* Disable CDL if it is enabled */
- if (!(dev->flags & ATA_DFLAG_CDL_ENABLED))
- return 0;
+ /* Disable CDL */
ata_dev_dbg(dev, "Disabling CDL\n");
cdl_action = 0;
dev->flags &= ~ATA_DFLAG_CDL_ENABLED;
break;
case 0x02:
/*
- * Enable CDL if not already enabled. Since this is mutually
- * exclusive with NCQ priority, allow this only if NCQ priority
- * is disabled.
+ * Enable CDL. Since CDL is mutually exclusive with NCQ
+ * priority, allow this only if NCQ priority is disabled.
*/
- if (dev->flags & ATA_DFLAG_CDL_ENABLED)
- return 0;
if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) {
ata_dev_err(dev,
"NCQ priority must be disabled to enable CDL\n");
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 7fc407255eb4..1e2a2c33cdc8 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -614,7 +614,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
offset = qc->cursg->offset + qc->cursg_ofs;
/* get the current page and offset */
- page = nth_page(page, (offset >> PAGE_SHIFT));
+ page += offset >> PAGE_SHIFT;
offset %= PAGE_SIZE;
/* don't overrun current sg */
@@ -631,7 +631,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
unsigned int split_len = PAGE_SIZE - offset;
ata_pio_xfer(qc, page, offset, split_len);
- ata_pio_xfer(qc, nth_page(page, 1), 0, count - split_len);
+ ata_pio_xfer(qc, page + 1, 0, count - split_len);
} else {
ata_pio_xfer(qc, page, offset, count);
}
@@ -751,7 +751,7 @@ next_sg:
offset = sg->offset + qc->cursg_ofs;
/* get the current page and offset */
- page = nth_page(page, (offset >> PAGE_SHIFT));
+ page += offset >> PAGE_SHIFT;
offset %= PAGE_SIZE;
/* don't overrun current sg */
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index eeae160c898d..fa3c76a2b49d 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -279,6 +279,19 @@ static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci)
return NULL;
}
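+/*
+ * Reject undersized packets and ATMTCP control-message headers
+ * (hdr->length == ATMTCP_HDR_MAGIC) before the skb is handed to the send path.
+ */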
+static int atmtcp_c_pre_send(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+ struct atmtcp_hdr *hdr;
+
+ if (skb->len < sizeof(struct atmtcp_hdr))
+ return -EINVAL;
+
+ hdr = (struct atmtcp_hdr *)skb->data;
+ if (hdr->length == ATMTCP_HDR_MAGIC)
+ return -EINVAL;
+
+ return 0;
+}
static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
{
@@ -288,9 +301,6 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
struct sk_buff *new_skb;
int result = 0;
- if (skb->len < sizeof(struct atmtcp_hdr))
- goto done;
-
dev = vcc->dev_data;
hdr = (struct atmtcp_hdr *) skb->data;
if (hdr->length == ATMTCP_HDR_MAGIC) {
@@ -347,6 +357,7 @@ static const struct atmdev_ops atmtcp_v_dev_ops = {
static const struct atmdev_ops atmtcp_c_dev_ops = {
.close = atmtcp_c_close,
+ .pre_send = atmtcp_c_pre_send,
.send = atmtcp_c_send
};
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 064eb52ff7e2..1786d87b29e2 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -167,6 +167,12 @@ config PM_QOS_KUNIT_TEST
depends on KUNIT=y
default KUNIT_ALL_TESTS
+config PM_RUNTIME_KUNIT_TEST
+ tristate "KUnit Tests for runtime PM" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ depends on PM
+ default KUNIT_ALL_TESTS
+
config HMEM_REPORTING
bool
default n
diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c
index 12ffdd843756..04bdbff4dbe5 100644
--- a/drivers/base/auxiliary.c
+++ b/drivers/base/auxiliary.c
@@ -171,17 +171,18 @@
static const struct auxiliary_device_id *auxiliary_match_id(const struct auxiliary_device_id *id,
const struct auxiliary_device *auxdev)
{
- for (; id->name[0]; id++) {
- const char *p = strrchr(dev_name(&auxdev->dev), '.');
- int match_size;
+ const char *auxdev_name = dev_name(&auxdev->dev);
+ const char *p = strrchr(auxdev_name, '.');
+ int match_size;
- if (!p)
- continue;
- match_size = p - dev_name(&auxdev->dev);
+ if (!p)
+ return NULL;
+ match_size = p - auxdev_name;
+ for (; id->name[0]; id++) {
/* use dev_name(&auxdev->dev) prefix before last '.' char to match to */
if (strlen(id->name) == match_size &&
- !strncmp(dev_name(&auxdev->dev), id->name, match_size))
+ !strncmp(auxdev_name, id->name, match_size))
return id;
}
return NULL;
@@ -217,17 +218,14 @@ static int auxiliary_bus_probe(struct device *dev)
struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
int ret;
- ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON);
+ ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON |
+ PD_FLAG_DETACH_POWER_OFF);
if (ret) {
dev_warn(dev, "Failed to attach to PM Domain : %d\n", ret);
return ret;
}
- ret = auxdrv->probe(auxdev, auxiliary_match_id(auxdrv->id_table, auxdev));
- if (ret)
- dev_pm_domain_detach(dev, true);
-
- return ret;
+ return auxdrv->probe(auxdev, auxiliary_match_id(auxdrv->id_table, auxdev));
}
static void auxiliary_bus_remove(struct device *dev)
@@ -237,7 +235,6 @@ static void auxiliary_bus_remove(struct device *dev)
if (auxdrv->remove)
auxdrv->remove(auxdev);
- dev_pm_domain_detach(dev, true);
}
static void auxiliary_bus_shutdown(struct device *dev)
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 123031a757d9..86fa7fbb3548 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -248,9 +248,18 @@ void device_links_driver_cleanup(struct device *dev);
void device_links_no_driver(struct device *dev);
bool device_links_busy(struct device *dev);
void device_links_unbind_consumers(struct device *dev);
+bool device_link_flag_is_sync_state_only(u32 flags);
void fw_devlink_drivers_done(void);
void fw_devlink_probing_done(void);
+#define dev_for_each_link_to_supplier(__link, __dev) \
+ list_for_each_entry_srcu(__link, &(__dev)->links.suppliers, c_node, \
+ device_links_read_lock_held())
+
+#define dev_for_each_link_to_consumer(__link, __dev) \
+ list_for_each_entry_srcu(__link, &(__dev)->links.consumers, s_node, \
+ device_links_read_lock_held())
+
/* device pm support */
void device_pm_move_to_tail(struct device *dev);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index d22d6b23e758..3c533dab8fa5 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -287,7 +287,7 @@ static bool device_is_ancestor(struct device *dev, struct device *target)
#define DL_MARKER_FLAGS (DL_FLAG_INFERRED | \
DL_FLAG_CYCLE | \
DL_FLAG_MANAGED)
-static inline bool device_link_flag_is_sync_state_only(u32 flags)
+bool device_link_flag_is_sync_state_only(u32 flags)
{
return (flags & ~DL_MARKER_FLAGS) == DL_FLAG_SYNC_STATE_ONLY;
}
@@ -3994,8 +3994,8 @@ const char *device_get_devnode(const struct device *dev,
/**
* device_for_each_child - device child iterator.
* @parent: parent struct device.
- * @fn: function to be called for each device.
* @data: data for the callback.
+ * @fn: function to be called for each device.
*
* Iterate over @parent's child devices, and call @fn for each,
* passing it @data.
@@ -4024,8 +4024,8 @@ EXPORT_SYMBOL_GPL(device_for_each_child);
/**
* device_for_each_child_reverse - device child iterator in reversed order.
* @parent: parent struct device.
- * @fn: function to be called for each device.
* @data: data for the callback.
+ * @fn: function to be called for each device.
*
* Iterate over @parent's child devices, and call @fn for each,
* passing it @data.
@@ -4055,8 +4055,8 @@ EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
* device_for_each_child_reverse_from - device child iterator in reversed order.
* @parent: parent struct device.
* @from: optional starting point in child list
- * @fn: function to be called for each device.
* @data: data for the callback.
+ * @fn: function to be called for each device.
*
* Iterate over @parent's child devices, starting at @from, and call @fn
* for each, passing it @data. This helper is identical to
@@ -4089,8 +4089,8 @@ EXPORT_SYMBOL_GPL(device_for_each_child_reverse_from);
/**
* device_find_child - device iterator for locating a particular device.
* @parent: parent struct device
- * @match: Callback function to check device
* @data: Data to pass to match function
+ * @match: Callback function to check device
*
* This is similar to the device_for_each_child() function above, but it
* returns a reference to a device that is 'found' for later use, as
@@ -5278,6 +5278,25 @@ void device_set_node(struct device *dev, struct fwnode_handle *fwnode)
}
EXPORT_SYMBOL_GPL(device_set_node);
+/**
+ * get_dev_from_fwnode - Obtain a reference to the struct device the
+ * struct fwnode_handle is associated with.
+ * @fwnode: The pointer to the struct fwnode_handle to obtain the struct device
+ * reference for.
+ *
+ * This function takes a reference on the device that the device pointer
+ * embedded in the struct fwnode_handle points to.
+ *
+ * Note that the struct device pointer embedded in struct fwnode_handle does
+ * *not* by itself hold a reference on the struct device.
+ *
+ * Hence, it is a use-after-free (and thus a bug) to call this function if the
+ * caller cannot guarantee that the last reference on the corresponding struct
+ * device is not dropped concurrently.
+ *
+ * This is possible because struct fwnode_handle has its own reference count
+ * and can therefore outlive the struct device it is associated with.
+ */
struct device *get_dev_from_fwnode(struct fwnode_handle *fwnode)
{
return get_device((fwnode)->dev);
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index efc575a00edd..fa0a2eef93ac 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -325,7 +325,7 @@ static void cpu_device_release(struct device *dev)
* This is an empty function to prevent the driver core from spitting a
* warning at us. Yes, I know this is directly opposite of what the
* documentation for the driver core and kobjects say, and the author
- * of this code has already been publically ridiculed for doing
+ * of this code has already been publicly ridiculed for doing
* something as foolish as this. However, at this point in time, it is
* the only way to handle the issue of statically allocated cpu
* devices. The different architectures will have their cpu device
@@ -603,6 +603,7 @@ CPU_SHOW_VULN_FALLBACK(ghostwrite);
CPU_SHOW_VULN_FALLBACK(old_microcode);
CPU_SHOW_VULN_FALLBACK(indirect_target_selection);
CPU_SHOW_VULN_FALLBACK(tsa);
+CPU_SHOW_VULN_FALLBACK(vmscape);
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
@@ -622,6 +623,7 @@ static DEVICE_ATTR(ghostwrite, 0444, cpu_show_ghostwrite, NULL);
static DEVICE_ATTR(old_microcode, 0444, cpu_show_old_microcode, NULL);
static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
static DEVICE_ATTR(tsa, 0444, cpu_show_tsa, NULL);
+static DEVICE_ATTR(vmscape, 0444, cpu_show_vmscape, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -642,6 +644,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_old_microcode.attr,
&dev_attr_indirect_target_selection.attr,
&dev_attr_tsa.attr,
+ &dev_attr_vmscape.attr,
NULL
};
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index ff55e1bcfa30..c948c88d3956 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -1117,6 +1117,27 @@ void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
}
EXPORT_SYMBOL_GPL(devm_kmemdup);
+/**
+ * devm_kmemdup_const - conditionally duplicate and manage a region of memory
+ *
+ * @dev: Device this memory belongs to
+ * @src: memory region to duplicate
+ * @len: memory region length
+ * @gfp: GFP mask to use
+ *
+ * Return: source address if it is in .rodata or the return value of kmemdup()
+ * to which the function falls back otherwise.
+ */
+const void *
+devm_kmemdup_const(struct device *dev, const void *src, size_t len, gfp_t gfp)
+{
+ if (is_kernel_rodata((unsigned long)src))
+ return src;
+
+ return devm_kmemdup(dev, src, len, gfp);
+}
+EXPORT_SYMBOL_GPL(devm_kmemdup_const);
+
struct pages_devres {
unsigned long addr;
unsigned int order;
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 31bfb3194b4c..9d4e46ad8352 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -176,7 +176,7 @@ static int dev_mkdir(const char *name, umode_t mode)
struct dentry *dentry;
struct path path;
- dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
+ dentry = start_creating_path(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
@@ -184,7 +184,7 @@ static int dev_mkdir(const char *name, umode_t mode)
if (!IS_ERR(dentry))
/* mark as kernel-created inode */
d_inode(dentry)->i_private = &thread;
- done_path_create(&path, dentry);
+ end_creating_path(&path, dentry);
return PTR_ERR_OR_ZERO(dentry);
}
@@ -222,10 +222,10 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
struct path path;
int err;
- dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
+ dentry = start_creating_path(AT_FDCWD, nodename, &path, 0);
if (dentry == ERR_PTR(-ENOENT)) {
create_path(nodename);
- dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
+ dentry = start_creating_path(AT_FDCWD, nodename, &path, 0);
}
if (IS_ERR(dentry))
return PTR_ERR(dentry);
@@ -246,7 +246,7 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
/* mark as kernel-created inode */
d_inode(dentry)->i_private = &thread;
}
- done_path_create(&path, dentry);
+ end_creating_path(&path, dentry);
return err;
}
@@ -256,7 +256,7 @@ static int dev_rmdir(const char *name)
struct dentry *dentry;
int err;
- dentry = kern_path_locked(name, &parent);
+ dentry = start_removing_path(name, &parent);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
if (d_inode(dentry)->i_private == &thread)
@@ -265,9 +265,7 @@ static int dev_rmdir(const char *name)
else
err = -EPERM;
- dput(dentry);
- inode_unlock(d_inode(parent.dentry));
- path_put(&parent);
+ end_removing_path(&parent, dentry);
return err;
}
@@ -325,7 +323,7 @@ static int handle_remove(const char *nodename, struct device *dev)
int deleted = 0;
int err = 0;
- dentry = kern_path_locked(nodename, &parent);
+ dentry = start_removing_path(nodename, &parent);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
@@ -349,10 +347,8 @@ static int handle_remove(const char *nodename, struct device *dev)
if (!err || err == -ENOENT)
deleted = 1;
}
- dput(dentry);
- inode_unlock(d_inode(parent.dentry));
+ end_removing_path(&parent, dentry);
- path_put(&parent);
if (deleted && strchr(nodename, '/'))
delete_path(nodename);
return err;
diff --git a/drivers/base/faux.c b/drivers/base/faux.c
index f5fbda0a9a44..21dd02124231 100644
--- a/drivers/base/faux.c
+++ b/drivers/base/faux.c
@@ -155,6 +155,7 @@ struct faux_device *faux_device_create_with_groups(const char *name,
dev->parent = &faux_bus_root;
dev->bus = &faux_bus_type;
dev_set_name(dev, "%s", name);
+ device_set_pm_not_required(dev);
ret = device_add(dev);
if (ret) {
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 5c6c1d6bb59f..6d84a02cfa5d 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -769,21 +769,22 @@ static struct zone *early_node_zone_for_memory_block(struct memory_block *mem,
#ifdef CONFIG_NUMA
/**
- * memory_block_add_nid() - Indicate that system RAM falling into this memory
- * block device (partially) belongs to the given node.
+ * memory_block_add_nid_early() - Indicate that early system RAM falling into
+ * this memory block device (partially) belongs
+ * to the given node.
* @mem: The memory block device.
* @nid: The node id.
- * @context: The memory initialization context.
*
- * Indicate that system RAM falling into this memory block (partially) belongs
- * to the given node. If the context indicates ("early") that we are adding the
- * node during node device subsystem initialization, this will also properly
- * set/adjust mem->zone based on the zone ranges of the given node.
+ * Indicate that early system RAM falling into this memory block (partially)
+ * belongs to the given node. This will also properly set/adjust mem->zone based
+ * on the zone ranges of the given node.
+ *
+ * Memory hotplug handles this on memory block creation, where we can only have
+ * a single nid span a memory block.
*/
-void memory_block_add_nid(struct memory_block *mem, int nid,
- enum meminit_context context)
+void memory_block_add_nid_early(struct memory_block *mem, int nid)
{
- if (context == MEMINIT_EARLY && mem->nid != nid) {
+ if (mem->nid != nid) {
/*
* For early memory we have to determine the zone when setting
* the node id and handle multiple nodes spanning a single
@@ -797,19 +798,18 @@ void memory_block_add_nid(struct memory_block *mem, int nid,
mem->zone = early_node_zone_for_memory_block(mem, nid);
else
mem->zone = NULL;
+ /*
+ * If this memory block spans multiple nodes, we only indicate
+ * the last processed node. If we span multiple nodes (not applicable
+ * to hotplugged memory), zone == NULL will prohibit memory offlining
+ * and consequently unplug.
+ */
+ mem->nid = nid;
}
-
- /*
- * If this memory block spans multiple nodes, we only indicate
- * the last processed node. If we span multiple nodes (not applicable
- * to hotplugged memory), zone == NULL will prohibit memory offlining
- * and consequently unplug.
- */
- mem->nid = nid;
}
#endif
-static int add_memory_block(unsigned long block_id, unsigned long state,
+static int add_memory_block(unsigned long block_id, int nid, unsigned long state,
struct vmem_altmap *altmap,
struct memory_group *group)
{
@@ -827,7 +827,7 @@ static int add_memory_block(unsigned long block_id, unsigned long state,
mem->start_section_nr = block_id * sections_per_block;
mem->state = state;
- mem->nid = NUMA_NO_NODE;
+ mem->nid = nid;
mem->altmap = altmap;
INIT_LIST_HEAD(&mem->group_next);
@@ -854,13 +854,6 @@ static int add_memory_block(unsigned long block_id, unsigned long state,
return 0;
}
-static int add_hotplug_memory_block(unsigned long block_id,
- struct vmem_altmap *altmap,
- struct memory_group *group)
-{
- return add_memory_block(block_id, MEM_OFFLINE, altmap, group);
-}
-
static void remove_memory_block(struct memory_block *memory)
{
if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys))
@@ -886,7 +879,7 @@ static void remove_memory_block(struct memory_block *memory)
* Called under device_hotplug_lock.
*/
int create_memory_block_devices(unsigned long start, unsigned long size,
- struct vmem_altmap *altmap,
+ int nid, struct vmem_altmap *altmap,
struct memory_group *group)
{
const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
@@ -900,7 +893,7 @@ int create_memory_block_devices(unsigned long start, unsigned long size,
return -EINVAL;
for (block_id = start_block_id; block_id != end_block_id; block_id++) {
- ret = add_hotplug_memory_block(block_id, altmap, group);
+ ret = add_memory_block(block_id, nid, MEM_OFFLINE, altmap, group);
if (ret)
break;
}
@@ -1005,7 +998,7 @@ void __init memory_dev_init(void)
continue;
block_id = memory_block_id(nr);
- ret = add_memory_block(block_id, MEM_ONLINE, NULL, NULL);
+ ret = add_memory_block(block_id, NUMA_NO_NODE, MEM_ONLINE, NULL, NULL);
if (ret) {
panic("%s() failed to add memory block: %d\n",
__func__, ret);
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 3399594136b2..83aeb0518e1d 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -249,6 +249,44 @@ void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
EXPORT_SYMBOL_GPL(node_set_perf_attrs);
/**
+ * node_update_perf_attrs - Update the performance values for given access class
+ * @nid: Node identifier to be updated
+ * @coord: Heterogeneous memory performance coordinates
+ * @access: The access class for the given attributes
+ */
+void node_update_perf_attrs(unsigned int nid, struct access_coordinate *coord,
+ enum access_coordinate_class access)
+{
+ struct node_access_nodes *access_node;
+ struct node *node;
+ int i;
+
+ if (WARN_ON_ONCE(!node_online(nid)))
+ return;
+
+ node = node_devices[nid];
+ list_for_each_entry(access_node, &node->access_list, list_node) {
+ if (access_node->access != access)
+ continue;
+
+ access_node->coord = *coord;
+ for (i = 0; access_attrs[i]; i++) {
+ sysfs_notify(&access_node->dev.kobj,
+ NULL, access_attrs[i]->name);
+ }
+ break;
+ }
+
+ /* When setting CPU access coordinates, update mempolicy */
+ if (access != ACCESS_COORDINATE_CPU)
+ return;
+
+ if (mempolicy_set_node_perf(nid, coord))
+ pr_info("failed to set mempolicy attrs for node %d\n", nid);
+}
+EXPORT_SYMBOL_GPL(node_update_perf_attrs);
+
+/**
* struct node_cache_info - Internal tracking for memory node caches
* @dev: Device represeting the cache level
* @node: List element for tracking in the node
@@ -781,13 +819,10 @@ int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
#ifdef CONFIG_MEMORY_HOTPLUG
static void do_register_memory_block_under_node(int nid,
- struct memory_block *mem_blk,
- enum meminit_context context)
+ struct memory_block *mem_blk)
{
int ret;
- memory_block_add_nid(mem_blk, nid, context);
-
ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
&mem_blk->dev.kobj,
kobject_name(&mem_blk->dev.kobj));
@@ -815,7 +850,7 @@ static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
{
int nid = *(int *)arg;
- do_register_memory_block_under_node(nid, mem_blk, MEMINIT_HOTPLUG);
+ do_register_memory_block_under_node(nid, mem_blk);
return 0;
}
@@ -855,7 +890,8 @@ static void register_memory_blocks_under_nodes(void)
if (!mem)
continue;
- do_register_memory_block_under_node(nid, mem, MEMINIT_EARLY);
+ memory_block_add_nid_early(mem, nid);
+ do_register_memory_block_under_node(nid, mem);
put_device(&mem->dev);
}
@@ -885,6 +921,10 @@ int register_one_node(int nid)
node_devices[nid] = node;
error = register_node(node_devices[nid], nid);
+ if (error) {
+ node_devices[nid] = NULL;
+ return error;
+ }
/* link cpu under this node */
for_each_present_cpu(cpu) {
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 01f11629d241..2989e42d0161 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -4,5 +4,6 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o wakeup_stats.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
obj-$(CONFIG_HAVE_CLK) += clock_ops.o
obj-$(CONFIG_PM_QOS_KUNIT_TEST) += qos-test.o
+obj-$(CONFIG_PM_RUNTIME_KUNIT_TEST) += runtime-test.o
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index dbf5456cd891..e83503bdc1fd 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -40,10 +40,6 @@
typedef int (*pm_callback_t)(struct device *);
-#define list_for_each_entry_rcu_locked(pos, head, member) \
- list_for_each_entry_rcu(pos, head, member, \
- device_links_read_lock_held())
-
/*
* The entries in the dpm_list list are in a depth first order, simply
* because children are guaranteed to be discovered after parents, and
@@ -281,8 +277,9 @@ static void dpm_wait_for_suppliers(struct device *dev, bool async)
* callbacks freeing the link objects for the links in the list we're
* walking.
*/
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
- if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dev_for_each_link_to_supplier(link, dev)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT &&
+ !device_link_flag_is_sync_state_only(link->flags))
dpm_wait(link->supplier, async);
device_links_read_unlock(idx);
@@ -338,8 +335,9 @@ static void dpm_wait_for_consumers(struct device *dev, bool async)
* continue instead of trying to continue in parallel with its
* unregistration).
*/
- list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
- if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dev_for_each_link_to_consumer(link, dev)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT &&
+ !device_link_flag_is_sync_state_only(link->flags))
dpm_wait(link->consumer, async);
device_links_read_unlock(idx);
@@ -675,7 +673,7 @@ static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
idx = device_links_read_lock();
/* Start processing the device's "async" consumers. */
- list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
+ dev_for_each_link_to_consumer(link, dev)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_async_with_cleanup(link->consumer, func);
@@ -724,8 +722,20 @@ static void device_resume_noirq(struct device *dev, pm_message_t state, bool asy
if (dev->power.syscore || dev->power.direct_complete)
goto Out;
- if (!dev->power.is_noirq_suspended)
+ if (!dev->power.is_noirq_suspended) {
+ /*
+ * This means that system suspend has been aborted in the noirq
+ * phase before invoking the noirq suspend callback for the
+ * device, so if device_suspend_late() has left it in suspend,
+ * device_resume_early() should leave it in suspend too, in case
+ * its early resume depends on the noirq resume that has not run.
+ */
+ if (dev_pm_skip_suspend(dev))
+ dev->power.must_resume = false;
+
goto Out;
+ }
if (!dpm_wait_for_superior(dev, async))
goto Out;
@@ -1330,7 +1340,7 @@ static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
idx = device_links_read_lock();
/* Start processing the device's "async" suppliers. */
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ dev_for_each_link_to_supplier(link, dev)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_async_with_cleanup(link->supplier, func);
@@ -1384,7 +1394,7 @@ static void dpm_superior_set_must_resume(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
+ dev_for_each_link_to_supplier(link, dev)
link->supplier->power.must_resume = true;
device_links_read_unlock(idx);
@@ -1813,7 +1823,7 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
+ dev_for_each_link_to_supplier(link, dev) {
spin_lock_irq(&link->supplier->power.lock);
link->supplier->power.direct_complete = false;
spin_unlock_irq(&link->supplier->power.lock);
@@ -2065,7 +2075,7 @@ static bool device_prepare_smart_suspend(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
+ dev_for_each_link_to_supplier(link, dev) {
if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
continue;
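The hunks above and below replace the file-local list_for_each_entry_rcu_locked() macro with dev_for_each_link_to_supplier()/dev_for_each_link_to_consumer(). Their definition is not part of this diff; assuming they mirror the macro being removed, the supplier variant would expand to roughly:

	/* Assumed shape of the helper (defined elsewhere in the driver core): */
	#define dev_for_each_link_to_supplier(link, dev)			\
		list_for_each_entry_rcu(link, &(dev)->links.suppliers,		\
					c_node, device_links_read_lock_held())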
diff --git a/drivers/base/power/runtime-test.c b/drivers/base/power/runtime-test.c
new file mode 100644
index 000000000000..477feca804c7
--- /dev/null
+++ b/drivers/base/power/runtime-test.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 Google, Inc.
+ */
+
+#include <linux/cleanup.h>
+#include <linux/pm_runtime.h>
+#include <kunit/device.h>
+#include <kunit/test.h>
+
+#define DEVICE_NAME "pm_runtime_test_device"
+
+static void pm_runtime_depth_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ pm_runtime_enable(dev);
+
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_get_sync(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_get_sync(dev)); /* "already active" */
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync(dev));
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+}
+
+/* Test pm_runtime_put() and friends when already suspended. */
+static void pm_runtime_already_suspended_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ pm_runtime_enable(dev);
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+
+ pm_runtime_get_noresume(dev);
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_barrier(dev)); /* no wakeup needed */
+ pm_runtime_put(dev);
+
+ pm_runtime_get_noresume(dev);
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_put_sync(dev));
+
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_suspend(dev));
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_autosuspend(dev));
+ KUNIT_EXPECT_EQ(test, 1, pm_request_autosuspend(dev));
+
+ pm_runtime_get_noresume(dev);
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_put_sync_autosuspend(dev));
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ /* Grab 2 refcounts */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_get_noresume(dev);
+ /* The first put() sees usage_count 1 */
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync_autosuspend(dev));
+ /* The second put() sees usage_count 0 but tells us "already suspended". */
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_put_sync_autosuspend(dev));
+
+ /* Should have remained suspended the whole time. */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+}
+
+static void pm_runtime_idle_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ pm_runtime_enable(dev);
+
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_get_sync(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_idle(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ pm_runtime_put_noidle(dev);
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_idle(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_request_idle(dev));
+}
+
+static void pm_runtime_disabled_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ /* Never called pm_runtime_enable() */
+ KUNIT_EXPECT_FALSE(test, pm_runtime_enabled(dev));
+
+ /* "disabled" is treated as "active" */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ KUNIT_EXPECT_FALSE(test, pm_runtime_suspended(dev));
+
+ /*
+ * Note: these "fail", but they still acquire/release refcounts, so
+ * keep them balanced.
+ */
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_get(dev));
+ pm_runtime_put(dev);
+
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_get_sync(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_put_sync(dev));
+
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_get(dev));
+ pm_runtime_put_autosuspend(dev);
+
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_resume_and_get(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_request_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_request_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_request_autosuspend(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_suspend(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_autosuspend(dev));
+
+ /* Still disabled */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ KUNIT_EXPECT_FALSE(test, pm_runtime_enabled(dev));
+}
+
+static void pm_runtime_error_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ pm_runtime_enable(dev);
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+
+ /* Fake a .runtime_resume() error */
+ dev->power.runtime_error = -EIO;
+
+ /*
+ * Note: these "fail", but they still acquire/release refcounts, so
+ * keep them balanced.
+ */
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_get(dev));
+ pm_runtime_put(dev);
+
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_get_sync(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_put_sync(dev));
+
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_get(dev));
+ pm_runtime_put_autosuspend(dev);
+
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_get(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_put_sync_autosuspend(dev));
+
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_resume_and_get(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_request_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_request_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_request_autosuspend(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_suspend(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_autosuspend(dev));
+
+ /* Error is still pending */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+ KUNIT_EXPECT_EQ(test, -EIO, dev->power.runtime_error);
+ /* Clear error */
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_set_suspended(dev));
+ KUNIT_EXPECT_EQ(test, 0, dev->power.runtime_error);
+ /* Still suspended */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_get(dev));
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_barrier(dev)); /* resume was pending */
+ pm_runtime_put(dev);
+ pm_runtime_suspend(dev); /* flush the put(), to suspend */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_get_sync(dev));
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync(dev));
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_get_sync(dev));
+ pm_runtime_put_autosuspend(dev);
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_resume_and_get(dev));
+
+ /*
+ * The following should all return -EAGAIN (usage is non-zero) or 1
+ * (already resumed).
+ */
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_request_idle(dev));
+ KUNIT_EXPECT_EQ(test, 1, pm_request_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_request_autosuspend(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_suspend(dev));
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_autosuspend(dev));
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync(dev));
+
+ /* Suspended again */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+}
+
+/*
+ * Explore a typical probe() sequence in which a device marks itself powered,
+ * but doesn't hold any runtime PM reference, so it suspends as soon as it goes
+ * idle.
+ */
+static void pm_runtime_probe_active_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ KUNIT_EXPECT_TRUE(test, pm_runtime_status_suspended(dev));
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_set_active(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+
+ pm_runtime_enable(dev);
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+
+ /* Nothing to flush. We stay active. */
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_barrier(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+
+ /* Ask for idle? Now we suspend. */
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_idle(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+}
+
+static struct kunit_case pm_runtime_test_cases[] = {
+ KUNIT_CASE(pm_runtime_depth_test),
+ KUNIT_CASE(pm_runtime_already_suspended_test),
+ KUNIT_CASE(pm_runtime_idle_test),
+ KUNIT_CASE(pm_runtime_disabled_test),
+ KUNIT_CASE(pm_runtime_error_test),
+ KUNIT_CASE(pm_runtime_probe_active_test),
+ {}
+};
+
+static struct kunit_suite pm_runtime_test_suite = {
+ .name = "pm_runtime_test_cases",
+ .test_cases = pm_runtime_test_cases,
+};
+
+kunit_test_suite(pm_runtime_test_suite);
+MODULE_DESCRIPTION("Runtime power management unit test suite");
+MODULE_LICENSE("GPL");
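pm_runtime_probe_active_test() above encodes the usual probe() idiom for hardware that powers up active: record the state before enabling runtime PM, then let an idle request suspend the device once no reference is held. A minimal sketch of that driver-side pattern (foo_probe() is illustrative, the pm_runtime_* calls are the real API):

	static int foo_probe(struct device *dev)
	{
		int ret;

		/* The hardware is already powered: record that first... */
		ret = pm_runtime_set_active(dev);
		if (ret)
			return ret;

		/* ...then hand the device over to runtime PM. */
		pm_runtime_enable(dev);

		/* No usage count is held, so this may suspend immediately. */
		pm_runtime_idle(dev);
		return 0;
	}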
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 3e84dc4122de..1b11a3cd4acc 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -498,6 +498,9 @@ static int rpm_idle(struct device *dev, int rpmflags)
if (retval < 0)
; /* Conditions are wrong. */
+ else if ((rpmflags & RPM_GET_PUT) && retval == 1)
+ ; /* put() is allowed in RPM_SUSPENDED */
+
/* Idle notifications are allowed only in the RPM_ACTIVE state. */
else if (dev->power.runtime_status != RPM_ACTIVE)
retval = -EAGAIN;
@@ -796,6 +799,8 @@ static int rpm_resume(struct device *dev, int rpmflags)
if (dev->power.runtime_status == RPM_ACTIVE &&
dev->power.last_status == RPM_ACTIVE)
retval = 1;
+ else if (rpmflags & RPM_TRANSPARENT)
+ goto out;
else
retval = -EACCES;
}
@@ -1903,8 +1908,7 @@ void pm_runtime_get_suppliers(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
- device_links_read_lock_held())
+ dev_for_each_link_to_supplier(link, dev)
if (device_link_test(link, DL_FLAG_PM_RUNTIME)) {
link->supplier_preactivated = true;
pm_runtime_get_sync(link->supplier);
diff --git a/drivers/base/property.c b/drivers/base/property.c
index f626d5bbe806..6a63860579dd 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -578,7 +578,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_match_property_string);
* @prop: The name of the property
* @nargs_prop: The name of the property telling the number of
* arguments in the referred node. NULL if @nargs is known,
- * otherwise @nargs is ignored. Only relevant on OF.
+ * otherwise @nargs is ignored.
* @nargs: Number of arguments. Ignored if @nargs_prop is non-NULL.
* @index: Index of the reference, from zero onwards.
* @args: Result structure with reference and integer arguments.
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index d1585f073776..6112d942499b 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -21,6 +21,7 @@
struct regmap_irq_chip_data {
struct mutex lock;
+ struct lock_class_key lock_key;
struct irq_chip irq_chip;
struct regmap *map;
@@ -801,7 +802,13 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
goto err_alloc;
}
- mutex_init(&d->lock);
+ /*
+ * If one regmap-irq is the parent of another, we may try to
+ * lock the child while the parent is already locked. Use an
+ * explicit lock_key so lockdep can tell the instances apart.
+ */
+ lockdep_register_key(&d->lock_key);
+ mutex_init_with_key(&d->lock, &d->lock_key);
for (i = 0; i < chip->num_irqs; i++)
d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
@@ -816,7 +823,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
d->mask_buf[i],
chip->irq_drv_data);
if (ret)
- goto err_alloc;
+ goto err_mutex;
}
if (chip->mask_base && !chip->handle_mask_sync) {
@@ -827,7 +834,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (ret) {
dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
reg, ret);
- goto err_alloc;
+ goto err_mutex;
}
}
@@ -838,7 +845,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (ret) {
dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
reg, ret);
- goto err_alloc;
+ goto err_mutex;
}
}
@@ -855,7 +862,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (ret != 0) {
dev_err(map->dev, "Failed to read IRQ status: %d\n",
ret);
- goto err_alloc;
+ goto err_mutex;
}
}
@@ -879,7 +886,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (ret != 0) {
dev_err(map->dev, "Failed to ack 0x%x: %d\n",
reg, ret);
- goto err_alloc;
+ goto err_mutex;
}
}
}
@@ -901,7 +908,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (ret != 0) {
dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
reg, ret);
- goto err_alloc;
+ goto err_mutex;
}
}
}
@@ -910,7 +917,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (chip->status_is_level) {
ret = read_irq_data(d);
if (ret < 0)
- goto err_alloc;
+ goto err_mutex;
memcpy(d->prev_status_buf, d->status_buf,
array_size(d->chip->num_regs, sizeof(d->prev_status_buf[0])));
@@ -918,7 +925,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
ret = regmap_irq_create_domain(fwnode, irq_base, chip, d);
if (ret)
- goto err_alloc;
+ goto err_mutex;
ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
irq_flags | IRQF_ONESHOT,
@@ -935,6 +942,9 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
err_domain:
/* Should really dispose of the domain but... */
+err_mutex:
+ mutex_destroy(&d->lock);
+ lockdep_unregister_key(&d->lock_key);
err_alloc:
kfree(d->type_buf);
kfree(d->type_buf_def);
@@ -1027,6 +1037,8 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
kfree(d->config_buf[i]);
kfree(d->config_buf);
}
+ mutex_destroy(&d->lock);
+ lockdep_unregister_key(&d->lock_key);
kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
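The per-chip lock_class_key added above matters because lockdep classes a mutex by its mutex_init() call site: every regmap-irq instance otherwise shares one class, so a parent chip taking its child's lock looks like recursive locking. Registering a dynamic key per instance gives each mutex its own class; the pattern in isolation (names illustrative):

	struct chip_data {
		struct mutex lock;
		struct lock_class_key lock_key;
	};

	static void chip_data_init(struct chip_data *d)
	{
		/* One lockdep class per instance, not per init site. */
		lockdep_register_key(&d->lock_key);
		mutex_init_with_key(&d->lock, &d->lock_key);
	}

	static void chip_data_destroy(struct chip_data *d)
	{
		mutex_destroy(&d->lock);
		lockdep_unregister_key(&d->lock_key);
	}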
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index 99d7fd85ca7d..29e5f3175301 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -609,4 +609,5 @@ void regmap_mmio_detach_clk(struct regmap *map)
}
EXPORT_SYMBOL_GPL(regmap_mmio_detach_clk);
+MODULE_DESCRIPTION("regmap MMIO Module");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 1f3f782a04ba..ce9be3989a21 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -827,7 +827,7 @@ struct regmap *__regmap_init(struct device *dev,
map->read_flag_mask = bus->read_flag_mask;
}
- if (config && config->read && config->write) {
+ if (config->read && config->write) {
map->reg_read = _regmap_bus_read;
if (config->reg_update_bits)
map->reg_update_bits = config->reg_update_bits;
@@ -2258,12 +2258,14 @@ EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
* @field: Register field to operate on
* @bits: Bits to test
*
- * Returns -1 if the underlying regmap_field_read() fails, 0 if at least one of the
- * tested bits is not set and 1 if all tested bits are set.
+ * Returns negative errno if the underlying regmap_field_read() fails,
+ * 0 if at least one of the tested bits is not set and 1 if all tested
+ * bits are set.
*/
int regmap_field_test_bits(struct regmap_field *field, unsigned int bits)
{
- unsigned int val, ret;
+ unsigned int val;
+ int ret;
ret = regmap_field_read(field, &val);
if (ret)
@@ -3309,7 +3311,8 @@ EXPORT_SYMBOL_GPL(regmap_update_bits_base);
*/
int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
{
- unsigned int val, ret;
+ unsigned int val;
+ int ret;
ret = regmap_read(map, reg, &val);
if (ret)
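Both hunks above fix the same latent type bug: 'unsigned int val, ret;' made ret unsigned, so the negative errno returned by regmap_read()/regmap_field_read() was stored as a huge positive value; 'if (ret)' happened to work, but any 'ret < 0' style check would not. A reduced sketch of the corrected shape:

	static int read_and_test(struct regmap *map, unsigned int reg)
	{
		unsigned int val;
		int ret;	/* signed, unlike the old 'unsigned int val, ret;' */

		ret = regmap_read(map, reg, &val);
		if (ret)
			return ret;	/* negative errno propagates as negative */

		return val ? 1 : 0;
	}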
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index deda7f35a059..be1e9e61a7bf 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -844,7 +844,7 @@ swnode_register(const struct software_node *node, struct swnode *parent,
* of this function or by ordering the array such that parent comes before
* child.
*/
-int software_node_register_node_group(const struct software_node **node_group)
+int software_node_register_node_group(const struct software_node * const *node_group)
{
unsigned int i;
int ret;
@@ -877,8 +877,7 @@ EXPORT_SYMBOL_GPL(software_node_register_node_group);
* remove the nodes individually, in the correct order (child before
* parent).
*/
-void software_node_unregister_node_group(
- const struct software_node **node_group)
+void software_node_unregister_node_group(const struct software_node * const *node_group)
{
unsigned int i = 0;
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index f021e27644e0..658c7e2ac8bf 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -186,7 +186,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
chip->request = bcma_gpio_request;
chip->free = bcma_gpio_free;
chip->get = bcma_gpio_get_value;
- chip->set_rv = bcma_gpio_set_value;
+ chip->set = bcma_gpio_set_value;
chip->direction_input = bcma_gpio_direction_input;
chip->direction_output = bcma_gpio_direction_output;
chip->parent = bus->dev;
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index df38fb364904..77d694448990 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -17,6 +17,7 @@ menuconfig BLK_DEV
if BLK_DEV
source "drivers/block/null_blk/Kconfig"
+source "drivers/block/rnull/Kconfig"
config BLK_DEV_FD
tristate "Normal floppy disk support"
@@ -311,15 +312,6 @@ config VIRTIO_BLK
This is the virtual block driver for virtio. It can be used with
QEMU based VMMs (like KVM or Xen). Say Y or M.
-config BLK_DEV_RUST_NULL
- tristate "Rust null block driver (Experimental)"
- depends on RUST
- help
- This is the Rust implementation of the null block driver. For now it
- is only a minimal stub.
-
- If unsure, say N.
-
config BLK_DEV_RBD
tristate "Rados block device (RBD)"
depends on INET && BLOCK
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index a695ce74ef22..2d8096eb8cdf 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -9,9 +9,6 @@
# needed for trace events
ccflags-y += -I$(src)
-obj-$(CONFIG_BLK_DEV_RUST_NULL) += rnull_mod.o
-rnull_mod-y := rnull.o
-
obj-$(CONFIG_MAC_FLOPPY) += swim3.o
obj-$(CONFIG_BLK_DEV_SWIM) += swim_mod.o
obj-$(CONFIG_BLK_DEV_FD) += floppy.o
@@ -38,6 +35,7 @@ obj-$(CONFIG_ZRAM) += zram/
obj-$(CONFIG_BLK_DEV_RNBD) += rnbd/
obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk/
+obj-$(CONFIG_BLK_DEV_RUST_NULL) += rnull/
obj-$(CONFIG_BLK_DEV_UBLK) += ublk_drv.o
obj-$(CONFIG_BLK_DEV_ZONED_LOOP) += zloop.o
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 6357d86eafdc..2932b6653b6f 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1523,13 +1523,13 @@ static blk_status_t amiflop_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
-static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int fd_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- int drive = MINOR(bdev->bd_dev) & 3;
+ struct amiga_floppy_struct *p = disk->private_data;
- geo->heads = unit[drive].type->heads;
- geo->sectors = unit[drive].dtype->sects * unit[drive].type->sect_mult;
- geo->cylinders = unit[drive].type->tracks;
+ geo->heads = p->type->heads;
+ geo->sectors = p->dtype->sects * p->type->sect_mult;
+ geo->cylinders = p->type->tracks;
return 0;
}
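This is the first of several conversions in this series (amiflop, aoe, floppy, mtip32xx below) to the new getgeo prototype in block_device_operations, which now receives the gendisk directly instead of a block_device. For any driver the change has roughly this shape (the foo_* names are illustrative):

	/* Old: int (*getgeo)(struct block_device *bdev, struct hd_geometry *geo);
	 * New: the private data comes straight off the disk. */
	static int foo_getgeo(struct gendisk *disk, struct hd_geometry *geo)
	{
		struct foo_device *p = disk->private_data;	/* hypothetical type */

		geo->heads = 2;		/* placeholder geometry */
		geo->sectors = 18;
		geo->cylinders = 80;
		return 0;
	}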
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 00b74a845328..34ead75e7e02 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -269,9 +269,9 @@ static blk_status_t aoeblk_queue_rq(struct blk_mq_hw_ctx *hctx,
}
static int
-aoeblk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+aoeblk_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct aoedev *d = bdev->bd_disk->private_data;
+ struct aoedev *d = disk->private_data;
if ((d->flags & DEVFL_UP) == 0) {
printk(KERN_ERR "aoe: disk not up\n");
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 6298f8e271e3..a9affb7c264d 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -1761,6 +1761,6 @@ aoecmd_exit(void)
kfree(kts);
kfree(ktiowq);
- free_page((unsigned long) page_address(empty_page));
+ __free_page(empty_page);
empty_page = NULL;
}
diff --git a/drivers/block/aoe/aoemain.c b/drivers/block/aoe/aoemain.c
index cdf6e4041bb9..3b21750038ee 100644
--- a/drivers/block/aoe/aoemain.c
+++ b/drivers/block/aoe/aoemain.c
@@ -44,7 +44,7 @@ aoe_init(void)
{
int ret;
- aoe_wq = alloc_workqueue("aoe_wq", 0, 0);
+ aoe_wq = alloc_workqueue("aoe_wq", WQ_PERCPU, 0);
if (!aoe_wq)
return -ENOMEM;
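The WQ_PERCPU flag added here (and to rbd and nbd further down) makes the historically implicit per-CPU behaviour explicit; this looks like part of the migration towards unbound-by-default workqueues, where callers that genuinely want per-CPU execution must now say so:

	/* Explicitly per-CPU; previously this was the implicit default. */
	wq = alloc_workqueue("foo_wq", WQ_PERCPU, 0);
	if (!wq)
		return -ENOMEM;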
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 0c2eabe14af3..9778259b30d4 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -44,45 +44,74 @@ struct brd_device {
};
/*
- * Look up and return a brd's page for a given sector.
+ * Look up and return a brd's page for a given sector, with a reference grabbed.
*/
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
- return xa_load(&brd->brd_pages, sector >> PAGE_SECTORS_SHIFT);
+ struct page *page;
+ XA_STATE(xas, &brd->brd_pages, sector >> PAGE_SECTORS_SHIFT);
+
+ rcu_read_lock();
+repeat:
+ page = xas_load(&xas);
+ if (xas_retry(&xas, page)) {
+ xas_reset(&xas);
+ goto repeat;
+ }
+
+ if (!page)
+ goto out;
+
+ if (!get_page_unless_zero(page)) {
+ xas_reset(&xas);
+ goto repeat;
+ }
+
+ if (unlikely(page != xas_reload(&xas))) {
+ put_page(page);
+ xas_reset(&xas);
+ goto repeat;
+ }
+out:
+ rcu_read_unlock();
+
+ return page;
}
/*
* Insert a new page for a given sector, if one does not already exist.
+ * A reference is grabbed on the returned page.
*/
static struct page *brd_insert_page(struct brd_device *brd, sector_t sector,
blk_opf_t opf)
- __releases(rcu)
- __acquires(rcu)
{
gfp_t gfp = (opf & REQ_NOWAIT) ? GFP_NOWAIT : GFP_NOIO;
struct page *page, *ret;
- rcu_read_unlock();
page = alloc_page(gfp | __GFP_ZERO | __GFP_HIGHMEM);
- if (!page) {
- rcu_read_lock();
+ if (!page)
return ERR_PTR(-ENOMEM);
- }
xa_lock(&brd->brd_pages);
ret = __xa_cmpxchg(&brd->brd_pages, sector >> PAGE_SECTORS_SHIFT, NULL,
page, gfp);
- rcu_read_lock();
- if (ret) {
+ if (!ret) {
+ brd->brd_nr_pages++;
+ get_page(page);
+ xa_unlock(&brd->brd_pages);
+ return page;
+ }
+
+ if (!xa_is_err(ret)) {
+ get_page(ret);
xa_unlock(&brd->brd_pages);
- __free_page(page);
- if (xa_is_err(ret))
- return ERR_PTR(xa_err(ret));
+ put_page(page);
return ret;
}
- brd->brd_nr_pages++;
+
xa_unlock(&brd->brd_pages);
- return page;
+ put_page(page);
+ return ERR_PTR(xa_err(ret));
}
/*
@@ -95,7 +124,7 @@ static void brd_free_pages(struct brd_device *brd)
pgoff_t idx;
xa_for_each(&brd->brd_pages, idx, page) {
- __free_page(page);
+ put_page(page);
cond_resched();
}
@@ -117,7 +146,6 @@ static bool brd_rw_bvec(struct brd_device *brd, struct bio *bio)
bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
- rcu_read_lock();
page = brd_lookup_page(brd, sector);
if (!page && op_is_write(opf)) {
page = brd_insert_page(brd, sector, opf);
@@ -135,13 +163,13 @@ static bool brd_rw_bvec(struct brd_device *brd, struct bio *bio)
memset(kaddr, 0, bv.bv_len);
}
kunmap_local(kaddr);
- rcu_read_unlock();
bio_advance_iter_single(bio, &bio->bi_iter, bv.bv_len);
+ if (page)
+ put_page(page);
return true;
out_error:
- rcu_read_unlock();
if (PTR_ERR(page) == -ENOMEM && (opf & REQ_NOWAIT))
bio_wouldblock_error(bio);
else
@@ -149,13 +177,6 @@ out_error:
return false;
}
-static void brd_free_one_page(struct rcu_head *head)
-{
- struct page *page = container_of(head, struct page, rcu_head);
-
- __free_page(page);
-}
-
static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
{
sector_t aligned_sector = round_up(sector, PAGE_SECTORS);
@@ -170,7 +191,7 @@ static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
while (aligned_sector < aligned_end && aligned_sector < rd_size * 2) {
page = __xa_erase(&brd->brd_pages, aligned_sector >> PAGE_SECTORS_SHIFT);
if (page) {
- call_rcu(&page->rcu_head, brd_free_one_page);
+ put_page(page);
brd->brd_nr_pages--;
}
aligned_sector += PAGE_SECTORS;
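The reworked brd_lookup_page() above follows the standard lockless "load, pin, re-check" sequence also used by the page cache: load the entry under RCU, take a reference only if the refcount is non-zero, then reload the slot to confirm the page was not replaced while the reference was being taken. Distilled to its control flow:

	/* Sketch of the lookup-with-reference dance, as in brd_lookup_page(). */
	rcu_read_lock();
	for (;;) {
		page = xas_load(&xas);
		if (xas_retry(&xas, page)) {
			xas_reset(&xas);
			continue;		/* internal retry entry */
		}
		if (!page)
			break;			/* nothing at this index */
		if (!get_page_unless_zero(page)) {
			xas_reset(&xas);
			continue;		/* page was mid-free */
		}
		if (page != xas_reload(&xas)) {
			put_page(page);
			xas_reset(&xas);
			continue;		/* slot changed under us */
		}
		break;				/* stable reference held */
	}
	rcu_read_unlock();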
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index e21492981f7d..f6d6276974ee 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -380,6 +380,9 @@ enum {
/* this is/was a write request */
__EE_WRITE,
+ /* hand back using mempool_free(e, drbd_buffer_page_pool) */
+ __EE_RELEASE_TO_MEMPOOL,
+
/* this is/was a write same request */
__EE_WRITE_SAME,
@@ -402,6 +405,7 @@ enum {
#define EE_IN_INTERVAL_TREE (1<<__EE_IN_INTERVAL_TREE)
#define EE_SUBMITTED (1<<__EE_SUBMITTED)
#define EE_WRITE (1<<__EE_WRITE)
+#define EE_RELEASE_TO_MEMPOOL (1<<__EE_RELEASE_TO_MEMPOOL)
#define EE_WRITE_SAME (1<<__EE_WRITE_SAME)
#define EE_APPLICATION (1<<__EE_APPLICATION)
#define EE_RS_THIN_REQ (1<<__EE_RS_THIN_REQ)
@@ -858,7 +862,6 @@ struct drbd_device {
struct list_head sync_ee; /* IO in progress (P_RS_DATA_REPLY gets written to disk) */
struct list_head done_ee; /* need to send P_WRITE_ACK */
struct list_head read_ee; /* [RS]P_DATA_REQUEST being read */
- struct list_head net_ee; /* zero-copy network send in progress */
struct list_head resync_reads;
atomic_t pp_in_use; /* allocated from page pool */
@@ -1329,24 +1332,6 @@ extern struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
extern mempool_t drbd_request_mempool;
extern mempool_t drbd_ee_mempool;
-/* drbd's page pool, used to buffer data received from the peer,
- * or data requested by the peer.
- *
- * This does not have an emergency reserve.
- *
- * When allocating from this pool, it first takes pages from the pool.
- * Only if the pool is depleted will try to allocate from the system.
- *
- * The assumption is that pages taken from this pool will be processed,
- * and given back, "quickly", and then can be recycled, so we can avoid
- * frequent calls to alloc_page(), and still will be able to make progress even
- * under memory pressure.
- */
-extern struct page *drbd_pp_pool;
-extern spinlock_t drbd_pp_lock;
-extern int drbd_pp_vacant;
-extern wait_queue_head_t drbd_pp_wait;
-
/* We also need a standard (emergency-reserve backed) page pool
* for meta data IO (activity log, bitmap).
* We can keep it global, as long as it is used as "N pages at a time".
@@ -1354,6 +1339,7 @@ extern wait_queue_head_t drbd_pp_wait;
*/
#define DRBD_MIN_POOL_PAGES 128
extern mempool_t drbd_md_io_page_pool;
+extern mempool_t drbd_buffer_page_pool;
/* We also need to make sure we get a bio
* when we need it for housekeeping purposes */
@@ -1488,10 +1474,7 @@ extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *,
sector_t, unsigned int,
unsigned int,
gfp_t) __must_hold(local);
-extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
- int);
-#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
-#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
+extern void drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *req);
extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool);
extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
extern int drbd_connected(struct drbd_peer_device *);
@@ -1610,16 +1593,6 @@ static inline struct page *page_chain_next(struct page *page)
for (; page && ({ n = page_chain_next(page); 1; }); page = n)
-static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
-{
- struct page *page = peer_req->pages;
- page_chain_for_each(page) {
- if (page_count(page) > 1)
- return 1;
- }
- return 0;
-}
-
static inline union drbd_state drbd_read_state(struct drbd_device *device)
{
struct drbd_resource *resource = device->resource;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 52724b79be30..c73376886e7a 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -114,20 +114,10 @@ struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
mempool_t drbd_request_mempool;
mempool_t drbd_ee_mempool;
mempool_t drbd_md_io_page_pool;
+mempool_t drbd_buffer_page_pool;
struct bio_set drbd_md_io_bio_set;
struct bio_set drbd_io_bio_set;
-/* I do not use a standard mempool, because:
- 1) I want to hand out the pre-allocated objects first.
- 2) I want to be able to interrupt sleeping allocation with a signal.
- Note: This is a single linked list, the next pointer is the private
- member of struct page.
- */
-struct page *drbd_pp_pool;
-DEFINE_SPINLOCK(drbd_pp_lock);
-int drbd_pp_vacant;
-wait_queue_head_t drbd_pp_wait;
-
DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
static const struct block_device_operations drbd_ops = {
@@ -1611,6 +1601,7 @@ static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *b
static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
struct drbd_peer_request *peer_req)
{
+ bool use_sendpage = !(peer_req->flags & EE_RELEASE_TO_MEMPOOL);
struct page *page = peer_req->pages;
unsigned len = peer_req->i.size;
int err;
@@ -1619,8 +1610,13 @@ static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
page_chain_for_each(page) {
unsigned l = min_t(unsigned, len, PAGE_SIZE);
- err = _drbd_send_page(peer_device, page, 0, l,
- page_chain_next(page) ? MSG_MORE : 0);
+ if (likely(use_sendpage))
+ err = _drbd_send_page(peer_device, page, 0, l,
+ page_chain_next(page) ? MSG_MORE : 0);
+ else
+ err = _drbd_no_send_page(peer_device, page, 0, l,
+ page_chain_next(page) ? MSG_MORE : 0);
+
if (err)
return err;
len -= l;
@@ -1962,7 +1958,6 @@ void drbd_init_set_defaults(struct drbd_device *device)
INIT_LIST_HEAD(&device->sync_ee);
INIT_LIST_HEAD(&device->done_ee);
INIT_LIST_HEAD(&device->read_ee);
- INIT_LIST_HEAD(&device->net_ee);
INIT_LIST_HEAD(&device->resync_reads);
INIT_LIST_HEAD(&device->resync_work.list);
INIT_LIST_HEAD(&device->unplug_work.list);
@@ -2043,7 +2038,6 @@ void drbd_device_cleanup(struct drbd_device *device)
D_ASSERT(device, list_empty(&device->sync_ee));
D_ASSERT(device, list_empty(&device->done_ee));
D_ASSERT(device, list_empty(&device->read_ee));
- D_ASSERT(device, list_empty(&device->net_ee));
D_ASSERT(device, list_empty(&device->resync_reads));
D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
D_ASSERT(device, list_empty(&device->resync_work.list));
@@ -2055,19 +2049,11 @@ void drbd_device_cleanup(struct drbd_device *device)
static void drbd_destroy_mempools(void)
{
- struct page *page;
-
- while (drbd_pp_pool) {
- page = drbd_pp_pool;
- drbd_pp_pool = (struct page *)page_private(page);
- __free_page(page);
- drbd_pp_vacant--;
- }
-
/* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
bioset_exit(&drbd_io_bio_set);
bioset_exit(&drbd_md_io_bio_set);
+ mempool_exit(&drbd_buffer_page_pool);
mempool_exit(&drbd_md_io_page_pool);
mempool_exit(&drbd_ee_mempool);
mempool_exit(&drbd_request_mempool);
@@ -2086,9 +2072,8 @@ static void drbd_destroy_mempools(void)
static int drbd_create_mempools(void)
{
- struct page *page;
const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count;
- int i, ret;
+ int ret;
/* caches */
drbd_request_cache = kmem_cache_create(
@@ -2125,6 +2110,10 @@ static int drbd_create_mempools(void)
if (ret)
goto Enomem;
+ ret = mempool_init_page_pool(&drbd_buffer_page_pool, number, 0);
+ if (ret)
+ goto Enomem;
+
ret = mempool_init_slab_pool(&drbd_request_mempool, number,
drbd_request_cache);
if (ret)
@@ -2134,15 +2123,6 @@ static int drbd_create_mempools(void)
if (ret)
goto Enomem;
- for (i = 0; i < number; i++) {
- page = alloc_page(GFP_HIGHUSER);
- if (!page)
- goto Enomem;
- set_page_private(page, (unsigned long)drbd_pp_pool);
- drbd_pp_pool = page;
- }
- drbd_pp_vacant = number;
-
return 0;
Enomem:
@@ -2169,10 +2149,6 @@ static void drbd_release_all_peer_reqs(struct drbd_device *device)
rr = drbd_free_peer_reqs(device, &device->done_ee);
if (rr)
drbd_err(device, "%d EEs in done list found!\n", rr);
-
- rr = drbd_free_peer_reqs(device, &device->net_ee);
- if (rr)
- drbd_err(device, "%d EEs in net list found!\n", rr);
}
/* caution. no locking. */
@@ -2863,11 +2839,6 @@ static int __init drbd_init(void)
return err;
}
- /*
- * allocate all necessary structs
- */
- init_waitqueue_head(&drbd_pp_wait);
-
drbd_proc = NULL; /* play safe for drbd_cleanup */
idr_init(&drbd_devices);
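The hand-rolled drbd_pp_pool page list removed above is replaced by a standard mempool of order-0 pages. Note the semantics differ slightly from the old pool: mempool_alloc() tries the page allocator first and falls back to the reserve only under pressure, which is acceptable here because pages are returned promptly via mempool_free(). The core of the new scheme, in isolation:

	#include <linux/mempool.h>

	static mempool_t page_pool;

	static int pool_setup(unsigned int nr_pages)
	{
		/* Pre-allocate a reserve of nr_pages order-0 pages. */
		return mempool_init_page_pool(&page_pool, nr_pages, 0);
	}

	static struct page *pool_get(gfp_t gfp)
	{
		return mempool_alloc(&page_pool, gfp);
	}

	static void pool_put(struct page *page)
	{
		mempool_free(page, &page_pool);
	}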
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index e09930c2b226..91f3b8afb63c 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1330,6 +1330,7 @@ void drbd_reconsider_queue_parameters(struct drbd_device *device,
lim.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS;
else
lim.max_write_zeroes_sectors = 0;
+ lim.max_hw_wzeroes_unmap_sectors = 0;
if ((lim.discard_granularity >> SECTOR_SHIFT) >
lim.max_hw_discard_sectors) {
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 975024cf03c5..caaf2781136d 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -33,6 +33,7 @@
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <linux/part_stat.h>
+#include <linux/mempool.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
@@ -63,182 +64,31 @@ static int e_end_block(struct drbd_work *, int);
#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
-/*
- * some helper functions to deal with single linked page lists,
- * page->private being our "next" pointer.
- */
-
-/* If at least n pages are linked at head, get n pages off.
- * Otherwise, don't modify head, and return NULL.
- * Locking is the responsibility of the caller.
- */
-static struct page *page_chain_del(struct page **head, int n)
-{
- struct page *page;
- struct page *tmp;
-
- BUG_ON(!n);
- BUG_ON(!head);
-
- page = *head;
-
- if (!page)
- return NULL;
-
- while (page) {
- tmp = page_chain_next(page);
- if (--n == 0)
- break; /* found sufficient pages */
- if (tmp == NULL)
- /* insufficient pages, don't use any of them. */
- return NULL;
- page = tmp;
- }
-
- /* add end of list marker for the returned list */
- set_page_private(page, 0);
- /* actual return value, and adjustment of head */
- page = *head;
- *head = tmp;
- return page;
-}
-
-/* may be used outside of locks to find the tail of a (usually short)
- * "private" page chain, before adding it back to a global chain head
- * with page_chain_add() under a spinlock. */
-static struct page *page_chain_tail(struct page *page, int *len)
-{
- struct page *tmp;
- int i = 1;
- while ((tmp = page_chain_next(page))) {
- ++i;
- page = tmp;
- }
- if (len)
- *len = i;
- return page;
-}
-
-static int page_chain_free(struct page *page)
-{
- struct page *tmp;
- int i = 0;
- page_chain_for_each_safe(page, tmp) {
- put_page(page);
- ++i;
- }
- return i;
-}
-
-static void page_chain_add(struct page **head,
- struct page *chain_first, struct page *chain_last)
-{
-#if 1
- struct page *tmp;
- tmp = page_chain_tail(chain_first, NULL);
- BUG_ON(tmp != chain_last);
-#endif
-
- /* add chain to head */
- set_page_private(chain_last, (unsigned long)*head);
- *head = chain_first;
-}
-
-static struct page *__drbd_alloc_pages(struct drbd_device *device,
- unsigned int number)
+static struct page *__drbd_alloc_pages(unsigned int number)
{
struct page *page = NULL;
struct page *tmp = NULL;
unsigned int i = 0;
- /* Yes, testing drbd_pp_vacant outside the lock is racy.
- * So what. It saves a spin_lock. */
- if (drbd_pp_vacant >= number) {
- spin_lock(&drbd_pp_lock);
- page = page_chain_del(&drbd_pp_pool, number);
- if (page)
- drbd_pp_vacant -= number;
- spin_unlock(&drbd_pp_lock);
- if (page)
- return page;
- }
-
/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
* "criss-cross" setup, that might cause write-out on some other DRBD,
* which in turn might block on the other node at this very place. */
for (i = 0; i < number; i++) {
- tmp = alloc_page(GFP_TRY);
+ tmp = mempool_alloc(&drbd_buffer_page_pool, GFP_TRY);
if (!tmp)
- break;
+ goto fail;
set_page_private(tmp, (unsigned long)page);
page = tmp;
}
-
- if (i == number)
- return page;
-
- /* Not enough pages immediately available this time.
- * No need to jump around here, drbd_alloc_pages will retry this
- * function "soon". */
- if (page) {
- tmp = page_chain_tail(page, NULL);
- spin_lock(&drbd_pp_lock);
- page_chain_add(&drbd_pp_pool, page, tmp);
- drbd_pp_vacant += i;
- spin_unlock(&drbd_pp_lock);
+ return page;
+fail:
+ page_chain_for_each_safe(page, tmp) {
+ set_page_private(page, 0);
+ mempool_free(page, &drbd_buffer_page_pool);
}
return NULL;
}
-static void reclaim_finished_net_peer_reqs(struct drbd_device *device,
- struct list_head *to_be_freed)
-{
- struct drbd_peer_request *peer_req, *tmp;
-
- /* The EEs are always appended to the end of the list. Since
- they are sent in order over the wire, they have to finish
- in order. As soon as we see the first not finished we can
- stop to examine the list... */
-
- list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) {
- if (drbd_peer_req_has_active_page(peer_req))
- break;
- list_move(&peer_req->w.list, to_be_freed);
- }
-}
-
-static void drbd_reclaim_net_peer_reqs(struct drbd_device *device)
-{
- LIST_HEAD(reclaimed);
- struct drbd_peer_request *peer_req, *t;
-
- spin_lock_irq(&device->resource->req_lock);
- reclaim_finished_net_peer_reqs(device, &reclaimed);
- spin_unlock_irq(&device->resource->req_lock);
- list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
- drbd_free_net_peer_req(device, peer_req);
-}
-
-static void conn_reclaim_net_peer_reqs(struct drbd_connection *connection)
-{
- struct drbd_peer_device *peer_device;
- int vnr;
-
- rcu_read_lock();
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
- struct drbd_device *device = peer_device->device;
- if (!atomic_read(&device->pp_in_use_by_net))
- continue;
-
- kref_get(&device->kref);
- rcu_read_unlock();
- drbd_reclaim_net_peer_reqs(device);
- kref_put(&device->kref, drbd_destroy_device);
- rcu_read_lock();
- }
- rcu_read_unlock();
-}
-
/**
* drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
* @peer_device: DRBD device.
@@ -263,9 +113,8 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int
bool retry)
{
struct drbd_device *device = peer_device->device;
- struct page *page = NULL;
+ struct page *page;
struct net_conf *nc;
- DEFINE_WAIT(wait);
unsigned int mxb;
rcu_read_lock();
@@ -273,37 +122,9 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int
mxb = nc ? nc->max_buffers : 1000000;
rcu_read_unlock();
- if (atomic_read(&device->pp_in_use) < mxb)
- page = __drbd_alloc_pages(device, number);
-
- /* Try to keep the fast path fast, but occasionally we need
- * to reclaim the pages we lended to the network stack. */
- if (page && atomic_read(&device->pp_in_use_by_net) > 512)
- drbd_reclaim_net_peer_reqs(device);
-
- while (page == NULL) {
- prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
-
- drbd_reclaim_net_peer_reqs(device);
-
- if (atomic_read(&device->pp_in_use) < mxb) {
- page = __drbd_alloc_pages(device, number);
- if (page)
- break;
- }
-
- if (!retry)
- break;
-
- if (signal_pending(current)) {
- drbd_warn(device, "drbd_alloc_pages interrupted!\n");
- break;
- }
-
- if (schedule_timeout(HZ/10) == 0)
- mxb = UINT_MAX;
- }
- finish_wait(&drbd_pp_wait, &wait);
+ if (atomic_read(&device->pp_in_use) >= mxb)
+ schedule_timeout_interruptible(HZ / 10);
+ page = __drbd_alloc_pages(number);
if (page)
atomic_add(number, &device->pp_in_use);
@@ -314,29 +135,25 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int
 * Is also used from inside another spin_lock_irq(&resource->req_lock);
* Either links the page chain back to the global pool,
* or returns all pages to the system. */
-static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
+static void drbd_free_pages(struct drbd_device *device, struct page *page)
{
- atomic_t *a = is_net ? &device->pp_in_use_by_net : &device->pp_in_use;
- int i;
+ struct page *tmp;
+ int i = 0;
if (page == NULL)
return;
- if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count)
- i = page_chain_free(page);
- else {
- struct page *tmp;
- tmp = page_chain_tail(page, &i);
- spin_lock(&drbd_pp_lock);
- page_chain_add(&drbd_pp_pool, page, tmp);
- drbd_pp_vacant += i;
- spin_unlock(&drbd_pp_lock);
- }
- i = atomic_sub_return(i, a);
+ page_chain_for_each_safe(page, tmp) {
+ set_page_private(page, 0);
+ if (page_count(page) == 1)
+ mempool_free(page, &drbd_buffer_page_pool);
+ else
+ put_page(page);
+ i++;
+ }
+ i = atomic_sub_return(i, &device->pp_in_use);
if (i < 0)
- drbd_warn(device, "ASSERTION FAILED: %s: %d < 0\n",
- is_net ? "pp_in_use_by_net" : "pp_in_use", i);
- wake_up(&drbd_pp_wait);
+ drbd_warn(device, "ASSERTION FAILED: pp_in_use: %d < 0\n", i);
}
/*
@@ -380,6 +197,8 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto
gfpflags_allow_blocking(gfp_mask));
if (!page)
goto fail;
+ if (!mempool_is_saturated(&drbd_buffer_page_pool))
+ peer_req->flags |= EE_RELEASE_TO_MEMPOOL;
}
memset(peer_req, 0, sizeof(*peer_req));
@@ -403,13 +222,12 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto
return NULL;
}
-void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req,
- int is_net)
+void drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req)
{
might_sleep();
if (peer_req->flags & EE_HAS_DIGEST)
kfree(peer_req->digest);
- drbd_free_pages(device, peer_req->pages, is_net);
+ drbd_free_pages(device, peer_req->pages);
D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0);
D_ASSERT(device, drbd_interval_empty(&peer_req->i));
if (!expect(device, !(peer_req->flags & EE_CALL_AL_COMPLETE_IO))) {
@@ -424,14 +242,13 @@ int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
LIST_HEAD(work_list);
struct drbd_peer_request *peer_req, *t;
int count = 0;
- int is_net = list == &device->net_ee;
spin_lock_irq(&device->resource->req_lock);
list_splice_init(list, &work_list);
spin_unlock_irq(&device->resource->req_lock);
list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
- __drbd_free_peer_req(device, peer_req, is_net);
+ drbd_free_peer_req(device, peer_req);
count++;
}
return count;
@@ -443,18 +260,13 @@ int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
static int drbd_finish_peer_reqs(struct drbd_device *device)
{
LIST_HEAD(work_list);
- LIST_HEAD(reclaimed);
struct drbd_peer_request *peer_req, *t;
int err = 0;
spin_lock_irq(&device->resource->req_lock);
- reclaim_finished_net_peer_reqs(device, &reclaimed);
list_splice_init(&device->done_ee, &work_list);
spin_unlock_irq(&device->resource->req_lock);
- list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
- drbd_free_net_peer_req(device, peer_req);
-
/* possible callbacks here:
* e_end_block, and e_end_resync_block, e_send_superseded.
* all ignore the last argument.
@@ -1975,7 +1787,7 @@ static int drbd_drain_block(struct drbd_peer_device *peer_device, int data_size)
data_size -= len;
}
kunmap(page);
- drbd_free_pages(peer_device->device, page, 0);
+ drbd_free_pages(peer_device->device, page);
return err;
}
@@ -5224,16 +5036,6 @@ static int drbd_disconnected(struct drbd_peer_device *peer_device)
put_ldev(device);
}
- /* tcp_close and release of sendpage pages can be deferred. I don't
- * want to use SO_LINGER, because apparently it can be deferred for
- * more than 20 seconds (longest time I checked).
- *
- * Actually we don't care for exactly when the network stack does its
- * put_page(), but release our reference on these pages right here.
- */
- i = drbd_free_peer_reqs(device, &device->net_ee);
- if (i)
- drbd_info(device, "net_ee not empty, killed %u entries\n", i);
i = atomic_read(&device->pp_in_use_by_net);
if (i)
drbd_info(device, "pp_in_use_by_net = %d, expected 0\n", i);
@@ -5980,8 +5782,6 @@ int drbd_ack_receiver(struct drbd_thread *thi)
while (get_t_state(thi) == RUNNING) {
drbd_thread_current_set_cpu(thi);
- conn_reclaim_net_peer_reqs(connection);
-
if (test_and_clear_bit(SEND_PING, &connection->flags)) {
if (drbd_send_ping(connection)) {
drbd_err(connection, "drbd_send_ping has failed\n");
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index a6ea737b3b71..dea3e79d044f 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -1030,22 +1030,6 @@ out:
return 1;
}
-/* helper */
-static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_request *peer_req)
-{
- if (drbd_peer_req_has_active_page(peer_req)) {
- /* This might happen if sendpage() has not finished */
- int i = PFN_UP(peer_req->i.size);
- atomic_add(i, &device->pp_in_use_by_net);
- atomic_sub(i, &device->pp_in_use);
- spin_lock_irq(&device->resource->req_lock);
- list_add_tail(&peer_req->w.list, &device->net_ee);
- spin_unlock_irq(&device->resource->req_lock);
- wake_up(&drbd_pp_wait);
- } else
- drbd_free_peer_req(device, peer_req);
-}
-
/**
* w_e_end_data_req() - Worker callback, to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST
* @w: work object.
@@ -1059,9 +1043,8 @@ int w_e_end_data_req(struct drbd_work *w, int cancel)
int err;
if (unlikely(cancel)) {
- drbd_free_peer_req(device, peer_req);
- dec_unacked(device);
- return 0;
+ err = 0;
+ goto out;
}
if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
@@ -1074,12 +1057,12 @@ int w_e_end_data_req(struct drbd_work *w, int cancel)
err = drbd_send_ack(peer_device, P_NEG_DREPLY, peer_req);
}
- dec_unacked(device);
-
- move_to_net_ee_or_free(device, peer_req);
-
if (unlikely(err))
drbd_err(device, "drbd_send_block() failed\n");
+out:
+ dec_unacked(device);
+ drbd_free_peer_req(device, peer_req);
+
return err;
}
@@ -1120,9 +1103,8 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
int err;
if (unlikely(cancel)) {
- drbd_free_peer_req(device, peer_req);
- dec_unacked(device);
- return 0;
+ err = 0;
+ goto out;
}
if (get_ldev_if_state(device, D_FAILED)) {
@@ -1155,13 +1137,12 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
/* update resync data with failure */
drbd_rs_failed_io(peer_device, peer_req->i.sector, peer_req->i.size);
}
-
- dec_unacked(device);
-
- move_to_net_ee_or_free(device, peer_req);
-
if (unlikely(err))
drbd_err(device, "drbd_send_block() failed\n");
+out:
+ dec_unacked(device);
+ drbd_free_peer_req(device, peer_req);
+
return err;
}
@@ -1176,9 +1157,8 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
int err, eq = 0;
if (unlikely(cancel)) {
- drbd_free_peer_req(device, peer_req);
- dec_unacked(device);
- return 0;
+ err = 0;
+ goto out;
}
if (get_ldev(device)) {
@@ -1220,12 +1200,12 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
if (drbd_ratelimit())
drbd_err(device, "Sending NegDReply. I guess it gets messy.\n");
}
-
- dec_unacked(device);
- move_to_net_ee_or_free(device, peer_req);
-
if (unlikely(err))
drbd_err(device, "drbd_send_block/ack() failed\n");
+out:
+ dec_unacked(device);
+ drbd_free_peer_req(device, peer_req);
+
return err;
}
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 24be0c2c4075..5336c3c5ca36 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -163,35 +163,35 @@
/* do print messages for unexpected interrupts */
static int print_unex = 1;
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/timer.h>
-#include <linux/workqueue.h>
-#include <linux/fdreg.h>
-#include <linux/fd.h>
-#include <linux/hdreg.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
+#include <linux/async.h>
#include <linux/bio.h>
-#include <linux/string.h>
-#include <linux/jiffies.h>
-#include <linux/fcntl.h>
+#include <linux/compat.h>
#include <linux/delay.h>
-#include <linux/mc146818rtc.h> /* CMOS defines */
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/fd.h>
+#include <linux/fdreg.h>
+#include <linux/fs.h>
+#include <linux/hdreg.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
#include <linux/major.h>
-#include <linux/platform_device.h>
+#include <linux/mc146818rtc.h> /* CMOS defines */
+#include <linux/mm.h>
#include <linux/mod_devicetable.h>
+#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
#include <linux/uaccess.h>
-#include <linux/async.h>
-#include <linux/compat.h>
+#include <linux/workqueue.h>
/*
* PS/2 floppies have much slower step rates than regular floppies.
@@ -233,8 +233,6 @@ static unsigned short virtual_dma_port = 0x3f0;
irqreturn_t floppy_interrupt(int irq, void *dev_id);
static int set_dor(int fdc, char mask, char data);
-#define K_64 0x10000 /* 64KB */
-
/* the following is the mask of allowed drives. By default units 2 and
* 3 of both floppy controllers are disabled, because switching on the
* motor of these drives causes system hangs on some PCI computers. drive
@@ -3092,16 +3090,13 @@ static int raw_cmd_copyin(int cmd, void __user *param,
*rcmd = NULL;
loop:
- ptr = kmalloc(sizeof(struct floppy_raw_cmd), GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
+ ptr = memdup_user(param, sizeof(*ptr));
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
*rcmd = ptr;
- ret = copy_from_user(ptr, param, sizeof(*ptr));
ptr->next = NULL;
ptr->buffer_length = 0;
ptr->kernel_data = NULL;
- if (ret)
- return -EFAULT;
param += sizeof(struct floppy_raw_cmd);
if (ptr->cmd_count > FD_RAW_CMD_FULLSIZE)
return -EINVAL;
@@ -3363,9 +3358,9 @@ static int get_floppy_geometry(int drive, int type, struct floppy_struct **g)
return 0;
}
-static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int fd_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- int drive = (long)bdev->bd_disk->private_data;
+ int drive = (long)disk->private_data;
int type = ITYPE(drive_state[drive].fd_device);
struct floppy_struct *g;
int ret;
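The raw_cmd_copyin() hunk above is the canonical conversion from kmalloc() plus copy_from_user() to memdup_user(), which allocates and copies in one step and reports failure as an ERR_PTR:

	/* General pattern: replaces kmalloc() + copy_from_user(). */
	ptr = memdup_user(param, sizeof(*ptr));
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);	/* -ENOMEM or -EFAULT */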
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 1b6ee91f8eb9..13ce229d450c 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -137,20 +137,35 @@ static void loop_global_unlock(struct loop_device *lo, bool global)
static int max_part;
static int part_shift;
-static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
+static loff_t lo_calculate_size(struct loop_device *lo, struct file *file)
{
loff_t loopsize;
+ int ret;
+
+ if (S_ISBLK(file_inode(file)->i_mode)) {
+ loopsize = i_size_read(file->f_mapping->host);
+ } else {
+ struct kstat stat;
+
+ /*
+ * Get the accurate file size. This provides better results than
+ * cached inode data, particularly for network filesystems where
+ * metadata may be stale.
+ */
+ ret = vfs_getattr_nosec(&file->f_path, &stat, STATX_SIZE, 0);
+ if (ret)
+ return 0;
+
+ loopsize = stat.size;
+ }
- /* Compute loopsize in bytes */
- loopsize = i_size_read(file->f_mapping->host);
- if (offset > 0)
- loopsize -= offset;
+ if (lo->lo_offset > 0)
+ loopsize -= lo->lo_offset;
/* offset is beyond i_size, weird but possible */
if (loopsize < 0)
return 0;
-
- if (sizelimit > 0 && sizelimit < loopsize)
- loopsize = sizelimit;
+ if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize)
+ loopsize = lo->lo_sizelimit;
/*
* Unfortunately, if we want to do I/O on the device,
* the number of 512-byte sectors has to fit into a sector_t.
@@ -158,11 +173,6 @@ static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
return loopsize >> 9;
}
-static loff_t get_loop_size(struct loop_device *lo, struct file *file)
-{
- return get_size(lo->lo_offset, lo->lo_sizelimit, file);
-}
-
/*
* We support direct I/O only if lo_offset is aligned with the logical I/O size
* of backing device, and the logical block size of loop is bigger than that of
@@ -541,8 +551,10 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
return -EBADF;
error = loop_check_backing_file(file);
- if (error)
+ if (error) {
+ fput(file);
return error;
+ }
/* suppress uevents while reconfiguring the device */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
@@ -569,7 +581,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
error = -EINVAL;
/* size of the new backing store needs to be the same */
- if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
+ if (lo_calculate_size(lo, file) != lo_calculate_size(lo, old_file))
goto out_err;
/*
@@ -812,7 +824,7 @@ static void loop_queue_work(struct loop_device *lo, struct loop_cmd *cmd)
if (worker)
goto queue_work;
- worker = kzalloc(sizeof(struct loop_worker), GFP_NOWAIT | __GFP_NOWARN);
+ worker = kzalloc(sizeof(struct loop_worker), GFP_NOWAIT);
/*
* In the event we cannot allocate a worker, just queue on the
* rootcg worker and issue the I/O as the rootcg
@@ -983,8 +995,10 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
return -EBADF;
error = loop_check_backing_file(file);
- if (error)
+ if (error) {
+ fput(file);
return error;
+ }
is_loop = is_loop_device(file);
@@ -1063,7 +1077,7 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
loop_update_dio(lo);
loop_sysfs_init(lo);
- size = get_loop_size(lo, file);
+ size = lo_calculate_size(lo, file);
loop_set_size(lo, size);
/* Order wrt reading lo_state in loop_validate_file(). */
@@ -1255,8 +1269,7 @@ out_unfreeze:
if (partscan)
clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
if (!err && size_changed) {
- loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
- lo->lo_backing_file);
+ loff_t new_size = lo_calculate_size(lo, lo->lo_backing_file);
loop_set_size(lo, new_size);
}
out_unlock:
@@ -1399,7 +1412,7 @@ static int loop_set_capacity(struct loop_device *lo)
if (unlikely(lo->lo_state != Lo_bound))
return -ENXIO;
- size = get_loop_size(lo, lo->lo_backing_file);
+ size = lo_calculate_size(lo, lo->lo_backing_file);
loop_set_size(lo, size);
return 0;
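
The lo_calculate_size() helper above folds the old get_size()/get_loop_size() pair into one routine and, for regular files, asks the filesystem for a fresh size via vfs_getattr_nosec() instead of trusting possibly stale cached inode data. The clamping arithmetic it then applies is simple; the following is a minimal standalone C sketch of just that arithmetic (names and types are illustrative, not the kernel's):

    #include <stdint.h>

    /*
     * Sketch: derive a loop device capacity in 512-byte sectors from
     * the backing-store size in bytes, honoring an offset and an
     * optional size limit, the way the hunk above clamps the value.
     */
    static int64_t capacity_sectors(int64_t bytes, int64_t offset,
                                    int64_t sizelimit)
    {
            if (offset > 0)
                    bytes -= offset;
            if (bytes < 0)          /* offset beyond end of backing store */
                    return 0;
            if (sizelimit > 0 && sizelimit < bytes)
                    bytes = sizelimit;
            return bytes >> 9;      /* bytes -> 512-byte sectors */
    }
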
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 8fc7761397bd..567192e371a8 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3148,17 +3148,17 @@ static int mtip_block_compat_ioctl(struct block_device *dev,
* that each partition is also 4KB aligned. Non-aligned partitions adversely
 * affect performance.
*
- * @dev Pointer to the block_device strucutre.
+ * @disk Pointer to the gendisk structure.
* @geo Pointer to a hd_geometry structure.
*
* return value
* 0 Operation completed successfully.
* -ENOTTY An error occurred while reading the drive capacity.
*/
-static int mtip_block_getgeo(struct block_device *dev,
+static int mtip_block_getgeo(struct gendisk *disk,
struct hd_geometry *geo)
{
- struct driver_data *dd = dev->bd_disk->private_data;
+ struct driver_data *dd = disk->private_data;
sector_t capacity;
if (!dd)
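
This is one instance of a tree-wide convention change running through this series: the ->getgeo() block operation now receives the gendisk directly, so drivers drop the bdev->bd_disk dereference. A minimal sketch of a callback written against the new prototype (the fixed heads/sectors values are illustrative, borrowed from the virtio_blk convention further below):

    #include <linux/blkdev.h>
    #include <linux/hdreg.h>

    /* Fabricate a plausible geometry from the capacity alone. */
    static int mydrv_getgeo(struct gendisk *disk, struct hd_geometry *geo)
    {
            geo->heads = 1 << 6;
            geo->sectors = 1 << 5;
            geo->cylinders = get_capacity(disk) >> 11; /* / (64 * 32) */
            return 0;
    }
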
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 6463d0e8d0ce..1188f32a5e5e 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -311,7 +311,7 @@ static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
if (args) {
INIT_WORK(&args->work, nbd_dead_link_work);
args->index = nbd->index;
- queue_work(system_wq, &args->work);
+ queue_work(system_percpu_wq, &args->work);
}
}
if (!nsock->dead) {
@@ -1217,6 +1217,14 @@ static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
if (!sock)
return NULL;
+ if (!sk_is_tcp(sock->sk) &&
+ !sk_is_stream_unix(sock->sk)) {
+ dev_err(disk_to_dev(nbd->disk), "Unsupported socket: should be TCP or UNIX.\n");
+ *err = -EINVAL;
+ sockfd_put(sock);
+ return NULL;
+ }
+
if (sock->ops->shutdown == sock_no_shutdown) {
dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
*err = -EINVAL;
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 91642c9a3b29..f982027e8c85 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -223,7 +223,7 @@ MODULE_PARM_DESC(discard, "Support discard operations (requires memory-backed nu
static unsigned long g_cache_size;
module_param_named(cache_size, g_cache_size, ulong, 0444);
-MODULE_PARM_DESC(mbps, "Cache size in MiB for memory-backed device. Default: 0 (none)");
+MODULE_PARM_DESC(cache_size, "Cache size in MiB for memory-backed device. Default: 0 (none)");
static bool g_fua = true;
module_param_named(fua, g_fua, bool, 0444);
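
The null_blk fix corrects a copy-paste error in the parameter description: MODULE_PARM_DESC() attaches help text by name, so its first argument must match the name registered with module_param_named(). A minimal sketch of the correct pairing:

    #include <linux/module.h>

    static unsigned long g_cache_size;
    /* The two names below must agree, otherwise modinfo shows the
     * help text under the wrong (or a nonexistent) parameter. */
    module_param_named(cache_size, g_cache_size, ulong, 0444);
    MODULE_PARM_DESC(cache_size,
                     "Cache size in MiB for memory-backed device. Default: 0 (none)");
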
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index faafd7ff43d6..af0e21149dbc 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -7389,7 +7389,7 @@ static int __init rbd_init(void)
* The number of active work items is limited by the number of
* rbd devices * queue depth, so leave @max_active at default.
*/
- rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
+ rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!rbd_wq) {
rc = -ENOMEM;
goto err_out_slab;
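
rbd, rnbd, sunvdc, virtio_blk, nbd, and zram below all pick up the same workqueue annotation: allocation and queueing sites now state CPU placement explicitly (WQ_PERCPU, system_percpu_wq, system_dfl_wq) rather than relying on the historical implicit default. A sketch of the pattern at an allocation site, assuming the WQ_PERCPU flag this work introduces:

    #include <linux/workqueue.h>

    static struct workqueue_struct *example_wq;

    static int __init example_init(void)
    {
            /* per-CPU placement is now spelled out explicitly */
            example_wq = alloc_workqueue("example",
                                         WQ_MEM_RECLAIM | WQ_PERCPU, 0);
            if (!example_wq)
                    return -ENOMEM;
            return 0;
    }
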
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index 15627417f12e..f1409e54010a 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -942,11 +942,11 @@ static void rnbd_client_release(struct gendisk *gen)
rnbd_clt_put_dev(dev);
}
-static int rnbd_client_getgeo(struct block_device *block_device,
+static int rnbd_client_getgeo(struct gendisk *disk,
struct hd_geometry *geo)
{
u64 size;
- struct rnbd_clt_dev *dev = block_device->bd_disk->private_data;
+ struct rnbd_clt_dev *dev = disk->private_data;
struct queue_limits *limit = &dev->queue->limits;
size = dev->size * (limit->logical_block_size / SECTOR_SIZE);
@@ -1809,7 +1809,7 @@ static int __init rnbd_client_init(void)
unregister_blkdev(rnbd_client_major, "rnbd");
return err;
}
- rnbd_clt_wq = alloc_workqueue("rnbd_clt_wq", 0, 0);
+ rnbd_clt_wq = alloc_workqueue("rnbd_clt_wq", WQ_PERCPU, 0);
if (!rnbd_clt_wq) {
pr_err("Failed to load module, alloc_workqueue failed.\n");
rnbd_clt_destroy_sysfs_files();
diff --git a/drivers/block/rnull.rs b/drivers/block/rnull.rs
deleted file mode 100644
index d07e76ae2c13..000000000000
--- a/drivers/block/rnull.rs
+++ /dev/null
@@ -1,80 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-//! This is a Rust implementation of the C null block driver.
-//!
-//! Supported features:
-//!
-//! - blk-mq interface
-//! - direct completion
-//! - block size 4k
-//!
-//! The driver is not configurable.
-
-use kernel::{
- alloc::flags,
- block::mq::{
- self,
- gen_disk::{self, GenDisk},
- Operations, TagSet,
- },
- error::Result,
- new_mutex, pr_info,
- prelude::*,
- sync::{Arc, Mutex},
- types::ARef,
-};
-
-module! {
- type: NullBlkModule,
- name: "rnull_mod",
- authors: ["Andreas Hindborg"],
- description: "Rust implementation of the C null block driver",
- license: "GPL v2",
-}
-
-#[pin_data]
-struct NullBlkModule {
- #[pin]
- _disk: Mutex<GenDisk<NullBlkDevice>>,
-}
-
-impl kernel::InPlaceModule for NullBlkModule {
- fn init(_module: &'static ThisModule) -> impl PinInit<Self, Error> {
- pr_info!("Rust null_blk loaded\n");
-
- // Use a immediately-called closure as a stable `try` block
- let disk = /* try */ (|| {
- let tagset = Arc::pin_init(TagSet::new(1, 256, 1), flags::GFP_KERNEL)?;
-
- gen_disk::GenDiskBuilder::new()
- .capacity_sectors(4096 << 11)
- .logical_block_size(4096)?
- .physical_block_size(4096)?
- .rotational(false)
- .build(format_args!("rnullb{}", 0), tagset)
- })();
-
- try_pin_init!(Self {
- _disk <- new_mutex!(disk?, "nullb:disk"),
- })
- }
-}
-
-struct NullBlkDevice;
-
-#[vtable]
-impl Operations for NullBlkDevice {
- #[inline(always)]
- fn queue_rq(rq: ARef<mq::Request<Self>>, _is_last: bool) -> Result {
- mq::Request::end_ok(rq)
- .map_err(|_e| kernel::error::code::EIO)
- // We take no refcounts on the request, so we expect to be able to
- // end the request. The request reference must be unique at this
- // point, and so `end_ok` cannot fail.
- .expect("Fatal error - expected to be able to end request");
-
- Ok(())
- }
-
- fn commit_rqs() {}
-}
diff --git a/drivers/block/rnull/Kconfig b/drivers/block/rnull/Kconfig
new file mode 100644
index 000000000000..7bc5b376c128
--- /dev/null
+++ b/drivers/block/rnull/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Rust null block device driver configuration
+
+config BLK_DEV_RUST_NULL
+ tristate "Rust null block driver (Experimental)"
+ depends on RUST && CONFIGFS_FS
+ help
+ This is the Rust implementation of the null block driver. Like
+ the C version, the driver allows the user to create virutal block
+ devices that can be configured via various configuration options.
+
+ If unsure, say N.
diff --git a/drivers/block/rnull/Makefile b/drivers/block/rnull/Makefile
new file mode 100644
index 000000000000..11cfa5e615dc
--- /dev/null
+++ b/drivers/block/rnull/Makefile
@@ -0,0 +1,3 @@
+
+obj-$(CONFIG_BLK_DEV_RUST_NULL) += rnull_mod.o
+rnull_mod-y := rnull.o
diff --git a/drivers/block/rnull/configfs.rs b/drivers/block/rnull/configfs.rs
new file mode 100644
index 000000000000..8498e9bae6fd
--- /dev/null
+++ b/drivers/block/rnull/configfs.rs
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use super::{NullBlkDevice, THIS_MODULE};
+use core::fmt::{Display, Write};
+use kernel::{
+ block::mq::gen_disk::{GenDisk, GenDiskBuilder},
+ c_str,
+ configfs::{self, AttributeOperations},
+ configfs_attrs, new_mutex,
+ page::PAGE_SIZE,
+ prelude::*,
+ str::{kstrtobool_bytes, CString},
+ sync::Mutex,
+};
+use pin_init::PinInit;
+
+pub(crate) fn subsystem() -> impl PinInit<kernel::configfs::Subsystem<Config>, Error> {
+ let item_type = configfs_attrs! {
+ container: configfs::Subsystem<Config>,
+ data: Config,
+ child: DeviceConfig,
+ attributes: [
+ features: 0,
+ ],
+ };
+
+ kernel::configfs::Subsystem::new(c_str!("rnull"), item_type, try_pin_init!(Config {}))
+}
+
+#[pin_data]
+pub(crate) struct Config {}
+
+#[vtable]
+impl AttributeOperations<0> for Config {
+ type Data = Config;
+
+ fn show(_this: &Config, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+ writer.write_str("blocksize,size,rotational,irqmode\n")?;
+ Ok(writer.bytes_written())
+ }
+}
+
+#[vtable]
+impl configfs::GroupOperations for Config {
+ type Child = DeviceConfig;
+
+ fn make_group(
+ &self,
+ name: &CStr,
+ ) -> Result<impl PinInit<configfs::Group<DeviceConfig>, Error>> {
+ let item_type = configfs_attrs! {
+ container: configfs::Group<DeviceConfig>,
+ data: DeviceConfig,
+ attributes: [
+ // Named for compatibility with C null_blk
+ power: 0,
+ blocksize: 1,
+ rotational: 2,
+ size: 3,
+ irqmode: 4,
+ ],
+ };
+
+ Ok(configfs::Group::new(
+ name.try_into()?,
+ item_type,
+ // TODO: cannot coerce new_mutex!() to impl PinInit<_, Error>, so put mutex inside
+ try_pin_init!( DeviceConfig {
+ data <- new_mutex!(DeviceConfigInner {
+ powered: false,
+ block_size: 4096,
+ rotational: false,
+ disk: None,
+ capacity_mib: 4096,
+ irq_mode: IRQMode::None,
+ name: name.try_into()?,
+ }),
+ }),
+ ))
+ }
+}
+
+#[derive(Debug, Clone, Copy)]
+pub(crate) enum IRQMode {
+ None,
+ Soft,
+}
+
+impl TryFrom<u8> for IRQMode {
+ type Error = kernel::error::Error;
+
+ fn try_from(value: u8) -> Result<Self> {
+ match value {
+ 0 => Ok(Self::None),
+ 1 => Ok(Self::Soft),
+ _ => Err(EINVAL),
+ }
+ }
+}
+
+impl Display for IRQMode {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ match self {
+ Self::None => f.write_str("0")?,
+ Self::Soft => f.write_str("1")?,
+ }
+ Ok(())
+ }
+}
+
+#[pin_data]
+pub(crate) struct DeviceConfig {
+ #[pin]
+ data: Mutex<DeviceConfigInner>,
+}
+
+#[pin_data]
+struct DeviceConfigInner {
+ powered: bool,
+ name: CString,
+ block_size: u32,
+ rotational: bool,
+ capacity_mib: u64,
+ irq_mode: IRQMode,
+ disk: Option<GenDisk<NullBlkDevice>>,
+}
+
+#[vtable]
+impl configfs::AttributeOperations<0> for DeviceConfig {
+ type Data = DeviceConfig;
+
+ fn show(this: &DeviceConfig, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+
+ if this.data.lock().powered {
+ writer.write_str("1\n")?;
+ } else {
+ writer.write_str("0\n")?;
+ }
+
+ Ok(writer.bytes_written())
+ }
+
+ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
+ let power_op = kstrtobool_bytes(page)?;
+ let mut guard = this.data.lock();
+
+ if !guard.powered && power_op {
+ guard.disk = Some(NullBlkDevice::new(
+ &guard.name,
+ guard.block_size,
+ guard.rotational,
+ guard.capacity_mib,
+ guard.irq_mode,
+ )?);
+ guard.powered = true;
+ } else if guard.powered && !power_op {
+ drop(guard.disk.take());
+ guard.powered = false;
+ }
+
+ Ok(())
+ }
+}
+
+#[vtable]
+impl configfs::AttributeOperations<1> for DeviceConfig {
+ type Data = DeviceConfig;
+
+ fn show(this: &DeviceConfig, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+ writer.write_fmt(fmt!("{}\n", this.data.lock().block_size))?;
+ Ok(writer.bytes_written())
+ }
+
+ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
+ if this.data.lock().powered {
+ return Err(EBUSY);
+ }
+
+ let text = core::str::from_utf8(page)?.trim();
+ let value = text.parse::<u32>().map_err(|_| EINVAL)?;
+
+ GenDiskBuilder::validate_block_size(value)?;
+ this.data.lock().block_size = value;
+ Ok(())
+ }
+}
+
+#[vtable]
+impl configfs::AttributeOperations<2> for DeviceConfig {
+ type Data = DeviceConfig;
+
+ fn show(this: &DeviceConfig, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+
+ if this.data.lock().rotational {
+ writer.write_str("1\n")?;
+ } else {
+ writer.write_str("0\n")?;
+ }
+
+ Ok(writer.bytes_written())
+ }
+
+ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
+ if this.data.lock().powered {
+ return Err(EBUSY);
+ }
+
+ this.data.lock().rotational = kstrtobool_bytes(page)?;
+
+ Ok(())
+ }
+}
+
+#[vtable]
+impl configfs::AttributeOperations<3> for DeviceConfig {
+ type Data = DeviceConfig;
+
+ fn show(this: &DeviceConfig, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+ writer.write_fmt(fmt!("{}\n", this.data.lock().capacity_mib))?;
+ Ok(writer.bytes_written())
+ }
+
+ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
+ if this.data.lock().powered {
+ return Err(EBUSY);
+ }
+
+ let text = core::str::from_utf8(page)?.trim();
+ let value = text.parse::<u64>().map_err(|_| EINVAL)?;
+
+ this.data.lock().capacity_mib = value;
+ Ok(())
+ }
+}
+
+#[vtable]
+impl configfs::AttributeOperations<4> for DeviceConfig {
+ type Data = DeviceConfig;
+
+ fn show(this: &DeviceConfig, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+ writer.write_fmt(fmt!("{}\n", this.data.lock().irq_mode))?;
+ Ok(writer.bytes_written())
+ }
+
+ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
+ if this.data.lock().powered {
+ return Err(EBUSY);
+ }
+
+ let text = core::str::from_utf8(page)?.trim();
+ let value = text.parse::<u8>().map_err(|_| EINVAL)?;
+
+ this.data.lock().irq_mode = IRQMode::try_from(value)?;
+ Ok(())
+ }
+}
diff --git a/drivers/block/rnull/rnull.rs b/drivers/block/rnull/rnull.rs
new file mode 100644
index 000000000000..1ec694d7f1a6
--- /dev/null
+++ b/drivers/block/rnull/rnull.rs
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! This is a Rust implementation of the C null block driver.
+
+mod configfs;
+
+use configfs::IRQMode;
+use kernel::{
+ block::{
+ self,
+ mq::{
+ self,
+ gen_disk::{self, GenDisk},
+ Operations, TagSet,
+ },
+ },
+ error::Result,
+ pr_info,
+ prelude::*,
+ sync::Arc,
+ types::ARef,
+};
+use pin_init::PinInit;
+
+module! {
+ type: NullBlkModule,
+ name: "rnull_mod",
+ authors: ["Andreas Hindborg"],
+ description: "Rust implementation of the C null block driver",
+ license: "GPL v2",
+}
+
+#[pin_data]
+struct NullBlkModule {
+ #[pin]
+ configfs_subsystem: kernel::configfs::Subsystem<configfs::Config>,
+}
+
+impl kernel::InPlaceModule for NullBlkModule {
+ fn init(_module: &'static ThisModule) -> impl PinInit<Self, Error> {
+ pr_info!("Rust null_blk loaded\n");
+
+ try_pin_init!(Self {
+ configfs_subsystem <- configfs::subsystem(),
+ })
+ }
+}
+
+struct NullBlkDevice;
+
+impl NullBlkDevice {
+ fn new(
+ name: &CStr,
+ block_size: u32,
+ rotational: bool,
+ capacity_mib: u64,
+ irq_mode: IRQMode,
+ ) -> Result<GenDisk<Self>> {
+ let tagset = Arc::pin_init(TagSet::new(1, 256, 1), GFP_KERNEL)?;
+
+ let queue_data = Box::new(QueueData { irq_mode }, GFP_KERNEL)?;
+
+ gen_disk::GenDiskBuilder::new()
+ .capacity_sectors(capacity_mib << (20 - block::SECTOR_SHIFT))
+ .logical_block_size(block_size)?
+ .physical_block_size(block_size)?
+ .rotational(rotational)
+ .build(fmt!("{}", name.to_str()?), tagset, queue_data)
+ }
+}
+
+struct QueueData {
+ irq_mode: IRQMode,
+}
+
+#[vtable]
+impl Operations for NullBlkDevice {
+ type QueueData = KBox<QueueData>;
+
+ #[inline(always)]
+ fn queue_rq(queue_data: &QueueData, rq: ARef<mq::Request<Self>>, _is_last: bool) -> Result {
+ match queue_data.irq_mode {
+ IRQMode::None => mq::Request::end_ok(rq)
+ .map_err(|_e| kernel::error::code::EIO)
+ // We take no refcounts on the request, so we expect to be able to
+ // end the request. The request reference must be unique at this
+ // point, and so `end_ok` cannot fail.
+ .expect("Fatal error - expected to be able to end request"),
+ IRQMode::Soft => mq::Request::complete(rq),
+ }
+ Ok(())
+ }
+
+ fn commit_rqs(_queue_data: &QueueData) {}
+
+ fn complete(rq: ARef<mq::Request<Self>>) {
+ mq::Request::end_ok(rq)
+ .map_err(|_e| kernel::error::code::EIO)
+ // We take no refcounts on the request, so we expect to be able to
+ // end the request. The request reference must be unique at this
+ // point, and so `end_ok` cannot fail.
+ .expect("Fatal error - expected to be able to end request");
+ }
+}
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 7af21fe67671..db1fe9772a4d 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -119,9 +119,8 @@ static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr)
return vio_dring_avail(dr, VDC_TX_RING_SIZE);
}
-static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int vdc_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct gendisk *disk = bdev->bd_disk;
sector_t nsect = get_capacity(disk);
sector_t cylinders = nsect;
@@ -1189,7 +1188,7 @@ static void vdc_ldc_reset(struct vdc_port *port)
}
if (port->ldc_timeout)
- mod_delayed_work(system_wq, &port->ldc_reset_timer_work,
+ mod_delayed_work(system_percpu_wq, &port->ldc_reset_timer_work,
round_jiffies(jiffies + HZ * port->ldc_timeout));
mod_timer(&port->vio.timer, round_jiffies(jiffies + HZ));
return;
@@ -1217,7 +1216,7 @@ static int __init vdc_init(void)
{
int err;
- sunvdc_wq = alloc_workqueue("sunvdc", 0, 0);
+ sunvdc_wq = alloc_workqueue("sunvdc", WQ_PERCPU, 0);
if (!sunvdc_wq)
return -ENOMEM;
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index eda33c5eb5e2..416015947ae6 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -711,9 +711,9 @@ static int floppy_ioctl(struct block_device *bdev, blk_mode_t mode,
return -ENOTTY;
}
-static int floppy_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int floppy_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct floppy_state *fs = bdev->bd_disk->private_data;
+ struct floppy_state *fs = disk->private_data;
struct floppy_struct *g;
int ret;
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 6561d2a561fa..0c74a41a6753 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -201,7 +201,6 @@ struct ublk_queue {
bool force_abort;
bool canceling;
bool fail_io; /* copy of dev->state == UBLK_S_DEV_FAIL_IO */
- unsigned short nr_io_ready; /* how many ios setup */
spinlock_t cancel_lock;
struct ublk_device *dev;
struct ublk_io ios[];
@@ -234,11 +233,12 @@ struct ublk_device {
struct ublk_params params;
struct completion completion;
- unsigned int nr_queues_ready;
- unsigned int nr_privileged_daemon;
+ u32 nr_io_ready;
+ bool unprivileged_daemons;
struct mutex cancel_mutex;
bool canceling;
pid_t ublksrv_tgid;
+ struct delayed_work exit_work;
};
/* header of ublk_params */
@@ -251,8 +251,7 @@ static void ublk_io_release(void *priv);
static void ublk_stop_dev_unlocked(struct ublk_device *ub);
static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq);
static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
- const struct ublk_queue *ubq, struct ublk_io *io,
- size_t offset);
+ u16 q_id, u16 tag, struct ublk_io *io, size_t offset);
static inline unsigned int ublk_req_build_flags(struct request *req);
static inline struct ublksrv_io_desc *
@@ -531,7 +530,8 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
#endif
-static inline void __ublk_complete_rq(struct request *req);
+static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io,
+ bool need_map);
static dev_t ublk_chr_devt;
static const struct class ublk_chr_class = {
@@ -663,22 +663,44 @@ static inline bool ublk_support_zero_copy(const struct ublk_queue *ubq)
return ubq->flags & UBLK_F_SUPPORT_ZERO_COPY;
}
+static inline bool ublk_dev_support_zero_copy(const struct ublk_device *ub)
+{
+ return ub->dev_info.flags & UBLK_F_SUPPORT_ZERO_COPY;
+}
+
static inline bool ublk_support_auto_buf_reg(const struct ublk_queue *ubq)
{
return ubq->flags & UBLK_F_AUTO_BUF_REG;
}
+static inline bool ublk_dev_support_auto_buf_reg(const struct ublk_device *ub)
+{
+ return ub->dev_info.flags & UBLK_F_AUTO_BUF_REG;
+}
+
static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
{
return ubq->flags & UBLK_F_USER_COPY;
}
+static inline bool ublk_dev_support_user_copy(const struct ublk_device *ub)
+{
+ return ub->dev_info.flags & UBLK_F_USER_COPY;
+}
+
static inline bool ublk_need_map_io(const struct ublk_queue *ubq)
{
return !ublk_support_user_copy(ubq) && !ublk_support_zero_copy(ubq) &&
!ublk_support_auto_buf_reg(ubq);
}
+static inline bool ublk_dev_need_map_io(const struct ublk_device *ub)
+{
+ return !ublk_dev_support_user_copy(ub) &&
+ !ublk_dev_support_zero_copy(ub) &&
+ !ublk_dev_support_auto_buf_reg(ub);
+}
+
static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
{
/*
@@ -696,6 +718,13 @@ static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
ublk_support_auto_buf_reg(ubq);
}
+static inline bool ublk_dev_need_req_ref(const struct ublk_device *ub)
+{
+ return ublk_dev_support_user_copy(ub) ||
+ ublk_dev_support_zero_copy(ub) ||
+ ublk_dev_support_auto_buf_reg(ub);
+}
+
static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
struct ublk_io *io)
{
@@ -710,8 +739,11 @@ static inline bool ublk_get_req_ref(struct ublk_io *io)
static inline void ublk_put_req_ref(struct ublk_io *io, struct request *req)
{
- if (refcount_dec_and_test(&io->ref))
- __ublk_complete_rq(req);
+ if (!refcount_dec_and_test(&io->ref))
+ return;
+
+ /* ublk_need_map_io() and ublk_need_req_ref() are mutually exclusive */
+ __ublk_complete_rq(req, io, false);
}
static inline bool ublk_sub_req_ref(struct ublk_io *io)
@@ -727,6 +759,11 @@ static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
return ubq->flags & UBLK_F_NEED_GET_DATA;
}
+static inline bool ublk_dev_need_get_data(const struct ublk_device *ub)
+{
+ return ub->dev_info.flags & UBLK_F_NEED_GET_DATA;
+}
+
/* Called in slow path only, keep it noinline for trace purpose */
static noinline struct ublk_device *ublk_get_device(struct ublk_device *ub)
{
@@ -763,11 +800,9 @@ static inline int __ublk_queue_cmd_buf_size(int depth)
return round_up(depth * sizeof(struct ublksrv_io_desc), PAGE_SIZE);
}
-static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
+static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub)
{
- struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
-
- return __ublk_queue_cmd_buf_size(ubq->q_depth);
+ return __ublk_queue_cmd_buf_size(ub->dev_info.queue_depth);
}
static int ublk_max_cmd_buf_size(void)
@@ -1018,13 +1053,13 @@ static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
return rq_bytes;
}
-static int ublk_unmap_io(const struct ublk_queue *ubq,
+static int ublk_unmap_io(bool need_map,
const struct request *req,
const struct ublk_io *io)
{
const unsigned int rq_bytes = blk_rq_bytes(req);
- if (!ublk_need_map_io(ubq))
+ if (!need_map)
return rq_bytes;
if (ublk_need_unmap_req(req)) {
@@ -1071,13 +1106,8 @@ static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
{
struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
struct ublk_io *io = &ubq->ios[req->tag];
- enum req_op op = req_op(req);
u32 ublk_op;
- if (!ublk_queue_is_zoned(ubq) &&
- (op_is_zone_mgmt(op) || op == REQ_OP_ZONE_APPEND))
- return BLK_STS_IOERR;
-
switch (req_op(req)) {
case REQ_OP_READ:
ublk_op = UBLK_IO_OP_READ;
@@ -1116,10 +1146,9 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
}
/* todo: handle partial completion */
-static inline void __ublk_complete_rq(struct request *req)
+static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io,
+ bool need_map)
{
- struct ublk_queue *ubq = req->mq_hctx->driver_data;
- struct ublk_io *io = &ubq->ios[req->tag];
unsigned int unmapped_bytes;
blk_status_t res = BLK_STS_OK;
@@ -1143,7 +1172,7 @@ static inline void __ublk_complete_rq(struct request *req)
goto exit;
/* for READ request, writing data in iod->addr to rq buffers */
- unmapped_bytes = ublk_unmap_io(ubq, req, io);
+ unmapped_bytes = ublk_unmap_io(need_map, req, io);
/*
* Extremely impossible since we got data filled in just before
@@ -1188,7 +1217,7 @@ static void ublk_complete_io_cmd(struct ublk_io *io, struct request *req,
struct io_uring_cmd *cmd = __ublk_prep_compl_io_cmd(io, req);
/* tell ublksrv one io request is coming */
- io_uring_cmd_done(cmd, res, 0, issue_flags);
+ io_uring_cmd_done(cmd, res, issue_flags);
}
#define UBLK_REQUEUE_DELAY_MS 3
@@ -1389,7 +1418,7 @@ static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq,
{
blk_status_t res;
- if (unlikely(ubq->fail_io))
+ if (unlikely(READ_ONCE(ubq->fail_io)))
return BLK_STS_TARGET;
/* With recovery feature enabled, force_abort is set in
@@ -1401,7 +1430,8 @@ static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq,
* Note: force_abort is guaranteed to be seen because it is set
* before request queue is unquiesced.
*/
- if (ublk_nosrv_should_queue_io(ubq) && unlikely(ubq->force_abort))
+ if (ublk_nosrv_should_queue_io(ubq) &&
+ unlikely(READ_ONCE(ubq->force_abort)))
return BLK_STS_IOERR;
if (check_cancel && unlikely(ubq->canceling))
@@ -1498,9 +1528,6 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
{
int i;
- /* All old ioucmds have to be completed */
- ubq->nr_io_ready = 0;
-
for (i = 0; i < ubq->q_depth; i++) {
struct ublk_io *io = &ubq->ios[i];
@@ -1549,8 +1576,8 @@ static void ublk_reset_ch_dev(struct ublk_device *ub)
/* set to NULL, otherwise new tasks cannot mmap io_cmd_buf */
ub->mm = NULL;
- ub->nr_queues_ready = 0;
- ub->nr_privileged_daemon = 0;
+ ub->nr_io_ready = 0;
+ ub->unprivileged_daemons = false;
ub->ublksrv_tgid = -1;
}
@@ -1594,13 +1621,63 @@ static void ublk_set_canceling(struct ublk_device *ub, bool canceling)
ublk_get_queue(ub, i)->canceling = canceling;
}
-static int ublk_ch_release(struct inode *inode, struct file *filp)
+static bool ublk_check_and_reset_active_ref(struct ublk_device *ub)
{
- struct ublk_device *ub = filp->private_data;
+ int i, j;
+
+ if (!(ub->dev_info.flags & (UBLK_F_SUPPORT_ZERO_COPY |
+ UBLK_F_AUTO_BUF_REG)))
+ return false;
+
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ struct ublk_queue *ubq = ublk_get_queue(ub, i);
+
+ for (j = 0; j < ubq->q_depth; j++) {
+ struct ublk_io *io = &ubq->ios[j];
+ unsigned int refs = refcount_read(&io->ref) +
+ io->task_registered_buffers;
+
+ /*
+ * UBLK_REFCOUNT_INIT or zero means no active
+ * reference
+ */
+ if (refs != UBLK_REFCOUNT_INIT && refs != 0)
+ return true;
+
+ /* reset to zero if the io has no active references */
+ refcount_set(&io->ref, 0);
+ io->task_registered_buffers = 0;
+ }
+ }
+ return false;
+}
+
+static void ublk_ch_release_work_fn(struct work_struct *work)
+{
+ struct ublk_device *ub =
+ container_of(work, struct ublk_device, exit_work.work);
struct gendisk *disk;
int i;
/*
+ * For zero-copy and auto buffer register modes, I/O references
+ * might not be dropped naturally when the daemon is killed, but
+ * io_uring guarantees that registered bvec kernel buffers are
+ * finally unregistered when the io_uring context is freed, at
+ * which point the active references are dropped.
+ *
+ * Wait until the active references are dropped to avoid
+ * use-after-free.
+ *
+ * A registered buffer may be unregistered in io_uring's release
+ * handler, so wait by rescheduling this work function to avoid a
+ * dependency between the two file releases.
+ */
+ if (ublk_check_and_reset_active_ref(ub)) {
+ schedule_delayed_work(&ub->exit_work, 1);
+ return;
+ }
+
+ /*
* disk isn't attached yet, either device isn't live, or it has
* been removed already, so we needn't do anything
*/
@@ -1644,7 +1721,6 @@ static int ublk_ch_release(struct inode *inode, struct file *filp)
* Transition the device to the nosrv state. What exactly this
* means depends on the recovery flags
*/
- blk_mq_quiesce_queue(disk->queue);
if (ublk_nosrv_should_stop_dev(ub)) {
/*
* Allow any pending/future I/O to pass through quickly
@@ -1652,8 +1728,7 @@ static int ublk_ch_release(struct inode *inode, struct file *filp)
* waits for all pending I/O to complete
*/
for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
- ublk_get_queue(ub, i)->force_abort = true;
- blk_mq_unquiesce_queue(disk->queue);
+ WRITE_ONCE(ublk_get_queue(ub, i)->force_abort, true);
ublk_stop_dev_unlocked(ub);
} else {
@@ -1663,9 +1738,8 @@ static int ublk_ch_release(struct inode *inode, struct file *filp)
} else {
ub->dev_info.state = UBLK_S_DEV_FAIL_IO;
for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
- ublk_get_queue(ub, i)->fail_io = true;
+ WRITE_ONCE(ublk_get_queue(ub, i)->fail_io, true);
}
- blk_mq_unquiesce_queue(disk->queue);
}
unlock:
mutex_unlock(&ub->mutex);
@@ -1675,6 +1749,23 @@ unlock:
ublk_reset_ch_dev(ub);
out:
clear_bit(UB_STATE_OPEN, &ub->state);
+
+ /* put the reference grabbed in ublk_ch_release() */
+ ublk_put_device(ub);
+}
+
+static int ublk_ch_release(struct inode *inode, struct file *filp)
+{
+ struct ublk_device *ub = filp->private_data;
+
+ /*
+ * Grab ublk device reference, so it won't be gone until we are
+ * really released from work function.
+ */
+ ublk_get_device(ub);
+
+ INIT_DELAYED_WORK(&ub->exit_work, ublk_ch_release_work_fn);
+ schedule_delayed_work(&ub->exit_work, 0);
return 0;
}
@@ -1709,23 +1800,23 @@ static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
__func__, q_id, current->pid, vma->vm_start,
phys_off, (unsigned long)sz);
- if (sz != ublk_queue_cmd_buf_size(ub, q_id))
+ if (sz != ublk_queue_cmd_buf_size(ub))
return -EINVAL;
pfn = virt_to_phys(ublk_queue_cmd_buf(ub, q_id)) >> PAGE_SHIFT;
return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
}
-static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
+static void __ublk_fail_req(struct ublk_device *ub, struct ublk_io *io,
struct request *req)
{
WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
- if (ublk_nosrv_should_reissue_outstanding(ubq->dev))
+ if (ublk_nosrv_should_reissue_outstanding(ub))
blk_mq_requeue_request(req, false);
else {
io->res = -EIO;
- __ublk_complete_rq(req);
+ __ublk_complete_rq(req, io, ublk_dev_need_map_io(ub));
}
}
@@ -1745,7 +1836,7 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
struct ublk_io *io = &ubq->ios[i];
if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
- __ublk_fail_req(ubq, io, io->req);
+ __ublk_fail_req(ub, io, io->req);
}
}
@@ -1807,7 +1898,7 @@ static void ublk_cancel_cmd(struct ublk_queue *ubq, unsigned tag,
spin_unlock(&ubq->cancel_lock);
if (!done)
- io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0, issue_flags);
+ io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, issue_flags);
}
/*
@@ -1850,9 +1941,11 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
ublk_cancel_cmd(ubq, pdu->tag, issue_flags);
}
-static inline bool ublk_queue_ready(struct ublk_queue *ubq)
+static inline bool ublk_dev_ready(const struct ublk_device *ub)
{
- return ubq->nr_io_ready == ubq->q_depth;
+ u32 total = (u32)ub->dev_info.nr_hw_queues * ub->dev_info.queue_depth;
+
+ return ub->nr_io_ready == total;
}
static void ublk_cancel_queue(struct ublk_queue *ubq)
@@ -1976,18 +2069,14 @@ static void ublk_reset_io_flags(struct ublk_device *ub)
}
/* device can only be started after all IOs are ready */
-static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
+static void ublk_mark_io_ready(struct ublk_device *ub)
__must_hold(&ub->mutex)
{
- ubq->nr_io_ready++;
- if (ublk_queue_ready(ubq)) {
- ub->nr_queues_ready++;
-
- if (capable(CAP_SYS_ADMIN))
- ub->nr_privileged_daemon++;
- }
+ if (!ub->unprivileged_daemons && !capable(CAP_SYS_ADMIN))
+ ub->unprivileged_daemons = true;
- if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues) {
+ ub->nr_io_ready++;
+ if (ublk_dev_ready(ub)) {
/* now we are ready for handling ublk io request */
ublk_reset_io_flags(ub);
complete_all(&ub->completion);
@@ -2058,11 +2147,11 @@ ublk_fill_io_cmd(struct ublk_io *io, struct io_uring_cmd *cmd)
}
static inline int
-ublk_config_io_buf(const struct ublk_queue *ubq, struct ublk_io *io,
+ublk_config_io_buf(const struct ublk_device *ub, struct ublk_io *io,
struct io_uring_cmd *cmd, unsigned long buf_addr,
u16 *buf_idx)
{
- if (ublk_support_auto_buf_reg(ubq))
+ if (ublk_dev_support_auto_buf_reg(ub))
return ublk_handle_auto_buf_reg(io, cmd, buf_idx);
io->addr = buf_addr;
@@ -2101,18 +2190,18 @@ static void ublk_io_release(void *priv)
}
static int ublk_register_io_buf(struct io_uring_cmd *cmd,
- const struct ublk_queue *ubq,
+ struct ublk_device *ub,
+ u16 q_id, u16 tag,
struct ublk_io *io,
unsigned int index, unsigned int issue_flags)
{
- struct ublk_device *ub = cmd->file->private_data;
struct request *req;
int ret;
- if (!ublk_support_zero_copy(ubq))
+ if (!ublk_dev_support_zero_copy(ub))
return -EINVAL;
- req = __ublk_check_and_get_req(ub, ubq, io, 0);
+ req = __ublk_check_and_get_req(ub, q_id, tag, io, 0);
if (!req)
return -EINVAL;
@@ -2128,7 +2217,8 @@ static int ublk_register_io_buf(struct io_uring_cmd *cmd,
static int
ublk_daemon_register_io_buf(struct io_uring_cmd *cmd,
- const struct ublk_queue *ubq, struct ublk_io *io,
+ struct ublk_device *ub,
+ u16 q_id, u16 tag, struct ublk_io *io,
unsigned index, unsigned issue_flags)
{
unsigned new_registered_buffers;
@@ -2141,9 +2231,10 @@ ublk_daemon_register_io_buf(struct io_uring_cmd *cmd,
*/
new_registered_buffers = io->task_registered_buffers + 1;
if (unlikely(new_registered_buffers >= UBLK_REFCOUNT_INIT))
- return ublk_register_io_buf(cmd, ubq, io, index, issue_flags);
+ return ublk_register_io_buf(cmd, ub, q_id, tag, io, index,
+ issue_flags);
- if (!ublk_support_zero_copy(ubq) || !ublk_rq_has_data(req))
+ if (!ublk_dev_support_zero_copy(ub) || !ublk_rq_has_data(req))
return -EINVAL;
ret = io_buffer_register_bvec(cmd, req, ublk_io_release, index,
@@ -2165,14 +2256,14 @@ static int ublk_unregister_io_buf(struct io_uring_cmd *cmd,
return io_buffer_unregister_bvec(cmd, index, issue_flags);
}
-static int ublk_check_fetch_buf(const struct ublk_queue *ubq, __u64 buf_addr)
+static int ublk_check_fetch_buf(const struct ublk_device *ub, __u64 buf_addr)
{
- if (ublk_need_map_io(ubq)) {
+ if (ublk_dev_need_map_io(ub)) {
/*
* FETCH_RQ has to provide IO buffer if NEED GET
* DATA is not enabled
*/
- if (!buf_addr && !ublk_need_get_data(ubq))
+ if (!buf_addr && !ublk_dev_need_get_data(ub))
return -EINVAL;
} else if (buf_addr) {
/* User copy requires addr to be unset */
@@ -2181,10 +2272,9 @@ static int ublk_check_fetch_buf(const struct ublk_queue *ubq, __u64 buf_addr)
return 0;
}
-static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_queue *ubq,
+static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_device *ub,
struct ublk_io *io, __u64 buf_addr)
{
- struct ublk_device *ub = ubq->dev;
int ret = 0;
/*
@@ -2193,8 +2283,8 @@ static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_queue *ubq,
* FETCH, so it is fine even for IO_URING_F_NONBLOCK.
*/
mutex_lock(&ub->mutex);
- /* UBLK_IO_FETCH_REQ is only allowed before queue is setup */
- if (ublk_queue_ready(ubq)) {
+ /* UBLK_IO_FETCH_REQ is only allowed before the dev is set up */
+ if (ublk_dev_ready(ub)) {
ret = -EBUSY;
goto out;
}
@@ -2208,28 +2298,28 @@ static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_queue *ubq,
WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV);
ublk_fill_io_cmd(io, cmd);
- ret = ublk_config_io_buf(ubq, io, cmd, buf_addr, NULL);
+ ret = ublk_config_io_buf(ub, io, cmd, buf_addr, NULL);
if (ret)
goto out;
WRITE_ONCE(io->task, get_task_struct(current));
- ublk_mark_io_ready(ub, ubq);
+ ublk_mark_io_ready(ub);
out:
mutex_unlock(&ub->mutex);
return ret;
}
-static int ublk_check_commit_and_fetch(const struct ublk_queue *ubq,
+static int ublk_check_commit_and_fetch(const struct ublk_device *ub,
struct ublk_io *io, __u64 buf_addr)
{
struct request *req = io->req;
- if (ublk_need_map_io(ubq)) {
+ if (ublk_dev_need_map_io(ub)) {
/*
* COMMIT_AND_FETCH_REQ has to provide IO buffer if
* NEED GET DATA is not enabled or it is Read IO.
*/
- if (!buf_addr && (!ublk_need_get_data(ubq) ||
+ if (!buf_addr && (!ublk_dev_need_get_data(ub) ||
req_op(req) == REQ_OP_READ))
return -EINVAL;
} else if (req_op(req) != REQ_OP_ZONE_APPEND && buf_addr) {
@@ -2243,10 +2333,10 @@ static int ublk_check_commit_and_fetch(const struct ublk_queue *ubq,
return 0;
}
-static bool ublk_need_complete_req(const struct ublk_queue *ubq,
+static bool ublk_need_complete_req(const struct ublk_device *ub,
struct ublk_io *io)
{
- if (ublk_need_req_ref(ubq))
+ if (ublk_dev_need_req_ref(ub))
return ublk_sub_req_ref(io);
return true;
}
@@ -2269,23 +2359,28 @@ static bool ublk_get_data(const struct ublk_queue *ubq, struct ublk_io *io,
return ublk_start_io(ubq, req, io);
}
-static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
- unsigned int issue_flags,
- const struct ublksrv_io_cmd *ub_cmd)
+static int ublk_ch_uring_cmd_local(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
{
+ /* May point to userspace-mapped memory */
+ const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe);
u16 buf_idx = UBLK_INVALID_BUF_IDX;
struct ublk_device *ub = cmd->file->private_data;
struct ublk_queue *ubq;
struct ublk_io *io;
u32 cmd_op = cmd->cmd_op;
- unsigned tag = ub_cmd->tag;
+ u16 q_id = READ_ONCE(ub_src->q_id);
+ u16 tag = READ_ONCE(ub_src->tag);
+ s32 result = READ_ONCE(ub_src->result);
+ u64 addr = READ_ONCE(ub_src->addr); /* unioned with zone_append_lba */
struct request *req;
int ret;
bool compl;
+ WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED);
+
pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
- __func__, cmd->cmd_op, ub_cmd->q_id, tag,
- ub_cmd->result);
+ __func__, cmd->cmd_op, q_id, tag, result);
ret = ublk_check_cmd_op(cmd_op);
if (ret)
@@ -2296,25 +2391,24 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
* so no need to validate the q_id, tag, or task
*/
if (_IOC_NR(cmd_op) == UBLK_IO_UNREGISTER_IO_BUF)
- return ublk_unregister_io_buf(cmd, ub, ub_cmd->addr,
- issue_flags);
+ return ublk_unregister_io_buf(cmd, ub, addr, issue_flags);
ret = -EINVAL;
- if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues)
+ if (q_id >= ub->dev_info.nr_hw_queues)
goto out;
- ubq = ublk_get_queue(ub, ub_cmd->q_id);
+ ubq = ublk_get_queue(ub, q_id);
- if (tag >= ubq->q_depth)
+ if (tag >= ub->dev_info.queue_depth)
goto out;
io = &ubq->ios[tag];
/* UBLK_IO_FETCH_REQ can be handled on any task, which sets io->task */
if (unlikely(_IOC_NR(cmd_op) == UBLK_IO_FETCH_REQ)) {
- ret = ublk_check_fetch_buf(ubq, ub_cmd->addr);
+ ret = ublk_check_fetch_buf(ub, addr);
if (ret)
goto out;
- ret = ublk_fetch(cmd, ubq, io, ub_cmd->addr);
+ ret = ublk_fetch(cmd, ub, io, addr);
if (ret)
goto out;
@@ -2328,8 +2422,8 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
* so can be handled on any task
*/
if (_IOC_NR(cmd_op) == UBLK_IO_REGISTER_IO_BUF)
- return ublk_register_io_buf(cmd, ubq, io, ub_cmd->addr,
- issue_flags);
+ return ublk_register_io_buf(cmd, ub, q_id, tag, io,
+ addr, issue_flags);
goto out;
}
@@ -2350,24 +2444,24 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
switch (_IOC_NR(cmd_op)) {
case UBLK_IO_REGISTER_IO_BUF:
- return ublk_daemon_register_io_buf(cmd, ubq, io, ub_cmd->addr,
+ return ublk_daemon_register_io_buf(cmd, ub, q_id, tag, io, addr,
issue_flags);
case UBLK_IO_COMMIT_AND_FETCH_REQ:
- ret = ublk_check_commit_and_fetch(ubq, io, ub_cmd->addr);
+ ret = ublk_check_commit_and_fetch(ub, io, addr);
if (ret)
goto out;
- io->res = ub_cmd->result;
+ io->res = result;
req = ublk_fill_io_cmd(io, cmd);
- ret = ublk_config_io_buf(ubq, io, cmd, ub_cmd->addr, &buf_idx);
- compl = ublk_need_complete_req(ubq, io);
+ ret = ublk_config_io_buf(ub, io, cmd, addr, &buf_idx);
+ compl = ublk_need_complete_req(ub, io);
/* can't touch 'ublk_io' any more */
if (buf_idx != UBLK_INVALID_BUF_IDX)
io_buffer_unregister_bvec(cmd, buf_idx, issue_flags);
if (req_op(req) == REQ_OP_ZONE_APPEND)
- req->__sector = ub_cmd->zone_append_lba;
+ req->__sector = addr;
if (compl)
- __ublk_complete_rq(req);
+ __ublk_complete_rq(req, io, ublk_dev_need_map_io(ub));
if (ret)
goto out;
@@ -2379,7 +2473,7 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
* request
*/
req = ublk_fill_io_cmd(io, cmd);
- ret = ublk_config_io_buf(ubq, io, cmd, ub_cmd->addr, NULL);
+ ret = ublk_config_io_buf(ub, io, cmd, addr, NULL);
WARN_ON_ONCE(ret);
if (likely(ublk_get_data(ubq, io, req))) {
__ublk_prep_compl_io_cmd(io, req);
@@ -2399,16 +2493,15 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
}
static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
- const struct ublk_queue *ubq, struct ublk_io *io, size_t offset)
+ u16 q_id, u16 tag, struct ublk_io *io, size_t offset)
{
- unsigned tag = io - ubq->ios;
struct request *req;
/*
* can't use io->req in case of concurrent UBLK_IO_COMMIT_AND_FETCH_REQ,
* which would overwrite it with io->cmd
*/
- req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
+ req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
if (!req)
return NULL;
@@ -2430,33 +2523,13 @@ fail_put:
return NULL;
}
-static inline int ublk_ch_uring_cmd_local(struct io_uring_cmd *cmd,
- unsigned int issue_flags)
-{
- /*
- * Not necessary for async retry, but let's keep it simple and always
- * copy the values to avoid any potential reuse.
- */
- const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe);
- const struct ublksrv_io_cmd ub_cmd = {
- .q_id = READ_ONCE(ub_src->q_id),
- .tag = READ_ONCE(ub_src->tag),
- .result = READ_ONCE(ub_src->result),
- .addr = READ_ONCE(ub_src->addr)
- };
-
- WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED);
-
- return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
-}
-
static void ublk_ch_uring_cmd_cb(struct io_uring_cmd *cmd,
unsigned int issue_flags)
{
int ret = ublk_ch_uring_cmd_local(cmd, issue_flags);
if (ret != -EIOCBQUEUED)
- io_uring_cmd_done(cmd, ret, 0, issue_flags);
+ io_uring_cmd_done(cmd, ret, issue_flags);
}
static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
@@ -2519,17 +2592,14 @@ static struct request *ublk_check_and_get_req(struct kiocb *iocb,
return ERR_PTR(-EINVAL);
ubq = ublk_get_queue(ub, q_id);
- if (!ubq)
- return ERR_PTR(-EINVAL);
-
- if (!ublk_support_user_copy(ubq))
+ if (!ublk_dev_support_user_copy(ub))
return ERR_PTR(-EACCES);
- if (tag >= ubq->q_depth)
+ if (tag >= ub->dev_info.queue_depth)
return ERR_PTR(-EINVAL);
*io = &ubq->ios[tag];
- req = __ublk_check_and_get_req(ub, ubq, *io, buf_off);
+ req = __ublk_check_and_get_req(ub, q_id, tag, *io, buf_off);
if (!req)
return ERR_PTR(-EINVAL);
@@ -2592,7 +2662,7 @@ static const struct file_operations ublk_ch_fops = {
static void ublk_deinit_queue(struct ublk_device *ub, int q_id)
{
- int size = ublk_queue_cmd_buf_size(ub, q_id);
+ int size = ublk_queue_cmd_buf_size(ub);
struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
int i;
@@ -2619,7 +2689,7 @@ static int ublk_init_queue(struct ublk_device *ub, int q_id)
ubq->flags = ub->dev_info.flags;
ubq->q_id = q_id;
ubq->q_depth = ub->dev_info.queue_depth;
- size = ublk_queue_cmd_buf_size(ub, q_id);
+ size = ublk_queue_cmd_buf_size(ub);
ptr = (void *) __get_free_pages(gfp_flags, get_order(size));
if (!ptr)
@@ -2880,8 +2950,8 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub,
ublk_apply_params(ub);
- /* don't probe partitions if any one ubq daemon is un-trusted */
- if (ub->nr_privileged_daemon != ub->nr_queues_ready)
+ /* don't probe partitions if any daemon task is untrusted */
+ if (ub->unprivileged_daemons)
set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
ublk_get_device(ub);
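
The most involved change above is the ublk release path: ublk_ch_release() now only takes a device reference and schedules work, and the work function polls for outstanding I/O references, rescheduling itself until io_uring has dropped all registered-buffer references. That breaks the release-time dependency between the two files. A self-contained sketch of the retry shape (all names are illustrative; an atomic counter stands in for the per-io refcounts):

    #include <linux/atomic.h>
    #include <linux/workqueue.h>

    struct mydev {
            struct delayed_work exit_work;
            atomic_t inflight;      /* stand-in for per-io references */
    };

    static void mydev_exit_work_fn(struct work_struct *work)
    {
            struct mydev *dev = container_of(work, struct mydev,
                                             exit_work.work);

            if (atomic_read(&dev->inflight)) {
                    /* references still live: poll again shortly */
                    schedule_delayed_work(&dev->exit_work, 1);
                    return;
            }
            /* all references gone: safe to run the real teardown */
    }

    static int mydev_release(struct mydev *dev)
    {
            INIT_DELAYED_WORK(&dev->exit_work, mydev_exit_work_fn);
            schedule_delayed_work(&dev->exit_work, 0);
            return 0;
    }
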
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index e649fa67bac1..f061420dfb10 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -829,9 +829,9 @@ out:
}
/* We provide getgeo only to please some old bootloader/partitioning tools */
-static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
+static int virtblk_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct virtio_blk *vblk = bd->bd_disk->private_data;
+ struct virtio_blk *vblk = disk->private_data;
int ret = 0;
mutex_lock(&vblk->vdev_mutex);
@@ -853,7 +853,7 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
/* some standard values, similar to sd */
geo->heads = 1 << 6;
geo->sectors = 1 << 5;
- geo->cylinders = get_capacity(bd->bd_disk) >> 11;
+ geo->cylinders = get_capacity(disk) >> 11;
}
out:
mutex_unlock(&vblk->vdev_mutex);
@@ -1682,7 +1682,7 @@ static int __init virtio_blk_init(void)
{
int error;
- virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
+ virtblk_wq = alloc_workqueue("virtio-blk", WQ_PERCPU, 0);
if (!virtblk_wq)
return -ENOMEM;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 5babe575c288..04fc6b552c04 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -493,11 +493,11 @@ static void blkif_restart_queue_callback(void *arg)
schedule_work(&rinfo->work);
}
-static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
+static int blkif_getgeo(struct gendisk *disk, struct hd_geometry *hg)
{
/* We don't have real geometry info, but let's at least return
values consistent with the size of the device */
- sector_t nsect = get_capacity(bd->bd_disk);
+ sector_t nsect = get_capacity(disk);
sector_t cylinders = nsect;
hg->heads = 0xff;
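
Several drivers in this series share this fake-geometry idiom: pick fixed heads/sectors counts and derive cylinders from the capacity so that heads * sectors * cylinders roughly matches the device size. A sketch of the full idiom, including the overflow guard for the 16-bit cylinders field (modeled on xen-blkfront, not a verbatim copy):

    #include <linux/blkdev.h>
    #include <linux/hdreg.h>

    static int fake_getgeo(struct gendisk *disk, struct hd_geometry *hg)
    {
            sector_t nsect = get_capacity(disk);
            sector_t cylinders = nsect;

            hg->heads = 0xff;
            hg->sectors = 0x3f;
            sector_div(cylinders, hg->heads * hg->sectors);
            hg->cylinders = cylinders;
            /* hg->cylinders is 16 bits wide: saturate on overflow */
            if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors
                < nsect)
                    hg->cylinders = 0xffff;
            return 0;
    }
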
diff --git a/drivers/block/zloop.c b/drivers/block/zloop.c
index 553b1a713ab9..a423228e201b 100644
--- a/drivers/block/zloop.c
+++ b/drivers/block/zloop.c
@@ -700,6 +700,8 @@ static void zloop_free_disk(struct gendisk *disk)
struct zloop_device *zlo = disk->private_data;
unsigned int i;
+ blk_mq_free_tag_set(&zlo->tag_set);
+
for (i = 0; i < zlo->nr_zones; i++) {
struct zloop_zone *zone = &zlo->zones[i];
@@ -1080,7 +1082,6 @@ static int zloop_ctl_remove(struct zloop_options *opts)
del_gendisk(zlo->disk);
put_disk(zlo->disk);
- blk_mq_free_tag_set(&zlo->tag_set);
pr_info("Removed device %d\n", opts->id);
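
The zloop change is an object-lifetime fix: the tag set backs requests that may still hold disk references after del_gendisk()/put_disk() return, so freeing it in the remove path is too early. Moving the blk_mq_free_tag_set() call into the ->free_disk() callback ties its lifetime to the last gendisk reference. A sketch of the shape, with a hypothetical driver-private struct:

    #include <linux/blk-mq.h>
    #include <linux/blkdev.h>
    #include <linux/slab.h>

    struct mydrv {
            struct blk_mq_tag_set tag_set;
            /* ... */
    };

    /* Runs when the last reference to the gendisk is dropped. */
    static void mydrv_free_disk(struct gendisk *disk)
    {
            struct mydrv *drv = disk->private_data;

            blk_mq_free_tag_set(&drv->tag_set);   /* safe only now */
            kfree(drv);
    }
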
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 8acad3cc6e6e..a43074657531 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1085,7 +1085,7 @@ static int read_from_bdev_sync(struct zram *zram, struct page *page,
work.entry = entry;
INIT_WORK_ONSTACK(&work.work, zram_sync_read);
- queue_work(system_unbound_wq, &work.work);
+ queue_work(system_dfl_wq, &work.work);
flush_work(&work.work);
destroy_work_on_stack(&work.work);
@@ -1225,18 +1225,6 @@ static void comp_algorithm_set(struct zram *zram, u32 prio, const char *alg)
zram->comp_algs[prio] = alg;
}
-static ssize_t __comp_algorithm_show(struct zram *zram, u32 prio,
- char *buf, ssize_t at)
-{
- ssize_t sz;
-
- down_read(&zram->init_lock);
- sz = zcomp_available_show(zram->comp_algs[prio], buf, at);
- up_read(&zram->init_lock);
-
- return sz;
-}
-
static int __comp_algorithm_store(struct zram *zram, u32 prio, const char *buf)
{
char *compressor;
@@ -1387,8 +1375,12 @@ static ssize_t comp_algorithm_show(struct device *dev,
char *buf)
{
struct zram *zram = dev_to_zram(dev);
+ ssize_t sz;
- return __comp_algorithm_show(zram, ZRAM_PRIMARY_COMP, buf, 0);
+ down_read(&zram->init_lock);
+ sz = zcomp_available_show(zram->comp_algs[ZRAM_PRIMARY_COMP], buf, 0);
+ up_read(&zram->init_lock);
+ return sz;
}
static ssize_t comp_algorithm_store(struct device *dev,
@@ -1412,14 +1404,15 @@ static ssize_t recomp_algorithm_show(struct device *dev,
ssize_t sz = 0;
u32 prio;
+ down_read(&zram->init_lock);
for (prio = ZRAM_SECONDARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
if (!zram->comp_algs[prio])
continue;
sz += sysfs_emit_at(buf, sz, "#%d: ", prio);
- sz += __comp_algorithm_show(zram, prio, buf, sz);
+ sz += zcomp_available_show(zram->comp_algs[prio], buf, sz);
}
-
+ up_read(&zram->init_lock);
return sz;
}
@@ -1795,6 +1788,7 @@ static int write_same_filled_page(struct zram *zram, unsigned long fill,
u32 index)
{
zram_slot_lock(zram, index);
+ zram_free_page(zram, index);
zram_set_flag(zram, index, ZRAM_SAME);
zram_set_handle(zram, index, fill);
zram_slot_unlock(zram, index);
@@ -1832,6 +1826,7 @@ static int write_incompressible_page(struct zram *zram, struct page *page,
kunmap_local(src);
zram_slot_lock(zram, index);
+ zram_free_page(zram, index);
zram_set_flag(zram, index, ZRAM_HUGE);
zram_set_handle(zram, index, handle);
zram_set_obj_size(zram, index, PAGE_SIZE);
@@ -1855,11 +1850,6 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
unsigned long element;
bool same_filled;
- /* First, free memory allocated to this slot (if any) */
- zram_slot_lock(zram, index);
- zram_free_page(zram, index);
- zram_slot_unlock(zram, index);
-
mem = kmap_local_page(page);
same_filled = page_same_filled(mem, &element);
kunmap_local(mem);
@@ -1901,6 +1891,7 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
zcomp_stream_put(zstrm);
zram_slot_lock(zram, index);
+ zram_free_page(zram, index);
zram_set_handle(zram, index, handle);
zram_set_obj_size(zram, index, comp_len);
zram_slot_unlock(zram, index);
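
The zram_write_page() change closes a window: previously the old slot contents were freed under one slot-lock section and the new contents stored under a later one, leaving an interval in which the slot was empty but unlocked. Each writer path now frees and repopulates the slot inside a single critical section. A distilled sketch of the pattern (slot layout and helpers are illustrative):

    #include <linux/spinlock.h>

    struct slot {
            spinlock_t lock;
            unsigned long handle;
            int size;
    };

    static void slot_free(struct slot *s)
    {
            s->handle = 0;
            s->size = 0;
    }

    static void slot_update(struct slot *s, unsigned long handle, int size)
    {
            spin_lock(&s->lock);
            slot_free(s);           /* drop the old contents, if any */
            s->handle = handle;     /* publish the new contents */
            s->size = size;         /* all under one lock hold */
            spin_unlock(&s->lock);
    }
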
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 4ab32abf0f48..7df69ccb6600 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -312,7 +312,9 @@ config BT_HCIBCM4377
config BT_HCIBPA10X
tristate "HCI BPA10x USB driver"
+ depends on BT_HCIUART
depends on USB
+ select BT_HCIUART_H4
help
Bluetooth HCI BPA10x USB driver.
This driver provides support for the Digianswer BPA 100/105 Bluetooth
@@ -437,8 +439,10 @@ config BT_MTKSDIO
config BT_MTKUART
tristate "MediaTek HCI UART driver"
+ depends on BT_HCIUART
depends on SERIAL_DEV_BUS
depends on USB || !BT_HCIBTUSB_MTK
+ select BT_HCIUART_H4
select BT_MTK
help
MediaTek Bluetooth HCI UART driver.
@@ -483,7 +487,9 @@ config BT_VIRTIO
config BT_NXPUART
tristate "NXP protocol support"
+ depends on BT_HCIUART
depends on SERIAL_DEV_BUS
+ select BT_HCIUART_H4
select CRC32
select CRC8
help
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 8b43dfc755de..b7ba667a3d09 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -20,7 +20,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#include "h4_recv.h"
+#include "hci_uart.h"
#define VERSION "0.11"
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index be69d21c9aa7..9d29ab811f80 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -484,6 +484,7 @@ int btintel_version_info_tlv(struct hci_dev *hdev,
case 0x1d: /* BlazarU (BzrU) */
case 0x1e: /* BlazarI (Bzr) */
case 0x1f: /* Scorpious Peak */
+ case 0x22: /* BlazarIW (BzrIW) */
break;
default:
bt_dev_err(hdev, "Unsupported Intel hardware variant (0x%x)",
@@ -3253,6 +3254,7 @@ void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant)
case 0x1d:
case 0x1e:
case 0x1f:
+ case 0x22:
hci_set_msft_opcode(hdev, 0xFC1E);
break;
default:
@@ -3593,6 +3595,7 @@ static int btintel_setup_combined(struct hci_dev *hdev)
case 0x1d:
case 0x1e:
case 0x1f:
+ case 0x22:
/* Display version information of TLV type */
btintel_version_info_tlv(hdev, &ver_tlv);
diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
index 6e7bbbd35279..6d3963bd56a9 100644
--- a/drivers/bluetooth/btintel_pcie.c
+++ b/drivers/bluetooth/btintel_pcie.c
@@ -15,6 +15,7 @@
#include <linux/interrupt.h>
#include <linux/unaligned.h>
+#include <linux/devcoredump.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -35,8 +36,13 @@
/* Intel Bluetooth PCIe device id table */
static const struct pci_device_id btintel_pcie_table[] = {
+ /* BlazarI, Wildcat Lake */
{ BTINTEL_PCI_DEVICE(0x4D76, PCI_ANY_ID) },
+ /* BlazarI, Lunar Lake */
{ BTINTEL_PCI_DEVICE(0xA876, PCI_ANY_ID) },
+ /* Scorpious, Panther Lake-H484 */
+ { BTINTEL_PCI_DEVICE(0xE376, PCI_ANY_ID) },
+ /* Scorpious, Panther Lake-H404 */
{ BTINTEL_PCI_DEVICE(0xE476, PCI_ANY_ID) },
{ 0 }
};
@@ -554,25 +560,6 @@ static void btintel_pcie_mac_init(struct btintel_pcie_data *data)
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
}
-static int btintel_pcie_add_dmp_data(struct hci_dev *hdev, const void *data, int size)
-{
- struct sk_buff *skb;
- int err;
-
- skb = alloc_skb(size, GFP_ATOMIC);
- if (!skb)
- return -ENOMEM;
-
- skb_put_data(skb, data, size);
- err = hci_devcd_append(hdev, skb);
- if (err) {
- bt_dev_err(hdev, "Failed to append data in the coredump");
- return err;
- }
-
- return 0;
-}
-
static int btintel_pcie_get_mac_access(struct btintel_pcie_data *data)
{
u32 reg;
@@ -617,30 +604,35 @@ static void btintel_pcie_release_mac_access(struct btintel_pcie_data *data)
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
}
-static void btintel_pcie_copy_tlv(struct sk_buff *skb, enum btintel_pcie_tlv_type type,
- void *data, int size)
+static void *btintel_pcie_copy_tlv(void *dest, enum btintel_pcie_tlv_type type,
+ void *data, size_t size)
{
struct intel_tlv *tlv;
- tlv = skb_put(skb, sizeof(*tlv) + size);
+ tlv = dest;
tlv->type = type;
tlv->len = size;
memcpy(tlv->val, data, tlv->len);
+ return dest + sizeof(*tlv) + size;
}
static int btintel_pcie_read_dram_buffers(struct btintel_pcie_data *data)
{
- u32 offset, prev_size, wr_ptr_status, dump_size, i;
+ u32 offset, prev_size, wr_ptr_status, dump_size, data_len;
struct btintel_pcie_dbgc *dbgc = &data->dbgc;
- u8 buf_idx, dump_time_len, fw_build;
struct hci_dev *hdev = data->hdev;
+ u8 *pdata, *p, buf_idx;
struct intel_tlv *tlv;
struct timespec64 now;
- struct sk_buff *skb;
struct tm tm_now;
- char buf[256];
- u16 hdr_len;
- int ret;
+ char fw_build[128];
+ char ts[128];
+ char vendor[64];
+ char driver[64];
+
+ if (!IS_ENABLED(CONFIG_DEV_COREDUMP))
+ return -EOPNOTSUPP;
+
wr_ptr_status = btintel_pcie_rd_dev_mem(data, BTINTEL_PCIE_DBGC_CUR_DBGBUFF_STATUS);
offset = wr_ptr_status & BTINTEL_PCIE_DBG_OFFSET_BIT_MASK;
@@ -657,88 +649,84 @@ static int btintel_pcie_read_dram_buffers(struct btintel_pcie_data *data)
else
return -EINVAL;
+ snprintf(vendor, sizeof(vendor), "Vendor: Intel\n");
+ snprintf(driver, sizeof(driver), "Driver: %s\n",
+ data->dmp_hdr.driver_name);
+
ktime_get_real_ts64(&now);
time64_to_tm(now.tv_sec, 0, &tm_now);
- dump_time_len = snprintf(buf, sizeof(buf), "Dump Time: %02d-%02d-%04ld %02d:%02d:%02d",
+ snprintf(ts, sizeof(ts), "Dump Time: %02d-%02d-%04ld %02d:%02d:%02d",
tm_now.tm_mday, tm_now.tm_mon + 1, tm_now.tm_year + 1900,
tm_now.tm_hour, tm_now.tm_min, tm_now.tm_sec);
- fw_build = snprintf(buf + dump_time_len, sizeof(buf) - dump_time_len,
+ snprintf(fw_build, sizeof(fw_build),
"Firmware Timestamp: Year %u WW %02u buildtype %u build %u",
2000 + (data->dmp_hdr.fw_timestamp >> 8),
data->dmp_hdr.fw_timestamp & 0xff, data->dmp_hdr.fw_build_type,
data->dmp_hdr.fw_build_num);
- hdr_len = sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_bt) +
- sizeof(*tlv) + sizeof(data->dmp_hdr.write_ptr) +
- sizeof(*tlv) + sizeof(data->dmp_hdr.wrap_ctr) +
- sizeof(*tlv) + sizeof(data->dmp_hdr.trigger_reason) +
- sizeof(*tlv) + sizeof(data->dmp_hdr.fw_git_sha1) +
- sizeof(*tlv) + sizeof(data->dmp_hdr.cnvr_top) +
- sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_top) +
- sizeof(*tlv) + dump_time_len +
- sizeof(*tlv) + fw_build;
+ data_len = sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_bt) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.write_ptr) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.wrap_ctr) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.trigger_reason) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.fw_git_sha1) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.cnvr_top) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_top) +
+ sizeof(*tlv) + strlen(ts) +
+ sizeof(*tlv) + strlen(fw_build) +
+ sizeof(*tlv) + strlen(vendor) +
+ sizeof(*tlv) + strlen(driver);
- dump_size = hdr_len + sizeof(hdr_len);
+ /*
+ * sizeof(u32) - signature
+ * sizeof(data_len) - to store tlv data size
+ * data_len - TLV data
+ */
+ dump_size = sizeof(u32) + sizeof(data_len) + data_len;
- skb = alloc_skb(dump_size, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
/* Add debug buffers data length to dump size */
dump_size += BTINTEL_PCIE_DBGC_BUFFER_SIZE * dbgc->count;
- ret = hci_devcd_init(hdev, dump_size);
- if (ret) {
- bt_dev_err(hdev, "Failed to init devcoredump, err %d", ret);
- kfree_skb(skb);
- return ret;
- }
+ pdata = vmalloc(dump_size);
+ if (!pdata)
+ return -ENOMEM;
+ p = pdata;
- skb_put_data(skb, &hdr_len, sizeof(hdr_len));
+ *(u32 *)p = BTINTEL_PCIE_MAGIC_NUM;
+ p += sizeof(u32);
- btintel_pcie_copy_tlv(skb, BTINTEL_CNVI_BT, &data->dmp_hdr.cnvi_bt,
- sizeof(data->dmp_hdr.cnvi_bt));
+ *(u32 *)p = data_len;
+ p += sizeof(u32);
- btintel_pcie_copy_tlv(skb, BTINTEL_WRITE_PTR, &data->dmp_hdr.write_ptr,
- sizeof(data->dmp_hdr.write_ptr));
+
+ p = btintel_pcie_copy_tlv(p, BTINTEL_VENDOR, vendor, strlen(vendor));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_DRIVER, driver, strlen(driver));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_DUMP_TIME, ts, strlen(ts));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_FW_BUILD, fw_build,
+ strlen(fw_build));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_CNVI_BT, &data->dmp_hdr.cnvi_bt,
+ sizeof(data->dmp_hdr.cnvi_bt));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_WRITE_PTR, &data->dmp_hdr.write_ptr,
+ sizeof(data->dmp_hdr.write_ptr));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_WRAP_CTR, &data->dmp_hdr.wrap_ctr,
+ sizeof(data->dmp_hdr.wrap_ctr));
data->dmp_hdr.wrap_ctr = btintel_pcie_rd_dev_mem(data,
BTINTEL_PCIE_DBGC_DBGBUFF_WRAP_ARND);
- btintel_pcie_copy_tlv(skb, BTINTEL_WRAP_CTR, &data->dmp_hdr.wrap_ctr,
- sizeof(data->dmp_hdr.wrap_ctr));
-
- btintel_pcie_copy_tlv(skb, BTINTEL_TRIGGER_REASON, &data->dmp_hdr.trigger_reason,
- sizeof(data->dmp_hdr.trigger_reason));
-
- btintel_pcie_copy_tlv(skb, BTINTEL_FW_SHA, &data->dmp_hdr.fw_git_sha1,
- sizeof(data->dmp_hdr.fw_git_sha1));
-
- btintel_pcie_copy_tlv(skb, BTINTEL_CNVR_TOP, &data->dmp_hdr.cnvr_top,
- sizeof(data->dmp_hdr.cnvr_top));
-
- btintel_pcie_copy_tlv(skb, BTINTEL_CNVI_TOP, &data->dmp_hdr.cnvi_top,
- sizeof(data->dmp_hdr.cnvi_top));
-
- btintel_pcie_copy_tlv(skb, BTINTEL_DUMP_TIME, buf, dump_time_len);
-
- btintel_pcie_copy_tlv(skb, BTINTEL_FW_BUILD, buf + dump_time_len, fw_build);
-
- ret = hci_devcd_append(hdev, skb);
- if (ret)
- goto exit_err;
-
- for (i = 0; i < dbgc->count; i++) {
- ret = btintel_pcie_add_dmp_data(hdev, dbgc->bufs[i].data,
- BTINTEL_PCIE_DBGC_BUFFER_SIZE);
- if (ret)
- break;
- }
-
-exit_err:
- hci_devcd_complete(hdev);
- return ret;
+ p = btintel_pcie_copy_tlv(p, BTINTEL_TRIGGER_REASON, &data->dmp_hdr.trigger_reason,
+ sizeof(data->dmp_hdr.trigger_reason));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_FW_SHA, &data->dmp_hdr.fw_git_sha1,
+ sizeof(data->dmp_hdr.fw_git_sha1));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_CNVR_TOP, &data->dmp_hdr.cnvr_top,
+ sizeof(data->dmp_hdr.cnvr_top));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_CNVI_TOP, &data->dmp_hdr.cnvi_top,
+ sizeof(data->dmp_hdr.cnvi_top));
+
+ memcpy(p, dbgc->bufs[0].data, dbgc->count * BTINTEL_PCIE_DBGC_BUFFER_SIZE);
+ dev_coredumpv(&hdev->dev, pdata, dump_size, GFP_KERNEL);
+ return 0;
}
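
The devcoredump path above serializes its header as a chain of TLV records, each appended through btintel_pcie_copy_tlv(), which returns the advanced write pointer. A minimal stand-alone sketch of that append-and-advance pattern, assuming a packed type/len header like struct intel_tlv; the helper name is illustrative:

#include <stdint.h>
#include <string.h>

struct tlv_hdr {
	uint8_t type;
	uint8_t len;
} __attribute__((packed));

/* Append one TLV at p and return the advanced write pointer, the same
 * chaining style as the btintel_pcie_copy_tlv() calls above.
 */
static uint8_t *tlv_append(uint8_t *p, uint8_t type, const void *val,
			   uint8_t len)
{
	struct tlv_hdr hdr = { .type = type, .len = len };

	memcpy(p, &hdr, sizeof(hdr));
	memcpy(p + sizeof(hdr), val, len);
	return p + sizeof(hdr) + len;
}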
static void btintel_pcie_dump_traces(struct hci_dev *hdev)
@@ -760,51 +748,6 @@ static void btintel_pcie_dump_traces(struct hci_dev *hdev)
bt_dev_err(hdev, "Failed to dump traces: (%d)", ret);
}
-static void btintel_pcie_dump_hdr(struct hci_dev *hdev, struct sk_buff *skb)
-{
- struct btintel_pcie_data *data = hci_get_drvdata(hdev);
- u16 len = skb->len;
- u16 *hdrlen_ptr;
- char buf[80];
-
- hdrlen_ptr = skb_put_zero(skb, sizeof(len));
-
- snprintf(buf, sizeof(buf), "Controller Name: 0x%X\n",
- INTEL_HW_VARIANT(data->dmp_hdr.cnvi_bt));
- skb_put_data(skb, buf, strlen(buf));
-
- snprintf(buf, sizeof(buf), "Firmware Build Number: %u\n",
- data->dmp_hdr.fw_build_num);
- skb_put_data(skb, buf, strlen(buf));
-
- snprintf(buf, sizeof(buf), "Driver: %s\n", data->dmp_hdr.driver_name);
- skb_put_data(skb, buf, strlen(buf));
-
- snprintf(buf, sizeof(buf), "Vendor: Intel\n");
- skb_put_data(skb, buf, strlen(buf));
-
- *hdrlen_ptr = skb->len - len;
-}
-
-static void btintel_pcie_dump_notify(struct hci_dev *hdev, int state)
-{
- struct btintel_pcie_data *data = hci_get_drvdata(hdev);
-
- switch (state) {
- case HCI_DEVCOREDUMP_IDLE:
- data->dmp_hdr.state = HCI_DEVCOREDUMP_IDLE;
- break;
- case HCI_DEVCOREDUMP_ACTIVE:
- data->dmp_hdr.state = HCI_DEVCOREDUMP_ACTIVE;
- break;
- case HCI_DEVCOREDUMP_TIMEOUT:
- case HCI_DEVCOREDUMP_ABORT:
- case HCI_DEVCOREDUMP_DONE:
- data->dmp_hdr.state = HCI_DEVCOREDUMP_IDLE;
- break;
- }
-}
-
/* This function enables BT function by setting BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT bit in
* BTINTEL_PCIE_CSR_FUNC_CTRL_REG register and wait for MSI-X with
* BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0.
@@ -1378,6 +1321,11 @@ static void btintel_pcie_rx_work(struct work_struct *work)
struct btintel_pcie_data, rx_work);
struct sk_buff *skb;
+ if (test_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags)) {
+ btintel_pcie_dump_traces(data->hdev);
+ clear_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags);
+ }
+
if (test_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags)) {
/* Unlike usb products, controller will not send hardware
* exception event on exception. Instead controller writes the
@@ -1390,11 +1338,6 @@ static void btintel_pcie_rx_work(struct work_struct *work)
clear_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags);
}
- if (test_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags)) {
- btintel_pcie_dump_traces(data->hdev);
- clear_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags);
- }
-
/* Process the sk_buf in queue and send to the HCI layer */
while ((skb = skb_dequeue(&data->rx_skb_q))) {
btintel_pcie_recv_frame(data, skb);
@@ -2149,6 +2092,7 @@ static int btintel_pcie_setup_internal(struct hci_dev *hdev)
switch (INTEL_HW_VARIANT(ver_tlv.cnvi_bt)) {
case 0x1e: /* BzrI */
case 0x1f: /* ScP */
+ case 0x22: /* BzrIW */
/* Display version information of TLV type */
btintel_version_info_tlv(hdev, &ver_tlv);
@@ -2184,13 +2128,6 @@ static int btintel_pcie_setup_internal(struct hci_dev *hdev)
if (ver_tlv.img_type == 0x02 || ver_tlv.img_type == 0x03)
data->dmp_hdr.fw_git_sha1 = ver_tlv.git_sha1;
- err = hci_devcd_register(hdev, btintel_pcie_dump_traces, btintel_pcie_dump_hdr,
- btintel_pcie_dump_notify);
- if (err) {
- bt_dev_err(hdev, "Failed to register coredump (%d)", err);
- goto exit_error;
- }
-
btintel_print_fseq_info(hdev);
exit_error:
kfree_skb(skb);
@@ -2236,6 +2173,7 @@ btintel_pcie_get_recovery(struct pci_dev *pdev, struct device *dev)
{
struct btintel_pcie_dev_recovery *tmp, *data = NULL;
const char *name = pci_name(pdev);
+ const size_t name_len = strlen(name) + 1;
struct hci_dev *hdev = to_hci_dev(dev);
spin_lock(&btintel_pcie_recovery_lock);
@@ -2252,11 +2190,11 @@ btintel_pcie_get_recovery(struct pci_dev *pdev, struct device *dev)
return data;
}
- data = kzalloc(struct_size(data, name, strlen(name) + 1), GFP_ATOMIC);
+ data = kzalloc(struct_size(data, name, name_len), GFP_ATOMIC);
if (!data)
return NULL;
- strscpy_pad(data->name, name, strlen(name) + 1);
+ strscpy(data->name, name, name_len);
spin_lock(&btintel_pcie_recovery_lock);
list_add_tail(&data->list, &btintel_pcie_recovery_list);
spin_unlock(&btintel_pcie_recovery_lock);
@@ -2319,7 +2257,6 @@ static void btintel_pcie_removal_work(struct work_struct *wk)
btintel_pcie_synchronize_irqs(data);
flush_work(&data->rx_work);
- flush_work(&data->hdev->dump.dump_rx);
bt_dev_dbg(data->hdev, "Release bluetooth interface");
btintel_pcie_release_hdev(data);
@@ -2410,6 +2347,13 @@ static void btintel_pcie_hw_error(struct hci_dev *hdev, u8 code)
btintel_pcie_reset(hdev);
}
+static bool btintel_pcie_wakeup(struct hci_dev *hdev)
+{
+ struct btintel_pcie_data *data = hci_get_drvdata(hdev);
+
+ return device_may_wakeup(&data->pdev->dev);
+}
+
static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data)
{
int err;
@@ -2435,6 +2379,7 @@ static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data)
hdev->set_diag = btintel_set_diag;
hdev->set_bdaddr = btintel_set_bdaddr;
hdev->reset = btintel_pcie_reset;
+ hdev->wakeup = btintel_pcie_wakeup;
err = hci_register_dev(hdev);
if (err < 0) {
@@ -2573,11 +2518,100 @@ static void btintel_pcie_coredump(struct device *dev)
}
#endif
+static int btintel_pcie_suspend_late(struct device *dev, pm_message_t mesg)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct btintel_pcie_data *data;
+ ktime_t start;
+ u32 dxstate;
+ int err;
+
+ data = pci_get_drvdata(pdev);
+
+ dxstate = (mesg.event == PM_EVENT_SUSPEND ?
+ BTINTEL_PCIE_STATE_D3_HOT : BTINTEL_PCIE_STATE_D3_COLD);
+
+ data->gp0_received = false;
+
+ start = ktime_get();
+
+ /* Refer: 6.4.11.7 -> Platform power management */
+ btintel_pcie_wr_sleep_cntrl(data, dxstate);
+ err = wait_event_timeout(data->gp0_wait_q, data->gp0_received,
+ msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
+ if (err == 0) {
+ bt_dev_err(data->hdev,
+ "Timeout (%u ms) on alive interrupt for D3 entry",
+ BTINTEL_DEFAULT_INTR_TIMEOUT_MS);
+ return -EBUSY;
+ }
+
+ bt_dev_dbg(data->hdev,
+ "device entered into d3 state from d0 in %lld us",
+ ktime_to_us(ktime_get() - start));
+
+ return 0;
+}
+
+static int btintel_pcie_suspend(struct device *dev)
+{
+ return btintel_pcie_suspend_late(dev, PMSG_SUSPEND);
+}
+
+static int btintel_pcie_hibernate(struct device *dev)
+{
+ return btintel_pcie_suspend_late(dev, PMSG_HIBERNATE);
+}
+
+static int btintel_pcie_freeze(struct device *dev)
+{
+ return btintel_pcie_suspend_late(dev, PMSG_FREEZE);
+}
+
+static int btintel_pcie_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct btintel_pcie_data *data;
+ ktime_t start;
+ int err;
+
+ data = pci_get_drvdata(pdev);
+ data->gp0_received = false;
+
+ start = ktime_get();
+
+ /* Refer: 6.4.11.7 -> Platform power management */
+ btintel_pcie_wr_sleep_cntrl(data, BTINTEL_PCIE_STATE_D0);
+ err = wait_event_timeout(data->gp0_wait_q, data->gp0_received,
+ msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
+ if (err == 0) {
+ bt_dev_err(data->hdev,
+ "Timeout (%u ms) on alive interrupt for D0 entry",
+ BTINTEL_DEFAULT_INTR_TIMEOUT_MS);
+ return -EBUSY;
+ }
+
+ bt_dev_dbg(data->hdev,
+ "device entered into d0 state from d3 in %lld us",
+ ktime_to_us(ktime_get() - start));
+ return 0;
+}
+
+static const struct dev_pm_ops btintel_pcie_pm_ops = {
+ .suspend = btintel_pcie_suspend,
+ .resume = btintel_pcie_resume,
+ .freeze = btintel_pcie_freeze,
+ .thaw = btintel_pcie_resume,
+ .poweroff = btintel_pcie_hibernate,
+ .restore = btintel_pcie_resume,
+};
+
static struct pci_driver btintel_pcie_driver = {
.name = KBUILD_MODNAME,
.id_table = btintel_pcie_table,
.probe = btintel_pcie_probe,
.remove = btintel_pcie_remove,
+ .driver.pm = pm_sleep_ptr(&btintel_pcie_pm_ops),
#ifdef CONFIG_DEV_COREDUMP
.driver.coredump = btintel_pcie_coredump
#endif
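
pm_sleep_ptr() lets btintel_pcie_pm_ops be defined unconditionally: when CONFIG_PM_SLEEP is disabled the pointer compiles away to NULL. Its effective semantics, shown for reference (the real macro in <linux/pm.h> is written with PTR_IF()/IS_ENABLED()):

#ifdef CONFIG_PM_SLEEP
#define pm_sleep_ptr(_ptr)	(_ptr)
#else
#define pm_sleep_ptr(_ptr)	NULL
#endif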
diff --git a/drivers/bluetooth/btintel_pcie.h b/drivers/bluetooth/btintel_pcie.h
index 0fa876c5b954..04b21f968ad3 100644
--- a/drivers/bluetooth/btintel_pcie.h
+++ b/drivers/bluetooth/btintel_pcie.h
@@ -132,6 +132,8 @@ enum btintel_pcie_tlv_type {
BTINTEL_CNVI_TOP,
BTINTEL_DUMP_TIME,
BTINTEL_FW_BUILD,
+ BTINTEL_VENDOR,
+ BTINTEL_DRIVER
};
/* causes for the MBOX interrupts */
diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
index 4390fd571dbd..a8c520dc09e1 100644
--- a/drivers/bluetooth/btmtk.c
+++ b/drivers/bluetooth/btmtk.c
@@ -642,12 +642,7 @@ static int btmtk_usb_hci_wmt_sync(struct hci_dev *hdev,
* WMT command.
*/
err = wait_on_bit_timeout(&data->flags, BTMTK_TX_WAIT_VND_EVT,
- TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
- if (err == -EINTR) {
- bt_dev_err(hdev, "Execution of wmt command interrupted");
- clear_bit(BTMTK_TX_WAIT_VND_EVT, &data->flags);
- goto err_free_wc;
- }
+ TASK_UNINTERRUPTIBLE, HCI_INIT_TIMEOUT);
if (err) {
bt_dev_err(hdev, "Execution of wmt command timed out");
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index 4fc673640bfc..50abefba6d04 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -29,7 +29,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#include "h4_recv.h"
+#include "hci_uart.h"
#include "btmtk.h"
#define VERSION "0.1"
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index 76995cfcd534..d9b90ea2ad38 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -27,7 +27,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#include "h4_recv.h"
+#include "hci_uart.h"
#include "btmtk.h"
#define VERSION "0.2"
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index 73a4a325c867..d5153fed0518 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -24,7 +24,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#include "h4_recv.h"
+#include "hci_uart.h"
#define MANUFACTURER_NXP 37
@@ -543,10 +543,10 @@ static int ps_setup(struct hci_dev *hdev)
}
if (psdata->wakeup_source) {
- ret = devm_request_irq(&serdev->dev, psdata->irq_handler,
- ps_host_wakeup_irq_handler,
- IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
- dev_name(&serdev->dev), nxpdev);
+ ret = devm_request_threaded_irq(&serdev->dev, psdata->irq_handler,
+ NULL, ps_host_wakeup_irq_handler,
+ IRQF_ONESHOT,
+ dev_name(&serdev->dev), nxpdev);
if (ret)
bt_dev_info(hdev, "error setting wakeup IRQ handler, ignoring\n");
disable_irq(psdata->irq_handler);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 8085fabadde8..5e9ebf0c5312 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -66,6 +66,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_INTEL_BROKEN_INITIAL_NCMD BIT(25)
#define BTUSB_INTEL_NO_WBS_SUPPORT BIT(26)
#define BTUSB_ACTIONS_SEMI BIT(27)
+#define BTUSB_BARROT BIT(28)
static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@@ -522,6 +523,8 @@ static const struct usb_device_id quirks_table[] = {
/* Realtek 8851BU Bluetooth devices */
{ USB_DEVICE(0x3625, 0x010b), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x2001, 0x332a), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Realtek 8852AE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0x2852), .driver_info = BTUSB_REALTEK |
@@ -698,6 +701,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3615), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3633), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x35f5, 0x7922), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
@@ -732,6 +737,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3613), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3627), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3628), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3630), .driver_info = BTUSB_MEDIATEK |
@@ -810,6 +817,10 @@ static const struct usb_device_id quirks_table[] = {
{ USB_DEVICE(0x0cb5, 0xc547), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ /* Barrot Technology Bluetooth devices */
+ { USB_DEVICE(0x33fa, 0x0010), .driver_info = BTUSB_BARROT },
+ { USB_DEVICE(0x33fa, 0x0012), .driver_info = BTUSB_BARROT },
+
/* Actions Semiconductor ATS2851 based devices */
{ USB_DEVICE(0x10d7, 0xb012), .driver_info = BTUSB_ACTIONS_SEMI },
@@ -1192,6 +1203,18 @@ static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count)
}
if (!hci_skb_expect(skb)) {
+ /* Each chunk should contain one or more complete
+ * events, so any leftover bytes too short to start
+ * a new event (fewer than HCI_EVENT_HDR_SIZE, i.e.
+ * two bytes) likely indicate a controller bug.
+ */
+ if (count && count < HCI_EVENT_HDR_SIZE) {
+ bt_dev_warn(data->hdev,
+ "Unexpected continuation: %d bytes",
+ count);
+ count = 0;
+ }
+
/* Complete frame */
btusb_recv_event(data, skb);
skb = NULL;
diff --git a/drivers/bluetooth/h4_recv.h b/drivers/bluetooth/h4_recv.h
deleted file mode 100644
index 28cf2d8c2d48..000000000000
--- a/drivers/bluetooth/h4_recv.h
+++ /dev/null
@@ -1,153 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- *
- * Generic Bluetooth HCI UART driver
- *
- * Copyright (C) 2015-2018 Intel Corporation
- */
-
-#include <linux/unaligned.h>
-
-struct h4_recv_pkt {
- u8 type; /* Packet type */
- u8 hlen; /* Header length */
- u8 loff; /* Data length offset in header */
- u8 lsize; /* Data length field size */
- u16 maxlen; /* Max overall packet length */
- int (*recv)(struct hci_dev *hdev, struct sk_buff *skb);
-};
-
-#define H4_RECV_ACL \
- .type = HCI_ACLDATA_PKT, \
- .hlen = HCI_ACL_HDR_SIZE, \
- .loff = 2, \
- .lsize = 2, \
- .maxlen = HCI_MAX_FRAME_SIZE \
-
-#define H4_RECV_SCO \
- .type = HCI_SCODATA_PKT, \
- .hlen = HCI_SCO_HDR_SIZE, \
- .loff = 2, \
- .lsize = 1, \
- .maxlen = HCI_MAX_SCO_SIZE
-
-#define H4_RECV_EVENT \
- .type = HCI_EVENT_PKT, \
- .hlen = HCI_EVENT_HDR_SIZE, \
- .loff = 1, \
- .lsize = 1, \
- .maxlen = HCI_MAX_EVENT_SIZE
-
-#define H4_RECV_ISO \
- .type = HCI_ISODATA_PKT, \
- .hlen = HCI_ISO_HDR_SIZE, \
- .loff = 2, \
- .lsize = 2, \
- .maxlen = HCI_MAX_FRAME_SIZE
-
-static inline struct sk_buff *h4_recv_buf(struct hci_dev *hdev,
- struct sk_buff *skb,
- const unsigned char *buffer,
- int count,
- const struct h4_recv_pkt *pkts,
- int pkts_count)
-{
- /* Check for error from previous call */
- if (IS_ERR(skb))
- skb = NULL;
-
- while (count) {
- int i, len;
-
- if (!skb) {
- for (i = 0; i < pkts_count; i++) {
- if (buffer[0] != (&pkts[i])->type)
- continue;
-
- skb = bt_skb_alloc((&pkts[i])->maxlen,
- GFP_ATOMIC);
- if (!skb)
- return ERR_PTR(-ENOMEM);
-
- hci_skb_pkt_type(skb) = (&pkts[i])->type;
- hci_skb_expect(skb) = (&pkts[i])->hlen;
- break;
- }
-
- /* Check for invalid packet type */
- if (!skb)
- return ERR_PTR(-EILSEQ);
-
- count -= 1;
- buffer += 1;
- }
-
- len = min_t(uint, hci_skb_expect(skb) - skb->len, count);
- skb_put_data(skb, buffer, len);
-
- count -= len;
- buffer += len;
-
- /* Check for partial packet */
- if (skb->len < hci_skb_expect(skb))
- continue;
-
- for (i = 0; i < pkts_count; i++) {
- if (hci_skb_pkt_type(skb) == (&pkts[i])->type)
- break;
- }
-
- if (i >= pkts_count) {
- kfree_skb(skb);
- return ERR_PTR(-EILSEQ);
- }
-
- if (skb->len == (&pkts[i])->hlen) {
- u16 dlen;
-
- switch ((&pkts[i])->lsize) {
- case 0:
- /* No variable data length */
- dlen = 0;
- break;
- case 1:
- /* Single octet variable length */
- dlen = skb->data[(&pkts[i])->loff];
- hci_skb_expect(skb) += dlen;
-
- if (skb_tailroom(skb) < dlen) {
- kfree_skb(skb);
- return ERR_PTR(-EMSGSIZE);
- }
- break;
- case 2:
- /* Double octet variable length */
- dlen = get_unaligned_le16(skb->data +
- (&pkts[i])->loff);
- hci_skb_expect(skb) += dlen;
-
- if (skb_tailroom(skb) < dlen) {
- kfree_skb(skb);
- return ERR_PTR(-EMSGSIZE);
- }
- break;
- default:
- /* Unsupported variable length */
- kfree_skb(skb);
- return ERR_PTR(-EILSEQ);
- }
-
- if (!dlen) {
- /* No more data, complete frame */
- (&pkts[i])->recv(hdev, skb);
- skb = NULL;
- }
- } else {
- /* Complete frame */
- (&pkts[i])->recv(hdev, skb);
- skb = NULL;
- }
- }
-
- return skb;
-}
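
With h4_recv.h gone, the h4_recv_pkt table and h4_recv_buf() live solely in hci_uart.h, which is why the MediaTek and NXP drivers above switch their include. A sketch of the typical consumer, patterned on those drivers; the table name is illustrative:

static const struct h4_recv_pkt demo_recv_pkts[] = {
	{ H4_RECV_ACL,   .recv = hci_recv_frame },
	{ H4_RECV_SCO,   .recv = hci_recv_frame },
	{ H4_RECV_EVENT, .recv = hci_recv_frame },
};

/* in the serdev/tty receive path: */
bdev->rx_skb = h4_recv_buf(bdev->hdev, bdev->rx_skb, data, count,
			   demo_recv_pkts, ARRAY_SIZE(demo_recv_pkts));
if (IS_ERR(bdev->rx_skb)) {
	int err = PTR_ERR(bdev->rx_skb);

	bt_dev_err(bdev->hdev, "Frame reassembly failed (%d)", err);
	bdev->rx_skb = NULL;	/* reset parser state on framing error */
	return err;
}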
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 664d82d1e613..591abe6d63dd 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -582,6 +582,9 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
struct bcsp_struct *bcsp = hu->priv;
const unsigned char *ptr;
+ if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
+ return -EUNATCH;
+
BT_DBG("hu %p count %d rx_state %d rx_count %ld",
hu, count, bcsp->rx_state, bcsp->rx_count);
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 5ea5dd80e297..cbbe79b241ce 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -121,10 +121,6 @@ void hci_uart_set_flow_control(struct hci_uart *hu, bool enable);
void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed,
unsigned int oper_speed);
-#ifdef CONFIG_BT_HCIUART_H4
-int h4_init(void);
-int h4_deinit(void);
-
struct h4_recv_pkt {
u8 type; /* Packet type */
u8 hlen; /* Header length */
@@ -162,6 +158,10 @@ struct h4_recv_pkt {
.lsize = 2, \
.maxlen = HCI_MAX_FRAME_SIZE \
+#ifdef CONFIG_BT_HCIUART_H4
+int h4_init(void);
+int h4_deinit(void);
+
struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
const unsigned char *buffer, int count,
const struct h4_recv_pkt *pkts, int pkts_count);
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index f7d8c3c00655..2fef08254d78 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -380,6 +380,28 @@ static const struct file_operations force_devcoredump_fops = {
.write = force_devcd_write,
};
+static void vhci_debugfs_init(struct vhci_data *data)
+{
+ struct hci_dev *hdev = data->hdev;
+
+ debugfs_create_file("force_suspend", 0644, hdev->debugfs, data,
+ &force_suspend_fops);
+
+ debugfs_create_file("force_wakeup", 0644, hdev->debugfs, data,
+ &force_wakeup_fops);
+
+ if (IS_ENABLED(CONFIG_BT_MSFTEXT))
+ debugfs_create_file("msft_opcode", 0644, hdev->debugfs, data,
+ &msft_opcode_fops);
+
+ if (IS_ENABLED(CONFIG_BT_AOSPEXT))
+ debugfs_create_file("aosp_capable", 0644, hdev->debugfs, data,
+ &aosp_capable_fops);
+
+ debugfs_create_file("force_devcoredump", 0644, hdev->debugfs, data,
+ &force_devcoredump_fops);
+}
+
static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
{
struct hci_dev *hdev;
@@ -434,22 +456,8 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
return -EBUSY;
}
- debugfs_create_file("force_suspend", 0644, hdev->debugfs, data,
- &force_suspend_fops);
-
- debugfs_create_file("force_wakeup", 0644, hdev->debugfs, data,
- &force_wakeup_fops);
-
- if (IS_ENABLED(CONFIG_BT_MSFTEXT))
- debugfs_create_file("msft_opcode", 0644, hdev->debugfs, data,
- &msft_opcode_fops);
-
- if (IS_ENABLED(CONFIG_BT_AOSPEXT))
- debugfs_create_file("aosp_capable", 0644, hdev->debugfs, data,
- &aosp_capable_fops);
-
- debugfs_create_file("force_devcoredump", 0644, hdev->debugfs, data,
- &force_devcoredump_fops);
+ if (!IS_ERR_OR_NULL(hdev->debugfs))
+ vhci_debugfs_init(data);
hci_skb_pkt_type(skb) = HCI_VENDOR_PKT;
@@ -651,6 +659,21 @@ static int vhci_open(struct inode *inode, struct file *file)
return 0;
}
+static void vhci_debugfs_remove(struct hci_dev *hdev)
+{
+ debugfs_lookup_and_remove("force_suspend", hdev->debugfs);
+
+ debugfs_lookup_and_remove("force_wakeup", hdev->debugfs);
+
+ if (IS_ENABLED(CONFIG_BT_MSFTEXT))
+ debugfs_lookup_and_remove("msft_opcode", hdev->debugfs);
+
+ if (IS_ENABLED(CONFIG_BT_AOSPEXT))
+ debugfs_lookup_and_remove("aosp_capable", hdev->debugfs);
+
+ debugfs_lookup_and_remove("force_devcoredump", hdev->debugfs);
+}
+
static int vhci_release(struct inode *inode, struct file *file)
{
struct vhci_data *data = file->private_data;
@@ -662,6 +685,8 @@ static int vhci_release(struct inode *inode, struct file *file)
hdev = data->hdev;
if (hdev) {
+ if (!IS_ERR_OR_NULL(hdev->debugfs))
+ vhci_debugfs_remove(hdev);
hci_unregister_dev(hdev);
hci_free_dev(hdev);
}
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index c1c0a4759c7e..25845c04e562 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -176,8 +176,8 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
{
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
- return sprintf(buf, "fsl-mc:v%08Xd%s\n", mc_dev->obj_desc.vendor,
- mc_dev->obj_desc.type);
+ return sysfs_emit(buf, "fsl-mc:v%08Xd%s\n", mc_dev->obj_desc.vendor,
+ mc_dev->obj_desc.type);
}
static DEVICE_ATTR_RO(modalias);
@@ -203,7 +203,7 @@ static ssize_t driver_override_show(struct device *dev,
{
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", mc_dev->driver_override);
+ return sysfs_emit(buf, "%s\n", mc_dev->driver_override);
}
static DEVICE_ATTR_RW(driver_override);
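
sysfs_emit() is the preferred way to fill show() buffers: it checks that it was handed a page-aligned sysfs buffer and bounds output to PAGE_SIZE, which raw sprintf()/snprintf() cannot guarantee. A minimal sketch with a hypothetical attribute:

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sysfs_emit(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(foo);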
@@ -1104,6 +1104,9 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
* Get physical address of MC portal for the root DPRC:
*/
plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!plat_res)
+ return -EINVAL;
+
mc_portal_phys_addr = plat_res->start;
mc_portal_size = resource_size(plat_res);
mc_portal_base_phys_addr = mc_portal_phys_addr & ~0x3ffffff;
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
index b3eafcf2a2c5..cdea24e92919 100644
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -403,17 +403,13 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
{
struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
struct device *dev = &mhi_cntrl->mhi_dev->dev;
- size_t tr_len, read_offset, write_offset;
+ size_t tr_len, read_offset;
struct mhi_ep_buf_info buf_info = {};
u32 len = MHI_EP_DEFAULT_MTU;
struct mhi_ring_element *el;
- bool tr_done = false;
void *buf_addr;
- u32 buf_left;
int ret;
- buf_left = len;
-
do {
/* Don't process the transfer ring if the channel is not in RUNNING state */
if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
@@ -426,24 +422,23 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
/* Check if there is data pending to be read from previous read operation */
if (mhi_chan->tre_bytes_left) {
dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left);
- tr_len = min(buf_left, mhi_chan->tre_bytes_left);
+ tr_len = min(len, mhi_chan->tre_bytes_left);
} else {
mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el);
mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el);
mhi_chan->tre_bytes_left = mhi_chan->tre_size;
- tr_len = min(buf_left, mhi_chan->tre_size);
+ tr_len = min(len, mhi_chan->tre_size);
}
read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
- write_offset = len - buf_left;
buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL);
if (!buf_addr)
return -ENOMEM;
buf_info.host_addr = mhi_chan->tre_loc + read_offset;
- buf_info.dev_addr = buf_addr + write_offset;
+ buf_info.dev_addr = buf_addr;
buf_info.size = tr_len;
buf_info.cb = mhi_ep_read_completion;
buf_info.cb_buf = buf_addr;
@@ -459,16 +454,12 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
goto err_free_buf_addr;
}
- buf_left -= tr_len;
mhi_chan->tre_bytes_left -= tr_len;
- if (!mhi_chan->tre_bytes_left) {
- if (MHI_TRE_DATA_GET_IEOT(el))
- tr_done = true;
-
+ if (!mhi_chan->tre_bytes_left)
mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
- }
- } while (buf_left && !tr_done);
+ /* Keep reading until the transfer ring becomes empty */
+ } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
return 0;
@@ -502,15 +493,11 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring)
mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
} else {
/* UL channel */
- do {
- ret = mhi_ep_read_channel(mhi_cntrl, ring);
- if (ret < 0) {
- dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
- return ret;
- }
-
- /* Read until the ring becomes empty */
- } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
+ ret = mhi_ep_read_channel(mhi_cntrl, ring);
+ if (ret < 0) {
+ dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
+ return ret;
+ }
}
return 0;
diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c
index 7f72aab38ce9..099be8dd1900 100644
--- a/drivers/bus/mhi/host/init.c
+++ b/drivers/bus/mhi/host/init.c
@@ -194,7 +194,6 @@ static void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
static int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
{
struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
- struct device *dev = &mhi_cntrl->mhi_dev->dev;
unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND;
int i, ret;
@@ -221,7 +220,7 @@ static int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
continue;
if (mhi_event->irq >= mhi_cntrl->nr_irqs) {
- dev_err(dev, "irq %d not available for event ring\n",
+ dev_err(mhi_cntrl->cntrl_dev, "irq %d not available for event ring\n",
mhi_event->irq);
ret = -EINVAL;
goto error_request;
@@ -232,7 +231,7 @@ static int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
irq_flags,
"mhi", mhi_event);
if (ret) {
- dev_err(dev, "Error requesting irq:%d for ev:%d\n",
+ dev_err(mhi_cntrl->cntrl_dev, "Error requesting irq:%d for ev:%d\n",
mhi_cntrl->irq[mhi_event->irq], i);
goto error_request;
}
diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h
index 034be33565b7..7937bb1f742c 100644
--- a/drivers/bus/mhi/host/internal.h
+++ b/drivers/bus/mhi/host/internal.h
@@ -170,6 +170,8 @@ enum mhi_pm_state {
MHI_PM_IN_ERROR_STATE(pm_state))
#define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \
(MHI_PM_M3_ENTER | MHI_PM_M3))
+#define MHI_PM_FATAL_ERROR(pm_state) ((pm_state == MHI_PM_FW_DL_ERR) || \
+ (pm_state >= MHI_PM_SYS_ERR_FAIL))
#define NR_OF_CMD_RINGS 1
#define CMD_EL_PER_RING 128
@@ -403,6 +405,7 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
struct mhi_event *mhi_event, u32 event_quota);
int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
struct mhi_event *mhi_event, u32 event_quota);
+void mhi_uevent_notify(struct mhi_controller *mhi_cntrl, enum mhi_ee_type ee);
/* ISR handlers */
irqreturn_t mhi_irq_handler(int irq_number, void *dev);
diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
index 52bef663e182..861551274319 100644
--- a/drivers/bus/mhi/host/main.c
+++ b/drivers/bus/mhi/host/main.c
@@ -512,6 +512,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
mhi_cntrl->ee = ee;
+ mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
wake_up_all(&mhi_cntrl->state_event);
}
break;
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index 4edb5bb476ba..b188bbf7de04 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -34,28 +34,34 @@
/**
* struct mhi_pci_dev_info - MHI PCI device specific information
* @config: MHI controller configuration
+ * @vf_config: MHI controller configuration for virtual functions (optional)
* @name: name of the PCI module
* @fw: firmware path (if any)
* @edl: emergency download mode firmware path (if any)
* @edl_trigger: capable of triggering EDL mode in the device (if supported)
* @bar_num: PCI base address register to use for MHI MMIO register space
* @dma_data_width: DMA transfer word size (32 or 64 bits)
+ * @vf_dma_data_width: DMA transfer word size for VFs (optional)
* @mru_default: default MRU size for MBIM network packets
* @sideband_wake: Devices using dedicated sideband GPIO for wakeup instead
* of inband wake support (such as sdx24)
* @no_m3: M3 not supported
+ * @reset_on_remove: Set true for devices that require a SoC reset during driver removal
*/
struct mhi_pci_dev_info {
const struct mhi_controller_config *config;
+ const struct mhi_controller_config *vf_config;
const char *name;
const char *fw;
const char *edl;
bool edl_trigger;
unsigned int bar_num;
unsigned int dma_data_width;
+ unsigned int vf_dma_data_width;
unsigned int mru_default;
bool sideband_wake;
bool no_m3;
+ bool reset_on_remove;
};
#define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \
@@ -296,8 +302,10 @@ static const struct mhi_pci_dev_info mhi_qcom_qdu100_info = {
.config = &mhi_qcom_qdu100_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
+ .vf_dma_data_width = 40,
.sideband_wake = false,
.no_m3 = true,
+ .reset_on_remove = true,
};
static const struct mhi_channel_config mhi_qcom_sa8775p_channels[] = {
@@ -917,20 +925,8 @@ static const struct pci_device_id mhi_pci_id_table[] = {
/* Telit FE990A */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2015),
.driver_data = (kernel_ulong_t) &mhi_telit_fe990a_info },
- /* Foxconn T99W696.01, Lenovo Generic SKU */
- { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, PCI_VENDOR_ID_FOXCONN, 0xe142),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w696_info },
- /* Foxconn T99W696.02, Lenovo X1 Carbon SKU */
- { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, PCI_VENDOR_ID_FOXCONN, 0xe143),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w696_info },
- /* Foxconn T99W696.03, Lenovo X1 2in1 SKU */
- { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, PCI_VENDOR_ID_FOXCONN, 0xe144),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w696_info },
- /* Foxconn T99W696.04, Lenovo PRC SKU */
- { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, PCI_VENDOR_ID_FOXCONN, 0xe145),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w696_info },
- /* Foxconn T99W696.00, Foxconn SKU */
- { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, PCI_VENDOR_ID_FOXCONN, 0xe146),
+ /* Foxconn T99W696, all variants */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, PCI_VENDOR_ID_FOXCONN, PCI_ANY_ID),
.driver_data = (kernel_ulong_t) &mhi_foxconn_t99w696_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
@@ -1037,6 +1033,7 @@ struct mhi_pci_device {
struct work_struct recovery_work;
struct timer_list health_check_timer;
unsigned long status;
+ bool reset_on_remove;
};
static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl,
@@ -1092,7 +1089,7 @@ static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl)
struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
u16 vendor = 0;
- if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor))
+ if (pci_read_config_word(pci_physfn(pdev), PCI_VENDOR_ID, &vendor))
return false;
if (vendor == (u16) ~0 || vendor == 0)
@@ -1203,7 +1200,9 @@ static void mhi_pci_recovery_work(struct work_struct *work)
dev_warn(&pdev->dev, "device recovery started\n");
- timer_delete(&mhi_pdev->health_check_timer);
+ if (pdev->is_physfn)
+ timer_delete(&mhi_pdev->health_check_timer);
+
pm_runtime_forbid(&pdev->dev);
/* Clean up MHI state */
@@ -1230,7 +1229,10 @@ static void mhi_pci_recovery_work(struct work_struct *work)
dev_dbg(&pdev->dev, "Recovery completed\n");
set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
- mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+
+ if (pdev->is_physfn)
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+
return;
err_unprepare:
@@ -1301,6 +1303,7 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
const struct mhi_controller_config *mhi_cntrl_config;
struct mhi_pci_device *mhi_pdev;
struct mhi_controller *mhi_cntrl;
+ unsigned int dma_data_width;
int err;
dev_info(&pdev->dev, "MHI PCI device found: %s\n", info->name);
@@ -1311,14 +1314,24 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENOMEM;
INIT_WORK(&mhi_pdev->recovery_work, mhi_pci_recovery_work);
- timer_setup(&mhi_pdev->health_check_timer, health_check, 0);
- mhi_cntrl_config = info->config;
+ if (pdev->is_virtfn && info->vf_config)
+ mhi_cntrl_config = info->vf_config;
+ else
+ mhi_cntrl_config = info->config;
+
+ /* Initialize the health check timer only for physical functions */
+ if (pdev->is_physfn)
+ timer_setup(&mhi_pdev->health_check_timer, health_check, 0);
+
mhi_cntrl = &mhi_pdev->mhi_cntrl;
+ dma_data_width = (pdev->is_virtfn && info->vf_dma_data_width) ?
+ info->vf_dma_data_width : info->dma_data_width;
+
mhi_cntrl->cntrl_dev = &pdev->dev;
mhi_cntrl->iova_start = 0;
- mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(info->dma_data_width);
+ mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(dma_data_width);
mhi_cntrl->fw_image = info->fw;
mhi_cntrl->edl_image = info->edl;
@@ -1330,6 +1343,9 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mhi_cntrl->mru = info->mru_default;
mhi_cntrl->name = info->name;
+ if (pdev->is_physfn)
+ mhi_pdev->reset_on_remove = info->reset_on_remove;
+
if (info->edl_trigger)
mhi_cntrl->edl_trigger = mhi_pci_generic_edl_trigger;
@@ -1339,7 +1355,7 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
}
- err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
+ err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(dma_data_width));
if (err)
return err;
@@ -1376,7 +1392,8 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
/* start health check */
- mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+ if (pdev->is_physfn)
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
/* Allow runtime suspend only if both PME from D3Hot and M3 are supported */
if (pci_pme_capable(pdev, PCI_D3hot) && !(info->no_m3)) {
@@ -1401,7 +1418,10 @@ static void mhi_pci_remove(struct pci_dev *pdev)
struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
- timer_delete_sync(&mhi_pdev->health_check_timer);
+ pci_disable_sriov(pdev);
+
+ if (pdev->is_physfn)
+ timer_delete_sync(&mhi_pdev->health_check_timer);
cancel_work_sync(&mhi_pdev->recovery_work);
if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
@@ -1413,6 +1433,9 @@ static void mhi_pci_remove(struct pci_dev *pdev)
if (pci_pme_capable(pdev, PCI_D3hot))
pm_runtime_get_noresume(&pdev->dev);
+ if (mhi_pdev->reset_on_remove)
+ mhi_soc_reset(mhi_cntrl);
+
mhi_unregister_controller(mhi_cntrl);
}
@@ -1429,7 +1452,8 @@ static void mhi_pci_reset_prepare(struct pci_dev *pdev)
dev_info(&pdev->dev, "reset\n");
- timer_delete(&mhi_pdev->health_check_timer);
+ if (pdev->is_physfn)
+ timer_delete(&mhi_pdev->health_check_timer);
/* Clean up MHI state */
if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
@@ -1474,7 +1498,8 @@ static void mhi_pci_reset_done(struct pci_dev *pdev)
}
set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
- mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+ if (pdev->is_physfn)
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
}
static pci_ers_result_t mhi_pci_error_detected(struct pci_dev *pdev,
@@ -1539,7 +1564,9 @@ static int __maybe_unused mhi_pci_runtime_suspend(struct device *dev)
if (test_and_set_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
return 0;
- timer_delete(&mhi_pdev->health_check_timer);
+ if (pdev->is_physfn)
+ timer_delete(&mhi_pdev->health_check_timer);
+
cancel_work_sync(&mhi_pdev->recovery_work);
if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
@@ -1590,7 +1617,8 @@ static int __maybe_unused mhi_pci_runtime_resume(struct device *dev)
}
/* Resume health check */
- mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+ if (pdev->is_physfn)
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
/* It can be a remote wakeup (no mhi runtime_get), update access time */
pm_runtime_mark_last_busy(dev);
@@ -1676,7 +1704,8 @@ static struct pci_driver mhi_pci_driver = {
.remove = mhi_pci_remove,
.shutdown = mhi_pci_shutdown,
.err_handler = &mhi_pci_err_handler,
- .driver.pm = &mhi_pci_pm_ops
+ .driver.pm = &mhi_pci_pm_ops,
+ .sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(mhi_pci_driver);
diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c
index 33d92bf2fc3e..b4ef115189b5 100644
--- a/drivers/bus/mhi/host/pm.c
+++ b/drivers/bus/mhi/host/pm.c
@@ -418,6 +418,7 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
mhi_destroy_device);
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);
+ mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
/* Force MHI to be in M0 state before continuing */
ret = __mhi_device_get_sync(mhi_cntrl);
@@ -631,6 +632,8 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
/* Wake up threads waiting for state transition */
wake_up_all(&mhi_cntrl->state_event);
+ mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
+
if (MHI_REG_ACCESS_VALID(prev_state)) {
/*
* If the device is in PBL or SBL, it will only respond to
@@ -829,6 +832,8 @@ void mhi_pm_st_worker(struct work_struct *work)
mhi_create_devices(mhi_cntrl);
if (mhi_cntrl->fbc_download)
mhi_download_amss_image(mhi_cntrl);
+
+ mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
break;
case DEV_ST_TRANSITION_MISSION_MODE:
mhi_pm_mission_mode_transition(mhi_cntrl);
@@ -838,6 +843,7 @@ void mhi_pm_st_worker(struct work_struct *work)
mhi_cntrl->ee = MHI_EE_FP;
write_unlock_irq(&mhi_cntrl->pm_lock);
mhi_create_devices(mhi_cntrl);
+ mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
break;
case DEV_ST_TRANSITION_READY:
mhi_ready_state_transition(mhi_cntrl);
@@ -1240,6 +1246,8 @@ static void __mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful,
write_unlock_irq(&mhi_cntrl->pm_lock);
mutex_unlock(&mhi_cntrl->pm_mutex);
+ mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
+
if (destroy_device)
mhi_queue_state_transition(mhi_cntrl,
DEV_ST_TRANSITION_DISABLE_DESTROY_DEVICE);
@@ -1279,7 +1287,7 @@ int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms;
wait_event_timeout(mhi_cntrl->state_event,
MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
- MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ MHI_PM_FATAL_ERROR(mhi_cntrl->pm_state),
msecs_to_jiffies(timeout_ms));
ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
@@ -1338,3 +1346,22 @@ void mhi_device_put(struct mhi_device *mhi_dev)
read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_put);
+
+void mhi_uevent_notify(struct mhi_controller *mhi_cntrl, enum mhi_ee_type ee)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ char *buf[2];
+ int ret;
+
+ buf[0] = kasprintf(GFP_KERNEL, "EXEC_ENV=%s", TO_MHI_EXEC_STR(ee));
+ buf[1] = NULL;
+
+ if (!buf[0])
+ return;
+
+ ret = kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, buf);
+ if (ret)
+ dev_err(dev, "Failed to send %s uevent\n", TO_MHI_EXEC_STR(ee));
+
+ kfree(buf[0]);
+}
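
With these hooks in place, every execution-environment transition reaches userspace as a KOBJ_CHANGE uevent on the MHI device carrying one extra variable, for example (value produced by TO_MHI_EXEC_STR(); exact strings vary by EE):

	ACTION=change
	EXEC_ENV=MISSION MODE

so a udev rule or monitoring daemon can match on ENV{EXEC_ENV} to track the modem's state.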
diff --git a/drivers/cache/sifive_ccache.c b/drivers/cache/sifive_ccache.c
index e1a283805ea7..a86800b123b9 100644
--- a/drivers/cache/sifive_ccache.c
+++ b/drivers/cache/sifive_ccache.c
@@ -151,16 +151,16 @@ static void ccache_flush_range(phys_addr_t start, size_t len)
if (!len)
return;
- mb();
+ mb(); /* complete earlier memory accesses before the cache flush */
for (line = ALIGN_DOWN(start, SIFIVE_CCACHE_LINE_SIZE); line < end;
line += SIFIVE_CCACHE_LINE_SIZE) {
#ifdef CONFIG_32BIT
- writel(line >> 4, ccache_base + SIFIVE_CCACHE_FLUSH32);
+ writel_relaxed(line >> 4, ccache_base + SIFIVE_CCACHE_FLUSH32);
#else
- writeq(line, ccache_base + SIFIVE_CCACHE_FLUSH64);
+ writeq_relaxed(line, ccache_base + SIFIVE_CCACHE_FLUSH64);
#endif
- mb();
}
+ mb(); /* issue later memory accesses after the cache flush */
}
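
The rewrite issues the per-line flush writes with relaxed MMIO accessors and fences once on each side of the loop instead of after every write. The general shape of that pattern, as a sketch with hypothetical names:

#include <linux/io.h>

#define DEMO_LINE_SZ	64	/* hypothetical cache line size */

static void demo_flush_range(void __iomem *flush_reg, phys_addr_t start,
			     phys_addr_t end)
{
	phys_addr_t line;

	mb();	/* order earlier memory accesses before the flush batch */
	for (line = start; line < end; line += DEMO_LINE_SZ)
		writeq_relaxed(line, flush_reg);	/* no per-write barrier */
	mb();	/* order the batch before later memory accesses */
}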
static const struct riscv_nonstd_cache_ops ccache_mgmt_ops __initconst = {
diff --git a/drivers/cdx/Kconfig b/drivers/cdx/Kconfig
index 3af41f51cf38..1f1e360507d7 100644
--- a/drivers/cdx/Kconfig
+++ b/drivers/cdx/Kconfig
@@ -8,7 +8,6 @@
config CDX_BUS
bool "CDX Bus driver"
depends on OF && ARM64 || COMPILE_TEST
- select GENERIC_MSI_IRQ
help
Driver to enable Composable DMA Transfer(CDX) Bus. CDX bus
exposes Fabric devices which uses composable DMA IP to the
diff --git a/drivers/cdx/cdx.c b/drivers/cdx/cdx.c
index 092306ca2541..3d50f8cd9c0b 100644
--- a/drivers/cdx/cdx.c
+++ b/drivers/cdx/cdx.c
@@ -310,7 +310,7 @@ static int cdx_probe(struct device *dev)
* Setup MSI device data so that generic MSI alloc/free can
* be used by the device driver.
*/
- if (cdx->msi_domain) {
+ if (IS_ENABLED(CONFIG_GENERIC_MSI_IRQ) && cdx->msi_domain) {
error = msi_setup_device_data(&cdx_dev->dev);
if (error)
return error;
@@ -833,7 +833,7 @@ int cdx_device_add(struct cdx_dev_params *dev_params)
((cdx->id << CDX_CONTROLLER_ID_SHIFT) | (cdx_dev->bus_num & CDX_BUS_NUM_MASK)),
cdx_dev->dev_num);
- if (cdx->msi_domain) {
+ if (IS_ENABLED(CONFIG_GENERIC_MSI_IRQ) && cdx->msi_domain) {
cdx_dev->num_msi = dev_params->num_msi;
dev_set_msi_domain(&cdx_dev->dev, cdx->msi_domain);
}
diff --git a/drivers/cdx/cdx_msi.c b/drivers/cdx/cdx_msi.c
index 3388a5d1462c..91b95422b263 100644
--- a/drivers/cdx/cdx_msi.c
+++ b/drivers/cdx/cdx_msi.c
@@ -174,6 +174,7 @@ struct irq_domain *cdx_msi_domain_init(struct device *dev)
}
parent = irq_find_matching_fwnode(of_fwnode_handle(parent_node), DOMAIN_BUS_NEXUS);
+ of_node_put(parent_node);
if (!parent || !msi_get_domain_info(parent)) {
dev_err(dev, "unable to locate ITS domain\n");
return NULL;
diff --git a/drivers/cdx/controller/Kconfig b/drivers/cdx/controller/Kconfig
index 0641a4c21e66..a480b62cbd1f 100644
--- a/drivers/cdx/controller/Kconfig
+++ b/drivers/cdx/controller/Kconfig
@@ -10,7 +10,6 @@ if CDX_BUS
config CDX_CONTROLLER
tristate "CDX bus controller"
depends on HAS_DMA
- select GENERIC_MSI_IRQ
select REMOTEPROC
select RPMSG
help
diff --git a/drivers/cdx/controller/bitfield.h b/drivers/cdx/controller/bitfield.h
deleted file mode 100644
index 567f8ec47582..000000000000
--- a/drivers/cdx/controller/bitfield.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2013 Solarflare Communications Inc.
- * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
- */
-
-#ifndef CDX_BITFIELD_H
-#define CDX_BITFIELD_H
-
-#include <linux/bitfield.h>
-
-/* Lowest bit numbers and widths */
-#define CDX_DWORD_LBN 0
-#define CDX_DWORD_WIDTH 32
-
-/* Specified attribute (e.g. LBN) of the specified field */
-#define CDX_VAL(field, attribute) field ## _ ## attribute
-/* Low bit number of the specified field */
-#define CDX_LOW_BIT(field) CDX_VAL(field, LBN)
-/* Bit width of the specified field */
-#define CDX_WIDTH(field) CDX_VAL(field, WIDTH)
-/* High bit number of the specified field */
-#define CDX_HIGH_BIT(field) (CDX_LOW_BIT(field) + CDX_WIDTH(field) - 1)
-
-/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */
-struct cdx_dword {
- __le32 cdx_u32;
-};
-
-/* Value expanders for printk */
-#define CDX_DWORD_VAL(dword) \
- ((unsigned int)le32_to_cpu((dword).cdx_u32))
-
-/*
- * Extract bit field portion [low,high) from the 32-bit little-endian
- * element which contains bits [min,max)
- */
-#define CDX_DWORD_FIELD(dword, field) \
- (FIELD_GET(GENMASK(CDX_HIGH_BIT(field), CDX_LOW_BIT(field)), \
- le32_to_cpu((dword).cdx_u32)))
-
-/*
- * Creates the portion of the named bit field that lies within the
- * range [min,max).
- */
-#define CDX_INSERT_FIELD(field, value) \
- (FIELD_PREP(GENMASK(CDX_HIGH_BIT(field), \
- CDX_LOW_BIT(field)), value))
-
-/*
- * Creates the portion of the named bit fields that lie within the
- * range [min,max).
- */
-#define CDX_INSERT_FIELDS(field1, value1, \
- field2, value2, \
- field3, value3, \
- field4, value4, \
- field5, value5, \
- field6, value6, \
- field7, value7) \
- (CDX_INSERT_FIELD(field1, (value1)) | \
- CDX_INSERT_FIELD(field2, (value2)) | \
- CDX_INSERT_FIELD(field3, (value3)) | \
- CDX_INSERT_FIELD(field4, (value4)) | \
- CDX_INSERT_FIELD(field5, (value5)) | \
- CDX_INSERT_FIELD(field6, (value6)) | \
- CDX_INSERT_FIELD(field7, (value7)))
-
-#define CDX_POPULATE_DWORD(dword, ...) \
- (dword).cdx_u32 = cpu_to_le32(CDX_INSERT_FIELDS(__VA_ARGS__))
-
-/* Populate a dword field with various numbers of arguments */
-#define CDX_POPULATE_DWORD_7 CDX_POPULATE_DWORD
-#define CDX_POPULATE_DWORD_6(dword, ...) \
- CDX_POPULATE_DWORD_7(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_POPULATE_DWORD_5(dword, ...) \
- CDX_POPULATE_DWORD_6(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_POPULATE_DWORD_4(dword, ...) \
- CDX_POPULATE_DWORD_5(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_POPULATE_DWORD_3(dword, ...) \
- CDX_POPULATE_DWORD_4(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_POPULATE_DWORD_2(dword, ...) \
- CDX_POPULATE_DWORD_3(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_POPULATE_DWORD_1(dword, ...) \
- CDX_POPULATE_DWORD_2(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_SET_DWORD(dword) \
- CDX_POPULATE_DWORD_1(dword, CDX_DWORD, 0xffffffff)
-
-#endif /* CDX_BITFIELD_H */
diff --git a/drivers/cdx/controller/cdx_controller.c b/drivers/cdx/controller/cdx_controller.c
index fca83141e3e6..280f207735da 100644
--- a/drivers/cdx/controller/cdx_controller.c
+++ b/drivers/cdx/controller/cdx_controller.c
@@ -14,7 +14,7 @@
#include "cdx_controller.h"
#include "../cdx.h"
#include "mcdi_functions.h"
-#include "mcdi.h"
+#include "mcdid.h"
static unsigned int cdx_mcdi_rpc_timeout(struct cdx_mcdi *cdx, unsigned int cmd)
{
@@ -193,7 +193,8 @@ static int xlnx_cdx_probe(struct platform_device *pdev)
cdx->ops = &cdx_ops;
/* Create MSI domain */
- cdx->msi_domain = cdx_msi_domain_init(&pdev->dev);
+ if (IS_ENABLED(CONFIG_GENERIC_MSI_IRQ))
+ cdx->msi_domain = cdx_msi_domain_init(&pdev->dev);
if (!cdx->msi_domain) {
ret = dev_err_probe(&pdev->dev, -ENODEV, "cdx_msi_domain_init() failed");
goto cdx_msi_fail;
diff --git a/drivers/cdx/controller/cdx_rpmsg.c b/drivers/cdx/controller/cdx_rpmsg.c
index 04b578a0be17..59aabd99fa8f 100644
--- a/drivers/cdx/controller/cdx_rpmsg.c
+++ b/drivers/cdx/controller/cdx_rpmsg.c
@@ -15,7 +15,7 @@
#include "../cdx.h"
#include "cdx_controller.h"
#include "mcdi_functions.h"
-#include "mcdi.h"
+#include "mcdid.h"
static struct rpmsg_device_id cdx_rpmsg_id_table[] = {
{ .name = "mcdi_ipc" },
@@ -129,8 +129,7 @@ static int cdx_rpmsg_probe(struct rpmsg_device *rpdev)
chinfo.src = RPMSG_ADDR_ANY;
chinfo.dst = rpdev->dst;
- strscpy(chinfo.name, cdx_rpmsg_id_table[0].name,
- strlen(cdx_rpmsg_id_table[0].name));
+ strscpy(chinfo.name, cdx_rpmsg_id_table[0].name, sizeof(chinfo.name));
cdx_mcdi->ept = rpmsg_create_ept(rpdev, cdx_rpmsg_cb, NULL, chinfo);
if (!cdx_mcdi->ept) {
diff --git a/drivers/cdx/controller/mcdi.c b/drivers/cdx/controller/mcdi.c
index e760f8d347cc..2e82ffc18d89 100644
--- a/drivers/cdx/controller/mcdi.c
+++ b/drivers/cdx/controller/mcdi.c
@@ -23,9 +23,10 @@
#include <linux/log2.h>
#include <linux/net_tstamp.h>
#include <linux/wait.h>
+#include <linux/cdx/bitfield.h>
-#include "bitfield.h"
-#include "mcdi.h"
+#include <linux/cdx/mcdi.h>
+#include "mcdid.h"
static void cdx_mcdi_cancel_cmd(struct cdx_mcdi *cdx, struct cdx_mcdi_cmd *cmd);
static void cdx_mcdi_wait_for_cleanup(struct cdx_mcdi *cdx);
@@ -99,6 +100,19 @@ static unsigned long cdx_mcdi_rpc_timeout(struct cdx_mcdi *cdx, unsigned int cmd
return cdx->mcdi_ops->mcdi_rpc_timeout(cdx, cmd);
}
+/**
+ * cdx_mcdi_init - Initialize MCDI (Management Controller Driver Interface) state
+ * @cdx: Handle to the CDX MCDI structure
+ *
+ * This function allocates and initializes internal MCDI structures and resources
+ * for the CDX device, including the workqueue, locking primitives, and command
+ * tracking mechanisms. It sets the initial operating mode and prepares the device
+ * for MCDI operations.
+ *
+ * Return:
+ * * 0 - on success
+ * * -ENOMEM - if memory allocation or workqueue creation fails
+ */
int cdx_mcdi_init(struct cdx_mcdi *cdx)
{
struct cdx_mcdi_iface *mcdi;
@@ -128,7 +142,16 @@ fail2:
fail:
return rc;
}
+EXPORT_SYMBOL_GPL(cdx_mcdi_init);
+/**
+ * cdx_mcdi_finish - Cleanup MCDI (Management Controller Driver Interface) state
+ * @cdx: Handle to the CDX MCDI structure
+ *
+ * This function cleans up the MCDI resources associated with a
+ * cdx_mcdi structure and destroys the MCDI workqueue.
+ *
+ */
void cdx_mcdi_finish(struct cdx_mcdi *cdx)
{
struct cdx_mcdi_iface *mcdi;
@@ -143,6 +166,7 @@ void cdx_mcdi_finish(struct cdx_mcdi *cdx)
kfree(cdx->mcdi);
cdx->mcdi = NULL;
}
+EXPORT_SYMBOL_GPL(cdx_mcdi_finish);
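
With init/finish exported, an out-of-module user can drive the MCDI core directly. A minimal pairing sketch (the ops callbacks are hypothetical and error handling is elided):

static const struct cdx_mcdi_ops demo_mcdi_ops = {
	.mcdi_request     = demo_mcdi_request,		/* transport send hook */
	.mcdi_rpc_timeout = demo_mcdi_rpc_timeout,
};

cdx->mcdi_ops = &demo_mcdi_ops;

ret = cdx_mcdi_init(cdx);	/* allocates iface state and the workqueue */
if (ret)
	return ret;
/* ... cdx_mcdi_rpc() / cdx_mcdi_process_cmd() while the device is up ... */
cdx_mcdi_finish(cdx);		/* waits for outstanding cleanups, frees state */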
static bool cdx_mcdi_flushed(struct cdx_mcdi_iface *mcdi, bool ignore_cleanups)
{
@@ -553,6 +577,19 @@ static void cdx_mcdi_start_or_queue(struct cdx_mcdi_iface *mcdi,
cdx_mcdi_cmd_start_or_queue(mcdi, cmd);
}
+/**
+ * cdx_mcdi_process_cmd - Process an incoming MCDI response
+ * @cdx: Handle to the CDX MCDI structure
+ * @outbuf: Pointer to the response buffer received from the management controller
+ * @len: Length of the response buffer in bytes
+ *
+ * This function handles a response from the management controller. It locates the
+ * corresponding command using the sequence number embedded in the header,
+ * completes the command if it is still pending, and initiates any necessary cleanup.
+ *
+ * The function assumes that the response buffer is well-formed and at least one
+ * dword in size.
+ */
void cdx_mcdi_process_cmd(struct cdx_mcdi *cdx, struct cdx_dword *outbuf, int len)
{
struct cdx_mcdi_iface *mcdi;
@@ -590,6 +627,7 @@ void cdx_mcdi_process_cmd(struct cdx_mcdi *cdx, struct cdx_dword *outbuf, int le
cdx_mcdi_process_cleanup_list(mcdi->cdx, &cleanup_list);
}
+EXPORT_SYMBOL_GPL(cdx_mcdi_process_cmd);
static void cdx_mcdi_cmd_work(struct work_struct *context)
{
@@ -757,6 +795,7 @@ int cdx_mcdi_rpc(struct cdx_mcdi *cdx, unsigned int cmd,
return cdx_mcdi_rpc_sync(cdx, cmd, inbuf, inlen, outbuf, outlen,
outlen_actual, false);
}
+EXPORT_SYMBOL_GPL(cdx_mcdi_rpc);
/**
* cdx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
diff --git a/drivers/cdx/controller/mcdi.h b/drivers/cdx/controller/mcdi.h
deleted file mode 100644
index 54a65e9760ae..000000000000
--- a/drivers/cdx/controller/mcdi.h
+++ /dev/null
@@ -1,242 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright 2008-2013 Solarflare Communications Inc.
- * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
- */
-
-#ifndef CDX_MCDI_H
-#define CDX_MCDI_H
-
-#include <linux/mutex.h>
-#include <linux/kref.h>
-#include <linux/rpmsg.h>
-
-#include "bitfield.h"
-#include "mc_cdx_pcol.h"
-
-#ifdef DEBUG
-#define CDX_WARN_ON_ONCE_PARANOID(x) WARN_ON_ONCE(x)
-#define CDX_WARN_ON_PARANOID(x) WARN_ON(x)
-#else
-#define CDX_WARN_ON_ONCE_PARANOID(x) do {} while (0)
-#define CDX_WARN_ON_PARANOID(x) do {} while (0)
-#endif
-
-/**
- * enum cdx_mcdi_mode - MCDI transaction mode
- * @MCDI_MODE_EVENTS: wait for an mcdi response callback.
- * @MCDI_MODE_FAIL: we think MCDI is dead, so fail-fast all calls
- */
-enum cdx_mcdi_mode {
- MCDI_MODE_EVENTS,
- MCDI_MODE_FAIL,
-};
-
-#define MCDI_RPC_TIMEOUT (10 * HZ)
-#define MCDI_RPC_LONG_TIMEOU (60 * HZ)
-#define MCDI_RPC_POST_RST_TIME (10 * HZ)
-
-#define MCDI_BUF_LEN (8 + MCDI_CTL_SDU_LEN_MAX)
-
-/**
- * enum cdx_mcdi_cmd_state - State for an individual MCDI command
- * @MCDI_STATE_QUEUED: Command not started and is waiting to run.
- * @MCDI_STATE_RETRY: Command was submitted and MC rejected with no resources,
- * as MC have too many outstanding commands. Command will be retried once
- * another command returns.
- * @MCDI_STATE_RUNNING: Command was accepted and is running.
- * @MCDI_STATE_RUNNING_CANCELLED: Command is running but the issuer cancelled
- * the command.
- * @MCDI_STATE_FINISHED: Processing of this command has completed.
- */
-
-enum cdx_mcdi_cmd_state {
- MCDI_STATE_QUEUED,
- MCDI_STATE_RETRY,
- MCDI_STATE_RUNNING,
- MCDI_STATE_RUNNING_CANCELLED,
- MCDI_STATE_FINISHED,
-};
-
-/**
- * struct cdx_mcdi - CDX MCDI Firmware interface, to interact
- * with CDX controller.
- * @mcdi: MCDI interface
- * @mcdi_ops: MCDI operations
- * @r5_rproc : R5 Remoteproc device handle
- * @rpdev: RPMsg device
- * @ept: RPMsg endpoint
- * @work: Post probe work
- */
-struct cdx_mcdi {
- /* MCDI interface */
- struct cdx_mcdi_data *mcdi;
- const struct cdx_mcdi_ops *mcdi_ops;
-
- struct rproc *r5_rproc;
- struct rpmsg_device *rpdev;
- struct rpmsg_endpoint *ept;
- struct work_struct work;
-};
-
-struct cdx_mcdi_ops {
- void (*mcdi_request)(struct cdx_mcdi *cdx,
- const struct cdx_dword *hdr, size_t hdr_len,
- const struct cdx_dword *sdu, size_t sdu_len);
- unsigned int (*mcdi_rpc_timeout)(struct cdx_mcdi *cdx, unsigned int cmd);
-};
-
-typedef void cdx_mcdi_async_completer(struct cdx_mcdi *cdx,
- unsigned long cookie, int rc,
- struct cdx_dword *outbuf,
- size_t outlen_actual);
-
-/**
- * struct cdx_mcdi_cmd - An outstanding MCDI command
- * @ref: Reference count. There will be one reference if the command is
- * in the mcdi_iface cmd_list, another if it's on a cleanup list,
- * and a third if it's queued in the work queue.
- * @list: The data for this entry in mcdi->cmd_list
- * @cleanup_list: The data for this entry in a cleanup list
- * @work: The work item for this command, queued in mcdi->workqueue
- * @mcdi: The mcdi_iface for this command
- * @state: The state of this command
- * @inlen: inbuf length
- * @inbuf: Input buffer
- * @quiet: Whether to silence errors
- * @reboot_seen: Whether a reboot has been seen during this command,
- * to prevent duplicates
- * @seq: Sequence number
- * @started: Jiffies this command was started at
- * @cookie: Context for completion function
- * @completer: Completion function
- * @handle: Command handle
- * @cmd: Command number
- * @rc: Return code
- * @outlen: Length of output buffer
- * @outbuf: Output buffer
- */
-struct cdx_mcdi_cmd {
- struct kref ref;
- struct list_head list;
- struct list_head cleanup_list;
- struct work_struct work;
- struct cdx_mcdi_iface *mcdi;
- enum cdx_mcdi_cmd_state state;
- size_t inlen;
- const struct cdx_dword *inbuf;
- bool quiet;
- bool reboot_seen;
- u8 seq;
- unsigned long started;
- unsigned long cookie;
- cdx_mcdi_async_completer *completer;
- unsigned int handle;
- unsigned int cmd;
- int rc;
- size_t outlen;
- struct cdx_dword *outbuf;
- /* followed by inbuf data if necessary */
-};
-
-/**
- * struct cdx_mcdi_iface - MCDI protocol context
- * @cdx: The associated NIC
- * @iface_lock: Serialise access to this structure
- * @outstanding_cleanups: Count of cleanups
- * @cmd_list: List of outstanding and running commands
- * @workqueue: Workqueue used for delayed processing
- * @cmd_complete_wq: Waitqueue for command completion
- * @db_held_by: Command the MC doorbell is in use by
- * @seq_held_by: Command each sequence number is in use by
- * @prev_handle: The last used command handle
- * @mode: Poll for mcdi completion, or wait for an mcdi_event
- * @prev_seq: The last used sequence number
- * @new_epoch: Indicates start of day or start of MC reboot recovery
- */
-struct cdx_mcdi_iface {
- struct cdx_mcdi *cdx;
- /* Serialise access */
- struct mutex iface_lock;
- unsigned int outstanding_cleanups;
- struct list_head cmd_list;
- struct workqueue_struct *workqueue;
- wait_queue_head_t cmd_complete_wq;
- struct cdx_mcdi_cmd *db_held_by;
- struct cdx_mcdi_cmd *seq_held_by[16];
- unsigned int prev_handle;
- enum cdx_mcdi_mode mode;
- u8 prev_seq;
- bool new_epoch;
-};
-
-/**
- * struct cdx_mcdi_data - extra state for NICs that implement MCDI
- * @iface: Interface/protocol state
- * @fn_flags: Flags for this function, as returned by %MC_CMD_DRV_ATTACH.
- */
-struct cdx_mcdi_data {
- struct cdx_mcdi_iface iface;
- u32 fn_flags;
-};
-
-static inline struct cdx_mcdi_iface *cdx_mcdi_if(struct cdx_mcdi *cdx)
-{
- return cdx->mcdi ? &cdx->mcdi->iface : NULL;
-}
-
-int cdx_mcdi_init(struct cdx_mcdi *cdx);
-void cdx_mcdi_finish(struct cdx_mcdi *cdx);
-
-void cdx_mcdi_process_cmd(struct cdx_mcdi *cdx, struct cdx_dword *outbuf, int len);
-int cdx_mcdi_rpc(struct cdx_mcdi *cdx, unsigned int cmd,
- const struct cdx_dword *inbuf, size_t inlen,
- struct cdx_dword *outbuf, size_t outlen, size_t *outlen_actual);
-int cdx_mcdi_rpc_async(struct cdx_mcdi *cdx, unsigned int cmd,
- const struct cdx_dword *inbuf, size_t inlen,
- cdx_mcdi_async_completer *complete,
- unsigned long cookie);
-int cdx_mcdi_wait_for_quiescence(struct cdx_mcdi *cdx,
- unsigned int timeout_jiffies);
-
-/*
- * We expect that 16- and 32-bit fields in MCDI requests and responses
- * are appropriately aligned, but 64-bit fields are only
- * 32-bit-aligned.
- */
-#define MCDI_DECLARE_BUF(_name, _len) struct cdx_dword _name[DIV_ROUND_UP(_len, 4)] = {{0}}
-#define _MCDI_PTR(_buf, _offset) \
- ((u8 *)(_buf) + (_offset))
-#define MCDI_PTR(_buf, _field) \
- _MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST)
-#define _MCDI_CHECK_ALIGN(_ofst, _align) \
- ((void)BUILD_BUG_ON_ZERO((_ofst) & ((_align) - 1)), \
- (_ofst))
-#define _MCDI_DWORD(_buf, _field) \
- ((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
-
-#define MCDI_BYTE(_buf, _field) \
- ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \
- *MCDI_PTR(_buf, _field))
-#define MCDI_WORD(_buf, _field) \
- ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2), \
- le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
-#define MCDI_SET_DWORD(_buf, _field, _value) \
- CDX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), CDX_DWORD, _value)
-#define MCDI_DWORD(_buf, _field) \
- CDX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), CDX_DWORD)
-#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
- CDX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \
- MC_CMD_ ## _name1, _value1)
-#define MCDI_SET_QWORD(_buf, _field, _value) \
- do { \
- CDX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \
- CDX_DWORD, (u32)(_value)); \
- CDX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[1], \
- CDX_DWORD, (u64)(_value) >> 32); \
- } while (0)
-#define MCDI_QWORD(_buf, _field) \
- (CDX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], CDX_DWORD) | \
- (u64)CDX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], CDX_DWORD) << 32)
-
-#endif /* CDX_MCDI_H */
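
The dword accessors removed above address an MCDI buffer as an array of 32-bit little-endian dwords, with BUILD_BUG_ON_ZERO() rejecting a mismatched field width at compile time. A minimal usage sketch, assuming a hypothetical 4-byte MC_CMD_EXAMPLE field defined in the pcol header:

	MCDI_DECLARE_BUF(inbuf, 8);		/* 8 bytes -> 2 dwords, zeroed */
	MCDI_SET_DWORD(inbuf, EXAMPLE, 42);	/* writes at MC_CMD_EXAMPLE_OFST */
	u32 v = MCDI_DWORD(inbuf, EXAMPLE);	/* reads back, v == 42 */
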
diff --git a/drivers/cdx/controller/mcdi_functions.c b/drivers/cdx/controller/mcdi_functions.c
index 885c69e6ebe5..8ae2d99be81e 100644
--- a/drivers/cdx/controller/mcdi_functions.c
+++ b/drivers/cdx/controller/mcdi_functions.c
@@ -5,7 +5,6 @@
#include <linux/module.h>
-#include "mcdi.h"
#include "mcdi_functions.h"
int cdx_mcdi_get_num_buses(struct cdx_mcdi *cdx)
diff --git a/drivers/cdx/controller/mcdi_functions.h b/drivers/cdx/controller/mcdi_functions.h
index b9942affdc6b..57fd1bae706b 100644
--- a/drivers/cdx/controller/mcdi_functions.h
+++ b/drivers/cdx/controller/mcdi_functions.h
@@ -8,7 +8,8 @@
#ifndef CDX_MCDI_FUNCTIONS_H
#define CDX_MCDI_FUNCTIONS_H
-#include "mcdi.h"
+#include <linux/cdx/mcdi.h>
+#include "mcdid.h"
#include "../cdx.h"
/**
diff --git a/drivers/cdx/controller/mcdid.h b/drivers/cdx/controller/mcdid.h
new file mode 100644
index 000000000000..7fc29f099265
--- /dev/null
+++ b/drivers/cdx/controller/mcdid.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2008-2013 Solarflare Communications Inc.
+ * Copyright (C) 2022-2025, Advanced Micro Devices, Inc.
+ */
+
+#ifndef CDX_MCDID_H
+#define CDX_MCDID_H
+
+#include <linux/mutex.h>
+#include <linux/kref.h>
+#include <linux/rpmsg.h>
+
+#include "mc_cdx_pcol.h"
+
+#ifdef DEBUG
+#define CDX_WARN_ON_ONCE_PARANOID(x) WARN_ON_ONCE(x)
+#define CDX_WARN_ON_PARANOID(x) WARN_ON(x)
+#else
+#define CDX_WARN_ON_ONCE_PARANOID(x) do {} while (0)
+#define CDX_WARN_ON_PARANOID(x) do {} while (0)
+#endif
+
+#define MCDI_BUF_LEN (8 + MCDI_CTL_SDU_LEN_MAX)
+
+static inline struct cdx_mcdi_iface *cdx_mcdi_if(struct cdx_mcdi *cdx)
+{
+ return cdx->mcdi ? &cdx->mcdi->iface : NULL;
+}
+
+int cdx_mcdi_rpc_async(struct cdx_mcdi *cdx, unsigned int cmd,
+ const struct cdx_dword *inbuf, size_t inlen,
+ cdx_mcdi_async_completer *complete,
+ unsigned long cookie);
+int cdx_mcdi_wait_for_quiescence(struct cdx_mcdi *cdx,
+ unsigned int timeout_jiffies);
+
+/*
+ * We expect that 16- and 32-bit fields in MCDI requests and responses
+ * are appropriately aligned, but 64-bit fields are only
+ * 32-bit-aligned.
+ */
+#define MCDI_BYTE(_buf, _field) \
+ ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \
+ *MCDI_PTR(_buf, _field))
+#define MCDI_WORD(_buf, _field) \
+ ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2), \
+ le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
+#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
+ CDX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1)
+#define MCDI_SET_QWORD(_buf, _field, _value) \
+ do { \
+ CDX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \
+ CDX_DWORD, (u32)(_value)); \
+ CDX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[1], \
+ CDX_DWORD, (u64)(_value) >> 32); \
+ } while (0)
+#define MCDI_QWORD(_buf, _field) \
+ (CDX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], CDX_DWORD) | \
+ (u64)CDX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], CDX_DWORD) << 32)
+
+#endif /* CDX_MCDID_H */
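
Since 64-bit MCDI fields are only guaranteed 32-bit alignment, MCDI_SET_QWORD stores the value as two consecutive dwords (low half first) and MCDI_QWORD reassembles them; the equivalent open-coded arithmetic is:

	u64 val = 0x1122334455667788ULL;
	u32 lo = (u32)val;			/* dword[0]: bits 31..0  */
	u32 hi = (u32)(val >> 32);		/* dword[1]: bits 63..32 */
	u64 back = (u64)lo | ((u64)hi << 32);	/* back == val */
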
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index e9b360cdc99a..1291369b9126 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -6,6 +6,7 @@
obj-y += mem.o random.o
obj-$(CONFIG_TTY_PRINTK) += ttyprintk.o
obj-y += misc.o
+obj-$(CONFIG_TEST_MISC_MINOR) += misc_minor_kunit.o
obj-$(CONFIG_ATARI_DSP56K) += dsp56k.o
obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
obj-$(CONFIG_UV_MMTIMER) += uv_mmtimer.o
diff --git a/drivers/char/adi.c b/drivers/char/adi.c
index f9bec10a6064..4312b0cc391c 100644
--- a/drivers/char/adi.c
+++ b/drivers/char/adi.c
@@ -131,7 +131,7 @@ static ssize_t adi_write(struct file *file, const char __user *buf,
ssize_t ret;
int i;
- if (count <= 0)
+ if (count == 0)
return -EINVAL;
ver_buf_sz = min_t(size_t, count, MAX_BUF_SZ);
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 0713ea2b2a51..4f5ccd3a1f56 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -867,7 +867,7 @@ int hpet_alloc(struct hpet_data *hdp)
printk(KERN_INFO "hpet%u: at MMIO 0x%lx, IRQ%s",
hpetp->hp_which, hdp->hd_phys_address,
- hpetp->hp_ntimer > 1 ? "s" : "");
+ str_plural(hpetp->hp_ntimer));
for (i = 0; i < hpetp->hp_ntimer; i++)
printk(KERN_CONT "%s %u", i > 0 ? "," : "", hdp->hd_irq[i]);
printk(KERN_CONT "\n");
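
str_plural() is the helper from <linux/string_choices.h>; it differs from the old "> 1" test only for a count of zero, where it still (correctly) returns "s":

	static inline const char *str_plural(size_t num)
	{
		return num == 1 ? "" : "s";	/* "IRQ" for 1, "IRQs" otherwise */
	}
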
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index c85827843447..492a2a61a65b 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -77,7 +77,7 @@ config HW_RANDOM_AIROHA
config HW_RANDOM_ATMEL
tristate "Atmel Random Number Generator support"
- depends on (ARCH_AT91 || COMPILE_TEST)
+ depends on (ARCH_MICROCHIP || COMPILE_TEST)
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@@ -312,6 +312,7 @@ config HW_RANDOM_INGENIC_TRNG
config HW_RANDOM_NOMADIK
tristate "ST-Ericsson Nomadik Random Number Generator support"
depends on ARCH_NOMADIK || COMPILE_TEST
+ depends on ARM_AMBA
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/hw_random/cn10k-rng.c b/drivers/char/hw_random/cn10k-rng.c
index 31935316a160..3b4e78182e14 100644
--- a/drivers/char/hw_random/cn10k-rng.c
+++ b/drivers/char/hw_random/cn10k-rng.c
@@ -188,7 +188,7 @@ static int cn10k_rng_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rng->reg_base = pcim_iomap(pdev, 0, 0);
if (!rng->reg_base)
- return dev_err_probe(&pdev->dev, -ENOMEM, "Error while mapping CSRs, exiting\n");
+ return -ENOMEM;
rng->ops.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"cn10k-rng-%s", dev_name(&pdev->dev));
diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c
index d8fd8a354482..9e408144a10c 100644
--- a/drivers/char/hw_random/ks-sa-rng.c
+++ b/drivers/char/hw_random/ks-sa-rng.c
@@ -231,6 +231,10 @@ static int ks_sa_rng_probe(struct platform_device *pdev)
if (IS_ERR(ks_sa_rng->regmap_cfg))
return dev_err_probe(dev, -EINVAL, "syscon_node_to_regmap failed\n");
+ ks_sa_rng->clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(ks_sa_rng->clk))
+ return dev_err_probe(dev, PTR_ERR(ks_sa_rng->clk), "Failed to get clock\n");
+
pm_runtime_enable(dev);
ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
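
devm_clk_get_enabled() combines clk_get() with clk_prepare_enable() and registers the matching disable and put with devres, which is why the error paths above need no explicit clock cleanup; the idiom is:

	clk = devm_clk_get_enabled(dev, NULL);	/* NULL: the device's only clock */
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk), "Failed to get clock\n");
	/* no clk_disable_unprepare()/clk_put() needed: devres runs them on detach */
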
diff --git a/drivers/char/hw_random/n2rng.h b/drivers/char/hw_random/n2rng.h
index 9a870f5dc371..7612f15a261f 100644
--- a/drivers/char/hw_random/n2rng.h
+++ b/drivers/char/hw_random/n2rng.h
@@ -48,7 +48,7 @@
#define HV_RNG_NUM_CONTROL 4
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
extern unsigned long sun4v_rng_get_diag_ctl(void);
extern unsigned long sun4v_rng_ctl_read_v1(unsigned long ctl_regs_ra,
unsigned long *state,
@@ -147,6 +147,6 @@ struct n2rng {
#define N2RNG_BUSY_LIMIT 100
#define N2RNG_HCHECK_LIMIT 100
-#endif /* !(__ASSEMBLY__) */
+#endif /* !(__ASSEMBLER__) */
#endif /* _N2RNG_H */
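
Unlike __ASSEMBLY__, which kernel makefiles must define by hand when preprocessing .S files, __ASSEMBLER__ is predefined by gcc and clang themselves, so the guard works with no build-system help:

	#ifndef __ASSEMBLER__	/* compiler-defined; absent only when assembling */
	extern unsigned long sun4v_rng_get_diag_ctl(void);
	#endif
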
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index b95f6d0f17ed..e61f06393209 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -150,7 +150,7 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
priv->rng_ops.quality = pdata->quality;
}
- priv->period = ns_to_ktime(period * NSEC_PER_USEC);
+ priv->period = us_to_ktime(period);
init_completion(&priv->completion);
hrtimer_setup(&priv->timer, timeriomem_rng_trigger, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
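
us_to_ktime() is the <linux/ktime.h> helper that performs the same microsecond-to-nanosecond scaling, so the conversion no longer spells out NSEC_PER_USEC:

	/* Both forms produce the same ktime_t: */
	priv->period = us_to_ktime(period);
	priv->period = ns_to_ktime(period * NSEC_PER_USEC);
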
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index f4adc6feb3b2..92bed266d07c 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -84,6 +84,13 @@ config IPMI_IPMB
bus, and it also supports direct messaging on the bus using
IPMB direct messages. This module requires I2C support.
+config IPMI_LS2K
+ bool 'Loongson-2K IPMI interface'
+ depends on LOONGARCH
+ select MFD_LS2K_BMC_CORE
+ help
+ Provides a driver for Loongson-2K IPMI interfaces.
+
config IPMI_POWERNV
depends on PPC_POWERNV
tristate 'POWERNV (OPAL firmware) IPMI interface'
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
index e0944547c9d0..4ea450a82242 100644
--- a/drivers/char/ipmi/Makefile
+++ b/drivers/char/ipmi/Makefile
@@ -8,6 +8,7 @@ ipmi_si-y := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o \
ipmi_si_mem_io.o
ipmi_si-$(CONFIG_HAS_IOPORT) += ipmi_si_port_io.o
ipmi_si-$(CONFIG_PCI) += ipmi_si_pci.o
+ipmi_si-$(CONFIG_IPMI_LS2K) += ipmi_si_ls2k.o
ipmi_si-$(CONFIG_PARISC) += ipmi_si_parisc.o
obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o
diff --git a/drivers/char/ipmi/ipmi_ipmb.c b/drivers/char/ipmi/ipmi_ipmb.c
index 6a4f279c7c1f..3a51e58b2487 100644
--- a/drivers/char/ipmi/ipmi_ipmb.c
+++ b/drivers/char/ipmi/ipmi_ipmb.c
@@ -404,8 +404,7 @@ static void ipmi_ipmb_shutdown(void *send_info)
ipmi_ipmb_stop_thread(iidev);
}
-static void ipmi_ipmb_sender(void *send_info,
- struct ipmi_smi_msg *msg)
+static int ipmi_ipmb_sender(void *send_info, struct ipmi_smi_msg *msg)
{
struct ipmi_ipmb_dev *iidev = send_info;
unsigned long flags;
@@ -417,6 +416,7 @@ static void ipmi_ipmb_sender(void *send_info,
spin_unlock_irqrestore(&iidev->lock, flags);
up(&iidev->wake_thread);
+ return IPMI_CC_NO_ERROR;
}
static void ipmi_ipmb_request_events(void *send_info)
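
The ->sender() callback contract changes from void to returning an IPMI completion code, letting the core deliver the error response itself instead of each driver doing so. A conforming driver follows this shape (example_dev, dev->broken, and queue_msg are placeholders, not real ipmi_ipmb names):

	static int example_sender(void *send_info, struct ipmi_smi_msg *msg)
	{
		struct example_dev *dev = send_info;

		if (dev->broken)
			return IPMI_ERR_UNSPECIFIED;	/* core generates the error reply */

		queue_msg(dev, msg);			/* ownership passes to the driver */
		return IPMI_CC_NO_ERROR;
	}
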
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index ecfcb50302f6..efda90dcf5b3 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -122,10 +122,10 @@ struct si_sm_data {
unsigned long error0_timeout;
};
-static unsigned int init_kcs_data_with_state(struct si_sm_data *kcs,
- struct si_sm_io *io, enum kcs_states state)
+static unsigned int init_kcs_data(struct si_sm_data *kcs,
+ struct si_sm_io *io)
{
- kcs->state = state;
+ kcs->state = KCS_IDLE;
kcs->io = io;
kcs->write_pos = 0;
kcs->write_count = 0;
@@ -140,12 +140,6 @@ static unsigned int init_kcs_data_with_state(struct si_sm_data *kcs,
return 2;
}
-static unsigned int init_kcs_data(struct si_sm_data *kcs,
- struct si_sm_io *io)
-{
- return init_kcs_data_with_state(kcs, io, KCS_IDLE);
-}
-
static inline unsigned char read_status(struct si_sm_data *kcs)
{
return kcs->io->inputb(kcs->io, 1);
@@ -276,7 +270,7 @@ static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data,
if (size > MAX_KCS_WRITE_SIZE)
return IPMI_REQ_LEN_EXCEEDED_ERR;
- if (kcs->state != KCS_IDLE) {
+ if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED)) {
dev_warn(kcs->io->dev, "KCS in invalid state %d\n", kcs->state);
return IPMI_NOT_IN_MY_STATE_ERR;
}
@@ -501,7 +495,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
}
if (kcs->state == KCS_HOSED) {
- init_kcs_data_with_state(kcs, kcs->io, KCS_ERROR0);
+ init_kcs_data(kcs, kcs->io);
return SI_SM_HOSED;
}
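
Two hosed-path changes cooperate here: kcs_event() resets a hosed state machine straight back to KCS_IDLE, and start_kcs_transaction() now also accepts a transaction while still in KCS_HOSED, so the upper layer's recovery probe can always be issued; in outline:

	if (kcs->state == KCS_HOSED) {
		init_kcs_data(kcs, kcs->io);	/* back to KCS_IDLE */
		return SI_SM_HOSED;		/* upper layer backs off and retries */
	}
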
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 8e9050f99e9e..a0b67a35a5f0 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -38,7 +38,9 @@
#define IPMI_DRIVER_VERSION "39.2"
-static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
+static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user);
+static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg,
+ struct ipmi_user *user);
static int ipmi_init_msghandler(void);
static void smi_work(struct work_struct *t);
static void handle_new_recv_msgs(struct ipmi_smi *intf);
@@ -50,6 +52,8 @@ static void intf_free(struct kref *ref);
static bool initialized;
static bool drvregistered;
+static struct timer_list ipmi_timer;
+
/* Numbers in this enumerator should be mapped to ipmi_panic_event_str */
enum ipmi_panic_event_op {
IPMI_SEND_PANIC_EVENT_NONE,
@@ -432,6 +436,7 @@ struct ipmi_smi {
atomic_t nr_users;
struct device_attribute nr_users_devattr;
struct device_attribute nr_msgs_devattr;
+ struct device_attribute maintenance_mode_devattr;
/* Used for wake ups at startup. */
@@ -464,7 +469,7 @@ struct ipmi_smi {
* interface to match them up with their responses. A routine
* is called periodically to time the items in this list.
*/
- spinlock_t seq_lock;
+ struct mutex seq_lock;
struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
int curr_seq;
@@ -539,7 +544,11 @@ struct ipmi_smi {
/* For handling of maintenance mode. */
int maintenance_mode;
- bool maintenance_mode_enable;
+
+#define IPMI_MAINTENANCE_MODE_STATE_OFF 0
+#define IPMI_MAINTENANCE_MODE_STATE_FIRMWARE 1
+#define IPMI_MAINTENANCE_MODE_STATE_RESET 2
+ int maintenance_mode_state;
int auto_maintenance_timeout;
spinlock_t maintenance_mode_lock; /* Used in a timer... */
@@ -955,7 +964,6 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
* risk. At this moment, simply skip it in that case.
*/
ipmi_free_recv_msg(msg);
- atomic_dec(&msg->user->nr_msgs);
} else {
/*
* Deliver it in smi_work. The message will hold a
@@ -1116,12 +1124,11 @@ static int intf_find_seq(struct ipmi_smi *intf,
struct ipmi_recv_msg **recv_msg)
{
int rv = -ENODEV;
- unsigned long flags;
if (seq >= IPMI_IPMB_NUM_SEQ)
return -EINVAL;
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
if (intf->seq_table[seq].inuse) {
struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
@@ -1134,7 +1141,7 @@ static int intf_find_seq(struct ipmi_smi *intf,
rv = 0;
}
}
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
return rv;
}
@@ -1145,14 +1152,13 @@ static int intf_start_seq_timer(struct ipmi_smi *intf,
long msgid)
{
int rv = -ENODEV;
- unsigned long flags;
unsigned char seq;
unsigned long seqid;
GET_SEQ_FROM_MSGID(msgid, seq, seqid);
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
/*
* We do this verification because the user can be deleted
* while a message is outstanding.
@@ -1163,7 +1169,7 @@ static int intf_start_seq_timer(struct ipmi_smi *intf,
ent->timeout = ent->orig_timeout;
rv = 0;
}
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
return rv;
}
@@ -1174,7 +1180,6 @@ static int intf_err_seq(struct ipmi_smi *intf,
unsigned int err)
{
int rv = -ENODEV;
- unsigned long flags;
unsigned char seq;
unsigned long seqid;
struct ipmi_recv_msg *msg = NULL;
@@ -1182,7 +1187,7 @@ static int intf_err_seq(struct ipmi_smi *intf,
GET_SEQ_FROM_MSGID(msgid, seq, seqid);
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
/*
* We do this verification because the user can be deleted
* while a message is outstanding.
@@ -1196,7 +1201,7 @@ static int intf_err_seq(struct ipmi_smi *intf,
msg = ent->recv_msg;
rv = 0;
}
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
if (msg)
deliver_err_response(intf, msg, err);
@@ -1209,7 +1214,6 @@ int ipmi_create_user(unsigned int if_num,
void *handler_data,
struct ipmi_user **user)
{
- unsigned long flags;
struct ipmi_user *new_user = NULL;
int rv = 0;
struct ipmi_smi *intf;
@@ -1277,9 +1281,9 @@ int ipmi_create_user(unsigned int if_num,
new_user->gets_events = false;
mutex_lock(&intf->users_mutex);
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
list_add(&new_user->link, &intf->users);
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
mutex_unlock(&intf->users_mutex);
if (handler->ipmi_watchdog_pretimeout)
@@ -1325,7 +1329,6 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
{
struct ipmi_smi *intf = user->intf;
int i;
- unsigned long flags;
struct cmd_rcvr *rcvr;
struct cmd_rcvr *rcvrs = NULL;
struct ipmi_recv_msg *msg, *msg2;
@@ -1346,7 +1349,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
list_del(&user->link);
atomic_dec(&intf->nr_users);
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
if (intf->seq_table[i].inuse
&& (intf->seq_table[i].recv_msg->user == user)) {
@@ -1355,7 +1358,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
}
}
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
/*
* Remove the user from the command receiver's table. First
@@ -1534,8 +1537,15 @@ EXPORT_SYMBOL(ipmi_get_maintenance_mode);
static void maintenance_mode_update(struct ipmi_smi *intf)
{
if (intf->handlers->set_maintenance_mode)
+ /*
+ * Lower level drivers only care about firmware mode
+ * as it affects their timing. They don't care about
+ * reset, which disables all commands for a while.
+ */
intf->handlers->set_maintenance_mode(
- intf->send_info, intf->maintenance_mode_enable);
+ intf->send_info,
+ (intf->maintenance_mode_state ==
+ IPMI_MAINTENANCE_MODE_STATE_FIRMWARE));
}
int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
@@ -1552,16 +1562,17 @@ int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
if (intf->maintenance_mode != mode) {
switch (mode) {
case IPMI_MAINTENANCE_MODE_AUTO:
- intf->maintenance_mode_enable
- = (intf->auto_maintenance_timeout > 0);
+ /* Just leave it alone. */
break;
case IPMI_MAINTENANCE_MODE_OFF:
- intf->maintenance_mode_enable = false;
+ intf->maintenance_mode_state =
+ IPMI_MAINTENANCE_MODE_STATE_OFF;
break;
case IPMI_MAINTENANCE_MODE_ON:
- intf->maintenance_mode_enable = true;
+ intf->maintenance_mode_state =
+ IPMI_MAINTENANCE_MODE_STATE_FIRMWARE;
break;
default:
@@ -1616,8 +1627,7 @@ int ipmi_set_gets_events(struct ipmi_user *user, bool val)
}
list_for_each_entry_safe(msg, msg2, &msgs, link) {
- msg->user = user;
- kref_get(&user->refcount);
+ ipmi_set_recv_msg_user(msg, user);
deliver_local_response(intf, msg);
}
}
@@ -1922,14 +1932,20 @@ static int i_ipmi_req_sysintf(struct ipmi_smi *intf,
if (is_maintenance_mode_cmd(msg)) {
unsigned long flags;
+ int newst;
+
+ if (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)
+ newst = IPMI_MAINTENANCE_MODE_STATE_FIRMWARE;
+ else
+ newst = IPMI_MAINTENANCE_MODE_STATE_RESET;
spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
- intf->auto_maintenance_timeout
- = maintenance_mode_timeout_ms;
+ intf->auto_maintenance_timeout = maintenance_mode_timeout_ms;
if (!intf->maintenance_mode
- && !intf->maintenance_mode_enable) {
- intf->maintenance_mode_enable = true;
+ && intf->maintenance_mode_state < newst) {
+ intf->maintenance_mode_state = newst;
maintenance_mode_update(intf);
+ mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}
spin_unlock_irqrestore(&intf->maintenance_mode_lock,
flags);
@@ -1943,7 +1959,7 @@ static int i_ipmi_req_sysintf(struct ipmi_smi *intf,
smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
smi_msg->data[1] = msg->cmd;
smi_msg->msgid = msgid;
- smi_msg->user_data = recv_msg;
+ smi_msg->recv_msg = recv_msg;
if (msg->data_len > 0)
memcpy(&smi_msg->data[2], msg->data, msg->data_len);
smi_msg->data_size = msg->data_len + 2;
@@ -2024,12 +2040,9 @@ static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
* Save the receive message so we can use it
* to deliver the response.
*/
- smi_msg->user_data = recv_msg;
+ smi_msg->recv_msg = recv_msg;
} else {
- /* It's a command, so get a sequence for it. */
- unsigned long flags;
-
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
if (is_maintenance_mode_cmd(msg))
intf->ipmb_maintenance_mode_timeout =
@@ -2087,7 +2100,7 @@ static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
* to be correct.
*/
out_err:
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
}
return rv;
@@ -2140,7 +2153,7 @@ static int i_ipmi_req_ipmb_direct(struct ipmi_smi *intf,
memcpy(smi_msg->data + 4, msg->data, msg->data_len);
smi_msg->data_size = msg->data_len + 4;
- smi_msg->user_data = recv_msg;
+ smi_msg->recv_msg = recv_msg;
return 0;
}
@@ -2203,12 +2216,9 @@ static int i_ipmi_req_lan(struct ipmi_smi *intf,
* Save the receive message so we can use it
* to deliver the response.
*/
- smi_msg->user_data = recv_msg;
+ smi_msg->recv_msg = recv_msg;
} else {
- /* It's a command, so get a sequence for it. */
- unsigned long flags;
-
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
/*
* Create a sequence number with a 1 second
@@ -2257,7 +2267,7 @@ static int i_ipmi_req_lan(struct ipmi_smi *intf,
* to be correct.
*/
out_err:
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
}
return rv;
@@ -2288,22 +2298,15 @@ static int i_ipmi_request(struct ipmi_user *user,
int run_to_completion = READ_ONCE(intf->run_to_completion);
int rv = 0;
- if (user) {
- if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) {
- /* Decrement will happen at the end of the routine. */
- rv = -EBUSY;
- goto out;
- }
- }
-
- if (supplied_recv)
+ if (supplied_recv) {
recv_msg = supplied_recv;
- else {
- recv_msg = ipmi_alloc_recv_msg();
- if (recv_msg == NULL) {
- rv = -ENOMEM;
- goto out;
- }
+ recv_msg->user = user;
+ if (user)
+ atomic_inc(&user->nr_msgs);
+ } else {
+ recv_msg = ipmi_alloc_recv_msg(user);
+ if (IS_ERR(recv_msg))
+ return PTR_ERR(recv_msg);
}
recv_msg->user_msg_data = user_msg_data;
@@ -2314,22 +2317,22 @@ static int i_ipmi_request(struct ipmi_user *user,
if (smi_msg == NULL) {
if (!supplied_recv)
ipmi_free_recv_msg(recv_msg);
- rv = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
}
if (!run_to_completion)
mutex_lock(&intf->users_mutex);
+ if (intf->maintenance_mode_state == IPMI_MAINTENANCE_MODE_STATE_RESET) {
+ /* No messages while the BMC is in reset. */
+ rv = -EBUSY;
+ goto out_err;
+ }
if (intf->in_shutdown) {
rv = -ENODEV;
goto out_err;
}
- recv_msg->user = user;
- if (user)
- /* The put happens when the message is freed. */
- kref_get(&user->refcount);
recv_msg->msgid = msgid;
/*
* Store the message to send in the receive message so timeout
@@ -2358,8 +2361,10 @@ static int i_ipmi_request(struct ipmi_user *user,
if (rv) {
out_err:
- ipmi_free_smi_msg(smi_msg);
- ipmi_free_recv_msg(recv_msg);
+ if (!supplied_smi)
+ ipmi_free_smi_msg(smi_msg);
+ if (!supplied_recv)
+ ipmi_free_recv_msg(recv_msg);
} else {
dev_dbg(intf->si_dev, "Send: %*ph\n",
smi_msg->data_size, smi_msg->data);
@@ -2369,9 +2374,6 @@ out_err:
if (!run_to_completion)
mutex_unlock(&intf->users_mutex);
-out:
- if (rv && user)
- atomic_dec(&user->nr_msgs);
return rv;
}
@@ -2622,6 +2624,12 @@ retry_bmc_lock:
(bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
goto out_noprocessing;
+ /* Don't allow sysfs access when in maintenance mode. */
+ if (intf->maintenance_mode_state) {
+ rv = -EBUSY;
+ goto out_noprocessing;
+ }
+
prev_guid_set = bmc->dyn_guid_set;
__get_guid(intf);
@@ -3517,6 +3525,19 @@ static ssize_t nr_msgs_show(struct device *dev,
}
static DEVICE_ATTR_RO(nr_msgs);
+static ssize_t maintenance_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ipmi_smi *intf = container_of(attr,
+ struct ipmi_smi,
+ maintenance_mode_devattr);
+
+ return sysfs_emit(buf, "%d %d\n", intf->maintenance_mode_state,
+ intf->auto_maintenance_timeout);
+}
+static DEVICE_ATTR_RO(maintenance_mode);
+
static void redo_bmc_reg(struct work_struct *work)
{
struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
@@ -3575,7 +3596,7 @@ int ipmi_add_smi(struct module *owner,
atomic_set(&intf->nr_users, 0);
intf->handlers = handlers;
intf->send_info = send_info;
- spin_lock_init(&intf->seq_lock);
+ mutex_init(&intf->seq_lock);
for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
intf->seq_table[j].inuse = 0;
intf->seq_table[j].seqid = 0;
@@ -3653,6 +3674,14 @@ int ipmi_add_smi(struct module *owner,
goto out_err_bmc_reg;
}
+ intf->maintenance_mode_devattr = dev_attr_maintenance_mode;
+ sysfs_attr_init(&intf->maintenance_mode_devattr.attr);
+ rv = device_create_file(intf->si_dev, &intf->maintenance_mode_devattr);
+ if (rv) {
+ device_remove_file(intf->si_dev, &intf->nr_users_devattr);
+ goto out_err_bmc_reg;
+ }
+
intf->intf_num = i;
mutex_unlock(&ipmi_interfaces_mutex);
@@ -3760,6 +3789,7 @@ void ipmi_unregister_smi(struct ipmi_smi *intf)
if (intf->handlers->shutdown)
intf->handlers->shutdown(intf->send_info);
+ device_remove_file(intf->si_dev, &intf->maintenance_mode_devattr);
device_remove_file(intf->si_dev, &intf->nr_msgs_devattr);
device_remove_file(intf->si_dev, &intf->nr_users_devattr);
@@ -3862,7 +3892,7 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
unsigned char chan;
struct ipmi_user *user = NULL;
struct ipmi_ipmb_addr *ipmb_addr;
- struct ipmi_recv_msg *recv_msg;
+ struct ipmi_recv_msg *recv_msg = NULL;
if (msg->rsp_size < 10) {
/* Message not big enough, just ignore it. */
@@ -3883,9 +3913,8 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
if (rcvr) {
user = rcvr->user;
- kref_get(&user->refcount);
- } else
- user = NULL;
+ recv_msg = ipmi_alloc_recv_msg(user);
+ }
rcu_read_unlock();
if (user == NULL) {
@@ -3915,47 +3944,41 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
* causes it to not be freed or queued.
*/
rv = -1;
- } else {
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
- /*
- * We couldn't allocate memory for the
- * message, so requeue it for handling
- * later.
- */
- rv = 1;
- kref_put(&user->refcount, free_ipmi_user);
- } else {
- /* Extract the source address from the data. */
- ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
- ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
- ipmb_addr->slave_addr = msg->rsp[6];
- ipmb_addr->lun = msg->rsp[7] & 3;
- ipmb_addr->channel = msg->rsp[3] & 0xf;
+ } else if (!IS_ERR(recv_msg)) {
+ /* Extract the source address from the data. */
+ ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
+ ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
+ ipmb_addr->slave_addr = msg->rsp[6];
+ ipmb_addr->lun = msg->rsp[7] & 3;
+ ipmb_addr->channel = msg->rsp[3] & 0xf;
- /*
- * Extract the rest of the message information
- * from the IPMB header.
- */
- recv_msg->user = user;
- recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
- recv_msg->msgid = msg->rsp[7] >> 2;
- recv_msg->msg.netfn = msg->rsp[4] >> 2;
- recv_msg->msg.cmd = msg->rsp[8];
- recv_msg->msg.data = recv_msg->msg_data;
+ /*
+ * Extract the rest of the message information
+ * from the IPMB header.
+ */
+ recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+ recv_msg->msgid = msg->rsp[7] >> 2;
+ recv_msg->msg.netfn = msg->rsp[4] >> 2;
+ recv_msg->msg.cmd = msg->rsp[8];
+ recv_msg->msg.data = recv_msg->msg_data;
- /*
- * We chop off 10, not 9 bytes because the checksum
- * at the end also needs to be removed.
- */
- recv_msg->msg.data_len = msg->rsp_size - 10;
- memcpy(recv_msg->msg_data, &msg->rsp[9],
- msg->rsp_size - 10);
- if (deliver_response(intf, recv_msg))
- ipmi_inc_stat(intf, unhandled_commands);
- else
- ipmi_inc_stat(intf, handled_commands);
- }
+ /*
+ * We chop off 10, not 9 bytes because the checksum
+ * at the end also needs to be removed.
+ */
+ recv_msg->msg.data_len = msg->rsp_size - 10;
+ memcpy(recv_msg->msg_data, &msg->rsp[9],
+ msg->rsp_size - 10);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ } else {
+ /*
+ * We couldn't get a receive message (out of memory, or the
+ * user is over its message quota), so requeue it for
+ * handling later.
+ */
+ rv = 1;
}
return rv;
@@ -3968,7 +3991,7 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
int rv = 0;
struct ipmi_user *user = NULL;
struct ipmi_ipmb_direct_addr *daddr;
- struct ipmi_recv_msg *recv_msg;
+ struct ipmi_recv_msg *recv_msg = NULL;
unsigned char netfn = msg->rsp[0] >> 2;
unsigned char cmd = msg->rsp[3];
@@ -3977,9 +4000,8 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
rcvr = find_cmd_rcvr(intf, netfn, cmd, 0);
if (rcvr) {
user = rcvr->user;
- kref_get(&user->refcount);
- } else
- user = NULL;
+ recv_msg = ipmi_alloc_recv_msg(user);
+ }
rcu_read_unlock();
if (user == NULL) {
@@ -4001,44 +4023,38 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
* causes it to not be freed or queued.
*/
rv = -1;
- } else {
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
- /*
- * We couldn't allocate memory for the
- * message, so requeue it for handling
- * later.
- */
- rv = 1;
- kref_put(&user->refcount, free_ipmi_user);
- } else {
- /* Extract the source address from the data. */
- daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
- daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
- daddr->channel = 0;
- daddr->slave_addr = msg->rsp[1];
- daddr->rs_lun = msg->rsp[0] & 3;
- daddr->rq_lun = msg->rsp[2] & 3;
+ } else if (!IS_ERR(recv_msg)) {
+ /* Extract the source address from the data. */
+ daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
+ daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
+ daddr->channel = 0;
+ daddr->slave_addr = msg->rsp[1];
+ daddr->rs_lun = msg->rsp[0] & 3;
+ daddr->rq_lun = msg->rsp[2] & 3;
- /*
- * Extract the rest of the message information
- * from the IPMB header.
- */
- recv_msg->user = user;
- recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
- recv_msg->msgid = (msg->rsp[2] >> 2);
- recv_msg->msg.netfn = msg->rsp[0] >> 2;
- recv_msg->msg.cmd = msg->rsp[3];
- recv_msg->msg.data = recv_msg->msg_data;
-
- recv_msg->msg.data_len = msg->rsp_size - 4;
- memcpy(recv_msg->msg_data, msg->rsp + 4,
- msg->rsp_size - 4);
- if (deliver_response(intf, recv_msg))
- ipmi_inc_stat(intf, unhandled_commands);
- else
- ipmi_inc_stat(intf, handled_commands);
- }
+ /*
+ * Extract the rest of the message information
+ * from the IPMB header.
+ */
+ recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+ recv_msg->msgid = (msg->rsp[2] >> 2);
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[3];
+ recv_msg->msg.data = recv_msg->msg_data;
+
+ recv_msg->msg.data_len = msg->rsp_size - 4;
+ memcpy(recv_msg->msg_data, msg->rsp + 4,
+ msg->rsp_size - 4);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ } else {
+ /*
+ * We couldn't get a receive message (out of memory, or the
+ * user is over its message quota), so requeue it for
+ * handling later.
+ */
+ rv = 1;
}
return rv;
@@ -4050,7 +4066,7 @@ static int handle_ipmb_direct_rcv_rsp(struct ipmi_smi *intf,
struct ipmi_recv_msg *recv_msg;
struct ipmi_ipmb_direct_addr *daddr;
- recv_msg = msg->user_data;
+ recv_msg = msg->recv_msg;
if (recv_msg == NULL) {
dev_warn(intf->si_dev,
"IPMI direct message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
@@ -4152,7 +4168,7 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
unsigned char chan;
struct ipmi_user *user = NULL;
struct ipmi_lan_addr *lan_addr;
- struct ipmi_recv_msg *recv_msg;
+ struct ipmi_recv_msg *recv_msg = NULL;
if (msg->rsp_size < 12) {
/* Message not big enough, just ignore it. */
@@ -4173,9 +4189,8 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
if (rcvr) {
user = rcvr->user;
- kref_get(&user->refcount);
- } else
- user = NULL;
+ recv_msg = ipmi_alloc_recv_msg(user);
+ }
rcu_read_unlock();
if (user == NULL) {
@@ -4206,49 +4221,44 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
* causes it to not be freed or queued.
*/
rv = -1;
- } else {
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
- /*
- * We couldn't allocate memory for the
- * message, so requeue it for handling later.
- */
- rv = 1;
- kref_put(&user->refcount, free_ipmi_user);
- } else {
- /* Extract the source address from the data. */
- lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
- lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
- lan_addr->session_handle = msg->rsp[4];
- lan_addr->remote_SWID = msg->rsp[8];
- lan_addr->local_SWID = msg->rsp[5];
- lan_addr->lun = msg->rsp[9] & 3;
- lan_addr->channel = msg->rsp[3] & 0xf;
- lan_addr->privilege = msg->rsp[3] >> 4;
+ } else if (!IS_ERR(recv_msg)) {
+ /* Extract the source address from the data. */
+ lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
+ lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
+ lan_addr->session_handle = msg->rsp[4];
+ lan_addr->remote_SWID = msg->rsp[8];
+ lan_addr->local_SWID = msg->rsp[5];
+ lan_addr->lun = msg->rsp[9] & 3;
+ lan_addr->channel = msg->rsp[3] & 0xf;
+ lan_addr->privilege = msg->rsp[3] >> 4;
- /*
- * Extract the rest of the message information
- * from the IPMB header.
- */
- recv_msg->user = user;
- recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
- recv_msg->msgid = msg->rsp[9] >> 2;
- recv_msg->msg.netfn = msg->rsp[6] >> 2;
- recv_msg->msg.cmd = msg->rsp[10];
- recv_msg->msg.data = recv_msg->msg_data;
+ /*
+ * Extract the rest of the message information
+ * from the IPMB header.
+ */
+ recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+ recv_msg->msgid = msg->rsp[9] >> 2;
+ recv_msg->msg.netfn = msg->rsp[6] >> 2;
+ recv_msg->msg.cmd = msg->rsp[10];
+ recv_msg->msg.data = recv_msg->msg_data;
- /*
- * We chop off 12, not 11 bytes because the checksum
- * at the end also needs to be removed.
- */
- recv_msg->msg.data_len = msg->rsp_size - 12;
- memcpy(recv_msg->msg_data, &msg->rsp[11],
- msg->rsp_size - 12);
- if (deliver_response(intf, recv_msg))
- ipmi_inc_stat(intf, unhandled_commands);
- else
- ipmi_inc_stat(intf, handled_commands);
- }
+ /*
+ * We chop off 12, not 11 bytes because the checksum
+ * at the end also needs to be removed.
+ */
+ recv_msg->msg.data_len = msg->rsp_size - 12;
+ memcpy(recv_msg->msg_data, &msg->rsp[11],
+ msg->rsp_size - 12);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ } else {
+ /*
+ * We couldn't get a receive message (out of memory, or the
+ * user is over its message quota), so requeue it for
+ * handling later.
+ */
+ rv = 1;
}
return rv;
@@ -4270,7 +4280,7 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
unsigned char chan;
struct ipmi_user *user = NULL;
struct ipmi_system_interface_addr *smi_addr;
- struct ipmi_recv_msg *recv_msg;
+ struct ipmi_recv_msg *recv_msg = NULL;
/*
* We expect the OEM SW to perform error checking
@@ -4299,9 +4309,8 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
if (rcvr) {
user = rcvr->user;
- kref_get(&user->refcount);
- } else
- user = NULL;
+ recv_msg = ipmi_alloc_recv_msg(user);
+ }
rcu_read_unlock();
if (user == NULL) {
@@ -4314,48 +4323,42 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
*/
rv = 0;
- } else {
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
- /*
- * We couldn't allocate memory for the
- * message, so requeue it for handling
- * later.
- */
- rv = 1;
- kref_put(&user->refcount, free_ipmi_user);
- } else {
- /*
- * OEM Messages are expected to be delivered via
- * the system interface to SMS software. We might
- * need to visit this again depending on OEM
- * requirements
- */
- smi_addr = ((struct ipmi_system_interface_addr *)
- &recv_msg->addr);
- smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
- smi_addr->channel = IPMI_BMC_CHANNEL;
- smi_addr->lun = msg->rsp[0] & 3;
-
- recv_msg->user = user;
- recv_msg->user_msg_data = NULL;
- recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
- recv_msg->msg.netfn = msg->rsp[0] >> 2;
- recv_msg->msg.cmd = msg->rsp[1];
- recv_msg->msg.data = recv_msg->msg_data;
+ } else if (!IS_ERR(recv_msg)) {
+ /*
+ * OEM Messages are expected to be delivered via
+ * the system interface to SMS software. We might
+ * need to visit this again depending on OEM
+ * requirements
+ */
+ smi_addr = ((struct ipmi_system_interface_addr *)
+ &recv_msg->addr);
+ smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr->channel = IPMI_BMC_CHANNEL;
+ smi_addr->lun = msg->rsp[0] & 3;
+
+ recv_msg->user_msg_data = NULL;
+ recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[1];
+ recv_msg->msg.data = recv_msg->msg_data;
- /*
- * The message starts at byte 4 which follows the
- * Channel Byte in the "GET MESSAGE" command
- */
- recv_msg->msg.data_len = msg->rsp_size - 4;
- memcpy(recv_msg->msg_data, &msg->rsp[4],
- msg->rsp_size - 4);
- if (deliver_response(intf, recv_msg))
- ipmi_inc_stat(intf, unhandled_commands);
- else
- ipmi_inc_stat(intf, handled_commands);
- }
+ /*
+ * The message starts at byte 4 which follows the
+ * Channel Byte in the "GET MESSAGE" command
+ */
+ recv_msg->msg.data_len = msg->rsp_size - 4;
+ memcpy(recv_msg->msg_data, &msg->rsp[4],
+ msg->rsp_size - 4);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ } else {
+ /*
+ * We couldn't get a receive message (out of memory, or the
+ * user is over its message quota), so requeue it for
+ * handling later.
+ */
+ rv = 1;
}
return rv;
@@ -4413,8 +4416,8 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
if (!user->gets_events)
continue;
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
+ recv_msg = ipmi_alloc_recv_msg(user);
+ if (IS_ERR(recv_msg)) {
mutex_unlock(&intf->users_mutex);
list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
link) {
@@ -4435,8 +4438,6 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
deliver_count++;
copy_event_into_recv_msg(recv_msg, msg);
- recv_msg->user = user;
- kref_get(&user->refcount);
list_add_tail(&recv_msg->link, &msgs);
}
mutex_unlock(&intf->users_mutex);
@@ -4452,8 +4453,8 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
* No one to receive the message, put it in queue if there's
* not already too many things in the queue.
*/
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
+ recv_msg = ipmi_alloc_recv_msg(NULL);
+ if (IS_ERR(recv_msg)) {
/*
* We couldn't allocate memory for the
* message, so requeue it for handling
@@ -4488,7 +4489,7 @@ static int handle_bmc_rsp(struct ipmi_smi *intf,
struct ipmi_recv_msg *recv_msg;
struct ipmi_system_interface_addr *smi_addr;
- recv_msg = msg->user_data;
+ recv_msg = msg->recv_msg;
if (recv_msg == NULL) {
dev_warn(intf->si_dev,
"IPMI SMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
@@ -4529,9 +4530,10 @@ static int handle_one_recv_msg(struct ipmi_smi *intf,
if (msg->rsp_size < 2) {
/* Message is too small to be correct. */
- dev_warn(intf->si_dev,
- "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
- (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
+ dev_warn_ratelimited(intf->si_dev,
+ "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
+ (msg->data[0] >> 2) | 1,
+ msg->data[1], msg->rsp_size);
return_unspecified:
/* Generate an error response for the message. */
@@ -4561,14 +4563,14 @@ return_unspecified:
} else if ((msg->data_size >= 2)
&& (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
&& (msg->data[1] == IPMI_SEND_MSG_CMD)
- && (msg->user_data == NULL)) {
+ && (msg->recv_msg == NULL)) {
if (intf->in_shutdown || intf->run_to_completion)
goto out;
/*
* This is the local response to a command send, start
- * the timer for these. The user_data will not be
+ * the timer for these. The recv_msg will not be
* NULL if this is a response send, and we will let
* response sends just go through.
*/
@@ -4628,7 +4630,7 @@ return_unspecified:
requeue = handle_ipmb_direct_rcv_rsp(intf, msg);
} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
&& (msg->rsp[1] == IPMI_SEND_MSG_CMD)
- && (msg->user_data != NULL)) {
+ && (msg->recv_msg != NULL)) {
/*
* It's a response to a response we sent. For this we
* deliver a send message response to the user.
@@ -4645,7 +4647,7 @@ return_unspecified:
cc = msg->rsp[2];
process_response_response:
- recv_msg = msg->user_data;
+ recv_msg = msg->recv_msg;
requeue = 0;
if (!recv_msg)
@@ -4801,6 +4803,7 @@ static void smi_work(struct work_struct *t)
int run_to_completion = READ_ONCE(intf->run_to_completion);
struct ipmi_smi_msg *newmsg = NULL;
struct ipmi_recv_msg *msg, *msg2;
+ int cc;
/*
* Start the next message if available.
@@ -4809,7 +4812,7 @@ static void smi_work(struct work_struct *t)
* because the lower layer is allowed to hold locks while calling
* message delivery.
*/
-
+restart:
if (!run_to_completion)
spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
if (intf->curr_msg == NULL && !intf->in_shutdown) {
@@ -4830,8 +4833,17 @@ static void smi_work(struct work_struct *t)
if (!run_to_completion)
spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
- if (newmsg)
- intf->handlers->sender(intf->send_info, newmsg);
+ if (newmsg) {
+ cc = intf->handlers->sender(intf->send_info, newmsg);
+ if (cc) {
+ if (newmsg->recv_msg)
+ deliver_err_response(intf,
+ newmsg->recv_msg, cc);
+ else
+ ipmi_free_smi_msg(newmsg);
+ goto restart;
+ }
+ }
handle_new_recv_msgs(intf);
@@ -4868,12 +4880,10 @@ static void smi_work(struct work_struct *t)
list_del(&msg->link);
- if (refcount_read(&user->destroyed) == 0) {
+ if (refcount_read(&user->destroyed) == 0)
ipmi_free_recv_msg(msg);
- } else {
- atomic_dec(&user->nr_msgs);
+ else
user->handler->ipmi_recv_hndl(msg, user->handler_data);
- }
}
mutex_unlock(&intf->user_msgs_mutex);
@@ -4951,8 +4961,7 @@ smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
struct list_head *timeouts,
unsigned long timeout_period,
- int slot, unsigned long *flags,
- bool *need_timer)
+ int slot, bool *need_timer)
{
struct ipmi_recv_msg *msg;
@@ -5004,7 +5013,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
return;
}
- spin_unlock_irqrestore(&intf->seq_lock, *flags);
+ mutex_unlock(&intf->seq_lock);
/*
* Send the new message. We send with a zero
@@ -5025,7 +5034,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
} else
ipmi_free_smi_msg(smi_msg);
- spin_lock_irqsave(&intf->seq_lock, *flags);
+ mutex_lock(&intf->seq_lock);
}
}
@@ -5052,7 +5061,7 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
* list.
*/
INIT_LIST_HEAD(&timeouts);
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
if (intf->ipmb_maintenance_mode_timeout) {
if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
intf->ipmb_maintenance_mode_timeout = 0;
@@ -5062,8 +5071,8 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
check_msg_timeout(intf, &intf->seq_table[i],
&timeouts, timeout_period, i,
- &flags, &need_timer);
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ &need_timer);
+ mutex_unlock(&intf->seq_lock);
list_for_each_entry_safe(msg, msg2, &timeouts, link)
deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);
@@ -5083,7 +5092,9 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
-= timeout_period;
if (!intf->maintenance_mode
&& (intf->auto_maintenance_timeout <= 0)) {
- intf->maintenance_mode_enable = false;
+ intf->maintenance_mode_state =
+ IPMI_MAINTENANCE_MODE_STATE_OFF;
+ intf->auto_maintenance_timeout = 0;
maintenance_mode_update(intf);
}
}
@@ -5099,15 +5110,13 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
static void ipmi_request_event(struct ipmi_smi *intf)
{
/* No event requests when in maintenance mode. */
- if (intf->maintenance_mode_enable)
+ if (intf->maintenance_mode_state)
return;
if (!intf->in_shutdown)
intf->handlers->request_events(intf->send_info);
}
-static struct timer_list ipmi_timer;
-
static atomic_t stop_operation;
static void ipmi_timeout_work(struct work_struct *work)
@@ -5131,6 +5140,8 @@ static void ipmi_timeout_work(struct work_struct *work)
}
need_timer = true;
}
+ if (intf->maintenance_mode_state)
+ need_timer = true;
need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
}
@@ -5174,7 +5185,7 @@ struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
if (rv) {
rv->done = free_smi_msg;
- rv->user_data = NULL;
+ rv->recv_msg = NULL;
rv->type = IPMI_SMI_MSG_TYPE_NORMAL;
atomic_inc(&smi_msg_inuse_count);
}
@@ -5190,27 +5201,51 @@ static void free_recv_msg(struct ipmi_recv_msg *msg)
kfree(msg);
}
-static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
+static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user)
{
struct ipmi_recv_msg *rv;
+ if (user) {
+ if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) {
+ atomic_dec(&user->nr_msgs);
+ return ERR_PTR(-EBUSY);
+ }
+ }
+
rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
- if (rv) {
- rv->user = NULL;
- rv->done = free_recv_msg;
- atomic_inc(&recv_msg_inuse_count);
+ if (!rv) {
+ if (user)
+ atomic_dec(&user->nr_msgs);
+ return ERR_PTR(-ENOMEM);
}
+
+ rv->user = user;
+ rv->done = free_recv_msg;
+ if (user)
+ kref_get(&user->refcount);
+ atomic_inc(&recv_msg_inuse_count);
return rv;
}
void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
{
- if (msg->user && !oops_in_progress)
+ if (msg->user && !oops_in_progress) {
+ atomic_dec(&msg->user->nr_msgs);
kref_put(&msg->user->refcount, free_ipmi_user);
+ }
msg->done(msg);
}
EXPORT_SYMBOL(ipmi_free_recv_msg);
+static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg,
+ struct ipmi_user *user)
+{
+ WARN_ON_ONCE(msg->user); /* User should not be set. */
+ msg->user = user;
+ atomic_inc(&user->nr_msgs);
+ kref_get(&user->refcount);
+}
+
static atomic_t panic_done_count = ATOMIC_INIT(0);
static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
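
With the allocation and quota logic folded into ipmi_alloc_recv_msg(), failure is reported through ERR_PTR rather than NULL, so callers can distinguish quota exhaustion from memory pressure; a minimal caller sketch:

	struct ipmi_recv_msg *recv_msg;

	recv_msg = ipmi_alloc_recv_msg(user);	/* charges quota, grabs user ref */
	if (IS_ERR(recv_msg))
		return PTR_ERR(recv_msg);	/* -EBUSY (quota) or -ENOMEM */
	/* fill in and deliver the message, then: */
	ipmi_free_recv_msg(recv_msg);		/* releases quota and user ref */
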
diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c
index 4a2efafcd1f8..52a1130defe5 100644
--- a/drivers/char/ipmi/ipmi_powernv.c
+++ b/drivers/char/ipmi/ipmi_powernv.c
@@ -51,7 +51,7 @@ static void send_error_reply(struct ipmi_smi_powernv *smi,
ipmi_smi_msg_received(smi->intf, msg);
}
-static void ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg)
+static int ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg)
{
struct ipmi_smi_powernv *smi = send_info;
struct opal_ipmi_msg *opal_msg;
@@ -93,18 +93,19 @@ static void ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg)
smi->interface_id, opal_msg, size);
rc = opal_ipmi_send(smi->interface_id, opal_msg, size);
pr_devel("%s: -> %d\n", __func__, rc);
-
- if (!rc) {
- smi->cur_msg = msg;
- spin_unlock_irqrestore(&smi->msg_lock, flags);
- return;
+ if (rc) {
+ comp = IPMI_ERR_UNSPECIFIED;
+ goto err_unlock;
}
- comp = IPMI_ERR_UNSPECIFIED;
+ smi->cur_msg = msg;
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+ return IPMI_CC_NO_ERROR;
+
err_unlock:
spin_unlock_irqrestore(&smi->msg_lock, flags);
err:
- send_error_reply(smi, msg, comp);
+ return comp;
}
static int ipmi_powernv_recv(struct ipmi_smi_powernv *smi)
diff --git a/drivers/char/ipmi/ipmi_si.h b/drivers/char/ipmi/ipmi_si.h
index 508c3fd45877..687835b53da5 100644
--- a/drivers/char/ipmi/ipmi_si.h
+++ b/drivers/char/ipmi/ipmi_si.h
@@ -101,6 +101,13 @@ void ipmi_si_pci_shutdown(void);
static inline void ipmi_si_pci_init(void) { }
static inline void ipmi_si_pci_shutdown(void) { }
#endif
+#ifdef CONFIG_IPMI_LS2K
+void ipmi_si_ls2k_init(void);
+void ipmi_si_ls2k_shutdown(void);
+#else
+static inline void ipmi_si_ls2k_init(void) { }
+static inline void ipmi_si_ls2k_shutdown(void) { }
+#endif
#ifdef CONFIG_PARISC
void ipmi_si_parisc_init(void);
void ipmi_si_parisc_shutdown(void);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 8b5524069c15..70e55f5ff85e 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -53,6 +53,7 @@
#define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
#define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM request a
short timeout */
+#define SI_TIMEOUT_HOSED (HZ) /* 1 second when in hosed state. */
enum si_intf_state {
SI_NORMAL,
@@ -61,7 +62,8 @@ enum si_intf_state {
SI_CLEARING_FLAGS,
SI_GETTING_MESSAGES,
SI_CHECKING_ENABLES,
- SI_SETTING_ENABLES
+ SI_SETTING_ENABLES,
+ SI_HOSED
/* FIXME - add watchdog stuff. */
};
@@ -313,7 +315,7 @@ static void return_hosed_msg(struct smi_info *smi_info, int cCode)
static enum si_sm_result start_next_msg(struct smi_info *smi_info)
{
- int rv;
+ int rv;
if (!smi_info->waiting_msg) {
smi_info->curr_msg = NULL;
@@ -390,6 +392,17 @@ static void start_clear_flags(struct smi_info *smi_info)
smi_info->si_state = SI_CLEARING_FLAGS;
}
+static void start_get_flags(struct smi_info *smi_info)
+{
+ unsigned char msg[2];
+
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_GET_MSG_FLAGS_CMD;
+
+ start_new_msg(smi_info, msg, 2);
+ smi_info->si_state = SI_GETTING_FLAGS;
+}
+
static void start_getting_msg_queue(struct smi_info *smi_info)
{
smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
@@ -742,6 +755,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
}
break;
}
+ case SI_HOSED: /* Shouldn't happen. */
+ break;
}
}
@@ -756,6 +771,10 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
enum si_sm_result si_sm_result;
restart:
+ if (smi_info->si_state == SI_HOSED)
+ /* Just in case; the hosed state is only left via the timeout handler. */
+ return SI_SM_HOSED;
+
/*
* There used to be a loop here that waited a little while
* (around 25us) before giving up. That turned out to be
@@ -779,18 +798,20 @@ restart:
/*
* Do the before return_hosed_msg, because that
- * releases the lock.
+ * releases the lock. We just disable operations for
+ * a while and retry in hosed state.
*/
- smi_info->si_state = SI_NORMAL;
+ smi_info->si_state = SI_HOSED;
if (smi_info->curr_msg != NULL) {
/*
* If we were handling a user message, format
* a response to send to the upper layer to
* tell it about the error.
*/
- return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
+ return_hosed_msg(smi_info, IPMI_BUS_ERR);
}
- goto restart;
+ smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_HOSED);
+ goto out;
}
/*
@@ -798,8 +819,6 @@ restart:
* this if there is not yet an upper layer to handle anything.
*/
if (si_sm_result == SI_SM_ATTN || smi_info->got_attn) {
- unsigned char msg[2];
-
if (smi_info->si_state != SI_NORMAL) {
/*
* We got an ATTN, but we are doing something else.
@@ -817,11 +836,7 @@ restart:
* interrupts work with the SMI, that's not really
* possible.
*/
- msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
- msg[1] = IPMI_GET_MSG_FLAGS_CMD;
-
- start_new_msg(smi_info, msg, 2);
- smi_info->si_state = SI_GETTING_FLAGS;
+ start_get_flags(smi_info);
goto restart;
}
}
@@ -894,27 +909,29 @@ static void flush_messages(void *send_info)
* mode. This means we are single-threaded, no need for locks.
*/
result = smi_event_handler(smi_info, 0);
- while (result != SI_SM_IDLE) {
+ while (result != SI_SM_IDLE && result != SI_SM_HOSED) {
udelay(SI_SHORT_TIMEOUT_USEC);
result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC);
}
}
-static void sender(void *send_info,
- struct ipmi_smi_msg *msg)
+static int sender(void *send_info, struct ipmi_smi_msg *msg)
{
struct smi_info *smi_info = send_info;
unsigned long flags;
debug_timestamp(smi_info, "Enqueue");
+ if (smi_info->si_state == SI_HOSED)
+ return IPMI_BUS_ERR;
+
if (smi_info->run_to_completion) {
/*
* If we are running to completion, start it. Upper
* layer will call flush_messages to clear it out.
*/
smi_info->waiting_msg = msg;
- return;
+ return IPMI_CC_NO_ERROR;
}
spin_lock_irqsave(&smi_info->si_lock, flags);
@@ -929,6 +946,7 @@ static void sender(void *send_info,
smi_info->waiting_msg = msg;
check_start_timer_thread(smi_info);
spin_unlock_irqrestore(&smi_info->si_lock, flags);
+ return IPMI_CC_NO_ERROR;
}
static void set_run_to_completion(void *send_info, bool i_run_to_completion)
@@ -1087,6 +1105,10 @@ static void smi_timeout(struct timer_list *t)
spin_lock_irqsave(&(smi_info->si_lock), flags);
debug_timestamp(smi_info, "Timer");
+ if (smi_info->si_state == SI_HOSED)
+ /* Try something to see if the BMC is now operational. */
+ start_get_flags(smi_info);
+
jiffies_now = jiffies;
time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
* SI_USEC_PER_JIFFY);
@@ -1096,14 +1118,11 @@ static void smi_timeout(struct timer_list *t)
/* Running with interrupts, only do long timeouts. */
timeout = jiffies + SI_TIMEOUT_JIFFIES;
smi_inc_stat(smi_info, long_timeouts);
- goto do_mod_timer;
- }
-
- /*
- * If the state machine asks for a short delay, then shorten
- * the timer timeout.
- */
- if (smi_result == SI_SM_CALL_WITH_DELAY) {
+ } else if (smi_result == SI_SM_CALL_WITH_DELAY) {
+ /*
+ * If the state machine asks for a short delay, then shorten
+ * the timer timeout.
+ */
smi_inc_stat(smi_info, short_timeouts);
timeout = jiffies + 1;
} else {
@@ -1111,7 +1130,6 @@ static void smi_timeout(struct timer_list *t)
timeout = jiffies + SI_TIMEOUT_JIFFIES;
}
-do_mod_timer:
if (smi_result != SI_SM_IDLE)
smi_mod_timer(smi_info, timeout);
else
@@ -2120,6 +2138,8 @@ static int __init init_ipmi_si(void)
ipmi_si_pci_init();
+ ipmi_si_ls2k_init();
+
ipmi_si_parisc_init();
mutex_lock(&smi_infos_lock);
@@ -2331,6 +2351,8 @@ static void cleanup_ipmi_si(void)
ipmi_si_pci_shutdown();
+ ipmi_si_ls2k_shutdown();
+
ipmi_si_parisc_shutdown();
ipmi_si_platform_shutdown();
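
The new SI_HOSED state turns a misbehaving BMC into a polled back-off loop: entering the state arms a one-second timer, and each expiry sends a Get Message Flags command to test whether the BMC is responsive again; in outline:

	/* smi_event_handler(), on SI_SM_HOSED from the state machine: */
	smi_info->si_state = SI_HOSED;
	smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_HOSED);	/* HZ == 1 s */

	/* smi_timeout(), on each expiry while hosed: */
	if (smi_info->si_state == SI_HOSED)
		start_get_flags(smi_info);	/* probe with GET_MSG_FLAGS */
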
diff --git a/drivers/char/ipmi/ipmi_si_ls2k.c b/drivers/char/ipmi/ipmi_si_ls2k.c
new file mode 100644
index 000000000000..45442c257efd
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_ls2k.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for Loongson-2K BMC IPMI interface
+ *
+ * Copyright (C) 2024-2025 Loongson Technology Corporation Limited.
+ *
+ * Authors:
+ * Chong Qiao <qiaochong@loongson.cn>
+ * Binbin Zhou <zhoubinbin@loongson.cn>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include "ipmi_si.h"
+
+#define LS2K_KCS_FIFO_IBFH 0x0
+#define LS2K_KCS_FIFO_IBFT 0x1
+#define LS2K_KCS_FIFO_OBFH 0x2
+#define LS2K_KCS_FIFO_OBFT 0x3
+
+/* KCS registers */
+#define LS2K_KCS_REG_STS 0x4
+#define LS2K_KCS_REG_DATA_OUT 0x5
+#define LS2K_KCS_REG_DATA_IN 0x6
+#define LS2K_KCS_REG_CMD 0x8
+
+#define LS2K_KCS_CMD_DATA 0xa
+#define LS2K_KCS_VERSION 0xb
+#define LS2K_KCS_WR_REQ 0xc
+#define LS2K_KCS_WR_ACK 0x10
+
+#define LS2K_KCS_STS_OBF BIT(0)
+#define LS2K_KCS_STS_IBF BIT(1)
+#define LS2K_KCS_STS_SMS_ATN BIT(2)
+#define LS2K_KCS_STS_CMD BIT(3)
+
+#define LS2K_KCS_DATA_MASK (LS2K_KCS_STS_OBF | LS2K_KCS_STS_IBF | LS2K_KCS_STS_CMD)
+
+static bool ls2k_registered;
+
+static unsigned char ls2k_mem_inb_v0(const struct si_sm_io *io, unsigned int offset)
+{
+ void __iomem *addr = io->addr;
+ int reg_offset;
+
+ if (offset & BIT(0)) {
+ reg_offset = LS2K_KCS_REG_STS;
+ } else {
+ writeb(readb(addr + LS2K_KCS_REG_STS) & ~LS2K_KCS_STS_OBF, addr + LS2K_KCS_REG_STS);
+ reg_offset = LS2K_KCS_REG_DATA_OUT;
+ }
+
+ return readb(addr + reg_offset);
+}
+
+static unsigned char ls2k_mem_inb_v1(const struct si_sm_io *io, unsigned int offset)
+{
+ void __iomem *addr = io->addr;
+ unsigned char inb = 0, cmd;
+ bool obf, ibf;
+
+ obf = readb(addr + LS2K_KCS_FIFO_OBFH) ^ readb(addr + LS2K_KCS_FIFO_OBFT);
+ ibf = readb(addr + LS2K_KCS_FIFO_IBFH) ^ readb(addr + LS2K_KCS_FIFO_IBFT);
+ cmd = readb(addr + LS2K_KCS_CMD_DATA);
+
+ if (offset & BIT(0)) {
+ inb = readb(addr + LS2K_KCS_REG_STS) & ~LS2K_KCS_DATA_MASK;
+ inb |= FIELD_PREP(LS2K_KCS_STS_OBF, obf)
+ | FIELD_PREP(LS2K_KCS_STS_IBF, ibf)
+ | FIELD_PREP(LS2K_KCS_STS_CMD, cmd);
+ } else {
+ inb = readb(addr + LS2K_KCS_REG_DATA_OUT);
+ writeb(readb(addr + LS2K_KCS_FIFO_OBFH), addr + LS2K_KCS_FIFO_OBFT);
+ }
+
+ return inb;
+}
+
+static void ls2k_mem_outb_v0(const struct si_sm_io *io, unsigned int offset,
+ unsigned char val)
+{
+ void __iomem *addr = io->addr;
+ unsigned char sts = readb(addr + LS2K_KCS_REG_STS);
+ int reg_offset;
+
+ if (sts & LS2K_KCS_STS_IBF)
+ return;
+
+ if (offset & BIT(0)) {
+ reg_offset = LS2K_KCS_REG_CMD;
+ sts |= LS2K_KCS_STS_CMD;
+ } else {
+ reg_offset = LS2K_KCS_REG_DATA_IN;
+ sts &= ~LS2K_KCS_STS_CMD;
+ }
+
+ writew(val, addr + reg_offset);
+ writeb(sts | LS2K_KCS_STS_IBF, addr + LS2K_KCS_REG_STS);
+ writel(readl(addr + LS2K_KCS_WR_REQ) + 1, addr + LS2K_KCS_WR_REQ);
+}
+
+static void ls2k_mem_outb_v1(const struct si_sm_io *io, unsigned int offset,
+ unsigned char val)
+{
+ void __iomem *addr = io->addr;
+ unsigned char ibfh, ibft;
+ int reg_offset;
+
+ ibfh = readb(addr + LS2K_KCS_FIFO_IBFH);
+ ibft = readb(addr + LS2K_KCS_FIFO_IBFT);
+
+ if (ibfh ^ ibft)
+ return;
+
+ reg_offset = (offset & BIT(0)) ? LS2K_KCS_REG_CMD : LS2K_KCS_REG_DATA_IN;
+ writew(val, addr + reg_offset);
+
+ writeb(offset & BIT(0), addr + LS2K_KCS_CMD_DATA);
+ writeb(!ibft, addr + LS2K_KCS_FIFO_IBFH);
+ writel(readl(addr + LS2K_KCS_WR_REQ) + 1, addr + LS2K_KCS_WR_REQ);
+}
+
+static void ls2k_mem_cleanup(struct si_sm_io *io)
+{
+ if (io->addr)
+ iounmap(io->addr);
+}
+
+static int ipmi_ls2k_mem_setup(struct si_sm_io *io)
+{
+ unsigned char version;
+
+ io->addr = ioremap(io->addr_data, io->regspacing);
+ if (!io->addr)
+ return -EIO;
+
+ version = readb(io->addr + LS2K_KCS_VERSION);
+
+ io->inputb = version ? ls2k_mem_inb_v1 : ls2k_mem_inb_v0;
+ io->outputb = version ? ls2k_mem_outb_v1 : ls2k_mem_outb_v0;
+ io->io_cleanup = ls2k_mem_cleanup;
+
+ return 0;
+}
+
+static int ipmi_ls2k_probe(struct platform_device *pdev)
+{
+ struct si_sm_io io;
+
+ memset(&io, 0, sizeof(io));
+
+ io.si_info = &ipmi_kcs_si_info;
+ io.io_setup = ipmi_ls2k_mem_setup;
+ io.addr_data = pdev->resource[0].start;
+ io.regspacing = resource_size(&pdev->resource[0]);
+ io.dev = &pdev->dev;
+
+ dev_dbg(&pdev->dev, "addr 0x%lx, spacing %d.\n", io.addr_data, io.regspacing);
+
+ return ipmi_si_add_smi(&io);
+}
+
+static void ipmi_ls2k_remove(struct platform_device *pdev)
+{
+ ipmi_si_remove_by_dev(&pdev->dev);
+}
+
+struct platform_driver ipmi_ls2k_platform_driver = {
+ .driver = {
+ .name = "ls2k-ipmi-si",
+ },
+ .probe = ipmi_ls2k_probe,
+ .remove = ipmi_ls2k_remove,
+};
+
+void ipmi_si_ls2k_init(void)
+{
+ platform_driver_register(&ipmi_ls2k_platform_driver);
+ ls2k_registered = true;
+}
+
+void ipmi_si_ls2k_shutdown(void)
+{
+ if (ls2k_registered)
+ platform_driver_unregister(&ipmi_ls2k_platform_driver);
+}
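
The v1 interface above replaces the classic KCS OBF/IBF status bits with single-byte head/tail flags: a FIFO slot is pending exactly when its head and tail flags differ, the producer toggles the head flag to publish, and the consumer copies head into tail to retire. A minimal sketch of that handshake, with hypothetical helpers that are not part of the driver:

/* XOR head/tail handshake used by the v1 registers (sketch). */
static bool fifo_has_data(u8 head, u8 tail)
{
	return head ^ tail;	/* flags differ => one entry pending */
}

static u8 fifo_publish(u8 head)
{
	return !head;		/* toggle the head flag, as in outb_v1 */
}

static u8 fifo_retire(u8 head)
{
	return head;		/* store into the tail flag, as in inb_v1 */
}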
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 1bc42830444d..1b63f7d2fcda 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -1068,8 +1068,7 @@ static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags)
}
}
-static void sender(void *send_info,
- struct ipmi_smi_msg *msg)
+static int sender(void *send_info, struct ipmi_smi_msg *msg)
{
struct ssif_info *ssif_info = send_info;
unsigned long oflags, *flags;
@@ -1089,6 +1088,7 @@ static void sender(void *send_info,
msg->data[0], msg->data[1],
(long long)t.tv_sec, (long)t.tv_nsec / NSEC_PER_USEC);
}
+ return IPMI_CC_NO_ERROR;
}
static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
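
Both sender() implementations now report an IPMI completion code instead of returning void. A hedged sketch of the new contract, with a hypothetical backend, assuming IPMI_CC_NO_ERROR signals a successful enqueue and IPMI_BUS_ERR a dead interface:

/* Hypothetical backend illustrating the int-returning sender(). */
static int example_sender(void *send_info, struct ipmi_smi_msg *msg)
{
	struct example_smi *smi = send_info;	/* hypothetical type */

	if (smi->dead)
		return IPMI_BUS_ERR;		/* reject, interface hosed */

	example_enqueue(smi, msg);		/* hypothetical helper */
	return IPMI_CC_NO_ERROR;		/* message accepted */
}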
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 48839958b0b1..34b815901b20 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -512,11 +512,18 @@ static int mmap_zero(struct file *file, struct vm_area_struct *vma)
return 0;
}
+#ifndef CONFIG_MMU
+static unsigned long get_unmapped_area_zero(struct file *file,
+ unsigned long addr, unsigned long len,
+ unsigned long pgoff, unsigned long flags)
+{
+ return -ENOSYS;
+}
+#else
static unsigned long get_unmapped_area_zero(struct file *file,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
-#ifdef CONFIG_MMU
if (flags & MAP_SHARED) {
/*
* mmap_zero() will call shmem_zero_setup() to create a file,
@@ -527,12 +534,18 @@ static unsigned long get_unmapped_area_zero(struct file *file,
return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
}
- /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
- return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
+ /*
+ * Otherwise flags & MAP_PRIVATE: with no shmem object beneath it,
+ * attempt to map aligned to huge page size if possible, otherwise we
+ * fall back to system page size mappings.
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ return thp_get_unmapped_area(file, addr, len, pgoff, flags);
#else
- return -ENOSYS;
+ return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
#endif
}
+#endif /* CONFIG_MMU */
static ssize_t write_full(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
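
The MAP_PRIVATE branch above now asks for a THP-friendly placement. A userspace sketch of the observable effect, assuming CONFIG_TRANSPARENT_HUGEPAGE and noting that the alignment is opportunistic rather than guaranteed:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	int fd = open("/dev/zero", O_RDWR);
	void *p;

	if (fd < 0)
		return 1;
	p = mmap(NULL, 4UL << 20, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	/* On x86-64 a PMD is 2 MiB; check the low 21 address bits. */
	printf("mapped at %p, 2 MiB aligned: %s\n", p,
	       ((unsigned long)p & ((1UL << 21) - 1)) ? "no" : "yes");
	return 0;
}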
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 558302a64dd9..726516fb0a3b 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -132,7 +132,8 @@ static int misc_open(struct inode *inode, struct file *file)
break;
}
- if (!new_fops) {
+	/* Only request a module for fixed minor numbers */
+ if (!new_fops && minor < MISC_DYNAMIC_MINOR) {
mutex_unlock(&misc_mtx);
request_module("char-major-%d-%d", MISC_MAJOR, minor);
mutex_lock(&misc_mtx);
@@ -144,10 +145,11 @@ static int misc_open(struct inode *inode, struct file *file)
new_fops = fops_get(iter->fops);
break;
}
- if (!new_fops)
- goto fail;
}
+ if (!new_fops)
+ goto fail;
+
/*
* Place the miscdevice in the file's
* private_data so it can be used by the
@@ -210,6 +212,12 @@ int misc_register(struct miscdevice *misc)
int err = 0;
bool is_dynamic = (misc->minor == MISC_DYNAMIC_MINOR);
+ if (misc->minor > MISC_DYNAMIC_MINOR) {
+ pr_err("Invalid fixed minor %d for miscdevice '%s'\n",
+ misc->minor, misc->name);
+ return -EINVAL;
+ }
+
INIT_LIST_HEAD(&misc->list);
mutex_lock(&misc_mtx);
@@ -275,13 +283,12 @@ EXPORT_SYMBOL(misc_register);
void misc_deregister(struct miscdevice *misc)
{
- if (WARN_ON(list_empty(&misc->list)))
- return;
-
mutex_lock(&misc_mtx);
- list_del(&misc->list);
+ list_del_init(&misc->list);
device_destroy(&misc_class, MKDEV(MISC_MAJOR, misc->minor));
misc_minor_free(misc->minor);
+ if (misc->minor > MISC_DYNAMIC_MINOR)
+ misc->minor = MISC_DYNAMIC_MINOR;
mutex_unlock(&misc_mtx);
}
EXPORT_SYMBOL(misc_deregister);
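
The net effect of the two hunks above: minors greater than MISC_DYNAMIC_MINOR are reserved for dynamic allocation, and misc_deregister() now resets a dynamically assigned minor so the same struct can be reused. A sketch with a hypothetical device:

/* "example" is a hypothetical miscdevice, not part of the patch. */
static const struct file_operations example_fops = {
	.owner = THIS_MODULE,
};

static struct miscdevice example_dev = {
	.minor = MISC_DYNAMIC_MINOR,	/* request dynamic allocation */
	.name  = "example",
	.fops  = &example_fops,
};

/*
 * misc_register(&example_dev) fills .minor with a value above
 * MISC_DYNAMIC_MINOR; requesting such a value as a fixed minor now
 * fails with -EINVAL, and misc_deregister() restores .minor to
 * MISC_DYNAMIC_MINOR so the device can be registered again.
 */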
diff --git a/drivers/misc/misc_minor_kunit.c b/drivers/char/misc_minor_kunit.c
index 30eceac5f1b6..6fc8b05169c5 100644
--- a/drivers/misc/misc_minor_kunit.c
+++ b/drivers/char/misc_minor_kunit.c
@@ -7,12 +7,6 @@
#include <linux/file.h>
#include <linux/init_syscalls.h>
-/* dynamic minor (2) */
-static struct miscdevice dev_dynamic_minor = {
- .minor = 2,
- .name = "dev_dynamic_minor",
-};
-
/* static minor (LCD_MINOR) */
static struct miscdevice dev_static_minor = {
.minor = LCD_MINOR,
@@ -25,16 +19,6 @@ static struct miscdevice dev_misc_dynamic_minor = {
.name = "dev_misc_dynamic_minor",
};
-static void kunit_dynamic_minor(struct kunit *test)
-{
- int ret;
-
- ret = misc_register(&dev_dynamic_minor);
- KUNIT_EXPECT_EQ(test, 0, ret);
- KUNIT_EXPECT_EQ(test, 2, dev_dynamic_minor.minor);
- misc_deregister(&dev_dynamic_minor);
-}
-
static void kunit_static_minor(struct kunit *test)
{
int ret;
@@ -157,13 +141,7 @@ static bool is_valid_dynamic_minor(int minor)
{
if (minor < 0)
return false;
- if (minor == MISC_DYNAMIC_MINOR)
- return false;
- if (minor >= 0 && minor <= 15)
- return false;
- if (minor >= 128 && minor < MISC_DYNAMIC_MINOR)
- return false;
- return true;
+ return minor > MISC_DYNAMIC_MINOR;
}
static int miscdev_test_open(struct inode *inode, struct file *file)
@@ -557,7 +535,7 @@ static void __init miscdev_test_conflict(struct kunit *test)
*/
miscstat.minor = miscdyn.minor;
ret = misc_register(&miscstat);
- KUNIT_EXPECT_EQ(test, ret, -EBUSY);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
if (ret == 0)
misc_deregister(&miscstat);
@@ -590,8 +568,9 @@ static void __init miscdev_test_conflict_reverse(struct kunit *test)
misc_deregister(&miscdyn);
ret = misc_register(&miscstat);
- KUNIT_EXPECT_EQ(test, ret, 0);
- KUNIT_EXPECT_EQ(test, miscstat.minor, miscdyn.minor);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
+ if (ret == 0)
+ misc_deregister(&miscstat);
/*
* Try to register a dynamic minor after registering a static minor
@@ -601,25 +580,81 @@ static void __init miscdev_test_conflict_reverse(struct kunit *test)
miscdyn.minor = MISC_DYNAMIC_MINOR;
ret = misc_register(&miscdyn);
KUNIT_EXPECT_EQ(test, ret, 0);
- KUNIT_EXPECT_NE(test, miscdyn.minor, miscstat.minor);
+ KUNIT_EXPECT_EQ(test, miscdyn.minor, miscstat.minor);
KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdyn.minor));
if (ret == 0)
misc_deregister(&miscdyn);
+}
- miscdev_test_can_open(test, &miscstat);
+/* Treat minors above MISC_DYNAMIC_MINOR as invalid when registering */
+static void miscdev_test_invalid_input(struct kunit *test)
+{
+ struct miscdevice misc_test = {
+ .minor = MISC_DYNAMIC_MINOR + 1,
+ .name = "misc_test",
+ .fops = &miscdev_test_fops,
+ };
+ int ret;
- misc_deregister(&miscstat);
+ ret = misc_register(&misc_test);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
+ if (ret == 0)
+ misc_deregister(&misc_test);
+}
+
+/*
+ * Verify that @miscdyn_a can be registered again, without being
+ * reinitialized, even after the minor it previously owned has been
+ * claimed by another miscdevice such as @miscdyn_b.
+ */
+static void miscdev_test_dynamic_reentry(struct kunit *test)
+{
+ struct miscdevice miscdyn_a = {
+ .name = "miscdyn_a",
+ .minor = MISC_DYNAMIC_MINOR,
+ .fops = &miscdev_test_fops,
+ };
+ struct miscdevice miscdyn_b = {
+ .name = "miscdyn_b",
+ .minor = MISC_DYNAMIC_MINOR,
+ .fops = &miscdev_test_fops,
+ };
+ int ret, minor_a;
+
+ ret = misc_register(&miscdyn_a);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdyn_a.minor));
+ minor_a = miscdyn_a.minor;
+ if (ret != 0)
+ return;
+ misc_deregister(&miscdyn_a);
+
+ ret = misc_register(&miscdyn_b);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, miscdyn_b.minor, minor_a);
+ if (ret != 0)
+ return;
+
+ ret = misc_register(&miscdyn_a);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdyn_a.minor));
+ KUNIT_EXPECT_NE(test, miscdyn_a.minor, miscdyn_b.minor);
+ if (ret == 0)
+ misc_deregister(&miscdyn_a);
+
+ misc_deregister(&miscdyn_b);
}
static struct kunit_case test_cases[] = {
- KUNIT_CASE(kunit_dynamic_minor),
KUNIT_CASE(kunit_static_minor),
KUNIT_CASE(kunit_misc_dynamic_minor),
+ KUNIT_CASE(miscdev_test_invalid_input),
KUNIT_CASE_PARAM(miscdev_test_twice, miscdev_gen_params),
KUNIT_CASE_PARAM(miscdev_test_duplicate_minor, miscdev_gen_params),
KUNIT_CASE(miscdev_test_duplicate_name),
KUNIT_CASE(miscdev_test_duplicate_name_leak),
KUNIT_CASE_PARAM(miscdev_test_duplicate_error, miscdev_gen_params),
+ KUNIT_CASE(miscdev_test_dynamic_reentry),
{}
};
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index dddd702b2454..8a8f692b6088 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -29,10 +29,11 @@ if TCG_TPM
config TCG_TPM2_HMAC
bool "Use HMAC and encrypted transactions on the TPM bus"
- default X86_64
+ default n
select CRYPTO_ECDH
select CRYPTO_LIB_AESCFB
select CRYPTO_LIB_SHA256
+ select CRYPTO_LIB_UTILS
help
Setting this causes us to deploy a scheme which uses request
and response HMACs in addition to encryption for
@@ -189,6 +190,15 @@ config TCG_IBMVTPM
will be accessible from within Linux. To compile this driver
as a module, choose M here; the module will be called tpm_ibmvtpm.
+config TCG_LOONGSON
+ tristate "Loongson TPM Interface"
+ depends on MFD_LOONGSON_SE
+ help
+ If you want to make Loongson TPM support available, say Yes and
+ it will be accessible from within Linux. To compile this
+ driver as a module, choose M here; the module will be called
+ tpm_loongson.
+
config TCG_XEN
tristate "XEN TPM Interface"
depends on TCG_TPM && XEN
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 9de1b3ea34a9..5b5cdc0d32e4 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -46,3 +46,4 @@ obj-$(CONFIG_TCG_ARM_CRB_FFA) += tpm_crb_ffa.o
obj-$(CONFIG_TCG_VTPM_PROXY) += tpm_vtpm_proxy.o
obj-$(CONFIG_TCG_FTPM_TEE) += tpm_ftpm_tee.o
obj-$(CONFIG_TCG_SVSM) += tpm_svsm.o
+obj-$(CONFIG_TCG_LOONGSON) += tpm_loongson.o
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index b71725827743..c9f173001d0e 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -52,7 +52,7 @@ MODULE_PARM_DESC(suspend_pcr,
unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal)
{
if (chip->flags & TPM_CHIP_FLAG_TPM2)
- return tpm2_calc_ordinal_duration(chip, ordinal);
+ return tpm2_calc_ordinal_duration(ordinal);
else
return tpm1_calc_ordinal_duration(chip, ordinal);
}
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 7bb87fa5f7a1..2726bd38e5ac 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -299,7 +299,7 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id,
ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip);
int tpm2_auto_startup(struct tpm_chip *chip);
void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type);
-unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
+unsigned long tpm2_calc_ordinal_duration(u32 ordinal);
int tpm2_probe(struct tpm_chip *chip);
int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip);
int tpm2_find_cc(struct tpm_chip *chip, u32 cc);
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 524d802ede26..7d77f6fbc152 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -28,120 +28,57 @@ static struct tpm2_hash tpm2_hash_map[] = {
int tpm2_get_timeouts(struct tpm_chip *chip)
{
- /* Fixed timeouts for TPM2 */
chip->timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A);
chip->timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B);
chip->timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C);
chip->timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D);
-
- /* PTP spec timeouts */
- chip->duration[TPM_SHORT] = msecs_to_jiffies(TPM2_DURATION_SHORT);
- chip->duration[TPM_MEDIUM] = msecs_to_jiffies(TPM2_DURATION_MEDIUM);
- chip->duration[TPM_LONG] = msecs_to_jiffies(TPM2_DURATION_LONG);
-
- /* Key creation commands long timeouts */
- chip->duration[TPM_LONG_LONG] =
- msecs_to_jiffies(TPM2_DURATION_LONG_LONG);
-
chip->flags |= TPM_CHIP_FLAG_HAVE_TIMEOUTS;
-
return 0;
}
-/**
- * tpm2_ordinal_duration_index() - returns an index to the chip duration table
- * @ordinal: TPM command ordinal.
- *
- * The function returns an index to the chip duration table
- * (enum tpm_duration), that describes the maximum amount of
- * time the chip could take to return the result for a particular ordinal.
- *
- * The values of the MEDIUM, and LONG durations are taken
- * from the PC Client Profile (PTP) specification (750, 2000 msec)
- *
- * LONG_LONG is for commands that generates keys which empirically takes
- * a longer time on some systems.
- *
- * Return:
- * * TPM_MEDIUM
- * * TPM_LONG
- * * TPM_LONG_LONG
- * * TPM_UNDEFINED
+/*
+ * Contains the maximum durations in milliseconds for TPM2 commands.
*/
-static u8 tpm2_ordinal_duration_index(u32 ordinal)
-{
- switch (ordinal) {
- /* Startup */
- case TPM2_CC_STARTUP: /* 144 */
- return TPM_MEDIUM;
-
- case TPM2_CC_SELF_TEST: /* 143 */
- return TPM_LONG;
-
- case TPM2_CC_GET_RANDOM: /* 17B */
- return TPM_LONG;
-
- case TPM2_CC_SEQUENCE_UPDATE: /* 15C */
- return TPM_MEDIUM;
- case TPM2_CC_SEQUENCE_COMPLETE: /* 13E */
- return TPM_MEDIUM;
- case TPM2_CC_EVENT_SEQUENCE_COMPLETE: /* 185 */
- return TPM_MEDIUM;
- case TPM2_CC_HASH_SEQUENCE_START: /* 186 */
- return TPM_MEDIUM;
-
- case TPM2_CC_VERIFY_SIGNATURE: /* 177 */
- return TPM_LONG_LONG;
-
- case TPM2_CC_PCR_EXTEND: /* 182 */
- return TPM_MEDIUM;
-
- case TPM2_CC_HIERARCHY_CONTROL: /* 121 */
- return TPM_LONG;
- case TPM2_CC_HIERARCHY_CHANGE_AUTH: /* 129 */
- return TPM_LONG;
-
- case TPM2_CC_GET_CAPABILITY: /* 17A */
- return TPM_MEDIUM;
-
- case TPM2_CC_NV_READ: /* 14E */
- return TPM_LONG;
-
- case TPM2_CC_CREATE_PRIMARY: /* 131 */
- return TPM_LONG_LONG;
- case TPM2_CC_CREATE: /* 153 */
- return TPM_LONG_LONG;
- case TPM2_CC_CREATE_LOADED: /* 191 */
- return TPM_LONG_LONG;
-
- default:
- return TPM_UNDEFINED;
- }
-}
+static const struct {
+ unsigned long ordinal;
+ unsigned long duration;
+} tpm2_ordinal_duration_map[] = {
+ {TPM2_CC_STARTUP, 750},
+ {TPM2_CC_SELF_TEST, 3000},
+ {TPM2_CC_GET_RANDOM, 2000},
+ {TPM2_CC_SEQUENCE_UPDATE, 750},
+ {TPM2_CC_SEQUENCE_COMPLETE, 750},
+ {TPM2_CC_EVENT_SEQUENCE_COMPLETE, 750},
+ {TPM2_CC_HASH_SEQUENCE_START, 750},
+ {TPM2_CC_VERIFY_SIGNATURE, 30000},
+ {TPM2_CC_PCR_EXTEND, 750},
+ {TPM2_CC_HIERARCHY_CONTROL, 2000},
+ {TPM2_CC_HIERARCHY_CHANGE_AUTH, 2000},
+ {TPM2_CC_GET_CAPABILITY, 750},
+ {TPM2_CC_NV_READ, 2000},
+ {TPM2_CC_CREATE_PRIMARY, 30000},
+ {TPM2_CC_CREATE, 30000},
+ {TPM2_CC_CREATE_LOADED, 30000},
+};
/**
- * tpm2_calc_ordinal_duration() - calculate the maximum command duration
- * @chip: TPM chip to use.
+ * tpm2_calc_ordinal_duration() - Calculate the maximum command duration
* @ordinal: TPM command ordinal.
*
- * The function returns the maximum amount of time the chip could take
- * to return the result for a particular ordinal in jiffies.
- *
- * Return: A maximal duration time for an ordinal in jiffies.
+ * Return: the maximum amount of time, in jiffies, that the kernel
+ * expects the chip to take for @ordinal.
*/
-unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal)
+unsigned long tpm2_calc_ordinal_duration(u32 ordinal)
{
- unsigned int index;
+ int i;
- index = tpm2_ordinal_duration_index(ordinal);
+ for (i = 0; i < ARRAY_SIZE(tpm2_ordinal_duration_map); i++)
+ if (ordinal == tpm2_ordinal_duration_map[i].ordinal)
+ return msecs_to_jiffies(tpm2_ordinal_duration_map[i].duration);
- if (index != TPM_UNDEFINED)
- return chip->duration[index];
- else
- return msecs_to_jiffies(TPM2_DURATION_DEFAULT);
+ return msecs_to_jiffies(TPM2_DURATION_DEFAULT);
}
-
struct tpm2_pcr_read_out {
__be32 update_cnt;
__be32 pcr_selects_cnt;
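
A hedged sketch of how a caller might consume the table-driven duration; wait_for_tpm_ready() is a hypothetical helper, and unlisted ordinals fall back to TPM2_DURATION_DEFAULT:

static int example_wait(struct tpm_chip *chip, u32 ordinal)
{
	unsigned long stop = jiffies + tpm_calc_ordinal_duration(chip, ordinal);

	while (!wait_for_tpm_ready(chip)) {	/* hypothetical */
		if (time_after(jiffies, stop))
			return -ETIME;
		usleep_range(100, 200);
	}
	return 0;
}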
diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c
index bdb119453dfb..6d03c224e6b2 100644
--- a/drivers/char/tpm/tpm2-sessions.c
+++ b/drivers/char/tpm/tpm2-sessions.c
@@ -69,8 +69,8 @@
#include <linux/unaligned.h>
#include <crypto/kpp.h>
#include <crypto/ecdh.h>
-#include <crypto/hash.h>
-#include <crypto/hmac.h>
+#include <crypto/sha2.h>
+#include <crypto/utils.h>
/* maximum number of names the TPM must remember for authorization */
#define AUTH_MAX_NAMES 3
@@ -385,51 +385,6 @@ static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy,
u32 *handle, u8 *name);
/*
- * It turns out the crypto hmac(sha256) is hard for us to consume
- * because it assumes a fixed key and the TPM seems to change the key
- * on every operation, so we weld the hmac init and final functions in
- * here to give it the same usage characteristics as a regular hash
- */
-static void tpm2_hmac_init(struct sha256_ctx *sctx, u8 *key, u32 key_len)
-{
- u8 pad[SHA256_BLOCK_SIZE];
- int i;
-
- sha256_init(sctx);
- for (i = 0; i < sizeof(pad); i++) {
- if (i < key_len)
- pad[i] = key[i];
- else
- pad[i] = 0;
- pad[i] ^= HMAC_IPAD_VALUE;
- }
- sha256_update(sctx, pad, sizeof(pad));
-}
-
-static void tpm2_hmac_final(struct sha256_ctx *sctx, u8 *key, u32 key_len,
- u8 *out)
-{
- u8 pad[SHA256_BLOCK_SIZE];
- int i;
-
- for (i = 0; i < sizeof(pad); i++) {
- if (i < key_len)
- pad[i] = key[i];
- else
- pad[i] = 0;
- pad[i] ^= HMAC_OPAD_VALUE;
- }
-
- /* collect the final hash; use out as temporary storage */
- sha256_final(sctx, out);
-
- sha256_init(sctx);
- sha256_update(sctx, pad, sizeof(pad));
- sha256_update(sctx, out, SHA256_DIGEST_SIZE);
- sha256_final(sctx, out);
-}
-
-/*
* assume hash sha256 and nonces u, v of size SHA256_DIGEST_SIZE but
* otherwise standard tpm2_KDFa. Note output is in bytes not bits.
*/
@@ -440,16 +395,16 @@ static void tpm2_KDFa(u8 *key, u32 key_len, const char *label, u8 *u,
const __be32 bits = cpu_to_be32(bytes * 8);
while (bytes > 0) {
- struct sha256_ctx sctx;
+ struct hmac_sha256_ctx hctx;
__be32 c = cpu_to_be32(counter);
- tpm2_hmac_init(&sctx, key, key_len);
- sha256_update(&sctx, (u8 *)&c, sizeof(c));
- sha256_update(&sctx, label, strlen(label)+1);
- sha256_update(&sctx, u, SHA256_DIGEST_SIZE);
- sha256_update(&sctx, v, SHA256_DIGEST_SIZE);
- sha256_update(&sctx, (u8 *)&bits, sizeof(bits));
- tpm2_hmac_final(&sctx, key, key_len, out);
+ hmac_sha256_init_usingrawkey(&hctx, key, key_len);
+ hmac_sha256_update(&hctx, (u8 *)&c, sizeof(c));
+ hmac_sha256_update(&hctx, label, strlen(label) + 1);
+ hmac_sha256_update(&hctx, u, SHA256_DIGEST_SIZE);
+ hmac_sha256_update(&hctx, v, SHA256_DIGEST_SIZE);
+ hmac_sha256_update(&hctx, (u8 *)&bits, sizeof(bits));
+ hmac_sha256_final(&hctx, out);
bytes -= SHA256_DIGEST_SIZE;
counter++;
@@ -593,6 +548,7 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
u32 attrs;
u8 cphash[SHA256_DIGEST_SIZE];
struct sha256_ctx sctx;
+ struct hmac_sha256_ctx hctx;
if (!auth)
return;
@@ -704,14 +660,14 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
sha256_final(&sctx, cphash);
/* now calculate the hmac */
- tpm2_hmac_init(&sctx, auth->session_key, sizeof(auth->session_key)
- + auth->passphrase_len);
- sha256_update(&sctx, cphash, sizeof(cphash));
- sha256_update(&sctx, auth->our_nonce, sizeof(auth->our_nonce));
- sha256_update(&sctx, auth->tpm_nonce, sizeof(auth->tpm_nonce));
- sha256_update(&sctx, &auth->attrs, 1);
- tpm2_hmac_final(&sctx, auth->session_key, sizeof(auth->session_key)
- + auth->passphrase_len, hmac);
+ hmac_sha256_init_usingrawkey(&hctx, auth->session_key,
+ sizeof(auth->session_key) +
+ auth->passphrase_len);
+ hmac_sha256_update(&hctx, cphash, sizeof(cphash));
+ hmac_sha256_update(&hctx, auth->our_nonce, sizeof(auth->our_nonce));
+ hmac_sha256_update(&hctx, auth->tpm_nonce, sizeof(auth->tpm_nonce));
+ hmac_sha256_update(&hctx, &auth->attrs, 1);
+ hmac_sha256_final(&hctx, hmac);
}
EXPORT_SYMBOL(tpm_buf_fill_hmac_session);
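
The welded ipad/opad pair is gone in favor of the library's keyed context. A minimal sketch of the pattern, using only the helpers visible above; key and message names are illustrative:

static void example_hmac(const u8 *key, size_t key_len,
			 const u8 *msg, size_t msg_len,
			 u8 out[SHA256_DIGEST_SIZE])
{
	struct hmac_sha256_ctx hctx;

	hmac_sha256_init_usingrawkey(&hctx, key, key_len);
	hmac_sha256_update(&hctx, msg, msg_len);
	hmac_sha256_final(&hctx, out);	/* digest lands in out */
}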
@@ -751,6 +707,7 @@ int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf,
u8 rphash[SHA256_DIGEST_SIZE];
u32 attrs, cc;
struct sha256_ctx sctx;
+ struct hmac_sha256_ctx hctx;
u16 tag = be16_to_cpu(head->tag);
int parm_len, len, i, handles;
@@ -820,21 +777,20 @@ int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf,
sha256_final(&sctx, rphash);
/* now calculate the hmac */
- tpm2_hmac_init(&sctx, auth->session_key, sizeof(auth->session_key)
- + auth->passphrase_len);
- sha256_update(&sctx, rphash, sizeof(rphash));
- sha256_update(&sctx, auth->tpm_nonce, sizeof(auth->tpm_nonce));
- sha256_update(&sctx, auth->our_nonce, sizeof(auth->our_nonce));
- sha256_update(&sctx, &auth->attrs, 1);
+ hmac_sha256_init_usingrawkey(&hctx, auth->session_key,
+ sizeof(auth->session_key) +
+ auth->passphrase_len);
+ hmac_sha256_update(&hctx, rphash, sizeof(rphash));
+ hmac_sha256_update(&hctx, auth->tpm_nonce, sizeof(auth->tpm_nonce));
+ hmac_sha256_update(&hctx, auth->our_nonce, sizeof(auth->our_nonce));
+ hmac_sha256_update(&hctx, &auth->attrs, 1);
/* we're done with the rphash, so put our idea of the hmac there */
- tpm2_hmac_final(&sctx, auth->session_key, sizeof(auth->session_key)
- + auth->passphrase_len, rphash);
- if (memcmp(rphash, &buf->data[offset_s], SHA256_DIGEST_SIZE) == 0) {
- rc = 0;
- } else {
+ hmac_sha256_final(&hctx, rphash);
+ if (crypto_memneq(rphash, &buf->data[offset_s], SHA256_DIGEST_SIZE)) {
dev_err(&chip->dev, "TPM: HMAC check failed\n");
goto out;
}
+ rc = 0;
/* now do response decryption */
if (auth->attrs & TPM2_SA_ENCRYPT) {
diff --git a/drivers/char/tpm/tpm_loongson.c b/drivers/char/tpm/tpm_loongson.c
new file mode 100644
index 000000000000..9e50250763d1
--- /dev/null
+++ b/drivers/char/tpm/tpm_loongson.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Loongson Technology Corporation Limited. */
+
+#include <linux/device.h>
+#include <linux/mfd/loongson-se.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+
+#include "tpm.h"
+
+struct tpm_loongson_cmd {
+ u32 cmd_id;
+ u32 data_off;
+ u32 data_len;
+ u32 pad[5];
+};
+
+static int tpm_loongson_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct loongson_se_engine *tpm_engine = dev_get_drvdata(&chip->dev);
+ struct tpm_loongson_cmd *cmd_ret = tpm_engine->command_ret;
+
+ if (cmd_ret->data_len > count)
+ return -EIO;
+
+ memcpy(buf, tpm_engine->data_buffer, cmd_ret->data_len);
+
+ return cmd_ret->data_len;
+}
+
+static int tpm_loongson_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, size_t count)
+{
+ struct loongson_se_engine *tpm_engine = dev_get_drvdata(&chip->dev);
+ struct tpm_loongson_cmd *cmd = tpm_engine->command;
+
+ if (count > tpm_engine->buffer_size)
+ return -E2BIG;
+
+ cmd->data_len = count;
+ memcpy(tpm_engine->data_buffer, buf, count);
+
+ return loongson_se_send_engine_cmd(tpm_engine);
+}
+
+static const struct tpm_class_ops tpm_loongson_ops = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .recv = tpm_loongson_recv,
+ .send = tpm_loongson_send,
+};
+
+static int tpm_loongson_probe(struct platform_device *pdev)
+{
+ struct loongson_se_engine *tpm_engine;
+ struct device *dev = &pdev->dev;
+ struct tpm_loongson_cmd *cmd;
+ struct tpm_chip *chip;
+
+ tpm_engine = loongson_se_init_engine(dev->parent, SE_ENGINE_TPM);
+ if (!tpm_engine)
+ return -ENODEV;
+ cmd = tpm_engine->command;
+ cmd->cmd_id = SE_CMD_TPM;
+ cmd->data_off = tpm_engine->buffer_off;
+
+ chip = tpmm_chip_alloc(dev, &tpm_loongson_ops);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ chip->flags = TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_IRQ;
+ dev_set_drvdata(&chip->dev, tpm_engine);
+
+ return tpm_chip_register(chip);
+}
+
+static struct platform_driver tpm_loongson = {
+ .probe = tpm_loongson_probe,
+ .driver = {
+ .name = "tpm_loongson",
+ },
+};
+module_platform_driver(tpm_loongson);
+
+MODULE_ALIAS("platform:tpm_loongson");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Loongson TPM driver");
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index d53fce1c9d6f..c9793a3d986d 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -33,6 +33,20 @@ static const guid_t tpm_ppi_guid =
GUID_INIT(0x3DDDFAA6, 0x361B, 0x4EB4,
0xA4, 0x24, 0x8D, 0x10, 0x08, 0x9D, 0x16, 0x53);
+static const char * const tpm_ppi_info[] = {
+ "Not implemented",
+ "BIOS only",
+ "Blocked for OS by system firmware",
+ "User required",
+ "User not required",
+};
+
+/* A mutex to protect access to the cache from concurrent reads */
+static DEFINE_MUTEX(tpm_ppi_lock);
+
+static u32 ppi_operations_cache[PPI_VS_REQ_END + 1];
+static bool ppi_cache_populated;
+
static bool tpm_ppi_req_has_parameter(u64 req)
{
return req == 23;
@@ -277,8 +291,7 @@ cleanup:
return status;
}
-static ssize_t show_ppi_operations(acpi_handle dev_handle, char *buf, u32 start,
- u32 end)
+static ssize_t cache_ppi_operations(acpi_handle dev_handle, char *buf)
{
int i;
u32 ret;
@@ -286,34 +299,22 @@ static ssize_t show_ppi_operations(acpi_handle dev_handle, char *buf, u32 start,
union acpi_object *obj, tmp;
union acpi_object argv = ACPI_INIT_DSM_ARGV4(1, &tmp);
- static char *info[] = {
- "Not implemented",
- "BIOS only",
- "Blocked for OS by BIOS",
- "User required",
- "User not required",
- };
-
if (!acpi_check_dsm(dev_handle, &tpm_ppi_guid, TPM_PPI_REVISION_ID_1,
1 << TPM_PPI_FN_GETOPR))
return -EPERM;
tmp.integer.type = ACPI_TYPE_INTEGER;
- for (i = start; i <= end; i++) {
+ for (i = 0; i <= PPI_VS_REQ_END; i++) {
tmp.integer.value = i;
obj = tpm_eval_dsm(dev_handle, TPM_PPI_FN_GETOPR,
ACPI_TYPE_INTEGER, &argv,
TPM_PPI_REVISION_ID_1);
- if (!obj) {
+ if (!obj)
return -ENOMEM;
- } else {
- ret = obj->integer.value;
- ACPI_FREE(obj);
- }
- if (ret > 0 && ret < ARRAY_SIZE(info))
- len += sysfs_emit_at(buf, len, "%d %d: %s\n",
- i, ret, info[ret]);
+ ret = obj->integer.value;
+ ppi_operations_cache[i] = ret;
+ ACPI_FREE(obj);
}
return len;
@@ -324,9 +325,30 @@ static ssize_t tpm_show_ppi_tcg_operations(struct device *dev,
char *buf)
{
struct tpm_chip *chip = to_tpm_chip(dev);
+ ssize_t len = 0;
+ u32 ret;
+ int i;
+
+ mutex_lock(&tpm_ppi_lock);
+ if (!ppi_cache_populated) {
+ len = cache_ppi_operations(chip->acpi_dev_handle, buf);
+ if (len < 0) {
+ mutex_unlock(&tpm_ppi_lock);
+ return len;
+ }
- return show_ppi_operations(chip->acpi_dev_handle, buf, 0,
- PPI_TPM_REQ_MAX);
+ ppi_cache_populated = true;
+ }
+
+ for (i = 0; i <= PPI_TPM_REQ_MAX; i++) {
+ ret = ppi_operations_cache[i];
+ if (ret >= 0 && ret < ARRAY_SIZE(tpm_ppi_info))
+ len += sysfs_emit_at(buf, len, "%d %d: %s\n",
+ i, ret, tpm_ppi_info[ret]);
+ }
+ mutex_unlock(&tpm_ppi_lock);
+
+ return len;
}
static ssize_t tpm_show_ppi_vs_operations(struct device *dev,
@@ -334,9 +356,30 @@ static ssize_t tpm_show_ppi_vs_operations(struct device *dev,
char *buf)
{
struct tpm_chip *chip = to_tpm_chip(dev);
+ ssize_t len = 0;
+ u32 ret;
+ int i;
- return show_ppi_operations(chip->acpi_dev_handle, buf, PPI_VS_REQ_START,
- PPI_VS_REQ_END);
+ mutex_lock(&tpm_ppi_lock);
+ if (!ppi_cache_populated) {
+ len = cache_ppi_operations(chip->acpi_dev_handle, buf);
+ if (len < 0) {
+ mutex_unlock(&tpm_ppi_lock);
+ return len;
+ }
+
+ ppi_cache_populated = true;
+ }
+
+ for (i = PPI_VS_REQ_START; i <= PPI_VS_REQ_END; i++) {
+ ret = ppi_operations_cache[i];
+ if (ret >= 0 && ret < ARRAY_SIZE(tpm_ppi_info))
+ len += sysfs_emit_at(buf, len, "%d %d: %s\n",
+ i, ret, tpm_ppi_info[ret]);
+ }
+ mutex_unlock(&tpm_ppi_lock);
+
+ return len;
}
static DEVICE_ATTR(version, S_IRUGO, tpm_show_ppi_version, NULL);
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 4b12c4b9da8b..8954a8660ffc 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -978,8 +978,8 @@ restore_irqs:
* will call disable_irq which undoes all of the above.
*/
if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) {
- tpm_tis_write8(priv, original_int_vec,
- TPM_INT_VECTOR(priv->locality));
+ tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality),
+ original_int_vec);
rc = -1;
}
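
The fix above swaps two transposed arguments: tpm_tis_write8() takes the register offset before the value, so the old call stored the vector number at whatever register address the value happened to name. A sketch of the corrected call shape, with a hypothetical wrapper:

/* Hypothetical wrapper; offset first, then the value to store. */
static inline int restore_int_vec(struct tpm_tis_data *priv, u8 vec)
{
	return tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), vec);
}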
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 4d56475f94fc..3a1611008e48 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -364,6 +364,7 @@ config COMMON_CLK_LOCHNAGAR
config COMMON_CLK_NPCM8XX
tristate "Clock driver for the NPCM8XX SoC Family"
depends on ARCH_NPCM || COMPILE_TEST
+ select AUXILIARY_BUS
help
This driver supports the clocks on the Nuvoton BMC NPCM8XX SoC Family,
all the clocks are initialized by the bootloader, so this driver
@@ -501,6 +502,15 @@ config COMMON_CLK_SP7021
Not all features of the PLL are currently supported
by the driver.
+config COMMON_CLK_RPMI
+ tristate "Clock driver based on RISC-V RPMI"
+ depends on RISCV || COMPILE_TEST
+ depends on MAILBOX
+ default RISCV
+ help
+ Support for clocks based on the clock service group defined by
+ the RISC-V platform management interface (RPMI) specification.
+
source "drivers/clk/actions/Kconfig"
source "drivers/clk/analogbits/Kconfig"
source "drivers/clk/baikal-t1/Kconfig"
@@ -511,6 +521,7 @@ source "drivers/clk/imx/Kconfig"
source "drivers/clk/ingenic/Kconfig"
source "drivers/clk/keystone/Kconfig"
source "drivers/clk/mediatek/Kconfig"
+source "drivers/clk/mmp/Kconfig"
source "drivers/clk/meson/Kconfig"
source "drivers/clk/mstar/Kconfig"
source "drivers/clk/microchip/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 18ed29cfdc11..b74a1767ca27 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -86,6 +86,7 @@ obj-$(CONFIG_COMMON_CLK_PWM) += clk-pwm.o
obj-$(CONFIG_CLK_QORIQ) += clk-qoriq.o
obj-$(CONFIG_COMMON_CLK_RK808) += clk-rk808.o
obj-$(CONFIG_COMMON_CLK_RP1) += clk-rp1.o
+obj-$(CONFIG_COMMON_CLK_RPMI) += clk-rpmi.o
obj-$(CONFIG_COMMON_CLK_HI655X) += clk-hi655x.o
obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o
obj-$(CONFIG_COMMON_CLK_SCMI) += clk-scmi.o
diff --git a/drivers/clk/actions/owl-common.c b/drivers/clk/actions/owl-common.c
index c62024b7c737..b3dded204dc5 100644
--- a/drivers/clk/actions/owl-common.c
+++ b/drivers/clk/actions/owl-common.c
@@ -18,7 +18,6 @@ static const struct regmap_config owl_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x00cc,
- .fast_io = true,
};
static void owl_clk_set_regmap(const struct owl_clk_desc *desc,
diff --git a/drivers/clk/actions/owl-composite.c b/drivers/clk/actions/owl-composite.c
index 48f177f6ce9c..00b74f8bc437 100644
--- a/drivers/clk/actions/owl-composite.c
+++ b/drivers/clk/actions/owl-composite.c
@@ -122,13 +122,13 @@ static int owl_comp_fact_set_rate(struct clk_hw *hw, unsigned long rate,
rate, parent_rate);
}
-static long owl_comp_fix_fact_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int owl_comp_fix_fact_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct owl_composite *comp = hw_to_owl_comp(hw);
struct clk_fixed_factor *fix_fact_hw = &comp->rate.fix_fact_hw;
- return comp->fix_fact_ops->round_rate(&fix_fact_hw->hw, rate, parent_rate);
+ return comp->fix_fact_ops->determine_rate(&fix_fact_hw->hw, req);
}
static unsigned long owl_comp_fix_fact_recalc_rate(struct clk_hw *hw,
@@ -193,7 +193,7 @@ const struct clk_ops owl_comp_fix_fact_ops = {
.is_enabled = owl_comp_is_enabled,
/* fix_fact_ops */
- .round_rate = owl_comp_fix_fact_round_rate,
+ .determine_rate = owl_comp_fix_fact_determine_rate,
.recalc_rate = owl_comp_fix_fact_recalc_rate,
.set_rate = owl_comp_fix_fact_set_rate,
};
diff --git a/drivers/clk/actions/owl-divider.c b/drivers/clk/actions/owl-divider.c
index cddac00fe324..118f1393c678 100644
--- a/drivers/clk/actions/owl-divider.c
+++ b/drivers/clk/actions/owl-divider.c
@@ -23,13 +23,16 @@ long owl_divider_helper_round_rate(struct owl_clk_common *common,
div_hw->div_flags);
}
-static long owl_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int owl_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct owl_divider *div = hw_to_owl_divider(hw);
- return owl_divider_helper_round_rate(&div->common, &div->div_hw,
- rate, parent_rate);
+ req->rate = owl_divider_helper_round_rate(&div->common, &div->div_hw,
+ req->rate,
+ &req->best_parent_rate);
+
+ return 0;
}
unsigned long owl_divider_helper_recalc_rate(struct owl_clk_common *common,
@@ -89,6 +92,6 @@ static int owl_divider_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops owl_divider_ops = {
.recalc_rate = owl_divider_recalc_rate,
- .round_rate = owl_divider_round_rate,
+ .determine_rate = owl_divider_determine_rate,
.set_rate = owl_divider_set_rate,
};
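
Every .round_rate conversion in this series follows the same mechanical shape, shown here as a sketch with a hypothetical foo_round_rate() helper: the returned rate becomes req->rate, the *parent_rate out-parameter becomes req->best_parent_rate, and the op reports success through the return value:

static int foo_determine_rate(struct clk_hw *hw,
			      struct clk_rate_request *req)
{
	/* Old contract: long round_rate(hw, rate, *parent_rate). */
	req->rate = foo_round_rate(hw, req->rate, &req->best_parent_rate);

	return 0;
}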
diff --git a/drivers/clk/actions/owl-factor.c b/drivers/clk/actions/owl-factor.c
index 64f316cf7cfc..12f41f6bacd6 100644
--- a/drivers/clk/actions/owl-factor.c
+++ b/drivers/clk/actions/owl-factor.c
@@ -130,14 +130,16 @@ long owl_factor_helper_round_rate(struct owl_clk_common *common,
return *parent_rate * mul / div;
}
-static long owl_factor_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int owl_factor_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct owl_factor *factor = hw_to_owl_factor(hw);
struct owl_factor_hw *factor_hw = &factor->factor_hw;
- return owl_factor_helper_round_rate(&factor->common, factor_hw,
- rate, parent_rate);
+ req->rate = owl_factor_helper_round_rate(&factor->common, factor_hw,
+ req->rate, &req->best_parent_rate);
+
+ return 0;
}
unsigned long owl_factor_helper_recalc_rate(struct owl_clk_common *common,
@@ -214,7 +216,7 @@ static int owl_factor_set_rate(struct clk_hw *hw, unsigned long rate,
}
const struct clk_ops owl_factor_ops = {
- .round_rate = owl_factor_round_rate,
+ .determine_rate = owl_factor_determine_rate,
.recalc_rate = owl_factor_recalc_rate,
.set_rate = owl_factor_set_rate,
};
diff --git a/drivers/clk/actions/owl-pll.c b/drivers/clk/actions/owl-pll.c
index 155f313986b4..869690b79cc1 100644
--- a/drivers/clk/actions/owl-pll.c
+++ b/drivers/clk/actions/owl-pll.c
@@ -56,8 +56,8 @@ static const struct clk_pll_table *_get_pll_table(
return table;
}
-static long owl_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int owl_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct owl_pll *pll = hw_to_owl_pll(hw);
struct owl_pll_hw *pll_hw = &pll->pll_hw;
@@ -65,17 +65,24 @@ static long owl_pll_round_rate(struct clk_hw *hw, unsigned long rate,
u32 mul;
if (pll_hw->table) {
- clkt = _get_pll_table(pll_hw->table, rate);
- return clkt->rate;
+ clkt = _get_pll_table(pll_hw->table, req->rate);
+ req->rate = clkt->rate;
+
+ return 0;
}
/* fixed frequency */
- if (pll_hw->width == 0)
- return pll_hw->bfreq;
+ if (pll_hw->width == 0) {
+ req->rate = pll_hw->bfreq;
- mul = owl_pll_calculate_mul(pll_hw, rate);
+ return 0;
+ }
+
+ mul = owl_pll_calculate_mul(pll_hw, req->rate);
- return pll_hw->bfreq * mul;
+ req->rate = pll_hw->bfreq * mul;
+
+ return 0;
}
static unsigned long owl_pll_recalc_rate(struct clk_hw *hw,
@@ -188,7 +195,7 @@ const struct clk_ops owl_pll_ops = {
.enable = owl_pll_enable,
.disable = owl_pll_disable,
.is_enabled = owl_pll_is_enabled,
- .round_rate = owl_pll_round_rate,
+ .determine_rate = owl_pll_determine_rate,
.recalc_rate = owl_pll_recalc_rate,
.set_rate = owl_pll_set_rate,
};
diff --git a/drivers/clk/at91/clk-audio-pll.c b/drivers/clk/at91/clk-audio-pll.c
index a92da64c12e1..bf9b635ac9d6 100644
--- a/drivers/clk/at91/clk-audio-pll.c
+++ b/drivers/clk/at91/clk-audio-pll.c
@@ -270,8 +270,8 @@ static int clk_audio_pll_frac_determine_rate(struct clk_hw *hw,
return 0;
}
-static long clk_audio_pll_pad_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_audio_pll_pad_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_hw *pclk = clk_hw_get_parent(hw);
long best_rate = -EINVAL;
@@ -283,7 +283,7 @@ static long clk_audio_pll_pad_round_rate(struct clk_hw *hw, unsigned long rate,
int best_diff = -1;
pr_debug("A PLL/PAD: %s, rate = %lu (parent_rate = %lu)\n", __func__,
- rate, *parent_rate);
+ req->rate, req->best_parent_rate);
/*
* Rate divisor is actually made of two different divisors, multiplied
@@ -304,12 +304,12 @@ static long clk_audio_pll_pad_round_rate(struct clk_hw *hw, unsigned long rate,
continue;
best_parent_rate = clk_hw_round_rate(pclk,
- rate * tmp_qd * div);
+ req->rate * tmp_qd * div);
tmp_rate = best_parent_rate / (div * tmp_qd);
- tmp_diff = abs(rate - tmp_rate);
+ tmp_diff = abs(req->rate - tmp_rate);
if (best_diff < 0 || best_diff > tmp_diff) {
- *parent_rate = best_parent_rate;
+ req->best_parent_rate = best_parent_rate;
best_rate = tmp_rate;
best_diff = tmp_diff;
}
@@ -318,11 +318,13 @@ static long clk_audio_pll_pad_round_rate(struct clk_hw *hw, unsigned long rate,
pr_debug("A PLL/PAD: %s, best_rate = %ld, best_parent_rate = %lu\n",
__func__, best_rate, best_parent_rate);
- return best_rate;
+ req->rate = best_rate;
+
+ return 0;
}
-static long clk_audio_pll_pmc_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_audio_pll_pmc_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_hw *pclk = clk_hw_get_parent(hw);
long best_rate = -EINVAL;
@@ -333,20 +335,20 @@ static long clk_audio_pll_pmc_round_rate(struct clk_hw *hw, unsigned long rate,
int best_diff = -1;
pr_debug("A PLL/PMC: %s, rate = %lu (parent_rate = %lu)\n", __func__,
- rate, *parent_rate);
+ req->rate, req->best_parent_rate);
- if (!rate)
+ if (!req->rate)
return 0;
best_parent_rate = clk_round_rate(pclk->clk, 1);
- div = max(best_parent_rate / rate, 1UL);
+ div = max(best_parent_rate / req->rate, 1UL);
for (; div <= AUDIO_PLL_QDPMC_MAX; div++) {
- best_parent_rate = clk_round_rate(pclk->clk, rate * div);
+ best_parent_rate = clk_round_rate(pclk->clk, req->rate * div);
tmp_rate = best_parent_rate / div;
- tmp_diff = abs(rate - tmp_rate);
+ tmp_diff = abs(req->rate - tmp_rate);
if (best_diff < 0 || best_diff > tmp_diff) {
- *parent_rate = best_parent_rate;
+ req->best_parent_rate = best_parent_rate;
best_rate = tmp_rate;
best_diff = tmp_diff;
tmp_qd = div;
@@ -356,9 +358,11 @@ static long clk_audio_pll_pmc_round_rate(struct clk_hw *hw, unsigned long rate,
}
pr_debug("A PLL/PMC: %s, best_rate = %ld, best_parent_rate = %lu (qd = %d)\n",
- __func__, best_rate, *parent_rate, tmp_qd - 1);
+ __func__, best_rate, req->best_parent_rate, tmp_qd - 1);
+
+ req->rate = best_rate;
- return best_rate;
+ return 0;
}
static int clk_audio_pll_frac_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -436,7 +440,7 @@ static const struct clk_ops audio_pll_pad_ops = {
.enable = clk_audio_pll_pad_enable,
.disable = clk_audio_pll_pad_disable,
.recalc_rate = clk_audio_pll_pad_recalc_rate,
- .round_rate = clk_audio_pll_pad_round_rate,
+ .determine_rate = clk_audio_pll_pad_determine_rate,
.set_rate = clk_audio_pll_pad_set_rate,
};
@@ -444,7 +448,7 @@ static const struct clk_ops audio_pll_pmc_ops = {
.enable = clk_audio_pll_pmc_enable,
.disable = clk_audio_pll_pmc_disable,
.recalc_rate = clk_audio_pll_pmc_recalc_rate,
- .round_rate = clk_audio_pll_pmc_round_rate,
+ .determine_rate = clk_audio_pll_pmc_determine_rate,
.set_rate = clk_audio_pll_pmc_set_rate,
};
diff --git a/drivers/clk/at91/clk-h32mx.c b/drivers/clk/at91/clk-h32mx.c
index 1e6c12eeda10..a9aa93b5a870 100644
--- a/drivers/clk/at91/clk-h32mx.c
+++ b/drivers/clk/at91/clk-h32mx.c
@@ -40,21 +40,32 @@ static unsigned long clk_sama5d4_h32mx_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long clk_sama5d4_h32mx_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_sama5d4_h32mx_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long div;
- if (rate > *parent_rate)
- return *parent_rate;
- div = *parent_rate / 2;
- if (rate < div)
- return div;
+ if (req->rate > req->best_parent_rate) {
+ req->rate = req->best_parent_rate;
- if (rate - div < *parent_rate - rate)
- return div;
+ return 0;
+ }
+ div = req->best_parent_rate / 2;
+ if (req->rate < div) {
+ req->rate = div;
+
+ return 0;
+ }
+
+ if (req->rate - div < req->best_parent_rate - req->rate) {
+ req->rate = div;
- return *parent_rate;
+ return 0;
+ }
+
+ req->rate = req->best_parent_rate;
+
+ return 0;
}
static int clk_sama5d4_h32mx_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -77,7 +88,7 @@ static int clk_sama5d4_h32mx_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops h32mx_ops = {
.recalc_rate = clk_sama5d4_h32mx_recalc_rate,
- .round_rate = clk_sama5d4_h32mx_round_rate,
+ .determine_rate = clk_sama5d4_h32mx_determine_rate,
.set_rate = clk_sama5d4_h32mx_set_rate,
};
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index 7a544e429d34..d5ea2069ec83 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -580,6 +580,9 @@ clk_sama7g5_master_recalc_rate(struct clk_hw *hw,
{
struct clk_master *master = to_clk_master(hw);
+ if (master->div == MASTER_PRES_MAX)
+ return DIV_ROUND_CLOSEST_ULL(parent_rate, 3);
+
return DIV_ROUND_CLOSEST_ULL(parent_rate, (1 << master->div));
}
diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c
index c173a44c800a..e700f40fd87f 100644
--- a/drivers/clk/at91/clk-peripheral.c
+++ b/drivers/clk/at91/clk-peripheral.c
@@ -279,8 +279,11 @@ static int clk_sam9x5_peripheral_determine_rate(struct clk_hw *hw,
long best_diff = LONG_MIN;
u32 shift;
- if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max)
- return parent_rate;
+ if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max) {
+ req->rate = parent_rate;
+
+ return 0;
+ }
/* Fist step: check the available dividers. */
for (shift = 0; shift <= PERIPHERAL_MAX_SHIFT; shift++) {
@@ -332,50 +335,57 @@ end:
return 0;
}
-static long clk_sam9x5_peripheral_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *parent_rate)
+static int clk_sam9x5_peripheral_no_parent_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int shift = 0;
unsigned long best_rate;
unsigned long best_diff;
- unsigned long cur_rate = *parent_rate;
+ unsigned long cur_rate = req->best_parent_rate;
unsigned long cur_diff;
struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
- if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max)
- return *parent_rate;
+ if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max) {
+ req->rate = req->best_parent_rate;
+
+ return 0;
+ }
if (periph->range.max) {
for (; shift <= PERIPHERAL_MAX_SHIFT; shift++) {
- cur_rate = *parent_rate >> shift;
+ cur_rate = req->best_parent_rate >> shift;
if (cur_rate <= periph->range.max)
break;
}
}
- if (rate >= cur_rate)
- return cur_rate;
+ if (req->rate >= cur_rate) {
+ req->rate = cur_rate;
+
+ return 0;
+ }
- best_diff = cur_rate - rate;
+ best_diff = cur_rate - req->rate;
best_rate = cur_rate;
for (; shift <= PERIPHERAL_MAX_SHIFT; shift++) {
- cur_rate = *parent_rate >> shift;
- if (cur_rate < rate)
- cur_diff = rate - cur_rate;
+ cur_rate = req->best_parent_rate >> shift;
+ if (cur_rate < req->rate)
+ cur_diff = req->rate - cur_rate;
else
- cur_diff = cur_rate - rate;
+ cur_diff = cur_rate - req->rate;
if (cur_diff < best_diff) {
best_diff = cur_diff;
best_rate = cur_rate;
}
- if (!best_diff || cur_rate < rate)
+ if (!best_diff || cur_rate < req->rate)
break;
}
- return best_rate;
+ req->rate = best_rate;
+
+ return 0;
}
static int clk_sam9x5_peripheral_set_rate(struct clk_hw *hw,
@@ -427,7 +437,7 @@ static const struct clk_ops sam9x5_peripheral_ops = {
.disable = clk_sam9x5_peripheral_disable,
.is_enabled = clk_sam9x5_peripheral_is_enabled,
.recalc_rate = clk_sam9x5_peripheral_recalc_rate,
- .round_rate = clk_sam9x5_peripheral_round_rate,
+ .determine_rate = clk_sam9x5_peripheral_no_parent_determine_rate,
.set_rate = clk_sam9x5_peripheral_set_rate,
.save_context = clk_sam9x5_peripheral_save_context,
.restore_context = clk_sam9x5_peripheral_restore_context,
diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c
index 249d6a53cedf..5c5f7398effe 100644
--- a/drivers/clk/at91/clk-pll.c
+++ b/drivers/clk/at91/clk-pll.c
@@ -231,13 +231,15 @@ static long clk_pll_get_best_div_mul(struct clk_pll *pll, unsigned long rate,
return bestrate;
}
-static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_pll *pll = to_clk_pll(hw);
- return clk_pll_get_best_div_mul(pll, rate, *parent_rate,
- NULL, NULL, NULL);
+ req->rate = clk_pll_get_best_div_mul(pll, req->rate, req->best_parent_rate,
+ NULL, NULL, NULL);
+
+ return 0;
}
static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -302,7 +304,7 @@ static const struct clk_ops pll_ops = {
.unprepare = clk_pll_unprepare,
.is_prepared = clk_pll_is_prepared,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_round_rate,
+ .determine_rate = clk_pll_determine_rate,
.set_rate = clk_pll_set_rate,
.save_context = clk_pll_save_context,
.restore_context = clk_pll_restore_context,
diff --git a/drivers/clk/at91/clk-plldiv.c b/drivers/clk/at91/clk-plldiv.c
index ba3a1839a96d..3ac09fecc54e 100644
--- a/drivers/clk/at91/clk-plldiv.c
+++ b/drivers/clk/at91/clk-plldiv.c
@@ -33,21 +33,33 @@ static unsigned long clk_plldiv_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long clk_plldiv_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_plldiv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long div;
- if (rate > *parent_rate)
- return *parent_rate;
- div = *parent_rate / 2;
- if (rate < div)
- return div;
+ if (req->rate > req->best_parent_rate) {
+ req->rate = req->best_parent_rate;
- if (rate - div < *parent_rate - rate)
- return div;
+ return 0;
+ }
+
+ div = req->best_parent_rate / 2;
+ if (req->rate < div) {
+ req->rate = div;
+
+ return 0;
+ }
+
+ if (req->rate - div < req->best_parent_rate - req->rate) {
+ req->rate = div;
- return *parent_rate;
+ return 0;
+ }
+
+ req->rate = req->best_parent_rate;
+
+ return 0;
}
static int clk_plldiv_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -66,7 +78,7 @@ static int clk_plldiv_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops plldiv_ops = {
.recalc_rate = clk_plldiv_recalc_rate,
- .round_rate = clk_plldiv_round_rate,
+ .determine_rate = clk_plldiv_determine_rate,
.set_rate = clk_plldiv_set_rate,
};
diff --git a/drivers/clk/at91/clk-sam9x60-pll.c b/drivers/clk/at91/clk-sam9x60-pll.c
index cefd9948e103..3b965057ba0d 100644
--- a/drivers/clk/at91/clk-sam9x60-pll.c
+++ b/drivers/clk/at91/clk-sam9x60-pll.c
@@ -93,8 +93,8 @@ static int sam9x60_frac_pll_set(struct sam9x60_pll_core *core)
spin_lock_irqsave(core->lock, flags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_ID_MSK, core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_ID_MSK, core->id);
regmap_read(regmap, AT91_PMC_PLL_CTRL1, &val);
cmul = (val & core->layout->mul_mask) >> core->layout->mul_shift;
cfrac = (val & core->layout->frac_mask) >> core->layout->frac_shift;
@@ -103,11 +103,8 @@ static int sam9x60_frac_pll_set(struct sam9x60_pll_core *core)
(cmul == frac->mul && cfrac == frac->frac))
goto unlock;
- /* Recommended value for PMC_PLL_ACR */
- if (core->characteristics->upll)
- val = AT91_PMC_PLL_ACR_DEFAULT_UPLL;
- else
- val = AT91_PMC_PLL_ACR_DEFAULT_PLLA;
+ /* Load recommended value for PMC_PLL_ACR */
+ val = core->characteristics->acr;
regmap_write(regmap, AT91_PMC_PLL_ACR, val);
regmap_write(regmap, AT91_PMC_PLL_CTRL1,
@@ -128,17 +125,17 @@ static int sam9x60_frac_pll_set(struct sam9x60_pll_core *core)
udelay(10);
}
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0,
AT91_PMC_PLL_CTRL0_ENLOCK | AT91_PMC_PLL_CTRL0_ENPLL,
AT91_PMC_PLL_CTRL0_ENLOCK | AT91_PMC_PLL_CTRL0_ENPLL);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
while (!sam9x60_pll_ready(regmap, core->id))
cpu_relax();
@@ -164,8 +161,8 @@ static void sam9x60_frac_pll_unprepare(struct clk_hw *hw)
spin_lock_irqsave(core->lock, flags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_ID_MSK, core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_ID_MSK, core->id);
regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0, AT91_PMC_PLL_CTRL0_ENPLL, 0);
@@ -173,9 +170,9 @@ static void sam9x60_frac_pll_unprepare(struct clk_hw *hw)
regmap_update_bits(regmap, AT91_PMC_PLL_ACR,
AT91_PMC_PLL_ACR_UTMIBG | AT91_PMC_PLL_ACR_UTMIVR, 0);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
spin_unlock_irqrestore(core->lock, flags);
}
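
regmap_update_bits() skips the bus access when the masked bits already hold the requested value, whereas regmap_write_bits() always performs the write; the substitution matters here because writing PLL_UPDT is what latches the PLL ID, even when the ID bits are unchanged. A minimal sketch with a hypothetical helper:

/* Force the write so the PLL ID is really latched (hypothetical helper). */
static int select_pll_id(struct regmap *regmap, unsigned int id)
{
	return regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
				 AT91_PMC_PLL_UPDT_ID_MSK, id);
}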
@@ -230,12 +227,16 @@ static long sam9x60_frac_pll_compute_mul_frac(struct sam9x60_pll_core *core,
return tmprate;
}
-static long sam9x60_frac_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int sam9x60_frac_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
- return sam9x60_frac_pll_compute_mul_frac(core, rate, *parent_rate, false);
+ req->rate = sam9x60_frac_pll_compute_mul_frac(core, req->rate,
+ req->best_parent_rate,
+ false);
+
+ return 0;
}
static int sam9x60_frac_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -262,8 +263,8 @@ static int sam9x60_frac_pll_set_rate_chg(struct clk_hw *hw, unsigned long rate,
spin_lock_irqsave(core->lock, irqflags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
- core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
+ core->id);
regmap_read(regmap, AT91_PMC_PLL_CTRL1, &val);
cmul = (val & core->layout->mul_mask) >> core->layout->mul_shift;
cfrac = (val & core->layout->frac_mask) >> core->layout->frac_shift;
@@ -275,18 +276,18 @@ static int sam9x60_frac_pll_set_rate_chg(struct clk_hw *hw, unsigned long rate,
(frac->mul << core->layout->mul_shift) |
(frac->frac << core->layout->frac_shift));
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0,
AT91_PMC_PLL_CTRL0_ENLOCK | AT91_PMC_PLL_CTRL0_ENPLL,
AT91_PMC_PLL_CTRL0_ENLOCK |
AT91_PMC_PLL_CTRL0_ENPLL);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
while (!sam9x60_pll_ready(regmap, core->id))
cpu_relax();
@@ -321,7 +322,7 @@ static const struct clk_ops sam9x60_frac_pll_ops = {
.unprepare = sam9x60_frac_pll_unprepare,
.is_prepared = sam9x60_frac_pll_is_prepared,
.recalc_rate = sam9x60_frac_pll_recalc_rate,
- .round_rate = sam9x60_frac_pll_round_rate,
+ .determine_rate = sam9x60_frac_pll_determine_rate,
.set_rate = sam9x60_frac_pll_set_rate,
.save_context = sam9x60_frac_pll_save_context,
.restore_context = sam9x60_frac_pll_restore_context,
@@ -332,13 +333,16 @@ static const struct clk_ops sam9x60_frac_pll_ops_chg = {
.unprepare = sam9x60_frac_pll_unprepare,
.is_prepared = sam9x60_frac_pll_is_prepared,
.recalc_rate = sam9x60_frac_pll_recalc_rate,
- .round_rate = sam9x60_frac_pll_round_rate,
+ .determine_rate = sam9x60_frac_pll_determine_rate,
.set_rate = sam9x60_frac_pll_set_rate_chg,
.save_context = sam9x60_frac_pll_save_context,
.restore_context = sam9x60_frac_pll_restore_context,
};
-/* This function should be called with spinlock acquired. */
+/* This function should be called with spinlock acquired.
+ * Warning: this function must be called only if the same PLL ID was set in
+ * PLL_UPDT register previously.
+ */
static void sam9x60_div_pll_set_div(struct sam9x60_pll_core *core, u32 div,
bool enable)
{
@@ -350,9 +354,9 @@ static void sam9x60_div_pll_set_div(struct sam9x60_pll_core *core, u32 div,
core->layout->div_mask | ena_msk,
(div << core->layout->div_shift) | ena_val);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
while (!sam9x60_pll_ready(regmap, core->id))
cpu_relax();
@@ -366,8 +370,8 @@ static int sam9x60_div_pll_set(struct sam9x60_pll_core *core)
unsigned int val, cdiv;
spin_lock_irqsave(core->lock, flags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_ID_MSK, core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_ID_MSK, core->id);
regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val);
cdiv = (val & core->layout->div_mask) >> core->layout->div_shift;
@@ -398,15 +402,15 @@ static void sam9x60_div_pll_unprepare(struct clk_hw *hw)
spin_lock_irqsave(core->lock, flags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_ID_MSK, core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_ID_MSK, core->id);
regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0,
core->layout->endiv_mask, 0);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
spin_unlock_irqrestore(core->lock, flags);
}
@@ -487,12 +491,15 @@ static long sam9x60_div_pll_compute_div(struct sam9x60_pll_core *core,
return best_rate;
}
-static long sam9x60_div_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int sam9x60_div_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
- return sam9x60_div_pll_compute_div(core, parent_rate, rate);
+ req->rate = sam9x60_div_pll_compute_div(core, &req->best_parent_rate,
+ req->rate);
+
+ return 0;
}
static int sam9x60_div_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -518,8 +525,8 @@ static int sam9x60_div_pll_set_rate_chg(struct clk_hw *hw, unsigned long rate,
div->div = DIV_ROUND_CLOSEST(parent_rate, rate) - 1;
spin_lock_irqsave(core->lock, irqflags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
- core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
+ core->id);
regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val);
cdiv = (val & core->layout->div_mask) >> core->layout->div_shift;
@@ -574,8 +581,8 @@ static int sam9x60_div_pll_notifier_fn(struct notifier_block *notifier,
div->div = div->safe_div;
spin_lock_irqsave(core.lock, irqflags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
- core.id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
+ core.id);
regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val);
cdiv = (val & core.layout->div_mask) >> core.layout->div_shift;
@@ -601,7 +608,7 @@ static const struct clk_ops sam9x60_div_pll_ops = {
.unprepare = sam9x60_div_pll_unprepare,
.is_prepared = sam9x60_div_pll_is_prepared,
.recalc_rate = sam9x60_div_pll_recalc_rate,
- .round_rate = sam9x60_div_pll_round_rate,
+ .determine_rate = sam9x60_div_pll_determine_rate,
.set_rate = sam9x60_div_pll_set_rate,
.save_context = sam9x60_div_pll_save_context,
.restore_context = sam9x60_div_pll_restore_context,
@@ -612,7 +619,7 @@ static const struct clk_ops sam9x60_div_pll_ops_chg = {
.unprepare = sam9x60_div_pll_unprepare,
.is_prepared = sam9x60_div_pll_is_prepared,
.recalc_rate = sam9x60_div_pll_recalc_rate,
- .round_rate = sam9x60_div_pll_round_rate,
+ .determine_rate = sam9x60_div_pll_determine_rate,
.set_rate = sam9x60_div_pll_set_rate_chg,
.save_context = sam9x60_div_pll_save_context,
.restore_context = sam9x60_div_pll_restore_context,
@@ -623,7 +630,7 @@ static const struct clk_ops sam9x60_fixed_div_pll_ops = {
.unprepare = sam9x60_div_pll_unprepare,
.is_prepared = sam9x60_div_pll_is_prepared,
.recalc_rate = sam9x60_fixed_div_pll_recalc_rate,
- .round_rate = sam9x60_div_pll_round_rate,
+ .determine_rate = sam9x60_div_pll_determine_rate,
.save_context = sam9x60_div_pll_save_context,
.restore_context = sam9x60_div_pll_restore_context,
};
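/*
 * Editor's illustration (not part of the patch): the mechanical pattern this
 * whole series applies. A legacy .round_rate callback returned the rounded
 * rate as a long (or a negative errno) and could adjust the parent rate
 * through a pointer; .determine_rate instead carries both values in
 * struct clk_rate_request and returns 0 or a negative errno. A minimal
 * before/after sketch for a hypothetical "foo" divider, assuming a non-zero
 * requested rate:
 */
#include <linux/clk-provider.h>
#include <linux/math.h>

/* Before: */
static long foo_round_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long *parent_rate)
{
	unsigned long div = DIV_ROUND_CLOSEST(*parent_rate, rate) ?: 1;

	return *parent_rate / div;
}

/* After: */
static int foo_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	unsigned long div = DIV_ROUND_CLOSEST(req->best_parent_rate,
					      req->rate) ?: 1;

	req->rate = req->best_parent_rate / div;

	return 0;
}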
diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
index b0696a928aa9..e906928cfbf0 100644
--- a/drivers/clk/at91/clk-usb.c
+++ b/drivers/clk/at91/clk-usb.c
@@ -319,8 +319,8 @@ static unsigned long at91rm9200_clk_usb_recalc_rate(struct clk_hw *hw,
return 0;
}
-static long at91rm9200_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int at91rm9200_clk_usb_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct at91rm9200_clk_usb *usb = to_at91rm9200_clk_usb(hw);
struct clk_hw *parent = clk_hw_get_parent(hw);
@@ -336,25 +336,27 @@ static long at91rm9200_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
if (!usb->divisors[i])
continue;
- tmp_parent_rate = rate * usb->divisors[i];
+ tmp_parent_rate = req->rate * usb->divisors[i];
tmp_parent_rate = clk_hw_round_rate(parent, tmp_parent_rate);
tmprate = DIV_ROUND_CLOSEST(tmp_parent_rate, usb->divisors[i]);
- if (tmprate < rate)
- tmpdiff = rate - tmprate;
+ if (tmprate < req->rate)
+ tmpdiff = req->rate - tmprate;
else
- tmpdiff = tmprate - rate;
+ tmpdiff = tmprate - req->rate;
if (bestdiff < 0 || bestdiff > tmpdiff) {
bestrate = tmprate;
bestdiff = tmpdiff;
- *parent_rate = tmp_parent_rate;
+ req->best_parent_rate = tmp_parent_rate;
}
if (!bestdiff)
break;
}
- return bestrate;
+ req->rate = bestrate;
+
+ return 0;
}
static int at91rm9200_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -384,7 +386,7 @@ static int at91rm9200_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops at91rm9200_usb_ops = {
.recalc_rate = at91rm9200_clk_usb_recalc_rate,
- .round_rate = at91rm9200_clk_usb_round_rate,
+ .determine_rate = at91rm9200_clk_usb_determine_rate,
.set_rate = at91rm9200_clk_usb_set_rate,
};
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index 4fb29ca111f7..5daa32c4cf25 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -80,6 +80,7 @@ struct clk_pll_characteristics {
u16 *icpll;
u8 *out;
u8 upll : 1;
+ u32 acr;
};
struct clk_programmable_layout {
diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
index db6db9e2073e..18baf4a256f4 100644
--- a/drivers/clk/at91/sam9x60.c
+++ b/drivers/clk/at91/sam9x60.c
@@ -36,6 +36,7 @@ static const struct clk_pll_characteristics plla_characteristics = {
.num_output = ARRAY_SIZE(plla_outputs),
.output = plla_outputs,
.core_output = core_outputs,
+ .acr = UL(0x00020010),
};
static const struct clk_range upll_outputs[] = {
@@ -48,6 +49,7 @@ static const struct clk_pll_characteristics upll_characteristics = {
.output = upll_outputs,
.core_output = core_outputs,
.upll = true,
+	.acr = UL(0x12023010), /* fIN = [18 MHz, 32 MHz] */
};
static const struct clk_pll_layout pll_frac_layout = {
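/*
 * Editor's note: "acr" seeds the PLL's Analog Control Register, previously a
 * hard-coded default in the PLL driver. Making it part of
 * clk_pll_characteristics lets each PLL instance (PLLA, UPLL, ...) carry its
 * own value; the differing UPLL constant presumably reflects the input
 * frequency range noted in the comment. The values are copied verbatim from
 * the patch, not derived here.
 */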
diff --git a/drivers/clk/at91/sam9x7.c b/drivers/clk/at91/sam9x7.c
index ffab32b047a0..89868a0aeaba 100644
--- a/drivers/clk/at91/sam9x7.c
+++ b/drivers/clk/at91/sam9x7.c
@@ -107,6 +107,7 @@ static const struct clk_pll_characteristics plla_characteristics = {
.num_output = ARRAY_SIZE(plla_outputs),
.output = plla_outputs,
.core_output = plla_core_outputs,
+ .acr = UL(0x00020010), /* Old ACR_DEFAULT_PLLA value */
};
static const struct clk_pll_characteristics upll_characteristics = {
@@ -115,6 +116,7 @@ static const struct clk_pll_characteristics upll_characteristics = {
.output = upll_outputs,
.core_output = upll_core_outputs,
.upll = true,
+ .acr = UL(0x12023010), /* fIN=[20 MHz, 32 MHz] */
};
static const struct clk_pll_characteristics lvdspll_characteristics = {
@@ -122,6 +124,7 @@ static const struct clk_pll_characteristics lvdspll_characteristics = {
.num_output = ARRAY_SIZE(lvdspll_outputs),
.output = lvdspll_outputs,
.core_output = lvdspll_core_outputs,
+ .acr = UL(0x12023010), /* fIN=[20 MHz, 32 MHz] */
};
static const struct clk_pll_characteristics audiopll_characteristics = {
@@ -129,6 +132,7 @@ static const struct clk_pll_characteristics audiopll_characteristics = {
.num_output = ARRAY_SIZE(audiopll_outputs),
.output = audiopll_outputs,
.core_output = audiopll_core_outputs,
+ .acr = UL(0x12023010), /* fIN=[20 MHz, 32 MHz] */
};
static const struct clk_pll_characteristics plladiv2_characteristics = {
@@ -136,6 +140,7 @@ static const struct clk_pll_characteristics plladiv2_characteristics = {
.num_output = ARRAY_SIZE(plladiv2_outputs),
.output = plladiv2_outputs,
.core_output = plladiv2_core_outputs,
+ .acr = UL(0x00020010), /* Old ACR_DEFAULT_PLLA value */
};
/* Layout for fractional PLL ID PLLA. */
@@ -403,6 +408,7 @@ static const struct {
{ .n = "pioD_clk", .id = 44, },
{ .n = "tcb1_clk", .id = 45, },
{ .n = "dbgu_clk", .id = 47, },
+ { .n = "pmecc_clk", .id = 48, },
/*
* mpddr_clk feeds DDR controller and is enabled by bootloader thus we
* need to keep it enabled in case there is no Linux consumer for it.
diff --git a/drivers/clk/at91/sama7d65.c b/drivers/clk/at91/sama7d65.c
index a5d40df8b2f2..7dee2b160ffb 100644
--- a/drivers/clk/at91/sama7d65.c
+++ b/drivers/clk/at91/sama7d65.c
@@ -138,6 +138,7 @@ static const struct clk_pll_characteristics cpu_pll_characteristics = {
.num_output = ARRAY_SIZE(cpu_pll_outputs),
.output = cpu_pll_outputs,
.core_output = core_outputs,
+ .acr = UL(0x00070010),
};
/* PLL characteristics. */
@@ -146,6 +147,7 @@ static const struct clk_pll_characteristics pll_characteristics = {
.num_output = ARRAY_SIZE(pll_outputs),
.output = pll_outputs,
.core_output = core_outputs,
+ .acr = UL(0x00070010),
};
static const struct clk_pll_characteristics lvdspll_characteristics = {
@@ -153,6 +155,7 @@ static const struct clk_pll_characteristics lvdspll_characteristics = {
.num_output = ARRAY_SIZE(lvdspll_outputs),
.output = lvdspll_outputs,
.core_output = lvdspll_core_outputs,
+ .acr = UL(0x00070010),
};
static const struct clk_pll_characteristics upll_characteristics = {
@@ -160,6 +163,7 @@ static const struct clk_pll_characteristics upll_characteristics = {
.num_output = ARRAY_SIZE(upll_outputs),
.output = upll_outputs,
.core_output = upll_core_outputs,
+ .acr = UL(0x12020010),
.upll = true,
};
diff --git a/drivers/clk/at91/sama7g5.c b/drivers/clk/at91/sama7g5.c
index 8385badc1c70..1340c2b00619 100644
--- a/drivers/clk/at91/sama7g5.c
+++ b/drivers/clk/at91/sama7g5.c
@@ -113,6 +113,7 @@ static const struct clk_pll_characteristics cpu_pll_characteristics = {
.num_output = ARRAY_SIZE(cpu_pll_outputs),
.output = cpu_pll_outputs,
.core_output = core_outputs,
+ .acr = UL(0x00070010),
};
/* PLL characteristics. */
@@ -121,6 +122,7 @@ static const struct clk_pll_characteristics pll_characteristics = {
.num_output = ARRAY_SIZE(pll_outputs),
.output = pll_outputs,
.core_output = core_outputs,
+ .acr = UL(0x00070010),
};
/*
diff --git a/drivers/clk/axs10x/i2s_pll_clock.c b/drivers/clk/axs10x/i2s_pll_clock.c
index 9667ce898428..6f3e1151b354 100644
--- a/drivers/clk/axs10x/i2s_pll_clock.c
+++ b/drivers/clk/axs10x/i2s_pll_clock.c
@@ -108,21 +108,21 @@ static unsigned long i2s_pll_recalc_rate(struct clk_hw *hw,
return ((parent_rate / idiv) * fbdiv) / odiv;
}
-static long i2s_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int i2s_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct i2s_pll_clk *clk = to_i2s_pll_clk(hw);
- const struct i2s_pll_cfg *pll_cfg = i2s_pll_get_cfg(*prate);
+ const struct i2s_pll_cfg *pll_cfg = i2s_pll_get_cfg(req->best_parent_rate);
int i;
if (!pll_cfg) {
- dev_err(clk->dev, "invalid parent rate=%ld\n", *prate);
+		dev_err(clk->dev, "invalid parent rate=%lu\n", req->best_parent_rate);
return -EINVAL;
}
for (i = 0; pll_cfg[i].rate != 0; i++)
- if (pll_cfg[i].rate == rate)
- return rate;
+ if (pll_cfg[i].rate == req->rate)
+ return 0;
return -EINVAL;
}
@@ -156,7 +156,7 @@ static int i2s_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops i2s_pll_ops = {
.recalc_rate = i2s_pll_recalc_rate,
- .round_rate = i2s_pll_round_rate,
+ .determine_rate = i2s_pll_determine_rate,
.set_rate = i2s_pll_set_rate,
};
diff --git a/drivers/clk/axs10x/pll_clock.c b/drivers/clk/axs10x/pll_clock.c
index 6c7a2b62b406..c7ca473ee76c 100644
--- a/drivers/clk/axs10x/pll_clock.c
+++ b/drivers/clk/axs10x/pll_clock.c
@@ -149,8 +149,8 @@ static unsigned long axs10x_pll_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long axs10x_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int axs10x_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int i;
long best_rate;
@@ -163,11 +163,13 @@ static long axs10x_pll_round_rate(struct clk_hw *hw, unsigned long rate,
best_rate = pll_cfg[0].rate;
for (i = 1; pll_cfg[i].rate != 0; i++) {
- if (abs(rate - pll_cfg[i].rate) < abs(rate - best_rate))
+ if (abs(req->rate - pll_cfg[i].rate) < abs(req->rate - best_rate))
best_rate = pll_cfg[i].rate;
}
- return best_rate;
+ req->rate = best_rate;
+
+ return 0;
}
static int axs10x_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -208,7 +210,7 @@ static int axs10x_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops axs10x_pll_ops = {
.recalc_rate = axs10x_pll_recalc_rate,
- .round_rate = axs10x_pll_round_rate,
+ .determine_rate = axs10x_pll_determine_rate,
.set_rate = axs10x_pll_set_rate,
};
diff --git a/drivers/clk/baikal-t1/ccu-div.c b/drivers/clk/baikal-t1/ccu-div.c
index 8d5fc7158f33..849d1f55765f 100644
--- a/drivers/clk/baikal-t1/ccu-div.c
+++ b/drivers/clk/baikal-t1/ccu-div.c
@@ -228,15 +228,18 @@ static inline unsigned long ccu_div_var_calc_divider(unsigned long rate,
CCU_DIV_CLKDIV_MAX(mask));
}
-static long ccu_div_var_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ccu_div_var_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ccu_div *div = to_ccu_div(hw);
unsigned long divider;
- divider = ccu_div_var_calc_divider(rate, *parent_rate, div->mask);
+ divider = ccu_div_var_calc_divider(req->rate, req->best_parent_rate,
+ div->mask);
- return ccu_div_calc_freq(*parent_rate, divider);
+ req->rate = ccu_div_calc_freq(req->best_parent_rate, divider);
+
+ return 0;
}
/*
@@ -308,12 +311,14 @@ static unsigned long ccu_div_fixed_recalc_rate(struct clk_hw *hw,
return ccu_div_calc_freq(parent_rate, div->divider);
}
-static long ccu_div_fixed_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ccu_div_fixed_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ccu_div *div = to_ccu_div(hw);
- return ccu_div_calc_freq(*parent_rate, div->divider);
+ req->rate = ccu_div_calc_freq(req->best_parent_rate, div->divider);
+
+ return 0;
}
static int ccu_div_fixed_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -534,14 +539,14 @@ static const struct clk_ops ccu_div_var_gate_to_set_ops = {
.disable = ccu_div_gate_disable,
.is_enabled = ccu_div_gate_is_enabled,
.recalc_rate = ccu_div_var_recalc_rate,
- .round_rate = ccu_div_var_round_rate,
+ .determine_rate = ccu_div_var_determine_rate,
.set_rate = ccu_div_var_set_rate_fast,
.debug_init = ccu_div_var_debug_init
};
static const struct clk_ops ccu_div_var_nogate_ops = {
.recalc_rate = ccu_div_var_recalc_rate,
- .round_rate = ccu_div_var_round_rate,
+ .determine_rate = ccu_div_var_determine_rate,
.set_rate = ccu_div_var_set_rate_slow,
.debug_init = ccu_div_var_debug_init
};
@@ -551,7 +556,7 @@ static const struct clk_ops ccu_div_gate_ops = {
.disable = ccu_div_gate_disable,
.is_enabled = ccu_div_gate_is_enabled,
.recalc_rate = ccu_div_fixed_recalc_rate,
- .round_rate = ccu_div_fixed_round_rate,
+ .determine_rate = ccu_div_fixed_determine_rate,
.set_rate = ccu_div_fixed_set_rate,
.debug_init = ccu_div_gate_debug_init
};
@@ -565,7 +570,7 @@ static const struct clk_ops ccu_div_buf_ops = {
static const struct clk_ops ccu_div_fixed_ops = {
.recalc_rate = ccu_div_fixed_recalc_rate,
- .round_rate = ccu_div_fixed_round_rate,
+ .determine_rate = ccu_div_fixed_determine_rate,
.set_rate = ccu_div_fixed_set_rate,
.debug_init = ccu_div_fixed_debug_init
};
diff --git a/drivers/clk/baikal-t1/ccu-pll.c b/drivers/clk/baikal-t1/ccu-pll.c
index 13ef28001439..357269f41cdc 100644
--- a/drivers/clk/baikal-t1/ccu-pll.c
+++ b/drivers/clk/baikal-t1/ccu-pll.c
@@ -228,14 +228,16 @@ static void ccu_pll_calc_factors(unsigned long rate, unsigned long parent_rate,
}
}
-static long ccu_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ccu_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long nr = 1, nf = 1, od = 1;
- ccu_pll_calc_factors(rate, *parent_rate, &nr, &nf, &od);
+ ccu_pll_calc_factors(req->rate, req->best_parent_rate, &nr, &nf, &od);
- return ccu_pll_calc_freq(*parent_rate, nr, nf, od);
+ req->rate = ccu_pll_calc_freq(req->best_parent_rate, nr, nf, od);
+
+ return 0;
}
/*
@@ -481,7 +483,7 @@ static const struct clk_ops ccu_pll_gate_to_set_ops = {
.disable = ccu_pll_disable,
.is_enabled = ccu_pll_is_enabled,
.recalc_rate = ccu_pll_recalc_rate,
- .round_rate = ccu_pll_round_rate,
+ .determine_rate = ccu_pll_determine_rate,
.set_rate = ccu_pll_set_rate_norst,
.debug_init = ccu_pll_debug_init
};
@@ -491,7 +493,7 @@ static const struct clk_ops ccu_pll_straight_set_ops = {
.disable = ccu_pll_disable,
.is_enabled = ccu_pll_is_enabled,
.recalc_rate = ccu_pll_recalc_rate,
- .round_rate = ccu_pll_round_rate,
+ .determine_rate = ccu_pll_determine_rate,
.set_rate = ccu_pll_set_rate_reset,
.debug_init = ccu_pll_debug_init
};
diff --git a/drivers/clk/bcm/clk-iproc-asiu.c b/drivers/clk/bcm/clk-iproc-asiu.c
index dcacf55c55ae..83ec13da9b2e 100644
--- a/drivers/clk/bcm/clk-iproc-asiu.c
+++ b/drivers/clk/bcm/clk-iproc-asiu.c
@@ -98,22 +98,27 @@ static unsigned long iproc_asiu_clk_recalc_rate(struct clk_hw *hw,
return clk->rate;
}
-static long iproc_asiu_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int iproc_asiu_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned int div;
- if (rate == 0 || *parent_rate == 0)
+ if (req->rate == 0 || req->best_parent_rate == 0)
return -EINVAL;
- if (rate == *parent_rate)
- return *parent_rate;
+ if (req->rate == req->best_parent_rate)
+ return 0;
- div = DIV_ROUND_CLOSEST(*parent_rate, rate);
- if (div < 2)
- return *parent_rate;
+ div = DIV_ROUND_CLOSEST(req->best_parent_rate, req->rate);
+ if (div < 2) {
+ req->rate = req->best_parent_rate;
- return *parent_rate / div;
+ return 0;
+ }
+
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
}
static int iproc_asiu_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -168,7 +173,7 @@ static const struct clk_ops iproc_asiu_ops = {
.enable = iproc_asiu_clk_enable,
.disable = iproc_asiu_clk_disable,
.recalc_rate = iproc_asiu_clk_recalc_rate,
- .round_rate = iproc_asiu_clk_round_rate,
+ .determine_rate = iproc_asiu_clk_determine_rate,
.set_rate = iproc_asiu_clk_set_rate,
};
diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
index 8e4fde03ed23..1a9162f0ae31 100644
--- a/drivers/clk/bcm/clk-raspberrypi.c
+++ b/drivers/clk/bcm/clk-raspberrypi.c
@@ -68,6 +68,8 @@ struct raspberrypi_clk_variant {
char *clkdev;
unsigned long min_rate;
bool minimize;
+ bool maximize;
+ u32 flags;
};
static struct raspberrypi_clk_variant
@@ -75,6 +77,7 @@ raspberrypi_clk_variants[RPI_FIRMWARE_NUM_CLK_ID] = {
[RPI_FIRMWARE_ARM_CLK_ID] = {
.export = true,
.clkdev = "cpu0",
+ .flags = CLK_IS_CRITICAL,
},
[RPI_FIRMWARE_CORE_CLK_ID] = {
.export = true,
@@ -90,6 +93,12 @@ raspberrypi_clk_variants[RPI_FIRMWARE_NUM_CLK_ID] = {
* always use the minimum the drivers will let us.
*/
.minimize = true,
+
+ /*
+ * It should never be disabled as it drives the bus for
+ * everything else.
+ */
+ .flags = CLK_IS_CRITICAL,
},
[RPI_FIRMWARE_M2MC_CLK_ID] = {
.export = true,
@@ -115,18 +124,29 @@ raspberrypi_clk_variants[RPI_FIRMWARE_NUM_CLK_ID] = {
* drivers will let us.
*/
.minimize = true,
+
+ /*
+	 * As mentioned above, if this clock is disabled during boot,
+	 * the firmware will skip the HSM initialization, resulting
+	 * in a bus lockup. Therefore, make sure it's enabled during
+	 * boot; after that, it can be enabled/disabled by the driver.
+ */
+ .flags = CLK_IGNORE_UNUSED,
},
[RPI_FIRMWARE_V3D_CLK_ID] = {
.export = true,
- .minimize = true,
+ .maximize = true,
},
[RPI_FIRMWARE_PIXEL_CLK_ID] = {
.export = true,
.minimize = true,
+ .flags = CLK_IS_CRITICAL,
},
[RPI_FIRMWARE_HEVC_CLK_ID] = {
.export = true,
.minimize = true,
+ .flags = CLK_IS_CRITICAL,
},
[RPI_FIRMWARE_ISP_CLK_ID] = {
.export = true,
@@ -135,6 +155,7 @@ raspberrypi_clk_variants[RPI_FIRMWARE_NUM_CLK_ID] = {
[RPI_FIRMWARE_PIXEL_BVB_CLK_ID] = {
.export = true,
.minimize = true,
+ .flags = CLK_IS_CRITICAL,
},
[RPI_FIRMWARE_VEC_CLK_ID] = {
.export = true,
@@ -194,8 +215,11 @@ static int raspberrypi_fw_is_prepared(struct clk_hw *hw)
ret = raspberrypi_clock_property(rpi->firmware, data,
RPI_FIRMWARE_GET_CLOCK_STATE, &val);
- if (ret)
+ if (ret) {
+ dev_err_ratelimited(rpi->dev, "Failed to get %s state: %d\n",
+ clk_hw_get_name(hw), ret);
return 0;
+ }
return !!(val & RPI_FIRMWARE_STATE_ENABLE_BIT);
}
@@ -211,8 +235,11 @@ static unsigned long raspberrypi_fw_get_rate(struct clk_hw *hw,
ret = raspberrypi_clock_property(rpi->firmware, data,
RPI_FIRMWARE_GET_CLOCK_RATE, &val);
- if (ret)
+ if (ret) {
+ dev_err_ratelimited(rpi->dev, "Failed to get %s frequency: %d\n",
+ clk_hw_get_name(hw), ret);
return 0;
+ }
return val;
}
@@ -259,7 +286,41 @@ static int raspberrypi_fw_dumb_determine_rate(struct clk_hw *hw,
return 0;
}
+static int raspberrypi_fw_prepare(struct clk_hw *hw)
+{
+ const struct raspberrypi_clk_data *data = clk_hw_to_data(hw);
+ struct raspberrypi_clk *rpi = data->rpi;
+ u32 state = RPI_FIRMWARE_STATE_ENABLE_BIT;
+ int ret;
+
+ ret = raspberrypi_clock_property(rpi->firmware, data,
+ RPI_FIRMWARE_SET_CLOCK_STATE, &state);
+ if (ret)
+ dev_err_ratelimited(rpi->dev,
+ "Failed to set clock %s state to on: %d\n",
+ clk_hw_get_name(hw), ret);
+
+ return ret;
+}
+
+static void raspberrypi_fw_unprepare(struct clk_hw *hw)
+{
+ const struct raspberrypi_clk_data *data = clk_hw_to_data(hw);
+ struct raspberrypi_clk *rpi = data->rpi;
+ u32 state = 0;
+ int ret;
+
+ ret = raspberrypi_clock_property(rpi->firmware, data,
+ RPI_FIRMWARE_SET_CLOCK_STATE, &state);
+ if (ret)
+ dev_err_ratelimited(rpi->dev,
+ "Failed to set clock %s state to off: %d\n",
+ clk_hw_get_name(hw), ret);
+}
+
static const struct clk_ops raspberrypi_firmware_clk_ops = {
+ .prepare = raspberrypi_fw_prepare,
+ .unprepare = raspberrypi_fw_unprepare,
.is_prepared = raspberrypi_fw_is_prepared,
.recalc_rate = raspberrypi_fw_get_rate,
.determine_rate = raspberrypi_fw_dumb_determine_rate,
@@ -289,7 +350,7 @@ static struct clk_hw *raspberrypi_clk_register(struct raspberrypi_clk *rpi,
if (!init.name)
return ERR_PTR(-ENOMEM);
init.ops = &raspberrypi_firmware_clk_ops;
- init.flags = CLK_GET_RATE_NOCACHE;
+ init.flags = variant->flags | CLK_GET_RATE_NOCACHE;
data->hw.init = &init;
@@ -326,6 +387,9 @@ static struct clk_hw *raspberrypi_clk_register(struct raspberrypi_clk *rpi,
}
}
+ if (variant->maximize)
+ variant->min_rate = max_rate;
+
if (variant->min_rate) {
unsigned long rate;
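/*
 * Editor's note (background, not part of the patch): the firmware clock is
 * gated through a mailbox property call, which can sleep, so the gate is
 * wired to .prepare/.unprepare rather than .enable/.disable (those run in
 * atomic context). Of the flags ORed into init.flags above, CLK_IS_CRITICAL
 * keeps a clock on from registration onwards, while CLK_IGNORE_UNUSED merely
 * exempts the HSM clock from the late clk_disable_unused() sweep so its
 * consumer can still gate it afterwards. "maximize" pins a clock to its
 * maximum rate by reusing the existing min_rate request path.
 */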
diff --git a/drivers/clk/clk-apple-nco.c b/drivers/clk/clk-apple-nco.c
index 457a48d48941..d3ced4a0f029 100644
--- a/drivers/clk/clk-apple-nco.c
+++ b/drivers/clk/clk-apple-nco.c
@@ -212,13 +212,15 @@ static unsigned long applnco_recalc_rate(struct clk_hw *hw,
((u64) div) * incbase + inc1);
}
-static long applnco_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int applnco_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long lo = *parent_rate / (COARSE_DIV_OFFSET + LFSR_TBLSIZE) + 1;
- unsigned long hi = *parent_rate / COARSE_DIV_OFFSET;
+ unsigned long lo = req->best_parent_rate / (COARSE_DIV_OFFSET + LFSR_TBLSIZE) + 1;
+ unsigned long hi = req->best_parent_rate / COARSE_DIV_OFFSET;
- return clamp(rate, lo, hi);
+ req->rate = clamp(req->rate, lo, hi);
+
+ return 0;
}
static int applnco_enable(struct clk_hw *hw)
@@ -246,7 +248,7 @@ static void applnco_disable(struct clk_hw *hw)
static const struct clk_ops applnco_ops = {
.set_rate = applnco_set_rate,
.recalc_rate = applnco_recalc_rate,
- .round_rate = applnco_round_rate,
+ .determine_rate = applnco_determine_rate,
.enable = applnco_enable,
.disable = applnco_disable,
.is_enabled = applnco_is_enabled,
diff --git a/drivers/clk/clk-axi-clkgen.c b/drivers/clk/clk-axi-clkgen.c
index aec62301fa06..fa5ccef73e60 100644
--- a/drivers/clk/clk-axi-clkgen.c
+++ b/drivers/clk/clk-axi-clkgen.c
@@ -540,7 +540,7 @@ static int axi_clkgen_setup_limits(struct axi_clkgen *axi_clkgen,
default:
return dev_err_probe(dev, -ENODEV, "Unknown speed grade %d\n",
speed_grade);
- };
+ }
/* Overwrite vco limits for ultrascale+ */
if (tech == ADI_AXI_FPGA_TECH_ULTRASCALE_PLUS) {
diff --git a/drivers/clk/clk-axm5516.c b/drivers/clk/clk-axm5516.c
index 4a3462ee8f3e..3823383f3fa6 100644
--- a/drivers/clk/clk-axm5516.c
+++ b/drivers/clk/clk-axm5516.c
@@ -529,7 +529,6 @@ static const struct regmap_config axmclk_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x1fffc,
- .fast_io = true,
};
static const struct of_device_id axmclk_match_table[] = {
diff --git a/drivers/clk/clk-bm1880.c b/drivers/clk/clk-bm1880.c
index 002f7360b1c6..dac190bc6e19 100644
--- a/drivers/clk/clk-bm1880.c
+++ b/drivers/clk/clk-bm1880.c
@@ -608,8 +608,8 @@ static unsigned long bm1880_clk_div_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long bm1880_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int bm1880_clk_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct bm1880_div_hw_clock *div_hw = to_bm1880_div_clk(hw);
struct bm1880_div_clock *div = &div_hw->div;
@@ -621,13 +621,18 @@ static long bm1880_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
val = readl(reg_addr) >> div->shift;
val &= clk_div_mask(div->width);
- return divider_ro_round_rate(hw, rate, prate, div->table,
- div->width, div->flags,
- val);
+ req->rate = divider_ro_round_rate(hw, req->rate,
+ &req->best_parent_rate,
+ div->table,
+ div->width, div->flags, val);
+
+ return 0;
}
- return divider_round_rate(hw, rate, prate, div->table,
- div->width, div->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ div->table, div->width, div->flags);
+
+ return 0;
}
static int bm1880_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -665,7 +670,7 @@ static int bm1880_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops bm1880_clk_div_ops = {
.recalc_rate = bm1880_clk_div_recalc_rate,
- .round_rate = bm1880_clk_div_round_rate,
+ .determine_rate = bm1880_clk_div_determine_rate,
.set_rate = bm1880_clk_div_set_rate,
};
diff --git a/drivers/clk/clk-cdce706.c b/drivers/clk/clk-cdce706.c
index d0705bb03a2a..a495d313b02f 100644
--- a/drivers/clk/clk-cdce706.c
+++ b/drivers/clk/clk-cdce706.c
@@ -183,8 +183,8 @@ static unsigned long cdce706_pll_recalc_rate(struct clk_hw *hw,
return 0;
}
-static long cdce706_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int cdce706_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct cdce706_hw_data *hwd = to_hw_data(hw);
unsigned long mul, div;
@@ -192,9 +192,9 @@ static long cdce706_pll_round_rate(struct clk_hw *hw, unsigned long rate,
dev_dbg(&hwd->dev_data->client->dev,
"%s, rate: %lu, parent_rate: %lu\n",
- __func__, rate, *parent_rate);
+ __func__, req->rate, req->best_parent_rate);
- rational_best_approximation(rate, *parent_rate,
+ rational_best_approximation(req->rate, req->best_parent_rate,
CDCE706_PLL_N_MAX, CDCE706_PLL_M_MAX,
&mul, &div);
hwd->mul = mul;
@@ -204,9 +204,11 @@ static long cdce706_pll_round_rate(struct clk_hw *hw, unsigned long rate,
"%s, pll: %d, mul: %lu, div: %lu\n",
__func__, hwd->idx, mul, div);
- res = (u64)*parent_rate * hwd->mul;
+ res = (u64)req->best_parent_rate * hwd->mul;
do_div(res, hwd->div);
- return res;
+ req->rate = res;
+
+ return 0;
}
static int cdce706_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -251,7 +253,7 @@ static int cdce706_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops cdce706_pll_ops = {
.recalc_rate = cdce706_pll_recalc_rate,
- .round_rate = cdce706_pll_round_rate,
+ .determine_rate = cdce706_pll_determine_rate,
.set_rate = cdce706_pll_set_rate,
};
diff --git a/drivers/clk/clk-cdce925.c b/drivers/clk/clk-cdce925.c
index c51818c1af98..0b2ad21e6e4d 100644
--- a/drivers/clk/clk-cdce925.c
+++ b/drivers/clk/clk-cdce925.c
@@ -128,13 +128,15 @@ static void cdce925_pll_find_rate(unsigned long rate,
}
}
-static long cdce925_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int cdce925_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u16 n, m;
- cdce925_pll_find_rate(rate, *parent_rate, &n, &m);
- return (long)cdce925_pll_calculate_rate(*parent_rate, n, m);
+ cdce925_pll_find_rate(req->rate, req->best_parent_rate, &n, &m);
+ req->rate = (long)cdce925_pll_calculate_rate(req->best_parent_rate, n, m);
+
+ return 0;
}
static int cdce925_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -266,7 +268,7 @@ static const struct clk_ops cdce925_pll_ops = {
.prepare = cdce925_pll_prepare,
.unprepare = cdce925_pll_unprepare,
.recalc_rate = cdce925_pll_recalc_rate,
- .round_rate = cdce925_pll_round_rate,
+ .determine_rate = cdce925_pll_determine_rate,
.set_rate = cdce925_pll_set_rate,
};
@@ -420,20 +422,23 @@ static unsigned long cdce925_clk_best_parent_rate(
return rate * pdiv_best;
}
-static long cdce925_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int cdce925_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long l_parent_rate = *parent_rate;
- u16 divider = cdce925_calc_divider(rate, l_parent_rate);
+ unsigned long l_parent_rate = req->best_parent_rate;
+ u16 divider = cdce925_calc_divider(req->rate, l_parent_rate);
- if (l_parent_rate / divider != rate) {
- l_parent_rate = cdce925_clk_best_parent_rate(hw, rate);
- divider = cdce925_calc_divider(rate, l_parent_rate);
- *parent_rate = l_parent_rate;
+ if (l_parent_rate / divider != req->rate) {
+ l_parent_rate = cdce925_clk_best_parent_rate(hw, req->rate);
+ divider = cdce925_calc_divider(req->rate, l_parent_rate);
+ req->best_parent_rate = l_parent_rate;
}
if (divider)
- return (long)(l_parent_rate / divider);
+ req->rate = (long)(l_parent_rate / divider);
+ else
+ req->rate = 0;
+
return 0;
}
@@ -451,7 +456,7 @@ static const struct clk_ops cdce925_clk_ops = {
.prepare = cdce925_clk_prepare,
.unprepare = cdce925_clk_unprepare,
.recalc_rate = cdce925_clk_recalc_rate,
- .round_rate = cdce925_clk_round_rate,
+ .determine_rate = cdce925_clk_determine_rate,
.set_rate = cdce925_clk_set_rate,
};
@@ -473,14 +478,17 @@ static u16 cdce925_y1_calc_divider(unsigned long rate,
return (u16)divider;
}
-static long cdce925_clk_y1_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int cdce925_clk_y1_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long l_parent_rate = *parent_rate;
- u16 divider = cdce925_y1_calc_divider(rate, l_parent_rate);
+ unsigned long l_parent_rate = req->best_parent_rate;
+ u16 divider = cdce925_y1_calc_divider(req->rate, l_parent_rate);
if (divider)
- return (long)(l_parent_rate / divider);
+ req->rate = (long)(l_parent_rate / divider);
+ else
+ req->rate = 0;
+
return 0;
}
@@ -498,7 +506,7 @@ static const struct clk_ops cdce925_clk_y1_ops = {
.prepare = cdce925_clk_prepare,
.unprepare = cdce925_clk_unprepare,
.recalc_rate = cdce925_clk_recalc_rate,
- .round_rate = cdce925_clk_y1_round_rate,
+ .determine_rate = cdce925_clk_y1_determine_rate,
.set_rate = cdce925_clk_y1_set_rate,
};
diff --git a/drivers/clk/clk-cs2000-cp.c b/drivers/clk/clk-cs2000-cp.c
index 35cb93ad298a..8800472ba63f 100644
--- a/drivers/clk/clk-cs2000-cp.c
+++ b/drivers/clk/clk-cs2000-cp.c
@@ -305,15 +305,19 @@ static unsigned long cs2000_recalc_rate(struct clk_hw *hw,
return cs2000_ratio_to_rate(ratio, parent_rate, priv->lf_ratio);
}
-static long cs2000_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int cs2000_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct cs2000_priv *priv = hw_to_priv(hw);
u32 ratio;
- ratio = cs2000_rate_to_ratio(*parent_rate, rate, priv->lf_ratio);
+ ratio = cs2000_rate_to_ratio(req->best_parent_rate, req->rate,
+ priv->lf_ratio);
- return cs2000_ratio_to_rate(ratio, *parent_rate, priv->lf_ratio);
+ req->rate = cs2000_ratio_to_rate(ratio, req->best_parent_rate,
+ priv->lf_ratio);
+
+ return 0;
}
static int cs2000_select_ratio_mode(struct cs2000_priv *priv,
@@ -430,7 +434,7 @@ static u8 cs2000_get_parent(struct clk_hw *hw)
static const struct clk_ops cs2000_ops = {
.get_parent = cs2000_get_parent,
.recalc_rate = cs2000_recalc_rate,
- .round_rate = cs2000_round_rate,
+ .determine_rate = cs2000_determine_rate,
.set_rate = cs2000_set_rate,
.prepare = cs2000_enable,
.unprepare = cs2000_disable,
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index c1f426b8a504..2601b6155afb 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -431,27 +431,6 @@ long divider_ro_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
}
EXPORT_SYMBOL_GPL(divider_ro_round_rate_parent);
-static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
-{
- struct clk_divider *divider = to_clk_divider(hw);
-
- /* if read only, just return current value */
- if (divider->flags & CLK_DIVIDER_READ_ONLY) {
- u32 val;
-
- val = clk_div_readl(divider) >> divider->shift;
- val &= clk_div_mask(divider->width);
-
- return divider_ro_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags,
- val);
- }
-
- return divider_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags);
-}
-
static int clk_divider_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
@@ -527,7 +506,6 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops clk_divider_ops = {
.recalc_rate = clk_divider_recalc_rate,
- .round_rate = clk_divider_round_rate,
.determine_rate = clk_divider_determine_rate,
.set_rate = clk_divider_set_rate,
};
@@ -535,7 +513,6 @@ EXPORT_SYMBOL_GPL(clk_divider_ops);
const struct clk_ops clk_divider_ro_ops = {
.recalc_rate = clk_divider_recalc_rate,
- .round_rate = clk_divider_round_rate,
.determine_rate = clk_divider_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_divider_ro_ops);
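/*
 * Editor's note (simplified sketch, not the actual core source): when a
 * clock supplies .determine_rate, the clk core uses it and never falls back
 * to .round_rate, which is why the divider's round_rate implementation could
 * be deleted outright rather than converted. The real core additionally
 * handles clocks with neither op by passing the request to the parent or
 * keeping the current rate; this only shows the dispatch order:
 */
#include <linux/clk-provider.h>

static int example_core_round(struct clk_hw *hw, const struct clk_ops *ops,
			      struct clk_rate_request *req)
{
	if (ops->determine_rate)
		return ops->determine_rate(hw, req);

	if (ops->round_rate) {
		long rate = ops->round_rate(hw, req->rate,
					    &req->best_parent_rate);
		if (rate < 0)
			return rate;

		req->rate = rate;
		return 0;
	}

	/* neither op: the rate cannot be adjusted (simplified) */
	return -EINVAL;
}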
diff --git a/drivers/clk/clk-ep93xx.c b/drivers/clk/clk-ep93xx.c
index 4bd8d6ecf6a2..972aadd11493 100644
--- a/drivers/clk/clk-ep93xx.c
+++ b/drivers/clk/clk-ep93xx.c
@@ -389,23 +389,25 @@ static unsigned long ep93xx_div_recalc_rate(struct clk_hw *hw,
return DIV_ROUND_CLOSEST(parent_rate, clk->div[index]);
}
-static long ep93xx_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ep93xx_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ep93xx_clk *clk = ep93xx_clk_from(hw);
unsigned long best = 0, now;
unsigned int i;
for (i = 0; i < clk->num_div; i++) {
- if ((rate * clk->div[i]) == *parent_rate)
- return rate;
+ if (req->rate * clk->div[i] == req->best_parent_rate)
+ return 0;
- now = DIV_ROUND_CLOSEST(*parent_rate, clk->div[i]);
- if (!best || is_best(rate, now, best))
+ now = DIV_ROUND_CLOSEST(req->best_parent_rate, clk->div[i]);
+ if (!best || is_best(req->rate, now, best))
best = now;
}
- return best;
+ req->rate = best;
+
+ return 0;
}
static int ep93xx_div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -437,7 +439,7 @@ static const struct clk_ops ep93xx_div_ops = {
.disable = ep93xx_clk_disable,
.is_enabled = ep93xx_clk_is_enabled,
.recalc_rate = ep93xx_div_recalc_rate,
- .round_rate = ep93xx_div_round_rate,
+ .determine_rate = ep93xx_div_determine_rate,
.set_rate = ep93xx_div_set_rate,
};
@@ -486,9 +488,10 @@ static const struct ep93xx_gate ep93xx_uarts[] = {
static int ep93xx_uart_clock_init(struct ep93xx_clk_priv *priv)
{
struct clk_parent_data parent_data = { };
- unsigned int i, idx, ret, clk_uart_div;
+ unsigned int i, idx, clk_uart_div;
struct ep93xx_clk *clk;
u32 val;
+ int ret;
regmap_read(priv->map, EP93XX_SYSCON_PWRCNT, &val);
if (val & EP93XX_SYSCON_PWRCNT_UARTBAUD)
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index e62ae8794d44..de658c9e4c53 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -30,19 +30,21 @@ static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
return (unsigned long)rate;
}
-static long clk_factor_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_factor_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_fixed_factor *fix = to_clk_fixed_factor(hw);
if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
unsigned long best_parent;
- best_parent = (rate / fix->mult) * fix->div;
- *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
+ best_parent = (req->rate / fix->mult) * fix->div;
+ req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
}
- return (*prate / fix->div) * fix->mult;
+ req->rate = (req->best_parent_rate / fix->div) * fix->mult;
+
+ return 0;
}
static int clk_factor_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -50,7 +52,7 @@ static int clk_factor_set_rate(struct clk_hw *hw, unsigned long rate,
{
/*
* We must report success but we can do so unconditionally because
- * clk_factor_round_rate returns values that ensure this call is a
+ * clk_factor_determine_rate returns values that ensure this call is a
* nop.
*/
@@ -69,7 +71,7 @@ static unsigned long clk_factor_recalc_accuracy(struct clk_hw *hw,
}
const struct clk_ops clk_fixed_factor_ops = {
- .round_rate = clk_factor_round_rate,
+ .determine_rate = clk_factor_determine_rate,
.set_rate = clk_factor_set_rate,
.recalc_rate = clk_factor_recalc_rate,
.recalc_accuracy = clk_factor_recalc_accuracy,
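/*
 * Editor's note: this conversion also shows the parent-propagation side of
 * the API -- with CLK_SET_RATE_PARENT the callback writes the parent rate it
 * wants into req->best_parent_rate (formerly through *prate), and the core
 * then tries to set the parent accordingly. Worked example with mult = 1,
 * div = 3: a 100 MHz request asks the parent for 300 MHz, and whatever the
 * parent rounds to comes back divided by 3, which is why .set_rate can
 * report success unconditionally.
 */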
diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c
index da057172cc90..cd36a6e27f25 100644
--- a/drivers/clk/clk-fractional-divider.c
+++ b/drivers/clk/clk-fractional-divider.c
@@ -151,25 +151,32 @@ void clk_fractional_divider_general_approximation(struct clk_hw *hw,
}
EXPORT_SYMBOL_GPL(clk_fractional_divider_general_approximation);
-static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_fd_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_fractional_divider *fd = to_clk_fd(hw);
unsigned long m, n;
u64 ret;
- if (!rate || (!clk_hw_can_set_rate_parent(hw) && rate >= *parent_rate))
- return *parent_rate;
+ if (!req->rate || (!clk_hw_can_set_rate_parent(hw) && req->rate >= req->best_parent_rate)) {
+ req->rate = req->best_parent_rate;
+
+ return 0;
+ }
if (fd->approximation)
- fd->approximation(hw, rate, parent_rate, &m, &n);
+ fd->approximation(hw, req->rate, &req->best_parent_rate, &m, &n);
else
- clk_fractional_divider_general_approximation(hw, rate, parent_rate, &m, &n);
+ clk_fractional_divider_general_approximation(hw, req->rate,
+ &req->best_parent_rate,
+ &m, &n);
- ret = (u64)*parent_rate * m;
+ ret = (u64)req->best_parent_rate * m;
do_div(ret, n);
- return ret;
+ req->rate = ret;
+
+ return 0;
}
static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -250,7 +257,7 @@ static void clk_fd_debug_init(struct clk_hw *hw, struct dentry *dentry)
const struct clk_ops clk_fractional_divider_ops = {
.recalc_rate = clk_fd_recalc_rate,
- .round_rate = clk_fd_round_rate,
+ .determine_rate = clk_fd_determine_rate,
.set_rate = clk_fd_set_rate,
#ifdef CONFIG_DEBUG_FS
.debug_init = clk_fd_debug_init,
diff --git a/drivers/clk/clk-gemini.c b/drivers/clk/clk-gemini.c
index 856b008e07c6..e94589c38568 100644
--- a/drivers/clk/clk-gemini.c
+++ b/drivers/clk/clk-gemini.c
@@ -126,13 +126,16 @@ static unsigned long gemini_pci_recalc_rate(struct clk_hw *hw,
return 33000000;
}
-static long gemini_pci_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int gemini_pci_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
/* We support 33 and 66 MHz */
- if (rate < 48000000)
- return 33000000;
- return 66000000;
+ if (req->rate < 48000000)
+ req->rate = 33000000;
+ else
+ req->rate = 66000000;
+
+ return 0;
}
static int gemini_pci_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -179,7 +182,7 @@ static int gemini_pci_is_enabled(struct clk_hw *hw)
static const struct clk_ops gemini_pci_clk_ops = {
.recalc_rate = gemini_pci_recalc_rate,
- .round_rate = gemini_pci_round_rate,
+ .determine_rate = gemini_pci_determine_rate,
.set_rate = gemini_pci_set_rate,
.enable = gemini_pci_enable,
.disable = gemini_pci_disable,
diff --git a/drivers/clk/clk-highbank.c b/drivers/clk/clk-highbank.c
index 6e68a41a70a1..cc583934ecf2 100644
--- a/drivers/clk/clk-highbank.c
+++ b/drivers/clk/clk-highbank.c
@@ -130,15 +130,17 @@ static void clk_pll_calc(unsigned long rate, unsigned long ref_freq,
*pdivf = divf;
}
-static long clk_pll_round_rate(struct clk_hw *hwclk, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u32 divq, divf;
- unsigned long ref_freq = *parent_rate;
+ unsigned long ref_freq = req->best_parent_rate;
- clk_pll_calc(rate, ref_freq, &divq, &divf);
+ clk_pll_calc(req->rate, ref_freq, &divq, &divf);
- return (ref_freq * (divf + 1)) / (1 << divq);
+ req->rate = (ref_freq * (divf + 1)) / (1 << divq);
+
+ return 0;
}
static int clk_pll_set_rate(struct clk_hw *hwclk, unsigned long rate,
@@ -185,7 +187,7 @@ static const struct clk_ops clk_pll_ops = {
.enable = clk_pll_enable,
.disable = clk_pll_disable,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_round_rate,
+ .determine_rate = clk_pll_determine_rate,
.set_rate = clk_pll_set_rate,
};
@@ -227,16 +229,18 @@ static unsigned long clk_periclk_recalc_rate(struct clk_hw *hwclk,
return parent_rate / div;
}
-static long clk_periclk_round_rate(struct clk_hw *hwclk, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_periclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u32 div;
- div = *parent_rate / rate;
+ div = req->best_parent_rate / req->rate;
div++;
div &= ~0x1;
- return *parent_rate / div;
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
}
static int clk_periclk_set_rate(struct clk_hw *hwclk, unsigned long rate,
@@ -255,7 +259,7 @@ static int clk_periclk_set_rate(struct clk_hw *hwclk, unsigned long rate,
static const struct clk_ops periclk_ops = {
.recalc_rate = clk_periclk_recalc_rate,
- .round_rate = clk_periclk_round_rate,
+ .determine_rate = clk_periclk_determine_rate,
.set_rate = clk_periclk_set_rate,
};
diff --git a/drivers/clk/clk-hsdk-pll.c b/drivers/clk/clk-hsdk-pll.c
index 921523fc26f2..7d56a47c2aa7 100644
--- a/drivers/clk/clk-hsdk-pll.c
+++ b/drivers/clk/clk-hsdk-pll.c
@@ -197,8 +197,8 @@ static unsigned long hsdk_pll_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long hsdk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int hsdk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int i;
unsigned long best_rate;
@@ -211,13 +211,15 @@ static long hsdk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
best_rate = pll_cfg[0].rate;
for (i = 1; pll_cfg[i].rate != 0; i++) {
- if (abs(rate - pll_cfg[i].rate) < abs(rate - best_rate))
+ if (abs(req->rate - pll_cfg[i].rate) < abs(req->rate - best_rate))
best_rate = pll_cfg[i].rate;
}
dev_dbg(clk->dev, "chosen best rate: %lu\n", best_rate);
- return best_rate;
+ req->rate = best_rate;
+
+ return 0;
}
static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *clk,
@@ -296,7 +298,7 @@ static int hsdk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops hsdk_pll_ops = {
.recalc_rate = hsdk_pll_recalc_rate,
- .round_rate = hsdk_pll_round_rate,
+ .determine_rate = hsdk_pll_determine_rate,
.set_rate = hsdk_pll_set_rate,
};
diff --git a/drivers/clk/clk-lmk04832.c b/drivers/clk/clk-lmk04832.c
index 2bcf422f0b04..b2107b31efa2 100644
--- a/drivers/clk/clk-lmk04832.c
+++ b/drivers/clk/clk-lmk04832.c
@@ -491,28 +491,33 @@ static long lmk04832_calc_pll2_params(unsigned long prate, unsigned long rate,
return DIV_ROUND_CLOSEST(prate * 2 * pll2_p * pll2_n, pll2_r);
}
-static long lmk04832_vco_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int lmk04832_vco_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lmk04832 *lmk = container_of(hw, struct lmk04832, vco);
unsigned int n, p, r;
long vco_rate;
int ret;
- ret = lmk04832_check_vco_ranges(lmk, rate);
+ ret = lmk04832_check_vco_ranges(lmk, req->rate);
if (ret < 0)
return ret;
- vco_rate = lmk04832_calc_pll2_params(*prate, rate, &n, &p, &r);
+ vco_rate = lmk04832_calc_pll2_params(req->best_parent_rate, req->rate,
+ &n, &p, &r);
if (vco_rate < 0) {
dev_err(lmk->dev, "PLL2 parameters out of range\n");
		return vco_rate;
}
- if (rate != vco_rate)
+ if (req->rate != vco_rate)
return -EINVAL;
- return vco_rate;
+ req->rate = vco_rate;
+
+ return 0;
}
static int lmk04832_vco_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -579,7 +584,7 @@ static const struct clk_ops lmk04832_vco_ops = {
.prepare = lmk04832_vco_prepare,
.unprepare = lmk04832_vco_unprepare,
.recalc_rate = lmk04832_vco_recalc_rate,
- .round_rate = lmk04832_vco_round_rate,
+ .determine_rate = lmk04832_vco_determine_rate,
.set_rate = lmk04832_vco_set_rate,
};
@@ -888,25 +893,27 @@ static unsigned long lmk04832_sclk_recalc_rate(struct clk_hw *hw,
return DIV_ROUND_CLOSEST(prate, sysref_div);
}
-static long lmk04832_sclk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int lmk04832_sclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lmk04832 *lmk = container_of(hw, struct lmk04832, sclk);
unsigned long sclk_rate;
unsigned int sysref_div;
- sysref_div = DIV_ROUND_CLOSEST(*prate, rate);
- sclk_rate = DIV_ROUND_CLOSEST(*prate, sysref_div);
+ sysref_div = DIV_ROUND_CLOSEST(req->best_parent_rate, req->rate);
+ sclk_rate = DIV_ROUND_CLOSEST(req->best_parent_rate, sysref_div);
if (sysref_div < 0x07 || sysref_div > 0x1fff) {
dev_err(lmk->dev, "SYSREF divider out of range\n");
return -EINVAL;
}
- if (rate != sclk_rate)
+ if (req->rate != sclk_rate)
return -EINVAL;
- return sclk_rate;
+ req->rate = sclk_rate;
+
+ return 0;
}
static int lmk04832_sclk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -945,7 +952,7 @@ static const struct clk_ops lmk04832_sclk_ops = {
.prepare = lmk04832_sclk_prepare,
.unprepare = lmk04832_sclk_unprepare,
.recalc_rate = lmk04832_sclk_recalc_rate,
- .round_rate = lmk04832_sclk_round_rate,
+ .determine_rate = lmk04832_sclk_determine_rate,
.set_rate = lmk04832_sclk_set_rate,
};
@@ -1069,26 +1076,28 @@ static unsigned long lmk04832_dclk_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long lmk04832_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int lmk04832_dclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lmk_dclk *dclk = container_of(hw, struct lmk_dclk, hw);
struct lmk04832 *lmk = dclk->lmk;
unsigned long dclk_rate;
unsigned int dclk_div;
- dclk_div = DIV_ROUND_CLOSEST(*prate, rate);
- dclk_rate = DIV_ROUND_CLOSEST(*prate, dclk_div);
+ dclk_div = DIV_ROUND_CLOSEST(req->best_parent_rate, req->rate);
+ dclk_rate = DIV_ROUND_CLOSEST(req->best_parent_rate, dclk_div);
if (dclk_div < 1 || dclk_div > 0x3ff) {
dev_err(lmk->dev, "%s_div out of range\n", clk_hw_get_name(hw));
return -EINVAL;
}
- if (rate != dclk_rate)
+ if (req->rate != dclk_rate)
return -EINVAL;
- return dclk_rate;
+ req->rate = dclk_rate;
+
+ return 0;
}
static int lmk04832_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -1158,7 +1167,7 @@ static const struct clk_ops lmk04832_dclk_ops = {
.prepare = lmk04832_dclk_prepare,
.unprepare = lmk04832_dclk_unprepare,
.recalc_rate = lmk04832_dclk_recalc_rate,
- .round_rate = lmk04832_dclk_round_rate,
+ .determine_rate = lmk04832_dclk_determine_rate,
.set_rate = lmk04832_dclk_set_rate,
};
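/*
 * Editor's note: the sclk/dclk callbacks above deliberately return -EINVAL
 * when the divider cannot produce the requested rate exactly. Unlike most
 * determine_rate implementations in this series they never round, so
 * clk_set_rate() fails instead of silently settling on a neighbouring
 * frequency.
 */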
diff --git a/drivers/clk/clk-loongson1.c b/drivers/clk/clk-loongson1.c
index a3467aa6790f..f9f060d08a5f 100644
--- a/drivers/clk/clk-loongson1.c
+++ b/drivers/clk/clk-loongson1.c
@@ -93,14 +93,16 @@ static unsigned long ls1x_divider_recalc_rate(struct clk_hw *hw,
d->flags, d->width);
}
-static long ls1x_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int ls1x_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ls1x_clk *ls1x_clk = to_ls1x_clk(hw);
const struct ls1x_clk_div_data *d = ls1x_clk->data;
- return divider_round_rate(hw, rate, prate, d->table,
- d->width, d->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ d->table, d->width, d->flags);
+
+ return 0;
}
static int ls1x_divider_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -146,7 +148,7 @@ static int ls1x_divider_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops ls1x_clk_divider_ops = {
.recalc_rate = ls1x_divider_recalc_rate,
- .round_rate = ls1x_divider_round_rate,
+ .determine_rate = ls1x_divider_determine_rate,
.set_rate = ls1x_divider_set_rate,
};
diff --git a/drivers/clk/clk-loongson2.c b/drivers/clk/clk-loongson2.c
index 27e632edd484..9c4c6c99db3e 100644
--- a/drivers/clk/clk-loongson2.c
+++ b/drivers/clk/clk-loongson2.c
@@ -13,10 +13,6 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include <dt-bindings/clock/loongson,ls2k-clk.h>
-static const struct clk_parent_data pdata[] = {
- { .fw_name = "ref_100m", },
-};
-
enum loongson2_clk_type {
CLK_TYPE_PLL,
CLK_TYPE_SCALE,
@@ -42,6 +38,7 @@ struct loongson2_clk_data {
u8 div_width;
u8 mult_shift;
u8 mult_width;
+ u8 bit_idx;
};
struct loongson2_clk_board_info {
@@ -50,6 +47,7 @@ struct loongson2_clk_board_info {
const char *name;
const char *parent_name;
unsigned long fixed_rate;
+ unsigned long flags;
u8 reg_offset;
u8 div_shift;
u8 div_width;
@@ -95,6 +93,19 @@ struct loongson2_clk_board_info {
.div_width = _dwidth, \
}
+#define CLK_SCALE_MODE(_id, _name, _pname, _offset, \
+ _dshift, _dwidth, _midx) \
+ { \
+ .id = _id, \
+ .type = CLK_TYPE_SCALE, \
+ .name = _name, \
+ .parent_name = _pname, \
+ .reg_offset = _offset, \
+ .div_shift = _dshift, \
+ .div_width = _dwidth, \
+ .bit_idx = _midx + 1, \
+ }
+
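/*
 * Editor's note: bit_idx stores _midx + 1 so that the value 0 can keep
 * meaning "no mode bit" for the other clock types sharing struct
 * loongson2_clk_data; the recalc callback below undoes the offset with
 * BIT(clk->bit_idx - 1).
 */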
#define CLK_GATE(_id, _name, _pname, _offset, _bidx) \
{ \
.id = _id, \
@@ -105,6 +116,18 @@ struct loongson2_clk_board_info {
.bit_idx = _bidx, \
}
+#define CLK_GATE_FLAGS(_id, _name, _pname, _offset, _bidx, \
+ _flags) \
+ { \
+ .id = _id, \
+ .type = CLK_TYPE_GATE, \
+ .name = _name, \
+ .parent_name = _pname, \
+ .reg_offset = _offset, \
+ .bit_idx = _bidx, \
+ .flags = _flags \
+ }
+
#define CLK_FIXED(_id, _name, _pname, _rate) \
{ \
.id = _id, \
@@ -114,6 +137,51 @@ struct loongson2_clk_board_info {
.fixed_rate = _rate, \
}
+static const struct loongson2_clk_board_info ls2k0300_clks[] = {
+ /* Reference Clock */
+ CLK_PLL(LS2K0300_NODE_PLL, "pll_node", 0x00, 15, 9, 8, 7),
+ CLK_PLL(LS2K0300_DDR_PLL, "pll_ddr", 0x08, 15, 9, 8, 7),
+ CLK_PLL(LS2K0300_PIX_PLL, "pll_pix", 0x10, 15, 9, 8, 7),
+ CLK_FIXED(LS2K0300_CLK_STABLE, "clk_stable", NULL, 100000000),
+ CLK_FIXED(LS2K0300_CLK_THSENS, "clk_thsens", NULL, 10000000),
+ /* Node PLL */
+ CLK_DIV(LS2K0300_CLK_NODE_DIV, "clk_node_div", "pll_node", 0x00, 24, 7),
+ CLK_DIV(LS2K0300_CLK_GMAC_DIV, "clk_gmac_div", "pll_node", 0x04, 0, 7),
+ CLK_DIV(LS2K0300_CLK_I2S_DIV, "clk_i2s_div", "pll_node", 0x04, 8, 7),
+ CLK_GATE(LS2K0300_CLK_NODE_PLL_GATE, "clk_node_pll_gate", "clk_node_div", 0x00, 0),
+ CLK_GATE(LS2K0300_CLK_GMAC_GATE, "clk_gmac_gate", "clk_gmac_div", 0x00, 1),
+ CLK_GATE(LS2K0300_CLK_I2S_GATE, "clk_i2s_gate", "clk_i2s_div", 0x00, 2),
+ CLK_GATE_FLAGS(LS2K0300_CLK_NODE_GATE, "clk_node_gate", "clk_node_scale", 0x24, 0,
+ CLK_IS_CRITICAL),
+ CLK_SCALE_MODE(LS2K0300_CLK_NODE_SCALE, "clk_node_scale", "clk_node_pll_gate", 0x20, 0, 3,
+ 3),
+ /* DDR PLL */
+ CLK_DIV(LS2K0300_CLK_DDR_DIV, "clk_ddr_div", "pll_ddr", 0x08, 24, 7),
+ CLK_DIV(LS2K0300_CLK_NET_DIV, "clk_net_div", "pll_ddr", 0x0c, 0, 7),
+ CLK_DIV(LS2K0300_CLK_DEV_DIV, "clk_dev_div", "pll_ddr", 0x0c, 8, 7),
+ CLK_GATE(LS2K0300_CLK_NET_GATE, "clk_net_gate", "clk_net_div", 0x08, 1),
+ CLK_GATE(LS2K0300_CLK_DEV_GATE, "clk_dev_gate", "clk_dev_div", 0x08, 2),
+ CLK_GATE_FLAGS(LS2K0300_CLK_DDR_GATE, "clk_ddr_gate", "clk_ddr_div", 0x08, 0,
+ CLK_IS_CRITICAL),
+ /* PIX PLL */
+ CLK_DIV(LS2K0300_CLK_PIX_DIV, "clk_pix_div", "pll_pix", 0x10, 24, 7),
+ CLK_DIV(LS2K0300_CLK_GMACBP_DIV, "clk_gmacbp_div", "pll_pix", 0x14, 0, 7),
+ CLK_GATE(LS2K0300_CLK_PIX_PLL_GATE, "clk_pix_pll_gate", "clk_pix_div", 0x10, 0),
+ CLK_GATE(LS2K0300_CLK_PIX_GATE, "clk_pix_gate", "clk_pix_scale", 0x24, 6),
+ CLK_GATE(LS2K0300_CLK_GMACBP_GATE, "clk_gmacbp_gate", "clk_gmacbp_div", 0x10, 1),
+ CLK_SCALE_MODE(LS2K0300_CLK_PIX_SCALE, "clk_pix_scale", "clk_pix_pll_gate", 0x20, 4, 3, 7),
+ /* clk_dev_gate */
+ CLK_DIV(LS2K0300_CLK_SDIO_SCALE, "clk_sdio_scale", "clk_dev_gate", 0x20, 24, 4),
+ CLK_GATE(LS2K0300_CLK_USB_GATE, "clk_usb_gate", "clk_usb_scale", 0x24, 2),
+ CLK_GATE(LS2K0300_CLK_SDIO_GATE, "clk_sdio_gate", "clk_sdio_scale", 0x24, 4),
+ CLK_GATE(LS2K0300_CLK_APB_GATE, "clk_apb_gate", "clk_apb_scale", 0x24, 3),
+ CLK_GATE_FLAGS(LS2K0300_CLK_BOOT_GATE, "clk_boot_gate", "clk_boot_scale", 0x24, 1,
+ CLK_IS_CRITICAL),
+ CLK_SCALE_MODE(LS2K0300_CLK_USB_SCALE, "clk_usb_scale", "clk_dev_gate", 0x20, 12, 3, 15),
+ CLK_SCALE_MODE(LS2K0300_CLK_APB_SCALE, "clk_apb_scale", "clk_dev_gate", 0x20, 16, 3, 19),
+ CLK_SCALE_MODE(LS2K0300_CLK_BOOT_SCALE, "clk_boot_scale", "clk_dev_gate", 0x20, 8, 3, 11),
+};
+
static const struct loongson2_clk_board_info ls2k0500_clks[] = {
CLK_PLL(LOONGSON2_NODE_PLL, "pll_node", 0, 16, 8, 8, 6),
CLK_PLL(LOONGSON2_DDR_PLL, "pll_ddr", 0x8, 16, 8, 8, 6),
@@ -230,20 +298,26 @@ static const struct clk_ops loongson2_pll_recalc_ops = {
static unsigned long loongson2_freqscale_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- u64 val, mult;
+ u64 val, scale;
+ u32 mode = 0;
struct loongson2_clk_data *clk = to_loongson2_clk(hw);
val = readq(clk->reg);
- mult = loongson2_rate_part(val, clk->div_shift, clk->div_width) + 1;
+ scale = loongson2_rate_part(val, clk->div_shift, clk->div_width) + 1;
+
+ if (clk->bit_idx)
+ mode = val & BIT(clk->bit_idx - 1);
- return div_u64((u64)parent_rate * mult, 8);
+ return mode == 0 ? div_u64((u64)parent_rate * scale, 8) :
+ div_u64((u64)parent_rate, scale);
}
static const struct clk_ops loongson2_freqscale_recalc_ops = {
.recalc_rate = loongson2_freqscale_recalc_rate,
};
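/*
 * Editor's worked example (hypothetical values): with a 3-bit scale field
 * reading 5, scale = 5 + 1 = 6. For a 100 MHz parent, mode 0 gives
 * 100 MHz * 6 / 8 = 75 MHz, while mode 1 gives 100 MHz / 6 ~= 16.67 MHz.
 */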
-static struct clk_hw *loongson2_clk_register(struct loongson2_clk_provider *clp,
+static struct clk_hw *loongson2_clk_register(const char *parent,
+ struct loongson2_clk_provider *clp,
const struct loongson2_clk_board_info *cld,
const struct clk_ops *ops)
{
@@ -260,17 +334,14 @@ static struct clk_hw *loongson2_clk_register(struct loongson2_clk_provider *clp,
init.ops = ops;
init.flags = 0;
init.num_parents = 1;
-
- if (!cld->parent_name)
- init.parent_data = pdata;
- else
- init.parent_names = &cld->parent_name;
+ init.parent_names = &parent;
clk->reg = clp->base + cld->reg_offset;
clk->div_shift = cld->div_shift;
clk->div_width = cld->div_width;
clk->mult_shift = cld->mult_shift;
clk->mult_width = cld->mult_width;
+ clk->bit_idx = cld->bit_idx;
clk->hw.init = &init;
hw = &clk->hw;
@@ -288,11 +359,17 @@ static int loongson2_clk_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct loongson2_clk_provider *clp;
const struct loongson2_clk_board_info *p, *data;
+ const char *refclk_name, *parent_name;
data = device_get_match_data(dev);
if (!data)
return -EINVAL;
+	refclk_name = of_clk_get_parent_name(dev->of_node, 0);
+	if (!refclk_name)
+		return dev_err_probe(dev, -EINVAL,
+				     "failed to get refclk name\n");
+
for (p = data; p->name; p++)
clks_num = max(clks_num, p->id + 1);
@@ -314,32 +391,36 @@ static int loongson2_clk_probe(struct platform_device *pdev)
for (i = 0; i < clks_num; i++) {
p = &data[i];
+ parent_name = p->parent_name ? p->parent_name : refclk_name;
+
switch (p->type) {
case CLK_TYPE_PLL:
- hw = loongson2_clk_register(clp, p,
+ hw = loongson2_clk_register(parent_name, clp, p,
&loongson2_pll_recalc_ops);
break;
case CLK_TYPE_SCALE:
- hw = loongson2_clk_register(clp, p,
+ hw = loongson2_clk_register(parent_name, clp, p,
&loongson2_freqscale_recalc_ops);
break;
case CLK_TYPE_DIVIDER:
hw = devm_clk_hw_register_divider(dev, p->name,
- p->parent_name, 0,
+ parent_name, 0,
clp->base + p->reg_offset,
p->div_shift, p->div_width,
- CLK_DIVIDER_ONE_BASED,
+ CLK_DIVIDER_ONE_BASED |
+ CLK_DIVIDER_ALLOW_ZERO,
&clp->clk_lock);
break;
case CLK_TYPE_GATE:
- hw = devm_clk_hw_register_gate(dev, p->name, p->parent_name, 0,
+ hw = devm_clk_hw_register_gate(dev, p->name, parent_name,
+ p->flags,
clp->base + p->reg_offset,
p->bit_idx, 0,
&clp->clk_lock);
break;
case CLK_TYPE_FIXED:
- hw = devm_clk_hw_register_fixed_rate_parent_data(dev, p->name, pdata,
- 0, p->fixed_rate);
+ hw = devm_clk_hw_register_fixed_rate(dev, p->name, parent_name,
+ 0, p->fixed_rate);
break;
default:
return dev_err_probe(dev, -EINVAL, "Invalid clk type\n");
@@ -357,6 +438,7 @@ static int loongson2_clk_probe(struct platform_device *pdev)
}
static const struct of_device_id loongson2_clk_match_table[] = {
+ { .compatible = "loongson,ls2k0300-clk", .data = &ls2k0300_clks },
{ .compatible = "loongson,ls2k0500-clk", .data = &ls2k0500_clks },
{ .compatible = "loongson,ls2k-clk", .data = &ls2k1000_clks },
{ .compatible = "loongson,ls2k2000-clk", .data = &ls2k2000_clks },
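A minimal, self-contained model of the reworked freqscale semantics above (a sketch, not the driver code; all register and rate values are assumed): when the new mode bit is clear the output is parent * scale / 8, and when it is set the block acts as a plain divider.

    #include <stdint.h>
    #include <stdio.h>

    /* Model of loongson2_freqscale_recalc_rate() after this change; the
     * hardware field stores scale - 1, hence the + 1. */
    static uint64_t freqscale_rate(uint64_t parent_rate, uint64_t scale_field,
                                   int mode_bit_set)
    {
            uint64_t scale = scale_field + 1;

            return mode_bit_set ? parent_rate / scale
                                : parent_rate * scale / 8;
    }

    int main(void)
    {
            /* 100 MHz parent, field value 3, so scale is 4 (assumed values) */
            printf("%llu\n", (unsigned long long)freqscale_rate(100000000, 3, 0)); /* 50 MHz */
            printf("%llu\n", (unsigned long long)freqscale_rate(100000000, 3, 1)); /* 25 MHz */
            return 0;
    }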
diff --git a/drivers/clk/clk-max9485.c b/drivers/clk/clk-max9485.c
index be9020b6c789..0515e3e41162 100644
--- a/drivers/clk/clk-max9485.c
+++ b/drivers/clk/clk-max9485.c
@@ -159,29 +159,32 @@ static unsigned long max9485_clkout_recalc_rate(struct clk_hw *hw,
return 0;
}
-static long max9485_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int max9485_clkout_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
const struct max9485_rate *curr, *prev = NULL;
for (curr = max9485_rates; curr->out != 0; curr++) {
/* Exact matches */
- if (curr->out == rate)
- return rate;
+ if (curr->out == req->rate)
+ return 0;
/*
* Find the first entry that has a frequency higher than the
* requested one.
*/
- if (curr->out > rate) {
+ if (curr->out > req->rate) {
unsigned int mid;
/*
* If this is the first entry, clamp the value to the
* lowest possible frequency.
*/
- if (!prev)
- return curr->out;
+ if (!prev) {
+ req->rate = curr->out;
+
+ return 0;
+ }
/*
* Otherwise, determine whether the previous entry or
@@ -189,14 +192,18 @@ static long max9485_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
*/
mid = prev->out + ((curr->out - prev->out) / 2);
- return (mid > rate) ? prev->out : curr->out;
+ req->rate = mid > req->rate ? prev->out : curr->out;
+
+ return 0;
}
prev = curr;
}
/* If the last entry was still too high, clamp the value */
- return prev->out;
+ req->rate = prev->out;
+
+ return 0;
}
struct max9485_clk {
@@ -221,7 +228,7 @@ static const struct max9485_clk max9485_clks[MAX9485_NUM_CLKS] = {
.parent_index = -1,
.ops = {
.set_rate = max9485_clkout_set_rate,
- .round_rate = max9485_clkout_round_rate,
+ .determine_rate = max9485_clkout_determine_rate,
.recalc_rate = max9485_clkout_recalc_rate,
},
},
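The max9485 conversion keeps the nearest-entry selection intact: an exact hit is returned unchanged, the first table entry clamps low requests, and otherwise the midpoint between adjacent entries decides which neighbor wins. A standalone sketch of that selection, using a hypothetical rate table:

    #include <stdio.h>

    /* Hypothetical ascending rate table, 0-terminated like max9485_rates. */
    static const unsigned long rates[] = { 12000000, 24000000, 49152000, 0 };

    /* Round 'rate' to the nearest table entry, clamping at both ends. */
    static unsigned long round_to_table(unsigned long rate)
    {
            const unsigned long *curr, *prev = NULL;

            for (curr = rates; *curr != 0; curr++) {
                    if (*curr == rate)
                            return rate;
                    if (*curr > rate) {
                            unsigned long mid;

                            if (!prev)
                                    return *curr;
                            mid = *prev + (*curr - *prev) / 2;
                            return (mid > rate) ? *prev : *curr;
                    }
                    prev = curr;
            }
            return *prev;   /* request above the table: clamp high */
    }

    int main(void)
    {
            printf("%lu\n", round_to_table(30000000));  /* prints 24000000 */
            return 0;
    }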
diff --git a/drivers/clk/clk-milbeaut.c b/drivers/clk/clk-milbeaut.c
index 18c20aff45f7..b4f9b7143eaa 100644
--- a/drivers/clk/clk-milbeaut.c
+++ b/drivers/clk/clk-milbeaut.c
@@ -386,8 +386,8 @@ static unsigned long m10v_clk_divider_recalc_rate(struct clk_hw *hw,
divider->flags, divider->width);
}
-static long m10v_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int m10v_clk_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct m10v_clk_divider *divider = to_m10v_div(hw);
@@ -398,13 +398,19 @@ static long m10v_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
val = readl(divider->reg) >> divider->shift;
val &= clk_div_mask(divider->width);
- return divider_ro_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags,
- val);
+ req->rate = divider_ro_round_rate(hw, req->rate,
+ &req->best_parent_rate,
+ divider->table,
+ divider->width,
+ divider->flags, val);
+
+ return 0;
}
- return divider_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ divider->table, divider->width, divider->flags);
+
+ return 0;
}
static int m10v_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -450,7 +456,7 @@ static int m10v_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops m10v_clk_divider_ops = {
.recalc_rate = m10v_clk_divider_recalc_rate,
- .round_rate = m10v_clk_divider_round_rate,
+ .determine_rate = m10v_clk_divider_determine_rate,
.set_rate = m10v_clk_divider_set_rate,
};
diff --git a/drivers/clk/clk-multiplier.c b/drivers/clk/clk-multiplier.c
index e507aa958da9..6f2955d408b6 100644
--- a/drivers/clk/clk-multiplier.c
+++ b/drivers/clk/clk-multiplier.c
@@ -112,14 +112,16 @@ static unsigned long __bestmult(struct clk_hw *hw, unsigned long rate,
return bestmult;
}
-static long clk_multiplier_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_multiplier_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_multiplier *mult = to_clk_multiplier(hw);
- unsigned long factor = __bestmult(hw, rate, parent_rate,
+ unsigned long factor = __bestmult(hw, req->rate, &req->best_parent_rate,
mult->width, mult->flags);
- return *parent_rate * factor;
+ req->rate = req->best_parent_rate * factor;
+
+ return 0;
}
static int clk_multiplier_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -150,7 +152,7 @@ static int clk_multiplier_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops clk_multiplier_ops = {
.recalc_rate = clk_multiplier_recalc_rate,
- .round_rate = clk_multiplier_round_rate,
+ .determine_rate = clk_multiplier_determine_rate,
.set_rate = clk_multiplier_set_rate,
};
EXPORT_SYMBOL_GPL(clk_multiplier_ops);
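The milbeaut and multiplier hunks above, like most conversions in this series, are mechanical: the old .round_rate body survives, but the requested rate and parent rate now travel in struct clk_rate_request, and the chosen rate is written back into the request rather than returned. A generic before/after sketch (foo_pick_rate() is a hypothetical helper, not a real kernel API):

    #include <linux/clk-provider.h>

    /* Hypothetical rate-selection helper shared by both variants. */
    static unsigned long foo_pick_rate(unsigned long rate,
                                       unsigned long parent_rate);

    /* Before: .round_rate returns the chosen rate (or a negative error). */
    static long foo_round_rate(struct clk_hw *hw, unsigned long rate,
                               unsigned long *parent_rate)
    {
            return foo_pick_rate(rate, *parent_rate);
    }

    /* After: .determine_rate writes the chosen rate into the request and
     * returns 0 on success. */
    static int foo_determine_rate(struct clk_hw *hw,
                                  struct clk_rate_request *req)
    {
            req->rate = foo_pick_rate(req->rate, req->best_parent_rate);

            return 0;
    }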
diff --git a/drivers/clk/clk-rp1.c b/drivers/clk/clk-rp1.c
index afff90d48734..fd144755b879 100644
--- a/drivers/clk/clk-rp1.c
+++ b/drivers/clk/clk-rp1.c
@@ -368,6 +368,11 @@ struct rp1_clk_desc {
struct clk_divider div;
};
+static struct rp1_clk_desc *clk_audio_core;
+static struct rp1_clk_desc *clk_audio;
+static struct rp1_clk_desc *clk_i2s;
+static struct clk_hw *clk_xosc;
+
static inline
void clockman_write(struct rp1_clockman *clockman, u32 reg, u32 val)
{
@@ -475,7 +480,6 @@ static int rp1_pll_core_set_rate(struct clk_hw *hw,
struct rp1_clk_desc *pll_core = container_of(hw, struct rp1_clk_desc, hw);
struct rp1_clockman *clockman = pll_core->clockman;
const struct rp1_pll_core_data *data = pll_core->data;
- unsigned long calc_rate;
u32 fbdiv_int, fbdiv_frac;
/* Disable dividers to start with. */
@@ -484,8 +488,8 @@ static int rp1_pll_core_set_rate(struct clk_hw *hw,
clockman_write(clockman, data->fbdiv_frac_reg, 0);
spin_unlock(&clockman->regs_lock);
- calc_rate = get_pll_core_divider(hw, rate, parent_rate,
- &fbdiv_int, &fbdiv_frac);
+ get_pll_core_divider(hw, rate, parent_rate,
+ &fbdiv_int, &fbdiv_frac);
spin_lock(&clockman->regs_lock);
clockman_write(clockman, data->pwr_reg, fbdiv_frac ? 0 : PLL_PWR_DSMPD);
@@ -497,8 +501,6 @@ static int rp1_pll_core_set_rate(struct clk_hw *hw,
if (WARN_ON_ONCE(parent_rate > (rate / 16)))
return -ERANGE;
- pll_core->cached_rate = calc_rate;
-
spin_lock(&clockman->regs_lock);
/* Don't need to divide ref unless parent_rate > (output freq / 16) */
clockman_write(clockman, data->cs_reg,
@@ -530,13 +532,16 @@ static unsigned long rp1_pll_core_recalc_rate(struct clk_hw *hw,
return calc_rate;
}
-static long rp1_pll_core_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int rp1_pll_core_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u32 fbdiv_int, fbdiv_frac;
- return get_pll_core_divider(hw, rate, *parent_rate,
- &fbdiv_int, &fbdiv_frac);
+ req->rate = get_pll_core_divider(hw, req->rate, req->best_parent_rate,
+ &fbdiv_int,
+ &fbdiv_frac);
+
+ return 0;
}
static void get_pll_prim_dividers(unsigned long rate, unsigned long parent_rate,
@@ -614,14 +619,20 @@ static unsigned long rp1_pll_recalc_rate(struct clk_hw *hw,
return DIV_ROUND_CLOSEST(parent_rate, prim_div1 * prim_div2);
}
-static long rp1_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int rp1_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
+ struct clk_hw *clk_audio_hw = &clk_audio->hw;
u32 div1, div2;
- get_pll_prim_dividers(rate, *parent_rate, &div1, &div2);
+ if (hw == clk_audio_hw && clk_audio->cached_rate == req->rate)
+ req->best_parent_rate = clk_audio_core->cached_rate;
- return DIV_ROUND_CLOSEST(*parent_rate, div1 * div2);
+ get_pll_prim_dividers(req->rate, req->best_parent_rate, &div1, &div2);
+
+ req->rate = DIV_ROUND_CLOSEST(req->best_parent_rate, div1 * div2);
+
+ return 0;
}
static int rp1_pll_ph_is_on(struct clk_hw *hw)
@@ -671,13 +682,15 @@ static unsigned long rp1_pll_ph_recalc_rate(struct clk_hw *hw,
return parent_rate / data->fixed_divider;
}
-static long rp1_pll_ph_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int rp1_pll_ph_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct rp1_clk_desc *pll_ph = container_of(hw, struct rp1_clk_desc, hw);
const struct rp1_pll_ph_data *data = pll_ph->data;
- return *parent_rate / data->fixed_divider;
+ req->rate = req->best_parent_rate / data->fixed_divider;
+
+ return 0;
}
static int rp1_pll_divider_is_on(struct clk_hw *hw)
@@ -754,11 +767,12 @@ static unsigned long rp1_pll_divider_recalc_rate(struct clk_hw *hw,
return clk_divider_ops.recalc_rate(hw, parent_rate);
}
-static long rp1_pll_divider_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *parent_rate)
+static int rp1_pll_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return clk_divider_ops.round_rate(hw, rate, parent_rate);
+ return clk_divider_ops.determine_rate(hw, req);
}
static int rp1_clock_is_on(struct clk_hw *hw)
@@ -964,6 +978,59 @@ static int rp1_clock_set_rate(struct clk_hw *hw, unsigned long rate,
return rp1_clock_set_rate_and_parent(hw, rate, parent_rate, 0xff);
}
+static unsigned long calc_core_pll_rate(struct clk_hw *pll_hw,
+ unsigned long target_rate,
+ int *pdiv_prim, int *pdiv_clk)
+{
+ static const int prim_divs[] = {
+ 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 15, 16,
+ 18, 20, 21, 24, 25, 28, 30, 35, 36, 42, 49,
+ };
+ const unsigned long xosc_rate = clk_hw_get_rate(clk_xosc);
+ const unsigned long core_min = xosc_rate * 16;
+ const unsigned long core_max = 2400000000;
+ int best_div_prim = 1, best_div_clk = 1;
+ unsigned long best_rate = core_max + 1;
+ unsigned long core_rate = 0;
+ int div_int, div_frac;
+ u64 div;
+ int i;
+
+ /* Given the target rate, choose a set of divisors/multipliers */
+ for (i = 0; i < ARRAY_SIZE(prim_divs); i++) {
+ int div_prim = prim_divs[i];
+ int div_clk;
+
+ for (div_clk = 1; div_clk <= 256; div_clk++) {
+ core_rate = target_rate * div_clk * div_prim;
+ if (core_rate >= core_min) {
+ if (core_rate < best_rate) {
+ best_rate = core_rate;
+ best_div_prim = div_prim;
+ best_div_clk = div_clk;
+ }
+ break;
+ }
+ }
+ }
+
+ if (best_rate < core_max) {
+ div = ((best_rate << 24) + xosc_rate / 2) / xosc_rate;
+ div_int = div >> 24;
+ div_frac = div % (1 << 24);
+ core_rate = (xosc_rate * ((div_int << 24) + div_frac) + (1 << 23)) >> 24;
+ } else {
+ core_rate = 0;
+ }
+
+ if (pdiv_prim)
+ *pdiv_prim = best_div_prim;
+ if (pdiv_clk)
+ *pdiv_clk = best_div_clk;
+
+ return core_rate;
+}
+
static void rp1_clock_choose_div_and_prate(struct clk_hw *hw,
int parent_idx,
unsigned long rate,
@@ -972,12 +1039,35 @@ static void rp1_clock_choose_div_and_prate(struct clk_hw *hw,
{
struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
const struct rp1_clock_data *data = clock->data;
+ struct clk_hw *clk_audio_hw = &clk_audio->hw;
+ struct clk_hw *clk_i2s_hw = &clk_i2s->hw;
struct clk_hw *parent;
u32 div;
u64 tmp;
parent = clk_hw_get_parent_by_index(hw, parent_idx);
+ if (hw == clk_i2s_hw && clk_i2s->cached_rate == rate && parent == clk_audio_hw) {
+ *prate = clk_audio->cached_rate;
+ *calc_rate = rate;
+ return;
+ }
+
+ if (hw == clk_i2s_hw && parent == clk_audio_hw) {
+ unsigned long core_rate, audio_rate, i2s_rate;
+ int div_prim, div_clk;
+
+ core_rate = calc_core_pll_rate(parent, rate, &div_prim, &div_clk);
+ audio_rate = DIV_ROUND_CLOSEST(core_rate, div_prim);
+ i2s_rate = DIV_ROUND_CLOSEST(audio_rate, div_clk);
+ clk_audio_core->cached_rate = core_rate;
+ clk_audio->cached_rate = audio_rate;
+ clk_i2s->cached_rate = i2s_rate;
+ *prate = audio_rate;
+ *calc_rate = i2s_rate;
+ return;
+ }
+
*prate = clk_hw_get_rate(parent);
div = rp1_clock_choose_div(rate, *prate, data);
@@ -1062,19 +1152,47 @@ static int rp1_clock_determine_rate(struct clk_hw *hw,
return 0;
}
+static int rp1_varsrc_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+
+ /*
+ * "varsrc" exists purely to let clock dividers know the frequency
+ * of an externally-managed clock source (such as MIPI DSI byte-clock)
+ * which may change at run-time as a side-effect of some other driver.
+ */
+ clock->cached_rate = rate;
+ return 0;
+}
+
+static unsigned long rp1_varsrc_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+
+ return clock->cached_rate;
+}
+
+static int rp1_varsrc_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ return 0;
+}
+
static const struct clk_ops rp1_pll_core_ops = {
.is_prepared = rp1_pll_core_is_on,
.prepare = rp1_pll_core_on,
.unprepare = rp1_pll_core_off,
.set_rate = rp1_pll_core_set_rate,
.recalc_rate = rp1_pll_core_recalc_rate,
- .round_rate = rp1_pll_core_round_rate,
+ .determine_rate = rp1_pll_core_determine_rate,
};
static const struct clk_ops rp1_pll_ops = {
.set_rate = rp1_pll_set_rate,
.recalc_rate = rp1_pll_recalc_rate,
- .round_rate = rp1_pll_round_rate,
+ .determine_rate = rp1_pll_determine_rate,
};
static const struct clk_ops rp1_pll_ph_ops = {
@@ -1082,7 +1200,7 @@ static const struct clk_ops rp1_pll_ph_ops = {
.prepare = rp1_pll_ph_on,
.unprepare = rp1_pll_ph_off,
.recalc_rate = rp1_pll_ph_recalc_rate,
- .round_rate = rp1_pll_ph_round_rate,
+ .determine_rate = rp1_pll_ph_determine_rate,
};
static const struct clk_ops rp1_pll_divider_ops = {
@@ -1091,7 +1209,7 @@ static const struct clk_ops rp1_pll_divider_ops = {
.unprepare = rp1_pll_divider_off,
.set_rate = rp1_pll_divider_set_rate,
.recalc_rate = rp1_pll_divider_recalc_rate,
- .round_rate = rp1_pll_divider_round_rate,
+ .determine_rate = rp1_pll_divider_determine_rate,
};
static const struct clk_ops rp1_clk_ops = {
@@ -1106,6 +1224,12 @@ static const struct clk_ops rp1_clk_ops = {
.determine_rate = rp1_clock_determine_rate,
};
+static const struct clk_ops rp1_varsrc_ops = {
+ .set_rate = rp1_varsrc_set_rate,
+ .recalc_rate = rp1_varsrc_recalc_rate,
+ .determine_rate = rp1_varsrc_determine_rate,
+};
+
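A varsrc clock has no rate logic of its own: another driver publishes the externally-managed frequency by calling clk_set_rate() on it, and rp1_varsrc_recalc_rate() simply reports the cached value. A hedged sketch of such a consumer (the function name is hypothetical; only clk_set_rate() is a real API):

    #include <linux/clk.h>

    /* Hypothetical consumer: a DSI host publishes its current byte-clock
     * frequency so child dividers see a correct parent rate. The call
     * lands in rp1_varsrc_set_rate(), which only caches the value. */
    static int dsi_publish_byteclk(struct clk *byteclk, unsigned long hz)
    {
            return clk_set_rate(byteclk, hz);
    }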
static struct clk_hw *rp1_register_pll(struct rp1_clockman *clockman,
struct rp1_clk_desc *desc)
{
@@ -1241,6 +1365,36 @@ static struct rp1_clk_desc pll_sys_desc = REGISTER_PLL(
)
);
+static struct rp1_clk_desc pll_audio_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_audio",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_audio_core_desc.hw }
+ },
+ &rp1_pll_ops,
+ CLK_SET_RATE_PARENT
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_AUDIO_PRIM,
+ .fc0_src = FC_NUM(4, 2),
+ )
+);
+
+static struct rp1_clk_desc pll_video_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_video",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_video_core_desc.hw }
+ },
+ &rp1_pll_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_VIDEO_PRIM,
+ .fc0_src = FC_NUM(3, 2),
+ )
+);
+
static struct rp1_clk_desc pll_sys_sec_desc = REGISTER_PLL_DIV(
.hw.init = CLK_HW_INIT_PARENTS_DATA(
"pll_sys_sec",
@@ -1256,16 +1410,42 @@ static struct rp1_clk_desc pll_sys_sec_desc = REGISTER_PLL_DIV(
)
);
+static struct rp1_clk_desc pll_video_sec_desc = REGISTER_PLL_DIV(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_video_sec",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_video_core_desc.hw }
+ },
+ &rp1_pll_divider_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_VIDEO_SEC,
+ .fc0_src = FC_NUM(5, 3),
+ )
+);
+
+static const struct clk_parent_data clk_eth_tsu_parents[] = {
+ { .index = 0 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
static struct rp1_clk_desc clk_eth_tsu_desc = REGISTER_CLK(
.hw.init = CLK_HW_INIT_PARENTS_DATA(
"clk_eth_tsu",
- (const struct clk_parent_data[]) { { .index = 0 } },
+ clk_eth_tsu_parents,
&rp1_clk_ops,
0
),
CLK_DATA(rp1_clock_data,
.num_std_parents = 0,
- .num_aux_parents = 1,
+ .num_aux_parents = 8,
.ctrl_reg = CLK_ETH_TSU_CTRL,
.div_int_reg = CLK_ETH_TSU_DIV_INT,
.sel_reg = CLK_ETH_TSU_SEL,
@@ -1278,6 +1458,7 @@ static struct rp1_clk_desc clk_eth_tsu_desc = REGISTER_CLK(
static const struct clk_parent_data clk_eth_parents[] = {
{ .hw = &pll_sys_sec_desc.div.hw },
{ .hw = &pll_sys_desc.hw },
+ { .hw = &pll_video_sec_desc.hw },
};
static struct rp1_clk_desc clk_eth_desc = REGISTER_CLK(
@@ -1289,7 +1470,7 @@ static struct rp1_clk_desc clk_eth_desc = REGISTER_CLK(
),
CLK_DATA(rp1_clock_data,
.num_std_parents = 0,
- .num_aux_parents = 2,
+ .num_aux_parents = 3,
.ctrl_reg = CLK_ETH_CTRL,
.div_int_reg = CLK_ETH_DIV_INT,
.sel_reg = CLK_ETH_SEL,
@@ -1342,6 +1523,756 @@ static struct rp1_clk_desc pll_sys_pri_ph_desc = REGISTER_PLL(
)
);
+static struct rp1_clk_desc pll_audio_pri_ph_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_audio_pri_ph",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_audio_desc.hw }
+ },
+ &rp1_pll_ph_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_ph_data,
+ .ph_reg = PLL_AUDIO_PRIM,
+ .fixed_divider = 2,
+ .phase = RP1_PLL_PHASE_0,
+ .fc0_src = FC_NUM(5, 1),
+ )
+);
+
+static struct rp1_clk_desc pll_video_pri_ph_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_video_pri_ph",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_video_desc.hw }
+ },
+ &rp1_pll_ph_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_ph_data,
+ .ph_reg = PLL_VIDEO_PRIM,
+ .fixed_divider = 2,
+ .phase = RP1_PLL_PHASE_0,
+ .fc0_src = FC_NUM(4, 3),
+ )
+);
+
+static struct rp1_clk_desc pll_audio_sec_desc = REGISTER_PLL_DIV(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_audio_sec",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_audio_core_desc.hw }
+ },
+ &rp1_pll_divider_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_AUDIO_SEC,
+ .fc0_src = FC_NUM(6, 2),
+ )
+);
+
+static struct rp1_clk_desc pll_audio_tern_desc = REGISTER_PLL_DIV(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_audio_tern",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_audio_core_desc.hw }
+ },
+ &rp1_pll_divider_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_AUDIO_TERN,
+ .fc0_src = FC_NUM(6, 2),
+ )
+);
+
+static struct rp1_clk_desc clk_slow_sys_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_slow_sys",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_clk_ops,
+ CLK_IS_CRITICAL
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 1,
+ .num_aux_parents = 0,
+ .ctrl_reg = CLK_SLOW_SYS_CTRL,
+ .div_int_reg = CLK_SLOW_SYS_DIV_INT,
+ .sel_reg = CLK_SLOW_SYS_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(1, 4),
+ .clk_src_mask = 0x1,
+ )
+);
+
+static const struct clk_parent_data clk_dma_parents[] = {
+ { .hw = &pll_sys_pri_ph_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_dma_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_dma",
+ clk_dma_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 3,
+ .ctrl_reg = CLK_DMA_CTRL,
+ .div_int_reg = CLK_DMA_DIV_INT,
+ .sel_reg = CLK_DMA_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(2, 2),
+ )
+);
+
+static const struct clk_parent_data clk_uart_parents[] = {
+ { .hw = &pll_sys_pri_ph_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_uart_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_uart",
+ clk_uart_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 3,
+ .ctrl_reg = CLK_UART_CTRL,
+ .div_int_reg = CLK_UART_DIV_INT,
+ .sel_reg = CLK_UART_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(6, 7),
+ )
+);
+
+static const struct clk_parent_data clk_pwm0_parents[] = {
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_pwm0_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_pwm0",
+ clk_pwm0_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 3,
+ .ctrl_reg = CLK_PWM0_CTRL,
+ .div_int_reg = CLK_PWM0_DIV_INT,
+ .div_frac_reg = CLK_PWM0_DIV_FRAC,
+ .sel_reg = CLK_PWM0_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 76800 * HZ_PER_KHZ,
+ .fc0_src = FC_NUM(0, 5),
+ )
+);
+
+static const struct clk_parent_data clk_pwm1_parents[] = {
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_pwm1_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_pwm1",
+ clk_pwm1_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 3,
+ .ctrl_reg = CLK_PWM1_CTRL,
+ .div_int_reg = CLK_PWM1_DIV_INT,
+ .div_frac_reg = CLK_PWM1_DIV_FRAC,
+ .sel_reg = CLK_PWM1_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 76800 * HZ_PER_KHZ,
+ .fc0_src = FC_NUM(1, 5),
+ )
+);
+
+static const struct clk_parent_data clk_audio_in_parents[] = {
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_audio_in_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_audio_in",
+ clk_audio_in_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 5,
+ .ctrl_reg = CLK_AUDIO_IN_CTRL,
+ .div_int_reg = CLK_AUDIO_IN_DIV_INT,
+ .sel_reg = CLK_AUDIO_IN_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 76800 * HZ_PER_KHZ,
+ .fc0_src = FC_NUM(2, 5),
+ )
+);
+
+static const struct clk_parent_data clk_audio_out_parents[] = {
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_audio_out_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_audio_out",
+ clk_audio_out_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 4,
+ .ctrl_reg = CLK_AUDIO_OUT_CTRL,
+ .div_int_reg = CLK_AUDIO_OUT_DIV_INT,
+ .sel_reg = CLK_AUDIO_OUT_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 153600 * HZ_PER_KHZ,
+ .fc0_src = FC_NUM(3, 5),
+ )
+);
+
+static const struct clk_parent_data clk_i2s_parents[] = {
+ { .index = 0 },
+ { .hw = &pll_audio_desc.hw },
+ { .hw = &pll_audio_sec_desc.hw },
+};
+
+static struct rp1_clk_desc clk_i2s_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_i2s",
+ clk_i2s_parents,
+ &rp1_clk_ops,
+ CLK_SET_RATE_PARENT
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 3,
+ .ctrl_reg = CLK_I2S_CTRL,
+ .div_int_reg = CLK_I2S_DIV_INT,
+ .sel_reg = CLK_I2S_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(4, 4),
+ )
+);
+
+static struct rp1_clk_desc clk_mipi0_cfg_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_mipi0_cfg",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 1,
+ .ctrl_reg = CLK_MIPI0_CFG_CTRL,
+ .div_int_reg = CLK_MIPI0_CFG_DIV_INT,
+ .sel_reg = CLK_MIPI0_CFG_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(4, 5),
+ )
+);
+
+static struct rp1_clk_desc clk_mipi1_cfg_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_mipi1_cfg",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 1,
+ .ctrl_reg = CLK_MIPI1_CFG_CTRL,
+ .div_int_reg = CLK_MIPI1_CFG_DIV_INT,
+ .sel_reg = CLK_MIPI1_CFG_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(5, 6),
+ .clk_src_mask = 0x1,
+ )
+);
+
+static struct rp1_clk_desc clk_adc_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_adc",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 1,
+ .ctrl_reg = CLK_ADC_CTRL,
+ .div_int_reg = CLK_ADC_DIV_INT,
+ .sel_reg = CLK_ADC_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(5, 5),
+ )
+);
+
+static struct rp1_clk_desc clk_sdio_timer_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_sdio_timer",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 1,
+ .ctrl_reg = CLK_SDIO_TIMER_CTRL,
+ .div_int_reg = CLK_SDIO_TIMER_DIV_INT,
+ .sel_reg = CLK_SDIO_TIMER_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(3, 4),
+ )
+);
+
+static struct rp1_clk_desc clk_sdio_alt_src_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_sdio_alt_src",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_sys_desc.hw }
+ },
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 1,
+ .ctrl_reg = CLK_SDIO_ALT_SRC_CTRL,
+ .div_int_reg = CLK_SDIO_ALT_SRC_DIV_INT,
+ .sel_reg = CLK_SDIO_ALT_SRC_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 200 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(5, 4),
+ )
+);
+
+static const struct clk_parent_data clk_dpi_parents[] = {
+ { .hw = &pll_sys_desc.hw },
+ { .hw = &pll_video_sec_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_dpi_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_dpi",
+ clk_dpi_parents,
+ &rp1_clk_ops,
+ CLK_SET_RATE_NO_REPARENT /* Let DPI driver set parent */
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 8,
+ .ctrl_reg = VIDEO_CLK_DPI_CTRL,
+ .div_int_reg = VIDEO_CLK_DPI_DIV_INT,
+ .sel_reg = VIDEO_CLK_DPI_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 200 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(1, 6),
+ )
+);
+
+static const struct clk_parent_data clk_gp0_parents[] = {
+ { .index = 0 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_sys_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_i2s_desc.hw },
+ { .hw = &clk_adc_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_sys_desc.hw },
+};
+
+static struct rp1_clk_desc clk_gp0_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp0",
+ clk_gp0_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(0),
+ .ctrl_reg = CLK_GP0_CTRL,
+ .div_int_reg = CLK_GP0_DIV_INT,
+ .div_frac_reg = CLK_GP0_DIV_FRAC,
+ .sel_reg = CLK_GP0_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(0, 1),
+ )
+);
+
+static const struct clk_parent_data clk_gp1_parents[] = {
+ { .hw = &clk_sdio_timer_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_sys_pri_ph_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_adc_desc.hw },
+ { .hw = &clk_dpi_desc.hw },
+ { .hw = &clk_pwm0_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_gp1_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp1",
+ clk_gp1_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(1),
+ .ctrl_reg = CLK_GP1_CTRL,
+ .div_int_reg = CLK_GP1_DIV_INT,
+ .div_frac_reg = CLK_GP1_DIV_FRAC,
+ .sel_reg = CLK_GP1_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(1, 1),
+ )
+);
+
+static struct rp1_clk_desc clksrc_mipi0_dsi_byteclk_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clksrc_mipi0_dsi_byteclk",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_varsrc_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 1,
+ .num_aux_parents = 0,
+ )
+);
+
+static struct rp1_clk_desc clksrc_mipi1_dsi_byteclk_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clksrc_mipi1_dsi_byteclk",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_varsrc_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 1,
+ .num_aux_parents = 0,
+ )
+);
+
+static const struct clk_parent_data clk_mipi0_dpi_parents[] = {
+ { .hw = &pll_sys_desc.hw },
+ { .hw = &pll_video_sec_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .hw = &clksrc_mipi0_dsi_byteclk_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_mipi0_dpi_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_mipi0_dpi",
+ clk_mipi0_dpi_parents,
+ &rp1_clk_ops,
+ CLK_SET_RATE_NO_REPARENT /* Let DSI driver set parent */
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 8,
+ .ctrl_reg = VIDEO_CLK_MIPI0_DPI_CTRL,
+ .div_int_reg = VIDEO_CLK_MIPI0_DPI_DIV_INT,
+ .div_frac_reg = VIDEO_CLK_MIPI0_DPI_DIV_FRAC,
+ .sel_reg = VIDEO_CLK_MIPI0_DPI_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 200 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(2, 6),
+ )
+);
+
+static const struct clk_parent_data clk_mipi1_dpi_parents[] = {
+ { .hw = &pll_sys_desc.hw },
+ { .hw = &pll_video_sec_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .hw = &clksrc_mipi1_dsi_byteclk_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_mipi1_dpi_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_mipi1_dpi",
+ clk_mipi1_dpi_parents,
+ &rp1_clk_ops,
+ CLK_SET_RATE_NO_REPARENT /* Let DSI driver set parent */
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 8,
+ .ctrl_reg = VIDEO_CLK_MIPI1_DPI_CTRL,
+ .div_int_reg = VIDEO_CLK_MIPI1_DPI_DIV_INT,
+ .div_frac_reg = VIDEO_CLK_MIPI1_DPI_DIV_FRAC,
+ .sel_reg = VIDEO_CLK_MIPI1_DPI_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 200 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(3, 6),
+ )
+);
+
+static const struct clk_parent_data clk_gp2_parents[] = {
+ { .hw = &clk_sdio_alt_src_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_sys_sec_desc.hw },
+ { .index = -1 },
+ { .hw = &pll_video_desc.hw },
+ { .hw = &clk_audio_in_desc.hw },
+ { .hw = &clk_dpi_desc.hw },
+ { .hw = &clk_pwm0_desc.hw },
+ { .hw = &clk_pwm1_desc.hw },
+ { .hw = &clk_mipi0_dpi_desc.hw },
+ { .hw = &clk_mipi1_cfg_desc.hw },
+ { .hw = &clk_sys_desc.hw },
+};
+
+static struct rp1_clk_desc clk_gp2_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp2",
+ clk_gp2_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(2),
+ .ctrl_reg = CLK_GP2_CTRL,
+ .div_int_reg = CLK_GP2_DIV_INT,
+ .div_frac_reg = CLK_GP2_DIV_FRAC,
+ .sel_reg = CLK_GP2_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(2, 1),
+ )
+);
+
+static const struct clk_parent_data clk_gp3_parents[] = {
+ { .index = 0 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_video_pri_ph_desc.hw },
+ { .hw = &clk_audio_out_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_mipi1_dpi_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_gp3_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp3",
+ clk_gp3_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(3),
+ .ctrl_reg = CLK_GP3_CTRL,
+ .div_int_reg = CLK_GP3_DIV_INT,
+ .div_frac_reg = CLK_GP3_DIV_FRAC,
+ .sel_reg = CLK_GP3_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(3, 1),
+ )
+);
+
+static const struct clk_parent_data clk_gp4_parents[] = {
+ { .index = 0 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_mipi0_cfg_desc.hw },
+ { .hw = &clk_uart_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_sys_desc.hw },
+};
+
+static struct rp1_clk_desc clk_gp4_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp4",
+ clk_gp4_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(4),
+ .ctrl_reg = CLK_GP4_CTRL,
+ .div_int_reg = CLK_GP4_DIV_INT,
+ .div_frac_reg = CLK_GP4_DIV_FRAC,
+ .sel_reg = CLK_GP4_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(4, 1),
+ )
+);
+
+static const struct clk_parent_data clk_vec_parents[] = {
+ { .hw = &pll_sys_pri_ph_desc.hw },
+ { .hw = &pll_video_sec_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_vec_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_vec",
+ clk_vec_parents,
+ &rp1_clk_ops,
+ CLK_SET_RATE_NO_REPARENT /* Let VEC driver set parent */
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 8,
+ .ctrl_reg = VIDEO_CLK_VEC_CTRL,
+ .div_int_reg = VIDEO_CLK_VEC_DIV_INT,
+ .sel_reg = VIDEO_CLK_VEC_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 108 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(0, 6),
+ )
+);
+
+static const struct clk_parent_data clk_gp5_parents[] = {
+ { .index = 0 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .hw = &clk_eth_tsu_desc.hw },
+ { .index = -1 },
+ { .hw = &clk_vec_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_gp5_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp5",
+ clk_gp5_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(5),
+ .ctrl_reg = CLK_GP5_CTRL,
+ .div_int_reg = CLK_GP5_DIV_INT,
+ .div_frac_reg = CLK_GP5_DIV_FRAC,
+ .sel_reg = CLK_GP5_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(5, 1),
+ )
+);
+
static struct rp1_clk_desc *const clk_desc_array[] = {
[RP1_PLL_SYS_CORE] = &pll_sys_core_desc,
[RP1_PLL_AUDIO_CORE] = &pll_audio_core_desc,
@@ -1352,6 +2283,38 @@ static struct rp1_clk_desc *const clk_desc_array[] = {
[RP1_CLK_SYS] = &clk_sys_desc,
[RP1_PLL_SYS_PRI_PH] = &pll_sys_pri_ph_desc,
[RP1_PLL_SYS_SEC] = &pll_sys_sec_desc,
+ [RP1_PLL_AUDIO] = &pll_audio_desc,
+ [RP1_PLL_VIDEO] = &pll_video_desc,
+ [RP1_PLL_AUDIO_PRI_PH] = &pll_audio_pri_ph_desc,
+ [RP1_PLL_VIDEO_PRI_PH] = &pll_video_pri_ph_desc,
+ [RP1_PLL_AUDIO_SEC] = &pll_audio_sec_desc,
+ [RP1_PLL_VIDEO_SEC] = &pll_video_sec_desc,
+ [RP1_PLL_AUDIO_TERN] = &pll_audio_tern_desc,
+ [RP1_CLK_SLOW_SYS] = &clk_slow_sys_desc,
+ [RP1_CLK_DMA] = &clk_dma_desc,
+ [RP1_CLK_UART] = &clk_uart_desc,
+ [RP1_CLK_PWM0] = &clk_pwm0_desc,
+ [RP1_CLK_PWM1] = &clk_pwm1_desc,
+ [RP1_CLK_AUDIO_IN] = &clk_audio_in_desc,
+ [RP1_CLK_AUDIO_OUT] = &clk_audio_out_desc,
+ [RP1_CLK_I2S] = &clk_i2s_desc,
+ [RP1_CLK_MIPI0_CFG] = &clk_mipi0_cfg_desc,
+ [RP1_CLK_MIPI1_CFG] = &clk_mipi1_cfg_desc,
+ [RP1_CLK_ADC] = &clk_adc_desc,
+ [RP1_CLK_SDIO_TIMER] = &clk_sdio_timer_desc,
+ [RP1_CLK_SDIO_ALT_SRC] = &clk_sdio_alt_src_desc,
+ [RP1_CLK_GP0] = &clk_gp0_desc,
+ [RP1_CLK_GP1] = &clk_gp1_desc,
+ [RP1_CLK_GP2] = &clk_gp2_desc,
+ [RP1_CLK_GP3] = &clk_gp3_desc,
+ [RP1_CLK_GP4] = &clk_gp4_desc,
+ [RP1_CLK_GP5] = &clk_gp5_desc,
+ [RP1_CLK_VEC] = &clk_vec_desc,
+ [RP1_CLK_DPI] = &clk_dpi_desc,
+ [RP1_CLK_MIPI0_DPI] = &clk_mipi0_dpi_desc,
+ [RP1_CLK_MIPI1_DPI] = &clk_mipi1_dpi_desc,
+ [RP1_CLK_MIPI0_DSI_BYTECLOCK] = &clksrc_mipi0_dsi_byteclk_desc,
+ [RP1_CLK_MIPI1_DSI_BYTECLOCK] = &clksrc_mipi1_dsi_byteclk_desc,
};
static const struct regmap_range rp1_reg_ranges[] = {
@@ -1466,6 +2429,11 @@ static int rp1_clk_probe(struct platform_device *pdev)
hws[i] = desc->clk_register(clockman, desc);
}
+ clk_audio_core = &pll_audio_core_desc;
+ clk_audio = &pll_audio_desc;
+ clk_i2s = &clk_i2s_desc;
+ clk_xosc = clk_hw_get_parent_by_index(&clk_i2s->hw, 0);
+
platform_set_drvdata(pdev, clockman);
return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
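calc_core_pll_rate() above re-derives the achievable audio-core VCO rate from an 8.24 fixed-point feedback divider, so the cached rates reflect what the PLL can actually produce rather than the raw request. A standalone model of that quantization step, assuming a 50 MHz crystal and an arbitrary target rate:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint64_t xosc = 50000000;       /* 50 MHz crystal (assumed) */
            const uint64_t target = 1536000000;   /* desired core VCO rate (assumed) */
            /* 8.24 fixed-point feedback divider, rounded to nearest */
            uint64_t div = ((target << 24) + xosc / 2) / xosc;
            uint64_t div_int = div >> 24;
            uint64_t div_frac = div & ((1ULL << 24) - 1);
            /* Re-derive the rate the PLL will actually produce */
            uint64_t actual = (xosc * ((div_int << 24) + div_frac) + (1 << 23)) >> 24;

            printf("div = %llu + %llu/2^24 -> %llu Hz\n",
                   (unsigned long long)div_int, (unsigned long long)div_frac,
                   (unsigned long long)actual);
            return 0;
    }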
diff --git a/drivers/clk/clk-rpmi.c b/drivers/clk/clk-rpmi.c
new file mode 100644
index 000000000000..921296aafa68
--- /dev/null
+++ b/drivers/clk/clk-rpmi.c
@@ -0,0 +1,620 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RISC-V MPXY Based Clock Driver
+ *
+ * Copyright (C) 2025 Ventana Micro Systems Ltd.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox/riscv-rpmi-message.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/wordpart.h>
+
+#define RPMI_CLK_DISCRETE_MAX_NUM_RATES 16
+#define RPMI_CLK_NAME_LEN 16
+
+#define to_rpmi_clk(clk) container_of(clk, struct rpmi_clk, hw)
+
+enum rpmi_clk_config {
+ RPMI_CLK_DISABLE = 0,
+ RPMI_CLK_ENABLE = 1,
+ RPMI_CLK_CONFIG_MAX_IDX
+};
+
+#define RPMI_CLK_TYPE_MASK GENMASK(1, 0)
+enum rpmi_clk_type {
+ RPMI_CLK_DISCRETE = 0,
+ RPMI_CLK_LINEAR = 1,
+ RPMI_CLK_TYPE_MAX_IDX
+};
+
+struct rpmi_clk_context {
+ struct device *dev;
+ struct mbox_chan *chan;
+ struct mbox_client client;
+ u32 max_msg_data_size;
+};
+
+/*
+ * rpmi_clk_rates represents the rate format exactly as
+ * specified by the RPMI specification. Keeping this layout
+ * (rather than, e.g., struct linear_range) avoids converting
+ * to and from another representation.
+ */
+union rpmi_clk_rates {
+ u64 discrete[RPMI_CLK_DISCRETE_MAX_NUM_RATES];
+ struct {
+ u64 min;
+ u64 max;
+ u64 step;
+ } linear;
+};
+
+struct rpmi_clk {
+ struct rpmi_clk_context *context;
+ u32 id;
+ u32 num_rates;
+ u32 transition_latency;
+ enum rpmi_clk_type type;
+ union rpmi_clk_rates *rates;
+ char name[RPMI_CLK_NAME_LEN];
+ struct clk_hw hw;
+};
+
+struct rpmi_clk_rate_discrete {
+ __le32 lo;
+ __le32 hi;
+};
+
+struct rpmi_clk_rate_linear {
+ __le32 min_lo;
+ __le32 min_hi;
+ __le32 max_lo;
+ __le32 max_hi;
+ __le32 step_lo;
+ __le32 step_hi;
+};
+
+struct rpmi_get_num_clocks_rx {
+ __le32 status;
+ __le32 num_clocks;
+};
+
+struct rpmi_get_attrs_tx {
+ __le32 clkid;
+};
+
+struct rpmi_get_attrs_rx {
+ __le32 status;
+ __le32 flags;
+ __le32 num_rates;
+ __le32 transition_latency;
+ char name[RPMI_CLK_NAME_LEN];
+};
+
+struct rpmi_get_supp_rates_tx {
+ __le32 clkid;
+ __le32 clk_rate_idx;
+};
+
+struct rpmi_get_supp_rates_rx {
+ __le32 status;
+ __le32 flags;
+ __le32 remaining;
+ __le32 returned;
+ __le32 rates[];
+};
+
+struct rpmi_get_rate_tx {
+ __le32 clkid;
+};
+
+struct rpmi_get_rate_rx {
+ __le32 status;
+ __le32 lo;
+ __le32 hi;
+};
+
+struct rpmi_set_rate_tx {
+ __le32 clkid;
+ __le32 flags;
+ __le32 lo;
+ __le32 hi;
+};
+
+struct rpmi_set_rate_rx {
+ __le32 status;
+};
+
+struct rpmi_set_config_tx {
+ __le32 clkid;
+ __le32 config;
+};
+
+struct rpmi_set_config_rx {
+ __le32 status;
+};
+
+static inline u64 rpmi_clkrate_u64(u32 __hi, u32 __lo)
+{
+ return (((u64)(__hi) << 32) | (u32)(__lo));
+}
+
+static u32 rpmi_clk_get_num_clocks(struct rpmi_clk_context *context)
+{
+ struct rpmi_get_num_clocks_rx rx, *resp;
+ struct rpmi_mbox_message msg;
+ int ret;
+
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_NUM_CLOCKS,
+ NULL, 0, &rx, sizeof(rx));
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return 0;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp || resp->status)
+ return 0;
+
+ return le32_to_cpu(resp->num_clocks);
+}
+
+static int rpmi_clk_get_attrs(u32 clkid, struct rpmi_clk *rpmi_clk)
+{
+ struct rpmi_clk_context *context = rpmi_clk->context;
+ struct rpmi_mbox_message msg;
+ struct rpmi_get_attrs_tx tx;
+ struct rpmi_get_attrs_rx rx, *resp;
+ u8 format;
+ int ret;
+
+ tx.clkid = cpu_to_le32(clkid);
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_ATTRIBUTES,
+ &tx, sizeof(tx), &rx, sizeof(rx));
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return ret;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp)
+ return -EINVAL;
+ if (resp->status)
+ return rpmi_to_linux_error(le32_to_cpu(resp->status));
+
+ rpmi_clk->id = clkid;
+ rpmi_clk->num_rates = le32_to_cpu(resp->num_rates);
+ rpmi_clk->transition_latency = le32_to_cpu(resp->transition_latency);
+ strscpy(rpmi_clk->name, resp->name, RPMI_CLK_NAME_LEN);
+
+ format = le32_to_cpu(resp->flags) & RPMI_CLK_TYPE_MASK;
+ if (format >= RPMI_CLK_TYPE_MAX_IDX)
+ return -EINVAL;
+
+ rpmi_clk->type = format;
+
+ return 0;
+}
+
+static int rpmi_clk_get_supported_rates(u32 clkid, struct rpmi_clk *rpmi_clk)
+{
+ struct rpmi_clk_context *context = rpmi_clk->context;
+ struct rpmi_clk_rate_discrete *rate_discrete;
+ struct rpmi_clk_rate_linear *rate_linear;
+ struct rpmi_get_supp_rates_tx tx;
+ struct rpmi_get_supp_rates_rx *resp;
+ struct rpmi_mbox_message msg;
+ size_t clk_rate_idx;
+ int ret, rateidx, j;
+
+ tx.clkid = cpu_to_le32(clkid);
+ tx.clk_rate_idx = 0;
+
+ /*
+ * Make sure the rx buffer we allocate is large enough to
+ * accommodate all the rates sent in one RPMI message.
+ */
+ struct rpmi_get_supp_rates_rx *rx __free(kfree) =
+ kzalloc(context->max_msg_data_size, GFP_KERNEL);
+ if (!rx)
+ return -ENOMEM;
+
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_SUPPORTED_RATES,
+ &tx, sizeof(tx), rx, context->max_msg_data_size);
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return ret;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp)
+ return -EINVAL;
+ if (resp->status)
+ return rpmi_to_linux_error(le32_to_cpu(resp->status));
+ if (!le32_to_cpu(resp->returned))
+ return -EINVAL;
+
+ if (rpmi_clk->type == RPMI_CLK_DISCRETE) {
+ rate_discrete = (struct rpmi_clk_rate_discrete *)resp->rates;
+
+ for (rateidx = 0; rateidx < le32_to_cpu(resp->returned); rateidx++) {
+ rpmi_clk->rates->discrete[rateidx] =
+ rpmi_clkrate_u64(le32_to_cpu(rate_discrete[rateidx].hi),
+ le32_to_cpu(rate_discrete[rateidx].lo));
+ }
+
+ /*
+ * Keep sending the request message until all
+ * the rates are received.
+ */
+ clk_rate_idx = 0;
+ while (le32_to_cpu(resp->remaining)) {
+ clk_rate_idx += le32_to_cpu(resp->returned);
+ tx.clk_rate_idx = cpu_to_le32(clk_rate_idx);
+
+ rpmi_mbox_init_send_with_response(&msg,
+ RPMI_CLK_SRV_GET_SUPPORTED_RATES,
+ &tx, sizeof(tx),
+ rx, context->max_msg_data_size);
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return ret;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp)
+ return -EINVAL;
+ if (resp->status)
+ return rpmi_to_linux_error(le32_to_cpu(resp->status));
+ if (!le32_to_cpu(resp->returned))
+ return -EINVAL;
+
+ for (j = 0; j < le32_to_cpu(resp->returned); j++) {
+ if (rateidx >= clk_rate_idx + le32_to_cpu(resp->returned))
+ break;
+ rpmi_clk->rates->discrete[rateidx++] =
+ rpmi_clkrate_u64(le32_to_cpu(rate_discrete[j].hi),
+ le32_to_cpu(rate_discrete[j].lo));
+ }
+ }
+ } else if (rpmi_clk->type == RPMI_CLK_LINEAR) {
+ rate_linear = (struct rpmi_clk_rate_linear *)resp->rates;
+
+ rpmi_clk->rates->linear.min = rpmi_clkrate_u64(le32_to_cpu(rate_linear->min_hi),
+ le32_to_cpu(rate_linear->min_lo));
+ rpmi_clk->rates->linear.max = rpmi_clkrate_u64(le32_to_cpu(rate_linear->max_hi),
+ le32_to_cpu(rate_linear->max_lo));
+ rpmi_clk->rates->linear.step = rpmi_clkrate_u64(le32_to_cpu(rate_linear->step_hi),
+ le32_to_cpu(rate_linear->step_lo));
+ }
+
+ return 0;
+}
+
+static unsigned long rpmi_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
+ struct rpmi_clk_context *context = rpmi_clk->context;
+ struct rpmi_mbox_message msg;
+ struct rpmi_get_rate_tx tx;
+ struct rpmi_get_rate_rx rx, *resp;
+ int ret;
+
+ tx.clkid = cpu_to_le32(rpmi_clk->id);
+
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_RATE,
+ &tx, sizeof(tx), &rx, sizeof(rx));
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return ret;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp)
+ return -EINVAL;
+ if (resp->status)
+ return rpmi_to_linux_error(le32_to_cpu(resp->status));
+
+ return rpmi_clkrate_u64(le32_to_cpu(resp->hi), le32_to_cpu(resp->lo));
+}
+
+static int rpmi_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
+ u64 fmin, fmax, ftmp;
+
+ /*
+ * Keep the requested rate if the clock uses the discrete
+ * format; let the platform that actually controls the clock
+ * pick the nearest supported rate.
+ */
+ if (rpmi_clk->type == RPMI_CLK_DISCRETE)
+ return 0;
+
+ fmin = rpmi_clk->rates->linear.min;
+ fmax = rpmi_clk->rates->linear.max;
+
+ if (req->rate <= fmin) {
+ req->rate = fmin;
+ return 0;
+ } else if (req->rate >= fmax) {
+ req->rate = fmax;
+ return 0;
+ }
+
+ ftmp = req->rate - fmin;
+ ftmp += rpmi_clk->rates->linear.step - 1;
+ do_div(ftmp, rpmi_clk->rates->linear.step);
+
+ req->rate = ftmp * rpmi_clk->rates->linear.step + fmin;
+
+ return 0;
+}
+
+static int rpmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
+ struct rpmi_clk_context *context = rpmi_clk->context;
+ struct rpmi_mbox_message msg;
+ struct rpmi_set_rate_tx tx;
+ struct rpmi_set_rate_rx rx, *resp;
+ int ret;
+
+ tx.clkid = cpu_to_le32(rpmi_clk->id);
+ tx.lo = cpu_to_le32(lower_32_bits(rate));
+ tx.hi = cpu_to_le32(upper_32_bits(rate));
+
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_SET_RATE,
+ &tx, sizeof(tx), &rx, sizeof(rx));
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return ret;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp)
+ return -EINVAL;
+ if (resp->status)
+ return rpmi_to_linux_error(le32_to_cpu(resp->status));
+
+ return 0;
+}
+
+static int rpmi_clk_enable(struct clk_hw *hw)
+{
+ struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
+ struct rpmi_clk_context *context = rpmi_clk->context;
+ struct rpmi_mbox_message msg;
+ struct rpmi_set_config_tx tx;
+ struct rpmi_set_config_rx rx, *resp;
+ int ret;
+
+ tx.config = cpu_to_le32(RPMI_CLK_ENABLE);
+ tx.clkid = cpu_to_le32(rpmi_clk->id);
+
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_SET_CONFIG,
+ &tx, sizeof(tx), &rx, sizeof(rx));
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return ret;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp)
+ return -EINVAL;
+ if (resp->status)
+ return rpmi_to_linux_error(le32_to_cpu(resp->status));
+
+ return 0;
+}
+
+static void rpmi_clk_disable(struct clk_hw *hw)
+{
+ struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
+ struct rpmi_clk_context *context = rpmi_clk->context;
+ struct rpmi_mbox_message msg;
+ struct rpmi_set_config_tx tx;
+ struct rpmi_set_config_rx rx;
+
+ tx.config = cpu_to_le32(RPMI_CLK_DISABLE);
+ tx.clkid = cpu_to_le32(rpmi_clk->id);
+
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_SET_CONFIG,
+ &tx, sizeof(tx), &rx, sizeof(rx));
+
+ rpmi_mbox_send_message(context->chan, &msg);
+}
+
+static const struct clk_ops rpmi_clk_ops = {
+ .recalc_rate = rpmi_clk_recalc_rate,
+ .determine_rate = rpmi_clk_determine_rate,
+ .set_rate = rpmi_clk_set_rate,
+ .prepare = rpmi_clk_enable,
+ .unprepare = rpmi_clk_disable,
+};
+
+static struct clk_hw *rpmi_clk_enumerate(struct rpmi_clk_context *context, u32 clkid)
+{
+ struct device *dev = context->dev;
+ unsigned long min_rate, max_rate;
+ union rpmi_clk_rates *rates;
+ struct rpmi_clk *rpmi_clk;
+ struct clk_init_data init = {};
+ struct clk_hw *clk_hw;
+ int ret;
+
+ rates = devm_kzalloc(dev, sizeof(*rates), GFP_KERNEL);
+ if (!rates)
+ return ERR_PTR(-ENOMEM);
+
+ rpmi_clk = devm_kzalloc(dev, sizeof(*rpmi_clk), GFP_KERNEL);
+ if (!rpmi_clk)
+ return ERR_PTR(-ENOMEM);
+
+ rpmi_clk->context = context;
+ rpmi_clk->rates = rates;
+
+ ret = rpmi_clk_get_attrs(clkid, rpmi_clk);
+ if (ret)
+ return dev_err_ptr_probe(dev, ret,
+ "Failed to get clk-%u attributes\n",
+ clkid);
+
+ ret = rpmi_clk_get_supported_rates(clkid, rpmi_clk);
+ if (ret)
+ return dev_err_ptr_probe(dev, ret,
+ "Get supported rates failed for clk-%u\n",
+ clkid);
+
+ init.flags = CLK_GET_RATE_NOCACHE;
+ init.num_parents = 0;
+ init.ops = &rpmi_clk_ops;
+ init.name = rpmi_clk->name;
+ clk_hw = &rpmi_clk->hw;
+ clk_hw->init = &init;
+
+ ret = devm_clk_hw_register(dev, clk_hw);
+ if (ret)
+ return dev_err_ptr_probe(dev, ret,
+ "Unable to register clk-%u\n",
+ clkid);
+
+ if (rpmi_clk->type == RPMI_CLK_DISCRETE) {
+ min_rate = rpmi_clk->rates->discrete[0];
+ max_rate = rpmi_clk->rates->discrete[rpmi_clk->num_rates - 1];
+ } else {
+ min_rate = rpmi_clk->rates->linear.min;
+ max_rate = rpmi_clk->rates->linear.max;
+ }
+
+ clk_hw_set_rate_range(clk_hw, min_rate, max_rate);
+
+ return clk_hw;
+}
+
+static void rpmi_clk_mbox_chan_release(void *data)
+{
+ struct mbox_chan *chan = data;
+
+ mbox_free_channel(chan);
+}
+
+static int rpmi_clk_probe(struct platform_device *pdev)
+{
+ int ret;
+ unsigned int num_clocks, i;
+ struct clk_hw_onecell_data *clk_data;
+ struct rpmi_clk_context *context;
+ struct rpmi_mbox_message msg;
+ struct clk_hw *hw_ptr;
+ struct device *dev = &pdev->dev;
+
+ context = devm_kzalloc(dev, sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+ context->dev = dev;
+ platform_set_drvdata(pdev, context);
+
+ context->client.dev = context->dev;
+ context->client.rx_callback = NULL;
+ context->client.tx_block = false;
+ context->client.knows_txdone = true;
+ context->client.tx_tout = 0;
+
+ context->chan = mbox_request_channel(&context->client, 0);
+ if (IS_ERR(context->chan))
+ return PTR_ERR(context->chan);
+
+ ret = devm_add_action_or_reset(dev, rpmi_clk_mbox_chan_release, context->chan);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add rpmi mbox channel cleanup\n");
+
+ rpmi_mbox_init_get_attribute(&msg, RPMI_MBOX_ATTR_SPEC_VERSION);
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get spec version\n");
+ if (msg.attr.value < RPMI_MKVER(1, 0)) {
+ return dev_err_probe(dev, -EINVAL,
+ "msg protocol version mismatch, expected 0x%x, found 0x%x\n",
+ RPMI_MKVER(1, 0), msg.attr.value);
+ }
+
+ rpmi_mbox_init_get_attribute(&msg, RPMI_MBOX_ATTR_SERVICEGROUP_ID);
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get service group ID\n");
+ if (msg.attr.value != RPMI_SRVGRP_CLOCK) {
+ return dev_err_probe(dev, -EINVAL,
+ "service group match failed, expected 0x%x, found 0x%x\n",
+ RPMI_SRVGRP_CLOCK, msg.attr.value);
+ }
+
+ rpmi_mbox_init_get_attribute(&msg, RPMI_MBOX_ATTR_SERVICEGROUP_VERSION);
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get service group version\n");
+ if (msg.attr.value < RPMI_MKVER(1, 0)) {
+ return dev_err_probe(dev, -EINVAL,
+ "service group version failed, expected 0x%x, found 0x%x\n",
+ RPMI_MKVER(1, 0), msg.attr.value);
+ }
+
+ rpmi_mbox_init_get_attribute(&msg, RPMI_MBOX_ATTR_MAX_MSG_DATA_SIZE);
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get max message data size\n");
+
+ context->max_msg_data_size = msg.attr.value;
+ num_clocks = rpmi_clk_get_num_clocks(context);
+ if (!num_clocks)
+ return dev_err_probe(dev, -ENODEV, "No clocks found\n");
+
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, num_clocks),
+ GFP_KERNEL);
+ if (!clk_data)
+ return dev_err_probe(dev, -ENOMEM, "No memory for clock data\n");
+ clk_data->num = num_clocks;
+
+ for (i = 0; i < clk_data->num; i++) {
+ hw_ptr = rpmi_clk_enumerate(context, i);
+ if (IS_ERR(hw_ptr)) {
+ return dev_err_probe(dev, PTR_ERR(hw_ptr),
+ "Failed to register clk-%d\n", i);
+ }
+ clk_data->hws[i] = hw_ptr;
+ }
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register clock HW provider\n");
+
+ return 0;
+}
+
+static const struct of_device_id rpmi_clk_of_match[] = {
+ { .compatible = "riscv,rpmi-clock" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rpmi_clk_of_match);
+
+static struct platform_driver rpmi_clk_driver = {
+ .driver = {
+ .name = "riscv-rpmi-clock",
+ .of_match_table = rpmi_clk_of_match,
+ },
+ .probe = rpmi_clk_probe,
+};
+module_platform_driver(rpmi_clk_driver);
+
+MODULE_AUTHOR("Rahul Pathak <rpathak@ventanamicro.com>");
+MODULE_DESCRIPTION("Clock Driver based on RPMI message protocol");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index d4e9c3577b35..ff7ce12a5da6 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -11,6 +11,7 @@
#include <linux/regmap.h>
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
+#include <linux/mfd/samsung/s2mpg10.h>
#include <linux/mfd/samsung/s2mps11.h>
#include <linux/mfd/samsung/s2mps13.h>
#include <linux/mfd/samsung/s2mps14.h>
@@ -140,6 +141,9 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
clk_data->num = S2MPS11_CLKS_NUM;
switch (hwid) {
+ case S2MPG10:
+ s2mps11_reg = S2MPG10_PMIC_RTCBUF;
+ break;
case S2MPS11X:
s2mps11_reg = S2MPS11_REG_RTC_CTRL;
break;
@@ -221,6 +225,7 @@ static void s2mps11_clk_remove(struct platform_device *pdev)
}
static const struct platform_device_id s2mps11_clk_id[] = {
+ { "s2mpg10-clk", S2MPG10},
{ "s2mps11-clk", S2MPS11X},
{ "s2mps13-clk", S2MPS13X},
{ "s2mps14-clk", S2MPS14X},
@@ -241,6 +246,9 @@ MODULE_DEVICE_TABLE(platform, s2mps11_clk_id);
*/
static const struct of_device_id s2mps11_dt_match[] __used = {
{
+ .compatible = "samsung,s2mpg10-clk",
+ .data = (void *)S2MPG10,
+ }, {
.compatible = "samsung,s2mps11-clk",
.data = (void *)S2MPS11X,
}, {
diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
index d2408403283f..6b286ea6f121 100644
--- a/drivers/clk/clk-scmi.c
+++ b/drivers/clk/clk-scmi.c
@@ -54,8 +54,8 @@ static unsigned long scmi_clk_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int scmi_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u64 fmin, fmax, ftmp;
struct scmi_clk *clk = to_scmi_clk(hw);
@@ -67,20 +67,27 @@ static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
* running at then.
*/
if (clk->info->rate_discrete)
- return rate;
+ return 0;
fmin = clk->info->range.min_rate;
fmax = clk->info->range.max_rate;
- if (rate <= fmin)
- return fmin;
- else if (rate >= fmax)
- return fmax;
+ if (req->rate <= fmin) {
+ req->rate = fmin;
+
+ return 0;
+ } else if (req->rate >= fmax) {
+ req->rate = fmax;
+
+ return 0;
+ }
- ftmp = rate - fmin;
+ ftmp = req->rate - fmin;
ftmp += clk->info->range.step_size - 1; /* to round up */
do_div(ftmp, clk->info->range.step_size);
- return ftmp * clk->info->range.step_size + fmin;
+ req->rate = ftmp * clk->info->range.step_size + fmin;
+
+ return 0;
}
static int scmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -119,15 +126,6 @@ static u8 scmi_clk_get_parent(struct clk_hw *hw)
return p_idx;
}
-static int scmi_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
-{
- /*
- * Suppose all the requested rates are supported, and let firmware
- * to handle the left work.
- */
- return 0;
-}
-
static int scmi_clk_enable(struct clk_hw *hw)
{
struct scmi_clk *clk = to_scmi_clk(hw);
@@ -300,7 +298,6 @@ scmi_clk_ops_alloc(struct device *dev, unsigned long feats_key)
/* Rate ops */
ops->recalc_rate = scmi_clk_recalc_rate;
- ops->round_rate = scmi_clk_round_rate;
ops->determine_rate = scmi_clk_determine_rate;
if (feats_key & BIT(SCMI_CLK_RATE_CTRL_SUPPORTED))
ops->set_rate = scmi_clk_set_rate;
@@ -349,6 +346,8 @@ scmi_clk_ops_select(struct scmi_clk *sclk, bool atomic_capable,
unsigned int atomic_threshold_us,
const struct clk_ops **clk_ops_db, size_t db_size)
{
+ int ret;
+ u32 val;
const struct scmi_clock_info *ci = sclk->info;
unsigned int feats_key = 0;
const struct clk_ops *ops;
@@ -370,8 +369,13 @@ scmi_clk_ops_select(struct scmi_clk *sclk, bool atomic_capable,
if (!ci->parent_ctrl_forbidden)
feats_key |= BIT(SCMI_CLK_PARENT_CTRL_SUPPORTED);
- if (ci->extended_config)
- feats_key |= BIT(SCMI_CLK_DUTY_CYCLE_SUPPORTED);
+ if (ci->extended_config) {
+ ret = scmi_proto_clk_ops->config_oem_get(sclk->ph, sclk->id,
+ SCMI_CLOCK_CFG_DUTY_CYCLE,
+ &val, NULL, false);
+ if (!ret)
+ feats_key |= BIT(SCMI_CLK_DUTY_CYCLE_SUPPORTED);
+ }
if (WARN_ON(feats_key >= db_size))
return NULL;
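The range branch of scmi_clk_determine_rate() above rounds the request up to the next supported step: take the offset from fmin, round it up to a multiple of step_size, and add fmin back, clamping at the range ends first. The same arithmetic in a standalone, runnable restatement:

#include <stdint.h>
#include <stdio.h>

/* Round rate up to fmin + n * step, clamped to [fmin, fmax]. */
static uint64_t round_to_step(uint64_t rate, uint64_t fmin, uint64_t fmax,
                              uint64_t step)
{
        uint64_t n;

        if (rate <= fmin)
                return fmin;
        if (rate >= fmax)
                return fmax;

        n = (rate - fmin + step - 1) / step;    /* round up */
        return fmin + n * step;
}

int main(void)
{
        /* 100 MHz..1 GHz range in 25 MHz steps: 310 MHz -> 325 MHz */
        printf("%llu\n",
               (unsigned long long)round_to_step(310000000, 100000000,
                                                 1000000000, 25000000));
        return 0;
}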
diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
index 19d530d52e64..0b592de7bdb2 100644
--- a/drivers/clk/clk-scpi.c
+++ b/drivers/clk/clk-scpi.c
@@ -32,8 +32,8 @@ static unsigned long scpi_clk_recalc_rate(struct clk_hw *hw,
return clk->scpi_ops->clk_get_val(clk->id);
}
-static long scpi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int scpi_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
/*
* We can't figure out what rate it will be, so just return the
@@ -41,7 +41,7 @@ static long scpi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
* after the rate is set and we'll know what rate the clock is
* running at then.
*/
- return rate;
+ return 0;
}
static int scpi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -54,7 +54,7 @@ static int scpi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops scpi_clk_ops = {
.recalc_rate = scpi_clk_recalc_rate,
- .round_rate = scpi_clk_round_rate,
+ .determine_rate = scpi_clk_determine_rate,
.set_rate = scpi_clk_set_rate,
};
@@ -92,12 +92,14 @@ static unsigned long scpi_dvfs_recalc_rate(struct clk_hw *hw,
return opp->freq;
}
-static long scpi_dvfs_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int scpi_dvfs_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct scpi_clk *clk = to_scpi_clk(hw);
- return __scpi_dvfs_round_rate(clk, rate);
+ req->rate = __scpi_dvfs_round_rate(clk, req->rate);
+
+ return 0;
}
static int __scpi_find_dvfs_index(struct scpi_clk *clk, unsigned long rate)
@@ -124,7 +126,7 @@ static int scpi_dvfs_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops scpi_dvfs_ops = {
.recalc_rate = scpi_dvfs_recalc_rate,
- .round_rate = scpi_dvfs_round_rate,
+ .determine_rate = scpi_dvfs_determine_rate,
.set_rate = scpi_dvfs_set_rate,
};
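clk-scpi.c shows, in its simplest form, the conversion this series applies tree-wide. The contract changes: .round_rate() returned the rounded rate (or a negative errno) and could adjust *parent_rate, while .determine_rate() reads and writes the rate through struct clk_rate_request and returns 0 or a negative errno. A skeleton of both shapes, where example_round() stands in for whatever rounding policy a driver implements:

#include <linux/clk-provider.h>

/* Illustrative policy only: clamp to the parent rate. */
static unsigned long example_round(unsigned long rate,
                                   unsigned long parent_rate)
{
        return rate < parent_rate ? rate : parent_rate;
}

/* Old shape: result travels through the return value. */
static long foo_round_rate(struct clk_hw *hw, unsigned long rate,
                           unsigned long *parent_rate)
{
        return example_round(rate, *parent_rate);
}

/* New shape: result travels through the request structure. */
static int foo_determine_rate(struct clk_hw *hw,
                              struct clk_rate_request *req)
{
        req->rate = example_round(req->rate, req->best_parent_rate);

        return 0;
}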
diff --git a/drivers/clk/clk-si514.c b/drivers/clk/clk-si514.c
index 1127c35ce57d..f61590d70575 100644
--- a/drivers/clk/clk-si514.c
+++ b/drivers/clk/clk-si514.c
@@ -227,20 +227,28 @@ static unsigned long si514_recalc_rate(struct clk_hw *hw,
return si514_calc_rate(&settings);
}
-static long si514_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int si514_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_si514_muldiv settings;
int err;
- if (!rate)
+ if (!req->rate) {
+ req->rate = 0;
+
return 0;
+ }
- err = si514_calc_muldiv(&settings, rate);
- if (err)
- return err;
+ err = si514_calc_muldiv(&settings, req->rate);
+ if (err) {
+ req->rate = err;
- return si514_calc_rate(&settings);
+ return 0;
+ }
+
+ req->rate = si514_calc_rate(&settings);
+
+ return 0;
}
/*
@@ -289,7 +297,7 @@ static const struct clk_ops si514_clk_ops = {
.unprepare = si514_unprepare,
.is_prepared = si514_is_prepared,
.recalc_rate = si514_recalc_rate,
- .round_rate = si514_round_rate,
+ .determine_rate = si514_determine_rate,
.set_rate = si514_set_rate,
};
diff --git a/drivers/clk/clk-si521xx.c b/drivers/clk/clk-si521xx.c
index 4f7b74f889f1..4ed4e1a5f4f2 100644
--- a/drivers/clk/clk-si521xx.c
+++ b/drivers/clk/clk-si521xx.c
@@ -164,15 +164,17 @@ static unsigned long si521xx_diff_recalc_rate(struct clk_hw *hw,
return (unsigned long)rate;
}
-static long si521xx_diff_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int si521xx_diff_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long best_parent;
- best_parent = (rate / SI521XX_DIFF_MULT) * SI521XX_DIFF_DIV;
- *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
+ best_parent = (req->rate / SI521XX_DIFF_MULT) * SI521XX_DIFF_DIV;
+ req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
- return (*prate / SI521XX_DIFF_DIV) * SI521XX_DIFF_MULT;
+ req->rate = (req->best_parent_rate / SI521XX_DIFF_DIV) * SI521XX_DIFF_MULT;
+
+ return 0;
}
static int si521xx_diff_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -208,7 +210,7 @@ static void si521xx_diff_unprepare(struct clk_hw *hw)
}
static const struct clk_ops si521xx_diff_clk_ops = {
- .round_rate = si521xx_diff_round_rate,
+ .determine_rate = si521xx_diff_determine_rate,
.set_rate = si521xx_diff_set_rate,
.recalc_rate = si521xx_diff_recalc_rate,
.prepare = si521xx_diff_prepare,
diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c
index 5004888c7eca..2499b771cd83 100644
--- a/drivers/clk/clk-si5341.c
+++ b/drivers/clk/clk-si5341.c
@@ -663,8 +663,8 @@ static unsigned long si5341_synth_clk_recalc_rate(struct clk_hw *hw,
return f;
}
-static long si5341_synth_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int si5341_synth_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_si5341_synth *synth = to_clk_si5341_synth(hw);
u64 f;
@@ -672,15 +672,21 @@ static long si5341_synth_clk_round_rate(struct clk_hw *hw, unsigned long rate,
/* The synthesizer accuracy is such that anything in range will work */
f = synth->data->freq_vco;
do_div(f, SI5341_SYNTH_N_MAX);
- if (rate < f)
- return f;
+ if (req->rate < f) {
+ req->rate = f;
+
+ return 0;
+ }
f = synth->data->freq_vco;
do_div(f, SI5341_SYNTH_N_MIN);
- if (rate > f)
- return f;
+ if (req->rate > f) {
+ req->rate = f;
- return rate;
+ return 0;
+ }
+
+ return 0;
}
static int si5341_synth_program(struct clk_si5341_synth *synth,
@@ -741,7 +747,7 @@ static const struct clk_ops si5341_synth_clk_ops = {
.prepare = si5341_synth_clk_prepare,
.unprepare = si5341_synth_clk_unprepare,
.recalc_rate = si5341_synth_clk_recalc_rate,
- .round_rate = si5341_synth_clk_round_rate,
+ .determine_rate = si5341_synth_clk_determine_rate,
.set_rate = si5341_synth_clk_set_rate,
};
diff --git a/drivers/clk/clk-si544.c b/drivers/clk/clk-si544.c
index ca3473efa314..09c06ecec1a5 100644
--- a/drivers/clk/clk-si544.c
+++ b/drivers/clk/clk-si544.c
@@ -307,16 +307,16 @@ static unsigned long si544_recalc_rate(struct clk_hw *hw,
return si544_calc_rate(&settings);
}
-static long si544_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int si544_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_si544 *data = to_clk_si544(hw);
- if (!is_valid_frequency(data, rate))
+ if (!is_valid_frequency(data, req->rate))
return -EINVAL;
/* The accuracy is less than 1 Hz, so any rate is possible */
- return rate;
+ return 0;
}
/* Calculates the maximum "small" change, 950 * rate / 1000000 */
@@ -408,7 +408,7 @@ static const struct clk_ops si544_clk_ops = {
.unprepare = si544_unprepare,
.is_prepared = si544_is_prepared,
.recalc_rate = si544_recalc_rate,
- .round_rate = si544_round_rate,
+ .determine_rate = si544_determine_rate,
.set_rate = si544_set_rate,
};
diff --git a/drivers/clk/clk-si570.c b/drivers/clk/clk-si570.c
index e97fe90443a6..b0b1830dd430 100644
--- a/drivers/clk/clk-si570.c
+++ b/drivers/clk/clk-si570.c
@@ -246,34 +246,40 @@ static unsigned long si570_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long si570_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int si570_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int err;
u64 rfreq;
unsigned int n1, hs_div;
struct clk_si570 *data = to_clk_si570(hw);
- if (!rate)
+ if (!req->rate) {
+ req->rate = 0;
+
return 0;
+ }
- if (div64_u64(abs(rate - data->frequency) * 10000LL,
+ if (div64_u64(abs(req->rate - data->frequency) * 10000LL,
data->frequency) < 35) {
- rfreq = div64_u64((data->rfreq * rate) +
- div64_u64(data->frequency, 2), data->frequency);
+ rfreq = div64_u64((data->rfreq * req->rate) +
+ div64_u64(data->frequency, 2),
+ data->frequency);
n1 = data->n1;
hs_div = data->hs_div;
} else {
- err = si570_calc_divs(rate, data, &rfreq, &n1, &hs_div);
+ err = si570_calc_divs(req->rate, data, &rfreq, &n1, &hs_div);
if (err) {
dev_err(&data->i2c_client->dev,
"unable to round rate\n");
+ req->rate = 0;
+
return 0;
}
}
- return rate;
+ return 0;
}
/**
@@ -368,7 +374,7 @@ static int si570_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops si570_clk_ops = {
.recalc_rate = si570_recalc_rate,
- .round_rate = si570_round_rate,
+ .determine_rate = si570_determine_rate,
.set_rate = si570_set_rate,
};
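The div64_u64(abs(req->rate - data->frequency) * 10000LL, data->frequency) < 35 test above is a 3500 ppm window: within 0.35% of the current output, si570 only rescales RFREQ and keeps the stored N1/HS_DIV dividers. The same test in plain, runnable C:

#include <stdint.h>
#include <stdio.h>

/* A requested rate within 3500 ppm of the current frequency reuses
 * the stored divider settings. */
static int within_3500_ppm(uint64_t rate, uint64_t frequency)
{
        uint64_t delta = rate > frequency ? rate - frequency
                                          : frequency - rate;

        return delta * 10000 / frequency < 35;
}

int main(void)
{
        printf("%d\n", within_3500_ppm(100034000, 100000000)); /* 1: 340 ppm */
        printf("%d\n", within_3500_ppm(101000000, 100000000)); /* 0: 10000 ppm */
        return 0;
}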
diff --git a/drivers/clk/clk-sp7021.c b/drivers/clk/clk-sp7021.c
index 95d66191df4b..36528a71a2e6 100644
--- a/drivers/clk/clk-sp7021.c
+++ b/drivers/clk/clk-sp7021.c
@@ -7,6 +7,7 @@
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/bitfield.h>
+#include <linux/hw_bitfield.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
@@ -38,13 +39,6 @@ enum {
#define MASK_DIVN GENMASK(7, 0)
#define MASK_DIVM GENMASK(14, 8)
-/* HIWORD_MASK FIELD_PREP */
-#define HWM_FIELD_PREP(mask, value) \
-({ \
- u64 _m = mask; \
- (_m << 16) | FIELD_PREP(_m, value); \
-})
-
struct sp_pll {
struct clk_hw hw;
void __iomem *reg;
@@ -313,15 +307,15 @@ static int plltv_set_rate(struct sp_pll *clk)
u32 r0, r1, r2;
r0 = BIT(clk->bp_bit + 16);
- r0 |= HWM_FIELD_PREP(MASK_SEL_FRA, clk->p[SEL_FRA]);
- r0 |= HWM_FIELD_PREP(MASK_SDM_MOD, clk->p[SDM_MOD]);
- r0 |= HWM_FIELD_PREP(MASK_PH_SEL, clk->p[PH_SEL]);
- r0 |= HWM_FIELD_PREP(MASK_NFRA, clk->p[NFRA]);
+ r0 |= FIELD_PREP_WM16(MASK_SEL_FRA, clk->p[SEL_FRA]);
+ r0 |= FIELD_PREP_WM16(MASK_SDM_MOD, clk->p[SDM_MOD]);
+ r0 |= FIELD_PREP_WM16(MASK_PH_SEL, clk->p[PH_SEL]);
+ r0 |= FIELD_PREP_WM16(MASK_NFRA, clk->p[NFRA]);
- r1 = HWM_FIELD_PREP(MASK_DIVR, clk->p[DIVR]);
+ r1 = FIELD_PREP_WM16(MASK_DIVR, clk->p[DIVR]);
- r2 = HWM_FIELD_PREP(MASK_DIVN, clk->p[DIVN] - 1);
- r2 |= HWM_FIELD_PREP(MASK_DIVM, clk->p[DIVM] - 1);
+ r2 = FIELD_PREP_WM16(MASK_DIVN, clk->p[DIVN] - 1);
+ r2 |= FIELD_PREP_WM16(MASK_DIVM, clk->p[DIVM] - 1);
spin_lock_irqsave(&clk->lock, flags);
writel(r0, clk->reg);
@@ -412,25 +406,27 @@ static long sp_pll_calc_div(struct sp_pll *clk, unsigned long rate)
return fbdiv;
}
-static long sp_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int sp_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct sp_pll *clk = to_sp_pll(hw);
long ret;
- if (rate == *prate) {
- ret = *prate; /* bypass */
+ if (req->rate == req->best_parent_rate) {
+ ret = req->best_parent_rate; /* bypass */
} else if (clk->div_width == DIV_A) {
- ret = plla_round_rate(clk, rate);
+ ret = plla_round_rate(clk, req->rate);
} else if (clk->div_width == DIV_TV) {
- ret = plltv_div(clk, rate);
+ ret = plltv_div(clk, req->rate);
if (ret < 0)
- ret = *prate;
+ ret = req->best_parent_rate;
} else {
- ret = sp_pll_calc_div(clk, rate) * clk->brate;
+ ret = sp_pll_calc_div(clk, req->rate) * clk->brate;
}
- return ret;
+ req->rate = ret;
+
+ return 0;
}
static unsigned long sp_pll_recalc_rate(struct clk_hw *hw,
@@ -535,7 +531,7 @@ static const struct clk_ops sp_pll_ops = {
.enable = sp_pll_enable,
.disable = sp_pll_disable,
.is_enabled = sp_pll_is_enabled,
- .round_rate = sp_pll_round_rate,
+ .determine_rate = sp_pll_determine_rate,
.recalc_rate = sp_pll_recalc_rate,
.set_rate = sp_pll_set_rate
};
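FIELD_PREP_WM16() from <linux/hw_bitfield.h> is a drop-in for the driver-local HWM_FIELD_PREP() it deletes: the low 16 bits carry the field value positioned by the mask, the high 16 bits repeat the mask as write-enable bits, so a single writel() updates only the intended field. The arithmetic in standalone form (valid for masks confined to bits 15:0):

#include <stdint.h>
#include <stdio.h>

/* Hi-word-mask encoding: value in the low half, the same mask in the
 * high half acting as per-bit write enables. */
static uint32_t hwm_field_prep(uint32_t mask, uint32_t value)
{
        unsigned int shift = __builtin_ctz(mask);       /* lowest set bit */

        return (mask << 16) | ((value << shift) & mask);
}

int main(void)
{
        /* MASK_DIVN is GENMASK(7, 0) in the driver above */
        printf("%#010x\n", hwm_field_prep(0xff, 25));   /* 0x00ff0019 */
        return 0;
}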
diff --git a/drivers/clk/clk-sparx5.c b/drivers/clk/clk-sparx5.c
index 0fad0c1a0186..b2facc9c95d4 100644
--- a/drivers/clk/clk-sparx5.c
+++ b/drivers/clk/clk-sparx5.c
@@ -213,19 +213,21 @@ static unsigned long s5_pll_recalc_rate(struct clk_hw *hw,
return conf.freq;
}
-static long s5_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int s5_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct s5_pll_conf conf;
- return s5_calc_params(rate, *parent_rate, &conf);
+ req->rate = s5_calc_params(req->rate, req->best_parent_rate, &conf);
+
+ return 0;
}
static const struct clk_ops s5_pll_ops = {
.enable = s5_pll_enable,
.disable = s5_pll_disable,
.set_rate = s5_pll_set_rate,
- .round_rate = s5_pll_round_rate,
+ .determine_rate = s5_pll_determine_rate,
.recalc_rate = s5_pll_recalc_rate,
};
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index 719cddc82ae6..b5d4d48432a0 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -443,8 +443,8 @@ static unsigned long clk_apb_mul_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long clk_apb_mul_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_apb_mul_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_apb_mul *am = to_clk_apb_mul(hw);
unsigned long mult = 1;
@@ -453,12 +453,14 @@ static long clk_apb_mul_round_rate(struct clk_hw *hw, unsigned long rate,
mult = 2;
if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
- unsigned long best_parent = rate / mult;
+ unsigned long best_parent = req->rate / mult;
- *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
+ req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
}
- return *prate * mult;
+ req->rate = req->best_parent_rate * mult;
+
+ return 0;
}
static int clk_apb_mul_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -474,7 +476,7 @@ static int clk_apb_mul_set_rate(struct clk_hw *hw, unsigned long rate,
}
static const struct clk_ops clk_apb_mul_factor_ops = {
- .round_rate = clk_apb_mul_round_rate,
+ .determine_rate = clk_apb_mul_determine_rate,
.set_rate = clk_apb_mul_set_rate,
.recalc_rate = clk_apb_mul_recalc_rate,
};
@@ -670,21 +672,23 @@ static unsigned long stm32f4_pll_recalc(struct clk_hw *hw,
return parent_rate * n;
}
-static long stm32f4_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int stm32f4_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_gate *gate = to_clk_gate(hw);
struct stm32f4_pll *pll = to_stm32f4_pll(gate);
unsigned long n;
- n = rate / *prate;
+ n = req->rate / req->best_parent_rate;
if (n < pll->n_start)
n = pll->n_start;
else if (n > 432)
n = 432;
- return *prate * n;
+ req->rate = req->best_parent_rate * n;
+
+ return 0;
}
static void stm32f4_pll_set_ssc(struct clk_hw *hw, unsigned long parent_rate,
@@ -749,7 +753,7 @@ static const struct clk_ops stm32f4_pll_gate_ops = {
.disable = stm32f4_pll_disable,
.is_enabled = stm32f4_pll_is_enabled,
.recalc_rate = stm32f4_pll_recalc,
- .round_rate = stm32f4_pll_round_rate,
+ .determine_rate = stm32f4_pll_determine_rate,
.set_rate = stm32f4_pll_set_rate,
};
diff --git a/drivers/clk/clk-tps68470.c b/drivers/clk/clk-tps68470.c
index 38f44b5b9b1b..9511248c6bc9 100644
--- a/drivers/clk/clk-tps68470.c
+++ b/drivers/clk/clk-tps68470.c
@@ -146,12 +146,14 @@ static unsigned int tps68470_clk_cfg_lookup(unsigned long rate)
return best_idx;
}
-static long tps68470_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int tps68470_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned int idx = tps68470_clk_cfg_lookup(rate);
+ unsigned int idx = tps68470_clk_cfg_lookup(req->rate);
+
+ req->rate = clk_freqs[idx].freq;
- return clk_freqs[idx].freq;
+ return 0;
}
static int tps68470_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -186,7 +188,7 @@ static const struct clk_ops tps68470_clk_ops = {
.prepare = tps68470_clk_prepare,
.unprepare = tps68470_clk_unprepare,
.recalc_rate = tps68470_clk_recalc_rate,
- .round_rate = tps68470_clk_round_rate,
+ .determine_rate = tps68470_clk_determine_rate,
.set_rate = tps68470_clk_set_rate,
};
diff --git a/drivers/clk/clk-versaclock3.c b/drivers/clk/clk-versaclock3.c
index 9fe27dace111..1849863dbd67 100644
--- a/drivers/clk/clk-versaclock3.c
+++ b/drivers/clk/clk-versaclock3.c
@@ -289,22 +289,25 @@ static unsigned long vc3_pfd_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long vc3_pfd_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc3_pfd_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_pfd_data *pfd = vc3->data;
unsigned long idiv;
/* PLL cannot operate with input clock above 50 MHz. */
- if (rate > 50000000)
+ if (req->rate > 50000000)
return -EINVAL;
/* CLKIN within range of PLL input, feed directly to PLL. */
- if (*parent_rate <= 50000000)
- return *parent_rate;
+ if (req->best_parent_rate <= 50000000) {
+ req->rate = req->best_parent_rate;
- idiv = DIV_ROUND_UP(*parent_rate, rate);
+ return 0;
+ }
+
+ idiv = DIV_ROUND_UP(req->best_parent_rate, req->rate);
if (pfd->num == VC3_PFD1 || pfd->num == VC3_PFD3) {
if (idiv > 63)
return -EINVAL;
@@ -313,7 +316,9 @@ static long vc3_pfd_round_rate(struct clk_hw *hw, unsigned long rate,
return -EINVAL;
}
- return *parent_rate / idiv;
+ req->rate = req->best_parent_rate / idiv;
+
+ return 0;
}
static int vc3_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -354,7 +359,7 @@ static int vc3_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc3_pfd_ops = {
.recalc_rate = vc3_pfd_recalc_rate,
- .round_rate = vc3_pfd_round_rate,
+ .determine_rate = vc3_pfd_determine_rate,
.set_rate = vc3_pfd_set_rate,
};
@@ -385,36 +390,38 @@ static unsigned long vc3_pll_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long vc3_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc3_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_pll_data *pll = vc3->data;
u64 div_frc;
- if (rate < pll->vco.min)
- rate = pll->vco.min;
- if (rate > pll->vco.max)
- rate = pll->vco.max;
+ if (req->rate < pll->vco.min)
+ req->rate = pll->vco.min;
+ if (req->rate > pll->vco.max)
+ req->rate = pll->vco.max;
- vc3->div_int = rate / *parent_rate;
+ vc3->div_int = req->rate / req->best_parent_rate;
if (pll->num == VC3_PLL2) {
if (vc3->div_int > 0x7ff)
- rate = *parent_rate * 0x7ff;
+ req->rate = req->best_parent_rate * 0x7ff;
/* Determine best fractional part, which is 16 bit wide */
- div_frc = rate % *parent_rate;
+ div_frc = req->rate % req->best_parent_rate;
div_frc *= BIT(16) - 1;
- vc3->div_frc = min_t(u64, div64_ul(div_frc, *parent_rate), U16_MAX);
- rate = (*parent_rate *
- (vc3->div_int * VC3_2_POW_16 + vc3->div_frc) / VC3_2_POW_16);
+ vc3->div_frc = min_t(u64,
+ div64_ul(div_frc, req->best_parent_rate),
+ U16_MAX);
+ req->rate = (req->best_parent_rate *
+ (vc3->div_int * VC3_2_POW_16 + vc3->div_frc) / VC3_2_POW_16);
} else {
- rate = *parent_rate * vc3->div_int;
+ req->rate = req->best_parent_rate * vc3->div_int;
}
- return rate;
+ return 0;
}
static int vc3_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -441,7 +448,7 @@ static int vc3_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc3_pll_ops = {
.recalc_rate = vc3_pll_recalc_rate,
- .round_rate = vc3_pll_round_rate,
+ .determine_rate = vc3_pll_determine_rate,
.set_rate = vc3_pll_set_rate,
};
@@ -498,8 +505,8 @@ static unsigned long vc3_div_recalc_rate(struct clk_hw *hw,
div_data->flags, div_data->width);
}
-static long vc3_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc3_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_div_data *div_data = vc3->data;
@@ -511,11 +518,16 @@ static long vc3_div_round_rate(struct clk_hw *hw, unsigned long rate,
bestdiv >>= div_data->shift;
bestdiv &= VC3_DIV_MASK(div_data->width);
bestdiv = vc3_get_div(div_data->table, bestdiv, div_data->flags);
- return DIV_ROUND_UP(*parent_rate, bestdiv);
+ req->rate = DIV_ROUND_UP(req->best_parent_rate, bestdiv);
+
+ return 0;
}
- return divider_round_rate(hw, rate, parent_rate, div_data->table,
- div_data->width, div_data->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ div_data->table,
+ div_data->width, div_data->flags);
+
+ return 0;
}
static int vc3_div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -534,7 +546,7 @@ static int vc3_div_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc3_div_ops = {
.recalc_rate = vc3_div_recalc_rate,
- .round_rate = vc3_div_round_rate,
+ .determine_rate = vc3_div_determine_rate,
.set_rate = vc3_div_set_rate,
};
diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
index 4200022d2084..57228e88e81d 100644
--- a/drivers/clk/clk-versaclock5.c
+++ b/drivers/clk/clk-versaclock5.c
@@ -304,11 +304,11 @@ static unsigned long vc5_dbl_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long vc5_dbl_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc5_dbl_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- if ((*parent_rate == rate) || ((*parent_rate * 2) == rate))
- return rate;
+ if ((req->best_parent_rate == req->rate) || ((req->best_parent_rate * 2) == req->rate))
+ return 0;
else
return -EINVAL;
}
@@ -332,7 +332,7 @@ static int vc5_dbl_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc5_dbl_ops = {
.recalc_rate = vc5_dbl_recalc_rate,
- .round_rate = vc5_dbl_round_rate,
+ .determine_rate = vc5_dbl_determine_rate,
.set_rate = vc5_dbl_set_rate,
};
@@ -363,24 +363,29 @@ static unsigned long vc5_pfd_recalc_rate(struct clk_hw *hw,
return parent_rate / VC5_REF_DIVIDER_REF_DIV(div);
}
-static long vc5_pfd_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc5_pfd_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long idiv;
/* PLL cannot operate with input clock above 50 MHz. */
- if (rate > 50000000)
+ if (req->rate > 50000000)
return -EINVAL;
/* CLKIN within range of PLL input, feed directly to PLL. */
- if (*parent_rate <= 50000000)
- return *parent_rate;
+ if (req->best_parent_rate <= 50000000) {
+ req->rate = req->best_parent_rate;
+
+ return 0;
+ }
- idiv = DIV_ROUND_UP(*parent_rate, rate);
+ idiv = DIV_ROUND_UP(req->best_parent_rate, req->rate);
if (idiv > 127)
return -EINVAL;
- return *parent_rate / idiv;
+ req->rate = req->best_parent_rate / idiv;
+
+ return 0;
}
static int vc5_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -420,7 +425,7 @@ static int vc5_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc5_pfd_ops = {
.recalc_rate = vc5_pfd_recalc_rate,
- .round_rate = vc5_pfd_round_rate,
+ .determine_rate = vc5_pfd_determine_rate,
.set_rate = vc5_pfd_set_rate,
};
@@ -444,30 +449,32 @@ static unsigned long vc5_pll_recalc_rate(struct clk_hw *hw,
return (parent_rate * div_int) + ((parent_rate * div_frc) >> 24);
}
-static long vc5_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc5_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw);
struct vc5_driver_data *vc5 = hwdata->vc5;
u32 div_int;
u64 div_frc;
- rate = clamp(rate, VC5_PLL_VCO_MIN, vc5->chip_info->vco_max);
+ req->rate = clamp(req->rate, VC5_PLL_VCO_MIN, vc5->chip_info->vco_max);
/* Determine integer part, which is 12 bit wide */
- div_int = rate / *parent_rate;
+ div_int = req->rate / req->best_parent_rate;
if (div_int > 0xfff)
- rate = *parent_rate * 0xfff;
+ req->rate = req->best_parent_rate * 0xfff;
/* Determine best fractional part, which is 24 bit wide */
- div_frc = rate % *parent_rate;
+ div_frc = req->rate % req->best_parent_rate;
div_frc *= BIT(24) - 1;
- do_div(div_frc, *parent_rate);
+ do_div(div_frc, req->best_parent_rate);
hwdata->div_int = div_int;
hwdata->div_frc = (u32)div_frc;
- return (*parent_rate * div_int) + ((*parent_rate * div_frc) >> 24);
+ req->rate = (req->best_parent_rate * div_int) + ((req->best_parent_rate * div_frc) >> 24);
+
+ return 0;
}
static int vc5_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -488,7 +495,7 @@ static int vc5_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc5_pll_ops = {
.recalc_rate = vc5_pll_recalc_rate,
- .round_rate = vc5_pll_round_rate,
+ .determine_rate = vc5_pll_determine_rate,
.set_rate = vc5_pll_set_rate,
};
@@ -520,17 +527,17 @@ static unsigned long vc5_fod_recalc_rate(struct clk_hw *hw,
return div64_u64((u64)f_in << 24ULL, ((u64)div_int << 24ULL) + div_frc);
}
-static long vc5_fod_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc5_fod_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw);
/* VCO frequency is divided by two before entering FOD */
- u32 f_in = *parent_rate / 2;
+ u32 f_in = req->best_parent_rate / 2;
u32 div_int;
u64 div_frc;
/* Determine integer part, which is 12 bit wide */
- div_int = f_in / rate;
+ div_int = f_in / req->rate;
/*
* WARNING: The clock chip does not output signal if the integer part
* of the divider is 0xfff and fractional part is non-zero.
@@ -538,18 +545,20 @@ static long vc5_fod_round_rate(struct clk_hw *hw, unsigned long rate,
*/
if (div_int > 0xffe) {
div_int = 0xffe;
- rate = f_in / div_int;
+ req->rate = f_in / div_int;
}
/* Determine best fractional part, which is 30 bit wide */
- div_frc = f_in % rate;
+ div_frc = f_in % req->rate;
div_frc <<= 24;
- do_div(div_frc, rate);
+ do_div(div_frc, req->rate);
hwdata->div_int = div_int;
hwdata->div_frc = (u32)div_frc;
- return div64_u64((u64)f_in << 24ULL, ((u64)div_int << 24ULL) + div_frc);
+ req->rate = div64_u64((u64)f_in << 24ULL, ((u64)div_int << 24ULL) + div_frc);
+
+ return 0;
}
static int vc5_fod_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -589,7 +598,7 @@ static int vc5_fod_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc5_fod_ops = {
.recalc_rate = vc5_fod_recalc_rate,
- .round_rate = vc5_fod_round_rate,
+ .determine_rate = vc5_fod_determine_rate,
.set_rate = vc5_fod_set_rate,
};
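The VC5 PLL path above splits the feedback divider into a 12-bit integer part and a 24-bit fractional part; the predicted output is parent * div_int plus the fractional contribution shifted back down by 24 bits. A standalone restatement of that fixed-point rounding:

#include <stdint.h>
#include <stdio.h>

static uint64_t vc5_like_round(uint64_t rate, uint64_t parent)
{
        uint32_t div_int = rate / parent;               /* 12-bit field */
        uint64_t div_frc = (rate % parent) * ((1ULL << 24) - 1) / parent;

        return parent * div_int + ((parent * div_frc) >> 24);
}

int main(void)
{
        /* 2805 MHz from a 25 MHz reference: prints 2804999999 */
        printf("%llu\n",
               (unsigned long long)vc5_like_round(2805000000ULL,
                                                  25000000ULL));
        return 0;
}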
diff --git a/drivers/clk/clk-versaclock7.c b/drivers/clk/clk-versaclock7.c
index 483285b30c13..adcc603e3259 100644
--- a/drivers/clk/clk-versaclock7.c
+++ b/drivers/clk/clk-versaclock7.c
@@ -900,17 +900,18 @@ static unsigned long vc7_fod_recalc_rate(struct clk_hw *hw, unsigned long parent
return fod_rate;
}
-static long vc7_fod_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate)
+static int vc7_fod_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc7_fod_data *fod = container_of(hw, struct vc7_fod_data, hw);
unsigned long fod_rate;
pr_debug("%s - %s: requested rate: %lu, parent_rate: %lu\n",
- __func__, clk_hw_get_name(hw), rate, *parent_rate);
+ __func__, clk_hw_get_name(hw), req->rate, req->best_parent_rate);
- vc7_calc_fod_divider(rate, *parent_rate,
+ vc7_calc_fod_divider(req->rate, req->best_parent_rate,
&fod->fod_1st_int, &fod->fod_2nd_int, &fod->fod_frac);
- fod_rate = vc7_calc_fod_2nd_stage_rate(*parent_rate, fod->fod_1st_int,
+ fod_rate = vc7_calc_fod_2nd_stage_rate(req->best_parent_rate, fod->fod_1st_int,
fod->fod_2nd_int, fod->fod_frac);
pr_debug("%s - %s: fod_1st_int: %u, fod_2nd_int: %u, fod_frac: %llu\n",
@@ -918,7 +919,9 @@ static long vc7_fod_round_rate(struct clk_hw *hw, unsigned long rate, unsigned l
fod->fod_1st_int, fod->fod_2nd_int, fod->fod_frac);
pr_debug("%s - %s rate: %lu\n", __func__, clk_hw_get_name(hw), fod_rate);
- return fod_rate;
+ req->rate = fod_rate;
+
+ return 0;
}
static int vc7_fod_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate)
@@ -952,7 +955,7 @@ static int vc7_fod_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long
static const struct clk_ops vc7_fod_ops = {
.recalc_rate = vc7_fod_recalc_rate,
- .round_rate = vc7_fod_round_rate,
+ .determine_rate = vc7_fod_determine_rate,
.set_rate = vc7_fod_set_rate,
};
@@ -978,21 +981,24 @@ static unsigned long vc7_iod_recalc_rate(struct clk_hw *hw, unsigned long parent
return iod_rate;
}
-static long vc7_iod_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate)
+static int vc7_iod_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc7_iod_data *iod = container_of(hw, struct vc7_iod_data, hw);
unsigned long iod_rate;
pr_debug("%s - %s: requested rate: %lu, parent_rate: %lu\n",
- __func__, clk_hw_get_name(hw), rate, *parent_rate);
+ __func__, clk_hw_get_name(hw), req->rate, req->best_parent_rate);
- vc7_calc_iod_divider(rate, *parent_rate, &iod->iod_int);
- iod_rate = div64_u64(*parent_rate, iod->iod_int);
+ vc7_calc_iod_divider(req->rate, req->best_parent_rate, &iod->iod_int);
+ iod_rate = div64_u64(req->best_parent_rate, iod->iod_int);
pr_debug("%s - %s: iod_int: %u\n", __func__, clk_hw_get_name(hw), iod->iod_int);
pr_debug("%s - %s rate: %ld\n", __func__, clk_hw_get_name(hw), iod_rate);
- return iod_rate;
+ req->rate = iod_rate;
+
+ return 0;
}
static int vc7_iod_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate)
@@ -1023,7 +1029,7 @@ static int vc7_iod_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long
static const struct clk_ops vc7_iod_ops = {
.recalc_rate = vc7_iod_recalc_rate,
- .round_rate = vc7_iod_round_rate,
+ .determine_rate = vc7_iod_determine_rate,
.set_rate = vc7_iod_set_rate,
};
diff --git a/drivers/clk/clk-vt8500.c b/drivers/clk/clk-vt8500.c
index 2a74a713ad59..eae5b3fbfb82 100644
--- a/drivers/clk/clk-vt8500.c
+++ b/drivers/clk/clk-vt8500.c
@@ -128,30 +128,31 @@ static unsigned long vt8500_dclk_recalc_rate(struct clk_hw *hw,
return parent_rate / div;
}
-static long vt8500_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int vt8500_dclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_device *cdev = to_clk_device(hw);
u32 divisor;
- if (rate == 0)
+ if (req->rate == 0)
return 0;
- divisor = *prate / rate;
+ divisor = req->best_parent_rate / req->rate;
/* If prate / rate would be decimal, incr the divisor */
- if (rate * divisor < *prate)
+ if (req->rate * divisor < req->best_parent_rate)
divisor++;
/*
* If this is a request for SDMMC we have to adjust the divisor
* when >31 to use the fixed predivisor
*/
- if ((cdev->div_mask == 0x3F) && (divisor > 31)) {
+ if ((cdev->div_mask == 0x3F) && (divisor > 31))
divisor = 64 * ((divisor / 64) + 1);
- }
- return *prate / divisor;
+ req->rate = req->best_parent_rate / divisor;
+
+ return 0;
}
static int vt8500_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -202,7 +203,7 @@ static const struct clk_ops vt8500_gated_clk_ops = {
};
static const struct clk_ops vt8500_divisor_clk_ops = {
- .round_rate = vt8500_dclk_round_rate,
+ .determine_rate = vt8500_dclk_determine_rate,
.set_rate = vt8500_dclk_set_rate,
.recalc_rate = vt8500_dclk_recalc_rate,
};
@@ -211,7 +212,7 @@ static const struct clk_ops vt8500_gated_divisor_clk_ops = {
.enable = vt8500_dclk_enable,
.disable = vt8500_dclk_disable,
.is_enabled = vt8500_dclk_is_enabled,
- .round_rate = vt8500_dclk_round_rate,
+ .determine_rate = vt8500_dclk_determine_rate,
.set_rate = vt8500_dclk_set_rate,
.recalc_rate = vt8500_dclk_recalc_rate,
};
@@ -594,8 +595,8 @@ static int vtwm_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static long vtwm_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int vtwm_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_pll *pll = to_clk_pll(hw);
u32 filter, mul, div1, div2;
@@ -604,33 +605,43 @@ static long vtwm_pll_round_rate(struct clk_hw *hw, unsigned long rate,
switch (pll->type) {
case PLL_TYPE_VT8500:
- ret = vt8500_find_pll_bits(rate, *prate, &mul, &div1);
+ ret = vt8500_find_pll_bits(req->rate, req->best_parent_rate,
+ &mul, &div1);
if (!ret)
- round_rate = VT8500_BITS_TO_FREQ(*prate, mul, div1);
+ round_rate = VT8500_BITS_TO_FREQ(req->best_parent_rate,
+ mul, div1);
break;
case PLL_TYPE_WM8650:
- ret = wm8650_find_pll_bits(rate, *prate, &mul, &div1, &div2);
+ ret = wm8650_find_pll_bits(req->rate, req->best_parent_rate,
+ &mul, &div1, &div2);
if (!ret)
- round_rate = WM8650_BITS_TO_FREQ(*prate, mul, div1, div2);
+ round_rate = WM8650_BITS_TO_FREQ(req->best_parent_rate,
+ mul, div1, div2);
break;
case PLL_TYPE_WM8750:
- ret = wm8750_find_pll_bits(rate, *prate, &filter, &mul, &div1, &div2);
+ ret = wm8750_find_pll_bits(req->rate, req->best_parent_rate,
+ &filter, &mul, &div1, &div2);
if (!ret)
- round_rate = WM8750_BITS_TO_FREQ(*prate, mul, div1, div2);
+ round_rate = WM8750_BITS_TO_FREQ(req->best_parent_rate,
+ mul, div1, div2);
break;
case PLL_TYPE_WM8850:
- ret = wm8850_find_pll_bits(rate, *prate, &mul, &div1, &div2);
+ ret = wm8850_find_pll_bits(req->rate, req->best_parent_rate,
+ &mul, &div1, &div2);
if (!ret)
- round_rate = WM8850_BITS_TO_FREQ(*prate, mul, div1, div2);
+ round_rate = WM8850_BITS_TO_FREQ(req->best_parent_rate,
+ mul, div1, div2);
break;
default:
- ret = -EINVAL;
+ return -EINVAL;
}
if (ret)
- return ret;
+ req->rate = ret;
+ else
+ req->rate = round_rate;
- return round_rate;
+ return 0;
}
static unsigned long vtwm_pll_recalc_rate(struct clk_hw *hw,
@@ -665,7 +676,7 @@ static unsigned long vtwm_pll_recalc_rate(struct clk_hw *hw,
}
static const struct clk_ops vtwm_pll_ops = {
- .round_rate = vtwm_pll_round_rate,
+ .determine_rate = vtwm_pll_determine_rate,
.set_rate = vtwm_pll_set_rate,
.recalc_rate = vtwm_pll_recalc_rate,
};
diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c
index 34e9d4d541e2..263e927138c2 100644
--- a/drivers/clk/clk-wm831x.c
+++ b/drivers/clk/clk-wm831x.c
@@ -133,18 +133,20 @@ static unsigned long wm831x_fll_recalc_rate(struct clk_hw *hw,
return 0;
}
-static long wm831x_fll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *unused)
+static int wm831x_fll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int best = 0;
int i;
for (i = 0; i < ARRAY_SIZE(wm831x_fll_auto_rates); i++)
- if (abs(wm831x_fll_auto_rates[i] - rate) <
- abs(wm831x_fll_auto_rates[best] - rate))
+ if (abs(wm831x_fll_auto_rates[i] - req->rate) <
+ abs(wm831x_fll_auto_rates[best] - req->rate))
best = i;
- return wm831x_fll_auto_rates[best];
+ req->rate = wm831x_fll_auto_rates[best];
+
+ return 0;
}
static int wm831x_fll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -214,7 +216,7 @@ static const struct clk_ops wm831x_fll_ops = {
.is_prepared = wm831x_fll_is_prepared,
.prepare = wm831x_fll_prepare,
.unprepare = wm831x_fll_unprepare,
- .round_rate = wm831x_fll_round_rate,
+ .determine_rate = wm831x_fll_determine_rate,
.recalc_rate = wm831x_fll_recalc_rate,
.set_rate = wm831x_fll_set_rate,
.get_parent = wm831x_fll_get_parent,
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
index 96946a8e2854..92e39f3237c2 100644
--- a/drivers/clk/clk-xgene.c
+++ b/drivers/clk/clk-xgene.c
@@ -271,23 +271,28 @@ static unsigned long xgene_clk_pmd_recalc_rate(struct clk_hw *hw,
return ret;
}
-static long xgene_clk_pmd_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int xgene_clk_pmd_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct xgene_clk_pmd *fd = to_xgene_clk_pmd(hw);
u64 ret, scale;
- if (!rate || rate >= *parent_rate)
- return *parent_rate;
+ if (!req->rate || req->rate >= req->best_parent_rate) {
+ req->rate = req->best_parent_rate;
+
+ return 0;
+ }
/* freq = parent_rate * scaler / denom */
- ret = rate * fd->denom;
- scale = DIV_ROUND_UP_ULL(ret, *parent_rate);
+ ret = req->rate * fd->denom;
+ scale = DIV_ROUND_UP_ULL(ret, req->best_parent_rate);
- ret = (u64)*parent_rate * scale;
+ ret = (u64)req->best_parent_rate * scale;
do_div(ret, fd->denom);
- return ret;
+ req->rate = ret;
+
+ return 0;
}
static int xgene_clk_pmd_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -333,7 +338,7 @@ static int xgene_clk_pmd_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops xgene_clk_pmd_ops = {
.recalc_rate = xgene_clk_pmd_recalc_rate,
- .round_rate = xgene_clk_pmd_round_rate,
+ .determine_rate = xgene_clk_pmd_determine_rate,
.set_rate = xgene_clk_pmd_set_rate,
};
@@ -593,23 +598,25 @@ static int xgene_clk_set_rate(struct clk_hw *hw, unsigned long rate,
return parent_rate / divider_save;
}
-static long xgene_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int xgene_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct xgene_clk *pclk = to_xgene_clk(hw);
- unsigned long parent_rate = *prate;
+ unsigned long parent_rate = req->best_parent_rate;
u32 divider;
if (pclk->param.divider_reg) {
/* Let's compute the divider */
- if (rate > parent_rate)
- rate = parent_rate;
- divider = parent_rate / rate; /* Rounded down */
+ if (req->rate > parent_rate)
+ req->rate = parent_rate;
+ divider = parent_rate / req->rate; /* Rounded down */
} else {
divider = 1;
}
- return parent_rate / divider;
+ req->rate = parent_rate / divider;
+
+ return 0;
}
static const struct clk_ops xgene_clk_ops = {
@@ -618,7 +625,7 @@ static const struct clk_ops xgene_clk_ops = {
.is_enabled = xgene_clk_is_enabled,
.recalc_rate = xgene_clk_recalc_rate,
.set_rate = xgene_clk_set_rate,
- .round_rate = xgene_clk_round_rate,
+ .determine_rate = xgene_clk_determine_rate,
};
static struct clk *xgene_register_clk(struct device *dev,
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index b821b2cdb155..85d2f2481acf 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -6,21 +6,24 @@
* Standard functionality for the common clock API. See Documentation/driver-api/clk.rst
*/
+#include <linux/clk/clk-conf.h>
+#include <linux/clkdev.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/clk/clk-conf.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/spinlock.h>
+#include <linux/device.h>
#include <linux/err.h>
+#include <linux/hashtable.h>
+#include <linux/init.h>
#include <linux/list.h>
-#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/device.h>
-#include <linux/init.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
-#include <linux/clkdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/stringhash.h>
#include "clk.h"
@@ -33,6 +36,9 @@ static struct task_struct *enable_owner;
static int prepare_refcnt;
static int enable_refcnt;
+#define CLK_HASH_BITS 9
+static DEFINE_HASHTABLE(clk_hashtable, CLK_HASH_BITS);
+
static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
@@ -87,6 +93,7 @@ struct clk_core {
struct clk_duty duty;
struct hlist_head children;
struct hlist_node child_node;
+ struct hlist_node hashtable_node;
struct hlist_head clks;
unsigned int notifier_count;
#ifdef CONFIG_DEBUG_FS
@@ -395,45 +402,20 @@ struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent);
-static struct clk_core *__clk_lookup_subtree(const char *name,
- struct clk_core *core)
-{
- struct clk_core *child;
- struct clk_core *ret;
-
- if (!strcmp(core->name, name))
- return core;
-
- hlist_for_each_entry(child, &core->children, child_node) {
- ret = __clk_lookup_subtree(name, child);
- if (ret)
- return ret;
- }
-
- return NULL;
-}
-
static struct clk_core *clk_core_lookup(const char *name)
{
- struct clk_core *root_clk;
- struct clk_core *ret;
+ struct clk_core *core;
+ u32 hash;
if (!name)
return NULL;
- /* search the 'proper' clk tree first */
- hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
- ret = __clk_lookup_subtree(name, root_clk);
- if (ret)
- return ret;
- }
+ hash = full_name_hash(NULL, name, strlen(name));
- /* if not found, then search the orphan tree */
- hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
- ret = __clk_lookup_subtree(name, root_clk);
- if (ret)
- return ret;
- }
+ /* search the hashtable */
+ hash_for_each_possible(clk_hashtable, core, hashtable_node, hash)
+ if (!strcmp(core->name, name))
+ return core;
return NULL;
}
@@ -4013,6 +3995,8 @@ static int __clk_core_init(struct clk_core *core)
hlist_add_head(&core->child_node, &clk_orphan_list);
core->orphan = true;
}
+ hash_add(clk_hashtable, &core->hashtable_node,
+ full_name_hash(NULL, core->name, strlen(core->name)));
/*
* Set clk's accuracy. The preferred method is to use
@@ -4089,6 +4073,7 @@ out:
clk_pm_runtime_put(core);
unlock:
if (ret) {
+ hash_del(&core->hashtable_node);
hlist_del_init(&core->child_node);
core->hw->core = NULL;
}
@@ -4610,6 +4595,7 @@ void clk_unregister(struct clk *clk)
clk_core_evict_parent_cache(clk->core);
+ hash_del(&clk->core->hashtable_node);
hlist_del_init(&clk->core->child_node);
if (clk->core->prepare_count)
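The clk.c change above replaces the recursive walk of the root and orphan trees with a 512-bucket hash table keyed on full_name_hash() of the clock name, turning clk_core_lookup() from O(n) per call into O(1) on average; entries are added in __clk_core_init() and removed both on init failure and in clk_unregister(). The lookup pattern in isolation, with struct object and its fields standing in for clk_core:

#include <linux/hashtable.h>
#include <linux/string.h>
#include <linux/stringhash.h>

#define OBJ_HASH_BITS 9
static DEFINE_HASHTABLE(obj_hashtable, OBJ_HASH_BITS);

struct object {
        const char *name;
        struct hlist_node hash_node;
};

static void obj_register(struct object *obj)
{
        hash_add(obj_hashtable, &obj->hash_node,
                 full_name_hash(NULL, obj->name, strlen(obj->name)));
}

static struct object *obj_lookup(const char *name)
{
        struct object *obj;
        u32 hash = full_name_hash(NULL, name, strlen(name));

        /* walk only the colliding bucket, compare names to be exact */
        hash_for_each_possible(obj_hashtable, obj, hash_node, hash)
                if (!strcmp(obj->name, name))
                        return obj;

        return NULL;
}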
diff --git a/drivers/clk/hisilicon/clk-hi3660-stub.c b/drivers/clk/hisilicon/clk-hi3660-stub.c
index 3a653d54bee0..7c8b00ee6019 100644
--- a/drivers/clk/hisilicon/clk-hi3660-stub.c
+++ b/drivers/clk/hisilicon/clk-hi3660-stub.c
@@ -34,7 +34,7 @@
.num_parents = 0, \
.flags = CLK_GET_RATE_NOCACHE, \
}, \
- },
+ }
#define to_stub_clk(_hw) container_of(_hw, struct hi3660_stub_clk, hw)
@@ -67,14 +67,14 @@ static unsigned long hi3660_stub_clk_recalc_rate(struct clk_hw *hw,
return stub_clk->rate;
}
-static long hi3660_stub_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int hi3660_stub_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
/*
* LPM3 handles rate rounding so just return whatever
* rate is requested.
*/
- return rate;
+ return 0;
}
static int hi3660_stub_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -97,15 +97,15 @@ static int hi3660_stub_clk_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops hi3660_stub_clk_ops = {
.recalc_rate = hi3660_stub_clk_recalc_rate,
- .round_rate = hi3660_stub_clk_round_rate,
+ .determine_rate = hi3660_stub_clk_determine_rate,
.set_rate = hi3660_stub_clk_set_rate,
};
static struct hi3660_stub_clk hi3660_stub_clks[HI3660_CLK_STUB_NUM] = {
- DEFINE_CLK_STUB(HI3660_CLK_STUB_CLUSTER0, 0x0001030A, "cpu-cluster.0")
- DEFINE_CLK_STUB(HI3660_CLK_STUB_CLUSTER1, 0x0002030A, "cpu-cluster.1")
- DEFINE_CLK_STUB(HI3660_CLK_STUB_GPU, 0x0003030A, "clk-g3d")
- DEFINE_CLK_STUB(HI3660_CLK_STUB_DDR, 0x00040309, "clk-ddrc")
+ DEFINE_CLK_STUB(HI3660_CLK_STUB_CLUSTER0, 0x0001030A, "cpu-cluster.0"),
+ DEFINE_CLK_STUB(HI3660_CLK_STUB_CLUSTER1, 0x0002030A, "cpu-cluster.1"),
+ DEFINE_CLK_STUB(HI3660_CLK_STUB_GPU, 0x0003030A, "clk-g3d"),
+ DEFINE_CLK_STUB(HI3660_CLK_STUB_DDR, 0x00040309, "clk-ddrc"),
};
static struct clk_hw *hi3660_stub_clk_hw_get(struct of_phandle_args *clkspec,
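The DEFINE_CLK_STUB() change is a pure style fix: the trailing comma moves out of the macro body and onto each use site, so the macro expands to a plain initializer and the array reads like any other initializer list. The shape in miniature:

struct point { int x, y; };

#define DEFINE_POINT(px, py) { .x = (px), .y = (py) }

static const struct point points[] = {
        DEFINE_POINT(1, 2),     /* comma at the use site, not in the macro */
        DEFINE_POINT(3, 4),
};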
diff --git a/drivers/clk/hisilicon/clk-hi6220-stub.c b/drivers/clk/hisilicon/clk-hi6220-stub.c
index a8319795ed1c..bf99cfafafa0 100644
--- a/drivers/clk/hisilicon/clk-hi6220-stub.c
+++ b/drivers/clk/hisilicon/clk-hi6220-stub.c
@@ -161,11 +161,11 @@ static int hi6220_stub_clk_set_rate(struct clk_hw *hw, unsigned long rate,
return ret;
}
-static long hi6220_stub_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int hi6220_stub_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct hi6220_stub_clk *stub_clk = to_stub_clk(hw);
- unsigned long new_rate = rate / 1000; /* kHz */
+ unsigned long new_rate = req->rate / 1000; /* kHz */
switch (stub_clk->id) {
case HI6220_STUB_ACPU0:
@@ -181,12 +181,14 @@ static long hi6220_stub_clk_round_rate(struct clk_hw *hw, unsigned long rate,
break;
}
- return new_rate;
+ req->rate = new_rate;
+
+ return 0;
}
static const struct clk_ops hi6220_stub_clk_ops = {
.recalc_rate = hi6220_stub_clk_recalc_rate,
- .round_rate = hi6220_stub_clk_round_rate,
+ .determine_rate = hi6220_stub_clk_determine_rate,
.set_rate = hi6220_stub_clk_set_rate,
};
diff --git a/drivers/clk/hisilicon/clkdivider-hi6220.c b/drivers/clk/hisilicon/clkdivider-hi6220.c
index 5348bafe694f..6bae18a84cb6 100644
--- a/drivers/clk/hisilicon/clkdivider-hi6220.c
+++ b/drivers/clk/hisilicon/clkdivider-hi6220.c
@@ -55,13 +55,15 @@ static unsigned long hi6220_clkdiv_recalc_rate(struct clk_hw *hw,
CLK_DIVIDER_ROUND_CLOSEST, dclk->width);
}
-static long hi6220_clkdiv_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int hi6220_clkdiv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct hi6220_clk_divider *dclk = to_hi6220_clk_divider(hw);
- return divider_round_rate(hw, rate, prate, dclk->table,
- dclk->width, CLK_DIVIDER_ROUND_CLOSEST);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate, dclk->table,
+ dclk->width, CLK_DIVIDER_ROUND_CLOSEST);
+
+ return 0;
}
static int hi6220_clkdiv_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -93,7 +95,7 @@ static int hi6220_clkdiv_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops hi6220_clkdiv_ops = {
.recalc_rate = hi6220_clkdiv_recalc_rate,
- .round_rate = hi6220_clkdiv_round_rate,
+ .determine_rate = hi6220_clkdiv_determine_rate,
.set_rate = hi6220_clkdiv_set_rate,
};
diff --git a/drivers/clk/imx/clk-imx95-blk-ctl.c b/drivers/clk/imx/clk-imx95-blk-ctl.c
index 7e88877a6245..56bed4471995 100644
--- a/drivers/clk/imx/clk-imx95-blk-ctl.c
+++ b/drivers/clk/imx/clk-imx95-blk-ctl.c
@@ -36,6 +36,7 @@ struct imx95_blk_ctl {
void __iomem *base;
/* clock gate register */
u32 clk_reg_restore;
+ const struct imx95_blk_ctl_dev_data *pdata;
};
struct imx95_blk_ctl_clk_dev_data {
@@ -349,7 +350,6 @@ static const struct imx95_blk_ctl_dev_data imx94_dispmix_csr_dev_data = {
static int imx95_bc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- const struct imx95_blk_ctl_dev_data *bc_data;
struct imx95_blk_ctl *bc;
struct clk_hw_onecell_data *clk_hw_data;
struct clk_hw **hws;
@@ -379,25 +379,25 @@ static int imx95_bc_probe(struct platform_device *pdev)
return ret;
}
- bc_data = of_device_get_match_data(dev);
- if (!bc_data)
+ bc->pdata = of_device_get_match_data(dev);
+ if (!bc->pdata)
return devm_of_platform_populate(dev);
- clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws, bc_data->num_clks),
+ clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws, bc->pdata->num_clks),
GFP_KERNEL);
if (!clk_hw_data)
return -ENOMEM;
- if (bc_data->rpm_enabled) {
+ if (bc->pdata->rpm_enabled) {
devm_pm_runtime_enable(&pdev->dev);
pm_runtime_resume_and_get(&pdev->dev);
}
- clk_hw_data->num = bc_data->num_clks;
+ clk_hw_data->num = bc->pdata->num_clks;
hws = clk_hw_data->hws;
- for (i = 0; i < bc_data->num_clks; i++) {
- const struct imx95_blk_ctl_clk_dev_data *data = &bc_data->clk_dev_data[i];
+ for (i = 0; i < bc->pdata->num_clks; i++) {
+ const struct imx95_blk_ctl_clk_dev_data *data = &bc->pdata->clk_dev_data[i];
void __iomem *reg = base + data->reg;
if (data->type == CLK_MUX) {
@@ -439,7 +439,7 @@ static int imx95_bc_probe(struct platform_device *pdev)
return 0;
cleanup:
- for (i = 0; i < bc_data->num_clks; i++) {
+ for (i = 0; i < bc->pdata->num_clks; i++) {
if (IS_ERR_OR_NULL(hws[i]))
continue;
clk_hw_unregister(hws[i]);
@@ -453,15 +453,24 @@ static int imx95_bc_runtime_suspend(struct device *dev)
{
struct imx95_blk_ctl *bc = dev_get_drvdata(dev);
+ bc->clk_reg_restore = readl(bc->base + bc->pdata->clk_reg_offset);
clk_disable_unprepare(bc->clk_apb);
+
return 0;
}
static int imx95_bc_runtime_resume(struct device *dev)
{
struct imx95_blk_ctl *bc = dev_get_drvdata(dev);
+ int ret;
- return clk_prepare_enable(bc->clk_apb);
+ ret = clk_prepare_enable(bc->clk_apb);
+ if (ret)
+ return ret;
+
+ writel(bc->clk_reg_restore, bc->base + bc->pdata->clk_reg_offset);
+
+ return 0;
}
#endif
@@ -469,22 +478,12 @@ static int imx95_bc_runtime_resume(struct device *dev)
static int imx95_bc_suspend(struct device *dev)
{
struct imx95_blk_ctl *bc = dev_get_drvdata(dev);
- const struct imx95_blk_ctl_dev_data *bc_data;
- int ret;
- bc_data = of_device_get_match_data(dev);
- if (!bc_data)
+ if (pm_runtime_suspended(dev))
return 0;
- if (bc_data->rpm_enabled) {
- ret = pm_runtime_get_sync(bc->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(bc->dev);
- return ret;
- }
- }
-
- bc->clk_reg_restore = readl(bc->base + bc_data->clk_reg_offset);
+ bc->clk_reg_restore = readl(bc->base + bc->pdata->clk_reg_offset);
+ clk_disable_unprepare(bc->clk_apb);
return 0;
}
@@ -492,16 +491,16 @@ static int imx95_bc_suspend(struct device *dev)
static int imx95_bc_resume(struct device *dev)
{
struct imx95_blk_ctl *bc = dev_get_drvdata(dev);
- const struct imx95_blk_ctl_dev_data *bc_data;
+ int ret;
- bc_data = of_device_get_match_data(dev);
- if (!bc_data)
+ if (pm_runtime_suspended(dev))
return 0;
- writel(bc->clk_reg_restore, bc->base + bc_data->clk_reg_offset);
+ ret = clk_prepare_enable(bc->clk_apb);
+ if (ret)
+ return ret;
- if (bc_data->rpm_enabled)
- pm_runtime_put(bc->dev);
+ writel(bc->clk_reg_restore, bc->base + bc->pdata->clk_reg_offset);
return 0;
}
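Both the runtime and system sleep paths above now follow the same ordering: snapshot the gate register while the APB clock still runs, gate the clock, and on resume re-enable the clock before writing the snapshot back; caching pdata in the driver state also lets the PM callbacks avoid re-querying the OF match table. A sketch of that ordering, with EXAMPLE_CLK_REG and the priv layout purely illustrative:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/io.h>

#define EXAMPLE_CLK_REG 0x0     /* illustrative offset */

struct example {
        void __iomem *base;
        struct clk *clk_apb;
        u32 saved;
};

static int example_suspend(struct device *dev)
{
        struct example *priv = dev_get_drvdata(dev);

        priv->saved = readl(priv->base + EXAMPLE_CLK_REG); /* APB still on */
        clk_disable_unprepare(priv->clk_apb);

        return 0;
}

static int example_resume(struct device *dev)
{
        struct example *priv = dev_get_drvdata(dev);
        int ret;

        ret = clk_prepare_enable(priv->clk_apb);
        if (ret)
                return ret;

        writel(priv->saved, priv->base + EXAMPLE_CLK_REG);

        return 0;
}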
diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c
index 0c9c8344ad11..91e7ac0cc334 100644
--- a/drivers/clk/ingenic/cgu.c
+++ b/drivers/clk/ingenic/cgu.c
@@ -174,14 +174,16 @@ ingenic_pll_calc(const struct ingenic_cgu_clk_info *clk_info,
n * od);
}
-static long
-ingenic_pll_round_rate(struct clk_hw *hw, unsigned long req_rate,
- unsigned long *prate)
+static int ingenic_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
- return ingenic_pll_calc(clk_info, req_rate, *prate, NULL, NULL, NULL);
+ req->rate = ingenic_pll_calc(clk_info, req->rate, req->best_parent_rate,
+ NULL, NULL, NULL);
+
+ return 0;
}
static inline int ingenic_pll_check_stable(struct ingenic_cgu *cgu,
@@ -317,7 +319,7 @@ static int ingenic_pll_is_enabled(struct clk_hw *hw)
static const struct clk_ops ingenic_pll_ops = {
.recalc_rate = ingenic_pll_recalc_rate,
- .round_rate = ingenic_pll_round_rate,
+ .determine_rate = ingenic_pll_determine_rate,
.set_rate = ingenic_pll_set_rate,
.enable = ingenic_pll_enable,
diff --git a/drivers/clk/ingenic/jz4780-cgu.c b/drivers/clk/ingenic/jz4780-cgu.c
index b1dadc0a5e75..07e2f3c5c454 100644
--- a/drivers/clk/ingenic/jz4780-cgu.c
+++ b/drivers/clk/ingenic/jz4780-cgu.c
@@ -128,19 +128,19 @@ static unsigned long jz4780_otg_phy_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long jz4780_otg_phy_round_rate(struct clk_hw *hw, unsigned long req_rate,
- unsigned long *parent_rate)
+static int jz4780_otg_phy_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- if (req_rate < 15600000)
- return 12000000;
-
- if (req_rate < 21600000)
- return 19200000;
+ if (req->rate < 15600000)
+ req->rate = 12000000;
+ else if (req->rate < 21600000)
+ req->rate = 19200000;
+ else if (req->rate < 36000000)
+ req->rate = 24000000;
+ else
+ req->rate = 48000000;
- if (req_rate < 36000000)
- return 24000000;
-
- return 48000000;
+ return 0;
}
static int jz4780_otg_phy_set_rate(struct clk_hw *hw, unsigned long req_rate,
@@ -212,7 +212,7 @@ static int jz4780_otg_phy_is_enabled(struct clk_hw *hw)
static const struct clk_ops jz4780_otg_phy_ops = {
.recalc_rate = jz4780_otg_phy_recalc_rate,
- .round_rate = jz4780_otg_phy_round_rate,
+ .determine_rate = jz4780_otg_phy_determine_rate,
.set_rate = jz4780_otg_phy_set_rate,
.enable = jz4780_otg_phy_enable,
diff --git a/drivers/clk/ingenic/x1000-cgu.c b/drivers/clk/ingenic/x1000-cgu.c
index feb03eed4fe8..d80886caf393 100644
--- a/drivers/clk/ingenic/x1000-cgu.c
+++ b/drivers/clk/ingenic/x1000-cgu.c
@@ -84,16 +84,17 @@ static unsigned long x1000_otg_phy_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long x1000_otg_phy_round_rate(struct clk_hw *hw, unsigned long req_rate,
- unsigned long *parent_rate)
+static int x1000_otg_phy_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- if (req_rate < 18000000)
- return 12000000;
-
- if (req_rate < 36000000)
- return 24000000;
+ if (req->rate < 18000000)
+ req->rate = 12000000;
+ else if (req->rate < 36000000)
+ req->rate = 24000000;
+ else
+ req->rate = 48000000;
- return 48000000;
+ return 0;
}
static int x1000_otg_phy_set_rate(struct clk_hw *hw, unsigned long req_rate,
@@ -161,7 +162,7 @@ static int x1000_usb_phy_is_enabled(struct clk_hw *hw)
static const struct clk_ops x1000_otg_phy_ops = {
.recalc_rate = x1000_otg_phy_recalc_rate,
- .round_rate = x1000_otg_phy_round_rate,
+ .determine_rate = x1000_otg_phy_determine_rate,
.set_rate = x1000_otg_phy_set_rate,
.enable = x1000_usb_phy_enable,
diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c
index c5894fc9395e..a4b42811de55 100644
--- a/drivers/clk/keystone/sci-clk.c
+++ b/drivers/clk/keystone/sci-clk.c
@@ -480,13 +480,10 @@ static int ti_sci_scan_clocks_from_fw(struct sci_clk_provider *provider)
num_clks++;
}
- provider->clocks = devm_kmalloc_array(dev, num_clks, sizeof(sci_clk),
- GFP_KERNEL);
+ provider->clocks = devm_kmemdup_array(dev, clks, num_clks, sizeof(sci_clk), GFP_KERNEL);
if (!provider->clocks)
return -ENOMEM;
- memcpy(provider->clocks, clks, num_clks * sizeof(sci_clk));
-
provider->num_clocks = num_clks;
devm_kfree(dev, clks);
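devm_kmemdup_array() collapses the allocate-then-memcpy pair into one overflow-checked call with identical semantics, which is why the memcpy() line disappears above. Both shapes in isolation, on an illustrative u32 array rather than the driver's own type:

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/string.h>

/* The two-step shape being replaced. */
static u32 *dup_old(struct device *dev, const u32 *src, size_t n)
{
        u32 *buf = devm_kmalloc_array(dev, n, sizeof(*src), GFP_KERNEL);

        if (!buf)
                return NULL;
        memcpy(buf, src, n * sizeof(*src));
        return buf;
}

/* The one-call replacement: allocates and copies in one step. */
static u32 *dup_new(struct device *dev, const u32 *src, size_t n)
{
        return devm_kmemdup_array(dev, src, n, sizeof(*src), GFP_KERNEL);
}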
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index 5f8e6d68fa14..0e8dd82aa84e 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -1002,6 +1002,77 @@ config COMMON_CLK_MT8195_VENCSYS
help
This driver supports MediaTek MT8195 vencsys clocks.
+config COMMON_CLK_MT8196
+ tristate "Clock driver for MediaTek MT8196"
+ depends on ARM64 || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK
+ help
+ This driver supports MediaTek MT8196 basic clocks.
+
+config COMMON_CLK_MT8196_IMP_IIC_WRAP
+ tristate "Clock driver for MediaTek MT8196 imp_iic_wrap"
+ depends on COMMON_CLK_MT8196
+ default COMMON_CLK_MT8196
+ help
+ This driver supports MediaTek MT8196 i2c clocks.
+
+config COMMON_CLK_MT8196_MCUSYS
+ tristate "Clock driver for MediaTek MT8196 mcusys"
+ depends on COMMON_CLK_MT8196
+ default COMMON_CLK_MT8196
+ help
+ This driver supports MediaTek MT8196 mcusys clocks.
+
+config COMMON_CLK_MT8196_MDPSYS
+ tristate "Clock driver for MediaTek MT8196 mdpsys"
+ depends on COMMON_CLK_MT8196
+ default COMMON_CLK_MT8196
+ help
+ This driver supports MediaTek MT8196 mdpsys clocks.
+
+config COMMON_CLK_MT8196_MFGCFG
+ tristate "Clock driver for MediaTek MT8196 mfgcfg"
+ depends on COMMON_CLK_MT8196
+ default m
+ help
+ This driver supports MediaTek MT8196 mfgcfg clocks.
+
+config COMMON_CLK_MT8196_MMSYS
+ tristate "Clock driver for MediaTek MT8196 mmsys"
+ depends on COMMON_CLK_MT8196
+ default m
+ help
+ This driver supports MediaTek MT8196 mmsys clocks.
+
+config COMMON_CLK_MT8196_PEXTPSYS
+ tristate "Clock driver for MediaTek MT8196 pextpsys"
+ depends on COMMON_CLK_MT8196
+ default COMMON_CLK_MT8196
+ help
+ This driver supports MediaTek MT8196 pextpsys clocks.
+
+config COMMON_CLK_MT8196_UFSSYS
+ tristate "Clock driver for MediaTek MT8196 ufssys"
+ depends on COMMON_CLK_MT8196
+ default COMMON_CLK_MT8196
+ help
+ This driver supports MediaTek MT8196 ufssys clocks.
+
+config COMMON_CLK_MT8196_VDECSYS
+ tristate "Clock driver for MediaTek MT8196 vdecsys"
+ depends on COMMON_CLK_MT8196
+ default m
+ help
+ This driver supports MediaTek MT8196 vdecsys clocks.
+
+config COMMON_CLK_MT8196_VENCSYS
+ tristate "Clock driver for MediaTek MT8196 vencsys"
+ depends on COMMON_CLK_MT8196
+ default m
+ help
+ This driver supports MediaTek MT8196 vencsys clocks.
+
config COMMON_CLK_MT8365
tristate "Clock driver for MediaTek MT8365"
depends on ARCH_MEDIATEK || COMPILE_TEST
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index 6efec95406bd..d8736a060dbd 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -150,6 +150,19 @@ obj-$(CONFIG_COMMON_CLK_MT8195_VDOSYS) += clk-mt8195-vdo0.o clk-mt8195-vdo1.o
obj-$(CONFIG_COMMON_CLK_MT8195_VENCSYS) += clk-mt8195-venc.o
obj-$(CONFIG_COMMON_CLK_MT8195_VPPSYS) += clk-mt8195-vpp0.o clk-mt8195-vpp1.o
obj-$(CONFIG_COMMON_CLK_MT8195_WPESYS) += clk-mt8195-wpe.o
+obj-$(CONFIG_COMMON_CLK_MT8196) += clk-mt8196-apmixedsys.o clk-mt8196-topckgen.o \
+ clk-mt8196-topckgen2.o clk-mt8196-vlpckgen.o \
+ clk-mt8196-peri_ao.o
+obj-$(CONFIG_COMMON_CLK_MT8196_IMP_IIC_WRAP) += clk-mt8196-imp_iic_wrap.o
+obj-$(CONFIG_COMMON_CLK_MT8196_MCUSYS) += clk-mt8196-mcu.o
+obj-$(CONFIG_COMMON_CLK_MT8196_MDPSYS) += clk-mt8196-mdpsys.o
+obj-$(CONFIG_COMMON_CLK_MT8196_MFGCFG) += clk-mt8196-mfg.o
+obj-$(CONFIG_COMMON_CLK_MT8196_MMSYS) += clk-mt8196-disp0.o clk-mt8196-disp1.o clk-mt8196-vdisp_ao.o \
+ clk-mt8196-ovl0.o clk-mt8196-ovl1.o
+obj-$(CONFIG_COMMON_CLK_MT8196_PEXTPSYS) += clk-mt8196-pextp.o
+obj-$(CONFIG_COMMON_CLK_MT8196_UFSSYS) += clk-mt8196-ufs_ao.o
+obj-$(CONFIG_COMMON_CLK_MT8196_VDECSYS) += clk-mt8196-vdec.o
+obj-$(CONFIG_COMMON_CLK_MT8196_VENCSYS) += clk-mt8196-venc.o
obj-$(CONFIG_COMMON_CLK_MT8365) += clk-mt8365-apmixedsys.o clk-mt8365.o
obj-$(CONFIG_COMMON_CLK_MT8365_APU) += clk-mt8365-apu.o
obj-$(CONFIG_COMMON_CLK_MT8365_CAM) += clk-mt8365-cam.o
diff --git a/drivers/clk/mediatek/clk-gate.c b/drivers/clk/mediatek/clk-gate.c
index 67d9e741c5e7..f6b1429ff757 100644
--- a/drivers/clk/mediatek/clk-gate.c
+++ b/drivers/clk/mediatek/clk-gate.c
@@ -5,6 +5,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/dev_printk.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/printk.h>
@@ -12,15 +13,14 @@
#include <linux/slab.h>
#include <linux/types.h>
+#include "clk-mtk.h"
#include "clk-gate.h"
struct mtk_clk_gate {
struct clk_hw hw;
struct regmap *regmap;
- int set_ofs;
- int clr_ofs;
- int sta_ofs;
- u8 bit;
+ struct regmap *regmap_hwv;
+ const struct mtk_gate *gate;
};
static inline struct mtk_clk_gate *to_mtk_clk_gate(struct clk_hw *hw)
@@ -33,9 +33,9 @@ static u32 mtk_get_clockgating(struct clk_hw *hw)
struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
u32 val;
- regmap_read(cg->regmap, cg->sta_ofs, &val);
+ regmap_read(cg->regmap, cg->gate->regs->sta_ofs, &val);
- return val & BIT(cg->bit);
+ return val & BIT(cg->gate->shift);
}
static int mtk_cg_bit_is_cleared(struct clk_hw *hw)
@@ -52,28 +52,30 @@ static void mtk_cg_set_bit(struct clk_hw *hw)
{
struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
- regmap_write(cg->regmap, cg->set_ofs, BIT(cg->bit));
+ regmap_write(cg->regmap, cg->gate->regs->set_ofs, BIT(cg->gate->shift));
}
static void mtk_cg_clr_bit(struct clk_hw *hw)
{
struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
- regmap_write(cg->regmap, cg->clr_ofs, BIT(cg->bit));
+ regmap_write(cg->regmap, cg->gate->regs->clr_ofs, BIT(cg->gate->shift));
}
static void mtk_cg_set_bit_no_setclr(struct clk_hw *hw)
{
struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
- regmap_set_bits(cg->regmap, cg->sta_ofs, BIT(cg->bit));
+ regmap_set_bits(cg->regmap, cg->gate->regs->sta_ofs,
+ BIT(cg->gate->shift));
}
static void mtk_cg_clr_bit_no_setclr(struct clk_hw *hw)
{
struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
- regmap_clear_bits(cg->regmap, cg->sta_ofs, BIT(cg->bit));
+ regmap_clear_bits(cg->regmap, cg->gate->regs->sta_ofs,
+ BIT(cg->gate->shift));
}
static int mtk_cg_enable(struct clk_hw *hw)
@@ -100,6 +102,32 @@ static void mtk_cg_disable_inv(struct clk_hw *hw)
mtk_cg_clr_bit(hw);
}
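+/*
+ * Hardware-voter (HWV) gates are not toggled directly: a vote is cast by
+ * writing the gate bit to the HWV set/clear register, then the HWV status
+ * register is polled until the hardware signals completion (or
+ * MTK_WAIT_HWV_DONE_US expires).
+ */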
+static int mtk_cg_hwv_set_en(struct clk_hw *hw, bool enable)
+{
+ struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
+ u32 val;
+
+ regmap_write(cg->regmap_hwv,
+ enable ? cg->gate->hwv_regs->set_ofs :
+ cg->gate->hwv_regs->clr_ofs,
+ BIT(cg->gate->shift));
+
+ return regmap_read_poll_timeout_atomic(cg->regmap_hwv,
+ cg->gate->hwv_regs->sta_ofs, val,
+ val & BIT(cg->gate->shift), 0,
+ MTK_WAIT_HWV_DONE_US);
+}
+
+static int mtk_cg_hwv_enable(struct clk_hw *hw)
+{
+ return mtk_cg_hwv_set_en(hw, true);
+}
+
+static void mtk_cg_hwv_disable(struct clk_hw *hw)
+{
+ mtk_cg_hwv_set_en(hw, false);
+}
+
static int mtk_cg_enable_no_setclr(struct clk_hw *hw)
{
mtk_cg_clr_bit_no_setclr(hw);
@@ -124,6 +152,15 @@ static void mtk_cg_disable_inv_no_setclr(struct clk_hw *hw)
mtk_cg_clr_bit_no_setclr(hw);
}
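+/* Gates using the HWV ops need the extra hardware-voter regmap */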
+static bool mtk_cg_uses_hwv(const struct clk_ops *ops)
+{
+ if (ops == &mtk_clk_gate_hwv_ops_setclr ||
+ ops == &mtk_clk_gate_hwv_ops_setclr_inv)
+ return true;
+
+ return false;
+}
+
const struct clk_ops mtk_clk_gate_ops_setclr = {
.is_enabled = mtk_cg_bit_is_cleared,
.enable = mtk_cg_enable,
@@ -138,6 +175,20 @@ const struct clk_ops mtk_clk_gate_ops_setclr_inv = {
};
EXPORT_SYMBOL_GPL(mtk_clk_gate_ops_setclr_inv);
+const struct clk_ops mtk_clk_gate_hwv_ops_setclr = {
+ .is_enabled = mtk_cg_bit_is_cleared,
+ .enable = mtk_cg_hwv_enable,
+ .disable = mtk_cg_hwv_disable,
+};
+EXPORT_SYMBOL_GPL(mtk_clk_gate_hwv_ops_setclr);
+
+const struct clk_ops mtk_clk_gate_hwv_ops_setclr_inv = {
+ .is_enabled = mtk_cg_bit_is_set,
+ .enable = mtk_cg_hwv_enable,
+ .disable = mtk_cg_hwv_disable,
+};
+EXPORT_SYMBOL_GPL(mtk_clk_gate_hwv_ops_setclr_inv);
+
const struct clk_ops mtk_clk_gate_ops_no_setclr = {
.is_enabled = mtk_cg_bit_is_cleared,
.enable = mtk_cg_enable_no_setclr,
@@ -152,12 +203,10 @@ const struct clk_ops mtk_clk_gate_ops_no_setclr_inv = {
};
EXPORT_SYMBOL_GPL(mtk_clk_gate_ops_no_setclr_inv);
-static struct clk_hw *mtk_clk_register_gate(struct device *dev, const char *name,
- const char *parent_name,
- struct regmap *regmap, int set_ofs,
- int clr_ofs, int sta_ofs, u8 bit,
- const struct clk_ops *ops,
- unsigned long flags)
+static struct clk_hw *mtk_clk_register_gate(struct device *dev,
+ const struct mtk_gate *gate,
+ struct regmap *regmap,
+ struct regmap *regmap_hwv)
{
struct mtk_clk_gate *cg;
int ret;
@@ -167,18 +216,19 @@ static struct clk_hw *mtk_clk_register_gate(struct device *dev, const char *name
if (!cg)
return ERR_PTR(-ENOMEM);
- init.name = name;
- init.flags = flags | CLK_SET_RATE_PARENT;
- init.parent_names = parent_name ? &parent_name : NULL;
- init.num_parents = parent_name ? 1 : 0;
- init.ops = ops;
+ init.name = gate->name;
+ init.flags = gate->flags | CLK_SET_RATE_PARENT;
+ init.parent_names = gate->parent_name ? &gate->parent_name : NULL;
+ init.num_parents = gate->parent_name ? 1 : 0;
+ init.ops = gate->ops;
+ if (mtk_cg_uses_hwv(init.ops) && !regmap_hwv)
+ return dev_err_ptr_probe(
+ dev, -ENXIO,
+ "regmap not found for hardware voter clocks\n");
cg->regmap = regmap;
- cg->set_ofs = set_ofs;
- cg->clr_ofs = clr_ofs;
- cg->sta_ofs = sta_ofs;
- cg->bit = bit;
-
+ cg->regmap_hwv = regmap_hwv;
+ cg->gate = gate;
cg->hw.init = &init;
ret = clk_hw_register(dev, &cg->hw);
@@ -209,6 +259,7 @@ int mtk_clk_register_gates(struct device *dev, struct device_node *node,
int i;
struct clk_hw *hw;
struct regmap *regmap;
+ struct regmap *regmap_hwv;
if (!clk_data)
return -ENOMEM;
@@ -219,6 +270,12 @@ int mtk_clk_register_gates(struct device *dev, struct device_node *node,
return PTR_ERR(regmap);
}
+ regmap_hwv = mtk_clk_get_hwv_regmap(node);
+ if (IS_ERR(regmap_hwv))
+ return dev_err_probe(
+ dev, PTR_ERR(regmap_hwv),
+ "Cannot find hardware voter regmap for %pOF\n", node);
+
for (i = 0; i < num; i++) {
const struct mtk_gate *gate = &clks[i];
@@ -228,13 +285,7 @@ int mtk_clk_register_gates(struct device *dev, struct device_node *node,
continue;
}
- hw = mtk_clk_register_gate(dev, gate->name, gate->parent_name,
- regmap,
- gate->regs->set_ofs,
- gate->regs->clr_ofs,
- gate->regs->sta_ofs,
- gate->shift, gate->ops,
- gate->flags);
+ hw = mtk_clk_register_gate(dev, gate, regmap, regmap_hwv);
if (IS_ERR(hw)) {
pr_err("Failed to register clk %s: %pe\n", gate->name,
diff --git a/drivers/clk/mediatek/clk-gate.h b/drivers/clk/mediatek/clk-gate.h
index 1a46b4c56fc5..4f05b9855dae 100644
--- a/drivers/clk/mediatek/clk-gate.h
+++ b/drivers/clk/mediatek/clk-gate.h
@@ -19,6 +19,8 @@ extern const struct clk_ops mtk_clk_gate_ops_setclr;
extern const struct clk_ops mtk_clk_gate_ops_setclr_inv;
extern const struct clk_ops mtk_clk_gate_ops_no_setclr;
extern const struct clk_ops mtk_clk_gate_ops_no_setclr_inv;
+extern const struct clk_ops mtk_clk_gate_hwv_ops_setclr;
+extern const struct clk_ops mtk_clk_gate_hwv_ops_setclr_inv;
struct mtk_gate_regs {
u32 sta_ofs;
@@ -31,6 +33,7 @@ struct mtk_gate {
const char *name;
const char *parent_name;
const struct mtk_gate_regs *regs;
+ const struct mtk_gate_regs *hwv_regs;
int shift;
const struct clk_ops *ops;
unsigned long flags;
diff --git a/drivers/clk/mediatek/clk-mt7622-aud.c b/drivers/clk/mediatek/clk-mt7622-aud.c
index 931a0598e598..a4ea5e20efa2 100644
--- a/drivers/clk/mediatek/clk-mt7622-aud.c
+++ b/drivers/clk/mediatek/clk-mt7622-aud.c
@@ -75,6 +75,7 @@ static const struct mtk_gate audio_clks[] = {
GATE_AUDIO1(CLK_AUDIO_A1SYS, "audio_a1sys", "a1sys_hp_sel", 21),
GATE_AUDIO1(CLK_AUDIO_A2SYS, "audio_a2sys", "a2sys_hp_sel", 22),
GATE_AUDIO1(CLK_AUDIO_AFE_CONN, "audio_afe_conn", "a1sys_hp_sel", 23),
+ GATE_AUDIO1(CLK_AUDIO_AFE_MRGIF, "audio_afe_mrgif", "aud_mux1_sel", 25),
/* AUDIO2 */
GATE_AUDIO2(CLK_AUDIO_UL1, "audio_ul1", "a1sys_hp_sel", 0),
GATE_AUDIO2(CLK_AUDIO_UL2, "audio_ul2", "a1sys_hp_sel", 1),
diff --git a/drivers/clk/mediatek/clk-mt8195-infra_ao.c b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
index bb648a88e43a..ad47fdb23460 100644
--- a/drivers/clk/mediatek/clk-mt8195-infra_ao.c
+++ b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
@@ -103,7 +103,7 @@ static const struct mtk_gate infra_ao_clks[] = {
GATE_INFRA_AO0(CLK_INFRA_AO_CQ_DMA_FPC, "infra_ao_cq_dma_fpc", "fpc", 28),
GATE_INFRA_AO0(CLK_INFRA_AO_UART5, "infra_ao_uart5", "top_uart", 29),
/* INFRA_AO1 */
- GATE_INFRA_AO1(CLK_INFRA_AO_HDMI_26M, "infra_ao_hdmi_26m", "clk26m", 0),
+ GATE_INFRA_AO1(CLK_INFRA_AO_HDMI_26M, "infra_ao_hdmi_26m", "top_hdmi_xtal", 0),
GATE_INFRA_AO1(CLK_INFRA_AO_SPI0, "infra_ao_spi0", "top_spi", 1),
GATE_INFRA_AO1(CLK_INFRA_AO_MSDC0, "infra_ao_msdc0", "top_msdc50_0_hclk", 2),
GATE_INFRA_AO1(CLK_INFRA_AO_MSDC1, "infra_ao_msdc1", "top_axi", 4),
diff --git a/drivers/clk/mediatek/clk-mt8196-apmixedsys.c b/drivers/clk/mediatek/clk-mt8196-apmixedsys.c
new file mode 100644
index 000000000000..617f5449b88b
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-apmixedsys.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-pll.h"
+
+/* APMIXEDSYS PLL control register offsets */
+#define MAINPLL_CON0 0x250
+#define MAINPLL_CON1 0x254
+#define UNIVPLL_CON0 0x264
+#define UNIVPLL_CON1 0x268
+#define MSDCPLL_CON0 0x278
+#define MSDCPLL_CON1 0x27c
+#define ADSPPLL_CON0 0x28c
+#define ADSPPLL_CON1 0x290
+#define EMIPLL_CON0 0x2a0
+#define EMIPLL_CON1 0x2a4
+#define EMIPLL2_CON0 0x2b4
+#define EMIPLL2_CON1 0x2b8
+#define NET1PLL_CON0 0x2c8
+#define NET1PLL_CON1 0x2cc
+#define SGMIIPLL_CON0 0x2dc
+#define SGMIIPLL_CON1 0x2e0
+
+/* APMIXEDSYS_GP2 PLL control register offsets */
+#define MAINPLL2_CON0 0x250
+#define MAINPLL2_CON1 0x254
+#define UNIVPLL2_CON0 0x264
+#define UNIVPLL2_CON1 0x268
+#define MMPLL2_CON0 0x278
+#define MMPLL2_CON1 0x27c
+#define IMGPLL_CON0 0x28c
+#define IMGPLL_CON1 0x290
+#define TVDPLL1_CON0 0x2a0
+#define TVDPLL1_CON1 0x2a4
+#define TVDPLL2_CON0 0x2b4
+#define TVDPLL2_CON1 0x2b8
+#define TVDPLL3_CON0 0x2c8
+#define TVDPLL3_CON1 0x2cc
+
+#define PLLEN_ALL 0x080
+#define PLLEN_ALL_SET 0x084
+#define PLLEN_ALL_CLR 0x088
+
+#define FENC_STATUS_CON0 0x03c
+
+#define MT8196_PLL_FMAX (3800UL * MHZ)
+#define MT8196_PLL_FMIN (1500UL * MHZ)
+#define MT8196_INTEGER_BITS 8
+
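+/*
+ * FENC PLLs report their on/off state through per-PLL status bits in
+ * FENC_STATUS_CON0; enabling and disabling goes through the shared
+ * PLLEN_ALL set/clear registers, one bit per PLL.
+ */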
+#define PLL_FENC(_id, _name, _reg, _fenc_sta_ofs, _fenc_sta_bit,\
+ _flags, _pd_reg, _pd_shift, \
+ _pcw_reg, _pcw_shift, _pcwbits, \
+ _pll_en_bit) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .fenc_sta_ofs = _fenc_sta_ofs, \
+ .fenc_sta_bit = _fenc_sta_bit, \
+ .flags = _flags, \
+ .fmax = MT8196_PLL_FMAX, \
+ .fmin = MT8196_PLL_FMIN, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = MT8196_INTEGER_BITS, \
+ .en_reg = PLLEN_ALL, \
+ .en_set_reg = PLLEN_ALL_SET, \
+ .en_clr_reg = PLLEN_ALL_CLR, \
+ .pll_en_bit = _pll_en_bit, \
+ .ops = &mtk_pll_fenc_clr_set_ops, \
+}
+
+struct mtk_pll_desc {
+ const struct mtk_pll_data *clks;
+ size_t num_clks;
+};
+
+static const struct mtk_pll_data apmixed_plls[] = {
+ PLL_FENC(CLK_APMIXED_MAINPLL, "mainpll", MAINPLL_CON0, FENC_STATUS_CON0,
+ 7, PLL_AO, MAINPLL_CON1, 24, MAINPLL_CON1, 0, 22, 0),
+ PLL_FENC(CLK_APMIXED_UNIVPLL, "univpll", UNIVPLL_CON0, FENC_STATUS_CON0,
+ 6, 0, UNIVPLL_CON1, 24, UNIVPLL_CON1, 0, 22, 1),
+ PLL_FENC(CLK_APMIXED_MSDCPLL, "msdcpll", MSDCPLL_CON0, FENC_STATUS_CON0,
+ 5, 0, MSDCPLL_CON1, 24, MSDCPLL_CON1, 0, 22, 2),
+ PLL_FENC(CLK_APMIXED_ADSPPLL, "adsppll", ADSPPLL_CON0, FENC_STATUS_CON0,
+ 4, 0, ADSPPLL_CON1, 24, ADSPPLL_CON1, 0, 22, 3),
+ PLL_FENC(CLK_APMIXED_EMIPLL, "emipll", EMIPLL_CON0, FENC_STATUS_CON0, 3,
+ PLL_AO, EMIPLL_CON1, 24, EMIPLL_CON1, 0, 22, 4),
+ PLL_FENC(CLK_APMIXED_EMIPLL2, "emipll2", EMIPLL2_CON0, FENC_STATUS_CON0,
+ 2, PLL_AO, EMIPLL2_CON1, 24, EMIPLL2_CON1, 0, 22, 5),
+ PLL_FENC(CLK_APMIXED_NET1PLL, "net1pll", NET1PLL_CON0, FENC_STATUS_CON0,
+ 1, 0, NET1PLL_CON1, 24, NET1PLL_CON1, 0, 22, 6),
+ PLL_FENC(CLK_APMIXED_SGMIIPLL, "sgmiipll", SGMIIPLL_CON0, FENC_STATUS_CON0,
+ 0, 0, SGMIIPLL_CON1, 24, SGMIIPLL_CON1, 0, 22, 7),
+};
+
+static const struct mtk_pll_desc apmixed_desc = {
+ .clks = apmixed_plls,
+ .num_clks = ARRAY_SIZE(apmixed_plls),
+};
+
+static const struct mtk_pll_data apmixed2_plls[] = {
+ PLL_FENC(CLK_APMIXED2_MAINPLL2, "mainpll2", MAINPLL2_CON0, FENC_STATUS_CON0,
+ 6, 0, MAINPLL2_CON1, 24, MAINPLL2_CON1, 0, 22, 0),
+ PLL_FENC(CLK_APMIXED2_UNIVPLL2, "univpll2", UNIVPLL2_CON0, FENC_STATUS_CON0,
+ 5, 0, UNIVPLL2_CON1, 24, UNIVPLL2_CON1, 0, 22, 1),
+ PLL_FENC(CLK_APMIXED2_MMPLL2, "mmpll2", MMPLL2_CON0, FENC_STATUS_CON0,
+ 4, 0, MMPLL2_CON1, 24, MMPLL2_CON1, 0, 22, 2),
+ PLL_FENC(CLK_APMIXED2_IMGPLL, "imgpll", IMGPLL_CON0, FENC_STATUS_CON0,
+ 3, 0, IMGPLL_CON1, 24, IMGPLL_CON1, 0, 22, 3),
+ PLL_FENC(CLK_APMIXED2_TVDPLL1, "tvdpll1", TVDPLL1_CON0, FENC_STATUS_CON0,
+ 2, 0, TVDPLL1_CON1, 24, TVDPLL1_CON1, 0, 22, 4),
+ PLL_FENC(CLK_APMIXED2_TVDPLL2, "tvdpll2", TVDPLL2_CON0, FENC_STATUS_CON0,
+ 1, 0, TVDPLL2_CON1, 24, TVDPLL2_CON1, 0, 22, 5),
+ PLL_FENC(CLK_APMIXED2_TVDPLL3, "tvdpll3", TVDPLL3_CON0, FENC_STATUS_CON0,
+ 0, 0, TVDPLL3_CON1, 24, TVDPLL3_CON1, 0, 22, 6),
+};
+
+static const struct mtk_pll_desc apmixed2_desc = {
+ .clks = apmixed2_plls,
+ .num_clks = ARRAY_SIZE(apmixed2_plls),
+};
+
+static int clk_mt8196_apmixed_probe(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ const struct mtk_pll_desc *mcd;
+ int r;
+
+ mcd = device_get_match_data(&pdev->dev);
+ if (!mcd)
+ return -EINVAL;
+
+ clk_data = mtk_alloc_clk_data(mcd->num_clks);
+ if (!clk_data)
+ return -ENOMEM;
+
+ r = mtk_clk_register_plls(node, mcd->clks, mcd->num_clks, clk_data);
+ if (r)
+ goto free_apmixed_data;
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (r)
+ goto unregister_plls;
+
+ platform_set_drvdata(pdev, clk_data);
+
+ return r;
+
+unregister_plls:
+ mtk_clk_unregister_plls(mcd->clks, mcd->num_clks, clk_data);
+free_apmixed_data:
+ mtk_free_clk_data(clk_data);
+ return r;
+}
+
+static void clk_mt8196_apmixed_remove(struct platform_device *pdev)
+{
+ const struct mtk_pll_desc *mcd = device_get_match_data(&pdev->dev);
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+ struct device_node *node = pdev->dev.of_node;
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_plls(mcd->clks, mcd->num_clks, clk_data);
+ mtk_free_clk_data(clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8196_apmixed[] = {
+ { .compatible = "mediatek,mt8196-apmixedsys", .data = &apmixed_desc },
+ { .compatible = "mediatek,mt8196-apmixedsys-gp2",
+ .data = &apmixed2_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_apmixed);
+
+static struct platform_driver clk_mt8196_apmixed_drv = {
+ .probe = clk_mt8196_apmixed_probe,
+ .remove = clk_mt8196_apmixed_remove,
+ .driver = {
+ .name = "clk-mt8196-apmixed",
+ .of_match_table = of_match_clk_mt8196_apmixed,
+ },
+};
+module_platform_driver(clk_mt8196_apmixed_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 apmixedsys clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-disp0.c b/drivers/clk/mediatek/clk-mt8196-disp0.c
new file mode 100644
index 000000000000..9474aad26e92
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-disp0.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs mm0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
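+/* Gate control offsets within the separate hardware-voter (HWV) regmap */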
+static const struct mtk_gate_regs mm0_hwv_regs = {
+ .set_ofs = 0x0020,
+ .clr_ofs = 0x0024,
+ .sta_ofs = 0x2c10,
+};
+
+static const struct mtk_gate_regs mm1_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+static const struct mtk_gate_regs mm1_hwv_regs = {
+ .set_ofs = 0x0028,
+ .clr_ofs = 0x002c,
+ .sta_ofs = 0x2c14,
+};
+
+#define GATE_MM0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm0_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr,\
+ }
+
+#define GATE_HWV_MM0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm0_cg_regs, \
+ .hwv_regs = &mm0_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+		.flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_MM1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm1_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr,\
+ }
+
+#define GATE_HWV_MM1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm1_cg_regs, \
+ .hwv_regs = &mm1_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+static const struct mtk_gate mm_clks[] = {
+ /* MM0 */
+ GATE_HWV_MM0(CLK_MM_CONFIG, "mm_config", "disp", 0),
+ GATE_HWV_MM0(CLK_MM_DISP_MUTEX0, "mm_disp_mutex0", "disp", 1),
+ GATE_HWV_MM0(CLK_MM_DISP_AAL0, "mm_disp_aal0", "disp", 2),
+ GATE_HWV_MM0(CLK_MM_DISP_AAL1, "mm_disp_aal1", "disp", 3),
+ GATE_MM0(CLK_MM_DISP_C3D0, "mm_disp_c3d0", "disp", 4),
+ GATE_MM0(CLK_MM_DISP_C3D1, "mm_disp_c3d1", "disp", 5),
+ GATE_MM0(CLK_MM_DISP_C3D2, "mm_disp_c3d2", "disp", 6),
+ GATE_MM0(CLK_MM_DISP_C3D3, "mm_disp_c3d3", "disp", 7),
+ GATE_MM0(CLK_MM_DISP_CCORR0, "mm_disp_ccorr0", "disp", 8),
+ GATE_MM0(CLK_MM_DISP_CCORR1, "mm_disp_ccorr1", "disp", 9),
+ GATE_MM0(CLK_MM_DISP_CCORR2, "mm_disp_ccorr2", "disp", 10),
+ GATE_MM0(CLK_MM_DISP_CCORR3, "mm_disp_ccorr3", "disp", 11),
+ GATE_MM0(CLK_MM_DISP_CHIST0, "mm_disp_chist0", "disp", 12),
+ GATE_MM0(CLK_MM_DISP_CHIST1, "mm_disp_chist1", "disp", 13),
+ GATE_MM0(CLK_MM_DISP_COLOR0, "mm_disp_color0", "disp", 14),
+ GATE_MM0(CLK_MM_DISP_COLOR1, "mm_disp_color1", "disp", 15),
+ GATE_MM0(CLK_MM_DISP_DITHER0, "mm_disp_dither0", "disp", 16),
+ GATE_MM0(CLK_MM_DISP_DITHER1, "mm_disp_dither1", "disp", 17),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC0, "mm_disp_dli_async0", "disp", 18),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC1, "mm_disp_dli_async1", "disp", 19),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC2, "mm_disp_dli_async2", "disp", 20),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC3, "mm_disp_dli_async3", "disp", 21),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC4, "mm_disp_dli_async4", "disp", 22),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC5, "mm_disp_dli_async5", "disp", 23),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC6, "mm_disp_dli_async6", "disp", 24),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC7, "mm_disp_dli_async7", "disp", 25),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC8, "mm_disp_dli_async8", "disp", 26),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC9, "mm_disp_dli_async9", "disp", 27),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC10, "mm_disp_dli_async10", "disp", 28),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC11, "mm_disp_dli_async11", "disp", 29),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC12, "mm_disp_dli_async12", "disp", 30),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC13, "mm_disp_dli_async13", "disp", 31),
+ /* MM1 */
+ GATE_HWV_MM1(CLK_MM_DISP_DLI_ASYNC14, "mm_disp_dli_async14", "disp", 0),
+ GATE_HWV_MM1(CLK_MM_DISP_DLI_ASYNC15, "mm_disp_dli_async15", "disp", 1),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC0, "mm_disp_dlo_async0", "disp", 2),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC1, "mm_disp_dlo_async1", "disp", 3),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC2, "mm_disp_dlo_async2", "disp", 4),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC3, "mm_disp_dlo_async3", "disp", 5),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC4, "mm_disp_dlo_async4", "disp", 6),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC5, "mm_disp_dlo_async5", "disp", 7),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC6, "mm_disp_dlo_async6", "disp", 8),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC7, "mm_disp_dlo_async7", "disp", 9),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC8, "mm_disp_dlo_async8", "disp", 10),
+ GATE_MM1(CLK_MM_DISP_GAMMA0, "mm_disp_gamma0", "disp", 11),
+ GATE_MM1(CLK_MM_DISP_GAMMA1, "mm_disp_gamma1", "disp", 12),
+ GATE_MM1(CLK_MM_MDP_AAL0, "mm_mdp_aal0", "disp", 13),
+ GATE_MM1(CLK_MM_MDP_AAL1, "mm_mdp_aal1", "disp", 14),
+ GATE_HWV_MM1(CLK_MM_MDP_RDMA0, "mm_mdp_rdma0", "disp", 15),
+ GATE_HWV_MM1(CLK_MM_DISP_POSTMASK0, "mm_disp_postmask0", "disp", 16),
+ GATE_HWV_MM1(CLK_MM_DISP_POSTMASK1, "mm_disp_postmask1", "disp", 17),
+ GATE_HWV_MM1(CLK_MM_MDP_RSZ0, "mm_mdp_rsz0", "disp", 18),
+ GATE_HWV_MM1(CLK_MM_MDP_RSZ1, "mm_mdp_rsz1", "disp", 19),
+ GATE_HWV_MM1(CLK_MM_DISP_SPR0, "mm_disp_spr0", "disp", 20),
+ GATE_MM1(CLK_MM_DISP_TDSHP0, "mm_disp_tdshp0", "disp", 21),
+ GATE_MM1(CLK_MM_DISP_TDSHP1, "mm_disp_tdshp1", "disp", 22),
+ GATE_HWV_MM1(CLK_MM_DISP_WDMA0, "mm_disp_wdma0", "disp", 23),
+ GATE_HWV_MM1(CLK_MM_DISP_Y2R0, "mm_disp_y2r0", "disp", 24),
+ GATE_HWV_MM1(CLK_MM_SMI_SUB_COMM0, "mm_ssc", "disp", 25),
+ GATE_HWV_MM1(CLK_MM_DISP_FAKE_ENG0, "mm_disp_fake_eng0", "disp", 26),
+};
+
+static const struct mtk_clk_desc mm_mcd = {
+ .clks = mm_clks,
+ .num_clks = ARRAY_SIZE(mm_clks),
+};
+
+static const struct platform_device_id clk_mt8196_disp0_id_table[] = {
+ { .name = "clk-mt8196-disp0", .driver_data = (kernel_ulong_t)&mm_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt8196_disp0_id_table);
+
+static struct platform_driver clk_mt8196_disp0_drv = {
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
+ .driver = {
+ .name = "clk-mt8196-disp0",
+ },
+ .id_table = clk_mt8196_disp0_id_table,
+};
+module_platform_driver(clk_mt8196_disp0_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 disp0 clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-disp1.c b/drivers/clk/mediatek/clk-mt8196-disp1.c
new file mode 100644
index 000000000000..3bbec79a7010
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-disp1.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs mm10_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs mm10_hwv_regs = {
+ .set_ofs = 0x0010,
+ .clr_ofs = 0x0014,
+ .sta_ofs = 0x2c08,
+};
+
+static const struct mtk_gate_regs mm11_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+static const struct mtk_gate_regs mm11_hwv_regs = {
+ .set_ofs = 0x0018,
+ .clr_ofs = 0x001c,
+ .sta_ofs = 0x2c0c,
+};
+
+#define GATE_MM10(_id, _name, _parent, _shift) {\
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm10_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr,\
+ }
+
+#define GATE_HWV_MM10(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm10_cg_regs, \
+ .hwv_regs = &mm10_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_MM11(_id, _name, _parent, _shift) {\
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm11_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr,\
+ }
+
+#define GATE_HWV_MM11(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm11_cg_regs, \
+ .hwv_regs = &mm11_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ }
+
+static const struct mtk_gate mm1_clks[] = {
+ /* MM10 */
+ GATE_HWV_MM10(CLK_MM1_DISPSYS1_CONFIG, "mm1_dispsys1_config", "disp", 0),
+ GATE_HWV_MM10(CLK_MM1_DISPSYS1_S_CONFIG, "mm1_dispsys1_s_config", "disp", 1),
+ GATE_HWV_MM10(CLK_MM1_DISP_MUTEX0, "mm1_disp_mutex0", "disp", 2),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC20, "mm1_disp_dli_async20", "disp", 3),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC21, "mm1_disp_dli_async21", "disp", 4),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC22, "mm1_disp_dli_async22", "disp", 5),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC23, "mm1_disp_dli_async23", "disp", 6),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC24, "mm1_disp_dli_async24", "disp", 7),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC25, "mm1_disp_dli_async25", "disp", 8),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC26, "mm1_disp_dli_async26", "disp", 9),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC27, "mm1_disp_dli_async27", "disp", 10),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC28, "mm1_disp_dli_async28", "disp", 11),
+ GATE_HWV_MM10(CLK_MM1_DISP_RELAY0, "mm1_disp_relay0", "disp", 12),
+ GATE_HWV_MM10(CLK_MM1_DISP_RELAY1, "mm1_disp_relay1", "disp", 13),
+ GATE_HWV_MM10(CLK_MM1_DISP_RELAY2, "mm1_disp_relay2", "disp", 14),
+ GATE_HWV_MM10(CLK_MM1_DISP_RELAY3, "mm1_disp_relay3", "disp", 15),
+ GATE_HWV_MM10(CLK_MM1_DISP_DP_INTF0, "mm1_DP_CLK", "disp", 16),
+ GATE_HWV_MM10(CLK_MM1_DISP_DP_INTF1, "mm1_disp_dp_intf1", "disp", 17),
+ GATE_HWV_MM10(CLK_MM1_DISP_DSC_WRAP0, "mm1_disp_dsc_wrap0", "disp", 18),
+ GATE_HWV_MM10(CLK_MM1_DISP_DSC_WRAP1, "mm1_disp_dsc_wrap1", "disp", 19),
+ GATE_HWV_MM10(CLK_MM1_DISP_DSC_WRAP2, "mm1_disp_dsc_wrap2", "disp", 20),
+ GATE_HWV_MM10(CLK_MM1_DISP_DSC_WRAP3, "mm1_disp_dsc_wrap3", "disp", 21),
+ GATE_HWV_MM10(CLK_MM1_DISP_DSI0, "mm1_CLK0", "disp", 22),
+ GATE_HWV_MM10(CLK_MM1_DISP_DSI1, "mm1_CLK1", "disp", 23),
+ GATE_HWV_MM10(CLK_MM1_DISP_DSI2, "mm1_CLK2", "disp", 24),
+ GATE_HWV_MM10(CLK_MM1_DISP_DVO0, "mm1_disp_dvo0", "disp", 25),
+ GATE_HWV_MM10(CLK_MM1_DISP_GDMA0, "mm1_disp_gdma0", "disp", 26),
+ GATE_HWV_MM10(CLK_MM1_DISP_MERGE0, "mm1_disp_merge0", "disp", 27),
+ GATE_HWV_MM10(CLK_MM1_DISP_MERGE1, "mm1_disp_merge1", "disp", 28),
+ GATE_HWV_MM10(CLK_MM1_DISP_MERGE2, "mm1_disp_merge2", "disp", 29),
+ GATE_HWV_MM10(CLK_MM1_DISP_ODDMR0, "mm1_disp_oddmr0", "disp", 30),
+ GATE_HWV_MM10(CLK_MM1_DISP_POSTALIGN0, "mm1_disp_postalign0", "disp", 31),
+ /* MM11 */
+ GATE_HWV_MM11(CLK_MM1_DISP_DITHER2, "mm1_disp_dither2", "disp", 0),
+ GATE_HWV_MM11(CLK_MM1_DISP_R2Y0, "mm1_disp_r2y0", "disp", 1),
+ GATE_HWV_MM11(CLK_MM1_DISP_SPLITTER0, "mm1_disp_splitter0", "disp", 2),
+ GATE_HWV_MM11(CLK_MM1_DISP_SPLITTER1, "mm1_disp_splitter1", "disp", 3),
+ GATE_HWV_MM11(CLK_MM1_DISP_SPLITTER2, "mm1_disp_splitter2", "disp", 4),
+ GATE_HWV_MM11(CLK_MM1_DISP_SPLITTER3, "mm1_disp_splitter3", "disp", 5),
+ GATE_HWV_MM11(CLK_MM1_DISP_VDCM0, "mm1_disp_vdcm0", "disp", 6),
+ GATE_HWV_MM11(CLK_MM1_DISP_WDMA1, "mm1_disp_wdma1", "disp", 7),
+ GATE_HWV_MM11(CLK_MM1_DISP_WDMA2, "mm1_disp_wdma2", "disp", 8),
+ GATE_HWV_MM11(CLK_MM1_DISP_WDMA3, "mm1_disp_wdma3", "disp", 9),
+ GATE_HWV_MM11(CLK_MM1_DISP_WDMA4, "mm1_disp_wdma4", "disp", 10),
+ GATE_HWV_MM11(CLK_MM1_MDP_RDMA1, "mm1_mdp_rdma1", "disp", 11),
+ GATE_HWV_MM11(CLK_MM1_SMI_LARB0, "mm1_smi_larb0", "disp", 12),
+ GATE_HWV_MM11(CLK_MM1_MOD1, "mm1_mod1", "clk26m", 13),
+ GATE_HWV_MM11(CLK_MM1_MOD2, "mm1_mod2", "clk26m", 14),
+ GATE_HWV_MM11(CLK_MM1_MOD3, "mm1_mod3", "clk26m", 15),
+ GATE_HWV_MM11(CLK_MM1_MOD4, "mm1_mod4", "dp0", 16),
+ GATE_HWV_MM11(CLK_MM1_MOD5, "mm1_mod5", "dp1", 17),
+ GATE_HWV_MM11(CLK_MM1_MOD6, "mm1_mod6", "dp1", 18),
+ GATE_HWV_MM11(CLK_MM1_CG0, "mm1_cg0", "disp", 20),
+ GATE_HWV_MM11(CLK_MM1_CG1, "mm1_cg1", "disp", 21),
+ GATE_HWV_MM11(CLK_MM1_CG2, "mm1_cg2", "disp", 22),
+ GATE_HWV_MM11(CLK_MM1_CG3, "mm1_cg3", "disp", 23),
+ GATE_HWV_MM11(CLK_MM1_CG4, "mm1_cg4", "disp", 24),
+ GATE_HWV_MM11(CLK_MM1_CG5, "mm1_cg5", "disp", 25),
+ GATE_HWV_MM11(CLK_MM1_CG6, "mm1_cg6", "disp", 26),
+ GATE_HWV_MM11(CLK_MM1_CG7, "mm1_cg7", "disp", 27),
+ GATE_HWV_MM11(CLK_MM1_F26M, "mm1_f26m_ck", "clk26m", 28),
+};
+
+static const struct mtk_clk_desc mm1_mcd = {
+ .clks = mm1_clks,
+ .num_clks = ARRAY_SIZE(mm1_clks),
+};
+
+static const struct platform_device_id clk_mt8196_disp1_id_table[] = {
+ { .name = "clk-mt8196-disp1", .driver_data = (kernel_ulong_t)&mm1_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt8196_disp1_id_table);
+
+static struct platform_driver clk_mt8196_disp1_drv = {
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
+ .driver = {
+ .name = "clk-mt8196-disp1",
+ },
+ .id_table = clk_mt8196_disp1_id_table,
+};
+module_platform_driver(clk_mt8196_disp1_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 disp1 clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8196-imp_iic_wrap.c
new file mode 100644
index 000000000000..a63241671650
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-imp_iic_wrap.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs imp_cg_regs = {
+ .set_ofs = 0xe08,
+ .clr_ofs = 0xe04,
+ .sta_ofs = 0xe00,
+};
+
+#define GATE_IMP(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &imp_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate impc_clks[] = {
+ GATE_IMP(CLK_IMPC_I2C11, "impc_i2c11", "i2c_p", 0),
+ GATE_IMP(CLK_IMPC_I2C12, "impc_i2c12", "i2c_p", 1),
+ GATE_IMP(CLK_IMPC_I2C13, "impc_i2c13", "i2c_p", 2),
+ GATE_IMP(CLK_IMPC_I2C14, "impc_i2c14", "i2c_p", 3),
+};
+
+static const struct mtk_clk_desc impc_mcd = {
+ .clks = impc_clks,
+ .num_clks = ARRAY_SIZE(impc_clks),
+};
+
+static const struct mtk_gate impe_clks[] = {
+ GATE_IMP(CLK_IMPE_I2C5, "impe_i2c5", "i2c_east", 0),
+};
+
+static const struct mtk_clk_desc impe_mcd = {
+ .clks = impe_clks,
+ .num_clks = ARRAY_SIZE(impe_clks),
+};
+
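+/* Only impn_i2c7 below is gated through the hardware voter */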
+static const struct mtk_gate_regs impn_hwv_regs = {
+ .set_ofs = 0x0000,
+ .clr_ofs = 0x0004,
+ .sta_ofs = 0x2c00,
+};
+
+#define GATE_HWV_IMPN(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &imp_cg_regs, \
+ .hwv_regs = &impn_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+static const struct mtk_gate impn_clks[] = {
+ GATE_IMP(CLK_IMPN_I2C1, "impn_i2c1", "i2c_north", 0),
+ GATE_IMP(CLK_IMPN_I2C2, "impn_i2c2", "i2c_north", 1),
+ GATE_IMP(CLK_IMPN_I2C4, "impn_i2c4", "i2c_north", 2),
+ GATE_HWV_IMPN(CLK_IMPN_I2C7, "impn_i2c7", "i2c_north", 3),
+ GATE_IMP(CLK_IMPN_I2C8, "impn_i2c8", "i2c_north", 4),
+ GATE_IMP(CLK_IMPN_I2C9, "impn_i2c9", "i2c_north", 5),
+};
+
+static const struct mtk_clk_desc impn_mcd = {
+ .clks = impn_clks,
+ .num_clks = ARRAY_SIZE(impn_clks),
+};
+
+static const struct mtk_gate impw_clks[] = {
+ GATE_IMP(CLK_IMPW_I2C0, "impw_i2c0", "i2c_west", 0),
+ GATE_IMP(CLK_IMPW_I2C3, "impw_i2c3", "i2c_west", 1),
+ GATE_IMP(CLK_IMPW_I2C6, "impw_i2c6", "i2c_west", 2),
+ GATE_IMP(CLK_IMPW_I2C10, "impw_i2c10", "i2c_west", 3),
+};
+
+static const struct mtk_clk_desc impw_mcd = {
+ .clks = impw_clks,
+ .num_clks = ARRAY_SIZE(impw_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8196_imp_iic_wrap[] = {
+ { .compatible = "mediatek,mt8196-imp-iic-wrap-c", .data = &impc_mcd },
+ { .compatible = "mediatek,mt8196-imp-iic-wrap-e", .data = &impe_mcd },
+ { .compatible = "mediatek,mt8196-imp-iic-wrap-n", .data = &impn_mcd },
+ { .compatible = "mediatek,mt8196-imp-iic-wrap-w", .data = &impw_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_imp_iic_wrap);
+
+static struct platform_driver clk_mt8196_imp_iic_wrap_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-imp_iic_wrap",
+ .of_match_table = of_match_clk_mt8196_imp_iic_wrap,
+ },
+};
+module_platform_driver(clk_mt8196_imp_iic_wrap_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 I2C Wrapper clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-mcu.c b/drivers/clk/mediatek/clk-mt8196-mcu.c
new file mode 100644
index 000000000000..5cbcc411ae73
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-mcu.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-pll.h"
+
+#define ARMPLL_LL_CON0 0x008
+#define ARMPLL_LL_CON1 0x00c
+#define ARMPLL_LL_CON2 0x010
+#define ARMPLL_LL_CON3 0x014
+#define ARMPLL_BL_CON0 0x008
+#define ARMPLL_BL_CON1 0x00c
+#define ARMPLL_BL_CON2 0x010
+#define ARMPLL_BL_CON3 0x014
+#define ARMPLL_B_CON0 0x008
+#define ARMPLL_B_CON1 0x00c
+#define ARMPLL_B_CON2 0x010
+#define ARMPLL_B_CON3 0x014
+#define CCIPLL_CON0 0x008
+#define CCIPLL_CON1 0x00c
+#define CCIPLL_CON2 0x010
+#define CCIPLL_CON3 0x014
+#define PTPPLL_CON0 0x008
+#define PTPPLL_CON1 0x00c
+#define PTPPLL_CON2 0x010
+#define PTPPLL_CON3 0x014
+
+#define MT8196_PLL_FMAX (3800UL * MHZ)
+#define MT8196_PLL_FMIN (1500UL * MHZ)
+#define MT8196_INTEGER_BITS 8
+
+#define PLL(_id, _name, _reg, _en_reg, _en_mask, _pll_en_bit, \
+ _flags, _rst_bar_mask, \
+ _pd_reg, _pd_shift, _tuner_reg, \
+ _tuner_en_reg, _tuner_en_bit, \
+ _pcw_reg, _pcw_shift, _pcwbits) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .en_reg = _en_reg, \
+ .en_mask = _en_mask, \
+ .pll_en_bit = _pll_en_bit, \
+ .flags = _flags, \
+ .rst_bar_mask = _rst_bar_mask, \
+ .fmax = MT8196_PLL_FMAX, \
+ .fmin = MT8196_PLL_FMIN, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .tuner_en_reg = _tuner_en_reg, \
+ .tuner_en_bit = _tuner_en_bit, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = MT8196_INTEGER_BITS, \
+ }
+
+static const struct mtk_pll_data cpu_bl_plls[] = {
+ PLL(CLK_CPBL_ARMPLL_BL, "armpll-bl", ARMPLL_BL_CON0, ARMPLL_BL_CON0, 0,
+ 0, PLL_AO, BIT(0), ARMPLL_BL_CON1, 24, 0, 0, 0, ARMPLL_BL_CON1, 0, 22),
+};
+
+static const struct mtk_pll_data cpu_b_plls[] = {
+ PLL(CLK_CPB_ARMPLL_B, "armpll-b", ARMPLL_B_CON0, ARMPLL_B_CON0, 0, 0,
+ PLL_AO, BIT(0), ARMPLL_B_CON1, 24, 0, 0, 0, ARMPLL_B_CON1, 0, 22),
+};
+
+static const struct mtk_pll_data cpu_ll_plls[] = {
+ PLL(CLK_CPLL_ARMPLL_LL, "armpll-ll", ARMPLL_LL_CON0, ARMPLL_LL_CON0, 0,
+ 0, PLL_AO, BIT(0), ARMPLL_LL_CON1, 24, 0, 0, 0, ARMPLL_LL_CON1, 0, 22),
+};
+
+static const struct mtk_pll_data cci_plls[] = {
+ PLL(CLK_CCIPLL, "ccipll", CCIPLL_CON0, CCIPLL_CON0, 0, 0, PLL_AO,
+ BIT(0), CCIPLL_CON1, 24, 0, 0, 0, CCIPLL_CON1, 0, 22),
+};
+
+static const struct mtk_pll_data ptp_plls[] = {
+ PLL(CLK_PTPPLL, "ptppll", PTPPLL_CON0, PTPPLL_CON0, 0, 0, PLL_AO,
+ BIT(0), PTPPLL_CON1, 24, 0, 0, 0, PTPPLL_CON1, 0, 22),
+};
+
+static const struct of_device_id of_match_clk_mt8196_mcu[] = {
+ { .compatible = "mediatek,mt8196-armpll-bl-pll-ctrl",
+ .data = &cpu_bl_plls },
+ { .compatible = "mediatek,mt8196-armpll-b-pll-ctrl",
+ .data = &cpu_b_plls },
+ { .compatible = "mediatek,mt8196-armpll-ll-pll-ctrl",
+ .data = &cpu_ll_plls },
+ { .compatible = "mediatek,mt8196-ccipll-pll-ctrl", .data = &cci_plls },
+ { .compatible = "mediatek,mt8196-ptppll-pll-ctrl", .data = &ptp_plls },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_mcu);
+
+static int clk_mt8196_mcu_probe(struct platform_device *pdev)
+{
+ const struct mtk_pll_data *plls;
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
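+	/* Each MT8196 PLL control block instance hosts exactly one PLL */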
+ const int num_plls = 1;
+ int r;
+
+ plls = of_device_get_match_data(&pdev->dev);
+ if (!plls)
+ return -EINVAL;
+
+ clk_data = mtk_alloc_clk_data(num_plls);
+ if (!clk_data)
+ return -ENOMEM;
+
+ r = mtk_clk_register_plls(node, plls, num_plls, clk_data);
+ if (r)
+ goto free_clk_data;
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (r)
+ goto unregister_plls;
+
+ platform_set_drvdata(pdev, clk_data);
+
+ return r;
+
+unregister_plls:
+ mtk_clk_unregister_plls(plls, num_plls, clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+
+ return r;
+}
+
+static void clk_mt8196_mcu_remove(struct platform_device *pdev)
+{
+ const struct mtk_pll_data *plls = of_device_get_match_data(&pdev->dev);
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+ struct device_node *node = pdev->dev.of_node;
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_plls(plls, 1, clk_data);
+ mtk_free_clk_data(clk_data);
+}
+
+static struct platform_driver clk_mt8196_mcu_drv = {
+ .probe = clk_mt8196_mcu_probe,
+ .remove = clk_mt8196_mcu_remove,
+ .driver = {
+ .name = "clk-mt8196-mcu",
+ .of_match_table = of_match_clk_mt8196_mcu,
+ },
+};
+module_platform_driver(clk_mt8196_mcu_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 mcusys clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-mdpsys.c b/drivers/clk/mediatek/clk-mt8196-mdpsys.c
new file mode 100644
index 000000000000..7667d88f0eb0
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-mdpsys.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs mdp0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs mdp1_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+static const struct mtk_gate_regs mdp2_cg_regs = {
+ .set_ofs = 0x124,
+ .clr_ofs = 0x128,
+ .sta_ofs = 0x120,
+};
+
+#define GATE_MDP0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mdp0_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_MDP1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mdp1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_MDP2(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mdp2_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate mdp1_clks[] = {
+ /* MDP1-0 */
+ GATE_MDP0(CLK_MDP1_MDP_MUTEX0, "mdp1_mdp_mutex0", "mdp", 0),
+ GATE_MDP0(CLK_MDP1_SMI0, "mdp1_smi0", "mdp", 1),
+ GATE_MDP0(CLK_MDP1_APB_BUS, "mdp1_apb_bus", "mdp", 2),
+ GATE_MDP0(CLK_MDP1_MDP_RDMA0, "mdp1_mdp_rdma0", "mdp", 3),
+ GATE_MDP0(CLK_MDP1_MDP_RDMA1, "mdp1_mdp_rdma1", "mdp", 4),
+ GATE_MDP0(CLK_MDP1_MDP_RDMA2, "mdp1_mdp_rdma2", "mdp", 5),
+ GATE_MDP0(CLK_MDP1_MDP_BIRSZ0, "mdp1_mdp_birsz0", "mdp", 6),
+ GATE_MDP0(CLK_MDP1_MDP_HDR0, "mdp1_mdp_hdr0", "mdp", 7),
+ GATE_MDP0(CLK_MDP1_MDP_AAL0, "mdp1_mdp_aal0", "mdp", 8),
+ GATE_MDP0(CLK_MDP1_MDP_RSZ0, "mdp1_mdp_rsz0", "mdp", 9),
+ GATE_MDP0(CLK_MDP1_MDP_RSZ2, "mdp1_mdp_rsz2", "mdp", 10),
+ GATE_MDP0(CLK_MDP1_MDP_TDSHP0, "mdp1_mdp_tdshp0", "mdp", 11),
+ GATE_MDP0(CLK_MDP1_MDP_COLOR0, "mdp1_mdp_color0", "mdp", 12),
+ GATE_MDP0(CLK_MDP1_MDP_WROT0, "mdp1_mdp_wrot0", "mdp", 13),
+ GATE_MDP0(CLK_MDP1_MDP_WROT1, "mdp1_mdp_wrot1", "mdp", 14),
+ GATE_MDP0(CLK_MDP1_MDP_WROT2, "mdp1_mdp_wrot2", "mdp", 15),
+ GATE_MDP0(CLK_MDP1_MDP_FAKE_ENG0, "mdp1_mdp_fake_eng0", "mdp", 16),
+ GATE_MDP0(CLK_MDP1_APB_DB, "mdp1_apb_db", "mdp", 17),
+ GATE_MDP0(CLK_MDP1_MDP_DLI_ASYNC0, "mdp1_mdp_dli_async0", "mdp", 18),
+ GATE_MDP0(CLK_MDP1_MDP_DLI_ASYNC1, "mdp1_mdp_dli_async1", "mdp", 19),
+ GATE_MDP0(CLK_MDP1_MDP_DLO_ASYNC0, "mdp1_mdp_dlo_async0", "mdp", 20),
+ GATE_MDP0(CLK_MDP1_MDP_DLO_ASYNC1, "mdp1_mdp_dlo_async1", "mdp", 21),
+ GATE_MDP0(CLK_MDP1_MDP_DLI_ASYNC2, "mdp1_mdp_dli_async2", "mdp", 22),
+ GATE_MDP0(CLK_MDP1_MDP_DLO_ASYNC2, "mdp1_mdp_dlo_async2", "mdp", 23),
+ GATE_MDP0(CLK_MDP1_MDP_DLO_ASYNC3, "mdp1_mdp_dlo_async3", "mdp", 24),
+ GATE_MDP0(CLK_MDP1_IMG_DL_ASYNC0, "mdp1_img_dl_async0", "mdp", 25),
+ GATE_MDP0(CLK_MDP1_MDP_RROT0, "mdp1_mdp_rrot0", "mdp", 26),
+ GATE_MDP0(CLK_MDP1_MDP_MERGE0, "mdp1_mdp_merge0", "mdp", 27),
+ GATE_MDP0(CLK_MDP1_MDP_C3D0, "mdp1_mdp_c3d0", "mdp", 28),
+ GATE_MDP0(CLK_MDP1_MDP_FG0, "mdp1_mdp_fg0", "mdp", 29),
+ GATE_MDP0(CLK_MDP1_MDP_CLA2, "mdp1_mdp_cla2", "mdp", 30),
+ GATE_MDP0(CLK_MDP1_MDP_DLO_ASYNC4, "mdp1_mdp_dlo_async4", "mdp", 31),
+ /* MDP1-1 */
+ GATE_MDP1(CLK_MDP1_VPP_RSZ0, "mdp1_vpp_rsz0", "mdp", 0),
+ GATE_MDP1(CLK_MDP1_VPP_RSZ1, "mdp1_vpp_rsz1", "mdp", 1),
+ GATE_MDP1(CLK_MDP1_MDP_DLO_ASYNC5, "mdp1_mdp_dlo_async5", "mdp", 2),
+ GATE_MDP1(CLK_MDP1_IMG0, "mdp1_img0", "mdp", 3),
+ GATE_MDP1(CLK_MDP1_F26M, "mdp1_f26m", "clk26m", 27),
+ /* MDP1-2 */
+ GATE_MDP2(CLK_MDP1_IMG_DL_RELAY0, "mdp1_img_dl_relay0", "mdp", 0),
+ GATE_MDP2(CLK_MDP1_IMG_DL_RELAY1, "mdp1_img_dl_relay1", "mdp", 8),
+};
+
+static const struct mtk_clk_desc mdp1_mcd = {
+ .clks = mdp1_clks,
+ .num_clks = ARRAY_SIZE(mdp1_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct mtk_gate mdp_clks[] = {
+ /* MDP0 */
+ GATE_MDP0(CLK_MDP_MDP_MUTEX0, "mdp_mdp_mutex0", "mdp", 0),
+ GATE_MDP0(CLK_MDP_SMI0, "mdp_smi0", "mdp", 1),
+ GATE_MDP0(CLK_MDP_APB_BUS, "mdp_apb_bus", "mdp", 2),
+ GATE_MDP0(CLK_MDP_MDP_RDMA0, "mdp_mdp_rdma0", "mdp", 3),
+ GATE_MDP0(CLK_MDP_MDP_RDMA1, "mdp_mdp_rdma1", "mdp", 4),
+ GATE_MDP0(CLK_MDP_MDP_RDMA2, "mdp_mdp_rdma2", "mdp", 5),
+ GATE_MDP0(CLK_MDP_MDP_BIRSZ0, "mdp_mdp_birsz0", "mdp", 6),
+ GATE_MDP0(CLK_MDP_MDP_HDR0, "mdp_mdp_hdr0", "mdp", 7),
+ GATE_MDP0(CLK_MDP_MDP_AAL0, "mdp_mdp_aal0", "mdp", 8),
+ GATE_MDP0(CLK_MDP_MDP_RSZ0, "mdp_mdp_rsz0", "mdp", 9),
+ GATE_MDP0(CLK_MDP_MDP_RSZ2, "mdp_mdp_rsz2", "mdp", 10),
+ GATE_MDP0(CLK_MDP_MDP_TDSHP0, "mdp_mdp_tdshp0", "mdp", 11),
+ GATE_MDP0(CLK_MDP_MDP_COLOR0, "mdp_mdp_color0", "mdp", 12),
+ GATE_MDP0(CLK_MDP_MDP_WROT0, "mdp_mdp_wrot0", "mdp", 13),
+ GATE_MDP0(CLK_MDP_MDP_WROT1, "mdp_mdp_wrot1", "mdp", 14),
+ GATE_MDP0(CLK_MDP_MDP_WROT2, "mdp_mdp_wrot2", "mdp", 15),
+ GATE_MDP0(CLK_MDP_MDP_FAKE_ENG0, "mdp_mdp_fake_eng0", "mdp", 16),
+ GATE_MDP0(CLK_MDP_APB_DB, "mdp_apb_db", "mdp", 17),
+ GATE_MDP0(CLK_MDP_MDP_DLI_ASYNC0, "mdp_mdp_dli_async0", "mdp", 18),
+ GATE_MDP0(CLK_MDP_MDP_DLI_ASYNC1, "mdp_mdp_dli_async1", "mdp", 19),
+ GATE_MDP0(CLK_MDP_MDP_DLO_ASYNC0, "mdp_mdp_dlo_async0", "mdp", 20),
+ GATE_MDP0(CLK_MDP_MDP_DLO_ASYNC1, "mdp_mdp_dlo_async1", "mdp", 21),
+ GATE_MDP0(CLK_MDP_MDP_DLI_ASYNC2, "mdp_mdp_dli_async2", "mdp", 22),
+ GATE_MDP0(CLK_MDP_MDP_DLO_ASYNC2, "mdp_mdp_dlo_async2", "mdp", 23),
+ GATE_MDP0(CLK_MDP_MDP_DLO_ASYNC3, "mdp_mdp_dlo_async3", "mdp", 24),
+ GATE_MDP0(CLK_MDP_IMG_DL_ASYNC0, "mdp_img_dl_async0", "mdp", 25),
+ GATE_MDP0(CLK_MDP_MDP_RROT0, "mdp_mdp_rrot0", "mdp", 26),
+ GATE_MDP0(CLK_MDP_MDP_MERGE0, "mdp_mdp_merge0", "mdp", 27),
+ GATE_MDP0(CLK_MDP_MDP_C3D0, "mdp_mdp_c3d0", "mdp", 28),
+ GATE_MDP0(CLK_MDP_MDP_FG0, "mdp_mdp_fg0", "mdp", 29),
+ GATE_MDP0(CLK_MDP_MDP_CLA2, "mdp_mdp_cla2", "mdp", 30),
+ GATE_MDP0(CLK_MDP_MDP_DLO_ASYNC4, "mdp_mdp_dlo_async4", "mdp", 31),
+ /* MDP1 */
+ GATE_MDP1(CLK_MDP_VPP_RSZ0, "mdp_vpp_rsz0", "mdp", 0),
+ GATE_MDP1(CLK_MDP_VPP_RSZ1, "mdp_vpp_rsz1", "mdp", 1),
+ GATE_MDP1(CLK_MDP_MDP_DLO_ASYNC5, "mdp_mdp_dlo_async5", "mdp", 2),
+ GATE_MDP1(CLK_MDP_IMG0, "mdp_img0", "mdp", 3),
+ GATE_MDP1(CLK_MDP_F26M, "mdp_f26m", "clk26m", 27),
+ /* MDP2 */
+ GATE_MDP2(CLK_MDP_IMG_DL_RELAY0, "mdp_img_dl_relay0", "mdp", 0),
+ GATE_MDP2(CLK_MDP_IMG_DL_RELAY1, "mdp_img_dl_relay1", "mdp", 8),
+};
+
+static const struct mtk_clk_desc mdp_mcd = {
+ .clks = mdp_clks,
+ .num_clks = ARRAY_SIZE(mdp_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct of_device_id of_match_clk_mt8196_mdpsys[] = {
+ { .compatible = "mediatek,mt8196-mdpsys0", .data = &mdp_mcd },
+ { .compatible = "mediatek,mt8196-mdpsys1", .data = &mdp1_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_mdpsys);
+
+static struct platform_driver clk_mt8196_mdpsys_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-mdpsys",
+ .of_match_table = of_match_clk_mt8196_mdpsys,
+ },
+};
+module_platform_driver(clk_mt8196_mdpsys_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 Multimedia Data Path clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-mfg.c b/drivers/clk/mediatek/clk-mt8196-mfg.c
new file mode 100644
index 000000000000..ae1eb9de79ae
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-mfg.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-pll.h"
+
+#define MFGPLL_CON0 0x008
+#define MFGPLL_CON1 0x00c
+#define MFGPLL_CON2 0x010
+#define MFGPLL_CON3 0x014
+#define MFGPLL_SC0_CON0 0x008
+#define MFGPLL_SC0_CON1 0x00c
+#define MFGPLL_SC0_CON2 0x010
+#define MFGPLL_SC0_CON3 0x014
+#define MFGPLL_SC1_CON0 0x008
+#define MFGPLL_SC1_CON1 0x00c
+#define MFGPLL_SC1_CON2 0x010
+#define MFGPLL_SC1_CON3 0x014
+
+#define MT8196_PLL_FMAX (3800UL * MHZ)
+#define MT8196_PLL_FMIN (1500UL * MHZ)
+#define MT8196_INTEGER_BITS 8
+
+#define PLL(_id, _name, _reg, _en_reg, _en_mask, _pll_en_bit, \
+ _flags, _rst_bar_mask, \
+ _pd_reg, _pd_shift, _tuner_reg, \
+ _tuner_en_reg, _tuner_en_bit, \
+ _pcw_reg, _pcw_shift, _pcwbits) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .en_reg = _en_reg, \
+ .en_mask = _en_mask, \
+ .pll_en_bit = _pll_en_bit, \
+ .flags = _flags, \
+ .rst_bar_mask = _rst_bar_mask, \
+ .fmax = MT8196_PLL_FMAX, \
+ .fmin = MT8196_PLL_FMIN, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .tuner_en_reg = _tuner_en_reg, \
+ .tuner_en_bit = _tuner_en_bit, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = MT8196_INTEGER_BITS, \
+ }
+
+static const struct mtk_pll_data mfg_ao_plls[] = {
+ PLL(CLK_MFG_AO_MFGPLL, "mfgpll", MFGPLL_CON0, MFGPLL_CON0, 0, 0, 0,
+ BIT(0), MFGPLL_CON1, 24, 0, 0, 0,
+ MFGPLL_CON1, 0, 22),
+};
+
+static const struct mtk_pll_data mfgsc0_ao_plls[] = {
+ PLL(CLK_MFGSC0_AO_MFGPLL_SC0, "mfgpll-sc0", MFGPLL_SC0_CON0,
+ MFGPLL_SC0_CON0, 0, 0, 0, BIT(0), MFGPLL_SC0_CON1, 24, 0, 0, 0,
+ MFGPLL_SC0_CON1, 0, 22),
+};
+
+static const struct mtk_pll_data mfgsc1_ao_plls[] = {
+ PLL(CLK_MFGSC1_AO_MFGPLL_SC1, "mfgpll-sc1", MFGPLL_SC1_CON0,
+ MFGPLL_SC1_CON0, 0, 0, 0, BIT(0), MFGPLL_SC1_CON1, 24, 0, 0, 0,
+ MFGPLL_SC1_CON1, 0, 22),
+};
+
+static const struct of_device_id of_match_clk_mt8196_mfg[] = {
+ { .compatible = "mediatek,mt8196-mfgpll-pll-ctrl",
+ .data = &mfg_ao_plls },
+ { .compatible = "mediatek,mt8196-mfgpll-sc0-pll-ctrl",
+ .data = &mfgsc0_ao_plls },
+ { .compatible = "mediatek,mt8196-mfgpll-sc1-pll-ctrl",
+ .data = &mfgsc1_ao_plls },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_mfg);
+
+static int clk_mt8196_mfg_probe(struct platform_device *pdev)
+{
+ const struct mtk_pll_data *plls;
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
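+	/* Each MFGPLL control block exposes a single PLL */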
+ const int num_plls = 1;
+ int r;
+
+ plls = of_device_get_match_data(&pdev->dev);
+ if (!plls)
+ return -EINVAL;
+
+ clk_data = mtk_alloc_clk_data(num_plls);
+ if (!clk_data)
+ return -ENOMEM;
+
+ r = mtk_clk_register_plls(node, plls, num_plls, clk_data);
+ if (r)
+ goto free_clk_data;
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (r)
+ goto unregister_plls;
+
+ platform_set_drvdata(pdev, clk_data);
+
+ return r;
+
+unregister_plls:
+ mtk_clk_unregister_plls(plls, num_plls, clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+
+ return r;
+}
+
+static void clk_mt8196_mfg_remove(struct platform_device *pdev)
+{
+ const struct mtk_pll_data *plls = of_device_get_match_data(&pdev->dev);
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+ struct device_node *node = pdev->dev.of_node;
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_plls(plls, 1, clk_data);
+ mtk_free_clk_data(clk_data);
+}
+
+static struct platform_driver clk_mt8196_mfg_drv = {
+ .probe = clk_mt8196_mfg_probe,
+ .remove = clk_mt8196_mfg_remove,
+ .driver = {
+ .name = "clk-mt8196-mfg",
+ .of_match_table = of_match_clk_mt8196_mfg,
+ },
+};
+module_platform_driver(clk_mt8196_mfg_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 GPU mfg clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-ovl0.c b/drivers/clk/mediatek/clk-mt8196-ovl0.c
new file mode 100644
index 000000000000..d4affd14d2c4
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-ovl0.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs ovl0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs ovl0_hwv_regs = {
+ .set_ofs = 0x0060,
+ .clr_ofs = 0x0064,
+ .sta_ofs = 0x2c30,
+};
+
+static const struct mtk_gate_regs ovl1_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+static const struct mtk_gate_regs ovl1_hwv_regs = {
+ .set_ofs = 0x0068,
+ .clr_ofs = 0x006c,
+ .sta_ofs = 0x2c34,
+};
+
+#define GATE_HWV_OVL0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ovl0_cg_regs, \
+ .hwv_regs = &ovl0_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_OVL1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ovl1_cg_regs, \
+ .hwv_regs = &ovl1_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+static const struct mtk_gate ovl_clks[] = {
+ /* OVL0 */
+ GATE_HWV_OVL0(CLK_OVLSYS_CONFIG, "ovlsys_config", "disp", 0),
+ GATE_HWV_OVL0(CLK_OVL_FAKE_ENG0, "ovl_fake_eng0", "disp", 1),
+ GATE_HWV_OVL0(CLK_OVL_FAKE_ENG1, "ovl_fake_eng1", "disp", 2),
+ GATE_HWV_OVL0(CLK_OVL_MUTEX0, "ovl_mutex0", "disp", 3),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA0, "ovl_exdma0", "disp", 4),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA1, "ovl_exdma1", "disp", 5),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA2, "ovl_exdma2", "disp", 6),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA3, "ovl_exdma3", "disp", 7),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA4, "ovl_exdma4", "disp", 8),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA5, "ovl_exdma5", "disp", 9),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA6, "ovl_exdma6", "disp", 10),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA7, "ovl_exdma7", "disp", 11),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA8, "ovl_exdma8", "disp", 12),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA9, "ovl_exdma9", "disp", 13),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER0, "ovl_blender0", "disp", 14),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER1, "ovl_blender1", "disp", 15),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER2, "ovl_blender2", "disp", 16),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER3, "ovl_blender3", "disp", 17),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER4, "ovl_blender4", "disp", 18),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER5, "ovl_blender5", "disp", 19),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER6, "ovl_blender6", "disp", 20),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER7, "ovl_blender7", "disp", 21),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER8, "ovl_blender8", "disp", 22),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER9, "ovl_blender9", "disp", 23),
+ GATE_HWV_OVL0(CLK_OVL_OUTPROC0, "ovl_outproc0", "disp", 24),
+ GATE_HWV_OVL0(CLK_OVL_OUTPROC1, "ovl_outproc1", "disp", 25),
+ GATE_HWV_OVL0(CLK_OVL_OUTPROC2, "ovl_outproc2", "disp", 26),
+ GATE_HWV_OVL0(CLK_OVL_OUTPROC3, "ovl_outproc3", "disp", 27),
+ GATE_HWV_OVL0(CLK_OVL_OUTPROC4, "ovl_outproc4", "disp", 28),
+ GATE_HWV_OVL0(CLK_OVL_OUTPROC5, "ovl_outproc5", "disp", 29),
+ GATE_HWV_OVL0(CLK_OVL_MDP_RSZ0, "ovl_mdp_rsz0", "disp", 30),
+ GATE_HWV_OVL0(CLK_OVL_MDP_RSZ1, "ovl_mdp_rsz1", "disp", 31),
+ /* OVL1 */
+ GATE_HWV_OVL1(CLK_OVL_DISP_WDMA0, "ovl_disp_wdma0", "disp", 0),
+ GATE_HWV_OVL1(CLK_OVL_DISP_WDMA1, "ovl_disp_wdma1", "disp", 1),
+ GATE_HWV_OVL1(CLK_OVL_UFBC_WDMA0, "ovl_ufbc_wdma0", "disp", 2),
+ GATE_HWV_OVL1(CLK_OVL_MDP_RDMA0, "ovl_mdp_rdma0", "disp", 3),
+ GATE_HWV_OVL1(CLK_OVL_MDP_RDMA1, "ovl_mdp_rdma1", "disp", 4),
+ GATE_HWV_OVL1(CLK_OVL_BWM0, "ovl_bwm0", "disp", 5),
+ GATE_HWV_OVL1(CLK_OVL_DLI0, "ovl_dli0", "disp", 6),
+ GATE_HWV_OVL1(CLK_OVL_DLI1, "ovl_dli1", "disp", 7),
+ GATE_HWV_OVL1(CLK_OVL_DLI2, "ovl_dli2", "disp", 8),
+ GATE_HWV_OVL1(CLK_OVL_DLI3, "ovl_dli3", "disp", 9),
+ GATE_HWV_OVL1(CLK_OVL_DLI4, "ovl_dli4", "disp", 10),
+ GATE_HWV_OVL1(CLK_OVL_DLI5, "ovl_dli5", "disp", 11),
+ GATE_HWV_OVL1(CLK_OVL_DLI6, "ovl_dli6", "disp", 12),
+ GATE_HWV_OVL1(CLK_OVL_DLI7, "ovl_dli7", "disp", 13),
+ GATE_HWV_OVL1(CLK_OVL_DLI8, "ovl_dli8", "disp", 14),
+ GATE_HWV_OVL1(CLK_OVL_DLO0, "ovl_dlo0", "disp", 15),
+ GATE_HWV_OVL1(CLK_OVL_DLO1, "ovl_dlo1", "disp", 16),
+ GATE_HWV_OVL1(CLK_OVL_DLO2, "ovl_dlo2", "disp", 17),
+ GATE_HWV_OVL1(CLK_OVL_DLO3, "ovl_dlo3", "disp", 18),
+ GATE_HWV_OVL1(CLK_OVL_DLO4, "ovl_dlo4", "disp", 19),
+ GATE_HWV_OVL1(CLK_OVL_DLO5, "ovl_dlo5", "disp", 20),
+ GATE_HWV_OVL1(CLK_OVL_DLO6, "ovl_dlo6", "disp", 21),
+ GATE_HWV_OVL1(CLK_OVL_DLO7, "ovl_dlo7", "disp", 22),
+ GATE_HWV_OVL1(CLK_OVL_DLO8, "ovl_dlo8", "disp", 23),
+ GATE_HWV_OVL1(CLK_OVL_DLO9, "ovl_dlo9", "disp", 24),
+ GATE_HWV_OVL1(CLK_OVL_DLO10, "ovl_dlo10", "disp", 25),
+ GATE_HWV_OVL1(CLK_OVL_DLO11, "ovl_dlo11", "disp", 26),
+ GATE_HWV_OVL1(CLK_OVL_DLO12, "ovl_dlo12", "disp", 27),
+ GATE_HWV_OVL1(CLK_OVLSYS_RELAY0, "ovlsys_relay0", "disp", 28),
+ GATE_HWV_OVL1(CLK_OVL_INLINEROT0, "ovl_inlinerot0", "disp", 29),
+ GATE_HWV_OVL1(CLK_OVL_SMI, "ovl_smi", "disp", 30),
+};
+
+static const struct mtk_clk_desc ovl_mcd = {
+ .clks = ovl_clks,
+ .num_clks = ARRAY_SIZE(ovl_clks),
+};
+
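+/*
+ * No OF match table: the device is matched by name via the
+ * platform_device_id table, so it is presumably created by a parent
+ * driver rather than instantiated directly from the devicetree.
+ */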
+static const struct platform_device_id clk_mt8196_ovl0_id_table[] = {
+ { .name = "clk-mt8196-ovl0", .driver_data = (kernel_ulong_t)&ovl_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt8196_ovl0_id_table);
+
+static struct platform_driver clk_mt8196_ovl0_drv = {
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
+ .driver = {
+ .name = "clk-mt8196-ovl0",
+ },
+ .id_table = clk_mt8196_ovl0_id_table,
+};
+module_platform_driver(clk_mt8196_ovl0_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 ovl0 clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-ovl1.c b/drivers/clk/mediatek/clk-mt8196-ovl1.c
new file mode 100644
index 000000000000..c8843d0d3ede
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-ovl1.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs ovl10_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs ovl10_hwv_regs = {
+ .set_ofs = 0x0050,
+ .clr_ofs = 0x0054,
+ .sta_ofs = 0x2c28,
+};
+
+static const struct mtk_gate_regs ovl11_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+static const struct mtk_gate_regs ovl11_hwv_regs = {
+ .set_ofs = 0x0058,
+ .clr_ofs = 0x005c,
+ .sta_ofs = 0x2c2c,
+};
+
+#define GATE_HWV_OVL10(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ovl10_cg_regs, \
+ .hwv_regs = &ovl10_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_OVL11(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ovl11_cg_regs, \
+ .hwv_regs = &ovl11_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
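+/* Second OVL instance: same layout as ovl0, with its own CG/HWV banks. */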
+static const struct mtk_gate ovl1_clks[] = {
+ /* OVL10 */
+ GATE_HWV_OVL10(CLK_OVL1_OVLSYS_CONFIG, "ovl1_ovlsys_config", "disp", 0),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_FAKE_ENG0, "ovl1_ovl_fake_eng0", "disp", 1),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_FAKE_ENG1, "ovl1_ovl_fake_eng1", "disp", 2),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_MUTEX0, "ovl1_ovl_mutex0", "disp", 3),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA0, "ovl1_ovl_exdma0", "disp", 4),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA1, "ovl1_ovl_exdma1", "disp", 5),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA2, "ovl1_ovl_exdma2", "disp", 6),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA3, "ovl1_ovl_exdma3", "disp", 7),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA4, "ovl1_ovl_exdma4", "disp", 8),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA5, "ovl1_ovl_exdma5", "disp", 9),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA6, "ovl1_ovl_exdma6", "disp", 10),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA7, "ovl1_ovl_exdma7", "disp", 11),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA8, "ovl1_ovl_exdma8", "disp", 12),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA9, "ovl1_ovl_exdma9", "disp", 13),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER0, "ovl1_ovl_blender0", "disp", 14),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER1, "ovl1_ovl_blender1", "disp", 15),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER2, "ovl1_ovl_blender2", "disp", 16),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER3, "ovl1_ovl_blender3", "disp", 17),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER4, "ovl1_ovl_blender4", "disp", 18),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER5, "ovl1_ovl_blender5", "disp", 19),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER6, "ovl1_ovl_blender6", "disp", 20),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER7, "ovl1_ovl_blender7", "disp", 21),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER8, "ovl1_ovl_blender8", "disp", 22),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER9, "ovl1_ovl_blender9", "disp", 23),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_OUTPROC0, "ovl1_ovl_outproc0", "disp", 24),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_OUTPROC1, "ovl1_ovl_outproc1", "disp", 25),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_OUTPROC2, "ovl1_ovl_outproc2", "disp", 26),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_OUTPROC3, "ovl1_ovl_outproc3", "disp", 27),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_OUTPROC4, "ovl1_ovl_outproc4", "disp", 28),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_OUTPROC5, "ovl1_ovl_outproc5", "disp", 29),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_MDP_RSZ0, "ovl1_ovl_mdp_rsz0", "disp", 30),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_MDP_RSZ1, "ovl1_ovl_mdp_rsz1", "disp", 31),
+ /* OVL11 */
+ GATE_HWV_OVL11(CLK_OVL1_OVL_DISP_WDMA0, "ovl1_ovl_disp_wdma0", "disp", 0),
+ GATE_HWV_OVL11(CLK_OVL1_OVL_DISP_WDMA1, "ovl1_ovl_disp_wdma1", "disp", 1),
+ GATE_HWV_OVL11(CLK_OVL1_OVL_UFBC_WDMA0, "ovl1_ovl_ufbc_wdma0", "disp", 2),
+ GATE_HWV_OVL11(CLK_OVL1_OVL_MDP_RDMA0, "ovl1_ovl_mdp_rdma0", "disp", 3),
+ GATE_HWV_OVL11(CLK_OVL1_OVL_MDP_RDMA1, "ovl1_ovl_mdp_rdma1", "disp", 4),
+ GATE_HWV_OVL11(CLK_OVL1_OVL_BWM0, "ovl1_ovl_bwm0", "disp", 5),
+ GATE_HWV_OVL11(CLK_OVL1_DLI0, "ovl1_dli0", "disp", 6),
+ GATE_HWV_OVL11(CLK_OVL1_DLI1, "ovl1_dli1", "disp", 7),
+ GATE_HWV_OVL11(CLK_OVL1_DLI2, "ovl1_dli2", "disp", 8),
+ GATE_HWV_OVL11(CLK_OVL1_DLI3, "ovl1_dli3", "disp", 9),
+ GATE_HWV_OVL11(CLK_OVL1_DLI4, "ovl1_dli4", "disp", 10),
+ GATE_HWV_OVL11(CLK_OVL1_DLI5, "ovl1_dli5", "disp", 11),
+ GATE_HWV_OVL11(CLK_OVL1_DLI6, "ovl1_dli6", "disp", 12),
+ GATE_HWV_OVL11(CLK_OVL1_DLI7, "ovl1_dli7", "disp", 13),
+ GATE_HWV_OVL11(CLK_OVL1_DLI8, "ovl1_dli8", "disp", 14),
+ GATE_HWV_OVL11(CLK_OVL1_DLO0, "ovl1_dlo0", "disp", 15),
+ GATE_HWV_OVL11(CLK_OVL1_DLO1, "ovl1_dlo1", "disp", 16),
+ GATE_HWV_OVL11(CLK_OVL1_DLO2, "ovl1_dlo2", "disp", 17),
+ GATE_HWV_OVL11(CLK_OVL1_DLO3, "ovl1_dlo3", "disp", 18),
+ GATE_HWV_OVL11(CLK_OVL1_DLO4, "ovl1_dlo4", "disp", 19),
+ GATE_HWV_OVL11(CLK_OVL1_DLO5, "ovl1_dlo5", "disp", 20),
+ GATE_HWV_OVL11(CLK_OVL1_DLO6, "ovl1_dlo6", "disp", 21),
+ GATE_HWV_OVL11(CLK_OVL1_DLO7, "ovl1_dlo7", "disp", 22),
+ GATE_HWV_OVL11(CLK_OVL1_DLO8, "ovl1_dlo8", "disp", 23),
+ GATE_HWV_OVL11(CLK_OVL1_DLO9, "ovl1_dlo9", "disp", 24),
+ GATE_HWV_OVL11(CLK_OVL1_DLO10, "ovl1_dlo10", "disp", 25),
+ GATE_HWV_OVL11(CLK_OVL1_DLO11, "ovl1_dlo11", "disp", 26),
+ GATE_HWV_OVL11(CLK_OVL1_DLO12, "ovl1_dlo12", "disp", 27),
+ GATE_HWV_OVL11(CLK_OVL1_OVLSYS_RELAY0, "ovl1_ovlsys_relay0", "disp", 28),
+ GATE_HWV_OVL11(CLK_OVL1_OVL_INLINEROT0, "ovl1_ovl_inlinerot0", "disp", 29),
+ GATE_HWV_OVL11(CLK_OVL1_SMI, "ovl1_smi", "disp", 30),
+};
+
+static const struct mtk_clk_desc ovl1_mcd = {
+ .clks = ovl1_clks,
+ .num_clks = ARRAY_SIZE(ovl1_clks),
+};
+
+static const struct platform_device_id clk_mt8196_ovl1_id_table[] = {
+ { .name = "clk-mt8196-ovl1", .driver_data = (kernel_ulong_t)&ovl1_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt8196_ovl1_id_table);
+
+static struct platform_driver clk_mt8196_ovl1_drv = {
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
+ .driver = {
+ .name = "clk-mt8196-ovl1",
+ },
+ .id_table = clk_mt8196_ovl1_id_table,
+};
+module_platform_driver(clk_mt8196_ovl1_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 ovl1 clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-peri_ao.c b/drivers/clk/mediatek/clk-mt8196-peri_ao.c
new file mode 100644
index 000000000000..f227a86c5d60
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-peri_ao.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs peri_ao0_cg_regs = {
+ .set_ofs = 0x24,
+ .clr_ofs = 0x28,
+ .sta_ofs = 0x10,
+};
+
+static const struct mtk_gate_regs peri_ao1_cg_regs = {
+ .set_ofs = 0x2c,
+ .clr_ofs = 0x30,
+ .sta_ofs = 0x14,
+};
+
+static const struct mtk_gate_regs peri_ao1_hwv_regs = {
+ .set_ofs = 0x0008,
+ .clr_ofs = 0x000c,
+ .sta_ofs = 0x2c04,
+};
+
+static const struct mtk_gate_regs peri_ao2_cg_regs = {
+ .set_ofs = 0x34,
+ .clr_ofs = 0x38,
+ .sta_ofs = 0x18,
+};
+
+#define GATE_PERI_AO0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri_ao0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_PERI_AO1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri_ao1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_HWV_PERI_AO1(_id, _name, _parent, _shift) {\
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri_ao1_cg_regs, \
+ .hwv_regs = &peri_ao1_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ }
+
+#define GATE_PERI_AO2(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri_ao2_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
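+/*
+ * Only the PERI_AO1 SPI bus clocks go through the hardware voter; the
+ * remaining peripheral gates are plain SET/CLR clock gates. Note the
+ * chained parents: "peri_ao_flashif_flash" is clocked from
+ * "peri_ao_flashif_27m", which in turn derives from the "sflash" mux.
+ */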
+static const struct mtk_gate peri_ao_clks[] = {
+ /* PERI_AO0 */
+ GATE_PERI_AO0(CLK_PERI_AO_UART0_BCLK, "peri_ao_uart0_bclk", "uart", 0),
+ GATE_PERI_AO0(CLK_PERI_AO_UART1_BCLK, "peri_ao_uart1_bclk", "uart", 1),
+ GATE_PERI_AO0(CLK_PERI_AO_UART2_BCLK, "peri_ao_uart2_bclk", "uart", 2),
+ GATE_PERI_AO0(CLK_PERI_AO_UART3_BCLK, "peri_ao_uart3_bclk", "uart", 3),
+ GATE_PERI_AO0(CLK_PERI_AO_UART4_BCLK, "peri_ao_uart4_bclk", "uart", 4),
+ GATE_PERI_AO0(CLK_PERI_AO_UART5_BCLK, "peri_ao_uart5_bclk", "uart", 5),
+ GATE_PERI_AO0(CLK_PERI_AO_PWM_X16W_HCLK, "peri_ao_pwm_x16w", "p_axi", 12),
+ GATE_PERI_AO0(CLK_PERI_AO_PWM_X16W_BCLK, "peri_ao_pwm_x16w_bclk", "pwm", 13),
+ GATE_PERI_AO0(CLK_PERI_AO_PWM_PWM_BCLK0, "peri_ao_pwm_pwm_bclk0", "pwm", 14),
+ GATE_PERI_AO0(CLK_PERI_AO_PWM_PWM_BCLK1, "peri_ao_pwm_pwm_bclk1", "pwm", 15),
+ GATE_PERI_AO0(CLK_PERI_AO_PWM_PWM_BCLK2, "peri_ao_pwm_pwm_bclk2", "pwm", 16),
+ GATE_PERI_AO0(CLK_PERI_AO_PWM_PWM_BCLK3, "peri_ao_pwm_pwm_bclk3", "pwm", 17),
+ /* PERI_AO1 */
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI0_BCLK, "peri_ao_spi0_bclk", "spi0_b", 0),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI1_BCLK, "peri_ao_spi1_bclk", "spi1_b", 2),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI2_BCLK, "peri_ao_spi2_bclk", "spi2_b", 3),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI3_BCLK, "peri_ao_spi3_bclk", "spi3_b", 4),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI4_BCLK, "peri_ao_spi4_bclk", "spi4_b", 5),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI5_BCLK, "peri_ao_spi5_bclk", "spi5_b", 6),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI6_BCLK, "peri_ao_spi6_bclk", "spi6_b", 7),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI7_BCLK, "peri_ao_spi7_bclk", "spi7_b", 8),
+ GATE_PERI_AO1(CLK_PERI_AO_FLASHIF_FLASH, "peri_ao_flashif_flash", "peri_ao_flashif_27m",
+ 18),
+ GATE_PERI_AO1(CLK_PERI_AO_FLASHIF_27M, "peri_ao_flashif_27m", "sflash", 19),
+ GATE_PERI_AO1(CLK_PERI_AO_FLASHIF_DRAM, "peri_ao_flashif_dram", "p_axi", 20),
+ GATE_PERI_AO1(CLK_PERI_AO_FLASHIF_AXI, "peri_ao_flashif_axi", "peri_ao_flashif_dram", 21),
+ GATE_PERI_AO1(CLK_PERI_AO_FLASHIF_BCLK, "peri_ao_flashif_bclk", "p_axi", 22),
+ GATE_PERI_AO1(CLK_PERI_AO_AP_DMA_X32W_BCLK, "peri_ao_ap_dma_x32w_bclk", "p_axi", 26),
+ /* PERI_AO2 */
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC1_MSDC_SRC, "peri_ao_msdc1_msdc_src", "msdc30_1", 1),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC1_HCLK, "peri_ao_msdc1", "peri_ao_msdc1_axi", 2),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC1_AXI, "peri_ao_msdc1_axi", "p_axi", 3),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC1_HCLK_WRAP, "peri_ao_msdc1_h_wrap", "peri_ao_msdc1", 4),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC2_MSDC_SRC, "peri_ao_msdc2_msdc_src", "msdc30_2", 10),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC2_HCLK, "peri_ao_msdc2", "peri_ao_msdc2_axi", 11),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC2_AXI, "peri_ao_msdc2_axi", "p_axi", 12),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC2_HCLK_WRAP, "peri_ao_msdc2_h_wrap", "peri_ao_msdc2", 13),
+};
+
+static const struct mtk_clk_desc peri_ao_mcd = {
+ .clks = peri_ao_clks,
+ .num_clks = ARRAY_SIZE(peri_ao_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8196_peri_ao[] = {
+ { .compatible = "mediatek,mt8196-pericfg-ao", .data = &peri_ao_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_peri_ao);
+
+static struct platform_driver clk_mt8196_peri_ao_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-peri-ao",
+ .of_match_table = of_match_clk_mt8196_peri_ao,
+ },
+};
+
+MODULE_DESCRIPTION("MediaTek MT8196 pericfg_ao clock controller driver");
+module_platform_driver(clk_mt8196_peri_ao_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-pextp.c b/drivers/clk/mediatek/clk-mt8196-pextp.c
new file mode 100644
index 000000000000..3e505ecc4b6e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-pextp.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+#include <dt-bindings/reset/mediatek,mt8196-resets.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+#include "reset.h"
+
+#define MT8196_PEXTP_RST0_SET_OFFSET 0x8
+
+static const struct mtk_gate_regs pext_cg_regs = {
+ .set_ofs = 0x18,
+ .clr_ofs = 0x1c,
+ .sta_ofs = 0x14,
+};
+
+#define GATE_PEXT(_id, _name, _parent, _shift) {\
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &pext_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr,\
+ }
+
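+/*
+ * PEXTP gate tree: the MAC TL clocks feed from the top-level "tl"
+ * muxes, the AXI/AHB bus clocks from the shared UFS/PEXTP bus muxes,
+ * and the reference and low-power clocks run directly from the 26 MHz
+ * crystal.
+ */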
+static const struct mtk_gate pext_clks[] = {
+ GATE_PEXT(CLK_PEXT_PEXTP_MAC_P0_TL, "pext_pm0_tl", "tl", 0),
+ GATE_PEXT(CLK_PEXT_PEXTP_MAC_P0_REF, "pext_pm0_ref", "clk26m", 1),
+ GATE_PEXT(CLK_PEXT_PEXTP_PHY_P0_MCU_BUS, "pext_pp0_mcu_bus", "clk26m", 6),
+ GATE_PEXT(CLK_PEXT_PEXTP_PHY_P0_PEXTP_REF, "pext_pp0_pextp_ref", "clk26m", 7),
+ GATE_PEXT(CLK_PEXT_PEXTP_MAC_P0_AXI_250, "pext_pm0_axi_250", "ufs_pexpt0_mem_sub", 12),
+ GATE_PEXT(CLK_PEXT_PEXTP_MAC_P0_AHB_APB, "pext_pm0_ahb_apb", "ufs_pextp0_axi", 13),
+ GATE_PEXT(CLK_PEXT_PEXTP_MAC_P0_PL_P, "pext_pm0_pl_p", "clk26m", 14),
+ GATE_PEXT(CLK_PEXT_PEXTP_VLP_AO_P0_LP, "pext_pextp_vlp_ao_p0_lp", "clk26m", 19),
+};
+
+static u16 pext_rst_ofs[] = { MT8196_PEXTP_RST0_SET_OFFSET };
+
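+/*
+ * Map from the DT reset identifiers to bit positions in the SET/CLR
+ * reset bank; this keeps the non-contiguous hardware layout used by
+ * PEXTP1 below (bits 0/1 and 8/9) out of the binding.
+ */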
+static u16 pext_rst_idx_map[] = {
+ [MT8196_PEXTP0_RST0_PCIE0_MAC] = 0,
+ [MT8196_PEXTP0_RST0_PCIE0_PHY] = 1,
+};
+
+static const struct mtk_clk_rst_desc pext_rst_desc = {
+ .version = MTK_RST_SET_CLR,
+ .rst_bank_ofs = pext_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(pext_rst_ofs),
+ .rst_idx_map = pext_rst_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(pext_rst_idx_map),
+};
+
+static const struct mtk_clk_desc pext_mcd = {
+ .clks = pext_clks,
+ .num_clks = ARRAY_SIZE(pext_clks),
+ .rst_desc = &pext_rst_desc,
+};
+
+static const struct mtk_gate pext1_clks[] = {
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P1_TL, "pext1_pm1_tl", "tl_p1", 0),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P1_REF, "pext1_pm1_ref", "clk26m", 1),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P2_TL, "pext1_pm2_tl", "tl_p2", 2),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P2_REF, "pext1_pm2_ref", "clk26m", 3),
+ GATE_PEXT(CLK_PEXT1_PEXTP_PHY_P1_MCU_BUS, "pext1_pp1_mcu_bus", "clk26m", 8),
+ GATE_PEXT(CLK_PEXT1_PEXTP_PHY_P1_PEXTP_REF, "pext1_pp1_pextp_ref", "clk26m", 9),
+ GATE_PEXT(CLK_PEXT1_PEXTP_PHY_P2_MCU_BUS, "pext1_pp2_mcu_bus", "clk26m", 10),
+ GATE_PEXT(CLK_PEXT1_PEXTP_PHY_P2_PEXTP_REF, "pext1_pp2_pextp_ref", "clk26m", 11),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P1_AXI_250, "pext1_pm1_axi_250",
+ "pextp1_usb_axi", 16),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P1_AHB_APB, "pext1_pm1_ahb_apb",
+ "pextp1_usb_mem_sub", 17),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P1_PL_P, "pext1_pm1_pl_p", "clk26m", 18),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P2_AXI_250, "pext1_pm2_axi_250",
+ "pextp1_usb_axi", 19),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P2_AHB_APB, "pext1_pm2_ahb_apb",
+ "pextp1_usb_mem_sub", 20),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P2_PL_P, "pext1_pm2_pl_p", "clk26m", 21),
+ GATE_PEXT(CLK_PEXT1_PEXTP_VLP_AO_P1_LP, "pext1_pextp_vlp_ao_p1_lp", "clk26m", 26),
+ GATE_PEXT(CLK_PEXT1_PEXTP_VLP_AO_P2_LP, "pext1_pextp_vlp_ao_p2_lp", "clk26m", 27),
+};
+
+static u16 pext1_rst_idx_map[] = {
+ [MT8196_PEXTP1_RST0_PCIE1_MAC] = 0,
+ [MT8196_PEXTP1_RST0_PCIE1_PHY] = 1,
+ [MT8196_PEXTP1_RST0_PCIE2_MAC] = 8,
+ [MT8196_PEXTP1_RST0_PCIE2_PHY] = 9,
+};
+
+static const struct mtk_clk_rst_desc pext1_rst_desc = {
+ .version = MTK_RST_SET_CLR,
+ .rst_bank_ofs = pext_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(pext_rst_ofs),
+ .rst_idx_map = pext1_rst_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(pext1_rst_idx_map),
+};
+
+static const struct mtk_clk_desc pext1_mcd = {
+ .clks = pext1_clks,
+ .num_clks = ARRAY_SIZE(pext1_clks),
+ .rst_desc = &pext1_rst_desc,
+};
+
+static const struct of_device_id of_match_clk_mt8196_pextp[] = {
+ { .compatible = "mediatek,mt8196-pextp0cfg-ao", .data = &pext_mcd },
+ { .compatible = "mediatek,mt8196-pextp1cfg-ao", .data = &pext1_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_pextp);
+
+static struct platform_driver clk_mt8196_pextp_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-pextp",
+ .of_match_table = of_match_clk_mt8196_pextp,
+ },
+};
+
+module_platform_driver(clk_mt8196_pextp_drv);
+MODULE_DESCRIPTION("MediaTek MT8196 PCIe transmit phy clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-topckgen.c b/drivers/clk/mediatek/clk-mt8196-topckgen.c
new file mode 100644
index 000000000000..6ace11ef6b69
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-topckgen.c
@@ -0,0 +1,985 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-mux.h"
+
+/* MUX SEL REG */
+#define CLK_CFG_UPDATE 0x0004
+#define CLK_CFG_UPDATE1 0x0008
+#define CLK_CFG_UPDATE2 0x000c
+#define CLK_CFG_0 0x0010
+#define CLK_CFG_0_SET 0x0014
+#define CLK_CFG_0_CLR 0x0018
+#define CLK_CFG_1 0x0020
+#define CLK_CFG_1_SET 0x0024
+#define CLK_CFG_1_CLR 0x0028
+#define CLK_CFG_2 0x0030
+#define CLK_CFG_2_SET 0x0034
+#define CLK_CFG_2_CLR 0x0038
+#define CLK_CFG_3 0x0040
+#define CLK_CFG_3_SET 0x0044
+#define CLK_CFG_3_CLR 0x0048
+#define CLK_CFG_4 0x0050
+#define CLK_CFG_4_SET 0x0054
+#define CLK_CFG_4_CLR 0x0058
+#define CLK_CFG_5 0x0060
+#define CLK_CFG_5_SET 0x0064
+#define CLK_CFG_5_CLR 0x0068
+#define CLK_CFG_6 0x0070
+#define CLK_CFG_6_SET 0x0074
+#define CLK_CFG_6_CLR 0x0078
+#define CLK_CFG_7 0x0080
+#define CLK_CFG_7_SET 0x0084
+#define CLK_CFG_7_CLR 0x0088
+#define CLK_CFG_8 0x0090
+#define CLK_CFG_8_SET 0x0094
+#define CLK_CFG_8_CLR 0x0098
+#define CLK_CFG_9 0x00a0
+#define CLK_CFG_9_SET 0x00a4
+#define CLK_CFG_9_CLR 0x00a8
+#define CLK_CFG_10 0x00b0
+#define CLK_CFG_10_SET 0x00b4
+#define CLK_CFG_10_CLR 0x00b8
+#define CLK_CFG_11 0x00c0
+#define CLK_CFG_11_SET 0x00c4
+#define CLK_CFG_11_CLR 0x00c8
+#define CLK_CFG_12 0x00d0
+#define CLK_CFG_12_SET 0x00d4
+#define CLK_CFG_12_CLR 0x00d8
+#define CLK_CFG_13 0x00e0
+#define CLK_CFG_13_SET 0x00e4
+#define CLK_CFG_13_CLR 0x00e8
+#define CLK_CFG_14 0x00f0
+#define CLK_CFG_14_SET 0x00f4
+#define CLK_CFG_14_CLR 0x00f8
+#define CLK_CFG_15 0x0100
+#define CLK_CFG_15_SET 0x0104
+#define CLK_CFG_15_CLR 0x0108
+#define CLK_CFG_16 0x0110
+#define CLK_CFG_16_SET 0x0114
+#define CLK_CFG_16_CLR 0x0118
+#define CLK_CFG_17 0x0120
+#define CLK_CFG_17_SET 0x0124
+#define CLK_CFG_17_CLR 0x0128
+#define CLK_CFG_18 0x0130
+#define CLK_CFG_18_SET 0x0134
+#define CLK_CFG_18_CLR 0x0138
+#define CLK_CFG_19 0x0140
+#define CLK_CFG_19_SET 0x0144
+#define CLK_CFG_19_CLR 0x0148
+#define CLK_AUDDIV_0 0x020c
+#define CLK_FENC_STATUS_MON_0 0x0270
+#define CLK_FENC_STATUS_MON_1 0x0274
+#define CLK_FENC_STATUS_MON_2 0x0278
+
+/* MUX SHIFT */
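+/*
+ * Update-bit positions for the mux selectors. The numbering restarts
+ * from 0 where the bits move on to the next update register:
+ * CLK_CFG_UPDATE1 from SPI3 onwards and CLK_CFG_UPDATE2 from TL
+ * onwards.
+ */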
+#define TOP_MUX_AXI_SHIFT 0
+#define TOP_MUX_MEM_SUB_SHIFT 1
+#define TOP_MUX_IO_NOC_SHIFT 2
+#define TOP_MUX_PERI_AXI_SHIFT 3
+#define TOP_MUX_UFS_PEXTP0_AXI_SHIFT 4
+#define TOP_MUX_PEXTP1_USB_AXI_SHIFT 5
+#define TOP_MUX_PERI_FMEM_SUB_SHIFT 6
+#define TOP_MUX_UFS_PEXPT0_MEM_SUB_SHIFT 7
+#define TOP_MUX_PEXTP1_USB_MEM_SUB_SHIFT 8
+#define TOP_MUX_PERI_NOC_SHIFT 9
+#define TOP_MUX_EMI_N_SHIFT 10
+#define TOP_MUX_EMI_S_SHIFT 11
+#define TOP_MUX_AP2CONN_HOST_SHIFT 14
+#define TOP_MUX_ATB_SHIFT 15
+#define TOP_MUX_CIRQ_SHIFT 16
+#define TOP_MUX_PBUS_156M_SHIFT 17
+#define TOP_MUX_EFUSE_SHIFT 20
+#define TOP_MUX_MCU_L3GIC_SHIFT 21
+#define TOP_MUX_MCU_INFRA_SHIFT 22
+#define TOP_MUX_DSP_SHIFT 23
+#define TOP_MUX_MFG_REF_SHIFT 24
+#define TOP_MUX_MFG_EB_SHIFT 26
+#define TOP_MUX_UART_SHIFT 27
+#define TOP_MUX_SPI0_BCLK_SHIFT 28
+#define TOP_MUX_SPI1_BCLK_SHIFT 29
+#define TOP_MUX_SPI2_BCLK_SHIFT 30
+#define TOP_MUX_SPI3_BCLK_SHIFT 0
+#define TOP_MUX_SPI4_BCLK_SHIFT 1
+#define TOP_MUX_SPI5_BCLK_SHIFT 2
+#define TOP_MUX_SPI6_BCLK_SHIFT 3
+#define TOP_MUX_SPI7_BCLK_SHIFT 4
+#define TOP_MUX_MSDC30_1_SHIFT 7
+#define TOP_MUX_MSDC30_2_SHIFT 8
+#define TOP_MUX_DISP_PWM_SHIFT 9
+#define TOP_MUX_USB_TOP_1P_SHIFT 10
+#define TOP_MUX_SSUSB_XHCI_1P_SHIFT 11
+#define TOP_MUX_SSUSB_FMCNT_P1_SHIFT 12
+#define TOP_MUX_I2C_PERI_SHIFT 13
+#define TOP_MUX_I2C_EAST_SHIFT 14
+#define TOP_MUX_I2C_WEST_SHIFT 15
+#define TOP_MUX_I2C_NORTH_SHIFT 16
+#define TOP_MUX_AES_UFSFDE_SHIFT 17
+#define TOP_MUX_UFS_SHIFT 18
+#define TOP_MUX_AUD_1_SHIFT 21
+#define TOP_MUX_AUD_2_SHIFT 22
+#define TOP_MUX_ADSP_SHIFT 23
+#define TOP_MUX_ADSP_UARTHUB_B_SHIFT 24
+#define TOP_MUX_DPMAIF_MAIN_SHIFT 25
+#define TOP_MUX_PWM_SHIFT 26
+#define TOP_MUX_MCUPM_SHIFT 27
+#define TOP_MUX_SFLASH_SHIFT 28
+#define TOP_MUX_IPSEAST_SHIFT 29
+#define TOP_MUX_TL_SHIFT 0
+#define TOP_MUX_TL_P1_SHIFT 1
+#define TOP_MUX_TL_P2_SHIFT 2
+#define TOP_MUX_EMI_INTERFACE_546_SHIFT 3
+#define TOP_MUX_SDF_SHIFT 4
+#define TOP_MUX_UARTHUB_BCLK_SHIFT 5
+#define TOP_MUX_DPSW_CMP_26M_SHIFT 6
+#define TOP_MUX_SMAPCK_SHIFT 7
+#define TOP_MUX_SSR_PKA_SHIFT 8
+#define TOP_MUX_SSR_DMA_SHIFT 9
+#define TOP_MUX_SSR_KDF_SHIFT 10
+#define TOP_MUX_SSR_RNG_SHIFT 11
+#define TOP_MUX_SPU0_SHIFT 12
+#define TOP_MUX_SPU1_SHIFT 13
+#define TOP_MUX_DXCC_SHIFT 14
+
+/* CKSTA REG */
+#define CKSTA_REG 0x01c8
+#define CKSTA_REG1 0x01cc
+#define CKSTA_REG2 0x01d0
+
+/* DIVIDER REG */
+#define CLK_AUDDIV_2 0x0214
+#define CLK_AUDDIV_3 0x0220
+#define CLK_AUDDIV_4 0x0224
+#define CLK_AUDDIV_5 0x0228
+
+/* HW Voter REG */
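+/*
+ * Each hardware-voter bank n has its SET register at 0x8 * n, CLR at
+ * 0x8 * n + 4 and a DONE/status register at 0x2c00 + 0x4 * n; votes
+ * are cast through SET/CLR and DONE is checked to confirm the request
+ * took effect.
+ */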
+#define HWV_CG_0_SET 0x0000
+#define HWV_CG_0_CLR 0x0004
+#define HWV_CG_0_DONE 0x2c00
+#define HWV_CG_1_SET 0x0008
+#define HWV_CG_1_CLR 0x000c
+#define HWV_CG_1_DONE 0x2c04
+#define HWV_CG_2_SET 0x0010
+#define HWV_CG_2_CLR 0x0014
+#define HWV_CG_2_DONE 0x2c08
+#define HWV_CG_3_SET 0x0018
+#define HWV_CG_3_CLR 0x001c
+#define HWV_CG_3_DONE 0x2c0c
+#define HWV_CG_4_SET 0x0020
+#define HWV_CG_4_CLR 0x0024
+#define HWV_CG_4_DONE 0x2c10
+#define HWV_CG_5_SET 0x0028
+#define HWV_CG_5_CLR 0x002c
+#define HWV_CG_5_DONE 0x2c14
+#define HWV_CG_6_SET 0x0030
+#define HWV_CG_6_CLR 0x0034
+#define HWV_CG_6_DONE 0x2c18
+#define HWV_CG_7_SET 0x0038
+#define HWV_CG_7_CLR 0x003c
+#define HWV_CG_7_DONE 0x2c1c
+#define HWV_CG_8_SET 0x0040
+#define HWV_CG_8_CLR 0x0044
+#define HWV_CG_8_DONE 0x2c20
+
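+/*
+ * Fixed-factor clocks: FACTOR(id, name, parent, mult, div) registers a
+ * clock running at parent * mult / div, so "mainpll_d4_d2" is mainpll
+ * divided by 8 and "univpll_192m" is univpll divided by 13.
+ */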
+static const struct mtk_fixed_factor top_divs[] = {
+ FACTOR(CLK_TOP_MAINPLL_D3, "mainpll_d3", "mainpll", 1, 3),
+ FACTOR(CLK_TOP_MAINPLL_D4, "mainpll_d4", "mainpll", 1, 4),
+ FACTOR(CLK_TOP_MAINPLL_D4_D2, "mainpll_d4_d2", "mainpll", 1, 8),
+ FACTOR(CLK_TOP_MAINPLL_D4_D4, "mainpll_d4_d4", "mainpll", 1, 16),
+ FACTOR(CLK_TOP_MAINPLL_D4_D8, "mainpll_d4_d8", "mainpll", 1, 32),
+ FACTOR(CLK_TOP_MAINPLL_D5, "mainpll_d5", "mainpll", 1, 5),
+ FACTOR(CLK_TOP_MAINPLL_D5_D2, "mainpll_d5_d2", "mainpll", 1, 10),
+ FACTOR(CLK_TOP_MAINPLL_D5_D4, "mainpll_d5_d4", "mainpll", 1, 20),
+ FACTOR(CLK_TOP_MAINPLL_D5_D8, "mainpll_d5_d8", "mainpll", 1, 40),
+ FACTOR(CLK_TOP_MAINPLL_D6, "mainpll_d6", "mainpll", 1, 6),
+ FACTOR(CLK_TOP_MAINPLL_D6_D2, "mainpll_d6_d2", "mainpll", 1, 12),
+ FACTOR(CLK_TOP_MAINPLL_D7, "mainpll_d7", "mainpll", 1, 7),
+ FACTOR(CLK_TOP_MAINPLL_D7_D2, "mainpll_d7_d2", "mainpll", 1, 14),
+ FACTOR(CLK_TOP_MAINPLL_D7_D4, "mainpll_d7_d4", "mainpll", 1, 28),
+ FACTOR(CLK_TOP_MAINPLL_D7_D8, "mainpll_d7_d8", "mainpll", 1, 56),
+ FACTOR(CLK_TOP_MAINPLL_D9, "mainpll_d9", "mainpll", 1, 9),
+ FACTOR(CLK_TOP_UNIVPLL_D4, "univpll_d4", "univpll", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_D4_D2, "univpll_d4_d2", "univpll", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_D4_D4, "univpll_d4_d4", "univpll", 1, 16),
+ FACTOR(CLK_TOP_UNIVPLL_D4_D8, "univpll_d4_d8", "univpll", 1, 32),
+ FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5),
+ FACTOR(CLK_TOP_UNIVPLL_D5_D2, "univpll_d5_d2", "univpll", 1, 10),
+ FACTOR(CLK_TOP_UNIVPLL_D5_D4, "univpll_d5_d4", "univpll", 1, 20),
+ FACTOR(CLK_TOP_UNIVPLL_D6, "univpll_d6", "univpll", 1, 6),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D2, "univpll_d6_d2", "univpll", 1, 12),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D4, "univpll_d6_d4", "univpll", 1, 24),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D8, "univpll_d6_d8", "univpll", 1, 48),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D16, "univpll_d6_d16", "univpll", 1, 96),
+ FACTOR(CLK_TOP_UNIVPLL_192M, "univpll_192m", "univpll", 1, 13),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D4, "univpll_192m_d4", "univpll", 1, 52),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D8, "univpll_192m_d8", "univpll", 1, 104),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D16, "univpll_192m_d16", "univpll", 1, 208),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D32, "univpll_192m_d32", "univpll", 1, 416),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D10, "univpll_192m_d10", "univpll", 1, 130),
+ FACTOR(CLK_TOP_TVDPLL1_D2, "tvdpll1_d2", "tvdpll1", 1, 2),
+ FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll", 1, 2),
+ FACTOR(CLK_TOP_OSC_D2, "osc_d2", "ulposc", 1, 2),
+ FACTOR(CLK_TOP_OSC_D3, "osc_d3", "ulposc", 1, 3),
+ FACTOR(CLK_TOP_OSC_D4, "osc_d4", "ulposc", 1, 4),
+ FACTOR(CLK_TOP_OSC_D5, "osc_d5", "ulposc", 1, 5),
+ FACTOR(CLK_TOP_OSC_D7, "osc_d7", "ulposc", 1, 7),
+ FACTOR(CLK_TOP_OSC_D8, "osc_d8", "ulposc", 1, 8),
+ FACTOR(CLK_TOP_OSC_D10, "osc_d10", "ulposc", 1, 10),
+ FACTOR(CLK_TOP_OSC_D14, "osc_d14", "ulposc", 1, 14),
+ FACTOR(CLK_TOP_OSC_D20, "osc_d20", "ulposc", 1, 20),
+ FACTOR(CLK_TOP_OSC_D32, "osc_d32", "ulposc", 1, 32),
+ FACTOR(CLK_TOP_OSC_D40, "osc_d40", "ulposc", 1, 40),
+};
+
+static const char * const axi_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "osc_d8",
+ "osc_d4",
+ "mainpll_d4_d4",
+ "mainpll_d7_d2"
+};
+
+static const char * const mem_sub_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "osc_d4",
+ "univpll_d4_d4",
+ "osc_d3",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d6",
+ "mainpll_d5",
+ "univpll_d5",
+ "mainpll_d4",
+ "mainpll_d3"
+};
+
+static const char * const io_noc_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "osc_d8",
+ "osc_d4",
+ "mainpll_d6_d2",
+ "mainpll_d9"
+};
+
+static const char * const shared_axi_parents[] = {
+ "clk26m",
+ "mainpll_d7_d8",
+ "mainpll_d5_d8",
+ "osc_d8",
+ "mainpll_d7_d4",
+ "mainpll_d5_d4",
+ "mainpll_d4_d4",
+ "mainpll_d7_d2"
+};
+
+static const char * const shared_sub_parents[] = {
+ "clk26m",
+ "mainpll_d5_d8",
+ "mainpll_d5_d4",
+ "osc_d4",
+ "univpll_d4_d4",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d6",
+ "mainpll_d5",
+ "univpll_d5",
+ "mainpll_d4"
+};
+
+static const char * const p_noc_parents[] = {
+ "clk26m",
+ "mainpll_d5_d8",
+ "mainpll_d5_d4",
+ "osc_d4",
+ "univpll_d4_d4",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d6",
+ "mainpll_d5",
+ "univpll_d5",
+ "mainpll_d4",
+ "mainpll_d3"
+};
+
+static const char * const emi_parents[] = {
+ "clk26m",
+ "osc_d4",
+ "mainpll_d5_d8",
+ "mainpll_d5_d4",
+ "mainpll_d4_d4",
+ "emipll1_ck"
+};
+
+static const char * const ap2conn_host_parents[] = {
+ "clk26m",
+ "mainpll_d7_d4"
+};
+
+static const char * const atb_parents[] = {
+ "clk26m",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d6"
+};
+
+static const char * const cirq_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d7_d4"
+};
+
+static const char * const pbus_156m_parents[] = {
+ "clk26m",
+ "mainpll_d7_d2",
+ "osc_d2",
+ "mainpll_d7"
+};
+
+static const char * const efuse_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const mcu_l3gic_parents[] = {
+ "clk26m",
+ "osc_d8",
+ "mainpll_d4_d4",
+ "mainpll_d7_d2"
+};
+
+static const char * const mcu_infra_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d7_d2",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d9",
+ "mainpll_d6"
+};
+
+static const char * const dsp_parents[] = {
+ "clk26m",
+ "osc_d5",
+ "osc_d4",
+ "osc_d3",
+ "univpll_d6_d2",
+ "osc_d2",
+ "univpll_d5",
+ "osc"
+};
+
+static const char * const mfg_ref_parents[] = {
+ "clk26m",
+ "mainpll_d7_d2"
+};
+
+static const char * const mfg_eb_parents[] = {
+ "clk26m",
+ "mainpll_d7_d2",
+ "mainpll_d6_d2",
+ "mainpll_d5_d2"
+};
+
+static const char * const uart_parents[] = {
+ "clk26m",
+ "univpll_d6_d8",
+ "univpll_d6_d4",
+ "univpll_d6_d2"
+};
+
+static const char * const spi_b_parents[] = {
+ "clk26m",
+ "univpll_d6_d4",
+ "univpll_d5_d4",
+ "mainpll_d4_d4",
+ "univpll_d4_d4",
+ "mainpll_d6_d2",
+ "univpll_192m",
+ "univpll_d6_d2"
+};
+
+static const char * const msdc30_parents[] = {
+ "clk26m",
+ "univpll_d6_d4",
+ "mainpll_d6_d2",
+ "univpll_d6_d2",
+ "msdcpll_d2"
+};
+
+static const char * const disp_pwm_parents[] = {
+ "clk26m",
+ "osc_d32",
+ "osc_d8",
+ "univpll_d6_d4",
+ "univpll_d5_d4",
+ "osc_d4",
+ "mainpll_d4_d4"
+};
+
+static const char * const usb_1p_parents[] = {
+ "clk26m",
+ "univpll_d5_d4"
+};
+
+static const char * const usb_fmcnt_p1_parents[] = {
+ "clk26m",
+ "univpll_192m_d4"
+};
+
+static const char * const i2c_parents[] = {
+ "clk26m",
+ "mainpll_d4_d8",
+ "univpll_d5_d4",
+ "mainpll_d4_d4",
+ "univpll_d5_d2"
+};
+
+static const char * const aes_ufsfde_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "univpll_d6_d2",
+ "mainpll_d4_d2",
+ "univpll_d6",
+ "mainpll_d4"
+};
+
+static const char * const ufs_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "univpll_d6_d2",
+ "mainpll_d4_d2",
+ "univpll_d6",
+ "mainpll_d5",
+ "univpll_d5"
+};
+
+static const char * const aud_1_parents[] = {
+ "clk26m",
+ "vlp_apll1"
+};
+
+static const char * const aud_2_parents[] = {
+ "clk26m",
+ "vlp_apll2"
+};
+
+static const char * const adsp_parents[] = {
+ "clk26m",
+ "adsppll"
+};
+
+static const char * const adsp_uarthub_b_parents[] = {
+ "clk26m",
+ "univpll_d6_d4",
+ "univpll_d6_d2"
+};
+
+static const char * const dpmaif_main_parents[] = {
+ "clk26m",
+ "univpll_d4_d4",
+ "univpll_d5_d2",
+ "mainpll_d4_d2",
+ "univpll_d4_d2",
+ "mainpll_d6",
+ "univpll_d6",
+ "mainpll_d5",
+ "univpll_d5"
+};
+
+static const char * const pwm_parents[] = {
+ "clk26m",
+ "mainpll_d7_d4",
+ "univpll_d4_d8"
+};
+
+static const char * const mcupm_parents[] = {
+ "clk26m",
+ "mainpll_d7_d2",
+ "mainpll_d6_d2",
+ "univpll_d6_d2",
+ "mainpll_d5_d2"
+};
+
+static const char * const ipseast_parents[] = {
+ "clk26m",
+ "mainpll_d6",
+ "mainpll_d5",
+ "mainpll_d4",
+ "mainpll_d3"
+};
+
+static const char * const tl_parents[] = {
+ "clk26m",
+ "mainpll_d7_d4",
+ "mainpll_d4_d4",
+ "mainpll_d5_d2"
+};
+
+static const char * const md_emi_parents[] = {
+ "clk26m",
+ "mainpll_d4"
+};
+
+static const char * const sdf_parents[] = {
+ "clk26m",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d6",
+ "mainpll_d4",
+ "univpll_d4"
+};
+
+static const char * const uarthub_b_parents[] = {
+ "clk26m",
+ "univpll_d6_d4",
+ "univpll_d6_d2"
+};
+
+static const char * const dpsw_cmp_26m_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const smap_parents[] = {
+ "clk26m",
+ "mainpll_d4_d8"
+};
+
+static const char * const ssr_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d4_d2",
+ "mainpll_d7",
+ "mainpll_d6",
+ "mainpll_d5"
+};
+
+static const char * const ssr_kdf_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d4_d2",
+ "mainpll_d7"
+};
+
+static const char * const ssr_rng_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2"
+};
+
+static const char * const spu_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d4_d2",
+ "mainpll_d7",
+ "mainpll_d6",
+ "mainpll_d5"
+};
+
+static const char * const dxcc_parents[] = {
+ "clk26m",
+ "mainpll_d4_d8",
+ "mainpll_d4_d4",
+ "mainpll_d4_d2"
+};
+
+static const char * const apll_m_parents[] = {
+ "aud_1",
+ "aud_2"
+};
+
+static const char * const sflash_parents[] = {
+ "clk26m",
+ "mainpll_d7_d8",
+ "univpll_d6_d8"
+};
+
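+/*
+ * Each mux is programmed through its CLK_CFG_n SET/CLR register pair
+ * (field offset and width given per entry) and the new selection is
+ * latched by writing the matching TOP_MUX_*_SHIFT bit in one of the
+ * CLK_CFG_UPDATE registers. The MUX_GATE_* variants additionally carry
+ * a gate bit, the *_HWV_* variants route enable requests through a
+ * hardware-voter bank, and the *_FENC_* variants reference a clock
+ * status bit in the CLK_FENC_STATUS_MON_n monitor registers.
+ */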
+static const struct mtk_mux top_muxes[] = {
+ /* CLK_CFG_0 */
+ MUX_CLR_SET_UPD(CLK_TOP_AXI, "axi",
+ axi_parents, CLK_CFG_0, CLK_CFG_0_SET,
+ CLK_CFG_0_CLR, 0, 3,
+ CLK_CFG_UPDATE, TOP_MUX_AXI_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_MEM_SUB, "mem_sub",
+ mem_sub_parents, CLK_CFG_0, CLK_CFG_0_SET,
+ CLK_CFG_0_CLR, 8, 4,
+ CLK_CFG_UPDATE, TOP_MUX_MEM_SUB_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_IO_NOC, "io_noc",
+ io_noc_parents, CLK_CFG_0, CLK_CFG_0_SET,
+ CLK_CFG_0_CLR, 16, 3,
+ CLK_CFG_UPDATE, TOP_MUX_IO_NOC_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_P_AXI, "p_axi",
+ shared_axi_parents, CLK_CFG_0, CLK_CFG_0_SET,
+ CLK_CFG_0_CLR, 24, 3,
+ CLK_CFG_UPDATE, TOP_MUX_PERI_AXI_SHIFT),
+ /* CLK_CFG_1 */
+ MUX_CLR_SET_UPD(CLK_TOP_UFS_PEXTP0_AXI, "ufs_pextp0_axi",
+ shared_axi_parents, CLK_CFG_1, CLK_CFG_1_SET,
+ CLK_CFG_1_CLR, 0, 3,
+ CLK_CFG_UPDATE, TOP_MUX_UFS_PEXTP0_AXI_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_PEXTP1_USB_AXI, "pextp1_usb_axi",
+ shared_axi_parents, CLK_CFG_1, CLK_CFG_1_SET,
+ CLK_CFG_1_CLR, 8, 3,
+ CLK_CFG_UPDATE, TOP_MUX_PEXTP1_USB_AXI_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_P_FMEM_SUB, "p_fmem_sub",
+ shared_sub_parents, CLK_CFG_1, CLK_CFG_1_SET,
+ CLK_CFG_1_CLR, 16, 4,
+ CLK_CFG_UPDATE, TOP_MUX_PERI_FMEM_SUB_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_PEXPT0_MEM_SUB, "ufs_pexpt0_mem_sub",
+ shared_sub_parents, CLK_CFG_1, CLK_CFG_1_SET,
+ CLK_CFG_1_CLR, 24, 4,
+ CLK_CFG_UPDATE, TOP_MUX_UFS_PEXPT0_MEM_SUB_SHIFT),
+ /* CLK_CFG_2 */
+ MUX_CLR_SET_UPD(CLK_TOP_PEXTP1_USB_MEM_SUB, "pextp1_usb_mem_sub",
+ shared_sub_parents, CLK_CFG_2, CLK_CFG_2_SET,
+ CLK_CFG_2_CLR, 0, 4,
+ CLK_CFG_UPDATE, TOP_MUX_PEXTP1_USB_MEM_SUB_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_P_NOC, "p_noc",
+ p_noc_parents, CLK_CFG_2, CLK_CFG_2_SET,
+ CLK_CFG_2_CLR, 8, 4,
+ CLK_CFG_UPDATE, TOP_MUX_PERI_NOC_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_EMI_N, "emi_n",
+ emi_parents, CLK_CFG_2, CLK_CFG_2_SET,
+ CLK_CFG_2_CLR, 16, 3,
+ CLK_CFG_UPDATE, TOP_MUX_EMI_N_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_EMI_S, "emi_s",
+ emi_parents, CLK_CFG_2, CLK_CFG_2_SET,
+ CLK_CFG_2_CLR, 24, 3,
+ CLK_CFG_UPDATE, TOP_MUX_EMI_S_SHIFT),
+ /* CLK_CFG_3 */
+ MUX_CLR_SET_UPD(CLK_TOP_AP2CONN_HOST, "ap2conn_host",
+ ap2conn_host_parents, CLK_CFG_3, CLK_CFG_3_SET,
+ CLK_CFG_3_CLR, 16, 1,
+ CLK_CFG_UPDATE, TOP_MUX_AP2CONN_HOST_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_ATB, "atb",
+ atb_parents, CLK_CFG_3, CLK_CFG_3_SET,
+ CLK_CFG_3_CLR, 24, 2,
+ CLK_CFG_UPDATE, TOP_MUX_ATB_SHIFT),
+ /* CLK_CFG_4 */
+ MUX_CLR_SET_UPD(CLK_TOP_CIRQ, "cirq",
+ cirq_parents, CLK_CFG_4, CLK_CFG_4_SET,
+ CLK_CFG_4_CLR, 0, 2,
+ CLK_CFG_UPDATE, TOP_MUX_CIRQ_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_PBUS_156M, "pbus_156m",
+ pbus_156m_parents, CLK_CFG_4, CLK_CFG_4_SET,
+ CLK_CFG_4_CLR, 8, 2,
+ CLK_CFG_UPDATE, TOP_MUX_PBUS_156M_SHIFT),
+ /* CLK_CFG_5 */
+ MUX_CLR_SET_UPD(CLK_TOP_EFUSE, "efuse",
+ efuse_parents, CLK_CFG_5, CLK_CFG_5_SET,
+ CLK_CFG_5_CLR, 0, 1,
+ CLK_CFG_UPDATE, TOP_MUX_EFUSE_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_MCL3GIC, "mcu_l3gic",
+ mcu_l3gic_parents, CLK_CFG_5, CLK_CFG_5_SET,
+ CLK_CFG_5_CLR, 8, 2,
+ CLK_CFG_UPDATE, TOP_MUX_MCU_L3GIC_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_MCINFRA, "mcu_infra",
+ mcu_infra_parents, CLK_CFG_5, CLK_CFG_5_SET,
+ CLK_CFG_5_CLR, 16, 3,
+ CLK_CFG_UPDATE, TOP_MUX_MCU_INFRA_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_DSP, "dsp",
+ dsp_parents, CLK_CFG_5, CLK_CFG_5_SET,
+ CLK_CFG_5_CLR, 24, 3,
+ CLK_CFG_UPDATE, TOP_MUX_DSP_SHIFT),
+ /* CLK_CFG_6 */
+ MUX_GATE_FENC_CLR_SET_UPD_FLAGS(CLK_TOP_MFG_REF, "mfg_ref", mfg_ref_parents,
+ NULL, ARRAY_SIZE(mfg_ref_parents),
+ CLK_CFG_6, CLK_CFG_6_SET, CLK_CFG_6_CLR,
+ 0, 1, 7, CLK_CFG_UPDATE, TOP_MUX_MFG_REF_SHIFT,
+ CLK_FENC_STATUS_MON_0, 7, CLK_IGNORE_UNUSED),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MFG_EB, "mfg_eb",
+ mfg_eb_parents, CLK_CFG_6, CLK_CFG_6_SET,
+ CLK_CFG_6_CLR, 16, 2,
+ 23, CLK_CFG_UPDATE, TOP_MUX_MFG_EB_SHIFT),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_UART, "uart", uart_parents,
+ CLK_CFG_6, CLK_CFG_6_SET, CLK_CFG_6_CLR,
+ HWV_CG_3_DONE, HWV_CG_3_SET, HWV_CG_3_CLR,
+ 24, 2, 31, CLK_CFG_UPDATE, TOP_MUX_UART_SHIFT,
+ CLK_FENC_STATUS_MON_0, 4),
+ /* CLK_CFG_7 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI0_BCLK, "spi0_b", spi_b_parents,
+ CLK_CFG_7, CLK_CFG_7_SET, CLK_CFG_7_CLR,
+ HWV_CG_4_DONE, HWV_CG_4_SET, HWV_CG_4_CLR,
+ 0, 3, 7, CLK_CFG_UPDATE, TOP_MUX_SPI0_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_0, 3),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI1_BCLK, "spi1_b", spi_b_parents,
+ CLK_CFG_7, CLK_CFG_7_SET, CLK_CFG_7_CLR,
+ HWV_CG_4_DONE, HWV_CG_4_SET, HWV_CG_4_CLR,
+ 8, 3, 15, CLK_CFG_UPDATE, TOP_MUX_SPI1_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_0, 2),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI2_BCLK, "spi2_b", spi_b_parents,
+ CLK_CFG_7, CLK_CFG_7_SET, CLK_CFG_7_CLR,
+ HWV_CG_4_DONE, HWV_CG_4_SET, HWV_CG_4_CLR,
+ 16, 3, 23, CLK_CFG_UPDATE, TOP_MUX_SPI2_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_0, 1),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI3_BCLK, "spi3_b", spi_b_parents,
+ CLK_CFG_7, CLK_CFG_7_SET, CLK_CFG_7_CLR,
+ HWV_CG_4_DONE, HWV_CG_4_SET, HWV_CG_4_CLR,
+ 24, 3, 31, CLK_CFG_UPDATE1, TOP_MUX_SPI3_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_0, 0),
+ /* CLK_CFG_8 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI4_BCLK, "spi4_b", spi_b_parents,
+ CLK_CFG_8, CLK_CFG_8_SET, CLK_CFG_8_CLR,
+ HWV_CG_5_DONE, HWV_CG_5_SET, HWV_CG_5_CLR,
+ 0, 3, 7, CLK_CFG_UPDATE1, TOP_MUX_SPI4_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_1, 31),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI5_BCLK, "spi5_b", spi_b_parents,
+ CLK_CFG_8, CLK_CFG_8_SET, CLK_CFG_8_CLR,
+ HWV_CG_5_DONE, HWV_CG_5_SET, HWV_CG_5_CLR,
+ 8, 3, 15, CLK_CFG_UPDATE1, TOP_MUX_SPI5_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_1, 30),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI6_BCLK, "spi6_b", spi_b_parents,
+ CLK_CFG_8, CLK_CFG_8_SET, CLK_CFG_8_CLR,
+ HWV_CG_5_DONE, HWV_CG_5_SET, HWV_CG_5_CLR,
+ 16, 3, 23, CLK_CFG_UPDATE1, TOP_MUX_SPI6_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_1, 29),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI7_BCLK, "spi7_b", spi_b_parents,
+ CLK_CFG_8, CLK_CFG_8_SET, CLK_CFG_8_CLR,
+ HWV_CG_5_DONE, HWV_CG_5_SET, HWV_CG_5_CLR,
+ 24, 3, 31, CLK_CFG_UPDATE1, TOP_MUX_SPI7_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_1, 28),
+ /* CLK_CFG_9 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_MSDC30_1, "msdc30_1", msdc30_parents,
+ CLK_CFG_9, CLK_CFG_9_SET, CLK_CFG_9_CLR,
+ 16, 3, 23, CLK_CFG_UPDATE1, TOP_MUX_MSDC30_1_SHIFT,
+ CLK_FENC_STATUS_MON_1, 25),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_MSDC30_2, "msdc30_2", msdc30_parents,
+ CLK_CFG_9, CLK_CFG_9_SET, CLK_CFG_9_CLR,
+ 24, 3, 31, CLK_CFG_UPDATE1, TOP_MUX_MSDC30_2_SHIFT,
+ CLK_FENC_STATUS_MON_1, 24),
+ /* CLK_CFG_10 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_DISP_PWM, "disp_pwm", disp_pwm_parents,
+ CLK_CFG_10, CLK_CFG_10_SET, CLK_CFG_10_CLR,
+ 0, 3, 7, CLK_CFG_UPDATE1, TOP_MUX_DISP_PWM_SHIFT,
+ CLK_FENC_STATUS_MON_1, 23),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_USB_TOP_1P, "usb_1p", usb_1p_parents,
+ CLK_CFG_10, CLK_CFG_10_SET, CLK_CFG_10_CLR,
+ 8, 1, 15, CLK_CFG_UPDATE1, TOP_MUX_USB_TOP_1P_SHIFT,
+ CLK_FENC_STATUS_MON_1, 22),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_USB_XHCI_1P, "usb_xhci_1p", usb_1p_parents,
+ CLK_CFG_10, CLK_CFG_10_SET, CLK_CFG_10_CLR,
+ 16, 1, 23, CLK_CFG_UPDATE1, TOP_MUX_SSUSB_XHCI_1P_SHIFT,
+ CLK_FENC_STATUS_MON_1, 21),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_USB_FMCNT_P1, "usb_fmcnt_p1", usb_fmcnt_p1_parents,
+ CLK_CFG_10, CLK_CFG_10_SET, CLK_CFG_10_CLR,
+ 24, 1, 31, CLK_CFG_UPDATE1, TOP_MUX_SSUSB_FMCNT_P1_SHIFT,
+ CLK_FENC_STATUS_MON_1, 20),
+ /* CLK_CFG_11 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_I2C_P, "i2c_p", i2c_parents,
+ CLK_CFG_11, CLK_CFG_11_SET, CLK_CFG_11_CLR,
+ 0, 3, 7, CLK_CFG_UPDATE1, TOP_MUX_I2C_PERI_SHIFT,
+ CLK_FENC_STATUS_MON_1, 19),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_I2C_EAST, "i2c_east", i2c_parents,
+ CLK_CFG_11, CLK_CFG_11_SET, CLK_CFG_11_CLR,
+ 8, 3, 15, CLK_CFG_UPDATE1, TOP_MUX_I2C_EAST_SHIFT,
+ CLK_FENC_STATUS_MON_1, 18),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_I2C_WEST, "i2c_west", i2c_parents,
+ CLK_CFG_11, CLK_CFG_11_SET, CLK_CFG_11_CLR,
+ 16, 3, 23, CLK_CFG_UPDATE1, TOP_MUX_I2C_WEST_SHIFT,
+ CLK_FENC_STATUS_MON_1, 17),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_I2C_NORTH, "i2c_north", i2c_parents,
+ CLK_CFG_11, CLK_CFG_11_SET, CLK_CFG_11_CLR,
+ HWV_CG_6_DONE, HWV_CG_6_SET, HWV_CG_6_CLR,
+ 24, 3, 31, CLK_CFG_UPDATE1, TOP_MUX_I2C_NORTH_SHIFT,
+ CLK_FENC_STATUS_MON_1, 16),
+ /* CLK_CFG_12 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_AES_UFSFDE, "aes_ufsfde", aes_ufsfde_parents,
+ CLK_CFG_12, CLK_CFG_12_SET, CLK_CFG_12_CLR,
+ 0, 3, 7, CLK_CFG_UPDATE1, TOP_MUX_AES_UFSFDE_SHIFT,
+ CLK_FENC_STATUS_MON_1, 15),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_UFS, "ufs", ufs_parents,
+ CLK_CFG_12, CLK_CFG_12_SET, CLK_CFG_12_CLR,
+ 8, 3, 15, CLK_CFG_UPDATE1, TOP_MUX_UFS_SHIFT,
+ CLK_FENC_STATUS_MON_1, 14),
+ /* CLK_CFG_13 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_AUD_1, "aud_1", aud_1_parents,
+ CLK_CFG_13, CLK_CFG_13_SET, CLK_CFG_13_CLR,
+ 0, 1, 7, CLK_CFG_UPDATE1, TOP_MUX_AUD_1_SHIFT,
+ CLK_FENC_STATUS_MON_1, 11),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_AUD_2, "aud_2", aud_2_parents,
+ CLK_CFG_13, CLK_CFG_13_SET, CLK_CFG_13_CLR,
+ 8, 1, 15, CLK_CFG_UPDATE1, TOP_MUX_AUD_2_SHIFT,
+ CLK_FENC_STATUS_MON_1, 10),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_ADSP, "adsp", adsp_parents,
+ CLK_CFG_13, CLK_CFG_13_SET, CLK_CFG_13_CLR,
+ 16, 1, 23, CLK_CFG_UPDATE1, TOP_MUX_ADSP_SHIFT,
+ CLK_FENC_STATUS_MON_1, 9),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ADSP_UARTHUB_B, "adsp_uarthub_b",
+ adsp_uarthub_b_parents, CLK_CFG_13, CLK_CFG_13_SET,
+ CLK_CFG_13_CLR, 24, 2, 31,
+ CLK_CFG_UPDATE1, TOP_MUX_ADSP_UARTHUB_B_SHIFT),
+ /* CLK_CFG_14 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_DPMAIF_MAIN, "dpmaif_main", dpmaif_main_parents,
+ CLK_CFG_14, CLK_CFG_14_SET, CLK_CFG_14_CLR,
+ 0, 4, 7, CLK_CFG_UPDATE1, TOP_MUX_DPMAIF_MAIN_SHIFT,
+ CLK_FENC_STATUS_MON_1, 7),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_PWM, "pwm", pwm_parents,
+ CLK_CFG_14, CLK_CFG_14_SET, CLK_CFG_14_CLR,
+ 8, 2, 15, CLK_CFG_UPDATE1, TOP_MUX_PWM_SHIFT,
+ CLK_FENC_STATUS_MON_1, 6),
+ MUX_CLR_SET_UPD(CLK_TOP_MCUPM, "mcupm",
+ mcupm_parents, CLK_CFG_14, CLK_CFG_14_SET,
+ CLK_CFG_14_CLR, 16, 3,
+ CLK_CFG_UPDATE1, TOP_MUX_MCUPM_SHIFT),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_SFLASH, "sflash", sflash_parents,
+ CLK_CFG_14, CLK_CFG_14_SET, CLK_CFG_14_CLR,
+ 24, 2, 31, CLK_CFG_UPDATE1, TOP_MUX_SFLASH_SHIFT,
+ CLK_FENC_STATUS_MON_1, 4),
+ /* CLK_CFG_15 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_IPSEAST, "ipseast", ipseast_parents,
+ CLK_CFG_15, CLK_CFG_15_SET, CLK_CFG_15_CLR,
+ 0, 3, 7, CLK_CFG_UPDATE1, TOP_MUX_IPSEAST_SHIFT,
+ CLK_FENC_STATUS_MON_1, 3),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_TL, "tl", tl_parents,
+ CLK_CFG_15, CLK_CFG_15_SET, CLK_CFG_15_CLR,
+ 16, 2, 23, CLK_CFG_UPDATE2, TOP_MUX_TL_SHIFT,
+ CLK_FENC_STATUS_MON_1, 1),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_TL_P1, "tl_p1", tl_parents,
+ CLK_CFG_15, CLK_CFG_15_SET, CLK_CFG_15_CLR,
+ 24, 2, 31, CLK_CFG_UPDATE2, TOP_MUX_TL_P1_SHIFT,
+ CLK_FENC_STATUS_MON_1, 0),
+ /* CLK_CFG_16 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_TL_P2, "tl_p2", tl_parents,
+ CLK_CFG_16, CLK_CFG_16_SET, CLK_CFG_16_CLR,
+ 0, 2, 7, CLK_CFG_UPDATE2, TOP_MUX_TL_P2_SHIFT,
+ CLK_FENC_STATUS_MON_2, 31),
+ MUX_CLR_SET_UPD(CLK_TOP_EMI_INTERFACE_546, "emi_interface_546",
+ md_emi_parents, CLK_CFG_16, CLK_CFG_16_SET,
+ CLK_CFG_16_CLR, 8, 1,
+ CLK_CFG_UPDATE2, TOP_MUX_EMI_INTERFACE_546_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SDF, "sdf",
+ sdf_parents, CLK_CFG_16, CLK_CFG_16_SET,
+ CLK_CFG_16_CLR, 16, 3,
+ CLK_CFG_UPDATE2, TOP_MUX_SDF_SHIFT),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_UARTHUB_BCLK, "uarthub_b", uarthub_b_parents,
+ CLK_CFG_16, CLK_CFG_16_SET, CLK_CFG_16_CLR,
+ HWV_CG_7_DONE, HWV_CG_7_SET, HWV_CG_7_CLR,
+ 24, 2, 31, CLK_CFG_UPDATE2, TOP_MUX_UARTHUB_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_2, 28),
+ /* CLK_CFG_17 */
+ MUX_CLR_SET_UPD(CLK_TOP_DPSW_CMP_26M, "dpsw_cmp_26m",
+ dpsw_cmp_26m_parents, CLK_CFG_17, CLK_CFG_17_SET,
+ CLK_CFG_17_CLR, 0, 1,
+ CLK_CFG_UPDATE2, TOP_MUX_DPSW_CMP_26M_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SMAP, "smap",
+		       smap_parents, CLK_CFG_17, CLK_CFG_17_SET,
+ CLK_CFG_17_CLR, 8, 1,
+ CLK_CFG_UPDATE2, TOP_MUX_SMAPCK_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SSR_PKA, "ssr_pka",
+ ssr_parents, CLK_CFG_17, CLK_CFG_17_SET,
+ CLK_CFG_17_CLR, 16, 3,
+ CLK_CFG_UPDATE2, TOP_MUX_SSR_PKA_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SSR_DMA, "ssr_dma",
+ ssr_parents, CLK_CFG_17, CLK_CFG_17_SET,
+ CLK_CFG_17_CLR, 24, 3,
+ CLK_CFG_UPDATE2, TOP_MUX_SSR_DMA_SHIFT),
+ /* CLK_CFG_18 */
+ MUX_CLR_SET_UPD(CLK_TOP_SSR_KDF, "ssr_kdf",
+ ssr_kdf_parents, CLK_CFG_18, CLK_CFG_18_SET,
+ CLK_CFG_18_CLR, 0, 2,
+ CLK_CFG_UPDATE2, TOP_MUX_SSR_KDF_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SSR_RNG, "ssr_rng",
+ ssr_rng_parents, CLK_CFG_18, CLK_CFG_18_SET,
+ CLK_CFG_18_CLR, 8, 2,
+ CLK_CFG_UPDATE2, TOP_MUX_SSR_RNG_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SPU0, "spu0",
+ spu_parents, CLK_CFG_18, CLK_CFG_18_SET,
+ CLK_CFG_18_CLR, 16, 3,
+ CLK_CFG_UPDATE2, TOP_MUX_SPU0_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SPU1, "spu1",
+ spu_parents, CLK_CFG_18, CLK_CFG_18_SET,
+ CLK_CFG_18_CLR, 24, 3,
+ CLK_CFG_UPDATE2, TOP_MUX_SPU1_SHIFT),
+ /* CLK_CFG_19 */
+ MUX_CLR_SET_UPD(CLK_TOP_DXCC, "dxcc",
+ dxcc_parents, CLK_CFG_19, CLK_CFG_19_SET,
+ CLK_CFG_19_CLR, 0, 2,
+ CLK_CFG_UPDATE2, TOP_MUX_DXCC_SHIFT),
+};
+
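+/*
+ * Audio composites: MUX_DIV_GATE combines an aud_1/aud_2 (APLL1/APLL2)
+ * source selector in CLK_AUDDIV_0, an 8-bit divider in CLK_AUDDIV_2..5
+ * and a gate bit, one per I2S input/output.
+ */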
+static const struct mtk_composite top_aud_divs[] = {
+ /* CLK_AUDDIV_2 */
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SIN0, "apll_i2sin0_m", apll_m_parents,
+ CLK_AUDDIV_0, 16, 1, CLK_AUDDIV_2, 0, 8, CLK_AUDDIV_0, 0),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SIN1, "apll_i2sin1_m", apll_m_parents,
+ CLK_AUDDIV_0, 17, 1, CLK_AUDDIV_2, 8, 8, CLK_AUDDIV_0, 1),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SIN2, "apll_i2sin2_m", apll_m_parents,
+ CLK_AUDDIV_0, 18, 1, CLK_AUDDIV_2, 16, 8, CLK_AUDDIV_0, 2),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SIN3, "apll_i2sin3_m", apll_m_parents,
+ CLK_AUDDIV_0, 19, 1, CLK_AUDDIV_2, 24, 8, CLK_AUDDIV_0, 3),
+ /* CLK_AUDDIV_3 */
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SIN4, "apll_i2sin4_m", apll_m_parents,
+ CLK_AUDDIV_0, 20, 1, CLK_AUDDIV_3, 0, 8, CLK_AUDDIV_0, 4),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SIN6, "apll_i2sin6_m", apll_m_parents,
+ CLK_AUDDIV_0, 21, 1, CLK_AUDDIV_3, 8, 8, CLK_AUDDIV_0, 5),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SOUT0, "apll_i2sout0_m", apll_m_parents,
+ CLK_AUDDIV_0, 22, 1, CLK_AUDDIV_3, 16, 8, CLK_AUDDIV_0, 6),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SOUT1, "apll_i2sout1_m", apll_m_parents,
+ CLK_AUDDIV_0, 23, 1, CLK_AUDDIV_3, 24, 8, CLK_AUDDIV_0, 7),
+ /* CLK_AUDDIV_4 */
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SOUT2, "apll_i2sout2_m", apll_m_parents,
+ CLK_AUDDIV_0, 24, 1, CLK_AUDDIV_4, 0, 8, CLK_AUDDIV_0, 8),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SOUT3, "apll_i2sout3_m", apll_m_parents,
+ CLK_AUDDIV_0, 25, 1, CLK_AUDDIV_4, 8, 8, CLK_AUDDIV_0, 9),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SOUT4, "apll_i2sout4_m", apll_m_parents,
+ CLK_AUDDIV_0, 26, 1, CLK_AUDDIV_4, 16, 8, CLK_AUDDIV_0, 10),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SOUT6, "apll_i2sout6_m", apll_m_parents,
+ CLK_AUDDIV_0, 27, 1, CLK_AUDDIV_4, 24, 8, CLK_AUDDIV_0, 11),
+ /* CLK_AUDDIV_5 */
+ MUX_DIV_GATE(CLK_TOP_APLL_FMI2S, "apll_fmi2s_m", apll_m_parents,
+ CLK_AUDDIV_0, 28, 1, CLK_AUDDIV_5, 0, 8, CLK_AUDDIV_0, 12),
+ MUX(CLK_TOP_APLL_TDMOUT, "apll_tdmout_m",
+ apll_m_parents, CLK_AUDDIV_0, 29, 1),
+ DIV_GATE(CLK_TOP_APLL12_DIV_TDMOUT_M, "apll12_div_tdmout_m",
+ "apll_tdmout_m", CLK_AUDDIV_0,
+ 13, CLK_AUDDIV_5, 8, 8),
+ DIV_GATE(CLK_TOP_APLL12_DIV_TDMOUT_B, "apll12_div_tdmout_b",
+ "apll_tdmout_m", CLK_AUDDIV_0,
+ 14, CLK_AUDDIV_5, 8, 16),
+};
+
+static const struct mtk_clk_desc topck_desc = {
+ .factor_clks = top_divs,
+ .num_factor_clks = ARRAY_SIZE(top_divs),
+ .mux_clks = top_muxes,
+ .num_mux_clks = ARRAY_SIZE(top_muxes),
+ .composite_clks = top_aud_divs,
+ .num_composite_clks = ARRAY_SIZE(top_aud_divs)
+};
+
+static const struct of_device_id of_match_clk_mt8196_ck[] = {
+ { .compatible = "mediatek,mt8196-topckgen", .data = &topck_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_ck);
+
+static struct platform_driver clk_mt8196_topck_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-topck",
+ .of_match_table = of_match_clk_mt8196_ck,
+ },
+};
+
+MODULE_DESCRIPTION("MediaTek MT8196 top clock generators driver");
+module_platform_driver(clk_mt8196_topck_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-topckgen2.c b/drivers/clk/mediatek/clk-mt8196-topckgen2.c
new file mode 100644
index 000000000000..6df93d7fbf91
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-topckgen2.c
@@ -0,0 +1,568 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-mux.h"
+
+/* MUX SEL REG */
+#define CKSYS2_CLK_CFG_UPDATE 0x0004
+#define CKSYS2_CLK_CFG_0 0x0010
+#define CKSYS2_CLK_CFG_0_SET 0x0014
+#define CKSYS2_CLK_CFG_0_CLR 0x0018
+#define CKSYS2_CLK_CFG_1 0x0020
+#define CKSYS2_CLK_CFG_1_SET 0x0024
+#define CKSYS2_CLK_CFG_1_CLR 0x0028
+#define CKSYS2_CLK_CFG_2 0x0030
+#define CKSYS2_CLK_CFG_2_SET 0x0034
+#define CKSYS2_CLK_CFG_2_CLR 0x0038
+#define CKSYS2_CLK_CFG_3 0x0040
+#define CKSYS2_CLK_CFG_3_SET 0x0044
+#define CKSYS2_CLK_CFG_3_CLR 0x0048
+#define CKSYS2_CLK_CFG_4 0x0050
+#define CKSYS2_CLK_CFG_4_SET 0x0054
+#define CKSYS2_CLK_CFG_4_CLR 0x0058
+#define CKSYS2_CLK_CFG_5 0x0060
+#define CKSYS2_CLK_CFG_5_SET 0x0064
+#define CKSYS2_CLK_CFG_5_CLR 0x0068
+#define CKSYS2_CLK_CFG_6 0x0070
+#define CKSYS2_CLK_CFG_6_SET 0x0074
+#define CKSYS2_CLK_CFG_6_CLR 0x0078
+#define CKSYS2_CLK_FENC_STATUS_MON_0 0x0174
+
+/* MUX SHIFT */
+#define TOP_MUX_SENINF0_SHIFT 0
+#define TOP_MUX_SENINF1_SHIFT 1
+#define TOP_MUX_SENINF2_SHIFT 2
+#define TOP_MUX_SENINF3_SHIFT 3
+#define TOP_MUX_SENINF4_SHIFT 4
+#define TOP_MUX_SENINF5_SHIFT 5
+#define TOP_MUX_IMG1_SHIFT 6
+#define TOP_MUX_IPE_SHIFT 7
+#define TOP_MUX_CAM_SHIFT 8
+#define TOP_MUX_CAMTM_SHIFT 9
+#define TOP_MUX_DPE_SHIFT 10
+#define TOP_MUX_VDEC_SHIFT 11
+#define TOP_MUX_CCUSYS_SHIFT 12
+#define TOP_MUX_CCUTM_SHIFT 13
+#define TOP_MUX_VENC_SHIFT 14
+#define TOP_MUX_DVO_SHIFT 15
+#define TOP_MUX_DVO_FAVT_SHIFT 16
+#define TOP_MUX_DP1_SHIFT 17
+#define TOP_MUX_DP0_SHIFT 18
+#define TOP_MUX_DISP_SHIFT 19
+#define TOP_MUX_MDP_SHIFT 20
+#define TOP_MUX_MMINFRA_SHIFT 21
+#define TOP_MUX_MMINFRA_SNOC_SHIFT 22
+#define TOP_MUX_MMUP_SHIFT 23
+#define TOP_MUX_MMINFRA_AO_SHIFT 26
+
+/* HW Voter REG */
+#define HWV_CG_30_SET 0x0058
+#define HWV_CG_30_CLR 0x005c
+#define HWV_CG_30_DONE 0x2c2c
+
+#define MM_HWV_CG_30_SET 0x00f0
+#define MM_HWV_CG_30_CLR 0x00f4
+#define MM_HWV_CG_30_DONE 0x2c78
+#define MM_HWV_CG_31_SET 0x00f8
+#define MM_HWV_CG_31_CLR 0x00fc
+#define MM_HWV_CG_31_DONE 0x2c7c
+#define MM_HWV_CG_32_SET 0x0100
+#define MM_HWV_CG_32_CLR 0x0104
+#define MM_HWV_CG_32_DONE 0x2c80
+#define MM_HWV_CG_33_SET 0x0108
+#define MM_HWV_CG_33_CLR 0x010c
+#define MM_HWV_CG_33_DONE 0x2c84
+#define MM_HWV_CG_34_SET 0x0110
+#define MM_HWV_CG_34_CLR 0x0114
+#define MM_HWV_CG_34_DONE 0x2c88
+#define MM_HWV_CG_35_SET 0x0118
+#define MM_HWV_CG_35_CLR 0x011c
+#define MM_HWV_CG_35_DONE 0x2c8c
+#define MM_HWV_CG_36_SET 0x0120
+#define MM_HWV_CG_36_CLR 0x0124
+#define MM_HWV_CG_36_DONE 0x2c90
+#define MM_HWV_MUX_UPDATE_31_0 0x0240
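+
+/*
+ * Two voter register spaces are defined above: an HWV_CG_30 bank in
+ * the shared hardware-voter block and MM_HWV_CG_3x banks that appear
+ * to live in a separate multimedia voter block with its own mux-update
+ * register.
+ */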
+
+static const struct mtk_fixed_factor top_divs[] = {
+ FACTOR(CLK_TOP2_MAINPLL2_D2, "mainpll2_d2", "mainpll2", 1, 2),
+ FACTOR(CLK_TOP2_MAINPLL2_D3, "mainpll2_d3", "mainpll2", 1, 3),
+ FACTOR(CLK_TOP2_MAINPLL2_D4, "mainpll2_d4", "mainpll2", 1, 4),
+ FACTOR(CLK_TOP2_MAINPLL2_D4_D2, "mainpll2_d4_d2", "mainpll2", 1, 8),
+ FACTOR(CLK_TOP2_MAINPLL2_D4_D4, "mainpll2_d4_d4", "mainpll2", 1, 16),
+ FACTOR(CLK_TOP2_MAINPLL2_D5, "mainpll2_d5", "mainpll2", 1, 5),
+ FACTOR(CLK_TOP2_MAINPLL2_D5_D2, "mainpll2_d5_d2", "mainpll2", 1, 10),
+ FACTOR(CLK_TOP2_MAINPLL2_D6, "mainpll2_d6", "mainpll2", 1, 6),
+ FACTOR(CLK_TOP2_MAINPLL2_D6_D2, "mainpll2_d6_d2", "mainpll2", 1, 12),
+ FACTOR(CLK_TOP2_MAINPLL2_D7, "mainpll2_d7", "mainpll2", 1, 7),
+ FACTOR(CLK_TOP2_MAINPLL2_D7_D2, "mainpll2_d7_d2", "mainpll2", 1, 14),
+ FACTOR(CLK_TOP2_MAINPLL2_D9, "mainpll2_d9", "mainpll2", 1, 9),
+ FACTOR(CLK_TOP2_UNIVPLL2_D3, "univpll2_d3", "univpll2", 1, 3),
+ FACTOR(CLK_TOP2_UNIVPLL2_D4, "univpll2_d4", "univpll2", 1, 4),
+ FACTOR(CLK_TOP2_UNIVPLL2_D4_D2, "univpll2_d4_d2", "univpll2", 1, 8),
+ FACTOR(CLK_TOP2_UNIVPLL2_D5, "univpll2_d5", "univpll2", 1, 5),
+ FACTOR(CLK_TOP2_UNIVPLL2_D5_D2, "univpll2_d5_d2", "univpll2", 1, 10),
+ FACTOR(CLK_TOP2_UNIVPLL2_D6, "univpll2_d6", "univpll2", 1, 6),
+ FACTOR(CLK_TOP2_UNIVPLL2_D6_D2, "univpll2_d6_d2", "univpll2", 1, 12),
+ FACTOR(CLK_TOP2_UNIVPLL2_D6_D4, "univpll2_d6_d4", "univpll2", 1, 24),
+ FACTOR(CLK_TOP2_UNIVPLL2_D7, "univpll2_d7", "univpll2", 1, 7),
+ FACTOR(CLK_TOP2_IMGPLL_D2, "imgpll_d2", "imgpll", 1, 2),
+ FACTOR(CLK_TOP2_IMGPLL_D4, "imgpll_d4", "imgpll", 1, 4),
+ FACTOR(CLK_TOP2_IMGPLL_D5, "imgpll_d5", "imgpll", 1, 5),
+ FACTOR(CLK_TOP2_IMGPLL_D5_D2, "imgpll_d5_d2", "imgpll", 1, 10),
+ FACTOR(CLK_TOP2_MMPLL2_D3, "mmpll2_d3", "mmpll2", 1, 3),
+ FACTOR(CLK_TOP2_MMPLL2_D4, "mmpll2_d4", "mmpll2", 1, 4),
+ FACTOR(CLK_TOP2_MMPLL2_D4_D2, "mmpll2_d4_d2", "mmpll2", 1, 8),
+ FACTOR(CLK_TOP2_MMPLL2_D5, "mmpll2_d5", "mmpll2", 1, 5),
+ FACTOR(CLK_TOP2_MMPLL2_D5_D2, "mmpll2_d5_d2", "mmpll2", 1, 10),
+ FACTOR(CLK_TOP2_MMPLL2_D6, "mmpll2_d6", "mmpll2", 1, 6),
+ FACTOR(CLK_TOP2_MMPLL2_D6_D2, "mmpll2_d6_d2", "mmpll2", 1, 12),
+ FACTOR(CLK_TOP2_MMPLL2_D7, "mmpll2_d7", "mmpll2", 1, 7),
+ FACTOR(CLK_TOP2_MMPLL2_D9, "mmpll2_d9", "mmpll2", 1, 9),
+ FACTOR(CLK_TOP2_TVDPLL1_D4, "tvdpll1_d4", "tvdpll1", 1, 4),
+ FACTOR(CLK_TOP2_TVDPLL1_D8, "tvdpll1_d8", "tvdpll1", 1, 8),
+ FACTOR(CLK_TOP2_TVDPLL1_D16, "tvdpll1_d16", "tvdpll1", 1, 16),
+ FACTOR(CLK_TOP2_TVDPLL2_D2, "tvdpll2_d2", "tvdpll2", 1, 2),
+ FACTOR(CLK_TOP2_TVDPLL2_D4, "tvdpll2_d4", "tvdpll2", 1, 4),
+ FACTOR(CLK_TOP2_TVDPLL2_D8, "tvdpll2_d8", "tvdpll2", 1, 8),
+ FACTOR(CLK_TOP2_TVDPLL2_D16, "tvdpll2_d16", "tvdpll2", 92, 1473),
+ FACTOR(CLK_TOP2_TVDPLL3_D2, "tvdpll3_d2", "tvdpll3", 1, 2),
+ FACTOR(CLK_TOP2_TVDPLL3_D4, "tvdpll3_d4", "tvdpll3", 1, 4),
+ FACTOR(CLK_TOP2_TVDPLL3_D8, "tvdpll3_d8", "tvdpll3", 1, 8),
+ FACTOR(CLK_TOP2_TVDPLL3_D16, "tvdpll3_d16", "tvdpll3", 92, 1473),
+};
+
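+/*
+ * Parent names with a "ck_" prefix are not defined in this file; they
+ * are presumably registered by another clock provider (likely the
+ * primary topckgen), while the unprefixed *pll2/imgpll/tvdpll factors
+ * come from the table above.
+ */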
+static const char * const seninf_parents[] = {
+ "clk26m",
+ "ck_osc_d10",
+ "ck_osc_d8",
+ "ck_osc_d5",
+ "ck_osc_d4",
+ "univpll2_d6_d2",
+ "mainpll2_d9",
+ "ck_osc_d2",
+ "mainpll2_d4_d2",
+ "univpll2_d4_d2",
+ "mmpll2_d4_d2",
+ "univpll2_d7",
+ "mainpll2_d6",
+ "mmpll2_d7",
+ "univpll2_d6",
+ "univpll2_d5"
+};
+
+static const char * const img1_parents[] = {
+ "clk26m",
+ "ck_osc_d4",
+ "ck_osc_d3",
+ "mmpll2_d6_d2",
+ "ck_osc_d2",
+ "imgpll_d5_d2",
+ "mmpll2_d5_d2",
+ "univpll2_d4_d2",
+ "mmpll2_d4_d2",
+ "mmpll2_d7",
+ "univpll2_d6",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "mmpll2_d5",
+ "univpll2_d4",
+ "imgpll_d4"
+};
+
+static const char * const ipe_parents[] = {
+ "clk26m",
+ "ck_osc_d4",
+ "ck_osc_d3",
+ "ck_osc_d2",
+ "univpll2_d6",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "imgpll_d5",
+ "ck_mainpll_d4",
+ "mmpll2_d5",
+ "imgpll_d4"
+};
+
+static const char * const cam_parents[] = {
+ "clk26m",
+ "ck_osc_d10",
+ "ck_osc_d4",
+ "ck_osc_d3",
+ "ck_osc_d2",
+ "mmpll2_d5_d2",
+ "univpll2_d4_d2",
+ "univpll2_d7",
+ "mmpll2_d7",
+ "univpll2_d6",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "mmpll2_d5",
+ "univpll2_d4",
+ "imgpll_d4",
+ "mmpll2_d4"
+};
+
+static const char * const camtm_parents[] = {
+ "clk26m",
+ "univpll2_d6_d4",
+ "ck_osc_d4",
+ "ck_osc_d3",
+ "univpll2_d6_d2"
+};
+
+static const char * const dpe_parents[] = {
+ "clk26m",
+ "mmpll2_d5_d2",
+ "univpll2_d4_d2",
+ "mmpll2_d7",
+ "univpll2_d6",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "mmpll2_d5",
+ "imgpll_d4",
+ "mmpll2_d4"
+};
+
+static const char * const vdec_parents[] = {
+ "clk26m",
+ "ck_mainpll_d5_d2",
+ "mainpll2_d4_d4",
+ "mainpll2_d7_d2",
+ "mainpll2_d6_d2",
+ "mainpll2_d5_d2",
+ "mainpll2_d9",
+ "mainpll2_d4_d2",
+ "mainpll2_d7",
+ "mainpll2_d6",
+ "univpll2_d6",
+ "mainpll2_d5",
+ "mainpll2_d4",
+ "imgpll_d2"
+};
+
+static const char * const ccusys_parents[] = {
+ "clk26m",
+ "ck_osc_d4",
+ "ck_osc_d3",
+ "ck_osc_d2",
+ "mmpll2_d5_d2",
+ "univpll2_d4_d2",
+ "mmpll2_d7",
+ "univpll2_d6",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "mainpll2_d4",
+ "mainpll2_d3",
+ "univpll2_d3"
+};
+
+static const char * const ccutm_parents[] = {
+ "clk26m",
+ "univpll2_d6_d4",
+ "ck_osc_d4",
+ "ck_osc_d3",
+ "univpll2_d6_d2"
+};
+
+static const char * const venc_parents[] = {
+ "clk26m",
+ "mainpll2_d5_d2",
+ "univpll2_d5_d2",
+ "mainpll2_d4_d2",
+ "mmpll2_d9",
+ "univpll2_d4_d2",
+ "mmpll2_d4_d2",
+ "mainpll2_d6",
+ "univpll2_d6",
+ "mainpll2_d5",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "mainpll2_d4",
+ "univpll2_d4",
+ "univpll2_d3"
+};
+
+static const char * const dp1_parents[] = {
+ "clk26m",
+ "tvdpll2_d16",
+ "tvdpll2_d8",
+ "tvdpll2_d4",
+ "tvdpll2_d2"
+};
+
+static const char * const dp0_parents[] = {
+ "clk26m",
+ "tvdpll1_d16",
+ "tvdpll1_d8",
+ "tvdpll1_d4",
+ "ck_tvdpll1_d2"
+};
+
+static const char * const disp_parents[] = {
+ "clk26m",
+ "ck_mainpll_d5_d2",
+ "ck_mainpll_d4_d2",
+ "ck_mainpll_d6",
+ "mainpll2_d5",
+ "mmpll2_d6",
+ "mainpll2_d4",
+ "univpll2_d4",
+ "mainpll2_d3"
+};
+
+static const char * const mdp_parents[] = {
+ "clk26m",
+ "ck_mainpll_d5_d2",
+ "mainpll2_d5_d2",
+ "mmpll2_d6_d2",
+ "mainpll2_d9",
+ "mainpll2_d4_d2",
+ "mainpll2_d7",
+ "mainpll2_d6",
+ "mainpll2_d5",
+ "mmpll2_d6",
+ "mainpll2_d4",
+ "univpll2_d4",
+ "mainpll2_d3"
+};
+
+static const char * const mminfra_parents[] = {
+ "clk26m",
+ "ck_osc_d4",
+ "ck_mainpll_d7_d2",
+ "ck_mainpll_d5_d2",
+ "ck_mainpll_d9",
+ "mmpll2_d6_d2",
+ "mainpll2_d4_d2",
+ "ck_mainpll_d6",
+ "univpll2_d6",
+ "mainpll2_d5",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "mainpll2_d4",
+ "univpll2_d4",
+ "mainpll2_d3",
+ "univpll2_d3"
+};
+
+static const char * const mminfra_snoc_parents[] = {
+ "clk26m",
+ "ck_osc_d4",
+ "ck_mainpll_d7_d2",
+ "ck_mainpll_d9",
+ "ck_mainpll_d7",
+ "ck_mainpll_d6",
+ "mmpll2_d4_d2",
+ "ck_mainpll_d5",
+ "ck_mainpll_d4",
+ "univpll2_d4",
+ "mmpll2_d4",
+ "mainpll2_d3",
+ "univpll2_d3",
+ "mmpll2_d3",
+ "mainpll2_d2"
+};
+
+static const char * const mmup_parents[] = {
+ "clk26m",
+ "mainpll2_d6",
+ "mainpll2_d5",
+ "ck_osc_d2",
+ "ck_osc",
+ "ck_mainpll_d4",
+ "univpll2_d4",
+ "mainpll2_d3"
+};
+
+static const char * const mminfra_ao_parents[] = {
+ "clk26m",
+ "ck_osc_d4",
+ "ck_mainpll_d3"
+};
+
+static const char * const dvo_parents[] = {
+ "clk26m",
+ "tvdpll3_d16",
+ "tvdpll3_d8",
+ "tvdpll3_d4",
+ "tvdpll3_d2"
+};
+
+static const char * const dvo_favt_parents[] = {
+ "clk26m",
+ "tvdpll3_d16",
+ "tvdpll3_d8",
+ "tvdpll3_d4",
+ "vlp_apll1",
+ "vlp_apll2",
+ "tvdpll3_d2"
+};
+
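+/*
+ * A short orientation note on the entries below: each
+ * MUX_GATE_HWV_FENC_CLR_SET_UPD() lists, in order, the clock ID and name,
+ * the parent table, the mux CFG/SET/CLR registers, the hardware-voter
+ * DONE/SET/CLR registers, the selector shift and width, the gate bit,
+ * the update register and shift, and finally the FENC status monitor
+ * register and bit.
+ */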
+static const struct mtk_mux top_muxes[] = {
+ /* CKSYS2_CLK_CFG_0 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_SENINF0, "seninf0", seninf_parents,
+ CKSYS2_CLK_CFG_0, CKSYS2_CLK_CFG_0_SET, CKSYS2_CLK_CFG_0_CLR,
+ MM_HWV_CG_30_DONE, MM_HWV_CG_30_SET, MM_HWV_CG_30_CLR,
+ 0, 4, 7, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_SENINF0_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 31),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_SENINF1, "seninf1", seninf_parents,
+ CKSYS2_CLK_CFG_0, CKSYS2_CLK_CFG_0_SET, CKSYS2_CLK_CFG_0_CLR,
+ MM_HWV_CG_30_DONE, MM_HWV_CG_30_SET, MM_HWV_CG_30_CLR,
+ 8, 4, 15, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_SENINF1_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 30),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_SENINF2, "seninf2", seninf_parents,
+ CKSYS2_CLK_CFG_0, CKSYS2_CLK_CFG_0_SET, CKSYS2_CLK_CFG_0_CLR,
+ MM_HWV_CG_30_DONE, MM_HWV_CG_30_SET, MM_HWV_CG_30_CLR,
+ 16, 4, 23, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_SENINF2_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 29),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_SENINF3, "seninf3", seninf_parents,
+ CKSYS2_CLK_CFG_0, CKSYS2_CLK_CFG_0_SET, CKSYS2_CLK_CFG_0_CLR,
+ MM_HWV_CG_30_DONE, MM_HWV_CG_30_SET, MM_HWV_CG_30_CLR,
+ 24, 4, 31, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_SENINF3_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 28),
+ /* CKSYS2_CLK_CFG_1 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_SENINF4, "seninf4", seninf_parents,
+ CKSYS2_CLK_CFG_1, CKSYS2_CLK_CFG_1_SET, CKSYS2_CLK_CFG_1_CLR,
+ MM_HWV_CG_31_DONE, MM_HWV_CG_31_SET, MM_HWV_CG_31_CLR,
+ 0, 4, 7, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_SENINF4_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 27),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_SENINF5, "seninf5", seninf_parents,
+ CKSYS2_CLK_CFG_1, CKSYS2_CLK_CFG_1_SET, CKSYS2_CLK_CFG_1_CLR,
+ MM_HWV_CG_31_DONE, MM_HWV_CG_31_SET, MM_HWV_CG_31_CLR,
+ 8, 4, 15, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_SENINF5_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 26),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_IMG1, "img1", img1_parents,
+ CKSYS2_CLK_CFG_1, CKSYS2_CLK_CFG_1_SET, CKSYS2_CLK_CFG_1_CLR,
+ MM_HWV_CG_31_DONE, MM_HWV_CG_31_SET, MM_HWV_CG_31_CLR,
+ 16, 4, 23, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_IMG1_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 25),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_IPE, "ipe", ipe_parents,
+ CKSYS2_CLK_CFG_1, CKSYS2_CLK_CFG_1_SET, CKSYS2_CLK_CFG_1_CLR,
+ MM_HWV_CG_31_DONE, MM_HWV_CG_31_SET, MM_HWV_CG_31_CLR,
+ 24, 4, 31, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_IPE_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 24),
+ /* CKSYS2_CLK_CFG_2 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_CAM, "cam", cam_parents,
+ CKSYS2_CLK_CFG_2, CKSYS2_CLK_CFG_2_SET, CKSYS2_CLK_CFG_2_CLR,
+ MM_HWV_CG_32_DONE, MM_HWV_CG_32_SET, MM_HWV_CG_32_CLR,
+ 0, 4, 7, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_CAM_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 23),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_CAMTM, "camtm", camtm_parents,
+ CKSYS2_CLK_CFG_2, CKSYS2_CLK_CFG_2_SET, CKSYS2_CLK_CFG_2_CLR,
+ MM_HWV_CG_32_DONE, MM_HWV_CG_32_SET, MM_HWV_CG_32_CLR,
+ 8, 3, 15, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_CAMTM_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 22),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_DPE, "dpe", dpe_parents,
+ CKSYS2_CLK_CFG_2, CKSYS2_CLK_CFG_2_SET, CKSYS2_CLK_CFG_2_CLR,
+ MM_HWV_CG_32_DONE, MM_HWV_CG_32_SET, MM_HWV_CG_32_CLR,
+ 16, 4, 23, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_DPE_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 21),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_VDEC, "vdec", vdec_parents,
+ CKSYS2_CLK_CFG_2, CKSYS2_CLK_CFG_2_SET, CKSYS2_CLK_CFG_2_CLR,
+ MM_HWV_CG_32_DONE, MM_HWV_CG_32_SET, MM_HWV_CG_32_CLR,
+ 24, 4, 31, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_VDEC_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 20),
+ /* CKSYS2_CLK_CFG_3 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_CCUSYS, "ccusys", ccusys_parents,
+ CKSYS2_CLK_CFG_3, CKSYS2_CLK_CFG_3_SET, CKSYS2_CLK_CFG_3_CLR,
+ MM_HWV_CG_33_DONE, MM_HWV_CG_33_SET, MM_HWV_CG_33_CLR,
+ 0, 4, 7, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_CCUSYS_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 19),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_CCUTM, "ccutm", ccutm_parents,
+ CKSYS2_CLK_CFG_3, CKSYS2_CLK_CFG_3_SET, CKSYS2_CLK_CFG_3_CLR,
+ MM_HWV_CG_33_DONE, MM_HWV_CG_33_SET, MM_HWV_CG_33_CLR,
+ 8, 3, 15, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_CCUTM_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 18),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_VENC, "venc", venc_parents,
+ CKSYS2_CLK_CFG_3, CKSYS2_CLK_CFG_3_SET, CKSYS2_CLK_CFG_3_CLR,
+ MM_HWV_CG_33_DONE, MM_HWV_CG_33_SET, MM_HWV_CG_33_CLR,
+ 16, 4, 23, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_VENC_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 17),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP2_DVO, "dvo", dvo_parents,
+ CKSYS2_CLK_CFG_3, CKSYS2_CLK_CFG_3_SET, CKSYS2_CLK_CFG_3_CLR,
+ 24, 3, 31, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_DVO_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 16),
+	/* CKSYS2_CLK_CFG_4 */
+	MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP2_DVO_FAVT, "dvo_favt", dvo_favt_parents,
+		CKSYS2_CLK_CFG_4, CKSYS2_CLK_CFG_4_SET, CKSYS2_CLK_CFG_4_CLR,
+ 0, 3, 7, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_DVO_FAVT_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 15),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP2_DP1, "dp1", dp1_parents,
+ CKSYS2_CLK_CFG_4, CKSYS2_CLK_CFG_4_SET, CKSYS2_CLK_CFG_4_CLR,
+ 8, 3, 15, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_DP1_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 14),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP2_DP0, "dp0", dp0_parents,
+ CKSYS2_CLK_CFG_4, CKSYS2_CLK_CFG_4_SET, CKSYS2_CLK_CFG_4_CLR,
+ 16, 3, 23, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_DP0_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 13),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_DISP, "disp", disp_parents,
+ CKSYS2_CLK_CFG_4, CKSYS2_CLK_CFG_4_SET, CKSYS2_CLK_CFG_4_CLR,
+ MM_HWV_CG_34_DONE, MM_HWV_CG_34_SET, MM_HWV_CG_34_CLR,
+ 24, 4, 31, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_DISP_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 12),
+ /* CKSYS2_CLK_CFG_5 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_MDP, "mdp", mdp_parents,
+ CKSYS2_CLK_CFG_5, CKSYS2_CLK_CFG_5_SET, CKSYS2_CLK_CFG_5_CLR,
+ MM_HWV_CG_35_DONE, MM_HWV_CG_35_SET, MM_HWV_CG_35_CLR,
+ 0, 4, 7, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_MDP_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 11),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_MMINFRA, "mminfra", mminfra_parents,
+ CKSYS2_CLK_CFG_5, CKSYS2_CLK_CFG_5_SET, CKSYS2_CLK_CFG_5_CLR,
+ MM_HWV_CG_35_DONE, MM_HWV_CG_35_SET, MM_HWV_CG_35_CLR,
+ 8, 4, 15, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_MMINFRA_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 10),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_MMINFRA_SNOC, "mminfra_snoc", mminfra_snoc_parents,
+ CKSYS2_CLK_CFG_5, CKSYS2_CLK_CFG_5_SET, CKSYS2_CLK_CFG_5_CLR,
+ MM_HWV_CG_35_DONE, MM_HWV_CG_35_SET, MM_HWV_CG_35_CLR,
+ 16, 4, 23, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_MMINFRA_SNOC_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 9),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP2_MMUP, "mmup", mmup_parents,
+ CKSYS2_CLK_CFG_5, CKSYS2_CLK_CFG_5_SET, CKSYS2_CLK_CFG_5_CLR,
+ 24, 3, 31, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_MMUP_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 8),
+ /* CKSYS2_CLK_CFG_6 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_MMINFRA_AO, "mminfra_ao", mminfra_ao_parents,
+ CKSYS2_CLK_CFG_6, CKSYS2_CLK_CFG_6_SET, CKSYS2_CLK_CFG_6_CLR,
+ MM_HWV_CG_36_DONE, MM_HWV_CG_36_SET, MM_HWV_CG_36_CLR,
+		16, 2, 23, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_MMINFRA_AO_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 5),
+};
+
+static const struct mtk_clk_desc topck_desc = {
+ .factor_clks = top_divs,
+ .num_factor_clks = ARRAY_SIZE(top_divs),
+ .mux_clks = top_muxes,
+ .num_mux_clks = ARRAY_SIZE(top_muxes),
+};
+
+static const struct of_device_id of_match_clk_mt8196_ck[] = {
+ { .compatible = "mediatek,mt8196-topckgen-gp2", .data = &topck_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_ck);
+
+static struct platform_driver clk_mt8196_topck_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-topck2",
+ .of_match_table = of_match_clk_mt8196_ck,
+ },
+};
+
+MODULE_DESCRIPTION("MediaTek MT8196 GP2 top clock generators driver");
+module_platform_driver(clk_mt8196_topck_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-ufs_ao.c b/drivers/clk/mediatek/clk-mt8196-ufs_ao.c
new file mode 100644
index 000000000000..0c04717b7b4b
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-ufs_ao.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+#include <dt-bindings/reset/mediatek,mt8196-resets.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#define MT8196_UFSAO_RST0_SET_OFFSET 0x48
+#define MT8196_UFSAO_RST1_SET_OFFSET 0x148
+
+static const struct mtk_gate_regs ufsao0_cg_regs = {
+ .set_ofs = 0x108,
+ .clr_ofs = 0x10c,
+ .sta_ofs = 0x104,
+};
+
+static const struct mtk_gate_regs ufsao1_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0xc,
+ .sta_ofs = 0x4,
+};
+
+#define GATE_UFSAO0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ufsao0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_UFSAO1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ufsao1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate ufsao_clks[] = {
+ /* UFSAO0 */
+ GATE_UFSAO0(CLK_UFSAO_UFSHCI_UFS, "ufsao_ufshci_ufs", "ufs", 0),
+ GATE_UFSAO0(CLK_UFSAO_UFSHCI_AES, "ufsao_ufshci_aes", "aes_ufsfde", 1),
+ /* UFSAO1 */
+ GATE_UFSAO1(CLK_UFSAO_UNIPRO_TX_SYM, "ufsao_unipro_tx_sym", "clk26m", 0),
+ GATE_UFSAO1(CLK_UFSAO_UNIPRO_RX_SYM0, "ufsao_unipro_rx_sym0", "clk26m", 1),
+ GATE_UFSAO1(CLK_UFSAO_UNIPRO_RX_SYM1, "ufsao_unipro_rx_sym1", "clk26m", 2),
+ GATE_UFSAO1(CLK_UFSAO_UNIPRO_SYS, "ufsao_unipro_sys", "ufs", 3),
+ GATE_UFSAO1(CLK_UFSAO_UNIPRO_SAP, "ufsao_unipro_sap", "clk26m", 4),
+ GATE_UFSAO1(CLK_UFSAO_PHY_SAP, "ufsao_phy_sap", "clk26m", 8),
+};
+
+static u16 ufsao_rst_ofs[] = {
+ MT8196_UFSAO_RST0_SET_OFFSET,
+ MT8196_UFSAO_RST1_SET_OFFSET
+};
+
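+/*
+ * Reset indices are linearized as (bank * RST_NR_PER_BANK) + bit within
+ * that bank's SET register.
+ */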
+static u16 ufsao_rst_idx_map[] = {
+ [MT8196_UFSAO_RST0_UFS_MPHY] = 8,
+ [MT8196_UFSAO_RST1_UFS_UNIPRO] = 1 * RST_NR_PER_BANK + 0,
+ [MT8196_UFSAO_RST1_UFS_CRYPTO] = 1 * RST_NR_PER_BANK + 1,
+ [MT8196_UFSAO_RST1_UFSHCI] = 1 * RST_NR_PER_BANK + 2,
+};
+
+static const struct mtk_clk_rst_desc ufsao_rst_desc = {
+ .version = MTK_RST_SET_CLR,
+ .rst_bank_ofs = ufsao_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(ufsao_rst_ofs),
+ .rst_idx_map = ufsao_rst_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(ufsao_rst_idx_map),
+};
+
+static const struct mtk_clk_desc ufsao_mcd = {
+ .clks = ufsao_clks,
+ .num_clks = ARRAY_SIZE(ufsao_clks),
+ .rst_desc = &ufsao_rst_desc,
+};
+
+static const struct of_device_id of_match_clk_mt8196_ufs_ao[] = {
+ { .compatible = "mediatek,mt8196-ufscfg-ao", .data = &ufsao_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_ufs_ao);
+
+static struct platform_driver clk_mt8196_ufs_ao_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-ufs-ao",
+ .of_match_table = of_match_clk_mt8196_ufs_ao,
+ },
+};
+
+module_platform_driver(clk_mt8196_ufs_ao_drv);
+MODULE_DESCRIPTION("MediaTek MT8196 ufs_ao clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-vdec.c b/drivers/clk/mediatek/clk-mt8196-vdec.c
new file mode 100644
index 000000000000..f8dcd84a2b58
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-vdec.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs vde20_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x4,
+ .sta_ofs = 0x0,
+};
+
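+/*
+ * The *_hwv_regs offsets below address the shared hardware-voter block
+ * (see mtk_clk_get_hwv_regmap()), not the VDEC clock-gate register space.
+ */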
+static const struct mtk_gate_regs vde20_hwv_regs = {
+ .set_ofs = 0x0088,
+ .clr_ofs = 0x008c,
+ .sta_ofs = 0x2c44,
+};
+
+static const struct mtk_gate_regs vde21_cg_regs = {
+ .set_ofs = 0x200,
+ .clr_ofs = 0x204,
+ .sta_ofs = 0x200,
+};
+
+static const struct mtk_gate_regs vde21_hwv_regs = {
+ .set_ofs = 0x0080,
+ .clr_ofs = 0x0084,
+ .sta_ofs = 0x2c40,
+};
+
+static const struct mtk_gate_regs vde22_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0xc,
+ .sta_ofs = 0x8,
+};
+
+static const struct mtk_gate_regs vde22_hwv_regs = {
+ .set_ofs = 0x0078,
+ .clr_ofs = 0x007c,
+ .sta_ofs = 0x2c3c,
+};
+
+#define GATE_HWV_VDE20(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde20_cg_regs, \
+ .hwv_regs = &vde20_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VDE21(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde21_cg_regs, \
+ .hwv_regs = &vde21_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VDE22(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde22_cg_regs, \
+ .hwv_regs = &vde22_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE | \
+ CLK_IGNORE_UNUSED, \
+ }
+
+static const struct mtk_gate vde2_clks[] = {
+ /* VDE20 */
+ GATE_HWV_VDE20(CLK_VDE2_VDEC_CKEN, "vde2_vdec_cken", "vdec", 0),
+ GATE_HWV_VDE20(CLK_VDE2_VDEC_ACTIVE, "vde2_vdec_active", "vdec", 4),
+ GATE_HWV_VDE20(CLK_VDE2_VDEC_CKEN_ENG, "vde2_vdec_cken_eng", "vdec", 8),
+ /* VDE21 */
+ GATE_HWV_VDE21(CLK_VDE2_LAT_CKEN, "vde2_lat_cken", "vdec", 0),
+ GATE_HWV_VDE21(CLK_VDE2_LAT_ACTIVE, "vde2_lat_active", "vdec", 4),
+ GATE_HWV_VDE21(CLK_VDE2_LAT_CKEN_ENG, "vde2_lat_cken_eng", "vdec", 8),
+ /* VDE22 */
+ GATE_HWV_VDE22(CLK_VDE2_LARB1_CKEN, "vde2_larb1_cken", "vdec", 0),
+};
+
+static const struct mtk_clk_desc vde2_mcd = {
+ .clks = vde2_clks,
+ .num_clks = ARRAY_SIZE(vde2_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct mtk_gate_regs vde10_hwv_regs = {
+ .set_ofs = 0x00a0,
+ .clr_ofs = 0x00a4,
+ .sta_ofs = 0x2c50,
+};
+
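+/*
+ * VDE11 and VDE12 have no separate SET/CLR registers: all three offsets
+ * point at the same read/write register.
+ */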
+static const struct mtk_gate_regs vde11_cg_regs = {
+ .set_ofs = 0x1e0,
+ .clr_ofs = 0x1e0,
+ .sta_ofs = 0x1e0,
+};
+
+static const struct mtk_gate_regs vde11_hwv_regs = {
+ .set_ofs = 0x00b0,
+ .clr_ofs = 0x00b4,
+ .sta_ofs = 0x2c58,
+};
+
+static const struct mtk_gate_regs vde12_cg_regs = {
+ .set_ofs = 0x1ec,
+ .clr_ofs = 0x1ec,
+ .sta_ofs = 0x1ec,
+};
+
+static const struct mtk_gate_regs vde12_hwv_regs = {
+ .set_ofs = 0x00a8,
+ .clr_ofs = 0x00ac,
+ .sta_ofs = 0x2c54,
+};
+
+static const struct mtk_gate_regs vde13_cg_regs = {
+ .set_ofs = 0x200,
+ .clr_ofs = 0x204,
+ .sta_ofs = 0x200,
+};
+
+static const struct mtk_gate_regs vde13_hwv_regs = {
+ .set_ofs = 0x0098,
+ .clr_ofs = 0x009c,
+ .sta_ofs = 0x2c4c,
+};
+
+static const struct mtk_gate_regs vde14_hwv_regs = {
+ .set_ofs = 0x0090,
+ .clr_ofs = 0x0094,
+ .sta_ofs = 0x2c48,
+};
+
+#define GATE_HWV_VDE10(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde20_cg_regs, \
+ .hwv_regs = &vde10_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VDE11(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde11_cg_regs, \
+ .hwv_regs = &vde11_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VDE12(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde12_cg_regs, \
+ .hwv_regs = &vde12_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv, \
+ .flags = CLK_OPS_PARENT_ENABLE \
+ }
+
+#define GATE_HWV_VDE13(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde13_cg_regs, \
+ .hwv_regs = &vde13_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VDE14(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde22_cg_regs, \
+ .hwv_regs = &vde14_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE | \
+ CLK_IGNORE_UNUSED, \
+ }
+
+static const struct mtk_gate vde1_clks[] = {
+ /* VDE10 */
+ GATE_HWV_VDE10(CLK_VDE1_VDEC_CKEN, "vde1_vdec_cken", "vdec", 0),
+ GATE_HWV_VDE10(CLK_VDE1_VDEC_ACTIVE, "vde1_vdec_active", "vdec", 4),
+ GATE_HWV_VDE10(CLK_VDE1_VDEC_CKEN_ENG, "vde1_vdec_cken_eng", "vdec", 8),
+ /* VDE11 */
+ GATE_HWV_VDE11(CLK_VDE1_VDEC_SOC_IPS_EN, "vde1_vdec_soc_ips_en", "vdec", 0),
+ /* VDE12 */
+ GATE_HWV_VDE12(CLK_VDE1_VDEC_SOC_APTV_EN, "vde1_aptv_en", "ck_tck_26m_mx9_ck", 0),
+ GATE_HWV_VDE12(CLK_VDE1_VDEC_SOC_APTV_TOP_EN, "vde1_aptv_topen", "ck_tck_26m_mx9_ck", 1),
+ /* VDE13 */
+ GATE_HWV_VDE13(CLK_VDE1_LAT_CKEN, "vde1_lat_cken", "vdec", 0),
+ GATE_HWV_VDE13(CLK_VDE1_LAT_ACTIVE, "vde1_lat_active", "vdec", 4),
+ GATE_HWV_VDE13(CLK_VDE1_LAT_CKEN_ENG, "vde1_lat_cken_eng", "vdec", 8),
+ /* VDE14 */
+ GATE_HWV_VDE14(CLK_VDE1_LARB1_CKEN, "vde1_larb1_cken", "vdec", 0),
+};
+
+static const struct mtk_clk_desc vde1_mcd = {
+ .clks = vde1_clks,
+ .num_clks = ARRAY_SIZE(vde1_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct of_device_id of_match_clk_mt8196_vdec[] = {
+ { .compatible = "mediatek,mt8196-vdecsys", .data = &vde2_mcd },
+ { .compatible = "mediatek,mt8196-vdecsys-soc", .data = &vde1_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_vdec);
+
+static struct platform_driver clk_mt8196_vdec_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-vdec",
+ .of_match_table = of_match_clk_mt8196_vdec,
+ },
+};
+module_platform_driver(clk_mt8196_vdec_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 Video Decoders clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-vdisp_ao.c b/drivers/clk/mediatek/clk-mt8196-vdisp_ao.c
new file mode 100644
index 000000000000..fddb69d1c3eb
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-vdisp_ao.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs mm_v_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs mm_v_hwv_regs = {
+ .set_ofs = 0x0030,
+ .clr_ofs = 0x0034,
+ .sta_ofs = 0x2c18,
+};
+
+#define GATE_MM_AO_V(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm_v_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE | \
+ CLK_IS_CRITICAL, \
+ }
+
+#define GATE_HWV_MM_V(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm_v_cg_regs, \
+ .hwv_regs = &mm_v_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+static const struct mtk_gate mm_v_clks[] = {
+ GATE_HWV_MM_V(CLK_MM_V_DISP_VDISP_AO_CONFIG, "mm_v_disp_vdisp_ao_config", "disp", 0),
+ GATE_HWV_MM_V(CLK_MM_V_DISP_DPC, "mm_v_disp_dpc", "disp", 16),
+ GATE_MM_AO_V(CLK_MM_V_SMI_SUB_SOMM0, "mm_v_smi_sub_somm0", "disp", 2),
+};
+
+static const struct mtk_clk_desc mm_v_mcd = {
+ .clks = mm_v_clks,
+ .num_clks = ARRAY_SIZE(mm_v_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8196_vdisp_ao[] = {
+ { .compatible = "mediatek,mt8196-vdisp-ao", .data = &mm_v_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_vdisp_ao);
+
+static struct platform_driver clk_mt8196_vdisp_ao_drv = {
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
+ .driver = {
+ .name = "clk-mt8196-vdisp-ao",
+ .of_match_table = of_match_clk_mt8196_vdisp_ao,
+ },
+};
+module_platform_driver(clk_mt8196_vdisp_ao_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 vdisp_ao clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-venc.c b/drivers/clk/mediatek/clk-mt8196-venc.c
new file mode 100644
index 000000000000..13e2e36e945f
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-venc.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs ven10_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs ven10_hwv_regs = {
+ .set_ofs = 0x00b8,
+ .clr_ofs = 0x00bc,
+ .sta_ofs = 0x2c5c,
+};
+
+static const struct mtk_gate_regs ven11_cg_regs = {
+ .set_ofs = 0x10,
+ .clr_ofs = 0x14,
+ .sta_ofs = 0x10,
+};
+
+static const struct mtk_gate_regs ven11_hwv_regs = {
+ .set_ofs = 0x00c0,
+ .clr_ofs = 0x00c4,
+ .sta_ofs = 0x2c60,
+};
+
+#define GATE_VEN10(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven10_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+#define GATE_HWV_VEN10_FLAGS(_id, _name, _parent, _shift, _flags) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven10_cg_regs, \
+ .hwv_regs = &ven10_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv, \
+ .flags = (_flags) | \
+ CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VEN10(_id, _name, _parent, _shift) \
+ GATE_HWV_VEN10_FLAGS(_id, _name, _parent, _shift, 0)
+
+#define GATE_HWV_VEN11(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven11_cg_regs, \
+ .hwv_regs = &ven11_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE \
+ }
+
+static const struct mtk_gate ven1_clks[] = {
+ /* VEN10 */
+ GATE_HWV_VEN10(CLK_VEN1_CKE0_LARB, "ven1_larb", "venc", 0),
+ GATE_HWV_VEN10(CLK_VEN1_CKE1_VENC, "ven1_venc", "venc", 4),
+ GATE_VEN10(CLK_VEN1_CKE2_JPGENC, "ven1_jpgenc", "venc", 8),
+ GATE_VEN10(CLK_VEN1_CKE3_JPGDEC, "ven1_jpgdec", "venc", 12),
+ GATE_VEN10(CLK_VEN1_CKE4_JPGDEC_C1, "ven1_jpgdec_c1", "venc", 16),
+ GATE_HWV_VEN10(CLK_VEN1_CKE5_GALS, "ven1_gals", "venc", 28),
+ GATE_HWV_VEN10(CLK_VEN1_CKE29_VENC_ADAB_CTRL, "ven1_venc_adab_ctrl",
+ "venc", 29),
+ GATE_HWV_VEN10_FLAGS(CLK_VEN1_CKE29_VENC_XPC_CTRL,
+ "ven1_venc_xpc_ctrl", "venc", 30,
+ CLK_IGNORE_UNUSED),
+ GATE_HWV_VEN10(CLK_VEN1_CKE6_GALS_SRAM, "ven1_gals_sram", "venc", 31),
+ /* VEN11 */
+ GATE_HWV_VEN11(CLK_VEN1_RES_FLAT, "ven1_res_flat", "venc", 0),
+};
+
+static const struct mtk_clk_desc ven1_mcd = {
+ .clks = ven1_clks,
+ .num_clks = ARRAY_SIZE(ven1_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct mtk_gate_regs ven20_hwv_regs = {
+ .set_ofs = 0x00c8,
+ .clr_ofs = 0x00cc,
+ .sta_ofs = 0x2c64,
+};
+
+static const struct mtk_gate_regs ven21_hwv_regs = {
+ .set_ofs = 0x00d0,
+ .clr_ofs = 0x00d4,
+ .sta_ofs = 0x2c68,
+};
+
+#define GATE_VEN20(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven10_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+#define GATE_HWV_VEN20(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven10_cg_regs, \
+ .hwv_regs = &ven20_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VEN21(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven11_cg_regs, \
+ .hwv_regs = &ven21_hwv_regs, \
+ .shift = _shift, \
+		.ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+		.flags = CLK_OPS_PARENT_ENABLE,	\
+ }
+
+static const struct mtk_gate ven2_clks[] = {
+ /* VEN20 */
+ GATE_HWV_VEN20(CLK_VEN2_CKE0_LARB, "ven2_larb", "venc", 0),
+ GATE_HWV_VEN20(CLK_VEN2_CKE1_VENC, "ven2_venc", "venc", 4),
+ GATE_VEN20(CLK_VEN2_CKE2_JPGENC, "ven2_jpgenc", "venc", 8),
+ GATE_VEN20(CLK_VEN2_CKE3_JPGDEC, "ven2_jpgdec", "venc", 12),
+ GATE_HWV_VEN20(CLK_VEN2_CKE5_GALS, "ven2_gals", "venc", 28),
+ GATE_HWV_VEN20(CLK_VEN2_CKE29_VENC_XPC_CTRL, "ven2_venc_xpc_ctrl", "venc", 30),
+ GATE_HWV_VEN20(CLK_VEN2_CKE6_GALS_SRAM, "ven2_gals_sram", "venc", 31),
+ /* VEN21 */
+ GATE_HWV_VEN21(CLK_VEN2_RES_FLAT, "ven2_res_flat", "venc", 0),
+};
+
+static const struct mtk_clk_desc ven2_mcd = {
+ .clks = ven2_clks,
+ .num_clks = ARRAY_SIZE(ven2_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct mtk_gate_regs ven_c20_hwv_regs = {
+ .set_ofs = 0x00d8,
+ .clr_ofs = 0x00dc,
+ .sta_ofs = 0x2c6c,
+};
+
+static const struct mtk_gate_regs ven_c21_hwv_regs = {
+ .set_ofs = 0x00e0,
+ .clr_ofs = 0x00e4,
+ .sta_ofs = 0x2c70,
+};
+
+#define GATE_HWV_VEN_C20(_id, _name, _parent, _shift) {\
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven10_cg_regs, \
+ .hwv_regs = &ven_c20_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VEN_C21(_id, _name, _parent, _shift) {\
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven11_cg_regs, \
+ .hwv_regs = &ven_c21_hwv_regs, \
+ .shift = _shift, \
+		.ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+static const struct mtk_gate ven_c2_clks[] = {
+ /* VEN_C20 */
+ GATE_HWV_VEN_C20(CLK_VEN_C2_CKE0_LARB, "ven_c2_larb", "venc", 0),
+ GATE_HWV_VEN_C20(CLK_VEN_C2_CKE1_VENC, "ven_c2_venc", "venc", 4),
+ GATE_HWV_VEN_C20(CLK_VEN_C2_CKE5_GALS, "ven_c2_gals", "venc", 28),
+ GATE_HWV_VEN_C20(CLK_VEN_C2_CKE29_VENC_XPC_CTRL, "ven_c2_venc_xpc_ctrl",
+ "venc", 30),
+ GATE_HWV_VEN_C20(CLK_VEN_C2_CKE6_GALS_SRAM, "ven_c2_gals_sram", "venc", 31),
+ /* VEN_C21 */
+ GATE_HWV_VEN_C21(CLK_VEN_C2_RES_FLAT, "ven_c2_res_flat", "venc", 0),
+};
+
+static const struct mtk_clk_desc ven_c2_mcd = {
+ .clks = ven_c2_clks,
+ .num_clks = ARRAY_SIZE(ven_c2_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct of_device_id of_match_clk_mt8196_venc[] = {
+ { .compatible = "mediatek,mt8196-vencsys", .data = &ven1_mcd },
+ { .compatible = "mediatek,mt8196-vencsys-c1", .data = &ven2_mcd },
+ { .compatible = "mediatek,mt8196-vencsys-c2", .data = &ven_c2_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_venc);
+
+static struct platform_driver clk_mt8196_venc_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-venc",
+ .of_match_table = of_match_clk_mt8196_venc,
+ },
+};
+module_platform_driver(clk_mt8196_venc_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 Video Encoders clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-vlpckgen.c b/drivers/clk/mediatek/clk-mt8196-vlpckgen.c
new file mode 100644
index 000000000000..d59a8a9d9855
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-vlpckgen.c
@@ -0,0 +1,725 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "clk-mtk.h"
+#include "clk-mux.h"
+#include "clk-pll.h"
+
+/* MUX SEL REG */
+#define VLP_CLK_CFG_UPDATE 0x0004
+#define VLP_CLK_CFG_UPDATE1 0x0008
+#define VLP_CLK_CFG_0 0x0010
+#define VLP_CLK_CFG_0_SET 0x0014
+#define VLP_CLK_CFG_0_CLR 0x0018
+#define VLP_CLK_CFG_1 0x0020
+#define VLP_CLK_CFG_1_SET 0x0024
+#define VLP_CLK_CFG_1_CLR 0x0028
+#define VLP_CLK_CFG_2 0x0030
+#define VLP_CLK_CFG_2_SET 0x0034
+#define VLP_CLK_CFG_2_CLR 0x0038
+#define VLP_CLK_CFG_3 0x0040
+#define VLP_CLK_CFG_3_SET 0x0044
+#define VLP_CLK_CFG_3_CLR 0x0048
+#define VLP_CLK_CFG_4 0x0050
+#define VLP_CLK_CFG_4_SET 0x0054
+#define VLP_CLK_CFG_4_CLR 0x0058
+#define VLP_CLK_CFG_5 0x0060
+#define VLP_CLK_CFG_5_SET 0x0064
+#define VLP_CLK_CFG_5_CLR 0x0068
+#define VLP_CLK_CFG_6 0x0070
+#define VLP_CLK_CFG_6_SET 0x0074
+#define VLP_CLK_CFG_6_CLR 0x0078
+#define VLP_CLK_CFG_7 0x0080
+#define VLP_CLK_CFG_7_SET 0x0084
+#define VLP_CLK_CFG_7_CLR 0x0088
+#define VLP_CLK_CFG_8 0x0090
+#define VLP_CLK_CFG_8_SET 0x0094
+#define VLP_CLK_CFG_8_CLR 0x0098
+#define VLP_CLK_CFG_9 0x00a0
+#define VLP_CLK_CFG_9_SET 0x00a4
+#define VLP_CLK_CFG_9_CLR 0x00a8
+#define VLP_CLK_CFG_10 0x00b0
+#define VLP_CLK_CFG_10_SET 0x00b4
+#define VLP_CLK_CFG_10_CLR 0x00b8
+#define VLP_OCIC_FENC_STATUS_MON_0 0x039c
+#define VLP_OCIC_FENC_STATUS_MON_1 0x03a0
+
+/* MUX SHIFT */
+#define TOP_MUX_SCP_SHIFT 0
+#define TOP_MUX_SCP_SPI_SHIFT 1
+#define TOP_MUX_SCP_IIC_SHIFT 2
+#define TOP_MUX_SCP_IIC_HS_SHIFT 3
+#define TOP_MUX_PWRAP_ULPOSC_SHIFT 4
+#define TOP_MUX_SPMI_M_TIA_32K_SHIFT 5
+#define TOP_MUX_APXGPT_26M_B_SHIFT 6
+#define TOP_MUX_DPSW_SHIFT 7
+#define TOP_MUX_DPSW_CENTRAL_SHIFT 8
+#define TOP_MUX_SPMI_M_MST_SHIFT 9
+#define TOP_MUX_DVFSRC_SHIFT 10
+#define TOP_MUX_PWM_VLP_SHIFT 11
+#define TOP_MUX_AXI_VLP_SHIFT 12
+#define TOP_MUX_SYSTIMER_26M_SHIFT 13
+#define TOP_MUX_SSPM_SHIFT 14
+#define TOP_MUX_SRCK_SHIFT 15
+#define TOP_MUX_CAMTG0_SHIFT 16
+#define TOP_MUX_CAMTG1_SHIFT 17
+#define TOP_MUX_CAMTG2_SHIFT 18
+#define TOP_MUX_CAMTG3_SHIFT 19
+#define TOP_MUX_CAMTG4_SHIFT 20
+#define TOP_MUX_CAMTG5_SHIFT 21
+#define TOP_MUX_CAMTG6_SHIFT 22
+#define TOP_MUX_CAMTG7_SHIFT 23
+#define TOP_MUX_SSPM_26M_SHIFT 25
+#define TOP_MUX_ULPOSC_SSPM_SHIFT 26
+#define TOP_MUX_VLP_PBUS_26M_SHIFT 27
+#define TOP_MUX_DEBUG_ERR_FLAG_VLP_26M_SHIFT 28
+#define TOP_MUX_DPMSRDMA_SHIFT 29
+#define TOP_MUX_VLP_PBUS_156M_SHIFT 30
+#define TOP_MUX_SPM_SHIFT 0
+#define TOP_MUX_MMINFRA_VLP_SHIFT 1
+#define TOP_MUX_USB_TOP_SHIFT 2
+#define TOP_MUX_SSUSB_XHCI_SHIFT 3
+#define TOP_MUX_NOC_VLP_SHIFT 4
+#define TOP_MUX_AUDIO_H_SHIFT 5
+#define TOP_MUX_AUD_ENGEN1_SHIFT 6
+#define TOP_MUX_AUD_ENGEN2_SHIFT 7
+#define TOP_MUX_AUD_INTBUS_SHIFT 8
+#define TOP_MUX_SPU_VLP_26M_SHIFT 9
+#define TOP_MUX_SPU0_VLP_SHIFT 10
+#define TOP_MUX_SPU1_VLP_SHIFT 11
+
+/* CKSTA REG */
+#define VLP_CKSTA_REG0 0x0250
+#define VLP_CKSTA_REG1 0x0254
+
+/* HW Voter REG */
+#define HWV_CG_9_SET 0x0048
+#define HWV_CG_9_CLR 0x004c
+#define HWV_CG_9_DONE 0x2c24
+#define HWV_CG_10_SET 0x0050
+#define HWV_CG_10_CLR 0x0054
+#define HWV_CG_10_DONE 0x2c28
+
+/* PLL REG */
+#define VLP_AP_PLL_CON3 0x264
+#define VLP_APLL1_TUNER_CON0 0x2a4
+#define VLP_APLL2_TUNER_CON0 0x2a8
+#define VLP_APLL1_CON0 0x274
+#define VLP_APLL1_CON1 0x278
+#define VLP_APLL1_CON2 0x27c
+#define VLP_APLL1_CON3 0x280
+#define VLP_APLL2_CON0 0x28c
+#define VLP_APLL2_CON1 0x290
+#define VLP_APLL2_CON2 0x294
+#define VLP_APLL2_CON3 0x298
+
+/* vlp apll1 tuner default value */
+#define VLP_APLL1_TUNER_CON0_VALUE	0x6f28bd4d
+/* vlp apll2 tuner default value + 1 */
+#define VLP_APLL2_TUNER_CON0_VALUE	0x78fd5265
+
+#define VLP_PLLEN_ALL 0x080
+#define VLP_PLLEN_ALL_SET 0x084
+#define VLP_PLLEN_ALL_CLR 0x088
+
+#define MT8196_PLL_FMAX (3800UL * MHZ)
+#define MT8196_PLL_FMIN (1500UL * MHZ)
+#define MT8196_INTEGER_BITS 8
+
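+/*
+ * PLL_FENC() describes a PLL whose enable bit lives in the shared
+ * VLP_PLLEN_ALL set/clear registers and whose output is checked through
+ * a FENC status register (fenc_sta_ofs/fenc_sta_bit).
+ */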
+#define PLL_FENC(_id, _name, _reg, _fenc_sta_ofs, _fenc_sta_bit,\
+ _flags, _pd_reg, _pd_shift, \
+ _pcw_reg, _pcw_shift, _pcwbits, \
+ _pll_en_bit) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .fenc_sta_ofs = _fenc_sta_ofs, \
+ .fenc_sta_bit = _fenc_sta_bit, \
+ .flags = _flags, \
+ .fmax = MT8196_PLL_FMAX, \
+ .fmin = MT8196_PLL_FMIN, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = MT8196_INTEGER_BITS, \
+ .en_reg = VLP_PLLEN_ALL, \
+ .en_set_reg = VLP_PLLEN_ALL_SET, \
+ .en_clr_reg = VLP_PLLEN_ALL_CLR, \
+ .pll_en_bit = _pll_en_bit, \
+ .ops = &mtk_pll_fenc_clr_set_ops, \
+}
+
+static DEFINE_SPINLOCK(mt8196_clk_vlp_lock);
+
+static const struct mtk_fixed_factor vlp_divs[] = {
+ FACTOR(CLK_VLP_CLK26M, "vlp_clk26m", "clk26m", 1, 1),
+ FACTOR(CLK_VLP_APLL1_D4, "apll1_d4", "vlp_apll1", 1, 4),
+ FACTOR(CLK_VLP_APLL1_D8, "apll1_d8", "vlp_apll1", 1, 8),
+ FACTOR(CLK_VLP_APLL2_D4, "apll2_d4", "vlp_apll2", 1, 4),
+ FACTOR(CLK_VLP_APLL2_D8, "apll2_d8", "vlp_apll2", 1, 8),
+};
+
+static const char * const vlp_scp_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d6",
+ "mainpll_d4",
+ "mainpll_d3",
+ "vlp_apll1"
+};
+
+static const char * const vlp_scp_spi_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d7_d2",
+ "mainpll_d5_d2"
+};
+
+static const char * const vlp_scp_iic_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d5_d4",
+ "mainpll_d7_d2"
+};
+
+static const char * const vlp_scp_iic_hs_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d5_d4",
+ "mainpll_d7_d2",
+ "mainpll_d7"
+};
+
+static const char * const vlp_pwrap_ulposc_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "osc_d14",
+ "osc_d10"
+};
+
+static const char * const vlp_spmi_32k_parents[] = {
+ "clk26m",
+ "clk32k",
+ "osc_d20",
+ "osc_d14",
+ "osc_d10"
+};
+
+static const char * const vlp_apxgpt_26m_b_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_dpsw_parents[] = {
+ "clk26m",
+ "osc_d10",
+ "osc_d7",
+ "mainpll_d7_d4"
+};
+
+static const char * const vlp_dpsw_central_parents[] = {
+ "clk26m",
+ "osc_d10",
+ "osc_d7",
+ "mainpll_d7_d4"
+};
+
+static const char * const vlp_spmi_m_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "osc_d14",
+ "osc_d10"
+};
+
+static const char * const vlp_dvfsrc_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_pwm_vlp_parents[] = {
+ "clk26m",
+ "clk32k",
+ "osc_d20",
+ "osc_d8",
+ "mainpll_d4_d8"
+};
+
+static const char * const vlp_axi_vlp_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d7_d4",
+ "osc_d4",
+ "mainpll_d7_d2"
+};
+
+static const char * const vlp_systimer_26m_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_sspm_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d5_d2",
+ "osc_d2",
+ "mainpll_d6"
+};
+
+static const char * const vlp_srck_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_camtg0_1_parents[] = {
+ "clk26m",
+ "univpll_192m_d32",
+ "univpll_192m_d16",
+ "clk13m",
+ "osc_d40",
+ "osc_d32",
+ "univpll_192m_d10",
+ "univpll_192m_d8",
+ "univpll_d6_d16",
+ "ulposc3",
+ "osc_d20",
+ "ck2_tvdpll1_d16",
+ "univpll_d6_d8"
+};
+
+static const char * const vlp_camtg2_7_parents[] = {
+ "clk26m",
+ "univpll_192m_d32",
+ "univpll_192m_d16",
+ "clk13m",
+ "osc_d40",
+ "osc_d32",
+ "univpll_192m_d10",
+ "univpll_192m_d8",
+ "univpll_d6_d16",
+ "osc_d20",
+ "ck2_tvdpll1_d16",
+ "univpll_d6_d8"
+};
+
+static const char * const vlp_sspm_26m_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_ulposc_sspm_parents[] = {
+ "clk26m",
+ "osc_d2",
+ "mainpll_d4_d2"
+};
+
+static const char * const vlp_vlp_pbus_26m_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_debug_err_flag_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_dpmsrdma_parents[] = {
+ "clk26m",
+ "mainpll_d7_d2"
+};
+
+static const char * const vlp_vlp_pbus_156m_parents[] = {
+ "clk26m",
+ "osc_d2",
+ "mainpll_d7_d2",
+ "mainpll_d7"
+};
+
+static const char * const vlp_spm_parents[] = {
+ "clk26m",
+ "mainpll_d7_d4"
+};
+
+static const char * const vlp_mminfra_parents[] = {
+ "clk26m",
+ "osc_d4",
+ "mainpll_d3"
+};
+
+static const char * const vlp_usb_parents[] = {
+ "clk26m",
+ "mainpll_d9"
+};
+
+static const char * const vlp_noc_vlp_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d9"
+};
+
+static const char * const vlp_audio_h_parents[] = {
+ "vlp_clk26m",
+ "vlp_apll1",
+ "vlp_apll2"
+};
+
+static const char * const vlp_aud_engen1_parents[] = {
+ "vlp_clk26m",
+ "apll1_d8",
+ "apll1_d4"
+};
+
+static const char * const vlp_aud_engen2_parents[] = {
+ "vlp_clk26m",
+ "apll2_d8",
+ "apll2_d4"
+};
+
+static const char * const vlp_aud_intbus_parents[] = {
+ "vlp_clk26m",
+ "mainpll_d7_d4",
+ "mainpll_d4_d4"
+};
+
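+/*
+ * The audio muxes do not use hardware selector value 0; this table maps
+ * the parents listed above onto selector values 1, 2 and 3.
+ */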
+static const u8 vlp_aud_parent_index[] = { 1, 2, 3 };
+
+static const char * const vlp_spvlp_26m_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_spu0_vlp_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d4_d4",
+ "mainpll_d4_d2",
+ "mainpll_d7",
+ "mainpll_d6",
+ "mainpll_d5"
+};
+
+static const char * const vlp_spu1_vlp_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d4_d4",
+ "mainpll_d4_d2",
+ "mainpll_d7",
+ "mainpll_d6",
+ "mainpll_d5"
+};
+
+static const struct mtk_mux vlp_muxes[] = {
+ /* VLP_CLK_CFG_0 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_VLP_SCP, "vlp_scp", vlp_scp_parents,
+ VLP_CLK_CFG_0, VLP_CLK_CFG_0_SET, VLP_CLK_CFG_0_CLR,
+ 0, 3, 7, VLP_CLK_CFG_UPDATE, TOP_MUX_SCP_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 31),
+ MUX_CLR_SET_UPD(CLK_VLP_SCP_SPI, "vlp_scp_spi",
+ vlp_scp_spi_parents, VLP_CLK_CFG_0, VLP_CLK_CFG_0_SET,
+ VLP_CLK_CFG_0_CLR, 8, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SCP_SPI_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SCP_IIC, "vlp_scp_iic",
+ vlp_scp_iic_parents, VLP_CLK_CFG_0, VLP_CLK_CFG_0_SET,
+ VLP_CLK_CFG_0_CLR, 16, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SCP_IIC_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SCP_IIC_HS, "vlp_scp_iic_hs",
+ vlp_scp_iic_hs_parents, VLP_CLK_CFG_0, VLP_CLK_CFG_0_SET,
+ VLP_CLK_CFG_0_CLR, 24, 3,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SCP_IIC_HS_SHIFT),
+ /* VLP_CLK_CFG_1 */
+ MUX_CLR_SET_UPD(CLK_VLP_PWRAP_ULPOSC, "vlp_pwrap_ulposc",
+ vlp_pwrap_ulposc_parents, VLP_CLK_CFG_1, VLP_CLK_CFG_1_SET,
+ VLP_CLK_CFG_1_CLR, 0, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_PWRAP_ULPOSC_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SPMI_M_TIA_32K, "vlp_spmi_32k",
+ vlp_spmi_32k_parents, VLP_CLK_CFG_1, VLP_CLK_CFG_1_SET,
+ VLP_CLK_CFG_1_CLR, 8, 3,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SPMI_M_TIA_32K_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_APXGPT_26M_B, "vlp_apxgpt_26m_b",
+ vlp_apxgpt_26m_b_parents, VLP_CLK_CFG_1, VLP_CLK_CFG_1_SET,
+ VLP_CLK_CFG_1_CLR, 16, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_APXGPT_26M_B_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_DPSW, "vlp_dpsw",
+ vlp_dpsw_parents, VLP_CLK_CFG_1, VLP_CLK_CFG_1_SET,
+ VLP_CLK_CFG_1_CLR, 24, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_DPSW_SHIFT),
+ /* VLP_CLK_CFG_2 */
+ MUX_CLR_SET_UPD(CLK_VLP_DPSW_CENTRAL, "vlp_dpsw_central",
+ vlp_dpsw_central_parents, VLP_CLK_CFG_2, VLP_CLK_CFG_2_SET,
+ VLP_CLK_CFG_2_CLR, 0, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_DPSW_CENTRAL_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SPMI_M_MST, "vlp_spmi_m",
+ vlp_spmi_m_parents, VLP_CLK_CFG_2, VLP_CLK_CFG_2_SET,
+ VLP_CLK_CFG_2_CLR, 8, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SPMI_M_MST_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_DVFSRC, "vlp_dvfsrc",
+ vlp_dvfsrc_parents, VLP_CLK_CFG_2, VLP_CLK_CFG_2_SET,
+ VLP_CLK_CFG_2_CLR, 16, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_DVFSRC_SHIFT),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_VLP_PWM_VLP, "vlp_pwm_vlp", vlp_pwm_vlp_parents,
+ VLP_CLK_CFG_2, VLP_CLK_CFG_2_SET, VLP_CLK_CFG_2_CLR,
+ 24, 3, 31, VLP_CLK_CFG_UPDATE, TOP_MUX_PWM_VLP_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 20),
+ /* VLP_CLK_CFG_3 */
+ MUX_CLR_SET_UPD(CLK_VLP_AXI_VLP, "vlp_axi_vlp",
+ vlp_axi_vlp_parents, VLP_CLK_CFG_3, VLP_CLK_CFG_3_SET,
+ VLP_CLK_CFG_3_CLR, 0, 3,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_AXI_VLP_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SYSTIMER_26M, "vlp_systimer_26m",
+ vlp_systimer_26m_parents, VLP_CLK_CFG_3, VLP_CLK_CFG_3_SET,
+ VLP_CLK_CFG_3_CLR, 8, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SYSTIMER_26M_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SSPM, "vlp_sspm",
+ vlp_sspm_parents, VLP_CLK_CFG_3, VLP_CLK_CFG_3_SET,
+ VLP_CLK_CFG_3_CLR, 16, 3,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SSPM_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SRCK, "vlp_srck",
+ vlp_srck_parents, VLP_CLK_CFG_3, VLP_CLK_CFG_3_SET,
+ VLP_CLK_CFG_3_CLR, 24, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SRCK_SHIFT),
+ /* VLP_CLK_CFG_4 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG0, "vlp_camtg0", vlp_camtg0_1_parents,
+ VLP_CLK_CFG_4, VLP_CLK_CFG_4_SET, VLP_CLK_CFG_4_CLR,
+ HWV_CG_9_DONE, HWV_CG_9_SET, HWV_CG_9_CLR,
+ 0, 4, 7, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG0_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 15),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG1, "vlp_camtg1", vlp_camtg0_1_parents,
+ VLP_CLK_CFG_4, VLP_CLK_CFG_4_SET, VLP_CLK_CFG_4_CLR,
+ HWV_CG_9_DONE, HWV_CG_9_SET, HWV_CG_9_CLR,
+ 8, 4, 15, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG1_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 14),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG2, "vlp_camtg2", vlp_camtg2_7_parents,
+ VLP_CLK_CFG_4, VLP_CLK_CFG_4_SET, VLP_CLK_CFG_4_CLR,
+ HWV_CG_9_DONE, HWV_CG_9_SET, HWV_CG_9_CLR,
+ 16, 4, 23, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG2_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 13),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG3, "vlp_camtg3", vlp_camtg2_7_parents,
+ VLP_CLK_CFG_4, VLP_CLK_CFG_4_SET, VLP_CLK_CFG_4_CLR,
+ HWV_CG_9_DONE, HWV_CG_9_SET, HWV_CG_9_CLR,
+ 24, 4, 31, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG3_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 12),
+ /* VLP_CLK_CFG_5 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG4, "vlp_camtg4", vlp_camtg2_7_parents,
+ VLP_CLK_CFG_5, VLP_CLK_CFG_5_SET, VLP_CLK_CFG_5_CLR,
+ HWV_CG_10_DONE, HWV_CG_10_SET, HWV_CG_10_CLR,
+ 0, 4, 7, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG4_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 11),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG5, "vlp_camtg5", vlp_camtg2_7_parents,
+ VLP_CLK_CFG_5, VLP_CLK_CFG_5_SET, VLP_CLK_CFG_5_CLR,
+ HWV_CG_10_DONE, HWV_CG_10_SET, HWV_CG_10_CLR,
+ 8, 4, 15, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG5_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 10),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG6, "vlp_camtg6", vlp_camtg2_7_parents,
+ VLP_CLK_CFG_5, VLP_CLK_CFG_5_SET, VLP_CLK_CFG_5_CLR,
+ HWV_CG_10_DONE, HWV_CG_10_SET, HWV_CG_10_CLR,
+ 16, 4, 23, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG6_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 9),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG7, "vlp_camtg7", vlp_camtg2_7_parents,
+ VLP_CLK_CFG_5, VLP_CLK_CFG_5_SET, VLP_CLK_CFG_5_CLR,
+ HWV_CG_10_DONE, HWV_CG_10_SET, HWV_CG_10_CLR,
+ 24, 4, 31, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG7_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 8),
+ /* VLP_CLK_CFG_6 */
+ MUX_CLR_SET_UPD(CLK_VLP_SSPM_26M, "vlp_sspm_26m",
+ vlp_sspm_26m_parents, VLP_CLK_CFG_6, VLP_CLK_CFG_6_SET,
+ VLP_CLK_CFG_6_CLR, 8, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SSPM_26M_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_ULPOSC_SSPM, "vlp_ulposc_sspm",
+ vlp_ulposc_sspm_parents, VLP_CLK_CFG_6, VLP_CLK_CFG_6_SET,
+ VLP_CLK_CFG_6_CLR, 16, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_ULPOSC_SSPM_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_VLP_PBUS_26M, "vlp_vlp_pbus_26m",
+ vlp_vlp_pbus_26m_parents, VLP_CLK_CFG_6, VLP_CLK_CFG_6_SET,
+ VLP_CLK_CFG_6_CLR, 24, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_VLP_PBUS_26M_SHIFT),
+ /* VLP_CLK_CFG_7 */
+ MUX_CLR_SET_UPD(CLK_VLP_DEBUG_ERR_FLAG, "vlp_debug_err_flag",
+ vlp_debug_err_flag_parents, VLP_CLK_CFG_7, VLP_CLK_CFG_7_SET,
+ VLP_CLK_CFG_7_CLR, 0, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_DEBUG_ERR_FLAG_VLP_26M_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_DPMSRDMA, "vlp_dpmsrdma",
+ vlp_dpmsrdma_parents, VLP_CLK_CFG_7, VLP_CLK_CFG_7_SET,
+ VLP_CLK_CFG_7_CLR, 8, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_DPMSRDMA_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_VLP_PBUS_156M, "vlp_vlp_pbus_156m",
+ vlp_vlp_pbus_156m_parents, VLP_CLK_CFG_7, VLP_CLK_CFG_7_SET,
+ VLP_CLK_CFG_7_CLR, 16, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_VLP_PBUS_156M_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SPM, "vlp_spm",
+ vlp_spm_parents, VLP_CLK_CFG_7, VLP_CLK_CFG_7_SET,
+ VLP_CLK_CFG_7_CLR, 24, 1,
+ VLP_CLK_CFG_UPDATE1, TOP_MUX_SPM_SHIFT),
+ /* VLP_CLK_CFG_8 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_VLP_MMINFRA, "vlp_mminfra", vlp_mminfra_parents,
+ VLP_CLK_CFG_8, VLP_CLK_CFG_8_SET, VLP_CLK_CFG_8_CLR,
+ 0, 2, 7, VLP_CLK_CFG_UPDATE1, TOP_MUX_MMINFRA_VLP_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 31),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_VLP_USB_TOP, "vlp_usb", vlp_usb_parents,
+ VLP_CLK_CFG_8, VLP_CLK_CFG_8_SET, VLP_CLK_CFG_8_CLR,
+ 8, 1, 15, VLP_CLK_CFG_UPDATE1, TOP_MUX_USB_TOP_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 30),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_VLP_USB_XHCI, "vlp_usb_xhci", vlp_usb_parents,
+ VLP_CLK_CFG_8, VLP_CLK_CFG_8_SET, VLP_CLK_CFG_8_CLR,
+ 16, 1, 23, VLP_CLK_CFG_UPDATE1, TOP_MUX_SSUSB_XHCI_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 29),
+ MUX_CLR_SET_UPD(CLK_VLP_NOC_VLP, "vlp_noc_vlp",
+ vlp_noc_vlp_parents, VLP_CLK_CFG_8, VLP_CLK_CFG_8_SET,
+ VLP_CLK_CFG_8_CLR, 24, 2,
+ VLP_CLK_CFG_UPDATE1, TOP_MUX_NOC_VLP_SHIFT),
+ /* VLP_CLK_CFG_9 */
+ MUX_GATE_FENC_CLR_SET_UPD_INDEXED(CLK_VLP_AUDIO_H, "vlp_audio_h",
+ vlp_audio_h_parents, vlp_aud_parent_index,
+ VLP_CLK_CFG_9, VLP_CLK_CFG_9_SET, VLP_CLK_CFG_9_CLR,
+ 0, 2, 7, VLP_CLK_CFG_UPDATE1, TOP_MUX_AUDIO_H_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 27),
+ MUX_GATE_FENC_CLR_SET_UPD_INDEXED(CLK_VLP_AUD_ENGEN1, "vlp_aud_engen1",
+ vlp_aud_engen1_parents, vlp_aud_parent_index,
+ VLP_CLK_CFG_9, VLP_CLK_CFG_9_SET, VLP_CLK_CFG_9_CLR,
+ 8, 2, 15, VLP_CLK_CFG_UPDATE1, TOP_MUX_AUD_ENGEN1_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 26),
+ MUX_GATE_FENC_CLR_SET_UPD_INDEXED(CLK_VLP_AUD_ENGEN2, "vlp_aud_engen2",
+ vlp_aud_engen2_parents, vlp_aud_parent_index,
+ VLP_CLK_CFG_9, VLP_CLK_CFG_9_SET, VLP_CLK_CFG_9_CLR,
+ 16, 2, 23, VLP_CLK_CFG_UPDATE1, TOP_MUX_AUD_ENGEN2_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 25),
+ MUX_GATE_FENC_CLR_SET_UPD_INDEXED(CLK_VLP_AUD_INTBUS, "vlp_aud_intbus",
+ vlp_aud_intbus_parents, vlp_aud_parent_index,
+ VLP_CLK_CFG_9, VLP_CLK_CFG_9_SET, VLP_CLK_CFG_9_CLR,
+ 24, 2, 31, VLP_CLK_CFG_UPDATE1, TOP_MUX_AUD_INTBUS_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 24),
+ /* VLP_CLK_CFG_10 */
+ MUX_CLR_SET_UPD(CLK_VLP_SPVLP_26M, "vlp_spvlp_26m",
+ vlp_spvlp_26m_parents, VLP_CLK_CFG_10, VLP_CLK_CFG_10_SET,
+ VLP_CLK_CFG_10_CLR, 0, 1,
+ VLP_CLK_CFG_UPDATE1, TOP_MUX_SPU_VLP_26M_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SPU0_VLP, "vlp_spu0_vlp",
+ vlp_spu0_vlp_parents, VLP_CLK_CFG_10, VLP_CLK_CFG_10_SET,
+ VLP_CLK_CFG_10_CLR, 8, 3,
+ VLP_CLK_CFG_UPDATE1, TOP_MUX_SPU0_VLP_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SPU1_VLP, "vlp_spu1_vlp",
+ vlp_spu1_vlp_parents, VLP_CLK_CFG_10, VLP_CLK_CFG_10_SET,
+ VLP_CLK_CFG_10_CLR, 16, 3,
+ VLP_CLK_CFG_UPDATE1, TOP_MUX_SPU1_VLP_SHIFT),
+};
+
+static const struct mtk_pll_data vlp_plls[] = {
+ PLL_FENC(CLK_VLP_APLL1, "vlp_apll1", VLP_APLL1_CON0, 0x0358, 1, 0,
+ VLP_APLL1_CON1, 24, VLP_APLL1_CON2, 0, 32, 0),
+ PLL_FENC(CLK_VLP_APLL2, "vlp_apll2", VLP_APLL2_CON0, 0x0358, 0, 0,
+ VLP_APLL2_CON1, 24, VLP_APLL2_CON2, 0, 32, 1),
+};
+
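+/* MMIO regmap, used only to program the APLL tuner defaults at probe. */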
+static const struct regmap_config vlpckgen_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x1000,
+ .fast_io = true,
+};
+
+static int clk_mt8196_vlp_probe(struct platform_device *pdev)
+{
+	void __iomem *base;
+ struct clk_hw_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+
+ clk_data = mtk_alloc_clk_data(ARRAY_SIZE(vlp_muxes) +
+ ARRAY_SIZE(vlp_plls) +
+ ARRAY_SIZE(vlp_divs));
+ if (!clk_data)
+ return -ENOMEM;
+
+	base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(base)) {
+		r = PTR_ERR(base);
+		goto free_clk_data;
+	}
+
+	regmap = devm_regmap_init_mmio(dev, base, &vlpckgen_regmap_config);
+	if (IS_ERR(regmap)) {
+		r = PTR_ERR(regmap);
+		goto free_clk_data;
+	}
+
+ r = mtk_clk_register_factors(vlp_divs, ARRAY_SIZE(vlp_divs), clk_data);
+ if (r)
+ goto free_clk_data;
+
+ r = mtk_clk_register_muxes(&pdev->dev, vlp_muxes, ARRAY_SIZE(vlp_muxes),
+ node, &mt8196_clk_vlp_lock, clk_data);
+ if (r)
+ goto unregister_factors;
+
+ r = mtk_clk_register_plls(node, vlp_plls, ARRAY_SIZE(vlp_plls),
+ clk_data);
+ if (r)
+ goto unregister_muxes;
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (r)
+ goto unregister_plls;
+
+ platform_set_drvdata(pdev, clk_data);
+
+ /* Initialize APLL tuner registers */
+ regmap_write(regmap, VLP_APLL1_TUNER_CON0, VLP_APLL1_TUNER_CON0_VALUE);
+ regmap_write(regmap, VLP_APLL2_TUNER_CON0, VLP_APLL2_TUNER_CON0_VALUE);
+
+ return r;
+
+unregister_plls:
+ mtk_clk_unregister_plls(vlp_plls, ARRAY_SIZE(vlp_plls), clk_data);
+unregister_muxes:
+ mtk_clk_unregister_muxes(vlp_muxes, ARRAY_SIZE(vlp_muxes), clk_data);
+unregister_factors:
+ mtk_clk_unregister_factors(vlp_divs, ARRAY_SIZE(vlp_divs), clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+
+ return r;
+}
+
+static void clk_mt8196_vlp_remove(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+ struct device_node *node = pdev->dev.of_node;
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_plls(vlp_plls, ARRAY_SIZE(vlp_plls), clk_data);
+ mtk_clk_unregister_muxes(vlp_muxes, ARRAY_SIZE(vlp_muxes), clk_data);
+ mtk_clk_unregister_factors(vlp_divs, ARRAY_SIZE(vlp_divs), clk_data);
+ mtk_free_clk_data(clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8196_vlp_ck[] = {
+ { .compatible = "mediatek,mt8196-vlpckgen" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_vlp_ck);
+
+static struct platform_driver clk_mt8196_vlp_drv = {
+ .probe = clk_mt8196_vlp_probe,
+ .remove = clk_mt8196_vlp_remove,
+ .driver = {
+ .name = "clk-mt8196-vlpck",
+ .of_match_table = of_match_clk_mt8196_vlp_ck,
+ },
+};
+
+MODULE_DESCRIPTION("MediaTek MT8196 VLP clock generator driver");
+module_platform_driver(clk_mt8196_vlp_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index ba1d1c495bc2..19cd27941747 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -685,4 +685,20 @@ void mtk_clk_simple_remove(struct platform_device *pdev)
}
EXPORT_SYMBOL_GPL(mtk_clk_simple_remove);
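+/*
+ * Look up the optional "mediatek,hardware-voter" phandle on @node and
+ * return the regmap of the referenced hardware-voter block, or NULL if
+ * the property is absent. Note that device_node_to_regmap() can return
+ * an ERR_PTR, which callers are expected to handle.
+ */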
+struct regmap *mtk_clk_get_hwv_regmap(struct device_node *node)
+{
+ struct device_node *hwv_node;
+ struct regmap *regmap_hwv;
+
+ hwv_node = of_parse_phandle(node, "mediatek,hardware-voter", 0);
+ if (!hwv_node)
+ return NULL;
+
+ regmap_hwv = device_node_to_regmap(hwv_node);
+ of_node_put(hwv_node);
+
+ return regmap_hwv;
+}
+EXPORT_SYMBOL_GPL(mtk_clk_get_hwv_regmap);
+
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index c17fe1c2d732..5417b9264e6d 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -20,6 +20,8 @@
#define MHZ (1000 * 1000)
+#define MTK_WAIT_HWV_DONE_US 30
+
struct platform_device;
/*
@@ -173,6 +175,25 @@ struct mtk_composite {
.flags = 0, \
}
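+/*
+ * MUX_DIV_GATE() describes an mtk_composite made of a parent mux, a
+ * post-divider and a gate in a single register block; CLK_SET_RATE_PARENT
+ * lets rate requests propagate through the mux to the selected parent.
+ */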
+#define MUX_DIV_GATE(_id, _name, _parents, \
+ _mux_reg, _mux_shift, _mux_width, \
+ _div_reg, _div_shift, _div_width, \
+ _gate_reg, _gate_shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_names = _parents, \
+ .num_parents = ARRAY_SIZE(_parents), \
+ .mux_reg = _mux_reg, \
+ .mux_shift = _mux_shift, \
+ .mux_width = _mux_width, \
+ .divider_reg = _div_reg, \
+ .divider_shift = _div_shift, \
+ .divider_width = _div_width, \
+ .gate_reg = _gate_reg, \
+ .gate_shift = _gate_shift, \
+ .flags = CLK_SET_RATE_PARENT, \
+ }
+
int mtk_clk_register_composites(struct device *dev,
const struct mtk_composite *mcs, int num,
void __iomem *base, spinlock_t *lock,
@@ -245,5 +266,6 @@ int mtk_clk_pdev_probe(struct platform_device *pdev);
void mtk_clk_pdev_remove(struct platform_device *pdev);
int mtk_clk_simple_probe(struct platform_device *pdev);
void mtk_clk_simple_remove(struct platform_device *pdev);
+struct regmap *mtk_clk_get_hwv_regmap(struct device_node *node);
#endif /* __DRV_CLK_MTK_H */
diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c
index 60990296450b..c5af6dc078a3 100644
--- a/drivers/clk/mediatek/clk-mux.c
+++ b/drivers/clk/mediatek/clk-mux.c
@@ -8,6 +8,7 @@
#include <linux/clk-provider.h>
#include <linux/compiler_types.h>
#include <linux/container_of.h>
+#include <linux/dev_printk.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
@@ -15,11 +16,15 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
+#include "clk-mtk.h"
#include "clk-mux.h"
+#define MTK_WAIT_FENC_DONE_US 30
+
struct mtk_clk_mux {
struct clk_hw hw;
struct regmap *regmap;
+ struct regmap *regmap_hwv;
const struct mtk_mux *data;
spinlock_t *lock;
bool reparent;
@@ -30,6 +35,33 @@ static inline struct mtk_clk_mux *to_mtk_clk_mux(struct clk_hw *hw)
return container_of(hw, struct mtk_clk_mux, hw);
}
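+/*
+ * FENC muxes: ungating is acknowledged by the hardware in a separate
+ * status monitor register, so poll it after clearing the gate bit.
+ */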
+static int mtk_clk_mux_fenc_enable_setclr(struct clk_hw *hw)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ unsigned long flags;
+ u32 val;
+ int ret;
+
+ if (mux->lock)
+ spin_lock_irqsave(mux->lock, flags);
+ else
+ __acquire(mux->lock);
+
+ regmap_write(mux->regmap, mux->data->clr_ofs,
+ BIT(mux->data->gate_shift));
+
+ ret = regmap_read_poll_timeout_atomic(mux->regmap, mux->data->fenc_sta_mon_ofs,
+ val, val & BIT(mux->data->fenc_shift), 1,
+ MTK_WAIT_FENC_DONE_US);
+
+ if (mux->lock)
+ spin_unlock_irqrestore(mux->lock, flags);
+ else
+ __release(mux->lock);
+
+ return ret;
+}
+
static int mtk_clk_mux_enable_setclr(struct clk_hw *hw)
{
struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
@@ -70,6 +102,16 @@ static void mtk_clk_mux_disable_setclr(struct clk_hw *hw)
BIT(mux->data->gate_shift));
}
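+/* Read the enable state back from the FENC status monitor register */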
+static int mtk_clk_mux_fenc_is_enabled(struct clk_hw *hw)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ u32 val;
+
+ regmap_read(mux->regmap, mux->data->fenc_sta_mon_ofs, &val);
+
+ return !!(val & BIT(mux->data->fenc_shift));
+}
+
static int mtk_clk_mux_is_enabled(struct clk_hw *hw)
{
struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
@@ -80,6 +122,41 @@ static int mtk_clk_mux_is_enabled(struct clk_hw *hw)
return (val & BIT(mux->data->gate_shift)) == 0;
}
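+/*
+ * Hardware-voter (HWV) muxes: vote for the clock through the voter's
+ * SET register, wait for the vote to show up in the voter's status
+ * register, then poll the FENC status monitor for the actual ungate.
+ */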
+static int mtk_clk_mux_hwv_fenc_enable(struct clk_hw *hw)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ u32 val;
+ int ret;
+
+ regmap_write(mux->regmap_hwv, mux->data->hwv_set_ofs,
+ BIT(mux->data->gate_shift));
+
+ ret = regmap_read_poll_timeout_atomic(mux->regmap_hwv, mux->data->hwv_sta_ofs,
+ val, val & BIT(mux->data->gate_shift), 0,
+ MTK_WAIT_HWV_DONE_US);
+ if (ret)
+ return ret;
+
+ ret = regmap_read_poll_timeout_atomic(mux->regmap, mux->data->fenc_sta_mon_ofs,
+ val, val & BIT(mux->data->fenc_shift), 1,
+ MTK_WAIT_FENC_DONE_US);
+
+ return ret;
+}
+
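+/* Drop the vote through the voter's CLR register and wait for the status to settle */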
+static void mtk_clk_mux_hwv_disable(struct clk_hw *hw)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ u32 val;
+
+ regmap_write(mux->regmap_hwv, mux->data->hwv_clr_ofs,
+ BIT(mux->data->gate_shift));
+
+ regmap_read_poll_timeout_atomic(mux->regmap_hwv, mux->data->hwv_sta_ofs,
+ val, (val & BIT(mux->data->gate_shift)),
+ 0, MTK_WAIT_HWV_DONE_US);
+}
+
static u8 mtk_clk_mux_get_parent(struct clk_hw *hw)
{
struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
@@ -146,9 +223,15 @@ static int mtk_clk_mux_set_parent_setclr_lock(struct clk_hw *hw, u8 index)
static int mtk_clk_mux_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
- struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ return clk_mux_determine_rate_flags(hw, req, 0);
+}
+
+static bool mtk_clk_mux_uses_hwv(const struct clk_ops *ops)
+{
+ if (ops == &mtk_mux_gate_hwv_fenc_clr_set_upd_ops)
+ return true;
- return clk_mux_determine_rate_flags(hw, req, mux->data->flags);
+ return false;
}
const struct clk_ops mtk_mux_clr_set_upd_ops = {
@@ -168,9 +251,30 @@ const struct clk_ops mtk_mux_gate_clr_set_upd_ops = {
};
EXPORT_SYMBOL_GPL(mtk_mux_gate_clr_set_upd_ops);
+const struct clk_ops mtk_mux_gate_fenc_clr_set_upd_ops = {
+ .enable = mtk_clk_mux_fenc_enable_setclr,
+ .disable = mtk_clk_mux_disable_setclr,
+ .is_enabled = mtk_clk_mux_fenc_is_enabled,
+ .get_parent = mtk_clk_mux_get_parent,
+ .set_parent = mtk_clk_mux_set_parent_setclr_lock,
+ .determine_rate = mtk_clk_mux_determine_rate,
+};
+EXPORT_SYMBOL_GPL(mtk_mux_gate_fenc_clr_set_upd_ops);
+
+const struct clk_ops mtk_mux_gate_hwv_fenc_clr_set_upd_ops = {
+ .enable = mtk_clk_mux_hwv_fenc_enable,
+ .disable = mtk_clk_mux_hwv_disable,
+ .is_enabled = mtk_clk_mux_fenc_is_enabled,
+ .get_parent = mtk_clk_mux_get_parent,
+ .set_parent = mtk_clk_mux_set_parent_setclr_lock,
+ .determine_rate = mtk_clk_mux_determine_rate,
+};
+EXPORT_SYMBOL_GPL(mtk_mux_gate_hwv_fenc_clr_set_upd_ops);
+
static struct clk_hw *mtk_clk_register_mux(struct device *dev,
const struct mtk_mux *mux,
struct regmap *regmap,
+ struct regmap *regmap_hwv,
spinlock_t *lock)
{
struct mtk_clk_mux *clk_mux;
@@ -186,8 +290,13 @@ static struct clk_hw *mtk_clk_register_mux(struct device *dev,
init.parent_names = mux->parent_names;
init.num_parents = mux->num_parents;
init.ops = mux->ops;
+ if (mtk_clk_mux_uses_hwv(init.ops) && !regmap_hwv)
+ return dev_err_ptr_probe(
+ dev, -ENXIO,
+ "regmap not found for hardware voter clocks\n");
clk_mux->regmap = regmap;
+ clk_mux->regmap_hwv = regmap_hwv;
clk_mux->data = mux;
clk_mux->lock = lock;
clk_mux->hw.init = &init;
@@ -220,6 +329,7 @@ int mtk_clk_register_muxes(struct device *dev,
struct clk_hw_onecell_data *clk_data)
{
struct regmap *regmap;
+ struct regmap *regmap_hwv;
struct clk_hw *hw;
int i;
@@ -229,6 +339,12 @@ int mtk_clk_register_muxes(struct device *dev,
return PTR_ERR(regmap);
}
+ regmap_hwv = mtk_clk_get_hwv_regmap(node);
+ if (IS_ERR(regmap_hwv))
+ return dev_err_probe(
+ dev, PTR_ERR(regmap_hwv),
+ "Cannot find hardware voter regmap for %pOF\n", node);
+
for (i = 0; i < num; i++) {
const struct mtk_mux *mux = &muxes[i];
@@ -238,7 +354,7 @@ int mtk_clk_register_muxes(struct device *dev,
continue;
}
- hw = mtk_clk_register_mux(dev, mux, regmap, lock);
+ hw = mtk_clk_register_mux(dev, mux, regmap, regmap_hwv, lock);
if (IS_ERR(hw)) {
pr_err("Failed to register clk %s: %pe\n", mux->name,
diff --git a/drivers/clk/mediatek/clk-mux.h b/drivers/clk/mediatek/clk-mux.h
index 943ad1d7ce4b..151e56dcf884 100644
--- a/drivers/clk/mediatek/clk-mux.h
+++ b/drivers/clk/mediatek/clk-mux.h
@@ -29,10 +29,16 @@ struct mtk_mux {
u32 clr_ofs;
u32 upd_ofs;
+ u32 hwv_set_ofs;
+ u32 hwv_clr_ofs;
+ u32 hwv_sta_ofs;
+ u32 fenc_sta_mon_ofs;
+
u8 mux_shift;
u8 mux_width;
u8 gate_shift;
s8 upd_shift;
+ u8 fenc_shift;
const struct clk_ops *ops;
signed char num_parents;
@@ -77,6 +83,8 @@ struct mtk_mux {
extern const struct clk_ops mtk_mux_clr_set_upd_ops;
extern const struct clk_ops mtk_mux_gate_clr_set_upd_ops;
+extern const struct clk_ops mtk_mux_gate_fenc_clr_set_upd_ops;
+extern const struct clk_ops mtk_mux_gate_hwv_fenc_clr_set_upd_ops;
#define MUX_GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, _mux_ofs, \
_mux_set_ofs, _mux_clr_ofs, _shift, _width, \
@@ -118,6 +126,85 @@ extern const struct clk_ops mtk_mux_gate_clr_set_upd_ops;
0, _upd_ofs, _upd, CLK_SET_RATE_PARENT, \
mtk_mux_clr_set_upd_ops)
+#define MUX_GATE_HWV_FENC_CLR_SET_UPD_FLAGS(_id, _name, _parents, \
+ _mux_ofs, _mux_set_ofs, _mux_clr_ofs, \
+ _hwv_sta_ofs, _hwv_set_ofs, _hwv_clr_ofs, \
+ _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc, _flags) { \
+ .id = _id, \
+ .name = _name, \
+ .mux_ofs = _mux_ofs, \
+ .set_ofs = _mux_set_ofs, \
+ .clr_ofs = _mux_clr_ofs, \
+ .hwv_sta_ofs = _hwv_sta_ofs, \
+ .hwv_set_ofs = _hwv_set_ofs, \
+ .hwv_clr_ofs = _hwv_clr_ofs, \
+ .upd_ofs = _upd_ofs, \
+ .fenc_sta_mon_ofs = _fenc_sta_mon_ofs, \
+ .mux_shift = _shift, \
+ .mux_width = _width, \
+ .gate_shift = _gate, \
+ .upd_shift = _upd, \
+ .fenc_shift = _fenc, \
+ .parent_names = _parents, \
+ .num_parents = ARRAY_SIZE(_parents), \
+ .flags = _flags, \
+ .ops = &mtk_mux_gate_hwv_fenc_clr_set_upd_ops, \
+ }
+
+#define MUX_GATE_HWV_FENC_CLR_SET_UPD(_id, _name, _parents, \
+ _mux_ofs, _mux_set_ofs, _mux_clr_ofs, \
+ _hwv_sta_ofs, _hwv_set_ofs, _hwv_clr_ofs, \
+ _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc) \
+ MUX_GATE_HWV_FENC_CLR_SET_UPD_FLAGS(_id, _name, _parents, \
+ _mux_ofs, _mux_set_ofs, _mux_clr_ofs, \
+ _hwv_sta_ofs, _hwv_set_ofs, _hwv_clr_ofs, \
+ _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc, 0)
+
+#define MUX_GATE_FENC_CLR_SET_UPD_FLAGS(_id, _name, _parents, _paridx, \
+ _num_parents, _mux_ofs, _mux_set_ofs, _mux_clr_ofs, \
+ _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc, _flags) { \
+ .id = _id, \
+ .name = _name, \
+ .mux_ofs = _mux_ofs, \
+ .set_ofs = _mux_set_ofs, \
+ .clr_ofs = _mux_clr_ofs, \
+ .upd_ofs = _upd_ofs, \
+ .fenc_sta_mon_ofs = _fenc_sta_mon_ofs, \
+ .mux_shift = _shift, \
+ .mux_width = _width, \
+ .gate_shift = _gate, \
+ .upd_shift = _upd, \
+ .fenc_shift = _fenc, \
+ .parent_names = _parents, \
+ .parent_index = _paridx, \
+ .num_parents = _num_parents, \
+ .flags = _flags, \
+ .ops = &mtk_mux_gate_fenc_clr_set_upd_ops, \
+ }
+
+#define MUX_GATE_FENC_CLR_SET_UPD(_id, _name, _parents, \
+ _mux_ofs, _mux_set_ofs, _mux_clr_ofs, \
+ _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc) \
+ MUX_GATE_FENC_CLR_SET_UPD_FLAGS(_id, _name, _parents, \
+ NULL, ARRAY_SIZE(_parents), _mux_ofs, \
+ _mux_set_ofs, _mux_clr_ofs, _shift, \
+ _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc, 0)
+
+#define MUX_GATE_FENC_CLR_SET_UPD_INDEXED(_id, _name, _parents, _paridx, \
+ _mux_ofs, _mux_set_ofs, _mux_clr_ofs, \
+ _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc) \
+ MUX_GATE_FENC_CLR_SET_UPD_FLAGS(_id, _name, _parents, _paridx, \
+ ARRAY_SIZE(_paridx), _mux_ofs, _mux_set_ofs, \
+ _mux_clr_ofs, _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc, 0)
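+
+/*
+ * Example table entry (hypothetical clock names and register offsets,
+ * for illustration only):
+ *
+ *	static const char * const top_parents[] = { "clk26m", "mainpll_d4" };
+ *
+ *	static const struct mtk_mux top_muxes[] = {
+ *		MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_FOO, "top_foo",
+ *			top_parents, 0x010, 0x014, 0x018, 0, 1, 7,
+ *			0x004, 0, 0x03c, 6),
+ *	};
+ */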
+
int mtk_clk_register_muxes(struct device *dev,
const struct mtk_mux *muxes,
int num, struct device_node *node,
diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
index ce453e1718e5..cd2b6ce551c6 100644
--- a/drivers/clk/mediatek/clk-pll.c
+++ b/drivers/clk/mediatek/clk-pll.c
@@ -37,6 +37,13 @@ int mtk_pll_is_prepared(struct clk_hw *hw)
return (readl(pll->en_addr) & BIT(pll->data->pll_en_bit)) != 0;
}
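+/* Report the PLL enable state from its FENC status register */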
+static int mtk_pll_fenc_is_prepared(struct clk_hw *hw)
+{
+ struct mtk_clk_pll *pll = to_mtk_clk_pll(hw);
+
+ return !!(readl(pll->fenc_addr) & BIT(pll->data->fenc_sta_bit));
+}
+
static unsigned long __mtk_pll_recalc_rate(struct mtk_clk_pll *pll, u32 fin,
u32 pcw, int postdiv)
{
@@ -200,16 +207,19 @@ unsigned long mtk_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
return __mtk_pll_recalc_rate(pll, parent_rate, pcw, postdiv);
}
-long mtk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+int mtk_pll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
struct mtk_clk_pll *pll = to_mtk_clk_pll(hw);
u32 pcw = 0;
int postdiv;
- mtk_pll_calc_values(pll, &pcw, &postdiv, rate, *prate);
+ mtk_pll_calc_values(pll, &pcw, &postdiv, req->rate,
+ req->best_parent_rate);
+
+ req->rate = __mtk_pll_recalc_rate(pll, req->best_parent_rate, pcw,
+ postdiv);
- return __mtk_pll_recalc_rate(pll, *prate, pcw, postdiv);
+ return 0;
}
int mtk_pll_prepare(struct clk_hw *hw)
@@ -274,14 +284,43 @@ void mtk_pll_unprepare(struct clk_hw *hw)
writel(r, pll->pwr_addr);
}
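+/*
+ * PLLs with dedicated enable SET/CLR registers: toggle the enable bit
+ * with single writes instead of a read-modify-write sequence.
+ */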
+static int mtk_pll_prepare_setclr(struct clk_hw *hw)
+{
+ struct mtk_clk_pll *pll = to_mtk_clk_pll(hw);
+
+ writel(BIT(pll->data->pll_en_bit), pll->en_set_addr);
+
+ /* Wait 20us after enable for the PLL to stabilize */
+ udelay(20);
+
+ return 0;
+}
+
+static void mtk_pll_unprepare_setclr(struct clk_hw *hw)
+{
+ struct mtk_clk_pll *pll = to_mtk_clk_pll(hw);
+
+ writel(BIT(pll->data->pll_en_bit), pll->en_clr_addr);
+}
+
const struct clk_ops mtk_pll_ops = {
.is_prepared = mtk_pll_is_prepared,
.prepare = mtk_pll_prepare,
.unprepare = mtk_pll_unprepare,
.recalc_rate = mtk_pll_recalc_rate,
- .round_rate = mtk_pll_round_rate,
+ .determine_rate = mtk_pll_determine_rate,
+ .set_rate = mtk_pll_set_rate,
+};
+
+const struct clk_ops mtk_pll_fenc_clr_set_ops = {
+ .is_prepared = mtk_pll_fenc_is_prepared,
+ .prepare = mtk_pll_prepare_setclr,
+ .unprepare = mtk_pll_unprepare_setclr,
+ .recalc_rate = mtk_pll_recalc_rate,
+ .determine_rate = mtk_pll_determine_rate,
.set_rate = mtk_pll_set_rate,
};
+EXPORT_SYMBOL_GPL(mtk_pll_fenc_clr_set_ops);
struct clk_hw *mtk_clk_register_pll_ops(struct mtk_clk_pll *pll,
const struct mtk_pll_data *data,
@@ -308,9 +347,15 @@ struct clk_hw *mtk_clk_register_pll_ops(struct mtk_clk_pll *pll,
pll->en_addr = base + data->en_reg;
else
pll->en_addr = pll->base_addr + REG_CON0;
+ if (data->en_set_reg)
+ pll->en_set_addr = base + data->en_set_reg;
+ if (data->en_clr_reg)
+ pll->en_clr_addr = base + data->en_clr_reg;
pll->hw.init = &init;
pll->data = data;
+ pll->fenc_addr = base + data->fenc_sta_ofs;
+
init.name = data->name;
init.flags = (data->flags & PLL_AO) ? CLK_IS_CRITICAL : 0;
init.ops = pll_ops;
@@ -333,12 +378,13 @@ struct clk_hw *mtk_clk_register_pll(const struct mtk_pll_data *data,
{
struct mtk_clk_pll *pll;
struct clk_hw *hw;
+ const struct clk_ops *pll_ops = data->ops ? data->ops : &mtk_pll_ops;
pll = kzalloc(sizeof(*pll), GFP_KERNEL);
if (!pll)
return ERR_PTR(-ENOMEM);
- hw = mtk_clk_register_pll_ops(pll, data, base, &mtk_pll_ops);
+ hw = mtk_clk_register_pll_ops(pll, data, base, pll_ops);
if (IS_ERR(hw))
kfree(pll);
diff --git a/drivers/clk/mediatek/clk-pll.h b/drivers/clk/mediatek/clk-pll.h
index 285c8db958b3..d71c150ce83e 100644
--- a/drivers/clk/mediatek/clk-pll.h
+++ b/drivers/clk/mediatek/clk-pll.h
@@ -29,6 +29,7 @@ struct mtk_pll_data {
u32 reg;
u32 pwr_reg;
u32 en_mask;
+ u32 fenc_sta_ofs;
u32 pd_reg;
u32 tuner_reg;
u32 tuner_en_reg;
@@ -47,8 +48,11 @@ struct mtk_pll_data {
const struct mtk_pll_div_table *div_table;
const char *parent_name;
u32 en_reg;
+ u32 en_set_reg;
+ u32 en_clr_reg;
u8 pll_en_bit; /* Assume 0, indicates BIT(0) by default */
u8 pcw_chg_bit;
+ u8 fenc_sta_bit;
};
/*
@@ -68,6 +72,9 @@ struct mtk_clk_pll {
void __iomem *pcw_addr;
void __iomem *pcw_chg_addr;
void __iomem *en_addr;
+ void __iomem *en_set_addr;
+ void __iomem *en_clr_addr;
+ void __iomem *fenc_addr;
const struct mtk_pll_data *data;
};
@@ -78,6 +85,7 @@ void mtk_clk_unregister_plls(const struct mtk_pll_data *plls, int num_plls,
struct clk_hw_onecell_data *clk_data);
extern const struct clk_ops mtk_pll_ops;
+extern const struct clk_ops mtk_pll_fenc_clr_set_ops;
static inline struct mtk_clk_pll *to_mtk_clk_pll(struct clk_hw *hw)
{
@@ -96,8 +104,7 @@ void mtk_pll_calc_values(struct mtk_clk_pll *pll, u32 *pcw, u32 *postdiv,
u32 freq, u32 fin);
int mtk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate);
-long mtk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate);
+int mtk_pll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req);
struct clk_hw *mtk_clk_register_pll_ops(struct mtk_clk_pll *pll,
const struct mtk_pll_data *data,
diff --git a/drivers/clk/mediatek/clk-pllfh.c b/drivers/clk/mediatek/clk-pllfh.c
index 094ec8a26d66..83630ee07ee9 100644
--- a/drivers/clk/mediatek/clk-pllfh.c
+++ b/drivers/clk/mediatek/clk-pllfh.c
@@ -42,7 +42,7 @@ static const struct clk_ops mtk_pllfh_ops = {
.prepare = mtk_pll_prepare,
.unprepare = mtk_pll_unprepare,
.recalc_rate = mtk_pll_recalc_rate,
- .round_rate = mtk_pll_round_rate,
+ .determine_rate = mtk_pll_determine_rate,
.set_rate = mtk_fhctl_set_rate,
};
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
index 7197d23543b8..71481607a6d5 100644
--- a/drivers/clk/meson/Kconfig
+++ b/drivers/clk/meson/Kconfig
@@ -36,6 +36,8 @@ config COMMON_CLK_MESON_VCLK
select COMMON_CLK_MESON_REGMAP
config COMMON_CLK_MESON_CLKC_UTILS
	tristate
+	select REGMAP
+	select MFD_SYSCON
config COMMON_CLK_MESON_AO_CLKC
@@ -44,11 +46,6 @@ config COMMON_CLK_MESON_AO_CLKC
select COMMON_CLK_MESON_CLKC_UTILS
select RESET_CONTROLLER
-config COMMON_CLK_MESON_EE_CLKC
- tristate
- select COMMON_CLK_MESON_REGMAP
- select COMMON_CLK_MESON_CLKC_UTILS
-
config COMMON_CLK_MESON_CPU_DYNDIV
tristate
select COMMON_CLK_MESON_REGMAP
@@ -73,12 +70,12 @@ config COMMON_CLK_GXBB
depends on ARM64
default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
+ select COMMON_CLK_MESON_CLKC_UTILS
select COMMON_CLK_MESON_DUALDIV
select COMMON_CLK_MESON_VID_PLL_DIV
select COMMON_CLK_MESON_MPLL
select COMMON_CLK_MESON_PLL
select COMMON_CLK_MESON_AO_CLKC
- select COMMON_CLK_MESON_EE_CLKC
select MFD_SYSCON
help
Support for the clock controller on AmLogic S905 devices, aka gxbb.
@@ -89,11 +86,11 @@ config COMMON_CLK_AXG
depends on ARM64
default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
+ select COMMON_CLK_MESON_CLKC_UTILS
select COMMON_CLK_MESON_DUALDIV
select COMMON_CLK_MESON_MPLL
select COMMON_CLK_MESON_PLL
select COMMON_CLK_MESON_AO_CLKC
- select COMMON_CLK_MESON_EE_CLKC
select MFD_SYSCON
help
Support for the clock controller on AmLogic A113D devices, aka axg.
@@ -167,11 +164,11 @@ config COMMON_CLK_G12A
depends on ARM64
default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
+ select COMMON_CLK_MESON_CLKC_UTILS
select COMMON_CLK_MESON_DUALDIV
select COMMON_CLK_MESON_MPLL
select COMMON_CLK_MESON_PLL
select COMMON_CLK_MESON_AO_CLKC
- select COMMON_CLK_MESON_EE_CLKC
select COMMON_CLK_MESON_CPU_DYNDIV
select COMMON_CLK_MESON_VID_PLL_DIV
select COMMON_CLK_MESON_VCLK
diff --git a/drivers/clk/meson/Makefile b/drivers/clk/meson/Makefile
index bc56a47931c1..c6998e752c68 100644
--- a/drivers/clk/meson/Makefile
+++ b/drivers/clk/meson/Makefile
@@ -5,7 +5,6 @@ obj-$(CONFIG_COMMON_CLK_MESON_CLKC_UTILS) += meson-clkc-utils.o
obj-$(CONFIG_COMMON_CLK_MESON_AO_CLKC) += meson-aoclk.o
obj-$(CONFIG_COMMON_CLK_MESON_CPU_DYNDIV) += clk-cpu-dyndiv.o
obj-$(CONFIG_COMMON_CLK_MESON_DUALDIV) += clk-dualdiv.o
-obj-$(CONFIG_COMMON_CLK_MESON_EE_CLKC) += meson-eeclk.o
obj-$(CONFIG_COMMON_CLK_MESON_MPLL) += clk-mpll.o
obj-$(CONFIG_COMMON_CLK_MESON_PHASE) += clk-phase.o
obj-$(CONFIG_COMMON_CLK_MESON_PLL) += clk-pll.o
diff --git a/drivers/clk/meson/a1-peripherals.c b/drivers/clk/meson/a1-peripherals.c
index 1f5d445d44fe..5e0d58c01405 100644
--- a/drivers/clk/meson/a1-peripherals.c
+++ b/drivers/clk/meson/a1-peripherals.c
@@ -46,7 +46,7 @@
#define PSRAM_CLK_CTRL 0xf4
#define DMC_CLK_CTRL 0xf8
-static struct clk_regmap xtal_in = {
+static struct clk_regmap a1_xtal_in = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_OSCIN_CTRL,
.bit_idx = 0,
@@ -61,7 +61,7 @@ static struct clk_regmap xtal_in = {
},
};
-static struct clk_regmap fixpll_in = {
+static struct clk_regmap a1_fixpll_in = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_OSCIN_CTRL,
.bit_idx = 1,
@@ -76,7 +76,7 @@ static struct clk_regmap fixpll_in = {
},
};
-static struct clk_regmap usb_phy_in = {
+static struct clk_regmap a1_usb_phy_in = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_OSCIN_CTRL,
.bit_idx = 2,
@@ -91,7 +91,7 @@ static struct clk_regmap usb_phy_in = {
},
};
-static struct clk_regmap usb_ctrl_in = {
+static struct clk_regmap a1_usb_ctrl_in = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_OSCIN_CTRL,
.bit_idx = 3,
@@ -106,7 +106,7 @@ static struct clk_regmap usb_ctrl_in = {
},
};
-static struct clk_regmap hifipll_in = {
+static struct clk_regmap a1_hifipll_in = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_OSCIN_CTRL,
.bit_idx = 4,
@@ -121,7 +121,7 @@ static struct clk_regmap hifipll_in = {
},
};
-static struct clk_regmap syspll_in = {
+static struct clk_regmap a1_syspll_in = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_OSCIN_CTRL,
.bit_idx = 5,
@@ -136,7 +136,7 @@ static struct clk_regmap syspll_in = {
},
};
-static struct clk_regmap dds_in = {
+static struct clk_regmap a1_dds_in = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_OSCIN_CTRL,
.bit_idx = 6,
@@ -151,7 +151,7 @@ static struct clk_regmap dds_in = {
},
};
-static struct clk_regmap rtc_32k_in = {
+static struct clk_regmap a1_rtc_32k_in = {
.data = &(struct clk_regmap_gate_data){
.offset = RTC_BY_OSCIN_CTRL0,
.bit_idx = 31,
@@ -166,7 +166,7 @@ static struct clk_regmap rtc_32k_in = {
},
};
-static const struct meson_clk_dualdiv_param clk_32k_div_table[] = {
+static const struct meson_clk_dualdiv_param a1_32k_div_table[] = {
{
.dual = 1,
.n1 = 733,
@@ -177,7 +177,7 @@ static const struct meson_clk_dualdiv_param clk_32k_div_table[] = {
{}
};
-static struct clk_regmap rtc_32k_div = {
+static struct clk_regmap a1_rtc_32k_div = {
.data = &(struct meson_clk_dualdiv_data){
.n1 = {
.reg_off = RTC_BY_OSCIN_CTRL0,
@@ -204,19 +204,19 @@ static struct clk_regmap rtc_32k_div = {
.shift = 28,
.width = 1,
},
- .table = clk_32k_div_table,
+ .table = a1_32k_div_table,
},
.hw.init = &(struct clk_init_data){
.name = "rtc_32k_div",
.ops = &meson_clk_dualdiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &rtc_32k_in.hw
+ &a1_rtc_32k_in.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap rtc_32k_xtal = {
+static struct clk_regmap a1_rtc_32k_xtal = {
.data = &(struct clk_regmap_gate_data){
.offset = RTC_BY_OSCIN_CTRL1,
.bit_idx = 24,
@@ -225,13 +225,13 @@ static struct clk_regmap rtc_32k_xtal = {
.name = "rtc_32k_xtal",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &rtc_32k_in.hw
+ &a1_rtc_32k_in.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap rtc_32k_sel = {
+static struct clk_regmap a1_rtc_32k_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = RTC_CTRL,
.mask = 0x3,
@@ -242,15 +242,15 @@ static struct clk_regmap rtc_32k_sel = {
.name = "rtc_32k_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &rtc_32k_xtal.hw,
- &rtc_32k_div.hw,
+ &a1_rtc_32k_xtal.hw,
+ &a1_rtc_32k_div.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap rtc = {
+static struct clk_regmap a1_rtc = {
.data = &(struct clk_regmap_gate_data){
.offset = RTC_BY_OSCIN_CTRL0,
.bit_idx = 30,
@@ -259,38 +259,38 @@ static struct clk_regmap rtc = {
.name = "rtc",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &rtc_32k_sel.hw
+ &a1_rtc_32k_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static u32 mux_table_sys[] = { 0, 1, 2, 3, 7 };
-static const struct clk_parent_data sys_parents[] = {
+static u32 a1_sys_parents_val_table[] = { 0, 1, 2, 3, 7 };
+static const struct clk_parent_data a1_sys_parents[] = {
{ .fw_name = "xtal" },
{ .fw_name = "fclk_div2" },
{ .fw_name = "fclk_div3" },
{ .fw_name = "fclk_div5" },
- { .hw = &rtc.hw },
+ { .hw = &a1_rtc.hw },
};
-static struct clk_regmap sys_b_sel = {
+static struct clk_regmap a1_sys_b_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = SYS_CLK_CTRL0,
.mask = 0x7,
.shift = 26,
- .table = mux_table_sys,
+ .table = a1_sys_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "sys_b_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_data = sys_parents,
- .num_parents = ARRAY_SIZE(sys_parents),
+ .parent_data = a1_sys_parents,
+ .num_parents = ARRAY_SIZE(a1_sys_parents),
},
};
-static struct clk_regmap sys_b_div = {
+static struct clk_regmap a1_sys_b_div = {
.data = &(struct clk_regmap_div_data){
.offset = SYS_CLK_CTRL0,
.shift = 16,
@@ -300,14 +300,14 @@ static struct clk_regmap sys_b_div = {
.name = "sys_b_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sys_b_sel.hw
+ &a1_sys_b_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap sys_b = {
+static struct clk_regmap a1_sys_b = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_CLK_CTRL0,
.bit_idx = 29,
@@ -316,29 +316,29 @@ static struct clk_regmap sys_b = {
.name = "sys_b",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sys_b_div.hw
+ &a1_sys_b_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap sys_a_sel = {
+static struct clk_regmap a1_sys_a_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = SYS_CLK_CTRL0,
.mask = 0x7,
.shift = 10,
- .table = mux_table_sys,
+ .table = a1_sys_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "sys_a_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_data = sys_parents,
- .num_parents = ARRAY_SIZE(sys_parents),
+ .parent_data = a1_sys_parents,
+ .num_parents = ARRAY_SIZE(a1_sys_parents),
},
};
-static struct clk_regmap sys_a_div = {
+static struct clk_regmap a1_sys_a_div = {
.data = &(struct clk_regmap_div_data){
.offset = SYS_CLK_CTRL0,
.shift = 0,
@@ -348,14 +348,14 @@ static struct clk_regmap sys_a_div = {
.name = "sys_a_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sys_a_sel.hw
+ &a1_sys_a_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap sys_a = {
+static struct clk_regmap a1_sys_a = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_CLK_CTRL0,
.bit_idx = 13,
@@ -364,14 +364,14 @@ static struct clk_regmap sys_a = {
.name = "sys_a",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sys_a_div.hw
+ &a1_sys_a_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap sys = {
+static struct clk_regmap a1_sys = {
.data = &(struct clk_regmap_mux_data){
.offset = SYS_CLK_CTRL0,
.mask = 0x1,
@@ -381,8 +381,8 @@ static struct clk_regmap sys = {
.name = "sys",
.ops = &clk_regmap_mux_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sys_a.hw,
- &sys_b.hw,
+ &a1_sys_a.hw,
+ &a1_sys_b.hw,
},
.num_parents = 2,
/*
@@ -398,32 +398,32 @@ static struct clk_regmap sys = {
},
};
-static u32 mux_table_dsp_ab[] = { 0, 1, 2, 3, 4, 7 };
-static const struct clk_parent_data dsp_ab_parent_data[] = {
+static u32 a1_dsp_parents_val_table[] = { 0, 1, 2, 3, 4, 7 };
+static const struct clk_parent_data a1_dsp_parents[] = {
{ .fw_name = "xtal", },
{ .fw_name = "fclk_div2", },
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div5", },
{ .fw_name = "hifi_pll", },
- { .hw = &rtc.hw },
+ { .hw = &a1_rtc.hw },
};
-static struct clk_regmap dspa_a_sel = {
+static struct clk_regmap a1_dspa_a_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = DSPA_CLK_CTRL0,
.mask = 0x7,
.shift = 10,
- .table = mux_table_dsp_ab,
+ .table = a1_dsp_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "dspa_a_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = dsp_ab_parent_data,
- .num_parents = ARRAY_SIZE(dsp_ab_parent_data),
+ .parent_data = a1_dsp_parents,
+ .num_parents = ARRAY_SIZE(a1_dsp_parents),
},
};
-static struct clk_regmap dspa_a_div = {
+static struct clk_regmap a1_dspa_a_div = {
.data = &(struct clk_regmap_div_data){
.offset = DSPA_CLK_CTRL0,
.shift = 0,
@@ -433,14 +433,14 @@ static struct clk_regmap dspa_a_div = {
.name = "dspa_a_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspa_a_sel.hw
+ &a1_dspa_a_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspa_a = {
+static struct clk_regmap a1_dspa_a = {
.data = &(struct clk_regmap_gate_data){
.offset = DSPA_CLK_CTRL0,
.bit_idx = 13,
@@ -449,29 +449,29 @@ static struct clk_regmap dspa_a = {
.name = "dspa_a",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspa_a_div.hw
+ &a1_dspa_a_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspa_b_sel = {
+static struct clk_regmap a1_dspa_b_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = DSPA_CLK_CTRL0,
.mask = 0x7,
.shift = 26,
- .table = mux_table_dsp_ab,
+ .table = a1_dsp_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "dspa_b_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = dsp_ab_parent_data,
- .num_parents = ARRAY_SIZE(dsp_ab_parent_data),
+ .parent_data = a1_dsp_parents,
+ .num_parents = ARRAY_SIZE(a1_dsp_parents),
},
};
-static struct clk_regmap dspa_b_div = {
+static struct clk_regmap a1_dspa_b_div = {
.data = &(struct clk_regmap_div_data){
.offset = DSPA_CLK_CTRL0,
.shift = 16,
@@ -481,14 +481,14 @@ static struct clk_regmap dspa_b_div = {
.name = "dspa_b_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspa_b_sel.hw
+ &a1_dspa_b_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspa_b = {
+static struct clk_regmap a1_dspa_b = {
.data = &(struct clk_regmap_gate_data){
.offset = DSPA_CLK_CTRL0,
.bit_idx = 29,
@@ -497,14 +497,14 @@ static struct clk_regmap dspa_b = {
.name = "dspa_b",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspa_b_div.hw
+ &a1_dspa_b_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspa_sel = {
+static struct clk_regmap a1_dspa_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = DSPA_CLK_CTRL0,
.mask = 0x1,
@@ -514,15 +514,15 @@ static struct clk_regmap dspa_sel = {
.name = "dspa_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspa_a.hw,
- &dspa_b.hw,
+ &a1_dspa_a.hw,
+ &a1_dspa_b.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspa_en = {
+static struct clk_regmap a1_dspa_en = {
.data = &(struct clk_regmap_gate_data){
.offset = DSPA_CLK_EN,
.bit_idx = 1,
@@ -531,14 +531,14 @@ static struct clk_regmap dspa_en = {
.name = "dspa_en",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspa_sel.hw
+ &a1_dspa_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspa_en_nic = {
+static struct clk_regmap a1_dspa_en_nic = {
.data = &(struct clk_regmap_gate_data){
.offset = DSPA_CLK_EN,
.bit_idx = 0,
@@ -547,29 +547,29 @@ static struct clk_regmap dspa_en_nic = {
.name = "dspa_en_nic",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspa_sel.hw
+ &a1_dspa_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspb_a_sel = {
+static struct clk_regmap a1_dspb_a_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = DSPB_CLK_CTRL0,
.mask = 0x7,
.shift = 10,
- .table = mux_table_dsp_ab,
+ .table = a1_dsp_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "dspb_a_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = dsp_ab_parent_data,
- .num_parents = ARRAY_SIZE(dsp_ab_parent_data),
+ .parent_data = a1_dsp_parents,
+ .num_parents = ARRAY_SIZE(a1_dsp_parents),
},
};
-static struct clk_regmap dspb_a_div = {
+static struct clk_regmap a1_dspb_a_div = {
.data = &(struct clk_regmap_div_data){
.offset = DSPB_CLK_CTRL0,
.shift = 0,
@@ -579,14 +579,14 @@ static struct clk_regmap dspb_a_div = {
.name = "dspb_a_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspb_a_sel.hw
+ &a1_dspb_a_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspb_a = {
+static struct clk_regmap a1_dspb_a = {
.data = &(struct clk_regmap_gate_data){
.offset = DSPB_CLK_CTRL0,
.bit_idx = 13,
@@ -595,29 +595,29 @@ static struct clk_regmap dspb_a = {
.name = "dspb_a",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspb_a_div.hw
+ &a1_dspb_a_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspb_b_sel = {
+static struct clk_regmap a1_dspb_b_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = DSPB_CLK_CTRL0,
.mask = 0x7,
.shift = 26,
- .table = mux_table_dsp_ab,
+ .table = a1_dsp_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "dspb_b_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = dsp_ab_parent_data,
- .num_parents = ARRAY_SIZE(dsp_ab_parent_data),
+ .parent_data = a1_dsp_parents,
+ .num_parents = ARRAY_SIZE(a1_dsp_parents),
},
};
-static struct clk_regmap dspb_b_div = {
+static struct clk_regmap a1_dspb_b_div = {
.data = &(struct clk_regmap_div_data){
.offset = DSPB_CLK_CTRL0,
.shift = 16,
@@ -627,14 +627,14 @@ static struct clk_regmap dspb_b_div = {
.name = "dspb_b_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspb_b_sel.hw
+ &a1_dspb_b_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspb_b = {
+static struct clk_regmap a1_dspb_b = {
.data = &(struct clk_regmap_gate_data){
.offset = DSPB_CLK_CTRL0,
.bit_idx = 29,
@@ -643,14 +643,14 @@ static struct clk_regmap dspb_b = {
.name = "dspb_b",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspb_b_div.hw
+ &a1_dspb_b_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspb_sel = {
+static struct clk_regmap a1_dspb_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = DSPB_CLK_CTRL0,
.mask = 0x1,
@@ -660,15 +660,15 @@ static struct clk_regmap dspb_sel = {
.name = "dspb_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspb_a.hw,
- &dspb_b.hw,
+ &a1_dspb_a.hw,
+ &a1_dspb_b.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspb_en = {
+static struct clk_regmap a1_dspb_en = {
.data = &(struct clk_regmap_gate_data){
.offset = DSPB_CLK_EN,
.bit_idx = 1,
@@ -677,14 +677,14 @@ static struct clk_regmap dspb_en = {
.name = "dspb_en",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspb_sel.hw
+ &a1_dspb_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspb_en_nic = {
+static struct clk_regmap a1_dspb_en_nic = {
.data = &(struct clk_regmap_gate_data){
.offset = DSPB_CLK_EN,
.bit_idx = 0,
@@ -693,14 +693,14 @@ static struct clk_regmap dspb_en_nic = {
.name = "dspb_en_nic",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspb_sel.hw
+ &a1_dspb_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap clk_24m = {
+static struct clk_regmap a1_24m = {
.data = &(struct clk_regmap_gate_data){
.offset = CLK12_24_CTRL,
.bit_idx = 11,
@@ -715,20 +715,20 @@ static struct clk_regmap clk_24m = {
},
};
-static struct clk_fixed_factor clk_24m_div2 = {
+static struct clk_fixed_factor a1_24m_div2 = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data){
.name = "24m_div2",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &clk_24m.hw
+ &a1_24m.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap clk_12m = {
+static struct clk_regmap a1_12m = {
.data = &(struct clk_regmap_gate_data){
.offset = CLK12_24_CTRL,
.bit_idx = 10,
@@ -737,13 +737,13 @@ static struct clk_regmap clk_12m = {
.name = "12m",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &clk_24m_div2.hw
+ &a1_24m_div2.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap fclk_div2_divn_pre = {
+static struct clk_regmap a1_fclk_div2_divn_pre = {
.data = &(struct clk_regmap_div_data){
.offset = CLK12_24_CTRL,
.shift = 0,
@@ -759,7 +759,7 @@ static struct clk_regmap fclk_div2_divn_pre = {
},
};
-static struct clk_regmap fclk_div2_divn = {
+static struct clk_regmap a1_fclk_div2_divn = {
.data = &(struct clk_regmap_gate_data){
.offset = CLK12_24_CTRL,
.bit_idx = 12,
@@ -768,7 +768,7 @@ static struct clk_regmap fclk_div2_divn = {
.name = "fclk_div2_divn",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div2_divn_pre.hw
+ &a1_fclk_div2_divn_pre.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -779,10 +779,10 @@ static struct clk_regmap fclk_div2_divn = {
* the index 2 is sys_pll_div16, it will be implemented in the CPU clock driver,
* the index 4 is the clock measurement source, it's not supported yet
*/
-static u32 gen_table[] = { 0, 1, 3, 5, 6, 7, 8 };
-static const struct clk_parent_data gen_parent_data[] = {
+static u32 a1_gen_parents_val_table[] = { 0, 1, 3, 5, 6, 7, 8 };
+static const struct clk_parent_data a1_gen_parents[] = {
{ .fw_name = "xtal", },
- { .hw = &rtc.hw },
+ { .hw = &a1_rtc.hw },
{ .fw_name = "hifi_pll", },
{ .fw_name = "fclk_div2", },
{ .fw_name = "fclk_div3", },
@@ -790,18 +790,18 @@ static const struct clk_parent_data gen_parent_data[] = {
{ .fw_name = "fclk_div7", },
};
-static struct clk_regmap gen_sel = {
+static struct clk_regmap a1_gen_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = GEN_CLK_CTRL,
.mask = 0xf,
.shift = 12,
- .table = gen_table,
+ .table = a1_gen_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "gen_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gen_parent_data,
- .num_parents = ARRAY_SIZE(gen_parent_data),
+ .parent_data = a1_gen_parents,
+ .num_parents = ARRAY_SIZE(a1_gen_parents),
/*
* The GEN clock can be connected to an external pad, so it
* may be set up directly from the device tree. Additionally,
@@ -813,7 +813,7 @@ static struct clk_regmap gen_sel = {
},
};
-static struct clk_regmap gen_div = {
+static struct clk_regmap a1_gen_div = {
.data = &(struct clk_regmap_div_data){
.offset = GEN_CLK_CTRL,
.shift = 0,
@@ -823,14 +823,14 @@ static struct clk_regmap gen_div = {
.name = "gen_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &gen_sel.hw
+ &a1_gen_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap gen = {
+static struct clk_regmap a1_gen = {
.data = &(struct clk_regmap_gate_data){
.offset = GEN_CLK_CTRL,
.bit_idx = 11,
@@ -839,14 +839,14 @@ static struct clk_regmap gen = {
.name = "gen",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &gen_div.hw
+ &a1_gen_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap saradc_sel = {
+static struct clk_regmap a1_saradc_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = SAR_ADC_CLK_CTRL,
.mask = 0x1,
@@ -857,13 +857,13 @@ static struct clk_regmap saradc_sel = {
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "xtal", },
- { .hw = &sys.hw, },
+ { .hw = &a1_sys.hw, },
},
.num_parents = 2,
},
};
-static struct clk_regmap saradc_div = {
+static struct clk_regmap a1_saradc_div = {
.data = &(struct clk_regmap_div_data){
.offset = SAR_ADC_CLK_CTRL,
.shift = 0,
@@ -873,14 +873,14 @@ static struct clk_regmap saradc_div = {
.name = "saradc_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &saradc_sel.hw
+ &a1_saradc_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap saradc = {
+static struct clk_regmap a1_saradc = {
.data = &(struct clk_regmap_gate_data){
.offset = SAR_ADC_CLK_CTRL,
.bit_idx = 8,
@@ -889,20 +889,20 @@ static struct clk_regmap saradc = {
.name = "saradc",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &saradc_div.hw
+ &a1_saradc_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data pwm_abcd_parents[] = {
+static const struct clk_parent_data a1_pwm_abcd_parents[] = {
{ .fw_name = "xtal", },
- { .hw = &sys.hw },
- { .hw = &rtc.hw },
+ { .hw = &a1_sys.hw },
+ { .hw = &a1_rtc.hw },
};
-static struct clk_regmap pwm_a_sel = {
+static struct clk_regmap a1_pwm_a_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = PWM_CLK_AB_CTRL,
.mask = 0x1,
@@ -911,12 +911,12 @@ static struct clk_regmap pwm_a_sel = {
.hw.init = &(struct clk_init_data){
.name = "pwm_a_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = pwm_abcd_parents,
- .num_parents = ARRAY_SIZE(pwm_abcd_parents),
+ .parent_data = a1_pwm_abcd_parents,
+ .num_parents = ARRAY_SIZE(a1_pwm_abcd_parents),
},
};
-static struct clk_regmap pwm_a_div = {
+static struct clk_regmap a1_pwm_a_div = {
.data = &(struct clk_regmap_div_data){
.offset = PWM_CLK_AB_CTRL,
.shift = 0,
@@ -926,14 +926,14 @@ static struct clk_regmap pwm_a_div = {
.name = "pwm_a_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_a_sel.hw
+ &a1_pwm_a_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_a = {
+static struct clk_regmap a1_pwm_a = {
.data = &(struct clk_regmap_gate_data){
.offset = PWM_CLK_AB_CTRL,
.bit_idx = 8,
@@ -942,14 +942,14 @@ static struct clk_regmap pwm_a = {
.name = "pwm_a",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_a_div.hw
+ &a1_pwm_a_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_b_sel = {
+static struct clk_regmap a1_pwm_b_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = PWM_CLK_AB_CTRL,
.mask = 0x1,
@@ -958,12 +958,12 @@ static struct clk_regmap pwm_b_sel = {
.hw.init = &(struct clk_init_data){
.name = "pwm_b_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = pwm_abcd_parents,
- .num_parents = ARRAY_SIZE(pwm_abcd_parents),
+ .parent_data = a1_pwm_abcd_parents,
+ .num_parents = ARRAY_SIZE(a1_pwm_abcd_parents),
},
};
-static struct clk_regmap pwm_b_div = {
+static struct clk_regmap a1_pwm_b_div = {
.data = &(struct clk_regmap_div_data){
.offset = PWM_CLK_AB_CTRL,
.shift = 16,
@@ -973,14 +973,14 @@ static struct clk_regmap pwm_b_div = {
.name = "pwm_b_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_b_sel.hw
+ &a1_pwm_b_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_b = {
+static struct clk_regmap a1_pwm_b = {
.data = &(struct clk_regmap_gate_data){
.offset = PWM_CLK_AB_CTRL,
.bit_idx = 24,
@@ -989,14 +989,14 @@ static struct clk_regmap pwm_b = {
.name = "pwm_b",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_b_div.hw
+ &a1_pwm_b_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_c_sel = {
+static struct clk_regmap a1_pwm_c_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = PWM_CLK_CD_CTRL,
.mask = 0x1,
@@ -1005,12 +1005,12 @@ static struct clk_regmap pwm_c_sel = {
.hw.init = &(struct clk_init_data){
.name = "pwm_c_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = pwm_abcd_parents,
- .num_parents = ARRAY_SIZE(pwm_abcd_parents),
+ .parent_data = a1_pwm_abcd_parents,
+ .num_parents = ARRAY_SIZE(a1_pwm_abcd_parents),
},
};
-static struct clk_regmap pwm_c_div = {
+static struct clk_regmap a1_pwm_c_div = {
.data = &(struct clk_regmap_div_data){
.offset = PWM_CLK_CD_CTRL,
.shift = 0,
@@ -1020,14 +1020,14 @@ static struct clk_regmap pwm_c_div = {
.name = "pwm_c_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_c_sel.hw
+ &a1_pwm_c_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_c = {
+static struct clk_regmap a1_pwm_c = {
.data = &(struct clk_regmap_gate_data){
.offset = PWM_CLK_CD_CTRL,
.bit_idx = 8,
@@ -1036,14 +1036,14 @@ static struct clk_regmap pwm_c = {
.name = "pwm_c",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_c_div.hw
+ &a1_pwm_c_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_d_sel = {
+static struct clk_regmap a1_pwm_d_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = PWM_CLK_CD_CTRL,
.mask = 0x1,
@@ -1052,12 +1052,12 @@ static struct clk_regmap pwm_d_sel = {
.hw.init = &(struct clk_init_data){
.name = "pwm_d_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = pwm_abcd_parents,
- .num_parents = ARRAY_SIZE(pwm_abcd_parents),
+ .parent_data = a1_pwm_abcd_parents,
+ .num_parents = ARRAY_SIZE(a1_pwm_abcd_parents),
},
};
-static struct clk_regmap pwm_d_div = {
+static struct clk_regmap a1_pwm_d_div = {
.data = &(struct clk_regmap_div_data){
.offset = PWM_CLK_CD_CTRL,
.shift = 16,
@@ -1067,14 +1067,14 @@ static struct clk_regmap pwm_d_div = {
.name = "pwm_d_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_d_sel.hw
+ &a1_pwm_d_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_d = {
+static struct clk_regmap a1_pwm_d = {
.data = &(struct clk_regmap_gate_data){
.offset = PWM_CLK_CD_CTRL,
.bit_idx = 24,
@@ -1083,21 +1083,21 @@ static struct clk_regmap pwm_d = {
.name = "pwm_d",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_d_div.hw
+ &a1_pwm_d_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data pwm_ef_parents[] = {
+static const struct clk_parent_data a1_pwm_ef_parents[] = {
{ .fw_name = "xtal", },
- { .hw = &sys.hw },
+ { .hw = &a1_sys.hw },
{ .fw_name = "fclk_div5", },
- { .hw = &rtc.hw },
+ { .hw = &a1_rtc.hw },
};
-static struct clk_regmap pwm_e_sel = {
+static struct clk_regmap a1_pwm_e_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = PWM_CLK_EF_CTRL,
.mask = 0x3,
@@ -1106,12 +1106,12 @@ static struct clk_regmap pwm_e_sel = {
.hw.init = &(struct clk_init_data){
.name = "pwm_e_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = pwm_ef_parents,
- .num_parents = ARRAY_SIZE(pwm_ef_parents),
+ .parent_data = a1_pwm_ef_parents,
+ .num_parents = ARRAY_SIZE(a1_pwm_ef_parents),
},
};
-static struct clk_regmap pwm_e_div = {
+static struct clk_regmap a1_pwm_e_div = {
.data = &(struct clk_regmap_div_data){
.offset = PWM_CLK_EF_CTRL,
.shift = 0,
@@ -1121,14 +1121,14 @@ static struct clk_regmap pwm_e_div = {
.name = "pwm_e_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_e_sel.hw
+ &a1_pwm_e_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_e = {
+static struct clk_regmap a1_pwm_e = {
.data = &(struct clk_regmap_gate_data){
.offset = PWM_CLK_EF_CTRL,
.bit_idx = 8,
@@ -1137,14 +1137,14 @@ static struct clk_regmap pwm_e = {
.name = "pwm_e",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_e_div.hw
+ &a1_pwm_e_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_f_sel = {
+static struct clk_regmap a1_pwm_f_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = PWM_CLK_EF_CTRL,
.mask = 0x3,
@@ -1153,12 +1153,12 @@ static struct clk_regmap pwm_f_sel = {
.hw.init = &(struct clk_init_data){
.name = "pwm_f_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = pwm_ef_parents,
- .num_parents = ARRAY_SIZE(pwm_ef_parents),
+ .parent_data = a1_pwm_ef_parents,
+ .num_parents = ARRAY_SIZE(a1_pwm_ef_parents),
},
};
-static struct clk_regmap pwm_f_div = {
+static struct clk_regmap a1_pwm_f_div = {
.data = &(struct clk_regmap_div_data){
.offset = PWM_CLK_EF_CTRL,
.shift = 16,
@@ -1168,14 +1168,14 @@ static struct clk_regmap pwm_f_div = {
.name = "pwm_f_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_f_sel.hw
+ &a1_pwm_f_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_f = {
+static struct clk_regmap a1_pwm_f = {
.data = &(struct clk_regmap_gate_data){
.offset = PWM_CLK_EF_CTRL,
.bit_idx = 24,
@@ -1184,7 +1184,7 @@ static struct clk_regmap pwm_f = {
.name = "pwm_f",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_f_div.hw
+ &a1_pwm_f_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1200,14 +1200,14 @@ static struct clk_regmap pwm_f = {
* --------------------|/
* 24M
*/
-static const struct clk_parent_data spicc_spifc_parents[] = {
+static const struct clk_parent_data a1_spi_parents[] = {
{ .fw_name = "fclk_div2"},
{ .fw_name = "fclk_div3"},
{ .fw_name = "fclk_div5"},
{ .fw_name = "hifi_pll" },
};
-static struct clk_regmap spicc_sel = {
+static struct clk_regmap a1_spicc_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = SPICC_CLK_CTRL,
.mask = 0x3,
@@ -1216,12 +1216,12 @@ static struct clk_regmap spicc_sel = {
.hw.init = &(struct clk_init_data){
.name = "spicc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = spicc_spifc_parents,
- .num_parents = ARRAY_SIZE(spicc_spifc_parents),
+ .parent_data = a1_spi_parents,
+ .num_parents = ARRAY_SIZE(a1_spi_parents),
},
};
-static struct clk_regmap spicc_div = {
+static struct clk_regmap a1_spicc_div = {
.data = &(struct clk_regmap_div_data){
.offset = SPICC_CLK_CTRL,
.shift = 0,
@@ -1231,14 +1231,14 @@ static struct clk_regmap spicc_div = {
.name = "spicc_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &spicc_sel.hw
+ &a1_spicc_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap spicc_sel2 = {
+static struct clk_regmap a1_spicc_sel2 = {
.data = &(struct clk_regmap_mux_data){
.offset = SPICC_CLK_CTRL,
.mask = 0x1,
@@ -1248,7 +1248,7 @@ static struct clk_regmap spicc_sel2 = {
.name = "spicc_sel2",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
- { .hw = &spicc_div.hw },
+ { .hw = &a1_spicc_div.hw },
{ .fw_name = "xtal", },
},
.num_parents = 2,
@@ -1256,7 +1256,7 @@ static struct clk_regmap spicc_sel2 = {
},
};
-static struct clk_regmap spicc = {
+static struct clk_regmap a1_spicc = {
.data = &(struct clk_regmap_gate_data){
.offset = SPICC_CLK_CTRL,
.bit_idx = 8,
@@ -1265,14 +1265,14 @@ static struct clk_regmap spicc = {
.name = "spicc",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &spicc_sel2.hw
+ &a1_spicc_sel2.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap ts_div = {
+static struct clk_regmap a1_ts_div = {
.data = &(struct clk_regmap_div_data){
.offset = TS_CLK_CTRL,
.shift = 0,
@@ -1288,7 +1288,7 @@ static struct clk_regmap ts_div = {
},
};
-static struct clk_regmap ts = {
+static struct clk_regmap a1_ts = {
.data = &(struct clk_regmap_gate_data){
.offset = TS_CLK_CTRL,
.bit_idx = 8,
@@ -1297,14 +1297,14 @@ static struct clk_regmap ts = {
.name = "ts",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &ts_div.hw
+ &a1_ts_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap spifc_sel = {
+static struct clk_regmap a1_spifc_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = SPIFC_CLK_CTRL,
.mask = 0x3,
@@ -1313,12 +1313,12 @@ static struct clk_regmap spifc_sel = {
.hw.init = &(struct clk_init_data){
.name = "spifc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = spicc_spifc_parents,
- .num_parents = ARRAY_SIZE(spicc_spifc_parents),
+ .parent_data = a1_spi_parents,
+ .num_parents = ARRAY_SIZE(a1_spi_parents),
},
};
-static struct clk_regmap spifc_div = {
+static struct clk_regmap a1_spifc_div = {
.data = &(struct clk_regmap_div_data){
.offset = SPIFC_CLK_CTRL,
.shift = 0,
@@ -1328,14 +1328,14 @@ static struct clk_regmap spifc_div = {
.name = "spifc_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &spifc_sel.hw
+ &a1_spifc_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap spifc_sel2 = {
+static struct clk_regmap a1_spifc_sel2 = {
.data = &(struct clk_regmap_mux_data){
.offset = SPIFC_CLK_CTRL,
.mask = 0x1,
@@ -1345,7 +1345,7 @@ static struct clk_regmap spifc_sel2 = {
.name = "spifc_sel2",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
- { .hw = &spifc_div.hw },
+ { .hw = &a1_spifc_div.hw },
{ .fw_name = "xtal", },
},
.num_parents = 2,
@@ -1353,7 +1353,7 @@ static struct clk_regmap spifc_sel2 = {
},
};
-static struct clk_regmap spifc = {
+static struct clk_regmap a1_spifc = {
.data = &(struct clk_regmap_gate_data){
.offset = SPIFC_CLK_CTRL,
.bit_idx = 8,
@@ -1362,21 +1362,21 @@ static struct clk_regmap spifc = {
.name = "spifc",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &spifc_sel2.hw
+ &a1_spifc_sel2.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data usb_bus_parents[] = {
+static const struct clk_parent_data a1_usb_bus_parents[] = {
{ .fw_name = "xtal", },
- { .hw = &sys.hw },
+ { .hw = &a1_sys.hw },
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div5", },
};
-static struct clk_regmap usb_bus_sel = {
+static struct clk_regmap a1_usb_bus_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = USB_BUSCLK_CTRL,
.mask = 0x3,
@@ -1385,13 +1385,13 @@ static struct clk_regmap usb_bus_sel = {
.hw.init = &(struct clk_init_data){
.name = "usb_bus_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = usb_bus_parents,
- .num_parents = ARRAY_SIZE(usb_bus_parents),
+ .parent_data = a1_usb_bus_parents,
+ .num_parents = ARRAY_SIZE(a1_usb_bus_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap usb_bus_div = {
+static struct clk_regmap a1_usb_bus_div = {
.data = &(struct clk_regmap_div_data){
.offset = USB_BUSCLK_CTRL,
.shift = 0,
@@ -1401,14 +1401,14 @@ static struct clk_regmap usb_bus_div = {
.name = "usb_bus_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &usb_bus_sel.hw
+ &a1_usb_bus_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap usb_bus = {
+static struct clk_regmap a1_usb_bus = {
.data = &(struct clk_regmap_gate_data){
.offset = USB_BUSCLK_CTRL,
.bit_idx = 8,
@@ -1417,21 +1417,21 @@ static struct clk_regmap usb_bus = {
.name = "usb_bus",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &usb_bus_div.hw
+ &a1_usb_bus_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data sd_emmc_psram_dmc_parents[] = {
+static const struct clk_parent_data a1_sd_emmc_parents[] = {
{ .fw_name = "fclk_div2", },
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div5", },
{ .fw_name = "hifi_pll", },
};
-static struct clk_regmap sd_emmc_sel = {
+static struct clk_regmap a1_sd_emmc_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = SD_EMMC_CLK_CTRL,
.mask = 0x3,
@@ -1440,12 +1440,12 @@ static struct clk_regmap sd_emmc_sel = {
.hw.init = &(struct clk_init_data){
.name = "sd_emmc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = sd_emmc_psram_dmc_parents,
- .num_parents = ARRAY_SIZE(sd_emmc_psram_dmc_parents),
+ .parent_data = a1_sd_emmc_parents,
+ .num_parents = ARRAY_SIZE(a1_sd_emmc_parents),
},
};
-static struct clk_regmap sd_emmc_div = {
+static struct clk_regmap a1_sd_emmc_div = {
.data = &(struct clk_regmap_div_data){
.offset = SD_EMMC_CLK_CTRL,
.shift = 0,
@@ -1455,14 +1455,14 @@ static struct clk_regmap sd_emmc_div = {
.name = "sd_emmc_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sd_emmc_sel.hw
+ &a1_sd_emmc_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap sd_emmc_sel2 = {
+static struct clk_regmap a1_sd_emmc_sel2 = {
.data = &(struct clk_regmap_mux_data){
.offset = SD_EMMC_CLK_CTRL,
.mask = 0x1,
@@ -1472,7 +1472,7 @@ static struct clk_regmap sd_emmc_sel2 = {
.name = "sd_emmc_sel2",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
- { .hw = &sd_emmc_div.hw },
+ { .hw = &a1_sd_emmc_div.hw },
{ .fw_name = "xtal", },
},
.num_parents = 2,
@@ -1480,7 +1480,7 @@ static struct clk_regmap sd_emmc_sel2 = {
},
};
-static struct clk_regmap sd_emmc = {
+static struct clk_regmap a1_sd_emmc = {
.data = &(struct clk_regmap_gate_data){
.offset = SD_EMMC_CLK_CTRL,
.bit_idx = 8,
@@ -1489,14 +1489,14 @@ static struct clk_regmap sd_emmc = {
.name = "sd_emmc",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sd_emmc_sel2.hw
+ &a1_sd_emmc_sel2.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap psram_sel = {
+static struct clk_regmap a1_psram_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = PSRAM_CLK_CTRL,
.mask = 0x3,
@@ -1505,12 +1505,12 @@ static struct clk_regmap psram_sel = {
.hw.init = &(struct clk_init_data){
.name = "psram_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = sd_emmc_psram_dmc_parents,
- .num_parents = ARRAY_SIZE(sd_emmc_psram_dmc_parents),
+ .parent_data = a1_sd_emmc_parents,
+ .num_parents = ARRAY_SIZE(a1_sd_emmc_parents),
},
};
-static struct clk_regmap psram_div = {
+static struct clk_regmap a1_psram_div = {
.data = &(struct clk_regmap_div_data){
.offset = PSRAM_CLK_CTRL,
.shift = 0,
@@ -1520,14 +1520,14 @@ static struct clk_regmap psram_div = {
.name = "psram_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &psram_sel.hw
+ &a1_psram_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap psram_sel2 = {
+static struct clk_regmap a1_psram_sel2 = {
.data = &(struct clk_regmap_mux_data){
.offset = PSRAM_CLK_CTRL,
.mask = 0x1,
@@ -1537,7 +1537,7 @@ static struct clk_regmap psram_sel2 = {
.name = "psram_sel2",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
- { .hw = &psram_div.hw },
+ { .hw = &a1_psram_div.hw },
{ .fw_name = "xtal", },
},
.num_parents = 2,
@@ -1545,7 +1545,7 @@ static struct clk_regmap psram_sel2 = {
},
};
-static struct clk_regmap psram = {
+static struct clk_regmap a1_psram = {
.data = &(struct clk_regmap_gate_data){
.offset = PSRAM_CLK_CTRL,
.bit_idx = 8,
@@ -1554,14 +1554,14 @@ static struct clk_regmap psram = {
.name = "psram",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &psram_sel2.hw
+ &a1_psram_sel2.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dmc_sel = {
+static struct clk_regmap a1_dmc_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = DMC_CLK_CTRL,
.mask = 0x3,
@@ -1570,12 +1570,12 @@ static struct clk_regmap dmc_sel = {
.hw.init = &(struct clk_init_data){
.name = "dmc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = sd_emmc_psram_dmc_parents,
- .num_parents = ARRAY_SIZE(sd_emmc_psram_dmc_parents),
+ .parent_data = a1_sd_emmc_parents,
+ .num_parents = ARRAY_SIZE(a1_sd_emmc_parents),
},
};
-static struct clk_regmap dmc_div = {
+static struct clk_regmap a1_dmc_div = {
.data = &(struct clk_regmap_div_data){
.offset = DMC_CLK_CTRL,
.shift = 0,
@@ -1585,14 +1585,14 @@ static struct clk_regmap dmc_div = {
.name = "dmc_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dmc_sel.hw
+ &a1_dmc_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dmc_sel2 = {
+static struct clk_regmap a1_dmc_sel2 = {
.data = &(struct clk_regmap_mux_data){
.offset = DMC_CLK_CTRL,
.mask = 0x1,
@@ -1602,7 +1602,7 @@ static struct clk_regmap dmc_sel2 = {
.name = "dmc_sel2",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
- { .hw = &dmc_div.hw },
+ { .hw = &a1_dmc_div.hw },
{ .fw_name = "xtal", },
},
.num_parents = 2,
@@ -1610,7 +1610,7 @@ static struct clk_regmap dmc_sel2 = {
},
};
-static struct clk_regmap dmc = {
+static struct clk_regmap a1_dmc = {
.data = &(struct clk_regmap_gate_data){
.offset = DMC_CLK_CTRL,
.bit_idx = 8,
@@ -1619,14 +1619,14 @@ static struct clk_regmap dmc = {
.name = "dmc",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dmc_sel2.hw
+ &a1_dmc_sel2.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap ceca_32k_in = {
+static struct clk_regmap a1_ceca_32k_in = {
.data = &(struct clk_regmap_gate_data){
.offset = CECA_CLK_CTRL0,
.bit_idx = 31,
@@ -1641,7 +1641,7 @@ static struct clk_regmap ceca_32k_in = {
},
};
-static struct clk_regmap ceca_32k_div = {
+static struct clk_regmap a1_ceca_32k_div = {
.data = &(struct meson_clk_dualdiv_data){
.n1 = {
.reg_off = CECA_CLK_CTRL0,
@@ -1668,19 +1668,19 @@ static struct clk_regmap ceca_32k_div = {
.shift = 28,
.width = 1,
},
- .table = clk_32k_div_table,
+ .table = a1_32k_div_table,
},
.hw.init = &(struct clk_init_data){
.name = "ceca_32k_div",
.ops = &meson_clk_dualdiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &ceca_32k_in.hw
+ &a1_ceca_32k_in.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap ceca_32k_sel_pre = {
+static struct clk_regmap a1_ceca_32k_sel_pre = {
.data = &(struct clk_regmap_mux_data) {
.offset = CECA_CLK_CTRL1,
.mask = 0x1,
@@ -1691,15 +1691,15 @@ static struct clk_regmap ceca_32k_sel_pre = {
.name = "ceca_32k_sel_pre",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &ceca_32k_div.hw,
- &ceca_32k_in.hw,
+ &a1_ceca_32k_div.hw,
+ &a1_ceca_32k_in.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap ceca_32k_sel = {
+static struct clk_regmap a1_ceca_32k_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = CECA_CLK_CTRL1,
.mask = 0x1,
@@ -1710,14 +1710,14 @@ static struct clk_regmap ceca_32k_sel = {
.name = "ceca_32k_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &ceca_32k_sel_pre.hw,
- &rtc.hw,
+ &a1_ceca_32k_sel_pre.hw,
+ &a1_rtc.hw,
},
.num_parents = 2,
},
};
-static struct clk_regmap ceca_32k_out = {
+static struct clk_regmap a1_ceca_32k_out = {
.data = &(struct clk_regmap_gate_data){
.offset = CECA_CLK_CTRL0,
.bit_idx = 30,
@@ -1726,14 +1726,14 @@ static struct clk_regmap ceca_32k_out = {
.name = "ceca_32k_out",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &ceca_32k_sel.hw
+ &a1_ceca_32k_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap cecb_32k_in = {
+static struct clk_regmap a1_cecb_32k_in = {
.data = &(struct clk_regmap_gate_data){
.offset = CECB_CLK_CTRL0,
.bit_idx = 31,
@@ -1748,7 +1748,7 @@ static struct clk_regmap cecb_32k_in = {
},
};
-static struct clk_regmap cecb_32k_div = {
+static struct clk_regmap a1_cecb_32k_div = {
.data = &(struct meson_clk_dualdiv_data){
.n1 = {
.reg_off = CECB_CLK_CTRL0,
@@ -1775,19 +1775,19 @@ static struct clk_regmap cecb_32k_div = {
.shift = 28,
.width = 1,
},
- .table = clk_32k_div_table,
+ .table = a1_32k_div_table,
},
.hw.init = &(struct clk_init_data){
.name = "cecb_32k_div",
.ops = &meson_clk_dualdiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &cecb_32k_in.hw
+ &a1_cecb_32k_in.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap cecb_32k_sel_pre = {
+static struct clk_regmap a1_cecb_32k_sel_pre = {
.data = &(struct clk_regmap_mux_data) {
.offset = CECB_CLK_CTRL1,
.mask = 0x1,
@@ -1798,15 +1798,15 @@ static struct clk_regmap cecb_32k_sel_pre = {
.name = "cecb_32k_sel_pre",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &cecb_32k_div.hw,
- &cecb_32k_in.hw,
+ &a1_cecb_32k_div.hw,
+ &a1_cecb_32k_in.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap cecb_32k_sel = {
+static struct clk_regmap a1_cecb_32k_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = CECB_CLK_CTRL1,
.mask = 0x1,
@@ -1817,14 +1817,14 @@ static struct clk_regmap cecb_32k_sel = {
.name = "cecb_32k_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &cecb_32k_sel_pre.hw,
- &rtc.hw,
+ &a1_cecb_32k_sel_pre.hw,
+ &a1_rtc.hw,
},
.num_parents = 2,
},
};
-static struct clk_regmap cecb_32k_out = {
+static struct clk_regmap a1_cecb_32k_out = {
.data = &(struct clk_regmap_gate_data){
.offset = CECB_CLK_CTRL0,
.bit_idx = 30,
@@ -1833,282 +1833,265 @@ static struct clk_regmap cecb_32k_out = {
.name = "cecb_32k_out",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &cecb_32k_sel.hw
+ &a1_cecb_32k_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-#define MESON_GATE(_name, _reg, _bit) \
- MESON_PCLK(_name, _reg, _bit, &sys.hw)
-
-static MESON_GATE(clktree, SYS_CLK_EN0, 0);
-static MESON_GATE(reset_ctrl, SYS_CLK_EN0, 1);
-static MESON_GATE(analog_ctrl, SYS_CLK_EN0, 2);
-static MESON_GATE(pwr_ctrl, SYS_CLK_EN0, 3);
-static MESON_GATE(pad_ctrl, SYS_CLK_EN0, 4);
-static MESON_GATE(sys_ctrl, SYS_CLK_EN0, 5);
-static MESON_GATE(temp_sensor, SYS_CLK_EN0, 6);
-static MESON_GATE(am2axi_dev, SYS_CLK_EN0, 7);
-static MESON_GATE(spicc_b, SYS_CLK_EN0, 8);
-static MESON_GATE(spicc_a, SYS_CLK_EN0, 9);
-static MESON_GATE(msr, SYS_CLK_EN0, 10);
-static MESON_GATE(audio, SYS_CLK_EN0, 11);
-static MESON_GATE(jtag_ctrl, SYS_CLK_EN0, 12);
-static MESON_GATE(saradc_en, SYS_CLK_EN0, 13);
-static MESON_GATE(pwm_ef, SYS_CLK_EN0, 14);
-static MESON_GATE(pwm_cd, SYS_CLK_EN0, 15);
-static MESON_GATE(pwm_ab, SYS_CLK_EN0, 16);
-static MESON_GATE(cec, SYS_CLK_EN0, 17);
-static MESON_GATE(i2c_s, SYS_CLK_EN0, 18);
-static MESON_GATE(ir_ctrl, SYS_CLK_EN0, 19);
-static MESON_GATE(i2c_m_d, SYS_CLK_EN0, 20);
-static MESON_GATE(i2c_m_c, SYS_CLK_EN0, 21);
-static MESON_GATE(i2c_m_b, SYS_CLK_EN0, 22);
-static MESON_GATE(i2c_m_a, SYS_CLK_EN0, 23);
-static MESON_GATE(acodec, SYS_CLK_EN0, 24);
-static MESON_GATE(otp, SYS_CLK_EN0, 25);
-static MESON_GATE(sd_emmc_a, SYS_CLK_EN0, 26);
-static MESON_GATE(usb_phy, SYS_CLK_EN0, 27);
-static MESON_GATE(usb_ctrl, SYS_CLK_EN0, 28);
-static MESON_GATE(sys_dspb, SYS_CLK_EN0, 29);
-static MESON_GATE(sys_dspa, SYS_CLK_EN0, 30);
-static MESON_GATE(dma, SYS_CLK_EN0, 31);
-static MESON_GATE(irq_ctrl, SYS_CLK_EN1, 0);
-static MESON_GATE(nic, SYS_CLK_EN1, 1);
-static MESON_GATE(gic, SYS_CLK_EN1, 2);
-static MESON_GATE(uart_c, SYS_CLK_EN1, 3);
-static MESON_GATE(uart_b, SYS_CLK_EN1, 4);
-static MESON_GATE(uart_a, SYS_CLK_EN1, 5);
-static MESON_GATE(sys_psram, SYS_CLK_EN1, 6);
-static MESON_GATE(rsa, SYS_CLK_EN1, 8);
-static MESON_GATE(coresight, SYS_CLK_EN1, 9);
-static MESON_GATE(am2axi_vad, AXI_CLK_EN, 0);
-static MESON_GATE(audio_vad, AXI_CLK_EN, 1);
-static MESON_GATE(axi_dmc, AXI_CLK_EN, 3);
-static MESON_GATE(axi_psram, AXI_CLK_EN, 4);
-static MESON_GATE(ramb, AXI_CLK_EN, 5);
-static MESON_GATE(rama, AXI_CLK_EN, 6);
-static MESON_GATE(axi_spifc, AXI_CLK_EN, 7);
-static MESON_GATE(axi_nic, AXI_CLK_EN, 8);
-static MESON_GATE(axi_dma, AXI_CLK_EN, 9);
-static MESON_GATE(cpu_ctrl, AXI_CLK_EN, 10);
-static MESON_GATE(rom, AXI_CLK_EN, 11);
-static MESON_GATE(prod_i2c, AXI_CLK_EN, 12);
+static const struct clk_parent_data a1_pclk_parents = { .hw = &a1_sys.hw };
+
+#define A1_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(a1_##_name, _reg, _bit, &a1_pclk_parents, _flags)
+
+/*
+ * NOTE: The gates below are marked with CLK_IGNORE_UNUSED for historic
+ * reasons. Users are encouraged to test without it and submit changes to:
+ * - remove the flag if it is not necessary
+ * - replace the flag with something more adequate, such as CLK_IS_CRITICAL,
+ *   if appropriate
+ * - add a comment explaining why the use of CLK_IGNORE_UNUSED is desirable
+ *   for a particular clock
+ */
+static A1_PCLK(clktree, SYS_CLK_EN0, 0, CLK_IGNORE_UNUSED);
+static A1_PCLK(reset_ctrl, SYS_CLK_EN0, 1, CLK_IGNORE_UNUSED);
+static A1_PCLK(analog_ctrl, SYS_CLK_EN0, 2, CLK_IGNORE_UNUSED);
+static A1_PCLK(pwr_ctrl, SYS_CLK_EN0, 3, CLK_IGNORE_UNUSED);
+static A1_PCLK(pad_ctrl, SYS_CLK_EN0, 4, CLK_IGNORE_UNUSED);
+static A1_PCLK(sys_ctrl, SYS_CLK_EN0, 5, CLK_IGNORE_UNUSED);
+static A1_PCLK(temp_sensor, SYS_CLK_EN0, 6, CLK_IGNORE_UNUSED);
+static A1_PCLK(am2axi_dev, SYS_CLK_EN0, 7, CLK_IGNORE_UNUSED);
+static A1_PCLK(spicc_b, SYS_CLK_EN0, 8, CLK_IGNORE_UNUSED);
+static A1_PCLK(spicc_a, SYS_CLK_EN0, 9, CLK_IGNORE_UNUSED);
+static A1_PCLK(msr, SYS_CLK_EN0, 10, CLK_IGNORE_UNUSED);
+static A1_PCLK(audio, SYS_CLK_EN0, 11, CLK_IGNORE_UNUSED);
+static A1_PCLK(jtag_ctrl, SYS_CLK_EN0, 12, CLK_IGNORE_UNUSED);
+static A1_PCLK(saradc_en, SYS_CLK_EN0, 13, CLK_IGNORE_UNUSED);
+static A1_PCLK(pwm_ef, SYS_CLK_EN0, 14, CLK_IGNORE_UNUSED);
+static A1_PCLK(pwm_cd, SYS_CLK_EN0, 15, CLK_IGNORE_UNUSED);
+static A1_PCLK(pwm_ab, SYS_CLK_EN0, 16, CLK_IGNORE_UNUSED);
+static A1_PCLK(cec, SYS_CLK_EN0, 17, CLK_IGNORE_UNUSED);
+static A1_PCLK(i2c_s, SYS_CLK_EN0, 18, CLK_IGNORE_UNUSED);
+static A1_PCLK(ir_ctrl, SYS_CLK_EN0, 19, CLK_IGNORE_UNUSED);
+static A1_PCLK(i2c_m_d, SYS_CLK_EN0, 20, CLK_IGNORE_UNUSED);
+static A1_PCLK(i2c_m_c, SYS_CLK_EN0, 21, CLK_IGNORE_UNUSED);
+static A1_PCLK(i2c_m_b, SYS_CLK_EN0, 22, CLK_IGNORE_UNUSED);
+static A1_PCLK(i2c_m_a, SYS_CLK_EN0, 23, CLK_IGNORE_UNUSED);
+static A1_PCLK(acodec, SYS_CLK_EN0, 24, CLK_IGNORE_UNUSED);
+static A1_PCLK(otp, SYS_CLK_EN0, 25, CLK_IGNORE_UNUSED);
+static A1_PCLK(sd_emmc_a, SYS_CLK_EN0, 26, CLK_IGNORE_UNUSED);
+static A1_PCLK(usb_phy, SYS_CLK_EN0, 27, CLK_IGNORE_UNUSED);
+static A1_PCLK(usb_ctrl, SYS_CLK_EN0, 28, CLK_IGNORE_UNUSED);
+static A1_PCLK(sys_dspb, SYS_CLK_EN0, 29, CLK_IGNORE_UNUSED);
+static A1_PCLK(sys_dspa, SYS_CLK_EN0, 30, CLK_IGNORE_UNUSED);
+static A1_PCLK(dma, SYS_CLK_EN0, 31, CLK_IGNORE_UNUSED);
+
+static A1_PCLK(irq_ctrl, SYS_CLK_EN1, 0, CLK_IGNORE_UNUSED);
+static A1_PCLK(nic, SYS_CLK_EN1, 1, CLK_IGNORE_UNUSED);
+static A1_PCLK(gic, SYS_CLK_EN1, 2, CLK_IGNORE_UNUSED);
+static A1_PCLK(uart_c, SYS_CLK_EN1, 3, CLK_IGNORE_UNUSED);
+static A1_PCLK(uart_b, SYS_CLK_EN1, 4, CLK_IGNORE_UNUSED);
+static A1_PCLK(uart_a, SYS_CLK_EN1, 5, CLK_IGNORE_UNUSED);
+static A1_PCLK(sys_psram, SYS_CLK_EN1, 6, CLK_IGNORE_UNUSED);
+static A1_PCLK(rsa, SYS_CLK_EN1, 8, CLK_IGNORE_UNUSED);
+static A1_PCLK(coresight, SYS_CLK_EN1, 9, CLK_IGNORE_UNUSED);
+
+static A1_PCLK(am2axi_vad, AXI_CLK_EN, 0, CLK_IGNORE_UNUSED);
+static A1_PCLK(audio_vad, AXI_CLK_EN, 1, CLK_IGNORE_UNUSED);
+static A1_PCLK(axi_dmc, AXI_CLK_EN, 3, CLK_IGNORE_UNUSED);
+static A1_PCLK(axi_psram, AXI_CLK_EN, 4, CLK_IGNORE_UNUSED);
+static A1_PCLK(ramb, AXI_CLK_EN, 5, CLK_IGNORE_UNUSED);
+static A1_PCLK(rama, AXI_CLK_EN, 6, CLK_IGNORE_UNUSED);
+static A1_PCLK(axi_spifc, AXI_CLK_EN, 7, CLK_IGNORE_UNUSED);
+static A1_PCLK(axi_nic, AXI_CLK_EN, 8, CLK_IGNORE_UNUSED);
+static A1_PCLK(axi_dma, AXI_CLK_EN, 9, CLK_IGNORE_UNUSED);
+static A1_PCLK(cpu_ctrl, AXI_CLK_EN, 10, CLK_IGNORE_UNUSED);
+static A1_PCLK(rom, AXI_CLK_EN, 11, CLK_IGNORE_UNUSED);
+static A1_PCLK(prod_i2c, AXI_CLK_EN, 12, CLK_IGNORE_UNUSED);
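/*
 * Editor's sketch, not part of this patch: the new MESON_PCLK() definition
 * is not shown in this hunk, but assuming it keeps the usual shape of the
 * old macro (a clk_regmap gate wired to a single clk_parent_data entry,
 * now with caller-supplied flags), a line such as
 *
 *     static A1_PCLK(clktree, SYS_CLK_EN0, 0, CLK_IGNORE_UNUSED);
 *
 * would expand to roughly the following. The exact .name string depends on
 * how the real macro stringifies its argument:
 */
static struct clk_regmap a1_clktree = {
	.data = &(struct clk_regmap_gate_data) {
		.offset = SYS_CLK_EN0,
		.bit_idx = 0,
	},
	.hw.init = &(struct clk_init_data) {
		.name = "a1_clktree",
		.ops = &clk_regmap_gate_ops,
		.parent_data = &a1_pclk_parents,
		.num_parents = 1,
		.flags = CLK_IGNORE_UNUSED,
	},
};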
/* Array of all clocks registered by this provider */
-static struct clk_hw *a1_periphs_hw_clks[] = {
- [CLKID_XTAL_IN] = &xtal_in.hw,
- [CLKID_FIXPLL_IN] = &fixpll_in.hw,
- [CLKID_USB_PHY_IN] = &usb_phy_in.hw,
- [CLKID_USB_CTRL_IN] = &usb_ctrl_in.hw,
- [CLKID_HIFIPLL_IN] = &hifipll_in.hw,
- [CLKID_SYSPLL_IN] = &syspll_in.hw,
- [CLKID_DDS_IN] = &dds_in.hw,
- [CLKID_SYS] = &sys.hw,
- [CLKID_CLKTREE] = &clktree.hw,
- [CLKID_RESET_CTRL] = &reset_ctrl.hw,
- [CLKID_ANALOG_CTRL] = &analog_ctrl.hw,
- [CLKID_PWR_CTRL] = &pwr_ctrl.hw,
- [CLKID_PAD_CTRL] = &pad_ctrl.hw,
- [CLKID_SYS_CTRL] = &sys_ctrl.hw,
- [CLKID_TEMP_SENSOR] = &temp_sensor.hw,
- [CLKID_AM2AXI_DIV] = &am2axi_dev.hw,
- [CLKID_SPICC_B] = &spicc_b.hw,
- [CLKID_SPICC_A] = &spicc_a.hw,
- [CLKID_MSR] = &msr.hw,
- [CLKID_AUDIO] = &audio.hw,
- [CLKID_JTAG_CTRL] = &jtag_ctrl.hw,
- [CLKID_SARADC_EN] = &saradc_en.hw,
- [CLKID_PWM_EF] = &pwm_ef.hw,
- [CLKID_PWM_CD] = &pwm_cd.hw,
- [CLKID_PWM_AB] = &pwm_ab.hw,
- [CLKID_CEC] = &cec.hw,
- [CLKID_I2C_S] = &i2c_s.hw,
- [CLKID_IR_CTRL] = &ir_ctrl.hw,
- [CLKID_I2C_M_D] = &i2c_m_d.hw,
- [CLKID_I2C_M_C] = &i2c_m_c.hw,
- [CLKID_I2C_M_B] = &i2c_m_b.hw,
- [CLKID_I2C_M_A] = &i2c_m_a.hw,
- [CLKID_ACODEC] = &acodec.hw,
- [CLKID_OTP] = &otp.hw,
- [CLKID_SD_EMMC_A] = &sd_emmc_a.hw,
- [CLKID_USB_PHY] = &usb_phy.hw,
- [CLKID_USB_CTRL] = &usb_ctrl.hw,
- [CLKID_SYS_DSPB] = &sys_dspb.hw,
- [CLKID_SYS_DSPA] = &sys_dspa.hw,
- [CLKID_DMA] = &dma.hw,
- [CLKID_IRQ_CTRL] = &irq_ctrl.hw,
- [CLKID_NIC] = &nic.hw,
- [CLKID_GIC] = &gic.hw,
- [CLKID_UART_C] = &uart_c.hw,
- [CLKID_UART_B] = &uart_b.hw,
- [CLKID_UART_A] = &uart_a.hw,
- [CLKID_SYS_PSRAM] = &sys_psram.hw,
- [CLKID_RSA] = &rsa.hw,
- [CLKID_CORESIGHT] = &coresight.hw,
- [CLKID_AM2AXI_VAD] = &am2axi_vad.hw,
- [CLKID_AUDIO_VAD] = &audio_vad.hw,
- [CLKID_AXI_DMC] = &axi_dmc.hw,
- [CLKID_AXI_PSRAM] = &axi_psram.hw,
- [CLKID_RAMB] = &ramb.hw,
- [CLKID_RAMA] = &rama.hw,
- [CLKID_AXI_SPIFC] = &axi_spifc.hw,
- [CLKID_AXI_NIC] = &axi_nic.hw,
- [CLKID_AXI_DMA] = &axi_dma.hw,
- [CLKID_CPU_CTRL] = &cpu_ctrl.hw,
- [CLKID_ROM] = &rom.hw,
- [CLKID_PROC_I2C] = &prod_i2c.hw,
- [CLKID_DSPA_SEL] = &dspa_sel.hw,
- [CLKID_DSPB_SEL] = &dspb_sel.hw,
- [CLKID_DSPA_EN] = &dspa_en.hw,
- [CLKID_DSPA_EN_NIC] = &dspa_en_nic.hw,
- [CLKID_DSPB_EN] = &dspb_en.hw,
- [CLKID_DSPB_EN_NIC] = &dspb_en_nic.hw,
- [CLKID_RTC] = &rtc.hw,
- [CLKID_CECA_32K] = &ceca_32k_out.hw,
- [CLKID_CECB_32K] = &cecb_32k_out.hw,
- [CLKID_24M] = &clk_24m.hw,
- [CLKID_12M] = &clk_12m.hw,
- [CLKID_FCLK_DIV2_DIVN] = &fclk_div2_divn.hw,
- [CLKID_GEN] = &gen.hw,
- [CLKID_SARADC_SEL] = &saradc_sel.hw,
- [CLKID_SARADC] = &saradc.hw,
- [CLKID_PWM_A] = &pwm_a.hw,
- [CLKID_PWM_B] = &pwm_b.hw,
- [CLKID_PWM_C] = &pwm_c.hw,
- [CLKID_PWM_D] = &pwm_d.hw,
- [CLKID_PWM_E] = &pwm_e.hw,
- [CLKID_PWM_F] = &pwm_f.hw,
- [CLKID_SPICC] = &spicc.hw,
- [CLKID_TS] = &ts.hw,
- [CLKID_SPIFC] = &spifc.hw,
- [CLKID_USB_BUS] = &usb_bus.hw,
- [CLKID_SD_EMMC] = &sd_emmc.hw,
- [CLKID_PSRAM] = &psram.hw,
- [CLKID_DMC] = &dmc.hw,
- [CLKID_SYS_A_SEL] = &sys_a_sel.hw,
- [CLKID_SYS_A_DIV] = &sys_a_div.hw,
- [CLKID_SYS_A] = &sys_a.hw,
- [CLKID_SYS_B_SEL] = &sys_b_sel.hw,
- [CLKID_SYS_B_DIV] = &sys_b_div.hw,
- [CLKID_SYS_B] = &sys_b.hw,
- [CLKID_DSPA_A_SEL] = &dspa_a_sel.hw,
- [CLKID_DSPA_A_DIV] = &dspa_a_div.hw,
- [CLKID_DSPA_A] = &dspa_a.hw,
- [CLKID_DSPA_B_SEL] = &dspa_b_sel.hw,
- [CLKID_DSPA_B_DIV] = &dspa_b_div.hw,
- [CLKID_DSPA_B] = &dspa_b.hw,
- [CLKID_DSPB_A_SEL] = &dspb_a_sel.hw,
- [CLKID_DSPB_A_DIV] = &dspb_a_div.hw,
- [CLKID_DSPB_A] = &dspb_a.hw,
- [CLKID_DSPB_B_SEL] = &dspb_b_sel.hw,
- [CLKID_DSPB_B_DIV] = &dspb_b_div.hw,
- [CLKID_DSPB_B] = &dspb_b.hw,
- [CLKID_RTC_32K_IN] = &rtc_32k_in.hw,
- [CLKID_RTC_32K_DIV] = &rtc_32k_div.hw,
- [CLKID_RTC_32K_XTAL] = &rtc_32k_xtal.hw,
- [CLKID_RTC_32K_SEL] = &rtc_32k_sel.hw,
- [CLKID_CECB_32K_IN] = &cecb_32k_in.hw,
- [CLKID_CECB_32K_DIV] = &cecb_32k_div.hw,
- [CLKID_CECB_32K_SEL_PRE] = &cecb_32k_sel_pre.hw,
- [CLKID_CECB_32K_SEL] = &cecb_32k_sel.hw,
- [CLKID_CECA_32K_IN] = &ceca_32k_in.hw,
- [CLKID_CECA_32K_DIV] = &ceca_32k_div.hw,
- [CLKID_CECA_32K_SEL_PRE] = &ceca_32k_sel_pre.hw,
- [CLKID_CECA_32K_SEL] = &ceca_32k_sel.hw,
- [CLKID_DIV2_PRE] = &fclk_div2_divn_pre.hw,
- [CLKID_24M_DIV2] = &clk_24m_div2.hw,
- [CLKID_GEN_SEL] = &gen_sel.hw,
- [CLKID_GEN_DIV] = &gen_div.hw,
- [CLKID_SARADC_DIV] = &saradc_div.hw,
- [CLKID_PWM_A_SEL] = &pwm_a_sel.hw,
- [CLKID_PWM_A_DIV] = &pwm_a_div.hw,
- [CLKID_PWM_B_SEL] = &pwm_b_sel.hw,
- [CLKID_PWM_B_DIV] = &pwm_b_div.hw,
- [CLKID_PWM_C_SEL] = &pwm_c_sel.hw,
- [CLKID_PWM_C_DIV] = &pwm_c_div.hw,
- [CLKID_PWM_D_SEL] = &pwm_d_sel.hw,
- [CLKID_PWM_D_DIV] = &pwm_d_div.hw,
- [CLKID_PWM_E_SEL] = &pwm_e_sel.hw,
- [CLKID_PWM_E_DIV] = &pwm_e_div.hw,
- [CLKID_PWM_F_SEL] = &pwm_f_sel.hw,
- [CLKID_PWM_F_DIV] = &pwm_f_div.hw,
- [CLKID_SPICC_SEL] = &spicc_sel.hw,
- [CLKID_SPICC_DIV] = &spicc_div.hw,
- [CLKID_SPICC_SEL2] = &spicc_sel2.hw,
- [CLKID_TS_DIV] = &ts_div.hw,
- [CLKID_SPIFC_SEL] = &spifc_sel.hw,
- [CLKID_SPIFC_DIV] = &spifc_div.hw,
- [CLKID_SPIFC_SEL2] = &spifc_sel2.hw,
- [CLKID_USB_BUS_SEL] = &usb_bus_sel.hw,
- [CLKID_USB_BUS_DIV] = &usb_bus_div.hw,
- [CLKID_SD_EMMC_SEL] = &sd_emmc_sel.hw,
- [CLKID_SD_EMMC_DIV] = &sd_emmc_div.hw,
- [CLKID_SD_EMMC_SEL2] = &sd_emmc_sel2.hw,
- [CLKID_PSRAM_SEL] = &psram_sel.hw,
- [CLKID_PSRAM_DIV] = &psram_div.hw,
- [CLKID_PSRAM_SEL2] = &psram_sel2.hw,
- [CLKID_DMC_SEL] = &dmc_sel.hw,
- [CLKID_DMC_DIV] = &dmc_div.hw,
- [CLKID_DMC_SEL2] = &dmc_sel2.hw,
-};
-
-static const struct regmap_config a1_periphs_regmap_cfg = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = DMC_CLK_CTRL,
-};
-
-static struct meson_clk_hw_data a1_periphs_clks = {
- .hws = a1_periphs_hw_clks,
- .num = ARRAY_SIZE(a1_periphs_hw_clks),
-};
-
-static int meson_a1_periphs_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- void __iomem *base;
- struct regmap *map;
- int clkid, err;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return dev_err_probe(dev, PTR_ERR(base),
- "can't ioremap resource\n");
-
- map = devm_regmap_init_mmio(dev, base, &a1_periphs_regmap_cfg);
- if (IS_ERR(map))
- return dev_err_probe(dev, PTR_ERR(map),
- "can't init regmap mmio region\n");
-
- for (clkid = 0; clkid < a1_periphs_clks.num; clkid++) {
- err = devm_clk_hw_register(dev, a1_periphs_clks.hws[clkid]);
- if (err)
- return dev_err_probe(dev, err,
- "clock[%d] registration failed\n",
- clkid);
- }
-
- return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get, &a1_periphs_clks);
-}
-
-static const struct of_device_id a1_periphs_clkc_match_table[] = {
- { .compatible = "amlogic,a1-peripherals-clkc", },
+static struct clk_hw *a1_peripherals_hw_clks[] = {
+ [CLKID_XTAL_IN] = &a1_xtal_in.hw,
+ [CLKID_FIXPLL_IN] = &a1_fixpll_in.hw,
+ [CLKID_USB_PHY_IN] = &a1_usb_phy_in.hw,
+ [CLKID_USB_CTRL_IN] = &a1_usb_ctrl_in.hw,
+ [CLKID_HIFIPLL_IN] = &a1_hifipll_in.hw,
+ [CLKID_SYSPLL_IN] = &a1_syspll_in.hw,
+ [CLKID_DDS_IN] = &a1_dds_in.hw,
+ [CLKID_SYS] = &a1_sys.hw,
+ [CLKID_CLKTREE] = &a1_clktree.hw,
+ [CLKID_RESET_CTRL] = &a1_reset_ctrl.hw,
+ [CLKID_ANALOG_CTRL] = &a1_analog_ctrl.hw,
+ [CLKID_PWR_CTRL] = &a1_pwr_ctrl.hw,
+ [CLKID_PAD_CTRL] = &a1_pad_ctrl.hw,
+ [CLKID_SYS_CTRL] = &a1_sys_ctrl.hw,
+ [CLKID_TEMP_SENSOR] = &a1_temp_sensor.hw,
+ [CLKID_AM2AXI_DIV] = &a1_am2axi_dev.hw,
+ [CLKID_SPICC_B] = &a1_spicc_b.hw,
+ [CLKID_SPICC_A] = &a1_spicc_a.hw,
+ [CLKID_MSR] = &a1_msr.hw,
+ [CLKID_AUDIO] = &a1_audio.hw,
+ [CLKID_JTAG_CTRL] = &a1_jtag_ctrl.hw,
+ [CLKID_SARADC_EN] = &a1_saradc_en.hw,
+ [CLKID_PWM_EF] = &a1_pwm_ef.hw,
+ [CLKID_PWM_CD] = &a1_pwm_cd.hw,
+ [CLKID_PWM_AB] = &a1_pwm_ab.hw,
+ [CLKID_CEC] = &a1_cec.hw,
+ [CLKID_I2C_S] = &a1_i2c_s.hw,
+ [CLKID_IR_CTRL] = &a1_ir_ctrl.hw,
+ [CLKID_I2C_M_D] = &a1_i2c_m_d.hw,
+ [CLKID_I2C_M_C] = &a1_i2c_m_c.hw,
+ [CLKID_I2C_M_B] = &a1_i2c_m_b.hw,
+ [CLKID_I2C_M_A] = &a1_i2c_m_a.hw,
+ [CLKID_ACODEC] = &a1_acodec.hw,
+ [CLKID_OTP] = &a1_otp.hw,
+ [CLKID_SD_EMMC_A] = &a1_sd_emmc_a.hw,
+ [CLKID_USB_PHY] = &a1_usb_phy.hw,
+ [CLKID_USB_CTRL] = &a1_usb_ctrl.hw,
+ [CLKID_SYS_DSPB] = &a1_sys_dspb.hw,
+ [CLKID_SYS_DSPA] = &a1_sys_dspa.hw,
+ [CLKID_DMA] = &a1_dma.hw,
+ [CLKID_IRQ_CTRL] = &a1_irq_ctrl.hw,
+ [CLKID_NIC] = &a1_nic.hw,
+ [CLKID_GIC] = &a1_gic.hw,
+ [CLKID_UART_C] = &a1_uart_c.hw,
+ [CLKID_UART_B] = &a1_uart_b.hw,
+ [CLKID_UART_A] = &a1_uart_a.hw,
+ [CLKID_SYS_PSRAM] = &a1_sys_psram.hw,
+ [CLKID_RSA] = &a1_rsa.hw,
+ [CLKID_CORESIGHT] = &a1_coresight.hw,
+ [CLKID_AM2AXI_VAD] = &a1_am2axi_vad.hw,
+ [CLKID_AUDIO_VAD] = &a1_audio_vad.hw,
+ [CLKID_AXI_DMC] = &a1_axi_dmc.hw,
+ [CLKID_AXI_PSRAM] = &a1_axi_psram.hw,
+ [CLKID_RAMB] = &a1_ramb.hw,
+ [CLKID_RAMA] = &a1_rama.hw,
+ [CLKID_AXI_SPIFC] = &a1_axi_spifc.hw,
+ [CLKID_AXI_NIC] = &a1_axi_nic.hw,
+ [CLKID_AXI_DMA] = &a1_axi_dma.hw,
+ [CLKID_CPU_CTRL] = &a1_cpu_ctrl.hw,
+ [CLKID_ROM] = &a1_rom.hw,
+ [CLKID_PROC_I2C] = &a1_prod_i2c.hw,
+ [CLKID_DSPA_SEL] = &a1_dspa_sel.hw,
+ [CLKID_DSPB_SEL] = &a1_dspb_sel.hw,
+ [CLKID_DSPA_EN] = &a1_dspa_en.hw,
+ [CLKID_DSPA_EN_NIC] = &a1_dspa_en_nic.hw,
+ [CLKID_DSPB_EN] = &a1_dspb_en.hw,
+ [CLKID_DSPB_EN_NIC] = &a1_dspb_en_nic.hw,
+ [CLKID_RTC] = &a1_rtc.hw,
+ [CLKID_CECA_32K] = &a1_ceca_32k_out.hw,
+ [CLKID_CECB_32K] = &a1_cecb_32k_out.hw,
+ [CLKID_24M] = &a1_24m.hw,
+ [CLKID_12M] = &a1_12m.hw,
+ [CLKID_FCLK_DIV2_DIVN] = &a1_fclk_div2_divn.hw,
+ [CLKID_GEN] = &a1_gen.hw,
+ [CLKID_SARADC_SEL] = &a1_saradc_sel.hw,
+ [CLKID_SARADC] = &a1_saradc.hw,
+ [CLKID_PWM_A] = &a1_pwm_a.hw,
+ [CLKID_PWM_B] = &a1_pwm_b.hw,
+ [CLKID_PWM_C] = &a1_pwm_c.hw,
+ [CLKID_PWM_D] = &a1_pwm_d.hw,
+ [CLKID_PWM_E] = &a1_pwm_e.hw,
+ [CLKID_PWM_F] = &a1_pwm_f.hw,
+ [CLKID_SPICC] = &a1_spicc.hw,
+ [CLKID_TS] = &a1_ts.hw,
+ [CLKID_SPIFC] = &a1_spifc.hw,
+ [CLKID_USB_BUS] = &a1_usb_bus.hw,
+ [CLKID_SD_EMMC] = &a1_sd_emmc.hw,
+ [CLKID_PSRAM] = &a1_psram.hw,
+ [CLKID_DMC] = &a1_dmc.hw,
+ [CLKID_SYS_A_SEL] = &a1_sys_a_sel.hw,
+ [CLKID_SYS_A_DIV] = &a1_sys_a_div.hw,
+ [CLKID_SYS_A] = &a1_sys_a.hw,
+ [CLKID_SYS_B_SEL] = &a1_sys_b_sel.hw,
+ [CLKID_SYS_B_DIV] = &a1_sys_b_div.hw,
+ [CLKID_SYS_B] = &a1_sys_b.hw,
+ [CLKID_DSPA_A_SEL] = &a1_dspa_a_sel.hw,
+ [CLKID_DSPA_A_DIV] = &a1_dspa_a_div.hw,
+ [CLKID_DSPA_A] = &a1_dspa_a.hw,
+ [CLKID_DSPA_B_SEL] = &a1_dspa_b_sel.hw,
+ [CLKID_DSPA_B_DIV] = &a1_dspa_b_div.hw,
+ [CLKID_DSPA_B] = &a1_dspa_b.hw,
+ [CLKID_DSPB_A_SEL] = &a1_dspb_a_sel.hw,
+ [CLKID_DSPB_A_DIV] = &a1_dspb_a_div.hw,
+ [CLKID_DSPB_A] = &a1_dspb_a.hw,
+ [CLKID_DSPB_B_SEL] = &a1_dspb_b_sel.hw,
+ [CLKID_DSPB_B_DIV] = &a1_dspb_b_div.hw,
+ [CLKID_DSPB_B] = &a1_dspb_b.hw,
+ [CLKID_RTC_32K_IN] = &a1_rtc_32k_in.hw,
+ [CLKID_RTC_32K_DIV] = &a1_rtc_32k_div.hw,
+ [CLKID_RTC_32K_XTAL] = &a1_rtc_32k_xtal.hw,
+ [CLKID_RTC_32K_SEL] = &a1_rtc_32k_sel.hw,
+ [CLKID_CECB_32K_IN] = &a1_cecb_32k_in.hw,
+ [CLKID_CECB_32K_DIV] = &a1_cecb_32k_div.hw,
+ [CLKID_CECB_32K_SEL_PRE] = &a1_cecb_32k_sel_pre.hw,
+ [CLKID_CECB_32K_SEL] = &a1_cecb_32k_sel.hw,
+ [CLKID_CECA_32K_IN] = &a1_ceca_32k_in.hw,
+ [CLKID_CECA_32K_DIV] = &a1_ceca_32k_div.hw,
+ [CLKID_CECA_32K_SEL_PRE] = &a1_ceca_32k_sel_pre.hw,
+ [CLKID_CECA_32K_SEL] = &a1_ceca_32k_sel.hw,
+ [CLKID_DIV2_PRE] = &a1_fclk_div2_divn_pre.hw,
+ [CLKID_24M_DIV2] = &a1_24m_div2.hw,
+ [CLKID_GEN_SEL] = &a1_gen_sel.hw,
+ [CLKID_GEN_DIV] = &a1_gen_div.hw,
+ [CLKID_SARADC_DIV] = &a1_saradc_div.hw,
+ [CLKID_PWM_A_SEL] = &a1_pwm_a_sel.hw,
+ [CLKID_PWM_A_DIV] = &a1_pwm_a_div.hw,
+ [CLKID_PWM_B_SEL] = &a1_pwm_b_sel.hw,
+ [CLKID_PWM_B_DIV] = &a1_pwm_b_div.hw,
+ [CLKID_PWM_C_SEL] = &a1_pwm_c_sel.hw,
+ [CLKID_PWM_C_DIV] = &a1_pwm_c_div.hw,
+ [CLKID_PWM_D_SEL] = &a1_pwm_d_sel.hw,
+ [CLKID_PWM_D_DIV] = &a1_pwm_d_div.hw,
+ [CLKID_PWM_E_SEL] = &a1_pwm_e_sel.hw,
+ [CLKID_PWM_E_DIV] = &a1_pwm_e_div.hw,
+ [CLKID_PWM_F_SEL] = &a1_pwm_f_sel.hw,
+ [CLKID_PWM_F_DIV] = &a1_pwm_f_div.hw,
+ [CLKID_SPICC_SEL] = &a1_spicc_sel.hw,
+ [CLKID_SPICC_DIV] = &a1_spicc_div.hw,
+ [CLKID_SPICC_SEL2] = &a1_spicc_sel2.hw,
+ [CLKID_TS_DIV] = &a1_ts_div.hw,
+ [CLKID_SPIFC_SEL] = &a1_spifc_sel.hw,
+ [CLKID_SPIFC_DIV] = &a1_spifc_div.hw,
+ [CLKID_SPIFC_SEL2] = &a1_spifc_sel2.hw,
+ [CLKID_USB_BUS_SEL] = &a1_usb_bus_sel.hw,
+ [CLKID_USB_BUS_DIV] = &a1_usb_bus_div.hw,
+ [CLKID_SD_EMMC_SEL] = &a1_sd_emmc_sel.hw,
+ [CLKID_SD_EMMC_DIV] = &a1_sd_emmc_div.hw,
+ [CLKID_SD_EMMC_SEL2] = &a1_sd_emmc_sel2.hw,
+ [CLKID_PSRAM_SEL] = &a1_psram_sel.hw,
+ [CLKID_PSRAM_DIV] = &a1_psram_div.hw,
+ [CLKID_PSRAM_SEL2] = &a1_psram_sel2.hw,
+ [CLKID_DMC_SEL] = &a1_dmc_sel.hw,
+ [CLKID_DMC_DIV] = &a1_dmc_div.hw,
+ [CLKID_DMC_SEL2] = &a1_dmc_sel2.hw,
+};
+
+static const struct meson_clkc_data a1_peripherals_clkc_data = {
+ .hw_clks = {
+ .hws = a1_peripherals_hw_clks,
+ .num = ARRAY_SIZE(a1_peripherals_hw_clks),
+ },
+};
+
+static const struct of_device_id a1_peripherals_clkc_match_table[] = {
+ {
+ .compatible = "amlogic,a1-peripherals-clkc",
+ .data = &a1_peripherals_clkc_data,
+ },
{}
};
-MODULE_DEVICE_TABLE(of, a1_periphs_clkc_match_table);
+MODULE_DEVICE_TABLE(of, a1_peripherals_clkc_match_table);
-static struct platform_driver a1_periphs_clkc_driver = {
- .probe = meson_a1_periphs_probe,
+static struct platform_driver a1_peripherals_clkc_driver = {
+ .probe = meson_clkc_mmio_probe,
.driver = {
.name = "a1-peripherals-clkc",
- .of_match_table = a1_periphs_clkc_match_table,
+ .of_match_table = a1_peripherals_clkc_match_table,
},
};
-module_platform_driver(a1_periphs_clkc_driver);
+module_platform_driver(a1_peripherals_clkc_driver);
MODULE_DESCRIPTION("Amlogic A1 Peripherals Clock Controller driver");
MODULE_AUTHOR("Jian Hu <jian.hu@amlogic.com>");
diff --git a/drivers/clk/meson/a1-pll.c b/drivers/clk/meson/a1-pll.c
index dabd4fad1f57..1f82e9c7c14e 100644
--- a/drivers/clk/meson/a1-pll.c
+++ b/drivers/clk/meson/a1-pll.c
@@ -26,7 +26,7 @@
#include <dt-bindings/clock/amlogic,a1-pll-clkc.h>
-static struct clk_regmap fixed_pll_dco = {
+static struct clk_regmap a1_fixed_pll_dco = {
.data = &(struct meson_clk_pll_data){
.en = {
.reg_off = ANACTRL_FIXPLL_CTRL0,
@@ -69,7 +69,7 @@ static struct clk_regmap fixed_pll_dco = {
},
};
-static struct clk_regmap fixed_pll = {
+static struct clk_regmap a1_fixed_pll = {
.data = &(struct clk_regmap_gate_data){
.offset = ANACTRL_FIXPLL_CTRL0,
.bit_idx = 20,
@@ -78,18 +78,18 @@ static struct clk_regmap fixed_pll = {
.name = "fixed_pll",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fixed_pll_dco.hw
+ &a1_fixed_pll_dco.hw
},
.num_parents = 1,
},
};
-static const struct pll_mult_range hifi_pll_mult_range = {
+static const struct pll_mult_range a1_hifi_pll_range = {
.min = 32,
.max = 64,
};
-static const struct reg_sequence hifi_init_regs[] = {
+static const struct reg_sequence a1_hifi_pll_init_regs[] = {
{ .reg = ANACTRL_HIFIPLL_CTRL1, .def = 0x01800000 },
{ .reg = ANACTRL_HIFIPLL_CTRL2, .def = 0x00001100 },
{ .reg = ANACTRL_HIFIPLL_CTRL3, .def = 0x100a1100 },
@@ -97,7 +97,7 @@ static const struct reg_sequence hifi_init_regs[] = {
{ .reg = ANACTRL_HIFIPLL_CTRL0, .def = 0x01f18000 },
};
-static struct clk_regmap hifi_pll = {
+static struct clk_regmap a1_hifi_pll = {
.data = &(struct meson_clk_pll_data){
.en = {
.reg_off = ANACTRL_HIFIPLL_CTRL0,
@@ -134,9 +134,9 @@ static struct clk_regmap hifi_pll = {
.shift = 6,
.width = 1,
},
- .range = &hifi_pll_mult_range,
- .init_regs = hifi_init_regs,
- .init_count = ARRAY_SIZE(hifi_init_regs),
+ .range = &a1_hifi_pll_range,
+ .init_regs = a1_hifi_pll_init_regs,
+ .init_count = ARRAY_SIZE(a1_hifi_pll_init_regs),
},
.hw.init = &(struct clk_init_data){
.name = "hifi_pll",
@@ -148,20 +148,20 @@ static struct clk_regmap hifi_pll = {
},
};
-static struct clk_fixed_factor fclk_div2_div = {
+static struct clk_fixed_factor a1_fclk_div2_div = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data){
.name = "fclk_div2_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fixed_pll.hw
+ &a1_fixed_pll.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap fclk_div2 = {
+static struct clk_regmap a1_fclk_div2 = {
.data = &(struct clk_regmap_gate_data){
.offset = ANACTRL_FIXPLL_CTRL0,
.bit_idx = 21,
@@ -170,7 +170,7 @@ static struct clk_regmap fclk_div2 = {
.name = "fclk_div2",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div2_div.hw
+ &a1_fclk_div2_div.hw
},
.num_parents = 1,
/*
@@ -186,20 +186,20 @@ static struct clk_regmap fclk_div2 = {
},
};
-static struct clk_fixed_factor fclk_div3_div = {
+static struct clk_fixed_factor a1_fclk_div3_div = {
.mult = 1,
.div = 3,
.hw.init = &(struct clk_init_data){
.name = "fclk_div3_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fixed_pll.hw
+ &a1_fixed_pll.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap fclk_div3 = {
+static struct clk_regmap a1_fclk_div3 = {
.data = &(struct clk_regmap_gate_data){
.offset = ANACTRL_FIXPLL_CTRL0,
.bit_idx = 22,
@@ -208,7 +208,7 @@ static struct clk_regmap fclk_div3 = {
.name = "fclk_div3",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div3_div.hw
+ &a1_fclk_div3_div.hw
},
.num_parents = 1,
/*
@@ -219,20 +219,20 @@ static struct clk_regmap fclk_div3 = {
},
};
-static struct clk_fixed_factor fclk_div5_div = {
+static struct clk_fixed_factor a1_fclk_div5_div = {
.mult = 1,
.div = 5,
.hw.init = &(struct clk_init_data){
.name = "fclk_div5_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fixed_pll.hw
+ &a1_fixed_pll.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap fclk_div5 = {
+static struct clk_regmap a1_fclk_div5 = {
.data = &(struct clk_regmap_gate_data){
.offset = ANACTRL_FIXPLL_CTRL0,
.bit_idx = 23,
@@ -241,7 +241,7 @@ static struct clk_regmap fclk_div5 = {
.name = "fclk_div5",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div5_div.hw
+ &a1_fclk_div5_div.hw
},
.num_parents = 1,
/*
@@ -252,20 +252,20 @@ static struct clk_regmap fclk_div5 = {
},
};
-static struct clk_fixed_factor fclk_div7_div = {
+static struct clk_fixed_factor a1_fclk_div7_div = {
.mult = 1,
.div = 7,
.hw.init = &(struct clk_init_data){
.name = "fclk_div7_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fixed_pll.hw
+ &a1_fixed_pll.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap fclk_div7 = {
+static struct clk_regmap a1_fclk_div7 = {
.data = &(struct clk_regmap_gate_data){
.offset = ANACTRL_FIXPLL_CTRL0,
.bit_idx = 24,
@@ -274,7 +274,7 @@ static struct clk_regmap fclk_div7 = {
.name = "fclk_div7",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div7_div.hw
+ &a1_fclk_div7_div.hw
},
.num_parents = 1,
},
@@ -282,69 +282,37 @@ static struct clk_regmap fclk_div7 = {
/* Array of all clocks registered by this provider */
static struct clk_hw *a1_pll_hw_clks[] = {
- [CLKID_FIXED_PLL_DCO] = &fixed_pll_dco.hw,
- [CLKID_FIXED_PLL] = &fixed_pll.hw,
- [CLKID_FCLK_DIV2_DIV] = &fclk_div2_div.hw,
- [CLKID_FCLK_DIV3_DIV] = &fclk_div3_div.hw,
- [CLKID_FCLK_DIV5_DIV] = &fclk_div5_div.hw,
- [CLKID_FCLK_DIV7_DIV] = &fclk_div7_div.hw,
- [CLKID_FCLK_DIV2] = &fclk_div2.hw,
- [CLKID_FCLK_DIV3] = &fclk_div3.hw,
- [CLKID_FCLK_DIV5] = &fclk_div5.hw,
- [CLKID_FCLK_DIV7] = &fclk_div7.hw,
- [CLKID_HIFI_PLL] = &hifi_pll.hw,
+ [CLKID_FIXED_PLL_DCO] = &a1_fixed_pll_dco.hw,
+ [CLKID_FIXED_PLL] = &a1_fixed_pll.hw,
+ [CLKID_FCLK_DIV2_DIV] = &a1_fclk_div2_div.hw,
+ [CLKID_FCLK_DIV3_DIV] = &a1_fclk_div3_div.hw,
+ [CLKID_FCLK_DIV5_DIV] = &a1_fclk_div5_div.hw,
+ [CLKID_FCLK_DIV7_DIV] = &a1_fclk_div7_div.hw,
+ [CLKID_FCLK_DIV2] = &a1_fclk_div2.hw,
+ [CLKID_FCLK_DIV3] = &a1_fclk_div3.hw,
+ [CLKID_FCLK_DIV5] = &a1_fclk_div5.hw,
+ [CLKID_FCLK_DIV7] = &a1_fclk_div7.hw,
+ [CLKID_HIFI_PLL] = &a1_hifi_pll.hw,
};
-static const struct regmap_config a1_pll_regmap_cfg = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = ANACTRL_HIFIPLL_STS,
-};
-
-static struct meson_clk_hw_data a1_pll_clks = {
- .hws = a1_pll_hw_clks,
- .num = ARRAY_SIZE(a1_pll_hw_clks),
+static const struct meson_clkc_data a1_pll_clkc_data = {
+ .hw_clks = {
+ .hws = a1_pll_hw_clks,
+ .num = ARRAY_SIZE(a1_pll_hw_clks),
+ },
};
-static int meson_a1_pll_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- void __iomem *base;
- struct regmap *map;
- int clkid, err;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return dev_err_probe(dev, PTR_ERR(base),
- "can't ioremap resource\n");
-
- map = devm_regmap_init_mmio(dev, base, &a1_pll_regmap_cfg);
- if (IS_ERR(map))
- return dev_err_probe(dev, PTR_ERR(map),
- "can't init regmap mmio region\n");
-
- /* Register clocks */
- for (clkid = 0; clkid < a1_pll_clks.num; clkid++) {
- err = devm_clk_hw_register(dev, a1_pll_clks.hws[clkid]);
- if (err)
- return dev_err_probe(dev, err,
- "clock[%d] registration failed\n",
- clkid);
- }
-
- return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get,
- &a1_pll_clks);
-}
-
static const struct of_device_id a1_pll_clkc_match_table[] = {
- { .compatible = "amlogic,a1-pll-clkc", },
+ {
+ .compatible = "amlogic,a1-pll-clkc",
+ .data = &a1_pll_clkc_data,
+ },
{}
};
MODULE_DEVICE_TABLE(of, a1_pll_clkc_match_table);
static struct platform_driver a1_pll_clkc_driver = {
- .probe = meson_a1_pll_probe,
+ .probe = meson_clkc_mmio_probe,
.driver = {
.name = "a1-pll-clkc",
.of_match_table = a1_pll_clkc_match_table,
diff --git a/drivers/clk/meson/axg-aoclk.c b/drivers/clk/meson/axg-aoclk.c
index cd5d0b5ebdb2..902fbd34039c 100644
--- a/drivers/clk/meson/axg-aoclk.c
+++ b/drivers/clk/meson/axg-aoclk.c
@@ -34,32 +34,21 @@
#define AO_RTC_ALT_CLK_CNTL0 0x94
#define AO_RTC_ALT_CLK_CNTL1 0x98
-#define AXG_AO_GATE(_name, _bit) \
-static struct clk_regmap axg_aoclk_##_name = { \
- .data = &(struct clk_regmap_gate_data) { \
- .offset = (AO_RTI_GEN_CNTL_REG0), \
- .bit_idx = (_bit), \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = "axg_ao_" #_name, \
- .ops = &clk_regmap_gate_ops, \
- .parent_data = &(const struct clk_parent_data) { \
- .fw_name = "mpeg-clk", \
- }, \
- .num_parents = 1, \
- .flags = CLK_IGNORE_UNUSED, \
- }, \
-}
+static const struct clk_parent_data axg_ao_pclk_parents = { .fw_name = "mpeg-clk" };
-AXG_AO_GATE(remote, 0);
-AXG_AO_GATE(i2c_master, 1);
-AXG_AO_GATE(i2c_slave, 2);
-AXG_AO_GATE(uart1, 3);
-AXG_AO_GATE(uart2, 5);
-AXG_AO_GATE(ir_blaster, 6);
-AXG_AO_GATE(saradc, 7);
+#define AXG_AO_GATE(_name, _bit, _flags) \
+ MESON_PCLK(axg_ao_##_name, AO_RTI_GEN_CNTL_REG0, _bit, \
+ &axg_ao_pclk_parents, _flags)
-static struct clk_regmap axg_aoclk_cts_oscin = {
+static AXG_AO_GATE(remote, 0, CLK_IGNORE_UNUSED);
+static AXG_AO_GATE(i2c_master, 1, CLK_IGNORE_UNUSED);
+static AXG_AO_GATE(i2c_slave, 2, CLK_IGNORE_UNUSED);
+static AXG_AO_GATE(uart1, 3, CLK_IGNORE_UNUSED);
+static AXG_AO_GATE(uart2, 5, CLK_IGNORE_UNUSED);
+static AXG_AO_GATE(ir_blaster, 6, CLK_IGNORE_UNUSED);
+static AXG_AO_GATE(saradc, 7, CLK_IGNORE_UNUSED);
+
+static struct clk_regmap axg_ao_cts_oscin = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTI_PWR_CNTL_REG0,
.bit_idx = 14,
@@ -74,7 +63,7 @@ static struct clk_regmap axg_aoclk_cts_oscin = {
},
};
-static struct clk_regmap axg_aoclk_32k_pre = {
+static struct clk_regmap axg_ao_32k_pre = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTC_ALT_CLK_CNTL0,
.bit_idx = 31,
@@ -83,7 +72,7 @@ static struct clk_regmap axg_aoclk_32k_pre = {
.name = "axg_ao_32k_pre",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &axg_aoclk_cts_oscin.hw
+ &axg_ao_cts_oscin.hw
},
.num_parents = 1,
},
@@ -99,7 +88,7 @@ static const struct meson_clk_dualdiv_param axg_32k_div_table[] = {
}, {}
};
-static struct clk_regmap axg_aoclk_32k_div = {
+static struct clk_regmap axg_ao_32k_div = {
.data = &(struct meson_clk_dualdiv_data){
.n1 = {
.reg_off = AO_RTC_ALT_CLK_CNTL0,
@@ -132,13 +121,13 @@ static struct clk_regmap axg_aoclk_32k_div = {
.name = "axg_ao_32k_div",
.ops = &meson_clk_dualdiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &axg_aoclk_32k_pre.hw
+ &axg_ao_32k_pre.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap axg_aoclk_32k_sel = {
+static struct clk_regmap axg_ao_32k_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTC_ALT_CLK_CNTL1,
.mask = 0x1,
@@ -149,15 +138,15 @@ static struct clk_regmap axg_aoclk_32k_sel = {
.name = "axg_ao_32k_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &axg_aoclk_32k_div.hw,
- &axg_aoclk_32k_pre.hw,
+ &axg_ao_32k_div.hw,
+ &axg_ao_32k_pre.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap axg_aoclk_32k = {
+static struct clk_regmap axg_ao_32k = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTC_ALT_CLK_CNTL0,
.bit_idx = 30,
@@ -166,14 +155,14 @@ static struct clk_regmap axg_aoclk_32k = {
.name = "axg_ao_32k",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &axg_aoclk_32k_sel.hw
+ &axg_ao_32k_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap axg_aoclk_cts_rtc_oscin = {
+static struct clk_regmap axg_ao_cts_rtc_oscin = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTI_PWR_CNTL_REG0,
.mask = 0x1,
@@ -184,7 +173,7 @@ static struct clk_regmap axg_aoclk_cts_rtc_oscin = {
.name = "axg_ao_cts_rtc_oscin",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
- { .hw = &axg_aoclk_32k.hw },
+ { .hw = &axg_ao_32k.hw },
{ .fw_name = "ext_32k-0", },
},
.num_parents = 2,
@@ -192,7 +181,7 @@ static struct clk_regmap axg_aoclk_cts_rtc_oscin = {
},
};
-static struct clk_regmap axg_aoclk_clk81 = {
+static struct clk_regmap axg_ao_clk81 = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTI_PWR_CNTL_REG0,
.mask = 0x1,
@@ -200,68 +189,74 @@ static struct clk_regmap axg_aoclk_clk81 = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
+ /*
+ * NOTE: this is one of the infamous clocks the pwm driver can
+ * request directly by its global name. This is wrong, but there
+ * is not much we can do about it until support for the old pwm
+ * bindings is dropped.
+ */
.name = "axg_ao_clk81",
.ops = &clk_regmap_mux_ro_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "mpeg-clk", },
- { .hw = &axg_aoclk_cts_rtc_oscin.hw },
+ { .hw = &axg_ao_cts_rtc_oscin.hw },
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap axg_aoclk_saradc_mux = {
+static struct clk_regmap axg_ao_saradc_mux = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_SAR_CLK,
.mask = 0x3,
.shift = 9,
},
.hw.init = &(struct clk_init_data){
- .name = "axg_ao_saradc_mux",
+ .name = "ao_saradc_mux",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "xtal", },
- { .hw = &axg_aoclk_clk81.hw },
+ { .hw = &axg_ao_clk81.hw },
},
.num_parents = 2,
},
};
-static struct clk_regmap axg_aoclk_saradc_div = {
+static struct clk_regmap axg_ao_saradc_div = {
.data = &(struct clk_regmap_div_data) {
.offset = AO_SAR_CLK,
.shift = 0,
.width = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "axg_ao_saradc_div",
+ .name = "ao_saradc_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &axg_aoclk_saradc_mux.hw
+ &axg_ao_saradc_mux.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap axg_aoclk_saradc_gate = {
+static struct clk_regmap axg_ao_saradc_gate = {
.data = &(struct clk_regmap_gate_data) {
.offset = AO_SAR_CLK,
.bit_idx = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "axg_ao_saradc_gate",
+ .name = "ao_saradc_gate",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &axg_aoclk_saradc_div.hw
+ &axg_ao_saradc_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const unsigned int axg_aoclk_reset[] = {
+static const unsigned int axg_ao_reset[] = {
[RESET_AO_REMOTE] = 16,
[RESET_AO_I2C_MASTER] = 18,
[RESET_AO_I2C_SLAVE] = 19,
@@ -270,53 +265,55 @@ static const unsigned int axg_aoclk_reset[] = {
[RESET_AO_IR_BLASTER] = 23,
};
-static struct clk_hw *axg_aoclk_hw_clks[] = {
- [CLKID_AO_REMOTE] = &axg_aoclk_remote.hw,
- [CLKID_AO_I2C_MASTER] = &axg_aoclk_i2c_master.hw,
- [CLKID_AO_I2C_SLAVE] = &axg_aoclk_i2c_slave.hw,
- [CLKID_AO_UART1] = &axg_aoclk_uart1.hw,
- [CLKID_AO_UART2] = &axg_aoclk_uart2.hw,
- [CLKID_AO_IR_BLASTER] = &axg_aoclk_ir_blaster.hw,
- [CLKID_AO_SAR_ADC] = &axg_aoclk_saradc.hw,
- [CLKID_AO_CLK81] = &axg_aoclk_clk81.hw,
- [CLKID_AO_SAR_ADC_SEL] = &axg_aoclk_saradc_mux.hw,
- [CLKID_AO_SAR_ADC_DIV] = &axg_aoclk_saradc_div.hw,
- [CLKID_AO_SAR_ADC_CLK] = &axg_aoclk_saradc_gate.hw,
- [CLKID_AO_CTS_OSCIN] = &axg_aoclk_cts_oscin.hw,
- [CLKID_AO_32K_PRE] = &axg_aoclk_32k_pre.hw,
- [CLKID_AO_32K_DIV] = &axg_aoclk_32k_div.hw,
- [CLKID_AO_32K_SEL] = &axg_aoclk_32k_sel.hw,
- [CLKID_AO_32K] = &axg_aoclk_32k.hw,
- [CLKID_AO_CTS_RTC_OSCIN] = &axg_aoclk_cts_rtc_oscin.hw,
+static struct clk_hw *axg_ao_hw_clks[] = {
+ [CLKID_AO_REMOTE] = &axg_ao_remote.hw,
+ [CLKID_AO_I2C_MASTER] = &axg_ao_i2c_master.hw,
+ [CLKID_AO_I2C_SLAVE] = &axg_ao_i2c_slave.hw,
+ [CLKID_AO_UART1] = &axg_ao_uart1.hw,
+ [CLKID_AO_UART2] = &axg_ao_uart2.hw,
+ [CLKID_AO_IR_BLASTER] = &axg_ao_ir_blaster.hw,
+ [CLKID_AO_SAR_ADC] = &axg_ao_saradc.hw,
+ [CLKID_AO_CLK81] = &axg_ao_clk81.hw,
+ [CLKID_AO_SAR_ADC_SEL] = &axg_ao_saradc_mux.hw,
+ [CLKID_AO_SAR_ADC_DIV] = &axg_ao_saradc_div.hw,
+ [CLKID_AO_SAR_ADC_CLK] = &axg_ao_saradc_gate.hw,
+ [CLKID_AO_CTS_OSCIN] = &axg_ao_cts_oscin.hw,
+ [CLKID_AO_32K_PRE] = &axg_ao_32k_pre.hw,
+ [CLKID_AO_32K_DIV] = &axg_ao_32k_div.hw,
+ [CLKID_AO_32K_SEL] = &axg_ao_32k_sel.hw,
+ [CLKID_AO_32K] = &axg_ao_32k.hw,
+ [CLKID_AO_CTS_RTC_OSCIN] = &axg_ao_cts_rtc_oscin.hw,
};
-static const struct meson_aoclk_data axg_aoclkc_data = {
+static const struct meson_aoclk_data axg_ao_clkc_data = {
.reset_reg = AO_RTI_GEN_CNTL_REG0,
- .num_reset = ARRAY_SIZE(axg_aoclk_reset),
- .reset = axg_aoclk_reset,
- .hw_clks = {
- .hws = axg_aoclk_hw_clks,
- .num = ARRAY_SIZE(axg_aoclk_hw_clks),
+ .num_reset = ARRAY_SIZE(axg_ao_reset),
+ .reset = axg_ao_reset,
+ .clkc_data = {
+ .hw_clks = {
+ .hws = axg_ao_hw_clks,
+ .num = ARRAY_SIZE(axg_ao_hw_clks),
+ },
},
};
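/*
 * Editor's note (an assumption, based on the match-table change below):
 * embedding a meson_clkc_data inside meson_aoclk_data and pointing the OF
 * match data at &axg_ao_clkc_data.clkc_data lets generic clkc code consume
 * the match data directly, while meson_aoclkc_probe() can presumably
 * recover the enclosing meson_aoclk_data with container_of().
 */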
-static const struct of_device_id axg_aoclkc_match_table[] = {
+static const struct of_device_id axg_ao_clkc_match_table[] = {
{
.compatible = "amlogic,meson-axg-aoclkc",
- .data = &axg_aoclkc_data,
+ .data = &axg_ao_clkc_data.clkc_data,
},
{ }
};
-MODULE_DEVICE_TABLE(of, axg_aoclkc_match_table);
+MODULE_DEVICE_TABLE(of, axg_ao_clkc_match_table);
-static struct platform_driver axg_aoclkc_driver = {
+static struct platform_driver axg_ao_clkc_driver = {
.probe = meson_aoclkc_probe,
.driver = {
- .name = "axg-aoclkc",
- .of_match_table = axg_aoclkc_match_table,
+ .name = "axg-ao-clkc",
+ .of_match_table = axg_ao_clkc_match_table,
},
};
-module_platform_driver(axg_aoclkc_driver);
+module_platform_driver(axg_ao_clkc_driver);
MODULE_DESCRIPTION("Amlogic AXG Always-ON Clock Controller driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
index 208833c3ee95..0a25c649ef1d 100644
--- a/drivers/clk/meson/axg.c
+++ b/drivers/clk/meson/axg.c
@@ -18,7 +18,7 @@
#include "clk-regmap.h"
#include "clk-pll.h"
#include "clk-mpll.h"
-#include "meson-eeclk.h"
+#include "meson-clkc-utils.h"
#include <dt-bindings/clock/axg-clkc.h>
@@ -333,7 +333,7 @@ static struct clk_regmap axg_gp0_pll = {
},
};
-static const struct reg_sequence axg_hifi_init_regs[] = {
+static const struct reg_sequence axg_hifi_pll_init_regs[] = {
{ .reg = HHI_HIFI_PLL_CNTL1, .def = 0xc084b000 },
{ .reg = HHI_HIFI_PLL_CNTL2, .def = 0xb75020be },
{ .reg = HHI_HIFI_PLL_CNTL3, .def = 0x0a6a3a88 },
@@ -374,8 +374,8 @@ static struct clk_regmap axg_hifi_pll_dco = {
.width = 1,
},
.table = axg_gp0_pll_params_table,
- .init_regs = axg_hifi_init_regs,
- .init_count = ARRAY_SIZE(axg_hifi_init_regs),
+ .init_regs = axg_hifi_pll_init_regs,
+ .init_count = ARRAY_SIZE(axg_hifi_pll_init_regs),
.flags = CLK_MESON_PLL_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
@@ -780,7 +780,7 @@ static const struct pll_params_table axg_pcie_pll_params_table[] = {
{ /* sentinel */ },
};
-static const struct reg_sequence axg_pcie_init_regs[] = {
+static const struct reg_sequence axg_pcie_pll_init_regs[] = {
{ .reg = HHI_PCIE_PLL_CNTL1, .def = 0x0084a2aa },
{ .reg = HHI_PCIE_PLL_CNTL2, .def = 0xb75020be },
{ .reg = HHI_PCIE_PLL_CNTL3, .def = 0x0a47488e },
@@ -823,8 +823,8 @@ static struct clk_regmap axg_pcie_pll_dco = {
.width = 1,
},
.table = axg_pcie_pll_params_table,
- .init_regs = axg_pcie_init_regs,
- .init_count = ARRAY_SIZE(axg_pcie_init_regs),
+ .init_regs = axg_pcie_pll_init_regs,
+ .init_count = ARRAY_SIZE(axg_pcie_pll_init_regs),
},
.hw.init = &(struct clk_init_data){
.name = "pcie_pll_dco",
@@ -935,8 +935,9 @@ static struct clk_regmap axg_pcie_cml_en1 = {
},
};
-static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
-static const struct clk_parent_data clk81_parent_data[] = {
+/* clk81 is often referred to as "mpeg_clk" */
+static u32 clk81_parents_val_table[] = { 0, 2, 3, 4, 5, 6, 7 };
+static const struct clk_parent_data clk81_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &axg_fclk_div7.hw },
{ .hw = &axg_mpll1.hw },
@@ -946,32 +947,32 @@ static const struct clk_parent_data clk81_parent_data[] = {
{ .hw = &axg_fclk_div5.hw },
};
-static struct clk_regmap axg_mpeg_clk_sel = {
+static struct clk_regmap axg_clk81_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_MPEG_CLK_CNTL,
.mask = 0x7,
.shift = 12,
- .table = mux_table_clk81,
+ .table = clk81_parents_val_table,
},
.hw.init = &(struct clk_init_data){
- .name = "mpeg_clk_sel",
+ .name = "clk81_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_data = clk81_parent_data,
- .num_parents = ARRAY_SIZE(clk81_parent_data),
+ .parent_data = clk81_parents,
+ .num_parents = ARRAY_SIZE(clk81_parents),
},
};
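/*
 * Editor's note: the new "_val_table" suffix reflects what the table
 * holds -- register values indexed by parent position. For clk81_sel
 * above, selecting parent index 1 (axg_fclk_div7) writes
 * clk81_parents_val_table[1] == 2 into bits [14:12] of
 * HHI_MPEG_CLK_CNTL; value 1 is skipped, presumably because that mux
 * input is not wired to a usable parent.
 */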
-static struct clk_regmap axg_mpeg_clk_div = {
+static struct clk_regmap axg_clk81_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_MPEG_CLK_CNTL,
.shift = 0,
.width = 7,
},
.hw.init = &(struct clk_init_data){
- .name = "mpeg_clk_div",
+ .name = "clk81_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &axg_mpeg_clk_sel.hw
+ &axg_clk81_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -987,14 +988,14 @@ static struct clk_regmap axg_clk81 = {
.name = "clk81",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &axg_mpeg_clk_div.hw
+ &axg_clk81_div.hw
},
.num_parents = 1,
.flags = (CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
},
};
-static const struct clk_parent_data axg_sd_emmc_clk0_parent_data[] = {
+static const struct clk_parent_data axg_sd_emmc_clk0_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &axg_fclk_div2.hw },
{ .hw = &axg_fclk_div3.hw },
@@ -1018,8 +1019,8 @@ static struct clk_regmap axg_sd_emmc_b_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = axg_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(axg_sd_emmc_clk0_parent_data),
+ .parent_data = axg_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(axg_sd_emmc_clk0_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1068,8 +1069,8 @@ static struct clk_regmap axg_sd_emmc_c_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = axg_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(axg_sd_emmc_clk0_parent_data),
+ .parent_data = axg_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(axg_sd_emmc_clk0_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1110,7 +1111,7 @@ static struct clk_regmap axg_sd_emmc_c_clk0 = {
/* VPU Clock */
-static const struct clk_hw *axg_vpu_parent_hws[] = {
+static const struct clk_hw *axg_vpu_parents[] = {
&axg_fclk_div4.hw,
&axg_fclk_div3.hw,
&axg_fclk_div5.hw,
@@ -1126,8 +1127,8 @@ static struct clk_regmap axg_vpu_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = axg_vpu_parent_hws,
- .num_parents = ARRAY_SIZE(axg_vpu_parent_hws),
+ .parent_hws = axg_vpu_parents,
+ .num_parents = ARRAY_SIZE(axg_vpu_parents),
/* We need a specific parent for VPU clock source, let it be set in DT */
.flags = CLK_SET_RATE_NO_REPARENT,
},
@@ -1175,8 +1176,8 @@ static struct clk_regmap axg_vpu_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = axg_vpu_parent_hws,
- .num_parents = ARRAY_SIZE(axg_vpu_parent_hws),
+ .parent_hws = axg_vpu_parents,
+ .num_parents = ARRAY_SIZE(axg_vpu_parents),
/* We need a specific parent for VPU clock source, let it be set in DT */
.flags = CLK_SET_RATE_NO_REPARENT,
},
@@ -1244,8 +1245,8 @@ static struct clk_regmap axg_vapb_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vapb_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = axg_vpu_parent_hws,
- .num_parents = ARRAY_SIZE(axg_vpu_parent_hws),
+ .parent_hws = axg_vpu_parents,
+ .num_parents = ARRAY_SIZE(axg_vpu_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1292,8 +1293,8 @@ static struct clk_regmap axg_vapb_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vapb_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = axg_vpu_parent_hws,
- .num_parents = ARRAY_SIZE(axg_vpu_parent_hws),
+ .parent_hws = axg_vpu_parents,
+ .num_parents = ARRAY_SIZE(axg_vpu_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1365,7 +1366,7 @@ static struct clk_regmap axg_vapb = {
/* Video Clocks */
-static const struct clk_hw *axg_vclk_parent_hws[] = {
+static const struct clk_hw *axg_vclk_parents[] = {
&axg_gp0_pll.hw,
&axg_fclk_div4.hw,
&axg_fclk_div3.hw,
@@ -1384,8 +1385,8 @@ static struct clk_regmap axg_vclk_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = axg_vclk_parent_hws,
- .num_parents = ARRAY_SIZE(axg_vclk_parent_hws),
+ .parent_hws = axg_vclk_parents,
+ .num_parents = ARRAY_SIZE(axg_vclk_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -1399,8 +1400,8 @@ static struct clk_regmap axg_vclk2_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = axg_vclk_parent_hws,
- .num_parents = ARRAY_SIZE(axg_vclk_parent_hws),
+ .parent_hws = axg_vclk_parents,
+ .num_parents = ARRAY_SIZE(axg_vclk_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -1739,8 +1740,8 @@ static struct clk_fixed_factor axg_vclk2_div12 = {
},
};
-static u32 mux_table_cts_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
-static const struct clk_hw *axg_cts_parent_hws[] = {
+static u32 axg_cts_encl_parents_val_table[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const struct clk_hw *axg_cts_encl_parents[] = {
&axg_vclk_div1.hw,
&axg_vclk_div2.hw,
&axg_vclk_div4.hw,
@@ -1758,13 +1759,13 @@ static struct clk_regmap axg_cts_encl_sel = {
.offset = HHI_VIID_CLK_DIV,
.mask = 0xf,
.shift = 12,
- .table = mux_table_cts_sel,
+ .table = axg_cts_encl_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_encl_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = axg_cts_parent_hws,
- .num_parents = ARRAY_SIZE(axg_cts_parent_hws),
+ .parent_hws = axg_cts_encl_parents,
+ .num_parents = ARRAY_SIZE(axg_cts_encl_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -1787,8 +1788,8 @@ static struct clk_regmap axg_cts_encl = {
/* MIPI DSI Host Clock */
-static u32 mux_table_axg_vdin_meas[] = { 0, 1, 2, 3, 6, 7 };
-static const struct clk_parent_data axg_vdin_meas_parent_data[] = {
+static u32 axg_vdin_meas_parents_val_table[] = { 0, 1, 2, 3, 6, 7 };
+static const struct clk_parent_data axg_vdin_meas_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &axg_fclk_div4.hw },
{ .hw = &axg_fclk_div3.hw },
@@ -1803,13 +1804,13 @@ static struct clk_regmap axg_vdin_meas_sel = {
.mask = 0x7,
.shift = 21,
.flags = CLK_MUX_ROUND_CLOSEST,
- .table = mux_table_axg_vdin_meas,
+ .table = axg_vdin_meas_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "vdin_meas_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = axg_vdin_meas_parent_data,
- .num_parents = ARRAY_SIZE(axg_vdin_meas_parent_data),
+ .parent_data = axg_vdin_meas_parents,
+ .num_parents = ARRAY_SIZE(axg_vdin_meas_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1845,9 +1846,8 @@ static struct clk_regmap axg_vdin_meas = {
},
};
-static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8,
- 9, 10, 11, 13, 14, };
-static const struct clk_parent_data gen_clk_parent_data[] = {
+static u32 gen_clk_parents_val_table[] = { 0, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, };
+static const struct clk_parent_data gen_clk_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &axg_hifi_pll.hw },
{ .hw = &axg_mpll0.hw },
@@ -1866,7 +1866,7 @@ static struct clk_regmap axg_gen_clk_sel = {
.offset = HHI_GEN_CLK_CNTL,
.mask = 0xf,
.shift = 12,
- .table = mux_table_gen_clk,
+ .table = gen_clk_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "gen_clk_sel",
@@ -1877,8 +1877,8 @@ static struct clk_regmap axg_gen_clk_sel = {
* hifi_pll, mpll0, mpll1, mpll2, mpll3, fdiv4,
* fdiv3, fdiv5, [cts_msr_clk], fdiv7, gp0_pll
*/
- .parent_data = gen_clk_parent_data,
- .num_parents = ARRAY_SIZE(gen_clk_parent_data),
+ .parent_data = gen_clk_parents,
+ .num_parents = ARRAY_SIZE(gen_clk_parents),
},
};
@@ -1915,59 +1915,71 @@ static struct clk_regmap axg_gen_clk = {
},
};
-#define MESON_GATE(_name, _reg, _bit) \
- MESON_PCLK(_name, _reg, _bit, &axg_clk81.hw)
-
-/* Everything Else (EE) domain gates */
-static MESON_GATE(axg_ddr, HHI_GCLK_MPEG0, 0);
-static MESON_GATE(axg_audio_locker, HHI_GCLK_MPEG0, 2);
-static MESON_GATE(axg_mipi_dsi_host, HHI_GCLK_MPEG0, 3);
-static MESON_GATE(axg_isa, HHI_GCLK_MPEG0, 5);
-static MESON_GATE(axg_pl301, HHI_GCLK_MPEG0, 6);
-static MESON_GATE(axg_periphs, HHI_GCLK_MPEG0, 7);
-static MESON_GATE(axg_spicc_0, HHI_GCLK_MPEG0, 8);
-static MESON_GATE(axg_i2c, HHI_GCLK_MPEG0, 9);
-static MESON_GATE(axg_rng0, HHI_GCLK_MPEG0, 12);
-static MESON_GATE(axg_uart0, HHI_GCLK_MPEG0, 13);
-static MESON_GATE(axg_mipi_dsi_phy, HHI_GCLK_MPEG0, 14);
-static MESON_GATE(axg_spicc_1, HHI_GCLK_MPEG0, 15);
-static MESON_GATE(axg_pcie_a, HHI_GCLK_MPEG0, 16);
-static MESON_GATE(axg_pcie_b, HHI_GCLK_MPEG0, 17);
-static MESON_GATE(axg_hiu_reg, HHI_GCLK_MPEG0, 19);
-static MESON_GATE(axg_assist_misc, HHI_GCLK_MPEG0, 23);
-static MESON_GATE(axg_emmc_b, HHI_GCLK_MPEG0, 25);
-static MESON_GATE(axg_emmc_c, HHI_GCLK_MPEG0, 26);
-static MESON_GATE(axg_dma, HHI_GCLK_MPEG0, 27);
-static MESON_GATE(axg_spi, HHI_GCLK_MPEG0, 30);
-
-static MESON_GATE(axg_audio, HHI_GCLK_MPEG1, 0);
-static MESON_GATE(axg_eth_core, HHI_GCLK_MPEG1, 3);
-static MESON_GATE(axg_uart1, HHI_GCLK_MPEG1, 16);
-static MESON_GATE(axg_g2d, HHI_GCLK_MPEG1, 20);
-static MESON_GATE(axg_usb0, HHI_GCLK_MPEG1, 21);
-static MESON_GATE(axg_usb1, HHI_GCLK_MPEG1, 22);
-static MESON_GATE(axg_reset, HHI_GCLK_MPEG1, 23);
-static MESON_GATE(axg_usb_general, HHI_GCLK_MPEG1, 26);
-static MESON_GATE(axg_ahb_arb0, HHI_GCLK_MPEG1, 29);
-static MESON_GATE(axg_efuse, HHI_GCLK_MPEG1, 30);
-static MESON_GATE(axg_boot_rom, HHI_GCLK_MPEG1, 31);
-
-static MESON_GATE(axg_ahb_data_bus, HHI_GCLK_MPEG2, 1);
-static MESON_GATE(axg_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2);
-static MESON_GATE(axg_usb1_to_ddr, HHI_GCLK_MPEG2, 8);
-static MESON_GATE(axg_usb0_to_ddr, HHI_GCLK_MPEG2, 9);
-static MESON_GATE(axg_mmc_pclk, HHI_GCLK_MPEG2, 11);
-static MESON_GATE(axg_vpu_intr, HHI_GCLK_MPEG2, 25);
-static MESON_GATE(axg_sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26);
-static MESON_GATE(axg_gic, HHI_GCLK_MPEG2, 30);
+static const struct clk_parent_data axg_pclk_parents = { .hw = &axg_clk81.hw };
+
+#define AXG_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(axg_##_name, _reg, _bit, &axg_pclk_parents, _flags)
+
+/*
+ * Everything Else (EE) domain gates
+ *
+ * NOTE: The gates below are marked with CLK_IGNORE_UNUSED for historic
+ * reasons. Users are encouraged to test without it and submit changes to:
+ * - remove the flag if it is not necessary
+ * - replace the flag with something more adequate, such as CLK_IS_CRITICAL,
+ *   if appropriate
+ * - add a comment explaining why the use of CLK_IGNORE_UNUSED is desirable
+ *   for a particular clock
+ */
+static AXG_PCLK(ddr, HHI_GCLK_MPEG0, 0, CLK_IGNORE_UNUSED);
+static AXG_PCLK(audio_locker, HHI_GCLK_MPEG0, 2, CLK_IGNORE_UNUSED);
+static AXG_PCLK(mipi_dsi_host, HHI_GCLK_MPEG0, 3, CLK_IGNORE_UNUSED);
+static AXG_PCLK(isa, HHI_GCLK_MPEG0, 5, CLK_IGNORE_UNUSED);
+static AXG_PCLK(pl301, HHI_GCLK_MPEG0, 6, CLK_IGNORE_UNUSED);
+static AXG_PCLK(periphs, HHI_GCLK_MPEG0, 7, CLK_IGNORE_UNUSED);
+static AXG_PCLK(spicc_0, HHI_GCLK_MPEG0, 8, CLK_IGNORE_UNUSED);
+static AXG_PCLK(i2c, HHI_GCLK_MPEG0, 9, CLK_IGNORE_UNUSED);
+static AXG_PCLK(rng0, HHI_GCLK_MPEG0, 12, CLK_IGNORE_UNUSED);
+static AXG_PCLK(uart0, HHI_GCLK_MPEG0, 13, CLK_IGNORE_UNUSED);
+static AXG_PCLK(mipi_dsi_phy, HHI_GCLK_MPEG0, 14, CLK_IGNORE_UNUSED);
+static AXG_PCLK(spicc_1, HHI_GCLK_MPEG0, 15, CLK_IGNORE_UNUSED);
+static AXG_PCLK(pcie_a, HHI_GCLK_MPEG0, 16, CLK_IGNORE_UNUSED);
+static AXG_PCLK(pcie_b, HHI_GCLK_MPEG0, 17, CLK_IGNORE_UNUSED);
+static AXG_PCLK(hiu_reg, HHI_GCLK_MPEG0, 19, CLK_IGNORE_UNUSED);
+static AXG_PCLK(assist_misc, HHI_GCLK_MPEG0, 23, CLK_IGNORE_UNUSED);
+static AXG_PCLK(emmc_b, HHI_GCLK_MPEG0, 25, CLK_IGNORE_UNUSED);
+static AXG_PCLK(emmc_c, HHI_GCLK_MPEG0, 26, CLK_IGNORE_UNUSED);
+static AXG_PCLK(dma, HHI_GCLK_MPEG0, 27, CLK_IGNORE_UNUSED);
+static AXG_PCLK(spi, HHI_GCLK_MPEG0, 30, CLK_IGNORE_UNUSED);
+
+static AXG_PCLK(audio, HHI_GCLK_MPEG1, 0, CLK_IGNORE_UNUSED);
+static AXG_PCLK(eth_core, HHI_GCLK_MPEG1, 3, CLK_IGNORE_UNUSED);
+static AXG_PCLK(uart1, HHI_GCLK_MPEG1, 16, CLK_IGNORE_UNUSED);
+static AXG_PCLK(g2d, HHI_GCLK_MPEG1, 20, CLK_IGNORE_UNUSED);
+static AXG_PCLK(usb0, HHI_GCLK_MPEG1, 21, CLK_IGNORE_UNUSED);
+static AXG_PCLK(usb1, HHI_GCLK_MPEG1, 22, CLK_IGNORE_UNUSED);
+static AXG_PCLK(reset, HHI_GCLK_MPEG1, 23, CLK_IGNORE_UNUSED);
+static AXG_PCLK(usb_general, HHI_GCLK_MPEG1, 26, CLK_IGNORE_UNUSED);
+static AXG_PCLK(ahb_arb0, HHI_GCLK_MPEG1, 29, CLK_IGNORE_UNUSED);
+static AXG_PCLK(efuse, HHI_GCLK_MPEG1, 30, CLK_IGNORE_UNUSED);
+static AXG_PCLK(boot_rom, HHI_GCLK_MPEG1, 31, CLK_IGNORE_UNUSED);
+
+static AXG_PCLK(ahb_data_bus, HHI_GCLK_MPEG2, 1, CLK_IGNORE_UNUSED);
+static AXG_PCLK(ahb_ctrl_bus, HHI_GCLK_MPEG2, 2, CLK_IGNORE_UNUSED);
+static AXG_PCLK(usb1_to_ddr, HHI_GCLK_MPEG2, 8, CLK_IGNORE_UNUSED);
+static AXG_PCLK(usb0_to_ddr, HHI_GCLK_MPEG2, 9, CLK_IGNORE_UNUSED);
+static AXG_PCLK(mmc_pclk, HHI_GCLK_MPEG2, 11, CLK_IGNORE_UNUSED);
+static AXG_PCLK(vpu_intr, HHI_GCLK_MPEG2, 25, CLK_IGNORE_UNUSED);
+static AXG_PCLK(sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26, CLK_IGNORE_UNUSED);
+static AXG_PCLK(gic, HHI_GCLK_MPEG2, 30, CLK_IGNORE_UNUSED);
/* Always On (AO) domain gates */
-static MESON_GATE(axg_ao_media_cpu, HHI_GCLK_AO, 0);
-static MESON_GATE(axg_ao_ahb_sram, HHI_GCLK_AO, 1);
-static MESON_GATE(axg_ao_ahb_bus, HHI_GCLK_AO, 2);
-static MESON_GATE(axg_ao_iface, HHI_GCLK_AO, 3);
-static MESON_GATE(axg_ao_i2c, HHI_GCLK_AO, 4);
+static AXG_PCLK(ao_media_cpu, HHI_GCLK_AO, 0, CLK_IGNORE_UNUSED);
+static AXG_PCLK(ao_ahb_sram, HHI_GCLK_AO, 1, CLK_IGNORE_UNUSED);
+static AXG_PCLK(ao_ahb_bus, HHI_GCLK_AO, 2, CLK_IGNORE_UNUSED);
+static AXG_PCLK(ao_iface, HHI_GCLK_AO, 3, CLK_IGNORE_UNUSED);
+static AXG_PCLK(ao_i2c, HHI_GCLK_AO, 4, CLK_IGNORE_UNUSED);
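The MESON_PCLK helper itself is defined in the shared meson clk-regmap header, which this diff does not show. Inferred from the open-coded MESON_GATE definitions being removed above (a register/bit gate with a single parent and clk_regmap_gate_ops), it plausibly expands along the lines of the sketch below; the name and exact layout are assumptions, and the caller supplies the storage class:

#define MESON_PCLK_SKETCH(_name, _reg, _bit, _pdata, _flags)	\
struct clk_regmap _name = {					\
	.data = &(struct clk_regmap_gate_data) {		\
		.offset = (_reg),				\
		.bit_idx = (_bit),				\
	},							\
	.hw.init = &(struct clk_init_data) {			\
		.name = #_name,					\
		.ops = &clk_regmap_gate_ops,			\
		.parent_data = (_pdata),			\
		.num_parents = 1,				\
		.flags = (_flags),				\
	},							\
}

Under that assumption, static AXG_PCLK(ddr, HHI_GCLK_MPEG0, 0, CLK_IGNORE_UNUSED) declares the same gate the old MESON_GATE lines did, with the clk81 parent now passed explicitly as clk_parent_data instead of being implied by the old macro.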
/* Array of all clocks provided by this provider */
@@ -1980,8 +1992,8 @@ static struct clk_hw *axg_hw_clks[] = {
[CLKID_FCLK_DIV5] = &axg_fclk_div5.hw,
[CLKID_FCLK_DIV7] = &axg_fclk_div7.hw,
[CLKID_GP0_PLL] = &axg_gp0_pll.hw,
- [CLKID_MPEG_SEL] = &axg_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &axg_mpeg_clk_div.hw,
+ [CLKID_MPEG_SEL] = &axg_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &axg_clk81_div.hw,
[CLKID_CLK81] = &axg_clk81.hw,
[CLKID_MPLL0] = &axg_mpll0.hw,
[CLKID_MPLL1] = &axg_mpll1.hw,
@@ -2110,28 +2122,27 @@ static struct clk_hw *axg_hw_clks[] = {
[CLKID_VDIN_MEAS] = &axg_vdin_meas.hw,
};
-static const struct meson_eeclkc_data axg_clkc_data = {
+static const struct meson_clkc_data axg_clkc_data = {
.hw_clks = {
.hws = axg_hw_clks,
.num = ARRAY_SIZE(axg_hw_clks),
},
};
-
-static const struct of_device_id clkc_match_table[] = {
+static const struct of_device_id axg_clkc_match_table[] = {
{ .compatible = "amlogic,axg-clkc", .data = &axg_clkc_data },
{}
};
-MODULE_DEVICE_TABLE(of, clkc_match_table);
+MODULE_DEVICE_TABLE(of, axg_clkc_match_table);
-static struct platform_driver axg_driver = {
- .probe = meson_eeclkc_probe,
+static struct platform_driver axg_clkc_driver = {
+ .probe = meson_clkc_syscon_probe,
.driver = {
.name = "axg-clkc",
- .of_match_table = clkc_match_table,
+ .of_match_table = axg_clkc_match_table,
},
};
-module_platform_driver(axg_driver);
+module_platform_driver(axg_clkc_driver);
MODULE_DESCRIPTION("Amlogic AXG Main Clock Controller driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/c3-peripherals.c b/drivers/clk/meson/c3-peripherals.c
index a25e7d5dc669..b158756cfee4 100644
--- a/drivers/clk/meson/c3-peripherals.c
+++ b/drivers/clk/meson/c3-peripherals.c
@@ -48,7 +48,16 @@
#define SPIFC_CLK_CTRL 0x1a0
#define NNA_CLK_CTRL 0x220
-static struct clk_regmap rtc_xtal_clkin = {
+#define C3_COMP_SEL(_name, _reg, _shift, _mask, _pdata) \
+ MESON_COMP_SEL(c3_, _name, _reg, _shift, _mask, _pdata, NULL, 0, 0)
+
+#define C3_COMP_DIV(_name, _reg, _shift, _width) \
+ MESON_COMP_DIV(c3_, _name, _reg, _shift, _width, 0, CLK_SET_RATE_PARENT)
+
+#define C3_COMP_GATE(_name, _reg, _bit) \
+ MESON_COMP_GATE(c3_, _name, _reg, _bit, CLK_SET_RATE_PARENT)
+
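Each C3_COMP_* trio builds the usual Amlogic composite: a mux (sel) feeding a divider (div) feeding a gate, chained with CLK_SET_RATE_PARENT so a rate request on the gate walks back up to the mux. MESON_COMP_GATE is defined elsewhere; judging from the open-coded gates removed later in this file, it plausibly expands as sketched below (the name and exact layout are assumptions):

#define MESON_COMP_GATE_SKETCH(_prefix, _name, _reg, _bit, _flags)	\
struct clk_regmap _prefix##_name = {					\
	.data = &(struct clk_regmap_gate_data) {			\
		.offset = (_reg),					\
		.bit_idx = (_bit),					\
	},								\
	.hw.init = &(struct clk_init_data) {				\
		.name = #_name,						\
		.ops = &clk_regmap_gate_ops,				\
		.parent_hws = (const struct clk_hw *[]) {		\
			&_prefix##_name##_div.hw			\
		},							\
		.num_parents = 1,					\
		.flags = (_flags),					\
	},								\
}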
+static struct clk_regmap c3_rtc_xtal_clkin = {
.data = &(struct clk_regmap_gate_data) {
.offset = RTC_BY_OSCIN_CTRL0,
.bit_idx = 31,
@@ -63,12 +72,12 @@ static struct clk_regmap rtc_xtal_clkin = {
},
};
-static const struct meson_clk_dualdiv_param rtc_32k_div_table[] = {
+static const struct meson_clk_dualdiv_param c3_rtc_32k_div_table[] = {
{ 733, 732, 8, 11, 1 },
{ /* sentinel */ }
};
-static struct clk_regmap rtc_32k_div = {
+static struct clk_regmap c3_rtc_32k_div = {
.data = &(struct meson_clk_dualdiv_data) {
.n1 = {
.reg_off = RTC_BY_OSCIN_CTRL0,
@@ -95,39 +104,39 @@ static struct clk_regmap rtc_32k_div = {
.shift = 28,
.width = 1,
},
- .table = rtc_32k_div_table,
+ .table = c3_rtc_32k_div_table,
},
.hw.init = &(struct clk_init_data) {
.name = "rtc_32k_div",
.ops = &meson_clk_dualdiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &rtc_xtal_clkin.hw
+ &c3_rtc_xtal_clkin.hw
},
.num_parents = 1,
},
};
-static const struct clk_parent_data rtc_32k_mux_parent_data[] = {
- { .hw = &rtc_32k_div.hw },
- { .hw = &rtc_xtal_clkin.hw }
+static const struct clk_parent_data c3_rtc_32k_parents[] = {
+ { .hw = &c3_rtc_32k_div.hw },
+ { .hw = &c3_rtc_xtal_clkin.hw }
};
-static struct clk_regmap rtc_32k_mux = {
+static struct clk_regmap c3_rtc_32k_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = RTC_BY_OSCIN_CTRL1,
.mask = 0x1,
.shift = 24,
},
.hw.init = &(struct clk_init_data) {
- .name = "rtc_32k_mux",
+ .name = "rtc_32k_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = rtc_32k_mux_parent_data,
- .num_parents = ARRAY_SIZE(rtc_32k_mux_parent_data),
+ .parent_data = c3_rtc_32k_parents,
+ .num_parents = ARRAY_SIZE(c3_rtc_32k_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap rtc_32k = {
+static struct clk_regmap c3_rtc_32k = {
.data = &(struct clk_regmap_gate_data) {
.offset = RTC_BY_OSCIN_CTRL0,
.bit_idx = 30,
@@ -136,20 +145,20 @@ static struct clk_regmap rtc_32k = {
.name = "rtc_32k",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &rtc_32k_mux.hw
+ &c3_rtc_32k_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data rtc_clk_mux_parent_data[] = {
+static const struct clk_parent_data c3_rtc_clk_parents[] = {
{ .fw_name = "oscin" },
- { .hw = &rtc_32k.hw },
+ { .hw = &c3_rtc_32k.hw },
{ .fw_name = "pad_osc" }
};
-static struct clk_regmap rtc_clk = {
+static struct clk_regmap c3_rtc_clk = {
.data = &(struct clk_regmap_mux_data) {
.offset = RTC_CTRL,
.mask = 0x3,
@@ -158,62 +167,45 @@ static struct clk_regmap rtc_clk = {
.hw.init = &(struct clk_init_data) {
.name = "rtc_clk",
.ops = &clk_regmap_mux_ops,
- .parent_data = rtc_clk_mux_parent_data,
- .num_parents = ARRAY_SIZE(rtc_clk_mux_parent_data),
+ .parent_data = c3_rtc_clk_parents,
+ .num_parents = ARRAY_SIZE(c3_rtc_clk_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
-#define C3_CLK_GATE(_name, _reg, _bit, _fw_name, _ops, _flags) \
-struct clk_regmap _name = { \
- .data = &(struct clk_regmap_gate_data){ \
- .offset = (_reg), \
- .bit_idx = (_bit), \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = #_name, \
- .ops = _ops, \
- .parent_data = &(const struct clk_parent_data) { \
- .fw_name = #_fw_name, \
- }, \
- .num_parents = 1, \
- .flags = (_flags), \
- }, \
-}
-
-#define C3_SYS_GATE(_name, _reg, _bit, _flags) \
- C3_CLK_GATE(_name, _reg, _bit, sysclk, \
- &clk_regmap_gate_ops, _flags)
-
-#define C3_SYS_GATE_RO(_name, _reg, _bit) \
- C3_CLK_GATE(_name, _reg, _bit, sysclk, \
- &clk_regmap_gate_ro_ops, 0)
-
-static C3_SYS_GATE(sys_reset_ctrl, SYS_CLK_EN0_REG0, 1, 0);
-static C3_SYS_GATE(sys_pwr_ctrl, SYS_CLK_EN0_REG0, 3, 0);
-static C3_SYS_GATE(sys_pad_ctrl, SYS_CLK_EN0_REG0, 4, 0);
-static C3_SYS_GATE(sys_ctrl, SYS_CLK_EN0_REG0, 5, 0);
-static C3_SYS_GATE(sys_ts_pll, SYS_CLK_EN0_REG0, 6, 0);
+static const struct clk_parent_data c3_sys_pclk_parents = { .fw_name = "sysclk" };
+
+#define C3_SYS_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(c3_##_name, _reg, _bit, &c3_sys_pclk_parents, _flags)
+
+#define C3_SYS_PCLK_RO(_name, _reg, _bit) \
+ MESON_PCLK_RO(c3_##_name, _reg, _bit, &c3_sys_pclk_parents, 0)
+
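The _RO variant exists for gates that firmware owns: read-only gate ops report the bit's state but never write it, so Linux cannot accidentally gate such a clock (see the sys_mmc_pclk FIXME below, where the DDR PHY clock must not be touched). As a sketch of the idea, with an assumed helper name, such an ops table simply omits enable/disable:

/* Sketch only; the real clk_regmap_gate_ro_ops live in clk-regmap.c */
static const struct clk_ops gate_ro_ops_sketch = {
	.is_enabled = clk_regmap_gate_is_enabled, /* assumed helper name */
};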
+static C3_SYS_PCLK(sys_reset_ctrl, SYS_CLK_EN0_REG0, 1, 0);
+static C3_SYS_PCLK(sys_pwr_ctrl, SYS_CLK_EN0_REG0, 3, 0);
+static C3_SYS_PCLK(sys_pad_ctrl, SYS_CLK_EN0_REG0, 4, 0);
+static C3_SYS_PCLK(sys_ctrl, SYS_CLK_EN0_REG0, 5, 0);
+static C3_SYS_PCLK(sys_ts_pll, SYS_CLK_EN0_REG0, 6, 0);
/*
* NOTE: sys_dev_arb provides the clock to the ETH and SPICC arbiters that
* access the AXI bus.
*/
-static C3_SYS_GATE(sys_dev_arb, SYS_CLK_EN0_REG0, 7, 0);
+static C3_SYS_PCLK(sys_dev_arb, SYS_CLK_EN0_REG0, 7, 0);
/*
 * FIXME: sys_mmc_pclk provides the clock for the DDR PHY; DDR will only be
 * initialized in bl2, and this clock should not be touched in Linux.
*/
-static C3_SYS_GATE_RO(sys_mmc_pclk, SYS_CLK_EN0_REG0, 8);
+static C3_SYS_PCLK_RO(sys_mmc_pclk, SYS_CLK_EN0_REG0, 8);
/*
 * NOTE: sys_cpu_ctrl provides the clock for the CPU controller. After the
 * clock is disabled, cpu_clk and other key CPU-related configurations cannot
 * take effect.
*/
-static C3_SYS_GATE(sys_cpu_ctrl, SYS_CLK_EN0_REG0, 11, CLK_IS_CRITICAL);
-static C3_SYS_GATE(sys_jtag_ctrl, SYS_CLK_EN0_REG0, 12, 0);
-static C3_SYS_GATE(sys_ir_ctrl, SYS_CLK_EN0_REG0, 13, 0);
+static C3_SYS_PCLK(sys_cpu_ctrl, SYS_CLK_EN0_REG0, 11, CLK_IS_CRITICAL);
+static C3_SYS_PCLK(sys_jtag_ctrl, SYS_CLK_EN0_REG0, 12, 0);
+static C3_SYS_PCLK(sys_ir_ctrl, SYS_CLK_EN0_REG0, 13, 0);
/*
 * NOTE: sys_irq_ctrl provides the clock for the IRQ controller. The IRQ controller
@@ -221,18 +213,18 @@ static C3_SYS_GATE(sys_ir_ctrl, SYS_CLK_EN0_REG0, 13, 0);
 * AOCPU. If the clock is disabled, interrupt-related functions will trigger an
 * exception.
*/
-static C3_SYS_GATE(sys_irq_ctrl, SYS_CLK_EN0_REG0, 14, CLK_IS_CRITICAL);
-static C3_SYS_GATE(sys_msr_clk, SYS_CLK_EN0_REG0, 15, 0);
-static C3_SYS_GATE(sys_rom, SYS_CLK_EN0_REG0, 16, 0);
-static C3_SYS_GATE(sys_uart_f, SYS_CLK_EN0_REG0, 17, 0);
-static C3_SYS_GATE(sys_cpu_apb, SYS_CLK_EN0_REG0, 18, 0);
-static C3_SYS_GATE(sys_rsa, SYS_CLK_EN0_REG0, 19, 0);
-static C3_SYS_GATE(sys_sar_adc, SYS_CLK_EN0_REG0, 20, 0);
-static C3_SYS_GATE(sys_startup, SYS_CLK_EN0_REG0, 21, 0);
-static C3_SYS_GATE(sys_secure, SYS_CLK_EN0_REG0, 22, 0);
-static C3_SYS_GATE(sys_spifc, SYS_CLK_EN0_REG0, 23, 0);
-static C3_SYS_GATE(sys_nna, SYS_CLK_EN0_REG0, 25, 0);
-static C3_SYS_GATE(sys_eth_mac, SYS_CLK_EN0_REG0, 26, 0);
+static C3_SYS_PCLK(sys_irq_ctrl, SYS_CLK_EN0_REG0, 14, CLK_IS_CRITICAL);
+static C3_SYS_PCLK(sys_msr_clk, SYS_CLK_EN0_REG0, 15, 0);
+static C3_SYS_PCLK(sys_rom, SYS_CLK_EN0_REG0, 16, 0);
+static C3_SYS_PCLK(sys_uart_f, SYS_CLK_EN0_REG0, 17, 0);
+static C3_SYS_PCLK(sys_cpu_apb, SYS_CLK_EN0_REG0, 18, 0);
+static C3_SYS_PCLK(sys_rsa, SYS_CLK_EN0_REG0, 19, 0);
+static C3_SYS_PCLK(sys_sar_adc, SYS_CLK_EN0_REG0, 20, 0);
+static C3_SYS_PCLK(sys_startup, SYS_CLK_EN0_REG0, 21, 0);
+static C3_SYS_PCLK(sys_secure, SYS_CLK_EN0_REG0, 22, 0);
+static C3_SYS_PCLK(sys_spifc, SYS_CLK_EN0_REG0, 23, 0);
+static C3_SYS_PCLK(sys_nna, SYS_CLK_EN0_REG0, 25, 0);
+static C3_SYS_PCLK(sys_eth_mac, SYS_CLK_EN0_REG0, 26, 0);
/*
 * FIXME: sys_gic provides the clock for the GIC (Generic Interrupt Controller).
@@ -240,8 +232,8 @@ static C3_SYS_GATE(sys_eth_mac, SYS_CLK_EN0_REG0, 26, 0);
 * used by our GIC is the generic in-kernel driver, and there is no clock
 * management in that driver.
*/
-static C3_SYS_GATE(sys_gic, SYS_CLK_EN0_REG0, 27, CLK_IS_CRITICAL);
-static C3_SYS_GATE(sys_rama, SYS_CLK_EN0_REG0, 28, 0);
+static C3_SYS_PCLK(sys_gic, SYS_CLK_EN0_REG0, 27, CLK_IS_CRITICAL);
+static C3_SYS_PCLK(sys_rama, SYS_CLK_EN0_REG0, 28, 0);
/*
 * NOTE: sys_big_nic provides the clock to the control bus of the NIC (Network
@@ -249,84 +241,85 @@ static C3_SYS_GATE(sys_rama, SYS_CLK_EN0_REG0, 28, 0);
* SPIFC, CAPU, JTAG, EMMC, SDIO, sec_top, USB, Audio, ETH, SPICC) in the
 * system. After the clock is disabled, the NIC cannot work.
*/
-static C3_SYS_GATE(sys_big_nic, SYS_CLK_EN0_REG0, 29, CLK_IS_CRITICAL);
-static C3_SYS_GATE(sys_ramb, SYS_CLK_EN0_REG0, 30, 0);
-static C3_SYS_GATE(sys_audio_pclk, SYS_CLK_EN0_REG0, 31, 0);
-static C3_SYS_GATE(sys_pwm_kl, SYS_CLK_EN0_REG1, 0, 0);
-static C3_SYS_GATE(sys_pwm_ij, SYS_CLK_EN0_REG1, 1, 0);
-static C3_SYS_GATE(sys_usb, SYS_CLK_EN0_REG1, 2, 0);
-static C3_SYS_GATE(sys_sd_emmc_a, SYS_CLK_EN0_REG1, 3, 0);
-static C3_SYS_GATE(sys_sd_emmc_c, SYS_CLK_EN0_REG1, 4, 0);
-static C3_SYS_GATE(sys_pwm_ab, SYS_CLK_EN0_REG1, 5, 0);
-static C3_SYS_GATE(sys_pwm_cd, SYS_CLK_EN0_REG1, 6, 0);
-static C3_SYS_GATE(sys_pwm_ef, SYS_CLK_EN0_REG1, 7, 0);
-static C3_SYS_GATE(sys_pwm_gh, SYS_CLK_EN0_REG1, 8, 0);
-static C3_SYS_GATE(sys_spicc_1, SYS_CLK_EN0_REG1, 9, 0);
-static C3_SYS_GATE(sys_spicc_0, SYS_CLK_EN0_REG1, 10, 0);
-static C3_SYS_GATE(sys_uart_a, SYS_CLK_EN0_REG1, 11, 0);
-static C3_SYS_GATE(sys_uart_b, SYS_CLK_EN0_REG1, 12, 0);
-static C3_SYS_GATE(sys_uart_c, SYS_CLK_EN0_REG1, 13, 0);
-static C3_SYS_GATE(sys_uart_d, SYS_CLK_EN0_REG1, 14, 0);
-static C3_SYS_GATE(sys_uart_e, SYS_CLK_EN0_REG1, 15, 0);
-static C3_SYS_GATE(sys_i2c_m_a, SYS_CLK_EN0_REG1, 16, 0);
-static C3_SYS_GATE(sys_i2c_m_b, SYS_CLK_EN0_REG1, 17, 0);
-static C3_SYS_GATE(sys_i2c_m_c, SYS_CLK_EN0_REG1, 18, 0);
-static C3_SYS_GATE(sys_i2c_m_d, SYS_CLK_EN0_REG1, 19, 0);
-static C3_SYS_GATE(sys_i2c_s_a, SYS_CLK_EN0_REG1, 20, 0);
-static C3_SYS_GATE(sys_rtc, SYS_CLK_EN0_REG1, 21, 0);
-static C3_SYS_GATE(sys_ge2d, SYS_CLK_EN0_REG1, 22, 0);
-static C3_SYS_GATE(sys_isp, SYS_CLK_EN0_REG1, 23, 0);
-static C3_SYS_GATE(sys_gpv_isp_nic, SYS_CLK_EN0_REG1, 24, 0);
-static C3_SYS_GATE(sys_gpv_cve_nic, SYS_CLK_EN0_REG1, 25, 0);
-static C3_SYS_GATE(sys_mipi_dsi_host, SYS_CLK_EN0_REG1, 26, 0);
-static C3_SYS_GATE(sys_mipi_dsi_phy, SYS_CLK_EN0_REG1, 27, 0);
-static C3_SYS_GATE(sys_eth_phy, SYS_CLK_EN0_REG1, 28, 0);
-static C3_SYS_GATE(sys_acodec, SYS_CLK_EN0_REG1, 29, 0);
-static C3_SYS_GATE(sys_dwap, SYS_CLK_EN0_REG1, 30, 0);
-static C3_SYS_GATE(sys_dos, SYS_CLK_EN0_REG1, 31, 0);
-static C3_SYS_GATE(sys_cve, SYS_CLK_EN0_REG2, 0, 0);
-static C3_SYS_GATE(sys_vout, SYS_CLK_EN0_REG2, 1, 0);
-static C3_SYS_GATE(sys_vc9000e, SYS_CLK_EN0_REG2, 2, 0);
-static C3_SYS_GATE(sys_pwm_mn, SYS_CLK_EN0_REG2, 3, 0);
-static C3_SYS_GATE(sys_sd_emmc_b, SYS_CLK_EN0_REG2, 4, 0);
-
-#define C3_AXI_GATE(_name, _reg, _bit, _flags) \
- C3_CLK_GATE(_name, _reg, _bit, axiclk, \
- &clk_regmap_gate_ops, _flags)
+static C3_SYS_PCLK(sys_big_nic, SYS_CLK_EN0_REG0, 29, CLK_IS_CRITICAL);
+static C3_SYS_PCLK(sys_ramb, SYS_CLK_EN0_REG0, 30, 0);
+static C3_SYS_PCLK(sys_audio_pclk, SYS_CLK_EN0_REG0, 31, 0);
+static C3_SYS_PCLK(sys_pwm_kl, SYS_CLK_EN0_REG1, 0, 0);
+static C3_SYS_PCLK(sys_pwm_ij, SYS_CLK_EN0_REG1, 1, 0);
+static C3_SYS_PCLK(sys_usb, SYS_CLK_EN0_REG1, 2, 0);
+static C3_SYS_PCLK(sys_sd_emmc_a, SYS_CLK_EN0_REG1, 3, 0);
+static C3_SYS_PCLK(sys_sd_emmc_c, SYS_CLK_EN0_REG1, 4, 0);
+static C3_SYS_PCLK(sys_pwm_ab, SYS_CLK_EN0_REG1, 5, 0);
+static C3_SYS_PCLK(sys_pwm_cd, SYS_CLK_EN0_REG1, 6, 0);
+static C3_SYS_PCLK(sys_pwm_ef, SYS_CLK_EN0_REG1, 7, 0);
+static C3_SYS_PCLK(sys_pwm_gh, SYS_CLK_EN0_REG1, 8, 0);
+static C3_SYS_PCLK(sys_spicc_1, SYS_CLK_EN0_REG1, 9, 0);
+static C3_SYS_PCLK(sys_spicc_0, SYS_CLK_EN0_REG1, 10, 0);
+static C3_SYS_PCLK(sys_uart_a, SYS_CLK_EN0_REG1, 11, 0);
+static C3_SYS_PCLK(sys_uart_b, SYS_CLK_EN0_REG1, 12, 0);
+static C3_SYS_PCLK(sys_uart_c, SYS_CLK_EN0_REG1, 13, 0);
+static C3_SYS_PCLK(sys_uart_d, SYS_CLK_EN0_REG1, 14, 0);
+static C3_SYS_PCLK(sys_uart_e, SYS_CLK_EN0_REG1, 15, 0);
+static C3_SYS_PCLK(sys_i2c_m_a, SYS_CLK_EN0_REG1, 16, 0);
+static C3_SYS_PCLK(sys_i2c_m_b, SYS_CLK_EN0_REG1, 17, 0);
+static C3_SYS_PCLK(sys_i2c_m_c, SYS_CLK_EN0_REG1, 18, 0);
+static C3_SYS_PCLK(sys_i2c_m_d, SYS_CLK_EN0_REG1, 19, 0);
+static C3_SYS_PCLK(sys_i2c_s_a, SYS_CLK_EN0_REG1, 20, 0);
+static C3_SYS_PCLK(sys_rtc, SYS_CLK_EN0_REG1, 21, 0);
+static C3_SYS_PCLK(sys_ge2d, SYS_CLK_EN0_REG1, 22, 0);
+static C3_SYS_PCLK(sys_isp, SYS_CLK_EN0_REG1, 23, 0);
+static C3_SYS_PCLK(sys_gpv_isp_nic, SYS_CLK_EN0_REG1, 24, 0);
+static C3_SYS_PCLK(sys_gpv_cve_nic, SYS_CLK_EN0_REG1, 25, 0);
+static C3_SYS_PCLK(sys_mipi_dsi_host, SYS_CLK_EN0_REG1, 26, 0);
+static C3_SYS_PCLK(sys_mipi_dsi_phy, SYS_CLK_EN0_REG1, 27, 0);
+static C3_SYS_PCLK(sys_eth_phy, SYS_CLK_EN0_REG1, 28, 0);
+static C3_SYS_PCLK(sys_acodec, SYS_CLK_EN0_REG1, 29, 0);
+static C3_SYS_PCLK(sys_dwap, SYS_CLK_EN0_REG1, 30, 0);
+static C3_SYS_PCLK(sys_dos, SYS_CLK_EN0_REG1, 31, 0);
+static C3_SYS_PCLK(sys_cve, SYS_CLK_EN0_REG2, 0, 0);
+static C3_SYS_PCLK(sys_vout, SYS_CLK_EN0_REG2, 1, 0);
+static C3_SYS_PCLK(sys_vc9000e, SYS_CLK_EN0_REG2, 2, 0);
+static C3_SYS_PCLK(sys_pwm_mn, SYS_CLK_EN0_REG2, 3, 0);
+static C3_SYS_PCLK(sys_sd_emmc_b, SYS_CLK_EN0_REG2, 4, 0);
+
+static const struct clk_parent_data c3_axi_pclk_parents = { .fw_name = "axiclk" };
+
+#define C3_AXI_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(c3_##_name, _reg, _bit, &c3_axi_pclk_parents, _flags)
/*
 * NOTE: axi_sys_nic provides the clock to the AXI bus of the system NIC. After
 * the clock is disabled, the NIC cannot work.
*/
-static C3_AXI_GATE(axi_sys_nic, AXI_CLK_EN0, 2, CLK_IS_CRITICAL);
-static C3_AXI_GATE(axi_isp_nic, AXI_CLK_EN0, 3, 0);
-static C3_AXI_GATE(axi_cve_nic, AXI_CLK_EN0, 4, 0);
-static C3_AXI_GATE(axi_ramb, AXI_CLK_EN0, 5, 0);
-static C3_AXI_GATE(axi_rama, AXI_CLK_EN0, 6, 0);
+static C3_AXI_PCLK(axi_sys_nic, AXI_CLK_EN0, 2, CLK_IS_CRITICAL);
+static C3_AXI_PCLK(axi_isp_nic, AXI_CLK_EN0, 3, 0);
+static C3_AXI_PCLK(axi_cve_nic, AXI_CLK_EN0, 4, 0);
+static C3_AXI_PCLK(axi_ramb, AXI_CLK_EN0, 5, 0);
+static C3_AXI_PCLK(axi_rama, AXI_CLK_EN0, 6, 0);
/*
* NOTE: axi_cpu_dmc provides the clock to the AXI bus where the CPU accesses
 * the DDR. After the clock is disabled, the CPU will not have access to the DDR.
*/
-static C3_AXI_GATE(axi_cpu_dmc, AXI_CLK_EN0, 7, CLK_IS_CRITICAL);
-static C3_AXI_GATE(axi_nic, AXI_CLK_EN0, 8, 0);
-static C3_AXI_GATE(axi_dma, AXI_CLK_EN0, 9, 0);
+static C3_AXI_PCLK(axi_cpu_dmc, AXI_CLK_EN0, 7, CLK_IS_CRITICAL);
+static C3_AXI_PCLK(axi_nic, AXI_CLK_EN0, 8, 0);
+static C3_AXI_PCLK(axi_dma, AXI_CLK_EN0, 9, 0);
/*
 * NOTE: axi_mux_nic provides the clock to the NIC's AXI bus for NN (Neural
 * Network) and other devices (CPU, EMMC, SDIO, sec_top, USB, Audio, ETH, SPICC)
* to access RAM space.
*/
-static C3_AXI_GATE(axi_mux_nic, AXI_CLK_EN0, 10, 0);
-static C3_AXI_GATE(axi_cve, AXI_CLK_EN0, 12, 0);
+static C3_AXI_PCLK(axi_mux_nic, AXI_CLK_EN0, 10, 0);
+static C3_AXI_PCLK(axi_cve, AXI_CLK_EN0, 12, 0);
/*
 * NOTE: axi_dev1_dmc provides the clock for the peripherals (EMMC, SDIO,
* sec_top, USB, Audio, ETH, SPICC) to access the AXI bus of the DDR.
*/
-static C3_AXI_GATE(axi_dev1_dmc, AXI_CLK_EN0, 13, 0);
-static C3_AXI_GATE(axi_dev0_dmc, AXI_CLK_EN0, 14, 0);
-static C3_AXI_GATE(axi_dsp_dmc, AXI_CLK_EN0, 15, 0);
+static C3_AXI_PCLK(axi_dev1_dmc, AXI_CLK_EN0, 13, 0);
+static C3_AXI_PCLK(axi_dev0_dmc, AXI_CLK_EN0, 14, 0);
+static C3_AXI_PCLK(axi_dsp_dmc, AXI_CLK_EN0, 15, 0);
/*
* clk_12_24m model
@@ -335,7 +328,7 @@ static C3_AXI_GATE(axi_dsp_dmc, AXI_CLK_EN0, 15, 0);
* xtal---->| gate |---->| div |------------>| pad |
 *          |------|     |-----|             |-----|
*/
-static struct clk_regmap clk_12_24m_in = {
+static struct clk_regmap c3_clk_12_24m_in = {
.data = &(struct clk_regmap_gate_data) {
.offset = CLK12_24_CTRL,
.bit_idx = 11,
@@ -350,7 +343,7 @@ static struct clk_regmap clk_12_24m_in = {
},
};
-static struct clk_regmap clk_12_24m = {
+static struct clk_regmap c3_clk_12_24m = {
.data = &(struct clk_regmap_div_data) {
.offset = CLK12_24_CTRL,
.shift = 10,
@@ -360,14 +353,14 @@ static struct clk_regmap clk_12_24m = {
.name = "clk_12_24m",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &clk_12_24m_in.hw
+ &c3_clk_12_24m_in.hw
},
.num_parents = 1,
},
};
 /* FIXME: a divider value of 0 divides by 2, just like value 1 */
-static struct clk_regmap fclk_25m_div = {
+static struct clk_regmap c3_fclk_25m_div = {
.data = &(struct clk_regmap_div_data) {
.offset = CLK12_24_CTRL,
.shift = 0,
@@ -383,7 +376,7 @@ static struct clk_regmap fclk_25m_div = {
},
};
-static struct clk_regmap fclk_25m = {
+static struct clk_regmap c3_fclk_25m = {
.data = &(struct clk_regmap_gate_data) {
.offset = CLK12_24_CTRL,
.bit_idx = 12,
@@ -392,7 +385,7 @@ static struct clk_regmap fclk_25m = {
.name = "fclk_25m",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_25m_div.hw
+ &c3_fclk_25m_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -404,11 +397,10 @@ static struct clk_regmap fclk_25m = {
 * is managed by the clock measure module. Their hardware is outside the clock tree.
 * Channels 4, 8, 9, 10, 11, 13, 14, 15, 16 and 18 are not connected.
*/
-static u32 gen_parent_table[] = { 0, 1, 2, 5, 6, 7, 17, 19, 20, 21, 22, 23, 24};
-
-static const struct clk_parent_data gen_parent_data[] = {
+static u32 c3_gen_parents_val_table[] = { 0, 1, 2, 5, 6, 7, 17, 19, 20, 21, 22, 23, 24};
+static const struct clk_parent_data c3_gen_parents[] = {
{ .fw_name = "oscin" },
- { .hw = &rtc_clk.hw },
+ { .hw = &c3_rtc_clk.hw },
{ .fw_name = "sysplldiv16" },
{ .fw_name = "gp0" },
{ .fw_name = "gp1" },
@@ -422,22 +414,22 @@ static const struct clk_parent_data gen_parent_data[] = {
{ .fw_name = "fdiv7" }
};
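Because several mux inputs are unconnected, this mux uses a value table: selecting parent index i writes c3_gen_parents_val_table[i] into the 5-bit field rather than i itself. This is standard clk_regmap_mux_data behaviour whenever .table is set:

/*
 * Parent index -> register value, per the table above:
 *   index 0  ("oscin")        -> 0
 *   index 2  ("sysplldiv16")  -> 2
 *   index 12 ("fdiv7", last)  -> 24
 */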
-static struct clk_regmap gen_sel = {
+static struct clk_regmap c3_gen_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = GEN_CLK_CTRL,
.mask = 0x1f,
.shift = 12,
- .table = gen_parent_table,
+ .table = c3_gen_parents_val_table,
},
.hw.init = &(struct clk_init_data) {
.name = "gen_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gen_parent_data,
- .num_parents = ARRAY_SIZE(gen_parent_data),
+ .parent_data = c3_gen_parents,
+ .num_parents = ARRAY_SIZE(c3_gen_parents),
},
};
-static struct clk_regmap gen_div = {
+static struct clk_regmap c3_gen_div = {
.data = &(struct clk_regmap_div_data) {
.offset = GEN_CLK_CTRL,
.shift = 0,
@@ -447,14 +439,14 @@ static struct clk_regmap gen_div = {
.name = "gen_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &gen_sel.hw
+ &c3_gen_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap gen = {
+static struct clk_regmap c3_gen = {
.data = &(struct clk_regmap_gate_data) {
.offset = GEN_CLK_CTRL,
.bit_idx = 11,
@@ -463,214 +455,86 @@ static struct clk_regmap gen = {
.name = "gen",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &gen_div.hw
+ &c3_gen_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data saradc_parent_data[] = {
+static const struct clk_parent_data c3_saradc_parents[] = {
{ .fw_name = "oscin" },
{ .fw_name = "sysclk" }
};
-static struct clk_regmap saradc_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = SAR_CLK_CTRL0,
- .mask = 0x1,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "saradc_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = saradc_parent_data,
- .num_parents = ARRAY_SIZE(saradc_parent_data),
- },
-};
-
-static struct clk_regmap saradc_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = SAR_CLK_CTRL0,
- .shift = 0,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "saradc_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &saradc_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap saradc = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = SAR_CLK_CTRL0,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "saradc",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &saradc_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(saradc, SAR_CLK_CTRL0, 9, 0x1, c3_saradc_parents);
+static C3_COMP_DIV(saradc, SAR_CLK_CTRL0, 0, 8);
+static C3_COMP_GATE(saradc, SAR_CLK_CTRL0, 8);
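From a consumer's point of view the macro conversion changes nothing; the trio still behaves as one adjustable clock. A hedged usage sketch (the "saradc" lookup name and the rate are illustrative, not taken from a binding):

static int saradc_clk_example(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(dev, "saradc"); /* hypothetical consumer name */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* The request propagates gate -> div -> sel via CLK_SET_RATE_PARENT */
	ret = clk_set_rate(clk, 1200000);
	if (ret)
		return ret;

	/* Ungates SAR_CLK_CTRL0 bit 8, the gate declared just above */
	return clk_prepare_enable(clk);
}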
-static const struct clk_parent_data pwm_parent_data[] = {
+static const struct clk_parent_data c3_pwm_parents[] = {
{ .fw_name = "oscin" },
{ .fw_name = "gp1" },
{ .fw_name = "fdiv4" },
{ .fw_name = "fdiv3" }
};
-#define AML_PWM_CLK_MUX(_name, _reg, _shift) { \
- .data = &(struct clk_regmap_mux_data) { \
- .offset = _reg, \
- .mask = 0x3, \
- .shift = _shift, \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = #_name "_sel", \
- .ops = &clk_regmap_mux_ops, \
- .parent_data = pwm_parent_data, \
- .num_parents = ARRAY_SIZE(pwm_parent_data), \
- }, \
-}
-
-#define AML_PWM_CLK_DIV(_name, _reg, _shift) { \
- .data = &(struct clk_regmap_div_data) { \
- .offset = _reg, \
- .shift = _shift, \
- .width = 8, \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = #_name "_div", \
- .ops = &clk_regmap_divider_ops, \
- .parent_names = (const char *[]) { #_name "_sel" },\
- .num_parents = 1, \
- .flags = CLK_SET_RATE_PARENT, \
- }, \
-}
-
-#define AML_PWM_CLK_GATE(_name, _reg, _bit) { \
- .data = &(struct clk_regmap_gate_data) { \
- .offset = _reg, \
- .bit_idx = _bit, \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = #_name, \
- .ops = &clk_regmap_gate_ops, \
- .parent_names = (const char *[]) { #_name "_div" },\
- .num_parents = 1, \
- .flags = CLK_SET_RATE_PARENT, \
- }, \
-}
-
-static struct clk_regmap pwm_a_sel =
- AML_PWM_CLK_MUX(pwm_a, PWM_CLK_AB_CTRL, 9);
-static struct clk_regmap pwm_a_div =
- AML_PWM_CLK_DIV(pwm_a, PWM_CLK_AB_CTRL, 0);
-static struct clk_regmap pwm_a =
- AML_PWM_CLK_GATE(pwm_a, PWM_CLK_AB_CTRL, 8);
-
-static struct clk_regmap pwm_b_sel =
- AML_PWM_CLK_MUX(pwm_b, PWM_CLK_AB_CTRL, 25);
-static struct clk_regmap pwm_b_div =
- AML_PWM_CLK_DIV(pwm_b, PWM_CLK_AB_CTRL, 16);
-static struct clk_regmap pwm_b =
- AML_PWM_CLK_GATE(pwm_b, PWM_CLK_AB_CTRL, 24);
-
-static struct clk_regmap pwm_c_sel =
- AML_PWM_CLK_MUX(pwm_c, PWM_CLK_CD_CTRL, 9);
-static struct clk_regmap pwm_c_div =
- AML_PWM_CLK_DIV(pwm_c, PWM_CLK_CD_CTRL, 0);
-static struct clk_regmap pwm_c =
- AML_PWM_CLK_GATE(pwm_c, PWM_CLK_CD_CTRL, 8);
-
-static struct clk_regmap pwm_d_sel =
- AML_PWM_CLK_MUX(pwm_d, PWM_CLK_CD_CTRL, 25);
-static struct clk_regmap pwm_d_div =
- AML_PWM_CLK_DIV(pwm_d, PWM_CLK_CD_CTRL, 16);
-static struct clk_regmap pwm_d =
- AML_PWM_CLK_GATE(pwm_d, PWM_CLK_CD_CTRL, 24);
-
-static struct clk_regmap pwm_e_sel =
- AML_PWM_CLK_MUX(pwm_e, PWM_CLK_EF_CTRL, 9);
-static struct clk_regmap pwm_e_div =
- AML_PWM_CLK_DIV(pwm_e, PWM_CLK_EF_CTRL, 0);
-static struct clk_regmap pwm_e =
- AML_PWM_CLK_GATE(pwm_e, PWM_CLK_EF_CTRL, 8);
-
-static struct clk_regmap pwm_f_sel =
- AML_PWM_CLK_MUX(pwm_f, PWM_CLK_EF_CTRL, 25);
-static struct clk_regmap pwm_f_div =
- AML_PWM_CLK_DIV(pwm_f, PWM_CLK_EF_CTRL, 16);
-static struct clk_regmap pwm_f =
- AML_PWM_CLK_GATE(pwm_f, PWM_CLK_EF_CTRL, 24);
-
-static struct clk_regmap pwm_g_sel =
- AML_PWM_CLK_MUX(pwm_g, PWM_CLK_GH_CTRL, 9);
-static struct clk_regmap pwm_g_div =
- AML_PWM_CLK_DIV(pwm_g, PWM_CLK_GH_CTRL, 0);
-static struct clk_regmap pwm_g =
- AML_PWM_CLK_GATE(pwm_g, PWM_CLK_GH_CTRL, 8);
-
-static struct clk_regmap pwm_h_sel =
- AML_PWM_CLK_MUX(pwm_h, PWM_CLK_GH_CTRL, 25);
-static struct clk_regmap pwm_h_div =
- AML_PWM_CLK_DIV(pwm_h, PWM_CLK_GH_CTRL, 16);
-static struct clk_regmap pwm_h =
- AML_PWM_CLK_GATE(pwm_h, PWM_CLK_GH_CTRL, 24);
-
-static struct clk_regmap pwm_i_sel =
- AML_PWM_CLK_MUX(pwm_i, PWM_CLK_IJ_CTRL, 9);
-static struct clk_regmap pwm_i_div =
- AML_PWM_CLK_DIV(pwm_i, PWM_CLK_IJ_CTRL, 0);
-static struct clk_regmap pwm_i =
- AML_PWM_CLK_GATE(pwm_i, PWM_CLK_IJ_CTRL, 8);
-
-static struct clk_regmap pwm_j_sel =
- AML_PWM_CLK_MUX(pwm_j, PWM_CLK_IJ_CTRL, 25);
-static struct clk_regmap pwm_j_div =
- AML_PWM_CLK_DIV(pwm_j, PWM_CLK_IJ_CTRL, 16);
-static struct clk_regmap pwm_j =
- AML_PWM_CLK_GATE(pwm_j, PWM_CLK_IJ_CTRL, 24);
-
-static struct clk_regmap pwm_k_sel =
- AML_PWM_CLK_MUX(pwm_k, PWM_CLK_KL_CTRL, 9);
-static struct clk_regmap pwm_k_div =
- AML_PWM_CLK_DIV(pwm_k, PWM_CLK_KL_CTRL, 0);
-static struct clk_regmap pwm_k =
- AML_PWM_CLK_GATE(pwm_k, PWM_CLK_KL_CTRL, 8);
-
-static struct clk_regmap pwm_l_sel =
- AML_PWM_CLK_MUX(pwm_l, PWM_CLK_KL_CTRL, 25);
-static struct clk_regmap pwm_l_div =
- AML_PWM_CLK_DIV(pwm_l, PWM_CLK_KL_CTRL, 16);
-static struct clk_regmap pwm_l =
- AML_PWM_CLK_GATE(pwm_l, PWM_CLK_KL_CTRL, 24);
-
-static struct clk_regmap pwm_m_sel =
- AML_PWM_CLK_MUX(pwm_m, PWM_CLK_MN_CTRL, 9);
-static struct clk_regmap pwm_m_div =
- AML_PWM_CLK_DIV(pwm_m, PWM_CLK_MN_CTRL, 0);
-static struct clk_regmap pwm_m =
- AML_PWM_CLK_GATE(pwm_m, PWM_CLK_MN_CTRL, 8);
-
-static struct clk_regmap pwm_n_sel =
- AML_PWM_CLK_MUX(pwm_n, PWM_CLK_MN_CTRL, 25);
-static struct clk_regmap pwm_n_div =
- AML_PWM_CLK_DIV(pwm_n, PWM_CLK_MN_CTRL, 16);
-static struct clk_regmap pwm_n =
- AML_PWM_CLK_GATE(pwm_n, PWM_CLK_MN_CTRL, 24);
-
-static const struct clk_parent_data spicc_parent_data[] = {
+static C3_COMP_SEL(pwm_a, PWM_CLK_AB_CTRL, 9, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_a, PWM_CLK_AB_CTRL, 0, 8);
+static C3_COMP_GATE(pwm_a, PWM_CLK_AB_CTRL, 8);
+
+static C3_COMP_SEL(pwm_b, PWM_CLK_AB_CTRL, 25, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_b, PWM_CLK_AB_CTRL, 16, 8);
+static C3_COMP_GATE(pwm_b, PWM_CLK_AB_CTRL, 24);
+
+static C3_COMP_SEL(pwm_c, PWM_CLK_CD_CTRL, 9, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_c, PWM_CLK_CD_CTRL, 0, 8);
+static C3_COMP_GATE(pwm_c, PWM_CLK_CD_CTRL, 8);
+
+static C3_COMP_SEL(pwm_d, PWM_CLK_CD_CTRL, 25, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_d, PWM_CLK_CD_CTRL, 16, 8);
+static C3_COMP_GATE(pwm_d, PWM_CLK_CD_CTRL, 24);
+
+static C3_COMP_SEL(pwm_e, PWM_CLK_EF_CTRL, 9, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_e, PWM_CLK_EF_CTRL, 0, 8);
+static C3_COMP_GATE(pwm_e, PWM_CLK_EF_CTRL, 8);
+
+static C3_COMP_SEL(pwm_f, PWM_CLK_EF_CTRL, 25, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_f, PWM_CLK_EF_CTRL, 16, 8);
+static C3_COMP_GATE(pwm_f, PWM_CLK_EF_CTRL, 24);
+
+static C3_COMP_SEL(pwm_g, PWM_CLK_GH_CTRL, 9, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_g, PWM_CLK_GH_CTRL, 0, 8);
+static C3_COMP_GATE(pwm_g, PWM_CLK_GH_CTRL, 8);
+
+static C3_COMP_SEL(pwm_h, PWM_CLK_GH_CTRL, 25, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_h, PWM_CLK_GH_CTRL, 16, 8);
+static C3_COMP_GATE(pwm_h, PWM_CLK_GH_CTRL, 24);
+
+static C3_COMP_SEL(pwm_i, PWM_CLK_IJ_CTRL, 9, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_i, PWM_CLK_IJ_CTRL, 0, 8);
+static C3_COMP_GATE(pwm_i, PWM_CLK_IJ_CTRL, 8);
+
+static C3_COMP_SEL(pwm_j, PWM_CLK_IJ_CTRL, 25, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_j, PWM_CLK_IJ_CTRL, 16, 8);
+static C3_COMP_GATE(pwm_j, PWM_CLK_IJ_CTRL, 24);
+
+static C3_COMP_SEL(pwm_k, PWM_CLK_KL_CTRL, 9, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_k, PWM_CLK_KL_CTRL, 0, 8);
+static C3_COMP_GATE(pwm_k, PWM_CLK_KL_CTRL, 8);
+
+static C3_COMP_SEL(pwm_l, PWM_CLK_KL_CTRL, 25, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_l, PWM_CLK_KL_CTRL, 16, 8);
+static C3_COMP_GATE(pwm_l, PWM_CLK_KL_CTRL, 24);
+
+static C3_COMP_SEL(pwm_m, PWM_CLK_MN_CTRL, 9, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_m, PWM_CLK_MN_CTRL, 0, 8);
+static C3_COMP_GATE(pwm_m, PWM_CLK_MN_CTRL, 8);
+
+static C3_COMP_SEL(pwm_n, PWM_CLK_MN_CTRL, 25, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_n, PWM_CLK_MN_CTRL, 16, 8);
+static C3_COMP_GATE(pwm_n, PWM_CLK_MN_CTRL, 24);
+
+static const struct clk_parent_data c3_spicc_parents[] = {
{ .fw_name = "oscin" },
{ .fw_name = "sysclk" },
{ .fw_name = "fdiv4" },
@@ -681,101 +545,15 @@ static const struct clk_parent_data spicc_parent_data[] = {
{ .fw_name = "gp1" }
};
-static struct clk_regmap spicc_a_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = SPICC_CLK_CTRL,
- .mask = 0x7,
- .shift = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spicc_a_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = spicc_parent_data,
- .num_parents = ARRAY_SIZE(spicc_parent_data),
- },
-};
-
-static struct clk_regmap spicc_a_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = SPICC_CLK_CTRL,
- .shift = 0,
- .width = 6,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spicc_a_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &spicc_a_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(spicc_a, SPICC_CLK_CTRL, 7, 0x7, c3_spicc_parents);
+static C3_COMP_DIV(spicc_a, SPICC_CLK_CTRL, 0, 6);
+static C3_COMP_GATE(spicc_a, SPICC_CLK_CTRL, 6);
-static struct clk_regmap spicc_a = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = SPICC_CLK_CTRL,
- .bit_idx = 6,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spicc_a",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &spicc_a_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(spicc_b, SPICC_CLK_CTRL, 23, 0x7, c3_spicc_parents);
+static C3_COMP_DIV(spicc_b, SPICC_CLK_CTRL, 16, 6);
+static C3_COMP_GATE(spicc_b, SPICC_CLK_CTRL, 22);
-static struct clk_regmap spicc_b_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = SPICC_CLK_CTRL,
- .mask = 0x7,
- .shift = 23,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spicc_b_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = spicc_parent_data,
- .num_parents = ARRAY_SIZE(spicc_parent_data),
- },
-};
-
-static struct clk_regmap spicc_b_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = SPICC_CLK_CTRL,
- .shift = 16,
- .width = 6,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spicc_b_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &spicc_b_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap spicc_b = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = SPICC_CLK_CTRL,
- .bit_idx = 22,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spicc_b",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &spicc_b_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static const struct clk_parent_data spifc_parent_data[] = {
+static const struct clk_parent_data c3_spifc_parents[] = {
{ .fw_name = "gp0" },
{ .fw_name = "fdiv2" },
{ .fw_name = "fdiv3" },
@@ -786,54 +564,11 @@ static const struct clk_parent_data spifc_parent_data[] = {
{ .fw_name = "fdiv7" }
};
-static struct clk_regmap spifc_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = SPIFC_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spifc_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = spifc_parent_data,
- .num_parents = ARRAY_SIZE(spifc_parent_data),
- },
-};
+static C3_COMP_SEL(spifc, SPIFC_CLK_CTRL, 9, 0x7, c3_spifc_parents);
+static C3_COMP_DIV(spifc, SPIFC_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(spifc, SPIFC_CLK_CTRL, 8);
-static struct clk_regmap spifc_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = SPIFC_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spifc_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &spifc_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap spifc = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = SPIFC_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spifc",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &spifc_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static const struct clk_parent_data emmc_parent_data[] = {
+static const struct clk_parent_data c3_sd_emmc_parents[] = {
{ .fw_name = "oscin" },
{ .fw_name = "fdiv2" },
{ .fw_name = "fdiv3" },
@@ -844,148 +579,19 @@ static const struct clk_parent_data emmc_parent_data[] = {
{ .fw_name = "gp0" }
};
-static struct clk_regmap sd_emmc_a_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = SD_EMMC_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_a_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = emmc_parent_data,
- .num_parents = ARRAY_SIZE(emmc_parent_data),
- },
-};
-
-static struct clk_regmap sd_emmc_a_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = SD_EMMC_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_a_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &sd_emmc_a_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap sd_emmc_a = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = SD_EMMC_CLK_CTRL,
- .bit_idx = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_a",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &sd_emmc_a_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap sd_emmc_b_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = SD_EMMC_CLK_CTRL,
- .mask = 0x7,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_b_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = emmc_parent_data,
- .num_parents = ARRAY_SIZE(emmc_parent_data),
- },
-};
-
-static struct clk_regmap sd_emmc_b_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = SD_EMMC_CLK_CTRL,
- .shift = 16,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_b_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &sd_emmc_b_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap sd_emmc_b = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = SD_EMMC_CLK_CTRL,
- .bit_idx = 23,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_b",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &sd_emmc_b_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap sd_emmc_c_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = NAND_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_c_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = emmc_parent_data,
- .num_parents = ARRAY_SIZE(emmc_parent_data),
- },
-};
+static C3_COMP_SEL(sd_emmc_a, SD_EMMC_CLK_CTRL, 9, 0x7, c3_sd_emmc_parents);
+static C3_COMP_DIV(sd_emmc_a, SD_EMMC_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(sd_emmc_a, SD_EMMC_CLK_CTRL, 7);
-static struct clk_regmap sd_emmc_c_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = NAND_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_c_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &sd_emmc_c_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(sd_emmc_b, SD_EMMC_CLK_CTRL, 25, 0x7, c3_sd_emmc_parents);
+static C3_COMP_DIV(sd_emmc_b, SD_EMMC_CLK_CTRL, 16, 7);
+static C3_COMP_GATE(sd_emmc_b, SD_EMMC_CLK_CTRL, 23);
-static struct clk_regmap sd_emmc_c = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = NAND_CLK_CTRL,
- .bit_idx = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_c",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &sd_emmc_c_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(sd_emmc_c, NAND_CLK_CTRL, 9, 0x7, c3_sd_emmc_parents);
+static C3_COMP_DIV(sd_emmc_c, NAND_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(sd_emmc_c, NAND_CLK_CTRL, 7);
-static struct clk_regmap ts_div = {
+static struct clk_regmap c3_ts_div = {
.data = &(struct clk_regmap_div_data) {
.offset = TS_CLK_CTRL,
.shift = 0,
@@ -1001,7 +607,7 @@ static struct clk_regmap ts_div = {
},
};
-static struct clk_regmap ts = {
+static struct clk_regmap c3_ts = {
.data = &(struct clk_regmap_gate_data) {
.offset = TS_CLK_CTRL,
.bit_idx = 8,
@@ -1010,29 +616,29 @@ static struct clk_regmap ts = {
.name = "ts",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &ts_div.hw
+ &c3_ts_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data eth_parent = {
+static const struct clk_parent_data c3_eth_parents = {
.fw_name = "fdiv2",
};
-static struct clk_fixed_factor eth_125m_div = {
+static struct clk_fixed_factor c3_eth_125m_div = {
.mult = 1,
.div = 8,
.hw.init = &(struct clk_init_data) {
.name = "eth_125m_div",
.ops = &clk_fixed_factor_ops,
- .parent_data = &eth_parent,
+ .parent_data = &c3_eth_parents,
.num_parents = 1,
},
};
-static struct clk_regmap eth_125m = {
+static struct clk_regmap c3_eth_125m = {
.data = &(struct clk_regmap_gate_data) {
.offset = ETH_CLK_CTRL,
.bit_idx = 7,
@@ -1041,14 +647,14 @@ static struct clk_regmap eth_125m = {
.name = "eth_125m",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &eth_125m_div.hw
+ &c3_eth_125m_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap eth_rmii_div = {
+static struct clk_regmap c3_eth_rmii_div = {
.data = &(struct clk_regmap_div_data) {
.offset = ETH_CLK_CTRL,
.shift = 0,
@@ -1057,12 +663,12 @@ static struct clk_regmap eth_rmii_div = {
.hw.init = &(struct clk_init_data) {
.name = "eth_rmii_div",
.ops = &clk_regmap_divider_ops,
- .parent_data = &eth_parent,
+ .parent_data = &c3_eth_parents,
.num_parents = 1,
},
};
-static struct clk_regmap eth_rmii = {
+static struct clk_regmap c3_eth_rmii = {
.data = &(struct clk_regmap_gate_data) {
.offset = ETH_CLK_CTRL,
.bit_idx = 8,
@@ -1071,14 +677,14 @@ static struct clk_regmap eth_rmii = {
.name = "eth_rmii",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &eth_rmii_div.hw
+ &c3_eth_rmii_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data mipi_dsi_meas_parent_data[] = {
+static const struct clk_parent_data c3_mipi_dsi_meas_parents[] = {
{ .fw_name = "oscin" },
{ .fw_name = "fdiv4" },
{ .fw_name = "fdiv3" },
@@ -1089,54 +695,11 @@ static const struct clk_parent_data mipi_dsi_meas_parent_data[] = {
{ .fw_name = "fdiv7" }
};
-static struct clk_regmap mipi_dsi_meas_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = VDIN_MEAS_CLK_CTRL,
- .mask = 0x7,
- .shift = 21,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "mipi_dsi_meas_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = mipi_dsi_meas_parent_data,
- .num_parents = ARRAY_SIZE(mipi_dsi_meas_parent_data),
- },
-};
-
-static struct clk_regmap mipi_dsi_meas_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = VDIN_MEAS_CLK_CTRL,
- .shift = 12,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "mipi_dsi_meas_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &mipi_dsi_meas_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(mipi_dsi_meas, VDIN_MEAS_CLK_CTRL, 21, 0x7, c3_mipi_dsi_meas_parents);
+static C3_COMP_DIV(mipi_dsi_meas, VDIN_MEAS_CLK_CTRL, 12, 7);
+static C3_COMP_GATE(mipi_dsi_meas, VDIN_MEAS_CLK_CTRL, 20);
-static struct clk_regmap mipi_dsi_meas = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = VDIN_MEAS_CLK_CTRL,
- .bit_idx = 20,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "mipi_dsi_meas",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &mipi_dsi_meas_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static const struct clk_parent_data dsi_phy_parent_data[] = {
+static const struct clk_parent_data c3_dsi_phy_parents[] = {
{ .fw_name = "gp1" },
{ .fw_name = "gp0" },
{ .fw_name = "hifi" },
@@ -1147,54 +710,11 @@ static const struct clk_parent_data dsi_phy_parent_data[] = {
{ .fw_name = "fdiv7" }
};
-static struct clk_regmap dsi_phy_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = MIPIDSI_PHY_CLK_CTRL,
- .mask = 0x7,
- .shift = 12,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "dsi_phy_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = dsi_phy_parent_data,
- .num_parents = ARRAY_SIZE(dsi_phy_parent_data),
- },
-};
-
-static struct clk_regmap dsi_phy_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = MIPIDSI_PHY_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "dsi_phy_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &dsi_phy_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(dsi_phy, MIPIDSI_PHY_CLK_CTRL, 12, 0x7, c3_dsi_phy_parents);
+static C3_COMP_DIV(dsi_phy, MIPIDSI_PHY_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(dsi_phy, MIPIDSI_PHY_CLK_CTRL, 8);
-static struct clk_regmap dsi_phy = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = MIPIDSI_PHY_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "dsi_phy",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &dsi_phy_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static const struct clk_parent_data vout_mclk_parent_data[] = {
+static const struct clk_parent_data c3_vout_mclk_parents[] = {
{ .fw_name = "fdiv2p5" },
{ .fw_name = "fdiv3" },
{ .fw_name = "fdiv4" },
@@ -1205,54 +725,11 @@ static const struct clk_parent_data vout_mclk_parent_data[] = {
{ .fw_name = "fdiv7" }
};
-static struct clk_regmap vout_mclk_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = VOUTENC_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vout_mclk_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = vout_mclk_parent_data,
- .num_parents = ARRAY_SIZE(vout_mclk_parent_data),
- },
-};
+static C3_COMP_SEL(vout_mclk, VOUTENC_CLK_CTRL, 9, 0x7, c3_vout_mclk_parents);
+static C3_COMP_DIV(vout_mclk, VOUTENC_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(vout_mclk, VOUTENC_CLK_CTRL, 8);
-static struct clk_regmap vout_mclk_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = VOUTENC_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vout_mclk_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vout_mclk_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap vout_mclk = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = VOUTENC_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vout_mclk",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vout_mclk_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static const struct clk_parent_data vout_enc_parent_data[] = {
+static const struct clk_parent_data c3_vout_enc_parents[] = {
{ .fw_name = "gp1" },
{ .fw_name = "fdiv3" },
{ .fw_name = "fdiv4" },
@@ -1263,54 +740,11 @@ static const struct clk_parent_data vout_enc_parent_data[] = {
{ .fw_name = "fdiv7" }
};
-static struct clk_regmap vout_enc_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = VOUTENC_CLK_CTRL,
- .mask = 0x7,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vout_enc_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = vout_enc_parent_data,
- .num_parents = ARRAY_SIZE(vout_enc_parent_data),
- },
-};
-
-static struct clk_regmap vout_enc_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = VOUTENC_CLK_CTRL,
- .shift = 16,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vout_enc_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vout_enc_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap vout_enc = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = VOUTENC_CLK_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vout_enc",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vout_enc_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(vout_enc, VOUTENC_CLK_CTRL, 25, 0x7, c3_vout_enc_parents);
+static C3_COMP_DIV(vout_enc, VOUTENC_CLK_CTRL, 16, 7);
+static C3_COMP_GATE(vout_enc, VOUTENC_CLK_CTRL, 24);
-static const struct clk_parent_data hcodec_pre_parent_data[] = {
+static const struct clk_parent_data c3_hcodec_pre_parents[] = {
{ .fw_name = "fdiv2p5" },
{ .fw_name = "fdiv3" },
{ .fw_name = "fdiv4" },
@@ -1321,106 +755,20 @@ static const struct clk_parent_data hcodec_pre_parent_data[] = {
{ .fw_name = "oscin" }
};
-static struct clk_regmap hcodec_0_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = VDEC_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "hcodec_0_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = hcodec_pre_parent_data,
- .num_parents = ARRAY_SIZE(hcodec_pre_parent_data),
- },
-};
+static C3_COMP_SEL(hcodec_0, VDEC_CLK_CTRL, 9, 0x7, c3_hcodec_pre_parents);
+static C3_COMP_DIV(hcodec_0, VDEC_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(hcodec_0, VDEC_CLK_CTRL, 8);
-static struct clk_regmap hcodec_0_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = VDEC_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "hcodec_0_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &hcodec_0_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(hcodec_1, VDEC3_CLK_CTRL, 9, 0x7, c3_hcodec_pre_parents);
+static C3_COMP_DIV(hcodec_1, VDEC3_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(hcodec_1, VDEC3_CLK_CTRL, 8);
-static struct clk_regmap hcodec_0 = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = VDEC_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "hcodec_0",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &hcodec_0_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
+static const struct clk_parent_data c3_hcodec_parents[] = {
+ { .hw = &c3_hcodec_0.hw },
+ { .hw = &c3_hcodec_1.hw }
};
-static struct clk_regmap hcodec_1_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = VDEC3_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "hcodec_1_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = hcodec_pre_parent_data,
- .num_parents = ARRAY_SIZE(hcodec_pre_parent_data),
- },
-};
-
-static struct clk_regmap hcodec_1_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = VDEC3_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "hcodec_1_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &hcodec_1_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap hcodec_1 = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = VDEC3_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "hcodec_1",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &hcodec_1_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static const struct clk_parent_data hcodec_parent_data[] = {
- { .hw = &hcodec_0.hw },
- { .hw = &hcodec_1.hw }
-};
-
-static struct clk_regmap hcodec = {
+static struct clk_regmap c3_hcodec = {
.data = &(struct clk_regmap_mux_data) {
.offset = VDEC3_CLK_CTRL,
.mask = 0x1,
@@ -1429,13 +777,13 @@ static struct clk_regmap hcodec = {
.hw.init = &(struct clk_init_data) {
.name = "hcodec",
.ops = &clk_regmap_mux_ops,
- .parent_data = hcodec_parent_data,
- .num_parents = ARRAY_SIZE(hcodec_parent_data),
+ .parent_data = c3_hcodec_parents,
+ .num_parents = ARRAY_SIZE(c3_hcodec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data vc9000e_parent_data[] = {
+static const struct clk_parent_data c3_vc9000e_parents[] = {
{ .fw_name = "oscin" },
{ .fw_name = "fdiv4" },
{ .fw_name = "fdiv3" },
@@ -1446,101 +794,15 @@ static const struct clk_parent_data vc9000e_parent_data[] = {
{ .fw_name = "gp0" }
};
-static struct clk_regmap vc9000e_aclk_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = VC9000E_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vc9000e_aclk_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = vc9000e_parent_data,
- .num_parents = ARRAY_SIZE(vc9000e_parent_data),
- },
-};
-
-static struct clk_regmap vc9000e_aclk_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = VC9000E_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vc9000e_aclk_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vc9000e_aclk_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap vc9000e_aclk = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = VC9000E_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vc9000e_aclk",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vc9000e_aclk_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(vc9000e_aclk, VC9000E_CLK_CTRL, 9, 0x7, c3_vc9000e_parents);
+static C3_COMP_DIV(vc9000e_aclk, VC9000E_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(vc9000e_aclk, VC9000E_CLK_CTRL, 8);
-static struct clk_regmap vc9000e_core_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = VC9000E_CLK_CTRL,
- .mask = 0x7,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vc9000e_core_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = vc9000e_parent_data,
- .num_parents = ARRAY_SIZE(vc9000e_parent_data),
- },
-};
+static C3_COMP_SEL(vc9000e_core, VC9000E_CLK_CTRL, 25, 0x7, c3_vc9000e_parents);
+static C3_COMP_DIV(vc9000e_core, VC9000E_CLK_CTRL, 16, 7);
+static C3_COMP_GATE(vc9000e_core, VC9000E_CLK_CTRL, 24);
-static struct clk_regmap vc9000e_core_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = VC9000E_CLK_CTRL,
- .shift = 16,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vc9000e_core_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vc9000e_core_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap vc9000e_core = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = VC9000E_CLK_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vc9000e_core",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vc9000e_core_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static const struct clk_parent_data csi_phy_parent_data[] = {
+static const struct clk_parent_data c3_csi_phy_parents[] = {
{ .fw_name = "fdiv2p5" },
{ .fw_name = "fdiv3" },
{ .fw_name = "fdiv4" },
@@ -1551,54 +813,11 @@ static const struct clk_parent_data csi_phy_parent_data[] = {
{ .fw_name = "oscin" }
};
-static struct clk_regmap csi_phy0_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = ISP0_CLK_CTRL,
- .mask = 0x7,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "csi_phy0_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = csi_phy_parent_data,
- .num_parents = ARRAY_SIZE(csi_phy_parent_data),
- },
-};
-
-static struct clk_regmap csi_phy0_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = ISP0_CLK_CTRL,
- .shift = 16,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "csi_phy0_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &csi_phy0_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap csi_phy0 = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = ISP0_CLK_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "csi_phy0",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &csi_phy0_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(csi_phy0, ISP0_CLK_CTRL, 25, 0x7, c3_csi_phy_parents);
+static C3_COMP_DIV(csi_phy0, ISP0_CLK_CTRL, 16, 7);
+static C3_COMP_GATE(csi_phy0, ISP0_CLK_CTRL, 24);
-static const struct clk_parent_data dewarpa_parent_data[] = {
+static const struct clk_parent_data c3_dewarpa_parents[] = {
{ .fw_name = "fdiv2p5" },
{ .fw_name = "fdiv3" },
{ .fw_name = "fdiv4" },
@@ -1609,54 +828,11 @@ static const struct clk_parent_data dewarpa_parent_data[] = {
{ .fw_name = "fdiv7" }
};
-static struct clk_regmap dewarpa_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = DEWARPA_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "dewarpa_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = dewarpa_parent_data,
- .num_parents = ARRAY_SIZE(dewarpa_parent_data),
- },
-};
-
-static struct clk_regmap dewarpa_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = DEWARPA_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "dewarpa_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &dewarpa_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap dewarpa = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = DEWARPA_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "dewarpa",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &dewarpa_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(dewarpa, DEWARPA_CLK_CTRL, 9, 0x7, c3_dewarpa_parents);
+static C3_COMP_DIV(dewarpa, DEWARPA_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(dewarpa, DEWARPA_CLK_CTRL, 8);
-static const struct clk_parent_data isp_parent_data[] = {
+static const struct clk_parent_data c3_isp_parents[] = {
{ .fw_name = "fdiv2p5" },
{ .fw_name = "fdiv3" },
{ .fw_name = "fdiv4" },
@@ -1667,54 +843,11 @@ static const struct clk_parent_data isp_parent_data[] = {
{ .fw_name = "oscin" }
};
-static struct clk_regmap isp0_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = ISP0_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "isp0_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = isp_parent_data,
- .num_parents = ARRAY_SIZE(isp_parent_data),
- },
-};
-
-static struct clk_regmap isp0_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = ISP0_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "isp0_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &isp0_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap isp0 = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = ISP0_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "isp0",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &isp0_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(isp0, ISP0_CLK_CTRL, 9, 0x7, c3_isp_parents);
+static C3_COMP_DIV(isp0, ISP0_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(isp0, ISP0_CLK_CTRL, 8);
-static const struct clk_parent_data nna_core_parent_data[] = {
+static const struct clk_parent_data c3_nna_core_parents[] = {
{ .fw_name = "oscin" },
{ .fw_name = "fdiv2p5" },
{ .fw_name = "fdiv4" },
@@ -1725,54 +858,11 @@ static const struct clk_parent_data nna_core_parent_data[] = {
{ .fw_name = "hifi" }
};
-static struct clk_regmap nna_core_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = NNA_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "nna_core_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = nna_core_parent_data,
- .num_parents = ARRAY_SIZE(nna_core_parent_data),
- },
-};
+static C3_COMP_SEL(nna_core, NNA_CLK_CTRL, 9, 0x7, c3_nna_core_parents);
+static C3_COMP_DIV(nna_core, NNA_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(nna_core, NNA_CLK_CTRL, 8);
-static struct clk_regmap nna_core_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = NNA_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "nna_core_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &nna_core_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap nna_core = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = NNA_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "nna_core",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &nna_core_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static const struct clk_parent_data ge2d_parent_data[] = {
+static const struct clk_parent_data c3_ge2d_parents[] = {
{ .fw_name = "oscin" },
{ .fw_name = "fdiv2p5" },
{ .fw_name = "fdiv3" },
@@ -1780,57 +870,14 @@ static const struct clk_parent_data ge2d_parent_data[] = {
{ .fw_name = "hifi" },
{ .fw_name = "fdiv5" },
{ .fw_name = "gp0" },
- { .hw = &rtc_clk.hw }
-};
-
-static struct clk_regmap ge2d_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = GE2D_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "ge2d_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = ge2d_parent_data,
- .num_parents = ARRAY_SIZE(ge2d_parent_data),
- },
+ { .hw = &c3_rtc_clk.hw }
};
-static struct clk_regmap ge2d_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = GE2D_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "ge2d_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &ge2d_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(ge2d, GE2D_CLK_CTRL, 9, 0x7, c3_ge2d_parents);
+static C3_COMP_DIV(ge2d, GE2D_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(ge2d, GE2D_CLK_CTRL, 8);
-static struct clk_regmap ge2d = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = GE2D_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "ge2d",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &ge2d_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static const struct clk_parent_data vapb_parent_data[] = {
+static const struct clk_parent_data c3_vapb_parents[] = {
{ .fw_name = "fdiv2p5" },
{ .fw_name = "fdiv3" },
{ .fw_name = "fdiv4" },
@@ -1841,317 +888,239 @@ static const struct clk_parent_data vapb_parent_data[] = {
{ .fw_name = "oscin" },
};
-static struct clk_regmap vapb_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = VAPB_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vapb_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = vapb_parent_data,
- .num_parents = ARRAY_SIZE(vapb_parent_data),
+static C3_COMP_SEL(vapb, VAPB_CLK_CTRL, 9, 0x7, c3_vapb_parents);
+static C3_COMP_DIV(vapb, VAPB_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(vapb, VAPB_CLK_CTRL, 8);
+
+static struct clk_hw *c3_peripherals_hw_clks[] = {
+ [CLKID_RTC_XTAL_CLKIN] = &c3_rtc_xtal_clkin.hw,
+ [CLKID_RTC_32K_DIV] = &c3_rtc_32k_div.hw,
+ [CLKID_RTC_32K_MUX] = &c3_rtc_32k_sel.hw,
+ [CLKID_RTC_32K] = &c3_rtc_32k.hw,
+ [CLKID_RTC_CLK] = &c3_rtc_clk.hw,
+ [CLKID_SYS_RESET_CTRL] = &c3_sys_reset_ctrl.hw,
+ [CLKID_SYS_PWR_CTRL] = &c3_sys_pwr_ctrl.hw,
+ [CLKID_SYS_PAD_CTRL] = &c3_sys_pad_ctrl.hw,
+ [CLKID_SYS_CTRL] = &c3_sys_ctrl.hw,
+ [CLKID_SYS_TS_PLL] = &c3_sys_ts_pll.hw,
+ [CLKID_SYS_DEV_ARB] = &c3_sys_dev_arb.hw,
+ [CLKID_SYS_MMC_PCLK] = &c3_sys_mmc_pclk.hw,
+ [CLKID_SYS_CPU_CTRL] = &c3_sys_cpu_ctrl.hw,
+ [CLKID_SYS_JTAG_CTRL] = &c3_sys_jtag_ctrl.hw,
+ [CLKID_SYS_IR_CTRL] = &c3_sys_ir_ctrl.hw,
+ [CLKID_SYS_IRQ_CTRL] = &c3_sys_irq_ctrl.hw,
+ [CLKID_SYS_MSR_CLK] = &c3_sys_msr_clk.hw,
+ [CLKID_SYS_ROM] = &c3_sys_rom.hw,
+ [CLKID_SYS_UART_F] = &c3_sys_uart_f.hw,
+ [CLKID_SYS_CPU_ARB] = &c3_sys_cpu_apb.hw,
+ [CLKID_SYS_RSA] = &c3_sys_rsa.hw,
+ [CLKID_SYS_SAR_ADC] = &c3_sys_sar_adc.hw,
+ [CLKID_SYS_STARTUP] = &c3_sys_startup.hw,
+ [CLKID_SYS_SECURE] = &c3_sys_secure.hw,
+ [CLKID_SYS_SPIFC] = &c3_sys_spifc.hw,
+ [CLKID_SYS_NNA] = &c3_sys_nna.hw,
+ [CLKID_SYS_ETH_MAC] = &c3_sys_eth_mac.hw,
+ [CLKID_SYS_GIC] = &c3_sys_gic.hw,
+ [CLKID_SYS_RAMA] = &c3_sys_rama.hw,
+ [CLKID_SYS_BIG_NIC] = &c3_sys_big_nic.hw,
+ [CLKID_SYS_RAMB] = &c3_sys_ramb.hw,
+ [CLKID_SYS_AUDIO_PCLK] = &c3_sys_audio_pclk.hw,
+ [CLKID_SYS_PWM_KL] = &c3_sys_pwm_kl.hw,
+ [CLKID_SYS_PWM_IJ] = &c3_sys_pwm_ij.hw,
+ [CLKID_SYS_USB] = &c3_sys_usb.hw,
+ [CLKID_SYS_SD_EMMC_A] = &c3_sys_sd_emmc_a.hw,
+ [CLKID_SYS_SD_EMMC_C] = &c3_sys_sd_emmc_c.hw,
+ [CLKID_SYS_PWM_AB] = &c3_sys_pwm_ab.hw,
+ [CLKID_SYS_PWM_CD] = &c3_sys_pwm_cd.hw,
+ [CLKID_SYS_PWM_EF] = &c3_sys_pwm_ef.hw,
+ [CLKID_SYS_PWM_GH] = &c3_sys_pwm_gh.hw,
+ [CLKID_SYS_SPICC_1] = &c3_sys_spicc_1.hw,
+ [CLKID_SYS_SPICC_0] = &c3_sys_spicc_0.hw,
+ [CLKID_SYS_UART_A] = &c3_sys_uart_a.hw,
+ [CLKID_SYS_UART_B] = &c3_sys_uart_b.hw,
+ [CLKID_SYS_UART_C] = &c3_sys_uart_c.hw,
+ [CLKID_SYS_UART_D] = &c3_sys_uart_d.hw,
+ [CLKID_SYS_UART_E] = &c3_sys_uart_e.hw,
+ [CLKID_SYS_I2C_M_A] = &c3_sys_i2c_m_a.hw,
+ [CLKID_SYS_I2C_M_B] = &c3_sys_i2c_m_b.hw,
+ [CLKID_SYS_I2C_M_C] = &c3_sys_i2c_m_c.hw,
+ [CLKID_SYS_I2C_M_D] = &c3_sys_i2c_m_d.hw,
+ [CLKID_SYS_I2S_S_A] = &c3_sys_i2c_s_a.hw,
+ [CLKID_SYS_RTC] = &c3_sys_rtc.hw,
+ [CLKID_SYS_GE2D] = &c3_sys_ge2d.hw,
+ [CLKID_SYS_ISP] = &c3_sys_isp.hw,
+ [CLKID_SYS_GPV_ISP_NIC] = &c3_sys_gpv_isp_nic.hw,
+ [CLKID_SYS_GPV_CVE_NIC] = &c3_sys_gpv_cve_nic.hw,
+ [CLKID_SYS_MIPI_DSI_HOST] = &c3_sys_mipi_dsi_host.hw,
+ [CLKID_SYS_MIPI_DSI_PHY] = &c3_sys_mipi_dsi_phy.hw,
+ [CLKID_SYS_ETH_PHY] = &c3_sys_eth_phy.hw,
+ [CLKID_SYS_ACODEC] = &c3_sys_acodec.hw,
+ [CLKID_SYS_DWAP] = &c3_sys_dwap.hw,
+ [CLKID_SYS_DOS] = &c3_sys_dos.hw,
+ [CLKID_SYS_CVE] = &c3_sys_cve.hw,
+ [CLKID_SYS_VOUT] = &c3_sys_vout.hw,
+ [CLKID_SYS_VC9000E] = &c3_sys_vc9000e.hw,
+ [CLKID_SYS_PWM_MN] = &c3_sys_pwm_mn.hw,
+ [CLKID_SYS_SD_EMMC_B] = &c3_sys_sd_emmc_b.hw,
+ [CLKID_AXI_SYS_NIC] = &c3_axi_sys_nic.hw,
+ [CLKID_AXI_ISP_NIC] = &c3_axi_isp_nic.hw,
+ [CLKID_AXI_CVE_NIC] = &c3_axi_cve_nic.hw,
+ [CLKID_AXI_RAMB] = &c3_axi_ramb.hw,
+ [CLKID_AXI_RAMA] = &c3_axi_rama.hw,
+ [CLKID_AXI_CPU_DMC] = &c3_axi_cpu_dmc.hw,
+ [CLKID_AXI_NIC] = &c3_axi_nic.hw,
+ [CLKID_AXI_DMA] = &c3_axi_dma.hw,
+ [CLKID_AXI_MUX_NIC] = &c3_axi_mux_nic.hw,
+ [CLKID_AXI_CVE] = &c3_axi_cve.hw,
+ [CLKID_AXI_DEV1_DMC] = &c3_axi_dev1_dmc.hw,
+ [CLKID_AXI_DEV0_DMC] = &c3_axi_dev0_dmc.hw,
+ [CLKID_AXI_DSP_DMC] = &c3_axi_dsp_dmc.hw,
+ [CLKID_12_24M_IN] = &c3_clk_12_24m_in.hw,
+ [CLKID_12M_24M] = &c3_clk_12_24m.hw,
+ [CLKID_FCLK_25M_DIV] = &c3_fclk_25m_div.hw,
+ [CLKID_FCLK_25M] = &c3_fclk_25m.hw,
+ [CLKID_GEN_SEL] = &c3_gen_sel.hw,
+ [CLKID_GEN_DIV] = &c3_gen_div.hw,
+ [CLKID_GEN] = &c3_gen.hw,
+ [CLKID_SARADC_SEL] = &c3_saradc_sel.hw,
+ [CLKID_SARADC_DIV] = &c3_saradc_div.hw,
+ [CLKID_SARADC] = &c3_saradc.hw,
+ [CLKID_PWM_A_SEL] = &c3_pwm_a_sel.hw,
+ [CLKID_PWM_A_DIV] = &c3_pwm_a_div.hw,
+ [CLKID_PWM_A] = &c3_pwm_a.hw,
+ [CLKID_PWM_B_SEL] = &c3_pwm_b_sel.hw,
+ [CLKID_PWM_B_DIV] = &c3_pwm_b_div.hw,
+ [CLKID_PWM_B] = &c3_pwm_b.hw,
+ [CLKID_PWM_C_SEL] = &c3_pwm_c_sel.hw,
+ [CLKID_PWM_C_DIV] = &c3_pwm_c_div.hw,
+ [CLKID_PWM_C] = &c3_pwm_c.hw,
+ [CLKID_PWM_D_SEL] = &c3_pwm_d_sel.hw,
+ [CLKID_PWM_D_DIV] = &c3_pwm_d_div.hw,
+ [CLKID_PWM_D] = &c3_pwm_d.hw,
+ [CLKID_PWM_E_SEL] = &c3_pwm_e_sel.hw,
+ [CLKID_PWM_E_DIV] = &c3_pwm_e_div.hw,
+ [CLKID_PWM_E] = &c3_pwm_e.hw,
+ [CLKID_PWM_F_SEL] = &c3_pwm_f_sel.hw,
+ [CLKID_PWM_F_DIV] = &c3_pwm_f_div.hw,
+ [CLKID_PWM_F] = &c3_pwm_f.hw,
+ [CLKID_PWM_G_SEL] = &c3_pwm_g_sel.hw,
+ [CLKID_PWM_G_DIV] = &c3_pwm_g_div.hw,
+ [CLKID_PWM_G] = &c3_pwm_g.hw,
+ [CLKID_PWM_H_SEL] = &c3_pwm_h_sel.hw,
+ [CLKID_PWM_H_DIV] = &c3_pwm_h_div.hw,
+ [CLKID_PWM_H] = &c3_pwm_h.hw,
+ [CLKID_PWM_I_SEL] = &c3_pwm_i_sel.hw,
+ [CLKID_PWM_I_DIV] = &c3_pwm_i_div.hw,
+ [CLKID_PWM_I] = &c3_pwm_i.hw,
+ [CLKID_PWM_J_SEL] = &c3_pwm_j_sel.hw,
+ [CLKID_PWM_J_DIV] = &c3_pwm_j_div.hw,
+ [CLKID_PWM_J] = &c3_pwm_j.hw,
+ [CLKID_PWM_K_SEL] = &c3_pwm_k_sel.hw,
+ [CLKID_PWM_K_DIV] = &c3_pwm_k_div.hw,
+ [CLKID_PWM_K] = &c3_pwm_k.hw,
+ [CLKID_PWM_L_SEL] = &c3_pwm_l_sel.hw,
+ [CLKID_PWM_L_DIV] = &c3_pwm_l_div.hw,
+ [CLKID_PWM_L] = &c3_pwm_l.hw,
+ [CLKID_PWM_M_SEL] = &c3_pwm_m_sel.hw,
+ [CLKID_PWM_M_DIV] = &c3_pwm_m_div.hw,
+ [CLKID_PWM_M] = &c3_pwm_m.hw,
+ [CLKID_PWM_N_SEL] = &c3_pwm_n_sel.hw,
+ [CLKID_PWM_N_DIV] = &c3_pwm_n_div.hw,
+ [CLKID_PWM_N] = &c3_pwm_n.hw,
+ [CLKID_SPICC_A_SEL] = &c3_spicc_a_sel.hw,
+ [CLKID_SPICC_A_DIV] = &c3_spicc_a_div.hw,
+ [CLKID_SPICC_A] = &c3_spicc_a.hw,
+ [CLKID_SPICC_B_SEL] = &c3_spicc_b_sel.hw,
+ [CLKID_SPICC_B_DIV] = &c3_spicc_b_div.hw,
+ [CLKID_SPICC_B] = &c3_spicc_b.hw,
+ [CLKID_SPIFC_SEL] = &c3_spifc_sel.hw,
+ [CLKID_SPIFC_DIV] = &c3_spifc_div.hw,
+ [CLKID_SPIFC] = &c3_spifc.hw,
+ [CLKID_SD_EMMC_A_SEL] = &c3_sd_emmc_a_sel.hw,
+ [CLKID_SD_EMMC_A_DIV] = &c3_sd_emmc_a_div.hw,
+ [CLKID_SD_EMMC_A] = &c3_sd_emmc_a.hw,
+ [CLKID_SD_EMMC_B_SEL] = &c3_sd_emmc_b_sel.hw,
+ [CLKID_SD_EMMC_B_DIV] = &c3_sd_emmc_b_div.hw,
+ [CLKID_SD_EMMC_B] = &c3_sd_emmc_b.hw,
+ [CLKID_SD_EMMC_C_SEL] = &c3_sd_emmc_c_sel.hw,
+ [CLKID_SD_EMMC_C_DIV] = &c3_sd_emmc_c_div.hw,
+ [CLKID_SD_EMMC_C] = &c3_sd_emmc_c.hw,
+ [CLKID_TS_DIV] = &c3_ts_div.hw,
+ [CLKID_TS] = &c3_ts.hw,
+ [CLKID_ETH_125M_DIV] = &c3_eth_125m_div.hw,
+ [CLKID_ETH_125M] = &c3_eth_125m.hw,
+ [CLKID_ETH_RMII_DIV] = &c3_eth_rmii_div.hw,
+ [CLKID_ETH_RMII] = &c3_eth_rmii.hw,
+ [CLKID_MIPI_DSI_MEAS_SEL] = &c3_mipi_dsi_meas_sel.hw,
+ [CLKID_MIPI_DSI_MEAS_DIV] = &c3_mipi_dsi_meas_div.hw,
+ [CLKID_MIPI_DSI_MEAS] = &c3_mipi_dsi_meas.hw,
+ [CLKID_DSI_PHY_SEL] = &c3_dsi_phy_sel.hw,
+ [CLKID_DSI_PHY_DIV] = &c3_dsi_phy_div.hw,
+ [CLKID_DSI_PHY] = &c3_dsi_phy.hw,
+ [CLKID_VOUT_MCLK_SEL] = &c3_vout_mclk_sel.hw,
+ [CLKID_VOUT_MCLK_DIV] = &c3_vout_mclk_div.hw,
+ [CLKID_VOUT_MCLK] = &c3_vout_mclk.hw,
+ [CLKID_VOUT_ENC_SEL] = &c3_vout_enc_sel.hw,
+ [CLKID_VOUT_ENC_DIV] = &c3_vout_enc_div.hw,
+ [CLKID_VOUT_ENC] = &c3_vout_enc.hw,
+ [CLKID_HCODEC_0_SEL] = &c3_hcodec_0_sel.hw,
+ [CLKID_HCODEC_0_DIV] = &c3_hcodec_0_div.hw,
+ [CLKID_HCODEC_0] = &c3_hcodec_0.hw,
+ [CLKID_HCODEC_1_SEL] = &c3_hcodec_1_sel.hw,
+ [CLKID_HCODEC_1_DIV] = &c3_hcodec_1_div.hw,
+ [CLKID_HCODEC_1] = &c3_hcodec_1.hw,
+ [CLKID_HCODEC] = &c3_hcodec.hw,
+ [CLKID_VC9000E_ACLK_SEL] = &c3_vc9000e_aclk_sel.hw,
+ [CLKID_VC9000E_ACLK_DIV] = &c3_vc9000e_aclk_div.hw,
+ [CLKID_VC9000E_ACLK] = &c3_vc9000e_aclk.hw,
+ [CLKID_VC9000E_CORE_SEL] = &c3_vc9000e_core_sel.hw,
+ [CLKID_VC9000E_CORE_DIV] = &c3_vc9000e_core_div.hw,
+ [CLKID_VC9000E_CORE] = &c3_vc9000e_core.hw,
+ [CLKID_CSI_PHY0_SEL] = &c3_csi_phy0_sel.hw,
+ [CLKID_CSI_PHY0_DIV] = &c3_csi_phy0_div.hw,
+ [CLKID_CSI_PHY0] = &c3_csi_phy0.hw,
+ [CLKID_DEWARPA_SEL] = &c3_dewarpa_sel.hw,
+ [CLKID_DEWARPA_DIV] = &c3_dewarpa_div.hw,
+ [CLKID_DEWARPA] = &c3_dewarpa.hw,
+ [CLKID_ISP0_SEL] = &c3_isp0_sel.hw,
+ [CLKID_ISP0_DIV] = &c3_isp0_div.hw,
+ [CLKID_ISP0] = &c3_isp0.hw,
+ [CLKID_NNA_CORE_SEL] = &c3_nna_core_sel.hw,
+ [CLKID_NNA_CORE_DIV] = &c3_nna_core_div.hw,
+ [CLKID_NNA_CORE] = &c3_nna_core.hw,
+ [CLKID_GE2D_SEL] = &c3_ge2d_sel.hw,
+ [CLKID_GE2D_DIV] = &c3_ge2d_div.hw,
+ [CLKID_GE2D] = &c3_ge2d.hw,
+ [CLKID_VAPB_SEL] = &c3_vapb_sel.hw,
+ [CLKID_VAPB_DIV] = &c3_vapb_div.hw,
+ [CLKID_VAPB] = &c3_vapb.hw,
+};
+
+static const struct meson_clkc_data c3_peripherals_clkc_data = {
+ .hw_clks = {
+ .hws = c3_peripherals_hw_clks,
+ .num = ARRAY_SIZE(c3_peripherals_hw_clks),
},
};
-static struct clk_regmap vapb_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = VAPB_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vapb_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vapb_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap vapb = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = VAPB_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vapb",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vapb_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_hw *c3_periphs_hw_clks[] = {
- [CLKID_RTC_XTAL_CLKIN] = &rtc_xtal_clkin.hw,
- [CLKID_RTC_32K_DIV] = &rtc_32k_div.hw,
- [CLKID_RTC_32K_MUX] = &rtc_32k_mux.hw,
- [CLKID_RTC_32K] = &rtc_32k.hw,
- [CLKID_RTC_CLK] = &rtc_clk.hw,
- [CLKID_SYS_RESET_CTRL] = &sys_reset_ctrl.hw,
- [CLKID_SYS_PWR_CTRL] = &sys_pwr_ctrl.hw,
- [CLKID_SYS_PAD_CTRL] = &sys_pad_ctrl.hw,
- [CLKID_SYS_CTRL] = &sys_ctrl.hw,
- [CLKID_SYS_TS_PLL] = &sys_ts_pll.hw,
- [CLKID_SYS_DEV_ARB] = &sys_dev_arb.hw,
- [CLKID_SYS_MMC_PCLK] = &sys_mmc_pclk.hw,
- [CLKID_SYS_CPU_CTRL] = &sys_cpu_ctrl.hw,
- [CLKID_SYS_JTAG_CTRL] = &sys_jtag_ctrl.hw,
- [CLKID_SYS_IR_CTRL] = &sys_ir_ctrl.hw,
- [CLKID_SYS_IRQ_CTRL] = &sys_irq_ctrl.hw,
- [CLKID_SYS_MSR_CLK] = &sys_msr_clk.hw,
- [CLKID_SYS_ROM] = &sys_rom.hw,
- [CLKID_SYS_UART_F] = &sys_uart_f.hw,
- [CLKID_SYS_CPU_ARB] = &sys_cpu_apb.hw,
- [CLKID_SYS_RSA] = &sys_rsa.hw,
- [CLKID_SYS_SAR_ADC] = &sys_sar_adc.hw,
- [CLKID_SYS_STARTUP] = &sys_startup.hw,
- [CLKID_SYS_SECURE] = &sys_secure.hw,
- [CLKID_SYS_SPIFC] = &sys_spifc.hw,
- [CLKID_SYS_NNA] = &sys_nna.hw,
- [CLKID_SYS_ETH_MAC] = &sys_eth_mac.hw,
- [CLKID_SYS_GIC] = &sys_gic.hw,
- [CLKID_SYS_RAMA] = &sys_rama.hw,
- [CLKID_SYS_BIG_NIC] = &sys_big_nic.hw,
- [CLKID_SYS_RAMB] = &sys_ramb.hw,
- [CLKID_SYS_AUDIO_PCLK] = &sys_audio_pclk.hw,
- [CLKID_SYS_PWM_KL] = &sys_pwm_kl.hw,
- [CLKID_SYS_PWM_IJ] = &sys_pwm_ij.hw,
- [CLKID_SYS_USB] = &sys_usb.hw,
- [CLKID_SYS_SD_EMMC_A] = &sys_sd_emmc_a.hw,
- [CLKID_SYS_SD_EMMC_C] = &sys_sd_emmc_c.hw,
- [CLKID_SYS_PWM_AB] = &sys_pwm_ab.hw,
- [CLKID_SYS_PWM_CD] = &sys_pwm_cd.hw,
- [CLKID_SYS_PWM_EF] = &sys_pwm_ef.hw,
- [CLKID_SYS_PWM_GH] = &sys_pwm_gh.hw,
- [CLKID_SYS_SPICC_1] = &sys_spicc_1.hw,
- [CLKID_SYS_SPICC_0] = &sys_spicc_0.hw,
- [CLKID_SYS_UART_A] = &sys_uart_a.hw,
- [CLKID_SYS_UART_B] = &sys_uart_b.hw,
- [CLKID_SYS_UART_C] = &sys_uart_c.hw,
- [CLKID_SYS_UART_D] = &sys_uart_d.hw,
- [CLKID_SYS_UART_E] = &sys_uart_e.hw,
- [CLKID_SYS_I2C_M_A] = &sys_i2c_m_a.hw,
- [CLKID_SYS_I2C_M_B] = &sys_i2c_m_b.hw,
- [CLKID_SYS_I2C_M_C] = &sys_i2c_m_c.hw,
- [CLKID_SYS_I2C_M_D] = &sys_i2c_m_d.hw,
- [CLKID_SYS_I2S_S_A] = &sys_i2c_s_a.hw,
- [CLKID_SYS_RTC] = &sys_rtc.hw,
- [CLKID_SYS_GE2D] = &sys_ge2d.hw,
- [CLKID_SYS_ISP] = &sys_isp.hw,
- [CLKID_SYS_GPV_ISP_NIC] = &sys_gpv_isp_nic.hw,
- [CLKID_SYS_GPV_CVE_NIC] = &sys_gpv_cve_nic.hw,
- [CLKID_SYS_MIPI_DSI_HOST] = &sys_mipi_dsi_host.hw,
- [CLKID_SYS_MIPI_DSI_PHY] = &sys_mipi_dsi_phy.hw,
- [CLKID_SYS_ETH_PHY] = &sys_eth_phy.hw,
- [CLKID_SYS_ACODEC] = &sys_acodec.hw,
- [CLKID_SYS_DWAP] = &sys_dwap.hw,
- [CLKID_SYS_DOS] = &sys_dos.hw,
- [CLKID_SYS_CVE] = &sys_cve.hw,
- [CLKID_SYS_VOUT] = &sys_vout.hw,
- [CLKID_SYS_VC9000E] = &sys_vc9000e.hw,
- [CLKID_SYS_PWM_MN] = &sys_pwm_mn.hw,
- [CLKID_SYS_SD_EMMC_B] = &sys_sd_emmc_b.hw,
- [CLKID_AXI_SYS_NIC] = &axi_sys_nic.hw,
- [CLKID_AXI_ISP_NIC] = &axi_isp_nic.hw,
- [CLKID_AXI_CVE_NIC] = &axi_cve_nic.hw,
- [CLKID_AXI_RAMB] = &axi_ramb.hw,
- [CLKID_AXI_RAMA] = &axi_rama.hw,
- [CLKID_AXI_CPU_DMC] = &axi_cpu_dmc.hw,
- [CLKID_AXI_NIC] = &axi_nic.hw,
- [CLKID_AXI_DMA] = &axi_dma.hw,
- [CLKID_AXI_MUX_NIC] = &axi_mux_nic.hw,
- [CLKID_AXI_CVE] = &axi_cve.hw,
- [CLKID_AXI_DEV1_DMC] = &axi_dev1_dmc.hw,
- [CLKID_AXI_DEV0_DMC] = &axi_dev0_dmc.hw,
- [CLKID_AXI_DSP_DMC] = &axi_dsp_dmc.hw,
- [CLKID_12_24M_IN] = &clk_12_24m_in.hw,
- [CLKID_12M_24M] = &clk_12_24m.hw,
- [CLKID_FCLK_25M_DIV] = &fclk_25m_div.hw,
- [CLKID_FCLK_25M] = &fclk_25m.hw,
- [CLKID_GEN_SEL] = &gen_sel.hw,
- [CLKID_GEN_DIV] = &gen_div.hw,
- [CLKID_GEN] = &gen.hw,
- [CLKID_SARADC_SEL] = &saradc_sel.hw,
- [CLKID_SARADC_DIV] = &saradc_div.hw,
- [CLKID_SARADC] = &saradc.hw,
- [CLKID_PWM_A_SEL] = &pwm_a_sel.hw,
- [CLKID_PWM_A_DIV] = &pwm_a_div.hw,
- [CLKID_PWM_A] = &pwm_a.hw,
- [CLKID_PWM_B_SEL] = &pwm_b_sel.hw,
- [CLKID_PWM_B_DIV] = &pwm_b_div.hw,
- [CLKID_PWM_B] = &pwm_b.hw,
- [CLKID_PWM_C_SEL] = &pwm_c_sel.hw,
- [CLKID_PWM_C_DIV] = &pwm_c_div.hw,
- [CLKID_PWM_C] = &pwm_c.hw,
- [CLKID_PWM_D_SEL] = &pwm_d_sel.hw,
- [CLKID_PWM_D_DIV] = &pwm_d_div.hw,
- [CLKID_PWM_D] = &pwm_d.hw,
- [CLKID_PWM_E_SEL] = &pwm_e_sel.hw,
- [CLKID_PWM_E_DIV] = &pwm_e_div.hw,
- [CLKID_PWM_E] = &pwm_e.hw,
- [CLKID_PWM_F_SEL] = &pwm_f_sel.hw,
- [CLKID_PWM_F_DIV] = &pwm_f_div.hw,
- [CLKID_PWM_F] = &pwm_f.hw,
- [CLKID_PWM_G_SEL] = &pwm_g_sel.hw,
- [CLKID_PWM_G_DIV] = &pwm_g_div.hw,
- [CLKID_PWM_G] = &pwm_g.hw,
- [CLKID_PWM_H_SEL] = &pwm_h_sel.hw,
- [CLKID_PWM_H_DIV] = &pwm_h_div.hw,
- [CLKID_PWM_H] = &pwm_h.hw,
- [CLKID_PWM_I_SEL] = &pwm_i_sel.hw,
- [CLKID_PWM_I_DIV] = &pwm_i_div.hw,
- [CLKID_PWM_I] = &pwm_i.hw,
- [CLKID_PWM_J_SEL] = &pwm_j_sel.hw,
- [CLKID_PWM_J_DIV] = &pwm_j_div.hw,
- [CLKID_PWM_J] = &pwm_j.hw,
- [CLKID_PWM_K_SEL] = &pwm_k_sel.hw,
- [CLKID_PWM_K_DIV] = &pwm_k_div.hw,
- [CLKID_PWM_K] = &pwm_k.hw,
- [CLKID_PWM_L_SEL] = &pwm_l_sel.hw,
- [CLKID_PWM_L_DIV] = &pwm_l_div.hw,
- [CLKID_PWM_L] = &pwm_l.hw,
- [CLKID_PWM_M_SEL] = &pwm_m_sel.hw,
- [CLKID_PWM_M_DIV] = &pwm_m_div.hw,
- [CLKID_PWM_M] = &pwm_m.hw,
- [CLKID_PWM_N_SEL] = &pwm_n_sel.hw,
- [CLKID_PWM_N_DIV] = &pwm_n_div.hw,
- [CLKID_PWM_N] = &pwm_n.hw,
- [CLKID_SPICC_A_SEL] = &spicc_a_sel.hw,
- [CLKID_SPICC_A_DIV] = &spicc_a_div.hw,
- [CLKID_SPICC_A] = &spicc_a.hw,
- [CLKID_SPICC_B_SEL] = &spicc_b_sel.hw,
- [CLKID_SPICC_B_DIV] = &spicc_b_div.hw,
- [CLKID_SPICC_B] = &spicc_b.hw,
- [CLKID_SPIFC_SEL] = &spifc_sel.hw,
- [CLKID_SPIFC_DIV] = &spifc_div.hw,
- [CLKID_SPIFC] = &spifc.hw,
- [CLKID_SD_EMMC_A_SEL] = &sd_emmc_a_sel.hw,
- [CLKID_SD_EMMC_A_DIV] = &sd_emmc_a_div.hw,
- [CLKID_SD_EMMC_A] = &sd_emmc_a.hw,
- [CLKID_SD_EMMC_B_SEL] = &sd_emmc_b_sel.hw,
- [CLKID_SD_EMMC_B_DIV] = &sd_emmc_b_div.hw,
- [CLKID_SD_EMMC_B] = &sd_emmc_b.hw,
- [CLKID_SD_EMMC_C_SEL] = &sd_emmc_c_sel.hw,
- [CLKID_SD_EMMC_C_DIV] = &sd_emmc_c_div.hw,
- [CLKID_SD_EMMC_C] = &sd_emmc_c.hw,
- [CLKID_TS_DIV] = &ts_div.hw,
- [CLKID_TS] = &ts.hw,
- [CLKID_ETH_125M_DIV] = &eth_125m_div.hw,
- [CLKID_ETH_125M] = &eth_125m.hw,
- [CLKID_ETH_RMII_DIV] = &eth_rmii_div.hw,
- [CLKID_ETH_RMII] = &eth_rmii.hw,
- [CLKID_MIPI_DSI_MEAS_SEL] = &mipi_dsi_meas_sel.hw,
- [CLKID_MIPI_DSI_MEAS_DIV] = &mipi_dsi_meas_div.hw,
- [CLKID_MIPI_DSI_MEAS] = &mipi_dsi_meas.hw,
- [CLKID_DSI_PHY_SEL] = &dsi_phy_sel.hw,
- [CLKID_DSI_PHY_DIV] = &dsi_phy_div.hw,
- [CLKID_DSI_PHY] = &dsi_phy.hw,
- [CLKID_VOUT_MCLK_SEL] = &vout_mclk_sel.hw,
- [CLKID_VOUT_MCLK_DIV] = &vout_mclk_div.hw,
- [CLKID_VOUT_MCLK] = &vout_mclk.hw,
- [CLKID_VOUT_ENC_SEL] = &vout_enc_sel.hw,
- [CLKID_VOUT_ENC_DIV] = &vout_enc_div.hw,
- [CLKID_VOUT_ENC] = &vout_enc.hw,
- [CLKID_HCODEC_0_SEL] = &hcodec_0_sel.hw,
- [CLKID_HCODEC_0_DIV] = &hcodec_0_div.hw,
- [CLKID_HCODEC_0] = &hcodec_0.hw,
- [CLKID_HCODEC_1_SEL] = &hcodec_1_sel.hw,
- [CLKID_HCODEC_1_DIV] = &hcodec_1_div.hw,
- [CLKID_HCODEC_1] = &hcodec_1.hw,
- [CLKID_HCODEC] = &hcodec.hw,
- [CLKID_VC9000E_ACLK_SEL] = &vc9000e_aclk_sel.hw,
- [CLKID_VC9000E_ACLK_DIV] = &vc9000e_aclk_div.hw,
- [CLKID_VC9000E_ACLK] = &vc9000e_aclk.hw,
- [CLKID_VC9000E_CORE_SEL] = &vc9000e_core_sel.hw,
- [CLKID_VC9000E_CORE_DIV] = &vc9000e_core_div.hw,
- [CLKID_VC9000E_CORE] = &vc9000e_core.hw,
- [CLKID_CSI_PHY0_SEL] = &csi_phy0_sel.hw,
- [CLKID_CSI_PHY0_DIV] = &csi_phy0_div.hw,
- [CLKID_CSI_PHY0] = &csi_phy0.hw,
- [CLKID_DEWARPA_SEL] = &dewarpa_sel.hw,
- [CLKID_DEWARPA_DIV] = &dewarpa_div.hw,
- [CLKID_DEWARPA] = &dewarpa.hw,
- [CLKID_ISP0_SEL] = &isp0_sel.hw,
- [CLKID_ISP0_DIV] = &isp0_div.hw,
- [CLKID_ISP0] = &isp0.hw,
- [CLKID_NNA_CORE_SEL] = &nna_core_sel.hw,
- [CLKID_NNA_CORE_DIV] = &nna_core_div.hw,
- [CLKID_NNA_CORE] = &nna_core.hw,
- [CLKID_GE2D_SEL] = &ge2d_sel.hw,
- [CLKID_GE2D_DIV] = &ge2d_div.hw,
- [CLKID_GE2D] = &ge2d.hw,
- [CLKID_VAPB_SEL] = &vapb_sel.hw,
- [CLKID_VAPB_DIV] = &vapb_div.hw,
- [CLKID_VAPB] = &vapb.hw,
-};
-
-static const struct regmap_config clkc_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = NNA_CLK_CTRL,
-};
-
-static struct meson_clk_hw_data c3_periphs_clks = {
- .hws = c3_periphs_hw_clks,
- .num = ARRAY_SIZE(c3_periphs_hw_clks),
-};
-
-static int c3_peripherals_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct regmap *regmap;
- void __iomem *base;
- int clkid, ret;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- regmap = devm_regmap_init_mmio(dev, base, &clkc_regmap_config);
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
-
- for (clkid = 0; clkid < c3_periphs_clks.num; clkid++) {
- /* array might be sparse */
- if (!c3_periphs_clks.hws[clkid])
- continue;
-
- ret = devm_clk_hw_register(dev, c3_periphs_clks.hws[clkid]);
- if (ret) {
- dev_err(dev, "Clock registration failed\n");
- return ret;
- }
- }
-
- return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get,
- &c3_periphs_clks);
-}
-
static const struct of_device_id c3_peripherals_clkc_match_table[] = {
{
.compatible = "amlogic,c3-peripherals-clkc",
+ .data = &c3_peripherals_clkc_data,
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, c3_peripherals_clkc_match_table);
-static struct platform_driver c3_peripherals_driver = {
- .probe = c3_peripherals_probe,
+static struct platform_driver c3_peripherals_clkc_driver = {
+ .probe = meson_clkc_mmio_probe,
.driver = {
.name = "c3-peripherals-clkc",
.of_match_table = c3_peripherals_clkc_match_table,
},
};
-module_platform_driver(c3_peripherals_driver);
+module_platform_driver(c3_peripherals_clkc_driver);
MODULE_DESCRIPTION("Amlogic C3 Peripherals Clock Controller driver");
MODULE_AUTHOR("Chuan Liu <chuan.liu@amlogic.com>");
diff --git a/drivers/clk/meson/c3-pll.c b/drivers/clk/meson/c3-pll.c
index 2c5594b8e49a..dd047d17488c 100644
--- a/drivers/clk/meson/c3-pll.c
+++ b/drivers/clk/meson/c3-pll.c
@@ -34,7 +34,7 @@
#define ANACTRL_MPLL_CTRL3 0x18c
#define ANACTRL_MPLL_CTRL4 0x190
-static struct clk_regmap fclk_50m_en = {
+static struct clk_regmap c3_fclk_50m_en = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_FIXPLL_CTRL4,
.bit_idx = 0,
@@ -49,20 +49,20 @@ static struct clk_regmap fclk_50m_en = {
},
};
-static struct clk_fixed_factor fclk_50m = {
+static struct clk_fixed_factor c3_fclk_50m = {
.mult = 1,
.div = 40,
.hw.init = &(struct clk_init_data) {
.name = "fclk_50m",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_50m_en.hw
+ &c3_fclk_50m_en.hw
},
.num_parents = 1,
},
};
-static struct clk_fixed_factor fclk_div2_div = {
+static struct clk_fixed_factor c3_fclk_div2_div = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data) {
@@ -75,7 +75,7 @@ static struct clk_fixed_factor fclk_div2_div = {
},
};
-static struct clk_regmap fclk_div2 = {
+static struct clk_regmap c3_fclk_div2 = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_FIXPLL_CTRL4,
.bit_idx = 24,
@@ -84,13 +84,13 @@ static struct clk_regmap fclk_div2 = {
.name = "fclk_div2",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div2_div.hw
+ &c3_fclk_div2_div.hw
},
.num_parents = 1,
},
};
-static struct clk_fixed_factor fclk_div2p5_div = {
+static struct clk_fixed_factor c3_fclk_div2p5_div = {
.mult = 2,
.div = 5,
.hw.init = &(struct clk_init_data) {
@@ -103,7 +103,7 @@ static struct clk_fixed_factor fclk_div2p5_div = {
},
};
-static struct clk_regmap fclk_div2p5 = {
+static struct clk_regmap c3_fclk_div2p5 = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_FIXPLL_CTRL4,
.bit_idx = 4,
@@ -112,13 +112,13 @@ static struct clk_regmap fclk_div2p5 = {
.name = "fclk_div2p5",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div2p5_div.hw
+ &c3_fclk_div2p5_div.hw
},
.num_parents = 1,
},
};
-static struct clk_fixed_factor fclk_div3_div = {
+static struct clk_fixed_factor c3_fclk_div3_div = {
.mult = 1,
.div = 3,
.hw.init = &(struct clk_init_data) {
@@ -131,7 +131,7 @@ static struct clk_fixed_factor fclk_div3_div = {
},
};
-static struct clk_regmap fclk_div3 = {
+static struct clk_regmap c3_fclk_div3 = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_FIXPLL_CTRL4,
.bit_idx = 20,
@@ -140,13 +140,13 @@ static struct clk_regmap fclk_div3 = {
.name = "fclk_div3",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div3_div.hw
+ &c3_fclk_div3_div.hw
},
.num_parents = 1,
},
};
-static struct clk_fixed_factor fclk_div4_div = {
+static struct clk_fixed_factor c3_fclk_div4_div = {
.mult = 1,
.div = 4,
.hw.init = &(struct clk_init_data) {
@@ -159,7 +159,7 @@ static struct clk_fixed_factor fclk_div4_div = {
},
};
-static struct clk_regmap fclk_div4 = {
+static struct clk_regmap c3_fclk_div4 = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_FIXPLL_CTRL4,
.bit_idx = 21,
@@ -168,13 +168,13 @@ static struct clk_regmap fclk_div4 = {
.name = "fclk_div4",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div4_div.hw
+ &c3_fclk_div4_div.hw
},
.num_parents = 1,
},
};
-static struct clk_fixed_factor fclk_div5_div = {
+static struct clk_fixed_factor c3_fclk_div5_div = {
.mult = 1,
.div = 5,
.hw.init = &(struct clk_init_data) {
@@ -187,7 +187,7 @@ static struct clk_fixed_factor fclk_div5_div = {
},
};
-static struct clk_regmap fclk_div5 = {
+static struct clk_regmap c3_fclk_div5 = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_FIXPLL_CTRL4,
.bit_idx = 22,
@@ -196,13 +196,13 @@ static struct clk_regmap fclk_div5 = {
.name = "fclk_div5",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div5_div.hw
+ &c3_fclk_div5_div.hw
},
.num_parents = 1,
},
};
-static struct clk_fixed_factor fclk_div7_div = {
+static struct clk_fixed_factor c3_fclk_div7_div = {
.mult = 1,
.div = 7,
.hw.init = &(struct clk_init_data) {
@@ -215,7 +215,7 @@ static struct clk_fixed_factor fclk_div7_div = {
},
};
-static struct clk_regmap fclk_div7 = {
+static struct clk_regmap c3_fclk_div7 = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_FIXPLL_CTRL4,
.bit_idx = 23,
@@ -224,13 +224,13 @@ static struct clk_regmap fclk_div7 = {
.name = "fclk_div7",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div7_div.hw
+ &c3_fclk_div7_div.hw
},
.num_parents = 1,
},
};
-static const struct reg_sequence c3_gp0_init_regs[] = {
+static const struct reg_sequence c3_gp0_pll_init_regs[] = {
{ .reg = ANACTRL_GP0PLL_CTRL2, .def = 0x0 },
{ .reg = ANACTRL_GP0PLL_CTRL3, .def = 0x48681c00 },
{ .reg = ANACTRL_GP0PLL_CTRL4, .def = 0x88770290 },
@@ -243,7 +243,7 @@ static const struct pll_mult_range c3_gp0_pll_mult_range = {
.max = 250,
};
-static struct clk_regmap gp0_pll_dco = {
+static struct clk_regmap c3_gp0_pll_dco = {
.data = &(struct meson_clk_pll_data) {
.en = {
.reg_off = ANACTRL_GP0PLL_CTRL0,
@@ -276,8 +276,8 @@ static struct clk_regmap gp0_pll_dco = {
.width = 1,
},
.range = &c3_gp0_pll_mult_range,
- .init_regs = c3_gp0_init_regs,
- .init_count = ARRAY_SIZE(c3_gp0_init_regs),
+ .init_regs = c3_gp0_pll_init_regs,
+ .init_count = ARRAY_SIZE(c3_gp0_pll_init_regs),
},
.hw.init = &(struct clk_init_data) {
.name = "gp0_pll_dco",
@@ -300,7 +300,7 @@ static const struct clk_div_table c3_gp0_pll_od_table[] = {
{ /* sentinel */ }
};
-static struct clk_regmap gp0_pll = {
+static struct clk_regmap c3_gp0_pll = {
.data = &(struct clk_regmap_div_data) {
.offset = ANACTRL_GP0PLL_CTRL0,
.shift = 16,
@@ -311,14 +311,14 @@ static struct clk_regmap gp0_pll = {
.name = "gp0_pll",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &gp0_pll_dco.hw
+ &c3_gp0_pll_dco.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct reg_sequence c3_hifi_init_regs[] = {
+static const struct reg_sequence c3_hifi_pll_init_regs[] = {
{ .reg = ANACTRL_HIFIPLL_CTRL2, .def = 0x0 },
{ .reg = ANACTRL_HIFIPLL_CTRL3, .def = 0x6a285c00 },
{ .reg = ANACTRL_HIFIPLL_CTRL4, .def = 0x65771290 },
@@ -326,7 +326,7 @@ static const struct reg_sequence c3_hifi_init_regs[] = {
{ .reg = ANACTRL_HIFIPLL_CTRL6, .def = 0x56540000 },
};
-static struct clk_regmap hifi_pll_dco = {
+static struct clk_regmap c3_hifi_pll_dco = {
.data = &(struct meson_clk_pll_data) {
.en = {
.reg_off = ANACTRL_HIFIPLL_CTRL0,
@@ -359,8 +359,8 @@ static struct clk_regmap hifi_pll_dco = {
.width = 1,
},
.range = &c3_gp0_pll_mult_range,
- .init_regs = c3_hifi_init_regs,
- .init_count = ARRAY_SIZE(c3_hifi_init_regs),
+ .init_regs = c3_hifi_pll_init_regs,
+ .init_count = ARRAY_SIZE(c3_hifi_pll_init_regs),
.frac_max = 100000,
},
.hw.init = &(struct clk_init_data) {
@@ -373,7 +373,7 @@ static struct clk_regmap hifi_pll_dco = {
},
};
-static struct clk_regmap hifi_pll = {
+static struct clk_regmap c3_hifi_pll = {
.data = &(struct clk_regmap_div_data) {
.offset = ANACTRL_HIFIPLL_CTRL0,
.shift = 16,
@@ -384,14 +384,14 @@ static struct clk_regmap hifi_pll = {
.name = "hifi_pll",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &hifi_pll_dco.hw
+ &c3_hifi_pll_dco.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct reg_sequence c3_mclk_init_regs[] = {
+static const struct reg_sequence c3_mclk_pll_init_regs[] = {
{ .reg = ANACTRL_MPLL_CTRL1, .def = 0x1420500f },
{ .reg = ANACTRL_MPLL_CTRL2, .def = 0x00023041 },
{ .reg = ANACTRL_MPLL_CTRL3, .def = 0x18180000 },
@@ -403,7 +403,7 @@ static const struct pll_mult_range c3_mclk_pll_mult_range = {
.max = 133,
};
-static struct clk_regmap mclk_pll_dco = {
+static struct clk_regmap c3_mclk_pll_dco = {
.data = &(struct meson_clk_pll_data) {
.en = {
.reg_off = ANACTRL_MPLL_CTRL0,
@@ -431,8 +431,8 @@ static struct clk_regmap mclk_pll_dco = {
.width = 1,
},
.range = &c3_mclk_pll_mult_range,
- .init_regs = c3_mclk_init_regs,
- .init_count = ARRAY_SIZE(c3_mclk_init_regs),
+ .init_regs = c3_mclk_pll_init_regs,
+ .init_count = ARRAY_SIZE(c3_mclk_pll_init_regs),
},
.hw.init = &(struct clk_init_data) {
.name = "mclk_pll_dco",
@@ -444,7 +444,7 @@ static struct clk_regmap mclk_pll_dco = {
},
};
-static const struct clk_div_table c3_mpll_od_table[] = {
+static const struct clk_div_table c3_mpll_pll_od_table[] = {
{ 0, 1 },
{ 1, 2 },
{ 2, 4 },
@@ -453,25 +453,25 @@ static const struct clk_div_table c3_mpll_od_table[] = {
{ /* sentinel */ }
};
-static struct clk_regmap mclk_pll_od = {
+static struct clk_regmap c3_mclk_pll_od = {
.data = &(struct clk_regmap_div_data) {
.offset = ANACTRL_MPLL_CTRL0,
.shift = 12,
.width = 3,
- .table = c3_mpll_od_table,
+ .table = c3_mpll_pll_od_table,
},
.hw.init = &(struct clk_init_data) {
.name = "mclk_pll_od",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &mclk_pll_dco.hw },
+ &c3_mclk_pll_dco.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
/* both values 0 and 1 divide the input rate by one */
-static struct clk_regmap mclk_pll = {
+static struct clk_regmap c3_mclk_pll = {
.data = &(struct clk_regmap_div_data) {
.offset = ANACTRL_MPLL_CTRL4,
.shift = 16,
@@ -482,20 +482,20 @@ static struct clk_regmap mclk_pll = {
.name = "mclk_pll",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &mclk_pll_od.hw
+ &c3_mclk_pll_od.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data mclk_parent[] = {
- { .hw = &mclk_pll.hw },
+static const struct clk_parent_data c3_mclk_parents[] = {
+ { .hw = &c3_mclk_pll.hw },
{ .fw_name = "mclk" },
- { .hw = &fclk_50m.hw }
+ { .hw = &c3_fclk_50m.hw }
};
-static struct clk_regmap mclk0_sel = {
+static struct clk_regmap c3_mclk0_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = ANACTRL_MPLL_CTRL4,
.mask = 0x3,
@@ -504,12 +504,12 @@ static struct clk_regmap mclk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "mclk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = mclk_parent,
- .num_parents = ARRAY_SIZE(mclk_parent),
+ .parent_data = c3_mclk_parents,
+ .num_parents = ARRAY_SIZE(c3_mclk_parents),
},
};
-static struct clk_regmap mclk0_div_en = {
+static struct clk_regmap c3_mclk0_div_en = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_MPLL_CTRL4,
.bit_idx = 1,
@@ -518,14 +518,14 @@ static struct clk_regmap mclk0_div_en = {
.name = "mclk0_div_en",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &mclk0_sel.hw
+ &c3_mclk0_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap mclk0_div = {
+static struct clk_regmap c3_mclk0_div = {
.data = &(struct clk_regmap_div_data) {
.offset = ANACTRL_MPLL_CTRL4,
.shift = 2,
@@ -535,14 +535,14 @@ static struct clk_regmap mclk0_div = {
.name = "mclk0_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &mclk0_div_en.hw
+ &c3_mclk0_div_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap mclk0 = {
+static struct clk_regmap c3_mclk0 = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_MPLL_CTRL4,
.bit_idx = 0,
@@ -551,14 +551,14 @@ static struct clk_regmap mclk0 = {
.name = "mclk0",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &mclk0_div.hw
+ &c3_mclk0_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap mclk1_sel = {
+static struct clk_regmap c3_mclk1_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = ANACTRL_MPLL_CTRL4,
.mask = 0x3,
@@ -567,12 +567,12 @@ static struct clk_regmap mclk1_sel = {
.hw.init = &(struct clk_init_data) {
.name = "mclk1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = mclk_parent,
- .num_parents = ARRAY_SIZE(mclk_parent),
+ .parent_data = c3_mclk_parents,
+ .num_parents = ARRAY_SIZE(c3_mclk_parents),
},
};
-static struct clk_regmap mclk1_div_en = {
+static struct clk_regmap c3_mclk1_div_en = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_MPLL_CTRL4,
.bit_idx = 9,
@@ -581,14 +581,14 @@ static struct clk_regmap mclk1_div_en = {
.name = "mclk1_div_en",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &mclk1_sel.hw
+ &c3_mclk1_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap mclk1_div = {
+static struct clk_regmap c3_mclk1_div = {
.data = &(struct clk_regmap_div_data) {
.offset = ANACTRL_MPLL_CTRL4,
.shift = 10,
@@ -598,14 +598,14 @@ static struct clk_regmap mclk1_div = {
.name = "mclk1_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &mclk1_div_en.hw
+ &c3_mclk1_div_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap mclk1 = {
+static struct clk_regmap c3_mclk1 = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_MPLL_CTRL4,
.bit_idx = 8,
@@ -614,7 +614,7 @@ static struct clk_regmap mclk1 = {
.name = "mclk1",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &mclk1_div.hw
+ &c3_mclk1_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -622,96 +622,61 @@ static struct clk_regmap mclk1 = {
};
static struct clk_hw *c3_pll_hw_clks[] = {
- [CLKID_FCLK_50M_EN] = &fclk_50m_en.hw,
- [CLKID_FCLK_50M] = &fclk_50m.hw,
- [CLKID_FCLK_DIV2_DIV] = &fclk_div2_div.hw,
- [CLKID_FCLK_DIV2] = &fclk_div2.hw,
- [CLKID_FCLK_DIV2P5_DIV] = &fclk_div2p5_div.hw,
- [CLKID_FCLK_DIV2P5] = &fclk_div2p5.hw,
- [CLKID_FCLK_DIV3_DIV] = &fclk_div3_div.hw,
- [CLKID_FCLK_DIV3] = &fclk_div3.hw,
- [CLKID_FCLK_DIV4_DIV] = &fclk_div4_div.hw,
- [CLKID_FCLK_DIV4] = &fclk_div4.hw,
- [CLKID_FCLK_DIV5_DIV] = &fclk_div5_div.hw,
- [CLKID_FCLK_DIV5] = &fclk_div5.hw,
- [CLKID_FCLK_DIV7_DIV] = &fclk_div7_div.hw,
- [CLKID_FCLK_DIV7] = &fclk_div7.hw,
- [CLKID_GP0_PLL_DCO] = &gp0_pll_dco.hw,
- [CLKID_GP0_PLL] = &gp0_pll.hw,
- [CLKID_HIFI_PLL_DCO] = &hifi_pll_dco.hw,
- [CLKID_HIFI_PLL] = &hifi_pll.hw,
- [CLKID_MCLK_PLL_DCO] = &mclk_pll_dco.hw,
- [CLKID_MCLK_PLL_OD] = &mclk_pll_od.hw,
- [CLKID_MCLK_PLL] = &mclk_pll.hw,
- [CLKID_MCLK0_SEL] = &mclk0_sel.hw,
- [CLKID_MCLK0_SEL_EN] = &mclk0_div_en.hw,
- [CLKID_MCLK0_DIV] = &mclk0_div.hw,
- [CLKID_MCLK0] = &mclk0.hw,
- [CLKID_MCLK1_SEL] = &mclk1_sel.hw,
- [CLKID_MCLK1_SEL_EN] = &mclk1_div_en.hw,
- [CLKID_MCLK1_DIV] = &mclk1_div.hw,
- [CLKID_MCLK1] = &mclk1.hw
-};
-
-static const struct regmap_config clkc_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = ANACTRL_MPLL_CTRL4,
-};
-
-static struct meson_clk_hw_data c3_pll_clks = {
- .hws = c3_pll_hw_clks,
- .num = ARRAY_SIZE(c3_pll_hw_clks),
-};
-
-static int c3_pll_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct regmap *regmap;
- void __iomem *base;
- int clkid, ret;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- regmap = devm_regmap_init_mmio(dev, base, &clkc_regmap_config);
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
-
- for (clkid = 0; clkid < c3_pll_clks.num; clkid++) {
- /* array might be sparse */
- if (!c3_pll_clks.hws[clkid])
- continue;
-
- ret = devm_clk_hw_register(dev, c3_pll_clks.hws[clkid]);
- if (ret) {
- dev_err(dev, "Clock registration failed\n");
- return ret;
- }
- }
-
- return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get,
- &c3_pll_clks);
-}
+ [CLKID_FCLK_50M_EN] = &c3_fclk_50m_en.hw,
+ [CLKID_FCLK_50M] = &c3_fclk_50m.hw,
+ [CLKID_FCLK_DIV2_DIV] = &c3_fclk_div2_div.hw,
+ [CLKID_FCLK_DIV2] = &c3_fclk_div2.hw,
+ [CLKID_FCLK_DIV2P5_DIV] = &c3_fclk_div2p5_div.hw,
+ [CLKID_FCLK_DIV2P5] = &c3_fclk_div2p5.hw,
+ [CLKID_FCLK_DIV3_DIV] = &c3_fclk_div3_div.hw,
+ [CLKID_FCLK_DIV3] = &c3_fclk_div3.hw,
+ [CLKID_FCLK_DIV4_DIV] = &c3_fclk_div4_div.hw,
+ [CLKID_FCLK_DIV4] = &c3_fclk_div4.hw,
+ [CLKID_FCLK_DIV5_DIV] = &c3_fclk_div5_div.hw,
+ [CLKID_FCLK_DIV5] = &c3_fclk_div5.hw,
+ [CLKID_FCLK_DIV7_DIV] = &c3_fclk_div7_div.hw,
+ [CLKID_FCLK_DIV7] = &c3_fclk_div7.hw,
+ [CLKID_GP0_PLL_DCO] = &c3_gp0_pll_dco.hw,
+ [CLKID_GP0_PLL] = &c3_gp0_pll.hw,
+ [CLKID_HIFI_PLL_DCO] = &c3_hifi_pll_dco.hw,
+ [CLKID_HIFI_PLL] = &c3_hifi_pll.hw,
+ [CLKID_MCLK_PLL_DCO] = &c3_mclk_pll_dco.hw,
+ [CLKID_MCLK_PLL_OD] = &c3_mclk_pll_od.hw,
+ [CLKID_MCLK_PLL] = &c3_mclk_pll.hw,
+ [CLKID_MCLK0_SEL] = &c3_mclk0_sel.hw,
+ [CLKID_MCLK0_SEL_EN] = &c3_mclk0_div_en.hw,
+ [CLKID_MCLK0_DIV] = &c3_mclk0_div.hw,
+ [CLKID_MCLK0] = &c3_mclk0.hw,
+ [CLKID_MCLK1_SEL] = &c3_mclk1_sel.hw,
+ [CLKID_MCLK1_SEL_EN] = &c3_mclk1_div_en.hw,
+ [CLKID_MCLK1_DIV] = &c3_mclk1_div.hw,
+ [CLKID_MCLK1] = &c3_mclk1.hw
+};
+
+static const struct meson_clkc_data c3_pll_clkc_data = {
+ .hw_clks = {
+ .hws = c3_pll_hw_clks,
+ .num = ARRAY_SIZE(c3_pll_hw_clks),
+ },
+};
static const struct of_device_id c3_pll_clkc_match_table[] = {
{
.compatible = "amlogic,c3-pll-clkc",
+ .data = &c3_pll_clkc_data,
},
{}
};
MODULE_DEVICE_TABLE(of, c3_pll_clkc_match_table);
-static struct platform_driver c3_pll_driver = {
- .probe = c3_pll_probe,
+static struct platform_driver c3_pll_clkc_driver = {
+ .probe = meson_clkc_mmio_probe,
.driver = {
.name = "c3-pll-clkc",
.of_match_table = c3_pll_clkc_match_table,
},
};
-module_platform_driver(c3_pll_driver);
+module_platform_driver(c3_pll_clkc_driver);
MODULE_DESCRIPTION("Amlogic C3 PLL Clock Controller driver");
MODULE_AUTHOR("Chuan Liu <chuan.liu@amlogic.com>");
diff --git a/drivers/clk/meson/clk-regmap.h b/drivers/clk/meson/clk-regmap.h
index f8cac2df5755..8e5c39b023e1 100644
--- a/drivers/clk/meson/clk-regmap.h
+++ b/drivers/clk/meson/clk-regmap.h
@@ -118,24 +118,4 @@ clk_get_regmap_mux_data(struct clk_regmap *clk)
extern const struct clk_ops clk_regmap_mux_ops;
extern const struct clk_ops clk_regmap_mux_ro_ops;
-#define __MESON_PCLK(_name, _reg, _bit, _ops, _pname) \
-struct clk_regmap _name = { \
- .data = &(struct clk_regmap_gate_data){ \
- .offset = (_reg), \
- .bit_idx = (_bit), \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = #_name, \
- .ops = _ops, \
- .parent_hws = (const struct clk_hw *[]) { _pname }, \
- .num_parents = 1, \
- .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED), \
- }, \
-}
-
-#define MESON_PCLK(_name, _reg, _bit, _pname) \
- __MESON_PCLK(_name, _reg, _bit, &clk_regmap_gate_ops, _pname)
-
-#define MESON_PCLK_RO(_name, _reg, _bit, _pname) \
- __MESON_PCLK(_name, _reg, _bit, &clk_regmap_gate_ro_ops, _pname)
#endif /* __CLK_REGMAP_H */
diff --git a/drivers/clk/meson/g12a-aoclk.c b/drivers/clk/meson/g12a-aoclk.c
index 4095a1b2bb80..96981da271fa 100644
--- a/drivers/clk/meson/g12a-aoclk.c
+++ b/drivers/clk/meson/g12a-aoclk.c
@@ -37,46 +37,38 @@
#define AO_RTC_ALT_CLK_CNTL0 0x94
#define AO_RTC_ALT_CLK_CNTL1 0x98
+static const struct clk_parent_data g12a_ao_pclk_parents = { .fw_name = "mpeg-clk" };
+
+#define G12A_AO_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(g12a_ao_##_name, _reg, _bit, &g12a_ao_pclk_parents, _flags)
+
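+
G12A_AO_PCLK wraps a reworked MESON_PCLK: the clk-regmap.h version removed later in this patch took a parent clk_hw pointer and hard-coded CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, while the new call sites pass clk_parent_data and explicit flags. Inferred from those call sites, the reworked macro presumably looks like this (its actual home and details are not shown in this diff):

#define MESON_PCLK(_name, _reg, _bit, _pdata, _flags)		\
struct clk_regmap _name = {					\
	.data = &(struct clk_regmap_gate_data) {		\
		.offset = (_reg),				\
		.bit_idx = (_bit),				\
	},							\
	.hw.init = &(struct clk_init_data) {			\
		.name = #_name,					\
		.ops = &clk_regmap_gate_ops,			\
		.parent_data = (_pdata),			\
		.num_parents = 1,				\
		.flags = (_flags),				\
	},							\
}
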
/*
- * Like every other peripheral clock gate in Amlogic Clock drivers,
- * we are using CLK_IGNORE_UNUSED here, so we keep the state of the
- * bootloader. The goal is to remove this flag at some point.
- * Actually removing it will require some extensive test to be done safely.
+ * NOTE: The gates below are marked with CLK_IGNORE_UNUSED for historic reasons.
+ * Users are encouraged to test without it and submit changes to:
+ * - remove the flag if not necessary
+ * - replace the flag with something more adequate, such as CLK_IS_CRITICAL,
+ * if appropriate.
+ * - add a comment explaining why the use of CLK_IGNORE_UNUSED is desirable
+ * for a particular clock.
*/
-#define AXG_AO_GATE(_name, _reg, _bit) \
-static struct clk_regmap g12a_aoclk_##_name = { \
- .data = &(struct clk_regmap_gate_data) { \
- .offset = (_reg), \
- .bit_idx = (_bit), \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = "g12a_ao_" #_name, \
- .ops = &clk_regmap_gate_ops, \
- .parent_data = &(const struct clk_parent_data) { \
- .fw_name = "mpeg-clk", \
- }, \
- .num_parents = 1, \
- .flags = CLK_IGNORE_UNUSED, \
- }, \
-}
+static G12A_AO_PCLK(ahb, AO_CLK_GATE0, 0, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(ir_in, AO_CLK_GATE0, 1, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(i2c_m0, AO_CLK_GATE0, 2, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(i2c_s0, AO_CLK_GATE0, 3, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(uart, AO_CLK_GATE0, 4, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(prod_i2c, AO_CLK_GATE0, 5, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(uart2, AO_CLK_GATE0, 6, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(ir_out, AO_CLK_GATE0, 7, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(saradc, AO_CLK_GATE0, 8, CLK_IGNORE_UNUSED);
-AXG_AO_GATE(ahb, AO_CLK_GATE0, 0);
-AXG_AO_GATE(ir_in, AO_CLK_GATE0, 1);
-AXG_AO_GATE(i2c_m0, AO_CLK_GATE0, 2);
-AXG_AO_GATE(i2c_s0, AO_CLK_GATE0, 3);
-AXG_AO_GATE(uart, AO_CLK_GATE0, 4);
-AXG_AO_GATE(prod_i2c, AO_CLK_GATE0, 5);
-AXG_AO_GATE(uart2, AO_CLK_GATE0, 6);
-AXG_AO_GATE(ir_out, AO_CLK_GATE0, 7);
-AXG_AO_GATE(saradc, AO_CLK_GATE0, 8);
-AXG_AO_GATE(mailbox, AO_CLK_GATE0_SP, 0);
-AXG_AO_GATE(m3, AO_CLK_GATE0_SP, 1);
-AXG_AO_GATE(ahb_sram, AO_CLK_GATE0_SP, 2);
-AXG_AO_GATE(rti, AO_CLK_GATE0_SP, 3);
-AXG_AO_GATE(m4_fclk, AO_CLK_GATE0_SP, 4);
-AXG_AO_GATE(m4_hclk, AO_CLK_GATE0_SP, 5);
+static G12A_AO_PCLK(mailbox, AO_CLK_GATE0_SP, 0, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(m3, AO_CLK_GATE0_SP, 1, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(ahb_sram, AO_CLK_GATE0_SP, 2, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(rti, AO_CLK_GATE0_SP, 3, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(m4_fclk, AO_CLK_GATE0_SP, 4, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(m4_hclk, AO_CLK_GATE0_SP, 5, CLK_IGNORE_UNUSED);
-static struct clk_regmap g12a_aoclk_cts_oscin = {
+static struct clk_regmap g12a_ao_cts_oscin = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTI_PWR_CNTL_REG0,
.bit_idx = 14,
@@ -103,22 +95,22 @@ static const struct meson_clk_dualdiv_param g12a_32k_div_table[] = {
/* 32k_by_oscin clock */
-static struct clk_regmap g12a_aoclk_32k_by_oscin_pre = {
+static struct clk_regmap g12a_ao_32k_by_oscin_pre = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTC_ALT_CLK_CNTL0,
.bit_idx = 31,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_32k_by_oscin_pre",
+ .name = "ao_32k_by_oscin_pre",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_cts_oscin.hw
+ &g12a_ao_cts_oscin.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap g12a_aoclk_32k_by_oscin_div = {
+static struct clk_regmap g12a_ao_32k_by_oscin_div = {
.data = &(struct meson_clk_dualdiv_data){
.n1 = {
.reg_off = AO_RTC_ALT_CLK_CNTL0,
@@ -148,16 +140,16 @@ static struct clk_regmap g12a_aoclk_32k_by_oscin_div = {
.table = g12a_32k_div_table,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_32k_by_oscin_div",
+ .name = "ao_32k_by_oscin_div",
.ops = &meson_clk_dualdiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_32k_by_oscin_pre.hw
+ &g12a_ao_32k_by_oscin_pre.hw
},
.num_parents = 1,
},
};
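
The n1/n2/m1/m2 fields of meson_clk_dualdiv_data configure Amlogic's dual divider, which alternates between divide-by-n1 for m1 cycles and divide-by-n2 for m2 cycles, averaging to a fractional division. The rate computation in clk-dualdiv.c (same directory) boils down to the following, quoted from memory and therefore approximate:

static unsigned long
__dualdiv_param_to_rate(unsigned long parent_rate,
			const struct meson_clk_dualdiv_param *p)
{
	if (!p->dual)	/* single-divider mode: only n1 applies */
		return DIV_ROUND_CLOSEST(parent_rate, p->n1);

	/* weighted average of the two dividers over m1 + m2 cycles */
	return DIV_ROUND_CLOSEST(parent_rate * (p->m1 + p->m2),
				 p->n1 * p->m1 + p->n2 * p->m2);
}
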
-static struct clk_regmap g12a_aoclk_32k_by_oscin_sel = {
+static struct clk_regmap g12a_ao_32k_by_oscin_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTC_ALT_CLK_CNTL1,
.mask = 0x1,
@@ -165,27 +157,27 @@ static struct clk_regmap g12a_aoclk_32k_by_oscin_sel = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_32k_by_oscin_sel",
+ .name = "ao_32k_by_oscin_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_32k_by_oscin_div.hw,
- &g12a_aoclk_32k_by_oscin_pre.hw,
+ &g12a_ao_32k_by_oscin_div.hw,
+ &g12a_ao_32k_by_oscin_pre.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap g12a_aoclk_32k_by_oscin = {
+static struct clk_regmap g12a_ao_32k_by_oscin = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTC_ALT_CLK_CNTL0,
.bit_idx = 30,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_32k_by_oscin",
+ .name = "ao_32k_by_oscin",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_32k_by_oscin_sel.hw
+ &g12a_ao_32k_by_oscin_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -194,22 +186,22 @@ static struct clk_regmap g12a_aoclk_32k_by_oscin = {
/* cec clock */
-static struct clk_regmap g12a_aoclk_cec_pre = {
+static struct clk_regmap g12a_ao_cec_pre = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_CEC_CLK_CNTL_REG0,
.bit_idx = 31,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_cec_pre",
+ .name = "ao_cec_pre",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_cts_oscin.hw
+ &g12a_ao_cts_oscin.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap g12a_aoclk_cec_div = {
+static struct clk_regmap g12a_ao_cec_div = {
.data = &(struct meson_clk_dualdiv_data){
.n1 = {
.reg_off = AO_CEC_CLK_CNTL_REG0,
@@ -239,16 +231,16 @@ static struct clk_regmap g12a_aoclk_cec_div = {
.table = g12a_32k_div_table,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_cec_div",
+ .name = "ao_cec_div",
.ops = &meson_clk_dualdiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_cec_pre.hw
+ &g12a_ao_cec_pre.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap g12a_aoclk_cec_sel = {
+static struct clk_regmap g12a_ao_cec_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_CEC_CLK_CNTL_REG1,
.mask = 0x1,
@@ -256,34 +248,34 @@ static struct clk_regmap g12a_aoclk_cec_sel = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_cec_sel",
+ .name = "ao_cec_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_cec_div.hw,
- &g12a_aoclk_cec_pre.hw,
+ &g12a_ao_cec_div.hw,
+ &g12a_ao_cec_pre.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap g12a_aoclk_cec = {
+static struct clk_regmap g12a_ao_cec = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_CEC_CLK_CNTL_REG0,
.bit_idx = 30,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_cec",
+ .name = "ao_cec",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_cec_sel.hw
+ &g12a_ao_cec_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap g12a_aoclk_cts_rtc_oscin = {
+static struct clk_regmap g12a_ao_cts_rtc_oscin = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTI_PWR_CNTL_REG0,
.mask = 0x1,
@@ -291,10 +283,10 @@ static struct clk_regmap g12a_aoclk_cts_rtc_oscin = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_cts_rtc_oscin",
+ .name = "ao_cts_rtc_oscin",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
- { .hw = &g12a_aoclk_32k_by_oscin.hw },
+ { .hw = &g12a_ao_32k_by_oscin.hw },
{ .fw_name = "ext-32k-0", },
},
.num_parents = 2,
@@ -302,7 +294,7 @@ static struct clk_regmap g12a_aoclk_cts_rtc_oscin = {
},
};
-static struct clk_regmap g12a_aoclk_clk81 = {
+static struct clk_regmap g12a_ao_clk81 = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTI_PWR_CNTL_REG0,
.mask = 0x1,
@@ -310,68 +302,74 @@ static struct clk_regmap g12a_aoclk_clk81 = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
+ /*
+ * NOTE: this is one of the infamous clocks the pwm driver
+ * can request directly by its global name. It's wrong, but
+ * there is not much we can do about it until support
+ * for the old pwm bindings is dropped.
+ */
.name = "g12a_ao_clk81",
.ops = &clk_regmap_mux_ro_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "mpeg-clk", },
- { .hw = &g12a_aoclk_cts_rtc_oscin.hw },
+ { .hw = &g12a_ao_cts_rtc_oscin.hw },
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap g12a_aoclk_saradc_mux = {
+static struct clk_regmap g12a_ao_saradc_mux = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_SAR_CLK,
.mask = 0x3,
.shift = 9,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_saradc_mux",
+ .name = "ao_saradc_mux",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "xtal", },
- { .hw = &g12a_aoclk_clk81.hw },
+ { .hw = &g12a_ao_clk81.hw },
},
.num_parents = 2,
},
};
-static struct clk_regmap g12a_aoclk_saradc_div = {
+static struct clk_regmap g12a_ao_saradc_div = {
.data = &(struct clk_regmap_div_data) {
.offset = AO_SAR_CLK,
.shift = 0,
.width = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_saradc_div",
+ .name = "ao_saradc_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_saradc_mux.hw
+ &g12a_ao_saradc_mux.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap g12a_aoclk_saradc_gate = {
+static struct clk_regmap g12a_ao_saradc_gate = {
.data = &(struct clk_regmap_gate_data) {
.offset = AO_SAR_CLK,
.bit_idx = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_saradc_gate",
+ .name = "ao_saradc_gate",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_saradc_div.hw
+ &g12a_ao_saradc_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const unsigned int g12a_aoclk_reset[] = {
+static const unsigned int g12a_ao_reset[] = {
[RESET_AO_IR_IN] = 16,
[RESET_AO_UART] = 17,
[RESET_AO_I2C_M] = 18,
@@ -381,65 +379,67 @@ static const unsigned int g12a_aoclk_reset[] = {
[RESET_AO_IR_OUT] = 23,
};
-static struct clk_hw *g12a_aoclk_hw_clks[] = {
- [CLKID_AO_AHB] = &g12a_aoclk_ahb.hw,
- [CLKID_AO_IR_IN] = &g12a_aoclk_ir_in.hw,
- [CLKID_AO_I2C_M0] = &g12a_aoclk_i2c_m0.hw,
- [CLKID_AO_I2C_S0] = &g12a_aoclk_i2c_s0.hw,
- [CLKID_AO_UART] = &g12a_aoclk_uart.hw,
- [CLKID_AO_PROD_I2C] = &g12a_aoclk_prod_i2c.hw,
- [CLKID_AO_UART2] = &g12a_aoclk_uart2.hw,
- [CLKID_AO_IR_OUT] = &g12a_aoclk_ir_out.hw,
- [CLKID_AO_SAR_ADC] = &g12a_aoclk_saradc.hw,
- [CLKID_AO_MAILBOX] = &g12a_aoclk_mailbox.hw,
- [CLKID_AO_M3] = &g12a_aoclk_m3.hw,
- [CLKID_AO_AHB_SRAM] = &g12a_aoclk_ahb_sram.hw,
- [CLKID_AO_RTI] = &g12a_aoclk_rti.hw,
- [CLKID_AO_M4_FCLK] = &g12a_aoclk_m4_fclk.hw,
- [CLKID_AO_M4_HCLK] = &g12a_aoclk_m4_hclk.hw,
- [CLKID_AO_CLK81] = &g12a_aoclk_clk81.hw,
- [CLKID_AO_SAR_ADC_SEL] = &g12a_aoclk_saradc_mux.hw,
- [CLKID_AO_SAR_ADC_DIV] = &g12a_aoclk_saradc_div.hw,
- [CLKID_AO_SAR_ADC_CLK] = &g12a_aoclk_saradc_gate.hw,
- [CLKID_AO_CTS_OSCIN] = &g12a_aoclk_cts_oscin.hw,
- [CLKID_AO_32K_PRE] = &g12a_aoclk_32k_by_oscin_pre.hw,
- [CLKID_AO_32K_DIV] = &g12a_aoclk_32k_by_oscin_div.hw,
- [CLKID_AO_32K_SEL] = &g12a_aoclk_32k_by_oscin_sel.hw,
- [CLKID_AO_32K] = &g12a_aoclk_32k_by_oscin.hw,
- [CLKID_AO_CEC_PRE] = &g12a_aoclk_cec_pre.hw,
- [CLKID_AO_CEC_DIV] = &g12a_aoclk_cec_div.hw,
- [CLKID_AO_CEC_SEL] = &g12a_aoclk_cec_sel.hw,
- [CLKID_AO_CEC] = &g12a_aoclk_cec.hw,
- [CLKID_AO_CTS_RTC_OSCIN] = &g12a_aoclk_cts_rtc_oscin.hw,
+static struct clk_hw *g12a_ao_hw_clks[] = {
+ [CLKID_AO_AHB] = &g12a_ao_ahb.hw,
+ [CLKID_AO_IR_IN] = &g12a_ao_ir_in.hw,
+ [CLKID_AO_I2C_M0] = &g12a_ao_i2c_m0.hw,
+ [CLKID_AO_I2C_S0] = &g12a_ao_i2c_s0.hw,
+ [CLKID_AO_UART] = &g12a_ao_uart.hw,
+ [CLKID_AO_PROD_I2C] = &g12a_ao_prod_i2c.hw,
+ [CLKID_AO_UART2] = &g12a_ao_uart2.hw,
+ [CLKID_AO_IR_OUT] = &g12a_ao_ir_out.hw,
+ [CLKID_AO_SAR_ADC] = &g12a_ao_saradc.hw,
+ [CLKID_AO_MAILBOX] = &g12a_ao_mailbox.hw,
+ [CLKID_AO_M3] = &g12a_ao_m3.hw,
+ [CLKID_AO_AHB_SRAM] = &g12a_ao_ahb_sram.hw,
+ [CLKID_AO_RTI] = &g12a_ao_rti.hw,
+ [CLKID_AO_M4_FCLK] = &g12a_ao_m4_fclk.hw,
+ [CLKID_AO_M4_HCLK] = &g12a_ao_m4_hclk.hw,
+ [CLKID_AO_CLK81] = &g12a_ao_clk81.hw,
+ [CLKID_AO_SAR_ADC_SEL] = &g12a_ao_saradc_mux.hw,
+ [CLKID_AO_SAR_ADC_DIV] = &g12a_ao_saradc_div.hw,
+ [CLKID_AO_SAR_ADC_CLK] = &g12a_ao_saradc_gate.hw,
+ [CLKID_AO_CTS_OSCIN] = &g12a_ao_cts_oscin.hw,
+ [CLKID_AO_32K_PRE] = &g12a_ao_32k_by_oscin_pre.hw,
+ [CLKID_AO_32K_DIV] = &g12a_ao_32k_by_oscin_div.hw,
+ [CLKID_AO_32K_SEL] = &g12a_ao_32k_by_oscin_sel.hw,
+ [CLKID_AO_32K] = &g12a_ao_32k_by_oscin.hw,
+ [CLKID_AO_CEC_PRE] = &g12a_ao_cec_pre.hw,
+ [CLKID_AO_CEC_DIV] = &g12a_ao_cec_div.hw,
+ [CLKID_AO_CEC_SEL] = &g12a_ao_cec_sel.hw,
+ [CLKID_AO_CEC] = &g12a_ao_cec.hw,
+ [CLKID_AO_CTS_RTC_OSCIN] = &g12a_ao_cts_rtc_oscin.hw,
};
-static const struct meson_aoclk_data g12a_aoclkc_data = {
+static const struct meson_aoclk_data g12a_ao_clkc_data = {
.reset_reg = AO_RTI_GEN_CNTL_REG0,
- .num_reset = ARRAY_SIZE(g12a_aoclk_reset),
- .reset = g12a_aoclk_reset,
- .hw_clks = {
- .hws = g12a_aoclk_hw_clks,
- .num = ARRAY_SIZE(g12a_aoclk_hw_clks),
+ .num_reset = ARRAY_SIZE(g12a_ao_reset),
+ .reset = g12a_ao_reset,
+ .clkc_data = {
+ .hw_clks = {
+ .hws = g12a_ao_hw_clks,
+ .num = ARRAY_SIZE(g12a_ao_hw_clks),
+ },
},
};
-static const struct of_device_id g12a_aoclkc_match_table[] = {
+static const struct of_device_id g12a_ao_clkc_match_table[] = {
{
.compatible = "amlogic,meson-g12a-aoclkc",
- .data = &g12a_aoclkc_data,
+ .data = &g12a_ao_clkc_data.clkc_data,
},
{ }
};
-MODULE_DEVICE_TABLE(of, g12a_aoclkc_match_table);
+MODULE_DEVICE_TABLE(of, g12a_ao_clkc_match_table);
-static struct platform_driver g12a_aoclkc_driver = {
+static struct platform_driver g12a_ao_clkc_driver = {
.probe = meson_aoclkc_probe,
.driver = {
.name = "g12a-aoclkc",
- .of_match_table = g12a_aoclkc_match_table,
+ .of_match_table = g12a_ao_clkc_match_table,
},
};
-module_platform_driver(g12a_aoclkc_driver);
+module_platform_driver(g12a_ao_clkc_driver);
MODULE_DESCRIPTION("Amlogic G12A Always-ON Clock Controller driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index 66f0e817e416..185b6348251d 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -23,7 +23,7 @@
#include "clk-cpu-dyndiv.h"
#include "vid-pll-div.h"
#include "vclk.h"
-#include "meson-eeclk.h"
+#include "meson-clkc-utils.h"
#include <dt-bindings/clock/g12a-clkc.h>
@@ -386,6 +386,451 @@ static struct clk_fixed_factor g12b_sys1_pll_div16 = {
},
};
+static const struct pll_mult_range g12a_gp0_pll_mult_range = {
+ .min = 125,
+ .max = 255,
+};
+
+/*
+ * Internal gp0 pll emulation configuration parameters
+ */
+static const struct reg_sequence g12a_gp0_pll_init_regs[] = {
+ { .reg = HHI_GP0_PLL_CNTL1, .def = 0x00000000 },
+ { .reg = HHI_GP0_PLL_CNTL2, .def = 0x00000000 },
+ { .reg = HHI_GP0_PLL_CNTL3, .def = 0x48681c00 },
+ { .reg = HHI_GP0_PLL_CNTL4, .def = 0x33771290 },
+ { .reg = HHI_GP0_PLL_CNTL5, .def = 0x39272000 },
+ { .reg = HHI_GP0_PLL_CNTL6, .def = 0x56540000 },
+};
+
+static struct clk_regmap g12a_gp0_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_GP0_PLL_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_GP0_PLL_CNTL0,
+ .shift = 0,
+ .width = 8,
+ },
+ .n = {
+ .reg_off = HHI_GP0_PLL_CNTL0,
+ .shift = 10,
+ .width = 5,
+ },
+ .frac = {
+ .reg_off = HHI_GP0_PLL_CNTL1,
+ .shift = 0,
+ .width = 17,
+ },
+ .l = {
+ .reg_off = HHI_GP0_PLL_CNTL0,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_GP0_PLL_CNTL0,
+ .shift = 29,
+ .width = 1,
+ },
+ .range = &g12a_gp0_pll_mult_range,
+ .init_regs = g12a_gp0_pll_init_regs,
+ .init_count = ARRAY_SIZE(g12a_gp0_pll_init_regs),
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gp0_pll_dco",
+ .ops = &meson_clk_pll_ops,
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_gp0_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_GP0_PLL_CNTL0,
+ .shift = 16,
+ .width = 3,
+ .flags = (CLK_DIVIDER_POWER_OF_TWO |
+ CLK_DIVIDER_ROUND_CLOSEST),
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gp0_pll",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_gp0_pll_dco.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap sm1_gp1_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_GP1_PLL_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_GP1_PLL_CNTL0,
+ .shift = 0,
+ .width = 8,
+ },
+ .n = {
+ .reg_off = HHI_GP1_PLL_CNTL0,
+ .shift = 10,
+ .width = 5,
+ },
+ .frac = {
+ .reg_off = HHI_GP1_PLL_CNTL1,
+ .shift = 0,
+ .width = 17,
+ },
+ .l = {
+ .reg_off = HHI_GP1_PLL_CNTL0,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_GP1_PLL_CNTL0,
+ .shift = 29,
+ .width = 1,
+ },
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gp1_pll_dco",
+ .ops = &meson_clk_pll_ro_ops,
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
+ .num_parents = 1,
+ /* This clock feeds the DSU, avoid disabling it */
+ .flags = CLK_IS_CRITICAL,
+ },
+};
+
+static struct clk_regmap sm1_gp1_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_GP1_PLL_CNTL0,
+ .shift = 16,
+ .width = 3,
+ .flags = (CLK_DIVIDER_POWER_OF_TWO |
+ CLK_DIVIDER_ROUND_CLOSEST),
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gp1_pll",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &sm1_gp1_pll_dco.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+/*
+ * Internal hifi pll emulation configuration parameters
+ */
+static const struct reg_sequence g12a_hifi_pll_init_regs[] = {
+ { .reg = HHI_HIFI_PLL_CNTL1, .def = 0x00000000 },
+ { .reg = HHI_HIFI_PLL_CNTL2, .def = 0x00000000 },
+ { .reg = HHI_HIFI_PLL_CNTL3, .def = 0x6a285c00 },
+ { .reg = HHI_HIFI_PLL_CNTL4, .def = 0x65771290 },
+ { .reg = HHI_HIFI_PLL_CNTL5, .def = 0x39272000 },
+ { .reg = HHI_HIFI_PLL_CNTL6, .def = 0x56540000 },
+};
+
+static struct clk_regmap g12a_hifi_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_HIFI_PLL_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_HIFI_PLL_CNTL0,
+ .shift = 0,
+ .width = 8,
+ },
+ .n = {
+ .reg_off = HHI_HIFI_PLL_CNTL0,
+ .shift = 10,
+ .width = 5,
+ },
+ .frac = {
+ .reg_off = HHI_HIFI_PLL_CNTL1,
+ .shift = 0,
+ .width = 17,
+ },
+ .l = {
+ .reg_off = HHI_HIFI_PLL_CNTL0,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_HIFI_PLL_CNTL0,
+ .shift = 29,
+ .width = 1,
+ },
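+		/* Note: the HiFi PLL DCO reuses the GP0 PLL multiplier range (125-255) */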
+ .range = &g12a_gp0_pll_mult_range,
+ .init_regs = g12a_hifi_pll_init_regs,
+ .init_count = ARRAY_SIZE(g12a_hifi_pll_init_regs),
+ .flags = CLK_MESON_PLL_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hifi_pll_dco",
+ .ops = &meson_clk_pll_ops,
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_hifi_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HIFI_PLL_CNTL0,
+ .shift = 16,
+ .width = 2,
+ .flags = (CLK_DIVIDER_POWER_OF_TWO |
+ CLK_DIVIDER_ROUND_CLOSEST),
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hifi_pll",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_hifi_pll_dco.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+/*
+ * The Meson G12A PCIE PLL is fine-tuned to deliver a very precise
+ * 100MHz reference clock for the PCIe Analog PHY, and thus requires
+ * a strict register sequence to enable the PLL.
+ */
+static const struct reg_sequence g12a_pcie_pll_init_regs[] = {
+ { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x20090496 },
+ { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x30090496 },
+ { .reg = HHI_PCIE_PLL_CNTL1, .def = 0x00000000 },
+ { .reg = HHI_PCIE_PLL_CNTL2, .def = 0x00001100 },
+ { .reg = HHI_PCIE_PLL_CNTL3, .def = 0x10058e00 },
+ { .reg = HHI_PCIE_PLL_CNTL4, .def = 0x000100c0 },
+ { .reg = HHI_PCIE_PLL_CNTL5, .def = 0x68000048 },
+ { .reg = HHI_PCIE_PLL_CNTL5, .def = 0x68000068, .delay_us = 20 },
+ { .reg = HHI_PCIE_PLL_CNTL4, .def = 0x008100c0, .delay_us = 10 },
+ { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x34090496 },
+ { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x14090496, .delay_us = 10 },
+ { .reg = HHI_PCIE_PLL_CNTL2, .def = 0x00001000 },
+};
+
+/* Keep a single entry table for recalc/round_rate() ops */
+static const struct pll_params_table g12a_pcie_pll_table[] = {
+ PLL_PARAMS(150, 1),
+ {0, 0},
+};
+
+static struct clk_regmap g12a_pcie_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_PCIE_PLL_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_PCIE_PLL_CNTL0,
+ .shift = 0,
+ .width = 8,
+ },
+ .n = {
+ .reg_off = HHI_PCIE_PLL_CNTL0,
+ .shift = 10,
+ .width = 5,
+ },
+ .frac = {
+ .reg_off = HHI_PCIE_PLL_CNTL1,
+ .shift = 0,
+ .width = 12,
+ },
+ .l = {
+ .reg_off = HHI_PCIE_PLL_CNTL0,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_PCIE_PLL_CNTL0,
+ .shift = 29,
+ .width = 1,
+ },
+ .table = g12a_pcie_pll_table,
+ .init_regs = g12a_pcie_pll_init_regs,
+ .init_count = ARRAY_SIZE(g12a_pcie_pll_init_regs),
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_pll_dco",
+ .ops = &meson_clk_pcie_pll_ops,
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
+ .num_parents = 1,
+ },
+};
+
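+/*
+ * The DCO output goes through a fixed /2, then the OD divider, then
+ * another fixed /2 (pcie_pll below) to produce the 100MHz PHY reference.
+ */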
+static struct clk_fixed_factor g12a_pcie_pll_dco_div2 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_pll_dco_div2",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_pcie_pll_dco.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_pcie_pll_od = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_PCIE_PLL_CNTL0,
+ .shift = 16,
+ .width = 5,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST |
+ CLK_DIVIDER_ONE_BASED |
+ CLK_DIVIDER_ALLOW_ZERO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_pll_od",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_pcie_pll_dco_div2.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_fixed_factor g12a_pcie_pll = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_pll_pll",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_pcie_pll_od.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_hdmi_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_HDMI_PLL_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_HDMI_PLL_CNTL0,
+ .shift = 0,
+ .width = 8,
+ },
+ .n = {
+ .reg_off = HHI_HDMI_PLL_CNTL0,
+ .shift = 10,
+ .width = 5,
+ },
+ .frac = {
+ .reg_off = HHI_HDMI_PLL_CNTL1,
+ .shift = 0,
+ .width = 16,
+ },
+ .l = {
+ .reg_off = HHI_HDMI_PLL_CNTL0,
+ .shift = 30,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_HDMI_PLL_CNTL0,
+ .shift = 29,
+ .width = 1,
+ },
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll_dco",
+ .ops = &meson_clk_pll_ro_ops,
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
+ .num_parents = 1,
+ /*
+		 * The display driver directly handles the hdmi pll registers ATM;
+		 * we need NOCACHE to keep our view of the clock as accurate as possible
+ */
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap g12a_hdmi_pll_od = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HDMI_PLL_CNTL0,
+ .shift = 16,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll_od",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_hdmi_pll_dco.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_hdmi_pll_od2 = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HDMI_PLL_CNTL0,
+ .shift = 18,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll_od2",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_hdmi_pll_od.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_hdmi_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HDMI_PLL_CNTL0,
+ .shift = 20,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_hdmi_pll_od2.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
+ },
+};
+
static struct clk_fixed_factor g12a_fclk_div2_div = {
.mult = 1,
.div = 2,
@@ -459,36 +904,166 @@ static struct clk_regmap g12a_fclk_div3 = {
},
};
-/* Datasheet names this field as "premux0" */
-static struct clk_regmap g12a_cpu_clk_premux0 = {
+
+static struct clk_fixed_factor g12a_fclk_div4_div = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div4_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) { &g12a_fixed_pll.hw },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_fclk_div4 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_FIX_PLL_CNTL1,
+ .bit_idx = 21,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div4",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fclk_div4_div.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_fclk_div5_div = {
+ .mult = 1,
+ .div = 5,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div5_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) { &g12a_fixed_pll.hw },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_fclk_div5 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_FIX_PLL_CNTL1,
+ .bit_idx = 22,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div5",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fclk_div5_div.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_fclk_div7_div = {
+ .mult = 1,
+ .div = 7,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div7_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) { &g12a_fixed_pll.hw },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_fclk_div7 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_FIX_PLL_CNTL1,
+ .bit_idx = 23,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div7",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fclk_div7_div.hw
+ },
+ .num_parents = 1,
+ },
+};
+
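+/*
+ * Note: fclk_div2p5 divides the fixed PLL DCO, not fixed_pll itself;
+ * with fixed_pll nominally running at half the DCO rate, /5 of the DCO
+ * is equivalent to fixed_pll / 2.5.
+ */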
+static struct clk_fixed_factor g12a_fclk_div2p5_div = {
+ .mult = 1,
+ .div = 5,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div2p5_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fixed_pll_dco.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_fclk_div2p5 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_FIX_PLL_CNTL1,
+ .bit_idx = 25,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div2p5",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fclk_div2p5_div.hw
+ },
+ .num_parents = 1,
+ },
+};
+
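+/* Assuming the usual 4GHz fixed PLL DCO rate, /80 yields the 50MHz reference */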
+static struct clk_fixed_factor g12a_mpll_50m_div = {
+ .mult = 1,
+ .div = 80,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll_50m_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fixed_pll_dco.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_mpll_50m = {
.data = &(struct clk_regmap_mux_data){
- .offset = HHI_SYS_CPU_CLK_CNTL0,
- .mask = 0x3,
- .shift = 0,
- .flags = CLK_MUX_ROUND_CLOSEST,
+ .offset = HHI_FIX_PLL_CNTL3,
+ .mask = 0x1,
+ .shift = 5,
},
.hw.init = &(struct clk_init_data){
- .name = "cpu_clk_dyn0_sel",
- .ops = &clk_regmap_mux_ops,
+ .name = "mpll_50m",
+ .ops = &clk_regmap_mux_ro_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "xtal", },
- { .hw = &g12a_fclk_div2.hw },
- { .hw = &g12a_fclk_div3.hw },
+ { .hw = &g12a_mpll_50m_div.hw },
},
- .num_parents = 3,
- .flags = CLK_SET_RATE_PARENT,
+ .num_parents = 2,
},
};
-/* Datasheet names this field as "premux1" */
-static struct clk_regmap g12a_cpu_clk_premux1 = {
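+/* Note: common /2 pre-divider from the fixed PLL DCO, shared by the MPLL dividers */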
+static struct clk_fixed_factor g12a_mpll_prediv = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll_prediv",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fixed_pll_dco.hw
+ },
+ .num_parents = 1,
+ },
+};
+
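+/*
+ * The dynamic CPU clock is made of two identical branches: a parent mux
+ * (dynX_sel), a divider (dynX_div) and a post mux (dynX) that can bypass
+ * the divider. cpu_clk_dyn then picks the active branch while the other
+ * one is parked, as handled by the rate-change notifier further down.
+ */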
+/* Datasheet names this field as "premux0" */
+static struct clk_regmap g12a_cpu_clk_dyn0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL0,
.mask = 0x3,
- .shift = 16,
+ .shift = 0,
+ .flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
- .name = "cpu_clk_dyn1_sel",
+ .name = "cpu_clk_dyn0_sel",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "xtal", },
@@ -496,13 +1071,12 @@ static struct clk_regmap g12a_cpu_clk_premux1 = {
{ .hw = &g12a_fclk_div3.hw },
},
.num_parents = 3,
- /* This sub-tree is used a parking clock */
- .flags = CLK_SET_RATE_NO_REPARENT
+ .flags = CLK_SET_RATE_PARENT,
},
};
/* Datasheet names this field as "mux0_divn_tcnt" */
-static struct clk_regmap g12a_cpu_clk_mux0_div = {
+static struct clk_regmap g12a_cpu_clk_dyn0_div = {
.data = &(struct meson_clk_cpu_dyndiv_data){
.div = {
.reg_off = HHI_SYS_CPU_CLK_CNTL0,
@@ -519,7 +1093,7 @@ static struct clk_regmap g12a_cpu_clk_mux0_div = {
.name = "cpu_clk_dyn0_div",
.ops = &meson_clk_cpu_dyndiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_cpu_clk_premux0.hw
+ &g12a_cpu_clk_dyn0_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -527,7 +1101,7 @@ static struct clk_regmap g12a_cpu_clk_mux0_div = {
};
/* Datasheet names this field as "postmux0" */
-static struct clk_regmap g12a_cpu_clk_postmux0 = {
+static struct clk_regmap g12a_cpu_clk_dyn0 = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL0,
.mask = 0x1,
@@ -538,16 +1112,37 @@ static struct clk_regmap g12a_cpu_clk_postmux0 = {
.name = "cpu_clk_dyn0",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_cpu_clk_premux0.hw,
- &g12a_cpu_clk_mux0_div.hw,
+ &g12a_cpu_clk_dyn0_sel.hw,
+ &g12a_cpu_clk_dyn0_div.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
+/* Datasheet names this field as "premux1" */
+static struct clk_regmap g12a_cpu_clk_dyn1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL0,
+ .mask = 0x3,
+ .shift = 16,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_dyn1_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xtal", },
+ { .hw = &g12a_fclk_div2.hw },
+ { .hw = &g12a_fclk_div3.hw },
+ },
+ .num_parents = 3,
+		/* This sub-tree is used as a parking clock */
+ .flags = CLK_SET_RATE_NO_REPARENT
+ },
+};
+
/* Datasheet names this field as "Mux1_divn_tcnt" */
-static struct clk_regmap g12a_cpu_clk_mux1_div = {
+static struct clk_regmap g12a_cpu_clk_dyn1_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_SYS_CPU_CLK_CNTL0,
.shift = 20,
@@ -557,14 +1152,14 @@ static struct clk_regmap g12a_cpu_clk_mux1_div = {
.name = "cpu_clk_dyn1_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_cpu_clk_premux1.hw
+ &g12a_cpu_clk_dyn1_sel.hw
},
.num_parents = 1,
},
};
/* Datasheet names this field as "postmux1" */
-static struct clk_regmap g12a_cpu_clk_postmux1 = {
+static struct clk_regmap g12a_cpu_clk_dyn1 = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL0,
.mask = 0x1,
@@ -574,8 +1169,8 @@ static struct clk_regmap g12a_cpu_clk_postmux1 = {
.name = "cpu_clk_dyn1",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_cpu_clk_premux1.hw,
- &g12a_cpu_clk_mux1_div.hw,
+ &g12a_cpu_clk_dyn1_sel.hw,
+ &g12a_cpu_clk_dyn1_div.hw,
},
.num_parents = 2,
 		/* This sub-tree is used as a parking clock */
@@ -595,8 +1190,8 @@ static struct clk_regmap g12a_cpu_clk_dyn = {
.name = "cpu_clk_dyn",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_cpu_clk_postmux0.hw,
- &g12a_cpu_clk_postmux1.hw,
+ &g12a_cpu_clk_dyn0.hw,
+ &g12a_cpu_clk_dyn1.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
@@ -644,7 +1239,7 @@ static struct clk_regmap g12b_cpu_clk = {
};
/* Datasheet names this field as "premux0" */
-static struct clk_regmap g12b_cpub_clk_premux0 = {
+static struct clk_regmap g12b_cpub_clk_dyn0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPUB_CLK_CNTL,
.mask = 0x3,
@@ -665,7 +1260,7 @@ static struct clk_regmap g12b_cpub_clk_premux0 = {
};
/* Datasheet names this field as "mux0_divn_tcnt" */
-static struct clk_regmap g12b_cpub_clk_mux0_div = {
+static struct clk_regmap g12b_cpub_clk_dyn0_div = {
.data = &(struct meson_clk_cpu_dyndiv_data){
.div = {
.reg_off = HHI_SYS_CPUB_CLK_CNTL,
@@ -682,7 +1277,7 @@ static struct clk_regmap g12b_cpub_clk_mux0_div = {
.name = "cpub_clk_dyn0_div",
.ops = &meson_clk_cpu_dyndiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_premux0.hw
+ &g12b_cpub_clk_dyn0_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -690,7 +1285,7 @@ static struct clk_regmap g12b_cpub_clk_mux0_div = {
};
/* Datasheet names this field as "postmux0" */
-static struct clk_regmap g12b_cpub_clk_postmux0 = {
+static struct clk_regmap g12b_cpub_clk_dyn0 = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPUB_CLK_CNTL,
.mask = 0x1,
@@ -701,8 +1296,8 @@ static struct clk_regmap g12b_cpub_clk_postmux0 = {
.name = "cpub_clk_dyn0",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_premux0.hw,
- &g12b_cpub_clk_mux0_div.hw
+ &g12b_cpub_clk_dyn0_sel.hw,
+ &g12b_cpub_clk_dyn0_div.hw
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
@@ -710,7 +1305,7 @@ static struct clk_regmap g12b_cpub_clk_postmux0 = {
};
/* Datasheet names this field as "premux1" */
-static struct clk_regmap g12b_cpub_clk_premux1 = {
+static struct clk_regmap g12b_cpub_clk_dyn1_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPUB_CLK_CNTL,
.mask = 0x3,
@@ -731,7 +1326,7 @@ static struct clk_regmap g12b_cpub_clk_premux1 = {
};
/* Datasheet names this field as "Mux1_divn_tcnt" */
-static struct clk_regmap g12b_cpub_clk_mux1_div = {
+static struct clk_regmap g12b_cpub_clk_dyn1_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_SYS_CPUB_CLK_CNTL,
.shift = 20,
@@ -741,14 +1336,14 @@ static struct clk_regmap g12b_cpub_clk_mux1_div = {
.name = "cpub_clk_dyn1_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_premux1.hw
+ &g12b_cpub_clk_dyn1_sel.hw
},
.num_parents = 1,
},
};
/* Datasheet names this field as "postmux1" */
-static struct clk_regmap g12b_cpub_clk_postmux1 = {
+static struct clk_regmap g12b_cpub_clk_dyn1 = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPUB_CLK_CNTL,
.mask = 0x1,
@@ -758,8 +1353,8 @@ static struct clk_regmap g12b_cpub_clk_postmux1 = {
.name = "cpub_clk_dyn1",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_premux1.hw,
- &g12b_cpub_clk_mux1_div.hw
+ &g12b_cpub_clk_dyn1_sel.hw,
+ &g12b_cpub_clk_dyn1_div.hw
},
.num_parents = 2,
 		/* This sub-tree is used as a parking clock */
@@ -779,8 +1374,8 @@ static struct clk_regmap g12b_cpub_clk_dyn = {
.name = "cpub_clk_dyn",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_postmux0.hw,
- &g12b_cpub_clk_postmux1.hw
+ &g12b_cpub_clk_dyn0.hw,
+ &g12b_cpub_clk_dyn1.hw
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
@@ -807,10 +1402,8 @@ static struct clk_regmap g12b_cpub_clk = {
},
};
-static struct clk_regmap sm1_gp1_pll;
-
/* Datasheet names this field as "premux0" */
-static struct clk_regmap sm1_dsu_clk_premux0 = {
+static struct clk_regmap sm1_dsu_clk_dyn0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL5,
.mask = 0x3,
@@ -829,28 +1422,8 @@ static struct clk_regmap sm1_dsu_clk_premux0 = {
},
};
-/* Datasheet names this field as "premux1" */
-static struct clk_regmap sm1_dsu_clk_premux1 = {
- .data = &(struct clk_regmap_mux_data){
- .offset = HHI_SYS_CPU_CLK_CNTL5,
- .mask = 0x3,
- .shift = 16,
- },
- .hw.init = &(struct clk_init_data){
- .name = "dsu_clk_dyn1_sel",
- .ops = &clk_regmap_mux_ro_ops,
- .parent_data = (const struct clk_parent_data []) {
- { .fw_name = "xtal", },
- { .hw = &g12a_fclk_div2.hw },
- { .hw = &g12a_fclk_div3.hw },
- { .hw = &sm1_gp1_pll.hw },
- },
- .num_parents = 4,
- },
-};
-
/* Datasheet names this field as "Mux0_divn_tcnt" */
-static struct clk_regmap sm1_dsu_clk_mux0_div = {
+static struct clk_regmap sm1_dsu_clk_dyn0_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_SYS_CPU_CLK_CNTL5,
.shift = 4,
@@ -860,14 +1433,14 @@ static struct clk_regmap sm1_dsu_clk_mux0_div = {
.name = "dsu_clk_dyn0_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sm1_dsu_clk_premux0.hw
+ &sm1_dsu_clk_dyn0_sel.hw
},
.num_parents = 1,
},
};
/* Datasheet names this field as "postmux0" */
-static struct clk_regmap sm1_dsu_clk_postmux0 = {
+static struct clk_regmap sm1_dsu_clk_dyn0 = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL5,
.mask = 0x1,
@@ -877,15 +1450,35 @@ static struct clk_regmap sm1_dsu_clk_postmux0 = {
.name = "dsu_clk_dyn0",
.ops = &clk_regmap_mux_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sm1_dsu_clk_premux0.hw,
- &sm1_dsu_clk_mux0_div.hw,
+ &sm1_dsu_clk_dyn0_sel.hw,
+ &sm1_dsu_clk_dyn0_div.hw,
},
.num_parents = 2,
},
};
+/* Datasheet names this field as "premux1" */
+static struct clk_regmap sm1_dsu_clk_dyn1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL5,
+ .mask = 0x3,
+ .shift = 16,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "dsu_clk_dyn1_sel",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xtal", },
+ { .hw = &g12a_fclk_div2.hw },
+ { .hw = &g12a_fclk_div3.hw },
+ { .hw = &sm1_gp1_pll.hw },
+ },
+ .num_parents = 4,
+ },
+};
+
/* Datasheet names this field as "Mux1_divn_tcnt" */
-static struct clk_regmap sm1_dsu_clk_mux1_div = {
+static struct clk_regmap sm1_dsu_clk_dyn1_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_SYS_CPU_CLK_CNTL5,
.shift = 20,
@@ -895,14 +1488,14 @@ static struct clk_regmap sm1_dsu_clk_mux1_div = {
.name = "dsu_clk_dyn1_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sm1_dsu_clk_premux1.hw
+ &sm1_dsu_clk_dyn1_sel.hw
},
.num_parents = 1,
},
};
/* Datasheet names this field as "postmux1" */
-static struct clk_regmap sm1_dsu_clk_postmux1 = {
+static struct clk_regmap sm1_dsu_clk_dyn1 = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL5,
.mask = 0x1,
@@ -912,8 +1505,8 @@ static struct clk_regmap sm1_dsu_clk_postmux1 = {
.name = "dsu_clk_dyn1",
.ops = &clk_regmap_mux_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sm1_dsu_clk_premux1.hw,
- &sm1_dsu_clk_mux1_div.hw,
+ &sm1_dsu_clk_dyn1_sel.hw,
+ &sm1_dsu_clk_dyn1_div.hw,
},
.num_parents = 2,
},
@@ -930,8 +1523,8 @@ static struct clk_regmap sm1_dsu_clk_dyn = {
.name = "dsu_clk_dyn",
.ops = &clk_regmap_mux_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sm1_dsu_clk_postmux0.hw,
- &sm1_dsu_clk_postmux1.hw,
+ &sm1_dsu_clk_dyn0.hw,
+ &sm1_dsu_clk_dyn1.hw,
},
.num_parents = 2,
},
@@ -1043,7 +1636,7 @@ static struct notifier_block g12a_cpu_clk_mux_nb = {
.notifier_call = g12a_cpu_clk_mux_notifier_cb,
};
-struct g12a_cpu_clk_postmux_nb_data {
+struct g12a_cpu_clk_dyn_nb_data {
struct notifier_block nb;
struct clk_hw *xtal;
struct clk_hw *cpu_clk_dyn;
@@ -1052,33 +1645,33 @@ struct g12a_cpu_clk_postmux_nb_data {
struct clk_hw *cpu_clk_premux1;
};
-static int g12a_cpu_clk_postmux_notifier_cb(struct notifier_block *nb,
- unsigned long event, void *data)
+static int g12a_cpu_clk_dyn_notifier_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
{
- struct g12a_cpu_clk_postmux_nb_data *nb_data =
- container_of(nb, struct g12a_cpu_clk_postmux_nb_data, nb);
+ struct g12a_cpu_clk_dyn_nb_data *nb_data =
+ container_of(nb, struct g12a_cpu_clk_dyn_nb_data, nb);
switch (event) {
case PRE_RATE_CHANGE:
/*
- * This notifier means cpu_clk_postmux0 clock will be changed
+		 * This notifier means the cpu_clk_dyn0 clock will be changed
* to feed cpu_clk, this is the current path :
* cpu_clk
* \- cpu_clk_dyn
- * \- cpu_clk_postmux0
- * \- cpu_clk_muxX_div
- * \- cpu_clk_premux0
+ * \- cpu_clk_dyn0
+ * \- cpu_clk_dyn0_div
+ * \- cpu_clk_dyn0_sel
* \- fclk_div3 or fclk_div2
* OR
- * \- cpu_clk_premux0
+ * \- cpu_clk_dyn0_sel
* \- fclk_div3 or fclk_div2
*/
- /* Setup cpu_clk_premux1 to xtal */
+ /* Setup cpu_clk_dyn1_sel to xtal */
clk_hw_set_parent(nb_data->cpu_clk_premux1,
nb_data->xtal);
- /* Setup cpu_clk_postmux1 to bypass divider */
+ /* Setup cpu_clk_dyn1 to bypass divider */
clk_hw_set_parent(nb_data->cpu_clk_postmux1,
nb_data->cpu_clk_premux1);
@@ -1090,8 +1683,8 @@ static int g12a_cpu_clk_postmux_notifier_cb(struct notifier_block *nb,
* Now, cpu_clk is 24MHz in the current path :
* cpu_clk
* \- cpu_clk_dyn
- * \- cpu_clk_postmux1
- * \- cpu_clk_premux1
+ * \- cpu_clk_dyn1
+ * \- cpu_clk_dyn1_sel
* \- xtal
*/
@@ -1101,8 +1694,8 @@ static int g12a_cpu_clk_postmux_notifier_cb(struct notifier_block *nb,
case POST_RATE_CHANGE:
/*
- * The cpu_clk_postmux0 has ben updated, now switch back
- * cpu_clk_dyn to cpu_clk_postmux0 and take the changes
+	 * The cpu_clk_dyn0 has been updated, now switch back
+ * cpu_clk_dyn to cpu_clk_dyn0 and take the changes
	 * into account.
*/
@@ -1114,12 +1707,12 @@ static int g12a_cpu_clk_postmux_notifier_cb(struct notifier_block *nb,
* new path :
* cpu_clk
* \- cpu_clk_dyn
- * \- cpu_clk_postmux0
- * \- cpu_clk_muxX_div
- * \- cpu_clk_premux0
+ * \- cpu_clk_dyn0
+ * \- cpu_clk_dyn0_div
+ * \- cpu_clk_dyn0_sel
* \- fclk_div3 or fclk_div2
* OR
- * \- cpu_clk_premux0
+ * \- cpu_clk_dyn0_sel
* \- fclk_div3 or fclk_div2
*/
@@ -1132,20 +1725,20 @@ static int g12a_cpu_clk_postmux_notifier_cb(struct notifier_block *nb,
}
}
-static struct g12a_cpu_clk_postmux_nb_data g12a_cpu_clk_postmux0_nb_data = {
+static struct g12a_cpu_clk_dyn_nb_data g12a_cpu_clk_dyn0_nb_data = {
.cpu_clk_dyn = &g12a_cpu_clk_dyn.hw,
- .cpu_clk_postmux0 = &g12a_cpu_clk_postmux0.hw,
- .cpu_clk_postmux1 = &g12a_cpu_clk_postmux1.hw,
- .cpu_clk_premux1 = &g12a_cpu_clk_premux1.hw,
- .nb.notifier_call = g12a_cpu_clk_postmux_notifier_cb,
+ .cpu_clk_postmux0 = &g12a_cpu_clk_dyn0.hw,
+ .cpu_clk_postmux1 = &g12a_cpu_clk_dyn1.hw,
+ .cpu_clk_premux1 = &g12a_cpu_clk_dyn1_sel.hw,
+ .nb.notifier_call = g12a_cpu_clk_dyn_notifier_cb,
};
-static struct g12a_cpu_clk_postmux_nb_data g12b_cpub_clk_postmux0_nb_data = {
+static struct g12a_cpu_clk_dyn_nb_data g12b_cpub_clk_dyn0_nb_data = {
.cpu_clk_dyn = &g12b_cpub_clk_dyn.hw,
- .cpu_clk_postmux0 = &g12b_cpub_clk_postmux0.hw,
- .cpu_clk_postmux1 = &g12b_cpub_clk_postmux1.hw,
- .cpu_clk_premux1 = &g12b_cpub_clk_premux1.hw,
- .nb.notifier_call = g12a_cpu_clk_postmux_notifier_cb,
+ .cpu_clk_postmux0 = &g12b_cpub_clk_dyn0.hw,
+ .cpu_clk_postmux1 = &g12b_cpub_clk_dyn1.hw,
+ .cpu_clk_premux1 = &g12b_cpub_clk_dyn1_sel.hw,
+ .nb.notifier_call = g12a_cpu_clk_dyn_notifier_cb,
};
struct g12a_sys_pll_nb_data {
@@ -1551,27 +2144,29 @@ static struct clk_fixed_factor g12b_cpub_clk_div8 = {
},
};
-static u32 mux_table_cpub[] = { 1, 2, 3, 4, 5, 6, 7 };
+static u32 g12b_cpub_clk_if_parents_val_table[] = { 1, 2, 3, 4, 5, 6, 7 };
+static const struct clk_hw *g12b_cpub_clk_if_parents[] = {
+ &g12b_cpub_clk_div2.hw,
+ &g12b_cpub_clk_div3.hw,
+ &g12b_cpub_clk_div4.hw,
+ &g12b_cpub_clk_div5.hw,
+ &g12b_cpub_clk_div6.hw,
+ &g12b_cpub_clk_div7.hw,
+ &g12b_cpub_clk_div8.hw,
+};
+
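+/*
+ * Entry i of the value table selects cpub_clk_div(i + 2), i.e. register
+ * values 1..7 pick the /2../8 post-dividers of cpub_clk.
+ */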
static struct clk_regmap g12b_cpub_clk_apb_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPUB_CLK_CNTL1,
.mask = 7,
.shift = 3,
- .table = mux_table_cpub,
+ .table = g12b_cpub_clk_if_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_apb_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_div2.hw,
- &g12b_cpub_clk_div3.hw,
- &g12b_cpub_clk_div4.hw,
- &g12b_cpub_clk_div5.hw,
- &g12b_cpub_clk_div6.hw,
- &g12b_cpub_clk_div7.hw,
- &g12b_cpub_clk_div8.hw
- },
- .num_parents = 7,
+ .parent_hws = g12b_cpub_clk_if_parents,
+ .num_parents = ARRAY_SIZE(g12b_cpub_clk_if_parents),
},
};
@@ -1600,21 +2195,13 @@ static struct clk_regmap g12b_cpub_clk_atb_sel = {
.offset = HHI_SYS_CPUB_CLK_CNTL1,
.mask = 7,
.shift = 6,
- .table = mux_table_cpub,
+ .table = g12b_cpub_clk_if_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_atb_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_div2.hw,
- &g12b_cpub_clk_div3.hw,
- &g12b_cpub_clk_div4.hw,
- &g12b_cpub_clk_div5.hw,
- &g12b_cpub_clk_div6.hw,
- &g12b_cpub_clk_div7.hw,
- &g12b_cpub_clk_div8.hw
- },
- .num_parents = 7,
+ .parent_hws = g12b_cpub_clk_if_parents,
+ .num_parents = ARRAY_SIZE(g12b_cpub_clk_if_parents),
},
};
@@ -1643,21 +2230,13 @@ static struct clk_regmap g12b_cpub_clk_axi_sel = {
.offset = HHI_SYS_CPUB_CLK_CNTL1,
.mask = 7,
.shift = 9,
- .table = mux_table_cpub,
+ .table = g12b_cpub_clk_if_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_axi_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_div2.hw,
- &g12b_cpub_clk_div3.hw,
- &g12b_cpub_clk_div4.hw,
- &g12b_cpub_clk_div5.hw,
- &g12b_cpub_clk_div6.hw,
- &g12b_cpub_clk_div7.hw,
- &g12b_cpub_clk_div8.hw
- },
- .num_parents = 7,
+ .parent_hws = g12b_cpub_clk_if_parents,
+ .num_parents = ARRAY_SIZE(g12b_cpub_clk_if_parents),
},
};
@@ -1686,21 +2265,13 @@ static struct clk_regmap g12b_cpub_clk_trace_sel = {
.offset = HHI_SYS_CPUB_CLK_CNTL1,
.mask = 7,
.shift = 20,
- .table = mux_table_cpub,
+ .table = g12b_cpub_clk_if_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_trace_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_div2.hw,
- &g12b_cpub_clk_div3.hw,
- &g12b_cpub_clk_div4.hw,
- &g12b_cpub_clk_div5.hw,
- &g12b_cpub_clk_div6.hw,
- &g12b_cpub_clk_div7.hw,
- &g12b_cpub_clk_div8.hw
- },
- .num_parents = 7,
+ .parent_hws = g12b_cpub_clk_if_parents,
+ .num_parents = ARRAY_SIZE(g12b_cpub_clk_if_parents),
},
};
@@ -1724,600 +2295,6 @@ static struct clk_regmap g12b_cpub_clk_trace = {
},
};
-static const struct pll_mult_range g12a_gp0_pll_mult_range = {
- .min = 125,
- .max = 255,
-};
-
-/*
- * Internal gp0 pll emulation configuration parameters
- */
-static const struct reg_sequence g12a_gp0_init_regs[] = {
- { .reg = HHI_GP0_PLL_CNTL1, .def = 0x00000000 },
- { .reg = HHI_GP0_PLL_CNTL2, .def = 0x00000000 },
- { .reg = HHI_GP0_PLL_CNTL3, .def = 0x48681c00 },
- { .reg = HHI_GP0_PLL_CNTL4, .def = 0x33771290 },
- { .reg = HHI_GP0_PLL_CNTL5, .def = 0x39272000 },
- { .reg = HHI_GP0_PLL_CNTL6, .def = 0x56540000 },
-};
-
-static struct clk_regmap g12a_gp0_pll_dco = {
- .data = &(struct meson_clk_pll_data){
- .en = {
- .reg_off = HHI_GP0_PLL_CNTL0,
- .shift = 28,
- .width = 1,
- },
- .m = {
- .reg_off = HHI_GP0_PLL_CNTL0,
- .shift = 0,
- .width = 8,
- },
- .n = {
- .reg_off = HHI_GP0_PLL_CNTL0,
- .shift = 10,
- .width = 5,
- },
- .frac = {
- .reg_off = HHI_GP0_PLL_CNTL1,
- .shift = 0,
- .width = 17,
- },
- .l = {
- .reg_off = HHI_GP0_PLL_CNTL0,
- .shift = 31,
- .width = 1,
- },
- .rst = {
- .reg_off = HHI_GP0_PLL_CNTL0,
- .shift = 29,
- .width = 1,
- },
- .range = &g12a_gp0_pll_mult_range,
- .init_regs = g12a_gp0_init_regs,
- .init_count = ARRAY_SIZE(g12a_gp0_init_regs),
- },
- .hw.init = &(struct clk_init_data){
- .name = "gp0_pll_dco",
- .ops = &meson_clk_pll_ops,
- .parent_data = &(const struct clk_parent_data) {
- .fw_name = "xtal",
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_regmap g12a_gp0_pll = {
- .data = &(struct clk_regmap_div_data){
- .offset = HHI_GP0_PLL_CNTL0,
- .shift = 16,
- .width = 3,
- .flags = (CLK_DIVIDER_POWER_OF_TWO |
- CLK_DIVIDER_ROUND_CLOSEST),
- },
- .hw.init = &(struct clk_init_data){
- .name = "gp0_pll",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_gp0_pll_dco.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap sm1_gp1_pll_dco = {
- .data = &(struct meson_clk_pll_data){
- .en = {
- .reg_off = HHI_GP1_PLL_CNTL0,
- .shift = 28,
- .width = 1,
- },
- .m = {
- .reg_off = HHI_GP1_PLL_CNTL0,
- .shift = 0,
- .width = 8,
- },
- .n = {
- .reg_off = HHI_GP1_PLL_CNTL0,
- .shift = 10,
- .width = 5,
- },
- .frac = {
- .reg_off = HHI_GP1_PLL_CNTL1,
- .shift = 0,
- .width = 17,
- },
- .l = {
- .reg_off = HHI_GP1_PLL_CNTL0,
- .shift = 31,
- .width = 1,
- },
- .rst = {
- .reg_off = HHI_GP1_PLL_CNTL0,
- .shift = 29,
- .width = 1,
- },
- },
- .hw.init = &(struct clk_init_data){
- .name = "gp1_pll_dco",
- .ops = &meson_clk_pll_ro_ops,
- .parent_data = &(const struct clk_parent_data) {
- .fw_name = "xtal",
- },
- .num_parents = 1,
- /* This clock feeds the DSU, avoid disabling it */
- .flags = CLK_IS_CRITICAL,
- },
-};
-
-static struct clk_regmap sm1_gp1_pll = {
- .data = &(struct clk_regmap_div_data){
- .offset = HHI_GP1_PLL_CNTL0,
- .shift = 16,
- .width = 3,
- .flags = (CLK_DIVIDER_POWER_OF_TWO |
- CLK_DIVIDER_ROUND_CLOSEST),
- },
- .hw.init = &(struct clk_init_data){
- .name = "gp1_pll",
- .ops = &clk_regmap_divider_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &sm1_gp1_pll_dco.hw
- },
- .num_parents = 1,
- },
-};
-
-/*
- * Internal hifi pll emulation configuration parameters
- */
-static const struct reg_sequence g12a_hifi_init_regs[] = {
- { .reg = HHI_HIFI_PLL_CNTL1, .def = 0x00000000 },
- { .reg = HHI_HIFI_PLL_CNTL2, .def = 0x00000000 },
- { .reg = HHI_HIFI_PLL_CNTL3, .def = 0x6a285c00 },
- { .reg = HHI_HIFI_PLL_CNTL4, .def = 0x65771290 },
- { .reg = HHI_HIFI_PLL_CNTL5, .def = 0x39272000 },
- { .reg = HHI_HIFI_PLL_CNTL6, .def = 0x56540000 },
-};
-
-static struct clk_regmap g12a_hifi_pll_dco = {
- .data = &(struct meson_clk_pll_data){
- .en = {
- .reg_off = HHI_HIFI_PLL_CNTL0,
- .shift = 28,
- .width = 1,
- },
- .m = {
- .reg_off = HHI_HIFI_PLL_CNTL0,
- .shift = 0,
- .width = 8,
- },
- .n = {
- .reg_off = HHI_HIFI_PLL_CNTL0,
- .shift = 10,
- .width = 5,
- },
- .frac = {
- .reg_off = HHI_HIFI_PLL_CNTL1,
- .shift = 0,
- .width = 17,
- },
- .l = {
- .reg_off = HHI_HIFI_PLL_CNTL0,
- .shift = 31,
- .width = 1,
- },
- .rst = {
- .reg_off = HHI_HIFI_PLL_CNTL0,
- .shift = 29,
- .width = 1,
- },
- .range = &g12a_gp0_pll_mult_range,
- .init_regs = g12a_hifi_init_regs,
- .init_count = ARRAY_SIZE(g12a_hifi_init_regs),
- .flags = CLK_MESON_PLL_ROUND_CLOSEST,
- },
- .hw.init = &(struct clk_init_data){
- .name = "hifi_pll_dco",
- .ops = &meson_clk_pll_ops,
- .parent_data = &(const struct clk_parent_data) {
- .fw_name = "xtal",
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_regmap g12a_hifi_pll = {
- .data = &(struct clk_regmap_div_data){
- .offset = HHI_HIFI_PLL_CNTL0,
- .shift = 16,
- .width = 2,
- .flags = (CLK_DIVIDER_POWER_OF_TWO |
- CLK_DIVIDER_ROUND_CLOSEST),
- },
- .hw.init = &(struct clk_init_data){
- .name = "hifi_pll",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_hifi_pll_dco.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-/*
- * The Meson G12A PCIE PLL is fined tuned to deliver a very precise
- * 100MHz reference clock for the PCIe Analog PHY, and thus requires
- * a strict register sequence to enable the PLL.
- */
-static const struct reg_sequence g12a_pcie_pll_init_regs[] = {
- { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x20090496 },
- { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x30090496 },
- { .reg = HHI_PCIE_PLL_CNTL1, .def = 0x00000000 },
- { .reg = HHI_PCIE_PLL_CNTL2, .def = 0x00001100 },
- { .reg = HHI_PCIE_PLL_CNTL3, .def = 0x10058e00 },
- { .reg = HHI_PCIE_PLL_CNTL4, .def = 0x000100c0 },
- { .reg = HHI_PCIE_PLL_CNTL5, .def = 0x68000048 },
- { .reg = HHI_PCIE_PLL_CNTL5, .def = 0x68000068, .delay_us = 20 },
- { .reg = HHI_PCIE_PLL_CNTL4, .def = 0x008100c0, .delay_us = 10 },
- { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x34090496 },
- { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x14090496, .delay_us = 10 },
- { .reg = HHI_PCIE_PLL_CNTL2, .def = 0x00001000 },
-};
-
-/* Keep a single entry table for recalc/round_rate() ops */
-static const struct pll_params_table g12a_pcie_pll_table[] = {
- PLL_PARAMS(150, 1),
- {0, 0},
-};
-
-static struct clk_regmap g12a_pcie_pll_dco = {
- .data = &(struct meson_clk_pll_data){
- .en = {
- .reg_off = HHI_PCIE_PLL_CNTL0,
- .shift = 28,
- .width = 1,
- },
- .m = {
- .reg_off = HHI_PCIE_PLL_CNTL0,
- .shift = 0,
- .width = 8,
- },
- .n = {
- .reg_off = HHI_PCIE_PLL_CNTL0,
- .shift = 10,
- .width = 5,
- },
- .frac = {
- .reg_off = HHI_PCIE_PLL_CNTL1,
- .shift = 0,
- .width = 12,
- },
- .l = {
- .reg_off = HHI_PCIE_PLL_CNTL0,
- .shift = 31,
- .width = 1,
- },
- .rst = {
- .reg_off = HHI_PCIE_PLL_CNTL0,
- .shift = 29,
- .width = 1,
- },
- .table = g12a_pcie_pll_table,
- .init_regs = g12a_pcie_pll_init_regs,
- .init_count = ARRAY_SIZE(g12a_pcie_pll_init_regs),
- },
- .hw.init = &(struct clk_init_data){
- .name = "pcie_pll_dco",
- .ops = &meson_clk_pcie_pll_ops,
- .parent_data = &(const struct clk_parent_data) {
- .fw_name = "xtal",
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_fixed_factor g12a_pcie_pll_dco_div2 = {
- .mult = 1,
- .div = 2,
- .hw.init = &(struct clk_init_data){
- .name = "pcie_pll_dco_div2",
- .ops = &clk_fixed_factor_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_pcie_pll_dco.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap g12a_pcie_pll_od = {
- .data = &(struct clk_regmap_div_data){
- .offset = HHI_PCIE_PLL_CNTL0,
- .shift = 16,
- .width = 5,
- .flags = CLK_DIVIDER_ROUND_CLOSEST |
- CLK_DIVIDER_ONE_BASED |
- CLK_DIVIDER_ALLOW_ZERO,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pcie_pll_od",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_pcie_pll_dco_div2.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_fixed_factor g12a_pcie_pll = {
- .mult = 1,
- .div = 2,
- .hw.init = &(struct clk_init_data){
- .name = "pcie_pll_pll",
- .ops = &clk_fixed_factor_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_pcie_pll_od.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap g12a_hdmi_pll_dco = {
- .data = &(struct meson_clk_pll_data){
- .en = {
- .reg_off = HHI_HDMI_PLL_CNTL0,
- .shift = 28,
- .width = 1,
- },
- .m = {
- .reg_off = HHI_HDMI_PLL_CNTL0,
- .shift = 0,
- .width = 8,
- },
- .n = {
- .reg_off = HHI_HDMI_PLL_CNTL0,
- .shift = 10,
- .width = 5,
- },
- .frac = {
- .reg_off = HHI_HDMI_PLL_CNTL1,
- .shift = 0,
- .width = 16,
- },
- .l = {
- .reg_off = HHI_HDMI_PLL_CNTL0,
- .shift = 30,
- .width = 1,
- },
- .rst = {
- .reg_off = HHI_HDMI_PLL_CNTL0,
- .shift = 29,
- .width = 1,
- },
- },
- .hw.init = &(struct clk_init_data){
- .name = "hdmi_pll_dco",
- .ops = &meson_clk_pll_ro_ops,
- .parent_data = &(const struct clk_parent_data) {
- .fw_name = "xtal",
- },
- .num_parents = 1,
- /*
- * Display directly handle hdmi pll registers ATM, we need
- * NOCACHE to keep our view of the clock as accurate as possible
- */
- .flags = CLK_GET_RATE_NOCACHE,
- },
-};
-
-static struct clk_regmap g12a_hdmi_pll_od = {
- .data = &(struct clk_regmap_div_data){
- .offset = HHI_HDMI_PLL_CNTL0,
- .shift = 16,
- .width = 2,
- .flags = CLK_DIVIDER_POWER_OF_TWO,
- },
- .hw.init = &(struct clk_init_data){
- .name = "hdmi_pll_od",
- .ops = &clk_regmap_divider_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_hdmi_pll_dco.hw
- },
- .num_parents = 1,
- .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap g12a_hdmi_pll_od2 = {
- .data = &(struct clk_regmap_div_data){
- .offset = HHI_HDMI_PLL_CNTL0,
- .shift = 18,
- .width = 2,
- .flags = CLK_DIVIDER_POWER_OF_TWO,
- },
- .hw.init = &(struct clk_init_data){
- .name = "hdmi_pll_od2",
- .ops = &clk_regmap_divider_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_hdmi_pll_od.hw
- },
- .num_parents = 1,
- .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap g12a_hdmi_pll = {
- .data = &(struct clk_regmap_div_data){
- .offset = HHI_HDMI_PLL_CNTL0,
- .shift = 20,
- .width = 2,
- .flags = CLK_DIVIDER_POWER_OF_TWO,
- },
- .hw.init = &(struct clk_init_data){
- .name = "hdmi_pll",
- .ops = &clk_regmap_divider_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_hdmi_pll_od2.hw
- },
- .num_parents = 1,
- .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_fixed_factor g12a_fclk_div4_div = {
- .mult = 1,
- .div = 4,
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div4_div",
- .ops = &clk_fixed_factor_ops,
- .parent_hws = (const struct clk_hw *[]) { &g12a_fixed_pll.hw },
- .num_parents = 1,
- },
-};
-
-static struct clk_regmap g12a_fclk_div4 = {
- .data = &(struct clk_regmap_gate_data){
- .offset = HHI_FIX_PLL_CNTL1,
- .bit_idx = 21,
- },
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div4",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_fclk_div4_div.hw
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_fixed_factor g12a_fclk_div5_div = {
- .mult = 1,
- .div = 5,
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div5_div",
- .ops = &clk_fixed_factor_ops,
- .parent_hws = (const struct clk_hw *[]) { &g12a_fixed_pll.hw },
- .num_parents = 1,
- },
-};
-
-static struct clk_regmap g12a_fclk_div5 = {
- .data = &(struct clk_regmap_gate_data){
- .offset = HHI_FIX_PLL_CNTL1,
- .bit_idx = 22,
- },
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div5",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_fclk_div5_div.hw
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_fixed_factor g12a_fclk_div7_div = {
- .mult = 1,
- .div = 7,
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div7_div",
- .ops = &clk_fixed_factor_ops,
- .parent_hws = (const struct clk_hw *[]) { &g12a_fixed_pll.hw },
- .num_parents = 1,
- },
-};
-
-static struct clk_regmap g12a_fclk_div7 = {
- .data = &(struct clk_regmap_gate_data){
- .offset = HHI_FIX_PLL_CNTL1,
- .bit_idx = 23,
- },
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div7",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_fclk_div7_div.hw
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_fixed_factor g12a_fclk_div2p5_div = {
- .mult = 1,
- .div = 5,
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div2p5_div",
- .ops = &clk_fixed_factor_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_fixed_pll_dco.hw
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_regmap g12a_fclk_div2p5 = {
- .data = &(struct clk_regmap_gate_data){
- .offset = HHI_FIX_PLL_CNTL1,
- .bit_idx = 25,
- },
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div2p5",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_fclk_div2p5_div.hw
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_fixed_factor g12a_mpll_50m_div = {
- .mult = 1,
- .div = 80,
- .hw.init = &(struct clk_init_data){
- .name = "mpll_50m_div",
- .ops = &clk_fixed_factor_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_fixed_pll_dco.hw
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_regmap g12a_mpll_50m = {
- .data = &(struct clk_regmap_mux_data){
- .offset = HHI_FIX_PLL_CNTL3,
- .mask = 0x1,
- .shift = 5,
- },
- .hw.init = &(struct clk_init_data){
- .name = "mpll_50m",
- .ops = &clk_regmap_mux_ro_ops,
- .parent_data = (const struct clk_parent_data []) {
- { .fw_name = "xtal", },
- { .hw = &g12a_mpll_50m_div.hw },
- },
- .num_parents = 2,
- },
-};
-
-static struct clk_fixed_factor g12a_mpll_prediv = {
- .mult = 1,
- .div = 2,
- .hw.init = &(struct clk_init_data){
- .name = "mpll_prediv",
- .ops = &clk_fixed_factor_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_fixed_pll_dco.hw
- },
- .num_parents = 1,
- },
-};
-
static const struct reg_sequence g12a_mpll0_init_regs[] = {
{ .reg = HHI_MPLL_CNTL2, .def = 0x40000033 },
};
@@ -2530,8 +2507,9 @@ static struct clk_regmap g12a_mpll3 = {
},
};
-static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
-static const struct clk_parent_data clk81_parent_data[] = {
+/* clk81 is often referred to as "mpeg_clk" */
+static u32 g12a_clk81_parents_val_table[] = { 0, 2, 3, 4, 5, 6, 7 };
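+/* Note: register value 1 is skipped as it has no parent modeled here */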
+static const struct clk_parent_data g12a_clk81_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &g12a_fclk_div7.hw },
{ .hw = &g12a_mpll1.hw },
@@ -2541,32 +2519,32 @@ static const struct clk_parent_data clk81_parent_data[] = {
{ .hw = &g12a_fclk_div5.hw },
};
-static struct clk_regmap g12a_mpeg_clk_sel = {
+static struct clk_regmap g12a_clk81_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_MPEG_CLK_CNTL,
.mask = 0x7,
.shift = 12,
- .table = mux_table_clk81,
+ .table = g12a_clk81_parents_val_table,
},
.hw.init = &(struct clk_init_data){
- .name = "mpeg_clk_sel",
+ .name = "clk81_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_data = clk81_parent_data,
- .num_parents = ARRAY_SIZE(clk81_parent_data),
+ .parent_data = g12a_clk81_parents,
+ .num_parents = ARRAY_SIZE(g12a_clk81_parents),
},
};
-static struct clk_regmap g12a_mpeg_clk_div = {
+static struct clk_regmap g12a_clk81_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_MPEG_CLK_CNTL,
.shift = 0,
.width = 7,
},
.hw.init = &(struct clk_init_data){
- .name = "mpeg_clk_div",
+ .name = "clk81_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_mpeg_clk_sel.hw
+ &g12a_clk81_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2582,14 +2560,14 @@ static struct clk_regmap g12a_clk81 = {
.name = "clk81",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_mpeg_clk_div.hw
+ &g12a_clk81_div.hw
},
.num_parents = 1,
.flags = (CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
},
};
-static const struct clk_parent_data g12a_sd_emmc_clk0_parent_data[] = {
+static const struct clk_parent_data g12a_sd_emmc_clk0_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &g12a_fclk_div2.hw },
{ .hw = &g12a_fclk_div3.hw },
@@ -2613,8 +2591,8 @@ static struct clk_regmap g12a_sd_emmc_a_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_a_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = g12a_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parent_data),
+ .parent_data = g12a_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2662,8 +2640,8 @@ static struct clk_regmap g12a_sd_emmc_b_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = g12a_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parent_data),
+ .parent_data = g12a_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2711,8 +2689,8 @@ static struct clk_regmap g12a_sd_emmc_c_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = g12a_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parent_data),
+ .parent_data = g12a_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2774,7 +2752,7 @@ static struct clk_regmap g12a_vid_pll_div = {
},
};
-static const struct clk_hw *g12a_vid_pll_parent_hws[] = {
+static const struct clk_hw *g12a_vid_pll_parents[] = {
&g12a_vid_pll_div.hw,
&g12a_hdmi_pll.hw,
};
@@ -2792,8 +2770,8 @@ static struct clk_regmap g12a_vid_pll_sel = {
* bit 18 selects from 2 possible parents:
* vid_pll_div or hdmi_pll
*/
- .parent_hws = g12a_vid_pll_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vid_pll_parent_hws),
+ .parent_hws = g12a_vid_pll_parents,
+ .num_parents = ARRAY_SIZE(g12a_vid_pll_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2816,7 +2794,7 @@ static struct clk_regmap g12a_vid_pll = {
/* VPU Clock */
-static const struct clk_hw *g12a_vpu_parent_hws[] = {
+static const struct clk_hw *g12a_vpu_parents[] = {
&g12a_fclk_div3.hw,
&g12a_fclk_div4.hw,
&g12a_fclk_div5.hw,
@@ -2836,8 +2814,8 @@ static struct clk_regmap g12a_vpu_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vpu_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vpu_parent_hws),
+ .parent_hws = g12a_vpu_parents,
+ .num_parents = ARRAY_SIZE(g12a_vpu_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -2880,8 +2858,8 @@ static struct clk_regmap g12a_vpu_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vpu_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vpu_parent_hws),
+ .parent_hws = g12a_vpu_parents,
+ .num_parents = ARRAY_SIZE(g12a_vpu_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -2939,7 +2917,7 @@ static struct clk_regmap g12a_vpu = {
/* VDEC clocks */
-static const struct clk_hw *g12a_vdec_parent_hws[] = {
+static const struct clk_hw *g12a_vdec_parents[] = {
&g12a_fclk_div2p5.hw,
&g12a_fclk_div3.hw,
&g12a_fclk_div4.hw,
@@ -2959,8 +2937,8 @@ static struct clk_regmap g12a_vdec_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vdec_parent_hws),
+ .parent_hws = g12a_vdec_parents,
+ .num_parents = ARRAY_SIZE(g12a_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -3009,8 +2987,8 @@ static struct clk_regmap g12a_vdec_hevcf_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_hevcf_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vdec_parent_hws),
+ .parent_hws = g12a_vdec_parents,
+ .num_parents = ARRAY_SIZE(g12a_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -3059,8 +3037,8 @@ static struct clk_regmap g12a_vdec_hevc_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_hevc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vdec_parent_hws),
+ .parent_hws = g12a_vdec_parents,
+ .num_parents = ARRAY_SIZE(g12a_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -3101,7 +3079,7 @@ static struct clk_regmap g12a_vdec_hevc = {
/* VAPB Clock */
-static const struct clk_hw *g12a_vapb_parent_hws[] = {
+static const struct clk_hw *g12a_vapb_parents[] = {
&g12a_fclk_div4.hw,
&g12a_fclk_div3.hw,
&g12a_fclk_div5.hw,
@@ -3121,8 +3099,8 @@ static struct clk_regmap g12a_vapb_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vapb_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vapb_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vapb_parent_hws),
+ .parent_hws = g12a_vapb_parents,
+ .num_parents = ARRAY_SIZE(g12a_vapb_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -3169,8 +3147,8 @@ static struct clk_regmap g12a_vapb_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vapb_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vapb_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vapb_parent_hws),
+ .parent_hws = g12a_vapb_parents,
+ .num_parents = ARRAY_SIZE(g12a_vapb_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -3244,7 +3222,7 @@ static struct clk_regmap g12a_vapb = {
},
};
-static const struct clk_hw *g12a_vclk_parent_hws[] = {
+static const struct clk_hw *g12a_vclk_parents[] = {
&g12a_vid_pll.hw,
&g12a_gp0_pll.hw,
&g12a_hifi_pll.hw,
@@ -3264,8 +3242,8 @@ static struct clk_regmap g12a_vclk_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vclk_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vclk_parent_hws),
+ .parent_hws = g12a_vclk_parents,
+ .num_parents = ARRAY_SIZE(g12a_vclk_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -3279,8 +3257,8 @@ static struct clk_regmap g12a_vclk2_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vclk_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vclk_parent_hws),
+ .parent_hws = g12a_vclk_parents,
+ .num_parents = ARRAY_SIZE(g12a_vclk_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -3643,8 +3621,8 @@ static struct clk_fixed_factor g12a_vclk2_div12 = {
},
};
-static u32 mux_table_cts_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
-static const struct clk_hw *g12a_cts_parent_hws[] = {
+static u32 g12a_cts_parents_val_table[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const struct clk_hw *g12a_cts_parents[] = {
&g12a_vclk_div1.hw,
&g12a_vclk_div2.hw,
&g12a_vclk_div4.hw,
@@ -3662,13 +3640,13 @@ static struct clk_regmap g12a_cts_enci_sel = {
.offset = HHI_VID_CLK_DIV,
.mask = 0xf,
.shift = 28,
- .table = mux_table_cts_sel,
+ .table = g12a_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_enci_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_cts_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_cts_parent_hws),
+ .parent_hws = g12a_cts_parents,
+ .num_parents = ARRAY_SIZE(g12a_cts_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -3678,13 +3656,13 @@ static struct clk_regmap g12a_cts_encp_sel = {
.offset = HHI_VID_CLK_DIV,
.mask = 0xf,
.shift = 20,
- .table = mux_table_cts_sel,
+ .table = g12a_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_encp_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_cts_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_cts_parent_hws),
+ .parent_hws = g12a_cts_parents,
+ .num_parents = ARRAY_SIZE(g12a_cts_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -3694,13 +3672,13 @@ static struct clk_regmap g12a_cts_encl_sel = {
.offset = HHI_VIID_CLK_DIV,
.mask = 0xf,
.shift = 12,
- .table = mux_table_cts_sel,
+ .table = g12a_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_encl_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_cts_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_cts_parent_hws),
+ .parent_hws = g12a_cts_parents,
+ .num_parents = ARRAY_SIZE(g12a_cts_parents),
.flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
},
};
@@ -3710,20 +3688,20 @@ static struct clk_regmap g12a_cts_vdac_sel = {
.offset = HHI_VIID_CLK_DIV,
.mask = 0xf,
.shift = 28,
- .table = mux_table_cts_sel,
+ .table = g12a_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_vdac_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_cts_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_cts_parent_hws),
+ .parent_hws = g12a_cts_parents,
+ .num_parents = ARRAY_SIZE(g12a_cts_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
/* TOFIX: add support for cts_tcon */
-static u32 mux_table_hdmi_tx_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
-static const struct clk_hw *g12a_cts_hdmi_tx_parent_hws[] = {
+static u32 g12a_hdmi_tx_parents_val_table[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const struct clk_hw *g12a_hdmi_tx_parents[] = {
&g12a_vclk_div1.hw,
&g12a_vclk_div2.hw,
&g12a_vclk_div4.hw,
@@ -3741,13 +3719,13 @@ static struct clk_regmap g12a_hdmi_tx_sel = {
.offset = HHI_HDMI_CLK_CNTL,
.mask = 0xf,
.shift = 16,
- .table = mux_table_hdmi_tx_sel,
+ .table = g12a_hdmi_tx_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "hdmi_tx_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_cts_hdmi_tx_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_cts_hdmi_tx_parent_hws),
+ .parent_hws = g12a_hdmi_tx_parents,
+ .num_parents = ARRAY_SIZE(g12a_hdmi_tx_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -3834,7 +3812,7 @@ static struct clk_regmap g12a_hdmi_tx = {
/* MIPI DSI Host Clocks */
-static const struct clk_hw *g12a_mipi_dsi_pxclk_parent_hws[] = {
+static const struct clk_hw *g12a_mipi_dsi_pxclk_parents[] = {
&g12a_vid_pll.hw,
&g12a_gp0_pll.hw,
&g12a_hifi_pll.hw,
@@ -3855,8 +3833,8 @@ static struct clk_regmap g12a_mipi_dsi_pxclk_sel = {
.hw.init = &(struct clk_init_data){
.name = "mipi_dsi_pxclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_mipi_dsi_pxclk_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_mipi_dsi_pxclk_parent_hws),
+ .parent_hws = g12a_mipi_dsi_pxclk_parents,
+ .num_parents = ARRAY_SIZE(g12a_mipi_dsi_pxclk_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT,
},
};
@@ -3907,7 +3885,7 @@ static struct clk_regmap g12a_mipi_dsi_pxclk = {
/* MIPI ISP Clocks */
-static const struct clk_parent_data g12b_mipi_isp_parent_data[] = {
+static const struct clk_parent_data g12b_mipi_isp_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &g12a_gp0_pll.hw },
{ .hw = &g12a_hifi_pll.hw },
@@ -3927,8 +3905,8 @@ static struct clk_regmap g12b_mipi_isp_sel = {
.hw.init = &(struct clk_init_data){
.name = "mipi_isp_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = g12b_mipi_isp_parent_data,
- .num_parents = ARRAY_SIZE(g12b_mipi_isp_parent_data),
+ .parent_data = g12b_mipi_isp_parents,
+ .num_parents = ARRAY_SIZE(g12b_mipi_isp_parents),
},
};
@@ -3967,7 +3945,7 @@ static struct clk_regmap g12b_mipi_isp = {
/* HDMI Clocks */
-static const struct clk_parent_data g12a_hdmi_parent_data[] = {
+static const struct clk_parent_data g12a_hdmi_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &g12a_fclk_div4.hw },
{ .hw = &g12a_fclk_div3.hw },
@@ -3984,8 +3962,8 @@ static struct clk_regmap g12a_hdmi_sel = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = g12a_hdmi_parent_data,
- .num_parents = ARRAY_SIZE(g12a_hdmi_parent_data),
+ .parent_data = g12a_hdmi_parents,
+ .num_parents = ARRAY_SIZE(g12a_hdmi_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -4025,7 +4003,7 @@ static struct clk_regmap g12a_hdmi = {
 * mux because it does top-to-bottom updates of each clock tree and
* switches to the "inactive" one when CLK_SET_RATE_GATE is set.
*/
-static const struct clk_parent_data g12a_mali_0_1_parent_data[] = {
+static const struct clk_parent_data g12a_mali_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &g12a_gp0_pll.hw },
{ .hw = &g12a_hifi_pll.hw },
@@ -4045,8 +4023,8 @@ static struct clk_regmap g12a_mali_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = g12a_mali_0_1_parent_data,
- .num_parents = 8,
+ .parent_data = g12a_mali_parents,
+ .num_parents = ARRAY_SIZE(g12a_mali_parents),
/*
* Don't request the parent to change the rate because
* all GPU frequencies can be derived from the fclk_*
@@ -4099,8 +4077,8 @@ static struct clk_regmap g12a_mali_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = g12a_mali_0_1_parent_data,
- .num_parents = 8,
+ .parent_data = g12a_mali_parents,
+ .num_parents = ARRAY_SIZE(g12a_mali_parents),
/*
* Don't request the parent to change the rate because
* all GPU frequencies can be derived from the fclk_*
@@ -4144,11 +4122,6 @@ static struct clk_regmap g12a_mali_1 = {
},
};
-static const struct clk_hw *g12a_mali_parent_hws[] = {
- &g12a_mali_0.hw,
- &g12a_mali_1.hw,
-};
-
static struct clk_regmap g12a_mali = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_MALI_CLK_CNTL,
@@ -4158,7 +4131,10 @@ static struct clk_regmap g12a_mali = {
.hw.init = &(struct clk_init_data){
.name = "mali",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_mali_parent_hws,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_mali_0.hw,
+ &g12a_mali_1.hw,
+ },
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
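For context: the mali_0/mali_1 pair feeding this mux is what makes GPU rate
changes glitch-free, as the comment above describes. A minimal consumer-side
sketch, assuming a hypothetical platform device whose only clock is the "mali"
output (device, names and rate are illustrative, not part of this patch):

	struct clk *gpu_clk;

	/* Hypothetical consumer code, illustrative only. */
	gpu_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(gpu_clk))
		return PTR_ERR(gpu_clk);

	/*
	 * One clk_set_rate() is enough: with CLK_SET_RATE_GATE set on the
	 * mali_0/mali_1 gates, the CCF reprograms the inactive leg and then
	 * switches this glitch-free mux to it.
	 */
	return clk_set_rate(gpu_clk, 800 * 1000 * 1000);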
@@ -4197,7 +4173,7 @@ static struct clk_regmap g12a_ts = {
/* SPICC SCLK source clock */
-static const struct clk_parent_data spicc_sclk_parent_data[] = {
+static const struct clk_parent_data g12a_spicc_sclk_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &g12a_clk81.hw },
{ .hw = &g12a_fclk_div4.hw },
@@ -4216,8 +4192,8 @@ static struct clk_regmap g12a_spicc0_sclk_sel = {
.hw.init = &(struct clk_init_data){
.name = "spicc0_sclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = spicc_sclk_parent_data,
- .num_parents = ARRAY_SIZE(spicc_sclk_parent_data),
+ .parent_data = g12a_spicc_sclk_parents,
+ .num_parents = ARRAY_SIZE(g12a_spicc_sclk_parents),
},
};
@@ -4263,8 +4239,8 @@ static struct clk_regmap g12a_spicc1_sclk_sel = {
.hw.init = &(struct clk_init_data){
.name = "spicc1_sclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = spicc_sclk_parent_data,
- .num_parents = ARRAY_SIZE(spicc_sclk_parent_data),
+ .parent_data = g12a_spicc_sclk_parents,
+ .num_parents = ARRAY_SIZE(g12a_spicc_sclk_parents),
},
};
@@ -4303,7 +4279,7 @@ static struct clk_regmap g12a_spicc1_sclk = {
/* Neural Network Accelerator source clock */
-static const struct clk_parent_data nna_clk_parent_data[] = {
+static const struct clk_parent_data sm1_nna_clk_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &g12a_gp0_pll.hw, },
{ .hw = &g12a_hifi_pll.hw, },
@@ -4323,8 +4299,8 @@ static struct clk_regmap sm1_nna_axi_clk_sel = {
.hw.init = &(struct clk_init_data){
.name = "nna_axi_clk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = nna_clk_parent_data,
- .num_parents = ARRAY_SIZE(nna_clk_parent_data),
+ .parent_data = sm1_nna_clk_parents,
+ .num_parents = ARRAY_SIZE(sm1_nna_clk_parents),
},
};
@@ -4370,8 +4346,8 @@ static struct clk_regmap sm1_nna_core_clk_sel = {
.hw.init = &(struct clk_init_data){
.name = "nna_core_clk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = nna_clk_parent_data,
- .num_parents = ARRAY_SIZE(nna_clk_parent_data),
+ .parent_data = sm1_nna_clk_parents,
+ .num_parents = ARRAY_SIZE(sm1_nna_clk_parents),
},
};
@@ -4408,89 +4384,101 @@ static struct clk_regmap sm1_nna_core_clk = {
},
};
-#define MESON_GATE(_name, _reg, _bit) \
- MESON_PCLK(_name, _reg, _bit, &g12a_clk81.hw)
-
-#define MESON_GATE_RO(_name, _reg, _bit) \
- MESON_PCLK_RO(_name, _reg, _bit, &g12a_clk81.hw)
-
-/* Everything Else (EE) domain gates */
-static MESON_GATE(g12a_ddr, HHI_GCLK_MPEG0, 0);
-static MESON_GATE(g12a_dos, HHI_GCLK_MPEG0, 1);
-static MESON_GATE(g12a_audio_locker, HHI_GCLK_MPEG0, 2);
-static MESON_GATE(g12a_mipi_dsi_host, HHI_GCLK_MPEG0, 3);
-static MESON_GATE(g12a_eth_phy, HHI_GCLK_MPEG0, 4);
-static MESON_GATE(g12a_isa, HHI_GCLK_MPEG0, 5);
-static MESON_GATE(g12a_pl301, HHI_GCLK_MPEG0, 6);
-static MESON_GATE(g12a_periphs, HHI_GCLK_MPEG0, 7);
-static MESON_GATE(g12a_spicc_0, HHI_GCLK_MPEG0, 8);
-static MESON_GATE(g12a_i2c, HHI_GCLK_MPEG0, 9);
-static MESON_GATE(g12a_sana, HHI_GCLK_MPEG0, 10);
-static MESON_GATE(g12a_sd, HHI_GCLK_MPEG0, 11);
-static MESON_GATE(g12a_rng0, HHI_GCLK_MPEG0, 12);
-static MESON_GATE(g12a_uart0, HHI_GCLK_MPEG0, 13);
-static MESON_GATE(g12a_spicc_1, HHI_GCLK_MPEG0, 14);
-static MESON_GATE(g12a_hiu_reg, HHI_GCLK_MPEG0, 19);
-static MESON_GATE(g12a_mipi_dsi_phy, HHI_GCLK_MPEG0, 20);
-static MESON_GATE(g12a_assist_misc, HHI_GCLK_MPEG0, 23);
-static MESON_GATE(g12a_emmc_a, HHI_GCLK_MPEG0, 24);
-static MESON_GATE(g12a_emmc_b, HHI_GCLK_MPEG0, 25);
-static MESON_GATE(g12a_emmc_c, HHI_GCLK_MPEG0, 26);
-static MESON_GATE(g12a_audio_codec, HHI_GCLK_MPEG0, 28);
-
-static MESON_GATE(g12a_audio, HHI_GCLK_MPEG1, 0);
-static MESON_GATE(g12a_eth_core, HHI_GCLK_MPEG1, 3);
-static MESON_GATE(g12a_demux, HHI_GCLK_MPEG1, 4);
-static MESON_GATE(g12a_audio_ififo, HHI_GCLK_MPEG1, 11);
-static MESON_GATE(g12a_adc, HHI_GCLK_MPEG1, 13);
-static MESON_GATE(g12a_uart1, HHI_GCLK_MPEG1, 16);
-static MESON_GATE(g12a_g2d, HHI_GCLK_MPEG1, 20);
-static MESON_GATE(g12a_reset, HHI_GCLK_MPEG1, 23);
-static MESON_GATE(g12a_pcie_comb, HHI_GCLK_MPEG1, 24);
-static MESON_GATE(g12a_parser, HHI_GCLK_MPEG1, 25);
-static MESON_GATE(g12a_usb_general, HHI_GCLK_MPEG1, 26);
-static MESON_GATE(g12a_pcie_phy, HHI_GCLK_MPEG1, 27);
-static MESON_GATE(g12a_ahb_arb0, HHI_GCLK_MPEG1, 29);
-
-static MESON_GATE(g12a_ahb_data_bus, HHI_GCLK_MPEG2, 1);
-static MESON_GATE(g12a_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2);
-static MESON_GATE(g12a_htx_hdcp22, HHI_GCLK_MPEG2, 3);
-static MESON_GATE(g12a_htx_pclk, HHI_GCLK_MPEG2, 4);
-static MESON_GATE(g12a_bt656, HHI_GCLK_MPEG2, 6);
-static MESON_GATE(g12a_usb1_to_ddr, HHI_GCLK_MPEG2, 8);
-static MESON_GATE(g12b_mipi_isp_gate, HHI_GCLK_MPEG2, 17);
-static MESON_GATE(g12a_mmc_pclk, HHI_GCLK_MPEG2, 11);
-static MESON_GATE(g12a_uart2, HHI_GCLK_MPEG2, 15);
-static MESON_GATE(g12a_vpu_intr, HHI_GCLK_MPEG2, 25);
-static MESON_GATE(g12b_csi_phy1, HHI_GCLK_MPEG2, 28);
-static MESON_GATE(g12b_csi_phy0, HHI_GCLK_MPEG2, 29);
-static MESON_GATE(g12a_gic, HHI_GCLK_MPEG2, 30);
-
-static MESON_GATE(g12a_vclk2_venci0, HHI_GCLK_OTHER, 1);
-static MESON_GATE(g12a_vclk2_venci1, HHI_GCLK_OTHER, 2);
-static MESON_GATE(g12a_vclk2_vencp0, HHI_GCLK_OTHER, 3);
-static MESON_GATE(g12a_vclk2_vencp1, HHI_GCLK_OTHER, 4);
-static MESON_GATE(g12a_vclk2_venct0, HHI_GCLK_OTHER, 5);
-static MESON_GATE(g12a_vclk2_venct1, HHI_GCLK_OTHER, 6);
-static MESON_GATE(g12a_vclk2_other, HHI_GCLK_OTHER, 7);
-static MESON_GATE(g12a_vclk2_enci, HHI_GCLK_OTHER, 8);
-static MESON_GATE(g12a_vclk2_encp, HHI_GCLK_OTHER, 9);
-static MESON_GATE(g12a_dac_clk, HHI_GCLK_OTHER, 10);
-static MESON_GATE(g12a_aoclk_gate, HHI_GCLK_OTHER, 14);
-static MESON_GATE(g12a_iec958_gate, HHI_GCLK_OTHER, 16);
-static MESON_GATE(g12a_enc480p, HHI_GCLK_OTHER, 20);
-static MESON_GATE(g12a_rng1, HHI_GCLK_OTHER, 21);
-static MESON_GATE(g12a_vclk2_enct, HHI_GCLK_OTHER, 22);
-static MESON_GATE(g12a_vclk2_encl, HHI_GCLK_OTHER, 23);
-static MESON_GATE(g12a_vclk2_venclmmc, HHI_GCLK_OTHER, 24);
-static MESON_GATE(g12a_vclk2_vencl, HHI_GCLK_OTHER, 25);
-static MESON_GATE(g12a_vclk2_other1, HHI_GCLK_OTHER, 26);
-
-static MESON_GATE_RO(g12a_dma, HHI_GCLK_OTHER2, 0);
-static MESON_GATE_RO(g12a_efuse, HHI_GCLK_OTHER2, 1);
-static MESON_GATE_RO(g12a_rom_boot, HHI_GCLK_OTHER2, 2);
-static MESON_GATE_RO(g12a_reset_sec, HHI_GCLK_OTHER2, 3);
-static MESON_GATE_RO(g12a_sec_ahb_apb3, HHI_GCLK_OTHER2, 4);
+static const struct clk_parent_data g12a_pclk_parents = { .hw = &g12a_clk81.hw };
+
+#define G12A_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(_name, _reg, _bit, &g12a_pclk_parents, _flags)
+
+#define G12A_PCLK_RO(_name, _reg, _bit, _flags) \
+ MESON_PCLK_RO(_name, _reg, _bit, &g12a_pclk_parents, _flags)
+
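The G12A_PCLK() helpers wrap MESON_PCLK() with the shared clk81 parent defined
above. As a rough sketch only — the exact MESON_PCLK() definition lives in the
shared clk-regmap header and may differ in detail — an invocation such as
G12A_PCLK(g12a_ddr, HHI_GCLK_MPEG0, 0, CLK_IGNORE_UNUSED) is assumed to expand
to something like:

	static struct clk_regmap g12a_ddr = {
		.data = &(struct clk_regmap_gate_data) {
			.offset = HHI_GCLK_MPEG0,
			.bit_idx = 0,
		},
		.hw.init = &(struct clk_init_data) {
			.name = "g12a_ddr",
			.ops = &clk_regmap_gate_ops,
			.parent_data = &g12a_pclk_parents,
			.num_parents = 1,
			.flags = CLK_IGNORE_UNUSED,
		},
	};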
+/*
+ * Everything Else (EE) domain gates
+ *
+ * NOTE: The gates below are marked with CLK_IGNORE_UNUSED for historical
+ * reasons. Users are encouraged to test without the flag and submit changes
+ * to:
+ * - remove the flag if it is not necessary,
+ * - replace the flag with something more adequate, such as CLK_IS_CRITICAL,
+ *   if appropriate, or
+ * - add a comment explaining why the use of CLK_IGNORE_UNUSED is desirable
+ *   for a particular clock.
+ */
+static G12A_PCLK(g12a_ddr, HHI_GCLK_MPEG0, 0, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_dos, HHI_GCLK_MPEG0, 1, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_audio_locker, HHI_GCLK_MPEG0, 2, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_mipi_dsi_host, HHI_GCLK_MPEG0, 3, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_eth_phy, HHI_GCLK_MPEG0, 4, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_isa, HHI_GCLK_MPEG0, 5, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_pl301, HHI_GCLK_MPEG0, 6, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_periphs, HHI_GCLK_MPEG0, 7, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_spicc_0, HHI_GCLK_MPEG0, 8, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_i2c, HHI_GCLK_MPEG0, 9, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_sana, HHI_GCLK_MPEG0, 10, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_sd, HHI_GCLK_MPEG0, 11, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_rng0, HHI_GCLK_MPEG0, 12, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_uart0, HHI_GCLK_MPEG0, 13, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_spicc_1, HHI_GCLK_MPEG0, 14, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_hiu_reg, HHI_GCLK_MPEG0, 19, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_mipi_dsi_phy, HHI_GCLK_MPEG0, 20, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_assist_misc, HHI_GCLK_MPEG0, 23, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_emmc_a, HHI_GCLK_MPEG0, 24, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_emmc_b, HHI_GCLK_MPEG0, 25, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_emmc_c, HHI_GCLK_MPEG0, 26, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_audio_codec, HHI_GCLK_MPEG0, 28, CLK_IGNORE_UNUSED);
+
+static G12A_PCLK(g12a_audio, HHI_GCLK_MPEG1, 0, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_eth_core, HHI_GCLK_MPEG1, 3, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_demux, HHI_GCLK_MPEG1, 4, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_audio_ififo, HHI_GCLK_MPEG1, 11, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_adc, HHI_GCLK_MPEG1, 13, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_uart1, HHI_GCLK_MPEG1, 16, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_g2d, HHI_GCLK_MPEG1, 20, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_reset, HHI_GCLK_MPEG1, 23, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_pcie_comb, HHI_GCLK_MPEG1, 24, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_parser, HHI_GCLK_MPEG1, 25, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_usb_general, HHI_GCLK_MPEG1, 26, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_pcie_phy, HHI_GCLK_MPEG1, 27, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_ahb_arb0, HHI_GCLK_MPEG1, 29, CLK_IGNORE_UNUSED);
+
+static G12A_PCLK(g12a_ahb_data_bus, HHI_GCLK_MPEG2, 1, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_htx_hdcp22, HHI_GCLK_MPEG2, 3, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_htx_pclk, HHI_GCLK_MPEG2, 4, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_bt656, HHI_GCLK_MPEG2, 6, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_usb1_to_ddr, HHI_GCLK_MPEG2, 8, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12b_mipi_isp_gate, HHI_GCLK_MPEG2, 17, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_mmc_pclk, HHI_GCLK_MPEG2, 11, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_uart2, HHI_GCLK_MPEG2, 15, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vpu_intr, HHI_GCLK_MPEG2, 25, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12b_csi_phy1, HHI_GCLK_MPEG2, 28, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12b_csi_phy0, HHI_GCLK_MPEG2, 29, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_gic, HHI_GCLK_MPEG2, 30, CLK_IGNORE_UNUSED);
+
+static G12A_PCLK(g12a_vclk2_venci0, HHI_GCLK_OTHER, 1, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_venci1, HHI_GCLK_OTHER, 2, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_vencp0, HHI_GCLK_OTHER, 3, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_vencp1, HHI_GCLK_OTHER, 4, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_venct0, HHI_GCLK_OTHER, 5, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_venct1, HHI_GCLK_OTHER, 6, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_other, HHI_GCLK_OTHER, 7, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_enci, HHI_GCLK_OTHER, 8, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_encp, HHI_GCLK_OTHER, 9, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_dac_clk, HHI_GCLK_OTHER, 10, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_aoclk_gate, HHI_GCLK_OTHER, 14, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_iec958_gate, HHI_GCLK_OTHER, 16, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_enc480p, HHI_GCLK_OTHER, 20, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_rng1, HHI_GCLK_OTHER, 21, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_enct, HHI_GCLK_OTHER, 22, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_encl, HHI_GCLK_OTHER, 23, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_venclmmc, HHI_GCLK_OTHER, 24, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_vencl, HHI_GCLK_OTHER, 25, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_other1, HHI_GCLK_OTHER, 26, CLK_IGNORE_UNUSED);
+
+static G12A_PCLK_RO(g12a_dma, HHI_GCLK_OTHER2, 0, 0);
+static G12A_PCLK_RO(g12a_efuse, HHI_GCLK_OTHER2, 1, 0);
+static G12A_PCLK_RO(g12a_rom_boot, HHI_GCLK_OTHER2, 2, 0);
+static G12A_PCLK_RO(g12a_reset_sec, HHI_GCLK_OTHER2, 3, 0);
+static G12A_PCLK_RO(g12a_sec_ahb_apb3, HHI_GCLK_OTHER2, 4, 0);
/* Array of all clocks provided by this provider */
static struct clk_hw *g12a_hw_clks[] = {
@@ -4503,8 +4491,8 @@ static struct clk_hw *g12a_hw_clks[] = {
[CLKID_FCLK_DIV7] = &g12a_fclk_div7.hw,
[CLKID_FCLK_DIV2P5] = &g12a_fclk_div2p5.hw,
[CLKID_GP0_PLL] = &g12a_gp0_pll.hw,
- [CLKID_MPEG_SEL] = &g12a_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &g12a_mpeg_clk_div.hw,
+ [CLKID_MPEG_SEL] = &g12a_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &g12a_clk81_div.hw,
[CLKID_CLK81] = &g12a_clk81.hw,
[CLKID_MPLL0] = &g12a_mpll0.hw,
[CLKID_MPLL1] = &g12a_mpll1.hw,
@@ -4676,12 +4664,12 @@ static struct clk_hw *g12a_hw_clks[] = {
[CLKID_MPLL_50M] = &g12a_mpll_50m.hw,
[CLKID_SYS_PLL_DIV16_EN] = &g12a_sys_pll_div16_en.hw,
[CLKID_SYS_PLL_DIV16] = &g12a_sys_pll_div16.hw,
- [CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_premux0.hw,
- [CLKID_CPU_CLK_DYN0_DIV] = &g12a_cpu_clk_mux0_div.hw,
- [CLKID_CPU_CLK_DYN0] = &g12a_cpu_clk_postmux0.hw,
- [CLKID_CPU_CLK_DYN1_SEL] = &g12a_cpu_clk_premux1.hw,
- [CLKID_CPU_CLK_DYN1_DIV] = &g12a_cpu_clk_mux1_div.hw,
- [CLKID_CPU_CLK_DYN1] = &g12a_cpu_clk_postmux1.hw,
+ [CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_dyn0_sel.hw,
+ [CLKID_CPU_CLK_DYN0_DIV] = &g12a_cpu_clk_dyn0_div.hw,
+ [CLKID_CPU_CLK_DYN0] = &g12a_cpu_clk_dyn0.hw,
+ [CLKID_CPU_CLK_DYN1_SEL] = &g12a_cpu_clk_dyn1_sel.hw,
+ [CLKID_CPU_CLK_DYN1_DIV] = &g12a_cpu_clk_dyn1_div.hw,
+ [CLKID_CPU_CLK_DYN1] = &g12a_cpu_clk_dyn1.hw,
[CLKID_CPU_CLK_DYN] = &g12a_cpu_clk_dyn.hw,
[CLKID_CPU_CLK] = &g12a_cpu_clk.hw,
[CLKID_CPU_CLK_DIV16_EN] = &g12a_cpu_clk_div16_en.hw,
@@ -4730,8 +4718,8 @@ static struct clk_hw *g12b_hw_clks[] = {
[CLKID_FCLK_DIV7] = &g12a_fclk_div7.hw,
[CLKID_FCLK_DIV2P5] = &g12a_fclk_div2p5.hw,
[CLKID_GP0_PLL] = &g12a_gp0_pll.hw,
- [CLKID_MPEG_SEL] = &g12a_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &g12a_mpeg_clk_div.hw,
+ [CLKID_MPEG_SEL] = &g12a_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &g12a_clk81_div.hw,
[CLKID_CLK81] = &g12a_clk81.hw,
[CLKID_MPLL0] = &g12a_mpll0.hw,
[CLKID_MPLL1] = &g12a_mpll1.hw,
@@ -4903,12 +4891,12 @@ static struct clk_hw *g12b_hw_clks[] = {
[CLKID_MPLL_50M] = &g12a_mpll_50m.hw,
[CLKID_SYS_PLL_DIV16_EN] = &g12a_sys_pll_div16_en.hw,
[CLKID_SYS_PLL_DIV16] = &g12a_sys_pll_div16.hw,
- [CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_premux0.hw,
- [CLKID_CPU_CLK_DYN0_DIV] = &g12a_cpu_clk_mux0_div.hw,
- [CLKID_CPU_CLK_DYN0] = &g12a_cpu_clk_postmux0.hw,
- [CLKID_CPU_CLK_DYN1_SEL] = &g12a_cpu_clk_premux1.hw,
- [CLKID_CPU_CLK_DYN1_DIV] = &g12a_cpu_clk_mux1_div.hw,
- [CLKID_CPU_CLK_DYN1] = &g12a_cpu_clk_postmux1.hw,
+ [CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_dyn0_sel.hw,
+ [CLKID_CPU_CLK_DYN0_DIV] = &g12a_cpu_clk_dyn0_div.hw,
+ [CLKID_CPU_CLK_DYN0] = &g12a_cpu_clk_dyn0.hw,
+ [CLKID_CPU_CLK_DYN1_SEL] = &g12a_cpu_clk_dyn1_sel.hw,
+ [CLKID_CPU_CLK_DYN1_DIV] = &g12a_cpu_clk_dyn1_div.hw,
+ [CLKID_CPU_CLK_DYN1] = &g12a_cpu_clk_dyn1.hw,
[CLKID_CPU_CLK_DYN] = &g12a_cpu_clk_dyn.hw,
[CLKID_CPU_CLK] = &g12b_cpu_clk.hw,
[CLKID_CPU_CLK_DIV16_EN] = &g12a_cpu_clk_div16_en.hw,
@@ -4940,12 +4928,12 @@ static struct clk_hw *g12b_hw_clks[] = {
[CLKID_SYS1_PLL] = &g12b_sys1_pll.hw,
[CLKID_SYS1_PLL_DIV16_EN] = &g12b_sys1_pll_div16_en.hw,
[CLKID_SYS1_PLL_DIV16] = &g12b_sys1_pll_div16.hw,
- [CLKID_CPUB_CLK_DYN0_SEL] = &g12b_cpub_clk_premux0.hw,
- [CLKID_CPUB_CLK_DYN0_DIV] = &g12b_cpub_clk_mux0_div.hw,
- [CLKID_CPUB_CLK_DYN0] = &g12b_cpub_clk_postmux0.hw,
- [CLKID_CPUB_CLK_DYN1_SEL] = &g12b_cpub_clk_premux1.hw,
- [CLKID_CPUB_CLK_DYN1_DIV] = &g12b_cpub_clk_mux1_div.hw,
- [CLKID_CPUB_CLK_DYN1] = &g12b_cpub_clk_postmux1.hw,
+ [CLKID_CPUB_CLK_DYN0_SEL] = &g12b_cpub_clk_dyn0_sel.hw,
+ [CLKID_CPUB_CLK_DYN0_DIV] = &g12b_cpub_clk_dyn0_div.hw,
+ [CLKID_CPUB_CLK_DYN0] = &g12b_cpub_clk_dyn0.hw,
+ [CLKID_CPUB_CLK_DYN1_SEL] = &g12b_cpub_clk_dyn1_sel.hw,
+ [CLKID_CPUB_CLK_DYN1_DIV] = &g12b_cpub_clk_dyn1_div.hw,
+ [CLKID_CPUB_CLK_DYN1] = &g12b_cpub_clk_dyn1.hw,
[CLKID_CPUB_CLK_DYN] = &g12b_cpub_clk_dyn.hw,
[CLKID_CPUB_CLK] = &g12b_cpub_clk.hw,
[CLKID_CPUB_CLK_DIV16_EN] = &g12b_cpub_clk_div16_en.hw,
@@ -4998,8 +4986,8 @@ static struct clk_hw *sm1_hw_clks[] = {
[CLKID_FCLK_DIV7] = &g12a_fclk_div7.hw,
[CLKID_FCLK_DIV2P5] = &g12a_fclk_div2p5.hw,
[CLKID_GP0_PLL] = &g12a_gp0_pll.hw,
- [CLKID_MPEG_SEL] = &g12a_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &g12a_mpeg_clk_div.hw,
+ [CLKID_MPEG_SEL] = &g12a_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &g12a_clk81_div.hw,
[CLKID_CLK81] = &g12a_clk81.hw,
[CLKID_MPLL0] = &g12a_mpll0.hw,
[CLKID_MPLL1] = &g12a_mpll1.hw,
@@ -5171,12 +5159,12 @@ static struct clk_hw *sm1_hw_clks[] = {
[CLKID_MPLL_50M] = &g12a_mpll_50m.hw,
[CLKID_SYS_PLL_DIV16_EN] = &g12a_sys_pll_div16_en.hw,
[CLKID_SYS_PLL_DIV16] = &g12a_sys_pll_div16.hw,
- [CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_premux0.hw,
- [CLKID_CPU_CLK_DYN0_DIV] = &g12a_cpu_clk_mux0_div.hw,
- [CLKID_CPU_CLK_DYN0] = &g12a_cpu_clk_postmux0.hw,
- [CLKID_CPU_CLK_DYN1_SEL] = &g12a_cpu_clk_premux1.hw,
- [CLKID_CPU_CLK_DYN1_DIV] = &g12a_cpu_clk_mux1_div.hw,
- [CLKID_CPU_CLK_DYN1] = &g12a_cpu_clk_postmux1.hw,
+ [CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_dyn0_sel.hw,
+ [CLKID_CPU_CLK_DYN0_DIV] = &g12a_cpu_clk_dyn0_div.hw,
+ [CLKID_CPU_CLK_DYN0] = &g12a_cpu_clk_dyn0.hw,
+ [CLKID_CPU_CLK_DYN1_SEL] = &g12a_cpu_clk_dyn1_sel.hw,
+ [CLKID_CPU_CLK_DYN1_DIV] = &g12a_cpu_clk_dyn1_div.hw,
+ [CLKID_CPU_CLK_DYN1] = &g12a_cpu_clk_dyn1.hw,
[CLKID_CPU_CLK_DYN] = &g12a_cpu_clk_dyn.hw,
[CLKID_CPU_CLK] = &g12a_cpu_clk.hw,
[CLKID_CPU_CLK_DIV16_EN] = &g12a_cpu_clk_div16_en.hw,
@@ -5206,12 +5194,12 @@ static struct clk_hw *sm1_hw_clks[] = {
[CLKID_TS] = &g12a_ts.hw,
[CLKID_GP1_PLL_DCO] = &sm1_gp1_pll_dco.hw,
[CLKID_GP1_PLL] = &sm1_gp1_pll.hw,
- [CLKID_DSU_CLK_DYN0_SEL] = &sm1_dsu_clk_premux0.hw,
- [CLKID_DSU_CLK_DYN0_DIV] = &sm1_dsu_clk_premux1.hw,
- [CLKID_DSU_CLK_DYN0] = &sm1_dsu_clk_mux0_div.hw,
- [CLKID_DSU_CLK_DYN1_SEL] = &sm1_dsu_clk_postmux0.hw,
- [CLKID_DSU_CLK_DYN1_DIV] = &sm1_dsu_clk_mux1_div.hw,
- [CLKID_DSU_CLK_DYN1] = &sm1_dsu_clk_postmux1.hw,
+ [CLKID_DSU_CLK_DYN0_SEL] = &sm1_dsu_clk_dyn0_sel.hw,
+ [CLKID_DSU_CLK_DYN0_DIV] = &sm1_dsu_clk_dyn0_div.hw,
+ [CLKID_DSU_CLK_DYN0] = &sm1_dsu_clk_dyn0.hw,
+ [CLKID_DSU_CLK_DYN1_SEL] = &sm1_dsu_clk_dyn1_sel.hw,
+ [CLKID_DSU_CLK_DYN1_DIV] = &sm1_dsu_clk_dyn1_div.hw,
+ [CLKID_DSU_CLK_DYN1] = &sm1_dsu_clk_dyn1.hw,
[CLKID_DSU_CLK_DYN] = &sm1_dsu_clk_dyn.hw,
[CLKID_DSU_CLK_FINAL] = &sm1_dsu_final_clk.hw,
[CLKID_DSU_CLK] = &sm1_dsu_clk.hw,
@@ -5241,8 +5229,7 @@ static const struct reg_sequence g12a_init_regs[] = {
#define DVFS_CON_ID "dvfs"
-static int meson_g12a_dvfs_setup_common(struct device *dev,
- struct clk_hw **hws)
+static int g12a_dvfs_setup_common(struct device *dev, struct clk_hw **hws)
{
struct clk *notifier_clk;
struct clk_hw *xtal;
@@ -5251,13 +5238,13 @@ static int meson_g12a_dvfs_setup_common(struct device *dev,
xtal = clk_hw_get_parent_by_index(hws[CLKID_CPU_CLK_DYN1_SEL], 0);
/* Setup clock notifier for cpu_clk_postmux0 */
- g12a_cpu_clk_postmux0_nb_data.xtal = xtal;
- notifier_clk = devm_clk_hw_get_clk(dev, &g12a_cpu_clk_postmux0.hw,
+ g12a_cpu_clk_dyn0_nb_data.xtal = xtal;
+ notifier_clk = devm_clk_hw_get_clk(dev, &g12a_cpu_clk_dyn0.hw,
DVFS_CON_ID);
ret = devm_clk_notifier_register(dev, notifier_clk,
- &g12a_cpu_clk_postmux0_nb_data.nb);
+ &g12a_cpu_clk_dyn0_nb_data.nb);
if (ret) {
- dev_err(dev, "failed to register the cpu_clk_postmux0 notifier\n");
+ dev_err(dev, "failed to register the cpu_clk_dyn0 notifier\n");
return ret;
}
@@ -5274,7 +5261,7 @@ static int meson_g12a_dvfs_setup_common(struct device *dev,
return 0;
}
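The registrations in this helper are what make CPU DVFS safe: before a PLL on
the active path is reprogrammed, a notifier parks the CPU clock on a safe
parent (the xtal looked up above) and moves it back afterwards. A sketch of
the general callback shape — the real handlers live in the meson clk notifier
helpers, and the name below is hypothetical:

	static int cpu_clk_parking_notify(struct notifier_block *nb,
					  unsigned long event, void *data)
	{
		switch (event) {
		case PRE_RATE_CHANGE:
			/* park the CPU clock on xtal before touching the PLL */
			return notifier_from_errno(0);
		case POST_RATE_CHANGE:
			/* switch back to the freshly programmed parent */
			return notifier_from_errno(0);
		default:
			return NOTIFY_DONE;
		}
	}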
-static int meson_g12b_dvfs_setup(struct platform_device *pdev)
+static int g12b_dvfs_setup(struct platform_device *pdev)
{
struct clk_hw **hws = g12b_hw_clks;
struct device *dev = &pdev->dev;
@@ -5282,7 +5269,7 @@ static int meson_g12b_dvfs_setup(struct platform_device *pdev)
struct clk_hw *xtal;
int ret;
- ret = meson_g12a_dvfs_setup_common(dev, hws);
+ ret = g12a_dvfs_setup_common(dev, hws);
if (ret)
return ret;
@@ -5311,18 +5298,19 @@ static int meson_g12b_dvfs_setup(struct platform_device *pdev)
/* Add notifiers for the second CPU cluster */
/* Setup clock notifier for cpub_clk_postmux0 */
- g12b_cpub_clk_postmux0_nb_data.xtal = xtal;
- notifier_clk = devm_clk_hw_get_clk(dev, &g12b_cpub_clk_postmux0.hw,
+ g12b_cpub_clk_dyn0_nb_data.xtal = xtal;
+ notifier_clk = devm_clk_hw_get_clk(dev, &g12b_cpub_clk_dyn0.hw,
DVFS_CON_ID);
ret = devm_clk_notifier_register(dev, notifier_clk,
- &g12b_cpub_clk_postmux0_nb_data.nb);
+ &g12b_cpub_clk_dyn0_nb_data.nb);
if (ret) {
- dev_err(dev, "failed to register the cpub_clk_postmux0 notifier\n");
+ dev_err(dev, "failed to register the cpub_clk_dyn0 notifier\n");
return ret;
}
/* Setup clock notifier for cpub_clk_dyn mux */
- notifier_clk = devm_clk_hw_get_clk(dev, &g12b_cpub_clk_dyn.hw, "dvfs");
+ notifier_clk = devm_clk_hw_get_clk(dev, &g12b_cpub_clk_dyn.hw,
+ DVFS_CON_ID);
ret = devm_clk_notifier_register(dev, notifier_clk,
&g12a_cpu_clk_mux_nb);
if (ret) {
@@ -5351,14 +5339,14 @@ static int meson_g12b_dvfs_setup(struct platform_device *pdev)
return 0;
}
-static int meson_g12a_dvfs_setup(struct platform_device *pdev)
+static int g12a_dvfs_setup(struct platform_device *pdev)
{
struct clk_hw **hws = g12a_hw_clks;
struct device *dev = &pdev->dev;
struct clk *notifier_clk;
int ret;
- ret = meson_g12a_dvfs_setup_common(dev, hws);
+ ret = g12a_dvfs_setup_common(dev, hws);
if (ret)
return ret;
@@ -5383,27 +5371,27 @@ static int meson_g12a_dvfs_setup(struct platform_device *pdev)
return 0;
}
-struct meson_g12a_data {
- const struct meson_eeclkc_data eeclkc_data;
+struct g12a_clkc_data {
+ const struct meson_clkc_data clkc_data;
int (*dvfs_setup)(struct platform_device *pdev);
};
-static int meson_g12a_probe(struct platform_device *pdev)
+static int g12a_clkc_probe(struct platform_device *pdev)
{
- const struct meson_eeclkc_data *eeclkc_data;
- const struct meson_g12a_data *g12a_data;
+ const struct meson_clkc_data *clkc_data;
+ const struct g12a_clkc_data *g12a_data;
int ret;
- eeclkc_data = of_device_get_match_data(&pdev->dev);
- if (!eeclkc_data)
+ clkc_data = of_device_get_match_data(&pdev->dev);
+ if (!clkc_data)
return -EINVAL;
- ret = meson_eeclkc_probe(pdev);
+ ret = meson_clkc_syscon_probe(pdev);
if (ret)
return ret;
- g12a_data = container_of(eeclkc_data, struct meson_g12a_data,
- eeclkc_data);
+ g12a_data = container_of(clkc_data, struct g12a_clkc_data,
+ clkc_data);
if (g12a_data->dvfs_setup)
return g12a_data->dvfs_setup(pdev);
@@ -5411,8 +5399,8 @@ static int meson_g12a_probe(struct platform_device *pdev)
return 0;
}
-static const struct meson_g12a_data g12a_clkc_data = {
- .eeclkc_data = {
+static const struct g12a_clkc_data g12a_clkc_data = {
+ .clkc_data = {
.hw_clks = {
.hws = g12a_hw_clks,
.num = ARRAY_SIZE(g12a_hw_clks),
@@ -5420,54 +5408,54 @@ static const struct meson_g12a_data g12a_clkc_data = {
.init_regs = g12a_init_regs,
.init_count = ARRAY_SIZE(g12a_init_regs),
},
- .dvfs_setup = meson_g12a_dvfs_setup,
+ .dvfs_setup = g12a_dvfs_setup,
};
-static const struct meson_g12a_data g12b_clkc_data = {
- .eeclkc_data = {
+static const struct g12a_clkc_data g12b_clkc_data = {
+ .clkc_data = {
.hw_clks = {
.hws = g12b_hw_clks,
.num = ARRAY_SIZE(g12b_hw_clks),
},
},
- .dvfs_setup = meson_g12b_dvfs_setup,
+ .dvfs_setup = g12b_dvfs_setup,
};
-static const struct meson_g12a_data sm1_clkc_data = {
- .eeclkc_data = {
+static const struct g12a_clkc_data sm1_clkc_data = {
+ .clkc_data = {
.hw_clks = {
.hws = sm1_hw_clks,
.num = ARRAY_SIZE(sm1_hw_clks),
},
},
- .dvfs_setup = meson_g12a_dvfs_setup,
+ .dvfs_setup = g12a_dvfs_setup,
};
-static const struct of_device_id clkc_match_table[] = {
+static const struct of_device_id g12a_clkc_match_table[] = {
{
.compatible = "amlogic,g12a-clkc",
- .data = &g12a_clkc_data.eeclkc_data
+ .data = &g12a_clkc_data.clkc_data
},
{
.compatible = "amlogic,g12b-clkc",
- .data = &g12b_clkc_data.eeclkc_data
+ .data = &g12b_clkc_data.clkc_data
},
{
.compatible = "amlogic,sm1-clkc",
- .data = &sm1_clkc_data.eeclkc_data
+ .data = &sm1_clkc_data.clkc_data
},
{}
};
-MODULE_DEVICE_TABLE(of, clkc_match_table);
+MODULE_DEVICE_TABLE(of, g12a_clkc_match_table);
-static struct platform_driver g12a_driver = {
- .probe = meson_g12a_probe,
+static struct platform_driver g12a_clkc_driver = {
+ .probe = g12a_clkc_probe,
.driver = {
.name = "g12a-clkc",
- .of_match_table = clkc_match_table,
+ .of_match_table = g12a_clkc_match_table,
},
};
-module_platform_driver(g12a_driver);
+module_platform_driver(g12a_clkc_driver);
MODULE_DESCRIPTION("Amlogic G12/SM1 Main Clock Controller driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/gxbb-aoclk.c b/drivers/clk/meson/gxbb-aoclk.c
index f075fbd450f3..c7dfb3a06cb5 100644
--- a/drivers/clk/meson/gxbb-aoclk.c
+++ b/drivers/clk/meson/gxbb-aoclk.c
@@ -23,31 +23,20 @@
#define AO_RTC_ALT_CLK_CNTL0 0x94
#define AO_RTC_ALT_CLK_CNTL1 0x98
-#define GXBB_AO_GATE(_name, _bit) \
-static struct clk_regmap _name##_ao = { \
- .data = &(struct clk_regmap_gate_data) { \
- .offset = AO_RTI_GEN_CNTL_REG0, \
- .bit_idx = (_bit), \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = #_name "_ao", \
- .ops = &clk_regmap_gate_ops, \
- .parent_data = &(const struct clk_parent_data) { \
- .fw_name = "mpeg-clk", \
- }, \
- .num_parents = 1, \
- .flags = CLK_IGNORE_UNUSED, \
- }, \
-}
+static const struct clk_parent_data gxbb_ao_pclk_parents = { .fw_name = "mpeg-clk" };
-GXBB_AO_GATE(remote, 0);
-GXBB_AO_GATE(i2c_master, 1);
-GXBB_AO_GATE(i2c_slave, 2);
-GXBB_AO_GATE(uart1, 3);
-GXBB_AO_GATE(uart2, 5);
-GXBB_AO_GATE(ir_blaster, 6);
+#define GXBB_AO_PCLK(_name, _bit, _flags) \
+ MESON_PCLK(gxbb_ao_##_name, AO_RTI_GEN_CNTL_REG0, _bit, \
+ &gxbb_ao_pclk_parents, _flags)
-static struct clk_regmap ao_cts_oscin = {
+static GXBB_AO_PCLK(remote, 0, CLK_IGNORE_UNUSED);
+static GXBB_AO_PCLK(i2c_master, 1, CLK_IGNORE_UNUSED);
+static GXBB_AO_PCLK(i2c_slave, 2, CLK_IGNORE_UNUSED);
+static GXBB_AO_PCLK(uart1, 3, CLK_IGNORE_UNUSED);
+static GXBB_AO_PCLK(uart2, 5, CLK_IGNORE_UNUSED);
+static GXBB_AO_PCLK(ir_blaster, 6, CLK_IGNORE_UNUSED);
+
+static struct clk_regmap gxbb_ao_cts_oscin = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTI_PWR_CNTL_REG0,
.bit_idx = 6,
@@ -62,7 +51,7 @@ static struct clk_regmap ao_cts_oscin = {
},
};
-static struct clk_regmap ao_32k_pre = {
+static struct clk_regmap gxbb_ao_32k_pre = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTC_ALT_CLK_CNTL0,
.bit_idx = 31,
@@ -70,7 +59,7 @@ static struct clk_regmap ao_32k_pre = {
.hw.init = &(struct clk_init_data){
.name = "ao_32k_pre",
.ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) { &ao_cts_oscin.hw },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_ao_cts_oscin.hw },
.num_parents = 1,
},
};
@@ -85,7 +74,7 @@ static const struct meson_clk_dualdiv_param gxbb_32k_div_table[] = {
}, {}
};
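For reference, the dual divider alternates between two divide ratios to
approximate a fractional division: the input is divided by N1 for M1 cycles,
then by N2 for M2 cycles. A small helper sketching the resulting average rate
(an illustration of the documented dual-divider behaviour, not driver code):

	static unsigned long dualdiv_avg_rate(unsigned long parent_rate,
					      unsigned int n1, unsigned int m1,
					      unsigned int n2, unsigned int m2)
	{
		/* average of M1 cycles at /N1 and M2 cycles at /N2 */
		return parent_rate * (m1 + m2) / (n1 * m1 + n2 * m2);
	}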
-static struct clk_regmap ao_32k_div = {
+static struct clk_regmap gxbb_ao_32k_div = {
.data = &(struct meson_clk_dualdiv_data){
.n1 = {
.reg_off = AO_RTC_ALT_CLK_CNTL0,
@@ -117,12 +106,12 @@ static struct clk_regmap ao_32k_div = {
.hw.init = &(struct clk_init_data){
.name = "ao_32k_div",
.ops = &meson_clk_dualdiv_ops,
- .parent_hws = (const struct clk_hw *[]) { &ao_32k_pre.hw },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_ao_32k_pre.hw },
.num_parents = 1,
},
};
-static struct clk_regmap ao_32k_sel = {
+static struct clk_regmap gxbb_ao_32k_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTC_ALT_CLK_CNTL1,
.mask = 0x1,
@@ -133,15 +122,15 @@ static struct clk_regmap ao_32k_sel = {
.name = "ao_32k_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &ao_32k_div.hw,
- &ao_32k_pre.hw
+ &gxbb_ao_32k_div.hw,
+ &gxbb_ao_32k_pre.hw
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap ao_32k = {
+static struct clk_regmap gxbb_ao_32k = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTC_ALT_CLK_CNTL0,
.bit_idx = 30,
@@ -149,13 +138,13 @@ static struct clk_regmap ao_32k = {
.hw.init = &(struct clk_init_data){
.name = "ao_32k",
.ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) { &ao_32k_sel.hw },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_ao_32k_sel.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap ao_cts_rtc_oscin = {
+static struct clk_regmap gxbb_ao_cts_rtc_oscin = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTI_PWR_CNTL_REG0,
.mask = 0x7,
@@ -170,14 +159,14 @@ static struct clk_regmap ao_cts_rtc_oscin = {
{ .fw_name = "ext-32k-0", },
{ .fw_name = "ext-32k-1", },
{ .fw_name = "ext-32k-2", },
- { .hw = &ao_32k.hw },
+ { .hw = &gxbb_ao_32k.hw },
},
.num_parents = 4,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap ao_clk81 = {
+static struct clk_regmap gxbb_ao_clk81 = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTI_PWR_CNTL_REG0,
.mask = 0x1,
@@ -189,14 +178,14 @@ static struct clk_regmap ao_clk81 = {
.ops = &clk_regmap_mux_ro_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "mpeg-clk", },
- { .hw = &ao_cts_rtc_oscin.hw },
+ { .hw = &gxbb_ao_cts_rtc_oscin.hw },
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap ao_cts_cec = {
+static struct clk_regmap gxbb_ao_cts_cec = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_CRT_CLK_CNTL1,
.mask = 0x1,
@@ -221,14 +210,14 @@ static struct clk_regmap ao_cts_cec = {
*/
.parent_data = (const struct clk_parent_data []) {
{ .name = "fixme", .index = -1, },
- { .hw = &ao_cts_rtc_oscin.hw },
+ { .hw = &gxbb_ao_cts_rtc_oscin.hw },
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const unsigned int gxbb_aoclk_reset[] = {
+static const unsigned int gxbb_ao_reset[] = {
[RESET_AO_REMOTE] = 16,
[RESET_AO_I2C_MASTER] = 18,
[RESET_AO_I2C_SLAVE] = 19,
@@ -237,50 +226,52 @@ static const unsigned int gxbb_aoclk_reset[] = {
[RESET_AO_IR_BLASTER] = 23,
};
-static struct clk_hw *gxbb_aoclk_hw_clks[] = {
- [CLKID_AO_REMOTE] = &remote_ao.hw,
- [CLKID_AO_I2C_MASTER] = &i2c_master_ao.hw,
- [CLKID_AO_I2C_SLAVE] = &i2c_slave_ao.hw,
- [CLKID_AO_UART1] = &uart1_ao.hw,
- [CLKID_AO_UART2] = &uart2_ao.hw,
- [CLKID_AO_IR_BLASTER] = &ir_blaster_ao.hw,
- [CLKID_AO_CEC_32K] = &ao_cts_cec.hw,
- [CLKID_AO_CTS_OSCIN] = &ao_cts_oscin.hw,
- [CLKID_AO_32K_PRE] = &ao_32k_pre.hw,
- [CLKID_AO_32K_DIV] = &ao_32k_div.hw,
- [CLKID_AO_32K_SEL] = &ao_32k_sel.hw,
- [CLKID_AO_32K] = &ao_32k.hw,
- [CLKID_AO_CTS_RTC_OSCIN] = &ao_cts_rtc_oscin.hw,
- [CLKID_AO_CLK81] = &ao_clk81.hw,
+static struct clk_hw *gxbb_ao_hw_clks[] = {
+ [CLKID_AO_REMOTE] = &gxbb_ao_remote.hw,
+ [CLKID_AO_I2C_MASTER] = &gxbb_ao_i2c_master.hw,
+ [CLKID_AO_I2C_SLAVE] = &gxbb_ao_i2c_slave.hw,
+ [CLKID_AO_UART1] = &gxbb_ao_uart1.hw,
+ [CLKID_AO_UART2] = &gxbb_ao_uart2.hw,
+ [CLKID_AO_IR_BLASTER] = &gxbb_ao_ir_blaster.hw,
+ [CLKID_AO_CEC_32K] = &gxbb_ao_cts_cec.hw,
+ [CLKID_AO_CTS_OSCIN] = &gxbb_ao_cts_oscin.hw,
+ [CLKID_AO_32K_PRE] = &gxbb_ao_32k_pre.hw,
+ [CLKID_AO_32K_DIV] = &gxbb_ao_32k_div.hw,
+ [CLKID_AO_32K_SEL] = &gxbb_ao_32k_sel.hw,
+ [CLKID_AO_32K] = &gxbb_ao_32k.hw,
+ [CLKID_AO_CTS_RTC_OSCIN] = &gxbb_ao_cts_rtc_oscin.hw,
+ [CLKID_AO_CLK81] = &gxbb_ao_clk81.hw,
};
-static const struct meson_aoclk_data gxbb_aoclkc_data = {
+static const struct meson_aoclk_data gxbb_ao_clkc_data = {
.reset_reg = AO_RTI_GEN_CNTL_REG0,
- .num_reset = ARRAY_SIZE(gxbb_aoclk_reset),
- .reset = gxbb_aoclk_reset,
- .hw_clks = {
- .hws = gxbb_aoclk_hw_clks,
- .num = ARRAY_SIZE(gxbb_aoclk_hw_clks),
+ .num_reset = ARRAY_SIZE(gxbb_ao_reset),
+ .reset = gxbb_ao_reset,
+ .clkc_data = {
+ .hw_clks = {
+ .hws = gxbb_ao_hw_clks,
+ .num = ARRAY_SIZE(gxbb_ao_hw_clks),
+ },
},
};
-static const struct of_device_id gxbb_aoclkc_match_table[] = {
+static const struct of_device_id gxbb_ao_clkc_match_table[] = {
{
.compatible = "amlogic,meson-gx-aoclkc",
- .data = &gxbb_aoclkc_data,
+ .data = &gxbb_ao_clkc_data.clkc_data,
},
{ }
};
-MODULE_DEVICE_TABLE(of, gxbb_aoclkc_match_table);
+MODULE_DEVICE_TABLE(of, gxbb_ao_clkc_match_table);
-static struct platform_driver gxbb_aoclkc_driver = {
+static struct platform_driver gxbb_ao_clkc_driver = {
.probe = meson_aoclkc_probe,
.driver = {
.name = "gxbb-aoclkc",
- .of_match_table = gxbb_aoclkc_match_table,
+ .of_match_table = gxbb_ao_clkc_match_table,
},
};
-module_platform_driver(gxbb_aoclkc_driver);
+module_platform_driver(gxbb_ao_clkc_driver);
MODULE_DESCRIPTION("Amlogic GXBB Always-ON Clock Controller driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 362d1b87ea5b..5a229c4ffae1 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -13,7 +13,7 @@
#include "clk-regmap.h"
#include "clk-pll.h"
#include "clk-mpll.h"
-#include "meson-eeclk.h"
+#include "meson-clkc-utils.h"
#include "vid-pll-div.h"
#include <dt-bindings/clock/gxbb-clkc.h>
@@ -116,70 +116,6 @@
#define HHI_BT656_CLK_CNTL 0x3d4
#define HHI_SAR_CLK_CNTL 0x3d8
-static const struct pll_params_table gxbb_gp0_pll_params_table[] = {
- PLL_PARAMS(32, 1),
- PLL_PARAMS(33, 1),
- PLL_PARAMS(34, 1),
- PLL_PARAMS(35, 1),
- PLL_PARAMS(36, 1),
- PLL_PARAMS(37, 1),
- PLL_PARAMS(38, 1),
- PLL_PARAMS(39, 1),
- PLL_PARAMS(40, 1),
- PLL_PARAMS(41, 1),
- PLL_PARAMS(42, 1),
- PLL_PARAMS(43, 1),
- PLL_PARAMS(44, 1),
- PLL_PARAMS(45, 1),
- PLL_PARAMS(46, 1),
- PLL_PARAMS(47, 1),
- PLL_PARAMS(48, 1),
- PLL_PARAMS(49, 1),
- PLL_PARAMS(50, 1),
- PLL_PARAMS(51, 1),
- PLL_PARAMS(52, 1),
- PLL_PARAMS(53, 1),
- PLL_PARAMS(54, 1),
- PLL_PARAMS(55, 1),
- PLL_PARAMS(56, 1),
- PLL_PARAMS(57, 1),
- PLL_PARAMS(58, 1),
- PLL_PARAMS(59, 1),
- PLL_PARAMS(60, 1),
- PLL_PARAMS(61, 1),
- PLL_PARAMS(62, 1),
- { /* sentinel */ },
-};
-
-static const struct pll_params_table gxl_gp0_pll_params_table[] = {
- PLL_PARAMS(42, 1),
- PLL_PARAMS(43, 1),
- PLL_PARAMS(44, 1),
- PLL_PARAMS(45, 1),
- PLL_PARAMS(46, 1),
- PLL_PARAMS(47, 1),
- PLL_PARAMS(48, 1),
- PLL_PARAMS(49, 1),
- PLL_PARAMS(50, 1),
- PLL_PARAMS(51, 1),
- PLL_PARAMS(52, 1),
- PLL_PARAMS(53, 1),
- PLL_PARAMS(54, 1),
- PLL_PARAMS(55, 1),
- PLL_PARAMS(56, 1),
- PLL_PARAMS(57, 1),
- PLL_PARAMS(58, 1),
- PLL_PARAMS(59, 1),
- PLL_PARAMS(60, 1),
- PLL_PARAMS(61, 1),
- PLL_PARAMS(62, 1),
- PLL_PARAMS(63, 1),
- PLL_PARAMS(64, 1),
- PLL_PARAMS(65, 1),
- PLL_PARAMS(66, 1),
- { /* sentinel */ },
-};
-
static struct clk_regmap gxbb_fixed_pll_dco = {
.data = &(struct meson_clk_pll_data){
.en = {
@@ -523,7 +459,42 @@ static struct clk_regmap gxbb_sys_pll = {
},
};
-static const struct reg_sequence gxbb_gp0_init_regs[] = {
+static const struct pll_params_table gxbb_gp0_pll_params_table[] = {
+ PLL_PARAMS(32, 1),
+ PLL_PARAMS(33, 1),
+ PLL_PARAMS(34, 1),
+ PLL_PARAMS(35, 1),
+ PLL_PARAMS(36, 1),
+ PLL_PARAMS(37, 1),
+ PLL_PARAMS(38, 1),
+ PLL_PARAMS(39, 1),
+ PLL_PARAMS(40, 1),
+ PLL_PARAMS(41, 1),
+ PLL_PARAMS(42, 1),
+ PLL_PARAMS(43, 1),
+ PLL_PARAMS(44, 1),
+ PLL_PARAMS(45, 1),
+ PLL_PARAMS(46, 1),
+ PLL_PARAMS(47, 1),
+ PLL_PARAMS(48, 1),
+ PLL_PARAMS(49, 1),
+ PLL_PARAMS(50, 1),
+ PLL_PARAMS(51, 1),
+ PLL_PARAMS(52, 1),
+ PLL_PARAMS(53, 1),
+ PLL_PARAMS(54, 1),
+ PLL_PARAMS(55, 1),
+ PLL_PARAMS(56, 1),
+ PLL_PARAMS(57, 1),
+ PLL_PARAMS(58, 1),
+ PLL_PARAMS(59, 1),
+ PLL_PARAMS(60, 1),
+ PLL_PARAMS(61, 1),
+ PLL_PARAMS(62, 1),
+ { /* sentinel */ },
+};
+
+static const struct reg_sequence gxbb_gp0_pll_init_regs[] = {
{ .reg = HHI_GP0_PLL_CNTL2, .def = 0x69c80000 },
{ .reg = HHI_GP0_PLL_CNTL3, .def = 0x0a5590c4 },
{ .reg = HHI_GP0_PLL_CNTL4, .def = 0x0000500d },
@@ -557,8 +528,8 @@ static struct clk_regmap gxbb_gp0_pll_dco = {
.width = 1,
},
.table = gxbb_gp0_pll_params_table,
- .init_regs = gxbb_gp0_init_regs,
- .init_count = ARRAY_SIZE(gxbb_gp0_init_regs),
+ .init_regs = gxbb_gp0_pll_init_regs,
+ .init_count = ARRAY_SIZE(gxbb_gp0_pll_init_regs),
},
.hw.init = &(struct clk_init_data){
.name = "gp0_pll_dco",
@@ -570,7 +541,36 @@ static struct clk_regmap gxbb_gp0_pll_dco = {
},
};
-static const struct reg_sequence gxl_gp0_init_regs[] = {
+static const struct pll_params_table gxl_gp0_pll_params_table[] = {
+ PLL_PARAMS(42, 1),
+ PLL_PARAMS(43, 1),
+ PLL_PARAMS(44, 1),
+ PLL_PARAMS(45, 1),
+ PLL_PARAMS(46, 1),
+ PLL_PARAMS(47, 1),
+ PLL_PARAMS(48, 1),
+ PLL_PARAMS(49, 1),
+ PLL_PARAMS(50, 1),
+ PLL_PARAMS(51, 1),
+ PLL_PARAMS(52, 1),
+ PLL_PARAMS(53, 1),
+ PLL_PARAMS(54, 1),
+ PLL_PARAMS(55, 1),
+ PLL_PARAMS(56, 1),
+ PLL_PARAMS(57, 1),
+ PLL_PARAMS(58, 1),
+ PLL_PARAMS(59, 1),
+ PLL_PARAMS(60, 1),
+ PLL_PARAMS(61, 1),
+ PLL_PARAMS(62, 1),
+ PLL_PARAMS(63, 1),
+ PLL_PARAMS(64, 1),
+ PLL_PARAMS(65, 1),
+ PLL_PARAMS(66, 1),
+ { /* sentinel */ },
+};
+
+static const struct reg_sequence gxl_gp0_pll_init_regs[] = {
{ .reg = HHI_GP0_PLL_CNTL1, .def = 0xc084b000 },
{ .reg = HHI_GP0_PLL_CNTL2, .def = 0xb75020be },
{ .reg = HHI_GP0_PLL_CNTL3, .def = 0x0a59a288 },
@@ -611,8 +611,8 @@ static struct clk_regmap gxl_gp0_pll_dco = {
.width = 1,
},
.table = gxl_gp0_pll_params_table,
- .init_regs = gxl_gp0_init_regs,
- .init_count = ARRAY_SIZE(gxl_gp0_init_regs),
+ .init_regs = gxl_gp0_pll_init_regs,
+ .init_count = ARRAY_SIZE(gxl_gp0_pll_init_regs),
},
.hw.init = &(struct clk_init_data){
.name = "gp0_pll_dco",
@@ -972,8 +972,9 @@ static struct clk_regmap gxbb_mpll2 = {
},
};
-static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
-static const struct clk_parent_data clk81_parent_data[] = {
+/* clk81 is often referred to as "mpeg_clk" */
+static u32 clk81_parents_val_table[] = { 0, 2, 3, 4, 5, 6, 7 };
+static const struct clk_parent_data clk81_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &gxbb_fclk_div7.hw },
{ .hw = &gxbb_mpll1.hw },
@@ -983,37 +984,37 @@ static const struct clk_parent_data clk81_parent_data[] = {
{ .hw = &gxbb_fclk_div5.hw },
};
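The value table above has only seven entries for a 3-bit field because
register value 1 (the "1'b0 (wtf)" input noted in the comment below) is
skipped. A mux .table maps the clock framework's parent index to the register
field value; a minimal sketch of that mapping (illustrative helper, not driver
code):

	static u32 clk81_index_to_field(unsigned int parent_index)
	{
		static const u32 table[] = { 0, 2, 3, 4, 5, 6, 7 };

		return table[parent_index]; /* e.g. index 1 (fclk_div7) -> 2 */
	}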
-static struct clk_regmap gxbb_mpeg_clk_sel = {
+static struct clk_regmap gxbb_clk81_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_MPEG_CLK_CNTL,
.mask = 0x7,
.shift = 12,
- .table = mux_table_clk81,
+ .table = clk81_parents_val_table,
},
.hw.init = &(struct clk_init_data){
- .name = "mpeg_clk_sel",
+ .name = "clk81_sel",
.ops = &clk_regmap_mux_ro_ops,
/*
* bits 14:12 selects from 8 possible parents:
* xtal, 1'b0 (wtf), fclk_div7, mpll_clkout1, mpll_clkout2,
* fclk_div4, fclk_div3, fclk_div5
*/
- .parent_data = clk81_parent_data,
- .num_parents = ARRAY_SIZE(clk81_parent_data),
+ .parent_data = clk81_parents,
+ .num_parents = ARRAY_SIZE(clk81_parents),
},
};
-static struct clk_regmap gxbb_mpeg_clk_div = {
+static struct clk_regmap gxbb_clk81_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_MPEG_CLK_CNTL,
.shift = 0,
.width = 7,
},
.hw.init = &(struct clk_init_data){
- .name = "mpeg_clk_div",
+ .name = "clk81_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &gxbb_mpeg_clk_sel.hw
+ &gxbb_clk81_sel.hw
},
.num_parents = 1,
},
@@ -1029,7 +1030,7 @@ static struct clk_regmap gxbb_clk81 = {
.name = "clk81",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &gxbb_mpeg_clk_div.hw
+ &gxbb_clk81_div.hw
},
.num_parents = 1,
.flags = CLK_IS_CRITICAL,
@@ -1094,7 +1095,7 @@ static struct clk_regmap gxbb_sar_adc_clk = {
* switches to the "inactive" one when CLK_SET_RATE_GATE is set.
*/
-static const struct clk_parent_data gxbb_mali_0_1_parent_data[] = {
+static const struct clk_parent_data gxbb_mali_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &gxbb_gp0_pll.hw },
{ .hw = &gxbb_mpll2.hw },
@@ -1114,8 +1115,8 @@ static struct clk_regmap gxbb_mali_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gxbb_mali_0_1_parent_data,
- .num_parents = 8,
+ .parent_data = gxbb_mali_parents,
+ .num_parents = ARRAY_SIZE(gxbb_mali_parents),
/*
* Don't request the parent to change the rate because
* all GPU frequencies can be derived from the fclk_*
@@ -1168,8 +1169,8 @@ static struct clk_regmap gxbb_mali_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gxbb_mali_0_1_parent_data,
- .num_parents = 8,
+ .parent_data = gxbb_mali_parents,
+ .num_parents = ARRAY_SIZE(gxbb_mali_parents),
/*
* Don't request the parent to change the rate because
* all GPU frequencies can be derived from the fclk_*
@@ -1213,11 +1214,6 @@ static struct clk_regmap gxbb_mali_1 = {
},
};
-static const struct clk_hw *gxbb_mali_parent_hws[] = {
- &gxbb_mali_0.hw,
- &gxbb_mali_1.hw,
-};
-
static struct clk_regmap gxbb_mali = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_MALI_CLK_CNTL,
@@ -1227,29 +1223,35 @@ static struct clk_regmap gxbb_mali = {
.hw.init = &(struct clk_init_data){
.name = "mali",
.ops = &clk_regmap_mux_ops,
- .parent_hws = gxbb_mali_parent_hws,
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_mali_0.hw,
+ &gxbb_mali_1.hw,
+ },
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
+static u32 gxbb_cts_mclk_parents_val_table[] = { 1, 2, 3 };
+static const struct clk_hw *gxbb_cts_mclk_parents[] = {
+ &gxbb_mpll0.hw,
+ &gxbb_mpll1.hw,
+ &gxbb_mpll2.hw,
+};
+
static struct clk_regmap gxbb_cts_amclk_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_AUD_CLK_CNTL,
.mask = 0x3,
.shift = 9,
- .table = (u32[]){ 1, 2, 3 },
+ .table = gxbb_cts_mclk_parents_val_table,
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "cts_amclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &gxbb_mpll0.hw,
- &gxbb_mpll1.hw,
- &gxbb_mpll2.hw,
- },
- .num_parents = 3,
+ .parent_hws = gxbb_cts_mclk_parents,
+ .num_parents = ARRAY_SIZE(gxbb_cts_mclk_parents),
},
};
@@ -1292,18 +1294,14 @@ static struct clk_regmap gxbb_cts_mclk_i958_sel = {
.offset = HHI_AUD_CLK_CNTL2,
.mask = 0x3,
.shift = 25,
- .table = (u32[]){ 1, 2, 3 },
+ .table = gxbb_cts_mclk_parents_val_table,
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data) {
.name = "cts_mclk_i958_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &gxbb_mpll0.hw,
- &gxbb_mpll1.hw,
- &gxbb_mpll2.hw,
- },
- .num_parents = 3,
+ .parent_hws = gxbb_cts_mclk_parents,
+ .num_parents = ARRAY_SIZE(gxbb_cts_mclk_parents),
},
};
@@ -1368,7 +1366,7 @@ static struct clk_regmap gxbb_cts_i958 = {
* This clock does not exist yet in this controller or the AO one
*/
static u32 gxbb_32k_clk_parents_val_table[] = { 0, 2, 3 };
-static const struct clk_parent_data gxbb_32k_clk_parent_data[] = {
+static const struct clk_parent_data gxbb_32k_clk_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &gxbb_fclk_div3.hw },
{ .hw = &gxbb_fclk_div5.hw },
@@ -1380,11 +1378,11 @@ static struct clk_regmap gxbb_32k_clk_sel = {
.mask = 0x3,
.shift = 16,
.table = gxbb_32k_clk_parents_val_table,
- },
+ },
.hw.init = &(struct clk_init_data){
.name = "32k_clk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gxbb_32k_clk_parent_data,
+ .parent_data = gxbb_32k_clk_parents,
.num_parents = 4,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1423,7 +1421,7 @@ static struct clk_regmap gxbb_32k_clk = {
},
};
-static const struct clk_parent_data gxbb_sd_emmc_clk0_parent_data[] = {
+static const struct clk_parent_data gxbb_sd_emmc_clk0_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &gxbb_fclk_div2.hw },
{ .hw = &gxbb_fclk_div3.hw },
@@ -1447,8 +1445,8 @@ static struct clk_regmap gxbb_sd_emmc_a_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_a_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gxbb_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parent_data),
+ .parent_data = gxbb_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1497,8 +1495,8 @@ static struct clk_regmap gxbb_sd_emmc_b_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gxbb_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parent_data),
+ .parent_data = gxbb_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1547,8 +1545,8 @@ static struct clk_regmap gxbb_sd_emmc_c_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gxbb_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parent_data),
+ .parent_data = gxbb_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1589,7 +1587,7 @@ static struct clk_regmap gxbb_sd_emmc_c_clk0 = {
/* VPU Clock */
-static const struct clk_hw *gxbb_vpu_parent_hws[] = {
+static const struct clk_hw *gxbb_vpu_parents[] = {
&gxbb_fclk_div4.hw,
&gxbb_fclk_div3.hw,
&gxbb_fclk_div5.hw,
@@ -1609,8 +1607,8 @@ static struct clk_regmap gxbb_vpu_0_sel = {
* bits 9:10 selects from 4 possible parents:
* fclk_div4, fclk_div3, fclk_div5, fclk_div7,
*/
- .parent_hws = gxbb_vpu_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_vpu_parent_hws),
+ .parent_hws = gxbb_vpu_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vpu_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1657,8 +1655,8 @@ static struct clk_regmap gxbb_vpu_1_sel = {
* bits 25:26 selects from 4 possible parents:
* fclk_div4, fclk_div3, fclk_div5, fclk_div7,
*/
- .parent_hws = gxbb_vpu_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_vpu_parent_hws),
+ .parent_hws = gxbb_vpu_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vpu_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1716,7 +1714,7 @@ static struct clk_regmap gxbb_vpu = {
/* VAPB Clock */
-static const struct clk_hw *gxbb_vapb_parent_hws[] = {
+static const struct clk_hw *gxbb_vapb_parents[] = {
&gxbb_fclk_div4.hw,
&gxbb_fclk_div3.hw,
&gxbb_fclk_div5.hw,
@@ -1736,8 +1734,8 @@ static struct clk_regmap gxbb_vapb_0_sel = {
* bits 9:10 selects from 4 possible parents:
* fclk_div4, fclk_div3, fclk_div5, fclk_div7,
*/
- .parent_hws = gxbb_vapb_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_vapb_parent_hws),
+ .parent_hws = gxbb_vapb_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vapb_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1788,8 +1786,8 @@ static struct clk_regmap gxbb_vapb_1_sel = {
* bits 25:26 selects from 4 possible parents:
* fclk_div4, fclk_div3, fclk_div5, fclk_div7,
*/
- .parent_hws = gxbb_vapb_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_vapb_parent_hws),
+ .parent_hws = gxbb_vapb_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vapb_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1897,7 +1895,7 @@ static struct clk_regmap gxbb_vid_pll_div = {
},
};
-static const struct clk_parent_data gxbb_vid_pll_parent_data[] = {
+static const struct clk_parent_data gxbb_vid_pll_parents[] = {
{ .hw = &gxbb_vid_pll_div.hw },
/*
* Note:
@@ -1922,8 +1920,8 @@ static struct clk_regmap gxbb_vid_pll_sel = {
* bit 18 selects from 2 possible parents:
* vid_pll_div or hdmi_pll
*/
- .parent_data = gxbb_vid_pll_parent_data,
- .num_parents = ARRAY_SIZE(gxbb_vid_pll_parent_data),
+ .parent_data = gxbb_vid_pll_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vid_pll_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -1944,7 +1942,7 @@ static struct clk_regmap gxbb_vid_pll = {
},
};
-static const struct clk_hw *gxbb_vclk_parent_hws[] = {
+static const struct clk_hw *gxbb_vclk_parents[] = {
&gxbb_vid_pll.hw,
&gxbb_fclk_div4.hw,
&gxbb_fclk_div3.hw,
@@ -1968,8 +1966,8 @@ static struct clk_regmap gxbb_vclk_sel = {
* vid_pll, fclk_div4, fclk_div3, fclk_div5,
* vid_pll, fclk_div7, mp1
*/
- .parent_hws = gxbb_vclk_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_vclk_parent_hws),
+ .parent_hws = gxbb_vclk_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vclk_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -1988,8 +1986,8 @@ static struct clk_regmap gxbb_vclk2_sel = {
* vid_pll, fclk_div4, fclk_div3, fclk_div5,
* vid_pll, fclk_div7, mp1
*/
- .parent_hws = gxbb_vclk_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_vclk_parent_hws),
+ .parent_hws = gxbb_vclk_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vclk_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2328,8 +2326,8 @@ static struct clk_fixed_factor gxbb_vclk2_div12 = {
},
};
-static u32 mux_table_cts_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
-static const struct clk_hw *gxbb_cts_parent_hws[] = {
+static u32 gxbb_cts_parents_val_table[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const struct clk_hw *gxbb_cts_parents[] = {
&gxbb_vclk_div1.hw,
&gxbb_vclk_div2.hw,
&gxbb_vclk_div4.hw,
@@ -2347,13 +2345,13 @@ static struct clk_regmap gxbb_cts_enci_sel = {
.offset = HHI_VID_CLK_DIV,
.mask = 0xf,
.shift = 28,
- .table = mux_table_cts_sel,
+ .table = gxbb_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_enci_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = gxbb_cts_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_cts_parent_hws),
+ .parent_hws = gxbb_cts_parents,
+ .num_parents = ARRAY_SIZE(gxbb_cts_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2363,13 +2361,13 @@ static struct clk_regmap gxbb_cts_encp_sel = {
.offset = HHI_VID_CLK_DIV,
.mask = 0xf,
.shift = 20,
- .table = mux_table_cts_sel,
+ .table = gxbb_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_encp_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = gxbb_cts_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_cts_parent_hws),
+ .parent_hws = gxbb_cts_parents,
+ .num_parents = ARRAY_SIZE(gxbb_cts_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2379,50 +2377,13 @@ static struct clk_regmap gxbb_cts_vdac_sel = {
.offset = HHI_VIID_CLK_DIV,
.mask = 0xf,
.shift = 28,
- .table = mux_table_cts_sel,
+ .table = gxbb_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_vdac_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = gxbb_cts_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_cts_parent_hws),
- .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
- },
-};
-
-/* TOFIX: add support for cts_tcon */
-static u32 mux_table_hdmi_tx_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
-static const struct clk_hw *gxbb_cts_hdmi_tx_parent_hws[] = {
- &gxbb_vclk_div1.hw,
- &gxbb_vclk_div2.hw,
- &gxbb_vclk_div4.hw,
- &gxbb_vclk_div6.hw,
- &gxbb_vclk_div12.hw,
- &gxbb_vclk2_div1.hw,
- &gxbb_vclk2_div2.hw,
- &gxbb_vclk2_div4.hw,
- &gxbb_vclk2_div6.hw,
- &gxbb_vclk2_div12.hw,
-};
-
-static struct clk_regmap gxbb_hdmi_tx_sel = {
- .data = &(struct clk_regmap_mux_data){
- .offset = HHI_HDMI_CLK_CNTL,
- .mask = 0xf,
- .shift = 16,
- .table = mux_table_hdmi_tx_sel,
- },
- .hw.init = &(struct clk_init_data){
- .name = "hdmi_tx_sel",
- .ops = &clk_regmap_mux_ops,
- /*
- * bits 31:28 selects from 12 possible parents:
- * vclk_div1, vclk_div2, vclk_div4, vclk_div6, vclk_div12
- * vclk2_div1, vclk2_div2, vclk2_div4, vclk2_div6, vclk2_div12,
- * cts_tcon
- */
- .parent_hws = gxbb_cts_hdmi_tx_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_cts_hdmi_tx_parent_hws),
+ .parent_hws = gxbb_cts_parents,
+ .num_parents = ARRAY_SIZE(gxbb_cts_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2475,6 +2436,43 @@ static struct clk_regmap gxbb_cts_vdac = {
},
};
+/* TOFIX: add support for cts_tcon */
+static u32 gxbb_hdmi_tx_parents_val_table[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const struct clk_hw *gxbb_hdmi_tx_parents[] = {
+ &gxbb_vclk_div1.hw,
+ &gxbb_vclk_div2.hw,
+ &gxbb_vclk_div4.hw,
+ &gxbb_vclk_div6.hw,
+ &gxbb_vclk_div12.hw,
+ &gxbb_vclk2_div1.hw,
+ &gxbb_vclk2_div2.hw,
+ &gxbb_vclk2_div4.hw,
+ &gxbb_vclk2_div6.hw,
+ &gxbb_vclk2_div12.hw,
+};
+
+static struct clk_regmap gxbb_hdmi_tx_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_HDMI_CLK_CNTL,
+ .mask = 0xf,
+ .shift = 16,
+ .table = gxbb_hdmi_tx_parents_val_table,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_tx_sel",
+ .ops = &clk_regmap_mux_ops,
+ /*
+		 * bits 19:16 select from 12 possible parents:
+ * vclk_div1, vclk_div2, vclk_div4, vclk_div6, vclk_div12
+ * vclk2_div1, vclk2_div2, vclk2_div4, vclk2_div6, vclk2_div12,
+ * cts_tcon
+ */
+ .parent_hws = gxbb_hdmi_tx_parents,
+ .num_parents = ARRAY_SIZE(gxbb_hdmi_tx_parents),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
static struct clk_regmap gxbb_hdmi_tx = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VID_CLK_CNTL2,
@@ -2493,7 +2491,7 @@ static struct clk_regmap gxbb_hdmi_tx = {
/* HDMI Clocks */
-static const struct clk_parent_data gxbb_hdmi_parent_data[] = {
+static const struct clk_parent_data gxbb_hdmi_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &gxbb_fclk_div4.hw },
{ .hw = &gxbb_fclk_div3.hw },
@@ -2510,8 +2508,8 @@ static struct clk_regmap gxbb_hdmi_sel = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gxbb_hdmi_parent_data,
- .num_parents = ARRAY_SIZE(gxbb_hdmi_parent_data),
+ .parent_data = gxbb_hdmi_parents,
+ .num_parents = ARRAY_SIZE(gxbb_hdmi_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2547,7 +2545,7 @@ static struct clk_regmap gxbb_hdmi = {
/* VDEC clocks */
-static const struct clk_hw *gxbb_vdec_parent_hws[] = {
+static const struct clk_hw *gxbb_vdec_parents[] = {
&gxbb_fclk_div4.hw,
&gxbb_fclk_div3.hw,
&gxbb_fclk_div5.hw,
@@ -2564,8 +2562,8 @@ static struct clk_regmap gxbb_vdec_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = gxbb_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_vdec_parent_hws),
+ .parent_hws = gxbb_vdec_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2614,8 +2612,8 @@ static struct clk_regmap gxbb_vdec_hevc_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_hevc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = gxbb_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_vdec_parent_hws),
+ .parent_hws = gxbb_vdec_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2654,9 +2652,8 @@ static struct clk_regmap gxbb_vdec_hevc = {
},
};
-static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8,
- 9, 10, 11, 13, 14, };
-static const struct clk_parent_data gen_clk_parent_data[] = {
+static u32 gxbb_gen_clk_parents_val_table[] = { 0, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, };
+static const struct clk_parent_data gxbb_gen_clk_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &gxbb_vdec_1.hw },
{ .hw = &gxbb_vdec_hevc.hw },
@@ -2675,7 +2672,7 @@ static struct clk_regmap gxbb_gen_clk_sel = {
.offset = HHI_GEN_CLK_CNTL,
.mask = 0xf,
.shift = 12,
- .table = mux_table_gen_clk,
+ .table = gxbb_gen_clk_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "gen_clk_sel",
@@ -2686,8 +2683,8 @@ static struct clk_regmap gxbb_gen_clk_sel = {
* vid_pll, vid2_pll (hevc), mpll0, mpll1, mpll2, fdiv4,
* fdiv3, fdiv5, [cts_msr_clk], fdiv7, gp0_pll
*/
- .parent_data = gen_clk_parent_data,
- .num_parents = ARRAY_SIZE(gen_clk_parent_data),
+ .parent_data = gxbb_gen_clk_parents,
+ .num_parents = ARRAY_SIZE(gxbb_gen_clk_parents),
},
};
@@ -2724,100 +2721,118 @@ static struct clk_regmap gxbb_gen_clk = {
},
};
-#define MESON_GATE(_name, _reg, _bit) \
- MESON_PCLK(_name, _reg, _bit, &gxbb_clk81.hw)
-
-/* Everything Else (EE) domain gates */
-static MESON_GATE(gxbb_ddr, HHI_GCLK_MPEG0, 0);
-static MESON_GATE(gxbb_dos, HHI_GCLK_MPEG0, 1);
-static MESON_GATE(gxbb_isa, HHI_GCLK_MPEG0, 5);
-static MESON_GATE(gxbb_pl301, HHI_GCLK_MPEG0, 6);
-static MESON_GATE(gxbb_periphs, HHI_GCLK_MPEG0, 7);
-static MESON_GATE(gxbb_spicc, HHI_GCLK_MPEG0, 8);
-static MESON_GATE(gxbb_i2c, HHI_GCLK_MPEG0, 9);
-static MESON_GATE(gxbb_sana, HHI_GCLK_MPEG0, 10);
-static MESON_GATE(gxbb_smart_card, HHI_GCLK_MPEG0, 11);
-static MESON_GATE(gxbb_rng0, HHI_GCLK_MPEG0, 12);
-static MESON_GATE(gxbb_uart0, HHI_GCLK_MPEG0, 13);
-static MESON_GATE(gxbb_sdhc, HHI_GCLK_MPEG0, 14);
-static MESON_GATE(gxbb_stream, HHI_GCLK_MPEG0, 15);
-static MESON_GATE(gxbb_async_fifo, HHI_GCLK_MPEG0, 16);
-static MESON_GATE(gxbb_sdio, HHI_GCLK_MPEG0, 17);
-static MESON_GATE(gxbb_abuf, HHI_GCLK_MPEG0, 18);
-static MESON_GATE(gxbb_hiu_iface, HHI_GCLK_MPEG0, 19);
-static MESON_GATE(gxbb_assist_misc, HHI_GCLK_MPEG0, 23);
-static MESON_GATE(gxbb_emmc_a, HHI_GCLK_MPEG0, 24);
-static MESON_GATE(gxbb_emmc_b, HHI_GCLK_MPEG0, 25);
-static MESON_GATE(gxbb_emmc_c, HHI_GCLK_MPEG0, 26);
-static MESON_GATE(gxl_acodec, HHI_GCLK_MPEG0, 28);
-static MESON_GATE(gxbb_spi, HHI_GCLK_MPEG0, 30);
-
-static MESON_GATE(gxbb_i2s_spdif, HHI_GCLK_MPEG1, 2);
-static MESON_GATE(gxbb_eth, HHI_GCLK_MPEG1, 3);
-static MESON_GATE(gxbb_demux, HHI_GCLK_MPEG1, 4);
-static MESON_GATE(gxbb_blkmv, HHI_GCLK_MPEG1, 14);
-static MESON_GATE(gxbb_aiu, HHI_GCLK_MPEG1, 15);
-static MESON_GATE(gxbb_uart1, HHI_GCLK_MPEG1, 16);
-static MESON_GATE(gxbb_g2d, HHI_GCLK_MPEG1, 20);
-static MESON_GATE(gxbb_usb0, HHI_GCLK_MPEG1, 21);
-static MESON_GATE(gxbb_usb1, HHI_GCLK_MPEG1, 22);
-static MESON_GATE(gxbb_reset, HHI_GCLK_MPEG1, 23);
-static MESON_GATE(gxbb_nand, HHI_GCLK_MPEG1, 24);
-static MESON_GATE(gxbb_dos_parser, HHI_GCLK_MPEG1, 25);
-static MESON_GATE(gxbb_usb, HHI_GCLK_MPEG1, 26);
-static MESON_GATE(gxbb_vdin1, HHI_GCLK_MPEG1, 28);
-static MESON_GATE(gxbb_ahb_arb0, HHI_GCLK_MPEG1, 29);
-static MESON_GATE(gxbb_efuse, HHI_GCLK_MPEG1, 30);
-static MESON_GATE(gxbb_boot_rom, HHI_GCLK_MPEG1, 31);
-
-static MESON_GATE(gxbb_ahb_data_bus, HHI_GCLK_MPEG2, 1);
-static MESON_GATE(gxbb_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2);
-static MESON_GATE(gxbb_hdmi_intr_sync, HHI_GCLK_MPEG2, 3);
-static MESON_GATE(gxbb_hdmi_pclk, HHI_GCLK_MPEG2, 4);
-static MESON_GATE(gxbb_usb1_ddr_bridge, HHI_GCLK_MPEG2, 8);
-static MESON_GATE(gxbb_usb0_ddr_bridge, HHI_GCLK_MPEG2, 9);
-static MESON_GATE(gxbb_mmc_pclk, HHI_GCLK_MPEG2, 11);
-static MESON_GATE(gxbb_dvin, HHI_GCLK_MPEG2, 12);
-static MESON_GATE(gxbb_uart2, HHI_GCLK_MPEG2, 15);
-static MESON_GATE(gxbb_sar_adc, HHI_GCLK_MPEG2, 22);
-static MESON_GATE(gxbb_vpu_intr, HHI_GCLK_MPEG2, 25);
-static MESON_GATE(gxbb_sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26);
-static MESON_GATE(gxbb_clk81_a53, HHI_GCLK_MPEG2, 29);
-
-static MESON_GATE(gxbb_vclk2_venci0, HHI_GCLK_OTHER, 1);
-static MESON_GATE(gxbb_vclk2_venci1, HHI_GCLK_OTHER, 2);
-static MESON_GATE(gxbb_vclk2_vencp0, HHI_GCLK_OTHER, 3);
-static MESON_GATE(gxbb_vclk2_vencp1, HHI_GCLK_OTHER, 4);
-static MESON_GATE(gxbb_gclk_venci_int0, HHI_GCLK_OTHER, 8);
-static MESON_GATE(gxbb_gclk_vencp_int, HHI_GCLK_OTHER, 9);
-static MESON_GATE(gxbb_dac_clk, HHI_GCLK_OTHER, 10);
-static MESON_GATE(gxbb_aoclk_gate, HHI_GCLK_OTHER, 14);
-static MESON_GATE(gxbb_iec958_gate, HHI_GCLK_OTHER, 16);
-static MESON_GATE(gxbb_enc480p, HHI_GCLK_OTHER, 20);
-static MESON_GATE(gxbb_rng1, HHI_GCLK_OTHER, 21);
-static MESON_GATE(gxbb_gclk_venci_int1, HHI_GCLK_OTHER, 22);
-static MESON_GATE(gxbb_vclk2_venclmcc, HHI_GCLK_OTHER, 24);
-static MESON_GATE(gxbb_vclk2_vencl, HHI_GCLK_OTHER, 25);
-static MESON_GATE(gxbb_vclk_other, HHI_GCLK_OTHER, 26);
-static MESON_GATE(gxbb_edp, HHI_GCLK_OTHER, 31);
+static const struct clk_parent_data gxbb_pclk_parents = { .hw = &gxbb_clk81.hw };
+
+#define GXBB_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(_name, _reg, _bit, &gxbb_pclk_parents, _flags)
+
+/*
+ * Everything Else (EE) domain gates
+ *
+ * NOTE: The gates below are marked with CLK_IGNORE_UNUSED for historical
+ * reasons. Users are encouraged to test without the flag and submit changes
+ * to:
+ * - remove the flag if it is not necessary,
+ * - replace the flag with something more adequate, such as CLK_IS_CRITICAL,
+ *   if appropriate, or
+ * - add a comment explaining why CLK_IGNORE_UNUSED is desirable for a
+ *   particular clock.
+ */
+static GXBB_PCLK(gxbb_ddr, HHI_GCLK_MPEG0, 0, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_dos, HHI_GCLK_MPEG0, 1, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_isa, HHI_GCLK_MPEG0, 5, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_pl301, HHI_GCLK_MPEG0, 6, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_periphs, HHI_GCLK_MPEG0, 7, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_spicc, HHI_GCLK_MPEG0, 8, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_i2c, HHI_GCLK_MPEG0, 9, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_sana, HHI_GCLK_MPEG0, 10, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_smart_card, HHI_GCLK_MPEG0, 11, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_rng0, HHI_GCLK_MPEG0, 12, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_uart0, HHI_GCLK_MPEG0, 13, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_sdhc, HHI_GCLK_MPEG0, 14, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_stream, HHI_GCLK_MPEG0, 15, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_async_fifo, HHI_GCLK_MPEG0, 16, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_sdio, HHI_GCLK_MPEG0, 17, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_abuf, HHI_GCLK_MPEG0, 18, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_hiu_iface, HHI_GCLK_MPEG0, 19, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_assist_misc, HHI_GCLK_MPEG0, 23, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_emmc_a, HHI_GCLK_MPEG0, 24, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_emmc_b, HHI_GCLK_MPEG0, 25, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_emmc_c, HHI_GCLK_MPEG0, 26, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxl_acodec, HHI_GCLK_MPEG0, 28, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_spi, HHI_GCLK_MPEG0, 30, CLK_IGNORE_UNUSED);
+
+static GXBB_PCLK(gxbb_i2s_spdif, HHI_GCLK_MPEG1, 2, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_eth, HHI_GCLK_MPEG1, 3, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_demux, HHI_GCLK_MPEG1, 4, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_blkmv, HHI_GCLK_MPEG1, 14, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_aiu, HHI_GCLK_MPEG1, 15, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_uart1, HHI_GCLK_MPEG1, 16, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_g2d, HHI_GCLK_MPEG1, 20, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_usb0, HHI_GCLK_MPEG1, 21, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_usb1, HHI_GCLK_MPEG1, 22, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_reset, HHI_GCLK_MPEG1, 23, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_nand, HHI_GCLK_MPEG1, 24, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_dos_parser, HHI_GCLK_MPEG1, 25, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_usb, HHI_GCLK_MPEG1, 26, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_vdin1, HHI_GCLK_MPEG1, 28, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_ahb_arb0, HHI_GCLK_MPEG1, 29, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_efuse, HHI_GCLK_MPEG1, 30, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_boot_rom, HHI_GCLK_MPEG1, 31, CLK_IGNORE_UNUSED);
+
+static GXBB_PCLK(gxbb_ahb_data_bus, HHI_GCLK_MPEG2, 1, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_hdmi_intr_sync, HHI_GCLK_MPEG2, 3, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_hdmi_pclk, HHI_GCLK_MPEG2, 4, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_usb1_ddr_bridge, HHI_GCLK_MPEG2, 8, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_usb0_ddr_bridge, HHI_GCLK_MPEG2, 9, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_mmc_pclk, HHI_GCLK_MPEG2, 11, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_dvin, HHI_GCLK_MPEG2, 12, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_uart2, HHI_GCLK_MPEG2, 15, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_sar_adc, HHI_GCLK_MPEG2, 22, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_vpu_intr, HHI_GCLK_MPEG2, 25, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_clk81_a53, HHI_GCLK_MPEG2, 29, CLK_IGNORE_UNUSED);
+
+static GXBB_PCLK(gxbb_vclk2_venci0, HHI_GCLK_OTHER, 1, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_vclk2_venci1, HHI_GCLK_OTHER, 2, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_vclk2_vencp0, HHI_GCLK_OTHER, 3, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_vclk2_vencp1, HHI_GCLK_OTHER, 4, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_gclk_venci_int0, HHI_GCLK_OTHER, 8, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_gclk_vencp_int, HHI_GCLK_OTHER, 9, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_dac_clk, HHI_GCLK_OTHER, 10, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_aoclk_gate, HHI_GCLK_OTHER, 14, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_iec958_gate, HHI_GCLK_OTHER, 16, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_enc480p, HHI_GCLK_OTHER, 20, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_rng1, HHI_GCLK_OTHER, 21, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_gclk_venci_int1, HHI_GCLK_OTHER, 22, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_vclk2_venclmcc, HHI_GCLK_OTHER, 24, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_vclk2_vencl, HHI_GCLK_OTHER, 25, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_vclk_other, HHI_GCLK_OTHER, 26, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_edp, HHI_GCLK_OTHER, 31, CLK_IGNORE_UNUSED);
/* Always On (AO) domain gates */
-static MESON_GATE(gxbb_ao_media_cpu, HHI_GCLK_AO, 0);
-static MESON_GATE(gxbb_ao_ahb_sram, HHI_GCLK_AO, 1);
-static MESON_GATE(gxbb_ao_ahb_bus, HHI_GCLK_AO, 2);
-static MESON_GATE(gxbb_ao_iface, HHI_GCLK_AO, 3);
-static MESON_GATE(gxbb_ao_i2c, HHI_GCLK_AO, 4);
+static GXBB_PCLK(gxbb_ao_media_cpu, HHI_GCLK_AO, 0, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_ao_ahb_sram, HHI_GCLK_AO, 1, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_ao_ahb_bus, HHI_GCLK_AO, 2, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_ao_iface, HHI_GCLK_AO, 3, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_ao_i2c, HHI_GCLK_AO, 4, CLK_IGNORE_UNUSED);
/* AIU gates */
-static MESON_PCLK(gxbb_aiu_glue, HHI_GCLK_MPEG1, 6, &gxbb_aiu.hw);
-static MESON_PCLK(gxbb_iec958, HHI_GCLK_MPEG1, 7, &gxbb_aiu_glue.hw);
-static MESON_PCLK(gxbb_i2s_out, HHI_GCLK_MPEG1, 8, &gxbb_aiu_glue.hw);
-static MESON_PCLK(gxbb_amclk, HHI_GCLK_MPEG1, 9, &gxbb_aiu_glue.hw);
-static MESON_PCLK(gxbb_aififo2, HHI_GCLK_MPEG1, 10, &gxbb_aiu_glue.hw);
-static MESON_PCLK(gxbb_mixer, HHI_GCLK_MPEG1, 11, &gxbb_aiu_glue.hw);
-static MESON_PCLK(gxbb_mixer_iface, HHI_GCLK_MPEG1, 12, &gxbb_aiu_glue.hw);
-static MESON_PCLK(gxbb_adc, HHI_GCLK_MPEG1, 13, &gxbb_aiu_glue.hw);
+static const struct clk_parent_data gxbb_aiu_glue_parents = { .hw = &gxbb_aiu.hw };
+static MESON_PCLK(gxbb_aiu_glue, HHI_GCLK_MPEG1, 6, &gxbb_aiu_glue_parents, CLK_IGNORE_UNUSED);
+
+static const struct clk_parent_data gxbb_aiu_pclk_parents = { .hw = &gxbb_aiu_glue.hw };
+#define GXBB_AIU_PCLK(_name, _bit, _flags) \
+ MESON_PCLK(_name, HHI_GCLK_MPEG1, _bit, &gxbb_aiu_pclk_parents, _flags)
+
+static GXBB_AIU_PCLK(gxbb_iec958, 7, CLK_IGNORE_UNUSED);
+static GXBB_AIU_PCLK(gxbb_i2s_out, 8, CLK_IGNORE_UNUSED);
+static GXBB_AIU_PCLK(gxbb_amclk, 9, CLK_IGNORE_UNUSED);
+static GXBB_AIU_PCLK(gxbb_aififo2, 10, CLK_IGNORE_UNUSED);
+static GXBB_AIU_PCLK(gxbb_mixer, 11, CLK_IGNORE_UNUSED);
+static GXBB_AIU_PCLK(gxbb_mixer_iface, 12, CLK_IGNORE_UNUSED);
+static GXBB_AIU_PCLK(gxbb_adc, 13, CLK_IGNORE_UNUSED);
/* Array of all clocks provided by this provider */
@@ -2831,8 +2846,8 @@ static struct clk_hw *gxbb_hw_clks[] = {
[CLKID_FCLK_DIV5] = &gxbb_fclk_div5.hw,
[CLKID_FCLK_DIV7] = &gxbb_fclk_div7.hw,
[CLKID_GP0_PLL] = &gxbb_gp0_pll.hw,
- [CLKID_MPEG_SEL] = &gxbb_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &gxbb_mpeg_clk_div.hw,
+ [CLKID_MPEG_SEL] = &gxbb_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &gxbb_clk81_div.hw,
[CLKID_CLK81] = &gxbb_clk81.hw,
[CLKID_MPLL0] = &gxbb_mpll0.hw,
[CLKID_MPLL1] = &gxbb_mpll1.hw,
@@ -3039,8 +3054,8 @@ static struct clk_hw *gxl_hw_clks[] = {
[CLKID_FCLK_DIV5] = &gxbb_fclk_div5.hw,
[CLKID_FCLK_DIV7] = &gxbb_fclk_div7.hw,
[CLKID_GP0_PLL] = &gxbb_gp0_pll.hw,
- [CLKID_MPEG_SEL] = &gxbb_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &gxbb_mpeg_clk_div.hw,
+ [CLKID_MPEG_SEL] = &gxbb_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &gxbb_clk81_div.hw,
[CLKID_CLK81] = &gxbb_clk81.hw,
[CLKID_MPLL0] = &gxbb_mpll0.hw,
[CLKID_MPLL1] = &gxbb_mpll1.hw,
@@ -3237,35 +3252,35 @@ static struct clk_hw *gxl_hw_clks[] = {
[CLKID_ACODEC] = &gxl_acodec.hw,
};
-static const struct meson_eeclkc_data gxbb_clkc_data = {
+static const struct meson_clkc_data gxbb_clkc_data = {
.hw_clks = {
.hws = gxbb_hw_clks,
.num = ARRAY_SIZE(gxbb_hw_clks),
},
};
-static const struct meson_eeclkc_data gxl_clkc_data = {
+static const struct meson_clkc_data gxl_clkc_data = {
.hw_clks = {
.hws = gxl_hw_clks,
.num = ARRAY_SIZE(gxl_hw_clks),
},
};
-static const struct of_device_id clkc_match_table[] = {
+static const struct of_device_id gxbb_clkc_match_table[] = {
{ .compatible = "amlogic,gxbb-clkc", .data = &gxbb_clkc_data },
{ .compatible = "amlogic,gxl-clkc", .data = &gxl_clkc_data },
{},
};
-MODULE_DEVICE_TABLE(of, clkc_match_table);
+MODULE_DEVICE_TABLE(of, gxbb_clkc_match_table);
-static struct platform_driver gxbb_driver = {
- .probe = meson_eeclkc_probe,
+static struct platform_driver gxbb_clkc_driver = {
+ .probe = meson_clkc_syscon_probe,
.driver = {
.name = "gxbb-clkc",
- .of_match_table = clkc_match_table,
+ .of_match_table = gxbb_clkc_match_table,
},
};
-module_platform_driver(gxbb_driver);
+module_platform_driver(gxbb_clkc_driver);
MODULE_DESCRIPTION("Amlogic GXBB Main Clock Controller driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c
index 894c02fda072..8f6bdea18119 100644
--- a/drivers/clk/meson/meson-aoclk.c
+++ b/drivers/clk/meson/meson-aoclk.c
@@ -37,15 +37,23 @@ static const struct reset_control_ops meson_aoclk_reset_ops = {
int meson_aoclkc_probe(struct platform_device *pdev)
{
struct meson_aoclk_reset_controller *rstc;
- struct meson_aoclk_data *data;
+ const struct meson_clkc_data *clkc_data;
+ const struct meson_aoclk_data *data;
struct device *dev = &pdev->dev;
struct device_node *np;
struct regmap *regmap;
- int ret, clkid;
+ int ret;
- data = (struct meson_aoclk_data *) of_device_get_match_data(dev);
- if (!data)
- return -ENODEV;
+ clkc_data = of_device_get_match_data(dev);
+ if (!clkc_data)
+ return -EINVAL;
+
+ ret = meson_clkc_syscon_probe(pdev);
+ if (ret)
+ return ret;
+
+ data = container_of(clkc_data, struct meson_aoclk_data,
+ clkc_data);
rstc = devm_kzalloc(dev, sizeof(*rstc), GFP_KERNEL);
if (!rstc)
@@ -71,19 +79,7 @@ int meson_aoclkc_probe(struct platform_device *pdev)
return ret;
}
- /* Register all clks */
- for (clkid = 0; clkid < data->hw_clks.num; clkid++) {
- if (!data->hw_clks.hws[clkid])
- continue;
-
- ret = devm_clk_hw_register(dev, data->hw_clks.hws[clkid]);
- if (ret) {
- dev_err(dev, "Clock registration failed\n");
- return ret;
- }
- }
-
- return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get, (void *)&data->hw_clks);
+ return 0;
}
EXPORT_SYMBOL_NS_GPL(meson_aoclkc_probe, "CLK_MESON");
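The container_of() above encodes a simple contract: the OF match data must point at the meson_clkc_data embedded in the SoC's meson_aoclk_data, so that meson_clkc_syscon_probe() and meson_aoclkc_probe() both resolve the same object. A minimal sketch of controller data under that contract; the foo_* names, register offset and tables are hypothetical, not taken from this series:

static const struct meson_aoclk_data foo_aoclkc_data = {
	.clkc_data = {
		.hw_clks = {
			.hws = foo_ao_hw_clks,		/* assumed clk_hw table */
			.num = ARRAY_SIZE(foo_ao_hw_clks),
		},
	},
	.reset_reg = 0x14,				/* assumed offset */
	.num_reset = ARRAY_SIZE(foo_aoclk_reset),	/* assumed table */
	.reset = foo_aoclk_reset,
};

static const struct of_device_id foo_aoclkc_match_table[] = {
	{
		.compatible = "vendor,foo-aoclkc",	/* hypothetical */
		/* point at the embedded clkc_data, not the outer struct */
		.data = &foo_aoclkc_data.clkc_data,
	},
	{ /* sentinel */ }
};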
diff --git a/drivers/clk/meson/meson-aoclk.h b/drivers/clk/meson/meson-aoclk.h
index ea5fc61308af..2c83e73d3a77 100644
--- a/drivers/clk/meson/meson-aoclk.h
+++ b/drivers/clk/meson/meson-aoclk.h
@@ -20,10 +20,10 @@
#include "meson-clkc-utils.h"
struct meson_aoclk_data {
+ const struct meson_clkc_data clkc_data;
const unsigned int reset_reg;
const int num_reset;
const unsigned int *reset;
- struct meson_clk_hw_data hw_clks;
};
struct meson_aoclk_reset_controller {
diff --git a/drivers/clk/meson/meson-clkc-utils.c b/drivers/clk/meson/meson-clkc-utils.c
index 6937d1482719..870f50548e26 100644
--- a/drivers/clk/meson/meson-clkc-utils.c
+++ b/drivers/clk/meson/meson-clkc-utils.c
@@ -3,9 +3,13 @@
* Copyright (c) 2023 Neil Armstrong <neil.armstrong@linaro.org>
*/
-#include <linux/of_device.h>
#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
#include "meson-clkc-utils.h"
struct clk_hw *meson_clk_hw_get(struct of_phandle_args *clkspec, void *clk_hw_data)
@@ -22,6 +26,86 @@ struct clk_hw *meson_clk_hw_get(struct of_phandle_args *clkspec, void *clk_hw_da
}
EXPORT_SYMBOL_NS_GPL(meson_clk_hw_get, "CLK_MESON");
+static int meson_clkc_init(struct device *dev, struct regmap *map)
+{
+ const struct meson_clkc_data *data;
+ struct clk_hw *hw;
+ int ret, i;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ if (data->init_count)
+ regmap_multi_reg_write(map, data->init_regs, data->init_count);
+
+ for (i = 0; i < data->hw_clks.num; i++) {
+ hw = data->hw_clks.hws[i];
+
+ /* array might be sparse */
+ if (!hw)
+ continue;
+
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret) {
+ dev_err(dev, "registering %s clock failed\n",
+ hw->init->name);
+ return ret;
+ }
+ }
+
+ return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get, (void *)&data->hw_clks);
+}
+
+int meson_clkc_syscon_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np;
+ struct regmap *map;
+
+ np = of_get_parent(dev->of_node);
+ map = syscon_node_to_regmap(np);
+ of_node_put(np);
+ if (IS_ERR(map)) {
+ dev_err(dev, "failed to get parent syscon regmap\n");
+ return PTR_ERR(map);
+ }
+
+ return meson_clkc_init(dev, map);
+}
+EXPORT_SYMBOL_NS_GPL(meson_clkc_syscon_probe, "CLK_MESON");
+
+int meson_clkc_mmio_probe(struct platform_device *pdev)
+{
+ const struct meson_clkc_data *data;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *base;
+ struct regmap *map;
+ struct regmap_config regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ };
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap_cfg.max_register = resource_size(res) - regmap_cfg.reg_stride;
+
+ map = devm_regmap_init_mmio(dev, base, &regmap_cfg);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ return meson_clkc_init(dev, map);
+}
+EXPORT_SYMBOL_NS_GPL(meson_clkc_mmio_probe, "CLK_MESON");
+
MODULE_DESCRIPTION("Amlogic Clock Controller Utilities");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS("CLK_MESON");
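With these helpers in place, a controller driver reduces to its match data; init_regs, when present, are written once by meson_clkc_init() before any clock is registered. A hedged sketch of an MMIO-mapped controller, with hypothetical foo_* names and register values:

/* foo_pll is assumed to be a clk_regmap defined elsewhere in the driver */
static const struct reg_sequence foo_init_regs[] = {
	{ .reg = 0x0, .def = 0x12345678 },	/* assumed init value */
};

static struct clk_hw *foo_hw_clks[] = {
	[0] = &foo_pll.hw,
};

static const struct meson_clkc_data foo_clkc_data = {
	.init_regs = foo_init_regs,
	.init_count = ARRAY_SIZE(foo_init_regs),
	.hw_clks = {
		.hws = foo_hw_clks,
		.num = ARRAY_SIZE(foo_hw_clks),
	},
};

static const struct of_device_id foo_clkc_match_table[] = {
	{ .compatible = "vendor,foo-clkc", .data = &foo_clkc_data },
	{ /* sentinel */ }
};

static struct platform_driver foo_clkc_driver = {
	.probe = meson_clkc_mmio_probe,
	.driver = {
		.name = "foo-clkc",
		.of_match_table = foo_clkc_match_table,
	},
};
module_platform_driver(foo_clkc_driver);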
diff --git a/drivers/clk/meson/meson-clkc-utils.h b/drivers/clk/meson/meson-clkc-utils.h
index fe6f40728949..ddadf14b4923 100644
--- a/drivers/clk/meson/meson-clkc-utils.h
+++ b/drivers/clk/meson/meson-clkc-utils.h
@@ -9,6 +9,8 @@
#include <linux/of_device.h>
#include <linux/clk-provider.h>
+struct platform_device;
+
struct meson_clk_hw_data {
struct clk_hw **hws;
unsigned int num;
@@ -16,4 +18,91 @@ struct meson_clk_hw_data {
struct clk_hw *meson_clk_hw_get(struct of_phandle_args *clkspec, void *clk_hw_data);
+struct meson_clkc_data {
+ const struct reg_sequence *init_regs;
+ unsigned int init_count;
+ struct meson_clk_hw_data hw_clks;
+};
+
+int meson_clkc_syscon_probe(struct platform_device *pdev);
+int meson_clkc_mmio_probe(struct platform_device *pdev);
+
+#define __MESON_PCLK(_name, _reg, _bit, _ops, _pdata, _flags) \
+struct clk_regmap _name = { \
+ .data = &(struct clk_regmap_gate_data) { \
+ .offset = (_reg), \
+ .bit_idx = (_bit), \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = #_name, \
+ .ops = _ops, \
+ .parent_data = (_pdata), \
+ .num_parents = 1, \
+ .flags = (_flags), \
+ }, \
+}
+
+#define MESON_PCLK(_name, _reg, _bit, _pdata, _flags) \
+ __MESON_PCLK(_name, _reg, _bit, &clk_regmap_gate_ops, _pdata, _flags)
+
+#define MESON_PCLK_RO(_name, _reg, _bit, _pdata, _flags) \
+ __MESON_PCLK(_name, _reg, _bit, &clk_regmap_gate_ro_ops, _pdata, _flags)
+
+/* Helpers for the usual sel/div/gate composite clocks */
+#define MESON_COMP_SEL(_prefix, _name, _reg, _shift, _mask, _pdata, \
+ _table, _dflags, _iflags) \
+struct clk_regmap _prefix##_name##_sel = { \
+ .data = &(struct clk_regmap_mux_data) { \
+ .offset = (_reg), \
+ .mask = (_mask), \
+ .shift = (_shift), \
+ .flags = (_dflags), \
+ .table = (_table), \
+ }, \
+ .hw.init = &(struct clk_init_data){ \
+ .name = #_name "_sel", \
+ .ops = &clk_regmap_mux_ops, \
+ .parent_data = _pdata, \
+ .num_parents = ARRAY_SIZE(_pdata), \
+ .flags = (_iflags), \
+ }, \
+}
+
+#define MESON_COMP_DIV(_prefix, _name, _reg, _shift, _width, \
+ _dflags, _iflags) \
+struct clk_regmap _prefix##_name##_div = { \
+ .data = &(struct clk_regmap_div_data) { \
+ .offset = (_reg), \
+ .shift = (_shift), \
+ .width = (_width), \
+ .flags = (_dflags), \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = #_name "_div", \
+ .ops = &clk_regmap_divider_ops, \
+ .parent_hws = (const struct clk_hw *[]) { \
+ &_prefix##_name##_sel.hw \
+ }, \
+ .num_parents = 1, \
+ .flags = (_iflags), \
+ }, \
+}
+
+#define MESON_COMP_GATE(_prefix, _name, _reg, _bit, _iflags) \
+struct clk_regmap _prefix##_name = { \
+ .data = &(struct clk_regmap_gate_data) { \
+ .offset = (_reg), \
+ .bit_idx = (_bit), \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = #_name, \
+ .ops = &clk_regmap_gate_ops, \
+ .parent_hws = (const struct clk_hw *[]) { \
+ &_prefix##_name##_div.hw \
+ }, \
+ .num_parents = 1, \
+ .flags = (_iflags), \
+ }, \
+}
+
#endif
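To make the macros concrete: one parented gate and one sel/div/gate composite, as a hypothetical driver would declare them. The foo_* identifiers and register offsets are illustrative only; foo_clk81 is assumed to be a clk_regmap defined elsewhere:

#define FOO_GCLK_MPEG0		0x140	/* assumed register offset */
#define FOO_SPICC_CLK_CNTL	0x168	/* assumed register offset */

static const struct clk_parent_data foo_pclk_parents = { .hw = &foo_clk81.hw };

/* a single pclk gate: clk name "foo_uart0", parented to clk81 */
static MESON_PCLK(foo_uart0, FOO_GCLK_MPEG0, 13, &foo_pclk_parents, 0);

static const struct clk_parent_data foo_spicc_parents[] = {
	{ .fw_name = "xtal", },
	{ .hw = &foo_clk81.hw },
};

/* a composite chain: "spicc_sel" -> "spicc_div" -> "spicc" */
static MESON_COMP_SEL(foo_, spicc, FOO_SPICC_CLK_CNTL, 9, 0x1,
		      foo_spicc_parents, NULL, 0, 0);
static MESON_COMP_DIV(foo_, spicc, FOO_SPICC_CLK_CNTL, 0, 7,
		      0, CLK_SET_RATE_PARENT);
static MESON_COMP_GATE(foo_, spicc, FOO_SPICC_CLK_CNTL, 8, CLK_SET_RATE_PARENT);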
diff --git a/drivers/clk/meson/meson-eeclk.c b/drivers/clk/meson/meson-eeclk.c
deleted file mode 100644
index 6236bf970d79..000000000000
--- a/drivers/clk/meson/meson-eeclk.c
+++ /dev/null
@@ -1,60 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2019 BayLibre, SAS.
- * Author: Jerome Brunet <jbrunet@baylibre.com>
- */
-
-#include <linux/clk-provider.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/mfd/syscon.h>
-#include <linux/regmap.h>
-#include <linux/module.h>
-
-#include "clk-regmap.h"
-#include "meson-eeclk.h"
-
-int meson_eeclkc_probe(struct platform_device *pdev)
-{
- const struct meson_eeclkc_data *data;
- struct device *dev = &pdev->dev;
- struct device_node *np;
- struct regmap *map;
- int ret, i;
-
- data = of_device_get_match_data(dev);
- if (!data)
- return -EINVAL;
-
- /* Get the hhi system controller node */
- np = of_get_parent(dev->of_node);
- map = syscon_node_to_regmap(np);
- of_node_put(np);
- if (IS_ERR(map)) {
- dev_err(dev,
- "failed to get HHI regmap\n");
- return PTR_ERR(map);
- }
-
- if (data->init_count)
- regmap_multi_reg_write(map, data->init_regs, data->init_count);
-
- for (i = 0; i < data->hw_clks.num; i++) {
- /* array might be sparse */
- if (!data->hw_clks.hws[i])
- continue;
-
- ret = devm_clk_hw_register(dev, data->hw_clks.hws[i]);
- if (ret) {
- dev_err(dev, "Clock registration failed\n");
- return ret;
- }
- }
-
- return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get, (void *)&data->hw_clks);
-}
-EXPORT_SYMBOL_NS_GPL(meson_eeclkc_probe, "CLK_MESON");
-
-MODULE_DESCRIPTION("Amlogic Main Clock Controller Helpers");
-MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/meson-eeclk.h b/drivers/clk/meson/meson-eeclk.h
deleted file mode 100644
index 6a81d67b46b2..000000000000
--- a/drivers/clk/meson/meson-eeclk.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2019 BayLibre, SAS.
- * Author: Jerome Brunet <jbrunet@baylibre.com>
- */
-
-#ifndef __MESON_CLKC_H
-#define __MESON_CLKC_H
-
-#include <linux/clk-provider.h>
-#include "clk-regmap.h"
-#include "meson-clkc-utils.h"
-
-struct platform_device;
-
-struct meson_eeclkc_data {
- const struct reg_sequence *init_regs;
- unsigned int init_count;
- struct meson_clk_hw_data hw_clks;
-};
-
-int meson_eeclkc_probe(struct platform_device *pdev);
-
-#endif /* __MESON_CLKC_H */
diff --git a/drivers/clk/meson/meson8-ddr.c b/drivers/clk/meson/meson8-ddr.c
index 1975fc3987e2..0f93774f7371 100644
--- a/drivers/clk/meson/meson8-ddr.c
+++ b/drivers/clk/meson/meson8-ddr.c
@@ -12,6 +12,7 @@
#include "clk-regmap.h"
#include "clk-pll.h"
+#include "meson-clkc-utils.h"
#define AM_DDR_PLL_CNTL 0x00
#define AM_DDR_PLL_CNTL1 0x04
@@ -77,60 +78,31 @@ static struct clk_regmap meson8_ddr_pll = {
},
};
-static struct clk_hw_onecell_data meson8_ddr_clk_hw_onecell_data = {
- .hws = {
- [DDR_CLKID_DDR_PLL_DCO] = &meson8_ddr_pll_dco.hw,
- [DDR_CLKID_DDR_PLL] = &meson8_ddr_pll.hw,
- },
- .num = 2,
+static struct clk_hw *meson8_ddr_hw_clks[] = {
+ [DDR_CLKID_DDR_PLL_DCO] = &meson8_ddr_pll_dco.hw,
+ [DDR_CLKID_DDR_PLL] = &meson8_ddr_pll.hw,
};
-static const struct regmap_config meson8_ddr_clkc_regmap_config = {
- .reg_bits = 8,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = DDR_CLK_STS,
+static const struct meson_clkc_data meson8_ddr_clkc_data = {
+ .hw_clks = {
+ .hws = meson8_ddr_hw_clks,
+ .num = ARRAY_SIZE(meson8_ddr_hw_clks),
+ },
};
-static int meson8_ddr_clkc_probe(struct platform_device *pdev)
-{
- struct regmap *regmap;
- void __iomem *base;
- struct clk_hw *hw;
- int ret, i;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- regmap = devm_regmap_init_mmio(&pdev->dev, base,
- &meson8_ddr_clkc_regmap_config);
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
-
- /* Register all clks */
- for (i = 0; i < meson8_ddr_clk_hw_onecell_data.num; i++) {
- hw = meson8_ddr_clk_hw_onecell_data.hws[i];
-
- ret = devm_clk_hw_register(&pdev->dev, hw);
- if (ret) {
- dev_err(&pdev->dev, "Clock registration failed\n");
- return ret;
- }
- }
-
- return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get,
- &meson8_ddr_clk_hw_onecell_data);
-}
-
static const struct of_device_id meson8_ddr_clkc_match_table[] = {
- { .compatible = "amlogic,meson8-ddr-clkc" },
- { .compatible = "amlogic,meson8b-ddr-clkc" },
+ {
+ .compatible = "amlogic,meson8-ddr-clkc",
+ .data = &meson8_ddr_clkc_data,
+ }, {
+ .compatible = "amlogic,meson8b-ddr-clkc",
+ .data = &meson8_ddr_clkc_data,
+ },
{ /* sentinel */ }
};
static struct platform_driver meson8_ddr_clkc_driver = {
- .probe = meson8_ddr_clkc_probe,
+ .probe = meson_clkc_mmio_probe,
.driver = {
.name = "meson8-ddr-clkc",
.of_match_table = meson8_ddr_clkc_match_table,
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 206538326614..95d0b9cbd904 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -214,7 +214,7 @@ static const struct reg_sequence meson8b_hdmi_pll_init_regs[] = {
{ .reg = HHI_VID2_PLL_CNTL2, .def = 0x0430a800 },
};
-static const struct pll_params_table hdmi_pll_params_table[] = {
+static const struct pll_params_table meson8b_hdmi_pll_params_table[] = {
PLL_PARAMS(40, 1),
PLL_PARAMS(42, 1),
PLL_PARAMS(44, 1),
@@ -267,7 +267,7 @@ static struct clk_regmap meson8b_hdmi_pll_dco = {
.shift = 29,
.width = 1,
},
- .table = hdmi_pll_params_table,
+ .table = meson8b_hdmi_pll_params_table,
.init_regs = meson8b_hdmi_pll_init_regs,
.init_count = ARRAY_SIZE(meson8b_hdmi_pll_init_regs),
},
@@ -670,16 +670,17 @@ static struct clk_regmap meson8b_mpll2 = {
},
};
-static u32 mux_table_clk81[] = { 6, 5, 7 };
-static struct clk_regmap meson8b_mpeg_clk_sel = {
+/* clk81 is often referred to as "mpeg_clk" */
+static u32 meson8b_clk81_parents_val_table[] = { 6, 5, 7 };
+static struct clk_regmap meson8b_clk81_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_MPEG_CLK_CNTL,
.mask = 0x7,
.shift = 12,
- .table = mux_table_clk81,
+ .table = meson8b_clk81_parents_val_table,
},
.hw.init = &(struct clk_init_data){
- .name = "mpeg_clk_sel",
+ .name = "clk81_sel",
.ops = &clk_regmap_mux_ro_ops,
/*
* FIXME bits 14:12 selects from 8 possible parents:
@@ -695,17 +696,17 @@ static struct clk_regmap meson8b_mpeg_clk_sel = {
},
};
-static struct clk_regmap meson8b_mpeg_clk_div = {
+static struct clk_regmap meson8b_clk81_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_MPEG_CLK_CNTL,
.shift = 0,
.width = 7,
},
.hw.init = &(struct clk_init_data){
- .name = "mpeg_clk_div",
+ .name = "clk81_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_mpeg_clk_sel.hw
+ &meson8b_clk81_sel.hw
},
.num_parents = 1,
},
@@ -720,7 +721,7 @@ static struct clk_regmap meson8b_clk81 = {
.name = "clk81",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_mpeg_clk_div.hw
+ &meson8b_clk81_div.hw
},
.num_parents = 1,
.flags = CLK_IS_CRITICAL,
@@ -774,7 +775,7 @@ static struct clk_fixed_factor meson8b_cpu_in_div3 = {
},
};
-static const struct clk_div_table cpu_scale_table[] = {
+static const struct clk_div_table meson8b_cpu_scale_div_table[] = {
{ .val = 1, .div = 4 },
{ .val = 2, .div = 6 },
{ .val = 3, .div = 8 },
@@ -791,7 +792,7 @@ static struct clk_regmap meson8b_cpu_scale_div = {
.offset = HHI_SYS_CPU_CLK_CNTL1,
.shift = 20,
.width = 10,
- .table = cpu_scale_table,
+ .table = meson8b_cpu_scale_div_table,
.flags = CLK_DIVIDER_ALLOW_ZERO,
},
.hw.init = &(struct clk_init_data){
@@ -805,13 +806,13 @@ static struct clk_regmap meson8b_cpu_scale_div = {
},
};
-static u32 mux_table_cpu_scale_out_sel[] = { 0, 1, 3 };
+static u32 meson8b_cpu_scale_out_parents_val_table[] = { 0, 1, 3 };
static struct clk_regmap meson8b_cpu_scale_out_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL0,
.mask = 0x3,
.shift = 2,
- .table = mux_table_cpu_scale_out_sel,
+ .table = meson8b_cpu_scale_out_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cpu_scale_out_sel",
@@ -893,13 +894,13 @@ static struct clk_regmap meson8b_nand_clk_div = {
},
};
-static struct clk_regmap meson8b_nand_clk_gate = {
+static struct clk_regmap meson8b_nand_clk = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_NAND_CLK_CNTL,
.bit_idx = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "nand_clk_gate",
+ .name = "nand_clk",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_nand_clk_div.hw
@@ -1000,160 +1001,137 @@ static struct clk_fixed_factor meson8b_cpu_clk_div8 = {
},
};
-static u32 mux_table_apb[] = { 1, 2, 3, 4, 5, 6, 7 };
-static struct clk_regmap meson8b_apb_clk_sel = {
+static u32 meson8b_cpu_if_parents_val_table[] = { 1, 2, 3, 4, 5, 6, 7 };
+static const struct clk_hw *meson8b_cpu_if_parents[] = {
+ &meson8b_cpu_clk_div2.hw,
+ &meson8b_cpu_clk_div3.hw,
+ &meson8b_cpu_clk_div4.hw,
+ &meson8b_cpu_clk_div5.hw,
+ &meson8b_cpu_clk_div6.hw,
+ &meson8b_cpu_clk_div7.hw,
+ &meson8b_cpu_clk_div8.hw,
+};
+
+static struct clk_regmap meson8b_apb_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.mask = 0x7,
.shift = 3,
- .table = mux_table_apb,
+ .table = meson8b_cpu_if_parents_val_table,
},
.hw.init = &(struct clk_init_data){
- .name = "apb_clk_sel",
+ .name = "apb_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &meson8b_cpu_clk_div2.hw,
- &meson8b_cpu_clk_div3.hw,
- &meson8b_cpu_clk_div4.hw,
- &meson8b_cpu_clk_div5.hw,
- &meson8b_cpu_clk_div6.hw,
- &meson8b_cpu_clk_div7.hw,
- &meson8b_cpu_clk_div8.hw,
- },
- .num_parents = 7,
+ .parent_hws = meson8b_cpu_if_parents,
+ .num_parents = ARRAY_SIZE(meson8b_cpu_if_parents),
},
};
-static struct clk_regmap meson8b_apb_clk_gate = {
+static struct clk_regmap meson8b_apb = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.bit_idx = 16,
.flags = CLK_GATE_SET_TO_DISABLE,
},
.hw.init = &(struct clk_init_data){
- .name = "apb_clk_dis",
+ .name = "apb",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_apb_clk_sel.hw
+ &meson8b_apb_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap meson8b_periph_clk_sel = {
+static struct clk_regmap meson8b_periph_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.mask = 0x7,
.shift = 6,
},
.hw.init = &(struct clk_init_data){
- .name = "periph_clk_sel",
+ .name = "periph_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &meson8b_cpu_clk_div2.hw,
- &meson8b_cpu_clk_div3.hw,
- &meson8b_cpu_clk_div4.hw,
- &meson8b_cpu_clk_div5.hw,
- &meson8b_cpu_clk_div6.hw,
- &meson8b_cpu_clk_div7.hw,
- &meson8b_cpu_clk_div8.hw,
- },
- .num_parents = 7,
+ .parent_hws = meson8b_cpu_if_parents,
+ .num_parents = ARRAY_SIZE(meson8b_cpu_if_parents),
},
};
-static struct clk_regmap meson8b_periph_clk_gate = {
+static struct clk_regmap meson8b_periph = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.bit_idx = 17,
.flags = CLK_GATE_SET_TO_DISABLE,
},
.hw.init = &(struct clk_init_data){
- .name = "periph_clk_dis",
+ .name = "periph",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_periph_clk_sel.hw
+ &meson8b_periph_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static u32 mux_table_axi[] = { 1, 2, 3, 4, 5, 6, 7 };
-static struct clk_regmap meson8b_axi_clk_sel = {
+static struct clk_regmap meson8b_axi_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.mask = 0x7,
.shift = 9,
- .table = mux_table_axi,
+ .table = meson8b_cpu_if_parents_val_table,
},
.hw.init = &(struct clk_init_data){
- .name = "axi_clk_sel",
+ .name = "axi_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &meson8b_cpu_clk_div2.hw,
- &meson8b_cpu_clk_div3.hw,
- &meson8b_cpu_clk_div4.hw,
- &meson8b_cpu_clk_div5.hw,
- &meson8b_cpu_clk_div6.hw,
- &meson8b_cpu_clk_div7.hw,
- &meson8b_cpu_clk_div8.hw,
- },
- .num_parents = 7,
+ .parent_hws = meson8b_cpu_if_parents,
+ .num_parents = ARRAY_SIZE(meson8b_cpu_if_parents),
},
};
-static struct clk_regmap meson8b_axi_clk_gate = {
+static struct clk_regmap meson8b_axi = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.bit_idx = 18,
.flags = CLK_GATE_SET_TO_DISABLE,
},
.hw.init = &(struct clk_init_data){
- .name = "axi_clk_dis",
+ .name = "axi",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_axi_clk_sel.hw
+ &meson8b_axi_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap meson8b_l2_dram_clk_sel = {
+static struct clk_regmap meson8b_l2_dram_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.mask = 0x7,
.shift = 12,
},
.hw.init = &(struct clk_init_data){
- .name = "l2_dram_clk_sel",
+ .name = "l2_dram_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &meson8b_cpu_clk_div2.hw,
- &meson8b_cpu_clk_div3.hw,
- &meson8b_cpu_clk_div4.hw,
- &meson8b_cpu_clk_div5.hw,
- &meson8b_cpu_clk_div6.hw,
- &meson8b_cpu_clk_div7.hw,
- &meson8b_cpu_clk_div8.hw,
- },
- .num_parents = 7,
+ .parent_hws = meson8b_cpu_if_parents,
+ .num_parents = ARRAY_SIZE(meson8b_cpu_if_parents),
},
};
-static struct clk_regmap meson8b_l2_dram_clk_gate = {
+static struct clk_regmap meson8b_l2_dram = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.bit_idx = 19,
.flags = CLK_GATE_SET_TO_DISABLE,
},
.hw.init = &(struct clk_init_data){
- .name = "l2_dram_clk_dis",
+ .name = "l2_dram",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_l2_dram_clk_sel.hw
+ &meson8b_l2_dram_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1286,7 +1264,7 @@ static struct clk_regmap meson8b_vid_pll_final_div = {
},
};
-static const struct clk_hw *meson8b_vclk_mux_parent_hws[] = {
+static const struct clk_hw *meson8b_vclk_parents[] = {
&meson8b_vid_pll_final_div.hw,
&meson8b_fclk_div4.hw,
&meson8b_fclk_div3.hw,
@@ -1305,8 +1283,8 @@ static struct clk_regmap meson8b_vclk_in_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk_in_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vclk_mux_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vclk_mux_parent_hws),
+ .parent_hws = meson8b_vclk_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_parents),
.flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1343,13 +1321,13 @@ static struct clk_regmap meson8b_vclk_en = {
},
};
-static struct clk_regmap meson8b_vclk_div1_gate = {
+static struct clk_regmap meson8b_vclk_div1 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VID_CLK_CNTL,
.bit_idx = 0,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk_div1_en",
+ .name = "vclk_div1",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_en.hw
@@ -1363,7 +1341,7 @@ static struct clk_fixed_factor meson8b_vclk_div2_div = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data){
- .name = "vclk_div2",
+ .name = "vclk_div2_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_en.hw
@@ -1373,13 +1351,13 @@ static struct clk_fixed_factor meson8b_vclk_div2_div = {
}
};
-static struct clk_regmap meson8b_vclk_div2_div_gate = {
+static struct clk_regmap meson8b_vclk_div2 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VID_CLK_CNTL,
.bit_idx = 1,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk_div2_en",
+ .name = "vclk_div2",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_div2_div.hw
@@ -1393,7 +1371,7 @@ static struct clk_fixed_factor meson8b_vclk_div4_div = {
.mult = 1,
.div = 4,
.hw.init = &(struct clk_init_data){
- .name = "vclk_div4",
+ .name = "vclk_div4_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_en.hw
@@ -1403,13 +1381,13 @@ static struct clk_fixed_factor meson8b_vclk_div4_div = {
}
};
-static struct clk_regmap meson8b_vclk_div4_div_gate = {
+static struct clk_regmap meson8b_vclk_div4 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VID_CLK_CNTL,
.bit_idx = 2,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk_div4_en",
+ .name = "vclk_div4",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_div4_div.hw
@@ -1423,7 +1401,7 @@ static struct clk_fixed_factor meson8b_vclk_div6_div = {
.mult = 1,
.div = 6,
.hw.init = &(struct clk_init_data){
- .name = "vclk_div6",
+ .name = "vclk_div6_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_en.hw
@@ -1433,13 +1411,13 @@ static struct clk_fixed_factor meson8b_vclk_div6_div = {
}
};
-static struct clk_regmap meson8b_vclk_div6_div_gate = {
+static struct clk_regmap meson8b_vclk_div6 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VID_CLK_CNTL,
.bit_idx = 3,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk_div6_en",
+ .name = "vclk_div6",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_div6_div.hw
@@ -1453,7 +1431,7 @@ static struct clk_fixed_factor meson8b_vclk_div12_div = {
.mult = 1,
.div = 12,
.hw.init = &(struct clk_init_data){
- .name = "vclk_div12",
+ .name = "vclk_div12_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_en.hw
@@ -1463,13 +1441,13 @@ static struct clk_fixed_factor meson8b_vclk_div12_div = {
}
};
-static struct clk_regmap meson8b_vclk_div12_div_gate = {
+static struct clk_regmap meson8b_vclk_div12 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VID_CLK_CNTL,
.bit_idx = 4,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk_div12_en",
+ .name = "vclk_div12",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_div12_div.hw
@@ -1488,13 +1466,13 @@ static struct clk_regmap meson8b_vclk2_in_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_in_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vclk_mux_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vclk_mux_parent_hws),
+ .parent_hws = meson8b_vclk_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_parents),
.flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
},
};
-static struct clk_regmap meson8b_vclk2_clk_in_en = {
+static struct clk_regmap meson8b_vclk2_in_en = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VIID_CLK_DIV,
.bit_idx = 16,
@@ -1510,7 +1488,7 @@ static struct clk_regmap meson8b_vclk2_clk_in_en = {
},
};
-static struct clk_regmap meson8b_vclk2_clk_en = {
+static struct clk_regmap meson8b_vclk2_en = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VIID_CLK_DIV,
.bit_idx = 19,
@@ -1519,23 +1497,23 @@ static struct clk_regmap meson8b_vclk2_clk_en = {
.name = "vclk2_en",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_vclk2_clk_in_en.hw
+ &meson8b_vclk2_in_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap meson8b_vclk2_div1_gate = {
+static struct clk_regmap meson8b_vclk2_div1 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VIID_CLK_DIV,
.bit_idx = 0,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div1_en",
+ .name = "vclk2_div1",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_vclk2_clk_en.hw
+ &meson8b_vclk2_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1546,23 +1524,23 @@ static struct clk_fixed_factor meson8b_vclk2_div2_div = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div2",
+ .name = "vclk2_div2_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_vclk2_clk_en.hw
+ &meson8b_vclk2_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
}
};
-static struct clk_regmap meson8b_vclk2_div2_div_gate = {
+static struct clk_regmap meson8b_vclk2_div2 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VIID_CLK_DIV,
.bit_idx = 1,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div2_en",
+ .name = "vclk2_div2",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk2_div2_div.hw
@@ -1576,23 +1554,23 @@ static struct clk_fixed_factor meson8b_vclk2_div4_div = {
.mult = 1,
.div = 4,
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div4",
+ .name = "vclk2_div4_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_vclk2_clk_en.hw
+ &meson8b_vclk2_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
}
};
-static struct clk_regmap meson8b_vclk2_div4_div_gate = {
+static struct clk_regmap meson8b_vclk2_div4 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VIID_CLK_DIV,
.bit_idx = 2,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div4_en",
+ .name = "vclk2_div4",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk2_div4_div.hw
@@ -1606,23 +1584,23 @@ static struct clk_fixed_factor meson8b_vclk2_div6_div = {
.mult = 1,
.div = 6,
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div6",
+ .name = "vclk2_div6_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_vclk2_clk_en.hw
+ &meson8b_vclk2_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
}
};
-static struct clk_regmap meson8b_vclk2_div6_div_gate = {
+static struct clk_regmap meson8b_vclk2_div6 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VIID_CLK_DIV,
.bit_idx = 3,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div6_en",
+ .name = "vclk2_div6",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk2_div6_div.hw
@@ -1636,23 +1614,23 @@ static struct clk_fixed_factor meson8b_vclk2_div12_div = {
.mult = 1,
.div = 12,
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div12",
+ .name = "vclk2_div12_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_vclk2_clk_en.hw
+ &meson8b_vclk2_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
}
};
-static struct clk_regmap meson8b_vclk2_div12_div_gate = {
+static struct clk_regmap meson8b_vclk2_div12 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VIID_CLK_DIV,
.bit_idx = 4,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div12_en",
+ .name = "vclk2_div12",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk2_div12_div.hw
@@ -1662,12 +1640,12 @@ static struct clk_regmap meson8b_vclk2_div12_div_gate = {
},
};
-static const struct clk_hw *meson8b_vclk_enc_mux_parent_hws[] = {
- &meson8b_vclk_div1_gate.hw,
- &meson8b_vclk_div2_div_gate.hw,
- &meson8b_vclk_div4_div_gate.hw,
- &meson8b_vclk_div6_div_gate.hw,
- &meson8b_vclk_div12_div_gate.hw,
+static const struct clk_hw *meson8b_vclk_enc_parents[] = {
+ &meson8b_vclk_div1.hw,
+ &meson8b_vclk_div2.hw,
+ &meson8b_vclk_div4.hw,
+ &meson8b_vclk_div6.hw,
+ &meson8b_vclk_div12.hw,
};
static struct clk_regmap meson8b_cts_enct_sel = {
@@ -1679,8 +1657,8 @@ static struct clk_regmap meson8b_cts_enct_sel = {
.hw.init = &(struct clk_init_data){
.name = "cts_enct_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vclk_enc_mux_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parent_hws),
+ .parent_hws = meson8b_vclk_enc_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_enc_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1710,8 +1688,8 @@ static struct clk_regmap meson8b_cts_encp_sel = {
.hw.init = &(struct clk_init_data){
.name = "cts_encp_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vclk_enc_mux_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parent_hws),
+ .parent_hws = meson8b_vclk_enc_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_enc_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1741,8 +1719,8 @@ static struct clk_regmap meson8b_cts_enci_sel = {
.hw.init = &(struct clk_init_data){
.name = "cts_enci_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vclk_enc_mux_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parent_hws),
+ .parent_hws = meson8b_vclk_enc_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_enc_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1772,8 +1750,8 @@ static struct clk_regmap meson8b_hdmi_tx_pixel_sel = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_tx_pixel_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vclk_enc_mux_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parent_hws),
+ .parent_hws = meson8b_vclk_enc_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_enc_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1794,14 +1772,6 @@ static struct clk_regmap meson8b_hdmi_tx_pixel = {
},
};
-static const struct clk_hw *meson8b_vclk2_enc_mux_parent_hws[] = {
- &meson8b_vclk2_div1_gate.hw,
- &meson8b_vclk2_div2_div_gate.hw,
- &meson8b_vclk2_div4_div_gate.hw,
- &meson8b_vclk2_div6_div_gate.hw,
- &meson8b_vclk2_div12_div_gate.hw,
-};
-
static struct clk_regmap meson8b_cts_encl_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_VIID_CLK_DIV,
@@ -1811,8 +1781,8 @@ static struct clk_regmap meson8b_cts_encl_sel = {
.hw.init = &(struct clk_init_data){
.name = "cts_encl_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vclk2_enc_mux_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vclk2_enc_mux_parent_hws),
+ .parent_hws = meson8b_vclk_enc_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_enc_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1842,8 +1812,8 @@ static struct clk_regmap meson8b_cts_vdac0_sel = {
.hw.init = &(struct clk_init_data){
.name = "cts_vdac0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vclk2_enc_mux_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vclk2_enc_mux_parent_hws),
+ .parent_hws = meson8b_vclk_enc_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_enc_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1926,7 +1896,8 @@ static struct clk_regmap meson8b_hdmi_sys = {
* CLK_SET_RATE_GATE is set.
* Meson8 only has mali_0 and no glitch-free mux.
*/
-static const struct clk_parent_data meson8b_mali_0_1_parent_data[] = {
+static u32 meson8b_mali_parents_val_table[] = { 0, 2, 3, 4, 5, 6, 7 };
+static const struct clk_parent_data meson8b_mali_parents[] = {
{ .fw_name = "xtal", .name = "xtal", .index = -1, },
{ .hw = &meson8b_mpll2.hw, },
{ .hw = &meson8b_mpll1.hw, },
@@ -1936,20 +1907,18 @@ static const struct clk_parent_data meson8b_mali_0_1_parent_data[] = {
{ .hw = &meson8b_fclk_div5.hw, },
};
-static u32 meson8b_mali_0_1_mux_table[] = { 0, 2, 3, 4, 5, 6, 7 };
-
static struct clk_regmap meson8b_mali_0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_MALI_CLK_CNTL,
.mask = 0x7,
.shift = 9,
- .table = meson8b_mali_0_1_mux_table,
+ .table = meson8b_mali_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "mali_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = meson8b_mali_0_1_parent_data,
- .num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_data),
+ .parent_data = meson8b_mali_parents,
+ .num_parents = ARRAY_SIZE(meson8b_mali_parents),
/*
* Don't propagate rate changes up because the only changeable
* parents are mpll1 and mpll2 but we need those for audio and
@@ -1998,13 +1967,13 @@ static struct clk_regmap meson8b_mali_1_sel = {
.offset = HHI_MALI_CLK_CNTL,
.mask = 0x7,
.shift = 25,
- .table = meson8b_mali_0_1_mux_table,
+ .table = meson8b_mali_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "mali_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = meson8b_mali_0_1_parent_data,
- .num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_data),
+ .parent_data = meson8b_mali_parents,
+ .num_parents = ARRAY_SIZE(meson8b_mali_parents),
/*
* Don't propagate rate changes up because the only changeable
* parents are mpll1 and mpll2 but we need those for audio and
@@ -2139,20 +2108,13 @@ static struct clk_regmap meson8m2_gp_pll = {
},
};
-static const struct clk_hw *meson8b_vpu_0_1_parent_hws[] = {
+static const struct clk_hw *meson8b_vpu_parents[] = {
&meson8b_fclk_div4.hw,
&meson8b_fclk_div3.hw,
&meson8b_fclk_div5.hw,
&meson8b_fclk_div7.hw,
};
-static const struct clk_hw *mmeson8m2_vpu_0_1_parent_hws[] = {
- &meson8b_fclk_div4.hw,
- &meson8b_fclk_div3.hw,
- &meson8b_fclk_div5.hw,
- &meson8m2_gp_pll.hw,
-};
-
static struct clk_regmap meson8b_vpu_0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_VPU_CLK_CNTL,
@@ -2162,12 +2124,19 @@ static struct clk_regmap meson8b_vpu_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vpu_0_1_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vpu_0_1_parent_hws),
+ .parent_hws = meson8b_vpu_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vpu_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
+static const struct clk_hw *meson8m2_vpu_parents[] = {
+ &meson8b_fclk_div4.hw,
+ &meson8b_fclk_div3.hw,
+ &meson8b_fclk_div5.hw,
+ &meson8m2_gp_pll.hw,
+};
+
static struct clk_regmap meson8m2_vpu_0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_VPU_CLK_CNTL,
@@ -2177,8 +2146,8 @@ static struct clk_regmap meson8m2_vpu_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = mmeson8m2_vpu_0_1_parent_hws,
- .num_parents = ARRAY_SIZE(mmeson8m2_vpu_0_1_parent_hws),
+		.parent_hws = meson8m2_vpu_parents,
+		.num_parents = ARRAY_SIZE(meson8m2_vpu_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2233,8 +2202,8 @@ static struct clk_regmap meson8b_vpu_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vpu_0_1_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vpu_0_1_parent_hws),
+ .parent_hws = meson8b_vpu_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vpu_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2248,8 +2217,8 @@ static struct clk_regmap meson8m2_vpu_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = mmeson8m2_vpu_0_1_parent_hws,
- .num_parents = ARRAY_SIZE(mmeson8m2_vpu_0_1_parent_hws),
+		.parent_hws = meson8m2_vpu_parents,
+		.num_parents = ARRAY_SIZE(meson8m2_vpu_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2321,7 +2290,7 @@ static struct clk_regmap meson8b_vpu = {
},
};
-static const struct clk_hw *meson8b_vdec_parent_hws[] = {
+static const struct clk_hw *meson8b_vdec_parents[] = {
&meson8b_fclk_div4.hw,
&meson8b_fclk_div3.hw,
&meson8b_fclk_div5.hw,
@@ -2340,8 +2309,8 @@ static struct clk_regmap meson8b_vdec_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vdec_parent_hws),
+ .parent_hws = meson8b_vdec_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2443,8 +2412,8 @@ static struct clk_regmap meson8b_vdec_hcodec_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_hcodec_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vdec_parent_hws),
+ .parent_hws = meson8b_vdec_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2493,8 +2462,8 @@ static struct clk_regmap meson8b_vdec_2_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_2_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vdec_parent_hws),
+ .parent_hws = meson8b_vdec_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2543,8 +2512,8 @@ static struct clk_regmap meson8b_vdec_hevc_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_hevc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vdec_parent_hws),
+ .parent_hws = meson8b_vdec_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2603,27 +2572,26 @@ static struct clk_regmap meson8b_vdec_hevc = {
};
/* TODO: the clock at index 0 is "DDR_PLL" which we don't support yet */
-static const struct clk_hw *meson8b_cts_amclk_parent_hws[] = {
+static u32 meson8b_cts_mclk_parents_val_table[] = { 1, 2, 3 };
+static const struct clk_hw *meson8b_cts_mclk_parents[] = {
&meson8b_mpll0.hw,
&meson8b_mpll1.hw,
&meson8b_mpll2.hw
};
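
For reference, the shared value table above implies the following parent-to-register mapping (a sketch inferred from the table and the TODO note; register value 0 would select the unsupported DDR_PLL):

/*
 * meson8b_cts_mclk_parents_val_table = { 1, 2, 3 }:
 *   parent index 0 (mpll0) -> register value 1
 *   parent index 1 (mpll1) -> register value 2
 *   parent index 2 (mpll2) -> register value 3
 * Register value 0 (DDR_PLL) is intentionally never written.
 */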
-static u32 meson8b_cts_amclk_mux_table[] = { 1, 2, 3 };
-
static struct clk_regmap meson8b_cts_amclk_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_AUD_CLK_CNTL,
.mask = 0x3,
.shift = 9,
- .table = meson8b_cts_amclk_mux_table,
+ .table = meson8b_cts_mclk_parents_val_table,
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "cts_amclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_cts_amclk_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_cts_amclk_parent_hws),
+ .parent_hws = meson8b_cts_mclk_parents,
+ .num_parents = ARRAY_SIZE(meson8b_cts_mclk_parents),
},
};
@@ -2661,28 +2629,19 @@ static struct clk_regmap meson8b_cts_amclk = {
},
};
-/* TODO: the clock at index 0 is "DDR_PLL" which we don't support yet */
-static const struct clk_hw *meson8b_cts_mclk_i958_parent_hws[] = {
- &meson8b_mpll0.hw,
- &meson8b_mpll1.hw,
- &meson8b_mpll2.hw
-};
-
-static u32 meson8b_cts_mclk_i958_mux_table[] = { 1, 2, 3 };
-
static struct clk_regmap meson8b_cts_mclk_i958_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_AUD_CLK_CNTL2,
.mask = 0x3,
.shift = 25,
- .table = meson8b_cts_mclk_i958_mux_table,
+ .table = meson8b_cts_mclk_parents_val_table,
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data) {
.name = "cts_mclk_i958_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_cts_mclk_i958_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_cts_mclk_i958_parent_hws),
+ .parent_hws = meson8b_cts_mclk_parents,
+ .num_parents = ARRAY_SIZE(meson8b_cts_mclk_parents),
},
};
@@ -2742,113 +2701,128 @@ static struct clk_regmap meson8b_cts_i958 = {
},
};
-#define MESON_GATE(_name, _reg, _bit) \
- MESON_PCLK(_name, _reg, _bit, &meson8b_clk81.hw)
-
-/* Everything Else (EE) domain gates */
-
-static MESON_GATE(meson8b_ddr, HHI_GCLK_MPEG0, 0);
-static MESON_GATE(meson8b_dos, HHI_GCLK_MPEG0, 1);
-static MESON_GATE(meson8b_isa, HHI_GCLK_MPEG0, 5);
-static MESON_GATE(meson8b_pl301, HHI_GCLK_MPEG0, 6);
-static MESON_GATE(meson8b_periphs, HHI_GCLK_MPEG0, 7);
-static MESON_GATE(meson8b_spicc, HHI_GCLK_MPEG0, 8);
-static MESON_GATE(meson8b_i2c, HHI_GCLK_MPEG0, 9);
-static MESON_GATE(meson8b_sar_adc, HHI_GCLK_MPEG0, 10);
-static MESON_GATE(meson8b_smart_card, HHI_GCLK_MPEG0, 11);
-static MESON_GATE(meson8b_rng0, HHI_GCLK_MPEG0, 12);
-static MESON_GATE(meson8b_uart0, HHI_GCLK_MPEG0, 13);
-static MESON_GATE(meson8b_sdhc, HHI_GCLK_MPEG0, 14);
-static MESON_GATE(meson8b_stream, HHI_GCLK_MPEG0, 15);
-static MESON_GATE(meson8b_async_fifo, HHI_GCLK_MPEG0, 16);
-static MESON_GATE(meson8b_sdio, HHI_GCLK_MPEG0, 17);
-static MESON_GATE(meson8b_abuf, HHI_GCLK_MPEG0, 18);
-static MESON_GATE(meson8b_hiu_iface, HHI_GCLK_MPEG0, 19);
-static MESON_GATE(meson8b_assist_misc, HHI_GCLK_MPEG0, 23);
-static MESON_GATE(meson8b_spi, HHI_GCLK_MPEG0, 30);
-
-static MESON_GATE(meson8b_i2s_spdif, HHI_GCLK_MPEG1, 2);
-static MESON_GATE(meson8b_eth, HHI_GCLK_MPEG1, 3);
-static MESON_GATE(meson8b_demux, HHI_GCLK_MPEG1, 4);
-static MESON_GATE(meson8b_blkmv, HHI_GCLK_MPEG1, 14);
-static MESON_GATE(meson8b_aiu, HHI_GCLK_MPEG1, 15);
-static MESON_GATE(meson8b_uart1, HHI_GCLK_MPEG1, 16);
-static MESON_GATE(meson8b_g2d, HHI_GCLK_MPEG1, 20);
-static MESON_GATE(meson8b_usb0, HHI_GCLK_MPEG1, 21);
-static MESON_GATE(meson8b_usb1, HHI_GCLK_MPEG1, 22);
-static MESON_GATE(meson8b_reset, HHI_GCLK_MPEG1, 23);
-static MESON_GATE(meson8b_nand, HHI_GCLK_MPEG1, 24);
-static MESON_GATE(meson8b_dos_parser, HHI_GCLK_MPEG1, 25);
-static MESON_GATE(meson8b_usb, HHI_GCLK_MPEG1, 26);
-static MESON_GATE(meson8b_vdin1, HHI_GCLK_MPEG1, 28);
-static MESON_GATE(meson8b_ahb_arb0, HHI_GCLK_MPEG1, 29);
-static MESON_GATE(meson8b_efuse, HHI_GCLK_MPEG1, 30);
-static MESON_GATE(meson8b_boot_rom, HHI_GCLK_MPEG1, 31);
-
-static MESON_GATE(meson8b_ahb_data_bus, HHI_GCLK_MPEG2, 1);
-static MESON_GATE(meson8b_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2);
-static MESON_GATE(meson8b_hdmi_intr_sync, HHI_GCLK_MPEG2, 3);
-static MESON_GATE(meson8b_hdmi_pclk, HHI_GCLK_MPEG2, 4);
-static MESON_GATE(meson8b_usb1_ddr_bridge, HHI_GCLK_MPEG2, 8);
-static MESON_GATE(meson8b_usb0_ddr_bridge, HHI_GCLK_MPEG2, 9);
-static MESON_GATE(meson8b_mmc_pclk, HHI_GCLK_MPEG2, 11);
-static MESON_GATE(meson8b_dvin, HHI_GCLK_MPEG2, 12);
-static MESON_GATE(meson8b_uart2, HHI_GCLK_MPEG2, 15);
-static MESON_GATE(meson8b_sana, HHI_GCLK_MPEG2, 22);
-static MESON_GATE(meson8b_vpu_intr, HHI_GCLK_MPEG2, 25);
-static MESON_GATE(meson8b_sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26);
-static MESON_GATE(meson8b_clk81_a9, HHI_GCLK_MPEG2, 29);
-
-static MESON_GATE(meson8b_vclk2_venci0, HHI_GCLK_OTHER, 1);
-static MESON_GATE(meson8b_vclk2_venci1, HHI_GCLK_OTHER, 2);
-static MESON_GATE(meson8b_vclk2_vencp0, HHI_GCLK_OTHER, 3);
-static MESON_GATE(meson8b_vclk2_vencp1, HHI_GCLK_OTHER, 4);
-static MESON_GATE(meson8b_gclk_venci_int, HHI_GCLK_OTHER, 8);
-static MESON_GATE(meson8b_gclk_vencp_int, HHI_GCLK_OTHER, 9);
-static MESON_GATE(meson8b_dac_clk, HHI_GCLK_OTHER, 10);
-static MESON_GATE(meson8b_aoclk_gate, HHI_GCLK_OTHER, 14);
-static MESON_GATE(meson8b_iec958_gate, HHI_GCLK_OTHER, 16);
-static MESON_GATE(meson8b_enc480p, HHI_GCLK_OTHER, 20);
-static MESON_GATE(meson8b_rng1, HHI_GCLK_OTHER, 21);
-static MESON_GATE(meson8b_gclk_vencl_int, HHI_GCLK_OTHER, 22);
-static MESON_GATE(meson8b_vclk2_venclmcc, HHI_GCLK_OTHER, 24);
-static MESON_GATE(meson8b_vclk2_vencl, HHI_GCLK_OTHER, 25);
-static MESON_GATE(meson8b_vclk2_other, HHI_GCLK_OTHER, 26);
-static MESON_GATE(meson8b_edp, HHI_GCLK_OTHER, 31);
+static const struct clk_parent_data meson8b_pclk_parents = { .hw = &meson8b_clk81.hw };
+
+#define MESON8B_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(_name, _reg, _bit, &meson8b_pclk_parents, _flags)
+
+/*
+ * Everything Else (EE) domain gates
+ *
+ * NOTE: The gates below are marked with CLK_IGNORE_UNUSED for historic reasons.
+ * Users are encouraged to test without it and submit changes to:
+ * - remove the flag if not necessary
+ * - replace the flag with something more adequate, such as CLK_IS_CRITICAL,
+ * if appropriate.
+ * - add a comment explaining why the use of CLK_IGNORE_UNUSED is desirable
+ * for a particular clock.
+ */
+static MESON8B_PCLK(meson8b_ddr, HHI_GCLK_MPEG0, 0, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_dos, HHI_GCLK_MPEG0, 1, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_isa, HHI_GCLK_MPEG0, 5, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_pl301, HHI_GCLK_MPEG0, 6, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_periphs, HHI_GCLK_MPEG0, 7, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_spicc, HHI_GCLK_MPEG0, 8, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_i2c, HHI_GCLK_MPEG0, 9, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_sar_adc, HHI_GCLK_MPEG0, 10, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_smart_card, HHI_GCLK_MPEG0, 11, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_rng0, HHI_GCLK_MPEG0, 12, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_uart0, HHI_GCLK_MPEG0, 13, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_sdhc, HHI_GCLK_MPEG0, 14, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_stream, HHI_GCLK_MPEG0, 15, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_async_fifo, HHI_GCLK_MPEG0, 16, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_sdio, HHI_GCLK_MPEG0, 17, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_abuf, HHI_GCLK_MPEG0, 18, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_hiu_iface, HHI_GCLK_MPEG0, 19, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_assist_misc, HHI_GCLK_MPEG0, 23, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_spi, HHI_GCLK_MPEG0, 30, CLK_IGNORE_UNUSED);
+
+static MESON8B_PCLK(meson8b_i2s_spdif, HHI_GCLK_MPEG1, 2, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_eth, HHI_GCLK_MPEG1, 3, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_demux, HHI_GCLK_MPEG1, 4, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_blkmv, HHI_GCLK_MPEG1, 14, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_aiu, HHI_GCLK_MPEG1, 15, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_uart1, HHI_GCLK_MPEG1, 16, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_g2d, HHI_GCLK_MPEG1, 20, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_usb0, HHI_GCLK_MPEG1, 21, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_usb1, HHI_GCLK_MPEG1, 22, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_reset, HHI_GCLK_MPEG1, 23, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_nand, HHI_GCLK_MPEG1, 24, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_dos_parser, HHI_GCLK_MPEG1, 25, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_usb, HHI_GCLK_MPEG1, 26, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_vdin1, HHI_GCLK_MPEG1, 28, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_ahb_arb0, HHI_GCLK_MPEG1, 29, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_efuse, HHI_GCLK_MPEG1, 30, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_boot_rom, HHI_GCLK_MPEG1, 31, CLK_IGNORE_UNUSED);
+
+static MESON8B_PCLK(meson8b_ahb_data_bus, HHI_GCLK_MPEG2, 1, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_hdmi_intr_sync, HHI_GCLK_MPEG2, 3, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_hdmi_pclk, HHI_GCLK_MPEG2, 4, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_usb1_ddr_bridge, HHI_GCLK_MPEG2, 8, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_usb0_ddr_bridge, HHI_GCLK_MPEG2, 9, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_mmc_pclk, HHI_GCLK_MPEG2, 11, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_dvin, HHI_GCLK_MPEG2, 12, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_uart2, HHI_GCLK_MPEG2, 15, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_sana, HHI_GCLK_MPEG2, 22, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_vpu_intr, HHI_GCLK_MPEG2, 25, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_clk81_a9, HHI_GCLK_MPEG2, 29, CLK_IGNORE_UNUSED);
+
+static MESON8B_PCLK(meson8b_vclk2_venci0, HHI_GCLK_OTHER, 1, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_vclk2_venci1, HHI_GCLK_OTHER, 2, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_vclk2_vencp0, HHI_GCLK_OTHER, 3, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_vclk2_vencp1, HHI_GCLK_OTHER, 4, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_gclk_venci_int, HHI_GCLK_OTHER, 8, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_gclk_vencp_int, HHI_GCLK_OTHER, 9, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_dac_clk, HHI_GCLK_OTHER, 10, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_aoclk_gate, HHI_GCLK_OTHER, 14, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_iec958_gate, HHI_GCLK_OTHER, 16, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_enc480p, HHI_GCLK_OTHER, 20, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_rng1, HHI_GCLK_OTHER, 21, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_gclk_vencl_int, HHI_GCLK_OTHER, 22, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_vclk2_venclmcc, HHI_GCLK_OTHER, 24, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_vclk2_vencl, HHI_GCLK_OTHER, 25, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_vclk2_other, HHI_GCLK_OTHER, 26, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_edp, HHI_GCLK_OTHER, 31, CLK_IGNORE_UNUSED);
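
For readers unfamiliar with the helper, a MESON8B_PCLK() line above plausibly expands to a gate definition like the sketch below. This assumes MESON_PCLK wraps clk_regmap_gate_data the way the hand-written gates elsewhere in this patch do; the exact macro lives in the shared Meson clock headers.

/* Illustrative expansion of
 * MESON8B_PCLK(meson8b_ddr, HHI_GCLK_MPEG0, 0, CLK_IGNORE_UNUSED) */
static struct clk_regmap meson8b_ddr = {
	.data = &(struct clk_regmap_gate_data){
		.offset = HHI_GCLK_MPEG0,
		.bit_idx = 0,
	},
	.hw.init = &(struct clk_init_data){
		.name = "ddr",
		.ops = &clk_regmap_gate_ops,
		.parent_data = &meson8b_pclk_parents,
		.num_parents = 1,
		.flags = CLK_IGNORE_UNUSED,
	},
};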
/* AIU gates */
-#define MESON_AIU_GLUE_GATE(_name, _reg, _bit) \
- MESON_PCLK(_name, _reg, _bit, &meson8b_aiu_glue.hw)
-
-static MESON_PCLK(meson8b_aiu_glue, HHI_GCLK_MPEG1, 6, &meson8b_aiu.hw);
-static MESON_AIU_GLUE_GATE(meson8b_iec958, HHI_GCLK_MPEG1, 7);
-static MESON_AIU_GLUE_GATE(meson8b_i2s_out, HHI_GCLK_MPEG1, 8);
-static MESON_AIU_GLUE_GATE(meson8b_amclk, HHI_GCLK_MPEG1, 9);
-static MESON_AIU_GLUE_GATE(meson8b_aififo2, HHI_GCLK_MPEG1, 10);
-static MESON_AIU_GLUE_GATE(meson8b_mixer, HHI_GCLK_MPEG1, 11);
-static MESON_AIU_GLUE_GATE(meson8b_mixer_iface, HHI_GCLK_MPEG1, 12);
-static MESON_AIU_GLUE_GATE(meson8b_adc, HHI_GCLK_MPEG1, 13);
+static const struct clk_parent_data meson8b_aiu_glue_parents = { .hw = &meson8b_aiu.hw };
+static MESON_PCLK(meson8b_aiu_glue, HHI_GCLK_MPEG1, 6,
+ &meson8b_aiu_glue_parents, CLK_IGNORE_UNUSED);
+
+static const struct clk_parent_data meson8b_aiu_pclk_parents = { .hw = &meson8b_aiu_glue.hw };
+#define MESON8B_AIU_PCLK(_name, _bit, _flags) \
+ MESON_PCLK(_name, HHI_GCLK_MPEG1, _bit, &meson8b_aiu_pclk_parents, _flags)
+
+static MESON8B_AIU_PCLK(meson8b_iec958, 7, CLK_IGNORE_UNUSED);
+static MESON8B_AIU_PCLK(meson8b_i2s_out, 8, CLK_IGNORE_UNUSED);
+static MESON8B_AIU_PCLK(meson8b_amclk, 9, CLK_IGNORE_UNUSED);
+static MESON8B_AIU_PCLK(meson8b_aififo2, 10, CLK_IGNORE_UNUSED);
+static MESON8B_AIU_PCLK(meson8b_mixer, 11, CLK_IGNORE_UNUSED);
+static MESON8B_AIU_PCLK(meson8b_mixer_iface, 12, CLK_IGNORE_UNUSED);
+static MESON8B_AIU_PCLK(meson8b_adc, 13, CLK_IGNORE_UNUSED);
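
Summarized from the definitions above, the AIU gates form a small hierarchy: the glue gate is parented to the aiu gate, and the leaf gates hang off the glue (all bits in HHI_GCLK_MPEG1):

aiu (bit 15)
└── aiu_glue (bit 6)
    ├── iec958 (bit 7)
    ├── i2s_out (bit 8)
    ├── amclk (bit 9)
    ├── aififo2 (bit 10)
    ├── mixer (bit 11)
    ├── mixer_iface (bit 12)
    └── adc (bit 13)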
/* Always On (AO) domain gates */
-static MESON_GATE(meson8b_ao_media_cpu, HHI_GCLK_AO, 0);
-static MESON_GATE(meson8b_ao_ahb_sram, HHI_GCLK_AO, 1);
-static MESON_GATE(meson8b_ao_ahb_bus, HHI_GCLK_AO, 2);
-static MESON_GATE(meson8b_ao_iface, HHI_GCLK_AO, 3);
+static MESON8B_PCLK(meson8b_ao_media_cpu, HHI_GCLK_AO, 0, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_ao_ahb_sram, HHI_GCLK_AO, 1, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_ao_ahb_bus, HHI_GCLK_AO, 2, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_ao_iface, HHI_GCLK_AO, 3, CLK_IGNORE_UNUSED);
static struct clk_hw *meson8_hw_clks[] = {
- [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
- [CLKID_PLL_VID] = &meson8b_vid_pll.hw,
- [CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
- [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw,
- [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw,
- [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw,
- [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw,
- [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw,
- [CLKID_CPUCLK] = &meson8b_cpu_clk.hw,
- [CLKID_MPEG_SEL] = &meson8b_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &meson8b_mpeg_clk_div.hw,
- [CLKID_CLK81] = &meson8b_clk81.hw,
+ [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
+ [CLKID_PLL_VID] = &meson8b_vid_pll.hw,
+ [CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
+ [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw,
+ [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw,
+ [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw,
+ [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw,
+ [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw,
+ [CLKID_CPUCLK] = &meson8b_cpu_clk.hw,
+ [CLKID_MPEG_SEL] = &meson8b_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &meson8b_clk81_div.hw,
+ [CLKID_CLK81] = &meson8b_clk81.hw,
[CLKID_DDR] = &meson8b_ddr.hw,
[CLKID_DOS] = &meson8b_dos.hw,
[CLKID_ISA] = &meson8b_isa.hw,
@@ -2945,7 +2919,7 @@ static struct clk_hw *meson8_hw_clks[] = {
[CLKID_FCLK_DIV7_DIV] = &meson8b_fclk_div7_div.hw,
[CLKID_NAND_SEL] = &meson8b_nand_clk_sel.hw,
[CLKID_NAND_DIV] = &meson8b_nand_clk_div.hw,
- [CLKID_NAND_CLK] = &meson8b_nand_clk_gate.hw,
+ [CLKID_NAND_CLK] = &meson8b_nand_clk.hw,
[CLKID_PLL_FIXED_DCO] = &meson8b_fixed_pll_dco.hw,
[CLKID_HDMI_PLL_DCO] = &meson8b_hdmi_pll_dco.hw,
[CLKID_PLL_SYS_DCO] = &meson8b_sys_pll_dco.hw,
@@ -2956,14 +2930,14 @@ static struct clk_hw *meson8_hw_clks[] = {
[CLKID_CPU_CLK_DIV6] = &meson8b_cpu_clk_div6.hw,
[CLKID_CPU_CLK_DIV7] = &meson8b_cpu_clk_div7.hw,
[CLKID_CPU_CLK_DIV8] = &meson8b_cpu_clk_div8.hw,
- [CLKID_APB_SEL] = &meson8b_apb_clk_sel.hw,
- [CLKID_APB] = &meson8b_apb_clk_gate.hw,
- [CLKID_PERIPH_SEL] = &meson8b_periph_clk_sel.hw,
- [CLKID_PERIPH] = &meson8b_periph_clk_gate.hw,
- [CLKID_AXI_SEL] = &meson8b_axi_clk_sel.hw,
- [CLKID_AXI] = &meson8b_axi_clk_gate.hw,
- [CLKID_L2_DRAM_SEL] = &meson8b_l2_dram_clk_sel.hw,
- [CLKID_L2_DRAM] = &meson8b_l2_dram_clk_gate.hw,
+ [CLKID_APB_SEL] = &meson8b_apb_sel.hw,
+ [CLKID_APB] = &meson8b_apb.hw,
+ [CLKID_PERIPH_SEL] = &meson8b_periph_sel.hw,
+ [CLKID_PERIPH] = &meson8b_periph.hw,
+ [CLKID_AXI_SEL] = &meson8b_axi_sel.hw,
+ [CLKID_AXI] = &meson8b_axi.hw,
+ [CLKID_L2_DRAM_SEL] = &meson8b_l2_dram_sel.hw,
+ [CLKID_L2_DRAM] = &meson8b_l2_dram.hw,
[CLKID_HDMI_PLL_LVDS_OUT] = &meson8b_hdmi_pll_lvds_out.hw,
[CLKID_HDMI_PLL_HDMI_OUT] = &meson8b_hdmi_pll_hdmi_out.hw,
[CLKID_VID_PLL_IN_SEL] = &meson8b_vid_pll_in_sel.hw,
@@ -2974,27 +2948,27 @@ static struct clk_hw *meson8_hw_clks[] = {
[CLKID_VCLK_IN_SEL] = &meson8b_vclk_in_sel.hw,
[CLKID_VCLK_IN_EN] = &meson8b_vclk_in_en.hw,
[CLKID_VCLK_EN] = &meson8b_vclk_en.hw,
- [CLKID_VCLK_DIV1] = &meson8b_vclk_div1_gate.hw,
+ [CLKID_VCLK_DIV1] = &meson8b_vclk_div1.hw,
[CLKID_VCLK_DIV2_DIV] = &meson8b_vclk_div2_div.hw,
- [CLKID_VCLK_DIV2] = &meson8b_vclk_div2_div_gate.hw,
+ [CLKID_VCLK_DIV2] = &meson8b_vclk_div2.hw,
[CLKID_VCLK_DIV4_DIV] = &meson8b_vclk_div4_div.hw,
- [CLKID_VCLK_DIV4] = &meson8b_vclk_div4_div_gate.hw,
+ [CLKID_VCLK_DIV4] = &meson8b_vclk_div4.hw,
[CLKID_VCLK_DIV6_DIV] = &meson8b_vclk_div6_div.hw,
- [CLKID_VCLK_DIV6] = &meson8b_vclk_div6_div_gate.hw,
+ [CLKID_VCLK_DIV6] = &meson8b_vclk_div6.hw,
[CLKID_VCLK_DIV12_DIV] = &meson8b_vclk_div12_div.hw,
- [CLKID_VCLK_DIV12] = &meson8b_vclk_div12_div_gate.hw,
+ [CLKID_VCLK_DIV12] = &meson8b_vclk_div12.hw,
[CLKID_VCLK2_IN_SEL] = &meson8b_vclk2_in_sel.hw,
- [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_clk_in_en.hw,
- [CLKID_VCLK2_EN] = &meson8b_vclk2_clk_en.hw,
- [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1_gate.hw,
+ [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_in_en.hw,
+ [CLKID_VCLK2_EN] = &meson8b_vclk2_en.hw,
+ [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1.hw,
[CLKID_VCLK2_DIV2_DIV] = &meson8b_vclk2_div2_div.hw,
- [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2_div_gate.hw,
+ [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2.hw,
[CLKID_VCLK2_DIV4_DIV] = &meson8b_vclk2_div4_div.hw,
- [CLKID_VCLK2_DIV4] = &meson8b_vclk2_div4_div_gate.hw,
+ [CLKID_VCLK2_DIV4] = &meson8b_vclk2_div4.hw,
[CLKID_VCLK2_DIV6_DIV] = &meson8b_vclk2_div6_div.hw,
- [CLKID_VCLK2_DIV6] = &meson8b_vclk2_div6_div_gate.hw,
+ [CLKID_VCLK2_DIV6] = &meson8b_vclk2_div6.hw,
[CLKID_VCLK2_DIV12_DIV] = &meson8b_vclk2_div12_div.hw,
- [CLKID_VCLK2_DIV12] = &meson8b_vclk2_div12_div_gate.hw,
+ [CLKID_VCLK2_DIV12] = &meson8b_vclk2_div12.hw,
[CLKID_CTS_ENCT_SEL] = &meson8b_cts_enct_sel.hw,
[CLKID_CTS_ENCT] = &meson8b_cts_enct.hw,
[CLKID_CTS_ENCP_SEL] = &meson8b_cts_encp_sel.hw,
@@ -3041,18 +3015,18 @@ static struct clk_hw *meson8_hw_clks[] = {
};
static struct clk_hw *meson8b_hw_clks[] = {
- [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
- [CLKID_PLL_VID] = &meson8b_vid_pll.hw,
- [CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
- [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw,
- [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw,
- [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw,
- [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw,
- [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw,
- [CLKID_CPUCLK] = &meson8b_cpu_clk.hw,
- [CLKID_MPEG_SEL] = &meson8b_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &meson8b_mpeg_clk_div.hw,
- [CLKID_CLK81] = &meson8b_clk81.hw,
+ [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
+ [CLKID_PLL_VID] = &meson8b_vid_pll.hw,
+ [CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
+ [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw,
+ [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw,
+ [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw,
+ [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw,
+ [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw,
+ [CLKID_CPUCLK] = &meson8b_cpu_clk.hw,
+ [CLKID_MPEG_SEL] = &meson8b_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &meson8b_clk81_div.hw,
+ [CLKID_CLK81] = &meson8b_clk81.hw,
[CLKID_DDR] = &meson8b_ddr.hw,
[CLKID_DOS] = &meson8b_dos.hw,
[CLKID_ISA] = &meson8b_isa.hw,
@@ -3149,7 +3123,7 @@ static struct clk_hw *meson8b_hw_clks[] = {
[CLKID_FCLK_DIV7_DIV] = &meson8b_fclk_div7_div.hw,
[CLKID_NAND_SEL] = &meson8b_nand_clk_sel.hw,
[CLKID_NAND_DIV] = &meson8b_nand_clk_div.hw,
- [CLKID_NAND_CLK] = &meson8b_nand_clk_gate.hw,
+ [CLKID_NAND_CLK] = &meson8b_nand_clk.hw,
[CLKID_PLL_FIXED_DCO] = &meson8b_fixed_pll_dco.hw,
[CLKID_HDMI_PLL_DCO] = &meson8b_hdmi_pll_dco.hw,
[CLKID_PLL_SYS_DCO] = &meson8b_sys_pll_dco.hw,
@@ -3160,14 +3134,14 @@ static struct clk_hw *meson8b_hw_clks[] = {
[CLKID_CPU_CLK_DIV6] = &meson8b_cpu_clk_div6.hw,
[CLKID_CPU_CLK_DIV7] = &meson8b_cpu_clk_div7.hw,
[CLKID_CPU_CLK_DIV8] = &meson8b_cpu_clk_div8.hw,
- [CLKID_APB_SEL] = &meson8b_apb_clk_sel.hw,
- [CLKID_APB] = &meson8b_apb_clk_gate.hw,
- [CLKID_PERIPH_SEL] = &meson8b_periph_clk_sel.hw,
- [CLKID_PERIPH] = &meson8b_periph_clk_gate.hw,
- [CLKID_AXI_SEL] = &meson8b_axi_clk_sel.hw,
- [CLKID_AXI] = &meson8b_axi_clk_gate.hw,
- [CLKID_L2_DRAM_SEL] = &meson8b_l2_dram_clk_sel.hw,
- [CLKID_L2_DRAM] = &meson8b_l2_dram_clk_gate.hw,
+ [CLKID_APB_SEL] = &meson8b_apb_sel.hw,
+ [CLKID_APB] = &meson8b_apb.hw,
+ [CLKID_PERIPH_SEL] = &meson8b_periph_sel.hw,
+ [CLKID_PERIPH] = &meson8b_periph.hw,
+ [CLKID_AXI_SEL] = &meson8b_axi_sel.hw,
+ [CLKID_AXI] = &meson8b_axi.hw,
+ [CLKID_L2_DRAM_SEL] = &meson8b_l2_dram_sel.hw,
+ [CLKID_L2_DRAM] = &meson8b_l2_dram.hw,
[CLKID_HDMI_PLL_LVDS_OUT] = &meson8b_hdmi_pll_lvds_out.hw,
[CLKID_HDMI_PLL_HDMI_OUT] = &meson8b_hdmi_pll_hdmi_out.hw,
[CLKID_VID_PLL_IN_SEL] = &meson8b_vid_pll_in_sel.hw,
@@ -3178,27 +3152,27 @@ static struct clk_hw *meson8b_hw_clks[] = {
[CLKID_VCLK_IN_SEL] = &meson8b_vclk_in_sel.hw,
[CLKID_VCLK_IN_EN] = &meson8b_vclk_in_en.hw,
[CLKID_VCLK_EN] = &meson8b_vclk_en.hw,
- [CLKID_VCLK_DIV1] = &meson8b_vclk_div1_gate.hw,
+ [CLKID_VCLK_DIV1] = &meson8b_vclk_div1.hw,
[CLKID_VCLK_DIV2_DIV] = &meson8b_vclk_div2_div.hw,
- [CLKID_VCLK_DIV2] = &meson8b_vclk_div2_div_gate.hw,
+ [CLKID_VCLK_DIV2] = &meson8b_vclk_div2.hw,
[CLKID_VCLK_DIV4_DIV] = &meson8b_vclk_div4_div.hw,
- [CLKID_VCLK_DIV4] = &meson8b_vclk_div4_div_gate.hw,
+ [CLKID_VCLK_DIV4] = &meson8b_vclk_div4.hw,
[CLKID_VCLK_DIV6_DIV] = &meson8b_vclk_div6_div.hw,
- [CLKID_VCLK_DIV6] = &meson8b_vclk_div6_div_gate.hw,
+ [CLKID_VCLK_DIV6] = &meson8b_vclk_div6.hw,
[CLKID_VCLK_DIV12_DIV] = &meson8b_vclk_div12_div.hw,
- [CLKID_VCLK_DIV12] = &meson8b_vclk_div12_div_gate.hw,
+ [CLKID_VCLK_DIV12] = &meson8b_vclk_div12.hw,
[CLKID_VCLK2_IN_SEL] = &meson8b_vclk2_in_sel.hw,
- [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_clk_in_en.hw,
- [CLKID_VCLK2_EN] = &meson8b_vclk2_clk_en.hw,
- [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1_gate.hw,
+ [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_in_en.hw,
+ [CLKID_VCLK2_EN] = &meson8b_vclk2_en.hw,
+ [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1.hw,
[CLKID_VCLK2_DIV2_DIV] = &meson8b_vclk2_div2_div.hw,
- [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2_div_gate.hw,
+ [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2.hw,
[CLKID_VCLK2_DIV4_DIV] = &meson8b_vclk2_div4_div.hw,
- [CLKID_VCLK2_DIV4] = &meson8b_vclk2_div4_div_gate.hw,
+ [CLKID_VCLK2_DIV4] = &meson8b_vclk2_div4.hw,
[CLKID_VCLK2_DIV6_DIV] = &meson8b_vclk2_div6_div.hw,
- [CLKID_VCLK2_DIV6] = &meson8b_vclk2_div6_div_gate.hw,
+ [CLKID_VCLK2_DIV6] = &meson8b_vclk2_div6.hw,
[CLKID_VCLK2_DIV12_DIV] = &meson8b_vclk2_div12_div.hw,
- [CLKID_VCLK2_DIV12] = &meson8b_vclk2_div12_div_gate.hw,
+ [CLKID_VCLK2_DIV12] = &meson8b_vclk2_div12.hw,
[CLKID_CTS_ENCT_SEL] = &meson8b_cts_enct_sel.hw,
[CLKID_CTS_ENCT] = &meson8b_cts_enct.hw,
[CLKID_CTS_ENCP_SEL] = &meson8b_cts_encp_sel.hw,
@@ -3256,18 +3230,18 @@ static struct clk_hw *meson8b_hw_clks[] = {
};
static struct clk_hw *meson8m2_hw_clks[] = {
- [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
- [CLKID_PLL_VID] = &meson8b_vid_pll.hw,
- [CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
- [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw,
- [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw,
- [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw,
- [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw,
- [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw,
- [CLKID_CPUCLK] = &meson8b_cpu_clk.hw,
- [CLKID_MPEG_SEL] = &meson8b_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &meson8b_mpeg_clk_div.hw,
- [CLKID_CLK81] = &meson8b_clk81.hw,
+ [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
+ [CLKID_PLL_VID] = &meson8b_vid_pll.hw,
+ [CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
+ [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw,
+ [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw,
+ [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw,
+ [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw,
+ [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw,
+ [CLKID_CPUCLK] = &meson8b_cpu_clk.hw,
+ [CLKID_MPEG_SEL] = &meson8b_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &meson8b_clk81_div.hw,
+ [CLKID_CLK81] = &meson8b_clk81.hw,
[CLKID_DDR] = &meson8b_ddr.hw,
[CLKID_DOS] = &meson8b_dos.hw,
[CLKID_ISA] = &meson8b_isa.hw,
@@ -3364,7 +3338,7 @@ static struct clk_hw *meson8m2_hw_clks[] = {
[CLKID_FCLK_DIV7_DIV] = &meson8b_fclk_div7_div.hw,
[CLKID_NAND_SEL] = &meson8b_nand_clk_sel.hw,
[CLKID_NAND_DIV] = &meson8b_nand_clk_div.hw,
- [CLKID_NAND_CLK] = &meson8b_nand_clk_gate.hw,
+ [CLKID_NAND_CLK] = &meson8b_nand_clk.hw,
[CLKID_PLL_FIXED_DCO] = &meson8b_fixed_pll_dco.hw,
[CLKID_HDMI_PLL_DCO] = &meson8b_hdmi_pll_dco.hw,
[CLKID_PLL_SYS_DCO] = &meson8b_sys_pll_dco.hw,
@@ -3375,14 +3349,14 @@ static struct clk_hw *meson8m2_hw_clks[] = {
[CLKID_CPU_CLK_DIV6] = &meson8b_cpu_clk_div6.hw,
[CLKID_CPU_CLK_DIV7] = &meson8b_cpu_clk_div7.hw,
[CLKID_CPU_CLK_DIV8] = &meson8b_cpu_clk_div8.hw,
- [CLKID_APB_SEL] = &meson8b_apb_clk_sel.hw,
- [CLKID_APB] = &meson8b_apb_clk_gate.hw,
- [CLKID_PERIPH_SEL] = &meson8b_periph_clk_sel.hw,
- [CLKID_PERIPH] = &meson8b_periph_clk_gate.hw,
- [CLKID_AXI_SEL] = &meson8b_axi_clk_sel.hw,
- [CLKID_AXI] = &meson8b_axi_clk_gate.hw,
- [CLKID_L2_DRAM_SEL] = &meson8b_l2_dram_clk_sel.hw,
- [CLKID_L2_DRAM] = &meson8b_l2_dram_clk_gate.hw,
+ [CLKID_APB_SEL] = &meson8b_apb_sel.hw,
+ [CLKID_APB] = &meson8b_apb.hw,
+ [CLKID_PERIPH_SEL] = &meson8b_periph_sel.hw,
+ [CLKID_PERIPH] = &meson8b_periph.hw,
+ [CLKID_AXI_SEL] = &meson8b_axi_sel.hw,
+ [CLKID_AXI] = &meson8b_axi.hw,
+ [CLKID_L2_DRAM_SEL] = &meson8b_l2_dram_sel.hw,
+ [CLKID_L2_DRAM] = &meson8b_l2_dram.hw,
[CLKID_HDMI_PLL_LVDS_OUT] = &meson8b_hdmi_pll_lvds_out.hw,
[CLKID_HDMI_PLL_HDMI_OUT] = &meson8b_hdmi_pll_hdmi_out.hw,
[CLKID_VID_PLL_IN_SEL] = &meson8b_vid_pll_in_sel.hw,
@@ -3393,27 +3367,27 @@ static struct clk_hw *meson8m2_hw_clks[] = {
[CLKID_VCLK_IN_SEL] = &meson8b_vclk_in_sel.hw,
[CLKID_VCLK_IN_EN] = &meson8b_vclk_in_en.hw,
[CLKID_VCLK_EN] = &meson8b_vclk_en.hw,
- [CLKID_VCLK_DIV1] = &meson8b_vclk_div1_gate.hw,
+ [CLKID_VCLK_DIV1] = &meson8b_vclk_div1.hw,
[CLKID_VCLK_DIV2_DIV] = &meson8b_vclk_div2_div.hw,
- [CLKID_VCLK_DIV2] = &meson8b_vclk_div2_div_gate.hw,
+ [CLKID_VCLK_DIV2] = &meson8b_vclk_div2.hw,
[CLKID_VCLK_DIV4_DIV] = &meson8b_vclk_div4_div.hw,
- [CLKID_VCLK_DIV4] = &meson8b_vclk_div4_div_gate.hw,
+ [CLKID_VCLK_DIV4] = &meson8b_vclk_div4.hw,
[CLKID_VCLK_DIV6_DIV] = &meson8b_vclk_div6_div.hw,
- [CLKID_VCLK_DIV6] = &meson8b_vclk_div6_div_gate.hw,
+ [CLKID_VCLK_DIV6] = &meson8b_vclk_div6.hw,
[CLKID_VCLK_DIV12_DIV] = &meson8b_vclk_div12_div.hw,
- [CLKID_VCLK_DIV12] = &meson8b_vclk_div12_div_gate.hw,
+ [CLKID_VCLK_DIV12] = &meson8b_vclk_div12.hw,
[CLKID_VCLK2_IN_SEL] = &meson8b_vclk2_in_sel.hw,
- [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_clk_in_en.hw,
- [CLKID_VCLK2_EN] = &meson8b_vclk2_clk_en.hw,
- [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1_gate.hw,
+ [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_in_en.hw,
+ [CLKID_VCLK2_EN] = &meson8b_vclk2_en.hw,
+ [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1.hw,
[CLKID_VCLK2_DIV2_DIV] = &meson8b_vclk2_div2_div.hw,
- [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2_div_gate.hw,
+ [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2.hw,
[CLKID_VCLK2_DIV4_DIV] = &meson8b_vclk2_div4_div.hw,
- [CLKID_VCLK2_DIV4] = &meson8b_vclk2_div4_div_gate.hw,
+ [CLKID_VCLK2_DIV4] = &meson8b_vclk2_div4.hw,
[CLKID_VCLK2_DIV6_DIV] = &meson8b_vclk2_div6_div.hw,
- [CLKID_VCLK2_DIV6] = &meson8b_vclk2_div6_div_gate.hw,
+ [CLKID_VCLK2_DIV6] = &meson8b_vclk2_div6.hw,
[CLKID_VCLK2_DIV12_DIV] = &meson8b_vclk2_div12_div.hw,
- [CLKID_VCLK2_DIV12] = &meson8b_vclk2_div12_div_gate.hw,
+ [CLKID_VCLK2_DIV12] = &meson8b_vclk2_div12.hw,
[CLKID_CTS_ENCT_SEL] = &meson8b_cts_enct_sel.hw,
[CLKID_CTS_ENCT] = &meson8b_cts_enct.hw,
[CLKID_CTS_ENCP_SEL] = &meson8b_cts_encp_sel.hw,
diff --git a/drivers/clk/meson/s4-peripherals.c b/drivers/clk/meson/s4-peripherals.c
index c9400cf54c84..6d69b132d1e1 100644
--- a/drivers/clk/meson/s4-peripherals.c
+++ b/drivers/clk/meson/s4-peripherals.c
@@ -62,6 +62,15 @@
#define CLKCTRL_PWM_CLK_IJ_CTRL 0x190
#define CLKCTRL_DEMOD_CLK_CTRL 0x200
+#define S4_COMP_SEL(_name, _reg, _shift, _mask, _pdata) \
+ MESON_COMP_SEL(s4_, _name, _reg, _shift, _mask, _pdata, NULL, 0, 0)
+
+#define S4_COMP_DIV(_name, _reg, _shift, _width) \
+ MESON_COMP_DIV(s4_, _name, _reg, _shift, _width, 0, CLK_SET_RATE_PARENT)
+
+#define S4_COMP_GATE(_name, _reg, _bit) \
+ MESON_COMP_GATE(s4_, _name, _reg, _bit, CLK_SET_RATE_PARENT)
+
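A hypothetical use of these wrappers (names and field positions invented for illustration, and assuming the MESON_COMP_* helpers declare the clk_regmap variables themselves, as their argument lists suggest) would chain a sel/div/gate composite as:

S4_COMP_SEL(pwm_x, CLKCTRL_PWM_CLK_IJ_CTRL, 9, 0x3, s4_pwm_parents);
S4_COMP_DIV(pwm_x, CLKCTRL_PWM_CLK_IJ_CTRL, 0, 8);
S4_COMP_GATE(pwm_x, CLKCTRL_PWM_CLK_IJ_CTRL, 8);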
static struct clk_regmap s4_rtc_32k_by_oscin_clkin = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_RTC_BY_OSCIN_CTRL0,
@@ -182,8 +191,8 @@ static struct clk_regmap s4_rtc_clk = {
};
/* Index 5 is AXI_CLK, which is dedicated to the AXI bus, so skip it. */
-static u32 mux_table_sys_ab_clk_sel[] = { 0, 1, 2, 3, 4, 6, 7 };
-static const struct clk_parent_data sys_ab_clk_parent_data[] = {
+static u32 s4_sysclk_parents_val_table[] = { 0, 1, 2, 3, 4, 6, 7 };
+static const struct clk_parent_data s4_sysclk_parents[] = {
{ .fw_name = "xtal" },
{ .fw_name = "fclk_div2" },
{ .fw_name = "fclk_div3" },
@@ -205,13 +214,13 @@ static struct clk_regmap s4_sysclk_b_sel = {
.offset = CLKCTRL_SYS_CLK_CTRL0,
.mask = 0x7,
.shift = 26,
- .table = mux_table_sys_ab_clk_sel,
+ .table = s4_sysclk_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "sysclk_b_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_data = sys_ab_clk_parent_data,
- .num_parents = ARRAY_SIZE(sys_ab_clk_parent_data),
+ .parent_data = s4_sysclk_parents,
+ .num_parents = ARRAY_SIZE(s4_sysclk_parents),
},
};
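
The *_val_table arrays renamed throughout this patch pair one register value with each entry of the matching parents array, so the mux writes table[parent_index] rather than the raw index; that is how AXI_CLK's slot (value 5) is skipped here. A minimal sketch of the reverse lookup, modeled on the common clk-mux table handling (the real code lives in the clk_regmap mux ops):

static int s4_mux_val_to_index(const u32 *table, int num_parents, u32 val)
{
	int i;

	for (i = 0; i < num_parents; i++)
		if (table[i] == val) /* { 0, 1, 2, 3, 4, 6, 7 }: val 5 never matches */
			return i;

	return -1; /* register holds a value outside the table */
}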
@@ -251,13 +260,13 @@ static struct clk_regmap s4_sysclk_a_sel = {
.offset = CLKCTRL_SYS_CLK_CTRL0,
.mask = 0x7,
.shift = 10,
- .table = mux_table_sys_ab_clk_sel,
+ .table = s4_sysclk_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "sysclk_a_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_data = sys_ab_clk_parent_data,
- .num_parents = ARRAY_SIZE(sys_ab_clk_parent_data),
+ .parent_data = s4_sysclk_parents,
+ .num_parents = ARRAY_SIZE(s4_sysclk_parents),
},
};
@@ -523,24 +532,24 @@ static struct clk_regmap s4_cecb_32k_clkout = {
},
};
-static const struct clk_parent_data s4_sc_parent_data[] = {
+static const struct clk_parent_data s4_sc_clk_parents[] = {
{ .fw_name = "fclk_div4" },
{ .fw_name = "fclk_div3" },
{ .fw_name = "fclk_div5" },
{ .fw_name = "xtal", }
};
-static struct clk_regmap s4_sc_clk_mux = {
+static struct clk_regmap s4_sc_clk_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_SC_CLK_CTRL,
.mask = 0x3,
.shift = 9,
},
.hw.init = &(struct clk_init_data) {
- .name = "sc_clk_mux",
+ .name = "sc_clk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_sc_parent_data,
- .num_parents = ARRAY_SIZE(s4_sc_parent_data),
+ .parent_data = s4_sc_clk_parents,
+ .num_parents = ARRAY_SIZE(s4_sc_clk_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -555,20 +564,20 @@ static struct clk_regmap s4_sc_clk_div = {
.name = "sc_clk_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_sc_clk_mux.hw
+ &s4_sc_clk_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap s4_sc_clk_gate = {
+static struct clk_regmap s4_sc_clk = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_SC_CLK_CTRL,
.bit_idx = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "sc_clk_gate",
+ .name = "sc_clk",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&s4_sc_clk_div.hw
@@ -578,13 +587,13 @@ static struct clk_regmap s4_sc_clk_gate = {
},
};
-static struct clk_regmap s4_12_24M_clk_gate = {
+static struct clk_regmap s4_12_24M = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_CLK12_24_CTRL,
.bit_idx = 11,
},
.hw.init = &(struct clk_init_data) {
- .name = "12_24m_gate",
+ .name = "12_24M",
.ops = &clk_regmap_gate_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "xtal", }
@@ -593,32 +602,32 @@ static struct clk_regmap s4_12_24M_clk_gate = {
},
};
-static struct clk_fixed_factor s4_12M_clk_div = {
+static struct clk_fixed_factor s4_12M_div = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data){
- .name = "12M",
+ .name = "12M_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_12_24M_clk_gate.hw
+ &s4_12_24M.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap s4_12_24M_clk = {
+static struct clk_regmap s4_12_24M_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_CLK12_24_CTRL,
.mask = 0x1,
.shift = 10,
},
.hw.init = &(struct clk_init_data) {
- .name = "12_24m",
+ .name = "12_24M_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_12_24M_clk_gate.hw,
- &s4_12M_clk_div.hw,
+ &s4_12_24M.hw,
+ &s4_12M_div.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
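
Taken together, the three definitions above form a small tree: the gate feeds both a fixed /2 divider and the final mux, which picks between the 24 MHz and 12 MHz outputs (assuming the customary 24 MHz xtal on these SoCs):

xtal ── s4_12_24M (gate) ──┬──────────────────── s4_12_24M_sel
                           └── s4_12M_div (/2) ──┘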
@@ -687,7 +696,7 @@ static struct clk_regmap s4_vid_pll = {
},
};
-static const struct clk_parent_data s4_vclk_parent_data[] = {
+static const struct clk_parent_data s4_vclk_parents[] = {
{ .hw = &s4_vid_pll.hw },
{ .fw_name = "gp0_pll", },
{ .fw_name = "hifi_pll", },
@@ -707,8 +716,8 @@ static struct clk_regmap s4_vclk_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vclk_parent_data,
- .num_parents = ARRAY_SIZE(s4_vclk_parent_data),
+ .parent_data = s4_vclk_parents,
+ .num_parents = ARRAY_SIZE(s4_vclk_parents),
.flags = 0,
},
};
@@ -722,8 +731,8 @@ static struct clk_regmap s4_vclk2_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vclk_parent_data,
- .num_parents = ARRAY_SIZE(s4_vclk_parent_data),
+ .parent_data = s4_vclk_parents,
+ .num_parents = ARRAY_SIZE(s4_vclk_parents),
.flags = 0,
},
};
@@ -1071,8 +1080,8 @@ static struct clk_fixed_factor s4_vclk2_div12 = {
};
/* Indexes 5, 6 and 7 do not correspond to any real clock, so they are not used. */
-static u32 mux_table_cts_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
-static const struct clk_hw *s4_cts_parent_hws[] = {
+static u32 s4_cts_parents_val_table[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const struct clk_hw *s4_cts_parents[] = {
&s4_vclk_div1.hw,
&s4_vclk_div2.hw,
&s4_vclk_div4.hw,
@@ -1090,13 +1099,13 @@ static struct clk_regmap s4_cts_enci_sel = {
.offset = CLKCTRL_VID_CLK_DIV,
.mask = 0xf,
.shift = 28,
- .table = mux_table_cts_sel,
+ .table = s4_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_enci_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = s4_cts_parent_hws,
- .num_parents = ARRAY_SIZE(s4_cts_parent_hws),
+ .parent_hws = s4_cts_parents,
+ .num_parents = ARRAY_SIZE(s4_cts_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1106,13 +1115,13 @@ static struct clk_regmap s4_cts_encp_sel = {
.offset = CLKCTRL_VID_CLK_DIV,
.mask = 0xf,
.shift = 20,
- .table = mux_table_cts_sel,
+ .table = s4_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_encp_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = s4_cts_parent_hws,
- .num_parents = ARRAY_SIZE(s4_cts_parent_hws),
+ .parent_hws = s4_cts_parents,
+ .num_parents = ARRAY_SIZE(s4_cts_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1122,20 +1131,20 @@ static struct clk_regmap s4_cts_vdac_sel = {
.offset = CLKCTRL_VIID_CLK_DIV,
.mask = 0xf,
.shift = 28,
- .table = mux_table_cts_sel,
+ .table = s4_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_vdac_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = s4_cts_parent_hws,
- .num_parents = ARRAY_SIZE(s4_cts_parent_hws),
+ .parent_hws = s4_cts_parents,
+ .num_parents = ARRAY_SIZE(s4_cts_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
/* Indexes 5, 6 and 7 do not correspond to any real clock, so they are not used. */
-static u32 mux_table_hdmi_tx_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
-static const struct clk_hw *s4_cts_hdmi_tx_parent_hws[] = {
+static u32 s4_hdmi_tx_parents_val_table[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const struct clk_hw *s4_hdmi_tx_parents[] = {
&s4_vclk_div1.hw,
&s4_vclk_div2.hw,
&s4_vclk_div4.hw,
@@ -1153,13 +1162,13 @@ static struct clk_regmap s4_hdmi_tx_sel = {
.offset = CLKCTRL_HDMI_CLK_CTRL,
.mask = 0xf,
.shift = 16,
- .table = mux_table_hdmi_tx_sel,
+ .table = s4_hdmi_tx_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "hdmi_tx_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = s4_cts_hdmi_tx_parent_hws,
- .num_parents = ARRAY_SIZE(s4_cts_hdmi_tx_parent_hws),
+ .parent_hws = s4_hdmi_tx_parents,
+ .num_parents = ARRAY_SIZE(s4_hdmi_tx_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1229,7 +1238,7 @@ static struct clk_regmap s4_hdmi_tx = {
};
/* HDMI Clocks */
-static const struct clk_parent_data s4_hdmi_parent_data[] = {
+static const struct clk_parent_data s4_hdmi_parents[] = {
{ .fw_name = "xtal", },
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div3", },
@@ -1246,8 +1255,8 @@ static struct clk_regmap s4_hdmi_sel = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_hdmi_parent_data,
- .num_parents = ARRAY_SIZE(s4_hdmi_parent_data),
+ .parent_data = s4_hdmi_parents,
+ .num_parents = ARRAY_SIZE(s4_hdmi_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1298,7 +1307,7 @@ static struct clk_regmap s4_ts_clk_div = {
},
};
-static struct clk_regmap s4_ts_clk_gate = {
+static struct clk_regmap s4_ts_clk = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_TS_CLK_CTRL,
.bit_idx = 8,
@@ -1320,7 +1329,7 @@ static struct clk_regmap s4_ts_clk_gate = {
* mux because it does top-to-bottom updates of each clock tree and
* switches to the "inactive" one when CLK_SET_RATE_GATE is set.
*/
-static const struct clk_parent_data s4_mali_0_1_parent_data[] = {
+static const struct clk_parent_data s4_mali_parents[] = {
{ .fw_name = "xtal", },
{ .fw_name = "gp0_pll", },
{ .fw_name = "hifi_pll", },
@@ -1340,8 +1349,8 @@ static struct clk_regmap s4_mali_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_mali_0_1_parent_data,
- .num_parents = ARRAY_SIZE(s4_mali_0_1_parent_data),
+ .parent_data = s4_mali_parents,
+ .num_parents = ARRAY_SIZE(s4_mali_parents),
/*
* Don't request the parent to change the rate because
* all GPU frequencies can be derived from the fclk_*
@@ -1394,8 +1403,8 @@ static struct clk_regmap s4_mali_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_mali_0_1_parent_data,
- .num_parents = ARRAY_SIZE(s4_mali_0_1_parent_data),
+ .parent_data = s4_mali_parents,
+ .num_parents = ARRAY_SIZE(s4_mali_parents),
.flags = 0,
},
};
@@ -1433,28 +1442,26 @@ static struct clk_regmap s4_mali_1 = {
},
};
-static const struct clk_hw *s4_mali_parent_hws[] = {
- &s4_mali_0.hw,
- &s4_mali_1.hw
-};
-
-static struct clk_regmap s4_mali_mux = {
+static struct clk_regmap s4_mali_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_MALI_CLK_CTRL,
.mask = 1,
.shift = 31,
},
.hw.init = &(struct clk_init_data){
- .name = "mali",
+ .name = "mali_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = s4_mali_parent_hws,
+ .parent_hws = (const struct clk_hw *[]) {
+ &s4_mali_0.hw,
+ &s4_mali_1.hw,
+ },
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
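
The comment above (cut short by the hunk boundary) describes the usual Amlogic dual-path arrangement: two identical sel/div/gate branches behind a glitch-free top-level mux, so the idle branch can be reprogrammed and then switched to. A hypothetical consumer-side sequence, for illustration only, using the generic CCF API:

	struct clk *mali_sel, *mali_1; /* hypothetical handles from clk_get() */

	clk_set_rate(mali_1, new_rate);   /* reprogram the inactive branch */
	clk_set_parent(mali_sel, mali_1); /* glitch-free switch to it */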
/* VDEC clocks */
-static const struct clk_parent_data s4_dec_parent_data[] = {
+static const struct clk_parent_data s4_dec_parents[] = {
{ .fw_name = "fclk_div2p5", },
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div4", },
@@ -1465,7 +1472,7 @@ static const struct clk_parent_data s4_dec_parent_data[] = {
{ .fw_name = "xtal", }
};
-static struct clk_regmap s4_vdec_p0_mux = {
+static struct clk_regmap s4_vdec_p0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VDEC_CLK_CTRL,
.mask = 0x7,
@@ -1473,10 +1480,10 @@ static struct clk_regmap s4_vdec_p0_mux = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data) {
- .name = "vdec_p0_mux",
+ .name = "vdec_p0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_dec_parent_data,
- .num_parents = ARRAY_SIZE(s4_dec_parent_data),
+ .parent_data = s4_dec_parents,
+ .num_parents = ARRAY_SIZE(s4_dec_parents),
.flags = 0,
},
};
@@ -1492,7 +1499,7 @@ static struct clk_regmap s4_vdec_p0_div = {
.name = "vdec_p0_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_vdec_p0_mux.hw
+ &s4_vdec_p0_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1515,7 +1522,7 @@ static struct clk_regmap s4_vdec_p0 = {
},
};
-static struct clk_regmap s4_vdec_p1_mux = {
+static struct clk_regmap s4_vdec_p1_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VDEC3_CLK_CTRL,
.mask = 0x7,
@@ -1523,10 +1530,10 @@ static struct clk_regmap s4_vdec_p1_mux = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data) {
- .name = "vdec_p1_mux",
+ .name = "vdec_p1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_dec_parent_data,
- .num_parents = ARRAY_SIZE(s4_dec_parent_data),
+ .parent_data = s4_dec_parents,
+ .num_parents = ARRAY_SIZE(s4_dec_parents),
.flags = 0,
},
};
@@ -1542,7 +1549,7 @@ static struct clk_regmap s4_vdec_p1_div = {
.name = "vdec_p1_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_vdec_p1_mux.hw
+ &s4_vdec_p1_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1565,27 +1572,25 @@ static struct clk_regmap s4_vdec_p1 = {
},
};
-static const struct clk_hw *s4_vdec_mux_parent_hws[] = {
- &s4_vdec_p0.hw,
- &s4_vdec_p1.hw
-};
-
-static struct clk_regmap s4_vdec_mux = {
+static struct clk_regmap s4_vdec_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VDEC3_CLK_CTRL,
.mask = 0x1,
.shift = 15,
},
.hw.init = &(struct clk_init_data) {
- .name = "vdec_mux",
+ .name = "vdec_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = s4_vdec_mux_parent_hws,
- .num_parents = ARRAY_SIZE(s4_vdec_mux_parent_hws),
+ .parent_hws = (const struct clk_hw *[]) {
+ &s4_vdec_p0.hw,
+ &s4_vdec_p1.hw,
+ },
+ .num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap s4_hevcf_p0_mux = {
+static struct clk_regmap s4_hevcf_p0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VDEC2_CLK_CTRL,
.mask = 0x7,
@@ -1593,10 +1598,10 @@ static struct clk_regmap s4_hevcf_p0_mux = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data) {
- .name = "hevcf_p0_mux",
+ .name = "hevcf_p0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_dec_parent_data,
- .num_parents = ARRAY_SIZE(s4_dec_parent_data),
+ .parent_data = s4_dec_parents,
+ .num_parents = ARRAY_SIZE(s4_dec_parents),
.flags = 0,
},
};
@@ -1612,7 +1617,7 @@ static struct clk_regmap s4_hevcf_p0_div = {
.name = "hevcf_p0_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_hevcf_p0_mux.hw
+ &s4_hevcf_p0_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1625,7 +1630,7 @@ static struct clk_regmap s4_hevcf_p0 = {
.bit_idx = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "hevcf_p0_gate",
+ .name = "hevcf_p0",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&s4_hevcf_p0_div.hw
@@ -1635,7 +1640,7 @@ static struct clk_regmap s4_hevcf_p0 = {
},
};
-static struct clk_regmap s4_hevcf_p1_mux = {
+static struct clk_regmap s4_hevcf_p1_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VDEC4_CLK_CTRL,
.mask = 0x7,
@@ -1643,10 +1648,10 @@ static struct clk_regmap s4_hevcf_p1_mux = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data) {
- .name = "hevcf_p1_mux",
+ .name = "hevcf_p1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_dec_parent_data,
- .num_parents = ARRAY_SIZE(s4_dec_parent_data),
+ .parent_data = s4_dec_parents,
+ .num_parents = ARRAY_SIZE(s4_dec_parents),
.flags = 0,
},
};
@@ -1662,7 +1667,7 @@ static struct clk_regmap s4_hevcf_p1_div = {
.name = "hevcf_p1_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_hevcf_p1_mux.hw
+ &s4_hevcf_p1_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1685,28 +1690,26 @@ static struct clk_regmap s4_hevcf_p1 = {
},
};
-static const struct clk_hw *s4_hevcf_mux_parent_hws[] = {
- &s4_hevcf_p0.hw,
- &s4_hevcf_p1.hw
-};
-
-static struct clk_regmap s4_hevcf_mux = {
+static struct clk_regmap s4_hevcf_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VDEC4_CLK_CTRL,
.mask = 0x1,
.shift = 15,
},
.hw.init = &(struct clk_init_data) {
- .name = "hevcf",
+ .name = "hevcf_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = s4_hevcf_mux_parent_hws,
- .num_parents = ARRAY_SIZE(s4_hevcf_mux_parent_hws),
+ .parent_hws = (const struct clk_hw *[]) {
+ &s4_hevcf_p0.hw,
+ &s4_hevcf_p1.hw,
+ },
+ .num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
/* VPU Clock */
-static const struct clk_parent_data s4_vpu_parent_data[] = {
+static const struct clk_parent_data s4_vpu_parents[] = {
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div5", },
@@ -1726,8 +1729,8 @@ static struct clk_regmap s4_vpu_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vpu_parent_data,
- .num_parents = ARRAY_SIZE(s4_vpu_parent_data),
+ .parent_data = s4_vpu_parents,
+ .num_parents = ARRAY_SIZE(s4_vpu_parents),
.flags = 0,
},
};
@@ -1770,8 +1773,8 @@ static struct clk_regmap s4_vpu_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vpu_parent_data,
- .num_parents = ARRAY_SIZE(s4_vpu_parent_data),
+ .parent_data = s4_vpu_parents,
+ .num_parents = ARRAY_SIZE(s4_vpu_parents),
.flags = 0,
},
};
@@ -1823,24 +1826,24 @@ static struct clk_regmap s4_vpu = {
},
};
-static const struct clk_parent_data vpu_clkb_tmp_parent_data[] = {
+static const struct clk_parent_data vpu_clkb_tmp_parents[] = {
{ .hw = &s4_vpu.hw },
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div5", },
{ .fw_name = "fclk_div7", }
};
-static struct clk_regmap s4_vpu_clkb_tmp_mux = {
+static struct clk_regmap s4_vpu_clkb_tmp_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VPU_CLKB_CTRL,
.mask = 0x3,
.shift = 20,
},
.hw.init = &(struct clk_init_data) {
- .name = "vpu_clkb_tmp_mux",
+ .name = "vpu_clkb_tmp_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = vpu_clkb_tmp_parent_data,
- .num_parents = ARRAY_SIZE(vpu_clkb_tmp_parent_data),
+ .parent_data = vpu_clkb_tmp_parents,
+ .num_parents = ARRAY_SIZE(vpu_clkb_tmp_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1855,7 +1858,7 @@ static struct clk_regmap s4_vpu_clkb_tmp_div = {
.name = "vpu_clkb_tmp_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_vpu_clkb_tmp_mux.hw
+ &s4_vpu_clkb_tmp_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1911,7 +1914,7 @@ static struct clk_regmap s4_vpu_clkb = {
},
};
-static const struct clk_parent_data s4_vpu_clkc_parent_data[] = {
+static const struct clk_parent_data s4_vpu_clkc_parents[] = {
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div5", },
@@ -1922,17 +1925,17 @@ static const struct clk_parent_data s4_vpu_clkc_parent_data[] = {
{ .fw_name = "gp0_pll", },
};
-static struct clk_regmap s4_vpu_clkc_p0_mux = {
+static struct clk_regmap s4_vpu_clkc_p0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VPU_CLKC_CTRL,
.mask = 0x7,
.shift = 9,
},
.hw.init = &(struct clk_init_data) {
- .name = "vpu_clkc_p0_mux",
+ .name = "vpu_clkc_p0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vpu_clkc_parent_data,
- .num_parents = ARRAY_SIZE(s4_vpu_clkc_parent_data),
+ .parent_data = s4_vpu_clkc_parents,
+ .num_parents = ARRAY_SIZE(s4_vpu_clkc_parents),
.flags = 0,
},
};
@@ -1947,7 +1950,7 @@ static struct clk_regmap s4_vpu_clkc_p0_div = {
.name = "vpu_clkc_p0_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_vpu_clkc_p0_mux.hw
+ &s4_vpu_clkc_p0_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1970,17 +1973,17 @@ static struct clk_regmap s4_vpu_clkc_p0 = {
},
};
-static struct clk_regmap s4_vpu_clkc_p1_mux = {
+static struct clk_regmap s4_vpu_clkc_p1_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VPU_CLKC_CTRL,
.mask = 0x7,
.shift = 25,
},
.hw.init = &(struct clk_init_data) {
- .name = "vpu_clkc_p1_mux",
+ .name = "vpu_clkc_p1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vpu_clkc_parent_data,
- .num_parents = ARRAY_SIZE(s4_vpu_clkc_parent_data),
+ .parent_data = s4_vpu_clkc_parents,
+ .num_parents = ARRAY_SIZE(s4_vpu_clkc_parents),
.flags = 0,
},
};
@@ -1995,7 +1998,7 @@ static struct clk_regmap s4_vpu_clkc_p1_div = {
.name = "vpu_clkc_p1_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_vpu_clkc_p1_mux.hw
+ &s4_vpu_clkc_p1_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2018,28 +2021,26 @@ static struct clk_regmap s4_vpu_clkc_p1 = {
},
};
-static const struct clk_hw *s4_vpu_mux_parent_hws[] = {
- &s4_vpu_clkc_p0.hw,
- &s4_vpu_clkc_p1.hw
-};
-
-static struct clk_regmap s4_vpu_clkc_mux = {
+static struct clk_regmap s4_vpu_clkc_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VPU_CLKC_CTRL,
.mask = 0x1,
.shift = 31,
},
.hw.init = &(struct clk_init_data) {
- .name = "vpu_clkc_mux",
+ .name = "vpu_clkc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = s4_vpu_mux_parent_hws,
- .num_parents = ARRAY_SIZE(s4_vpu_mux_parent_hws),
+ .parent_hws = (const struct clk_hw *[]) {
+ &s4_vpu_clkc_p0.hw,
+ &s4_vpu_clkc_p1.hw,
+ },
+ .num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
/* VAPB Clock */
-static const struct clk_parent_data s4_vapb_parent_data[] = {
+static const struct clk_parent_data s4_vapb_parents[] = {
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div5", },
@@ -2059,8 +2060,8 @@ static struct clk_regmap s4_vapb_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vapb_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vapb_parent_data,
- .num_parents = ARRAY_SIZE(s4_vapb_parent_data),
+ .parent_data = s4_vapb_parents,
+ .num_parents = ARRAY_SIZE(s4_vapb_parents),
.flags = 0,
},
};
@@ -2107,8 +2108,8 @@ static struct clk_regmap s4_vapb_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vapb_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vapb_parent_data,
- .num_parents = ARRAY_SIZE(s4_vapb_parent_data),
+ .parent_data = s4_vapb_parents,
+ .num_parents = ARRAY_SIZE(s4_vapb_parents),
.flags = 0,
},
};
@@ -2164,13 +2165,13 @@ static struct clk_regmap s4_vapb = {
},
};
-static struct clk_regmap s4_ge2d_gate = {
+static struct clk_regmap s4_ge2d = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_VAPBCLK_CTRL,
.bit_idx = 30,
},
.hw.init = &(struct clk_init_data) {
- .name = "ge2d_clk",
+ .name = "ge2d",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) { &s4_vapb.hw },
.num_parents = 1,
@@ -2178,24 +2179,24 @@ static struct clk_regmap s4_ge2d_gate = {
},
};
-static const struct clk_parent_data s4_esmclk_parent_data[] = {
+static const struct clk_parent_data s4_hdcp22_esmclk_parents[] = {
{ .fw_name = "fclk_div7", },
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div5", },
};
-static struct clk_regmap s4_hdcp22_esmclk_mux = {
+static struct clk_regmap s4_hdcp22_esmclk_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_HDCP22_CTRL,
.mask = 0x3,
.shift = 9,
},
.hw.init = &(struct clk_init_data) {
- .name = "hdcp22_esmclk_mux",
+ .name = "hdcp22_esmclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_esmclk_parent_data,
- .num_parents = ARRAY_SIZE(s4_esmclk_parent_data),
+ .parent_data = s4_hdcp22_esmclk_parents,
+ .num_parents = ARRAY_SIZE(s4_hdcp22_esmclk_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2210,20 +2211,20 @@ static struct clk_regmap s4_hdcp22_esmclk_div = {
.name = "hdcp22_esmclk_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_hdcp22_esmclk_mux.hw
+ &s4_hdcp22_esmclk_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap s4_hdcp22_esmclk_gate = {
+static struct clk_regmap s4_hdcp22_esmclk = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_HDCP22_CTRL,
.bit_idx = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "hdcp22_esmclk_gate",
+ .name = "hdcp22_esmclk",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&s4_hdcp22_esmclk_div.hw
@@ -2233,24 +2234,24 @@ static struct clk_regmap s4_hdcp22_esmclk_gate = {
},
};
-static const struct clk_parent_data s4_skpclk_parent_data[] = {
+static const struct clk_parent_data s4_hdcp22_skpclk_parents[] = {
{ .fw_name = "xtal", },
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div5", },
};
-static struct clk_regmap s4_hdcp22_skpclk_mux = {
+static struct clk_regmap s4_hdcp22_skpclk_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_HDCP22_CTRL,
.mask = 0x3,
.shift = 25,
},
.hw.init = &(struct clk_init_data) {
- .name = "hdcp22_skpclk_mux",
+ .name = "hdcp22_skpclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_skpclk_parent_data,
- .num_parents = ARRAY_SIZE(s4_skpclk_parent_data),
+ .parent_data = s4_hdcp22_skpclk_parents,
+ .num_parents = ARRAY_SIZE(s4_hdcp22_skpclk_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2265,20 +2266,20 @@ static struct clk_regmap s4_hdcp22_skpclk_div = {
.name = "hdcp22_skpclk_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_hdcp22_skpclk_mux.hw
+ &s4_hdcp22_skpclk_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap s4_hdcp22_skpclk_gate = {
+static struct clk_regmap s4_hdcp22_skpclk = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_HDCP22_CTRL,
.bit_idx = 24,
},
.hw.init = &(struct clk_init_data){
- .name = "hdcp22_skpclk_gate",
+ .name = "hdcp22_skpclk",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&s4_hdcp22_skpclk_div.hw
@@ -2288,7 +2289,7 @@ static struct clk_regmap s4_hdcp22_skpclk_gate = {
},
};
-static const struct clk_parent_data s4_vdin_parent_data[] = {
+static const struct clk_parent_data s4_vdin_parents[] = {
{ .fw_name = "xtal", },
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div3", },
@@ -2296,17 +2297,17 @@ static const struct clk_parent_data s4_vdin_parent_data[] = {
{ .hw = &s4_vid_pll.hw }
};
-static struct clk_regmap s4_vdin_meas_mux = {
+static struct clk_regmap s4_vdin_meas_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VDIN_MEAS_CLK_CTRL,
.mask = 0x7,
.shift = 9,
},
.hw.init = &(struct clk_init_data) {
- .name = "vdin_meas_mux",
+ .name = "vdin_meas_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vdin_parent_data,
- .num_parents = ARRAY_SIZE(s4_vdin_parent_data),
+ .parent_data = s4_vdin_parents,
+ .num_parents = ARRAY_SIZE(s4_vdin_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2321,20 +2322,20 @@ static struct clk_regmap s4_vdin_meas_div = {
.name = "vdin_meas_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_vdin_meas_mux.hw
+ &s4_vdin_meas_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap s4_vdin_meas_gate = {
+static struct clk_regmap s4_vdin_meas = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_VDIN_MEAS_CLK_CTRL,
.bit_idx = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "vdin_meas_gate",
+ .name = "vdin_meas",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&s4_vdin_meas_div.hw
@@ -2345,7 +2346,7 @@ static struct clk_regmap s4_vdin_meas_gate = {
};
/* EMMC/NAND clock */
-static const struct clk_parent_data s4_sd_emmc_clk0_parent_data[] = {
+static const struct clk_parent_data s4_sd_emmc_clk0_parents[] = {
{ .fw_name = "xtal", },
{ .fw_name = "fclk_div2", },
{ .fw_name = "fclk_div3", },
@@ -2365,8 +2366,8 @@ static struct clk_regmap s4_sd_emmc_c_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(s4_sd_emmc_clk0_parent_data),
+ .parent_data = s4_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(s4_sd_emmc_clk0_parents),
.flags = 0,
},
};
@@ -2413,8 +2414,8 @@ static struct clk_regmap s4_sd_emmc_a_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_a_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(s4_sd_emmc_clk0_parent_data),
+ .parent_data = s4_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(s4_sd_emmc_clk0_parents),
.flags = 0,
},
};
@@ -2461,8 +2462,8 @@ static struct clk_regmap s4_sd_emmc_b_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(s4_sd_emmc_clk0_parent_data),
+ .parent_data = s4_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(s4_sd_emmc_clk0_parents),
.flags = 0,
},
};
@@ -2501,7 +2502,7 @@ static struct clk_regmap s4_sd_emmc_b_clk0 = {
};
/* SPICC Clock */
-static const struct clk_parent_data s4_spicc_parent_data[] = {
+static const struct clk_parent_data s4_spicc_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &s4_sys_clk.hw },
{ .fw_name = "fclk_div4", },
@@ -2511,17 +2512,17 @@ static const struct clk_parent_data s4_spicc_parent_data[] = {
{ .fw_name = "fclk_div7", },
};
-static struct clk_regmap s4_spicc0_mux = {
+static struct clk_regmap s4_spicc0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_SPICC_CLK_CTRL,
.mask = 0x7,
.shift = 7,
},
.hw.init = &(struct clk_init_data) {
- .name = "spicc0_mux",
+ .name = "spicc0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_spicc_parent_data,
- .num_parents = ARRAY_SIZE(s4_spicc_parent_data),
+ .parent_data = s4_spicc_parents,
+ .num_parents = ARRAY_SIZE(s4_spicc_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2536,20 +2537,20 @@ static struct clk_regmap s4_spicc0_div = {
.name = "spicc0_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_spicc0_mux.hw
+ &s4_spicc0_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap s4_spicc0_gate = {
+static struct clk_regmap s4_spicc0_en = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_SPICC_CLK_CTRL,
.bit_idx = 6,
},
.hw.init = &(struct clk_init_data){
- .name = "spicc0",
+ .name = "spicc0_en",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&s4_spicc0_div.hw
@@ -2560,500 +2561,61 @@ static struct clk_regmap s4_spicc0_gate = {
};
/* PWM Clock */
-static const struct clk_parent_data s4_pwm_parent_data[] = {
+static const struct clk_parent_data s4_pwm_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &s4_vid_pll.hw },
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div3", },
};
-static struct clk_regmap s4_pwm_a_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_AB_CTRL,
- .mask = 0x3,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_a_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
-
-static struct clk_regmap s4_pwm_a_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_AB_CTRL,
- .shift = 0,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_a_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_a_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_a_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_AB_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_a_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_a_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_b_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_AB_CTRL,
- .mask = 0x3,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_b_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
-
-static struct clk_regmap s4_pwm_b_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_AB_CTRL,
- .shift = 16,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_b_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_b_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_b_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_AB_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_b_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_b_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_c_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_CD_CTRL,
- .mask = 0x3,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_c_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
-
-static struct clk_regmap s4_pwm_c_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_CD_CTRL,
- .shift = 0,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_c_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_c_mux.hw
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_regmap s4_pwm_c_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_CD_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_c_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_c_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_d_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_CD_CTRL,
- .mask = 0x3,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_d_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
-
-static struct clk_regmap s4_pwm_d_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_CD_CTRL,
- .shift = 16,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_d_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_d_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_d_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_CD_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_d_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_d_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_e_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_EF_CTRL,
- .mask = 0x3,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_e_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
-
-static struct clk_regmap s4_pwm_e_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_EF_CTRL,
- .shift = 0,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_e_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_e_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_e_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_EF_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_e_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_e_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_f_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_EF_CTRL,
- .mask = 0x3,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_f_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
+static S4_COMP_SEL(pwm_a, CLKCTRL_PWM_CLK_AB_CTRL, 9, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_a, CLKCTRL_PWM_CLK_AB_CTRL, 0, 8);
+static S4_COMP_GATE(pwm_a, CLKCTRL_PWM_CLK_AB_CTRL, 8);
-static struct clk_regmap s4_pwm_f_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_EF_CTRL,
- .shift = 16,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_f_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_f_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static S4_COMP_SEL(pwm_b, CLKCTRL_PWM_CLK_AB_CTRL, 25, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_b, CLKCTRL_PWM_CLK_AB_CTRL, 16, 8);
+static S4_COMP_GATE(pwm_b, CLKCTRL_PWM_CLK_AB_CTRL, 24);
-static struct clk_regmap s4_pwm_f_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_EF_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_f_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_f_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static S4_COMP_SEL(pwm_c, CLKCTRL_PWM_CLK_CD_CTRL, 9, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_c, CLKCTRL_PWM_CLK_CD_CTRL, 0, 8);
+static S4_COMP_GATE(pwm_c, CLKCTRL_PWM_CLK_CD_CTRL, 8);
-static struct clk_regmap s4_pwm_g_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_GH_CTRL,
- .mask = 0x3,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_g_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
+static S4_COMP_SEL(pwm_d, CLKCTRL_PWM_CLK_CD_CTRL, 25, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_d, CLKCTRL_PWM_CLK_CD_CTRL, 16, 8);
+static S4_COMP_GATE(pwm_d, CLKCTRL_PWM_CLK_CD_CTRL, 24);
-static struct clk_regmap s4_pwm_g_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_GH_CTRL,
- .shift = 0,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_g_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_g_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static S4_COMP_SEL(pwm_e, CLKCTRL_PWM_CLK_EF_CTRL, 9, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_e, CLKCTRL_PWM_CLK_EF_CTRL, 0, 8);
+static S4_COMP_GATE(pwm_e, CLKCTRL_PWM_CLK_EF_CTRL, 8);
-static struct clk_regmap s4_pwm_g_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_GH_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_g_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_g_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static S4_COMP_SEL(pwm_f, CLKCTRL_PWM_CLK_EF_CTRL, 25, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_f, CLKCTRL_PWM_CLK_EF_CTRL, 16, 8);
+static S4_COMP_GATE(pwm_f, CLKCTRL_PWM_CLK_EF_CTRL, 24);
-static struct clk_regmap s4_pwm_h_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_GH_CTRL,
- .mask = 0x3,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_h_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
+static S4_COMP_SEL(pwm_g, CLKCTRL_PWM_CLK_GH_CTRL, 9, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_g, CLKCTRL_PWM_CLK_GH_CTRL, 0, 8);
+static S4_COMP_GATE(pwm_g, CLKCTRL_PWM_CLK_GH_CTRL, 8);
-static struct clk_regmap s4_pwm_h_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_GH_CTRL,
- .shift = 16,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_h_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_h_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static S4_COMP_SEL(pwm_h, CLKCTRL_PWM_CLK_GH_CTRL, 25, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_h, CLKCTRL_PWM_CLK_GH_CTRL, 16, 8);
+static S4_COMP_GATE(pwm_h, CLKCTRL_PWM_CLK_GH_CTRL, 24);
-static struct clk_regmap s4_pwm_h_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_GH_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_h_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_h_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static S4_COMP_SEL(pwm_i, CLKCTRL_PWM_CLK_IJ_CTRL, 9, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_i, CLKCTRL_PWM_CLK_IJ_CTRL, 0, 8);
+static S4_COMP_GATE(pwm_i, CLKCTRL_PWM_CLK_IJ_CTRL, 8);
-static struct clk_regmap s4_pwm_i_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_IJ_CTRL,
- .mask = 0x3,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_i_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
+static S4_COMP_SEL(pwm_j, CLKCTRL_PWM_CLK_IJ_CTRL, 25, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_j, CLKCTRL_PWM_CLK_IJ_CTRL, 16, 8);
+static S4_COMP_GATE(pwm_j, CLKCTRL_PWM_CLK_IJ_CTRL, 24);
-static struct clk_regmap s4_pwm_i_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_IJ_CTRL,
- .shift = 0,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_i_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_i_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_i_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_IJ_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_i_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_i_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_j_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_IJ_CTRL,
- .mask = 0x3,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_j_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
-
-static struct clk_regmap s4_pwm_j_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_IJ_CTRL,
- .shift = 16,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_j_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_j_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_j_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_IJ_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_j_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_j_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
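The S4_COMP_SEL/S4_COMP_DIV/S4_COMP_GATE helpers taking over from the open-coded pwm structs above are introduced earlier in this series. Judging purely from the structs they replace, the sel variant plausibly expands along the following lines; the macro body and name stringification here are assumptions, not the real definition:

/* Hypothetical sketch of S4_COMP_SEL, inferred from the removed structs. */
#define S4_COMP_SEL(_name, _reg, _shift, _mask, _pdata)			\
struct clk_regmap s4_##_name##_sel = {					\
	.data = &(struct clk_regmap_mux_data) {				\
		.offset = (_reg),					\
		.mask = (_mask),					\
		.shift = (_shift),					\
	},								\
	.hw.init = &(struct clk_init_data) {				\
		.name = #_name "_sel",					\
		.ops = &clk_regmap_mux_ops,				\
		.parent_data = (_pdata),				\
		.num_parents = ARRAY_SIZE(_pdata),			\
	},								\
}

S4_COMP_DIV and S4_COMP_GATE would follow the same shape around struct clk_regmap_div_data and struct clk_regmap_gate_data, each chaining parent_hws to the previous stage exactly as the hdcp22 and vdin_meas blocks above still do by hand.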
-static struct clk_regmap s4_saradc_mux = {
+static struct clk_regmap s4_saradc_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = CLKCTRL_SAR_CLK_CTRL,
.mask = 0x3,
.shift = 9,
},
.hw.init = &(struct clk_init_data){
- .name = "saradc_mux",
+ .name = "saradc_sel",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "xtal", },
@@ -3074,20 +2636,20 @@ static struct clk_regmap s4_saradc_div = {
.name = "saradc_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_saradc_mux.hw
+ &s4_saradc_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap s4_saradc_gate = {
+static struct clk_regmap s4_saradc = {
.data = &(struct clk_regmap_gate_data) {
.offset = CLKCTRL_SAR_CLK_CTRL,
.bit_idx = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "saradc_clk",
+ .name = "saradc",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&s4_saradc_div.hw
@@ -3102,9 +2664,8 @@ static struct clk_regmap s4_saradc_gate = {
* corresponding clock sources are not described in the clock tree and internal clock
* for debug, so they are skipped.
*/
-static u32 s4_gen_clk_mux_table[] = { 0, 4, 5, 7, 19, 21, 22,
- 23, 24, 25, 26, 27, 28 };
-static const struct clk_parent_data s4_gen_clk_parent_data[] = {
+static u32 s4_gen_clk_parents_val_table[] = { 0, 4, 5, 7, 19, 21, 22, 23, 24, 25, 26, 27, 28 };
+static const struct clk_parent_data s4_gen_clk_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &s4_vid_pll.hw },
{ .fw_name = "gp0_pll", },
@@ -3125,13 +2686,13 @@ static struct clk_regmap s4_gen_clk_sel = {
.offset = CLKCTRL_GEN_CLK_CTRL,
.mask = 0x1f,
.shift = 12,
- .table = s4_gen_clk_mux_table,
+ .table = s4_gen_clk_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "gen_clk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_gen_clk_parent_data,
- .num_parents = ARRAY_SIZE(s4_gen_clk_parent_data),
+ .parent_data = s4_gen_clk_parents,
+ .num_parents = ARRAY_SIZE(s4_gen_clk_parents),
/*
* Because the GEN clock can be connected to an external pad
* and may be set up directly from the device tree. Don't
@@ -3174,61 +2735,75 @@ static struct clk_regmap s4_gen_clk = {
},
};
-#define MESON_GATE(_name, _reg, _bit) \
- MESON_PCLK(_name, _reg, _bit, &s4_sys_clk.hw)
-
-static MESON_GATE(s4_ddr, CLKCTRL_SYS_CLK_EN0_REG0, 0);
-static MESON_GATE(s4_dos, CLKCTRL_SYS_CLK_EN0_REG0, 1);
-static MESON_GATE(s4_ethphy, CLKCTRL_SYS_CLK_EN0_REG0, 4);
-static MESON_GATE(s4_mali, CLKCTRL_SYS_CLK_EN0_REG0, 6);
-static MESON_GATE(s4_aocpu, CLKCTRL_SYS_CLK_EN0_REG0, 13);
-static MESON_GATE(s4_aucpu, CLKCTRL_SYS_CLK_EN0_REG0, 14);
-static MESON_GATE(s4_cec, CLKCTRL_SYS_CLK_EN0_REG0, 16);
-static MESON_GATE(s4_sdemmca, CLKCTRL_SYS_CLK_EN0_REG0, 24);
-static MESON_GATE(s4_sdemmcb, CLKCTRL_SYS_CLK_EN0_REG0, 25);
-static MESON_GATE(s4_nand, CLKCTRL_SYS_CLK_EN0_REG0, 26);
-static MESON_GATE(s4_smartcard, CLKCTRL_SYS_CLK_EN0_REG0, 27);
-static MESON_GATE(s4_acodec, CLKCTRL_SYS_CLK_EN0_REG0, 28);
-static MESON_GATE(s4_spifc, CLKCTRL_SYS_CLK_EN0_REG0, 29);
-static MESON_GATE(s4_msr_clk, CLKCTRL_SYS_CLK_EN0_REG0, 30);
-static MESON_GATE(s4_ir_ctrl, CLKCTRL_SYS_CLK_EN0_REG0, 31);
-static MESON_GATE(s4_audio, CLKCTRL_SYS_CLK_EN0_REG1, 0);
-static MESON_GATE(s4_eth, CLKCTRL_SYS_CLK_EN0_REG1, 3);
-static MESON_GATE(s4_uart_a, CLKCTRL_SYS_CLK_EN0_REG1, 5);
-static MESON_GATE(s4_uart_b, CLKCTRL_SYS_CLK_EN0_REG1, 6);
-static MESON_GATE(s4_uart_c, CLKCTRL_SYS_CLK_EN0_REG1, 7);
-static MESON_GATE(s4_uart_d, CLKCTRL_SYS_CLK_EN0_REG1, 8);
-static MESON_GATE(s4_uart_e, CLKCTRL_SYS_CLK_EN0_REG1, 9);
-static MESON_GATE(s4_aififo, CLKCTRL_SYS_CLK_EN0_REG1, 11);
-static MESON_GATE(s4_ts_ddr, CLKCTRL_SYS_CLK_EN0_REG1, 15);
-static MESON_GATE(s4_ts_pll, CLKCTRL_SYS_CLK_EN0_REG1, 16);
-static MESON_GATE(s4_g2d, CLKCTRL_SYS_CLK_EN0_REG1, 20);
-static MESON_GATE(s4_spicc0, CLKCTRL_SYS_CLK_EN0_REG1, 21);
-static MESON_GATE(s4_usb, CLKCTRL_SYS_CLK_EN0_REG1, 26);
-static MESON_GATE(s4_i2c_m_a, CLKCTRL_SYS_CLK_EN0_REG1, 30);
-static MESON_GATE(s4_i2c_m_b, CLKCTRL_SYS_CLK_EN0_REG1, 31);
-static MESON_GATE(s4_i2c_m_c, CLKCTRL_SYS_CLK_EN0_REG2, 0);
-static MESON_GATE(s4_i2c_m_d, CLKCTRL_SYS_CLK_EN0_REG2, 1);
-static MESON_GATE(s4_i2c_m_e, CLKCTRL_SYS_CLK_EN0_REG2, 2);
-static MESON_GATE(s4_hdmitx_apb, CLKCTRL_SYS_CLK_EN0_REG2, 4);
-static MESON_GATE(s4_i2c_s_a, CLKCTRL_SYS_CLK_EN0_REG2, 5);
-static MESON_GATE(s4_usb1_to_ddr, CLKCTRL_SYS_CLK_EN0_REG2, 8);
-static MESON_GATE(s4_hdcp22, CLKCTRL_SYS_CLK_EN0_REG2, 10);
-static MESON_GATE(s4_mmc_apb, CLKCTRL_SYS_CLK_EN0_REG2, 11);
-static MESON_GATE(s4_rsa, CLKCTRL_SYS_CLK_EN0_REG2, 18);
-static MESON_GATE(s4_cpu_debug, CLKCTRL_SYS_CLK_EN0_REG2, 19);
-static MESON_GATE(s4_vpu_intr, CLKCTRL_SYS_CLK_EN0_REG2, 25);
-static MESON_GATE(s4_demod, CLKCTRL_SYS_CLK_EN0_REG2, 27);
-static MESON_GATE(s4_sar_adc, CLKCTRL_SYS_CLK_EN0_REG2, 28);
-static MESON_GATE(s4_gic, CLKCTRL_SYS_CLK_EN0_REG2, 30);
-static MESON_GATE(s4_pwm_ab, CLKCTRL_SYS_CLK_EN0_REG3, 7);
-static MESON_GATE(s4_pwm_cd, CLKCTRL_SYS_CLK_EN0_REG3, 8);
-static MESON_GATE(s4_pwm_ef, CLKCTRL_SYS_CLK_EN0_REG3, 9);
-static MESON_GATE(s4_pwm_gh, CLKCTRL_SYS_CLK_EN0_REG3, 10);
-static MESON_GATE(s4_pwm_ij, CLKCTRL_SYS_CLK_EN0_REG3, 11);
+static const struct clk_parent_data s4_pclk_parents = { .hw = &s4_sys_clk.hw };
+
+#define S4_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(_name, _reg, _bit, &s4_pclk_parents, _flags)
+
+/*
+ * NOTE: The gates below are marked with CLK_IGNORE_UNUSED for historical
+ * reasons. Users are encouraged to test without it and submit changes to:
+ * - remove the flag if not necessary,
+ * - replace the flag with something more adequate, such as CLK_IS_CRITICAL,
+ * if appropriate.
+ * - add a comment explaining why the use of CLK_IGNORE_UNUSED is desirable
+ * for a particular clock.
+ */
+static S4_PCLK(s4_ddr, CLKCTRL_SYS_CLK_EN0_REG0, 0, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_dos, CLKCTRL_SYS_CLK_EN0_REG0, 1, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_ethphy, CLKCTRL_SYS_CLK_EN0_REG0, 4, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_mali, CLKCTRL_SYS_CLK_EN0_REG0, 6, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_aocpu, CLKCTRL_SYS_CLK_EN0_REG0, 13, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_aucpu, CLKCTRL_SYS_CLK_EN0_REG0, 14, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_cec, CLKCTRL_SYS_CLK_EN0_REG0, 16, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_sdemmca, CLKCTRL_SYS_CLK_EN0_REG0, 24, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_sdemmcb, CLKCTRL_SYS_CLK_EN0_REG0, 25, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_nand, CLKCTRL_SYS_CLK_EN0_REG0, 26, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_smartcard, CLKCTRL_SYS_CLK_EN0_REG0, 27, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_acodec, CLKCTRL_SYS_CLK_EN0_REG0, 28, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_spifc, CLKCTRL_SYS_CLK_EN0_REG0, 29, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_msr_clk, CLKCTRL_SYS_CLK_EN0_REG0, 30, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_ir_ctrl, CLKCTRL_SYS_CLK_EN0_REG0, 31, CLK_IGNORE_UNUSED);
+
+static S4_PCLK(s4_audio, CLKCTRL_SYS_CLK_EN0_REG1, 0, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_eth, CLKCTRL_SYS_CLK_EN0_REG1, 3, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_uart_a, CLKCTRL_SYS_CLK_EN0_REG1, 5, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_uart_b, CLKCTRL_SYS_CLK_EN0_REG1, 6, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_uart_c, CLKCTRL_SYS_CLK_EN0_REG1, 7, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_uart_d, CLKCTRL_SYS_CLK_EN0_REG1, 8, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_uart_e, CLKCTRL_SYS_CLK_EN0_REG1, 9, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_aififo, CLKCTRL_SYS_CLK_EN0_REG1, 11, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_ts_ddr, CLKCTRL_SYS_CLK_EN0_REG1, 15, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_ts_pll, CLKCTRL_SYS_CLK_EN0_REG1, 16, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_g2d, CLKCTRL_SYS_CLK_EN0_REG1, 20, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_spicc0, CLKCTRL_SYS_CLK_EN0_REG1, 21, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_usb, CLKCTRL_SYS_CLK_EN0_REG1, 26, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_i2c_m_a, CLKCTRL_SYS_CLK_EN0_REG1, 30, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_i2c_m_b, CLKCTRL_SYS_CLK_EN0_REG1, 31, CLK_IGNORE_UNUSED);
+
+static S4_PCLK(s4_i2c_m_c, CLKCTRL_SYS_CLK_EN0_REG2, 0, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_i2c_m_d, CLKCTRL_SYS_CLK_EN0_REG2, 1, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_i2c_m_e, CLKCTRL_SYS_CLK_EN0_REG2, 2, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_hdmitx_apb, CLKCTRL_SYS_CLK_EN0_REG2, 4, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_i2c_s_a, CLKCTRL_SYS_CLK_EN0_REG2, 5, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_usb1_to_ddr, CLKCTRL_SYS_CLK_EN0_REG2, 8, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_hdcp22, CLKCTRL_SYS_CLK_EN0_REG2, 10, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_mmc_apb, CLKCTRL_SYS_CLK_EN0_REG2, 11, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_rsa, CLKCTRL_SYS_CLK_EN0_REG2, 18, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_cpu_debug, CLKCTRL_SYS_CLK_EN0_REG2, 19, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_vpu_intr, CLKCTRL_SYS_CLK_EN0_REG2, 25, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_demod, CLKCTRL_SYS_CLK_EN0_REG2, 27, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_sar_adc, CLKCTRL_SYS_CLK_EN0_REG2, 28, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_gic, CLKCTRL_SYS_CLK_EN0_REG2, 30, CLK_IGNORE_UNUSED);
+
+static S4_PCLK(s4_pwm_ab, CLKCTRL_SYS_CLK_EN0_REG3, 7, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_pwm_cd, CLKCTRL_SYS_CLK_EN0_REG3, 8, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_pwm_ef, CLKCTRL_SYS_CLK_EN0_REG3, 9, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_pwm_gh, CLKCTRL_SYS_CLK_EN0_REG3, 10, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_pwm_ij, CLKCTRL_SYS_CLK_EN0_REG3, 11, CLK_IGNORE_UNUSED);
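MESON_PCLK() itself is not visible in this hunk. Since the S4_PCLK() wrapper above now hands it a clk_parent_data pointer and a flags argument instead of a bare clk_hw pointer, a sketch of its likely expansion follows; this is an assumption, and the real definition lives in the shared Meson clock headers:

/* Hypothetical expansion of MESON_PCLK(); not the actual header code. */
#define MESON_PCLK(_name, _reg, _bit, _pdata, _flags)			\
struct clk_regmap _name = {						\
	.data = &(struct clk_regmap_gate_data) {			\
		.offset = (_reg),					\
		.bit_idx = (_bit),					\
	},								\
	.hw.init = &(struct clk_init_data) {				\
		.name = #_name,						\
		.ops = &clk_regmap_gate_ops,				\
		.parent_data = (_pdata),				\
		.num_parents = 1,					\
		.flags = (_flags),					\
	},								\
}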
/* Array of all clocks provided by this provider */
-static struct clk_hw *s4_periphs_hw_clks[] = {
+static struct clk_hw *s4_peripherals_hw_clks[] = {
[CLKID_RTC_32K_CLKIN] = &s4_rtc_32k_by_oscin_clkin.hw,
[CLKID_RTC_32K_DIV] = &s4_rtc_32k_by_oscin_div.hw,
[CLKID_RTC_32K_SEL] = &s4_rtc_32k_by_oscin_sel.hw,
@@ -3251,12 +2826,12 @@ static struct clk_hw *s4_periphs_hw_clks[] = {
[CLKID_CECB_32K_SEL_PRE] = &s4_cecb_32k_sel_pre.hw,
[CLKID_CECB_32K_SEL] = &s4_cecb_32k_sel.hw,
[CLKID_CECB_32K_CLKOUT] = &s4_cecb_32k_clkout.hw,
- [CLKID_SC_CLK_SEL] = &s4_sc_clk_mux.hw,
+ [CLKID_SC_CLK_SEL] = &s4_sc_clk_sel.hw,
[CLKID_SC_CLK_DIV] = &s4_sc_clk_div.hw,
- [CLKID_SC] = &s4_sc_clk_gate.hw,
- [CLKID_12_24M] = &s4_12_24M_clk_gate.hw,
- [CLKID_12M_CLK_DIV] = &s4_12M_clk_div.hw,
- [CLKID_12_24M_CLK_SEL] = &s4_12_24M_clk.hw,
+ [CLKID_SC] = &s4_sc_clk.hw,
+ [CLKID_12_24M] = &s4_12_24M.hw,
+ [CLKID_12M_CLK_DIV] = &s4_12M_div.hw,
+ [CLKID_12_24M_CLK_SEL] = &s4_12_24M_sel.hw,
[CLKID_VID_PLL_DIV] = &s4_vid_pll_div.hw,
[CLKID_VID_PLL_SEL] = &s4_vid_pll_sel.hw,
[CLKID_VID_PLL] = &s4_vid_pll.hw,
@@ -3298,28 +2873,28 @@ static struct clk_hw *s4_periphs_hw_clks[] = {
[CLKID_HDMI_DIV] = &s4_hdmi_div.hw,
[CLKID_HDMI] = &s4_hdmi.hw,
[CLKID_TS_CLK_DIV] = &s4_ts_clk_div.hw,
- [CLKID_TS] = &s4_ts_clk_gate.hw,
+ [CLKID_TS] = &s4_ts_clk.hw,
[CLKID_MALI_0_SEL] = &s4_mali_0_sel.hw,
[CLKID_MALI_0_DIV] = &s4_mali_0_div.hw,
[CLKID_MALI_0] = &s4_mali_0.hw,
[CLKID_MALI_1_SEL] = &s4_mali_1_sel.hw,
[CLKID_MALI_1_DIV] = &s4_mali_1_div.hw,
[CLKID_MALI_1] = &s4_mali_1.hw,
- [CLKID_MALI_SEL] = &s4_mali_mux.hw,
- [CLKID_VDEC_P0_SEL] = &s4_vdec_p0_mux.hw,
+ [CLKID_MALI_SEL] = &s4_mali_sel.hw,
+ [CLKID_VDEC_P0_SEL] = &s4_vdec_p0_sel.hw,
[CLKID_VDEC_P0_DIV] = &s4_vdec_p0_div.hw,
[CLKID_VDEC_P0] = &s4_vdec_p0.hw,
- [CLKID_VDEC_P1_SEL] = &s4_vdec_p1_mux.hw,
+ [CLKID_VDEC_P1_SEL] = &s4_vdec_p1_sel.hw,
[CLKID_VDEC_P1_DIV] = &s4_vdec_p1_div.hw,
[CLKID_VDEC_P1] = &s4_vdec_p1.hw,
- [CLKID_VDEC_SEL] = &s4_vdec_mux.hw,
- [CLKID_HEVCF_P0_SEL] = &s4_hevcf_p0_mux.hw,
+ [CLKID_VDEC_SEL] = &s4_vdec_sel.hw,
+ [CLKID_HEVCF_P0_SEL] = &s4_hevcf_p0_sel.hw,
[CLKID_HEVCF_P0_DIV] = &s4_hevcf_p0_div.hw,
[CLKID_HEVCF_P0] = &s4_hevcf_p0.hw,
- [CLKID_HEVCF_P1_SEL] = &s4_hevcf_p1_mux.hw,
+ [CLKID_HEVCF_P1_SEL] = &s4_hevcf_p1_sel.hw,
[CLKID_HEVCF_P1_DIV] = &s4_hevcf_p1_div.hw,
[CLKID_HEVCF_P1] = &s4_hevcf_p1.hw,
- [CLKID_HEVCF_SEL] = &s4_hevcf_mux.hw,
+ [CLKID_HEVCF_SEL] = &s4_hevcf_sel.hw,
[CLKID_VPU_0_SEL] = &s4_vpu_0_sel.hw,
[CLKID_VPU_0_DIV] = &s4_vpu_0_div.hw,
[CLKID_VPU_0] = &s4_vpu_0.hw,
@@ -3327,18 +2902,18 @@ static struct clk_hw *s4_periphs_hw_clks[] = {
[CLKID_VPU_1_DIV] = &s4_vpu_1_div.hw,
[CLKID_VPU_1] = &s4_vpu_1.hw,
[CLKID_VPU] = &s4_vpu.hw,
- [CLKID_VPU_CLKB_TMP_SEL] = &s4_vpu_clkb_tmp_mux.hw,
+ [CLKID_VPU_CLKB_TMP_SEL] = &s4_vpu_clkb_tmp_sel.hw,
[CLKID_VPU_CLKB_TMP_DIV] = &s4_vpu_clkb_tmp_div.hw,
[CLKID_VPU_CLKB_TMP] = &s4_vpu_clkb_tmp.hw,
[CLKID_VPU_CLKB_DIV] = &s4_vpu_clkb_div.hw,
[CLKID_VPU_CLKB] = &s4_vpu_clkb.hw,
- [CLKID_VPU_CLKC_P0_SEL] = &s4_vpu_clkc_p0_mux.hw,
+ [CLKID_VPU_CLKC_P0_SEL] = &s4_vpu_clkc_p0_sel.hw,
[CLKID_VPU_CLKC_P0_DIV] = &s4_vpu_clkc_p0_div.hw,
[CLKID_VPU_CLKC_P0] = &s4_vpu_clkc_p0.hw,
- [CLKID_VPU_CLKC_P1_SEL] = &s4_vpu_clkc_p1_mux.hw,
+ [CLKID_VPU_CLKC_P1_SEL] = &s4_vpu_clkc_p1_sel.hw,
[CLKID_VPU_CLKC_P1_DIV] = &s4_vpu_clkc_p1_div.hw,
[CLKID_VPU_CLKC_P1] = &s4_vpu_clkc_p1.hw,
- [CLKID_VPU_CLKC_SEL] = &s4_vpu_clkc_mux.hw,
+ [CLKID_VPU_CLKC_SEL] = &s4_vpu_clkc_sel.hw,
[CLKID_VAPB_0_SEL] = &s4_vapb_0_sel.hw,
[CLKID_VAPB_0_DIV] = &s4_vapb_0_div.hw,
[CLKID_VAPB_0] = &s4_vapb_0.hw,
@@ -3346,10 +2921,10 @@ static struct clk_hw *s4_periphs_hw_clks[] = {
[CLKID_VAPB_1_DIV] = &s4_vapb_1_div.hw,
[CLKID_VAPB_1] = &s4_vapb_1.hw,
[CLKID_VAPB] = &s4_vapb.hw,
- [CLKID_GE2D] = &s4_ge2d_gate.hw,
- [CLKID_VDIN_MEAS_SEL] = &s4_vdin_meas_mux.hw,
+ [CLKID_GE2D] = &s4_ge2d.hw,
+ [CLKID_VDIN_MEAS_SEL] = &s4_vdin_meas_sel.hw,
[CLKID_VDIN_MEAS_DIV] = &s4_vdin_meas_div.hw,
- [CLKID_VDIN_MEAS] = &s4_vdin_meas_gate.hw,
+ [CLKID_VDIN_MEAS] = &s4_vdin_meas.hw,
[CLKID_SD_EMMC_C_CLK_SEL] = &s4_sd_emmc_c_clk0_sel.hw,
[CLKID_SD_EMMC_C_CLK_DIV] = &s4_sd_emmc_c_clk0_div.hw,
[CLKID_SD_EMMC_C] = &s4_sd_emmc_c_clk0.hw,
@@ -3359,42 +2934,42 @@ static struct clk_hw *s4_periphs_hw_clks[] = {
[CLKID_SD_EMMC_B_CLK_SEL] = &s4_sd_emmc_b_clk0_sel.hw,
[CLKID_SD_EMMC_B_CLK_DIV] = &s4_sd_emmc_b_clk0_div.hw,
[CLKID_SD_EMMC_B] = &s4_sd_emmc_b_clk0.hw,
- [CLKID_SPICC0_SEL] = &s4_spicc0_mux.hw,
+ [CLKID_SPICC0_SEL] = &s4_spicc0_sel.hw,
[CLKID_SPICC0_DIV] = &s4_spicc0_div.hw,
- [CLKID_SPICC0_EN] = &s4_spicc0_gate.hw,
- [CLKID_PWM_A_SEL] = &s4_pwm_a_mux.hw,
+ [CLKID_SPICC0_EN] = &s4_spicc0_en.hw,
+ [CLKID_PWM_A_SEL] = &s4_pwm_a_sel.hw,
[CLKID_PWM_A_DIV] = &s4_pwm_a_div.hw,
- [CLKID_PWM_A] = &s4_pwm_a_gate.hw,
- [CLKID_PWM_B_SEL] = &s4_pwm_b_mux.hw,
+ [CLKID_PWM_A] = &s4_pwm_a.hw,
+ [CLKID_PWM_B_SEL] = &s4_pwm_b_sel.hw,
[CLKID_PWM_B_DIV] = &s4_pwm_b_div.hw,
- [CLKID_PWM_B] = &s4_pwm_b_gate.hw,
- [CLKID_PWM_C_SEL] = &s4_pwm_c_mux.hw,
+ [CLKID_PWM_B] = &s4_pwm_b.hw,
+ [CLKID_PWM_C_SEL] = &s4_pwm_c_sel.hw,
[CLKID_PWM_C_DIV] = &s4_pwm_c_div.hw,
- [CLKID_PWM_C] = &s4_pwm_c_gate.hw,
- [CLKID_PWM_D_SEL] = &s4_pwm_d_mux.hw,
+ [CLKID_PWM_C] = &s4_pwm_c.hw,
+ [CLKID_PWM_D_SEL] = &s4_pwm_d_sel.hw,
[CLKID_PWM_D_DIV] = &s4_pwm_d_div.hw,
- [CLKID_PWM_D] = &s4_pwm_d_gate.hw,
- [CLKID_PWM_E_SEL] = &s4_pwm_e_mux.hw,
+ [CLKID_PWM_D] = &s4_pwm_d.hw,
+ [CLKID_PWM_E_SEL] = &s4_pwm_e_sel.hw,
[CLKID_PWM_E_DIV] = &s4_pwm_e_div.hw,
- [CLKID_PWM_E] = &s4_pwm_e_gate.hw,
- [CLKID_PWM_F_SEL] = &s4_pwm_f_mux.hw,
+ [CLKID_PWM_E] = &s4_pwm_e.hw,
+ [CLKID_PWM_F_SEL] = &s4_pwm_f_sel.hw,
[CLKID_PWM_F_DIV] = &s4_pwm_f_div.hw,
- [CLKID_PWM_F] = &s4_pwm_f_gate.hw,
- [CLKID_PWM_G_SEL] = &s4_pwm_g_mux.hw,
+ [CLKID_PWM_F] = &s4_pwm_f.hw,
+ [CLKID_PWM_G_SEL] = &s4_pwm_g_sel.hw,
[CLKID_PWM_G_DIV] = &s4_pwm_g_div.hw,
- [CLKID_PWM_G] = &s4_pwm_g_gate.hw,
- [CLKID_PWM_H_SEL] = &s4_pwm_h_mux.hw,
+ [CLKID_PWM_G] = &s4_pwm_g.hw,
+ [CLKID_PWM_H_SEL] = &s4_pwm_h_sel.hw,
[CLKID_PWM_H_DIV] = &s4_pwm_h_div.hw,
- [CLKID_PWM_H] = &s4_pwm_h_gate.hw,
- [CLKID_PWM_I_SEL] = &s4_pwm_i_mux.hw,
+ [CLKID_PWM_H] = &s4_pwm_h.hw,
+ [CLKID_PWM_I_SEL] = &s4_pwm_i_sel.hw,
[CLKID_PWM_I_DIV] = &s4_pwm_i_div.hw,
- [CLKID_PWM_I] = &s4_pwm_i_gate.hw,
- [CLKID_PWM_J_SEL] = &s4_pwm_j_mux.hw,
+ [CLKID_PWM_I] = &s4_pwm_i.hw,
+ [CLKID_PWM_J_SEL] = &s4_pwm_j_sel.hw,
[CLKID_PWM_J_DIV] = &s4_pwm_j_div.hw,
- [CLKID_PWM_J] = &s4_pwm_j_gate.hw,
- [CLKID_SARADC_SEL] = &s4_saradc_mux.hw,
+ [CLKID_PWM_J] = &s4_pwm_j.hw,
+ [CLKID_SARADC_SEL] = &s4_saradc_sel.hw,
[CLKID_SARADC_DIV] = &s4_saradc_div.hw,
- [CLKID_SARADC] = &s4_saradc_gate.hw,
+ [CLKID_SARADC] = &s4_saradc.hw,
[CLKID_GEN_SEL] = &s4_gen_clk_sel.hw,
[CLKID_GEN_DIV] = &s4_gen_clk_div.hw,
[CLKID_GEN] = &s4_gen_clk.hw,
@@ -3447,73 +3022,38 @@ static struct clk_hw *s4_periphs_hw_clks[] = {
[CLKID_PWM_EF] = &s4_pwm_ef.hw,
[CLKID_PWM_GH] = &s4_pwm_gh.hw,
[CLKID_PWM_IJ] = &s4_pwm_ij.hw,
- [CLKID_HDCP22_ESMCLK_SEL] = &s4_hdcp22_esmclk_mux.hw,
+ [CLKID_HDCP22_ESMCLK_SEL] = &s4_hdcp22_esmclk_sel.hw,
[CLKID_HDCP22_ESMCLK_DIV] = &s4_hdcp22_esmclk_div.hw,
- [CLKID_HDCP22_ESMCLK] = &s4_hdcp22_esmclk_gate.hw,
- [CLKID_HDCP22_SKPCLK_SEL] = &s4_hdcp22_skpclk_mux.hw,
+ [CLKID_HDCP22_ESMCLK] = &s4_hdcp22_esmclk.hw,
+ [CLKID_HDCP22_SKPCLK_SEL] = &s4_hdcp22_skpclk_sel.hw,
[CLKID_HDCP22_SKPCLK_DIV] = &s4_hdcp22_skpclk_div.hw,
- [CLKID_HDCP22_SKPCLK] = &s4_hdcp22_skpclk_gate.hw,
-};
-
-static const struct regmap_config clkc_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = CLKCTRL_DEMOD_CLK_CTRL,
+ [CLKID_HDCP22_SKPCLK] = &s4_hdcp22_skpclk.hw,
};
-static struct meson_clk_hw_data s4_periphs_clks = {
- .hws = s4_periphs_hw_clks,
- .num = ARRAY_SIZE(s4_periphs_hw_clks),
+static const struct meson_clkc_data s4_peripherals_clkc_data = {
+ .hw_clks = {
+ .hws = s4_peripherals_hw_clks,
+ .num = ARRAY_SIZE(s4_peripherals_hw_clks),
+ },
};
-static int meson_s4_periphs_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct regmap *regmap;
- void __iomem *base;
- int ret, i;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return dev_err_probe(dev, PTR_ERR(base),
- "can't ioremap resource\n");
-
- regmap = devm_regmap_init_mmio(dev, base, &clkc_regmap_config);
- if (IS_ERR(regmap))
- return dev_err_probe(dev, PTR_ERR(regmap),
- "can't init regmap mmio region\n");
-
- for (i = 0; i < s4_periphs_clks.num; i++) {
- /* array might be sparse */
- if (!s4_periphs_clks.hws[i])
- continue;
-
- ret = devm_clk_hw_register(dev, s4_periphs_clks.hws[i]);
- if (ret)
- return dev_err_probe(dev, ret,
- "clock[%d] registration failed\n", i);
- }
-
- return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get, &s4_periphs_clks);
-}
-
-static const struct of_device_id clkc_match_table[] = {
+static const struct of_device_id s4_peripherals_clkc_match_table[] = {
{
.compatible = "amlogic,s4-peripherals-clkc",
+ .data = &s4_peripherals_clkc_data,
},
{}
};
-MODULE_DEVICE_TABLE(of, clkc_match_table);
+MODULE_DEVICE_TABLE(of, s4_peripherals_clkc_match_table);
-static struct platform_driver s4_driver = {
- .probe = meson_s4_periphs_probe,
+static struct platform_driver s4_peripherals_clkc_driver = {
+ .probe = meson_clkc_mmio_probe,
.driver = {
- .name = "s4-periphs-clkc",
- .of_match_table = clkc_match_table,
+ .name = "s4-peripherals-clkc",
+ .of_match_table = s4_peripherals_clkc_match_table,
},
};
-module_platform_driver(s4_driver);
+module_platform_driver(s4_peripherals_clkc_driver);
MODULE_DESCRIPTION("Amlogic S4 Peripherals Clock Controller driver");
MODULE_AUTHOR("Yu Tu <yu.tu@amlogic.com>");
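The probe removed above and the near-identical one removed from s4-pll.c below both collapse into the shared meson_clkc_mmio_probe(). A rough reconstruction of what that helper must do, pieced together from the removed code, is shown here; how the regmap_config (previously per-driver, each with its own max_register) reaches the helper is an assumption, and meson_clkc_regmap_config is a made-up name:

/* Reconstruction for illustration only; mirrors the removed probes. */
static int meson_clkc_mmio_probe(struct platform_device *pdev)
{
	const struct meson_clkc_data *data;
	struct device *dev = &pdev->dev;
	struct regmap *regmap;
	void __iomem *base;
	int ret, i;

	data = device_get_match_data(dev);
	if (!data)
		return -EINVAL;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return dev_err_probe(dev, PTR_ERR(base), "can't ioremap resource\n");

	regmap = devm_regmap_init_mmio(dev, base, &meson_clkc_regmap_config);
	if (IS_ERR(regmap))
		return dev_err_probe(dev, PTR_ERR(regmap), "can't init regmap mmio region\n");

	if (data->init_regs) {
		ret = regmap_multi_reg_write(regmap, data->init_regs, data->init_count);
		if (ret)
			return dev_err_probe(dev, ret, "Failed to init registers\n");
	}

	for (i = 0; i < data->hw_clks.num; i++) {
		/* array might be sparse */
		if (!data->hw_clks.hws[i])
			continue;

		ret = devm_clk_hw_register(dev, data->hw_clks.hws[i]);
		if (ret)
			return dev_err_probe(dev, ret, "clock[%d] registration failed\n", i);
	}

	return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get,
					   (void *)&data->hw_clks);
}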
diff --git a/drivers/clk/meson/s4-pll.c b/drivers/clk/meson/s4-pll.c
index 3d689d2f003e..56ce6f566e53 100644
--- a/drivers/clk/meson/s4-pll.c
+++ b/drivers/clk/meson/s4-pll.c
@@ -281,7 +281,7 @@ static const struct pll_mult_range s4_gp0_pll_mult_range = {
/*
* Internal gp0 pll emulation configuration parameters
*/
-static const struct reg_sequence s4_gp0_init_regs[] = {
+static const struct reg_sequence s4_gp0_pll_init_regs[] = {
{ .reg = ANACTRL_GP0PLL_CTRL1, .def = 0x00000000 },
{ .reg = ANACTRL_GP0PLL_CTRL2, .def = 0x00000000 },
{ .reg = ANACTRL_GP0PLL_CTRL3, .def = 0x48681c00 },
@@ -318,8 +318,8 @@ static struct clk_regmap s4_gp0_pll_dco = {
.width = 1,
},
.range = &s4_gp0_pll_mult_range,
- .init_regs = s4_gp0_init_regs,
- .init_count = ARRAY_SIZE(s4_gp0_init_regs),
+ .init_regs = s4_gp0_pll_init_regs,
+ .init_count = ARRAY_SIZE(s4_gp0_pll_init_regs),
},
.hw.init = &(struct clk_init_data){
.name = "gp0_pll_dco",
@@ -353,7 +353,7 @@ static struct clk_regmap s4_gp0_pll = {
/*
* Internal hifi pll emulation configuration parameters
*/
-static const struct reg_sequence s4_hifi_init_regs[] = {
+static const struct reg_sequence s4_hifi_pll_init_regs[] = {
{ .reg = ANACTRL_HIFIPLL_CTRL2, .def = 0x00000000 },
{ .reg = ANACTRL_HIFIPLL_CTRL3, .def = 0x6a285c00 },
{ .reg = ANACTRL_HIFIPLL_CTRL4, .def = 0x65771290 },
@@ -394,8 +394,8 @@ static struct clk_regmap s4_hifi_pll_dco = {
.width = 1,
},
.range = &s4_gp0_pll_mult_range,
- .init_regs = s4_hifi_init_regs,
- .init_count = ARRAY_SIZE(s4_hifi_init_regs),
+ .init_regs = s4_hifi_pll_init_regs,
+ .init_count = ARRAY_SIZE(s4_hifi_pll_init_regs),
.frac_max = 100000,
.flags = CLK_MESON_PLL_ROUND_CLOSEST,
},
@@ -794,76 +794,36 @@ static struct clk_hw *s4_pll_hw_clks[] = {
[CLKID_MPLL3] = &s4_mpll3.hw,
};
-static const struct reg_sequence s4_init_regs[] = {
+static const struct reg_sequence s4_pll_init_regs[] = {
{ .reg = ANACTRL_MPLL_CTRL0, .def = 0x00000543 },
};
-static const struct regmap_config clkc_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = ANACTRL_HDMIPLL_CTRL0,
-};
-
-static struct meson_clk_hw_data s4_pll_clks = {
- .hws = s4_pll_hw_clks,
- .num = ARRAY_SIZE(s4_pll_hw_clks),
+static const struct meson_clkc_data s4_pll_clkc_data = {
+ .hw_clks = {
+ .hws = s4_pll_hw_clks,
+ .num = ARRAY_SIZE(s4_pll_hw_clks),
+ },
+ .init_regs = s4_pll_init_regs,
+ .init_count = ARRAY_SIZE(s4_pll_init_regs),
};
-static int meson_s4_pll_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct regmap *regmap;
- void __iomem *base;
- int ret, i;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return dev_err_probe(dev, PTR_ERR(base),
- "can't ioremap resource\n");
-
- regmap = devm_regmap_init_mmio(dev, base, &clkc_regmap_config);
- if (IS_ERR(regmap))
- return dev_err_probe(dev, PTR_ERR(regmap),
- "can't init regmap mmio region\n");
-
- ret = regmap_multi_reg_write(regmap, s4_init_regs, ARRAY_SIZE(s4_init_regs));
- if (ret)
- return dev_err_probe(dev, ret,
- "Failed to init registers\n");
-
- /* Register clocks */
- for (i = 0; i < s4_pll_clks.num; i++) {
- /* array might be sparse */
- if (!s4_pll_clks.hws[i])
- continue;
-
- ret = devm_clk_hw_register(dev, s4_pll_clks.hws[i]);
- if (ret)
- return dev_err_probe(dev, ret,
- "clock[%d] registration failed\n", i);
- }
-
- return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get,
- &s4_pll_clks);
-}
-
-static const struct of_device_id clkc_match_table[] = {
+static const struct of_device_id s4_pll_clkc_match_table[] = {
{
.compatible = "amlogic,s4-pll-clkc",
+ .data = &s4_pll_clkc_data,
},
{}
};
-MODULE_DEVICE_TABLE(of, clkc_match_table);
+MODULE_DEVICE_TABLE(of, s4_pll_clkc_match_table);
-static struct platform_driver s4_driver = {
- .probe = meson_s4_pll_probe,
+static struct platform_driver s4_pll_clkc_driver = {
+ .probe = meson_clkc_mmio_probe,
.driver = {
.name = "s4-pll-clkc",
- .of_match_table = clkc_match_table,
+ .of_match_table = s4_pll_clkc_match_table,
},
};
-module_platform_driver(s4_driver);
+module_platform_driver(s4_pll_clkc_driver);
MODULE_DESCRIPTION("Amlogic S4 PLL Clock Controller driver");
MODULE_AUTHOR("Yu Tu <yu.tu@amlogic.com>");
diff --git a/drivers/clk/microchip/clk-core.c b/drivers/clk/microchip/clk-core.c
index 6fbc6dc50ca3..b34348d491f3 100644
--- a/drivers/clk/microchip/clk-core.c
+++ b/drivers/clk/microchip/clk-core.c
@@ -155,11 +155,13 @@ static unsigned long pbclk_recalc_rate(struct clk_hw *hw,
return parent_rate / pbclk_read_pbdiv(pb);
}
-static long pbclk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int pbclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return calc_best_divided_rate(rate, *parent_rate,
- PB_DIV_MAX, PB_DIV_MIN);
+ req->rate = calc_best_divided_rate(req->rate, req->best_parent_rate,
+ PB_DIV_MAX, PB_DIV_MIN);
+
+ return 0;
}
static int pbclk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -207,7 +209,7 @@ const struct clk_ops pic32_pbclk_ops = {
.disable = pbclk_disable,
.is_enabled = pbclk_is_enabled,
.recalc_rate = pbclk_recalc_rate,
- .round_rate = pbclk_round_rate,
+ .determine_rate = pbclk_determine_rate,
.set_rate = pbclk_set_rate,
};
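This pbclk hunk is the template for every conversion that follows in this patch: the old .round_rate callback received the parent rate by pointer and returned the rounded rate, while .determine_rate fills req->rate in place and returns a status code (a negative return now signals an error and leaves the request alone, as the dove divider conversion further down shows). Schematically, with clk_foo_best_rate() as a made-up helper:

/* Before: result returned directly, parent rate passed by pointer. */
static long clk_foo_round_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long *parent_rate)
{
	return clk_foo_best_rate(rate, *parent_rate);
}

/* After: result stored in the request, return value is a status code. */
static int clk_foo_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	req->rate = clk_foo_best_rate(req->rate, req->best_parent_rate);

	return 0;
}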
@@ -372,18 +374,6 @@ static unsigned long roclk_recalc_rate(struct clk_hw *hw,
return roclk_calc_rate(parent_rate, rodiv, rotrim);
}
-static long roclk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- u32 rotrim, rodiv;
-
- /* calculate dividers for new rate */
- roclk_calc_div_trim(rate, *parent_rate, &rodiv, &rotrim);
-
- /* caclulate new rate (rounding) based on new rodiv & rotrim */
- return roclk_calc_rate(*parent_rate, rodiv, rotrim);
-}
-
static int roclk_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
@@ -394,6 +384,8 @@ static int roclk_determine_rate(struct clk_hw *hw,
/* find a parent which can generate nearest clkrate >= rate */
for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+ u32 rotrim, rodiv;
+
/* get parent */
parent_clk = clk_hw_get_parent_by_index(hw, i);
if (!parent_clk)
@@ -404,7 +396,12 @@ static int roclk_determine_rate(struct clk_hw *hw,
if (req->rate > parent_rate)
continue;
- nearest_rate = roclk_round_rate(hw, req->rate, &parent_rate);
+ /* calculate dividers for new rate */
+ roclk_calc_div_trim(req->rate, req->best_parent_rate, &rodiv, &rotrim);
+
+	/* calculate new rate (rounding) based on new rodiv & rotrim */
+ nearest_rate = roclk_calc_rate(req->best_parent_rate, rodiv, rotrim);
+
delta = abs(nearest_rate - req->rate);
if ((nearest_rate >= req->rate) && (delta < best_delta)) {
best_parent_clk = parent_clk;
@@ -665,12 +662,15 @@ static unsigned long spll_clk_recalc_rate(struct clk_hw *hw,
return rate64;
}
-static long spll_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int spll_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct pic32_sys_pll *pll = clkhw_to_spll(hw);
- return spll_calc_mult_div(pll, rate, *parent_rate, NULL, NULL);
+ req->rate = spll_calc_mult_div(pll, req->rate, req->best_parent_rate,
+ NULL, NULL);
+
+ return 0;
}
static int spll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -725,7 +725,7 @@ static int spll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
/* SPLL clock operation */
const struct clk_ops pic32_spll_ops = {
.recalc_rate = spll_clk_recalc_rate,
- .round_rate = spll_clk_round_rate,
+ .determine_rate = spll_clk_determine_rate,
.set_rate = spll_clk_set_rate,
};
@@ -780,10 +780,13 @@ static unsigned long sclk_get_rate(struct clk_hw *hw, unsigned long parent_rate)
return parent_rate / div;
}
-static long sclk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int sclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return calc_best_divided_rate(rate, *parent_rate, SLEW_SYSDIV, 1);
+ req->rate = calc_best_divided_rate(req->rate, req->best_parent_rate,
+ SLEW_SYSDIV, 1);
+
+ return 0;
}
static int sclk_set_rate(struct clk_hw *hw,
@@ -909,7 +912,7 @@ static int sclk_init(struct clk_hw *hw)
const struct clk_ops pic32_sclk_ops = {
.get_parent = sclk_get_parent,
.set_parent = sclk_set_parent,
- .round_rate = sclk_round_rate,
+ .determine_rate = sclk_determine_rate,
.set_rate = sclk_set_rate,
.recalc_rate = sclk_get_rate,
.init = sclk_init,
diff --git a/drivers/clk/mmp/Kconfig b/drivers/clk/mmp/Kconfig
new file mode 100644
index 000000000000..b0d2fea3cda5
--- /dev/null
+++ b/drivers/clk/mmp/Kconfig
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config COMMON_CLK_PXA1908
+ bool "Clock driver for Marvell PXA1908"
+ depends on ARCH_MMP || COMPILE_TEST
+ depends on OF
+ default y if ARCH_MMP && ARM64
+ select AUXILIARY_BUS
+ help
+ This driver supports the Marvell PXA1908 SoC clocks.
diff --git a/drivers/clk/mmp/Makefile b/drivers/clk/mmp/Makefile
index 062cd87fa8dd..0a94f2f08563 100644
--- a/drivers/clk/mmp/Makefile
+++ b/drivers/clk/mmp/Makefile
@@ -11,4 +11,7 @@ obj-$(CONFIG_MACH_MMP_DT) += clk-of-pxa168.o clk-of-pxa910.o
obj-$(CONFIG_COMMON_CLK_MMP2) += clk-of-mmp2.o clk-pll.o pwr-island.o
obj-$(CONFIG_COMMON_CLK_MMP2_AUDIO) += clk-audio.o
-obj-$(CONFIG_ARCH_MMP) += clk-of-pxa1928.o clk-pxa1908-apbc.o clk-pxa1908-apbcp.o clk-pxa1908-apmu.o clk-pxa1908-mpmu.o
+obj-$(CONFIG_COMMON_CLK_PXA1908) += clk-pxa1908-apbc.o clk-pxa1908-apbcp.o \
+ clk-pxa1908-mpmu.o clk-pxa1908-apmu.o
+
+obj-$(CONFIG_ARCH_MMP) += clk-of-pxa1928.o
diff --git a/drivers/clk/mmp/clk-audio.c b/drivers/clk/mmp/clk-audio.c
index 88d798d510cd..ed27fc796c94 100644
--- a/drivers/clk/mmp/clk-audio.c
+++ b/drivers/clk/mmp/clk-audio.c
@@ -164,23 +164,23 @@ static unsigned long audio_pll_recalc_rate(struct clk_hw *hw,
return 0;
}
-static long audio_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int audio_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned int prediv;
unsigned int postdiv;
long rounded = 0;
for (prediv = 0; prediv < ARRAY_SIZE(predivs); prediv++) {
- if (predivs[prediv].parent_rate != *parent_rate)
+ if (predivs[prediv].parent_rate != req->best_parent_rate)
continue;
for (postdiv = 0; postdiv < ARRAY_SIZE(postdivs); postdiv++) {
long freq = predivs[prediv].freq_vco;
freq /= postdivs[postdiv].divisor;
- if (freq == rate)
- return rate;
- if (freq < rate)
+ if (freq == req->rate)
+ return 0;
+ if (freq < req->rate)
continue;
if (rounded && freq > rounded)
continue;
@@ -188,7 +188,9 @@ static long audio_pll_round_rate(struct clk_hw *hw, unsigned long rate,
}
}
- return rounded;
+ req->rate = rounded;
+
+ return 0;
}
static int audio_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -228,7 +230,7 @@ static int audio_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops audio_pll_ops = {
.recalc_rate = audio_pll_recalc_rate,
- .round_rate = audio_pll_round_rate,
+ .determine_rate = audio_pll_determine_rate,
.set_rate = audio_pll_set_rate,
};
diff --git a/drivers/clk/mmp/clk-frac.c b/drivers/clk/mmp/clk-frac.c
index 6556f6ada2e8..0b1bb01346f0 100644
--- a/drivers/clk/mmp/clk-frac.c
+++ b/drivers/clk/mmp/clk-frac.c
@@ -21,8 +21,8 @@
#define to_clk_factor(hw) container_of(hw, struct mmp_clk_factor, hw)
-static long clk_factor_round_rate(struct clk_hw *hw, unsigned long drate,
- unsigned long *prate)
+static int clk_factor_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct mmp_clk_factor *factor = to_clk_factor(hw);
u64 rate = 0, prev_rate;
@@ -33,19 +33,20 @@ static long clk_factor_round_rate(struct clk_hw *hw, unsigned long drate,
d = &factor->ftbl[i];
prev_rate = rate;
- rate = (u64)(*prate) * d->denominator;
+ rate = (u64)(req->best_parent_rate) * d->denominator;
do_div(rate, d->numerator * factor->masks->factor);
- if (rate > drate)
+ if (rate > req->rate)
break;
}
- if ((i == 0) || (i == factor->ftbl_cnt)) {
- return rate;
- } else {
- if ((drate - prev_rate) > (rate - drate))
- return rate;
- else
- return prev_rate;
- }
+
+ if ((i == 0) || (i == factor->ftbl_cnt))
+ req->rate = rate;
+ else if ((req->rate - prev_rate) > (rate - req->rate))
+ req->rate = rate;
+ else
+ req->rate = prev_rate;
+
+ return 0;
}
static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
@@ -160,7 +161,7 @@ static int clk_factor_init(struct clk_hw *hw)
static const struct clk_ops clk_factor_ops = {
.recalc_rate = clk_factor_recalc_rate,
- .round_rate = clk_factor_round_rate,
+ .determine_rate = clk_factor_determine_rate,
.set_rate = clk_factor_set_rate,
.init = clk_factor_init,
};
diff --git a/drivers/clk/mmp/clk-pxa1908-apmu.c b/drivers/clk/mmp/clk-pxa1908-apmu.c
index d3a070687fc5..7594a495a009 100644
--- a/drivers/clk/mmp/clk-pxa1908-apmu.c
+++ b/drivers/clk/mmp/clk-pxa1908-apmu.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/auxiliary_bus.h>
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -85,6 +86,7 @@ static void pxa1908_axi_periph_clk_init(struct pxa1908_clk_unit *pxa_unit)
static int pxa1908_apmu_probe(struct platform_device *pdev)
{
struct pxa1908_clk_unit *pxa_unit;
+ struct auxiliary_device *adev;
pxa_unit = devm_kzalloc(&pdev->dev, sizeof(*pxa_unit), GFP_KERNEL);
if (!pxa_unit)
@@ -94,6 +96,11 @@ static int pxa1908_apmu_probe(struct platform_device *pdev)
if (IS_ERR(pxa_unit->base))
return PTR_ERR(pxa_unit->base);
+ adev = devm_auxiliary_device_create(&pdev->dev, "power", NULL);
+ if (IS_ERR(adev))
+ return dev_err_probe(&pdev->dev, PTR_ERR(adev),
+ "Failed to register power controller\n");
+
mmp_clk_init(pdev->dev.of_node, &pxa_unit->unit, APMU_NR_CLKS);
pxa1908_axi_periph_clk_init(pxa_unit);
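devm_auxiliary_device_create() spawns a child device here so a power-domain driver can attach to the APMU block without a DT node of its own. A hypothetical consumer could bind along these lines; the "clk_pxa1908_apmu.power" match string assumes the auxiliary core prefixes the id with this module's KBUILD_MODNAME, so the exact name may differ:

/* Hypothetical consumer of the "power" auxiliary device created above. */
#include <linux/auxiliary_bus.h>
#include <linux/module.h>

static int pxa1908_power_probe(struct auxiliary_device *adev,
			       const struct auxiliary_device_id *id)
{
	/* power-island setup for the APMU would go here */
	return 0;
}

static const struct auxiliary_device_id pxa1908_power_ids[] = {
	{ .name = "clk_pxa1908_apmu.power" },	/* assumed modname prefix */
	{}
};
MODULE_DEVICE_TABLE(auxiliary, pxa1908_power_ids);

static struct auxiliary_driver pxa1908_power_driver = {
	.probe = pxa1908_power_probe,
	.id_table = pxa1908_power_ids,
};
module_auxiliary_driver(pxa1908_power_driver);

MODULE_DESCRIPTION("Sketch of a PXA1908 power-island consumer");
MODULE_LICENSE("GPL");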
diff --git a/drivers/clk/mstar/clk-msc313-cpupll.c b/drivers/clk/mstar/clk-msc313-cpupll.c
index a93e2dba09d3..3e643be02fe2 100644
--- a/drivers/clk/mstar/clk-msc313-cpupll.c
+++ b/drivers/clk/mstar/clk-msc313-cpupll.c
@@ -140,20 +140,22 @@ static unsigned long msc313_cpupll_recalc_rate(struct clk_hw *hw, unsigned long
parent_rate);
}
-static long msc313_cpupll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int msc313_cpupll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- u32 reg = msc313_cpupll_regforfrequecy(rate, *parent_rate);
- long rounded = msc313_cpupll_frequencyforreg(reg, *parent_rate);
+ u32 reg = msc313_cpupll_regforfrequecy(req->rate, req->best_parent_rate);
+ long rounded = msc313_cpupll_frequencyforreg(reg, req->best_parent_rate);
/*
* This is my poor attempt at making sure the resulting
* rate doesn't overshoot the requested rate.
*/
- for (; rounded >= rate && reg > 0; reg--)
- rounded = msc313_cpupll_frequencyforreg(reg, *parent_rate);
+ for (; rounded >= req->rate && reg > 0; reg--)
+ rounded = msc313_cpupll_frequencyforreg(reg, req->best_parent_rate);
- return rounded;
+ req->rate = rounded;
+
+ return 0;
}
static int msc313_cpupll_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate)
@@ -168,7 +170,7 @@ static int msc313_cpupll_set_rate(struct clk_hw *hw, unsigned long rate, unsigne
static const struct clk_ops msc313_cpupll_ops = {
.recalc_rate = msc313_cpupll_recalc_rate,
- .round_rate = msc313_cpupll_round_rate,
+ .determine_rate = msc313_cpupll_determine_rate,
.set_rate = msc313_cpupll_set_rate,
};
diff --git a/drivers/clk/mvebu/ap-cpu-clk.c b/drivers/clk/mvebu/ap-cpu-clk.c
index 677cc3514849..1e44ace7d951 100644
--- a/drivers/clk/mvebu/ap-cpu-clk.c
+++ b/drivers/clk/mvebu/ap-cpu-clk.c
@@ -210,19 +210,21 @@ static int ap_cpu_clk_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static long ap_cpu_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ap_cpu_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- int divider = *parent_rate / rate;
+ int divider = req->best_parent_rate / req->rate;
divider = min(divider, APN806_MAX_DIVIDER);
- return *parent_rate / divider;
+ req->rate = req->best_parent_rate / divider;
+
+ return 0;
}
static const struct clk_ops ap_cpu_clk_ops = {
.recalc_rate = ap_cpu_clk_recalc_rate,
- .round_rate = ap_cpu_clk_round_rate,
+ .determine_rate = ap_cpu_clk_determine_rate,
.set_rate = ap_cpu_clk_set_rate,
};
diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
index 13906e31bef8..bd0bc8e7b1e7 100644
--- a/drivers/clk/mvebu/armada-37xx-periph.c
+++ b/drivers/clk/mvebu/armada-37xx-periph.c
@@ -454,12 +454,12 @@ static unsigned long clk_pm_cpu_recalc_rate(struct clk_hw *hw,
return DIV_ROUND_UP_ULL((u64)parent_rate, div);
}
-static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_pm_cpu_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
struct regmap *base = pm_cpu->nb_pm_base;
- unsigned int div = *parent_rate / rate;
+ unsigned int div = req->best_parent_rate / req->rate;
unsigned int load_level;
/* only available when DVFS is enabled */
if (!armada_3700_pm_dvfs_is_enabled(base))
@@ -474,13 +474,16 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
val >>= offset;
val &= ARMADA_37XX_NB_TBG_DIV_MASK;
- if (val == div)
+ if (val == div) {
/*
* We found a load level matching the target
* divider, switch to this load level and
* return.
*/
- return *parent_rate / div;
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
+ }
}
/* We didn't find any valid divider */
@@ -600,7 +603,7 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_pm_cpu_ops = {
.get_parent = clk_pm_cpu_get_parent,
- .round_rate = clk_pm_cpu_round_rate,
+ .determine_rate = clk_pm_cpu_determine_rate,
.set_rate = clk_pm_cpu_set_rate,
.recalc_rate = clk_pm_cpu_recalc_rate,
};
diff --git a/drivers/clk/mvebu/clk-corediv.c b/drivers/clk/mvebu/clk-corediv.c
index 818b175391fa..628032341cbb 100644
--- a/drivers/clk/mvebu/clk-corediv.c
+++ b/drivers/clk/mvebu/clk-corediv.c
@@ -135,19 +135,21 @@ static unsigned long clk_corediv_recalc_rate(struct clk_hw *hwclk,
return parent_rate / div;
}
-static long clk_corediv_round_rate(struct clk_hw *hwclk, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_corediv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
	/* Valid ratios are 1:4, 1:5, 1:6 and 1:8 */
u32 div;
- div = *parent_rate / rate;
+ div = req->best_parent_rate / req->rate;
if (div < 4)
div = 4;
else if (div > 6)
div = 8;
- return *parent_rate / div;
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
}
static int clk_corediv_set_rate(struct clk_hw *hwclk, unsigned long rate,
@@ -199,7 +201,7 @@ static const struct clk_corediv_soc_desc armada370_corediv_soc = {
.disable = clk_corediv_disable,
.is_enabled = clk_corediv_is_enabled,
.recalc_rate = clk_corediv_recalc_rate,
- .round_rate = clk_corediv_round_rate,
+ .determine_rate = clk_corediv_determine_rate,
.set_rate = clk_corediv_set_rate,
},
.ratio_reload = BIT(8),
@@ -215,7 +217,7 @@ static const struct clk_corediv_soc_desc armada380_corediv_soc = {
.disable = clk_corediv_disable,
.is_enabled = clk_corediv_is_enabled,
.recalc_rate = clk_corediv_recalc_rate,
- .round_rate = clk_corediv_round_rate,
+ .determine_rate = clk_corediv_determine_rate,
.set_rate = clk_corediv_set_rate,
},
.ratio_reload = BIT(8),
@@ -228,7 +230,7 @@ static const struct clk_corediv_soc_desc armada375_corediv_soc = {
.ndescs = ARRAY_SIZE(mvebu_corediv_desc),
.ops = {
.recalc_rate = clk_corediv_recalc_rate,
- .round_rate = clk_corediv_round_rate,
+ .determine_rate = clk_corediv_determine_rate,
.set_rate = clk_corediv_set_rate,
},
.ratio_reload = BIT(8),
@@ -240,7 +242,7 @@ static const struct clk_corediv_soc_desc mv98dx3236_corediv_soc = {
.ndescs = ARRAY_SIZE(mv98dx3236_corediv_desc),
.ops = {
.recalc_rate = clk_corediv_recalc_rate,
- .round_rate = clk_corediv_round_rate,
+ .determine_rate = clk_corediv_determine_rate,
.set_rate = clk_corediv_set_rate,
},
.ratio_reload = BIT(10),
diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c
index db2b38c21304..0de7660e73d2 100644
--- a/drivers/clk/mvebu/clk-cpu.c
+++ b/drivers/clk/mvebu/clk-cpu.c
@@ -56,19 +56,21 @@ static unsigned long clk_cpu_recalc_rate(struct clk_hw *hwclk,
return parent_rate / div;
}
-static long clk_cpu_round_rate(struct clk_hw *hwclk, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_cpu_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
	/* Valid ratios are 1:1, 1:2 and 1:3 */
u32 div;
- div = *parent_rate / rate;
+ div = req->best_parent_rate / req->rate;
if (div == 0)
div = 1;
else if (div > 3)
div = 3;
- return *parent_rate / div;
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
}
static int clk_cpu_off_set_rate(struct clk_hw *hwclk, unsigned long rate,
@@ -159,7 +161,7 @@ static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
static const struct clk_ops cpu_ops = {
.recalc_rate = clk_cpu_recalc_rate,
- .round_rate = clk_cpu_round_rate,
+ .determine_rate = clk_cpu_determine_rate,
.set_rate = clk_cpu_set_rate,
};
diff --git a/drivers/clk/mvebu/dove-divider.c b/drivers/clk/mvebu/dove-divider.c
index 0a90452ee808..47cc49e4cd99 100644
--- a/drivers/clk/mvebu/dove-divider.c
+++ b/drivers/clk/mvebu/dove-divider.c
@@ -108,23 +108,23 @@ static unsigned long dove_recalc_rate(struct clk_hw *hw, unsigned long parent)
return rate;
}
-static long dove_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent)
+static int dove_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct dove_clk *dc = to_dove_clk(hw);
- unsigned long parent_rate = *parent;
+ unsigned long parent_rate = req->best_parent_rate;
int divider;
- divider = dove_calc_divider(dc, rate, parent_rate, false);
+ divider = dove_calc_divider(dc, req->rate, parent_rate, false);
if (divider < 0)
return divider;
- rate = DIV_ROUND_CLOSEST(parent_rate, divider);
+ req->rate = DIV_ROUND_CLOSEST(parent_rate, divider);
pr_debug("%s(): %s divider=%u parent=%lu rate=%lu\n",
- __func__, dc->name, divider, parent_rate, rate);
+ __func__, dc->name, divider, parent_rate, req->rate);
- return rate;
+ return 0;
}
static int dove_set_clock(struct clk_hw *hw, unsigned long rate,
@@ -154,7 +154,7 @@ static int dove_set_clock(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops dove_divider_ops = {
.set_rate = dove_set_clock,
- .round_rate = dove_round_rate,
+ .determine_rate = dove_determine_rate,
.recalc_rate = dove_recalc_rate,
};
diff --git a/drivers/clk/mxs/clk-div.c b/drivers/clk/mxs/clk-div.c
index 0a78ef380646..8afe1a9c1552 100644
--- a/drivers/clk/mxs/clk-div.c
+++ b/drivers/clk/mxs/clk-div.c
@@ -40,12 +40,12 @@ static unsigned long clk_div_recalc_rate(struct clk_hw *hw,
return div->ops->recalc_rate(&div->divider.hw, parent_rate);
}
-static long clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_div *div = to_clk_div(hw);
- return div->ops->round_rate(&div->divider.hw, rate, prate);
+ return div->ops->determine_rate(&div->divider.hw, req);
}
static int clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -63,7 +63,7 @@ static int clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_div_ops = {
.recalc_rate = clk_div_recalc_rate,
- .round_rate = clk_div_round_rate,
+ .determine_rate = clk_div_determine_rate,
.set_rate = clk_div_set_rate,
};
diff --git a/drivers/clk/mxs/clk-frac.c b/drivers/clk/mxs/clk-frac.c
index bba0d840dd76..73f514fb84ff 100644
--- a/drivers/clk/mxs/clk-frac.c
+++ b/drivers/clk/mxs/clk-frac.c
@@ -44,18 +44,18 @@ static unsigned long clk_frac_recalc_rate(struct clk_hw *hw,
return tmp_rate >> frac->width;
}
-static long clk_frac_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_frac_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_frac *frac = to_clk_frac(hw);
- unsigned long parent_rate = *prate;
+ unsigned long parent_rate = req->best_parent_rate;
u32 div;
u64 tmp, tmp_rate, result;
- if (rate > parent_rate)
+ if (req->rate > parent_rate)
return -EINVAL;
- tmp = rate;
+ tmp = req->rate;
tmp <<= frac->width;
do_div(tmp, parent_rate);
div = tmp;
@@ -67,7 +67,9 @@ static long clk_frac_round_rate(struct clk_hw *hw, unsigned long rate,
result = tmp_rate >> frac->width;
if ((result << frac->width) < tmp_rate)
result += 1;
- return result;
+ req->rate = result;
+
+ return 0;
}
static int clk_frac_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -103,7 +105,7 @@ static int clk_frac_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_frac_ops = {
.recalc_rate = clk_frac_recalc_rate,
- .round_rate = clk_frac_round_rate,
+ .determine_rate = clk_frac_determine_rate,
.set_rate = clk_frac_set_rate,
};
diff --git a/drivers/clk/mxs/clk-ref.c b/drivers/clk/mxs/clk-ref.c
index 2297259da89a..a99ee4cd2ece 100644
--- a/drivers/clk/mxs/clk-ref.c
+++ b/drivers/clk/mxs/clk-ref.c
@@ -57,22 +57,24 @@ static unsigned long clk_ref_recalc_rate(struct clk_hw *hw,
return tmp;
}
-static long clk_ref_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_ref_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long parent_rate = *prate;
+ unsigned long parent_rate = req->best_parent_rate;
u64 tmp = parent_rate;
u8 frac;
- tmp = tmp * 18 + rate / 2;
- do_div(tmp, rate);
+ tmp = tmp * 18 + req->rate / 2;
+ do_div(tmp, req->rate);
frac = clamp(tmp, 18, 35);
tmp = parent_rate;
tmp *= 18;
do_div(tmp, frac);
- return tmp;
+ req->rate = tmp;
+
+ return 0;
}
static int clk_ref_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -104,7 +106,7 @@ static const struct clk_ops clk_ref_ops = {
.enable = clk_ref_enable,
.disable = clk_ref_disable,
.recalc_rate = clk_ref_recalc_rate,
- .round_rate = clk_ref_round_rate,
+ .determine_rate = clk_ref_determine_rate,
.set_rate = clk_ref_set_rate,
};
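Unlike round_rate, which returned the rate itself, determine_rate separates status from result, so drivers such as clk-frac above can reject an impossible request cleanly. A hedged sketch of that error path; the fallback rate here is purely illustrative:

static int frac_reject_sketch(struct clk_hw *hw,
			      struct clk_rate_request *req)
{
	/* A fractional divider only divides down; anything above the
	 * parent rate is unsatisfiable, as in clk_frac_determine_rate(). */
	if (req->rate > req->best_parent_rate)
		return -EINVAL;

	/* Otherwise report the closest achievable rate (illustrative). */
	req->rate = req->best_parent_rate;

	return 0;
}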
diff --git a/drivers/clk/nuvoton/clk-ma35d1-divider.c b/drivers/clk/nuvoton/clk-ma35d1-divider.c
index bb8c23d2b895..e39f53d5bf45 100644
--- a/drivers/clk/nuvoton/clk-ma35d1-divider.c
+++ b/drivers/clk/nuvoton/clk-ma35d1-divider.c
@@ -39,12 +39,16 @@ static unsigned long ma35d1_clkdiv_recalc_rate(struct clk_hw *hw, unsigned long
CLK_DIVIDER_ROUND_CLOSEST, dclk->width);
}
-static long ma35d1_clkdiv_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate)
+static int ma35d1_clkdiv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ma35d1_adc_clk_div *dclk = to_ma35d1_adc_clk_div(hw);
- return divider_round_rate(hw, rate, prate, dclk->table,
- dclk->width, CLK_DIVIDER_ROUND_CLOSEST);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ dclk->table, dclk->width,
+ CLK_DIVIDER_ROUND_CLOSEST);
+
+ return 0;
}
static int ma35d1_clkdiv_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate)
@@ -71,7 +75,7 @@ static int ma35d1_clkdiv_set_rate(struct clk_hw *hw, unsigned long rate, unsigne
static const struct clk_ops ma35d1_adc_clkdiv_ops = {
.recalc_rate = ma35d1_clkdiv_recalc_rate,
- .round_rate = ma35d1_clkdiv_round_rate,
+ .determine_rate = ma35d1_clkdiv_determine_rate,
.set_rate = ma35d1_clkdiv_set_rate,
};
diff --git a/drivers/clk/nuvoton/clk-ma35d1-pll.c b/drivers/clk/nuvoton/clk-ma35d1-pll.c
index ff3fb8b87c24..4620acfe47e8 100644
--- a/drivers/clk/nuvoton/clk-ma35d1-pll.c
+++ b/drivers/clk/nuvoton/clk-ma35d1-pll.c
@@ -244,35 +244,43 @@ static unsigned long ma35d1_clk_pll_recalc_rate(struct clk_hw *hw, unsigned long
return 0;
}
-static long ma35d1_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ma35d1_clk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ma35d1_clk_pll *pll = to_ma35d1_clk_pll(hw);
u32 reg_ctl[3] = { 0 };
unsigned long pll_freq;
long ret;
- if (*parent_rate < PLL_FREF_MIN_FREQ || *parent_rate > PLL_FREF_MAX_FREQ)
+ if (req->best_parent_rate < PLL_FREF_MIN_FREQ || req->best_parent_rate > PLL_FREF_MAX_FREQ)
return -EINVAL;
- ret = ma35d1_pll_find_closest(pll, rate, *parent_rate, reg_ctl, &pll_freq);
+ ret = ma35d1_pll_find_closest(pll, req->rate, req->best_parent_rate,
+ reg_ctl, &pll_freq);
if (ret < 0)
return ret;
switch (pll->id) {
case CAPLL:
reg_ctl[0] = readl_relaxed(pll->ctl0_base);
- pll_freq = ma35d1_calc_smic_pll_freq(reg_ctl[0], *parent_rate);
- return pll_freq;
+ pll_freq = ma35d1_calc_smic_pll_freq(reg_ctl[0], req->best_parent_rate);
+ req->rate = pll_freq;
+
+ return 0;
case DDRPLL:
case APLL:
case EPLL:
case VPLL:
reg_ctl[0] = readl_relaxed(pll->ctl0_base);
reg_ctl[1] = readl_relaxed(pll->ctl1_base);
- pll_freq = ma35d1_calc_pll_freq(pll->mode, reg_ctl, *parent_rate);
- return pll_freq;
+ pll_freq = ma35d1_calc_pll_freq(pll->mode, reg_ctl, req->best_parent_rate);
+ req->rate = pll_freq;
+
+ return 0;
}
+
+ req->rate = 0;
+
return 0;
}
@@ -311,12 +319,12 @@ static const struct clk_ops ma35d1_clk_pll_ops = {
.unprepare = ma35d1_clk_pll_unprepare,
.set_rate = ma35d1_clk_pll_set_rate,
.recalc_rate = ma35d1_clk_pll_recalc_rate,
- .round_rate = ma35d1_clk_pll_round_rate,
+ .determine_rate = ma35d1_clk_pll_determine_rate,
};
static const struct clk_ops ma35d1_clk_fixed_pll_ops = {
.recalc_rate = ma35d1_clk_pll_recalc_rate,
- .round_rate = ma35d1_clk_pll_round_rate,
+ .determine_rate = ma35d1_clk_pll_determine_rate,
};
struct clk_hw *ma35d1_reg_clk_pll(struct device *dev, u32 id, u8 u8mode, const char *name,
diff --git a/drivers/clk/nxp/clk-lpc18xx-cgu.c b/drivers/clk/nxp/clk-lpc18xx-cgu.c
index 81efa885069b..b9e204d63a97 100644
--- a/drivers/clk/nxp/clk-lpc18xx-cgu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-cgu.c
@@ -370,23 +370,25 @@ static unsigned long lpc18xx_pll0_recalc_rate(struct clk_hw *hw,
return 0;
}
-static long lpc18xx_pll0_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int lpc18xx_pll0_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long m;
- if (*prate < rate) {
+ if (req->best_parent_rate < req->rate) {
pr_warn("%s: pll dividers not supported\n", __func__);
return -EINVAL;
}
- m = DIV_ROUND_UP_ULL(*prate, rate * 2);
- if (m <= 0 && m > LPC18XX_PLL0_MSEL_MAX) {
- pr_warn("%s: unable to support rate %lu\n", __func__, rate);
+ m = DIV_ROUND_UP_ULL(req->best_parent_rate, req->rate * 2);
+ if (m == 0 || m > LPC18XX_PLL0_MSEL_MAX) {
+ pr_warn("%s: unable to support rate %lu\n", __func__, req->rate);
return -EINVAL;
}
- return 2 * *prate * m;
+ req->rate = 2 * req->best_parent_rate * m;
+
+ return 0;
}
static int lpc18xx_pll0_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -402,7 +404,7 @@ static int lpc18xx_pll0_set_rate(struct clk_hw *hw, unsigned long rate,
}
m = DIV_ROUND_UP_ULL(parent_rate, rate * 2);
- if (m <= 0 && m > LPC18XX_PLL0_MSEL_MAX) {
+ if (m == 0 || m > LPC18XX_PLL0_MSEL_MAX) {
pr_warn("%s: unable to support rate %lu\n", __func__, rate);
return -EINVAL;
}
@@ -443,7 +445,7 @@ static int lpc18xx_pll0_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops lpc18xx_pll0_ops = {
.recalc_rate = lpc18xx_pll0_recalc_rate,
- .round_rate = lpc18xx_pll0_round_rate,
+ .determine_rate = lpc18xx_pll0_determine_rate,
.set_rate = lpc18xx_pll0_set_rate,
};
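The hunks above also fix a long-standing dead check: m is unsigned, so "m <= 0 && m > LPC18XX_PLL0_MSEL_MAX" required m to be zero and above the maximum at once, and could never fire. A hedged standalone illustration, with 9 standing in for LPC18XX_PLL0_MSEL_MAX:

	unsigned long m = 0;
	bool old_check = (m <= 0 && m > 9);	/* always false: dead code */
	bool new_check = (m == 0 || m > 9);	/* correctly flags m == 0 */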
diff --git a/drivers/clk/nxp/clk-lpc32xx.c b/drivers/clk/nxp/clk-lpc32xx.c
index e00f270bc6aa..23f980cf6a2b 100644
--- a/drivers/clk/nxp/clk-lpc32xx.c
+++ b/drivers/clk/nxp/clk-lpc32xx.c
@@ -68,7 +68,6 @@ static const struct regmap_config lpc32xx_scb_regmap_config = {
.reg_stride = 4,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
.max_register = 0x114,
- .fast_io = true,
};
static struct regmap *clk_regmap;
@@ -579,17 +578,17 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return regmap_update_bits(clk_regmap, clk->reg, 0x1FFFF, val);
}
-static long clk_hclk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_hclk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lpc32xx_pll_clk *clk = to_lpc32xx_pll_clk(hw);
- u64 m_i, o = rate, i = *parent_rate, d = (u64)rate << 6;
+ u64 m_i, o = req->rate, i = req->best_parent_rate, d = (u64)req->rate << 6;
u64 m = 0, n = 0, p = 0;
int p_i, n_i;
- pr_debug("%s: %lu/%lu\n", clk_hw_get_name(hw), *parent_rate, rate);
+ pr_debug("%s: %lu/%lu\n", clk_hw_get_name(hw), req->best_parent_rate, req->rate);
- if (rate > 266500000)
+ if (req->rate > 266500000)
return -EINVAL;
/* Have to check all 20 possibilities to find the minimal M */
@@ -614,9 +613,9 @@ static long clk_hclk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
}
}
- if (d == (u64)rate << 6) {
+ if (d == (u64)req->rate << 6) {
pr_err("%s: %lu: no valid PLL parameters are found\n",
- clk_hw_get_name(hw), rate);
+ clk_hw_get_name(hw), req->rate);
return -EINVAL;
}
@@ -634,22 +633,25 @@ static long clk_hclk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
if (!d)
pr_debug("%s: %lu: found exact match: %llu/%llu/%llu\n",
- clk_hw_get_name(hw), rate, m, n, p);
+ clk_hw_get_name(hw), req->rate, m, n, p);
else
pr_debug("%s: %lu: found closest: %llu/%llu/%llu - %llu\n",
- clk_hw_get_name(hw), rate, m, n, p, o);
+ clk_hw_get_name(hw), req->rate, m, n, p, o);
- return o;
+ req->rate = o;
+
+ return 0;
}
-static long clk_usb_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_usb_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lpc32xx_pll_clk *clk = to_lpc32xx_pll_clk(hw);
struct clk_hw *usb_div_hw, *osc_hw;
u64 d_i, n_i, m, o;
- pr_debug("%s: %lu/%lu\n", clk_hw_get_name(hw), *parent_rate, rate);
+ pr_debug("%s: %lu/%lu\n", clk_hw_get_name(hw), req->best_parent_rate,
+ req->rate);
/*
* The only supported USB clock is 48MHz, with PLL internal constraints
@@ -657,7 +659,7 @@ static long clk_usb_pll_round_rate(struct clk_hw *hw, unsigned long rate,
* and post-divider must be 4, this slightly simplifies calculation of
* USB divider, USB PLL N and M parameters.
*/
- if (rate != 48000000)
+ if (req->rate != 48000000)
return -EINVAL;
/* USB divider clock */
@@ -685,30 +687,30 @@ static long clk_usb_pll_round_rate(struct clk_hw *hw, unsigned long rate,
clk->m_div = m;
clk->p_div = 2;
clk->mode = PLL_NON_INTEGER;
- *parent_rate = div64_u64(o, d_i);
+ req->best_parent_rate = div64_u64(o, d_i);
- return rate;
+ return 0;
}
}
return -EINVAL;
}
-#define LPC32XX_DEFINE_PLL_OPS(_name, _rc, _sr, _rr) \
+#define LPC32XX_DEFINE_PLL_OPS(_name, _rc, _sr, _dr) \
static const struct clk_ops clk_ ##_name ## _ops = { \
.enable = clk_pll_enable, \
.disable = clk_pll_disable, \
.is_enabled = clk_pll_is_enabled, \
.recalc_rate = _rc, \
.set_rate = _sr, \
- .round_rate = _rr, \
+ .determine_rate = _dr, \
}
LPC32XX_DEFINE_PLL_OPS(pll_397x, clk_pll_397x_recalc_rate, NULL, NULL);
LPC32XX_DEFINE_PLL_OPS(hclk_pll, clk_pll_recalc_rate,
- clk_pll_set_rate, clk_hclk_pll_round_rate);
+ clk_pll_set_rate, clk_hclk_pll_determine_rate);
LPC32XX_DEFINE_PLL_OPS(usb_pll, clk_pll_recalc_rate,
- clk_pll_set_rate, clk_usb_pll_round_rate);
+ clk_pll_set_rate, clk_usb_pll_determine_rate);
static int clk_ddram_is_enabled(struct clk_hw *hw)
{
@@ -955,8 +957,8 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
divider->flags, divider->width);
}
-static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lpc32xx_clk_div *divider = to_lpc32xx_div(hw);
unsigned int bestdiv;
@@ -968,11 +970,15 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
bestdiv &= div_mask(divider->width);
bestdiv = _get_div(divider->table, bestdiv, divider->flags,
divider->width);
- return DIV_ROUND_UP(*prate, bestdiv);
+ req->rate = DIV_ROUND_UP(req->best_parent_rate, bestdiv);
+
+ return 0;
}
- return divider_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ divider->table, divider->width, divider->flags);
+
+ return 0;
}
static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -991,7 +997,7 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops lpc32xx_clk_divider_ops = {
.recalc_rate = clk_divider_recalc_rate,
- .round_rate = clk_divider_round_rate,
+ .determine_rate = clk_divider_determine_rate,
.set_rate = clk_divider_set_rate,
};
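Many of the dividers in this series delegate the actual rounding to the generic helper, and the conversion preserves an important subtlety: divider_round_rate() may rewrite the parent rate through its pointer argument, which is why the new callbacks pass &req->best_parent_rate rather than a local copy. A minimal sketch, assuming an illustrative 8-bit width, no table, and added error handling that the converted drivers omit:

static int div_delegate_sketch(struct clk_hw *hw,
			       struct clk_rate_request *req)
{
	long rate;

	/* The helper may adjust req->best_parent_rate in place when
	 * CLK_SET_RATE_PARENT allows reclocking the parent. */
	rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
				  NULL, 8, CLK_DIVIDER_ROUND_CLOSEST);
	if (rate < 0)
		return rate;

	req->rate = rate;

	return 0;
}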
diff --git a/drivers/clk/pistachio/clk-pll.c b/drivers/clk/pistachio/clk-pll.c
index 025b9df76cdb..d05337915e2b 100644
--- a/drivers/clk/pistachio/clk-pll.c
+++ b/drivers/clk/pistachio/clk-pll.c
@@ -139,19 +139,23 @@ pll_get_params(struct pistachio_clk_pll *pll, unsigned long fref,
return NULL;
}
-static long pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int pll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
unsigned int i;
for (i = 0; i < pll->nr_rates; i++) {
- if (i > 0 && pll->rates[i].fref == *parent_rate &&
- pll->rates[i].fout <= rate)
- return pll->rates[i - 1].fout;
+ if (i > 0 && pll->rates[i].fref == req->best_parent_rate &&
+ pll->rates[i].fout <= req->rate) {
+ req->rate = pll->rates[i - 1].fout;
+
+ return 0;
+ }
}
- return pll->rates[0].fout;
+ req->rate = pll->rates[0].fout;
+
+ return 0;
}
static int pll_gf40lp_frac_enable(struct clk_hw *hw)
@@ -300,7 +304,7 @@ static const struct clk_ops pll_gf40lp_frac_ops = {
.disable = pll_gf40lp_frac_disable,
.is_enabled = pll_gf40lp_frac_is_enabled,
.recalc_rate = pll_gf40lp_frac_recalc_rate,
- .round_rate = pll_round_rate,
+ .determine_rate = pll_determine_rate,
.set_rate = pll_gf40lp_frac_set_rate,
};
@@ -432,7 +436,7 @@ static const struct clk_ops pll_gf40lp_laint_ops = {
.disable = pll_gf40lp_laint_disable,
.is_enabled = pll_gf40lp_laint_is_enabled,
.recalc_rate = pll_gf40lp_laint_recalc_rate,
- .round_rate = pll_round_rate,
+ .determine_rate = pll_determine_rate,
.set_rate = pll_gf40lp_laint_set_rate,
};
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 6cb6cd3e1778..78a303842613 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -19,6 +19,33 @@ menuconfig COMMON_CLK_QCOM
if COMMON_CLK_QCOM
+config CLK_GLYMUR_DISPCC
+ tristate "GLYMUR Display Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select CLK_GLYMUR_GCC
+ help
+ Support for the display clock controllers on Qualcomm
+ Technologies, Inc. GLYMUR devices.
+ Say Y if you want to support display devices and functionality such as
+ splash screen.
+
+config CLK_GLYMUR_GCC
+ tristate "GLYMUR Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on GLYMUR devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ I2C, USB, UFS, SDCC, etc.
+
+config CLK_GLYMUR_TCSRCC
+ tristate "GLYMUR TCSR Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
+ help
+ Support for the TCSR clock controller on GLYMUR devices.
+ Say Y if you want to use peripheral devices such as USB/PCIe/EDP.
+
config CLK_X1E80100_CAMCC
tristate "X1E80100 Camera Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -187,6 +214,15 @@ config IPQ_APSS_PLL
Say Y if you want to support CPU frequency scaling on ipq based
devices.
+config IPQ_APSS_5424
+ tristate "IPQ APSS Clock Controller"
+ select IPQ_APSS_PLL
+ default y if IPQ_GCC_5424
+ help
+ Support for the APSS clock controller on the Qualcomm IPQ5424 platform.
+ Say Y if you want to support CPU frequency scaling on ipq based
+ devices.
+
config IPQ_APSS_6018
tristate "IPQ APSS Clock Controller"
select IPQ_APSS_PLL
@@ -323,12 +359,12 @@ config MSM_GCC_8916
SD/eMMC, display, graphics, camera etc.
config MSM_GCC_8917
- tristate "MSM8917/QM215 Global Clock Controller"
+ tristate "MSM89(17/37)/QM215 Global Clock Controller"
depends on ARM64 || COMPILE_TEST
select QCOM_GDSC
help
- Support for the global clock controller on msm8917 and qm215
- devices.
+ Support for the global clock controller on msm8917, msm8937
+ and qm215 devices.
Say Y if you want to use devices such as UART, SPI i2c, USB,
SD/eMMC, display, graphics, camera etc.
@@ -495,7 +531,7 @@ config QCM_DISPCC_2290
config QCS_DISPCC_615
tristate "QCS615 Display Clock Controller"
- select QCM_GCC_615
+ select QCS_GCC_615
help
Support for the display clock controller on Qualcomm Technologies, Inc
QCS615 devices.
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index ddb7e06fae40..8051d481c439 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -21,6 +21,9 @@ clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o
obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
obj-$(CONFIG_CLK_GFM_LPASS_SM8250) += lpass-gfm-sm8250.o
+obj-$(CONFIG_CLK_GLYMUR_DISPCC) += dispcc-glymur.o
+obj-$(CONFIG_CLK_GLYMUR_GCC) += gcc-glymur.o
+obj-$(CONFIG_CLK_GLYMUR_TCSRCC) += tcsrcc-glymur.o
obj-$(CONFIG_CLK_X1E80100_CAMCC) += camcc-x1e80100.o
obj-$(CONFIG_CLK_X1E80100_DISPCC) += dispcc-x1e80100.o
obj-$(CONFIG_CLK_X1E80100_GCC) += gcc-x1e80100.o
@@ -29,6 +32,7 @@ obj-$(CONFIG_CLK_X1E80100_TCSRCC) += tcsrcc-x1e80100.o
obj-$(CONFIG_CLK_X1P42100_GPUCC) += gpucc-x1p42100.o
obj-$(CONFIG_CLK_QCM2290_GPUCC) += gpucc-qcm2290.o
obj-$(CONFIG_IPQ_APSS_PLL) += apss-ipq-pll.o
+obj-$(CONFIG_IPQ_APSS_5424) += apss-ipq5424.o
obj-$(CONFIG_IPQ_APSS_6018) += apss-ipq6018.o
obj-$(CONFIG_IPQ_CMN_PLL) += ipq-cmn-pll.o
obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
diff --git a/drivers/clk/qcom/a53-pll.c b/drivers/clk/qcom/a53-pll.c
index f43d455ab4b8..724a642311e5 100644
--- a/drivers/clk/qcom/a53-pll.c
+++ b/drivers/clk/qcom/a53-pll.c
@@ -33,7 +33,6 @@ static const struct regmap_config a53pll_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x40,
- .fast_io = true,
};
static struct pll_freq_tbl *qcom_a53pll_get_freq_tbl(struct device *dev)
diff --git a/drivers/clk/qcom/a7-pll.c b/drivers/clk/qcom/a7-pll.c
index c4a53e5db229..04b5492a3c21 100644
--- a/drivers/clk/qcom/a7-pll.c
+++ b/drivers/clk/qcom/a7-pll.c
@@ -27,7 +27,7 @@ static struct clk_alpha_pll a7pll = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "a7pll",
- .parent_data = &(const struct clk_parent_data){
+ .parent_data = &(const struct clk_parent_data){
.fw_name = "bi_tcxo",
},
.num_parents = 1,
@@ -50,7 +50,6 @@ static const struct regmap_config a7pll_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x1000,
- .fast_io = true,
};
static int qcom_a7pll_probe(struct platform_device *pdev)
diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c
index d6c1aea7e9e1..3a8987fe7008 100644
--- a/drivers/clk/qcom/apss-ipq-pll.c
+++ b/drivers/clk/qcom/apss-ipq-pll.c
@@ -169,7 +169,6 @@ static const struct regmap_config ipq_pll_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x40,
- .fast_io = true,
};
static int apss_ipq_pll_probe(struct platform_device *pdev)
diff --git a/drivers/clk/qcom/apss-ipq5424.c b/drivers/clk/qcom/apss-ipq5424.c
new file mode 100644
index 000000000000..4c67f722e009
--- /dev/null
+++ b/drivers/clk/qcom/apss-ipq5424.c
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2025, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/interconnect-provider.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/arm/qcom,ids.h>
+#include <dt-bindings/clock/qcom,apss-ipq.h>
+#include <dt-bindings/interconnect/qcom,ipq5424.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+
+enum {
+ DT_XO,
+ DT_CLK_REF,
+};
+
+enum {
+ P_XO,
+ P_GPLL0,
+ P_APSS_PLL_EARLY,
+ P_L3_PLL,
+};
+
+struct apss_clk {
+ struct notifier_block cpu_clk_notifier;
+ struct clk_hw *hw;
+ struct device *dev;
+ struct clk *l3_clk;
+};
+
+static const struct alpha_pll_config apss_pll_config = {
+ .l = 0x3b,
+ .config_ctl_val = 0x08200920,
+ .config_ctl_hi_val = 0x05008001,
+ .config_ctl_hi1_val = 0x04000000,
+ .user_ctl_val = 0xf,
+};
+
+static struct clk_alpha_pll ipq5424_apss_pll = {
+ .offset = 0x0,
+ .config = &apss_pll_config,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_HUAYRA_2290],
+ .flags = SUPPORTS_DYNAMIC_UPDATE,
+ .clkr = {
+ .enable_reg = 0x0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "apss_pll",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_XO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_huayra_ops,
+ },
+ },
+};
+
+static const struct clk_parent_data parents_apss_silver_clk_src[] = {
+ { .index = DT_XO },
+ { .index = DT_CLK_REF },
+ { .hw = &ipq5424_apss_pll.clkr.hw },
+};
+
+static const struct parent_map parents_apss_silver_clk_src_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 4 },
+ { P_APSS_PLL_EARLY, 5 },
+};
+
+static const struct freq_tbl ftbl_apss_clk_src[] = {
+ F(816000000, P_APSS_PLL_EARLY, 1, 0, 0),
+ F(1416000000, P_APSS_PLL_EARLY, 1, 0, 0),
+ F(1800000000, P_APSS_PLL_EARLY, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 apss_silver_clk_src = {
+ .cmd_rcgr = 0x0080,
+ .freq_tbl = ftbl_apss_clk_src,
+ .hid_width = 5,
+ .parent_map = parents_apss_silver_clk_src_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "apss_silver_clk_src",
+ .parent_data = parents_apss_silver_clk_src,
+ .num_parents = ARRAY_SIZE(parents_apss_silver_clk_src),
+ .ops = &clk_rcg2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_branch apss_silver_core_clk = {
+ .halt_reg = 0x008c,
+ .clkr = {
+ .enable_reg = 0x008c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "apss_silver_core_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &apss_silver_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config l3_pll_config = {
+ .l = 0x29,
+ .config_ctl_val = 0x08200920,
+ .config_ctl_hi_val = 0x05008001,
+ .config_ctl_hi1_val = 0x04000000,
+ .user_ctl_val = 0xf,
+};
+
+static struct clk_alpha_pll ipq5424_l3_pll = {
+ .offset = 0x10000,
+ .config = &l3_pll_config,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_HUAYRA_2290],
+ .flags = SUPPORTS_DYNAMIC_UPDATE,
+ .clkr = {
+ .enable_reg = 0x0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "l3_pll",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_XO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_huayra_ops,
+ },
+ },
+};
+
+static const struct clk_parent_data parents_l3_clk_src[] = {
+ { .index = DT_XO },
+ { .index = DT_CLK_REF },
+ { .hw = &ipq5424_l3_pll.clkr.hw },
+};
+
+static const struct parent_map parents_l3_clk_src_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 4 },
+ { P_L3_PLL, 5 },
+};
+
+static const struct freq_tbl ftbl_l3_clk_src[] = {
+ F(816000000, P_L3_PLL, 1, 0, 0),
+ F(984000000, P_L3_PLL, 1, 0, 0),
+ F(1272000000, P_L3_PLL, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 l3_clk_src = {
+ .cmd_rcgr = 0x10080,
+ .freq_tbl = ftbl_l3_clk_src,
+ .hid_width = 5,
+ .parent_map = parents_l3_clk_src_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "l3_clk_src",
+ .parent_data = parents_l3_clk_src,
+ .num_parents = ARRAY_SIZE(parents_l3_clk_src),
+ .ops = &clk_rcg2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_branch l3_core_clk = {
+ .halt_reg = 0x1008c,
+ .clkr = {
+ .enable_reg = 0x1008c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "l3_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &l3_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct regmap_config apss_ipq5424_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x20000,
+ .fast_io = true,
+};
+
+static struct clk_regmap *apss_ipq5424_clks[] = {
+ [APSS_PLL_EARLY] = &ipq5424_apss_pll.clkr,
+ [APSS_SILVER_CLK_SRC] = &apss_silver_clk_src.clkr,
+ [APSS_SILVER_CORE_CLK] = &apss_silver_core_clk.clkr,
+ [L3_PLL] = &ipq5424_l3_pll.clkr,
+ [L3_CLK_SRC] = &l3_clk_src.clkr,
+ [L3_CORE_CLK] = &l3_core_clk.clkr,
+};
+
+static struct clk_alpha_pll *ipq5424_apss_plls[] = {
+ &ipq5424_l3_pll,
+ &ipq5424_apss_pll,
+};
+
+static struct qcom_cc_driver_data ipq5424_apss_driver_data = {
+ .alpha_plls = ipq5424_apss_plls,
+ .num_alpha_plls = ARRAY_SIZE(ipq5424_apss_plls),
+};
+
+#define IPQ_APPS_PLL_ID (5424 * 3) /* some unique value */
+
+static const struct qcom_icc_hws_data icc_ipq5424_cpu_l3[] = {
+ { MASTER_CPU, SLAVE_L3, L3_CORE_CLK },
+};
+
+static const struct qcom_cc_desc apss_ipq5424_desc = {
+ .config = &apss_ipq5424_regmap_config,
+ .clks = apss_ipq5424_clks,
+ .num_clks = ARRAY_SIZE(apss_ipq5424_clks),
+ .icc_hws = icc_ipq5424_cpu_l3,
+ .num_icc_hws = ARRAY_SIZE(icc_ipq5424_cpu_l3),
+ .icc_first_node_id = IPQ_APPS_PLL_ID,
+ .driver_data = &ipq5424_apss_driver_data,
+};
+
+static int apss_ipq5424_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &apss_ipq5424_desc);
+}
+
+static const struct of_device_id apss_ipq5424_match_table[] = {
+ { .compatible = "qcom,ipq5424-apss-clk" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, apss_ipq5424_match_table);
+
+static struct platform_driver apss_ipq5424_driver = {
+ .probe = apss_ipq5424_probe,
+ .driver = {
+ .name = "apss-ipq5424-clk",
+ .of_match_table = apss_ipq5424_match_table,
+ .sync_state = icc_sync_state,
+ },
+};
+
+module_platform_driver(apss_ipq5424_driver);
+
+MODULE_DESCRIPTION("QCOM APSS IPQ5424 CLK Driver");
+MODULE_LICENSE("GPL");
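A hedged usage sketch of the new controller from a consumer's point of view; "dev" and the error handling are illustrative, though 1416000000 does appear in ftbl_apss_clk_src above:

	struct clk *cpu_clk;

	cpu_clk = devm_clk_get(dev, NULL);
	if (IS_ERR(cpu_clk))
		return PTR_ERR(cpu_clk);

	/* CLK_SET_RATE_PARENT on apss_silver_core_clk and the RCG lets
	 * this request propagate all the way up to apss_pll. */
	return clk_set_rate(cpu_clk, 1416000000);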
diff --git a/drivers/clk/qcom/camcc-milos.c b/drivers/clk/qcom/camcc-milos.c
index 75bd939f7dd1..0077c9c9249f 100644
--- a/drivers/clk/qcom/camcc-milos.c
+++ b/drivers/clk/qcom/camcc-milos.c
@@ -2124,7 +2124,7 @@ static struct qcom_cc_driver_data cam_cc_milos_driver_data = {
.num_clk_cbcrs = ARRAY_SIZE(cam_cc_milos_critical_cbcrs),
};
-static struct qcom_cc_desc cam_cc_milos_desc = {
+static const struct qcom_cc_desc cam_cc_milos_desc = {
.config = &cam_cc_milos_regmap_config,
.clks = cam_cc_milos_clocks,
.num_clks = ARRAY_SIZE(cam_cc_milos_clocks),
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index fec6eb376e27..6aeba40358c1 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -66,7 +66,7 @@
#define GET_PLL_TYPE(pll) (((pll)->regs - clk_alpha_pll_regs[0]) / PLL_OFF_MAX_REGS)
const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
- [CLK_ALPHA_PLL_TYPE_DEFAULT] = {
+ [CLK_ALPHA_PLL_TYPE_DEFAULT] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_ALPHA_VAL_U] = 0x0c,
@@ -77,7 +77,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL_U] = 0x20,
[PLL_OFF_STATUS] = 0x24,
},
- [CLK_ALPHA_PLL_TYPE_HUAYRA] = {
+ [CLK_ALPHA_PLL_TYPE_HUAYRA] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_USER_CTL] = 0x10,
@@ -87,7 +87,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL_U] = 0x20,
[PLL_OFF_STATUS] = 0x24,
},
- [CLK_ALPHA_PLL_TYPE_HUAYRA_APSS] = {
+ [CLK_ALPHA_PLL_TYPE_HUAYRA_APSS] = {
[PLL_OFF_L_VAL] = 0x08,
[PLL_OFF_ALPHA_VAL] = 0x10,
[PLL_OFF_USER_CTL] = 0x18,
@@ -97,7 +97,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL] = 0x30,
[PLL_OFF_TEST_CTL_U] = 0x34,
},
- [CLK_ALPHA_PLL_TYPE_HUAYRA_2290] = {
+ [CLK_ALPHA_PLL_TYPE_HUAYRA_2290] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_USER_CTL] = 0x0c,
@@ -110,7 +110,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_OPMODE] = 0x28,
[PLL_OFF_STATUS] = 0x38,
},
- [CLK_ALPHA_PLL_TYPE_BRAMMO] = {
+ [CLK_ALPHA_PLL_TYPE_BRAMMO] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_ALPHA_VAL_U] = 0x0c,
@@ -119,7 +119,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL] = 0x1c,
[PLL_OFF_STATUS] = 0x24,
},
- [CLK_ALPHA_PLL_TYPE_FABIA] = {
+ [CLK_ALPHA_PLL_TYPE_FABIA] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_USER_CTL] = 0x0c,
[PLL_OFF_USER_CTL_U] = 0x10,
@@ -147,7 +147,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_OPMODE] = 0x38,
[PLL_OFF_ALPHA_VAL] = 0x40,
},
- [CLK_ALPHA_PLL_TYPE_AGERA] = {
+ [CLK_ALPHA_PLL_TYPE_AGERA] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_USER_CTL] = 0x0c,
@@ -157,7 +157,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL_U] = 0x1c,
[PLL_OFF_STATUS] = 0x2c,
},
- [CLK_ALPHA_PLL_TYPE_ZONDA] = {
+ [CLK_ALPHA_PLL_TYPE_ZONDA] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_USER_CTL] = 0x0c,
@@ -243,7 +243,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL] = 0x28,
[PLL_OFF_TEST_CTL_U] = 0x2c,
},
- [CLK_ALPHA_PLL_TYPE_DEFAULT_EVO] = {
+ [CLK_ALPHA_PLL_TYPE_DEFAULT_EVO] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_ALPHA_VAL_U] = 0x0c,
@@ -254,7 +254,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_CONFIG_CTL] = 0x20,
[PLL_OFF_STATUS] = 0x24,
},
- [CLK_ALPHA_PLL_TYPE_BRAMMO_EVO] = {
+ [CLK_ALPHA_PLL_TYPE_BRAMMO_EVO] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_ALPHA_VAL_U] = 0x0c,
@@ -275,7 +275,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL] = 0x30,
[PLL_OFF_TEST_CTL_U] = 0x34,
},
- [CLK_ALPHA_PLL_TYPE_STROMER_PLUS] = {
+ [CLK_ALPHA_PLL_TYPE_STROMER_PLUS] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_USER_CTL] = 0x08,
[PLL_OFF_USER_CTL_U] = 0x0c,
@@ -286,7 +286,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_ALPHA_VAL] = 0x24,
[PLL_OFF_ALPHA_VAL_U] = 0x28,
},
- [CLK_ALPHA_PLL_TYPE_ZONDA_OLE] = {
+ [CLK_ALPHA_PLL_TYPE_ZONDA_OLE] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_USER_CTL] = 0x0c,
@@ -301,7 +301,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_OPMODE] = 0x30,
[PLL_OFF_STATUS] = 0x3c,
},
- [CLK_ALPHA_PLL_TYPE_NSS_HUAYRA] = {
+ [CLK_ALPHA_PLL_TYPE_NSS_HUAYRA] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_TEST_CTL] = 0x0c,
@@ -849,22 +849,25 @@ static int clk_alpha_pll_hwfsm_set_rate(struct clk_hw *hw, unsigned long rate,
clk_alpha_pll_hwfsm_is_enabled);
}
-static long clk_alpha_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_alpha_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 l, alpha_width = pll_alpha_width(pll);
u64 a;
unsigned long min_freq, max_freq;
- rate = alpha_pll_round_rate(rate, *prate, &l, &a, alpha_width);
- if (!pll->vco_table || alpha_pll_find_vco(pll, rate))
- return rate;
+ req->rate = alpha_pll_round_rate(req->rate, req->best_parent_rate, &l,
+ &a, alpha_width);
+ if (!pll->vco_table || alpha_pll_find_vco(pll, req->rate))
+ return 0;
min_freq = pll->vco_table[0].min_freq;
max_freq = pll->vco_table[pll->num_vco - 1].max_freq;
- return clamp(rate, min_freq, max_freq);
+ req->rate = clamp(req->rate, min_freq, max_freq);
+
+ return 0;
}
void clk_huayra_2290_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
@@ -1048,12 +1051,15 @@ static int alpha_pll_huayra_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static long alpha_pll_huayra_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int alpha_pll_huayra_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u32 l, a;
- return alpha_huayra_pll_round_rate(rate, *prate, &l, &a);
+ req->rate = alpha_huayra_pll_round_rate(req->rate,
+ req->best_parent_rate, &l, &a);
+
+ return 0;
}
static int trion_pll_is_enabled(struct clk_alpha_pll *pll,
@@ -1175,7 +1181,7 @@ const struct clk_ops clk_alpha_pll_ops = {
.disable = clk_alpha_pll_disable,
.is_enabled = clk_alpha_pll_is_enabled,
.recalc_rate = clk_alpha_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = clk_alpha_pll_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_ops);
@@ -1185,7 +1191,7 @@ const struct clk_ops clk_alpha_pll_huayra_ops = {
.disable = clk_alpha_pll_disable,
.is_enabled = clk_alpha_pll_is_enabled,
.recalc_rate = alpha_pll_huayra_recalc_rate,
- .round_rate = alpha_pll_huayra_round_rate,
+ .determine_rate = alpha_pll_huayra_determine_rate,
.set_rate = alpha_pll_huayra_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_huayra_ops);
@@ -1195,7 +1201,7 @@ const struct clk_ops clk_alpha_pll_hwfsm_ops = {
.disable = clk_alpha_pll_hwfsm_disable,
.is_enabled = clk_alpha_pll_hwfsm_is_enabled,
.recalc_rate = clk_alpha_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = clk_alpha_pll_hwfsm_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_hwfsm_ops);
@@ -1205,7 +1211,7 @@ const struct clk_ops clk_alpha_pll_fixed_trion_ops = {
.disable = clk_trion_pll_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = clk_trion_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_trion_ops);
@@ -1240,9 +1246,8 @@ static const struct clk_div_table clk_alpha_2bit_div_table[] = {
{ }
};
-static long
-clk_alpha_pll_postdiv_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_alpha_pll_postdiv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
const struct clk_div_table *table;
@@ -1252,13 +1257,15 @@ clk_alpha_pll_postdiv_round_rate(struct clk_hw *hw, unsigned long rate,
else
table = clk_alpha_div_table;
- return divider_round_rate(hw, rate, prate, table,
- pll->width, CLK_DIVIDER_POWER_OF_TWO);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ table, pll->width,
+ CLK_DIVIDER_POWER_OF_TWO);
+
+ return 0;
}
-static long
-clk_alpha_pll_postdiv_round_ro_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_alpha_pll_postdiv_ro_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
u32 ctl, div;
@@ -1270,9 +1277,12 @@ clk_alpha_pll_postdiv_round_ro_rate(struct clk_hw *hw, unsigned long rate,
div = 1 << fls(ctl);
if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)
- *prate = clk_hw_round_rate(clk_hw_get_parent(hw), div * rate);
+ req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
+ div * req->rate);
+
+ req->rate = DIV_ROUND_UP_ULL((u64)req->best_parent_rate, div);
- return DIV_ROUND_UP_ULL((u64)*prate, div);
+ return 0;
}
static int clk_alpha_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -1291,13 +1301,13 @@ static int clk_alpha_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops clk_alpha_pll_postdiv_ops = {
.recalc_rate = clk_alpha_pll_postdiv_recalc_rate,
- .round_rate = clk_alpha_pll_postdiv_round_rate,
+ .determine_rate = clk_alpha_pll_postdiv_determine_rate,
.set_rate = clk_alpha_pll_postdiv_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_ops);
const struct clk_ops clk_alpha_pll_postdiv_ro_ops = {
- .round_rate = clk_alpha_pll_postdiv_round_ro_rate,
+ .determine_rate = clk_alpha_pll_postdiv_ro_determine_rate,
.recalc_rate = clk_alpha_pll_postdiv_recalc_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_ro_ops);
@@ -1542,7 +1552,7 @@ const struct clk_ops clk_alpha_pll_fabia_ops = {
.is_enabled = clk_alpha_pll_is_enabled,
.set_rate = alpha_pll_fabia_set_rate,
.recalc_rate = alpha_pll_fabia_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_fabia_ops);
@@ -1551,7 +1561,7 @@ const struct clk_ops clk_alpha_pll_fixed_fabia_ops = {
.disable = alpha_pll_fabia_disable,
.is_enabled = clk_alpha_pll_is_enabled,
.recalc_rate = alpha_pll_fabia_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_fabia_ops);
@@ -1602,14 +1612,16 @@ clk_trion_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
return (parent_rate / div);
}
-static long
-clk_trion_pll_postdiv_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_trion_pll_postdiv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
- return divider_round_rate(hw, rate, prate, pll->post_div_table,
- pll->width, CLK_DIVIDER_ROUND_CLOSEST);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ pll->post_div_table,
+ pll->width, CLK_DIVIDER_ROUND_CLOSEST);
+
+ return 0;
};
static int
@@ -1635,18 +1647,21 @@ clk_trion_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops clk_alpha_pll_postdiv_trion_ops = {
.recalc_rate = clk_trion_pll_postdiv_recalc_rate,
- .round_rate = clk_trion_pll_postdiv_round_rate,
+ .determine_rate = clk_trion_pll_postdiv_determine_rate,
.set_rate = clk_trion_pll_postdiv_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_trion_ops);
-static long clk_alpha_pll_postdiv_fabia_round_rate(struct clk_hw *hw,
- unsigned long rate, unsigned long *prate)
+static int clk_alpha_pll_postdiv_fabia_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
- return divider_round_rate(hw, rate, prate, pll->post_div_table,
- pll->width, CLK_DIVIDER_ROUND_CLOSEST);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ pll->post_div_table,
+ pll->width, CLK_DIVIDER_ROUND_CLOSEST);
+
+ return 0;
}
static int clk_alpha_pll_postdiv_fabia_set_rate(struct clk_hw *hw,
@@ -1681,7 +1696,7 @@ static int clk_alpha_pll_postdiv_fabia_set_rate(struct clk_hw *hw,
const struct clk_ops clk_alpha_pll_postdiv_fabia_ops = {
.recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate,
- .round_rate = clk_alpha_pll_postdiv_fabia_round_rate,
+ .determine_rate = clk_alpha_pll_postdiv_fabia_determine_rate,
.set_rate = clk_alpha_pll_postdiv_fabia_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_fabia_ops);
@@ -1833,7 +1848,7 @@ const struct clk_ops clk_alpha_pll_trion_ops = {
.disable = clk_trion_pll_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = clk_trion_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = alpha_pll_trion_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_trion_ops);
@@ -1844,14 +1859,14 @@ const struct clk_ops clk_alpha_pll_lucid_ops = {
.disable = clk_trion_pll_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = clk_trion_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = alpha_pll_trion_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_lucid_ops);
const struct clk_ops clk_alpha_pll_postdiv_lucid_ops = {
.recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate,
- .round_rate = clk_alpha_pll_postdiv_fabia_round_rate,
+ .determine_rate = clk_alpha_pll_postdiv_fabia_determine_rate,
.set_rate = clk_alpha_pll_postdiv_fabia_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_lucid_ops);
@@ -1903,7 +1918,7 @@ const struct clk_ops clk_alpha_pll_agera_ops = {
.disable = clk_alpha_pll_disable,
.is_enabled = clk_alpha_pll_is_enabled,
.recalc_rate = alpha_pll_fabia_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = clk_alpha_pll_agera_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_agera_ops);
@@ -2119,7 +2134,7 @@ const struct clk_ops clk_alpha_pll_lucid_5lpe_ops = {
.disable = alpha_pll_lucid_5lpe_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = clk_trion_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = alpha_pll_lucid_5lpe_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_lucid_5lpe_ops);
@@ -2129,13 +2144,13 @@ const struct clk_ops clk_alpha_pll_fixed_lucid_5lpe_ops = {
.disable = alpha_pll_lucid_5lpe_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = clk_trion_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_lucid_5lpe_ops);
const struct clk_ops clk_alpha_pll_postdiv_lucid_5lpe_ops = {
.recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate,
- .round_rate = clk_alpha_pll_postdiv_fabia_round_rate,
+ .determine_rate = clk_alpha_pll_postdiv_fabia_determine_rate,
.set_rate = clk_lucid_5lpe_pll_postdiv_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_lucid_5lpe_ops);
@@ -2304,7 +2319,7 @@ const struct clk_ops clk_alpha_pll_zonda_ops = {
.disable = clk_zonda_pll_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = clk_trion_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = clk_zonda_pll_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_zonda_ops);
@@ -2529,13 +2544,13 @@ const struct clk_ops clk_alpha_pll_fixed_lucid_evo_ops = {
.disable = alpha_pll_lucid_evo_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = alpha_pll_lucid_evo_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_lucid_evo_ops);
const struct clk_ops clk_alpha_pll_postdiv_lucid_evo_ops = {
.recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate,
- .round_rate = clk_alpha_pll_postdiv_fabia_round_rate,
+ .determine_rate = clk_alpha_pll_postdiv_fabia_determine_rate,
.set_rate = clk_lucid_evo_pll_postdiv_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_lucid_evo_ops);
@@ -2546,7 +2561,7 @@ const struct clk_ops clk_alpha_pll_lucid_evo_ops = {
.disable = alpha_pll_lucid_evo_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = alpha_pll_lucid_evo_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = alpha_pll_lucid_5lpe_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_lucid_evo_ops);
@@ -2557,7 +2572,7 @@ const struct clk_ops clk_alpha_pll_reset_lucid_evo_ops = {
.disable = alpha_pll_reset_lucid_evo_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = alpha_pll_lucid_evo_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = alpha_pll_lucid_5lpe_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_reset_lucid_evo_ops);
@@ -2732,22 +2747,25 @@ static unsigned long clk_rivian_evo_pll_recalc_rate(struct clk_hw *hw,
return parent_rate * l;
}
-static long clk_rivian_evo_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_rivian_evo_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
unsigned long min_freq, max_freq;
u32 l;
u64 a;
- rate = alpha_pll_round_rate(rate, *prate, &l, &a, 0);
- if (!pll->vco_table || alpha_pll_find_vco(pll, rate))
- return rate;
+ req->rate = alpha_pll_round_rate(req->rate, req->best_parent_rate, &l,
+ &a, 0);
+ if (!pll->vco_table || alpha_pll_find_vco(pll, req->rate))
+ return 0;
min_freq = pll->vco_table[0].min_freq;
max_freq = pll->vco_table[pll->num_vco - 1].max_freq;
- return clamp(rate, min_freq, max_freq);
+ req->rate = clamp(req->rate, min_freq, max_freq);
+
+ return 0;
}
const struct clk_ops clk_alpha_pll_rivian_evo_ops = {
@@ -2755,7 +2773,7 @@ const struct clk_ops clk_alpha_pll_rivian_evo_ops = {
.disable = alpha_pll_lucid_5lpe_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = clk_rivian_evo_pll_recalc_rate,
- .round_rate = clk_rivian_evo_pll_round_rate,
+ .determine_rate = clk_rivian_evo_pll_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_rivian_evo_ops);
@@ -2964,7 +2982,7 @@ const struct clk_ops clk_alpha_pll_regera_ops = {
.disable = clk_zonda_pll_disable,
.is_enabled = clk_alpha_pll_is_enabled,
.recalc_rate = clk_trion_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = clk_zonda_pll_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_regera_ops);
@@ -3169,7 +3187,7 @@ const struct clk_ops clk_alpha_pll_slew_ops = {
.enable = clk_alpha_pll_slew_enable,
.disable = clk_alpha_pll_disable,
.recalc_rate = clk_alpha_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = clk_alpha_pll_slew_set_rate,
};
EXPORT_SYMBOL(clk_alpha_pll_slew_ops);
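One wrinkle in the conversions above: a determine_rate callback may negotiate with its parent as well as answer for itself, as the read-only post-divider shows. A minimal sketch of that shape, assuming a fixed divider of 2 where the real callback reads the value back from hardware:

static int ro_postdiv_sketch(struct clk_hw *hw,
			     struct clk_rate_request *req)
{
	u32 div = 2;	/* read back from hardware in the real callback */

	/* The divider cannot change, so push the request upstream:
	 * ask the parent whether it can produce div * rate... */
	if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)
		req->best_parent_rate =
			clk_hw_round_rate(clk_hw_get_parent(hw),
					  div * req->rate);

	/* ...then report what this node will actually emit. */
	req->rate = DIV_ROUND_UP_ULL((u64)req->best_parent_rate, div);

	return 0;
}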
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index ff41aeab0ab9..0903a05b18cc 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -29,6 +29,7 @@ enum {
CLK_ALPHA_PLL_TYPE_LUCID_OLE,
CLK_ALPHA_PLL_TYPE_PONGO_ELU,
CLK_ALPHA_PLL_TYPE_TAYCAN_ELU,
+ CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T = CLK_ALPHA_PLL_TYPE_TAYCAN_ELU,
CLK_ALPHA_PLL_TYPE_RIVIAN_EVO,
CLK_ALPHA_PLL_TYPE_DEFAULT_EVO,
CLK_ALPHA_PLL_TYPE_BRAMMO_EVO,
@@ -192,14 +193,17 @@ extern const struct clk_ops clk_alpha_pll_zonda_ops;
extern const struct clk_ops clk_alpha_pll_lucid_evo_ops;
#define clk_alpha_pll_taycan_elu_ops clk_alpha_pll_lucid_evo_ops
+#define clk_alpha_pll_taycan_eko_t_ops clk_alpha_pll_lucid_evo_ops
extern const struct clk_ops clk_alpha_pll_reset_lucid_evo_ops;
#define clk_alpha_pll_reset_lucid_ole_ops clk_alpha_pll_reset_lucid_evo_ops
extern const struct clk_ops clk_alpha_pll_fixed_lucid_evo_ops;
#define clk_alpha_pll_fixed_lucid_ole_ops clk_alpha_pll_fixed_lucid_evo_ops
#define clk_alpha_pll_fixed_taycan_elu_ops clk_alpha_pll_fixed_lucid_evo_ops
+#define clk_alpha_pll_fixed_taycan_eko_t_ops clk_alpha_pll_fixed_lucid_evo_ops
extern const struct clk_ops clk_alpha_pll_postdiv_lucid_evo_ops;
#define clk_alpha_pll_postdiv_lucid_ole_ops clk_alpha_pll_postdiv_lucid_evo_ops
#define clk_alpha_pll_postdiv_taycan_elu_ops clk_alpha_pll_postdiv_lucid_evo_ops
+#define clk_alpha_pll_postdiv_taycan_eko_t_ops clk_alpha_pll_postdiv_lucid_evo_ops
extern const struct clk_ops clk_alpha_pll_pongo_elu_ops;
extern const struct clk_ops clk_alpha_pll_rivian_evo_ops;
@@ -233,6 +237,8 @@ void clk_pongo_elu_pll_configure(struct clk_alpha_pll *pll, struct regmap *regma
const struct alpha_pll_config *config);
#define clk_taycan_elu_pll_configure(pll, regmap, config) \
clk_lucid_evo_pll_configure(pll, regmap, config)
+#define clk_taycan_eko_t_pll_configure(pll, regmap, config) \
+ clk_lucid_evo_pll_configure(pll, regmap, config)
void clk_rivian_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
diff --git a/drivers/clk/qcom/clk-cbf-8996.c b/drivers/clk/qcom/clk-cbf-8996.c
index ce4efcd995ea..0b40ed601f9a 100644
--- a/drivers/clk/qcom/clk-cbf-8996.c
+++ b/drivers/clk/qcom/clk-cbf-8996.c
@@ -212,7 +212,6 @@ static const struct regmap_config cbf_msm8996_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x10000,
- .fast_io = true,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
};
diff --git a/drivers/clk/qcom/clk-cpu-8996.c b/drivers/clk/qcom/clk-cpu-8996.c
index 72689448a653..21d13c0841ed 100644
--- a/drivers/clk/qcom/clk-cpu-8996.c
+++ b/drivers/clk/qcom/clk-cpu-8996.c
@@ -411,7 +411,6 @@ static const struct regmap_config cpu_msm8996_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x80210,
- .fast_io = true,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
};
diff --git a/drivers/clk/qcom/clk-rcg.c b/drivers/clk/qcom/clk-rcg.c
index 987141c91fe0..31f0650b48ba 100644
--- a/drivers/clk/qcom/clk-rcg.c
+++ b/drivers/clk/qcom/clk-rcg.c
@@ -423,7 +423,7 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
rate = tmp;
}
} else {
- rate = clk_hw_get_rate(p);
+ rate = clk_hw_get_rate(p);
}
req->best_parent_hw = p;
req->best_parent_rate = rate;
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 8001fd9faf9d..e18cb8807d73 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -201,7 +201,7 @@ __clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, u32 cfg)
regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
m &= mask;
regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &n);
- n = ~n;
+ n = ~n;
n &= mask;
n += m;
mode = cfg & CFG_MODE_MASK;
@@ -274,7 +274,7 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
rate = tmp;
}
} else {
- rate = clk_hw_get_rate(p);
+ rate = clk_hw_get_rate(p);
}
req->best_parent_hw = p;
req->best_parent_rate = rate;
@@ -311,7 +311,7 @@ __clk_rcg2_select_conf(struct clk_hw *hw, const struct freq_multi_tbl *f,
if (!p)
continue;
- parent_rate = clk_hw_get_rate(p);
+ parent_rate = clk_hw_get_rate(p);
rate = calc_rate(parent_rate, conf->n, conf->m, conf->n, conf->pre_div);
if (rate == req_rate) {
@@ -382,7 +382,7 @@ static int _freq_tbl_fm_determine_rate(struct clk_hw *hw, const struct freq_mult
rate = tmp;
}
} else {
- rate = clk_hw_get_rate(p);
+ rate = clk_hw_get_rate(p);
}
req->best_parent_hw = p;
diff --git a/drivers/clk/qcom/clk-regmap-divider.c b/drivers/clk/qcom/clk-regmap-divider.c
index 63c9fca0d65d..4f5395f0ab6d 100644
--- a/drivers/clk/qcom/clk-regmap-divider.c
+++ b/drivers/clk/qcom/clk-regmap-divider.c
@@ -15,8 +15,8 @@ static inline struct clk_regmap_div *to_clk_regmap_div(struct clk_hw *hw)
return container_of(to_clk_regmap(hw), struct clk_regmap_div, clkr);
}
-static long div_round_ro_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int div_ro_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_regmap_div *divider = to_clk_regmap_div(hw);
struct clk_regmap *clkr = &divider->clkr;
@@ -26,17 +26,24 @@ static long div_round_ro_rate(struct clk_hw *hw, unsigned long rate,
val >>= divider->shift;
val &= BIT(divider->width) - 1;
- return divider_ro_round_rate(hw, rate, prate, NULL, divider->width,
- CLK_DIVIDER_ROUND_CLOSEST, val);
+ req->rate = divider_ro_round_rate(hw, req->rate,
+ &req->best_parent_rate, NULL,
+ divider->width,
+ CLK_DIVIDER_ROUND_CLOSEST, val);
+
+ return 0;
}
-static long div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int div_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
struct clk_regmap_div *divider = to_clk_regmap_div(hw);
- return divider_round_rate(hw, rate, prate, NULL, divider->width,
- CLK_DIVIDER_ROUND_CLOSEST);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ NULL,
+ divider->width,
+ CLK_DIVIDER_ROUND_CLOSEST);
+
+ return 0;
}
static int div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -70,14 +77,14 @@ static unsigned long div_recalc_rate(struct clk_hw *hw,
}
const struct clk_ops clk_regmap_div_ops = {
- .round_rate = div_round_rate,
+ .determine_rate = div_determine_rate,
.set_rate = div_set_rate,
.recalc_rate = div_recalc_rate,
};
EXPORT_SYMBOL_GPL(clk_regmap_div_ops);
const struct clk_ops clk_regmap_div_ro_ops = {
- .round_rate = div_round_ro_rate,
+ .determine_rate = div_ro_determine_rate,
.recalc_rate = div_recalc_rate,
};
EXPORT_SYMBOL_GPL(clk_regmap_div_ro_ops);
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index 1496fb3de4be..63c38cb47bc4 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -87,7 +87,7 @@ static DEFINE_MUTEX(rpmh_clk_lock);
.hw.init = &(struct clk_init_data){ \
.ops = &clk_rpmh_ops, \
.name = #_name, \
- .parent_data = &(const struct clk_parent_data){ \
+ .parent_data = &(const struct clk_parent_data){ \
.fw_name = "xo", \
.name = "xo_board", \
}, \
@@ -105,7 +105,7 @@ static DEFINE_MUTEX(rpmh_clk_lock);
.hw.init = &(struct clk_init_data){ \
.ops = &clk_rpmh_ops, \
.name = #_name "_ao", \
- .parent_data = &(const struct clk_parent_data){ \
+ .parent_data = &(const struct clk_parent_data){ \
.fw_name = "xo", \
.name = "xo_board", \
}, \
@@ -182,7 +182,7 @@ static int clk_rpmh_send_aggregate_command(struct clk_rpmh *c)
}
c->last_sent_aggr_state = c->aggr_state;
- c->peer->last_sent_aggr_state = c->last_sent_aggr_state;
+ c->peer->last_sent_aggr_state = c->last_sent_aggr_state;
return 0;
}
@@ -390,6 +390,11 @@ DEFINE_CLK_RPMH_VRM(clk7, _a4, "clka7", 4);
DEFINE_CLK_RPMH_VRM(div_clk1, _div2, "divclka1", 2);
+DEFINE_CLK_RPMH_VRM(clk3, _a, "C3A_E0", 1);
+DEFINE_CLK_RPMH_VRM(clk4, _a, "C4A_E0", 1);
+DEFINE_CLK_RPMH_VRM(clk5, _a, "C5A_E0", 1);
+DEFINE_CLK_RPMH_VRM(clk8, _a, "C8A_E0", 1);
+
DEFINE_CLK_RPMH_BCM(ce, "CE0");
DEFINE_CLK_RPMH_BCM(hwkm, "HK0");
DEFINE_CLK_RPMH_BCM(ipa, "IP0");
@@ -879,6 +884,22 @@ static const struct clk_rpmh_desc clk_rpmh_sm8750 = {
.clka_optional = true,
};
+static struct clk_hw *glymur_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw,
+ [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw,
+ [RPMH_RF_CLK3] = &clk_rpmh_clk3_a.hw,
+ [RPMH_RF_CLK3_A] = &clk_rpmh_clk3_a_ao.hw,
+ [RPMH_RF_CLK4] = &clk_rpmh_clk4_a.hw,
+ [RPMH_RF_CLK4_A] = &clk_rpmh_clk4_a_ao.hw,
+ [RPMH_RF_CLK5] = &clk_rpmh_clk5_a.hw,
+ [RPMH_RF_CLK5_A] = &clk_rpmh_clk5_a_ao.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_glymur = {
+ .clks = glymur_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(glymur_rpmh_clocks),
+};
+
static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec,
void *data)
{
@@ -968,6 +989,7 @@ static int clk_rpmh_probe(struct platform_device *pdev)
}
static const struct of_device_id clk_rpmh_match_table[] = {
+ { .compatible = "qcom,glymur-rpmh-clk", .data = &clk_rpmh_glymur},
{ .compatible = "qcom,milos-rpmh-clk", .data = &clk_rpmh_milos},
{ .compatible = "qcom,qcs615-rpmh-clk", .data = &clk_rpmh_qcs615},
{ .compatible = "qcom,qdu1000-rpmh-clk", .data = &clk_rpmh_qdu1000},
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 3bf6df3884a5..103db984a40b 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -30,7 +30,7 @@
.hw.init = &(struct clk_init_data){ \
.ops = &clk_smd_rpm_ops, \
.name = #_name, \
- .parent_data = &(const struct clk_parent_data){ \
+ .parent_data = &(const struct clk_parent_data){ \
.fw_name = "xo", \
.name = "xo_board", \
}, \
@@ -47,7 +47,7 @@
.hw.init = &(struct clk_init_data){ \
.ops = &clk_smd_rpm_ops, \
.name = #_active, \
- .parent_data = &(const struct clk_parent_data){ \
+ .parent_data = &(const struct clk_parent_data){ \
.fw_name = "xo", \
.name = "xo_board", \
}, \
@@ -74,7 +74,7 @@
.hw.init = &(struct clk_init_data){ \
.ops = &clk_smd_rpm_branch_ops, \
.name = #_name, \
- .parent_data = &(const struct clk_parent_data){ \
+ .parent_data = &(const struct clk_parent_data){ \
.fw_name = "xo", \
.name = "xo_board", \
}, \
@@ -92,7 +92,7 @@
.hw.init = &(struct clk_init_data){ \
.ops = &clk_smd_rpm_branch_ops, \
.name = #_active, \
- .parent_data = &(const struct clk_parent_data){ \
+ .parent_data = &(const struct clk_parent_data){ \
.fw_name = "xo", \
.name = "xo_board", \
}, \
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index 37c3008e6c1b..121591886774 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -277,8 +277,8 @@ static int qcom_cc_icc_register(struct device *dev,
icd[i].slave_id = desc->icc_hws[i].slave_id;
hws = &desc->clks[desc->icc_hws[i].clk_id]->hw;
icd[i].clk = devm_clk_hw_get_clk(dev, hws, "icc");
- if (!icd[i].clk)
- return dev_err_probe(dev, -ENOENT,
+ if (IS_ERR(icd[i].clk))
+ return dev_err_probe(dev, PTR_ERR(icd[i].clk),
"(%d) clock entry is null\n", i);
icd[i].name = clk_hw_get_name(hws);
}
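The common.c hunk is a genuine bug fix rather than cleanup: devm_clk_hw_get_clk() follows the kernel's ERR_PTR convention, returning an encoded error pointer on failure and never NULL, so the old !icd[i].clk test could never fire and a failed lookup would later be used as a valid clock. (The unchanged format string still reads "clock entry is null", which is now slightly stale, but the check and the propagated errno are what matter.) A hedged sketch of the convention with a hypothetical caller:

#include <linux/clk.h>
#include <linux/err.h>

static int example_get_clk(struct device *dev, struct clk_hw *hw)
{
	struct clk *clk = devm_clk_hw_get_clk(dev, hw, "icc");

	if (IS_ERR(clk))		/* correct: test for an error pointer */
		return PTR_ERR(clk);	/* propagates e.g. -ENOMEM or -ENOENT */

	/* "if (!clk)" would be wrong here: NULL is never an error return. */
	return 0;
}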
diff --git a/drivers/clk/qcom/dispcc-glymur.c b/drivers/clk/qcom/dispcc-glymur.c
new file mode 100644
index 000000000000..5203fa6383f6
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-glymur.c
@@ -0,0 +1,1982 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025, Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,glymur-dispcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_SLEEP_CLK,
+ DT_DP0_PHY_PLL_LINK_CLK,
+ DT_DP0_PHY_PLL_VCO_DIV_CLK,
+ DT_DP1_PHY_PLL_LINK_CLK,
+ DT_DP1_PHY_PLL_VCO_DIV_CLK,
+ DT_DP2_PHY_PLL_LINK_CLK,
+ DT_DP2_PHY_PLL_VCO_DIV_CLK,
+ DT_DP3_PHY_PLL_LINK_CLK,
+ DT_DP3_PHY_PLL_VCO_DIV_CLK,
+ DT_DSI0_PHY_PLL_OUT_BYTECLK,
+ DT_DSI0_PHY_PLL_OUT_DSICLK,
+ DT_DSI1_PHY_PLL_OUT_BYTECLK,
+ DT_DSI1_PHY_PLL_OUT_DSICLK,
+ DT_STANDALONE_PHY_PLL0_LINK_CLK,
+ DT_STANDALONE_PHY_PLL0_VCO_DIV_CLK,
+ DT_STANDALONE_PHY_PLL1_LINK_CLK,
+ DT_STANDALONE_PHY_PLL1_VCO_DIV_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_SLEEP_CLK,
+ P_DISP_CC_PLL0_OUT_MAIN,
+ P_DISP_CC_PLL1_OUT_EVEN,
+ P_DISP_CC_PLL1_OUT_MAIN,
+ P_DP0_PHY_PLL_LINK_CLK,
+ P_DP0_PHY_PLL_VCO_DIV_CLK,
+ P_DP1_PHY_PLL_LINK_CLK,
+ P_DP1_PHY_PLL_VCO_DIV_CLK,
+ P_DP2_PHY_PLL_LINK_CLK,
+ P_DP2_PHY_PLL_VCO_DIV_CLK,
+ P_DP3_PHY_PLL_LINK_CLK,
+ P_DP3_PHY_PLL_VCO_DIV_CLK,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_DSI1_PHY_PLL_OUT_BYTECLK,
+ P_DSI1_PHY_PLL_OUT_DSICLK,
+ P_STANDALONE_PHY_PLL0_LINK_CLK,
+ P_STANDALONE_PHY_PLL0_VCO_DIV_CLK,
+ P_STANDALONE_PHY_PLL1_LINK_CLK,
+ P_STANDALONE_PHY_PLL1_VCO_DIV_CLK,
+};
+
+static const struct pll_vco taycan_eko_t_vco[] = {
+ { 249600000, 2500000000, 0 },
+};
+
+/* 257.142858 MHz Configuration */
+static const struct alpha_pll_config disp_cc_pll0_config = {
+ .l = 0xd,
+ .alpha = 0x6492,
+ .config_ctl_val = 0x25c400e7,
+ .config_ctl_hi_val = 0x0a8060e0,
+ .config_ctl_hi1_val = 0xf51dea20,
+ .user_ctl_val = 0x00000008,
+ .user_ctl_hi_val = 0x00000002,
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+ .offset = 0x0,
+ .config = &disp_cc_pll0_config,
+ .vco_table = taycan_eko_t_vco,
+ .num_vco = ARRAY_SIZE(taycan_eko_t_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_taycan_eko_t_ops,
+ },
+ },
+};
+
+/* 600.0 MHz Configuration */
+static const struct alpha_pll_config disp_cc_pll1_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x25c400e7,
+ .config_ctl_hi_val = 0x0a8060e0,
+ .config_ctl_hi1_val = 0xf51dea20,
+ .user_ctl_val = 0x00000008,
+ .user_ctl_hi_val = 0x00000002,
+};
+
+static struct clk_alpha_pll disp_cc_pll1 = {
+ .offset = 0x1000,
+ .config = &disp_cc_pll1_config,
+ .vco_table = taycan_eko_t_vco,
+ .num_vco = ARRAY_SIZE(taycan_eko_t_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_taycan_eko_t_ops,
+ },
+ },
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_STANDALONE_PHY_PLL0_VCO_DIV_CLK, 1 },
+ { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 },
+ { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 },
+ { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 },
+ { P_STANDALONE_PHY_PLL1_VCO_DIV_CLK, 5 },
+ { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_STANDALONE_PHY_PLL0_VCO_DIV_CLK },
+ { .index = DT_DP0_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP3_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_STANDALONE_PHY_PLL1_VCO_DIV_CLK },
+ { .index = DT_DP2_PHY_PLL_VCO_DIV_CLK },
+};
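+
+/*
+ * In each parent_map, the second field is the hardware mux select value
+ * for the given P_* parent; the companion clk_parent_data array lists
+ * the same parents, in the same order, resolved either to a DT_* index
+ * into the device tree "clocks" property or to an internal clk_hw.
+ */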
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_DSICLK, 3 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+ { P_DP1_PHY_PLL_LINK_CLK, 2 },
+ { P_DP2_PHY_PLL_LINK_CLK, 3 },
+ { P_DP3_PHY_PLL_LINK_CLK, 4 },
+ { P_STANDALONE_PHY_PLL1_LINK_CLK, 5 },
+ { P_STANDALONE_PHY_PLL0_LINK_CLK, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+ { .index = DT_DP1_PHY_PLL_LINK_CLK },
+ { .index = DT_DP2_PHY_PLL_LINK_CLK },
+ { .index = DT_DP3_PHY_PLL_LINK_CLK },
+ { .index = DT_STANDALONE_PHY_PLL1_LINK_CLK },
+ { .index = DT_STANDALONE_PHY_PLL0_LINK_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI1_PHY_PLL_OUT_DSICLK, 3 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_DSICLK },
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll1.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+ { P_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll0.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map disp_cc_parent_map_9[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_9[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct freq_tbl ftbl_disp_cc_esync0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_esync0_clk_src = {
+ .cmd_rcgr = 0x80c0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_esync0_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_esync1_clk_src = {
+ .cmd_rcgr = 0x80d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_esync1_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_DISP_CC_PLL1_OUT_MAIN, 16, 0, 0),
+ F(75000000, P_DISP_CC_PLL1_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+ .cmd_rcgr = 0x8360,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_6,
+ .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+ .hw_clk_ctrl = true,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk_src",
+ .parent_data = disp_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x8180,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = {
+ .cmd_rcgr = 0x819c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = {
+ .cmd_rcgr = 0x8234,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_aux_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = {
+ .cmd_rcgr = 0x81e8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_pixel0_clk_src = {
+ .cmd_rcgr = 0x8204,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_pixel1_clk_src = {
+ .cmd_rcgr = 0x821c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_aux_clk_src = {
+ .cmd_rcgr = 0x8298,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_aux_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = {
+ .cmd_rcgr = 0x827c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_pixel0_clk_src = {
+ .cmd_rcgr = 0x824c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_pixel1_clk_src = {
+ .cmd_rcgr = 0x8264,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_aux_clk_src = {
+ .cmd_rcgr = 0x82fc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_aux_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = {
+ .cmd_rcgr = 0x82b0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_pixel0_clk_src = {
+ .cmd_rcgr = 0x82cc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_pixel1_clk_src = {
+ .cmd_rcgr = 0x82e4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_aux_clk_src = {
+ .cmd_rcgr = 0x8348,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_aux_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = {
+ .cmd_rcgr = 0x832c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_pixel0_clk_src = {
+ .cmd_rcgr = 0x8314,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x81b8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = {
+ .cmd_rcgr = 0x81d0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc1_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(85714286, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(100000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(150000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(156000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(205000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(337000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(417000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(532000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(600000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(660000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(717000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
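+
+/*
+ * Every entry above pairs PLL0 with a fixed divider of 3; combined with
+ * CLK_SET_RATE_PARENT on the RCG below, a rate request is expected to
+ * propagate upward and reprogram disp_cc_pll0 to three times the target
+ * (roughly 2.15 GHz for the 717 MHz row), which stays inside the
+ * 249.6 MHz - 2.5 GHz VCO range declared in taycan_eko_t_vco.
+ */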
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x8150,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_7,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .hw_clk_ctrl = true,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk_src",
+ .parent_data = disp_cc_parent_data_7,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x8108,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk1_clk_src = {
+ .cmd_rcgr = 0x8120,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk1_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk2_clk_src = {
+ .cmd_rcgr = 0x8138,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk2_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x8168,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_osc_clk_src = {
+ .cmd_rcgr = 0x80f0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_8,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_osc_clk_src",
+ .parent_data = disp_cc_parent_data_8,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_sleep_clk_src = {
+ .cmd_rcgr = 0xe064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_9,
+ .freq_tbl = ftbl_disp_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_sleep_clk_src",
+ .parent_data = disp_cc_parent_data_9,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_9),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_xo_clk_src = {
+ .cmd_rcgr = 0xe044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_xo_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x8198,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = {
+ .reg = 0x81b4,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx0_link_div_clk_src = {
+ .reg = 0x8200,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx0_link_dpin_div_clk_src = {
+ .reg = 0x838c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_dpin_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx1_link_div_clk_src = {
+ .reg = 0x8294,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx1_link_dpin_div_clk_src = {
+ .reg = 0x8390,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_dpin_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx2_link_div_clk_src = {
+ .reg = 0x82c8,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx2_link_dpin_div_clk_src = {
+ .reg = 0x8394,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_dpin_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx3_link_div_clk_src = {
+ .reg = 0x8344,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx3_link_dpin_div_clk_src = {
+ .reg = 0x8398,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_dpin_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch disp_cc_esync0_clk = {
+ .halt_reg = 0x80b8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_esync0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_esync0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_esync1_clk = {
+ .halt_reg = 0x80bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_esync1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_esync1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_accu_shift_clk = {
+ .halt_reg = 0xe060,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xe060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_accu_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb1_clk = {
+ .halt_reg = 0xa028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x80b0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x8034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x8038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte1_clk = {
+ .halt_reg = 0x803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte1_intf_clk = {
+ .halt_reg = 0x8040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_aux_clk = {
+ .halt_reg = 0x8064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_clk = {
+ .halt_reg = 0x804c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x804c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_dpin_clk = {
+ .halt_reg = 0x837c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x837c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_dpin_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_dpin_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_intf_clk = {
+ .halt_reg = 0x8054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_pixel0_clk = {
+ .halt_reg = 0x805c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_pixel1_clk = {
+ .halt_reg = 0x8060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_usb_router_link_intf_clk = {
+ .halt_reg = 0x8050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_aux_clk = {
+ .halt_reg = 0x8080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_link_clk = {
+ .halt_reg = 0x8070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_link_dpin_clk = {
+ .halt_reg = 0x8380,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8380,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_dpin_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_dpin_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_link_intf_clk = {
+ .halt_reg = 0x8078,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_pixel0_clk = {
+ .halt_reg = 0x8068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_pixel1_clk = {
+ .halt_reg = 0x806c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x806c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_usb_router_link_intf_clk = {
+ .halt_reg = 0x8074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_aux_clk = {
+ .halt_reg = 0x8098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8098,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_link_clk = {
+ .halt_reg = 0x808c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x808c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_link_dpin_clk = {
+ .halt_reg = 0x8384,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8384,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_dpin_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_dpin_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_link_intf_clk = {
+ .halt_reg = 0x8090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_pixel0_clk = {
+ .halt_reg = 0x8084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_pixel1_clk = {
+ .halt_reg = 0x8088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_usb_router_link_intf_clk = {
+ .halt_reg = 0x8378,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8378,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_aux_clk = {
+ .halt_reg = 0x80a8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_link_clk = {
+ .halt_reg = 0x80a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_link_dpin_clk = {
+ .halt_reg = 0x8388,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8388,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_dpin_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_dpin_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_link_intf_clk = {
+ .halt_reg = 0x80a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_pixel0_clk = {
+ .halt_reg = 0x809c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x809c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x8044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc1_clk = {
+ .halt_reg = 0x8048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_esc1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp1_clk = {
+ .halt_reg = 0xa004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x8010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut1_clk = {
+ .halt_reg = 0xa014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xa014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x8020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+ .halt_reg = 0xc004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xc004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_non_gdsc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x8004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk1_clk = {
+ .halt_reg = 0x8008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk2_clk = {
+ .halt_reg = 0x800c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
+ .halt_reg = 0xc00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
+ .halt_reg = 0xc008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync1_clk = {
+ .halt_reg = 0xa024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x8030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_osc_clk = {
+ .halt_reg = 0x80b4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_osc_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_osc_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc disp_cc_mdss_core_gdsc = {
+ .gdscr = 0x9000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "disp_cc_mdss_core_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc disp_cc_mdss_core_int2_gdsc = {
+ .gdscr = 0xb000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "disp_cc_mdss_core_int2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
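+
+/*
+ * GDSC flag summary: HW_CTRL hands the domain to hardware control once
+ * enabled, POLL_CFG_GDSCR polls the CFG register (rather than the main
+ * GDSCR) for the power-state ack, and RETAIN_FF_ENABLE keeps register
+ * flip-flop state retained across a power collapse.
+ */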
+
+static struct clk_regmap *disp_cc_glymur_clocks[] = {
+ [DISP_CC_ESYNC0_CLK] = &disp_cc_esync0_clk.clkr,
+ [DISP_CC_ESYNC0_CLK_SRC] = &disp_cc_esync0_clk_src.clkr,
+ [DISP_CC_ESYNC1_CLK] = &disp_cc_esync1_clk.clkr,
+ [DISP_CC_ESYNC1_CLK_SRC] = &disp_cc_esync1_clk_src.clkr,
+ [DISP_CC_MDSS_ACCU_SHIFT_CLK] = &disp_cc_mdss_accu_shift_clk.clkr,
+ [DISP_CC_MDSS_AHB1_CLK] = &disp_cc_mdss_ahb1_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK] = &disp_cc_mdss_byte1_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK_SRC] = &disp_cc_mdss_byte1_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] = &disp_cc_mdss_byte1_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_INTF_CLK] = &disp_cc_mdss_byte1_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_AUX_CLK] = &disp_cc_mdss_dptx0_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_AUX_CLK_SRC] = &disp_cc_mdss_dptx0_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_CLK] = &disp_cc_mdss_dptx0_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_CLK_SRC] = &disp_cc_mdss_dptx0_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx0_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_DPIN_CLK] = &disp_cc_mdss_dptx0_link_dpin_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_DPIN_DIV_CLK_SRC] = &disp_cc_mdss_dptx0_link_dpin_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_INTF_CLK] = &disp_cc_mdss_dptx0_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL0_CLK] = &disp_cc_mdss_dptx0_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx0_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL1_CLK] = &disp_cc_mdss_dptx0_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx0_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx0_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_AUX_CLK] = &disp_cc_mdss_dptx1_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_AUX_CLK_SRC] = &disp_cc_mdss_dptx1_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_CLK] = &disp_cc_mdss_dptx1_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_CLK_SRC] = &disp_cc_mdss_dptx1_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx1_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_DPIN_CLK] = &disp_cc_mdss_dptx1_link_dpin_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_DPIN_DIV_CLK_SRC] = &disp_cc_mdss_dptx1_link_dpin_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_INTF_CLK] = &disp_cc_mdss_dptx1_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL0_CLK] = &disp_cc_mdss_dptx1_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx1_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL1_CLK] = &disp_cc_mdss_dptx1_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx1_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_AUX_CLK] = &disp_cc_mdss_dptx2_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_AUX_CLK_SRC] = &disp_cc_mdss_dptx2_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_CLK] = &disp_cc_mdss_dptx2_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_CLK_SRC] = &disp_cc_mdss_dptx2_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx2_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_DPIN_CLK] = &disp_cc_mdss_dptx2_link_dpin_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_DPIN_DIV_CLK_SRC] = &disp_cc_mdss_dptx2_link_dpin_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_INTF_CLK] = &disp_cc_mdss_dptx2_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL0_CLK] = &disp_cc_mdss_dptx2_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx2_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL1_CLK] = &disp_cc_mdss_dptx2_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx2_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx2_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_AUX_CLK] = &disp_cc_mdss_dptx3_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_AUX_CLK_SRC] = &disp_cc_mdss_dptx3_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_CLK] = &disp_cc_mdss_dptx3_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_CLK_SRC] = &disp_cc_mdss_dptx3_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx3_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_DPIN_CLK] = &disp_cc_mdss_dptx3_link_dpin_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_DPIN_DIV_CLK_SRC] = &disp_cc_mdss_dptx3_link_dpin_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_INTF_CLK] = &disp_cc_mdss_dptx3_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_PIXEL0_CLK] = &disp_cc_mdss_dptx3_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx3_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+ [DISP_CC_MDSS_ESC1_CLK] = &disp_cc_mdss_esc1_clk.clkr,
+ [DISP_CC_MDSS_ESC1_CLK_SRC] = &disp_cc_mdss_esc1_clk_src.clkr,
+ [DISP_CC_MDSS_MDP1_CLK] = &disp_cc_mdss_mdp1_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_LUT1_CLK] = &disp_cc_mdss_mdp_lut1_clk.clkr,
+ [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+ [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK] = &disp_cc_mdss_pclk1_clk.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK_SRC] = &disp_cc_mdss_pclk1_clk_src.clkr,
+ [DISP_CC_MDSS_PCLK2_CLK] = &disp_cc_mdss_pclk2_clk.clkr,
+ [DISP_CC_MDSS_PCLK2_CLK_SRC] = &disp_cc_mdss_pclk2_clk_src.clkr,
+ [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
+ [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC1_CLK] = &disp_cc_mdss_vsync1_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+ [DISP_CC_OSC_CLK] = &disp_cc_osc_clk.clkr,
+ [DISP_CC_OSC_CLK_SRC] = &disp_cc_osc_clk_src.clkr,
+ [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+ [DISP_CC_PLL1] = &disp_cc_pll1.clkr,
+ [DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr,
+ [DISP_CC_XO_CLK_SRC] = &disp_cc_xo_clk_src.clkr,
+};
+
+static struct gdsc *disp_cc_glymur_gdscs[] = {
+ [DISP_CC_MDSS_CORE_GDSC] = &disp_cc_mdss_core_gdsc,
+ [DISP_CC_MDSS_CORE_INT2_GDSC] = &disp_cc_mdss_core_int2_gdsc,
+};
+
+static const struct qcom_reset_map disp_cc_glymur_resets[] = {
+ [DISP_CC_MDSS_CORE_BCR] = { 0x8000 },
+ [DISP_CC_MDSS_CORE_INT2_BCR] = { 0xa000 },
+ [DISP_CC_MDSS_RSCC_BCR] = { 0xc000 },
+};
+
+static struct clk_alpha_pll *disp_cc_glymur_plls[] = {
+ &disp_cc_pll0,
+ &disp_cc_pll1,
+};
+
+static u32 disp_cc_glymur_critical_cbcrs[] = {
+ 0xe07c, /* DISP_CC_SLEEP_CLK */
+ 0xe05c, /* DISP_CC_XO_CLK */
+};
+
+static const struct regmap_config disp_cc_glymur_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x11014,
+ .fast_io = true,
+};
+
+static struct qcom_cc_driver_data disp_cc_glymur_driver_data = {
+ .alpha_plls = disp_cc_glymur_plls,
+ .num_alpha_plls = ARRAY_SIZE(disp_cc_glymur_plls),
+ .clk_cbcrs = disp_cc_glymur_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(disp_cc_glymur_critical_cbcrs),
+};
+
+static const struct qcom_cc_desc disp_cc_glymur_desc = {
+ .config = &disp_cc_glymur_regmap_config,
+ .clks = disp_cc_glymur_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_glymur_clocks),
+ .resets = disp_cc_glymur_resets,
+ .num_resets = ARRAY_SIZE(disp_cc_glymur_resets),
+ .gdscs = disp_cc_glymur_gdscs,
+ .num_gdscs = ARRAY_SIZE(disp_cc_glymur_gdscs),
+ .use_rpm = true,
+ .driver_data = &disp_cc_glymur_driver_data,
+};
+
+static const struct of_device_id disp_cc_glymur_match_table[] = {
+ { .compatible = "qcom,glymur-dispcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_glymur_match_table);
+
+static int disp_cc_glymur_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &disp_cc_glymur_desc);
+}
+
+static struct platform_driver disp_cc_glymur_driver = {
+ .probe = disp_cc_glymur_probe,
+ .driver = {
+ .name = "dispcc-glymur",
+ .of_match_table = disp_cc_glymur_match_table,
+ },
+};
+
+module_platform_driver(disp_cc_glymur_driver);
+
+MODULE_DESCRIPTION("QTI DISPCC GLYMUR Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/dispcc-milos.c b/drivers/clk/qcom/dispcc-milos.c
index 602d3a498d33..95b6dd89d9ae 100644
--- a/drivers/clk/qcom/dispcc-milos.c
+++ b/drivers/clk/qcom/dispcc-milos.c
@@ -937,7 +937,7 @@ static struct qcom_cc_driver_data disp_cc_milos_driver_data = {
.clk_regs_configure = disp_cc_milos_clk_regs_configure,
};
-static struct qcom_cc_desc disp_cc_milos_desc = {
+static const struct qcom_cc_desc disp_cc_milos_desc = {
.config = &disp_cc_milos_regmap_config,
.clks = disp_cc_milos_clocks,
.num_clks = ARRAY_SIZE(disp_cc_milos_clocks),
diff --git a/drivers/clk/qcom/dispcc-sc7280.c b/drivers/clk/qcom/dispcc-sc7280.c
index 8bdf57734a3d..465dc06c8712 100644
--- a/drivers/clk/qcom/dispcc-sc7280.c
+++ b/drivers/clk/qcom/dispcc-sc7280.c
@@ -17,6 +17,7 @@
#include "clk-regmap-divider.h"
#include "common.h"
#include "gdsc.h"
+#include "reset.h"
enum {
P_BI_TCXO,
@@ -847,6 +848,11 @@ static struct gdsc *disp_cc_sc7280_gdscs[] = {
[DISP_CC_MDSS_CORE_GDSC] = &disp_cc_mdss_core_gdsc,
};
+static const struct qcom_reset_map disp_cc_sc7280_resets[] = {
+ [DISP_CC_MDSS_CORE_BCR] = { 0x1000 },
+ [DISP_CC_MDSS_RSCC_BCR] = { 0x2000 },
+};
+
static const struct regmap_config disp_cc_sc7280_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -861,6 +867,8 @@ static const struct qcom_cc_desc disp_cc_sc7280_desc = {
.num_clks = ARRAY_SIZE(disp_cc_sc7280_clocks),
.gdscs = disp_cc_sc7280_gdscs,
.num_gdscs = ARRAY_SIZE(disp_cc_sc7280_gdscs),
+ .resets = disp_cc_sc7280_resets,
+ .num_resets = ARRAY_SIZE(disp_cc_sc7280_resets),
};
static const struct of_device_id disp_cc_sc7280_match_table[] = {
diff --git a/drivers/clk/qcom/gcc-glymur.c b/drivers/clk/qcom/gcc-glymur.c
new file mode 100644
index 000000000000..62059120f972
--- /dev/null
+++ b/drivers/clk/qcom/gcc-glymur.c
@@ -0,0 +1,8616 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025, Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,glymur-gcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK,
+ DT_GCC_USB4_0_PHY_DP0_GMUX_CLK_SRC,
+ DT_GCC_USB4_0_PHY_DP1_GMUX_CLK_SRC,
+ DT_GCC_USB4_0_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_0_PHY_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_0_PHY_SYS_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_1_PHY_DP0_GMUX_2_CLK_SRC,
+ DT_GCC_USB4_1_PHY_DP1_GMUX_2_CLK_SRC,
+ DT_GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_2_PHY_DP0_GMUX_CLK_SRC,
+ DT_GCC_USB4_2_PHY_DP1_GMUX_CLK_SRC,
+ DT_GCC_USB4_2_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_2_PHY_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_2_PHY_SYS_PIPEGMUX_CLK_SRC,
+ DT_PCIE_3A_PIPE_CLK,
+ DT_PCIE_3B_PIPE_CLK,
+ DT_PCIE_4_PIPE_CLK,
+ DT_PCIE_5_PIPE_CLK,
+ DT_PCIE_6_PIPE_CLK,
+ DT_QUSB4PHY_0_GCC_USB4_RX0_CLK,
+ DT_QUSB4PHY_0_GCC_USB4_RX1_CLK,
+ DT_QUSB4PHY_1_GCC_USB4_RX0_CLK,
+ DT_QUSB4PHY_1_GCC_USB4_RX1_CLK,
+ DT_QUSB4PHY_2_GCC_USB4_RX0_CLK,
+ DT_QUSB4PHY_2_GCC_USB4_RX1_CLK,
+ DT_UFS_PHY_RX_SYMBOL_0_CLK,
+ DT_UFS_PHY_RX_SYMBOL_1_CLK,
+ DT_UFS_PHY_TX_SYMBOL_0_CLK,
+ DT_USB3_PHY_0_WRAPPER_GCC_USB30_PIPE_CLK,
+ DT_USB3_PHY_1_WRAPPER_GCC_USB30_PIPE_CLK,
+ DT_USB3_PHY_2_WRAPPER_GCC_USB30_PIPE_CLK,
+ DT_USB3_UNI_PHY_MP_GCC_USB30_PIPE_0_CLK,
+ DT_USB3_UNI_PHY_MP_GCC_USB30_PIPE_1_CLK,
+ DT_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ DT_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+ DT_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ DT_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+ DT_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ DT_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_GCC_GPLL0_OUT_EVEN,
+ P_GCC_GPLL0_OUT_MAIN,
+ P_GCC_GPLL14_OUT_EVEN,
+ P_GCC_GPLL14_OUT_MAIN,
+ P_GCC_GPLL1_OUT_MAIN,
+ P_GCC_GPLL4_OUT_MAIN,
+ P_GCC_GPLL5_OUT_MAIN,
+ P_GCC_GPLL7_OUT_MAIN,
+ P_GCC_GPLL8_OUT_MAIN,
+ P_GCC_GPLL9_OUT_MAIN,
+ P_GCC_USB3_PRIM_PHY_PIPE_CLK_SRC,
+ P_GCC_USB3_SEC_PHY_PIPE_CLK_SRC,
+ P_GCC_USB3_TERT_PHY_PIPE_CLK_SRC,
+ P_GCC_USB4_0_PHY_DP0_GMUX_CLK_SRC,
+ P_GCC_USB4_0_PHY_DP1_GMUX_CLK_SRC,
+ P_GCC_USB4_0_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_0_PHY_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_0_PHY_SYS_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_1_PHY_DP0_GMUX_2_CLK_SRC,
+ P_GCC_USB4_1_PHY_DP1_GMUX_2_CLK_SRC,
+ P_GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_1_PHY_PLL_PIPE_CLK_SRC,
+ P_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_2_PHY_DP0_GMUX_CLK_SRC,
+ P_GCC_USB4_2_PHY_DP1_GMUX_CLK_SRC,
+ P_GCC_USB4_2_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_2_PHY_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_2_PHY_SYS_PIPEGMUX_CLK_SRC,
+ P_PCIE_3A_PIPE_CLK,
+ P_PCIE_3B_PIPE_CLK,
+ P_PCIE_4_PIPE_CLK,
+ P_PCIE_5_PIPE_CLK,
+ P_PCIE_6_PIPE_CLK,
+ P_QUSB4PHY_0_GCC_USB4_RX0_CLK,
+ P_QUSB4PHY_0_GCC_USB4_RX1_CLK,
+ P_QUSB4PHY_1_GCC_USB4_RX0_CLK,
+ P_QUSB4PHY_1_GCC_USB4_RX1_CLK,
+ P_QUSB4PHY_2_GCC_USB4_RX0_CLK,
+ P_QUSB4PHY_2_GCC_USB4_RX1_CLK,
+ P_SLEEP_CLK,
+ P_UFS_PHY_RX_SYMBOL_0_CLK,
+ P_UFS_PHY_RX_SYMBOL_1_CLK,
+ P_UFS_PHY_TX_SYMBOL_0_CLK,
+ P_USB3_PHY_0_WRAPPER_GCC_USB30_PIPE_CLK,
+ P_USB3_PHY_1_WRAPPER_GCC_USB30_PIPE_CLK,
+ P_USB3_PHY_2_WRAPPER_GCC_USB30_PIPE_CLK,
+ P_USB3_UNI_PHY_MP_GCC_USB30_PIPE_0_CLK,
+ P_USB3_UNI_PHY_MP_GCC_USB30_PIPE_1_CLK,
+ P_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ P_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+ P_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ P_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+ P_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ P_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+};
+
+static struct clk_alpha_pll gcc_gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .enable_reg = 0x62040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_eko_t_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gcc_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gcc_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_taycan_eko_t_ops,
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll1 = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .enable_reg = 0x62040,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_eko_t_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll14 = {
+ .offset = 0xe000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .enable_reg = 0x62040,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll14",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_eko_t_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gcc_gpll14_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll14_out_even = {
+ .offset = 0xe000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gcc_gpll14_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll14_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll14_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll14.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_taycan_eko_t_ops,
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll4 = {
+ .offset = 0x4000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .enable_reg = 0x62040,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_eko_t_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll5 = {
+ .offset = 0x5000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .enable_reg = 0x62040,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll5",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_eko_t_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll7 = {
+ .offset = 0x7000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .enable_reg = 0x62040,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll7",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_eko_t_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll8 = {
+ .offset = 0x8000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .enable_reg = 0x62040,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll8",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_eko_t_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll9 = {
+ .offset = 0x9000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .enable_reg = 0x62040,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll9",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_eko_t_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src;
+static struct clk_regmap_mux gcc_usb3_sec_phy_pipe_clk_src;
+static struct clk_regmap_mux gcc_usb3_tert_phy_pipe_clk_src;
+
+static struct clk_rcg2 gcc_usb4_1_phy_pll_pipe_clk_src;
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL1_OUT_MAIN, 4 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll1.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL1_OUT_MAIN, 4 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll1.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL14_OUT_MAIN, 1 },
+ { P_GCC_GPLL14_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll14.clkr.hw },
+ { .hw = &gcc_gpll14_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll4.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL8_OUT_MAIN, 2 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll8.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_10[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll7.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_11[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+ { P_GCC_GPLL8_OUT_MAIN, 3 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_11[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .hw = &gcc_gpll8.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_17[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL9_OUT_MAIN, 2 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_17[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll9.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_18[] = {
+ { P_UFS_PHY_RX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_18[] = {
+ { .index = DT_UFS_PHY_RX_SYMBOL_0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_19[] = {
+ { P_UFS_PHY_RX_SYMBOL_1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_19[] = {
+ { .index = DT_UFS_PHY_RX_SYMBOL_1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_20[] = {
+ { P_UFS_PHY_TX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_20[] = {
+ { .index = DT_UFS_PHY_TX_SYMBOL_0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_21[] = {
+ { P_GCC_USB3_PRIM_PHY_PIPE_CLK_SRC, 0 },
+ { P_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 1 },
+ { P_GCC_USB4_0_PHY_PIPEGMUX_CLK_SRC, 3 },
+};
+
+static const struct clk_parent_data gcc_parent_data_21[] = {
+ { .hw = &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw },
+ { .index = DT_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+ { .index = DT_GCC_USB4_0_PHY_PIPEGMUX_CLK_SRC },
+};
+
+static const struct parent_map gcc_parent_map_22[] = {
+ { P_GCC_USB3_SEC_PHY_PIPE_CLK_SRC, 0 },
+ { P_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 1 },
+ { P_GCC_USB4_1_PHY_PLL_PIPE_CLK_SRC, 2 },
+ { P_GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC, 3 },
+};
+
+static const struct clk_parent_data gcc_parent_data_22[] = {
+ { .hw = &gcc_usb3_sec_phy_pipe_clk_src.clkr.hw },
+ { .index = DT_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+ { .hw = &gcc_usb4_1_phy_pll_pipe_clk_src.clkr.hw },
+ { .index = DT_GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC },
+};
+
+static const struct parent_map gcc_parent_map_23[] = {
+ { P_GCC_USB3_TERT_PHY_PIPE_CLK_SRC, 0 },
+ { P_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 1 },
+ { P_GCC_USB4_2_PHY_PIPEGMUX_CLK_SRC, 3 },
+};
+
+static const struct clk_parent_data gcc_parent_data_23[] = {
+ { .hw = &gcc_usb3_tert_phy_pipe_clk_src.clkr.hw },
+ { .index = DT_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+ { .index = DT_GCC_USB4_2_PHY_PIPEGMUX_CLK_SRC },
+};
+
+static const struct parent_map gcc_parent_map_24[] = {
+ { P_USB3_UNI_PHY_MP_GCC_USB30_PIPE_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_24[] = {
+ { .index = DT_USB3_UNI_PHY_MP_GCC_USB30_PIPE_0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_25[] = {
+ { P_USB3_UNI_PHY_MP_GCC_USB30_PIPE_1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_25[] = {
+ { .index = DT_USB3_UNI_PHY_MP_GCC_USB30_PIPE_1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_26[] = {
+ { P_USB3_PHY_0_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_26[] = {
+ { .index = DT_USB3_PHY_0_WRAPPER_GCC_USB30_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_27[] = {
+ { P_USB3_PHY_1_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_27[] = {
+ { .index = DT_USB3_PHY_1_WRAPPER_GCC_USB30_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_28[] = {
+ { P_USB3_PHY_2_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_28[] = {
+ { .index = DT_USB3_PHY_2_WRAPPER_GCC_USB30_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_29[] = {
+ { P_GCC_USB4_0_PHY_DP0_GMUX_CLK_SRC, 0 },
+ { P_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_29[] = {
+ { .index = DT_GCC_USB4_0_PHY_DP0_GMUX_CLK_SRC },
+ { .index = DT_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_30[] = {
+ { P_GCC_USB4_0_PHY_DP1_GMUX_CLK_SRC, 0 },
+ { P_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_30[] = {
+ { .index = DT_GCC_USB4_0_PHY_DP1_GMUX_CLK_SRC },
+ { .index = DT_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_31[] = {
+ { P_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_31[] = {
+ { .index = DT_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_32[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_32[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_33[] = {
+ { P_GCC_USB4_0_PHY_PCIE_PIPEGMUX_CLK_SRC, 0 },
+ { P_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_33[] = {
+ { .index = DT_GCC_USB4_0_PHY_PCIE_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_34[] = {
+ { P_QUSB4PHY_0_GCC_USB4_RX0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_34[] = {
+ { .index = DT_QUSB4PHY_0_GCC_USB4_RX0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_35[] = {
+ { P_QUSB4PHY_0_GCC_USB4_RX1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_35[] = {
+ { .index = DT_QUSB4PHY_0_GCC_USB4_RX1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_36[] = {
+ { P_GCC_USB4_0_PHY_SYS_PIPEGMUX_CLK_SRC, 0 },
+ { P_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_36[] = {
+ { .index = DT_GCC_USB4_0_PHY_SYS_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_37[] = {
+ { P_GCC_USB4_1_PHY_DP0_GMUX_2_CLK_SRC, 0 },
+ { P_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_37[] = {
+ { .index = DT_GCC_USB4_1_PHY_DP0_GMUX_2_CLK_SRC },
+ { .index = DT_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_38[] = {
+ { P_GCC_USB4_1_PHY_DP1_GMUX_2_CLK_SRC, 0 },
+ { P_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_38[] = {
+ { .index = DT_GCC_USB4_1_PHY_DP1_GMUX_2_CLK_SRC },
+ { .index = DT_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_39[] = {
+ { P_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_39[] = {
+ { .index = DT_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_40[] = {
+ { P_GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC, 0 },
+ { P_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_40[] = {
+ { .index = DT_GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_41[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL5_OUT_MAIN, 3 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_41[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll5.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_42[] = {
+ { P_QUSB4PHY_1_GCC_USB4_RX0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_42[] = {
+ { .index = DT_QUSB4PHY_1_GCC_USB4_RX0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_43[] = {
+ { P_QUSB4PHY_1_GCC_USB4_RX1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_43[] = {
+ { .index = DT_QUSB4PHY_1_GCC_USB4_RX1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_44[] = {
+ { P_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC, 0 },
+ { P_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_44[] = {
+ { .index = DT_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_45[] = {
+ { P_GCC_USB4_2_PHY_DP0_GMUX_CLK_SRC, 0 },
+ { P_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_45[] = {
+ { .index = DT_GCC_USB4_2_PHY_DP0_GMUX_CLK_SRC },
+ { .index = DT_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_46[] = {
+ { P_GCC_USB4_2_PHY_DP1_GMUX_CLK_SRC, 0 },
+ { P_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_46[] = {
+ { .index = DT_GCC_USB4_2_PHY_DP1_GMUX_CLK_SRC },
+ { .index = DT_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_47[] = {
+ { P_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_47[] = {
+ { .index = DT_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_48[] = {
+ { P_GCC_USB4_2_PHY_PCIE_PIPEGMUX_CLK_SRC, 0 },
+ { P_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_48[] = {
+ { .index = DT_GCC_USB4_2_PHY_PCIE_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_49[] = {
+ { P_QUSB4PHY_2_GCC_USB4_RX0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_49[] = {
+ { .index = DT_QUSB4PHY_2_GCC_USB4_RX0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_50[] = {
+ { P_QUSB4PHY_2_GCC_USB4_RX1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_50[] = {
+ { .index = DT_QUSB4PHY_2_GCC_USB4_RX1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_51[] = {
+ { P_GCC_USB4_2_PHY_SYS_PIPEGMUX_CLK_SRC, 0 },
+ { P_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_51[] = {
+ { .index = DT_GCC_USB4_2_PHY_SYS_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_3a_pipe_clk_src = {
+ .reg = 0xdc088,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_3A_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_3b_pipe_clk_src = {
+ .reg = 0x941b4,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_3B_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_4_pipe_clk_src = {
+ .reg = 0x881a4,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_4_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_5_pipe_clk_src = {
+ .reg = 0xc309c,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_5_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_6_pipe_clk_src = {
+ .reg = 0x8a1a4,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_6_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_0_clk_src = {
+ .reg = 0x7706c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_18,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_18,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_18),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_1_clk_src = {
+ .reg = 0x770f0,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_19,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_1_clk_src",
+ .parent_data = gcc_parent_data_19,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_19),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_tx_symbol_0_clk_src = {
+ .reg = 0x7705c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_20,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_20,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_20),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb34_prim_phy_pipe_clk_src = {
+ .reg = 0x2b0b8,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_21,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb34_prim_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_21,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_21),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb34_sec_phy_pipe_clk_src = {
+ .reg = 0x2d0c4,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_22,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb34_sec_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_22,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_22),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb34_tert_phy_pipe_clk_src = {
+ .reg = 0xe00bc,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_23,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb34_tert_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_23,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_23),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_mp_phy_pipe_0_clk_src = {
+ .reg = 0x9a07c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_24,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_pipe_0_clk_src",
+ .parent_data = gcc_parent_data_24,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_24),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_mp_phy_pipe_1_clk_src = {
+ .reg = 0x9a084,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_25,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_pipe_1_clk_src",
+ .parent_data = gcc_parent_data_25,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_25),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = {
+ .reg = 0x3f08c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_26,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_26,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_26),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_sec_phy_pipe_clk_src = {
+ .reg = 0xe207c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_27,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_27,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_27),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_tert_phy_pipe_clk_src = {
+ .reg = 0xe107c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_28,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_tert_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_28,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_28),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_0_phy_dp0_clk_src = {
+ .reg = 0x2b080,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_29,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_dp0_clk_src",
+ .parent_data = gcc_parent_data_29,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_29),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_0_phy_dp1_clk_src = {
+ .reg = 0x2b134,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_30,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_dp1_clk_src",
+ .parent_data = gcc_parent_data_30,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_30),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_0_phy_p2rr2p_pipe_clk_src = {
+ .reg = 0x2b0f0,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_31,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_p2rr2p_pipe_clk_src",
+ .parent_data = gcc_parent_data_31,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_31),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_0_phy_pcie_pipe_mux_clk_src = {
+ .reg = 0x2b120,
+ .shift = 0,
+ .width = 1,
+ .parent_map = gcc_parent_map_33,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_pcie_pipe_mux_clk_src",
+ .parent_data = gcc_parent_data_33,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_33),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_0_phy_rx0_clk_src = {
+ .reg = 0x2b0c0,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_34,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_rx0_clk_src",
+ .parent_data = gcc_parent_data_34,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_34),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_0_phy_rx1_clk_src = {
+ .reg = 0x2b0d4,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_35,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_rx1_clk_src",
+ .parent_data = gcc_parent_data_35,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_35),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_0_phy_sys_clk_src = {
+ .reg = 0x2b100,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_36,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_sys_clk_src",
+ .parent_data = gcc_parent_data_36,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_36),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_dp0_clk_src = {
+ .reg = 0x2d08c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_37,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_dp0_clk_src",
+ .parent_data = gcc_parent_data_37,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_37),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_dp1_clk_src = {
+ .reg = 0x2d154,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_38,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_dp1_clk_src",
+ .parent_data = gcc_parent_data_38,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_38),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_p2rr2p_pipe_clk_src = {
+ .reg = 0x2d114,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_39,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_p2rr2p_pipe_clk_src",
+ .parent_data = gcc_parent_data_39,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_39),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_pcie_pipe_mux_clk_src = {
+ .reg = 0x2d140,
+ .shift = 0,
+ .width = 1,
+ .parent_map = gcc_parent_map_40,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_pcie_pipe_mux_clk_src",
+ .parent_data = gcc_parent_data_40,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_40),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_rx0_clk_src = {
+ .reg = 0x2d0e4,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_42,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_rx0_clk_src",
+ .parent_data = gcc_parent_data_42,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_42),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_rx1_clk_src = {
+ .reg = 0x2d0f8,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_43,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_rx1_clk_src",
+ .parent_data = gcc_parent_data_43,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_43),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_sys_clk_src = {
+ .reg = 0x2d124,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_44,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_sys_clk_src",
+ .parent_data = gcc_parent_data_44,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_44),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_2_phy_dp0_clk_src = {
+ .reg = 0xe0084,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_45,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_dp0_clk_src",
+ .parent_data = gcc_parent_data_45,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_45),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_2_phy_dp1_clk_src = {
+ .reg = 0xe013c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_46,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_dp1_clk_src",
+ .parent_data = gcc_parent_data_46,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_46),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_2_phy_p2rr2p_pipe_clk_src = {
+ .reg = 0xe00f4,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_47,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_p2rr2p_pipe_clk_src",
+ .parent_data = gcc_parent_data_47,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_47),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_2_phy_pcie_pipe_mux_clk_src = {
+ .reg = 0xe0124,
+ .shift = 0,
+ .width = 1,
+ .parent_map = gcc_parent_map_48,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_pcie_pipe_mux_clk_src",
+ .parent_data = gcc_parent_data_48,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_48),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_2_phy_rx0_clk_src = {
+ .reg = 0xe00c4,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_49,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_rx0_clk_src",
+ .parent_data = gcc_parent_data_49,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_49),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_2_phy_rx1_clk_src = {
+ .reg = 0xe00d8,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_50,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_rx1_clk_src",
+ .parent_data = gcc_parent_data_50,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_50),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_2_phy_sys_clk_src = {
+ .reg = 0xe0104,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_51,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_sys_clk_src",
+ .parent_data = gcc_parent_data_51,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_51),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x92004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x93004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0xc8168,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_phy_rchng_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_phy_rchng_clk_src = {
+ .cmd_rcgr = 0xc803c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
+ .cmd_rcgr = 0x2e168,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x2e03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_2_aux_clk_src = {
+ .cmd_rcgr = 0xc0168,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_2_phy_rchng_clk_src = {
+ .cmd_rcgr = 0xc003c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_3a_aux_clk_src = {
+ .cmd_rcgr = 0xdc08c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_3a_phy_rchng_clk_src = {
+ .cmd_rcgr = 0xdc070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_3b_aux_clk_src = {
+ .cmd_rcgr = 0x941b8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_3b_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x94088,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_4_aux_clk_src = {
+ .cmd_rcgr = 0x881a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_4_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x88078,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_5_aux_clk_src = {
+ .cmd_rcgr = 0xc30a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_5_phy_rchng_clk_src = {
+ .cmd_rcgr = 0xc3084,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_6_aux_clk_src = {
+ .cmd_rcgr = 0x8a1a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_6_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x8a078,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_phy_3a_aux_clk_src = {
+ .cmd_rcgr = 0x6c01c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_3a_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_phy_3b_aux_clk_src = {
+ .cmd_rcgr = 0x7501c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_3b_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_phy_4_aux_clk_src = {
+ .cmd_rcgr = 0xd3018,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_4_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_phy_5_aux_clk_src = {
+ .cmd_rcgr = 0xd2018,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_5_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_phy_6_aux_clk_src = {
+ .cmd_rcgr = 0xd4018,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_6_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(60000000, P_GCC_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_oob_qspi_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(403000000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_oob_qspi_s0_clk_src_init = {
+ .name = "gcc_qupv3_oob_qspi_s0_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_oob_qspi_s0_clk_src = {
+ .cmd_rcgr = 0xe7044,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_oob_qspi_s0_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_oob_qspi_s1_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_oob_qspi_s1_clk_src_init = {
+ .name = "gcc_qupv3_oob_qspi_s1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_oob_qspi_s1_clk_src = {
+ .cmd_rcgr = 0xe7170,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_oob_qspi_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_qspi_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_qspi_s2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_qspi_s2_clk_src = {
+ .cmd_rcgr = 0x287a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_qspi_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_qspi_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_qspi_s3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_qspi_s3_clk_src = {
+ .cmd_rcgr = 0x288d0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_qspi_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_qspi_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_qspi_s6_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_qspi_s6_clk_src = {
+ .cmd_rcgr = 0x2866c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_qspi_s6_clk_src_init,
+};
+
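+/*
+ * The wrap0/1/2 serial engines share two tables: s0/s1 top out at 120 MHz
+ * and s4/s5/s7 at 100 MHz, while the QSPI-capable engines (s2/s3/s6)
+ * reuse the 200 MHz OOB QSPI table above.
+ */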
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x28014,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x28150,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s4_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x282b4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x283f0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s7_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
+ .cmd_rcgr = 0x28540,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_qspi_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_qspi_s2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_qspi_s2_clk_src = {
+ .cmd_rcgr = 0xb37a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_qspi_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_qspi_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_qspi_s3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_qspi_s3_clk_src = {
+ .cmd_rcgr = 0xb38d0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_qspi_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_qspi_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_qspi_s6_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_qspi_s6_clk_src = {
+ .cmd_rcgr = 0xb366c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_qspi_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0xb3014,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0xb3150,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0xb32b4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0xb33f0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s7_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
+ .cmd_rcgr = 0xb3540,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s7_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_qspi_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_qspi_s2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_qspi_s2_clk_src = {
+ .cmd_rcgr = 0xb47a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_qspi_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_qspi_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_qspi_s3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_qspi_s3_clk_src = {
+ .cmd_rcgr = 0xb48d0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_qspi_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_qspi_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_qspi_s6_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_qspi_s6_clk_src = {
+ .cmd_rcgr = 0xb466c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_qspi_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s0_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
+ .cmd_rcgr = 0xb4014,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
+ .cmd_rcgr = 0xb4150,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s4_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
+ .cmd_rcgr = 0xb42b4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s5_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
+ .cmd_rcgr = 0xb43f0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s7_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s7_clk_src = {
+ .cmd_rcgr = 0xb4540,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s7_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(202000000, P_GCC_GPLL9_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
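+/*
+ * The SDCC RCGs use clk_rcg2_floor_ops so a requested rate rounds down to
+ * the nearest table entry; running an SD/eMMC bus faster than requested
+ * would violate the card's timing, so undershooting is the safe choice.
+ */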
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0xb001c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_17,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_17,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_17),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(75000000, P_GCC_GPLL0_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0xdf01c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_sdcc4_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc4_apps_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(201500000, P_GCC_GPLL4_OUT_MAIN, 4, 0, 0),
+ F(403000000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x77038,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(201500000, P_GCC_GPLL4_OUT_MAIN, 4, 0, 0),
+ F(403000000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x77090,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
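+/*
+ * The fixed 19.2 MHz sources below (UFS PHY aux, the mock UTMI clocks,
+ * the USB4 sideband interfaces) all reuse ftbl_gcc_pcie_0_aux_clk_src,
+ * which is defined earlier and presumably carries only the TCXO entry.
+ */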
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x770c4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x770a8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb20_master_clk_src[] = {
+ F(60000000, P_GCC_GPLL14_OUT_MAIN, 10, 0, 0),
+ F(120000000, P_GCC_GPLL14_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb20_master_clk_src = {
+ .cmd_rcgr = 0xbc030,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_usb20_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_master_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb20_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xbc048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_mp_master_clk_src[] = {
+ F(66666667, P_GCC_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GCC_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GCC_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_mp_master_clk_src = {
+ .cmd_rcgr = 0x9a03c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_mp_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x9a054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0x3f04c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x3f064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_master_clk_src = {
+ .cmd_rcgr = 0xe203c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xe2054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_tert_master_clk_src = {
+ .cmd_rcgr = 0xe103c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_tert_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_tert_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xe1054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_tert_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_mp_phy_aux_clk_src = {
+ .cmd_rcgr = 0x9a088,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0x3f090,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_sec_phy_aux_clk_src = {
+ .cmd_rcgr = 0xe2080,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_tert_phy_aux_clk_src = {
+ .cmd_rcgr = 0xe1080,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_tert_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb4_0_master_clk_src[] = {
+ F(85714286, P_GCC_GPLL0_OUT_EVEN, 3.5, 0, 0),
+ F(177666750, P_GCC_GPLL8_OUT_MAIN, 4, 0, 0),
+ F(355333500, P_GCC_GPLL8_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb4_0_master_clk_src = {
+ .cmd_rcgr = 0x2b02c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_usb4_0_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_master_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb4_0_phy_pcie_pipe_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(125000000, P_GCC_GPLL7_OUT_MAIN, 4, 0, 0),
+ F(250000000, P_GCC_GPLL7_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb4_0_phy_pcie_pipe_clk_src = {
+ .cmd_rcgr = 0x2b104,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_32,
+ .freq_tbl = ftbl_gcc_usb4_0_phy_pcie_pipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_pcie_pipe_clk_src",
+ .parent_data = gcc_parent_data_32,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_32),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_0_sb_if_clk_src = {
+ .cmd_rcgr = 0x2b0a0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_sb_if_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_0_tmu_clk_src = {
+ .cmd_rcgr = 0x2b084,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_gcc_usb4_0_phy_pcie_pipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_tmu_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_1_master_clk_src = {
+ .cmd_rcgr = 0x2d02c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_usb4_0_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_master_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb4_1_phy_pcie_pipe_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(177666750, P_GCC_GPLL8_OUT_MAIN, 4, 0, 0),
+ F(355333500, P_GCC_GPLL8_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb4_1_phy_pcie_pipe_clk_src = {
+ .cmd_rcgr = 0x2d128,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_11,
+ .freq_tbl = ftbl_gcc_usb4_1_phy_pcie_pipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_pcie_pipe_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb4_1_phy_pll_pipe_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(311000000, P_GCC_GPLL5_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb4_1_phy_pll_pipe_clk_src = {
+ .cmd_rcgr = 0x2d0c8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_41,
+ .freq_tbl = ftbl_gcc_usb4_1_phy_pll_pipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_pll_pipe_clk_src",
+ .parent_data = gcc_parent_data_41,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_41),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_1_sb_if_clk_src = {
+ .cmd_rcgr = 0x2d0ac,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_sb_if_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_1_tmu_clk_src = {
+ .cmd_rcgr = 0x2d090,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_gcc_usb4_0_phy_pcie_pipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_tmu_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_2_master_clk_src = {
+ .cmd_rcgr = 0xe002c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_usb4_0_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_master_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_2_phy_pcie_pipe_clk_src = {
+ .cmd_rcgr = 0xe0108,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_11,
+ .freq_tbl = ftbl_gcc_usb4_0_phy_pcie_pipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_pcie_pipe_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_2_sb_if_clk_src = {
+ .cmd_rcgr = 0xe00a4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_sb_if_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_2_tmu_clk_src = {
+ .cmd_rcgr = 0xe0088,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_gcc_usb4_0_phy_pcie_pipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_tmu_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
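+/*
+ * Read-only post-dividers behind the RCGs above: clk_regmap_div_ro_ops
+ * only reads back whatever divider value is already programmed, and
+ * CLK_SET_RATE_PARENT forwards rate requests to the parent RCG instead
+ * of touching the divider field.
+ */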
+static struct clk_regmap_div gcc_pcie_3b_pipe_div_clk_src = {
+ .reg = 0x94070,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_pipe_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3b_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_pcie_4_pipe_div_clk_src = {
+ .reg = 0x88060,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_pipe_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_4_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_pcie_5_pipe_div_clk_src = {
+ .reg = 0xc306c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_pipe_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_5_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_pcie_6_pipe_div_clk_src = {
+ .reg = 0x8a060,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_pipe_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_6_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_oob_s0_clk_src = {
+ .reg = 0xe7024,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_s0_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_oob_qspi_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_oob_s1_clk_src = {
+ .reg = 0xe7038,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_s1_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_oob_qspi_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap0_s2_clk_src = {
+ .reg = 0x2828c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_qspi_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap0_s3_clk_src = {
+ .reg = 0x282a0,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_qspi_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap0_s6_clk_src = {
+ .reg = 0x2852c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_qspi_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap1_s2_clk_src = {
+ .reg = 0xb328c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap1_s3_clk_src = {
+ .reg = 0xb32a0,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap1_s6_clk_src = {
+ .reg = 0xb352c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s6_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap2_s2_clk_src = {
+ .reg = 0xb428c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_qspi_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap2_s3_clk_src = {
+ .reg = 0xb42a0,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s3_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_qspi_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap2_s6_clk_src = {
+ .reg = 0xb452c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s6_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_qspi_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb20_mock_utmi_postdiv_clk_src = {
+ .reg = 0xbc174,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_mp_mock_utmi_postdiv_clk_src = {
+ .reg = 0x9a06c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_mp_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
+ .reg = 0x3f07c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_sec_mock_utmi_postdiv_clk_src = {
+ .reg = 0xe206c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_sec_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_tert_mock_utmi_postdiv_clk_src = {
+ .reg = 0xe106c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_tert_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_tert_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
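+/*
+ * Branch (gate) clocks. BRANCH_HALT_VOTED entries are enabled through a
+ * shared vote register (0x62008, 0x62010, ...) so multiple masters can
+ * keep them on, while BRANCH_HALT_SKIP marks clocks whose halt bit cannot
+ * be reliably polled, e.g. PHY pipe clocks that stop with the PHY.
+ */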
+static struct clk_branch gcc_aggre_noc_pcie_3a_west_sf_axi_clk = {
+ .halt_reg = 0xdc0bc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie_3a_west_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_3b_west_sf_axi_clk = {
+ .halt_reg = 0x941ec,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie_3b_west_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_4_west_sf_axi_clk = {
+ .halt_reg = 0x881d0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie_4_west_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_5_east_sf_axi_clk = {
+ .halt_reg = 0xc30d0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(30),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie_5_east_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_6_west_sf_axi_clk = {
+ .halt_reg = 0x8a1d0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(31),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie_6_west_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x77000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb2_prim_axi_clk = {
+ .halt_reg = 0xbc17c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xbc17c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xbc17c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb2_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_mp_axi_clk = {
+ .halt_reg = 0x9a004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9a004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_mp_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_mp_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0x3f00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3f00c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3f00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_sec_axi_clk = {
+ .halt_reg = 0xe2004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe2004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe2004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_sec_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_tert_axi_clk = {
+ .halt_reg = 0xe1004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe1004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe1004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_tert_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_tert_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb4_0_axi_clk = {
+ .halt_reg = 0x2b000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2b000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2b000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb4_0_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb4_1_axi_clk = {
+ .halt_reg = 0x2d000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2d000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2d000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb4_1_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb4_2_axi_clk = {
+ .halt_reg = 0xe0000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe0000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe0000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb4_2_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_av1e_ahb_clk = {
+ .halt_reg = 0x9b02c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9b02c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9b02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_av1e_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_av1e_axi_clk = {
+ .halt_reg = 0x9b030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x9b030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9b030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_av1e_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_av1e_xo_clk = {
+ .halt_reg = 0x9b044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9b044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_av1e_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x34038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x34038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0x26014,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_sf_axi_clk = {
+ .halt_reg = 0x26028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_pcie_anoc_ahb_clk = {
+ .halt_reg = 0x82004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x82004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_pcie_anoc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_pcie_anoc_south_ahb_clk = {
+ .halt_reg = 0xba2ec,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba2ec,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_pcie_anoc_south_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb2_prim_axi_clk = {
+ .halt_reg = 0xbc178,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xbc178,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xbc178,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb2_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_mp_axi_clk = {
+ .halt_reg = 0x9a000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9a000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9a000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_mp_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_mp_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x3f000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3f000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3f000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_sec_axi_clk = {
+ .halt_reg = 0xe2000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe2000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe2000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_sec_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_tert_axi_clk = {
+ .halt_reg = 0xe1000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe1000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe1000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_tert_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_tert_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb_anoc_ahb_clk = {
+ .halt_reg = 0x3f004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3f004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb_anoc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb_anoc_south_ahb_clk = {
+ .halt_reg = 0x3f008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3f008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb_anoc_south_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0x27008,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x27008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp_hf_axi_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eva_ahb_clk = {
+ .halt_reg = 0x9b004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9b004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9b004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eva_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eva_axi0_clk = {
+ .halt_reg = 0x9b008,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x9b008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9b008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eva_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eva_axi0c_clk = {
+ .halt_reg = 0x9b01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9b01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9b01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eva_axi0c_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eva_xo_clk = {
+ .halt_reg = 0x9b024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9b024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eva_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x92000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x92000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x93000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x93000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gemnoc_gfx_clk = {
+ .halt_reg = 0x71010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gemnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_reg = 0x71024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_reg = 0x7102c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7102c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62038,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0xc8018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0xba4a8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba4a8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0xba498,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xba498,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_phy_rchng_clk = {
+ .halt_reg = 0xc8038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0xc8028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0xba488,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba488,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0xba484,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x2e018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .halt_reg = 0xba480,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba480,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .halt_reg = 0xba470,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xba470,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_phy_rchng_clk = {
+ .halt_reg = 0x2e038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+ .halt_reg = 0x2e028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .halt_reg = 0xba460,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba460,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = {
+ .halt_reg = 0xba45c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_aux_clk = {
+ .halt_reg = 0xc0018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_2_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_cfg_ahb_clk = {
+ .halt_reg = 0xba4d0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba4d0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(31),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_mstr_axi_clk = {
+ .halt_reg = 0xba4c0,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xba4c0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(30),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_phy_rchng_clk = {
+ .halt_reg = 0xc0038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_2_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_pipe_clk = {
+ .halt_reg = 0xc0028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_slv_axi_clk = {
+ .halt_reg = 0xba4b0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba4b0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_slv_q2a_axi_clk = {
+ .halt_reg = 0xba4ac,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_aux_clk = {
+ .halt_reg = 0xdc04c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xdc04c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3a_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_cfg_ahb_clk = {
+ .halt_reg = 0xba4f0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba4f0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_mstr_axi_clk = {
+ .halt_reg = 0xdc038,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xdc038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_phy_rchng_clk = {
+ .halt_reg = 0xdc06c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xdc06c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3a_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_pipe_clk = {
+ .halt_reg = 0xdc05c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xdc05c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3a_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_slv_axi_clk = {
+ .halt_reg = 0xdc024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xdc024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_slv_q2a_axi_clk = {
+ .halt_reg = 0xdc01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xdc01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_aux_clk = {
+ .halt_reg = 0x94050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3b_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_cfg_ahb_clk = {
+ .halt_reg = 0xba4f4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba4f4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_mstr_axi_clk = {
+ .halt_reg = 0x94038,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x94038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_phy_rchng_clk = {
+ .halt_reg = 0x94084,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3b_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_pipe_clk = {
+ .halt_reg = 0x94060,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3b_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_pipe_div2_clk = {
+ .halt_reg = 0x94074,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_pipe_div2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3b_pipe_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_slv_axi_clk = {
+ .halt_reg = 0x94024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x94024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_slv_q2a_axi_clk = {
+ .halt_reg = 0x9401c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_aux_clk = {
+ .halt_reg = 0x88040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_4_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_cfg_ahb_clk = {
+ .halt_reg = 0xba4fc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba4fc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_mstr_axi_clk = {
+ .halt_reg = 0x88030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x88030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_phy_rchng_clk = {
+ .halt_reg = 0x88074,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_4_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_pipe_clk = {
+ .halt_reg = 0x88050,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_4_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_pipe_div2_clk = {
+ .halt_reg = 0x88064,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_pipe_div2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_4_pipe_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_slv_axi_clk = {
+ .halt_reg = 0x88020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x88020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_slv_q2a_axi_clk = {
+ .halt_reg = 0x8801c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_aux_clk = {
+ .halt_reg = 0xc304c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_5_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_cfg_ahb_clk = {
+ .halt_reg = 0xba4f8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba4f8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_mstr_axi_clk = {
+ .halt_reg = 0xc3038,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xc3038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_phy_rchng_clk = {
+ .halt_reg = 0xc3080,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_5_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_pipe_clk = {
+ .halt_reg = 0xc305c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_5_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_pipe_div2_clk = {
+ .halt_reg = 0xc3070,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_pipe_div2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_5_pipe_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_slv_axi_clk = {
+ .halt_reg = 0xc3024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc3024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_slv_q2a_axi_clk = {
+ .halt_reg = 0xc301c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6_aux_clk = {
+ .halt_reg = 0x8a040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_6_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6_cfg_ahb_clk = {
+ .halt_reg = 0xba500,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba500,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6_mstr_axi_clk = {
+ .halt_reg = 0x8a030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x8a030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6_phy_rchng_clk = {
+ .halt_reg = 0x8a074,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(30),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_6_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6_pipe_clk = {
+ .halt_reg = 0x8a050,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_6_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6_pipe_div2_clk = {
+ .halt_reg = 0x8a064,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_pipe_div2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_6_pipe_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6_slv_axi_clk = {
+ .halt_reg = 0x8a020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8a020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6_slv_q2a_axi_clk = {
+ .halt_reg = 0x8a01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
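+/* PCIe NoC (interconnect) clocks, all voted through register 0x62008. */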
+static struct clk_branch gcc_pcie_noc_pwrctl_clk = {
+ .halt_reg = 0xba2ac,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_noc_pwrctl_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_noc_qosgen_extref_clk = {
+ .halt_reg = 0xba2a8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_noc_qosgen_extref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_noc_sf_center_clk = {
+ .halt_reg = 0xba2b0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba2b0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_noc_sf_center_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_noc_slave_sf_east_clk = {
+ .halt_reg = 0xba2b8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba2b8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_noc_slave_sf_east_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_noc_slave_sf_west_clk = {
+ .halt_reg = 0xba2c0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba2c0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_noc_slave_sf_west_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_noc_tsctr_clk = {
+ .halt_reg = 0xba2a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba2a4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_noc_tsctr_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_3a_aux_clk = {
+ .halt_reg = 0x6c038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6c038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_3a_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_phy_3a_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_3b_aux_clk = {
+ .halt_reg = 0x75034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(31),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_3b_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_phy_3b_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_4_aux_clk = {
+ .halt_reg = 0xd3030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_4_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_phy_4_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_5_aux_clk = {
+ .halt_reg = 0xd2030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_5_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_phy_5_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_6_aux_clk = {
+ .halt_reg = 0xd4030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(31),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_6_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_phy_6_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
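+/*
+ * PCIe RSCC interface clocks (RSCC is presumably the PCIe resource
+ * state coordinator block).
+ */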
+static struct clk_branch gcc_pcie_rscc_cfg_ahb_clk = {
+ .halt_reg = 0xb8004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb8004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62038,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_rscc_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_rscc_xo_clk = {
+ .halt_reg = 0xb8008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62038,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_rscc_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
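+/*
+ * PDM (pulse density modulation) block clocks; gcc_pdm2_clk follows its
+ * RCG, while the AHB and xo4 branches are plain gates.
+ */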
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
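+/*
+ * QMIP AHB clocks: per-client AHB gates for the multimedia blocks
+ * (AV1 encoder, camera, GPU, video) and the PCIe controllers.
+ */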
+static struct clk_branch gcc_qmip_av1e_ahb_clk = {
+ .halt_reg = 0x9b048,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9b048,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9b048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_av1e_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_cmd_ahb_clk = {
+ .halt_reg = 0x26010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x26010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_cmd_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0x26008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x26008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+ .halt_reg = 0x2600c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2600c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_rt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_gpu_ahb_clk = {
+ .halt_reg = 0x71008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_gpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_3a_ahb_clk = {
+ .halt_reg = 0xdc018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xdc018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_3a_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_3b_ahb_clk = {
+ .halt_reg = 0x94018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x94018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_3b_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_4_ahb_clk = {
+ .halt_reg = 0x88018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x88018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_4_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_5_ahb_clk = {
+ .halt_reg = 0xc3018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc3018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_5_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_6_ahb_clk = {
+ .halt_reg = 0x8a018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8a018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_6_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cv_cpu_ahb_clk = {
+ .halt_reg = 0x32018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_cv_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cvp_ahb_clk = {
+ .halt_reg = 0x32008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_cvp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_v_cpu_ahb_clk = {
+ .halt_reg = 0x32014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_v_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec1_ahb_clk = {
+ .halt_reg = 0x32010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_vcodec1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0x3200c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3200c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
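+/*
+ * QUPv3 OOB (presumably "out of band") wrapper clocks: core, AHB and
+ * per-serial-engine branches, mostly voted through register 0x62018.
+ */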
+static struct clk_branch gcc_qupv3_oob_core_2x_clk = {
+ .halt_reg = 0xc5040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_oob_core_clk = {
+ .halt_reg = 0xc502c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_oob_m_ahb_clk = {
+ .halt_reg = 0xe7004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe7004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe7004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_oob_qspi_s0_clk = {
+ .halt_reg = 0xe7040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_qspi_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_oob_qspi_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_oob_qspi_s1_clk = {
+ .halt_reg = 0xe729c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_qspi_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_oob_qspi_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_oob_s0_clk = {
+ .halt_reg = 0xe7014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_oob_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_oob_s1_clk = {
+ .halt_reg = 0xe7028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_oob_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_oob_s_ahb_clk = {
+ .halt_reg = 0xc5028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc5028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_oob_tcxo_clk = {
+ .halt_reg = 0xe703c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_tcxo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
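+/*
+ * QUPv3 wrappers 0..2: one branch per serial engine (SE), each fed from
+ * its own RCG with CLK_SET_RATE_PARENT so SE rate requests reach the
+ * RCG; SEs 2, 3 and 6 additionally expose QSPI branches.
+ */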
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0xc5448,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0xc5434,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_qspi_s2_clk = {
+ .halt_reg = 0x2879c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_qspi_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_qspi_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_qspi_s3_clk = {
+ .halt_reg = 0x288cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_qspi_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_qspi_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_qspi_s6_clk = {
+ .halt_reg = 0x28798,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_qspi_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_qspi_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x28004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x28140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x2827c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x28290,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x282a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x283e0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s6_clk = {
+ .halt_reg = 0x2851c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s7_clk = {
+ .halt_reg = 0x28530,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0xc5198,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0xc5184,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_qspi_s2_clk = {
+ .halt_reg = 0xb379c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_qspi_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_qspi_s3_clk = {
+ .halt_reg = 0xb38cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_qspi_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_qspi_s6_clk = {
+ .halt_reg = 0xb3798,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_qspi_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0xb3004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0xb3140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0xb327c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0xb3290,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0xb32a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0xb33e0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s6_clk = {
+ .halt_reg = 0xb351c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s7_clk = {
+ .halt_reg = 0xb3530,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_2x_clk = {
+ .halt_reg = 0xc52f0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_clk = {
+ .halt_reg = 0xc52dc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_qspi_s2_clk = {
+ .halt_reg = 0xb479c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_qspi_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_qspi_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_qspi_s3_clk = {
+ .halt_reg = 0xb48cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_qspi_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_qspi_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_qspi_s6_clk = {
+ .halt_reg = 0xb4798,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_qspi_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_qspi_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s0_clk = {
+ .halt_reg = 0xb4004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(30),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s1_clk = {
+ .halt_reg = 0xb4140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(31),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s2_clk = {
+ .halt_reg = 0xb427c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s3_clk = {
+ .halt_reg = 0xb4290,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s4_clk = {
+ .halt_reg = 0xb42a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s5_clk = {
+ .halt_reg = 0xb43e0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s6_clk = {
+ .halt_reg = 0xb451c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s7_clk = {
+ .halt_reg = 0xb4530,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0xc542c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc542c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0xc5430,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc5430,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0xc517c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc517c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0xc5180,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc5180,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_m_ahb_clk = {
+ .halt_reg = 0xc52d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc52d4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_2_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_s_ahb_clk = {
+ .halt_reg = 0xc52d8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc52d8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_2_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
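+/*
+ * SDCC (SD/eMMC card controller) clocks: self-enabled branches with
+ * plain BRANCH_HALT polling.
+ */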
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0xb0014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0xb0004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .halt_reg = 0xdf014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xdf014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc4_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+ .halt_reg = 0xdf004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xdf004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc4_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
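+/*
+ * UFS PHY/host controller clocks. The RX/TX symbol branches are sourced
+ * from the UFS PHY and use BRANCH_HALT_DELAY (no status polling, only a
+ * delay), since they cannot be polled while the PHY is down.
+ */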
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0xba504,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba504,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xba504,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x7701c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7701c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x77080,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77080,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x770c0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770c0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x77034,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x77034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_rx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+ .halt_reg = 0x770dc,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x770dc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_rx_symbol_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x77030,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x77030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_tx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x77070,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77070,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_master_clk = {
+ .halt_reg = 0xbc018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xbc018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_mock_utmi_clk = {
+ .halt_reg = 0xbc02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xbc02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_sleep_clk = {
+ .halt_reg = 0xbc028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xbc028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mp_master_clk = {
+ .halt_reg = 0x9a024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9a024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_mp_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mp_mock_utmi_clk = {
+ .halt_reg = 0x9a038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9a038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_mp_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mp_sleep_clk = {
+ .halt_reg = 0x9a034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9a034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0x3f030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3f030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0x3f048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3f048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0x3f044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3f044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_master_clk = {
+ .halt_reg = 0xe2024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe2024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_mock_utmi_clk = {
+ .halt_reg = 0xe2038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe2038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_sleep_clk = {
+ .halt_reg = 0xe2034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe2034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_tert_master_clk = {
+ .halt_reg = 0xe1024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe1024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_tert_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_tert_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_tert_mock_utmi_clk = {
+ .halt_reg = 0xe1038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe1038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_tert_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_tert_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_tert_sleep_clk = {
+ .halt_reg = 0xe1034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe1034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_tert_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mp_phy_aux_clk = {
+ .halt_reg = 0x9a070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9a070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_mp_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mp_phy_com_aux_clk = {
+ .halt_reg = 0x9a074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9a074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_mp_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mp_phy_pipe_0_clk = {
+ .halt_reg = 0x9a078,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x9a078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_pipe_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_mp_phy_pipe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mp_phy_pipe_1_clk = {
+ .halt_reg = 0x9a080,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x9a080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_pipe_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_mp_phy_pipe_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0x3f080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3f080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0x3f084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3f084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0x3f088,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x3f088,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3f088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb34_prim_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_aux_clk = {
+ .halt_reg = 0xe2070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe2070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_com_aux_clk = {
+ .halt_reg = 0xe2074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe2074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_pipe_clk = {
+ .halt_reg = 0xe2078,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe2078,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe2078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_phy_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb34_sec_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_tert_phy_aux_clk = {
+ .halt_reg = 0xe1070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe1070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_tert_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_tert_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_tert_phy_com_aux_clk = {
+ .halt_reg = 0xe1074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe1074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_tert_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_tert_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_tert_phy_pipe_clk = {
+ .halt_reg = 0xe1078,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe1078,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe1078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_tert_phy_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb34_tert_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_cfg_ahb_clk = {
+ .halt_reg = 0xba450,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba450,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xba450,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_dp0_clk = {
+ .halt_reg = 0x2b070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_dp0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_dp0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_dp1_clk = {
+ .halt_reg = 0x2b124,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b124,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_dp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_dp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_master_clk = {
+ .halt_reg = 0x2b01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_phy_p2rr2p_pipe_clk = {
+ .halt_reg = 0x2b0f4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b0f4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_p2rr2p_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_p2rr2p_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
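+/*
+ * The three USB4 PHY PCIe pipe branches below are not enabled through
+ * their own branch registers: they share the vote register at 0x62010
+ * (bits 11, 12 and 13), while halt status is still read back from the
+ * per-branch halt_reg.
+ */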
+static struct clk_branch gcc_usb4_0_phy_pcie_pipe_clk = {
+ .halt_reg = 0x2b04c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_pcie_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_phy_rx0_clk = {
+ .halt_reg = 0x2b0c4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b0c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_rx0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_rx0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_phy_rx1_clk = {
+ .halt_reg = 0x2b0d8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b0d8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_rx1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_rx1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_phy_usb_pipe_clk = {
+ .halt_reg = 0x2b0bc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2b0bc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2b0bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_usb_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb34_prim_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_sb_if_clk = {
+ .halt_reg = 0x2b048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_sb_if_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_sb_if_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_sys_clk = {
+ .halt_reg = 0x2b05c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b05c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_sys_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_tmu_clk = {
+ .halt_reg = 0x2b09c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2b09c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2b09c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_tmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_tmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_uc_hrr_clk = {
+ .halt_reg = 0x2b06c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b06c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_uc_hrr_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_cfg_ahb_clk = {
+ .halt_reg = 0xba454,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba454,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xba454,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_dp0_clk = {
+ .halt_reg = 0x2d07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_dp0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_dp0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_dp1_clk = {
+ .halt_reg = 0x2d144,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d144,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_dp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_dp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_master_clk = {
+ .halt_reg = 0x2d01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_phy_p2rr2p_pipe_clk = {
+ .halt_reg = 0x2d118,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d118,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_p2rr2p_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_p2rr2p_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_phy_pcie_pipe_clk = {
+ .halt_reg = 0x2d04c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_pcie_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_phy_rx0_clk = {
+ .halt_reg = 0x2d0e8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d0e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_rx0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_rx0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_phy_rx1_clk = {
+ .halt_reg = 0x2d0fc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d0fc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_rx1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_rx1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_phy_usb_pipe_clk = {
+ .halt_reg = 0x2d0e0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2d0e0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2d0e0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_usb_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb34_sec_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_sb_if_clk = {
+ .halt_reg = 0x2d048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_sb_if_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_sb_if_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_sys_clk = {
+ .halt_reg = 0x2d05c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d05c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_sys_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_tmu_clk = {
+ .halt_reg = 0x2d0a8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2d0a8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2d0a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_tmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_tmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_uc_hrr_clk = {
+ .halt_reg = 0x2d06c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d06c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_uc_hrr_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_cfg_ahb_clk = {
+ .halt_reg = 0xba458,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba458,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xba458,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_dp0_clk = {
+ .halt_reg = 0xe0070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe0070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_dp0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_dp0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_dp1_clk = {
+ .halt_reg = 0xe0128,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe0128,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_dp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_dp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_master_clk = {
+ .halt_reg = 0xe001c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe001c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_phy_p2rr2p_pipe_clk = {
+ .halt_reg = 0xe00f8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe00f8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_p2rr2p_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_p2rr2p_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_phy_pcie_pipe_clk = {
+ .halt_reg = 0xe004c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_pcie_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_phy_rx0_clk = {
+ .halt_reg = 0xe00c8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe00c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_rx0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_rx0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_phy_rx1_clk = {
+ .halt_reg = 0xe00dc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe00dc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_rx1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_rx1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_phy_usb_pipe_clk = {
+ .halt_reg = 0xe00c0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe00c0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe00c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_usb_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb34_tert_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_sb_if_clk = {
+ .halt_reg = 0xe0048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe0048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_sb_if_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_sb_if_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_sys_clk = {
+ .halt_reg = 0xe005c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe005c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_sys_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_tmu_clk = {
+ .halt_reg = 0xe00a0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe00a0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe00a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_tmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_tmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_uc_hrr_clk = {
+ .halt_reg = 0xe006c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe006c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_uc_hrr_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0x3201c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x3201c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0c_clk = {
+ .halt_reg = 0x32030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x32030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi0c_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi1_clk = {
+ .halt_reg = 0x32044,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x32044,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
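+/*
+ * GDSC power domains for the PCIe/UFS/USB blocks (see
+ * drivers/clk/qcom/gdsc.c): gdscr is the GDSC control register, the
+ * *_wait_val fields program the power-up/power-down handshake delays,
+ * PWRSTS_OFF_ON restricts the domain to plain off/on states, and
+ * VOTABLE marks domains whose enable is voted on by multiple masters
+ * rather than owned by the kernel alone.
+ */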
+static struct gdsc gcc_pcie_0_tunnel_gdsc = {
+ .gdscr = 0xc8004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_0_tunnel_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_1_tunnel_gdsc = {
+ .gdscr = 0x2e004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_1_tunnel_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_2_tunnel_gdsc = {
+ .gdscr = 0xc0004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_2_tunnel_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_3a_gdsc = {
+ .gdscr = 0xdc004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_3a_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_3a_phy_gdsc = {
+ .gdscr = 0x6c004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_pcie_3a_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_3b_gdsc = {
+ .gdscr = 0x94004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_3b_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_3b_phy_gdsc = {
+ .gdscr = 0x75004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_pcie_3b_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_4_gdsc = {
+ .gdscr = 0x88004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_4_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_4_phy_gdsc = {
+ .gdscr = 0xd3004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_pcie_4_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_5_gdsc = {
+ .gdscr = 0xc3004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_5_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_5_phy_gdsc = {
+ .gdscr = 0xd2004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_pcie_5_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_6_gdsc = {
+ .gdscr = 0x8a004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_6_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_6_phy_gdsc = {
+ .gdscr = 0xd4004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_pcie_6_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_ufs_phy_gdsc = {
+ .gdscr = 0x77008,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb20_prim_gdsc = {
+ .gdscr = 0xbc004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb20_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb30_mp_gdsc = {
+ .gdscr = 0x9a010,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb30_mp_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb30_prim_gdsc = {
+ .gdscr = 0x3f01c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb30_sec_gdsc = {
+ .gdscr = 0xe2010,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb30_sec_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb30_tert_gdsc = {
+ .gdscr = 0xe1010,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb30_tert_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb3_mp_ss0_phy_gdsc = {
+ .gdscr = 0x5400c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_usb3_mp_ss0_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb3_mp_ss1_phy_gdsc = {
+ .gdscr = 0x5402c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_usb3_mp_ss1_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb4_0_gdsc = {
+ .gdscr = 0x2b008,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb4_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb4_1_gdsc = {
+ .gdscr = 0x2d008,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb4_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb4_2_gdsc = {
+ .gdscr = 0xe0008,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb4_2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb_0_phy_gdsc = {
+ .gdscr = 0xdb024,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_usb_0_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb_1_phy_gdsc = {
+ .gdscr = 0x2c024,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_usb_1_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb_2_phy_gdsc = {
+ .gdscr = 0xbe024,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_usb_2_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
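+/*
+ * Map from the dt-binding clock indices to the regmap clocks; this
+ * array is handed to the common qcom clock-controller probe code,
+ * which registers each entry with the clock framework.
+ */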
+static struct clk_regmap *gcc_glymur_clocks[] = {
+ [GCC_AGGRE_NOC_PCIE_3A_WEST_SF_AXI_CLK] = &gcc_aggre_noc_pcie_3a_west_sf_axi_clk.clkr,
+ [GCC_AGGRE_NOC_PCIE_3B_WEST_SF_AXI_CLK] = &gcc_aggre_noc_pcie_3b_west_sf_axi_clk.clkr,
+ [GCC_AGGRE_NOC_PCIE_4_WEST_SF_AXI_CLK] = &gcc_aggre_noc_pcie_4_west_sf_axi_clk.clkr,
+ [GCC_AGGRE_NOC_PCIE_5_EAST_SF_AXI_CLK] = &gcc_aggre_noc_pcie_5_east_sf_axi_clk.clkr,
+ [GCC_AGGRE_NOC_PCIE_6_WEST_SF_AXI_CLK] = &gcc_aggre_noc_pcie_6_west_sf_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_USB2_PRIM_AXI_CLK] = &gcc_aggre_usb2_prim_axi_clk.clkr,
+ [GCC_AGGRE_USB3_MP_AXI_CLK] = &gcc_aggre_usb3_mp_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_AGGRE_USB3_SEC_AXI_CLK] = &gcc_aggre_usb3_sec_axi_clk.clkr,
+ [GCC_AGGRE_USB3_TERT_AXI_CLK] = &gcc_aggre_usb3_tert_axi_clk.clkr,
+ [GCC_AGGRE_USB4_0_AXI_CLK] = &gcc_aggre_usb4_0_axi_clk.clkr,
+ [GCC_AGGRE_USB4_1_AXI_CLK] = &gcc_aggre_usb4_1_axi_clk.clkr,
+ [GCC_AGGRE_USB4_2_AXI_CLK] = &gcc_aggre_usb4_2_axi_clk.clkr,
+ [GCC_AV1E_AHB_CLK] = &gcc_av1e_ahb_clk.clkr,
+ [GCC_AV1E_AXI_CLK] = &gcc_av1e_axi_clk.clkr,
+ [GCC_AV1E_XO_CLK] = &gcc_av1e_xo_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr,
+ [GCC_CFG_NOC_PCIE_ANOC_AHB_CLK] = &gcc_cfg_noc_pcie_anoc_ahb_clk.clkr,
+ [GCC_CFG_NOC_PCIE_ANOC_SOUTH_AHB_CLK] = &gcc_cfg_noc_pcie_anoc_south_ahb_clk.clkr,
+ [GCC_CFG_NOC_USB2_PRIM_AXI_CLK] = &gcc_cfg_noc_usb2_prim_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_MP_AXI_CLK] = &gcc_cfg_noc_usb3_mp_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_SEC_AXI_CLK] = &gcc_cfg_noc_usb3_sec_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_TERT_AXI_CLK] = &gcc_cfg_noc_usb3_tert_axi_clk.clkr,
+ [GCC_CFG_NOC_USB_ANOC_AHB_CLK] = &gcc_cfg_noc_usb_anoc_ahb_clk.clkr,
+ [GCC_CFG_NOC_USB_ANOC_SOUTH_AHB_CLK] = &gcc_cfg_noc_usb_anoc_south_ahb_clk.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_EVA_AHB_CLK] = &gcc_eva_ahb_clk.clkr,
+ [GCC_EVA_AXI0_CLK] = &gcc_eva_axi0_clk.clkr,
+ [GCC_EVA_AXI0C_CLK] = &gcc_eva_axi0c_clk.clkr,
+ [GCC_EVA_XO_CLK] = &gcc_eva_xo_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPLL0] = &gcc_gpll0.clkr,
+ [GCC_GPLL0_OUT_EVEN] = &gcc_gpll0_out_even.clkr,
+ [GCC_GPLL1] = &gcc_gpll1.clkr,
+ [GCC_GPLL14] = &gcc_gpll14.clkr,
+ [GCC_GPLL14_OUT_EVEN] = &gcc_gpll14_out_even.clkr,
+ [GCC_GPLL4] = &gcc_gpll4.clkr,
+ [GCC_GPLL5] = &gcc_gpll5.clkr,
+ [GCC_GPLL7] = &gcc_gpll7.clkr,
+ [GCC_GPLL8] = &gcc_gpll8.clkr,
+ [GCC_GPLL9] = &gcc_gpll9.clkr,
+ [GCC_GPU_GEMNOC_GFX_CLK] = &gcc_gpu_gemnoc_gfx_clk.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK] = &gcc_pcie_0_phy_rchng_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK_SRC] = &gcc_pcie_0_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr,
+ [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+ [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK] = &gcc_pcie_1_phy_rchng_clk.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK_SRC] = &gcc_pcie_1_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
+ [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_2_AUX_CLK] = &gcc_pcie_2_aux_clk.clkr,
+ [GCC_PCIE_2_AUX_CLK_SRC] = &gcc_pcie_2_aux_clk_src.clkr,
+ [GCC_PCIE_2_CFG_AHB_CLK] = &gcc_pcie_2_cfg_ahb_clk.clkr,
+ [GCC_PCIE_2_MSTR_AXI_CLK] = &gcc_pcie_2_mstr_axi_clk.clkr,
+ [GCC_PCIE_2_PHY_RCHNG_CLK] = &gcc_pcie_2_phy_rchng_clk.clkr,
+ [GCC_PCIE_2_PHY_RCHNG_CLK_SRC] = &gcc_pcie_2_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_2_PIPE_CLK] = &gcc_pcie_2_pipe_clk.clkr,
+ [GCC_PCIE_2_SLV_AXI_CLK] = &gcc_pcie_2_slv_axi_clk.clkr,
+ [GCC_PCIE_2_SLV_Q2A_AXI_CLK] = &gcc_pcie_2_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_3A_AUX_CLK] = &gcc_pcie_3a_aux_clk.clkr,
+ [GCC_PCIE_3A_AUX_CLK_SRC] = &gcc_pcie_3a_aux_clk_src.clkr,
+ [GCC_PCIE_3A_CFG_AHB_CLK] = &gcc_pcie_3a_cfg_ahb_clk.clkr,
+ [GCC_PCIE_3A_MSTR_AXI_CLK] = &gcc_pcie_3a_mstr_axi_clk.clkr,
+ [GCC_PCIE_3A_PHY_RCHNG_CLK] = &gcc_pcie_3a_phy_rchng_clk.clkr,
+ [GCC_PCIE_3A_PHY_RCHNG_CLK_SRC] = &gcc_pcie_3a_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_3A_PIPE_CLK] = &gcc_pcie_3a_pipe_clk.clkr,
+ [GCC_PCIE_3A_PIPE_CLK_SRC] = &gcc_pcie_3a_pipe_clk_src.clkr,
+ [GCC_PCIE_3A_SLV_AXI_CLK] = &gcc_pcie_3a_slv_axi_clk.clkr,
+ [GCC_PCIE_3A_SLV_Q2A_AXI_CLK] = &gcc_pcie_3a_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_3B_AUX_CLK] = &gcc_pcie_3b_aux_clk.clkr,
+ [GCC_PCIE_3B_AUX_CLK_SRC] = &gcc_pcie_3b_aux_clk_src.clkr,
+ [GCC_PCIE_3B_CFG_AHB_CLK] = &gcc_pcie_3b_cfg_ahb_clk.clkr,
+ [GCC_PCIE_3B_MSTR_AXI_CLK] = &gcc_pcie_3b_mstr_axi_clk.clkr,
+ [GCC_PCIE_3B_PHY_RCHNG_CLK] = &gcc_pcie_3b_phy_rchng_clk.clkr,
+ [GCC_PCIE_3B_PHY_RCHNG_CLK_SRC] = &gcc_pcie_3b_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_3B_PIPE_CLK] = &gcc_pcie_3b_pipe_clk.clkr,
+ [GCC_PCIE_3B_PIPE_CLK_SRC] = &gcc_pcie_3b_pipe_clk_src.clkr,
+ [GCC_PCIE_3B_PIPE_DIV2_CLK] = &gcc_pcie_3b_pipe_div2_clk.clkr,
+ [GCC_PCIE_3B_PIPE_DIV_CLK_SRC] = &gcc_pcie_3b_pipe_div_clk_src.clkr,
+ [GCC_PCIE_3B_SLV_AXI_CLK] = &gcc_pcie_3b_slv_axi_clk.clkr,
+ [GCC_PCIE_3B_SLV_Q2A_AXI_CLK] = &gcc_pcie_3b_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_4_AUX_CLK] = &gcc_pcie_4_aux_clk.clkr,
+ [GCC_PCIE_4_AUX_CLK_SRC] = &gcc_pcie_4_aux_clk_src.clkr,
+ [GCC_PCIE_4_CFG_AHB_CLK] = &gcc_pcie_4_cfg_ahb_clk.clkr,
+ [GCC_PCIE_4_MSTR_AXI_CLK] = &gcc_pcie_4_mstr_axi_clk.clkr,
+ [GCC_PCIE_4_PHY_RCHNG_CLK] = &gcc_pcie_4_phy_rchng_clk.clkr,
+ [GCC_PCIE_4_PHY_RCHNG_CLK_SRC] = &gcc_pcie_4_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_4_PIPE_CLK] = &gcc_pcie_4_pipe_clk.clkr,
+ [GCC_PCIE_4_PIPE_CLK_SRC] = &gcc_pcie_4_pipe_clk_src.clkr,
+ [GCC_PCIE_4_PIPE_DIV2_CLK] = &gcc_pcie_4_pipe_div2_clk.clkr,
+ [GCC_PCIE_4_PIPE_DIV_CLK_SRC] = &gcc_pcie_4_pipe_div_clk_src.clkr,
+ [GCC_PCIE_4_SLV_AXI_CLK] = &gcc_pcie_4_slv_axi_clk.clkr,
+ [GCC_PCIE_4_SLV_Q2A_AXI_CLK] = &gcc_pcie_4_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_5_AUX_CLK] = &gcc_pcie_5_aux_clk.clkr,
+ [GCC_PCIE_5_AUX_CLK_SRC] = &gcc_pcie_5_aux_clk_src.clkr,
+ [GCC_PCIE_5_CFG_AHB_CLK] = &gcc_pcie_5_cfg_ahb_clk.clkr,
+ [GCC_PCIE_5_MSTR_AXI_CLK] = &gcc_pcie_5_mstr_axi_clk.clkr,
+ [GCC_PCIE_5_PHY_RCHNG_CLK] = &gcc_pcie_5_phy_rchng_clk.clkr,
+ [GCC_PCIE_5_PHY_RCHNG_CLK_SRC] = &gcc_pcie_5_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_5_PIPE_CLK] = &gcc_pcie_5_pipe_clk.clkr,
+ [GCC_PCIE_5_PIPE_CLK_SRC] = &gcc_pcie_5_pipe_clk_src.clkr,
+ [GCC_PCIE_5_PIPE_DIV2_CLK] = &gcc_pcie_5_pipe_div2_clk.clkr,
+ [GCC_PCIE_5_PIPE_DIV_CLK_SRC] = &gcc_pcie_5_pipe_div_clk_src.clkr,
+ [GCC_PCIE_5_SLV_AXI_CLK] = &gcc_pcie_5_slv_axi_clk.clkr,
+ [GCC_PCIE_5_SLV_Q2A_AXI_CLK] = &gcc_pcie_5_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_6_AUX_CLK] = &gcc_pcie_6_aux_clk.clkr,
+ [GCC_PCIE_6_AUX_CLK_SRC] = &gcc_pcie_6_aux_clk_src.clkr,
+ [GCC_PCIE_6_CFG_AHB_CLK] = &gcc_pcie_6_cfg_ahb_clk.clkr,
+ [GCC_PCIE_6_MSTR_AXI_CLK] = &gcc_pcie_6_mstr_axi_clk.clkr,
+ [GCC_PCIE_6_PHY_RCHNG_CLK] = &gcc_pcie_6_phy_rchng_clk.clkr,
+ [GCC_PCIE_6_PHY_RCHNG_CLK_SRC] = &gcc_pcie_6_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_6_PIPE_CLK] = &gcc_pcie_6_pipe_clk.clkr,
+ [GCC_PCIE_6_PIPE_CLK_SRC] = &gcc_pcie_6_pipe_clk_src.clkr,
+ [GCC_PCIE_6_PIPE_DIV2_CLK] = &gcc_pcie_6_pipe_div2_clk.clkr,
+ [GCC_PCIE_6_PIPE_DIV_CLK_SRC] = &gcc_pcie_6_pipe_div_clk_src.clkr,
+ [GCC_PCIE_6_SLV_AXI_CLK] = &gcc_pcie_6_slv_axi_clk.clkr,
+ [GCC_PCIE_6_SLV_Q2A_AXI_CLK] = &gcc_pcie_6_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_NOC_PWRCTL_CLK] = &gcc_pcie_noc_pwrctl_clk.clkr,
+ [GCC_PCIE_NOC_QOSGEN_EXTREF_CLK] = &gcc_pcie_noc_qosgen_extref_clk.clkr,
+ [GCC_PCIE_NOC_SF_CENTER_CLK] = &gcc_pcie_noc_sf_center_clk.clkr,
+ [GCC_PCIE_NOC_SLAVE_SF_EAST_CLK] = &gcc_pcie_noc_slave_sf_east_clk.clkr,
+ [GCC_PCIE_NOC_SLAVE_SF_WEST_CLK] = &gcc_pcie_noc_slave_sf_west_clk.clkr,
+ [GCC_PCIE_NOC_TSCTR_CLK] = &gcc_pcie_noc_tsctr_clk.clkr,
+ [GCC_PCIE_PHY_3A_AUX_CLK] = &gcc_pcie_phy_3a_aux_clk.clkr,
+ [GCC_PCIE_PHY_3A_AUX_CLK_SRC] = &gcc_pcie_phy_3a_aux_clk_src.clkr,
+ [GCC_PCIE_PHY_3B_AUX_CLK] = &gcc_pcie_phy_3b_aux_clk.clkr,
+ [GCC_PCIE_PHY_3B_AUX_CLK_SRC] = &gcc_pcie_phy_3b_aux_clk_src.clkr,
+ [GCC_PCIE_PHY_4_AUX_CLK] = &gcc_pcie_phy_4_aux_clk.clkr,
+ [GCC_PCIE_PHY_4_AUX_CLK_SRC] = &gcc_pcie_phy_4_aux_clk_src.clkr,
+ [GCC_PCIE_PHY_5_AUX_CLK] = &gcc_pcie_phy_5_aux_clk.clkr,
+ [GCC_PCIE_PHY_5_AUX_CLK_SRC] = &gcc_pcie_phy_5_aux_clk_src.clkr,
+ [GCC_PCIE_PHY_6_AUX_CLK] = &gcc_pcie_phy_6_aux_clk.clkr,
+ [GCC_PCIE_PHY_6_AUX_CLK_SRC] = &gcc_pcie_phy_6_aux_clk_src.clkr,
+ [GCC_PCIE_RSCC_CFG_AHB_CLK] = &gcc_pcie_rscc_cfg_ahb_clk.clkr,
+ [GCC_PCIE_RSCC_XO_CLK] = &gcc_pcie_rscc_xo_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_QMIP_AV1E_AHB_CLK] = &gcc_qmip_av1e_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_CMD_AHB_CLK] = &gcc_qmip_camera_cmd_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+ [GCC_QMIP_GPU_AHB_CLK] = &gcc_qmip_gpu_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_3A_AHB_CLK] = &gcc_qmip_pcie_3a_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_3B_AHB_CLK] = &gcc_qmip_pcie_3b_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_4_AHB_CLK] = &gcc_qmip_pcie_4_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_5_AHB_CLK] = &gcc_qmip_pcie_5_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_6_AHB_CLK] = &gcc_qmip_pcie_6_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CV_CPU_AHB_CLK] = &gcc_qmip_video_cv_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_V_CPU_AHB_CLK] = &gcc_qmip_video_v_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC1_AHB_CLK] = &gcc_qmip_video_vcodec1_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QUPV3_OOB_CORE_2X_CLK] = &gcc_qupv3_oob_core_2x_clk.clkr,
+ [GCC_QUPV3_OOB_CORE_CLK] = &gcc_qupv3_oob_core_clk.clkr,
+ [GCC_QUPV3_OOB_M_AHB_CLK] = &gcc_qupv3_oob_m_ahb_clk.clkr,
+ [GCC_QUPV3_OOB_QSPI_S0_CLK] = &gcc_qupv3_oob_qspi_s0_clk.clkr,
+ [GCC_QUPV3_OOB_QSPI_S0_CLK_SRC] = &gcc_qupv3_oob_qspi_s0_clk_src.clkr,
+ [GCC_QUPV3_OOB_QSPI_S1_CLK] = &gcc_qupv3_oob_qspi_s1_clk.clkr,
+ [GCC_QUPV3_OOB_QSPI_S1_CLK_SRC] = &gcc_qupv3_oob_qspi_s1_clk_src.clkr,
+ [GCC_QUPV3_OOB_S0_CLK] = &gcc_qupv3_oob_s0_clk.clkr,
+ [GCC_QUPV3_OOB_S0_CLK_SRC] = &gcc_qupv3_oob_s0_clk_src.clkr,
+ [GCC_QUPV3_OOB_S1_CLK] = &gcc_qupv3_oob_s1_clk.clkr,
+ [GCC_QUPV3_OOB_S1_CLK_SRC] = &gcc_qupv3_oob_s1_clk_src.clkr,
+ [GCC_QUPV3_OOB_S_AHB_CLK] = &gcc_qupv3_oob_s_ahb_clk.clkr,
+ [GCC_QUPV3_OOB_TCXO_CLK] = &gcc_qupv3_oob_tcxo_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_QSPI_S2_CLK] = &gcc_qupv3_wrap0_qspi_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_QSPI_S2_CLK_SRC] = &gcc_qupv3_wrap0_qspi_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_QSPI_S3_CLK] = &gcc_qupv3_wrap0_qspi_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_QSPI_S3_CLK_SRC] = &gcc_qupv3_wrap0_qspi_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_QSPI_S6_CLK] = &gcc_qupv3_wrap0_qspi_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_QSPI_S6_CLK_SRC] = &gcc_qupv3_wrap0_qspi_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_S2_CLK] = &gcc_qupv3_wrap1_qspi_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_S2_CLK_SRC] = &gcc_qupv3_wrap1_qspi_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_S3_CLK] = &gcc_qupv3_wrap1_qspi_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_S3_CLK_SRC] = &gcc_qupv3_wrap1_qspi_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_S6_CLK] = &gcc_qupv3_wrap1_qspi_s6_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_S6_CLK_SRC] = &gcc_qupv3_wrap1_qspi_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK] = &gcc_qupv3_wrap1_s6_clk.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK_SRC] = &gcc_qupv3_wrap1_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK] = &gcc_qupv3_wrap1_s7_clk.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK_SRC] = &gcc_qupv3_wrap1_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_CORE_2X_CLK] = &gcc_qupv3_wrap2_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP2_CORE_CLK] = &gcc_qupv3_wrap2_core_clk.clkr,
+ [GCC_QUPV3_WRAP2_QSPI_S2_CLK] = &gcc_qupv3_wrap2_qspi_s2_clk.clkr,
+ [GCC_QUPV3_WRAP2_QSPI_S2_CLK_SRC] = &gcc_qupv3_wrap2_qspi_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_QSPI_S3_CLK] = &gcc_qupv3_wrap2_qspi_s3_clk.clkr,
+ [GCC_QUPV3_WRAP2_QSPI_S3_CLK_SRC] = &gcc_qupv3_wrap2_qspi_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_QSPI_S6_CLK] = &gcc_qupv3_wrap2_qspi_s6_clk.clkr,
+ [GCC_QUPV3_WRAP2_QSPI_S6_CLK_SRC] = &gcc_qupv3_wrap2_qspi_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK] = &gcc_qupv3_wrap2_s0_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK_SRC] = &gcc_qupv3_wrap2_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK] = &gcc_qupv3_wrap2_s1_clk.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK_SRC] = &gcc_qupv3_wrap2_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK] = &gcc_qupv3_wrap2_s2_clk.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK_SRC] = &gcc_qupv3_wrap2_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK] = &gcc_qupv3_wrap2_s3_clk.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK_SRC] = &gcc_qupv3_wrap2_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK] = &gcc_qupv3_wrap2_s4_clk.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK_SRC] = &gcc_qupv3_wrap2_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK] = &gcc_qupv3_wrap2_s5_clk.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK_SRC] = &gcc_qupv3_wrap2_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S6_CLK] = &gcc_qupv3_wrap2_s6_clk.clkr,
+ [GCC_QUPV3_WRAP2_S6_CLK_SRC] = &gcc_qupv3_wrap2_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S7_CLK] = &gcc_qupv3_wrap2_s7_clk.clkr,
+ [GCC_QUPV3_WRAP2_S7_CLK_SRC] = &gcc_qupv3_wrap2_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_M_AHB_CLK] = &gcc_qupv3_wrap_2_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_S_AHB_CLK] = &gcc_qupv3_wrap_2_s_ahb_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_rx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC] = &gcc_ufs_phy_rx_symbol_1_clk_src.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_tx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_USB20_MASTER_CLK] = &gcc_usb20_master_clk.clkr,
+ [GCC_USB20_MASTER_CLK_SRC] = &gcc_usb20_master_clk_src.clkr,
+ [GCC_USB20_MOCK_UTMI_CLK] = &gcc_usb20_mock_utmi_clk.clkr,
+ [GCC_USB20_MOCK_UTMI_CLK_SRC] = &gcc_usb20_mock_utmi_clk_src.clkr,
+ [GCC_USB20_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb20_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB20_SLEEP_CLK] = &gcc_usb20_sleep_clk.clkr,
+ [GCC_USB30_MP_MASTER_CLK] = &gcc_usb30_mp_master_clk.clkr,
+ [GCC_USB30_MP_MASTER_CLK_SRC] = &gcc_usb30_mp_master_clk_src.clkr,
+ [GCC_USB30_MP_MOCK_UTMI_CLK] = &gcc_usb30_mp_mock_utmi_clk.clkr,
+ [GCC_USB30_MP_MOCK_UTMI_CLK_SRC] = &gcc_usb30_mp_mock_utmi_clk_src.clkr,
+ [GCC_USB30_MP_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_mp_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_MP_SLEEP_CLK] = &gcc_usb30_mp_sleep_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK] = &gcc_usb30_sec_master_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK_SRC] = &gcc_usb30_sec_master_clk_src.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK] = &gcc_usb30_sec_mock_utmi_clk.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK_SRC] = &gcc_usb30_sec_mock_utmi_clk_src.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_SEC_SLEEP_CLK] = &gcc_usb30_sec_sleep_clk.clkr,
+ [GCC_USB30_TERT_MASTER_CLK] = &gcc_usb30_tert_master_clk.clkr,
+ [GCC_USB30_TERT_MASTER_CLK_SRC] = &gcc_usb30_tert_master_clk_src.clkr,
+ [GCC_USB30_TERT_MOCK_UTMI_CLK] = &gcc_usb30_tert_mock_utmi_clk.clkr,
+ [GCC_USB30_TERT_MOCK_UTMI_CLK_SRC] = &gcc_usb30_tert_mock_utmi_clk_src.clkr,
+ [GCC_USB30_TERT_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_tert_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_TERT_SLEEP_CLK] = &gcc_usb30_tert_sleep_clk.clkr,
+ [GCC_USB34_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb34_prim_phy_pipe_clk_src.clkr,
+ [GCC_USB34_SEC_PHY_PIPE_CLK_SRC] = &gcc_usb34_sec_phy_pipe_clk_src.clkr,
+ [GCC_USB34_TERT_PHY_PIPE_CLK_SRC] = &gcc_usb34_tert_phy_pipe_clk_src.clkr,
+ [GCC_USB3_MP_PHY_AUX_CLK] = &gcc_usb3_mp_phy_aux_clk.clkr,
+ [GCC_USB3_MP_PHY_AUX_CLK_SRC] = &gcc_usb3_mp_phy_aux_clk_src.clkr,
+ [GCC_USB3_MP_PHY_COM_AUX_CLK] = &gcc_usb3_mp_phy_com_aux_clk.clkr,
+ [GCC_USB3_MP_PHY_PIPE_0_CLK] = &gcc_usb3_mp_phy_pipe_0_clk.clkr,
+ [GCC_USB3_MP_PHY_PIPE_0_CLK_SRC] = &gcc_usb3_mp_phy_pipe_0_clk_src.clkr,
+ [GCC_USB3_MP_PHY_PIPE_1_CLK] = &gcc_usb3_mp_phy_pipe_1_clk.clkr,
+ [GCC_USB3_MP_PHY_PIPE_1_CLK_SRC] = &gcc_usb3_mp_phy_pipe_1_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK] = &gcc_usb3_sec_phy_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK_SRC] = &gcc_usb3_sec_phy_aux_clk_src.clkr,
+ [GCC_USB3_SEC_PHY_COM_AUX_CLK] = &gcc_usb3_sec_phy_com_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_PIPE_CLK] = &gcc_usb3_sec_phy_pipe_clk.clkr,
+ [GCC_USB3_SEC_PHY_PIPE_CLK_SRC] = &gcc_usb3_sec_phy_pipe_clk_src.clkr,
+ [GCC_USB3_TERT_PHY_AUX_CLK] = &gcc_usb3_tert_phy_aux_clk.clkr,
+ [GCC_USB3_TERT_PHY_AUX_CLK_SRC] = &gcc_usb3_tert_phy_aux_clk_src.clkr,
+ [GCC_USB3_TERT_PHY_COM_AUX_CLK] = &gcc_usb3_tert_phy_com_aux_clk.clkr,
+ [GCC_USB3_TERT_PHY_PIPE_CLK] = &gcc_usb3_tert_phy_pipe_clk.clkr,
+ [GCC_USB3_TERT_PHY_PIPE_CLK_SRC] = &gcc_usb3_tert_phy_pipe_clk_src.clkr,
+ [GCC_USB4_0_CFG_AHB_CLK] = &gcc_usb4_0_cfg_ahb_clk.clkr,
+ [GCC_USB4_0_DP0_CLK] = &gcc_usb4_0_dp0_clk.clkr,
+ [GCC_USB4_0_DP1_CLK] = &gcc_usb4_0_dp1_clk.clkr,
+ [GCC_USB4_0_MASTER_CLK] = &gcc_usb4_0_master_clk.clkr,
+ [GCC_USB4_0_MASTER_CLK_SRC] = &gcc_usb4_0_master_clk_src.clkr,
+ [GCC_USB4_0_PHY_DP0_CLK_SRC] = &gcc_usb4_0_phy_dp0_clk_src.clkr,
+ [GCC_USB4_0_PHY_DP1_CLK_SRC] = &gcc_usb4_0_phy_dp1_clk_src.clkr,
+ [GCC_USB4_0_PHY_P2RR2P_PIPE_CLK] = &gcc_usb4_0_phy_p2rr2p_pipe_clk.clkr,
+ [GCC_USB4_0_PHY_P2RR2P_PIPE_CLK_SRC] = &gcc_usb4_0_phy_p2rr2p_pipe_clk_src.clkr,
+ [GCC_USB4_0_PHY_PCIE_PIPE_CLK] = &gcc_usb4_0_phy_pcie_pipe_clk.clkr,
+ [GCC_USB4_0_PHY_PCIE_PIPE_CLK_SRC] = &gcc_usb4_0_phy_pcie_pipe_clk_src.clkr,
+ [GCC_USB4_0_PHY_PCIE_PIPE_MUX_CLK_SRC] = &gcc_usb4_0_phy_pcie_pipe_mux_clk_src.clkr,
+ [GCC_USB4_0_PHY_RX0_CLK] = &gcc_usb4_0_phy_rx0_clk.clkr,
+ [GCC_USB4_0_PHY_RX0_CLK_SRC] = &gcc_usb4_0_phy_rx0_clk_src.clkr,
+ [GCC_USB4_0_PHY_RX1_CLK] = &gcc_usb4_0_phy_rx1_clk.clkr,
+ [GCC_USB4_0_PHY_RX1_CLK_SRC] = &gcc_usb4_0_phy_rx1_clk_src.clkr,
+ [GCC_USB4_0_PHY_SYS_CLK_SRC] = &gcc_usb4_0_phy_sys_clk_src.clkr,
+ [GCC_USB4_0_PHY_USB_PIPE_CLK] = &gcc_usb4_0_phy_usb_pipe_clk.clkr,
+ [GCC_USB4_0_SB_IF_CLK] = &gcc_usb4_0_sb_if_clk.clkr,
+ [GCC_USB4_0_SB_IF_CLK_SRC] = &gcc_usb4_0_sb_if_clk_src.clkr,
+ [GCC_USB4_0_SYS_CLK] = &gcc_usb4_0_sys_clk.clkr,
+ [GCC_USB4_0_TMU_CLK] = &gcc_usb4_0_tmu_clk.clkr,
+ [GCC_USB4_0_TMU_CLK_SRC] = &gcc_usb4_0_tmu_clk_src.clkr,
+ [GCC_USB4_0_UC_HRR_CLK] = &gcc_usb4_0_uc_hrr_clk.clkr,
+ [GCC_USB4_1_CFG_AHB_CLK] = &gcc_usb4_1_cfg_ahb_clk.clkr,
+ [GCC_USB4_1_DP0_CLK] = &gcc_usb4_1_dp0_clk.clkr,
+ [GCC_USB4_1_DP1_CLK] = &gcc_usb4_1_dp1_clk.clkr,
+ [GCC_USB4_1_MASTER_CLK] = &gcc_usb4_1_master_clk.clkr,
+ [GCC_USB4_1_MASTER_CLK_SRC] = &gcc_usb4_1_master_clk_src.clkr,
+ [GCC_USB4_1_PHY_DP0_CLK_SRC] = &gcc_usb4_1_phy_dp0_clk_src.clkr,
+ [GCC_USB4_1_PHY_DP1_CLK_SRC] = &gcc_usb4_1_phy_dp1_clk_src.clkr,
+ [GCC_USB4_1_PHY_P2RR2P_PIPE_CLK] = &gcc_usb4_1_phy_p2rr2p_pipe_clk.clkr,
+ [GCC_USB4_1_PHY_P2RR2P_PIPE_CLK_SRC] = &gcc_usb4_1_phy_p2rr2p_pipe_clk_src.clkr,
+ [GCC_USB4_1_PHY_PCIE_PIPE_CLK] = &gcc_usb4_1_phy_pcie_pipe_clk.clkr,
+ [GCC_USB4_1_PHY_PCIE_PIPE_CLK_SRC] = &gcc_usb4_1_phy_pcie_pipe_clk_src.clkr,
+ [GCC_USB4_1_PHY_PCIE_PIPE_MUX_CLK_SRC] = &gcc_usb4_1_phy_pcie_pipe_mux_clk_src.clkr,
+ [GCC_USB4_1_PHY_PLL_PIPE_CLK_SRC] = &gcc_usb4_1_phy_pll_pipe_clk_src.clkr,
+ [GCC_USB4_1_PHY_RX0_CLK] = &gcc_usb4_1_phy_rx0_clk.clkr,
+ [GCC_USB4_1_PHY_RX0_CLK_SRC] = &gcc_usb4_1_phy_rx0_clk_src.clkr,
+ [GCC_USB4_1_PHY_RX1_CLK] = &gcc_usb4_1_phy_rx1_clk.clkr,
+ [GCC_USB4_1_PHY_RX1_CLK_SRC] = &gcc_usb4_1_phy_rx1_clk_src.clkr,
+ [GCC_USB4_1_PHY_SYS_CLK_SRC] = &gcc_usb4_1_phy_sys_clk_src.clkr,
+ [GCC_USB4_1_PHY_USB_PIPE_CLK] = &gcc_usb4_1_phy_usb_pipe_clk.clkr,
+ [GCC_USB4_1_SB_IF_CLK] = &gcc_usb4_1_sb_if_clk.clkr,
+ [GCC_USB4_1_SB_IF_CLK_SRC] = &gcc_usb4_1_sb_if_clk_src.clkr,
+ [GCC_USB4_1_SYS_CLK] = &gcc_usb4_1_sys_clk.clkr,
+ [GCC_USB4_1_TMU_CLK] = &gcc_usb4_1_tmu_clk.clkr,
+ [GCC_USB4_1_TMU_CLK_SRC] = &gcc_usb4_1_tmu_clk_src.clkr,
+ [GCC_USB4_1_UC_HRR_CLK] = &gcc_usb4_1_uc_hrr_clk.clkr,
+ [GCC_USB4_2_CFG_AHB_CLK] = &gcc_usb4_2_cfg_ahb_clk.clkr,
+ [GCC_USB4_2_DP0_CLK] = &gcc_usb4_2_dp0_clk.clkr,
+ [GCC_USB4_2_DP1_CLK] = &gcc_usb4_2_dp1_clk.clkr,
+ [GCC_USB4_2_MASTER_CLK] = &gcc_usb4_2_master_clk.clkr,
+ [GCC_USB4_2_MASTER_CLK_SRC] = &gcc_usb4_2_master_clk_src.clkr,
+ [GCC_USB4_2_PHY_DP0_CLK_SRC] = &gcc_usb4_2_phy_dp0_clk_src.clkr,
+ [GCC_USB4_2_PHY_DP1_CLK_SRC] = &gcc_usb4_2_phy_dp1_clk_src.clkr,
+ [GCC_USB4_2_PHY_P2RR2P_PIPE_CLK] = &gcc_usb4_2_phy_p2rr2p_pipe_clk.clkr,
+ [GCC_USB4_2_PHY_P2RR2P_PIPE_CLK_SRC] = &gcc_usb4_2_phy_p2rr2p_pipe_clk_src.clkr,
+ [GCC_USB4_2_PHY_PCIE_PIPE_CLK] = &gcc_usb4_2_phy_pcie_pipe_clk.clkr,
+ [GCC_USB4_2_PHY_PCIE_PIPE_CLK_SRC] = &gcc_usb4_2_phy_pcie_pipe_clk_src.clkr,
+ [GCC_USB4_2_PHY_PCIE_PIPE_MUX_CLK_SRC] = &gcc_usb4_2_phy_pcie_pipe_mux_clk_src.clkr,
+ [GCC_USB4_2_PHY_RX0_CLK] = &gcc_usb4_2_phy_rx0_clk.clkr,
+ [GCC_USB4_2_PHY_RX0_CLK_SRC] = &gcc_usb4_2_phy_rx0_clk_src.clkr,
+ [GCC_USB4_2_PHY_RX1_CLK] = &gcc_usb4_2_phy_rx1_clk.clkr,
+ [GCC_USB4_2_PHY_RX1_CLK_SRC] = &gcc_usb4_2_phy_rx1_clk_src.clkr,
+ [GCC_USB4_2_PHY_SYS_CLK_SRC] = &gcc_usb4_2_phy_sys_clk_src.clkr,
+ [GCC_USB4_2_PHY_USB_PIPE_CLK] = &gcc_usb4_2_phy_usb_pipe_clk.clkr,
+ [GCC_USB4_2_SB_IF_CLK] = &gcc_usb4_2_sb_if_clk.clkr,
+ [GCC_USB4_2_SB_IF_CLK_SRC] = &gcc_usb4_2_sb_if_clk_src.clkr,
+ [GCC_USB4_2_SYS_CLK] = &gcc_usb4_2_sys_clk.clkr,
+ [GCC_USB4_2_TMU_CLK] = &gcc_usb4_2_tmu_clk.clkr,
+ [GCC_USB4_2_TMU_CLK_SRC] = &gcc_usb4_2_tmu_clk_src.clkr,
+ [GCC_USB4_2_UC_HRR_CLK] = &gcc_usb4_2_uc_hrr_clk.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VIDEO_AXI0C_CLK] = &gcc_video_axi0c_clk.clkr,
+ [GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr,
+};
+
+static struct gdsc *gcc_glymur_gdscs[] = {
+ [GCC_PCIE_0_TUNNEL_GDSC] = &gcc_pcie_0_tunnel_gdsc,
+ [GCC_PCIE_1_TUNNEL_GDSC] = &gcc_pcie_1_tunnel_gdsc,
+ [GCC_PCIE_2_TUNNEL_GDSC] = &gcc_pcie_2_tunnel_gdsc,
+ [GCC_PCIE_3A_GDSC] = &gcc_pcie_3a_gdsc,
+ [GCC_PCIE_3A_PHY_GDSC] = &gcc_pcie_3a_phy_gdsc,
+ [GCC_PCIE_3B_GDSC] = &gcc_pcie_3b_gdsc,
+ [GCC_PCIE_3B_PHY_GDSC] = &gcc_pcie_3b_phy_gdsc,
+ [GCC_PCIE_4_GDSC] = &gcc_pcie_4_gdsc,
+ [GCC_PCIE_4_PHY_GDSC] = &gcc_pcie_4_phy_gdsc,
+ [GCC_PCIE_5_GDSC] = &gcc_pcie_5_gdsc,
+ [GCC_PCIE_5_PHY_GDSC] = &gcc_pcie_5_phy_gdsc,
+ [GCC_PCIE_6_GDSC] = &gcc_pcie_6_gdsc,
+ [GCC_PCIE_6_PHY_GDSC] = &gcc_pcie_6_phy_gdsc,
+ [GCC_UFS_PHY_GDSC] = &gcc_ufs_phy_gdsc,
+ [GCC_USB20_PRIM_GDSC] = &gcc_usb20_prim_gdsc,
+ [GCC_USB30_MP_GDSC] = &gcc_usb30_mp_gdsc,
+ [GCC_USB30_PRIM_GDSC] = &gcc_usb30_prim_gdsc,
+ [GCC_USB30_SEC_GDSC] = &gcc_usb30_sec_gdsc,
+ [GCC_USB30_TERT_GDSC] = &gcc_usb30_tert_gdsc,
+ [GCC_USB3_MP_SS0_PHY_GDSC] = &gcc_usb3_mp_ss0_phy_gdsc,
+ [GCC_USB3_MP_SS1_PHY_GDSC] = &gcc_usb3_mp_ss1_phy_gdsc,
+ [GCC_USB4_0_GDSC] = &gcc_usb4_0_gdsc,
+ [GCC_USB4_1_GDSC] = &gcc_usb4_1_gdsc,
+ [GCC_USB4_2_GDSC] = &gcc_usb4_2_gdsc,
+ [GCC_USB_0_PHY_GDSC] = &gcc_usb_0_phy_gdsc,
+ [GCC_USB_1_PHY_GDSC] = &gcc_usb_1_phy_gdsc,
+ [GCC_USB_2_PHY_GDSC] = &gcc_usb_2_phy_gdsc,
+};
+
+static const struct qcom_reset_map gcc_glymur_resets[] = {
+ [GCC_AV1E_BCR] = { 0x9b028 },
+ [GCC_CAMERA_BCR] = { 0x26000 },
+ [GCC_DISPLAY_BCR] = { 0x27000 },
+ [GCC_EVA_BCR] = { 0x9b000 },
+ [GCC_GPU_BCR] = { 0x71000 },
+ [GCC_PCIE_0_LINK_DOWN_BCR] = { 0xbc2d0 },
+ [GCC_PCIE_0_NOCSR_COM_PHY_BCR] = { 0xbc2dc },
+ [GCC_PCIE_0_PHY_BCR] = { 0xbc2d8 },
+ [GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR] = { 0xbc2e0 },
+ [GCC_PCIE_0_TUNNEL_BCR] = { 0xc8000 },
+ [GCC_PCIE_1_LINK_DOWN_BCR] = { 0x7f018 },
+ [GCC_PCIE_1_NOCSR_COM_PHY_BCR] = { 0x7f024 },
+ [GCC_PCIE_1_PHY_BCR] = { 0x7f020 },
+ [GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR] = { 0x7f028 },
+ [GCC_PCIE_1_TUNNEL_BCR] = { 0x2e000 },
+ [GCC_PCIE_2_LINK_DOWN_BCR] = { 0x281d0 },
+ [GCC_PCIE_2_NOCSR_COM_PHY_BCR] = { 0x281dc },
+ [GCC_PCIE_2_PHY_BCR] = { 0x281d8 },
+ [GCC_PCIE_2_PHY_NOCSR_COM_PHY_BCR] = { 0x281e0 },
+ [GCC_PCIE_2_TUNNEL_BCR] = { 0xc0000 },
+ [GCC_PCIE_3A_BCR] = { 0xdc000 },
+ [GCC_PCIE_3A_LINK_DOWN_BCR] = { 0x7b0a0 },
+ [GCC_PCIE_3A_NOCSR_COM_PHY_BCR] = { 0x7b0ac },
+ [GCC_PCIE_3A_PHY_BCR] = { 0x6c000 },
+ [GCC_PCIE_3A_PHY_NOCSR_COM_PHY_BCR] = { 0x7b0b0 },
+ [GCC_PCIE_3B_BCR] = { 0x94000 },
+ [GCC_PCIE_3B_LINK_DOWN_BCR] = { 0x7a0c0 },
+ [GCC_PCIE_3B_NOCSR_COM_PHY_BCR] = { 0x7a0cc },
+ [GCC_PCIE_3B_PHY_BCR] = { 0x75000 },
+ [GCC_PCIE_3B_PHY_NOCSR_COM_PHY_BCR] = { 0x7a0c8 },
+ [GCC_PCIE_4_BCR] = { 0x88000 },
+ [GCC_PCIE_4_LINK_DOWN_BCR] = { 0x980c0 },
+ [GCC_PCIE_4_NOCSR_COM_PHY_BCR] = { 0x980cc },
+ [GCC_PCIE_4_PHY_BCR] = { 0xd3000 },
+ [GCC_PCIE_4_PHY_NOCSR_COM_PHY_BCR] = { 0x980d0 },
+ [GCC_PCIE_5_BCR] = { 0xc3000 },
+ [GCC_PCIE_5_LINK_DOWN_BCR] = { 0x850c0 },
+ [GCC_PCIE_5_NOCSR_COM_PHY_BCR] = { 0x850cc },
+ [GCC_PCIE_5_PHY_BCR] = { 0xd2000 },
+ [GCC_PCIE_5_PHY_NOCSR_COM_PHY_BCR] = { 0x850d0 },
+ [GCC_PCIE_6_BCR] = { 0x8a000 },
+ [GCC_PCIE_6_LINK_DOWN_BCR] = { 0x3a0b0 },
+ [GCC_PCIE_6_NOCSR_COM_PHY_BCR] = { 0x3a0bc },
+ [GCC_PCIE_6_PHY_BCR] = { 0xd4000 },
+ [GCC_PCIE_6_PHY_NOCSR_COM_PHY_BCR] = { 0x3a0c0 },
+ [GCC_PCIE_NOC_BCR] = { 0xba294 },
+ [GCC_PCIE_PHY_BCR] = { 0x6f000 },
+ [GCC_PCIE_PHY_CFG_AHB_BCR] = { 0x7f00c },
+ [GCC_PCIE_PHY_COM_BCR] = { 0x7f010 },
+ [GCC_PCIE_RSCC_BCR] = { 0xb8000 },
+ [GCC_PDM_BCR] = { 0x33000 },
+ [GCC_QUPV3_WRAPPER_0_BCR] = { 0x28000 },
+ [GCC_QUPV3_WRAPPER_1_BCR] = { 0xb3000 },
+ [GCC_QUPV3_WRAPPER_2_BCR] = { 0xb4000 },
+ [GCC_QUPV3_WRAPPER_OOB_BCR] = { 0xe7000 },
+ [GCC_QUSB2PHY_HS0_MP_BCR] = { 0xca000 },
+ [GCC_QUSB2PHY_HS1_MP_BCR] = { 0xe6000 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0xad024 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0xae000 },
+ [GCC_QUSB2PHY_TERT_BCR] = { 0xc9000 },
+ [GCC_QUSB2PHY_USB20_HS_BCR] = { 0xe9000 },
+ [GCC_SDCC2_BCR] = { 0xb0000 },
+ [GCC_SDCC4_BCR] = { 0xdf000 },
+ [GCC_TCSR_PCIE_BCR] = { 0x281e4 },
+ [GCC_UFS_PHY_BCR] = { 0x77004 },
+ [GCC_USB20_PRIM_BCR] = { 0xbc000 },
+ [GCC_USB30_MP_BCR] = { 0x9a00c },
+ [GCC_USB30_PRIM_BCR] = { 0x3f018 },
+ [GCC_USB30_SEC_BCR] = { 0xe200c },
+ [GCC_USB30_TERT_BCR] = { 0xe100c },
+ [GCC_USB3_MP_SS0_PHY_BCR] = { 0x54008 },
+ [GCC_USB3_MP_SS1_PHY_BCR] = { 0x54028 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0xdb000 },
+ [GCC_USB3_PHY_SEC_BCR] = { 0x2c000 },
+ [GCC_USB3_PHY_TERT_BCR] = { 0xbe000 },
+ [GCC_USB3_UNIPHY_MP0_BCR] = { 0x54000 },
+ [GCC_USB3_UNIPHY_MP1_BCR] = { 0x54020 },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0xdb004 },
+ [GCC_USB3PHY_PHY_SEC_BCR] = { 0x2c004 },
+ [GCC_USB3PHY_PHY_TERT_BCR] = { 0xbe004 },
+ [GCC_USB3UNIPHY_PHY_MP0_BCR] = { 0x54004 },
+ [GCC_USB3UNIPHY_PHY_MP1_BCR] = { 0x54024 },
+ [GCC_USB4_0_BCR] = { 0x2b004 },
+ [GCC_USB4_0_DP0_PHY_PRIM_BCR] = { 0xdb010 },
+ [GCC_USB4_1_BCR] = { 0x2d004 },
+ [GCC_USB4_2_BCR] = { 0xe0004 },
+ [GCC_USB_0_PHY_BCR] = { 0xdb020 },
+ [GCC_USB_1_PHY_BCR] = { 0x2c020 },
+ [GCC_USB_2_PHY_BCR] = { 0xbe020 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { 0x3201c, 2 },
+ [GCC_VIDEO_AXI1_CLK_ARES] = { 0x32044, 2 },
+ [GCC_VIDEO_BCR] = { 0x32000 },
+};
+
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_oob_qspi_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_oob_qspi_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_qspi_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_qspi_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_qspi_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_qspi_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_qspi_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_qspi_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_qspi_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_qspi_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_qspi_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s7_clk_src),
+};
+
+static u32 gcc_glymur_critical_cbcrs[] = {
+ 0x26004, /* GCC_CAMERA_AHB_CLK */
+ 0x26040, /* GCC_CAMERA_XO_CLK */
+ 0x27004, /* GCC_DISP_AHB_CLK */
+ 0x71004, /* GCC_GPU_CFG_AHB_CLK */
+ 0x32004, /* GCC_VIDEO_AHB_CLK */
+ 0x32058, /* GCC_VIDEO_XO_CLK */
+};
+
+static const struct regmap_config gcc_glymur_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1f8ff0,
+ .fast_io = true,
+};
+
+static void clk_glymur_regs_configure(struct device *dev, struct regmap *regmap)
+{
+ /* FORCE_MEM_CORE_ON for ufs phy ice core clocks */
+ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true);
+}
+
+static struct qcom_cc_driver_data gcc_glymur_driver_data = {
+ .clk_cbcrs = gcc_glymur_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(gcc_glymur_critical_cbcrs),
+ .dfs_rcgs = gcc_dfs_clocks,
+ .num_dfs_rcgs = ARRAY_SIZE(gcc_dfs_clocks),
+ .clk_regs_configure = clk_glymur_regs_configure,
+};
+
+static const struct qcom_cc_desc gcc_glymur_desc = {
+ .config = &gcc_glymur_regmap_config,
+ .clks = gcc_glymur_clocks,
+ .num_clks = ARRAY_SIZE(gcc_glymur_clocks),
+ .resets = gcc_glymur_resets,
+ .num_resets = ARRAY_SIZE(gcc_glymur_resets),
+ .gdscs = gcc_glymur_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_glymur_gdscs),
+ .driver_data = &gcc_glymur_driver_data,
+};
+
+static const struct of_device_id gcc_glymur_match_table[] = {
+ { .compatible = "qcom,glymur-gcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_glymur_match_table);
+
+static int gcc_glymur_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &gcc_glymur_desc);
+}
+
+static struct platform_driver gcc_glymur_driver = {
+ .probe = gcc_glymur_probe,
+ .driver = {
+ .name = "gcc-glymur",
+ .of_match_table = gcc_glymur_match_table,
+ },
+};
+
+static int __init gcc_glymur_init(void)
+{
+ return platform_driver_register(&gcc_glymur_driver);
+}
+subsys_initcall(gcc_glymur_init);
+
+static void __exit gcc_glymur_exit(void)
+{
+ platform_driver_unregister(&gcc_glymur_driver);
+}
+module_exit(gcc_glymur_exit);
+
+MODULE_DESCRIPTION("QTI GCC GLYMUR Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gcc-ipq6018.c b/drivers/clk/qcom/gcc-ipq6018.c
index d861191b0c85..d4fc491a18b2 100644
--- a/drivers/clk/qcom/gcc-ipq6018.c
+++ b/drivers/clk/qcom/gcc-ipq6018.c
@@ -511,15 +511,23 @@ static struct clk_rcg2 apss_ahb_clk_src = {
},
};
-static const struct freq_tbl ftbl_nss_port5_rx_clk_src[] = {
- F(24000000, P_XO, 1, 0, 0),
- F(25000000, P_UNIPHY1_RX, 12.5, 0, 0),
- F(25000000, P_UNIPHY0_RX, 5, 0, 0),
- F(78125000, P_UNIPHY1_RX, 4, 0, 0),
- F(125000000, P_UNIPHY1_RX, 2.5, 0, 0),
- F(125000000, P_UNIPHY0_RX, 1, 0, 0),
- F(156250000, P_UNIPHY1_RX, 2, 0, 0),
- F(312500000, P_UNIPHY1_RX, 1, 0, 0),
+static const struct freq_conf ftbl_nss_port5_rx_clk_src_25[] = {
+ C(P_UNIPHY1_RX, 12.5, 0, 0),
+ C(P_UNIPHY0_RX, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_port5_rx_clk_src_125[] = {
+ C(P_UNIPHY1_RX, 2.5, 0, 0),
+ C(P_UNIPHY0_RX, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_port5_rx_clk_src[] = {
+ FMS(24000000, P_XO, 1, 0, 0),
+ FM(25000000, ftbl_nss_port5_rx_clk_src_25),
+ FMS(78125000, P_UNIPHY1_RX, 4, 0, 0),
+ FM(125000000, ftbl_nss_port5_rx_clk_src_125),
+ FMS(156250000, P_UNIPHY1_RX, 2, 0, 0),
+ FMS(312500000, P_UNIPHY1_RX, 1, 0, 0),
{ }
};
@@ -547,26 +555,34 @@ gcc_xo_uniphy0_rx_tx_uniphy1_rx_tx_ubi32_bias_map[] = {
static struct clk_rcg2 nss_port5_rx_clk_src = {
.cmd_rcgr = 0x68060,
- .freq_tbl = ftbl_nss_port5_rx_clk_src,
+ .freq_multi_tbl = ftbl_nss_port5_rx_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_uniphy0_rx_tx_uniphy1_rx_tx_ubi32_bias_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "nss_port5_rx_clk_src",
.parent_data = gcc_xo_uniphy0_rx_tx_uniphy1_rx_tx_ubi32_bias,
.num_parents = 7,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_fm_ops,
},
};
-static const struct freq_tbl ftbl_nss_port5_tx_clk_src[] = {
- F(24000000, P_XO, 1, 0, 0),
- F(25000000, P_UNIPHY1_TX, 12.5, 0, 0),
- F(25000000, P_UNIPHY0_TX, 5, 0, 0),
- F(78125000, P_UNIPHY1_TX, 4, 0, 0),
- F(125000000, P_UNIPHY1_TX, 2.5, 0, 0),
- F(125000000, P_UNIPHY0_TX, 1, 0, 0),
- F(156250000, P_UNIPHY1_TX, 2, 0, 0),
- F(312500000, P_UNIPHY1_TX, 1, 0, 0),
+static const struct freq_conf ftbl_nss_port5_tx_clk_src_25[] = {
+ C(P_UNIPHY1_TX, 12.5, 0, 0),
+ C(P_UNIPHY0_TX, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_port5_tx_clk_src_125[] = {
+ C(P_UNIPHY1_TX, 2.5, 0, 0),
+ C(P_UNIPHY0_TX, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_port5_tx_clk_src[] = {
+ FMS(24000000, P_XO, 1, 0, 0),
+ FM(25000000, ftbl_nss_port5_tx_clk_src_25),
+ FMS(78125000, P_UNIPHY1_TX, 4, 0, 0),
+ FM(125000000, ftbl_nss_port5_tx_clk_src_125),
+ FMS(156250000, P_UNIPHY1_TX, 2, 0, 0),
+ FMS(312500000, P_UNIPHY1_TX, 1, 0, 0),
{ }
};
@@ -594,14 +610,14 @@ gcc_xo_uniphy0_tx_rx_uniphy1_tx_rx_ubi32_bias_map[] = {
static struct clk_rcg2 nss_port5_tx_clk_src = {
.cmd_rcgr = 0x68068,
- .freq_tbl = ftbl_nss_port5_tx_clk_src,
+ .freq_multi_tbl = ftbl_nss_port5_tx_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_uniphy0_tx_rx_uniphy1_tx_rx_ubi32_bias_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "nss_port5_tx_clk_src",
.parent_data = gcc_xo_uniphy0_tx_rx_uniphy1_tx_rx_ubi32_bias,
.num_parents = 7,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_fm_ops,
},
};
diff --git a/drivers/clk/qcom/gcc-msm8917.c b/drivers/clk/qcom/gcc-msm8917.c
index 3e2a2ae2ee6e..0a1aa623cd49 100644
--- a/drivers/clk/qcom/gcc-msm8917.c
+++ b/drivers/clk/qcom/gcc-msm8917.c
@@ -37,6 +37,8 @@ enum {
DT_SLEEP_CLK,
DT_DSI0PLL,
DT_DSI0PLL_BYTE,
+ DT_DSI1PLL,
+ DT_DSI1PLL_BYTE,
};
enum {
@@ -48,6 +50,8 @@ enum {
P_GPLL6,
P_DSI0PLL,
P_DSI0PLL_BYTE,
+ P_DSI1PLL,
+ P_DSI1PLL_BYTE,
};
static struct clk_alpha_pll gpll0_sleep_clk_src = {
@@ -102,7 +106,11 @@ static const struct pll_vco gpll3_p_vco[] = {
{ 700000000, 1400000000, 0 },
};
-static const struct alpha_pll_config gpll3_early_config = {
+static const struct pll_vco gpll3_p_vco_msm8937[] = {
+ { 525000000, 1066000000, 0 },
+};
+
+static struct alpha_pll_config gpll3_early_config = {
.l = 63,
.config_ctl_val = 0x4001055b,
.early_output_mask = 0,
@@ -273,6 +281,19 @@ static const struct freq_tbl ftbl_blsp_i2c_apps_clk_src[] = {
{ }
};
+static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0200c,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
.cmd_rcgr = 0x03000,
.hid_width = 5,
@@ -351,6 +372,19 @@ static struct clk_rcg2 blsp2_qup3_i2c_apps_clk_src = {
}
};
+static struct clk_rcg2 blsp2_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x18000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup4_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static const struct freq_tbl ftbl_blsp_spi_apps_clk_src[] = {
F(960000, P_XO, 10, 1, 2),
F(4800000, P_XO, 4, 0, 0),
@@ -362,6 +396,20 @@ static const struct freq_tbl ftbl_blsp_spi_apps_clk_src[] = {
{ }
};
+static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x02024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
.cmd_rcgr = 0x03014,
.hid_width = 5,
@@ -446,6 +494,20 @@ static struct clk_rcg2 blsp2_qup3_spi_apps_clk_src = {
}
};
+static struct clk_rcg2 blsp2_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x18024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup4_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static const struct freq_tbl ftbl_blsp_uart_apps_clk_src[] = {
F(3686400, P_GPLL0, 1, 72, 15625),
F(7372800, P_GPLL0, 1, 144, 15625),
@@ -525,11 +587,19 @@ static struct clk_rcg2 blsp2_uart2_apps_clk_src = {
static const struct parent_map gcc_byte0_map[] = {
{ P_XO, 0 },
{ P_DSI0PLL_BYTE, 1 },
+ { P_DSI1PLL_BYTE, 3 },
+};
+
+static const struct parent_map gcc_byte1_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL_BYTE, 3 },
+ { P_DSI1PLL_BYTE, 1 },
};
static const struct clk_parent_data gcc_byte_data[] = {
{ .index = DT_XO },
{ .index = DT_DSI0PLL_BYTE },
+ { .index = DT_DSI1PLL_BYTE },
};
static struct clk_rcg2 byte0_clk_src = {
@@ -545,6 +615,19 @@ static struct clk_rcg2 byte0_clk_src = {
}
};
+static struct clk_rcg2 byte1_clk_src = {
+ .cmd_rcgr = 0x4d0b0,
+ .hid_width = 5,
+ .parent_map = gcc_byte1_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "byte1_clk_src",
+ .parent_data = gcc_byte_data,
+ .num_parents = ARRAY_SIZE(gcc_byte_data),
+ .ops = &clk_byte2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
static const struct freq_tbl ftbl_camss_gp_clk_src[] = {
F(100000000, P_GPLL0, 8, 0, 0),
F(160000000, P_GPLL0, 5, 0, 0),
@@ -642,6 +725,17 @@ static const struct freq_tbl ftbl_cpp_clk_src[] = {
{ }
};
+static const struct freq_tbl ftbl_cpp_clk_src_msm8937[] = {
+ F(133330000, P_GPLL0, 6, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266666667, P_GPLL0, 3, 0, 0),
+ F(308570000, P_GPLL6, 3.5, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(360000000, P_GPLL6, 3, 0, 0),
+ { }
+};
+
static struct clk_rcg2 cpp_clk_src = {
.cmd_rcgr = 0x58018,
.hid_width = 5,
@@ -655,6 +749,13 @@ static struct clk_rcg2 cpp_clk_src = {
}
};
+static struct clk_init_data vcodec0_clk_src_init_msm8937 = {
+ .name = "vcodec0_clk_src",
+ .parent_data = gcc_cpp_data,
+ .num_parents = ARRAY_SIZE(gcc_cpp_data),
+ .ops = &clk_rcg2_ops,
+};
+
static const struct freq_tbl ftbl_crypto_clk_src[] = {
F(50000000, P_GPLL0, 16, 0, 0),
F(80000000, P_GPLL0, 10, 0, 0),
@@ -730,6 +831,13 @@ static const struct freq_tbl ftbl_csi_phytimer_clk_src[] = {
{ }
};
+static const struct freq_tbl ftbl_csi_phytimer_clk_src_msm8937[] = {
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
static struct clk_rcg2 csi0phytimer_clk_src = {
.cmd_rcgr = 0x4e000,
.hid_width = 5,
@@ -774,6 +882,19 @@ static struct clk_rcg2 esc0_clk_src = {
}
};
+static struct clk_rcg2 esc1_clk_src = {
+ .cmd_rcgr = 0x4d0a8,
+ .hid_width = 5,
+ .freq_tbl = ftbl_esc0_1_clk_src,
+ .parent_map = gcc_xo_gpll0_out_aux_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "esc1_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static const struct parent_map gcc_gfx3d_map[] = {
{ P_XO, 0 },
{ P_GPLL0, 1 },
@@ -817,6 +938,25 @@ static const struct freq_tbl ftbl_gfx3d_clk_src[] = {
{ }
};
+static const struct freq_tbl ftbl_gfx3d_clk_src_msm8937[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(216000000, P_GPLL6, 5, 0, 0),
+ F(228570000, P_GPLL0, 3.5, 0, 0),
+ F(240000000, P_GPLL6, 4.5, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ F(300000000, P_GPLL3, 1, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(375000000, P_GPLL3, 1, 0, 0),
+ F(400000000, P_GPLL0, 2, 0, 0),
+ F(450000000, P_GPLL3, 1, 0, 0),
+ { }
+};
+
static struct clk_rcg2 gfx3d_clk_src = {
.cmd_rcgr = 0x59000,
.hid_width = 5,
@@ -973,21 +1113,29 @@ static struct clk_rcg2 mdp_clk_src = {
}
};
-static const struct parent_map gcc_pclk_map[] = {
+static const struct parent_map gcc_pclk0_map[] = {
{ P_XO, 0 },
{ P_DSI0PLL, 1 },
+ { P_DSI1PLL, 3 },
+};
+
+static const struct parent_map gcc_pclk1_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL, 3 },
+ { P_DSI1PLL, 1 },
};
static const struct clk_parent_data gcc_pclk_data[] = {
{ .index = DT_XO },
{ .index = DT_DSI0PLL },
+ { .index = DT_DSI1PLL },
};
static struct clk_rcg2 pclk0_clk_src = {
.cmd_rcgr = 0x4d000,
.hid_width = 5,
.mnd_width = 8,
- .parent_map = gcc_pclk_map,
+ .parent_map = gcc_pclk0_map,
.clkr.hw.init = &(struct clk_init_data) {
.name = "pclk0_clk_src",
.parent_data = gcc_pclk_data,
@@ -997,6 +1145,20 @@ static struct clk_rcg2 pclk0_clk_src = {
}
};
+static struct clk_rcg2 pclk1_clk_src = {
+ .cmd_rcgr = 0x4d0b8,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .parent_map = gcc_pclk1_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pclk1_clk_src",
+ .parent_data = gcc_pclk_data,
+ .num_parents = ARRAY_SIZE(gcc_pclk_data),
+ .ops = &clk_pixel_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
static const struct freq_tbl ftbl_pdm2_clk_src[] = {
F(64000000, P_GPLL0, 12.5, 0, 0),
{ }
@@ -1108,6 +1270,14 @@ static const struct freq_tbl ftbl_usb_hs_system_clk_src[] = {
{ }
};
+static const struct freq_tbl ftbl_usb_hs_system_clk_src_msm8937[] = {
+ F(57142857, P_GPLL0, 14, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(133333333, P_GPLL0, 6, 0, 0),
+ F(177777778, P_GPLL0, 4.5, 0, 0),
+ { }
+};
+
static struct clk_rcg2 usb_hs_system_clk_src = {
.cmd_rcgr = 0x41010,
.hid_width = 5,
@@ -1132,6 +1302,15 @@ static const struct freq_tbl ftbl_vcodec0_clk_src[] = {
{ }
};
+static const struct freq_tbl ftbl_vcodec0_clk_src_msm8937[] = {
+ F(166150000, P_GPLL6, 6.5, 0, 0),
+ F(240000000, P_GPLL6, 4.5, 0, 0),
+ F(308571428, P_GPLL6, 3.5, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(360000000, P_GPLL6, 3, 0, 0),
+ { }
+};
+
static struct clk_rcg2 vcodec0_clk_src = {
.cmd_rcgr = 0x4c000,
.hid_width = 5,
@@ -1160,6 +1339,23 @@ static const struct freq_tbl ftbl_vfe_clk_src[] = {
{ }
};
+static const struct freq_tbl ftbl_vfe_clk_src_msm8937[] = {
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(133333333, P_GPLL0, 6, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(177777778, P_GPLL0, 4.5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266666667, P_GPLL0, 3, 0, 0),
+ F(308571428, P_GPLL6, 3.5, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(360000000, P_GPLL6, 3, 0, 0),
+ F(400000000, P_GPLL0, 2, 0, 0),
+ F(432000000, P_GPLL6, 2.5, 0, 0),
+ { }
+};
+
static struct clk_rcg2 vfe0_clk_src = {
.cmd_rcgr = 0x58000,
.hid_width = 5,
@@ -1269,6 +1465,24 @@ static struct clk_branch gcc_blsp2_ahb_clk = {
}
};
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+ .halt_reg = 0x02008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x02008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup1_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
.halt_reg = 0x03010,
.halt_check = BRANCH_HALT,
@@ -1377,6 +1591,42 @@ static struct clk_branch gcc_blsp2_qup3_i2c_apps_clk = {
}
};
+static struct clk_branch gcc_blsp2_qup4_i2c_apps_clk = {
+ .halt_reg = 0x18020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x18020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup4_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup4_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+ .halt_reg = 0x02004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x02004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup1_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
.halt_reg = 0x0300c,
.halt_check = BRANCH_HALT,
@@ -1485,6 +1735,24 @@ static struct clk_branch gcc_blsp2_qup3_spi_apps_clk = {
}
};
+static struct clk_branch gcc_blsp2_qup4_spi_apps_clk = {
+ .halt_reg = 0x1801c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1801c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup4_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup4_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch gcc_blsp1_uart1_apps_clk = {
.halt_reg = 0x0203c,
.halt_check = BRANCH_HALT,
@@ -2521,6 +2789,24 @@ static struct clk_branch gcc_mdss_byte0_clk = {
}
};
+static struct clk_branch gcc_mdss_byte1_clk = {
+ .halt_reg = 0x4d0a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d0a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_byte1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch gcc_mdss_esc0_clk = {
.halt_reg = 0x4d098,
.halt_check = BRANCH_HALT,
@@ -2539,6 +2825,24 @@ static struct clk_branch gcc_mdss_esc0_clk = {
}
};
+static struct clk_branch gcc_mdss_esc1_clk = {
+ .halt_reg = 0x4d09c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d09c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_esc1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &esc1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch gcc_mdss_mdp_clk = {
.halt_reg = 0x4d088,
.halt_check = BRANCH_HALT,
@@ -2575,6 +2879,24 @@ static struct clk_branch gcc_mdss_pclk0_clk = {
}
};
+static struct clk_branch gcc_mdss_pclk1_clk = {
+ .halt_reg = 0x4d0a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d0a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_pclk1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &pclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch gcc_mdss_vsync_clk = {
.halt_reg = 0x4d090,
.halt_check = BRANCH_HALT,
@@ -2632,6 +2954,24 @@ static struct clk_branch gcc_oxili_ahb_clk = {
}
};
+static struct clk_branch gcc_oxili_aon_clk = {
+ .halt_reg = 0x5904c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5904c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_oxili_aon_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch gcc_oxili_gfx3d_clk = {
.halt_reg = 0x59020,
.halt_check = BRANCH_HALT,
@@ -2650,6 +2990,19 @@ static struct clk_branch gcc_oxili_gfx3d_clk = {
}
};
+static struct clk_branch gcc_oxili_timer_clk = {
+ .halt_reg = 0x59040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_oxili_timer_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_pdm2_clk = {
.halt_reg = 0x4400c,
.halt_check = BRANCH_HALT,
@@ -3027,6 +3380,28 @@ static struct gdsc oxili_gx_gdsc = {
.flags = CLAMP_IO,
};
+static struct gdsc oxili_gx_gdsc_msm8937 = {
+ .gdscr = 0x5901c,
+ .clamp_io_ctrl = 0x5b00c,
+ .cxcs = (unsigned int []){ 0x59000 },
+ .cxc_count = 1,
+ .pd = {
+ .name = "oxili_gx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO,
+};
+
+static struct gdsc oxili_cx_gdsc = {
+ .gdscr = 0x59044,
+ .cxcs = (unsigned int []){ 0x59020 },
+ .cxc_count = 1,
+ .pd = {
+ .name = "oxili_cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
static struct gdsc cpp_gdsc = {
.gdscr = 0x58078,
.cxcs = (unsigned int []){ 0x5803c, 0x58064 },
@@ -3207,6 +3582,188 @@ static struct clk_regmap *gcc_msm8917_clocks[] = {
[GCC_VFE_TBU_CLK] = &gcc_vfe_tbu_clk.clkr,
};
+static struct clk_regmap *gcc_msm8937_clocks[] = {
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_EARLY] = &gpll0_early.clkr,
+ [GPLL0_SLEEP_CLK_SRC] = &gpll0_sleep_clk_src.clkr,
+ [GPLL3] = &gpll3.clkr,
+ [GPLL3_EARLY] = &gpll3_early.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL4_EARLY] = &gpll4_early.clkr,
+ [GPLL6] = &gpll6,
+ [GPLL6_EARLY] = &gpll6_early.clkr,
+ [APSS_AHB_CLK_SRC] = &apss_ahb_clk_src.clkr,
+ [MSM8937_BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
+ [MSM8937_BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr,
+ [BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr,
+ [BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr,
+ [BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr,
+ [BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr,
+ [BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr,
+ [BLSP2_QUP1_I2C_APPS_CLK_SRC] = &blsp2_qup1_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP1_SPI_APPS_CLK_SRC] = &blsp2_qup1_spi_apps_clk_src.clkr,
+ [BLSP2_QUP2_I2C_APPS_CLK_SRC] = &blsp2_qup2_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP2_SPI_APPS_CLK_SRC] = &blsp2_qup2_spi_apps_clk_src.clkr,
+ [BLSP2_QUP3_I2C_APPS_CLK_SRC] = &blsp2_qup3_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP3_SPI_APPS_CLK_SRC] = &blsp2_qup3_spi_apps_clk_src.clkr,
+ [MSM8937_BLSP2_QUP4_I2C_APPS_CLK_SRC] = &blsp2_qup4_i2c_apps_clk_src.clkr,
+ [MSM8937_BLSP2_QUP4_SPI_APPS_CLK_SRC] = &blsp2_qup4_spi_apps_clk_src.clkr,
+ [BLSP2_UART1_APPS_CLK_SRC] = &blsp2_uart1_apps_clk_src.clkr,
+ [BLSP2_UART2_APPS_CLK_SRC] = &blsp2_uart2_apps_clk_src.clkr,
+ [BYTE0_CLK_SRC] = &byte0_clk_src.clkr,
+ [MSM8937_BYTE1_CLK_SRC] = &byte1_clk_src.clkr,
+ [CAMSS_GP0_CLK_SRC] = &camss_gp0_clk_src.clkr,
+ [CAMSS_GP1_CLK_SRC] = &camss_gp1_clk_src.clkr,
+ [CAMSS_TOP_AHB_CLK_SRC] = &camss_top_ahb_clk_src.clkr,
+ [CCI_CLK_SRC] = &cci_clk_src.clkr,
+ [CPP_CLK_SRC] = &cpp_clk_src.clkr,
+ [CRYPTO_CLK_SRC] = &crypto_clk_src.clkr,
+ [CSI0PHYTIMER_CLK_SRC] = &csi0phytimer_clk_src.clkr,
+ [CSI0_CLK_SRC] = &csi0_clk_src.clkr,
+ [CSI1PHYTIMER_CLK_SRC] = &csi1phytimer_clk_src.clkr,
+ [CSI1_CLK_SRC] = &csi1_clk_src.clkr,
+ [CSI2_CLK_SRC] = &csi2_clk_src.clkr,
+ [ESC0_CLK_SRC] = &esc0_clk_src.clkr,
+ [MSM8937_ESC1_CLK_SRC] = &esc1_clk_src.clkr,
+ [GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr,
+ [GP1_CLK_SRC] = &gp1_clk_src.clkr,
+ [GP2_CLK_SRC] = &gp2_clk_src.clkr,
+ [GP3_CLK_SRC] = &gp3_clk_src.clkr,
+ [JPEG0_CLK_SRC] = &jpeg0_clk_src.clkr,
+ [MCLK0_CLK_SRC] = &mclk0_clk_src.clkr,
+ [MCLK1_CLK_SRC] = &mclk1_clk_src.clkr,
+ [MCLK2_CLK_SRC] = &mclk2_clk_src.clkr,
+ [MDP_CLK_SRC] = &mdp_clk_src.clkr,
+ [PCLK0_CLK_SRC] = &pclk0_clk_src.clkr,
+ [MSM8937_PCLK1_CLK_SRC] = &pclk1_clk_src.clkr,
+ [PDM2_CLK_SRC] = &pdm2_clk_src.clkr,
+ [SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr,
+ [SDCC1_ICE_CORE_CLK_SRC] = &sdcc1_ice_core_clk_src.clkr,
+ [SDCC2_APPS_CLK_SRC] = &sdcc2_apps_clk_src.clkr,
+ [USB_HS_SYSTEM_CLK_SRC] = &usb_hs_system_clk_src.clkr,
+ [VCODEC0_CLK_SRC] = &vcodec0_clk_src.clkr,
+ [VFE0_CLK_SRC] = &vfe0_clk_src.clkr,
+ [VFE1_CLK_SRC] = &vfe1_clk_src.clkr,
+ [VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
+ [GCC_APSS_TCU_CLK] = &gcc_apss_tcu_clk.clkr,
+ [GCC_BIMC_GFX_CLK] = &gcc_bimc_gfx_clk.clkr,
+ [GCC_BIMC_GPU_CLK] = &gcc_bimc_gpu_clk.clkr,
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [MSM8937_GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+ [MSM8937_GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_BLSP2_AHB_CLK] = &gcc_blsp2_ahb_clk.clkr,
+ [GCC_BLSP2_QUP1_I2C_APPS_CLK] = &gcc_blsp2_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP1_SPI_APPS_CLK] = &gcc_blsp2_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_I2C_APPS_CLK] = &gcc_blsp2_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_SPI_APPS_CLK] = &gcc_blsp2_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_I2C_APPS_CLK] = &gcc_blsp2_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_SPI_APPS_CLK] = &gcc_blsp2_qup3_spi_apps_clk.clkr,
+ [MSM8937_GCC_BLSP2_QUP4_I2C_APPS_CLK] = &gcc_blsp2_qup4_i2c_apps_clk.clkr,
+ [MSM8937_GCC_BLSP2_QUP4_SPI_APPS_CLK] = &gcc_blsp2_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP2_UART1_APPS_CLK] = &gcc_blsp2_uart1_apps_clk.clkr,
+ [GCC_BLSP2_UART2_APPS_CLK] = &gcc_blsp2_uart2_apps_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMSS_AHB_CLK] = &gcc_camss_ahb_clk.clkr,
+ [GCC_CAMSS_CCI_AHB_CLK] = &gcc_camss_cci_ahb_clk.clkr,
+ [GCC_CAMSS_CCI_CLK] = &gcc_camss_cci_clk.clkr,
+ [GCC_CAMSS_CPP_AHB_CLK] = &gcc_camss_cpp_ahb_clk.clkr,
+ [GCC_CAMSS_CPP_CLK] = &gcc_camss_cpp_clk.clkr,
+ [GCC_CAMSS_CSI0PHYTIMER_CLK] = &gcc_camss_csi0phytimer_clk.clkr,
+ [GCC_CAMSS_CSI0PHY_CLK] = &gcc_camss_csi0phy_clk.clkr,
+ [GCC_CAMSS_CSI0PIX_CLK] = &gcc_camss_csi0pix_clk.clkr,
+ [GCC_CAMSS_CSI0RDI_CLK] = &gcc_camss_csi0rdi_clk.clkr,
+ [GCC_CAMSS_CSI0_AHB_CLK] = &gcc_camss_csi0_ahb_clk.clkr,
+ [GCC_CAMSS_CSI0_CLK] = &gcc_camss_csi0_clk.clkr,
+ [GCC_CAMSS_CSI1PHYTIMER_CLK] = &gcc_camss_csi1phytimer_clk.clkr,
+ [GCC_CAMSS_CSI1PHY_CLK] = &gcc_camss_csi1phy_clk.clkr,
+ [GCC_CAMSS_CSI1PIX_CLK] = &gcc_camss_csi1pix_clk.clkr,
+ [GCC_CAMSS_CSI1RDI_CLK] = &gcc_camss_csi1rdi_clk.clkr,
+ [GCC_CAMSS_CSI1_AHB_CLK] = &gcc_camss_csi1_ahb_clk.clkr,
+ [GCC_CAMSS_CSI1_CLK] = &gcc_camss_csi1_clk.clkr,
+ [GCC_CAMSS_CSI2PHY_CLK] = &gcc_camss_csi2phy_clk.clkr,
+ [GCC_CAMSS_CSI2PIX_CLK] = &gcc_camss_csi2pix_clk.clkr,
+ [GCC_CAMSS_CSI2RDI_CLK] = &gcc_camss_csi2rdi_clk.clkr,
+ [GCC_CAMSS_CSI2_AHB_CLK] = &gcc_camss_csi2_ahb_clk.clkr,
+ [GCC_CAMSS_CSI2_CLK] = &gcc_camss_csi2_clk.clkr,
+ [GCC_CAMSS_CSI_VFE0_CLK] = &gcc_camss_csi_vfe0_clk.clkr,
+ [GCC_CAMSS_CSI_VFE1_CLK] = &gcc_camss_csi_vfe1_clk.clkr,
+ [GCC_CAMSS_GP0_CLK] = &gcc_camss_gp0_clk.clkr,
+ [GCC_CAMSS_GP1_CLK] = &gcc_camss_gp1_clk.clkr,
+ [GCC_CAMSS_ISPIF_AHB_CLK] = &gcc_camss_ispif_ahb_clk.clkr,
+ [GCC_CAMSS_JPEG0_CLK] = &gcc_camss_jpeg0_clk.clkr,
+ [GCC_CAMSS_JPEG_AHB_CLK] = &gcc_camss_jpeg_ahb_clk.clkr,
+ [GCC_CAMSS_JPEG_AXI_CLK] = &gcc_camss_jpeg_axi_clk.clkr,
+ [GCC_CAMSS_MCLK0_CLK] = &gcc_camss_mclk0_clk.clkr,
+ [GCC_CAMSS_MCLK1_CLK] = &gcc_camss_mclk1_clk.clkr,
+ [GCC_CAMSS_MCLK2_CLK] = &gcc_camss_mclk2_clk.clkr,
+ [GCC_CAMSS_MICRO_AHB_CLK] = &gcc_camss_micro_ahb_clk.clkr,
+ [GCC_CAMSS_TOP_AHB_CLK] = &gcc_camss_top_ahb_clk.clkr,
+ [GCC_CAMSS_VFE0_AHB_CLK] = &gcc_camss_vfe0_ahb_clk.clkr,
+ [GCC_CAMSS_VFE0_AXI_CLK] = &gcc_camss_vfe0_axi_clk.clkr,
+ [GCC_CAMSS_VFE0_CLK] = &gcc_camss_vfe0_clk.clkr,
+ [GCC_CAMSS_VFE1_AHB_CLK] = &gcc_camss_vfe1_ahb_clk.clkr,
+ [GCC_CAMSS_VFE1_AXI_CLK] = &gcc_camss_vfe1_axi_clk.clkr,
+ [GCC_CAMSS_VFE1_CLK] = &gcc_camss_vfe1_clk.clkr,
+ [GCC_CPP_TBU_CLK] = &gcc_cpp_tbu_clk.clkr,
+ [GCC_CRYPTO_AHB_CLK] = &gcc_crypto_ahb_clk.clkr,
+ [GCC_CRYPTO_AXI_CLK] = &gcc_crypto_axi_clk.clkr,
+ [GCC_CRYPTO_CLK] = &gcc_crypto_clk.clkr,
+ [GCC_DCC_CLK] = &gcc_dcc_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_JPEG_TBU_CLK] = &gcc_jpeg_tbu_clk.clkr,
+ [GCC_MDP_TBU_CLK] = &gcc_mdp_tbu_clk.clkr,
+ [GCC_MDSS_AHB_CLK] = &gcc_mdss_ahb_clk.clkr,
+ [GCC_MDSS_AXI_CLK] = &gcc_mdss_axi_clk.clkr,
+ [GCC_MDSS_BYTE0_CLK] = &gcc_mdss_byte0_clk.clkr,
+ [MSM8937_GCC_MDSS_BYTE1_CLK] = &gcc_mdss_byte1_clk.clkr,
+ [GCC_MDSS_ESC0_CLK] = &gcc_mdss_esc0_clk.clkr,
+ [MSM8937_GCC_MDSS_ESC1_CLK] = &gcc_mdss_esc1_clk.clkr,
+ [GCC_MDSS_MDP_CLK] = &gcc_mdss_mdp_clk.clkr,
+ [GCC_MDSS_PCLK0_CLK] = &gcc_mdss_pclk0_clk.clkr,
+ [MSM8937_GCC_MDSS_PCLK1_CLK] = &gcc_mdss_pclk1_clk.clkr,
+ [GCC_MDSS_VSYNC_CLK] = &gcc_mdss_vsync_clk.clkr,
+ [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
+ [GCC_MSS_Q6_BIMC_AXI_CLK] = &gcc_mss_q6_bimc_axi_clk.clkr,
+ [GCC_OXILI_AHB_CLK] = &gcc_oxili_ahb_clk.clkr,
+ [MSM8937_GCC_OXILI_AON_CLK] = &gcc_oxili_aon_clk.clkr,
+ [GCC_OXILI_GFX3D_CLK] = &gcc_oxili_gfx3d_clk.clkr,
+ [MSM8937_GCC_OXILI_TIMER_CLK] = &gcc_oxili_timer_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QDSS_DAP_CLK] = &gcc_qdss_dap_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SMMU_CFG_CLK] = &gcc_smmu_cfg_clk.clkr,
+ [GCC_USB2A_PHY_SLEEP_CLK] = &gcc_usb2a_phy_sleep_clk.clkr,
+ [GCC_USB_HS_AHB_CLK] = &gcc_usb_hs_ahb_clk.clkr,
+ [GCC_USB_HS_PHY_CFG_AHB_CLK] = &gcc_usb_hs_phy_cfg_ahb_clk.clkr,
+ [GCC_USB_HS_SYSTEM_CLK] = &gcc_usb_hs_system_clk.clkr,
+ [GCC_VENUS0_AHB_CLK] = &gcc_venus0_ahb_clk.clkr,
+ [GCC_VENUS0_AXI_CLK] = &gcc_venus0_axi_clk.clkr,
+ [GCC_VENUS0_CORE0_VCODEC0_CLK] = &gcc_venus0_core0_vcodec0_clk.clkr,
+ [GCC_VENUS0_VCODEC0_CLK] = &gcc_venus0_vcodec0_clk.clkr,
+ [GCC_VENUS_TBU_CLK] = &gcc_venus_tbu_clk.clkr,
+ [GCC_VFE1_TBU_CLK] = &gcc_vfe1_tbu_clk.clkr,
+ [GCC_VFE_TBU_CLK] = &gcc_vfe_tbu_clk.clkr,
+};
+
static const struct qcom_reset_map gcc_msm8917_resets[] = {
[GCC_CAMSS_MICRO_BCR] = { 0x56008 },
[GCC_MSS_BCR] = { 0x71000 },
@@ -3234,6 +3791,18 @@ static struct gdsc *gcc_msm8917_gdscs[] = {
[VFE1_GDSC] = &vfe1_gdsc,
};
+static struct gdsc *gcc_msm8937_gdscs[] = {
+ [CPP_GDSC] = &cpp_gdsc,
+ [JPEG_GDSC] = &jpeg_gdsc,
+ [MDSS_GDSC] = &mdss_gdsc,
+ [OXILI_GX_GDSC] = &oxili_gx_gdsc_msm8937,
+ [MSM8937_OXILI_CX_GDSC] = &oxili_cx_gdsc,
+ [VENUS_CORE0_GDSC] = &venus_core0_gdsc,
+ [VENUS_GDSC] = &venus_gdsc,
+ [VFE0_GDSC] = &vfe0_gdsc,
+ [VFE1_GDSC] = &vfe1_gdsc,
+};
+
static const struct qcom_cc_desc gcc_msm8917_desc = {
.config = &gcc_msm8917_regmap_config,
.clks = gcc_msm8917_clocks,
@@ -3254,6 +3823,41 @@ static const struct qcom_cc_desc gcc_qm215_desc = {
.num_gdscs = ARRAY_SIZE(gcc_msm8917_gdscs),
};
+static const struct qcom_cc_desc gcc_msm8937_desc = {
+ .config = &gcc_msm8917_regmap_config,
+ .clks = gcc_msm8937_clocks,
+ .num_clks = ARRAY_SIZE(gcc_msm8937_clocks),
+ .resets = gcc_msm8917_resets,
+ .num_resets = ARRAY_SIZE(gcc_msm8917_resets),
+ .gdscs = gcc_msm8937_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_msm8937_gdscs),
+};
+
+static void msm8937_clock_override(void)
+{
+ /* GPLL3 750MHz configuration */
+ gpll3_early_config.l = 47;
+ gpll3_early.vco_table = gpll3_p_vco_msm8937;
+ gpll3_early.num_vco = ARRAY_SIZE(gpll3_p_vco_msm8937);
+
+ /*
+ * Set the clocks below to use the msm8937-specific parent map.
+ */
+ vcodec0_clk_src.parent_map = gcc_cpp_map;
+ vcodec0_clk_src.clkr.hw.init = &vcodec0_clk_src_init_msm8937;
+
+ /*
+ * Set the clocks below to use the msm8937-specific frequency tables.
+ */
+ vfe0_clk_src.freq_tbl = ftbl_vfe_clk_src_msm8937;
+ vfe1_clk_src.freq_tbl = ftbl_vfe_clk_src_msm8937;
+ cpp_clk_src.freq_tbl = ftbl_cpp_clk_src_msm8937;
+ vcodec0_clk_src.freq_tbl = ftbl_vcodec0_clk_src_msm8937;
+ csi0phytimer_clk_src.freq_tbl = ftbl_csi_phytimer_clk_src_msm8937;
+ csi1phytimer_clk_src.freq_tbl = ftbl_csi_phytimer_clk_src_msm8937;
+ usb_hs_system_clk_src.freq_tbl = ftbl_usb_hs_system_clk_src_msm8937;
+}
+
static int gcc_msm8917_probe(struct platform_device *pdev)
{
struct regmap *regmap;
@@ -3261,8 +3865,12 @@ static int gcc_msm8917_probe(struct platform_device *pdev)
gcc_desc = of_device_get_match_data(&pdev->dev);
- if (gcc_desc == &gcc_qm215_desc)
+ if (gcc_desc == &gcc_qm215_desc) {
gfx3d_clk_src.parent_map = gcc_gfx3d_map_qm215;
+ } else if (gcc_desc == &gcc_msm8937_desc) {
+ msm8937_clock_override();
+ gfx3d_clk_src.freq_tbl = ftbl_gfx3d_clk_src_msm8937;
+ }
regmap = qcom_cc_map(pdev, gcc_desc);
if (IS_ERR(regmap))
@@ -3276,6 +3884,7 @@ static int gcc_msm8917_probe(struct platform_device *pdev)
static const struct of_device_id gcc_msm8917_match_table[] = {
{ .compatible = "qcom,gcc-msm8917", .data = &gcc_msm8917_desc },
{ .compatible = "qcom,gcc-qm215", .data = &gcc_qm215_desc },
+ { .compatible = "qcom,gcc-msm8937", .data = &gcc_msm8937_desc },
{},
};
MODULE_DEVICE_TABLE(of, gcc_msm8917_match_table);
diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c
index 5ca003c9bfba..efc75a3814ab 100644
--- a/drivers/clk/qcom/gcc-qcs404.c
+++ b/drivers/clk/qcom/gcc-qcs404.c
@@ -2754,7 +2754,7 @@ static struct clk_regmap *gcc_qcs404_clocks[] = {
[GCC_DCC_CLK] = &gcc_dcc_clk.clkr,
[GCC_DCC_XO_CLK] = &gcc_dcc_xo_clk.clkr,
[GCC_WCSS_Q6_AHB_CLK] = &gcc_wdsp_q6ss_ahbs_clk.clkr,
- [GCC_WCSS_Q6_AXIM_CLK] = &gcc_wdsp_q6ss_axim_clk.clkr,
+ [GCC_WCSS_Q6_AXIM_CLK] = &gcc_wdsp_q6ss_axim_clk.clkr,
};
diff --git a/drivers/clk/qcom/gcc-sc8280xp.c b/drivers/clk/qcom/gcc-sc8280xp.c
index f27d0003f427..b683795475e3 100644
--- a/drivers/clk/qcom/gcc-sc8280xp.c
+++ b/drivers/clk/qcom/gcc-sc8280xp.c
@@ -6775,10 +6775,6 @@ static struct gdsc pcie_1_tunnel_gdsc = {
.flags = VOTABLE | RETAIN_FF_ENABLE,
};
-/*
- * The Qualcomm PCIe driver does not yet implement suspend so to keep the
- * PCIe power domains always-on for now.
- */
static struct gdsc pcie_2a_gdsc = {
.gdscr = 0x9d004,
.collapse_ctrl = 0x52128,
diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
index 01a76f1b5b4c..20253a06a583 100644
--- a/drivers/clk/qcom/gcc-sdm660.c
+++ b/drivers/clk/qcom/gcc-sdm660.c
@@ -2247,6 +2247,45 @@ static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
},
};
+static struct clk_branch hlos1_vote_lpass_adsp_smmu_clk = {
+ .halt_reg = 0x7d014,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x7d014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "hlos1_vote_lpass_adsp_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch hlos1_vote_turing_adsp_smmu_clk = {
+ .halt_reg = 0x7d048,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x7d048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "hlos1_vote_turing_adsp_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch hlos2_vote_turing_adsp_smmu_clk = {
+ .halt_reg = 0x7e048,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x7e048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "hlos2_vote_turing_adsp_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct gdsc ufs_gdsc = {
.gdscr = 0x75004,
.gds_hw_ctrl = 0x0,
@@ -2277,6 +2316,33 @@ static struct gdsc pcie_0_gdsc = {
.flags = VOTABLE,
};
+static struct gdsc hlos1_vote_turing_adsp_gdsc = {
+ .gdscr = 0x7d04c,
+ .pd = {
+ .name = "hlos1_vote_turing_adsp_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos2_vote_turing_adsp_gdsc = {
+ .gdscr = 0x7e04c,
+ .pd = {
+ .name = "hlos2_vote_turing_adsp_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_lpass_adsp_gdsc = {
+ .gdscr = 0x7d034,
+ .pd = {
+ .name = "hlos1_vote_lpass_adsp_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
static struct clk_hw *gcc_sdm660_hws[] = {
&xo.hw,
&gpll0_early_div.hw,
@@ -2409,12 +2475,18 @@ static struct clk_regmap *gcc_sdm660_clocks[] = {
[USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
[USB30_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr,
[USB3_PHY_AUX_CLK_SRC] = &usb3_phy_aux_clk_src.clkr,
+ [GCC_HLOS1_VOTE_LPASS_ADSP_SMMU_CLK] = &hlos1_vote_lpass_adsp_smmu_clk.clkr,
+ [GCC_HLOS1_VOTE_TURING_ADSP_SMMU_CLK] = &hlos1_vote_turing_adsp_smmu_clk.clkr,
+ [GCC_HLOS2_VOTE_TURING_ADSP_SMMU_CLK] = &hlos2_vote_turing_adsp_smmu_clk.clkr,
};
static struct gdsc *gcc_sdm660_gdscs[] = {
[UFS_GDSC] = &ufs_gdsc,
[USB_30_GDSC] = &usb_30_gdsc,
[PCIE_0_GDSC] = &pcie_0_gdsc,
+ [HLOS1_VOTE_TURING_ADSP_GDSC] = &hlos1_vote_turing_adsp_gdsc,
+ [HLOS2_VOTE_TURING_ADSP_GDSC] = &hlos2_vote_turing_adsp_gdsc,
+ [HLOS1_VOTE_LPASS_ADSP_GDSC] = &hlos1_vote_lpass_adsp_gdsc,
};
static const struct qcom_reset_map gcc_sdm660_resets[] = {
diff --git a/drivers/clk/qcom/gpucc-sa8775p.c b/drivers/clk/qcom/gpucc-sa8775p.c
index 78cad622cb5a..25dcc5912f99 100644
--- a/drivers/clk/qcom/gpucc-sa8775p.c
+++ b/drivers/clk/qcom/gpucc-sa8775p.c
@@ -365,7 +365,7 @@ static struct clk_branch gpu_cc_cx_gmu_clk = {
&gpu_cc_gmu_clk_src.clkr.hw,
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_aon_ops,
},
},
@@ -414,7 +414,7 @@ static struct clk_branch gpu_cc_cxo_clk = {
&gpu_cc_xo_clk_src.clkr.hw,
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -499,7 +499,7 @@ static struct clk_branch gpu_cc_hub_cx_int_clk = {
&gpu_cc_hub_cx_int_div_clk_src.clkr.hw,
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_aon_ops,
},
},
diff --git a/drivers/clk/qcom/gpucc-sc7180.c b/drivers/clk/qcom/gpucc-sc7180.c
index a7bf44544b95..97287488e05a 100644
--- a/drivers/clk/qcom/gpucc-sc7180.c
+++ b/drivers/clk/qcom/gpucc-sc7180.c
@@ -42,7 +42,7 @@ static struct clk_alpha_pll gpu_cc_pll1 = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_pll1",
- .parent_data = &(const struct clk_parent_data){
+ .parent_data = &(const struct clk_parent_data){
.fw_name = "bi_tcxo",
},
.num_parents = 1,
diff --git a/drivers/clk/qcom/gpucc-sm6350.c b/drivers/clk/qcom/gpucc-sm6350.c
index ee89c42413f8..efbee1518dd3 100644
--- a/drivers/clk/qcom/gpucc-sm6350.c
+++ b/drivers/clk/qcom/gpucc-sm6350.c
@@ -67,7 +67,7 @@ static struct clk_alpha_pll gpu_cc_pll0 = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_pll0",
- .parent_data = &(const struct clk_parent_data){
+ .parent_data = &(const struct clk_parent_data){
.index = DT_BI_TCXO,
.fw_name = "bi_tcxo",
},
@@ -111,7 +111,7 @@ static struct clk_alpha_pll gpu_cc_pll1 = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_pll1",
- .parent_data = &(const struct clk_parent_data){
+ .parent_data = &(const struct clk_parent_data){
.index = DT_BI_TCXO,
.fw_name = "bi_tcxo",
},
diff --git a/drivers/clk/qcom/gpucc-sm8150.c b/drivers/clk/qcom/gpucc-sm8150.c
index 7ce91208c0bc..5701031c17f3 100644
--- a/drivers/clk/qcom/gpucc-sm8150.c
+++ b/drivers/clk/qcom/gpucc-sm8150.c
@@ -53,7 +53,7 @@ static struct clk_alpha_pll gpu_cc_pll1 = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_pll1",
- .parent_data = &(const struct clk_parent_data){
+ .parent_data = &(const struct clk_parent_data){
.fw_name = "bi_tcxo",
},
.num_parents = 1,
diff --git a/drivers/clk/qcom/gpucc-sm8250.c b/drivers/clk/qcom/gpucc-sm8250.c
index ca0a1681d352..eee3208640cd 100644
--- a/drivers/clk/qcom/gpucc-sm8250.c
+++ b/drivers/clk/qcom/gpucc-sm8250.c
@@ -56,7 +56,7 @@ static struct clk_alpha_pll gpu_cc_pll1 = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_pll1",
- .parent_data = &(const struct clk_parent_data){
+ .parent_data = &(const struct clk_parent_data){
.fw_name = "bi_tcxo",
},
.num_parents = 1,
diff --git a/drivers/clk/qcom/hfpll.c b/drivers/clk/qcom/hfpll.c
index b0b0cb074b4a..385964196185 100644
--- a/drivers/clk/qcom/hfpll.c
+++ b/drivers/clk/qcom/hfpll.c
@@ -99,7 +99,6 @@ static const struct regmap_config hfpll_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x30,
- .fast_io = true,
};
static int qcom_hfpll_probe(struct platform_device *pdev)
diff --git a/drivers/clk/qcom/ipq-cmn-pll.c b/drivers/clk/qcom/ipq-cmn-pll.c
index b3d7169c63e5..dafbf5732048 100644
--- a/drivers/clk/qcom/ipq-cmn-pll.c
+++ b/drivers/clk/qcom/ipq-cmn-pll.c
@@ -108,7 +108,6 @@ static const struct regmap_config ipq_cmn_pll_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x7fc,
- .fast_io = true,
};
static const struct cmn_pll_fixed_output_clk ipq5018_output_clks[] = {
diff --git a/drivers/clk/qcom/lpassaudiocc-sc7280.c b/drivers/clk/qcom/lpassaudiocc-sc7280.c
index 3ff123bffa11..7e2172969289 100644
--- a/drivers/clk/qcom/lpassaudiocc-sc7280.c
+++ b/drivers/clk/qcom/lpassaudiocc-sc7280.c
@@ -709,8 +709,8 @@ static const struct qcom_cc_desc lpass_audio_cc_sc7280_desc = {
};
static const struct qcom_reset_map lpass_audio_cc_sc7280_resets[] = {
- [LPASS_AUDIO_SWR_RX_CGCR] = { 0xa0, 1 },
- [LPASS_AUDIO_SWR_TX_CGCR] = { 0xa8, 1 },
+ [LPASS_AUDIO_SWR_RX_CGCR] = { 0xa0, 1 },
+ [LPASS_AUDIO_SWR_TX_CGCR] = { 0xa8, 1 },
[LPASS_AUDIO_SWR_WSA_CGCR] = { 0xb0, 1 },
};
diff --git a/drivers/clk/qcom/lpasscc-sc8280xp.c b/drivers/clk/qcom/lpasscc-sc8280xp.c
index 9fd9498d7dc8..ff839788c40e 100644
--- a/drivers/clk/qcom/lpasscc-sc8280xp.c
+++ b/drivers/clk/qcom/lpasscc-sc8280xp.c
@@ -18,9 +18,9 @@
#include "reset.h"
static const struct qcom_reset_map lpass_audiocc_sc8280xp_resets[] = {
- [LPASS_AUDIO_SWR_RX_CGCR] = { 0xa0, 1 },
+ [LPASS_AUDIO_SWR_RX_CGCR] = { 0xa0, 1 },
[LPASS_AUDIO_SWR_WSA_CGCR] = { 0xb0, 1 },
- [LPASS_AUDIO_SWR_WSA2_CGCR] = { 0xd8, 1 },
+ [LPASS_AUDIO_SWR_WSA2_CGCR] = { 0xd8, 1 },
};
static const struct regmap_config lpass_audiocc_sc8280xp_regmap_config = {
diff --git a/drivers/clk/qcom/lpasscc-sm6115.c b/drivers/clk/qcom/lpasscc-sm6115.c
index 8ffdab71b948..ac6d219233b4 100644
--- a/drivers/clk/qcom/lpasscc-sm6115.c
+++ b/drivers/clk/qcom/lpasscc-sm6115.c
@@ -17,7 +17,7 @@
#include "reset.h"
static const struct qcom_reset_map lpass_audiocc_sm6115_resets[] = {
- [LPASS_AUDIO_SWR_RX_CGCR] = { .reg = 0x98, .bit = 1, .udelay = 500 },
+ [LPASS_AUDIO_SWR_RX_CGCR] = { .reg = 0x98, .bit = 1, .udelay = 500 },
};
static struct regmap_config lpass_audiocc_sm6115_regmap_config = {
diff --git a/drivers/clk/qcom/lpasscorecc-sc7180.c b/drivers/clk/qcom/lpasscorecc-sc7180.c
index 5937b071533b..5174bd3dcdc5 100644
--- a/drivers/clk/qcom/lpasscorecc-sc7180.c
+++ b/drivers/clk/qcom/lpasscorecc-sc7180.c
@@ -42,7 +42,7 @@ static const struct alpha_pll_config lpass_lpaaudio_dig_pll_config = {
};
static const u8 clk_alpha_pll_regs_offset[][PLL_OFF_MAX_REGS] = {
- [CLK_ALPHA_PLL_TYPE_FABIA] = {
+ [CLK_ALPHA_PLL_TYPE_FABIA] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_CAL_L_VAL] = 0x8,
[PLL_OFF_USER_CTL] = 0x0c,
diff --git a/drivers/clk/qcom/mmcc-sdm660.c b/drivers/clk/qcom/mmcc-sdm660.c
index e69fc65b13da..b723c536dfb6 100644
--- a/drivers/clk/qcom/mmcc-sdm660.c
+++ b/drivers/clk/qcom/mmcc-sdm660.c
@@ -74,7 +74,7 @@ static struct clk_alpha_pll mmpll0 = {
},
};
-static struct clk_alpha_pll mmpll6 = {
+static struct clk_alpha_pll mmpll6 = {
.offset = 0xf0,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.clkr = {
diff --git a/drivers/clk/qcom/nsscc-ipq9574.c b/drivers/clk/qcom/nsscc-ipq9574.c
index 64c6b05ff066..c8b11b04a7c2 100644
--- a/drivers/clk/qcom/nsscc-ipq9574.c
+++ b/drivers/clk/qcom/nsscc-ipq9574.c
@@ -3016,7 +3016,7 @@ static const struct qcom_reset_map nss_cc_ipq9574_resets[] = {
[NSSPORT4_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(5, 4) },
[NSSPORT5_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(3, 2) },
[NSSPORT6_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(1, 0) },
- [EDMA_HW_RESET] = { .reg = 0x28a08, .bitmask = GENMASK(16, 15) },
+ [EDMA_HW_RESET] = { .reg = 0x28a08, .bitmask = GENMASK(16, 15) },
};
static const struct regmap_config nss_cc_ipq9574_regmap_config = {
diff --git a/drivers/clk/qcom/tcsrcc-glymur.c b/drivers/clk/qcom/tcsrcc-glymur.c
new file mode 100644
index 000000000000..c1f8b6d10b7f
--- /dev/null
+++ b/drivers/clk/qcom/tcsrcc-glymur.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025, Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,glymur-tcsr.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO_PAD,
+};
+
+static struct clk_branch tcsr_edp_clkref_en = {
+ .halt_reg = 0x1c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_edp_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_pcie_1_clkref_en = {
+ .halt_reg = 0x4,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_pcie_1_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_pcie_2_clkref_en = {
+ .halt_reg = 0x8,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_pcie_2_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_pcie_3_clkref_en = {
+ .halt_reg = 0x10,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x10,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_pcie_3_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_pcie_4_clkref_en = {
+ .halt_reg = 0x14,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x14,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_pcie_4_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb2_1_clkref_en = {
+ .halt_reg = 0x28,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x28,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb2_1_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb2_2_clkref_en = {
+ .halt_reg = 0x2c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x2c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb2_2_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb2_3_clkref_en = {
+ .halt_reg = 0x30,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x30,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb2_3_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb2_4_clkref_en = {
+ .halt_reg = 0x44,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x44,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb2_4_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb3_0_clkref_en = {
+ .halt_reg = 0x20,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x20,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb3_0_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb3_1_clkref_en = {
+ .halt_reg = 0x24,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x24,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb3_1_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb4_1_clkref_en = {
+ .halt_reg = 0x0,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb4_1_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb4_2_clkref_en = {
+ .halt_reg = 0x18,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x18,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb4_2_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *tcsr_cc_glymur_clocks[] = {
+ [TCSR_EDP_CLKREF_EN] = &tcsr_edp_clkref_en.clkr,
+ [TCSR_PCIE_1_CLKREF_EN] = &tcsr_pcie_1_clkref_en.clkr,
+ [TCSR_PCIE_2_CLKREF_EN] = &tcsr_pcie_2_clkref_en.clkr,
+ [TCSR_PCIE_3_CLKREF_EN] = &tcsr_pcie_3_clkref_en.clkr,
+ [TCSR_PCIE_4_CLKREF_EN] = &tcsr_pcie_4_clkref_en.clkr,
+ [TCSR_USB2_1_CLKREF_EN] = &tcsr_usb2_1_clkref_en.clkr,
+ [TCSR_USB2_2_CLKREF_EN] = &tcsr_usb2_2_clkref_en.clkr,
+ [TCSR_USB2_3_CLKREF_EN] = &tcsr_usb2_3_clkref_en.clkr,
+ [TCSR_USB2_4_CLKREF_EN] = &tcsr_usb2_4_clkref_en.clkr,
+ [TCSR_USB3_0_CLKREF_EN] = &tcsr_usb3_0_clkref_en.clkr,
+ [TCSR_USB3_1_CLKREF_EN] = &tcsr_usb3_1_clkref_en.clkr,
+ [TCSR_USB4_1_CLKREF_EN] = &tcsr_usb4_1_clkref_en.clkr,
+ [TCSR_USB4_2_CLKREF_EN] = &tcsr_usb4_2_clkref_en.clkr,
+};
+
+static const struct regmap_config tcsr_cc_glymur_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x44,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc tcsr_cc_glymur_desc = {
+ .config = &tcsr_cc_glymur_regmap_config,
+ .clks = tcsr_cc_glymur_clocks,
+ .num_clks = ARRAY_SIZE(tcsr_cc_glymur_clocks),
+};
+
+static const struct of_device_id tcsr_cc_glymur_match_table[] = {
+ { .compatible = "qcom,glymur-tcsr" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tcsr_cc_glymur_match_table);
+
+static int tcsr_cc_glymur_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &tcsr_cc_glymur_desc);
+}
+
+static struct platform_driver tcsr_cc_glymur_driver = {
+ .probe = tcsr_cc_glymur_probe,
+ .driver = {
+ .name = "tcsrcc-glymur",
+ .of_match_table = tcsr_cc_glymur_match_table,
+ },
+};
+
+static int __init tcsr_cc_glymur_init(void)
+{
+ return platform_driver_register(&tcsr_cc_glymur_driver);
+}
+subsys_initcall(tcsr_cc_glymur_init);
+
+static void __exit tcsr_cc_glymur_exit(void)
+{
+ platform_driver_unregister(&tcsr_cc_glymur_driver);
+}
+module_exit(tcsr_cc_glymur_exit);
+
+MODULE_DESCRIPTION("QTI TCSRCC GLYMUR Driver");
+MODULE_LICENSE("GPL");
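The thirteen clkref branches in the new tcsrcc-glymur.c driver differ only in name and register offset. A hypothetical generator macro, not part of the patch and shown only to make the repeated pattern explicit, could produce one entry as:

	#define TCSR_CLKREF_BRANCH(_name, _reg)					\
	static struct clk_branch _name = {					\
		.halt_reg = _reg,						\
		.halt_check = BRANCH_HALT_DELAY,				\
		.clkr = {							\
			.enable_reg = _reg,					\
			.enable_mask = BIT(0),					\
			.hw.init = &(const struct clk_init_data) {		\
				.name = #_name,					\
				.parent_data = &(const struct clk_parent_data){	\
					.index = DT_BI_TCXO_PAD,		\
				},						\
				.num_parents = 1,				\
				.ops = &clk_branch2_ops,			\
			},							\
		},								\
	}

	/* TCSR_CLKREF_BRANCH(tcsr_edp_clkref_en, 0x1c); would expand to the
	 * first branch definition in the file above */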
diff --git a/drivers/clk/qcom/tcsrcc-x1e80100.c b/drivers/clk/qcom/tcsrcc-x1e80100.c
index ff61769a0807..a367e1f55622 100644
--- a/drivers/clk/qcom/tcsrcc-x1e80100.c
+++ b/drivers/clk/qcom/tcsrcc-x1e80100.c
@@ -29,6 +29,10 @@ static struct clk_branch tcsr_edp_clkref_en = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "tcsr_edp_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
diff --git a/drivers/clk/qcom/videocc-milos.c b/drivers/clk/qcom/videocc-milos.c
index 998301e0ba88..acc9df295d4f 100644
--- a/drivers/clk/qcom/videocc-milos.c
+++ b/drivers/clk/qcom/videocc-milos.c
@@ -366,7 +366,7 @@ static struct qcom_cc_driver_data video_cc_milos_driver_data = {
.num_clk_cbcrs = ARRAY_SIZE(video_cc_milos_critical_cbcrs),
};
-static struct qcom_cc_desc video_cc_milos_desc = {
+static const struct qcom_cc_desc video_cc_milos_desc = {
.config = &video_cc_milos_regmap_config,
.clks = video_cc_milos_clocks,
.num_clks = ARRAY_SIZE(video_cc_milos_clocks),
diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c
index 5bc473c2adb3..2f65fe2c6bdf 100644
--- a/drivers/clk/renesas/clk-mstp.c
+++ b/drivers/clk/renesas/clk-mstp.c
@@ -303,6 +303,9 @@ void cpg_mstp_detach_dev(struct generic_pm_domain *unused, struct device *dev)
pm_clk_destroy(dev);
}
+static struct device_node *cpg_mstp_pd_np __initdata;
+static struct generic_pm_domain *cpg_mstp_pd_genpd __initdata;
+
void __init cpg_mstp_add_clk_domain(struct device_node *np)
{
struct generic_pm_domain *pd;
@@ -324,5 +327,20 @@ void __init cpg_mstp_add_clk_domain(struct device_node *np)
pd->detach_dev = cpg_mstp_detach_dev;
pm_genpd_init(pd, &pm_domain_always_on_gov, false);
- of_genpd_add_provider_simple(np, pd);
+ cpg_mstp_pd_np = of_node_get(np);
+ cpg_mstp_pd_genpd = pd;
+}
+
+static int __init cpg_mstp_pd_init_provider(void)
+{
+ int error;
+
+ if (!cpg_mstp_pd_np)
+ return -ENODEV;
+
+ error = of_genpd_add_provider_simple(cpg_mstp_pd_np, cpg_mstp_pd_genpd);
+
+ of_node_put(cpg_mstp_pd_np);
+ return error;
}
+postcore_initcall(cpg_mstp_pd_init_provider);
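The clk-mstp.c change splits genpd setup in two: cpg_mstp_add_clk_domain() still initialises the domain early, but publishing it via of_genpd_add_provider_simple() now happens from a postcore_initcall. A minimal sketch of this stash-then-publish pattern, with invented names that are not from the patch:

	int publish_to_core(void *data);	/* hypothetical registration call */

	static void *pending __initdata;

	void __init early_setup(void *data)
	{
		pending = data;			/* too early to register yet */
	}

	static int __init late_publish(void)
	{
		if (!pending)
			return -ENODEV;		/* early_setup() never ran */
		return publish_to_core(pending);
	}
	postcore_initcall(late_publish);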
diff --git a/drivers/clk/renesas/r9a07g043-cpg.c b/drivers/clk/renesas/r9a07g043-cpg.c
index 02dc5cecfd8d..33e9a1223c72 100644
--- a/drivers/clk/renesas/r9a07g043-cpg.c
+++ b/drivers/clk/renesas/r9a07g043-cpg.c
@@ -164,143 +164,143 @@ static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = {
static const struct rzg2l_mod_clk r9a07g043_mod_clks[] = {
#ifdef CONFIG_ARM64
DEF_MOD("gic", R9A07G043_GIC600_GICCLK, R9A07G043_CLK_P1,
- 0x514, 0, 0),
+ 0x514, 0, MSTOP(BUS_REG1, BIT(7))),
DEF_MOD("ia55_pclk", R9A07G043_IA55_PCLK, R9A07G043_CLK_P2,
- 0x518, 0, 0),
+ 0x518, 0, MSTOP(BUS_PERI_CPU, BIT(13))),
DEF_MOD("ia55_clk", R9A07G043_IA55_CLK, R9A07G043_CLK_P1,
- 0x518, 1, 0),
+ 0x518, 1, MSTOP(BUS_PERI_CPU, BIT(13))),
#endif
#ifdef CONFIG_RISCV
DEF_MOD("iax45_pclk", R9A07G043_IAX45_PCLK, R9A07G043_CLK_P2,
- 0x518, 0, 0),
+ 0x518, 0, MSTOP(BUS_PERI_CPU, BIT(13))),
DEF_MOD("iax45_clk", R9A07G043_IAX45_CLK, R9A07G043_CLK_P1,
- 0x518, 1, 0),
+ 0x518, 1, MSTOP(BUS_PERI_CPU, BIT(13))),
#endif
DEF_MOD("dmac_aclk", R9A07G043_DMAC_ACLK, R9A07G043_CLK_P1,
- 0x52c, 0, 0),
+ 0x52c, 0, MSTOP(BUS_REG1, BIT(2))),
DEF_MOD("dmac_pclk", R9A07G043_DMAC_PCLK, CLK_P1_DIV2,
- 0x52c, 1, 0),
+ 0x52c, 1, MSTOP(BUS_REG1, BIT(3))),
DEF_MOD("ostm0_pclk", R9A07G043_OSTM0_PCLK, R9A07G043_CLK_P0,
- 0x534, 0, 0),
+ 0x534, 0, MSTOP(BUS_REG0, BIT(4))),
DEF_MOD("ostm1_pclk", R9A07G043_OSTM1_PCLK, R9A07G043_CLK_P0,
- 0x534, 1, 0),
+ 0x534, 1, MSTOP(BUS_REG0, BIT(5))),
DEF_MOD("ostm2_pclk", R9A07G043_OSTM2_PCLK, R9A07G043_CLK_P0,
- 0x534, 2, 0),
+ 0x534, 2, MSTOP(BUS_REG0, BIT(6))),
DEF_MOD("mtu_x_mck", R9A07G043_MTU_X_MCK_MTU3, R9A07G043_CLK_P0,
- 0x538, 0, 0),
+ 0x538, 0, MSTOP(BUS_MCPU1, BIT(2))),
DEF_MOD("wdt0_pclk", R9A07G043_WDT0_PCLK, R9A07G043_CLK_P0,
- 0x548, 0, 0),
+ 0x548, 0, MSTOP(BUS_REG0, BIT(2))),
DEF_MOD("wdt0_clk", R9A07G043_WDT0_CLK, R9A07G043_OSCCLK,
- 0x548, 1, 0),
+ 0x548, 1, MSTOP(BUS_REG0, BIT(2))),
DEF_MOD("spi_clk2", R9A07G043_SPI_CLK2, R9A07G043_CLK_SPI1,
- 0x550, 0, 0),
+ 0x550, 0, MSTOP(BUS_MCPU1, BIT(1))),
DEF_MOD("spi_clk", R9A07G043_SPI_CLK, R9A07G043_CLK_SPI0,
- 0x550, 1, 0),
+ 0x550, 1, MSTOP(BUS_MCPU1, BIT(1))),
DEF_MOD("sdhi0_imclk", R9A07G043_SDHI0_IMCLK, CLK_SD0_DIV4,
- 0x554, 0, 0),
+ 0x554, 0, MSTOP(BUS_PERI_COM, BIT(0))),
DEF_MOD("sdhi0_imclk2", R9A07G043_SDHI0_IMCLK2, CLK_SD0_DIV4,
- 0x554, 1, 0),
+ 0x554, 1, MSTOP(BUS_PERI_COM, BIT(0))),
DEF_MOD("sdhi0_clk_hs", R9A07G043_SDHI0_CLK_HS, R9A07G043_CLK_SD0,
- 0x554, 2, 0),
+ 0x554, 2, MSTOP(BUS_PERI_COM, BIT(0))),
DEF_MOD("sdhi0_aclk", R9A07G043_SDHI0_ACLK, R9A07G043_CLK_P1,
- 0x554, 3, 0),
+ 0x554, 3, MSTOP(BUS_PERI_COM, BIT(0))),
DEF_MOD("sdhi1_imclk", R9A07G043_SDHI1_IMCLK, CLK_SD1_DIV4,
- 0x554, 4, 0),
+ 0x554, 4, MSTOP(BUS_PERI_COM, BIT(1))),
DEF_MOD("sdhi1_imclk2", R9A07G043_SDHI1_IMCLK2, CLK_SD1_DIV4,
- 0x554, 5, 0),
+ 0x554, 5, MSTOP(BUS_PERI_COM, BIT(1))),
DEF_MOD("sdhi1_clk_hs", R9A07G043_SDHI1_CLK_HS, R9A07G043_CLK_SD1,
- 0x554, 6, 0),
+ 0x554, 6, MSTOP(BUS_PERI_COM, BIT(1))),
DEF_MOD("sdhi1_aclk", R9A07G043_SDHI1_ACLK, R9A07G043_CLK_P1,
- 0x554, 7, 0),
+ 0x554, 7, MSTOP(BUS_PERI_COM, BIT(1))),
#ifdef CONFIG_ARM64
- DEF_MOD("cru_sysclk", R9A07G043_CRU_SYSCLK, CLK_M2_DIV2,
- 0x564, 0, 0),
- DEF_MOD("cru_vclk", R9A07G043_CRU_VCLK, R9A07G043_CLK_M2,
- 0x564, 1, 0),
- DEF_MOD("cru_pclk", R9A07G043_CRU_PCLK, R9A07G043_CLK_ZT,
- 0x564, 2, 0),
- DEF_MOD("cru_aclk", R9A07G043_CRU_ACLK, R9A07G043_CLK_M0,
- 0x564, 3, 0),
+ DEF_MOD("cru_sysclk", R9A07G043_CRU_SYSCLK, CLK_M2_DIV2,
+ 0x564, 0, MSTOP(BUS_PERI_VIDEO, BIT(3))),
+ DEF_MOD("cru_vclk", R9A07G043_CRU_VCLK, R9A07G043_CLK_M2,
+ 0x564, 1, MSTOP(BUS_PERI_VIDEO, BIT(3))),
+ DEF_MOD("cru_pclk", R9A07G043_CRU_PCLK, R9A07G043_CLK_ZT,
+ 0x564, 2, MSTOP(BUS_PERI_VIDEO, BIT(3))),
+ DEF_MOD("cru_aclk", R9A07G043_CRU_ACLK, R9A07G043_CLK_M0,
+ 0x564, 3, MSTOP(BUS_PERI_VIDEO, BIT(3))),
DEF_COUPLED("lcdc_clk_a", R9A07G043_LCDC_CLK_A, R9A07G043_CLK_M0,
- 0x56c, 0, 0),
+ 0x56c, 0, MSTOP(BUS_PERI_VIDEO, GENMASK(8, 7))),
DEF_COUPLED("lcdc_clk_p", R9A07G043_LCDC_CLK_P, R9A07G043_CLK_ZT,
- 0x56c, 0, 0),
+ 0x56c, 0, MSTOP(BUS_PERI_VIDEO, GENMASK(8, 7))),
DEF_MOD("lcdc_clk_d", R9A07G043_LCDC_CLK_D, R9A07G043_CLK_M3,
- 0x56c, 1, 0),
+ 0x56c, 1, MSTOP(BUS_PERI_VIDEO, BIT(9))),
#endif
DEF_MOD("ssi0_pclk", R9A07G043_SSI0_PCLK2, R9A07G043_CLK_P0,
- 0x570, 0, 0),
+ 0x570, 0, MSTOP(BUS_MCPU1, BIT(10))),
DEF_MOD("ssi0_sfr", R9A07G043_SSI0_PCLK_SFR, R9A07G043_CLK_P0,
- 0x570, 1, 0),
+ 0x570, 1, MSTOP(BUS_MCPU1, BIT(10))),
DEF_MOD("ssi1_pclk", R9A07G043_SSI1_PCLK2, R9A07G043_CLK_P0,
- 0x570, 2, 0),
+ 0x570, 2, MSTOP(BUS_MCPU1, BIT(11))),
DEF_MOD("ssi1_sfr", R9A07G043_SSI1_PCLK_SFR, R9A07G043_CLK_P0,
- 0x570, 3, 0),
+ 0x570, 3, MSTOP(BUS_MCPU1, BIT(11))),
DEF_MOD("ssi2_pclk", R9A07G043_SSI2_PCLK2, R9A07G043_CLK_P0,
- 0x570, 4, 0),
+ 0x570, 4, MSTOP(BUS_MCPU1, BIT(12))),
DEF_MOD("ssi2_sfr", R9A07G043_SSI2_PCLK_SFR, R9A07G043_CLK_P0,
- 0x570, 5, 0),
+ 0x570, 5, MSTOP(BUS_MCPU1, BIT(12))),
DEF_MOD("ssi3_pclk", R9A07G043_SSI3_PCLK2, R9A07G043_CLK_P0,
- 0x570, 6, 0),
+ 0x570, 6, MSTOP(BUS_MCPU1, BIT(13))),
DEF_MOD("ssi3_sfr", R9A07G043_SSI3_PCLK_SFR, R9A07G043_CLK_P0,
- 0x570, 7, 0),
+ 0x570, 7, MSTOP(BUS_MCPU1, BIT(13))),
DEF_MOD("usb0_host", R9A07G043_USB_U2H0_HCLK, R9A07G043_CLK_P1,
- 0x578, 0, 0),
+ 0x578, 0, MSTOP(BUS_PERI_COM, BIT(5))),
DEF_MOD("usb1_host", R9A07G043_USB_U2H1_HCLK, R9A07G043_CLK_P1,
- 0x578, 1, 0),
+ 0x578, 1, MSTOP(BUS_PERI_COM, BIT(7))),
DEF_MOD("usb0_func", R9A07G043_USB_U2P_EXR_CPUCLK, R9A07G043_CLK_P1,
- 0x578, 2, 0),
+ 0x578, 2, MSTOP(BUS_PERI_COM, BIT(6))),
DEF_MOD("usb_pclk", R9A07G043_USB_PCLK, R9A07G043_CLK_P1,
- 0x578, 3, 0),
+ 0x578, 3, MSTOP(BUS_PERI_COM, BIT(4))),
DEF_COUPLED("eth0_axi", R9A07G043_ETH0_CLK_AXI, R9A07G043_CLK_M0,
- 0x57c, 0, 0),
+ 0x57c, 0, MSTOP(BUS_PERI_COM, BIT(2))),
DEF_COUPLED("eth0_chi", R9A07G043_ETH0_CLK_CHI, R9A07G043_CLK_ZT,
- 0x57c, 0, 0),
+ 0x57c, 0, MSTOP(BUS_PERI_COM, BIT(2))),
DEF_COUPLED("eth1_axi", R9A07G043_ETH1_CLK_AXI, R9A07G043_CLK_M0,
- 0x57c, 1, 0),
+ 0x57c, 1, MSTOP(BUS_PERI_COM, BIT(3))),
DEF_COUPLED("eth1_chi", R9A07G043_ETH1_CLK_CHI, R9A07G043_CLK_ZT,
- 0x57c, 1, 0),
+ 0x57c, 1, MSTOP(BUS_PERI_COM, BIT(3))),
DEF_MOD("i2c0", R9A07G043_I2C0_PCLK, R9A07G043_CLK_P0,
- 0x580, 0, 0),
+ 0x580, 0, MSTOP(BUS_MCPU2, BIT(10))),
DEF_MOD("i2c1", R9A07G043_I2C1_PCLK, R9A07G043_CLK_P0,
- 0x580, 1, 0),
+ 0x580, 1, MSTOP(BUS_MCPU2, BIT(11))),
DEF_MOD("i2c2", R9A07G043_I2C2_PCLK, R9A07G043_CLK_P0,
- 0x580, 2, 0),
+ 0x580, 2, MSTOP(BUS_MCPU2, BIT(12))),
DEF_MOD("i2c3", R9A07G043_I2C3_PCLK, R9A07G043_CLK_P0,
- 0x580, 3, 0),
+ 0x580, 3, MSTOP(BUS_MCPU2, BIT(13))),
DEF_MOD("scif0", R9A07G043_SCIF0_CLK_PCK, R9A07G043_CLK_P0,
- 0x584, 0, 0),
+ 0x584, 0, MSTOP(BUS_MCPU2, BIT(1))),
DEF_MOD("scif1", R9A07G043_SCIF1_CLK_PCK, R9A07G043_CLK_P0,
- 0x584, 1, 0),
+ 0x584, 1, MSTOP(BUS_MCPU2, BIT(2))),
DEF_MOD("scif2", R9A07G043_SCIF2_CLK_PCK, R9A07G043_CLK_P0,
- 0x584, 2, 0),
+ 0x584, 2, MSTOP(BUS_MCPU2, BIT(3))),
DEF_MOD("scif3", R9A07G043_SCIF3_CLK_PCK, R9A07G043_CLK_P0,
- 0x584, 3, 0),
+ 0x584, 3, MSTOP(BUS_MCPU2, BIT(4))),
DEF_MOD("scif4", R9A07G043_SCIF4_CLK_PCK, R9A07G043_CLK_P0,
- 0x584, 4, 0),
+ 0x584, 4, MSTOP(BUS_MCPU2, BIT(5))),
DEF_MOD("sci0", R9A07G043_SCI0_CLKP, R9A07G043_CLK_P0,
- 0x588, 0, 0),
+ 0x588, 0, MSTOP(BUS_MCPU2, BIT(7))),
DEF_MOD("sci1", R9A07G043_SCI1_CLKP, R9A07G043_CLK_P0,
- 0x588, 1, 0),
+ 0x588, 1, MSTOP(BUS_MCPU2, BIT(8))),
DEF_MOD("rspi0", R9A07G043_RSPI0_CLKB, R9A07G043_CLK_P0,
- 0x590, 0, 0),
+ 0x590, 0, MSTOP(BUS_MCPU1, BIT(14))),
DEF_MOD("rspi1", R9A07G043_RSPI1_CLKB, R9A07G043_CLK_P0,
- 0x590, 1, 0),
+ 0x590, 1, MSTOP(BUS_MCPU1, BIT(15))),
DEF_MOD("rspi2", R9A07G043_RSPI2_CLKB, R9A07G043_CLK_P0,
- 0x590, 2, 0),
+ 0x590, 2, MSTOP(BUS_MCPU2, BIT(0))),
DEF_MOD("canfd", R9A07G043_CANFD_PCLK, R9A07G043_CLK_P0,
- 0x594, 0, 0),
+ 0x594, 0, MSTOP(BUS_MCPU2, BIT(9))),
DEF_MOD("gpio", R9A07G043_GPIO_HCLK, R9A07G043_OSCCLK,
- 0x598, 0, 0),
+ 0x598, 0, MSTOP(BUS_PERI_CPU, BIT(6))),
DEF_MOD("adc_adclk", R9A07G043_ADC_ADCLK, R9A07G043_CLK_TSU,
- 0x5a8, 0, 0),
+ 0x5a8, 0, MSTOP(BUS_MCPU2, BIT(14))),
DEF_MOD("adc_pclk", R9A07G043_ADC_PCLK, R9A07G043_CLK_P0,
- 0x5a8, 1, 0),
+ 0x5a8, 1, MSTOP(BUS_MCPU2, BIT(14))),
DEF_MOD("tsu_pclk", R9A07G043_TSU_PCLK, R9A07G043_CLK_TSU,
- 0x5ac, 0, 0),
+ 0x5ac, 0, MSTOP(BUS_MCPU2, BIT(15))),
#ifdef CONFIG_RISCV
DEF_MOD("nceplic_aclk", R9A07G043_NCEPLIC_ACLK, R9A07G043_CLK_P1,
- 0x608, 0, 0),
+ 0x608, 0, MSTOP(BUS_REG1, BIT(7))),
#endif
};
diff --git a/drivers/clk/renesas/r9a07g044-cpg.c b/drivers/clk/renesas/r9a07g044-cpg.c
index c851d4eeebbe..0dd264877b9a 100644
--- a/drivers/clk/renesas/r9a07g044-cpg.c
+++ b/drivers/clk/renesas/r9a07g044-cpg.c
@@ -242,163 +242,163 @@ static const struct {
} mod_clks = {
.common = {
DEF_MOD("gic", R9A07G044_GIC600_GICCLK, R9A07G044_CLK_P1,
- 0x514, 0, 0),
+ 0x514, 0, MSTOP(BUS_REG1, BIT(7))),
DEF_MOD("ia55_pclk", R9A07G044_IA55_PCLK, R9A07G044_CLK_P2,
- 0x518, 0, 0),
+ 0x518, 0, MSTOP(BUS_PERI_CPU, BIT(13))),
DEF_MOD("ia55_clk", R9A07G044_IA55_CLK, R9A07G044_CLK_P1,
- 0x518, 1, 0),
+ 0x518, 1, MSTOP(BUS_PERI_CPU, BIT(13))),
DEF_MOD("dmac_aclk", R9A07G044_DMAC_ACLK, R9A07G044_CLK_P1,
- 0x52c, 0, 0),
+ 0x52c, 0, MSTOP(BUS_REG1, BIT(2))),
DEF_MOD("dmac_pclk", R9A07G044_DMAC_PCLK, CLK_P1_DIV2,
- 0x52c, 1, 0),
+ 0x52c, 1, MSTOP(BUS_REG1, BIT(3))),
DEF_MOD("ostm0_pclk", R9A07G044_OSTM0_PCLK, R9A07G044_CLK_P0,
- 0x534, 0, 0),
+ 0x534, 0, MSTOP(BUS_REG0, BIT(4))),
DEF_MOD("ostm1_pclk", R9A07G044_OSTM1_PCLK, R9A07G044_CLK_P0,
- 0x534, 1, 0),
+ 0x534, 1, MSTOP(BUS_REG0, BIT(5))),
DEF_MOD("ostm2_pclk", R9A07G044_OSTM2_PCLK, R9A07G044_CLK_P0,
- 0x534, 2, 0),
+ 0x534, 2, MSTOP(BUS_REG0, BIT(6))),
DEF_MOD("mtu_x_mck", R9A07G044_MTU_X_MCK_MTU3, R9A07G044_CLK_P0,
- 0x538, 0, 0),
+ 0x538, 0, MSTOP(BUS_MCPU1, BIT(2))),
DEF_MOD("gpt_pclk", R9A07G044_GPT_PCLK, R9A07G044_CLK_P0,
- 0x540, 0, 0),
+ 0x540, 0, MSTOP(BUS_MCPU1, BIT(4))),
DEF_MOD("poeg_a_clkp", R9A07G044_POEG_A_CLKP, R9A07G044_CLK_P0,
- 0x544, 0, 0),
+ 0x544, 0, MSTOP(BUS_MCPU1, BIT(5))),
DEF_MOD("poeg_b_clkp", R9A07G044_POEG_B_CLKP, R9A07G044_CLK_P0,
- 0x544, 1, 0),
+ 0x544, 1, MSTOP(BUS_MCPU1, BIT(6))),
DEF_MOD("poeg_c_clkp", R9A07G044_POEG_C_CLKP, R9A07G044_CLK_P0,
- 0x544, 2, 0),
+ 0x544, 2, MSTOP(BUS_MCPU1, BIT(7))),
DEF_MOD("poeg_d_clkp", R9A07G044_POEG_D_CLKP, R9A07G044_CLK_P0,
- 0x544, 3, 0),
+ 0x544, 3, MSTOP(BUS_MCPU1, BIT(8))),
DEF_MOD("wdt0_pclk", R9A07G044_WDT0_PCLK, R9A07G044_CLK_P0,
- 0x548, 0, 0),
+ 0x548, 0, MSTOP(BUS_REG0, BIT(2))),
DEF_MOD("wdt0_clk", R9A07G044_WDT0_CLK, R9A07G044_OSCCLK,
- 0x548, 1, 0),
+ 0x548, 1, MSTOP(BUS_REG0, BIT(2))),
DEF_MOD("wdt1_pclk", R9A07G044_WDT1_PCLK, R9A07G044_CLK_P0,
- 0x548, 2, 0),
+ 0x548, 2, MSTOP(BUS_REG0, BIT(3))),
DEF_MOD("wdt1_clk", R9A07G044_WDT1_CLK, R9A07G044_OSCCLK,
- 0x548, 3, 0),
+ 0x548, 3, MSTOP(BUS_REG0, BIT(3))),
DEF_MOD("spi_clk2", R9A07G044_SPI_CLK2, R9A07G044_CLK_SPI1,
- 0x550, 0, 0),
+ 0x550, 0, MSTOP(BUS_MCPU1, BIT(1))),
DEF_MOD("spi_clk", R9A07G044_SPI_CLK, R9A07G044_CLK_SPI0,
- 0x550, 1, 0),
+ 0x550, 1, MSTOP(BUS_MCPU1, BIT(1))),
DEF_MOD("sdhi0_imclk", R9A07G044_SDHI0_IMCLK, CLK_SD0_DIV4,
- 0x554, 0, 0),
+ 0x554, 0, MSTOP(BUS_PERI_COM, BIT(0))),
DEF_MOD("sdhi0_imclk2", R9A07G044_SDHI0_IMCLK2, CLK_SD0_DIV4,
- 0x554, 1, 0),
+ 0x554, 1, MSTOP(BUS_PERI_COM, BIT(0))),
DEF_MOD("sdhi0_clk_hs", R9A07G044_SDHI0_CLK_HS, R9A07G044_CLK_SD0,
- 0x554, 2, 0),
+ 0x554, 2, MSTOP(BUS_PERI_COM, BIT(0))),
DEF_MOD("sdhi0_aclk", R9A07G044_SDHI0_ACLK, R9A07G044_CLK_P1,
- 0x554, 3, 0),
+ 0x554, 3, MSTOP(BUS_PERI_COM, BIT(0))),
DEF_MOD("sdhi1_imclk", R9A07G044_SDHI1_IMCLK, CLK_SD1_DIV4,
- 0x554, 4, 0),
+ 0x554, 4, MSTOP(BUS_PERI_COM, BIT(1))),
DEF_MOD("sdhi1_imclk2", R9A07G044_SDHI1_IMCLK2, CLK_SD1_DIV4,
- 0x554, 5, 0),
+ 0x554, 5, MSTOP(BUS_PERI_COM, BIT(1))),
DEF_MOD("sdhi1_clk_hs", R9A07G044_SDHI1_CLK_HS, R9A07G044_CLK_SD1,
- 0x554, 6, 0),
+ 0x554, 6, MSTOP(BUS_PERI_COM, BIT(1))),
DEF_MOD("sdhi1_aclk", R9A07G044_SDHI1_ACLK, R9A07G044_CLK_P1,
- 0x554, 7, 0),
+ 0x554, 7, MSTOP(BUS_PERI_COM, BIT(1))),
DEF_MOD("gpu_clk", R9A07G044_GPU_CLK, R9A07G044_CLK_G,
- 0x558, 0, 0),
+ 0x558, 0, MSTOP(BUS_REG1, BIT(4))),
DEF_MOD("gpu_axi_clk", R9A07G044_GPU_AXI_CLK, R9A07G044_CLK_P1,
0x558, 1, 0),
DEF_MOD("gpu_ace_clk", R9A07G044_GPU_ACE_CLK, R9A07G044_CLK_P1,
0x558, 2, 0),
- DEF_MOD("cru_sysclk", R9A07G044_CRU_SYSCLK, CLK_M2_DIV2,
- 0x564, 0, 0),
- DEF_MOD("cru_vclk", R9A07G044_CRU_VCLK, R9A07G044_CLK_M2,
- 0x564, 1, 0),
- DEF_MOD("cru_pclk", R9A07G044_CRU_PCLK, R9A07G044_CLK_ZT,
- 0x564, 2, 0),
- DEF_MOD("cru_aclk", R9A07G044_CRU_ACLK, R9A07G044_CLK_M0,
- 0x564, 3, 0),
+ DEF_MOD("cru_sysclk", R9A07G044_CRU_SYSCLK, CLK_M2_DIV2,
+ 0x564, 0, MSTOP(BUS_PERI_VIDEO, BIT(3))),
+ DEF_MOD("cru_vclk", R9A07G044_CRU_VCLK, R9A07G044_CLK_M2,
+ 0x564, 1, MSTOP(BUS_PERI_VIDEO, BIT(3))),
+ DEF_MOD("cru_pclk", R9A07G044_CRU_PCLK, R9A07G044_CLK_ZT,
+ 0x564, 2, MSTOP(BUS_PERI_VIDEO, BIT(3))),
+ DEF_MOD("cru_aclk", R9A07G044_CRU_ACLK, R9A07G044_CLK_M0,
+ 0x564, 3, MSTOP(BUS_PERI_VIDEO, BIT(3))),
DEF_MOD("dsi_pll_clk", R9A07G044_MIPI_DSI_PLLCLK, R9A07G044_CLK_M1,
- 0x568, 0, 0),
+ 0x568, 0, MSTOP(BUS_PERI_VIDEO, GENMASK(6, 5))),
DEF_MOD("dsi_sys_clk", R9A07G044_MIPI_DSI_SYSCLK, CLK_M2_DIV2,
- 0x568, 1, 0),
+ 0x568, 1, MSTOP(BUS_PERI_VIDEO, GENMASK(6, 5))),
DEF_MOD("dsi_aclk", R9A07G044_MIPI_DSI_ACLK, R9A07G044_CLK_P1,
- 0x568, 2, 0),
+ 0x568, 2, MSTOP(BUS_PERI_VIDEO, GENMASK(6, 5))),
DEF_MOD("dsi_pclk", R9A07G044_MIPI_DSI_PCLK, R9A07G044_CLK_P2,
- 0x568, 3, 0),
+ 0x568, 3, MSTOP(BUS_PERI_VIDEO, GENMASK(6, 5))),
DEF_MOD("dsi_vclk", R9A07G044_MIPI_DSI_VCLK, R9A07G044_CLK_M3,
- 0x568, 4, 0),
+ 0x568, 4, MSTOP(BUS_PERI_VIDEO, GENMASK(6, 5))),
DEF_MOD("dsi_lpclk", R9A07G044_MIPI_DSI_LPCLK, R9A07G044_CLK_M4,
- 0x568, 5, 0),
+ 0x568, 5, MSTOP(BUS_PERI_VIDEO, GENMASK(6, 5))),
DEF_COUPLED("lcdc_a", R9A07G044_LCDC_CLK_A, R9A07G044_CLK_M0,
- 0x56c, 0, 0),
+ 0x56c, 0, MSTOP(BUS_PERI_VIDEO, GENMASK(8, 7))),
DEF_COUPLED("lcdc_p", R9A07G044_LCDC_CLK_P, R9A07G044_CLK_ZT,
- 0x56c, 0, 0),
+ 0x56c, 0, MSTOP(BUS_PERI_VIDEO, GENMASK(8, 7))),
DEF_MOD("lcdc_clk_d", R9A07G044_LCDC_CLK_D, R9A07G044_CLK_M3,
- 0x56c, 1, 0),
+ 0x56c, 1, MSTOP(BUS_PERI_VIDEO, BIT(9))),
DEF_MOD("ssi0_pclk", R9A07G044_SSI0_PCLK2, R9A07G044_CLK_P0,
- 0x570, 0, 0),
+ 0x570, 0, MSTOP(BUS_MCPU1, BIT(10))),
DEF_MOD("ssi0_sfr", R9A07G044_SSI0_PCLK_SFR, R9A07G044_CLK_P0,
- 0x570, 1, 0),
+ 0x570, 1, MSTOP(BUS_MCPU1, BIT(10))),
DEF_MOD("ssi1_pclk", R9A07G044_SSI1_PCLK2, R9A07G044_CLK_P0,
- 0x570, 2, 0),
+ 0x570, 2, MSTOP(BUS_MCPU1, BIT(11))),
DEF_MOD("ssi1_sfr", R9A07G044_SSI1_PCLK_SFR, R9A07G044_CLK_P0,
- 0x570, 3, 0),
+ 0x570, 3, MSTOP(BUS_MCPU1, BIT(11))),
DEF_MOD("ssi2_pclk", R9A07G044_SSI2_PCLK2, R9A07G044_CLK_P0,
- 0x570, 4, 0),
+ 0x570, 4, MSTOP(BUS_MCPU1, BIT(12))),
DEF_MOD("ssi2_sfr", R9A07G044_SSI2_PCLK_SFR, R9A07G044_CLK_P0,
- 0x570, 5, 0),
+ 0x570, 5, MSTOP(BUS_MCPU1, BIT(12))),
DEF_MOD("ssi3_pclk", R9A07G044_SSI3_PCLK2, R9A07G044_CLK_P0,
- 0x570, 6, 0),
+ 0x570, 6, MSTOP(BUS_MCPU1, BIT(13))),
DEF_MOD("ssi3_sfr", R9A07G044_SSI3_PCLK_SFR, R9A07G044_CLK_P0,
- 0x570, 7, 0),
+ 0x570, 7, MSTOP(BUS_MCPU1, BIT(13))),
DEF_MOD("usb0_host", R9A07G044_USB_U2H0_HCLK, R9A07G044_CLK_P1,
- 0x578, 0, 0),
+ 0x578, 0, MSTOP(BUS_PERI_COM, BIT(5))),
DEF_MOD("usb1_host", R9A07G044_USB_U2H1_HCLK, R9A07G044_CLK_P1,
- 0x578, 1, 0),
+ 0x578, 1, MSTOP(BUS_PERI_COM, BIT(7))),
DEF_MOD("usb0_func", R9A07G044_USB_U2P_EXR_CPUCLK, R9A07G044_CLK_P1,
- 0x578, 2, 0),
+ 0x578, 2, MSTOP(BUS_PERI_COM, BIT(6))),
DEF_MOD("usb_pclk", R9A07G044_USB_PCLK, R9A07G044_CLK_P1,
- 0x578, 3, 0),
+ 0x578, 3, MSTOP(BUS_PERI_COM, BIT(4))),
DEF_COUPLED("eth0_axi", R9A07G044_ETH0_CLK_AXI, R9A07G044_CLK_M0,
- 0x57c, 0, 0),
+ 0x57c, 0, MSTOP(BUS_PERI_COM, BIT(2))),
DEF_COUPLED("eth0_chi", R9A07G044_ETH0_CLK_CHI, R9A07G044_CLK_ZT,
- 0x57c, 0, 0),
+ 0x57c, 0, MSTOP(BUS_PERI_COM, BIT(2))),
DEF_COUPLED("eth1_axi", R9A07G044_ETH1_CLK_AXI, R9A07G044_CLK_M0,
- 0x57c, 1, 0),
+ 0x57c, 1, MSTOP(BUS_PERI_COM, BIT(3))),
DEF_COUPLED("eth1_chi", R9A07G044_ETH1_CLK_CHI, R9A07G044_CLK_ZT,
- 0x57c, 1, 0),
+ 0x57c, 1, MSTOP(BUS_PERI_COM, BIT(3))),
DEF_MOD("i2c0", R9A07G044_I2C0_PCLK, R9A07G044_CLK_P0,
- 0x580, 0, 0),
+ 0x580, 0, MSTOP(BUS_MCPU2, BIT(10))),
DEF_MOD("i2c1", R9A07G044_I2C1_PCLK, R9A07G044_CLK_P0,
- 0x580, 1, 0),
+ 0x580, 1, MSTOP(BUS_MCPU2, BIT(11))),
DEF_MOD("i2c2", R9A07G044_I2C2_PCLK, R9A07G044_CLK_P0,
- 0x580, 2, 0),
+ 0x580, 2, MSTOP(BUS_MCPU2, BIT(12))),
DEF_MOD("i2c3", R9A07G044_I2C3_PCLK, R9A07G044_CLK_P0,
- 0x580, 3, 0),
+ 0x580, 3, MSTOP(BUS_MCPU2, BIT(13))),
DEF_MOD("scif0", R9A07G044_SCIF0_CLK_PCK, R9A07G044_CLK_P0,
- 0x584, 0, 0),
+ 0x584, 0, MSTOP(BUS_MCPU2, BIT(1))),
DEF_MOD("scif1", R9A07G044_SCIF1_CLK_PCK, R9A07G044_CLK_P0,
- 0x584, 1, 0),
+ 0x584, 1, MSTOP(BUS_MCPU2, BIT(2))),
DEF_MOD("scif2", R9A07G044_SCIF2_CLK_PCK, R9A07G044_CLK_P0,
- 0x584, 2, 0),
+ 0x584, 2, MSTOP(BUS_MCPU2, BIT(3))),
DEF_MOD("scif3", R9A07G044_SCIF3_CLK_PCK, R9A07G044_CLK_P0,
- 0x584, 3, 0),
+ 0x584, 3, MSTOP(BUS_MCPU2, BIT(4))),
DEF_MOD("scif4", R9A07G044_SCIF4_CLK_PCK, R9A07G044_CLK_P0,
- 0x584, 4, 0),
+ 0x584, 4, MSTOP(BUS_MCPU2, BIT(5))),
DEF_MOD("sci0", R9A07G044_SCI0_CLKP, R9A07G044_CLK_P0,
- 0x588, 0, 0),
+ 0x588, 0, MSTOP(BUS_MCPU2, BIT(7))),
DEF_MOD("sci1", R9A07G044_SCI1_CLKP, R9A07G044_CLK_P0,
- 0x588, 1, 0),
+ 0x588, 1, MSTOP(BUS_MCPU2, BIT(8))),
DEF_MOD("rspi0", R9A07G044_RSPI0_CLKB, R9A07G044_CLK_P0,
- 0x590, 0, 0),
+ 0x590, 0, MSTOP(BUS_MCPU1, BIT(14))),
DEF_MOD("rspi1", R9A07G044_RSPI1_CLKB, R9A07G044_CLK_P0,
- 0x590, 1, 0),
+ 0x590, 1, MSTOP(BUS_MCPU1, BIT(15))),
DEF_MOD("rspi2", R9A07G044_RSPI2_CLKB, R9A07G044_CLK_P0,
- 0x590, 2, 0),
+ 0x590, 2, MSTOP(BUS_MCPU2, BIT(0))),
DEF_MOD("canfd", R9A07G044_CANFD_PCLK, R9A07G044_CLK_P0,
- 0x594, 0, 0),
+ 0x594, 0, MSTOP(BUS_MCPU2, BIT(9))),
DEF_MOD("gpio", R9A07G044_GPIO_HCLK, R9A07G044_OSCCLK,
- 0x598, 0, 0),
+ 0x598, 0, MSTOP(BUS_PERI_CPU, BIT(6))),
DEF_MOD("adc_adclk", R9A07G044_ADC_ADCLK, R9A07G044_CLK_TSU,
- 0x5a8, 0, 0),
+ 0x5a8, 0, MSTOP(BUS_MCPU2, BIT(14))),
DEF_MOD("adc_pclk", R9A07G044_ADC_PCLK, R9A07G044_CLK_P0,
- 0x5a8, 1, 0),
+ 0x5a8, 1, MSTOP(BUS_MCPU2, BIT(14))),
DEF_MOD("tsu_pclk", R9A07G044_TSU_PCLK, R9A07G044_CLK_TSU,
- 0x5ac, 0, 0),
+ 0x5ac, 0, MSTOP(BUS_MCPU2, BIT(15))),
},
#ifdef CONFIG_CLK_R9A07G054
.drp = {
diff --git a/drivers/clk/renesas/r9a08g045-cpg.c b/drivers/clk/renesas/r9a08g045-cpg.c
index ed0661997928..79e7b19c7882 100644
--- a/drivers/clk/renesas/r9a08g045-cpg.c
+++ b/drivers/clk/renesas/r9a08g045-cpg.c
@@ -183,6 +183,7 @@ static const struct cpg_core_clk r9a08g045_core_clks[] __initconst = {
DEF_G3S_DIV("P3", R9A08G045_CLK_P3, CLK_PLL3_DIV2_4, DIVPL3C, G3S_DIVPL3C_STS,
dtable_1_32, 0, 0, 0, NULL),
DEF_FIXED("P3_DIV2", CLK_P3_DIV2, R9A08G045_CLK_P3, 1, 2),
+ DEF_FIXED("P5", R9A08G045_CLK_P5, CLK_PLL2_DIV2, 1, 4),
DEF_FIXED("ZT", R9A08G045_CLK_ZT, CLK_PLL3_DIV2_8, 1, 1),
DEF_FIXED("S0", R9A08G045_CLK_S0, CLK_SEL_PLL4, 1, 2),
DEF_FIXED("OSC", R9A08G045_OSCCLK, CLK_EXTAL, 1, 1),
@@ -284,13 +285,22 @@ static const struct rzg2l_mod_clk r9a08g045_mod_clks[] = {
MSTOP(BUS_MCPU2, BIT(5))),
DEF_MOD("scif5_clk_pck", R9A08G045_SCIF5_CLK_PCK, R9A08G045_CLK_P0, 0x584, 5,
MSTOP(BUS_MCPU3, BIT(4))),
- DEF_MOD("gpio_hclk", R9A08G045_GPIO_HCLK, R9A08G045_OSCCLK, 0x598, 0, 0),
+ DEF_MOD("gpio_hclk", R9A08G045_GPIO_HCLK, R9A08G045_OSCCLK, 0x598, 0,
+ MSTOP(BUS_PERI_CPU, BIT(6))),
DEF_MOD("adc_adclk", R9A08G045_ADC_ADCLK, R9A08G045_CLK_TSU, 0x5a8, 0,
MSTOP(BUS_MCPU2, BIT(14))),
DEF_MOD("adc_pclk", R9A08G045_ADC_PCLK, R9A08G045_CLK_TSU, 0x5a8, 1,
MSTOP(BUS_MCPU2, BIT(14))),
DEF_MOD("tsu_pclk", R9A08G045_TSU_PCLK, R9A08G045_CLK_TSU, 0x5ac, 0,
MSTOP(BUS_MCPU2, BIT(15))),
+ DEF_MOD("pci_aclk", R9A08G045_PCI_ACLK, R9A08G045_CLK_M0, 0x608, 0,
+ MSTOP(BUS_PERI_COM, BIT(10))),
+ DEF_MOD("pci_clkl1pm", R9A08G045_PCI_CLKL1PM, R9A08G045_CLK_ZT, 0x608, 1,
+ MSTOP(BUS_PERI_COM, BIT(10))),
+ DEF_MOD("i3c_pclk", R9A08G045_I3C_PCLK, R9A08G045_CLK_TSU, 0x610, 0,
+ MSTOP(BUS_MCPU3, BIT(10))),
+ DEF_MOD("i3c_tclk", R9A08G045_I3C_TCLK, R9A08G045_CLK_P5, 0x610, 1,
+ MSTOP(BUS_MCPU3, BIT(10))),
DEF_MOD("vbat_bclk", R9A08G045_VBAT_BCLK, R9A08G045_OSCCLK, 0x614, 0,
MSTOP(BUS_MCPU3, GENMASK(8, 7))),
};
@@ -331,6 +341,15 @@ static const struct rzg2l_reset r9a08g045_resets[] = {
DEF_RST(R9A08G045_ADC_PRESETN, 0x8a8, 0),
DEF_RST(R9A08G045_ADC_ADRST_N, 0x8a8, 1),
DEF_RST(R9A08G045_TSU_PRESETN, 0x8ac, 0),
+ DEF_RST(R9A08G045_PCI_ARESETN, 0x908, 0),
+ DEF_RST(R9A08G045_PCI_RST_B, 0x908, 1),
+ DEF_RST(R9A08G045_PCI_RST_GP_B, 0x908, 2),
+ DEF_RST(R9A08G045_PCI_RST_PS_B, 0x908, 3),
+ DEF_RST(R9A08G045_PCI_RST_RSM_B, 0x908, 4),
+ DEF_RST(R9A08G045_PCI_RST_CFG_B, 0x908, 5),
+ DEF_RST(R9A08G045_PCI_RST_LOAD_B, 0x908, 6),
+ DEF_RST(R9A08G045_I3C_TRESETN, 0x910, 0),
+ DEF_RST(R9A08G045_I3C_PRESETN, 0x910, 1),
DEF_RST(R9A08G045_VBAT_BRESETN, 0x914, 0),
};
@@ -342,6 +361,10 @@ static const unsigned int r9a08g045_crit_mod_clks[] __initconst = {
MOD_CLK_BASE + R9A08G045_VBAT_BCLK,
};
+static const unsigned int r9a08g045_no_pm_mod_clks[] = {
+ MOD_CLK_BASE + R9A08G045_PCI_CLKL1PM,
+};
+
const struct rzg2l_cpg_info r9a08g045_cpg_info = {
/* Core Clocks */
.core_clks = r9a08g045_core_clks,
@@ -358,6 +381,10 @@ const struct rzg2l_cpg_info r9a08g045_cpg_info = {
.num_mod_clks = ARRAY_SIZE(r9a08g045_mod_clks),
.num_hw_mod_clks = R9A08G045_VBAT_BCLK + 1,
+ /* No PM Module Clocks */
+ .no_pm_mod_clks = r9a08g045_no_pm_mod_clks,
+ .num_no_pm_mod_clks = ARRAY_SIZE(r9a08g045_no_pm_mod_clks),
+
/* Resets */
.resets = r9a08g045_resets,
.num_resets = R9A08G045_VBAT_BRESETN + 1, /* Last reset ID + 1 */
diff --git a/drivers/clk/renesas/r9a09g047-cpg.c b/drivers/clk/renesas/r9a09g047-cpg.c
index 26e2be7667eb..ef115f9ec0e6 100644
--- a/drivers/clk/renesas/r9a09g047-cpg.c
+++ b/drivers/clk/renesas/r9a09g047-cpg.c
@@ -16,7 +16,7 @@
enum clk_ids {
/* Core Clock Outputs exported to DT */
- LAST_DT_CORE_CLK = R9A09G047_GBETH_1_CLK_PTP_REF_I,
+ LAST_DT_CORE_CLK = R9A09G047_USB3_0_CLKCORE,
/* External Input Clocks */
CLK_AUDIO_EXTAL,
@@ -48,6 +48,8 @@ enum clk_ids {
CLK_PLLDTY_ACPU_DIV2,
CLK_PLLDTY_ACPU_DIV4,
CLK_PLLDTY_DIV8,
+ CLK_PLLDTY_RCPU,
+ CLK_PLLDTY_RCPU_DIV4,
CLK_PLLETH_DIV_250_FIX,
CLK_PLLETH_DIV_125_FIX,
CLK_CSDIV_PLLETH_GBE0,
@@ -157,6 +159,8 @@ static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = {
DEF_SMUX(".smux2_gbe1_txclk", CLK_SMUX2_GBE1_TXCLK, SSEL1_SELCTL0, smux2_gbe1_txclk),
DEF_SMUX(".smux2_gbe1_rxclk", CLK_SMUX2_GBE1_RXCLK, SSEL1_SELCTL1, smux2_gbe1_rxclk),
DEF_FIXED(".plldty_div16", CLK_PLLDTY_DIV16, CLK_PLLDTY, 1, 16),
+ DEF_DDIV(".plldty_rcpu", CLK_PLLDTY_RCPU, CLK_PLLDTY, CDDIV3_DIVCTL2, dtable_2_64),
+ DEF_FIXED(".plldty_rcpu_div4", CLK_PLLDTY_RCPU_DIV4, CLK_PLLDTY_RCPU, 1, 4),
DEF_DDIV(".pllvdo_cru0", CLK_PLLVDO_CRU0, CLK_PLLVDO, CDDIV3_DIVCTL3, dtable_2_4),
DEF_DDIV(".pllvdo_gpu", CLK_PLLVDO_GPU, CLK_PLLVDO, CDDIV3_DIVCTL1, dtable_2_64),
@@ -177,13 +181,29 @@ static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = {
CLK_PLLETH_DIV_125_FIX, 1, 1),
DEF_FIXED("gbeth_1_clk_ptp_ref_i", R9A09G047_GBETH_1_CLK_PTP_REF_I,
CLK_PLLETH_DIV_125_FIX, 1, 1),
+ DEF_FIXED("usb3_0_ref_alt_clk_p", R9A09G047_USB3_0_REF_ALT_CLK_P, CLK_QEXTAL, 1, 1),
+ DEF_FIXED("usb3_0_core_clk", R9A09G047_USB3_0_CLKCORE, CLK_QEXTAL, 1, 1),
};
static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = {
+ DEF_MOD("dmac_0_aclk", CLK_PLLCM33_GEAR, 0, 0, 0, 0,
+ BUS_MSTOP(5, BIT(9))),
+ DEF_MOD("dmac_1_aclk", CLK_PLLDTY_ACPU_DIV2, 0, 1, 0, 1,
+ BUS_MSTOP(3, BIT(2))),
+ DEF_MOD("dmac_2_aclk", CLK_PLLDTY_ACPU_DIV2, 0, 2, 0, 2,
+ BUS_MSTOP(3, BIT(3))),
+ DEF_MOD("dmac_3_aclk", CLK_PLLDTY_RCPU_DIV4, 0, 3, 0, 3,
+ BUS_MSTOP(10, BIT(11))),
+ DEF_MOD("dmac_4_aclk", CLK_PLLDTY_RCPU_DIV4, 0, 4, 0, 4,
+ BUS_MSTOP(10, BIT(12))),
DEF_MOD_CRITICAL("icu_0_pclk_i", CLK_PLLCM33_DIV16, 0, 5, 0, 5,
BUS_MSTOP_NONE),
DEF_MOD_CRITICAL("gic_0_gicclk", CLK_PLLDTY_ACPU_DIV4, 1, 3, 0, 19,
BUS_MSTOP(3, BIT(5))),
+ DEF_MOD("gpt_0_pclk_sfr", CLK_PLLCLN_DIV8, 3, 1, 1, 17,
+ BUS_MSTOP(6, BIT(11))),
+ DEF_MOD("gpt_1_pclk_sfr", CLK_PLLCLN_DIV8, 3, 2, 1, 18,
+ BUS_MSTOP(6, BIT(12))),
DEF_MOD("wdt_1_clkp", CLK_PLLCLN_DIV16, 4, 13, 2, 13,
BUS_MSTOP(1, BIT(0))),
DEF_MOD("wdt_1_clk_loco", CLK_QEXTAL, 4, 14, 2, 14,
@@ -258,6 +278,10 @@ static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = {
BUS_MSTOP(8, BIT(4))),
DEF_MOD("sdhi_2_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 14, 5, 14,
BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("usb3_0_aclk", CLK_PLLDTY_DIV8, 10, 15, 5, 15,
+ BUS_MSTOP(7, BIT(12))),
+ DEF_MOD("usb3_0_pclk_usbtst", CLK_PLLDTY_ACPU_DIV4, 11, 0, 5, 16,
+ BUS_MSTOP(7, BIT(14))),
DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_tx_i", CLK_SMUX2_GBE0_TXCLK, 11, 8, 5, 24,
BUS_MSTOP(8, BIT(5)), 1),
DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_rx_i", CLK_SMUX2_GBE0_RXCLK, 11, 9, 5, 25,
@@ -300,9 +324,18 @@ static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = {
static const struct rzv2h_reset r9a09g047_resets[] __initconst = {
DEF_RST(3, 0, 1, 1), /* SYS_0_PRESETN */
+ DEF_RST(3, 1, 1, 2), /* DMAC_0_ARESETN */
+ DEF_RST(3, 2, 1, 3), /* DMAC_1_ARESETN */
+ DEF_RST(3, 3, 1, 4), /* DMAC_2_ARESETN */
+ DEF_RST(3, 4, 1, 5), /* DMAC_3_ARESETN */
+ DEF_RST(3, 5, 1, 6), /* DMAC_4_ARESETN */
DEF_RST(3, 6, 1, 7), /* ICU_0_PRESETN_I */
DEF_RST(3, 8, 1, 9), /* GIC_0_GICRESET_N */
DEF_RST(3, 9, 1, 10), /* GIC_0_DBG_GICRESET_N */
+ DEF_RST(5, 9, 2, 10), /* GPT_0_RST_P_REG */
+ DEF_RST(5, 10, 2, 11), /* GPT_0_RST_S_REG */
+ DEF_RST(5, 11, 2, 12), /* GPT_1_RST_P_REG */
+ DEF_RST(5, 12, 2, 13), /* GPT_1_RST_S_REG */
DEF_RST(7, 6, 3, 7), /* WDT_1_RESET */
DEF_RST(7, 7, 3, 8), /* WDT_2_RESET */
DEF_RST(7, 8, 3, 9), /* WDT_3_RESET */
@@ -325,6 +358,7 @@ static const struct rzv2h_reset r9a09g047_resets[] __initconst = {
DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */
DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */
DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */
+ DEF_RST(10, 10, 4, 27), /* USB3_0_ARESETN */
DEF_RST(11, 0, 5, 1), /* GBETH_0_ARESETN_I */
DEF_RST(11, 1, 5, 2), /* GBETH_1_ARESETN_I */
DEF_RST(12, 5, 5, 22), /* CRU_0_PRESETN */
diff --git a/drivers/clk/renesas/r9a09g056-cpg.c b/drivers/clk/renesas/r9a09g056-cpg.c
index 437af86f49dd..55f056359dd7 100644
--- a/drivers/clk/renesas/r9a09g056-cpg.c
+++ b/drivers/clk/renesas/r9a09g056-cpg.c
@@ -36,10 +36,10 @@ enum clk_ids {
CLK_PLLCM33_DIV4,
CLK_PLLCM33_DIV5,
CLK_PLLCM33_DIV16,
+ CLK_PLLCM33_GEAR,
CLK_SMUX2_XSPI_CLK0,
CLK_SMUX2_XSPI_CLK1,
CLK_PLLCM33_XSPI,
- CLK_PLLCM33_GEAR,
CLK_PLLCLN_DIV2,
CLK_PLLCLN_DIV8,
CLK_PLLCLN_DIV16,
@@ -120,11 +120,11 @@ static const struct cpg_core_clk r9a09g056_core_clks[] __initconst = {
DEF_FIXED(".pllcm33_div4", CLK_PLLCM33_DIV4, CLK_PLLCM33, 1, 4),
DEF_FIXED(".pllcm33_div5", CLK_PLLCM33_DIV5, CLK_PLLCM33, 1, 5),
DEF_FIXED(".pllcm33_div16", CLK_PLLCM33_DIV16, CLK_PLLCM33, 1, 16),
+ DEF_DDIV(".pllcm33_gear", CLK_PLLCM33_GEAR, CLK_PLLCM33_DIV4, CDDIV0_DIVCTL1, dtable_2_64),
DEF_SMUX(".smux2_xspi_clk0", CLK_SMUX2_XSPI_CLK0, SSEL1_SELCTL2, smux2_xspi_clk0),
DEF_SMUX(".smux2_xspi_clk1", CLK_SMUX2_XSPI_CLK1, SSEL1_SELCTL3, smux2_xspi_clk1),
DEF_CSDIV(".pllcm33_xspi", CLK_PLLCM33_XSPI, CLK_SMUX2_XSPI_CLK1, CSDIV0_DIVCTL3,
dtable_2_16),
- DEF_DDIV(".pllcm33_gear", CLK_PLLCM33_GEAR, CLK_PLLCM33_DIV4, CDDIV0_DIVCTL1, dtable_2_64),
DEF_FIXED(".pllcln_div2", CLK_PLLCLN_DIV2, CLK_PLLCLN, 1, 2),
DEF_FIXED(".pllcln_div8", CLK_PLLCLN_DIV8, CLK_PLLCLN, 1, 8),
@@ -205,6 +205,12 @@ static const struct rzv2h_mod_clk r9a09g056_mod_clks[] __initconst = {
BUS_MSTOP(5, BIT(13))),
DEF_MOD("scif_0_clk_pck", CLK_PLLCM33_DIV16, 8, 15, 4, 15,
BUS_MSTOP(3, BIT(14))),
+ DEF_MOD("i3c_0_pclkrw", CLK_PLLCLN_DIV16, 9, 0, 4, 16,
+ BUS_MSTOP(10, BIT(15))),
+ DEF_MOD("i3c_0_pclk", CLK_PLLCLN_DIV16, 9, 1, 4, 17,
+ BUS_MSTOP(10, BIT(15))),
+ DEF_MOD("i3c_0_tclk", CLK_PLLCLN_DIV8, 9, 2, 4, 18,
+ BUS_MSTOP(10, BIT(15))),
DEF_MOD("riic_8_ckm", CLK_PLLCM33_DIV16, 9, 3, 4, 19,
BUS_MSTOP(3, BIT(13))),
DEF_MOD("riic_0_ckm", CLK_PLLCLN_DIV16, 9, 4, 4, 20,
@@ -308,6 +314,8 @@ static const struct rzv2h_reset r9a09g056_resets[] __initconst = {
DEF_RST(7, 7, 3, 8), /* WDT_2_RESET */
DEF_RST(7, 8, 3, 9), /* WDT_3_RESET */
DEF_RST(9, 5, 4, 6), /* SCIF_0_RST_SYSTEM_N */
+ DEF_RST(9, 6, 4, 7), /* I3C_0_PRESETN */
+ DEF_RST(9, 7, 4, 8), /* I3C_0_TRESETN */
DEF_RST(9, 8, 4, 9), /* RIIC_0_MRST */
DEF_RST(9, 9, 4, 10), /* RIIC_1_MRST */
DEF_RST(9, 10, 4, 11), /* RIIC_2_MRST */
@@ -317,8 +325,8 @@ static const struct rzv2h_reset r9a09g056_resets[] __initconst = {
DEF_RST(9, 14, 4, 15), /* RIIC_6_MRST */
DEF_RST(9, 15, 4, 16), /* RIIC_7_MRST */
DEF_RST(10, 0, 4, 17), /* RIIC_8_MRST */
- DEF_RST(10, 3, 4, 20), /* SPI_HRESETN */
- DEF_RST(10, 4, 4, 21), /* SPI_ARESETN */
+ DEF_RST(10, 3, 4, 20), /* SPI_HRESETN */
+ DEF_RST(10, 4, 4, 21), /* SPI_ARESETN */
DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */
DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */
DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */
diff --git a/drivers/clk/renesas/r9a09g057-cpg.c b/drivers/clk/renesas/r9a09g057-cpg.c
index f7de69a93de1..6389c4b6a523 100644
--- a/drivers/clk/renesas/r9a09g057-cpg.c
+++ b/drivers/clk/renesas/r9a09g057-cpg.c
@@ -134,9 +134,8 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_FIXED(".pllcm33_div3", CLK_PLLCM33_DIV3, CLK_PLLCM33, 1, 3),
DEF_FIXED(".pllcm33_div4", CLK_PLLCM33_DIV4, CLK_PLLCM33, 1, 4),
DEF_FIXED(".pllcm33_div5", CLK_PLLCM33_DIV5, CLK_PLLCM33, 1, 5),
- DEF_DDIV(".pllcm33_gear", CLK_PLLCM33_GEAR,
- CLK_PLLCM33_DIV4, CDDIV0_DIVCTL1, dtable_2_64),
DEF_FIXED(".pllcm33_div16", CLK_PLLCM33_DIV16, CLK_PLLCM33, 1, 16),
+ DEF_DDIV(".pllcm33_gear", CLK_PLLCM33_GEAR, CLK_PLLCM33_DIV4, CDDIV0_DIVCTL1, dtable_2_64),
DEF_SMUX(".smux2_xspi_clk0", CLK_SMUX2_XSPI_CLK0, SSEL1_SELCTL2, smux2_xspi_clk0),
DEF_SMUX(".smux2_xspi_clk1", CLK_SMUX2_XSPI_CLK1, SSEL1_SELCTL3, smux2_xspi_clk1),
DEF_CSDIV(".pllcm33_xspi", CLK_PLLCM33_XSPI, CLK_SMUX2_XSPI_CLK1, CSDIV0_DIVCTL3,
@@ -260,6 +259,12 @@ static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
BUS_MSTOP(11, BIT(2))),
DEF_MOD("scif_0_clk_pck", CLK_PLLCM33_DIV16, 8, 15, 4, 15,
BUS_MSTOP(3, BIT(14))),
+ DEF_MOD("i3c_0_pclkrw", CLK_PLLCLN_DIV16, 9, 0, 4, 16,
+ BUS_MSTOP(10, BIT(15))),
+ DEF_MOD("i3c_0_pclk", CLK_PLLCLN_DIV16, 9, 1, 4, 17,
+ BUS_MSTOP(10, BIT(15))),
+ DEF_MOD("i3c_0_tclk", CLK_PLLCLN_DIV8, 9, 2, 4, 18,
+ BUS_MSTOP(10, BIT(15))),
DEF_MOD("riic_8_ckm", CLK_PLLCM33_DIV16, 9, 3, 4, 19,
BUS_MSTOP(3, BIT(13))),
DEF_MOD("riic_0_ckm", CLK_PLLCLN_DIV16, 9, 4, 4, 20,
@@ -403,6 +408,8 @@ static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
DEF_RST(7, 15, 3, 16), /* RSPI_2_PRESETN */
DEF_RST(8, 0, 3, 17), /* RSPI_2_TRESETN */
DEF_RST(9, 5, 4, 6), /* SCIF_0_RST_SYSTEM_N */
+ DEF_RST(9, 6, 4, 7), /* I3C_0_PRESETN */
+ DEF_RST(9, 7, 4, 8), /* I3C_0_TRESETN */
DEF_RST(9, 8, 4, 9), /* RIIC_0_MRST */
DEF_RST(9, 9, 4, 10), /* RIIC_1_MRST */
DEF_RST(9, 10, 4, 11), /* RIIC_2_MRST */
diff --git a/drivers/clk/renesas/r9a09g077-cpg.c b/drivers/clk/renesas/r9a09g077-cpg.c
index c920d6a9707f..af3ef6d58c87 100644
--- a/drivers/clk/renesas/r9a09g077-cpg.c
+++ b/drivers/clk/renesas/r9a09g077-cpg.c
@@ -46,8 +46,13 @@
#define DIVCA55C2 CONF_PACK(SCKCR2, 10, 1)
#define DIVCA55C3 CONF_PACK(SCKCR2, 11, 1)
#define DIVCA55S CONF_PACK(SCKCR2, 12, 1)
+#define DIVSCI5ASYNC CONF_PACK(SCKCR2, 18, 2)
#define DIVSCI0ASYNC CONF_PACK(SCKCR3, 6, 2)
+#define DIVSCI1ASYNC CONF_PACK(SCKCR3, 8, 2)
+#define DIVSCI2ASYNC CONF_PACK(SCKCR3, 10, 2)
+#define DIVSCI3ASYNC CONF_PACK(SCKCR3, 12, 2)
+#define DIVSCI4ASYNC CONF_PACK(SCKCR3, 14, 2)
#define SEL_PLL CONF_PACK(SCKCR, 22, 1)
@@ -67,7 +72,7 @@ enum rzt2h_clk_types {
enum clk_ids {
/* Core Clock Outputs exported to DT */
- LAST_DT_CORE_CLK = R9A09G077_SDHI_CLKHS,
+ LAST_DT_CORE_CLK = R9A09G077_ETCLKE,
/* External Input Clocks */
CLK_EXTAL,
@@ -84,6 +89,11 @@ enum clk_ids {
CLK_SEL_CLK_PLL4,
CLK_PLL4D1,
CLK_SCI0ASYNC,
+ CLK_SCI1ASYNC,
+ CLK_SCI2ASYNC,
+ CLK_SCI3ASYNC,
+ CLK_SCI4ASYNC,
+ CLK_SCI5ASYNC,
/* Module Clocks */
MOD_CLK_BASE,
@@ -133,6 +143,16 @@ static const struct cpg_core_clk r9a09g077_core_clks[] __initconst = {
DEF_FIXED(".pll4d1", CLK_PLL4D1, CLK_SEL_CLK_PLL4, 1, 1),
DEF_DIV(".sci0async", CLK_SCI0ASYNC, CLK_PLL4D1, DIVSCI0ASYNC,
dtable_24_25_30_32),
+ DEF_DIV(".sci1async", CLK_SCI1ASYNC, CLK_PLL4D1, DIVSCI1ASYNC,
+ dtable_24_25_30_32),
+ DEF_DIV(".sci2async", CLK_SCI2ASYNC, CLK_PLL4D1, DIVSCI2ASYNC,
+ dtable_24_25_30_32),
+ DEF_DIV(".sci3async", CLK_SCI3ASYNC, CLK_PLL4D1, DIVSCI3ASYNC,
+ dtable_24_25_30_32),
+ DEF_DIV(".sci4async", CLK_SCI4ASYNC, CLK_PLL4D1, DIVSCI4ASYNC,
+ dtable_24_25_30_32),
+ DEF_DIV(".sci5async", CLK_SCI5ASYNC, CLK_PLL4D1, DIVSCI5ASYNC,
+ dtable_24_25_30_32),
/* Core output clk */
DEF_DIV("CA55C0", R9A09G077_CLK_CA55C0, CLK_SEL_CLK_PLL0, DIVCA55C0,
@@ -146,16 +166,35 @@ static const struct cpg_core_clk r9a09g077_core_clks[] __initconst = {
DEF_DIV("CA55S", R9A09G077_CLK_CA55S, CLK_SEL_CLK_PLL0, DIVCA55S,
dtable_1_2),
DEF_FIXED("PCLKGPTL", R9A09G077_CLK_PCLKGPTL, CLK_SEL_CLK_PLL1, 2, 1),
+ DEF_FIXED("PCLKH", R9A09G077_CLK_PCLKH, CLK_SEL_CLK_PLL1, 4, 1),
DEF_FIXED("PCLKM", R9A09G077_CLK_PCLKM, CLK_SEL_CLK_PLL1, 8, 1),
DEF_FIXED("PCLKL", R9A09G077_CLK_PCLKL, CLK_SEL_CLK_PLL1, 16, 1),
+ DEF_FIXED("PCLKAH", R9A09G077_CLK_PCLKAH, CLK_PLL4D1, 6, 1),
DEF_FIXED("PCLKAM", R9A09G077_CLK_PCLKAM, CLK_PLL4D1, 12, 1),
DEF_FIXED("SDHI_CLKHS", R9A09G077_SDHI_CLKHS, CLK_SEL_CLK_PLL2, 1, 1),
+ DEF_FIXED("USB_CLK", R9A09G077_USB_CLK, CLK_PLL4D1, 48, 1),
+ DEF_FIXED("ETCLKA", R9A09G077_ETCLKA, CLK_SEL_CLK_PLL1, 5, 1),
+ DEF_FIXED("ETCLKB", R9A09G077_ETCLKB, CLK_SEL_CLK_PLL1, 8, 1),
+ DEF_FIXED("ETCLKC", R9A09G077_ETCLKC, CLK_SEL_CLK_PLL1, 10, 1),
+ DEF_FIXED("ETCLKD", R9A09G077_ETCLKD, CLK_SEL_CLK_PLL1, 20, 1),
+ DEF_FIXED("ETCLKE", R9A09G077_ETCLKE, CLK_SEL_CLK_PLL1, 40, 1),
};
static const struct mssr_mod_clk r9a09g077_mod_clks[] __initconst = {
DEF_MOD("sci0fck", 8, CLK_SCI0ASYNC),
+ DEF_MOD("sci1fck", 9, CLK_SCI1ASYNC),
+ DEF_MOD("sci2fck", 10, CLK_SCI2ASYNC),
+ DEF_MOD("sci3fck", 11, CLK_SCI3ASYNC),
+ DEF_MOD("sci4fck", 12, CLK_SCI4ASYNC),
DEF_MOD("iic0", 100, R9A09G077_CLK_PCLKL),
DEF_MOD("iic1", 101, R9A09G077_CLK_PCLKL),
+ DEF_MOD("gmac0", 400, R9A09G077_CLK_PCLKM),
+ DEF_MOD("ethsw", 401, R9A09G077_CLK_PCLKM),
+ DEF_MOD("ethss", 403, R9A09G077_CLK_PCLKM),
+ DEF_MOD("usb", 408, R9A09G077_CLK_PCLKAM),
+ DEF_MOD("gmac1", 416, R9A09G077_CLK_PCLKAM),
+ DEF_MOD("gmac2", 417, R9A09G077_CLK_PCLKAM),
+ DEF_MOD("sci5fck", 600, CLK_SCI5ASYNC),
DEF_MOD("iic2", 601, R9A09G077_CLK_PCLKL),
DEF_MOD("sdhi0", 1212, R9A09G077_CLK_PCLKAM),
DEF_MOD("sdhi1", 1213, R9A09G077_CLK_PCLKAM),
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 5ff6ee1f7d4b..de1cf7ba45b7 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -1082,6 +1082,7 @@ static int __init cpg_mssr_reserved_init(struct cpg_mssr_priv *priv,
of_for_each_phandle(&it, rc, node, "clocks", "#clock-cells", -1) {
int idx;
+ unsigned int *new_ids;
if (it.node != priv->np)
continue;
@@ -1092,11 +1093,13 @@ static int __init cpg_mssr_reserved_init(struct cpg_mssr_priv *priv,
if (args[0] != CPG_MOD)
continue;
- ids = krealloc_array(ids, (num + 1), sizeof(*ids), GFP_KERNEL);
- if (!ids) {
+ new_ids = krealloc_array(ids, (num + 1), sizeof(*ids), GFP_KERNEL);
+ if (!new_ids) {
of_node_put(it.node);
+ kfree(ids);
return -ENOMEM;
}
+ ids = new_ids;
if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
idx = MOD_CLK_PACK_10(args[1]); /* for DEF_MOD_STB() */
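The renesas-cpg-mssr.c hunk fixes a classic realloc bug: writing the result of krealloc_array() straight back to ids loses the only reference to the old buffer when the allocation fails, leaking it. The safe-grow idiom the patch adopts, reduced to a self-contained sketch:

	static int grow_by_one(unsigned int **bufp, unsigned int count)
	{
		unsigned int *tmp;

		tmp = krealloc_array(*bufp, count + 1, sizeof(**bufp), GFP_KERNEL);
		if (!tmp) {
			kfree(*bufp);	/* the old buffer is untouched on failure */
			*bufp = NULL;
			return -ENOMEM;
		}
		*bufp = tmp;		/* commit only once the grow succeeded */
		return 0;
	}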
diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
index 187233302818..07909e80bae2 100644
--- a/drivers/clk/renesas/rzg2l-cpg.c
+++ b/drivers/clk/renesas/rzg2l-cpg.c
@@ -824,11 +824,10 @@ static unsigned long rzg2l_cpg_sipll5_recalc_rate(struct clk_hw *hw,
return pll5_rate;
}
-static long rzg2l_cpg_sipll5_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *parent_rate)
+static int rzg2l_cpg_sipll5_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return rate;
+ return 0;
}
static int rzg2l_cpg_sipll5_set_rate(struct clk_hw *hw,
@@ -902,7 +901,7 @@ static int rzg2l_cpg_sipll5_set_rate(struct clk_hw *hw,
static const struct clk_ops rzg2l_cpg_sipll5_ops = {
.recalc_rate = rzg2l_cpg_sipll5_recalc_rate,
- .round_rate = rzg2l_cpg_sipll5_round_rate,
+ .determine_rate = rzg2l_cpg_sipll5_determine_rate,
.set_rate = rzg2l_cpg_sipll5_set_rate,
};
@@ -1639,8 +1638,8 @@ fail:
#define rcdev_to_priv(x) container_of(x, struct rzg2l_cpg_priv, rcdev)
-static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
- unsigned long id)
+static int __rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
{
struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
const struct rzg2l_cpg_info *info = priv->info;
@@ -1648,9 +1647,13 @@ static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
u32 mask = BIT(info->resets[id].bit);
s8 monbit = info->resets[id].monbit;
u32 value = mask << 16;
+ int ret;
- dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));
+ dev_dbg(rcdev->dev, "%s id:%ld offset:0x%x\n",
+ assert ? "assert" : "deassert", id, CLK_RST_R(reg));
+ if (!assert)
+ value |= mask;
writel(value, priv->base + CLK_RST_R(reg));
if (info->has_clk_mon_regs) {
@@ -1664,38 +1667,26 @@ static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
return 0;
}
- return readl_poll_timeout_atomic(priv->base + reg, value,
- value & mask, 10, 200);
+ ret = readl_poll_timeout_atomic(priv->base + reg, value,
+ assert == !!(value & mask), 10, 200);
+ if (ret && !assert) {
+ value = mask << 16;
+ writel(value, priv->base + CLK_RST_R(info->resets[id].off));
+ }
+
+ return ret;
+}
+
+static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return __rzg2l_cpg_assert(rcdev, id, true);
}
static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
- struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
- const struct rzg2l_cpg_info *info = priv->info;
- unsigned int reg = info->resets[id].off;
- u32 mask = BIT(info->resets[id].bit);
- s8 monbit = info->resets[id].monbit;
- u32 value = (mask << 16) | mask;
-
- dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
- CLK_RST_R(reg));
-
- writel(value, priv->base + CLK_RST_R(reg));
-
- if (info->has_clk_mon_regs) {
- reg = CLK_MRST_R(reg);
- } else if (monbit >= 0) {
- reg = CPG_RST_MON;
- mask = BIT(monbit);
- } else {
- /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
- udelay(35);
- return 0;
- }
-
- return readl_poll_timeout_atomic(priv->base + reg, value,
- !(value & mask), 10, 200);
+ return __rzg2l_cpg_assert(rcdev, id, false);
}
static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
diff --git a/drivers/clk/renesas/rzg2l-cpg.h b/drivers/clk/renesas/rzg2l-cpg.h
index 0a71c5ec24b6..55e815be16c8 100644
--- a/drivers/clk/renesas/rzg2l-cpg.h
+++ b/drivers/clk/renesas/rzg2l-cpg.h
@@ -34,6 +34,7 @@
#define CPG_BUS_PERI_COM_MSTOP (0xB6C)
#define CPG_BUS_PERI_CPU_MSTOP (0xB70)
#define CPG_BUS_PERI_DDR_MSTOP (0xB74)
+#define CPG_BUS_PERI_VIDEO_MSTOP (0xB78)
#define CPG_BUS_REG0_MSTOP (0xB7C)
#define CPG_BUS_REG1_MSTOP (0xB80)
#define CPG_BUS_TZCDDR_MSTOP (0xB84)
diff --git a/drivers/clk/renesas/rzv2h-cpg.c b/drivers/clk/renesas/rzv2h-cpg.c
index f468afbb54e2..2197d1d2453a 100644
--- a/drivers/clk/renesas/rzv2h-cpg.c
+++ b/drivers/clk/renesas/rzv2h-cpg.c
@@ -294,15 +294,6 @@ static unsigned long rzv2h_ddiv_recalc_rate(struct clk_hw *hw,
divider->flags, divider->width);
}
-static long rzv2h_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
-{
- struct clk_divider *divider = to_clk_divider(hw);
-
- return divider_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags);
-}
-
static int rzv2h_ddiv_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
@@ -359,7 +350,6 @@ ddiv_timeout:
static const struct clk_ops rzv2h_ddiv_clk_divider_ops = {
.recalc_rate = rzv2h_ddiv_recalc_rate,
- .round_rate = rzv2h_ddiv_round_rate,
.determine_rate = rzv2h_ddiv_determine_rate,
.set_rate = rzv2h_ddiv_set_rate,
};
@@ -864,6 +854,7 @@ static int __rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
u32 mask = BIT(priv->resets[id].reset_bit);
u8 monbit = priv->resets[id].mon_bit;
u32 value = mask << 16;
+ int ret;
dev_dbg(rcdev->dev, "%s id:%ld offset:0x%x\n",
assert ? "assert" : "deassert", id, reg);
@@ -875,9 +866,14 @@ static int __rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
mask = BIT(monbit);
- return readl_poll_timeout_atomic(priv->base + reg, value,
- assert ? (value & mask) : !(value & mask),
- 10, 200);
+ ret = readl_poll_timeout_atomic(priv->base + reg, value,
+ assert == !!(value & mask), 10, 200);
+ if (ret && !assert) {
+ value = mask << 16;
+ writel(value, priv->base + GET_RST_OFFSET(priv->resets[id].reset_index));
+ }
+
+ return ret;
}
static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
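Both the rzg2l and rzv2h reset hunks above fold assert and deassert into one helper keyed by a bool, and both poll with the same polarity trick: the condition "assert == !!(value & mask)" is true exactly when the monitor bit matches the requested state. Isolated as a sketch (needs linux/iopoll.h):

	static int wait_reset_state(void __iomem *mon, u32 mask, bool assert)
	{
		u32 value;

		/* asserting: wait for the bit to become 1;
		 * deasserting: wait for it to become 0 */
		return readl_poll_timeout_atomic(mon, value,
						 assert == !!(value & mask),
						 10, 200);
	}

The new failure handling in both files also re-asserts the line on a timed-out deassert (writing mask << 16 again), so the hardware is not left half-released.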
diff --git a/drivers/clk/rockchip/clk-ddr.c b/drivers/clk/rockchip/clk-ddr.c
index 86718c54e56b..8866a65982a0 100644
--- a/drivers/clk/rockchip/clk-ddr.c
+++ b/drivers/clk/rockchip/clk-ddr.c
@@ -55,17 +55,18 @@ rockchip_ddrclk_sip_recalc_rate(struct clk_hw *hw,
return res.a0;
}
-static long rockchip_ddrclk_sip_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *prate)
+static int rockchip_ddrclk_sip_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct arm_smccc_res res;
- arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, rate, 0,
+ arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, req->rate, 0,
ROCKCHIP_SIP_CONFIG_DRAM_ROUND_RATE,
0, 0, 0, 0, &res);
- return res.a0;
+ req->rate = res.a0;
+
+ return 0;
}
static u8 rockchip_ddrclk_get_parent(struct clk_hw *hw)
@@ -83,7 +84,7 @@ static u8 rockchip_ddrclk_get_parent(struct clk_hw *hw)
static const struct clk_ops rockchip_ddrclk_sip_ops = {
.recalc_rate = rockchip_ddrclk_sip_recalc_rate,
.set_rate = rockchip_ddrclk_sip_set_rate,
- .round_rate = rockchip_ddrclk_sip_round_rate,
+ .determine_rate = rockchip_ddrclk_sip_determine_rate,
.get_parent = rockchip_ddrclk_get_parent,
};
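This clk-ddr.c hunk and the rockchip/renesas ones around it are part of the tree-wide migration from .round_rate to .determine_rate. The conversion is mechanical: the chosen rate moves from the return value into req->rate, and the return value becomes a plain 0/-errno status. Sketched with an invented helper:

	unsigned long foo_pick_rate(unsigned long rate);	/* hypothetical */

	/* old style: the result (or a negative error) is the return value */
	static long foo_round_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long *prate)
	{
		return foo_pick_rate(rate);
	}

	/* new style: the result goes into req->rate */
	static int foo_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
	{
		req->rate = foo_pick_rate(req->rate);
		return 0;
	}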
diff --git a/drivers/clk/rockchip/clk-half-divider.c b/drivers/clk/rockchip/clk-half-divider.c
index 64f7faad2148..fbc018e8afa4 100644
--- a/drivers/clk/rockchip/clk-half-divider.c
+++ b/drivers/clk/rockchip/clk-half-divider.c
@@ -92,17 +92,19 @@ static int clk_half_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
return bestdiv;
}
-static long clk_half_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_half_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_divider *divider = to_clk_divider(hw);
int div;
- div = clk_half_divider_bestdiv(hw, rate, prate,
+ div = clk_half_divider_bestdiv(hw, req->rate, &req->best_parent_rate,
divider->width,
divider->flags);
- return DIV_ROUND_UP_ULL(((u64)*prate * 2), div * 2 + 3);
+ req->rate = DIV_ROUND_UP_ULL(((u64)req->best_parent_rate * 2), div * 2 + 3);
+
+ return 0;
}
static int clk_half_divider_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -141,7 +143,7 @@ static int clk_half_divider_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_half_divider_ops = {
.recalc_rate = clk_half_divider_recalc_rate,
- .round_rate = clk_half_divider_round_rate,
+ .determine_rate = clk_half_divider_determine_rate,
.set_rate = clk_half_divider_set_rate,
};
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
index c9d599c31923..86dba3826a77 100644
--- a/drivers/clk/rockchip/clk-pll.c
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -61,8 +61,8 @@ static const struct rockchip_pll_rate_table *rockchip_get_pll_settings(
return NULL;
}
-static long rockchip_pll_round_rate(struct clk_hw *hw,
- unsigned long drate, unsigned long *prate)
+static int rockchip_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
const struct rockchip_pll_rate_table *rate_table = pll->rate_table;
@@ -70,12 +70,17 @@ static long rockchip_pll_round_rate(struct clk_hw *hw,
/* Assuming rate_table is in descending order */
for (i = 0; i < pll->rate_count; i++) {
- if (drate >= rate_table[i].rate)
- return rate_table[i].rate;
+ if (req->rate >= rate_table[i].rate) {
+ req->rate = rate_table[i].rate;
+
+ return 0;
+ }
}
/* return minimum supported value */
- return rate_table[i - 1].rate;
+ req->rate = rate_table[i - 1].rate;
+
+ return 0;
}
/*
@@ -352,7 +357,7 @@ static const struct clk_ops rockchip_rk3036_pll_clk_norate_ops = {
static const struct clk_ops rockchip_rk3036_pll_clk_ops = {
.recalc_rate = rockchip_rk3036_pll_recalc_rate,
- .round_rate = rockchip_pll_round_rate,
+ .determine_rate = rockchip_pll_determine_rate,
.set_rate = rockchip_rk3036_pll_set_rate,
.enable = rockchip_rk3036_pll_enable,
.disable = rockchip_rk3036_pll_disable,
@@ -571,7 +576,7 @@ static const struct clk_ops rockchip_rk3066_pll_clk_norate_ops = {
static const struct clk_ops rockchip_rk3066_pll_clk_ops = {
.recalc_rate = rockchip_rk3066_pll_recalc_rate,
- .round_rate = rockchip_pll_round_rate,
+ .determine_rate = rockchip_pll_determine_rate,
.set_rate = rockchip_rk3066_pll_set_rate,
.enable = rockchip_rk3066_pll_enable,
.disable = rockchip_rk3066_pll_disable,
@@ -836,7 +841,7 @@ static const struct clk_ops rockchip_rk3399_pll_clk_norate_ops = {
static const struct clk_ops rockchip_rk3399_pll_clk_ops = {
.recalc_rate = rockchip_rk3399_pll_recalc_rate,
- .round_rate = rockchip_pll_round_rate,
+ .determine_rate = rockchip_pll_determine_rate,
.set_rate = rockchip_rk3399_pll_set_rate,
.enable = rockchip_rk3399_pll_enable,
.disable = rockchip_rk3399_pll_disable,
@@ -1036,7 +1041,7 @@ static const struct clk_ops rockchip_rk3588_pll_clk_norate_ops = {
static const struct clk_ops rockchip_rk3588_pll_clk_ops = {
.recalc_rate = rockchip_rk3588_pll_recalc_rate,
- .round_rate = rockchip_pll_round_rate,
+ .determine_rate = rockchip_pll_determine_rate,
.set_rate = rockchip_rk3588_pll_set_rate,
.enable = rockchip_rk3588_pll_enable,
.disable = rockchip_rk3588_pll_disable,
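
rockchip_pll_determine_rate above depends on the rate table being sorted in descending order: the scan picks the first (largest) entry that does not exceed the request, and a request below every entry falls through to the last (smallest) one. A standalone illustration with made-up rates:

/* Picks the largest entry <= req from a descending table, else the
 * table minimum; e.g. for {1200, 800, 600} a request of 900 yields
 * 800, and a request of 500 yields 600. */
static unsigned long pick_from_desc_table(const unsigned long *table,
					  int n, unsigned long req)
{
	int i;

	for (i = 0; i < n; i++)
		if (req >= table[i])
			return table[i];

	return table[n - 1];
}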
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
index 04391e4e2874..95e6996adbae 100644
--- a/drivers/clk/rockchip/clk-rk3368.c
+++ b/drivers/clk/rockchip/clk-rk3368.c
@@ -526,7 +526,7 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
GATE(ACLK_PERI, "aclk_peri", "aclk_peri_src", CLK_IGNORE_UNUSED,
RK3368_CLKGATE_CON(3), 1, GFLAGS),
- GATE(0, "sclk_mipidsi_24m", "xin24m", 0, RK3368_CLKGATE_CON(4), 14, GFLAGS),
+ GATE(SCLK_MIPIDSI_24M, "sclk_mipidsi_24m", "xin24m", 0, RK3368_CLKGATE_CON(4), 14, GFLAGS),
/*
* Clock-Architecture Diagram 4
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index b77fe288e4bb..ef464f434740 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_EXYNOS_5260_COMMON_CLK) += clk-exynos5260.o
obj-$(CONFIG_EXYNOS_5410_COMMON_CLK) += clk-exynos5410.o
obj-$(CONFIG_EXYNOS_5420_COMMON_CLK) += clk-exynos5420.o
obj-$(CONFIG_EXYNOS_5420_COMMON_CLK) += clk-exynos5-subcmu.o
+obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-artpec8.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos5433.o
obj-$(CONFIG_EXYNOS_AUDSS_CLK_CON) += clk-exynos-audss.o
obj-$(CONFIG_EXYNOS_CLKOUT) += clk-exynos-clkout.o
diff --git a/drivers/clk/samsung/clk-artpec8.c b/drivers/clk/samsung/clk-artpec8.c
new file mode 100644
index 000000000000..0ea7c8b58674
--- /dev/null
+++ b/drivers/clk/samsung/clk-artpec8.c
@@ -0,0 +1,1044 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 Samsung Electronics Co., Ltd.
+ * https://www.samsung.com
+ * Copyright (c) 2025 Axis Communications AB.
+ * https://www.axis.com
+ *
+ * Common Clock Framework support for ARTPEC-8 SoC.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/clock/axis,artpec8-clk.h>
+
+#include "clk.h"
+#include "clk-exynos-arm64.h"
+
+/* NOTE: Must be equal to the last clock ID increased by one */
+#define CMU_CMU_NR_CLK (CLK_DOUT_CMU_VPP_CORE + 1)
+#define CMU_BUS_NR_CLK (CLK_DOUT_BUS_PCLK + 1)
+#define CMU_CORE_NR_CLK (CLK_DOUT_CORE_PCLK + 1)
+#define CMU_CPUCL_NR_CLK (CLK_GOUT_CPUCL_CSSYS_IPCLKPORT_ATCLK + 1)
+#define CMU_FSYS_NR_CLK (CLK_GOUT_FSYS_QSPI_IPCLKPORT_SSI_CLK + 1)
+#define CMU_IMEM_NR_CLK (CLK_GOUT_IMEM_PCLK_TMU0_APBIF + 1)
+#define CMU_PERI_NR_CLK (CLK_GOUT_PERI_DMA4DSIM_IPCLKPORT_CLK_AXI_CLK + 1)
+
+/* Register Offset definitions for CMU_CMU (0x12400000) */
+#define PLL_LOCKTIME_PLL_AUDIO 0x0000
+#define PLL_LOCKTIME_PLL_SHARED0 0x0004
+#define PLL_LOCKTIME_PLL_SHARED1 0x0008
+#define PLL_CON0_PLL_AUDIO 0x0100
+#define PLL_CON0_PLL_SHARED0 0x0120
+#define PLL_CON0_PLL_SHARED1 0x0140
+#define CLK_CON_MUX_CLKCMU_2D 0x1000
+#define CLK_CON_MUX_CLKCMU_3D 0x1004
+#define CLK_CON_MUX_CLKCMU_BUS 0x1008
+#define CLK_CON_MUX_CLKCMU_BUS_DLP 0x100c
+#define CLK_CON_MUX_CLKCMU_CDC_CORE 0x1010
+#define CLK_CON_MUX_CLKCMU_FSYS_SCAN0 0x1014
+#define CLK_CON_MUX_CLKCMU_FSYS_SCAN1 0x1018
+#define CLK_CON_MUX_CLKCMU_IMEM_JPEG 0x101c
+#define CLK_CON_MUX_CLKCMU_PERI_DISP 0x1020
+#define CLK_CON_MUX_CLKCMU_CORE_BUS 0x1024
+#define CLK_CON_MUX_CLKCMU_CORE_DLP 0x1028
+#define CLK_CON_MUX_CLKCMU_CPUCL_SWITCH 0x1030
+#define CLK_CON_MUX_CLKCMU_DLP_CORE 0x1034
+#define CLK_CON_MUX_CLKCMU_FSYS_BUS 0x1038
+#define CLK_CON_MUX_CLKCMU_FSYS_IP 0x103c
+#define CLK_CON_MUX_CLKCMU_IMEM_ACLK 0x1054
+#define CLK_CON_MUX_CLKCMU_MIF_BUSP 0x1080
+#define CLK_CON_MUX_CLKCMU_MIF_SWITCH 0x1084
+#define CLK_CON_MUX_CLKCMU_PERI_IP 0x1088
+#define CLK_CON_MUX_CLKCMU_RSP_CORE 0x108c
+#define CLK_CON_MUX_CLKCMU_TRFM_CORE 0x1090
+#define CLK_CON_MUX_CLKCMU_VCA_ACE 0x1094
+#define CLK_CON_MUX_CLKCMU_VCA_OD 0x1098
+#define CLK_CON_MUX_CLKCMU_VIO_CORE 0x109c
+#define CLK_CON_MUX_CLKCMU_VIP0_CORE 0x10a0
+#define CLK_CON_MUX_CLKCMU_VIP1_CORE 0x10a4
+#define CLK_CON_MUX_CLKCMU_VPP_CORE 0x10a8
+
+#define CLK_CON_DIV_CLKCMU_BUS 0x1800
+#define CLK_CON_DIV_CLKCMU_BUS_DLP 0x1804
+#define CLK_CON_DIV_CLKCMU_CDC_CORE 0x1808
+#define CLK_CON_DIV_CLKCMU_FSYS_SCAN0 0x180c
+#define CLK_CON_DIV_CLKCMU_FSYS_SCAN1 0x1810
+#define CLK_CON_DIV_CLKCMU_IMEM_JPEG 0x1814
+#define CLK_CON_DIV_CLKCMU_MIF_SWITCH 0x1818
+#define CLK_CON_DIV_CLKCMU_CORE_DLP 0x181c
+#define CLK_CON_DIV_CLKCMU_CORE_MAIN 0x1820
+#define CLK_CON_DIV_CLKCMU_PERI_DISP 0x1824
+#define CLK_CON_DIV_CLKCMU_CPUCL_SWITCH 0x1828
+#define CLK_CON_DIV_CLKCMU_DLP_CORE 0x182c
+#define CLK_CON_DIV_CLKCMU_FSYS_BUS 0x1830
+#define CLK_CON_DIV_CLKCMU_FSYS_IP 0x1834
+#define CLK_CON_DIV_CLKCMU_VIO_AUDIO 0x1838
+#define CLK_CON_DIV_CLKCMU_GPU_2D 0x1848
+#define CLK_CON_DIV_CLKCMU_GPU_3D 0x184c
+#define CLK_CON_DIV_CLKCMU_IMEM_ACLK 0x1854
+#define CLK_CON_DIV_CLKCMU_MIF_BUSP 0x1884
+#define CLK_CON_DIV_CLKCMU_PERI_AUDIO 0x1890
+#define CLK_CON_DIV_CLKCMU_PERI_IP 0x1894
+#define CLK_CON_DIV_CLKCMU_RSP_CORE 0x1898
+#define CLK_CON_DIV_CLKCMU_TRFM_CORE 0x189c
+#define CLK_CON_DIV_CLKCMU_VCA_ACE 0x18a0
+#define CLK_CON_DIV_CLKCMU_VCA_OD 0x18a4
+#define CLK_CON_DIV_CLKCMU_VIO_CORE 0x18ac
+#define CLK_CON_DIV_CLKCMU_VIP0_CORE 0x18b0
+#define CLK_CON_DIV_CLKCMU_VIP1_CORE 0x18b4
+#define CLK_CON_DIV_CLKCMU_VPP_CORE 0x18b8
+#define CLK_CON_DIV_PLL_SHARED0_DIV2 0x18bc
+#define CLK_CON_DIV_PLL_SHARED0_DIV3 0x18c0
+#define CLK_CON_DIV_PLL_SHARED0_DIV4 0x18c4
+#define CLK_CON_DIV_PLL_SHARED1_DIV2 0x18c8
+#define CLK_CON_DIV_PLL_SHARED1_DIV3 0x18cc
+#define CLK_CON_DIV_PLL_SHARED1_DIV4 0x18d0
+
+static const unsigned long cmu_cmu_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_AUDIO,
+ PLL_LOCKTIME_PLL_SHARED0,
+ PLL_LOCKTIME_PLL_SHARED1,
+ PLL_CON0_PLL_AUDIO,
+ PLL_CON0_PLL_SHARED0,
+ PLL_CON0_PLL_SHARED1,
+ CLK_CON_MUX_CLKCMU_2D,
+ CLK_CON_MUX_CLKCMU_3D,
+ CLK_CON_MUX_CLKCMU_BUS,
+ CLK_CON_MUX_CLKCMU_BUS_DLP,
+ CLK_CON_MUX_CLKCMU_CDC_CORE,
+ CLK_CON_MUX_CLKCMU_FSYS_SCAN0,
+ CLK_CON_MUX_CLKCMU_FSYS_SCAN1,
+ CLK_CON_MUX_CLKCMU_IMEM_JPEG,
+ CLK_CON_MUX_CLKCMU_PERI_DISP,
+ CLK_CON_MUX_CLKCMU_CORE_BUS,
+ CLK_CON_MUX_CLKCMU_CORE_DLP,
+ CLK_CON_MUX_CLKCMU_CPUCL_SWITCH,
+ CLK_CON_MUX_CLKCMU_DLP_CORE,
+ CLK_CON_MUX_CLKCMU_FSYS_BUS,
+ CLK_CON_MUX_CLKCMU_FSYS_IP,
+ CLK_CON_MUX_CLKCMU_IMEM_ACLK,
+ CLK_CON_MUX_CLKCMU_MIF_BUSP,
+ CLK_CON_MUX_CLKCMU_MIF_SWITCH,
+ CLK_CON_MUX_CLKCMU_PERI_IP,
+ CLK_CON_MUX_CLKCMU_RSP_CORE,
+ CLK_CON_MUX_CLKCMU_TRFM_CORE,
+ CLK_CON_MUX_CLKCMU_VCA_ACE,
+ CLK_CON_MUX_CLKCMU_VCA_OD,
+ CLK_CON_MUX_CLKCMU_VIO_CORE,
+ CLK_CON_MUX_CLKCMU_VIP0_CORE,
+ CLK_CON_MUX_CLKCMU_VIP1_CORE,
+ CLK_CON_MUX_CLKCMU_VPP_CORE,
+ CLK_CON_DIV_CLKCMU_BUS,
+ CLK_CON_DIV_CLKCMU_BUS_DLP,
+ CLK_CON_DIV_CLKCMU_CDC_CORE,
+ CLK_CON_DIV_CLKCMU_FSYS_SCAN0,
+ CLK_CON_DIV_CLKCMU_FSYS_SCAN1,
+ CLK_CON_DIV_CLKCMU_IMEM_JPEG,
+ CLK_CON_DIV_CLKCMU_MIF_SWITCH,
+ CLK_CON_DIV_CLKCMU_CORE_DLP,
+ CLK_CON_DIV_CLKCMU_CORE_MAIN,
+ CLK_CON_DIV_CLKCMU_PERI_DISP,
+ CLK_CON_DIV_CLKCMU_CPUCL_SWITCH,
+ CLK_CON_DIV_CLKCMU_DLP_CORE,
+ CLK_CON_DIV_CLKCMU_FSYS_BUS,
+ CLK_CON_DIV_CLKCMU_FSYS_IP,
+ CLK_CON_DIV_CLKCMU_VIO_AUDIO,
+ CLK_CON_DIV_CLKCMU_GPU_2D,
+ CLK_CON_DIV_CLKCMU_GPU_3D,
+ CLK_CON_DIV_CLKCMU_IMEM_ACLK,
+ CLK_CON_DIV_CLKCMU_MIF_BUSP,
+ CLK_CON_DIV_CLKCMU_PERI_AUDIO,
+ CLK_CON_DIV_CLKCMU_PERI_IP,
+ CLK_CON_DIV_CLKCMU_RSP_CORE,
+ CLK_CON_DIV_CLKCMU_TRFM_CORE,
+ CLK_CON_DIV_CLKCMU_VCA_ACE,
+ CLK_CON_DIV_CLKCMU_VCA_OD,
+ CLK_CON_DIV_CLKCMU_VIO_CORE,
+ CLK_CON_DIV_CLKCMU_VIP0_CORE,
+ CLK_CON_DIV_CLKCMU_VIP1_CORE,
+ CLK_CON_DIV_CLKCMU_VPP_CORE,
+ CLK_CON_DIV_PLL_SHARED0_DIV2,
+ CLK_CON_DIV_PLL_SHARED0_DIV3,
+ CLK_CON_DIV_PLL_SHARED0_DIV4,
+ CLK_CON_DIV_PLL_SHARED1_DIV2,
+ CLK_CON_DIV_PLL_SHARED1_DIV3,
+ CLK_CON_DIV_PLL_SHARED1_DIV4,
+};
+
+static const struct samsung_pll_rate_table artpec8_pll_audio_rates[] __initconst = {
+ PLL_36XX_RATE(25 * MHZ, 589823913U, 47, 1, 1, 12184),
+ PLL_36XX_RATE(25 * MHZ, 393215942U, 47, 3, 0, 12184),
+ PLL_36XX_RATE(25 * MHZ, 294911956U, 47, 1, 2, 12184),
+ PLL_36XX_RATE(25 * MHZ, 100000000U, 32, 2, 2, 0),
+ PLL_36XX_RATE(25 * MHZ, 98303985U, 47, 3, 2, 12184),
+ PLL_36XX_RATE(25 * MHZ, 49151992U, 47, 3, 3, 12184),
+};
+
+static const struct samsung_pll_clock cmu_cmu_pll_clks[] __initconst = {
+ PLL(pll_1017x, CLK_FOUT_SHARED0_PLL, "fout_pll_shared0", "fin_pll",
+ PLL_LOCKTIME_PLL_SHARED0, PLL_CON0_PLL_SHARED0, NULL),
+ PLL(pll_1017x, CLK_FOUT_SHARED1_PLL, "fout_pll_shared1", "fin_pll",
+ PLL_LOCKTIME_PLL_SHARED1, PLL_CON0_PLL_SHARED1, NULL),
+ PLL(pll_1031x, CLK_FOUT_AUDIO_PLL, "fout_pll_audio", "fin_pll",
+ PLL_LOCKTIME_PLL_AUDIO, PLL_CON0_PLL_AUDIO, artpec8_pll_audio_rates),
+};
+
+PNAME(mout_clkcmu_bus_bus_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_bus_dlp_p) = { "dout_pll_shared0_div2", "dout_pll_shared0_div4",
+ "dout_pll_shared1_div2", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_core_bus_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared0_div4", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_core_dlp_p) = { "dout_pll_shared0_div2", "dout_pll_shared1_div2",
+ "dout_pll_shared0_div3", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_cpucl_switch_p) = { "dout_pll_shared0_div2", "dout_pll_shared1_div2",
+ "dout_pll_shared0_div3", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_fsys_bus_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div2",
+ "dout_pll_shared1_div4", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_fsys_ip_p) = { "dout_pll_shared0_div2", "dout_pll_shared1_div3",
+ "dout_pll_shared1_div2", "dout_pll_shared0_div3" };
+PNAME(mout_clkcmu_fsys_scan0_p) = { "dout_pll_shared0_div4", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_fsys_scan1_p) = { "dout_pll_shared0_div4", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_imem_imem_p) = { "dout_pll_shared1_div4", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div2" };
+PNAME(mout_clkcmu_imem_jpeg_p) = { "dout_pll_shared0_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div2", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_cdc_core_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_dlp_core_p) = { "dout_pll_shared0_div2", "dout_pll_shared1_div2",
+ "dout_pll_shared0_div3", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_3d_p) = { "dout_pll_shared0_div2", "dout_pll_shared1_div2",
+ "dout_pll_shared0_div3", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_2d_p) = { "dout_pll_shared0_div2", "dout_pll_shared1_div2",
+ "dout_pll_shared0_div3", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_mif_switch_p) = { "dout_pll_shared0", "dout_pll_shared1",
+ "dout_pll_shared0_div2", "dout_pll_shared0_div3" };
+PNAME(mout_clkcmu_mif_busp_p) = { "dout_pll_shared0_div3", "dout_pll_shared1_div4",
+ "dout_pll_shared0_div4", "dout_pll_shared0_div2" };
+PNAME(mout_clkcmu_peri_disp_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div2",
+ "dout_pll_shared1_div4", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_peri_ip_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div4",
+ "dout_pll_shared1_div4", "dout_pll_shared0_div2" };
+PNAME(mout_clkcmu_rsp_core_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_trfm_core_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_vca_ace_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_vca_od_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_vio_core_p) = { "dout_pll_shared0_div3", "dout_pll_shared0_div2",
+ "dout_pll_shared1_div2", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_vip0_core_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_vip1_core_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_vpp_core_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_pll_shared0_p) = { "fin_pll", "fout_pll_shared0" };
+PNAME(mout_clkcmu_pll_shared1_p) = { "fin_pll", "fout_pll_shared1" };
+PNAME(mout_clkcmu_pll_audio_p) = { "fin_pll", "fout_pll_audio" };
+
+static const struct samsung_fixed_factor_clock cmu_fixed_factor_clks[] __initconst = {
+ FFACTOR(CLK_DOUT_CMU_OTP, "dout_clkcmu_otp", "fin_pll", 1, 8, 0),
+};
+
+static const struct samsung_mux_clock cmu_cmu_mux_clks[] __initconst = {
+ MUX(0, "mout_clkcmu_pll_shared0", mout_clkcmu_pll_shared0_p, PLL_CON0_PLL_SHARED0, 4, 1),
+ MUX(0, "mout_clkcmu_pll_shared1", mout_clkcmu_pll_shared1_p, PLL_CON0_PLL_SHARED1, 4, 1),
+ MUX(0, "mout_clkcmu_pll_audio", mout_clkcmu_pll_audio_p, PLL_CON0_PLL_AUDIO, 4, 1),
+ MUX(0, "mout_clkcmu_bus_bus", mout_clkcmu_bus_bus_p, CLK_CON_MUX_CLKCMU_BUS, 0, 2),
+ MUX(0, "mout_clkcmu_bus_dlp", mout_clkcmu_bus_dlp_p, CLK_CON_MUX_CLKCMU_BUS_DLP, 0, 2),
+ MUX(0, "mout_clkcmu_core_bus", mout_clkcmu_core_bus_p, CLK_CON_MUX_CLKCMU_CORE_BUS, 0, 2),
+ MUX(0, "mout_clkcmu_core_dlp", mout_clkcmu_core_dlp_p, CLK_CON_MUX_CLKCMU_CORE_DLP, 0, 2),
+ MUX(0, "mout_clkcmu_cpucl_switch", mout_clkcmu_cpucl_switch_p,
+ CLK_CON_MUX_CLKCMU_CPUCL_SWITCH, 0, 3),
+ MUX(0, "mout_clkcmu_fsys_bus", mout_clkcmu_fsys_bus_p, CLK_CON_MUX_CLKCMU_FSYS_BUS, 0, 2),
+ MUX(0, "mout_clkcmu_fsys_ip", mout_clkcmu_fsys_ip_p, CLK_CON_MUX_CLKCMU_FSYS_IP, 0, 2),
+ MUX(0, "mout_clkcmu_fsys_scan0", mout_clkcmu_fsys_scan0_p,
+ CLK_CON_MUX_CLKCMU_FSYS_SCAN0, 0, 1),
+ MUX(0, "mout_clkcmu_fsys_scan1", mout_clkcmu_fsys_scan1_p,
+ CLK_CON_MUX_CLKCMU_FSYS_SCAN1, 0, 1),
+ MUX(0, "mout_clkcmu_imem_imem", mout_clkcmu_imem_imem_p,
+ CLK_CON_MUX_CLKCMU_IMEM_ACLK, 0, 2),
+ MUX(0, "mout_clkcmu_imem_jpeg", mout_clkcmu_imem_jpeg_p,
+ CLK_CON_MUX_CLKCMU_IMEM_JPEG, 0, 2),
+ nMUX(0, "mout_clkcmu_cdc_core", mout_clkcmu_cdc_core_p, CLK_CON_MUX_CLKCMU_CDC_CORE, 0, 2),
+ nMUX(0, "mout_clkcmu_dlp_core", mout_clkcmu_dlp_core_p, CLK_CON_MUX_CLKCMU_DLP_CORE, 0, 2),
+ MUX(0, "mout_clkcmu_3d", mout_clkcmu_3d_p, CLK_CON_MUX_CLKCMU_3D, 0, 2),
+ MUX(0, "mout_clkcmu_2d", mout_clkcmu_2d_p, CLK_CON_MUX_CLKCMU_2D, 0, 2),
+ MUX(0, "mout_clkcmu_mif_switch", mout_clkcmu_mif_switch_p,
+ CLK_CON_MUX_CLKCMU_MIF_SWITCH, 0, 2),
+ MUX(0, "mout_clkcmu_mif_busp", mout_clkcmu_mif_busp_p, CLK_CON_MUX_CLKCMU_MIF_BUSP, 0, 2),
+ MUX(0, "mout_clkcmu_peri_disp", mout_clkcmu_peri_disp_p,
+ CLK_CON_MUX_CLKCMU_PERI_DISP, 0, 2),
+ MUX(0, "mout_clkcmu_peri_ip", mout_clkcmu_peri_ip_p, CLK_CON_MUX_CLKCMU_PERI_IP, 0, 2),
+ MUX(0, "mout_clkcmu_rsp_core", mout_clkcmu_rsp_core_p, CLK_CON_MUX_CLKCMU_RSP_CORE, 0, 2),
+ nMUX(0, "mout_clkcmu_trfm_core", mout_clkcmu_trfm_core_p,
+ CLK_CON_MUX_CLKCMU_TRFM_CORE, 0, 2),
+ MUX(0, "mout_clkcmu_vca_ace", mout_clkcmu_vca_ace_p, CLK_CON_MUX_CLKCMU_VCA_ACE, 0, 2),
+ MUX(0, "mout_clkcmu_vca_od", mout_clkcmu_vca_od_p, CLK_CON_MUX_CLKCMU_VCA_OD, 0, 2),
+ MUX(0, "mout_clkcmu_vio_core", mout_clkcmu_vio_core_p, CLK_CON_MUX_CLKCMU_VIO_CORE, 0, 2),
+ nMUX(0, "mout_clkcmu_vip0_core", mout_clkcmu_vip0_core_p,
+ CLK_CON_MUX_CLKCMU_VIP0_CORE, 0, 2),
+ nMUX(0, "mout_clkcmu_vip1_core", mout_clkcmu_vip1_core_p,
+ CLK_CON_MUX_CLKCMU_VIP1_CORE, 0, 2),
+ nMUX(0, "mout_clkcmu_vpp_core", mout_clkcmu_vpp_core_p, CLK_CON_MUX_CLKCMU_VPP_CORE, 0, 2),
+};
+
+static const struct samsung_div_clock cmu_cmu_div_clks[] __initconst = {
+ DIV(CLK_DOUT_SHARED0_DIV2, "dout_pll_shared0_div2",
+ "mout_clkcmu_pll_shared0", CLK_CON_DIV_PLL_SHARED0_DIV2, 0, 1),
+ DIV(CLK_DOUT_SHARED0_DIV3, "dout_pll_shared0_div3",
+ "mout_clkcmu_pll_shared0", CLK_CON_DIV_PLL_SHARED0_DIV3, 0, 2),
+ DIV(CLK_DOUT_SHARED0_DIV4, "dout_pll_shared0_div4",
+ "dout_pll_shared0_div2", CLK_CON_DIV_PLL_SHARED0_DIV4, 0, 1),
+ DIV(CLK_DOUT_SHARED1_DIV2, "dout_pll_shared1_div2",
+ "mout_clkcmu_pll_shared1", CLK_CON_DIV_PLL_SHARED1_DIV2, 0, 1),
+ DIV(CLK_DOUT_SHARED1_DIV3, "dout_pll_shared1_div3",
+ "mout_clkcmu_pll_shared1", CLK_CON_DIV_PLL_SHARED1_DIV3, 0, 2),
+ DIV(CLK_DOUT_SHARED1_DIV4, "dout_pll_shared1_div4",
+ "dout_pll_shared1_div2", CLK_CON_DIV_PLL_SHARED1_DIV4, 0, 1),
+ DIV(CLK_DOUT_CMU_BUS, "dout_clkcmu_bus",
+ "mout_clkcmu_bus_bus", CLK_CON_DIV_CLKCMU_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_BUS_DLP, "dout_clkcmu_bus_dlp",
+ "mout_clkcmu_bus_dlp", CLK_CON_DIV_CLKCMU_BUS_DLP, 0, 4),
+ DIV(CLK_DOUT_CMU_CORE_MAIN, "dout_clkcmu_core_main",
+ "mout_clkcmu_core_bus", CLK_CON_DIV_CLKCMU_CORE_MAIN, 0, 4),
+ DIV(CLK_DOUT_CMU_CORE_DLP, "dout_clkcmu_core_dlp",
+ "mout_clkcmu_core_dlp", CLK_CON_DIV_CLKCMU_CORE_DLP, 0, 4),
+ DIV(CLK_DOUT_CMU_CPUCL_SWITCH, "dout_clkcmu_cpucl_switch",
+ "mout_clkcmu_cpucl_switch", CLK_CON_DIV_CLKCMU_CPUCL_SWITCH, 0, 3),
+ DIV(CLK_DOUT_CMU_FSYS_BUS, "dout_clkcmu_fsys_bus",
+ "mout_clkcmu_fsys_bus", CLK_CON_DIV_CLKCMU_FSYS_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_FSYS_IP, "dout_clkcmu_fsys_ip",
+ "mout_clkcmu_fsys_ip", CLK_CON_DIV_CLKCMU_FSYS_IP, 0, 9),
+ DIV(CLK_DOUT_CMU_FSYS_SCAN0, "dout_clkcmu_fsys_scan0",
+ "mout_clkcmu_fsys_scan0", CLK_CON_DIV_CLKCMU_FSYS_SCAN0, 0, 4),
+ DIV(CLK_DOUT_CMU_FSYS_SCAN1, "dout_clkcmu_fsys_scan1",
+ "mout_clkcmu_fsys_scan1", CLK_CON_DIV_CLKCMU_FSYS_SCAN1, 0, 4),
+ DIV(CLK_DOUT_CMU_IMEM_ACLK, "dout_clkcmu_imem_aclk",
+ "mout_clkcmu_imem_imem", CLK_CON_DIV_CLKCMU_IMEM_ACLK, 0, 4),
+ DIV(CLK_DOUT_CMU_IMEM_JPEG, "dout_clkcmu_imem_jpeg",
+ "mout_clkcmu_imem_jpeg", CLK_CON_DIV_CLKCMU_IMEM_JPEG, 0, 4),
+ DIV_F(CLK_DOUT_CMU_CDC_CORE, "dout_clkcmu_cdc_core",
+ "mout_clkcmu_cdc_core", CLK_CON_DIV_CLKCMU_CDC_CORE, 0, 4, CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_CMU_DLP_CORE, "dout_clkcmu_dlp_core",
+ "mout_clkcmu_dlp_core", CLK_CON_DIV_CLKCMU_DLP_CORE, 0, 4, CLK_SET_RATE_PARENT, 0),
+ DIV(CLK_DOUT_CMU_GPU_3D, "dout_clkcmu_gpu_3d",
+ "mout_clkcmu_3d", CLK_CON_DIV_CLKCMU_GPU_3D, 0, 3),
+ DIV(CLK_DOUT_CMU_GPU_2D, "dout_clkcmu_gpu_2d",
+ "mout_clkcmu_2d", CLK_CON_DIV_CLKCMU_GPU_2D, 0, 4),
+ DIV(CLK_DOUT_CMU_MIF_SWITCH, "dout_clkcmu_mif_switch",
+ "mout_clkcmu_mif_switch", CLK_CON_DIV_CLKCMU_MIF_SWITCH, 0, 4),
+ DIV(CLK_DOUT_CMU_MIF_BUSP, "dout_clkcmu_mif_busp",
+ "mout_clkcmu_mif_busp", CLK_CON_DIV_CLKCMU_MIF_BUSP, 0, 3),
+ DIV(CLK_DOUT_CMU_PERI_DISP, "dout_clkcmu_peri_disp",
+ "mout_clkcmu_peri_disp", CLK_CON_DIV_CLKCMU_PERI_DISP, 0, 4),
+ DIV(CLK_DOUT_CMU_PERI_IP, "dout_clkcmu_peri_ip",
+ "mout_clkcmu_peri_ip", CLK_CON_DIV_CLKCMU_PERI_IP, 0, 4),
+ DIV(CLK_DOUT_CMU_PERI_AUDIO, "dout_clkcmu_peri_audio",
+ "mout_clkcmu_pll_audio", CLK_CON_DIV_CLKCMU_PERI_AUDIO, 0, 4),
+ DIV(CLK_DOUT_CMU_RSP_CORE, "dout_clkcmu_rsp_core",
+ "mout_clkcmu_rsp_core", CLK_CON_DIV_CLKCMU_RSP_CORE, 0, 4),
+ DIV_F(CLK_DOUT_CMU_TRFM_CORE, "dout_clkcmu_trfm_core",
+ "mout_clkcmu_trfm_core", CLK_CON_DIV_CLKCMU_TRFM_CORE, 0, 4, CLK_SET_RATE_PARENT, 0),
+ DIV(CLK_DOUT_CMU_VCA_ACE, "dout_clkcmu_vca_ace",
+ "mout_clkcmu_vca_ace", CLK_CON_DIV_CLKCMU_VCA_ACE, 0, 4),
+ DIV(CLK_DOUT_CMU_VCA_OD, "dout_clkcmu_vca_od",
+ "mout_clkcmu_vca_od", CLK_CON_DIV_CLKCMU_VCA_OD, 0, 4),
+ DIV(CLK_DOUT_CMU_VIO_CORE, "dout_clkcmu_vio_core",
+ "mout_clkcmu_vio_core", CLK_CON_DIV_CLKCMU_VIO_CORE, 0, 4),
+ DIV(CLK_DOUT_CMU_VIO_AUDIO, "dout_clkcmu_vio_audio",
+ "mout_clkcmu_pll_audio", CLK_CON_DIV_CLKCMU_VIO_AUDIO, 0, 4),
+ DIV_F(CLK_DOUT_CMU_VIP0_CORE, "dout_clkcmu_vip0_core",
+ "mout_clkcmu_vip0_core", CLK_CON_DIV_CLKCMU_VIP0_CORE, 0, 4, CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_CMU_VIP1_CORE, "dout_clkcmu_vip1_core",
+ "mout_clkcmu_vip1_core", CLK_CON_DIV_CLKCMU_VIP1_CORE, 0, 4, CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_CMU_VPP_CORE, "dout_clkcmu_vpp_core",
+ "mout_clkcmu_vpp_core", CLK_CON_DIV_CLKCMU_VPP_CORE, 0, 4, CLK_SET_RATE_PARENT, 0),
+};
+
+static const struct samsung_cmu_info cmu_cmu_info __initconst = {
+ .pll_clks = cmu_cmu_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cmu_cmu_pll_clks),
+ .fixed_factor_clks = cmu_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(cmu_fixed_factor_clks),
+ .mux_clks = cmu_cmu_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmu_cmu_mux_clks),
+ .div_clks = cmu_cmu_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cmu_cmu_div_clks),
+ .nr_clk_ids = CMU_CMU_NR_CLK,
+ .clk_regs = cmu_cmu_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmu_cmu_clk_regs),
+};
+
+/* Register Offset definitions for CMU_BUS (0x12c10000) */
+#define PLL_CON0_MUX_CLK_BUS_ACLK_USER 0x0100
+#define PLL_CON0_MUX_CLK_BUS_DLP_USER 0x0120
+#define CLK_CON_DIV_CLK_BUS_PCLK 0x1800
+
+static const unsigned long cmu_bus_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLK_BUS_ACLK_USER,
+ PLL_CON0_MUX_CLK_BUS_DLP_USER,
+ CLK_CON_DIV_CLK_BUS_PCLK,
+};
+
+PNAME(mout_clk_bus_aclk_user_p) = { "fin_pll", "dout_clkcmu_bus" };
+PNAME(mout_clk_bus_dlp_user_p) = { "fin_pll", "dout_clkcmu_bus_dlp" };
+
+static const struct samsung_mux_clock cmu_bus_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_BUS_ACLK_USER, "mout_clk_bus_aclk_user",
+ mout_clk_bus_aclk_user_p, PLL_CON0_MUX_CLK_BUS_ACLK_USER, 4, 1),
+ MUX(CLK_MOUT_BUS_DLP_USER, "mout_clk_bus_dlp_user",
+ mout_clk_bus_dlp_user_p, PLL_CON0_MUX_CLK_BUS_DLP_USER, 4, 1),
+};
+
+static const struct samsung_div_clock cmu_bus_div_clks[] __initconst = {
+ DIV(CLK_DOUT_BUS_PCLK, "dout_clk_bus_pclk", "mout_clk_bus_aclk_user",
+ CLK_CON_DIV_CLK_BUS_PCLK, 0, 4),
+};
+
+static const struct samsung_cmu_info cmu_bus_info __initconst = {
+ .mux_clks = cmu_bus_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmu_bus_mux_clks),
+ .div_clks = cmu_bus_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cmu_bus_div_clks),
+ .nr_clk_ids = CMU_BUS_NR_CLK,
+ .clk_regs = cmu_bus_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmu_bus_clk_regs),
+};
+
+/* Register Offset definitions for CMU_CORE (0x12410000) */
+#define PLL_CON0_MUX_CLK_CORE_ACLK_USER 0x0100
+#define PLL_CON0_MUX_CLK_CORE_DLP_USER 0x0120
+#define CLK_CON_DIV_CLK_CORE_PCLK 0x1800
+
+static const unsigned long cmu_core_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLK_CORE_ACLK_USER,
+ PLL_CON0_MUX_CLK_CORE_DLP_USER,
+ CLK_CON_DIV_CLK_CORE_PCLK,
+};
+
+PNAME(mout_clk_core_aclk_user_p) = { "fin_pll", "dout_clkcmu_core_main" };
+PNAME(mout_clk_core_dlp_user_p) = { "fin_pll", "dout_clkcmu_core_dlp" };
+
+static const struct samsung_mux_clock cmu_core_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_CORE_ACLK_USER, "mout_clk_core_aclk_user",
+ mout_clk_core_aclk_user_p, PLL_CON0_MUX_CLK_CORE_ACLK_USER, 4, 1),
+ MUX(CLK_MOUT_CORE_DLP_USER, "mout_clk_core_dlp_user",
+ mout_clk_core_dlp_user_p, PLL_CON0_MUX_CLK_CORE_DLP_USER, 4, 1),
+};
+
+static const struct samsung_div_clock cmu_core_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CORE_PCLK, "dout_clk_core_pclk",
+ "mout_clk_core_aclk_user", CLK_CON_DIV_CLK_CORE_PCLK, 0, 4),
+};
+
+static const struct samsung_cmu_info cmu_core_info __initconst = {
+ .mux_clks = cmu_core_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmu_core_mux_clks),
+ .div_clks = cmu_core_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cmu_core_div_clks),
+ .nr_clk_ids = CMU_CORE_NR_CLK,
+ .clk_regs = cmu_core_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmu_core_clk_regs),
+};
+
+/* Register Offset definitions for CMU_CPUCL (0x11410000) */
+#define PLL_LOCKTIME_PLL_CPUCL 0x0000
+#define PLL_CON0_MUX_CLKCMU_CPUCL_SWITCH_USER 0x0120
+#define PLL_CON0_PLL_CPUCL 0x0140
+#define CLK_CON_MUX_CLK_CPUCL_PLL 0x1000
+#define CLK_CON_DIV_CLK_CLUSTER_ACLK 0x1800
+#define CLK_CON_DIV_CLK_CLUSTER_CNTCLK 0x1804
+#define CLK_CON_DIV_CLK_CLUSTER_PCLKDBG 0x1808
+#define CLK_CON_DIV_CLK_CPUCL_CMUREF 0x180c
+#define CLK_CON_DIV_CLK_CPUCL_PCLK 0x1814
+#define CLK_CON_DIV_CLK_CLUSTER_ATCLK 0x1818
+#define CLK_CON_DIV_CLK_CPUCL_DBG 0x181c
+#define CLK_CON_DIV_CLK_CPUCL_PCLKDBG 0x1820
+#define CLK_CON_GAT_CLK_CLUSTER_CPU 0x2008
+#define CLK_CON_GAT_CLK_CPUCL_SHORTSTOP 0x200c
+#define CLK_CON_DMYQCH_CON_CSSYS_QCH 0x3008
+
+static const unsigned long cmu_cpucl_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_CPUCL,
+ PLL_CON0_MUX_CLKCMU_CPUCL_SWITCH_USER,
+ PLL_CON0_PLL_CPUCL,
+ CLK_CON_MUX_CLK_CPUCL_PLL,
+ CLK_CON_DIV_CLK_CLUSTER_ACLK,
+ CLK_CON_DIV_CLK_CLUSTER_CNTCLK,
+ CLK_CON_DIV_CLK_CLUSTER_PCLKDBG,
+ CLK_CON_DIV_CLK_CPUCL_CMUREF,
+ CLK_CON_DIV_CLK_CPUCL_PCLK,
+ CLK_CON_DIV_CLK_CLUSTER_ATCLK,
+ CLK_CON_DIV_CLK_CPUCL_DBG,
+ CLK_CON_DIV_CLK_CPUCL_PCLKDBG,
+ CLK_CON_GAT_CLK_CLUSTER_CPU,
+ CLK_CON_GAT_CLK_CPUCL_SHORTSTOP,
+ CLK_CON_DMYQCH_CON_CSSYS_QCH,
+};
+
+static const struct samsung_pll_clock cmu_cpucl_pll_clks[] __initconst = {
+ PLL(pll_1017x, CLK_FOUT_CPUCL_PLL, "fout_pll_cpucl", "fin_pll",
+ PLL_LOCKTIME_PLL_CPUCL, PLL_CON0_PLL_CPUCL, NULL),
+};
+
+PNAME(mout_clkcmu_cpucl_switch_user_p) = { "fin_pll", "dout_clkcmu_cpucl_switch" };
+PNAME(mout_pll_cpucl_p) = { "fin_pll", "fout_pll_cpucl" };
+PNAME(mout_clk_cpucl_pll_p) = { "mout_pll_cpucl", "mout_clkcmu_cpucl_switch_user" };
+
+static const struct samsung_mux_clock cmu_cpucl_mux_clks[] __initconst = {
+ MUX_F(0, "mout_pll_cpucl", mout_pll_cpucl_p, PLL_CON0_PLL_CPUCL, 4, 1,
+ CLK_SET_RATE_PARENT | CLK_RECALC_NEW_RATES, 0),
+ MUX(CLK_MOUT_CPUCL_SWITCH_USER, "mout_clkcmu_cpucl_switch_user",
+ mout_clkcmu_cpucl_switch_user_p, PLL_CON0_MUX_CLKCMU_CPUCL_SWITCH_USER, 4, 1),
+ MUX_F(CLK_MOUT_CPUCL_PLL, "mout_clk_cpucl_pll", mout_clk_cpucl_pll_p,
+ CLK_CON_MUX_CLK_CPUCL_PLL, 0, 1, CLK_SET_RATE_PARENT, 0),
+};
+
+static const struct samsung_fixed_factor_clock cpucl_ffactor_clks[] __initconst = {
+ FFACTOR(CLK_DOUT_CPUCL_CPU, "dout_clk_cpucl_cpu",
+ "mout_clk_cpucl_pll", 1, 1, CLK_SET_RATE_PARENT),
+};
+
+static const struct samsung_div_clock cmu_cpucl_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CPUCL_CLUSTER_ACLK, "dout_clk_cluster_aclk",
+ "dout_clk_cpucl_cpu", CLK_CON_DIV_CLK_CLUSTER_ACLK, 0, 4),
+ DIV(CLK_DOUT_CPUCL_CLUSTER_PCLKDBG, "dout_clk_cluster_pclkdbg",
+ "dout_clk_cpucl_cpu", CLK_CON_DIV_CLK_CLUSTER_PCLKDBG, 0, 4),
+ DIV(CLK_DOUT_CPUCL_CLUSTER_CNTCLK, "dout_clk_cluster_cntclk",
+ "dout_clk_cpucl_cpu", CLK_CON_DIV_CLK_CLUSTER_CNTCLK, 0, 4),
+ DIV(CLK_DOUT_CPUCL_CLUSTER_ATCLK, "dout_clk_cluster_atclk",
+ "dout_clk_cpucl_cpu", CLK_CON_DIV_CLK_CLUSTER_ATCLK, 0, 4),
+ DIV(CLK_DOUT_CPUCL_PCLK, "dout_clk_cpucl_pclk",
+ "dout_clk_cpucl_cpu", CLK_CON_DIV_CLK_CPUCL_PCLK, 0, 4),
+ DIV(CLK_DOUT_CPUCL_CMUREF, "dout_clk_cpucl_cmuref",
+ "dout_clk_cpucl_cpu", CLK_CON_DIV_CLK_CPUCL_CMUREF, 0, 3),
+ DIV(CLK_DOUT_CPUCL_DBG, "dout_clk_cpucl_dbg",
+ "dout_clk_cpucl_cpu", CLK_CON_DIV_CLK_CPUCL_DBG, 0, 4),
+ DIV(CLK_DOUT_CPUCL_PCLKDBG, "dout_clk_cpucl_pclkdbg",
+ "dout_clk_cpucl_dbg", CLK_CON_DIV_CLK_CPUCL_PCLKDBG, 0, 4),
+};
+
+static const struct samsung_gate_clock cmu_cpucl_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_CPUCL_CLUSTER_CPU, "clk_con_gat_clk_cluster_cpu",
+ "clk_con_gat_clk_cpucl_shortstop", CLK_CON_GAT_CLK_CLUSTER_CPU, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_CPUCL_SHORTSTOP, "clk_con_gat_clk_cpucl_shortstop",
+ "dout_clk_cpucl_cpu", CLK_CON_GAT_CLK_CPUCL_SHORTSTOP, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_CPUCL_CSSYS_IPCLKPORT_PCLKDBG, "cssys_ipclkport_pclkdbg",
+ "dout_clk_cpucl_pclkdbg", CLK_CON_DMYQCH_CON_CSSYS_QCH, 1,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_CPUCL_CSSYS_IPCLKPORT_ATCLK, "cssys_ipclkport_atclk",
+ "dout_clk_cpucl_dbg", CLK_CON_DMYQCH_CON_CSSYS_QCH, 1,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+};
+
+static const struct samsung_cmu_info cmu_cpucl_info __initconst = {
+ .pll_clks = cmu_cpucl_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cmu_cpucl_pll_clks),
+ .fixed_factor_clks = cpucl_ffactor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(cpucl_ffactor_clks),
+ .mux_clks = cmu_cpucl_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmu_cpucl_mux_clks),
+ .div_clks = cmu_cpucl_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cmu_cpucl_div_clks),
+ .gate_clks = cmu_cpucl_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(cmu_cpucl_gate_clks),
+ .nr_clk_ids = CMU_CPUCL_NR_CLK,
+ .clk_regs = cmu_cpucl_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmu_cpucl_clk_regs),
+};
+
+/* Register Offset definitions for CMU_FSYS (0x16c10000) */
+#define PLL_LOCKTIME_PLL_FSYS 0x0004
+#define PLL_CON0_MUX_CLK_FSYS_BUS_USER 0x0120
+#define PLL_CON0_MUX_CLK_FSYS_MMC_USER 0x0140
+#define PLL_CON0_MUX_CLK_FSYS_SCAN0_USER 0x0160
+#define PLL_CON0_MUX_CLK_FSYS_SCAN1_USER 0x0180
+#define PLL_CON0_PLL_FSYS 0x01c0
+#define CLK_CON_DIV_CLK_FSYS_ADC 0x1804
+#define CLK_CON_DIV_CLK_FSYS_BUS300 0x1808
+#define CLK_CON_DIV_CLK_FSYS_BUS_QSPI 0x180c
+#define CLK_CON_DIV_CLK_FSYS_EQOS_25 0x1810
+#define CLK_CON_DIV_CLK_FSYS_EQOS_2P5 0x1814
+#define CLK_CON_DIV_CLK_FSYS_EQOS_500 0x1818
+#define CLK_CON_DIV_CLK_FSYS_EQOS_INT125 0x181c
+#define CLK_CON_DIV_CLK_FSYS_MMC_CARD0 0x1820
+#define CLK_CON_DIV_CLK_FSYS_MMC_CARD1 0x1824
+#define CLK_CON_DIV_CLK_FSYS_OTP_MEM 0x1828
+#define CLK_CON_DIV_CLK_FSYS_PCIE_PHY_REFCLK_SYSPLL 0x182c
+#define CLK_CON_DIV_CLK_FSYS_QSPI 0x1830
+#define CLK_CON_DIV_CLK_FSYS_SCLK_UART 0x1834
+#define CLK_CON_DIV_CLK_FSYS_SFMC_NAND 0x1838
+#define CLK_CON_DIV_SCAN_CLK_FSYS_125 0x183c
+#define CLK_CON_DIV_SCAN_CLK_FSYS_MMC 0x1840
+#define CLK_CON_DIV_SCAN_CLK_FSYS_PCIE_PIPE 0x1844
+#define CLK_CON_FSYS_I2C0_IPCLKPORT_I_PCLK 0x2044
+#define CLK_CON_FSYS_I2C1_IPCLKPORT_I_PCLK 0x2048
+#define CLK_CON_FSYS_UART0_IPCLKPORT_I_PCLK 0x204c
+#define CLK_CON_FSYS_UART0_IPCLKPORT_I_SCLK_UART 0x2050
+#define CLK_CON_MMC0_IPCLKPORT_I_ACLK 0x2070
+#define CLK_CON_MMC1_IPCLKPORT_I_ACLK 0x2078
+#define CLK_CON_DWC_PCIE_CTL_INST_0_DBI_ACLK_UG 0x208c
+#define CLK_CON_DWC_PCIE_CTL_INST_0_MSTR_ACLK_UG 0x2090
+#define CLK_CON_DWC_PCIE_CTL_INST_0_SLV_ACLK_UG 0x2094
+#define CLK_CON_PWM_IPCLKPORT_I_PCLK_S0 0x20a0
+#define CLK_CON_USB20DRD_IPCLKPORT_ACLK_PHYCTRL_20 0x20bc
+#define CLK_CON_USB20DRD_IPCLKPORT_BUS_CLK_EARLY 0x20c0
+#define CLK_CON_XHB_AHBBR_IPCLKPORT_CLK 0x20c4
+#define CLK_CON_XHB_USB_IPCLKPORT_CLK 0x20cc
+#define CLK_CON_BUS_P_FSYS_IPCLKPORT_QSPICLK 0x201c
+#define CLK_CON_DMYQCH_CON_EQOS_TOP_QCH 0x3008
+#define CLK_CON_DMYQCH_CON_MMC0_QCH 0x300c
+#define CLK_CON_DMYQCH_CON_MMC1_QCH 0x3010
+#define CLK_CON_DMYQCH_CON_PCIE_TOP_QCH 0x3018
+#define CLK_CON_DMYQCH_CON_PCIE_TOP_QCH_REF 0x301c
+#define CLK_CON_DMYQCH_CON_QSPI_QCH 0x3020
+#define CLK_CON_DMYQCH_CON_SFMC_QCH 0x3024
+
+static const unsigned long cmu_fsys_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_FSYS,
+ PLL_CON0_MUX_CLK_FSYS_BUS_USER,
+ PLL_CON0_MUX_CLK_FSYS_MMC_USER,
+ PLL_CON0_MUX_CLK_FSYS_SCAN0_USER,
+ PLL_CON0_MUX_CLK_FSYS_SCAN1_USER,
+ PLL_CON0_PLL_FSYS,
+ CLK_CON_DIV_CLK_FSYS_ADC,
+ CLK_CON_DIV_CLK_FSYS_BUS300,
+ CLK_CON_DIV_CLK_FSYS_BUS_QSPI,
+ CLK_CON_DIV_CLK_FSYS_EQOS_25,
+ CLK_CON_DIV_CLK_FSYS_EQOS_2P5,
+ CLK_CON_DIV_CLK_FSYS_EQOS_500,
+ CLK_CON_DIV_CLK_FSYS_EQOS_INT125,
+ CLK_CON_DIV_CLK_FSYS_MMC_CARD0,
+ CLK_CON_DIV_CLK_FSYS_MMC_CARD1,
+ CLK_CON_DIV_CLK_FSYS_OTP_MEM,
+ CLK_CON_DIV_CLK_FSYS_PCIE_PHY_REFCLK_SYSPLL,
+ CLK_CON_DIV_CLK_FSYS_QSPI,
+ CLK_CON_DIV_CLK_FSYS_SCLK_UART,
+ CLK_CON_DIV_CLK_FSYS_SFMC_NAND,
+ CLK_CON_DIV_SCAN_CLK_FSYS_125,
+ CLK_CON_DIV_SCAN_CLK_FSYS_MMC,
+ CLK_CON_DIV_SCAN_CLK_FSYS_PCIE_PIPE,
+ CLK_CON_FSYS_I2C0_IPCLKPORT_I_PCLK,
+ CLK_CON_FSYS_I2C1_IPCLKPORT_I_PCLK,
+ CLK_CON_FSYS_UART0_IPCLKPORT_I_PCLK,
+ CLK_CON_FSYS_UART0_IPCLKPORT_I_SCLK_UART,
+ CLK_CON_MMC0_IPCLKPORT_I_ACLK,
+ CLK_CON_MMC1_IPCLKPORT_I_ACLK,
+ CLK_CON_DWC_PCIE_CTL_INST_0_DBI_ACLK_UG,
+ CLK_CON_DWC_PCIE_CTL_INST_0_MSTR_ACLK_UG,
+ CLK_CON_DWC_PCIE_CTL_INST_0_SLV_ACLK_UG,
+ CLK_CON_PWM_IPCLKPORT_I_PCLK_S0,
+ CLK_CON_USB20DRD_IPCLKPORT_ACLK_PHYCTRL_20,
+ CLK_CON_USB20DRD_IPCLKPORT_BUS_CLK_EARLY,
+ CLK_CON_XHB_AHBBR_IPCLKPORT_CLK,
+ CLK_CON_XHB_USB_IPCLKPORT_CLK,
+ CLK_CON_BUS_P_FSYS_IPCLKPORT_QSPICLK,
+ CLK_CON_DMYQCH_CON_EQOS_TOP_QCH,
+ CLK_CON_DMYQCH_CON_MMC0_QCH,
+ CLK_CON_DMYQCH_CON_MMC1_QCH,
+ CLK_CON_DMYQCH_CON_PCIE_TOP_QCH,
+ CLK_CON_DMYQCH_CON_PCIE_TOP_QCH_REF,
+ CLK_CON_DMYQCH_CON_QSPI_QCH,
+ CLK_CON_DMYQCH_CON_SFMC_QCH,
+};
+
+static const struct samsung_pll_clock cmu_fsys_pll_clks[] __initconst = {
+ PLL(pll_1017x, CLK_FOUT_FSYS_PLL, "fout_pll_fsys", "fin_pll",
+ PLL_LOCKTIME_PLL_FSYS, PLL_CON0_PLL_FSYS, NULL),
+};
+
+PNAME(mout_fsys_scan0_user_p) = { "fin_pll", "dout_clkcmu_fsys_scan0" };
+PNAME(mout_fsys_scan1_user_p) = { "fin_pll", "dout_clkcmu_fsys_scan1" };
+PNAME(mout_fsys_bus_user_p) = { "fin_pll", "dout_clkcmu_fsys_bus" };
+PNAME(mout_fsys_mmc_user_p) = { "fin_pll", "dout_clkcmu_fsys_ip" };
+PNAME(mout_fsys_pll_fsys_p) = { "fin_pll", "fout_pll_fsys" };
+
+static const struct samsung_mux_clock cmu_fsys_mux_clks[] __initconst = {
+ MUX(0, "mout_clk_pll_fsys", mout_fsys_pll_fsys_p, PLL_CON0_PLL_FSYS, 4, 1),
+ MUX(CLK_MOUT_FSYS_SCAN0_USER, "mout_fsys_scan0_user",
+ mout_fsys_scan0_user_p, PLL_CON0_MUX_CLK_FSYS_SCAN0_USER, 4, 1),
+ MUX(CLK_MOUT_FSYS_SCAN1_USER, "mout_fsys_scan1_user",
+ mout_fsys_scan1_user_p, PLL_CON0_MUX_CLK_FSYS_SCAN1_USER, 4, 1),
+ MUX(CLK_MOUT_FSYS_BUS_USER, "mout_fsys_bus_user",
+ mout_fsys_bus_user_p, PLL_CON0_MUX_CLK_FSYS_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_FSYS_MMC_USER, "mout_fsys_mmc_user",
+ mout_fsys_mmc_user_p, PLL_CON0_MUX_CLK_FSYS_MMC_USER, 4, 1),
+};
+
+static const struct samsung_div_clock cmu_fsys_div_clks[] __initconst = {
+ DIV(CLK_DOUT_FSYS_PCIE_PIPE, "dout_fsys_pcie_pipe", "mout_clk_pll_fsys",
+ CLK_CON_DIV_SCAN_CLK_FSYS_PCIE_PIPE, 0, 4),
+ DIV(CLK_DOUT_FSYS_ADC, "dout_fsys_adc", "mout_clk_pll_fsys",
+ CLK_CON_DIV_CLK_FSYS_ADC, 0, 7),
+ DIV(CLK_DOUT_FSYS_PCIE_PHY_REFCLK_SYSPLL, "dout_fsys_pcie_phy_refclk_syspll",
+ "mout_clk_pll_fsys", CLK_CON_DIV_CLK_FSYS_PCIE_PHY_REFCLK_SYSPLL, 0, 8),
+ DIV(CLK_DOUT_FSYS_QSPI, "dout_fsys_qspi", "mout_fsys_mmc_user",
+ CLK_CON_DIV_CLK_FSYS_QSPI, 0, 4),
+ DIV(CLK_DOUT_FSYS_EQOS_INT125, "dout_fsys_eqos_int125", "mout_clk_pll_fsys",
+ CLK_CON_DIV_CLK_FSYS_EQOS_INT125, 0, 4),
+ DIV(CLK_DOUT_FSYS_OTP_MEM, "dout_fsys_otp_mem", "fin_pll",
+ CLK_CON_DIV_CLK_FSYS_OTP_MEM, 0, 9),
+ DIV(CLK_DOUT_FSYS_SCLK_UART, "dout_fsys_sclk_uart", "mout_clk_pll_fsys",
+ CLK_CON_DIV_CLK_FSYS_SCLK_UART, 0, 10),
+ DIV(CLK_DOUT_FSYS_SFMC_NAND, "dout_fsys_sfmc_nand", "mout_fsys_mmc_user",
+ CLK_CON_DIV_CLK_FSYS_SFMC_NAND, 0, 4),
+ DIV(CLK_DOUT_SCAN_CLK_FSYS_125, "dout_scan_clk_fsys_125", "mout_clk_pll_fsys",
+ CLK_CON_DIV_SCAN_CLK_FSYS_125, 0, 4),
+ DIV(CLK_DOUT_FSYS_SCAN_CLK_MMC, "dout_scan_clk_fsys_mmc", "fout_pll_fsys",
+ CLK_CON_DIV_SCAN_CLK_FSYS_MMC, 0, 4),
+ DIV(CLK_DOUT_FSYS_EQOS_25, "dout_fsys_eqos_25", "dout_fsys_eqos_int125",
+ CLK_CON_DIV_CLK_FSYS_EQOS_25, 0, 4),
+ DIV_F(CLK_DOUT_FSYS_EQOS_2p5, "dout_fsys_eqos_2p5", "dout_fsys_eqos_25",
+ CLK_CON_DIV_CLK_FSYS_EQOS_2P5, 0, 4, CLK_SET_RATE_PARENT, 0),
+ DIV(0, "dout_fsys_eqos_500", "mout_clk_pll_fsys",
+ CLK_CON_DIV_CLK_FSYS_EQOS_500, 0, 4),
+ DIV(CLK_DOUT_FSYS_BUS300, "dout_fsys_bus300", "mout_fsys_bus_user",
+ CLK_CON_DIV_CLK_FSYS_BUS300, 0, 4),
+ DIV(CLK_DOUT_FSYS_BUS_QSPI, "dout_fsys_bus_qspi", "mout_fsys_mmc_user",
+ CLK_CON_DIV_CLK_FSYS_BUS_QSPI, 0, 4),
+ DIV(CLK_DOUT_FSYS_MMC_CARD0, "dout_fsys_mmc_card0", "mout_fsys_mmc_user",
+ CLK_CON_DIV_CLK_FSYS_MMC_CARD0, 0, 10),
+ DIV(CLK_DOUT_FSYS_MMC_CARD1, "dout_fsys_mmc_card1", "mout_fsys_mmc_user",
+ CLK_CON_DIV_CLK_FSYS_MMC_CARD1, 0, 10),
+};
+
+static const struct samsung_gate_clock cmu_fsys_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_FSYS_PCIE_PHY_REFCLK_IN, "pcie_sub_ctrl_inst_0_phy_refclk_in",
+ "dout_fsys_pcie_phy_refclk_syspll", CLK_CON_DMYQCH_CON_PCIE_TOP_QCH_REF, 1,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_EQOS_TOP_IPCLKPORT_I_RGMII_TXCLK_2P5,
+ "eqos_top_ipclkport_i_rgmii_txclk_2p5",
+ "dout_fsys_eqos_2p5", CLK_CON_DMYQCH_CON_EQOS_TOP_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_EQOS_TOP_IPCLKPORT_ACLK_I, "eqos_top_ipclkport_aclk_i",
+ "dout_fsys_bus300", CLK_CON_DMYQCH_CON_EQOS_TOP_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_EQOS_TOP_IPCLKPORT_CLK_CSR_I, "eqos_top_ipclkport_clk_csr_i",
+ "dout_fsys_bus300", CLK_CON_DMYQCH_CON_EQOS_TOP_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_PIPE_PAL_INST_0_I_APB_PCLK, "pipe_pal_inst_0_i_apb_pclk",
+ "dout_fsys_bus300", CLK_CON_DMYQCH_CON_PCIE_TOP_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_QSPI_IPCLKPORT_HCLK, "qspi_ipclkport_hclk",
+ "dout_fsys_bus_qspi", CLK_CON_DMYQCH_CON_QSPI_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_QSPI_IPCLKPORT_SSI_CLK, "qspi_ipclkport_ssi_clk",
+ "dout_fsys_qspi", CLK_CON_DMYQCH_CON_QSPI_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_MMC0_IPCLKPORT_SDCLKIN, "mmc0_ipclkport_sdclkin",
+ "dout_fsys_mmc_card0", CLK_CON_DMYQCH_CON_MMC0_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_MMC1_IPCLKPORT_SDCLKIN, "mmc1_ipclkport_sdclkin",
+ "dout_fsys_mmc_card1", CLK_CON_DMYQCH_CON_MMC1_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_SFMC_IPCLKPORT_I_ACLK_NAND, "sfmc_ipclkport_i_aclk_nand",
+ "dout_fsys_sfmc_nand", CLK_CON_DMYQCH_CON_SFMC_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_UART0_SCLK_UART, "uart0_sclk", "dout_fsys_sclk_uart",
+ CLK_CON_FSYS_UART0_IPCLKPORT_I_SCLK_UART, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_DWC_PCIE_CTL_INST_0_MSTR_ACLK_UG, "dwc_pcie_ctl_inst_0_mstr_aclk_ug",
+ "mout_fsys_bus_user", CLK_CON_DWC_PCIE_CTL_INST_0_MSTR_ACLK_UG, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+	GATE(CLK_GOUT_FSYS_DWC_PCIE_CTL_INST_0_SLV_ACLK_UG, "dwc_pcie_ctl_inst_0_slv_aclk_ug",
+ "mout_fsys_bus_user", CLK_CON_DWC_PCIE_CTL_INST_0_SLV_ACLK_UG, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_I2C0_IPCLKPORT_I_PCLK, "fsys_i2c0_ipclkport_i_pclk", "dout_fsys_bus300",
+ CLK_CON_FSYS_I2C0_IPCLKPORT_I_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_I2C1_IPCLKPORT_I_PCLK, "fsys_i2c1_ipclkport_i_pclk", "dout_fsys_bus300",
+ CLK_CON_FSYS_I2C1_IPCLKPORT_I_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_UART0_PCLK, "uart0_pclk", "dout_fsys_bus300",
+ CLK_CON_FSYS_UART0_IPCLKPORT_I_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_MMC0_IPCLKPORT_I_ACLK, "mmc0_ipclkport_i_aclk", "dout_fsys_bus300",
+ CLK_CON_MMC0_IPCLKPORT_I_ACLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_MMC1_IPCLKPORT_I_ACLK, "mmc1_ipclkport_i_aclk", "dout_fsys_bus300",
+ CLK_CON_MMC1_IPCLKPORT_I_ACLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_DWC_PCIE_CTL_INST_0_DBI_ACLK_UG, "dwc_pcie_ctl_inst_0_dbi_aclk_ug",
+ "dout_fsys_bus300", CLK_CON_DWC_PCIE_CTL_INST_0_DBI_ACLK_UG, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_PWM_IPCLKPORT_I_PCLK_S0, "pwm_ipclkport_i_pclk_s0", "dout_fsys_bus300",
+ CLK_CON_PWM_IPCLKPORT_I_PCLK_S0, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_USB20DRD_IPCLKPORT_ACLK_PHYCTRL_20, "usb20drd_ipclkport_aclk_phyctrl_20",
+ "dout_fsys_bus300", CLK_CON_USB20DRD_IPCLKPORT_ACLK_PHYCTRL_20, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_USB20DRD_IPCLKPORT_BUS_CLK_EARLY, "usb20drd_ipclkport_bus_clk_early",
+ "dout_fsys_bus300", CLK_CON_USB20DRD_IPCLKPORT_BUS_CLK_EARLY, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_XHB_AHBBR_IPCLKPORT_CLK, "xhb_ahbbr_ipclkport_clk", "dout_fsys_bus300",
+ CLK_CON_XHB_AHBBR_IPCLKPORT_CLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_XHB_USB_IPCLKPORT_CLK, "xhb_usb_ipclkport_clk", "dout_fsys_bus300",
+ CLK_CON_XHB_USB_IPCLKPORT_CLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_BUS_QSPI, "bus_p_fsys_ipclkport_qspiclk", "dout_fsys_bus_qspi",
+ CLK_CON_BUS_P_FSYS_IPCLKPORT_QSPICLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+};
+
+static const struct samsung_cmu_info cmu_fsys_info __initconst = {
+ .pll_clks = cmu_fsys_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cmu_fsys_pll_clks),
+ .mux_clks = cmu_fsys_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmu_fsys_mux_clks),
+ .div_clks = cmu_fsys_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cmu_fsys_div_clks),
+ .gate_clks = cmu_fsys_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(cmu_fsys_gate_clks),
+ .nr_clk_ids = CMU_FSYS_NR_CLK,
+ .clk_regs = cmu_fsys_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmu_fsys_clk_regs),
+};
+
+/* Register Offset definitions for CMU_IMEM (0x10010000) */
+#define PLL_CON0_MUX_CLK_IMEM_ACLK_USER 0x0100
+#define PLL_CON0_MUX_CLK_IMEM_JPEG_USER 0x0120
+#define CLK_CON_MUX_CLK_IMEM_GIC_CA53 0x1000
+#define CLK_CON_MUX_CLK_IMEM_GIC_CA5 0x1008
+#define CLK_CON_MCT_IPCLKPORT_PCLK 0x2038
+#define CLK_CON_SFRIF_TMU_IMEM_IPCLKPORT_PCLK 0x2044
+
+static const unsigned long cmu_imem_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLK_IMEM_ACLK_USER,
+ PLL_CON0_MUX_CLK_IMEM_JPEG_USER,
+ CLK_CON_MUX_CLK_IMEM_GIC_CA53,
+ CLK_CON_MUX_CLK_IMEM_GIC_CA5,
+ CLK_CON_MCT_IPCLKPORT_PCLK,
+ CLK_CON_SFRIF_TMU_IMEM_IPCLKPORT_PCLK,
+};
+
+PNAME(mout_imem_aclk_user_p) = { "fin_pll", "dout_clkcmu_imem_aclk" };
+PNAME(mout_imem_gic_ca53_p) = { "mout_imem_aclk_user", "fin_pll" };
+PNAME(mout_imem_gic_ca5_p) = { "mout_imem_aclk_user", "fin_pll" };
+PNAME(mout_imem_jpeg_user_p) = { "fin_pll", "dout_clkcmu_imem_jpeg" };
+
+static const struct samsung_mux_clock cmu_imem_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_IMEM_ACLK_USER, "mout_imem_aclk_user",
+ mout_imem_aclk_user_p, PLL_CON0_MUX_CLK_IMEM_ACLK_USER, 4, 1),
+ MUX(CLK_MOUT_IMEM_GIC_CA53, "mout_imem_gic_ca53",
+ mout_imem_gic_ca53_p, CLK_CON_MUX_CLK_IMEM_GIC_CA53, 0, 1),
+ MUX(CLK_MOUT_IMEM_GIC_CA5, "mout_imem_gic_ca5",
+ mout_imem_gic_ca5_p, CLK_CON_MUX_CLK_IMEM_GIC_CA5, 0, 1),
+ MUX(CLK_MOUT_IMEM_JPEG_USER, "mout_imem_jpeg_user",
+ mout_imem_jpeg_user_p, PLL_CON0_MUX_CLK_IMEM_JPEG_USER, 4, 1),
+};
+
+static const struct samsung_gate_clock cmu_imem_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_IMEM_MCT_PCLK, "mct_pclk", "mout_imem_aclk_user",
+ CLK_CON_MCT_IPCLKPORT_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_IMEM_PCLK_TMU0_APBIF, "sfrif_tmu_imem_ipclkport_pclk", "mout_imem_aclk_user",
+ CLK_CON_SFRIF_TMU_IMEM_IPCLKPORT_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+};
+
+static const struct samsung_cmu_info cmu_imem_info __initconst = {
+ .mux_clks = cmu_imem_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmu_imem_mux_clks),
+ .gate_clks = cmu_imem_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(cmu_imem_gate_clks),
+ .nr_clk_ids = CMU_IMEM_NR_CLK,
+ .clk_regs = cmu_imem_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmu_imem_clk_regs),
+};
+
+static void __init artpec8_clk_cmu_imem_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &cmu_imem_info);
+}
+
+CLK_OF_DECLARE(artpec8_clk_cmu_imem, "axis,artpec8-cmu-imem", artpec8_clk_cmu_imem_init);
+
+/* Register Offset definitions for CMU_PERI (0x16410000) */
+#define PLL_CON0_MUX_CLK_PERI_AUDIO_USER 0x0100
+#define PLL_CON0_MUX_CLK_PERI_DISP_USER 0x0120
+#define PLL_CON0_MUX_CLK_PERI_IP_USER 0x0140
+#define CLK_CON_MUX_CLK_PERI_I2S0 0x1000
+#define CLK_CON_MUX_CLK_PERI_I2S1 0x1004
+#define CLK_CON_DIV_CLK_PERI_DSIM 0x1800
+#define CLK_CON_DIV_CLK_PERI_I2S0 0x1804
+#define CLK_CON_DIV_CLK_PERI_I2S1 0x1808
+#define CLK_CON_DIV_CLK_PERI_PCLK 0x180c
+#define CLK_CON_DIV_CLK_PERI_SPI 0x1810
+#define CLK_CON_DIV_CLK_PERI_UART1 0x1814
+#define CLK_CON_DIV_CLK_PERI_UART2 0x1818
+#define CLK_CON_APB_ASYNC_DSIM_IPCLKPORT_PCLKS 0x2004
+#define CLK_CON_PERI_I2C2_IPCLKPORT_I_PCLK 0x2030
+#define CLK_CON_PERI_I2C3_IPCLKPORT_I_PCLK 0x2034
+#define CLK_CON_PERI_SPI0_IPCLKPORT_I_PCLK 0x2048
+#define CLK_CON_PERI_SPI0_IPCLKPORT_I_SCLK_SPI 0x204c
+#define CLK_CON_PERI_UART1_IPCLKPORT_I_PCLK 0x2050
+#define CLK_CON_PERI_UART1_IPCLKPORT_I_SCLK_UART 0x2054
+#define CLK_CON_PERI_UART2_IPCLKPORT_I_PCLK 0x2058
+#define CLK_CON_PERI_UART2_IPCLKPORT_I_SCLK_UART 0x205c
+#define CLK_CON_DMYQCH_CON_AUDIO_OUT_QCH 0x3000
+#define CLK_CON_DMYQCH_CON_DMA4DSIM_QCH 0x3004
+#define CLK_CON_DMYQCH_CON_PERI_I2SSC0_QCH 0x3008
+#define CLK_CON_DMYQCH_CON_PERI_I2SSC1_QCH 0x300c
+
+static const unsigned long cmu_peri_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLK_PERI_AUDIO_USER,
+ PLL_CON0_MUX_CLK_PERI_DISP_USER,
+ PLL_CON0_MUX_CLK_PERI_IP_USER,
+ CLK_CON_MUX_CLK_PERI_I2S0,
+ CLK_CON_MUX_CLK_PERI_I2S1,
+ CLK_CON_DIV_CLK_PERI_DSIM,
+ CLK_CON_DIV_CLK_PERI_I2S0,
+ CLK_CON_DIV_CLK_PERI_I2S1,
+ CLK_CON_DIV_CLK_PERI_PCLK,
+ CLK_CON_DIV_CLK_PERI_SPI,
+ CLK_CON_DIV_CLK_PERI_UART1,
+ CLK_CON_DIV_CLK_PERI_UART2,
+ CLK_CON_APB_ASYNC_DSIM_IPCLKPORT_PCLKS,
+ CLK_CON_PERI_I2C2_IPCLKPORT_I_PCLK,
+ CLK_CON_PERI_I2C3_IPCLKPORT_I_PCLK,
+ CLK_CON_PERI_SPI0_IPCLKPORT_I_PCLK,
+ CLK_CON_PERI_SPI0_IPCLKPORT_I_SCLK_SPI,
+ CLK_CON_PERI_UART1_IPCLKPORT_I_PCLK,
+ CLK_CON_PERI_UART1_IPCLKPORT_I_SCLK_UART,
+ CLK_CON_PERI_UART2_IPCLKPORT_I_PCLK,
+ CLK_CON_PERI_UART2_IPCLKPORT_I_SCLK_UART,
+ CLK_CON_DMYQCH_CON_AUDIO_OUT_QCH,
+ CLK_CON_DMYQCH_CON_DMA4DSIM_QCH,
+ CLK_CON_DMYQCH_CON_PERI_I2SSC0_QCH,
+ CLK_CON_DMYQCH_CON_PERI_I2SSC1_QCH,
+};
+
+static const struct samsung_fixed_rate_clock peri_fixed_clks[] __initconst = {
+ FRATE(0, "clk_peri_audio", NULL, 0, 100000000),
+};
+
+PNAME(mout_peri_ip_user_p) = { "fin_pll", "dout_clkcmu_peri_ip" };
+PNAME(mout_peri_audio_user_p) = { "fin_pll", "dout_clkcmu_peri_audio" };
+PNAME(mout_peri_disp_user_p) = { "fin_pll", "dout_clkcmu_peri_disp" };
+PNAME(mout_peri_i2s0_p) = { "dout_peri_i2s0", "clk_peri_audio" };
+PNAME(mout_peri_i2s1_p) = { "dout_peri_i2s1", "clk_peri_audio" };
+
+static const struct samsung_mux_clock cmu_peri_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERI_IP_USER, "mout_peri_ip_user", mout_peri_ip_user_p,
+ PLL_CON0_MUX_CLK_PERI_IP_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_AUDIO_USER, "mout_peri_audio_user",
+ mout_peri_audio_user_p, PLL_CON0_MUX_CLK_PERI_AUDIO_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_DISP_USER, "mout_peri_disp_user", mout_peri_disp_user_p,
+ PLL_CON0_MUX_CLK_PERI_DISP_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_I2S0, "mout_peri_i2s0", mout_peri_i2s0_p,
+ CLK_CON_MUX_CLK_PERI_I2S0, 0, 1),
+ MUX(CLK_MOUT_PERI_I2S1, "mout_peri_i2s1", mout_peri_i2s1_p,
+ CLK_CON_MUX_CLK_PERI_I2S1, 0, 1),
+};
+
+static const struct samsung_div_clock cmu_peri_div_clks[] __initconst = {
+ DIV(CLK_DOUT_PERI_SPI, "dout_peri_spi", "mout_peri_ip_user",
+ CLK_CON_DIV_CLK_PERI_SPI, 0, 10),
+ DIV(CLK_DOUT_PERI_UART1, "dout_peri_uart1", "mout_peri_ip_user",
+ CLK_CON_DIV_CLK_PERI_UART1, 0, 10),
+ DIV(CLK_DOUT_PERI_UART2, "dout_peri_uart2", "mout_peri_ip_user",
+ CLK_CON_DIV_CLK_PERI_UART2, 0, 10),
+ DIV(CLK_DOUT_PERI_PCLK, "dout_peri_pclk", "mout_peri_ip_user",
+ CLK_CON_DIV_CLK_PERI_PCLK, 0, 4),
+ DIV(CLK_DOUT_PERI_I2S0, "dout_peri_i2s0", "mout_peri_audio_user",
+ CLK_CON_DIV_CLK_PERI_I2S0, 0, 4),
+ DIV(CLK_DOUT_PERI_I2S1, "dout_peri_i2s1", "mout_peri_audio_user",
+ CLK_CON_DIV_CLK_PERI_I2S1, 0, 4),
+ DIV(CLK_DOUT_PERI_DSIM, "dout_peri_dsim", "mout_peri_disp_user",
+ CLK_CON_DIV_CLK_PERI_DSIM, 0, 4),
+};
+
+static const struct samsung_gate_clock cmu_peri_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_PERI_DMA4DSIM_IPCLKPORT_CLK_APB_CLK, "dma4dsim_ipclkport_clk_apb_clk",
+ "dout_peri_pclk", CLK_CON_DMYQCH_CON_DMA4DSIM_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_I2SSC0_IPCLKPORT_CLK_HST, "i2ssc0_ipclkport_clk_hst", "dout_peri_pclk",
+ CLK_CON_DMYQCH_CON_PERI_I2SSC0_QCH, 1, CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERI_I2SSC1_IPCLKPORT_CLK_HST, "i2ssc1_ipclkport_clk_hst", "dout_peri_pclk",
+ CLK_CON_DMYQCH_CON_PERI_I2SSC1_QCH, 1, CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERI_AUDIO_OUT_IPCLKPORT_CLK, "audio_out_ipclkport_clk",
+ "mout_peri_audio_user", CLK_CON_DMYQCH_CON_AUDIO_OUT_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_I2SSC0_IPCLKPORT_CLK, "peri_i2ssc0_ipclkport_clk", "mout_peri_i2s0",
+ CLK_CON_DMYQCH_CON_PERI_I2SSC0_QCH, 1, CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERI_I2SSC1_IPCLKPORT_CLK, "peri_i2ssc1_ipclkport_clk", "mout_peri_i2s1",
+ CLK_CON_DMYQCH_CON_PERI_I2SSC1_QCH, 1, CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERI_DMA4DSIM_IPCLKPORT_CLK_AXI_CLK, "dma4dsim_ipclkport_clk_axi_clk",
+ "mout_peri_disp_user", CLK_CON_DMYQCH_CON_DMA4DSIM_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_SPI0_SCLK_SPI, "peri_spi0_ipclkport_i_sclk_spi", "dout_peri_spi",
+ CLK_CON_PERI_SPI0_IPCLKPORT_I_SCLK_SPI, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERI_UART1_SCLK_UART, "uart1_sclk", "dout_peri_uart1",
+ CLK_CON_PERI_UART1_IPCLKPORT_I_SCLK_UART, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERI_UART2_SCLK_UART, "uart2_sclk", "dout_peri_uart2",
+ CLK_CON_PERI_UART2_IPCLKPORT_I_SCLK_UART, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERI_APB_ASYNC_DSIM_IPCLKPORT_PCLKS, "apb_async_dsim_ipclkport_pclks",
+ "dout_peri_pclk", CLK_CON_APB_ASYNC_DSIM_IPCLKPORT_PCLKS, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERI_I2C2_IPCLKPORT_I_PCLK, "peri_i2c2_ipclkport_i_pclk", "dout_peri_pclk",
+ CLK_CON_PERI_I2C2_IPCLKPORT_I_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERI_I2C3_IPCLKPORT_I_PCLK, "peri_i2c3_ipclkport_i_pclk", "dout_peri_pclk",
+ CLK_CON_PERI_I2C3_IPCLKPORT_I_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERI_SPI0_PCLK, "peri_spi0_ipclkport_i_pclk", "dout_peri_pclk",
+ CLK_CON_PERI_SPI0_IPCLKPORT_I_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERI_UART1_PCLK, "uart1_pclk", "dout_peri_pclk",
+ CLK_CON_PERI_UART1_IPCLKPORT_I_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERI_UART2_PCLK, "uart2_pclk", "dout_peri_pclk",
+ CLK_CON_PERI_UART2_IPCLKPORT_I_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+};
+
+static const struct samsung_cmu_info cmu_peri_info __initconst = {
+ .mux_clks = cmu_peri_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmu_peri_mux_clks),
+ .div_clks = cmu_peri_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cmu_peri_div_clks),
+ .gate_clks = cmu_peri_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(cmu_peri_gate_clks),
+ .fixed_clks = peri_fixed_clks,
+ .nr_fixed_clks = ARRAY_SIZE(peri_fixed_clks),
+ .nr_clk_ids = CMU_PERI_NR_CLK,
+ .clk_regs = cmu_peri_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmu_peri_clk_regs),
+};
+
+/**
+ * artpec8_cmu_probe - Probe function for ARTPEC-8 platform clocks
+ * @pdev: Pointer to the platform device
+ *
+ * Configure the clock hierarchy for the clock domains of the ARTPEC-8 platform.
+ */
+static int __init artpec8_cmu_probe(struct platform_device *pdev)
+{
+ const struct samsung_cmu_info *info;
+ struct device *dev = &pdev->dev;
+
+ info = of_device_get_match_data(dev);
+ exynos_arm64_register_cmu(dev, dev->of_node, info);
+
+ return 0;
+}
+
+static const struct of_device_id artpec8_cmu_of_match[] = {
+ {
+ .compatible = "axis,artpec8-cmu-cmu",
+ .data = &cmu_cmu_info,
+ }, {
+ .compatible = "axis,artpec8-cmu-bus",
+ .data = &cmu_bus_info,
+ }, {
+ .compatible = "axis,artpec8-cmu-core",
+ .data = &cmu_core_info,
+ }, {
+ .compatible = "axis,artpec8-cmu-cpucl",
+ .data = &cmu_cpucl_info,
+ }, {
+ .compatible = "axis,artpec8-cmu-fsys",
+ .data = &cmu_fsys_info,
+ }, {
+ .compatible = "axis,artpec8-cmu-peri",
+ .data = &cmu_peri_info,
+ }, {
+ },
+};
+
+static struct platform_driver artpec8_cmu_driver __refdata = {
+ .driver = {
+ .name = "artpec8-cmu",
+ .of_match_table = artpec8_cmu_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = artpec8_cmu_probe,
+};
+
+static int __init artpec8_cmu_init(void)
+{
+ return platform_driver_register(&artpec8_cmu_driver);
+}
+core_initcall(artpec8_cmu_init);
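
Note the split registration strategy in this new driver: CMU_IMEM alone is registered early via CLK_OF_DECLARE (presumably because it gates the MCT timer's pclk, which the clocksource needs before the platform bus starts probing devices), while the remaining CMUs go through an ordinary platform driver registered at core_initcall time so they can use exynos_arm64_register_cmu() with a struct device. A condensed sketch of the two paths, using a hypothetical "vendor,example-cmu" compatible:

/* Early path: runs from of_clk_init(), before any driver probes. */
static void __init example_early_cmu_init(struct device_node *np)
{
	samsung_cmu_register_one(np, &cmu_imem_info);
}
CLK_OF_DECLARE(example_early_cmu, "vendor,example-cmu",
	       example_early_cmu_init);

/* Late path: normal driver-model probe, matched via of_device_id. */
core_initcall(artpec8_cmu_init);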
diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
index 4e1ebd8a30b1..300f8d5d3c48 100644
--- a/drivers/clk/samsung/clk-cpu.c
+++ b/drivers/clk/samsung/clk-cpu.c
@@ -567,12 +567,14 @@ static int exynos850_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
/* -------------------------------------------------------------------------- */
/* Common round rate callback usable for all types of CPU clocks */
-static long exynos_cpuclk_round_rate(struct clk_hw *hw, unsigned long drate,
- unsigned long *prate)
+static int exynos_cpuclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_hw *parent = clk_hw_get_parent(hw);
- *prate = clk_hw_round_rate(parent, drate);
- return *prate;
+ req->best_parent_rate = clk_hw_round_rate(parent, req->rate);
+ req->rate = req->best_parent_rate;
+
+ return 0;
}
/* Common recalc rate callback usable for all types of CPU clocks */
@@ -591,7 +593,7 @@ static unsigned long exynos_cpuclk_recalc_rate(struct clk_hw *hw,
static const struct clk_ops exynos_cpuclk_clk_ops = {
.recalc_rate = exynos_cpuclk_recalc_rate,
- .round_rate = exynos_cpuclk_round_rate,
+ .determine_rate = exynos_cpuclk_determine_rate,
};
/*
diff --git a/drivers/clk/samsung/clk-exynos990.c b/drivers/clk/samsung/clk-exynos990.c
index 8d3f193d2b4d..6277dd557fab 100644
--- a/drivers/clk/samsung/clk-exynos990.c
+++ b/drivers/clk/samsung/clk-exynos990.c
@@ -17,8 +17,10 @@
#include "clk-pll.h"
/* NOTE: Must be equal to the last clock ID increased by one */
-#define CLKS_NR_TOP (CLK_GOUT_CMU_VRA_BUS + 1)
-#define CLKS_NR_HSI0 (CLK_GOUT_HSI0_XIU_D_HSI0_ACLK + 1)
+#define CLKS_NR_TOP (CLK_DOUT_CMU_CLK_CMUREF + 1)
+#define CLKS_NR_HSI0 (CLK_GOUT_HSI0_LHS_ACEL_D_HSI0_CLK + 1)
+#define CLKS_NR_PERIC0 (CLK_GOUT_PERIC0_SYSREG_PCLK + 1)
+#define CLKS_NR_PERIC1 (CLK_GOUT_PERIC1_XIU_P_ACLK + 1)
#define CLKS_NR_PERIS (CLK_GOUT_PERIS_OTP_CON_TOP_OSCCLK + 1)
/* ---- CMU_TOP ------------------------------------------------------------- */
@@ -45,6 +47,7 @@
#define PLL_CON3_PLL_SHARED3 0x024c
#define PLL_CON0_PLL_SHARED4 0x0280
#define PLL_CON3_PLL_SHARED4 0x028c
+#define CLK_CON_MUX_CLKCMU_DPU_BUS 0x1000
#define CLK_CON_MUX_MUX_CLKCMU_APM_BUS 0x1004
#define CLK_CON_MUX_MUX_CLKCMU_AUD_CPU 0x1008
#define CLK_CON_MUX_MUX_CLKCMU_BUS0_BUS 0x100c
@@ -103,6 +106,8 @@
#define CLK_CON_MUX_MUX_CLKCMU_SSP_BUS 0x10e0
#define CLK_CON_MUX_MUX_CLKCMU_TNR_BUS 0x10e4
#define CLK_CON_MUX_MUX_CLKCMU_VRA_BUS 0x10e8
+#define CLK_CON_MUX_MUX_CLK_CMU_CMUREF 0x10f0
+#define CLK_CON_MUX_MUX_CMU_CMUREF 0x10f4
#define CLK_CON_DIV_CLKCMU_APM_BUS 0x1800
#define CLK_CON_DIV_CLKCMU_AUD_CPU 0x1804
#define CLK_CON_DIV_CLKCMU_BUS0_BUS 0x1808
@@ -162,6 +167,7 @@
#define CLK_CON_DIV_CLKCMU_VRA_BUS 0x18e0
#define CLK_CON_DIV_DIV_CLKCMU_DPU 0x18e8
#define CLK_CON_DIV_DIV_CLKCMU_DPU_ALT 0x18ec
+#define CLK_CON_DIV_DIV_CLK_CMU_CMUREF 0x18f0
#define CLK_CON_DIV_PLL_SHARED0_DIV2 0x18f4
#define CLK_CON_DIV_PLL_SHARED0_DIV3 0x18f8
#define CLK_CON_DIV_PLL_SHARED0_DIV4 0x18fc
@@ -239,13 +245,21 @@ static const unsigned long top_clk_regs[] __initconst = {
PLL_LOCKTIME_PLL_SHARED2,
PLL_LOCKTIME_PLL_SHARED3,
PLL_LOCKTIME_PLL_SHARED4,
+ PLL_CON0_PLL_G3D,
PLL_CON3_PLL_G3D,
+ PLL_CON0_PLL_MMC,
PLL_CON3_PLL_MMC,
+ PLL_CON0_PLL_SHARED0,
PLL_CON3_PLL_SHARED0,
+ PLL_CON0_PLL_SHARED1,
PLL_CON3_PLL_SHARED1,
+ PLL_CON0_PLL_SHARED2,
PLL_CON3_PLL_SHARED2,
+ PLL_CON0_PLL_SHARED3,
PLL_CON3_PLL_SHARED3,
+ PLL_CON0_PLL_SHARED4,
PLL_CON3_PLL_SHARED4,
+ CLK_CON_MUX_CLKCMU_DPU_BUS,
CLK_CON_MUX_MUX_CLKCMU_APM_BUS,
CLK_CON_MUX_MUX_CLKCMU_AUD_CPU,
CLK_CON_MUX_MUX_CLKCMU_BUS0_BUS,
@@ -304,6 +318,8 @@ static const unsigned long top_clk_regs[] __initconst = {
CLK_CON_MUX_MUX_CLKCMU_SSP_BUS,
CLK_CON_MUX_MUX_CLKCMU_TNR_BUS,
CLK_CON_MUX_MUX_CLKCMU_VRA_BUS,
+ CLK_CON_MUX_MUX_CLK_CMU_CMUREF,
+ CLK_CON_MUX_MUX_CMU_CMUREF,
CLK_CON_DIV_CLKCMU_APM_BUS,
CLK_CON_DIV_CLKCMU_AUD_CPU,
CLK_CON_DIV_CLKCMU_BUS0_BUS,
@@ -363,6 +379,7 @@ static const unsigned long top_clk_regs[] __initconst = {
CLK_CON_DIV_CLKCMU_VRA_BUS,
CLK_CON_DIV_DIV_CLKCMU_DPU,
CLK_CON_DIV_DIV_CLKCMU_DPU_ALT,
+ CLK_CON_DIV_DIV_CLK_CMU_CMUREF,
CLK_CON_DIV_PLL_SHARED0_DIV2,
CLK_CON_DIV_PLL_SHARED0_DIV3,
CLK_CON_DIV_PLL_SHARED0_DIV4,
@@ -458,6 +475,8 @@ PNAME(mout_pll_shared3_p) = { "oscclk", "fout_shared3_pll" };
PNAME(mout_pll_shared4_p) = { "oscclk", "fout_shared4_pll" };
PNAME(mout_pll_mmc_p) = { "oscclk", "fout_mmc_pll" };
PNAME(mout_pll_g3d_p) = { "oscclk", "fout_g3d_pll" };
+PNAME(mout_cmu_dpu_bus_p) = { "dout_cmu_dpu",
+ "dout_cmu_dpu_alt" };
PNAME(mout_cmu_apm_bus_p) = { "dout_cmu_shared0_div2",
"dout_cmu_shared2_div2" };
PNAME(mout_cmu_aud_cpu_p) = { "dout_cmu_shared0_div2",
@@ -672,6 +691,12 @@ PNAME(mout_cmu_vra_bus_p) = { "dout_cmu_shared0_div3",
"dout_cmu_shared4_div2",
"dout_cmu_shared0_div4",
"dout_cmu_shared4_div3" };
+PNAME(mout_cmu_cmuref_p) = { "oscclk",
+ "dout_cmu_clk_cmuref" };
+PNAME(mout_cmu_clk_cmuref_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
/*
* Register name to clock name mangling strategy used in this file
@@ -689,19 +714,21 @@ PNAME(mout_cmu_vra_bus_p) = { "dout_cmu_shared0_div3",
static const struct samsung_mux_clock top_mux_clks[] __initconst = {
MUX(CLK_MOUT_PLL_SHARED0, "mout_pll_shared0", mout_pll_shared0_p,
- PLL_CON3_PLL_SHARED0, 4, 1),
+ PLL_CON0_PLL_SHARED0, 4, 1),
MUX(CLK_MOUT_PLL_SHARED1, "mout_pll_shared1", mout_pll_shared1_p,
- PLL_CON3_PLL_SHARED1, 4, 1),
+ PLL_CON0_PLL_SHARED1, 4, 1),
MUX(CLK_MOUT_PLL_SHARED2, "mout_pll_shared2", mout_pll_shared2_p,
- PLL_CON3_PLL_SHARED2, 4, 1),
+ PLL_CON0_PLL_SHARED2, 4, 1),
MUX(CLK_MOUT_PLL_SHARED3, "mout_pll_shared3", mout_pll_shared3_p,
- PLL_CON3_PLL_SHARED3, 4, 1),
+ PLL_CON0_PLL_SHARED3, 4, 1),
MUX(CLK_MOUT_PLL_SHARED4, "mout_pll_shared4", mout_pll_shared4_p,
PLL_CON0_PLL_SHARED4, 4, 1),
MUX(CLK_MOUT_PLL_MMC, "mout_pll_mmc", mout_pll_mmc_p,
PLL_CON0_PLL_MMC, 4, 1),
MUX(CLK_MOUT_PLL_G3D, "mout_pll_g3d", mout_pll_g3d_p,
PLL_CON0_PLL_G3D, 4, 1),
+ MUX(CLK_MOUT_CMU_DPU_BUS, "mout_cmu_dpu_bus",
+ mout_cmu_dpu_bus_p, CLK_CON_MUX_CLKCMU_DPU_BUS, 0, 1),
MUX(CLK_MOUT_CMU_APM_BUS, "mout_cmu_apm_bus",
mout_cmu_apm_bus_p, CLK_CON_MUX_MUX_CLKCMU_APM_BUS, 0, 1),
MUX(CLK_MOUT_CMU_AUD_CPU, "mout_cmu_aud_cpu",
@@ -759,11 +786,11 @@ static const struct samsung_mux_clock top_mux_clks[] __initconst = {
MUX(CLK_MOUT_CMU_DPU_ALT, "mout_cmu_dpu_alt",
mout_cmu_dpu_alt_p, CLK_CON_MUX_MUX_CLKCMU_DPU_ALT, 0, 2),
MUX(CLK_MOUT_CMU_DSP_BUS, "mout_cmu_dsp_bus",
- mout_cmu_dsp_bus_p, CLK_CON_MUX_MUX_CLKCMU_DSP_BUS, 0, 2),
+ mout_cmu_dsp_bus_p, CLK_CON_MUX_MUX_CLKCMU_DSP_BUS, 0, 3),
MUX(CLK_MOUT_CMU_G2D_G2D, "mout_cmu_g2d_g2d",
mout_cmu_g2d_g2d_p, CLK_CON_MUX_MUX_CLKCMU_G2D_G2D, 0, 2),
MUX(CLK_MOUT_CMU_G2D_MSCL, "mout_cmu_g2d_mscl",
- mout_cmu_g2d_mscl_p, CLK_CON_MUX_MUX_CLKCMU_G2D_MSCL, 0, 1),
+ mout_cmu_g2d_mscl_p, CLK_CON_MUX_MUX_CLKCMU_G2D_MSCL, 0, 2),
MUX(CLK_MOUT_CMU_HPM, "mout_cmu_hpm",
mout_cmu_hpm_p, CLK_CON_MUX_MUX_CLKCMU_HPM, 0, 2),
MUX(CLK_MOUT_CMU_HSI0_BUS, "mout_cmu_hsi0_bus",
@@ -775,7 +802,7 @@ static const struct samsung_mux_clock top_mux_clks[] __initconst = {
0, 2),
MUX(CLK_MOUT_CMU_HSI0_USBDP_DEBUG, "mout_cmu_hsi0_usbdp_debug",
mout_cmu_hsi0_usbdp_debug_p,
- CLK_CON_MUX_MUX_CLKCMU_HSI0_USBDP_DEBUG, 0, 2),
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_USBDP_DEBUG, 0, 1),
MUX(CLK_MOUT_CMU_HSI1_BUS, "mout_cmu_hsi1_bus",
mout_cmu_hsi1_bus_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_BUS, 0, 3),
MUX(CLK_MOUT_CMU_HSI1_MMC_CARD, "mout_cmu_hsi1_mmc_card",
@@ -788,7 +815,7 @@ static const struct samsung_mux_clock top_mux_clks[] __initconst = {
0, 2),
MUX(CLK_MOUT_CMU_HSI1_UFS_EMBD, "mout_cmu_hsi1_ufs_embd",
mout_cmu_hsi1_ufs_embd_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_UFS_EMBD,
- 0, 1),
+ 0, 2),
MUX(CLK_MOUT_CMU_HSI2_BUS, "mout_cmu_hsi2_bus",
mout_cmu_hsi2_bus_p, CLK_CON_MUX_MUX_CLKCMU_HSI2_BUS, 0, 1),
MUX(CLK_MOUT_CMU_HSI2_PCIE, "mout_cmu_hsi2_pcie",
@@ -830,6 +857,10 @@ static const struct samsung_mux_clock top_mux_clks[] __initconst = {
mout_cmu_tnr_bus_p, CLK_CON_MUX_MUX_CLKCMU_TNR_BUS, 0, 3),
MUX(CLK_MOUT_CMU_VRA_BUS, "mout_cmu_vra_bus",
mout_cmu_vra_bus_p, CLK_CON_MUX_MUX_CLKCMU_VRA_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_CMUREF, "mout_cmu_cmuref",
+ mout_cmu_cmuref_p, CLK_CON_MUX_MUX_CMU_CMUREF, 0, 1),
+ MUX(CLK_MOUT_CMU_CLK_CMUREF, "mout_cmu_clk_cmuref",
+ mout_cmu_clk_cmuref_p, CLK_CON_MUX_MUX_CLK_CMU_CMUREF, 0, 2),
};
static const struct samsung_div_clock top_div_clks[] __initconst = {
@@ -862,7 +893,7 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
CLK_CON_DIV_PLL_SHARED4_DIV4, 0, 1),
DIV(CLK_DOUT_CMU_APM_BUS, "dout_cmu_apm_bus", "gout_cmu_apm_bus",
- CLK_CON_DIV_CLKCMU_APM_BUS, 0, 3),
+ CLK_CON_DIV_CLKCMU_APM_BUS, 0, 2),
DIV(CLK_DOUT_CMU_AUD_CPU, "dout_cmu_aud_cpu", "gout_cmu_aud_cpu",
CLK_CON_DIV_CLKCMU_AUD_CPU, 0, 3),
DIV(CLK_DOUT_CMU_BUS0_BUS, "dout_cmu_bus0_bus", "gout_cmu_bus0_bus",
@@ -887,9 +918,9 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
CLK_CON_DIV_CLKCMU_CMU_BOOST, 0, 2),
DIV(CLK_DOUT_CMU_CORE_BUS, "dout_cmu_core_bus", "gout_cmu_core_bus",
CLK_CON_DIV_CLKCMU_CORE_BUS, 0, 4),
- DIV(CLK_DOUT_CMU_CPUCL0_DBG_BUS, "dout_cmu_cpucl0_debug",
+ DIV(CLK_DOUT_CMU_CPUCL0_DBG_BUS, "dout_cmu_cpucl0_dbg_bus",
"gout_cmu_cpucl0_dbg_bus", CLK_CON_DIV_CLKCMU_CPUCL0_DBG_BUS,
- 0, 3),
+ 0, 4),
DIV(CLK_DOUT_CMU_CPUCL0_SWITCH, "dout_cmu_cpucl0_switch",
"gout_cmu_cpucl0_switch", CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH, 0, 3),
DIV(CLK_DOUT_CMU_CPUCL1_SWITCH, "dout_cmu_cpucl1_switch",
@@ -924,16 +955,11 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
CLK_CON_DIV_CLKCMU_HSI0_DPGTC, 0, 3),
DIV(CLK_DOUT_CMU_HSI0_USB31DRD, "dout_cmu_hsi0_usb31drd",
"gout_cmu_hsi0_usb31drd", CLK_CON_DIV_CLKCMU_HSI0_USB31DRD, 0, 4),
- DIV(CLK_DOUT_CMU_HSI0_USBDP_DEBUG, "dout_cmu_hsi0_usbdp_debug",
- "gout_cmu_hsi0_usbdp_debug", CLK_CON_DIV_CLKCMU_HSI0_USBDP_DEBUG,
- 0, 4),
DIV(CLK_DOUT_CMU_HSI1_BUS, "dout_cmu_hsi1_bus", "gout_cmu_hsi1_bus",
CLK_CON_DIV_CLKCMU_HSI1_BUS, 0, 3),
DIV(CLK_DOUT_CMU_HSI1_MMC_CARD, "dout_cmu_hsi1_mmc_card",
"gout_cmu_hsi1_mmc_card", CLK_CON_DIV_CLKCMU_HSI1_MMC_CARD,
0, 9),
- DIV(CLK_DOUT_CMU_HSI1_PCIE, "dout_cmu_hsi1_pcie", "gout_cmu_hsi1_pcie",
- CLK_CON_DIV_CLKCMU_HSI1_PCIE, 0, 7),
DIV(CLK_DOUT_CMU_HSI1_UFS_CARD, "dout_cmu_hsi1_ufs_card",
"gout_cmu_hsi1_ufs_card", CLK_CON_DIV_CLKCMU_HSI1_UFS_CARD,
0, 3),
@@ -942,8 +968,6 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
0, 3),
DIV(CLK_DOUT_CMU_HSI2_BUS, "dout_cmu_hsi2_bus", "gout_cmu_hsi2_bus",
CLK_CON_DIV_CLKCMU_HSI2_BUS, 0, 4),
- DIV(CLK_DOUT_CMU_HSI2_PCIE, "dout_cmu_hsi2_pcie", "gout_cmu_hsi2_pcie",
- CLK_CON_DIV_CLKCMU_HSI2_PCIE, 0, 7),
DIV(CLK_DOUT_CMU_IPP_BUS, "dout_cmu_ipp_bus", "gout_cmu_ipp_bus",
CLK_CON_DIV_CLKCMU_IPP_BUS, 0, 4),
DIV(CLK_DOUT_CMU_ITP_BUS, "dout_cmu_itp_bus", "gout_cmu_itp_bus",
@@ -979,8 +1003,22 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
CLK_CON_DIV_CLKCMU_TNR_BUS, 0, 4),
DIV(CLK_DOUT_CMU_VRA_BUS, "dout_cmu_vra_bus", "gout_cmu_vra_bus",
CLK_CON_DIV_CLKCMU_VRA_BUS, 0, 4),
- DIV(CLK_DOUT_CMU_DPU, "dout_cmu_clkcmu_dpu", "gout_cmu_dpu",
- CLK_CON_DIV_DIV_CLKCMU_DPU, 0, 4),
+ DIV(CLK_DOUT_CMU_DPU, "dout_cmu_dpu", "gout_cmu_dpu",
+ CLK_CON_DIV_DIV_CLKCMU_DPU, 0, 3),
+ DIV(CLK_DOUT_CMU_DPU_ALT, "dout_cmu_dpu_alt", "gout_cmu_dpu_bus",
+ CLK_CON_DIV_DIV_CLKCMU_DPU_ALT, 0, 4),
+ DIV(CLK_DOUT_CMU_CLK_CMUREF, "dout_cmu_clk_cmuref", "mout_cmu_clk_cmuref",
+ CLK_CON_DIV_DIV_CLK_CMU_CMUREF, 0, 2),
+};
+
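+/*
+ * These outputs are modelled as fixed-factor clocks with a constant
+ * 1/8 ratio (mult = 1, div = 8) rather than as adjustable dividers.
+ */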
+static const struct samsung_fixed_factor_clock cmu_top_ffactor[] __initconst = {
+ FFACTOR(CLK_DOUT_CMU_HSI1_PCIE, "dout_cmu_hsi1_pcie",
+ "gout_cmu_hsi1_pcie", 1, 8, 0),
+ FFACTOR(CLK_DOUT_CMU_OTP, "dout_cmu_otp", "oscclk", 1, 8, 0),
+ FFACTOR(CLK_DOUT_CMU_HSI0_USBDP_DEBUG, "dout_cmu_hsi0_usbdp_debug",
+ "gout_cmu_hsi0_usbdp_debug", 1, 8, 0),
+ FFACTOR(CLK_DOUT_CMU_HSI2_PCIE, "dout_cmu_hsi2_pcie",
+ "gout_cmu_hsi2_pcie", 1, 8, 0),
};
static const struct samsung_gate_clock top_gate_clks[] __initconst = {
@@ -1126,6 +1164,8 @@ static const struct samsung_cmu_info top_cmu_info __initconst = {
.nr_mux_clks = ARRAY_SIZE(top_mux_clks),
.div_clks = top_div_clks,
.nr_div_clks = ARRAY_SIZE(top_div_clks),
+ .fixed_factor_clks = cmu_top_ffactor,
+ .nr_fixed_factor_clks = ARRAY_SIZE(cmu_top_ffactor),
.gate_clks = top_gate_clks,
.nr_gate_clks = ARRAY_SIZE(top_gate_clks),
.nr_clk_ids = CLKS_NR_TOP,
@@ -1186,6 +1226,8 @@ static const unsigned long hsi0_clk_regs[] __initconst = {
CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSMMU_USB_IPCLKPORT_CLK_S2,
CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSREG_HSI0_IPCLKPORT_PCLK,
CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_ACLK_PHYCTRL,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USB31DRD_REF_CLK_40,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_REF_SOC_PLL,
CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_SCL_APB_PCLK,
CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBPCS_APB_CLK,
CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_BUS_CLK_EARLY,
@@ -1294,6 +1336,10 @@ static const struct samsung_gate_clock hsi0_gate_clks[] __initconst = {
"gout_hsi0_xiu_d_hsi0_aclk", "mout_hsi0_bus_user",
CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_D_HSI0_IPCLKPORT_ACLK,
21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_HSI0_LHS_ACEL_D_HSI0_CLK,
+ "gout_hsi0_lhs_acel_d_hsi0_clk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHS_ACEL_D_HSI0_IPCLKPORT_I_CLK,
+ 21, CLK_IS_CRITICAL, 0),
};
static const struct samsung_cmu_info hsi0_cmu_info __initconst = {
@@ -1307,6 +1353,1150 @@ static const struct samsung_cmu_info hsi0_cmu_info __initconst = {
.clk_name = "bus",
};
+/* ---- CMU_PERIC0 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_PERIC0 (0x10400000) */
+#define PLL_CON0_MUX_CLKCMU_PERIC0_BUS_USER 0x0600
+#define PLL_CON1_MUX_CLKCMU_PERIC0_BUS_USER 0x0604
+#define PLL_CON0_MUX_CLKCMU_PERIC0_UART_DBG 0x0610
+#define PLL_CON1_MUX_CLKCMU_PERIC0_UART_DBG 0x0614
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI00_USI_USER 0x0620
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI00_USI_USER 0x0624
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI01_USI_USER 0x0630
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI01_USI_USER 0x0634
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI02_USI_USER 0x0640
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI02_USI_USER 0x0644
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI03_USI_USER 0x0650
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI03_USI_USER 0x0654
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI04_USI_USER 0x0660
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI04_USI_USER 0x0664
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI05_USI_USER 0x0670
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI05_USI_USER 0x0674
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI13_USI_USER 0x0680
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI13_USI_USER 0x0684
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI14_USI_USER 0x0690
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI14_USI_USER 0x0694
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI15_USI_USER 0x06a0
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI15_USI_USER 0x06a4
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI_I2C_USER 0x06b0
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI_I2C_USER 0x06b4
+#define CLK_CON_DIV_DIV_CLK_PERIC0_UART_DBG 0x1800
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI00_USI 0x1804
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI01_USI 0x1808
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI02_USI 0x180c
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI03_USI 0x1810
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI04_USI 0x1814
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI05_USI 0x1818
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI13_USI 0x181c
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI14_USI 0x1820
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI15_USI 0x1824
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI_I2C 0x1828
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_PERIC0_CMU_PERIC0_IPCLKPORT_PCLK 0x2004
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_OSCCLK_IPCLKPORT_CLK 0x2008
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_D_TZPC_PERIC0_IPCLKPORT_PCLK 0x200c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_GPIO_PERIC0_IPCLKPORT_PCLK 0x2010
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_LHM_AXI_P_PERIC0_IPCLKPORT_I_CLK 0x2014
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_10 0x2018
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_11 0x201c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_12 0x2020
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_13 0x2024
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_14 0x2028
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_15 0x202c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_4 0x2030
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_5 0x2034
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_6 0x2038
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_7 0x203c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_8 0x2040
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_9 0x2044
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_10 0x2048
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_11 0x204c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_12 0x2050
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_13 0x2054
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_14 0x2058
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_15 0x205c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_4 0x2060
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_5 0x2064
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_6 0x2068
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_7 0x206c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_8 0x2070
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_9 0x2074
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_0 0x2078
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_3 0x207c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_4 0x2080
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_5 0x2084
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_6 0x2088
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_7 0x208c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_8 0x2090
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_0 0x2094
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_15 0x2098
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_3 0x209c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_4 0x20a0
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_5 0x20a4
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_6 0x20a8
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_7 0x20ac
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_8 0x20b0
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_BUSP_IPCLKPORT_CLK 0x20b4
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_UART_DBG_IPCLKPORT_CLK 0x20b8
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI00_USI_IPCLKPORT_CLK 0x20bc
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI01_USI_IPCLKPORT_CLK 0x20c0
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI02_USI_IPCLKPORT_CLK 0x20c4
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI03_USI_IPCLKPORT_CLK 0x20c8
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI04_USI_IPCLKPORT_CLK 0x20cc
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI05_USI_IPCLKPORT_CLK 0x20d0
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI13_USI_IPCLKPORT_CLK 0x20d4
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI14_USI_IPCLKPORT_CLK 0x20d8
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI15_USI_IPCLKPORT_CLK 0x20dc
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI_I2C_IPCLKPORT_CLK 0x20e0
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_SYSREG_PERIC0_IPCLKPORT_PCLK 0x20e4
+
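+/* CMU_PERIC0 register list for suspend/resume save and restore */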
+static const unsigned long peric0_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERIC0_BUS_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_UART_DBG,
+ PLL_CON1_MUX_CLKCMU_PERIC0_UART_DBG,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI00_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI00_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI01_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI01_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI02_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI02_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI03_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI03_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI04_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI04_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI05_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI05_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI13_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI13_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI14_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI14_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI15_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI15_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI_I2C_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI_I2C_USER,
+ CLK_CON_DIV_DIV_CLK_PERIC0_UART_DBG,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI00_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI01_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI02_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI03_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI04_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI05_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI13_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI14_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI15_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI_I2C,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_PERIC0_CMU_PERIC0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_D_TZPC_PERIC0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_GPIO_PERIC0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_LHM_AXI_P_PERIC0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_10,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_11,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_12,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_13,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_14,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_15,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_5,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_7,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_8,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_9,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_10,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_11,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_12,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_13,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_14,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_15,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_5,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_7,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_8,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_9,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_0,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_3,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_5,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_7,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_8,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_0,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_15,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_3,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_5,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_7,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_8,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_BUSP_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_UART_DBG_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI00_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI01_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI02_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI03_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI04_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI05_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI13_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI14_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI15_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI_I2C_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_SYSREG_PERIC0_IPCLKPORT_PCLK,
+};
+
+/* Parent clock list for CMU_PERIC0 muxes */
+PNAME(mout_peric0_bus_user_p) = { "oscclk", "dout_cmu_peric0_bus" };
+PNAME(mout_peric0_uart_dbg_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi00_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi01_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi02_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi03_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi04_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi05_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi13_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi14_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi15_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi_i2c_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+
+static const struct samsung_mux_clock peric0_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERIC0_BUS_USER, "mout_peric0_bus_user",
+ mout_peric0_bus_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_BUS_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_UART_DBG, "mout_peric0_uart_dbg",
+ mout_peric0_uart_dbg_p, PLL_CON0_MUX_CLKCMU_PERIC0_UART_DBG,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI00_USI_USER, "mout_peric0_usi00_usi_user",
+ mout_peric0_usi00_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI00_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI01_USI_USER, "mout_peric0_usi01_usi_user",
+ mout_peric0_usi01_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI01_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI02_USI_USER, "mout_peric0_usi02_usi_user",
+ mout_peric0_usi02_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI02_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI03_USI_USER, "mout_peric0_usi03_usi_user",
+ mout_peric0_usi03_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI03_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI04_USI_USER, "mout_peric0_usi04_usi_user",
+ mout_peric0_usi04_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI04_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI05_USI_USER, "mout_peric0_usi05_usi_user",
+ mout_peric0_usi05_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI05_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI13_USI_USER, "mout_peric0_usi13_usi_user",
+ mout_peric0_usi13_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI13_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI14_USI_USER, "mout_peric0_usi14_usi_user",
+ mout_peric0_usi14_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI14_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI15_USI_USER, "mout_peric0_usi15_usi_user",
+ mout_peric0_usi15_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI15_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI_I2C_USER, "mout_peric0_usi_i2c_user",
+ mout_peric0_usi_i2c_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI_I2C_USER,
+ 4, 1),
+};
+
+static const struct samsung_div_clock peric0_div_clks[] __initconst = {
+ DIV(CLK_DOUT_PERIC0_UART_DBG, "dout_peric0_uart_dbg",
+ "mout_peric0_uart_dbg",
+ CLK_CON_DIV_DIV_CLK_PERIC0_UART_DBG,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI00_USI, "dout_peric0_usi00_usi",
+ "mout_peric0_usi00_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI00_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI01_USI, "dout_peric0_usi01_usi",
+ "mout_peric0_usi01_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI01_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI02_USI, "dout_peric0_usi02_usi",
+ "mout_peric0_usi02_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI02_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI03_USI, "dout_peric0_usi03_usi",
+ "mout_peric0_usi03_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI03_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI04_USI, "dout_peric0_usi04_usi",
+ "mout_peric0_usi04_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI04_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI05_USI, "dout_peric0_usi05_usi",
+ "mout_peric0_usi05_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI05_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI13_USI, "dout_peric0_usi13_usi",
+ "mout_peric0_usi13_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI13_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI14_USI, "dout_peric0_usi14_usi",
+ "mout_peric0_usi14_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI14_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI15_USI, "dout_peric0_usi15_usi",
+ "mout_peric0_usi15_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI15_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI_I2C, "dout_peric0_usi_i2c",
+ "mout_peric0_usi_i2c_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI_I2C,
+ 0, 4),
+};
+
+static const struct samsung_gate_clock peric0_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_PERIC0_CMU_PCLK, "gout_peric0_cmu_pclk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_PERIC0_CMU_PERIC0_IPCLKPORT_PCLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC0_OSCCLK_CLK, "gout_peric0_oscclk_clk",
+ "oscclk",
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_OSCCLK_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_D_TZPC_PCLK, "gout_peric0_d_tzpc_pclk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_D_TZPC_PERIC0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_GPIO_PCLK, "gout_peric0_gpio_pclk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_GPIO_PERIC0_IPCLKPORT_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERIC0_LHM_AXI_P_CLK, "gout_peric0_lhm_axi_p_clk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_LHM_AXI_P_PERIC0_IPCLKPORT_I_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_10, "gout_peric0_top0_ipclk_10",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_10,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_11, "gout_peric0_top0_ipclk_11",
+ "dout_peric0_usi03_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_11,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_12, "gout_peric0_top0_ipclk_12",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_12,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_13, "gout_peric0_top0_ipclk_13",
+ "dout_peric0_usi04_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_13,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_14, "gout_peric0_top0_ipclk_14",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_14,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_15, "gout_peric0_top0_ipclk_15",
+ "dout_peric0_usi05_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_15,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_4, "gout_peric0_top0_ipclk_4",
+ "dout_peric0_uart_dbg",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_5, "gout_peric0_top0_ipclk_5",
+ "dout_peric0_usi00_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_5,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_6, "gout_peric0_top0_ipclk_6",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_6,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_7, "gout_peric0_top0_ipclk_7",
+ "dout_peric0_usi01_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_7,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_8, "gout_peric0_top0_ipclk_8",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_8,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_9, "gout_peric0_top0_ipclk_9",
+ "dout_peric0_usi02_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_9,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_10, "gout_peric0_top0_pclk_10",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_10,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_11, "gout_peric0_top0_pclk_11",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_11,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_12, "gout_peric0_top0_pclk_12",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_12,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_13, "gout_peric0_top0_pclk_13",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_13,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_14, "gout_peric0_top0_pclk_14",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_14,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_15, "gout_peric0_top0_pclk_15",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_15,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_4, "gout_peric0_top0_pclk_4",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_5, "gout_peric0_top0_pclk_5",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_5,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_6, "gout_peric0_top0_pclk_6",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_6,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_7, "gout_peric0_top0_pclk_7",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_7,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_8, "gout_peric0_top0_pclk_8",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_8,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_9, "gout_peric0_top0_pclk_9",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_9,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_IPCLK_0, "gout_peric0_top1_ipclk_0",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_IPCLK_3, "gout_peric0_top1_ipclk_3",
+ "dout_peric0_usi13_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_3,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_IPCLK_4, "gout_peric0_top1_ipclk_4",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_IPCLK_5, "gout_peric0_top1_ipclk_5",
+ "dout_peric0_usi14_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_5,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_IPCLK_6, "gout_peric0_top1_ipclk_6",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_6,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_IPCLK_7, "gout_peric0_top1_ipclk_7",
+ "dout_peric0_usi15_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_7,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_IPCLK_8, "gout_peric0_top1_ipclk_8",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_8,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_PCLK_0, "gout_peric0_top1_pclk_0",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_PCLK_15, "gout_peric0_top1_pclk_15",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_15,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_PCLK_3, "gout_peric0_top1_pclk_3",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_3,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_PCLK_4, "gout_peric0_top1_pclk_4",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_PCLK_5, "gout_peric0_top1_pclk_5",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_5,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_PCLK_6, "gout_peric0_top1_pclk_6",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_6,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_PCLK_7, "gout_peric0_top1_pclk_7",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_7,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_PCLK_8, "gout_peric0_top1_pclk_8",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_8,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_BUSP_CLK, "gout_peric0_busp_clk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_BUSP_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_UART_DBG_CLK, "gout_peric0_uart_dbg_clk",
+ "dout_peric0_uart_dbg",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_UART_DBG_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI00_USI_CLK, "gout_peric0_usi00_usi_clk",
+ "dout_peric0_usi00_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI00_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI01_USI_CLK, "gout_peric0_usi01_usi_clk",
+ "dout_peric0_usi01_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI01_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI02_USI_CLK, "gout_peric0_usi02_usi_clk",
+ "dout_peric0_usi02_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI02_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI03_USI_CLK, "gout_peric0_usi03_usi_clk",
+ "dout_peric0_usi03_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI03_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI04_USI_CLK, "gout_peric0_usi04_usi_clk",
+ "dout_peric0_usi04_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI04_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI05_USI_CLK, "gout_peric0_usi05_usi_clk",
+ "dout_peric0_usi05_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI05_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI13_USI_CLK, "gout_peric0_usi13_usi_clk",
+ "dout_peric0_usi13_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI13_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI14_USI_CLK, "gout_peric0_usi14_usi_clk",
+ "dout_peric0_usi14_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI14_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI15_USI_CLK, "gout_peric0_usi15_usi_clk",
+ "dout_peric0_usi15_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI15_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI_I2C_CLK, "gout_peric0_usi_i2c_clk",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI_I2C_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_SYSREG_PCLK, "gout_peric0_sysreg_pclk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_SYSREG_PERIC0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+};
+
+static const struct samsung_cmu_info peric0_cmu_info __initconst = {
+ .mux_clks = peric0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peric0_mux_clks),
+ .div_clks = peric0_div_clks,
+ .nr_div_clks = ARRAY_SIZE(peric0_div_clks),
+ .gate_clks = peric0_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peric0_gate_clks),
+ .nr_clk_ids = CLKS_NR_PERIC0,
+ .clk_regs = peric0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peric0_clk_regs),
+ .clk_name = "bus",
+};
+
+/* ---- CMU_PERIC1 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_PERIC1 (0x10700000) */
+#define PLL_CON0_MUX_CLKCMU_PERIC1_BUS_USER 0x0600
+#define PLL_CON1_MUX_CLKCMU_PERIC1_BUS_USER 0x0604
+#define PLL_CON0_MUX_CLKCMU_PERIC1_UART_BT_USER 0x0610
+#define PLL_CON1_MUX_CLKCMU_PERIC1_UART_BT_USER 0x0614
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI06_USI_USER 0x0620
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI06_USI_USER 0x0624
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI07_USI_USER 0x0630
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI07_USI_USER 0x0634
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI08_USI_USER 0x0640
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI08_USI_USER 0x0644
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI09_USI_USER 0x0650
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI09_USI_USER 0x0654
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI10_USI_USER 0x0660
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI10_USI_USER 0x0664
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI11_USI_USER 0x0670
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI11_USI_USER 0x0674
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI12_USI_USER 0x0680
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI12_USI_USER 0x0684
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI16_USI_USER 0x0690
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI16_USI_USER 0x0694
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI17_USI_USER 0x06a0
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI17_USI_USER 0x06a4
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI18_USI_USER 0x06b0
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI18_USI_USER 0x06b4
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI_I2C_USER 0x06c0
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI_I2C_USER 0x06c4
+#define CLK_CON_DIV_DIV_CLK_PERIC1_UART_BT 0x1800
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI06_USI 0x1804
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI07_USI 0x1808
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI08_USI 0x180c
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI09_USI 0x1810
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI 0x1814
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI 0x1818
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI12_USI 0x181c
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI16_USI 0x1820
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI17_USI 0x1824
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI18_USI 0x1828
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI_I2C 0x182c
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_PERIC1_CMU_PERIC1_IPCLKPORT_PCLK 0x2004
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_UART_BT_IPCLKPORT_CLK 0x2008
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI12_USI_IPCLKPORT_CLK 0x200c
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI18_USI_IPCLKPORT_CLK 0x2010
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_D_TZPC_PERIC1_IPCLKPORT_PCLK 0x2014
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_GPIO_PERIC1_IPCLKPORT_PCLK 0x2018
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_CSISPERIC1_IPCLKPORT_I_CLK 0x201c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_PERIC1_IPCLKPORT_I_CLK 0x2020
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_10 0x2024
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_11 0x2028
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_12 0x202c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_13 0x2030
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_14 0x2034
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_15 0x2038
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_4 0x203c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_10 0x2040
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_11 0x2044
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_12 0x2048
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_13 0x204c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_14 0x2050
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_15 0x2054
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_4 0x2058
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_0 0x205c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_1 0x2060
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_10 0x2064
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_12 0x206c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_13 0x2070
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_14 0x2074
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_15 0x2078
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_2 0x207c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_3 0x2080
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_4 0x2084
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_5 0x2088
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_6 0x208c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_7 0x2090
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_9 0x2098
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_0 0x209c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_1 0x20a0
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_10 0x20a4
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_12 0x20ac
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_13 0x20b0
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_14 0x20b4
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_15 0x20b8
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_2 0x20bc
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_3 0x20c0
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_4 0x20c4
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_5 0x20c8
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_6 0x20cc
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_7 0x20d0
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_9 0x20d8
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_BUSP_IPCLKPORT_CLK 0x20dc
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_OSCCLK_IPCLKPORT_CLK 0x20e0
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI06_USI_IPCLKPORT_CLK 0x20e4
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI07_USI_IPCLKPORT_CLK 0x20e8
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI08_USI_IPCLKPORT_CLK 0x20ec
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI09_USI_IPCLKPORT_CLK 0x20f0
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI10_USI_IPCLKPORT_CLK 0x20f4
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI11_USI_IPCLKPORT_CLK 0x20f8
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI16_USI_IPCLKPORT_CLK 0x20fc
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI17_USI_IPCLKPORT_CLK 0x2100
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI_I2C_IPCLKPORT_CLK 0x2104
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SYSREG_PERIC1_IPCLKPORT_PCLK 0x2108
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI16_I3C_IPCLKPORT_I_PCLK 0x210c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI16_I3C_IPCLKPORT_I_SCLK 0x2110
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI17_I3C_IPCLKPORT_I_PCLK 0x2114
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI17_I3C_IPCLKPORT_I_SCLK 0x2118
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_XIU_P_PERIC1_IPCLKPORT_ACLK 0x211c
+
+static const unsigned long peric1_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERIC1_BUS_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_UART_BT_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_UART_BT_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI06_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI06_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI07_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI07_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI08_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI08_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI09_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI09_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI10_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI10_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI11_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI11_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI12_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI12_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI16_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI16_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI17_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI17_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI18_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI18_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI_I2C_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI_I2C_USER,
+ CLK_CON_DIV_DIV_CLK_PERIC1_UART_BT,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI06_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI07_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI08_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI09_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI12_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI16_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI17_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI18_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI_I2C,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_PERIC1_CMU_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_UART_BT_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI12_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI18_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_D_TZPC_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_GPIO_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_CSISPERIC1_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_PERIC1_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_10,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_11,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_12,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_13,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_14,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_15,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_10,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_11,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_12,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_13,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_14,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_15,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_0,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_1,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_10,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_12,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_13,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_14,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_15,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_2,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_3,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_5,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_7,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_9,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_0,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_1,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_10,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_12,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_13,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_14,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_15,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_2,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_3,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_5,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_7,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_9,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_BUSP_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI06_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI07_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI08_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI09_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI10_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI11_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI16_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI17_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI_I2C_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SYSREG_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI16_I3C_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI16_I3C_IPCLKPORT_I_SCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI17_I3C_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI17_I3C_IPCLKPORT_I_SCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_XIU_P_PERIC1_IPCLKPORT_ACLK,
+};
+
+/* Parent clock list for CMU_PERIC1 muxes */
+PNAME(mout_peric1_bus_user_p) = { "oscclk", "dout_cmu_peric1_bus" };
+PNAME(mout_peric1_uart_bt_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi06_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi07_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi08_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi09_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi10_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi11_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi12_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi18_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi16_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi17_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi_i2c_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+
+static const struct samsung_mux_clock peric1_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERIC1_BUS_USER, "mout_peric1_bus_user",
+ mout_peric1_bus_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_BUS_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_UART_BT_USER, "mout_peric1_uart_bt_user",
+ mout_peric1_uart_bt_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_UART_BT_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI06_USI_USER, "mout_peric1_usi06_usi_user",
+ mout_peric1_usi06_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI06_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI07_USI_USER, "mout_peric1_usi07_usi_user",
+ mout_peric1_usi07_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI07_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI08_USI_USER, "mout_peric1_usi08_usi_user",
+ mout_peric1_usi08_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI08_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI09_USI_USER, "mout_peric1_usi09_usi_user",
+ mout_peric1_usi09_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI09_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI10_USI_USER, "mout_peric1_usi10_usi_user",
+ mout_peric1_usi10_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI10_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI11_USI_USER, "mout_peric1_usi11_usi_user",
+ mout_peric1_usi11_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI11_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI12_USI_USER, "mout_peric1_usi12_usi_user",
+ mout_peric1_usi12_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI12_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI18_USI_USER, "mout_peric1_usi18_usi_user",
+ mout_peric1_usi18_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI18_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI16_USI_USER, "mout_peric1_usi16_usi_user",
+ mout_peric1_usi16_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI16_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI17_USI_USER, "mout_peric1_usi17_usi_user",
+ mout_peric1_usi17_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI17_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI_I2C_USER, "mout_peric1_usi_i2c_user",
+ mout_peric1_usi_i2c_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI_I2C_USER,
+ 4, 1),
+};
+
+static const struct samsung_div_clock peric1_div_clks[] __initconst = {
+ DIV(CLK_DOUT_PERIC1_UART_BT, "dout_peric1_uart_bt",
+ "mout_peric1_uart_bt_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_UART_BT,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI06_USI, "dout_peric1_usi06_usi",
+ "mout_peric1_usi06_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI06_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI07_USI, "dout_peric1_usi07_usi",
+ "mout_peric1_usi07_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI07_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI08_USI, "dout_peric1_usi08_usi",
+ "mout_peric1_usi08_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI08_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI18_USI, "dout_peric1_usi18_usi",
+ "mout_peric1_usi18_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI18_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI12_USI, "dout_peric1_usi12_usi",
+ "mout_peric1_usi12_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI12_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI09_USI, "dout_peric1_usi09_usi",
+ "mout_peric1_usi09_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI09_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI10_USI, "dout_peric1_usi10_usi",
+ "mout_peric1_usi10_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI11_USI, "dout_peric1_usi11_usi",
+ "mout_peric1_usi11_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI16_USI, "dout_peric1_usi16_usi",
+ "mout_peric1_usi16_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI16_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI17_USI, "dout_peric1_usi17_usi",
+ "mout_peric1_usi17_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI17_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI_I2C, "dout_peric1_usi_i2c",
+ "mout_peric1_usi_i2c_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI_I2C,
+ 0, 4),
+};
+
+static const struct samsung_gate_clock peric1_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_PERIC1_CMU_PCLK, "gout_peric1_cmu_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_PERIC1_CMU_PERIC1_IPCLKPORT_PCLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC1_UART_BT_CLK, "gout_peric1_uart_bt_clk",
+ "dout_peric1_uart_bt",
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_UART_BT_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI12_USI_CLK, "gout_peric1_usi12_usi_clk",
+ "dout_peric1_usi12_usi",
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI12_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI18_USI_CLK, "gout_peric1_usi18_usi_clk",
+ "dout_peric1_usi18_usi",
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI18_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_D_TZPC_PCLK, "gout_peric1_d_tzpc_pclk",
+ "dout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_D_TZPC_PERIC1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_GPIO_PCLK, "gout_peric1_gpio_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_GPIO_PERIC1_IPCLKPORT_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERIC1_LHM_AXI_P_CSIS_CLK, "gout_peric1_lhm_axi_p_csis_clk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_CSISPERIC1_IPCLKPORT_I_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_LHM_AXI_P_CLK, "gout_peric1_lhm_axi_p_clk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_PERIC1_IPCLKPORT_I_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_IPCLK_10, "gout_peric1_top0_ipclk_10",
+ "dout_peric1_usi06_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_10,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_IPCLK_11, "gout_peric1_top0_ipclk_11",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_11,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_IPCLK_12, "gout_peric1_top0_ipclk_12",
+ "dout_peric1_usi07_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_12,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_IPCLK_13, "gout_peric1_top0_ipclk_13",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_13,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_IPCLK_14, "gout_peric1_top0_ipclk_14",
+ "dout_peric1_usi08_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_14,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_IPCLK_15, "gout_peric1_top0_ipclk_15",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_15,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_IPCLK_4, "gout_peric1_top0_ipclk_4",
+ "dout_peric1_uart_bt",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_PCLK_10, "gout_peric1_top0_pclk_10",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_10,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_PCLK_11, "gout_peric1_top0_pclk_11",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_11,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_PCLK_12, "gout_peric1_top0_pclk_12",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_12,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_PCLK_13, "gout_peric1_top0_pclk_13",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_13,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_PCLK_14, "gout_peric1_top0_pclk_14",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_14,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_PCLK_15, "gout_peric1_top0_pclk_15",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_15,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_PCLK_4, "gout_peric1_top0_pclk_4",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_0, "gout_peric1_top1_ipclk_0",
+ "dout_peric1_usi09_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_1, "gout_peric1_top1_ipclk_1",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_1,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_10, "gout_peric1_top1_ipclk_10",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_10,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_12, "gout_peric1_top1_ipclk_12",
+ "dout_peric1_usi12_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_12,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_13, "gout_peric1_top1_ipclk_13",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_13,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_14, "gout_peric1_top1_ipclk_14",
+ "dout_peric1_usi18_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_14,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_15, "gout_peric1_top1_ipclk_15",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_15,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_2, "gout_peric1_top1_ipclk_2",
+ "dout_peric1_usi10_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_2,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_3, "gout_peric1_top1_ipclk_3",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_3,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_4, "gout_peric1_top1_ipclk_4",
+ "dout_peric1_usi11_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_5, "gout_peric1_top1_ipclk_5",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_5,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_6, "gout_peric1_top1_ipclk_6",
+ "dout_peric1_usi16_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_6,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_7, "gout_peric1_top1_ipclk_7",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_7,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_9, "gout_peric1_top1_ipclk_9",
+ "dout_peric1_usi17_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_9,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_0, "gout_peric1_top1_pclk_0",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_1, "gout_peric1_top1_pclk_1",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_1,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_10, "gout_peric1_top1_pclk_10",
+ "dout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_10,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_12, "gout_peric1_top1_pclk_12",
+ "dout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_12,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_13, "gout_peric1_top1_pclk_13",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_13,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_14, "gout_peric1_top1_pclk_14",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_14,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_15, "gout_peric1_top1_pclk_15",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_15,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_2, "gout_peric1_top1_pclk_2",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_2,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_3, "gout_peric1_top1_pclk_3",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_3,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_4, "gout_peric1_top1_pclk_4",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_5, "gout_peric1_top1_pclk_5",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_5,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_6, "gout_peric1_top1_pclk_6",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_6,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_7, "gout_peric1_top1_pclk_7",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_7,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_9, "gout_peric1_top1_pclk_9",
+ "dout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_9,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_BUSP_CLK, "gout_peric1_busp_clk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_BUSP_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_OSCCLK_CLK, "gout_peric1_oscclk_clk",
+ "oscclk",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_OSCCLK_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI06_USI_CLK, "gout_peric1_usi06_usi_clk",
+ "dout_peric1_usi06_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI06_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI07_USI_CLK, "gout_peric1_usi07_usi_clk",
+ "dout_peric1_usi07_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI07_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI08_USI_CLK, "gout_peric1_usi08_usi_clk",
+ "dout_peric1_usi08_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI08_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI09_USI_CLK, "gout_peric1_usi09_usi_clk",
+ "dout_peric1_usi09_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI09_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI10_USI_CLK, "gout_peric1_usi10_usi_clk",
+ "dout_peric1_usi10_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI10_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI11_USI_CLK, "gout_peric1_usi11_usi_clk",
+ "dout_peric1_usi11_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI11_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI16_USI_CLK, "gout_peric1_usi16_usi_clk",
+ "dout_peric1_usi16_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI16_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI17_USI_CLK, "gout_peric1_usi17_usi_clk",
+ "dout_peric1_usi17_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI17_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI_I2C_CLK, "gout_peric1_usi_i2c_clk",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI_I2C_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_SYSREG_PCLK, "gout_peric1_sysreg_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SYSREG_PERIC1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI16_I3C_PCLK, "gout_peric1_usi16_i3c_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI16_I3C_IPCLKPORT_I_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI16_I3C_SCLK, "gout_peric1_usi16_i3c_sclk",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI16_I3C_IPCLKPORT_I_SCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI17_I3C_PCLK, "gout_peric1_usi17_i3c_pclk",
+ "dout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI17_I3C_IPCLKPORT_I_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI17_I3C_SCLK, "gout_peric1_usi17_i3c_sclk",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI17_I3C_IPCLKPORT_I_SCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_XIU_P_ACLK, "gout_peric1_xiu_p_aclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_XIU_P_PERIC1_IPCLKPORT_ACLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+};
+
+static const struct samsung_cmu_info peric1_cmu_info __initconst = {
+ .mux_clks = peric1_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peric1_mux_clks),
+ .div_clks = peric1_div_clks,
+ .nr_div_clks = ARRAY_SIZE(peric1_div_clks),
+ .gate_clks = peric1_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peric1_gate_clks),
+ .nr_clk_ids = CLKS_NR_PERIC1,
+ .clk_regs = peric1_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peric1_clk_regs),
+ .clk_name = "bus",
+};
+
/* ---- CMU_PERIS ----------------------------------------------------------- */
/* Register Offset definitions for CMU_PERIS (0x10020000) */
@@ -1500,6 +2690,12 @@ static const struct of_device_id exynos990_cmu_of_match[] = {
{
.compatible = "samsung,exynos990-cmu-hsi0",
.data = &hsi0_cmu_info,
+ }, {
+ .compatible = "samsung,exynos990-cmu-peric0",
+ .data = &peric0_cmu_info,
+ }, {
+ .compatible = "samsung,exynos990-cmu-peric1",
+ .data = &peric1_cmu_info,
},
{ },
};
diff --git a/drivers/clk/samsung/clk-fsd.c b/drivers/clk/samsung/clk-fsd.c
index 594931334574..4124d65e3d18 100644
--- a/drivers/clk/samsung/clk-fsd.c
+++ b/drivers/clk/samsung/clk-fsd.c
@@ -89,7 +89,7 @@
#define CLKS_NR_FSYS1 (PCIE_LINK1_IPCLKPORT_SLV_ACLK + 1)
#define CLKS_NR_IMEM (IMEM_TMU_GT_IPCLKPORT_I_CLK_TS + 1)
#define CLKS_NR_MFC (MFC_MFC_IPCLKPORT_ACLK + 1)
-#define CLKS_NR_CAM_CSI (CAM_CSI2_3_IPCLKPORT_I_ACLK + 1)
+#define CLKS_NR_CAM_CSI (CAM_CSI2_3_IPCLKPORT_I_PCLK + 1)
static const unsigned long cmu_clk_regs[] __initconst = {
PLL_LOCKTIME_PLL_SHARED0,
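
In the Samsung clock drivers, CLKS_NR_* sizes the provider's clock table, so it must stay at the highest exported clock ID plus one, and a GATE() registered with ID 0 is registered but never added to the provider lookup. Giving the CSI PCLK gates real IDs below therefore also requires bumping this count, as done here. A minimal sketch of the convention (the numeric value is illustrative, not the real binding constant):

/* Clock IDs index the provider's clk table, so the table size is
 * always "highest exported ID + 1". The value 39 is illustrative. */
#define CAM_CSI2_3_IPCLKPORT_I_PCLK	39
#define CLKS_NR_CAM_CSI			(CAM_CSI2_3_IPCLKPORT_I_PCLK + 1)
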
@@ -1646,7 +1646,7 @@ static const struct samsung_pll_rate_table pll_cam_csi_rate_table[] __initconst
};
static const struct samsung_pll_clock cam_csi_pll_clks[] __initconst = {
- PLL(pll_142xx, 0, "fout_pll_cam_csi", "fin_pll",
+ PLL(pll_142xx, CAM_CSI_PLL, "fout_pll_cam_csi", "fin_pll",
PLL_LOCKTIME_PLL_CAM_CSI, PLL_CON0_PLL_CAM_CSI, pll_cam_csi_rate_table),
};
@@ -1682,51 +1682,51 @@ static const struct samsung_gate_clock cam_csi_gate_clks[] __initconst = {
GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__NOC, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI0_0_IPCLKPORT_I_ACLK, "cam_csi0_0_ipclkport_i_aclk", "dout_cam_csi0_aclk",
GAT_CAM_CSI0_0_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi0_0_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI0_0_IPCLKPORT_I_PCLK, "cam_csi0_0_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI0_0_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI0_1_IPCLKPORT_I_ACLK, "cam_csi0_1_ipclkport_i_aclk", "dout_cam_csi0_aclk",
GAT_CAM_CSI0_1_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi0_1_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI0_1_IPCLKPORT_I_PCLK, "cam_csi0_1_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI0_1_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI0_2_IPCLKPORT_I_ACLK, "cam_csi0_2_ipclkport_i_aclk", "dout_cam_csi0_aclk",
GAT_CAM_CSI0_2_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi0_2_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI0_2_IPCLKPORT_I_PCLK, "cam_csi0_2_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI0_2_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI0_3_IPCLKPORT_I_ACLK, "cam_csi0_3_ipclkport_i_aclk", "dout_cam_csi0_aclk",
GAT_CAM_CSI0_3_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi0_3_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI0_3_IPCLKPORT_I_PCLK, "cam_csi0_3_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI0_3_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI1_0_IPCLKPORT_I_ACLK, "cam_csi1_0_ipclkport_i_aclk", "dout_cam_csi1_aclk",
GAT_CAM_CSI1_0_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi1_0_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI1_0_IPCLKPORT_I_PCLK, "cam_csi1_0_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI1_0_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI1_1_IPCLKPORT_I_ACLK, "cam_csi1_1_ipclkport_i_aclk", "dout_cam_csi1_aclk",
GAT_CAM_CSI1_1_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi1_1_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI1_1_IPCLKPORT_I_PCLK, "cam_csi1_1_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI1_1_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI1_2_IPCLKPORT_I_ACLK, "cam_csi1_2_ipclkport_i_aclk", "dout_cam_csi1_aclk",
GAT_CAM_CSI1_2_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi1_2_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI1_2_IPCLKPORT_I_PCLK, "cam_csi1_2_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI1_2_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI1_3_IPCLKPORT_I_ACLK, "cam_csi1_3_ipclkport_i_aclk", "dout_cam_csi1_aclk",
GAT_CAM_CSI1_3_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi1_3_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI1_3_IPCLKPORT_I_PCLK, "cam_csi1_3_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI1_3_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI2_0_IPCLKPORT_I_ACLK, "cam_csi2_0_ipclkport_i_aclk", "dout_cam_csi2_aclk",
GAT_CAM_CSI2_0_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi2_0_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI2_0_IPCLKPORT_I_PCLK, "cam_csi2_0_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI2_0_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI2_1_IPCLKPORT_I_ACLK, "cam_csi2_1_ipclkport_i_aclk", "dout_cam_csi2_aclk",
GAT_CAM_CSI2_1_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi2_1_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI2_1_IPCLKPORT_I_PCLK, "cam_csi2_1_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI2_1_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI2_2_IPCLKPORT_I_ACLK, "cam_csi2_2_ipclkport_i_aclk", "dout_cam_csi2_aclk",
GAT_CAM_CSI2_2_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi2_2_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI2_2_IPCLKPORT_I_PCLK, "cam_csi2_2_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI2_2_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI2_3_IPCLKPORT_I_ACLK, "cam_csi2_3_ipclkport_i_aclk", "dout_cam_csi2_aclk",
GAT_CAM_CSI2_3_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi2_3_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI2_3_IPCLKPORT_I_PCLK, "cam_csi2_3_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI2_3_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(0, "cam_ns_brdg_cam_csi_ipclkport_clk__psoc_cam_csi__clk_cam_csi_d",
"dout_cam_csi_busd",
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index e4faf02b631e..7bea7be1d7e4 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -49,8 +49,8 @@ static const struct samsung_pll_rate_table *samsung_get_pll_settings(
return NULL;
}
-static long samsung_pll_round_rate(struct clk_hw *hw,
- unsigned long drate, unsigned long *prate)
+static int samsung_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct samsung_clk_pll *pll = to_clk_pll(hw);
const struct samsung_pll_rate_table *rate_table = pll->rate_table;
@@ -58,12 +58,17 @@ static long samsung_pll_round_rate(struct clk_hw *hw,
/* Assuming rate_table is in descending order */
for (i = 0; i < pll->rate_count; i++) {
- if (drate >= rate_table[i].rate)
- return rate_table[i].rate;
+ if (req->rate >= rate_table[i].rate) {
+ req->rate = rate_table[i].rate;
+
+ return 0;
+ }
}
/* return minimum supported value */
- return rate_table[i - 1].rate;
+ req->rate = rate_table[i - 1].rate;
+
+ return 0;
}
static bool pll_early_timeout = true;
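
The change above is the template for the rest of this series: a .round_rate callback that returned the chosen rate becomes a .determine_rate callback that writes the result into req->rate and returns 0 on success. A minimal sketch of the conversion, assuming a hypothetical driver helper my_pick_rate() that snaps a request to the nearest supported rate:

/* Old style: return the rounded rate, parent rate passed by pointer. */
static long my_clk_round_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long *prate)
{
	return my_pick_rate(hw, rate, *prate);	/* my_pick_rate() is hypothetical */
}

/* New style: the request carries the rate and best_parent_rate; the
 * chosen rate is returned through req->rate, the return value is an
 * errno. */
static int my_clk_determine_rate(struct clk_hw *hw,
				 struct clk_rate_request *req)
{
	req->rate = my_pick_rate(hw, req->rate, req->best_parent_rate);

	return 0;
}
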
@@ -273,7 +278,7 @@ static int samsung_pll35xx_set_rate(struct clk_hw *hw, unsigned long drate,
}
/* Set PLL lock time. */
- if (pll->type == pll_142xx)
+ if (pll->type == pll_142xx || pll->type == pll_1017x)
writel_relaxed(rate->pdiv * PLL142XX_LOCK_FACTOR,
pll->lock_reg);
else
@@ -298,7 +303,7 @@ static int samsung_pll35xx_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll35xx_clk_ops = {
.recalc_rate = samsung_pll35xx_recalc_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
.set_rate = samsung_pll35xx_set_rate,
.enable = samsung_pll3xxx_enable,
.disable = samsung_pll3xxx_disable,
@@ -411,7 +416,7 @@ static int samsung_pll36xx_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll36xx_clk_ops = {
.recalc_rate = samsung_pll36xx_recalc_rate,
.set_rate = samsung_pll36xx_set_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
.enable = samsung_pll3xxx_enable,
.disable = samsung_pll3xxx_disable,
};
@@ -514,7 +519,7 @@ static int samsung_pll0822x_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll0822x_clk_ops = {
.recalc_rate = samsung_pll0822x_recalc_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
.set_rate = samsung_pll0822x_set_rate,
.enable = samsung_pll3xxx_enable,
.disable = samsung_pll3xxx_disable,
@@ -612,7 +617,7 @@ static int samsung_pll0831x_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll0831x_clk_ops = {
.recalc_rate = samsung_pll0831x_recalc_rate,
.set_rate = samsung_pll0831x_set_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
.enable = samsung_pll3xxx_enable,
.disable = samsung_pll3xxx_disable,
};
@@ -735,7 +740,7 @@ static int samsung_pll45xx_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll45xx_clk_ops = {
.recalc_rate = samsung_pll45xx_recalc_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
.set_rate = samsung_pll45xx_set_rate,
};
@@ -880,7 +885,7 @@ static int samsung_pll46xx_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll46xx_clk_ops = {
.recalc_rate = samsung_pll46xx_recalc_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
.set_rate = samsung_pll46xx_set_rate,
};
@@ -1093,7 +1098,7 @@ static int samsung_pll2550xx_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll2550xx_clk_ops = {
.recalc_rate = samsung_pll2550xx_recalc_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
.set_rate = samsung_pll2550xx_set_rate,
};
@@ -1185,7 +1190,7 @@ static int samsung_pll2650x_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll2650x_clk_ops = {
.recalc_rate = samsung_pll2650x_recalc_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
.set_rate = samsung_pll2650x_set_rate,
};
@@ -1277,7 +1282,7 @@ static int samsung_pll2650xx_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll2650xx_clk_ops = {
.recalc_rate = samsung_pll2650xx_recalc_rate,
.set_rate = samsung_pll2650xx_set_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
};
static const struct clk_ops samsung_pll2650xx_clk_min_ops = {
@@ -1325,6 +1330,125 @@ static const struct clk_ops samsung_pll531x_clk_ops = {
.recalc_rate = samsung_pll531x_recalc_rate,
};
+/*
+ * PLL1031x Clock Type
+ */
+#define PLL1031X_LOCK_FACTOR (500)
+
+#define PLL1031X_MDIV_MASK (0x3ff)
+#define PLL1031X_PDIV_MASK (0x3f)
+#define PLL1031X_SDIV_MASK (0x7)
+#define PLL1031X_MDIV_SHIFT (16)
+#define PLL1031X_PDIV_SHIFT (8)
+#define PLL1031X_SDIV_SHIFT (0)
+
+#define PLL1031X_KDIV_MASK (0xffff)
+#define PLL1031X_KDIV_SHIFT (0)
+#define PLL1031X_MFR_MASK (0x3f)
+#define PLL1031X_MRR_MASK (0x1f)
+#define PLL1031X_MFR_SHIFT (16)
+#define PLL1031X_MRR_SHIFT (24)
+
+static unsigned long samsung_pll1031x_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ u32 mdiv, pdiv, sdiv, kdiv, pll_con0, pll_con3;
+ u64 fvco = parent_rate;
+
+ pll_con0 = readl_relaxed(pll->con_reg);
+ pll_con3 = readl_relaxed(pll->con_reg + 0xc);
+ mdiv = (pll_con0 >> PLL1031X_MDIV_SHIFT) & PLL1031X_MDIV_MASK;
+ pdiv = (pll_con0 >> PLL1031X_PDIV_SHIFT) & PLL1031X_PDIV_MASK;
+ sdiv = (pll_con0 >> PLL1031X_SDIV_SHIFT) & PLL1031X_SDIV_MASK;
+ kdiv = (pll_con3 & PLL1031X_KDIV_MASK);
+
+ fvco *= (mdiv << PLL1031X_MDIV_SHIFT) + kdiv;
+ do_div(fvco, (pdiv << sdiv));
+ fvco >>= PLL1031X_MDIV_SHIFT;
+
+ return (unsigned long)fvco;
+}
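
The fixed-point arithmetic above computes rate = parent * (mdiv + kdiv / 2^16) / (pdiv * 2^sdiv): MDIV_SHIFT (16) doubles as the scale of the fractional KDIV term, which is why the product is shifted back down at the end. A standalone sketch of the same computation, with illustrative divider values (not read from real hardware):

#include <stdint.h>
#include <stdio.h>

/* rate = parent * (mdiv + kdiv / 2^16) / (pdiv * 2^sdiv), in 64-bit
 * fixed point, mirroring samsung_pll1031x_recalc_rate(). */
static unsigned long pll1031x_rate(unsigned long parent, uint32_t mdiv,
				   uint32_t pdiv, uint32_t sdiv, uint32_t kdiv)
{
	uint64_t fvco = parent;

	fvco *= ((uint64_t)mdiv << 16) + kdiv;	/* integer + fractional part */
	fvco /= (uint64_t)pdiv << sdiv;		/* pre- and post-dividers */

	return (unsigned long)(fvco >> 16);	/* drop the fractional scale */
}

int main(void)
{
	/* e.g. 26 MHz input, mdiv=150, pdiv=2, sdiv=1, kdiv=0 -> 975 MHz */
	printf("%lu\n", pll1031x_rate(26000000UL, 150, 2, 1, 0));
	return 0;
}
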
+
+static bool samsung_pll1031x_mpk_change(u32 pll_con0, u32 pll_con3,
+ const struct samsung_pll_rate_table *rate)
+{
+ u32 old_mdiv, old_pdiv, old_kdiv;
+
+ old_mdiv = (pll_con0 >> PLL1031X_MDIV_SHIFT) & PLL1031X_MDIV_MASK;
+ old_pdiv = (pll_con0 >> PLL1031X_PDIV_SHIFT) & PLL1031X_PDIV_MASK;
+ old_kdiv = (pll_con3 >> PLL1031X_KDIV_SHIFT) & PLL1031X_KDIV_MASK;
+
+ return (old_mdiv != rate->mdiv || old_pdiv != rate->pdiv ||
+ old_kdiv != rate->kdiv);
+}
+
+static int samsung_pll1031x_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ const struct samsung_pll_rate_table *rate;
+ u32 con0, con3;
+
+ /* Get required rate settings from table */
+ rate = samsung_get_pll_settings(pll, drate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+ drate, clk_hw_get_name(hw));
+ return -EINVAL;
+ }
+
+ con0 = readl_relaxed(pll->con_reg);
+ con3 = readl_relaxed(pll->con_reg + 0xc);
+
+ if (!(samsung_pll1031x_mpk_change(con0, con3, rate))) {
+ /* Only the S value changed, so update just SDIV */
+ con0 &= ~(PLL1031X_SDIV_MASK << PLL1031X_SDIV_SHIFT);
+ con0 |= rate->sdiv << PLL1031X_SDIV_SHIFT;
+ writel_relaxed(con0, pll->con_reg);
+
+ return 0;
+ }
+
+ /* Set PLL lock time. */
+ writel_relaxed(rate->pdiv * PLL1031X_LOCK_FACTOR, pll->lock_reg);
+
+ /* Set PLL M, P, and S values. */
+ con0 &= ~((PLL1031X_MDIV_MASK << PLL1031X_MDIV_SHIFT) |
+ (PLL1031X_PDIV_MASK << PLL1031X_PDIV_SHIFT) |
+ (PLL1031X_SDIV_MASK << PLL1031X_SDIV_SHIFT));
+
+ con0 |= (rate->mdiv << PLL1031X_MDIV_SHIFT) |
+ (rate->pdiv << PLL1031X_PDIV_SHIFT) |
+ (rate->sdiv << PLL1031X_SDIV_SHIFT);
+
+ /* Set PLL K, MFR and MRR values. */
+ con3 = readl_relaxed(pll->con_reg + 0xc);
+ con3 &= ~((PLL1031X_KDIV_MASK << PLL1031X_KDIV_SHIFT) |
+ (PLL1031X_MFR_MASK << PLL1031X_MFR_SHIFT) |
+ (PLL1031X_MRR_MASK << PLL1031X_MRR_SHIFT));
+ con3 |= (rate->kdiv << PLL1031X_KDIV_SHIFT) |
+ (rate->mfr << PLL1031X_MFR_SHIFT) |
+ (rate->mrr << PLL1031X_MRR_SHIFT);
+
+ /* Write configuration to PLL */
+ writel_relaxed(con0, pll->con_reg);
+ writel_relaxed(con3, pll->con_reg + 0xc);
+
+ /* Wait for PLL lock if the PLL is enabled */
+ return samsung_pll_lock_wait(pll, BIT(pll->lock_offs));
+}
+
+static const struct clk_ops samsung_pll1031x_clk_ops = {
+ .recalc_rate = samsung_pll1031x_recalc_rate,
+ .determine_rate = samsung_pll_determine_rate,
+ .set_rate = samsung_pll1031x_set_rate,
+};
+
+static const struct clk_ops samsung_pll1031x_clk_min_ops = {
+ .recalc_rate = samsung_pll1031x_recalc_rate,
+};
+
static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
const struct samsung_pll_clock *pll_clk)
{
@@ -1373,6 +1497,7 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
case pll_1451x:
case pll_1452x:
case pll_142xx:
+ case pll_1017x:
pll->enable_offs = PLL35XX_ENABLE_SHIFT;
pll->lock_offs = PLL35XX_LOCK_STAT_SHIFT;
if (!pll->rate_table)
@@ -1468,6 +1593,12 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
case pll_4311:
init.ops = &samsung_pll531x_clk_ops;
break;
+ case pll_1031x:
+ if (!pll->rate_table)
+ init.ops = &samsung_pll1031x_clk_min_ops;
+ else
+ init.ops = &samsung_pll1031x_clk_ops;
+ break;
default:
pr_warn("%s: Unknown pll type for pll clk %s\n",
__func__, pll_clk->name);
diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h
index e9a5f8e0e0a3..6c8bb7f26da5 100644
--- a/drivers/clk/samsung/clk-pll.h
+++ b/drivers/clk/samsung/clk-pll.h
@@ -49,6 +49,8 @@ enum samsung_pll_type {
pll_0718x,
pll_0732x,
pll_4311,
+ pll_1017x,
+ pll_1031x,
};
#define PLL_RATE(_fin, _m, _p, _s, _k, _ks) \
diff --git a/drivers/clk/sifive/fu540-prci.h b/drivers/clk/sifive/fu540-prci.h
index e0173324f3c5..d45193c210b4 100644
--- a/drivers/clk/sifive/fu540-prci.h
+++ b/drivers/clk/sifive/fu540-prci.h
@@ -49,7 +49,7 @@ static struct __prci_wrpll_data sifive_fu540_prci_gemgxlpll_data = {
static const struct clk_ops sifive_fu540_prci_wrpll_clk_ops = {
.set_rate = sifive_prci_wrpll_set_rate,
- .round_rate = sifive_prci_wrpll_round_rate,
+ .determine_rate = sifive_prci_wrpll_determine_rate,
.recalc_rate = sifive_prci_wrpll_recalc_rate,
.enable = sifive_prci_clock_enable,
.disable = sifive_prci_clock_disable,
diff --git a/drivers/clk/sifive/fu740-prci.h b/drivers/clk/sifive/fu740-prci.h
index f31cd30fc395..c605a899d97d 100644
--- a/drivers/clk/sifive/fu740-prci.h
+++ b/drivers/clk/sifive/fu740-prci.h
@@ -55,7 +55,7 @@ static struct __prci_wrpll_data sifive_fu740_prci_cltxpll_data = {
static const struct clk_ops sifive_fu740_prci_wrpll_clk_ops = {
.set_rate = sifive_prci_wrpll_set_rate,
- .round_rate = sifive_prci_wrpll_round_rate,
+ .determine_rate = sifive_prci_wrpll_determine_rate,
.recalc_rate = sifive_prci_wrpll_recalc_rate,
.enable = sifive_prci_clock_enable,
.disable = sifive_prci_clock_disable,
diff --git a/drivers/clk/sifive/sifive-prci.c b/drivers/clk/sifive/sifive-prci.c
index caba0400f8a2..4d1cc7adb2b3 100644
--- a/drivers/clk/sifive/sifive-prci.c
+++ b/drivers/clk/sifive/sifive-prci.c
@@ -183,9 +183,8 @@ unsigned long sifive_prci_wrpll_recalc_rate(struct clk_hw *hw,
return wrpll_calc_output_rate(&pwd->c, parent_rate);
}
-long sifive_prci_wrpll_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *parent_rate)
+int sifive_prci_wrpll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
struct __prci_wrpll_data *pwd = pc->pwd;
@@ -193,9 +192,11 @@ long sifive_prci_wrpll_round_rate(struct clk_hw *hw,
memcpy(&c, &pwd->c, sizeof(c));
- wrpll_configure_for_rate(&c, rate, *parent_rate);
+ wrpll_configure_for_rate(&c, req->rate, req->best_parent_rate);
- return wrpll_calc_output_rate(&c, *parent_rate);
+ req->rate = wrpll_calc_output_rate(&c, req->best_parent_rate);
+
+ return 0;
}
int sifive_prci_wrpll_set_rate(struct clk_hw *hw,
diff --git a/drivers/clk/sifive/sifive-prci.h b/drivers/clk/sifive/sifive-prci.h
index 91658a88af4e..d74b2bddd08a 100644
--- a/drivers/clk/sifive/sifive-prci.h
+++ b/drivers/clk/sifive/sifive-prci.h
@@ -291,8 +291,8 @@ void sifive_prci_hfpclkpllsel_use_hfclk(struct __prci_data *pd);
void sifive_prci_hfpclkpllsel_use_hfpclkpll(struct __prci_data *pd);
/* Linux clock framework integration */
-long sifive_prci_wrpll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate);
+int sifive_prci_wrpll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req);
int sifive_prci_wrpll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate);
int sifive_clk_is_enabled(struct clk_hw *hw);
diff --git a/drivers/clk/sophgo/clk-cv18xx-ip.c b/drivers/clk/sophgo/clk-cv18xx-ip.c
index b186e64d4813..c2b58faf0938 100644
--- a/drivers/clk/sophgo/clk-cv18xx-ip.c
+++ b/drivers/clk/sophgo/clk-cv18xx-ip.c
@@ -45,10 +45,12 @@ static unsigned long gate_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long gate_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int gate_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return *parent_rate;
+ req->rate = req->best_parent_rate;
+
+ return 0;
}
static int gate_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -63,7 +65,7 @@ const struct clk_ops cv1800_clk_gate_ops = {
.is_enabled = gate_is_enabled,
.recalc_rate = gate_recalc_rate,
- .round_rate = gate_round_rate,
+ .determine_rate = gate_determine_rate,
.set_rate = gate_set_rate,
};
diff --git a/drivers/clk/sophgo/clk-sg2042-clkgen.c b/drivers/clk/sophgo/clk-sg2042-clkgen.c
index 9e61288d34f3..683661b71787 100644
--- a/drivers/clk/sophgo/clk-sg2042-clkgen.c
+++ b/drivers/clk/sophgo/clk-sg2042-clkgen.c
@@ -176,9 +176,8 @@ static unsigned long sg2042_clk_divider_recalc_rate(struct clk_hw *hw,
return ret_rate;
}
-static long sg2042_clk_divider_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *prate)
+static int sg2042_clk_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct sg2042_divider_clock *divider = to_sg2042_clk_divider(hw);
unsigned long ret_rate;
@@ -192,15 +191,17 @@ static long sg2042_clk_divider_round_rate(struct clk_hw *hw,
bestdiv = readl(divider->reg) >> divider->shift;
bestdiv &= clk_div_mask(divider->width);
}
- ret_rate = DIV_ROUND_UP_ULL((u64)*prate, bestdiv);
+ ret_rate = DIV_ROUND_UP_ULL((u64)req->best_parent_rate, bestdiv);
} else {
- ret_rate = divider_round_rate(hw, rate, prate, NULL,
+ ret_rate = divider_round_rate(hw, req->rate, &req->best_parent_rate, NULL,
divider->width, divider->div_flags);
}
pr_debug("--> %s: divider_round_rate: val = %ld\n",
clk_hw_get_name(hw), ret_rate);
- return ret_rate;
+ req->rate = ret_rate;
+
+ return 0;
}
static int sg2042_clk_divider_set_rate(struct clk_hw *hw,
@@ -258,13 +259,13 @@ static int sg2042_clk_divider_set_rate(struct clk_hw *hw,
static const struct clk_ops sg2042_clk_divider_ops = {
.recalc_rate = sg2042_clk_divider_recalc_rate,
- .round_rate = sg2042_clk_divider_round_rate,
+ .determine_rate = sg2042_clk_divider_determine_rate,
.set_rate = sg2042_clk_divider_set_rate,
};
static const struct clk_ops sg2042_clk_divider_ro_ops = {
.recalc_rate = sg2042_clk_divider_recalc_rate,
- .round_rate = sg2042_clk_divider_round_rate,
+ .determine_rate = sg2042_clk_divider_determine_rate,
};
/*
diff --git a/drivers/clk/sophgo/clk-sg2042-pll.c b/drivers/clk/sophgo/clk-sg2042-pll.c
index e5fb0bb7ac4f..110b6ee06fe4 100644
--- a/drivers/clk/sophgo/clk-sg2042-pll.c
+++ b/drivers/clk/sophgo/clk-sg2042-pll.c
@@ -346,37 +346,30 @@ static unsigned long sg2042_clk_pll_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long sg2042_clk_pll_round_rate(struct clk_hw *hw,
- unsigned long req_rate,
- unsigned long *prate)
+static int sg2042_clk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct sg2042_pll_ctrl pctrl_table;
unsigned int value;
long proper_rate;
int ret;
- ret = sg2042_get_pll_ctl_setting(&pctrl_table, req_rate, *prate);
+ ret = sg2042_get_pll_ctl_setting(&pctrl_table,
+ min(req->rate, req->max_rate),
+ req->best_parent_rate);
if (ret) {
proper_rate = 0;
goto out;
}
value = sg2042_pll_ctrl_encode(&pctrl_table);
- proper_rate = (long)sg2042_pll_recalc_rate(value, *prate);
+ proper_rate = (long)sg2042_pll_recalc_rate(value, req->best_parent_rate);
out:
- pr_debug("--> %s: pll_round_rate: val = %ld\n",
+ pr_debug("--> %s: pll_determine_rate: val = %ld\n",
clk_hw_get_name(hw), proper_rate);
- return proper_rate;
-}
+ req->rate = proper_rate;
-static int sg2042_clk_pll_determine_rate(struct clk_hw *hw,
- struct clk_rate_request *req)
-{
- req->rate = sg2042_clk_pll_round_rate(hw, min(req->rate, req->max_rate),
- &req->best_parent_rate);
- pr_debug("--> %s: pll_determine_rate: val = %ld\n",
- clk_hw_get_name(hw), req->rate);
return 0;
}
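
Note the min(req->rate, req->max_rate) above: unlike .round_rate, a .determine_rate callback sees the full rate constraint window of the request. A sketch of honouring both bounds, with a hypothetical driver-specific pick_rate() helper:

static int my_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	/* Clamp the target into the allowed window before picking a
	 * rate; pick_rate() is hypothetical. */
	unsigned long target = clamp(req->rate, req->min_rate, req->max_rate);

	req->rate = pick_rate(hw, target, req->best_parent_rate);

	return 0;
}
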
@@ -417,14 +410,13 @@ out:
static const struct clk_ops sg2042_clk_pll_ops = {
.recalc_rate = sg2042_clk_pll_recalc_rate,
- .round_rate = sg2042_clk_pll_round_rate,
.determine_rate = sg2042_clk_pll_determine_rate,
.set_rate = sg2042_clk_pll_set_rate,
};
static const struct clk_ops sg2042_clk_pll_ro_ops = {
.recalc_rate = sg2042_clk_pll_recalc_rate,
- .round_rate = sg2042_clk_pll_round_rate,
+ .determine_rate = sg2042_clk_pll_determine_rate,
};
/*
diff --git a/drivers/clk/spacemit/ccu-k1.c b/drivers/clk/spacemit/ccu-k1.c
index 65e6de030717..f5a9fe6ba185 100644
--- a/drivers/clk/spacemit/ccu-k1.c
+++ b/drivers/clk/spacemit/ccu-k1.c
@@ -136,13 +136,33 @@ CCU_GATE_DEFINE(pll1_d3_819p2, CCU_PARENT_HW(pll1_d3), MPMU_ACGR, BIT(14), 0);
CCU_GATE_DEFINE(pll1_d2_1228p8, CCU_PARENT_HW(pll1_d2), MPMU_ACGR, BIT(16), 0);
CCU_GATE_DEFINE(slow_uart, CCU_PARENT_NAME(osc), MPMU_ACGR, BIT(1), CLK_IGNORE_UNUSED);
-CCU_DDN_DEFINE(slow_uart1_14p74, pll1_d16_153p6, MPMU_SUCCR, 16, 13, 0, 13, 0);
-CCU_DDN_DEFINE(slow_uart2_48, pll1_d4_614p4, MPMU_SUCCR_1, 16, 13, 0, 13, 0);
+CCU_DDN_DEFINE(slow_uart1_14p74, pll1_d16_153p6, MPMU_SUCCR, 16, 13, 0, 13, 2, 0);
+CCU_DDN_DEFINE(slow_uart2_48, pll1_d4_614p4, MPMU_SUCCR_1, 16, 13, 0, 13, 2, 0);
CCU_GATE_DEFINE(wdt_clk, CCU_PARENT_HW(pll1_d96_25p6), MPMU_WDTPCR, BIT(1), 0);
-CCU_FACTOR_GATE_DEFINE(i2s_sysclk, CCU_PARENT_HW(pll1_d16_153p6), MPMU_ISCCR, BIT(31), 50, 1);
-CCU_FACTOR_GATE_DEFINE(i2s_bclk, CCU_PARENT_HW(i2s_sysclk), MPMU_ISCCR, BIT(29), 1, 1);
+CCU_FACTOR_DEFINE(i2s_153p6, CCU_PARENT_HW(pll1_d8_307p2), 2, 1);
+
+static const struct clk_parent_data i2s_153p6_base_parents[] = {
+ CCU_PARENT_HW(i2s_153p6),
+ CCU_PARENT_HW(pll1_d8_307p2),
+};
+CCU_MUX_DEFINE(i2s_153p6_base, i2s_153p6_base_parents, MPMU_FCCR, 29, 1, 0);
+
+static const struct clk_parent_data i2s_sysclk_src_parents[] = {
+ CCU_PARENT_HW(pll1_d96_25p6),
+ CCU_PARENT_HW(i2s_153p6_base),
+};
+CCU_MUX_GATE_DEFINE(i2s_sysclk_src, i2s_sysclk_src_parents, MPMU_ISCCR, 30, 1, BIT(31), 0);
+
+CCU_DDN_DEFINE(i2s_sysclk, i2s_sysclk_src, MPMU_ISCCR, 0, 15, 15, 12, 1, 0);
+
+CCU_FACTOR_DEFINE(i2s_bclk_factor, CCU_PARENT_HW(i2s_sysclk), 2, 1);
+/*
+ * The divider of i2s_bclk always implies a fixed 1/2 factor, which is
+ * modeled by i2s_bclk_factor.
+ */
+CCU_DIV_GATE_DEFINE(i2s_bclk, CCU_PARENT_HW(i2s_bclk_factor), MPMU_ISCCR, 27, 2, BIT(29), 0);
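
For reference, the i2s clock chain as wired by the definitions above (a sketch derived from this patch, not from a datasheet):

/*
 * pll1_d8_307p2 --/2--> i2s_153p6 ---+
 * pll1_d8_307p2 ----------------------+-- mux --> i2s_153p6_base ---+
 * pll1_d96_25p6 -----------------------------------------------------+-- mux+gate --> i2s_sysclk_src
 * i2s_sysclk_src -- DDN --> i2s_sysclk --/2--> i2s_bclk_factor -- div+gate --> i2s_bclk
 * i2s_bclk -- gate(bit 3) --> sspa0_i2s_bclk / sspa1_i2s_bclk
 */
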
static const struct clk_parent_data apb_parents[] = {
CCU_PARENT_HW(pll1_d96_25p6),
@@ -247,7 +267,14 @@ CCU_GATE_DEFINE(aib_clk, CCU_PARENT_NAME(vctcxo_24m), APBC_AIB_CLK_RST, BIT(1),
CCU_GATE_DEFINE(onewire_clk, CCU_PARENT_NAME(vctcxo_24m), APBC_ONEWIRE_CLK_RST, BIT(1), 0);
-static const struct clk_parent_data sspa_parents[] = {
+/*
+ * When i2s_bclk is selected as the parent clock of an SSPA,
+ * the hardware requires bit 3 to be set.
+ */
+CCU_GATE_DEFINE(sspa0_i2s_bclk, CCU_PARENT_HW(i2s_bclk), APBC_SSPA0_CLK_RST, BIT(3), 0);
+CCU_GATE_DEFINE(sspa1_i2s_bclk, CCU_PARENT_HW(i2s_bclk), APBC_SSPA1_CLK_RST, BIT(3), 0);
+
+static const struct clk_parent_data sspa0_parents[] = {
CCU_PARENT_HW(pll1_d384_6p4),
CCU_PARENT_HW(pll1_d192_12p8),
CCU_PARENT_HW(pll1_d96_25p6),
@@ -255,10 +282,22 @@ static const struct clk_parent_data sspa_parents[] = {
CCU_PARENT_HW(pll1_d768_3p2),
CCU_PARENT_HW(pll1_d1536_1p6),
CCU_PARENT_HW(pll1_d3072_0p8),
- CCU_PARENT_HW(i2s_bclk),
+ CCU_PARENT_HW(sspa0_i2s_bclk),
};
-CCU_MUX_GATE_DEFINE(sspa0_clk, sspa_parents, APBC_SSPA0_CLK_RST, 4, 3, BIT(1), 0);
-CCU_MUX_GATE_DEFINE(sspa1_clk, sspa_parents, APBC_SSPA1_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(sspa0_clk, sspa0_parents, APBC_SSPA0_CLK_RST, 4, 3, BIT(1), 0);
+
+static const struct clk_parent_data sspa1_parents[] = {
+ CCU_PARENT_HW(pll1_d384_6p4),
+ CCU_PARENT_HW(pll1_d192_12p8),
+ CCU_PARENT_HW(pll1_d96_25p6),
+ CCU_PARENT_HW(pll1_d48_51p2),
+ CCU_PARENT_HW(pll1_d768_3p2),
+ CCU_PARENT_HW(pll1_d1536_1p6),
+ CCU_PARENT_HW(pll1_d3072_0p8),
+ CCU_PARENT_HW(sspa1_i2s_bclk),
+};
+CCU_MUX_GATE_DEFINE(sspa1_clk, sspa1_parents, APBC_SSPA1_CLK_RST, 4, 3, BIT(1), 0);
+
CCU_GATE_DEFINE(dro_clk, CCU_PARENT_HW(apb_clk), APBC_DRO_CLK_RST, BIT(1), 0);
CCU_GATE_DEFINE(ir_clk, CCU_PARENT_HW(apb_clk), APBC_IR_CLK_RST, BIT(1), 0);
CCU_GATE_DEFINE(tsen_clk, CCU_PARENT_HW(apb_clk), APBC_TSEN_CLK_RST, BIT(1), 0);
@@ -756,6 +795,10 @@ static struct clk_hw *k1_ccu_mpmu_hws[] = {
[CLK_I2S_BCLK] = &i2s_bclk.common.hw,
[CLK_APB] = &apb_clk.common.hw,
[CLK_WDT_BUS] = &wdt_bus_clk.common.hw,
+ [CLK_I2S_153P6] = &i2s_153p6.common.hw,
+ [CLK_I2S_153P6_BASE] = &i2s_153p6_base.common.hw,
+ [CLK_I2S_SYSCLK_SRC] = &i2s_sysclk_src.common.hw,
+ [CLK_I2S_BCLK_FACTOR] = &i2s_bclk_factor.common.hw,
};
static const struct spacemit_ccu_data k1_ccu_mpmu_data = {
@@ -865,6 +908,8 @@ static struct clk_hw *k1_ccu_apbc_hws[] = {
[CLK_SSPA1_BUS] = &sspa1_bus_clk.common.hw,
[CLK_TSEN_BUS] = &tsen_bus_clk.common.hw,
[CLK_IPC_AP2AUD_BUS] = &ipc_ap2aud_bus_clk.common.hw,
+ [CLK_SSPA0_I2S_BCLK] = &sspa0_i2s_bclk.common.hw,
+ [CLK_SSPA1_I2S_BCLK] = &sspa1_i2s_bclk.common.hw,
};
static const struct spacemit_ccu_data k1_ccu_apbc_data = {
diff --git a/drivers/clk/spacemit/ccu_ddn.c b/drivers/clk/spacemit/ccu_ddn.c
index be311b045698..5b16e273bee5 100644
--- a/drivers/clk/spacemit/ccu_ddn.c
+++ b/drivers/clk/spacemit/ccu_ddn.c
@@ -22,30 +22,33 @@
#include "ccu_ddn.h"
-static unsigned long ccu_ddn_calc_rate(unsigned long prate,
- unsigned long num, unsigned long den)
+static unsigned long ccu_ddn_calc_rate(unsigned long prate, unsigned long num,
+ unsigned long den, unsigned int pre_div)
{
- return prate * den / 2 / num;
+ return prate * den / pre_div / num;
}
static unsigned long ccu_ddn_calc_best_rate(struct ccu_ddn *ddn,
unsigned long rate, unsigned long prate,
unsigned long *num, unsigned long *den)
{
- rational_best_approximation(rate, prate / 2,
+ rational_best_approximation(rate, prate / ddn->pre_div,
ddn->den_mask >> ddn->den_shift,
ddn->num_mask >> ddn->num_shift,
den, num);
- return ccu_ddn_calc_rate(prate, *num, *den);
+ return ccu_ddn_calc_rate(prate, *num, *den, ddn->pre_div);
}
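
The DDN (fractional "divider numerator/denominator") output is rate = prate * den / pre_div / num; this change simply parameterizes the previously hard-coded pre-divider of 2 so that the new i2s_sysclk (pre_div 1) can share the code. A worked example with illustrative num/den values:

#include <stdint.h>
#include <stdio.h>

/* Same formula as ccu_ddn_calc_rate(). */
static unsigned long ddn_rate(unsigned long prate, unsigned long num,
			      unsigned long den, unsigned int pre_div)
{
	return (unsigned long)((uint64_t)prate * den / pre_div / num);
}

int main(void)
{
	/* e.g. 153.6 MHz parent, pre_div 2, num=125, den=24 gives the
	 * 14.7456 MHz UART reference; num/den chosen for illustration. */
	printf("%lu\n", ddn_rate(153600000UL, 125, 24, 2));
	return 0;
}
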
-static long ccu_ddn_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int ccu_ddn_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ccu_ddn *ddn = hw_to_ccu_ddn(hw);
unsigned long num, den;
- return ccu_ddn_calc_best_rate(ddn, rate, *prate, &num, &den);
+ req->rate = ccu_ddn_calc_best_rate(ddn, req->rate,
+ req->best_parent_rate, &num, &den);
+
+ return 0;
}
static unsigned long ccu_ddn_recalc_rate(struct clk_hw *hw, unsigned long prate)
@@ -58,7 +61,7 @@ static unsigned long ccu_ddn_recalc_rate(struct clk_hw *hw, unsigned long prate)
num = (val & ddn->num_mask) >> ddn->num_shift;
den = (val & ddn->den_mask) >> ddn->den_shift;
- return ccu_ddn_calc_rate(prate, num, den);
+ return ccu_ddn_calc_rate(prate, num, den, ddn->pre_div);
}
static int ccu_ddn_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -78,6 +81,6 @@ static int ccu_ddn_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops spacemit_ccu_ddn_ops = {
.recalc_rate = ccu_ddn_recalc_rate,
- .round_rate = ccu_ddn_round_rate,
+ .determine_rate = ccu_ddn_determine_rate,
.set_rate = ccu_ddn_set_rate,
};
diff --git a/drivers/clk/spacemit/ccu_ddn.h b/drivers/clk/spacemit/ccu_ddn.h
index a52fabe77d62..4838414a8e8d 100644
--- a/drivers/clk/spacemit/ccu_ddn.h
+++ b/drivers/clk/spacemit/ccu_ddn.h
@@ -18,13 +18,14 @@ struct ccu_ddn {
unsigned int num_shift;
unsigned int den_mask;
unsigned int den_shift;
+ unsigned int pre_div;
};
#define CCU_DDN_INIT(_name, _parent, _flags) \
CLK_HW_INIT_HW(#_name, &_parent.common.hw, &spacemit_ccu_ddn_ops, _flags)
#define CCU_DDN_DEFINE(_name, _parent, _reg_ctrl, _num_shift, _num_width, \
- _den_shift, _den_width, _flags) \
+ _den_shift, _den_width, _pre_div, _flags) \
static struct ccu_ddn _name = { \
.common = { \
.reg_ctrl = _reg_ctrl, \
@@ -33,7 +34,8 @@ static struct ccu_ddn _name = { \
.num_mask = GENMASK(_num_shift + _num_width - 1, _num_shift), \
.num_shift = _num_shift, \
.den_mask = GENMASK(_den_shift + _den_width - 1, _den_shift), \
- .den_shift = _den_shift, \
+ .den_shift = _den_shift, \
+ .pre_div = _pre_div, \
}
static inline struct ccu_ddn *hw_to_ccu_ddn(struct clk_hw *hw)
diff --git a/drivers/clk/spacemit/ccu_mix.c b/drivers/clk/spacemit/ccu_mix.c
index 9b852aa61f78..7b7990875372 100644
--- a/drivers/clk/spacemit/ccu_mix.c
+++ b/drivers/clk/spacemit/ccu_mix.c
@@ -80,10 +80,12 @@ static int ccu_mix_trigger_fc(struct clk_hw *hw)
MIX_FC_TIMEOUT_US);
}
-static long ccu_factor_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int ccu_factor_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return ccu_factor_recalc_rate(hw, *prate);
+ req->rate = ccu_factor_recalc_rate(hw, req->best_parent_rate);
+
+ return 0;
}
static int ccu_factor_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -198,7 +200,7 @@ const struct clk_ops spacemit_ccu_gate_ops = {
};
const struct clk_ops spacemit_ccu_factor_ops = {
- .round_rate = ccu_factor_round_rate,
+ .determine_rate = ccu_factor_determine_rate,
.recalc_rate = ccu_factor_recalc_rate,
.set_rate = ccu_factor_set_rate,
};
@@ -220,7 +222,7 @@ const struct clk_ops spacemit_ccu_factor_gate_ops = {
.enable = ccu_gate_enable,
.is_enabled = ccu_gate_is_enabled,
- .round_rate = ccu_factor_round_rate,
+ .determine_rate = ccu_factor_determine_rate,
.recalc_rate = ccu_factor_recalc_rate,
.set_rate = ccu_factor_set_rate,
};
diff --git a/drivers/clk/spacemit/ccu_pll.c b/drivers/clk/spacemit/ccu_pll.c
index 45f540073a65..d92f0dae65a4 100644
--- a/drivers/clk/spacemit/ccu_pll.c
+++ b/drivers/clk/spacemit/ccu_pll.c
@@ -125,12 +125,14 @@ static unsigned long ccu_pll_recalc_rate(struct clk_hw *hw,
return entry ? entry->rate : 0;
}
-static long ccu_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int ccu_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ccu_pll *pll = hw_to_ccu_pll(hw);
- return ccu_pll_lookup_best_rate(pll, rate)->rate;
+ req->rate = ccu_pll_lookup_best_rate(pll, req->rate)->rate;
+
+ return 0;
}
static int ccu_pll_init(struct clk_hw *hw)
@@ -152,6 +154,6 @@ const struct clk_ops spacemit_ccu_pll_ops = {
.disable = ccu_pll_disable,
.set_rate = ccu_pll_set_rate,
.recalc_rate = ccu_pll_recalc_rate,
- .round_rate = ccu_pll_round_rate,
+ .determine_rate = ccu_pll_determine_rate,
.is_enabled = ccu_pll_is_enabled,
};
diff --git a/drivers/clk/spear/clk-aux-synth.c b/drivers/clk/spear/clk-aux-synth.c
index 637938e804f8..d0d063147af8 100644
--- a/drivers/clk/spear/clk-aux-synth.c
+++ b/drivers/clk/spear/clk-aux-synth.c
@@ -49,14 +49,16 @@ static unsigned long aux_calc_rate(struct clk_hw *hw, unsigned long prate,
(rtbl[index].yscale * eq)) * 10000;
}
-static long clk_aux_round_rate(struct clk_hw *hw, unsigned long drate,
- unsigned long *prate)
+static int clk_aux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_aux *aux = to_clk_aux(hw);
int unused;
- return clk_round_rate_index(hw, drate, *prate, aux_calc_rate,
- aux->rtbl_cnt, &unused);
+ req->rate = clk_round_rate_index(hw, req->rate, req->best_parent_rate,
+ aux_calc_rate, aux->rtbl_cnt, &unused);
+
+ return 0;
}
static unsigned long clk_aux_recalc_rate(struct clk_hw *hw,
@@ -127,7 +129,7 @@ static int clk_aux_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops clk_aux_ops = {
.recalc_rate = clk_aux_recalc_rate,
- .round_rate = clk_aux_round_rate,
+ .determine_rate = clk_aux_determine_rate,
.set_rate = clk_aux_set_rate,
};
diff --git a/drivers/clk/spear/clk-frac-synth.c b/drivers/clk/spear/clk-frac-synth.c
index 2380df293a2c..150f051d28e0 100644
--- a/drivers/clk/spear/clk-frac-synth.c
+++ b/drivers/clk/spear/clk-frac-synth.c
@@ -52,14 +52,16 @@ static unsigned long frac_calc_rate(struct clk_hw *hw, unsigned long prate,
return prate;
}
-static long clk_frac_round_rate(struct clk_hw *hw, unsigned long drate,
- unsigned long *prate)
+static int clk_frac_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_frac *frac = to_clk_frac(hw);
int unused;
- return clk_round_rate_index(hw, drate, *prate, frac_calc_rate,
- frac->rtbl_cnt, &unused);
+ req->rate = clk_round_rate_index(hw, req->rate, req->best_parent_rate,
+ frac_calc_rate, frac->rtbl_cnt, &unused);
+
+ return 0;
}
static unsigned long clk_frac_recalc_rate(struct clk_hw *hw,
@@ -115,7 +117,7 @@ static int clk_frac_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops clk_frac_ops = {
.recalc_rate = clk_frac_recalc_rate,
- .round_rate = clk_frac_round_rate,
+ .determine_rate = clk_frac_determine_rate,
.set_rate = clk_frac_set_rate,
};
diff --git a/drivers/clk/spear/clk-gpt-synth.c b/drivers/clk/spear/clk-gpt-synth.c
index 4ef747c2abbb..cf9659dc9073 100644
--- a/drivers/clk/spear/clk-gpt-synth.c
+++ b/drivers/clk/spear/clk-gpt-synth.c
@@ -39,14 +39,16 @@ static unsigned long gpt_calc_rate(struct clk_hw *hw, unsigned long prate,
return prate;
}
-static long clk_gpt_round_rate(struct clk_hw *hw, unsigned long drate,
- unsigned long *prate)
+static int clk_gpt_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_gpt *gpt = to_clk_gpt(hw);
int unused;
- return clk_round_rate_index(hw, drate, *prate, gpt_calc_rate,
- gpt->rtbl_cnt, &unused);
+ req->rate = clk_round_rate_index(hw, req->rate, req->best_parent_rate,
+ gpt_calc_rate, gpt->rtbl_cnt, &unused);
+
+ return 0;
}
static unsigned long clk_gpt_recalc_rate(struct clk_hw *hw,
@@ -104,7 +106,7 @@ static int clk_gpt_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops clk_gpt_ops = {
.recalc_rate = clk_gpt_recalc_rate,
- .round_rate = clk_gpt_round_rate,
+ .determine_rate = clk_gpt_determine_rate,
.set_rate = clk_gpt_set_rate,
};
diff --git a/drivers/clk/spear/clk-vco-pll.c b/drivers/clk/spear/clk-vco-pll.c
index 348eeab0a906..723a6eb67754 100644
--- a/drivers/clk/spear/clk-vco-pll.c
+++ b/drivers/clk/spear/clk-vco-pll.c
@@ -110,12 +110,15 @@ static long clk_pll_round_rate_index(struct clk_hw *hw, unsigned long drate,
return rate;
}
-static long clk_pll_round_rate(struct clk_hw *hw, unsigned long drate,
- unsigned long *prate)
+static int clk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int unused;
- return clk_pll_round_rate_index(hw, drate, prate, &unused);
+ req->rate = clk_pll_round_rate_index(hw, req->rate,
+ &req->best_parent_rate, &unused);
+
+ return 0;
}
static unsigned long clk_pll_recalc_rate(struct clk_hw *hw, unsigned long
@@ -164,7 +167,7 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops clk_pll_ops = {
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_round_rate,
+ .determine_rate = clk_pll_determine_rate,
.set_rate = clk_pll_set_rate,
};
@@ -176,14 +179,16 @@ static inline unsigned long vco_calc_rate(struct clk_hw *hw,
return pll_calc_rate(vco->rtbl, prate, index, NULL);
}
-static long clk_vco_round_rate(struct clk_hw *hw, unsigned long drate,
- unsigned long *prate)
+static int clk_vco_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_vco *vco = to_clk_vco(hw);
int unused;
- return clk_round_rate_index(hw, drate, *prate, vco_calc_rate,
- vco->rtbl_cnt, &unused);
+ req->rate = clk_round_rate_index(hw, req->rate, req->best_parent_rate,
+ vco_calc_rate, vco->rtbl_cnt, &unused);
+
+ return 0;
}
static unsigned long clk_vco_recalc_rate(struct clk_hw *hw,
@@ -265,7 +270,7 @@ static int clk_vco_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops clk_vco_ops = {
.recalc_rate = clk_vco_recalc_rate,
- .round_rate = clk_vco_round_rate,
+ .determine_rate = clk_vco_determine_rate,
.set_rate = clk_vco_set_rate,
};
diff --git a/drivers/clk/sprd/div.c b/drivers/clk/sprd/div.c
index 936782c24127..013423881968 100644
--- a/drivers/clk/sprd/div.c
+++ b/drivers/clk/sprd/div.c
@@ -9,13 +9,16 @@
#include "div.h"
-static long sprd_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int sprd_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct sprd_div *cd = hw_to_sprd_div(hw);
- return divider_round_rate(&cd->common.hw, rate, parent_rate, NULL,
- cd->div.width, 0);
+ req->rate = divider_round_rate(&cd->common.hw, req->rate,
+ &req->best_parent_rate,
+ NULL, cd->div.width, 0);
+
+ return 0;
}
unsigned long sprd_div_helper_recalc_rate(struct sprd_clk_common *common,
@@ -75,7 +78,7 @@ static int sprd_div_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops sprd_div_ops = {
.recalc_rate = sprd_div_recalc_rate,
- .round_rate = sprd_div_round_rate,
+ .determine_rate = sprd_div_determine_rate,
.set_rate = sprd_div_set_rate,
};
EXPORT_SYMBOL_GPL(sprd_div_ops);
diff --git a/drivers/clk/sprd/pll.c b/drivers/clk/sprd/pll.c
index 13a322b2535a..bc6610d5fcb7 100644
--- a/drivers/clk/sprd/pll.c
+++ b/drivers/clk/sprd/pll.c
@@ -254,16 +254,16 @@ static int sprd_pll_clk_prepare(struct clk_hw *hw)
return 0;
}
-static long sprd_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int sprd_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return rate;
+ return 0;
}
const struct clk_ops sprd_pll_ops = {
.prepare = sprd_pll_clk_prepare,
.recalc_rate = sprd_pll_recalc_rate,
- .round_rate = sprd_pll_round_rate,
+ .determine_rate = sprd_pll_determine_rate,
.set_rate = sprd_pll_set_rate,
};
EXPORT_SYMBOL_GPL(sprd_pll_ops);
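
sprd_pll_determine_rate() can leave the request untouched because the framework pre-fills req->rate with the requested rate; returning 0 without modification is therefore equivalent to the old "return rate;". The two pass-through styles side by side:

/* Old: echo the requested rate back. */
static long pass_round_rate(struct clk_hw *hw, unsigned long rate,
			    unsigned long *prate)
{
	return rate;
}

/* New: req->rate already holds the request, so just report success. */
static int pass_determine_rate(struct clk_hw *hw,
			       struct clk_rate_request *req)
{
	return 0;
}
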
diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c
index 5292208c4dd8..e8e7626c76db 100644
--- a/drivers/clk/st/clk-flexgen.c
+++ b/drivers/clk/st/clk-flexgen.c
@@ -303,16 +303,6 @@ static const struct clkgen_data clkgen_video = {
.mode = 1,
};
-static const struct clkgen_clk_out clkgen_stih407_a0_clk_out[] = {
- /* This clk needs to be on so that memory interface is accessible */
- { .name = "clk-ic-lmi0", .flags = CLK_IS_CRITICAL },
-};
-
-static const struct clkgen_data clkgen_stih407_a0 = {
- .outputs = clkgen_stih407_a0_clk_out,
- .outputs_nb = ARRAY_SIZE(clkgen_stih407_a0_clk_out),
-};
-
static const struct clkgen_clk_out clkgen_stih410_a0_clk_out[] = {
/* Those clks need to be on so that memory interface is accessible */
{ .name = "clk-ic-lmi0", .flags = CLK_IS_CRITICAL },
@@ -324,51 +314,6 @@ static const struct clkgen_data clkgen_stih410_a0 = {
.outputs_nb = ARRAY_SIZE(clkgen_stih410_a0_clk_out),
};
-static const struct clkgen_clk_out clkgen_stih407_c0_clk_out[] = {
- { .name = "clk-icn-gpu", },
- { .name = "clk-fdma", },
- { .name = "clk-nand", },
- { .name = "clk-hva", },
- { .name = "clk-proc-stfe", },
- { .name = "clk-proc-tp", },
- { .name = "clk-rx-icn-dmu", },
- { .name = "clk-rx-icn-hva", },
- /* This clk needs to be on to keep bus interconnect alive */
- { .name = "clk-icn-cpu", .flags = CLK_IS_CRITICAL },
- /* This clk needs to be on to keep bus interconnect alive */
- { .name = "clk-tx-icn-dmu", .flags = CLK_IS_CRITICAL },
- { .name = "clk-mmc-0", },
- { .name = "clk-mmc-1", },
- { .name = "clk-jpegdec", },
- /* This clk needs to be on to keep A9 running */
- { .name = "clk-ext2fa9", .flags = CLK_IS_CRITICAL },
- { .name = "clk-ic-bdisp-0", },
- { .name = "clk-ic-bdisp-1", },
- { .name = "clk-pp-dmu", },
- { .name = "clk-vid-dmu", },
- { .name = "clk-dss-lpc", },
- { .name = "clk-st231-aud-0", },
- { .name = "clk-st231-gp-1", },
- { .name = "clk-st231-dmu", },
- /* This clk needs to be on to keep bus interconnect alive */
- { .name = "clk-icn-lmi", .flags = CLK_IS_CRITICAL },
- { .name = "clk-tx-icn-disp-1", },
- /* This clk needs to be on to keep bus interconnect alive */
- { .name = "clk-icn-sbc", .flags = CLK_IS_CRITICAL },
- { .name = "clk-stfe-frc2", },
- { .name = "clk-eth-phy", },
- { .name = "clk-eth-ref-phyclk", },
- { .name = "clk-flash-promip", },
- { .name = "clk-main-disp", },
- { .name = "clk-aux-disp", },
- { .name = "clk-compo-dvp", },
-};
-
-static const struct clkgen_data clkgen_stih407_c0 = {
- .outputs = clkgen_stih407_c0_clk_out,
- .outputs_nb = ARRAY_SIZE(clkgen_stih407_c0_clk_out),
-};
-
static const struct clkgen_clk_out clkgen_stih410_c0_clk_out[] = {
{ .name = "clk-icn-gpu", },
{ .name = "clk-fdma", },
@@ -482,19 +427,6 @@ static const struct clkgen_data clkgen_stih418_c0 = {
.outputs_nb = ARRAY_SIZE(clkgen_stih418_c0_clk_out),
};
-static const struct clkgen_clk_out clkgen_stih407_d0_clk_out[] = {
- { .name = "clk-pcm-0", },
- { .name = "clk-pcm-1", },
- { .name = "clk-pcm-2", },
- { .name = "clk-spdiff", },
-};
-
-static const struct clkgen_data clkgen_stih407_d0 = {
- .flags = CLK_SET_RATE_PARENT,
- .outputs = clkgen_stih407_d0_clk_out,
- .outputs_nb = ARRAY_SIZE(clkgen_stih407_d0_clk_out),
-};
-
static const struct clkgen_clk_out clkgen_stih410_d0_clk_out[] = {
{ .name = "clk-pcm-0", },
{ .name = "clk-pcm-1", },
@@ -597,18 +529,10 @@ static const struct of_device_id flexgen_of_match[] = {
.data = &clkgen_video,
},
{
- .compatible = "st,flexgen-stih407-a0",
- .data = &clkgen_stih407_a0,
- },
- {
.compatible = "st,flexgen-stih410-a0",
.data = &clkgen_stih410_a0,
},
{
- .compatible = "st,flexgen-stih407-c0",
- .data = &clkgen_stih407_c0,
- },
- {
.compatible = "st,flexgen-stih410-c0",
.data = &clkgen_stih410_c0,
},
@@ -617,10 +541,6 @@ static const struct of_device_id flexgen_of_match[] = {
.data = &clkgen_stih418_c0,
},
{
- .compatible = "st,flexgen-stih407-d0",
- .data = &clkgen_stih407_d0,
- },
- {
.compatible = "st,flexgen-stih410-d0",
.data = &clkgen_stih410_d0,
},
diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
index 40df1db102a7..e06e7e5cc1a5 100644
--- a/drivers/clk/st/clkgen-fsyn.c
+++ b/drivers/clk/st/clkgen-fsyn.c
@@ -375,22 +375,21 @@ static int clk_fs660c32_vco_get_params(unsigned long input,
return 0;
}
-static long quadfs_pll_fs660c32_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *prate)
+static int quadfs_pll_fs660c32_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct stm_fs params;
- if (clk_fs660c32_vco_get_params(*prate, rate, &params))
- return rate;
+ if (clk_fs660c32_vco_get_params(req->best_parent_rate, req->rate, &params))
+ return 0;
- clk_fs660c32_vco_get_rate(*prate, &params, &rate);
+ clk_fs660c32_vco_get_rate(req->best_parent_rate, &params, &req->rate);
pr_debug("%s: %s new rate %ld [ndiv=%u]\n",
__func__, clk_hw_get_name(hw),
- rate, (unsigned int)params.ndiv);
+ req->rate, (unsigned int)params.ndiv);
- return rate;
+ return 0;
}
static int quadfs_pll_fs660c32_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -436,7 +435,7 @@ static const struct clk_ops st_quadfs_pll_c32_ops = {
.disable = quadfs_pll_disable,
.is_enabled = quadfs_pll_is_enabled,
.recalc_rate = quadfs_pll_fs660c32_recalc_rate,
- .round_rate = quadfs_pll_fs660c32_round_rate,
+ .determine_rate = quadfs_pll_fs660c32_determine_rate,
.set_rate = quadfs_pll_fs660c32_set_rate,
};
@@ -814,19 +813,21 @@ static unsigned long quadfs_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long quadfs_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int quadfs_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct stm_fs params;
- rate = quadfs_find_best_rate(hw, rate, *prate, &params);
+ req->rate = quadfs_find_best_rate(hw, req->rate,
+ req->best_parent_rate, &params);
pr_debug("%s: %s new rate %ld [sdiv=0x%x,md=0x%x,pe=0x%x,nsdiv3=%u]\n",
__func__, clk_hw_get_name(hw),
- rate, (unsigned int)params.sdiv, (unsigned int)params.mdiv,
- (unsigned int)params.pe, (unsigned int)params.nsdiv);
+ req->rate, (unsigned int)params.sdiv,
+ (unsigned int)params.mdiv,
+ (unsigned int)params.pe, (unsigned int)params.nsdiv);
- return rate;
+ return 0;
}
@@ -873,7 +874,7 @@ static const struct clk_ops st_quadfs_ops = {
.enable = quadfs_fsynth_enable,
.disable = quadfs_fsynth_disable,
.is_enabled = quadfs_fsynth_is_enabled,
- .round_rate = quadfs_round_rate,
+ .determine_rate = quadfs_determine_rate,
.set_rate = quadfs_set_rate,
.recalc_rate = quadfs_recalc_rate,
};
diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c
index b36e4d803636..c258ff87a171 100644
--- a/drivers/clk/st/clkgen-pll.c
+++ b/drivers/clk/st/clkgen-pll.c
@@ -395,25 +395,28 @@ static unsigned long recalc_stm_pll3200c32(struct clk_hw *hw,
return rate;
}
-static long round_rate_stm_pll3200c32(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int stm_pll3200c32_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct stm_pll params;
- if (!clk_pll3200c32_get_params(*prate, rate, &params))
- clk_pll3200c32_get_rate(*prate, &params, &rate);
+ if (!clk_pll3200c32_get_params(req->best_parent_rate, req->rate, &params))
+ clk_pll3200c32_get_rate(req->best_parent_rate, &params,
+ &req->rate);
else {
pr_debug("%s: %s rate %ld Invalid\n", __func__,
- __clk_get_name(hw->clk), rate);
+ __clk_get_name(hw->clk), req->rate);
+ req->rate = 0;
+
return 0;
}
pr_debug("%s: %s new rate %ld [ndiv=%u] [idf=%u]\n",
__func__, __clk_get_name(hw->clk),
- rate, (unsigned int)params.ndiv,
+ req->rate, (unsigned int)params.ndiv,
(unsigned int)params.idf);
- return rate;
+ return 0;
}
static int set_rate_stm_pll3200c32(struct clk_hw *hw, unsigned long rate,
@@ -549,25 +552,28 @@ static unsigned long recalc_stm_pll4600c28(struct clk_hw *hw,
return rate;
}
-static long round_rate_stm_pll4600c28(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int stm_pll4600c28_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct stm_pll params;
- if (!clk_pll4600c28_get_params(*prate, rate, &params)) {
- clk_pll4600c28_get_rate(*prate, &params, &rate);
+ if (!clk_pll4600c28_get_params(req->best_parent_rate, req->rate, &params)) {
+ clk_pll4600c28_get_rate(req->best_parent_rate, &params,
+ &req->rate);
} else {
pr_debug("%s: %s rate %ld Invalid\n", __func__,
- __clk_get_name(hw->clk), rate);
+ __clk_get_name(hw->clk), req->rate);
+ req->rate = 0;
+
return 0;
}
pr_debug("%s: %s new rate %ld [ndiv=%u] [idf=%u]\n",
__func__, __clk_get_name(hw->clk),
- rate, (unsigned int)params.ndiv,
+ req->rate, (unsigned int)params.ndiv,
(unsigned int)params.idf);
- return rate;
+ return 0;
}
static int set_rate_stm_pll4600c28(struct clk_hw *hw, unsigned long rate,
@@ -628,7 +634,7 @@ static const struct clk_ops stm_pll3200c32_a9_ops = {
.disable = clkgen_pll_disable,
.is_enabled = clkgen_pll_is_enabled,
.recalc_rate = recalc_stm_pll3200c32,
- .round_rate = round_rate_stm_pll3200c32,
+ .determine_rate = stm_pll3200c32_determine_rate,
.set_rate = set_rate_stm_pll3200c32,
};
@@ -637,7 +643,7 @@ static const struct clk_ops stm_pll4600c28_ops = {
.disable = clkgen_pll_disable,
.is_enabled = clkgen_pll_is_enabled,
.recalc_rate = recalc_stm_pll4600c28,
- .round_rate = round_rate_stm_pll4600c28,
+ .determine_rate = stm_pll4600c28_determine_rate,
.set_rate = set_rate_stm_pll4600c28,
};
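
Both PLL conversions keep the legacy failure convention: when the parameter search fails they set req->rate to 0 and still return 0 (success), matching the old callbacks, which reported a zero rate rather than an error. Seen from a consumer, a hedged sketch using the standard clk API:

	long rounded = clk_round_rate(clk, 1200000000);

	/* rounded == 0 means the PLL cannot synthesize the request */
	if (rounded <= 0)
		dev_warn(dev, "rate not achievable\n");
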
diff --git a/drivers/clk/stm32/Kconfig b/drivers/clk/stm32/Kconfig
index 4d2eb993ea08..5dbd75cde657 100644
--- a/drivers/clk/stm32/Kconfig
+++ b/drivers/clk/stm32/Kconfig
@@ -25,6 +25,13 @@ config COMMON_CLK_STM32MP157
help
Support for stm32mp15x SoC family clocks.
+config COMMON_CLK_STM32MP215
+ bool "Clock driver for stm32mp21x clocks"
+ depends on ARM || ARM64 || COMPILE_TEST
+ default y
+ help
+	  Support for stm32mp21x SoC family clocks.
+
config COMMON_CLK_STM32MP257
bool "Clock driver for stm32mp25x clocks"
depends on ARM64 || COMPILE_TEST
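
Since the new symbol is a bool that defaults to y, the driver is built in whenever this Kconfig menu is reachable; selecting it amounts to the one-line fragment below (objects per the Makefile hunk that follows):

	CONFIG_COMMON_CLK_STM32MP215=y
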
diff --git a/drivers/clk/stm32/Makefile b/drivers/clk/stm32/Makefile
index 0a627164fcce..e04727b59449 100644
--- a/drivers/clk/stm32/Makefile
+++ b/drivers/clk/stm32/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_COMMON_CLK_STM32MP135) += clk-stm32mp13.o clk-stm32-core.o reset-stm32.o
obj-$(CONFIG_COMMON_CLK_STM32MP157) += clk-stm32mp1.o reset-stm32.o
+obj-$(CONFIG_COMMON_CLK_STM32MP215) += clk-stm32mp21.o clk-stm32-core.o reset-stm32.o
obj-$(CONFIG_COMMON_CLK_STM32MP257) += clk-stm32mp25.o clk-stm32-core.o reset-stm32.o
diff --git a/drivers/clk/stm32/clk-stm32-core.c b/drivers/clk/stm32/clk-stm32-core.c
index 933e3cde0795..72825b9c36a4 100644
--- a/drivers/clk/stm32/clk-stm32-core.c
+++ b/drivers/clk/stm32/clk-stm32-core.c
@@ -351,14 +351,14 @@ static int clk_stm32_divider_set_rate(struct clk_hw *hw, unsigned long rate,
return ret;
}
-static long clk_stm32_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_stm32_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_stm32_div *div = to_clk_stm32_divider(hw);
const struct stm32_div_cfg *divider;
if (div->div_id == NO_STM32_DIV)
- return rate;
+ return 0;
divider = &div->clock_data->dividers[div->div_id];
@@ -369,14 +369,22 @@ static long clk_stm32_divider_round_rate(struct clk_hw *hw, unsigned long rate,
val = readl(div->base + divider->offset) >> divider->shift;
val &= clk_div_mask(divider->width);
- return divider_ro_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags,
- val);
+ req->rate = divider_ro_round_rate(hw, req->rate,
+ &req->best_parent_rate,
+ divider->table,
+ divider->width,
+ divider->flags, val);
+
+ return 0;
}
- return divider_round_rate_parent(hw, clk_hw_get_parent(hw),
- rate, prate, divider->table,
- divider->width, divider->flags);
+ req->rate = divider_round_rate_parent(hw, clk_hw_get_parent(hw),
+ req->rate,
+ &req->best_parent_rate,
+ divider->table,
+ divider->width, divider->flags);
+
+ return 0;
}
static unsigned long clk_stm32_divider_recalc_rate(struct clk_hw *hw,
@@ -392,7 +400,7 @@ static unsigned long clk_stm32_divider_recalc_rate(struct clk_hw *hw,
const struct clk_ops clk_stm32_divider_ops = {
.recalc_rate = clk_stm32_divider_recalc_rate,
- .round_rate = clk_stm32_divider_round_rate,
+ .determine_rate = clk_stm32_divider_determine_rate,
.set_rate = clk_stm32_divider_set_rate,
};
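
In the read-only branch the divider value is latched from the hardware register, so only the parent side of the request may move. A hedged sketch of what the generic helper boils down to, assuming no divider table so the divisor is val + 1 (this describes its effect, not its code):

	/* Without CLK_SET_RATE_PARENT the parent rate is fixed: */
	req->rate = DIV_ROUND_UP(req->best_parent_rate, val + 1);
	/* With it, req->best_parent_rate is first re-rounded toward
	 * req->rate * (val + 1), then divided as above. */
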
diff --git a/drivers/clk/stm32/clk-stm32mp1.c b/drivers/clk/stm32/clk-stm32mp1.c
index b8b45ed22f98..2d9ccd96ec98 100644
--- a/drivers/clk/stm32/clk-stm32mp1.c
+++ b/drivers/clk/stm32/clk-stm32mp1.c
@@ -970,12 +970,15 @@ static unsigned long __bestmult(struct clk_hw *hw, unsigned long rate,
return mult;
}
-static long timer_ker_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int timer_ker_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long factor = __bestmult(hw, rate, *parent_rate);
+ unsigned long factor = __bestmult(hw, req->rate,
+ req->best_parent_rate);
- return *parent_rate * factor;
+ req->rate = req->best_parent_rate * factor;
+
+ return 0;
}
static int timer_ker_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -1026,7 +1029,7 @@ static unsigned long timer_ker_recalc_rate(struct clk_hw *hw,
static const struct clk_ops timer_ker_ops = {
.recalc_rate = timer_ker_recalc_rate,
- .round_rate = timer_ker_round_rate,
+ .determine_rate = timer_ker_determine_rate,
.set_rate = timer_ker_set_rate,
};
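
A worked example for the timer kernel clock (illustrative numbers, not from the patch):

	/*
	 * With req->best_parent_rate = 100 MHz and a 190 MHz request, if
	 * __bestmult() picks factor = 2 the request is rounded to
	 * req->rate = 200 MHz, which timer_ker_set_rate() then programs.
	 */
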
diff --git a/drivers/clk/stm32/clk-stm32mp21.c b/drivers/clk/stm32/clk-stm32mp21.c
new file mode 100644
index 000000000000..c8a37b716bd5
--- /dev/null
+++ b/drivers/clk/stm32/clk-stm32mp21.c
@@ -0,0 +1,1586 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) STMicroelectronics 2023 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com> for STMicroelectronics.
+ */
+
+#include <linux/bus/stm32_firewall_device.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+#include "clk-stm32-core.h"
+#include "reset-stm32.h"
+#include "stm32mp21_rcc.h"
+
+#include <dt-bindings/clock/st,stm32mp21-rcc.h>
+#include <dt-bindings/reset/st,stm32mp21-rcc.h>
+
+/* Max clock binding value */
+#define STM32MP21_LAST_CLK CK_SCMI_KER_ETR
+
+/* Clock security definition */
+#define SECF_NONE -1
+
+#define RCC_REG_SIZE 32
+#define RCC_SECCFGR(x) (((x) / RCC_REG_SIZE) * 0x4 + RCC_SECCFGR0)
+#define RCC_CIDCFGR(x) ((x) * 0x8 + RCC_R0CIDCFGR)
+#define RCC_SEMCR(x) ((x) * 0x8 + RCC_R0SEMCR)
+#define RCC_CID1 1
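+
+/*
+ * Layout implied by the macros above: security bits are packed 32 per
+ * SECCFGR word, while each local resource has its own CIDCFGR/SEMCR
+ * register pair on an 8-byte stride from RCC_R0CIDCFGR/RCC_R0SEMCR.
+ */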
+
+/* Register: RIFSC_CIDCFGR */
+#define RCC_CIDCFGR_CFEN BIT(0)
+#define RCC_CIDCFGR_SEM_EN BIT(1)
+#define RCC_CIDCFGR_SEMWLC1_EN BIT(17)
+#define RCC_CIDCFGR_SCID_MASK GENMASK(6, 4)
+
+/* Register: RIFSC_SEMCR */
+#define RCC_SEMCR_SEMCID_MASK GENMASK(6, 4)
+
+#define MP21_RIF_RCC_MCO1 108
+#define MP21_RIF_RCC_MCO2 109
+
+#define SEC_RIFSC_FLAG BIT(31)
+#define SEC_RIFSC(_id) ((_id) | SEC_RIFSC_FLAG)
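+
+/*
+ * Clocks tagged with SEC_RIFSC() are access-filtered by the RIFSC
+ * firewall rather than by the RCC's own registers; see
+ * stm32mp21_check_security() below.
+ */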
+
+enum {
+ HSE,
+ HSI,
+ MSI,
+ LSE,
+ LSI,
+ HSE_DIV2,
+ ICN_HS_MCU,
+ ICN_LS_MCU,
+ ICN_SDMMC,
+ ICN_DDR,
+ ICN_DISPLAY,
+ ICN_HSL,
+ ICN_NIC,
+ FLEXGEN_07,
+ FLEXGEN_08,
+ FLEXGEN_09,
+ FLEXGEN_10,
+ FLEXGEN_11,
+ FLEXGEN_12,
+ FLEXGEN_13,
+ FLEXGEN_14,
+ FLEXGEN_16,
+ FLEXGEN_17,
+ FLEXGEN_18,
+ FLEXGEN_19,
+ FLEXGEN_20,
+ FLEXGEN_21,
+ FLEXGEN_22,
+ FLEXGEN_23,
+ FLEXGEN_24,
+ FLEXGEN_25,
+ FLEXGEN_26,
+ FLEXGEN_27,
+ FLEXGEN_29,
+ FLEXGEN_30,
+ FLEXGEN_31,
+ FLEXGEN_33,
+ FLEXGEN_36,
+ FLEXGEN_37,
+ FLEXGEN_38,
+ FLEXGEN_39,
+ FLEXGEN_40,
+ FLEXGEN_41,
+ FLEXGEN_42,
+ FLEXGEN_43,
+ FLEXGEN_44,
+ FLEXGEN_45,
+ FLEXGEN_46,
+ FLEXGEN_47,
+ FLEXGEN_48,
+ FLEXGEN_50,
+ FLEXGEN_51,
+ FLEXGEN_52,
+ FLEXGEN_53,
+ FLEXGEN_54,
+ FLEXGEN_55,
+ FLEXGEN_56,
+ FLEXGEN_57,
+ FLEXGEN_58,
+ FLEXGEN_61,
+ FLEXGEN_62,
+ FLEXGEN_63,
+ ICN_APB1,
+ ICN_APB2,
+ ICN_APB3,
+ ICN_APB4,
+ ICN_APB5,
+ ICN_APBDBG,
+ TIMG1,
+ TIMG2,
+};
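+
+/*
+ * The indices above follow the order of the RCC node's "clocks" property
+ * in the device tree; they are consumed as clk_parent_data.index below.
+ */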
+
+static const struct clk_parent_data adc1_src[] = {
+ { .index = FLEXGEN_46 },
+ { .index = ICN_LS_MCU },
+};
+
+static const struct clk_parent_data adc2_src[] = {
+ { .index = FLEXGEN_47 },
+ { .index = ICN_LS_MCU },
+ { .index = FLEXGEN_46 },
+};
+
+static const struct clk_parent_data usb2phy1_src[] = {
+ { .index = FLEXGEN_57 },
+ { .index = HSE_DIV2 },
+};
+
+static const struct clk_parent_data usb2phy2_src[] = {
+ { .index = FLEXGEN_58 },
+ { .index = HSE_DIV2 },
+};
+
+static const struct clk_parent_data dts_src[] = {
+ { .index = HSI },
+ { .index = HSE },
+ { .index = MSI },
+};
+
+static const struct clk_parent_data mco1_src[] = {
+ { .index = FLEXGEN_61 },
+};
+
+static const struct clk_parent_data mco2_src[] = {
+ { .index = FLEXGEN_62 },
+};
+
+enum enum_mux_cfg {
+ MUX_ADC1,
+ MUX_ADC2,
+ MUX_DTS,
+ MUX_MCO1,
+ MUX_MCO2,
+ MUX_USB2PHY1,
+ MUX_USB2PHY2,
+ MUX_NB
+};
+
+#define MUX_CFG(id, _offset, _shift, _width) \
+ [id] = { \
+ .offset = (_offset), \
+ .shift = (_shift), \
+ .width = (_width), \
+ }
+
+static const struct stm32_mux_cfg stm32mp21_muxes[MUX_NB] = {
+ MUX_CFG(MUX_ADC1, RCC_ADC1CFGR, 12, 1),
+ MUX_CFG(MUX_ADC2, RCC_ADC2CFGR, 12, 2),
+ MUX_CFG(MUX_DTS, RCC_DTSCFGR, 12, 2),
+ MUX_CFG(MUX_MCO1, RCC_MCO1CFGR, 0, 1),
+ MUX_CFG(MUX_MCO2, RCC_MCO2CFGR, 0, 1),
+ MUX_CFG(MUX_USB2PHY1, RCC_USB2PHY1CFGR, 15, 1),
+ MUX_CFG(MUX_USB2PHY2, RCC_USB2PHY2CFGR, 15, 1),
+};
+
+enum enum_gate_cfg {
+ GATE_ADC1,
+ GATE_ADC2,
+ GATE_CRC,
+ GATE_CRYP1,
+ GATE_CRYP2,
+ GATE_CSI,
+ GATE_DCMIPP,
+ GATE_DCMIPSSI,
+ GATE_DDRPERFM,
+ GATE_DTS,
+ GATE_ETH1,
+ GATE_ETH1MAC,
+ GATE_ETH1RX,
+ GATE_ETH1STP,
+ GATE_ETH1TX,
+ GATE_ETH2,
+ GATE_ETH2MAC,
+ GATE_ETH2RX,
+ GATE_ETH2STP,
+ GATE_ETH2TX,
+ GATE_FDCAN,
+ GATE_HASH1,
+ GATE_HASH2,
+ GATE_HDP,
+ GATE_I2C1,
+ GATE_I2C2,
+ GATE_I2C3,
+ GATE_I3C1,
+ GATE_I3C2,
+ GATE_I3C3,
+ GATE_IWDG1,
+ GATE_IWDG2,
+ GATE_IWDG3,
+ GATE_IWDG4,
+ GATE_LPTIM1,
+ GATE_LPTIM2,
+ GATE_LPTIM3,
+ GATE_LPTIM4,
+ GATE_LPTIM5,
+ GATE_LPUART1,
+ GATE_LTDC,
+ GATE_MCO1,
+ GATE_MCO2,
+ GATE_MDF1,
+ GATE_OTG,
+ GATE_PKA,
+ GATE_RNG1,
+ GATE_RNG2,
+ GATE_SAES,
+ GATE_SAI1,
+ GATE_SAI2,
+ GATE_SAI3,
+ GATE_SAI4,
+ GATE_SDMMC1,
+ GATE_SDMMC2,
+ GATE_SDMMC3,
+ GATE_SERC,
+ GATE_SPDIFRX,
+ GATE_SPI1,
+ GATE_SPI2,
+ GATE_SPI3,
+ GATE_SPI4,
+ GATE_SPI5,
+ GATE_SPI6,
+ GATE_TIM1,
+ GATE_TIM10,
+ GATE_TIM11,
+ GATE_TIM12,
+ GATE_TIM13,
+ GATE_TIM14,
+ GATE_TIM15,
+ GATE_TIM16,
+ GATE_TIM17,
+ GATE_TIM2,
+ GATE_TIM3,
+ GATE_TIM4,
+ GATE_TIM5,
+ GATE_TIM6,
+ GATE_TIM7,
+ GATE_TIM8,
+ GATE_UART4,
+ GATE_UART5,
+ GATE_UART7,
+ GATE_USART1,
+ GATE_USART2,
+ GATE_USART3,
+ GATE_USART6,
+ GATE_USB2PHY1,
+ GATE_USB2PHY2,
+ GATE_USBH,
+ GATE_VREF,
+ GATE_WWDG1,
+ GATE_NB
+};
+
+#define GATE_CFG(id, _offset, _bit_idx, _offset_clr) \
+ [id] = { \
+ .offset = (_offset), \
+ .bit_idx = (_bit_idx), \
+ .set_clr = (_offset_clr), \
+ }
+
+static const struct stm32_gate_cfg stm32mp21_gates[GATE_NB] = {
+ GATE_CFG(GATE_ADC1, RCC_ADC1CFGR, 1, 0),
+ GATE_CFG(GATE_ADC2, RCC_ADC2CFGR, 1, 0),
+ GATE_CFG(GATE_CRC, RCC_CRCCFGR, 1, 0),
+ GATE_CFG(GATE_CRYP1, RCC_CRYP1CFGR, 1, 0),
+ GATE_CFG(GATE_CRYP2, RCC_CRYP2CFGR, 1, 0),
+ GATE_CFG(GATE_CSI, RCC_CSICFGR, 1, 0),
+ GATE_CFG(GATE_DCMIPP, RCC_DCMIPPCFGR, 1, 0),
+ GATE_CFG(GATE_DCMIPSSI, RCC_DCMIPSSICFGR, 1, 0),
+ GATE_CFG(GATE_DDRPERFM, RCC_DDRPERFMCFGR, 1, 0),
+ GATE_CFG(GATE_DTS, RCC_DTSCFGR, 1, 0),
+ GATE_CFG(GATE_ETH1, RCC_ETH1CFGR, 5, 0),
+ GATE_CFG(GATE_ETH1MAC, RCC_ETH1CFGR, 1, 0),
+ GATE_CFG(GATE_ETH1RX, RCC_ETH1CFGR, 10, 0),
+ GATE_CFG(GATE_ETH1STP, RCC_ETH1CFGR, 4, 0),
+ GATE_CFG(GATE_ETH1TX, RCC_ETH1CFGR, 8, 0),
+ GATE_CFG(GATE_ETH2, RCC_ETH2CFGR, 5, 0),
+ GATE_CFG(GATE_ETH2MAC, RCC_ETH2CFGR, 1, 0),
+ GATE_CFG(GATE_ETH2RX, RCC_ETH2CFGR, 10, 0),
+ GATE_CFG(GATE_ETH2STP, RCC_ETH2CFGR, 4, 0),
+ GATE_CFG(GATE_ETH2TX, RCC_ETH2CFGR, 8, 0),
+ GATE_CFG(GATE_FDCAN, RCC_FDCANCFGR, 1, 0),
+ GATE_CFG(GATE_HASH1, RCC_HASH1CFGR, 1, 0),
+ GATE_CFG(GATE_HASH2, RCC_HASH2CFGR, 1, 0),
+ GATE_CFG(GATE_HDP, RCC_HDPCFGR, 1, 0),
+ GATE_CFG(GATE_I2C1, RCC_I2C1CFGR, 1, 0),
+ GATE_CFG(GATE_I2C2, RCC_I2C2CFGR, 1, 0),
+ GATE_CFG(GATE_I2C3, RCC_I2C3CFGR, 1, 0),
+ GATE_CFG(GATE_I3C1, RCC_I3C1CFGR, 1, 0),
+ GATE_CFG(GATE_I3C2, RCC_I3C2CFGR, 1, 0),
+ GATE_CFG(GATE_I3C3, RCC_I3C3CFGR, 1, 0),
+ GATE_CFG(GATE_IWDG1, RCC_IWDG1CFGR, 1, 0),
+ GATE_CFG(GATE_IWDG2, RCC_IWDG2CFGR, 1, 0),
+ GATE_CFG(GATE_IWDG3, RCC_IWDG3CFGR, 1, 0),
+ GATE_CFG(GATE_IWDG4, RCC_IWDG4CFGR, 1, 0),
+ GATE_CFG(GATE_LPTIM1, RCC_LPTIM1CFGR, 1, 0),
+ GATE_CFG(GATE_LPTIM2, RCC_LPTIM2CFGR, 1, 0),
+ GATE_CFG(GATE_LPTIM3, RCC_LPTIM3CFGR, 1, 0),
+ GATE_CFG(GATE_LPTIM4, RCC_LPTIM4CFGR, 1, 0),
+ GATE_CFG(GATE_LPTIM5, RCC_LPTIM5CFGR, 1, 0),
+ GATE_CFG(GATE_LPUART1, RCC_LPUART1CFGR, 1, 0),
+ GATE_CFG(GATE_LTDC, RCC_LTDCCFGR, 1, 0),
+ GATE_CFG(GATE_MCO1, RCC_MCO1CFGR, 8, 0),
+ GATE_CFG(GATE_MCO2, RCC_MCO2CFGR, 8, 0),
+ GATE_CFG(GATE_MDF1, RCC_MDF1CFGR, 1, 0),
+ GATE_CFG(GATE_OTG, RCC_OTGCFGR, 1, 0),
+ GATE_CFG(GATE_PKA, RCC_PKACFGR, 1, 0),
+ GATE_CFG(GATE_RNG1, RCC_RNG1CFGR, 1, 0),
+ GATE_CFG(GATE_RNG2, RCC_RNG2CFGR, 1, 0),
+ GATE_CFG(GATE_SAES, RCC_SAESCFGR, 1, 0),
+ GATE_CFG(GATE_SAI1, RCC_SAI1CFGR, 1, 0),
+ GATE_CFG(GATE_SAI2, RCC_SAI2CFGR, 1, 0),
+ GATE_CFG(GATE_SAI3, RCC_SAI3CFGR, 1, 0),
+ GATE_CFG(GATE_SAI4, RCC_SAI4CFGR, 1, 0),
+ GATE_CFG(GATE_SDMMC1, RCC_SDMMC1CFGR, 1, 0),
+ GATE_CFG(GATE_SDMMC2, RCC_SDMMC2CFGR, 1, 0),
+ GATE_CFG(GATE_SDMMC3, RCC_SDMMC3CFGR, 1, 0),
+ GATE_CFG(GATE_SERC, RCC_SERCCFGR, 1, 0),
+ GATE_CFG(GATE_SPDIFRX, RCC_SPDIFRXCFGR, 1, 0),
+ GATE_CFG(GATE_SPI1, RCC_SPI1CFGR, 1, 0),
+ GATE_CFG(GATE_SPI2, RCC_SPI2CFGR, 1, 0),
+ GATE_CFG(GATE_SPI3, RCC_SPI3CFGR, 1, 0),
+ GATE_CFG(GATE_SPI4, RCC_SPI4CFGR, 1, 0),
+ GATE_CFG(GATE_SPI5, RCC_SPI5CFGR, 1, 0),
+ GATE_CFG(GATE_SPI6, RCC_SPI6CFGR, 1, 0),
+ GATE_CFG(GATE_TIM1, RCC_TIM1CFGR, 1, 0),
+ GATE_CFG(GATE_TIM10, RCC_TIM10CFGR, 1, 0),
+ GATE_CFG(GATE_TIM11, RCC_TIM11CFGR, 1, 0),
+ GATE_CFG(GATE_TIM12, RCC_TIM12CFGR, 1, 0),
+ GATE_CFG(GATE_TIM13, RCC_TIM13CFGR, 1, 0),
+ GATE_CFG(GATE_TIM14, RCC_TIM14CFGR, 1, 0),
+ GATE_CFG(GATE_TIM15, RCC_TIM15CFGR, 1, 0),
+ GATE_CFG(GATE_TIM16, RCC_TIM16CFGR, 1, 0),
+ GATE_CFG(GATE_TIM17, RCC_TIM17CFGR, 1, 0),
+ GATE_CFG(GATE_TIM2, RCC_TIM2CFGR, 1, 0),
+ GATE_CFG(GATE_TIM3, RCC_TIM3CFGR, 1, 0),
+ GATE_CFG(GATE_TIM4, RCC_TIM4CFGR, 1, 0),
+ GATE_CFG(GATE_TIM5, RCC_TIM5CFGR, 1, 0),
+ GATE_CFG(GATE_TIM6, RCC_TIM6CFGR, 1, 0),
+ GATE_CFG(GATE_TIM7, RCC_TIM7CFGR, 1, 0),
+ GATE_CFG(GATE_TIM8, RCC_TIM8CFGR, 1, 0),
+ GATE_CFG(GATE_UART4, RCC_UART4CFGR, 1, 0),
+ GATE_CFG(GATE_UART5, RCC_UART5CFGR, 1, 0),
+ GATE_CFG(GATE_UART7, RCC_UART7CFGR, 1, 0),
+ GATE_CFG(GATE_USART1, RCC_USART1CFGR, 1, 0),
+ GATE_CFG(GATE_USART2, RCC_USART2CFGR, 1, 0),
+ GATE_CFG(GATE_USART3, RCC_USART3CFGR, 1, 0),
+ GATE_CFG(GATE_USART6, RCC_USART6CFGR, 1, 0),
+ GATE_CFG(GATE_USB2PHY1, RCC_USB2PHY1CFGR, 1, 0),
+ GATE_CFG(GATE_USB2PHY2, RCC_USB2PHY2CFGR, 1, 0),
+ GATE_CFG(GATE_USBH, RCC_USBHCFGR, 1, 0),
+ GATE_CFG(GATE_VREF, RCC_VREFCFGR, 1, 0),
+ GATE_CFG(GATE_WWDG1, RCC_WWDG1CFGR, 1, 0),
+};
+
+#define CLK_HW_INIT_INDEX(_name, _parent, _ops, _flags) \
+ (&(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_data = (const struct clk_parent_data[]) { \
+ { .index = _parent }, \
+ }, \
+ .num_parents = 1, \
+ .ops = _ops, \
+ })
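+
+/*
+ * Shorthand for the common single-parent case: builds a clk_init_data
+ * whose single parent is referenced by index (the enum above) rather
+ * than by name.
+ */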
+
+/* ADC */
+static struct clk_stm32_gate ck_icn_p_adc1 = {
+ .gate_id = GATE_ADC1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_adc1", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_composite ck_ker_adc1 = {
+ .gate_id = GATE_ADC1,
+ .mux_id = MUX_ADC1,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_adc1", adc1_src, &clk_stm32_composite_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_adc2 = {
+ .gate_id = GATE_ADC2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_adc2", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_composite ck_ker_adc2 = {
+ .gate_id = GATE_ADC2,
+ .mux_id = MUX_ADC2,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_adc2", adc2_src, &clk_stm32_composite_ops, 0),
+};
+
+/* CSI-HOST */
+static struct clk_stm32_gate ck_icn_p_csi = {
+ .gate_id = GATE_CSI,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_csi", ICN_APB4, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_csi = {
+ .gate_id = GATE_CSI,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_csi", FLEXGEN_29, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_csitxesc = {
+ .gate_id = GATE_CSI,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_csitxesc", FLEXGEN_30, &clk_stm32_gate_ops, 0),
+};
+
+/* CSI-PHY */
+static struct clk_stm32_gate ck_ker_csiphy = {
+ .gate_id = GATE_CSI,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_csiphy", FLEXGEN_31, &clk_stm32_gate_ops, 0),
+};
+
+/* DCMIPP */
+static struct clk_stm32_gate ck_icn_p_dcmipp = {
+ .gate_id = GATE_DCMIPP,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_dcmipp", ICN_APB4, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_dcmipssi = {
+ .gate_id = GATE_DCMIPSSI,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_dcmipssi", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* DDRPERFM */
+static struct clk_stm32_gate ck_icn_p_ddrperfm = {
+ .gate_id = GATE_DDRPERFM,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_ddrperfm", ICN_APB4, &clk_stm32_gate_ops, 0),
+};
+
+/* CRC */
+static struct clk_stm32_gate ck_icn_p_crc = {
+ .gate_id = GATE_CRC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_crc", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* CRYP */
+static struct clk_stm32_gate ck_icn_p_cryp1 = {
+ .gate_id = GATE_CRYP1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_cryp1", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_cryp2 = {
+ .gate_id = GATE_CRYP2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_cryp2", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* DBG & TRACE */
+/* Trace and debug clocks are managed by SCMI */
+
+/* LTDC */
+static struct clk_stm32_gate ck_icn_p_ltdc = {
+ .gate_id = GATE_LTDC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_ltdc", ICN_APB4, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_ltdc = {
+ .gate_id = GATE_LTDC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_ltdc", FLEXGEN_27, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+/* DTS */
+static struct clk_stm32_composite ck_ker_dts = {
+ .gate_id = GATE_DTS,
+ .mux_id = MUX_DTS,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_dts", dts_src,
+ &clk_stm32_composite_ops, 0),
+};
+
+/* ETHERNET */
+static struct clk_stm32_gate ck_icn_p_eth1 = {
+ .gate_id = GATE_ETH1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_eth1", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth1stp = {
+ .gate_id = GATE_ETH1STP,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1stp", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth1 = {
+ .gate_id = GATE_ETH1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1", FLEXGEN_54, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth1ptp = {
+ .gate_id = GATE_ETH1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1ptp", FLEXGEN_56, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth1mac = {
+ .gate_id = GATE_ETH1MAC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1mac", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth1tx = {
+ .gate_id = GATE_ETH1TX,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1tx", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth1rx = {
+ .gate_id = GATE_ETH1RX,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1rx", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_eth2 = {
+ .gate_id = GATE_ETH2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_eth2", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth2stp = {
+ .gate_id = GATE_ETH2STP,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2stp", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth2 = {
+ .gate_id = GATE_ETH2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2", FLEXGEN_55, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth2ptp = {
+ .gate_id = GATE_ETH2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2ptp", FLEXGEN_56, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth2mac = {
+ .gate_id = GATE_ETH2MAC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2mac", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth2tx = {
+ .gate_id = GATE_ETH2TX,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2tx", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth2rx = {
+ .gate_id = GATE_ETH2RX,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2rx", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* FDCAN */
+static struct clk_stm32_gate ck_icn_p_fdcan = {
+ .gate_id = GATE_FDCAN,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_fdcan", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_fdcan = {
+ .gate_id = GATE_FDCAN,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_fdcan", FLEXGEN_26, &clk_stm32_gate_ops, 0),
+};
+
+/* HASH */
+static struct clk_stm32_gate ck_icn_p_hash1 = {
+ .gate_id = GATE_HASH1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_hash1", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_hash2 = {
+ .gate_id = GATE_HASH2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_hash2", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* HDP */
+static struct clk_stm32_gate ck_icn_p_hdp = {
+ .gate_id = GATE_HDP,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_hdp", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+/* I2C */
+static struct clk_stm32_gate ck_icn_p_i2c1 = {
+ .gate_id = GATE_I2C1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i2c1", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_i2c2 = {
+ .gate_id = GATE_I2C2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i2c2", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_i2c3 = {
+ .gate_id = GATE_I2C3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i2c3", ICN_APB5, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i2c1 = {
+ .gate_id = GATE_I2C1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i2c1", FLEXGEN_13, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i2c2 = {
+ .gate_id = GATE_I2C2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i2c2", FLEXGEN_13, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i2c3 = {
+ .gate_id = GATE_I2C3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i2c3", FLEXGEN_38, &clk_stm32_gate_ops, 0),
+};
+
+/* I3C */
+static struct clk_stm32_gate ck_icn_p_i3c1 = {
+ .gate_id = GATE_I3C1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i3c1", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_i3c2 = {
+ .gate_id = GATE_I3C2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i3c2", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_i3c3 = {
+ .gate_id = GATE_I3C3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i3c3", ICN_APB5, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i3c1 = {
+ .gate_id = GATE_I3C1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i3c1", FLEXGEN_14, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i3c2 = {
+ .gate_id = GATE_I3C2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i3c2", FLEXGEN_14, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i3c3 = {
+ .gate_id = GATE_I3C3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i3c3", FLEXGEN_36, &clk_stm32_gate_ops, 0),
+};
+
+/* IWDG */
+static struct clk_stm32_gate ck_icn_p_iwdg1 = {
+ .gate_id = GATE_IWDG1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_iwdg1", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_iwdg2 = {
+ .gate_id = GATE_IWDG2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_iwdg2", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_iwdg3 = {
+ .gate_id = GATE_IWDG3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_iwdg3", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_iwdg4 = {
+ .gate_id = GATE_IWDG4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_iwdg4", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+/* LPTIM */
+static struct clk_stm32_gate ck_icn_p_lptim1 = {
+ .gate_id = GATE_LPTIM1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lptim1", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_lptim2 = {
+ .gate_id = GATE_LPTIM2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lptim2", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_lptim3 = {
+ .gate_id = GATE_LPTIM3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lptim3", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_lptim4 = {
+ .gate_id = GATE_LPTIM4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lptim4", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_lptim5 = {
+ .gate_id = GATE_LPTIM5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lptim5", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_lptim1 = {
+ .gate_id = GATE_LPTIM1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_lptim1", FLEXGEN_07, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_lptim2 = {
+ .gate_id = GATE_LPTIM2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_lptim2", FLEXGEN_07, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_lptim3 = {
+ .gate_id = GATE_LPTIM3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_lptim3", FLEXGEN_40, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_lptim4 = {
+ .gate_id = GATE_LPTIM4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_lptim4", FLEXGEN_41, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_lptim5 = {
+ .gate_id = GATE_LPTIM5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_lptim5", FLEXGEN_42, &clk_stm32_gate_ops, 0),
+};
+
+/* LPUART */
+static struct clk_stm32_gate ck_icn_p_lpuart1 = {
+ .gate_id = GATE_LPUART1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lpuart1", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_lpuart1 = {
+ .gate_id = GATE_LPUART1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_lpuart1", FLEXGEN_39, &clk_stm32_gate_ops, 0),
+};
+
+/* MCO1 & MCO2 */
+static struct clk_stm32_composite ck_mco1 = {
+ .gate_id = GATE_MCO1,
+ .mux_id = MUX_MCO1,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_mco1", mco1_src, &clk_stm32_composite_ops, 0),
+};
+
+static struct clk_stm32_composite ck_mco2 = {
+ .gate_id = GATE_MCO2,
+ .mux_id = MUX_MCO2,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_mco2", mco2_src, &clk_stm32_composite_ops, 0),
+};
+
+/* MDF */
+static struct clk_stm32_gate ck_icn_p_mdf1 = {
+ .gate_id = GATE_MDF1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_mdf1", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_mdf1 = {
+ .gate_id = GATE_MDF1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_mdf1", FLEXGEN_21, &clk_stm32_gate_ops, 0),
+};
+
+/* OTG */
+static struct clk_stm32_gate ck_icn_m_otg = {
+ .gate_id = GATE_OTG,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_otg", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* PKA */
+static struct clk_stm32_gate ck_icn_p_pka = {
+ .gate_id = GATE_PKA,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_pka", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* RNG */
+static struct clk_stm32_gate ck_icn_p_rng1 = {
+ .gate_id = GATE_RNG1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_rng1", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_rng2 = {
+ .gate_id = GATE_RNG2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_rng2", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* SAES */
+static struct clk_stm32_gate ck_icn_p_saes = {
+ .gate_id = GATE_SAES,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_saes", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* SAI */
+static struct clk_stm32_gate ck_icn_p_sai1 = {
+ .gate_id = GATE_SAI1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_sai1", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_sai2 = {
+ .gate_id = GATE_SAI2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_sai2", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_sai3 = {
+ .gate_id = GATE_SAI3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_sai3", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_sai4 = {
+ .gate_id = GATE_SAI4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_sai4", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_sai1 = {
+ .gate_id = GATE_SAI1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sai1", FLEXGEN_22, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate ck_ker_sai2 = {
+ .gate_id = GATE_SAI2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sai2", FLEXGEN_23, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate ck_ker_sai3 = {
+ .gate_id = GATE_SAI3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sai3", FLEXGEN_24, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate ck_ker_sai4 = {
+ .gate_id = GATE_SAI4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sai4", FLEXGEN_25, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+/* SDMMC */
+static struct clk_stm32_gate ck_icn_m_sdmmc1 = {
+ .gate_id = GATE_SDMMC1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_sdmmc1", ICN_SDMMC, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_m_sdmmc2 = {
+ .gate_id = GATE_SDMMC2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_sdmmc2", ICN_SDMMC, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_m_sdmmc3 = {
+ .gate_id = GATE_SDMMC3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_sdmmc3", ICN_SDMMC, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_sdmmc1 = {
+ .gate_id = GATE_SDMMC1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sdmmc1", FLEXGEN_51, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_sdmmc2 = {
+ .gate_id = GATE_SDMMC2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sdmmc2", FLEXGEN_52, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_sdmmc3 = {
+ .gate_id = GATE_SDMMC3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sdmmc3", FLEXGEN_53, &clk_stm32_gate_ops, 0),
+};
+
+/* SERC */
+static struct clk_stm32_gate ck_icn_p_serc = {
+ .gate_id = GATE_SERC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_serc", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+/* SPDIF */
+static struct clk_stm32_gate ck_icn_p_spdifrx = {
+ .gate_id = GATE_SPDIFRX,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spdifrx", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_spdifrx = {
+ .gate_id = GATE_SPDIFRX,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spdifrx", FLEXGEN_12, &clk_stm32_gate_ops, 0),
+};
+
+/* SPI */
+static struct clk_stm32_gate ck_icn_p_spi1 = {
+ .gate_id = GATE_SPI1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi1", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_spi2 = {
+ .gate_id = GATE_SPI2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi2", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_spi3 = {
+ .gate_id = GATE_SPI3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi3", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_spi4 = {
+ .gate_id = GATE_SPI4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi4", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_spi5 = {
+ .gate_id = GATE_SPI5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi5", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_spi6 = {
+ .gate_id = GATE_SPI6,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi6", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_spi1 = {
+ .gate_id = GATE_SPI1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi1", FLEXGEN_16, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate ck_ker_spi2 = {
+ .gate_id = GATE_SPI2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi2", FLEXGEN_10, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate ck_ker_spi3 = {
+ .gate_id = GATE_SPI3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi3", FLEXGEN_11, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate ck_ker_spi4 = {
+ .gate_id = GATE_SPI4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi4", FLEXGEN_17, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_spi5 = {
+ .gate_id = GATE_SPI5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi5", FLEXGEN_17, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_spi6 = {
+ .gate_id = GATE_SPI6,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi6", FLEXGEN_37, &clk_stm32_gate_ops, 0),
+};
+
+/* Timers */
+static struct clk_stm32_gate ck_icn_p_tim2 = {
+ .gate_id = GATE_TIM2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim2", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim3 = {
+ .gate_id = GATE_TIM3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim3", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim4 = {
+ .gate_id = GATE_TIM4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim4", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim5 = {
+ .gate_id = GATE_TIM5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim5", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim6 = {
+ .gate_id = GATE_TIM6,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim6", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim7 = {
+ .gate_id = GATE_TIM7,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim7", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim10 = {
+ .gate_id = GATE_TIM10,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim10", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim11 = {
+ .gate_id = GATE_TIM11,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim11", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim12 = {
+ .gate_id = GATE_TIM12,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim12", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim13 = {
+ .gate_id = GATE_TIM13,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim13", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim14 = {
+ .gate_id = GATE_TIM14,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim14", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim1 = {
+ .gate_id = GATE_TIM1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim1", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim8 = {
+ .gate_id = GATE_TIM8,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim8", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim15 = {
+ .gate_id = GATE_TIM15,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim15", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim16 = {
+ .gate_id = GATE_TIM16,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim16", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim17 = {
+ .gate_id = GATE_TIM17,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim17", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim2 = {
+ .gate_id = GATE_TIM2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim2", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim3 = {
+ .gate_id = GATE_TIM3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim3", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim4 = {
+ .gate_id = GATE_TIM4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim4", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim5 = {
+ .gate_id = GATE_TIM5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim5", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim6 = {
+ .gate_id = GATE_TIM6,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim6", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim7 = {
+ .gate_id = GATE_TIM7,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim7", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim10 = {
+ .gate_id = GATE_TIM10,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim10", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim11 = {
+ .gate_id = GATE_TIM11,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim11", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim12 = {
+ .gate_id = GATE_TIM12,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim12", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim13 = {
+ .gate_id = GATE_TIM13,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim13", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim14 = {
+ .gate_id = GATE_TIM14,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim14", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim1 = {
+ .gate_id = GATE_TIM1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim1", TIMG2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim8 = {
+ .gate_id = GATE_TIM8,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim8", TIMG2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim15 = {
+ .gate_id = GATE_TIM15,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim15", TIMG2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim16 = {
+ .gate_id = GATE_TIM16,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim16", TIMG2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim17 = {
+ .gate_id = GATE_TIM17,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim17", TIMG2, &clk_stm32_gate_ops, 0),
+};
+
+/* UART/USART */
+static struct clk_stm32_gate ck_icn_p_usart2 = {
+ .gate_id = GATE_USART2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_usart2", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_usart3 = {
+ .gate_id = GATE_USART3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_usart3", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_uart4 = {
+ .gate_id = GATE_UART4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_uart4", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_uart5 = {
+ .gate_id = GATE_UART5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_uart5", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_usart1 = {
+ .gate_id = GATE_USART1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_usart1", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_usart6 = {
+ .gate_id = GATE_USART6,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_usart6", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_uart7 = {
+ .gate_id = GATE_UART7,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_uart7", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_usart2 = {
+ .gate_id = GATE_USART2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_usart2", FLEXGEN_08, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_uart4 = {
+ .gate_id = GATE_UART4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_uart4", FLEXGEN_08, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_usart3 = {
+ .gate_id = GATE_USART3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_usart3", FLEXGEN_09, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_uart5 = {
+ .gate_id = GATE_UART5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_uart5", FLEXGEN_09, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_usart1 = {
+ .gate_id = GATE_USART1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_usart1", FLEXGEN_18, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_usart6 = {
+ .gate_id = GATE_USART6,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_usart6", FLEXGEN_19, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_uart7 = {
+ .gate_id = GATE_UART7,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_uart7", FLEXGEN_20, &clk_stm32_gate_ops, 0),
+};
+
+/* USB2PHY1 */
+static struct clk_stm32_composite ck_ker_usb2phy1 = {
+ .gate_id = GATE_USB2PHY1,
+ .mux_id = MUX_USB2PHY1,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_usb2phy1", usb2phy1_src,
+ &clk_stm32_composite_ops, 0),
+};
+
+/* USBH */
+static struct clk_stm32_gate ck_icn_m_usbhehci = {
+ .gate_id = GATE_USBH,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_usbhehci", ICN_HSL, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_m_usbhohci = {
+ .gate_id = GATE_USBH,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_usbhohci", ICN_HSL, &clk_stm32_gate_ops, 0),
+};
+
+/* USB2PHY2 */
+static struct clk_stm32_composite ck_ker_usb2phy2_en = {
+ .gate_id = GATE_USB2PHY2,
+ .mux_id = MUX_USB2PHY2,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_usb2phy2_en", usb2phy2_src,
+ &clk_stm32_composite_ops, 0),
+};
+
+/* VREF */
+static struct clk_stm32_gate ck_icn_p_vref = {
+ .gate_id = GATE_VREF,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_vref", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+/* WWDG */
+static struct clk_stm32_gate ck_icn_p_wwdg1 = {
+ .gate_id = GATE_WWDG1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_wwdg1", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
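+/*
+ * Check whether Linux (expected to run as CID1) may use a local RCC
+ * resource: deny if the resource is marked secure, allow if CID
+ * filtering is off, otherwise require either static CID1 ownership or
+ * a CID1-whitelisted semaphore currently held by CID1.
+ */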
+static int stm32_rcc_get_access(void __iomem *base, u32 index)
+{
+ u32 seccfgr, cidcfgr, semcr;
+ int bit, cid;
+
+ bit = index % RCC_REG_SIZE;
+
+ seccfgr = readl(base + RCC_SECCFGR(index));
+ if (seccfgr & BIT(bit))
+ return -EACCES;
+
+ cidcfgr = readl(base + RCC_CIDCFGR(index));
+ if (!(cidcfgr & RCC_CIDCFGR_CFEN))
+ /* CID filtering is turned off: access granted */
+ return 0;
+
+ if (!(cidcfgr & RCC_CIDCFGR_SEM_EN)) {
+ /* Static CID mode */
+ cid = FIELD_GET(RCC_CIDCFGR_SCID_MASK, cidcfgr);
+ if (cid != RCC_CID1)
+ return -EACCES;
+ return 0;
+ }
+
+ /* Pass-list with semaphore mode */
+ if (!(cidcfgr & RCC_CIDCFGR_SEMWLC1_EN))
+ return -EACCES;
+
+ semcr = readl(base + RCC_SEMCR(index));
+
+ cid = FIELD_GET(RCC_SEMCR_SEMCID_MASK, semcr);
+ if (cid != RCC_CID1)
+ return -EACCES;
+
+ return 0;
+}
+
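+/*
+ * Dispatch the security check: RIFSC-tagged ids are validated through
+ * the stm32_firewall API, while the remaining ids are checked against
+ * the RCC's own SECCFGR/CIDCFGR/SEMCR registers.
+ */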
+static int stm32mp21_check_security(struct device_node *np, void __iomem *base,
+ const struct clock_config *cfg)
+{
+ int ret = 0;
+
+ if (cfg->sec_id != SECF_NONE) {
+ struct stm32_firewall firewall;
+ u32 index = (u32)cfg->sec_id;
+
+ if (index & SEC_RIFSC_FLAG) {
+ ret = stm32_firewall_get_firewall(np, &firewall, 1);
+ if (ret)
+ return ret;
+ ret = stm32_firewall_grant_access_by_id(&firewall, index & ~SEC_RIFSC_FLAG);
+ } else {
+ ret = stm32_rcc_get_access(base, cfg->sec_id & ~SEC_RIFSC_FLAG);
+ }
+ }
+
+ return ret;
+}
+
+static const struct clock_config stm32mp21_clock_cfg[] = {
+ STM32_GATE_CFG(CK_BUS_ETH1, ck_icn_p_eth1, SEC_RIFSC(60)),
+ STM32_GATE_CFG(CK_BUS_ETH2, ck_icn_p_eth2, SEC_RIFSC(61)),
+ STM32_GATE_CFG(CK_BUS_ADC1, ck_icn_p_adc1, SEC_RIFSC(58)),
+ STM32_GATE_CFG(CK_BUS_ADC2, ck_icn_p_adc2, SEC_RIFSC(59)),
+ STM32_GATE_CFG(CK_BUS_CRC, ck_icn_p_crc, SEC_RIFSC(109)),
+ STM32_GATE_CFG(CK_BUS_MDF1, ck_icn_p_mdf1, SEC_RIFSC(54)),
+ STM32_GATE_CFG(CK_BUS_HASH1, ck_icn_p_hash1, SEC_RIFSC(96)),
+ STM32_GATE_CFG(CK_BUS_HASH2, ck_icn_p_hash2, SEC_RIFSC(97)),
+ STM32_GATE_CFG(CK_BUS_RNG1, ck_icn_p_rng1, SEC_RIFSC(92)),
+ STM32_GATE_CFG(CK_BUS_RNG2, ck_icn_p_rng2, SEC_RIFSC(93)),
+ STM32_GATE_CFG(CK_BUS_CRYP1, ck_icn_p_cryp1, SEC_RIFSC(98)),
+ STM32_GATE_CFG(CK_BUS_CRYP2, ck_icn_p_cryp2, SEC_RIFSC(99)),
+ STM32_GATE_CFG(CK_BUS_SAES, ck_icn_p_saes, SEC_RIFSC(95)),
+ STM32_GATE_CFG(CK_BUS_PKA, ck_icn_p_pka, SEC_RIFSC(94)),
+ STM32_GATE_CFG(CK_BUS_LPUART1, ck_icn_p_lpuart1, SEC_RIFSC(40)),
+ STM32_GATE_CFG(CK_BUS_LPTIM3, ck_icn_p_lptim3, SEC_RIFSC(19)),
+ STM32_GATE_CFG(CK_BUS_LPTIM4, ck_icn_p_lptim4, SEC_RIFSC(20)),
+ STM32_GATE_CFG(CK_BUS_LPTIM5, ck_icn_p_lptim5, SEC_RIFSC(21)),
+ STM32_GATE_CFG(CK_BUS_SDMMC1, ck_icn_m_sdmmc1, SEC_RIFSC(76)),
+ STM32_GATE_CFG(CK_BUS_SDMMC2, ck_icn_m_sdmmc2, SEC_RIFSC(77)),
+ STM32_GATE_CFG(CK_BUS_SDMMC3, ck_icn_m_sdmmc3, SEC_RIFSC(78)),
+ STM32_GATE_CFG(CK_BUS_USBHOHCI, ck_icn_m_usbhohci, SEC_RIFSC(63)),
+ STM32_GATE_CFG(CK_BUS_USBHEHCI, ck_icn_m_usbhehci, SEC_RIFSC(63)),
+ STM32_GATE_CFG(CK_BUS_OTG, ck_icn_m_otg, SEC_RIFSC(66)),
+ STM32_GATE_CFG(CK_BUS_TIM2, ck_icn_p_tim2, SEC_RIFSC(1)),
+ STM32_GATE_CFG(CK_BUS_TIM3, ck_icn_p_tim3, SEC_RIFSC(2)),
+ STM32_GATE_CFG(CK_BUS_TIM4, ck_icn_p_tim4, SEC_RIFSC(3)),
+ STM32_GATE_CFG(CK_BUS_TIM5, ck_icn_p_tim5, SEC_RIFSC(4)),
+ STM32_GATE_CFG(CK_BUS_TIM6, ck_icn_p_tim6, SEC_RIFSC(5)),
+ STM32_GATE_CFG(CK_BUS_TIM7, ck_icn_p_tim7, SEC_RIFSC(6)),
+ STM32_GATE_CFG(CK_BUS_TIM10, ck_icn_p_tim10, SEC_RIFSC(8)),
+ STM32_GATE_CFG(CK_BUS_TIM11, ck_icn_p_tim11, SEC_RIFSC(9)),
+ STM32_GATE_CFG(CK_BUS_TIM12, ck_icn_p_tim12, SEC_RIFSC(10)),
+ STM32_GATE_CFG(CK_BUS_TIM13, ck_icn_p_tim13, SEC_RIFSC(11)),
+ STM32_GATE_CFG(CK_BUS_TIM14, ck_icn_p_tim14, SEC_RIFSC(12)),
+ STM32_GATE_CFG(CK_BUS_LPTIM1, ck_icn_p_lptim1, SEC_RIFSC(17)),
+ STM32_GATE_CFG(CK_BUS_LPTIM2, ck_icn_p_lptim2, SEC_RIFSC(18)),
+ STM32_GATE_CFG(CK_BUS_SPI2, ck_icn_p_spi2, SEC_RIFSC(23)),
+ STM32_GATE_CFG(CK_BUS_SPI3, ck_icn_p_spi3, SEC_RIFSC(24)),
+ STM32_GATE_CFG(CK_BUS_SPDIFRX, ck_icn_p_spdifrx, SEC_RIFSC(30)),
+ STM32_GATE_CFG(CK_BUS_USART2, ck_icn_p_usart2, SEC_RIFSC(32)),
+ STM32_GATE_CFG(CK_BUS_USART3, ck_icn_p_usart3, SEC_RIFSC(33)),
+ STM32_GATE_CFG(CK_BUS_UART4, ck_icn_p_uart4, SEC_RIFSC(34)),
+ STM32_GATE_CFG(CK_BUS_UART5, ck_icn_p_uart5, SEC_RIFSC(35)),
+ STM32_GATE_CFG(CK_BUS_I2C1, ck_icn_p_i2c1, SEC_RIFSC(41)),
+ STM32_GATE_CFG(CK_BUS_I2C2, ck_icn_p_i2c2, SEC_RIFSC(42)),
+ STM32_GATE_CFG(CK_BUS_I2C3, ck_icn_p_i2c3, SEC_RIFSC(43)),
+ STM32_GATE_CFG(CK_BUS_I3C1, ck_icn_p_i3c1, SEC_RIFSC(114)),
+ STM32_GATE_CFG(CK_BUS_I3C2, ck_icn_p_i3c2, SEC_RIFSC(115)),
+ STM32_GATE_CFG(CK_BUS_I3C3, ck_icn_p_i3c3, SEC_RIFSC(116)),
+ STM32_GATE_CFG(CK_BUS_TIM1, ck_icn_p_tim1, SEC_RIFSC(0)),
+ STM32_GATE_CFG(CK_BUS_TIM8, ck_icn_p_tim8, SEC_RIFSC(7)),
+ STM32_GATE_CFG(CK_BUS_TIM15, ck_icn_p_tim15, SEC_RIFSC(13)),
+ STM32_GATE_CFG(CK_BUS_TIM16, ck_icn_p_tim16, SEC_RIFSC(14)),
+ STM32_GATE_CFG(CK_BUS_TIM17, ck_icn_p_tim17, SEC_RIFSC(15)),
+ STM32_GATE_CFG(CK_BUS_SAI1, ck_icn_p_sai1, SEC_RIFSC(49)),
+ STM32_GATE_CFG(CK_BUS_SAI2, ck_icn_p_sai2, SEC_RIFSC(50)),
+ STM32_GATE_CFG(CK_BUS_SAI3, ck_icn_p_sai3, SEC_RIFSC(51)),
+ STM32_GATE_CFG(CK_BUS_SAI4, ck_icn_p_sai4, SEC_RIFSC(52)),
+ STM32_GATE_CFG(CK_BUS_USART1, ck_icn_p_usart1, SEC_RIFSC(31)),
+ STM32_GATE_CFG(CK_BUS_USART6, ck_icn_p_usart6, SEC_RIFSC(36)),
+ STM32_GATE_CFG(CK_BUS_UART7, ck_icn_p_uart7, SEC_RIFSC(37)),
+ STM32_GATE_CFG(CK_BUS_FDCAN, ck_icn_p_fdcan, SEC_RIFSC(56)),
+ STM32_GATE_CFG(CK_BUS_SPI1, ck_icn_p_spi1, SEC_RIFSC(22)),
+ STM32_GATE_CFG(CK_BUS_SPI4, ck_icn_p_spi4, SEC_RIFSC(25)),
+ STM32_GATE_CFG(CK_BUS_SPI5, ck_icn_p_spi5, SEC_RIFSC(26)),
+ STM32_GATE_CFG(CK_BUS_SPI6, ck_icn_p_spi6, SEC_RIFSC(27)),
+ STM32_GATE_CFG(CK_BUS_IWDG1, ck_icn_p_iwdg1, SEC_RIFSC(100)),
+ STM32_GATE_CFG(CK_BUS_IWDG2, ck_icn_p_iwdg2, SEC_RIFSC(101)),
+ STM32_GATE_CFG(CK_BUS_IWDG3, ck_icn_p_iwdg3, SEC_RIFSC(102)),
+ STM32_GATE_CFG(CK_BUS_IWDG4, ck_icn_p_iwdg4, SEC_RIFSC(103)),
+ STM32_GATE_CFG(CK_BUS_WWDG1, ck_icn_p_wwdg1, SEC_RIFSC(104)),
+ STM32_GATE_CFG(CK_BUS_VREF, ck_icn_p_vref, SEC_RIFSC(106)),
+ STM32_GATE_CFG(CK_BUS_SERC, ck_icn_p_serc, SEC_RIFSC(110)),
+ STM32_GATE_CFG(CK_BUS_HDP, ck_icn_p_hdp, SEC_RIFSC(57)),
+ STM32_GATE_CFG(CK_BUS_LTDC, ck_icn_p_ltdc, SEC_RIFSC(80)),
+ STM32_GATE_CFG(CK_BUS_CSI, ck_icn_p_csi, SEC_RIFSC(86)),
+ STM32_GATE_CFG(CK_BUS_DCMIPP, ck_icn_p_dcmipp, SEC_RIFSC(87)),
+ STM32_GATE_CFG(CK_BUS_DCMIPSSI, ck_icn_p_dcmipssi, SEC_RIFSC(88)),
+ STM32_GATE_CFG(CK_BUS_DDRPERFM, ck_icn_p_ddrperfm, SEC_RIFSC(67)),
+ STM32_GATE_CFG(CK_KER_TIM2, ck_ker_tim2, SEC_RIFSC(1)),
+ STM32_GATE_CFG(CK_KER_TIM3, ck_ker_tim3, SEC_RIFSC(2)),
+ STM32_GATE_CFG(CK_KER_TIM4, ck_ker_tim4, SEC_RIFSC(3)),
+ STM32_GATE_CFG(CK_KER_TIM5, ck_ker_tim5, SEC_RIFSC(4)),
+ STM32_GATE_CFG(CK_KER_TIM6, ck_ker_tim6, SEC_RIFSC(5)),
+ STM32_GATE_CFG(CK_KER_TIM7, ck_ker_tim7, SEC_RIFSC(6)),
+ STM32_GATE_CFG(CK_KER_TIM10, ck_ker_tim10, SEC_RIFSC(8)),
+ STM32_GATE_CFG(CK_KER_TIM11, ck_ker_tim11, SEC_RIFSC(9)),
+ STM32_GATE_CFG(CK_KER_TIM12, ck_ker_tim12, SEC_RIFSC(10)),
+ STM32_GATE_CFG(CK_KER_TIM13, ck_ker_tim13, SEC_RIFSC(11)),
+ STM32_GATE_CFG(CK_KER_TIM14, ck_ker_tim14, SEC_RIFSC(12)),
+ STM32_GATE_CFG(CK_KER_TIM1, ck_ker_tim1, SEC_RIFSC(0)),
+ STM32_GATE_CFG(CK_KER_TIM8, ck_ker_tim8, SEC_RIFSC(7)),
+ STM32_GATE_CFG(CK_KER_TIM15, ck_ker_tim15, SEC_RIFSC(13)),
+ STM32_GATE_CFG(CK_KER_TIM16, ck_ker_tim16, SEC_RIFSC(14)),
+ STM32_GATE_CFG(CK_KER_TIM17, ck_ker_tim17, SEC_RIFSC(15)),
+ STM32_GATE_CFG(CK_KER_LPTIM1, ck_ker_lptim1, SEC_RIFSC(17)),
+ STM32_GATE_CFG(CK_KER_LPTIM2, ck_ker_lptim2, SEC_RIFSC(18)),
+ STM32_GATE_CFG(CK_KER_USART2, ck_ker_usart2, SEC_RIFSC(32)),
+ STM32_GATE_CFG(CK_KER_UART4, ck_ker_uart4, SEC_RIFSC(34)),
+ STM32_GATE_CFG(CK_KER_USART3, ck_ker_usart3, SEC_RIFSC(33)),
+ STM32_GATE_CFG(CK_KER_UART5, ck_ker_uart5, SEC_RIFSC(35)),
+ STM32_GATE_CFG(CK_KER_SPI2, ck_ker_spi2, SEC_RIFSC(23)),
+ STM32_GATE_CFG(CK_KER_SPI3, ck_ker_spi3, SEC_RIFSC(24)),
+ STM32_GATE_CFG(CK_KER_SPDIFRX, ck_ker_spdifrx, SEC_RIFSC(30)),
+ STM32_GATE_CFG(CK_KER_I2C1, ck_ker_i2c1, SEC_RIFSC(41)),
+ STM32_GATE_CFG(CK_KER_I2C2, ck_ker_i2c2, SEC_RIFSC(42)),
+ STM32_GATE_CFG(CK_KER_I3C1, ck_ker_i3c1, SEC_RIFSC(114)),
+ STM32_GATE_CFG(CK_KER_I3C2, ck_ker_i3c2, SEC_RIFSC(115)),
+ STM32_GATE_CFG(CK_KER_I2C3, ck_ker_i2c3, SEC_RIFSC(43)),
+ STM32_GATE_CFG(CK_KER_I3C3, ck_ker_i3c3, SEC_RIFSC(116)),
+ STM32_GATE_CFG(CK_KER_SPI1, ck_ker_spi1, SEC_RIFSC(22)),
+ STM32_GATE_CFG(CK_KER_SPI4, ck_ker_spi4, SEC_RIFSC(25)),
+ STM32_GATE_CFG(CK_KER_SPI5, ck_ker_spi5, SEC_RIFSC(26)),
+ STM32_GATE_CFG(CK_KER_SPI6, ck_ker_spi6, SEC_RIFSC(27)),
+ STM32_GATE_CFG(CK_KER_USART1, ck_ker_usart1, SEC_RIFSC(31)),
+ STM32_GATE_CFG(CK_KER_USART6, ck_ker_usart6, SEC_RIFSC(36)),
+ STM32_GATE_CFG(CK_KER_UART7, ck_ker_uart7, SEC_RIFSC(37)),
+ STM32_GATE_CFG(CK_KER_MDF1, ck_ker_mdf1, SEC_RIFSC(54)),
+ STM32_GATE_CFG(CK_KER_SAI1, ck_ker_sai1, SEC_RIFSC(49)),
+ STM32_GATE_CFG(CK_KER_SAI2, ck_ker_sai2, SEC_RIFSC(50)),
+ STM32_GATE_CFG(CK_KER_SAI3, ck_ker_sai3, SEC_RIFSC(51)),
+ STM32_GATE_CFG(CK_KER_SAI4, ck_ker_sai4, SEC_RIFSC(52)),
+ STM32_GATE_CFG(CK_KER_FDCAN, ck_ker_fdcan, SEC_RIFSC(56)),
+ STM32_GATE_CFG(CK_KER_CSI, ck_ker_csi, SEC_RIFSC(86)),
+ STM32_GATE_CFG(CK_KER_CSITXESC, ck_ker_csitxesc, SEC_RIFSC(86)),
+ STM32_GATE_CFG(CK_KER_CSIPHY, ck_ker_csiphy, SEC_RIFSC(86)),
+ STM32_GATE_CFG(CK_KER_LPUART1, ck_ker_lpuart1, SEC_RIFSC(40)),
+ STM32_GATE_CFG(CK_KER_LPTIM3, ck_ker_lptim3, SEC_RIFSC(19)),
+ STM32_GATE_CFG(CK_KER_LPTIM4, ck_ker_lptim4, SEC_RIFSC(20)),
+ STM32_GATE_CFG(CK_KER_LPTIM5, ck_ker_lptim5, SEC_RIFSC(21)),
+ STM32_GATE_CFG(CK_KER_SDMMC1, ck_ker_sdmmc1, SEC_RIFSC(76)),
+ STM32_GATE_CFG(CK_KER_SDMMC2, ck_ker_sdmmc2, SEC_RIFSC(77)),
+ STM32_GATE_CFG(CK_KER_SDMMC3, ck_ker_sdmmc3, SEC_RIFSC(78)),
+ STM32_GATE_CFG(CK_KER_ETH1, ck_ker_eth1, SEC_RIFSC(60)),
+ STM32_GATE_CFG(CK_ETH1_STP, ck_ker_eth1stp, SEC_RIFSC(60)),
+ STM32_GATE_CFG(CK_KER_ETH2, ck_ker_eth2, SEC_RIFSC(61)),
+ STM32_GATE_CFG(CK_ETH2_STP, ck_ker_eth2stp, SEC_RIFSC(61)),
+ STM32_GATE_CFG(CK_KER_ETH1PTP, ck_ker_eth1ptp, SEC_RIFSC(60)),
+ STM32_GATE_CFG(CK_KER_ETH2PTP, ck_ker_eth2ptp, SEC_RIFSC(61)),
+ STM32_GATE_CFG(CK_ETH1_MAC, ck_ker_eth1mac, SEC_RIFSC(60)),
+ STM32_GATE_CFG(CK_ETH1_TX, ck_ker_eth1tx, SEC_RIFSC(60)),
+ STM32_GATE_CFG(CK_ETH1_RX, ck_ker_eth1rx, SEC_RIFSC(60)),
+ STM32_GATE_CFG(CK_ETH2_MAC, ck_ker_eth2mac, SEC_RIFSC(61)),
+ STM32_GATE_CFG(CK_ETH2_TX, ck_ker_eth2tx, SEC_RIFSC(61)),
+ STM32_GATE_CFG(CK_ETH2_RX, ck_ker_eth2rx, SEC_RIFSC(61)),
+ STM32_COMPOSITE_CFG(CK_MCO1, ck_mco1, MP21_RIF_RCC_MCO1),
+ STM32_COMPOSITE_CFG(CK_MCO2, ck_mco2, MP21_RIF_RCC_MCO2),
+ STM32_COMPOSITE_CFG(CK_KER_ADC1, ck_ker_adc1, SEC_RIFSC(58)),
+ STM32_COMPOSITE_CFG(CK_KER_ADC2, ck_ker_adc2, SEC_RIFSC(59)),
+ STM32_COMPOSITE_CFG(CK_KER_USB2PHY1, ck_ker_usb2phy1, SEC_RIFSC(63)),
+ STM32_COMPOSITE_CFG(CK_KER_USB2PHY2EN, ck_ker_usb2phy2_en, SEC_RIFSC(66)),
+ STM32_COMPOSITE_CFG(CK_KER_DTS, ck_ker_dts, SEC_RIFSC(107)),
+ STM32_GATE_CFG(CK_KER_LTDC, ck_ker_ltdc, SEC_RIFSC(80)),
+};
+
+#define RESET_MP21(id, _offset, _bit_idx, _set_clr) \
+ [id] = &(struct stm32_reset_cfg){ \
+ .offset = (_offset), \
+ .bit_idx = (_bit_idx), \
+ .set_clr = (_set_clr), \
+ }
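+
+/*
+ * set_clr = 1 marks reset lines driven through dedicated SET/CLR
+ * registers (the RCC_IWDGC*CFGSETR entries below) rather than a
+ * read-modify-write bit.
+ */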
+
+static const struct stm32_reset_cfg *stm32mp21_reset_cfg[] = {
+ RESET_MP21(TIM1_R, RCC_TIM1CFGR, 0, 0),
+ RESET_MP21(TIM2_R, RCC_TIM2CFGR, 0, 0),
+ RESET_MP21(TIM3_R, RCC_TIM3CFGR, 0, 0),
+ RESET_MP21(TIM4_R, RCC_TIM4CFGR, 0, 0),
+ RESET_MP21(TIM5_R, RCC_TIM5CFGR, 0, 0),
+ RESET_MP21(TIM6_R, RCC_TIM6CFGR, 0, 0),
+ RESET_MP21(TIM7_R, RCC_TIM7CFGR, 0, 0),
+ RESET_MP21(TIM8_R, RCC_TIM8CFGR, 0, 0),
+ RESET_MP21(TIM10_R, RCC_TIM10CFGR, 0, 0),
+ RESET_MP21(TIM11_R, RCC_TIM11CFGR, 0, 0),
+ RESET_MP21(TIM12_R, RCC_TIM12CFGR, 0, 0),
+ RESET_MP21(TIM13_R, RCC_TIM13CFGR, 0, 0),
+ RESET_MP21(TIM14_R, RCC_TIM14CFGR, 0, 0),
+ RESET_MP21(TIM15_R, RCC_TIM15CFGR, 0, 0),
+ RESET_MP21(TIM16_R, RCC_TIM16CFGR, 0, 0),
+ RESET_MP21(TIM17_R, RCC_TIM17CFGR, 0, 0),
+ RESET_MP21(LPTIM1_R, RCC_LPTIM1CFGR, 0, 0),
+ RESET_MP21(LPTIM2_R, RCC_LPTIM2CFGR, 0, 0),
+ RESET_MP21(LPTIM3_R, RCC_LPTIM3CFGR, 0, 0),
+ RESET_MP21(LPTIM4_R, RCC_LPTIM4CFGR, 0, 0),
+ RESET_MP21(LPTIM5_R, RCC_LPTIM5CFGR, 0, 0),
+ RESET_MP21(SPI1_R, RCC_SPI1CFGR, 0, 0),
+ RESET_MP21(SPI2_R, RCC_SPI2CFGR, 0, 0),
+ RESET_MP21(SPI3_R, RCC_SPI3CFGR, 0, 0),
+ RESET_MP21(SPI4_R, RCC_SPI4CFGR, 0, 0),
+ RESET_MP21(SPI5_R, RCC_SPI5CFGR, 0, 0),
+ RESET_MP21(SPI6_R, RCC_SPI6CFGR, 0, 0),
+ RESET_MP21(SPDIFRX_R, RCC_SPDIFRXCFGR, 0, 0),
+ RESET_MP21(USART1_R, RCC_USART1CFGR, 0, 0),
+ RESET_MP21(USART2_R, RCC_USART2CFGR, 0, 0),
+ RESET_MP21(USART3_R, RCC_USART3CFGR, 0, 0),
+ RESET_MP21(UART4_R, RCC_UART4CFGR, 0, 0),
+ RESET_MP21(UART5_R, RCC_UART5CFGR, 0, 0),
+ RESET_MP21(USART6_R, RCC_USART6CFGR, 0, 0),
+ RESET_MP21(UART7_R, RCC_UART7CFGR, 0, 0),
+ RESET_MP21(LPUART1_R, RCC_LPUART1CFGR, 0, 0),
+ RESET_MP21(I2C1_R, RCC_I2C1CFGR, 0, 0),
+ RESET_MP21(I2C2_R, RCC_I2C2CFGR, 0, 0),
+ RESET_MP21(I2C3_R, RCC_I2C3CFGR, 0, 0),
+ RESET_MP21(SAI1_R, RCC_SAI1CFGR, 0, 0),
+ RESET_MP21(SAI2_R, RCC_SAI2CFGR, 0, 0),
+ RESET_MP21(SAI3_R, RCC_SAI3CFGR, 0, 0),
+ RESET_MP21(SAI4_R, RCC_SAI4CFGR, 0, 0),
+ RESET_MP21(MDF1_R, RCC_MDF1CFGR, 0, 0),
+ RESET_MP21(FDCAN_R, RCC_FDCANCFGR, 0, 0),
+ RESET_MP21(HDP_R, RCC_HDPCFGR, 0, 0),
+ RESET_MP21(ADC1_R, RCC_ADC1CFGR, 0, 0),
+ RESET_MP21(ADC2_R, RCC_ADC2CFGR, 0, 0),
+ RESET_MP21(ETH1_R, RCC_ETH1CFGR, 0, 0),
+ RESET_MP21(ETH2_R, RCC_ETH2CFGR, 0, 0),
+ RESET_MP21(OTG_R, RCC_OTGCFGR, 0, 0),
+ RESET_MP21(USBH_R, RCC_USBHCFGR, 0, 0),
+ RESET_MP21(USB2PHY1_R, RCC_USB2PHY1CFGR, 0, 0),
+ RESET_MP21(USB2PHY2_R, RCC_USB2PHY2CFGR, 0, 0),
+ RESET_MP21(SDMMC1_R, RCC_SDMMC1CFGR, 0, 0),
+ RESET_MP21(SDMMC1DLL_R, RCC_SDMMC1CFGR, 16, 0),
+ RESET_MP21(SDMMC2_R, RCC_SDMMC2CFGR, 0, 0),
+ RESET_MP21(SDMMC2DLL_R, RCC_SDMMC2CFGR, 16, 0),
+ RESET_MP21(SDMMC3_R, RCC_SDMMC3CFGR, 0, 0),
+ RESET_MP21(SDMMC3DLL_R, RCC_SDMMC3CFGR, 16, 0),
+ RESET_MP21(LTDC_R, RCC_LTDCCFGR, 0, 0),
+ RESET_MP21(CSI_R, RCC_CSICFGR, 0, 0),
+ RESET_MP21(DCMIPP_R, RCC_DCMIPPCFGR, 0, 0),
+ RESET_MP21(DCMIPSSI_R, RCC_DCMIPSSICFGR, 0, 0),
+ RESET_MP21(WWDG1_R, RCC_WWDG1CFGR, 0, 0),
+ RESET_MP21(VREF_R, RCC_VREFCFGR, 0, 0),
+ RESET_MP21(DTS_R, RCC_DTSCFGR, 0, 0),
+ RESET_MP21(CRC_R, RCC_CRCCFGR, 0, 0),
+ RESET_MP21(SERC_R, RCC_SERCCFGR, 0, 0),
+ RESET_MP21(I3C1_R, RCC_I3C1CFGR, 0, 0),
+ RESET_MP21(I3C2_R, RCC_I3C2CFGR, 0, 0),
+ RESET_MP21(IWDG2_KER_R, RCC_IWDGC1CFGSETR, 18, 1),
+ RESET_MP21(IWDG4_KER_R, RCC_IWDGC2CFGSETR, 18, 1),
+ RESET_MP21(RNG1_R, RCC_RNG1CFGR, 0, 0),
+ RESET_MP21(RNG2_R, RCC_RNG2CFGR, 0, 0),
+ RESET_MP21(PKA_R, RCC_PKACFGR, 0, 0),
+ RESET_MP21(SAES_R, RCC_SAESCFGR, 0, 0),
+ RESET_MP21(HASH1_R, RCC_HASH1CFGR, 0, 0),
+ RESET_MP21(HASH2_R, RCC_HASH2CFGR, 0, 0),
+ RESET_MP21(CRYP1_R, RCC_CRYP1CFGR, 0, 0),
+ RESET_MP21(CRYP2_R, RCC_CRYP2CFGR, 0, 0),
+};
+
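+/*
+ * Illustrative sketch only (the actual handling lives in the shared
+ * clk-stm32-core reset code): how a stm32_reset_cfg entry is applied.
+ * With .set_clr = 1 the offset names a write-1-to-set register whose
+ * write-1-to-clear counterpart is assumed to sit at offset + 4 (e.g.
+ * RCC_IWDGC1CFGSETR/RCC_IWDGC1CFGCLRR); with .set_clr = 0 the bit is
+ * read-modify-written in place:
+ *
+ *	static void reset_line_update(void __iomem *base,
+ *				      const struct stm32_reset_cfg *line,
+ *				      bool assert)
+ *	{
+ *		if (line->set_clr) {
+ *			writel(BIT(line->bit_idx),
+ *			       base + line->offset + (assert ? 0 : 4));
+ *		} else {
+ *			u32 val = readl(base + line->offset);
+ *
+ *			val = assert ? val | BIT(line->bit_idx)
+ *				     : val & ~BIT(line->bit_idx);
+ *			writel(val, base + line->offset);
+ *		}
+ *	}
+ */
+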
+static u16 stm32mp21_cpt_gate[GATE_NB];
+
+static struct clk_stm32_clock_data stm32mp21_clock_data = {
+ .gate_cpt = stm32mp21_cpt_gate,
+ .gates = stm32mp21_gates,
+ .muxes = stm32mp21_muxes,
+};
+
+static struct clk_stm32_reset_data stm32mp21_reset_data = {
+ .reset_lines = stm32mp21_reset_cfg,
+ .nr_lines = ARRAY_SIZE(stm32mp21_reset_cfg),
+};
+
+static const struct stm32_rcc_match_data stm32mp21_data = {
+ .tab_clocks = stm32mp21_clock_cfg,
+ .num_clocks = ARRAY_SIZE(stm32mp21_clock_cfg),
+ .maxbinding = STM32MP21_LAST_CLK,
+ .clock_data = &stm32mp21_clock_data,
+ .reset_data = &stm32mp21_reset_data,
+ .check_security = &stm32mp21_check_security,
+};
+
+static const struct of_device_id stm32mp21_match_data[] = {
+ { .compatible = "st,stm32mp21-rcc", .data = &stm32mp21_data, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, stm32mp21_match_data);
+
+static int stm32mp21_rcc_clocks_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ void __iomem *base;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (WARN_ON(IS_ERR(base)))
+ return PTR_ERR(base);
+
+ return stm32_rcc_init(dev, stm32mp21_match_data, base);
+}
+
+static struct platform_driver stm32mp21_rcc_clocks_driver = {
+ .driver = {
+ .name = "stm32mp21_rcc",
+ .of_match_table = stm32mp21_match_data,
+ },
+ .probe = stm32mp21_rcc_clocks_probe,
+};
+
+static int __init stm32mp21_clocks_init(void)
+{
+ return platform_driver_register(&stm32mp21_rcc_clocks_driver);
+}
+
+core_initcall(stm32mp21_clocks_init);
+
diff --git a/drivers/clk/stm32/stm32mp21_rcc.h b/drivers/clk/stm32/stm32mp21_rcc.h
new file mode 100644
index 000000000000..df3ea921ffba
--- /dev/null
+++ b/drivers/clk/stm32/stm32mp21_rcc.h
@@ -0,0 +1,651 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) STMicroelectronics 2025 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com> for STMicroelectronics.
+ */
+
+#ifndef STM32MP21_RCC_H
+#define STM32MP21_RCC_H
+
+#define RCC_SECCFGR0 0x0
+#define RCC_SECCFGR1 0x4
+#define RCC_SECCFGR2 0x8
+#define RCC_SECCFGR3 0xC
+#define RCC_PRIVCFGR0 0x10
+#define RCC_PRIVCFGR1 0x14
+#define RCC_PRIVCFGR2 0x18
+#define RCC_PRIVCFGR3 0x1C
+#define RCC_RCFGLOCKR0 0x20
+#define RCC_RCFGLOCKR1 0x24
+#define RCC_RCFGLOCKR2 0x28
+#define RCC_RCFGLOCKR3 0x2C
+#define RCC_R0CIDCFGR 0x30
+#define RCC_R0SEMCR 0x34
+#define RCC_R1CIDCFGR 0x38
+#define RCC_R1SEMCR 0x3C
+#define RCC_R2CIDCFGR 0x40
+#define RCC_R2SEMCR 0x44
+#define RCC_R3CIDCFGR 0x48
+#define RCC_R3SEMCR 0x4C
+#define RCC_R4CIDCFGR 0x50
+#define RCC_R4SEMCR 0x54
+#define RCC_R5CIDCFGR 0x58
+#define RCC_R5SEMCR 0x5C
+#define RCC_R6CIDCFGR 0x60
+#define RCC_R6SEMCR 0x64
+#define RCC_R7CIDCFGR 0x68
+#define RCC_R7SEMCR 0x6C
+#define RCC_R8CIDCFGR 0x70
+#define RCC_R8SEMCR 0x74
+#define RCC_R9CIDCFGR 0x78
+#define RCC_R9SEMCR 0x7C
+#define RCC_R10CIDCFGR 0x80
+#define RCC_R10SEMCR 0x84
+#define RCC_R11CIDCFGR 0x88
+#define RCC_R11SEMCR 0x8C
+#define RCC_R12CIDCFGR 0x90
+#define RCC_R12SEMCR 0x94
+#define RCC_R13CIDCFGR 0x98
+#define RCC_R13SEMCR 0x9C
+#define RCC_R14CIDCFGR 0xA0
+#define RCC_R14SEMCR 0xA4
+#define RCC_R15CIDCFGR 0xA8
+#define RCC_R15SEMCR 0xAC
+#define RCC_R16CIDCFGR 0xB0
+#define RCC_R16SEMCR 0xB4
+#define RCC_R17CIDCFGR 0xB8
+#define RCC_R17SEMCR 0xBC
+#define RCC_R18CIDCFGR 0xC0
+#define RCC_R18SEMCR 0xC4
+#define RCC_R19CIDCFGR 0xC8
+#define RCC_R19SEMCR 0xCC
+#define RCC_R20CIDCFGR 0xD0
+#define RCC_R20SEMCR 0xD4
+#define RCC_R21CIDCFGR 0xD8
+#define RCC_R21SEMCR 0xDC
+#define RCC_R22CIDCFGR 0xE0
+#define RCC_R22SEMCR 0xE4
+#define RCC_R23CIDCFGR 0xE8
+#define RCC_R23SEMCR 0xEC
+#define RCC_R24CIDCFGR 0xF0
+#define RCC_R24SEMCR 0xF4
+#define RCC_R25CIDCFGR 0xF8
+#define RCC_R25SEMCR 0xFC
+#define RCC_R26CIDCFGR 0x100
+#define RCC_R26SEMCR 0x104
+#define RCC_R27CIDCFGR 0x108
+#define RCC_R27SEMCR 0x10C
+#define RCC_R28CIDCFGR 0x110
+#define RCC_R28SEMCR 0x114
+#define RCC_R29CIDCFGR 0x118
+#define RCC_R29SEMCR 0x11C
+#define RCC_R30CIDCFGR 0x120
+#define RCC_R30SEMCR 0x124
+#define RCC_R31CIDCFGR 0x128
+#define RCC_R31SEMCR 0x12C
+#define RCC_R32CIDCFGR 0x130
+#define RCC_R32SEMCR 0x134
+#define RCC_R33CIDCFGR 0x138
+#define RCC_R33SEMCR 0x13C
+#define RCC_R34CIDCFGR 0x140
+#define RCC_R34SEMCR 0x144
+#define RCC_R35CIDCFGR 0x148
+#define RCC_R35SEMCR 0x14C
+#define RCC_R36CIDCFGR 0x150
+#define RCC_R36SEMCR 0x154
+#define RCC_R37CIDCFGR 0x158
+#define RCC_R37SEMCR 0x15C
+#define RCC_R38CIDCFGR 0x160
+#define RCC_R38SEMCR 0x164
+#define RCC_R39CIDCFGR 0x168
+#define RCC_R39SEMCR 0x16C
+#define RCC_R40CIDCFGR 0x170
+#define RCC_R40SEMCR 0x174
+#define RCC_R41CIDCFGR 0x178
+#define RCC_R41SEMCR 0x17C
+#define RCC_R42CIDCFGR 0x180
+#define RCC_R42SEMCR 0x184
+#define RCC_R43CIDCFGR 0x188
+#define RCC_R43SEMCR 0x18C
+#define RCC_R44CIDCFGR 0x190
+#define RCC_R44SEMCR 0x194
+#define RCC_R45CIDCFGR 0x198
+#define RCC_R45SEMCR 0x19C
+#define RCC_R46CIDCFGR 0x1A0
+#define RCC_R46SEMCR 0x1A4
+#define RCC_R47CIDCFGR 0x1A8
+#define RCC_R47SEMCR 0x1AC
+#define RCC_R48CIDCFGR 0x1B0
+#define RCC_R48SEMCR 0x1B4
+#define RCC_R49CIDCFGR 0x1B8
+#define RCC_R49SEMCR 0x1BC
+#define RCC_R50CIDCFGR 0x1C0
+#define RCC_R50SEMCR 0x1C4
+#define RCC_R51CIDCFGR 0x1C8
+#define RCC_R51SEMCR 0x1CC
+#define RCC_R52CIDCFGR 0x1D0
+#define RCC_R52SEMCR 0x1D4
+#define RCC_R53CIDCFGR 0x1D8
+#define RCC_R53SEMCR 0x1DC
+#define RCC_R54CIDCFGR 0x1E0
+#define RCC_R54SEMCR 0x1E4
+#define RCC_R55CIDCFGR 0x1E8
+#define RCC_R55SEMCR 0x1EC
+#define RCC_R56CIDCFGR 0x1F0
+#define RCC_R56SEMCR 0x1F4
+#define RCC_R57CIDCFGR 0x1F8
+#define RCC_R57SEMCR 0x1FC
+#define RCC_R58CIDCFGR 0x200
+#define RCC_R58SEMCR 0x204
+#define RCC_R59CIDCFGR 0x208
+#define RCC_R59SEMCR 0x20C
+#define RCC_R60CIDCFGR 0x210
+#define RCC_R60SEMCR 0x214
+#define RCC_R61CIDCFGR 0x218
+#define RCC_R61SEMCR 0x21C
+#define RCC_R62CIDCFGR 0x220
+#define RCC_R62SEMCR 0x224
+#define RCC_R63CIDCFGR 0x228
+#define RCC_R63SEMCR 0x22C
+#define RCC_R64CIDCFGR 0x230
+#define RCC_R64SEMCR 0x234
+#define RCC_R65CIDCFGR 0x238
+#define RCC_R65SEMCR 0x23C
+#define RCC_R66CIDCFGR 0x240
+#define RCC_R66SEMCR 0x244
+#define RCC_R67CIDCFGR 0x248
+#define RCC_R67SEMCR 0x24C
+#define RCC_R68CIDCFGR 0x250
+#define RCC_R68SEMCR 0x254
+#define RCC_R69CIDCFGR 0x258
+#define RCC_R69SEMCR 0x25C
+#define RCC_R70CIDCFGR 0x260
+#define RCC_R70SEMCR 0x264
+#define RCC_R71CIDCFGR 0x268
+#define RCC_R71SEMCR 0x26C
+#define RCC_R73CIDCFGR 0x278
+#define RCC_R73SEMCR 0x27C
+#define RCC_R74CIDCFGR 0x280
+#define RCC_R74SEMCR 0x284
+#define RCC_R75CIDCFGR 0x288
+#define RCC_R75SEMCR 0x28C
+#define RCC_R76CIDCFGR 0x290
+#define RCC_R76SEMCR 0x294
+#define RCC_R77CIDCFGR 0x298
+#define RCC_R77SEMCR 0x29C
+#define RCC_R78CIDCFGR 0x2A0
+#define RCC_R78SEMCR 0x2A4
+#define RCC_R79CIDCFGR 0x2A8
+#define RCC_R79SEMCR 0x2AC
+#define RCC_R83CIDCFGR 0x2C8
+#define RCC_R83SEMCR 0x2CC
+#define RCC_R84CIDCFGR 0x2D0
+#define RCC_R84SEMCR 0x2D4
+#define RCC_R85CIDCFGR 0x2D8
+#define RCC_R85SEMCR 0x2DC
+#define RCC_R86CIDCFGR 0x2E0
+#define RCC_R86SEMCR 0x2E4
+#define RCC_R87CIDCFGR 0x2E8
+#define RCC_R87SEMCR 0x2EC
+#define RCC_R88CIDCFGR 0x2F0
+#define RCC_R88SEMCR 0x2F4
+#define RCC_R90CIDCFGR 0x300
+#define RCC_R90SEMCR 0x304
+#define RCC_R91CIDCFGR 0x308
+#define RCC_R91SEMCR 0x30C
+#define RCC_R92CIDCFGR 0x310
+#define RCC_R92SEMCR 0x314
+#define RCC_R93CIDCFGR 0x318
+#define RCC_R93SEMCR 0x31C
+#define RCC_R94CIDCFGR 0x320
+#define RCC_R94SEMCR 0x324
+#define RCC_R95CIDCFGR 0x328
+#define RCC_R95SEMCR 0x32C
+#define RCC_R96CIDCFGR 0x330
+#define RCC_R96SEMCR 0x334
+#define RCC_R97CIDCFGR 0x338
+#define RCC_R97SEMCR 0x33C
+#define RCC_R98CIDCFGR 0x340
+#define RCC_R98SEMCR 0x344
+#define RCC_R101CIDCFGR 0x358
+#define RCC_R101SEMCR 0x35C
+#define RCC_R102CIDCFGR 0x360
+#define RCC_R102SEMCR 0x364
+#define RCC_R103CIDCFGR 0x368
+#define RCC_R103SEMCR 0x36C
+#define RCC_R104CIDCFGR 0x370
+#define RCC_R104SEMCR 0x374
+#define RCC_R105CIDCFGR 0x378
+#define RCC_R105SEMCR 0x37C
+#define RCC_R106CIDCFGR 0x380
+#define RCC_R106SEMCR 0x384
+#define RCC_R108CIDCFGR 0x390
+#define RCC_R108SEMCR 0x394
+#define RCC_R109CIDCFGR 0x398
+#define RCC_R109SEMCR 0x39C
+#define RCC_R110CIDCFGR 0x3A0
+#define RCC_R110SEMCR 0x3A4
+#define RCC_R111CIDCFGR 0x3A8
+#define RCC_R111SEMCR 0x3AC
+#define RCC_R112CIDCFGR 0x3B0
+#define RCC_R112SEMCR 0x3B4
+#define RCC_R113CIDCFGR 0x3B8
+#define RCC_R113SEMCR 0x3BC
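+/*
+ * Note (illustrative only; the driver keeps the explicit list): the
+ * RxCIDCFGR/RxSEMCR pairs above follow a regular 8-byte stride, so the
+ * defines are equivalent to the hypothetical macros
+ *
+ *	#define RCC_RxCIDCFGR(x)	(0x30 + 0x8 * (x))
+ *	#define RCC_RxSEMCR(x)		(0x34 + 0x8 * (x))
+ *
+ * e.g. RCC_R113CIDCFGR = 0x30 + 0x8 * 113 = 0x3B8; indices with no
+ * define (R72, R80-R82, ...) are simply not used.
+ */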
+#define RCC_GRSTCSETR 0x400
+#define RCC_C1RSTCSETR 0x404
+#define RCC_C2RSTCSETR 0x40C
+#define RCC_HWRSTSCLRR 0x410
+#define RCC_C1HWRSTSCLRR 0x414
+#define RCC_C2HWRSTSCLRR 0x418
+#define RCC_C1BOOTRSTSSETR 0x41C
+#define RCC_C1BOOTRSTSCLRR 0x420
+#define RCC_C2BOOTRSTSSETR 0x424
+#define RCC_C2BOOTRSTSCLRR 0x428
+#define RCC_C1SREQSETR 0x42C
+#define RCC_C1SREQCLRR 0x430
+#define RCC_CPUBOOTCR 0x434
+#define RCC_STBYBOOTCR 0x438
+#define RCC_LEGBOOTCR 0x43C
+#define RCC_BDCR 0x440
+#define RCC_RDCR 0x44C
+#define RCC_C1MSRDCR 0x450
+#define RCC_PWRLPDLYCR 0x454
+#define RCC_C1CIESETR 0x458
+#define RCC_C1CIFCLRR 0x45C
+#define RCC_C2CIESETR 0x460
+#define RCC_C2CIFCLRR 0x464
+#define RCC_IWDGC1FZSETR 0x468
+#define RCC_IWDGC1FZCLRR 0x46C
+#define RCC_IWDGC1CFGSETR 0x470
+#define RCC_IWDGC1CFGCLRR 0x474
+#define RCC_IWDGC2FZSETR 0x478
+#define RCC_IWDGC2FZCLRR 0x47C
+#define RCC_IWDGC2CFGSETR 0x480
+#define RCC_IWDGC2CFGCLRR 0x484
+#define RCC_MCO1CFGR 0x488
+#define RCC_MCO2CFGR 0x48C
+#define RCC_OCENSETR 0x490
+#define RCC_OCENCLRR 0x494
+#define RCC_OCRDYR 0x498
+#define RCC_HSICFGR 0x49C
+#define RCC_MSICFGR 0x4A0
+#define RCC_LSICR 0x4A4
+#define RCC_RTCDIVR 0x4A8
+#define RCC_APB1DIVR 0x4AC
+#define RCC_APB2DIVR 0x4B0
+#define RCC_APB3DIVR 0x4B4
+#define RCC_APB4DIVR 0x4B8
+#define RCC_APB5DIVR 0x4BC
+#define RCC_APBDBGDIVR 0x4C0
+#define RCC_TIMG1PRER 0x4C8
+#define RCC_TIMG2PRER 0x4CC
+#define RCC_LSMCUDIVR 0x4D0
+#define RCC_DDRCPCFGR 0x4D4
+#define RCC_DDRCAPBCFGR 0x4D8
+#define RCC_DDRPHYCAPBCFGR 0x4DC
+#define RCC_DDRPHYCCFGR 0x4E0
+#define RCC_DDRCFGR 0x4E4
+#define RCC_DDRITFCFGR 0x4E8
+#define RCC_SYSRAMCFGR 0x4F0
+#define RCC_SRAM1CFGR 0x4F8
+#define RCC_RETRAMCFGR 0x500
+#define RCC_BKPSRAMCFGR 0x504
+#define RCC_OSPI1CFGR 0x514
+#define RCC_FMCCFGR 0x51C
+#define RCC_DBGCFGR 0x520
+#define RCC_STMCFGR 0x524
+#define RCC_ETRCFGR 0x528
+#define RCC_GPIOACFGR 0x52C
+#define RCC_GPIOBCFGR 0x530
+#define RCC_GPIOCCFGR 0x534
+#define RCC_GPIODCFGR 0x538
+#define RCC_GPIOECFGR 0x53C
+#define RCC_GPIOFCFGR 0x540
+#define RCC_GPIOGCFGR 0x544
+#define RCC_GPIOHCFGR 0x548
+#define RCC_GPIOICFGR 0x54C
+#define RCC_GPIOZCFGR 0x558
+#define RCC_HPDMA1CFGR 0x55C
+#define RCC_HPDMA2CFGR 0x560
+#define RCC_HPDMA3CFGR 0x564
+#define RCC_IPCC1CFGR 0x570
+#define RCC_RTCCFGR 0x578
+#define RCC_SYSCPU1CFGR 0x580
+#define RCC_BSECCFGR 0x584
+#define RCC_PLL2CFGR1 0x590
+#define RCC_PLL2CFGR2 0x594
+#define RCC_PLL2CFGR3 0x598
+#define RCC_PLL2CFGR4 0x59C
+#define RCC_PLL2CFGR5 0x5A0
+#define RCC_PLL2CFGR6 0x5A8
+#define RCC_PLL2CFGR7 0x5AC
+#define RCC_HSIFMONCR 0x5E0
+#define RCC_HSIFVALR 0x5E4
+#define RCC_MSIFMONCR 0x5E8
+#define RCC_MSIFVALR 0x5EC
+#define RCC_TIM1CFGR 0x700
+#define RCC_TIM2CFGR 0x704
+#define RCC_TIM3CFGR 0x708
+#define RCC_TIM4CFGR 0x70C
+#define RCC_TIM5CFGR 0x710
+#define RCC_TIM6CFGR 0x714
+#define RCC_TIM7CFGR 0x718
+#define RCC_TIM8CFGR 0x71C
+#define RCC_TIM10CFGR 0x720
+#define RCC_TIM11CFGR 0x724
+#define RCC_TIM12CFGR 0x728
+#define RCC_TIM13CFGR 0x72C
+#define RCC_TIM14CFGR 0x730
+#define RCC_TIM15CFGR 0x734
+#define RCC_TIM16CFGR 0x738
+#define RCC_TIM17CFGR 0x73C
+#define RCC_LPTIM1CFGR 0x744
+#define RCC_LPTIM2CFGR 0x748
+#define RCC_LPTIM3CFGR 0x74C
+#define RCC_LPTIM4CFGR 0x750
+#define RCC_LPTIM5CFGR 0x754
+#define RCC_SPI1CFGR 0x758
+#define RCC_SPI2CFGR 0x75C
+#define RCC_SPI3CFGR 0x760
+#define RCC_SPI4CFGR 0x764
+#define RCC_SPI5CFGR 0x768
+#define RCC_SPI6CFGR 0x76C
+#define RCC_SPDIFRXCFGR 0x778
+#define RCC_USART1CFGR 0x77C
+#define RCC_USART2CFGR 0x780
+#define RCC_USART3CFGR 0x784
+#define RCC_UART4CFGR 0x788
+#define RCC_UART5CFGR 0x78C
+#define RCC_USART6CFGR 0x790
+#define RCC_UART7CFGR 0x794
+#define RCC_LPUART1CFGR 0x7A0
+#define RCC_I2C1CFGR 0x7A4
+#define RCC_I2C2CFGR 0x7A8
+#define RCC_I2C3CFGR 0x7AC
+#define RCC_SAI1CFGR 0x7C4
+#define RCC_SAI2CFGR 0x7C8
+#define RCC_SAI3CFGR 0x7CC
+#define RCC_SAI4CFGR 0x7D0
+#define RCC_MDF1CFGR 0x7D8
+#define RCC_FDCANCFGR 0x7E0
+#define RCC_HDPCFGR 0x7E4
+#define RCC_ADC1CFGR 0x7E8
+#define RCC_ADC2CFGR 0x7EC
+#define RCC_ETH1CFGR 0x7F0
+#define RCC_ETH2CFGR 0x7F4
+#define RCC_USBHCFGR 0x7FC
+#define RCC_USB2PHY1CFGR 0x800
+#define RCC_OTGCFGR 0x808
+#define RCC_USB2PHY2CFGR 0x80C
+#define RCC_STGENCFGR 0x824
+#define RCC_SDMMC1CFGR 0x830
+#define RCC_SDMMC2CFGR 0x834
+#define RCC_SDMMC3CFGR 0x838
+#define RCC_LTDCCFGR 0x840
+#define RCC_CSICFGR 0x858
+#define RCC_DCMIPPCFGR 0x85C
+#define RCC_DCMIPSSICFGR 0x860
+#define RCC_RNG1CFGR 0x870
+#define RCC_RNG2CFGR 0x874
+#define RCC_PKACFGR 0x878
+#define RCC_SAESCFGR 0x87C
+#define RCC_HASH1CFGR 0x880
+#define RCC_HASH2CFGR 0x884
+#define RCC_CRYP1CFGR 0x888
+#define RCC_CRYP2CFGR 0x88C
+#define RCC_IWDG1CFGR 0x894
+#define RCC_IWDG2CFGR 0x898
+#define RCC_IWDG3CFGR 0x89C
+#define RCC_IWDG4CFGR 0x8A0
+#define RCC_WWDG1CFGR 0x8A4
+#define RCC_VREFCFGR 0x8AC
+#define RCC_DTSCFGR 0x8B0
+#define RCC_CRCCFGR 0x8B4
+#define RCC_SERCCFGR 0x8B8
+#define RCC_DDRPERFMCFGR 0x8C0
+#define RCC_I3C1CFGR 0x8C8
+#define RCC_I3C2CFGR 0x8CC
+#define RCC_I3C3CFGR 0x8D0
+#define RCC_MUXSELCFGR 0x1000
+#define RCC_XBAR0CFGR 0x1018
+#define RCC_XBAR1CFGR 0x101C
+#define RCC_XBAR2CFGR 0x1020
+#define RCC_XBAR3CFGR 0x1024
+#define RCC_XBAR4CFGR 0x1028
+#define RCC_XBAR5CFGR 0x102C
+#define RCC_XBAR6CFGR 0x1030
+#define RCC_XBAR7CFGR 0x1034
+#define RCC_XBAR8CFGR 0x1038
+#define RCC_XBAR9CFGR 0x103C
+#define RCC_XBAR10CFGR 0x1040
+#define RCC_XBAR11CFGR 0x1044
+#define RCC_XBAR12CFGR 0x1048
+#define RCC_XBAR13CFGR 0x104C
+#define RCC_XBAR14CFGR 0x1050
+#define RCC_XBAR15CFGR 0x1054
+#define RCC_XBAR16CFGR 0x1058
+#define RCC_XBAR17CFGR 0x105C
+#define RCC_XBAR18CFGR 0x1060
+#define RCC_XBAR19CFGR 0x1064
+#define RCC_XBAR20CFGR 0x1068
+#define RCC_XBAR21CFGR 0x106C
+#define RCC_XBAR22CFGR 0x1070
+#define RCC_XBAR23CFGR 0x1074
+#define RCC_XBAR24CFGR 0x1078
+#define RCC_XBAR25CFGR 0x107C
+#define RCC_XBAR26CFGR 0x1080
+#define RCC_XBAR27CFGR 0x1084
+#define RCC_XBAR28CFGR 0x1088
+#define RCC_XBAR29CFGR 0x108C
+#define RCC_XBAR30CFGR 0x1090
+#define RCC_XBAR31CFGR 0x1094
+#define RCC_XBAR32CFGR 0x1098
+#define RCC_XBAR33CFGR 0x109C
+#define RCC_XBAR34CFGR 0x10A0
+#define RCC_XBAR35CFGR 0x10A4
+#define RCC_XBAR36CFGR 0x10A8
+#define RCC_XBAR37CFGR 0x10AC
+#define RCC_XBAR38CFGR 0x10B0
+#define RCC_XBAR39CFGR 0x10B4
+#define RCC_XBAR40CFGR 0x10B8
+#define RCC_XBAR41CFGR 0x10BC
+#define RCC_XBAR42CFGR 0x10C0
+#define RCC_XBAR43CFGR 0x10C4
+#define RCC_XBAR44CFGR 0x10C8
+#define RCC_XBAR45CFGR 0x10CC
+#define RCC_XBAR46CFGR 0x10D0
+#define RCC_XBAR47CFGR 0x10D4
+#define RCC_XBAR48CFGR 0x10D8
+#define RCC_XBAR49CFGR 0x10DC
+#define RCC_XBAR50CFGR 0x10E0
+#define RCC_XBAR51CFGR 0x10E4
+#define RCC_XBAR52CFGR 0x10E8
+#define RCC_XBAR53CFGR 0x10EC
+#define RCC_XBAR54CFGR 0x10F0
+#define RCC_XBAR55CFGR 0x10F4
+#define RCC_XBAR56CFGR 0x10F8
+#define RCC_XBAR57CFGR 0x10FC
+#define RCC_XBAR58CFGR 0x1100
+#define RCC_XBAR59CFGR 0x1104
+#define RCC_XBAR60CFGR 0x1108
+#define RCC_XBAR61CFGR 0x110C
+#define RCC_XBAR62CFGR 0x1110
+#define RCC_XBAR63CFGR 0x1114
+#define RCC_PREDIV0CFGR 0x1118
+#define RCC_PREDIV1CFGR 0x111C
+#define RCC_PREDIV2CFGR 0x1120
+#define RCC_PREDIV3CFGR 0x1124
+#define RCC_PREDIV4CFGR 0x1128
+#define RCC_PREDIV5CFGR 0x112C
+#define RCC_PREDIV6CFGR 0x1130
+#define RCC_PREDIV7CFGR 0x1134
+#define RCC_PREDIV8CFGR 0x1138
+#define RCC_PREDIV9CFGR 0x113C
+#define RCC_PREDIV10CFGR 0x1140
+#define RCC_PREDIV11CFGR 0x1144
+#define RCC_PREDIV12CFGR 0x1148
+#define RCC_PREDIV13CFGR 0x114C
+#define RCC_PREDIV14CFGR 0x1150
+#define RCC_PREDIV15CFGR 0x1154
+#define RCC_PREDIV16CFGR 0x1158
+#define RCC_PREDIV17CFGR 0x115C
+#define RCC_PREDIV18CFGR 0x1160
+#define RCC_PREDIV19CFGR 0x1164
+#define RCC_PREDIV20CFGR 0x1168
+#define RCC_PREDIV21CFGR 0x116C
+#define RCC_PREDIV22CFGR 0x1170
+#define RCC_PREDIV23CFGR 0x1174
+#define RCC_PREDIV24CFGR 0x1178
+#define RCC_PREDIV25CFGR 0x117C
+#define RCC_PREDIV26CFGR 0x1180
+#define RCC_PREDIV27CFGR 0x1184
+#define RCC_PREDIV28CFGR 0x1188
+#define RCC_PREDIV29CFGR 0x118C
+#define RCC_PREDIV30CFGR 0x1190
+#define RCC_PREDIV31CFGR 0x1194
+#define RCC_PREDIV32CFGR 0x1198
+#define RCC_PREDIV33CFGR 0x119C
+#define RCC_PREDIV34CFGR 0x11A0
+#define RCC_PREDIV35CFGR 0x11A4
+#define RCC_PREDIV36CFGR 0x11A8
+#define RCC_PREDIV37CFGR 0x11AC
+#define RCC_PREDIV38CFGR 0x11B0
+#define RCC_PREDIV39CFGR 0x11B4
+#define RCC_PREDIV40CFGR 0x11B8
+#define RCC_PREDIV41CFGR 0x11BC
+#define RCC_PREDIV42CFGR 0x11C0
+#define RCC_PREDIV43CFGR 0x11C4
+#define RCC_PREDIV44CFGR 0x11C8
+#define RCC_PREDIV45CFGR 0x11CC
+#define RCC_PREDIV46CFGR 0x11D0
+#define RCC_PREDIV47CFGR 0x11D4
+#define RCC_PREDIV48CFGR 0x11D8
+#define RCC_PREDIV49CFGR 0x11DC
+#define RCC_PREDIV50CFGR 0x11E0
+#define RCC_PREDIV51CFGR 0x11E4
+#define RCC_PREDIV52CFGR 0x11E8
+#define RCC_PREDIV53CFGR 0x11EC
+#define RCC_PREDIV54CFGR 0x11F0
+#define RCC_PREDIV55CFGR 0x11F4
+#define RCC_PREDIV56CFGR 0x11F8
+#define RCC_PREDIV57CFGR 0x11FC
+#define RCC_PREDIV58CFGR 0x1200
+#define RCC_PREDIV59CFGR 0x1204
+#define RCC_PREDIV60CFGR 0x1208
+#define RCC_PREDIV61CFGR 0x120C
+#define RCC_PREDIV62CFGR 0x1210
+#define RCC_PREDIV63CFGR 0x1214
+#define RCC_PREDIVSR1 0x1218
+#define RCC_PREDIVSR2 0x121C
+#define RCC_FINDIV0CFGR 0x1224
+#define RCC_FINDIV1CFGR 0x1228
+#define RCC_FINDIV2CFGR 0x122C
+#define RCC_FINDIV3CFGR 0x1230
+#define RCC_FINDIV4CFGR 0x1234
+#define RCC_FINDIV5CFGR 0x1238
+#define RCC_FINDIV6CFGR 0x123C
+#define RCC_FINDIV7CFGR 0x1240
+#define RCC_FINDIV8CFGR 0x1244
+#define RCC_FINDIV9CFGR 0x1248
+#define RCC_FINDIV10CFGR 0x124C
+#define RCC_FINDIV11CFGR 0x1250
+#define RCC_FINDIV12CFGR 0x1254
+#define RCC_FINDIV13CFGR 0x1258
+#define RCC_FINDIV14CFGR 0x125C
+#define RCC_FINDIV15CFGR 0x1260
+#define RCC_FINDIV16CFGR 0x1264
+#define RCC_FINDIV17CFGR 0x1268
+#define RCC_FINDIV18CFGR 0x126C
+#define RCC_FINDIV19CFGR 0x1270
+#define RCC_FINDIV20CFGR 0x1274
+#define RCC_FINDIV21CFGR 0x1278
+#define RCC_FINDIV22CFGR 0x127C
+#define RCC_FINDIV23CFGR 0x1280
+#define RCC_FINDIV24CFGR 0x1284
+#define RCC_FINDIV25CFGR 0x1288
+#define RCC_FINDIV26CFGR 0x128C
+#define RCC_FINDIV27CFGR 0x1290
+#define RCC_FINDIV28CFGR 0x1294
+#define RCC_FINDIV29CFGR 0x1298
+#define RCC_FINDIV30CFGR 0x129C
+#define RCC_FINDIV31CFGR 0x12A0
+#define RCC_FINDIV32CFGR 0x12A4
+#define RCC_FINDIV33CFGR 0x12A8
+#define RCC_FINDIV34CFGR 0x12AC
+#define RCC_FINDIV35CFGR 0x12B0
+#define RCC_FINDIV36CFGR 0x12B4
+#define RCC_FINDIV37CFGR 0x12B8
+#define RCC_FINDIV38CFGR 0x12BC
+#define RCC_FINDIV39CFGR 0x12C0
+#define RCC_FINDIV40CFGR 0x12C4
+#define RCC_FINDIV41CFGR 0x12C8
+#define RCC_FINDIV42CFGR 0x12CC
+#define RCC_FINDIV43CFGR 0x12D0
+#define RCC_FINDIV44CFGR 0x12D4
+#define RCC_FINDIV45CFGR 0x12D8
+#define RCC_FINDIV46CFGR 0x12DC
+#define RCC_FINDIV47CFGR 0x12E0
+#define RCC_FINDIV48CFGR 0x12E4
+#define RCC_FINDIV49CFGR 0x12E8
+#define RCC_FINDIV50CFGR 0x12EC
+#define RCC_FINDIV51CFGR 0x12F0
+#define RCC_FINDIV52CFGR 0x12F4
+#define RCC_FINDIV53CFGR 0x12F8
+#define RCC_FINDIV54CFGR 0x12FC
+#define RCC_FINDIV55CFGR 0x1300
+#define RCC_FINDIV56CFGR 0x1304
+#define RCC_FINDIV57CFGR 0x1308
+#define RCC_FINDIV58CFGR 0x130C
+#define RCC_FINDIV59CFGR 0x1310
+#define RCC_FINDIV60CFGR 0x1314
+#define RCC_FINDIV61CFGR 0x1318
+#define RCC_FINDIV62CFGR 0x131C
+#define RCC_FINDIV63CFGR 0x1320
+#define RCC_FINDIVSR1 0x1324
+#define RCC_FINDIVSR2 0x1328
+#define RCC_FCALCOBS0CFGR 0x1340
+#define RCC_FCALCOBS1CFGR 0x1344
+#define RCC_FCALCREFCFGR 0x1348
+#define RCC_FCALCCR1 0x134C
+#define RCC_FCALCCR2 0x1354
+#define RCC_FCALCSR 0x1358
+#define RCC_PLL4CFGR1 0x1360
+#define RCC_PLL4CFGR2 0x1364
+#define RCC_PLL4CFGR3 0x1368
+#define RCC_PLL4CFGR4 0x136C
+#define RCC_PLL4CFGR5 0x1370
+#define RCC_PLL4CFGR6 0x1378
+#define RCC_PLL4CFGR7 0x137C
+#define RCC_PLL5CFGR1 0x1388
+#define RCC_PLL5CFGR2 0x138C
+#define RCC_PLL5CFGR3 0x1390
+#define RCC_PLL5CFGR4 0x1394
+#define RCC_PLL5CFGR5 0x1398
+#define RCC_PLL5CFGR6 0x13A0
+#define RCC_PLL5CFGR7 0x13A4
+#define RCC_PLL6CFGR1 0x13B0
+#define RCC_PLL6CFGR2 0x13B4
+#define RCC_PLL6CFGR3 0x13B8
+#define RCC_PLL6CFGR4 0x13BC
+#define RCC_PLL6CFGR5 0x13C0
+#define RCC_PLL6CFGR6 0x13C8
+#define RCC_PLL6CFGR7 0x13CC
+#define RCC_PLL7CFGR1 0x13D8
+#define RCC_PLL7CFGR2 0x13DC
+#define RCC_PLL7CFGR3 0x13E0
+#define RCC_PLL7CFGR4 0x13E4
+#define RCC_PLL7CFGR5 0x13E8
+#define RCC_PLL7CFGR6 0x13F0
+#define RCC_PLL7CFGR7 0x13F4
+#define RCC_PLL8CFGR1 0x1400
+#define RCC_PLL8CFGR2 0x1404
+#define RCC_PLL8CFGR3 0x1408
+#define RCC_PLL8CFGR4 0x140C
+#define RCC_PLL8CFGR5 0x1410
+#define RCC_PLL8CFGR6 0x1418
+#define RCC_PLL8CFGR7 0x141C
+#define RCC_VERR 0xFFF4
+#define RCC_IDR 0xFFF8
+#define RCC_SIDR 0xFFFC
+
+#endif /* STM32MP21_RCC_H */
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index 8896fd052ef1..6af2d020e03e 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -57,6 +57,11 @@ config SUN55I_A523_CCU
default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
+config SUN55I_A523_MCU_CCU
+ tristate "Support for the Allwinner A523/T527 MCU CCU"
+ default ARCH_SUNXI
+ depends on ARM64 || COMPILE_TEST
+
config SUN55I_A523_R_CCU
tristate "Support for the Allwinner A523/T527 PRCM CCU"
default ARCH_SUNXI
diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile
index 82e471036de6..a1c4087d7241 100644
--- a/drivers/clk/sunxi-ng/Makefile
+++ b/drivers/clk/sunxi-ng/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_SUN50I_H6_CCU) += sun50i-h6-ccu.o
obj-$(CONFIG_SUN50I_H6_R_CCU) += sun50i-h6-r-ccu.o
obj-$(CONFIG_SUN50I_H616_CCU) += sun50i-h616-ccu.o
obj-$(CONFIG_SUN55I_A523_CCU) += sun55i-a523-ccu.o
+obj-$(CONFIG_SUN55I_A523_MCU_CCU) += sun55i-a523-mcu-ccu.o
obj-$(CONFIG_SUN55I_A523_R_CCU) += sun55i-a523-r-ccu.o
obj-$(CONFIG_SUN4I_A10_CCU) += sun4i-a10-ccu.o
obj-$(CONFIG_SUN5I_CCU) += sun5i-ccu.o
@@ -61,6 +62,7 @@ sun50i-h6-ccu-y += ccu-sun50i-h6.o
sun50i-h6-r-ccu-y += ccu-sun50i-h6-r.o
sun50i-h616-ccu-y += ccu-sun50i-h616.o
sun55i-a523-ccu-y += ccu-sun55i-a523.o
+sun55i-a523-mcu-ccu-y += ccu-sun55i-a523-mcu.o
sun55i-a523-r-ccu-y += ccu-sun55i-a523-r.o
sun4i-a10-ccu-y += ccu-sun4i-a10.o
sun5i-ccu-y += ccu-sun5i.o
diff --git a/drivers/clk/sunxi-ng/ccu-sun55i-a523-mcu.c b/drivers/clk/sunxi-ng/ccu-sun55i-a523-mcu.c
new file mode 100644
index 000000000000..197844f0fe4e
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun55i-a523-mcu.c
@@ -0,0 +1,469 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 Chen-Yu Tsai <wens@csie.org>
+ *
+ * Based on the A523 CCU driver:
+ * Copyright (C) 2023-2024 Arm Ltd.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/sun55i-a523-mcu-ccu.h>
+#include <dt-bindings/reset/sun55i-a523-mcu-ccu.h>
+
+#include "ccu_common.h"
+#include "ccu_reset.h"
+
+#include "ccu_div.h"
+#include "ccu_gate.h"
+#include "ccu_mp.h"
+#include "ccu_mult.h"
+#include "ccu_nm.h"
+
+static const struct clk_parent_data osc24M[] = {
+ { .fw_name = "hosc" }
+};
+
+static const struct clk_parent_data ahb[] = {
+ { .fw_name = "r-ahb" }
+};
+
+static const struct clk_parent_data apb[] = {
+ { .fw_name = "r-apb0" }
+};
+
+#define SUN55I_A523_PLL_AUDIO1_REG 0x00c
+static struct ccu_sdm_setting pll_audio1_sdm_table[] = {
+ { .rate = 2167603200, .pattern = 0xa000a234, .m = 1, .n = 90 }, /* div2->22.5792 */
+ { .rate = 2359296000, .pattern = 0xa0009ba6, .m = 1, .n = 98 }, /* div2->24.576 */
+ { .rate = 1806336000, .pattern = 0xa000872b, .m = 1, .n = 75 }, /* div5->22.5792 */
+};
+
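+/*
+ * Worked numbers behind the table comments above (for reference):
+ * 2167603200 / 2 = 1083801600 = 48 * 22579200, the 22.5792 MHz family
+ * (512 * 44.1 kHz); 2359296000 / 2 = 1179648000 = 48 * 24576000, the
+ * 24.576 MHz family (512 * 48 kHz); 1806336000 / 5 = 361267200 =
+ * 16 * 22579200.
+ */
+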
+static struct ccu_nm pll_audio1_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 11),
+ .m = _SUNXI_CCU_DIV(1, 1),
+ .sdm = _SUNXI_CCU_SDM(pll_audio1_sdm_table, BIT(24),
+ 0x010, BIT(31)),
+ .min_rate = 180000000U,
+ .max_rate = 3500000000U,
+ .common = {
+ .reg = 0x00c,
+ .features = CCU_FEATURE_SIGMA_DELTA_MOD,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-audio1",
+ osc24M, &ccu_nm_ops,
+ CLK_SET_RATE_GATE),
+ },
+};
+
+/*
+ * /2 and /5 dividers are actually programmable, but we just use the
+ * values from the BSP, since the audio PLL only needs to provide a
+ * couple of clock rates. This also matches the names given in the manual.
+ */
+static const struct clk_hw *pll_audio1_div_parents[] = { &pll_audio1_clk.common.hw };
+static CLK_FIXED_FACTOR_HWS(pll_audio1_div2_clk, "pll-audio1-div2",
+ pll_audio1_div_parents, 2, 1,
+ CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR_HWS(pll_audio1_div5_clk, "pll-audio1-div5",
+ pll_audio1_div_parents, 5, 1,
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_M_WITH_GATE(audio_out_clk, "audio-out",
+ "pll-audio1-div2", 0x01c,
+ 0, 5, BIT(31), CLK_SET_RATE_PARENT);
+
+static const struct clk_parent_data dsp_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+ /*
+ * The order of the following two parents is from the BSP code. It is
+ * the opposite in the manual. Testing with the DSP is required to
+ * figure out the real order.
+ */
+ { .hw = &pll_audio1_div5_clk.hw },
+ { .hw = &pll_audio1_div2_clk.hw },
+ { .fw_name = "dsp" },
+};
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(dsp_clk, "mcu-dsp", dsp_parents, 0x0020,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const struct clk_parent_data i2s_parents[] = {
+ { .fw_name = "pll-audio0-4x" },
+ { .hw = &pll_audio1_div2_clk.hw },
+ { .hw = &pll_audio1_div5_clk.hw },
+};
+
+static SUNXI_CCU_DUALDIV_MUX_GATE(i2s0_clk, "i2s0", i2s_parents, 0x02c,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_DUALDIV_MUX_GATE(i2s1_clk, "i2s1", i2s_parents, 0x030,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_DUALDIV_MUX_GATE(i2s2_clk, "i2s2", i2s_parents, 0x034,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_DUALDIV_MUX_GATE(i2s3_clk, "i2s3", i2s_parents, 0x038,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static const struct clk_parent_data i2s3_asrc_parents[] = {
+ { .fw_name = "pll-periph0-300m" },
+ { .hw = &pll_audio1_div2_clk.hw },
+ { .hw = &pll_audio1_div5_clk.hw },
+};
+static SUNXI_CCU_DUALDIV_MUX_GATE(i2s3_asrc_clk, "i2s3-asrc",
+ i2s3_asrc_parents, 0x03c,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_DATA(bus_i2s0_clk, "bus-i2s0", apb, 0x040, BIT(0), 0);
+static SUNXI_CCU_GATE_DATA(bus_i2s1_clk, "bus-i2s1", apb, 0x040, BIT(1), 0);
+static SUNXI_CCU_GATE_DATA(bus_i2s2_clk, "bus-i2s2", apb, 0x040, BIT(2), 0);
+static SUNXI_CCU_GATE_DATA(bus_i2s3_clk, "bus-i2s3", apb, 0x040, BIT(3), 0);
+
+static const struct clk_parent_data audio_parents[] = {
+ { .fw_name = "pll-audio0-4x" },
+ { .hw = &pll_audio1_div2_clk.hw },
+ { .hw = &pll_audio1_div5_clk.hw },
+};
+static SUNXI_CCU_DUALDIV_MUX_GATE(spdif_tx_clk, "spdif-tx",
+ audio_parents, 0x044,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_DUALDIV_MUX_GATE(spdif_rx_clk, "spdif-rx",
+ i2s3_asrc_parents, 0x048,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_DATA(bus_spdif_clk, "bus-spdif", apb, 0x04c, BIT(0), 0);
+
+static SUNXI_CCU_DUALDIV_MUX_GATE(dmic_clk, "dmic", audio_parents, 0x050,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_DATA(bus_dmic_clk, "bus-dmic", apb, 0x054, BIT(0), 0);
+
+static SUNXI_CCU_DUALDIV_MUX_GATE(audio_dac_clk, "audio-dac",
+ audio_parents, 0x058,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_DUALDIV_MUX_GATE(audio_adc_clk, "audio-adc",
+ audio_parents, 0x05c,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_DATA(bus_audio_codec_clk, "bus-audio-codec",
+ apb, 0x060, BIT(0), 0);
+
+static SUNXI_CCU_GATE_DATA(bus_dsp_msgbox_clk, "bus-dsp-msgbox",
+ ahb, 0x068, BIT(0), 0);
+static SUNXI_CCU_GATE_DATA(bus_dsp_cfg_clk, "bus-dsp-cfg",
+ apb, 0x06c, BIT(0), 0);
+
+static SUNXI_CCU_GATE_DATA(bus_npu_hclk, "bus-npu-hclk", ahb, 0x070, BIT(1), 0);
+static SUNXI_CCU_GATE_DATA(bus_npu_aclk, "bus-npu-aclk", ahb, 0x070, BIT(2), 0);
+
+static const struct clk_parent_data timer_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+ { .fw_name = "r-ahb" }
+};
+static SUNXI_CCU_P_DATA_WITH_MUX_GATE(mcu_timer0_clk, "mcu-timer0", timer_parents,
+ 0x074,
+ 1, 3, /* P */
+ 4, 2, /* mux */
+ BIT(0), /* gate */
+ 0);
+static SUNXI_CCU_P_DATA_WITH_MUX_GATE(mcu_timer1_clk, "mcu-timer1", timer_parents,
+ 0x078,
+ 1, 3, /* P */
+ 4, 2, /* mux */
+ BIT(0), /* gate */
+ 0);
+static SUNXI_CCU_P_DATA_WITH_MUX_GATE(mcu_timer2_clk, "mcu-timer2", timer_parents,
+ 0x07c,
+ 1, 3, /* P */
+ 4, 2, /* mux */
+ BIT(0), /* gate */
+ 0);
+static SUNXI_CCU_P_DATA_WITH_MUX_GATE(mcu_timer3_clk, "mcu-timer3", timer_parents,
+ 0x080,
+ 1, 3, /* P */
+ 4, 2, /* mux */
+ BIT(0), /* gate */
+ 0);
+static SUNXI_CCU_P_DATA_WITH_MUX_GATE(mcu_timer4_clk, "mcu-timer4", timer_parents,
+ 0x084,
+ 1, 3, /* P */
+ 4, 2, /* mux */
+ BIT(0), /* gate */
+ 0);
+static SUNXI_CCU_P_DATA_WITH_MUX_GATE(mcu_timer5_clk, "mcu-timer5", timer_parents,
+ 0x088,
+ 1, 3, /* P */
+ 4, 2, /* mux */
+ BIT(0), /* gate */
+ 0);
+static SUNXI_CCU_GATE_DATA(bus_mcu_timer_clk, "bus-mcu-timer", ahb, 0x08c, BIT(0), 0);
+static SUNXI_CCU_GATE_DATA(bus_mcu_dma_clk, "bus-mcu-dma", ahb, 0x104, BIT(0), 0);
+/* tzma* only found in BSP code. */
+static SUNXI_CCU_GATE_DATA(tzma0_clk, "tzma0", ahb, 0x108, BIT(0), 0);
+static SUNXI_CCU_GATE_DATA(tzma1_clk, "tzma1", ahb, 0x10c, BIT(0), 0);
+/* Parent is a guess as this block is not shown in the system bus tree diagram */
+static SUNXI_CCU_GATE_DATA(bus_pubsram_clk, "bus-pubsram", ahb, 0x114, BIT(0), 0);
+
+/*
+ * The user manual has the "mbus" clock as the parent of both clocks
+ * below, but this makes more sense, since the BSP MCU DMA controller
+ * references both of them, likely needing both enabled.
+ */
+static SUNXI_CCU_GATE_FW(mbus_mcu_clk, "mbus-mcu", "mbus", 0x11c, BIT(1), 0);
+static SUNXI_CCU_GATE_HW(mbus_mcu_dma_clk, "mbus-mcu-dma",
+ &mbus_mcu_clk.common.hw, 0x11c, BIT(0), 0);
+
+static const struct clk_parent_data riscv_pwm_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+};
+
+static SUNXI_CCU_MUX_DATA_WITH_GATE(riscv_clk, "riscv",
+ riscv_pwm_parents, 0x120,
+ 27, 3, BIT(31), 0);
+/* Parents are guesses as these two blocks are not shown in the system bus tree diagram */
+static SUNXI_CCU_GATE_DATA(bus_riscv_cfg_clk, "bus-riscv-cfg", ahb,
+ 0x124, BIT(0), 0);
+static SUNXI_CCU_GATE_DATA(bus_riscv_msgbox_clk, "bus-riscv-msgbox", ahb,
+ 0x128, BIT(0), 0);
+
+static SUNXI_CCU_MUX_DATA_WITH_GATE(mcu_pwm0_clk, "mcu-pwm0",
+ riscv_pwm_parents, 0x130,
+ 24, 3, BIT(31), 0);
+static SUNXI_CCU_GATE_DATA(bus_mcu_pwm0_clk, "bus-mcu-pwm0", apb,
+ 0x134, BIT(0), 0);
+
+/*
+ * Contains all clocks that are controlled by a hardware register. They
+ * have a (sunxi) .common member, which needs to be initialised by the common
+ * sunxi CCU code, to be filled with the MMIO base address and the shared lock.
+ */
+static struct ccu_common *sun55i_a523_mcu_ccu_clks[] = {
+ &pll_audio1_clk.common,
+ &audio_out_clk.common,
+ &dsp_clk.common,
+ &i2s0_clk.common,
+ &i2s1_clk.common,
+ &i2s2_clk.common,
+ &i2s3_clk.common,
+ &i2s3_asrc_clk.common,
+ &bus_i2s0_clk.common,
+ &bus_i2s1_clk.common,
+ &bus_i2s2_clk.common,
+ &bus_i2s3_clk.common,
+ &spdif_tx_clk.common,
+ &spdif_rx_clk.common,
+ &bus_spdif_clk.common,
+ &dmic_clk.common,
+ &bus_dmic_clk.common,
+ &audio_dac_clk.common,
+ &audio_adc_clk.common,
+ &bus_audio_codec_clk.common,
+ &bus_dsp_msgbox_clk.common,
+ &bus_dsp_cfg_clk.common,
+ &bus_npu_aclk.common,
+ &bus_npu_hclk.common,
+ &mcu_timer0_clk.common,
+ &mcu_timer1_clk.common,
+ &mcu_timer2_clk.common,
+ &mcu_timer3_clk.common,
+ &mcu_timer4_clk.common,
+ &mcu_timer5_clk.common,
+ &bus_mcu_timer_clk.common,
+ &bus_mcu_dma_clk.common,
+ &tzma0_clk.common,
+ &tzma1_clk.common,
+ &bus_pubsram_clk.common,
+ &mbus_mcu_dma_clk.common,
+ &mbus_mcu_clk.common,
+ &riscv_clk.common,
+ &bus_riscv_cfg_clk.common,
+ &bus_riscv_msgbox_clk.common,
+ &mcu_pwm0_clk.common,
+ &bus_mcu_pwm0_clk.common,
+};
+
+static struct clk_hw_onecell_data sun55i_a523_mcu_hw_clks = {
+ .hws = {
+ [CLK_MCU_PLL_AUDIO1] = &pll_audio1_clk.common.hw,
+ [CLK_MCU_PLL_AUDIO1_DIV2] = &pll_audio1_div2_clk.hw,
+ [CLK_MCU_PLL_AUDIO1_DIV5] = &pll_audio1_div5_clk.hw,
+ [CLK_MCU_AUDIO_OUT] = &audio_out_clk.common.hw,
+ [CLK_MCU_DSP] = &dsp_clk.common.hw,
+ [CLK_MCU_I2S0] = &i2s0_clk.common.hw,
+ [CLK_MCU_I2S1] = &i2s1_clk.common.hw,
+ [CLK_MCU_I2S2] = &i2s2_clk.common.hw,
+ [CLK_MCU_I2S3] = &i2s3_clk.common.hw,
+ [CLK_MCU_I2S3_ASRC] = &i2s3_asrc_clk.common.hw,
+ [CLK_BUS_MCU_I2S0] = &bus_i2s0_clk.common.hw,
+ [CLK_BUS_MCU_I2S1] = &bus_i2s1_clk.common.hw,
+ [CLK_BUS_MCU_I2S2] = &bus_i2s2_clk.common.hw,
+ [CLK_BUS_MCU_I2S3] = &bus_i2s3_clk.common.hw,
+ [CLK_MCU_SPDIF_TX] = &spdif_tx_clk.common.hw,
+ [CLK_MCU_SPDIF_RX] = &spdif_rx_clk.common.hw,
+ [CLK_BUS_MCU_SPDIF] = &bus_spdif_clk.common.hw,
+ [CLK_MCU_DMIC] = &dmic_clk.common.hw,
+ [CLK_BUS_MCU_DMIC] = &bus_dmic_clk.common.hw,
+ [CLK_MCU_AUDIO_CODEC_DAC] = &audio_dac_clk.common.hw,
+ [CLK_MCU_AUDIO_CODEC_ADC] = &audio_adc_clk.common.hw,
+ [CLK_BUS_MCU_AUDIO_CODEC] = &bus_audio_codec_clk.common.hw,
+ [CLK_BUS_MCU_DSP_MSGBOX] = &bus_dsp_msgbox_clk.common.hw,
+ [CLK_BUS_MCU_DSP_CFG] = &bus_dsp_cfg_clk.common.hw,
+ [CLK_BUS_MCU_NPU_HCLK] = &bus_npu_hclk.common.hw,
+ [CLK_BUS_MCU_NPU_ACLK] = &bus_npu_aclk.common.hw,
+ [CLK_MCU_TIMER0] = &mcu_timer0_clk.common.hw,
+ [CLK_MCU_TIMER1] = &mcu_timer1_clk.common.hw,
+ [CLK_MCU_TIMER2] = &mcu_timer2_clk.common.hw,
+ [CLK_MCU_TIMER3] = &mcu_timer3_clk.common.hw,
+ [CLK_MCU_TIMER4] = &mcu_timer4_clk.common.hw,
+ [CLK_MCU_TIMER5] = &mcu_timer5_clk.common.hw,
+ [CLK_BUS_MCU_TIMER] = &bus_mcu_timer_clk.common.hw,
+ [CLK_BUS_MCU_DMA] = &bus_mcu_dma_clk.common.hw,
+ [CLK_MCU_TZMA0] = &tzma0_clk.common.hw,
+ [CLK_MCU_TZMA1] = &tzma1_clk.common.hw,
+ [CLK_BUS_MCU_PUBSRAM] = &bus_pubsram_clk.common.hw,
+ [CLK_MCU_MBUS_DMA] = &mbus_mcu_dma_clk.common.hw,
+ [CLK_MCU_MBUS] = &mbus_mcu_clk.common.hw,
+ [CLK_MCU_RISCV] = &riscv_clk.common.hw,
+ [CLK_BUS_MCU_RISCV_CFG] = &bus_riscv_cfg_clk.common.hw,
+ [CLK_BUS_MCU_RISCV_MSGBOX] = &bus_riscv_msgbox_clk.common.hw,
+ [CLK_MCU_PWM0] = &mcu_pwm0_clk.common.hw,
+ [CLK_BUS_MCU_PWM0] = &bus_mcu_pwm0_clk.common.hw,
+ },
+ .num = CLK_BUS_MCU_PWM0 + 1,
+};
+
+static struct ccu_reset_map sun55i_a523_mcu_ccu_resets[] = {
+ [RST_BUS_MCU_I2S0] = { 0x0040, BIT(16) },
+ [RST_BUS_MCU_I2S1] = { 0x0040, BIT(17) },
+ [RST_BUS_MCU_I2S2] = { 0x0040, BIT(18) },
+ [RST_BUS_MCU_I2S3] = { 0x0040, BIT(19) },
+ [RST_BUS_MCU_SPDIF] = { 0x004c, BIT(16) },
+ [RST_BUS_MCU_DMIC] = { 0x0054, BIT(16) },
+ [RST_BUS_MCU_AUDIO_CODEC] = { 0x0060, BIT(16) },
+ [RST_BUS_MCU_DSP_MSGBOX] = { 0x0068, BIT(16) },
+ [RST_BUS_MCU_DSP_CFG] = { 0x006c, BIT(16) },
+ [RST_BUS_MCU_NPU] = { 0x0070, BIT(16) },
+ [RST_BUS_MCU_TIMER] = { 0x008c, BIT(16) },
+ /* dsp and dsp_debug resets only found in BSP code. */
+ [RST_BUS_MCU_DSP_DEBUG] = { 0x0100, BIT(16) },
+ [RST_BUS_MCU_DSP] = { 0x0100, BIT(17) },
+ [RST_BUS_MCU_DMA] = { 0x0104, BIT(16) },
+ [RST_BUS_MCU_PUBSRAM] = { 0x0114, BIT(16) },
+ [RST_BUS_MCU_RISCV_CFG] = { 0x0124, BIT(16) },
+ [RST_BUS_MCU_RISCV_DEBUG] = { 0x0124, BIT(17) },
+ [RST_BUS_MCU_RISCV_CORE] = { 0x0124, BIT(18) },
+ [RST_BUS_MCU_RISCV_MSGBOX] = { 0x0128, BIT(16) },
+ [RST_BUS_MCU_PWM0] = { 0x0134, BIT(16) },
+};
+
+static const struct sunxi_ccu_desc sun55i_a523_mcu_ccu_desc = {
+ .ccu_clks = sun55i_a523_mcu_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun55i_a523_mcu_ccu_clks),
+
+ .hw_clks = &sun55i_a523_mcu_hw_clks,
+
+ .resets = sun55i_a523_mcu_ccu_resets,
+ .num_resets = ARRAY_SIZE(sun55i_a523_mcu_ccu_resets),
+};
+
+static int sun55i_a523_mcu_ccu_probe(struct platform_device *pdev)
+{
+ void __iomem *reg;
+ u32 val;
+
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ val = readl(reg + SUN55I_A523_PLL_AUDIO1_REG);
+
+ /*
+ * The PLL clock code does not model all bits; for instance, it does
+ * not support a separate enable and gate bit. We present the
+ * gate bit (27) as the enable bit, but then have to set the
+ * PLL Enable, LDO Enable, and Lock Enable bits on all PLLs here.
+ */
+ val |= BIT(31) | BIT(30) | BIT(29);
+
+ /* Enforce p1 = 5, p0 = 2 (the default) for PLL_AUDIO1 */
+ val &= ~(GENMASK(22, 20) | GENMASK(18, 16));
+ val |= (4 << 20) | (1 << 16);
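+ /* (each field holds the divider minus one, hence the values 4 and 1) */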
+
+ writel(val, reg + SUN55I_A523_PLL_AUDIO1_REG);
+
+ return devm_sunxi_ccu_probe(&pdev->dev, reg, &sun55i_a523_mcu_ccu_desc);
+}
+
+static const struct of_device_id sun55i_a523_mcu_ccu_ids[] = {
+ { .compatible = "allwinner,sun55i-a523-mcu-ccu" },
+ { }
+};
+
+static struct platform_driver sun55i_a523_mcu_ccu_driver = {
+ .probe = sun55i_a523_mcu_ccu_probe,
+ .driver = {
+ .name = "sun55i-a523-mcu-ccu",
+ .suppress_bind_attrs = true,
+ .of_match_table = sun55i_a523_mcu_ccu_ids,
+ },
+};
+module_platform_driver(sun55i_a523_mcu_ccu_driver);
+
+MODULE_IMPORT_NS("SUNXI_CCU");
+MODULE_DESCRIPTION("Support for the Allwinner A523 MCU CCU");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun55i-a523.c b/drivers/clk/sunxi-ng/ccu-sun55i-a523.c
index 1a9a1cb869e2..acb532f8361b 100644
--- a/drivers/clk/sunxi-ng/ccu-sun55i-a523.c
+++ b/drivers/clk/sunxi-ng/ccu-sun55i-a523.c
@@ -11,6 +11,9 @@
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <dt-bindings/clock/sun55i-a523-ccu.h>
+#include <dt-bindings/reset/sun55i-a523-ccu.h>
+
#include "../clk.h"
#include "ccu_common.h"
@@ -25,8 +28,6 @@
#include "ccu_nkmp.h"
#include "ccu_nm.h"
-#include "ccu-sun55i-a523.h"
-
/*
* The 24 MHz oscillator, the root of most of the clock tree.
* .fw_name is the string used in the DT "clock-names" property, used to
@@ -486,6 +487,18 @@ static SUNXI_CCU_M_HW_WITH_MUX_GATE(ve_clk, "ve", ve_parents, 0x690,
static SUNXI_CCU_GATE_HWS(bus_ve_clk, "bus-ve", ahb_hws, 0x69c, BIT(0), 0);
+static const struct clk_hw *npu_parents[] = {
+ &pll_periph0_480M_clk.common.hw,
+ &pll_periph0_600M_clk.hw,
+ &pll_periph0_800M_clk.common.hw,
+ &pll_npu_2x_clk.hw,
+};
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(npu_clk, "npu", npu_parents, 0x6e0,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
static SUNXI_CCU_GATE_HWS(bus_dma_clk, "bus-dma", ahb_hws, 0x70c, BIT(0), 0);
static SUNXI_CCU_GATE_HWS(bus_msgbox_clk, "bus-msgbox", ahb_hws, 0x71c,
@@ -1217,6 +1230,7 @@ static struct ccu_common *sun55i_a523_ccu_clks[] = {
&bus_ce_sys_clk.common,
&ve_clk.common,
&bus_ve_clk.common,
+ &npu_clk.common,
&bus_dma_clk.common,
&bus_msgbox_clk.common,
&bus_spinlock_clk.common,
@@ -1343,7 +1357,6 @@ static struct ccu_common *sun55i_a523_ccu_clks[] = {
};
static struct clk_hw_onecell_data sun55i_a523_hw_clks = {
- .num = CLK_NUMBER,
.hws = {
[CLK_PLL_DDR0] = &pll_ddr_clk.common.hw,
[CLK_PLL_PERIPH0_4X] = &pll_periph0_4x_clk.common.hw,
@@ -1524,7 +1537,9 @@ static struct clk_hw_onecell_data sun55i_a523_hw_clks = {
[CLK_FANOUT0] = &fanout0_clk.common.hw,
[CLK_FANOUT1] = &fanout1_clk.common.hw,
[CLK_FANOUT2] = &fanout2_clk.common.hw,
+ [CLK_NPU] = &npu_clk.common.hw,
},
+ .num = CLK_NPU + 1,
};
static struct ccu_reset_map sun55i_a523_ccu_resets[] = {
diff --git a/drivers/clk/sunxi-ng/ccu-sun55i-a523.h b/drivers/clk/sunxi-ng/ccu-sun55i-a523.h
deleted file mode 100644
index fc8dd42f1b47..000000000000
--- a/drivers/clk/sunxi-ng/ccu-sun55i-a523.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright 2024 Arm Ltd.
- */
-
-#ifndef _CCU_SUN55I_A523_H
-#define _CCU_SUN55I_A523_H
-
-#include <dt-bindings/clock/sun55i-a523-ccu.h>
-#include <dt-bindings/reset/sun55i-a523-ccu.h>
-
-#define CLK_NUMBER (CLK_FANOUT2 + 1)
-
-#endif /* _CCU_SUN55I_A523_H */
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c
index 0536e880b80f..f6bfeba009e8 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c
@@ -325,6 +325,13 @@ static const struct sun6i_rtc_match_data sun50i_r329_rtc_ccu_data = {
.osc32k_fanout_nparents = ARRAY_SIZE(sun50i_r329_osc32k_fanout_parents),
};
+static const struct sun6i_rtc_match_data sun55i_a523_rtc_ccu_data = {
+ .have_ext_osc32k = true,
+ .have_iosc_calibration = true,
+ .osc32k_fanout_parents = sun50i_r329_osc32k_fanout_parents,
+ .osc32k_fanout_nparents = ARRAY_SIZE(sun50i_r329_osc32k_fanout_parents),
+};
+
static const struct of_device_id sun6i_rtc_ccu_match[] = {
{
.compatible = "allwinner,sun50i-h616-rtc",
@@ -334,6 +341,10 @@ static const struct of_device_id sun6i_rtc_ccu_match[] = {
.compatible = "allwinner,sun50i-r329-rtc",
.data = &sun50i_r329_rtc_ccu_data,
},
+ {
+ .compatible = "allwinner,sun55i-a523-rtc",
+ .data = &sun55i_a523_rtc_ccu_data,
+ },
{},
};
MODULE_DEVICE_TABLE(of, sun6i_rtc_ccu_match);
diff --git a/drivers/clk/sunxi-ng/ccu_div.h b/drivers/clk/sunxi-ng/ccu_div.h
index 90d49ee8e0cc..be00b3277e97 100644
--- a/drivers/clk/sunxi-ng/ccu_div.h
+++ b/drivers/clk/sunxi-ng/ccu_div.h
@@ -274,6 +274,24 @@ struct ccu_div {
SUNXI_CCU_M_HWS_WITH_GATE(_struct, _name, _parent, _reg, \
_mshift, _mwidth, 0, _flags)
+#define SUNXI_CCU_P_DATA_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, \
+ _muxshift, _muxwidth, \
+ _gate, _flags) \
+ struct ccu_div _struct = { \
+ .enable = _gate, \
+ .div = _SUNXI_CCU_DIV_FLAGS(_mshift, _mwidth, \
+ CLK_DIVIDER_POWER_OF_TWO), \
+ .mux = _SUNXI_CCU_MUX(_muxshift, _muxwidth), \
+ .common = { \
+ .reg = _reg, \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, \
+ _parents, \
+ &ccu_div_ops, \
+ _flags), \
+ }, \
+ }
+
static inline struct ccu_div *hw_to_ccu_div(struct clk_hw *hw)
{
struct ccu_common *common = hw_to_ccu_common(hw);
diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c
index 354c981943b6..4221b1888b38 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.c
+++ b/drivers/clk/sunxi-ng/ccu_mp.c
@@ -185,7 +185,7 @@ static unsigned long ccu_mp_recalc_rate(struct clk_hw *hw,
p &= (1 << cmp->p.width) - 1;
if (cmp->common.features & CCU_FEATURE_DUAL_DIV)
- rate = (parent_rate / p) / m;
+ rate = (parent_rate / (p + cmp->p.offset)) / m;
else
rate = (parent_rate >> p) / m;
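
(Worked example of the fix above, assuming the divider offset of 1 that the
dual-divider macros use: with a 24 MHz parent, a P field value of 4 and
m = 2, the old code computed 24000000 / 4 / 2 = 3000000 Hz, while the
corrected code computes 24000000 / (4 + 1) / 2 = 2400000 Hz.)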
diff --git a/drivers/clk/tegra/Kconfig b/drivers/clk/tegra/Kconfig
index 90df619dc087..62147a069606 100644
--- a/drivers/clk/tegra/Kconfig
+++ b/drivers/clk/tegra/Kconfig
@@ -4,7 +4,7 @@ config CLK_TEGRA_BPMP
depends on TEGRA_BPMP
config TEGRA_CLK_DFLL
- depends on ARCH_TEGRA_124_SOC || ARCH_TEGRA_210_SOC
+ depends on ARCH_TEGRA_114_SOC || ARCH_TEGRA_124_SOC || ARCH_TEGRA_210_SOC
select PM_OPP
def_bool y
diff --git a/drivers/clk/tegra/clk-audio-sync.c b/drivers/clk/tegra/clk-audio-sync.c
index 2c4bb96eae16..468a4403f147 100644
--- a/drivers/clk/tegra/clk-audio-sync.c
+++ b/drivers/clk/tegra/clk-audio-sync.c
@@ -17,15 +17,15 @@ static unsigned long clk_sync_source_recalc_rate(struct clk_hw *hw,
return sync->rate;
}
-static long clk_sync_source_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_sync_source_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct tegra_clk_sync_source *sync = to_clk_sync_source(hw);
- if (rate > sync->max_rate)
+ if (req->rate > sync->max_rate)
return -EINVAL;
else
- return rate;
+ return 0;
}
static int clk_sync_source_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -38,7 +38,7 @@ static int clk_sync_source_set_rate(struct clk_hw *hw, unsigned long rate,
}
const struct clk_ops tegra_clk_sync_source_ops = {
- .round_rate = clk_sync_source_round_rate,
+ .determine_rate = clk_sync_source_determine_rate,
.set_rate = clk_sync_source_set_rate,
.recalc_rate = clk_sync_source_recalc_rate,
};
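
(The same mechanical conversion is applied to the other Tegra clock types
below: a .round_rate callback that returned the rounded rate, or a negative
errno, becomes a .determine_rate callback that returns a status and reports
the rate through the request. Roughly, with foo_round_rate standing in for
an old callback:

	static int foo_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
	{
		long rounded = foo_round_rate(hw, req->rate,
					      &req->best_parent_rate);

		if (rounded < 0)
			return rounded;

		req->rate = rounded;
		return 0;
	}

In the sync-source case above, returning 0 without touching req->rate is
equivalent, since the old callback returned the requested rate unchanged.)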
diff --git a/drivers/clk/tegra/clk-bpmp.c b/drivers/clk/tegra/clk-bpmp.c
index b2323cb8eddc..77a2586dbe00 100644
--- a/drivers/clk/tegra/clk-bpmp.c
+++ b/drivers/clk/tegra/clk-bpmp.c
@@ -635,7 +635,7 @@ static int tegra_bpmp_register_clocks(struct tegra_bpmp *bpmp,
bpmp->num_clocks = count;
- bpmp->clocks = devm_kcalloc(bpmp->dev, count, sizeof(struct tegra_bpmp_clk), GFP_KERNEL);
+ bpmp->clocks = devm_kcalloc(bpmp->dev, count, sizeof(*bpmp->clocks), GFP_KERNEL);
if (!bpmp->clocks)
return -ENOMEM;
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
index 58fa5a59e0c7..22dc29432eff 100644
--- a/drivers/clk/tegra/clk-dfll.c
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -882,7 +882,7 @@ static void dfll_set_frequency_request(struct tegra_dfll *td,
{
u32 val = 0;
int force_val;
- int coef = 128; /* FIXME: td->cg_scale? */;
+ int coef = 128; /* FIXME: td->cg_scale? */
force_val = (req->lut_index - td->lut_safe) * coef / td->cg;
force_val = clamp(force_val, FORCE_MIN, FORCE_MAX);
diff --git a/drivers/clk/tegra/clk-divider.c b/drivers/clk/tegra/clk-divider.c
index 38daf483ddf1..37439fcb3ac0 100644
--- a/drivers/clk/tegra/clk-divider.c
+++ b/drivers/clk/tegra/clk-divider.c
@@ -58,23 +58,31 @@ static unsigned long clk_frac_div_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long clk_frac_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_frac_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct tegra_clk_frac_div *divider = to_clk_frac_div(hw);
int div, mul;
- unsigned long output_rate = *prate;
+ unsigned long output_rate = req->best_parent_rate;
- if (!rate)
- return output_rate;
+ if (!req->rate) {
+ req->rate = output_rate;
- div = get_div(divider, rate, output_rate);
- if (div < 0)
- return *prate;
+ return 0;
+ }
+
+ div = get_div(divider, req->rate, output_rate);
+ if (div < 0) {
+ req->rate = req->best_parent_rate;
+
+ return 0;
+ }
mul = get_mul(divider);
- return DIV_ROUND_UP(output_rate * mul, div + mul);
+ req->rate = DIV_ROUND_UP(output_rate * mul, div + mul);
+
+ return 0;
}
static int clk_frac_div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -127,7 +135,7 @@ static void clk_divider_restore_context(struct clk_hw *hw)
const struct clk_ops tegra_clk_frac_div_ops = {
.recalc_rate = clk_frac_div_recalc_rate,
.set_rate = clk_frac_div_set_rate,
- .round_rate = clk_frac_div_round_rate,
+ .determine_rate = clk_frac_div_determine_rate,
.restore_context = clk_divider_restore_context,
};
diff --git a/drivers/clk/tegra/clk-periph.c b/drivers/clk/tegra/clk-periph.c
index fa0cd7bb8ee6..6ebeaa7cb656 100644
--- a/drivers/clk/tegra/clk-periph.c
+++ b/drivers/clk/tegra/clk-periph.c
@@ -51,16 +51,10 @@ static int clk_periph_determine_rate(struct clk_hw *hw,
struct tegra_clk_periph *periph = to_clk_periph(hw);
const struct clk_ops *div_ops = periph->div_ops;
struct clk_hw *div_hw = &periph->divider.hw;
- long rate;
__clk_hw_set_clk(div_hw, hw);
- rate = div_ops->round_rate(div_hw, req->rate, &req->best_parent_rate);
- if (rate < 0)
- return rate;
-
- req->rate = (unsigned long)rate;
- return 0;
+ return div_ops->determine_rate(div_hw, req);
}
static int clk_periph_set_rate(struct clk_hw *hw, unsigned long rate,
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index 100b5d9b7e26..591b9f0c155a 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -840,8 +840,8 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return ret;
}
-static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct tegra_clk_pll *pll = to_clk_pll(hw);
struct tegra_clk_pll_freq_table cfg;
@@ -849,15 +849,20 @@ static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
if (pll->params->flags & TEGRA_PLL_FIXED) {
/* PLLM/MB are used for memory; we do not change rate */
if (pll->params->flags & (TEGRA_PLLM | TEGRA_PLLMB))
- return clk_hw_get_rate(hw);
- return pll->params->fixed_rate;
+ req->rate = clk_hw_get_rate(hw);
+ else
+ req->rate = pll->params->fixed_rate;
+
+ return 0;
}
- if (_get_table_rate(hw, &cfg, rate, *prate) &&
- pll->params->calc_rate(hw, &cfg, rate, *prate))
+ if (_get_table_rate(hw, &cfg, req->rate, req->best_parent_rate) &&
+ pll->params->calc_rate(hw, &cfg, req->rate, req->best_parent_rate))
return -EINVAL;
- return cfg.output_rate;
+ req->rate = cfg.output_rate;
+
+ return 0;
}
static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
@@ -1057,7 +1062,7 @@ const struct clk_ops tegra_clk_pll_ops = {
.enable = clk_pll_enable,
.disable = clk_pll_disable,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_round_rate,
+ .determine_rate = clk_pll_determine_rate,
.set_rate = clk_pll_set_rate,
.restore_context = tegra_clk_pll_restore_context,
};
@@ -1195,7 +1200,7 @@ static const struct clk_ops tegra_clk_pllu_ops = {
.enable = clk_pllu_enable,
.disable = clk_pll_disable,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_round_rate,
+ .determine_rate = clk_pll_determine_rate,
.set_rate = clk_pll_set_rate,
};
@@ -1353,15 +1358,15 @@ static int clk_pllxc_set_rate(struct clk_hw *hw, unsigned long rate,
return ret;
}
-static long clk_pll_ramp_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pll_ramp_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct tegra_clk_pll *pll = to_clk_pll(hw);
struct tegra_clk_pll_freq_table cfg;
int ret, p_div;
- u64 output_rate = *prate;
+ u64 output_rate = req->best_parent_rate;
- ret = _pll_ramp_calc_pll(hw, &cfg, rate, *prate);
+ ret = _pll_ramp_calc_pll(hw, &cfg, req->rate, req->best_parent_rate);
if (ret < 0)
return ret;
@@ -1375,7 +1380,9 @@ static long clk_pll_ramp_round_rate(struct clk_hw *hw, unsigned long rate,
output_rate *= cfg.n;
do_div(output_rate, cfg.m * p_div);
- return output_rate;
+ req->rate = output_rate;
+
+ return 0;
}
static void _pllcx_strobe(struct tegra_clk_pll *pll)
@@ -1598,12 +1605,15 @@ static unsigned long clk_pllre_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long clk_pllre_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pllre_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct tegra_clk_pll *pll = to_clk_pll(hw);
- return _pllre_calc_rate(pll, NULL, rate, *prate);
+ req->rate = _pllre_calc_rate(pll, NULL, req->rate,
+ req->best_parent_rate);
+
+ return 0;
}
static int clk_plle_tegra114_enable(struct clk_hw *hw)
@@ -2003,7 +2013,7 @@ static const struct clk_ops tegra_clk_pllxc_ops = {
.enable = clk_pll_enable,
.disable = clk_pll_disable,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_ramp_round_rate,
+ .determine_rate = clk_pll_ramp_determine_rate,
.set_rate = clk_pllxc_set_rate,
};
@@ -2012,7 +2022,7 @@ static const struct clk_ops tegra_clk_pllc_ops = {
.enable = clk_pllc_enable,
.disable = clk_pllc_disable,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_ramp_round_rate,
+ .determine_rate = clk_pll_ramp_determine_rate,
.set_rate = clk_pllc_set_rate,
};
@@ -2021,7 +2031,7 @@ static const struct clk_ops tegra_clk_pllre_ops = {
.enable = clk_pll_enable,
.disable = clk_pll_disable,
.recalc_rate = clk_pllre_recalc_rate,
- .round_rate = clk_pllre_round_rate,
+ .determine_rate = clk_pllre_determine_rate,
.set_rate = clk_pllre_set_rate,
};
@@ -2321,7 +2331,7 @@ static const struct clk_ops tegra_clk_pllss_ops = {
.enable = clk_pll_enable,
.disable = clk_pll_disable,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_ramp_round_rate,
+ .determine_rate = clk_pll_ramp_determine_rate,
.set_rate = clk_pllxc_set_rate,
.restore_context = tegra_clk_pll_restore_context,
};
diff --git a/drivers/clk/tegra/clk-super.c b/drivers/clk/tegra/clk-super.c
index 7ec47942720c..51fb356e770e 100644
--- a/drivers/clk/tegra/clk-super.c
+++ b/drivers/clk/tegra/clk-super.c
@@ -147,17 +147,10 @@ static int clk_super_determine_rate(struct clk_hw *hw,
{
struct tegra_clk_super_mux *super = to_clk_super_mux(hw);
struct clk_hw *div_hw = &super->frac_div.hw;
- unsigned long rate;
__clk_hw_set_clk(div_hw, hw);
- rate = super->div_ops->round_rate(div_hw, req->rate,
- &req->best_parent_rate);
- if (rate < 0)
- return rate;
-
- req->rate = rate;
- return 0;
+ return super->div_ops->determine_rate(div_hw, req);
}
static unsigned long clk_super_recalc_rate(struct clk_hw *hw,
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index 73303458e886..6c8e053311c3 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -11,6 +11,7 @@
#include <linux/export.h>
#include <linux/clk/tegra.h>
#include <dt-bindings/clock/tegra114-car.h>
+#include <dt-bindings/reset/nvidia,tegra114-car.h>
#include "clk.h"
#include "clk-id.h"
@@ -1272,7 +1273,7 @@ EXPORT_SYMBOL(tegra114_clock_tune_cpu_trimmers_init);
*
* Assert the reset line of the DFLL's DVCO. No return value.
*/
-void tegra114_clock_assert_dfll_dvco_reset(void)
+static void tegra114_clock_assert_dfll_dvco_reset(void)
{
u32 v;
@@ -1281,7 +1282,6 @@ void tegra114_clock_assert_dfll_dvco_reset(void)
writel_relaxed(v, clk_base + RST_DFLL_DVCO);
tegra114_car_barrier();
}
-EXPORT_SYMBOL(tegra114_clock_assert_dfll_dvco_reset);
/**
* tegra114_clock_deassert_dfll_dvco_reset - deassert the DFLL's DVCO reset
@@ -1289,7 +1289,7 @@ EXPORT_SYMBOL(tegra114_clock_assert_dfll_dvco_reset);
* Deassert the reset line of the DFLL's DVCO, allowing the DVCO to
* operate. No return value.
*/
-void tegra114_clock_deassert_dfll_dvco_reset(void)
+static void tegra114_clock_deassert_dfll_dvco_reset(void)
{
u32 v;
@@ -1298,7 +1298,26 @@ void tegra114_clock_deassert_dfll_dvco_reset(void)
writel_relaxed(v, clk_base + RST_DFLL_DVCO);
tegra114_car_barrier();
}
-EXPORT_SYMBOL(tegra114_clock_deassert_dfll_dvco_reset);
+
+static int tegra114_reset_assert(unsigned long id)
+{
+ if (id == TEGRA114_RST_DFLL_DVCO)
+ tegra114_clock_assert_dfll_dvco_reset();
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int tegra114_reset_deassert(unsigned long id)
+{
+ if (id == TEGRA114_RST_DFLL_DVCO)
+ tegra114_clock_deassert_dfll_dvco_reset();
+ else
+ return -EINVAL;
+
+ return 0;
+}
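+
+/*
+ * Usage sketch (assumed, mirroring the Tegra124 arrangement): with the
+ * special reset registered below, a consumer such as the DFLL driver can
+ * manage the DVCO reset through the generic reset API instead of the
+ * previously exported symbols, e.g.:
+ *
+ *	rst = devm_reset_control_get(dev, "dvco");
+ *	reset_control_deassert(rst);
+ */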
static void __init tegra114_clock_init(struct device_node *np)
{
@@ -1344,6 +1363,9 @@ static void __init tegra114_clock_init(struct device_node *np)
tegra_super_clk_gen4_init(clk_base, pmc_base, tegra114_clks,
&pll_x_params);
+ tegra_init_special_resets(1, tegra114_reset_assert,
+ tegra114_reset_deassert);
+
tegra_add_of_provider(np, of_clk_src_onecell_get);
tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
diff --git a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
index 0251618b82c8..457a77c5bb62 100644
--- a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
+++ b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
@@ -29,6 +29,99 @@ struct dfll_fcpu_data {
};
/* Maximum CPU frequency, indexed by CPU speedo id */
+static const unsigned long tegra114_cpu_max_freq_table[] = {
+ [0] = 2040000000UL,
+ [1] = 1810500000UL,
+ [2] = 1912500000UL,
+ [3] = 1810500000UL,
+};
+
+#define T114_CPU_CVB_TABLE \
+ .min_millivolts = 1000, \
+ .max_millivolts = 1320, \
+ .speedo_scale = 100, \
+ .voltage_scale = 1000, \
+ .entries = { \
+ { 306000000UL, { 2190643, -141851, 3576 } }, \
+ { 408000000UL, { 2250968, -144331, 3576 } }, \
+ { 510000000UL, { 2313333, -146811, 3576 } }, \
+ { 612000000UL, { 2377738, -149291, 3576 } }, \
+ { 714000000UL, { 2444183, -151771, 3576 } }, \
+ { 816000000UL, { 2512669, -154251, 3576 } }, \
+ { 918000000UL, { 2583194, -156731, 3576 } }, \
+ { 1020000000UL, { 2655759, -159211, 3576 } }, \
+ { 1122000000UL, { 2730365, -161691, 3576 } }, \
+ { 1224000000UL, { 2807010, -164171, 3576 } }, \
+ { 1326000000UL, { 2885696, -166651, 3576 } }, \
+ { 1428000000UL, { 2966422, -169131, 3576 } }, \
+ { 1530000000UL, { 3049183, -171601, 3576 } }, \
+ { 1606500000UL, { 3112179, -173451, 3576 } }, \
+ { 1708500000UL, { 3198504, -175931, 3576 } }, \
+ { 1810500000UL, { 3304747, -179126, 3576 } }, \
+ { 1912500000UL, { 3395401, -181606, 3576 } }, \
+ { 0UL, { 0, 0, 0 } }, \
+ }, \
+ .cpu_dfll_data = { \
+ .tune0_low = 0x00b0039d, \
+ .tune0_high = 0x00b0009d, \
+ .tune1 = 0x0000001f, \
+ .tune_high_min_millivolts = 1050, \
+ }
+
+static const struct cvb_table tegra114_cpu_cvb_tables[] = {
+ {
+ .speedo_id = 0,
+ .process_id = -1,
+ .min_millivolts = 1000,
+ .max_millivolts = 1250,
+ .speedo_scale = 100,
+ .voltage_scale = 100,
+ .entries = {
+ { 306000000UL, { 107330, -1569, 0 } },
+ { 408000000UL, { 111250, -1666, 0 } },
+ { 510000000UL, { 110000, -1460, 0 } },
+ { 612000000UL, { 117290, -1745, 0 } },
+ { 714000000UL, { 122700, -1910, 0 } },
+ { 816000000UL, { 125620, -1945, 0 } },
+ { 918000000UL, { 130560, -2076, 0 } },
+ { 1020000000UL, { 137280, -2303, 0 } },
+ { 1122000000UL, { 146440, -2660, 0 } },
+ { 1224000000UL, { 152190, -2825, 0 } },
+ { 1326000000UL, { 157520, -2953, 0 } },
+ { 1428000000UL, { 166100, -3261, 0 } },
+ { 1530000000UL, { 176410, -3647, 0 } },
+ { 1632000000UL, { 189620, -4186, 0 } },
+ { 1734000000UL, { 203190, -4725, 0 } },
+ { 1836000000UL, { 222670, -5573, 0 } },
+ { 1938000000UL, { 256210, -7165, 0 } },
+ { 2040000000UL, { 250050, -6544, 0 } },
+ { 0UL, { 0, 0, 0 } },
+ },
+ .cpu_dfll_data = {
+ .tune0_low = 0x00b0019d,
+ .tune0_high = 0x00b0019d,
+ .tune1 = 0x0000001f,
+ .tune_high_min_millivolts = 1000,
+ }
+ },
+ {
+ .speedo_id = 1,
+ .process_id = -1,
+ T114_CPU_CVB_TABLE
+ },
+ {
+ .speedo_id = 2,
+ .process_id = -1,
+ T114_CPU_CVB_TABLE
+ },
+ {
+ .speedo_id = 3,
+ .process_id = -1,
+ T114_CPU_CVB_TABLE
+ },
+};
+
+/* Maximum CPU frequency, indexed by CPU speedo id */
static const unsigned long tegra124_cpu_max_freq_table[] = {
[0] = 2014500000UL,
[1] = 2320500000UL,
@@ -93,7 +186,7 @@ static const unsigned long tegra210_cpu_max_freq_table[] = {
[10] = 1504500000UL,
};
-#define CPU_CVB_TABLE \
+#define TEGRA210_CPU_CVB_TABLE \
.speedo_scale = 100, \
.voltage_scale = 1000, \
.entries = { \
@@ -120,7 +213,7 @@ static const unsigned long tegra210_cpu_max_freq_table[] = {
{ 0UL, { 0, 0, 0 } }, \
}
-#define CPU_CVB_TABLE_XA \
+#define TEGRA210_CPU_CVB_TABLE_XA \
.speedo_scale = 100, \
.voltage_scale = 1000, \
.entries = { \
@@ -143,7 +236,7 @@ static const unsigned long tegra210_cpu_max_freq_table[] = {
{ 0UL, { 0, 0, 0 } }, \
}
-#define CPU_CVB_TABLE_EUCM1 \
+#define TEGRA210_CPU_CVB_TABLE_EUCM1 \
.speedo_scale = 100, \
.voltage_scale = 1000, \
.entries = { \
@@ -166,7 +259,7 @@ static const unsigned long tegra210_cpu_max_freq_table[] = {
{ 0UL, { 0, 0, 0 } }, \
}
-#define CPU_CVB_TABLE_EUCM2 \
+#define TEGRA210_CPU_CVB_TABLE_EUCM2 \
.speedo_scale = 100, \
.voltage_scale = 1000, \
.entries = { \
@@ -188,7 +281,7 @@ static const unsigned long tegra210_cpu_max_freq_table[] = {
{ 0UL, { 0, 0, 0 } }, \
}
-#define CPU_CVB_TABLE_EUCM2_JOINT_RAIL \
+#define TEGRA210_CPU_CVB_TABLE_EUCM2_JOINT_RAIL \
.speedo_scale = 100, \
.voltage_scale = 1000, \
.entries = { \
@@ -209,7 +302,7 @@ static const unsigned long tegra210_cpu_max_freq_table[] = {
{ 0UL, { 0, 0, 0 } }, \
}
-#define CPU_CVB_TABLE_ODN \
+#define TEGRA210_CPU_CVB_TABLE_ODN \
.speedo_scale = 100, \
.voltage_scale = 1000, \
.entries = { \
@@ -238,7 +331,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 840,
.max_millivolts = 1120,
- CPU_CVB_TABLE_EUCM2_JOINT_RAIL,
+ TEGRA210_CPU_CVB_TABLE_EUCM2_JOINT_RAIL,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -251,7 +344,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 840,
.max_millivolts = 1120,
- CPU_CVB_TABLE_EUCM2_JOINT_RAIL,
+ TEGRA210_CPU_CVB_TABLE_EUCM2_JOINT_RAIL,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -264,7 +357,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 900,
.max_millivolts = 1162,
- CPU_CVB_TABLE_EUCM2,
+ TEGRA210_CPU_CVB_TABLE_EUCM2,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -276,7 +369,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 900,
.max_millivolts = 1162,
- CPU_CVB_TABLE_EUCM2,
+ TEGRA210_CPU_CVB_TABLE_EUCM2,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -288,7 +381,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 900,
.max_millivolts = 1195,
- CPU_CVB_TABLE_EUCM2,
+ TEGRA210_CPU_CVB_TABLE_EUCM2,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -300,7 +393,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 900,
.max_millivolts = 1195,
- CPU_CVB_TABLE_EUCM2,
+ TEGRA210_CPU_CVB_TABLE_EUCM2,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -312,7 +405,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 841,
.max_millivolts = 1227,
- CPU_CVB_TABLE_EUCM1,
+ TEGRA210_CPU_CVB_TABLE_EUCM1,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -325,7 +418,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 841,
.max_millivolts = 1227,
- CPU_CVB_TABLE_EUCM1,
+ TEGRA210_CPU_CVB_TABLE_EUCM1,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -338,7 +431,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 870,
.max_millivolts = 1150,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune1 = 0x20091d9,
@@ -349,7 +442,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 870,
.max_millivolts = 1150,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune1 = 0x25501d0,
@@ -360,7 +453,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 818,
.max_millivolts = 1227,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -373,7 +466,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 818,
.max_millivolts = 1227,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -386,7 +479,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = -1,
.min_millivolts = 918,
.max_millivolts = 1113,
- CPU_CVB_TABLE_XA,
+ TEGRA210_CPU_CVB_TABLE_XA,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune1 = 0x17711BD,
@@ -397,7 +490,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 825,
.max_millivolts = 1227,
- CPU_CVB_TABLE_ODN,
+ TEGRA210_CPU_CVB_TABLE_ODN,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -410,7 +503,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 825,
.max_millivolts = 1227,
- CPU_CVB_TABLE_ODN,
+ TEGRA210_CPU_CVB_TABLE_ODN,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -423,7 +516,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 870,
.max_millivolts = 1227,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune1 = 0x20091d9,
@@ -434,7 +527,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 870,
.max_millivolts = 1227,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune1 = 0x25501d0,
@@ -445,7 +538,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 837,
.max_millivolts = 1227,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -458,7 +551,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 837,
.max_millivolts = 1227,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -471,7 +564,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 850,
.max_millivolts = 1170,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -484,7 +577,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 850,
.max_millivolts = 1170,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -494,6 +587,13 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
},
};
+static const struct dfll_fcpu_data tegra114_dfll_fcpu_data = {
+ .cpu_max_freq_table = tegra114_cpu_max_freq_table,
+ .cpu_max_freq_table_size = ARRAY_SIZE(tegra114_cpu_max_freq_table),
+ .cpu_cvb_tables = tegra114_cpu_cvb_tables,
+ .cpu_cvb_tables_size = ARRAY_SIZE(tegra114_cpu_cvb_tables)
+};
+
static const struct dfll_fcpu_data tegra124_dfll_fcpu_data = {
.cpu_max_freq_table = tegra124_cpu_max_freq_table,
.cpu_max_freq_table_size = ARRAY_SIZE(tegra124_cpu_max_freq_table),
@@ -510,6 +610,10 @@ static const struct dfll_fcpu_data tegra210_dfll_fcpu_data = {
static const struct of_device_id tegra124_dfll_fcpu_of_match[] = {
{
+ .compatible = "nvidia,tegra114-dfll",
+ .data = &tegra114_dfll_fcpu_data,
+ },
+ {
.compatible = "nvidia,tegra124-dfll",
.data = &tegra124_dfll_fcpu_data,
},
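
[Editor's note] Each CVB entry above pairs a frequency step with quadratic coefficients {c0, c1, c2}: the required voltage is roughly c0 + c1*s + c2*s^2, where s is the chip's speedo value reduced by speedo_scale, with the result divided down by voltage_scale to millivolts and then clamped to the table's min_millivolts/max_millivolts. The in-tree evaluation lives in drivers/clk/tegra/cvb.c; the sketch below is an illustrative rendering of that fixed-point computation, not a copy of it:

/* Illustrative Horner-style evaluation of one CVB entry. */
static int cvb_entry_millivolts(int speedo, int speedo_scale,
				int voltage_scale, const int c[3])
{
	int mv;

	mv = DIV_ROUND_CLOSEST(c[2] * speedo, speedo_scale);
	mv = DIV_ROUND_CLOSEST((mv + c[1]) * speedo, speedo_scale) + c[0];

	/* scale from table units down to millivolts */
	return DIV_ROUND_CLOSEST(mv, voltage_scale);
}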
diff --git a/drivers/clk/tegra/clk-tegra210-emc.c b/drivers/clk/tegra/clk-tegra210-emc.c
index 672ca8c184d2..fbf3c894eb56 100644
--- a/drivers/clk/tegra/clk-tegra210-emc.c
+++ b/drivers/clk/tegra/clk-tegra210-emc.c
@@ -86,22 +86,30 @@ static unsigned long tegra210_clk_emc_recalc_rate(struct clk_hw *hw,
return DIV_ROUND_UP(parent_rate * 2, div);
}
-static long tegra210_clk_emc_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int tegra210_clk_emc_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct tegra210_clk_emc *emc = to_tegra210_clk_emc(hw);
struct tegra210_clk_emc_provider *provider = emc->provider;
unsigned int i;
- if (!provider || !provider->configs || provider->num_configs == 0)
- return clk_hw_get_rate(hw);
+ if (!provider || !provider->configs || provider->num_configs == 0) {
+ req->rate = clk_hw_get_rate(hw);
+
+ return 0;
+ }
for (i = 0; i < provider->num_configs; i++) {
- if (provider->configs[i].rate >= rate)
- return provider->configs[i].rate;
+ if (provider->configs[i].rate >= req->rate) {
+ req->rate = provider->configs[i].rate;
+
+ return 0;
+ }
}
- return provider->configs[i - 1].rate;
+ req->rate = provider->configs[i - 1].rate;
+
+ return 0;
}
static struct clk *tegra210_clk_emc_find_parent(struct tegra210_clk_emc *emc,
@@ -259,7 +267,7 @@ static int tegra210_clk_emc_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops tegra210_clk_emc_ops = {
.get_parent = tegra210_clk_emc_get_parent,
.recalc_rate = tegra210_clk_emc_recalc_rate,
- .round_rate = tegra210_clk_emc_round_rate,
+ .determine_rate = tegra210_clk_emc_determine_rate,
.set_rate = tegra210_clk_emc_set_rate,
};
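
[Editor's note] This hunk follows the pattern repeated throughout the rest of this series: .round_rate() communicated both the result and any error through a single long return value, while .determine_rate() returns 0 or a negative errno and passes the rate in and out through struct clk_rate_request. A condensed before/after sketch with placeholder names (foo_* and compute_rate() are not real symbols):

/* Before: result and error share the long return value. */
static long foo_round_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long *parent_rate)
{
	return compute_rate(rate, *parent_rate);
}

/* After: the request carries rate, best_parent_rate and best_parent_hw;
 * the return value carries only success or a negative errno.
 */
static int foo_determine_rate(struct clk_hw *hw,
			      struct clk_rate_request *req)
{
	req->rate = compute_rate(req->rate, req->best_parent_rate);

	return 0;
}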
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index 82a8cb9545eb..e7ebb63970d3 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -53,6 +53,7 @@
#define SYSTEM_CLK_RATE 0x030
#define TEGRA30_CLK_PERIPH_BANKS 5
+#define TEGRA30_CLK_CLK_MAX 311
#define PLLC_BASE 0x80
#define PLLC_MISC 0x8c
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index 9ea839af14bc..73efd2ff37c9 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -897,8 +897,6 @@ static inline bool tegra124_clk_emc_driver_available(struct clk_hw *emc_hw)
void tegra114_clock_tune_cpu_trimmers_high(void);
void tegra114_clock_tune_cpu_trimmers_low(void);
void tegra114_clock_tune_cpu_trimmers_init(void);
-void tegra114_clock_assert_dfll_dvco_reset(void);
-void tegra114_clock_deassert_dfll_dvco_reset(void);
typedef void (*tegra_clk_apply_init_table_func)(void);
extern tegra_clk_apply_init_table_func tegra_clk_apply_init_table;
diff --git a/drivers/clk/thead/clk-th1520-ap.c b/drivers/clk/thead/clk-th1520-ap.c
index cf1bba58f641..71ad03a998e8 100644
--- a/drivers/clk/thead/clk-th1520-ap.c
+++ b/drivers/clk/thead/clk-th1520-ap.c
@@ -18,6 +18,7 @@
#define TH1520_PLL_FBDIV GENMASK(19, 8)
#define TH1520_PLL_REFDIV GENMASK(5, 0)
#define TH1520_PLL_BYPASS BIT(30)
+#define TH1520_PLL_VCO_RST BIT(29)
#define TH1520_PLL_DSMPD BIT(24)
#define TH1520_PLL_FRAC GENMASK(23, 0)
#define TH1520_PLL_FRAC_BITS 24
@@ -48,12 +49,14 @@ struct ccu_mux {
};
struct ccu_gate {
- u32 enable;
- struct ccu_common common;
+ int clkid;
+ u32 reg;
+ struct clk_gate gate;
};
struct ccu_div {
u32 enable;
+ u32 div_en;
struct ccu_div_internal div;
struct ccu_internal mux;
struct ccu_common common;
@@ -87,12 +90,12 @@ struct ccu_pll {
0), \
}
-#define CCU_GATE(_clkid, _struct, _name, _parent, _reg, _gate, _flags) \
+#define CCU_GATE(_clkid, _struct, _name, _parent, _reg, _bit, _flags) \
struct ccu_gate _struct = { \
- .enable = _gate, \
- .common = { \
- .clkid = _clkid, \
- .cfg0 = _reg, \
+ .clkid = _clkid, \
+ .reg = _reg, \
+ .gate = { \
+ .bit_idx = _bit, \
.hw.init = CLK_HW_INIT_PARENTS_DATA( \
_name, \
_parent, \
@@ -120,13 +123,6 @@ static inline struct ccu_div *hw_to_ccu_div(struct clk_hw *hw)
return container_of(common, struct ccu_div, common);
}
-static inline struct ccu_gate *hw_to_ccu_gate(struct clk_hw *hw)
-{
- struct ccu_common *common = hw_to_ccu_common(hw);
-
- return container_of(common, struct ccu_gate, common);
-}
-
static u8 ccu_get_parent_helper(struct ccu_common *common,
struct ccu_internal *mux)
{
@@ -197,6 +193,55 @@ static unsigned long ccu_div_recalc_rate(struct clk_hw *hw,
return rate;
}
+static int ccu_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct ccu_div *cd = hw_to_ccu_div(hw);
+ unsigned int val;
+
+ if (cd->div_en)
+ return divider_determine_rate(hw, req, NULL,
+ cd->div.width, cd->div.flags);
+
+ regmap_read(cd->common.map, cd->common.cfg0, &val);
+ val = val >> cd->div.shift;
+ val &= GENMASK(cd->div.width - 1, 0);
+ return divider_ro_determine_rate(hw, req, NULL, cd->div.width,
+ cd->div.flags, val);
+}
+
+static int ccu_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ccu_div *cd = hw_to_ccu_div(hw);
+ int val = divider_get_val(rate, parent_rate, NULL,
+ cd->div.width, cd->div.flags);
+ unsigned int curr_val, reg_val;
+
+ if (val < 0)
+ return val;
+
+ regmap_read(cd->common.map, cd->common.cfg0, &reg_val);
+ curr_val = reg_val >> cd->div.shift;
+ curr_val &= GENMASK(cd->div.width - 1, 0);
+
+ if (!cd->div_en && curr_val != val)
+ return -EINVAL;
+
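+	/* Disable the divider while the new value is written */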
+ reg_val &= ~cd->div_en;
+ regmap_write(cd->common.map, cd->common.cfg0, reg_val);
+ udelay(1);
+
+ reg_val &= ~GENMASK(cd->div.width + cd->div.shift - 1, cd->div.shift);
+ reg_val |= val << cd->div.shift;
+ regmap_write(cd->common.map, cd->common.cfg0, reg_val);
+
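+	/* Re-enable the divider with the new value in place */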
+ reg_val |= cd->div_en;
+ regmap_write(cd->common.map, cd->common.cfg0, reg_val);
+
+ return 0;
+}
+
static u8 ccu_div_get_parent(struct clk_hw *hw)
{
struct ccu_div *cd = hw_to_ccu_div(hw);
@@ -239,9 +284,34 @@ static const struct clk_ops ccu_div_ops = {
.get_parent = ccu_div_get_parent,
.set_parent = ccu_div_set_parent,
.recalc_rate = ccu_div_recalc_rate,
- .determine_rate = clk_hw_determine_rate_no_reparent,
+ .set_rate = ccu_div_set_rate,
+ .determine_rate = ccu_div_determine_rate,
};
+static void ccu_pll_disable(struct clk_hw *hw)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+
+ regmap_set_bits(pll->common.map, pll->common.cfg1,
+ TH1520_PLL_VCO_RST);
+}
+
+static int ccu_pll_enable(struct clk_hw *hw)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+
+ return regmap_clear_bits(pll->common.map, pll->common.cfg1,
+ TH1520_PLL_VCO_RST);
+}
+
+static int ccu_pll_is_enabled(struct clk_hw *hw)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+
+ return !regmap_test_bits(pll->common.map, pll->common.cfg1,
+ TH1520_PLL_VCO_RST);
+}
+
static unsigned long th1520_pll_vco_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -299,6 +369,9 @@ static unsigned long ccu_pll_recalc_rate(struct clk_hw *hw,
}
static const struct clk_ops clk_pll_ops = {
+ .disable = ccu_pll_disable,
+ .enable = ccu_pll_enable,
+ .is_enabled = ccu_pll_is_enabled,
.recalc_rate = ccu_pll_recalc_rate,
};
@@ -314,7 +387,7 @@ static struct ccu_pll cpu_pll0_clk = {
.hw.init = CLK_HW_INIT_PARENTS_DATA("cpu-pll0",
osc_24m_clk,
&clk_pll_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -326,7 +399,7 @@ static struct ccu_pll cpu_pll1_clk = {
.hw.init = CLK_HW_INIT_PARENTS_DATA("cpu-pll1",
osc_24m_clk,
&clk_pll_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -338,7 +411,7 @@ static struct ccu_pll gmac_pll_clk = {
.hw.init = CLK_HW_INIT_PARENTS_DATA("gmac-pll",
osc_24m_clk,
&clk_pll_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -358,7 +431,7 @@ static struct ccu_pll video_pll_clk = {
.hw.init = CLK_HW_INIT_PARENTS_DATA("video-pll",
osc_24m_clk,
&clk_pll_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -410,7 +483,7 @@ static struct ccu_pll tee_pll_clk = {
.hw.init = CLK_HW_INIT_PARENTS_DATA("tee-pll",
osc_24m_clk,
&clk_pll_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -486,7 +559,7 @@ static struct ccu_div axi4_cpusys2_aclk = {
.hw.init = CLK_HW_INIT_PARENTS_HW("axi4-cpusys2-aclk",
gmac_pll_clk_parent,
&ccu_div_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -508,7 +581,7 @@ static struct ccu_div axi_aclk = {
.hw.init = CLK_HW_INIT_PARENTS_DATA("axi-aclk",
axi_parents,
&ccu_div_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -657,7 +730,7 @@ static struct ccu_div apb_pclk = {
.hw.init = CLK_HW_INIT_PARENTS_DATA("apb-pclk",
apb_parents,
&ccu_div_ops,
- CLK_IGNORE_UNUSED),
+ CLK_IS_CRITICAL),
},
};
@@ -688,7 +761,7 @@ static struct ccu_div vi_clk = {
.hw.init = CLK_HW_INIT_PARENTS_HW("vi",
video_pll_clk_parent,
&ccu_div_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -713,7 +786,7 @@ static struct ccu_div vo_axi_clk = {
.hw.init = CLK_HW_INIT_PARENTS_HW("vo-axi",
video_pll_clk_parent,
&ccu_div_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -738,7 +811,7 @@ static struct ccu_div vp_axi_clk = {
.hw.init = CLK_HW_INIT_PARENTS_HW("vp-axi",
video_pll_clk_parent,
&ccu_div_ops,
- CLK_IGNORE_UNUSED),
+ CLK_IS_CRITICAL),
},
};
@@ -756,6 +829,7 @@ static struct ccu_div venc_clk = {
};
static struct ccu_div dpu0_clk = {
+ .div_en = BIT(8),
.div = TH_CCU_DIV_FLAGS(0, 8, CLK_DIVIDER_ONE_BASED),
.common = {
.clkid = CLK_DPU0,
@@ -763,11 +837,16 @@ static struct ccu_div dpu0_clk = {
.hw.init = CLK_HW_INIT_PARENTS_HW("dpu0",
dpu0_pll_clk_parent,
&ccu_div_ops,
- 0),
+ CLK_SET_RATE_UNGATE),
},
};
+static const struct clk_parent_data dpu0_clk_pd[] = {
+ { .hw = &dpu0_clk.common.hw }
+};
+
static struct ccu_div dpu1_clk = {
+ .div_en = BIT(8),
.div = TH_CCU_DIV_FLAGS(0, 8, CLK_DIVIDER_ONE_BASED),
.common = {
.clkid = CLK_DPU1,
@@ -775,10 +854,14 @@ static struct ccu_div dpu1_clk = {
.hw.init = CLK_HW_INIT_PARENTS_HW("dpu1",
dpu1_pll_clk_parent,
&ccu_div_ops,
- 0),
+ CLK_SET_RATE_UNGATE),
},
};
+static const struct clk_parent_data dpu1_clk_pd[] = {
+ { .hw = &dpu1_clk.common.hw }
+};
+
static CLK_FIXED_FACTOR_HW(emmc_sdio_ref_clk, "emmc-sdio-ref",
&video_pll_clk.common.hw, 4, 1, 0);
@@ -786,128 +869,132 @@ static const struct clk_parent_data emmc_sdio_ref_clk_pd[] = {
{ .hw = &emmc_sdio_ref_clk.hw },
};
-static CCU_GATE(CLK_BROM, brom_clk, "brom", ahb2_cpusys_hclk_pd, 0x100, BIT(4), 0);
-static CCU_GATE(CLK_BMU, bmu_clk, "bmu", axi4_cpusys2_aclk_pd, 0x100, BIT(5), 0);
+static CCU_GATE(CLK_BROM, brom_clk, "brom", ahb2_cpusys_hclk_pd, 0x100, 4, 0);
+static CCU_GATE(CLK_BMU, bmu_clk, "bmu", axi4_cpusys2_aclk_pd, 0x100, 5, 0);
static CCU_GATE(CLK_AON2CPU_A2X, aon2cpu_a2x_clk, "aon2cpu-a2x", axi4_cpusys2_aclk_pd,
- 0x134, BIT(8), 0);
+ 0x134, 8, CLK_IS_CRITICAL);
static CCU_GATE(CLK_X2X_CPUSYS, x2x_cpusys_clk, "x2x-cpusys", axi4_cpusys2_aclk_pd,
- 0x134, BIT(7), 0);
+ 0x134, 7, CLK_IS_CRITICAL);
static CCU_GATE(CLK_CPU2AON_X2H, cpu2aon_x2h_clk, "cpu2aon-x2h", axi_aclk_pd,
- 0x138, BIT(8), CLK_IGNORE_UNUSED);
+ 0x138, 8, CLK_IS_CRITICAL);
static CCU_GATE(CLK_CPU2PERI_X2H, cpu2peri_x2h_clk, "cpu2peri-x2h", axi4_cpusys2_aclk_pd,
- 0x140, BIT(9), CLK_IGNORE_UNUSED);
+ 0x140, 9, CLK_IS_CRITICAL);
static CCU_GATE(CLK_PERISYS_APB1_HCLK, perisys_apb1_hclk, "perisys-apb1-hclk", perisys_ahb_hclk_pd,
- 0x150, BIT(9), CLK_IGNORE_UNUSED);
+ 0x150, 9, CLK_IS_CRITICAL);
static CCU_GATE(CLK_PERISYS_APB2_HCLK, perisys_apb2_hclk, "perisys-apb2-hclk", perisys_ahb_hclk_pd,
- 0x150, BIT(10), CLK_IGNORE_UNUSED);
+ 0x150, 10, CLK_IS_CRITICAL);
static CCU_GATE(CLK_PERISYS_APB3_HCLK, perisys_apb3_hclk, "perisys-apb3-hclk", perisys_ahb_hclk_pd,
- 0x150, BIT(11), CLK_IGNORE_UNUSED);
+ 0x150, 11, CLK_IS_CRITICAL);
static CCU_GATE(CLK_PERISYS_APB4_HCLK, perisys_apb4_hclk, "perisys-apb4-hclk", perisys_ahb_hclk_pd,
- 0x150, BIT(12), 0);
-static CCU_GATE(CLK_NPU_AXI, npu_axi_clk, "npu-axi", axi_aclk_pd, 0x1c8, BIT(5), 0);
-static CCU_GATE(CLK_CPU2VP, cpu2vp_clk, "cpu2vp", axi_aclk_pd, 0x1e0, BIT(13), 0);
-static CCU_GATE(CLK_EMMC_SDIO, emmc_sdio_clk, "emmc-sdio", emmc_sdio_ref_clk_pd, 0x204, BIT(30), 0);
-static CCU_GATE(CLK_GMAC1, gmac1_clk, "gmac1", gmac_pll_clk_pd, 0x204, BIT(26), 0);
-static CCU_GATE(CLK_PADCTRL1, padctrl1_clk, "padctrl1", perisys_apb_pclk_pd, 0x204, BIT(24), 0);
-static CCU_GATE(CLK_DSMART, dsmart_clk, "dsmart", perisys_apb_pclk_pd, 0x204, BIT(23), 0);
-static CCU_GATE(CLK_PADCTRL0, padctrl0_clk, "padctrl0", perisys_apb_pclk_pd, 0x204, BIT(22), 0);
-static CCU_GATE(CLK_GMAC_AXI, gmac_axi_clk, "gmac-axi", axi4_cpusys2_aclk_pd, 0x204, BIT(21), 0);
-static CCU_GATE(CLK_GPIO3, gpio3_clk, "gpio3-clk", peri2sys_apb_pclk_pd, 0x204, BIT(20), 0);
-static CCU_GATE(CLK_GMAC0, gmac0_clk, "gmac0", gmac_pll_clk_pd, 0x204, BIT(19), 0);
-static CCU_GATE(CLK_PWM, pwm_clk, "pwm", perisys_apb_pclk_pd, 0x204, BIT(18), 0);
-static CCU_GATE(CLK_QSPI0, qspi0_clk, "qspi0", video_pll_clk_pd, 0x204, BIT(17), 0);
-static CCU_GATE(CLK_QSPI1, qspi1_clk, "qspi1", video_pll_clk_pd, 0x204, BIT(16), 0);
-static CCU_GATE(CLK_SPI, spi_clk, "spi", video_pll_clk_pd, 0x204, BIT(15), 0);
-static CCU_GATE(CLK_UART0_PCLK, uart0_pclk, "uart0-pclk", perisys_apb_pclk_pd, 0x204, BIT(14), 0);
-static CCU_GATE(CLK_UART1_PCLK, uart1_pclk, "uart1-pclk", perisys_apb_pclk_pd, 0x204, BIT(13), 0);
-static CCU_GATE(CLK_UART2_PCLK, uart2_pclk, "uart2-pclk", perisys_apb_pclk_pd, 0x204, BIT(12), 0);
-static CCU_GATE(CLK_UART3_PCLK, uart3_pclk, "uart3-pclk", perisys_apb_pclk_pd, 0x204, BIT(11), 0);
-static CCU_GATE(CLK_UART4_PCLK, uart4_pclk, "uart4-pclk", perisys_apb_pclk_pd, 0x204, BIT(10), 0);
-static CCU_GATE(CLK_UART5_PCLK, uart5_pclk, "uart5-pclk", perisys_apb_pclk_pd, 0x204, BIT(9), 0);
-static CCU_GATE(CLK_GPIO0, gpio0_clk, "gpio0-clk", perisys_apb_pclk_pd, 0x204, BIT(8), 0);
-static CCU_GATE(CLK_GPIO1, gpio1_clk, "gpio1-clk", perisys_apb_pclk_pd, 0x204, BIT(7), 0);
-static CCU_GATE(CLK_GPIO2, gpio2_clk, "gpio2-clk", peri2sys_apb_pclk_pd, 0x204, BIT(6), 0);
-static CCU_GATE(CLK_I2C0, i2c0_clk, "i2c0", perisys_apb_pclk_pd, 0x204, BIT(5), 0);
-static CCU_GATE(CLK_I2C1, i2c1_clk, "i2c1", perisys_apb_pclk_pd, 0x204, BIT(4), 0);
-static CCU_GATE(CLK_I2C2, i2c2_clk, "i2c2", perisys_apb_pclk_pd, 0x204, BIT(3), 0);
-static CCU_GATE(CLK_I2C3, i2c3_clk, "i2c3", perisys_apb_pclk_pd, 0x204, BIT(2), 0);
-static CCU_GATE(CLK_I2C4, i2c4_clk, "i2c4", perisys_apb_pclk_pd, 0x204, BIT(1), 0);
-static CCU_GATE(CLK_I2C5, i2c5_clk, "i2c5", perisys_apb_pclk_pd, 0x204, BIT(0), 0);
-static CCU_GATE(CLK_SPINLOCK, spinlock_clk, "spinlock", ahb2_cpusys_hclk_pd, 0x208, BIT(10), 0);
-static CCU_GATE(CLK_DMA, dma_clk, "dma", axi4_cpusys2_aclk_pd, 0x208, BIT(8), 0);
-static CCU_GATE(CLK_MBOX0, mbox0_clk, "mbox0", apb3_cpusys_pclk_pd, 0x208, BIT(7), 0);
-static CCU_GATE(CLK_MBOX1, mbox1_clk, "mbox1", apb3_cpusys_pclk_pd, 0x208, BIT(6), 0);
-static CCU_GATE(CLK_MBOX2, mbox2_clk, "mbox2", apb3_cpusys_pclk_pd, 0x208, BIT(5), 0);
-static CCU_GATE(CLK_MBOX3, mbox3_clk, "mbox3", apb3_cpusys_pclk_pd, 0x208, BIT(4), 0);
-static CCU_GATE(CLK_WDT0, wdt0_clk, "wdt0", apb3_cpusys_pclk_pd, 0x208, BIT(3), 0);
-static CCU_GATE(CLK_WDT1, wdt1_clk, "wdt1", apb3_cpusys_pclk_pd, 0x208, BIT(2), 0);
-static CCU_GATE(CLK_TIMER0, timer0_clk, "timer0", apb3_cpusys_pclk_pd, 0x208, BIT(1), 0);
-static CCU_GATE(CLK_TIMER1, timer1_clk, "timer1", apb3_cpusys_pclk_pd, 0x208, BIT(0), 0);
-static CCU_GATE(CLK_SRAM0, sram0_clk, "sram0", axi_aclk_pd, 0x20c, BIT(4), 0);
-static CCU_GATE(CLK_SRAM1, sram1_clk, "sram1", axi_aclk_pd, 0x20c, BIT(3), 0);
-static CCU_GATE(CLK_SRAM2, sram2_clk, "sram2", axi_aclk_pd, 0x20c, BIT(2), 0);
-static CCU_GATE(CLK_SRAM3, sram3_clk, "sram3", axi_aclk_pd, 0x20c, BIT(1), 0);
+ 0x150, 12, 0);
+static const struct clk_parent_data perisys_apb4_hclk_pd[] = {
+ { .hw = &perisys_apb4_hclk.gate.hw },
+};
+
+static CCU_GATE(CLK_NPU_AXI, npu_axi_clk, "npu-axi", axi_aclk_pd, 0x1c8, 5, CLK_IS_CRITICAL);
+static CCU_GATE(CLK_CPU2VP, cpu2vp_clk, "cpu2vp", axi_aclk_pd, 0x1e0, 13, CLK_IS_CRITICAL);
+static CCU_GATE(CLK_EMMC_SDIO, emmc_sdio_clk, "emmc-sdio", emmc_sdio_ref_clk_pd, 0x204, 30, 0);
+static CCU_GATE(CLK_GMAC1, gmac1_clk, "gmac1", gmac_pll_clk_pd, 0x204, 26, 0);
+static CCU_GATE(CLK_PADCTRL1, padctrl1_clk, "padctrl1", perisys_apb_pclk_pd, 0x204, 24, 0);
+static CCU_GATE(CLK_DSMART, dsmart_clk, "dsmart", perisys_apb_pclk_pd, 0x204, 23, 0);
+static CCU_GATE(CLK_PADCTRL0, padctrl0_clk, "padctrl0", perisys_apb4_hclk_pd, 0x204, 22, 0);
+static CCU_GATE(CLK_GMAC_AXI, gmac_axi_clk, "gmac-axi", axi4_cpusys2_aclk_pd, 0x204, 21, 0);
+static CCU_GATE(CLK_GPIO3, gpio3_clk, "gpio3-clk", peri2sys_apb_pclk_pd, 0x204, 20, 0);
+static CCU_GATE(CLK_GMAC0, gmac0_clk, "gmac0", gmac_pll_clk_pd, 0x204, 19, 0);
+static CCU_GATE(CLK_PWM, pwm_clk, "pwm", perisys_apb_pclk_pd, 0x204, 18, 0);
+static CCU_GATE(CLK_QSPI0, qspi0_clk, "qspi0", video_pll_clk_pd, 0x204, 17, 0);
+static CCU_GATE(CLK_QSPI1, qspi1_clk, "qspi1", video_pll_clk_pd, 0x204, 16, 0);
+static CCU_GATE(CLK_SPI, spi_clk, "spi", video_pll_clk_pd, 0x204, 15, 0);
+static CCU_GATE(CLK_UART0_PCLK, uart0_pclk, "uart0-pclk", perisys_apb_pclk_pd, 0x204, 14, 0);
+static CCU_GATE(CLK_UART1_PCLK, uart1_pclk, "uart1-pclk", perisys_apb_pclk_pd, 0x204, 13, 0);
+static CCU_GATE(CLK_UART2_PCLK, uart2_pclk, "uart2-pclk", perisys_apb_pclk_pd, 0x204, 12, 0);
+static CCU_GATE(CLK_UART3_PCLK, uart3_pclk, "uart3-pclk", perisys_apb_pclk_pd, 0x204, 11, 0);
+static CCU_GATE(CLK_UART4_PCLK, uart4_pclk, "uart4-pclk", perisys_apb_pclk_pd, 0x204, 10, 0);
+static CCU_GATE(CLK_UART5_PCLK, uart5_pclk, "uart5-pclk", perisys_apb_pclk_pd, 0x204, 9, 0);
+static CCU_GATE(CLK_GPIO0, gpio0_clk, "gpio0-clk", perisys_apb_pclk_pd, 0x204, 8, 0);
+static CCU_GATE(CLK_GPIO1, gpio1_clk, "gpio1-clk", perisys_apb_pclk_pd, 0x204, 7, 0);
+static CCU_GATE(CLK_GPIO2, gpio2_clk, "gpio2-clk", peri2sys_apb_pclk_pd, 0x204, 6, 0);
+static CCU_GATE(CLK_I2C0, i2c0_clk, "i2c0", perisys_apb_pclk_pd, 0x204, 5, 0);
+static CCU_GATE(CLK_I2C1, i2c1_clk, "i2c1", perisys_apb_pclk_pd, 0x204, 4, 0);
+static CCU_GATE(CLK_I2C2, i2c2_clk, "i2c2", perisys_apb_pclk_pd, 0x204, 3, 0);
+static CCU_GATE(CLK_I2C3, i2c3_clk, "i2c3", perisys_apb_pclk_pd, 0x204, 2, 0);
+static CCU_GATE(CLK_I2C4, i2c4_clk, "i2c4", perisys_apb_pclk_pd, 0x204, 1, 0);
+static CCU_GATE(CLK_I2C5, i2c5_clk, "i2c5", perisys_apb_pclk_pd, 0x204, 0, 0);
+static CCU_GATE(CLK_SPINLOCK, spinlock_clk, "spinlock", ahb2_cpusys_hclk_pd, 0x208, 10, 0);
+static CCU_GATE(CLK_DMA, dma_clk, "dma", axi4_cpusys2_aclk_pd, 0x208, 8, 0);
+static CCU_GATE(CLK_MBOX0, mbox0_clk, "mbox0", apb3_cpusys_pclk_pd, 0x208, 7, 0);
+static CCU_GATE(CLK_MBOX1, mbox1_clk, "mbox1", apb3_cpusys_pclk_pd, 0x208, 6, 0);
+static CCU_GATE(CLK_MBOX2, mbox2_clk, "mbox2", apb3_cpusys_pclk_pd, 0x208, 5, 0);
+static CCU_GATE(CLK_MBOX3, mbox3_clk, "mbox3", apb3_cpusys_pclk_pd, 0x208, 4, 0);
+static CCU_GATE(CLK_WDT0, wdt0_clk, "wdt0", apb3_cpusys_pclk_pd, 0x208, 3, 0);
+static CCU_GATE(CLK_WDT1, wdt1_clk, "wdt1", apb3_cpusys_pclk_pd, 0x208, 2, 0);
+static CCU_GATE(CLK_TIMER0, timer0_clk, "timer0", apb3_cpusys_pclk_pd, 0x208, 1, 0);
+static CCU_GATE(CLK_TIMER1, timer1_clk, "timer1", apb3_cpusys_pclk_pd, 0x208, 0, 0);
+static CCU_GATE(CLK_SRAM0, sram0_clk, "sram0", axi_aclk_pd, 0x20c, 4, 0);
+static CCU_GATE(CLK_SRAM1, sram1_clk, "sram1", axi_aclk_pd, 0x20c, 3, 0);
+static CCU_GATE(CLK_SRAM2, sram2_clk, "sram2", axi_aclk_pd, 0x20c, 2, 0);
+static CCU_GATE(CLK_SRAM3, sram3_clk, "sram3", axi_aclk_pd, 0x20c, 1, 0);
static CCU_GATE(CLK_AXI4_VO_ACLK, axi4_vo_aclk, "axi4-vo-aclk",
- video_pll_clk_pd, 0x0, BIT(0), 0);
+ video_pll_clk_pd, 0x0, 0, CLK_IS_CRITICAL);
static CCU_GATE(CLK_GPU_CORE, gpu_core_clk, "gpu-core-clk", video_pll_clk_pd,
- 0x0, BIT(3), 0);
+ 0x0, 3, 0);
static CCU_GATE(CLK_GPU_CFG_ACLK, gpu_cfg_aclk, "gpu-cfg-aclk",
- video_pll_clk_pd, 0x0, BIT(4), 0);
+ video_pll_clk_pd, 0x0, 4, CLK_IS_CRITICAL);
static CCU_GATE(CLK_DPU_PIXELCLK0, dpu0_pixelclk, "dpu0-pixelclk",
- video_pll_clk_pd, 0x0, BIT(5), 0);
+ dpu0_clk_pd, 0x0, 5, CLK_SET_RATE_PARENT);
static CCU_GATE(CLK_DPU_PIXELCLK1, dpu1_pixelclk, "dpu1-pixelclk",
- video_pll_clk_pd, 0x0, BIT(6), 0);
+ dpu1_clk_pd, 0x0, 6, CLK_SET_RATE_PARENT);
static CCU_GATE(CLK_DPU_HCLK, dpu_hclk, "dpu-hclk", video_pll_clk_pd, 0x0,
- BIT(7), 0);
+ 7, 0);
static CCU_GATE(CLK_DPU_ACLK, dpu_aclk, "dpu-aclk", video_pll_clk_pd, 0x0,
- BIT(8), 0);
+ 8, 0);
static CCU_GATE(CLK_DPU_CCLK, dpu_cclk, "dpu-cclk", video_pll_clk_pd, 0x0,
- BIT(9), 0);
+ 9, 0);
static CCU_GATE(CLK_HDMI_SFR, hdmi_sfr_clk, "hdmi-sfr-clk", video_pll_clk_pd,
- 0x0, BIT(10), 0);
+ 0x0, 10, 0);
static CCU_GATE(CLK_HDMI_PCLK, hdmi_pclk, "hdmi-pclk", video_pll_clk_pd, 0x0,
- BIT(11), 0);
+ 11, 0);
static CCU_GATE(CLK_HDMI_CEC, hdmi_cec_clk, "hdmi-cec-clk", video_pll_clk_pd,
- 0x0, BIT(12), 0);
+ 0x0, 12, 0);
static CCU_GATE(CLK_MIPI_DSI0_PCLK, mipi_dsi0_pclk, "mipi-dsi0-pclk",
- video_pll_clk_pd, 0x0, BIT(13), 0);
+ video_pll_clk_pd, 0x0, 13, 0);
static CCU_GATE(CLK_MIPI_DSI1_PCLK, mipi_dsi1_pclk, "mipi-dsi1-pclk",
- video_pll_clk_pd, 0x0, BIT(14), 0);
+ video_pll_clk_pd, 0x0, 14, 0);
static CCU_GATE(CLK_MIPI_DSI0_CFG, mipi_dsi0_cfg_clk, "mipi-dsi0-cfg-clk",
- video_pll_clk_pd, 0x0, BIT(15), 0);
+ video_pll_clk_pd, 0x0, 15, 0);
static CCU_GATE(CLK_MIPI_DSI1_CFG, mipi_dsi1_cfg_clk, "mipi-dsi1-cfg-clk",
- video_pll_clk_pd, 0x0, BIT(16), 0);
+ video_pll_clk_pd, 0x0, 16, 0);
static CCU_GATE(CLK_MIPI_DSI0_REFCLK, mipi_dsi0_refclk, "mipi-dsi0-refclk",
- video_pll_clk_pd, 0x0, BIT(17), 0);
+ video_pll_clk_pd, 0x0, 17, 0);
static CCU_GATE(CLK_MIPI_DSI1_REFCLK, mipi_dsi1_refclk, "mipi-dsi1-refclk",
- video_pll_clk_pd, 0x0, BIT(18), 0);
+ video_pll_clk_pd, 0x0, 18, 0);
static CCU_GATE(CLK_HDMI_I2S, hdmi_i2s_clk, "hdmi-i2s-clk", video_pll_clk_pd,
- 0x0, BIT(19), 0);
+ 0x0, 19, 0);
static CCU_GATE(CLK_X2H_DPU1_ACLK, x2h_dpu1_aclk, "x2h-dpu1-aclk",
- video_pll_clk_pd, 0x0, BIT(20), 0);
+ video_pll_clk_pd, 0x0, 20, CLK_IS_CRITICAL);
static CCU_GATE(CLK_X2H_DPU_ACLK, x2h_dpu_aclk, "x2h-dpu-aclk",
- video_pll_clk_pd, 0x0, BIT(21), 0);
+ video_pll_clk_pd, 0x0, 21, CLK_IS_CRITICAL);
static CCU_GATE(CLK_AXI4_VO_PCLK, axi4_vo_pclk, "axi4-vo-pclk",
- video_pll_clk_pd, 0x0, BIT(22), 0);
+ video_pll_clk_pd, 0x0, 22, 0);
static CCU_GATE(CLK_IOPMP_VOSYS_DPU_PCLK, iopmp_vosys_dpu_pclk,
- "iopmp-vosys-dpu-pclk", video_pll_clk_pd, 0x0, BIT(23), 0);
+ "iopmp-vosys-dpu-pclk", video_pll_clk_pd, 0x0, 23, 0);
static CCU_GATE(CLK_IOPMP_VOSYS_DPU1_PCLK, iopmp_vosys_dpu1_pclk,
- "iopmp-vosys-dpu1-pclk", video_pll_clk_pd, 0x0, BIT(24), 0);
+ "iopmp-vosys-dpu1-pclk", video_pll_clk_pd, 0x0, 24, 0);
static CCU_GATE(CLK_IOPMP_VOSYS_GPU_PCLK, iopmp_vosys_gpu_pclk,
- "iopmp-vosys-gpu-pclk", video_pll_clk_pd, 0x0, BIT(25), 0);
+ "iopmp-vosys-gpu-pclk", video_pll_clk_pd, 0x0, 25, 0);
static CCU_GATE(CLK_IOPMP_DPU1_ACLK, iopmp_dpu1_aclk, "iopmp-dpu1-aclk",
- video_pll_clk_pd, 0x0, BIT(27), 0);
+ video_pll_clk_pd, 0x0, 27, CLK_IS_CRITICAL);
static CCU_GATE(CLK_IOPMP_DPU_ACLK, iopmp_dpu_aclk, "iopmp-dpu-aclk",
- video_pll_clk_pd, 0x0, BIT(28), 0);
+ video_pll_clk_pd, 0x0, 28, CLK_IS_CRITICAL);
static CCU_GATE(CLK_IOPMP_GPU_ACLK, iopmp_gpu_aclk, "iopmp-gpu-aclk",
- video_pll_clk_pd, 0x0, BIT(29), 0);
+ video_pll_clk_pd, 0x0, 29, CLK_IS_CRITICAL);
static CCU_GATE(CLK_MIPIDSI0_PIXCLK, mipi_dsi0_pixclk, "mipi-dsi0-pixclk",
- video_pll_clk_pd, 0x0, BIT(30), 0);
+ video_pll_clk_pd, 0x0, 30, 0);
static CCU_GATE(CLK_MIPIDSI1_PIXCLK, mipi_dsi1_pixclk, "mipi-dsi1-pixclk",
- video_pll_clk_pd, 0x0, BIT(31), 0);
+ video_pll_clk_pd, 0x0, 31, 0);
static CCU_GATE(CLK_HDMI_PIXCLK, hdmi_pixclk, "hdmi-pixclk", video_pll_clk_pd,
- 0x4, BIT(0), 0);
+ 0x4, 0, 0);
static CLK_FIXED_FACTOR_HW(gmac_pll_clk_100m, "gmac-pll-clk-100m",
&gmac_pll_clk.common.hw, 10, 1, 0);
@@ -963,107 +1050,106 @@ static struct ccu_mux *th1520_mux_clks[] = {
&uart_sclk,
};
-static struct ccu_common *th1520_gate_clks[] = {
- &emmc_sdio_clk.common,
- &aon2cpu_a2x_clk.common,
- &x2x_cpusys_clk.common,
- &brom_clk.common,
- &bmu_clk.common,
- &cpu2aon_x2h_clk.common,
- &cpu2peri_x2h_clk.common,
- &cpu2vp_clk.common,
- &perisys_apb1_hclk.common,
- &perisys_apb2_hclk.common,
- &perisys_apb3_hclk.common,
- &perisys_apb4_hclk.common,
- &npu_axi_clk.common,
- &gmac1_clk.common,
- &padctrl1_clk.common,
- &dsmart_clk.common,
- &padctrl0_clk.common,
- &gmac_axi_clk.common,
- &gpio3_clk.common,
- &gmac0_clk.common,
- &pwm_clk.common,
- &qspi0_clk.common,
- &qspi1_clk.common,
- &spi_clk.common,
- &uart0_pclk.common,
- &uart1_pclk.common,
- &uart2_pclk.common,
- &uart3_pclk.common,
- &uart4_pclk.common,
- &uart5_pclk.common,
- &gpio0_clk.common,
- &gpio1_clk.common,
- &gpio2_clk.common,
- &i2c0_clk.common,
- &i2c1_clk.common,
- &i2c2_clk.common,
- &i2c3_clk.common,
- &i2c4_clk.common,
- &i2c5_clk.common,
- &spinlock_clk.common,
- &dma_clk.common,
- &mbox0_clk.common,
- &mbox1_clk.common,
- &mbox2_clk.common,
- &mbox3_clk.common,
- &wdt0_clk.common,
- &wdt1_clk.common,
- &timer0_clk.common,
- &timer1_clk.common,
- &sram0_clk.common,
- &sram1_clk.common,
- &sram2_clk.common,
- &sram3_clk.common,
-};
-
-static struct ccu_common *th1520_vo_gate_clks[] = {
- &axi4_vo_aclk.common,
- &gpu_core_clk.common,
- &gpu_cfg_aclk.common,
- &dpu0_pixelclk.common,
- &dpu1_pixelclk.common,
- &dpu_hclk.common,
- &dpu_aclk.common,
- &dpu_cclk.common,
- &hdmi_sfr_clk.common,
- &hdmi_pclk.common,
- &hdmi_cec_clk.common,
- &mipi_dsi0_pclk.common,
- &mipi_dsi1_pclk.common,
- &mipi_dsi0_cfg_clk.common,
- &mipi_dsi1_cfg_clk.common,
- &mipi_dsi0_refclk.common,
- &mipi_dsi1_refclk.common,
- &hdmi_i2s_clk.common,
- &x2h_dpu1_aclk.common,
- &x2h_dpu_aclk.common,
- &axi4_vo_pclk.common,
- &iopmp_vosys_dpu_pclk.common,
- &iopmp_vosys_dpu1_pclk.common,
- &iopmp_vosys_gpu_pclk.common,
- &iopmp_dpu1_aclk.common,
- &iopmp_dpu_aclk.common,
- &iopmp_gpu_aclk.common,
- &mipi_dsi0_pixclk.common,
- &mipi_dsi1_pixclk.common,
- &hdmi_pixclk.common
+static struct ccu_gate *th1520_gate_clks[] = {
+ &emmc_sdio_clk,
+ &aon2cpu_a2x_clk,
+ &x2x_cpusys_clk,
+ &brom_clk,
+ &bmu_clk,
+ &cpu2aon_x2h_clk,
+ &cpu2peri_x2h_clk,
+ &cpu2vp_clk,
+ &perisys_apb1_hclk,
+ &perisys_apb2_hclk,
+ &perisys_apb3_hclk,
+ &perisys_apb4_hclk,
+ &npu_axi_clk,
+ &gmac1_clk,
+ &padctrl1_clk,
+ &dsmart_clk,
+ &padctrl0_clk,
+ &gmac_axi_clk,
+ &gpio3_clk,
+ &gmac0_clk,
+ &pwm_clk,
+ &qspi0_clk,
+ &qspi1_clk,
+ &spi_clk,
+ &uart0_pclk,
+ &uart1_pclk,
+ &uart2_pclk,
+ &uart3_pclk,
+ &uart4_pclk,
+ &uart5_pclk,
+ &gpio0_clk,
+ &gpio1_clk,
+ &gpio2_clk,
+ &i2c0_clk,
+ &i2c1_clk,
+ &i2c2_clk,
+ &i2c3_clk,
+ &i2c4_clk,
+ &i2c5_clk,
+ &spinlock_clk,
+ &dma_clk,
+ &mbox0_clk,
+ &mbox1_clk,
+ &mbox2_clk,
+ &mbox3_clk,
+ &wdt0_clk,
+ &wdt1_clk,
+ &timer0_clk,
+ &timer1_clk,
+ &sram0_clk,
+ &sram1_clk,
+ &sram2_clk,
+ &sram3_clk,
+};
+
+static struct ccu_gate *th1520_vo_gate_clks[] = {
+ &axi4_vo_aclk,
+ &gpu_core_clk,
+ &gpu_cfg_aclk,
+ &dpu0_pixelclk,
+ &dpu1_pixelclk,
+ &dpu_hclk,
+ &dpu_aclk,
+ &dpu_cclk,
+ &hdmi_sfr_clk,
+ &hdmi_pclk,
+ &hdmi_cec_clk,
+ &mipi_dsi0_pclk,
+ &mipi_dsi1_pclk,
+ &mipi_dsi0_cfg_clk,
+ &mipi_dsi1_cfg_clk,
+ &mipi_dsi0_refclk,
+ &mipi_dsi1_refclk,
+ &hdmi_i2s_clk,
+ &x2h_dpu1_aclk,
+ &x2h_dpu_aclk,
+ &axi4_vo_pclk,
+ &iopmp_vosys_dpu_pclk,
+ &iopmp_vosys_dpu1_pclk,
+ &iopmp_vosys_gpu_pclk,
+ &iopmp_dpu1_aclk,
+ &iopmp_dpu_aclk,
+ &iopmp_gpu_aclk,
+ &mipi_dsi0_pixclk,
+ &mipi_dsi1_pixclk,
+ &hdmi_pixclk
};
static const struct regmap_config th1520_clk_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .fast_io = true,
};
struct th1520_plat_data {
struct ccu_common **th1520_pll_clks;
struct ccu_common **th1520_div_clks;
struct ccu_mux **th1520_mux_clks;
- struct ccu_common **th1520_gate_clks;
+ struct ccu_gate **th1520_gate_clks;
int nr_clks;
int nr_pll_clks;
@@ -1102,7 +1188,6 @@ static int th1520_clk_probe(struct platform_device *pdev)
struct regmap *map;
void __iomem *base;
- struct clk_hw *hw;
int ret, i;
plat_data = device_get_match_data(&pdev->dev);
@@ -1161,20 +1246,15 @@ static int th1520_clk_probe(struct platform_device *pdev)
}
for (i = 0; i < plat_data->nr_gate_clks; i++) {
- struct ccu_gate *cg = hw_to_ccu_gate(&plat_data->th1520_gate_clks[i]->hw);
+ struct ccu_gate *cg = plat_data->th1520_gate_clks[i];
- plat_data->th1520_gate_clks[i]->map = map;
+ cg->gate.reg = base + cg->reg;
- hw = devm_clk_hw_register_gate_parent_data(dev,
- cg->common.hw.init->name,
- cg->common.hw.init->parent_data,
- cg->common.hw.init->flags,
- base + cg->common.cfg0,
- ffs(cg->enable) - 1, 0, NULL);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ ret = devm_clk_hw_register(dev, &cg->gate.hw);
+ if (ret)
+ return ret;
- priv->hws[cg->common.clkid] = hw;
+ priv->hws[cg->clkid] = &cg->gate.hw;
}
if (plat_data == &th1520_ap_platdata) {
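
[Editor's note] The effect of the rework above is that each TH1520 gate now embeds a generic struct clk_gate instead of a driver-private regmap gate, so the common gate ops can drive it once probe has patched in the MMIO address (cg->gate.reg = base + cg->reg). Assuming the ops argument hidden beyond the macro hunk is &clk_gate_ops, the new CCU_GATE() roughly expands to:

static struct ccu_gate brom_clk = {
	.clkid = CLK_BROM,
	.reg = 0x100,
	.gate = {
		.bit_idx = 4,
		.hw.init = CLK_HW_INIT_PARENTS_DATA("brom",
						    ahb2_cpusys_hclk_pd,
						    &clk_gate_ops, /* assumed */
						    0),
	},
};

which is why the probe loop can register cg->gate.hw directly with devm_clk_hw_register() and drop the hw_to_ccu_gate() helper.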
diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c
index 85c50ea39e6d..9269e6a0db6a 100644
--- a/drivers/clk/ti/clk-33xx.c
+++ b/drivers/clk/ti/clk-33xx.c
@@ -258,6 +258,8 @@ static const char *enable_init_clks[] = {
"dpll_ddr_m2_ck",
"dpll_mpu_m2_ck",
"l3_gclk",
+ /* WKUP_DEBUGSS_CLKCTRL - disable fails, AM335x Errata Advisory 1.0.42 */
+ "l3-aon-clkctrl:0000:0",
/* AM3_L3_L3_MAIN_CLKCTRL, needed during suspend */
"l3-clkctrl:00bc:0",
"l4hs_gclk",
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
index 0eab7f3e2eab..b02f84d49b96 100644
--- a/drivers/clk/ti/clk-dra7-atl.c
+++ b/drivers/clk/ti/clk-dra7-atl.c
@@ -120,16 +120,18 @@ static unsigned long atl_clk_recalc_rate(struct clk_hw *hw,
return parent_rate / cdesc->divider;
}
-static long atl_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int atl_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned divider;
- divider = (*parent_rate + rate / 2) / rate;
+ divider = (req->best_parent_rate + req->rate / 2) / req->rate;
if (divider > DRA7_ATL_DIVIDER_MASK + 1)
divider = DRA7_ATL_DIVIDER_MASK + 1;
- return *parent_rate / divider;
+ req->rate = req->best_parent_rate / divider;
+
+ return 0;
}
static int atl_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -156,7 +158,7 @@ static const struct clk_ops atl_clk_ops = {
.disable = atl_clk_disable,
.is_enabled = atl_clk_is_enabled,
.recalc_rate = atl_clk_recalc_rate,
- .round_rate = atl_clk_round_rate,
+ .determine_rate = atl_clk_determine_rate,
.set_rate = atl_clk_set_rate,
};
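
[Editor's note] The expression divider = (best_parent_rate + rate / 2) / rate above is plain round-to-nearest division. A worked example with illustrative numbers, for a 100 MHz parent and a 5.7 MHz request:

	divider = (100000000 + 5700000 / 2) / 5700000 = 18
	req->rate = 100000000 / 18 = 5555555 Hz

and the divider is clamped to DRA7_ATL_DIVIDER_MASK + 1 when the request is slower than the hardware divider can reach.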
diff --git a/drivers/clk/ti/clkt_dpll.c b/drivers/clk/ti/clkt_dpll.c
index dfaa4d1f0b64..2ecd66968af4 100644
--- a/drivers/clk/ti/clkt_dpll.c
+++ b/drivers/clk/ti/clkt_dpll.c
@@ -268,20 +268,18 @@ unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk)
/* DPLL rate rounding code */
/**
- * omap2_dpll_round_rate - round a target rate for an OMAP DPLL
+ * omap2_dpll_determine_rate - round a target rate for an OMAP DPLL
* @hw: struct clk_hw containing the struct clk * for a DPLL
- * @target_rate: desired DPLL clock rate
- * @parent_rate: parent's DPLL clock rate
+ * @req: rate request
*
* Given a DPLL and a desired target rate, round the target rate to a
* possible, programmable rate for this DPLL. Attempts to select the
* minimum possible n. Stores the computed (m, n) in the DPLL's
* dpll_data structure so set_rate() will not need to call this
- * (expensive) function again. Returns ~0 if the target rate cannot
- * be rounded, or the rounded rate upon success.
+ * (expensive) function again. Returns -EINVAL if the target rate
+ * cannot be rounded, or 0 upon success with the rounded rate
+ * stored in @req.
*/
-long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
- unsigned long *parent_rate)
+int omap2_dpll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
struct clk_hw_omap *clk = to_clk_hw_omap(hw);
int m, n, r, scaled_max_m;
@@ -295,19 +293,19 @@ long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
const char *clk_name;
if (!clk || !clk->dpll_data)
- return ~0;
+ return -EINVAL;
dd = clk->dpll_data;
- if (dd->max_rate && target_rate > dd->max_rate)
- target_rate = dd->max_rate;
+ if (dd->max_rate && req->rate > dd->max_rate)
+ req->rate = dd->max_rate;
ref_rate = clk_hw_get_rate(dd->clk_ref);
clk_name = clk_hw_get_name(hw);
pr_debug("clock: %s: starting DPLL round_rate, target rate %lu\n",
- clk_name, target_rate);
+ clk_name, req->rate);
- scaled_rt_rp = target_rate / (ref_rate / DPLL_SCALE_FACTOR);
+ scaled_rt_rp = req->rate / (ref_rate / DPLL_SCALE_FACTOR);
scaled_max_m = dd->max_multiplier * DPLL_SCALE_FACTOR;
dd->last_rounded_rate = 0;
@@ -332,7 +330,7 @@ long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
if (m > scaled_max_m)
break;
- r = _dpll_test_mult(&m, n, &new_rate, target_rate,
+ r = _dpll_test_mult(&m, n, &new_rate, req->rate,
ref_rate);
/* m can't be set low enough for this n - try with a larger n */
@@ -340,7 +338,7 @@ long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
continue;
/* skip rates above our target rate */
- delta = target_rate - new_rate;
+ delta = req->rate - new_rate;
if (delta < 0)
continue;
@@ -359,13 +357,15 @@ long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
if (prev_min_delta == LONG_MAX) {
pr_debug("clock: %s: cannot round to rate %lu\n",
- clk_name, target_rate);
- return ~0;
+ clk_name, req->rate);
+ return -EINVAL;
}
dd->last_rounded_m = min_delta_m;
dd->last_rounded_n = min_delta_n;
- dd->last_rounded_rate = target_rate - prev_min_delta;
+ dd->last_rounded_rate = req->rate - prev_min_delta;
- return dd->last_rounded_rate;
+ req->rate = dd->last_rounded_rate;
+
+ return 0;
}
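
[Editor's note] A hypothetical caller now drives this through a request object rather than a returned long; for example (rate value illustrative):

	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *dd = clk->dpll_data;
	struct clk_rate_request req;

	clk_hw_init_rate_request(hw, &req, 196608000);
	if (!omap2_dpll_determine_rate(hw, &req))
		pr_debug("rounded to %lu Hz (m=%u, n=%u)\n", req.rate,
			 dd->last_rounded_m, dd->last_rounded_n);

with -EINVAL replacing the old ~0 sentinel on failure.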
diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
index 2de7acea1ea0..d5e24fe4ae3a 100644
--- a/drivers/clk/ti/clock.h
+++ b/drivers/clk/ti/clock.h
@@ -273,8 +273,7 @@ int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
u8 index);
int omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req);
-long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
- unsigned long *parent_rate);
+int omap2_dpll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req);
unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
unsigned long parent_rate);
@@ -296,9 +295,6 @@ void omap3_clk_lock_dpll5(void);
unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
unsigned long parent_rate);
-long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
- unsigned long target_rate,
- unsigned long *parent_rate);
int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req);
int omap2_clk_for_each(int (*fn)(struct clk_hw_omap *hw));
diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c
index b85382c370f7..8cba259188d4 100644
--- a/drivers/clk/ti/composite.c
+++ b/drivers/clk/ti/composite.c
@@ -26,8 +26,8 @@ static unsigned long ti_composite_recalc_rate(struct clk_hw *hw,
return ti_clk_divider_ops.recalc_rate(hw, parent_rate);
}
-static long ti_composite_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int ti_composite_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
return -EINVAL;
}
@@ -40,7 +40,7 @@ static int ti_composite_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops ti_composite_divider_ops = {
.recalc_rate = &ti_composite_recalc_rate,
- .round_rate = &ti_composite_round_rate,
+ .determine_rate = &ti_composite_determine_rate,
.set_rate = &ti_composite_set_rate,
};
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index ade99ab6cfa9..6f58a0f2e74a 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -223,13 +223,15 @@ static int ti_clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
return bestdiv;
}
-static long ti_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int ti_clk_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int div;
- div = ti_clk_divider_bestdiv(hw, rate, prate);
+ div = ti_clk_divider_bestdiv(hw, req->rate, &req->best_parent_rate);
- return DIV_ROUND_UP(*prate, div);
+ req->rate = DIV_ROUND_UP(req->best_parent_rate, div);
+
+ return 0;
}
static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -299,7 +301,7 @@ static void clk_divider_restore_context(struct clk_hw *hw)
const struct clk_ops ti_clk_divider_ops = {
.recalc_rate = ti_clk_divider_recalc_rate,
- .round_rate = ti_clk_divider_round_rate,
+ .determine_rate = ti_clk_divider_determine_rate,
.set_rate = ti_clk_divider_set_rate,
.save_context = clk_divider_save_context,
.restore_context = clk_divider_restore_context,
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
index 3386bd1903df..971adafd9a8b 100644
--- a/drivers/clk/ti/dpll.c
+++ b/drivers/clk/ti/dpll.c
@@ -25,7 +25,6 @@ static const struct clk_ops dpll_m4xen_ck_ops = {
.enable = &omap3_noncore_dpll_enable,
.disable = &omap3_noncore_dpll_disable,
.recalc_rate = &omap4_dpll_regm4xen_recalc,
- .round_rate = &omap4_dpll_regm4xen_round_rate,
.set_rate = &omap3_noncore_dpll_set_rate,
.set_parent = &omap3_noncore_dpll_set_parent,
.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
@@ -48,7 +47,6 @@ static const struct clk_ops dpll_ck_ops = {
.enable = &omap3_noncore_dpll_enable,
.disable = &omap3_noncore_dpll_disable,
.recalc_rate = &omap3_dpll_recalc,
- .round_rate = &omap2_dpll_round_rate,
.set_rate = &omap3_noncore_dpll_set_rate,
.set_parent = &omap3_noncore_dpll_set_parent,
.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
@@ -61,7 +59,6 @@ static const struct clk_ops dpll_ck_ops = {
static const struct clk_ops dpll_no_gate_ck_ops = {
.recalc_rate = &omap3_dpll_recalc,
.get_parent = &omap2_init_dpll_parent,
- .round_rate = &omap2_dpll_round_rate,
.set_rate = &omap3_noncore_dpll_set_rate,
.set_parent = &omap3_noncore_dpll_set_parent,
.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
@@ -80,7 +77,7 @@ const struct clk_hw_omap_ops clkhwops_omap3_dpll = {};
static const struct clk_ops omap2_dpll_core_ck_ops = {
.get_parent = &omap2_init_dpll_parent,
.recalc_rate = &omap2_dpllcore_recalc,
- .round_rate = &omap2_dpll_round_rate,
+ .determine_rate = &omap2_dpll_determine_rate,
.set_rate = &omap2_reprogram_dpllcore,
};
#else
@@ -91,7 +88,7 @@ static const struct clk_ops omap2_dpll_core_ck_ops = {};
static const struct clk_ops omap3_dpll_core_ck_ops = {
.get_parent = &omap2_init_dpll_parent,
.recalc_rate = &omap3_dpll_recalc,
- .round_rate = &omap2_dpll_round_rate,
+ .determine_rate = &omap2_dpll_determine_rate,
};
static const struct clk_ops omap3_dpll_ck_ops = {
@@ -103,7 +100,6 @@ static const struct clk_ops omap3_dpll_ck_ops = {
.set_parent = &omap3_noncore_dpll_set_parent,
.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
.determine_rate = &omap3_noncore_dpll_determine_rate,
- .round_rate = &omap2_dpll_round_rate,
};
static const struct clk_ops omap3_dpll5_ck_ops = {
@@ -115,7 +111,6 @@ static const struct clk_ops omap3_dpll5_ck_ops = {
.set_parent = &omap3_noncore_dpll_set_parent,
.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
.determine_rate = &omap3_noncore_dpll_determine_rate,
- .round_rate = &omap2_dpll_round_rate,
};
static const struct clk_ops omap3_dpll_per_ck_ops = {
@@ -127,7 +122,6 @@ static const struct clk_ops omap3_dpll_per_ck_ops = {
.set_parent = &omap3_noncore_dpll_set_parent,
.set_rate_and_parent = &omap3_dpll4_set_rate_and_parent,
.determine_rate = &omap3_noncore_dpll_determine_rate,
- .round_rate = &omap2_dpll_round_rate,
};
#endif
diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c
index 00680486b1bd..8c51b988a04f 100644
--- a/drivers/clk/ti/dpll3xxx.c
+++ b/drivers/clk/ti/dpll3xxx.c
@@ -587,6 +587,7 @@ int omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
{
struct clk_hw_omap *clk = to_clk_hw_omap(hw);
struct dpll_data *dd;
+ int ret;
if (!req->rate)
return -EINVAL;
@@ -599,8 +600,10 @@ int omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
(dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
req->best_parent_hw = dd->clk_bypass;
} else {
- req->rate = omap2_dpll_round_rate(hw, req->rate,
- &req->best_parent_rate);
+ ret = omap2_dpll_determine_rate(hw, req);
+ if (ret != 0)
+ return ret;
+
req->best_parent_hw = dd->clk_ref;
}
diff --git a/drivers/clk/ti/dpll44xx.c b/drivers/clk/ti/dpll44xx.c
index 3fc2cab69a3f..08ed57f181b4 100644
--- a/drivers/clk/ti/dpll44xx.c
+++ b/drivers/clk/ti/dpll44xx.c
@@ -134,68 +134,13 @@ unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
}
/**
- * omap4_dpll_regm4xen_round_rate - round DPLL rate, considering REGM4XEN bit
- * @hw: struct hw_clk containing the struct clk * of the DPLL to round a rate for
- * @target_rate: the desired rate of the DPLL
- * @parent_rate: clock rate of the DPLL parent
- *
- * Compute the rate that would be programmed into the DPLL hardware
- * for @clk if set_rate() were to be provided with the rate
- * @target_rate. Takes the REGM4XEN bit into consideration, which is
- * needed for the OMAP4 ABE DPLL. Returns the rounded rate (before
- * M-dividers) upon success, -EINVAL if @clk is null or not a DPLL, or
- * ~0 if an error occurred in omap2_dpll_round_rate().
- */
-long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
- unsigned long target_rate,
- unsigned long *parent_rate)
-{
- struct clk_hw_omap *clk = to_clk_hw_omap(hw);
- struct dpll_data *dd;
- long r;
-
- if (!clk || !clk->dpll_data)
- return -EINVAL;
-
- dd = clk->dpll_data;
-
- dd->last_rounded_m4xen = 0;
-
- /*
- * First try to compute the DPLL configuration for
- * target rate without using the 4X multiplier.
- */
- r = omap2_dpll_round_rate(hw, target_rate, NULL);
- if (r != ~0)
- goto out;
-
- /*
- * If we did not find a valid DPLL configuration, try again, but
- * this time see if using the 4X multiplier can help. Enabling the
- * 4X multiplier is equivalent to dividing the target rate by 4.
- */
- r = omap2_dpll_round_rate(hw, target_rate / OMAP4430_REGM4XEN_MULT,
- NULL);
- if (r == ~0)
- return r;
-
- dd->last_rounded_rate *= OMAP4430_REGM4XEN_MULT;
- dd->last_rounded_m4xen = 1;
-
-out:
- omap4_dpll_lpmode_recalc(dd);
-
- return dd->last_rounded_rate;
-}
-
-/**
* omap4_dpll_regm4xen_determine_rate - determine rate for a DPLL
* @hw: pointer to the clock to determine rate for
* @req: target rate request
*
* Determines which DPLL mode to use for reaching a desired rate.
* Checks whether the DPLL shall be in bypass or locked mode, and if
- * locked, calculates the M,N values for the DPLL via round-rate.
+ * locked, calculates the M,N values for the DPLL.
* Returns 0 on success and a negative error value otherwise.
*/
int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
@@ -215,8 +160,36 @@ int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
(dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
req->best_parent_hw = dd->clk_bypass;
} else {
- req->rate = omap4_dpll_regm4xen_round_rate(hw, req->rate,
- &req->best_parent_rate);
+ struct clk_rate_request tmp_req;
+ long r;
+
+ clk_hw_init_rate_request(hw, &tmp_req, req->rate);
+ dd->last_rounded_m4xen = 0;
+
+ /*
+ * First try to compute the DPLL configuration for
+ * target rate without using the 4X multiplier.
+ */
+
+ r = omap2_dpll_determine_rate(hw, &tmp_req);
+ if (r < 0) {
+ /*
+ * If we did not find a valid DPLL configuration, try again, but
+ * this time see if using the 4X multiplier can help. Enabling the
+ * 4X multiplier is equivalent to dividing the target rate by 4.
+ */
+ tmp_req.rate /= OMAP4430_REGM4XEN_MULT;
+ r = omap2_dpll_determine_rate(hw, &tmp_req);
+ if (r < 0)
+ return r;
+
+ dd->last_rounded_rate *= OMAP4430_REGM4XEN_MULT;
+ dd->last_rounded_m4xen = 1;
+ }
+
+ omap4_dpll_lpmode_recalc(dd);
+
+ req->rate = dd->last_rounded_rate;
req->best_parent_hw = dd->clk_ref;
}
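
[Editor's note] To make the retry above concrete (numbers illustrative): if a 196608000 Hz target fails the direct M/N search, tmp_req.rate is cut to 49152000 Hz before the second attempt; when that succeeds, last_rounded_rate is multiplied back by OMAP4430_REGM4XEN_MULT (4), so req->rate again reports 196608000 Hz and last_rounded_m4xen records that the 4X multiplier must be enabled.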
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
index 2db3fc4a443e..4f28138d2d8a 100644
--- a/drivers/clk/ti/fapll.c
+++ b/drivers/clk/ti/fapll.c
@@ -214,24 +214,27 @@ static int ti_fapll_set_div_mult(unsigned long rate,
return 0;
}
-static long ti_fapll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ti_fapll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u32 pre_div_p, mult_n;
int error;
- if (!rate)
+ if (!req->rate)
return -EINVAL;
- error = ti_fapll_set_div_mult(rate, *parent_rate,
+ error = ti_fapll_set_div_mult(req->rate, req->best_parent_rate,
&pre_div_p, &mult_n);
	if (error)
		return error;

-	rate = *parent_rate / pre_div_p;
-	rate *= mult_n;
- return rate;
+ req->rate = req->best_parent_rate / pre_div_p;
+ req->rate *= mult_n;
+
+ return 0;
}
static int ti_fapll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -268,7 +271,7 @@ static const struct clk_ops ti_fapll_ops = {
.is_enabled = ti_fapll_is_enabled,
.recalc_rate = ti_fapll_recalc_rate,
.get_parent = ti_fapll_get_parent,
- .round_rate = ti_fapll_round_rate,
+ .determine_rate = ti_fapll_determine_rate,
.set_rate = ti_fapll_set_rate,
};
@@ -399,14 +402,14 @@ static u32 ti_fapll_synth_set_frac_rate(struct fapll_synth *synth,
return post_div_m;
}
-static long ti_fapll_synth_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ti_fapll_synth_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct fapll_synth *synth = to_synth(hw);
struct fapll_data *fd = synth->fd;
unsigned long r;
- if (ti_fapll_clock_is_bypass(fd) || !synth->div || !rate)
+ if (ti_fapll_clock_is_bypass(fd) || !synth->div || !req->rate)
return -EINVAL;
/* Only post divider m available with no fractional divider? */
@@ -414,23 +417,26 @@ static long ti_fapll_synth_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long frac_rate;
u32 synth_post_div_m;
- frac_rate = ti_fapll_synth_get_frac_rate(hw, *parent_rate);
- synth_post_div_m = DIV_ROUND_UP(frac_rate, rate);
+ frac_rate = ti_fapll_synth_get_frac_rate(hw,
+ req->best_parent_rate);
+ synth_post_div_m = DIV_ROUND_UP(frac_rate, req->rate);
r = DIV_ROUND_UP(frac_rate, synth_post_div_m);
goto out;
}
- r = *parent_rate * SYNTH_PHASE_K;
- if (rate > r)
+ r = req->best_parent_rate * SYNTH_PHASE_K;
+ if (req->rate > r)
goto out;
r = DIV_ROUND_UP_ULL(r, SYNTH_MAX_INT_DIV * SYNTH_MAX_DIV_M);
- if (rate < r)
+ if (req->rate < r)
goto out;
- r = rate;
+ r = req->rate;
out:
- return r;
+ req->rate = r;
+
+ return 0;
}
static int ti_fapll_synth_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -477,7 +483,7 @@ static const struct clk_ops ti_fapll_synt_ops = {
.disable = ti_fapll_synth_disable,
.is_enabled = ti_fapll_synth_is_enabled,
.recalc_rate = ti_fapll_synth_recalc_rate,
- .round_rate = ti_fapll_synth_round_rate,
+ .determine_rate = ti_fapll_synth_determine_rate,
.set_rate = ti_fapll_synth_set_rate,
};
diff --git a/drivers/clk/ux500/clk-prcmu.c b/drivers/clk/ux500/clk-prcmu.c
index 5cbf24c94606..f775e18acd46 100644
--- a/drivers/clk/ux500/clk-prcmu.c
+++ b/drivers/clk/ux500/clk-prcmu.c
@@ -53,11 +53,13 @@ static unsigned long clk_prcmu_recalc_rate(struct clk_hw *hw,
return prcmu_clock_rate(clk->cg_sel);
}
-static long clk_prcmu_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_prcmu_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_prcmu *clk = to_clk_prcmu(hw);
- return prcmu_round_clock_rate(clk->cg_sel, rate);
+ req->rate = prcmu_round_clock_rate(clk->cg_sel, req->rate);
+
+ return 0;
}
static int clk_prcmu_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -157,7 +159,7 @@ static const struct clk_ops clk_prcmu_scalable_ops = {
.prepare = clk_prcmu_prepare,
.unprepare = clk_prcmu_unprepare,
.recalc_rate = clk_prcmu_recalc_rate,
- .round_rate = clk_prcmu_round_rate,
+ .determine_rate = clk_prcmu_determine_rate,
.set_rate = clk_prcmu_set_rate,
};
@@ -169,7 +171,7 @@ static const struct clk_ops clk_prcmu_gate_ops = {
static const struct clk_ops clk_prcmu_scalable_rate_ops = {
.recalc_rate = clk_prcmu_recalc_rate,
- .round_rate = clk_prcmu_round_rate,
+ .determine_rate = clk_prcmu_determine_rate,
.set_rate = clk_prcmu_set_rate,
};
@@ -187,7 +189,7 @@ static const struct clk_ops clk_prcmu_opp_volt_scalable_ops = {
.prepare = clk_prcmu_opp_volt_prepare,
.unprepare = clk_prcmu_opp_volt_unprepare,
.recalc_rate = clk_prcmu_recalc_rate,
- .round_rate = clk_prcmu_round_rate,
+ .determine_rate = clk_prcmu_determine_rate,
.set_rate = clk_prcmu_set_rate,
};
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index b69c3fbdfbce..86ca04ad9fab 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -234,39 +234,51 @@ static unsigned long icst_recalc_rate(struct clk_hw *hw,
return icst->rate;
}
-static long icst_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int icst_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_icst *icst = to_icst(hw);
struct icst_vco vco;
if (icst->ctype == ICST_INTEGRATOR_AP_CM ||
icst->ctype == ICST_INTEGRATOR_CP_CM_CORE) {
- if (rate <= 12000000)
- return 12000000;
- if (rate >= 160000000)
- return 160000000;
- /* Slam to closest megahertz */
- return DIV_ROUND_CLOSEST(rate, 1000000) * 1000000;
+ if (req->rate <= 12000000)
+ req->rate = 12000000;
+ else if (req->rate >= 160000000)
+ req->rate = 160000000;
+ else {
+ /* Slam to closest megahertz */
+ req->rate = DIV_ROUND_CLOSEST(req->rate, 1000000) * 1000000;
+ }
+
+ return 0;
}
if (icst->ctype == ICST_INTEGRATOR_CP_CM_MEM) {
- if (rate <= 6000000)
- return 6000000;
- if (rate >= 66000000)
- return 66000000;
- /* Slam to closest 0.5 megahertz */
- return DIV_ROUND_CLOSEST(rate, 500000) * 500000;
+ if (req->rate <= 6000000)
+ req->rate = 6000000;
+ else if (req->rate >= 66000000)
+ req->rate = 66000000;
+ else {
+ /* Slam to closest 0.5 megahertz */
+ req->rate = DIV_ROUND_CLOSEST(req->rate, 500000) * 500000;
+ }
+
+ return 0;
}
if (icst->ctype == ICST_INTEGRATOR_AP_SYS) {
/* Divides between 3 and 50 MHz in steps of 0.25 MHz */
- if (rate <= 3000000)
- return 3000000;
- if (rate >= 50000000)
- return 5000000;
- /* Slam to closest 0.25 MHz */
- return DIV_ROUND_CLOSEST(rate, 250000) * 250000;
+ if (req->rate <= 3000000)
+ req->rate = 3000000;
+ else if (req->rate >= 50000000)
+ req->rate = 5000000;
+ else {
+ /* Slam to closest 0.25 MHz */
+ req->rate = DIV_ROUND_CLOSEST(req->rate, 250000) * 250000;
+ }
+
+ return 0;
}
if (icst->ctype == ICST_INTEGRATOR_AP_PCI) {
@@ -274,14 +286,20 @@ static long icst_round_rate(struct clk_hw *hw, unsigned long rate,
* If we're below or less than halfway from 25 to 33 MHz
* select 25 MHz
*/
- if (rate <= 25000000 || rate < 29000000)
- return 25000000;
- /* Else just return the default frequency */
- return 33000000;
+ if (req->rate <= 25000000 || req->rate < 29000000)
+ req->rate = 25000000;
+ else {
+ /* Else just return the default frequency */
+ req->rate = 33000000;
+ }
+
+ return 0;
}
- vco = icst_hz_to_vco(icst->params, rate);
- return icst_hz(icst->params, vco);
+ vco = icst_hz_to_vco(icst->params, req->rate);
+ req->rate = icst_hz(icst->params, vco);
+
+ return 0;
}
static int icst_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -329,7 +347,7 @@ static int icst_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops icst_ops = {
.recalc_rate = icst_recalc_rate,
- .round_rate = icst_round_rate,
+ .determine_rate = icst_determine_rate,
.set_rate = icst_set_rate,
};
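
Consumer-visible behaviour does not change with the conversion: clk_round_rate() still reports what the callback computes, the core merely packages the request differently. An illustrative caller (clock id is hypothetical):

static int foo_probe(struct device *dev)
{
	struct clk *clk = devm_clk_get(dev, "timclk");	/* hypothetical id */
	long rounded;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* for the Integrator/AP CM branch above, 24.5 MHz snaps to 25 MHz */
	rounded = clk_round_rate(clk, 24500000);

	return rounded < 0 ? rounded : 0;
}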
diff --git a/drivers/clk/versatile/clk-vexpress-osc.c b/drivers/clk/versatile/clk-vexpress-osc.c
index c385ca2f4a74..9adbf5c33bd1 100644
--- a/drivers/clk/versatile/clk-vexpress-osc.c
+++ b/drivers/clk/versatile/clk-vexpress-osc.c
@@ -33,18 +33,18 @@ static unsigned long vexpress_osc_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long vexpress_osc_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vexpress_osc_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vexpress_osc *osc = to_vexpress_osc(hw);
- if (osc->rate_min && rate < osc->rate_min)
- rate = osc->rate_min;
+ if (osc->rate_min && req->rate < osc->rate_min)
+ req->rate = osc->rate_min;
- if (osc->rate_max && rate > osc->rate_max)
- rate = osc->rate_max;
+ if (osc->rate_max && req->rate > osc->rate_max)
+ req->rate = osc->rate_max;
- return rate;
+ return 0;
}
static int vexpress_osc_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -57,7 +57,7 @@ static int vexpress_osc_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vexpress_osc_ops = {
.recalc_rate = vexpress_osc_recalc_rate,
- .round_rate = vexpress_osc_round_rate,
+ .determine_rate = vexpress_osc_determine_rate,
.set_rate = vexpress_osc_set_rate,
};
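
struct clk_rate_request also carries the aggregated consumer constraints in req->min_rate and req->max_rate, which a determine_rate implementation may honour on top of hardware limits. A sketch in the style of the vexpress callback above (OSC_RATE_MIN/OSC_RATE_MAX are assumed constants, not from the patch):

static int foo_osc_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	/* hardware limits, as in the callback above */
	req->rate = clamp(req->rate, (unsigned long)OSC_RATE_MIN,
			  (unsigned long)OSC_RATE_MAX);

	/* consumer constraints carried in the request itself */
	req->rate = clamp(req->rate, req->min_rate, req->max_rate);

	return 0;
}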
diff --git a/drivers/clk/visconti/pll.c b/drivers/clk/visconti/pll.c
index 8ca1bad61864..681721d85032 100644
--- a/drivers/clk/visconti/pll.c
+++ b/drivers/clk/visconti/pll.c
@@ -100,8 +100,8 @@ static unsigned long visconti_get_pll_rate_from_data(struct visconti_pll *pll,
return rate_table[0].rate;
}
-static long visconti_pll_round_rate(struct clk_hw *hw,
- unsigned long rate, unsigned long *prate)
+static int visconti_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct visconti_pll *pll = to_visconti_pll(hw);
const struct visconti_pll_rate_table *rate_table = pll->rate_table;
@@ -109,11 +109,16 @@ static long visconti_pll_round_rate(struct clk_hw *hw,
/* Assuming rate_table is in descending order */
for (i = 0; i < pll->rate_count; i++)
- if (rate >= rate_table[i].rate)
- return rate_table[i].rate;
+ if (req->rate >= rate_table[i].rate) {
+ req->rate = rate_table[i].rate;
+
+ return 0;
+ }
/* return minimum supported value */
- return rate_table[i - 1].rate;
+ req->rate = rate_table[i - 1].rate;
+
+ return 0;
}
static unsigned long visconti_pll_recalc_rate(struct clk_hw *hw,
@@ -232,7 +237,7 @@ static const struct clk_ops visconti_pll_ops = {
.enable = visconti_pll_enable,
.disable = visconti_pll_disable,
.is_enabled = visconti_pll_is_enabled,
- .round_rate = visconti_pll_round_rate,
+ .determine_rate = visconti_pll_determine_rate,
.recalc_rate = visconti_pll_recalc_rate,
.set_rate = visconti_pll_set_rate,
};
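
The descending-table scan above snaps a request down to the nearest supported rate and falls back to the last (smallest) entry as a floor. A worked example with an illustrative table:

/*
 * rate_table (descending): 1600 MHz, 1200 MHz, 800 MHz
 *
 *   req->rate = 1300 MHz: 1300 >= 1200, so the request snaps to 1200 MHz
 *   req->rate =  500 MHz: no entry matches, the 800 MHz floor is used
 */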
diff --git a/drivers/clk/x86/clk-cgu.c b/drivers/clk/x86/clk-cgu.c
index 89b53f280aee..d099667355f8 100644
--- a/drivers/clk/x86/clk-cgu.c
+++ b/drivers/clk/x86/clk-cgu.c
@@ -132,14 +132,15 @@ lgm_clk_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
divider->flags, divider->width);
}
-static long
-lgm_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int lgm_clk_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
- return divider_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate, divider->table,
+ divider->width, divider->flags);
+
+ return 0;
}
static int
@@ -182,7 +183,7 @@ static void lgm_clk_divider_disable(struct clk_hw *hw)
static const struct clk_ops lgm_clk_divider_ops = {
.recalc_rate = lgm_clk_divider_recalc_rate,
- .round_rate = lgm_clk_divider_round_rate,
+ .determine_rate = lgm_clk_divider_determine_rate,
.set_rate = lgm_clk_divider_set_rate,
.enable = lgm_clk_divider_enable,
.disable = lgm_clk_divider_disable,
@@ -487,15 +488,14 @@ lgm_clk_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static long
-lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int lgm_clk_ddiv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
u32 div, ddiv1, ddiv2;
u64 rate64;
- div = DIV_ROUND_CLOSEST_ULL((u64)*prate, rate);
+ div = DIV_ROUND_CLOSEST_ULL((u64)req->best_parent_rate, req->rate);
/* if predivide bit is enabled, modify div by factor of 2.5 */
if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
@@ -503,14 +503,17 @@ lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
}
- if (div <= 0)
- return *prate;
+ if (div <= 0) {
+ req->rate = req->best_parent_rate;
+
+ return 0;
+ }
if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2) != 0)
if (lgm_clk_get_ddiv_val(div + 1, &ddiv1, &ddiv2) != 0)
return -EINVAL;
- rate64 = *prate;
+ rate64 = req->best_parent_rate;
do_div(rate64, ddiv1);
do_div(rate64, ddiv2);
@@ -520,7 +523,9 @@ lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
rate64 = DIV_ROUND_CLOSEST_ULL(rate64, 5);
}
- return rate64;
+ req->rate = rate64;
+
+ return 0;
}
static const struct clk_ops lgm_clk_ddiv_ops = {
@@ -528,7 +533,7 @@ static const struct clk_ops lgm_clk_ddiv_ops = {
.enable = lgm_clk_ddiv_enable,
.disable = lgm_clk_ddiv_disable,
.set_rate = lgm_clk_ddiv_set_rate,
- .round_rate = lgm_clk_ddiv_round_rate,
+ .determine_rate = lgm_clk_ddiv_determine_rate,
};
int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx,
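
Both LGM dividers above are thin wrappers around the core's divider helpers, and the converted shape is always the same. A generic sketch (struct foo_div and its fields are hypothetical); note the core also exposes divider_determine_rate(), which takes the request directly and is an even closer fit where available:

static int foo_div_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	struct foo_div *div = to_foo_div(hw);

	req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
				       div->table, div->width, div->flags);

	return 0;
}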
diff --git a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
index 0295a13a811c..4a0136349f71 100644
--- a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
+++ b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
@@ -322,8 +322,8 @@ err_reconfig:
return err;
}
-static long clk_wzrd_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_wzrd_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u8 div;
@@ -331,16 +331,18 @@ static long clk_wzrd_round_rate(struct clk_hw *hw, unsigned long rate,
* since we don't change parent rate we just round rate to closest
* achievable
*/
- div = DIV_ROUND_CLOSEST(*prate, rate);
+ div = DIV_ROUND_CLOSEST(req->best_parent_rate, req->rate);
- return *prate / div;
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
}
static int clk_wzrd_get_divisors_ver(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
- u64 vco_freq, freq, diff, vcomin, vcomax;
+ u64 vco_freq, freq, diff, vcomin, vcomax, best_diff = -1ULL;
u32 m, d, o;
u32 mmin, mmax, dmin, dmax, omin, omax;
@@ -356,22 +358,26 @@ static int clk_wzrd_get_divisors_ver(struct clk_hw *hw, unsigned long rate,
for (m = mmin; m <= mmax; m++) {
for (d = dmin; d <= dmax; d++) {
vco_freq = DIV_ROUND_CLOSEST((parent_rate * m), d);
- if (vco_freq >= vcomin && vco_freq <= vcomax) {
- for (o = omin; o <= omax; o++) {
- freq = DIV_ROUND_CLOSEST_ULL(vco_freq, o);
- diff = abs(freq - rate);
-
- if (diff < WZRD_MIN_ERR) {
- divider->m = m;
- divider->d = d;
- divider->o = o;
- return 0;
- }
- }
+ if (vco_freq < vcomin || vco_freq > vcomax)
+ continue;
+
+ o = DIV_ROUND_CLOSEST_ULL(vco_freq, rate);
+ if (o < omin || o > omax)
+ continue;
+ freq = DIV_ROUND_CLOSEST_ULL(vco_freq, o);
+ diff = abs(freq - rate);
+
+ if (diff < best_diff) {
+ best_diff = diff;
+ divider->m = m;
+ divider->d = d;
+ divider->o = o;
+ if (!diff)
+ return 0;
}
}
}
- return -EBUSY;
+ return 0;
}
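
The rewritten search replaces the inner scan over every output divider with a closed-form pick: for a fixed VCO frequency, the closest reachable output is always vco / round(vco / rate), so only one o per (m, d) pair needs checking, and best_diff keeps the overall winner instead of bailing at the first WZRD_MIN_ERR match. A standalone userspace sketch of the same algorithm (bounds and input rates are assumed, purely illustrative):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	const uint64_t parent = 100000000, rate = 148500000;
	const uint64_t vcomin = 800000000, vcomax = 1600000000;	/* assumed */
	const uint64_t omin = 1, omax = 128;			/* assumed */
	uint64_t best_diff = UINT64_MAX;
	unsigned int best_m = 0, best_d = 0, best_o = 0;

	for (unsigned int m = 2; m <= 128; m++) {	/* assumed m/d bounds */
		for (unsigned int d = 1; d <= 106; d++) {
			uint64_t vco = DIV_ROUND_CLOSEST(parent * m, d);
			uint64_t o, freq, diff;

			if (vco < vcomin || vco > vcomax)
				continue;
			o = DIV_ROUND_CLOSEST(vco, rate);
			if (o < omin || o > omax)
				continue;
			freq = DIV_ROUND_CLOSEST(vco, o);
			diff = freq > rate ? freq - rate : rate - freq;
			if (diff < best_diff) {
				best_diff = diff;
				best_m = m;
				best_d = d;
				best_o = o;
			}
		}
	}

	printf("m=%u d=%u o=%u (err=%llu Hz)\n", best_m, best_d, best_o,
	       (unsigned long long)best_diff);
	return 0;
}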
static int clk_wzrd_get_divisors(struct clk_hw *hw, unsigned long rate,
@@ -642,14 +648,14 @@ static unsigned long clk_wzrd_recalc_rate_all_ver(struct clk_hw *hw,
divider->flags, divider->width);
}
-static long clk_wzrd_round_rate_all(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_wzrd_determine_rate_all(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
u32 m, d, o;
int err;
- err = clk_wzrd_get_divisors(hw, rate, *prate);
+ err = clk_wzrd_get_divisors(hw, req->rate, req->best_parent_rate);
if (err)
return err;
@@ -657,19 +663,20 @@ static long clk_wzrd_round_rate_all(struct clk_hw *hw, unsigned long rate,
d = divider->d;
o = divider->o;
- rate = div_u64(*prate * (m * 1000 + divider->m_frac), d * (o * 1000 + divider->o_frac));
- return rate;
+ req->rate = div_u64(req->best_parent_rate * (m * 1000 + divider->m_frac),
+ d * (o * 1000 + divider->o_frac));
+ return 0;
}
-static long clk_wzrd_ver_round_rate_all(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_wzrd_ver_determine_rate_all(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
unsigned long int_freq;
u32 m, d, o, div, f;
int err;
- err = clk_wzrd_get_divisors_ver(hw, rate, *prate);
+ err = clk_wzrd_get_divisors_ver(hw, req->rate, req->best_parent_rate);
if (err)
return err;
@@ -678,36 +685,38 @@ static long clk_wzrd_ver_round_rate_all(struct clk_hw *hw, unsigned long rate,
o = divider->o;
div = d * o;
- int_freq = divider_recalc_rate(hw, *prate * m, div, divider->table,
+ int_freq = divider_recalc_rate(hw, req->best_parent_rate * m, div,
+ divider->table,
divider->flags, divider->width);
- if (rate > int_freq) {
- f = DIV_ROUND_CLOSEST_ULL(rate * WZRD_FRAC_POINTS, int_freq);
- rate = DIV_ROUND_CLOSEST(int_freq * f, WZRD_FRAC_POINTS);
+ if (req->rate > int_freq) {
+ f = DIV_ROUND_CLOSEST_ULL(req->rate * WZRD_FRAC_POINTS,
+ int_freq);
+ req->rate = DIV_ROUND_CLOSEST(int_freq * f, WZRD_FRAC_POINTS);
}
- return rate;
+ return 0;
}
static const struct clk_ops clk_wzrd_ver_divider_ops = {
- .round_rate = clk_wzrd_round_rate,
+ .determine_rate = clk_wzrd_determine_rate,
.set_rate = clk_wzrd_ver_dynamic_reconfig,
.recalc_rate = clk_wzrd_recalc_rate_ver,
};
static const struct clk_ops clk_wzrd_ver_div_all_ops = {
- .round_rate = clk_wzrd_ver_round_rate_all,
+ .determine_rate = clk_wzrd_ver_determine_rate_all,
.set_rate = clk_wzrd_dynamic_all_ver,
.recalc_rate = clk_wzrd_recalc_rate_all_ver,
};
static const struct clk_ops clk_wzrd_clk_divider_ops = {
- .round_rate = clk_wzrd_round_rate,
+ .determine_rate = clk_wzrd_determine_rate,
.set_rate = clk_wzrd_dynamic_reconfig,
.recalc_rate = clk_wzrd_recalc_rate,
};
static const struct clk_ops clk_wzrd_clk_div_all_ops = {
- .round_rate = clk_wzrd_round_rate_all,
+ .determine_rate = clk_wzrd_determine_rate_all,
.set_rate = clk_wzrd_dynamic_all,
.recalc_rate = clk_wzrd_recalc_rate_all,
};
@@ -769,14 +778,14 @@ static int clk_wzrd_dynamic_reconfig_f(struct clk_hw *hw, unsigned long rate,
WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
}
-static long clk_wzrd_round_rate_f(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_wzrd_determine_rate_f(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return rate;
+ return 0;
}
static const struct clk_ops clk_wzrd_clk_divider_ops_f = {
- .round_rate = clk_wzrd_round_rate_f,
+ .determine_rate = clk_wzrd_determine_rate_f,
.set_rate = clk_wzrd_dynamic_reconfig_f,
.recalc_rate = clk_wzrd_recalc_ratef,
};
@@ -1108,7 +1117,7 @@ static int clk_wzrd_register_output_clocks(struct device *dev, int nr_outputs)
(dev,
clkout_name, clk_name, 0,
clk_wzrd->base,
- (WZRD_CLK_CFG_REG(is_versal, 3) + i * 8),
+ (WZRD_CLK_CFG_REG(is_versal, 2) + i * 8),
WZRD_CLKOUT_DIVIDE_SHIFT,
WZRD_CLKOUT_DIVIDE_WIDTH,
CLK_DIVIDER_ONE_BASED |
diff --git a/drivers/clk/xilinx/xlnx_vcu.c b/drivers/clk/xilinx/xlnx_vcu.c
index 1ded67bee06c..02699bc0f82c 100644
--- a/drivers/clk/xilinx/xlnx_vcu.c
+++ b/drivers/clk/xilinx/xlnx_vcu.c
@@ -311,18 +311,21 @@ static int xvcu_pll_set_div(struct vcu_pll *pll, int div)
return 0;
}
-static long xvcu_pll_round_rate(struct clk_hw *hw,
- unsigned long rate, unsigned long *parent_rate)
+static int xvcu_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vcu_pll *pll = to_vcu_pll(hw);
unsigned int feedback_div;
- rate = clamp_t(unsigned long, rate, pll->fvco_min, pll->fvco_max);
+ req->rate = clamp_t(unsigned long, req->rate, pll->fvco_min,
+ pll->fvco_max);
- feedback_div = DIV_ROUND_CLOSEST_ULL(rate, *parent_rate);
+ feedback_div = DIV_ROUND_CLOSEST_ULL(req->rate, req->best_parent_rate);
feedback_div = clamp_t(unsigned int, feedback_div, 25, 125);
- return *parent_rate * feedback_div;
+ req->rate = req->best_parent_rate * feedback_div;
+
+ return 0;
}
static unsigned long xvcu_pll_recalc_rate(struct clk_hw *hw,
@@ -394,7 +397,7 @@ static void xvcu_pll_disable(struct clk_hw *hw)
static const struct clk_ops vcu_pll_ops = {
.enable = xvcu_pll_enable,
.disable = xvcu_pll_disable,
- .round_rate = xvcu_pll_round_rate,
+ .determine_rate = xvcu_pll_determine_rate,
.recalc_rate = xvcu_pll_recalc_rate,
.set_rate = xvcu_pll_set_rate,
};
diff --git a/drivers/clk/zynq/pll.c b/drivers/clk/zynq/pll.c
index e5f8fb704df2..5eca1c14981a 100644
--- a/drivers/clk/zynq/pll.c
+++ b/drivers/clk/zynq/pll.c
@@ -48,18 +48,20 @@ struct zynq_pll {
- * @prate: Clock frequency of parent clock
- * Return: frequency closest to @rate the hardware can generate.
+ * @req: Rate request, adjusted in place
+ * Return: 0 on success, with req->rate set to the closest achievable frequency.
*/
-static long zynq_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int zynq_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u32 fbdiv;
- fbdiv = DIV_ROUND_CLOSEST(rate, *prate);
+ fbdiv = DIV_ROUND_CLOSEST(req->rate, req->best_parent_rate);
if (fbdiv < PLL_FBDIV_MIN)
fbdiv = PLL_FBDIV_MIN;
else if (fbdiv > PLL_FBDIV_MAX)
fbdiv = PLL_FBDIV_MAX;
- return *prate * fbdiv;
+ req->rate = req->best_parent_rate * fbdiv;
+
+ return 0;
}
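
A quick worked example of the rounding above, with illustrative inputs (33.333 MHz is a common PS reference clock, but the patch does not state one):

/*
 * req->best_parent_rate = 33333333 Hz, requested rate = 1.4 GHz:
 *
 *   fbdiv = DIV_ROUND_CLOSEST(1400000000, 33333333) = 42
 *   assuming 42 lies inside [PLL_FBDIV_MIN, PLL_FBDIV_MAX], no clamping,
 *   req->rate = 33333333 * 42 = 1399999986 Hz
 */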
/**
@@ -167,7 +169,7 @@ static const struct clk_ops zynq_pll_ops = {
.enable = zynq_pll_enable,
.disable = zynq_pll_disable,
.is_enabled = zynq_pll_is_enabled,
- .round_rate = zynq_pll_round_rate,
+ .determine_rate = zynq_pll_determine_rate,
.recalc_rate = zynq_pll_recalc_rate
};
diff --git a/drivers/clk/zynqmp/divider.c b/drivers/clk/zynqmp/divider.c
index 5a00487ae408..c824eeacd8eb 100644
--- a/drivers/clk/zynqmp/divider.c
+++ b/drivers/clk/zynqmp/divider.c
@@ -118,9 +118,8 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
*
* Return: 0 on success else error+reason
*/
-static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *prate)
+static int zynqmp_clk_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct zynqmp_clk_divider *divider = to_zynqmp_clk_divider(hw);
const char *clk_name = clk_hw_get_name(hw);
@@ -145,17 +144,21 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
bestdiv = 1 << bestdiv;
- return DIV_ROUND_UP_ULL((u64)*prate, bestdiv);
+ req->rate = DIV_ROUND_UP_ULL((u64)req->best_parent_rate, bestdiv);
+
+ return 0;
}
width = fls(divider->max_div);
- rate = divider_round_rate(hw, rate, prate, NULL, width, divider->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ NULL, width, divider->flags);
- if (divider->is_frac && (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && (rate % *prate))
- *prate = rate;
+ if (divider->is_frac && (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) &&
+ (req->rate % req->best_parent_rate))
+ req->best_parent_rate = req->rate;
- return rate;
+ return 0;
}
/**
@@ -199,13 +202,13 @@ static int zynqmp_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops zynqmp_clk_divider_ops = {
.recalc_rate = zynqmp_clk_divider_recalc_rate,
- .round_rate = zynqmp_clk_divider_round_rate,
+ .determine_rate = zynqmp_clk_divider_determine_rate,
.set_rate = zynqmp_clk_divider_set_rate,
};
static const struct clk_ops zynqmp_clk_divider_ro_ops = {
.recalc_rate = zynqmp_clk_divider_recalc_rate,
- .round_rate = zynqmp_clk_divider_round_rate,
+ .determine_rate = zynqmp_clk_divider_determine_rate,
};
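
Writing to req->best_parent_rate, as the fractional branch above does, is how a determine_rate implementation asks the core to re-clock the parent when CLK_SET_RATE_PARENT is set. A minimal sketch for a hypothetical fixed divide-by-N clock (not from the patch):

static int foo_determine_rate(struct clk_hw *hw,
			      struct clk_rate_request *req)
{
	unsigned long div = 4;	/* fixed divider, for illustration */

	if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)
		/* ask the core to run the parent at div times the target */
		req->best_parent_rate = req->rate * div;

	req->rate = req->best_parent_rate / div;

	return 0;
}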
/**
diff --git a/drivers/clk/zynqmp/pll.c b/drivers/clk/zynqmp/pll.c
index 7411a7fd50ac..630a3936c97c 100644
--- a/drivers/clk/zynqmp/pll.c
+++ b/drivers/clk/zynqmp/pll.c
@@ -98,29 +98,29 @@ static inline void zynqmp_pll_set_mode(struct clk_hw *hw, bool on)
*
- * Return: Frequency closest to @rate the hardware can generate
+ * Return: 0 on success, with req->rate set to the achievable frequency
*/
-static long zynqmp_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int zynqmp_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u32 fbdiv;
u32 mult, div;
/* Let rate fall inside the range PS_PLL_VCO_MIN ~ PS_PLL_VCO_MAX */
- if (rate > PS_PLL_VCO_MAX) {
- div = DIV_ROUND_UP(rate, PS_PLL_VCO_MAX);
- rate = rate / div;
+ if (req->rate > PS_PLL_VCO_MAX) {
+ div = DIV_ROUND_UP(req->rate, PS_PLL_VCO_MAX);
+ req->rate = req->rate / div;
}
- if (rate < PS_PLL_VCO_MIN) {
- mult = DIV_ROUND_UP(PS_PLL_VCO_MIN, rate);
- rate = rate * mult;
+ if (req->rate < PS_PLL_VCO_MIN) {
+ mult = DIV_ROUND_UP(PS_PLL_VCO_MIN, req->rate);
+ req->rate = req->rate * mult;
}
- fbdiv = DIV_ROUND_CLOSEST(rate, *prate);
+ fbdiv = DIV_ROUND_CLOSEST(req->rate, req->best_parent_rate);
if (fbdiv < PLL_FBDIV_MIN || fbdiv > PLL_FBDIV_MAX) {
fbdiv = clamp_t(u32, fbdiv, PLL_FBDIV_MIN, PLL_FBDIV_MAX);
- rate = *prate * fbdiv;
+ req->rate = req->best_parent_rate * fbdiv;
}
- return rate;
+ return 0;
}
/**
@@ -294,7 +294,7 @@ static const struct clk_ops zynqmp_pll_ops = {
.enable = zynqmp_pll_enable,
.disable = zynqmp_pll_disable,
.is_enabled = zynqmp_pll_is_enabled,
- .round_rate = zynqmp_pll_round_rate,
+ .determine_rate = zynqmp_pll_determine_rate,
.recalc_rate = zynqmp_pll_recalc_rate,
.set_rate = zynqmp_pll_set_rate,
};
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 645f517a1ac2..ffcd23668763 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -395,8 +395,7 @@ config ARM_GLOBAL_TIMER
config ARM_GT_INITIAL_PRESCALER_VAL
int "ARM global timer initial prescaler value"
- default 2 if ARCH_ZYNQ
- default 1
+ default 0
depends on ARM_GLOBAL_TIMER
help
When the ARM global timer initializes, its current rate is declared
@@ -406,6 +405,7 @@ config ARM_GT_INITIAL_PRESCALER_VAL
bounds about how much the parent clock is allowed to decrease or
increase wrt the initial clock value.
This affects CPU_FREQ max delta from the initial frequency.
+ Use 0 to use auto-detection in the driver.
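
The relationship between the Kconfig value and the programmed register is value minus one, as the gt_clocksource_init() change at the end of this series shows. Illustrative numbers (the PERIPHCLK rate is assumed):

/*
 * timer rate = PERIPHCLK / psv, with (psv - 1) written to the
 * prescaler field. E.g. with PERIPHCLK = 400 MHz:
 *
 *   psv = 1 -> field 0 -> 400 MHz
 *   psv = 2 -> field 1 -> 200 MHz (the old ARCH_ZYNQ default)
 *
 * psv = 0 now selects the driver's auto-detection instead.
 */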
config ARM_TIMER_SP804
bool "Support for Dual Timer SP804 module"
@@ -474,11 +474,14 @@ config FSL_FTM_TIMER
help
Support for Freescale FlexTimer Module (FTM) timer.
-config VF_PIT_TIMER
- bool
+config NXP_PIT_TIMER
+ bool "NXP Periodic Interrupt Timer" if COMPILE_TEST
select CLKSRC_MMIO
help
- Support for Periodic Interrupt Timer on Freescale Vybrid Family SoCs.
+ Support for Periodic Interrupt Timer on Freescale / NXP
+ SoCs. This periodic timer is found on the Vybrid Family and
+	  the Automotive S32G2/3 platforms. It contains 4 channels,
+	  two of which can be coupled to form a 64-bit channel.
config SYS_SUPPORTS_SH_CMT
bool
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 205bf3b0a8f3..ec4452ee958f 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -49,7 +49,7 @@ obj-$(CONFIG_CLKSRC_LPC32XX) += timer-lpc32xx.o
obj-$(CONFIG_CLKSRC_MPS2) += mps2-timer.o
obj-$(CONFIG_CLKSRC_SAMSUNG_PWM) += samsung_pwm_timer.o
obj-$(CONFIG_FSL_FTM_TIMER) += timer-fsl-ftm.o
-obj-$(CONFIG_VF_PIT_TIMER) += timer-vf-pit.o
+obj-$(CONFIG_NXP_PIT_TIMER) += timer-nxp-pit.o
obj-$(CONFIG_CLKSRC_QCOM) += timer-qcom.o
obj-$(CONFIG_MTK_TIMER) += timer-mediatek.o
obj-$(CONFIG_MTK_CPUX_TIMER) += timer-mediatek-cpux.o
@@ -64,6 +64,7 @@ obj-$(CONFIG_REALTEK_OTTO_TIMER) += timer-rtl-otto.o
obj-$(CONFIG_ARC_TIMERS) += arc_timer.o
obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
+obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer_mmio.o
obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o
obj-$(CONFIG_ARMV7M_SYSTICK) += armv7m_systick.o
obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp804.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 80ba6a54248c..90aeff44a276 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -34,42 +34,12 @@
#include <clocksource/arm_arch_timer.h>
-#define CNTTIDR 0x08
-#define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4))
-
-#define CNTACR(n) (0x40 + ((n) * 4))
-#define CNTACR_RPCT BIT(0)
-#define CNTACR_RVCT BIT(1)
-#define CNTACR_RFRQ BIT(2)
-#define CNTACR_RVOFF BIT(3)
-#define CNTACR_RWVT BIT(4)
-#define CNTACR_RWPT BIT(5)
-
-#define CNTPCT_LO 0x00
-#define CNTVCT_LO 0x08
-#define CNTFRQ 0x10
-#define CNTP_CVAL_LO 0x20
-#define CNTP_CTL 0x2c
-#define CNTV_CVAL_LO 0x30
-#define CNTV_CTL 0x3c
-
/*
* The minimum amount of time a generic counter is guaranteed to not roll over
* (40 years)
*/
#define MIN_ROLLOVER_SECS (40ULL * 365 * 24 * 3600)
-static unsigned arch_timers_present __initdata;
-
-struct arch_timer {
- void __iomem *base;
- struct clock_event_device evt;
-};
-
-static struct arch_timer *arch_timer_mem __ro_after_init;
-
-#define to_arch_timer(e) container_of(e, struct arch_timer, evt)
-
static u32 arch_timer_rate __ro_after_init;
static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI] __ro_after_init;
@@ -85,7 +55,6 @@ static struct clock_event_device __percpu *arch_timer_evt;
static enum arch_timer_ppi_nr arch_timer_uses_ppi __ro_after_init = ARCH_TIMER_VIRT_PPI;
static bool arch_timer_c3stop __ro_after_init;
-static bool arch_timer_mem_use_virtual __ro_after_init;
static bool arch_counter_suspend_stop __ro_after_init;
#ifdef CONFIG_GENERIC_GETTIMEOFDAY
static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_ARCHTIMER;
@@ -121,76 +90,6 @@ static int arch_counter_get_width(void)
/*
* Architected system timer support.
*/
-
-static __always_inline
-void arch_timer_reg_write(int access, enum arch_timer_reg reg, u64 val,
- struct clock_event_device *clk)
-{
- if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
- struct arch_timer *timer = to_arch_timer(clk);
- switch (reg) {
- case ARCH_TIMER_REG_CTRL:
- writel_relaxed((u32)val, timer->base + CNTP_CTL);
- break;
- case ARCH_TIMER_REG_CVAL:
- /*
- * Not guaranteed to be atomic, so the timer
- * must be disabled at this point.
- */
- writeq_relaxed(val, timer->base + CNTP_CVAL_LO);
- break;
- default:
- BUILD_BUG();
- }
- } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
- struct arch_timer *timer = to_arch_timer(clk);
- switch (reg) {
- case ARCH_TIMER_REG_CTRL:
- writel_relaxed((u32)val, timer->base + CNTV_CTL);
- break;
- case ARCH_TIMER_REG_CVAL:
- /* Same restriction as above */
- writeq_relaxed(val, timer->base + CNTV_CVAL_LO);
- break;
- default:
- BUILD_BUG();
- }
- } else {
- arch_timer_reg_write_cp15(access, reg, val);
- }
-}
-
-static __always_inline
-u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
- struct clock_event_device *clk)
-{
- u32 val;
-
- if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
- struct arch_timer *timer = to_arch_timer(clk);
- switch (reg) {
- case ARCH_TIMER_REG_CTRL:
- val = readl_relaxed(timer->base + CNTP_CTL);
- break;
- default:
- BUILD_BUG();
- }
- } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
- struct arch_timer *timer = to_arch_timer(clk);
- switch (reg) {
- case ARCH_TIMER_REG_CTRL:
- val = readl_relaxed(timer->base + CNTV_CTL);
- break;
- default:
- BUILD_BUG();
- }
- } else {
- val = arch_timer_reg_read_cp15(access, reg);
- }
-
- return val;
-}
-
static noinstr u64 raw_counter_get_cntpct_stable(void)
{
return __arch_counter_get_cntpct_stable();
@@ -424,7 +323,7 @@ void erratum_set_next_event_generic(const int access, unsigned long evt,
unsigned long ctrl;
u64 cval;
- ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
+ ctrl = arch_timer_reg_read_cp15(access, ARCH_TIMER_REG_CTRL);
ctrl |= ARCH_TIMER_CTRL_ENABLE;
ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
@@ -436,7 +335,7 @@ void erratum_set_next_event_generic(const int access, unsigned long evt,
write_sysreg(cval, cntv_cval_el0);
}
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
+ arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CTRL, ctrl);
}
static __maybe_unused int erratum_set_next_event_virt(unsigned long evt,
@@ -667,10 +566,10 @@ static __always_inline irqreturn_t timer_handler(const int access,
{
unsigned long ctrl;
- ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
+ ctrl = arch_timer_reg_read_cp15(access, ARCH_TIMER_REG_CTRL);
if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
ctrl |= ARCH_TIMER_CTRL_IT_MASK;
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
+ arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CTRL, ctrl);
evt->event_handler(evt);
return IRQ_HANDLED;
}
@@ -692,28 +591,14 @@ static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}
-static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
-{
- struct clock_event_device *evt = dev_id;
-
- return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
-}
-
-static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
-{
- struct clock_event_device *evt = dev_id;
-
- return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
-}
-
static __always_inline int arch_timer_shutdown(const int access,
struct clock_event_device *clk)
{
unsigned long ctrl;
- ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
+ ctrl = arch_timer_reg_read_cp15(access, ARCH_TIMER_REG_CTRL);
ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
+ arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CTRL, ctrl);
return 0;
}
@@ -728,23 +613,13 @@ static int arch_timer_shutdown_phys(struct clock_event_device *clk)
return arch_timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
}
-static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
-{
- return arch_timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
-}
-
-static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
-{
- return arch_timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
-}
-
static __always_inline void set_next_event(const int access, unsigned long evt,
struct clock_event_device *clk)
{
unsigned long ctrl;
u64 cnt;
- ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
+ ctrl = arch_timer_reg_read_cp15(access, ARCH_TIMER_REG_CTRL);
ctrl |= ARCH_TIMER_CTRL_ENABLE;
ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
@@ -753,8 +628,8 @@ static __always_inline void set_next_event(const int access, unsigned long evt,
else
cnt = __arch_counter_get_cntvct();
- arch_timer_reg_write(access, ARCH_TIMER_REG_CVAL, evt + cnt, clk);
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
+ arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CVAL, evt + cnt);
+ arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CTRL, ctrl);
}
static int arch_timer_set_next_event_virt(unsigned long evt,
@@ -771,60 +646,6 @@ static int arch_timer_set_next_event_phys(unsigned long evt,
return 0;
}
-static noinstr u64 arch_counter_get_cnt_mem(struct arch_timer *t, int offset_lo)
-{
- u32 cnt_lo, cnt_hi, tmp_hi;
-
- do {
- cnt_hi = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo + 4));
- cnt_lo = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo));
- tmp_hi = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo + 4));
- } while (cnt_hi != tmp_hi);
-
- return ((u64) cnt_hi << 32) | cnt_lo;
-}
-
-static __always_inline void set_next_event_mem(const int access, unsigned long evt,
- struct clock_event_device *clk)
-{
- struct arch_timer *timer = to_arch_timer(clk);
- unsigned long ctrl;
- u64 cnt;
-
- ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
-
- /* Timer must be disabled before programming CVAL */
- if (ctrl & ARCH_TIMER_CTRL_ENABLE) {
- ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
- }
-
- ctrl |= ARCH_TIMER_CTRL_ENABLE;
- ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
-
- if (access == ARCH_TIMER_MEM_VIRT_ACCESS)
- cnt = arch_counter_get_cnt_mem(timer, CNTVCT_LO);
- else
- cnt = arch_counter_get_cnt_mem(timer, CNTPCT_LO);
-
- arch_timer_reg_write(access, ARCH_TIMER_REG_CVAL, evt + cnt, clk);
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
-}
-
-static int arch_timer_set_next_event_virt_mem(unsigned long evt,
- struct clock_event_device *clk)
-{
- set_next_event_mem(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
- return 0;
-}
-
-static int arch_timer_set_next_event_phys_mem(unsigned long evt,
- struct clock_event_device *clk)
-{
- set_next_event_mem(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
- return 0;
-}
-
static u64 __arch_timer_check_delta(void)
{
#ifdef CONFIG_ARM64
@@ -850,63 +671,41 @@ static u64 __arch_timer_check_delta(void)
return CLOCKSOURCE_MASK(arch_counter_get_width());
}
-static void __arch_timer_setup(unsigned type,
- struct clock_event_device *clk)
+static void __arch_timer_setup(struct clock_event_device *clk)
{
+ typeof(clk->set_next_event) sne;
u64 max_delta;
clk->features = CLOCK_EVT_FEAT_ONESHOT;
- if (type == ARCH_TIMER_TYPE_CP15) {
- typeof(clk->set_next_event) sne;
-
- arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
-
- if (arch_timer_c3stop)
- clk->features |= CLOCK_EVT_FEAT_C3STOP;
- clk->name = "arch_sys_timer";
- clk->rating = 450;
- clk->cpumask = cpumask_of(smp_processor_id());
- clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
- switch (arch_timer_uses_ppi) {
- case ARCH_TIMER_VIRT_PPI:
- clk->set_state_shutdown = arch_timer_shutdown_virt;
- clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
- sne = erratum_handler(set_next_event_virt);
- break;
- case ARCH_TIMER_PHYS_SECURE_PPI:
- case ARCH_TIMER_PHYS_NONSECURE_PPI:
- case ARCH_TIMER_HYP_PPI:
- clk->set_state_shutdown = arch_timer_shutdown_phys;
- clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
- sne = erratum_handler(set_next_event_phys);
- break;
- default:
- BUG();
- }
+ arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
- clk->set_next_event = sne;
- max_delta = __arch_timer_check_delta();
- } else {
- clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
- clk->name = "arch_mem_timer";
- clk->rating = 400;
- clk->cpumask = cpu_possible_mask;
- if (arch_timer_mem_use_virtual) {
- clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
- clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
- clk->set_next_event =
- arch_timer_set_next_event_virt_mem;
- } else {
- clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
- clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
- clk->set_next_event =
- arch_timer_set_next_event_phys_mem;
- }
-
- max_delta = CLOCKSOURCE_MASK(56);
+ if (arch_timer_c3stop)
+ clk->features |= CLOCK_EVT_FEAT_C3STOP;
+ clk->name = "arch_sys_timer";
+ clk->rating = 450;
+ clk->cpumask = cpumask_of(smp_processor_id());
+ clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
+ switch (arch_timer_uses_ppi) {
+ case ARCH_TIMER_VIRT_PPI:
+ clk->set_state_shutdown = arch_timer_shutdown_virt;
+ clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
+ sne = erratum_handler(set_next_event_virt);
+ break;
+ case ARCH_TIMER_PHYS_SECURE_PPI:
+ case ARCH_TIMER_PHYS_NONSECURE_PPI:
+ case ARCH_TIMER_HYP_PPI:
+ clk->set_state_shutdown = arch_timer_shutdown_phys;
+ clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
+ sne = erratum_handler(set_next_event_phys);
+ break;
+ default:
+ BUG();
}
+ clk->set_next_event = sne;
+ max_delta = __arch_timer_check_delta();
+
clk->set_state_shutdown(clk);
clockevents_config_and_register(clk, arch_timer_rate, 0xf, max_delta);
@@ -1029,7 +828,7 @@ static int arch_timer_starting_cpu(unsigned int cpu)
struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
u32 flags;
- __arch_timer_setup(ARCH_TIMER_TYPE_CP15, clk);
+ __arch_timer_setup(clk);
flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);
@@ -1075,22 +874,12 @@ static void __init arch_timer_of_configure_rate(u32 rate, struct device_node *np
pr_warn("frequency not available\n");
}
-static void __init arch_timer_banner(unsigned type)
+static void __init arch_timer_banner(void)
{
- pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
- type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "",
- type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ?
- " and " : "",
- type & ARCH_TIMER_TYPE_MEM ? "mmio" : "",
+ pr_info("cp15 timer running at %lu.%02luMHz (%s).\n",
(unsigned long)arch_timer_rate / 1000000,
(unsigned long)(arch_timer_rate / 10000) % 100,
- type & ARCH_TIMER_TYPE_CP15 ?
- (arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys" :
- "",
- type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? "/" : "",
- type & ARCH_TIMER_TYPE_MEM ?
- arch_timer_mem_use_virtual ? "virt" : "phys" :
- "");
+ (arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys");
}
u32 arch_timer_get_rate(void)
@@ -1108,11 +897,6 @@ bool arch_timer_evtstrm_available(void)
return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available);
}
-static noinstr u64 arch_counter_get_cntvct_mem(void)
-{
- return arch_counter_get_cnt_mem(arch_timer_mem, CNTVCT_LO);
-}
-
static struct arch_timer_kvm_info arch_timer_kvm_info;
struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
@@ -1120,42 +904,35 @@ struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
return &arch_timer_kvm_info;
}
-static void __init arch_counter_register(unsigned type)
+static void __init arch_counter_register(void)
{
u64 (*scr)(void);
+ u64 (*rd)(void);
u64 start_count;
int width;
- /* Register the CP15 based counter if we have one */
- if (type & ARCH_TIMER_TYPE_CP15) {
- u64 (*rd)(void);
-
- if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
- arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
- if (arch_timer_counter_has_wa()) {
- rd = arch_counter_get_cntvct_stable;
- scr = raw_counter_get_cntvct_stable;
- } else {
- rd = arch_counter_get_cntvct;
- scr = arch_counter_get_cntvct;
- }
+ if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
+ arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
+ if (arch_timer_counter_has_wa()) {
+ rd = arch_counter_get_cntvct_stable;
+ scr = raw_counter_get_cntvct_stable;
} else {
- if (arch_timer_counter_has_wa()) {
- rd = arch_counter_get_cntpct_stable;
- scr = raw_counter_get_cntpct_stable;
- } else {
- rd = arch_counter_get_cntpct;
- scr = arch_counter_get_cntpct;
- }
+ rd = arch_counter_get_cntvct;
+ scr = arch_counter_get_cntvct;
}
-
- arch_timer_read_counter = rd;
- clocksource_counter.vdso_clock_mode = vdso_default;
} else {
- arch_timer_read_counter = arch_counter_get_cntvct_mem;
- scr = arch_counter_get_cntvct_mem;
+ if (arch_timer_counter_has_wa()) {
+ rd = arch_counter_get_cntpct_stable;
+ scr = raw_counter_get_cntpct_stable;
+ } else {
+ rd = arch_counter_get_cntpct;
+ scr = arch_counter_get_cntpct;
+ }
}
+ arch_timer_read_counter = rd;
+ clocksource_counter.vdso_clock_mode = vdso_default;
+
width = arch_counter_get_width();
clocksource_counter.mask = CLOCKSOURCE_MASK(width);
cyclecounter.mask = CLOCKSOURCE_MASK(width);
@@ -1303,76 +1080,10 @@ out:
return err;
}
-static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
-{
- int ret;
- irq_handler_t func;
-
- arch_timer_mem = kzalloc(sizeof(*arch_timer_mem), GFP_KERNEL);
- if (!arch_timer_mem)
- return -ENOMEM;
-
- arch_timer_mem->base = base;
- arch_timer_mem->evt.irq = irq;
- __arch_timer_setup(ARCH_TIMER_TYPE_MEM, &arch_timer_mem->evt);
-
- if (arch_timer_mem_use_virtual)
- func = arch_timer_handler_virt_mem;
- else
- func = arch_timer_handler_phys_mem;
-
- ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &arch_timer_mem->evt);
- if (ret) {
- pr_err("Failed to request mem timer irq\n");
- kfree(arch_timer_mem);
- arch_timer_mem = NULL;
- }
-
- return ret;
-}
-
-static const struct of_device_id arch_timer_of_match[] __initconst = {
- { .compatible = "arm,armv7-timer", },
- { .compatible = "arm,armv8-timer", },
- {},
-};
-
-static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
- { .compatible = "arm,armv7-timer-mem", },
- {},
-};
-
-static bool __init arch_timer_needs_of_probing(void)
-{
- struct device_node *dn;
- bool needs_probing = false;
- unsigned int mask = ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM;
-
- /* We have two timers, and both device-tree nodes are probed. */
- if ((arch_timers_present & mask) == mask)
- return false;
-
- /*
- * Only one type of timer is probed,
- * check if we have another type of timer node in device-tree.
- */
- if (arch_timers_present & ARCH_TIMER_TYPE_CP15)
- dn = of_find_matching_node(NULL, arch_timer_mem_of_match);
- else
- dn = of_find_matching_node(NULL, arch_timer_of_match);
-
- if (dn && of_device_is_available(dn))
- needs_probing = true;
-
- of_node_put(dn);
-
- return needs_probing;
-}
-
static int __init arch_timer_common_init(void)
{
- arch_timer_banner(arch_timers_present);
- arch_counter_register(arch_timers_present);
+ arch_timer_banner();
+ arch_counter_register();
return arch_timer_arch_init();
}
@@ -1421,13 +1132,11 @@ static int __init arch_timer_of_init(struct device_node *np)
u32 rate;
bool has_names;
- if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
+ if (arch_timer_evt) {
pr_warn("multiple nodes in dt, skipping\n");
return 0;
}
- arch_timers_present |= ARCH_TIMER_TYPE_CP15;
-
has_names = of_property_present(np, "interrupt-names");
for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++) {
@@ -1472,283 +1181,22 @@ static int __init arch_timer_of_init(struct device_node *np)
if (ret)
return ret;
- if (arch_timer_needs_of_probing())
- return 0;
-
return arch_timer_common_init();
}
TIMER_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
TIMER_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
-static u32 __init
-arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
-{
- void __iomem *base;
- u32 rate;
-
- base = ioremap(frame->cntbase, frame->size);
- if (!base) {
- pr_err("Unable to map frame @ %pa\n", &frame->cntbase);
- return 0;
- }
-
- rate = readl_relaxed(base + CNTFRQ);
-
- iounmap(base);
-
- return rate;
-}
-
-static struct arch_timer_mem_frame * __init
-arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem)
-{
- struct arch_timer_mem_frame *frame, *best_frame = NULL;
- void __iomem *cntctlbase;
- u32 cnttidr;
- int i;
-
- cntctlbase = ioremap(timer_mem->cntctlbase, timer_mem->size);
- if (!cntctlbase) {
- pr_err("Can't map CNTCTLBase @ %pa\n",
- &timer_mem->cntctlbase);
- return NULL;
- }
-
- cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
-
- /*
- * Try to find a virtual capable frame. Otherwise fall back to a
- * physical capable frame.
- */
- for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
- u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
- CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
-
- frame = &timer_mem->frame[i];
- if (!frame->valid)
- continue;
-
- /* Try enabling everything, and see what sticks */
- writel_relaxed(cntacr, cntctlbase + CNTACR(i));
- cntacr = readl_relaxed(cntctlbase + CNTACR(i));
-
- if ((cnttidr & CNTTIDR_VIRT(i)) &&
- !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
- best_frame = frame;
- arch_timer_mem_use_virtual = true;
- break;
- }
-
- if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
- continue;
-
- best_frame = frame;
- }
-
- iounmap(cntctlbase);
-
- return best_frame;
-}
-
-static int __init
-arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame)
-{
- void __iomem *base;
- int ret, irq;
-
- if (arch_timer_mem_use_virtual)
- irq = frame->virt_irq;
- else
- irq = frame->phys_irq;
-
- if (!irq) {
- pr_err("Frame missing %s irq.\n",
- arch_timer_mem_use_virtual ? "virt" : "phys");
- return -EINVAL;
- }
-
- if (!request_mem_region(frame->cntbase, frame->size,
- "arch_mem_timer"))
- return -EBUSY;
-
- base = ioremap(frame->cntbase, frame->size);
- if (!base) {
- pr_err("Can't map frame's registers\n");
- return -ENXIO;
- }
-
- ret = arch_timer_mem_register(base, irq);
- if (ret) {
- iounmap(base);
- return ret;
- }
-
- arch_timers_present |= ARCH_TIMER_TYPE_MEM;
-
- return 0;
-}
-
-static int __init arch_timer_mem_of_init(struct device_node *np)
-{
- struct arch_timer_mem *timer_mem;
- struct arch_timer_mem_frame *frame;
- struct resource res;
- int ret = -EINVAL;
- u32 rate;
-
- timer_mem = kzalloc(sizeof(*timer_mem), GFP_KERNEL);
- if (!timer_mem)
- return -ENOMEM;
-
- if (of_address_to_resource(np, 0, &res))
- goto out;
- timer_mem->cntctlbase = res.start;
- timer_mem->size = resource_size(&res);
-
- for_each_available_child_of_node_scoped(np, frame_node) {
- u32 n;
- struct arch_timer_mem_frame *frame;
-
- if (of_property_read_u32(frame_node, "frame-number", &n)) {
- pr_err(FW_BUG "Missing frame-number.\n");
- goto out;
- }
- if (n >= ARCH_TIMER_MEM_MAX_FRAMES) {
- pr_err(FW_BUG "Wrong frame-number, only 0-%u are permitted.\n",
- ARCH_TIMER_MEM_MAX_FRAMES - 1);
- goto out;
- }
- frame = &timer_mem->frame[n];
-
- if (frame->valid) {
- pr_err(FW_BUG "Duplicated frame-number.\n");
- goto out;
- }
-
- if (of_address_to_resource(frame_node, 0, &res))
- goto out;
-
- frame->cntbase = res.start;
- frame->size = resource_size(&res);
-
- frame->virt_irq = irq_of_parse_and_map(frame_node,
- ARCH_TIMER_VIRT_SPI);
- frame->phys_irq = irq_of_parse_and_map(frame_node,
- ARCH_TIMER_PHYS_SPI);
-
- frame->valid = true;
- }
-
- frame = arch_timer_mem_find_best_frame(timer_mem);
- if (!frame) {
- pr_err("Unable to find a suitable frame in timer @ %pa\n",
- &timer_mem->cntctlbase);
- ret = -EINVAL;
- goto out;
- }
-
- rate = arch_timer_mem_frame_get_cntfrq(frame);
- arch_timer_of_configure_rate(rate, np);
-
- ret = arch_timer_mem_frame_register(frame);
- if (!ret && !arch_timer_needs_of_probing())
- ret = arch_timer_common_init();
-out:
- kfree(timer_mem);
- return ret;
-}
-TIMER_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
- arch_timer_mem_of_init);
-
#ifdef CONFIG_ACPI_GTDT
-static int __init
-arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem)
-{
- struct arch_timer_mem_frame *frame;
- u32 rate;
- int i;
-
- for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
- frame = &timer_mem->frame[i];
-
- if (!frame->valid)
- continue;
-
- rate = arch_timer_mem_frame_get_cntfrq(frame);
- if (rate == arch_timer_rate)
- continue;
-
- pr_err(FW_BUG "CNTFRQ mismatch: frame @ %pa: (0x%08lx), CPU: (0x%08lx)\n",
- &frame->cntbase,
- (unsigned long)rate, (unsigned long)arch_timer_rate);
-
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int __init arch_timer_mem_acpi_init(int platform_timer_count)
-{
- struct arch_timer_mem *timers, *timer;
- struct arch_timer_mem_frame *frame, *best_frame = NULL;
- int timer_count, i, ret = 0;
-
- timers = kcalloc(platform_timer_count, sizeof(*timers),
- GFP_KERNEL);
- if (!timers)
- return -ENOMEM;
-
- ret = acpi_arch_timer_mem_init(timers, &timer_count);
- if (ret || !timer_count)
- goto out;
-
- /*
- * While unlikely, it's theoretically possible that none of the frames
- * in a timer expose the combination of feature we want.
- */
- for (i = 0; i < timer_count; i++) {
- timer = &timers[i];
-
- frame = arch_timer_mem_find_best_frame(timer);
- if (!best_frame)
- best_frame = frame;
-
- ret = arch_timer_mem_verify_cntfrq(timer);
- if (ret) {
- pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n");
- goto out;
- }
-
- if (!best_frame) /* implies !frame */
- /*
- * Only complain about missing suitable frames if we
- * haven't already found one in a previous iteration.
- */
- pr_err("Unable to find a suitable frame in timer @ %pa\n",
- &timer->cntctlbase);
- }
-
- if (best_frame)
- ret = arch_timer_mem_frame_register(best_frame);
-out:
- kfree(timers);
- return ret;
-}
-
-/* Initialize per-processor generic timer and memory-mapped timer(if present) */
static int __init arch_timer_acpi_init(struct acpi_table_header *table)
{
- int ret, platform_timer_count;
+ int ret;
- if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
+ if (arch_timer_evt) {
pr_warn("already initialized, skipping\n");
return -EINVAL;
}
- arch_timers_present |= ARCH_TIMER_TYPE_CP15;
-
- ret = acpi_gtdt_init(table, &platform_timer_count);
+ ret = acpi_gtdt_init(table, NULL);
if (ret)
return ret;
@@ -1790,10 +1238,6 @@ static int __init arch_timer_acpi_init(struct acpi_table_header *table)
if (ret)
return ret;
- if (platform_timer_count &&
- arch_timer_mem_acpi_init(platform_timer_count))
- pr_err("Failed to initialize memory-mapped timer.\n");
-
return arch_timer_common_init();
}
TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
diff --git a/drivers/clocksource/arm_arch_timer_mmio.c b/drivers/clocksource/arm_arch_timer_mmio.c
new file mode 100644
index 000000000000..ebe1987d651e
--- /dev/null
+++ b/drivers/clocksource/arm_arch_timer_mmio.c
@@ -0,0 +1,440 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARM Generic Memory Mapped Timer support
+ *
+ * Split from drivers/clocksource/arm_arch_timer.c
+ *
+ * Copyright (C) 2011 ARM Ltd.
+ * All Rights Reserved
+ */
+
+#define pr_fmt(fmt) "arch_timer_mmio: " fmt
+
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include <clocksource/arm_arch_timer.h>
+
+#define CNTTIDR 0x08
+#define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4))
+
+#define CNTACR(n) (0x40 + ((n) * 4))
+#define CNTACR_RPCT BIT(0)
+#define CNTACR_RVCT BIT(1)
+#define CNTACR_RFRQ BIT(2)
+#define CNTACR_RVOFF BIT(3)
+#define CNTACR_RWVT BIT(4)
+#define CNTACR_RWPT BIT(5)
+
+#define CNTPCT_LO 0x00
+#define CNTVCT_LO 0x08
+#define CNTFRQ 0x10
+#define CNTP_CVAL_LO 0x20
+#define CNTP_CTL 0x2c
+#define CNTV_CVAL_LO 0x30
+#define CNTV_CTL 0x3c
+
+enum arch_timer_access {
+ PHYS_ACCESS,
+ VIRT_ACCESS,
+};
+
+struct arch_timer {
+ struct clock_event_device evt;
+ struct clocksource cs;
+ struct arch_timer_mem *gt_block;
+ void __iomem *base;
+ enum arch_timer_access access;
+ u32 rate;
+};
+
+#define evt_to_arch_timer(e) container_of(e, struct arch_timer, evt)
+#define cs_to_arch_timer(c) container_of(c, struct arch_timer, cs)
+
+static void arch_timer_mmio_write(struct arch_timer *timer,
+ enum arch_timer_reg reg, u64 val)
+{
+ switch (timer->access) {
+ case PHYS_ACCESS:
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ writel_relaxed((u32)val, timer->base + CNTP_CTL);
+ return;
+ case ARCH_TIMER_REG_CVAL:
+ /*
+ * Not guaranteed to be atomic, so the timer
+ * must be disabled at this point.
+ */
+ writeq_relaxed(val, timer->base + CNTP_CVAL_LO);
+ return;
+ }
+ break;
+ case VIRT_ACCESS:
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ writel_relaxed((u32)val, timer->base + CNTV_CTL);
+ return;
+ case ARCH_TIMER_REG_CVAL:
+ /* Same restriction as above */
+ writeq_relaxed(val, timer->base + CNTV_CVAL_LO);
+ return;
+ }
+ break;
+ }
+
+ /* Should never be here */
+ WARN_ON_ONCE(1);
+}
+
+static u32 arch_timer_mmio_read(struct arch_timer *timer, enum arch_timer_reg reg)
+{
+ switch (timer->access) {
+ case PHYS_ACCESS:
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ return readl_relaxed(timer->base + CNTP_CTL);
+ default:
+ break;
+ }
+ break;
+ case VIRT_ACCESS:
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ return readl_relaxed(timer->base + CNTV_CTL);
+ default:
+ break;
+ }
+ break;
+ }
+
+ /* Should never be here */
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static noinstr u64 arch_counter_mmio_get_cnt(struct arch_timer *t)
+{
+ int offset_lo = t->access == VIRT_ACCESS ? CNTVCT_LO : CNTPCT_LO;
+ u32 cnt_lo, cnt_hi, tmp_hi;
+
+ do {
+ cnt_hi = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo + 4));
+ cnt_lo = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo));
+ tmp_hi = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo + 4));
+ } while (cnt_hi != tmp_hi);
+
+ return ((u64) cnt_hi << 32) | cnt_lo;
+}
+
+static u64 arch_mmio_counter_read(struct clocksource *cs)
+{
+ struct arch_timer *at = cs_to_arch_timer(cs);
+
+ return arch_counter_mmio_get_cnt(at);
+}
+
+static int arch_timer_mmio_shutdown(struct clock_event_device *clk)
+{
+ struct arch_timer *at = evt_to_arch_timer(clk);
+ unsigned long ctrl;
+
+ ctrl = arch_timer_mmio_read(at, ARCH_TIMER_REG_CTRL);
+ ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
+ arch_timer_mmio_write(at, ARCH_TIMER_REG_CTRL, ctrl);
+
+ return 0;
+}
+
+static int arch_timer_mmio_set_next_event(unsigned long evt,
+ struct clock_event_device *clk)
+{
+ struct arch_timer *timer = evt_to_arch_timer(clk);
+ unsigned long ctrl;
+ u64 cnt;
+
+ ctrl = arch_timer_mmio_read(timer, ARCH_TIMER_REG_CTRL);
+
+ /* Timer must be disabled before programming CVAL */
+ if (ctrl & ARCH_TIMER_CTRL_ENABLE) {
+ ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
+ arch_timer_mmio_write(timer, ARCH_TIMER_REG_CTRL, ctrl);
+ }
+
+ ctrl |= ARCH_TIMER_CTRL_ENABLE;
+ ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
+
+ cnt = arch_counter_mmio_get_cnt(timer);
+
+ arch_timer_mmio_write(timer, ARCH_TIMER_REG_CVAL, evt + cnt);
+ arch_timer_mmio_write(timer, ARCH_TIMER_REG_CTRL, ctrl);
+ return 0;
+}
+
+static irqreturn_t arch_timer_mmio_handler(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+ struct arch_timer *at = evt_to_arch_timer(evt);
+ unsigned long ctrl;
+
+ ctrl = arch_timer_mmio_read(at, ARCH_TIMER_REG_CTRL);
+ if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
+ ctrl |= ARCH_TIMER_CTRL_IT_MASK;
+ arch_timer_mmio_write(at, ARCH_TIMER_REG_CTRL, ctrl);
+ evt->event_handler(evt);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static struct arch_timer_mem_frame *find_best_frame(struct platform_device *pdev)
+{
+ struct arch_timer_mem_frame *frame, *best_frame = NULL;
+ struct arch_timer *at = platform_get_drvdata(pdev);
+ void __iomem *cntctlbase;
+ u32 cnttidr;
+
+ cntctlbase = ioremap(at->gt_block->cntctlbase, at->gt_block->size);
+ if (!cntctlbase) {
+ dev_err(&pdev->dev, "Can't map CNTCTLBase @ %pa\n",
+ &at->gt_block->cntctlbase);
+ return NULL;
+ }
+
+ cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
+
+ /*
+ * Try to find a virtual capable frame. Otherwise fall back to a
+ * physical capable frame.
+ */
+ for (int i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
+ u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
+ CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
+
+ frame = &at->gt_block->frame[i];
+ if (!frame->valid)
+ continue;
+
+ /* Try enabling everything, and see what sticks */
+ writel_relaxed(cntacr, cntctlbase + CNTACR(i));
+ cntacr = readl_relaxed(cntctlbase + CNTACR(i));
+
+ /* Pick a suitable frame for which we have an IRQ */
+ if ((cnttidr & CNTTIDR_VIRT(i)) &&
+ !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT)) &&
+ frame->virt_irq) {
+ best_frame = frame;
+ at->access = VIRT_ACCESS;
+ break;
+ }
+
+ if ((~cntacr & (CNTACR_RWPT | CNTACR_RPCT)) ||
+ !frame->phys_irq)
+ continue;
+
+ at->access = PHYS_ACCESS;
+ best_frame = frame;
+ }
+
+ iounmap(cntctlbase);
+
+ return best_frame;
+}
+
+static void arch_timer_mmio_setup(struct arch_timer *at, int irq)
+{
+ at->evt = (struct clock_event_device) {
+ .features = (CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_DYNIRQ),
+ .name = "arch_mem_timer",
+ .rating = 400,
+ .cpumask = cpu_possible_mask,
+ .irq = irq,
+ .set_next_event = arch_timer_mmio_set_next_event,
+ .set_state_oneshot_stopped = arch_timer_mmio_shutdown,
+ .set_state_shutdown = arch_timer_mmio_shutdown,
+ };
+
+ at->evt.set_state_shutdown(&at->evt);
+
+ clockevents_config_and_register(&at->evt, at->rate, 0xf,
+ (unsigned long)CLOCKSOURCE_MASK(56));
+
+ enable_irq(at->evt.irq);
+
+ at->cs = (struct clocksource) {
+ .name = "arch_mmio_counter",
+ .rating = 300,
+ .read = arch_mmio_counter_read,
+ .mask = CLOCKSOURCE_MASK(56),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ };
+
+ clocksource_register_hz(&at->cs, at->rate);
+}
+
+static int arch_timer_mmio_frame_register(struct platform_device *pdev,
+ struct arch_timer_mem_frame *frame)
+{
+ struct arch_timer *at = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ int ret, irq;
+ u32 rate;
+
+ if (!devm_request_mem_region(&pdev->dev, frame->cntbase, frame->size,
+ "arch_mem_timer"))
+ return -EBUSY;
+
+ at->base = devm_ioremap(&pdev->dev, frame->cntbase, frame->size);
+ if (!at->base) {
+ dev_err(&pdev->dev, "Can't map frame's registers\n");
+ return -ENXIO;
+ }
+
+ /*
+	 * Allow "clock-frequency" to override the probed rate. If neither
+	 * leads to something useful, use the CPU timer frequency as the
+	 * fallback. The nice thing about that last point is that we wouldn't
+	 * have made it here if we didn't have a valid frequency.
+ */
+ rate = readl_relaxed(at->base + CNTFRQ);
+
+ if (!np || of_property_read_u32(np, "clock-frequency", &at->rate))
+ at->rate = rate;
+
+ if (!at->rate)
+ at->rate = arch_timer_get_rate();
+
+ irq = at->access == VIRT_ACCESS ? frame->virt_irq : frame->phys_irq;
+ ret = devm_request_irq(&pdev->dev, irq, arch_timer_mmio_handler,
+ IRQF_TIMER | IRQF_NO_AUTOEN, "arch_mem_timer",
+ &at->evt);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request mem timer irq\n");
+ return ret;
+ }
+
+	/* After this point, we're not allowed to fail anymore */
+ arch_timer_mmio_setup(at, irq);
+ return 0;
+}
+
+static int of_populate_gt_block(struct platform_device *pdev,
+ struct arch_timer *at)
+{
+ struct resource res;
+
+ if (of_address_to_resource(pdev->dev.of_node, 0, &res))
+ return -EINVAL;
+
+ at->gt_block->cntctlbase = res.start;
+ at->gt_block->size = resource_size(&res);
+
+ for_each_available_child_of_node_scoped(pdev->dev.of_node, frame_node) {
+ struct arch_timer_mem_frame *frame;
+ u32 n;
+
+ if (of_property_read_u32(frame_node, "frame-number", &n)) {
+ dev_err(&pdev->dev, FW_BUG "Missing frame-number\n");
+ return -EINVAL;
+ }
+ if (n >= ARCH_TIMER_MEM_MAX_FRAMES) {
+ dev_err(&pdev->dev,
+ FW_BUG "Wrong frame-number, only 0-%u are permitted\n",
+ ARCH_TIMER_MEM_MAX_FRAMES - 1);
+ return -EINVAL;
+ }
+
+ frame = &at->gt_block->frame[n];
+
+ if (frame->valid) {
+ dev_err(&pdev->dev, FW_BUG "Duplicated frame-number\n");
+ return -EINVAL;
+ }
+
+ if (of_address_to_resource(frame_node, 0, &res))
+ return -EINVAL;
+
+ frame->cntbase = res.start;
+ frame->size = resource_size(&res);
+
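+		/* Per the DT binding, interrupt 0 is the phys IRQ, interrupt 1 the virt IRQ */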
+ frame->phys_irq = irq_of_parse_and_map(frame_node, 0);
+ frame->virt_irq = irq_of_parse_and_map(frame_node, 1);
+
+ frame->valid = true;
+ }
+
+ return 0;
+}
+
+static int arch_timer_mmio_probe(struct platform_device *pdev)
+{
+ struct arch_timer_mem_frame *frame;
+ struct arch_timer *at;
+ struct device_node *np;
+ int ret;
+
+ np = pdev->dev.of_node;
+
+	at = devm_kzalloc(&pdev->dev, sizeof(*at), GFP_KERNEL);
+ if (!at)
+ return -ENOMEM;
+
+ if (np) {
+		at->gt_block = devm_kzalloc(&pdev->dev, sizeof(*at->gt_block),
+					    GFP_KERNEL);
+ if (!at->gt_block)
+ return -ENOMEM;
+ ret = of_populate_gt_block(pdev, at);
+ if (ret)
+ return ret;
+ } else {
+ at->gt_block = dev_get_platdata(&pdev->dev);
+ }
+
+ platform_set_drvdata(pdev, at);
+
+ frame = find_best_frame(pdev);
+ if (!frame) {
+ dev_err(&pdev->dev,
+ "Unable to find a suitable frame in timer @ %pa\n",
+ &at->gt_block->cntctlbase);
+ return -EINVAL;
+ }
+
+ ret = arch_timer_mmio_frame_register(pdev, frame);
+ if (!ret)
+ dev_info(&pdev->dev,
+ "mmio timer running at %lu.%02luMHz (%s)\n",
+ (unsigned long)at->rate / 1000000,
+ (unsigned long)(at->rate / 10000) % 100,
+ at->access == VIRT_ACCESS ? "virt" : "phys");
+
+ return ret;
+}
+
+static const struct of_device_id arch_timer_mmio_of_table[] = {
+ { .compatible = "arm,armv7-timer-mem", },
+ {}
+};
+
+static struct platform_driver arch_timer_mmio_drv = {
+ .driver = {
+ .name = "arch-timer-mmio",
+ .of_match_table = arch_timer_mmio_of_table,
+ },
+ .probe = arch_timer_mmio_probe,
+};
+builtin_platform_driver(arch_timer_mmio_drv);
+
+static struct platform_driver arch_timer_mmio_acpi_drv = {
+ .driver = {
+ .name = "gtdt-arm-mmio-timer",
+ },
+ .probe = arch_timer_mmio_probe,
+};
+builtin_platform_driver(arch_timer_mmio_acpi_drv);
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index 2d86bbc2764a..5e3d6bb7e437 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -263,14 +263,13 @@ static void __init gt_delay_timer_init(void)
register_current_timer_delay(&gt_delay_timer);
}
-static int __init gt_clocksource_init(void)
+static int __init gt_clocksource_init(unsigned int psv)
{
writel(0, gt_base + GT_CONTROL);
writel(0, gt_base + GT_COUNTER0);
writel(0, gt_base + GT_COUNTER1);
/* set prescaler and enable timer on all the cores */
- writel(FIELD_PREP(GT_CONTROL_PRESCALER_MASK,
- CONFIG_ARM_GT_INITIAL_PRESCALER_VAL - 1) |
+ writel(FIELD_PREP(GT_CONTROL_PRESCALER_MASK, psv - 1) |
GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL);
#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
@@ -338,11 +337,45 @@ static int gt_clk_rate_change_cb(struct notifier_block *nb,
return NOTIFY_DONE;
}
+struct gt_prescaler_config {
+ const char *compatible;
+ unsigned long prescaler;
+};
+
+static const struct gt_prescaler_config gt_prescaler_configs[] = {
+ /*
+ * On am43 the global timer clock is a child of the clock used for CPU
+ * OPPs, so the initial prescaler has to be compatible with all OPPs:
+ * 300, 600, 720, 800 and 1000 MHz, each with a fixed divider of 2. The
+ * GCD of the resulting timer rates is 10 MHz. The initial frequency is
+ * 1000 MHz, giving a 500 MHz timer clock, so the prescaler is 50.
+ */
+ { .compatible = "ti,am43", .prescaler = 50 },
+ { .compatible = "xlnx,zynq-7000", .prescaler = 2 },
+ { .compatible = NULL }
+};
+
+static unsigned long gt_get_initial_prescaler_value(struct device_node *np)
+{
+ const struct gt_prescaler_config *config;
+
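+	/* A non-zero Kconfig value overrides the per-machine defaults below */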
+ if (CONFIG_ARM_GT_INITIAL_PRESCALER_VAL != 0)
+ return CONFIG_ARM_GT_INITIAL_PRESCALER_VAL;
+
+ for (config = gt_prescaler_configs; config->compatible; config++) {
+ if (of_machine_is_compatible(config->compatible))
+ return config->prescaler;
+ }
+
+ return 1;
+}
+
static int __init global_timer_of_register(struct device_node *np)
{
struct clk *gt_clk;
static unsigned long gt_clk_rate;
int err;
+ unsigned long psv;
/*
* In A9 r2p0 the comparators for each processor with the global timer
@@ -378,8 +411,9 @@ static int __init global_timer_of_register(struct device_node *np)
goto out_unmap;
}
+ psv = gt_get_initial_prescaler_value(np);
gt_clk_rate = clk_get_rate(gt_clk);
- gt_target_rate = gt_clk_rate / CONFIG_ARM_GT_INITIAL_PRESCALER_VAL;
+ gt_target_rate = gt_clk_rate / psv;
gt_clk_rate_change_nb.notifier_call =
gt_clk_rate_change_cb;
err = clk_notifier_register(gt_clk, &gt_clk_rate_change_nb);
@@ -404,7 +438,7 @@ static int __init global_timer_of_register(struct device_node *np)
}
/* Register and immediately configure the timer on the boot CPU */
- err = gt_clocksource_init();
+ err = gt_clocksource_init(psv);
if (err)
goto out_irq;
diff --git a/drivers/clocksource/clps711x-timer.c b/drivers/clocksource/clps711x-timer.c
index e95fdc49c226..bbceb0289d45 100644
--- a/drivers/clocksource/clps711x-timer.c
+++ b/drivers/clocksource/clps711x-timer.c
@@ -78,24 +78,33 @@ static int __init clps711x_timer_init(struct device_node *np)
unsigned int irq = irq_of_parse_and_map(np, 0);
struct clk *clock = of_clk_get(np, 0);
void __iomem *base = of_iomap(np, 0);
+ int ret = 0;
if (!base)
return -ENOMEM;
- if (!irq)
- return -EINVAL;
- if (IS_ERR(clock))
- return PTR_ERR(clock);
+ if (!irq) {
+ ret = -EINVAL;
+ goto unmap_io;
+ }
+ if (IS_ERR(clock)) {
+ ret = PTR_ERR(clock);
+ goto unmap_io;
+ }
switch (of_alias_get_id(np, "timer")) {
case CLPS711X_CLKSRC_CLOCKSOURCE:
clps711x_clksrc_init(clock, base);
break;
case CLPS711X_CLKSRC_CLOCKEVENT:
- return _clps711x_clkevt_init(clock, base, irq);
+ ret = _clps711x_clkevt_init(clock, base, irq);
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
- return 0;
+unmap_io:
+ iounmap(base);
+ return ret;
}
TIMER_OF_DECLARE(clps711x, "cirrus,ep7209-timer", clps711x_timer_init);
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index 2edc13ca184e..10356d4ec55c 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -549,14 +549,22 @@ static void __init hv_init_tsc_clocksource(void)
union hv_reference_tsc_msr tsc_msr;
/*
+ * When running as a guest partition:
+ *
* If Hyper-V offers TSC_INVARIANT, then the virtualized TSC correctly
* handles frequency and offset changes due to live migration,
* pause/resume, and other VM management operations. So lower the
* Hyper-V Reference TSC rating, causing the generic TSC to be used.
* TSC_INVARIANT is not offered on ARM64, so the Hyper-V Reference
* TSC will be preferred over the virtualized ARM64 arch counter.
+ *
+ * When running as the root partition:
+ *
+ * There is no HV_ACCESS_TSC_INVARIANT feature. Always lower the rating
+ * of the Hyper-V Reference TSC.
*/
- if (ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) {
+ if ((ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) ||
+ hv_root_partition()) {
hyperv_cs_tsc.rating = 250;
hyperv_cs_msr.rating = 245;
}
diff --git a/drivers/clocksource/ingenic-sysost.c b/drivers/clocksource/ingenic-sysost.c
index cb6fc2f152d4..e79cfb0b8e05 100644
--- a/drivers/clocksource/ingenic-sysost.c
+++ b/drivers/clocksource/ingenic-sysost.c
@@ -127,18 +127,23 @@ static u8 ingenic_ost_get_prescale(unsigned long rate, unsigned long req_rate)
return 2; /* /16 divider */
}
-static long ingenic_ost_round_rate(struct clk_hw *hw, unsigned long req_rate,
- unsigned long *parent_rate)
+static int ingenic_ost_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long rate = *parent_rate;
+ unsigned long rate = req->best_parent_rate;
u8 prescale;
- if (req_rate > rate)
- return rate;
+ if (req->rate > rate) {
+ req->rate = rate;
- prescale = ingenic_ost_get_prescale(rate, req_rate);
+ return 0;
+ }
+
+ prescale = ingenic_ost_get_prescale(rate, req->rate);
- return rate >> (prescale * 2);
+ req->rate = rate >> (prescale * 2);
+
+ return 0;
}
static int ingenic_ost_percpu_timer_set_rate(struct clk_hw *hw, unsigned long req_rate,
@@ -175,14 +180,14 @@ static int ingenic_ost_global_timer_set_rate(struct clk_hw *hw, unsigned long re
static const struct clk_ops ingenic_ost_percpu_timer_ops = {
.recalc_rate = ingenic_ost_percpu_timer_recalc_rate,
- .round_rate = ingenic_ost_round_rate,
- .set_rate = ingenic_ost_percpu_timer_set_rate,
+ .determine_rate = ingenic_ost_determine_rate,
+ .set_rate = ingenic_ost_percpu_timer_set_rate,
};
static const struct clk_ops ingenic_ost_global_timer_ops = {
.recalc_rate = ingenic_ost_global_timer_recalc_rate,
- .round_rate = ingenic_ost_round_rate,
- .set_rate = ingenic_ost_global_timer_set_rate,
+ .determine_rate = ingenic_ost_determine_rate,
+ .set_rate = ingenic_ost_global_timer_set_rate,
};
static const char * const ingenic_ost_clk_parents[] = { "ext" };
diff --git a/drivers/clocksource/scx200_hrt.c b/drivers/clocksource/scx200_hrt.c
index c3536fffbe9a..5a99801a1657 100644
--- a/drivers/clocksource/scx200_hrt.c
+++ b/drivers/clocksource/scx200_hrt.c
@@ -52,6 +52,7 @@ static struct clocksource cs_hrt = {
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
/* mult, shift are set based on mhz27 flag */
+ .owner = THIS_MODULE,
};
static int __init init_hrt_clocksource(void)
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index b72b36e0abed..385eb94bbe7c 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -578,37 +578,74 @@ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)
+static int sh_cmt_start_clocksource(struct sh_cmt_channel *ch)
{
int ret = 0;
unsigned long flags;
- if (flag & FLAG_CLOCKSOURCE)
- pm_runtime_get_sync(&ch->cmt->pdev->dev);
+ pm_runtime_get_sync(&ch->cmt->pdev->dev);
raw_spin_lock_irqsave(&ch->lock, flags);
- if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) {
- if (flag & FLAG_CLOCKEVENT)
- pm_runtime_get_sync(&ch->cmt->pdev->dev);
+ if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
ret = sh_cmt_enable(ch);
- }
if (ret)
goto out;
- ch->flags |= flag;
+
+ ch->flags |= FLAG_CLOCKSOURCE;
/* setup timeout if no clockevent */
- if (ch->cmt->num_channels == 1 &&
- flag == FLAG_CLOCKSOURCE && (!(ch->flags & FLAG_CLOCKEVENT)))
+ if (ch->cmt->num_channels == 1 && !(ch->flags & FLAG_CLOCKEVENT))
__sh_cmt_set_next(ch, ch->max_match_value);
+out:
+ raw_spin_unlock_irqrestore(&ch->lock, flags);
+
+ return ret;
+}
+
+static void sh_cmt_stop_clocksource(struct sh_cmt_channel *ch)
+{
+ unsigned long flags;
+ unsigned long f;
+
+ raw_spin_lock_irqsave(&ch->lock, flags);
+
+ f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
+
+ ch->flags &= ~FLAG_CLOCKSOURCE;
+
+ if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
+ sh_cmt_disable(ch);
+
+ raw_spin_unlock_irqrestore(&ch->lock, flags);
+
+ pm_runtime_put(&ch->cmt->pdev->dev);
+}
+
+static int sh_cmt_start_clockevent(struct sh_cmt_channel *ch)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&ch->lock, flags);
+
+ if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) {
+ pm_runtime_get_sync(&ch->cmt->pdev->dev);
+ ret = sh_cmt_enable(ch);
+ }
+
+ if (ret)
+ goto out;
+
+ ch->flags |= FLAG_CLOCKEVENT;
out:
raw_spin_unlock_irqrestore(&ch->lock, flags);
return ret;
}
-static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag)
+static void sh_cmt_stop_clockevent(struct sh_cmt_channel *ch)
{
unsigned long flags;
unsigned long f;
@@ -616,22 +653,19 @@ static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag)
raw_spin_lock_irqsave(&ch->lock, flags);
f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
- ch->flags &= ~flag;
+
+ ch->flags &= ~FLAG_CLOCKEVENT;
if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) {
sh_cmt_disable(ch);
- if (flag & FLAG_CLOCKEVENT)
- pm_runtime_put(&ch->cmt->pdev->dev);
+ pm_runtime_put(&ch->cmt->pdev->dev);
}
/* adjust the timeout to maximum if only clocksource left */
- if ((flag == FLAG_CLOCKEVENT) && (ch->flags & FLAG_CLOCKSOURCE))
+ if (ch->flags & FLAG_CLOCKSOURCE)
__sh_cmt_set_next(ch, ch->max_match_value);
raw_spin_unlock_irqrestore(&ch->lock, flags);
-
- if (flag & FLAG_CLOCKSOURCE)
- pm_runtime_put(&ch->cmt->pdev->dev);
}
static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
@@ -672,7 +706,7 @@ static int sh_cmt_clocksource_enable(struct clocksource *cs)
ch->total_cycles = 0;
- ret = sh_cmt_start(ch, FLAG_CLOCKSOURCE);
+ ret = sh_cmt_start_clocksource(ch);
if (!ret)
ch->cs_enabled = true;
@@ -685,7 +719,7 @@ static void sh_cmt_clocksource_disable(struct clocksource *cs)
WARN_ON(!ch->cs_enabled);
- sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
+ sh_cmt_stop_clocksource(ch);
ch->cs_enabled = false;
}
@@ -696,7 +730,7 @@ static void sh_cmt_clocksource_suspend(struct clocksource *cs)
if (!ch->cs_enabled)
return;
- sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
+ sh_cmt_stop_clocksource(ch);
dev_pm_genpd_suspend(&ch->cmt->pdev->dev);
}
@@ -708,7 +742,7 @@ static void sh_cmt_clocksource_resume(struct clocksource *cs)
return;
dev_pm_genpd_resume(&ch->cmt->pdev->dev);
- sh_cmt_start(ch, FLAG_CLOCKSOURCE);
+ sh_cmt_start_clocksource(ch);
}
static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
@@ -740,7 +774,7 @@ static struct sh_cmt_channel *ced_to_sh_cmt(struct clock_event_device *ced)
static void sh_cmt_clock_event_start(struct sh_cmt_channel *ch, int periodic)
{
- sh_cmt_start(ch, FLAG_CLOCKEVENT);
+ sh_cmt_start_clockevent(ch);
if (periodic)
sh_cmt_set_next(ch, ((ch->cmt->rate + HZ/2) / HZ) - 1);
@@ -752,7 +786,7 @@ static int sh_cmt_clock_event_shutdown(struct clock_event_device *ced)
{
struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
- sh_cmt_stop(ch, FLAG_CLOCKEVENT);
+ sh_cmt_stop_clockevent(ch);
return 0;
}
@@ -763,7 +797,7 @@ static int sh_cmt_clock_event_set_state(struct clock_event_device *ced,
/* deal with old setting first */
if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
- sh_cmt_stop(ch, FLAG_CLOCKEVENT);
+ sh_cmt_stop_clockevent(ch);
dev_info(&ch->cmt->pdev->dev, "ch%u: used for %s clock events\n",
ch->index, periodic ? "periodic" : "oneshot");
diff --git a/drivers/clocksource/timer-cs5535.c b/drivers/clocksource/timer-cs5535.c
index d47acfe848ae..8af666c39890 100644
--- a/drivers/clocksource/timer-cs5535.c
+++ b/drivers/clocksource/timer-cs5535.c
@@ -101,6 +101,7 @@ static struct clock_event_device cs5535_clockevent = {
.tick_resume = mfgpt_shutdown,
.set_next_event = mfgpt_next_event,
.rating = 250,
+ .owner = THIS_MODULE,
};
static irqreturn_t mfgpt_tick(int irq, void *dev_id)
diff --git a/drivers/clocksource/timer-econet-en751221.c b/drivers/clocksource/timer-econet-en751221.c
index 3b449fdaafee..4008076b1a21 100644
--- a/drivers/clocksource/timer-econet-en751221.c
+++ b/drivers/clocksource/timer-econet-en751221.c
@@ -146,7 +146,7 @@ static int __init cevt_init(struct device_node *np)
for_each_possible_cpu(i) {
struct clock_event_device *cd = &per_cpu(econet_timer_pcpu, i);
- cd->rating = 310,
+ cd->rating = 310;
cd->features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_C3STOP |
CLOCK_EVT_FEAT_PERCPU;
diff --git a/drivers/clocksource/timer-nxp-pit.c b/drivers/clocksource/timer-nxp-pit.c
new file mode 100644
index 000000000000..2d0a3554b6bf
--- /dev/null
+++ b/drivers/clocksource/timer-nxp-pit.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2012-2013 Freescale Semiconductor, Inc.
+ * Copyright 2018,2021-2025 NXP
+ */
+#include <linux/interrupt.h>
+#include <linux/clockchips.h>
+#include <linux/cpuhotplug.h>
+#include <linux/clk.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
+#include <linux/platform_device.h>
+
+/*
+ * Each PIT channel takes 0x10 bytes of register space
+ */
+#define PIT0_OFFSET 0x100
+#define PIT_CH(n) (PIT0_OFFSET + 0x10 * (n))
+
+#define PITMCR(__base) (__base)
+
+#define PITMCR_FRZ BIT(0)
+#define PITMCR_MDIS BIT(1)
+
+#define PITLDVAL(__base) (__base)
+#define PITTCTRL(__base) ((__base) + 0x08)
+
+#define PITCVAL_OFFSET 0x04
+#define PITCVAL(__base) ((__base) + 0x04)
+
+#define PITTCTRL_TEN BIT(0)
+#define PITTCTRL_TIE BIT(1)
+
+#define PITTFLG(__base) ((__base) + 0x0c)
+
+#define PITTFLG_TIF BIT(0)
+
+struct pit_timer {
+ void __iomem *clksrc_base;
+ void __iomem *clkevt_base;
+ struct clock_event_device ced;
+ struct clocksource cs;
+ int rate;
+};
+
+struct pit_timer_data {
+ int max_pit_instances;
+};
+
+static DEFINE_PER_CPU(struct pit_timer *, pit_timers);
+
+/*
+ * Global state for initializing multiple PIT instances
+ */
+static int pit_instances;
+static int max_pit_instances = 1;
+
+static void __iomem *sched_clock_base;
+
+static inline struct pit_timer *ced_to_pit(struct clock_event_device *ced)
+{
+ return container_of(ced, struct pit_timer, ced);
+}
+
+static inline struct pit_timer *cs_to_pit(struct clocksource *cs)
+{
+ return container_of(cs, struct pit_timer, cs);
+}
+
+static inline void pit_module_enable(void __iomem *base)
+{
+ writel(0, PITMCR(base));
+}
+
+static inline void pit_module_disable(void __iomem *base)
+{
+ writel(PITMCR_MDIS, PITMCR(base));
+}
+
+static inline void pit_timer_enable(void __iomem *base, bool tie)
+{
+ u32 val = PITTCTRL_TEN | (tie ? PITTCTRL_TIE : 0);
+
+ writel(val, PITTCTRL(base));
+}
+
+static inline void pit_timer_disable(void __iomem *base)
+{
+ writel(0, PITTCTRL(base));
+}
+
+static inline void pit_timer_set_counter(void __iomem *base, unsigned int cnt)
+{
+ writel(cnt, PITLDVAL(base));
+}
+
+static inline void pit_timer_irqack(struct pit_timer *pit)
+{
+ writel(PITTFLG_TIF, PITTFLG(pit->clkevt_base));
+}
+
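+/* The PIT counts down, so invert the reading to get an up-counting clock */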
+static u64 notrace pit_read_sched_clock(void)
+{
+ return ~readl(sched_clock_base);
+}
+
+static u64 pit_timer_clocksource_read(struct clocksource *cs)
+{
+ struct pit_timer *pit = cs_to_pit(cs);
+
+ return (u64)~readl(PITCVAL(pit->clksrc_base));
+}
+
+static int pit_clocksource_init(struct pit_timer *pit, const char *name,
+ void __iomem *base, unsigned long rate)
+{
+ /*
+	 * Channels 0 and 1 can be chained to build a 64-bit timer,
+	 * so use channel 2 as the clocksource and leave channels 0
+	 * and 1 free for anyone else who needs them.
+ */
+ pit->clksrc_base = base + PIT_CH(2);
+ pit->cs.name = name;
+ pit->cs.rating = 300;
+ pit->cs.read = pit_timer_clocksource_read;
+ pit->cs.mask = CLOCKSOURCE_MASK(32);
+ pit->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ /* set the max load value and start the clock source counter */
+ pit_timer_disable(pit->clksrc_base);
+ pit_timer_set_counter(pit->clksrc_base, ~0);
+	pit_timer_enable(pit->clksrc_base, false);
+
+ sched_clock_base = pit->clksrc_base + PITCVAL_OFFSET;
+ sched_clock_register(pit_read_sched_clock, 32, rate);
+
+ return clocksource_register_hz(&pit->cs, rate);
+}
+
+static int pit_set_next_event(unsigned long delta, struct clock_event_device *ced)
+{
+ struct pit_timer *pit = ced_to_pit(ced);
+
+ /*
+	 * Writing a new value to the PITLDVAL register will not restart the
+	 * timer. To abort the current cycle and start a timer period with
+	 * the new value, the timer must be disabled and enabled again, and
+	 * PITLDVAL must be set to delta minus one per the PIT hardware
+	 * requirement.
+ */
+ pit_timer_disable(pit->clkevt_base);
+ pit_timer_set_counter(pit->clkevt_base, delta - 1);
+ pit_timer_enable(pit->clkevt_base, true);
+
+ return 0;
+}
+
+static int pit_shutdown(struct clock_event_device *ced)
+{
+ struct pit_timer *pit = ced_to_pit(ced);
+
+ pit_timer_disable(pit->clkevt_base);
+
+ return 0;
+}
+
+static int pit_set_periodic(struct clock_event_device *ced)
+{
+ struct pit_timer *pit = ced_to_pit(ced);
+
+ pit_set_next_event(pit->rate / HZ, ced);
+
+ return 0;
+}
+
+static irqreturn_t pit_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *ced = dev_id;
+ struct pit_timer *pit = ced_to_pit(ced);
+
+ pit_timer_irqack(pit);
+
+ /*
+	 * The PIT hardware doesn't support oneshot mode: it generates an
+	 * interrupt, reloads the counter value from PITLDVAL when PITCVAL
+	 * reaches zero, and starts counting again. So software needs to
+	 * disable the timer to stop the counter loop in ONESHOT mode.
+ */
+ if (likely(clockevent_state_oneshot(ced)))
+ pit_timer_disable(pit->clkevt_base);
+
+ ced->event_handler(ced);
+
+ return IRQ_HANDLED;
+}
+
+static int pit_clockevent_per_cpu_init(struct pit_timer *pit, const char *name,
+ void __iomem *base, unsigned long rate,
+ int irq, unsigned int cpu)
+{
+ int ret;
+
+ /*
+	 * Channels 0 and 1 can be chained to build a 64-bit timer,
+	 * so use channel 3 as the clockevent and leave channels 0
+	 * and 1 free for anyone else who needs them.
+ */
+ pit->clkevt_base = base + PIT_CH(3);
+ pit->rate = rate;
+
+ pit_timer_disable(pit->clkevt_base);
+
+ pit_timer_irqack(pit);
+
+ ret = request_irq(irq, pit_timer_interrupt, IRQF_TIMER | IRQF_NOBALANCING,
+ name, &pit->ced);
+ if (ret)
+ return ret;
+
+ pit->ced.cpumask = cpumask_of(cpu);
+ pit->ced.irq = irq;
+
+ pit->ced.name = name;
+ pit->ced.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ pit->ced.set_state_shutdown = pit_shutdown;
+ pit->ced.set_state_periodic = pit_set_periodic;
+ pit->ced.set_next_event = pit_set_next_event;
+ pit->ced.rating = 300;
+
+ per_cpu(pit_timers, cpu) = pit;
+
+ return 0;
+}
+
+static void pit_clockevent_per_cpu_exit(struct pit_timer *pit, unsigned int cpu)
+{
+ pit_timer_disable(pit->clkevt_base);
+ free_irq(pit->ced.irq, &pit->ced);
+ per_cpu(pit_timers, cpu) = NULL;
+}
+
+static int pit_clockevent_starting_cpu(unsigned int cpu)
+{
+ struct pit_timer *pit = per_cpu(pit_timers, cpu);
+ int ret;
+
+ if (!pit)
+ return 0;
+
+ ret = irq_force_affinity(pit->ced.irq, cpumask_of(cpu));
+ if (ret) {
+ pit_clockevent_per_cpu_exit(pit, cpu);
+ return ret;
+ }
+
+ /*
+	 * The value for the LDVAL register trigger is calculated as:
+	 * LDVAL trigger = (period / clock period) - 1
+	 * The PIT is a 32-bit down-counting timer that generates an
+	 * interrupt when the counter reaches 0, so the minimal LDVAL
+	 * trigger value is 1. Hence min_delta is the minimal LDVAL
+	 * trigger value + 1, and max_delta is the full 32-bit range.
+ */
+ clockevents_config_and_register(&pit->ced, pit->rate, 2, 0xffffffff);
+
+ return 0;
+}
+
+static int pit_timer_init(struct device_node *np)
+{
+ struct pit_timer *pit;
+ struct clk *pit_clk;
+ void __iomem *timer_base;
+ const char *name = of_node_full_name(np);
+ unsigned long clk_rate;
+ int irq, ret;
+
+ pit = kzalloc(sizeof(*pit), GFP_KERNEL);
+ if (!pit)
+ return -ENOMEM;
+
+ ret = -ENXIO;
+ timer_base = of_iomap(np, 0);
+ if (!timer_base) {
+ pr_err("Failed to iomap\n");
+ goto out_kfree;
+ }
+
+ ret = -EINVAL;
+ irq = irq_of_parse_and_map(np, 0);
+ if (irq <= 0) {
+ pr_err("Failed to irq_of_parse_and_map\n");
+ goto out_iounmap;
+ }
+
+ pit_clk = of_clk_get(np, 0);
+ if (IS_ERR(pit_clk)) {
+ ret = PTR_ERR(pit_clk);
+ goto out_irq_dispose_mapping;
+ }
+
+ ret = clk_prepare_enable(pit_clk);
+ if (ret)
+ goto out_clk_put;
+
+ clk_rate = clk_get_rate(pit_clk);
+
+ pit_module_disable(timer_base);
+
+ ret = pit_clocksource_init(pit, name, timer_base, clk_rate);
+ if (ret) {
+ pr_err("Failed to initialize clocksource '%pOF'\n", np);
+ goto out_pit_module_disable;
+ }
+
+ ret = pit_clockevent_per_cpu_init(pit, name, timer_base, clk_rate, irq, pit_instances);
+ if (ret) {
+ pr_err("Failed to initialize clockevent '%pOF'\n", np);
+ goto out_pit_clocksource_unregister;
+ }
+
+ /* enable the pit module */
+ pit_module_enable(timer_base);
+
+ pit_instances++;
+
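+	/* Defer the cpuhp callback until every expected PIT instance is up */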
+ if (pit_instances == max_pit_instances) {
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "PIT timer:starting",
+ pit_clockevent_starting_cpu, NULL);
+ if (ret < 0)
+ goto out_pit_clocksource_unregister;
+ }
+
+ return 0;
+
+out_pit_clocksource_unregister:
+ clocksource_unregister(&pit->cs);
+out_pit_module_disable:
+ pit_module_disable(timer_base);
+ clk_disable_unprepare(pit_clk);
+out_clk_put:
+ clk_put(pit_clk);
+out_irq_dispose_mapping:
+ irq_dispose_mapping(irq);
+out_iounmap:
+ iounmap(timer_base);
+out_kfree:
+ kfree(pit);
+
+ return ret;
+}
+
+static int pit_timer_probe(struct platform_device *pdev)
+{
+ const struct pit_timer_data *pit_timer_data;
+
+ pit_timer_data = of_device_get_match_data(&pdev->dev);
+ if (pit_timer_data)
+ max_pit_instances = pit_timer_data->max_pit_instances;
+
+ return pit_timer_init(pdev->dev.of_node);
+}
+
+static struct pit_timer_data s32g2_data = { .max_pit_instances = 2 };
+
+static const struct of_device_id pit_timer_of_match[] = {
+ { .compatible = "nxp,s32g2-pit", .data = &s32g2_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pit_timer_of_match);
+
+static struct platform_driver nxp_pit_driver = {
+ .driver = {
+ .name = "nxp-pit",
+ .of_match_table = pit_timer_of_match,
+ },
+ .probe = pit_timer_probe,
+};
+module_platform_driver(nxp_pit_driver);
+
+TIMER_OF_DECLARE(vf610, "fsl,vf610-pit", pit_timer_init);
diff --git a/drivers/clocksource/timer-nxp-stm.c b/drivers/clocksource/timer-nxp-stm.c
index d7ccf9001729..bbc40623728f 100644
--- a/drivers/clocksource/timer-nxp-stm.c
+++ b/drivers/clocksource/timer-nxp-stm.c
@@ -201,6 +201,7 @@ static int __init nxp_stm_clocksource_init(struct device *dev, struct stm_timer
stm_timer->cs.resume = nxp_stm_clocksource_resume;
stm_timer->cs.mask = CLOCKSOURCE_MASK(32);
stm_timer->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ stm_timer->cs.owner = THIS_MODULE;
ret = clocksource_register_hz(&stm_timer->cs, stm_timer->rate);
if (ret)
@@ -314,6 +315,7 @@ static int __init nxp_stm_clockevent_per_cpu_init(struct device *dev, struct stm
stm_timer->ced.cpumask = cpumask_of(cpu);
stm_timer->ced.rating = 460;
stm_timer->ced.irq = irq;
+ stm_timer->ced.owner = THIS_MODULE;
per_cpu(stm_timers, cpu) = stm_timer;
diff --git a/drivers/clocksource/timer-rtl-otto.c b/drivers/clocksource/timer-rtl-otto.c
index 8a3068b36e75..6113d2fdd4de 100644
--- a/drivers/clocksource/timer-rtl-otto.c
+++ b/drivers/clocksource/timer-rtl-otto.c
@@ -38,14 +38,13 @@
#define RTTM_BIT_COUNT 28
#define RTTM_MIN_DELTA 8
#define RTTM_MAX_DELTA CLOCKSOURCE_MASK(28)
+#define RTTM_MAX_DIVISOR GENMASK(15, 0)
/*
- * Timers are derived from the LXB clock frequency. Usually this is a fixed
- * multiple of the 25 MHz oscillator. The 930X SOC is an exception from that.
- * Its LXB clock has only dividers and uses the switch PLL of 2.45 GHz as its
- * base. The only meaningful frequencies we can achieve from that are 175.000
- * MHz and 153.125 MHz. The greatest common divisor of all explained possible
- * speeds is 3125000. Pin the timers to this 3.125 MHz reference frequency.
+ * Timers are derived from the Lexra bus (LXB) clock frequency. This is
+ * 175 MHz on RTL930x and 200 MHz on the other platforms. Choosing
+ * 3.125 MHz as a common divisor gives enough range and resolution,
+ * and keeps the different platforms comparable.
*/
#define RTTM_TICKS_PER_SEC 3125000
@@ -55,11 +54,6 @@ struct rttm_cs {
};
/* Simple internal register functions */
-static inline void rttm_set_counter(void __iomem *base, unsigned int counter)
-{
- iowrite32(counter, base + RTTM_CNT);
-}
-
static inline unsigned int rttm_get_counter(void __iomem *base)
{
return ioread32(base + RTTM_CNT);
@@ -112,6 +106,22 @@ static irqreturn_t rttm_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void rttm_bounce_timer(void __iomem *base, u32 mode)
+{
+ /*
+ * When a running timer has less than ~5us left, a stop/start sequence
+	 * might fail. While the details are unknown, the most evident effect is
+	 * that the subsequent interrupt will not be fired.
+	 *
+	 * As a workaround, issue an intermediate restart with a very slow
+	 * frequency of ~3kHz, keeping the target counter (>=8), so that the
+	 * follow-up restart is always issued outside the critical window.
+ */
+
+ rttm_disable_timer(base);
+ rttm_enable_timer(base, mode, RTTM_MAX_DIVISOR);
+}
+
static void rttm_stop_timer(void __iomem *base)
{
rttm_disable_timer(base);
@@ -120,7 +130,6 @@ static void rttm_stop_timer(void __iomem *base)
static void rttm_start_timer(struct timer_of *to, u32 mode)
{
- rttm_set_counter(to->of_base.base, 0);
rttm_enable_timer(to->of_base.base, mode, to->of_clk.rate / RTTM_TICKS_PER_SEC);
}
@@ -129,7 +138,8 @@ static int rttm_next_event(unsigned long delta, struct clock_event_device *clkev
struct timer_of *to = to_timer_of(clkevt);
RTTM_DEBUG(to->of_base.base);
- rttm_stop_timer(to->of_base.base);
+ rttm_bounce_timer(to->of_base.base, RTTM_CTRL_COUNTER);
+ rttm_disable_timer(to->of_base.base);
rttm_set_period(to->of_base.base, delta);
rttm_start_timer(to, RTTM_CTRL_COUNTER);
@@ -141,7 +151,8 @@ static int rttm_state_oneshot(struct clock_event_device *clkevt)
struct timer_of *to = to_timer_of(clkevt);
RTTM_DEBUG(to->of_base.base);
- rttm_stop_timer(to->of_base.base);
+ rttm_bounce_timer(to->of_base.base, RTTM_CTRL_COUNTER);
+ rttm_disable_timer(to->of_base.base);
rttm_set_period(to->of_base.base, RTTM_TICKS_PER_SEC / HZ);
rttm_start_timer(to, RTTM_CTRL_COUNTER);
@@ -153,7 +164,8 @@ static int rttm_state_periodic(struct clock_event_device *clkevt)
struct timer_of *to = to_timer_of(clkevt);
RTTM_DEBUG(to->of_base.base);
- rttm_stop_timer(to->of_base.base);
+ rttm_bounce_timer(to->of_base.base, RTTM_CTRL_TIMER);
+ rttm_disable_timer(to->of_base.base);
rttm_set_period(to->of_base.base, RTTM_TICKS_PER_SEC / HZ);
rttm_start_timer(to, RTTM_CTRL_TIMER);
diff --git a/drivers/clocksource/timer-stm32-lp.c b/drivers/clocksource/timer-stm32-lp.c
index 6e7944ffd7c0..c2a699f5c1dd 100644
--- a/drivers/clocksource/timer-stm32-lp.c
+++ b/drivers/clocksource/timer-stm32-lp.c
@@ -211,6 +211,7 @@ static void stm32_clkevent_lp_init(struct stm32_lp_private *priv,
priv->clkevt.rating = STM32_LP_RATING;
priv->clkevt.suspend = stm32_clkevent_lp_suspend;
priv->clkevt.resume = stm32_clkevent_lp_resume;
+ priv->clkevt.owner = THIS_MODULE;
clockevents_config_and_register(&priv->clkevt, rate, 0x1,
STM32_LPTIM_MAX_ARR);
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 6b48a9006444..f827d3f98f60 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -185,6 +185,7 @@ static int sun5i_setup_clocksource(struct platform_device *pdev,
cs->clksrc.read = sun5i_clksrc_read;
cs->clksrc.mask = CLOCKSOURCE_MASK(32);
cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ cs->clksrc.owner = THIS_MODULE;
ret = clocksource_register_hz(&cs->clksrc, rate);
if (ret) {
@@ -214,6 +215,7 @@ static int sun5i_setup_clockevent(struct platform_device *pdev,
ce->clkevt.rating = 340;
ce->clkevt.irq = irq;
ce->clkevt.cpumask = cpu_possible_mask;
+ ce->clkevt.owner = THIS_MODULE;
/* Enable timer0 interrupt */
val = readl(base + TIMER_IRQ_EN_REG);
diff --git a/drivers/clocksource/timer-tegra186.c b/drivers/clocksource/timer-tegra186.c
index e5394f98a02e..355558893e5f 100644
--- a/drivers/clocksource/timer-tegra186.c
+++ b/drivers/clocksource/timer-tegra186.c
@@ -159,7 +159,7 @@ static void tegra186_wdt_enable(struct tegra186_wdt *wdt)
tmr_writel(wdt->tmr, TMRCSSR_SRC_USEC, TMRCSSR);
/* configure timer (system reset happens on the fifth expiration) */
- value = TMRCR_PTV(wdt->base.timeout * USEC_PER_SEC / 5) |
+ value = TMRCR_PTV(wdt->base.timeout * (USEC_PER_SEC / 5)) |
TMRCR_PERIODIC | TMRCR_ENABLE;
tmr_writel(wdt->tmr, value, TMRCR);
@@ -231,7 +231,7 @@ static unsigned int tegra186_wdt_get_timeleft(struct watchdog_device *wdd)
{
struct tegra186_wdt *wdt = to_tegra186_wdt(wdd);
u32 expiration, val;
- u64 timeleft;
+ u32 timeleft;
if (!watchdog_active(&wdt->base)) {
/* return zero if the watchdog timer is not activated. */
@@ -266,21 +266,26 @@ static unsigned int tegra186_wdt_get_timeleft(struct watchdog_device *wdd)
* Calculate the time remaining by adding the time for the
* counter value to the time of the counter expirations that
* remain.
+ * Note: Since wdt->base.timeout is bound to 255, the maximum
+ * value added to timeleft is
+ * 255 * (1,000,000 / 5) * 4
+ * = 255 * 200,000 * 4
+ * = 204,000,000
+ * TMRSR_PCV is a 29-bit field.
+ * Its maximum value is 0x1fffffff = 536,870,911.
+ * 204,000,000 + 536,870,911 = 740,870,911 = 0x2C28CAFF.
+ * timeleft can therefore not overflow, and 64-bit calculations
+ * are not necessary.
*/
- timeleft += (((u64)wdt->base.timeout * USEC_PER_SEC) / 5) * (4 - expiration);
+ timeleft += (wdt->base.timeout * (USEC_PER_SEC / 5)) * (4 - expiration);
/*
* Convert the current counter value to seconds,
- * rounding up to the nearest second. Cast u64 to
- * u32 under the assumption that no overflow happens
- * when coverting to seconds.
+ * rounding to the nearest second.
*/
- timeleft = DIV_ROUND_CLOSEST_ULL(timeleft, USEC_PER_SEC);
+ timeleft = DIV_ROUND_CLOSEST(timeleft, USEC_PER_SEC);
- if (WARN_ON_ONCE(timeleft > U32_MAX))
- return U32_MAX;
-
- return lower_32_bits(timeleft);
+ return timeleft;
}
static const struct watchdog_ops tegra186_wdt_ops = {
@@ -328,16 +333,12 @@ static struct tegra186_wdt *tegra186_wdt_create(struct tegra186_timer *tegra,
wdt->base.parent = tegra->dev;
err = watchdog_init_timeout(&wdt->base, 5, tegra->dev);
- if (err < 0) {
- dev_err(tegra->dev, "failed to initialize timeout: %d\n", err);
+ if (err < 0)
return ERR_PTR(err);
- }
err = devm_watchdog_register_device(tegra->dev, &wdt->base);
- if (err < 0) {
- dev_err(tegra->dev, "failed to register WDT: %d\n", err);
+ if (err < 0)
return ERR_PTR(err);
- }
return wdt;
}
@@ -373,6 +374,7 @@ static int tegra186_timer_tsc_init(struct tegra186_timer *tegra)
tegra->tsc.read = tegra186_timer_tsc_read;
tegra->tsc.mask = CLOCKSOURCE_MASK(56);
tegra->tsc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ tegra->tsc.owner = THIS_MODULE;
return clocksource_register_hz(&tegra->tsc, 31250000);
}
@@ -392,6 +394,7 @@ static int tegra186_timer_osc_init(struct tegra186_timer *tegra)
tegra->osc.read = tegra186_timer_osc_read;
tegra->osc.mask = CLOCKSOURCE_MASK(32);
tegra->osc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ tegra->osc.owner = THIS_MODULE;
return clocksource_register_hz(&tegra->osc, 38400000);
}
@@ -411,6 +414,7 @@ static int tegra186_timer_usec_init(struct tegra186_timer *tegra)
tegra->usec.read = tegra186_timer_usec_read;
tegra->usec.mask = CLOCKSOURCE_MASK(32);
tegra->usec.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ tegra->usec.owner = THIS_MODULE;
return clocksource_register_hz(&tegra->usec, USEC_PER_SEC);
}
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index e9e32df6b566..793e7cdcb1b1 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -31,6 +31,7 @@
#include <linux/platform_data/dmtimer-omap.h>
#include <clocksource/timer-ti-dm.h>
+#include <linux/delay.h>
/*
* timer errata flags
@@ -836,6 +837,48 @@ static int omap_dm_timer_set_match(struct omap_dm_timer *cookie, int enable,
return 0;
}
+static int omap_dm_timer_set_cap(struct omap_dm_timer *cookie,
+ int autoreload, bool config_period)
+{
+ struct dmtimer *timer;
+ struct device *dev;
+ int rc;
+ u32 l;
+
+ timer = to_dmtimer(cookie);
+ if (unlikely(!timer))
+ return -EINVAL;
+
+ dev = &timer->pdev->dev;
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ return rc;
+ /*
+ * 1. Select autoreload mode. TIMER_TCLR[1] AR bit.
+ * 2. TIMER_TCLR[14]: Sets the functionality of the TIMER IO pin.
+	 * 3. TIMER_TCLR[13]: Capture mode select bit.
+	 * 4. TIMER_TCLR[9-8]: Select the transition capture mode.
+ */
+
+ l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
+
+ if (autoreload)
+ l |= OMAP_TIMER_CTRL_AR;
+
+ l |= OMAP_TIMER_CTRL_CAPTMODE | OMAP_TIMER_CTRL_GPOCFG;
+
+	if (config_period)
+ l |= OMAP_TIMER_CTRL_TCM_LOWTOHIGH; /* Time Period config */
+ else
+ l |= OMAP_TIMER_CTRL_TCM_BOTHEDGES; /* Duty Cycle config */
+
+ dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
+
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
static int omap_dm_timer_set_pwm(struct omap_dm_timer *cookie, int def_on,
int toggle, int trigger, int autoreload)
{
@@ -1023,23 +1066,92 @@ static unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *cookie)
return __omap_dm_timer_read_counter(timer);
}
+static inline unsigned int __omap_dm_timer_cap(struct dmtimer *timer, int idx)
+{
+ return idx == 0 ? dmtimer_read(timer, OMAP_TIMER_CAPTURE_REG) :
+ dmtimer_read(timer, OMAP_TIMER_CAPTURE2_REG);
+}
+
static int omap_dm_timer_write_counter(struct omap_dm_timer *cookie, unsigned int value)
{
struct dmtimer *timer;
+ struct device *dev;
timer = to_dmtimer(cookie);
- if (unlikely(!timer || !atomic_read(&timer->enabled))) {
- pr_err("%s: timer not available or enabled.\n", __func__);
+ if (unlikely(!timer)) {
+ pr_err("%s: timer not available.\n", __func__);
return -EINVAL;
}
+ dev = &timer->pdev->dev;
+
+ pm_runtime_resume_and_get(dev);
dmtimer_write(timer, OMAP_TIMER_COUNTER_REG, value);
+ pm_runtime_put_sync(dev);
/* Save the context */
timer->context.tcrr = value;
return 0;
}
+/**
+ * omap_dm_timer_cap_counter() - Calculate the high count or period count depending on the
+ * configuration.
+ * @cookie: Pointer to OMAP DM timer
+ * @is_period: Whether to configure the timer in period or duty cycle mode
+ *
+ * Return: the high count or period count if the timer is enabled, else an
+ * appropriate error.
+ */
+static unsigned int omap_dm_timer_cap_counter(struct omap_dm_timer *cookie, bool is_period)
+{
+ struct dmtimer *timer;
+ unsigned int cap1 = 0;
+ unsigned int cap2 = 0;
+	u32 l;
+	int ret;
+
+ timer = to_dmtimer(cookie);
+ if (unlikely(!timer || !atomic_read(&timer->enabled))) {
+ pr_err("%s:timer is not available or enabled.%p\n", __func__, (void *)timer);
+ return -EINVAL;
+ }
+
+ /* Stop the timer */
+ omap_dm_timer_stop(cookie);
+
+ /* Clear the timer counter value to 0 */
+ ret = omap_dm_timer_write_counter(cookie, 0);
+ if (ret)
+ return ret;
+
+ /* Sets the timer capture configuration for period/duty cycle calculation */
+ ret = omap_dm_timer_set_cap(cookie, true, is_period);
+ if (ret) {
+ pr_err("%s: Failed to set timer capture configuration.\n", __func__);
+ return ret;
+ }
+ /* Start the timer */
+ omap_dm_timer_start(cookie);
+
+ /*
+	 * A 1 s delay is given to provide enough time
+	 * to capture low-frequency signals.
+ */
+ msleep(1000);
+
+ cap1 = __omap_dm_timer_cap(timer, 0);
+ cap2 = __omap_dm_timer_cap(timer, 1);
+
+ /*
+ * Clears the TCLR configuration.
+ * The start bit must be set to 1 as the timer is already in start mode.
+ */
+ l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
+	l = (l & ~0xffffU) | 0x1;
+ dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
+
+	return cap2 - cap1;
+}
+
static int __maybe_unused omap_dm_timer_runtime_suspend(struct device *dev)
{
struct dmtimer *timer = dev_get_drvdata(dev);
@@ -1246,6 +1358,9 @@ static const struct omap_dm_timer_ops dmtimer_ops = {
.write_counter = omap_dm_timer_write_counter,
.read_status = omap_dm_timer_read_status,
.write_status = omap_dm_timer_write_status,
+ .set_cap = omap_dm_timer_set_cap,
+ .get_cap_status = omap_dm_timer_get_pwm_status,
+ .read_cap = omap_dm_timer_cap_counter,
};
static const struct dmtimer_platform_data omap3plus_pdata = {
diff --git a/drivers/clocksource/timer-vf-pit.c b/drivers/clocksource/timer-vf-pit.c
deleted file mode 100644
index 911c92146eca..000000000000
--- a/drivers/clocksource/timer-vf-pit.c
+++ /dev/null
@@ -1,194 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2012-2013 Freescale Semiconductor, Inc.
- */
-
-#include <linux/interrupt.h>
-#include <linux/clockchips.h>
-#include <linux/clk.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/sched_clock.h>
-
-/*
- * Each pit takes 0x10 Bytes register space
- */
-#define PITMCR 0x00
-#define PIT0_OFFSET 0x100
-#define PITn_OFFSET(n) (PIT0_OFFSET + 0x10 * (n))
-#define PITLDVAL 0x00
-#define PITCVAL 0x04
-#define PITTCTRL 0x08
-#define PITTFLG 0x0c
-
-#define PITMCR_MDIS (0x1 << 1)
-
-#define PITTCTRL_TEN (0x1 << 0)
-#define PITTCTRL_TIE (0x1 << 1)
-#define PITCTRL_CHN (0x1 << 2)
-
-#define PITTFLG_TIF 0x1
-
-static void __iomem *clksrc_base;
-static void __iomem *clkevt_base;
-static unsigned long cycle_per_jiffy;
-
-static inline void pit_timer_enable(void)
-{
- __raw_writel(PITTCTRL_TEN | PITTCTRL_TIE, clkevt_base + PITTCTRL);
-}
-
-static inline void pit_timer_disable(void)
-{
- __raw_writel(0, clkevt_base + PITTCTRL);
-}
-
-static inline void pit_irq_acknowledge(void)
-{
- __raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG);
-}
-
-static u64 notrace pit_read_sched_clock(void)
-{
- return ~__raw_readl(clksrc_base + PITCVAL);
-}
-
-static int __init pit_clocksource_init(unsigned long rate)
-{
- /* set the max load value and start the clock source counter */
- __raw_writel(0, clksrc_base + PITTCTRL);
- __raw_writel(~0UL, clksrc_base + PITLDVAL);
- __raw_writel(PITTCTRL_TEN, clksrc_base + PITTCTRL);
-
- sched_clock_register(pit_read_sched_clock, 32, rate);
- return clocksource_mmio_init(clksrc_base + PITCVAL, "vf-pit", rate,
- 300, 32, clocksource_mmio_readl_down);
-}
-
-static int pit_set_next_event(unsigned long delta,
- struct clock_event_device *unused)
-{
- /*
- * set a new value to PITLDVAL register will not restart the timer,
- * to abort the current cycle and start a timer period with the new
- * value, the timer must be disabled and enabled again.
- * and the PITLAVAL should be set to delta minus one according to pit
- * hardware requirement.
- */
- pit_timer_disable();
- __raw_writel(delta - 1, clkevt_base + PITLDVAL);
- pit_timer_enable();
-
- return 0;
-}
-
-static int pit_shutdown(struct clock_event_device *evt)
-{
- pit_timer_disable();
- return 0;
-}
-
-static int pit_set_periodic(struct clock_event_device *evt)
-{
- pit_set_next_event(cycle_per_jiffy, evt);
- return 0;
-}
-
-static irqreturn_t pit_timer_interrupt(int irq, void *dev_id)
-{
- struct clock_event_device *evt = dev_id;
-
- pit_irq_acknowledge();
-
- /*
- * pit hardware doesn't support oneshot, it will generate an interrupt
- * and reload the counter value from PITLDVAL when PITCVAL reach zero,
- * and start the counter again. So software need to disable the timer
- * to stop the counter loop in ONESHOT mode.
- */
- if (likely(clockevent_state_oneshot(evt)))
- pit_timer_disable();
-
- evt->event_handler(evt);
-
- return IRQ_HANDLED;
-}
-
-static struct clock_event_device clockevent_pit = {
- .name = "VF pit timer",
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .set_state_shutdown = pit_shutdown,
- .set_state_periodic = pit_set_periodic,
- .set_next_event = pit_set_next_event,
- .rating = 300,
-};
-
-static int __init pit_clockevent_init(unsigned long rate, int irq)
-{
- __raw_writel(0, clkevt_base + PITTCTRL);
- __raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG);
-
- BUG_ON(request_irq(irq, pit_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
- "VF pit timer", &clockevent_pit));
-
- clockevent_pit.cpumask = cpumask_of(0);
- clockevent_pit.irq = irq;
- /*
- * The value for the LDVAL register trigger is calculated as:
- * LDVAL trigger = (period / clock period) - 1
- * The pit is a 32-bit down count timer, when the counter value
- * reaches 0, it will generate an interrupt, thus the minimal
- * LDVAL trigger value is 1. And then the min_delta is
- * minimal LDVAL trigger value + 1, and the max_delta is full 32-bit.
- */
- clockevents_config_and_register(&clockevent_pit, rate, 2, 0xffffffff);
-
- return 0;
-}
-
-static int __init pit_timer_init(struct device_node *np)
-{
- struct clk *pit_clk;
- void __iomem *timer_base;
- unsigned long clk_rate;
- int irq, ret;
-
- timer_base = of_iomap(np, 0);
- if (!timer_base) {
- pr_err("Failed to iomap\n");
- return -ENXIO;
- }
-
- /*
- * PIT0 and PIT1 can be chained to build a 64-bit timer,
- * so choose PIT2 as clocksource, PIT3 as clockevent device,
- * and leave PIT0 and PIT1 unused for anyone else who needs them.
- */
- clksrc_base = timer_base + PITn_OFFSET(2);
- clkevt_base = timer_base + PITn_OFFSET(3);
-
- irq = irq_of_parse_and_map(np, 0);
- if (irq <= 0)
- return -EINVAL;
-
- pit_clk = of_clk_get(np, 0);
- if (IS_ERR(pit_clk))
- return PTR_ERR(pit_clk);
-
- ret = clk_prepare_enable(pit_clk);
- if (ret)
- return ret;
-
- clk_rate = clk_get_rate(pit_clk);
- cycle_per_jiffy = clk_rate / (HZ);
-
- /* enable the pit module */
- __raw_writel(~PITMCR_MDIS, timer_base + PITMCR);
-
- ret = pit_clocksource_init(clk_rate);
- if (ret)
- return ret;
-
- return pit_clockevent_init(clk_rate, irq);
-}
-TIMER_OF_DECLARE(vf610, "fsl,vf610-pit", pit_timer_init);
diff --git a/drivers/comedi/Kconfig b/drivers/comedi/Kconfig
index 93c68a40a17b..6dcc2567de6d 100644
--- a/drivers/comedi/Kconfig
+++ b/drivers/comedi/Kconfig
@@ -705,6 +705,15 @@ config COMEDI_ADL_PCI6208
To compile this driver as a module, choose M here: the module will be
called adl_pci6208.
+config COMEDI_ADL_PCI7250
+ tristate "ADLink PCI-7250 support"
+ help
+ Enable support for ADLink PCI-7250/LPCI-7250/LPCIe-7250 relay output
+ and isolated digital input boards.
+
+ To compile this driver as a module, choose M here: the module will be
+ called adl_pci7250.
+
config COMEDI_ADL_PCI7X3X
tristate "ADLink PCI-723X/743X isolated digital i/o board support"
depends on HAS_IOPORT
diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c
index 23b7178522ae..7e2f2b1a1c36 100644
--- a/drivers/comedi/comedi_fops.c
+++ b/drivers/comedi/comedi_fops.c
@@ -1587,6 +1587,9 @@ static int do_insnlist_ioctl(struct comedi_device *dev,
memset(&data[n], 0, (MIN_SAMPLES - n) *
sizeof(unsigned int));
}
+ } else {
+ memset(data, 0, max_t(unsigned int, n, MIN_SAMPLES) *
+ sizeof(unsigned int));
}
ret = parse_insn(dev, insns + i, data, file);
if (ret < 0)
@@ -1670,6 +1673,8 @@ static int do_insn_ioctl(struct comedi_device *dev,
memset(&data[insn->n], 0,
(MIN_SAMPLES - insn->n) * sizeof(unsigned int));
}
+ } else {
+ memset(data, 0, n_data * sizeof(unsigned int));
}
ret = parse_insn(dev, insn, data, file);
if (ret < 0)
diff --git a/drivers/comedi/drivers.c b/drivers/comedi/drivers.c
index f1dc854928c1..c9ebaadc5e82 100644
--- a/drivers/comedi/drivers.c
+++ b/drivers/comedi/drivers.c
@@ -620,11 +620,9 @@ static int insn_rw_emulate_bits(struct comedi_device *dev,
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int base_chan = (chan < 32) ? 0 : chan;
unsigned int _data[2];
+ unsigned int i;
int ret;
- if (insn->n == 0)
- return 0;
-
memset(_data, 0, sizeof(_data));
memset(&_insn, 0, sizeof(_insn));
_insn.insn = INSN_BITS;
@@ -635,18 +633,21 @@ static int insn_rw_emulate_bits(struct comedi_device *dev,
if (insn->insn == INSN_WRITE) {
if (!(s->subdev_flags & SDF_WRITABLE))
return -EINVAL;
- _data[0] = 1U << (chan - base_chan); /* mask */
- _data[1] = data[0] ? (1U << (chan - base_chan)) : 0; /* bits */
+ _data[0] = 1U << (chan - base_chan); /* mask */
}
+ for (i = 0; i < insn->n; i++) {
+ if (insn->insn == INSN_WRITE)
+ _data[1] = data[i] ? _data[0] : 0; /* bits */
- ret = s->insn_bits(dev, s, &_insn, _data);
- if (ret < 0)
- return ret;
+ ret = s->insn_bits(dev, s, &_insn, _data);
+ if (ret < 0)
+ return ret;
- if (insn->insn == INSN_READ)
- data[0] = (_data[1] >> (chan - base_chan)) & 1;
+ if (insn->insn == INSN_READ)
+ data[i] = (_data[1] >> (chan - base_chan)) & 1;
+ }
- return 1;
+ return insn->n;
}
static int __comedi_device_postconfig_async(struct comedi_device *dev,
diff --git a/drivers/comedi/drivers/Makefile b/drivers/comedi/drivers/Makefile
index b24ac00cab73..7b99a431330d 100644
--- a/drivers/comedi/drivers/Makefile
+++ b/drivers/comedi/drivers/Makefile
@@ -73,6 +73,7 @@ obj-$(CONFIG_COMEDI_ADDI_APCI_3120) += addi_apci_3120.o
obj-$(CONFIG_COMEDI_ADDI_APCI_3501) += addi_apci_3501.o
obj-$(CONFIG_COMEDI_ADDI_APCI_3XXX) += addi_apci_3xxx.o
obj-$(CONFIG_COMEDI_ADL_PCI6208) += adl_pci6208.o
+obj-$(CONFIG_COMEDI_ADL_PCI7250) += adl_pci7250.o
obj-$(CONFIG_COMEDI_ADL_PCI7X3X) += adl_pci7x3x.o
obj-$(CONFIG_COMEDI_ADL_PCI8164) += adl_pci8164.o
obj-$(CONFIG_COMEDI_ADL_PCI9111) += adl_pci9111.o
diff --git a/drivers/comedi/drivers/adl_pci7250.c b/drivers/comedi/drivers/adl_pci7250.c
new file mode 100644
index 000000000000..78c85a402435
--- /dev/null
+++ b/drivers/comedi/drivers/adl_pci7250.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * adl_pci7250.c
+ *
+ * Comedi driver for ADLink PCI-7250 series cards.
+ *
+ * Copyright (C) 2015, 2025 Ian Abbott <abbotti@mev.co.uk>
+ */
+
+/*
+ * Driver: adl_pci7250
+ * Description: Driver for the ADLINK PCI-7250 relay output & digital input card
+ * Devices: [ADLINK] PCI-7250 (adl_pci7250) LPCI-7250 LPCIe-7250
+ * Author: Ian Abbott <abbotti@mev.co.uk>
+ * Status: works
+ * Updated: Mon, 02 Jun 2025 13:54:11 +0100
+ *
+ * The driver assumes that 3 PCI-7251 modules are fitted to the PCI-7250,
+ * giving 32 channels of relay outputs and 32 channels of isolated digital
+ * inputs. That is also the case for the LPCI-7250 and older LPCIe-7250
+ * cards although they do not physically support the PCI-7251 modules.
+ * Newer LPCIe-7250 cards have a different PCI subsystem device ID, so
+ * set the number of channels to 8 for these cards.
+ *
+ * Not fitting the PCI-7251 modules shouldn't do any harm, but the extra
+ * inputs and relay outputs won't work!
+ *
+ * Configuration Options: not applicable, uses PCI auto config
+ */
+
+#include <linux/module.h>
+#include <linux/comedi/comedi_pci.h>
+
+static unsigned char adl_pci7250_read8(struct comedi_device *dev,
+ unsigned int offset)
+{
+#ifdef CONFIG_HAS_IOPORT
+ if (!dev->mmio)
+ return inb(dev->iobase + offset);
+#endif
+ return readb(dev->mmio + offset);
+}
+
+static void adl_pci7250_write8(struct comedi_device *dev, unsigned int offset,
+ unsigned char val)
+{
+#ifdef CONFIG_HAS_IOPORT
+ if (!dev->mmio) {
+ outb(val, dev->iobase + offset);
+ return;
+ }
+#endif
+ writeb(val, dev->mmio + offset);
+}
+
+static int adl_pci7250_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int mask = comedi_dio_update_state(s, data);
+
+ if (mask) {
+ unsigned int state = s->state;
+ unsigned int i;
+
+ for (i = 0; i * 8 < s->n_chan; i++) {
+ if ((mask & 0xffu) != 0) {
+ /* write relay data to even offset registers */
+ adl_pci7250_write8(dev, i * 2, state & 0xffu);
+ }
+ state >>= 8;
+ mask >>= 8;
+ }
+ }
+
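+	/* Comedi INSN_BITS convention: data[0] is the mask, data[1] returns the state */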
+ data[1] = s->state;
+
+ return 2;
+}
+
+static int adl_pci7250_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int value = 0;
+ unsigned int i;
+
+ for (i = 0; i * 8 < s->n_chan; i++) {
+ /* read DI value from odd offset registers */
+ value |= (unsigned int)adl_pci7250_read8(dev, i * 2 + 1) <<
+ (i * 8);
+ }
+
+ data[1] = value;
+
+ return 2;
+}
+
+static int pci7250_auto_attach(struct comedi_device *dev,
+ unsigned long context_unused)
+{
+ struct pci_dev *pcidev = comedi_to_pci_dev(dev);
+ struct comedi_subdevice *s;
+ unsigned int max_chans;
+ unsigned int i;
+ int ret;
+
+ ret = comedi_pci_enable(dev);
+ if (ret)
+ return ret;
+
+ if (pci_resource_len(pcidev, 2) < 8)
+ return -ENXIO;
+
+ /*
+ * Newer LPCIe-7250 boards use MMIO. Older LPCIe-7250, LPCI-7250, and
+ * PCI-7250 boards use Port I/O.
+ */
+ if (pci_resource_flags(pcidev, 2) & IORESOURCE_MEM) {
+ dev->mmio = pci_ioremap_bar(pcidev, 2);
+ if (!dev->mmio)
+ return -ENOMEM;
+ } else if (IS_ENABLED(CONFIG_HAS_IOPORT)) {
+ dev->iobase = pci_resource_start(pcidev, 2);
+ } else {
+ dev_err(dev->class_dev,
+ "error! need I/O port support\n");
+ return -ENXIO;
+ }
+
+ if (pcidev->subsystem_device == 0x7000) {
+ /*
+ * This is a newer LPCIe-7250 variant and cannot possibly
+ * have PCI-7251 modules fitted, so limit the number of
+ * channels to 8.
+ */
+ max_chans = 8;
+ } else {
+ /*
+ * It is unknown whether the board is a PCI-7250, an LPCI-7250,
+ * or an older LPCIe-7250 variant, so treat it as a PCI-7250
+ * and assume it can have PCI-7251 modules fitted to increase
+ * the number of channels to a maximum of 32.
+ */
+ max_chans = 32;
+ }
+
+ ret = comedi_alloc_subdevices(dev, 2);
+ if (ret)
+ return ret;
+
+ /* Relay digital output. */
+ s = &dev->subdevices[0];
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = max_chans;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = adl_pci7250_do_insn_bits;
+ /* Read initial state of relays from the even offset registers. */
+ s->state = 0;
+ for (i = 0; i * 8 < max_chans; i++) {
+ s->state |= (unsigned int)adl_pci7250_read8(dev, i * 2) <<
+ (i * 8);
+ }
+
+ /* Isolated digital input. */
+ s = &dev->subdevices[1];
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = max_chans;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = adl_pci7250_di_insn_bits;
+
+ return 0;
+}
+
+static struct comedi_driver adl_pci7250_driver = {
+ .driver_name = "adl_pci7250",
+ .module = THIS_MODULE,
+ .auto_attach = pci7250_auto_attach,
+ .detach = comedi_pci_detach,
+};
+
+static int adl_pci7250_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ return comedi_pci_auto_config(dev, &adl_pci7250_driver,
+ id->driver_data);
+}
+
+static const struct pci_device_id adl_pci7250_pci_table[] = {
+#ifdef CONFIG_HAS_IOPORT
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
+ 0x9999, 0x7250) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADLINK, 0x7250,
+ 0x9999, 0x7250) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADLINK, 0x7250,
+ PCI_VENDOR_ID_ADLINK, 0x7250) },
+#endif
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADLINK, 0x7250,
+ PCI_VENDOR_ID_ADLINK, 0x7000) }, /* newer LPCIe-7250 */
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, adl_pci7250_pci_table);
+
+static struct pci_driver adl_pci7250_pci_driver = {
+ .name = "adl_pci7250",
+ .id_table = adl_pci7250_pci_table,
+ .probe = adl_pci7250_pci_probe,
+ .remove = comedi_pci_auto_unconfig,
+};
+module_comedi_pci_driver(adl_pci7250_driver, adl_pci7250_pci_driver);
+
+MODULE_AUTHOR("Comedi https://www.comedi.org");
+MODULE_DESCRIPTION("Comedi driver for ADLink PCI-7250 series boards");
+MODULE_LICENSE("GPL");
diff --git a/drivers/comedi/drivers/pcl726.c b/drivers/comedi/drivers/pcl726.c
index 0430630e6ebb..b542896fa0e4 100644
--- a/drivers/comedi/drivers/pcl726.c
+++ b/drivers/comedi/drivers/pcl726.c
@@ -328,7 +328,8 @@ static int pcl726_attach(struct comedi_device *dev,
* Hook up the external trigger source interrupt only if the
* user config option is valid and the board supports interrupts.
*/
- if (it->options[1] && (board->irq_mask & (1 << it->options[1]))) {
+ if (it->options[1] > 0 && it->options[1] < 16 &&
+ (board->irq_mask & (1U << it->options[1]))) {
ret = request_irq(it->options[1], pcl726_interrupt, 0,
dev->board_name, dev);
if (ret == 0) {
diff --git a/drivers/counter/ti-ecap-capture.c b/drivers/counter/ti-ecap-capture.c
index 3faaf7f60539..3586a7ab9887 100644
--- a/drivers/counter/ti-ecap-capture.c
+++ b/drivers/counter/ti-ecap-capture.c
@@ -465,11 +465,6 @@ static irqreturn_t ecap_cnt_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void ecap_cnt_pm_disable(void *dev)
-{
- pm_runtime_disable(dev);
-}
-
static int ecap_cnt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -523,12 +518,9 @@ static int ecap_cnt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, counter_dev);
- pm_runtime_enable(dev);
-
- /* Register a cleanup callback to care for disabling PM */
- ret = devm_add_action_or_reset(dev, ecap_cnt_pm_disable, dev);
+ ret = devm_pm_runtime_enable(dev);
if (ret)
- return dev_err_probe(dev, ret, "failed to add pm disable action\n");
+ return ret;
ret = devm_counter_add(dev, counter_dev);
if (ret)
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 4f7f9201598d..083d8369a591 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -318,7 +318,6 @@ static u32 drv_read(struct acpi_cpufreq_data *data, const struct cpumask *mask)
return cmd.val;
}
-/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
struct drv_cmd *cmd = _cmd;
@@ -335,14 +334,8 @@ static void drv_write(struct acpi_cpufreq_data *data,
.val = val,
.func.write = data->cpu_freq_write,
};
- int this_cpu;
- this_cpu = get_cpu();
- if (cpumask_test_cpu(this_cpu, mask))
- do_drv_write(&cmd);
-
- smp_call_function_many(mask, do_drv_write, &cmd, 1);
- put_cpu();
+ on_each_cpu_mask(mask, do_drv_write, &cmd, true);
}
static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
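The acpi-cpufreq conversion leans on on_each_cpu_mask() running the callback on the local CPU too, when it is part of the mask, with preemption handled internally — exactly what the removed get_cpu()/smp_call_function_many()/put_cpu() sequence spelled out by hand. A sketch with a hypothetical callback:

static void bump_counter(void *info)
{
	atomic_inc((atomic_t *)info);	/* runs once on every CPU in the mask */
}

static void run_on_mask(const struct cpumask *mask, atomic_t *ctr)
{
	/* Final 'true' waits until the callback has finished everywhere. */
	on_each_cpu_mask(mask, bump_counter, ctr, true);
}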
diff --git a/drivers/cpufreq/airoha-cpufreq.c b/drivers/cpufreq/airoha-cpufreq.c
index 4fe39eadd163..b6b1cdc4d11d 100644
--- a/drivers/cpufreq/airoha-cpufreq.c
+++ b/drivers/cpufreq/airoha-cpufreq.c
@@ -107,6 +107,7 @@ static struct platform_driver airoha_cpufreq_driver = {
};
static const struct of_device_id airoha_cpufreq_match_list[] __initconst = {
+ { .compatible = "airoha,an7583" },
{ .compatible = "airoha,en7581" },
{},
};
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index bbc27ef9edf7..298e92d8cc03 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -872,10 +872,10 @@ static void amd_pstate_update_limits(struct cpufreq_policy *policy)
*/
static u32 amd_pstate_get_transition_delay_us(unsigned int cpu)
{
- u32 transition_delay_ns;
+ int transition_delay_ns;
transition_delay_ns = cppc_get_transition_latency(cpu);
- if (transition_delay_ns == CPUFREQ_ETERNAL) {
+ if (transition_delay_ns < 0) {
if (cpu_feature_enabled(X86_FEATURE_AMD_FAST_CPPC))
return AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY;
else
@@ -891,10 +891,10 @@ static u32 amd_pstate_get_transition_delay_us(unsigned int cpu)
*/
static u32 amd_pstate_get_transition_latency(unsigned int cpu)
{
- u32 transition_latency;
+ int transition_latency;
transition_latency = cppc_get_transition_latency(cpu);
- if (transition_latency == CPUFREQ_ETERNAL)
+ if (transition_latency < 0)
return AMD_PSTATE_TRANSITION_LATENCY;
return transition_latency;
@@ -1554,13 +1554,15 @@ static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
pr_debug("CPU %d exiting\n", policy->cpu);
}
-static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
+static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy, bool policy_change)
{
struct amd_cpudata *cpudata = policy->driver_data;
union perf_cached perf;
u8 epp;
- if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
+ if (policy_change ||
+ policy->min != cpudata->min_limit_freq ||
+ policy->max != cpudata->max_limit_freq)
amd_pstate_update_min_max_limit(policy);
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
@@ -1584,7 +1586,7 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
cpudata->policy = policy->policy;
- ret = amd_pstate_epp_update_limit(policy);
+ ret = amd_pstate_epp_update_limit(policy, true);
if (ret)
return ret;
@@ -1626,13 +1628,14 @@ static int amd_pstate_suspend(struct cpufreq_policy *policy)
* min_perf value across kexec reboots. If this CPU is just resumed back without kexec,
* the limits, epp and desired perf will get reset to the cached values in cpudata struct
*/
- ret = amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
+ ret = amd_pstate_update_perf(policy, perf.bios_min_perf,
+ FIELD_GET(AMD_CPPC_DES_PERF_MASK, cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached),
+ false);
if (ret)
return ret;
- /* invalidate to ensure it's rewritten during resume */
- cpudata->cppc_req_cached = 0;
-
/* set this flag to avoid setting the core offline */
cpudata->suspended = true;
@@ -1658,7 +1661,7 @@ static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
int ret;
/* enable amd pstate from suspend state*/
- ret = amd_pstate_epp_update_limit(policy);
+ ret = amd_pstate_epp_update_limit(policy, false);
if (ret)
return ret;
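The first two amd-pstate hunks track an API change visible in the diff: cppc_get_transition_latency() now reports failure with a negative errno rather than the CPUFREQ_ETERNAL sentinel, so the result must be held in a signed int and tested for sign. A sketch of the calling convention, with the fallback value as a stand-in:

static u32 example_transition_latency(unsigned int cpu, u32 fallback_ns)
{
	int latency_ns = cppc_get_transition_latency(cpu);

	/* Negative means -ENODEV, -EFAULT, etc.; use a driver default. */
	if (latency_ns < 0)
		return fallback_ns;

	return latency_ns;
}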
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
index f28a4435fba7..0efe403a5980 100644
--- a/drivers/cpufreq/armada-37xx-cpufreq.c
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -265,7 +265,7 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
*/
target_vm = avs_map[l0_vdd_min] - 100;
- target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
+ target_vm = max(target_vm, MIN_VOLT_MV);
dvfs->avs[1] = armada_37xx_avs_val_match(target_vm);
/*
@@ -273,7 +273,7 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
* be larger than 1000mv
*/
target_vm = avs_map[l0_vdd_min] - 150;
- target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
+ target_vm = max(target_vm, MIN_VOLT_MV);
dvfs->avs[2] = dvfs->avs[3] = armada_37xx_avs_val_match(target_vm);
/*
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 5940d262374f..71450cca8e9f 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -480,7 +480,7 @@ static bool brcm_avs_is_firmware_loaded(struct private_data *priv)
static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct private_data *priv;
if (!policy)
@@ -488,8 +488,6 @@ static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
priv = policy->driver_data;
- cpufreq_cpu_put(policy);
-
return brcm_avs_get_frequency(priv->base);
}
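The __free(put_cpufreq_policy) annotations recurring through this series are the kernel's scope-based cleanup from <linux/cleanup.h>: the reference taken by cpufreq_cpu_get() is dropped automatically on every exit path, which is why the explicit cpufreq_cpu_put() calls disappear. A sketch mirroring the destructor definition the cpufreq core provides:

DEFINE_FREE(put_cpufreq_policy, struct cpufreq_policy *,
	    if (_T) cpufreq_cpu_put(_T))

static unsigned int example_get_cur(unsigned int cpu)
{
	struct cpufreq_policy *policy __free(put_cpufreq_policy) =
						cpufreq_cpu_get(cpu);

	if (!policy)
		return 0;

	return policy->cur;	/* reference dropped when policy leaves scope */
}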
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 4a17162a392d..e23d9abea135 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -50,8 +50,7 @@ struct cppc_freq_invariance {
static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
static struct kthread_worker *kworker_fie;
-static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
- struct cppc_perf_fb_ctrs *fb_ctrs_t0,
+static int cppc_perf_from_fbctrs(struct cppc_perf_fb_ctrs *fb_ctrs_t0,
struct cppc_perf_fb_ctrs *fb_ctrs_t1);
/**
@@ -87,8 +86,7 @@ static void cppc_scale_freq_workfn(struct kthread_work *work)
return;
}
- perf = cppc_perf_from_fbctrs(cpu_data, &cppc_fi->prev_perf_fb_ctrs,
- &fb_ctrs);
+ perf = cppc_perf_from_fbctrs(&cppc_fi->prev_perf_fb_ctrs, &fb_ctrs);
if (!perf)
return;
@@ -310,6 +308,16 @@ static int cppc_verify_policy(struct cpufreq_policy_data *policy)
return 0;
}
+static unsigned int __cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
+{
+ int transition_latency_ns = cppc_get_transition_latency(cpu);
+
+ if (transition_latency_ns < 0)
+ return CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS / NSEC_PER_USEC;
+
+ return transition_latency_ns / NSEC_PER_USEC;
+}
+
/*
* The PCC subspace describes the rate at which platform can accept commands
* on the shared PCC channel (including READs which do not count towards freq
@@ -332,12 +340,12 @@ static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
return 10000;
}
}
- return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
+ return __cppc_cpufreq_get_transition_delay_us(cpu);
}
#else
static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
- return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
+ return __cppc_cpufreq_get_transition_delay_us(cpu);
}
#endif
@@ -684,8 +692,7 @@ static inline u64 get_delta(u64 t1, u64 t0)
return (u32)t1 - (u32)t0;
}
-static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
- struct cppc_perf_fb_ctrs *fb_ctrs_t0,
+static int cppc_perf_from_fbctrs(struct cppc_perf_fb_ctrs *fb_ctrs_t0,
struct cppc_perf_fb_ctrs *fb_ctrs_t1)
{
u64 delta_reference, delta_delivered;
@@ -725,8 +732,8 @@ static int cppc_get_perf_ctrs_sample(int cpu,
static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
{
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
struct cppc_cpudata *cpu_data;
u64 delivered_perf;
int ret;
@@ -736,8 +743,6 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
cpu_data = policy->driver_data;
- cpufreq_cpu_put(policy);
-
ret = cppc_get_perf_ctrs_sample(cpu, &fb_ctrs_t0, &fb_ctrs_t1);
if (ret) {
if (ret == -EFAULT)
@@ -747,8 +752,7 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
return 0;
}
- delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0,
- &fb_ctrs_t1);
+ delivered_perf = cppc_perf_from_fbctrs(&fb_ctrs_t0, &fb_ctrs_t1);
if (!delivered_perf)
goto out_invalid_counters;
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 015dd393eaba..cd1816a12bb9 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -103,6 +103,7 @@ static const struct of_device_id allowlist[] __initconst = {
* platforms using "operating-points-v2" property.
*/
static const struct of_device_id blocklist[] __initconst = {
+ { .compatible = "airoha,an7583", },
{ .compatible = "airoha,en7581", },
{ .compatible = "allwinner,sun50i-a100" },
@@ -188,9 +189,11 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "ti,omap3", },
{ .compatible = "ti,am625", },
{ .compatible = "ti,am62a7", },
+ { .compatible = "ti,am62d2", },
{ .compatible = "ti,am62p5", },
{ .compatible = "qcom,ipq5332", },
+ { .compatible = "qcom,ipq5424", },
{ .compatible = "qcom,ipq6018", },
{ .compatible = "qcom,ipq8064", },
{ .compatible = "qcom,ipq8074", },
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 506437489b4d..7d5079fd1688 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -104,7 +104,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
if (!transition_latency)
- transition_latency = CPUFREQ_ETERNAL;
+ transition_latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
cpumask_copy(policy->cpus, priv->cpus);
policy->driver_data = priv;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index fc7eace8b65b..852e024facc3 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -664,10 +664,10 @@ unlock:
static unsigned int cpufreq_parse_policy(char *str_governor)
{
- if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
+ if (!strncasecmp(str_governor, "performance", strlen("performance")))
return CPUFREQ_POLICY_PERFORMANCE;
- if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
+ if (!strncasecmp(str_governor, "powersave", strlen("powersave")))
return CPUFREQ_POLICY_POWERSAVE;
return CPUFREQ_POLICY_UNKNOWN;
@@ -914,7 +914,7 @@ static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
unsigned int freq = 0;
- unsigned int ret;
+ int ret;
if (!policy->governor || !policy->governor->store_setspeed)
return -EINVAL;
@@ -1121,7 +1121,8 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
if (has_target()) {
/* Update policy governor to the one used before hotplug. */
- gov = get_governor(policy->last_governor);
+ if (policy->last_governor[0] != '\0')
+ gov = get_governor(policy->last_governor);
if (gov) {
pr_debug("Restoring governor %s for cpu %d\n",
gov->name, policy->cpu);
@@ -1844,7 +1845,6 @@ static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, b
*/
unsigned int cpufreq_quick_get(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL;
unsigned long flags;
read_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -1859,7 +1859,7 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (policy)
return policy->cur;
@@ -1875,9 +1875,7 @@ EXPORT_SYMBOL(cpufreq_quick_get);
*/
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (policy)
return policy->max;
@@ -1893,9 +1891,7 @@ EXPORT_SYMBOL(cpufreq_quick_get_max);
*/
__weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (policy)
return policy->cpuinfo.max_freq;
@@ -1919,9 +1915,7 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
*/
unsigned int cpufreq_get(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (!policy)
return 0;
@@ -2750,9 +2744,7 @@ static void cpufreq_policy_refresh(struct cpufreq_policy *policy)
*/
void cpufreq_update_policy(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (!policy)
return;
@@ -2769,9 +2761,7 @@ EXPORT_SYMBOL(cpufreq_update_policy);
*/
void cpufreq_update_limits(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (!policy)
return;
@@ -2792,7 +2782,7 @@ int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
if (!policy->freq_table)
return -ENXIO;
- ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
+ ret = cpufreq_frequency_table_cpuinfo(policy);
if (ret) {
pr_err("%s: Policy frequency update failed\n", __func__);
return ret;
@@ -2921,10 +2911,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
return -EPROBE_DEFER;
if (!driver_data || !driver_data->verify || !driver_data->init ||
- !(driver_data->setpolicy || driver_data->target_index ||
- driver_data->target) ||
- (driver_data->setpolicy && (driver_data->target_index ||
- driver_data->target)) ||
+ (driver_data->target_index && driver_data->target) ||
+ (!!driver_data->setpolicy == (driver_data->target_index || driver_data->target)) ||
(!driver_data->get_intermediate != !driver_data->target_intermediate) ||
(!driver_data->online != !driver_data->offline) ||
(driver_data->adjust_perf && !driver_data->fast_switch))
@@ -2953,6 +2941,15 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
goto err_null_driver;
}
+ /*
+ * Mark support for the scheduler's frequency invariance engine for
+ * drivers that implement target(), target_index() or fast_switch().
+ */
+ if (!cpufreq_driver->setpolicy) {
+ static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
+ pr_debug("cpufreq: supports frequency invariance\n");
+ }
+
ret = subsys_interface_register(&cpufreq_interface);
if (ret)
goto err_boost_unreg;
@@ -2974,21 +2971,14 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
hp_online = ret;
ret = 0;
- /*
- * Mark support for the scheduler's frequency invariance engine for
- * drivers that implement target(), target_index() or fast_switch().
- */
- if (!cpufreq_driver->setpolicy) {
- static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
- pr_debug("supports frequency invariance");
- }
-
pr_debug("driver %s up and running\n", driver_data->name);
goto out;
err_if_unreg:
subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
+ if (!cpufreq_driver->setpolicy)
+ static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
remove_boost_sysfs_file();
err_null_driver:
write_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -3056,9 +3046,7 @@ static int __init cpufreq_core_init(void)
static bool cpufreq_policy_is_good_for_eas(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (!policy) {
pr_debug("cpufreq policy not set for CPU: %d\n", cpu);
return false;
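The rewritten registration check in cpufreq_register_driver() folds four clauses into one: a valid driver supplies exactly one of ->setpolicy or a target-style callback. The !! normalizes the pointer to 0/1 so that equality flags both invalid combinations. A truth table, reusing the callback names from the hunk:

/*
 * (!!setpolicy == (target_index || target)) evaluates to:
 *
 *   setpolicy  target-style  result  verdict
 *   ---------  ------------  ------  -----------------------------
 *   NULL       none          true    invalid: no way to set freqs
 *   NULL       present       false   valid: target-style driver
 *   set        none          false   valid: setpolicy driver
 *   set        present       true    invalid: ambiguous driver
 */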
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 56500b25d77c..cce6a8d113e1 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -152,9 +152,9 @@ static ssize_t sampling_down_factor_store(struct gov_attr_set *attr_set,
struct dbs_data *dbs_data = to_dbs_data(attr_set);
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
+ ret = kstrtouint(buf, 0, &input);
- if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
+ if (ret || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
dbs_data->sampling_down_factor = input;
@@ -168,9 +168,9 @@ static ssize_t up_threshold_store(struct gov_attr_set *attr_set,
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
+ ret = kstrtouint(buf, 0, &input);
- if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
+ if (ret || input > 100 || input <= cs_tuners->down_threshold)
return -EINVAL;
dbs_data->up_threshold = input;
@@ -184,10 +184,10 @@ static ssize_t down_threshold_store(struct gov_attr_set *attr_set,
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
+ ret = kstrtouint(buf, 0, &input);
/* cannot be lower than 1 otherwise freq will not fall */
- if (ret != 1 || input < 1 || input >= dbs_data->up_threshold)
+ if (ret || input < 1 || input >= dbs_data->up_threshold)
return -EINVAL;
cs_tuners->down_threshold = input;
@@ -201,9 +201,9 @@ static ssize_t ignore_nice_load_store(struct gov_attr_set *attr_set,
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
- if (ret != 1)
- return -EINVAL;
+ ret = kstrtouint(buf, 0, &input);
+ if (ret)
+ return ret;
if (input > 1)
input = 1;
@@ -226,10 +226,10 @@ static ssize_t freq_step_store(struct gov_attr_set *attr_set, const char *buf,
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
+ ret = kstrtouint(buf, 0, &input);
- if (ret != 1)
- return -EINVAL;
+ if (ret)
+ return ret;
if (input > 100)
input = 100;
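The conservative-governor stores switch from sscanf() to kstrtouint() because the latter rejects trailing garbage and out-of-range input outright, and its errno can be passed straight back to user space. A sketch of the stricter pattern, with hypothetical bounds:

static int parse_tuner(const char *buf, unsigned int max, unsigned int *out)
{
	unsigned int val;
	int ret;

	/* Base 0 accepts decimal, hex ("0x..") and octal ("0..") input. */
	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;	/* -EINVAL on "12abc", -ERANGE on overflow */

	if (val < 1 || val > max)
		return -EINVAL;

	*out = val;
	return 0;
}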
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 0e65d37c9231..a6ecc203f7b7 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -30,29 +30,6 @@ static struct od_ops od_ops;
static unsigned int default_powersave_bias;
/*
- * Not all CPUs want IO time to be accounted as busy; this depends on how
- * efficient idling at a higher frequency/voltage is.
- * Pavel Machek says this is not so for various generations of AMD and old
- * Intel systems.
- * Mike Chan (android.com) claims this is also not true for ARM.
- * Because of this, whitelist specific known (series) of CPUs by default, and
- * leave all others up to the user.
- */
-static int should_io_be_busy(void)
-{
-#if defined(CONFIG_X86)
- /*
- * For Intel, Core 2 (model 15) and later have an efficient idle.
- */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- boot_cpu_data.x86 == 6 &&
- boot_cpu_data.x86_model >= 15)
- return 1;
-#endif
- return 0;
-}
-
-/*
* Find right freq to be set now with powersave_bias on.
* Returns the freq_hi to be used right now and will set freq_hi_delay_us,
* freq_lo, and freq_lo_delay_us in percpu area for averaging freqs.
@@ -377,7 +354,7 @@ static int od_init(struct dbs_data *dbs_data)
dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
dbs_data->ignore_nice_load = 0;
tuners->powersave_bias = default_powersave_bias;
- dbs_data->io_is_busy = should_io_be_busy();
+ dbs_data->io_is_busy = od_should_io_be_busy();
dbs_data->tuners = tuners;
return 0;
diff --git a/drivers/cpufreq/cpufreq_ondemand.h b/drivers/cpufreq/cpufreq_ondemand.h
index 1af8e5c4b86f..2ca8f1aaf2e3 100644
--- a/drivers/cpufreq/cpufreq_ondemand.h
+++ b/drivers/cpufreq/cpufreq_ondemand.h
@@ -24,3 +24,26 @@ static inline struct od_policy_dbs_info *to_dbs_info(struct policy_dbs_info *pol
struct od_dbs_tuners {
unsigned int powersave_bias;
};
+
+#ifdef CONFIG_X86
+#include <asm/cpu_device_id.h>
+
+/*
+ * Not all CPUs want IO time to be accounted as busy; this depends on
+ * how efficient idling at a higher frequency/voltage is.
+ *
+ * Pavel Machek says this is not so for various generations of AMD and
+ * old Intel systems. Mike Chan (android.com) claims this is also not
+ * true for ARM.
+ *
+ * Because of this, select a known series of Intel CPUs (Family 6 and
+ * later) by default, and leave all others up to the user.
+ */
+static inline bool od_should_io_be_busy(void)
+{
+ return (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ boot_cpu_data.x86_vfm >= INTEL_PENTIUM_PRO);
+}
+#else
+static inline bool od_should_io_be_busy(void) { return false; }
+#endif
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 35de513af6c9..7f251daf03ce 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -28,22 +28,21 @@ static bool policy_has_boost_freq(struct cpufreq_policy *policy)
return false;
}
-int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
- struct cpufreq_frequency_table *table)
+int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy)
{
- struct cpufreq_frequency_table *pos;
+ struct cpufreq_frequency_table *pos, *table = policy->freq_table;
unsigned int min_freq = ~0;
unsigned int max_freq = 0;
- unsigned int freq;
+ unsigned int freq, i;
- cpufreq_for_each_valid_entry(pos, table) {
+ cpufreq_for_each_valid_entry_idx(pos, table, i) {
freq = pos->frequency;
if ((!cpufreq_boost_enabled() || !policy->boost_enabled)
&& (pos->flags & CPUFREQ_BOOST_FREQ))
continue;
- pr_debug("table entry %u: %u kHz\n", (int)(pos - table), freq);
+ pr_debug("table entry %u: %u kHz\n", i, freq);
if (freq < min_freq)
min_freq = freq;
if (freq > max_freq)
@@ -65,10 +64,9 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
return 0;
}
-int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
- struct cpufreq_frequency_table *table)
+int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy)
{
- struct cpufreq_frequency_table *pos;
+ struct cpufreq_frequency_table *pos, *table = policy->freq_table;
unsigned int freq, prev_smaller = 0;
bool found = false;
@@ -110,7 +108,7 @@ int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy)
if (!policy->freq_table)
return -ENODEV;
- return cpufreq_frequency_table_verify(policy, policy->freq_table);
+ return cpufreq_frequency_table_verify(policy);
}
EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);
@@ -128,7 +126,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
};
struct cpufreq_frequency_table *pos;
struct cpufreq_frequency_table *table = policy->freq_table;
- unsigned int freq, diff, i = 0;
+ unsigned int freq, diff, i;
int index;
pr_debug("request for target %u kHz (relation: %u) for cpu %u\n",
@@ -354,7 +352,7 @@ int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy)
return 0;
}
- ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
+ ret = cpufreq_frequency_table_cpuinfo(policy);
if (ret)
return ret;
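In freq_table.c the _idx flavour of the iteration macro replaces pointer arithmetic on the entry pointer. A sketch of the macro in use:

static unsigned int count_valid_entries(struct cpufreq_frequency_table *table)
{
	struct cpufreq_frequency_table *pos;
	unsigned int i, n = 0;

	/* i always holds the index of pos within table. */
	cpufreq_for_each_valid_entry_idx(pos, table, i)
		n++;

	return n;
}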
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index db1c88e9d3f9..e93697d3edfd 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -442,7 +442,7 @@ soc_opp_out:
}
if (of_property_read_u32(np, "clock-latency", &transition_latency))
- transition_latency = CPUFREQ_ETERNAL;
+ transition_latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
/*
* Calculate the ramp time for max voltage change in the
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 06a1c7dd081f..38897bb14a2c 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -620,24 +620,9 @@ static int min_perf_pct_min(void)
(cpu->pstate.min_pstate * 100 / turbo_pstate) : 0;
}
-static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
-{
- u64 epb;
- int ret;
-
- if (!boot_cpu_has(X86_FEATURE_EPB))
- return -ENXIO;
-
- ret = rdmsrq_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
- if (ret)
- return (s16)ret;
-
- return (s16)(epb & 0x0f);
-}
-
static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
{
- s16 epp;
+ s16 epp = -EOPNOTSUPP;
if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
/*
@@ -651,34 +636,13 @@ static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
return epp;
}
epp = (hwp_req_data >> 24) & 0xff;
- } else {
- /* When there is no EPP present, HWP uses EPB settings */
- epp = intel_pstate_get_epb(cpu_data);
}
return epp;
}
-static int intel_pstate_set_epb(int cpu, s16 pref)
-{
- u64 epb;
- int ret;
-
- if (!boot_cpu_has(X86_FEATURE_EPB))
- return -ENXIO;
-
- ret = rdmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
- if (ret)
- return ret;
-
- epb = (epb & ~0x0f) | pref;
- wrmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
-
- return 0;
-}
-
/*
- * EPP/EPB display strings corresponding to EPP index in the
+ * EPP display strings corresponding to EPP index in the
* energy_perf_strings[]
* index String
*-------------------------------------
@@ -782,7 +746,7 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
u32 raw_epp)
{
int epp = -EINVAL;
- int ret;
+ int ret = -EOPNOTSUPP;
if (!pref_index)
epp = cpu_data->epp_default;
@@ -802,10 +766,6 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
return -EBUSY;
ret = intel_pstate_set_epp(cpu_data, epp);
- } else {
- if (epp == -EINVAL)
- epp = (pref_index - 1) << 2;
- ret = intel_pstate_set_epb(cpu_data->cpu, epp);
}
return ret;
@@ -937,11 +897,19 @@ static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
cpufreq_freq_attr_ro(base_frequency);
+enum hwp_cpufreq_attr_index {
+ HWP_BASE_FREQUENCY_INDEX = 0,
+ HWP_PERFORMANCE_PREFERENCE_INDEX,
+ HWP_PERFORMANCE_AVAILABLE_PREFERENCES_INDEX,
+ HWP_CPUFREQ_ATTR_COUNT,
+};
+
static struct freq_attr *hwp_cpufreq_attrs[] = {
- &energy_performance_preference,
- &energy_performance_available_preferences,
- &base_frequency,
- NULL,
+ [HWP_BASE_FREQUENCY_INDEX] = &base_frequency,
+ [HWP_PERFORMANCE_PREFERENCE_INDEX] = &energy_performance_preference,
+ [HWP_PERFORMANCE_AVAILABLE_PREFERENCES_INDEX] =
+ &energy_performance_available_preferences,
+ [HWP_CPUFREQ_ATTR_COUNT] = NULL,
};
static bool no_cas __ro_after_init;
@@ -1034,8 +1002,8 @@ static bool hybrid_register_perf_domain(unsigned int cpu)
if (!cpu_dev)
return false;
- if (em_dev_register_perf_domain(cpu_dev, HYBRID_EM_STATE_COUNT, &cb,
- cpumask_of(cpu), false))
+ if (em_dev_register_pd_no_update(cpu_dev, HYBRID_EM_STATE_COUNT, &cb,
+ cpumask_of(cpu), false))
return false;
cpudata->pd_registered = true;
@@ -1337,9 +1305,8 @@ static void intel_pstate_hwp_set(unsigned int cpu)
if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
value &= ~GENMASK_ULL(31, 24);
value |= (u64)epp << 24;
- } else {
- intel_pstate_set_epb(cpu, epp);
}
+
skip_epp:
WRITE_ONCE(cpu_data->hwp_req_cached, value);
wrmsrq_on_cpu(cpu, MSR_HWP_REQUEST, value);
@@ -1411,6 +1378,9 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu)
#define POWER_CTL_EE_ENABLE 1
#define POWER_CTL_EE_DISABLE 2
+/* Enable bit for Dynamic Efficiency Control (DEC) */
+#define POWER_CTL_DEC_ENABLE 27
+
static int power_ctl_ee_state;
static void set_power_ctl_ee_state(bool input)
@@ -1502,9 +1472,7 @@ static void __intel_pstate_update_max_freq(struct cpufreq_policy *policy,
static bool intel_pstate_update_max_freq(struct cpudata *cpudata)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpudata->cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpudata->cpu);
if (!policy)
return false;
@@ -1695,41 +1663,40 @@ unlock_driver:
return count;
}
-static void update_qos_request(enum freq_qos_req_type type)
+static void update_cpu_qos_request(int cpu, enum freq_qos_req_type type)
{
+ struct cpudata *cpudata = all_cpu_data[cpu];
+ unsigned int freq = cpudata->pstate.turbo_freq;
struct freq_qos_request *req;
- struct cpufreq_policy *policy;
- int i;
-
- for_each_possible_cpu(i) {
- struct cpudata *cpu = all_cpu_data[i];
- unsigned int freq, perf_pct;
- policy = cpufreq_cpu_get(i);
- if (!policy)
- continue;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return;
- req = policy->driver_data;
- cpufreq_cpu_put(policy);
+ req = policy->driver_data;
+ if (!req)
+ return;
- if (!req)
- continue;
+ if (hwp_active)
+ intel_pstate_get_hwp_cap(cpudata);
- if (hwp_active)
- intel_pstate_get_hwp_cap(cpu);
+ if (type == FREQ_QOS_MIN) {
+ freq = DIV_ROUND_UP(freq * global.min_perf_pct, 100);
+ } else {
+ req++;
+ freq = (freq * global.max_perf_pct) / 100;
+ }
- if (type == FREQ_QOS_MIN) {
- perf_pct = global.min_perf_pct;
- } else {
- req++;
- perf_pct = global.max_perf_pct;
- }
+ if (freq_qos_update_request(req, freq) < 0)
+ pr_warn("Failed to update freq constraint: CPU%d\n", cpu);
+}
- freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * perf_pct, 100);
+static void update_qos_requests(enum freq_qos_req_type type)
+{
+ int i;
- if (freq_qos_update_request(req, freq) < 0)
- pr_warn("Failed to update freq constraint: CPU%d\n", i);
- }
+ for_each_possible_cpu(i)
+ update_cpu_qos_request(i, type);
}
static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
@@ -1758,7 +1725,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
if (intel_pstate_driver == &intel_pstate)
intel_pstate_update_policies();
else
- update_qos_request(FREQ_QOS_MAX);
+ update_qos_requests(FREQ_QOS_MAX);
mutex_unlock(&intel_pstate_driver_lock);
@@ -1792,7 +1759,7 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
if (intel_pstate_driver == &intel_pstate)
intel_pstate_update_policies();
else
- update_qos_request(FREQ_QOS_MIN);
+ update_qos_requests(FREQ_QOS_MIN);
mutex_unlock(&intel_pstate_driver_lock);
@@ -2575,7 +2542,7 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
* that sample.time will always be reset before setting the utilization
* update hook and make the caller skip the sample then.
*/
- if (cpu->last_sample_time) {
+ if (likely(cpu->last_sample_time)) {
intel_pstate_calc_avg_perf(cpu);
return true;
}
@@ -2793,6 +2760,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
X86_MATCH(INTEL_GRANITERAPIDS_X, core_funcs),
X86_MATCH(INTEL_ATOM_CRESTMONT, core_funcs),
X86_MATCH(INTEL_ATOM_CRESTMONT_X, core_funcs),
+ X86_MATCH(INTEL_ATOM_DARKMONT_X, core_funcs),
{}
};
#endif
@@ -3801,6 +3769,26 @@ static const struct x86_cpu_id intel_hybrid_scaling_factor[] = {
{}
};
+static bool hwp_check_epp(void)
+{
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP))
+ return true;
+
+ /* Without EPP support, don't expose EPP-related sysfs attributes. */
+ hwp_cpufreq_attrs[HWP_PERFORMANCE_PREFERENCE_INDEX] = NULL;
+ hwp_cpufreq_attrs[HWP_PERFORMANCE_AVAILABLE_PREFERENCES_INDEX] = NULL;
+
+ return false;
+}
+
+static bool hwp_check_dec(void)
+{
+ u64 power_ctl;
+
+ rdmsrq(MSR_IA32_POWER_CTL, power_ctl);
+ return !!(power_ctl & BIT(POWER_CTL_DEC_ENABLE));
+}
+
static int __init intel_pstate_init(void)
{
static struct cpudata **_all_cpu_data;
@@ -3821,23 +3809,32 @@ static int __init intel_pstate_init(void)
id = x86_match_cpu(hwp_support_ids);
if (id) {
- hwp_forced = intel_pstate_hwp_is_enabled();
+ bool epp_present = hwp_check_epp();
- if (hwp_forced)
+ /*
+ * If HWP is enabled already, there is no choice but to deal
+ * with it.
+ */
+ hwp_forced = intel_pstate_hwp_is_enabled();
+ if (hwp_forced) {
pr_info("HWP enabled by BIOS\n");
- else if (no_load)
+ no_hwp = 0;
+ } else if (no_load) {
return -ENODEV;
+ } else if (!epp_present && !hwp_check_dec()) {
+ /*
+ * Avoid enabling HWP for processors without EPP support
+ * unless the Dynamic Efficiency Control (DEC) enable
+ * bit (MSR_IA32_POWER_CTL, bit 27) is set: an EPP-less
+ * HWP implementation is incomplete, and that corner
+ * case is generally problematic to support.
+ */
+ no_hwp = 1;
+ }
copy_cpu_funcs(&core_funcs);
- /*
- * Avoid enabling HWP for processors without EPP support,
- * because that means incomplete HWP implementation which is a
- * corner case and supporting it is generally problematic.
- *
- * If HWP is enabled already, though, there is no choice but to
- * deal with it.
- */
- if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
+
+ if (!no_hwp) {
hwp_active = true;
hwp_mode_bdw = id->driver_data;
intel_pstate.attr = hwp_cpufreq_attrs;
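Reordering hwp_cpufreq_attrs[] is what makes hwp_check_epp() workable: the array is NULL-terminated, so clearing the two EPP slots must leave base_frequency in slot 0, where the new NULL entries simply terminate the list early instead of hiding it. A sketch of the two layouts, reusing the names from the hunk:

/*
 * With EPP:     { &base_frequency, &epp_pref, &epp_avail, NULL }
 * Without EPP:  { &base_frequency, NULL, NULL, NULL }
 *                                  ^-- iteration stops here, so only
 *                                      base_frequency is exposed
 */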
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index ba0e08c8486a..49e76b44468a 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -953,6 +953,9 @@ static void __exit longhaul_exit(void)
struct cpufreq_policy *policy = cpufreq_cpu_get(0);
int i;
+ if (unlikely(!policy))
+ return;
+
for (i = 0; i < numscales; i++) {
if (mults[i] == maxmult) {
struct cpufreq_freqs freqs;
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
index 74f1b4c796e4..ae4500ab4891 100644
--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -24,6 +24,8 @@
#define POLL_USEC 1000
#define TIMEOUT_USEC 300000
+#define FDVFS_FDIV_HZ (26 * 1000)
+
enum {
REG_FREQ_LUT_TABLE,
REG_FREQ_ENABLE,
@@ -35,7 +37,14 @@ enum {
REG_ARRAY_SIZE,
};
-struct mtk_cpufreq_data {
+struct mtk_cpufreq_priv {
+ struct device *dev;
+ const struct mtk_cpufreq_variant *variant;
+ void __iomem *fdvfs;
+};
+
+struct mtk_cpufreq_domain {
+ struct mtk_cpufreq_priv *parent;
struct cpufreq_frequency_table *table;
void __iomem *reg_bases[REG_ARRAY_SIZE];
struct resource *res;
@@ -43,20 +52,51 @@ struct mtk_cpufreq_data {
int nr_opp;
};
-static const u16 cpufreq_mtk_offsets[REG_ARRAY_SIZE] = {
- [REG_FREQ_LUT_TABLE] = 0x0,
- [REG_FREQ_ENABLE] = 0x84,
- [REG_FREQ_PERF_STATE] = 0x88,
- [REG_FREQ_HW_STATE] = 0x8c,
- [REG_EM_POWER_TBL] = 0x90,
- [REG_FREQ_LATENCY] = 0x110,
+struct mtk_cpufreq_variant {
+ int (*init)(struct mtk_cpufreq_priv *priv);
+ const u16 reg_offsets[REG_ARRAY_SIZE];
+ const bool is_hybrid_dvfs;
+};
+
+static const struct mtk_cpufreq_variant cpufreq_mtk_base_variant = {
+ .reg_offsets = {
+ [REG_FREQ_LUT_TABLE] = 0x0,
+ [REG_FREQ_ENABLE] = 0x84,
+ [REG_FREQ_PERF_STATE] = 0x88,
+ [REG_FREQ_HW_STATE] = 0x8c,
+ [REG_EM_POWER_TBL] = 0x90,
+ [REG_FREQ_LATENCY] = 0x110,
+ },
+};
+
+static int mtk_cpufreq_hw_mt8196_init(struct mtk_cpufreq_priv *priv)
+{
+ priv->fdvfs = devm_of_iomap(priv->dev, priv->dev->of_node, 0, NULL);
+ if (IS_ERR(priv->fdvfs))
+ return dev_err_probe(priv->dev, PTR_ERR(priv->fdvfs),
+ "failed to get fdvfs iomem\n");
+
+ return 0;
+}
+
+static const struct mtk_cpufreq_variant cpufreq_mtk_mt8196_variant = {
+ .init = mtk_cpufreq_hw_mt8196_init,
+ .reg_offsets = {
+ [REG_FREQ_LUT_TABLE] = 0x0,
+ [REG_FREQ_ENABLE] = 0x84,
+ [REG_FREQ_PERF_STATE] = 0x88,
+ [REG_FREQ_HW_STATE] = 0x8c,
+ [REG_EM_POWER_TBL] = 0x90,
+ [REG_FREQ_LATENCY] = 0x114,
+ },
+ .is_hybrid_dvfs = true,
};
static int __maybe_unused
mtk_cpufreq_get_cpu_power(struct device *cpu_dev, unsigned long *uW,
unsigned long *KHz)
{
- struct mtk_cpufreq_data *data;
+ struct mtk_cpufreq_domain *data;
struct cpufreq_policy *policy;
int i;
@@ -80,19 +120,38 @@ mtk_cpufreq_get_cpu_power(struct device *cpu_dev, unsigned long *uW,
return 0;
}
+static void mtk_cpufreq_hw_fdvfs_switch(unsigned int target_freq,
+ struct cpufreq_policy *policy)
+{
+ struct mtk_cpufreq_domain *data = policy->driver_data;
+ struct mtk_cpufreq_priv *priv = data->parent;
+ unsigned int cpu;
+
+ target_freq = DIV_ROUND_UP(target_freq, FDVFS_FDIV_HZ);
+ for_each_cpu(cpu, policy->real_cpus) {
+ writel_relaxed(target_freq, priv->fdvfs + cpu * 4);
+ }
+}
+
static int mtk_cpufreq_hw_target_index(struct cpufreq_policy *policy,
unsigned int index)
{
- struct mtk_cpufreq_data *data = policy->driver_data;
-
- writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
+ struct mtk_cpufreq_domain *data = policy->driver_data;
+ unsigned int target_freq;
+
+ if (data->parent->fdvfs) {
+ target_freq = policy->freq_table[index].frequency;
+ mtk_cpufreq_hw_fdvfs_switch(target_freq, policy);
+ } else {
+ writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
+ }
return 0;
}
static unsigned int mtk_cpufreq_hw_get(unsigned int cpu)
{
- struct mtk_cpufreq_data *data;
+ struct mtk_cpufreq_domain *data;
struct cpufreq_policy *policy;
unsigned int index;
@@ -111,18 +170,21 @@ static unsigned int mtk_cpufreq_hw_get(unsigned int cpu)
static unsigned int mtk_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
- struct mtk_cpufreq_data *data = policy->driver_data;
+ struct mtk_cpufreq_domain *data = policy->driver_data;
unsigned int index;
index = cpufreq_table_find_index_dl(policy, target_freq, false);
- writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
+ if (data->parent->fdvfs)
+ mtk_cpufreq_hw_fdvfs_switch(target_freq, policy);
+ else
+ writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
return policy->freq_table[index].frequency;
}
static int mtk_cpu_create_freq_table(struct platform_device *pdev,
- struct mtk_cpufreq_data *data)
+ struct mtk_cpufreq_domain *data)
{
struct device *dev = &pdev->dev;
u32 temp, i, freq, prev_freq = 0;
@@ -157,9 +219,9 @@ static int mtk_cpu_create_freq_table(struct platform_device *pdev,
static int mtk_cpu_resources_init(struct platform_device *pdev,
struct cpufreq_policy *policy,
- const u16 *offsets)
+ struct mtk_cpufreq_priv *priv)
{
- struct mtk_cpufreq_data *data;
+ struct mtk_cpufreq_domain *data;
struct device *dev = &pdev->dev;
struct resource *res;
struct of_phandle_args args;
@@ -180,6 +242,15 @@ static int mtk_cpu_resources_init(struct platform_device *pdev,
index = args.args[0];
of_node_put(args.np);
+ /*
+ * On SoCs with hybrid DVFS, such as the MT8196, the first declared
+ * register range belongs to FDVFS; the frequency-domain MMIO ranges
+ * follow it.
+ */
+ if (priv->variant->is_hybrid_dvfs)
+ index++;
+
+ data->parent = priv;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, index);
if (!res) {
dev_err(dev, "failed to get mem resource %d\n", index);
@@ -202,7 +273,7 @@ static int mtk_cpu_resources_init(struct platform_device *pdev,
data->res = res;
for (i = REG_FREQ_LUT_TABLE; i < REG_ARRAY_SIZE; i++)
- data->reg_bases[i] = base + offsets[i];
+ data->reg_bases[i] = base + priv->variant->reg_offsets[i];
ret = mtk_cpu_create_freq_table(pdev, data);
if (ret) {
@@ -223,7 +294,7 @@ static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
{
struct platform_device *pdev = cpufreq_get_driver_data();
int sig, pwr_hw = CPUFREQ_HW_STATUS | SVS_HW_STATUS;
- struct mtk_cpufreq_data *data;
+ struct mtk_cpufreq_domain *data;
unsigned int latency;
int ret;
@@ -238,7 +309,7 @@ static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
latency = readl_relaxed(data->reg_bases[REG_FREQ_LATENCY]) * 1000;
if (!latency)
- latency = CPUFREQ_ETERNAL;
+ latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
policy->cpuinfo.transition_latency = latency;
policy->fast_switch_possible = true;
@@ -262,7 +333,7 @@ static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
static void mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
- struct mtk_cpufreq_data *data = policy->driver_data;
+ struct mtk_cpufreq_domain *data = policy->driver_data;
struct resource *res = data->res;
void __iomem *base = data->base;
@@ -275,7 +346,7 @@ static void mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
static void mtk_cpufreq_register_em(struct cpufreq_policy *policy)
{
struct em_data_callback em_cb = EM_DATA_CB(mtk_cpufreq_get_cpu_power);
- struct mtk_cpufreq_data *data = policy->driver_data;
+ struct mtk_cpufreq_domain *data = policy->driver_data;
em_dev_register_perf_domain(get_cpu_device(policy->cpu), data->nr_opp,
&em_cb, policy->cpus, true);
@@ -297,6 +368,7 @@ static struct cpufreq_driver cpufreq_mtk_hw_driver = {
static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
{
+ struct mtk_cpufreq_priv *priv;
const void *data;
int ret, cpu;
struct device *cpu_dev;
@@ -320,7 +392,20 @@ static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
if (!data)
return -EINVAL;
- platform_set_drvdata(pdev, (void *) data);
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->variant = data;
+ priv->dev = &pdev->dev;
+
+ if (priv->variant->init) {
+ ret = priv->variant->init(priv);
+ if (ret)
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
cpufreq_mtk_hw_driver.driver_data = pdev;
ret = cpufreq_register_driver(&cpufreq_mtk_hw_driver);
@@ -336,7 +421,8 @@ static void mtk_cpufreq_hw_driver_remove(struct platform_device *pdev)
}
static const struct of_device_id mtk_cpufreq_hw_match[] = {
- { .compatible = "mediatek,cpufreq-hw", .data = &cpufreq_mtk_offsets },
+ { .compatible = "mediatek,cpufreq-hw", .data = &cpufreq_mtk_base_variant },
+ { .compatible = "mediatek,mt8196-cpufreq-hw", .data = &cpufreq_mtk_mt8196_variant },
{}
};
MODULE_DEVICE_TABLE(of, mtk_cpufreq_hw_match);
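The MediaTek rework is the usual per-SoC variant pattern: each compatible's .data points at a struct holding the register offsets plus an optional init hook, fetched once at probe. A sketch of the lookup, assuming of_device_get_match_data() (helper name invented for illustration):

static int variant_setup(struct platform_device *pdev,
			 struct mtk_cpufreq_priv *priv)
{
	const struct mtk_cpufreq_variant *variant;

	variant = of_device_get_match_data(&pdev->dev);
	if (!variant)
		return -EINVAL;

	priv->dev = &pdev->dev;
	priv->variant = variant;

	/* SoC-specific setup, e.g. mapping the FDVFS block on MT8196. */
	return variant->init ? variant->init(priv) : 0;
}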
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index f3f02c4b6888..5d50a231f944 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -123,7 +123,7 @@ static int mtk_cpufreq_voltage_tracking(struct mtk_cpu_dvfs_info *info,
soc_data->sram_max_volt);
return ret;
}
- } else if (pre_vproc > new_vproc) {
+ } else {
vproc = max(new_vproc,
pre_vsram - soc_data->max_volt_shift);
ret = regulator_set_voltage(proc_reg, vproc,
@@ -320,7 +320,6 @@ static int mtk_cpufreq_opp_notifier(struct notifier_block *nb,
struct dev_pm_opp *new_opp;
struct mtk_cpu_dvfs_info *info;
unsigned long freq, volt;
- struct cpufreq_policy *policy;
int ret = 0;
info = container_of(nb, struct mtk_cpu_dvfs_info, opp_nb);
@@ -353,12 +352,12 @@ static int mtk_cpufreq_opp_notifier(struct notifier_block *nb,
}
dev_pm_opp_put(new_opp);
- policy = cpufreq_cpu_get(info->opp_cpu);
- if (policy) {
+
+ struct cpufreq_policy *policy __free(put_cpufreq_policy)
+ = cpufreq_cpu_get(info->opp_cpu);
+ if (policy)
cpufreq_driver_target(policy, freq / 1000,
CPUFREQ_RELATION_L);
- cpufreq_cpu_put(policy);
- }
}
}
@@ -404,9 +403,11 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
}
info->cpu_clk = clk_get(cpu_dev, "cpu");
- if (IS_ERR(info->cpu_clk))
- return dev_err_probe(cpu_dev, PTR_ERR(info->cpu_clk),
- "cpu%d: failed to get cpu clk\n", cpu);
+ if (IS_ERR(info->cpu_clk)) {
+ ret = PTR_ERR(info->cpu_clk);
+ dev_err_probe(cpu_dev, ret, "cpu%d: failed to get cpu clk\n", cpu);
+ goto out_put_cci_dev;
+ }
info->inter_clk = clk_get(cpu_dev, "intermediate");
if (IS_ERR(info->inter_clk)) {
@@ -552,6 +553,10 @@ out_free_inter_clock:
out_free_mux_clock:
clk_put(info->cpu_clk);
+out_put_cci_dev:
+ if (info->soc_data->ccifreq_supported)
+ put_device(info->cci_dev);
+
return ret;
}
@@ -569,6 +574,8 @@ static void mtk_cpu_dvfs_info_release(struct mtk_cpu_dvfs_info *info)
clk_put(info->inter_clk);
dev_pm_opp_of_cpumask_remove_table(&info->cpus);
dev_pm_opp_unregister_notifier(info->cpu_dev, &info->opp_nb);
+ if (info->soc_data->ccifreq_supported)
+ put_device(info->cci_dev);
}
static int mtk_cpufreq_init(struct cpufreq_policy *policy)
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index 54f8117103c8..765a5bb81829 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -200,6 +200,10 @@ static int qcom_cpufreq_kryo_name_version(struct device *cpu_dev,
case QCOM_ID_IPQ9574:
drv->versions = 1 << (unsigned int)(*speedbin);
break;
+ case QCOM_ID_IPQ5424:
+ case QCOM_ID_IPQ5404:
+ drv->versions = (*speedbin == 0x3b) ? BIT(1) : BIT(0);
+ break;
case QCOM_ID_MSM8996SG:
case QCOM_ID_APQ8096SG:
drv->versions = 1 << ((unsigned int)(*speedbin) + 4);
@@ -591,6 +595,7 @@ static const struct of_device_id qcom_cpufreq_match_list[] __initconst __maybe_u
{ .compatible = "qcom,msm8996", .data = &match_data_kryo },
{ .compatible = "qcom,qcs404", .data = &match_data_qcs404 },
{ .compatible = "qcom,ipq5332", .data = &match_data_kryo },
+ { .compatible = "qcom,ipq5424", .data = &match_data_kryo },
{ .compatible = "qcom,ipq6018", .data = &match_data_ipq6018 },
{ .compatible = "qcom,ipq8064", .data = &match_data_ipq8064 },
{ .compatible = "qcom,ipq8074", .data = &match_data_ipq8074 },
diff --git a/drivers/cpufreq/rcpufreq_dt.rs b/drivers/cpufreq/rcpufreq_dt.rs
index 7e1fbf9a091f..53923b8ef7a1 100644
--- a/drivers/cpufreq/rcpufreq_dt.rs
+++ b/drivers/cpufreq/rcpufreq_dt.rs
@@ -28,15 +28,11 @@ fn find_supply_name_exact(dev: &Device, name: &str) -> Option<CString> {
/// Finds supply name for the CPU from DT.
fn find_supply_names(dev: &Device, cpu: cpu::CpuId) -> Option<KVec<CString>> {
// Try "cpu0" for older DTs, fallback to "cpu".
- let name = (cpu.as_u32() == 0)
+ (cpu.as_u32() == 0)
.then(|| find_supply_name_exact(dev, "cpu0"))
.flatten()
- .or_else(|| find_supply_name_exact(dev, "cpu"))?;
-
- let mut list = KVec::with_capacity(1, GFP_KERNEL).ok()?;
- list.push(name, GFP_KERNEL).ok()?;
-
- Some(list)
+ .or_else(|| find_supply_name_exact(dev, "cpu"))
+ .and_then(|name| kernel::kvec![name].ok())
}
/// Represents the cpufreq dt device.
@@ -123,7 +119,7 @@ impl cpufreq::Driver for CPUFreqDTDriver {
let mut transition_latency = opp_table.max_transition_latency_ns() as u32;
if transition_latency == 0 {
- transition_latency = cpufreq::ETERNAL_LATENCY_NS;
+ transition_latency = cpufreq::DEFAULT_TRANSITION_LATENCY_NS;
}
policy
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 76c888ed8d16..4215621deb3f 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -554,17 +554,15 @@ out_dmc0:
static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(0);
int ret;
- struct cpufreq_policy *policy;
- policy = cpufreq_cpu_get(0);
if (!policy) {
pr_debug("cpufreq: get no policy for cpu0\n");
return NOTIFY_BAD;
}
ret = cpufreq_driver_target(policy, SLEEP_FREQ, 0);
- cpufreq_cpu_put(policy);
if (ret < 0)
return NOTIFY_BAD;
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index ef078426bfd5..d2a110079f5f 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -15,6 +15,7 @@
#include <linux/energy_model.h>
#include <linux/export.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
@@ -293,7 +294,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
latency = perf_ops->transition_latency_get(ph, domain);
if (!latency)
- latency = CPUFREQ_ETERNAL;
+ latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
policy->cpuinfo.transition_latency = latency;
@@ -424,6 +425,15 @@ static bool scmi_dev_used_by_cpus(struct device *scmi_dev)
return true;
}
+ /*
+ * Older Broadcom STB chips had a "clocks" property for CPU node(s)
+ * that did not match the SCMI performance protocol node. If we get
+ * here, we are running with such an older Device Tree, so return
+ * true to preserve backwards compatibility.
+ */
+ if (of_machine_is_compatible("brcm,brcmstb"))
+ return true;
+
return false;
}
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index dcbb0ae7dd47..e530345baddf 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -157,7 +157,7 @@ static int scpi_cpufreq_init(struct cpufreq_policy *policy)
latency = scpi_ops->get_transition_latency(cpu_dev);
if (!latency)
- latency = CPUFREQ_ETERNAL;
+ latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
policy->cpuinfo.transition_latency = latency;
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index 9c0b01e00508..642ddb9ea217 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -89,11 +89,9 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
static int sh_cpufreq_verify(struct cpufreq_policy_data *policy)
{
struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
- struct cpufreq_frequency_table *freq_table;
- freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
- if (freq_table)
- return cpufreq_frequency_table_verify(policy, freq_table);
+ if (policy->freq_table)
+ return cpufreq_frequency_table_verify(policy);
cpufreq_verify_within_cpu_limits(policy);
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 707c71090cc3..2a1550e1aa21 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -182,7 +182,7 @@ static int spear_cpufreq_probe(struct platform_device *pdev)
if (of_property_read_u32(np, "clock-latency",
&spear_cpufreq.transition_latency))
- spear_cpufreq.transition_latency = CPUFREQ_ETERNAL;
+ spear_cpufreq.transition_latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
cnt = of_property_count_u32_elems(np, "cpufreq_tbl");
if (cnt <= 0) {
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
index 0b66df4ed513..f8b42e981635 100644
--- a/drivers/cpufreq/speedstep-lib.c
+++ b/drivers/cpufreq/speedstep-lib.c
@@ -378,16 +378,16 @@ EXPORT_SYMBOL_GPL(speedstep_detect_processor);
* DETECT SPEEDSTEP SPEEDS *
*********************************************************************/
-unsigned int speedstep_get_freqs(enum speedstep_processor processor,
- unsigned int *low_speed,
- unsigned int *high_speed,
- unsigned int *transition_latency,
- void (*set_state) (unsigned int state))
+int speedstep_get_freqs(enum speedstep_processor processor,
+ unsigned int *low_speed,
+ unsigned int *high_speed,
+ unsigned int *transition_latency,
+ void (*set_state)(unsigned int state))
{
unsigned int prev_speed;
- unsigned int ret = 0;
unsigned long flags;
ktime_t tv1, tv2;
+ int ret = 0;
if ((!processor) || (!low_speed) || (!high_speed) || (!set_state))
return -EINVAL;
diff --git a/drivers/cpufreq/speedstep-lib.h b/drivers/cpufreq/speedstep-lib.h
index dc762ea786be..48329647d4c4 100644
--- a/drivers/cpufreq/speedstep-lib.h
+++ b/drivers/cpufreq/speedstep-lib.h
@@ -41,8 +41,8 @@ extern unsigned int speedstep_get_frequency(enum speedstep_processor processor);
* SPEEDSTEP_LOW; the second argument is zero so that no
* cpufreq_notify_transition calls are initiated.
*/
-extern unsigned int speedstep_get_freqs(enum speedstep_processor processor,
- unsigned int *low_speed,
- unsigned int *high_speed,
- unsigned int *transition_latency,
- void (*set_state) (unsigned int state));
+extern int speedstep_get_freqs(enum speedstep_processor processor,
+ unsigned int *low_speed,
+ unsigned int *high_speed,
+ unsigned int *transition_latency,
+ void (*set_state)(unsigned int state));
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
index cbabb726c664..136ab102f636 100644
--- a/drivers/cpufreq/tegra186-cpufreq.c
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -93,23 +93,26 @@ static int tegra186_cpufreq_set_target(struct cpufreq_policy *policy,
{
struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
struct cpufreq_frequency_table *tbl = policy->freq_table + index;
- unsigned int edvd_offset = data->cpus[policy->cpu].edvd_offset;
+ unsigned int edvd_offset;
u32 edvd_val = tbl->driver_data;
+ u32 cpu;
- writel(edvd_val, data->regs + edvd_offset);
+ for_each_cpu(cpu, policy->cpus) {
+ edvd_offset = data->cpus[cpu].edvd_offset;
+ writel(edvd_val, data->regs + edvd_offset);
+ }
return 0;
}
static unsigned int tegra186_cpufreq_get(unsigned int cpu)
{
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
struct tegra186_cpufreq_cluster *cluster;
- struct cpufreq_policy *policy;
unsigned int edvd_offset, cluster_id;
u32 ndiv;
- policy = cpufreq_cpu_get(cpu);
if (!policy)
return 0;
@@ -117,7 +120,6 @@ static unsigned int tegra186_cpufreq_get(unsigned int cpu)
ndiv = readl(data->regs + edvd_offset) & EDVD_CORE_VOLT_FREQ_F_MASK;
cluster_id = data->cpus[policy->cpu].bpmp_cluster_id;
cluster = &data->clusters[cluster_id];
- cpufreq_cpu_put(policy);
return (cluster->ref_clk_khz * ndiv) / cluster->div;
}
@@ -134,13 +136,14 @@ static struct cpufreq_driver tegra186_cpufreq_driver = {
static struct cpufreq_frequency_table *init_vhint_table(
struct platform_device *pdev, struct tegra_bpmp *bpmp,
- struct tegra186_cpufreq_cluster *cluster, unsigned int cluster_id)
+ struct tegra186_cpufreq_cluster *cluster, unsigned int cluster_id,
+ int *num_rates)
{
struct cpufreq_frequency_table *table;
struct mrq_cpu_vhint_request req;
struct tegra_bpmp_message msg;
struct cpu_vhint_data *data;
- int err, i, j, num_rates = 0;
+ int err, i, j;
dma_addr_t phys;
void *virt;
@@ -170,6 +173,7 @@ static struct cpufreq_frequency_table *init_vhint_table(
goto free;
}
+ *num_rates = 0;
for (i = data->vfloor; i <= data->vceil; i++) {
u16 ndiv = data->ndiv[i];
@@ -180,10 +184,10 @@ static struct cpufreq_frequency_table *init_vhint_table(
if (i > 0 && ndiv == data->ndiv[i - 1])
continue;
- num_rates++;
+ (*num_rates)++;
}
- table = devm_kcalloc(&pdev->dev, num_rates + 1, sizeof(*table),
+ table = devm_kcalloc(&pdev->dev, *num_rates + 1, sizeof(*table),
GFP_KERNEL);
if (!table) {
table = ERR_PTR(-ENOMEM);
@@ -225,7 +229,9 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev)
{
struct tegra186_cpufreq_data *data;
struct tegra_bpmp *bpmp;
- unsigned int i = 0, err;
+ unsigned int i = 0, err, edvd_offset;
+ int num_rates = 0;
+ u32 edvd_val, cpu;
data = devm_kzalloc(&pdev->dev,
struct_size(data, clusters, TEGRA186_NUM_CLUSTERS),
@@ -248,10 +254,21 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev)
for (i = 0; i < TEGRA186_NUM_CLUSTERS; i++) {
struct tegra186_cpufreq_cluster *cluster = &data->clusters[i];
- cluster->table = init_vhint_table(pdev, bpmp, cluster, i);
+ cluster->table = init_vhint_table(pdev, bpmp, cluster, i, &num_rates);
if (IS_ERR(cluster->table)) {
err = PTR_ERR(cluster->table);
goto put_bpmp;
+ } else if (!num_rates) {
+ err = -EINVAL;
+ goto put_bpmp;
+ }
+
+ for (cpu = 0; cpu < ARRAY_SIZE(tegra186_cpus); cpu++) {
+ if (data->cpus[cpu].bpmp_cluster_id == i) {
+ edvd_val = cluster->table[num_rates - 1].driver_data;
+ edvd_offset = data->cpus[cpu].edvd_offset;
+ writel(edvd_val, data->regs + edvd_offset);
+ }
}
}
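The tegra186 probe change does two things: init_vhint_table() now reports its entry count through an out-parameter so an empty table can fail the probe, and every CPU in the cluster is programmed with the table's top entry so all cores start at a known rate. A sketch of the out-parameter convention, under the assumption that zero entries with a valid pointer is the failure mode being guarded:

static int use_table(struct cpufreq_frequency_table *table, int num_rates)
{
	if (IS_ERR(table))
		return PTR_ERR(table);
	if (!num_rates)
		return -EINVAL;	/* valid pointer, but nothing usable in it */

	return 0;
}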
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 5a5147277cd0..6ee76f5fe9c5 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -72,7 +72,9 @@ enum {
#define AM62P5_EFUSE_O_MPU_OPP 15
#define AM62P5_EFUSE_S_MPU_OPP 19
+#define AM62P5_EFUSE_T_MPU_OPP 20
#define AM62P5_EFUSE_U_MPU_OPP 21
+#define AM62P5_EFUSE_V_MPU_OPP 22
#define AM62P5_SUPPORT_O_MPU_OPP BIT(0)
#define AM62P5_SUPPORT_U_MPU_OPP BIT(2)
@@ -153,7 +155,9 @@ static unsigned long am62p5_efuse_xlate(struct ti_cpufreq_data *opp_data,
unsigned long calculated_efuse = AM62P5_SUPPORT_O_MPU_OPP;
switch (efuse) {
+ case AM62P5_EFUSE_V_MPU_OPP:
case AM62P5_EFUSE_U_MPU_OPP:
+ case AM62P5_EFUSE_T_MPU_OPP:
case AM62P5_EFUSE_S_MPU_OPP:
calculated_efuse |= AM62P5_SUPPORT_U_MPU_OPP;
fallthrough;
@@ -307,9 +311,10 @@ static struct ti_cpufreq_soc_data am3517_soc_data = {
};
static const struct soc_device_attribute k3_cpufreq_soc[] = {
- { .family = "AM62X", .revision = "SR1.0" },
- { .family = "AM62AX", .revision = "SR1.0" },
- { .family = "AM62PX", .revision = "SR1.0" },
+ { .family = "AM62X", },
+ { .family = "AM62AX", },
+ { .family = "AM62PX", },
+ { .family = "AM62DX", },
{ /* sentinel */ }
};
@@ -457,6 +462,7 @@ static const struct of_device_id ti_cpufreq_of_match[] __maybe_unused = {
{ .compatible = "ti,omap36xx", .data = &omap36xx_soc_data, },
{ .compatible = "ti,am625", .data = &am625_soc_data, },
{ .compatible = "ti,am62a7", .data = &am62a7_soc_data, },
+ { .compatible = "ti,am62d2", .data = &am62a7_soc_data, },
{ .compatible = "ti,am62p5", .data = &am62p5_soc_data, },
/* legacy */
{ .compatible = "ti,omap3430", .data = &omap34xx_soc_data, },
diff --git a/drivers/cpufreq/virtual-cpufreq.c b/drivers/cpufreq/virtual-cpufreq.c
index 7dd1b0c263c7..6ffa16d239b2 100644
--- a/drivers/cpufreq/virtual-cpufreq.c
+++ b/drivers/cpufreq/virtual-cpufreq.c
@@ -250,7 +250,7 @@ static int virt_cpufreq_offline(struct cpufreq_policy *policy)
static int virt_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
{
if (policy->freq_table)
- return cpufreq_frequency_table_verify(policy, policy->freq_table);
+ return cpufreq_frequency_table_verify(policy);
cpufreq_verify_within_cpu_limits(policy);
return 0;
diff --git a/drivers/cpuidle/cpuidle-qcom-spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c
index 5f386761b156..7ab6f68b96a8 100644
--- a/drivers/cpuidle/cpuidle-qcom-spm.c
+++ b/drivers/cpuidle/cpuidle-qcom-spm.c
@@ -86,9 +86,9 @@ static const struct of_device_id qcom_idle_state_match[] = {
static int spm_cpuidle_register(struct device *cpuidle_dev, int cpu)
{
- struct platform_device *pdev = NULL;
+ struct platform_device *pdev;
struct device_node *cpu_node, *saw_node;
- struct cpuidle_qcom_spm_data *data = NULL;
+ struct cpuidle_qcom_spm_data *data;
int ret;
cpu_node = of_cpu_device_node_get(cpu);
@@ -96,20 +96,23 @@ static int spm_cpuidle_register(struct device *cpuidle_dev, int cpu)
return -ENODEV;
saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0);
+ of_node_put(cpu_node);
if (!saw_node)
return -ENODEV;
pdev = of_find_device_by_node(saw_node);
of_node_put(saw_node);
- of_node_put(cpu_node);
if (!pdev)
return -ENODEV;
data = devm_kzalloc(cpuidle_dev, sizeof(*data), GFP_KERNEL);
- if (!data)
+ if (!data) {
+ put_device(&pdev->dev);
return -ENOMEM;
+ }
data->spm = dev_get_drvdata(&pdev->dev);
+ put_device(&pdev->dev);
if (!data->spm)
return -EINVAL;
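
The spm_cpuidle_register() hunks fix two reference leaks: cpu_node is now dropped as soon as its phandle has been parsed (the old ordering leaked it on the early-return paths), and the device reference taken by of_find_device_by_node() is released on every exit, including the new -ENOMEM path. A hedged sketch of the resulting lookup pattern:

#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

/* Returns the SAW drvdata or NULL; consumes the cpu_node reference. */
static void *lookup_saw_drvdata(struct device_node *cpu_node)
{
	struct device_node *saw_node;
	struct platform_device *pdev;
	void *drvdata;

	saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0);
	of_node_put(cpu_node);		/* done with the CPU node */
	if (!saw_node)
		return NULL;

	pdev = of_find_device_by_node(saw_node);
	of_node_put(saw_node);		/* done with the SAW node */
	if (!pdev)
		return NULL;

	drvdata = dev_get_drvdata(&pdev->dev);
	put_device(&pdev->dev);		/* balance of_find_device_by_node() */
	return drvdata;
}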
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 0835da449db8..56132e843c99 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -635,8 +635,14 @@ static void __cpuidle_device_init(struct cpuidle_device *dev)
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+ unsigned int cpu = dev->cpu;
int i, ret;
+ if (per_cpu(cpuidle_devices, cpu)) {
+ pr_info("CPU%d: cpuidle device already registered\n", cpu);
+ return -EEXIST;
+ }
+
if (!try_module_get(drv->owner))
return -EINVAL;
@@ -648,7 +654,7 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_USER;
}
- per_cpu(cpuidle_devices, dev->cpu) = dev;
+ per_cpu(cpuidle_devices, cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
ret = cpuidle_coupled_register_device(dev);
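
The guard added above makes duplicate registration fail fast instead of silently overwriting the per-CPU slot. Reduced to its essentials (a sketch, not the cpuidle code itself):

#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/printk.h>

static DEFINE_PER_CPU(void *, example_devices);

static int example_register(unsigned int cpu, void *dev)
{
	if (per_cpu(example_devices, cpu)) {
		pr_info("CPU%u: device already registered\n", cpu);
		return -EEXIST;
	}

	per_cpu(example_devices, cpu) = dev;
	return 0;
}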
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 52d5d26fc7c6..4d9aa5ce31f0 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -97,6 +97,14 @@ static inline int which_bucket(u64 duration_ns)
static DEFINE_PER_CPU(struct menu_device, menu_devices);
+static void menu_update_intervals(struct menu_device *data, unsigned int interval_us)
+{
+ /* Update the repeating-pattern data. */
+ data->intervals[data->interval_ptr++] = interval_us;
+ if (data->interval_ptr >= INTERVALS)
+ data->interval_ptr = 0;
+}
+
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
/*
@@ -222,6 +230,14 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
if (data->needs_update) {
menu_update(drv, dev);
data->needs_update = 0;
+ } else if (!dev->last_residency_ns) {
+ /*
+ * This happens when the driver rejects the previously selected
+ * idle state and returns an error, so update the recent
+ * intervals table to prevent invalid information from being
+ * used going forward.
+ */
+ menu_update_intervals(data, UINT_MAX);
}
/* Find the shortest expected idle interval. */
@@ -271,20 +287,15 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
return 0;
}
- if (tick_nohz_tick_stopped()) {
- /*
- * If the tick is already stopped, the cost of possible short
- * idle duration misprediction is much higher, because the CPU
- * may be stuck in a shallow idle state for a long time as a
- * result of it. In that case say we might mispredict and use
- * the known time till the closest timer event for the idle
- * state selection.
- */
- if (predicted_ns < TICK_NSEC)
- predicted_ns = data->next_timer_ns;
- } else if (latency_req > predicted_ns) {
- latency_req = predicted_ns;
- }
+ /*
+ * If the tick is already stopped, the cost of possible short idle
+ * duration misprediction is much higher, because the CPU may be stuck
+ * in a shallow idle state for a long time as a result of it. In that
+ * case, say we might mispredict and use the known time till the closest
+ * timer event for the idle state selection.
+ */
+ if (tick_nohz_tick_stopped() && predicted_ns < TICK_NSEC)
+ predicted_ns = data->next_timer_ns;
/*
* Find the idle state with the lowest power while satisfying
@@ -300,48 +311,50 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
if (idx == -1)
idx = i; /* first enabled state */
- if (s->target_residency_ns > predicted_ns) {
- /*
- * Use a physical idle state, not busy polling, unless
- * a timer is going to trigger soon enough.
- */
- if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
- s->exit_latency_ns <= latency_req &&
- s->target_residency_ns <= data->next_timer_ns) {
- predicted_ns = s->target_residency_ns;
- idx = i;
- break;
- }
- if (predicted_ns < TICK_NSEC)
- break;
-
- if (!tick_nohz_tick_stopped()) {
- /*
- * If the state selected so far is shallow,
- * waking up early won't hurt, so retain the
- * tick in that case and let the governor run
- * again in the next iteration of the loop.
- */
- predicted_ns = drv->states[idx].target_residency_ns;
- break;
- }
+ if (s->exit_latency_ns > latency_req)
+ break;
- /*
- * If the state selected so far is shallow and this
- * state's target residency matches the time till the
- * closest timer event, select this one to avoid getting
- * stuck in the shallow one for too long.
- */
- if (drv->states[idx].target_residency_ns < TICK_NSEC &&
- s->target_residency_ns <= delta_tick)
- idx = i;
+ if (s->target_residency_ns <= predicted_ns) {
+ idx = i;
+ continue;
+ }
- return idx;
+ /*
+ * Use a physical idle state, not busy polling, unless a timer
+ * is going to trigger soon enough.
+ */
+ if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
+ s->target_residency_ns <= data->next_timer_ns) {
+ predicted_ns = s->target_residency_ns;
+ idx = i;
+ break;
}
- if (s->exit_latency_ns > latency_req)
+
+ if (predicted_ns < TICK_NSEC)
break;
- idx = i;
+ if (!tick_nohz_tick_stopped()) {
+ /*
+ * If the state selected so far is shallow, waking up
+ * early won't hurt, so retain the tick in that case and
+ * let the governor run again in the next iteration of
+ * the idle loop.
+ */
+ predicted_ns = drv->states[idx].target_residency_ns;
+ break;
+ }
+
+ /*
+ * If the state selected so far is shallow and this state's
+ * target residency matches the time till the closest timer
+ * event, select this one to avoid getting stuck in the shallow
+ * one for too long.
+ */
+ if (drv->states[idx].target_residency_ns < TICK_NSEC &&
+ s->target_residency_ns <= delta_tick)
+ idx = i;
+
+ return idx;
}
if (idx == -1)
@@ -482,10 +495,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
data->correction_factor[data->bucket] = new_factor;
- /* update the repeating-pattern data */
- data->intervals[data->interval_ptr++] = ktime_to_us(measured_ns);
- if (data->interval_ptr >= INTERVALS)
- data->interval_ptr = 0;
+ menu_update_intervals(data, ktime_to_us(measured_ns));
}
/**
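
menu_update_intervals() is a plain ring buffer: the newest observed interval overwrites the oldest once the buffer is full, and feeding UINT_MAX after a rejected idle state poisons the pattern detector so a bogus zero residency is not mistaken for a repeating short interval. A self-contained sketch of the buffer (the governor uses INTERVALS == 8, assumed here):

#define INTERVALS 8

struct interval_ring {
	unsigned int intervals[INTERVALS];
	int ptr;
};

/* Record one wakeup interval, wrapping around after INTERVALS samples. */
static void ring_push(struct interval_ring *r, unsigned int sample_us)
{
	r->intervals[r->ptr++] = sample_us;
	if (r->ptr >= INTERVALS)
		r->ptr = 0;
}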
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index d6f5da61cb7d..61de64817604 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -27,14 +27,14 @@ static ssize_t show_available_governors(struct device *dev,
mutex_lock(&cpuidle_lock);
list_for_each_entry(tmp, &cpuidle_governors, governor_list) {
- if (i >= (ssize_t) (PAGE_SIZE - (CPUIDLE_NAME_LEN + 2)))
+ if (i >= (ssize_t)(PAGE_SIZE - (CPUIDLE_NAME_LEN + 2)))
goto out;
- i += scnprintf(&buf[i], CPUIDLE_NAME_LEN + 1, "%s ", tmp->name);
+ i += sysfs_emit_at(buf, i, "%.*s ", CPUIDLE_NAME_LEN, tmp->name);
}
out:
- i+= sprintf(&buf[i], "\n");
+ i += sysfs_emit_at(buf, i, "\n");
mutex_unlock(&cpuidle_lock);
return i;
}
@@ -49,9 +49,9 @@ static ssize_t show_current_driver(struct device *dev,
spin_lock(&cpuidle_driver_lock);
drv = cpuidle_get_driver();
if (drv)
- ret = sprintf(buf, "%s\n", drv->name);
+ ret = sysfs_emit(buf, "%s\n", drv->name);
else
- ret = sprintf(buf, "none\n");
+ ret = sysfs_emit(buf, "none\n");
spin_unlock(&cpuidle_driver_lock);
return ret;
@@ -65,9 +65,9 @@ static ssize_t show_current_governor(struct device *dev,
mutex_lock(&cpuidle_lock);
if (cpuidle_curr_governor)
- ret = sprintf(buf, "%s\n", cpuidle_curr_governor->name);
+ ret = sysfs_emit(buf, "%s\n", cpuidle_curr_governor->name);
else
- ret = sprintf(buf, "none\n");
+ ret = sysfs_emit(buf, "none\n");
mutex_unlock(&cpuidle_lock);
return ret;
@@ -230,7 +230,7 @@ static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0644, show, store)
static ssize_t show_state_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, char *buf) \
{ \
- return sprintf(buf, "%u\n", state->_name);\
+ return sysfs_emit(buf, "%u\n", state->_name);\
}
#define define_show_state_ull_function(_name) \
@@ -238,7 +238,7 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, \
char *buf) \
{ \
- return sprintf(buf, "%llu\n", state_usage->_name);\
+ return sysfs_emit(buf, "%llu\n", state_usage->_name);\
}
#define define_show_state_str_function(_name) \
@@ -247,8 +247,8 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
char *buf) \
{ \
if (state->_name[0] == '\0')\
- return sprintf(buf, "<null>\n");\
- return sprintf(buf, "%s\n", state->_name);\
+ return sysfs_emit(buf, "<null>\n");\
+ return sysfs_emit(buf, "%s\n", state->_name);\
}
#define define_show_state_time_function(_name) \
@@ -256,7 +256,7 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, \
char *buf) \
{ \
- return sprintf(buf, "%llu\n", ktime_to_us(state->_name##_ns)); \
+ return sysfs_emit(buf, "%llu\n", ktime_to_us(state->_name##_ns)); \
}
define_show_state_time_function(exit_latency)
@@ -273,14 +273,14 @@ static ssize_t show_state_time(struct cpuidle_state *state,
struct cpuidle_state_usage *state_usage,
char *buf)
{
- return sprintf(buf, "%llu\n", ktime_to_us(state_usage->time_ns));
+ return sysfs_emit(buf, "%llu\n", ktime_to_us(state_usage->time_ns));
}
static ssize_t show_state_disable(struct cpuidle_state *state,
struct cpuidle_state_usage *state_usage,
char *buf)
{
- return sprintf(buf, "%llu\n",
+ return sysfs_emit(buf, "%llu\n",
state_usage->disable & CPUIDLE_STATE_DISABLED_BY_USER);
}
@@ -310,7 +310,7 @@ static ssize_t show_state_default_status(struct cpuidle_state *state,
struct cpuidle_state_usage *state_usage,
char *buf)
{
- return sprintf(buf, "%s\n",
+ return sysfs_emit(buf, "%s\n",
state->flags & CPUIDLE_FLAG_OFF ? "disabled" : "enabled");
}
@@ -358,7 +358,7 @@ static ssize_t show_state_s2idle_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, \
char *buf) \
{ \
- return sprintf(buf, "%llu\n", state_usage->s2idle_##_name);\
+ return sysfs_emit(buf, "%llu\n", state_usage->s2idle_##_name);\
}
define_show_state_s2idle_ull_function(usage);
@@ -550,7 +550,7 @@ static ssize_t show_driver_name(struct cpuidle_driver *drv, char *buf)
ssize_t ret;
spin_lock(&cpuidle_driver_lock);
- ret = sprintf(buf, "%s\n", drv ? drv->name : "none");
+ ret = sysfs_emit(buf, "%s\n", drv ? drv->name : "none");
spin_unlock(&cpuidle_driver_lock);
return ret;
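
sysfs_emit() formats at the start of the PAGE_SIZE sysfs buffer, while sysfs_emit_at() appends at a caller-tracked offset; both return the number of bytes written and refuse to run past the page, which is what makes them safer than the raw sprintf() calls they replace. A small sketch of the append pattern used in show_available_governors():

#include <linux/sysfs.h>

static ssize_t show_names(char *buf, const char *const *names, size_t n)
{
	ssize_t len = 0;
	size_t i;

	for (i = 0; i < n; i++)
		len += sysfs_emit_at(buf, len, "%s ", names[i]);

	len += sysfs_emit_at(buf, len, "\n");
	return len;
}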
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 04b4c43b6bae..a6688d54984c 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -439,7 +439,7 @@ config CRYPTO_DEV_ATMEL_AUTHENC
config CRYPTO_DEV_ATMEL_AES
tristate "Support for Atmel AES hw accelerator"
- depends on ARCH_AT91 || COMPILE_TEST
+ depends on ARCH_MICROCHIP || COMPILE_TEST
select CRYPTO_AES
select CRYPTO_AEAD
select CRYPTO_SKCIPHER
@@ -725,6 +725,18 @@ config CRYPTO_DEV_TEGRA
Select this to enable Tegra Security Engine which accelerates various
AES encryption/decryption and HASH algorithms.
+config CRYPTO_DEV_XILINX_TRNG
+ tristate "Support for Xilinx True Random Generator"
+ depends on ZYNQMP_FIRMWARE || COMPILE_TEST
+ select CRYPTO_RNG
+ select HW_RANDOM
+ help
+ The Xilinx Versal SoC driver provides kernel-side support for the True
+ Random Number Generator and a Pseudo Random Number Generator in CTR_DRBG
+ mode, as defined in NIST SP800-90A.
+
+ To compile this driver as a module, choose M here: the module
+ will be called xilinx-trng.
+
config CRYPTO_DEV_ZYNQMP_AES
tristate "Support for Xilinx ZynqMP AES hw accelerator"
depends on ZYNQMP_FIRMWARE || COMPILE_TEST
@@ -840,6 +852,7 @@ config CRYPTO_DEV_CCREE
If unsure say Y.
source "drivers/crypto/hisilicon/Kconfig"
+source "drivers/crypto/loongson/Kconfig"
source "drivers/crypto/amlogic/Kconfig"
@@ -863,5 +876,6 @@ config CRYPTO_DEV_SA2UL
source "drivers/crypto/aspeed/Kconfig"
source "drivers/crypto/starfive/Kconfig"
source "drivers/crypto/inside-secure/eip93/Kconfig"
+source "drivers/crypto/ti/Kconfig"
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 22eadcc8f4a2..322ae8854e3e 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -44,7 +44,9 @@ obj-y += inside-secure/
obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
obj-y += xilinx/
obj-y += hisilicon/
+obj-y += loongson/
obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
obj-y += intel/
obj-y += starfive/
obj-y += cavium/
+obj-y += ti/
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index 5663df49dd81..021614b65e39 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -111,7 +111,7 @@ static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- struct sun8i_ce_alg_template *algt __maybe_unused;
+ struct sun8i_ce_alg_template *algt;
algt = container_of(alg, struct sun8i_ce_alg_template,
alg.skcipher.base);
@@ -131,21 +131,19 @@ static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
return err;
}
-static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req)
+static int sun8i_ce_cipher_prepare(struct skcipher_request *areq,
+ struct ce_task *cet)
{
- struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
struct sun8i_ce_dev *ce = op->ce;
struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct sun8i_ce_alg_template *algt;
- struct sun8i_ce_flow *chan;
- struct ce_task *cet;
struct scatterlist *sg;
unsigned int todo, len, offset, ivsize;
u32 common, sym;
- int flow, i;
+ int i;
int nr_sgs = 0;
int nr_sgd = 0;
int err = 0;
@@ -163,14 +161,9 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
algt->stat_req++;
- flow = rctx->flow;
-
- chan = &ce->chanlist[flow];
-
- cet = chan->tl;
memset(cet, 0, sizeof(struct ce_task));
- cet->t_id = cpu_to_le32(flow);
+ cet->t_id = cpu_to_le32(rctx->flow);
common = ce->variant->alg_cipher[algt->ce_algo_id];
common |= rctx->op_dir | CE_COMM_INT;
cet->t_common_ctl = cpu_to_le32(common);
@@ -209,11 +202,11 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
if (areq->iv && ivsize > 0) {
if (rctx->op_dir & CE_DECRYPTION) {
offset = areq->cryptlen - ivsize;
- scatterwalk_map_and_copy(chan->backup_iv, areq->src,
+ scatterwalk_map_and_copy(rctx->backup_iv, areq->src,
offset, ivsize, 0);
}
- memcpy(chan->bounce_iv, areq->iv, ivsize);
- rctx->addr_iv = dma_map_single(ce->dev, chan->bounce_iv, ivsize,
+ memcpy(rctx->bounce_iv, areq->iv, ivsize);
+ rctx->addr_iv = dma_map_single(ce->dev, rctx->bounce_iv, ivsize,
DMA_TO_DEVICE);
if (dma_mapping_error(ce->dev, rctx->addr_iv)) {
dev_err(ce->dev, "Cannot DMA MAP IV\n");
@@ -276,7 +269,6 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
goto theend_sgs;
}
- chan->timeout = areq->cryptlen;
rctx->nr_sgs = ns;
rctx->nr_sgd = nd;
return 0;
@@ -300,13 +292,13 @@ theend_iv:
offset = areq->cryptlen - ivsize;
if (rctx->op_dir & CE_DECRYPTION) {
- memcpy(areq->iv, chan->backup_iv, ivsize);
- memzero_explicit(chan->backup_iv, ivsize);
+ memcpy(areq->iv, rctx->backup_iv, ivsize);
+ memzero_explicit(rctx->backup_iv, ivsize);
} else {
scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
ivsize, 0);
}
- memzero_explicit(chan->bounce_iv, ivsize);
+ memzero_explicit(rctx->bounce_iv, ivsize);
}
dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
@@ -315,24 +307,17 @@ theend:
return err;
}
-static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
- void *async_req)
+static void sun8i_ce_cipher_unprepare(struct skcipher_request *areq,
+ struct ce_task *cet)
{
- struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
struct sun8i_ce_dev *ce = op->ce;
struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
- struct sun8i_ce_flow *chan;
- struct ce_task *cet;
unsigned int ivsize, offset;
int nr_sgs = rctx->nr_sgs;
int nr_sgd = rctx->nr_sgd;
- int flow;
- flow = rctx->flow;
- chan = &ce->chanlist[flow];
- cet = chan->tl;
ivsize = crypto_skcipher_ivsize(tfm);
if (areq->src == areq->dst) {
@@ -349,43 +334,43 @@ static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
DMA_TO_DEVICE);
offset = areq->cryptlen - ivsize;
if (rctx->op_dir & CE_DECRYPTION) {
- memcpy(areq->iv, chan->backup_iv, ivsize);
- memzero_explicit(chan->backup_iv, ivsize);
+ memcpy(areq->iv, rctx->backup_iv, ivsize);
+ memzero_explicit(rctx->backup_iv, ivsize);
} else {
scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
ivsize, 0);
}
- memzero_explicit(chan->bounce_iv, ivsize);
+ memzero_explicit(rctx->bounce_iv, ivsize);
}
dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
}
-static void sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
-{
- struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
- struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
- struct sun8i_ce_dev *ce = op->ce;
- struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
- int flow, err;
-
- flow = rctx->flow;
- err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
- sun8i_ce_cipher_unprepare(engine, areq);
- local_bh_disable();
- crypto_finalize_skcipher_request(engine, breq, err);
- local_bh_enable();
-}
-
int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq)
{
- int err = sun8i_ce_cipher_prepare(engine, areq);
+ struct skcipher_request *req = skcipher_request_cast(areq);
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct sun8i_cipher_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct sun8i_ce_dev *ce = ctx->ce;
+ struct sun8i_ce_flow *chan;
+ int err;
+
+ chan = &ce->chanlist[rctx->flow];
+ err = sun8i_ce_cipher_prepare(req, chan->tl);
if (err)
return err;
- sun8i_ce_cipher_run(engine, areq);
+ err = sun8i_ce_run_task(ce, rctx->flow,
+ crypto_tfm_alg_name(req->base.tfm));
+
+ sun8i_ce_cipher_unprepare(req, chan->tl);
+
+ local_bh_disable();
+ crypto_finalize_skcipher_request(engine, req, err);
+ local_bh_enable();
+
return 0;
}
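
The refactor above collapses the old prepare/run/unprepare trio into a single crypto_engine do_one_request handler, which guarantees unprepare runs exactly once and the request is finalized with the hardware status. The resulting control flow, sketched with hypothetical prepare_descriptor()/run_hardware()/unprepare_descriptor() helpers:

#include <crypto/engine.h>
#include <crypto/skcipher.h>
#include <linux/bottom_half.h>

int prepare_descriptor(struct skcipher_request *req);
int run_hardware(struct skcipher_request *req);
void unprepare_descriptor(struct skcipher_request *req);

static int do_one_request_shape(struct crypto_engine *engine, void *async_req)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	int err;

	err = prepare_descriptor(req);
	if (err)
		return err;

	err = run_hardware(req);
	unprepare_descriptor(req);	/* always undo the DMA mappings */

	local_bh_disable();
	crypto_finalize_skcipher_request(engine, req, err);
	local_bh_enable();

	return 0;
}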
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index 658f520cee0c..c16bb6ce6ee3 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -169,6 +169,12 @@ static const struct ce_variant ce_r40_variant = {
.trng = CE_ID_NOTSUPP,
};
+static void sun8i_ce_dump_task_descriptors(struct sun8i_ce_flow *chan)
+{
+ print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4,
+ chan->tl, sizeof(struct ce_task), false);
+}
+
/*
* sun8i_ce_get_engine_number() get the next channel slot
* This is a simple round-robin way of getting the next channel
@@ -183,7 +189,6 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
{
u32 v;
int err = 0;
- struct ce_task *cet = ce->chanlist[flow].tl;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
ce->chanlist[flow].stat_req++;
@@ -210,11 +215,10 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
mutex_unlock(&ce->mlock);
wait_for_completion_interruptible_timeout(&ce->chanlist[flow].complete,
- msecs_to_jiffies(ce->chanlist[flow].timeout));
+ msecs_to_jiffies(CE_DMA_TIMEOUT_MS));
if (ce->chanlist[flow].status == 0) {
- dev_err(ce->dev, "DMA timeout for %s (tm=%d) on flow %d\n", name,
- ce->chanlist[flow].timeout, flow);
+ dev_err(ce->dev, "DMA timeout for %s on flow %d\n", name, flow);
err = -EFAULT;
}
/* No need to lock for this read, the channel is locked so
@@ -226,9 +230,8 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
/* Sadly, the error bit is not per flow */
if (v) {
dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow);
+ sun8i_ce_dump_task_descriptors(&ce->chanlist[flow]);
err = -EFAULT;
- print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4,
- cet, sizeof(struct ce_task), false);
}
if (v & CE_ERR_ALGO_NOTSUP)
dev_err(ce->dev, "CE ERROR: algorithm not supported\n");
@@ -245,9 +248,8 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
v &= 0xF;
if (v) {
dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow);
+ sun8i_ce_dump_task_descriptors(&ce->chanlist[flow]);
err = -EFAULT;
- print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4,
- cet, sizeof(struct ce_task), false);
}
if (v & CE_ERR_ALGO_NOTSUP)
dev_err(ce->dev, "CE ERROR: algorithm not supported\n");
@@ -261,9 +263,8 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
v &= 0xFF;
if (v) {
dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow);
+ sun8i_ce_dump_task_descriptors(&ce->chanlist[flow]);
err = -EFAULT;
- print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4,
- cet, sizeof(struct ce_task), false);
}
if (v & CE_ERR_ALGO_NOTSUP)
dev_err(ce->dev, "CE ERROR: algorithm not supported\n");
@@ -758,18 +759,6 @@ static int sun8i_ce_allocate_chanlist(struct sun8i_ce_dev *ce)
err = -ENOMEM;
goto error_engine;
}
- ce->chanlist[i].bounce_iv = devm_kmalloc(ce->dev, AES_BLOCK_SIZE,
- GFP_KERNEL | GFP_DMA);
- if (!ce->chanlist[i].bounce_iv) {
- err = -ENOMEM;
- goto error_engine;
- }
- ce->chanlist[i].backup_iv = devm_kmalloc(ce->dev, AES_BLOCK_SIZE,
- GFP_KERNEL);
- if (!ce->chanlist[i].backup_iv) {
- err = -ENOMEM;
- goto error_engine;
- }
}
return 0;
error_engine:
@@ -1063,7 +1052,7 @@ static int sun8i_ce_probe(struct platform_device *pdev)
pm_runtime_put_sync(ce->dev);
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
- struct dentry *dbgfs_dir __maybe_unused;
+ struct dentry *dbgfs_dir;
struct dentry *dbgfs_stats __maybe_unused;
/* Ignore error of debugfs */
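
With the per-channel bounce buffers gone, the per-channel timeout field goes too; waits now use the fixed CE_DMA_TIMEOUT_MS instead of a timeout derived from the request length. The wait, reduced to a sketch:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

#define CE_DMA_TIMEOUT_MS 3000

/* Wait for the flow interrupt; *status is set by the IRQ handler. */
static int wait_flow_done(struct completion *done, const int *status)
{
	wait_for_completion_interruptible_timeout(done,
			msecs_to_jiffies(CE_DMA_TIMEOUT_MS));

	return *status ? 0 : -EFAULT;
}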
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
index 13bdfb8a2c62..d01594353d9a 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
@@ -26,7 +26,7 @@
static void sun8i_ce_hash_stat_fb_inc(struct crypto_ahash *tfm)
{
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
- struct sun8i_ce_alg_template *algt __maybe_unused;
+ struct sun8i_ce_alg_template *algt;
struct ahash_alg *alg = crypto_ahash_alg(tfm);
algt = container_of(alg, struct sun8i_ce_alg_template,
@@ -58,7 +58,8 @@ int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm)
crypto_ahash_set_reqsize(tfm,
sizeof(struct sun8i_ce_hash_reqctx) +
- crypto_ahash_reqsize(op->fallback_tfm));
+ crypto_ahash_reqsize(op->fallback_tfm) +
+ CRYPTO_DMA_PADDING);
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
memcpy(algt->fbname,
@@ -84,7 +85,7 @@ void sun8i_ce_hash_exit_tfm(struct crypto_ahash *tfm)
int sun8i_ce_hash_init(struct ahash_request *areq)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -100,7 +101,7 @@ int sun8i_ce_hash_init(struct ahash_request *areq)
int sun8i_ce_hash_export(struct ahash_request *areq, void *out)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -114,7 +115,7 @@ int sun8i_ce_hash_export(struct ahash_request *areq, void *out)
int sun8i_ce_hash_import(struct ahash_request *areq, const void *in)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -128,7 +129,7 @@ int sun8i_ce_hash_import(struct ahash_request *areq, const void *in)
int sun8i_ce_hash_final(struct ahash_request *areq)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -145,7 +146,7 @@ int sun8i_ce_hash_final(struct ahash_request *areq)
int sun8i_ce_hash_update(struct ahash_request *areq)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -160,7 +161,7 @@ int sun8i_ce_hash_update(struct ahash_request *areq)
int sun8i_ce_hash_finup(struct ahash_request *areq)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -178,7 +179,7 @@ int sun8i_ce_hash_finup(struct ahash_request *areq)
static int sun8i_ce_hash_digest_fb(struct ahash_request *areq)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -238,19 +239,15 @@ static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq)
int sun8i_ce_hash_digest(struct ahash_request *areq)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
- struct sun8i_ce_alg_template *algt;
- struct sun8i_ce_dev *ce;
+ struct sun8i_ce_hash_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
+ struct sun8i_ce_dev *ce = ctx->ce;
struct crypto_engine *engine;
int e;
if (sun8i_ce_hash_need_fallback(areq))
return sun8i_ce_hash_digest_fb(areq);
- algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
- ce = algt->ce;
-
e = sun8i_ce_get_engine_number(ce);
rctx->flow = e;
engine = ce->chanlist[e].engine;
@@ -316,28 +313,22 @@ static u64 hash_pad(__le32 *buf, unsigned int bufsize, u64 padi, u64 byte_count,
return j;
}
-int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
+static int sun8i_ce_hash_prepare(struct ahash_request *areq, struct ce_task *cet)
{
- struct ahash_request *areq = container_of(breq, struct ahash_request, base);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct sun8i_ce_alg_template *algt;
struct sun8i_ce_dev *ce;
- struct sun8i_ce_flow *chan;
- struct ce_task *cet;
struct scatterlist *sg;
- int nr_sgs, flow, err;
+ int nr_sgs, err;
unsigned int len;
u32 common;
u64 byte_count;
__le32 *bf;
- void *buf, *result;
int j, i, todo;
u64 bs;
int digestsize;
- dma_addr_t addr_res, addr_pad;
- int ns = sg_nents_for_len(areq->src, areq->nbytes);
algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
ce = algt->ce;
@@ -349,32 +340,16 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
if (digestsize == SHA384_DIGEST_SIZE)
digestsize = SHA512_DIGEST_SIZE;
- /* the padding could be up to two block. */
- buf = kcalloc(2, bs, GFP_KERNEL | GFP_DMA);
- if (!buf) {
- err = -ENOMEM;
- goto err_out;
- }
- bf = (__le32 *)buf;
-
- result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
- if (!result) {
- err = -ENOMEM;
- goto err_free_buf;
- }
-
- flow = rctx->flow;
- chan = &ce->chanlist[flow];
+ bf = (__le32 *)rctx->pad;
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
algt->stat_req++;
dev_dbg(ce->dev, "%s %s len=%d\n", __func__, crypto_tfm_alg_name(areq->base.tfm), areq->nbytes);
- cet = chan->tl;
memset(cet, 0, sizeof(struct ce_task));
- cet->t_id = cpu_to_le32(flow);
+ cet->t_id = cpu_to_le32(rctx->flow);
common = ce->variant->alg_hash[algt->ce_algo_id];
common |= CE_COMM_INT;
cet->t_common_ctl = cpu_to_le32(common);
@@ -382,11 +357,12 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
cet->t_sym_ctl = 0;
cet->t_asym_ctl = 0;
- nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
+ rctx->nr_sgs = sg_nents_for_len(areq->src, areq->nbytes);
+ nr_sgs = dma_map_sg(ce->dev, areq->src, rctx->nr_sgs, DMA_TO_DEVICE);
if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
err = -EINVAL;
- goto err_free_result;
+ goto err_out;
}
len = areq->nbytes;
@@ -401,10 +377,13 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
err = -EINVAL;
goto err_unmap_src;
}
- addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE);
- cet->t_dst[0].addr = desc_addr_val_le32(ce, addr_res);
- cet->t_dst[0].len = cpu_to_le32(digestsize / 4);
- if (dma_mapping_error(ce->dev, addr_res)) {
+
+ rctx->result_len = digestsize;
+ rctx->addr_res = dma_map_single(ce->dev, rctx->result, rctx->result_len,
+ DMA_FROM_DEVICE);
+ cet->t_dst[0].addr = desc_addr_val_le32(ce, rctx->addr_res);
+ cet->t_dst[0].len = cpu_to_le32(rctx->result_len / 4);
+ if (dma_mapping_error(ce->dev, rctx->addr_res)) {
dev_err(ce->dev, "DMA map dest\n");
err = -EINVAL;
goto err_unmap_src;
@@ -432,10 +411,12 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
goto err_unmap_result;
}
- addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE);
- cet->t_src[i].addr = desc_addr_val_le32(ce, addr_pad);
+ rctx->pad_len = j * 4;
+ rctx->addr_pad = dma_map_single(ce->dev, rctx->pad, rctx->pad_len,
+ DMA_TO_DEVICE);
+ cet->t_src[i].addr = desc_addr_val_le32(ce, rctx->addr_pad);
cet->t_src[i].len = cpu_to_le32(j);
- if (dma_mapping_error(ce->dev, addr_pad)) {
+ if (dma_mapping_error(ce->dev, rctx->addr_pad)) {
dev_err(ce->dev, "DMA error on padding SG\n");
err = -EINVAL;
goto err_unmap_result;
@@ -446,29 +427,59 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
else
cet->t_dlen = cpu_to_le32(areq->nbytes / 4 + j);
- chan->timeout = areq->nbytes;
-
- err = sun8i_ce_run_task(ce, flow, crypto_ahash_alg_name(tfm));
-
- dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
+ return 0;
err_unmap_result:
- dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
- if (!err)
- memcpy(areq->result, result, crypto_ahash_digestsize(tfm));
+ dma_unmap_single(ce->dev, rctx->addr_res, rctx->result_len,
+ DMA_FROM_DEVICE);
err_unmap_src:
- dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
+ dma_unmap_sg(ce->dev, areq->src, rctx->nr_sgs, DMA_TO_DEVICE);
-err_free_result:
- kfree(result);
+err_out:
+ return err;
+}
-err_free_buf:
- kfree(buf);
+static void sun8i_ce_hash_unprepare(struct ahash_request *areq,
+ struct ce_task *cet)
+{
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ce_hash_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct sun8i_ce_dev *ce = ctx->ce;
+
+ dma_unmap_single(ce->dev, rctx->addr_pad, rctx->pad_len, DMA_TO_DEVICE);
+ dma_unmap_single(ce->dev, rctx->addr_res, rctx->result_len,
+ DMA_FROM_DEVICE);
+ dma_unmap_sg(ce->dev, areq->src, rctx->nr_sgs, DMA_TO_DEVICE);
+}
+
+int sun8i_ce_hash_run(struct crypto_engine *engine, void *async_req)
+{
+ struct ahash_request *areq = ahash_request_cast(async_req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ce_hash_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
+ struct sun8i_ce_dev *ce = ctx->ce;
+ struct sun8i_ce_flow *chan;
+ int err;
+
+ chan = &ce->chanlist[rctx->flow];
+
+ err = sun8i_ce_hash_prepare(areq, chan->tl);
+ if (err)
+ return err;
+
+ err = sun8i_ce_run_task(ce, rctx->flow, crypto_ahash_alg_name(tfm));
+
+ sun8i_ce_hash_unprepare(areq, chan->tl);
+
+ if (!err)
+ memcpy(areq->result, rctx->result,
+ crypto_ahash_digestsize(tfm));
-err_out:
local_bh_disable();
- crypto_finalize_hash_request(engine, breq, err);
+ crypto_finalize_hash_request(engine, async_req, err);
local_bh_enable();
return 0;
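
The hash conversion above moves the result and padding buffers into the request context, sized and aligned for DMA, instead of kmalloc'ing them per request: the reqsize gains CRYPTO_DMA_PADDING and the context is fetched with ahash_request_ctx_dma(). A hedged sketch of the pattern with an illustrative context struct:

#include <crypto/internal/hash.h>

struct example_hash_reqctx {
	u8 result[64] __aligned(CRYPTO_DMA_ALIGN);	/* max digest size */
};

static int example_init_tfm(struct crypto_ahash *tfm)
{
	/* Reserve room so the DMA-aligned context fits in the request. */
	crypto_ahash_set_reqsize(tfm,
				 sizeof(struct example_hash_reqctx) +
				 CRYPTO_DMA_PADDING);
	return 0;
}

static u8 *example_result(struct ahash_request *req)
{
	struct example_hash_reqctx *rctx = ahash_request_ctx_dma(req);

	return rctx->result;
}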
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
index 762459867b6c..d0a1ac66738b 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
@@ -137,7 +137,6 @@ int sun8i_ce_prng_generate(struct crypto_rng *tfm, const u8 *src,
cet->t_dst[0].addr = desc_addr_val_le32(ce, dma_dst);
cet->t_dst[0].len = cpu_to_le32(todo / 4);
- ce->chanlist[flow].timeout = 2000;
err = sun8i_ce_run_task(ce, 3, "PRNG");
mutex_unlock(&ce->rnglock);
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
index e1e8bc15202e..244529bf0616 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
@@ -79,7 +79,6 @@ static int sun8i_ce_trng_read(struct hwrng *rng, void *data, size_t max, bool wa
cet->t_dst[0].addr = desc_addr_val_le32(ce, dma_dst);
cet->t_dst[0].len = cpu_to_le32(todo / 4);
- ce->chanlist[flow].timeout = todo;
err = sun8i_ce_run_task(ce, 3, "TRNG");
mutex_unlock(&ce->rnglock);
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index 0f9a89067016..71f5a0cd3d45 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -106,9 +106,13 @@
#define MAX_SG 8
#define CE_MAX_CLOCKS 4
+#define CE_DMA_TIMEOUT_MS 3000
#define MAXFLOW 4
+#define CE_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
+#define CE_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
+
/*
* struct ce_clock - Describe clocks used by sun8i-ce
* @name: Name of clock needed by this variant
@@ -187,8 +191,6 @@ struct ce_task {
* @status: set to 1 by interrupt if task is done
* @t_phy: Physical address of task
* @tl: pointer to the current ce_task for this flow
- * @backup_iv: buffer which contain the next IV to store
- * @bounce_iv: buffer which contain the IV
* @stat_req: number of request done by this flow
*/
struct sun8i_ce_flow {
@@ -196,10 +198,7 @@ struct sun8i_ce_flow {
struct completion complete;
int status;
dma_addr_t t_phy;
- int timeout;
struct ce_task *tl;
- void *backup_iv;
- void *bounce_iv;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
unsigned long stat_req;
#endif
@@ -264,6 +263,8 @@ static inline __le32 desc_addr_val_le32(struct sun8i_ce_dev *dev,
* @nr_sgd: The number of destination SG (as given by dma_map_sg())
* @addr_iv: The IV addr returned by dma_map_single, need to unmap later
* @addr_key: The key addr returned by dma_map_single, need to unmap later
+ * @bounce_iv: Current IV buffer
+ * @backup_iv: Next IV buffer
* @fallback_req: request struct for invoking the fallback skcipher TFM
*/
struct sun8i_cipher_req_ctx {
@@ -273,6 +274,8 @@ struct sun8i_cipher_req_ctx {
int nr_sgd;
dma_addr_t addr_iv;
dma_addr_t addr_key;
+ u8 bounce_iv[AES_BLOCK_SIZE] __aligned(sizeof(u32));
+ u8 backup_iv[AES_BLOCK_SIZE];
struct skcipher_request fallback_req; // keep at the end
};
@@ -304,9 +307,23 @@ struct sun8i_ce_hash_tfm_ctx {
* struct sun8i_ce_hash_reqctx - context for an ahash request
* @fallback_req: pre-allocated fallback request
* @flow: the flow to use for this request
+ * @nr_sgs: number of entries in the source scatterlist
+ * @result_len: result length in bytes
+ * @pad_len: padding length in bytes
+ * @addr_res: DMA address of the result buffer, returned by dma_map_single()
+ * @addr_pad: DMA address of the padding buffer, returned by dma_map_single()
+ * @result: per-request result buffer
+ * @pad: per-request padding buffer (up to 2 blocks)
*/
struct sun8i_ce_hash_reqctx {
int flow;
+ int nr_sgs;
+ size_t result_len;
+ size_t pad_len;
+ dma_addr_t addr_res;
+ dma_addr_t addr_pad;
+ u8 result[CE_MAX_HASH_DIGEST_SIZE] __aligned(CRYPTO_DMA_ALIGN);
+ u8 pad[2 * CE_MAX_HASH_BLOCK_SIZE];
struct ahash_request fallback_req; // keep at the end
};
diff --git a/drivers/crypto/aspeed/aspeed-hace-crypto.c b/drivers/crypto/aspeed/aspeed-hace-crypto.c
index a72dfebc53ff..fa201dae1f81 100644
--- a/drivers/crypto/aspeed/aspeed-hace-crypto.c
+++ b/drivers/crypto/aspeed/aspeed-hace-crypto.c
@@ -346,7 +346,7 @@ free_req:
} else {
dma_unmap_sg(hace_dev->dev, req->dst, rctx->dst_nents,
- DMA_TO_DEVICE);
+ DMA_FROM_DEVICE);
dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents,
DMA_TO_DEVICE);
}
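
The one-liner above fixes a mismatched DMA direction: every dma_unmap_sg() must use the same direction as the dma_map_sg() it balances — DMA_TO_DEVICE for the source list, DMA_FROM_DEVICE for the destination list. The invariant in sketch form:

#include <linux/dma-mapping.h>

static void unmap_src_dst(struct device *dev,
			  struct scatterlist *src, int src_nents,
			  struct scatterlist *dst, int dst_nents)
{
	/* Directions must mirror the original dma_map_sg() calls. */
	dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
}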
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 098f5532f389..3b2a92029b16 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -512,7 +512,7 @@ static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
if (err && (dd->flags & TDES_FLAGS_FAST)) {
dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
- dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
+ dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
}
return err;
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index a93be395c878..320be5d77737 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -592,9 +592,9 @@ static int init_clocks(struct device *dev, const struct caam_imx_data *data)
int ret;
ctrlpriv->num_clks = data->num_clks;
- ctrlpriv->clks = devm_kmemdup(dev, data->clks,
- data->num_clks * sizeof(data->clks[0]),
- GFP_KERNEL);
+ ctrlpriv->clks = devm_kmemdup_array(dev, data->clks,
+ data->num_clks, sizeof(*data->clks),
+ GFP_KERNEL);
if (!ctrlpriv->clks)
return -ENOMEM;
@@ -703,12 +703,12 @@ static int caam_ctrl_rng_init(struct device *dev)
*/
if (needs_entropy_delay_adjustment())
ent_delay = 12000;
- if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
+ if (!inst_handles) {
dev_info(dev,
"Entropy delay = %u\n",
ent_delay);
kick_trng(dev, ent_delay);
- ent_delay += 400;
+ ent_delay = ent_delay * 2;
}
/*
* if instantiate_rng(...) fails, the loop will rerun
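
devm_kmemdup_array() duplicates count * size bytes with overflow checking, replacing the open-coded multiplication previously passed to devm_kmemdup(). Usage in sketch form (the element type is illustrative; the helper's argument order matches the hunk above):

#include <linux/device.h>

struct clk_entry {
	const char *id;
};

static struct clk_entry *dup_clks(struct device *dev,
				  const struct clk_entry *clks, int num_clks)
{
	return devm_kmemdup_array(dev, clks, num_clks, sizeof(*clks),
				  GFP_KERNEL);
}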
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 394484929dae..a9626b30044a 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -13,7 +13,8 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \
tee-dev.o \
platform-access.o \
dbc.o \
- hsti.o
+ hsti.o \
+ sfs.o
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
ccp-crypto-objs := ccp-crypto-main.o \
diff --git a/drivers/crypto/ccp/hsti.c b/drivers/crypto/ccp/hsti.c
index 1b39a4fb55c0..c29c6a9c0f3f 100644
--- a/drivers/crypto/ccp/hsti.c
+++ b/drivers/crypto/ccp/hsti.c
@@ -74,7 +74,7 @@ struct attribute_group psp_security_attr_group = {
.is_visible = psp_security_is_visible,
};
-static int psp_poulate_hsti(struct psp_device *psp)
+static int psp_populate_hsti(struct psp_device *psp)
{
struct hsti_request *req;
int ret;
@@ -84,11 +84,11 @@ static int psp_poulate_hsti(struct psp_device *psp)
return 0;
/* Allocate command-response buffer */
- req = kzalloc(sizeof(*req), GFP_KERNEL | __GFP_ZERO);
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
- req->header.payload_size = sizeof(req);
+ req->header.payload_size = sizeof(*req);
ret = psp_send_platform_access_msg(PSP_CMD_HSTI_QUERY, (struct psp_request *)req);
if (ret)
@@ -114,7 +114,7 @@ int psp_init_hsti(struct psp_device *psp)
int ret;
if (PSP_FEATURE(psp, HSTI)) {
- ret = psp_poulate_hsti(psp);
+ ret = psp_populate_hsti(psp);
if (ret)
return ret;
}
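
Two classic bugs are fixed above: __GFP_ZERO is redundant with kzalloc(), and sizeof(req) measured the pointer (8 bytes on 64-bit) where sizeof(*req) measures the structure the firmware expects. In sketch form:

#include <linux/slab.h>

struct example_req {
	u32 payload_size;
	u8 payload[60];
};

static struct example_req *alloc_req(void)
{
	struct example_req *req = kzalloc(sizeof(*req), GFP_KERNEL);

	if (req)	/* sizeof(*req) == 64 here; sizeof(req) would be 8 */
		req->payload_size = sizeof(*req);

	return req;
}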
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index 1c5a7189631e..9e21da0e298a 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -17,6 +17,7 @@
#include "psp-dev.h"
#include "sev-dev.h"
#include "tee-dev.h"
+#include "sfs.h"
#include "platform-access.h"
#include "dbc.h"
#include "hsti.h"
@@ -182,6 +183,17 @@ static int psp_check_tee_support(struct psp_device *psp)
return 0;
}
+static int psp_check_sfs_support(struct psp_device *psp)
+{
+ /* Check if device supports SFS feature */
+ if (!psp->capability.sfs) {
+ dev_dbg(psp->dev, "psp does not support SFS\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static int psp_init(struct psp_device *psp)
{
int ret;
@@ -198,6 +210,12 @@ static int psp_init(struct psp_device *psp)
return ret;
}
+ if (!psp_check_sfs_support(psp)) {
+ ret = sfs_dev_init(psp);
+ if (ret)
+ return ret;
+ }
+
if (psp->vdata->platform_access) {
ret = platform_access_dev_init(psp);
if (ret)
@@ -302,6 +320,8 @@ void psp_dev_destroy(struct sp_device *sp)
tee_dev_destroy(psp);
+ sfs_dev_destroy(psp);
+
dbc_dev_destroy(psp);
platform_access_dev_destroy(psp);
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index e43ce87ede76..268c83f298cb 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -32,7 +32,8 @@ union psp_cap_register {
unsigned int sev :1,
tee :1,
dbc_thru_ext :1,
- rsvd1 :4,
+ sfs :1,
+ rsvd1 :3,
security_reporting :1,
fused_part :1,
rsvd2 :1,
@@ -68,6 +69,7 @@ struct psp_device {
void *tee_data;
void *platform_access_data;
void *dbc_data;
+ void *sfs_data;
union psp_cap_register capability;
};
@@ -118,12 +120,16 @@ struct psp_ext_request {
* @PSP_SUB_CMD_DBC_SET_UID: Set UID for DBC
* @PSP_SUB_CMD_DBC_GET_PARAMETER: Get parameter from DBC
* @PSP_SUB_CMD_DBC_SET_PARAMETER: Set parameter for DBC
+ * @PSP_SUB_CMD_SFS_GET_FW_VERS: Get firmware versions for ASP and other MP
+ * @PSP_SUB_CMD_SFS_UPDATE: Command to load, verify and execute SFS package
*/
enum psp_sub_cmd {
PSP_SUB_CMD_DBC_GET_NONCE = PSP_DYNAMIC_BOOST_GET_NONCE,
PSP_SUB_CMD_DBC_SET_UID = PSP_DYNAMIC_BOOST_SET_UID,
PSP_SUB_CMD_DBC_GET_PARAMETER = PSP_DYNAMIC_BOOST_GET_PARAMETER,
PSP_SUB_CMD_DBC_SET_PARAMETER = PSP_DYNAMIC_BOOST_SET_PARAMETER,
+ PSP_SUB_CMD_SFS_GET_FW_VERS = PSP_SFS_GET_FW_VERSIONS,
+ PSP_SUB_CMD_SFS_UPDATE = PSP_SFS_UPDATE,
};
int psp_extended_mailbox_cmd(struct psp_device *psp, unsigned int timeout_msecs,
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index e058ba027792..0d13d47c164b 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -28,6 +28,7 @@
#include <linux/fs_struct.h>
#include <linux/psp.h>
#include <linux/amd-iommu.h>
+#include <linux/crash_dump.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
@@ -82,6 +83,21 @@ MODULE_FIRMWARE("amd/amd_sev_fam19h_model1xh.sbin"); /* 4th gen EPYC */
static bool psp_dead;
static int psp_timeout;
+enum snp_hv_fixed_pages_state {
+ ALLOCATED,
+ HV_FIXED,
+};
+
+struct snp_hv_fixed_pages_entry {
+ struct list_head list;
+ struct page *page;
+ unsigned int order;
+ bool free;
+ enum snp_hv_fixed_pages_state page_state;
+};
+
+static LIST_HEAD(snp_hv_fixed_pages);
+
/* Trusted Memory Region (TMR):
* The TMR is a 1MB area that must be 1MB aligned. Use the page allocator
* to allocate the memory, which will return aligned memory for the specified
@@ -233,6 +249,8 @@ static int sev_cmd_buffer_len(int cmd)
case SEV_CMD_SNP_GUEST_REQUEST: return sizeof(struct sev_data_snp_guest_request);
case SEV_CMD_SNP_CONFIG: return sizeof(struct sev_user_data_snp_config);
case SEV_CMD_SNP_COMMIT: return sizeof(struct sev_data_snp_commit);
+ case SEV_CMD_SNP_FEATURE_INFO: return sizeof(struct sev_data_snp_feature_info);
+ case SEV_CMD_SNP_VLEK_LOAD: return sizeof(struct sev_user_data_snp_vlek_load);
default: return 0;
}
@@ -846,9 +864,10 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
struct sev_device *sev;
unsigned int cmdbuff_hi, cmdbuff_lo;
unsigned int phys_lsb, phys_msb;
- unsigned int reg, ret = 0;
+ unsigned int reg;
void *cmd_buf;
int buf_len;
+ int ret = 0;
if (!psp || !psp->sev_data)
return -ENODEV;
@@ -1073,6 +1092,247 @@ static void snp_set_hsave_pa(void *arg)
wrmsrq(MSR_VM_HSAVE_PA, 0);
}
+/* Hypervisor Fixed pages API interface */
+static void snp_hv_fixed_pages_state_update(struct sev_device *sev,
+ enum snp_hv_fixed_pages_state page_state)
+{
+ struct snp_hv_fixed_pages_entry *entry;
+
+ /* List is protected by sev_cmd_mutex */
+ lockdep_assert_held(&sev_cmd_mutex);
+
+ if (list_empty(&snp_hv_fixed_pages))
+ return;
+
+ list_for_each_entry(entry, &snp_hv_fixed_pages, list)
+ entry->page_state = page_state;
+}
+
+/*
+ * Allocate HV_FIXED pages in 2MB-aligned sizes so that whole 2MB
+ * pages are marked as HV_FIXED.
+ */
+struct page *snp_alloc_hv_fixed_pages(unsigned int num_2mb_pages)
+{
+ struct psp_device *psp_master = psp_get_master_device();
+ struct snp_hv_fixed_pages_entry *entry;
+ struct sev_device *sev;
+ unsigned int order;
+ struct page *page;
+
+ if (!psp_master || !psp_master->sev_data)
+ return NULL;
+
+ sev = psp_master->sev_data;
+
+ order = get_order(PMD_SIZE * num_2mb_pages);
+
+ /*
+ * SNP_INIT_EX is protected by sev_cmd_mutex, therefore this list
+ * also needs to be protected using the same mutex.
+ */
+ guard(mutex)(&sev_cmd_mutex);
+
+ /*
+ * This API uses SNP_INIT_EX to transition allocated pages to HV_Fixed
+ * page state, so fail if SNP is already initialized.
+ */
+ if (sev->snp_initialized)
+ return NULL;
+
+ /* Re-use freed pages that match the request */
+ list_for_each_entry(entry, &snp_hv_fixed_pages, list) {
+ /* Hypervisor fixed page allocator implements exact fit policy */
+ if (entry->order == order && entry->free) {
+ entry->free = false;
+ memset(page_address(entry->page), 0,
+ (1 << entry->order) * PAGE_SIZE);
+ return entry->page;
+ }
+ }
+
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!page)
+ return NULL;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ __free_pages(page, order);
+ return NULL;
+ }
+
+ entry->page = page;
+ entry->order = order;
+ list_add_tail(&entry->list, &snp_hv_fixed_pages);
+
+ return page;
+}
+
+void snp_free_hv_fixed_pages(struct page *page)
+{
+ struct psp_device *psp_master = psp_get_master_device();
+ struct snp_hv_fixed_pages_entry *entry, *nentry;
+
+ if (!psp_master || !psp_master->sev_data)
+ return;
+
+ /*
+ * SNP_INIT_EX is protected by sev_cmd_mutex, therefore this list
+ * also needs to be protected using the same mutex.
+ */
+ guard(mutex)(&sev_cmd_mutex);
+
+ list_for_each_entry_safe(entry, nentry, &snp_hv_fixed_pages, list) {
+ if (entry->page != page)
+ continue;
+
+ /*
+ * HV_FIXED page state cannot be changed until reboot
+ * and they cannot be used by an SNP guest, so they cannot
+ * be returned to the page allocator.
+ * Mark the pages as free internally to allow possible re-use.
+ */
+ if (entry->page_state == HV_FIXED) {
+ entry->free = true;
+ } else {
+ __free_pages(page, entry->order);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ return;
+ }
+}
+
+static void snp_add_hv_fixed_pages(struct sev_device *sev, struct sev_data_range_list *range_list)
+{
+ struct snp_hv_fixed_pages_entry *entry;
+ struct sev_data_range *range;
+ int num_elements;
+
+ lockdep_assert_held(&sev_cmd_mutex);
+
+ if (list_empty(&snp_hv_fixed_pages))
+ return;
+
+ num_elements = list_count_nodes(&snp_hv_fixed_pages) +
+ range_list->num_elements;
+
+ /*
+ * Ensure the list of HV_FIXED pages that will be passed to firmware
+ * does not exceed the page-sized argument buffer.
+ */
+ if (num_elements * sizeof(*range) + sizeof(*range_list) > PAGE_SIZE) {
+ dev_warn(sev->dev, "Additional HV_Fixed pages cannot be accommodated, omitting\n");
+ return;
+ }
+
+ range = &range_list->ranges[range_list->num_elements];
+ list_for_each_entry(entry, &snp_hv_fixed_pages, list) {
+ range->base = page_to_pfn(entry->page) << PAGE_SHIFT;
+ range->page_count = 1 << entry->order;
+ range++;
+ }
+ range_list->num_elements = num_elements;
+}
+
+static void snp_leak_hv_fixed_pages(void)
+{
+ struct snp_hv_fixed_pages_entry *entry;
+
+ /* List is protected by sev_cmd_mutex */
+ lockdep_assert_held(&sev_cmd_mutex);
+
+ if (list_empty(&snp_hv_fixed_pages))
+ return;
+
+ list_for_each_entry(entry, &snp_hv_fixed_pages, list)
+ if (entry->page_state == HV_FIXED)
+ __snp_leak_pages(page_to_pfn(entry->page),
+ 1 << entry->order, false);
+}
+
+bool sev_is_snp_ciphertext_hiding_supported(void)
+{
+ struct psp_device *psp = psp_master;
+ struct sev_device *sev;
+
+ if (!psp || !psp->sev_data)
+ return false;
+
+ sev = psp->sev_data;
+
+ /*
+ * The feature information reports whether the SEV firmware supports
+ * CipherTextHiding, and the platform status reports whether the
+ * feature is enabled in the platform BIOS.
+ */
+ return ((sev->snp_feat_info_0.ecx & SNP_CIPHER_TEXT_HIDING_SUPPORTED) &&
+ sev->snp_plat_status.ciphertext_hiding_cap);
+}
+EXPORT_SYMBOL_GPL(sev_is_snp_ciphertext_hiding_supported);
+
+static int snp_get_platform_data(struct sev_device *sev, int *error)
+{
+ struct sev_data_snp_feature_info snp_feat_info;
+ struct snp_feature_info *feat_info;
+ struct sev_data_snp_addr buf;
+ struct page *page;
+ int rc;
+
+ /*
+ * This function is expected to be called before SNP is
+ * initialized.
+ */
+ if (sev->snp_initialized)
+ return -EINVAL;
+
+ buf.address = __psp_pa(&sev->snp_plat_status);
+ rc = sev_do_cmd(SEV_CMD_SNP_PLATFORM_STATUS, &buf, error);
+ if (rc) {
+ dev_err(sev->dev, "SNP PLATFORM_STATUS command failed, ret = %d, error = %#x\n",
+ rc, *error);
+ return rc;
+ }
+
+ sev->api_major = sev->snp_plat_status.api_major;
+ sev->api_minor = sev->snp_plat_status.api_minor;
+ sev->build = sev->snp_plat_status.build_id;
+
+ /*
+ * Do feature discovery of the currently loaded firmware,
+ * and cache feature information from CPUID 0x8000_0024,
+ * sub-function 0.
+ */
+ if (!sev->snp_plat_status.feature_info)
+ return 0;
+
+ /*
+ * Use a dynamically allocated structure for the SNP_FEATURE_INFO
+ * command to ensure the structure is 8-byte aligned and does not
+ * cross a page boundary.
+ */
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ feat_info = page_address(page);
+ snp_feat_info.length = sizeof(snp_feat_info);
+ snp_feat_info.ecx_in = 0;
+ snp_feat_info.feature_info_paddr = __psp_pa(feat_info);
+
+ rc = sev_do_cmd(SEV_CMD_SNP_FEATURE_INFO, &snp_feat_info, error);
+ if (!rc)
+ sev->snp_feat_info_0 = *feat_info;
+ else
+ dev_err(sev->dev, "SNP FEATURE_INFO command failed, ret = %d, error = %#x\n",
+ rc, *error);
+
+ __free_page(page);
+
+ return rc;
+}
+
static int snp_filter_reserved_mem_regions(struct resource *rs, void *arg)
{
struct sev_data_range_list *range_list = arg;
@@ -1103,7 +1363,7 @@ static int snp_filter_reserved_mem_regions(struct resource *rs, void *arg)
return 0;
}
-static int __sev_snp_init_locked(int *error)
+static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid)
{
struct psp_device *psp = psp_master;
struct sev_data_snp_init_ex data;
@@ -1163,7 +1423,19 @@ static int __sev_snp_init_locked(int *error)
return rc;
}
+ /*
+ * Add HV_Fixed pages from other PSP sub-devices, such as SFS, to
+ * the HV_Fixed page list.
+ */
+ snp_add_hv_fixed_pages(sev, snp_range_list);
+
memset(&data, 0, sizeof(data));
+
+ if (max_snp_asid) {
+ data.ciphertext_hiding_en = 1;
+ data.max_snp_asid = max_snp_asid;
+ }
+
data.init_rmp = 1;
data.list_paddr_en = 1;
data.list_paddr = __psp_pa(snp_range_list);
@@ -1202,6 +1474,7 @@ static int __sev_snp_init_locked(int *error)
return rc;
}
+ snp_hv_fixed_pages_state_update(sev, HV_FIXED);
sev->snp_initialized = true;
dev_dbg(sev->dev, "SEV-SNP firmware initialized\n");
@@ -1286,7 +1559,7 @@ static int __sev_platform_init_locked(int *error)
sev = psp_master->sev_data;
- if (sev->state == SEV_STATE_INIT)
+ if (sev->sev_plat_status.state == SEV_STATE_INIT)
return 0;
__sev_platform_init_handle_tmr(sev);
@@ -1318,7 +1591,7 @@ static int __sev_platform_init_locked(int *error)
return rc;
}
- sev->state = SEV_STATE_INIT;
+ sev->sev_plat_status.state = SEV_STATE_INIT;
/* Prepare for first SEV guest launch after INIT */
wbinvd_on_all_cpus();
@@ -1345,12 +1618,21 @@ static int _sev_platform_init_locked(struct sev_platform_init_args *args)
if (!psp_master || !psp_master->sev_data)
return -ENODEV;
+ /*
+ * Skip SNP/SEV initialization under a kdump kernel as SEV/SNP
+ * may already be initialized in the previous kernel. Since no
+ * SNP/SEV guests are run under a kdump kernel, there is no
+ * need to initialize SNP or SEV during kdump boot.
+ */
+ if (is_kdump_kernel())
+ return 0;
+
sev = psp_master->sev_data;
- if (sev->state == SEV_STATE_INIT)
+ if (sev->sev_plat_status.state == SEV_STATE_INIT)
return 0;
- rc = __sev_snp_init_locked(&args->error);
+ rc = __sev_snp_init_locked(&args->error, args->max_snp_asid);
if (rc && rc != -ENODEV)
return rc;
@@ -1384,7 +1666,7 @@ static int __sev_platform_shutdown_locked(int *error)
sev = psp->sev_data;
- if (sev->state == SEV_STATE_UNINIT)
+ if (sev->sev_plat_status.state == SEV_STATE_UNINIT)
return 0;
ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
@@ -1394,7 +1676,7 @@ static int __sev_platform_shutdown_locked(int *error)
return ret;
}
- sev->state = SEV_STATE_UNINIT;
+ sev->sev_plat_status.state = SEV_STATE_UNINIT;
dev_dbg(sev->dev, "SEV firmware shutdown\n");
return ret;
@@ -1433,7 +1715,7 @@ static int snp_move_to_init_state(struct sev_issue_cmd *argp, bool *shutdown_req
{
int error, rc;
- rc = __sev_snp_init_locked(&error);
+ rc = __sev_snp_init_locked(&error, 0);
if (rc) {
argp->error = SEV_RET_INVALID_PLATFORM_STATE;
return rc;
@@ -1502,7 +1784,7 @@ static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp, bool wr
if (!writable)
return -EPERM;
- if (sev->state == SEV_STATE_UNINIT) {
+ if (sev->sev_plat_status.state == SEV_STATE_UNINIT) {
rc = sev_move_to_init_state(argp, &shutdown_required);
if (rc)
return rc;
@@ -1551,7 +1833,7 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
data.len = input.length;
cmd:
- if (sev->state == SEV_STATE_UNINIT) {
+ if (sev->sev_plat_status.state == SEV_STATE_UNINIT) {
ret = sev_move_to_init_state(argp, &shutdown_required);
if (ret)
goto e_free_blob;
@@ -1599,6 +1881,16 @@ static int sev_get_api_version(void)
struct sev_user_data_status status;
int error = 0, ret;
+ /*
+ * Cache SNP platform status and SNP feature information
+ * if SNP is available.
+ */
+ if (cc_platform_has(CC_ATTR_HOST_SEV_SNP)) {
+ ret = snp_get_platform_data(sev, &error);
+ if (ret)
+ return 1;
+ }
+
ret = sev_platform_status(&status, &error);
if (ret) {
dev_err(sev->dev,
@@ -1606,10 +1898,12 @@ static int sev_get_api_version(void)
return 1;
}
+ /* Cache SEV platform status */
+ sev->sev_plat_status = status;
+
sev->api_major = status.api_major;
sev->api_minor = status.api_minor;
sev->build = status.build;
- sev->state = status.state;
return 0;
}
@@ -1784,6 +2078,7 @@ static int __sev_snp_shutdown_locked(int *error, bool panic)
return ret;
}
+ snp_leak_hv_fixed_pages();
sev->snp_initialized = false;
dev_dbg(sev->dev, "SEV-SNP firmware shutdown\n");
@@ -1837,7 +2132,7 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
data.oca_cert_len = input.oca_cert_len;
/* If platform is not in INIT state then transition it to INIT */
- if (sev->state != SEV_STATE_INIT) {
+ if (sev->sev_plat_status.state != SEV_STATE_INIT) {
ret = sev_move_to_init_state(argp, &shutdown_required);
if (ret)
goto e_free_oca;
@@ -2008,7 +2303,7 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
cmd:
/* If platform is not in INIT state then transition it to INIT. */
- if (sev->state != SEV_STATE_INIT) {
+ if (sev->sev_plat_status.state != SEV_STATE_INIT) {
if (!writable) {
ret = -EPERM;
goto e_free_cert;
@@ -2430,7 +2725,7 @@ static void __sev_firmware_shutdown(struct sev_device *sev, bool panic)
{
int error;
- __sev_platform_shutdown_locked(NULL);
+ __sev_platform_shutdown_locked(&error);
if (sev_es_tmr) {
/*
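
The HV_FIXED allocator introduced in this file deserves a note: once SNP_INIT_EX marks a region HV_Fixed it can never return to the page allocator until reboot, so snp_free_hv_fixed_pages() keeps such regions on a list and snp_alloc_hv_fixed_pages() hands them out again only for requests of exactly the same order. The exact-fit reuse scan, in hedged sketch form:

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/string.h>

struct fixed_entry {
	struct list_head list;
	struct page *page;
	unsigned int order;
	bool free;
};

/* Re-use a previously freed HV_FIXED region of exactly this order. */
static struct page *fixed_alloc(struct list_head *pool, unsigned int order)
{
	struct fixed_entry *e;

	list_for_each_entry(e, pool, list) {
		if (e->order == order && e->free) {
			e->free = false;
			memset(page_address(e->page), 0, PAGE_SIZE << e->order);
			return e->page;
		}
	}

	return NULL;	/* caller falls back to alloc_pages() */
}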
diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h
index 3e4e5574e88a..ac03bd0848f7 100644
--- a/drivers/crypto/ccp/sev-dev.h
+++ b/drivers/crypto/ccp/sev-dev.h
@@ -42,7 +42,6 @@ struct sev_device {
struct sev_vdata *vdata;
- int state;
unsigned int int_rcvd;
wait_queue_head_t int_queue;
struct sev_misc_dev *misc;
@@ -57,6 +56,11 @@ struct sev_device {
bool cmd_buf_backup_active;
bool snp_initialized;
+
+ struct sev_user_data_status sev_plat_status;
+
+ struct sev_user_data_snp_status snp_plat_status;
+ struct snp_feature_info snp_feat_info_0;
};
int sev_dev_init(struct psp_device *psp);
@@ -65,4 +69,7 @@ void sev_dev_destroy(struct psp_device *psp);
void sev_pci_init(void);
void sev_pci_exit(void);
+struct page *snp_alloc_hv_fixed_pages(unsigned int num_2mb_pages);
+void snp_free_hv_fixed_pages(struct page *page);
+
#endif /* __SEV_DEV_H */
diff --git a/drivers/crypto/ccp/sfs.c b/drivers/crypto/ccp/sfs.c
new file mode 100644
index 000000000000..2f4beaafe7ec
--- /dev/null
+++ b/drivers/crypto/ccp/sfs.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Secure Processor Seamless Firmware Servicing support.
+ *
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ *
+ * Author: Ashish Kalra <ashish.kalra@amd.com>
+ */
+
+#include <linux/firmware.h>
+
+#include "sfs.h"
+#include "sev-dev.h"
+
+#define SFS_DEFAULT_TIMEOUT (10 * MSEC_PER_SEC)
+#define SFS_MAX_PAYLOAD_SIZE (2 * 1024 * 1024)
+#define SFS_NUM_2MB_PAGES_CMDBUF (SFS_MAX_PAYLOAD_SIZE / PMD_SIZE)
+#define SFS_NUM_PAGES_CMDBUF (SFS_MAX_PAYLOAD_SIZE / PAGE_SIZE)
+
+static DEFINE_MUTEX(sfs_ioctl_mutex);
+
+static struct sfs_misc_dev *misc_dev;
+
+static int send_sfs_cmd(struct sfs_device *sfs_dev, int msg)
+{
+ int ret;
+
+ sfs_dev->command_buf->hdr.status = 0;
+ sfs_dev->command_buf->hdr.sub_cmd_id = msg;
+
+ ret = psp_extended_mailbox_cmd(sfs_dev->psp,
+ SFS_DEFAULT_TIMEOUT,
+ (struct psp_ext_request *)sfs_dev->command_buf);
+ if (ret == -EIO) {
+ dev_dbg(sfs_dev->dev,
+ "msg 0x%x failed with PSP error: 0x%x, extended status: 0x%x\n",
+ msg, sfs_dev->command_buf->hdr.status,
+ *(u32 *)sfs_dev->command_buf->buf);
+ }
+
+ return ret;
+}
+
+static int send_sfs_get_fw_versions(struct sfs_device *sfs_dev)
+{
+ /*
+	 * The SFS_GET_FW_VERSIONS command needs every byte of the output
+	 * buffer initialized to 0xC7.
+ */
+ memset(sfs_dev->command_buf->sfs_buffer, 0xc7, PAGE_SIZE);
+ sfs_dev->command_buf->hdr.payload_size = 2 * PAGE_SIZE;
+
+ return send_sfs_cmd(sfs_dev, PSP_SFS_GET_FW_VERSIONS);
+}
+
+static int send_sfs_update_package(struct sfs_device *sfs_dev, const char *payload_name)
+{
+ char payload_path[PAYLOAD_NAME_SIZE + sizeof("amd/")];
+ const struct firmware *firmware;
+ unsigned long package_size;
+ int ret;
+
+ /* Sanitize userspace provided payload name */
+ if (!strnchr(payload_name, PAYLOAD_NAME_SIZE, '\0'))
+ return -EINVAL;
+
+ snprintf(payload_path, sizeof(payload_path), "amd/%s", payload_name);
+
+ ret = firmware_request_nowarn(&firmware, payload_path, sfs_dev->dev);
+ if (ret < 0) {
+ dev_warn_ratelimited(sfs_dev->dev, "firmware request failed for %s (%d)\n",
+ payload_path, ret);
+ return -ENOENT;
+ }
+
+ /*
+	 * The SFS Update Package command's input buffer contains the
+	 * TEE_EXT_CMD_BUFFER followed by the Update Package; its total size
+	 * must be 64KB aligned.
+ */
+ package_size = ALIGN(firmware->size + PAGE_SIZE, 0x10000U);
+
+ /*
+	 * The SFS command buffer is a pre-allocated 2MB buffer; fail the
+	 * update if the SFS payload is larger than that.
+ */
+ if (package_size > SFS_MAX_PAYLOAD_SIZE) {
+ dev_warn_ratelimited(sfs_dev->dev,
+ "SFS payload size %ld larger than maximum supported payload size of %u\n",
+ package_size, SFS_MAX_PAYLOAD_SIZE);
+ release_firmware(firmware);
+ return -E2BIG;
+ }
+
+ /*
+ * Copy firmware data to a HV_Fixed memory region.
+ */
+ memcpy(sfs_dev->command_buf->sfs_buffer, firmware->data, firmware->size);
+ sfs_dev->command_buf->hdr.payload_size = package_size;
+
+ release_firmware(firmware);
+
+ return send_sfs_cmd(sfs_dev, PSP_SFS_UPDATE);
+}
+
+static long sfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct sfs_user_get_fw_versions __user *sfs_get_fw_versions;
+ struct sfs_user_update_package __user *sfs_update_package;
+ struct psp_device *psp_master = psp_get_master_device();
+ char payload_name[PAYLOAD_NAME_SIZE];
+ struct sfs_device *sfs_dev;
+ int ret = 0;
+
+ if (!psp_master || !psp_master->sfs_data)
+ return -ENODEV;
+
+ sfs_dev = psp_master->sfs_data;
+
+ guard(mutex)(&sfs_ioctl_mutex);
+
+ switch (cmd) {
+ case SFSIOCFWVERS:
+ dev_dbg(sfs_dev->dev, "in SFSIOCFWVERS\n");
+
+ sfs_get_fw_versions = (struct sfs_user_get_fw_versions __user *)arg;
+
+ ret = send_sfs_get_fw_versions(sfs_dev);
+ if (ret && ret != -EIO)
+ return ret;
+
+ /*
+		 * Return the SFS status and extended status to userspace if the
+		 * PSP status indicated success or a command error.
+ */
+ if (copy_to_user(&sfs_get_fw_versions->blob, sfs_dev->command_buf->sfs_buffer,
+ PAGE_SIZE))
+ return -EFAULT;
+ if (copy_to_user(&sfs_get_fw_versions->sfs_status,
+ &sfs_dev->command_buf->hdr.status,
+ sizeof(sfs_get_fw_versions->sfs_status)))
+ return -EFAULT;
+ if (copy_to_user(&sfs_get_fw_versions->sfs_extended_status,
+ &sfs_dev->command_buf->buf,
+ sizeof(sfs_get_fw_versions->sfs_extended_status)))
+ return -EFAULT;
+ break;
+ case SFSIOCUPDATEPKG:
+ dev_dbg(sfs_dev->dev, "in SFSIOCUPDATEPKG\n");
+
+ sfs_update_package = (struct sfs_user_update_package __user *)arg;
+
+ if (copy_from_user(payload_name, sfs_update_package->payload_name,
+ PAYLOAD_NAME_SIZE))
+ return -EFAULT;
+
+ ret = send_sfs_update_package(sfs_dev, payload_name);
+ if (ret && ret != -EIO)
+ return ret;
+
+ /*
+		 * Return the SFS status and extended status to userspace if the
+		 * PSP status indicated success or a command error.
+ */
+ if (copy_to_user(&sfs_update_package->sfs_status,
+ &sfs_dev->command_buf->hdr.status,
+ sizeof(sfs_update_package->sfs_status)))
+ return -EFAULT;
+ if (copy_to_user(&sfs_update_package->sfs_extended_status,
+ &sfs_dev->command_buf->buf,
+ sizeof(sfs_update_package->sfs_extended_status)))
+ return -EFAULT;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct file_operations sfs_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = sfs_ioctl,
+};
+
+static void sfs_exit(struct kref *ref)
+{
+ misc_deregister(&misc_dev->misc);
+ kfree(misc_dev);
+ misc_dev = NULL;
+}
+
+void sfs_dev_destroy(struct psp_device *psp)
+{
+ struct sfs_device *sfs_dev = psp->sfs_data;
+
+ if (!sfs_dev)
+ return;
+
+ /*
+ * Change SFS command buffer back to the default "Write-Back" type.
+ */
+ set_memory_wb((unsigned long)sfs_dev->command_buf, SFS_NUM_PAGES_CMDBUF);
+
+ snp_free_hv_fixed_pages(sfs_dev->page);
+
+ if (sfs_dev->misc)
+ kref_put(&misc_dev->refcount, sfs_exit);
+
+ psp->sfs_data = NULL;
+}
+
+/* Based on sev_misc_init() */
+static int sfs_misc_init(struct sfs_device *sfs)
+{
+ struct device *dev = sfs->dev;
+ int ret;
+
+ /*
+ * SFS feature support can be detected on multiple devices but the SFS
+ * FW commands must be issued on the master. During probe, we do not
+	 * know the master, hence we create /dev/sfs on the first device probe.
+ */
+ if (!misc_dev) {
+ struct miscdevice *misc;
+
+ misc_dev = kzalloc(sizeof(*misc_dev), GFP_KERNEL);
+ if (!misc_dev)
+ return -ENOMEM;
+
+ misc = &misc_dev->misc;
+ misc->minor = MISC_DYNAMIC_MINOR;
+ misc->name = "sfs";
+ misc->fops = &sfs_fops;
+ misc->mode = 0600;
+
+ ret = misc_register(misc);
+ if (ret)
+ return ret;
+
+ kref_init(&misc_dev->refcount);
+ } else {
+ kref_get(&misc_dev->refcount);
+ }
+
+ sfs->misc = misc_dev;
+ dev_dbg(dev, "registered SFS device\n");
+
+ return 0;
+}
+
+int sfs_dev_init(struct psp_device *psp)
+{
+ struct device *dev = psp->dev;
+ struct sfs_device *sfs_dev;
+ struct page *page;
+ int ret = -ENOMEM;
+
+ sfs_dev = devm_kzalloc(dev, sizeof(*sfs_dev), GFP_KERNEL);
+ if (!sfs_dev)
+ return -ENOMEM;
+
+ /*
+	 * Pre-allocate a 2MB command buffer for all SFS commands using the
+	 * SNP HV_Fixed page allocator, which also transitions the buffer to
+	 * the HV_Fixed page state if SNP is enabled.
+ */
+ page = snp_alloc_hv_fixed_pages(SFS_NUM_2MB_PAGES_CMDBUF);
+ if (!page) {
+ dev_dbg(dev, "Command Buffer HV-Fixed page allocation failed\n");
+ goto cleanup_dev;
+ }
+ sfs_dev->page = page;
+ sfs_dev->command_buf = page_address(page);
+
+ dev_dbg(dev, "Command buffer 0x%px to be marked as HV_Fixed\n", sfs_dev->command_buf);
+
+ /*
+ * SFS command buffer must be mapped as non-cacheable.
+ */
+ ret = set_memory_uc((unsigned long)sfs_dev->command_buf, SFS_NUM_PAGES_CMDBUF);
+ if (ret) {
+ dev_dbg(dev, "Set memory uc failed\n");
+ goto cleanup_cmd_buf;
+ }
+
+ dev_dbg(dev, "Command buffer 0x%px marked uncacheable\n", sfs_dev->command_buf);
+
+ psp->sfs_data = sfs_dev;
+ sfs_dev->dev = dev;
+ sfs_dev->psp = psp;
+
+ ret = sfs_misc_init(sfs_dev);
+ if (ret)
+ goto cleanup_mem_attr;
+
+ dev_notice(sfs_dev->dev, "SFS support is available\n");
+
+ return 0;
+
+cleanup_mem_attr:
+ set_memory_wb((unsigned long)sfs_dev->command_buf, SFS_NUM_PAGES_CMDBUF);
+
+cleanup_cmd_buf:
+ snp_free_hv_fixed_pages(page);
+
+cleanup_dev:
+ psp->sfs_data = NULL;
+ devm_kfree(dev, sfs_dev);
+
+ return ret;
+}
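
As a rough illustration of the character device interface added above, a
userspace tool might drive the two ioctls as in the sketch below. This is not
part of the patch: the SFSIOC* commands and the sfs_user_* structures come
from the <linux/psp-sfs.h> uapi header added by this series, but the field
layout used here is inferred from sfs_ioctl() and the payload file name is
purely hypothetical.

/* Hypothetical userspace sketch; field names inferred from sfs_ioctl(). */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/psp-sfs.h>

int main(void)
{
	struct sfs_user_get_fw_versions vers = { 0 };
	struct sfs_user_update_package pkg = { 0 };
	int fd, ret;

	fd = open("/dev/sfs", O_RDWR);	/* node registered by sfs_misc_init() */
	if (fd < 0)
		return 1;

	/* Query current firmware versions. */
	ret = ioctl(fd, SFSIOCFWVERS, &vers);
	if (!ret)
		printf("sfs_status=0x%x extended=0x%x\n",
		       vers.sfs_status, vers.sfs_extended_status);

	/* Apply an update package from /lib/firmware/amd/ (name is made up). */
	strncpy(pkg.payload_name, "sfs_update.sbin", sizeof(pkg.payload_name) - 1);
	ret = ioctl(fd, SFSIOCUPDATEPKG, &pkg);
	if (!ret)
		printf("update sfs_status=0x%x extended=0x%x\n",
		       pkg.sfs_status, pkg.sfs_extended_status);

	close(fd);
	return 0;
}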
diff --git a/drivers/crypto/ccp/sfs.h b/drivers/crypto/ccp/sfs.h
new file mode 100644
index 000000000000..97704c210efd
--- /dev/null
+++ b/drivers/crypto/ccp/sfs.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AMD Platform Security Processor (PSP) Seamless Firmware Servicing (SFS) support.
+ *
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ *
+ * Author: Ashish Kalra <ashish.kalra@amd.com>
+ */
+
+#ifndef __SFS_H__
+#define __SFS_H__
+
+#include <uapi/linux/psp-sfs.h>
+
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/psp-sev.h>
+#include <linux/psp-platform-access.h>
+#include <linux/set_memory.h>
+
+#include "psp-dev.h"
+
+struct sfs_misc_dev {
+ struct kref refcount;
+ struct miscdevice misc;
+};
+
+struct sfs_command {
+ struct psp_ext_req_buffer_hdr hdr;
+ u8 buf[PAGE_SIZE - sizeof(struct psp_ext_req_buffer_hdr)];
+ u8 sfs_buffer[];
+} __packed;
+
+struct sfs_device {
+ struct device *dev;
+ struct psp_device *psp;
+
+ struct page *page;
+ struct sfs_command *command_buf;
+
+ struct sfs_misc_dev *misc;
+};
+
+void sfs_dev_destroy(struct psp_device *psp);
+int sfs_dev_init(struct psp_device *psp);
+
+#endif /* __SFS_H__ */
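
One consequence of the struct sfs_command layout above is worth spelling out:
hdr plus the buf[] pad fill exactly the first page, so sfs_buffer starts on
the second page, which is why send_sfs_get_fw_versions() sets payload_size to
2 * PAGE_SIZE for a one-page output blob. A minimal compile-time check, if one
wanted to assert this in the driver, could look like the sketch below (not
part of the patch):

/* Sketch only: asserts the layout assumption described above. */
#include <linux/build_bug.h>
#include <linux/stddef.h>

static_assert(offsetof(struct sfs_command, sfs_buffer) == PAGE_SIZE,
	      "SFS payload area must start on the second page");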
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index 5dd3f6a4781a..37294bb74003 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -4,9 +4,9 @@ config CRYPTO_DEV_CHELSIO
depends on CHELSIO_T4
select CRYPTO_LIB_AES
select CRYPTO_LIB_GF128MUL
- select CRYPTO_SHA1
- select CRYPTO_SHA256
- select CRYPTO_SHA512
+ select CRYPTO_LIB_SHA1
+ select CRYPTO_LIB_SHA256
+ select CRYPTO_LIB_SHA512
select CRYPTO_AUTHENC
help
The Chelsio Crypto Co-processor driver for T6 adapters.
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index be21e4e2016c..22cbc343198a 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -51,7 +51,6 @@
#include <crypto/aes.h>
#include <crypto/algapi.h>
-#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
@@ -277,88 +276,60 @@ static void get_aes_decrypt_key(unsigned char *dec_key,
}
}
-static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
+static int chcr_prepare_hmac_key(const u8 *raw_key, unsigned int raw_key_len,
+ int digestsize, void *istate, void *ostate)
{
- struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
-
- switch (ds) {
+ __be32 *istate32 = istate, *ostate32 = ostate;
+ __be64 *istate64 = istate, *ostate64 = ostate;
+ union {
+ struct hmac_sha1_key sha1;
+ struct hmac_sha224_key sha224;
+ struct hmac_sha256_key sha256;
+ struct hmac_sha384_key sha384;
+ struct hmac_sha512_key sha512;
+ } k;
+
+ switch (digestsize) {
case SHA1_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha1", 0, 0);
+ hmac_sha1_preparekey(&k.sha1, raw_key, raw_key_len);
+ for (int i = 0; i < ARRAY_SIZE(k.sha1.istate.h); i++) {
+ istate32[i] = cpu_to_be32(k.sha1.istate.h[i]);
+ ostate32[i] = cpu_to_be32(k.sha1.ostate.h[i]);
+ }
break;
case SHA224_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha224", 0, 0);
+ hmac_sha224_preparekey(&k.sha224, raw_key, raw_key_len);
+ for (int i = 0; i < ARRAY_SIZE(k.sha224.key.istate.h); i++) {
+ istate32[i] = cpu_to_be32(k.sha224.key.istate.h[i]);
+ ostate32[i] = cpu_to_be32(k.sha224.key.ostate.h[i]);
+ }
break;
case SHA256_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha256", 0, 0);
+ hmac_sha256_preparekey(&k.sha256, raw_key, raw_key_len);
+ for (int i = 0; i < ARRAY_SIZE(k.sha256.key.istate.h); i++) {
+ istate32[i] = cpu_to_be32(k.sha256.key.istate.h[i]);
+ ostate32[i] = cpu_to_be32(k.sha256.key.ostate.h[i]);
+ }
break;
case SHA384_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha384", 0, 0);
+ hmac_sha384_preparekey(&k.sha384, raw_key, raw_key_len);
+ for (int i = 0; i < ARRAY_SIZE(k.sha384.key.istate.h); i++) {
+ istate64[i] = cpu_to_be64(k.sha384.key.istate.h[i]);
+ ostate64[i] = cpu_to_be64(k.sha384.key.ostate.h[i]);
+ }
break;
case SHA512_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha512", 0, 0);
+ hmac_sha512_preparekey(&k.sha512, raw_key, raw_key_len);
+ for (int i = 0; i < ARRAY_SIZE(k.sha512.key.istate.h); i++) {
+ istate64[i] = cpu_to_be64(k.sha512.key.istate.h[i]);
+ ostate64[i] = cpu_to_be64(k.sha512.key.ostate.h[i]);
+ }
break;
+ default:
+ return -EINVAL;
}
-
- return base_hash;
-}
-
-static int chcr_compute_partial_hash(struct shash_desc *desc,
- char *iopad, char *result_hash,
- int digest_size)
-{
- struct sha1_state sha1_st;
- struct sha256_state sha256_st;
- struct sha512_state sha512_st;
- int error;
-
- if (digest_size == SHA1_DIGEST_SIZE) {
- error = crypto_shash_init(desc) ?:
- crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
- crypto_shash_export_core(desc, &sha1_st);
- memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
- } else if (digest_size == SHA224_DIGEST_SIZE) {
- error = crypto_shash_init(desc) ?:
- crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
- crypto_shash_export_core(desc, &sha256_st);
- memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
-
- } else if (digest_size == SHA256_DIGEST_SIZE) {
- error = crypto_shash_init(desc) ?:
- crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
- crypto_shash_export_core(desc, &sha256_st);
- memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
-
- } else if (digest_size == SHA384_DIGEST_SIZE) {
- error = crypto_shash_init(desc) ?:
- crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
- crypto_shash_export_core(desc, &sha512_st);
- memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
-
- } else if (digest_size == SHA512_DIGEST_SIZE) {
- error = crypto_shash_init(desc) ?:
- crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
- crypto_shash_export_core(desc, &sha512_st);
- memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
- } else {
- error = -EINVAL;
- pr_err("Unknown digest size %d\n", digest_size);
- }
- return error;
-}
-
-static void chcr_change_order(char *buf, int ds)
-{
- int i;
-
- if (ds == SHA512_DIGEST_SIZE) {
- for (i = 0; i < (ds / sizeof(u64)); i++)
- *((__be64 *)buf + i) =
- cpu_to_be64(*((u64 *)buf + i));
- } else {
- for (i = 0; i < (ds / sizeof(u32)); i++)
- *((__be32 *)buf + i) =
- cpu_to_be32(*((u32 *)buf + i));
- }
+ memzero_explicit(&k, sizeof(k));
+ return 0;
}
static inline int is_hmac(struct crypto_tfm *tfm)
@@ -1547,11 +1518,6 @@ static int get_alg_config(struct algo_param *params,
return 0;
}
-static inline void chcr_free_shash(struct crypto_shash *base_hash)
-{
- crypto_free_shash(base_hash);
-}
-
/**
* create_hash_wr - Create hash work request
* @req: Cipher req base
@@ -2202,53 +2168,13 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
- unsigned int digestsize = crypto_ahash_digestsize(tfm);
- unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
- unsigned int i, err = 0, updated_digestsize;
-
- SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
/* use the key to calculate the ipad and opad. ipad will sent with the
* first request's data. opad will be sent with the final hash result
* ipad in hmacctx->ipad and opad in hmacctx->opad location
*/
- shash->tfm = hmacctx->base_hash;
- if (keylen > bs) {
- err = crypto_shash_digest(shash, key, keylen,
- hmacctx->ipad);
- if (err)
- goto out;
- keylen = digestsize;
- } else {
- memcpy(hmacctx->ipad, key, keylen);
- }
- memset(hmacctx->ipad + keylen, 0, bs - keylen);
- unsafe_memcpy(hmacctx->opad, hmacctx->ipad, bs,
- "fortified memcpy causes -Wrestrict warning");
-
- for (i = 0; i < bs / sizeof(int); i++) {
- *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
- *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
- }
-
- updated_digestsize = digestsize;
- if (digestsize == SHA224_DIGEST_SIZE)
- updated_digestsize = SHA256_DIGEST_SIZE;
- else if (digestsize == SHA384_DIGEST_SIZE)
- updated_digestsize = SHA512_DIGEST_SIZE;
- err = chcr_compute_partial_hash(shash, hmacctx->ipad,
- hmacctx->ipad, digestsize);
- if (err)
- goto out;
- chcr_change_order(hmacctx->ipad, updated_digestsize);
-
- err = chcr_compute_partial_hash(shash, hmacctx->opad,
- hmacctx->opad, digestsize);
- if (err)
- goto out;
- chcr_change_order(hmacctx->opad, updated_digestsize);
-out:
- return err;
+ return chcr_prepare_hmac_key(key, keylen, crypto_ahash_digestsize(tfm),
+ hmacctx->ipad, hmacctx->opad);
}
static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
@@ -2344,30 +2270,11 @@ static int chcr_hmac_init(struct ahash_request *areq)
static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
{
- struct chcr_context *ctx = crypto_tfm_ctx(tfm);
- struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
- unsigned int digestsize =
- crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
-
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct chcr_ahash_req_ctx));
- hmacctx->base_hash = chcr_alloc_shash(digestsize);
- if (IS_ERR(hmacctx->base_hash))
- return PTR_ERR(hmacctx->base_hash);
return chcr_device_init(crypto_tfm_ctx(tfm));
}
-static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
-{
- struct chcr_context *ctx = crypto_tfm_ctx(tfm);
- struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
-
- if (hmacctx->base_hash) {
- chcr_free_shash(hmacctx->base_hash);
- hmacctx->base_hash = NULL;
- }
-}
-
inline void chcr_aead_common_exit(struct aead_request *req)
{
struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
@@ -3557,15 +3464,12 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
/* it contains auth and cipher key both*/
struct crypto_authenc_keys keys;
- unsigned int bs, subtype;
+ unsigned int subtype;
unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
- int err = 0, i, key_ctx_len = 0;
+ int err = 0, key_ctx_len = 0;
unsigned char ck_size = 0;
- unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
- struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
struct algo_param param;
int align;
- u8 *o_ptr = NULL;
crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
@@ -3613,68 +3517,26 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
aeadctx->enckey_len << 3);
}
- base_hash = chcr_alloc_shash(max_authsize);
- if (IS_ERR(base_hash)) {
- pr_err("Base driver cannot be loaded\n");
+
+ align = KEYCTX_ALIGN_PAD(max_authsize);
+ err = chcr_prepare_hmac_key(keys.authkey, keys.authkeylen, max_authsize,
+ actx->h_iopad,
+ actx->h_iopad + param.result_size + align);
+ if (err)
goto out;
- }
- {
- SHASH_DESC_ON_STACK(shash, base_hash);
-
- shash->tfm = base_hash;
- bs = crypto_shash_blocksize(base_hash);
- align = KEYCTX_ALIGN_PAD(max_authsize);
- o_ptr = actx->h_iopad + param.result_size + align;
-
- if (keys.authkeylen > bs) {
- err = crypto_shash_digest(shash, keys.authkey,
- keys.authkeylen,
- o_ptr);
- if (err) {
- pr_err("Base driver cannot be loaded\n");
- goto out;
- }
- keys.authkeylen = max_authsize;
- } else
- memcpy(o_ptr, keys.authkey, keys.authkeylen);
-
- /* Compute the ipad-digest*/
- memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
- memcpy(pad, o_ptr, keys.authkeylen);
- for (i = 0; i < bs >> 2; i++)
- *((unsigned int *)pad + i) ^= IPAD_DATA;
-
- if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
- max_authsize))
- goto out;
- /* Compute the opad-digest */
- memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
- memcpy(pad, o_ptr, keys.authkeylen);
- for (i = 0; i < bs >> 2; i++)
- *((unsigned int *)pad + i) ^= OPAD_DATA;
- if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
- goto out;
+ key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16) +
+ (param.result_size + align) * 2;
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size, 0, 1,
+ key_ctx_len >> 4);
+ actx->auth_mode = param.auth_mode;
+
+ memzero_explicit(&keys, sizeof(keys));
+ return 0;
- /* convert the ipad and opad digest to network order */
- chcr_change_order(actx->h_iopad, param.result_size);
- chcr_change_order(o_ptr, param.result_size);
- key_ctx_len = sizeof(struct _key_ctx) +
- roundup(keys.enckeylen, 16) +
- (param.result_size + align) * 2;
- aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
- 0, 1, key_ctx_len >> 4);
- actx->auth_mode = param.auth_mode;
- chcr_free_shash(base_hash);
-
- memzero_explicit(&keys, sizeof(keys));
- return 0;
- }
out:
aeadctx->enckey_len = 0;
memzero_explicit(&keys, sizeof(keys));
- if (!IS_ERR(base_hash))
- chcr_free_shash(base_hash);
return -EINVAL;
}
@@ -4490,7 +4352,6 @@ static int chcr_register_alg(void)
if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
a_hash->halg.base.cra_init = chcr_hmac_cra_init;
- a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
a_hash->init = chcr_hmac_init;
a_hash->setkey = chcr_ahash_setkey;
a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 1d693b8436e6..e1e79e5f01e7 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -241,7 +241,6 @@ struct chcr_aead_ctx {
};
struct hmac_ctx {
- struct crypto_shash *base_hash;
u8 ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
u8 opad[CHCR_HASH_MAX_BLOCK_SIZE_128];
};
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index 4137a8bf131f..4835bdebdbb3 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -69,7 +69,6 @@ config CRYPTO_DEV_HISI_HPRE
select CRYPTO_DEV_HISI_QM
select CRYPTO_DH
select CRYPTO_RSA
- select CRYPTO_CURVE25519
select CRYPTO_ECDH
help
Support for HiSilicon HPRE(High Performance RSA Engine)
diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c
index 45e130b901eb..17eb236e9ee4 100644
--- a/drivers/crypto/hisilicon/debugfs.c
+++ b/drivers/crypto/hisilicon/debugfs.c
@@ -888,6 +888,7 @@ static int qm_diff_regs_init(struct hisi_qm *qm,
dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
ret = PTR_ERR(qm->debug.acc_diff_regs);
qm->debug.acc_diff_regs = NULL;
+ qm->debug.qm_diff_regs = NULL;
return ret;
}
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 1550c3818383..21ccf879f70c 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <crypto/akcipher.h>
-#include <crypto/curve25519.h>
#include <crypto/dh.h>
#include <crypto/ecc_curve.h>
#include <crypto/ecdh.h>
@@ -106,16 +105,6 @@ struct hpre_ecdh_ctx {
dma_addr_t dma_g;
};
-struct hpre_curve25519_ctx {
- /* low address: p->a->k */
- unsigned char *p;
- dma_addr_t dma_p;
-
- /* gx coordinate */
- unsigned char *g;
- dma_addr_t dma_g;
-};
-
struct hpre_ctx {
struct hisi_qp *qp;
struct device *dev;
@@ -129,7 +118,6 @@ struct hpre_ctx {
struct hpre_rsa_ctx rsa;
struct hpre_dh_ctx dh;
struct hpre_ecdh_ctx ecdh;
- struct hpre_curve25519_ctx curve25519;
};
/* for ecc algorithms */
unsigned int curve_id;
@@ -146,7 +134,6 @@ struct hpre_asym_request {
struct akcipher_request *rsa;
struct kpp_request *dh;
struct kpp_request *ecdh;
- struct kpp_request *curve25519;
} areq;
int err;
int req_id;
@@ -1214,8 +1201,7 @@ static void hpre_key_to_big_end(u8 *data, int len)
}
}
-static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
- bool is_ecdh)
+static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
struct device *dev = ctx->dev;
unsigned int sz = ctx->key_sz;
@@ -1224,17 +1210,11 @@ static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
if (is_clear_all)
hisi_qm_stop_qp(ctx->qp);
- if (is_ecdh && ctx->ecdh.p) {
+ if (ctx->ecdh.p) {
/* ecdh: p->a->k->b */
memzero_explicit(ctx->ecdh.p + shift, sz);
dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
ctx->ecdh.p = NULL;
- } else if (!is_ecdh && ctx->curve25519.p) {
- /* curve25519: p->a->k */
- memzero_explicit(ctx->curve25519.p + shift, sz);
- dma_free_coherent(dev, sz << 2, ctx->curve25519.p,
- ctx->curve25519.dma_p);
- ctx->curve25519.p = NULL;
}
hpre_ctx_clear(ctx, is_clear_all);
@@ -1432,7 +1412,7 @@ static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
return -EINVAL;
}
- hpre_ecc_clear_ctx(ctx, false, true);
+ hpre_ecc_clear_ctx(ctx, false);
ret = hpre_ecdh_set_param(ctx, &params);
if (ret < 0) {
@@ -1683,337 +1663,7 @@ static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- hpre_ecc_clear_ctx(ctx, true, true);
-}
-
-static void hpre_curve25519_fill_curve(struct hpre_ctx *ctx, const void *buf,
- unsigned int len)
-{
- u8 secret[CURVE25519_KEY_SIZE] = { 0 };
- unsigned int sz = ctx->key_sz;
- const struct ecc_curve *curve;
- unsigned int shift = sz << 1;
- void *p;
-
- /*
- * The key from 'buf' is in little-endian, we should preprocess it as
- * the description in rfc7748: "k[0] &= 248, k[31] &= 127, k[31] |= 64",
- * then convert it to big endian. Only in this way, the result can be
- * the same as the software curve-25519 that exists in crypto.
- */
- memcpy(secret, buf, len);
- curve25519_clamp_secret(secret);
- hpre_key_to_big_end(secret, CURVE25519_KEY_SIZE);
-
- p = ctx->curve25519.p + sz - len;
-
- curve = ecc_get_curve25519();
-
- /* fill curve parameters */
- fill_curve_param(p, curve->p, len, curve->g.ndigits);
- fill_curve_param(p + sz, curve->a, len, curve->g.ndigits);
- memcpy(p + shift, secret, len);
- fill_curve_param(p + shift + sz, curve->g.x, len, curve->g.ndigits);
- memzero_explicit(secret, CURVE25519_KEY_SIZE);
-}
-
-static int hpre_curve25519_set_param(struct hpre_ctx *ctx, const void *buf,
- unsigned int len)
-{
- struct device *dev = ctx->dev;
- unsigned int sz = ctx->key_sz;
- unsigned int shift = sz << 1;
-
- /* p->a->k->gx */
- if (!ctx->curve25519.p) {
- ctx->curve25519.p = dma_alloc_coherent(dev, sz << 2,
- &ctx->curve25519.dma_p,
- GFP_KERNEL);
- if (!ctx->curve25519.p)
- return -ENOMEM;
- }
-
- ctx->curve25519.g = ctx->curve25519.p + shift + sz;
- ctx->curve25519.dma_g = ctx->curve25519.dma_p + shift + sz;
-
- hpre_curve25519_fill_curve(ctx, buf, len);
-
- return 0;
-}
-
-static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
- unsigned int len)
-{
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- struct device *dev = ctx->dev;
- int ret = -EINVAL;
-
- if (len != CURVE25519_KEY_SIZE ||
- !crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) {
- dev_err(dev, "key is null or key len is not 32bytes!\n");
- return ret;
- }
-
- /* Free old secret if any */
- hpre_ecc_clear_ctx(ctx, false, false);
-
- ctx->key_sz = CURVE25519_KEY_SIZE;
- ret = hpre_curve25519_set_param(ctx, buf, CURVE25519_KEY_SIZE);
- if (ret) {
- dev_err(dev, "failed to set curve25519 param, ret = %d!\n", ret);
- hpre_ecc_clear_ctx(ctx, false, false);
- return ret;
- }
-
- return 0;
-}
-
-static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx,
- struct hpre_asym_request *req,
- struct scatterlist *dst,
- struct scatterlist *src)
-{
- struct device *dev = ctx->dev;
- struct hpre_sqe *sqe = &req->req;
- dma_addr_t dma;
-
- dma = le64_to_cpu(sqe->in);
- if (unlikely(dma_mapping_error(dev, dma)))
- return;
-
- if (src && req->src)
- dma_free_coherent(dev, ctx->key_sz, req->src, dma);
-
- dma = le64_to_cpu(sqe->out);
- if (unlikely(dma_mapping_error(dev, dma)))
- return;
-
- if (req->dst)
- dma_free_coherent(dev, ctx->key_sz, req->dst, dma);
- if (dst)
- dma_unmap_single(dev, dma, ctx->key_sz, DMA_FROM_DEVICE);
-}
-
-static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp)
-{
- struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
- struct hpre_asym_request *req = NULL;
- struct kpp_request *areq;
- u64 overtime_thrhld;
- int ret;
-
- ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
- areq = req->areq.curve25519;
- areq->dst_len = ctx->key_sz;
-
- overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
- if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
- atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
-
- /* Do unmap before data processing */
- hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
-
- hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE);
-
- kpp_request_complete(areq, ret);
-
- atomic64_inc(&dfx[HPRE_RECV_CNT].value);
-}
-
-static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx,
- struct kpp_request *req)
-{
- struct hpre_asym_request *h_req;
- struct hpre_sqe *msg;
- int req_id;
- void *tmp;
-
- if (unlikely(req->dst_len < ctx->key_sz)) {
- req->dst_len = ctx->key_sz;
- return -EINVAL;
- }
-
- tmp = kpp_request_ctx(req);
- h_req = PTR_ALIGN(tmp, hpre_align_sz());
- h_req->cb = hpre_curve25519_cb;
- h_req->areq.curve25519 = req;
- msg = &h_req->req;
- memset(msg, 0, sizeof(*msg));
- msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
- msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
- msg->key = cpu_to_le64(ctx->curve25519.dma_p);
-
- msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
- msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
- h_req->ctx = ctx;
-
- req_id = hpre_add_req_to_ctx(h_req);
- if (req_id < 0)
- return -EBUSY;
-
- msg->tag = cpu_to_le16((u16)req_id);
- return 0;
-}
-
-static void hpre_curve25519_src_modulo_p(u8 *ptr)
-{
- int i;
-
- for (i = 0; i < CURVE25519_KEY_SIZE - 1; i++)
- ptr[i] = 0;
-
- /* The modulus is ptr's last byte minus '0xed'(last byte of p) */
- ptr[i] -= 0xed;
-}
-
-static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len)
-{
- struct hpre_sqe *msg = &hpre_req->req;
- struct hpre_ctx *ctx = hpre_req->ctx;
- struct device *dev = ctx->dev;
- u8 p[CURVE25519_KEY_SIZE] = { 0 };
- const struct ecc_curve *curve;
- dma_addr_t dma = 0;
- u8 *ptr;
-
- if (len != CURVE25519_KEY_SIZE) {
- dev_err(dev, "sourc_data len is not 32bytes, len = %u!\n", len);
- return -EINVAL;
- }
-
- ptr = dma_alloc_coherent(dev, ctx->key_sz, &dma, GFP_KERNEL);
- if (unlikely(!ptr))
- return -ENOMEM;
-
- scatterwalk_map_and_copy(ptr, data, 0, len, 0);
-
- if (!crypto_memneq(ptr, curve25519_null_point, CURVE25519_KEY_SIZE)) {
- dev_err(dev, "gx is null!\n");
- goto err;
- }
-
- /*
- * Src_data(gx) is in little-endian order, MSB in the final byte should
- * be masked as described in RFC7748, then transform it to big-endian
- * form, then hisi_hpre can use the data.
- */
- ptr[31] &= 0x7f;
- hpre_key_to_big_end(ptr, CURVE25519_KEY_SIZE);
-
- curve = ecc_get_curve25519();
-
- fill_curve_param(p, curve->p, CURVE25519_KEY_SIZE, curve->g.ndigits);
-
- /*
- * When src_data equals (2^255 - 19) ~ (2^255 - 1), it is out of p,
- * we get its modulus to p, and then use it.
- */
- if (memcmp(ptr, p, ctx->key_sz) == 0) {
- dev_err(dev, "gx is p!\n");
- goto err;
- } else if (memcmp(ptr, p, ctx->key_sz) > 0) {
- hpre_curve25519_src_modulo_p(ptr);
- }
-
- hpre_req->src = ptr;
- msg->in = cpu_to_le64(dma);
- return 0;
-
-err:
- dma_free_coherent(dev, ctx->key_sz, ptr, dma);
- return -EINVAL;
-}
-
-static int hpre_curve25519_dst_init(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len)
-{
- struct hpre_sqe *msg = &hpre_req->req;
- struct hpre_ctx *ctx = hpre_req->ctx;
- struct device *dev = ctx->dev;
- dma_addr_t dma;
-
- if (!data || !sg_is_last(data) || len != ctx->key_sz) {
- dev_err(dev, "data or data length is illegal!\n");
- return -EINVAL;
- }
-
- hpre_req->dst = NULL;
- dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, dma))) {
- dev_err(dev, "dma map data err!\n");
- return -ENOMEM;
- }
-
- msg->out = cpu_to_le64(dma);
- return 0;
-}
-
-static int hpre_curve25519_compute_value(struct kpp_request *req)
-{
- struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- struct device *dev = ctx->dev;
- void *tmp = kpp_request_ctx(req);
- struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
- struct hpre_sqe *msg = &hpre_req->req;
- int ret;
-
- ret = hpre_curve25519_msg_request_set(ctx, req);
- if (unlikely(ret)) {
- dev_err(dev, "failed to set curve25519 request, ret = %d!\n", ret);
- return ret;
- }
-
- if (req->src) {
- ret = hpre_curve25519_src_init(hpre_req, req->src, req->src_len);
- if (unlikely(ret)) {
- dev_err(dev, "failed to init src data, ret = %d!\n",
- ret);
- goto clear_all;
- }
- } else {
- msg->in = cpu_to_le64(ctx->curve25519.dma_g);
- }
-
- ret = hpre_curve25519_dst_init(hpre_req, req->dst, req->dst_len);
- if (unlikely(ret)) {
- dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
- goto clear_all;
- }
-
- msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_CURVE25519_MUL);
- ret = hpre_send(ctx, msg);
- if (likely(!ret))
- return -EINPROGRESS;
-
-clear_all:
- hpre_rm_req_from_ctx(hpre_req);
- hpre_curve25519_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
- return ret;
-}
-
-static unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm)
-{
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
-
- return ctx->key_sz;
-}
-
-static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm)
-{
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
-
- kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
-
- return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
-}
-
-static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm)
-{
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
-
- hpre_ecc_clear_ctx(ctx, true, false);
+ hpre_ecc_clear_ctx(ctx, true);
}
static struct akcipher_alg rsa = {
@@ -2095,22 +1745,6 @@ static struct kpp_alg ecdh_curves[] = {
}
};
-static struct kpp_alg curve25519_alg = {
- .set_secret = hpre_curve25519_set_secret,
- .generate_public_key = hpre_curve25519_compute_value,
- .compute_shared_secret = hpre_curve25519_compute_value,
- .max_size = hpre_curve25519_max_size,
- .init = hpre_curve25519_init_tfm,
- .exit = hpre_curve25519_exit_tfm,
- .base = {
- .cra_ctxsize = sizeof(struct hpre_ctx),
- .cra_priority = HPRE_CRYPTO_ALG_PRI,
- .cra_name = "curve25519",
- .cra_driver_name = "hpre-curve25519",
- .cra_module = THIS_MODULE,
- },
-};
-
static int hpre_register_rsa(struct hisi_qm *qm)
{
int ret;
@@ -2192,28 +1826,6 @@ static void hpre_unregister_ecdh(struct hisi_qm *qm)
crypto_unregister_kpp(&ecdh_curves[i]);
}
-static int hpre_register_x25519(struct hisi_qm *qm)
-{
- int ret;
-
- if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
- return 0;
-
- ret = crypto_register_kpp(&curve25519_alg);
- if (ret)
- dev_err(&qm->pdev->dev, "failed to register x25519 (%d)!\n", ret);
-
- return ret;
-}
-
-static void hpre_unregister_x25519(struct hisi_qm *qm)
-{
- if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
- return;
-
- crypto_unregister_kpp(&curve25519_alg);
-}
-
int hpre_algs_register(struct hisi_qm *qm)
{
int ret = 0;
@@ -2236,17 +1848,11 @@ int hpre_algs_register(struct hisi_qm *qm)
if (ret)
goto unreg_dh;
- ret = hpre_register_x25519(qm);
- if (ret)
- goto unreg_ecdh;
-
hpre_available_devs++;
mutex_unlock(&hpre_algs_lock);
return ret;
-unreg_ecdh:
- hpre_unregister_ecdh(qm);
unreg_dh:
hpre_unregister_dh(qm);
unreg_rsa:
@@ -2262,7 +1868,6 @@ void hpre_algs_unregister(struct hisi_qm *qm)
if (--hpre_available_devs)
goto unlock;
- hpre_unregister_x25519(qm);
hpre_unregister_ecdh(qm);
hpre_unregister_dh(qm);
hpre_unregister_rsa(qm);
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index f5b47e5ff48a..b94fecd765ee 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -39,6 +39,7 @@
#define HPRE_HAC_RAS_NFE_ENB 0x301414
#define HPRE_HAC_RAS_FE_ENB 0x301418
#define HPRE_HAC_INT_SET 0x301500
+#define HPRE_AXI_ERROR_MASK GENMASK(21, 10)
#define HPRE_RNG_TIMEOUT_NUM 0x301A34
#define HPRE_CORE_INT_ENABLE 0
#define HPRE_RDCHN_INI_ST 0x301a00
@@ -78,6 +79,11 @@
#define HPRE_PREFETCH_ENABLE (~(BIT(0) | BIT(30)))
#define HPRE_PREFETCH_DISABLE BIT(30)
#define HPRE_SVA_DISABLE_READY (BIT(4) | BIT(8))
+#define HPRE_SVA_PREFTCH_DFX4 0x301144
+#define HPRE_WAIT_SVA_READY 500000
+#define HPRE_READ_SVA_STATUS_TIMES 3
+#define HPRE_WAIT_US_MIN 10
+#define HPRE_WAIT_US_MAX 20
/* clock gate */
#define HPRE_CLKGATE_CTL 0x301a10
@@ -466,6 +472,33 @@ struct hisi_qp *hpre_create_qp(u8 type)
return NULL;
}
+static int hpre_wait_sva_ready(struct hisi_qm *qm)
+{
+ u32 val, try_times = 0;
+ u8 count = 0;
+
+ /*
+	 * Poll the register every 10-20us. If it reads 0 three consecutive
+	 * times, the SVA module is ready.
+ */
+ do {
+ val = readl(qm->io_base + HPRE_SVA_PREFTCH_DFX4);
+ if (val)
+ count = 0;
+ else if (++count == HPRE_READ_SVA_STATUS_TIMES)
+ break;
+
+ usleep_range(HPRE_WAIT_US_MIN, HPRE_WAIT_US_MAX);
+ } while (++try_times < HPRE_WAIT_SVA_READY);
+
+ if (try_times == HPRE_WAIT_SVA_READY) {
+ pci_err(qm->pdev, "failed to wait sva prefetch ready\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
static void hpre_config_pasid(struct hisi_qm *qm)
{
u32 val1, val2;
@@ -563,7 +596,7 @@ static void disable_flr_of_bme(struct hisi_qm *qm)
writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);
}
-static void hpre_open_sva_prefetch(struct hisi_qm *qm)
+static void hpre_close_sva_prefetch(struct hisi_qm *qm)
{
u32 val;
int ret;
@@ -571,20 +604,21 @@ static void hpre_open_sva_prefetch(struct hisi_qm *qm)
if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
- /* Enable prefetch */
val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
- val &= HPRE_PREFETCH_ENABLE;
+ val |= HPRE_PREFETCH_DISABLE;
writel(val, qm->io_base + HPRE_PREFETCH_CFG);
- ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_PREFETCH_CFG,
- val, !(val & HPRE_PREFETCH_DISABLE),
+ ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_SVA_PREFTCH_DFX,
+ val, !(val & HPRE_SVA_DISABLE_READY),
HPRE_REG_RD_INTVRL_US,
HPRE_REG_RD_TMOUT_US);
if (ret)
- pci_err(qm->pdev, "failed to open sva prefetch\n");
+ pci_err(qm->pdev, "failed to close sva prefetch\n");
+
+ (void)hpre_wait_sva_ready(qm);
}
-static void hpre_close_sva_prefetch(struct hisi_qm *qm)
+static void hpre_open_sva_prefetch(struct hisi_qm *qm)
{
u32 val;
int ret;
@@ -592,16 +626,24 @@ static void hpre_close_sva_prefetch(struct hisi_qm *qm)
if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
+ /* Enable prefetch */
val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
- val |= HPRE_PREFETCH_DISABLE;
+ val &= HPRE_PREFETCH_ENABLE;
writel(val, qm->io_base + HPRE_PREFETCH_CFG);
- ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_SVA_PREFTCH_DFX,
- val, !(val & HPRE_SVA_DISABLE_READY),
+ ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_PREFETCH_CFG,
+ val, !(val & HPRE_PREFETCH_DISABLE),
HPRE_REG_RD_INTVRL_US,
HPRE_REG_RD_TMOUT_US);
+ if (ret) {
+ pci_err(qm->pdev, "failed to open sva prefetch\n");
+ hpre_close_sva_prefetch(qm);
+ return;
+ }
+
+ ret = hpre_wait_sva_ready(qm);
if (ret)
- pci_err(qm->pdev, "failed to close sva prefetch\n");
+ hpre_close_sva_prefetch(qm);
}
static void hpre_enable_clock_gate(struct hisi_qm *qm)
@@ -721,6 +763,7 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
/* Config data buffer pasid needed by Kunpeng 920 */
hpre_config_pasid(qm);
+ hpre_open_sva_prefetch(qm);
hpre_enable_clock_gate(qm);
@@ -756,8 +799,7 @@ static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
val1 = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
if (enable) {
val1 |= HPRE_AM_OOO_SHUTDOWN_ENABLE;
- val2 = hisi_qm_get_hw_info(qm, hpre_basic_info,
- HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ val2 = qm->err_info.dev_err.shutdown_mask;
} else {
val1 &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE;
val2 = 0x0;
@@ -771,38 +813,33 @@ static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
static void hpre_hw_error_disable(struct hisi_qm *qm)
{
- u32 ce, nfe;
-
- ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
- nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
/* disable hpre hw error interrupts */
- writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_INT_MASK);
+ writel(err_mask, qm->io_base + HPRE_INT_MASK);
/* disable HPRE block master OOO when nfe occurs on Kunpeng930 */
hpre_master_ooo_ctrl(qm, false);
}
static void hpre_hw_error_enable(struct hisi_qm *qm)
{
- u32 ce, nfe, err_en;
-
- ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
- nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
/* clear HPRE hw error source if having */
- writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_HAC_SOURCE_INT);
+ writel(err_mask, qm->io_base + HPRE_HAC_SOURCE_INT);
/* configure error type */
- writel(ce, qm->io_base + HPRE_RAS_CE_ENB);
- writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
- writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);
+ writel(dev_err->ce, qm->io_base + HPRE_RAS_CE_ENB);
+ writel(dev_err->nfe, qm->io_base + HPRE_RAS_NFE_ENB);
+ writel(dev_err->fe, qm->io_base + HPRE_RAS_FE_ENB);
/* enable HPRE block master OOO when nfe occurs on Kunpeng930 */
hpre_master_ooo_ctrl(qm, true);
/* enable hpre hw error interrupts */
- err_en = ce | nfe | HPRE_HAC_RAS_FE_ENABLE;
- writel(~err_en, qm->io_base + HPRE_INT_MASK);
+ writel(~err_mask, qm->io_base + HPRE_INT_MASK);
}
static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
@@ -1171,7 +1208,7 @@ static int hpre_pre_store_cap_reg(struct hisi_qm *qm)
size_t i, size;
size = ARRAY_SIZE(hpre_cap_query_info);
- hpre_cap = devm_kzalloc(dev, sizeof(*hpre_cap) * size, GFP_KERNEL);
+ hpre_cap = devm_kcalloc(dev, size, sizeof(*hpre_cap), GFP_KERNEL);
if (!hpre_cap)
return -ENOMEM;
@@ -1357,12 +1394,20 @@ static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
static void hpre_disable_error_report(struct hisi_qm *qm, u32 err_type)
{
- u32 nfe_mask;
+ u32 nfe_mask = qm->err_info.dev_err.nfe;
- nfe_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
writel(nfe_mask & (~err_type), qm->io_base + HPRE_RAS_NFE_ENB);
}
+static void hpre_enable_error_report(struct hisi_qm *qm)
+{
+ u32 nfe_mask = qm->err_info.dev_err.nfe;
+ u32 ce_mask = qm->err_info.dev_err.ce;
+
+ writel(nfe_mask, qm->io_base + HPRE_RAS_NFE_ENB);
+ writel(ce_mask, qm->io_base + HPRE_RAS_CE_ENB);
+}
+
static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
{
u32 value;
@@ -1380,16 +1425,18 @@ static enum acc_err_result hpre_get_err_result(struct hisi_qm *qm)
err_status = hpre_get_hw_err_status(qm);
if (err_status) {
- if (err_status & qm->err_info.ecc_2bits_mask)
+ if (err_status & qm->err_info.dev_err.ecc_2bits_mask)
qm->err_status.is_dev_ecc_mbit = true;
hpre_log_hw_error(qm, err_status);
- if (err_status & qm->err_info.dev_reset_mask) {
+ if (err_status & qm->err_info.dev_err.reset_mask) {
/* Disable the same error reporting until device is recovered. */
hpre_disable_error_report(qm, err_status);
return ACC_ERR_NEED_RESET;
}
hpre_clear_hw_err_status(qm, err_status);
+		/* The firmware may have disabled error reporting; re-enable it. */
+ hpre_enable_error_report(qm);
}
return ACC_ERR_RECOVERED;
@@ -1400,28 +1447,64 @@ static bool hpre_dev_is_abnormal(struct hisi_qm *qm)
u32 err_status;
err_status = hpre_get_hw_err_status(qm);
- if (err_status & qm->err_info.dev_shutdown_mask)
+ if (err_status & qm->err_info.dev_err.shutdown_mask)
return true;
return false;
}
+static void hpre_disable_axi_error(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
+ u32 val;
+
+ val = ~(err_mask & (~HPRE_AXI_ERROR_MASK));
+ writel(val, qm->io_base + HPRE_INT_MASK);
+
+ if (qm->ver > QM_HW_V2)
+ writel(dev_err->shutdown_mask & (~HPRE_AXI_ERROR_MASK),
+ qm->io_base + HPRE_OOO_SHUTDOWN_SEL);
+}
+
+static void hpre_enable_axi_error(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
+
+ /* clear axi error source */
+ writel(HPRE_AXI_ERROR_MASK, qm->io_base + HPRE_HAC_SOURCE_INT);
+
+ writel(~err_mask, qm->io_base + HPRE_INT_MASK);
+
+ if (qm->ver > QM_HW_V2)
+ writel(dev_err->shutdown_mask, qm->io_base + HPRE_OOO_SHUTDOWN_SEL);
+}
+
static void hpre_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
+ struct hisi_qm_err_mask *qm_err = &err_info->qm_err;
+ struct hisi_qm_err_mask *dev_err = &err_info->dev_err;
+
+ qm_err->fe = HPRE_HAC_RAS_FE_ENABLE;
+ qm_err->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_CE_MASK_CAP, qm->cap_ver);
+ qm_err->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_NFE_MASK_CAP, qm->cap_ver);
+ qm_err->shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ qm_err->reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_QM_RESET_MASK_CAP, qm->cap_ver);
+ qm_err->ecc_2bits_mask = QM_ECC_MBIT;
+
+ dev_err->fe = HPRE_HAC_RAS_FE_ENABLE;
+ dev_err->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
+ dev_err->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+ dev_err->shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ dev_err->reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_RESET_MASK_CAP, qm->cap_ver);
+ dev_err->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR;
- err_info->fe = HPRE_HAC_RAS_FE_ENABLE;
- err_info->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_CE_MASK_CAP, qm->cap_ver);
- err_info->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_NFE_MASK_CAP, qm->cap_ver);
- err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR;
- err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
- HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
- err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
- HPRE_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
- err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
- HPRE_QM_RESET_MASK_CAP, qm->cap_ver);
- err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
- HPRE_RESET_MASK_CAP, qm->cap_ver);
err_info->msi_wr_port = HPRE_WR_MSI_PORT;
err_info->acpi_rst = "HRST";
}
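
For readers following the refactor in this hunk: the flat ce/nfe/fe and
*_mask fields on hisi_qm_err_info are replaced by two per-domain mask sets,
qm_err and dev_err. The authoritative definition of the container lives in
the hisi_qm header, which is not shown here; a sketch consistent with the
accesses above would be:

/* Hypothetical sketch of the mask container implied by this patch;
 * the real definition is in the hisi_qm header, not this hunk. */
struct hisi_qm_err_mask {
	u32 fe;
	u32 ce;
	u32 nfe;
	u32 ecc_2bits_mask;
	u32 shutdown_mask;
	u32 reset_mask;
};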
@@ -1439,6 +1522,8 @@ static const struct hisi_qm_err_ini hpre_err_ini = {
.err_info_init = hpre_err_info_init,
.get_err_result = hpre_get_err_result,
.dev_is_abnormal = hpre_dev_is_abnormal,
+ .disable_axi_error = hpre_disable_axi_error,
+ .enable_axi_error = hpre_enable_axi_error,
};
static int hpre_pf_probe_init(struct hpre *hpre)
@@ -1450,8 +1535,6 @@ static int hpre_pf_probe_init(struct hpre *hpre)
if (ret)
return ret;
- hpre_open_sva_prefetch(qm);
-
hisi_qm_dev_err_init(qm);
ret = hpre_show_last_regs_init(qm);
if (ret)
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 2e4ee7ecfdfb..a5b96adf2d1e 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -45,6 +45,8 @@
#define QM_SQ_TYPE_MASK GENMASK(3, 0)
#define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc).w11) >> 6) & 0x1)
+#define QM_SQC_DISABLE_QP (1U << 6)
+#define QM_XQC_RANDOM_DATA 0xaaaa
/* cqc shift */
#define QM_CQ_HOP_NUM_SHIFT 0
@@ -145,9 +147,9 @@
#define QM_RAS_CE_TIMES_PER_IRQ 1
#define QM_OOO_SHUTDOWN_SEL 0x1040f8
#define QM_AXI_RRESP_ERR BIT(0)
-#define QM_ECC_MBIT BIT(2)
#define QM_DB_TIMEOUT BIT(10)
#define QM_OF_FIFO_OF BIT(11)
+#define QM_RAS_AXI_ERROR (BIT(0) | BIT(1) | BIT(12))
#define QM_RESET_WAIT_TIMEOUT 400
#define QM_PEH_VENDOR_ID 0x1000d8
@@ -163,7 +165,6 @@
#define ACC_MASTER_TRANS_RETURN 0x300150
#define ACC_MASTER_GLOBAL_CTRL 0x300000
#define ACC_AM_CFG_PORT_WR_EN 0x30001c
-#define QM_RAS_NFE_MBIT_DISABLE ~QM_ECC_MBIT
#define ACC_AM_ROB_ECC_INT_STS 0x300104
#define ACC_ROB_ECC_ERR_MULTPL BIT(1)
#define QM_MSI_CAP_ENABLE BIT(16)
@@ -520,7 +521,7 @@ static bool qm_check_dev_error(struct hisi_qm *qm)
return false;
err_status = qm_get_hw_error_status(pf_qm);
- if (err_status & pf_qm->err_info.qm_shutdown_mask)
+ if (err_status & pf_qm->err_info.qm_err.shutdown_mask)
return true;
if (pf_qm->err_ini->dev_is_abnormal)
@@ -1395,17 +1396,17 @@ static void qm_hw_error_init_v1(struct hisi_qm *qm)
static void qm_hw_error_cfg(struct hisi_qm *qm)
{
- struct hisi_qm_err_info *err_info = &qm->err_info;
+ struct hisi_qm_err_mask *qm_err = &qm->err_info.qm_err;
- qm->error_mask = err_info->nfe | err_info->ce | err_info->fe;
+ qm->error_mask = qm_err->nfe | qm_err->ce | qm_err->fe;
/* clear QM hw residual error source */
writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE);
/* configure error type */
- writel(err_info->ce, qm->io_base + QM_RAS_CE_ENABLE);
+ writel(qm_err->ce, qm->io_base + QM_RAS_CE_ENABLE);
writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
- writel(err_info->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
- writel(err_info->fe, qm->io_base + QM_RAS_FE_ENABLE);
+ writel(qm_err->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(qm_err->fe, qm->io_base + QM_RAS_FE_ENABLE);
}
static void qm_hw_error_init_v2(struct hisi_qm *qm)
@@ -1434,7 +1435,7 @@ static void qm_hw_error_init_v3(struct hisi_qm *qm)
qm_hw_error_cfg(qm);
/* enable close master ooo when hardware error happened */
- writel(qm->err_info.qm_shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);
+ writel(qm->err_info.qm_err.shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);
irq_unmask = ~qm->error_mask;
irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
@@ -1496,6 +1497,7 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
{
+ struct hisi_qm_err_mask *qm_err = &qm->err_info.qm_err;
u32 error_status;
error_status = qm_get_hw_error_status(qm);
@@ -1504,17 +1506,16 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
qm->err_status.is_qm_ecc_mbit = true;
qm_log_hw_error(qm, error_status);
- if (error_status & qm->err_info.qm_reset_mask) {
+ if (error_status & qm_err->reset_mask) {
/* Disable the same error reporting until device is recovered. */
- writel(qm->err_info.nfe & (~error_status),
- qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(qm_err->nfe & (~error_status), qm->io_base + QM_RAS_NFE_ENABLE);
return ACC_ERR_NEED_RESET;
}
/* Clear error source if not need reset. */
writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
- writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE);
- writel(qm->err_info.ce, qm->io_base + QM_RAS_CE_ENABLE);
+ writel(qm_err->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(qm_err->ce, qm->io_base + QM_RAS_CE_ENABLE);
}
return ACC_ERR_RECOVERED;
@@ -2742,6 +2743,27 @@ static void qm_remove_uacce(struct hisi_qm *qm)
}
}
+static void qm_uacce_api_ver_init(struct hisi_qm *qm)
+{
+ struct uacce_device *uacce = qm->uacce;
+
+ switch (qm->ver) {
+ case QM_HW_V1:
+ uacce->api_ver = HISI_QM_API_VER_BASE;
+ break;
+ case QM_HW_V2:
+ uacce->api_ver = HISI_QM_API_VER2_BASE;
+ break;
+ case QM_HW_V3:
+ case QM_HW_V4:
+ uacce->api_ver = HISI_QM_API_VER3_BASE;
+ break;
+ default:
+ uacce->api_ver = HISI_QM_API_VER5_BASE;
+ break;
+ }
+}
+
static int qm_alloc_uacce(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
@@ -2776,13 +2798,6 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
uacce->priv = qm;
if (qm->ver == QM_HW_V1)
- uacce->api_ver = HISI_QM_API_VER_BASE;
- else if (qm->ver == QM_HW_V2)
- uacce->api_ver = HISI_QM_API_VER2_BASE;
- else
- uacce->api_ver = HISI_QM_API_VER3_BASE;
-
- if (qm->ver == QM_HW_V1)
mmio_page_nr = QM_DOORBELL_PAGE_NR;
else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
mmio_page_nr = QM_DOORBELL_PAGE_NR +
@@ -2801,6 +2816,7 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr;
qm->uacce = uacce;
+ qm_uacce_api_ver_init(qm);
INIT_LIST_HEAD(&qm->isolate_data.qm_hw_errs);
mutex_init(&qm->isolate_data.isolate_lock);
@@ -3179,6 +3195,9 @@ static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm)
qm_init_eq_aeq_status(qm);
+	/* Before starting the device, clear the memory, then configure it for the device to use. */
+ memset(qm->qdma.va, 0, qm->qdma.size);
+
ret = qm_eq_ctx_cfg(qm);
if (ret) {
dev_err(dev, "Set eqc failed!\n");
@@ -3190,9 +3209,13 @@ static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm)
static int __hisi_qm_start(struct hisi_qm *qm)
{
+ struct device *dev = &qm->pdev->dev;
int ret;
- WARN_ON(!qm->qdma.va);
+ if (!qm->qdma.va) {
+ dev_err(dev, "qm qdma is NULL!\n");
+ return -EINVAL;
+ }
if (qm->fun_type == QM_HW_PF) {
ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num);
@@ -3266,7 +3289,7 @@ static int qm_restart(struct hisi_qm *qm)
for (i = 0; i < qm->qp_num; i++) {
qp = &qm->qp_array[i];
if (atomic_read(&qp->qp_status.flags) == QP_STOP &&
- qp->is_resetting == true) {
+ qp->is_resetting == true && qp->is_in_kernel == true) {
ret = qm_start_qp_nolock(qp, 0);
if (ret < 0) {
dev_err(dev, "Failed to start qp%d!\n", i);
@@ -3298,24 +3321,44 @@ static void qm_stop_started_qp(struct hisi_qm *qm)
}
/**
- * qm_clear_queues() - Clear all queues memory in a qm.
- * @qm: The qm in which the queues will be cleared.
+ * qm_invalid_queues() - invalidate all queues in use.
+ * @qm: The qm in which the queues will be invalidated.
*
- * This function clears all queues memory in a qm. Reset of accelerator can
- * use this to clear queues.
+ * This function invalidates all queues in use. If a doorbell command is
+ * sent to the device from user space after the device has been reset,
+ * the device discards the doorbell command.
*/
-static void qm_clear_queues(struct hisi_qm *qm)
+static void qm_invalid_queues(struct hisi_qm *qm)
{
struct hisi_qp *qp;
+ struct qm_sqc *sqc;
+ struct qm_cqc *cqc;
int i;
+ /*
+	 * A normal stop means the queues are simply no longer used, so they
+	 * do not need to be invalidated.
+ */
+ if (qm->status.stop_reason == QM_NORMAL)
+ return;
+
+ if (qm->status.stop_reason == QM_DOWN)
+ hisi_qm_cache_wb(qm);
+
for (i = 0; i < qm->qp_num; i++) {
qp = &qm->qp_array[i];
- if (qp->is_in_kernel && qp->is_resetting)
+ if (!qp->is_resetting)
+ continue;
+
+		/* Write random data and set the sqc close bit to invalidate the queue. */
+ sqc = qm->sqc + i;
+ cqc = qm->cqc + i;
+ sqc->w8 = cpu_to_le16(QM_XQC_RANDOM_DATA);
+ sqc->w13 = cpu_to_le16(QM_SQC_DISABLE_QP);
+ cqc->w8 = cpu_to_le16(QM_XQC_RANDOM_DATA);
+ if (qp->is_in_kernel)
memset(qp->qdma.va, 0, qp->qdma.size);
}
-
- memset(qm->qdma.va, 0, qm->qdma.size);
}
/**
@@ -3372,7 +3415,7 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
}
}
- qm_clear_queues(qm);
+ qm_invalid_queues(qm);
qm->status.stop_reason = QM_NORMAL;
err_unlock:
@@ -3617,19 +3660,19 @@ static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs)
return 0;
}
-static int qm_clear_vft_config(struct hisi_qm *qm)
+static void qm_clear_vft_config(struct hisi_qm *qm)
{
- int ret;
u32 i;
- for (i = 1; i <= qm->vfs_num; i++) {
- ret = hisi_qm_set_vft(qm, i, 0, 0);
- if (ret)
- return ret;
- }
- qm->vfs_num = 0;
+ /*
+ * When disabling SR-IOV, clear the configuration of each VF in the hardware
+ * sequentially. Failure to clear a single VF should not affect the clearing
+ * operation of other VFs.
+ */
+ for (i = 1; i <= qm->vfs_num; i++)
+ (void)hisi_qm_set_vft(qm, i, 0, 0);
- return 0;
+ qm->vfs_num = 0;
}
static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos)
@@ -3826,6 +3869,10 @@ static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf,
}
pdev = container_of(dev, struct pci_dev, dev);
+ if (pci_physfn(pdev) != qm->pdev) {
+ pci_err(qm->pdev, "the pdev input does not match the pf!\n");
+ return -EINVAL;
+ }
*fun_index = pdev->devfn;
@@ -3960,13 +4007,13 @@ int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
goto err_put_sync;
}
+ qm->vfs_num = num_vfs;
ret = pci_enable_sriov(pdev, num_vfs);
if (ret) {
pci_err(pdev, "Can't enable VF!\n");
qm_clear_vft_config(qm);
goto err_put_sync;
}
- qm->vfs_num = num_vfs;
pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs);
@@ -4001,11 +4048,10 @@ int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
}
pci_disable_sriov(pdev);
-
- qm->vfs_num = 0;
+ qm_clear_vft_config(qm);
qm_pm_put_sync(qm);
- return qm_clear_vft_config(qm);
+ return 0;
}
EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);
@@ -4179,9 +4225,9 @@ static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
!qm->err_status.is_qm_ecc_mbit &&
!qm->err_ini->close_axi_master_ooo) {
nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
- writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
+ writel(nfe_enb & ~qm->err_info.qm_err.ecc_2bits_mask,
qm->io_base + QM_RAS_NFE_ENABLE);
- writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
+ writel(qm->err_info.qm_err.ecc_2bits_mask, qm->io_base + QM_ABNORMAL_INT_SET);
}
}
@@ -4447,9 +4493,6 @@ static void qm_restart_prepare(struct hisi_qm *qm)
{
u32 value;
- if (qm->err_ini->open_sva_prefetch)
- qm->err_ini->open_sva_prefetch(qm);
-
if (qm->ver >= QM_HW_V3)
return;
@@ -4463,12 +4506,12 @@ static void qm_restart_prepare(struct hisi_qm *qm)
qm->io_base + ACC_AM_CFG_PORT_WR_EN);
/* clear dev ecc 2bit error source if having */
- value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask;
+ value = qm_get_dev_err_status(qm) & qm->err_info.dev_err.ecc_2bits_mask;
if (value && qm->err_ini->clear_dev_hw_err_status)
qm->err_ini->clear_dev_hw_err_status(qm, value);
/* clear QM ecc mbit error source */
- writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE);
+ writel(qm->err_info.qm_err.ecc_2bits_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE);
/* clear AM Reorder Buffer ecc mbit source */
writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS);
@@ -4495,6 +4538,34 @@ clear_flags:
qm->err_status.is_dev_ecc_mbit = false;
}
+static void qm_disable_axi_error(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_mask *qm_err = &qm->err_info.qm_err;
+ u32 val;
+
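+ /* Keep every configured error interrupt enabled except the AXI ones, which are masked. */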
+ val = ~(qm->error_mask & (~QM_RAS_AXI_ERROR));
+ writel(val, qm->io_base + QM_ABNORMAL_INT_MASK);
+ if (qm->ver > QM_HW_V2)
+ writel(qm_err->shutdown_mask & (~QM_RAS_AXI_ERROR),
+ qm->io_base + QM_OOO_SHUTDOWN_SEL);
+
+ if (qm->err_ini->disable_axi_error)
+ qm->err_ini->disable_axi_error(qm);
+}
+
+static void qm_enable_axi_error(struct hisi_qm *qm)
+{
+ /* clear axi error source */
+ writel(QM_RAS_AXI_ERROR, qm->io_base + QM_ABNORMAL_INT_SOURCE);
+
+ writel(~qm->error_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
+ if (qm->ver > QM_HW_V2)
+ writel(qm->err_info.qm_err.shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);
+
+ if (qm->err_ini->enable_axi_error)
+ qm->err_ini->enable_axi_error(qm);
+}
+
static int qm_controller_reset_done(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
@@ -4528,6 +4599,7 @@ static int qm_controller_reset_done(struct hisi_qm *qm)
qm_restart_prepare(qm);
hisi_qm_dev_err_init(qm);
+ qm_disable_axi_error(qm);
if (qm->err_ini->open_axi_master_ooo)
qm->err_ini->open_axi_master_ooo(qm);
@@ -4550,7 +4622,7 @@ static int qm_controller_reset_done(struct hisi_qm *qm)
ret = qm_wait_vf_prepare_finish(qm);
if (ret)
pci_err(pdev, "failed to start by vfs in soft reset!\n");
-
+ qm_enable_axi_error(qm);
qm_cmd_init(qm);
qm_restart_done(qm);
@@ -4731,6 +4803,15 @@ flr_done:
}
EXPORT_SYMBOL_GPL(hisi_qm_reset_done);
+static irqreturn_t qm_rsvd_irq(int irq, void *data)
+{
+ struct hisi_qm *qm = data;
+
+ dev_info(&qm->pdev->dev, "Reserved interrupt, ignored!\n");
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t qm_abnormal_irq(int irq, void *data)
{
struct hisi_qm *qm = data;
@@ -4760,8 +4841,6 @@ void hisi_qm_dev_shutdown(struct pci_dev *pdev)
ret = hisi_qm_stop(qm, QM_DOWN);
if (ret)
dev_err(&pdev->dev, "Fail to stop qm in shutdown!\n");
-
- hisi_qm_cache_wb(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown);
@@ -5014,7 +5093,7 @@ static void qm_unregister_abnormal_irq(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
u32 irq_vector, val;
- if (qm->fun_type == QM_HW_VF)
+ if (qm->fun_type == QM_HW_VF && qm->ver < QM_HW_V3)
return;
val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val;
@@ -5031,17 +5110,28 @@ static int qm_register_abnormal_irq(struct hisi_qm *qm)
u32 irq_vector, val;
int ret;
- if (qm->fun_type == QM_HW_VF)
- return 0;
-
val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
return 0;
-
irq_vector = val & QM_IRQ_VECTOR_MASK;
+
+ /* For VFs, this interrupt is reserved on HW V3 and later versions. */
+ if (qm->fun_type == QM_HW_VF) {
+ if (qm->ver < QM_HW_V3)
+ return 0;
+
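+ /* IRQF_NO_AUTOEN keeps the vector requested but leaves it disabled. */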
+ ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_rsvd_irq,
+ IRQF_NO_AUTOEN, qm->dev_name, qm);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request reserved irq, ret = %d!\n", ret);
+ return ret;
+ }
+ return 0;
+ }
+
ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm);
if (ret)
- dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d", ret);
+ dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d!\n", ret);
return ret;
}
@@ -5407,6 +5497,12 @@ static int hisi_qm_pci_init(struct hisi_qm *qm)
pci_set_master(pdev);
num_vec = qm_get_irq_num(qm);
+ if (!num_vec) {
+ dev_err(dev, "Device irq num is zero!\n");
+ ret = -EINVAL;
+ goto err_get_pci_res;
+ }
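+
+ /* MSI only grants vector counts in powers of two, e.g. a request of 5 becomes 8. */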
+ num_vec = roundup_pow_of_two(num_vec);
ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
if (ret < 0) {
dev_err(dev, "Failed to enable MSI vectors!\n");
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c
index ef0cb733c92c..129cb6faa0b7 100644
--- a/drivers/crypto/hisilicon/sec/sec_drv.c
+++ b/drivers/crypto/hisilicon/sec/sec_drv.c
@@ -922,7 +922,8 @@ static int sec_hw_init(struct sec_dev_info *info)
struct iommu_domain *domain;
u32 sec_ipv4_mask = 0;
u32 sec_ipv6_mask[10] = {};
- u32 i, ret;
+ int ret;
+ u32 i;
domain = iommu_get_domain_for_dev(info->dev);
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index d044ded0f290..31590d01139a 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -1944,14 +1944,12 @@ static void sec_request_uninit(struct sec_req *req)
static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
{
struct sec_qp_ctx *qp_ctx;
- int i;
+ int i = 0;
- for (i = 0; i < ctx->sec->ctx_q_num; i++) {
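+ /* A do-while guarantees qp_ctx is assigned even if no request id is free. */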
+ do {
qp_ctx = &ctx->qp_ctx[i];
req->req_id = sec_alloc_req_id(req, qp_ctx);
- if (req->req_id >= 0)
- break;
- }
+ } while (req->req_id < 0 && ++i < ctx->sec->ctx_q_num);
req->qp_ctx = qp_ctx;
req->backlog = &qp_ctx->backlog;
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 72cf48d1f3ab..5eb2d6820742 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -47,6 +47,8 @@
#define SEC_RAS_FE_ENB_MSK 0x0
#define SEC_OOO_SHUTDOWN_SEL 0x301014
#define SEC_RAS_DISABLE 0x0
+#define SEC_AXI_ERROR_MASK (BIT(0) | BIT(1))
+
#define SEC_MEM_START_INIT_REG 0x301100
#define SEC_MEM_INIT_DONE_REG 0x301104
@@ -93,6 +95,16 @@
#define SEC_PREFETCH_ENABLE (~(BIT(0) | BIT(1) | BIT(11)))
#define SEC_PREFETCH_DISABLE BIT(1)
#define SEC_SVA_DISABLE_READY (BIT(7) | BIT(11))
+#define SEC_SVA_PREFETCH_INFO 0x301ED4
+#define SEC_SVA_STALL_NUM GENMASK(23, 8)
+#define SEC_SVA_PREFETCH_NUM GENMASK(2, 0)
+#define SEC_WAIT_SVA_READY 500000
+#define SEC_READ_SVA_STATUS_TIMES 3
+#define SEC_WAIT_US_MIN 10
+#define SEC_WAIT_US_MAX 20
+#define SEC_WAIT_QP_US_MIN 1000
+#define SEC_WAIT_QP_US_MAX 2000
+#define SEC_MAX_WAIT_TIMES 2000
#define SEC_DELAY_10_US 10
#define SEC_POLL_TIMEOUT_US 1000
@@ -464,6 +476,81 @@ static void sec_set_endian(struct hisi_qm *qm)
writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
}
+static int sec_wait_sva_ready(struct hisi_qm *qm, __u32 offset, __u32 mask)
+{
+ u32 val, try_times = 0;
+ u8 count = 0;
+
+ /*
+ * Read the register value every 10-20us. If the value is 0 for three
+ * consecutive times, the SVA module is ready.
+ */
+ do {
+ val = readl(qm->io_base + offset);
+ if (val & mask)
+ count = 0;
+ else if (++count == SEC_READ_SVA_STATUS_TIMES)
+ break;
+
+ usleep_range(SEC_WAIT_US_MIN, SEC_WAIT_US_MAX);
+ } while (++try_times < SEC_WAIT_SVA_READY);
+
+ if (try_times == SEC_WAIT_SVA_READY) {
+ pci_err(qm->pdev, "failed to wait sva prefetch ready\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void sec_close_sva_prefetch(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
+ return;
+
+ val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
+ val |= SEC_PREFETCH_DISABLE;
+ writel(val, qm->io_base + SEC_PREFETCH_CFG);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS,
+ val, !(val & SEC_SVA_DISABLE_READY),
+ SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
+ if (ret)
+ pci_err(qm->pdev, "failed to close sva prefetch\n");
+
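+ /* The wait logs its own timeout; the close path continues regardless. */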
+ (void)sec_wait_sva_ready(qm, SEC_SVA_PREFETCH_INFO, SEC_SVA_STALL_NUM);
+}
+
+static void sec_open_sva_prefetch(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
+ return;
+
+ /* Enable prefetch */
+ val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
+ val &= SEC_PREFETCH_ENABLE;
+ writel(val, qm->io_base + SEC_PREFETCH_CFG);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG,
+ val, !(val & SEC_PREFETCH_DISABLE),
+ SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
+ if (ret) {
+ pci_err(qm->pdev, "failed to open sva prefetch\n");
+ sec_close_sva_prefetch(qm);
+ return;
+ }
+
+ ret = sec_wait_sva_ready(qm, SEC_SVA_TRANS, SEC_SVA_PREFETCH_NUM);
+ if (ret)
+ sec_close_sva_prefetch(qm);
+}
+
static void sec_engine_sva_config(struct hisi_qm *qm)
{
u32 reg;
@@ -497,45 +584,7 @@ static void sec_engine_sva_config(struct hisi_qm *qm)
writel_relaxed(reg, qm->io_base +
SEC_INTERFACE_USER_CTRL1_REG);
}
-}
-
-static void sec_open_sva_prefetch(struct hisi_qm *qm)
-{
- u32 val;
- int ret;
-
- if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
- return;
-
- /* Enable prefetch */
- val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
- val &= SEC_PREFETCH_ENABLE;
- writel(val, qm->io_base + SEC_PREFETCH_CFG);
-
- ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG,
- val, !(val & SEC_PREFETCH_DISABLE),
- SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
- if (ret)
- pci_err(qm->pdev, "failed to open sva prefetch\n");
-}
-
-static void sec_close_sva_prefetch(struct hisi_qm *qm)
-{
- u32 val;
- int ret;
-
- if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
- return;
-
- val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
- val |= SEC_PREFETCH_DISABLE;
- writel(val, qm->io_base + SEC_PREFETCH_CFG);
-
- ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS,
- val, !(val & SEC_SVA_DISABLE_READY),
- SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
- if (ret)
- pci_err(qm->pdev, "failed to close sva prefetch\n");
+ sec_open_sva_prefetch(qm);
}
static void sec_enable_clock_gate(struct hisi_qm *qm)
@@ -666,8 +715,7 @@ static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
val1 = readl(qm->io_base + SEC_CONTROL_REG);
if (enable) {
val1 |= SEC_AXI_SHUTDOWN_ENABLE;
- val2 = hisi_qm_get_hw_info(qm, sec_basic_info,
- SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ val2 = qm->err_info.dev_err.shutdown_mask;
} else {
val1 &= SEC_AXI_SHUTDOWN_DISABLE;
val2 = 0x0;
@@ -681,7 +729,8 @@ static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
static void sec_hw_error_enable(struct hisi_qm *qm)
{
- u32 ce, nfe;
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
if (qm->ver == QM_HW_V1) {
writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
@@ -689,22 +738,19 @@ static void sec_hw_error_enable(struct hisi_qm *qm)
return;
}
- ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CE_MASK_CAP, qm->cap_ver);
- nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
-
/* clear SEC hw error source if having */
- writel(ce | nfe | SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_CORE_INT_SOURCE);
+ writel(err_mask, qm->io_base + SEC_CORE_INT_SOURCE);
/* enable RAS int */
- writel(ce, qm->io_base + SEC_RAS_CE_REG);
- writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
- writel(nfe, qm->io_base + SEC_RAS_NFE_REG);
+ writel(dev_err->ce, qm->io_base + SEC_RAS_CE_REG);
+ writel(dev_err->fe, qm->io_base + SEC_RAS_FE_REG);
+ writel(dev_err->nfe, qm->io_base + SEC_RAS_NFE_REG);
/* enable SEC block master OOO when nfe occurs on Kunpeng930 */
sec_master_ooo_ctrl(qm, true);
/* enable SEC hw error interrupts */
- writel(ce | nfe | SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_CORE_INT_MASK);
+ writel(err_mask, qm->io_base + SEC_CORE_INT_MASK);
}
static void sec_hw_error_disable(struct hisi_qm *qm)
@@ -1061,12 +1107,20 @@ static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
static void sec_disable_error_report(struct hisi_qm *qm, u32 err_type)
{
- u32 nfe_mask;
+ u32 nfe_mask = qm->err_info.dev_err.nfe;
- nfe_mask = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
writel(nfe_mask & (~err_type), qm->io_base + SEC_RAS_NFE_REG);
}
+static void sec_enable_error_report(struct hisi_qm *qm)
+{
+ u32 nfe_mask = qm->err_info.dev_err.nfe;
+ u32 ce_mask = qm->err_info.dev_err.ce;
+
+ writel(nfe_mask, qm->io_base + SEC_RAS_NFE_REG);
+ writel(ce_mask, qm->io_base + SEC_RAS_CE_REG);
+}
+
static void sec_open_axi_master_ooo(struct hisi_qm *qm)
{
u32 val;
@@ -1082,16 +1136,18 @@ static enum acc_err_result sec_get_err_result(struct hisi_qm *qm)
err_status = sec_get_hw_err_status(qm);
if (err_status) {
- if (err_status & qm->err_info.ecc_2bits_mask)
+ if (err_status & qm->err_info.dev_err.ecc_2bits_mask)
qm->err_status.is_dev_ecc_mbit = true;
sec_log_hw_error(qm, err_status);
- if (err_status & qm->err_info.dev_reset_mask) {
+ if (err_status & qm->err_info.dev_err.reset_mask) {
/* Disable reporting of this error until the device is recovered. */
sec_disable_error_report(qm, err_status);
return ACC_ERR_NEED_RESET;
}
sec_clear_hw_err_status(qm, err_status);
+ /* The firmware may have disabled error reporting; re-enable it. */
+ sec_enable_error_report(qm);
}
return ACC_ERR_RECOVERED;
@@ -1102,28 +1158,62 @@ static bool sec_dev_is_abnormal(struct hisi_qm *qm)
u32 err_status;
err_status = sec_get_hw_err_status(qm);
- if (err_status & qm->err_info.dev_shutdown_mask)
+ if (err_status & qm->err_info.dev_err.shutdown_mask)
return true;
return false;
}
+static void sec_disable_axi_error(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
+
+ writel(err_mask & ~SEC_AXI_ERROR_MASK, qm->io_base + SEC_CORE_INT_MASK);
+
+ if (qm->ver > QM_HW_V2)
+ writel(dev_err->shutdown_mask & (~SEC_AXI_ERROR_MASK),
+ qm->io_base + SEC_OOO_SHUTDOWN_SEL);
+}
+
+static void sec_enable_axi_error(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
+
+ /* clear axi error source */
+ writel(SEC_AXI_ERROR_MASK, qm->io_base + SEC_CORE_INT_SOURCE);
+
+ writel(err_mask, qm->io_base + SEC_CORE_INT_MASK);
+
+ if (qm->ver > QM_HW_V2)
+ writel(dev_err->shutdown_mask, qm->io_base + SEC_OOO_SHUTDOWN_SEL);
+}
+
static void sec_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
+ struct hisi_qm_err_mask *qm_err = &err_info->qm_err;
+ struct hisi_qm_err_mask *dev_err = &err_info->dev_err;
+
+ qm_err->fe = SEC_RAS_FE_ENB_MSK;
+ qm_err->ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_CE_MASK_CAP, qm->cap_ver);
+ qm_err->nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_NFE_MASK_CAP, qm->cap_ver);
+ qm_err->shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ qm_err->reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_QM_RESET_MASK_CAP, qm->cap_ver);
+ qm_err->ecc_2bits_mask = QM_ECC_MBIT;
+
+ dev_err->fe = SEC_RAS_FE_ENB_MSK;
+ dev_err->ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CE_MASK_CAP, qm->cap_ver);
+ dev_err->nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
+ dev_err->shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ dev_err->reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_RESET_MASK_CAP, qm->cap_ver);
+ dev_err->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
- err_info->fe = SEC_RAS_FE_ENB_MSK;
- err_info->ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_CE_MASK_CAP, qm->cap_ver);
- err_info->nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_NFE_MASK_CAP, qm->cap_ver);
- err_info->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
- err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
- SEC_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
- err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
- SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
- err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
- SEC_QM_RESET_MASK_CAP, qm->cap_ver);
- err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
- SEC_RESET_MASK_CAP, qm->cap_ver);
err_info->msi_wr_port = BIT(0);
err_info->acpi_rst = "SRST";
}
@@ -1141,6 +1231,8 @@ static const struct hisi_qm_err_ini sec_err_ini = {
.err_info_init = sec_err_info_init,
.get_err_result = sec_get_err_result,
.dev_is_abnormal = sec_dev_is_abnormal,
+ .disable_axi_error = sec_disable_axi_error,
+ .enable_axi_error = sec_enable_axi_error,
};
static int sec_pf_probe_init(struct sec_dev *sec)
@@ -1152,7 +1244,6 @@ static int sec_pf_probe_init(struct sec_dev *sec)
if (ret)
return ret;
- sec_open_sva_prefetch(qm);
hisi_qm_dev_err_init(qm);
sec_debug_regs_clear(qm);
ret = sec_show_last_regs_init(qm);
@@ -1169,7 +1260,7 @@ static int sec_pre_store_cap_reg(struct hisi_qm *qm)
size_t i, size;
size = ARRAY_SIZE(sec_cap_query_info);
- sec_cap = devm_kzalloc(&pdev->dev, sizeof(*sec_cap) * size, GFP_KERNEL);
+ sec_cap = devm_kcalloc(&pdev->dev, size, sizeof(*sec_cap), GFP_KERNEL);
if (!sec_cap)
return -ENOMEM;
diff --git a/drivers/crypto/hisilicon/zip/dae_main.c b/drivers/crypto/hisilicon/zip/dae_main.c
index 6f22e4c36e49..68aebd02fc84 100644
--- a/drivers/crypto/hisilicon/zip/dae_main.c
+++ b/drivers/crypto/hisilicon/zip/dae_main.c
@@ -15,6 +15,7 @@
#define DAE_REG_RD_TMOUT_US USEC_PER_SEC
#define DAE_ALG_NAME "hashagg"
+#define DAE_V5_ALG_NAME "hashagg\nudma\nhashjoin\ngather"
/* error */
#define DAE_AXI_CFG_OFFSET 0x331000
@@ -82,6 +83,7 @@ int hisi_dae_set_user_domain(struct hisi_qm *qm)
int hisi_dae_set_alg(struct hisi_qm *qm)
{
+ const char *alg_name;
size_t len;
if (!dae_is_support(qm))
@@ -90,9 +92,14 @@ int hisi_dae_set_alg(struct hisi_qm *qm)
if (!qm->uacce)
return 0;
+ if (qm->ver >= QM_HW_V5)
+ alg_name = DAE_V5_ALG_NAME;
+ else
+ alg_name = DAE_ALG_NAME;
+
len = strlen(qm->uacce->algs);
/* A line break may be required */
- if (len + strlen(DAE_ALG_NAME) + 1 >= QM_DEV_ALG_MAX_LEN) {
+ if (len + strlen(alg_name) + 1 >= QM_DEV_ALG_MAX_LEN) {
pci_err(qm->pdev, "algorithm name is too long!\n");
return -EINVAL;
}
@@ -100,7 +107,7 @@ int hisi_dae_set_alg(struct hisi_qm *qm)
if (len)
strcat((char *)qm->uacce->algs, "\n");
- strcat((char *)qm->uacce->algs, DAE_ALG_NAME);
+ strcat((char *)qm->uacce->algs, alg_name);
return 0;
}
@@ -168,6 +175,12 @@ static void hisi_dae_disable_error_report(struct hisi_qm *qm, u32 err_type)
writel(DAE_ERR_NFE_MASK & (~err_type), qm->io_base + DAE_ERR_NFE_OFFSET);
}
+static void hisi_dae_enable_error_report(struct hisi_qm *qm)
+{
+ writel(DAE_ERR_CE_MASK, qm->io_base + DAE_ERR_CE_OFFSET);
+ writel(DAE_ERR_NFE_MASK, qm->io_base + DAE_ERR_NFE_OFFSET);
+}
+
static void hisi_dae_log_hw_error(struct hisi_qm *qm, u32 err_type)
{
const struct hisi_dae_hw_error *err = dae_hw_error;
@@ -209,6 +222,8 @@ enum acc_err_result hisi_dae_get_err_result(struct hisi_qm *qm)
return ACC_ERR_NEED_RESET;
}
hisi_dae_clear_hw_err_status(qm, err_status);
+ /* The firmware may have disabled error reporting; re-enable it. */
+ hisi_dae_enable_error_report(qm);
return ACC_ERR_RECOVERED;
}
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index d8ba23b7cc7d..4fcbe6bada06 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -65,6 +65,7 @@
#define HZIP_SRAM_ECC_ERR_NUM_SHIFT 16
#define HZIP_SRAM_ECC_ERR_ADDR_SHIFT 24
#define HZIP_CORE_INT_MASK_ALL GENMASK(12, 0)
+#define HZIP_AXI_ERROR_MASK (BIT(2) | BIT(3))
#define HZIP_SQE_SIZE 128
#define HZIP_PF_DEF_Q_NUM 64
#define HZIP_PF_DEF_Q_BASE 0
@@ -80,6 +81,7 @@
#define HZIP_ALG_GZIP_BIT GENMASK(3, 2)
#define HZIP_ALG_DEFLATE_BIT GENMASK(5, 4)
#define HZIP_ALG_LZ77_BIT GENMASK(7, 6)
+#define HZIP_ALG_LZ4_BIT GENMASK(9, 8)
#define HZIP_BUF_SIZE 22
#define HZIP_SQE_MASK_OFFSET 64
@@ -95,10 +97,16 @@
#define HZIP_PREFETCH_ENABLE (~(BIT(26) | BIT(17) | BIT(0)))
#define HZIP_SVA_PREFETCH_DISABLE BIT(26)
#define HZIP_SVA_DISABLE_READY (BIT(26) | BIT(30))
+#define HZIP_SVA_PREFETCH_NUM GENMASK(18, 16)
+#define HZIP_SVA_STALL_NUM GENMASK(15, 0)
#define HZIP_SHAPER_RATE_COMPRESS 750
#define HZIP_SHAPER_RATE_DECOMPRESS 140
-#define HZIP_DELAY_1_US 1
-#define HZIP_POLL_TIMEOUT_US 1000
+#define HZIP_DELAY_1_US 1
+#define HZIP_POLL_TIMEOUT_US 1000
+#define HZIP_WAIT_SVA_READY 500000
+#define HZIP_READ_SVA_STATUS_TIMES 3
+#define HZIP_WAIT_US_MIN 10
+#define HZIP_WAIT_US_MAX 20
/* clock gating */
#define HZIP_PEH_CFG_AUTO_GATE 0x3011A8
@@ -111,6 +119,9 @@
/* zip comp high performance */
#define HZIP_HIGH_PERF_OFFSET 0x301208
+#define HZIP_LIT_LEN_EN_OFFSET 0x301204
+#define HZIP_LIT_LEN_EN_EN BIT(4)
+
enum {
HZIP_HIGH_COMP_RATE,
HZIP_HIGH_COMP_PERF,
@@ -141,6 +152,12 @@ static const struct qm_dev_alg zip_dev_algs[] = { {
}, {
.alg_msk = HZIP_ALG_LZ77_BIT,
.alg = "lz77_zstd\n",
+ }, {
+ .alg_msk = HZIP_ALG_LZ77_BIT,
+ .alg = "lz77_only\n",
+ }, {
+ .alg_msk = HZIP_ALG_LZ4_BIT,
+ .alg = "lz4\n",
},
};
@@ -448,10 +465,23 @@ bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg)
return false;
}
-static int hisi_zip_set_high_perf(struct hisi_qm *qm)
+static void hisi_zip_literal_set(struct hisi_qm *qm)
+{
+ u32 val;
+
+ if (qm->ver < QM_HW_V3)
+ return;
+
+ val = readl_relaxed(qm->io_base + HZIP_LIT_LEN_EN_OFFSET);
+ val &= ~HZIP_LIT_LEN_EN_EN;
+
+ /* enable literal length in stream mode compression */
+ writel(val, qm->io_base + HZIP_LIT_LEN_EN_OFFSET);
+}
+
+static void hisi_zip_set_high_perf(struct hisi_qm *qm)
{
u32 val;
- int ret;
val = readl_relaxed(qm->io_base + HZIP_HIGH_PERF_OFFSET);
if (perf_mode == HZIP_HIGH_COMP_PERF)
@@ -461,16 +491,36 @@ static int hisi_zip_set_high_perf(struct hisi_qm *qm)
/* Set perf mode */
writel(val, qm->io_base + HZIP_HIGH_PERF_OFFSET);
- ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_HIGH_PERF_OFFSET,
- val, val == perf_mode, HZIP_DELAY_1_US,
- HZIP_POLL_TIMEOUT_US);
- if (ret)
- pci_err(qm->pdev, "failed to set perf mode\n");
+}
- return ret;
+static int hisi_zip_wait_sva_ready(struct hisi_qm *qm, __u32 offset, __u32 mask)
+{
+ u32 val, try_times = 0;
+ u8 count = 0;
+
+ /*
+ * Read the register value every 10-20us. If the value is 0 for three
+ * consecutive times, the SVA module is ready.
+ */
+ do {
+ val = readl(qm->io_base + offset);
+ if (val & mask)
+ count = 0;
+ else if (++count == HZIP_READ_SVA_STATUS_TIMES)
+ break;
+
+ usleep_range(HZIP_WAIT_US_MIN, HZIP_WAIT_US_MAX);
+ } while (++try_times < HZIP_WAIT_SVA_READY);
+
+ if (try_times == HZIP_WAIT_SVA_READY) {
+ pci_err(qm->pdev, "failed to wait sva prefetch ready\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
}
-static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm)
+static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm)
{
u32 val;
int ret;
@@ -478,19 +528,20 @@ static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm)
if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
- /* Enable prefetch */
val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG);
- val &= HZIP_PREFETCH_ENABLE;
+ val |= HZIP_SVA_PREFETCH_DISABLE;
writel(val, qm->io_base + HZIP_PREFETCH_CFG);
- ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_PREFETCH_CFG,
- val, !(val & HZIP_SVA_PREFETCH_DISABLE),
+ ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_SVA_TRANS,
+ val, !(val & HZIP_SVA_DISABLE_READY),
HZIP_DELAY_1_US, HZIP_POLL_TIMEOUT_US);
if (ret)
- pci_err(qm->pdev, "failed to open sva prefetch\n");
+ pci_err(qm->pdev, "failed to close sva prefetch\n");
+
+ (void)hisi_zip_wait_sva_ready(qm, HZIP_SVA_TRANS, HZIP_SVA_STALL_NUM);
}
-static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm)
+static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm)
{
u32 val;
int ret;
@@ -498,15 +549,23 @@ static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm)
if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
+ /* Enable prefetch */
val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG);
- val |= HZIP_SVA_PREFETCH_DISABLE;
+ val &= HZIP_PREFETCH_ENABLE;
writel(val, qm->io_base + HZIP_PREFETCH_CFG);
- ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_SVA_TRANS,
- val, !(val & HZIP_SVA_DISABLE_READY),
+ ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_PREFETCH_CFG,
+ val, !(val & HZIP_SVA_PREFETCH_DISABLE),
HZIP_DELAY_1_US, HZIP_POLL_TIMEOUT_US);
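+ /* On failure, roll back so that prefetch is left fully disabled. */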
+ if (ret) {
+ pci_err(qm->pdev, "failed to open sva prefetch\n");
+ hisi_zip_close_sva_prefetch(qm);
+ return;
+ }
+
+ ret = hisi_zip_wait_sva_ready(qm, HZIP_SVA_TRANS, HZIP_SVA_PREFETCH_NUM);
if (ret)
- pci_err(qm->pdev, "failed to close sva prefetch\n");
+ hisi_zip_close_sva_prefetch(qm);
}
static void hisi_zip_enable_clock_gate(struct hisi_qm *qm)
@@ -530,6 +589,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
void __iomem *base = qm->io_base;
u32 dcomp_bm, comp_bm;
u32 zip_core_en;
+ int ret;
/* qm user domain */
writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1);
@@ -565,6 +625,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63);
writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
}
+ hisi_zip_open_sva_prefetch(qm);
/* let's open all compression/decompression cores */
@@ -580,9 +641,19 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL);
+ hisi_zip_set_high_perf(qm);
+ hisi_zip_literal_set(qm);
hisi_zip_enable_clock_gate(qm);
- return hisi_dae_set_user_domain(qm);
+ ret = hisi_dae_set_user_domain(qm);
+ if (ret)
+ goto close_sva_prefetch;
+
+ return 0;
+
+close_sva_prefetch:
+ hisi_zip_close_sva_prefetch(qm);
+ return ret;
}
static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
@@ -592,8 +663,7 @@ static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
val1 = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
if (enable) {
val1 |= HZIP_AXI_SHUTDOWN_ENABLE;
- val2 = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
- ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ val2 = qm->err_info.dev_err.shutdown_mask;
} else {
val1 &= ~HZIP_AXI_SHUTDOWN_ENABLE;
val2 = 0x0;
@@ -607,7 +677,8 @@ static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
{
- u32 nfe, ce;
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
if (qm->ver == QM_HW_V1) {
writel(HZIP_CORE_INT_MASK_ALL,
@@ -616,33 +687,29 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
return;
}
- nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
- ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver);
-
/* clear ZIP hw error source if having */
- writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_SOURCE);
+ writel(err_mask, qm->io_base + HZIP_CORE_INT_SOURCE);
/* configure error type */
- writel(ce, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
- writel(HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
- writel(nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+ writel(dev_err->ce, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
+ writel(dev_err->fe, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
+ writel(dev_err->nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
hisi_zip_master_ooo_ctrl(qm, true);
/* enable ZIP hw error interrupts */
- writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG);
+ writel(~err_mask, qm->io_base + HZIP_CORE_INT_MASK_REG);
hisi_dae_hw_error_enable(qm);
}
static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
{
- u32 nfe, ce;
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
/* disable ZIP hw error interrupts */
- nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
- ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver);
- writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_MASK_REG);
+ writel(err_mask, qm->io_base + HZIP_CORE_INT_MASK_REG);
hisi_zip_master_ooo_ctrl(qm, false);
@@ -1116,12 +1183,20 @@ static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
static void hisi_zip_disable_error_report(struct hisi_qm *qm, u32 err_type)
{
- u32 nfe_mask;
+ u32 nfe_mask = qm->err_info.dev_err.nfe;
- nfe_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
writel(nfe_mask & (~err_type), qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
}
+static void hisi_zip_enable_error_report(struct hisi_qm *qm)
+{
+ u32 nfe_mask = qm->err_info.dev_err.nfe;
+ u32 ce_mask = qm->err_info.dev_err.ce;
+
+ writel(nfe_mask, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+ writel(ce_mask, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
+}
+
static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
{
u32 val;
@@ -1160,16 +1235,18 @@ static enum acc_err_result hisi_zip_get_err_result(struct hisi_qm *qm)
/* Get device hardware new error status */
err_status = hisi_zip_get_hw_err_status(qm);
if (err_status) {
- if (err_status & qm->err_info.ecc_2bits_mask)
+ if (err_status & qm->err_info.dev_err.ecc_2bits_mask)
qm->err_status.is_dev_ecc_mbit = true;
hisi_zip_log_hw_error(qm, err_status);
- if (err_status & qm->err_info.dev_reset_mask) {
+ if (err_status & qm->err_info.dev_err.reset_mask) {
/* Disable reporting of this error until the device is recovered. */
hisi_zip_disable_error_report(qm, err_status);
- return ACC_ERR_NEED_RESET;
+ zip_result = ACC_ERR_NEED_RESET;
} else {
hisi_zip_clear_hw_err_status(qm, err_status);
+ /* The firmware may have disabled error reporting; re-enable it. */
+ hisi_zip_enable_error_report(qm);
}
}
@@ -1185,7 +1262,7 @@ static bool hisi_zip_dev_is_abnormal(struct hisi_qm *qm)
u32 err_status;
err_status = hisi_zip_get_hw_err_status(qm);
- if (err_status & qm->err_info.dev_shutdown_mask)
+ if (err_status & qm->err_info.dev_err.shutdown_mask)
return true;
return hisi_dae_dev_is_abnormal(qm);
@@ -1196,23 +1273,59 @@ static int hisi_zip_set_priv_status(struct hisi_qm *qm)
return hisi_dae_close_axi_master_ooo(qm);
}
+static void hisi_zip_disable_axi_error(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
+ u32 val;
+
+ val = ~(err_mask & (~HZIP_AXI_ERROR_MASK));
+ writel(val, qm->io_base + HZIP_CORE_INT_MASK_REG);
+
+ if (qm->ver > QM_HW_V2)
+ writel(dev_err->shutdown_mask & (~HZIP_AXI_ERROR_MASK),
+ qm->io_base + HZIP_OOO_SHUTDOWN_SEL);
+}
+
+static void hisi_zip_enable_axi_error(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
+
+ /* clear axi error source */
+ writel(HZIP_AXI_ERROR_MASK, qm->io_base + HZIP_CORE_INT_SOURCE);
+
+ writel(~err_mask, qm->io_base + HZIP_CORE_INT_MASK_REG);
+
+ if (qm->ver > QM_HW_V2)
+ writel(dev_err->shutdown_mask, qm->io_base + HZIP_OOO_SHUTDOWN_SEL);
+}
+
static void hisi_zip_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
+ struct hisi_qm_err_mask *qm_err = &err_info->qm_err;
+ struct hisi_qm_err_mask *dev_err = &err_info->dev_err;
+
+ qm_err->fe = HZIP_CORE_INT_RAS_FE_ENB_MASK;
+ qm_err->ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_QM_CE_MASK_CAP, qm->cap_ver);
+ qm_err->nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_QM_NFE_MASK_CAP, qm->cap_ver);
+ qm_err->ecc_2bits_mask = QM_ECC_MBIT;
+ qm_err->reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_QM_RESET_MASK_CAP, qm->cap_ver);
+ qm_err->shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+
+ dev_err->fe = HZIP_CORE_INT_RAS_FE_ENB_MASK;
+ dev_err->ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver);
+ dev_err->nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
+ dev_err->ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC;
+ dev_err->shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ dev_err->reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_RESET_MASK_CAP, qm->cap_ver);
- err_info->fe = HZIP_CORE_INT_RAS_FE_ENB_MASK;
- err_info->ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_QM_CE_MASK_CAP, qm->cap_ver);
- err_info->nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
- ZIP_QM_NFE_MASK_CAP, qm->cap_ver);
- err_info->ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC;
- err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
- ZIP_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
- err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
- ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
- err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
- ZIP_QM_RESET_MASK_CAP, qm->cap_ver);
- err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
- ZIP_RESET_MASK_CAP, qm->cap_ver);
err_info->msi_wr_port = HZIP_WR_PORT;
err_info->acpi_rst = "ZRST";
}
@@ -1232,6 +1345,8 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = {
.get_err_result = hisi_zip_get_err_result,
.set_priv_status = hisi_zip_set_priv_status,
.dev_is_abnormal = hisi_zip_dev_is_abnormal,
+ .disable_axi_error = hisi_zip_disable_axi_error,
+ .enable_axi_error = hisi_zip_enable_axi_error,
};
static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
@@ -1251,11 +1366,6 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
if (ret)
return ret;
- ret = hisi_zip_set_high_perf(qm);
- if (ret)
- return ret;
-
- hisi_zip_open_sva_prefetch(qm);
hisi_qm_dev_err_init(qm);
hisi_zip_debug_regs_clear(qm);
@@ -1273,7 +1383,7 @@ static int zip_pre_store_cap_reg(struct hisi_qm *qm)
size_t i, size;
size = ARRAY_SIZE(zip_cap_query_info);
- zip_cap = devm_kzalloc(&pdev->dev, sizeof(*zip_cap) * size, GFP_KERNEL);
+ zip_cap = devm_kcalloc(&pdev->dev, size, sizeof(*zip_cap), GFP_KERNEL);
if (!zip_cap)
return -ENOMEM;
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index 76b7ecb5624b..f22c12e36b56 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -700,7 +700,7 @@ static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
{
- return img_hash_cra_init(tfm, "md5-generic");
+ return img_hash_cra_init(tfm, "md5-lib");
}
static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
index 8f9e21ced0fe..48281d882260 100644
--- a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
@@ -232,7 +232,7 @@ static int kmb_ocs_dma_prepare(struct ahash_request *req)
struct device *dev = rctx->hcu_dev->dev;
unsigned int remainder = 0;
unsigned int total;
- size_t nents;
+ int nents;
size_t count;
int rc;
int i;
@@ -253,6 +253,9 @@ static int kmb_ocs_dma_prepare(struct ahash_request *req)
/* Determine the number of scatter gather list entries to process. */
nents = sg_nents_for_len(req->src, rctx->sg_data_total - remainder);
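+ /* sg_nents_for_len() returns -EINVAL if the list is shorter than the length. */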
+ if (nents < 0)
+ return nents;
+
/* If there are entries to process, map them. */
if (nents) {
rctx->sg_dma_nents = dma_map_sg(dev, req->src, nents,
diff --git a/drivers/crypto/intel/qat/Kconfig b/drivers/crypto/intel/qat/Kconfig
index 359c61f0c8a1..4b4861460dd4 100644
--- a/drivers/crypto/intel/qat/Kconfig
+++ b/drivers/crypto/intel/qat/Kconfig
@@ -6,12 +6,11 @@ config CRYPTO_DEV_QAT
select CRYPTO_SKCIPHER
select CRYPTO_AKCIPHER
select CRYPTO_DH
- select CRYPTO_HMAC
select CRYPTO_RSA
- select CRYPTO_SHA1
- select CRYPTO_SHA256
- select CRYPTO_SHA512
select CRYPTO_LIB_AES
+ select CRYPTO_LIB_SHA1
+ select CRYPTO_LIB_SHA256
+ select CRYPTO_LIB_SHA512
select FW_LOADER
select CRC8
diff --git a/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c
index 48c62a14a6a7..c2e6f0cb7480 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c
@@ -89,26 +89,14 @@ err_chrdev_unreg:
return -EFAULT;
}
-static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data,
- unsigned long arg)
+static struct adf_user_cfg_ctl_data *adf_ctl_alloc_resources(unsigned long arg)
{
struct adf_user_cfg_ctl_data *cfg_data;
- cfg_data = kzalloc(sizeof(*cfg_data), GFP_KERNEL);
- if (!cfg_data)
- return -ENOMEM;
-
- /* Initialize device id to NO DEVICE as 0 is a valid device id */
- cfg_data->device_id = ADF_CFG_NO_DEVICE;
-
- if (copy_from_user(cfg_data, (void __user *)arg, sizeof(*cfg_data))) {
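+ /* memdup_user() allocates the buffer and copies from user space in one step. */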
+ cfg_data = memdup_user((void __user *)arg, sizeof(*cfg_data));
+ if (IS_ERR(cfg_data))
pr_err("QAT: failed to copy from user cfg_data.\n");
- kfree(cfg_data);
- return -EIO;
- }
-
- *ctl_data = cfg_data;
- return 0;
+ return cfg_data;
}
static int adf_add_key_value_data(struct adf_accel_dev *accel_dev,
@@ -188,13 +176,13 @@ out_err:
static int adf_ctl_ioctl_dev_config(struct file *fp, unsigned int cmd,
unsigned long arg)
{
- int ret;
struct adf_user_cfg_ctl_data *ctl_data;
struct adf_accel_dev *accel_dev;
+ int ret = 0;
- ret = adf_ctl_alloc_resources(&ctl_data, arg);
- if (ret)
- return ret;
+ ctl_data = adf_ctl_alloc_resources(arg);
+ if (IS_ERR(ctl_data))
+ return PTR_ERR(ctl_data);
accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
if (!accel_dev) {
@@ -267,9 +255,9 @@ static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
int ret;
struct adf_user_cfg_ctl_data *ctl_data;
- ret = adf_ctl_alloc_resources(&ctl_data, arg);
- if (ret)
- return ret;
+ ctl_data = adf_ctl_alloc_resources(arg);
+ if (IS_ERR(ctl_data))
+ return PTR_ERR(ctl_data);
if (adf_devmgr_verify_id(ctl_data->device_id)) {
pr_err("QAT: Device %d not found\n", ctl_data->device_id);
@@ -301,9 +289,9 @@ static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
struct adf_user_cfg_ctl_data *ctl_data;
struct adf_accel_dev *accel_dev;
- ret = adf_ctl_alloc_resources(&ctl_data, arg);
- if (ret)
- return ret;
+ ctl_data = adf_ctl_alloc_resources(arg);
+ if (IS_ERR(ctl_data))
+ return PTR_ERR(ctl_data);
ret = -ENODEV;
accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.c b/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.c
index cf804f95838a..faa60b04c406 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.c
@@ -21,6 +21,25 @@
#define SLICE_IDX(sl) offsetof(struct icp_qat_fw_init_admin_slice_cnt, sl##_cnt)
+#define ADF_GEN6_TL_CMDQ_WAIT_COUNTER(_name) \
+ ADF_TL_COUNTER("cmdq_wait_" #_name, ADF_TL_SIMPLE_COUNT, \
+ ADF_TL_CMDQ_REG_OFF(_name, reg_tm_cmdq_wait_cnt, gen6))
+#define ADF_GEN6_TL_CMDQ_EXEC_COUNTER(_name) \
+ ADF_TL_COUNTER("cmdq_exec_" #_name, ADF_TL_SIMPLE_COUNT, \
+ ADF_TL_CMDQ_REG_OFF(_name, reg_tm_cmdq_exec_cnt, gen6))
+#define ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(_name) \
+ ADF_TL_COUNTER("cmdq_drain_" #_name, ADF_TL_SIMPLE_COUNT, \
+ ADF_TL_CMDQ_REG_OFF(_name, reg_tm_cmdq_drain_cnt, \
+ gen6))
+
+#define CPR_QUEUE_COUNT 5
+#define DCPR_QUEUE_COUNT 3
+#define PKE_QUEUE_COUNT 1
+#define WAT_QUEUE_COUNT 7
+#define WCP_QUEUE_COUNT 7
+#define UCS_QUEUE_COUNT 3
+#define ATH_QUEUE_COUNT 2
+
/* Device level counters. */
static const struct adf_tl_dbg_counter dev_counters[] = {
/* PCIe partial transactions. */
@@ -57,6 +76,10 @@ static const struct adf_tl_dbg_counter dev_counters[] = {
/* Maximum uTLB used. */
ADF_TL_COUNTER(AT_MAX_UTLB_USED_NAME, ADF_TL_SIMPLE_COUNT,
ADF_GEN6_TL_DEV_REG_OFF(reg_tl_at_max_utlb_used)),
+ /* Ring Empty average [ns] across all rings. */
+ ADF_TL_COUNTER_LATENCY(RE_ACC_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_re_acc),
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_re_cnt)),
};
/* Accelerator utilization counters */
@@ -95,6 +118,80 @@ static const struct adf_tl_dbg_counter sl_exec_counters[ADF_TL_SL_CNT_COUNT] = {
[SLICE_IDX(ath)] = ADF_GEN6_TL_SL_EXEC_COUNTER(ath),
};
+static const struct adf_tl_dbg_counter cnv_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(cnv),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(cnv),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(cnv)
+};
+
+#define NUM_CMDQ_COUNTERS ARRAY_SIZE(cnv_cmdq_counters)
+
+static const struct adf_tl_dbg_counter dcprz_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(dcprz),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(dcprz),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(dcprz)
+};
+
+static_assert(ARRAY_SIZE(dcprz_cmdq_counters) == NUM_CMDQ_COUNTERS);
+
+static const struct adf_tl_dbg_counter pke_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(pke),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(pke),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(pke)
+};
+
+static_assert(ARRAY_SIZE(pke_cmdq_counters) == NUM_CMDQ_COUNTERS);
+
+static const struct adf_tl_dbg_counter wat_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(wat),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(wat),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(wat)
+};
+
+static_assert(ARRAY_SIZE(wat_cmdq_counters) == NUM_CMDQ_COUNTERS);
+
+static const struct adf_tl_dbg_counter wcp_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(wcp),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(wcp),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(wcp)
+};
+
+static_assert(ARRAY_SIZE(wcp_cmdq_counters) == NUM_CMDQ_COUNTERS);
+
+static const struct adf_tl_dbg_counter ucs_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(ucs),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(ucs),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(ucs)
+};
+
+static_assert(ARRAY_SIZE(ucs_cmdq_counters) == NUM_CMDQ_COUNTERS);
+
+static const struct adf_tl_dbg_counter ath_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(ath),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(ath),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(ath)
+};
+
+static_assert(ARRAY_SIZE(ath_cmdq_counters) == NUM_CMDQ_COUNTERS);
+
+/* Per-slice-type command queue counters (wait, exec and drain). */
+static const struct adf_tl_dbg_counter *cmdq_counters[ADF_TL_SL_CNT_COUNT] = {
+ /* Compression accelerator command queue counters. */
+ [SLICE_IDX(cpr)] = cnv_cmdq_counters,
+ /* Decompression accelerator command queue counters. */
+ [SLICE_IDX(dcpr)] = dcprz_cmdq_counters,
+ /* PKE command queue counters. */
+ [SLICE_IDX(pke)] = pke_cmdq_counters,
+ /* Wireless Authentication accelerator command queue counters. */
+ [SLICE_IDX(wat)] = wat_cmdq_counters,
+ /* Wireless Cipher accelerator command queue counters. */
+ [SLICE_IDX(wcp)] = wcp_cmdq_counters,
+ /* UCS accelerator command queue counters. */
+ [SLICE_IDX(ucs)] = ucs_cmdq_counters,
+ /* Authentication accelerator command queue counters. */
+ [SLICE_IDX(ath)] = ath_cmdq_counters,
+};
+
/* Ring pair counters. */
static const struct adf_tl_dbg_counter rp_counters[] = {
/* PCIe partial transactions. */
@@ -122,12 +219,17 @@ static const struct adf_tl_dbg_counter rp_counters[] = {
/* Payload DevTLB miss rate. */
ADF_TL_COUNTER(AT_PAYLD_DTLB_MISS_NAME, ADF_TL_SIMPLE_COUNT,
ADF_GEN6_TL_RP_REG_OFF(reg_tl_at_payld_devtlb_miss)),
+ /* Ring Empty average [ns]. */
+ ADF_TL_COUNTER_LATENCY(RE_ACC_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_re_acc),
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_re_cnt)),
};
void adf_gen6_init_tl_data(struct adf_tl_hw_data *tl_data)
{
tl_data->layout_sz = ADF_GEN6_TL_LAYOUT_SZ;
tl_data->slice_reg_sz = ADF_GEN6_TL_SLICE_REG_SZ;
+ tl_data->cmdq_reg_sz = ADF_GEN6_TL_CMDQ_REG_SZ;
tl_data->rp_reg_sz = ADF_GEN6_TL_RP_REG_SZ;
tl_data->num_hbuff = ADF_GEN6_TL_NUM_HIST_BUFFS;
tl_data->max_rp = ADF_GEN6_TL_MAX_RP_NUM;
@@ -139,8 +241,18 @@ void adf_gen6_init_tl_data(struct adf_tl_hw_data *tl_data)
tl_data->num_dev_counters = ARRAY_SIZE(dev_counters);
tl_data->sl_util_counters = sl_util_counters;
tl_data->sl_exec_counters = sl_exec_counters;
+ tl_data->cmdq_counters = cmdq_counters;
+ tl_data->num_cmdq_counters = NUM_CMDQ_COUNTERS;
tl_data->rp_counters = rp_counters;
tl_data->num_rp_counters = ARRAY_SIZE(rp_counters);
tl_data->max_sl_cnt = ADF_GEN6_TL_MAX_SLICES_PER_TYPE;
+
+ tl_data->multiplier.cpr_cnt = CPR_QUEUE_COUNT;
+ tl_data->multiplier.dcpr_cnt = DCPR_QUEUE_COUNT;
+ tl_data->multiplier.pke_cnt = PKE_QUEUE_COUNT;
+ tl_data->multiplier.wat_cnt = WAT_QUEUE_COUNT;
+ tl_data->multiplier.wcp_cnt = WCP_QUEUE_COUNT;
+ tl_data->multiplier.ucs_cnt = UCS_QUEUE_COUNT;
+ tl_data->multiplier.ath_cnt = ATH_QUEUE_COUNT;
}
EXPORT_SYMBOL_GPL(adf_gen6_init_tl_data);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c
index 74fb0c2ed241..b64142db1f0d 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c
@@ -212,6 +212,23 @@ int adf_tl_halt(struct adf_accel_dev *accel_dev)
return ret;
}
+static void adf_set_cmdq_cnt(struct adf_accel_dev *accel_dev,
+ struct adf_tl_hw_data *tl_data)
+{
+ struct icp_qat_fw_init_admin_slice_cnt *slice_cnt, *cmdq_cnt;
+
+ slice_cnt = &accel_dev->telemetry->slice_cnt;
+ cmdq_cnt = &accel_dev->telemetry->cmdq_cnt;
+
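+ /* Each slice drives a fixed, generation-specific number of command queues. */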
+ cmdq_cnt->cpr_cnt = slice_cnt->cpr_cnt * tl_data->multiplier.cpr_cnt;
+ cmdq_cnt->dcpr_cnt = slice_cnt->dcpr_cnt * tl_data->multiplier.dcpr_cnt;
+ cmdq_cnt->pke_cnt = slice_cnt->pke_cnt * tl_data->multiplier.pke_cnt;
+ cmdq_cnt->wat_cnt = slice_cnt->wat_cnt * tl_data->multiplier.wat_cnt;
+ cmdq_cnt->wcp_cnt = slice_cnt->wcp_cnt * tl_data->multiplier.wcp_cnt;
+ cmdq_cnt->ucs_cnt = slice_cnt->ucs_cnt * tl_data->multiplier.ucs_cnt;
+ cmdq_cnt->ath_cnt = slice_cnt->ath_cnt * tl_data->multiplier.ath_cnt;
+}
+
int adf_tl_run(struct adf_accel_dev *accel_dev, int state)
{
struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
@@ -235,6 +252,8 @@ int adf_tl_run(struct adf_accel_dev *accel_dev, int state)
return ret;
}
+ adf_set_cmdq_cnt(accel_dev, tl_data);
+
telemetry->hbuffs = state;
atomic_set(&telemetry->state, state);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h
index e54a406cc1b4..02d75c3c214a 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h
@@ -28,19 +28,23 @@ struct dentry;
struct adf_tl_hw_data {
size_t layout_sz;
size_t slice_reg_sz;
+ size_t cmdq_reg_sz;
size_t rp_reg_sz;
size_t msg_cnt_off;
const struct adf_tl_dbg_counter *dev_counters;
const struct adf_tl_dbg_counter *sl_util_counters;
const struct adf_tl_dbg_counter *sl_exec_counters;
+ const struct adf_tl_dbg_counter **cmdq_counters;
const struct adf_tl_dbg_counter *rp_counters;
u8 num_hbuff;
u8 cpp_ns_per_cycle;
u8 bw_units_to_bytes;
u8 num_dev_counters;
u8 num_rp_counters;
+ u8 num_cmdq_counters;
u8 max_rp;
u8 max_sl_cnt;
+ struct icp_qat_fw_init_admin_slice_cnt multiplier;
};
struct adf_telemetry {
@@ -69,6 +73,7 @@ struct adf_telemetry {
struct mutex wr_lock;
struct delayed_work work_ctx;
struct icp_qat_fw_init_admin_slice_cnt slice_cnt;
+ struct icp_qat_fw_init_admin_slice_cnt cmdq_cnt;
};
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
index a32db273842a..b81f70576683 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
@@ -339,6 +339,48 @@ static int tl_calc_and_print_sl_counters(struct adf_accel_dev *accel_dev,
return 0;
}
+static int tl_print_cmdq_counter(struct adf_telemetry *telemetry,
+ const struct adf_tl_dbg_counter *ctr,
+ struct seq_file *s, u8 cnt_id, u8 counter)
+{
+ size_t cmdq_regs_sz = GET_TL_DATA(telemetry->accel_dev).cmdq_reg_sz;
+ size_t offset_inc = cnt_id * cmdq_regs_sz;
+ struct adf_tl_dbg_counter slice_ctr;
+ char cnt_name[MAX_COUNT_NAME_SIZE];
+
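+ /* Each queue of this type has its own register block; offset past the preceding ones. */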
+ slice_ctr = *(ctr + counter);
+ slice_ctr.offset1 += offset_inc;
+ snprintf(cnt_name, MAX_COUNT_NAME_SIZE, "%s%d", slice_ctr.name, cnt_id);
+
+ return tl_calc_and_print_counter(telemetry, s, &slice_ctr, cnt_name);
+}
+
+static int tl_calc_and_print_cmdq_counters(struct adf_accel_dev *accel_dev,
+ struct seq_file *s, u8 cnt_type,
+ u8 cnt_id)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
+ struct adf_telemetry *telemetry = accel_dev->telemetry;
+ const struct adf_tl_dbg_counter **cmdq_tl_counters;
+ const struct adf_tl_dbg_counter *ctr;
+ u8 counter;
+ int ret;
+
+ cmdq_tl_counters = tl_data->cmdq_counters;
+ ctr = cmdq_tl_counters[cnt_type];
+
+ for (counter = 0; counter < tl_data->num_cmdq_counters; counter++) {
+ ret = tl_print_cmdq_counter(telemetry, ctr, s, cnt_id, counter);
+ if (ret) {
+ dev_notice(&GET_DEV(accel_dev),
+ "invalid slice utilization counter type\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static void tl_print_msg_cnt(struct seq_file *s, u32 msg_cnt)
{
seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, SNAPSHOT_CNT_MSG);
@@ -352,6 +394,7 @@ static int tl_print_dev_data(struct adf_accel_dev *accel_dev,
struct adf_telemetry *telemetry = accel_dev->telemetry;
const struct adf_tl_dbg_counter *dev_tl_counters;
u8 num_dev_counters = tl_data->num_dev_counters;
+ u8 *cmdq_cnt = (u8 *)&telemetry->cmdq_cnt;
u8 *sl_cnt = (u8 *)&telemetry->slice_cnt;
const struct adf_tl_dbg_counter *ctr;
unsigned int i;
@@ -387,6 +430,15 @@ static int tl_print_dev_data(struct adf_accel_dev *accel_dev,
}
}
+ /* Print per command queue telemetry. */
+ for (i = 0; i < ADF_TL_SL_CNT_COUNT; i++) {
+ for (j = 0; j < cmdq_cnt[i]; j++) {
+ ret = tl_calc_and_print_cmdq_counters(accel_dev, s, i, j);
+ if (ret)
+ return ret;
+ }
+ }
+
return 0;
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h
index 11cc9eae19b3..97c5eeaa1b17 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h
@@ -17,6 +17,7 @@ struct adf_accel_dev;
#define LAT_ACC_NAME "gp_lat_acc_avg"
#define BW_IN_NAME "bw_in"
#define BW_OUT_NAME "bw_out"
+#define RE_ACC_NAME "re_acc_avg"
#define PAGE_REQ_LAT_NAME "at_page_req_lat_avg"
#define AT_TRANS_LAT_NAME "at_trans_lat_avg"
#define AT_MAX_UTLB_USED_NAME "at_max_tlb_used"
@@ -43,6 +44,10 @@ struct adf_accel_dev;
(ADF_TL_DEV_REG_OFF(slice##_slices[0], qat_gen) + \
offsetof(struct adf_##qat_gen##_tl_slice_data_regs, reg))
+#define ADF_TL_CMDQ_REG_OFF(slice, reg, qat_gen) \
+ (ADF_TL_DEV_REG_OFF(slice##_cmdq[0], qat_gen) + \
+ offsetof(struct adf_##qat_gen##_tl_cmdq_data_regs, reg))
+
#define ADF_TL_RP_REG_OFF(reg, qat_gen) \
(ADF_TL_DATA_REG_OFF(tl_ring_pairs_data_regs[0], qat_gen) + \
offsetof(struct adf_##qat_gen##_tl_ring_pair_data_regs, reg))
diff --git a/drivers/crypto/intel/qat/qat_common/qat_algs.c b/drivers/crypto/intel/qat/qat_common/qat_algs.c
index 43e6dd9b77b7..7f638a62e3ad 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_algs.c
@@ -5,12 +5,10 @@
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
-#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aes.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
-#include <crypto/hmac.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
@@ -68,16 +66,10 @@ struct qat_alg_aead_ctx {
dma_addr_t dec_cd_paddr;
struct icp_qat_fw_la_bulk_req enc_fw_req;
struct icp_qat_fw_la_bulk_req dec_fw_req;
- struct crypto_shash *hash_tfm;
enum icp_qat_hw_auth_algo qat_hash_alg;
+ unsigned int hash_digestsize;
+ unsigned int hash_blocksize;
struct qat_crypto_instance *inst;
- union {
- struct sha1_state sha1;
- struct sha256_state sha256;
- struct sha512_state sha512;
- };
- char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
- char opad[SHA512_BLOCK_SIZE];
};
struct qat_alg_skcipher_ctx {
@@ -94,125 +86,57 @@ struct qat_alg_skcipher_ctx {
int mode;
};
-static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
-{
- switch (qat_hash_alg) {
- case ICP_QAT_HW_AUTH_ALGO_SHA1:
- return ICP_QAT_HW_SHA1_STATE1_SZ;
- case ICP_QAT_HW_AUTH_ALGO_SHA256:
- return ICP_QAT_HW_SHA256_STATE1_SZ;
- case ICP_QAT_HW_AUTH_ALGO_SHA512:
- return ICP_QAT_HW_SHA512_STATE1_SZ;
- default:
- return -EFAULT;
- }
-}
-
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
struct qat_alg_aead_ctx *ctx,
const u8 *auth_key,
unsigned int auth_keylen)
{
- SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
- int block_size = crypto_shash_blocksize(ctx->hash_tfm);
- int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
- __be32 *hash_state_out;
- __be64 *hash512_state_out;
- int i, offset;
-
- memset(ctx->ipad, 0, block_size);
- memset(ctx->opad, 0, block_size);
- shash->tfm = ctx->hash_tfm;
-
- if (auth_keylen > block_size) {
- int ret = crypto_shash_digest(shash, auth_key,
- auth_keylen, ctx->ipad);
- if (ret)
- return ret;
-
- memcpy(ctx->opad, ctx->ipad, digest_size);
- } else {
- memcpy(ctx->ipad, auth_key, auth_keylen);
- memcpy(ctx->opad, auth_key, auth_keylen);
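+ /*
+ * Precompute the HMAC inner and outer digest midstates with the SHA
+ * library preparekey helpers and load them big-endian into the hardware
+ * setup block: the inner state first, then the outer state at the next
+ * 8-byte-aligned offset.
+ */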
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1: {
+ struct hmac_sha1_key key;
+ __be32 *istate = (__be32 *)hash->sha.state1;
+ __be32 *ostate = (__be32 *)(hash->sha.state1 +
+ round_up(sizeof(key.istate.h), 8));
+
+ hmac_sha1_preparekey(&key, auth_key, auth_keylen);
+ for (int i = 0; i < ARRAY_SIZE(key.istate.h); i++) {
+ istate[i] = cpu_to_be32(key.istate.h[i]);
+ ostate[i] = cpu_to_be32(key.ostate.h[i]);
+ }
+ memzero_explicit(&key, sizeof(key));
+ return 0;
}
-
- for (i = 0; i < block_size; i++) {
- char *ipad_ptr = ctx->ipad + i;
- char *opad_ptr = ctx->opad + i;
- *ipad_ptr ^= HMAC_IPAD_VALUE;
- *opad_ptr ^= HMAC_OPAD_VALUE;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256: {
+ struct hmac_sha256_key key;
+ __be32 *istate = (__be32 *)hash->sha.state1;
+ __be32 *ostate = (__be32 *)(hash->sha.state1 +
+ sizeof(key.key.istate.h));
+
+ hmac_sha256_preparekey(&key, auth_key, auth_keylen);
+ for (int i = 0; i < ARRAY_SIZE(key.key.istate.h); i++) {
+ istate[i] = cpu_to_be32(key.key.istate.h[i]);
+ ostate[i] = cpu_to_be32(key.key.ostate.h[i]);
+ }
+ memzero_explicit(&key, sizeof(key));
+ return 0;
}
-
- if (crypto_shash_init(shash))
- return -EFAULT;
-
- if (crypto_shash_update(shash, ctx->ipad, block_size))
- return -EFAULT;
-
- hash_state_out = (__be32 *)hash->sha.state1;
- hash512_state_out = (__be64 *)hash_state_out;
-
- switch (ctx->qat_hash_alg) {
- case ICP_QAT_HW_AUTH_ALGO_SHA1:
- if (crypto_shash_export_core(shash, &ctx->sha1))
- return -EFAULT;
- for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
- break;
- case ICP_QAT_HW_AUTH_ALGO_SHA256:
- if (crypto_shash_export_core(shash, &ctx->sha256))
- return -EFAULT;
- for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
- break;
- case ICP_QAT_HW_AUTH_ALGO_SHA512:
- if (crypto_shash_export_core(shash, &ctx->sha512))
- return -EFAULT;
- for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
- *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
- break;
- default:
- return -EFAULT;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512: {
+ struct hmac_sha512_key key;
+ __be64 *istate = (__be64 *)hash->sha.state1;
+ __be64 *ostate = (__be64 *)(hash->sha.state1 +
+ sizeof(key.key.istate.h));
+
+ hmac_sha512_preparekey(&key, auth_key, auth_keylen);
+ for (int i = 0; i < ARRAY_SIZE(key.key.istate.h); i++) {
+ istate[i] = cpu_to_be64(key.key.istate.h[i]);
+ ostate[i] = cpu_to_be64(key.key.ostate.h[i]);
+ }
+ memzero_explicit(&key, sizeof(key));
+ return 0;
}
-
- if (crypto_shash_init(shash))
- return -EFAULT;
-
- if (crypto_shash_update(shash, ctx->opad, block_size))
- return -EFAULT;
-
- offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
- if (offset < 0)
- return -EFAULT;
-
- hash_state_out = (__be32 *)(hash->sha.state1 + offset);
- hash512_state_out = (__be64 *)hash_state_out;
-
- switch (ctx->qat_hash_alg) {
- case ICP_QAT_HW_AUTH_ALGO_SHA1:
- if (crypto_shash_export_core(shash, &ctx->sha1))
- return -EFAULT;
- for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
- break;
- case ICP_QAT_HW_AUTH_ALGO_SHA256:
- if (crypto_shash_export_core(shash, &ctx->sha256))
- return -EFAULT;
- for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
- break;
- case ICP_QAT_HW_AUTH_ALGO_SHA512:
- if (crypto_shash_export_core(shash, &ctx->sha512))
- return -EFAULT;
- for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
- *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
- break;
default:
return -EFAULT;
}
- memzero_explicit(ctx->ipad, block_size);
- memzero_explicit(ctx->opad, block_size);
- return 0;
}
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
@@ -259,7 +183,7 @@ static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
ctx->qat_hash_alg, digestsize);
hash->sha.inner_setup.auth_counter.counter =
- cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
+ cpu_to_be32(ctx->hash_blocksize);
if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
return -EFAULT;
@@ -326,7 +250,7 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
struct icp_qat_hw_cipher_algo_blk *cipher =
(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
sizeof(struct icp_qat_hw_auth_setup) +
- roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
+ roundup(ctx->hash_digestsize, 8) * 2);
struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
@@ -346,7 +270,7 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
ctx->qat_hash_alg,
digestsize);
hash->sha.inner_setup.auth_counter.counter =
- cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
+ cpu_to_be32(ctx->hash_blocksize);
if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
return -EFAULT;
@@ -368,7 +292,7 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
cipher_cd_ctrl->cipher_cfg_offset =
(sizeof(struct icp_qat_hw_auth_setup) +
- roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
+ roundup(ctx->hash_digestsize, 8) * 2) >> 3;
ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
@@ -1150,32 +1074,35 @@ static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
}
static int qat_alg_aead_init(struct crypto_aead *tfm,
- enum icp_qat_hw_auth_algo hash,
- const char *hash_name)
+ enum icp_qat_hw_auth_algo hash_alg,
+ unsigned int hash_digestsize,
+ unsigned int hash_blocksize)
{
struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
- ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
- if (IS_ERR(ctx->hash_tfm))
- return PTR_ERR(ctx->hash_tfm);
- ctx->qat_hash_alg = hash;
+ ctx->qat_hash_alg = hash_alg;
+ ctx->hash_digestsize = hash_digestsize;
+ ctx->hash_blocksize = hash_blocksize;
crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
return 0;
}
static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
{
- return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
+ return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1,
+ SHA1_DIGEST_SIZE, SHA1_BLOCK_SIZE);
}
static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
{
- return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
+ return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256,
+ SHA256_DIGEST_SIZE, SHA256_BLOCK_SIZE);
}
static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
{
- return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
+ return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512,
+ SHA512_DIGEST_SIZE, SHA512_BLOCK_SIZE);
}
static void qat_alg_aead_exit(struct crypto_aead *tfm)
@@ -1184,8 +1111,6 @@ static void qat_alg_aead_exit(struct crypto_aead *tfm)
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev;
- crypto_free_shash(ctx->hash_tfm);
-
if (!inst)
return;
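
The rewrite above replaces the crypto_shash-based precompute with the library HMAC API, which derives the inner/outer compression states directly from the raw key. A minimal sketch of the same pattern in isolation, assuming the hmac_sha256_preparekey() declarations live in <crypto/sha2.h> as in recent kernels (the helper name is illustrative):

	#include <crypto/sha2.h>
	#include <linux/string.h>

	/* Serialise precomputed HMAC-SHA256 inner/outer states for hardware. */
	static void demo_hmac_sha256_precompute(const u8 *raw_key, size_t keylen,
						__be32 *istate, __be32 *ostate)
	{
		struct hmac_sha256_key k;
		int i;

		hmac_sha256_preparekey(&k, raw_key, keylen);
		for (i = 0; i < ARRAY_SIZE(k.key.istate.h); i++) {
			istate[i] = cpu_to_be32(k.key.istate.h[i]);
			ostate[i] = cpu_to_be32(k.key.ostate.h[i]);
		}
		/* The derived states are key material; wipe the stack copy. */
		memzero_explicit(&k, sizeof(k));
	}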
diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
index 21d652a1c8ef..18c3e4416dc5 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
@@ -1900,7 +1900,7 @@ static int qat_uclo_map_objs_from_mof(struct icp_qat_mof_handle *mobj_handle)
if (sobj_hdr)
sobj_chunk_num = sobj_hdr->num_chunks;
- mobj_hdr = kzalloc((uobj_chunk_num + sobj_chunk_num) *
+ mobj_hdr = kcalloc(size_add(uobj_chunk_num, sobj_chunk_num),
sizeof(*mobj_hdr), GFP_KERNEL);
if (!mobj_hdr)
return -ENOMEM;
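
The allocation fix above stacks two overflow guards. A minimal sketch of the idiom (hypothetical names):

	#include <linux/overflow.h>
	#include <linux/slab.h>

	/* size_add() saturates at SIZE_MAX instead of wrapping, and kcalloc()
	 * rejects any request whose count * size product would overflow. */
	static void *demo_alloc_hdrs(size_t uobj_cnt, size_t sobj_cnt,
				     size_t elem_size)
	{
		return kcalloc(size_add(uobj_cnt, sobj_cnt), elem_size,
			       GFP_KERNEL);
	}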
diff --git a/drivers/crypto/loongson/Kconfig b/drivers/crypto/loongson/Kconfig
new file mode 100644
index 000000000000..15475da8fc11
--- /dev/null
+++ b/drivers/crypto/loongson/Kconfig
@@ -0,0 +1,5 @@
+config CRYPTO_DEV_LOONGSON_RNG
+ tristate "Support for Loongson RNG Driver"
+ depends on MFD_LOONGSON_SE
+ help
+	  This enables support for the random number generator engine
+	  provided by the Loongson Security Engine.
diff --git a/drivers/crypto/loongson/Makefile b/drivers/crypto/loongson/Makefile
new file mode 100644
index 000000000000..1ce5ec32b553
--- /dev/null
+++ b/drivers/crypto/loongson/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_CRYPTO_DEV_LOONGSON_RNG) += loongson-rng.o
diff --git a/drivers/crypto/loongson/loongson-rng.c b/drivers/crypto/loongson/loongson-rng.c
new file mode 100644
index 000000000000..3a4940260f9e
--- /dev/null
+++ b/drivers/crypto/loongson/loongson-rng.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 HiSilicon Limited. */
+/* Copyright (c) 2025 Loongson Technology Corporation Limited. */
+
+#include <linux/crypto.h>
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mfd/loongson-se.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+#include <crypto/internal/rng.h>
+
+#define SE_SEED_SIZE 32
+
+struct loongson_rng_list {
+ struct mutex lock;
+ struct list_head list;
+ int registered;
+};
+
+struct loongson_rng {
+ u32 used;
+ struct loongson_se_engine *engine;
+ struct list_head list;
+ struct mutex lock;
+};
+
+struct loongson_rng_ctx {
+ struct loongson_rng *rng;
+};
+
+struct loongson_rng_cmd {
+ u32 cmd_id;
+ union {
+ u32 len;
+ u32 ret;
+ } u;
+ u32 seed_off;
+ u32 out_off;
+ u32 pad[4];
+};
+
+static struct loongson_rng_list rng_devices = {
+ .lock = __MUTEX_INITIALIZER(rng_devices.lock),
+ .list = LIST_HEAD_INIT(rng_devices.list),
+};
+
+static int loongson_rng_generate(struct crypto_rng *tfm, const u8 *src,
+ unsigned int slen, u8 *dstn, unsigned int dlen)
+{
+ struct loongson_rng_ctx *ctx = crypto_rng_ctx(tfm);
+ struct loongson_rng *rng = ctx->rng;
+ struct loongson_rng_cmd *cmd = rng->engine->command;
+ int err, len;
+
+ mutex_lock(&rng->lock);
+ cmd->seed_off = 0;
+ do {
+ len = min(dlen, rng->engine->buffer_size);
+ cmd = rng->engine->command;
+ cmd->u.len = len;
+ err = loongson_se_send_engine_cmd(rng->engine);
+ if (err)
+ break;
+
+ cmd = rng->engine->command_ret;
+ if (cmd->u.ret) {
+ err = -EIO;
+ break;
+ }
+
+ memcpy(dstn, rng->engine->data_buffer, len);
+ dlen -= len;
+ dstn += len;
+ } while (dlen > 0);
+ mutex_unlock(&rng->lock);
+
+ return err;
+}
+
+static int loongson_rng_init(struct crypto_tfm *tfm)
+{
+ struct loongson_rng_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct loongson_rng *rng;
+ u32 min_used = U32_MAX;
+
+ mutex_lock(&rng_devices.lock);
+ list_for_each_entry(rng, &rng_devices.list, list) {
+ if (rng->used < min_used) {
+ ctx->rng = rng;
+ min_used = rng->used;
+ }
+ }
+ ctx->rng->used++;
+ mutex_unlock(&rng_devices.lock);
+
+ return 0;
+}
+
+static void loongson_rng_exit(struct crypto_tfm *tfm)
+{
+ struct loongson_rng_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ mutex_lock(&rng_devices.lock);
+ ctx->rng->used--;
+ mutex_unlock(&rng_devices.lock);
+}
+
+static int loongson_rng_seed(struct crypto_rng *tfm, const u8 *seed,
+ unsigned int slen)
+{
+ struct loongson_rng_ctx *ctx = crypto_rng_ctx(tfm);
+ struct loongson_rng *rng = ctx->rng;
+ struct loongson_rng_cmd *cmd;
+ int err;
+
+ if (slen < SE_SEED_SIZE)
+ return -EINVAL;
+
+ slen = min(slen, rng->engine->buffer_size);
+
+ mutex_lock(&rng->lock);
+ cmd = rng->engine->command;
+ cmd->u.len = slen;
+ cmd->seed_off = rng->engine->buffer_off;
+ memcpy(rng->engine->data_buffer, seed, slen);
+ err = loongson_se_send_engine_cmd(rng->engine);
+ if (err)
+ goto out;
+
+ cmd = rng->engine->command_ret;
+ if (cmd->u.ret)
+ err = -EIO;
+out:
+ mutex_unlock(&rng->lock);
+
+ return err;
+}
+
+static struct rng_alg loongson_rng_alg = {
+ .generate = loongson_rng_generate,
+ .seed = loongson_rng_seed,
+ .seedsize = SE_SEED_SIZE,
+ .base = {
+ .cra_name = "stdrng",
+ .cra_driver_name = "loongson_stdrng",
+ .cra_priority = 300,
+ .cra_ctxsize = sizeof(struct loongson_rng_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = loongson_rng_init,
+ .cra_exit = loongson_rng_exit,
+ },
+};
+
+static int loongson_rng_probe(struct platform_device *pdev)
+{
+ struct loongson_rng_cmd *cmd;
+ struct loongson_rng *rng;
+ int ret = 0;
+
+ rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+ if (!rng)
+ return -ENOMEM;
+
+ rng->engine = loongson_se_init_engine(pdev->dev.parent, SE_ENGINE_RNG);
+ if (!rng->engine)
+ return -ENODEV;
+ cmd = rng->engine->command;
+ cmd->cmd_id = SE_CMD_RNG;
+ cmd->out_off = rng->engine->buffer_off;
+ mutex_init(&rng->lock);
+
+ mutex_lock(&rng_devices.lock);
+
+ if (!rng_devices.registered) {
+ ret = crypto_register_rng(&loongson_rng_alg);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register crypto(%d)\n", ret);
+ goto out;
+ }
+ rng_devices.registered = 1;
+ }
+
+ list_add_tail(&rng->list, &rng_devices.list);
+out:
+ mutex_unlock(&rng_devices.lock);
+
+ return ret;
+}
+
+static struct platform_driver loongson_rng_driver = {
+ .probe = loongson_rng_probe,
+ .driver = {
+ .name = "loongson-rng",
+ },
+};
+module_platform_driver(loongson_rng_driver);
+
+MODULE_ALIAS("platform:loongson-rng");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yinggang Gu <guyinggang@loongson.cn>");
+MODULE_AUTHOR("Qunqin Zhao <zhaoqunqin@loongson.cn>");
+MODULE_DESCRIPTION("Loongson Random Number Generator driver");
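
Once loaded, the driver is reachable through the generic crypto RNG API under the "stdrng" name it registers. A minimal consumer sketch (function name illustrative):

	#include <crypto/rng.h>
	#include <linux/err.h>

	static int demo_get_random(u8 *out, unsigned int len)
	{
		struct crypto_rng *rng;
		int ret;

		/* Resolves to loongson_stdrng, or any higher-priority stdrng. */
		rng = crypto_alloc_rng("stdrng", 0, 0);
		if (IS_ERR(rng))
			return PTR_ERR(rng);

		ret = crypto_rng_get_bytes(rng, out, len);
		crypto_free_rng(rng);
		return ret;
	}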
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index cc47e361089a..ebdf4efa09d4 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -1615,7 +1615,7 @@ int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf,
return -EINVAL;
}
err_msg = "Invalid engine group format";
- strscpy(tmp_buf, ctx->val.vstr, strlen(ctx->val.vstr) + 1);
+ strscpy(tmp_buf, ctx->val.vstr);
start = tmp_buf;
has_se = has_ie = has_ae = false;
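
The bound being dropped here was self-defeating: limiting the copy to strlen(src) + 1 derives the limit from the source, not the destination, so an oversized source could still overrun the buffer. The two-argument strscpy() infers the bound from the destination array type instead. In miniature (buffer size illustrative):

	char tmp_buf[64];

	/* sizeof(tmp_buf) is inferred; the result is always NUL-terminated
	 * and truncated if the source does not fit. */
	strscpy(tmp_buf, src);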
diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c
index fd0a98b2fb1b..0493041ea088 100644
--- a/drivers/crypto/nx/nx-common-powernv.c
+++ b/drivers/crypto/nx/nx-common-powernv.c
@@ -1043,8 +1043,10 @@ static struct scomp_alg nx842_powernv_alg = {
.base.cra_priority = 300,
.base.cra_module = THIS_MODULE,
- .alloc_ctx = nx842_powernv_crypto_alloc_ctx,
- .free_ctx = nx842_crypto_free_ctx,
+ .streams = {
+ .alloc_ctx = nx842_powernv_crypto_alloc_ctx,
+ .free_ctx = nx842_crypto_free_ctx,
+ },
.compress = nx842_crypto_compress,
.decompress = nx842_crypto_decompress,
};
diff --git a/drivers/crypto/nx/nx-common-pseries.c b/drivers/crypto/nx/nx-common-pseries.c
index f528e072494a..fc0222ebe807 100644
--- a/drivers/crypto/nx/nx-common-pseries.c
+++ b/drivers/crypto/nx/nx-common-pseries.c
@@ -1020,8 +1020,10 @@ static struct scomp_alg nx842_pseries_alg = {
.base.cra_priority = 300,
.base.cra_module = THIS_MODULE,
- .alloc_ctx = nx842_pseries_crypto_alloc_ctx,
- .free_ctx = nx842_crypto_free_ctx,
+ .streams = {
+ .alloc_ctx = nx842_pseries_crypto_alloc_ctx,
+ .free_ctx = nx842_crypto_free_ctx,
+ },
.compress = nx842_crypto_compress,
.decompress = nx842_crypto_decompress,
};
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 244e24e52987..3cc802622dd5 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -32,6 +32,7 @@
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
+#include <linux/workqueue.h>
#include "omap-crypto.h"
#include "omap-aes.h"
@@ -221,7 +222,7 @@ static void omap_aes_dma_out_callback(void *data)
struct omap_aes_dev *dd = data;
/* dma_lch_out - completed */
- tasklet_schedule(&dd->done_task);
+ queue_work(system_bh_wq, &dd->done_task);
}
static int omap_aes_dma_init(struct omap_aes_dev *dd)
@@ -494,9 +495,9 @@ static void omap_aes_copy_ivout(struct omap_aes_dev *dd, u8 *ivbuf)
((u32 *)ivbuf)[i] = omap_aes_read(dd, AES_REG_IV(dd, i));
}
-static void omap_aes_done_task(unsigned long data)
+static void omap_aes_done_task(struct work_struct *t)
{
- struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
+ struct omap_aes_dev *dd = from_work(dd, t, done_task);
pr_debug("enter done_task\n");
@@ -925,7 +926,7 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id)
if (!dd->total)
/* All bytes read! */
- tasklet_schedule(&dd->done_task);
+ queue_work(system_bh_wq, &dd->done_task);
else
/* Enable DATA_IN interrupt for next block */
omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
@@ -1140,7 +1141,7 @@ static int omap_aes_probe(struct platform_device *pdev)
(reg & dd->pdata->major_mask) >> dd->pdata->major_shift,
(reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
- tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
+ INIT_WORK(&dd->done_task, omap_aes_done_task);
err = omap_aes_dma_init(dd);
if (err == -EPROBE_DEFER) {
@@ -1229,7 +1230,7 @@ err_engine:
omap_aes_dma_cleanup(dd);
err_irq:
- tasklet_kill(&dd->done_task);
+ cancel_work_sync(&dd->done_task);
err_pm_disable:
pm_runtime_disable(dev);
err_res:
@@ -1264,7 +1265,7 @@ static void omap_aes_remove(struct platform_device *pdev)
crypto_engine_exit(dd->engine);
- tasklet_kill(&dd->done_task);
+ cancel_work_sync(&dd->done_task);
omap_aes_dma_cleanup(dd);
pm_runtime_disable(dd->dev);
}
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
index 41d67780fd45..99c36a777e97 100644
--- a/drivers/crypto/omap-aes.h
+++ b/drivers/crypto/omap-aes.h
@@ -159,7 +159,7 @@ struct omap_aes_dev {
unsigned long flags;
int err;
- struct tasklet_struct done_task;
+ struct work_struct done_task;
struct aead_queue aead_queue;
spinlock_t lock;
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 9c5538ae17db..149ebd77710b 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -32,6 +32,7 @@
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
+#include <linux/workqueue.h>
#include "omap-crypto.h"
@@ -130,7 +131,7 @@ struct omap_des_dev {
unsigned long flags;
int err;
- struct tasklet_struct done_task;
+ struct work_struct done_task;
struct skcipher_request *req;
struct crypto_engine *engine;
@@ -325,7 +326,7 @@ static void omap_des_dma_out_callback(void *data)
struct omap_des_dev *dd = data;
/* dma_lch_out - completed */
- tasklet_schedule(&dd->done_task);
+ queue_work(system_bh_wq, &dd->done_task);
}
static int omap_des_dma_init(struct omap_des_dev *dd)
@@ -580,9 +581,9 @@ static int omap_des_crypt_req(struct crypto_engine *engine,
omap_des_crypt_dma_start(dd);
}
-static void omap_des_done_task(unsigned long data)
+static void omap_des_done_task(struct work_struct *t)
{
- struct omap_des_dev *dd = (struct omap_des_dev *)data;
+ struct omap_des_dev *dd = from_work(dd, t, done_task);
int i;
pr_debug("enter done_task\n");
@@ -890,7 +891,7 @@ static irqreturn_t omap_des_irq(int irq, void *dev_id)
if (!dd->total)
/* All bytes read! */
- tasklet_schedule(&dd->done_task);
+ queue_work(system_bh_wq, &dd->done_task);
else
/* Enable DATA_IN interrupt for next block */
omap_des_write(dd, DES_REG_IRQ_ENABLE(dd), 0x2);
@@ -986,7 +987,7 @@ static int omap_des_probe(struct platform_device *pdev)
(reg & dd->pdata->major_mask) >> dd->pdata->major_shift,
(reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
- tasklet_init(&dd->done_task, omap_des_done_task, (unsigned long)dd);
+ INIT_WORK(&dd->done_task, omap_des_done_task);
err = omap_des_dma_init(dd);
if (err == -EPROBE_DEFER) {
@@ -1053,7 +1054,7 @@ err_engine:
omap_des_dma_cleanup(dd);
err_irq:
- tasklet_kill(&dd->done_task);
+ cancel_work_sync(&dd->done_task);
err_get:
pm_runtime_disable(dev);
err_res:
@@ -1077,7 +1078,7 @@ static void omap_des_remove(struct platform_device *pdev)
crypto_engine_unregister_skcipher(
&dd->pdata->algs_info[i].algs_list[j]);
- tasklet_kill(&dd->done_task);
+ cancel_work_sync(&dd->done_task);
omap_des_dma_cleanup(dd);
pm_runtime_disable(dd->dev);
}
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 6328e8026b91..ff8aac02994a 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -37,6 +37,7 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/workqueue.h>
#define MD5_DIGEST_SIZE 16
@@ -217,7 +218,7 @@ struct omap_sham_dev {
int irq;
int err;
struct dma_chan *dma_lch;
- struct tasklet_struct done_task;
+ struct work_struct done_task;
u8 polling_mode;
u8 xmit_buf[BUFLEN] OMAP_ALIGNED;
@@ -561,7 +562,7 @@ static void omap_sham_dma_callback(void *param)
struct omap_sham_dev *dd = param;
set_bit(FLAGS_DMA_READY, &dd->flags);
- tasklet_schedule(&dd->done_task);
+ queue_work(system_bh_wq, &dd->done_task);
}
static int omap_sham_xmit_dma(struct omap_sham_dev *dd, size_t length,
@@ -1703,9 +1704,9 @@ static struct ahash_engine_alg algs_sha384_sha512[] = {
},
};
-static void omap_sham_done_task(unsigned long data)
+static void omap_sham_done_task(struct work_struct *t)
{
- struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
+ struct omap_sham_dev *dd = from_work(dd, t, done_task);
int err = 0;
dev_dbg(dd->dev, "%s: flags=%lx\n", __func__, dd->flags);
@@ -1739,7 +1740,7 @@ finish:
static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd)
{
set_bit(FLAGS_OUTPUT_READY, &dd->flags);
- tasklet_schedule(&dd->done_task);
+ queue_work(system_bh_wq, &dd->done_task);
return IRQ_HANDLED;
}
@@ -2059,7 +2060,7 @@ static int omap_sham_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dd);
INIT_LIST_HEAD(&dd->list);
- tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
+ INIT_WORK(&dd->done_task, omap_sham_done_task);
crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) :
@@ -2194,7 +2195,7 @@ static void omap_sham_remove(struct platform_device *pdev)
&dd->pdata->algs_info[i].algs_list[j]);
dd->pdata->algs_info[i].registered--;
}
- tasklet_kill(&dd->done_task);
+ cancel_work_sync(&dd->done_task);
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
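
All three omap conversions above follow the same tasklet-to-BH-workqueue recipe. A condensed sketch of the pattern (struct and function names illustrative):

	#include <linux/workqueue.h>

	struct demo_dev {
		struct work_struct done_task;
	};

	static void demo_done_task(struct work_struct *t)
	{
		/* from_work() is container_of() specialised for work items. */
		struct demo_dev *dd = from_work(dd, t, done_task);

		/* ... completion handling; system_bh_wq runs this in
		 * softirq context, preserving the old tasklet semantics ... */
	}

	/* setup:    INIT_WORK(&dd->done_task, demo_done_task);
	 * schedule: queue_work(system_bh_wq, &dd->done_task);
	 * teardown: cancel_work_sync(&dd->done_task);
	 */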
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
index d6928ebe9526..b9f5a8b42e66 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -254,7 +254,7 @@ static void rk_hash_unprepare(struct crypto_engine *engine, void *breq)
struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
struct rk_crypto_info *rkc = rctx->dev;
- dma_unmap_sg(rkc->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE);
+ dma_unmap_sg(rkc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
}
static int rk_hash_run(struct crypto_engine *engine, void *breq)
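
The underlying DMA API rule for this fix: dma_unmap_sg() must be given the same sg list and nents that were passed to dma_map_sg(), not the possibly smaller mapped count that dma_map_sg() returned. Sketch:

	int mapped;

	mapped = dma_map_sg(dev, sg, sg_nents(sg), DMA_TO_DEVICE);
	if (mapped == 0)
		return -ENOMEM;
	/* ... transfer using the first 'mapped' entries ... */
	dma_unmap_sg(dev, sg, sg_nents(sg), DMA_TO_DEVICE); /* original nents */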
diff --git a/drivers/crypto/starfive/jh7110-aes.c b/drivers/crypto/starfive/jh7110-aes.c
index 86a1a1fa9f8f..426b24889af8 100644
--- a/drivers/crypto/starfive/jh7110-aes.c
+++ b/drivers/crypto/starfive/jh7110-aes.c
@@ -511,8 +511,7 @@ static int starfive_aes_map_sg(struct starfive_cryp_dev *cryp,
stsg = sg_next(stsg), dtsg = sg_next(dtsg)) {
src_nents = dma_map_sg(cryp->dev, stsg, 1, DMA_BIDIRECTIONAL);
if (src_nents == 0)
- return dev_err_probe(cryp->dev, -ENOMEM,
- "dma_map_sg error\n");
+ return -ENOMEM;
dst_nents = src_nents;
len = min(sg_dma_len(stsg), remain);
@@ -528,13 +527,11 @@ static int starfive_aes_map_sg(struct starfive_cryp_dev *cryp,
for (stsg = src, dtsg = dst;;) {
src_nents = dma_map_sg(cryp->dev, stsg, 1, DMA_TO_DEVICE);
if (src_nents == 0)
- return dev_err_probe(cryp->dev, -ENOMEM,
- "dma_map_sg src error\n");
+ return -ENOMEM;
dst_nents = dma_map_sg(cryp->dev, dtsg, 1, DMA_FROM_DEVICE);
if (dst_nents == 0)
- return dev_err_probe(cryp->dev, -ENOMEM,
- "dma_map_sg dst error\n");
+ return -ENOMEM;
len = min(sg_dma_len(stsg), sg_dma_len(dtsg));
len = min(len, remain);
@@ -669,8 +666,7 @@ static int starfive_aes_aead_do_one_req(struct crypto_engine *engine, void *areq
if (cryp->assoclen) {
rctx->adata = kzalloc(cryp->assoclen + AES_BLOCK_SIZE, GFP_KERNEL);
if (!rctx->adata)
- return dev_err_probe(cryp->dev, -ENOMEM,
- "Failed to alloc memory for adata");
+ return -ENOMEM;
if (sg_copy_to_buffer(req->src, sg_nents_for_len(req->src, cryp->assoclen),
rctx->adata, cryp->assoclen) != cryp->assoclen)
diff --git a/drivers/crypto/starfive/jh7110-hash.c b/drivers/crypto/starfive/jh7110-hash.c
index 6cfe0238f615..e6839c7bfb73 100644
--- a/drivers/crypto/starfive/jh7110-hash.c
+++ b/drivers/crypto/starfive/jh7110-hash.c
@@ -229,8 +229,7 @@ static int starfive_hash_one_request(struct crypto_engine *engine, void *areq)
for_each_sg(rctx->in_sg, tsg, rctx->in_sg_len, i) {
src_nents = dma_map_sg(cryp->dev, tsg, 1, DMA_TO_DEVICE);
if (src_nents == 0)
- return dev_err_probe(cryp->dev, -ENOMEM,
- "dma_map_sg error\n");
+ return -ENOMEM;
ret = starfive_hash_dma_xfer(cryp, tsg);
dma_unmap_sg(cryp->dev, tsg, 1, DMA_TO_DEVICE);
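
These starfive hunks (and the tegra one below) undo the same misuse: dev_err_probe() is for probe() paths, where it downgrades -EPROBE_DEFER to a debug message and records the deferral reason. In a request handler there is nothing to defer, so a bare errno is correct:

	src_nents = dma_map_sg(cryp->dev, tsg, 1, DMA_TO_DEVICE);
	if (src_nents == 0)
		return -ENOMEM;	/* no dev_err_probe() outside probe paths */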
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index a89b4c5d62a0..5e82e8a1f71a 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -2781,5 +2781,5 @@ static struct platform_driver stm32_cryp_driver = {
module_platform_driver(stm32_cryp_driver);
MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
-MODULE_DESCRIPTION("STMicrolectronics STM32 CRYP hardware driver");
+MODULE_DESCRIPTION("STMicroelectronics STM32 CRYP hardware driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c
index d09b4aaeecef..4a298ace6e9f 100644
--- a/drivers/crypto/tegra/tegra-se-hash.c
+++ b/drivers/crypto/tegra/tegra-se-hash.c
@@ -400,8 +400,9 @@ static int tegra_sha_do_update(struct ahash_request *req)
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct tegra_se *se = ctx->se;
- unsigned int nblks, nresidue, size, ret;
+ unsigned int nblks, nresidue, size;
u32 *cpuvaddr = se->cmdbuf->addr;
+ int ret;
nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size;
nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size;
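
The tegra-se-hash change fixes the classic signedness trap: errno values are negative, so an unsigned ret makes any later "ret < 0" test unreachable. In miniature (do_op() is hypothetical):

	int ret;	/* signed, so it can carry -EINVAL and friends */

	ret = do_op();
	if (ret < 0)	/* always false if ret were unsigned int */
		return ret;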
diff --git a/drivers/crypto/tegra/tegra-se-main.c b/drivers/crypto/tegra/tegra-se-main.c
index 1c94f1de0546..7237f14eaf5a 100644
--- a/drivers/crypto/tegra/tegra-se-main.c
+++ b/drivers/crypto/tegra/tegra-se-main.c
@@ -310,7 +310,7 @@ static int tegra_se_probe(struct platform_device *pdev)
se->engine = crypto_engine_alloc_init(dev, 0);
if (!se->engine)
- return dev_err_probe(dev, -ENOMEM, "failed to init crypto engine\n");
+ return -ENOMEM;
ret = crypto_engine_start(se->engine);
if (ret) {
diff --git a/drivers/crypto/ti/Kconfig b/drivers/crypto/ti/Kconfig
new file mode 100644
index 000000000000..d4f91c1e0cb5
--- /dev/null
+++ b/drivers/crypto/ti/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config CRYPTO_DEV_TI_DTHEV2
+ tristate "Support for TI DTHE V2 cryptography engine"
+ depends on ARCH_K3 || COMPILE_TEST
+ select CRYPTO_ENGINE
+ select CRYPTO_SKCIPHER
+ select CRYPTO_ECB
+ select CRYPTO_CBC
+ help
+	  This enables support for the TI DTHE V2 hardware cryptography
+	  engine found on TI K3 SoCs. Selecting this enables hardware
+	  offloading of cryptographic algorithms on these devices,
+	  providing enhanced resistance against side-channel attacks.
diff --git a/drivers/crypto/ti/Makefile b/drivers/crypto/ti/Makefile
new file mode 100644
index 000000000000..b883078f203d
--- /dev/null
+++ b/drivers/crypto/ti/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CRYPTO_DEV_TI_DTHEV2) += dthev2.o
+dthev2-objs := dthev2-common.o dthev2-aes.o
diff --git a/drivers/crypto/ti/dthev2-aes.c b/drivers/crypto/ti/dthev2-aes.c
new file mode 100644
index 000000000000..3547c41fa4ed
--- /dev/null
+++ b/drivers/crypto/ti/dthev2-aes.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * K3 DTHE V2 crypto accelerator driver
+ *
+ * Copyright (C) Texas Instruments 2025 - https://www.ti.com
+ * Author: T Pratham <t-pratham@ti.com>
+ */
+
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/engine.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
+
+#include "dthev2-common.h"
+
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/scatterlist.h>
+
+/* Registers */
+
+// AES Engine
+#define DTHE_P_AES_BASE 0x7000
+#define DTHE_P_AES_KEY1_0 0x0038
+#define DTHE_P_AES_KEY1_1 0x003C
+#define DTHE_P_AES_KEY1_2 0x0030
+#define DTHE_P_AES_KEY1_3 0x0034
+#define DTHE_P_AES_KEY1_4 0x0028
+#define DTHE_P_AES_KEY1_5 0x002C
+#define DTHE_P_AES_KEY1_6 0x0020
+#define DTHE_P_AES_KEY1_7 0x0024
+#define DTHE_P_AES_IV_IN_0 0x0040
+#define DTHE_P_AES_IV_IN_1 0x0044
+#define DTHE_P_AES_IV_IN_2 0x0048
+#define DTHE_P_AES_IV_IN_3 0x004C
+#define DTHE_P_AES_CTRL 0x0050
+#define DTHE_P_AES_C_LENGTH_0 0x0054
+#define DTHE_P_AES_C_LENGTH_1 0x0058
+#define DTHE_P_AES_AUTH_LENGTH 0x005C
+#define DTHE_P_AES_DATA_IN_OUT 0x0060
+
+#define DTHE_P_AES_SYSCONFIG 0x0084
+#define DTHE_P_AES_IRQSTATUS 0x008C
+#define DTHE_P_AES_IRQENABLE 0x0090
+
+/* Register write values and macros */
+
+enum aes_ctrl_mode_masks {
+ AES_CTRL_ECB_MASK = 0x00,
+ AES_CTRL_CBC_MASK = BIT(5),
+};
+
+#define DTHE_AES_CTRL_MODE_CLEAR_MASK ~GENMASK(28, 5)
+
+#define DTHE_AES_CTRL_DIR_ENC BIT(2)
+
+#define DTHE_AES_CTRL_KEYSIZE_16B BIT(3)
+#define DTHE_AES_CTRL_KEYSIZE_24B BIT(4)
+#define DTHE_AES_CTRL_KEYSIZE_32B (BIT(3) | BIT(4))
+
+#define DTHE_AES_CTRL_SAVE_CTX_SET BIT(29)
+
+#define DTHE_AES_CTRL_OUTPUT_READY BIT_MASK(0)
+#define DTHE_AES_CTRL_INPUT_READY BIT_MASK(1)
+#define DTHE_AES_CTRL_SAVED_CTX_READY BIT_MASK(30)
+#define DTHE_AES_CTRL_CTX_READY BIT_MASK(31)
+
+#define DTHE_AES_SYSCONFIG_DMA_DATA_IN_OUT_EN GENMASK(6, 5)
+#define DTHE_AES_IRQENABLE_EN_ALL GENMASK(3, 0)
+
+/* Misc */
+#define AES_IV_SIZE AES_BLOCK_SIZE
+#define AES_BLOCK_WORDS (AES_BLOCK_SIZE / sizeof(u32))
+#define AES_IV_WORDS AES_BLOCK_WORDS
+
+static int dthe_cipher_init_tfm(struct crypto_skcipher *tfm)
+{
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct dthe_data *dev_data = dthe_get_dev(ctx);
+
+ ctx->dev_data = dev_data;
+ ctx->keylen = 0;
+
+ return 0;
+}
+
+static int dthe_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen)
+{
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
+ return -EINVAL;
+
+ ctx->keylen = keylen;
+ memcpy(ctx->key, key, keylen);
+
+ return 0;
+}
+
+static int dthe_aes_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen)
+{
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ ctx->aes_mode = DTHE_AES_ECB;
+
+ return dthe_aes_setkey(tfm, key, keylen);
+}
+
+static int dthe_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen)
+{
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ ctx->aes_mode = DTHE_AES_CBC;
+
+ return dthe_aes_setkey(tfm, key, keylen);
+}
+
+static void dthe_aes_set_ctrl_key(struct dthe_tfm_ctx *ctx,
+ struct dthe_aes_req_ctx *rctx,
+ u32 *iv_in)
+{
+ struct dthe_data *dev_data = dthe_get_dev(ctx);
+ void __iomem *aes_base_reg = dev_data->regs + DTHE_P_AES_BASE;
+ u32 ctrl_val = 0;
+
+ writel_relaxed(ctx->key[0], aes_base_reg + DTHE_P_AES_KEY1_0);
+ writel_relaxed(ctx->key[1], aes_base_reg + DTHE_P_AES_KEY1_1);
+ writel_relaxed(ctx->key[2], aes_base_reg + DTHE_P_AES_KEY1_2);
+ writel_relaxed(ctx->key[3], aes_base_reg + DTHE_P_AES_KEY1_3);
+
+ if (ctx->keylen > AES_KEYSIZE_128) {
+ writel_relaxed(ctx->key[4], aes_base_reg + DTHE_P_AES_KEY1_4);
+ writel_relaxed(ctx->key[5], aes_base_reg + DTHE_P_AES_KEY1_5);
+ }
+ if (ctx->keylen == AES_KEYSIZE_256) {
+ writel_relaxed(ctx->key[6], aes_base_reg + DTHE_P_AES_KEY1_6);
+ writel_relaxed(ctx->key[7], aes_base_reg + DTHE_P_AES_KEY1_7);
+ }
+
+ if (rctx->enc)
+ ctrl_val |= DTHE_AES_CTRL_DIR_ENC;
+
+ if (ctx->keylen == AES_KEYSIZE_128)
+ ctrl_val |= DTHE_AES_CTRL_KEYSIZE_16B;
+ else if (ctx->keylen == AES_KEYSIZE_192)
+ ctrl_val |= DTHE_AES_CTRL_KEYSIZE_24B;
+ else
+ ctrl_val |= DTHE_AES_CTRL_KEYSIZE_32B;
+
+ // Write AES mode
+ ctrl_val &= DTHE_AES_CTRL_MODE_CLEAR_MASK;
+ switch (ctx->aes_mode) {
+ case DTHE_AES_ECB:
+ ctrl_val |= AES_CTRL_ECB_MASK;
+ break;
+ case DTHE_AES_CBC:
+ ctrl_val |= AES_CTRL_CBC_MASK;
+ break;
+ }
+
+ if (iv_in) {
+ ctrl_val |= DTHE_AES_CTRL_SAVE_CTX_SET;
+ for (int i = 0; i < AES_IV_WORDS; ++i)
+ writel_relaxed(iv_in[i],
+ aes_base_reg + DTHE_P_AES_IV_IN_0 + (DTHE_REG_SIZE * i));
+ }
+
+ writel_relaxed(ctrl_val, aes_base_reg + DTHE_P_AES_CTRL);
+}
+
+static void dthe_aes_dma_in_callback(void *data)
+{
+ struct skcipher_request *req = (struct skcipher_request *)data;
+ struct dthe_aes_req_ctx *rctx = skcipher_request_ctx(req);
+
+ complete(&rctx->aes_compl);
+}
+
+static int dthe_aes_run(struct crypto_engine *engine, void *areq)
+{
+ struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct dthe_data *dev_data = dthe_get_dev(ctx);
+ struct dthe_aes_req_ctx *rctx = skcipher_request_ctx(req);
+
+ unsigned int len = req->cryptlen;
+ struct scatterlist *src = req->src;
+ struct scatterlist *dst = req->dst;
+
+ int src_nents = sg_nents_for_len(src, len);
+ int dst_nents;
+
+ int src_mapped_nents;
+ int dst_mapped_nents;
+
+ bool diff_dst;
+ enum dma_data_direction src_dir, dst_dir;
+
+ struct device *tx_dev, *rx_dev;
+ struct dma_async_tx_descriptor *desc_in, *desc_out;
+
+ int ret;
+
+ void __iomem *aes_base_reg = dev_data->regs + DTHE_P_AES_BASE;
+
+ u32 aes_irqenable_val = readl_relaxed(aes_base_reg + DTHE_P_AES_IRQENABLE);
+ u32 aes_sysconfig_val = readl_relaxed(aes_base_reg + DTHE_P_AES_SYSCONFIG);
+
+ aes_sysconfig_val |= DTHE_AES_SYSCONFIG_DMA_DATA_IN_OUT_EN;
+ writel_relaxed(aes_sysconfig_val, aes_base_reg + DTHE_P_AES_SYSCONFIG);
+
+ aes_irqenable_val |= DTHE_AES_IRQENABLE_EN_ALL;
+ writel_relaxed(aes_irqenable_val, aes_base_reg + DTHE_P_AES_IRQENABLE);
+
+ if (src == dst) {
+ diff_dst = false;
+ src_dir = DMA_BIDIRECTIONAL;
+ dst_dir = DMA_BIDIRECTIONAL;
+ } else {
+ diff_dst = true;
+ src_dir = DMA_TO_DEVICE;
+ dst_dir = DMA_FROM_DEVICE;
+ }
+
+ tx_dev = dmaengine_get_dma_device(dev_data->dma_aes_tx);
+ rx_dev = dmaengine_get_dma_device(dev_data->dma_aes_rx);
+
+ src_mapped_nents = dma_map_sg(tx_dev, src, src_nents, src_dir);
+ if (src_mapped_nents == 0) {
+ ret = -EINVAL;
+ goto aes_err;
+ }
+
+ if (!diff_dst) {
+ dst_nents = src_nents;
+ dst_mapped_nents = src_mapped_nents;
+ } else {
+ dst_nents = sg_nents_for_len(dst, len);
+ dst_mapped_nents = dma_map_sg(rx_dev, dst, dst_nents, dst_dir);
+ if (dst_mapped_nents == 0) {
+ dma_unmap_sg(tx_dev, src, src_nents, src_dir);
+ ret = -EINVAL;
+ goto aes_err;
+ }
+ }
+
+ desc_in = dmaengine_prep_slave_sg(dev_data->dma_aes_rx, dst, dst_mapped_nents,
+ DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_in) {
+ dev_err(dev_data->dev, "IN prep_slave_sg() failed\n");
+ ret = -EINVAL;
+ goto aes_prep_err;
+ }
+
+ desc_out = dmaengine_prep_slave_sg(dev_data->dma_aes_tx, src, src_mapped_nents,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_out) {
+ dev_err(dev_data->dev, "OUT prep_slave_sg() failed\n");
+ ret = -EINVAL;
+ goto aes_prep_err;
+ }
+
+ desc_in->callback = dthe_aes_dma_in_callback;
+ desc_in->callback_param = req;
+
+ init_completion(&rctx->aes_compl);
+
+ if (ctx->aes_mode == DTHE_AES_ECB)
+ dthe_aes_set_ctrl_key(ctx, rctx, NULL);
+ else
+ dthe_aes_set_ctrl_key(ctx, rctx, (u32 *)req->iv);
+
+ writel_relaxed(lower_32_bits(req->cryptlen), aes_base_reg + DTHE_P_AES_C_LENGTH_0);
+ writel_relaxed(upper_32_bits(req->cryptlen), aes_base_reg + DTHE_P_AES_C_LENGTH_1);
+
+ dmaengine_submit(desc_in);
+ dmaengine_submit(desc_out);
+
+ dma_async_issue_pending(dev_data->dma_aes_rx);
+ dma_async_issue_pending(dev_data->dma_aes_tx);
+
+	// Use a timeout so the request is still finalised if the DMA callback never fires
+ ret = wait_for_completion_timeout(&rctx->aes_compl, msecs_to_jiffies(DTHE_DMA_TIMEOUT_MS));
+ if (!ret) {
+ ret = -ETIMEDOUT;
+ dmaengine_terminate_sync(dev_data->dma_aes_rx);
+ dmaengine_terminate_sync(dev_data->dma_aes_tx);
+
+ for (int i = 0; i < AES_BLOCK_WORDS; ++i)
+ readl_relaxed(aes_base_reg + DTHE_P_AES_DATA_IN_OUT + (DTHE_REG_SIZE * i));
+ } else {
+ ret = 0;
+ }
+
+ // For modes other than ECB, read IV_OUT
+ if (ctx->aes_mode != DTHE_AES_ECB) {
+ u32 *iv_out = (u32 *)req->iv;
+
+ for (int i = 0; i < AES_IV_WORDS; ++i)
+ iv_out[i] = readl_relaxed(aes_base_reg +
+ DTHE_P_AES_IV_IN_0 +
+ (DTHE_REG_SIZE * i));
+ }
+
+aes_prep_err:
+ dma_unmap_sg(tx_dev, src, src_nents, src_dir);
+ if (dst_dir != DMA_BIDIRECTIONAL)
+ dma_unmap_sg(rx_dev, dst, dst_nents, dst_dir);
+
+aes_err:
+ local_bh_disable();
+ crypto_finalize_skcipher_request(dev_data->engine, req, ret);
+ local_bh_enable();
+ return ret;
+}
+
+static int dthe_aes_crypt(struct skcipher_request *req)
+{
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct dthe_data *dev_data = dthe_get_dev(ctx);
+ struct crypto_engine *engine;
+
+	/*
+	 * The data length must be a multiple of AES_BLOCK_SIZE, else return
+	 * -EINVAL. A zero-length request needs no operation at all.
+	 */
+ if (req->cryptlen % AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (req->cryptlen == 0)
+ return 0;
+
+ engine = dev_data->engine;
+ return crypto_transfer_skcipher_request_to_engine(engine, req);
+}
+
+static int dthe_aes_encrypt(struct skcipher_request *req)
+{
+ struct dthe_aes_req_ctx *rctx = skcipher_request_ctx(req);
+
+ rctx->enc = 1;
+ return dthe_aes_crypt(req);
+}
+
+static int dthe_aes_decrypt(struct skcipher_request *req)
+{
+ struct dthe_aes_req_ctx *rctx = skcipher_request_ctx(req);
+
+ rctx->enc = 0;
+ return dthe_aes_crypt(req);
+}
+
+static struct skcipher_engine_alg cipher_algs[] = {
+ {
+ .base.init = dthe_cipher_init_tfm,
+ .base.setkey = dthe_aes_ecb_setkey,
+ .base.encrypt = dthe_aes_encrypt,
+ .base.decrypt = dthe_aes_decrypt,
+ .base.min_keysize = AES_MIN_KEY_SIZE,
+ .base.max_keysize = AES_MAX_KEY_SIZE,
+ .base.base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-dthev2",
+ .cra_priority = 299,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_alignmask = AES_BLOCK_SIZE - 1,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct dthe_tfm_ctx),
+ .cra_reqsize = sizeof(struct dthe_aes_req_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .op.do_one_request = dthe_aes_run,
+ }, /* ECB AES */
+ {
+ .base.init = dthe_cipher_init_tfm,
+ .base.setkey = dthe_aes_cbc_setkey,
+ .base.encrypt = dthe_aes_encrypt,
+ .base.decrypt = dthe_aes_decrypt,
+ .base.min_keysize = AES_MIN_KEY_SIZE,
+ .base.max_keysize = AES_MAX_KEY_SIZE,
+ .base.ivsize = AES_IV_SIZE,
+ .base.base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-dthev2",
+ .cra_priority = 299,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_alignmask = AES_BLOCK_SIZE - 1,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct dthe_tfm_ctx),
+ .cra_reqsize = sizeof(struct dthe_aes_req_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .op.do_one_request = dthe_aes_run,
+ } /* CBC AES */
+};
+
+int dthe_register_aes_algs(void)
+{
+ return crypto_engine_register_skciphers(cipher_algs, ARRAY_SIZE(cipher_algs));
+}
+
+void dthe_unregister_aes_algs(void)
+{
+ crypto_engine_unregister_skciphers(cipher_algs, ARRAY_SIZE(cipher_algs));
+}
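
Once registered, cbc-aes-dthev2 is consumed through the generic skcipher API like any other async implementation. A minimal synchronous-style sketch (names illustrative; error paths trimmed to the essentials):

	#include <crypto/aes.h>
	#include <crypto/skcipher.h>
	#include <linux/crypto.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>

	static int demo_cbc_encrypt(u8 *buf, unsigned int len,
				    const u8 *key, u8 *iv)
	{
		struct crypto_skcipher *tfm;
		struct skcipher_request *req;
		struct scatterlist sg;
		DECLARE_CRYPTO_WAIT(wait);
		int ret;

		tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
		if (ret)
			goto out_tfm;

		req = skcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out_tfm;
		}

		sg_init_one(&sg, buf, len);	/* len: multiple of AES_BLOCK_SIZE */
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
					      crypto_req_done, &wait);
		skcipher_request_set_crypt(req, &sg, &sg, len, iv);
		ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

		skcipher_request_free(req);
	out_tfm:
		crypto_free_skcipher(tfm);
		return ret;
	}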
diff --git a/drivers/crypto/ti/dthev2-common.c b/drivers/crypto/ti/dthev2-common.c
new file mode 100644
index 000000000000..c39d37933b9e
--- /dev/null
+++ b/drivers/crypto/ti/dthev2-common.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * K3 DTHE V2 crypto accelerator driver
+ *
+ * Copyright (C) Texas Instruments 2025 - https://www.ti.com
+ * Author: T Pratham <t-pratham@ti.com>
+ */
+
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/engine.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
+
+#include "dthev2-common.h"
+
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+
+#define DRIVER_NAME "dthev2"
+
+static struct dthe_list dthe_dev_list = {
+ .dev_list = LIST_HEAD_INIT(dthe_dev_list.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(dthe_dev_list.lock),
+};
+
+struct dthe_data *dthe_get_dev(struct dthe_tfm_ctx *ctx)
+{
+ struct dthe_data *dev_data;
+
+ if (ctx->dev_data)
+ return ctx->dev_data;
+
+ spin_lock_bh(&dthe_dev_list.lock);
+ dev_data = list_first_entry(&dthe_dev_list.dev_list, struct dthe_data, list);
+ if (dev_data)
+ list_move_tail(&dev_data->list, &dthe_dev_list.dev_list);
+ spin_unlock_bh(&dthe_dev_list.lock);
+
+ return dev_data;
+}
+
+static int dthe_dma_init(struct dthe_data *dev_data)
+{
+ int ret;
+ struct dma_slave_config cfg;
+
+ dev_data->dma_aes_rx = NULL;
+ dev_data->dma_aes_tx = NULL;
+ dev_data->dma_sha_tx = NULL;
+
+ dev_data->dma_aes_rx = dma_request_chan(dev_data->dev, "rx");
+ if (IS_ERR(dev_data->dma_aes_rx)) {
+ return dev_err_probe(dev_data->dev, PTR_ERR(dev_data->dma_aes_rx),
+ "Unable to request rx DMA channel\n");
+ }
+
+ dev_data->dma_aes_tx = dma_request_chan(dev_data->dev, "tx1");
+ if (IS_ERR(dev_data->dma_aes_tx)) {
+ ret = dev_err_probe(dev_data->dev, PTR_ERR(dev_data->dma_aes_tx),
+ "Unable to request tx1 DMA channel\n");
+ goto err_dma_aes_tx;
+ }
+
+ dev_data->dma_sha_tx = dma_request_chan(dev_data->dev, "tx2");
+ if (IS_ERR(dev_data->dma_sha_tx)) {
+ ret = dev_err_probe(dev_data->dev, PTR_ERR(dev_data->dma_sha_tx),
+ "Unable to request tx2 DMA channel\n");
+ goto err_dma_sha_tx;
+ }
+
+ memzero_explicit(&cfg, sizeof(cfg));
+
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = 4;
+
+ ret = dmaengine_slave_config(dev_data->dma_aes_rx, &cfg);
+ if (ret) {
+ dev_err(dev_data->dev, "Can't configure IN dmaengine slave: %d\n", ret);
+ goto err_dma_config;
+ }
+
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_maxburst = 4;
+
+ ret = dmaengine_slave_config(dev_data->dma_aes_tx, &cfg);
+ if (ret) {
+ dev_err(dev_data->dev, "Can't configure OUT dmaengine slave: %d\n", ret);
+ goto err_dma_config;
+ }
+
+ return 0;
+
+err_dma_config:
+ dma_release_channel(dev_data->dma_sha_tx);
+err_dma_sha_tx:
+ dma_release_channel(dev_data->dma_aes_tx);
+err_dma_aes_tx:
+ dma_release_channel(dev_data->dma_aes_rx);
+
+ return ret;
+}
+
+static int dthe_register_algs(void)
+{
+ return dthe_register_aes_algs();
+}
+
+static void dthe_unregister_algs(void)
+{
+ dthe_unregister_aes_algs();
+}
+
+static int dthe_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dthe_data *dev_data;
+ int ret;
+
+ dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL);
+ if (!dev_data)
+ return -ENOMEM;
+
+ dev_data->dev = dev;
+ dev_data->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dev_data->regs))
+ return PTR_ERR(dev_data->regs);
+
+ platform_set_drvdata(pdev, dev_data);
+
+ spin_lock(&dthe_dev_list.lock);
+ list_add(&dev_data->list, &dthe_dev_list.dev_list);
+ spin_unlock(&dthe_dev_list.lock);
+
+ ret = dthe_dma_init(dev_data);
+ if (ret)
+ goto probe_dma_err;
+
+ dev_data->engine = crypto_engine_alloc_init(dev, 1);
+ if (!dev_data->engine) {
+ ret = -ENOMEM;
+ goto probe_engine_err;
+ }
+
+ ret = crypto_engine_start(dev_data->engine);
+ if (ret) {
+ dev_err(dev, "Failed to start crypto engine\n");
+ goto probe_engine_start_err;
+ }
+
+ ret = dthe_register_algs();
+ if (ret) {
+ dev_err(dev, "Failed to register algs\n");
+ goto probe_engine_start_err;
+ }
+
+ return 0;
+
+probe_engine_start_err:
+ crypto_engine_exit(dev_data->engine);
+probe_engine_err:
+ dma_release_channel(dev_data->dma_aes_rx);
+ dma_release_channel(dev_data->dma_aes_tx);
+ dma_release_channel(dev_data->dma_sha_tx);
+probe_dma_err:
+ spin_lock(&dthe_dev_list.lock);
+ list_del(&dev_data->list);
+ spin_unlock(&dthe_dev_list.lock);
+
+ return ret;
+}
+
+static void dthe_remove(struct platform_device *pdev)
+{
+ struct dthe_data *dev_data = platform_get_drvdata(pdev);
+
+ spin_lock(&dthe_dev_list.lock);
+ list_del(&dev_data->list);
+ spin_unlock(&dthe_dev_list.lock);
+
+ dthe_unregister_algs();
+
+ crypto_engine_exit(dev_data->engine);
+
+ dma_release_channel(dev_data->dma_aes_rx);
+ dma_release_channel(dev_data->dma_aes_tx);
+ dma_release_channel(dev_data->dma_sha_tx);
+}
+
+static const struct of_device_id dthe_of_match[] = {
+ { .compatible = "ti,am62l-dthev2", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dthe_of_match);
+
+static struct platform_driver dthe_driver = {
+ .probe = dthe_probe,
+ .remove = dthe_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = dthe_of_match,
+ },
+};
+
+module_platform_driver(dthe_driver);
+
+MODULE_AUTHOR("T Pratham <t-pratham@ti.com>");
+MODULE_DESCRIPTION("Texas Instruments DTHE V2 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/ti/dthev2-common.h b/drivers/crypto/ti/dthev2-common.h
new file mode 100644
index 000000000000..68c94acda8aa
--- /dev/null
+++ b/drivers/crypto/ti/dthev2-common.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * K3 DTHE V2 crypto accelerator driver
+ *
+ * Copyright (C) Texas Instruments 2025 - https://www.ti.com
+ * Author: T Pratham <t-pratham@ti.com>
+ */
+
+#ifndef __TI_DTHEV2_H__
+#define __TI_DTHEV2_H__
+
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/engine.h>
+#include <crypto/hash.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/scatterlist.h>
+
+#define DTHE_REG_SIZE 4
+#define DTHE_DMA_TIMEOUT_MS 2000
+
+enum dthe_aes_mode {
+ DTHE_AES_ECB = 0,
+ DTHE_AES_CBC,
+};
+
+/* Driver specific struct definitions */
+
+/**
+ * struct dthe_data - DTHE_V2 driver instance data
+ * @dev: Device pointer
+ * @regs: Base address of the register space
+ * @list: list node for dev
+ * @engine: Crypto engine instance
+ * @dma_aes_rx: AES Rx DMA Channel
+ * @dma_aes_tx: AES Tx DMA Channel
+ * @dma_sha_tx: SHA Tx DMA Channel
+ */
+struct dthe_data {
+ struct device *dev;
+ void __iomem *regs;
+ struct list_head list;
+ struct crypto_engine *engine;
+
+ struct dma_chan *dma_aes_rx;
+ struct dma_chan *dma_aes_tx;
+
+ struct dma_chan *dma_sha_tx;
+};
+
+/**
+ * struct dthe_list - device data list head
+ * @dev_list: linked list head
+ * @lock: Spinlock protecting accesses to the list
+ */
+struct dthe_list {
+ struct list_head dev_list;
+ spinlock_t lock;
+};
+
+/**
+ * struct dthe_tfm_ctx - Transform ctx struct containing ctx for all sub-components of DTHE V2
+ * @dev_data: Device data struct pointer
+ * @keylen: AES key length
+ * @key: AES key
+ * @aes_mode: AES mode
+ */
+struct dthe_tfm_ctx {
+ struct dthe_data *dev_data;
+ unsigned int keylen;
+ u32 key[AES_KEYSIZE_256 / sizeof(u32)];
+ enum dthe_aes_mode aes_mode;
+};
+
+/**
+ * struct dthe_aes_req_ctx - AES engine req ctx struct
+ * @enc: flag indicating encryption or decryption operation
+ * @aes_compl: Completion variable for use in manual completion in case of DMA callback failure
+ */
+struct dthe_aes_req_ctx {
+ int enc;
+ struct completion aes_compl;
+};
+
+/* Struct definitions end */
+
+struct dthe_data *dthe_get_dev(struct dthe_tfm_ctx *ctx);
+
+int dthe_register_aes_algs(void);
+void dthe_unregister_aes_algs(void);
+
+#endif
diff --git a/drivers/crypto/xilinx/Makefile b/drivers/crypto/xilinx/Makefile
index 730feff5b5f2..9b51636ef75e 100644
--- a/drivers/crypto/xilinx/Makefile
+++ b/drivers/crypto/xilinx/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CRYPTO_DEV_XILINX_TRNG) += xilinx-trng.o
obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_AES) += zynqmp-aes-gcm.o
obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_SHA3) += zynqmp-sha.o
diff --git a/drivers/crypto/xilinx/xilinx-trng.c b/drivers/crypto/xilinx/xilinx-trng.c
new file mode 100644
index 000000000000..4e4700d68127
--- /dev/null
+++ b/drivers/crypto/xilinx/xilinx-trng.c
@@ -0,0 +1,405 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Versal True Random Number Generator driver
+ * Copyright (c) 2024 - 2025 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <crypto/internal/cipher.h>
+#include <crypto/internal/rng.h>
+#include <crypto/aes.h>
+
+/* TRNG Registers Offsets */
+#define TRNG_STATUS_OFFSET 0x4U
+#define TRNG_CTRL_OFFSET 0x8U
+#define TRNG_EXT_SEED_OFFSET 0x40U
+#define TRNG_PER_STRNG_OFFSET 0x80U
+#define TRNG_CORE_OUTPUT_OFFSET 0xC0U
+#define TRNG_RESET_OFFSET 0xD0U
+#define TRNG_OSC_EN_OFFSET 0xD4U
+
+/* Mask values */
+#define TRNG_RESET_VAL_MASK BIT(0)
+#define TRNG_OSC_EN_VAL_MASK BIT(0)
+#define TRNG_CTRL_PRNGSRST_MASK BIT(0)
+#define TRNG_CTRL_EUMODE_MASK BIT(8)
+#define TRNG_CTRL_TRSSEN_MASK BIT(2)
+#define TRNG_CTRL_PRNGSTART_MASK BIT(5)
+#define TRNG_CTRL_PRNGXS_MASK BIT(3)
+#define TRNG_CTRL_PRNGMODE_MASK BIT(7)
+#define TRNG_STATUS_DONE_MASK BIT(0)
+#define TRNG_STATUS_QCNT_MASK GENMASK(11, 9)
+#define TRNG_STATUS_QCNT_16_BYTES 0x800
+
+/* Sizes in bytes */
+#define TRNG_SEED_LEN_BYTES 48U
+#define TRNG_ENTROPY_SEED_LEN_BYTES 64U
+#define TRNG_SEC_STRENGTH_SHIFT 5U
+#define TRNG_SEC_STRENGTH_BYTES BIT(TRNG_SEC_STRENGTH_SHIFT)
+#define TRNG_BYTES_PER_REG 4U
+#define TRNG_RESET_DELAY 10
+#define TRNG_NUM_INIT_REGS 12U
+#define TRNG_READ_4_WORD 4
+#define TRNG_DATA_READ_DELAY 8000
+
+struct xilinx_rng {
+ void __iomem *rng_base;
+ struct device *dev;
+ struct mutex lock; /* Protect access to TRNG device */
+ struct hwrng trng;
+};
+
+struct xilinx_rng_ctx {
+ struct xilinx_rng *rng;
+};
+
+static struct xilinx_rng *xilinx_rng_dev;
+
+static void xtrng_readwrite32(void __iomem *addr, u32 mask, u8 value)
+{
+ u32 val;
+
+ val = ioread32(addr);
+ val = (val & (~mask)) | (mask & value);
+ iowrite32(val, addr);
+}
+
+static void xtrng_trng_reset(void __iomem *addr)
+{
+ xtrng_readwrite32(addr + TRNG_RESET_OFFSET, TRNG_RESET_VAL_MASK, TRNG_RESET_VAL_MASK);
+ udelay(TRNG_RESET_DELAY);
+ xtrng_readwrite32(addr + TRNG_RESET_OFFSET, TRNG_RESET_VAL_MASK, 0);
+}
+
+static void xtrng_hold_reset(void __iomem *addr)
+{
+ xtrng_readwrite32(addr + TRNG_CTRL_OFFSET, TRNG_CTRL_PRNGSRST_MASK,
+ TRNG_CTRL_PRNGSRST_MASK);
+ iowrite32(TRNG_RESET_VAL_MASK, addr + TRNG_RESET_OFFSET);
+ udelay(TRNG_RESET_DELAY);
+}
+
+static void xtrng_softreset(struct xilinx_rng *rng)
+{
+ xtrng_readwrite32(rng->rng_base + TRNG_CTRL_OFFSET, TRNG_CTRL_PRNGSRST_MASK,
+ TRNG_CTRL_PRNGSRST_MASK);
+ udelay(TRNG_RESET_DELAY);
+ xtrng_readwrite32(rng->rng_base + TRNG_CTRL_OFFSET, TRNG_CTRL_PRNGSRST_MASK, 0);
+}
+
+/* Return no. of bytes read */
+static size_t xtrng_readblock32(void __iomem *rng_base, __be32 *buf, int blocks32, bool wait)
+{
+ int read = 0, ret;
+ int timeout = 1;
+ int i, idx;
+ u32 val;
+
+ if (wait)
+ timeout = TRNG_DATA_READ_DELAY;
+
+ for (i = 0; i < (blocks32 * 2); i++) {
+		/* The TRNG core produces data 16 bytes at a time; read twice for a full 32-byte block */
+ ret = readl_poll_timeout(rng_base + TRNG_STATUS_OFFSET, val,
+ (val & TRNG_STATUS_QCNT_MASK) ==
+ TRNG_STATUS_QCNT_16_BYTES, !!wait, timeout);
+ if (ret)
+ break;
+
+ for (idx = 0; idx < TRNG_READ_4_WORD; idx++) {
+ *(buf + read) = cpu_to_be32(ioread32(rng_base + TRNG_CORE_OUTPUT_OFFSET));
+ read += 1;
+ }
+ }
+ return read * 4;
+}
+
+static int xtrng_collect_random_data(struct xilinx_rng *rng, u8 *rand_gen_buf,
+ int no_of_random_bytes, bool wait)
+{
+ u8 randbuf[TRNG_SEC_STRENGTH_BYTES];
+ int byteleft, blocks, count = 0;
+ int ret;
+
+ byteleft = no_of_random_bytes & (TRNG_SEC_STRENGTH_BYTES - 1);
+ blocks = no_of_random_bytes >> TRNG_SEC_STRENGTH_SHIFT;
+ xtrng_readwrite32(rng->rng_base + TRNG_CTRL_OFFSET, TRNG_CTRL_PRNGSTART_MASK,
+ TRNG_CTRL_PRNGSTART_MASK);
+ if (blocks) {
+ ret = xtrng_readblock32(rng->rng_base, (__be32 *)rand_gen_buf, blocks, wait);
+ if (!ret)
+ return 0;
+ count += ret;
+ }
+
+ if (byteleft) {
+ ret = xtrng_readblock32(rng->rng_base, (__be32 *)randbuf, 1, wait);
+ if (!ret)
+ return count;
+ memcpy(rand_gen_buf + (blocks * TRNG_SEC_STRENGTH_BYTES), randbuf, byteleft);
+ count += byteleft;
+ }
+
+ xtrng_readwrite32(rng->rng_base + TRNG_CTRL_OFFSET,
+ TRNG_CTRL_PRNGMODE_MASK | TRNG_CTRL_PRNGSTART_MASK, 0U);
+
+ return count;
+}
+
+static void xtrng_write_multiple_registers(void __iomem *base_addr, u32 *values, size_t n)
+{
+ void __iomem *reg_addr;
+ size_t i;
+
+ /* Write seed value into EXTERNAL_SEED Registers in big endian format */
+ for (i = 0; i < n; i++) {
+ reg_addr = (base_addr + ((n - 1 - i) * TRNG_BYTES_PER_REG));
+ iowrite32((u32 __force)(cpu_to_be32(values[i])), reg_addr);
+ }
+}
+
+static void xtrng_enable_entropy(struct xilinx_rng *rng)
+{
+ iowrite32(TRNG_OSC_EN_VAL_MASK, rng->rng_base + TRNG_OSC_EN_OFFSET);
+ xtrng_softreset(rng);
+ iowrite32(TRNG_CTRL_EUMODE_MASK | TRNG_CTRL_TRSSEN_MASK, rng->rng_base + TRNG_CTRL_OFFSET);
+}
+
+static int xtrng_reseed_internal(struct xilinx_rng *rng)
+{
+ u8 entropy[TRNG_ENTROPY_SEED_LEN_BYTES];
+ u32 val;
+ int ret;
+
+ memset(entropy, 0, sizeof(entropy));
+ xtrng_enable_entropy(rng);
+
+ /* collect random data to use it as entropy (input for DF) */
+ ret = xtrng_collect_random_data(rng, entropy, TRNG_SEED_LEN_BYTES, true);
+ if (ret != TRNG_SEED_LEN_BYTES)
+ return -EINVAL;
+
+ xtrng_write_multiple_registers(rng->rng_base + TRNG_EXT_SEED_OFFSET,
+ (u32 *)entropy, TRNG_NUM_INIT_REGS);
+ /* select reseed operation */
+ iowrite32(TRNG_CTRL_PRNGXS_MASK, rng->rng_base + TRNG_CTRL_OFFSET);
+
+	/* Start the reseed operation with the above configuration and wait for
+	 * the STATUS.DONE bit to be set. Monitor the STATUS.CERTF bit; if set,
+	 * it indicates that the SP800-90B entropy health test has failed.
+	 */
+ xtrng_readwrite32(rng->rng_base + TRNG_CTRL_OFFSET, TRNG_CTRL_PRNGSTART_MASK,
+ TRNG_CTRL_PRNGSTART_MASK);
+
+ ret = readl_poll_timeout(rng->rng_base + TRNG_STATUS_OFFSET, val,
+ (val & TRNG_STATUS_DONE_MASK) == TRNG_STATUS_DONE_MASK,
+ 1U, 15000U);
+ if (ret)
+ return ret;
+
+ xtrng_readwrite32(rng->rng_base + TRNG_CTRL_OFFSET, TRNG_CTRL_PRNGSTART_MASK, 0U);
+
+ return 0;
+}
+
+static int xtrng_random_bytes_generate(struct xilinx_rng *rng, u8 *rand_buf_ptr,
+ u32 rand_buf_size, int wait)
+{
+ int nbytes;
+ int ret;
+
+ xtrng_readwrite32(rng->rng_base + TRNG_CTRL_OFFSET,
+ TRNG_CTRL_PRNGMODE_MASK | TRNG_CTRL_PRNGXS_MASK,
+ TRNG_CTRL_PRNGMODE_MASK | TRNG_CTRL_PRNGXS_MASK);
+ nbytes = xtrng_collect_random_data(rng, rand_buf_ptr, rand_buf_size, wait);
+
+ ret = xtrng_reseed_internal(rng);
+ if (ret) {
+		dev_err(rng->dev, "Re-seed failed\n");
+ return ret;
+ }
+
+ return nbytes;
+}
+
+static int xtrng_trng_generate(struct crypto_rng *tfm, const u8 *src, u32 slen,
+ u8 *dst, u32 dlen)
+{
+ struct xilinx_rng_ctx *ctx = crypto_rng_ctx(tfm);
+ int ret;
+
+ mutex_lock(&ctx->rng->lock);
+ ret = xtrng_random_bytes_generate(ctx->rng, dst, dlen, true);
+ mutex_unlock(&ctx->rng->lock);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int xtrng_trng_seed(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
+{
+ return 0;
+}
+
+static int xtrng_trng_init(struct crypto_tfm *rtfm)
+{
+ struct xilinx_rng_ctx *ctx = crypto_tfm_ctx(rtfm);
+
+ ctx->rng = xilinx_rng_dev;
+
+ return 0;
+}
+
+static struct rng_alg xtrng_trng_alg = {
+ .generate = xtrng_trng_generate,
+ .seed = xtrng_trng_seed,
+ .seedsize = 0,
+ .base = {
+ .cra_name = "stdrng",
+ .cra_driver_name = "xilinx-trng",
+ .cra_priority = 300,
+ .cra_ctxsize = sizeof(struct xilinx_rng_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = xtrng_trng_init,
+ },
+};
+
+static int xtrng_hwrng_trng_read(struct hwrng *hwrng, void *data, size_t max, bool wait)
+{
+ u8 buf[TRNG_SEC_STRENGTH_BYTES];
+ struct xilinx_rng *rng;
+ int ret = -EINVAL, i = 0;
+
+ rng = container_of(hwrng, struct xilinx_rng, trng);
+	/* Without wait, bail out if the lock is unavailable; otherwise block. */
+	if (!mutex_trylock(&rng->lock)) {
+		if (!wait)
+			return 0;
+		mutex_lock(&rng->lock);
+	}
+
+ while (i < max) {
+ ret = xtrng_random_bytes_generate(rng, buf, TRNG_SEC_STRENGTH_BYTES, wait);
+ if (ret < 0)
+ break;
+
+ memcpy(data + i, buf, min_t(int, ret, (max - i)));
+ i += min_t(int, ret, (max - i));
+ }
+ mutex_unlock(&rng->lock);
+
+	return ret < 0 ? ret : i;
+}
+
+static int xtrng_hwrng_register(struct hwrng *trng)
+{
+ int ret;
+
+ trng->name = "Xilinx Versal Crypto Engine TRNG";
+ trng->read = xtrng_hwrng_trng_read;
+
+ ret = hwrng_register(trng);
+ if (ret)
+		pr_err("Failed to register the TRNG\n");
+
+ return ret;
+}
+
+static void xtrng_hwrng_unregister(struct hwrng *trng)
+{
+ hwrng_unregister(trng);
+}
+
+static int xtrng_probe(struct platform_device *pdev)
+{
+ struct xilinx_rng *rng;
+ int ret;
+
+ rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+ if (!rng)
+ return -ENOMEM;
+
+ rng->dev = &pdev->dev;
+ rng->rng_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rng->rng_base)) {
+ dev_err(&pdev->dev, "Failed to map resource %ld\n", PTR_ERR(rng->rng_base));
+ return PTR_ERR(rng->rng_base);
+ }
+
+ xtrng_trng_reset(rng->rng_base);
+ ret = xtrng_reseed_internal(rng);
+ if (ret) {
+		dev_err(&pdev->dev, "TRNG seed failed\n");
+ return ret;
+ }
+
+ xilinx_rng_dev = rng;
+ mutex_init(&rng->lock);
+ ret = crypto_register_rng(&xtrng_trng_alg);
+ if (ret) {
+ dev_err(&pdev->dev, "Crypto Random device registration failed: %d\n", ret);
+ return ret;
+ }
+ ret = xtrng_hwrng_register(&rng->trng);
+ if (ret) {
+ dev_err(&pdev->dev, "HWRNG device registration failed: %d\n", ret);
+ goto crypto_rng_free;
+ }
+ platform_set_drvdata(pdev, rng);
+
+ return 0;
+
+crypto_rng_free:
+ crypto_unregister_rng(&xtrng_trng_alg);
+
+ return ret;
+}
+
+static void xtrng_remove(struct platform_device *pdev)
+{
+ struct xilinx_rng *rng;
+ u32 zero[TRNG_NUM_INIT_REGS] = { };
+
+ rng = platform_get_drvdata(pdev);
+ xtrng_hwrng_unregister(&rng->trng);
+ crypto_unregister_rng(&xtrng_trng_alg);
+ xtrng_write_multiple_registers(rng->rng_base + TRNG_EXT_SEED_OFFSET, zero,
+ TRNG_NUM_INIT_REGS);
+ xtrng_write_multiple_registers(rng->rng_base + TRNG_PER_STRNG_OFFSET, zero,
+ TRNG_NUM_INIT_REGS);
+ xtrng_hold_reset(rng->rng_base);
+ xilinx_rng_dev = NULL;
+}
+
+static const struct of_device_id xtrng_of_match[] = {
+ { .compatible = "xlnx,versal-trng", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, xtrng_of_match);
+
+static struct platform_driver xtrng_driver = {
+ .driver = {
+ .name = "xlnx,versal-trng",
+ .of_match_table = xtrng_of_match,
+ },
+ .probe = xtrng_probe,
+ .remove = xtrng_remove,
+};
+
+module_platform_driver(xtrng_driver);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Harsh Jain <h.jain@amd.com>");
+MODULE_AUTHOR("Mounika Botcha <mounika.botcha@amd.com>");
+MODULE_DESCRIPTION("True Random Number Generator Driver");
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index 712624cba2b6..d7a5539d07d4 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -20,8 +20,7 @@ static const guid_t acpi_cxl_qtg_id_guid =
GUID_INIT(0xF365F9A6, 0xA7DE, 0x4071,
0xA6, 0x6A, 0xB4, 0x0C, 0x0B, 0x4F, 0x8E, 0x52);
-
-static u64 cxl_xor_hpa_to_spa(struct cxl_root_decoder *cxlrd, u64 hpa)
+static u64 cxl_apply_xor_maps(struct cxl_root_decoder *cxlrd, u64 addr)
{
struct cxl_cxims_data *cximsd = cxlrd->platform_data;
int hbiw = cxlrd->cxlsd.nr_targets;
@@ -30,19 +29,23 @@ static u64 cxl_xor_hpa_to_spa(struct cxl_root_decoder *cxlrd, u64 hpa)
/* No xormaps for host bridge interleave ways of 1 or 3 */
if (hbiw == 1 || hbiw == 3)
- return hpa;
+ return addr;
/*
- * For root decoders using xormaps (hbiw: 2,4,6,8,12,16) restore
- * the position bit to its value before the xormap was applied at
- * HPA->DPA translation.
+ * In regions using XOR interleave arithmetic the CXL HPA may not
+ * be the same as the SPA. This helper performs the SPA->CXL HPA
+ * or the CXL HPA->SPA translation. Since XOR is self-inverting,
+ * this function is too.
+ *
+ * For root decoders using xormaps (hbiw: 2,4,6,8,12,16) applying the
+ * xormaps will toggle a position bit.
*
* pos is the lowest set bit in an XORMAP
- * val is the XORALLBITS(HPA & XORMAP)
+ * val is the XORALLBITS(addr & XORMAP)
*
* XORALLBITS: The CXL spec (3.1 Table 9-22) defines XORALLBITS
* as an operation that outputs a single bit by XORing all the
- * bits in the input (hpa & xormap). Implement XORALLBITS using
+ * bits in the input (addr & xormap). Implement XORALLBITS using
* hweight64(). If the hamming weight is even the XOR of those
* bits results in val==0, if odd the XOR result is val==1.
*/
@@ -51,11 +54,11 @@ static u64 cxl_xor_hpa_to_spa(struct cxl_root_decoder *cxlrd, u64 hpa)
if (!cximsd->xormaps[i])
continue;
pos = __ffs(cximsd->xormaps[i]);
- val = (hweight64(hpa & cximsd->xormaps[i]) & 1);
- hpa = (hpa & ~(1ULL << pos)) | (val << pos);
+ val = (hweight64(addr & cximsd->xormaps[i]) & 1);
+ addr = (addr & ~(1ULL << pos)) | (val << pos);
}
- return hpa;
+ return addr;
}
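
A standalone sketch of the self-inverting property the comment describes, with __builtin_popcountll() standing in for hweight64() and __builtin_ctzll() for __ffs() (illustrative, outside the kernel):

	#include <assert.h>
	#include <stdint.h>

	/* Mirror of the loop body: pos is the lowest set bit of the map,
	 * val is XORALLBITS(addr & xormap), i.e. the parity of the masked bits. */
	static uint64_t apply_xormap(uint64_t addr, uint64_t xormap)
	{
		unsigned int pos = __builtin_ctzll(xormap);
		uint64_t val = __builtin_popcountll(addr & xormap) & 1;

		return (addr & ~(1ULL << pos)) | (val << pos);
	}

	int main(void)
	{
		uint64_t addr = 0x12345678;

		/* Applying the same map twice restores the original address */
		assert(apply_xormap(apply_xormap(addr, 0x5100), 0x5100) == addr);
		return 0;
	}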
struct cxl_cxims_context {
@@ -113,9 +116,9 @@ static unsigned long cfmws_to_decoder_flags(int restrictions)
{
unsigned long flags = CXL_DECODER_F_ENABLE;
- if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE2)
+ if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_DEVMEM)
flags |= CXL_DECODER_F_TYPE2;
- if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE3)
+ if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM)
flags |= CXL_DECODER_F_TYPE3;
if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_VOLATILE)
flags |= CXL_DECODER_F_RAM;
@@ -398,7 +401,6 @@ DEFINE_FREE(del_cxl_resource, struct resource *, if (_T) del_cxl_resource(_T))
static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
struct cxl_cfmws_context *ctx)
{
- int target_map[CXL_DECODER_MAX_INTERLEAVE];
struct cxl_port *root_port = ctx->root_port;
struct cxl_cxims_context cxims_ctx;
struct device *dev = ctx->dev;
@@ -416,8 +418,6 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
rc = eig_to_granularity(cfmws->granularity, &ig);
if (rc)
return rc;
- for (i = 0; i < ways; i++)
- target_map[i] = cfmws->interleave_targets[i];
struct resource *res __free(del_cxl_resource) = alloc_cxl_resource(
cfmws->base_hpa, cfmws->window_size, ctx->id++);
@@ -443,6 +443,8 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
.end = cfmws->base_hpa + cfmws->window_size - 1,
};
cxld->interleave_ways = ways;
+ for (i = 0; i < ways; i++)
+ cxld->target_map[i] = cfmws->interleave_targets[i];
/*
* Minimize the x1 granularity to advertise support for any
* valid region granularity
@@ -472,10 +474,16 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
cxlrd->qos_class = cfmws->qtg_id;
- if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_XOR)
- cxlrd->hpa_to_spa = cxl_xor_hpa_to_spa;
+ if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_XOR) {
+ cxlrd->ops = kzalloc(sizeof(*cxlrd->ops), GFP_KERNEL);
+ if (!cxlrd->ops)
+ return -ENOMEM;
+
+ cxlrd->ops->hpa_to_spa = cxl_apply_xor_maps;
+ cxlrd->ops->spa_to_hpa = cxl_apply_xor_maps;
+ }
- rc = cxl_decoder_add(cxld, target_map);
+ rc = cxl_decoder_add(cxld);
if (rc)
return rc;
diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
index c0af645425f4..c4bd6e8a0cf0 100644
--- a/drivers/cxl/core/cdat.c
+++ b/drivers/cxl/core/cdat.c
@@ -338,7 +338,7 @@ static int match_cxlrd_hb(struct device *dev, void *data)
guard(rwsem_read)(&cxl_rwsem.region);
for (int i = 0; i < cxlsd->nr_targets; i++) {
- if (host_bridge == cxlsd->target[i]->dport_dev)
+ if (cxlsd->target[i] && host_bridge == cxlsd->target[i]->dport_dev)
return 1;
}
@@ -440,8 +440,8 @@ static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
} *tbl = (struct acpi_cdat_sslbis_table *)header;
int size = sizeof(header->cdat) + sizeof(tbl->sslbis_header);
struct acpi_cdat_sslbis *sslbis;
- struct cxl_port *port = arg;
- struct device *dev = &port->dev;
+ struct cxl_dport *dport = arg;
+ struct device *dev = &dport->port->dev;
int remain, entries, i;
u16 len;
@@ -467,8 +467,6 @@ static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
u16 y = le16_to_cpu((__force __le16)tbl->entries[i].porty_id);
__le64 le_base;
__le16 le_val;
- struct cxl_dport *dport;
- unsigned long index;
u16 dsp_id;
u64 val;
@@ -499,28 +497,27 @@ static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base),
sslbis->data_type);
- xa_for_each(&port->dports, index, dport) {
- if (dsp_id == ACPI_CDAT_SSLBIS_ANY_PORT ||
- dsp_id == dport->port_id) {
- cxl_access_coordinate_set(dport->coord,
- sslbis->data_type,
- val);
- }
+ if (dsp_id == ACPI_CDAT_SSLBIS_ANY_PORT ||
+ dsp_id == dport->port_id) {
+ cxl_access_coordinate_set(dport->coord,
+ sslbis->data_type, val);
+ return 0;
}
}
return 0;
}
-void cxl_switch_parse_cdat(struct cxl_port *port)
+void cxl_switch_parse_cdat(struct cxl_dport *dport)
{
+ struct cxl_port *port = dport->port;
int rc;
if (!port->cdat.table)
return;
rc = cdat_table_parse(ACPI_CDAT_TYPE_SSLBIS, cdat_sslbis_handler,
- port, port->cdat.table, port->cdat.length);
+ dport, port->cdat.table, port->cdat.length);
rc = cdat_table_parse_output(rc);
if (rc)
dev_dbg(&port->dev, "Failed to parse SSLBIS: %d\n", rc);
@@ -1075,14 +1072,3 @@ void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
cxlr->coord[i].write_bandwidth += perf->coord[i].write_bandwidth;
}
}
-
-int cxl_update_hmat_access_coordinates(int nid, struct cxl_region *cxlr,
- enum access_coordinate_class access)
-{
- return hmat_update_target_coordinates(nid, &cxlr->coord[access], access);
-}
-
-bool cxl_need_node_perf_attrs_update(int nid)
-{
- return !acpi_node_backed_by_real_pxm(nid);
-}
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 2669f251d677..1fb66132b777 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -135,11 +135,12 @@ enum cxl_poison_trace_type {
CXL_POISON_TRACE_CLEAR,
};
+enum poison_cmd_enabled_bits;
+bool cxl_memdev_has_poison_cmd(struct cxl_memdev *cxlmd,
+ enum poison_cmd_enabled_bits cmd);
+
long cxl_pci_get_latency(struct pci_dev *pdev);
int cxl_pci_get_bandwidth(struct pci_dev *pdev, struct access_coordinate *c);
-int cxl_update_hmat_access_coordinates(int nid, struct cxl_region *cxlr,
- enum access_coordinate_class access);
-bool cxl_need_node_perf_attrs_update(int nid);
int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port,
struct access_coordinate *c);
@@ -147,6 +148,11 @@ int cxl_ras_init(void);
void cxl_ras_exit(void);
int cxl_gpf_port_setup(struct cxl_dport *dport);
+struct cxl_hdm;
+int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
+ struct cxl_endpoint_dvsec_info *info);
+int cxl_port_get_possible_dports(struct cxl_port *port);
+
#ifdef CONFIG_CXL_FEATURES
struct cxl_feat_entry *
cxl_feature_info(struct cxl_features_state *cxlfs, const uuid_t *uuid);
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index e9e1d555cec6..d3a094ca01ad 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -21,12 +21,11 @@ struct cxl_rwsem cxl_rwsem = {
.dpa = __RWSEM_INITIALIZER(cxl_rwsem.dpa),
};
-static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
- int *target_map)
+static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld)
{
int rc;
- rc = cxl_decoder_add_locked(cxld, target_map);
+ rc = cxl_decoder_add_locked(cxld);
if (rc) {
put_device(&cxld->dev);
dev_err(&port->dev, "Failed to add decoder\n");
@@ -50,12 +49,9 @@ static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
* are claimed and passed to the single dport. Disable the range until the first
* CXL region is enumerated / activated.
*/
-int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
+static int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{
struct cxl_switch_decoder *cxlsd;
- struct cxl_dport *dport = NULL;
- int single_port_map[1];
- unsigned long index;
struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
/*
@@ -71,13 +67,8 @@ int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
device_lock_assert(&port->dev);
- xa_for_each(&port->dports, index, dport)
- break;
- single_port_map[0] = dport->port_id;
-
- return add_hdm_decoder(port, &cxlsd->cxld, single_port_map);
+ return add_hdm_decoder(port, &cxlsd->cxld);
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, "CXL");
static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
{
@@ -147,8 +138,8 @@ static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
* @port: cxl_port to map
* @info: cached DVSEC range register info
*/
-struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
- struct cxl_endpoint_dvsec_info *info)
+static struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
+ struct cxl_endpoint_dvsec_info *info)
{
struct cxl_register_map *reg_map = &port->reg_map;
struct device *dev = &port->dev;
@@ -197,13 +188,12 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
*/
if (should_emulate_decoders(info)) {
dev_dbg(dev, "Fallback map %d range register%s\n", info->ranges,
- info->ranges > 1 ? "s" : "");
+ str_plural(info->ranges));
cxlhdm->decoder_count = info->ranges;
}
return cxlhdm;
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, "CXL");
static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth)
{
@@ -984,7 +974,7 @@ static int cxl_setup_hdm_decoder_from_dvsec(
}
static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
- int *target_map, void __iomem *hdm, int which,
+ void __iomem *hdm, int which,
u64 *dpa_base, struct cxl_endpoint_dvsec_info *info)
{
struct cxl_endpoint_decoder *cxled = NULL;
@@ -1103,7 +1093,7 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
hi = readl(hdm + CXL_HDM_DECODER0_TL_HIGH(which));
target_list.value = (hi << 32) + lo;
for (i = 0; i < cxld->interleave_ways; i++)
- target_map[i] = target_list.target_id[i];
+ cxld->target_map[i] = target_list.target_id[i];
return 0;
}
@@ -1168,8 +1158,8 @@ static void cxl_settle_decoders(struct cxl_hdm *cxlhdm)
* @cxlhdm: Structure to populate with HDM capabilities
* @info: cached DVSEC range register info
*/
-int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
- struct cxl_endpoint_dvsec_info *info)
+static int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
+ struct cxl_endpoint_dvsec_info *info)
{
void __iomem *hdm = cxlhdm->regs.hdm_decoder;
struct cxl_port *port = cxlhdm->port;
@@ -1179,7 +1169,6 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
cxl_settle_decoders(cxlhdm);
for (i = 0; i < cxlhdm->decoder_count; i++) {
- int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
int rc, target_count = cxlhdm->target_count;
struct cxl_decoder *cxld;
@@ -1207,8 +1196,7 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
cxld = &cxlsd->cxld;
}
- rc = init_hdm_decoder(port, cxld, target_map, hdm, i,
- &dpa_base, info);
+ rc = init_hdm_decoder(port, cxld, hdm, i, &dpa_base, info);
if (rc) {
dev_warn(&port->dev,
"Failed to initialize decoder%d.%d\n",
@@ -1216,7 +1204,7 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
put_device(&cxld->dev);
return rc;
}
- rc = add_hdm_decoder(port, cxld, target_map);
+ rc = add_hdm_decoder(port, cxld);
if (rc) {
dev_warn(&port->dev,
"Failed to add decoder%d.%d\n", port->id, i);
@@ -1226,4 +1214,71 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, "CXL");
+
+/**
+ * __devm_cxl_switch_port_decoders_setup - allocate and setup switch decoders
+ * @port: CXL port context
+ *
+ * Return: 0 on success or -errno on error
+ */
+int __devm_cxl_switch_port_decoders_setup(struct cxl_port *port)
+{
+ struct cxl_hdm *cxlhdm;
+
+ if (is_cxl_root(port) || is_cxl_endpoint(port))
+ return -EOPNOTSUPP;
+
+ cxlhdm = devm_cxl_setup_hdm(port, NULL);
+ if (!IS_ERR(cxlhdm))
+ return devm_cxl_enumerate_decoders(cxlhdm, NULL);
+
+ if (PTR_ERR(cxlhdm) != -ENODEV) {
+ dev_err(&port->dev, "Failed to map HDM decoder capability\n");
+ return PTR_ERR(cxlhdm);
+ }
+
+ if (cxl_port_get_possible_dports(port) == 1) {
+ dev_dbg(&port->dev, "Fallback to passthrough decoder\n");
+ return devm_cxl_add_passthrough_decoder(port);
+ }
+
+ dev_err(&port->dev, "HDM decoder capability not found\n");
+ return -ENXIO;
+}
+EXPORT_SYMBOL_NS_GPL(__devm_cxl_switch_port_decoders_setup, "CXL");
+
+/**
+ * devm_cxl_endpoint_decoders_setup - allocate and setup endpoint decoders
+ * @port: CXL port context
+ *
+ * Return: 0 on success or -errno on error
+ */
+int devm_cxl_endpoint_decoders_setup(struct cxl_port *port)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
+ struct cxl_endpoint_dvsec_info info = { .port = port };
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_hdm *cxlhdm;
+ int rc;
+
+ if (!is_cxl_endpoint(port))
+ return -EOPNOTSUPP;
+
+ rc = cxl_dvsec_rr_decode(cxlds, &info);
+ if (rc < 0)
+ return rc;
+
+ cxlhdm = devm_cxl_setup_hdm(port, &info);
+ if (IS_ERR(cxlhdm)) {
+ if (PTR_ERR(cxlhdm) == -ENODEV)
+ dev_err(&port->dev, "HDM decoder registers not found\n");
+ return PTR_ERR(cxlhdm);
+ }
+
+ rc = cxl_hdm_decode_init(cxlds, cxlhdm, &info);
+ if (rc)
+ return rc;
+
+ return devm_cxl_enumerate_decoders(cxlhdm, &info);
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_endpoint_decoders_setup, "CXL");
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index c569e00a511f..e370d733e440 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -200,6 +200,14 @@ static ssize_t security_erase_store(struct device *dev,
static struct device_attribute dev_attr_security_erase =
__ATTR(erase, 0200, NULL, security_erase_store);
+bool cxl_memdev_has_poison_cmd(struct cxl_memdev *cxlmd,
+ enum poison_cmd_enabled_bits cmd)
+{
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+
+ return test_bit(cmd, mds->poison.enabled_cmds);
+}
+
static int cxl_get_poison_by_memdev(struct cxl_memdev *cxlmd)
{
struct cxl_dev_state *cxlds = cxlmd->cxlds;
@@ -276,7 +284,7 @@ static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
return 0;
}
-int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
+int cxl_inject_poison_locked(struct cxl_memdev *cxlmd, u64 dpa)
{
struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
struct cxl_mbox_inject_poison inject;
@@ -288,13 +296,8 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;
- ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
- if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
- return rc;
-
- ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
- if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
- return rc;
+ lockdep_assert_held(&cxl_rwsem.dpa);
+ lockdep_assert_held(&cxl_rwsem.region);
rc = cxl_validate_poison_dpa(cxlmd, dpa);
if (rc)
@@ -324,9 +327,24 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
return 0;
}
+
+int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
+{
+ int rc;
+
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
+ return rc;
+
+ ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
+ return rc;
+
+ return cxl_inject_poison_locked(cxlmd, dpa);
+}
EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, "CXL");
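
Splitting out cxl_inject_poison_locked() (and cxl_clear_poison_locked() below) lets the new region-level debugfs paths in region.c take cxl_rwsem.region and cxl_rwsem.dpa once, resolve a region offset to a memdev and DPA, and issue the mailbox command under those same locks, while external callers keep the original self-locking entry points.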
-int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
+int cxl_clear_poison_locked(struct cxl_memdev *cxlmd, u64 dpa)
{
struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
struct cxl_mbox_clear_poison clear;
@@ -338,13 +356,8 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;
- ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
- if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
- return rc;
-
- ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
- if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
- return rc;
+ lockdep_assert_held(&cxl_rwsem.dpa);
+ lockdep_assert_held(&cxl_rwsem.region);
rc = cxl_validate_poison_dpa(cxlmd, dpa);
if (rc)
@@ -383,6 +396,21 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
return 0;
}
+
+int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
+{
+ int rc;
+
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
+ return rc;
+
+ ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
+ return rc;
+
+ return cxl_clear_poison_locked(cxlmd, dpa);
+}
EXPORT_SYMBOL_NS_GPL(cxl_clear_poison, "CXL");
static struct attribute *cxl_memdev_attributes[] = {
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index b50551601c2e..18825e1505d6 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -24,6 +24,53 @@ static unsigned short media_ready_timeout = 60;
module_param(media_ready_timeout, ushort, 0644);
MODULE_PARM_DESC(media_ready_timeout, "seconds to wait for media ready");
+static int pci_get_port_num(struct pci_dev *pdev)
+{
+ u32 lnkcap;
+ int type;
+
+ type = pci_pcie_type(pdev);
+ if (type != PCI_EXP_TYPE_DOWNSTREAM && type != PCI_EXP_TYPE_ROOT_PORT)
+ return -EINVAL;
+
+ if (pci_read_config_dword(pdev, pci_pcie_cap(pdev) + PCI_EXP_LNKCAP,
+ &lnkcap))
+ return -ENXIO;
+
+ return FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
+}
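
PCI_EXP_LNKCAP_PN is the Port Number field in bits 31:24 of the Link Capabilities register, so the FIELD_GET() above is equivalent to a mask-and-shift:

	/* Open-coded form of FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap) */
	port_num = (lnkcap & 0xff000000) >> 24;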
+
+/**
+ * __devm_cxl_add_dport_by_dev - allocate a dport by dport device
+ * @port: cxl_port that hosts the dport
+ * @dport_dev: 'struct device' of the dport
+ *
+ * Returns the allocated dport on success or ERR_PTR() of -errno on error
+ */
+struct cxl_dport *__devm_cxl_add_dport_by_dev(struct cxl_port *port,
+ struct device *dport_dev)
+{
+ struct cxl_register_map map;
+ struct pci_dev *pdev;
+ int port_num, rc;
+
+ if (!dev_is_pci(dport_dev))
+ return ERR_PTR(-EINVAL);
+
+ pdev = to_pci_dev(dport_dev);
+ port_num = pci_get_port_num(pdev);
+ if (port_num < 0)
+ return ERR_PTR(port_num);
+
+ rc = cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
+ if (rc)
+ return ERR_PTR(rc);
+
+ device_lock_assert(&port->dev);
+ return devm_cxl_add_dport(port, dport_dev, port_num, map.resource);
+}
+EXPORT_SYMBOL_NS_GPL(__devm_cxl_add_dport_by_dev, "CXL");
+
struct cxl_walk_context {
struct pci_bus *bus;
struct cxl_port *port;
@@ -1169,3 +1216,45 @@ int cxl_gpf_port_setup(struct cxl_dport *dport)
return 0;
}
+
+static int count_dports(struct pci_dev *pdev, void *data)
+{
+ struct cxl_walk_context *ctx = data;
+ int type = pci_pcie_type(pdev);
+
+ if (pdev->bus != ctx->bus)
+ return 0;
+ if (!pci_is_pcie(pdev))
+ return 0;
+ if (type != ctx->type)
+ return 0;
+
+ ctx->count++;
+ return 0;
+}
+
+int cxl_port_get_possible_dports(struct cxl_port *port)
+{
+ struct pci_bus *bus = cxl_port_to_pci_bus(port);
+ struct cxl_walk_context ctx;
+ int type;
+
+ if (!bus) {
+ dev_err(&port->dev, "No PCI bus found for port %s\n",
+ dev_name(&port->dev));
+ return -ENXIO;
+ }
+
+ if (pci_is_root_bus(bus))
+ type = PCI_EXP_TYPE_ROOT_PORT;
+ else
+ type = PCI_EXP_TYPE_DOWNSTREAM;
+
+ ctx = (struct cxl_walk_context) {
+ .bus = bus,
+ .type = type,
+ };
+ pci_walk_bus(bus, count_dports, &ctx);
+
+ return ctx.count;
+}
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 29197376b18e..d5f71eb1ade8 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -33,6 +33,15 @@
static DEFINE_IDA(cxl_port_ida);
static DEFINE_XARRAY(cxl_root_buses);
+/*
+ * The terminal device is NULL for PCI and @platform_bus for
+ * platform devices (as used by cxl_test)
+ */
+static bool is_cxl_host_bridge(struct device *dev)
+{
+ return (!dev || dev == &platform_bus);
+}
+
int cxl_num_decoders_committed(struct cxl_port *port)
{
lockdep_assert_held(&cxl_rwsem.region);
@@ -450,6 +459,7 @@ static void cxl_root_decoder_release(struct device *dev)
if (atomic_read(&cxlrd->region_id) >= 0)
memregion_free(atomic_read(&cxlrd->region_id));
__cxl_decoder_release(&cxlrd->cxlsd.cxld);
+ kfree(cxlrd->ops);
kfree(cxlrd);
}
@@ -740,6 +750,7 @@ static struct cxl_port *cxl_port_alloc(struct device *uport_dev,
xa_init(&port->dports);
xa_init(&port->endpoints);
xa_init(&port->regions);
+ port->component_reg_phys = CXL_RESOURCE_NONE;
device_initialize(dev);
lockdep_set_class_and_subclass(&dev->mutex, &cxl_port_key, port->depth);
@@ -858,9 +869,7 @@ static int cxl_port_add(struct cxl_port *port,
if (rc)
return rc;
- rc = cxl_port_setup_regs(port, component_reg_phys);
- if (rc)
- return rc;
+ port->component_reg_phys = component_reg_phys;
} else {
rc = dev_set_name(dev, "root%d", port->id);
if (rc)
@@ -1191,6 +1200,18 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
cxl_debugfs_create_dport_dir(dport);
+ /*
+ * Set up the port registers when the first dport shows up. Having
+ * a dport also means that there is at least one active link.
+ */
+ if (port->nr_dports == 1 &&
+ port->component_reg_phys != CXL_RESOURCE_NONE) {
+ rc = cxl_port_setup_regs(port, port->component_reg_phys);
+ if (rc)
+ return ERR_PTR(rc);
+ port->component_reg_phys = CXL_RESOURCE_NONE;
+ }
+
return dport;
}
@@ -1348,21 +1369,6 @@ static struct cxl_port *find_cxl_port(struct device *dport_dev,
return port;
}
-static struct cxl_port *find_cxl_port_at(struct cxl_port *parent_port,
- struct device *dport_dev,
- struct cxl_dport **dport)
-{
- struct cxl_find_port_ctx ctx = {
- .dport_dev = dport_dev,
- .parent_port = parent_port,
- .dport = dport,
- };
- struct cxl_port *port;
-
- port = __find_cxl_port(&ctx);
- return port;
-}
-
/*
* All users of grandparent() are using it to walk PCIe-like switch port
* hierarchy. A PCIe switch is comprised of a bridge device representing the
@@ -1423,7 +1429,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, "CXL");
* through ->remove(). This "bottom-up" removal selectively removes individual
 * child ports manually. This depends on devm_cxl_add_port() to not change its
* devm action registration order, and for dports to have already been
- * destroyed by reap_dports().
+ * destroyed by del_dports().
*/
static void delete_switch_port(struct cxl_port *port)
{
@@ -1432,18 +1438,24 @@ static void delete_switch_port(struct cxl_port *port)
devm_release_action(port->dev.parent, unregister_port, port);
}
-static void reap_dports(struct cxl_port *port)
+static void del_dport(struct cxl_dport *dport)
+{
+ struct cxl_port *port = dport->port;
+
+ devm_release_action(&port->dev, cxl_dport_unlink, dport);
+ devm_release_action(&port->dev, cxl_dport_remove, dport);
+ devm_kfree(&port->dev, dport);
+}
+
+static void del_dports(struct cxl_port *port)
{
struct cxl_dport *dport;
unsigned long index;
device_lock_assert(&port->dev);
- xa_for_each(&port->dports, index, dport) {
- devm_release_action(&port->dev, cxl_dport_unlink, dport);
- devm_release_action(&port->dev, cxl_dport_remove, dport);
- devm_kfree(&port->dev, dport);
- }
+ xa_for_each(&port->dports, index, dport)
+ del_dport(dport);
}
struct detach_ctx {
@@ -1501,7 +1513,7 @@ static void cxl_detach_ep(void *data)
*/
died = true;
port->dead = true;
- reap_dports(port);
+ del_dports(port);
}
device_unlock(&port->dev);
@@ -1532,16 +1544,157 @@ static resource_size_t find_component_registers(struct device *dev)
return map.resource;
}
+static int match_port_by_uport(struct device *dev, const void *data)
+{
+ const struct device *uport_dev = data;
+ struct cxl_port *port;
+
+ if (!is_cxl_port(dev))
+ return 0;
+
+ port = to_cxl_port(dev);
+ return uport_dev == port->uport_dev;
+}
+
+/*
+ * Function takes a device reference on the port device. Caller should do a
+ * put_device() when done.
+ */
+static struct cxl_port *find_cxl_port_by_uport(struct device *uport_dev)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&cxl_bus_type, NULL, uport_dev, match_port_by_uport);
+ if (dev)
+ return to_cxl_port(dev);
+ return NULL;
+}
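
Since bus_find_device() returns its match with an elevated reference count, every caller must balance this helper with put_device(). The callers in this patch do that through the put_cxl_port cleanup attribute; an open-coded equivalent would be:

	struct cxl_port *port = find_cxl_port_by_uport(uport_dev);

	if (port) {
		/* ... use port ... */
		put_device(&port->dev);
	}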
+
+static int update_decoder_targets(struct device *dev, void *data)
+{
+ struct cxl_dport *dport = data;
+ struct cxl_switch_decoder *cxlsd;
+ struct cxl_decoder *cxld;
+ int i;
+
+ if (!is_switch_decoder(dev))
+ return 0;
+
+ cxlsd = to_cxl_switch_decoder(dev);
+ cxld = &cxlsd->cxld;
+ guard(rwsem_write)(&cxl_rwsem.region);
+
+ for (i = 0; i < cxld->interleave_ways; i++) {
+ if (cxld->target_map[i] == dport->port_id) {
+ cxlsd->target[i] = dport;
+ dev_dbg(dev, "dport%d found in target list, index %d\n",
+ dport->port_id, i);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+DEFINE_FREE(del_cxl_dport, struct cxl_dport *, if (!IS_ERR_OR_NULL(_T)) del_dport(_T))
+static struct cxl_dport *cxl_port_add_dport(struct cxl_port *port,
+ struct device *dport_dev)
+{
+ struct cxl_dport *dport;
+ int rc;
+
+ device_lock_assert(&port->dev);
+ if (!port->dev.driver)
+ return ERR_PTR(-ENXIO);
+
+ dport = cxl_find_dport_by_dev(port, dport_dev);
+ if (dport) {
+ dev_dbg(&port->dev, "dport%d:%s already exists\n",
+ dport->port_id, dev_name(dport_dev));
+ return ERR_PTR(-EBUSY);
+ }
+
+ struct cxl_dport *new_dport __free(del_cxl_dport) =
+ devm_cxl_add_dport_by_dev(port, dport_dev);
+ if (IS_ERR(new_dport))
+ return new_dport;
+
+ cxl_switch_parse_cdat(new_dport);
+
+ if (ida_is_empty(&port->decoder_ida)) {
+ rc = devm_cxl_switch_port_decoders_setup(port);
+ if (rc)
+ return ERR_PTR(rc);
+ dev_dbg(&port->dev, "first dport%d:%s added with decoders\n",
+ new_dport->port_id, dev_name(dport_dev));
+ return no_free_ptr(new_dport);
+ }
+
+ /* New dport added, update the decoder targets */
+ device_for_each_child(&port->dev, new_dport, update_decoder_targets);
+
+ dev_dbg(&port->dev, "dport%d:%s added\n", new_dport->port_id,
+ dev_name(dport_dev));
+
+ return no_free_ptr(new_dport);
+}
+
+static struct cxl_dport *devm_cxl_create_port(struct device *ep_dev,
+ struct cxl_port *parent_port,
+ struct cxl_dport *parent_dport,
+ struct device *uport_dev,
+ struct device *dport_dev)
+{
+ resource_size_t component_reg_phys;
+
+ device_lock_assert(&parent_port->dev);
+ if (!parent_port->dev.driver) {
+ dev_warn(ep_dev,
+ "port %s:%s:%s disabled, failed to enumerate CXL.mem\n",
+ dev_name(&parent_port->dev), dev_name(uport_dev),
+ dev_name(dport_dev));
+ }
+
+ struct cxl_port *port __free(put_cxl_port) =
+ find_cxl_port_by_uport(uport_dev);
+ if (!port) {
+ component_reg_phys = find_component_registers(uport_dev);
+ port = devm_cxl_add_port(&parent_port->dev, uport_dev,
+ component_reg_phys, parent_dport);
+ if (IS_ERR(port))
+ return ERR_CAST(port);
+
+ /*
+ * Retry to make sure a port is found. A port device
+ * reference is taken.
+ */
+ port = find_cxl_port_by_uport(uport_dev);
+ if (!port)
+ return ERR_PTR(-ENODEV);
+
+ dev_dbg(ep_dev, "created port %s:%s\n",
+ dev_name(&port->dev), dev_name(port->uport_dev));
+ } else {
+ /*
+ * Port was created right before this function was
+ * called. Signal the caller to deal with it.
+ */
+ return ERR_PTR(-EAGAIN);
+ }
+
+ guard(device)(&port->dev);
+ return cxl_port_add_dport(port, dport_dev);
+}
+
static int add_port_attach_ep(struct cxl_memdev *cxlmd,
struct device *uport_dev,
struct device *dport_dev)
{
struct device *dparent = grandparent(dport_dev);
struct cxl_dport *dport, *parent_dport;
- resource_size_t component_reg_phys;
int rc;
- if (!dparent) {
+ if (is_cxl_host_bridge(dparent)) {
/*
* The iteration reached the topology root without finding the
* CXL-root 'cxl_port' on a previous iteration, fail for now to
@@ -1553,42 +1706,31 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd,
}
struct cxl_port *parent_port __free(put_cxl_port) =
- find_cxl_port(dparent, &parent_dport);
+ find_cxl_port_by_uport(dparent->parent);
if (!parent_port) {
/* iterate to create this parent_port */
return -EAGAIN;
}
- /*
- * Definition with __free() here to keep the sequence of
- * dereferencing the device of the port before the parent_port releasing.
- */
- struct cxl_port *port __free(put_cxl_port) = NULL;
scoped_guard(device, &parent_port->dev) {
- if (!parent_port->dev.driver) {
- dev_warn(&cxlmd->dev,
- "port %s:%s disabled, failed to enumerate CXL.mem\n",
- dev_name(&parent_port->dev), dev_name(uport_dev));
- return -ENXIO;
+ parent_dport = cxl_find_dport_by_dev(parent_port, dparent);
+ if (!parent_dport) {
+ parent_dport = cxl_port_add_dport(parent_port, dparent);
+ if (IS_ERR(parent_dport))
+ return PTR_ERR(parent_dport);
}
- port = find_cxl_port_at(parent_port, dport_dev, &dport);
- if (!port) {
- component_reg_phys = find_component_registers(uport_dev);
- port = devm_cxl_add_port(&parent_port->dev, uport_dev,
- component_reg_phys, parent_dport);
- if (IS_ERR(port))
- return PTR_ERR(port);
-
- /* retry find to pick up the new dport information */
- port = find_cxl_port_at(parent_port, dport_dev, &dport);
- if (!port)
- return -ENXIO;
+ dport = devm_cxl_create_port(&cxlmd->dev, parent_port,
+ parent_dport, uport_dev,
+ dport_dev);
+ if (IS_ERR(dport)) {
+ /* Port already exists, restart iteration */
+ if (PTR_ERR(dport) == -EAGAIN)
+ return 0;
+ return PTR_ERR(dport);
}
}
- dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
- dev_name(&port->dev), dev_name(port->uport_dev));
rc = cxl_add_ep(dport, &cxlmd->dev);
if (rc == -EBUSY) {
/*
@@ -1601,6 +1743,25 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd,
return rc;
}
+static struct cxl_dport *find_or_add_dport(struct cxl_port *port,
+ struct device *dport_dev)
+{
+ struct cxl_dport *dport;
+
+ device_lock_assert(&port->dev);
+ dport = cxl_find_dport_by_dev(port, dport_dev);
+ if (!dport) {
+ dport = cxl_port_add_dport(port, dport_dev);
+ if (IS_ERR(dport))
+ return dport;
+
+ /* New dport added, restart iteration */
+ return ERR_PTR(-EAGAIN);
+ }
+
+ return dport;
+}
+
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd)
{
struct device *dev = &cxlmd->dev;
@@ -1629,11 +1790,7 @@ retry:
struct device *uport_dev;
struct cxl_dport *dport;
- /*
- * The terminal "grandparent" in PCI is NULL and @platform_bus
- * for platform devices
- */
- if (!dport_dev || dport_dev == &platform_bus)
+ if (is_cxl_host_bridge(dport_dev))
return 0;
uport_dev = dport_dev->parent;
@@ -1647,12 +1804,26 @@ retry:
dev_name(iter), dev_name(dport_dev),
dev_name(uport_dev));
struct cxl_port *port __free(put_cxl_port) =
- find_cxl_port(dport_dev, &dport);
+ find_cxl_port_by_uport(uport_dev);
if (port) {
dev_dbg(&cxlmd->dev,
"found already registered port %s:%s\n",
dev_name(&port->dev),
dev_name(port->uport_dev));
+
+ /*
+ * An RP port enumerated by cxl_acpi without a dport will
+ * have the dport added here.
+ */
+ scoped_guard(device, &port->dev) {
+ dport = find_or_add_dport(port, dport_dev);
+ if (IS_ERR(dport)) {
+ if (PTR_ERR(dport) == -EAGAIN)
+ goto retry;
+ return PTR_ERR(dport);
+ }
+ }
+
rc = cxl_add_ep(dport, &cxlmd->dev);
/*
@@ -1704,24 +1875,24 @@ struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, "CXL");
static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
- struct cxl_port *port, int *target_map)
+ struct cxl_port *port)
{
+ struct cxl_decoder *cxld = &cxlsd->cxld;
int i;
- if (!target_map)
- return 0;
-
device_lock_assert(&port->dev);
if (xa_empty(&port->dports))
- return -EINVAL;
+ return 0;
guard(rwsem_write)(&cxl_rwsem.region);
for (i = 0; i < cxlsd->cxld.interleave_ways; i++) {
- struct cxl_dport *dport = find_dport(port, target_map[i]);
+ struct cxl_dport *dport = find_dport(port, cxld->target_map[i]);
- if (!dport)
- return -ENXIO;
+ if (!dport) {
+ /* dport may be activated later */
+ continue;
+ }
cxlsd->target[i] = dport;
}
@@ -1910,9 +2081,6 @@ EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, "CXL");
/**
* cxl_decoder_add_locked - Add a decoder with targets
* @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
- * @target_map: A list of downstream ports that this decoder can direct memory
- * traffic to. These numbers should correspond with the port number
- * in the PCIe Link Capabilities structure.
*
* Certain types of decoders may not have any targets. The main example of this
* is an endpoint device. A more awkward example is a hostbridge whose root
@@ -1926,7 +2094,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, "CXL");
* Return: Negative error code if the decoder wasn't properly configured; else
* returns 0.
*/
-int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
+int cxl_decoder_add_locked(struct cxl_decoder *cxld)
{
struct cxl_port *port;
struct device *dev;
@@ -1947,7 +2115,7 @@ int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
if (!is_endpoint_decoder(dev)) {
struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
- rc = decoder_populate_targets(cxlsd, port, target_map);
+ rc = decoder_populate_targets(cxlsd, port);
if (rc && (cxld->flags & CXL_DECODER_F_ENABLE)) {
dev_err(&port->dev,
"Failed to populate active decoder targets\n");
@@ -1966,9 +2134,6 @@ EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, "CXL");
/**
* cxl_decoder_add - Add a decoder with targets
* @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
- * @target_map: A list of downstream ports that this decoder can direct memory
- * traffic to. These numbers should correspond with the port number
- * in the PCIe Link Capabilities structure.
*
* This is the unlocked variant of cxl_decoder_add_locked().
* See cxl_decoder_add_locked().
@@ -1976,7 +2141,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, "CXL");
* Context: Process context. Takes and releases the device lock of the port that
* owns the @cxld.
*/
-int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
+int cxl_decoder_add(struct cxl_decoder *cxld)
{
struct cxl_port *port;
@@ -1989,7 +2154,7 @@ int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
port = to_cxl_port(cxld->dev.parent);
guard(device)(&port->dev);
- return cxl_decoder_add_locked(cxld, target_map);
+ return cxl_decoder_add_locked(cxld);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, "CXL");
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 71cc42d05248..e14c1d305b22 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/memregion.h>
#include <linux/genalloc.h>
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/memory.h>
@@ -10,6 +11,7 @@
#include <linux/sort.h>
#include <linux/idr.h>
#include <linux/memory-tiers.h>
+#include <linux/string_choices.h>
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"
@@ -30,6 +32,12 @@
* 3. Decoder targets
*/
+/*
+ * nodemask that sets per node when the access_coordinates for the node has
+ * been updated by the CXL memory hotplug notifier.
+ */
+static nodemask_t nodemask_region_seen = NODE_MASK_NONE;
+
static struct cxl_region *to_cxl_region(struct device *dev);
#define __ACCESS_ATTR_RO(_level, _name) { \
@@ -1468,9 +1476,7 @@ static int cxl_port_setup_targets(struct cxl_port *port,
dev_name(port->uport_dev), dev_name(&port->dev),
__func__, cxld->interleave_ways,
cxld->interleave_granularity,
- (cxld->flags & CXL_DECODER_F_ENABLE) ?
- "enabled" :
- "disabled",
+ str_enabled_disabled(cxld->flags & CXL_DECODER_F_ENABLE),
cxld->hpa_range.start, cxld->hpa_range.end);
return -ENXIO;
}
@@ -1510,8 +1516,10 @@ add_target:
cxl_rr->nr_targets_set);
return -ENXIO;
}
- } else
+ } else {
cxlsd->target[cxl_rr->nr_targets_set] = ep->dport;
+ cxlsd->cxld.target_map[cxl_rr->nr_targets_set] = ep->dport->port_id;
+ }
inc = 1;
out_target_set:
cxl_rr->nr_targets_set += inc;
@@ -2442,14 +2450,8 @@ static bool cxl_region_update_coordinates(struct cxl_region *cxlr, int nid)
for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
if (cxlr->coord[i].read_bandwidth) {
- rc = 0;
- if (cxl_need_node_perf_attrs_update(nid))
- node_set_perf_attrs(nid, &cxlr->coord[i], i);
- else
- rc = cxl_update_hmat_access_coordinates(nid, cxlr, i);
-
- if (rc == 0)
- cset++;
+ node_update_perf_attrs(nid, &cxlr->coord[i], i);
+ cset++;
}
}
@@ -2487,6 +2489,10 @@ static int cxl_region_perf_attrs_callback(struct notifier_block *nb,
if (nid != region_nid)
return NOTIFY_DONE;
+ /* No action needed if node bit already set */
+ if (node_test_and_set(nid, nodemask_region_seen))
+ return NOTIFY_DONE;
+
if (!cxl_region_update_coordinates(cxlr, nid))
return NOTIFY_DONE;
@@ -2918,6 +2924,16 @@ static bool cxl_is_hpa_in_chunk(u64 hpa, struct cxl_region *cxlr, int pos)
return false;
}
+static bool has_hpa_to_spa(struct cxl_root_decoder *cxlrd)
+{
+ return cxlrd->ops && cxlrd->ops->hpa_to_spa;
+}
+
+static bool has_spa_to_hpa(struct cxl_root_decoder *cxlrd)
+{
+ return cxlrd->ops && cxlrd->ops->spa_to_hpa;
+}
+
u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
u64 dpa)
{
@@ -2972,8 +2988,8 @@ u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
hpa = hpa_offset + p->res->start + p->cache_size;
/* Root decoder translation overrides typical modulo decode */
- if (cxlrd->hpa_to_spa)
- hpa = cxlrd->hpa_to_spa(cxlrd, hpa);
+ if (has_hpa_to_spa(cxlrd))
+ hpa = cxlrd->ops->hpa_to_spa(cxlrd, hpa);
if (!cxl_resource_contains_addr(p->res, hpa)) {
dev_dbg(&cxlr->dev,
@@ -2982,12 +2998,107 @@ u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
}
/* Simple chunk check, by pos & gran, only applies to modulo decodes */
- if (!cxlrd->hpa_to_spa && (!cxl_is_hpa_in_chunk(hpa, cxlr, pos)))
+ if (!has_hpa_to_spa(cxlrd) && (!cxl_is_hpa_in_chunk(hpa, cxlr, pos)))
return ULLONG_MAX;
return hpa;
}
+struct dpa_result {
+ struct cxl_memdev *cxlmd;
+ u64 dpa;
+};
+
+static int region_offset_to_dpa_result(struct cxl_region *cxlr, u64 offset,
+ struct dpa_result *result)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+ struct cxl_endpoint_decoder *cxled;
+ u64 hpa, hpa_offset, dpa_offset;
+ u64 bits_upper, bits_lower;
+ u64 shifted, rem, temp;
+ u16 eig = 0;
+ u8 eiw = 0;
+ int pos;
+
+ lockdep_assert_held(&cxl_rwsem.region);
+ lockdep_assert_held(&cxl_rwsem.dpa);
+
+ /* Input validation ensures valid ways and gran */
+ granularity_to_eig(p->interleave_granularity, &eig);
+ ways_to_eiw(p->interleave_ways, &eiw);
+
+ /*
+ * If the root decoder has an SPA to CXL HPA callback, use it. Otherwise
+ * CXL HPA is assumed to equal SPA.
+ */
+ if (has_spa_to_hpa(cxlrd)) {
+ hpa = cxlrd->ops->spa_to_hpa(cxlrd, p->res->start + offset);
+ hpa_offset = hpa - p->res->start;
+ } else {
+ hpa_offset = offset;
+ }
+ /*
+ * Interleave position: CXL Spec 3.2 Section 8.2.4.20.13
+ * eiw < 8
+ * Position is in the IW bits at HPA_OFFSET[IG+8+IW-1:IG+8].
+ * Per spec "remove IW bits starting with bit position IG+8"
+ * eiw >= 8
+ * Position is not explicitly stored in HPA_OFFSET bits. It is
+ * derived from the modulo operation of the upper bits using
+ * the total number of interleave ways.
+ */
+ if (eiw < 8) {
+ pos = (hpa_offset >> (eig + 8)) & GENMASK(eiw - 1, 0);
+ } else {
+ shifted = hpa_offset >> (eig + 8);
+ div64_u64_rem(shifted, p->interleave_ways, &rem);
+ pos = rem;
+ }
+ if (pos < 0 || pos >= p->nr_targets) {
+ dev_dbg(&cxlr->dev, "Invalid position %d for %d targets\n",
+ pos, p->nr_targets);
+ return -ENXIO;
+ }
+
+ /*
+ * DPA offset: CXL Spec 3.2 Section 8.2.4.20.13
+ * Lower bits [IG+7:0] pass through unchanged
+ * (eiw < 8)
+ * Per spec: DPAOffset[51:IG+8] = (HPAOffset[51:IG+IW+8] >> IW)
+ * Clear the position bits to isolate upper section, then
+ * reverse the left shift by eiw that occurred during DPA->HPA
+ * (eiw >= 8)
+ * Per spec: DPAOffset[51:IG+8] = HPAOffset[51:IG+IW] / 3
+ * Extract upper bits from the correct bit range and divide by 3
+ * to recover the original DPA upper bits
+ */
+ bits_lower = hpa_offset & GENMASK_ULL(eig + 7, 0);
+ if (eiw < 8) {
+ temp = hpa_offset & ~GENMASK_ULL(eig + eiw + 8 - 1, 0);
+ dpa_offset = temp >> eiw;
+ } else {
+ bits_upper = div64_u64(hpa_offset >> (eig + eiw), 3);
+ dpa_offset = bits_upper << (eig + 8);
+ }
+ dpa_offset |= bits_lower;
+
+ /* Look-up and return the result: a memdev and a DPA */
+ for (int i = 0; i < p->nr_targets; i++) {
+ cxled = p->targets[i];
+ if (cxled->pos != pos)
+ continue;
+ result->cxlmd = cxled_to_memdev(cxled);
+ result->dpa = cxl_dpa_resource_start(cxled) + dpa_offset;
+
+ return 0;
+ }
+ dev_err(&cxlr->dev, "No device found for position %d\n", pos);
+
+ return -ENXIO;
+}
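
A worked example of the decode above, with illustrative values: for a region with 2 interleave ways (eiw = 1) and 256-byte granularity (eig = 0), hpa_offset = 0x380 gives pos = (0x380 >> 8) & 0x1 = 1, bits_lower = 0x380 & 0xff = 0x80, and dpa_offset = ((0x380 & ~0x1ff) >> 1) | 0x80 = 0x100 | 0x80 = 0x180, i.e. the byte lands on the second target at DPA offset 0x180.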
+
static struct lock_class_key cxl_pmem_region_key;
static int cxl_pmem_region_alloc(struct cxl_region *cxlr)
@@ -3542,6 +3653,105 @@ static void shutdown_notifiers(void *_cxlr)
unregister_mt_adistance_algorithm(&cxlr->adist_notifier);
}
+static void remove_debugfs(void *dentry)
+{
+ debugfs_remove_recursive(dentry);
+}
+
+static int validate_region_offset(struct cxl_region *cxlr, u64 offset)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ resource_size_t region_size;
+ u64 hpa;
+
+ if (offset < p->cache_size) {
+ dev_err(&cxlr->dev,
+ "Offset %#llx is within extended linear cache %pr\n",
+ offset, &p->cache_size);
+ return -EINVAL;
+ }
+
+ region_size = resource_size(p->res);
+ if (offset >= region_size) {
+ dev_err(&cxlr->dev, "Offset %#llx exceeds region size %pr\n",
+ offset, &region_size);
+ return -EINVAL;
+ }
+
+ hpa = p->res->start + offset;
+ if (hpa < p->res->start || hpa > p->res->end) {
+ dev_err(&cxlr->dev, "HPA %#llx not in region %pr\n", hpa,
+ p->res);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cxl_region_debugfs_poison_inject(void *data, u64 offset)
+{
+ struct dpa_result result = { .dpa = ULLONG_MAX, .cxlmd = NULL };
+ struct cxl_region *cxlr = data;
+ int rc;
+
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
+ return rc;
+
+ ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
+ return rc;
+
+ if (validate_region_offset(cxlr, offset))
+ return -EINVAL;
+
+ rc = region_offset_to_dpa_result(cxlr, offset, &result);
+ if (rc || !result.cxlmd || result.dpa == ULLONG_MAX) {
+ dev_dbg(&cxlr->dev,
+ "Failed to resolve DPA for region offset %#llx rc %d\n",
+ offset, rc);
+
+ return rc ? rc : -EINVAL;
+ }
+
+ return cxl_inject_poison_locked(result.cxlmd, result.dpa);
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(cxl_poison_inject_fops, NULL,
+ cxl_region_debugfs_poison_inject, "%llx\n");
+
+static int cxl_region_debugfs_poison_clear(void *data, u64 offset)
+{
+ struct dpa_result result = { .dpa = ULLONG_MAX, .cxlmd = NULL };
+ struct cxl_region *cxlr = data;
+ int rc;
+
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
+ return rc;
+
+ ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
+ return rc;
+
+ if (validate_region_offset(cxlr, offset))
+ return -EINVAL;
+
+ rc = region_offset_to_dpa_result(cxlr, offset, &result);
+ if (rc || !result.cxlmd || result.dpa == ULLONG_MAX) {
+ dev_dbg(&cxlr->dev,
+ "Failed to resolve DPA for region offset %#llx rc %d\n",
+ offset, rc);
+
+ return rc ? rc : -EINVAL;
+ }
+
+ return cxl_clear_poison_locked(result.cxlmd, result.dpa);
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(cxl_poison_clear_fops, NULL,
+ cxl_region_debugfs_poison_clear, "%llx\n");
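
With both attributes wired up, poison can be exercised at region scope: writing a hex region offset to the inject_poison or clear_poison node under the region's directory in the CXL debugfs root (conventionally below /sys/kernel/debug) resolves the offset to a memdev and DPA via region_offset_to_dpa_result() before issuing the mailbox command under the region and DPA rwsems.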
+
static int cxl_region_can_probe(struct cxl_region *cxlr)
{
struct cxl_region_params *p = &cxlr->params;
@@ -3571,6 +3781,7 @@ static int cxl_region_probe(struct device *dev)
{
struct cxl_region *cxlr = to_cxl_region(dev);
struct cxl_region_params *p = &cxlr->params;
+ bool poison_supported = true;
int rc;
rc = cxl_region_can_probe(cxlr);
@@ -3594,6 +3805,31 @@ static int cxl_region_probe(struct device *dev)
if (rc)
return rc;
+ /* Create poison attributes if all memdevs support the capabilities */
+ for (int i = 0; i < p->nr_targets; i++) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+
+ if (!cxl_memdev_has_poison_cmd(cxlmd, CXL_POISON_ENABLED_INJECT) ||
+ !cxl_memdev_has_poison_cmd(cxlmd, CXL_POISON_ENABLED_CLEAR)) {
+ poison_supported = false;
+ break;
+ }
+ }
+
+ if (poison_supported) {
+ struct dentry *dentry;
+
+ dentry = cxl_debugfs_create_dir(dev_name(dev));
+ debugfs_create_file("inject_poison", 0200, dentry, cxlr,
+ &cxl_poison_inject_fops);
+ debugfs_create_file("clear_poison", 0200, dentry, cxlr,
+ &cxl_poison_clear_fops);
+ rc = devm_add_action_or_reset(dev, remove_debugfs, dentry);
+ if (rc)
+ return rc;
+ }
+
switch (cxlr->mode) {
case CXL_PARTMODE_PMEM:
rc = devm_cxl_region_edac_register(cxlr);
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 847e37be42c4..231ddccf8977 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -357,6 +357,9 @@ enum cxl_decoder_type {
* @target_type: accelerator vs expander (type2 vs type3) selector
* @region: currently assigned region for this decoder
* @flags: memory type capabilities and locking
+ * @target_map: cached copy of hardware port-id list, available at init
+ * before all @dport objects have been instantiated. While
+ * the dport id is 8 bits wide, CFMWS interleave targets are 32 bits.
* @commit: device/decoder-type specific callback to commit settings to hw
* @reset: device/decoder-type specific callback to reset hw settings
*/
@@ -369,6 +372,7 @@ struct cxl_decoder {
enum cxl_decoder_type target_type;
struct cxl_region *region;
unsigned long flags;
+ u32 target_map[CXL_DECODER_MAX_INTERLEAVE];
int (*commit)(struct cxl_decoder *cxld);
void (*reset)(struct cxl_decoder *cxld);
};
@@ -419,27 +423,35 @@ struct cxl_switch_decoder {
};
struct cxl_root_decoder;
-typedef u64 (*cxl_hpa_to_spa_fn)(struct cxl_root_decoder *cxlrd, u64 hpa);
+/**
+ * struct cxl_rd_ops - CXL root decoder callback operations
+ * @hpa_to_spa: Convert host physical address to system physical address
+ * @spa_to_hpa: Convert system physical address to host physical address
+ */
+struct cxl_rd_ops {
+ u64 (*hpa_to_spa)(struct cxl_root_decoder *cxlrd, u64 hpa);
+ u64 (*spa_to_hpa)(struct cxl_root_decoder *cxlrd, u64 spa);
+};
/**
* struct cxl_root_decoder - Static platform CXL address decoder
* @res: host / parent resource for region allocations
* @cache_size: extended linear cache size if exists, otherwise zero.
* @region_id: region id for next region provisioning event
- * @hpa_to_spa: translate CXL host-physical-address to Platform system-physical-address
* @platform_data: platform specific configuration data
* @range_lock: sync region autodiscovery by address range
* @qos_class: QoS performance class cookie
+ * @ops: CXL root decoder operations
* @cxlsd: base cxl switch decoder
*/
struct cxl_root_decoder {
struct resource *res;
resource_size_t cache_size;
atomic_t region_id;
- cxl_hpa_to_spa_fn hpa_to_spa;
void *platform_data;
struct mutex range_lock;
int qos_class;
+ struct cxl_rd_ops *ops;
struct cxl_switch_decoder cxlsd;
};
@@ -595,6 +607,7 @@ struct cxl_dax_region {
* @cdat: Cached CDAT data
* @cdat_available: Should a CDAT attribute be available in sysfs
* @pci_latency: Upstream latency in picoseconds
+ * @component_reg_phys: Physical address of the component registers
*/
struct cxl_port {
struct device dev;
@@ -618,6 +631,7 @@ struct cxl_port {
} cdat;
bool cdat_available;
long pci_latency;
+ resource_size_t component_reg_phys;
};
/**
@@ -781,9 +795,9 @@ struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
unsigned int nr_targets);
struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
unsigned int nr_targets);
-int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map);
+int cxl_decoder_add(struct cxl_decoder *cxld);
struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port);
-int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map);
+int cxl_decoder_add_locked(struct cxl_decoder *cxld);
int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld);
static inline int cxl_root_decoder_autoremove(struct device *host,
struct cxl_root_decoder *cxlrd)
@@ -806,12 +820,10 @@ struct cxl_endpoint_dvsec_info {
struct range dvsec_range[2];
};
-struct cxl_hdm;
-struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
- struct cxl_endpoint_dvsec_info *info);
-int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
- struct cxl_endpoint_dvsec_info *info);
-int devm_cxl_add_passthrough_decoder(struct cxl_port *port);
+int devm_cxl_switch_port_decoders_setup(struct cxl_port *port);
+int __devm_cxl_switch_port_decoders_setup(struct cxl_port *port);
+int devm_cxl_endpoint_decoders_setup(struct cxl_port *port);
+
struct cxl_dev_state;
int cxl_dvsec_rr_decode(struct cxl_dev_state *cxlds,
struct cxl_endpoint_dvsec_info *info);
@@ -890,7 +902,7 @@ static inline u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint,
#endif
void cxl_endpoint_parse_cdat(struct cxl_port *port);
-void cxl_switch_parse_cdat(struct cxl_port *port);
+void cxl_switch_parse_cdat(struct cxl_dport *dport);
int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
struct access_coordinate *coord);
@@ -905,6 +917,10 @@ void cxl_coordinates_combine(struct access_coordinate *out,
struct access_coordinate *c2);
bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port);
+struct cxl_dport *devm_cxl_add_dport_by_dev(struct cxl_port *port,
+ struct device *dport_dev);
+struct cxl_dport *__devm_cxl_add_dport_by_dev(struct cxl_port *port,
+ struct device *dport_dev);
/*
* Unit test builds overrides this to __weak, find the 'strong' version
@@ -915,4 +931,21 @@ bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port);
#endif
u16 cxl_gpf_get_dvsec(struct device *dev);
+
+/*
+ * Declaration for functions that are mocked by cxl_test that are called by
+ * cxl_core. The respective functions are defined as __foo() and called by
+ * cxl_core as foo(). The macros below ensures that those functions would
+ * exist as foo(). See tools/testing/cxl/cxl_core_exports.c and
+ * tools/testing/cxl/exports.h for setting up the mock functions. The dance
+ * is done to avoid a circular dependency where cxl_core calls a function that
+ * ends up being a mock function and goes to * cxl_test where it calls a
+ * cxl_core function.
+ */
+#ifndef CXL_TEST_ENABLE
+#define DECLARE_TESTABLE(x) __##x
+#define devm_cxl_add_dport_by_dev DECLARE_TESTABLE(devm_cxl_add_dport_by_dev)
+#define devm_cxl_switch_port_decoders_setup DECLARE_TESTABLE(devm_cxl_switch_port_decoders_setup)
+#endif
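
For illustration, a cxl_test-side strong definition consistent with this scheme might look like the sketch below; the is_mock_dev() check and the mock fallback are assumptions here, see tools/testing/cxl for the real wiring:

	/* Hypothetical mock wrapper built with CXL_TEST_ENABLE defined */
	struct cxl_dport *devm_cxl_add_dport_by_dev(struct cxl_port *port,
						    struct device *dport_dev)
	{
		if (is_mock_dev(dport_dev))	/* assumed helper */
			return mock_cxl_add_dport_by_dev(port, dport_dev);

		return __devm_cxl_add_dport_by_dev(port, dport_dev);
	}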
+
#endif /* __CXL_H__ */
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 751478dfc410..434031a0c1f7 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -869,6 +869,8 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
int cxl_trigger_poison_list(struct cxl_memdev *cxlmd);
int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa);
int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa);
+int cxl_inject_poison_locked(struct cxl_memdev *cxlmd, u64 dpa);
+int cxl_clear_poison_locked(struct cxl_memdev *cxlmd, u64 dpa);
#ifdef CONFIG_CXL_EDAC_MEM_FEATURES
int devm_cxl_memdev_edac_register(struct cxl_memdev *cxlmd);
diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
index 54e219b0049e..7ae621e618e7 100644
--- a/drivers/cxl/cxlpci.h
+++ b/drivers/cxl/cxlpci.h
@@ -129,8 +129,6 @@ static inline bool cxl_pci_flit_256(struct pci_dev *pdev)
int devm_cxl_port_enumerate_dports(struct cxl_port *port);
struct cxl_dev_state;
-int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
- struct cxl_endpoint_dvsec_info *info);
void read_cdat_data(struct cxl_port *port);
void cxl_cor_error_detected(struct pci_dev *pdev);
pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index cf32dc50b7a6..51c8f2f84717 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -59,55 +59,20 @@ static int discover_region(struct device *dev, void *unused)
static int cxl_switch_port_probe(struct cxl_port *port)
{
- struct cxl_hdm *cxlhdm;
- int rc;
+ /* Reset nr_dports for a driver rebind */
+ port->nr_dports = 0;
/* Cache the data early to ensure is_visible() works */
read_cdat_data(port);
- rc = devm_cxl_port_enumerate_dports(port);
- if (rc < 0)
- return rc;
-
- cxl_switch_parse_cdat(port);
-
- cxlhdm = devm_cxl_setup_hdm(port, NULL);
- if (!IS_ERR(cxlhdm))
- return devm_cxl_enumerate_decoders(cxlhdm, NULL);
-
- if (PTR_ERR(cxlhdm) != -ENODEV) {
- dev_err(&port->dev, "Failed to map HDM decoder capability\n");
- return PTR_ERR(cxlhdm);
- }
-
- if (rc == 1) {
- dev_dbg(&port->dev, "Fallback to passthrough decoder\n");
- return devm_cxl_add_passthrough_decoder(port);
- }
-
- dev_err(&port->dev, "HDM decoder capability not found\n");
- return -ENXIO;
+ return 0;
}
static int cxl_endpoint_port_probe(struct cxl_port *port)
{
- struct cxl_endpoint_dvsec_info info = { .port = port };
struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_hdm *cxlhdm;
int rc;
- rc = cxl_dvsec_rr_decode(cxlds, &info);
- if (rc < 0)
- return rc;
-
- cxlhdm = devm_cxl_setup_hdm(port, &info);
- if (IS_ERR(cxlhdm)) {
- if (PTR_ERR(cxlhdm) == -ENODEV)
- dev_err(&port->dev, "HDM decoder registers not found\n");
- return PTR_ERR(cxlhdm);
- }
-
/* Cache the data early to ensure is_visible() works */
read_cdat_data(port);
cxl_endpoint_parse_cdat(port);
@@ -117,11 +82,7 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
if (rc)
return rc;
- rc = cxl_hdm_decode_init(cxlds, cxlhdm, &info);
- if (rc)
- return rc;
-
- rc = devm_cxl_enumerate_decoders(cxlhdm, &info);
+ rc = devm_cxl_endpoint_decoders_setup(port);
if (rc)
return rc;
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 54c480e874cb..d7714d8afb0f 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -388,7 +388,7 @@ static const struct super_operations dax_sops = {
.alloc_inode = dax_alloc_inode,
.destroy_inode = dax_destroy_inode,
.free_inode = dax_free_inode,
- .drop_inode = generic_delete_inode,
+ .drop_inode = inode_just_drop,
};
static int dax_init_fs_context(struct fs_context *fc)
diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
index 0470d7c175f4..5a2c9badcc64 100644
--- a/drivers/devfreq/event/rockchip-dfi.c
+++ b/drivers/devfreq/event/rockchip-dfi.c
@@ -34,15 +34,18 @@
/* DDRMON_CTRL */
#define DDRMON_CTRL 0x04
+#define DDRMON_CTRL_LPDDR5 BIT(6)
#define DDRMON_CTRL_DDR4 BIT(5)
#define DDRMON_CTRL_LPDDR4 BIT(4)
#define DDRMON_CTRL_HARDWARE_EN BIT(3)
#define DDRMON_CTRL_LPDDR23 BIT(2)
#define DDRMON_CTRL_SOFTWARE_EN BIT(1)
#define DDRMON_CTRL_TIMER_CNT_EN BIT(0)
-#define DDRMON_CTRL_DDR_TYPE_MASK (DDRMON_CTRL_DDR4 | \
+#define DDRMON_CTRL_DDR_TYPE_MASK (DDRMON_CTRL_LPDDR5 | \
+ DDRMON_CTRL_DDR4 | \
DDRMON_CTRL_LPDDR4 | \
DDRMON_CTRL_LPDDR23)
+#define DDRMON_CTRL_LP5_BANK_MODE_MASK GENMASK(8, 7)
#define DDRMON_CH0_WR_NUM 0x20
#define DDRMON_CH0_RD_NUM 0x24
@@ -116,12 +119,60 @@ struct rockchip_dfi {
int buswidth[DMC_MAX_CHANNELS];
int ddrmon_stride;
bool ddrmon_ctrl_single;
+ u32 lp5_bank_mode;
+ bool lp5_ckr; /* true if in 4:1 command-to-data clock ratio mode */
+ unsigned int count_multiplier; /* number of data clocks per count */
};
+static int rockchip_dfi_ddrtype_to_ctrl(struct rockchip_dfi *dfi, u32 *ctrl,
+ u32 *mask)
+{
+ u32 ddrmon_ver;
+
+ *mask = DDRMON_CTRL_DDR_TYPE_MASK;
+
+ switch (dfi->ddr_type) {
+ case ROCKCHIP_DDRTYPE_LPDDR2:
+ case ROCKCHIP_DDRTYPE_LPDDR3:
+ *ctrl = DDRMON_CTRL_LPDDR23;
+ break;
+ case ROCKCHIP_DDRTYPE_LPDDR4:
+ case ROCKCHIP_DDRTYPE_LPDDR4X:
+ *ctrl = DDRMON_CTRL_LPDDR4;
+ break;
+ case ROCKCHIP_DDRTYPE_LPDDR5:
+ ddrmon_ver = readl_relaxed(dfi->regs);
+ if (ddrmon_ver < 0x40) {
+ *ctrl = DDRMON_CTRL_LPDDR5 | dfi->lp5_bank_mode;
+ *mask |= DDRMON_CTRL_LP5_BANK_MODE_MASK;
+ break;
+ }
+
+ /*
+ * As it is unknown whether the unpleasant special case
+ * behaviour used by the vendor kernel is needed for any
+ * shipping hardware, ask users to report if they have
+ * some of that hardware.
+ */
+ dev_err(&dfi->edev->dev,
+ "unsupported DDRMON version 0x%04X, please let linux-rockchip know!\n",
+ ddrmon_ver);
+ return -EOPNOTSUPP;
+ default:
+ dev_err(&dfi->edev->dev, "unsupported memory type 0x%X\n",
+ dfi->ddr_type);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
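For context, the ctrl/mask pair computed here feeds the HIWORD_UPDATE() write in rockchip_dfi_enable() below. In Rockchip's register convention the upper 16 bits of the written value act as a per-bit write-enable mask, so only the masked field changes. A minimal sketch, assuming the usual GRF-style definition of the macro:

	#define HIWORD_UPDATE(val, mask)	((val) | ((mask) << 16))

	/* Only the DDR type bits (and, for LPDDR5, the bank mode field)
	 * are touched; all other DDRMON_CTRL bits keep their value.
	 */
	writel_relaxed(HIWORD_UPDATE(ctrl, ctrl_mask), dfi_regs + DDRMON_CTRL);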
static int rockchip_dfi_enable(struct rockchip_dfi *dfi)
{
void __iomem *dfi_regs = dfi->regs;
int i, ret = 0;
+ u32 ctrl;
+ u32 ctrl_mask;
mutex_lock(&dfi->mutex);
@@ -135,8 +186,11 @@ static int rockchip_dfi_enable(struct rockchip_dfi *dfi)
goto out;
}
+ ret = rockchip_dfi_ddrtype_to_ctrl(dfi, &ctrl, &ctrl_mask);
+ if (ret)
+ goto out;
+
for (i = 0; i < dfi->max_channels; i++) {
- u32 ctrl = 0;
if (!(dfi->channel_mask & BIT(i)))
continue;
@@ -146,21 +200,7 @@ static int rockchip_dfi_enable(struct rockchip_dfi *dfi)
DDRMON_CTRL_SOFTWARE_EN | DDRMON_CTRL_HARDWARE_EN),
dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);
- /* set ddr type to dfi */
- switch (dfi->ddr_type) {
- case ROCKCHIP_DDRTYPE_LPDDR2:
- case ROCKCHIP_DDRTYPE_LPDDR3:
- ctrl = DDRMON_CTRL_LPDDR23;
- break;
- case ROCKCHIP_DDRTYPE_LPDDR4:
- case ROCKCHIP_DDRTYPE_LPDDR4X:
- ctrl = DDRMON_CTRL_LPDDR4;
- break;
- default:
- break;
- }
-
- writel_relaxed(HIWORD_UPDATE(ctrl, DDRMON_CTRL_DDR_TYPE_MASK),
+ writel_relaxed(HIWORD_UPDATE(ctrl, ctrl_mask),
dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);
/* enable count, use software mode */
@@ -435,7 +475,7 @@ static u64 rockchip_ddr_perf_event_get_count(struct perf_event *event)
switch (event->attr.config) {
case PERF_EVENT_CYCLES:
- count = total.c[0].clock_cycles;
+ count = total.c[0].clock_cycles * dfi->count_multiplier;
break;
case PERF_EVENT_READ_BYTES:
for (i = 0; i < dfi->max_channels; i++)
@@ -651,10 +691,14 @@ static int rockchip_ddr_perf_init(struct rockchip_dfi *dfi)
break;
case ROCKCHIP_DDRTYPE_LPDDR4:
case ROCKCHIP_DDRTYPE_LPDDR4X:
+ case ROCKCHIP_DDRTYPE_LPDDR5:
dfi->burst_len = 16;
break;
}
+ if (!dfi->count_multiplier)
+ dfi->count_multiplier = 1;
+
ret = perf_pmu_register(pmu, "rockchip_ddr", -1);
if (ret)
return ret;
@@ -726,7 +770,7 @@ static int rk3568_dfi_init(struct rockchip_dfi *dfi)
static int rk3588_dfi_init(struct rockchip_dfi *dfi)
{
struct regmap *regmap_pmu = dfi->regmap_pmu;
- u32 reg2, reg3, reg4;
+ u32 reg2, reg3, reg4, reg6;
regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG2, &reg2);
regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG3, &reg3);
@@ -751,6 +795,15 @@ static int rk3588_dfi_init(struct rockchip_dfi *dfi)
dfi->max_channels = 4;
dfi->ddrmon_stride = 0x4000;
+ dfi->count_multiplier = 2;
+
+ if (dfi->ddr_type == ROCKCHIP_DDRTYPE_LPDDR5) {
+ regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG6, &reg6);
+ dfi->lp5_bank_mode = FIELD_GET(RK3588_PMUGRF_OS_REG6_LP5_BANK_MODE, reg6) << 7;
+ dfi->lp5_ckr = FIELD_GET(RK3588_PMUGRF_OS_REG6_LP5_CKR, reg6);
+ if (dfi->lp5_ckr)
+ dfi->count_multiplier *= 2;
+ }
return 0;
};
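Worked example (not part of the patch): on RK3588 the DDRMON ticks once per two DRAM data clocks, hence count_multiplier starts at 2; in LPDDR5 4:1 command-to-data clock ratio (CKR) mode each tick covers twice as many data clocks again, so the multiplier becomes 4. The PERF_EVENT_CYCLES path above then reports:

	/* raw counter -> DRAM clock cycles */
	u64 cycles = total.c[0].clock_cycles * dfi->count_multiplier;
	/* RK3588: x2, or x4 for LPDDR5 in 4:1 CKR mode */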
diff --git a/drivers/devfreq/mtk-cci-devfreq.c b/drivers/devfreq/mtk-cci-devfreq.c
index 22fe9e631f8a..4c22be728f6a 100644
--- a/drivers/devfreq/mtk-cci-devfreq.c
+++ b/drivers/devfreq/mtk-cci-devfreq.c
@@ -86,7 +86,7 @@ static int mtk_ccifreq_set_voltage(struct mtk_ccifreq_drv *drv, int new_voltage)
soc_data->sram_max_volt);
return ret;
}
- } else if (pre_voltage > new_voltage) {
+ } else {
voltage = max(new_voltage,
pre_vsram - soc_data->max_volt_shift);
ret = regulator_set_voltage(drv->proc_reg, voltage,
@@ -386,7 +386,8 @@ out_disable_cci_clk:
out_free_resources:
if (regulator_is_enabled(drv->proc_reg))
regulator_disable(drv->proc_reg);
- if (drv->sram_reg && regulator_is_enabled(drv->sram_reg))
+ if (!IS_ERR_OR_NULL(drv->sram_reg) &&
+ regulator_is_enabled(drv->sram_reg))
regulator_disable(drv->sram_reg);
return ret;
diff --git a/drivers/dibs/Kconfig b/drivers/dibs/Kconfig
new file mode 100644
index 000000000000..5dc347b9b235
--- /dev/null
+++ b/drivers/dibs/Kconfig
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+config DIBS
+ tristate "DIBS support"
+ default n
+ help
+ Direct Internal Buffer Sharing (DIBS)
+ A communication method that uses common physical (internal) memory
+ for synchronous direct access into a remote buffer.
+
+ Select this option to provide the abstraction layer between
+ dibs devices and dibs clients like the SMC protocol.
+ The module name is dibs.
+
+config DIBS_LO
+ bool "intra-OS shortcut with dibs loopback"
+ depends on DIBS
+ default n
+ help
+	  DIBS_LO enables the creation of a software-emulated dibs device
+	  named lo, which can be used for transferring data when
+	  communication occurs within the same OS. This allows convenient
+	  testing of dibs clients, since dibs loopback is independent of
+	  architecture or hardware.
diff --git a/drivers/dibs/Makefile b/drivers/dibs/Makefile
new file mode 100644
index 000000000000..85805490c77f
--- /dev/null
+++ b/drivers/dibs/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# DIBS class module
+#
+
+dibs-y += dibs_main.o
+obj-$(CONFIG_DIBS) += dibs.o
+dibs-$(CONFIG_DIBS_LO) += dibs_loopback.o
\ No newline at end of file
diff --git a/drivers/dibs/dibs_loopback.c b/drivers/dibs/dibs_loopback.c
new file mode 100644
index 000000000000..aa029e29c6b2
--- /dev/null
+++ b/drivers/dibs/dibs_loopback.c
@@ -0,0 +1,361 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Functions for dibs loopback/loopback-ism device.
+ *
+ * Copyright (c) 2024, Alibaba Inc.
+ *
+ * Author: Wen Gu <guwen@linux.alibaba.com>
+ * Tony Lu <tonylu@linux.alibaba.com>
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/dibs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "dibs_loopback.h"
+
+#define DIBS_LO_SUPPORT_NOCOPY 0x1
+#define DIBS_DMA_ADDR_INVALID (~(dma_addr_t)0)
+
+static const char dibs_lo_dev_name[] = "lo";
+/* global loopback device */
+static struct dibs_lo_dev *lo_dev;
+
+static u16 dibs_lo_get_fabric_id(struct dibs_dev *dibs)
+{
+ return DIBS_LOOPBACK_FABRIC;
+}
+
+static int dibs_lo_query_rgid(struct dibs_dev *dibs, const uuid_t *rgid,
+ u32 vid_valid, u32 vid)
+{
+ /* rgid should be the same as lgid */
+ if (!uuid_equal(rgid, &dibs->gid))
+ return -ENETUNREACH;
+ return 0;
+}
+
+static int dibs_lo_max_dmbs(void)
+{
+ return DIBS_LO_MAX_DMBS;
+}
+
+static int dibs_lo_register_dmb(struct dibs_dev *dibs, struct dibs_dmb *dmb,
+ struct dibs_client *client)
+{
+ struct dibs_lo_dmb_node *dmb_node, *tmp_node;
+ struct dibs_lo_dev *ldev;
+ struct folio *folio;
+ unsigned long flags;
+ int sba_idx, rc;
+
+ ldev = dibs->drv_priv;
+ sba_idx = dmb->idx;
+ /* check space for new dmb */
+ for_each_clear_bit(sba_idx, ldev->sba_idx_mask, DIBS_LO_MAX_DMBS) {
+ if (!test_and_set_bit(sba_idx, ldev->sba_idx_mask))
+ break;
+ }
+ if (sba_idx == DIBS_LO_MAX_DMBS)
+ return -ENOSPC;
+
+ dmb_node = kzalloc(sizeof(*dmb_node), GFP_KERNEL);
+ if (!dmb_node) {
+ rc = -ENOMEM;
+ goto err_bit;
+ }
+
+ dmb_node->sba_idx = sba_idx;
+ dmb_node->len = dmb->dmb_len;
+
+	/* not critical; fail under memory pressure and fall back to TCP */
+ folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC |
+ __GFP_NORETRY | __GFP_ZERO,
+ get_order(dmb_node->len));
+ if (!folio) {
+ rc = -ENOMEM;
+ goto err_node;
+ }
+ dmb_node->cpu_addr = folio_address(folio);
+ dmb_node->dma_addr = DIBS_DMA_ADDR_INVALID;
+ refcount_set(&dmb_node->refcnt, 1);
+
+again:
+ /* add new dmb into hash table */
+ get_random_bytes(&dmb_node->token, sizeof(dmb_node->token));
+ write_lock_bh(&ldev->dmb_ht_lock);
+ hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb_node->token) {
+ if (tmp_node->token == dmb_node->token) {
+ write_unlock_bh(&ldev->dmb_ht_lock);
+ goto again;
+ }
+ }
+ hash_add(ldev->dmb_ht, &dmb_node->list, dmb_node->token);
+ write_unlock_bh(&ldev->dmb_ht_lock);
+ atomic_inc(&ldev->dmb_cnt);
+
+ dmb->idx = dmb_node->sba_idx;
+ dmb->dmb_tok = dmb_node->token;
+ dmb->cpu_addr = dmb_node->cpu_addr;
+ dmb->dma_addr = dmb_node->dma_addr;
+ dmb->dmb_len = dmb_node->len;
+
+ spin_lock_irqsave(&dibs->lock, flags);
+ dibs->dmb_clientid_arr[sba_idx] = client->id;
+ spin_unlock_irqrestore(&dibs->lock, flags);
+
+ return 0;
+
+err_node:
+ kfree(dmb_node);
+err_bit:
+ clear_bit(sba_idx, ldev->sba_idx_mask);
+ return rc;
+}
+
+static void __dibs_lo_unregister_dmb(struct dibs_lo_dev *ldev,
+ struct dibs_lo_dmb_node *dmb_node)
+{
+ /* remove dmb from hash table */
+ write_lock_bh(&ldev->dmb_ht_lock);
+ hash_del(&dmb_node->list);
+ write_unlock_bh(&ldev->dmb_ht_lock);
+
+ clear_bit(dmb_node->sba_idx, ldev->sba_idx_mask);
+ folio_put(virt_to_folio(dmb_node->cpu_addr));
+ kfree(dmb_node);
+
+ if (atomic_dec_and_test(&ldev->dmb_cnt))
+ wake_up(&ldev->ldev_release);
+}
+
+static int dibs_lo_unregister_dmb(struct dibs_dev *dibs, struct dibs_dmb *dmb)
+{
+ struct dibs_lo_dmb_node *dmb_node = NULL, *tmp_node;
+ struct dibs_lo_dev *ldev;
+ unsigned long flags;
+
+ ldev = dibs->drv_priv;
+
+ /* find dmb from hash table */
+ read_lock_bh(&ldev->dmb_ht_lock);
+ hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb->dmb_tok) {
+ if (tmp_node->token == dmb->dmb_tok) {
+ dmb_node = tmp_node;
+ break;
+ }
+ }
+ read_unlock_bh(&ldev->dmb_ht_lock);
+ if (!dmb_node)
+ return -EINVAL;
+
+ if (refcount_dec_and_test(&dmb_node->refcnt)) {
+ spin_lock_irqsave(&dibs->lock, flags);
+ dibs->dmb_clientid_arr[dmb_node->sba_idx] = NO_DIBS_CLIENT;
+ spin_unlock_irqrestore(&dibs->lock, flags);
+
+ __dibs_lo_unregister_dmb(ldev, dmb_node);
+ }
+ return 0;
+}
+
+static int dibs_lo_support_dmb_nocopy(struct dibs_dev *dibs)
+{
+ return DIBS_LO_SUPPORT_NOCOPY;
+}
+
+static int dibs_lo_attach_dmb(struct dibs_dev *dibs, struct dibs_dmb *dmb)
+{
+ struct dibs_lo_dmb_node *dmb_node = NULL, *tmp_node;
+ struct dibs_lo_dev *ldev;
+
+ ldev = dibs->drv_priv;
+
+ /* find dmb_node according to dmb->dmb_tok */
+ read_lock_bh(&ldev->dmb_ht_lock);
+ hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb->dmb_tok) {
+ if (tmp_node->token == dmb->dmb_tok) {
+ dmb_node = tmp_node;
+ break;
+ }
+ }
+ if (!dmb_node) {
+ read_unlock_bh(&ldev->dmb_ht_lock);
+ return -EINVAL;
+ }
+ read_unlock_bh(&ldev->dmb_ht_lock);
+
+	/* the dmb is being unregistered, but has
+	 * not been removed from the hash table.
+	 */
+	if (!refcount_inc_not_zero(&dmb_node->refcnt))
+		return -EINVAL;
+
+ /* provide dmb information */
+ dmb->idx = dmb_node->sba_idx;
+ dmb->dmb_tok = dmb_node->token;
+ dmb->cpu_addr = dmb_node->cpu_addr;
+ dmb->dma_addr = dmb_node->dma_addr;
+ dmb->dmb_len = dmb_node->len;
+ return 0;
+}
+
+static int dibs_lo_detach_dmb(struct dibs_dev *dibs, u64 token)
+{
+ struct dibs_lo_dmb_node *dmb_node = NULL, *tmp_node;
+ struct dibs_lo_dev *ldev;
+
+ ldev = dibs->drv_priv;
+
+ /* find dmb_node according to dmb->dmb_tok */
+ read_lock_bh(&ldev->dmb_ht_lock);
+ hash_for_each_possible(ldev->dmb_ht, tmp_node, list, token) {
+ if (tmp_node->token == token) {
+ dmb_node = tmp_node;
+ break;
+ }
+ }
+ if (!dmb_node) {
+ read_unlock_bh(&ldev->dmb_ht_lock);
+ return -EINVAL;
+ }
+ read_unlock_bh(&ldev->dmb_ht_lock);
+
+ if (refcount_dec_and_test(&dmb_node->refcnt))
+ __dibs_lo_unregister_dmb(ldev, dmb_node);
+ return 0;
+}
+
+static int dibs_lo_move_data(struct dibs_dev *dibs, u64 dmb_tok,
+ unsigned int idx, bool sf, unsigned int offset,
+ void *data, unsigned int size)
+{
+ struct dibs_lo_dmb_node *rmb_node = NULL, *tmp_node;
+ struct dibs_lo_dev *ldev;
+ u16 s_mask;
+ u8 client_id;
+ u32 sba_idx;
+
+ ldev = dibs->drv_priv;
+
+ read_lock_bh(&ldev->dmb_ht_lock);
+ hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb_tok) {
+ if (tmp_node->token == dmb_tok) {
+ rmb_node = tmp_node;
+ break;
+ }
+ }
+ if (!rmb_node) {
+ read_unlock_bh(&ldev->dmb_ht_lock);
+ return -EINVAL;
+ }
+ memcpy((char *)rmb_node->cpu_addr + offset, data, size);
+ sba_idx = rmb_node->sba_idx;
+ read_unlock_bh(&ldev->dmb_ht_lock);
+
+ if (!sf)
+ return 0;
+
+ spin_lock(&dibs->lock);
+ client_id = dibs->dmb_clientid_arr[sba_idx];
+ s_mask = ror16(0x1000, idx);
+ if (likely(client_id != NO_DIBS_CLIENT && dibs->subs[client_id]))
+ dibs->subs[client_id]->ops->handle_irq(dibs, sba_idx, s_mask);
+ spin_unlock(&dibs->lock);
+
+ return 0;
+}
+
+static const struct dibs_dev_ops dibs_lo_ops = {
+ .get_fabric_id = dibs_lo_get_fabric_id,
+ .query_remote_gid = dibs_lo_query_rgid,
+ .max_dmbs = dibs_lo_max_dmbs,
+ .register_dmb = dibs_lo_register_dmb,
+ .unregister_dmb = dibs_lo_unregister_dmb,
+ .move_data = dibs_lo_move_data,
+ .support_mmapped_rdmb = dibs_lo_support_dmb_nocopy,
+ .attach_dmb = dibs_lo_attach_dmb,
+ .detach_dmb = dibs_lo_detach_dmb,
+};
+
+static void dibs_lo_dev_init(struct dibs_lo_dev *ldev)
+{
+ rwlock_init(&ldev->dmb_ht_lock);
+ hash_init(ldev->dmb_ht);
+ atomic_set(&ldev->dmb_cnt, 0);
+ init_waitqueue_head(&ldev->ldev_release);
+}
+
+static void dibs_lo_dev_exit(struct dibs_lo_dev *ldev)
+{
+ if (atomic_read(&ldev->dmb_cnt))
+ wait_event(ldev->ldev_release, !atomic_read(&ldev->dmb_cnt));
+}
+
+static int dibs_lo_dev_probe(void)
+{
+ struct dibs_lo_dev *ldev;
+ struct dibs_dev *dibs;
+ int ret;
+
+ ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
+ if (!ldev)
+ return -ENOMEM;
+
+ dibs = dibs_dev_alloc();
+ if (!dibs) {
+ kfree(ldev);
+ return -ENOMEM;
+ }
+
+ ldev->dibs = dibs;
+ dibs->drv_priv = ldev;
+ dibs_lo_dev_init(ldev);
+ uuid_gen(&dibs->gid);
+ dibs->ops = &dibs_lo_ops;
+
+ dibs->dev.parent = NULL;
+ dev_set_name(&dibs->dev, "%s", dibs_lo_dev_name);
+
+ ret = dibs_dev_add(dibs);
+ if (ret)
+ goto err_reg;
+ lo_dev = ldev;
+ return 0;
+
+err_reg:
+ kfree(dibs->dmb_clientid_arr);
+ /* pairs with dibs_dev_alloc() */
+ put_device(&dibs->dev);
+ kfree(ldev);
+
+ return ret;
+}
+
+static void dibs_lo_dev_remove(void)
+{
+ if (!lo_dev)
+ return;
+
+ dibs_dev_del(lo_dev->dibs);
+ dibs_lo_dev_exit(lo_dev);
+ /* pairs with dibs_dev_alloc() */
+ put_device(&lo_dev->dibs->dev);
+ kfree(lo_dev);
+ lo_dev = NULL;
+}
+
+int dibs_loopback_init(void)
+{
+ return dibs_lo_dev_probe();
+}
+
+void dibs_loopback_exit(void)
+{
+ dibs_lo_dev_remove();
+}
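End to end, a loopback "transfer" is just a memcpy into the target DMB plus a forwarded interrupt. A hedged sketch of the flow through the ops wired up in dibs_lo_ops above (buffer names are illustrative):

	struct dibs_dmb dmb = { .dmb_len = SZ_16K };

	/* receiver: back the DMB with loopback-owned memory */
	ret = dibs->ops->register_dmb(dibs, &dmb, client);

	/* sender: copy into the peer's DMB and signal it (sf = true) */
	ret = dibs->ops->move_data(dibs, dmb.dmb_tok, 0, true, 0, buf, len);
	/* dibs_lo_move_data() memcpys into dmb.cpu_addr, then invokes the
	 * owning client's ->handle_irq()
	 */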
diff --git a/drivers/dibs/dibs_loopback.h b/drivers/dibs/dibs_loopback.h
new file mode 100644
index 000000000000..0664f6a8e662
--- /dev/null
+++ b/drivers/dibs/dibs_loopback.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * dibs loopback (aka loopback-ism) device structure definitions.
+ *
+ * Copyright (c) 2024, Alibaba Inc.
+ *
+ * Author: Wen Gu <guwen@linux.alibaba.com>
+ * Tony Lu <tonylu@linux.alibaba.com>
+ *
+ */
+
+#ifndef _DIBS_LOOPBACK_H
+#define _DIBS_LOOPBACK_H
+
+#include <linux/dibs.h>
+#include <linux/hashtable.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#if IS_ENABLED(CONFIG_DIBS_LO)
+#define DIBS_LO_DMBS_HASH_BITS 12
+#define DIBS_LO_MAX_DMBS 5000
+
+struct dibs_lo_dmb_node {
+ struct hlist_node list;
+ u64 token;
+ u32 len;
+ u32 sba_idx;
+ void *cpu_addr;
+ dma_addr_t dma_addr;
+ refcount_t refcnt;
+};
+
+struct dibs_lo_dev {
+ struct dibs_dev *dibs;
+ atomic_t dmb_cnt;
+ rwlock_t dmb_ht_lock;
+ DECLARE_BITMAP(sba_idx_mask, DIBS_LO_MAX_DMBS);
+ DECLARE_HASHTABLE(dmb_ht, DIBS_LO_DMBS_HASH_BITS);
+ wait_queue_head_t ldev_release;
+};
+
+int dibs_loopback_init(void);
+void dibs_loopback_exit(void);
+#else
+static inline int dibs_loopback_init(void)
+{
+ return 0;
+}
+
+static inline void dibs_loopback_exit(void)
+{
+}
+#endif
+
+#endif /* _DIBS_LOOPBACK_H */
diff --git a/drivers/dibs/dibs_main.c b/drivers/dibs/dibs_main.c
new file mode 100644
index 000000000000..0374f8350ff7
--- /dev/null
+++ b/drivers/dibs/dibs_main.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DIBS - Direct Internal Buffer Sharing
+ *
+ * Implementation of the DIBS class module
+ *
+ * Copyright IBM Corp. 2025
+ */
+#define KMSG_COMPONENT "dibs"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/dibs.h>
+
+#include "dibs_loopback.h"
+
+MODULE_DESCRIPTION("Direct Internal Buffer Sharing class");
+MODULE_LICENSE("GPL");
+
+static struct class *dibs_class;
+
+/* use an array rather than a list for fast mapping: */
+static struct dibs_client *clients[MAX_DIBS_CLIENTS];
+static u8 max_client;
+static DEFINE_MUTEX(clients_lock);
+struct dibs_dev_list {
+ struct list_head list;
+ struct mutex mutex; /* protects dibs device list */
+};
+
+static struct dibs_dev_list dibs_dev_list = {
+ .list = LIST_HEAD_INIT(dibs_dev_list.list),
+ .mutex = __MUTEX_INITIALIZER(dibs_dev_list.mutex),
+};
+
+static void dibs_setup_forwarding(struct dibs_client *client,
+ struct dibs_dev *dibs)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dibs->lock, flags);
+ dibs->subs[client->id] = client;
+ spin_unlock_irqrestore(&dibs->lock, flags);
+}
+
+int dibs_register_client(struct dibs_client *client)
+{
+ struct dibs_dev *dibs;
+ int i, rc = -ENOSPC;
+
+ mutex_lock(&dibs_dev_list.mutex);
+ mutex_lock(&clients_lock);
+ for (i = 0; i < MAX_DIBS_CLIENTS; ++i) {
+ if (!clients[i]) {
+ clients[i] = client;
+ client->id = i;
+ if (i == max_client)
+ max_client++;
+ rc = 0;
+ break;
+ }
+ }
+ mutex_unlock(&clients_lock);
+
+ if (i < MAX_DIBS_CLIENTS) {
+ /* initialize with all devices that we got so far */
+ list_for_each_entry(dibs, &dibs_dev_list.list, list) {
+ dibs->priv[i] = NULL;
+ client->ops->add_dev(dibs);
+ dibs_setup_forwarding(client, dibs);
+ }
+ }
+ mutex_unlock(&dibs_dev_list.mutex);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(dibs_register_client);
+
+int dibs_unregister_client(struct dibs_client *client)
+{
+ struct dibs_dev *dibs;
+ unsigned long flags;
+ int max_dmbs;
+ int rc = 0;
+
+ mutex_lock(&dibs_dev_list.mutex);
+ list_for_each_entry(dibs, &dibs_dev_list.list, list) {
+ spin_lock_irqsave(&dibs->lock, flags);
+ max_dmbs = dibs->ops->max_dmbs();
+ for (int i = 0; i < max_dmbs; ++i) {
+ if (dibs->dmb_clientid_arr[i] == client->id) {
+ WARN(1, "%s: attempt to unregister '%s' with registered dmb(s)\n",
+ __func__, client->name);
+ rc = -EBUSY;
+ goto err_reg_dmb;
+ }
+ }
+ /* Stop forwarding IRQs and events */
+ dibs->subs[client->id] = NULL;
+ spin_unlock_irqrestore(&dibs->lock, flags);
+ clients[client->id]->ops->del_dev(dibs);
+ dibs->priv[client->id] = NULL;
+ }
+
+ mutex_lock(&clients_lock);
+ clients[client->id] = NULL;
+ if (client->id + 1 == max_client)
+ max_client--;
+ mutex_unlock(&clients_lock);
+
+ mutex_unlock(&dibs_dev_list.mutex);
+ return rc;
+
+err_reg_dmb:
+ spin_unlock_irqrestore(&dibs->lock, flags);
+ mutex_unlock(&dibs_dev_list.mutex);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(dibs_unregister_client);
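For orientation, the shape of a client as this file drives one. The ops prototypes below are inferred from the call sites here (add_dev, del_dev, handle_irq); the struct names are assumptions standing in for the real declarations in <linux/dibs.h>:

	static void my_add_dev(struct dibs_dev *dibs)
	{
		/* claim the device; runs under dibs_dev_list.mutex */
	}

	static void my_del_dev(struct dibs_dev *dibs)
	{
		/* drop any per-device state held in dibs->priv[id] */
	}

	static const struct dibs_client_ops my_ops = {
		.add_dev = my_add_dev,
		.del_dev = my_del_dev,
		.handle_irq = my_handle_irq,	/* see dibs_lo_move_data() */
	};

	static struct dibs_client my_client = {
		.name = "my_client",
		.ops = &my_ops,
	};

	/* typically from the client's module_init(): */
	err = dibs_register_client(&my_client);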
+
+static void dibs_dev_release(struct device *dev)
+{
+ struct dibs_dev *dibs;
+
+ dibs = container_of(dev, struct dibs_dev, dev);
+
+ kfree(dibs);
+}
+
+struct dibs_dev *dibs_dev_alloc(void)
+{
+ struct dibs_dev *dibs;
+
+ dibs = kzalloc(sizeof(*dibs), GFP_KERNEL);
+ if (!dibs)
+ return dibs;
+ dibs->dev.release = dibs_dev_release;
+ dibs->dev.class = dibs_class;
+ device_initialize(&dibs->dev);
+
+ return dibs;
+}
+EXPORT_SYMBOL_GPL(dibs_dev_alloc);
+
+static ssize_t gid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dibs_dev *dibs;
+
+ dibs = container_of(dev, struct dibs_dev, dev);
+
+ return sysfs_emit(buf, "%pUb\n", &dibs->gid);
+}
+static DEVICE_ATTR_RO(gid);
+
+static ssize_t fabric_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dibs_dev *dibs;
+ u16 fabric_id;
+
+ dibs = container_of(dev, struct dibs_dev, dev);
+ fabric_id = dibs->ops->get_fabric_id(dibs);
+
+ return sysfs_emit(buf, "0x%04x\n", fabric_id);
+}
+static DEVICE_ATTR_RO(fabric_id);
+
+static struct attribute *dibs_dev_attrs[] = {
+ &dev_attr_gid.attr,
+ &dev_attr_fabric_id.attr,
+ NULL,
+};
+
+static const struct attribute_group dibs_dev_attr_group = {
+ .attrs = dibs_dev_attrs,
+};
+
+int dibs_dev_add(struct dibs_dev *dibs)
+{
+ int max_dmbs;
+ int i, ret;
+
+ max_dmbs = dibs->ops->max_dmbs();
+ spin_lock_init(&dibs->lock);
+ dibs->dmb_clientid_arr = kzalloc(max_dmbs, GFP_KERNEL);
+ if (!dibs->dmb_clientid_arr)
+ return -ENOMEM;
+ memset(dibs->dmb_clientid_arr, NO_DIBS_CLIENT, max_dmbs);
+
+ ret = device_add(&dibs->dev);
+ if (ret)
+ goto free_client_arr;
+
+ ret = sysfs_create_group(&dibs->dev.kobj, &dibs_dev_attr_group);
+ if (ret) {
+ dev_err(&dibs->dev, "sysfs_create_group failed for dibs_dev\n");
+ goto err_device_del;
+ }
+ mutex_lock(&dibs_dev_list.mutex);
+ mutex_lock(&clients_lock);
+ for (i = 0; i < max_client; ++i) {
+ if (clients[i]) {
+ clients[i]->ops->add_dev(dibs);
+ dibs_setup_forwarding(clients[i], dibs);
+ }
+ }
+ mutex_unlock(&clients_lock);
+ list_add(&dibs->list, &dibs_dev_list.list);
+ mutex_unlock(&dibs_dev_list.mutex);
+
+ return 0;
+
+err_device_del:
+ device_del(&dibs->dev);
+free_client_arr:
+ kfree(dibs->dmb_clientid_arr);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dibs_dev_add);
+
+void dibs_dev_del(struct dibs_dev *dibs)
+{
+ unsigned long flags;
+ int i;
+
+ sysfs_remove_group(&dibs->dev.kobj, &dibs_dev_attr_group);
+
+ spin_lock_irqsave(&dibs->lock, flags);
+ for (i = 0; i < MAX_DIBS_CLIENTS; ++i)
+ dibs->subs[i] = NULL;
+ spin_unlock_irqrestore(&dibs->lock, flags);
+
+ mutex_lock(&dibs_dev_list.mutex);
+ mutex_lock(&clients_lock);
+ for (i = 0; i < max_client; ++i) {
+ if (clients[i])
+ clients[i]->ops->del_dev(dibs);
+ }
+ mutex_unlock(&clients_lock);
+ list_del_init(&dibs->list);
+ mutex_unlock(&dibs_dev_list.mutex);
+
+ device_del(&dibs->dev);
+ kfree(dibs->dmb_clientid_arr);
+}
+EXPORT_SYMBOL_GPL(dibs_dev_del);
+
+static int __init dibs_init(void)
+{
+ int rc;
+
+ memset(clients, 0, sizeof(clients));
+ max_client = 0;
+
+ dibs_class = class_create("dibs");
+ if (IS_ERR(dibs_class))
+ return PTR_ERR(dibs_class);
+
+ rc = dibs_loopback_init();
+ if (rc)
+ pr_err("%s fails with %d\n", __func__, rc);
+
+ return rc;
+}
+
+static void __exit dibs_exit(void)
+{
+ dibs_loopback_exit();
+ class_destroy(dibs_class);
+}
+
+module_init(dibs_init);
+module_exit(dibs_exit);
diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
index 3cbe87d4a464..8ab49924f8b7 100644
--- a/drivers/dma-buf/dma-heap.c
+++ b/drivers/dma-buf/dma-heap.c
@@ -11,6 +11,7 @@
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/list.h>
#include <linux/nospec.h>
#include <linux/syscalls.h>
@@ -202,6 +203,7 @@ void *dma_heap_get_drvdata(struct dma_heap *heap)
{
return heap->priv;
}
+EXPORT_SYMBOL_NS_GPL(dma_heap_get_drvdata, "DMA_BUF_HEAP");
/**
* dma_heap_get_name - get heap name
@@ -214,6 +216,7 @@ const char *dma_heap_get_name(struct dma_heap *heap)
{
return heap->name;
}
+EXPORT_SYMBOL_NS_GPL(dma_heap_get_name, "DMA_BUF_HEAP");
/**
* dma_heap_add - adds a heap to dmabuf heaps
@@ -303,6 +306,7 @@ err0:
kfree(heap);
return err_ret;
}
+EXPORT_SYMBOL_NS_GPL(dma_heap_add, "DMA_BUF_HEAP");
static char *dma_heap_devnode(const struct device *dev, umode_t *mode)
{
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 05c7c7d9e5a4..b8a74b1798ba 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -450,7 +450,7 @@ config MILBEAUT_XDMAC
config MMP_PDMA
tristate "MMP PDMA support"
- depends on ARCH_MMP || ARCH_PXA || COMPILE_TEST
+ depends on ARCH_MMP || ARCH_PXA || ARCH_SPACEMIT || COMPILE_TEST
select DMA_ENGINE
help
Support the MMP PDMA engine for PXA and MMP platform.
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index b43255f914f3..8e5f7defa6b6 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -584,6 +584,25 @@ dw_edma_device_prep_interleaved_dma(struct dma_chan *dchan,
return dw_edma_device_transfer(&xfer);
}
+static void dw_hdma_set_callback_result(struct virt_dma_desc *vd,
+ enum dmaengine_tx_result result)
+{
+ u32 residue = 0;
+ struct dw_edma_desc *desc;
+ struct dmaengine_result *res;
+
+ if (!vd->tx.callback_result)
+ return;
+
+ desc = vd2dw_edma_desc(vd);
+ if (desc)
+ residue = desc->alloc_sz - desc->xfer_sz;
+
+ res = &vd->tx_result;
+ res->result = result;
+ res->residue = residue;
+}
+
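Usage note (illustrative, not from this patch): a peripheral driver that submitted the descriptor can observe abort and residue information by installing the callback_result variant of the completion callback:

	static void my_dma_done(void *param, const struct dmaengine_result *res)
	{
		if (res->result == DMA_TRANS_ABORTED)
			pr_warn("DMA aborted, %u bytes left\n", res->residue);
	}

	txd->callback_result = my_dma_done;	/* instead of txd->callback */
	txd->callback_param = my_ctx;		/* my_ctx: caller's cookie */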
static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
{
struct dw_edma_desc *desc;
@@ -597,6 +616,8 @@ static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
case EDMA_REQ_NONE:
desc = vd2dw_edma_desc(vd);
if (!desc->chunks_alloc) {
+ dw_hdma_set_callback_result(vd,
+ DMA_TRANS_NOERROR);
list_del(&vd->node);
vchan_cookie_complete(vd);
}
@@ -633,6 +654,7 @@ static void dw_edma_abort_interrupt(struct dw_edma_chan *chan)
spin_lock_irqsave(&chan->vc.lock, flags);
vd = vchan_next_desc(&chan->vc);
if (vd) {
+ dw_hdma_set_callback_result(vd, DMA_TRANS_ABORTED);
list_del(&vd->node);
vchan_cookie_complete(vd);
}
diff --git a/drivers/dma/dw/rzn1-dmamux.c b/drivers/dma/dw/rzn1-dmamux.c
index 4fb8508419db..deadf135681b 100644
--- a/drivers/dma/dw/rzn1-dmamux.c
+++ b/drivers/dma/dw/rzn1-dmamux.c
@@ -48,12 +48,16 @@ static void *rzn1_dmamux_route_allocate(struct of_phandle_args *dma_spec,
u32 mask;
int ret;
- if (dma_spec->args_count != RNZ1_DMAMUX_NCELLS)
- return ERR_PTR(-EINVAL);
+ if (dma_spec->args_count != RNZ1_DMAMUX_NCELLS) {
+ ret = -EINVAL;
+ goto put_device;
+ }
map = kzalloc(sizeof(*map), GFP_KERNEL);
- if (!map)
- return ERR_PTR(-ENOMEM);
+ if (!map) {
+ ret = -ENOMEM;
+ goto put_device;
+ }
chan = dma_spec->args[0];
map->req_idx = dma_spec->args[4];
@@ -94,12 +98,15 @@ static void *rzn1_dmamux_route_allocate(struct of_phandle_args *dma_spec,
if (ret)
goto clear_bitmap;
+ put_device(&pdev->dev);
return map;
clear_bitmap:
clear_bit(map->req_idx, dmamux->used_chans);
free_map:
kfree(map);
+put_device:
+ put_device(&pdev->dev);
return ERR_PTR(ret);
}
diff --git a/drivers/dma/idxd/defaults.c b/drivers/dma/idxd/defaults.c
index c607ae8dd12c..2bbbcd02a0da 100644
--- a/drivers/dma/idxd/defaults.c
+++ b/drivers/dma/idxd/defaults.c
@@ -36,12 +36,10 @@ int idxd_load_iaa_device_defaults(struct idxd_device *idxd)
group->num_wqs++;
/* set name to "iaa_crypto" */
- memset(wq->name, 0, WQ_NAME_SIZE + 1);
- strscpy(wq->name, "iaa_crypto", WQ_NAME_SIZE + 1);
+ strscpy_pad(wq->name, "iaa_crypto");
/* set driver_name to "crypto" */
- memset(wq->driver_name, 0, DRIVER_NAME_SIZE + 1);
- strscpy(wq->driver_name, "crypto", DRIVER_NAME_SIZE + 1);
+ strscpy_pad(wq->driver_name, "crypto");
engine = idxd->engines[0];
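For reference, the two-argument strscpy_pad() form infers the destination size from the array type, NUL-terminates, and zero-fills the tail, so it subsumes the old memset() + strscpy() pair. A minimal before/after sketch:

	char name[WQ_NAME_SIZE + 1];

	/* before: zero the buffer, then do a bounded copy */
	memset(name, 0, sizeof(name));
	strscpy(name, "iaa_crypto", sizeof(name));

	/* after: copy, NUL-terminate, zero-pad the remainder */
	strscpy_pad(name, "iaa_crypto");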
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 35bdefd3728b..2acc34b3daff 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -80,6 +80,8 @@ static struct pci_device_id idxd_pci_tbl[] = {
{ PCI_DEVICE_DATA(INTEL, IAA_DMR, &idxd_driver_data[IDXD_TYPE_IAX]) },
/* IAA PTL platforms */
{ PCI_DEVICE_DATA(INTEL, IAA_PTL, &idxd_driver_data[IDXD_TYPE_IAX]) },
+ /* IAA WCL platforms */
+ { PCI_DEVICE_DATA(INTEL, IAA_WCL, &idxd_driver_data[IDXD_TYPE_IAX]) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
@@ -189,27 +191,30 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
if (!idxd->wq_enable_map) {
rc = -ENOMEM;
- goto err_bitmap;
+ goto err_free_wqs;
}
for (i = 0; i < idxd->max_wqs; i++) {
wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
if (!wq) {
rc = -ENOMEM;
- goto err;
+ goto err_unwind;
}
idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ);
conf_dev = wq_confdev(wq);
wq->id = i;
wq->idxd = idxd;
- device_initialize(wq_confdev(wq));
+ device_initialize(conf_dev);
conf_dev->parent = idxd_confdev(idxd);
conf_dev->bus = &dsa_bus_type;
conf_dev->type = &idxd_wq_device_type;
rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
- if (rc < 0)
- goto err;
+ if (rc < 0) {
+ put_device(conf_dev);
+ kfree(wq);
+ goto err_unwind;
+ }
mutex_init(&wq->wq_lock);
init_waitqueue_head(&wq->err_queue);
@@ -220,15 +225,20 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
if (!wq->wqcfg) {
+ put_device(conf_dev);
+ kfree(wq);
rc = -ENOMEM;
- goto err;
+ goto err_unwind;
}
if (idxd->hw.wq_cap.op_config) {
wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
if (!wq->opcap_bmap) {
+ kfree(wq->wqcfg);
+ put_device(conf_dev);
+ kfree(wq);
rc = -ENOMEM;
- goto err_opcap_bmap;
+ goto err_unwind;
}
bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
}
@@ -239,13 +249,7 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
return 0;
-err_opcap_bmap:
- kfree(wq->wqcfg);
-
-err:
- put_device(conf_dev);
- kfree(wq);
-
+err_unwind:
while (--i >= 0) {
wq = idxd->wqs[i];
if (idxd->hw.wq_cap.op_config)
@@ -254,11 +258,10 @@ err:
conf_dev = wq_confdev(wq);
put_device(conf_dev);
kfree(wq);
-
}
bitmap_free(idxd->wq_enable_map);
-err_bitmap:
+err_free_wqs:
kfree(idxd->wqs);
return rc;
@@ -1291,10 +1294,12 @@ static void idxd_remove(struct pci_dev *pdev)
device_unregister(idxd_confdev(idxd));
idxd_shutdown(pdev);
idxd_device_remove_debugfs(idxd);
- idxd_cleanup(idxd);
+ perfmon_pmu_remove(idxd);
+ idxd_cleanup_interrupts(idxd);
+ if (device_pasid_enabled(idxd))
+ idxd_disable_system_pasid(idxd);
pci_iounmap(pdev, idxd->reg_base);
put_device(idxd_confdev(idxd));
- idxd_free(idxd);
pci_disable_device(pdev);
}
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index 9c1c546fe443..8dc2e8bca779 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -3,13 +3,18 @@
#ifndef _IDXD_REGISTERS_H_
#define _IDXD_REGISTERS_H_
+#ifdef __KERNEL__
#include <uapi/linux/idxd.h>
+#else
+#include <linux/idxd.h>
+#endif
/* PCI Config */
#define PCI_DEVICE_ID_INTEL_DSA_GNRD 0x11fb
#define PCI_DEVICE_ID_INTEL_DSA_DMR 0x1212
#define PCI_DEVICE_ID_INTEL_IAA_DMR 0x1216
#define PCI_DEVICE_ID_INTEL_IAA_PTL 0xb02d
+#define PCI_DEVICE_ID_INTEL_IAA_WCL 0xfd2d
#define DEVICE_VERSION_1 0x100
#define DEVICE_VERSION_2 0x200
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 02a85d6f1bea..ed9e56de5a9b 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -256,7 +256,7 @@ struct sdma_script_start_addrs {
/* End of v3 array */
union { s32 v3_end; s32 mcu_2_zqspi_addr; };
/* End of v4 array */
- s32 v4_end[0];
+ s32 v4_end[];
};
/*
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index a180171087a8..12a4a4860a74 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -19,6 +19,8 @@
#define IOAT_DMA_DCA_ANY_CPU ~0
+int system_has_dca_enabled(struct pci_dev *pdev);
+
#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, dma_dev)
#define to_dev(ioat_chan) (&(ioat_chan)->ioat_dma->pdev->dev)
#define to_pdev(ioat_chan) ((ioat_chan)->ioat_dma->pdev)
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index 79e4e4c09c18..0373c48520c9 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -63,9 +63,6 @@
#define IOAT_VER_3_3 0x33 /* Version 3.3 */
#define IOAT_VER_3_4 0x34 /* Version 3.4 */
-
-int system_has_dca_enabled(struct pci_dev *pdev);
-
#define IOAT_DESC_SZ 64
struct ioat_dma_descriptor {
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index a95d31103d30..d07229a74886 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -15,6 +15,8 @@
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
#include <linux/of_dma.h>
#include <linux/of.h>
@@ -23,9 +25,12 @@
#define DCSR 0x0000
#define DALGN 0x00a0
#define DINT 0x00f0
-#define DDADR 0x0200
+#define DDADR(n) (0x0200 + ((n) << 4))
#define DSADR(n) (0x0204 + ((n) << 4))
#define DTADR(n) (0x0208 + ((n) << 4))
+#define DDADRH(n) (0x0300 + ((n) << 4))
+#define DSADRH(n) (0x0304 + ((n) << 4))
+#define DTADRH(n) (0x0308 + ((n) << 4))
#define DCMD 0x020c
#define DCSR_RUN BIT(31) /* Run Bit (read / write) */
@@ -42,6 +47,7 @@
#define DCSR_EORSTOPEN BIT(26) /* STOP on an EOR */
#define DCSR_SETCMPST BIT(25) /* Set Descriptor Compare Status */
#define DCSR_CLRCMPST BIT(24) /* Clear Descriptor Compare Status */
+#define DCSR_LPAEEN BIT(21) /* Long Physical Address Extension Enable */
#define DCSR_CMPST BIT(10) /* The Descriptor Compare Status */
#define DCSR_EORINTR BIT(9) /* The end of Receive */
@@ -74,6 +80,16 @@ struct mmp_pdma_desc_hw {
u32 dsadr; /* DSADR value for the current transfer */
u32 dtadr; /* DTADR value for the current transfer */
u32 dcmd; /* DCMD value for the current transfer */
+ /*
+	 * The following 32-bit words are only used in 64-bit mode, i.e. when
+	 * LPAE (Long Physical Address Extension) is enabled.
+ * They are used to specify the high 32 bits of the descriptor's
+ * addresses.
+ */
+ u32 ddadrh; /* High 32-bit of DDADR */
+ u32 dsadrh; /* High 32-bit of DSADR */
+ u32 dtadrh; /* High 32-bit of DTADR */
+ u32 rsvd; /* reserved */
} __aligned(32);
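To make the comment above concrete (illustration only): in LPAE mode a 64-bit bus address is simply split across the low/high register pairs, e.g. for a destination address:

	dma_addr_t addr = 0x123456000ULL;

	desc->dtadr  = lower_32_bits(addr);	/* 0x23456000 */
	desc->dtadrh = upper_32_bits(addr);	/* 0x00000001 */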
struct mmp_pdma_desc_sw {
@@ -118,12 +134,55 @@ struct mmp_pdma_phy {
struct mmp_pdma_chan *vchan;
};
+/**
+ * struct mmp_pdma_ops - Operations for the MMP PDMA controller
+ *
+ * Hardware Register Operations (read/write hardware registers):
+ * @write_next_addr: Function to program address of next descriptor into
+ * DDADR/DDADRH
+ * @read_src_addr: Function to read the source address from DSADR/DSADRH
+ * @read_dst_addr: Function to read the destination address from DTADR/DTADRH
+ *
+ * Descriptor Memory Operations (manipulate descriptor structs in memory):
+ * @set_desc_next_addr: Function to set next descriptor address in descriptor
+ * @set_desc_src_addr: Function to set the source address in descriptor
+ * @set_desc_dst_addr: Function to set the destination address in descriptor
+ * @get_desc_src_addr: Function to get the source address from descriptor
+ * @get_desc_dst_addr: Function to get the destination address from descriptor
+ *
+ * Controller Configuration:
+ * @run_bits: Control bits in DCSR register for channel start/stop
+ * @dma_mask: DMA addressing capability of controller. 0 to use OF/platform
+ * settings, or explicit mask like DMA_BIT_MASK(32/64)
+ */
+struct mmp_pdma_ops {
+ /* Hardware Register Operations */
+ void (*write_next_addr)(struct mmp_pdma_phy *phy, dma_addr_t addr);
+ u64 (*read_src_addr)(struct mmp_pdma_phy *phy);
+ u64 (*read_dst_addr)(struct mmp_pdma_phy *phy);
+
+ /* Descriptor Memory Operations */
+ void (*set_desc_next_addr)(struct mmp_pdma_desc_hw *desc,
+ dma_addr_t addr);
+ void (*set_desc_src_addr)(struct mmp_pdma_desc_hw *desc,
+ dma_addr_t addr);
+ void (*set_desc_dst_addr)(struct mmp_pdma_desc_hw *desc,
+ dma_addr_t addr);
+ u64 (*get_desc_src_addr)(const struct mmp_pdma_desc_hw *desc);
+ u64 (*get_desc_dst_addr)(const struct mmp_pdma_desc_hw *desc);
+
+ /* Controller Configuration */
+ u32 run_bits;
+ u64 dma_mask;
+};
+
struct mmp_pdma_device {
int dma_channels;
void __iomem *base;
struct device *dev;
struct dma_device device;
struct mmp_pdma_phy *phy;
+ const struct mmp_pdma_ops *ops;
spinlock_t phy_lock; /* protect alloc/free phy channels */
};
@@ -136,24 +195,112 @@ struct mmp_pdma_device {
#define to_mmp_pdma_dev(dmadev) \
container_of(dmadev, struct mmp_pdma_device, device)
-static int mmp_pdma_config_write(struct dma_chan *dchan,
- struct dma_slave_config *cfg,
- enum dma_transfer_direction direction);
+/* For 32-bit PDMA */
+static void write_next_addr_32(struct mmp_pdma_phy *phy, dma_addr_t addr)
+{
+ writel(addr, phy->base + DDADR(phy->idx));
+}
+
+static u64 read_src_addr_32(struct mmp_pdma_phy *phy)
+{
+ return readl(phy->base + DSADR(phy->idx));
+}
+
+static u64 read_dst_addr_32(struct mmp_pdma_phy *phy)
+{
+ return readl(phy->base + DTADR(phy->idx));
+}
+
+static void set_desc_next_addr_32(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
+{
+ desc->ddadr = addr;
+}
+
+static void set_desc_src_addr_32(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
+{
+ desc->dsadr = addr;
+}
+
+static void set_desc_dst_addr_32(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
+{
+ desc->dtadr = addr;
+}
+
+static u64 get_desc_src_addr_32(const struct mmp_pdma_desc_hw *desc)
+{
+ return desc->dsadr;
+}
+
+static u64 get_desc_dst_addr_32(const struct mmp_pdma_desc_hw *desc)
+{
+ return desc->dtadr;
+}
+
+/* For 64-bit PDMA */
+static void write_next_addr_64(struct mmp_pdma_phy *phy, dma_addr_t addr)
+{
+ writel(lower_32_bits(addr), phy->base + DDADR(phy->idx));
+ writel(upper_32_bits(addr), phy->base + DDADRH(phy->idx));
+}
+
+static u64 read_src_addr_64(struct mmp_pdma_phy *phy)
+{
+ u32 low = readl(phy->base + DSADR(phy->idx));
+ u32 high = readl(phy->base + DSADRH(phy->idx));
+
+ return ((u64)high << 32) | low;
+}
-static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
+static u64 read_dst_addr_64(struct mmp_pdma_phy *phy)
{
- u32 reg = (phy->idx << 4) + DDADR;
+ u32 low = readl(phy->base + DTADR(phy->idx));
+ u32 high = readl(phy->base + DTADRH(phy->idx));
- writel(addr, phy->base + reg);
+ return ((u64)high << 32) | low;
}
+static void set_desc_next_addr_64(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
+{
+ desc->ddadr = lower_32_bits(addr);
+ desc->ddadrh = upper_32_bits(addr);
+}
+
+static void set_desc_src_addr_64(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
+{
+ desc->dsadr = lower_32_bits(addr);
+ desc->dsadrh = upper_32_bits(addr);
+}
+
+static void set_desc_dst_addr_64(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
+{
+ desc->dtadr = lower_32_bits(addr);
+ desc->dtadrh = upper_32_bits(addr);
+}
+
+static u64 get_desc_src_addr_64(const struct mmp_pdma_desc_hw *desc)
+{
+ return ((u64)desc->dsadrh << 32) | desc->dsadr;
+}
+
+static u64 get_desc_dst_addr_64(const struct mmp_pdma_desc_hw *desc)
+{
+ return ((u64)desc->dtadrh << 32) | desc->dtadr;
+}
+
+static int mmp_pdma_config_write(struct dma_chan *dchan,
+ struct dma_slave_config *cfg,
+ enum dma_transfer_direction direction);
+
static void enable_chan(struct mmp_pdma_phy *phy)
{
u32 reg, dalgn;
+ struct mmp_pdma_device *pdev;
if (!phy->vchan)
return;
+ pdev = to_mmp_pdma_dev(phy->vchan->chan.device);
+
reg = DRCMR(phy->vchan->drcmr);
writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);
@@ -165,18 +312,29 @@ static void enable_chan(struct mmp_pdma_phy *phy)
writel(dalgn, phy->base + DALGN);
reg = (phy->idx << 2) + DCSR;
- writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
+ writel(readl(phy->base + reg) | pdev->ops->run_bits,
+ phy->base + reg);
}
static void disable_chan(struct mmp_pdma_phy *phy)
{
- u32 reg;
+ u32 reg, dcsr;
if (!phy)
return;
reg = (phy->idx << 2) + DCSR;
- writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
+ dcsr = readl(phy->base + reg);
+
+ if (phy->vchan) {
+ struct mmp_pdma_device *pdev;
+
+ pdev = to_mmp_pdma_dev(phy->vchan->chan.device);
+ writel(dcsr & ~pdev->ops->run_bits, phy->base + reg);
+ } else {
+ /* If no vchan, just clear the RUN bit */
+ writel(dcsr & ~DCSR_RUN, phy->base + reg);
+ }
}
static int clear_chan_irq(struct mmp_pdma_phy *phy)
@@ -295,6 +453,7 @@ static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
static void start_pending_queue(struct mmp_pdma_chan *chan)
{
struct mmp_pdma_desc_sw *desc;
+ struct mmp_pdma_device *pdev = to_mmp_pdma_dev(chan->chan.device);
/* still in running, irq will start the pending list */
if (!chan->idle) {
@@ -329,7 +488,7 @@ static void start_pending_queue(struct mmp_pdma_chan *chan)
* Program the descriptor's address into the DMA controller,
* then start the DMA transaction
*/
- set_desc(chan->phy, desc->async_tx.phys);
+ pdev->ops->write_next_addr(chan->phy, desc->async_tx.phys);
enable_chan(chan->phy);
chan->idle = false;
}
@@ -445,15 +604,14 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
size_t len, unsigned long flags)
{
struct mmp_pdma_chan *chan;
+ struct mmp_pdma_device *pdev;
struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
size_t copy = 0;
- if (!dchan)
- return NULL;
-
- if (!len)
+ if (!dchan || !len)
return NULL;
+ pdev = to_mmp_pdma_dev(dchan->device);
chan = to_mmp_pdma_chan(dchan);
chan->byte_align = false;
@@ -476,13 +634,14 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
chan->byte_align = true;
new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
- new->desc.dsadr = dma_src;
- new->desc.dtadr = dma_dst;
+ pdev->ops->set_desc_src_addr(&new->desc, dma_src);
+ pdev->ops->set_desc_dst_addr(&new->desc, dma_dst);
if (!first)
first = new;
else
- prev->desc.ddadr = new->async_tx.phys;
+ pdev->ops->set_desc_next_addr(&prev->desc,
+ new->async_tx.phys);
new->async_tx.cookie = 0;
async_tx_ack(&new->async_tx);
@@ -526,6 +685,7 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
unsigned long flags, void *context)
{
struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+ struct mmp_pdma_device *pdev = to_mmp_pdma_dev(dchan->device);
struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
size_t len, avail;
struct scatterlist *sg;
@@ -557,17 +717,18 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
if (dir == DMA_MEM_TO_DEV) {
- new->desc.dsadr = addr;
+ pdev->ops->set_desc_src_addr(&new->desc, addr);
new->desc.dtadr = chan->dev_addr;
} else {
new->desc.dsadr = chan->dev_addr;
- new->desc.dtadr = addr;
+ pdev->ops->set_desc_dst_addr(&new->desc, addr);
}
if (!first)
first = new;
else
- prev->desc.ddadr = new->async_tx.phys;
+ pdev->ops->set_desc_next_addr(&prev->desc,
+ new->async_tx.phys);
new->async_tx.cookie = 0;
async_tx_ack(&new->async_tx);
@@ -607,12 +768,15 @@ mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
unsigned long flags)
{
struct mmp_pdma_chan *chan;
+ struct mmp_pdma_device *pdev;
struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
dma_addr_t dma_src, dma_dst;
if (!dchan || !len || !period_len)
return NULL;
+ pdev = to_mmp_pdma_dev(dchan->device);
+
/* the buffer length must be a multiple of period_len */
if (len % period_len != 0)
return NULL;
@@ -649,13 +813,14 @@ mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN |
(DCMD_LENGTH & period_len));
- new->desc.dsadr = dma_src;
- new->desc.dtadr = dma_dst;
+ pdev->ops->set_desc_src_addr(&new->desc, dma_src);
+ pdev->ops->set_desc_dst_addr(&new->desc, dma_dst);
if (!first)
first = new;
else
- prev->desc.ddadr = new->async_tx.phys;
+ pdev->ops->set_desc_next_addr(&prev->desc,
+ new->async_tx.phys);
new->async_tx.cookie = 0;
async_tx_ack(&new->async_tx);
@@ -676,7 +841,7 @@ mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
first->async_tx.cookie = -EBUSY;
/* make the cyclic link */
- new->desc.ddadr = first->async_tx.phys;
+ pdev->ops->set_desc_next_addr(&new->desc, first->async_tx.phys);
chan->cyclic_first = first;
return &first->async_tx;
@@ -762,7 +927,9 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
dma_cookie_t cookie)
{
struct mmp_pdma_desc_sw *sw;
- u32 curr, residue = 0;
+ struct mmp_pdma_device *pdev = to_mmp_pdma_dev(chan->chan.device);
+ u64 curr;
+ u32 residue = 0;
bool passed = false;
bool cyclic = chan->cyclic_first != NULL;
@@ -774,17 +941,18 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
return 0;
if (chan->dir == DMA_DEV_TO_MEM)
- curr = readl(chan->phy->base + DTADR(chan->phy->idx));
+ curr = pdev->ops->read_dst_addr(chan->phy);
else
- curr = readl(chan->phy->base + DSADR(chan->phy->idx));
+ curr = pdev->ops->read_src_addr(chan->phy);
list_for_each_entry(sw, &chan->chain_running, node) {
- u32 start, end, len;
+ u64 start, end;
+ u32 len;
if (chan->dir == DMA_DEV_TO_MEM)
- start = sw->desc.dtadr;
+ start = pdev->ops->get_desc_dst_addr(&sw->desc);
else
- start = sw->desc.dsadr;
+ start = pdev->ops->get_desc_src_addr(&sw->desc);
len = sw->desc.dcmd & DCMD_LENGTH;
end = start + len;
@@ -800,7 +968,7 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
if (passed) {
residue += len;
} else if (curr >= start && curr <= end) {
- residue += end - curr;
+ residue += (u32)(end - curr);
passed = true;
}
@@ -994,9 +1162,42 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
return 0;
}
+static const struct mmp_pdma_ops marvell_pdma_v1_ops = {
+ .write_next_addr = write_next_addr_32,
+ .read_src_addr = read_src_addr_32,
+ .read_dst_addr = read_dst_addr_32,
+ .set_desc_next_addr = set_desc_next_addr_32,
+ .set_desc_src_addr = set_desc_src_addr_32,
+ .set_desc_dst_addr = set_desc_dst_addr_32,
+ .get_desc_src_addr = get_desc_src_addr_32,
+ .get_desc_dst_addr = get_desc_dst_addr_32,
+ .run_bits = (DCSR_RUN),
+ .dma_mask = 0, /* let OF/platform set DMA mask */
+};
+
+static const struct mmp_pdma_ops spacemit_k1_pdma_ops = {
+ .write_next_addr = write_next_addr_64,
+ .read_src_addr = read_src_addr_64,
+ .read_dst_addr = read_dst_addr_64,
+ .set_desc_next_addr = set_desc_next_addr_64,
+ .set_desc_src_addr = set_desc_src_addr_64,
+ .set_desc_dst_addr = set_desc_dst_addr_64,
+ .get_desc_src_addr = get_desc_src_addr_64,
+ .get_desc_dst_addr = get_desc_dst_addr_64,
+ .run_bits = (DCSR_RUN | DCSR_LPAEEN),
+ .dma_mask = DMA_BIT_MASK(64), /* force 64-bit DMA addr capability */
+};
+
static const struct of_device_id mmp_pdma_dt_ids[] = {
- { .compatible = "marvell,pdma-1.0", },
- {}
+ {
+ .compatible = "marvell,pdma-1.0",
+ .data = &marvell_pdma_v1_ops
+ }, {
+ .compatible = "spacemit,k1-pdma",
+ .data = &spacemit_k1_pdma_ops
+ }, {
+ /* sentinel */
+ }
};
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);
@@ -1019,6 +1220,8 @@ static int mmp_pdma_probe(struct platform_device *op)
{
struct mmp_pdma_device *pdev;
struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
+ struct clk *clk;
+ struct reset_control *rst;
int i, ret, irq = 0;
int dma_channels = 0, irq_num = 0;
const enum dma_slave_buswidth widths =
@@ -1037,6 +1240,19 @@ static int mmp_pdma_probe(struct platform_device *op)
if (IS_ERR(pdev->base))
return PTR_ERR(pdev->base);
+ clk = devm_clk_get_optional_enabled(pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ rst = devm_reset_control_get_optional_exclusive_deasserted(pdev->dev,
+ NULL);
+ if (IS_ERR(rst))
+ return PTR_ERR(rst);
+
+ pdev->ops = of_device_get_match_data(&op->dev);
+ if (!pdev->ops)
+ return -ENODEV;
+
if (pdev->dev->of_node) {
/* Parse new and deprecated dma-channels properties */
if (of_property_read_u32(pdev->dev->of_node, "dma-channels",
@@ -1098,7 +1314,10 @@ static int mmp_pdma_probe(struct platform_device *op)
pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
- if (pdev->dev->coherent_dma_mask)
+ /* Set DMA mask based on ops->dma_mask, or OF/platform */
+ if (pdev->ops->dma_mask)
+ dma_set_mask(pdev->dev, pdev->ops->dma_mask);
+ else if (pdev->dev->coherent_dma_mask)
dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
else
dma_set_mask(pdev->dev, DMA_BIT_MASK(64));
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 1fdcb0f5c9e7..5e8386296046 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1013,7 +1013,7 @@ static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
dma_async_device_unregister(&mv_chan->dmadev);
- dma_free_coherent(dev, MV_XOR_POOL_SIZE,
+ dma_free_wc(dev, MV_XOR_POOL_SIZE,
mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
dma_unmap_single(dev, mv_chan->dummy_src_addr,
MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
@@ -1163,7 +1163,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
err_free_irq:
free_irq(mv_chan->irq, mv_chan);
err_free_dma:
- dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
+ dma_free_wc(&pdev->dev, MV_XOR_POOL_SIZE,
mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
err_unmap_dst:
dma_unmap_single(dma_dev->dev, mv_chan->dummy_dst_addr,
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 9d2a5a967a99..61500ad7c850 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -874,7 +874,7 @@ static int ppc440spe_dma2_pq_slot_count(dma_addr_t *srcs,
pr_err("%s: src_cnt=%d, state=%d, addr_count=%d, order=%lld\n",
__func__, src_cnt, state, addr_count, order);
for (i = 0; i < src_cnt; i++)
- pr_err("\t[%d] 0x%llx \n", i, srcs[i]);
+ pr_err("\t[%d] 0x%llx\n", i, srcs[i]);
BUG();
}
@@ -3636,7 +3636,7 @@ static void ppc440spe_adma_issue_pending(struct dma_chan *chan)
ppc440spe_chan = to_ppc440spe_adma_chan(chan);
dev_dbg(ppc440spe_chan->device->common.dev,
- "ppc440spe adma%d: %s %d \n", ppc440spe_chan->device->id,
+ "ppc440spe adma%d: %s %d\n", ppc440spe_chan->device->id,
__func__, ppc440spe_chan->pending);
if (ppc440spe_chan->pending) {
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index bbc3276992bb..2cf060174795 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -1283,13 +1283,17 @@ static int bam_dma_probe(struct platform_device *pdev)
if (!bdev->bamclk) {
ret = of_property_read_u32(pdev->dev.of_node, "num-channels",
&bdev->num_channels);
- if (ret)
+ if (ret) {
dev_err(bdev->dev, "num-channels unspecified in dt\n");
+ return ret;
+ }
ret = of_property_read_u32(pdev->dev.of_node, "qcom,num-ees",
&bdev->num_ees);
- if (ret)
+ if (ret) {
dev_err(bdev->dev, "num-ees unspecified in dt\n");
+ return ret;
+ }
}
ret = clk_prepare_enable(bdev->bamclk);
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 6b4fce453c85..834741adadaa 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -129,12 +129,25 @@ static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
const struct shdma_ops *ops = sdev->ops;
dev_dbg(schan->dev, "Bring up channel %d\n",
schan->id);
- /*
- * TODO: .xfer_setup() might fail on some platforms.
- * Make it int then, on error remove chunks from the
- * queue again
- */
- ops->setup_xfer(schan, schan->slave_id);
+
+ ret = ops->setup_xfer(schan, schan->slave_id);
+ if (ret < 0) {
+ dev_err(schan->dev, "setup_xfer failed: %d\n", ret);
+
+ /* Remove chunks from the queue and mark them as idle */
+ list_for_each_entry_safe(chunk, c, &schan->ld_queue, node) {
+ if (chunk->cookie == cookie) {
+ chunk->mark = DESC_IDLE;
+ list_move(&chunk->node, &schan->ld_free);
+ }
+ }
+
+ schan->pm_state = SHDMA_PM_ESTABLISHED;
+ ret = pm_runtime_put(schan->dev);
+
+ spin_unlock_irq(&schan->chan_lock);
+ return ret;
+ }
if (schan->pm_state == SHDMA_PM_PENDING)
shdma_chan_xfer_ld_queue(schan);
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 093e449e19ee..603e15102e45 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -300,21 +300,30 @@ static bool sh_dmae_channel_busy(struct shdma_chan *schan)
return dmae_is_busy(sh_chan);
}
-static void sh_dmae_setup_xfer(struct shdma_chan *schan,
- int slave_id)
+static int sh_dmae_setup_xfer(struct shdma_chan *schan, int slave_id)
{
struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
shdma_chan);
+ int ret = 0;
if (slave_id >= 0) {
const struct sh_dmae_slave_config *cfg =
sh_chan->config;
- dmae_set_dmars(sh_chan, cfg->mid_rid);
- dmae_set_chcr(sh_chan, cfg->chcr);
+ ret = dmae_set_dmars(sh_chan, cfg->mid_rid);
+ if (ret < 0)
+			goto out;
+
+ ret = dmae_set_chcr(sh_chan, cfg->chcr);
+ if (ret < 0)
+			goto out;
+
} else {
dmae_init(sh_chan);
}
+
+out:
+ return ret;
}
/*
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index 3ed406f08c44..552be71db6c4 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -2064,8 +2064,8 @@ static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
* priority. So Q0 is the highest priority queue and the last queue has
* the lowest priority.
*/
- queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8),
- GFP_KERNEL);
+ queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1,
+ sizeof(*queue_priority_map), GFP_KERNEL);
if (!queue_priority_map)
return -ENOMEM;
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index a34d8f0ceed8..fabff602065f 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -2173,6 +2173,99 @@ error:
}
/**
+ * xilinx_dma_prep_peripheral_dma_vec - prepare descriptors for a DMA_SLAVE
+ * transaction from DMA vectors
+ * @dchan: DMA channel
+ * @vecs: Array of DMA vectors that should be transferred
+ * @nb: number of entries in @vecs
+ * @direction: DMA direction
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *xilinx_dma_prep_peripheral_dma_vec(
+ struct dma_chan *dchan, const struct dma_vec *vecs, size_t nb,
+ enum dma_transfer_direction direction, unsigned long flags)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dma_tx_descriptor *desc;
+ struct xilinx_axidma_tx_segment *segment, *head, *prev = NULL;
+ size_t copy;
+ size_t sg_used;
+ unsigned int i;
+
+ if (!is_slave_direction(direction) || direction != chan->direction)
+ return NULL;
+
+ desc = xilinx_dma_alloc_tx_descriptor(chan);
+ if (!desc)
+ return NULL;
+
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+ /* Build transactions using information from DMA vectors */
+ for (i = 0; i < nb; i++) {
+ sg_used = 0;
+
+ /* Loop until the entire dma_vec entry is used */
+ while (sg_used < vecs[i].len) {
+ struct xilinx_axidma_desc_hw *hw;
+
+ /* Get a free segment */
+ segment = xilinx_axidma_alloc_tx_segment(chan);
+ if (!segment)
+ goto error;
+
+ /*
+ * Calculate the maximum number of bytes to transfer,
+ * making sure it is less than the hw limit
+ */
+ copy = xilinx_dma_calc_copysize(chan, vecs[i].len,
+ sg_used);
+ hw = &segment->hw;
+
+ /* Fill in the descriptor */
+ xilinx_axidma_buf(chan, hw, vecs[i].addr, sg_used, 0);
+ hw->control = copy;
+
+ if (prev)
+ prev->hw.next_desc = segment->phys;
+
+ prev = segment;
+ sg_used += copy;
+
+ /*
+ * Insert the segment into the descriptor segments
+ * list.
+ */
+ list_add_tail(&segment->node, &desc->segments);
+ }
+ }
+
+ head = list_first_entry(&desc->segments, struct xilinx_axidma_tx_segment, node);
+ desc->async_tx.phys = head->phys;
+
+	/* For DMA_MEM_TO_DEV, set SOP on the first and EOP on the last segment */
+	if (chan->direction == DMA_MEM_TO_DEV) {
+		head->hw.control |= XILINX_DMA_BD_SOP;
+ segment = list_last_entry(&desc->segments,
+ struct xilinx_axidma_tx_segment,
+ node);
+ segment->hw.control |= XILINX_DMA_BD_EOP;
+ }
+
+ if (chan->xdev->has_axistream_connected)
+ desc->async_tx.metadata_ops = &xilinx_dma_metadata_ops;
+
+ return &desc->async_tx;
+
+error:
+ xilinx_dma_free_tx_descriptor(chan, desc);
+ return NULL;
+}
+
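Consumers reach this through the dmaengine core. A hedged caller-side sketch (the buffer layout is illustrative) using the generic dmaengine_prep_peripheral_dma_vec() wrapper:

	struct dma_vec vecs[] = {
		{ .addr = buf_phys,         .len = SZ_4K },
		{ .addr = buf_phys + SZ_8K, .len = SZ_4K },
	};
	struct dma_async_tx_descriptor *txd;

	txd = dmaengine_prep_peripheral_dma_vec(chan, vecs, ARRAY_SIZE(vecs),
						DMA_MEM_TO_DEV,
						DMA_PREP_INTERRUPT);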
+/**
* xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
* @dchan: DMA channel
* @sgl: scatterlist to transfer to/from
@@ -3180,6 +3273,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
xdev->common.device_config = xilinx_dma_device_config;
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
+ xdev->common.device_prep_peripheral_dma_vec = xilinx_dma_prep_peripheral_dma_vec;
xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
xdev->common.device_prep_dma_cyclic =
xilinx_dma_prep_dma_cyclic;
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index d05fc5fcc77d..f7e584de4335 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -1173,9 +1173,9 @@ static void zynqmp_dma_remove(struct platform_device *pdev)
dma_async_device_unregister(&zdev->common);
zynqmp_dma_chan_remove(zdev->chan);
- pm_runtime_disable(zdev->dev);
- if (!pm_runtime_enabled(zdev->dev))
+ if (pm_runtime_active(zdev->dev))
zynqmp_dma_runtime_suspend(zdev->dev);
+ pm_runtime_disable(zdev->dev);
}
static const struct of_device_id zynqmp_dma_of_match[] = {
@@ -1193,6 +1193,7 @@ static struct platform_driver zynqmp_dma_driver = {
},
.probe = zynqmp_dma_probe,
.remove = zynqmp_dma_remove,
+ .shutdown = zynqmp_dma_remove,
};
module_platform_driver(zynqmp_dma_driver);
diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c
index 036f21cac0a9..74c1f0ca95f2 100644
--- a/drivers/dpll/dpll_netlink.c
+++ b/drivers/dpll/dpll_netlink.c
@@ -165,6 +165,27 @@ dpll_msg_add_phase_offset_monitor(struct sk_buff *msg, struct dpll_device *dpll,
}
static int
+dpll_msg_add_phase_offset_avg_factor(struct sk_buff *msg,
+ struct dpll_device *dpll,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_device_ops *ops = dpll_device_ops(dpll);
+ u32 factor;
+ int ret;
+
+ if (ops->phase_offset_avg_factor_get) {
+ ret = ops->phase_offset_avg_factor_get(dpll, dpll_priv(dpll),
+ &factor, extack);
+ if (ret)
+ return ret;
+ if (nla_put_u32(msg, DPLL_A_PHASE_OFFSET_AVG_FACTOR, factor))
+ return -EMSGSIZE;
+ }
+
+ return 0;
+}
+
+static int
dpll_msg_add_lock_status(struct sk_buff *msg, struct dpll_device *dpll,
struct netlink_ext_ack *extack)
{
@@ -211,8 +232,8 @@ static int
dpll_msg_add_clock_quality_level(struct sk_buff *msg, struct dpll_device *dpll,
struct netlink_ext_ack *extack)
{
+ DECLARE_BITMAP(qls, DPLL_CLOCK_QUALITY_LEVEL_MAX + 1) = { 0 };
const struct dpll_device_ops *ops = dpll_device_ops(dpll);
- DECLARE_BITMAP(qls, DPLL_CLOCK_QUALITY_LEVEL_MAX) = { 0 };
enum dpll_clock_quality_level ql;
int ret;
@@ -221,7 +242,7 @@ dpll_msg_add_clock_quality_level(struct sk_buff *msg, struct dpll_device *dpll,
ret = ops->clock_quality_level_get(dpll, dpll_priv(dpll), qls, extack);
if (ret)
return ret;
- for_each_set_bit(ql, qls, DPLL_CLOCK_QUALITY_LEVEL_MAX)
+ for_each_set_bit(ql, qls, DPLL_CLOCK_QUALITY_LEVEL_MAX + 1)
if (nla_put_u32(msg, DPLL_A_CLOCK_QUALITY_LEVEL, ql))
return -EMSGSIZE;
@@ -677,6 +698,9 @@ dpll_device_get_one(struct dpll_device *dpll, struct sk_buff *msg,
ret = dpll_msg_add_phase_offset_monitor(msg, dpll, extack);
if (ret)
return ret;
+ ret = dpll_msg_add_phase_offset_avg_factor(msg, dpll, extack);
+ if (ret)
+ return ret;
return 0;
}
@@ -840,6 +864,23 @@ dpll_phase_offset_monitor_set(struct dpll_device *dpll, struct nlattr *a,
}
static int
+dpll_phase_offset_avg_factor_set(struct dpll_device *dpll, struct nlattr *a,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_device_ops *ops = dpll_device_ops(dpll);
+ u32 factor = nla_get_u32(a);
+
+ if (!ops->phase_offset_avg_factor_set) {
+ NL_SET_ERR_MSG_ATTR(extack, a,
+ "device not capable of changing phase offset average factor");
+ return -EOPNOTSUPP;
+ }
+
+ return ops->phase_offset_avg_factor_set(dpll, dpll_priv(dpll), factor,
+ extack);
+}
+
+static int
dpll_pin_freq_set(struct dpll_pin *pin, struct nlattr *a,
struct netlink_ext_ack *extack)
{
@@ -1736,14 +1777,25 @@ int dpll_nl_device_get_doit(struct sk_buff *skb, struct genl_info *info)
static int
dpll_set_from_nlattr(struct dpll_device *dpll, struct genl_info *info)
{
- int ret;
-
- if (info->attrs[DPLL_A_PHASE_OFFSET_MONITOR]) {
- struct nlattr *a = info->attrs[DPLL_A_PHASE_OFFSET_MONITOR];
+ struct nlattr *a;
+ int rem, ret;
- ret = dpll_phase_offset_monitor_set(dpll, a, info->extack);
- if (ret)
- return ret;
+ nla_for_each_attr(a, genlmsg_data(info->genlhdr),
+ genlmsg_len(info->genlhdr), rem) {
+ switch (nla_type(a)) {
+ case DPLL_A_PHASE_OFFSET_MONITOR:
+ ret = dpll_phase_offset_monitor_set(dpll, a,
+ info->extack);
+ if (ret)
+ return ret;
+ break;
+ case DPLL_A_PHASE_OFFSET_AVG_FACTOR:
+ ret = dpll_phase_offset_avg_factor_set(dpll, a,
+ info->extack);
+ if (ret)
+ return ret;
+ break;
+ }
}
return 0;
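With the set handler now walking every attribute in the request, exposing the new knob from a driver is just a matter of filling both ops; the capability check and netlink plumbing above do the rest. A hypothetical driver hook-up (the foo_* names are illustrative):

	static const struct dpll_device_ops foo_dpll_device_ops = {
		.lock_status_get             = foo_lock_status_get,
		.mode_get                    = foo_mode_get,
		/* leave these NULL if the device cannot change the factor */
		.phase_offset_avg_factor_get = foo_avg_factor_get,
		.phase_offset_avg_factor_set = foo_avg_factor_set,
	};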
diff --git a/drivers/dpll/dpll_nl.c b/drivers/dpll/dpll_nl.c
index 9f2efaf25268..3c6d570babf8 100644
--- a/drivers/dpll/dpll_nl.c
+++ b/drivers/dpll/dpll_nl.c
@@ -42,9 +42,10 @@ static const struct nla_policy dpll_device_get_nl_policy[DPLL_A_ID + 1] = {
};
/* DPLL_CMD_DEVICE_SET - do */
-static const struct nla_policy dpll_device_set_nl_policy[DPLL_A_PHASE_OFFSET_MONITOR + 1] = {
+static const struct nla_policy dpll_device_set_nl_policy[DPLL_A_PHASE_OFFSET_AVG_FACTOR + 1] = {
[DPLL_A_ID] = { .type = NLA_U32, },
[DPLL_A_PHASE_OFFSET_MONITOR] = NLA_POLICY_MAX(NLA_U32, 1),
+ [DPLL_A_PHASE_OFFSET_AVG_FACTOR] = { .type = NLA_U32, },
};
/* DPLL_CMD_PIN_ID_GET - do */
@@ -112,7 +113,7 @@ static const struct genl_split_ops dpll_nl_ops[] = {
.doit = dpll_nl_device_set_doit,
.post_doit = dpll_post_doit,
.policy = dpll_device_set_nl_policy,
- .maxattr = DPLL_A_PHASE_OFFSET_MONITOR,
+ .maxattr = DPLL_A_PHASE_OFFSET_AVG_FACTOR,
.flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
},
{
diff --git a/drivers/dpll/zl3073x/Kconfig b/drivers/dpll/zl3073x/Kconfig
index 7db262ab8458..5bbca1400581 100644
--- a/drivers/dpll/zl3073x/Kconfig
+++ b/drivers/dpll/zl3073x/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config ZL3073X
- tristate "Microchip Azurite DPLL/PTP/SyncE devices"
+ tristate "Microchip Azurite DPLL/PTP/SyncE devices" if COMPILE_TEST
depends on NET
select DPLL
select NET_DEVLINK
@@ -16,9 +16,9 @@ config ZL3073X
config ZL3073X_I2C
tristate "I2C bus implementation for Microchip Azurite devices"
- depends on I2C && ZL3073X
+ depends on I2C && NET
select REGMAP_I2C
- default m
+ select ZL3073X
help
This is I2C bus implementation for Microchip Azurite DPLL/PTP/SyncE
devices.
@@ -28,9 +28,9 @@ config ZL3073X_I2C
config ZL3073X_SPI
tristate "SPI bus implementation for Microchip Azurite devices"
- depends on SPI && ZL3073X
+ depends on NET && SPI
select REGMAP_SPI
- default m
+ select ZL3073X
help
This is SPI bus implementation for Microchip Azurite DPLL/PTP/SyncE
devices.
diff --git a/drivers/dpll/zl3073x/Makefile b/drivers/dpll/zl3073x/Makefile
index c3e2f02f319d..84e22aae57e5 100644
--- a/drivers/dpll/zl3073x/Makefile
+++ b/drivers/dpll/zl3073x/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_ZL3073X) += zl3073x.o
-zl3073x-objs := core.o devlink.o dpll.o prop.o
+zl3073x-objs := core.o devlink.o dpll.o flash.o fw.o prop.o
obj-$(CONFIG_ZL3073X_I2C) += zl3073x_i2c.o
zl3073x_i2c-objs := i2c.o
diff --git a/drivers/dpll/zl3073x/core.c b/drivers/dpll/zl3073x/core.c
index 7ebcfc5ec1f0..092e7027948a 100644
--- a/drivers/dpll/zl3073x/core.c
+++ b/drivers/dpll/zl3073x/core.c
@@ -95,9 +95,9 @@ EXPORT_SYMBOL_NS_GPL(zl30735_chip_info, "ZL3073X");
#define ZL_RANGE_OFFSET 0x80
#define ZL_PAGE_SIZE 0x80
-#define ZL_NUM_PAGES 15
+#define ZL_NUM_PAGES 256
#define ZL_PAGE_SEL 0x7F
-#define ZL_PAGE_SEL_MASK GENMASK(3, 0)
+#define ZL_PAGE_SEL_MASK GENMASK(7, 0)
#define ZL_NUM_REGS (ZL_NUM_PAGES * ZL_PAGE_SIZE)
/* Regmap range configuration */
@@ -174,9 +174,10 @@ static bool
zl3073x_check_reg(struct zl3073x_dev *zldev, unsigned int reg, size_t size)
{
/* Check that multiop lock is held when accessing registers
- * from page 10 and above.
+	 * from page 10 and above, except page 255, which does not
+	 * need this protection.
*/
- if (ZL_REG_PAGE(reg) >= 10)
+ if (ZL_REG_PAGE(reg) >= 10 && ZL_REG_PAGE(reg) < 255)
lockdep_assert_held(&zldev->multiop_lock);
/* Check the index is in valid range for indexed register */
@@ -447,6 +448,152 @@ int zl3073x_mb_op(struct zl3073x_dev *zldev, unsigned int op_reg, u8 op_val,
}
/**
+ * zl3073x_do_hwreg_op - Perform HW register read/write operation
+ * @zldev: zl3073x device pointer
+ * @op: operation to perform
+ *
+ * Performs requested operation and waits for its completion.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_do_hwreg_op(struct zl3073x_dev *zldev, u8 op)
+{
+ int rc;
+
+ /* Set requested operation and set pending bit */
+ rc = zl3073x_write_u8(zldev, ZL_REG_HWREG_OP, op | ZL_HWREG_OP_PENDING);
+ if (rc)
+ return rc;
+
+ /* Poll for completion - pending bit cleared */
+ return zl3073x_poll_zero_u8(zldev, ZL_REG_HWREG_OP,
+ ZL_HWREG_OP_PENDING);
+}
+
+/**
+ * zl3073x_read_hwreg - Read HW register
+ * @zldev: zl3073x device pointer
+ * @addr: HW register address
+ * @value: Value of the HW register
+ *
+ * Reads HW register value and stores it into @value.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_read_hwreg(struct zl3073x_dev *zldev, u32 addr, u32 *value)
+{
+ int rc;
+
+ /* Set address to read data from */
+ rc = zl3073x_write_u32(zldev, ZL_REG_HWREG_ADDR, addr);
+ if (rc)
+ return rc;
+
+ /* Perform the read operation */
+ rc = zl3073x_do_hwreg_op(zldev, ZL_HWREG_OP_READ);
+ if (rc)
+ return rc;
+
+ /* Read the received data */
+ return zl3073x_read_u32(zldev, ZL_REG_HWREG_READ_DATA, value);
+}
+
+/**
+ * zl3073x_write_hwreg - Write value to HW register
+ * @zldev: zl3073x device pointer
+ * @addr: HW registers address
+ * @value: Value to be written to HW register
+ *
+ * Stores the requested value into HW register.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_write_hwreg(struct zl3073x_dev *zldev, u32 addr, u32 value)
+{
+ int rc;
+
+ /* Set address to write data to */
+ rc = zl3073x_write_u32(zldev, ZL_REG_HWREG_ADDR, addr);
+ if (rc)
+ return rc;
+
+ /* Set data to be written */
+ rc = zl3073x_write_u32(zldev, ZL_REG_HWREG_WRITE_DATA, value);
+ if (rc)
+ return rc;
+
+ /* Perform the write operation */
+ return zl3073x_do_hwreg_op(zldev, ZL_HWREG_OP_WRITE);
+}
+
+/**
+ * zl3073x_update_hwreg - Update certain bits in HW register
+ * @zldev: zl3073x device pointer
+ * @addr: HW register address
+ * @value: Value to be written into HW register
+ * @mask: Bitmask indicating bits to be updated
+ *
+ * Reads given HW register, updates requested bits specified by value and
+ * mask and writes result back to HW register.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_update_hwreg(struct zl3073x_dev *zldev, u32 addr, u32 value,
+ u32 mask)
+{
+ u32 tmp;
+ int rc;
+
+ rc = zl3073x_read_hwreg(zldev, addr, &tmp);
+ if (rc)
+ return rc;
+
+ tmp &= ~mask;
+ tmp |= value & mask;
+
+ return zl3073x_write_hwreg(zldev, addr, tmp);
+}
+
+/**
+ * zl3073x_write_hwreg_seq - Write HW registers sequence
+ * @zldev: pointer to device structure
+ * @seq: pointer to first sequence item
+ * @num_items: number of items in sequence
+ *
+ * Writes given HW registers sequence.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_write_hwreg_seq(struct zl3073x_dev *zldev,
+ const struct zl3073x_hwreg_seq_item *seq,
+ size_t num_items)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < num_items; i++) {
+ dev_dbg(zldev->dev, "Write 0x%0x [0x%0x] to 0x%0x",
+ seq[i].value, seq[i].mask, seq[i].addr);
+
+ if (seq[i].mask == U32_MAX)
+ /* Write value directly */
+ rc = zl3073x_write_hwreg(zldev, seq[i].addr,
+ seq[i].value);
+ else
+ /* Update only bits specified by the mask */
+ rc = zl3073x_update_hwreg(zldev, seq[i].addr,
+ seq[i].value, seq[i].mask);
+ if (rc)
+ return rc;
+
+		if (seq[i].wait)
+			msleep(seq[i].wait);
+ }
+
+ return rc;
+}
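A usage sketch (addresses and values are illustrative): with the HWREG_SEQ_ITEM() helper added to core.h later in this patch, a U32_MAX mask requests a full-word write, any narrower mask becomes a read-modify-write, and the last argument adds an optional settle delay in milliseconds:

	static const struct zl3073x_hwreg_seq_item demo_seq[] = {
		HWREG_SEQ_ITEM(0x10000000, 1, BIT(2), 0),     /* set one bit */
		HWREG_SEQ_ITEM(0x10000024, 0x1, U32_MAX, 10), /* full write */
	};

	rc = zl3073x_write_hwreg_seq(zldev, demo_seq, ARRAY_SIZE(demo_seq));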
+
+/**
* zl3073x_ref_state_fetch - get input reference state
* @zldev: pointer to zl3073x_dev structure
* @index: input reference index to fetch state for
@@ -809,21 +956,169 @@ zl3073x_dev_periodic_work(struct kthread_work *work)
msecs_to_jiffies(500));
}
+int zl3073x_dev_phase_avg_factor_set(struct zl3073x_dev *zldev, u8 factor)
+{
+ u8 dpll_meas_ctrl, value;
+ int rc;
+
+ /* Read DPLL phase measurement control register */
+ rc = zl3073x_read_u8(zldev, ZL_REG_DPLL_MEAS_CTRL, &dpll_meas_ctrl);
+ if (rc)
+ return rc;
+
+ /* Convert requested factor to register value */
+ value = (factor + 1) & 0x0f;
+
+ /* Update phase measurement control register */
+ dpll_meas_ctrl &= ~ZL_DPLL_MEAS_CTRL_AVG_FACTOR;
+ dpll_meas_ctrl |= FIELD_PREP(ZL_DPLL_MEAS_CTRL_AVG_FACTOR, value);
+ rc = zl3073x_write_u8(zldev, ZL_REG_DPLL_MEAS_CTRL, dpll_meas_ctrl);
+ if (rc)
+ return rc;
+
+ /* Save the new factor */
+ zldev->phase_avg_factor = factor;
+
+ return 0;
+}
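Note how the conversion keeps the previous behaviour: the register field stores factor + 1 (masked to 4 bits), so the new default factor of 2 assigned in zl3073x_dev_probe() below programs field value 3, exactly what the removed zl3073x_dev_phase_meas_setup() used to hard-code.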
+
+/**
+ * zl3073x_dev_phase_meas_setup - setup phase offset measurement
+ * @zldev: pointer to zl3073x_dev structure
+ *
+ * Enable phase offset measurement block, set measurement averaging factor
+ * and enable DPLL-to-its-ref phase measurement for all DPLLs.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_dev_phase_meas_setup(struct zl3073x_dev *zldev)
+{
+ struct zl3073x_dpll *zldpll;
+ u8 dpll_meas_ctrl, mask = 0;
+ int rc;
+
+ /* Setup phase measurement averaging factor */
+ rc = zl3073x_dev_phase_avg_factor_set(zldev, zldev->phase_avg_factor);
+ if (rc)
+ return rc;
+
+ /* Read DPLL phase measurement control register */
+ rc = zl3073x_read_u8(zldev, ZL_REG_DPLL_MEAS_CTRL, &dpll_meas_ctrl);
+ if (rc)
+ return rc;
+
+ /* Enable DPLL measurement block */
+ dpll_meas_ctrl |= ZL_DPLL_MEAS_CTRL_EN;
+
+ /* Update phase measurement control register */
+ rc = zl3073x_write_u8(zldev, ZL_REG_DPLL_MEAS_CTRL, dpll_meas_ctrl);
+ if (rc)
+ return rc;
+
+ /* Enable DPLL-to-connected-ref measurement for each channel */
+ list_for_each_entry(zldpll, &zldev->dplls, list)
+ mask |= BIT(zldpll->id);
+
+ return zl3073x_write_u8(zldev, ZL_REG_DPLL_PHASE_ERR_READ_MASK, mask);
+}
+
+/**
+ * zl3073x_dev_start - Start normal operation
+ * @zldev: zl3073x device pointer
+ * @full: perform full initialization
+ *
+ * The function starts normal operation, which means registering all DPLLs and
+ * their pins, and starting monitoring. If full initialization is requested,
+ * the function additionally initializes the phase offset measurement block and
+ * fetches hardware-invariant parameters.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_dev_start(struct zl3073x_dev *zldev, bool full)
+{
+ struct zl3073x_dpll *zldpll;
+ int rc;
+
+ if (full) {
+ /* Fetch device state */
+ rc = zl3073x_dev_state_fetch(zldev);
+ if (rc)
+ return rc;
+
+ /* Setup phase offset measurement block */
+ rc = zl3073x_dev_phase_meas_setup(zldev);
+ if (rc) {
+ dev_err(zldev->dev,
+ "Failed to setup phase measurement\n");
+ return rc;
+ }
+ }
+
+ /* Register all DPLLs */
+ list_for_each_entry(zldpll, &zldev->dplls, list) {
+ rc = zl3073x_dpll_register(zldpll);
+ if (rc) {
+ dev_err_probe(zldev->dev, rc,
+ "Failed to register DPLL%u\n",
+ zldpll->id);
+ return rc;
+ }
+ }
+
+ /* Perform initial firmware fine phase correction */
+ rc = zl3073x_dpll_init_fine_phase_adjust(zldev);
+ if (rc) {
+ dev_err_probe(zldev->dev, rc,
+ "Failed to init fine phase correction\n");
+ return rc;
+ }
+
+ /* Start monitoring */
+ kthread_queue_delayed_work(zldev->kworker, &zldev->work, 0);
+
+ return 0;
+}
+
+/**
+ * zl3073x_dev_stop - Stop normal operation
+ * @zldev: zl3073x device pointer
+ *
+ * The function stops normal operation, which means unregistering all DPLLs
+ * and their pins and stopping the monitoring work.
+ */
+void zl3073x_dev_stop(struct zl3073x_dev *zldev)
+{
+ struct zl3073x_dpll *zldpll;
+
+ /* Stop monitoring */
+ kthread_cancel_delayed_work_sync(&zldev->work);
+
+ /* Unregister all DPLLs */
+ list_for_each_entry(zldpll, &zldev->dplls, list) {
+ if (zldpll->dpll_dev)
+ zl3073x_dpll_unregister(zldpll);
+ }
+}
+
static void zl3073x_dev_dpll_fini(void *ptr)
{
struct zl3073x_dpll *zldpll, *next;
struct zl3073x_dev *zldev = ptr;
- /* Stop monitoring thread */
+ /* Stop monitoring and unregister DPLLs */
+ zl3073x_dev_stop(zldev);
+
+ /* Destroy monitoring thread */
if (zldev->kworker) {
- kthread_cancel_delayed_work_sync(&zldev->work);
kthread_destroy_worker(zldev->kworker);
zldev->kworker = NULL;
}
- /* Release DPLLs */
+ /* Free all DPLLs */
list_for_each_entry_safe(zldpll, next, &zldev->dplls, list) {
- zl3073x_dpll_unregister(zldpll);
list_del(&zldpll->list);
zl3073x_dpll_free(zldpll);
}
@@ -839,7 +1134,7 @@ zl3073x_devm_dpll_init(struct zl3073x_dev *zldev, u8 num_dplls)
INIT_LIST_HEAD(&zldev->dplls);
- /* Initialize all DPLLs */
+ /* Allocate all DPLLs */
for (i = 0; i < num_dplls; i++) {
zldpll = zl3073x_dpll_alloc(zldev, i);
if (IS_ERR(zldpll)) {
@@ -849,25 +1144,9 @@ zl3073x_devm_dpll_init(struct zl3073x_dev *zldev, u8 num_dplls)
goto error;
}
- rc = zl3073x_dpll_register(zldpll);
- if (rc) {
- dev_err_probe(zldev->dev, rc,
- "Failed to register DPLL%u\n", i);
- zl3073x_dpll_free(zldpll);
- goto error;
- }
-
list_add_tail(&zldpll->list, &zldev->dplls);
}
- /* Perform initial firmware fine phase correction */
- rc = zl3073x_dpll_init_fine_phase_adjust(zldev);
- if (rc) {
- dev_err_probe(zldev->dev, rc,
- "Failed to init fine phase correction\n");
- goto error;
- }
-
/* Initialize monitoring thread */
kthread_init_delayed_work(&zldev->work, zl3073x_dev_periodic_work);
kworker = kthread_run_worker(0, "zl3073x-%s", dev_name(zldev->dev));
@@ -875,9 +1154,14 @@ zl3073x_devm_dpll_init(struct zl3073x_dev *zldev, u8 num_dplls)
rc = PTR_ERR(kworker);
goto error;
}
-
zldev->kworker = kworker;
- kthread_queue_delayed_work(zldev->kworker, &zldev->work, 0);
+
+ /* Start normal operation */
+ rc = zl3073x_dev_start(zldev, true);
+ if (rc) {
+ dev_err_probe(zldev->dev, rc, "Failed to start device\n");
+ goto error;
+ }
/* Add devres action to release DPLL related resources */
rc = devm_add_action_or_reset(zldev->dev, zl3073x_dev_dpll_fini, zldev);
@@ -893,46 +1177,6 @@ error:
}
/**
- * zl3073x_dev_phase_meas_setup - setup phase offset measurement
- * @zldev: pointer to zl3073x_dev structure
- * @num_channels: number of DPLL channels
- *
- * Enable phase offset measurement block, set measurement averaging factor
- * and enable DPLL-to-its-ref phase measurement for all DPLLs.
- *
- * Returns: 0 on success, <0 on error
- */
-static int
-zl3073x_dev_phase_meas_setup(struct zl3073x_dev *zldev, int num_channels)
-{
- u8 dpll_meas_ctrl, mask;
- int i, rc;
-
- /* Read DPLL phase measurement control register */
- rc = zl3073x_read_u8(zldev, ZL_REG_DPLL_MEAS_CTRL, &dpll_meas_ctrl);
- if (rc)
- return rc;
-
- /* Setup phase measurement averaging factor */
- dpll_meas_ctrl &= ~ZL_DPLL_MEAS_CTRL_AVG_FACTOR;
- dpll_meas_ctrl |= FIELD_PREP(ZL_DPLL_MEAS_CTRL_AVG_FACTOR, 3);
-
- /* Enable DPLL measurement block */
- dpll_meas_ctrl |= ZL_DPLL_MEAS_CTRL_EN;
-
- /* Update phase measurement control register */
- rc = zl3073x_write_u8(zldev, ZL_REG_DPLL_MEAS_CTRL, dpll_meas_ctrl);
- if (rc)
- return rc;
-
- /* Enable DPLL-to-connected-ref measurement for each channel */
- for (i = 0, mask = 0; i < num_channels; i++)
- mask |= BIT(i);
-
- return zl3073x_write_u8(zldev, ZL_REG_DPLL_PHASE_ERR_READ_MASK, mask);
-}
-
-/**
* zl3073x_dev_probe - initialize zl3073x device
* @zldev: pointer to zl3073x device
* @chip_info: chip info based on compatible
@@ -991,6 +1235,9 @@ int zl3073x_dev_probe(struct zl3073x_dev *zldev,
*/
zldev->clock_id = get_random_u64();
+ /* Default phase offset averaging factor */
+ zldev->phase_avg_factor = 2;
+
/* Initialize mutex for operations where multiple reads, writes
* and/or polls are required to be done atomically.
*/
@@ -999,17 +1246,6 @@ int zl3073x_dev_probe(struct zl3073x_dev *zldev,
return dev_err_probe(zldev->dev, rc,
"Failed to initialize mutex\n");
- /* Fetch device state */
- rc = zl3073x_dev_state_fetch(zldev);
- if (rc)
- return rc;
-
- /* Setup phase offset measurement block */
- rc = zl3073x_dev_phase_meas_setup(zldev, chip_info->num_channels);
- if (rc)
- return dev_err_probe(zldev->dev, rc,
- "Failed to setup phase measurement\n");
-
/* Register DPLL channels */
rc = zl3073x_devm_dpll_init(zldev, chip_info->num_channels);
if (rc)
diff --git a/drivers/dpll/zl3073x/core.h b/drivers/dpll/zl3073x/core.h
index 71af2c800110..1dca4ddcf235 100644
--- a/drivers/dpll/zl3073x/core.h
+++ b/drivers/dpll/zl3073x/core.h
@@ -3,6 +3,7 @@
#ifndef _ZL3073X_CORE_H
#define _ZL3073X_CORE_H
+#include <linux/bitfield.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -67,19 +68,19 @@ struct zl3073x_synth {
* @dev: pointer to device
* @regmap: regmap to access device registers
* @multiop_lock: to serialize multiple register operations
- * @clock_id: clock id of the device
* @ref: array of input references' invariants
* @out: array of outs' invariants
* @synth: array of synths' invariants
* @dplls: list of DPLLs
* @kworker: thread for periodic work
* @work: periodic work
+ * @clock_id: clock id of the device
+ * @phase_avg_factor: phase offset measurement averaging factor
*/
struct zl3073x_dev {
struct device *dev;
struct regmap *regmap;
struct mutex multiop_lock;
- u64 clock_id;
/* Invariants */
struct zl3073x_ref ref[ZL3073X_NUM_REFS];
@@ -92,6 +93,10 @@ struct zl3073x_dev {
/* Monitor */
struct kthread_worker *kworker;
struct kthread_delayed_work work;
+
+ /* Devlink parameters */
+ u64 clock_id;
+ u8 phase_avg_factor;
};
struct zl3073x_chip_info {
@@ -111,10 +116,42 @@ struct zl3073x_dev *zl3073x_devm_alloc(struct device *dev);
int zl3073x_dev_probe(struct zl3073x_dev *zldev,
const struct zl3073x_chip_info *chip_info);
+int zl3073x_dev_start(struct zl3073x_dev *zldev, bool full);
+void zl3073x_dev_stop(struct zl3073x_dev *zldev);
+
+static inline u8 zl3073x_dev_phase_avg_factor_get(struct zl3073x_dev *zldev)
+{
+ return zldev->phase_avg_factor;
+}
+
+int zl3073x_dev_phase_avg_factor_set(struct zl3073x_dev *zldev, u8 factor);
+
/**********************
* Registers operations
**********************/
+/**
+ * struct zl3073x_hwreg_seq_item - HW register write sequence item
+ * @addr: HW register to be written
+ * @value: value to be written to HW register
+ * @mask: bitmask indicating bits to be updated
+ * @wait: number of ms to wait after register write
+ */
+struct zl3073x_hwreg_seq_item {
+ u32 addr;
+ u32 value;
+ u32 mask;
+ u32 wait;
+};
+
+#define HWREG_SEQ_ITEM(_addr, _value, _mask, _wait) \
+{ \
+ .addr = _addr, \
+ .value = FIELD_PREP_CONST(_mask, _value), \
+ .mask = _mask, \
+ .wait = _wait, \
+}
+
int zl3073x_mb_op(struct zl3073x_dev *zldev, unsigned int op_reg, u8 op_val,
unsigned int mask_reg, u16 mask_val);
int zl3073x_poll_zero_u8(struct zl3073x_dev *zldev, unsigned int reg, u8 mask);
@@ -126,6 +163,13 @@ int zl3073x_write_u8(struct zl3073x_dev *zldev, unsigned int reg, u8 val);
int zl3073x_write_u16(struct zl3073x_dev *zldev, unsigned int reg, u16 val);
int zl3073x_write_u32(struct zl3073x_dev *zldev, unsigned int reg, u32 val);
int zl3073x_write_u48(struct zl3073x_dev *zldev, unsigned int reg, u64 val);
+int zl3073x_read_hwreg(struct zl3073x_dev *zldev, u32 addr, u32 *value);
+int zl3073x_write_hwreg(struct zl3073x_dev *zldev, u32 addr, u32 value);
+int zl3073x_update_hwreg(struct zl3073x_dev *zldev, u32 addr, u32 value,
+ u32 mask);
+int zl3073x_write_hwreg_seq(struct zl3073x_dev *zldev,
+ const struct zl3073x_hwreg_seq_item *seq,
+ size_t num_items);
/*****************
* Misc operations
diff --git a/drivers/dpll/zl3073x/devlink.c b/drivers/dpll/zl3073x/devlink.c
index 7e7fe726ee37..ccc22332b346 100644
--- a/drivers/dpll/zl3073x/devlink.c
+++ b/drivers/dpll/zl3073x/devlink.c
@@ -9,6 +9,8 @@
#include "core.h"
#include "devlink.h"
#include "dpll.h"
+#include "flash.h"
+#include "fw.h"
#include "regs.h"
/**
@@ -86,14 +88,12 @@ zl3073x_devlink_reload_down(struct devlink *devlink, bool netns_change,
struct netlink_ext_ack *extack)
{
struct zl3073x_dev *zldev = devlink_priv(devlink);
- struct zl3073x_dpll *zldpll;
if (action != DEVLINK_RELOAD_ACTION_DRIVER_REINIT)
return -EOPNOTSUPP;
- /* Unregister all DPLLs */
- list_for_each_entry(zldpll, &zldev->dplls, list)
- zl3073x_dpll_unregister(zldpll);
+ /* Stop normal operation */
+ zl3073x_dev_stop(zldev);
return 0;
}
@@ -107,7 +107,6 @@ zl3073x_devlink_reload_up(struct devlink *devlink,
{
struct zl3073x_dev *zldev = devlink_priv(devlink);
union devlink_param_value val;
- struct zl3073x_dpll *zldpll;
int rc;
if (action != DEVLINK_RELOAD_ACTION_DRIVER_REINIT)
@@ -125,24 +124,156 @@ zl3073x_devlink_reload_up(struct devlink *devlink,
zldev->clock_id = val.vu64;
}
- /* Re-register all DPLLs */
- list_for_each_entry(zldpll, &zldev->dplls, list) {
- rc = zl3073x_dpll_register(zldpll);
- if (rc)
- dev_warn(zldev->dev,
- "Failed to re-register DPLL%u\n", zldpll->id);
- }
+ /* Restart normal operation */
+ rc = zl3073x_dev_start(zldev, false);
+ if (rc)
+ dev_warn(zldev->dev, "Failed to re-start normal operation\n");
*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
return 0;
}
+void zl3073x_devlink_flash_notify(struct zl3073x_dev *zldev, const char *msg,
+ const char *component, u32 done, u32 total)
+{
+ struct devlink *devlink = priv_to_devlink(zldev);
+
+ devlink_flash_update_status_notify(devlink, msg, component, done,
+ total);
+}
+
+/**
+ * zl3073x_devlink_flash_prepare - Prepare and enter flash mode
+ * @zldev: zl3073x device pointer
+ * @zlfw: pointer to loaded firmware
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function stops normal operation and switches the device to flash mode.
+ * If an error occurs the normal operation is resumed.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_devlink_flash_prepare(struct zl3073x_dev *zldev,
+ struct zl3073x_fw *zlfw,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_fw_component *util;
+ int rc;
+
+ util = zlfw->component[ZL_FW_COMPONENT_UTIL];
+ if (!util) {
+ zl3073x_devlink_flash_notify(zldev,
+ "Utility is missing in firmware",
+ NULL, 0, 0);
+ return -ENOEXEC;
+ }
+
+	/* Stop normal operation prior to entering flash mode */
+ zl3073x_dev_stop(zldev);
+
+ rc = zl3073x_flash_mode_enter(zldev, util->data, util->size, extack);
+ if (rc) {
+ zl3073x_devlink_flash_notify(zldev,
+ "Failed to enter flash mode",
+ NULL, 0, 0);
+
+ /* Resume normal operation */
+ zl3073x_dev_start(zldev, true);
+
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * zl3073x_devlink_flash_finish - Leave flash mode and resume normal operation
+ * @zldev: zl3073x device pointer
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function switches the device back to standard mode and resumes normal
+ * operation.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_devlink_flash_finish(struct zl3073x_dev *zldev,
+ struct netlink_ext_ack *extack)
+{
+ int rc;
+
+ /* Reset device CPU to normal mode */
+ zl3073x_flash_mode_leave(zldev, extack);
+
+ /* Resume normal operation */
+ rc = zl3073x_dev_start(zldev, true);
+ if (rc)
+ zl3073x_devlink_flash_notify(zldev,
+ "Failed to start normal operation",
+ NULL, 0, 0);
+
+ return rc;
+}
+
+/**
+ * zl3073x_devlink_flash_update - Devlink flash update callback
+ * @devlink: devlink structure pointer
+ * @params: flashing parameters pointer
+ * @extack: netlink extack pointer to report errors
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dev *zldev = devlink_priv(devlink);
+ struct zl3073x_fw *zlfw;
+ int rc = 0;
+
+ zlfw = zl3073x_fw_load(zldev, params->fw->data, params->fw->size,
+ extack);
+ if (IS_ERR(zlfw)) {
+ zl3073x_devlink_flash_notify(zldev, "Failed to load firmware",
+ NULL, 0, 0);
+ rc = PTR_ERR(zlfw);
+ goto finish;
+ }
+
+ /* Stop normal operation and enter flash mode */
+ rc = zl3073x_devlink_flash_prepare(zldev, zlfw, extack);
+ if (rc)
+ goto finish;
+
+ rc = zl3073x_fw_flash(zldev, zlfw, extack);
+ if (rc) {
+ zl3073x_devlink_flash_finish(zldev, extack);
+ goto finish;
+ }
+
+ /* Resume normal mode */
+ rc = zl3073x_devlink_flash_finish(zldev, extack);
+
+finish:
+ if (!IS_ERR(zlfw))
+ zl3073x_fw_free(zlfw);
+
+ zl3073x_devlink_flash_notify(zldev,
+ rc ? "Flashing failed" : "Flashing done",
+ NULL, 0, 0);
+
+ return rc;
+}
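Once the callback is registered, the whole load/prepare/flash/finish flow can be driven from userspace with the standard devlink command, for example (devlink handle and file name are illustrative; the file must be reachable by the firmware loader):

	# devlink dev flash DEV file zl3073x.fw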
+
static const struct devlink_ops zl3073x_devlink_ops = {
.info_get = zl3073x_devlink_info_get,
.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
.reload_down = zl3073x_devlink_reload_down,
.reload_up = zl3073x_devlink_reload_up,
+ .flash_update = zl3073x_devlink_flash_update,
};
static void
diff --git a/drivers/dpll/zl3073x/devlink.h b/drivers/dpll/zl3073x/devlink.h
index 037720db204f..63dfd6fa1cd6 100644
--- a/drivers/dpll/zl3073x/devlink.h
+++ b/drivers/dpll/zl3073x/devlink.h
@@ -9,4 +9,7 @@ struct zl3073x_dev *zl3073x_devm_alloc(struct device *dev);
int zl3073x_devlink_register(struct zl3073x_dev *zldev);
+void zl3073x_devlink_flash_notify(struct zl3073x_dev *zldev, const char *msg,
+ const char *component, u32 done, u32 total);
+
#endif /* _ZL3073X_DEVLINK_H */
diff --git a/drivers/dpll/zl3073x/dpll.c b/drivers/dpll/zl3073x/dpll.c
index 3e42e9e7fd27..93dc93eec79e 100644
--- a/drivers/dpll/zl3073x/dpll.c
+++ b/drivers/dpll/zl3073x/dpll.c
@@ -1577,6 +1577,59 @@ zl3073x_dpll_mode_get(const struct dpll_device *dpll, void *dpll_priv,
}
static int
+zl3073x_dpll_phase_offset_avg_factor_get(const struct dpll_device *dpll,
+ void *dpll_priv, u32 *factor,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+
+ *factor = zl3073x_dev_phase_avg_factor_get(zldpll->dev);
+
+ return 0;
+}
+
+static void
+zl3073x_dpll_change_work(struct work_struct *work)
+{
+ struct zl3073x_dpll *zldpll;
+
+ zldpll = container_of(work, struct zl3073x_dpll, change_work);
+ dpll_device_change_ntf(zldpll->dpll_dev);
+}
+
+static int
+zl3073x_dpll_phase_offset_avg_factor_set(const struct dpll_device *dpll,
+ void *dpll_priv, u32 factor,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *item, *zldpll = dpll_priv;
+ int rc;
+
+ if (factor > 15) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "Phase offset average factor has to be from range <0,15>");
+ return -EINVAL;
+ }
+
+ rc = zl3073x_dev_phase_avg_factor_set(zldpll->dev, factor);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "Failed to set phase offset averaging factor");
+ return rc;
+ }
+
+	/* The averaging factor is common for all DPLL channels, so after a
+	 * change we have to send a notification to the other DPLL devices.
+ */
+ list_for_each_entry(item, &zldpll->dev->dplls, list) {
+ if (item != zldpll)
+ schedule_work(&item->change_work);
+ }
+
+ return 0;
+}
+
+static int
zl3073x_dpll_phase_offset_monitor_get(const struct dpll_device *dpll,
void *dpll_priv,
enum dpll_feature_state *state,
@@ -1635,6 +1688,8 @@ static const struct dpll_pin_ops zl3073x_dpll_output_pin_ops = {
static const struct dpll_device_ops zl3073x_dpll_device_ops = {
.lock_status_get = zl3073x_dpll_lock_status_get,
.mode_get = zl3073x_dpll_mode_get,
+ .phase_offset_avg_factor_get = zl3073x_dpll_phase_offset_avg_factor_get,
+ .phase_offset_avg_factor_set = zl3073x_dpll_phase_offset_avg_factor_set,
.phase_offset_monitor_get = zl3073x_dpll_phase_offset_monitor_get,
.phase_offset_monitor_set = zl3073x_dpll_phase_offset_monitor_set,
};
@@ -1983,6 +2038,8 @@ zl3073x_dpll_device_unregister(struct zl3073x_dpll *zldpll)
{
WARN(!zldpll->dpll_dev, "DPLL device is not registered\n");
+ cancel_work_sync(&zldpll->change_work);
+
dpll_device_unregister(zldpll->dpll_dev, &zl3073x_dpll_device_ops,
zldpll);
dpll_device_put(zldpll->dpll_dev);
@@ -2258,6 +2315,7 @@ zl3073x_dpll_alloc(struct zl3073x_dev *zldev, u8 ch)
zldpll->dev = zldev;
zldpll->id = ch;
INIT_LIST_HEAD(&zldpll->pins);
+ INIT_WORK(&zldpll->change_work, zl3073x_dpll_change_work);
return zldpll;
}
diff --git a/drivers/dpll/zl3073x/dpll.h b/drivers/dpll/zl3073x/dpll.h
index 304910ffc9c0..e8c39b44b356 100644
--- a/drivers/dpll/zl3073x/dpll.h
+++ b/drivers/dpll/zl3073x/dpll.h
@@ -20,6 +20,7 @@
* @dpll_dev: pointer to registered DPLL device
* @lock_status: last saved DPLL lock status
* @pins: list of pins
+ * @change_work: device change notification work
*/
struct zl3073x_dpll {
struct list_head list;
@@ -32,6 +33,7 @@ struct zl3073x_dpll {
struct dpll_device *dpll_dev;
enum dpll_lock_status lock_status;
struct list_head pins;
+ struct work_struct change_work;
};
struct zl3073x_dpll *zl3073x_dpll_alloc(struct zl3073x_dev *zldev, u8 ch);
diff --git a/drivers/dpll/zl3073x/flash.c b/drivers/dpll/zl3073x/flash.c
new file mode 100644
index 000000000000..83452a77e3e9
--- /dev/null
+++ b/drivers/dpll/zl3073x/flash.c
@@ -0,0 +1,666 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+#include <linux/minmax.h>
+#include <linux/netlink.h>
+#include <linux/sched/signal.h>
+#include <linux/sizes.h>
+#include <linux/sprintf.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/unaligned.h>
+#include <net/devlink.h>
+
+#include "core.h"
+#include "devlink.h"
+#include "flash.h"
+
+#define ZL_FLASH_ERR_PFX "FW update failed: "
+#define ZL_FLASH_ERR_MSG(_extack, _msg, ...) \
+ NL_SET_ERR_MSG_FMT_MOD((_extack), ZL_FLASH_ERR_PFX _msg, \
+ ## __VA_ARGS__)
+
+/**
+ * zl3073x_flash_download - Download image block to device memory
+ * @zldev: zl3073x device structure
+ * @component: name of the component to be downloaded
+ * @addr: device memory target address
+ * @data: pointer to data to download
+ * @size: size of data to download
+ * @extack: netlink extack pointer to report errors
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_flash_download(struct zl3073x_dev *zldev, const char *component,
+ u32 addr, const void *data, size_t size,
+ struct netlink_ext_ack *extack)
+{
+#define ZL_CHECK_DELAY 5000 /* Check for a pending signal every 5 seconds */
+ unsigned long check_time;
+ const void *ptr, *end;
+ int rc = 0;
+
+ dev_dbg(zldev->dev, "Downloading %zu bytes to device memory at 0x%0x\n",
+ size, addr);
+
+ check_time = jiffies + msecs_to_jiffies(ZL_CHECK_DELAY);
+
+ for (ptr = data, end = data + size; ptr < end; ptr += 4, addr += 4) {
+ /* Write current word to HW memory */
+ rc = zl3073x_write_hwreg(zldev, addr,
+ get_unaligned((u32 *)ptr));
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack,
+ "failed to write to memory at 0x%0x",
+ addr);
+ return rc;
+ }
+
+ if (time_is_before_jiffies(check_time)) {
+ if (signal_pending(current)) {
+ ZL_FLASH_ERR_MSG(extack,
+ "Flashing interrupted");
+ return -EINTR;
+ }
+
+ check_time = jiffies + msecs_to_jiffies(ZL_CHECK_DELAY);
+ }
+
+		/* Report status every 1 KiB block */
+ if ((ptr - data) % 1024 == 0)
+ zl3073x_devlink_flash_notify(zldev, "Downloading image",
+ component, ptr - data,
+ size);
+ }
+
+ zl3073x_devlink_flash_notify(zldev, "Downloading image", component,
+ ptr - data, size);
+
+ dev_dbg(zldev->dev, "%zu bytes downloaded to device memory\n", size);
+
+ return rc;
+}
+
+/**
+ * zl3073x_flash_error_check - Check for flash utility errors
+ * @zldev: zl3073x device structure
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function checks for errors detected by the flash utility and
+ * reports them if any were found.
+ *
+ * Return: 0 on success, -EIO when errors are detected
+ */
+static int
+zl3073x_flash_error_check(struct zl3073x_dev *zldev,
+ struct netlink_ext_ack *extack)
+{
+ u32 count, cause;
+ int rc;
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_ERROR_COUNT, &count);
+ if (rc)
+ return rc;
+ else if (!count)
+ return 0; /* No error */
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_ERROR_CAUSE, &cause);
+ if (rc)
+ return rc;
+
+ /* Report errors */
+ ZL_FLASH_ERR_MSG(extack,
+ "utility error occurred: count=%u cause=0x%x", count,
+ cause);
+
+ return -EIO;
+}
+
+/**
+ * zl3073x_flash_wait_ready - Check or wait for utility to be ready to flash
+ * @zldev: zl3073x device structure
+ * @timeout_ms: timeout for the waiting
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_flash_wait_ready(struct zl3073x_dev *zldev, unsigned int timeout_ms)
+{
+#define ZL_FLASH_POLL_DELAY_MS 100
+ unsigned long timeout;
+ int rc, i;
+
+ dev_dbg(zldev->dev, "Waiting for flashing to be ready\n");
+
+ timeout = jiffies + msecs_to_jiffies(timeout_ms);
+
+ for (i = 0; time_is_after_jiffies(timeout); i++) {
+ u8 value;
+
+		/* Check for a pending signal every second */
+ if (i > 9) {
+ if (signal_pending(current))
+ return -EINTR;
+ i = 0;
+ }
+
+ rc = zl3073x_read_u8(zldev, ZL_REG_WRITE_FLASH, &value);
+ if (rc)
+ return rc;
+
+ value = FIELD_GET(ZL_WRITE_FLASH_OP, value);
+
+ if (value == ZL_WRITE_FLASH_OP_DONE)
+ return 0; /* Successfully done */
+
+ msleep(ZL_FLASH_POLL_DELAY_MS);
+ }
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * zl3073x_flash_cmd_wait - Perform flash operation and wait for finish
+ * @zldev: zl3073x device structure
+ * @operation: operation to perform
+ * @extack: netlink extack pointer to report errors
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_flash_cmd_wait(struct zl3073x_dev *zldev, u32 operation,
+ struct netlink_ext_ack *extack)
+{
+#define ZL_FLASH_PHASE1_TIMEOUT_MS 60000 /* up to 1 minute */
+#define ZL_FLASH_PHASE2_TIMEOUT_MS 120000 /* up to 2 minutes */
+ u8 value;
+ int rc;
+
+ dev_dbg(zldev->dev, "Sending flash command: 0x%x\n", operation);
+
+ rc = zl3073x_flash_wait_ready(zldev, ZL_FLASH_PHASE1_TIMEOUT_MS);
+ if (rc)
+ return rc;
+
+ /* Issue the requested operation */
+ rc = zl3073x_read_u8(zldev, ZL_REG_WRITE_FLASH, &value);
+ if (rc)
+ return rc;
+
+ value &= ~ZL_WRITE_FLASH_OP;
+ value |= FIELD_PREP(ZL_WRITE_FLASH_OP, operation);
+
+ rc = zl3073x_write_u8(zldev, ZL_REG_WRITE_FLASH, value);
+ if (rc)
+ return rc;
+
+ /* Wait for command completion */
+ rc = zl3073x_flash_wait_ready(zldev, ZL_FLASH_PHASE2_TIMEOUT_MS);
+ if (rc)
+ return rc;
+
+ return zl3073x_flash_error_check(zldev, extack);
+}
+
+/**
+ * zl3073x_flash_get_sector_size - Get flash sector size
+ * @zldev: zl3073x device structure
+ * @sector_size: sector size returned by the function
+ *
+ * The function reads the flash sector size detected by flash utility and
+ * stores it into @sector_size.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_flash_get_sector_size(struct zl3073x_dev *zldev, size_t *sector_size)
+{
+ u8 flash_info;
+ int rc;
+
+ rc = zl3073x_read_u8(zldev, ZL_REG_FLASH_INFO, &flash_info);
+ if (rc)
+ return rc;
+
+ switch (FIELD_GET(ZL_FLASH_INFO_SECTOR_SIZE, flash_info)) {
+ case ZL_FLASH_INFO_SECTOR_4K:
+ *sector_size = SZ_4K;
+ break;
+ case ZL_FLASH_INFO_SECTOR_64K:
+ *sector_size = SZ_64K;
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * zl3073x_flash_block - Download and flash memory block
+ * @zldev: zl3073x device structure
+ * @component: component name
+ * @operation: flash operation to perform
+ * @page: destination flash page
+ * @addr: device memory address to load data
+ * @data: pointer to data to be flashed
+ * @size: size of data
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function downloads the memory block given by the @data pointer and
+ * the size @size and flashes it into internal memory on flash page @page.
+ * The internal flash operation performed by the firmware is specified by
+ * the @operation parameter.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_flash_block(struct zl3073x_dev *zldev, const char *component,
+ u32 operation, u32 page, u32 addr, const void *data,
+ size_t size, struct netlink_ext_ack *extack)
+{
+ int rc;
+
+ /* Download block to device memory */
+ rc = zl3073x_flash_download(zldev, component, addr, data, size, extack);
+ if (rc)
+ return rc;
+
+ /* Set address to flash from */
+ rc = zl3073x_write_u32(zldev, ZL_REG_IMAGE_START_ADDR, addr);
+ if (rc)
+ return rc;
+
+ /* Set size of block to flash */
+ rc = zl3073x_write_u32(zldev, ZL_REG_IMAGE_SIZE, size);
+ if (rc)
+ return rc;
+
+ /* Set destination page to flash */
+ rc = zl3073x_write_u32(zldev, ZL_REG_FLASH_INDEX_WRITE, page);
+ if (rc)
+ return rc;
+
+ /* Set filling pattern */
+ rc = zl3073x_write_u32(zldev, ZL_REG_FILL_PATTERN, U32_MAX);
+ if (rc)
+ return rc;
+
+ zl3073x_devlink_flash_notify(zldev, "Flashing image", component, 0,
+ size);
+
+ dev_dbg(zldev->dev, "Flashing %zu bytes to page %u\n", size, page);
+
+ /* Execute sectors flash operation */
+ rc = zl3073x_flash_cmd_wait(zldev, operation, extack);
+ if (rc)
+ return rc;
+
+ zl3073x_devlink_flash_notify(zldev, "Flashing image", component, size,
+ size);
+
+ return 0;
+}
+
+/**
+ * zl3073x_flash_sectors - Flash sectors
+ * @zldev: zl3073x device structure
+ * @component: component name
+ * @page: destination flash page
+ * @addr: device memory address to load data
+ * @data: pointer to data to be flashed
+ * @size: size of data
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function flashes given @data with size of @size to the internal flash
+ * memory block starting from page @page. The function uses sector flash
+ * method and has to take into account the flash sector size reported by
+ * flashing utility. Input data are spliced into blocks according this
+ * sector size and each block is flashed separately.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_flash_sectors(struct zl3073x_dev *zldev, const char *component,
+ u32 page, u32 addr, const void *data, size_t size,
+ struct netlink_ext_ack *extack)
+{
+#define ZL_FLASH_MAX_BLOCK_SIZE 0x0001E000
+#define ZL_FLASH_PAGE_SIZE 256
+ size_t max_block_size, block_size, sector_size;
+ const void *ptr, *end;
+ int rc;
+
+ /* Get flash sector size */
+ rc = zl3073x_flash_get_sector_size(zldev, &sector_size);
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack, "Failed to get flash sector size");
+ return rc;
+ }
+
+ /* Determine max block size depending on sector size */
+ max_block_size = ALIGN_DOWN(ZL_FLASH_MAX_BLOCK_SIZE, sector_size);
+
+ for (ptr = data, end = data + size; ptr < end; ptr += block_size) {
+ char comp_str[32];
+
+ block_size = min_t(size_t, max_block_size, end - ptr);
+
+ /* Add suffix '-partN' if the requested component size is
+ * greater than max_block_size.
+ */
+ if (max_block_size < size)
+ snprintf(comp_str, sizeof(comp_str), "%s-part%zu",
+ component, (ptr - data) / max_block_size + 1);
+ else
+ strscpy(comp_str, component);
+
+ /* Flash the memory block */
+ rc = zl3073x_flash_block(zldev, comp_str,
+ ZL_WRITE_FLASH_OP_SECTORS, page, addr,
+ ptr, block_size, extack);
+ if (rc)
+ goto finish;
+
+ /* Move to next page */
+ page += block_size / ZL_FLASH_PAGE_SIZE;
+ }
+
+finish:
+ zl3073x_devlink_flash_notify(zldev,
+ rc ? "Flashing failed" : "Flashing done",
+ component, 0, 0);
+
+ return rc;
+}
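A worked example of the splitting, assuming 4 KiB sectors: the 0x1E000-byte ceiling is already 4 KiB aligned, so max_block_size stays at 122880 bytes and each full block advances the destination by 122880 / 256 = 480 flash pages. With 64 KiB sectors, ALIGN_DOWN() trims the ceiling to 0x10000 bytes, i.e. 256 pages per block.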
+
+/**
+ * zl3073x_flash_page - Flash page
+ * @zldev: zl3073x device structure
+ * @component: component name
+ * @page: destination flash page
+ * @addr: device memory address to load data
+ * @data: pointer to data to be flashed
+ * @size: size of data
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function flashes given @data with size of @size to the internal flash
+ * memory block starting with page @page.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_flash_page(struct zl3073x_dev *zldev, const char *component,
+ u32 page, u32 addr, const void *data, size_t size,
+ struct netlink_ext_ack *extack)
+{
+ int rc;
+
+ /* Flash the memory block */
+ rc = zl3073x_flash_block(zldev, component, ZL_WRITE_FLASH_OP_PAGE, page,
+ addr, data, size, extack);
+
+ zl3073x_devlink_flash_notify(zldev,
+ rc ? "Flashing failed" : "Flashing done",
+ component, 0, 0);
+
+ return rc;
+}
+
+/**
+ * zl3073x_flash_page_copy - Copy flash page
+ * @zldev: zl3073x device structure
+ * @component: component name
+ * @src_page: source page to copy
+ * @dst_page: destination page
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function copies one flash page specified by @src_page into the flash
+ * page specified by @dst_page.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_flash_page_copy(struct zl3073x_dev *zldev, const char *component,
+ u32 src_page, u32 dst_page,
+ struct netlink_ext_ack *extack)
+{
+ int rc;
+
+ /* Set source page to be copied */
+ rc = zl3073x_write_u32(zldev, ZL_REG_FLASH_INDEX_READ, src_page);
+ if (rc)
+ return rc;
+
+ /* Set destination page for the copy */
+ rc = zl3073x_write_u32(zldev, ZL_REG_FLASH_INDEX_WRITE, dst_page);
+ if (rc)
+ return rc;
+
+ /* Perform copy operation */
+ rc = zl3073x_flash_cmd_wait(zldev, ZL_WRITE_FLASH_OP_COPY_PAGE, extack);
+ if (rc)
+ ZL_FLASH_ERR_MSG(extack, "Failed to copy page %u to page %u",
+ src_page, dst_page);
+
+ return rc;
+}
+
+/**
+ * zl3073x_flash_mode_verify - Check flash utility
+ * @zldev: zl3073x device structure
+ *
+ * Return: 0 if the flash utility is ready, <0 on error
+ */
+static int
+zl3073x_flash_mode_verify(struct zl3073x_dev *zldev)
+{
+ u8 family, release;
+ u32 hash;
+ int rc;
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_FLASH_HASH, &hash);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u8(zldev, ZL_REG_FLASH_FAMILY, &family);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u8(zldev, ZL_REG_FLASH_RELEASE, &release);
+ if (rc)
+ return rc;
+
+ dev_dbg(zldev->dev,
+ "Flash utility check: hash 0x%08x, fam 0x%02x, rel 0x%02x\n",
+ hash, family, release);
+
+ /* Return success for correct family */
+ return (family == 0x21) ? 0 : -ENODEV;
+}
+
+static int
+zl3073x_flash_host_ctrl_enable(struct zl3073x_dev *zldev)
+{
+ u8 host_ctrl;
+ int rc;
+
+ /* Enable host control */
+ rc = zl3073x_read_u8(zldev, ZL_REG_HOST_CONTROL, &host_ctrl);
+ if (rc)
+ return rc;
+
+ host_ctrl |= ZL_HOST_CONTROL_ENABLE;
+
+ return zl3073x_write_u8(zldev, ZL_REG_HOST_CONTROL, host_ctrl);
+}
+
+/**
+ * zl3073x_flash_mode_enter - Switch the device to flash mode
+ * @zldev: zl3073x device structure
+ * @util_ptr: buffer with flash utility
+ * @util_size: size of buffer with flash utility
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function prepares and switches the device into flash mode.
+ *
+ * The procedure:
+ * 1) Stop device CPU by specific HW register sequence
+ * 2) Download flash utility to device memory
+ * 3) Resume device CPU by specific HW register sequence
+ * 4) Check communication with flash utility
+ * 5) Enable host control necessary to access flash API
+ * 6) Check for potential error detected by the utility
+ *
+ * The API provided by normal firmware is not available in flash mode
+ * so the caller has to ensure that this API is not used in this mode.
+ *
+ * After performing the flash operation the caller should call
+ * zl3073x_flash_mode_leave() to return back to normal operation.
+ *
+ * Return: 0 on success, <0 on error.
+ */
+int zl3073x_flash_mode_enter(struct zl3073x_dev *zldev, const void *util_ptr,
+ size_t util_size, struct netlink_ext_ack *extack)
+{
+	/* Sequence to be written prior to utility download */
+ static const struct zl3073x_hwreg_seq_item pre_seq[] = {
+ HWREG_SEQ_ITEM(0x80000400, 1, BIT(0), 0),
+ HWREG_SEQ_ITEM(0x80206340, 1, BIT(4), 0),
+ HWREG_SEQ_ITEM(0x10000000, 1, BIT(2), 0),
+ HWREG_SEQ_ITEM(0x10000024, 0x00000001, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10000020, 0x00000001, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10000000, 1, BIT(10), 1000),
+ };
+ /* Sequence to be written after utility download */
+ static const struct zl3073x_hwreg_seq_item post_seq[] = {
+ HWREG_SEQ_ITEM(0x10400004, 0x000000C0, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10400008, 0x00000000, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10400010, 0x20000000, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10400014, 0x20000004, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10000000, 1, GENMASK(10, 9), 0),
+ HWREG_SEQ_ITEM(0x10000020, 0x00000000, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10000000, 0, BIT(0), 1000),
+ };
+ int rc;
+
+ zl3073x_devlink_flash_notify(zldev, "Prepare flash mode", "utility",
+ 0, 0);
+
+	/* Execute pre-load sequence */
+ rc = zl3073x_write_hwreg_seq(zldev, pre_seq, ARRAY_SIZE(pre_seq));
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack, "cannot execute pre-load sequence");
+ goto error;
+ }
+
+ /* Download utility image to device memory */
+ rc = zl3073x_flash_download(zldev, "utility", 0x20000000, util_ptr,
+ util_size, extack);
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack, "cannot download flash utility");
+ goto error;
+ }
+
+ /* Execute post-load sequence */
+ rc = zl3073x_write_hwreg_seq(zldev, post_seq, ARRAY_SIZE(post_seq));
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack, "cannot execute post-load sequence");
+ goto error;
+ }
+
+ /* Check that utility identifies itself correctly */
+ rc = zl3073x_flash_mode_verify(zldev);
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack, "flash utility check failed");
+ goto error;
+ }
+
+ /* Enable host control */
+ rc = zl3073x_flash_host_ctrl_enable(zldev);
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack, "cannot enable host control");
+ goto error;
+ }
+
+ zl3073x_devlink_flash_notify(zldev, "Flash mode enabled", "utility",
+ 0, 0);
+
+ return 0;
+
+error:
+ zl3073x_flash_mode_leave(zldev, extack);
+
+ return rc;
+}
+
+/**
+ * zl3073x_flash_mode_leave - Leave flash mode
+ * @zldev: zl3073x device structure
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function instructs the device to leave the flash mode and
+ * to return back to normal operation.
+ *
+ * The procedure:
+ * 1) Set reset flag
+ * 2) Reset the device CPU by specific HW register sequence
+ * 3) Wait for the device to be ready
+ * 4) Check the reset flag was cleared
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_flash_mode_leave(struct zl3073x_dev *zldev,
+ struct netlink_ext_ack *extack)
+{
+ /* Sequence to be written after flash */
+ static const struct zl3073x_hwreg_seq_item fw_reset_seq[] = {
+ HWREG_SEQ_ITEM(0x80000404, 1, BIT(0), 0),
+ HWREG_SEQ_ITEM(0x80000410, 1, BIT(0), 0),
+ };
+ u8 reset_status;
+ int rc;
+
+ zl3073x_devlink_flash_notify(zldev, "Leaving flash mode", "utility",
+ 0, 0);
+
+ /* Read reset status register */
+ rc = zl3073x_read_u8(zldev, ZL_REG_RESET_STATUS, &reset_status);
+ if (rc)
+ return rc;
+
+ /* Set reset bit */
+ reset_status |= ZL_REG_RESET_STATUS_RESET;
+
+ /* Update reset status register */
+ rc = zl3073x_write_u8(zldev, ZL_REG_RESET_STATUS, reset_status);
+ if (rc)
+ return rc;
+
+ /* We do not check the return value here as the sequence resets
+	 * the device CPU and the last write always returns an error.
+ */
+ zl3073x_write_hwreg_seq(zldev, fw_reset_seq, ARRAY_SIZE(fw_reset_seq));
+
+ /* Wait for the device to be ready */
+ msleep(500);
+
+ /* Read again the reset status register */
+ rc = zl3073x_read_u8(zldev, ZL_REG_RESET_STATUS, &reset_status);
+ if (rc)
+ return rc;
+
+ /* Check the reset bit was cleared */
+ if (reset_status & ZL_REG_RESET_STATUS_RESET) {
+ dev_err(zldev->dev,
+ "Reset not confirmed after switch to normal mode\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/dpll/zl3073x/flash.h b/drivers/dpll/zl3073x/flash.h
new file mode 100644
index 000000000000..effe1b16b359
--- /dev/null
+++ b/drivers/dpll/zl3073x/flash.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __ZL3073X_FLASH_H
+#define __ZL3073X_FLASH_H
+
+#include <linux/types.h>
+
+struct netlink_ext_ack;
+struct zl3073x_dev;
+
+int zl3073x_flash_mode_enter(struct zl3073x_dev *zldev, const void *util_ptr,
+ size_t util_size, struct netlink_ext_ack *extack);
+
+int zl3073x_flash_mode_leave(struct zl3073x_dev *zldev,
+ struct netlink_ext_ack *extack);
+
+int zl3073x_flash_page(struct zl3073x_dev *zldev, const char *component,
+ u32 page, u32 addr, const void *data, size_t size,
+ struct netlink_ext_ack *extack);
+
+int zl3073x_flash_page_copy(struct zl3073x_dev *zldev, const char *component,
+ u32 src_page, u32 dst_page,
+ struct netlink_ext_ack *extack);
+
+int zl3073x_flash_sectors(struct zl3073x_dev *zldev, const char *component,
+ u32 page, u32 addr, const void *data, size_t size,
+ struct netlink_ext_ack *extack);
+
+#endif /* __ZL3073X_FLASH_H */
diff --git a/drivers/dpll/zl3073x/fw.c b/drivers/dpll/zl3073x/fw.c
new file mode 100644
index 000000000000..d5418ff74886
--- /dev/null
+++ b/drivers/dpll/zl3073x/fw.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/array_size.h>
+#include <linux/build_bug.h>
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/netlink.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "core.h"
+#include "flash.h"
+#include "fw.h"
+
+#define ZL3073X_FW_ERR_PFX "FW load failed: "
+#define ZL3073X_FW_ERR_MSG(_extack, _msg, ...) \
+ NL_SET_ERR_MSG_FMT_MOD((_extack), ZL3073X_FW_ERR_PFX _msg, \
+ ## __VA_ARGS__)
+
+enum zl3073x_flash_type {
+ ZL3073X_FLASH_TYPE_NONE = 0,
+ ZL3073X_FLASH_TYPE_SECTORS,
+ ZL3073X_FLASH_TYPE_PAGE,
+ ZL3073X_FLASH_TYPE_PAGE_AND_COPY,
+};
+
+struct zl3073x_fw_component_info {
+ const char *name;
+ size_t max_size;
+ enum zl3073x_flash_type flash_type;
+ u32 load_addr;
+ u32 dest_page;
+ u32 copy_page;
+};
+
+static const struct zl3073x_fw_component_info component_info[] = {
+ [ZL_FW_COMPONENT_UTIL] = {
+ .name = "utility",
+ .max_size = 0x2300,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_NONE,
+ },
+ [ZL_FW_COMPONENT_FW1] = {
+ .name = "firmware1",
+ .max_size = 0x35000,
+ .load_addr = 0x20002000,
+ .flash_type = ZL3073X_FLASH_TYPE_SECTORS,
+ .dest_page = 0x020,
+ },
+ [ZL_FW_COMPONENT_FW2] = {
+ .name = "firmware2",
+ .max_size = 0x0040,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE_AND_COPY,
+ .dest_page = 0x3e0,
+ .copy_page = 0x000,
+ },
+ [ZL_FW_COMPONENT_FW3] = {
+ .name = "firmware3",
+ .max_size = 0x0248,
+ .load_addr = 0x20000400,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE_AND_COPY,
+ .dest_page = 0x3e4,
+ .copy_page = 0x004,
+ },
+ [ZL_FW_COMPONENT_CFG0] = {
+ .name = "config0",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x3d0,
+ },
+ [ZL_FW_COMPONENT_CFG1] = {
+ .name = "config1",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x3c0,
+ },
+ [ZL_FW_COMPONENT_CFG2] = {
+ .name = "config2",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x3b0,
+ },
+ [ZL_FW_COMPONENT_CFG3] = {
+ .name = "config3",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x3a0,
+ },
+ [ZL_FW_COMPONENT_CFG4] = {
+ .name = "config4",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x390,
+ },
+ [ZL_FW_COMPONENT_CFG5] = {
+ .name = "config5",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x380,
+ },
+ [ZL_FW_COMPONENT_CFG6] = {
+ .name = "config6",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x370,
+ },
+};
+
+/* Sanity check */
+static_assert(ARRAY_SIZE(component_info) == ZL_FW_NUM_COMPONENTS);
+
+/**
+ * zl3073x_fw_component_alloc - Alloc structure to hold firmware component
+ * @size: size of buffer to store data
+ *
+ * Return: pointer to allocated component structure or NULL on error.
+ */
+static struct zl3073x_fw_component *
+zl3073x_fw_component_alloc(size_t size)
+{
+ struct zl3073x_fw_component *comp;
+
+ comp = kzalloc(sizeof(*comp), GFP_KERNEL);
+ if (!comp)
+ return NULL;
+
+ comp->size = size;
+ comp->data = kzalloc(size, GFP_KERNEL);
+ if (!comp->data) {
+ kfree(comp);
+ return NULL;
+ }
+
+ return comp;
+}
+
+/**
+ * zl3073x_fw_component_free - Free allocated component structure
+ * @comp: pointer to allocated component
+ */
+static void
+zl3073x_fw_component_free(struct zl3073x_fw_component *comp)
+{
+ if (comp)
+ kfree(comp->data);
+
+ kfree(comp);
+}
+
+/**
+ * zl3073x_fw_component_id_get - Get ID for firmware component name
+ * @name: input firmware component name
+ *
+ * Return:
+ * - ZL3073X_FW_COMPONENT_* ID for known component name
+ * - ZL3073X_FW_COMPONENT_INVALID if the given name is unknown
+ */
+static enum zl3073x_fw_component_id
+zl3073x_fw_component_id_get(const char *name)
+{
+ enum zl3073x_fw_component_id id;
+
+ for (id = 0; id < ZL_FW_NUM_COMPONENTS; id++)
+ if (!strcasecmp(name, component_info[id].name))
+ return id;
+
+ return ZL_FW_COMPONENT_INVALID;
+}
+
+/**
+ * zl3073x_fw_component_load - Load component from firmware source
+ * @zldev: zl3073x device structure
+ * @pcomp: pointer to loaded component
+ * @psrc: data pointer to load component from
+ * @psize: remaining bytes in buffer
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function allocates a single firmware component and loads the data from
+ * the buffer specified by @psrc and @psize. Pointer to allocated component
+ * is stored in output @pcomp. Source data pointer @psrc and remaining bytes
+ * @psize are updated accordingly.
+ *
+ * Return:
+ * * 1 when component was allocated and loaded
+ * * 0 when there is no component to load
+ * * <0 on error
+ */
+static ssize_t
+zl3073x_fw_component_load(struct zl3073x_dev *zldev,
+ struct zl3073x_fw_component **pcomp,
+ const char **psrc, size_t *psize,
+ struct netlink_ext_ack *extack)
+{
+ const struct zl3073x_fw_component_info *info;
+ struct zl3073x_fw_component *comp = NULL;
+ struct device *dev = zldev->dev;
+ enum zl3073x_fw_component_id id;
+ char buf[32], name[16];
+ u32 count, size, *dest;
+ int pos, rc;
+
+ /* Fetch image name and size from input */
+ strscpy(buf, *psrc, min(sizeof(buf), *psize));
+ rc = sscanf(buf, "%15s %u %n", name, &count, &pos);
+	if (rc <= 0) {
+ /* No more data */
+ return 0;
+ } else if (rc == 1 || count > U32_MAX / sizeof(u32)) {
+ ZL3073X_FW_ERR_MSG(extack, "invalid component size");
+ return -EINVAL;
+ }
+ *psrc += pos;
+ *psize -= pos;
+
+ dev_dbg(dev, "Firmware component '%s' found\n", name);
+
+ id = zl3073x_fw_component_id_get(name);
+ if (id == ZL_FW_COMPONENT_INVALID) {
+ ZL3073X_FW_ERR_MSG(extack, "unknown component type '%s'", name);
+ return -EINVAL;
+ }
+
+ info = &component_info[id];
+ size = count * sizeof(u32); /* get size in bytes */
+
+ /* Check image size validity */
+ if (size > component_info[id].max_size) {
+ ZL3073X_FW_ERR_MSG(extack,
+ "[%s] component is too big (%u bytes)\n",
+ info->name, size);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "Indicated component image size: %u bytes\n", size);
+
+ /* Alloc component */
+ comp = zl3073x_fw_component_alloc(size);
+ if (!comp) {
+ ZL3073X_FW_ERR_MSG(extack, "failed to alloc memory");
+ return -ENOMEM;
+ }
+ comp->id = id;
+
+ /* Load component data from firmware source */
+ for (dest = comp->data; count; count--, dest++) {
+ strscpy(buf, *psrc, min(sizeof(buf), *psize));
+ rc = sscanf(buf, "%x %n", dest, &pos);
+		if (rc != 1)
+ goto err_data;
+
+ *psrc += pos;
+ *psize -= pos;
+ }
+
+ *pcomp = comp;
+
+ return 1;
+
+err_data:
+ ZL3073X_FW_ERR_MSG(extack, "[%s] invalid or missing data", info->name);
+
+ zl3073x_fw_component_free(comp);
+
+ return -ENODATA;
+}
+
+/**
+ * zl3073x_fw_free - Free allocated firmware
+ * @fw: firmware pointer
+ *
+ * The function frees firmware previously allocated by zl3073x_fw_load().
+ */
+void zl3073x_fw_free(struct zl3073x_fw *fw)
+{
+ size_t i;
+
+ if (!fw)
+ return;
+
+ for (i = 0; i < ZL_FW_NUM_COMPONENTS; i++)
+ zl3073x_fw_component_free(fw->component[i]);
+
+ kfree(fw);
+}
+
+/**
+ * zl3073x_fw_load - Load all components from source
+ * @zldev: zl3073x device structure
+ * @data: source buffer pointer
+ * @size: size of source buffer
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function allocates a firmware structure and loads all components from
+ * the buffer specified by @data and @size.
+ *
+ * Return: pointer to firmware on success, error pointer on error
+ */
+struct zl3073x_fw *zl3073x_fw_load(struct zl3073x_dev *zldev, const char *data,
+ size_t size, struct netlink_ext_ack *extack)
+{
+ struct zl3073x_fw_component *comp;
+ enum zl3073x_fw_component_id id;
+ struct zl3073x_fw *fw;
+ ssize_t rc;
+
+ /* Allocate firmware structure */
+ fw = kzalloc(sizeof(*fw), GFP_KERNEL);
+ if (!fw)
+ return ERR_PTR(-ENOMEM);
+
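+ /*
+ * Parse components one after another until the whole buffer is
+ * consumed; each component may appear at most once.
+ */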
+ do {
+ /* Load single component */
+ rc = zl3073x_fw_component_load(zldev, &comp, &data, &size,
+ extack);
+ if (rc <= 0)
+ /* Everything was read or error occurred */
+ break;
+
+ id = comp->id;
+
+ /* Report error if the given component is present twice
+ * or more.
+ */
+ if (fw->component[id]) {
+ ZL3073X_FW_ERR_MSG(extack,
+ "duplicate component '%s' detected",
+ component_info[id].name);
+ zl3073x_fw_component_free(comp);
+ rc = -EINVAL;
+ break;
+ }
+
+ fw->component[id] = comp;
+ } while (true);
+
+ if (rc) {
+ /* Free allocated firmware in case of error */
+ zl3073x_fw_free(fw);
+ return ERR_PTR(rc);
+ }
+
+ return fw;
+}
+
+/**
+ * zl3073x_fw_component_flash - Flash single firmware component
+ * @zldev: zl3073x device structure
+ * @comp: pointer to component to be flashed
+ * @extack: netlink extack pointer to report errors
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int
+zl3073x_fw_component_flash(struct zl3073x_dev *zldev,
+ struct zl3073x_fw_component *comp,
+ struct netlink_ext_ack *extack)
+{
+ const struct zl3073x_fw_component_info *info;
+ int rc;
+
+ info = &component_info[comp->id];
+
+ switch (info->flash_type) {
+ case ZL3073X_FLASH_TYPE_NONE:
+ /* Non-flashable component - used for utility */
+ return 0;
+ case ZL3073X_FLASH_TYPE_SECTORS:
+ rc = zl3073x_flash_sectors(zldev, info->name, info->dest_page,
+ info->load_addr, comp->data,
+ comp->size, extack);
+ break;
+ case ZL3073X_FLASH_TYPE_PAGE:
+ rc = zl3073x_flash_page(zldev, info->name, info->dest_page,
+ info->load_addr, comp->data, comp->size,
+ extack);
+ break;
+ case ZL3073X_FLASH_TYPE_PAGE_AND_COPY:
+ rc = zl3073x_flash_page(zldev, info->name, info->dest_page,
+ info->load_addr, comp->data, comp->size,
+ extack);
+ if (!rc)
+ rc = zl3073x_flash_page_copy(zldev, info->name,
+ info->dest_page,
+ info->copy_page, extack);
+ break;
+ }
+ if (rc)
+ ZL3073X_FW_ERR_MSG(extack, "Failed to flash component '%s'",
+ info->name);
+
+ return rc;
+}
+
+int zl3073x_fw_flash(struct zl3073x_dev *zldev, struct zl3073x_fw *zlfw,
+ struct netlink_ext_ack *extack)
+{
+ int i, rc = 0;
+
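+ /* Flash present components in enum order, the utility image first. */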
+ for (i = 0; i < ZL_FW_NUM_COMPONENTS; i++) {
+ if (!zlfw->component[i])
+ continue; /* Component is not present */
+
+ rc = zl3073x_fw_component_flash(zldev, zlfw->component[i],
+ extack);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
diff --git a/drivers/dpll/zl3073x/fw.h b/drivers/dpll/zl3073x/fw.h
new file mode 100644
index 000000000000..fcaa89ab075e
--- /dev/null
+++ b/drivers/dpll/zl3073x/fw.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ZL3073X_FW_H
+#define _ZL3073X_FW_H
+
+/*
+ * enum zl3073x_fw_component_id - Identifiers for possible flash components
+ */
+enum zl3073x_fw_component_id {
+ ZL_FW_COMPONENT_INVALID = -1,
+ ZL_FW_COMPONENT_UTIL = 0,
+ ZL_FW_COMPONENT_FW1,
+ ZL_FW_COMPONENT_FW2,
+ ZL_FW_COMPONENT_FW3,
+ ZL_FW_COMPONENT_CFG0,
+ ZL_FW_COMPONENT_CFG1,
+ ZL_FW_COMPONENT_CFG2,
+ ZL_FW_COMPONENT_CFG3,
+ ZL_FW_COMPONENT_CFG4,
+ ZL_FW_COMPONENT_CFG5,
+ ZL_FW_COMPONENT_CFG6,
+ ZL_FW_NUM_COMPONENTS
+};
+
+/**
+ * struct zl3073x_fw_component - Firmware component
+ * @id: Flash component ID
+ * @size: Size of the buffer
+ * @data: Pointer to buffer with component data
+ */
+struct zl3073x_fw_component {
+ enum zl3073x_fw_component_id id;
+ size_t size;
+ void *data;
+};
+
+/**
+ * struct zl3073x_fw - Firmware bundle
+ * @component: firmware components array
+ */
+struct zl3073x_fw {
+ struct zl3073x_fw_component *component[ZL_FW_NUM_COMPONENTS];
+};
+
+struct zl3073x_fw *zl3073x_fw_load(struct zl3073x_dev *zldev, const char *data,
+ size_t size, struct netlink_ext_ack *extack);
+void zl3073x_fw_free(struct zl3073x_fw *fw);
+
+int zl3073x_fw_flash(struct zl3073x_dev *zldev, struct zl3073x_fw *zlfw,
+ struct netlink_ext_ack *extack);
+
+#endif /* _ZL3073X_FW_H */
diff --git a/drivers/dpll/zl3073x/regs.h b/drivers/dpll/zl3073x/regs.h
index 614e33128a5c..19a25325bd9c 100644
--- a/drivers/dpll/zl3073x/regs.h
+++ b/drivers/dpll/zl3073x/regs.h
@@ -72,6 +72,9 @@
#define ZL_REG_FW_VER ZL_REG(0, 0x05, 2)
#define ZL_REG_CUSTOM_CONFIG_VER ZL_REG(0, 0x07, 4)
+#define ZL_REG_RESET_STATUS ZL_REG(0, 0x18, 1)
+#define ZL_REG_RESET_STATUS_RESET BIT(0)
+
/*************************
* Register Page 2, Status
*************************/
@@ -260,4 +263,52 @@
#define ZL_REG_OUTPUT_ESYNC_WIDTH ZL_REG(14, 0x18, 4)
#define ZL_REG_OUTPUT_PHASE_COMP ZL_REG(14, 0x20, 4)
+/*
+ * Register Page 255 - HW registers access
+ */
+#define ZL_REG_HWREG_OP ZL_REG(0xff, 0x00, 1)
+#define ZL_HWREG_OP_WRITE 0x28
+#define ZL_HWREG_OP_READ 0x29
+#define ZL_HWREG_OP_PENDING BIT(1)
+
+#define ZL_REG_HWREG_ADDR ZL_REG(0xff, 0x04, 4)
+#define ZL_REG_HWREG_WRITE_DATA ZL_REG(0xff, 0x08, 4)
+#define ZL_REG_HWREG_READ_DATA ZL_REG(0xff, 0x0c, 4)
+
+/*
+ * Registers available in flash mode
+ */
+#define ZL_REG_FLASH_HASH ZL_REG(0, 0x78, 4)
+#define ZL_REG_FLASH_FAMILY ZL_REG(0, 0x7c, 1)
+#define ZL_REG_FLASH_RELEASE ZL_REG(0, 0x7d, 1)
+
+#define ZL_REG_HOST_CONTROL ZL_REG(1, 0x02, 1)
+#define ZL_HOST_CONTROL_ENABLE BIT(0)
+
+#define ZL_REG_IMAGE_START_ADDR ZL_REG(1, 0x04, 4)
+#define ZL_REG_IMAGE_SIZE ZL_REG(1, 0x08, 4)
+#define ZL_REG_FLASH_INDEX_READ ZL_REG(1, 0x0c, 4)
+#define ZL_REG_FLASH_INDEX_WRITE ZL_REG(1, 0x10, 4)
+#define ZL_REG_FILL_PATTERN ZL_REG(1, 0x14, 4)
+
+#define ZL_REG_WRITE_FLASH ZL_REG(1, 0x18, 1)
+#define ZL_WRITE_FLASH_OP GENMASK(2, 0)
+#define ZL_WRITE_FLASH_OP_DONE 0x0
+#define ZL_WRITE_FLASH_OP_SECTORS 0x2
+#define ZL_WRITE_FLASH_OP_PAGE 0x3
+#define ZL_WRITE_FLASH_OP_COPY_PAGE 0x4
+
+#define ZL_REG_FLASH_INFO ZL_REG(2, 0x00, 1)
+#define ZL_FLASH_INFO_SECTOR_SIZE GENMASK(3, 0)
+#define ZL_FLASH_INFO_SECTOR_4K 0
+#define ZL_FLASH_INFO_SECTOR_64K 1
+
+#define ZL_REG_ERROR_COUNT ZL_REG(2, 0x04, 4)
+#define ZL_REG_ERROR_CAUSE ZL_REG(2, 0x08, 4)
+
+#define ZL_REG_OP_STATE ZL_REG(2, 0x14, 1)
+#define ZL_OP_STATE_NO_COMMAND 0
+#define ZL_OP_STATE_PENDING 1
+#define ZL_OP_STATE_DONE 2
+
#endif /* _ZL3073X_REGS_H */
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 19ad3c3b675d..39352b9b7a7e 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -576,4 +576,20 @@ config EDAC_LOONGSON
errors (CE) only. Loongson-3A5000/3C5000/3D5000/3A6000/3C6000
are compatible.
+config EDAC_CORTEX_A72
+ tristate "ARM Cortex A72"
+ depends on ARM64
+ help
+ Support for L1/L2 cache error detection on the ARM Cortex A72
+ processor. Errors are detected and reported by reading the CPU
+ and L2 memory error syndrome registers.
+
+config EDAC_VERSALNET
+ tristate "AMD VersalNET DDR Controller"
+ depends on CDX_CONTROLLER && ARCH_ZYNQMP
+ help
+ Support for single-bit error correction, double-bit error detection
+ and reporting of other system errors from various IP subsystems such
+ as the RPU, NoCs, HNICX and PL on the AMD Versal NET DDR memory
+ controller.
+
endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index a8f2d8f6c894..1c14796410a3 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -88,3 +88,5 @@ obj-$(CONFIG_EDAC_NPCM) += npcm_edac.o
obj-$(CONFIG_EDAC_ZYNQMP) += zynqmp_edac.o
obj-$(CONFIG_EDAC_VERSAL) += versal_edac.o
obj-$(CONFIG_EDAC_LOONGSON) += loongson_edac.o
+obj-$(CONFIG_EDAC_VERSALNET) += versalnet_edac.o
+obj-$(CONFIG_EDAC_CORTEX_A72) += a72_edac.o
diff --git a/drivers/edac/a72_edac.c b/drivers/edac/a72_edac.c
new file mode 100644
index 000000000000..9262d75c3855
--- /dev/null
+++ b/drivers/edac/a72_edac.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cortex A72 EDAC L1 and L2 cache error detection
+ *
+ * Copyright (c) 2020 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
+ * Copyright (c) 2025 Microsoft Corporation, <vijayb@linux.microsoft.com>
+ *
+ * Based on Code from:
+ * Copyright (c) 2018, NXP Semiconductor
+ * Author: York Sun <york.sun@nxp.com>
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/bitfield.h>
+#include <asm/smp_plat.h>
+
+#include "edac_module.h"
+
+#define DRVNAME "a72-edac"
+
+#define SYS_CPUMERRSR_EL1 sys_reg(3, 1, 15, 2, 2)
+#define SYS_L2MERRSR_EL1 sys_reg(3, 1, 15, 2, 3)
+
+#define CPUMERRSR_EL1_RAMID GENMASK(30, 24)
+#define L2MERRSR_EL1_CPUID_WAY GENMASK(21, 18)
+
+#define CPUMERRSR_EL1_VALID BIT(31)
+#define CPUMERRSR_EL1_FATAL BIT(63)
+#define L2MERRSR_EL1_VALID BIT(31)
+#define L2MERRSR_EL1_FATAL BIT(63)
+
+#define L1_I_TAG_RAM 0x00
+#define L1_I_DATA_RAM 0x01
+#define L1_D_TAG_RAM 0x08
+#define L1_D_DATA_RAM 0x09
+#define TLB_RAM 0x18
+
+#define MESSAGE_SIZE 64
+
+struct mem_err_synd_reg {
+ u64 cpu_mesr;
+ u64 l2_mesr;
+};
+
+static struct cpumask compat_mask;
+
+static void report_errors(struct edac_device_ctl_info *edac_ctl, int cpu,
+ struct mem_err_synd_reg *mesr)
+{
+ u64 cpu_mesr = mesr->cpu_mesr;
+ u64 l2_mesr = mesr->l2_mesr;
+ char msg[MESSAGE_SIZE];
+
+ if (cpu_mesr & CPUMERRSR_EL1_VALID) {
+ const char *str;
+ bool fatal = cpu_mesr & CPUMERRSR_EL1_FATAL;
+
+ switch (FIELD_GET(CPUMERRSR_EL1_RAMID, cpu_mesr)) {
+ case L1_I_TAG_RAM:
+ str = "L1-I Tag RAM";
+ break;
+ case L1_I_DATA_RAM:
+ str = "L1-I Data RAM";
+ break;
+ case L1_D_TAG_RAM:
+ str = "L1-D Tag RAM";
+ break;
+ case L1_D_DATA_RAM:
+ str = "L1-D Data RAM";
+ break;
+ case TLB_RAM:
+ str = "TLB RAM";
+ break;
+ default:
+ str = "Unspecified";
+ break;
+ }
+
+ snprintf(msg, MESSAGE_SIZE, "%s %s error(s) on CPU %d",
+ str, fatal ? "fatal" : "correctable", cpu);
+
+ if (fatal)
+ edac_device_handle_ue(edac_ctl, cpu, 0, msg);
+ else
+ edac_device_handle_ce(edac_ctl, cpu, 0, msg);
+ }
+
+ if (l2_mesr & L2MERRSR_EL1_VALID) {
+ bool fatal = l2_mesr & L2MERRSR_EL1_FATAL;
+
+ snprintf(msg, MESSAGE_SIZE, "L2 %s error(s) on CPU %d CPUID/WAY 0x%lx",
+ fatal ? "fatal" : "correctable", cpu,
+ FIELD_GET(L2MERRSR_EL1_CPUID_WAY, l2_mesr));
+ if (fatal)
+ edac_device_handle_ue(edac_ctl, cpu, 1, msg);
+ else
+ edac_device_handle_ce(edac_ctl, cpu, 1, msg);
+ }
+}
+
+static void read_errors(void *data)
+{
+ struct mem_err_synd_reg *mesr = data;
+
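+ /*
+ * The syndrome registers are write-to-clear: zero them once a valid
+ * error has been captured so the next poll sees fresh state.
+ */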
+ mesr->cpu_mesr = read_sysreg_s(SYS_CPUMERRSR_EL1);
+ if (mesr->cpu_mesr & CPUMERRSR_EL1_VALID) {
+ write_sysreg_s(0, SYS_CPUMERRSR_EL1);
+ isb();
+ }
+ mesr->l2_mesr = read_sysreg_s(SYS_L2MERRSR_EL1);
+ if (mesr->l2_mesr & L2MERRSR_EL1_VALID) {
+ write_sysreg_s(0, SYS_L2MERRSR_EL1);
+ isb();
+ }
+}
+
+static void a72_edac_check(struct edac_device_ctl_info *edac_ctl)
+{
+ struct mem_err_synd_reg mesr;
+ int cpu;
+
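+ /*
+ * The syndrome registers are banked per CPU, so read them via an
+ * IPI on each matching online CPU and report from this context.
+ */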
+ cpus_read_lock();
+ for_each_cpu_and(cpu, cpu_online_mask, &compat_mask) {
+ smp_call_function_single(cpu, read_errors, &mesr, true);
+ report_errors(edac_ctl, cpu, &mesr);
+ }
+ cpus_read_unlock();
+}
+
+static int a72_edac_probe(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *edac_ctl;
+ struct device *dev = &pdev->dev;
+ int rc;
+
+ edac_ctl = edac_device_alloc_ctl_info(0, "cpu",
+ num_possible_cpus(), "L", 2, 1,
+ edac_device_alloc_index());
+ if (!edac_ctl)
+ return -ENOMEM;
+
+ edac_ctl->edac_check = a72_edac_check;
+ edac_ctl->dev = dev;
+ edac_ctl->mod_name = dev_name(dev);
+ edac_ctl->dev_name = dev_name(dev);
+ edac_ctl->ctl_name = DRVNAME;
+ dev_set_drvdata(dev, edac_ctl);
+
+ rc = edac_device_add_device(edac_ctl);
+ if (rc)
+ goto out_dev;
+
+ return 0;
+
+out_dev:
+ edac_device_free_ctl_info(edac_ctl);
+
+ return rc;
+}
+
+static void a72_edac_remove(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *edac_ctl = dev_get_drvdata(&pdev->dev);
+
+ edac_device_del_device(edac_ctl->dev);
+ edac_device_free_ctl_info(edac_ctl);
+}
+
+static const struct of_device_id cortex_arm64_edac_of_match[] = {
+ { .compatible = "arm,cortex-a72" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cortex_arm64_edac_of_match);
+
+static struct platform_driver a72_edac_driver = {
+ .probe = a72_edac_probe,
+ .remove = a72_edac_remove,
+ .driver = {
+ .name = DRVNAME,
+ },
+};
+
+static struct platform_device *a72_pdev;
+
+static int __init a72_edac_driver_init(void)
+{
+ int cpu;
+
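+ /* Collect the A72 CPUs whose DT nodes have EDAC reporting enabled. */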
+ for_each_possible_cpu(cpu) {
+ struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);
+ if (np) {
+ if (of_match_node(cortex_arm64_edac_of_match, np) &&
+ of_property_read_bool(np, "edac-enabled")) {
+ cpumask_set_cpu(cpu, &compat_mask);
+ }
+ } else {
+ pr_warn("failed to find device node for CPU %d\n", cpu);
+ }
+ }
+
+ if (cpumask_empty(&compat_mask))
+ return 0;
+
+ a72_pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0);
+ if (IS_ERR(a72_pdev)) {
+ pr_err("failed to register A72 EDAC device\n");
+ return PTR_ERR(a72_pdev);
+ }
+
+ return platform_driver_register(&a72_edac_driver);
+}
+
+static void __exit a72_edac_driver_exit(void)
+{
+ platform_device_unregister(a72_pdev);
+ platform_driver_unregister(&a72_edac_driver);
+}
+
+module_init(a72_edac_driver_init);
+module_exit(a72_edac_driver_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
+MODULE_DESCRIPTION("Cortex A72 L1 and L2 cache EDAC driver");
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index cae52c654a15..103b2c2eba2a 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -128,7 +128,6 @@ static ssize_t altr_sdr_mc_err_inject_write(struct file *file,
ptemp = dma_alloc_coherent(mci->pdev, 16, &dma_handle, GFP_KERNEL);
if (!ptemp) {
- dma_free_coherent(mci->pdev, 16, ptemp, dma_handle);
edac_printk(KERN_ERR, EDAC_MC,
"Inject: Buffer Allocation error\n");
return -ENOMEM;
@@ -2131,8 +2130,8 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
edac->irq_chip.name = pdev->dev.of_node->name;
edac->irq_chip.irq_mask = a10_eccmgr_irq_mask;
edac->irq_chip.irq_unmask = a10_eccmgr_irq_unmask;
- edac->domain = irq_domain_create_linear(of_fwnode_handle(pdev->dev.of_node),
- 64, &a10_eccmgr_ic_ops, edac);
+ edac->domain = irq_domain_create_linear(dev_fwnode(&pdev->dev), 64, &a10_eccmgr_ic_ops,
+ edac);
if (!edac->domain) {
dev_err(&pdev->dev, "Error adding IRQ domain\n");
return -ENOMEM;
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 07f1e9dc1ca7..2f6ab783bf20 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -3923,6 +3923,26 @@ static int per_family_init(struct amd64_pvt *pvt)
pvt->ctl_name = "F1Ah_M40h";
pvt->flags.zn_regs_v2 = 1;
break;
+ case 0x50 ... 0x57:
+ pvt->ctl_name = "F1Ah_M50h";
+ pvt->max_mcs = 16;
+ pvt->flags.zn_regs_v2 = 1;
+ break;
+ case 0x90 ... 0x9f:
+ pvt->ctl_name = "F1Ah_M90h";
+ pvt->max_mcs = 8;
+ pvt->flags.zn_regs_v2 = 1;
+ break;
+ case 0xa0 ... 0xaf:
+ pvt->ctl_name = "F1Ah_MA0h";
+ pvt->max_mcs = 8;
+ pvt->flags.zn_regs_v2 = 1;
+ break;
+ case 0xc0 ... 0xc7:
+ pvt->ctl_name = "F1Ah_MC0h";
+ pvt->max_mcs = 16;
+ pvt->flags.zn_regs_v2 = 1;
+ break;
}
break;
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 17228d07de4c..d70b8a8d0b09 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -96,7 +96,7 @@
/* Hardware limit on ChipSelect rows per MC and processors per system */
#define NUM_CHIPSELECTS 8
#define DRAM_RANGES 8
-#define NUM_CONTROLLERS 12
+#define NUM_CONTROLLERS 16
#define ON true
#define OFF false
diff --git a/drivers/edac/ecs.c b/drivers/edac/ecs.c
index 51c451c7f0f0..51c451c7f0f0 100755..100644
--- a/drivers/edac/ecs.c
+++ b/drivers/edac/ecs.c
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 0f338adf7d93..8689631f1905 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -305,6 +305,14 @@ DEVICE_CHANNEL(ch10_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 10);
DEVICE_CHANNEL(ch11_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 11);
+DEVICE_CHANNEL(ch12_dimm_label, S_IRUGO | S_IWUSR,
+ channel_dimm_label_show, channel_dimm_label_store, 12);
+DEVICE_CHANNEL(ch13_dimm_label, S_IRUGO | S_IWUSR,
+ channel_dimm_label_show, channel_dimm_label_store, 13);
+DEVICE_CHANNEL(ch14_dimm_label, S_IRUGO | S_IWUSR,
+ channel_dimm_label_show, channel_dimm_label_store, 14);
+DEVICE_CHANNEL(ch15_dimm_label, S_IRUGO | S_IWUSR,
+ channel_dimm_label_show, channel_dimm_label_store, 15);
/* Total possible dynamic DIMM Label attribute file table */
static struct attribute *dynamic_csrow_dimm_attr[] = {
@@ -320,6 +328,10 @@ static struct attribute *dynamic_csrow_dimm_attr[] = {
&dev_attr_legacy_ch9_dimm_label.attr.attr,
&dev_attr_legacy_ch10_dimm_label.attr.attr,
&dev_attr_legacy_ch11_dimm_label.attr.attr,
+ &dev_attr_legacy_ch12_dimm_label.attr.attr,
+ &dev_attr_legacy_ch13_dimm_label.attr.attr,
+ &dev_attr_legacy_ch14_dimm_label.attr.attr,
+ &dev_attr_legacy_ch15_dimm_label.attr.attr,
NULL
};
@@ -348,6 +360,14 @@ DEVICE_CHANNEL(ch10_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 10);
DEVICE_CHANNEL(ch11_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 11);
+DEVICE_CHANNEL(ch12_ce_count, S_IRUGO,
+ channel_ce_count_show, NULL, 12);
+DEVICE_CHANNEL(ch13_ce_count, S_IRUGO,
+ channel_ce_count_show, NULL, 13);
+DEVICE_CHANNEL(ch14_ce_count, S_IRUGO,
+ channel_ce_count_show, NULL, 14);
+DEVICE_CHANNEL(ch15_ce_count, S_IRUGO,
+ channel_ce_count_show, NULL, 15);
/* Total possible dynamic ce_count attribute file table */
static struct attribute *dynamic_csrow_ce_count_attr[] = {
@@ -363,6 +383,10 @@ static struct attribute *dynamic_csrow_ce_count_attr[] = {
&dev_attr_legacy_ch9_ce_count.attr.attr,
&dev_attr_legacy_ch10_ce_count.attr.attr,
&dev_attr_legacy_ch11_ce_count.attr.attr,
+ &dev_attr_legacy_ch12_ce_count.attr.attr,
+ &dev_attr_legacy_ch13_ce_count.attr.attr,
+ &dev_attr_legacy_ch14_ce_count.attr.attr,
+ &dev_attr_legacy_ch15_ce_count.attr.attr,
NULL
};
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index bf4171ac191d..2010a47149f4 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -468,17 +468,18 @@ static int i10nm_get_imc_num(struct res_config *cfg)
return -ENODEV;
}
- if (imc_num > I10NM_NUM_DDR_IMC) {
- i10nm_printk(KERN_ERR, "Need to make I10NM_NUM_DDR_IMC >= %d\n", imc_num);
- return -EINVAL;
- }
-
if (cfg->ddr_imc_num != imc_num) {
/*
- * Store the number of present DDR memory controllers.
+ * Update the configuration data to reflect the number of
+ * present DDR memory controllers.
*/
cfg->ddr_imc_num = imc_num;
edac_dbg(2, "Set DDR MC number: %d", imc_num);
+
+ /* Release and reallocate skx_dev list with the updated number. */
+ skx_remove();
+ if (skx_get_all_bus_mappings(cfg, &i10nm_edac_list) <= 0)
+ return -ENODEV;
}
return 0;
@@ -1057,6 +1058,15 @@ static bool i10nm_check_ecc(struct skx_imc *imc, int chan)
return !!GET_BITFIELD(mcmtr, 2, 2);
}
+static bool i10nm_channel_disabled(struct skx_imc *imc, int chan)
+{
+ u32 mcmtr = I10NM_GET_MCMTR(imc, chan);
+
+ edac_dbg(1, "mc%d ch%d mcmtr reg %x\n", imc->mc, chan, mcmtr);
+
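+ /* All-ones means the register read failed; bit 18 marks the channel disabled. */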
+ return (mcmtr == ~0 || GET_BITFIELD(mcmtr, 18, 18));
+}
+
static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
struct res_config *cfg)
{
@@ -1070,6 +1080,11 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
if (!imc->mbase)
continue;
+ if (i10nm_channel_disabled(imc, i)) {
+ edac_dbg(1, "mc%d ch%d is disabled.\n", imc->mc, i);
+ continue;
+ }
+
ndimms = 0;
if (res_cfg->type != GNR)
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index 5c1fa1c0d12e..5a080ab65476 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -99,6 +99,8 @@
/* Alder Lake-S */
#define PCI_DEVICE_ID_INTEL_IE31200_ADL_S_1 0x4660
+#define PCI_DEVICE_ID_INTEL_IE31200_ADL_S_2 0x4668 /* 8P+4E, e.g. i7-12700K */
+#define PCI_DEVICE_ID_INTEL_IE31200_ADL_S_3 0x4648 /* 6P+4E, e.g. i5-12600K */
/* Bartlett Lake-S */
#define PCI_DEVICE_ID_INTEL_IE31200_BTL_S_1 0x4639
@@ -761,6 +763,8 @@ static const struct pci_device_id ie31200_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_6), (kernel_ulong_t)&rpl_s_cfg},
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_HX_1), (kernel_ulong_t)&rpl_s_cfg},
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_ADL_S_1), (kernel_ulong_t)&rpl_s_cfg},
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_ADL_S_2), (kernel_ulong_t)&rpl_s_cfg},
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_ADL_S_3), (kernel_ulong_t)&rpl_s_cfg},
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_1), (kernel_ulong_t)&rpl_s_cfg},
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_2), (kernel_ulong_t)&rpl_s_cfg},
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_3), (kernel_ulong_t)&rpl_s_cfg},
diff --git a/drivers/edac/mem_repair.c b/drivers/edac/mem_repair.c
index 108d69209146..108d69209146 100755..100644
--- a/drivers/edac/mem_repair.c
+++ b/drivers/edac/mem_repair.c
diff --git a/drivers/edac/scrub.c b/drivers/edac/scrub.c
index f9d02af2fc3a..f9d02af2fc3a 100755..100644
--- a/drivers/edac/scrub.c
+++ b/drivers/edac/scrub.c
diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
index 29897b21fb8e..078ddf95cc6e 100644
--- a/drivers/edac/skx_base.c
+++ b/drivers/edac/skx_base.c
@@ -33,6 +33,15 @@ static unsigned int nvdimm_count;
#define MASK26 0x3FFFFFF /* Mask for 2^26 */
#define MASK29 0x1FFFFFFF /* Mask for 2^29 */
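+/* Skylake-X geometry: two iMCs per socket, three channels each, two DIMMs per channel. */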
+static struct res_config skx_cfg = {
+ .type = SKX,
+ .decs_did = 0x2016,
+ .busno_cfg_offset = 0xcc,
+ .ddr_imc_num = 2,
+ .ddr_chan_num = 3,
+ .ddr_dimm_num = 2,
+};
+
static struct skx_dev *get_skx_dev(struct pci_bus *bus, u8 idx)
{
struct skx_dev *d;
@@ -52,7 +61,7 @@ enum munittype {
struct munit {
u16 did;
- u16 devfn[SKX_NUM_IMC];
+ u16 devfn[2];
u8 busidx;
u8 per_socket;
enum munittype mtype;
@@ -89,11 +98,11 @@ static int get_all_munits(const struct munit *m)
if (!pdev)
break;
ndev++;
- if (m->per_socket == SKX_NUM_IMC) {
- for (i = 0; i < SKX_NUM_IMC; i++)
+ if (m->per_socket == skx_cfg.ddr_imc_num) {
+ for (i = 0; i < skx_cfg.ddr_imc_num; i++)
if (m->devfn[i] == pdev->devfn)
break;
- if (i == SKX_NUM_IMC)
+ if (i == skx_cfg.ddr_imc_num)
goto fail;
}
d = get_skx_dev(pdev->bus, m->busidx);
@@ -157,12 +166,6 @@ fail:
return -ENODEV;
}
-static struct res_config skx_cfg = {
- .type = SKX,
- .decs_did = 0x2016,
- .busno_cfg_offset = 0xcc,
-};
-
static const struct x86_cpu_id skx_cpuids[] = {
X86_MATCH_VFM(INTEL_SKYLAKE_X, &skx_cfg),
{ }
@@ -186,11 +189,11 @@ static int skx_get_dimm_config(struct mem_ctl_info *mci, struct res_config *cfg)
/* Only the mcmtr on the first channel is effective */
pci_read_config_dword(imc->chan[0].cdev, 0x87c, &mcmtr);
- for (i = 0; i < SKX_NUM_CHANNELS; i++) {
+ for (i = 0; i < cfg->ddr_chan_num; i++) {
ndimms = 0;
pci_read_config_dword(imc->chan[i].cdev, 0x8C, &amap);
pci_read_config_dword(imc->chan[i].cdev, 0x400, &mcddrtcfg);
- for (j = 0; j < SKX_NUM_DIMMS; j++) {
+ for (j = 0; j < cfg->ddr_dimm_num; j++) {
dimm = edac_get_dimm(mci, i, j, 0);
pci_read_config_dword(imc->chan[i].cdev,
0x80 + 4 * j, &mtr);
@@ -620,6 +623,7 @@ static int __init skx_init(void)
return -ENODEV;
cfg = (struct res_config *)id->driver_data;
+ skx_set_res_cfg(cfg);
rc = skx_get_hi_lo(0x2034, off, &skx_tolm, &skx_tohm);
if (rc)
@@ -652,10 +656,13 @@ static int __init skx_init(void)
goto fail;
edac_dbg(2, "src_id = %d\n", src_id);
- for (i = 0; i < SKX_NUM_IMC; i++) {
+ for (i = 0; i < cfg->ddr_imc_num; i++) {
d->imc[i].mc = mc++;
d->imc[i].lmc = i;
d->imc[i].src_id = src_id;
+ d->imc[i].num_channels = cfg->ddr_chan_num;
+ d->imc[i].num_dimms = cfg->ddr_dimm_num;
+
rc = skx_register_mci(&d->imc[i], d->imc[i].chan[0].cdev,
"Skylake Socket", EDAC_MOD_STR,
skx_get_dimm_config, cfg);
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index 39c733dbc5b9..724842f512ac 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -14,9 +14,11 @@
* Copyright (c) 2018, Intel Corporation.
*/
+#include <linux/topology.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/adxl.h>
+#include <linux/overflow.h>
#include <acpi/nfit.h>
#include <asm/mce.h>
#include <asm/uv/uv.h>
@@ -130,8 +132,8 @@ static void skx_init_mc_mapping(struct skx_dev *d)
* the logical indices of the memory controllers enumerated by the
* EDAC driver.
*/
- for (int i = 0; i < NUM_IMC; i++)
- d->mc_mapping[i] = i;
+ for (int i = 0; i < d->num_imc; i++)
+ d->imc[i].mc_mapping = i;
}
void skx_set_mc_mapping(struct skx_dev *d, u8 pmc, u8 lmc)
@@ -139,22 +141,28 @@ void skx_set_mc_mapping(struct skx_dev *d, u8 pmc, u8 lmc)
edac_dbg(0, "Set the mapping of mc phy idx to logical idx: %02d -> %02d\n",
pmc, lmc);
- d->mc_mapping[pmc] = lmc;
+ d->imc[lmc].mc_mapping = pmc;
}
EXPORT_SYMBOL_GPL(skx_set_mc_mapping);
-static u8 skx_get_mc_mapping(struct skx_dev *d, u8 pmc)
+static int skx_get_mc_mapping(struct skx_dev *d, u8 pmc)
{
- edac_dbg(0, "Get the mapping of mc phy idx to logical idx: %02d -> %02d\n",
- pmc, d->mc_mapping[pmc]);
+ for (int lmc = 0; lmc < d->num_imc; lmc++) {
+ if (d->imc[lmc].mc_mapping == pmc) {
+ edac_dbg(0, "Get the mapping of mc phy idx to logical idx: %02d -> %02d\n",
+ pmc, lmc);
- return d->mc_mapping[pmc];
+ return lmc;
+ }
+ }
+
+ return -1;
}
static bool skx_adxl_decode(struct decoded_addr *res, enum error_source err_src)
{
+ int i, lmc, len = 0;
struct skx_dev *d;
- int i, len = 0;
if (res->addr >= skx_tohm || (res->addr >= skx_tolm &&
res->addr < BIT_ULL(32))) {
@@ -200,7 +208,7 @@ static bool skx_adxl_decode(struct decoded_addr *res, enum error_source err_src)
res->cs = (int)adxl_values[component_indices[INDEX_CS]];
}
- if (res->imc > NUM_IMC - 1 || res->imc < 0) {
+ if (res->imc < 0) {
skx_printk(KERN_ERR, "Bad imc %d\n", res->imc);
return false;
}
@@ -218,7 +226,13 @@ static bool skx_adxl_decode(struct decoded_addr *res, enum error_source err_src)
return false;
}
- res->imc = skx_get_mc_mapping(d, res->imc);
+ lmc = skx_get_mc_mapping(d, res->imc);
+ if (lmc < 0) {
+ skx_printk(KERN_ERR, "No lmc for imc %d\n", res->imc);
+ return false;
+ }
+
+ res->imc = lmc;
for (i = 0; i < adxl_component_count; i++) {
if (adxl_values[i] == ~0x0ull)
@@ -265,7 +279,7 @@ static int skx_get_pkg_id(struct skx_dev *d, u8 *id)
struct cpuinfo_x86 *c = &cpu_data(cpu);
if (c->initialized && cpu_to_node(cpu) == node) {
- *id = c->topo.pkg_id;
+ *id = topology_physical_package_id(cpu);
return 0;
}
}
@@ -320,10 +334,10 @@ static int get_width(u32 mtr)
*/
int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list)
{
+ int ndev = 0, imc_num = cfg->ddr_imc_num + cfg->hbm_imc_num;
struct pci_dev *pdev, *prev;
struct skx_dev *d;
u32 reg;
- int ndev = 0;
prev = NULL;
for (;;) {
@@ -331,7 +345,7 @@ int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list)
if (!pdev)
break;
ndev++;
- d = kzalloc(sizeof(*d), GFP_KERNEL);
+ d = kzalloc(struct_size(d, imc, imc_num), GFP_KERNEL);
if (!d) {
pci_dev_put(pdev);
return -ENOMEM;
@@ -354,8 +368,10 @@ int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list)
d->seg = GET_BITFIELD(reg, 16, 23);
}
- edac_dbg(2, "busses: 0x%x, 0x%x, 0x%x, 0x%x\n",
- d->bus[0], d->bus[1], d->bus[2], d->bus[3]);
+ d->num_imc = imc_num;
+
+ edac_dbg(2, "busses: 0x%x, 0x%x, 0x%x, 0x%x, imcs %d\n",
+ d->bus[0], d->bus[1], d->bus[2], d->bus[3], imc_num);
list_add_tail(&d->list, &dev_edac_list);
prev = pdev;
@@ -541,10 +557,10 @@ int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev,
/* Allocate a new MC control structure */
layers[0].type = EDAC_MC_LAYER_CHANNEL;
- layers[0].size = NUM_CHANNELS;
+ layers[0].size = imc->num_channels;
layers[0].is_virt_csrow = false;
layers[1].type = EDAC_MC_LAYER_SLOT;
- layers[1].size = NUM_DIMMS;
+ layers[1].size = imc->num_dimms;
layers[1].is_virt_csrow = true;
mci = edac_mc_alloc(imc->mc, ARRAY_SIZE(layers), layers,
sizeof(struct skx_pvt));
@@ -784,7 +800,7 @@ void skx_remove(void)
list_for_each_entry_safe(d, tmp, &dev_edac_list, list) {
list_del(&d->list);
- for (i = 0; i < NUM_IMC; i++) {
+ for (i = 0; i < d->num_imc; i++) {
if (d->imc[i].mci)
skx_unregister_mci(&d->imc[i]);
@@ -794,7 +810,7 @@ void skx_remove(void)
if (d->imc[i].mbase)
iounmap(d->imc[i].mbase);
- for (j = 0; j < NUM_CHANNELS; j++) {
+ for (j = 0; j < d->imc[i].num_channels; j++) {
if (d->imc[i].chan[j].cdev)
pci_dev_put(d->imc[i].chan[j].cdev);
}
diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
index ec4966f7ea40..73ba89786cdf 100644
--- a/drivers/edac/skx_common.h
+++ b/drivers/edac/skx_common.h
@@ -29,23 +29,18 @@
#define GET_BITFIELD(v, lo, hi) \
(((v) & GENMASK_ULL((hi), (lo))) >> (lo))
-#define SKX_NUM_IMC 2 /* Memory controllers per socket */
#define SKX_NUM_CHANNELS 3 /* Channels per memory controller */
#define SKX_NUM_DIMMS 2 /* Max DIMMS per channel */
-#define I10NM_NUM_DDR_IMC 12
#define I10NM_NUM_DDR_CHANNELS 2
#define I10NM_NUM_DDR_DIMMS 2
-#define I10NM_NUM_HBM_IMC 16
#define I10NM_NUM_HBM_CHANNELS 2
#define I10NM_NUM_HBM_DIMMS 1
-#define I10NM_NUM_IMC (I10NM_NUM_DDR_IMC + I10NM_NUM_HBM_IMC)
#define I10NM_NUM_CHANNELS MAX(I10NM_NUM_DDR_CHANNELS, I10NM_NUM_HBM_CHANNELS)
#define I10NM_NUM_DIMMS MAX(I10NM_NUM_DDR_DIMMS, I10NM_NUM_HBM_DIMMS)
-#define NUM_IMC MAX(SKX_NUM_IMC, I10NM_NUM_IMC)
#define NUM_CHANNELS MAX(SKX_NUM_CHANNELS, I10NM_NUM_CHANNELS)
#define NUM_DIMMS MAX(SKX_NUM_DIMMS, I10NM_NUM_DIMMS)
@@ -134,16 +129,7 @@ struct skx_dev {
struct pci_dev *uracu; /* for i10nm CPU */
struct pci_dev *pcu_cr3; /* for HBM memory detection */
u32 mcroute;
- /*
- * Some server BIOS may hide certain memory controllers, and the
- * EDAC driver skips those hidden memory controllers. However, the
- * ADXL still decodes memory error address using physical memory
- * controller indices. The mapping table is used to convert the
- * physical indices (reported by ADXL) to the logical indices
- * (used the EDAC driver) of present memory controllers during the
- * error handling process.
- */
- u8 mc_mapping[NUM_IMC];
+ int num_imc;
struct skx_imc {
struct mem_ctl_info *mci;
struct pci_dev *mdev; /* for i10nm CPU */
@@ -155,6 +141,16 @@ struct skx_dev {
u8 mc; /* system wide mc# */
u8 lmc; /* socket relative mc# */
u8 src_id;
+ /*
+ * Some server BIOS may hide certain memory controllers, and the
+ * EDAC driver skips those hidden memory controllers. However, the
+ * ADXL still decodes memory error address using physical memory
+ * controller indices. The mapping table is used to convert the
+ * physical indices (reported by ADXL) to the logical indices
+ * (used the EDAC driver) of present memory controllers during the
+ * error handling process.
+ */
+ u8 mc_mapping;
struct skx_channel {
struct pci_dev *cdev;
struct pci_dev *edev;
@@ -171,7 +167,7 @@ struct skx_dev {
u8 colbits;
} dimms[NUM_DIMMS];
} chan[NUM_CHANNELS];
- } imc[NUM_IMC];
+ } imc[];
};
struct skx_pvt {
diff --git a/drivers/edac/versalnet_edac.c b/drivers/edac/versalnet_edac.c
new file mode 100644
index 000000000000..7c5db8bf0595
--- /dev/null
+++ b/drivers/edac/versalnet_edac.c
@@ -0,0 +1,960 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Versal NET memory controller driver
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/cdx/edac_cdx_pcol.h>
+#include <linux/edac.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/ras.h>
+#include <linux/remoteproc.h>
+#include <linux/rpmsg.h>
+#include <linux/sizes.h>
+#include <ras/ras_event.h>
+
+#include "edac_module.h"
+
+/* Granularity of reported error in bytes */
+#define MC5_ERR_GRAIN 1
+#define MC_GET_DDR_CONFIG_IN_LEN 4
+
+#define MC5_IRQ_CE_MASK GENMASK(18, 15)
+#define MC5_IRQ_UE_MASK GENMASK(14, 11)
+
+#define MC5_RANK_1_MASK GENMASK(11, 6)
+#define MASK_24 GENMASK(29, 24)
+#define MASK_0 GENMASK(5, 0)
+
+#define MC5_LRANK_1_MASK GENMASK(11, 6)
+#define MC5_LRANK_2_MASK GENMASK(17, 12)
+#define MC5_BANK1_MASK GENMASK(11, 6)
+#define MC5_GRP_0_MASK GENMASK(17, 12)
+#define MC5_GRP_1_MASK GENMASK(23, 18)
+
+#define MC5_REGHI_ROW 7
+#define MC5_EACHBIT 1
+#define MC5_ERR_TYPE_CE 0
+#define MC5_ERR_TYPE_UE 1
+#define MC5_HIGH_MEM_EN BIT(20)
+#define MC5_MEM_MASK GENMASK(19, 0)
+#define MC5_X16_BASE 256
+#define MC5_X16_ECC 32
+#define MC5_X16_SIZE (MC5_X16_BASE + MC5_X16_ECC)
+#define MC5_X32_SIZE 576
+#define MC5_HIMEM_BASE (256 * SZ_1M)
+#define MC5_ILC_HIMEM_EN BIT(28)
+#define MC5_ILC_MEM GENMASK(27, 0)
+#define MC5_INTERLEAVE_SEL GENMASK(3, 0)
+#define MC5_BUS_WIDTH_MASK GENMASK(19, 18)
+#define MC5_NUM_CHANS_MASK BIT(17)
+#define MC5_RANK_MASK GENMASK(15, 14)
+
+#define ERROR_LEVEL 2
+#define ERROR_ID 3
+#define TOTAL_ERR_LENGTH 5
+#define MSG_ERR_OFFSET 8
+#define MSG_ERR_LENGTH 9
+#define ERROR_DATA 10
+#define MCDI_RESPONSE 0xFF
+
+#define REG_MAX 152
+#define ADEC_MAX 152
+#define NUM_CONTROLLERS 8
+#define REGS_PER_CONTROLLER 19
+#define ADEC_NUM 19
+#define BUFFER_SZ 80
+
+#define XDDR5_BUS_WIDTH_64 0
+#define XDDR5_BUS_WIDTH_32 1
+#define XDDR5_BUS_WIDTH_16 2
+
+/**
+ * union ecc_error_info - ECC error log information.
+ * @burstpos: Burst position.
+ * @lrank: Logical Rank number.
+ * @rank: Rank number.
+ * @group: Group number.
+ * @bank: Bank number.
+ * @col: Column number.
+ * @row: Row number.
+ * @rowhi: Row number higher bits.
+ * @i: Combined ECC error vector containing encoded values of burst position,
+ * rank, bank, column, and row information.
+ */
+union ecc_error_info {
+ struct {
+ u32 burstpos:3;
+ u32 lrank:4;
+ u32 rank:2;
+ u32 group:3;
+ u32 bank:2;
+ u32 col:11;
+ u32 row:7;
+ u32 rowhi;
+ };
+ u64 i;
+} __packed;
+
+/* Row and column bit positions in the address decoder (ADEC) registers. */
+union row_col_mapping {
+ struct {
+ u32 row0:6;
+ u32 row1:6;
+ u32 row2:6;
+ u32 row3:6;
+ u32 row4:6;
+ u32 reserved:2;
+ };
+ struct {
+ u32 col1:6;
+ u32 col2:6;
+ u32 col3:6;
+ u32 col4:6;
+ u32 col5:6;
+ u32 reservedcol:2;
+ };
+ u32 i;
+} __packed;
+
+/**
+ * struct ecc_status - ECC status information to report.
+ * @ceinfo: Correctable errors.
+ * @ueinfo: Uncorrected errors.
+ * @channel: Channel number.
+ * @error_type: Error type.
+ */
+struct ecc_status {
+ union ecc_error_info ceinfo[2];
+ union ecc_error_info ueinfo[2];
+ u8 channel;
+ u8 error_type;
+};
+
+/**
+ * struct mc_priv - DDR memory controller private instance data.
+ * @message: Buffer for framing the event specific info.
+ * @stat: ECC status information.
+ * @error_id: The error id.
+ * @error_level: The error level.
+ * @dwidth: Width of data bus excluding ECC bits.
+ * @part_len: Length of the partial message data received so far.
+ * @regs: The registers sent on the rpmsg.
+ * @adec: Address decode registers.
+ * @mci: Memory controller interface.
+ * @ept: rpmsg endpoint.
+ * @mcdi: The mcdi handle.
+ */
+struct mc_priv {
+ char message[256];
+ struct ecc_status stat;
+ u32 error_id;
+ u32 error_level;
+ u32 dwidth;
+ u32 part_len;
+ u32 regs[REG_MAX];
+ u32 adec[ADEC_MAX];
+ struct mem_ctl_info *mci[NUM_CONTROLLERS];
+ struct rpmsg_endpoint *ept;
+ struct cdx_mcdi *mcdi;
+};
+
+/*
+ * Address decoder (ADEC) registers to match the order in which the register
+ * information is received from the firmware.
+ */
+enum adec_info {
+ CONF = 0,
+ ADEC0,
+ ADEC1,
+ ADEC2,
+ ADEC3,
+ ADEC4,
+ ADEC5,
+ ADEC6,
+ ADEC7,
+ ADEC8,
+ ADEC9,
+ ADEC10,
+ ADEC11,
+ ADEC12,
+ ADEC13,
+ ADEC14,
+ ADEC15,
+ ADEC16,
+ ADECILC,
+};
+
+enum reg_info {
+ ISR = 0,
+ IMR,
+ ECCR0_ERR_STATUS,
+ ECCR0_ADDR_LO,
+ ECCR0_ADDR_HI,
+ ECCR0_DATA_LO,
+ ECCR0_DATA_HI,
+ ECCR0_PAR,
+ ECCR1_ERR_STATUS,
+ ECCR1_ADDR_LO,
+ ECCR1_ADDR_HI,
+ ECCR1_DATA_LO,
+ ECCR1_DATA_HI,
+ ECCR1_PAR,
+ XMPU_ERR,
+ XMPU_ERR_ADDR_L0,
+ XMPU_ERR_ADDR_HI,
+ XMPU_ERR_AXI_ID,
+ ADEC_CHK_ERR_LOG,
+};
+
+static bool get_ddr_info(u32 *error_data, struct mc_priv *priv)
+{
+ u32 reglo, reghi, parity, eccr0_val, eccr1_val, isr;
+ struct ecc_status *p;
+
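+ /* Nothing to report unless the interrupt status flags a CE or UE. */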
+ isr = error_data[ISR];
+
+ if (!(isr & (MC5_IRQ_UE_MASK | MC5_IRQ_CE_MASK)))
+ return false;
+
+ eccr0_val = error_data[ECCR0_ERR_STATUS];
+ eccr1_val = error_data[ECCR1_ERR_STATUS];
+
+ if (!eccr0_val && !eccr1_val)
+ return false;
+
+ p = &priv->stat;
+
+ if (!eccr0_val)
+ p->channel = 1;
+ else
+ p->channel = 0;
+
+ reglo = error_data[ECCR0_ADDR_LO];
+ reghi = error_data[ECCR0_ADDR_HI];
+ if (isr & MC5_IRQ_CE_MASK)
+ p->ceinfo[0].i = reglo | (u64)reghi << 32;
+ else if (isr & MC5_IRQ_UE_MASK)
+ p->ueinfo[0].i = reglo | (u64)reghi << 32;
+
+ parity = error_data[ECCR0_PAR];
+ edac_dbg(2, "ERR DATA: 0x%08X%08X PARITY: 0x%08X\n",
+ reghi, reglo, parity);
+
+ reglo = error_data[ECCR1_ADDR_LO];
+ reghi = error_data[ECCR1_ADDR_HI];
+ if (isr & MC5_IRQ_CE_MASK)
+ p->ceinfo[1].i = reglo | (u64)reghi << 32;
+ else if (isr & MC5_IRQ_UE_MASK)
+ p->ueinfo[1].i = reglo | (u64)reghi << 32;
+
+ parity = error_data[ECCR1_PAR];
+ edac_dbg(2, "ERR DATA: 0x%08X%08X PARITY: 0x%08X\n",
+ reghi, reglo, parity);
+
+ return true;
+}
+
+/**
+ * convert_to_physical - Convert @error_data to a physical address.
+ * @priv: DDR memory controller private instance data.
+ * @pinf: ECC error info structure.
+ * @controller: Controller number of the MC5
+ * @error_data: the DDRMC5 ADEC address decoder register data
+ *
+ * Return: physical address of the DDR memory.
+ */
+static unsigned long convert_to_physical(struct mc_priv *priv,
+ union ecc_error_info pinf,
+ int controller, int *error_data)
+{
+ u32 row, blk, rsh_req_addr, interleave, ilc_base_ctrl_add, ilc_himem_en, reg, offset;
+ u64 high_mem_base, high_mem_offset, low_mem_offset, ilcmem_base;
+ unsigned long err_addr = 0, addr;
+ union row_col_mapping cols;
+ union row_col_mapping rows;
+ u32 col_bit_0;
+
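+ /*
+ * The ADEC registers give, for each DRAM address bit (row, column,
+ * bank, group, rank), its position in the system address. Rebuild
+ * the address by placing every decoded bit at the position named by
+ * the matching ADEC field.
+ */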
+ row = pinf.rowhi << MC5_REGHI_ROW | pinf.row;
+ offset = controller * ADEC_NUM;
+
+ reg = error_data[ADEC6];
+ rows.i = reg;
+ err_addr |= (row & BIT(0)) << rows.row0;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row1;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row2;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row3;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row4;
+ row >>= MC5_EACHBIT;
+
+ reg = error_data[ADEC7];
+ rows.i = reg;
+ err_addr |= (row & BIT(0)) << rows.row0;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row1;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row2;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row3;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row4;
+ row >>= MC5_EACHBIT;
+
+ reg = error_data[ADEC8];
+ rows.i = reg;
+ err_addr |= (row & BIT(0)) << rows.row0;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row1;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row2;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row3;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row4;
+
+ reg = error_data[ADEC9];
+ rows.i = reg;
+
+ err_addr |= (row & BIT(0)) << rows.row0;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row1;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row2;
+ row >>= MC5_EACHBIT;
+
+ col_bit_0 = FIELD_GET(MASK_24, error_data[ADEC9]);
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << col_bit_0;
+
+ cols.i = error_data[ADEC10];
+ err_addr |= (pinf.col & 1) << cols.col1;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col2;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col3;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col4;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col5;
+ pinf.col >>= 1;
+
+ cols.i = error_data[ADEC11];
+ err_addr |= (pinf.col & 1) << cols.col1;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col2;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col3;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col4;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col5;
+ pinf.col >>= 1;
+
+ reg = error_data[ADEC12];
+ err_addr |= (pinf.bank & BIT(0)) << (reg & MASK_0);
+ pinf.bank >>= MC5_EACHBIT;
+ err_addr |= (pinf.bank & BIT(0)) << FIELD_GET(MC5_BANK1_MASK, reg);
+ pinf.bank >>= MC5_EACHBIT;
+
+ err_addr |= (pinf.group & BIT(0)) << FIELD_GET(MC5_GRP_0_MASK, reg);
+ pinf.group >>= MC5_EACHBIT;
+ err_addr |= (pinf.group & BIT(0)) << FIELD_GET(MC5_GRP_1_MASK, reg);
+ pinf.group >>= MC5_EACHBIT;
+ err_addr |= (pinf.group & BIT(0)) << FIELD_GET(MASK_24, reg);
+ pinf.group >>= MC5_EACHBIT;
+
+ reg = error_data[ADEC4];
+ err_addr |= (pinf.rank & BIT(0)) << (reg & MASK_0);
+ pinf.rank >>= MC5_EACHBIT;
+ err_addr |= (pinf.rank & BIT(0)) << FIELD_GET(MC5_RANK_1_MASK, reg);
+ pinf.rank >>= MC5_EACHBIT;
+
+ reg = error_data[ADEC5];
+ err_addr |= (pinf.lrank & BIT(0)) << (reg & MASK_0);
+ pinf.lrank >>= MC5_EACHBIT;
+ err_addr |= (pinf.lrank & BIT(0)) << FIELD_GET(MC5_LRANK_1_MASK, reg);
+ pinf.lrank >>= MC5_EACHBIT;
+ err_addr |= (pinf.lrank & BIT(0)) << FIELD_GET(MC5_LRANK_2_MASK, reg);
+ pinf.lrank >>= MC5_EACHBIT;
+ err_addr |= (pinf.lrank & BIT(0)) << FIELD_GET(MASK_24, reg);
+ pinf.lrank >>= MC5_EACHBIT;
+
+ high_mem_base = (priv->adec[ADEC2 + offset] & MC5_MEM_MASK) * MC5_HIMEM_BASE;
+ interleave = priv->adec[ADEC13 + offset] & MC5_INTERLEAVE_SEL;
+
+ high_mem_offset = priv->adec[ADEC3 + offset] & MC5_MEM_MASK;
+ low_mem_offset = priv->adec[ADEC1 + offset] & MC5_MEM_MASK;
+ reg = priv->adec[ADEC14 + offset];
+ ilc_himem_en = !!(reg & MC5_ILC_HIMEM_EN);
+ ilcmem_base = (reg & MC5_ILC_MEM) * SZ_1M;
+ if (ilc_himem_en)
+ ilc_base_ctrl_add = ilcmem_base - high_mem_offset;
+ else
+ ilc_base_ctrl_add = ilcmem_base - low_mem_offset;
+
+ if (priv->dwidth == DEV_X16) {
+ blk = err_addr / MC5_X16_SIZE;
+ rsh_req_addr = (blk << 8) + ilc_base_ctrl_add;
+ err_addr = rsh_req_addr * interleave * 2;
+ } else {
+ blk = err_addr / MC5_X32_SIZE;
+ rsh_req_addr = (blk << 9) + ilc_base_ctrl_add;
+ err_addr = rsh_req_addr * interleave * 2;
+ }
+
+ if ((priv->adec[ADEC2 + offset] & MC5_HIGH_MEM_EN) && err_addr >= high_mem_base)
+ addr = err_addr - high_mem_offset;
+ else
+ addr = err_addr - low_mem_offset;
+
+ return addr;
+}
+
+/**
+ * handle_error - Handle errors.
+ * @priv: DDR memory controller private instance data.
+ * @stat: ECC status structure.
+ * @ctl_num: Controller number of the MC5
+ * @error_data: the MC5 ADEC address decoder register data
+ *
+ * Handles ECC correctable and uncorrectable errors.
+ */
+static void handle_error(struct mc_priv *priv, struct ecc_status *stat,
+ int ctl_num, int *error_data)
+{
+ union ecc_error_info pinf;
+ struct mem_ctl_info *mci;
+ unsigned long pa;
+ phys_addr_t pfn;
+ int err;
+
+ if (WARN_ON_ONCE(ctl_num >= NUM_CONTROLLERS))
+ return;
+
+ mci = priv->mci[ctl_num];
+
+ if (stat->error_type == MC5_ERR_TYPE_CE) {
+ pinf = stat->ceinfo[stat->channel];
+ snprintf(priv->message, sizeof(priv->message),
+ "Error type:%s Controller %d Addr at %lx\n",
+ "CE", ctl_num, convert_to_physical(priv, pinf, ctl_num, error_data));
+
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ 1, 0, 0, 0, 0, 0, -1,
+ priv->message, "");
+ }
+
+ if (stat->error_type == MC5_ERR_TYPE_UE) {
+ pinf = stat->ueinfo[stat->channel];
+ snprintf(priv->message, sizeof(priv->message),
+ "Error type:%s controller %d Addr at %lx\n",
+ "UE", ctl_num, convert_to_physical(priv, pinf, ctl_num, error_data));
+
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ 1, 0, 0, 0, 0, 0, -1,
+ priv->message, "");
+ pa = convert_to_physical(priv, pinf, ctl_num, error_data);
+ pfn = PHYS_PFN(pa);
+
+ if (IS_ENABLED(CONFIG_MEMORY_FAILURE)) {
+ err = memory_failure(pfn, MF_ACTION_REQUIRED);
+ if (err)
+ edac_dbg(2, "memory_failure() error: %d", err);
+ else
+ edac_dbg(2, "Poison page at PA 0x%lx\n", pa);
+ }
+ }
+}
+
+static void mc_init(struct mem_ctl_info *mci, struct device *dev)
+{
+ struct mc_priv *priv = mci->pvt_info;
+ struct csrow_info *csi;
+ struct dimm_info *dimm;
+ u32 row;
+ int ch;
+
+ /* Initialize controller capabilities and configuration */
+ mci->mtype_cap = MEM_FLAG_DDR5;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+ mci->scrub_cap = SCRUB_HW_SRC;
+ mci->scrub_mode = SCRUB_NONE;
+
+ mci->edac_cap = EDAC_FLAG_SECDED;
+ mci->ctl_name = "VersalNET DDR5";
+ mci->dev_name = dev_name(dev);
+ mci->mod_name = "versalnet_edac";
+
+ edac_op_state = EDAC_OPSTATE_INT;
+
+ for (row = 0; row < mci->nr_csrows; row++) {
+ csi = mci->csrows[row];
+ for (ch = 0; ch < csi->nr_channels; ch++) {
+ dimm = csi->channels[ch]->dimm;
+ dimm->edac_mode = EDAC_SECDED;
+ dimm->mtype = MEM_DDR5;
+ dimm->grain = MC5_ERR_GRAIN;
+ dimm->dtype = priv->dwidth;
+ }
+ }
+}
+
+#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
+
+static unsigned int mcdi_rpc_timeout(struct cdx_mcdi *cdx, unsigned int cmd)
+{
+ return MCDI_RPC_TIMEOUT;
+}
+
+static void mcdi_request(struct cdx_mcdi *cdx,
+ const struct cdx_dword *hdr, size_t hdr_len,
+ const struct cdx_dword *sdu, size_t sdu_len)
+{
+ void *send_buf;
+ int ret;
+
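+ /* The firmware expects the MCDI header and payload as one rpmsg message. */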
+ send_buf = kzalloc(hdr_len + sdu_len, GFP_KERNEL);
+ if (!send_buf)
+ return;
+
+ memcpy(send_buf, hdr, hdr_len);
+ memcpy(send_buf + hdr_len, sdu, sdu_len);
+
+ ret = rpmsg_send(cdx->ept, send_buf, hdr_len + sdu_len);
+ if (ret)
+ dev_err(&cdx->rpdev->dev, "Failed to send rpmsg data: %d\n", ret);
+
+ kfree(send_buf);
+}
+
+static const struct cdx_mcdi_ops mcdi_ops = {
+ .mcdi_rpc_timeout = mcdi_rpc_timeout,
+ .mcdi_request = mcdi_request,
+};
+
+static void get_ddr_config(u32 index, u32 *buffer, struct cdx_mcdi *amd_mcdi)
+{
+ size_t outlen;
+ int ret;
+
+ MCDI_DECLARE_BUF(inbuf, MC_GET_DDR_CONFIG_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, BUFFER_SZ);
+
+ MCDI_SET_DWORD(inbuf, EDAC_GET_DDR_CONFIG_IN_CONTROLLER_INDEX, index);
+
+ ret = cdx_mcdi_rpc(amd_mcdi, MC_CMD_EDAC_GET_DDR_CONFIG, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (!ret)
+ memcpy(buffer, MCDI_PTR(outbuf, GET_DDR_CONFIG),
+ (ADEC_NUM * 4));
+}
+
+static int setup_mcdi(struct mc_priv *mc_priv)
+{
+ struct cdx_mcdi *amd_mcdi;
+ int ret, i;
+
+ amd_mcdi = kzalloc(sizeof(*amd_mcdi), GFP_KERNEL);
+ if (!amd_mcdi)
+ return -ENOMEM;
+
+ amd_mcdi->mcdi_ops = &mcdi_ops;
+ ret = cdx_mcdi_init(amd_mcdi);
+ if (ret) {
+ kfree(amd_mcdi);
+ return ret;
+ }
+
+ amd_mcdi->ept = mc_priv->ept;
+ mc_priv->mcdi = amd_mcdi;
+
+ for (i = 0; i < NUM_CONTROLLERS; i++)
+ get_ddr_config(i, &mc_priv->adec[ADEC_NUM * i], amd_mcdi);
+
+ return 0;
+}
+
+static const guid_t amd_versalnet_guid = GUID_INIT(0x82678888, 0xa556, 0x44f2,
+ 0xb8, 0xb4, 0x45, 0x56, 0x2e,
+ 0x8c, 0x5b, 0xec);
+
+static int rpmsg_cb(struct rpmsg_device *rpdev, void *data,
+ int len, void *priv, u32 src)
+{
+ struct mc_priv *mc_priv = dev_get_drvdata(&rpdev->dev);
+ const guid_t *sec_type = &guid_null;
+ u32 length, offset, error_id;
+ u32 *result = (u32 *)data;
+ struct ecc_status *p;
+ int i, j, k, sec_sev;
+ const char *err_str;
+ u32 *adec_data;
+
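+ /* MCDI responses share this endpoint with error events; hand them off. */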
+ if (*(u8 *)data == MCDI_RESPONSE) {
+ cdx_mcdi_process_cmd(mc_priv->mcdi, (struct cdx_dword *)data, len);
+ return 0;
+ }
+
+ sec_sev = result[ERROR_LEVEL];
+ error_id = result[ERROR_ID];
+ length = result[MSG_ERR_LENGTH];
+ offset = result[MSG_ERR_OFFSET];
+
+ if (result[TOTAL_ERR_LENGTH] > length) {
+ if (!mc_priv->part_len)
+ mc_priv->part_len = length;
+ else
+ mc_priv->part_len += length;
+ /*
+ * The data can arrive in two stretches. Reassemble the register
+ * dump from both messages; the offset indicates where in the dump
+ * this chunk belongs.
+ */
+ for (i = 0 ; i < length; i++) {
+ k = offset + i;
+ j = ERROR_DATA + i;
+ mc_priv->regs[k] = result[j];
+ }
+ if (mc_priv->part_len < result[TOTAL_ERR_LENGTH])
+ return 0;
+ mc_priv->part_len = 0;
+ }
+
+ mc_priv->error_id = error_id;
+ mc_priv->error_level = result[ERROR_LEVEL];
+
+ switch (error_id) {
+ case 5: err_str = "General Software Non-Correctable error"; break;
+ case 6: err_str = "CFU error"; break;
+ case 7: err_str = "CFRAME error"; break;
+ case 10: err_str = "DDRMC Microblaze Correctable ECC error"; break;
+ case 11: err_str = "DDRMC Microblaze Non-Correctable ECC error"; break;
+ case 15: err_str = "MMCM error"; break;
+ case 16: err_str = "HNICX Correctable error"; break;
+ case 17: err_str = "HNICX Non-Correctable error"; break;
+
+ case 18:
+ p = &mc_priv->stat;
+ memset(p, 0, sizeof(struct ecc_status));
+ p->error_type = MC5_ERR_TYPE_CE;
+ for (i = 0 ; i < NUM_CONTROLLERS; i++) {
+ if (get_ddr_info(&mc_priv->regs[i * REGS_PER_CONTROLLER], mc_priv)) {
+ adec_data = mc_priv->adec + ADEC_NUM * i;
+ handle_error(mc_priv, &mc_priv->stat, i, adec_data);
+ }
+ }
+ return 0;
+ case 19:
+ p = &mc_priv->stat;
+ memset(p, 0, sizeof(struct ecc_status));
+ p->error_type = MC5_ERR_TYPE_UE;
+ for (i = 0 ; i < NUM_CONTROLLERS; i++) {
+ if (get_ddr_info(&mc_priv->regs[i * REGS_PER_CONTROLLER], mc_priv)) {
+ adec_data = mc_priv->adec + ADEC_NUM * i;
+ handle_error(mc_priv, &mc_priv->stat, i, adec_data);
+ }
+ }
+ return 0;
+
+ case 21: err_str = "GT Non-Correctable error"; break;
+ case 22: err_str = "PL Sysmon Correctable error"; break;
+ case 23: err_str = "PL Sysmon Non-Correctable error"; break;
+ case 111: err_str = "LPX unexpected dfx activation error"; break;
+ case 114: err_str = "INT_LPD Non-Correctable error"; break;
+ case 116: err_str = "INT_OCM Non-Correctable error"; break;
+ case 117: err_str = "INT_FPD Correctable error"; break;
+ case 118: err_str = "INT_FPD Non-Correctable error"; break;
+ case 120: err_str = "INT_IOU Non-Correctable error"; break;
+ case 123: err_str = "err_int_irq from APU GIC Distributor"; break;
+ case 124: err_str = "fault_int_irq from APU GIC Distribute"; break;
+ case 132 ... 139: err_str = "FPX SPLITTER error"; break;
+ case 140: err_str = "APU Cluster 0 error"; break;
+ case 141: err_str = "APU Cluster 1 error"; break;
+ case 142: err_str = "APU Cluster 2 error"; break;
+ case 143: err_str = "APU Cluster 3 error"; break;
+ case 145: err_str = "WWDT1 LPX error"; break;
+ case 147: err_str = "IPI error"; break;
+ case 152 ... 153: err_str = "AFIFS error"; break;
+ case 154 ... 155: err_str = "LPX glitch error"; break;
+ case 185 ... 186: err_str = "FPX AFIFS error"; break;
+ case 195 ... 199: err_str = "AFIFM error"; break;
+ case 108: err_str = "PSM Correctable error"; break;
+ case 59: err_str = "PMC correctable error"; break;
+ case 60: err_str = "PMC Un correctable error"; break;
+ case 43 ... 47: err_str = "PMC Sysmon error"; break;
+ case 163 ... 184: err_str = "RPU error"; break;
+ case 148: err_str = "OCM0 correctable error"; break;
+ case 149: err_str = "OCM1 correctable error"; break;
+ case 150: err_str = "OCM0 Un-correctable error"; break;
+ case 151: err_str = "OCM1 Un-correctable error"; break;
+ case 189: err_str = "PSX_CMN_3 PD block consolidated error"; break;
+ case 191: err_str = "FPD_INT_WRAP PD block consolidated error"; break;
+ case 232: err_str = "CRAM Un-Correctable error"; break;
+ default: err_str = "Unknown error"; break;
+ }
+
+ snprintf(mc_priv->message,
+ sizeof(mc_priv->message),
+ "[VERSAL_EDAC_ERR_ID: %d] Error type: %s", error_id, err_str);
+
+ /* Convert to bytes */
+ length = result[TOTAL_ERR_LENGTH] * 4;
+ log_non_standard_event(sec_type, &amd_versalnet_guid, mc_priv->message,
+ sec_sev, (void *)&result[ERROR_DATA], length);
+
+ return 0;
+}
+
+static struct rpmsg_device_id amd_rpmsg_id_table[] = {
+ { .name = "error_ipc" },
+ { },
+};
+MODULE_DEVICE_TABLE(rpmsg, amd_rpmsg_id_table);
+
+static int rpmsg_probe(struct rpmsg_device *rpdev)
+{
+ struct rpmsg_channel_info chinfo;
+ struct mc_priv *pg;
+
+ pg = (struct mc_priv *)amd_rpmsg_id_table[0].driver_data;
+ chinfo.src = RPMSG_ADDR_ANY;
+ chinfo.dst = rpdev->dst;
+ strscpy(chinfo.name, amd_rpmsg_id_table[0].name, sizeof(chinfo.name));
+
+ pg->ept = rpmsg_create_ept(rpdev, rpmsg_cb, NULL, chinfo);
+ if (!pg->ept)
+ return dev_err_probe(&rpdev->dev, -ENXIO, "Failed to create ept for channel %s\n",
+ chinfo.name);
+
+ dev_set_drvdata(&rpdev->dev, pg);
+
+ return 0;
+}
+
+static void rpmsg_remove(struct rpmsg_device *rpdev)
+{
+ struct mc_priv *mc_priv = dev_get_drvdata(&rpdev->dev);
+
+ rpmsg_destroy_ept(mc_priv->ept);
+ dev_set_drvdata(&rpdev->dev, NULL);
+}
+
+static struct rpmsg_driver amd_rpmsg_driver = {
+ .drv.name = KBUILD_MODNAME,
+ .probe = rpmsg_probe,
+ .remove = rpmsg_remove,
+ .callback = rpmsg_cb,
+ .id_table = amd_rpmsg_id_table,
+};
+
+static void versal_edac_release(struct device *dev)
+{
+ kfree(dev);
+}
+
+static int init_versalnet(struct mc_priv *priv, struct platform_device *pdev)
+{
+ u32 num_chans, rank, dwidth, config;
+ struct edac_mc_layer layers[2];
+ struct mem_ctl_info *mci;
+ struct device *dev;
+ enum dev_type dt;
+ char *name;
+ int rc, i;
+
+ for (i = 0; i < NUM_CONTROLLERS; i++) {
+ config = priv->adec[CONF + i * ADEC_NUM];
+ num_chans = FIELD_GET(MC5_NUM_CHANS_MASK, config);
+ rank = 1 << FIELD_GET(MC5_RANK_MASK, config);
+ dwidth = FIELD_GET(MC5_BUS_WIDTH_MASK, config);
+
+ switch (dwidth) {
+ case XDDR5_BUS_WIDTH_16:
+ dt = DEV_X16;
+ break;
+ case XDDR5_BUS_WIDTH_32:
+ dt = DEV_X32;
+ break;
+ case XDDR5_BUS_WIDTH_64:
+ dt = DEV_X64;
+ break;
+ default:
+ dt = DEV_UNKNOWN;
+ }
+
+ if (dt == DEV_UNKNOWN)
+ continue;
+
+ /* Register an EDAC MC for each controller with a known bus width. */
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = rank;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = num_chans;
+ layers[1].is_virt_csrow = false;
+
+ rc = -ENOMEM;
+ mci = edac_mc_alloc(i, ARRAY_SIZE(layers), layers,
+ sizeof(struct mc_priv));
+ if (!mci) {
+ edac_printk(KERN_ERR, EDAC_MC, "Failed memory allocation for MC%d\n", i);
+ goto err_alloc;
+ }
+
+ priv->mci[i] = mci;
+ priv->dwidth = dt;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ goto err_alloc;
+ dev->release = versal_edac_release;
+ name = kasprintf(GFP_KERNEL, "versal-net-ddrmc5-edac-%d", i);
+ if (!name) {
+ kfree(dev);
+ goto err_alloc;
+ }
+ dev->init_name = name;
+ rc = device_register(dev);
+ if (rc)
+ goto err_alloc;
+
+ mci->pdev = dev;
+
+ platform_set_drvdata(pdev, priv);
+
+ mc_init(mci, dev);
+ rc = edac_mc_add_mc(mci);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_MC, "Failed to register MC%d with EDAC core\n", i);
+ goto err_alloc;
+ }
+ }
+ return 0;
+
+err_alloc:
+ while (i--) {
+ mci = priv->mci[i];
+ if (!mci)
+ continue;
+
+ if (mci->pdev) {
+ device_unregister(mci->pdev);
+ edac_mc_del_mc(mci->pdev);
+ }
+
+ edac_mc_free(mci);
+ }
+
+ return rc;
+}
+
+static void remove_versalnet(struct mc_priv *priv)
+{
+ struct mem_ctl_info *mci;
+ int i;
+
+ for (i = 0; i < NUM_CONTROLLERS; i++) {
+ if (!priv->mci[i])
+ continue;
+ device_unregister(priv->mci[i]->pdev);
+ mci = edac_mc_del_mc(priv->mci[i]->pdev);
+ if (!mci)
+ return;
+
+ edac_mc_free(mci);
+ }
+}
+
+static int mc_probe(struct platform_device *pdev)
+{
+ struct device_node *r5_core_node;
+ struct mc_priv *priv;
+ struct rproc *rp;
+ int rc;
+
+ r5_core_node = of_parse_phandle(pdev->dev.of_node, "amd,rproc", 0);
+ if (!r5_core_node) {
+ dev_err(&pdev->dev, "amd,rproc: invalid phandle\n");
+ return -EINVAL;
+ }
+
+ rp = rproc_get_by_phandle(r5_core_node->phandle);
+ if (!rp)
+ return -EPROBE_DEFER;
+
+ rc = rproc_boot(rp);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to attach to remote processor\n");
+ goto err_rproc_boot;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ rc = -ENOMEM;
+ goto err_alloc;
+ }
+
+ amd_rpmsg_id_table[0].driver_data = (kernel_ulong_t)priv;
+
+ rc = register_rpmsg_driver(&amd_rpmsg_driver);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_MC, "Failed to register RPMsg driver: %d\n", rc);
+ goto err_alloc;
+ }
+
+ rc = setup_mcdi(priv);
+ if (rc)
+ goto err_unreg;
+
+ priv->mcdi->r5_rproc = rp;
+
+ rc = init_versalnet(priv, pdev);
+ if (rc)
+ goto err_init;
+
+ return 0;
+
+err_init:
+ cdx_mcdi_finish(priv->mcdi);
+
+err_unreg:
+ unregister_rpmsg_driver(&amd_rpmsg_driver);
+
+err_alloc:
+ rproc_shutdown(rp);
+
+err_rproc_boot:
+ rproc_put(rp);
+
+ return rc;
+}
+
+static void mc_remove(struct platform_device *pdev)
+{
+ struct mc_priv *priv = platform_get_drvdata(pdev);
+
+ unregister_rpmsg_driver(&amd_rpmsg_driver);
+ remove_versalnet(priv);
+ rproc_shutdown(priv->mcdi->r5_rproc);
+ cdx_mcdi_finish(priv->mcdi);
+}
+
+static const struct of_device_id amd_edac_match[] = {
+ { .compatible = "xlnx,versal-net-ddrmc5", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, amd_edac_match);
+
+static struct platform_driver amd_ddr_edac_mc_driver = {
+ .driver = {
+ .name = "versal-net-edac",
+ .of_match_table = amd_edac_match,
+ },
+ .probe = mc_probe,
+ .remove = mc_remove,
+};
+
+module_platform_driver(amd_ddr_edac_mc_driver);
+
+MODULE_AUTHOR("AMD Inc");
+MODULE_DESCRIPTION("Versal NET EDAC driver");
+MODULE_LICENSE("GPL");
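The err_alloc path above uses the `while (i--)` unwind idiom: `i` holds the index of the controller that failed, so the loop walks back over the controllers set up before it, while the failing iteration releases its own partial state before jumping. A minimal sketch of the idiom, with hypothetical setup_one()/teardown_one() helpers:

/* Illustrative only; setup_one() and teardown_one() are hypothetical. */
static int setup_all(int n)
{
	int i, rc;

	for (i = 0; i < n; i++) {
		rc = setup_one(i);
		if (rc)
			goto unwind;
	}
	return 0;

unwind:
	while (i--)	/* visits i-1 .. 0; the failed index cleans up before the goto */
		teardown_one(i);
	return rc;
}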
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index a6f6d467aacf..aec46bf03302 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -134,6 +134,19 @@ config EXTCON_MAX8997
Maxim MAX8997 PMIC. The MAX8997 MUIC is a USB port accessory
detector and switch.
+config EXTCON_MAX14526
+ tristate "Maxim MAX14526 EXTCON Support"
+ depends on I2C
+ select IRQ_DOMAIN
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the Maxim MAX14526
+ MUIC device. The MAX14526 MUIC is a USB port accessory
+ detector and switch. The MAX14526 is designed to simplify
+ interface requirements on portable devices by multiplexing
+ common inputs (USB, UART, Microphone, Stereo Audio and
+ Composite Video) on a single micro/mini USB connector.
+
config EXTCON_PALMAS
tristate "Palmas USB EXTCON support"
depends on MFD_PALMAS
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 0d6d23faf748..6482f2bfd661 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_EXTCON_MAX3355) += extcon-max3355.o
obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o
obj-$(CONFIG_EXTCON_MAX77843) += extcon-max77843.o
obj-$(CONFIG_EXTCON_MAX8997) += extcon-max8997.o
+obj-$(CONFIG_EXTCON_MAX14526) += extcon-max14526.o
obj-$(CONFIG_EXTCON_PALMAS) += extcon-palmas.o
obj-$(CONFIG_EXTCON_PTN5150) += extcon-ptn5150.o
obj-$(CONFIG_EXTCON_QCOM_SPMI_MISC) += extcon-qcom-spmi-misc.o
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index 46c40d85c2ac..7e3c9f38297b 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -164,6 +164,8 @@ static void adc_jack_remove(struct platform_device *pdev)
{
struct adc_jack_data *data = platform_get_drvdata(pdev);
+ if (data->wakeup_source)
+ device_init_wakeup(&pdev->dev, false);
free_irq(data->irq, data);
cancel_work_sync(&data->handler.work);
}
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index d3bcbe839c09..19856dddade6 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -470,7 +470,7 @@ static int axp288_extcon_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- device_init_wakeup(dev, true);
+ devm_device_init_wakeup(dev);
platform_set_drvdata(pdev, info);
return 0;
diff --git a/drivers/extcon/extcon-fsa9480.c b/drivers/extcon/extcon-fsa9480.c
index b11b43171063..a031eb0914a0 100644
--- a/drivers/extcon/extcon-fsa9480.c
+++ b/drivers/extcon/extcon-fsa9480.c
@@ -317,7 +317,7 @@ static int fsa9480_probe(struct i2c_client *client)
return ret;
}
- device_init_wakeup(info->dev, true);
+ devm_device_init_wakeup(info->dev);
fsa9480_detect_dev(info);
return 0;
diff --git a/drivers/extcon/extcon-max14526.c b/drivers/extcon/extcon-max14526.c
new file mode 100644
index 000000000000..3750a5c20612
--- /dev/null
+++ b/drivers/extcon/extcon-max14526.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/device.h>
+#include <linux/devm-helpers.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/extcon-provider.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+
+/* Addresses of MUIC internal registers */
+#define MAX14526_DEVICE_ID 0x00
+
+/* Expected vendor ID value read from the DEVICE_ID register */
+#define MAX14526_ID 0x02
+
+/* CONTROL_1 register masks */
+#define MAX14526_CONTROL_1 0x01
+#define ID_2P2 BIT(6)
+#define ID_620 BIT(5)
+#define ID_200 BIT(4)
+#define VLDO BIT(3)
+#define SEMREN BIT(2)
+#define ADC_EN BIT(1)
+#define CP_EN BIT(0)
+
+/* CONTROL_2 register masks */
+#define MAX14526_CONTROL_2 0x02
+#define INTPOL BIT(7)
+#define INT_EN BIT(6)
+#define MIC_LP BIT(5)
+#define CP_AUD BIT(4)
+#define CHG_TYPE BIT(1)
+#define USB_DET_DIS BIT(0)
+
+/* SW_CONTROL register masks */
+#define MAX14526_SW_CONTROL 0x03
+#define SW_DATA 0x00
+#define SW_UART 0x01
+#define SW_AUDIO 0x02
+#define SW_OPEN 0x07
+
+/* INT_STATUS register masks */
+#define MAX14526_INT_STAT 0x04
+#define CHGDET BIT(7)
+#define MR_COMP BIT(6)
+#define SENDEND BIT(5)
+#define V_VBUS BIT(4)
+
+/* STATUS register masks */
+#define MAX14526_STATUS 0x05
+#define CPORT BIT(7)
+#define CHPORT BIT(6)
+#define C1COMP BIT(0)
+
+enum max14526_idno_resistance {
+ MAX14526_GND,
+ MAX14526_24KOHM,
+ MAX14526_56KOHM,
+ MAX14526_100KOHM,
+ MAX14526_130KOHM,
+ MAX14526_180KOHM,
+ MAX14526_240KOHM,
+ MAX14526_330KOHM,
+ MAX14526_430KOHM,
+ MAX14526_620KOHM,
+ MAX14526_910KOHM,
+ MAX14526_OPEN
+};
+
+enum max14526_field_idx {
+ VENDOR_ID, CHIP_REV, /* DEVID */
+ DM, DP, /* SW_CONTROL */
+ MAX14526_N_REGMAP_FIELDS
+};
+
+static const struct reg_field max14526_reg_field[MAX14526_N_REGMAP_FIELDS] = {
+ [VENDOR_ID] = REG_FIELD(MAX14526_DEVICE_ID, 4, 7),
+ [CHIP_REV] = REG_FIELD(MAX14526_DEVICE_ID, 0, 3),
+ [DM] = REG_FIELD(MAX14526_SW_CONTROL, 0, 2),
+ [DP] = REG_FIELD(MAX14526_SW_CONTROL, 3, 5),
+};
+
+struct max14526_data {
+ struct i2c_client *client;
+ struct extcon_dev *edev;
+
+ struct regmap *regmap;
+ struct regmap_field *rfield[MAX14526_N_REGMAP_FIELDS];
+
+ int last_state;
+ int cable;
+};
+
+enum max14526_muic_modes {
+ MAX14526_OTG = MAX14526_GND, /* no power */
+ MAX14526_MHL = MAX14526_56KOHM, /* no power */
+ MAX14526_OTG_Y = MAX14526_GND | V_VBUS,
+ MAX14526_MHL_CHG = MAX14526_GND | V_VBUS | CHGDET,
+ MAX14526_NONE = MAX14526_OPEN,
+ MAX14526_USB = MAX14526_OPEN | V_VBUS,
+ MAX14526_CHG = MAX14526_OPEN | V_VBUS | CHGDET,
+};
+
+static const unsigned int max14526_extcon_cable[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
+ EXTCON_CHG_USB_FAST,
+ EXTCON_DISP_MHL,
+ EXTCON_NONE,
+};
+
+static int max14526_ap_usb_mode(struct max14526_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ int ret;
+
+ /* Enable USB Path */
+ ret = regmap_field_write(priv->rfield[DM], SW_DATA);
+ if (ret)
+ return ret;
+
+ ret = regmap_field_write(priv->rfield[DP], SW_DATA);
+ if (ret)
+ return ret;
+
+ /* Enable 200K, Charger Pump and ADC */
+ ret = regmap_write(priv->regmap, MAX14526_CONTROL_1,
+ ID_200 | ADC_EN | CP_EN);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev, "AP USB mode set\n");
+
+ return 0;
+}
+
+static irqreturn_t max14526_interrupt(int irq, void *dev_id)
+{
+ struct max14526_data *priv = dev_id;
+ struct device *dev = &priv->client->dev;
+ int state, ret;
+
+ /*
+ * Upon a MUIC IRQ (MUIC_INT_N falls), wait at least 70 ms
+ * before reading INT_STAT and STATUS. After the reads,
+ * MUIC_INT_N returns high (the latched INT_STAT and STATUS
+ * contents are preserved).
+ */
+ msleep(100);
+
+ ret = regmap_read(priv->regmap, MAX14526_INT_STAT, &state);
+ if (ret) {
+ dev_err(dev, "failed to read MUIC state: %d\n", ret);
+ return IRQ_HANDLED;
+ }
+
+ if (state == priv->last_state)
+ return IRQ_HANDLED;
+
+ /* Detach previous device */
+ extcon_set_state_sync(priv->edev, priv->cable, false);
+
+ switch (state) {
+ case MAX14526_USB:
+ priv->cable = EXTCON_USB;
+ break;
+
+ case MAX14526_CHG:
+ priv->cable = EXTCON_CHG_USB_FAST;
+ break;
+
+ case MAX14526_OTG:
+ case MAX14526_OTG_Y:
+ priv->cable = EXTCON_USB_HOST;
+ break;
+
+ case MAX14526_MHL:
+ case MAX14526_MHL_CHG:
+ priv->cable = EXTCON_DISP_MHL;
+ break;
+
+ case MAX14526_NONE:
+ default:
+ priv->cable = EXTCON_NONE;
+ break;
+ }
+
+ extcon_set_state_sync(priv->edev, priv->cable, true);
+
+ priv->last_state = state;
+
+ return IRQ_HANDLED;
+}
+
+static const struct regmap_config max14526_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX14526_STATUS,
+};
+
+static int max14526_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct max14526_data *priv;
+ int ret, dev_id, rev, i;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->client = client;
+ i2c_set_clientdata(client, priv);
+
+ priv->regmap = devm_regmap_init_i2c(client, &max14526_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return dev_err_probe(dev, PTR_ERR(priv->regmap), "cannot allocate regmap\n");
+
+ for (i = 0; i < MAX14526_N_REGMAP_FIELDS; i++) {
+ priv->rfield[i] = devm_regmap_field_alloc(dev, priv->regmap,
+ max14526_reg_field[i]);
+ if (IS_ERR(priv->rfield[i]))
+ return dev_err_probe(dev, PTR_ERR(priv->rfield[i]),
+ "cannot allocate regmap field\n");
+ }
+
+ /* Detect if MUIC version is supported */
+ ret = regmap_field_read(priv->rfield[VENDOR_ID], &dev_id);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to read MUIC ID\n");
+
+ ret = regmap_field_read(priv->rfield[CHIP_REV], &rev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to read MUIC revision\n");
+
+ if (dev_id == MAX14526_ID)
+ dev_info(dev, "detected MAX14526 MUIC with id 0x%x, rev 0x%x\n", dev_id, rev);
+ else
+ return dev_err_probe(dev, -EINVAL, "MUIC vendor id 0x%X is not recognized\n", dev_id);
+
+ priv->edev = devm_extcon_dev_allocate(dev, max14526_extcon_cable);
+ if (IS_ERR(priv->edev))
+ return dev_err_probe(dev, PTR_ERR(priv->edev),
+ "failed to allocate extcon device\n");
+
+ ret = devm_extcon_dev_register(dev, priv->edev);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to register extcon device\n");
+
+ ret = max14526_ap_usb_mode(priv);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to set AP USB mode\n");
+
+ regmap_write_bits(priv->regmap, MAX14526_CONTROL_2, INT_EN, INT_EN);
+ regmap_write_bits(priv->regmap, MAX14526_CONTROL_2, USB_DET_DIS, 0);
+
+ ret = devm_request_threaded_irq(dev, client->irq, NULL, &max14526_interrupt,
+ IRQF_ONESHOT | IRQF_SHARED, client->name, priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register IRQ\n");
+
+ /* Kick the IRQ thread once to report the cable state present at boot. */
+ irq_wake_thread(client->irq, priv);
+
+ return 0;
+}
+
+static int max14526_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max14526_data *priv = i2c_get_clientdata(client);
+
+ irq_wake_thread(client->irq, priv);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(max14526_pm_ops, NULL, max14526_resume);
+
+static const struct of_device_id max14526_match[] = {
+ { .compatible = "maxim,max14526" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, max14526_match);
+
+static const struct i2c_device_id max14526_id[] = {
+ { "max14526" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max14526_id);
+
+static struct i2c_driver max14526_driver = {
+ .driver = {
+ .name = "max14526",
+ .of_match_table = max14526_match,
+ .pm = &max14526_pm_ops,
+ },
+ .probe = max14526_probe,
+ .id_table = max14526_id,
+};
+module_i2c_driver(max14526_driver);
+
+MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>");
+MODULE_DESCRIPTION("MAX14526 extcon driver to support MUIC");
+MODULE_LICENSE("GPL");
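For reference, consumers observe the cable states this MUIC reports through the extcon notifier API. A minimal sketch of a hypothetical consumer, looking the device up via a device tree phandle (names are illustrative, not part of this patch):

/* Illustrative consumer sketch; error handling trimmed. */
static struct notifier_block usb_nb;

static int usb_cable_event(struct notifier_block *nb,
			   unsigned long state, void *ptr)
{
	/* state != 0 means EXTCON_USB is now attached */
	return NOTIFY_OK;
}

static int consumer_bind(struct device *dev)
{
	struct extcon_dev *edev = extcon_get_edev_by_phandle(dev, 0);

	if (IS_ERR(edev))
		return PTR_ERR(edev);
	usb_nb.notifier_call = usb_cable_event;
	return extcon_register_notifier(edev, EXTCON_USB, &usb_nb);
}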
diff --git a/drivers/extcon/extcon-qcom-spmi-misc.c b/drivers/extcon/extcon-qcom-spmi-misc.c
index 53de581a393a..afaba5685c3d 100644
--- a/drivers/extcon/extcon-qcom-spmi-misc.c
+++ b/drivers/extcon/extcon-qcom-spmi-misc.c
@@ -155,7 +155,7 @@ static int qcom_usb_extcon_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, info);
- device_init_wakeup(dev, 1);
+ devm_device_init_wakeup(dev);
/* Perform initial detection */
qcom_usb_extcon_detect_cable(&info->wq_detcable.work);
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index aae774e7a5c3..e5e0174a0335 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -229,8 +229,7 @@ void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset)
/* Use an arbitrary short delay to combine multiple reset requests. */
fw_card_get(card);
- if (!queue_delayed_work(fw_workqueue, &card->br_work,
- delayed ? DIV_ROUND_UP(HZ, 100) : 0))
+ if (!queue_delayed_work(fw_workqueue, &card->br_work, delayed ? msecs_to_jiffies(10) : 0))
fw_card_put(card);
}
EXPORT_SYMBOL(fw_schedule_bus_reset);
@@ -241,10 +240,10 @@ static void br_work(struct work_struct *work)
/* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
if (card->reset_jiffies != 0 &&
- time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) {
+ time_is_after_jiffies64(card->reset_jiffies + secs_to_jiffies(2))) {
trace_bus_reset_postpone(card->index, card->generation, card->br_short);
- if (!queue_delayed_work(fw_workqueue, &card->br_work, 2 * HZ))
+ if (!queue_delayed_work(fw_workqueue, &card->br_work, secs_to_jiffies(2)))
fw_card_put(card);
return;
}
@@ -280,225 +279,254 @@ void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
fw_card_put(card);
}
-static void bm_work(struct work_struct *work)
+enum bm_contention_outcome {
+ // The bus management contention window is not expired.
+ BM_CONTENTION_OUTCOME_WITHIN_WINDOW = 0,
+ // The IRM node has link off.
+ BM_CONTENTION_OUTCOME_IRM_HAS_LINK_OFF,
+ // The IRM node complies IEEE 1394:1994 only.
+ BM_CONTENTION_OUTCOME_IRM_COMPLIES_1394_1995_ONLY,
+ // Another bus reset, BM work has been rescheduled.
+ BM_CONTENTION_OUTCOME_AT_NEW_GENERATION,
+ // We have been unable to send the lock request to the IRM node due to some local problem.
+ BM_CONTENTION_OUTCOME_LOCAL_PROBLEM_AT_TRANSACTION,
+ // The lock request failed, maybe the IRM isn't really IRM capable after all.
+ BM_CONTENTION_OUTCOME_IRM_IS_NOT_CAPABLE_FOR_IRM,
+ // Somebody else is BM.
+ BM_CONTENTION_OUTCOME_IRM_HOLDS_ANOTHER_NODE_AS_BM,
+ // The local node succeeds after contending for bus manager.
+ BM_CONTENTION_OUTCOME_IRM_HOLDS_LOCAL_NODE_AS_BM,
+};
+
+static enum bm_contention_outcome contend_for_bm(struct fw_card *card)
+__must_hold(&card->lock)
{
- static const char gap_count_table[] = {
- 63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
+ int generation = card->generation;
+ int local_id = card->local_node->node_id;
+ __be32 data[2] = {
+ cpu_to_be32(BUS_MANAGER_ID_NOT_REGISTERED),
+ cpu_to_be32(local_id),
};
- struct fw_card *card = from_work(card, work, bm_work.work);
- struct fw_device *root_device, *irm_device;
- struct fw_node *root_node;
- int root_id, new_root_id, irm_id, bm_id, local_id;
- int gap_count, generation, grace, rcode;
- bool do_reset = false;
- bool root_device_is_running;
- bool root_device_is_cmc;
- bool irm_is_1394_1995_only;
- bool keep_this_irm;
- __be32 transaction_data[2];
-
- spin_lock_irq(&card->lock);
+ bool grace = time_is_before_jiffies64(card->reset_jiffies + msecs_to_jiffies(125));
+ bool irm_is_1394_1995_only = false;
+ bool keep_this_irm = false;
+ struct fw_node *irm_node;
+ struct fw_device *irm_device;
+ int irm_node_id;
+ int rcode;
+
+ lockdep_assert_held(&card->lock);
+
+ if (!grace) {
+ if (!is_next_generation(generation, card->bm_generation) || card->bm_abdicate)
+ return BM_CONTENTION_OUTCOME_WITHIN_WINDOW;
+ }
- if (card->local_node == NULL) {
- spin_unlock_irq(&card->lock);
- goto out_put_card;
+ irm_node = card->irm_node;
+ if (!irm_node->link_on) {
+ fw_notice(card, "IRM has link off, making local node (%02x) root\n", local_id);
+ return BM_CONTENTION_OUTCOME_IRM_HAS_LINK_OFF;
}
- generation = card->generation;
+ irm_device = fw_node_get_device(irm_node);
+ if (irm_device && irm_device->config_rom) {
+ irm_is_1394_1995_only = (irm_device->config_rom[2] & 0x000000f0) == 0;
- root_node = card->root_node;
- fw_node_get(root_node);
- root_device = root_node->data;
- root_device_is_running = root_device &&
- atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
- root_device_is_cmc = root_device && root_device->cmc;
+ // Canon MV5i works unreliably if it is not the root node.
+ keep_this_irm = irm_device->config_rom[3] >> 8 == CANON_OUI;
+ }
- irm_device = card->irm_node->data;
- irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
- (irm_device->config_rom[2] & 0x000000f0) == 0;
+ if (irm_is_1394_1995_only && !keep_this_irm) {
+ fw_notice(card, "IRM is not 1394a compliant, making local node (%02x) root\n",
+ local_id);
+ return BM_CONTENTION_OUTCOME_IRM_COMPLIES_1394_1995_ONLY;
+ }
- /* Canon MV5i works unreliably if it is not root node. */
- keep_this_irm = irm_device && irm_device->config_rom &&
- irm_device->config_rom[3] >> 8 == CANON_OUI;
+ irm_node_id = irm_node->node_id;
- root_id = root_node->node_id;
- irm_id = card->irm_node->node_id;
- local_id = card->local_node->node_id;
+ spin_unlock_irq(&card->lock);
- grace = time_after64(get_jiffies_64(),
- card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
+ rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, irm_node_id, generation,
+ SCODE_100, CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID, data,
+ sizeof(data));
- if ((is_next_generation(generation, card->bm_generation) &&
- !card->bm_abdicate) ||
- (card->bm_generation != generation && grace)) {
- /*
- * This first step is to figure out who is IRM and
- * then try to become bus manager. If the IRM is not
- * well defined (e.g. does not have an active link
- * layer or does not responds to our lock request, we
- * will have to do a little vigilante bus management.
- * In that case, we do a goto into the gap count logic
- * so that when we do the reset, we still optimize the
- * gap count. That could well save a reset in the
- * next generation.
- */
+ spin_lock_irq(&card->lock);
- if (!card->irm_node->link_on) {
- new_root_id = local_id;
- fw_notice(card, "%s, making local node (%02x) root\n",
- "IRM has link off", new_root_id);
- goto pick_me;
+ switch (rcode) {
+ case RCODE_GENERATION:
+ return BM_CONTENTION_OUTCOME_AT_NEW_GENERATION;
+ case RCODE_SEND_ERROR:
+ return BM_CONTENTION_OUTCOME_LOCAL_PROBLEM_AT_TRANSACTION;
+ case RCODE_COMPLETE:
+ {
+ int bm_id = be32_to_cpu(data[0]);
+
+ // Used by cdev layer for "struct fw_cdev_event_bus_reset".
+ if (bm_id != BUS_MANAGER_ID_NOT_REGISTERED)
+ card->bm_node_id = 0xffc0 | bm_id;
+ else
+ card->bm_node_id = local_id;
+
+ if (bm_id != BUS_MANAGER_ID_NOT_REGISTERED)
+ return BM_CONTENTION_OUTCOME_IRM_HOLDS_ANOTHER_NODE_AS_BM;
+ else
+ return BM_CONTENTION_OUTCOME_IRM_HOLDS_LOCAL_NODE_AS_BM;
+ }
+ default:
+ if (!keep_this_irm) {
+ fw_notice(card, "BM lock failed (%s), making local node (%02x) root\n",
+ fw_rcode_string(rcode), local_id);
+ return BM_CONTENTION_OUTCOME_IRM_IS_NOT_CAPABLE_FOR_IRM;
+ } else {
+ // Keep this IRM anyway; proceed as bus manager without forcing a new root.
+ return BM_CONTENTION_OUTCOME_IRM_HOLDS_LOCAL_NODE_AS_BM;
}
+ }
+}
- if (irm_is_1394_1995_only && !keep_this_irm) {
- new_root_id = local_id;
- fw_notice(card, "%s, making local node (%02x) root\n",
- "IRM is not 1394a compliant", new_root_id);
- goto pick_me;
- }
+DEFINE_FREE(node_unref, struct fw_node *, if (_T) fw_node_put(_T))
+DEFINE_FREE(card_unref, struct fw_card *, if (_T) fw_card_put(_T))
+
+static void bm_work(struct work_struct *work)
+{
+ static const char gap_count_table[] = {
+ 63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
+ };
+ struct fw_card *card __free(card_unref) = from_work(card, work, bm_work.work);
+ struct fw_node *root_node __free(node_unref) = NULL;
+ int root_id, new_root_id, irm_id, local_id;
+ int expected_gap_count, generation;
+ bool stand_for_root = false;
- transaction_data[0] = cpu_to_be32(0x3f);
- transaction_data[1] = cpu_to_be32(local_id);
+ spin_lock_irq(&card->lock);
+ if (card->local_node == NULL) {
spin_unlock_irq(&card->lock);
+ return;
+ }
- rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
- irm_id, generation, SCODE_100,
- CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
- transaction_data, 8);
+ generation = card->generation;
- if (rcode == RCODE_GENERATION)
- /* Another bus reset, BM work has been rescheduled. */
- goto out;
+ root_node = fw_node_get(card->root_node);
- bm_id = be32_to_cpu(transaction_data[0]);
+ root_id = root_node->node_id;
+ irm_id = card->irm_node->node_id;
+ local_id = card->local_node->node_id;
- scoped_guard(spinlock_irq, &card->lock) {
- if (rcode == RCODE_COMPLETE && generation == card->generation)
- card->bm_node_id =
- bm_id == 0x3f ? local_id : 0xffc0 | bm_id;
- }
+ if (card->bm_generation != generation) {
+ enum bm_contention_outcome result = contend_for_bm(card);
- if (rcode == RCODE_COMPLETE && bm_id != 0x3f) {
- /* Somebody else is BM. Only act as IRM. */
- if (local_id == irm_id)
+ switch (result) {
+ case BM_CONTENTION_OUTCOME_WITHIN_WINDOW:
+ spin_unlock_irq(&card->lock);
+ fw_schedule_bm_work(card, msecs_to_jiffies(125));
+ return;
+ case BM_CONTENTION_OUTCOME_IRM_HAS_LINK_OFF:
+ stand_for_root = true;
+ break;
+ case BM_CONTENTION_OUTCOME_IRM_COMPLIES_1394_1995_ONLY:
+ stand_for_root = true;
+ break;
+ case BM_CONTENTION_OUTCOME_AT_NEW_GENERATION:
+ // BM work has been rescheduled.
+ spin_unlock_irq(&card->lock);
+ return;
+ case BM_CONTENTION_OUTCOME_LOCAL_PROBLEM_AT_TRANSACTION:
+ // Let's try again later and hope that the local problem has gone away by
+ // then.
+ spin_unlock_irq(&card->lock);
+ fw_schedule_bm_work(card, msecs_to_jiffies(125));
+ return;
+ case BM_CONTENTION_OUTCOME_IRM_IS_NOT_CAPABLE_FOR_IRM:
+ // Let's do a bus reset and pick the local node as root, and thus, IRM.
+ stand_for_root = true;
+ break;
+ case BM_CONTENTION_OUTCOME_IRM_HOLDS_ANOTHER_NODE_AS_BM:
+ if (local_id == irm_id) {
+ // Only acts as IRM.
+ spin_unlock_irq(&card->lock);
allocate_broadcast_channel(card, generation);
-
- goto out;
- }
-
- if (rcode == RCODE_SEND_ERROR) {
- /*
- * We have been unable to send the lock request due to
- * some local problem. Let's try again later and hope
- * that the problem has gone away by then.
- */
- fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
- goto out;
+ spin_lock_irq(&card->lock);
+ }
+ fallthrough;
+ case BM_CONTENTION_OUTCOME_IRM_HOLDS_LOCAL_NODE_AS_BM:
+ default:
+ card->bm_generation = generation;
+ break;
}
+ }
- spin_lock_irq(&card->lock);
-
- if (rcode != RCODE_COMPLETE && !keep_this_irm) {
- /*
- * The lock request failed, maybe the IRM
- * isn't really IRM capable after all. Let's
- * do a bus reset and pick the local node as
- * root, and thus, IRM.
- */
- new_root_id = local_id;
- fw_notice(card, "BM lock failed (%s), making local node (%02x) root\n",
- fw_rcode_string(rcode), new_root_id);
- goto pick_me;
+ // We're bus manager for this generation, so the next step is to make sure we have
+ // an active cycle master and do gap count optimization.
+ if (!stand_for_root) {
+ if (card->gap_count == GAP_COUNT_MISMATCHED) {
+ // If self IDs have inconsistent gap counts, do a
+ // bus reset ASAP. The config rom read might never
+ // complete, so don't wait for it. However, still
+ // send a PHY configuration packet prior to the
+ // bus reset. The PHY configuration packet might
+ // fail, but 1394-2008 8.4.5.2 explicitly permits
+ // it in this case, so it should be safe to try.
+ stand_for_root = true;
+
+ // We must always send a bus reset if the gap count
+ // is inconsistent, so bypass the 5-reset limit.
+ card->bm_retries = 0;
+ } else {
+ // Now investigate root node.
+ struct fw_device *root_device = fw_node_get_device(root_node);
+
+ if (root_device == NULL) {
+ // Either link_on is false, or we failed to read the
+ // config rom. In either case, pick another root.
+ stand_for_root = true;
+ } else {
+ bool root_device_is_running =
+ atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
+
+ if (!root_device_is_running) {
+ // If we haven't probed this device yet, bail out now
+ // and let's try again once that's done.
+ spin_unlock_irq(&card->lock);
+ return;
+ } else if (!root_device->cmc) {
+ // Current root has an active link layer and we
+ // successfully read the config rom, but it's not
+ // cycle master capable.
+ stand_for_root = true;
+ }
+ }
}
- } else if (card->bm_generation != generation) {
- /*
- * We weren't BM in the last generation, and the last
- * bus reset is less than 125ms ago. Reschedule this job.
- */
- spin_unlock_irq(&card->lock);
- fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
- goto out;
}
- /*
- * We're bus manager for this generation, so next step is to
- * make sure we have an active cycle master and do gap count
- * optimization.
- */
- card->bm_generation = generation;
-
- if (card->gap_count == 0) {
- /*
- * If self IDs have inconsistent gap counts, do a
- * bus reset ASAP. The config rom read might never
- * complete, so don't wait for it. However, still
- * send a PHY configuration packet prior to the
- * bus reset. The PHY configuration packet might
- * fail, but 1394-2008 8.4.5.2 explicitly permits
- * it in this case, so it should be safe to try.
- */
- new_root_id = local_id;
- /*
- * We must always send a bus reset if the gap count
- * is inconsistent, so bypass the 5-reset limit.
- */
- card->bm_retries = 0;
- } else if (root_device == NULL) {
- /*
- * Either link_on is false, or we failed to read the
- * config rom. In either case, pick another root.
- */
+ if (stand_for_root) {
new_root_id = local_id;
- } else if (!root_device_is_running) {
- /*
- * If we haven't probed this device yet, bail out now
- * and let's try again once that's done.
- */
- spin_unlock_irq(&card->lock);
- goto out;
- } else if (root_device_is_cmc) {
- /*
- * We will send out a force root packet for this
- * node as part of the gap count optimization.
- */
- new_root_id = root_id;
} else {
- /*
- * Current root has an active link layer and we
- * successfully read the config rom, but it's not
- * cycle master capable.
- */
- new_root_id = local_id;
+ // We will send out a force root packet for this node as part of the gap count
+ // optimization on behalf of the node.
+ new_root_id = root_id;
}
- pick_me:
/*
* Pick a gap count from 1394a table E-1. The table doesn't cover
* the typically much larger 1394b beta repeater delays though.
*/
if (!card->beta_repeaters_present &&
root_node->max_hops < ARRAY_SIZE(gap_count_table))
- gap_count = gap_count_table[root_node->max_hops];
+ expected_gap_count = gap_count_table[root_node->max_hops];
else
- gap_count = 63;
+ expected_gap_count = 63;
- /*
- * Finally, figure out if we should do a reset or not. If we have
- * done less than 5 resets with the same physical topology and we
- * have either a new root or a new gap count setting, let's do it.
- */
-
- if (card->bm_retries++ < 5 &&
- (card->gap_count != gap_count || new_root_id != root_id))
- do_reset = true;
+ // Finally, figure out if we should do a reset or not. If we have done less than 5 resets
+ // with the same physical topology and we have either a new root or a new gap count
+ // setting, let's do it.
+ if (card->bm_retries++ < 5 && (card->gap_count != expected_gap_count || new_root_id != root_id)) {
+ int card_gap_count = card->gap_count;
- spin_unlock_irq(&card->lock);
+ spin_unlock_irq(&card->lock);
- if (do_reset) {
fw_notice(card, "phy config: new root=%x, gap_count=%d\n",
- new_root_id, gap_count);
- fw_send_phy_config(card, new_root_id, generation, gap_count);
+ new_root_id, expected_gap_count);
+ fw_send_phy_config(card, new_root_id, generation, expected_gap_count);
/*
* Where possible, use a short bus reset to minimize
* disruption to isochronous transfers. But in the event
@@ -511,31 +539,27 @@ static void bm_work(struct work_struct *work)
* may treat it as two, causing a gap count inconsistency
* again. Using a long bus reset prevents this.
*/
- reset_bus(card, card->gap_count != 0);
+ reset_bus(card, card_gap_count != 0);
/* Will allocate broadcast channel after the reset. */
- goto out;
- }
+ } else {
+ struct fw_device *root_device = fw_node_get_device(root_node);
- if (root_device_is_cmc) {
- /*
- * Make sure that the cycle master sends cycle start packets.
- */
- transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR);
- rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
- root_id, generation, SCODE_100,
- CSR_REGISTER_BASE + CSR_STATE_SET,
- transaction_data, 4);
- if (rcode == RCODE_GENERATION)
- goto out;
- }
+ spin_unlock_irq(&card->lock);
- if (local_id == irm_id)
- allocate_broadcast_channel(card, generation);
+ if (root_device && root_device->cmc) {
+ // Make sure that the cycle master sends cycle start packets.
+ __be32 data = cpu_to_be32(CSR_STATE_BIT_CMSTR);
+ int rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
+ root_id, generation, SCODE_100,
+ CSR_REGISTER_BASE + CSR_STATE_SET,
+ &data, sizeof(data));
+ if (rcode == RCODE_GENERATION)
+ return;
+ }
- out:
- fw_node_put(root_node);
- out_put_card:
- fw_card_put(card);
+ if (local_id == irm_id)
+ allocate_broadcast_channel(card, generation);
+ }
}
void fw_card_initialize(struct fw_card *card,
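For clarity, the lock transaction issued by contend_for_bm() above targets the BUS_MANAGER_ID CSR; its compare-swap semantics, performed atomically by the IRM, can be sketched as:

/*
 * old = BUS_MANAGER_ID;
 * if (old == data[0])                // expected: 0x3f, "not registered"
 *         BUS_MANAGER_ID = data[1];  // new value: the local node ID
 * data[0] = old;                     // returned to the requester
 *
 * If data[0] still reads 0x3f afterwards, the local node won the
 * contention and is now bus manager; otherwise data[0] names the winner.
 */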
@@ -547,20 +571,24 @@ void fw_card_initialize(struct fw_card *card,
card->index = atomic_inc_return(&index);
card->driver = driver;
card->device = device;
- card->current_tlabel = 0;
- card->tlabel_mask = 0;
- card->split_timeout_hi = DEFAULT_SPLIT_TIMEOUT / 8000;
- card->split_timeout_lo = (DEFAULT_SPLIT_TIMEOUT % 8000) << 19;
- card->split_timeout_cycles = DEFAULT_SPLIT_TIMEOUT;
- card->split_timeout_jiffies =
- DIV_ROUND_UP(DEFAULT_SPLIT_TIMEOUT * HZ, 8000);
+
+ card->transactions.current_tlabel = 0;
+ card->transactions.tlabel_mask = 0;
+ INIT_LIST_HEAD(&card->transactions.list);
+ spin_lock_init(&card->transactions.lock);
+
+ card->split_timeout.hi = DEFAULT_SPLIT_TIMEOUT / 8000;
+ card->split_timeout.lo = (DEFAULT_SPLIT_TIMEOUT % 8000) << 19;
+ card->split_timeout.cycles = DEFAULT_SPLIT_TIMEOUT;
+ card->split_timeout.jiffies = isoc_cycles_to_jiffies(DEFAULT_SPLIT_TIMEOUT);
+ spin_lock_init(&card->split_timeout.lock);
+
card->color = 0;
card->broadcast_channel = BROADCAST_CHANNEL_INITIAL;
kref_init(&card->kref);
init_completion(&card->done);
- INIT_LIST_HEAD(&card->transaction_list);
- INIT_LIST_HEAD(&card->phy_receiver_list);
+
spin_lock_init(&card->lock);
card->local_node = NULL;
@@ -570,9 +598,13 @@ void fw_card_initialize(struct fw_card *card,
}
EXPORT_SYMBOL(fw_card_initialize);
+DEFINE_FREE(workqueue_destroy, struct workqueue_struct *, if (_T) destroy_workqueue(_T))
+
int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
unsigned int supported_isoc_contexts)
{
+ struct workqueue_struct *isoc_wq __free(workqueue_destroy) = NULL;
+ struct workqueue_struct *async_wq __free(workqueue_destroy) = NULL;
int ret;
// This workqueue should be:
@@ -587,10 +619,10 @@ int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
// * == WQ_SYSFS Parameters are available via sysfs.
// * max_active == n_it + n_ir A hardIRQ could notify events for multiple isochronous
// contexts if they are scheduled to the same cycle.
- card->isoc_wq = alloc_workqueue("firewire-isoc-card%u",
- WQ_UNBOUND | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
- supported_isoc_contexts, card->index);
- if (!card->isoc_wq)
+ isoc_wq = alloc_workqueue("firewire-isoc-card%u",
+ WQ_UNBOUND | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
+ supported_isoc_contexts, card->index);
+ if (!isoc_wq)
return -ENOMEM;
// This workqueue should be:
@@ -602,14 +634,14 @@ int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
// * == WQ_SYSFS Parameters are available via sysfs.
// * max_active == 4 A hardIRQ could notify events for a pair of requests and
// response AR/AT contexts.
- card->async_wq = alloc_workqueue("firewire-async-card%u",
- WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
- 4, card->index);
- if (!card->async_wq) {
- ret = -ENOMEM;
- goto err_isoc;
- }
+ async_wq = alloc_workqueue("firewire-async-card%u",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
+ 4, card->index);
+ if (!async_wq)
+ return -ENOMEM;
+ card->isoc_wq = isoc_wq;
+ card->async_wq = async_wq;
card->max_receive = max_receive;
card->link_speed = link_speed;
card->guid = guid;
@@ -617,18 +649,18 @@ int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
scoped_guard(mutex, &card_mutex) {
generate_config_rom(card, tmp_config_rom);
ret = card->driver->enable(card, tmp_config_rom, config_rom_length);
- if (ret < 0)
- goto err_async;
+ if (ret < 0) {
+ card->isoc_wq = NULL;
+ card->async_wq = NULL;
+ return ret;
+ }
+ retain_and_null_ptr(isoc_wq);
+ retain_and_null_ptr(async_wq);
list_add_tail(&card->link, &card_list);
}
return 0;
-err_async:
- destroy_workqueue(card->async_wq);
-err_isoc:
- destroy_workqueue(card->isoc_wq);
- return ret;
}
EXPORT_SYMBOL(fw_card_add);
@@ -773,7 +805,7 @@ void fw_core_remove_card(struct fw_card *card)
destroy_workqueue(card->isoc_wq);
destroy_workqueue(card->async_wq);
- WARN_ON(!list_empty(&card->transaction_list));
+ WARN_ON(!list_empty(&card->transactions.list));
}
EXPORT_SYMBOL(fw_core_remove_card);
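The fw_card_add() rework above relies on scope-based cleanup from <linux/cleanup.h>: a __free() pointer owns each workqueue until retain_and_null_ptr() transfers ownership to the card. A minimal sketch of the pattern, assuming a hypothetical setup_may_fail() step:

DEFINE_FREE(wq_destroy, struct workqueue_struct *, if (_T) destroy_workqueue(_T))

static int example_add(struct fw_card *card)
{
	struct workqueue_struct *wq __free(wq_destroy) =
		alloc_workqueue("example-card%u", WQ_UNBOUND, 0, card->index);

	if (!wq)
		return -ENOMEM;
	if (setup_may_fail())
		return -EIO;		/* wq destroyed automatically here */

	card->isoc_wq = wq;
	retain_and_null_ptr(wq);	/* nulls wq, so the cleanup is a no-op */
	return 0;
}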
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 78b10c6ef7fe..49dc1612c691 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -41,12 +41,15 @@
/*
* ABI version history is documented in linux/firewire-cdev.h.
*/
-#define FW_CDEV_KERNEL_VERSION 5
+#define FW_CDEV_KERNEL_VERSION 6
#define FW_CDEV_VERSION_EVENT_REQUEST2 4
#define FW_CDEV_VERSION_ALLOCATE_REGION_END 4
#define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW 5
#define FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP 6
+static DEFINE_SPINLOCK(phy_receiver_list_lock);
+static LIST_HEAD(phy_receiver_list);
+
struct client {
u32 version;
struct fw_device *device;
@@ -937,11 +940,12 @@ static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
if (a->length > 256)
return -EINVAL;
- r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
+ r = kmalloc(struct_size(r, data, a->length), GFP_KERNEL);
if (r == NULL)
return -ENOMEM;
- if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
+ if (copy_from_user(r->data, u64_to_uptr(a->data),
+ flex_array_size(r, data, a->length))) {
ret = -EFAULT;
goto failed;
}
@@ -1324,8 +1328,8 @@ static void iso_resource_work(struct work_struct *work)
todo = r->todo;
// Allow 1000ms grace period for other reallocations.
if (todo == ISO_RES_ALLOC &&
- time_before64(get_jiffies_64(), client->device->card->reset_jiffies + HZ)) {
- schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
+ time_is_after_jiffies64(client->device->card->reset_jiffies + secs_to_jiffies(1))) {
+ schedule_iso_resource(r, msecs_to_jiffies(333));
skip = true;
} else {
// We could be called twice within the same generation.
@@ -1669,15 +1673,16 @@ static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
- struct fw_card *card = client->device->card;
/* Access policy: Allow this ioctl only on local nodes' device files. */
if (!client->device->is_local)
return -ENOSYS;
- guard(spinlock_irq)(&card->lock);
+ // NOTE: This can be without irq when we can guarantee that __fw_send_request() for local
+ // destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irq, &phy_receiver_list_lock)
+ list_move_tail(&client->phy_receiver_link, &phy_receiver_list);
- list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
client->phy_receiver_closure = a->closure;
return 0;
@@ -1687,10 +1692,17 @@ void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
{
struct client *client;
- guard(spinlock_irqsave)(&card->lock);
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for local
+ // destination never runs in any type of IRQ context.
+ guard(spinlock_irqsave)(&phy_receiver_list_lock);
+
+ list_for_each_entry(client, &phy_receiver_list, phy_receiver_link) {
+ struct inbound_phy_packet_event *e;
+
+ if (client->device->card != card)
+ continue;
- list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
- struct inbound_phy_packet_event *e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
+ e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
if (e == NULL)
break;
@@ -1857,7 +1869,9 @@ static int fw_device_op_release(struct inode *inode, struct file *file)
struct client_resource *resource;
unsigned long index;
- scoped_guard(spinlock_irq, &client->device->card->lock)
+ // NOTE: This can be without irq when we can guarantee that __fw_send_request() for local
+ // destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irq, &phy_receiver_list_lock)
list_del(&client->phy_receiver_link);
scoped_guard(mutex, &client->device->client_list_mutex)
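The ioctl_add_descriptor() hunk above swaps the open-coded `a->length * 4` size math for the overflow-checked helpers from <linux/overflow.h>. Assuming the resource ends in a `u32 data[]` flexible array, the helpers expand as sketched here:

/* Sketch; struct layout abbreviated. */
struct descriptor_resource {
	size_t length;
	u32 data[];	/* flexible array member */
};

/* sizeof(*r) + a->length * sizeof(r->data[0]), saturating on overflow */
r = kmalloc(struct_size(r, data, a->length), GFP_KERNEL);

/* a->length * sizeof(r->data[0]), likewise overflow-checked */
copy_from_user(r->data, u64_to_uptr(a->data),
	       flex_array_size(r, data, a->length));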
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index aeacd4cfd694..457a0da024a7 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -847,16 +847,15 @@ static void fw_schedule_device_work(struct fw_device *device,
*/
#define MAX_RETRIES 10
-#define RETRY_DELAY (3 * HZ)
-#define INITIAL_DELAY (HZ / 2)
-#define SHUTDOWN_DELAY (2 * HZ)
+#define RETRY_DELAY secs_to_jiffies(3)
+#define INITIAL_DELAY msecs_to_jiffies(500)
+#define SHUTDOWN_DELAY secs_to_jiffies(2)
static void fw_device_shutdown(struct work_struct *work)
{
struct fw_device *device = from_work(device, work, work.work);
- if (time_before64(get_jiffies_64(),
- device->card->reset_jiffies + SHUTDOWN_DELAY)
+ if (time_is_after_jiffies64(device->card->reset_jiffies + SHUTDOWN_DELAY)
&& !list_empty(&device->card->link)) {
fw_schedule_device_work(device, SHUTDOWN_DELAY);
return;
@@ -887,7 +886,7 @@ static void fw_device_release(struct device *dev)
* bus manager work looks at this node.
*/
scoped_guard(spinlock_irqsave, &card->lock)
- device->node->data = NULL;
+ fw_node_set_device(device->node, NULL);
fw_node_put(device->node);
kfree(device->config_rom);
@@ -1007,7 +1006,7 @@ static void fw_device_init(struct work_struct *work)
int ret;
/*
- * All failure paths here set node->data to NULL, so that we
+ * All failure paths here call fw_node_set_device(node, NULL), so that we
* don't try to do device_for_each_child() on a kfree()'d
* device.
*/
@@ -1051,9 +1050,9 @@ static void fw_device_init(struct work_struct *work)
struct fw_node *obsolete_node = reused->node;
device->node = obsolete_node;
- device->node->data = device;
+ fw_node_set_device(device->node, device);
reused->node = current_node;
- reused->node->data = reused;
+ fw_node_set_device(reused->node, reused);
reused->max_speed = device->max_speed;
reused->node_id = current_node->node_id;
@@ -1292,7 +1291,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
* FW_NODE_UPDATED callbacks can update the node_id
* and generation for the device.
*/
- node->data = device;
+ fw_node_set_device(node, device);
/*
* Many devices are slow to respond after bus resets,
@@ -1307,7 +1306,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
case FW_NODE_INITIATED_RESET:
case FW_NODE_LINK_ON:
- device = node->data;
+ device = fw_node_get_device(node);
if (device == NULL)
goto create;
@@ -1324,7 +1323,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
break;
case FW_NODE_UPDATED:
- device = node->data;
+ device = fw_node_get_device(node);
if (device == NULL)
break;
@@ -1339,7 +1338,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
case FW_NODE_DESTROYED:
case FW_NODE_LINK_OFF:
- if (!node->data)
+ if (!fw_node_get_device(node))
break;
/*
@@ -1354,7 +1353,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
* the device in shutdown state to have that code fail
* to create the device.
*/
- device = node->data;
+ device = fw_node_get_device(node);
if (atomic_xchg(&device->state,
FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
device->workfn = fw_device_shutdown;
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index 74a6aa7d8cc9..2f73bcd5696f 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -241,7 +241,7 @@ static struct fw_node *build_tree(struct fw_card *card, const u32 *sid, int self
// If PHYs report different gap counts, set an invalid count which will force a gap
// count reconfiguration and a reset.
if (phy_packet_self_id_zero_get_gap_count(self_id_sequence[0]) != gap_count)
- gap_count = 0;
+ gap_count = GAP_COUNT_MISMATCHED;
update_hop_count(node);
@@ -325,9 +325,11 @@ static void report_found_node(struct fw_card *card,
card->bm_retries = 0;
}
-/* Must be called with card->lock held */
void fw_destroy_nodes(struct fw_card *card)
+__must_hold(&card->lock)
{
+ lockdep_assert_held(&card->lock);
+
card->color++;
if (card->local_node != NULL)
for_each_fw_node(card, card->local_node, report_lost_node);
@@ -435,20 +437,22 @@ static void update_tree(struct fw_card *card, struct fw_node *root)
}
}
-static void update_topology_map(struct fw_card *card,
- u32 *self_ids, int self_id_count)
+static void update_topology_map(__be32 *buffer, size_t buffer_size, int root_node_id,
+ const u32 *self_ids, int self_id_count)
{
- int node_count = (card->root_node->node_id & 0x3f) + 1;
- __be32 *map = card->topology_map;
+ __be32 *map = buffer;
+ int node_count = (root_node_id & 0x3f) + 1;
+ // Read the running generation count before the buffer is cleared.
+ u32 next_generation = be32_to_cpu(buffer[1]) + 1;
+
+ memset(map, 0, buffer_size);
*map++ = cpu_to_be32((self_id_count + 2) << 16);
- *map++ = cpu_to_be32(be32_to_cpu(card->topology_map[1]) + 1);
+ *map++ = cpu_to_be32(next_generation);
*map++ = cpu_to_be32((node_count << 16) | self_id_count);
while (self_id_count--)
*map++ = cpu_to_be32p(self_ids++);
- fw_compute_block_crc(card->topology_map);
+ fw_compute_block_crc(buffer);
}
void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
@@ -458,46 +462,45 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
trace_bus_reset_handle(card->index, generation, node_id, bm_abdicate, self_ids, self_id_count);
- guard(spinlock_irqsave)(&card->lock);
-
- /*
- * If the selfID buffer is not the immediate successor of the
- * previously processed one, we cannot reliably compare the
- * old and new topologies.
- */
- if (!is_next_generation(generation, card->generation) &&
- card->local_node != NULL) {
- fw_destroy_nodes(card);
- card->bm_retries = 0;
+ scoped_guard(spinlock, &card->lock) {
+ // If the selfID buffer is not the immediate successor of the
+ // previously processed one, we cannot reliably compare the
+ // old and new topologies.
+ if (!is_next_generation(generation, card->generation) && card->local_node != NULL) {
+ fw_destroy_nodes(card);
+ card->bm_retries = 0;
+ }
+ card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
+ card->node_id = node_id;
+ // Update node_id before generation to prevent anybody from using
+ // a stale node_id together with a current generation.
+ smp_wmb();
+ card->generation = generation;
+ card->reset_jiffies = get_jiffies_64();
+ card->bm_node_id = 0xffff;
+ card->bm_abdicate = bm_abdicate;
+
+ local_node = build_tree(card, self_ids, self_id_count, generation);
+
+ card->color++;
+
+ if (local_node == NULL) {
+ fw_err(card, "topology build failed\n");
+ // FIXME: We need to issue a bus reset in this case.
+ } else if (card->local_node == NULL) {
+ card->local_node = local_node;
+ for_each_fw_node(card, local_node, report_found_node);
+ } else {
+ update_tree(card, local_node);
+ }
}
- card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
- card->node_id = node_id;
- /*
- * Update node_id before generation to prevent anybody from using
- * a stale node_id together with a current generation.
- */
- smp_wmb();
- card->generation = generation;
- card->reset_jiffies = get_jiffies_64();
- card->bm_node_id = 0xffff;
- card->bm_abdicate = bm_abdicate;
fw_schedule_bm_work(card, 0);
- local_node = build_tree(card, self_ids, self_id_count, generation);
-
- update_topology_map(card, self_ids, self_id_count);
-
- card->color++;
-
- if (local_node == NULL) {
- fw_err(card, "topology build failed\n");
- /* FIXME: We need to issue a bus reset in this case. */
- } else if (card->local_node == NULL) {
- card->local_node = local_node;
- for_each_fw_node(card, local_node, report_found_node);
- } else {
- update_tree(card, local_node);
+ // Used only by the transaction layer.
+ scoped_guard(spinlock, &card->topology_map.lock) {
+ update_topology_map(card->topology_map.buffer, sizeof(card->topology_map.buffer),
+ card->root_node->node_id, self_ids, self_id_count);
}
}
EXPORT_SYMBOL(fw_core_handle_bus_reset);
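The smp_wmb() kept in the hunk above orders node_id before generation on the writer side; it only pays off when readers pair it with a read barrier in the opposite order, roughly:

/*
 * writer (bus reset handling):       reader (e.g. before a transaction):
 *     card->node_id = node_id;           generation = card->generation;
 *     smp_wmb();                         smp_rmb();
 *     card->generation = generation;     node_id = card->node_id;
 *
 * The reader then observes a node_id at least as new as the generation
 * it sampled, so a stale node_id is never paired with a new generation.
 */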
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index d28477d84697..dd3656a0c1ff 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -49,12 +49,14 @@ static int close_transaction(struct fw_transaction *transaction, struct fw_card
{
struct fw_transaction *t = NULL, *iter;
- scoped_guard(spinlock_irqsave, &card->lock) {
- list_for_each_entry(iter, &card->transaction_list, link) {
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->transactions.lock) {
+ list_for_each_entry(iter, &card->transactions.list, link) {
if (iter == transaction) {
if (try_cancel_split_timeout(iter)) {
list_del_init(&iter->link);
- card->tlabel_mask &= ~(1ULL << iter->tlabel);
+ card->transactions.tlabel_mask &= ~(1ULL << iter->tlabel);
t = iter;
}
break;
@@ -117,11 +119,11 @@ static void split_transaction_timeout_callback(struct timer_list *timer)
struct fw_transaction *t = timer_container_of(t, timer, split_timeout_timer);
struct fw_card *card = t->card;
- scoped_guard(spinlock_irqsave, &card->lock) {
+ scoped_guard(spinlock_irqsave, &card->transactions.lock) {
if (list_empty(&t->link))
return;
list_del(&t->link);
- card->tlabel_mask &= ~(1ULL << t->tlabel);
+ card->transactions.tlabel_mask &= ~(1ULL << t->tlabel);
}
if (!t->with_tstamp) {
@@ -135,14 +137,18 @@ static void split_transaction_timeout_callback(struct timer_list *timer)
static void start_split_transaction_timeout(struct fw_transaction *t,
struct fw_card *card)
{
- guard(spinlock_irqsave)(&card->lock);
+ unsigned long delta;
if (list_empty(&t->link) || WARN_ON(t->is_split_transaction))
return;
t->is_split_transaction = true;
- mod_timer(&t->split_timeout_timer,
- jiffies + card->split_timeout_jiffies);
+
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->split_timeout.lock)
+ delta = card->split_timeout.jiffies;
+ mod_timer(&t->split_timeout_timer, jiffies + delta);
}
static u32 compute_split_timeout_timestamp(struct fw_card *card, u32 request_timestamp);
@@ -162,8 +168,12 @@ static void transmit_complete_callback(struct fw_packet *packet,
break;
case ACK_PENDING:
{
- t->split_timeout_cycle =
- compute_split_timeout_timestamp(card, packet->timestamp) & 0xffff;
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->split_timeout.lock) {
+ t->split_timeout_cycle =
+ compute_split_timeout_timestamp(card, packet->timestamp) & 0xffff;
+ }
start_split_transaction_timeout(t, card);
break;
}
@@ -259,18 +269,21 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
}
static int allocate_tlabel(struct fw_card *card)
+__must_hold(&card->transactions.lock)
{
int tlabel;
- tlabel = card->current_tlabel;
- while (card->tlabel_mask & (1ULL << tlabel)) {
+ lockdep_assert_held(&card->transactions.lock);
+
+ tlabel = card->transactions.current_tlabel;
+ while (card->transactions.tlabel_mask & (1ULL << tlabel)) {
tlabel = (tlabel + 1) & 0x3f;
- if (tlabel == card->current_tlabel)
+ if (tlabel == card->transactions.current_tlabel)
return -EBUSY;
}
- card->current_tlabel = (tlabel + 1) & 0x3f;
- card->tlabel_mask |= 1ULL << tlabel;
+ card->transactions.current_tlabel = (tlabel + 1) & 0x3f;
+ card->transactions.tlabel_mask |= 1ULL << tlabel;
return tlabel;
}
@@ -331,7 +344,6 @@ void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode
void *payload, size_t length, union fw_transaction_callback callback,
bool with_tstamp, void *callback_data)
{
- unsigned long flags;
int tlabel;
/*
@@ -339,11 +351,11 @@ void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode
* the list while holding the card spinlock.
*/
- spin_lock_irqsave(&card->lock, flags);
-
- tlabel = allocate_tlabel(card);
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->transactions.lock)
+ tlabel = allocate_tlabel(card);
if (tlabel < 0) {
- spin_unlock_irqrestore(&card->lock, flags);
if (!with_tstamp) {
callback.without_tstamp(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
} else {
@@ -368,15 +380,22 @@ void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode
t->callback = callback;
t->with_tstamp = with_tstamp;
t->callback_data = callback_data;
-
- fw_fill_request(&t->packet, tcode, t->tlabel, destination_id, card->node_id, generation,
- speed, offset, payload, length);
t->packet.callback = transmit_complete_callback;
- list_add_tail(&t->link, &card->transaction_list);
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->lock) {
+ // The node_id field of fw_card can be updated when handling SelfIDComplete.
+ fw_fill_request(&t->packet, tcode, t->tlabel, destination_id, card->node_id,
+ generation, speed, offset, payload, length);
+ }
- spin_unlock_irqrestore(&card->lock, flags);
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->transactions.lock)
+ list_add_tail(&t->link, &card->transactions.list);
+ // Safe with no lock, since the index field of fw_card is immutable once assigned.
trace_async_request_outbound_initiate((uintptr_t)t, card->index, generation, speed,
t->packet.header, payload,
tcode_is_read_request(tcode) ? 0 : length / 4);
@@ -458,7 +477,7 @@ static struct fw_packet phy_config_packet = {
void fw_send_phy_config(struct fw_card *card,
int node_id, int generation, int gap_count)
{
- long timeout = DIV_ROUND_UP(HZ, 10);
+ long timeout = msecs_to_jiffies(100);
u32 data = 0;
phy_packet_set_packet_identifier(&data, PHY_PACKET_PACKET_IDENTIFIER_PHY_CONFIG);
@@ -550,6 +569,23 @@ const struct fw_address_region fw_unit_space_region =
{ .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
#endif /* 0 */
+static void complete_address_handler(struct kref *kref)
+{
+ struct fw_address_handler *handler = container_of(kref, struct fw_address_handler, kref);
+
+ complete(&handler->done);
+}
+
+static void get_address_handler(struct fw_address_handler *handler)
+{
+ kref_get(&handler->kref);
+}
+
+static int put_address_handler(struct fw_address_handler *handler)
+{
+ return kref_put(&handler->kref, complete_address_handler);
+}
+
/**
* fw_core_add_address_handler() - register for incoming requests
* @handler: callback
@@ -596,6 +632,8 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
if (other != NULL) {
handler->offset += other->length;
} else {
+ init_completion(&handler->done);
+ kref_init(&handler->kref);
list_add_tail_rcu(&handler->link, &address_handler_list);
ret = 0;
break;
@@ -621,6 +659,9 @@ void fw_core_remove_address_handler(struct fw_address_handler *handler)
list_del_rcu(&handler->link);
synchronize_rcu();
+
+ if (!put_address_handler(handler))
+ wait_for_completion(&handler->done);
}
EXPORT_SYMBOL(fw_core_remove_address_handler);
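The kref/completion pair added above implements a teardown handshake: each dispatch holds a reference across the callback, and the remover blocks until the last reference is dropped, so a callback can never run after fw_core_remove_address_handler() returns. In outline:

/*
 * dispatcher:                        remover:
 *     get_address_handler(h);            list_del_rcu(&h->link);
 *     h->address_callback(...);          synchronize_rcu();
 *     put_address_handler(h);            if (!put_address_handler(h))
 *                                            wait_for_completion(&h->done);
 *
 * put_address_handler() returns non-zero only when it dropped the final
 * reference, which completes h->done and unblocks the remover.
 */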
@@ -757,11 +798,14 @@ EXPORT_SYMBOL(fw_fill_response);
static u32 compute_split_timeout_timestamp(struct fw_card *card,
u32 request_timestamp)
+__must_hold(&card->split_timeout.lock)
{
unsigned int cycles;
u32 timestamp;
- cycles = card->split_timeout_cycles;
+ lockdep_assert_held(&card->split_timeout.lock);
+
+ cycles = card->split_timeout.cycles;
cycles += request_timestamp & 0x1fff;
timestamp = request_timestamp & ~0x1fff;
@@ -812,9 +856,12 @@ static struct fw_request *allocate_request(struct fw_card *card,
return NULL;
kref_init(&request->kref);
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->split_timeout.lock)
+ request->response.timestamp = compute_split_timeout_timestamp(card, p->timestamp);
+
request->response.speed = p->speed;
- request->response.timestamp =
- compute_split_timeout_timestamp(card, p->timestamp);
request->response.generation = p->generation;
request->response.ack = 0;
request->response.callback = free_response_callback;
@@ -914,22 +961,31 @@ static void handle_exclusive_region_request(struct fw_card *card,
handler = lookup_enclosing_address_handler(&address_handler_list, offset,
request->length);
if (handler)
- handler->address_callback(card, request, tcode, destination, source,
- p->generation, offset, request->data,
- request->length, handler->callback_data);
+ get_address_handler(handler);
}
- if (!handler)
+ if (!handler) {
fw_send_response(card, request, RCODE_ADDRESS_ERROR);
+ return;
+ }
+
+ // Outside the RCU read-side critical section. Without spinlock. With reference count.
+ handler->address_callback(card, request, tcode, destination, source, p->generation, offset,
+ request->data, request->length, handler->callback_data);
+ put_address_handler(handler);
}
+// To use the kmalloc allocator efficiently, this should be a power of two.
+#define BUFFER_ON_KERNEL_STACK_SIZE 4
+
static void handle_fcp_region_request(struct fw_card *card,
struct fw_packet *p,
struct fw_request *request,
unsigned long long offset)
{
- struct fw_address_handler *handler;
- int tcode, destination, source;
+ struct fw_address_handler *buffer_on_kernel_stack[BUFFER_ON_KERNEL_STACK_SIZE];
+ struct fw_address_handler *handler, **handlers;
+ int tcode, destination, source, i, count, buffer_size;
if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
offset != (CSR_REGISTER_BASE | CSR_FCP_RESPONSE)) ||
@@ -950,15 +1006,55 @@ static void handle_fcp_region_request(struct fw_card *card,
return;
}
+ count = 0;
+ handlers = buffer_on_kernel_stack;
+ buffer_size = ARRAY_SIZE(buffer_on_kernel_stack);
scoped_guard(rcu) {
list_for_each_entry_rcu(handler, &address_handler_list, link) {
- if (is_enclosing_handler(handler, offset, request->length))
- handler->address_callback(card, request, tcode, destination, source,
- p->generation, offset, request->data,
- request->length, handler->callback_data);
+ if (is_enclosing_handler(handler, offset, request->length)) {
+ if (count >= buffer_size) {
+ int next_size = buffer_size * 2;
+ struct fw_address_handler **buffer_on_kernel_heap;
+
+ if (handlers == buffer_on_kernel_stack)
+ buffer_on_kernel_heap = NULL;
+ else
+ buffer_on_kernel_heap = handlers;
+
+ buffer_on_kernel_heap =
+ krealloc_array(buffer_on_kernel_heap, next_size,
+ sizeof(*buffer_on_kernel_heap), GFP_ATOMIC);
+ // FCP is used for purposes unrelated to significant system
+ // resources (e.g. storage or networking), so allocation
+ // failures are not considered critical.
+ if (!buffer_on_kernel_heap)
+ break;
+
+ if (handlers == buffer_on_kernel_stack) {
+ memcpy(buffer_on_kernel_heap, buffer_on_kernel_stack,
+ sizeof(buffer_on_kernel_stack));
+ }
+
+ handlers = buffer_on_kernel_heap;
+ buffer_size = next_size;
+ }
+ get_address_handler(handler);
+ handlers[count++] = handler;
+ }
}
}
+ for (i = 0; i < count; ++i) {
+ handler = handlers[i];
+ handler->address_callback(card, request, tcode, destination, source,
+ p->generation, offset, request->data,
+ request->length, handler->callback_data);
+ put_address_handler(handler);
+ }
+
+ if (handlers != buffer_on_kernel_stack)
+ kfree(handlers);
+
fw_send_response(card, request, RCODE_COMPLETE);
}
@@ -1040,12 +1136,14 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
break;
}
- scoped_guard(spinlock_irqsave, &card->lock) {
- list_for_each_entry(iter, &card->transaction_list, link) {
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->transactions.lock) {
+ list_for_each_entry(iter, &card->transactions.list, link) {
if (iter->node_id == source && iter->tlabel == tlabel) {
if (try_cancel_split_timeout(iter)) {
list_del_init(&iter->link);
- card->tlabel_mask &= ~(1ULL << iter->tlabel);
+ card->transactions.tlabel_mask &= ~(1ULL << iter->tlabel);
t = iter;
}
break;
@@ -1125,7 +1223,11 @@ static void handle_topology_map(struct fw_card *card, struct fw_request *request
}
start = (offset - topology_map_region.start) / 4;
- memcpy(payload, &card->topology_map[start], length);
+
+ // NOTE: The irqsave variant can be dropped once we can guarantee that fw_send_request()
+ // for a local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->topology_map.lock)
+ memcpy(payload, &card->topology_map.buffer[start], length);
fw_send_response(card, request, RCODE_COMPLETE);
}
@@ -1140,16 +1242,17 @@ static const struct fw_address_region registers_region =
.end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };
static void update_split_timeout(struct fw_card *card)
+__must_hold(&card->split_timeout.lock)
{
unsigned int cycles;
- cycles = card->split_timeout_hi * 8000 + (card->split_timeout_lo >> 19);
+ cycles = card->split_timeout.hi * 8000 + (card->split_timeout.lo >> 19);
/* minimum per IEEE 1394, maximum which doesn't overflow OHCI */
cycles = clamp(cycles, 800u, 3u * 8000u);
- card->split_timeout_cycles = cycles;
- card->split_timeout_jiffies = DIV_ROUND_UP(cycles * HZ, 8000);
+ card->split_timeout.cycles = cycles;
+ card->split_timeout.jiffies = isoc_cycles_to_jiffies(cycles);
}
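
For a concrete sense of the conversion above: the IEEE 1394 cycle timer runs at 8 kHz, so one isochronous cycle is 125 us. CSR_SPLIT_TIMEOUT_HI contributes whole seconds (8000 cycles each) from its low three bits, while CSR_SPLIT_TIMEOUT_LO contributes 1/8000-second units from bits 31:19, hence the >> 19. For example, hi = 0 and lo = 0x00800000 gives 0x00800000 >> 19 = 16 cycles (2 ms), which the clamp raises to the 800-cycle (100 ms) minimum required by IEEE 1394; hi = 3 and lo = 0 gives 24000 cycles, exactly the 3 * 8000 maximum that does not overflow OHCI.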
static void handle_registers(struct fw_card *card, struct fw_request *request,
@@ -1199,12 +1302,15 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
case CSR_SPLIT_TIMEOUT_HI:
if (tcode == TCODE_READ_QUADLET_REQUEST) {
- *data = cpu_to_be32(card->split_timeout_hi);
+ *data = cpu_to_be32(card->split_timeout.hi);
} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
- guard(spinlock_irqsave)(&card->lock);
-
- card->split_timeout_hi = be32_to_cpu(*data) & 7;
- update_split_timeout(card);
+ // NOTE: The irqsave variant can be dropped once we can guarantee that
+ // __fw_send_request() for a local destination never runs in any type of
+ // IRQ context.
+ scoped_guard(spinlock_irqsave, &card->split_timeout.lock) {
+ card->split_timeout.hi = be32_to_cpu(*data) & 7;
+ update_split_timeout(card);
+ }
} else {
rcode = RCODE_TYPE_ERROR;
}
@@ -1212,12 +1318,15 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
case CSR_SPLIT_TIMEOUT_LO:
if (tcode == TCODE_READ_QUADLET_REQUEST) {
- *data = cpu_to_be32(card->split_timeout_lo);
+ *data = cpu_to_be32(card->split_timeout.lo);
} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
- guard(spinlock_irqsave)(&card->lock);
-
- card->split_timeout_lo = be32_to_cpu(*data) & 0xfff80000;
- update_split_timeout(card);
+ // NOTE: The irqsave variant can be dropped once we can guarantee that
+ // __fw_send_request() for a local destination never runs in any type of
+ // IRQ context.
+ scoped_guard(spinlock_irqsave, &card->split_timeout.lock) {
+ card->split_timeout.lo = be32_to_cpu(*data) & 0xfff80000;
+ update_split_timeout(card);
+ }
} else {
rcode = RCODE_TYPE_ERROR;
}
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 9b298af1cac0..e67395ce26b5 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -27,6 +27,11 @@ struct fw_packet;
/* -card */
+// This is the arbitrary value we use to indicate a mismatched gap count.
+#define GAP_COUNT_MISMATCHED 0
+
+#define isoc_cycles_to_jiffies(cycles) usecs_to_jiffies((u32)div_u64((u64)cycles * USEC_PER_SEC, 8000))
+
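
A quick check of the new helper: at 8 kHz, cycles * USEC_PER_SEC / 8000 is simply cycles * 125 us. The minimum split timeout of 800 cycles therefore maps to 100000 us, which usecs_to_jiffies() turns into 100 jiffies at HZ=1000 (or 10 at HZ=100), matching what the old open-coded DIV_ROUND_UP(cycles * HZ, 8000) computed, now via a shared, named conversion.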
extern __printf(2, 3)
void fw_err(const struct fw_card *card, const char *fmt, ...);
extern __printf(2, 3)
@@ -167,6 +172,9 @@ static inline void fw_iso_context_init_work(struct fw_iso_context *ctx, work_fun
/* -topology */
+// The initial value of the BUS_MANAGER_ID register, indicating that nothing is registered.
+#define BUS_MANAGER_ID_NOT_REGISTERED 0x3f
+
enum {
FW_NODE_CREATED,
FW_NODE_UPDATED,
@@ -194,8 +202,8 @@ struct fw_node {
/* For serializing node topology into a list. */
struct list_head link;
- /* Upper layer specific data. */
- void *data;
+ // The associated device, or NULL if none has been associated yet.
+ struct fw_device *device;
struct fw_node *ports[] __counted_by(port_count);
};
@@ -219,6 +227,16 @@ static inline void fw_node_put(struct fw_node *node)
kref_put(&node->kref, release_node);
}
+static inline struct fw_device *fw_node_get_device(struct fw_node *node)
+{
+ return node->device;
+}
+
+static inline void fw_node_set_device(struct fw_node *node, struct fw_device *device)
+{
+ node->device = device;
+}
+
void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
int generation, int self_id_count, u32 *self_ids, bool bm_abdicate);
void fw_destroy_nodes(struct fw_card *card);
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 5d8301b0f3aa..030aed5453a1 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -228,13 +228,10 @@ struct fw_ohci {
__le32 *self_id;
dma_addr_t self_id_bus;
- struct work_struct bus_reset_work;
u32 self_id_buffer[512];
};
-static struct workqueue_struct *selfid_workqueue;
-
static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
return container_of(card, struct fw_ohci, card);
@@ -393,225 +390,10 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
", IR wake unreliable = " __stringify(QUIRK_IR_WAKE)
")");
-#define OHCI_PARAM_DEBUG_AT_AR 1
-#define OHCI_PARAM_DEBUG_SELFIDS 2
-#define OHCI_PARAM_DEBUG_IRQS 4
-
-static int param_debug;
-module_param_named(debug, param_debug, int, 0644);
-MODULE_PARM_DESC(debug, "Verbose logging, deprecated in v6.11 kernel or later. (default = 0"
- ", AT/AR events = " __stringify(OHCI_PARAM_DEBUG_AT_AR)
- ", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS)
- ", IRQs = " __stringify(OHCI_PARAM_DEBUG_IRQS)
- ", or a combination, or all = -1)");
-
static bool param_remote_dma;
module_param_named(remote_dma, param_remote_dma, bool, 0444);
MODULE_PARM_DESC(remote_dma, "Enable unfiltered remote DMA (default = N)");
-static void log_irqs(struct fw_ohci *ohci, u32 evt)
-{
- if (likely(!(param_debug & OHCI_PARAM_DEBUG_IRQS)))
- return;
-
- ohci_notice(ohci, "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
- evt & OHCI1394_selfIDComplete ? " selfID" : "",
- evt & OHCI1394_RQPkt ? " AR_req" : "",
- evt & OHCI1394_RSPkt ? " AR_resp" : "",
- evt & OHCI1394_reqTxComplete ? " AT_req" : "",
- evt & OHCI1394_respTxComplete ? " AT_resp" : "",
- evt & OHCI1394_isochRx ? " IR" : "",
- evt & OHCI1394_isochTx ? " IT" : "",
- evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "",
- evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "",
- evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
- evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "",
- evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
- evt & OHCI1394_unrecoverableError ? " unrecoverableError" : "",
- evt & OHCI1394_busReset ? " busReset" : "",
- evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
- OHCI1394_RSPkt | OHCI1394_reqTxComplete |
- OHCI1394_respTxComplete | OHCI1394_isochRx |
- OHCI1394_isochTx | OHCI1394_postedWriteErr |
- OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
- OHCI1394_cycleInconsistent |
- OHCI1394_regAccessFail | OHCI1394_busReset)
- ? " ?" : "");
-}
-
-static void log_selfids(struct fw_ohci *ohci, int generation, int self_id_count)
-{
- static const char *const speed[] = {
- [0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta",
- };
- static const char *const power[] = {
- [0] = "+0W", [1] = "+15W", [2] = "+30W", [3] = "+45W",
- [4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W",
- };
- static const char port[] = {
- [PHY_PACKET_SELF_ID_PORT_STATUS_NONE] = '.',
- [PHY_PACKET_SELF_ID_PORT_STATUS_NCONN] = '-',
- [PHY_PACKET_SELF_ID_PORT_STATUS_PARENT] = 'p',
- [PHY_PACKET_SELF_ID_PORT_STATUS_CHILD] = 'c',
- };
- struct self_id_sequence_enumerator enumerator = {
- .cursor = ohci->self_id_buffer,
- .quadlet_count = self_id_count,
- };
-
- if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
- return;
-
- ohci_notice(ohci, "%d selfIDs, generation %d, local node ID %04x\n",
- self_id_count, generation, ohci->node_id);
-
- while (enumerator.quadlet_count > 0) {
- unsigned int quadlet_count;
- unsigned int port_index;
- const u32 *s;
- int i;
-
- s = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
- if (IS_ERR(s))
- break;
-
- ohci_notice(ohci,
- "selfID 0: %08x, phy %d [%c%c%c] %s gc=%d %s %s%s%s\n",
- *s,
- phy_packet_self_id_get_phy_id(*s),
- port[self_id_sequence_get_port_status(s, quadlet_count, 0)],
- port[self_id_sequence_get_port_status(s, quadlet_count, 1)],
- port[self_id_sequence_get_port_status(s, quadlet_count, 2)],
- speed[*s >> 14 & 3], *s >> 16 & 63,
- power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
- *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
-
- port_index = 3;
- for (i = 1; i < quadlet_count; ++i) {
- ohci_notice(ohci,
- "selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
- s[i],
- phy_packet_self_id_get_phy_id(s[i]),
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 1)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 2)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 3)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 4)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 5)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 6)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 7)]
- );
-
- port_index += 8;
- }
- }
-}
-
-static const char *evts[] = {
- [0x00] = "evt_no_status", [0x01] = "-reserved-",
- [0x02] = "evt_long_packet", [0x03] = "evt_missing_ack",
- [0x04] = "evt_underrun", [0x05] = "evt_overrun",
- [0x06] = "evt_descriptor_read", [0x07] = "evt_data_read",
- [0x08] = "evt_data_write", [0x09] = "evt_bus_reset",
- [0x0a] = "evt_timeout", [0x0b] = "evt_tcode_err",
- [0x0c] = "-reserved-", [0x0d] = "-reserved-",
- [0x0e] = "evt_unknown", [0x0f] = "evt_flushed",
- [0x10] = "-reserved-", [0x11] = "ack_complete",
- [0x12] = "ack_pending ", [0x13] = "-reserved-",
- [0x14] = "ack_busy_X", [0x15] = "ack_busy_A",
- [0x16] = "ack_busy_B", [0x17] = "-reserved-",
- [0x18] = "-reserved-", [0x19] = "-reserved-",
- [0x1a] = "-reserved-", [0x1b] = "ack_tardy",
- [0x1c] = "-reserved-", [0x1d] = "ack_data_error",
- [0x1e] = "ack_type_error", [0x1f] = "-reserved-",
- [0x20] = "pending/cancelled",
-};
-
-static void log_ar_at_event(struct fw_ohci *ohci,
- char dir, int speed, u32 *header, int evt)
-{
- static const char *const tcodes[] = {
- [TCODE_WRITE_QUADLET_REQUEST] = "QW req",
- [TCODE_WRITE_BLOCK_REQUEST] = "BW req",
- [TCODE_WRITE_RESPONSE] = "W resp",
- [0x3] = "-reserved-",
- [TCODE_READ_QUADLET_REQUEST] = "QR req",
- [TCODE_READ_BLOCK_REQUEST] = "BR req",
- [TCODE_READ_QUADLET_RESPONSE] = "QR resp",
- [TCODE_READ_BLOCK_RESPONSE] = "BR resp",
- [TCODE_CYCLE_START] = "cycle start",
- [TCODE_LOCK_REQUEST] = "Lk req",
- [TCODE_STREAM_DATA] = "async stream packet",
- [TCODE_LOCK_RESPONSE] = "Lk resp",
- [0xc] = "-reserved-",
- [0xd] = "-reserved-",
- [TCODE_LINK_INTERNAL] = "link internal",
- [0xf] = "-reserved-",
- };
- int tcode = async_header_get_tcode(header);
- char specific[12];
-
- if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
- return;
-
- if (unlikely(evt >= ARRAY_SIZE(evts)))
- evt = 0x1f;
-
- if (evt == OHCI1394_evt_bus_reset) {
- ohci_notice(ohci, "A%c evt_bus_reset, generation %d\n",
- dir, (header[2] >> 16) & 0xff);
- return;
- }
-
- switch (tcode) {
- case TCODE_WRITE_QUADLET_REQUEST:
- case TCODE_READ_QUADLET_RESPONSE:
- case TCODE_CYCLE_START:
- snprintf(specific, sizeof(specific), " = %08x",
- be32_to_cpu((__force __be32)header[3]));
- break;
- case TCODE_WRITE_BLOCK_REQUEST:
- case TCODE_READ_BLOCK_REQUEST:
- case TCODE_READ_BLOCK_RESPONSE:
- case TCODE_LOCK_REQUEST:
- case TCODE_LOCK_RESPONSE:
- snprintf(specific, sizeof(specific), " %x,%x",
- async_header_get_data_length(header),
- async_header_get_extended_tcode(header));
- break;
- default:
- specific[0] = '\0';
- }
-
- switch (tcode) {
- case TCODE_STREAM_DATA:
- ohci_notice(ohci, "A%c %s, %s\n",
- dir, evts[evt], tcodes[tcode]);
- break;
- case TCODE_LINK_INTERNAL:
- ohci_notice(ohci, "A%c %s, PHY %08x %08x\n",
- dir, evts[evt], header[1], header[2]);
- break;
- case TCODE_WRITE_QUADLET_REQUEST:
- case TCODE_WRITE_BLOCK_REQUEST:
- case TCODE_READ_QUADLET_REQUEST:
- case TCODE_READ_BLOCK_REQUEST:
- case TCODE_LOCK_REQUEST:
- ohci_notice(ohci,
- "A%c spd %x tl %02x, %04x -> %04x, %s, %s, %012llx%s\n",
- dir, speed, async_header_get_tlabel(header),
- async_header_get_source(header), async_header_get_destination(header),
- evts[evt], tcodes[tcode], async_header_get_offset(header), specific);
- break;
- default:
- ohci_notice(ohci,
- "A%c spd %x tl %02x, %04x -> %04x, %s, %s%s\n",
- dir, speed, async_header_get_tlabel(header),
- async_header_get_source(header), async_header_get_destination(header),
- evts[evt], tcodes[tcode], specific);
- }
-}
-
static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
writel(data, ohci->registers + offset);
@@ -957,8 +739,6 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
p.timestamp = status & 0xffff;
p.generation = ohci->request_generation;
- log_ar_at_event(ohci, 'R', p.speed, p.header, evt);
-
/*
* Several controllers, notably from NEC and VIA, forget to
* write ack_complete status at PHY packet reception.
@@ -977,7 +757,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
*
* Alas some chips sometimes emit bus reset packets with a
* wrong generation. We set the correct generation for these
- * at a slightly incorrect time (in bus_reset_work).
+ * at a slightly incorrect time (in handle_selfid_complete_event).
*/
if (evt == OHCI1394_evt_bus_reset) {
if (!(ohci->quirks & QUIRK_RESET_PACKET))
@@ -1566,8 +1346,6 @@ static int handle_at_packet(struct context *context,
evt = le16_to_cpu(last->transfer_status) & 0x1f;
packet->timestamp = le16_to_cpu(last->res_count);
- log_ar_at_event(ohci, 'T', packet->speed, packet->header, evt);
-
switch (evt) {
case OHCI1394_evt_timeout:
/* Async response transmit timed out. */
@@ -1772,6 +1550,25 @@ static void at_context_transmit(struct at_context *ctx, struct fw_packet *packet
static void detect_dead_context(struct fw_ohci *ohci,
const char *name, unsigned int regs)
{
+ static const char *const evts[] = {
+ [0x00] = "evt_no_status", [0x01] = "-reserved-",
+ [0x02] = "evt_long_packet", [0x03] = "evt_missing_ack",
+ [0x04] = "evt_underrun", [0x05] = "evt_overrun",
+ [0x06] = "evt_descriptor_read", [0x07] = "evt_data_read",
+ [0x08] = "evt_data_write", [0x09] = "evt_bus_reset",
+ [0x0a] = "evt_timeout", [0x0b] = "evt_tcode_err",
+ [0x0c] = "-reserved-", [0x0d] = "-reserved-",
+ [0x0e] = "evt_unknown", [0x0f] = "evt_flushed",
+ [0x10] = "-reserved-", [0x11] = "ack_complete",
+ [0x12] = "ack_pending ", [0x13] = "-reserved-",
+ [0x14] = "ack_busy_X", [0x15] = "ack_busy_A",
+ [0x16] = "ack_busy_B", [0x17] = "-reserved-",
+ [0x18] = "-reserved-", [0x19] = "-reserved-",
+ [0x1a] = "-reserved-", [0x1b] = "ack_tardy",
+ [0x1c] = "-reserved-", [0x1d] = "ack_data_error",
+ [0x1e] = "ack_type_error", [0x1f] = "-reserved-",
+ [0x20] = "pending/cancelled",
+ };
u32 ctl;
ctl = reg_read(ohci, CONTROL_SET(regs));
@@ -2030,9 +1827,9 @@ static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
return self_id_count;
}
-static void bus_reset_work(struct work_struct *work)
+static irqreturn_t handle_selfid_complete_event(int irq, void *data)
{
- struct fw_ohci *ohci = from_work(ohci, work, bus_reset_work);
+ struct fw_ohci *ohci = data;
int self_id_count, generation, new_generation, i, j;
u32 reg, quadlet;
void *free_rom = NULL;
@@ -2043,11 +1840,11 @@ static void bus_reset_work(struct work_struct *work)
if (!(reg & OHCI1394_NodeID_idValid)) {
ohci_notice(ohci,
"node ID not valid, new bus reset in progress\n");
- return;
+ goto end;
}
if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
ohci_notice(ohci, "malconfigured bus\n");
- return;
+ goto end;
}
ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
OHCI1394_NodeID_nodeNumber);
@@ -2061,8 +1858,11 @@ static void bus_reset_work(struct work_struct *work)
reg = reg_read(ohci, OHCI1394_SelfIDCount);
if (ohci1394_self_id_count_is_error(reg)) {
ohci_notice(ohci, "self ID receive error\n");
- return;
+ goto end;
}
+
+ trace_self_id_complete(ohci->card.index, reg, ohci->self_id, has_be_header_quirk(ohci));
+
/*
* The count in the SelfIDCount register is the number of
* bytes in the self ID receive buffer. Since we also receive
@@ -2073,7 +1873,7 @@ static void bus_reset_work(struct work_struct *work)
if (self_id_count > 252) {
ohci_notice(ohci, "bad selfIDSize (%08x)\n", reg);
- return;
+ goto end;
}
quadlet = cond_le32_to_cpu(ohci->self_id[0], has_be_header_quirk(ohci));
@@ -2100,7 +1900,7 @@ static void bus_reset_work(struct work_struct *work)
ohci_notice(ohci, "bad self ID %d/%d (%08x != ~%08x)\n",
j, self_id_count, id, id2);
- return;
+ goto end;
}
ohci->self_id_buffer[j] = id;
}
@@ -2110,13 +1910,13 @@ static void bus_reset_work(struct work_struct *work)
if (self_id_count < 0) {
ohci_notice(ohci,
"could not construct local self ID\n");
- return;
+ goto end;
}
}
if (self_id_count == 0) {
ohci_notice(ohci, "no self IDs\n");
- return;
+ goto end;
}
rmb();
@@ -2138,7 +1938,7 @@ static void bus_reset_work(struct work_struct *work)
new_generation = ohci1394_self_id_count_get_generation(reg);
if (new_generation != generation) {
ohci_notice(ohci, "new bus reset, discarding self ids\n");
- return;
+ goto end;
}
// FIXME: Document how the locking works.
@@ -2195,12 +1995,12 @@ static void bus_reset_work(struct work_struct *work)
if (free_rom)
dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, free_rom, free_rom_bus);
- log_selfids(ohci, generation, self_id_count);
-
fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
self_id_count, ohci->self_id_buffer,
ohci->csr_state_setclear_abdicate);
ohci->csr_state_setclear_abdicate = false;
+end:
+ return IRQ_HANDLED;
}
static irqreturn_t irq_handler(int irq, void *data)
@@ -2214,11 +2014,6 @@ static irqreturn_t irq_handler(int irq, void *data)
if (!event || !~event)
return IRQ_NONE;
- if (unlikely(param_debug > 0)) {
- dev_notice_ratelimited(ohci->card.device,
- "The debug parameter is superseded by tracepoints events, and deprecated.");
- }
-
/*
* busReset and postedWriteErr events must not be cleared yet
* (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
@@ -2226,21 +2021,11 @@ static irqreturn_t irq_handler(int irq, void *data)
reg_write(ohci, OHCI1394_IntEventClear,
event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
trace_irqs(ohci->card.index, event);
- log_irqs(ohci, event);
- // The flag is masked again at bus_reset_work() scheduled by selfID event.
+
+ // The flag is masked again in handle_selfid_complete_event(), scheduled by the selfID event.
if (event & OHCI1394_busReset)
reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
- if (event & OHCI1394_selfIDComplete) {
- if (trace_self_id_complete_enabled()) {
- u32 reg = reg_read(ohci, OHCI1394_SelfIDCount);
-
- trace_self_id_complete(ohci->card.index, reg, ohci->self_id,
- has_be_header_quirk(ohci));
- }
- queue_work(selfid_workqueue, &ohci->bus_reset_work);
- }
-
if (event & OHCI1394_RQPkt)
queue_work(ohci->card.async_wq, &ohci->ar_request_ctx.work);
@@ -2311,7 +2096,10 @@ static irqreturn_t irq_handler(int irq, void *data)
} else
flush_writes(ohci);
- return IRQ_HANDLED;
+ if (event & OHCI1394_selfIDComplete)
+ return IRQ_WAKE_THREAD;
+ else
+ return IRQ_HANDLED;
}
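
This return value is what ties the conversion together: returning IRQ_WAKE_THREAD from the hard handler is what schedules handle_selfid_complete_event() in place of the old workqueue item. A minimal sketch of the hard/threaded split, under the same no-IRQF_ONESHOT arrangement described in pci_probe() below; my_dev, EVENT_REG, EVENT_CLEAR_REG, and MY_SLOW_EVENT are hypothetical placeholders:

// Hedged sketch of the hard/threaded IRQ split; names are placeholders.
static irqreturn_t my_hard_handler(int irq, void *data)
{
	struct my_dev *dev = data;
	u32 event = readl(dev->regs + EVENT_REG);

	if (!event)
		return IRQ_NONE;
	writel(event, dev->regs + EVENT_CLEAR_REG); // ack and handle fast events here

	if (event & MY_SLOW_EVENT)
		return IRQ_WAKE_THREAD; // defer the heavy work to the IRQ thread
	return IRQ_HANDLED;
}

static irqreturn_t my_thread_handler(int irq, void *data)
{
	// Runs in a kernel thread: may sleep, allocate with GFP_KERNEL, etc.
	return IRQ_HANDLED;
}

// Registered without IRQF_ONESHOT, so the hard handler keeps servicing
// new events while the thread runs:
// err = request_threaded_irq(irq, my_hard_handler, my_thread_handler, 0, "my_dev", dev);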
static int software_reset(struct fw_ohci *ohci)
@@ -2624,7 +2412,7 @@ static int ohci_set_config_rom(struct fw_card *card,
* then set up the real values for the two registers.
*
* We use ohci->lock to avoid racing with the code that sets
- * ohci->next_config_rom to NULL (see bus_reset_work).
+ * ohci->next_config_rom to NULL (see handle_selfid_complete_event).
*/
next_config_rom = dmam_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
@@ -2705,7 +2493,6 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
dma_unmap_single(ohci->card.device, packet->payload_bus,
packet->payload_length, DMA_TO_DEVICE);
- log_ar_at_event(ohci, 'T', packet->speed, packet->header, 0x20);
driver_data->packet = NULL;
packet->ack = RCODE_CANCELLED;
@@ -3695,7 +3482,6 @@ static int pci_probe(struct pci_dev *dev,
u32 bus_options, max_receive, link_speed, version;
u64 guid;
int i, flags, irq, err;
- size_t size;
if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) {
dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n");
@@ -3722,8 +3508,6 @@ static int pci_probe(struct pci_dev *dev,
spin_lock_init(&ohci->lock);
mutex_init(&ohci->phy_reg_mutex);
- INIT_WORK(&ohci->bus_reset_work, bus_reset_work);
-
if (!(pci_resource_flags(dev, 0) & IORESOURCE_MEM) ||
pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE) {
ohci_err(ohci, "invalid MMIO resource\n");
@@ -3791,8 +3575,7 @@ static int pci_probe(struct pci_dev *dev,
reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
ohci->ir_context_mask = ohci->ir_context_support;
ohci->n_ir = hweight32(ohci->ir_context_mask);
- size = sizeof(struct iso_context) * ohci->n_ir;
- ohci->ir_context_list = devm_kzalloc(&dev->dev, size, GFP_KERNEL);
+ ohci->ir_context_list = devm_kcalloc(&dev->dev, ohci->n_ir, sizeof(struct iso_context), GFP_KERNEL);
if (!ohci->ir_context_list)
return -ENOMEM;
@@ -3806,8 +3589,7 @@ static int pci_probe(struct pci_dev *dev,
reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
ohci->it_context_mask = ohci->it_context_support;
ohci->n_it = hweight32(ohci->it_context_mask);
- size = sizeof(struct iso_context) * ohci->n_it;
- ohci->it_context_list = devm_kzalloc(&dev->dev, size, GFP_KERNEL);
+ ohci->it_context_list = devm_kcalloc(&dev->dev, ohci->n_it, sizeof(struct iso_context), GFP_KERNEL);
if (!ohci->it_context_list)
return -ENOMEM;
@@ -3832,7 +3614,9 @@ static int pci_probe(struct pci_dev *dev,
goto fail_msi;
}
- err = request_threaded_irq(irq, irq_handler, NULL,
+ // IRQF_ONESHOT is not applied so that new events are still handled by the hardIRQ handler
+ // while the threaded IRQ handler for the SelfIDComplete event is running.
+ err = request_threaded_irq(irq, irq_handler, handle_selfid_complete_event,
pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, ohci_driver_name,
ohci);
if (err < 0) {
@@ -3876,7 +3660,6 @@ static void pci_remove(struct pci_dev *dev)
reg_write(ohci, OHCI1394_IntMaskClear, ~0);
flush_writes(ohci);
}
- cancel_work_sync(&ohci->bus_reset_work);
fw_core_remove_card(&ohci->card);
/*
@@ -3949,17 +3732,12 @@ static struct pci_driver fw_ohci_pci_driver = {
static int __init fw_ohci_init(void)
{
- selfid_workqueue = alloc_workqueue(KBUILD_MODNAME, WQ_MEM_RECLAIM, 0);
- if (!selfid_workqueue)
- return -ENOMEM;
-
return pci_register_driver(&fw_ohci_pci_driver);
}
static void __exit fw_ohci_cleanup(void)
{
pci_unregister_driver(&fw_ohci_pci_driver);
- destroy_workqueue(selfid_workqueue);
}
module_init(fw_ohci_init);
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 24e59ddf85e7..c7698cfaa4e8 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -401,8 +401,8 @@ static void scmi_device_release(struct device *dev)
static void __scmi_device_destroy(struct scmi_device *scmi_dev)
{
- pr_debug("(%s) Destroying SCMI device '%s' for protocol 0x%x (%s)\n",
- of_node_full_name(scmi_dev->dev.parent->of_node),
+ pr_debug("(%pOF) Destroying SCMI device '%s' for protocol 0x%x (%s)\n",
+ scmi_dev->dev.parent->of_node,
dev_name(&scmi_dev->dev), scmi_dev->protocol_id,
scmi_dev->name);
@@ -474,9 +474,8 @@ __scmi_device_create(struct device_node *np, struct device *parent,
if (retval)
goto put_dev;
- pr_debug("(%s) Created SCMI device '%s' for protocol 0x%x (%s)\n",
- of_node_full_name(parent->of_node),
- dev_name(&scmi_dev->dev), protocol, name);
+ pr_debug("(%pOF) Created SCMI device '%s' for protocol 0x%x (%s)\n",
+ parent->of_node, dev_name(&scmi_dev->dev), protocol, name);
return scmi_dev;
put_dev:
@@ -493,8 +492,8 @@ _scmi_device_create(struct device_node *np, struct device *parent,
sdev = __scmi_device_create(np, parent, protocol, name);
if (!sdev)
- pr_err("(%s) Failed to create device for protocol 0x%x (%s)\n",
- of_node_full_name(parent->of_node), protocol, name);
+ pr_err("(%pOF) Failed to create device for protocol 0x%x (%s)\n",
+ parent->of_node, protocol, name);
return sdev;
}
diff --git a/drivers/firmware/arm_scmi/quirks.c b/drivers/firmware/arm_scmi/quirks.c
index 03960aca3610..03848283c2a0 100644
--- a/drivers/firmware/arm_scmi/quirks.c
+++ b/drivers/firmware/arm_scmi/quirks.c
@@ -71,6 +71,7 @@
*/
#include <linux/ctype.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/hashtable.h>
@@ -89,9 +90,9 @@
struct scmi_quirk {
bool enabled;
const char *name;
- char *vendor;
- char *sub_vendor_id;
- char *impl_ver_range;
+ const char *vendor;
+ const char *sub_vendor_id;
+ const char *impl_ver_range;
u32 start_range;
u32 end_range;
struct static_key_false *key;
@@ -217,7 +218,7 @@ static unsigned int scmi_quirk_signature(const char *vend, const char *sub_vend)
static int scmi_quirk_range_parse(struct scmi_quirk *quirk)
{
- const char *last, *first = quirk->impl_ver_range;
+ const char *last, *first __free(kfree) = NULL;
size_t len;
char *sep;
int ret;
@@ -228,8 +229,12 @@ static int scmi_quirk_range_parse(struct scmi_quirk *quirk)
if (!len)
return 0;
+ first = kmemdup(quirk->impl_ver_range, len + 1, GFP_KERNEL);
+ if (!first)
+ return -ENOMEM;
+
last = first + len - 1;
- sep = strchr(quirk->impl_ver_range, '-');
+ sep = strchr(first, '-');
if (sep)
*sep = '\0';
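
The rewrite above leans on scope-based cleanup from <linux/cleanup.h>: annotating first with __free(kfree) arranges an automatic kfree() when the variable leaves scope, so the kmemdup() copy is released on every return path, including the early -ENOMEM one, without explicit frees. The copy is needed in the first place because impl_ver_range became const, while the parser writes '\0' over the separator in place. A minimal standalone sketch of the idiom; parse_pair() is a hypothetical example, not code from this patch:

// Hedged sketch of the __free(kfree) cleanup idiom.
#include <linux/cleanup.h>
#include <linux/slab.h>
#include <linux/string.h>

static int parse_pair(const char *src)
{
	char *copy __free(kfree) = kstrdup(src, GFP_KERNEL);
	char *sep;

	if (!copy)
		return -ENOMEM;

	sep = strchr(copy, '-');
	if (!sep)
		return -EINVAL; // copy is kfree()d automatically here
	*sep = '\0';
	// ... use copy (first half) and sep + 1 (second half) ...
	return 0; // ... and on this path as well
}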
diff --git a/drivers/firmware/arm_scmi/transports/mailbox.c b/drivers/firmware/arm_scmi/transports/mailbox.c
index bd041c99b92b..ae0f67e6cc45 100644
--- a/drivers/firmware/arm_scmi/transports/mailbox.c
+++ b/drivers/firmware/arm_scmi/transports/mailbox.c
@@ -127,8 +127,8 @@ static int mailbox_chan_validate(struct device *cdev, int *a2p_rx_chan,
(num_mb == 1 && num_sh != 1) || (num_mb == 3 && num_sh != 2) ||
(num_mb == 4 && num_sh != 2)) {
dev_warn(cdev,
- "Invalid channel descriptor for '%s' - mbs:%d shm:%d\n",
- of_node_full_name(np), num_mb, num_sh);
+ "Invalid channel descriptor for '%pOF' - mbs:%d shm:%d\n",
+ np, num_mb, num_sh);
return -EINVAL;
}
@@ -140,8 +140,7 @@ static int mailbox_chan_validate(struct device *cdev, int *a2p_rx_chan,
of_parse_phandle(np, "shmem", 1);
if (!np_tx || !np_rx || np_tx == np_rx) {
- dev_warn(cdev, "Invalid shmem descriptor for '%s'\n",
- of_node_full_name(np));
+ dev_warn(cdev, "Invalid shmem descriptor for '%pOF'\n", np);
ret = -EINVAL;
}
}
diff --git a/drivers/firmware/arm_scmi/transports/optee.c b/drivers/firmware/arm_scmi/transports/optee.c
index 3949a877e17d..dc0f46340153 100644
--- a/drivers/firmware/arm_scmi/transports/optee.c
+++ b/drivers/firmware/arm_scmi/transports/optee.c
@@ -498,7 +498,7 @@ static void scmi_optee_mark_txdone(struct scmi_chan_info *cinfo, int ret,
mutex_unlock(&channel->mu);
}
-static struct scmi_transport_ops scmi_optee_ops = {
+static const struct scmi_transport_ops scmi_optee_ops = {
.chan_available = scmi_optee_chan_available,
.chan_setup = scmi_optee_chan_setup,
.chan_free = scmi_optee_chan_free,
diff --git a/drivers/firmware/arm_scmi/transports/virtio.c b/drivers/firmware/arm_scmi/transports/virtio.c
index cb934db9b2b4..326c4a93e44b 100644
--- a/drivers/firmware/arm_scmi/transports/virtio.c
+++ b/drivers/firmware/arm_scmi/transports/virtio.c
@@ -871,6 +871,9 @@ static int scmi_vio_probe(struct virtio_device *vdev)
/* Ensure initialized scmi_vdev is visible */
smp_store_mb(scmi_vdev, vdev);
+ /* Set device ready */
+ virtio_device_ready(vdev);
+
ret = platform_driver_register(&scmi_virtio_driver);
if (ret) {
vdev->priv = NULL;
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
index a8915d3b4df5..700a3f24f4ef 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
@@ -25,7 +25,10 @@
enum scmi_imx_misc_protocol_cmd {
SCMI_IMX_MISC_CTRL_SET = 0x3,
SCMI_IMX_MISC_CTRL_GET = 0x4,
+ SCMI_IMX_MISC_DISCOVER_BUILD_INFO = 0x6,
SCMI_IMX_MISC_CTRL_NOTIFY = 0x8,
+ SCMI_IMX_MISC_CFG_INFO_GET = 0xC,
+ SCMI_IMX_MISC_BOARD_INFO = 0xE,
};
struct scmi_imx_misc_info {
@@ -65,6 +68,27 @@ struct scmi_imx_misc_ctrl_get_out {
__le32 val[];
};
+struct scmi_imx_misc_buildinfo_out {
+ __le32 buildnum;
+ __le32 buildcommit;
+#define MISC_MAX_BUILDDATE 16
+ u8 builddate[MISC_MAX_BUILDDATE];
+#define MISC_MAX_BUILDTIME 16
+ u8 buildtime[MISC_MAX_BUILDTIME];
+};
+
+struct scmi_imx_misc_board_info_out {
+ __le32 attributes;
+#define MISC_MAX_BRDNAME 16
+ u8 brdname[MISC_MAX_BRDNAME];
+};
+
+struct scmi_imx_misc_cfg_info_out {
+ __le32 msel;
+#define MISC_MAX_CFGNAME 16
+ u8 cfgname[MISC_MAX_CFGNAME];
+};
+
static int scmi_imx_misc_attributes_get(const struct scmi_protocol_handle *ph,
struct scmi_imx_misc_info *mi)
{
@@ -272,6 +296,81 @@ static int scmi_imx_misc_ctrl_set(const struct scmi_protocol_handle *ph,
return ret;
}
+static int scmi_imx_misc_build_info_discover(const struct scmi_protocol_handle *ph)
+{
+ char date[MISC_MAX_BUILDDATE], time[MISC_MAX_BUILDTIME];
+ struct scmi_imx_misc_buildinfo_out *out;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_DISCOVER_BUILD_INFO, 0,
+ sizeof(*out), &t);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ out = t->rx.buf;
+ strscpy(date, out->builddate, MISC_MAX_BUILDDATE);
+ strscpy(time, out->buildtime, MISC_MAX_BUILDTIME);
+ dev_info(ph->dev, "SM Version\t= Build %u, Commit %08x %s %s\n",
+ le32_to_cpu(out->buildnum), le32_to_cpu(out->buildcommit),
+ date, time);
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_misc_board_info(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_imx_misc_board_info_out *out;
+ char name[MISC_MAX_BRDNAME];
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_BOARD_INFO, 0, sizeof(*out), &t);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ out = t->rx.buf;
+ strscpy(name, out->brdname, MISC_MAX_BRDNAME);
+ dev_info(ph->dev, "Board\t\t= %s, attr=0x%08x\n",
+ name, le32_to_cpu(out->attributes));
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_misc_cfg_info_get(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_imx_misc_cfg_info_out *out;
+ char name[MISC_MAX_CFGNAME];
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_CFG_INFO_GET, 0, sizeof(*out), &t);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ out = t->rx.buf;
+ strscpy(name, out->cfgname, MISC_MAX_CFGNAME);
+ dev_info(ph->dev, "SM Config\t= %s, mSel = %u\n",
+ name, le32_to_cpu(out->msel));
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
static const struct scmi_imx_misc_proto_ops scmi_imx_misc_proto_ops = {
.misc_ctrl_set = scmi_imx_misc_ctrl_set,
.misc_ctrl_get = scmi_imx_misc_ctrl_get,
@@ -299,6 +398,18 @@ static int scmi_imx_misc_protocol_init(const struct scmi_protocol_handle *ph)
if (ret)
return ret;
+ ret = scmi_imx_misc_build_info_discover(ph);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+
+ ret = scmi_imx_misc_board_info(ph);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+
+ ret = scmi_imx_misc_cfg_info_get(ph);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+
return ph->set_priv(ph, minfo, version);
}
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx95.rst b/drivers/firmware/arm_scmi/vendors/imx/imx95.rst
index 4e246a78a042..741f4eace350 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/imx95.rst
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx95.rst
@@ -1660,6 +1660,7 @@ protocol_id: 0x84
|Name |Description |
+--------------------+---------------------------------------------------------+
|int32 status |SUCCESS: system log return |
+| |NOT_SUPPORTED: system log not available |
+--------------------+---------------------------------------------------------+
|uint32 numLogflags |Descriptor for the log data returned by this call. |
| |Bits[31:20] Number of remaining log words. |
@@ -1670,6 +1671,30 @@ protocol_id: 0x84
|uint32 syslog[N] |Log data array, N is defined in bits[11:0] of numLogflags|
+--------------------+---------------------------------------------------------+
+MISC_BOARD_INFO
+~~~~~~~~~~~~~~~
+
+message_id: 0xE
+protocol_id: 0x84
+
++--------------------+---------------------------------------------------------+
+|Return values |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|int32 status |SUCCESS: board info return |
+| |NOT_SUPPORTED: board info not available |
++--------------------+---------------------------------------------------------+
+|uint32 attributes |Board-specific attributes reserved for future expansion |
+| |without breaking backwards compatibility. The firmware |
+| |sets the value to 0 |
++--------------------+---------------------------------------------------------+
+|uint8 boardname[16] |Board name. NULL terminated ASCII string, up to 16 bytes |
+| |in length. This is the System Manager (SM) firmware- |
+| |exported board name and may not align with the board |
+| |name in the device tree. |
++--------------------+---------------------------------------------------------+
+
NEGOTIATE_PROTOCOL_VERSION
~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/drivers/firmware/arm_scmi/voltage.c b/drivers/firmware/arm_scmi/voltage.c
index fda6a1573609..17127880e10a 100644
--- a/drivers/firmware/arm_scmi/voltage.c
+++ b/drivers/firmware/arm_scmi/voltage.c
@@ -393,7 +393,7 @@ static int scmi_voltage_domains_num_get(const struct scmi_protocol_handle *ph)
return vinfo->num_domains;
}
-static struct scmi_voltage_proto_ops voltage_proto_ops = {
+static const struct scmi_voltage_proto_ops voltage_proto_ops = {
.num_domains_get = scmi_voltage_domains_num_get,
.info_get = scmi_voltage_info_get,
.config_set = scmi_voltage_config_set,
diff --git a/drivers/firmware/broadcom/bcm47xx_sprom.c b/drivers/firmware/broadcom/bcm47xx_sprom.c
index 14fbcd11657c..fdcd3a07abcd 100644
--- a/drivers/firmware/broadcom/bcm47xx_sprom.c
+++ b/drivers/firmware/broadcom/bcm47xx_sprom.c
@@ -404,7 +404,7 @@ static void bcm47xx_sprom_fill_auto(struct ssb_sprom *sprom,
ENTRY(0x00000700, u8, pre, "noiselvl5gua1", noiselvl5gua[1], 0, fb);
ENTRY(0x00000700, u8, pre, "noiselvl5gua2", noiselvl5gua[2], 0, fb);
}
-#undef ENTRY /* It's specififc, uses local variable, don't use it (again). */
+#undef ENTRY /* It's specific, uses local variable, don't use it (again). */
static void bcm47xx_fill_sprom_path_r4589(struct ssb_sprom *sprom,
const char *prefix, bool fallback)
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 16baa038d412..29e0729299f5 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -263,6 +263,15 @@ config EFI_COCO_SECRET
virt/coco/efi_secret module to access the secrets, which in turn
allows userspace programs to access the injected secrets.
+config OVMF_DEBUG_LOG
+ bool "Expose OVMF firmware debug log via sysfs"
+ depends on EFI
+ help
+ Recent versions of the Open Virtual Machine Firmware
+ (edk2-stable202508 and newer) can write their debug log to a memory
+ buffer. This driver exposes the log content via sysfs
+ (/sys/firmware/efi/ovmf_debug_log).
+
config UNACCEPTED_MEMORY
bool
depends on EFI_STUB
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index a2d0009560d0..8efbcf699e4f 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_APPLE_PROPERTIES) += apple-properties.o
obj-$(CONFIG_EFI_RCI2_TABLE) += rci2-table.o
obj-$(CONFIG_EFI_EMBEDDED_FIRMWARE) += embedded-firmware.o
obj-$(CONFIG_LOAD_UEFI_KEYS) += mokvar-table.o
+obj-$(CONFIG_OVMF_DEBUG_LOG) += ovmf-debug-log.o
obj-$(CONFIG_SYSFB) += sysfb_efi.o
diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c
index a00e07b853f2..a65c2d5b9e7b 100644
--- a/drivers/firmware/efi/efi-init.c
+++ b/drivers/firmware/efi/efi-init.c
@@ -12,6 +12,7 @@
#include <linux/efi.h>
#include <linux/fwnode.h>
#include <linux/init.h>
+#include <linux/kexec_handover.h>
#include <linux/memblock.h>
#include <linux/mm_types.h>
#include <linux/of.h>
@@ -164,12 +165,32 @@ static __init void reserve_regions(void)
pr_info("Processing EFI memory map:\n");
/*
- * Discard memblocks discovered so far: if there are any at this
- * point, they originate from memory nodes in the DT, and UEFI
- * uses its own memory map instead.
+ * Discard memblocks discovered so far except for KHO scratch
+ * regions. Most memblocks at this point originate from memory nodes
+ * in the DT and UEFI uses its own memory map instead. However, if
+ * KHO is enabled, scratch regions, which are good known memory
+ * must be preserved.
*/
memblock_dump_all();
- memblock_remove(0, PHYS_ADDR_MAX);
+
+ if (is_kho_boot()) {
+ struct memblock_region *r;
+
+ /* Remove all non-KHO regions */
+ for_each_mem_region(r) {
+ if (!memblock_is_kho_scratch(r)) {
+ memblock_remove(r->base, r->size);
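+ /* memblock_remove() compacts the regions array; step back so the loop revisits this index. */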
+ r--;
+ }
+ }
+ } else {
+ /*
+ * KHO is disabled. Discard memblocks discovered so far:
+ * if there are any at this point, they originate from memory
+ * nodes in the DT, and UEFI uses its own memory map instead.
+ */
+ memblock_remove(0, PHYS_ADDR_MAX);
+ }
for_each_efi_memory_desc(md) {
paddr = md->phys_addr;
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index e57bff702b5f..1ce428e2ac8a 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -45,6 +45,7 @@ struct efi __read_mostly efi = {
.esrt = EFI_INVALID_TABLE_ADDR,
.tpm_log = EFI_INVALID_TABLE_ADDR,
.tpm_final_log = EFI_INVALID_TABLE_ADDR,
+ .ovmf_debug_log = EFI_INVALID_TABLE_ADDR,
#ifdef CONFIG_LOAD_UEFI_KEYS
.mokvar_table = EFI_INVALID_TABLE_ADDR,
#endif
@@ -473,6 +474,10 @@ static int __init efisubsys_init(void)
platform_device_register_simple("efi_secret", 0, NULL, 0);
#endif
+ if (IS_ENABLED(CONFIG_OVMF_DEBUG_LOG) &&
+ efi.ovmf_debug_log != EFI_INVALID_TABLE_ADDR)
+ ovmf_log_probe(efi.ovmf_debug_log);
+
return 0;
err_remove_group:
@@ -617,6 +622,9 @@ static const efi_config_table_type_t common_tables[] __initconst = {
{LINUX_EFI_MEMRESERVE_TABLE_GUID, &mem_reserve, "MEMRESERVE" },
{LINUX_EFI_INITRD_MEDIA_GUID, &initrd, "INITRD" },
{EFI_RT_PROPERTIES_TABLE_GUID, &rt_prop, "RTPROP" },
+#ifdef CONFIG_OVMF_DEBUG_LOG
+ {OVMF_MEMORY_LOG_TABLE_GUID, &efi.ovmf_debug_log, "OvmfDebugLog" },
+#endif
#ifdef CONFIG_EFI_RCI2_TABLE
{DELLEMC_EFI_RCI2_TABLE_GUID, &rci2_table_phys },
#endif
diff --git a/drivers/firmware/efi/libstub/printk.c b/drivers/firmware/efi/libstub/printk.c
index 3a67a2cea7bd..bc599212c05d 100644
--- a/drivers/firmware/efi/libstub/printk.c
+++ b/drivers/firmware/efi/libstub/printk.c
@@ -5,13 +5,13 @@
#include <linux/ctype.h>
#include <linux/efi.h>
#include <linux/kernel.h>
-#include <linux/printk.h> /* For CONSOLE_LOGLEVEL_* */
+#include <linux/kern_levels.h>
#include <asm/efi.h>
#include <asm/setup.h>
#include "efistub.h"
-int efi_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
+int efi_loglevel = LOGLEVEL_NOTICE;
/**
* efi_char16_puts() - Write a UCS-2 encoded string to the console
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index cafc90d4caaf..761121a77f9e 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -300,7 +300,7 @@ efi_status_t efi_adjust_memory_range_protection(unsigned long start,
return EFI_SUCCESS;
/*
- * Don't modify memory region attributes, they are
+ * Don't modify memory region attributes if they are
* already suitable, to lower the possibility to
* encounter firmware bugs.
*/
@@ -315,11 +315,13 @@ efi_status_t efi_adjust_memory_range_protection(unsigned long start,
next = desc.base_address + desc.length;
/*
- * Only system memory is suitable for trampoline/kernel image placement,
- * so only this type of memory needs its attributes to be modified.
+ * Only system memory and more reliable memory are suitable for
+ * trampoline/kernel image placement, so only those memory types
+ * may need their attributes modified.
*/
- if (desc.gcd_memory_type != EfiGcdMemoryTypeSystemMemory ||
+ if ((desc.gcd_memory_type != EfiGcdMemoryTypeSystemMemory &&
+ desc.gcd_memory_type != EfiGcdMemoryTypeMoreReliable) ||
(desc.attributes & (EFI_MEMORY_RO | EFI_MEMORY_XP)) == 0)
continue;
@@ -788,7 +790,9 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry,
*kernel_entry = addr + entry;
- return efi_adjust_memory_range_protection(addr, kernel_text_size);
+ return efi_adjust_memory_range_protection(addr, kernel_text_size) ?:
+ efi_adjust_memory_range_protection(addr + kernel_inittext_offset,
+ kernel_inittext_size);
}
static void __noreturn enter_kernel(unsigned long kernel_addr,
diff --git a/drivers/firmware/efi/ovmf-debug-log.c b/drivers/firmware/efi/ovmf-debug-log.c
new file mode 100644
index 000000000000..5b2471ffaeed
--- /dev/null
+++ b/drivers/firmware/efi/ovmf-debug-log.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sysfs.h>
+
+#define OVMF_DEBUG_LOG_MAGIC1 0x3167646d666d766f // "ovmfmdg1"
+#define OVMF_DEBUG_LOG_MAGIC2 0x3267646d666d766f // "ovmfmdg2"
+
+struct ovmf_debug_log_header {
+ u64 magic1;
+ u64 magic2;
+ u64 hdr_size;
+ u64 log_size;
+ u64 lock; // edk2 spinlock
+ u64 head_off;
+ u64 tail_off;
+ u64 truncated;
+ u8 fw_version[128];
+};
+
+static struct ovmf_debug_log_header *hdr;
+static u8 *logbuf;
+static u64 logbufsize;
+
+static ssize_t ovmf_log_read(struct file *filp, struct kobject *kobj,
+ const struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t count)
+{
+ u64 start, end;
+
+ start = hdr->head_off + offset;
+ if (hdr->head_off > hdr->tail_off && start >= hdr->log_size)
+ start -= hdr->log_size;
+
+ end = start + count;
+ if (start > hdr->tail_off) {
+ if (end > hdr->log_size)
+ end = hdr->log_size;
+ } else {
+ if (end > hdr->tail_off)
+ end = hdr->tail_off;
+ }
+
+ if (start > logbufsize || end > logbufsize)
+ return 0;
+ if (start >= end)
+ return 0;
+
+ memcpy(buf, logbuf + start, end - start);
+ return end - start;
+}
+
+static struct bin_attribute ovmf_log_bin_attr = {
+ .attr = {
+ .name = "ovmf_debug_log",
+ .mode = 0444,
+ },
+ .read = ovmf_log_read,
+};
+
+int __init ovmf_log_probe(unsigned long ovmf_debug_log_table)
+{
+ int ret = -EINVAL;
+ u64 size;
+
+ /* map + verify header */
+ hdr = memremap(ovmf_debug_log_table, sizeof(*hdr), MEMREMAP_WB);
+ if (!hdr) {
+ pr_err("OVMF debug log: header map failed\n");
+ return -EINVAL;
+ }
+
+ if (hdr->magic1 != OVMF_DEBUG_LOG_MAGIC1 ||
+ hdr->magic2 != OVMF_DEBUG_LOG_MAGIC2) {
+ pr_err("OVMF debug log: magic mismatch\n");
+ goto err_unmap;
+ }
+
+ size = hdr->hdr_size + hdr->log_size;
+ pr_info("OVMF debug log: firmware version: \"%s\"\n", hdr->fw_version);
+ pr_info("OVMF debug log: buffer size: %lluk\n", size / 1024);
+
+ /* map complete log buffer */
+ memunmap(hdr);
+ hdr = memremap(ovmf_debug_log_table, size, MEMREMAP_WB);
+ if (!hdr) {
+ pr_err("OVMF debug log: buffer map failed\n");
+ return -EINVAL;
+ }
+ logbuf = (void *)hdr + hdr->hdr_size;
+ logbufsize = hdr->log_size;
+
+ ovmf_log_bin_attr.size = size;
+ ret = sysfs_create_bin_file(efi_kobj, &ovmf_log_bin_attr);
+ if (ret != 0) {
+ pr_err("OVMF debug log: sysfs register failed\n");
+ goto err_unmap;
+ }
+
+ return 0;
+
+err_unmap:
+ memunmap(hdr);
+ return ret;
+}
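
Since the new attribute is a plain binary sysfs file, the log can be dumped with any sequential reader; ovmf_log_read() linearizes the ring from head_off toward tail_off as successive reads advance the offset. A hedged userspace sketch, assuming the path given in the Kconfig help text above:

/* Hedged userspace sketch: dump the OVMF debug log exposed above. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/firmware/efi/ovmf_debug_log", "r");
	char buf[4096];
	size_t n;

	if (!f) {
		perror("ovmf_debug_log");
		return 1;
	}
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);
	fclose(f);
	return 0;
}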
diff --git a/drivers/firmware/efi/stmm/tee_stmm_efi.c b/drivers/firmware/efi/stmm/tee_stmm_efi.c
index f741ca279052..65c0fe1ba275 100644
--- a/drivers/firmware/efi/stmm/tee_stmm_efi.c
+++ b/drivers/firmware/efi/stmm/tee_stmm_efi.c
@@ -143,6 +143,10 @@ static efi_status_t mm_communicate(u8 *comm_buf, size_t payload_size)
return var_hdr->ret_status;
}
+#define COMM_BUF_SIZE(__payload_size) (MM_COMMUNICATE_HEADER_SIZE + \
+ MM_VARIABLE_COMMUNICATE_SIZE + \
+ (__payload_size))
+
/**
* setup_mm_hdr() - Allocate a buffer for StandAloneMM and initialize the
* header data.
@@ -150,11 +154,9 @@ static efi_status_t mm_communicate(u8 *comm_buf, size_t payload_size)
* @dptr: pointer address to store allocated buffer
* @payload_size: payload size
* @func: standAloneMM function number
- * @ret: EFI return code
* Return: pointer to corresponding StandAloneMM function buffer or NULL
*/
-static void *setup_mm_hdr(u8 **dptr, size_t payload_size, size_t func,
- efi_status_t *ret)
+static void *setup_mm_hdr(u8 **dptr, size_t payload_size, size_t func)
{
const efi_guid_t mm_var_guid = EFI_MM_VARIABLE_GUID;
struct efi_mm_communicate_header *mm_hdr;
@@ -169,17 +171,13 @@ static void *setup_mm_hdr(u8 **dptr, size_t payload_size, size_t func,
if (max_buffer_size &&
max_buffer_size < (MM_COMMUNICATE_HEADER_SIZE +
MM_VARIABLE_COMMUNICATE_SIZE + payload_size)) {
- *ret = EFI_INVALID_PARAMETER;
return NULL;
}
- comm_buf = kzalloc(MM_COMMUNICATE_HEADER_SIZE +
- MM_VARIABLE_COMMUNICATE_SIZE + payload_size,
- GFP_KERNEL);
- if (!comm_buf) {
- *ret = EFI_OUT_OF_RESOURCES;
+ comm_buf = alloc_pages_exact(COMM_BUF_SIZE(payload_size),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!comm_buf)
return NULL;
- }
mm_hdr = (struct efi_mm_communicate_header *)comm_buf;
memcpy(&mm_hdr->header_guid, &mm_var_guid, sizeof(mm_hdr->header_guid));
@@ -187,9 +185,7 @@ static void *setup_mm_hdr(u8 **dptr, size_t payload_size, size_t func,
var_hdr = (struct smm_variable_communicate_header *)mm_hdr->data;
var_hdr->function = func;
- if (dptr)
- *dptr = comm_buf;
- *ret = EFI_SUCCESS;
+ *dptr = comm_buf;
return var_hdr->data;
}
@@ -212,10 +208,9 @@ static efi_status_t get_max_payload(size_t *size)
payload_size = sizeof(*var_payload);
var_payload = setup_mm_hdr(&comm_buf, payload_size,
- SMM_VARIABLE_FUNCTION_GET_PAYLOAD_SIZE,
- &ret);
+ SMM_VARIABLE_FUNCTION_GET_PAYLOAD_SIZE);
if (!var_payload)
- return EFI_OUT_OF_RESOURCES;
+ return EFI_DEVICE_ERROR;
ret = mm_communicate(comm_buf, payload_size);
if (ret != EFI_SUCCESS)
@@ -239,7 +234,7 @@ static efi_status_t get_max_payload(size_t *size)
*/
*size -= 2;
out:
- kfree(comm_buf);
+ free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
return ret;
}
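
The allocation change repeats across every caller in this file, so the invariant is worth spelling out: free_pages_exact() must be passed the same size that alloc_pages_exact() was given, and the COMM_BUF_SIZE() macro keeps both ends computing it identically from payload_size (kfree() needed no size argument, which is why the macro only appears with this conversion). A condensed sketch of the pairing:

// Hedged sketch of the alloc_pages_exact()/free_pages_exact() pairing.
// COMM_BUF_SIZE() keeps the two size computations in sync.
u8 *comm_buf = alloc_pages_exact(COMM_BUF_SIZE(payload_size),
				 GFP_KERNEL | __GFP_ZERO);
if (!comm_buf)
	return EFI_DEVICE_ERROR;
/* ... build the header, then mm_communicate(comm_buf, payload_size) ... */
free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));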
@@ -259,9 +254,9 @@ static efi_status_t get_property_int(u16 *name, size_t name_size,
smm_property = setup_mm_hdr(
&comm_buf, payload_size,
- SMM_VARIABLE_FUNCTION_VAR_CHECK_VARIABLE_PROPERTY_GET, &ret);
+ SMM_VARIABLE_FUNCTION_VAR_CHECK_VARIABLE_PROPERTY_GET);
if (!smm_property)
- return EFI_OUT_OF_RESOURCES;
+ return EFI_DEVICE_ERROR;
memcpy(&smm_property->guid, vendor, sizeof(smm_property->guid));
smm_property->name_size = name_size;
@@ -282,7 +277,7 @@ static efi_status_t get_property_int(u16 *name, size_t name_size,
memcpy(var_property, &smm_property->property, sizeof(*var_property));
out:
- kfree(comm_buf);
+ free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
return ret;
}
@@ -315,9 +310,9 @@ static efi_status_t tee_get_variable(u16 *name, efi_guid_t *vendor,
payload_size = MM_VARIABLE_ACCESS_HEADER_SIZE + name_size + tmp_dsize;
var_acc = setup_mm_hdr(&comm_buf, payload_size,
- SMM_VARIABLE_FUNCTION_GET_VARIABLE, &ret);
+ SMM_VARIABLE_FUNCTION_GET_VARIABLE);
if (!var_acc)
- return EFI_OUT_OF_RESOURCES;
+ return EFI_DEVICE_ERROR;
/* Fill in contents */
memcpy(&var_acc->guid, vendor, sizeof(var_acc->guid));
@@ -347,7 +342,7 @@ static efi_status_t tee_get_variable(u16 *name, efi_guid_t *vendor,
memcpy(data, (u8 *)var_acc->name + var_acc->name_size,
var_acc->data_size);
out:
- kfree(comm_buf);
+ free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
return ret;
}
@@ -380,10 +375,9 @@ static efi_status_t tee_get_next_variable(unsigned long *name_size,
payload_size = MM_VARIABLE_GET_NEXT_HEADER_SIZE + out_name_size;
var_getnext = setup_mm_hdr(&comm_buf, payload_size,
- SMM_VARIABLE_FUNCTION_GET_NEXT_VARIABLE_NAME,
- &ret);
+ SMM_VARIABLE_FUNCTION_GET_NEXT_VARIABLE_NAME);
if (!var_getnext)
- return EFI_OUT_OF_RESOURCES;
+ return EFI_DEVICE_ERROR;
/* Fill in contents */
memcpy(&var_getnext->guid, guid, sizeof(var_getnext->guid));
@@ -404,7 +398,7 @@ static efi_status_t tee_get_next_variable(unsigned long *name_size,
memcpy(name, var_getnext->name, var_getnext->name_size);
out:
- kfree(comm_buf);
+ free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
return ret;
}
@@ -437,9 +431,9 @@ static efi_status_t tee_set_variable(efi_char16_t *name, efi_guid_t *vendor,
* the properties, if the allocation fails
*/
var_acc = setup_mm_hdr(&comm_buf, payload_size,
- SMM_VARIABLE_FUNCTION_SET_VARIABLE, &ret);
+ SMM_VARIABLE_FUNCTION_SET_VARIABLE);
if (!var_acc)
- return EFI_OUT_OF_RESOURCES;
+ return EFI_DEVICE_ERROR;
/*
* The API has the ability to override RO flags. If no RO check was
@@ -467,7 +461,7 @@ static efi_status_t tee_set_variable(efi_char16_t *name, efi_guid_t *vendor,
ret = mm_communicate(comm_buf, payload_size);
dev_dbg(pvt_data.dev, "Set Variable %s %d %lx\n", __FILE__, __LINE__, ret);
out:
- kfree(comm_buf);
+ free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
return ret;
}
@@ -492,10 +486,9 @@ static efi_status_t tee_query_variable_info(u32 attributes,
payload_size = sizeof(*mm_query_info);
mm_query_info = setup_mm_hdr(&comm_buf, payload_size,
- SMM_VARIABLE_FUNCTION_QUERY_VARIABLE_INFO,
- &ret);
+ SMM_VARIABLE_FUNCTION_QUERY_VARIABLE_INFO);
if (!mm_query_info)
- return EFI_OUT_OF_RESOURCES;
+ return EFI_DEVICE_ERROR;
mm_query_info->attr = attributes;
ret = mm_communicate(comm_buf, payload_size);
@@ -507,7 +500,7 @@ static efi_status_t tee_query_variable_info(u32 attributes,
*max_variable_size = mm_query_info->max_variable_size;
out:
- kfree(comm_buf);
+ free_pages_exact(comm_buf, COMM_BUF_SIZE(payload_size));
return ret;
}
diff --git a/drivers/firmware/meson/Kconfig b/drivers/firmware/meson/Kconfig
index f2fdd3756648..179f5d46d8dd 100644
--- a/drivers/firmware/meson/Kconfig
+++ b/drivers/firmware/meson/Kconfig
@@ -5,7 +5,7 @@
config MESON_SM
tristate "Amlogic Secure Monitor driver"
depends on ARCH_MESON || COMPILE_TEST
- default y
+ default ARCH_MESON
depends on ARM64_4K_PAGES
help
Say y here to enable the Amlogic secure monitor driver
diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c
index f25a9746249b..3ab67aaa9e5d 100644
--- a/drivers/firmware/meson/meson_sm.c
+++ b/drivers/firmware/meson/meson_sm.c
@@ -232,11 +232,16 @@ EXPORT_SYMBOL(meson_sm_call_write);
struct meson_sm_firmware *meson_sm_get(struct device_node *sm_node)
{
struct platform_device *pdev = of_find_device_by_node(sm_node);
+ struct meson_sm_firmware *fw;
if (!pdev)
return NULL;
- return platform_get_drvdata(pdev);
+ fw = platform_get_drvdata(pdev);
+
+ put_device(&pdev->dev);
+
+ return fw;
}
EXPORT_SYMBOL_GPL(meson_sm_get);
diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
index 26cd0458aacd..e777b7cb9b12 100644
--- a/drivers/firmware/qcom/qcom_scm.c
+++ b/drivers/firmware/qcom/qcom_scm.c
@@ -1119,7 +1119,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
if (ret) {
dev_err(__scm->dev,
"Assign memory protection call failed %d\n", ret);
- return -EINVAL;
+ return ret;
}
*srcvm = next_vm;
@@ -1994,11 +1994,14 @@ static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
{ .compatible = "asus,vivobook-s15" },
{ .compatible = "asus,zenbook-a14-ux3407qa" },
{ .compatible = "asus,zenbook-a14-ux3407ra" },
+ { .compatible = "dell,inspiron-14-plus-7441" },
+ { .compatible = "dell,latitude-7455" },
{ .compatible = "dell,xps13-9345" },
{ .compatible = "hp,elitebook-ultra-g1q" },
{ .compatible = "hp,omnibook-x14" },
{ .compatible = "huawei,gaokun3" },
{ .compatible = "lenovo,flex-5g" },
+ { .compatible = "lenovo,thinkbook-16" },
{ .compatible = "lenovo,thinkpad-t14s" },
{ .compatible = "lenovo,thinkpad-x13s", },
{ .compatible = "lenovo,yoga-slim7x" },
@@ -2006,6 +2009,7 @@ static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
{ .compatible = "microsoft,blackrock" },
{ .compatible = "microsoft,romulus13", },
{ .compatible = "microsoft,romulus15", },
+ { .compatible = "qcom,hamoa-iot-evk" },
{ .compatible = "qcom,sc8180x-primus" },
{ .compatible = "qcom,x1e001de-devkit" },
{ .compatible = "qcom,x1e80100-crd" },
@@ -2094,6 +2098,122 @@ static int qcom_scm_qseecom_init(struct qcom_scm *scm)
#endif /* CONFIG_QCOM_QSEECOM */
/**
+ * qcom_scm_qtee_invoke_smc() - Invoke a QTEE object.
+ * @inbuf: start address of memory area used for inbound buffer.
+ * @inbuf_size: size of the memory area used for inbound buffer.
+ * @outbuf: start address of memory area used for outbound buffer.
+ * @outbuf_size: size of the memory area used for outbound buffer.
+ * @result: result of QTEE object invocation.
+ * @response_type: response type returned by QTEE.
+ *
+ * @response_type determines how the contents of @inbuf and @outbuf
+ * should be processed.
+ *
+ * Return: 0 on success, or a negative errno on failure.
+ */
+int qcom_scm_qtee_invoke_smc(phys_addr_t inbuf, size_t inbuf_size,
+ phys_addr_t outbuf, size_t outbuf_size,
+ u64 *result, u64 *response_type)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_SMCINVOKE,
+ .cmd = QCOM_SCM_SMCINVOKE_INVOKE,
+ .owner = ARM_SMCCC_OWNER_TRUSTED_OS,
+ .args[0] = inbuf,
+ .args[1] = inbuf_size,
+ .args[2] = outbuf,
+ .args[3] = outbuf_size,
+ .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RW, QCOM_SCM_VAL,
+ QCOM_SCM_RW, QCOM_SCM_VAL),
+ };
+ struct qcom_scm_res res;
+ int ret;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+ if (ret)
+ return ret;
+
+ if (response_type)
+ *response_type = res.result[0];
+
+ if (result)
+ *result = res.result[1];
+
+ return 0;
+}
+EXPORT_SYMBOL(qcom_scm_qtee_invoke_smc);
+
+/**
+ * qcom_scm_qtee_callback_response() - Submit response for callback request.
+ * @buf: start address of memory area used for outbound buffer.
+ * @buf_size: size of the memory area used for outbound buffer.
+ * @result: Result of QTEE object invocation.
+ * @response_type: Response type returned by QTEE.
+ *
+ * @response_type determines how the contents of @buf should be processed.
+ *
+ * Return: 0 on success, or a negative errno on failure.
+ */
+int qcom_scm_qtee_callback_response(phys_addr_t buf, size_t buf_size,
+ u64 *result, u64 *response_type)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_SMCINVOKE,
+ .cmd = QCOM_SCM_SMCINVOKE_CB_RSP,
+ .owner = ARM_SMCCC_OWNER_TRUSTED_OS,
+ .args[0] = buf,
+ .args[1] = buf_size,
+ .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL),
+ };
+ struct qcom_scm_res res;
+ int ret;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+ if (ret)
+ return ret;
+
+ if (response_type)
+ *response_type = res.result[0];
+
+ if (result)
+ *result = res.result[1];
+
+ return 0;
+}
+EXPORT_SYMBOL(qcom_scm_qtee_callback_response);
+
+static void qcom_scm_qtee_free(void *data)
+{
+ struct platform_device *qtee_dev = data;
+
+ platform_device_unregister(qtee_dev);
+}
+
+static void qcom_scm_qtee_init(struct qcom_scm *scm)
+{
+ struct platform_device *qtee_dev;
+ u64 result, response_type;
+ int ret;
+
+ /*
+ * Probe for smcinvoke support. The call is expected to fail due to the
+ * invalid buffers, but the QTEE syscall handler first checks whether the
+ * call itself is supported; if it is not, -EIO is returned.
+ */
+ ret = qcom_scm_qtee_invoke_smc(0, 0, 0, 0, &result, &response_type);
+ if (ret == -EIO)
+ return;
+
+ /* Setup QTEE interface device. */
+ qtee_dev = platform_device_register_data(scm->dev, "qcomtee",
+ PLATFORM_DEVID_NONE, NULL, 0);
+ if (IS_ERR(qtee_dev))
+ return;
+
+ devm_add_action_or_reset(scm->dev, qcom_scm_qtee_free, qtee_dev);
+}
+
+/**
* qcom_scm_is_available() - Checks if SCM is available
*/
bool qcom_scm_is_available(void)
@@ -2325,6 +2445,9 @@ static int qcom_scm_probe(struct platform_device *pdev)
ret = qcom_scm_qseecom_init(scm);
WARN(ret < 0, "failed to initialize qseecom: %d\n", ret);
+ /* Initialize the QTEE object interface. */
+ qcom_scm_qtee_init(scm);
+
return 0;
}
diff --git a/drivers/firmware/qcom/qcom_scm.h b/drivers/firmware/qcom/qcom_scm.h
index 0e8dd838099e..a56c8212cc0c 100644
--- a/drivers/firmware/qcom/qcom_scm.h
+++ b/drivers/firmware/qcom/qcom_scm.h
@@ -156,6 +156,13 @@ int qcom_scm_shm_bridge_enable(struct device *scm_dev);
#define QCOM_SCM_SVC_GPU 0x28
#define QCOM_SCM_SVC_GPU_INIT_REGS 0x01
+/* ARM_SMCCC_OWNER_TRUSTED_OS calls */
+
+#define QCOM_SCM_SVC_SMCINVOKE 0x06
+#define QCOM_SCM_SMCINVOKE_INVOKE_LEGACY 0x00
+#define QCOM_SCM_SMCINVOKE_CB_RSP 0x01
+#define QCOM_SCM_SMCINVOKE_INVOKE 0x02
+
/* common error codes */
#define QCOM_SCM_V2_EBUSY -12
#define QCOM_SCM_ENOMEM -5
diff --git a/drivers/firmware/qcom/qcom_tzmem.c b/drivers/firmware/qcom/qcom_tzmem.c
index ea0a35355657..9f232e53115e 100644
--- a/drivers/firmware/qcom/qcom_tzmem.c
+++ b/drivers/firmware/qcom/qcom_tzmem.c
@@ -77,6 +77,7 @@ static bool qcom_tzmem_using_shm_bridge;
/* List of machines that are known to not support SHM bridge correctly. */
static const char *const qcom_tzmem_blacklist[] = {
+ "qcom,sc7180", /* hang in rmtfs memory assignment */
"qcom,sc8180x",
"qcom,sdm670", /* failure in GPU firmware loading */
"qcom,sdm845", /* reset in rmtfs memory assignment */
@@ -109,7 +110,19 @@ notsupp:
return 0;
}
-static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
+/**
+ * qcom_tzmem_shm_bridge_create() - Create a SHM bridge.
+ * @paddr: Physical address of the memory to share.
+ * @size: Size of the memory to share.
+ * @handle: Pointer used to return the SHM bridge handle.
+ *
+ * On platforms that support SHM bridge, this function creates a SHM bridge
+ * for the given memory region with QTEE. The handle returned by this function
+ * must be passed to qcom_tzmem_shm_bridge_delete() to free the SHM bridge.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+int qcom_tzmem_shm_bridge_create(phys_addr_t paddr, size_t size, u64 *handle)
{
u64 pfn_and_ns_perm, ipfn_and_s_perm, size_and_flags;
int ret;
@@ -117,17 +130,49 @@ static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
if (!qcom_tzmem_using_shm_bridge)
return 0;
- pfn_and_ns_perm = (u64)area->paddr | QCOM_SCM_PERM_RW;
- ipfn_and_s_perm = (u64)area->paddr | QCOM_SCM_PERM_RW;
- size_and_flags = area->size | (1 << QCOM_SHM_BRIDGE_NUM_VM_SHIFT);
+ pfn_and_ns_perm = paddr | QCOM_SCM_PERM_RW;
+ ipfn_and_s_perm = paddr | QCOM_SCM_PERM_RW;
+ size_and_flags = size | (1 << QCOM_SHM_BRIDGE_NUM_VM_SHIFT);
+
+ ret = qcom_scm_shm_bridge_create(pfn_and_ns_perm, ipfn_and_s_perm,
+ size_and_flags, QCOM_SCM_VMID_HLOS,
+ handle);
+ if (ret) {
+ dev_err(qcom_tzmem_dev,
+ "SHM Bridge failed: ret %d paddr 0x%pa, size %zu\n",
+ ret, &paddr, size);
+
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_tzmem_shm_bridge_create);
+
+/**
+ * qcom_tzmem_shm_bridge_delete() - Delete a SHM bridge.
+ * @handle: Handle to the SHM bridge.
+ *
+ * On platforms that support SHM bridge, this function deletes the SHM bridge
+ * for the given memory region. The handle must be the same as the one
+ * returned by qcom_tzmem_shm_bridge_create().
+ */
+void qcom_tzmem_shm_bridge_delete(u64 handle)
+{
+ if (qcom_tzmem_using_shm_bridge)
+ qcom_scm_shm_bridge_delete(handle);
+}
+EXPORT_SYMBOL_GPL(qcom_tzmem_shm_bridge_delete);
+
+static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
+{
+ int ret;
u64 *handle __free(kfree) = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle)
return -ENOMEM;
- ret = qcom_scm_shm_bridge_create(pfn_and_ns_perm, ipfn_and_s_perm,
- size_and_flags, QCOM_SCM_VMID_HLOS,
- handle);
+ ret = qcom_tzmem_shm_bridge_create(area->paddr, area->size, handle);
if (ret)
return ret;
@@ -140,10 +185,7 @@ static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area)
{
u64 *handle = area->priv;
- if (!qcom_tzmem_using_shm_bridge)
- return;
-
- qcom_scm_shm_bridge_delete(*handle);
+ qcom_tzmem_shm_bridge_delete(*handle);
kfree(handle);
}
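
A sketch of the intended pairing of the two new exports (hypothetical
caller; on platforms without SHM bridge support both helpers are no-ops):

/* Hypothetical caller: share a region with QTEE, then tear it down. */
static int example_share_region(phys_addr_t paddr, size_t size)
{
	u64 handle;
	int ret;

	ret = qcom_tzmem_shm_bridge_create(paddr, size, &handle);
	if (ret)
		return ret;

	/* ... the region is now reachable from QTEE ... */

	qcom_tzmem_shm_bridge_delete(handle);
	return 0;
}
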
diff --git a/drivers/firmware/samsung/exynos-acpm-pmic.c b/drivers/firmware/samsung/exynos-acpm-pmic.c
index 39b33a356ebd..961d7599e422 100644
--- a/drivers/firmware/samsung/exynos-acpm-pmic.c
+++ b/drivers/firmware/samsung/exynos-acpm-pmic.c
@@ -4,7 +4,9 @@
* Copyright 2020 Google LLC.
* Copyright 2024 Linaro Ltd.
*/
+#include <linux/array_size.h>
#include <linux/bitfield.h>
+#include <linux/errno.h>
#include <linux/firmware/samsung/exynos-acpm-protocol.h>
#include <linux/ktime.h>
#include <linux/types.h>
@@ -33,6 +35,19 @@ enum exynos_acpm_pmic_func {
ACPM_PMIC_BULK_WRITE,
};
+static const int acpm_pmic_linux_errmap[] = {
+ [0] = 0, /* ACPM_PMIC_SUCCESS */
+ [1] = -EACCES, /* Register can't be read or there was a problem accessing it. */
+ [2] = -EACCES, /* Register can't be written or there was a problem accessing it. */
+};
+
+static int acpm_pmic_to_linux_err(int err)
+{
+ if (err >= 0 && err < ARRAY_SIZE(acpm_pmic_linux_errmap))
+ return acpm_pmic_linux_errmap[err];
+ return -EIO;
+}
+
static inline u32 acpm_pmic_set_bulk(u32 data, unsigned int i)
{
return (data & ACPM_PMIC_BULK_MASK) << (ACPM_PMIC_BULK_SHIFT * i);
@@ -79,7 +94,7 @@ int acpm_pmic_read_reg(const struct acpm_handle *handle,
*buf = FIELD_GET(ACPM_PMIC_VALUE, xfer.rxd[1]);
- return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+ return acpm_pmic_to_linux_err(FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]));
}
static void acpm_pmic_init_bulk_read_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
@@ -110,7 +125,7 @@ int acpm_pmic_bulk_read(const struct acpm_handle *handle,
if (ret)
return ret;
- ret = FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+ ret = acpm_pmic_to_linux_err(FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]));
if (ret)
return ret;
@@ -150,7 +165,7 @@ int acpm_pmic_write_reg(const struct acpm_handle *handle,
if (ret)
return ret;
- return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+ return acpm_pmic_to_linux_err(FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]));
}
static void acpm_pmic_init_bulk_write_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
@@ -190,7 +205,7 @@ int acpm_pmic_bulk_write(const struct acpm_handle *handle,
if (ret)
return ret;
- return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+ return acpm_pmic_to_linux_err(FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]));
}
static void acpm_pmic_init_update_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
@@ -220,5 +235,5 @@ int acpm_pmic_update_reg(const struct acpm_handle *handle,
if (ret)
return ret;
- return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+ return acpm_pmic_to_linux_err(FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]));
}
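
The mapping helper collapses any firmware status outside the table to
-EIO. A small illustration (status value 7 is an arbitrary unknown code,
not taken from the ACPM protocol):

static void example_status_mapping(void)
{
	WARN_ON(acpm_pmic_to_linux_err(0) != 0);	/* success      */
	WARN_ON(acpm_pmic_to_linux_err(1) != -EACCES);	/* read failed  */
	WARN_ON(acpm_pmic_to_linux_err(7) != -EIO);	/* out of range */
}
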
diff --git a/drivers/firmware/tegra/bpmp-tegra186.c b/drivers/firmware/tegra/bpmp-tegra186.c
index 7cfc5fdfa49d..64863db7a715 100644
--- a/drivers/firmware/tegra/bpmp-tegra186.c
+++ b/drivers/firmware/tegra/bpmp-tegra186.c
@@ -198,7 +198,10 @@ static int tegra186_bpmp_dram_init(struct tegra_bpmp *bpmp)
err = of_reserved_mem_region_to_resource(bpmp->dev->of_node, 0, &res);
if (err < 0) {
- dev_warn(bpmp->dev, "failed to parse memory region: %d\n", err);
+ if (err != -ENODEV)
+ dev_warn(bpmp->dev,
+ "failed to parse memory region: %d\n", err);
+
return err;
}
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index ae5fd1936ad3..49fd2ae01055 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -2015,6 +2015,47 @@ fail:
return ret;
}
+/**
+ * ti_sci_cmd_lpm_abort() - Abort LPM entry by clearing the selected low-power mode
+ * @dev: Device pointer corresponding to the SCI entity
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_lpm_abort(struct device *dev)
+{
+ struct ti_sci_info *info = dev_get_drvdata(dev);
+ struct ti_sci_msg_hdr *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ int ret = 0;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_LPM_ABORT,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp))
+ ret = -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
{
struct ti_sci_info *info;
@@ -3739,11 +3780,22 @@ static int __maybe_unused ti_sci_resume_noirq(struct device *dev)
return 0;
}
+static void __maybe_unused ti_sci_pm_complete(struct device *dev)
+{
+ struct ti_sci_info *info = dev_get_drvdata(dev);
+
+ if (info->fw_caps & MSG_FLAG_CAPS_LPM_ABORT) {
+ if (ti_sci_cmd_lpm_abort(dev))
+ dev_err(dev, "LPM clear selection failed.\n");
+ }
+}
+
static const struct dev_pm_ops ti_sci_pm_ops = {
#ifdef CONFIG_PM_SLEEP
.suspend = ti_sci_suspend,
.suspend_noirq = ti_sci_suspend_noirq,
.resume_noirq = ti_sci_resume_noirq,
+ .complete = ti_sci_pm_complete,
#endif
};
@@ -3876,10 +3928,11 @@ static int ti_sci_probe(struct platform_device *pdev)
}
ti_sci_msg_cmd_query_fw_caps(&info->handle, &info->fw_caps);
- dev_dbg(dev, "Detected firmware capabilities: %s%s%s\n",
+ dev_dbg(dev, "Detected firmware capabilities: %s%s%s%s\n",
info->fw_caps & MSG_FLAG_CAPS_GENERIC ? "Generic" : "",
info->fw_caps & MSG_FLAG_CAPS_LPM_PARTIAL_IO ? " Partial-IO" : "",
- info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED ? " DM-Managed" : ""
+ info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED ? " DM-Managed" : "",
+ info->fw_caps & MSG_FLAG_CAPS_LPM_ABORT ? " LPM-Abort" : ""
);
ti_sci_setup_ops(info);
diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h
index 053387d7baa0..701c416b2e78 100644
--- a/drivers/firmware/ti_sci.h
+++ b/drivers/firmware/ti_sci.h
@@ -42,6 +42,7 @@
#define TI_SCI_MSG_SET_IO_ISOLATION 0x0307
#define TI_SCI_MSG_LPM_SET_DEVICE_CONSTRAINT 0x0309
#define TI_SCI_MSG_LPM_SET_LATENCY_CONSTRAINT 0x030A
+#define TI_SCI_MSG_LPM_ABORT 0x0311
/* Resource Management Requests */
#define TI_SCI_MSG_GET_RESOURCE_RANGE 0x1500
@@ -147,6 +148,7 @@ struct ti_sci_msg_req_reboot {
* MSG_FLAG_CAPS_GENERIC: Generic capability (LPM not supported)
* MSG_FLAG_CAPS_LPM_PARTIAL_IO: Partial IO in LPM
* MSG_FLAG_CAPS_LPM_DM_MANAGED: LPM can be managed by DM
+ * MSG_FLAG_CAPS_LPM_ABORT: Abort entry to LPM
*
* Response to a generic message with message type TI_SCI_MSG_QUERY_FW_CAPS
* providing currently available SOC/firmware capabilities. SoC that don't
@@ -157,6 +159,7 @@ struct ti_sci_msg_resp_query_fw_caps {
#define MSG_FLAG_CAPS_GENERIC TI_SCI_MSG_FLAG(0)
#define MSG_FLAG_CAPS_LPM_PARTIAL_IO TI_SCI_MSG_FLAG(4)
#define MSG_FLAG_CAPS_LPM_DM_MANAGED TI_SCI_MSG_FLAG(5)
+#define MSG_FLAG_CAPS_LPM_ABORT TI_SCI_MSG_FLAG(9)
#define MSG_MASK_CAPS_LPM GENMASK_ULL(4, 1)
u64 fw_caps;
} __packed;
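
A minimal sketch of checking the new capability bit in the fw_caps word
returned by TI_SCI_MSG_QUERY_FW_CAPS (hypothetical helper, mirroring the
ti_sci_pm_complete() check above):

static bool fw_has_lpm_abort(const struct ti_sci_msg_resp_query_fw_caps *resp)
{
	/* MSG_FLAG_CAPS_LPM_ABORT is TI_SCI_MSG_FLAG(9). */
	return resp->fw_caps & MSG_FLAG_CAPS_LPM_ABORT;
}
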
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
index 0be0d569589d..b7629a0e4813 100644
--- a/drivers/fpga/zynq-fpga.c
+++ b/drivers/fpga/zynq-fpga.c
@@ -405,12 +405,12 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt)
}
}
- priv->dma_nelms =
- dma_map_sgtable(mgr->dev.parent, sgt, DMA_TO_DEVICE, 0);
- if (priv->dma_nelms == 0) {
+ err = dma_map_sgtable(mgr->dev.parent, sgt, DMA_TO_DEVICE, 0);
+ if (err) {
dev_err(&mgr->dev, "Unable to DMA map (TO_DEVICE)\n");
- return -ENOMEM;
+ return err;
}
+ priv->dma_nelms = sgt->nents;
/* enable clock */
err = clk_enable(priv->clk);
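
The fix above relies on the dma_map_sgtable() contract: it returns 0 or
a negative errno rather than a mapped-entry count (the old code treated
0 as failure and always reported -ENOMEM). A sketch of the corrected
pattern:

static int example_map(struct device *dev, struct sg_table *sgt,
		       unsigned int *nelms)
{
	int err;

	err = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	if (err)
		return err;		/* negative errno, not a count */

	*nelms = sgt->nents;		/* mapped count lives in the table */
	return 0;
}
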
diff --git a/drivers/fwctl/mlx5/main.c b/drivers/fwctl/mlx5/main.c
index f93aa0cecdb9..3dacccf7855c 100644
--- a/drivers/fwctl/mlx5/main.c
+++ b/drivers/fwctl/mlx5/main.c
@@ -58,6 +58,9 @@ enum {
MLX5_CMD_OP_QUERY_DC_CNAK_TRACE = 0x716,
MLX5_CMD_OP_QUERY_NVMF_BACKEND_CONTROLLER = 0x722,
MLX5_CMD_OP_QUERY_NVMF_NAMESPACE_CONTEXT = 0x728,
+ MLX5_CMD_OP_QUERY_ADJACENT_FUNCTIONS_ID = 0x730,
+ MLX5_CMD_OP_DELEGATE_VHCA_MANAGEMENT = 0x731,
+ MLX5_CMD_OP_QUERY_DELEGATED_VHCA = 0x732,
MLX5_CMD_OP_QUERY_BURST_SIZE = 0x813,
MLX5_CMD_OP_QUERY_DIAGNOSTIC_PARAMS = 0x819,
MLX5_CMD_OP_SET_DIAGNOSTIC_PARAMS = 0x820,
@@ -188,6 +191,7 @@ static bool mlx5ctl_validate_rpc(const void *in, enum fwctl_rpc_scope scope)
* filter commands manually for now.
*/
switch (opcode) {
+ case MLX5_CMD_OP_MODIFY_CONG_STATUS:
case MLX5_CMD_OP_POSTPONE_CONNECTED_QP_TIMEOUT:
case MLX5_CMD_OP_QUERY_ADAPTER:
case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
@@ -196,6 +200,7 @@ static bool mlx5ctl_validate_rpc(const void *in, enum fwctl_rpc_scope scope)
case MLX5_CMD_OP_QUERY_OTHER_HCA_CAP:
case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
case MLX5_CMD_OPCODE_QUERY_VUID:
+ case MLX5_CMD_OP_DELEGATE_VHCA_MANAGEMENT:
/*
* FW limits SET_HCA_CAP on the tools UID to only the other function
* mode which is used for function pre-configuration
@@ -281,6 +286,8 @@ static bool mlx5ctl_validate_rpc(const void *in, enum fwctl_rpc_scope scope)
case MLX5_CMD_OP_QUERY_XRQ:
case MLX5_CMD_OP_USER_QUERY_XRQ_DC_PARAMS_ENTRY:
case MLX5_CMD_OP_USER_QUERY_XRQ_ERROR_PARAMS:
+ case MLX5_CMD_OP_QUERY_ADJACENT_FUNCTIONS_ID:
+ case MLX5_CMD_OP_QUERY_DELEGATED_VHCA:
return scope >= FWCTL_RPC_DEBUG_READ_ONLY;
case MLX5_CMD_OP_SET_DIAGNOSTIC_PARAMS:
@@ -345,7 +352,7 @@ static void *mlx5ctl_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
*/
if (ret && ret != -EREMOTEIO) {
if (rpc_out != rpc_in)
- kfree(rpc_out);
+ kvfree(rpc_out);
return ERR_PTR(ret);
}
return rpc_out;
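
The switch to kvfree() suggests rpc_out may come from a kvzalloc()-style
allocation (the allocation site is outside this hunk). As a general
sketch of the pairing rule:

static int example_kv_alloc(size_t size)
{
	void *buf = kvzalloc(size, GFP_KERNEL);	/* may be vmalloc-backed */

	if (!buf)
		return -ENOMEM;

	/* ... use buf ... */

	kvfree(buf);	/* kfree() would be wrong for vmalloc-backed memory */
	return 0;
}
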
diff --git a/drivers/fwctl/pds/main.c b/drivers/fwctl/pds/main.c
index 9b9d1f6b5556..1809853f6353 100644
--- a/drivers/fwctl/pds/main.c
+++ b/drivers/fwctl/pds/main.c
@@ -6,6 +6,7 @@
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/bitfield.h>
+#include <linux/string.h>
#include <uapi/fwctl/fwctl.h>
#include <uapi/fwctl/pds.h>
@@ -366,18 +367,10 @@ static void *pdsfc_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
return ERR_PTR(err);
if (rpc->in.len > 0) {
- in_payload = kzalloc(rpc->in.len, GFP_KERNEL);
- if (!in_payload) {
- dev_err(dev, "Failed to allocate in_payload\n");
- err = -ENOMEM;
- goto err_out;
- }
-
- if (copy_from_user(in_payload, u64_to_user_ptr(rpc->in.payload),
- rpc->in.len)) {
+ in_payload = memdup_user(u64_to_user_ptr(rpc->in.payload), rpc->in.len);
+ if (IS_ERR(in_payload)) {
dev_dbg(dev, "Failed to copy in_payload from user\n");
- err = -EFAULT;
- goto err_in_payload;
+ return in_payload;
}
in_payload_dma_addr = dma_map_single(dev->parent, in_payload,
@@ -453,7 +446,6 @@ err_out_payload:
rpc->in.len, DMA_TO_DEVICE);
err_in_payload:
kfree(in_payload);
-err_out:
if (err)
return ERR_PTR(err);
@@ -481,7 +473,7 @@ static int pdsfc_probe(struct auxiliary_device *adev,
pdsfc = fwctl_alloc_device(&padev->vf_pdev->dev, &pdsfc_ops,
struct pdsfc_dev, fwctl);
if (!pdsfc)
- return dev_err_probe(dev, -ENOMEM, "Failed to allocate fwctl device struct\n");
+ return -ENOMEM;
pdsfc->padev = padev;
err = pdsfc_identify(pdsfc);
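
A sketch of the memdup_user() idiom adopted above: it bundles the
allocation and copy_from_user() into one call and returns an ERR_PTR()
on failure, removing the separate -ENOMEM and -EFAULT paths:

static int example_copy_in(const void __user *uptr, size_t len)
{
	void *buf = memdup_user(uptr, len);

	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -ENOMEM or -EFAULT */

	/* ... use buf ... */

	kfree(buf);	/* memdup_user() memory is kmalloc-backed */
	return 0;
}
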
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index e43abb322fa6..7ee3afbc2b05 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -3,6 +3,9 @@
# GPIO infrastructure and drivers
#
+config GPIOLIB_LEGACY
+ def_bool y
+
menuconfig GPIOLIB
bool "GPIO Support"
help
@@ -12,9 +15,6 @@ menuconfig GPIOLIB
If unsure, say N.
-config GPIOLIB_LEGACY
- def_bool y
-
if GPIOLIB
config GPIOLIB_FASTPATH_LIMIT
@@ -303,7 +303,7 @@ config GPIO_EN7523
config GPIO_EP93XX
def_bool y
- depends on ARCH_EP93XX
+ depends on ARCH_EP93XX || COMPILE_TEST
select GPIO_GENERIC
select GPIOLIB_IRQCHIP
@@ -408,8 +408,7 @@ config GPIO_IMX_SCU
config GPIO_IXP4XX
bool "Intel IXP4xx GPIO"
- depends on ARCH_IXP4XX
- depends on OF
+ depends on (ARCH_IXP4XX && OF) || COMPILE_TEST
select GPIO_GENERIC
select GPIOLIB_IRQCHIP
select IRQ_DOMAIN_HIERARCHY
@@ -437,6 +436,7 @@ config GPIO_LOONGSON_64BIT
depends on LOONGARCH || COMPILE_TEST
depends on OF_GPIO
select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
help
Say yes here to support the GPIO functionality of a number of
Loongson series of chips. The Loongson GPIO controller supports
@@ -485,7 +485,6 @@ config GPIO_MM_LANTIQ
config GPIO_MPC5200
def_bool y
depends on PPC_MPC52xx
- select OF_GPIO_MM_GPIOCHIP
config GPIO_MPC8XXX
bool "MPC512x/MPC8xxx/QorIQ GPIO support"
@@ -735,7 +734,8 @@ config GPIO_TANGIER
If built as a module its name will be gpio-tangier.
config GPIO_TB10X
- bool
+ bool "Abilis Systems TB10x GPIO controller"
+ depends on ARC_PLAT_TB10X || COMPILE_TEST
select GPIO_GENERIC
select GENERIC_IRQ_CHIP
select OF_GPIO
@@ -884,7 +884,7 @@ config GPIO_ZYNQMP_MODEPIN
config GPIO_LOONGSON1
tristate "Loongson1 GPIO support"
- depends on MACH_LOONGSON32
+ depends on MACH_LOONGSON32 || COMPILE_TEST
select GPIO_GENERIC
help
Say Y or M here to support GPIO on Loongson1 SoCs.
@@ -1194,14 +1194,18 @@ config GPIO_PCA953X
4 bits: pca9536, pca9537
8 bits: max7310, max7315, pca6107, pca9534, pca9538, pca9554,
- pca9556, pca9557, pca9574, tca6408, tca9554, xra1202
+ pca9556, pca9557, pca9574, tca6408, tca9554, xra1202,
+ pcal6408, pcal9554b, tca9538
16 bits: max7312, max7313, pca9535, pca9539, pca9555, pca9575,
- tca6416
+ tca6416, pca6416, pcal6416, pcal9535, pcal9555a, max7318,
+ tca9539
+
+ 18 bits: tca6418
- 24 bits: tca6424
+ 24 bits: tca6424, pcal6524
- 40 bits: pca9505, pca9698
+ 40 bits: pca9505, pca9698, pca9506
config GPIO_PCA953X_IRQ
bool "Interrupt controller support for PCA953x"
@@ -1492,6 +1496,18 @@ config GPIO_MADERA
help
Support for GPIOs on Cirrus Logic Madera class codecs.
+config GPIO_MAX7360
+ tristate "MAX7360 GPIO support"
+ depends on MFD_MAX7360
+ select GPIO_REGMAP
+ select REGMAP_IRQ
+ help
+ Allows using the MAX7360 I/O expander PWM lines as GPIOs and the
+ keypad COL lines as GPOs.
+
+ This driver can also be built as a module. If so, the module will be
+ called gpio-max7360.
+
config GPIO_MAX77620
tristate "GPIO support for PMIC MAX77620 and MAX20024"
depends on MFD_MAX77620
@@ -1522,6 +1538,18 @@ config GPIO_MAX77759
This driver can also be built as a module. If so, the module will be
called gpio-max77759.
+config GPIO_NCT6694
+ tristate "Nuvoton NCT6694 GPIO controller support"
+ depends on MFD_NCT6694
+ select GENERIC_IRQ_CHIP
+ select GPIOLIB_IRQCHIP
+ help
+ This driver supports 8 GPIO pins per bank that can all be interrupt
+ sources.
+
+ This driver can also be built as a module. If so, the module will be
+ called gpio-nct6694.
+
config GPIO_PALMAS
tristate "TI PALMAS series PMICs GPIO"
depends on MFD_PALMAS
@@ -1559,7 +1587,7 @@ config GPIO_SL28CPLD
called gpio-sl28cpld.
config GPIO_STMPE
- bool "STMPE GPIOs"
+ tristate "STMPE GPIOs"
depends on MFD_STMPE
depends on OF_GPIO
select GPIOLIB_IRQCHIP
@@ -1923,6 +1951,17 @@ config GPIO_MPSSE
GPIO driver for FTDI's MPSSE interface. These can do input and
output. Each MPSSE provides 16 IO pins.
+config GPIO_USBIO
+ tristate "Intel USBIO GPIO support"
+ depends on USB_USBIO
+ default USB_USBIO
+ help
+ Select this option to enable the GPIO driver for the Intel
+ USBIO driver stack.
+
+ This driver can also be built as a module. If so, the module
+ will be called gpio_usbio.
+
endmenu
menu "Virtual GPIO drivers"
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 379f55e9ed1e..ec296fa14bfd 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -106,6 +106,7 @@ obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o
obj-$(CONFIG_GPIO_MAX7301) += gpio-max7301.o
obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o
obj-$(CONFIG_GPIO_MAX732X) += gpio-max732x.o
+obj-$(CONFIG_GPIO_MAX7360) += gpio-max7360.o
obj-$(CONFIG_GPIO_MAX77620) += gpio-max77620.o
obj-$(CONFIG_GPIO_MAX77650) += gpio-max77650.o
obj-$(CONFIG_GPIO_MAX77759) += gpio-max77759.o
@@ -128,6 +129,7 @@ obj-$(CONFIG_GPIO_MT7621) += gpio-mt7621.o
obj-$(CONFIG_GPIO_MVEBU) += gpio-mvebu.o
obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o
obj-$(CONFIG_GPIO_MXS) += gpio-mxs.o
+obj-$(CONFIG_GPIO_NCT6694) += gpio-nct6694.o
obj-$(CONFIG_GPIO_NOMADIK) += gpio-nomadik.o
obj-$(CONFIG_GPIO_NPCM_SGPIO) += gpio-npcm-sgpio.o
obj-$(CONFIG_GPIO_OCTEON) += gpio-octeon.o
@@ -192,6 +194,7 @@ obj-$(CONFIG_GPIO_TS5500) += gpio-ts5500.o
obj-$(CONFIG_GPIO_TWL4030) += gpio-twl4030.o
obj-$(CONFIG_GPIO_TWL6040) += gpio-twl6040.o
obj-$(CONFIG_GPIO_UNIPHIER) += gpio-uniphier.o
+obj-$(CONFIG_GPIO_USBIO) += gpio-usbio.o
obj-$(CONFIG_GPIO_VF610) += gpio-vf610.o
obj-$(CONFIG_GPIO_VIPERBOARD) += gpio-viperboard.o
obj-$(CONFIG_GPIO_VIRTUSER) += gpio-virtuser.o
diff --git a/drivers/gpio/TODO b/drivers/gpio/TODO
index 7a09a4f58551..8ed74e05903a 100644
--- a/drivers/gpio/TODO
+++ b/drivers/gpio/TODO
@@ -131,11 +131,6 @@ Work items:
helpers (x86 inb()/outb()) and convert port-mapped I/O drivers to use
this with dry-coding and sending to maintainers to test
-- Move the MMIO GPIO specific fields out of struct gpio_chip into a
- dedicated structure. Currently every GPIO chip has them if gpio-mmio is
- enabled in Kconfig even if it itself doesn't register with the helper
- library.
-
-------------------------------------------------------------------------------
Generic regmap GPIO
@@ -176,18 +171,6 @@ cannot be converted yet, but watch this space!
-------------------------------------------------------------------------------
-Convert all GPIO chips to using the new, value returning line setters
-
-struct gpio_chip's set() and set_multiple() callbacks are now deprecated. They
-return void and thus do not allow drivers to indicate failure to set the line
-value back to the caller.
-
-We've now added new variants - set_rv() and set_multiple_rv() that return an
-integer. Let's convert all GPIO drivers treewide to use the new callbacks,
-remove the old ones and finally rename the new ones back to the old names.
-
--------------------------------------------------------------------------------
-
Remove legacy sysfs features
We have two parallel per-chip class devices and per-exported-line attribute
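
The removed TODO entry is resolved by the treewide renames that follow:
the value-returning variants are now simply .set()/.set_multiple(). A
minimal sketch of the signature drivers implement after the rename:

/* Sketch: line setters now report failure via a negative errno. */
static int example_gpio_set(struct gpio_chip *gc, unsigned int offset,
			    int value)
{
	/* ... write the output register; may fail on a bus error ... */
	return 0;
}
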
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index 4dd5c2c330bb..c226524efeba 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -141,8 +141,8 @@ static int gen_74x164_probe(struct spi_device *spi)
chip->gpio_chip.label = spi->modalias;
chip->gpio_chip.direction_output = gen_74x164_direction_output;
chip->gpio_chip.get = gen_74x164_get_value;
- chip->gpio_chip.set_rv = gen_74x164_set_value;
- chip->gpio_chip.set_multiple_rv = gen_74x164_set_multiple;
+ chip->gpio_chip.set = gen_74x164_set_value;
+ chip->gpio_chip.set_multiple = gen_74x164_set_multiple;
chip->gpio_chip.base = -1;
chip->gpio_chip.ngpio = GEN_74X164_NUMBER_GPIOS * chip->registers;
chip->gpio_chip.can_sleep = true;
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
index dc2b941c3726..e5ac2d211013 100644
--- a/drivers/gpio/gpio-adnp.c
+++ b/drivers/gpio/gpio-adnp.c
@@ -430,7 +430,7 @@ static int adnp_gpio_setup(struct adnp *adnp, unsigned int num_gpios,
chip->direction_input = adnp_gpio_direction_input;
chip->direction_output = adnp_gpio_direction_output;
chip->get = adnp_gpio_get;
- chip->set_rv = adnp_gpio_set;
+ chip->set = adnp_gpio_set;
chip->can_sleep = true;
if (IS_ENABLED(CONFIG_DEBUG_FS))
diff --git a/drivers/gpio/gpio-adp5520.c b/drivers/gpio/gpio-adp5520.c
index 57d12c10cbda..6305c8b7dc05 100644
--- a/drivers/gpio/gpio-adp5520.c
+++ b/drivers/gpio/gpio-adp5520.c
@@ -122,7 +122,7 @@ static int adp5520_gpio_probe(struct platform_device *pdev)
gc->direction_input = adp5520_gpio_direction_input;
gc->direction_output = adp5520_gpio_direction_output;
gc->get = adp5520_gpio_get_value;
- gc->set_rv = adp5520_gpio_set_value;
+ gc->set = adp5520_gpio_set_value;
gc->can_sleep = true;
gc->base = pdata->gpio_start;
diff --git a/drivers/gpio/gpio-adp5585.c b/drivers/gpio/gpio-adp5585.c
index b2c8836c5f84..0fd3cc26d017 100644
--- a/drivers/gpio/gpio-adp5585.c
+++ b/drivers/gpio/gpio-adp5585.c
@@ -428,7 +428,7 @@ static int adp5585_gpio_probe(struct platform_device *pdev)
gc->direction_input = adp5585_gpio_direction_input;
gc->direction_output = adp5585_gpio_direction_output;
gc->get = adp5585_gpio_get_value;
- gc->set_rv = adp5585_gpio_set_value;
+ gc->set = adp5585_gpio_set_value;
gc->set_config = adp5585_gpio_set_config;
gc->request = adp5585_gpio_request;
gc->free = adp5585_gpio_free;
diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c
index 6f941db02c04..37600faf4a4b 100644
--- a/drivers/gpio/gpio-aggregator.c
+++ b/drivers/gpio/gpio-aggregator.c
@@ -12,6 +12,7 @@
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -28,6 +29,7 @@
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/forwarder.h>
#include <linux/gpio/machine.h>
#include "dev-sync-probe.h"
@@ -244,18 +246,34 @@ struct gpiochip_fwd {
spinlock_t slock; /* protects tmp[] if !can_sleep */
};
struct gpiochip_fwd_timing *delay_timings;
+ void *data;
+ unsigned long *valid_mask;
unsigned long tmp[]; /* values and descs for multiple ops */
};
-#define fwd_tmp_values(fwd) &(fwd)->tmp[0]
-#define fwd_tmp_descs(fwd) (void *)&(fwd)->tmp[BITS_TO_LONGS((fwd)->chip.ngpio)]
+#define fwd_tmp_values(fwd) (&(fwd)->tmp[0])
+#define fwd_tmp_descs(fwd) ((void *)&(fwd)->tmp[BITS_TO_LONGS((fwd)->chip.ngpio)])
#define fwd_tmp_size(ngpios) (BITS_TO_LONGS((ngpios)) + (ngpios))
+static int gpio_fwd_request(struct gpio_chip *chip, unsigned int offset)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+
+ return test_bit(offset, fwd->valid_mask) ? 0 : -ENODEV;
+}
+
static int gpio_fwd_get_direction(struct gpio_chip *chip, unsigned int offset)
{
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+ /*
+ * get_direction() is called during gpiochip registration; return
+ * -ENODEV if there is no GPIO desc for the line.
+ */
+ if (!test_bit(offset, fwd->valid_mask))
+ return -ENODEV;
+
return gpiod_get_direction(fwd->descs[offset]);
}
@@ -453,10 +471,11 @@ static int gpiochip_fwd_delay_of_xlate(struct gpio_chip *chip,
return line;
}
-static int gpiochip_fwd_setup_delay_line(struct device *dev, struct gpio_chip *chip,
- struct gpiochip_fwd *fwd)
+static int gpiochip_fwd_setup_delay_line(struct gpiochip_fwd *fwd)
{
- fwd->delay_timings = devm_kcalloc(dev, chip->ngpio,
+ struct gpio_chip *chip = &fwd->chip;
+
+ fwd->delay_timings = devm_kcalloc(chip->parent, chip->ngpio,
sizeof(*fwd->delay_timings),
GFP_KERNEL);
if (!fwd->delay_timings)
@@ -468,91 +487,367 @@ static int gpiochip_fwd_setup_delay_line(struct device *dev, struct gpio_chip *c
return 0;
}
#else
-static int gpiochip_fwd_setup_delay_line(struct device *dev, struct gpio_chip *chip,
- struct gpiochip_fwd *fwd)
+static int gpiochip_fwd_setup_delay_line(struct gpiochip_fwd *fwd)
{
return 0;
}
#endif /* !CONFIG_OF_GPIO */
/**
- * gpiochip_fwd_create() - Create a new GPIO forwarder
- * @dev: Parent device pointer
- * @ngpios: Number of GPIOs in the forwarder.
- * @descs: Array containing the GPIO descriptors to forward to.
- * This array must contain @ngpios entries, and must not be deallocated
- * before the forwarder has been destroyed again.
- * @features: Bitwise ORed features as defined with FWD_FEATURE_*.
+ * gpiochip_fwd_get_gpiochip - Get the GPIO chip for the GPIO forwarder
+ * @fwd: GPIO forwarder
*
- * This function creates a new gpiochip, which forwards all GPIO operations to
- * the passed GPIO descriptors.
+ * Returns: The GPIO chip for the GPIO forwarder
+ */
+struct gpio_chip *gpiochip_fwd_get_gpiochip(struct gpiochip_fwd *fwd)
+{
+ return &fwd->chip;
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_get_gpiochip, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_get_data - Get driver-private data for the GPIO forwarder
+ * @fwd: GPIO forwarder
*
- * Return: An opaque object pointer, or an ERR_PTR()-encoded negative error
- * code on failure.
+ * Returns: The driver-private data for the GPIO forwarder
*/
-static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev,
- unsigned int ngpios,
- struct gpio_desc *descs[],
- unsigned long features)
+void *gpiochip_fwd_get_data(struct gpiochip_fwd *fwd)
+{
+ return fwd->data;
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_get_data, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_request - Request a line of the GPIO forwarder
+ * @fwd: GPIO forwarder
+ * @offset: the offset of the line to request
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int gpiochip_fwd_gpio_request(struct gpiochip_fwd *fwd, unsigned int offset)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_request(gc, offset);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_request, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_get_direction - Return the current direction of a GPIO forwarder line
+ * @fwd: GPIO forwarder
+ * @offset: the offset of the line
+ *
+ * Returns: 0 for output, 1 for input, or a negative error code on failure.
+ */
+int gpiochip_fwd_gpio_get_direction(struct gpiochip_fwd *fwd, unsigned int offset)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_get_direction(gc, offset);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_get_direction, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_direction_output - Set a GPIO forwarder line direction to
+ * output
+ * @fwd: GPIO forwarder
+ * @offset: the offset of the line
+ * @value: value to set
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int gpiochip_fwd_gpio_direction_output(struct gpiochip_fwd *fwd, unsigned int offset,
+ int value)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_direction_output(gc, offset, value);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_direction_output, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_direction_input - Set a GPIO forwarder line direction to input
+ * @fwd: GPIO forwarder
+ * @offset: the offset of the line
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int gpiochip_fwd_gpio_direction_input(struct gpiochip_fwd *fwd, unsigned int offset)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_direction_input(gc, offset);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_direction_input, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_get - Return a GPIO forwarder line's value
+ * @fwd: GPIO forwarder
+ * @offset: the offset of the line
+ *
+ * Returns: The GPIO's logical value, i.e. taking the ACTIVE_LOW status into
+ * account, or negative errno on failure.
+ */
+int gpiochip_fwd_gpio_get(struct gpiochip_fwd *fwd, unsigned int offset)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_get(gc, offset);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_get, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_get_multiple - Get values for multiple GPIO forwarder lines
+ * @fwd: GPIO forwarder
+ * @mask: bit mask array; one bit per line; BITS_PER_LONG bits per word defines
+ * which lines are to be read
+ * @bits: bit value array; one bit per line; BITS_PER_LONG bits per word will
+ * contain the read values for the lines specified by mask
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int gpiochip_fwd_gpio_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_get_multiple_locked(gc, mask, bits);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_get_multiple, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_set - Assign value to a GPIO forwarder line.
+ * @fwd: GPIO forwarder
+ * @offset: the offset of the line
+ * @value: value to set
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int gpiochip_fwd_gpio_set(struct gpiochip_fwd *fwd, unsigned int offset, int value)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_set(gc, offset, value);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_set, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_set_multiple - Assign values to multiple GPIO forwarder lines
+ * @fwd: GPIO forwarder
+ * @mask: bit mask array; one bit per output; BITS_PER_LONG bits per word
+ * defines which outputs are to be changed
+ * @bits: bit value array; one bit per output; BITS_PER_LONG bits per word
+ * defines the values the outputs specified by mask are to be set to
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int gpiochip_fwd_gpio_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_set_multiple_locked(gc, mask, bits);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_set_multiple, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_set_config - Set @config for a GPIO forwarder line
+ * @fwd: GPIO forwarder
+ * @offset: the offset of the line
+ * @config: Same packed config format as generic pinconf
+ *
+ * Returns: 0 on success, %-ENOTSUPP if the controller doesn't support setting
+ * the configuration.
+ */
+int gpiochip_fwd_gpio_set_config(struct gpiochip_fwd *fwd, unsigned int offset,
+ unsigned long config)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_set_config(gc, offset, config);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_set_config, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_to_irq - Return the IRQ corresponding to a GPIO forwarder line
+ * @fwd: GPIO forwarder
+ * @offset: the offset of the line
+ *
+ * Returns: The Linux IRQ corresponding to the passed line, or a negative
+ * error code on failure.
+ */
+int gpiochip_fwd_gpio_to_irq(struct gpiochip_fwd *fwd, unsigned int offset)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_to_irq(gc, offset);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_to_irq, "GPIO_FORWARDER");
+
+/**
+ * devm_gpiochip_fwd_alloc - Allocate and initialize a new GPIO forwarder
+ * @dev: Parent device pointer
+ * @ngpios: Number of GPIOs in the forwarder
+ *
+ * Returns: An opaque object pointer, or an ERR_PTR()-encoded negative error
+ * code on failure.
+ */
+struct gpiochip_fwd *devm_gpiochip_fwd_alloc(struct device *dev,
+ unsigned int ngpios)
{
- const char *label = dev_name(dev);
struct gpiochip_fwd *fwd;
struct gpio_chip *chip;
- unsigned int i;
- int error;
- fwd = devm_kzalloc(dev, struct_size(fwd, tmp, fwd_tmp_size(ngpios)),
- GFP_KERNEL);
+ fwd = devm_kzalloc(dev, struct_size(fwd, tmp, fwd_tmp_size(ngpios)), GFP_KERNEL);
if (!fwd)
return ERR_PTR(-ENOMEM);
- chip = &fwd->chip;
-
- /*
- * If any of the GPIO lines are sleeping, then the entire forwarder
- * will be sleeping.
- * If any of the chips support .set_config(), then the forwarder will
- * support setting configs.
- */
- for (i = 0; i < ngpios; i++) {
- struct gpio_chip *parent = gpiod_to_chip(descs[i]);
+ fwd->descs = devm_kcalloc(dev, ngpios, sizeof(*fwd->descs), GFP_KERNEL);
+ if (!fwd->descs)
+ return ERR_PTR(-ENOMEM);
- dev_dbg(dev, "%u => gpio %d irq %d\n", i,
- desc_to_gpio(descs[i]), gpiod_to_irq(descs[i]));
+ fwd->valid_mask = devm_bitmap_zalloc(dev, ngpios, GFP_KERNEL);
+ if (!fwd->valid_mask)
+ return ERR_PTR(-ENOMEM);
- if (gpiod_cansleep(descs[i]))
- chip->can_sleep = true;
- if (parent && parent->set_config)
- chip->set_config = gpio_fwd_set_config;
- }
+ chip = &fwd->chip;
- chip->label = label;
+ chip->label = dev_name(dev);
chip->parent = dev;
chip->owner = THIS_MODULE;
+ chip->request = gpio_fwd_request;
chip->get_direction = gpio_fwd_get_direction;
chip->direction_input = gpio_fwd_direction_input;
chip->direction_output = gpio_fwd_direction_output;
chip->get = gpio_fwd_get;
chip->get_multiple = gpio_fwd_get_multiple_locked;
- chip->set_rv = gpio_fwd_set;
- chip->set_multiple_rv = gpio_fwd_set_multiple_locked;
+ chip->set = gpio_fwd_set;
+ chip->set_multiple = gpio_fwd_set_multiple_locked;
chip->to_irq = gpio_fwd_to_irq;
chip->base = -1;
chip->ngpio = ngpios;
- fwd->descs = descs;
+
+ return fwd;
+}
+EXPORT_SYMBOL_NS_GPL(devm_gpiochip_fwd_alloc, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_desc_add - Add a GPIO desc in the forwarder
+ * @fwd: GPIO forwarder
+ * @desc: GPIO descriptor to register
+ * @offset: offset for the GPIO in the forwarder
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int gpiochip_fwd_desc_add(struct gpiochip_fwd *fwd, struct gpio_desc *desc,
+ unsigned int offset)
+{
+ struct gpio_chip *chip = &fwd->chip;
+
+ if (offset >= chip->ngpio)
+ return -EINVAL;
+
+ if (test_and_set_bit(offset, fwd->valid_mask))
+ return -EEXIST;
+
+ /*
+ * If any of the GPIO lines are sleeping, then the entire forwarder
+ * will be sleeping.
+ */
+ if (gpiod_cansleep(desc))
+ chip->can_sleep = true;
+
+ fwd->descs[offset] = desc;
+
+ dev_dbg(chip->parent, "%u => gpio %d irq %d\n", offset,
+ desc_to_gpio(desc), gpiod_to_irq(desc));
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_desc_add, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_desc_free - Remove a GPIO desc from the forwarder
+ * @fwd: GPIO forwarder
+ * @offset: offset of GPIO desc to remove
+ */
+void gpiochip_fwd_desc_free(struct gpiochip_fwd *fwd, unsigned int offset)
+{
+ if (test_and_clear_bit(offset, fwd->valid_mask))
+ gpiod_put(fwd->descs[offset]);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_desc_free, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_register - Register a GPIO forwarder
+ * @fwd: GPIO forwarder
+ * @data: driver-private data associated with this forwarder
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int gpiochip_fwd_register(struct gpiochip_fwd *fwd, void *data)
+{
+ struct gpio_chip *chip = &fwd->chip;
+
+ /*
+ * Some GPIO descs may not have been registered yet. They will be added
+ * at runtime, but we have to assume they can sleep.
+ */
+ if (!bitmap_full(fwd->valid_mask, chip->ngpio))
+ chip->can_sleep = true;
if (chip->can_sleep)
mutex_init(&fwd->mlock);
else
spin_lock_init(&fwd->slock);
+ fwd->data = data;
+
+ return devm_gpiochip_add_data(chip->parent, chip, fwd);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_register, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_create() - Create a new GPIO forwarder
+ * @dev: Parent device pointer
+ * @ngpios: Number of GPIOs in the forwarder.
+ * @descs: Array containing the GPIO descriptors to forward to.
+ * This array must contain @ngpios entries, and can be deallocated
+ * afterwards, since the forwarder keeps its own copy.
+ * @features: Bitwise ORed features as defined with FWD_FEATURE_*.
+ *
+ * This function creates a new gpiochip, which forwards all GPIO operations to
+ * the passed GPIO descriptors.
+ *
+ * Return: An opaque object pointer, or an ERR_PTR()-encoded negative error
+ * code on failure.
+ */
+static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev,
+ unsigned int ngpios,
+ struct gpio_desc *descs[],
+ unsigned long features)
+{
+ struct gpiochip_fwd *fwd;
+ unsigned int i;
+ int error;
+
+ fwd = devm_gpiochip_fwd_alloc(dev, ngpios);
+ if (IS_ERR(fwd))
+ return fwd;
+
+ for (i = 0; i < ngpios; i++) {
+ error = gpiochip_fwd_desc_add(fwd, descs[i], i);
+ if (error)
+ return ERR_PTR(error);
+ }
+
if (features & FWD_FEATURE_DELAY) {
- error = gpiochip_fwd_setup_delay_line(dev, chip, fwd);
+ error = gpiochip_fwd_setup_delay_line(fwd);
if (error)
return ERR_PTR(error);
}
- error = devm_gpiochip_add_data(dev, chip, fwd);
+ error = gpiochip_fwd_register(fwd, NULL);
if (error)
return ERR_PTR(error);
@@ -1334,6 +1629,7 @@ static int gpio_aggregator_probe(struct platform_device *pdev)
return PTR_ERR(fwd);
platform_set_drvdata(pdev, fwd);
+ devm_kfree(dev, descs);
return 0;
}
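
A sketch of how a driver might consume the newly exported forwarder API
(hypothetical code, mirroring gpiochip_fwd_create() above; modules need
MODULE_IMPORT_NS("GPIO_FORWARDER")):

static int example_fwd_setup(struct device *dev, struct gpio_desc **descs,
			     unsigned int ngpios, void *drvdata)
{
	struct gpiochip_fwd *fwd;
	unsigned int i;
	int ret;

	fwd = devm_gpiochip_fwd_alloc(dev, ngpios);
	if (IS_ERR(fwd))
		return PTR_ERR(fwd);

	for (i = 0; i < ngpios; i++) {
		ret = gpiochip_fwd_desc_add(fwd, descs[i], i);
		if (ret)
			return ret;
	}

	/* Lines may also be added later; unpopulated ones return -ENODEV. */
	return gpiochip_fwd_register(fwd, drvdata);
}
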
diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c
index 77a674cf99e4..4524c18a87e7 100644
--- a/drivers/gpio/gpio-altera-a10sr.c
+++ b/drivers/gpio/gpio-altera-a10sr.c
@@ -69,7 +69,7 @@ static const struct gpio_chip altr_a10sr_gc = {
.label = "altr_a10sr_gpio",
.owner = THIS_MODULE,
.get = altr_a10sr_gpio_get,
- .set_rv = altr_a10sr_gpio_set,
+ .set = altr_a10sr_gpio_set,
.direction_input = altr_a10sr_gpio_direction_input,
.direction_output = altr_a10sr_gpio_direction_output,
.can_sleep = true,
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 1b28525726d7..9508d764cce4 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -259,7 +259,7 @@ static int altera_gpio_probe(struct platform_device *pdev)
altera_gc->gc.direction_input = altera_gpio_direction_input;
altera_gc->gc.direction_output = altera_gpio_direction_output;
altera_gc->gc.get = altera_gpio_get;
- altera_gc->gc.set_rv = altera_gpio_set;
+ altera_gc->gc.set = altera_gpio_set;
altera_gc->gc.owner = THIS_MODULE;
altera_gc->gc.parent = &pdev->dev;
altera_gc->gc.base = -1;
diff --git a/drivers/gpio/gpio-amd-fch.c b/drivers/gpio/gpio-amd-fch.c
index f8d0cea46049..e6c6c3ec7656 100644
--- a/drivers/gpio/gpio-amd-fch.c
+++ b/drivers/gpio/gpio-amd-fch.c
@@ -165,7 +165,7 @@ static int amd_fch_gpio_probe(struct platform_device *pdev)
priv->gc.direction_output = amd_fch_gpio_direction_output;
priv->gc.get_direction = amd_fch_gpio_get_direction;
priv->gc.get = amd_fch_gpio_get;
- priv->gc.set_rv = amd_fch_gpio_set;
+ priv->gc.set = amd_fch_gpio_set;
spin_lock_init(&priv->lock);
diff --git a/drivers/gpio/gpio-amd8111.c b/drivers/gpio/gpio-amd8111.c
index 425d8472f744..15fd5e210d74 100644
--- a/drivers/gpio/gpio-amd8111.c
+++ b/drivers/gpio/gpio-amd8111.c
@@ -165,7 +165,7 @@ static struct amd_gpio gp = {
.ngpio = 32,
.request = amd_gpio_request,
.free = amd_gpio_free,
- .set_rv = amd_gpio_set,
+ .set = amd_gpio_set,
.get = amd_gpio_get,
.direction_output = amd_gpio_dirout,
.direction_input = amd_gpio_dirin,
diff --git a/drivers/gpio/gpio-amdpt.c b/drivers/gpio/gpio-amdpt.c
index b70036587d9c..8458a6949c65 100644
--- a/drivers/gpio/gpio-amdpt.c
+++ b/drivers/gpio/gpio-amdpt.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/spinlock.h>
#include <linux/acpi.h>
#include <linux/platform_device.h>
@@ -24,54 +25,50 @@
#define PT_SYNC_REG 0x28
struct pt_gpio_chip {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *reg_base;
};
static int pt_gpio_request(struct gpio_chip *gc, unsigned offset)
{
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct pt_gpio_chip *pt_gpio = gpiochip_get_data(gc);
- unsigned long flags;
u32 using_pins;
dev_dbg(gc->parent, "pt_gpio_request offset=%x\n", offset);
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(gen_gc);
using_pins = readl(pt_gpio->reg_base + PT_SYNC_REG);
if (using_pins & BIT(offset)) {
dev_warn(gc->parent, "PT GPIO pin %x reconfigured\n",
offset);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
return -EINVAL;
}
writel(using_pins | BIT(offset), pt_gpio->reg_base + PT_SYNC_REG);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
-
return 0;
}
static void pt_gpio_free(struct gpio_chip *gc, unsigned offset)
{
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct pt_gpio_chip *pt_gpio = gpiochip_get_data(gc);
- unsigned long flags;
u32 using_pins;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(gen_gc);
using_pins = readl(pt_gpio->reg_base + PT_SYNC_REG);
using_pins &= ~BIT(offset);
writel(using_pins, pt_gpio->reg_base + PT_SYNC_REG);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
-
dev_dbg(gc->parent, "pt_gpio_free offset=%x\n", offset);
}
static int pt_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct pt_gpio_chip *pt_gpio;
int ret = 0;
@@ -91,22 +88,27 @@ static int pt_gpio_probe(struct platform_device *pdev)
return PTR_ERR(pt_gpio->reg_base);
}
- ret = bgpio_init(&pt_gpio->gc, dev, 4,
- pt_gpio->reg_base + PT_INPUTDATA_REG,
- pt_gpio->reg_base + PT_OUTPUTDATA_REG, NULL,
- pt_gpio->reg_base + PT_DIRECTION_REG, NULL,
- BGPIOF_READ_OUTPUT_REG_SET);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = pt_gpio->reg_base + PT_INPUTDATA_REG,
+ .set = pt_gpio->reg_base + PT_OUTPUTDATA_REG,
+ .dirout = pt_gpio->reg_base + PT_DIRECTION_REG,
+ .flags = GPIO_GENERIC_READ_OUTPUT_REG_SET,
+ };
+
+ ret = gpio_generic_chip_init(&pt_gpio->chip, &config);
if (ret) {
- dev_err(dev, "bgpio_init failed\n");
+ dev_err(dev, "failed to initialize the generic GPIO chip\n");
return ret;
}
- pt_gpio->gc.owner = THIS_MODULE;
- pt_gpio->gc.request = pt_gpio_request;
- pt_gpio->gc.free = pt_gpio_free;
- pt_gpio->gc.ngpio = (uintptr_t)device_get_match_data(dev);
+ pt_gpio->chip.gc.owner = THIS_MODULE;
+ pt_gpio->chip.gc.request = pt_gpio_request;
+ pt_gpio->chip.gc.free = pt_gpio_free;
+ pt_gpio->chip.gc.ngpio = (uintptr_t)device_get_match_data(dev);
- ret = devm_gpiochip_add_data(dev, &pt_gpio->gc, pt_gpio);
+ ret = devm_gpiochip_add_data(dev, &pt_gpio->chip.gc, pt_gpio);
if (ret) {
dev_err(dev, "Failed to register GPIO lib\n");
return ret;
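
The guard()/scoped_guard() conversions in this series replace explicit
raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() pairs on the old
bgpio_lock with scope-based locking of the generic chip. A minimal
sketch of the idiom:

/* Sketch: the lock is dropped automatically when the scope ends. */
static void example_locked_rmw(struct gpio_generic_chip *chip,
			       void __iomem *reg, u32 bit)
{
	guard(gpio_generic_lock_irqsave)(chip);

	writel(readl(reg) | bit, reg);
}	/* implicit unlock, even on early return */
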
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c
index 89ffde693019..a7e98d395d8e 100644
--- a/drivers/gpio/gpio-arizona.c
+++ b/drivers/gpio/gpio-arizona.c
@@ -138,7 +138,7 @@ static const struct gpio_chip template_chip = {
.direction_input = arizona_gpio_direction_in,
.get = arizona_gpio_get,
.direction_output = arizona_gpio_direction_out,
- .set_rv = arizona_gpio_set,
+ .set = arizona_gpio_set,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-aspeed-sgpio.c b/drivers/gpio/gpio-aspeed-sgpio.c
index 00b31497ecff..7622f9e9f54a 100644
--- a/drivers/gpio/gpio-aspeed-sgpio.c
+++ b/drivers/gpio/gpio-aspeed-sgpio.c
@@ -596,7 +596,7 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev)
gpio->chip.request = NULL;
gpio->chip.free = NULL;
gpio->chip.get = aspeed_sgpio_get;
- gpio->chip.set_rv = aspeed_sgpio_set;
+ gpio->chip.set = aspeed_sgpio_set;
gpio->chip.set_config = aspeed_sgpio_set_config;
gpio->chip.label = dev_name(&pdev->dev);
gpio->chip.base = -1;
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 2d340a343a17..7953a9c4e36d 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -1352,7 +1352,7 @@ static int aspeed_gpio_probe(struct platform_device *pdev)
gpio->chip.request = aspeed_gpio_request;
gpio->chip.free = aspeed_gpio_free;
gpio->chip.get = aspeed_gpio_get;
- gpio->chip.set_rv = aspeed_gpio_set;
+ gpio->chip.set = aspeed_gpio_set;
gpio->chip.set_config = aspeed_gpio_set_config;
gpio->chip.label = dev_name(&pdev->dev);
gpio->chip.base = -1;
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index de4cc12e5e03..2ad9f6ac6636 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -10,6 +10,7 @@
#include <linux/device.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mod_devicetable.h>
@@ -28,17 +29,17 @@
#define AR71XX_GPIO_REG_INT_MASK 0x24
struct ath79_gpio_ctrl {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *base;
- raw_spinlock_t lock;
unsigned long both_edges;
};
static struct ath79_gpio_ctrl *irq_data_to_ath79_gpio(struct irq_data *data)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
- return container_of(gc, struct ath79_gpio_ctrl, gc);
+ return container_of(gen_gc, struct ath79_gpio_ctrl, chip);
}
static u32 ath79_gpio_read(struct ath79_gpio_ctrl *ctrl, unsigned reg)
@@ -70,48 +71,43 @@ static void ath79_gpio_irq_unmask(struct irq_data *data)
{
struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data);
u32 mask = BIT(irqd_to_hwirq(data));
- unsigned long flags;
- gpiochip_enable_irq(&ctrl->gc, irqd_to_hwirq(data));
- raw_spin_lock_irqsave(&ctrl->lock, flags);
+ gpiochip_enable_irq(&ctrl->chip.gc, irqd_to_hwirq(data));
+
+ guard(gpio_generic_lock_irqsave)(&ctrl->chip);
+
ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, mask);
- raw_spin_unlock_irqrestore(&ctrl->lock, flags);
}
static void ath79_gpio_irq_mask(struct irq_data *data)
{
struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data);
u32 mask = BIT(irqd_to_hwirq(data));
- unsigned long flags;
- raw_spin_lock_irqsave(&ctrl->lock, flags);
- ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, 0);
- raw_spin_unlock_irqrestore(&ctrl->lock, flags);
- gpiochip_disable_irq(&ctrl->gc, irqd_to_hwirq(data));
+ scoped_guard(gpio_generic_lock_irqsave, &ctrl->chip)
+ ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, 0);
+
+ gpiochip_disable_irq(&ctrl->chip.gc, irqd_to_hwirq(data));
}
static void ath79_gpio_irq_enable(struct irq_data *data)
{
struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data);
u32 mask = BIT(irqd_to_hwirq(data));
- unsigned long flags;
- raw_spin_lock_irqsave(&ctrl->lock, flags);
+ guard(gpio_generic_lock_irqsave)(&ctrl->chip);
ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_ENABLE, mask, mask);
ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, mask);
- raw_spin_unlock_irqrestore(&ctrl->lock, flags);
}
static void ath79_gpio_irq_disable(struct irq_data *data)
{
struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data);
u32 mask = BIT(irqd_to_hwirq(data));
- unsigned long flags;
- raw_spin_lock_irqsave(&ctrl->lock, flags);
+ guard(gpio_generic_lock_irqsave)(&ctrl->chip);
ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, 0);
ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_ENABLE, mask, 0);
- raw_spin_unlock_irqrestore(&ctrl->lock, flags);
}
static int ath79_gpio_irq_set_type(struct irq_data *data,
@@ -120,7 +116,6 @@ static int ath79_gpio_irq_set_type(struct irq_data *data,
struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data);
u32 mask = BIT(irqd_to_hwirq(data));
u32 type = 0, polarity = 0;
- unsigned long flags;
bool disabled;
switch (flow_type) {
@@ -142,7 +137,7 @@ static int ath79_gpio_irq_set_type(struct irq_data *data,
return -EINVAL;
}
- raw_spin_lock_irqsave(&ctrl->lock, flags);
+ guard(gpio_generic_lock_irqsave)(&ctrl->chip);
if (flow_type == IRQ_TYPE_EDGE_BOTH) {
ctrl->both_edges |= mask;
@@ -167,8 +162,6 @@ static int ath79_gpio_irq_set_type(struct irq_data *data,
ath79_gpio_update_bits(
ctrl, AR71XX_GPIO_REG_INT_ENABLE, mask, mask);
- raw_spin_unlock_irqrestore(&ctrl->lock, flags);
-
return 0;
}
@@ -187,28 +180,27 @@ static void ath79_gpio_irq_handler(struct irq_desc *desc)
{
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct ath79_gpio_ctrl *ctrl =
- container_of(gc, struct ath79_gpio_ctrl, gc);
- unsigned long flags, pending;
+ container_of(gen_gc, struct ath79_gpio_ctrl, chip);
+ unsigned long pending;
u32 both_edges, state;
int irq;
chained_irq_enter(irqchip, desc);
- raw_spin_lock_irqsave(&ctrl->lock, flags);
-
- pending = ath79_gpio_read(ctrl, AR71XX_GPIO_REG_INT_PENDING);
+ scoped_guard(gpio_generic_lock_irqsave, &ctrl->chip) {
+ pending = ath79_gpio_read(ctrl, AR71XX_GPIO_REG_INT_PENDING);
- /* Update the polarity of the both edges irqs */
- both_edges = ctrl->both_edges & pending;
- if (both_edges) {
- state = ath79_gpio_read(ctrl, AR71XX_GPIO_REG_IN);
- ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_POLARITY,
- both_edges, ~state);
+ /* Update the polarity of the both edges irqs */
+ both_edges = ctrl->both_edges & pending;
+ if (both_edges) {
+ state = ath79_gpio_read(ctrl, AR71XX_GPIO_REG_IN);
+ ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_POLARITY,
+ both_edges, ~state);
+ }
}
- raw_spin_unlock_irqrestore(&ctrl->lock, flags);
-
for_each_set_bit(irq, &pending, gc->ngpio)
generic_handle_domain_irq(gc->irq.domain, irq);
@@ -224,6 +216,7 @@ MODULE_DEVICE_TABLE(of, ath79_gpio_of_match);
static int ath79_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct ath79_gpio_ctrl *ctrl;
struct gpio_irq_chip *girq;
@@ -252,22 +245,25 @@ static int ath79_gpio_probe(struct platform_device *pdev)
if (IS_ERR(ctrl->base))
return PTR_ERR(ctrl->base);
- raw_spin_lock_init(&ctrl->lock);
- err = bgpio_init(&ctrl->gc, dev, 4,
- ctrl->base + AR71XX_GPIO_REG_IN,
- ctrl->base + AR71XX_GPIO_REG_SET,
- ctrl->base + AR71XX_GPIO_REG_CLEAR,
- oe_inverted ? NULL : ctrl->base + AR71XX_GPIO_REG_OE,
- oe_inverted ? ctrl->base + AR71XX_GPIO_REG_OE : NULL,
- 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = ctrl->base + AR71XX_GPIO_REG_IN,
+ .set = ctrl->base + AR71XX_GPIO_REG_SET,
+ .clr = ctrl->base + AR71XX_GPIO_REG_CLEAR,
+ .dirout = oe_inverted ? NULL : ctrl->base + AR71XX_GPIO_REG_OE,
+ .dirin = oe_inverted ? ctrl->base + AR71XX_GPIO_REG_OE : NULL,
+ };
+
+ err = gpio_generic_chip_init(&ctrl->chip, &config);
if (err) {
- dev_err(dev, "bgpio_init failed\n");
+ dev_err(dev, "failed to initialize generic GPIO chip\n");
return err;
}
/* Optional interrupt setup */
if (device_property_read_bool(dev, "interrupt-controller")) {
- girq = &ctrl->gc.irq;
+ girq = &ctrl->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &ath79_gpio_irqchip);
girq->parent_handler = ath79_gpio_irq_handler;
girq->num_parents = 1;
@@ -280,7 +276,7 @@ static int ath79_gpio_probe(struct platform_device *pdev)
girq->handler = handle_simple_irq;
}
- return devm_gpiochip_add_data(dev, &ctrl->gc, ctrl);
+ return devm_gpiochip_add_data(dev, &ctrl->chip.gc, ctrl);
}
static struct platform_driver ath79_gpio_driver = {
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 8f22cb36004d..208b71c59d58 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -339,7 +339,7 @@ static const struct gpio_chip template_chip = {
.direction_input = bcm_kona_gpio_direction_input,
.get = bcm_kona_gpio_get,
.direction_output = bcm_kona_gpio_direction_output,
- .set_rv = bcm_kona_gpio_set,
+ .set = bcm_kona_gpio_set,
.set_config = bcm_kona_gpio_set_config,
.to_irq = bcm_kona_gpio_to_irq,
.base = 0,
diff --git a/drivers/gpio/gpio-bd71815.c b/drivers/gpio/gpio-bd71815.c
index 36701500925e..afb18a5a9d79 100644
--- a/drivers/gpio/gpio-bd71815.c
+++ b/drivers/gpio/gpio-bd71815.c
@@ -85,7 +85,7 @@ static const struct gpio_chip bd71815gpo_chip = {
.owner = THIS_MODULE,
.get = bd71815gpo_get,
.get_direction = bd71815gpo_direction_get,
- .set_rv = bd71815gpo_set,
+ .set = bd71815gpo_set,
.set_config = bd71815_gpio_set_config,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-bd71828.c b/drivers/gpio/gpio-bd71828.c
index 4ba151e5cf25..e439dbfffc62 100644
--- a/drivers/gpio/gpio-bd71828.c
+++ b/drivers/gpio/gpio-bd71828.c
@@ -109,7 +109,7 @@ static int bd71828_probe(struct platform_device *pdev)
bdgpio->gpio.set_config = bd71828_gpio_set_config;
bdgpio->gpio.can_sleep = true;
bdgpio->gpio.get = bd71828_gpio_get;
- bdgpio->gpio.set_rv = bd71828_gpio_set;
+ bdgpio->gpio.set = bd71828_gpio_set;
bdgpio->gpio.base = -1;
/*
diff --git a/drivers/gpio/gpio-bd9571mwv.c b/drivers/gpio/gpio-bd9571mwv.c
index 8df1361e3e84..7c95bb36511e 100644
--- a/drivers/gpio/gpio-bd9571mwv.c
+++ b/drivers/gpio/gpio-bd9571mwv.c
@@ -88,7 +88,7 @@ static const struct gpio_chip template_chip = {
.direction_input = bd9571mwv_gpio_direction_input,
.direction_output = bd9571mwv_gpio_direction_output,
.get = bd9571mwv_gpio_get,
- .set_rv = bd9571mwv_gpio_set,
+ .set = bd9571mwv_gpio_set,
.base = -1,
.ngpio = 2,
.can_sleep = true,
diff --git a/drivers/gpio/gpio-blzp1600.c b/drivers/gpio/gpio-blzp1600.c
index 055cb296ae54..0f8c826ba876 100644
--- a/drivers/gpio/gpio-blzp1600.c
+++ b/drivers/gpio/gpio-blzp1600.c
@@ -6,6 +6,7 @@
#include <linux/errno.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -36,7 +37,7 @@
struct blzp1600_gpio {
void __iomem *base;
- struct gpio_chip gc;
+ struct gpio_generic_chip gen_gc;
int irq;
};
@@ -76,7 +77,7 @@ static void blzp1600_gpio_irq_mask(struct irq_data *d)
{
struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
- guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ guard(gpio_generic_lock_irqsave)(&chip->gen_gc);
blzp1600_gpio_rmw(chip->base + GPIO_IM_REG, BIT(d->hwirq), 1);
}
@@ -84,7 +85,7 @@ static void blzp1600_gpio_irq_unmask(struct irq_data *d)
{
struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
- guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ guard(gpio_generic_lock_irqsave)(&chip->gen_gc);
blzp1600_gpio_rmw(chip->base + GPIO_IM_REG, BIT(d->hwirq), 0);
}
@@ -99,9 +100,9 @@ static void blzp1600_gpio_irq_enable(struct irq_data *d)
{
struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
- gpiochip_enable_irq(&chip->gc, irqd_to_hwirq(d));
+ gpiochip_enable_irq(&chip->gen_gc.gc, irqd_to_hwirq(d));
- guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ guard(gpio_generic_lock_irqsave)(&chip->gen_gc);
blzp1600_gpio_rmw(chip->base + GPIO_DIR_REG, BIT(d->hwirq), 0);
blzp1600_gpio_rmw(chip->base + GPIO_IEN_REG, BIT(d->hwirq), 1);
}
@@ -110,9 +111,9 @@ static void blzp1600_gpio_irq_disable(struct irq_data *d)
{
struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
- guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ guard(gpio_generic_lock_irqsave)(&chip->gen_gc);
blzp1600_gpio_rmw(chip->base + GPIO_IEN_REG, BIT(d->hwirq), 0);
- gpiochip_disable_irq(&chip->gc, irqd_to_hwirq(d));
+ gpiochip_disable_irq(&chip->gen_gc.gc, irqd_to_hwirq(d));
}
static int blzp1600_gpio_irq_set_type(struct irq_data *d, u32 type)
@@ -121,7 +122,7 @@ static int blzp1600_gpio_irq_set_type(struct irq_data *d, u32 type)
u32 edge_level, single_both, fall_rise;
int mask = BIT(d->hwirq);
- guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ guard(gpio_generic_lock_irqsave)(&chip->gen_gc);
edge_level = blzp1600_gpio_read(chip, GPIO_IS_REG);
single_both = blzp1600_gpio_read(chip, GPIO_IBE_REG);
fall_rise = blzp1600_gpio_read(chip, GPIO_IEV_REG);
@@ -186,8 +187,8 @@ static void blzp1600_gpio_irqhandler(struct irq_desc *desc)
chained_irq_enter(irqchip, desc);
irq_status = blzp1600_gpio_read(gpio, GPIO_RIS_REG);
- for_each_set_bit(hwirq, &irq_status, gpio->gc.ngpio)
- generic_handle_domain_irq(gpio->gc.irq.domain, hwirq);
+ for_each_set_bit(hwirq, &irq_status, gpio->gen_gc.gc.ngpio)
+ generic_handle_domain_irq(gpio->gen_gc.gc.irq.domain, hwirq);
chained_irq_exit(irqchip, desc);
}
@@ -197,7 +198,7 @@ static int blzp1600_gpio_set_debounce(struct gpio_chip *gc, unsigned int offset,
{
struct blzp1600_gpio *chip = gpiochip_get_data(gc);
- guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ guard(gpio_generic_lock_irqsave)(&chip->gen_gc);
blzp1600_gpio_rmw(chip->base + GPIO_DB_REG, BIT(offset), debounce);
return 0;
@@ -216,6 +217,7 @@ static int blzp1600_gpio_set_config(struct gpio_chip *gc, unsigned int offset, u
static int blzp1600_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct blzp1600_gpio *chip;
struct gpio_chip *gc;
int ret;
@@ -228,14 +230,21 @@ static int blzp1600_gpio_probe(struct platform_device *pdev)
if (IS_ERR(chip->base))
return PTR_ERR(chip->base);
- ret = bgpio_init(&chip->gc, &pdev->dev, 4, chip->base + GPIO_IDATA_REG,
- chip->base + GPIO_SET_REG, chip->base + GPIO_CLR_REG,
- chip->base + GPIO_DIR_REG, NULL, 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = &pdev->dev,
+ .sz = 4,
+ .dat = chip->base + GPIO_IDATA_REG,
+ .set = chip->base + GPIO_SET_REG,
+ .clr = chip->base + GPIO_CLR_REG,
+ .dirout = chip->base + GPIO_DIR_REG,
+ };
+
+ ret = gpio_generic_chip_init(&chip->gen_gc, &config);
if (ret)
return dev_err_probe(&pdev->dev, ret, "Failed to register generic gpio\n");
/* configure the gpio chip */
- gc = &chip->gc;
+ gc = &chip->gen_gc.gc;
gc->set_config = blzp1600_gpio_set_config;
if (device_property_present(&pdev->dev, "interrupt-controller")) {
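
The blzp1600 conversion above is the template repeated for most drivers in this series: the positional bgpio_init() arguments become named fields in a gpio_generic_chip_config, and struct gpio_chip moves one level down into struct gpio_generic_chip. Condensed into a self-contained sketch (driver name and register offsets are hypothetical):

#include <linux/gpio/driver.h>
#include <linux/gpio/generic.h>
#include <linux/platform_device.h>

struct foo_gpio {
	void __iomem *base;
	struct gpio_generic_chip gen_gc;	/* was: struct gpio_chip gc */
};

static int foo_gpio_probe(struct platform_device *pdev)
{
	struct gpio_generic_chip_config config;
	struct foo_gpio *g;
	int ret;

	g = devm_kzalloc(&pdev->dev, sizeof(*g), GFP_KERNEL);
	if (!g)
		return -ENOMEM;

	g->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(g->base))
		return PTR_ERR(g->base);

	/* Was: bgpio_init(&g->gc, &pdev->dev, 4, dat, set, clr, dirout, NULL, 0) */
	config = (struct gpio_generic_chip_config) {
		.dev = &pdev->dev,
		.sz = 4,
		.dat = g->base + FOO_DATA_REG,	/* hypothetical offsets */
		.set = g->base + FOO_SET_REG,
		.clr = g->base + FOO_CLR_REG,
		.dirout = g->base + FOO_DIR_REG,
	};

	ret = gpio_generic_chip_init(&g->gen_gc, &config);
	if (ret)
		return ret;

	return devm_gpiochip_add_data(&pdev->dev, &g->gen_gc.gc, g);
}
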
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index e29a9589b3cc..f40c9472588b 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -3,6 +3,7 @@
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/irqdomain.h>
@@ -37,7 +38,7 @@ enum gio_reg_index {
struct brcmstb_gpio_bank {
struct list_head node;
int id;
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
struct brcmstb_gpio_priv *parent_priv;
u32 width;
u32 wake_active;
@@ -72,19 +73,18 @@ __brcmstb_gpio_get_active_irqs(struct brcmstb_gpio_bank *bank)
{
void __iomem *reg_base = bank->parent_priv->reg_base;
- return bank->gc.read_reg(reg_base + GIO_STAT(bank->id)) &
- bank->gc.read_reg(reg_base + GIO_MASK(bank->id));
+ return gpio_generic_read_reg(&bank->chip, reg_base + GIO_STAT(bank->id)) &
+ gpio_generic_read_reg(&bank->chip, reg_base + GIO_MASK(bank->id));
}
static unsigned long
brcmstb_gpio_get_active_irqs(struct brcmstb_gpio_bank *bank)
{
unsigned long status;
- unsigned long flags;
- raw_spin_lock_irqsave(&bank->gc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&bank->chip);
+
status = __brcmstb_gpio_get_active_irqs(bank);
- raw_spin_unlock_irqrestore(&bank->gc.bgpio_lock, flags);
return status;
}
@@ -92,26 +92,26 @@ brcmstb_gpio_get_active_irqs(struct brcmstb_gpio_bank *bank)
static int brcmstb_gpio_hwirq_to_offset(irq_hw_number_t hwirq,
struct brcmstb_gpio_bank *bank)
{
- return hwirq - bank->gc.offset;
+ return hwirq - bank->chip.gc.offset;
}
static void brcmstb_gpio_set_imask(struct brcmstb_gpio_bank *bank,
unsigned int hwirq, bool enable)
{
- struct gpio_chip *gc = &bank->gc;
struct brcmstb_gpio_priv *priv = bank->parent_priv;
u32 mask = BIT(brcmstb_gpio_hwirq_to_offset(hwirq, bank));
u32 imask;
- unsigned long flags;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
- imask = gc->read_reg(priv->reg_base + GIO_MASK(bank->id));
+ guard(gpio_generic_lock_irqsave)(&bank->chip);
+
+ imask = gpio_generic_read_reg(&bank->chip,
+ priv->reg_base + GIO_MASK(bank->id));
if (enable)
imask |= mask;
else
imask &= ~mask;
- gc->write_reg(priv->reg_base + GIO_MASK(bank->id), imask);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ gpio_generic_write_reg(&bank->chip,
+ priv->reg_base + GIO_MASK(bank->id), imask);
}
static int brcmstb_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
@@ -150,7 +150,8 @@ static void brcmstb_gpio_irq_ack(struct irq_data *d)
struct brcmstb_gpio_priv *priv = bank->parent_priv;
u32 mask = BIT(brcmstb_gpio_hwirq_to_offset(d->hwirq, bank));
- gc->write_reg(priv->reg_base + GIO_STAT(bank->id), mask);
+ gpio_generic_write_reg(&bank->chip,
+ priv->reg_base + GIO_STAT(bank->id), mask);
}
static int brcmstb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
@@ -162,7 +163,6 @@ static int brcmstb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
u32 edge_insensitive, iedge_insensitive;
u32 edge_config, iedge_config;
u32 level, ilevel;
- unsigned long flags;
switch (type) {
case IRQ_TYPE_LEVEL_LOW:
@@ -194,23 +194,25 @@ static int brcmstb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- raw_spin_lock_irqsave(&bank->gc.bgpio_lock, flags);
-
- iedge_config = bank->gc.read_reg(priv->reg_base +
- GIO_EC(bank->id)) & ~mask;
- iedge_insensitive = bank->gc.read_reg(priv->reg_base +
- GIO_EI(bank->id)) & ~mask;
- ilevel = bank->gc.read_reg(priv->reg_base +
- GIO_LEVEL(bank->id)) & ~mask;
-
- bank->gc.write_reg(priv->reg_base + GIO_EC(bank->id),
- iedge_config | edge_config);
- bank->gc.write_reg(priv->reg_base + GIO_EI(bank->id),
- iedge_insensitive | edge_insensitive);
- bank->gc.write_reg(priv->reg_base + GIO_LEVEL(bank->id),
- ilevel | level);
+ guard(gpio_generic_lock_irqsave)(&bank->chip);
+
+ iedge_config = gpio_generic_read_reg(&bank->chip,
+ priv->reg_base + GIO_EC(bank->id)) & ~mask;
+ iedge_insensitive = gpio_generic_read_reg(&bank->chip,
+ priv->reg_base + GIO_EI(bank->id)) & ~mask;
+ ilevel = gpio_generic_read_reg(&bank->chip,
+ priv->reg_base + GIO_LEVEL(bank->id)) & ~mask;
+
+ gpio_generic_write_reg(&bank->chip,
+ priv->reg_base + GIO_EC(bank->id),
+ iedge_config | edge_config);
+ gpio_generic_write_reg(&bank->chip,
+ priv->reg_base + GIO_EI(bank->id),
+ iedge_insensitive | edge_insensitive);
+ gpio_generic_write_reg(&bank->chip,
+ priv->reg_base + GIO_LEVEL(bank->id),
+ ilevel | level);
- raw_spin_unlock_irqrestore(&bank->gc.bgpio_lock, flags);
return 0;
}
@@ -263,7 +265,7 @@ static void brcmstb_gpio_irq_bank_handler(struct brcmstb_gpio_bank *bank)
{
struct brcmstb_gpio_priv *priv = bank->parent_priv;
struct irq_domain *domain = priv->irq_domain;
- int hwbase = bank->gc.offset;
+ int hwbase = bank->chip.gc.offset;
unsigned long status;
while ((status = brcmstb_gpio_get_active_irqs(bank))) {
@@ -303,7 +305,7 @@ static struct brcmstb_gpio_bank *brcmstb_gpio_hwirq_to_bank(
/* banks are in descending order */
list_for_each_entry_reverse(bank, &priv->bank_list, node) {
- i += bank->gc.ngpio;
+ i += bank->chip.gc.ngpio;
if (hwirq < i)
return bank;
}
@@ -332,7 +334,7 @@ static int brcmstb_gpio_irq_map(struct irq_domain *d, unsigned int irq,
dev_dbg(&pdev->dev, "Mapping irq %d for gpio line %d (bank %d)\n",
irq, (int)hwirq, bank->id);
- ret = irq_set_chip_data(irq, &bank->gc);
+ ret = irq_set_chip_data(irq, &bank->chip.gc);
if (ret < 0)
return ret;
irq_set_lockdep_class(irq, &brcmstb_gpio_irq_lock_class,
@@ -394,7 +396,7 @@ static void brcmstb_gpio_remove(struct platform_device *pdev)
* more important to actually perform all of the steps.
*/
list_for_each_entry(bank, &priv->bank_list, node)
- gpiochip_remove(&bank->gc);
+ gpiochip_remove(&bank->chip.gc);
}
static int brcmstb_gpio_of_xlate(struct gpio_chip *gc,
@@ -412,7 +414,7 @@ static int brcmstb_gpio_of_xlate(struct gpio_chip *gc,
if (WARN_ON(gpiospec->args_count < gc->of_gpio_n_cells))
return -EINVAL;
- offset = gpiospec->args[0] - bank->gc.offset;
+ offset = gpiospec->args[0] - bank->chip.gc.offset;
if (offset >= gc->ngpio || offset < 0)
return -EINVAL;
@@ -493,19 +495,17 @@ out_free_domain:
static void brcmstb_gpio_bank_save(struct brcmstb_gpio_priv *priv,
struct brcmstb_gpio_bank *bank)
{
- struct gpio_chip *gc = &bank->gc;
unsigned int i;
for (i = 0; i < GIO_REG_STAT; i++)
- bank->saved_regs[i] = gc->read_reg(priv->reg_base +
- GIO_BANK_OFF(bank->id, i));
+ bank->saved_regs[i] = gpio_generic_read_reg(&bank->chip,
+ priv->reg_base + GIO_BANK_OFF(bank->id, i));
}
static void brcmstb_gpio_quiesce(struct device *dev, bool save)
{
struct brcmstb_gpio_priv *priv = dev_get_drvdata(dev);
struct brcmstb_gpio_bank *bank;
- struct gpio_chip *gc;
u32 imask;
/* disable non-wake interrupt */
@@ -513,8 +513,6 @@ static void brcmstb_gpio_quiesce(struct device *dev, bool save)
disable_irq(priv->parent_irq);
list_for_each_entry(bank, &priv->bank_list, node) {
- gc = &bank->gc;
-
if (save)
brcmstb_gpio_bank_save(priv, bank);
@@ -523,8 +521,9 @@ static void brcmstb_gpio_quiesce(struct device *dev, bool save)
imask = bank->wake_active;
else
imask = 0;
- gc->write_reg(priv->reg_base + GIO_MASK(bank->id),
- imask);
+ gpio_generic_write_reg(&bank->chip,
+ priv->reg_base + GIO_MASK(bank->id),
+ imask);
}
}
@@ -538,12 +537,12 @@ static void brcmstb_gpio_shutdown(struct platform_device *pdev)
static void brcmstb_gpio_bank_restore(struct brcmstb_gpio_priv *priv,
struct brcmstb_gpio_bank *bank)
{
- struct gpio_chip *gc = &bank->gc;
unsigned int i;
for (i = 0; i < GIO_REG_STAT; i++)
- gc->write_reg(priv->reg_base + GIO_BANK_OFF(bank->id, i),
- bank->saved_regs[i]);
+ gpio_generic_write_reg(&bank->chip,
+ priv->reg_base + GIO_BANK_OFF(bank->id, i),
+ bank->saved_regs[i]);
}
static int brcmstb_gpio_suspend(struct device *dev)
@@ -585,6 +584,7 @@ static const struct dev_pm_ops brcmstb_gpio_pm_ops = {
static int brcmstb_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
void __iomem *reg_base;
@@ -630,7 +630,7 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
* else leave I/O in little endian mode.
*/
#if defined(CONFIG_MIPS) && defined(__BIG_ENDIAN)
- flags = BGPIOF_BIG_ENDIAN_BYTE_ORDER;
+ flags = GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER;
#endif
of_property_for_each_u32(np, "brcm,gpio-bank-widths", bank_width) {
@@ -665,17 +665,24 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
bank->width = bank_width;
}
+ gc = &bank->chip.gc;
+
/*
* Regs are 4 bytes wide, have data reg, no set/clear regs,
* and direction bits have 0 = output and 1 = input
*/
- gc = &bank->gc;
- err = bgpio_init(gc, dev, 4,
- reg_base + GIO_DATA(bank->id),
- NULL, NULL, NULL,
- reg_base + GIO_IODIR(bank->id), flags);
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = reg_base + GIO_DATA(bank->id),
+ .dirin = reg_base + GIO_IODIR(bank->id),
+ .flags = flags,
+ };
+
+ err = gpio_generic_chip_init(&bank->chip, &config);
if (err) {
- dev_err(dev, "bgpio_init() failed\n");
+ dev_err(dev, "failed to initialize generic GPIO chip\n");
goto fail;
}
@@ -700,7 +707,8 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
* be retained from S5 cold boot
*/
need_wakeup_event |= !!__brcmstb_gpio_get_active_irqs(bank);
- gc->write_reg(reg_base + GIO_MASK(bank->id), 0);
+ gpio_generic_write_reg(&bank->chip,
+ reg_base + GIO_MASK(bank->id), 0);
err = gpiochip_add_data(gc, bank);
if (err) {
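
The brcmstb locking changes follow the same rule as blzp1600 above: every raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() pair on the old gc->bgpio_lock becomes a scope-based guard on the generic chip, so the lock is released automatically on every return path. Schematically, for a hypothetical driver:

static void foo_gpio_irq_mask(struct irq_data *d)
{
	struct foo_gpio *g = irq_data_get_irq_chip_data(d);
	u32 mask;

	/* Dropped automatically when the enclosing scope ends. */
	guard(gpio_generic_lock_irqsave)(&g->gen_gc);

	mask = gpio_generic_read_reg(&g->gen_gc, g->base + FOO_MASK_REG);
	gpio_generic_write_reg(&g->gen_gc, g->base + FOO_MASK_REG,
			       mask | BIT(irqd_to_hwirq(d)));
}
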
diff --git a/drivers/gpio/gpio-bt8xx.c b/drivers/gpio/gpio-bt8xx.c
index 7c9e81fea37a..05401da03ca3 100644
--- a/drivers/gpio/gpio-bt8xx.c
+++ b/drivers/gpio/gpio-bt8xx.c
@@ -145,7 +145,7 @@ static void bt8xxgpio_gpio_setup(struct bt8xxgpio *bg)
c->direction_input = bt8xxgpio_gpio_direction_input;
c->get = bt8xxgpio_gpio_get;
c->direction_output = bt8xxgpio_gpio_direction_output;
- c->set_rv = bt8xxgpio_gpio_set;
+ c->set = bt8xxgpio_gpio_set;
c->dbg_show = NULL;
c->base = modparam_gpiobase;
c->ngpio = BT8XXGPIO_NR_GPIOS;
diff --git a/drivers/gpio/gpio-cadence.c b/drivers/gpio/gpio-cadence.c
index c647953521c7..b75734ca22dd 100644
--- a/drivers/gpio/gpio-cadence.c
+++ b/drivers/gpio/gpio-cadence.c
@@ -181,7 +181,7 @@ static int cdns_gpio_probe(struct platform_device *pdev)
config.dat = cgpio->regs + CDNS_GPIO_INPUT_VALUE;
config.set = cgpio->regs + CDNS_GPIO_OUTPUT_VALUE;
config.dirin = cgpio->regs + CDNS_GPIO_DIRECTION_MODE;
- config.flags = BGPIOF_READ_OUTPUT_REG_SET;
+ config.flags = GPIO_GENERIC_READ_OUTPUT_REG_SET;
ret = gpio_generic_chip_init(&cgpio->gen_gc, &config);
if (ret) {
diff --git a/drivers/gpio/gpio-cgbc.c b/drivers/gpio/gpio-cgbc.c
index 1495bec62456..0efa1b61001a 100644
--- a/drivers/gpio/gpio-cgbc.c
+++ b/drivers/gpio/gpio-cgbc.c
@@ -171,7 +171,7 @@ static int cgbc_gpio_probe(struct platform_device *pdev)
chip->direction_output = cgbc_gpio_direction_output;
chip->get_direction = cgbc_gpio_get_direction;
chip->get = cgbc_gpio_get;
- chip->set_rv = cgbc_gpio_set;
+ chip->set = cgbc_gpio_set;
chip->ngpio = CGBC_GPIO_NGPIO;
ret = devm_mutex_init(dev, &gpio->lock);
diff --git a/drivers/gpio/gpio-creg-snps.c b/drivers/gpio/gpio-creg-snps.c
index 8b49f02c7896..f8ea961fa1de 100644
--- a/drivers/gpio/gpio-creg-snps.c
+++ b/drivers/gpio/gpio-creg-snps.c
@@ -167,7 +167,7 @@ static int creg_gpio_probe(struct platform_device *pdev)
hcg->gc.label = dev_name(dev);
hcg->gc.base = -1;
hcg->gc.ngpio = ngpios;
- hcg->gc.set_rv = creg_gpio_set;
+ hcg->gc.set = creg_gpio_set;
hcg->gc.direction_output = creg_gpio_dir_out;
ret = devm_gpiochip_add_data(dev, &hcg->gc, hcg);
diff --git a/drivers/gpio/gpio-cros-ec.c b/drivers/gpio/gpio-cros-ec.c
index 53cd5ff6247b..435483826c6e 100644
--- a/drivers/gpio/gpio-cros-ec.c
+++ b/drivers/gpio/gpio-cros-ec.c
@@ -188,7 +188,7 @@ static int cros_ec_gpio_probe(struct platform_device *pdev)
gc->can_sleep = true;
gc->label = dev_name(dev);
gc->base = -1;
- gc->set_rv = cros_ec_gpio_set;
+ gc->set = cros_ec_gpio_set;
gc->get = cros_ec_gpio_get;
gc->get_direction = cros_ec_gpio_get_direction;
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index 8db7cca3a060..0fb5c06d0886 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -349,7 +349,7 @@ static int crystalcove_gpio_probe(struct platform_device *pdev)
cg->chip.direction_input = crystalcove_gpio_dir_in;
cg->chip.direction_output = crystalcove_gpio_dir_out;
cg->chip.get = crystalcove_gpio_get;
- cg->chip.set_rv = crystalcove_gpio_set;
+ cg->chip.set = crystalcove_gpio_set;
cg->chip.base = -1;
cg->chip.ngpio = CRYSTALCOVE_VGPIO_NUM;
cg->chip.can_sleep = true;
diff --git a/drivers/gpio/gpio-cs5535.c b/drivers/gpio/gpio-cs5535.c
index 143d1f4173a6..8affe4e9f90e 100644
--- a/drivers/gpio/gpio-cs5535.c
+++ b/drivers/gpio/gpio-cs5535.c
@@ -296,7 +296,7 @@ static struct cs5535_gpio_chip cs5535_gpio_chip = {
.request = chip_gpio_request,
.get = chip_gpio_get,
- .set_rv = chip_gpio_set,
+ .set = chip_gpio_set,
.direction_input = chip_direction_input,
.direction_output = chip_direction_output,
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
index 6482c5b267db..495f0ee58505 100644
--- a/drivers/gpio/gpio-da9052.c
+++ b/drivers/gpio/gpio-da9052.c
@@ -172,7 +172,7 @@ static const struct gpio_chip reference_gp = {
.label = "da9052-gpio",
.owner = THIS_MODULE,
.get = da9052_gpio_get,
- .set_rv = da9052_gpio_set,
+ .set = da9052_gpio_set,
.direction_input = da9052_gpio_direction_input,
.direction_output = da9052_gpio_direction_output,
.to_irq = da9052_gpio_to_irq,
diff --git a/drivers/gpio/gpio-da9055.c b/drivers/gpio/gpio-da9055.c
index 3d9d0c700100..a09bd6eb93cf 100644
--- a/drivers/gpio/gpio-da9055.c
+++ b/drivers/gpio/gpio-da9055.c
@@ -116,7 +116,7 @@ static const struct gpio_chip reference_gp = {
.label = "da9055-gpio",
.owner = THIS_MODULE,
.get = da9055_gpio_get,
- .set_rv = da9055_gpio_set,
+ .set = da9055_gpio_set,
.direction_input = da9055_gpio_direction_input,
.direction_output = da9055_gpio_direction_output,
.to_irq = da9055_gpio_to_irq,
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 8f3a36d0191d..538f27209ce7 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -202,7 +202,7 @@ static int davinci_gpio_probe(struct platform_device *pdev)
chips->chip.direction_input = davinci_direction_in;
chips->chip.get = davinci_gpio_get;
chips->chip.direction_output = davinci_direction_out;
- chips->chip.set_rv = davinci_gpio_set;
+ chips->chip.set = davinci_gpio_set;
chips->chip.ngpio = ngpio;
chips->chip.base = -1;
diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c
index 4bd3c47eaf93..4670ffd7ea7f 100644
--- a/drivers/gpio/gpio-dln2.c
+++ b/drivers/gpio/gpio-dln2.c
@@ -469,7 +469,7 @@ static int dln2_gpio_probe(struct platform_device *pdev)
dln2->gpio.base = -1;
dln2->gpio.ngpio = pins;
dln2->gpio.can_sleep = true;
- dln2->gpio.set_rv = dln2_gpio_set;
+ dln2->gpio.set = dln2_gpio_set;
dln2->gpio.get = dln2_gpio_get;
dln2->gpio.request = dln2_gpio_request;
dln2->gpio.free = dln2_gpio_free;
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 43b667b41f5d..b42ff46d292b 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -99,7 +100,7 @@ struct dwapb_gpio_port_irqchip {
};
struct dwapb_gpio_port {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
struct dwapb_gpio_port_irqchip *pirq;
struct dwapb_gpio *gpio;
#ifdef CONFIG_PM_SLEEP
@@ -107,8 +108,12 @@ struct dwapb_gpio_port {
#endif
unsigned int idx;
};
-#define to_dwapb_gpio(_gc) \
- (container_of(_gc, struct dwapb_gpio_port, gc)->gpio)
+
+static inline struct dwapb_gpio *to_dwapb_gpio(struct gpio_chip *gc)
+{
+ return container_of(to_gpio_generic_chip(gc),
+ struct dwapb_gpio_port, chip)->gpio;
+}
struct dwapb_gpio {
struct device *dev;
@@ -148,19 +153,19 @@ static inline u32 gpio_reg_convert(struct dwapb_gpio *gpio, unsigned int offset)
static inline u32 dwapb_read(struct dwapb_gpio *gpio, unsigned int offset)
{
- struct gpio_chip *gc = &gpio->ports[0].gc;
- void __iomem *reg_base = gpio->regs;
+ struct gpio_generic_chip *chip = &gpio->ports[0].chip;
+ void __iomem *reg_base = gpio->regs;
- return gc->read_reg(reg_base + gpio_reg_convert(gpio, offset));
+ return gpio_generic_read_reg(chip, reg_base + gpio_reg_convert(gpio, offset));
}
static inline void dwapb_write(struct dwapb_gpio *gpio, unsigned int offset,
u32 val)
{
- struct gpio_chip *gc = &gpio->ports[0].gc;
- void __iomem *reg_base = gpio->regs;
+ struct gpio_generic_chip *chip = &gpio->ports[0].chip;
+ void __iomem *reg_base = gpio->regs;
- gc->write_reg(reg_base + gpio_reg_convert(gpio, offset), val);
+ gpio_generic_write_reg(chip, reg_base + gpio_reg_convert(gpio, offset), val);
}
static struct dwapb_gpio_port *dwapb_offs_to_port(struct dwapb_gpio *gpio, unsigned int offs)
@@ -186,7 +191,7 @@ static void dwapb_toggle_trigger(struct dwapb_gpio *gpio, unsigned int offs)
if (!port)
return;
- gc = &port->gc;
+ gc = &port->chip.gc;
pol = dwapb_read(gpio, GPIO_INT_POLARITY);
/* Just read the current value right out of the data register */
@@ -201,13 +206,13 @@ static void dwapb_toggle_trigger(struct dwapb_gpio *gpio, unsigned int offs)
static u32 dwapb_do_irq(struct dwapb_gpio *gpio)
{
- struct gpio_chip *gc = &gpio->ports[0].gc;
+ struct gpio_generic_chip *gen_gc = &gpio->ports[0].chip;
unsigned long irq_status;
irq_hw_number_t hwirq;
irq_status = dwapb_read(gpio, GPIO_INTSTATUS);
for_each_set_bit(hwirq, &irq_status, DWAPB_MAX_GPIOS) {
- int gpio_irq = irq_find_mapping(gc->irq.domain, hwirq);
+ int gpio_irq = irq_find_mapping(gen_gc->gc.irq.domain, hwirq);
u32 irq_type = irq_get_trigger_type(gpio_irq);
generic_handle_irq(gpio_irq);
@@ -237,27 +242,27 @@ static irqreturn_t dwapb_irq_handler_mfd(int irq, void *dev_id)
static void dwapb_irq_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
u32 val = BIT(irqd_to_hwirq(d));
- unsigned long flags;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(gen_gc);
+
dwapb_write(gpio, GPIO_PORTA_EOI, val);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
static void dwapb_irq_mask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- unsigned long flags;
u32 val;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
- val = dwapb_read(gpio, GPIO_INTMASK) | BIT(hwirq);
- dwapb_write(gpio, GPIO_INTMASK, val);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ scoped_guard(gpio_generic_lock_irqsave, gen_gc) {
+ val = dwapb_read(gpio, GPIO_INTMASK) | BIT(hwirq);
+ dwapb_write(gpio, GPIO_INTMASK, val);
+ }
gpiochip_disable_irq(gc, hwirq);
}
@@ -265,59 +270,61 @@ static void dwapb_irq_mask(struct irq_data *d)
static void dwapb_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- unsigned long flags;
u32 val;
gpiochip_enable_irq(gc, hwirq);
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(gen_gc);
+
val = dwapb_read(gpio, GPIO_INTMASK) & ~BIT(hwirq);
dwapb_write(gpio, GPIO_INTMASK, val);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
static void dwapb_irq_enable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- unsigned long flags;
u32 val;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(gen_gc);
+
val = dwapb_read(gpio, GPIO_INTEN) | BIT(hwirq);
dwapb_write(gpio, GPIO_INTEN, val);
val = dwapb_read(gpio, GPIO_INTMASK) & ~BIT(hwirq);
dwapb_write(gpio, GPIO_INTMASK, val);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
static void dwapb_irq_disable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- unsigned long flags;
u32 val;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(gen_gc);
+
val = dwapb_read(gpio, GPIO_INTMASK) | BIT(hwirq);
dwapb_write(gpio, GPIO_INTMASK, val);
val = dwapb_read(gpio, GPIO_INTEN) & ~BIT(hwirq);
dwapb_write(gpio, GPIO_INTEN, val);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
static int dwapb_irq_set_type(struct irq_data *d, u32 type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
irq_hw_number_t bit = irqd_to_hwirq(d);
- unsigned long level, polarity, flags;
+ unsigned long level, polarity;
+
+ guard(gpio_generic_lock_irqsave)(gen_gc);
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
level = dwapb_read(gpio, GPIO_INTTYPE_LEVEL);
polarity = dwapb_read(gpio, GPIO_INT_POLARITY);
@@ -352,7 +359,6 @@ static int dwapb_irq_set_type(struct irq_data *d, u32 type)
dwapb_write(gpio, GPIO_INTTYPE_LEVEL, level);
if (type != IRQ_TYPE_EDGE_BOTH)
dwapb_write(gpio, GPIO_INT_POLARITY, polarity);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
return 0;
}
@@ -393,11 +399,12 @@ static int dwapb_gpio_set_debounce(struct gpio_chip *gc,
unsigned offset, unsigned debounce)
{
struct dwapb_gpio_port *port = gpiochip_get_data(gc);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct dwapb_gpio *gpio = port->gpio;
- unsigned long flags, val_deb;
+ unsigned long val_deb;
unsigned long mask = BIT(offset);
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(gen_gc);
val_deb = dwapb_read(gpio, GPIO_PORTA_DEBOUNCE);
if (debounce)
@@ -406,8 +413,6 @@ static int dwapb_gpio_set_debounce(struct gpio_chip *gc,
val_deb &= ~mask;
dwapb_write(gpio, GPIO_PORTA_DEBOUNCE, val_deb);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
-
return 0;
}
@@ -445,7 +450,7 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
struct dwapb_port_property *pp)
{
struct dwapb_gpio_port_irqchip *pirq;
- struct gpio_chip *gc = &port->gc;
+ struct gpio_chip *gc = &port->chip.gc;
struct gpio_irq_chip *girq;
int err;
@@ -501,6 +506,7 @@ static int dwapb_gpio_add_port(struct dwapb_gpio *gpio,
struct dwapb_port_property *pp,
unsigned int offs)
{
+ struct gpio_generic_chip_config config;
struct dwapb_gpio_port *port;
void __iomem *dat, *set, *dirout;
int err;
@@ -519,32 +525,39 @@ static int dwapb_gpio_add_port(struct dwapb_gpio *gpio,
set = gpio->regs + GPIO_SWPORTA_DR + pp->idx * GPIO_SWPORT_DR_STRIDE;
dirout = gpio->regs + GPIO_SWPORTA_DDR + pp->idx * GPIO_SWPORT_DDR_STRIDE;
+ config = (struct gpio_generic_chip_config) {
+ .dev = gpio->dev,
+ .sz = 4,
+ .dat = dat,
+ .set = set,
+ .dirout = dirout,
+ };
+
/* This registers 32 GPIO lines per port */
- err = bgpio_init(&port->gc, gpio->dev, 4, dat, set, NULL, dirout,
- NULL, 0);
+ err = gpio_generic_chip_init(&port->chip, &config);
if (err) {
dev_err(gpio->dev, "failed to init gpio chip for port%d\n",
port->idx);
return err;
}
- port->gc.fwnode = pp->fwnode;
- port->gc.ngpio = pp->ngpio;
- port->gc.base = pp->gpio_base;
- port->gc.request = gpiochip_generic_request;
- port->gc.free = gpiochip_generic_free;
+ port->chip.gc.fwnode = pp->fwnode;
+ port->chip.gc.ngpio = pp->ngpio;
+ port->chip.gc.base = pp->gpio_base;
+ port->chip.gc.request = gpiochip_generic_request;
+ port->chip.gc.free = gpiochip_generic_free;
/* Only port A supports debounce */
if (pp->idx == 0)
- port->gc.set_config = dwapb_gpio_set_config;
+ port->chip.gc.set_config = dwapb_gpio_set_config;
else
- port->gc.set_config = gpiochip_generic_config;
+ port->chip.gc.set_config = gpiochip_generic_config;
/* Only port A can provide interrupts in all configurations of the IP */
if (pp->idx == 0)
dwapb_configure_irqs(gpio, port, pp);
- err = devm_gpiochip_add_data(gpio->dev, &port->gc, port);
+ err = devm_gpiochip_add_data(gpio->dev, &port->chip.gc, port);
if (err) {
dev_err(gpio->dev, "failed to register gpiochip for port%d\n",
port->idx);
@@ -750,38 +763,37 @@ static int dwapb_gpio_probe(struct platform_device *pdev)
static int dwapb_gpio_suspend(struct device *dev)
{
struct dwapb_gpio *gpio = dev_get_drvdata(dev);
- struct gpio_chip *gc = &gpio->ports[0].gc;
- unsigned long flags;
+ struct gpio_generic_chip *gen_gc = &gpio->ports[0].chip;
int i;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
- for (i = 0; i < gpio->nr_ports; i++) {
- unsigned int offset;
- unsigned int idx = gpio->ports[i].idx;
- struct dwapb_context *ctx = gpio->ports[i].ctx;
+ scoped_guard(gpio_generic_lock_irqsave, gen_gc) {
+ for (i = 0; i < gpio->nr_ports; i++) {
+ unsigned int offset;
+ unsigned int idx = gpio->ports[i].idx;
+ struct dwapb_context *ctx = gpio->ports[i].ctx;
- offset = GPIO_SWPORTA_DDR + idx * GPIO_SWPORT_DDR_STRIDE;
- ctx->dir = dwapb_read(gpio, offset);
+ offset = GPIO_SWPORTA_DDR + idx * GPIO_SWPORT_DDR_STRIDE;
+ ctx->dir = dwapb_read(gpio, offset);
- offset = GPIO_SWPORTA_DR + idx * GPIO_SWPORT_DR_STRIDE;
- ctx->data = dwapb_read(gpio, offset);
+ offset = GPIO_SWPORTA_DR + idx * GPIO_SWPORT_DR_STRIDE;
+ ctx->data = dwapb_read(gpio, offset);
- offset = GPIO_EXT_PORTA + idx * GPIO_EXT_PORT_STRIDE;
- ctx->ext = dwapb_read(gpio, offset);
+ offset = GPIO_EXT_PORTA + idx * GPIO_EXT_PORT_STRIDE;
+ ctx->ext = dwapb_read(gpio, offset);
- /* Only port A can provide interrupts */
- if (idx == 0) {
- ctx->int_mask = dwapb_read(gpio, GPIO_INTMASK);
- ctx->int_en = dwapb_read(gpio, GPIO_INTEN);
- ctx->int_pol = dwapb_read(gpio, GPIO_INT_POLARITY);
- ctx->int_type = dwapb_read(gpio, GPIO_INTTYPE_LEVEL);
- ctx->int_deb = dwapb_read(gpio, GPIO_PORTA_DEBOUNCE);
-
- /* Mask out interrupts */
- dwapb_write(gpio, GPIO_INTMASK, ~ctx->wake_en);
+ /* Only port A can provide interrupts */
+ if (idx == 0) {
+ ctx->int_mask = dwapb_read(gpio, GPIO_INTMASK);
+ ctx->int_en = dwapb_read(gpio, GPIO_INTEN);
+ ctx->int_pol = dwapb_read(gpio, GPIO_INT_POLARITY);
+ ctx->int_type = dwapb_read(gpio, GPIO_INTTYPE_LEVEL);
+ ctx->int_deb = dwapb_read(gpio, GPIO_PORTA_DEBOUNCE);
+
+ /* Mask out interrupts */
+ dwapb_write(gpio, GPIO_INTMASK, ~ctx->wake_en);
+ }
}
}
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
clk_bulk_disable_unprepare(DWAPB_NR_CLOCKS, gpio->clks);
@@ -791,8 +803,8 @@ static int dwapb_gpio_suspend(struct device *dev)
static int dwapb_gpio_resume(struct device *dev)
{
struct dwapb_gpio *gpio = dev_get_drvdata(dev);
- struct gpio_chip *gc = &gpio->ports[0].gc;
- unsigned long flags;
+ struct gpio_chip *gc = &gpio->ports[0].chip.gc;
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
int i, err;
err = clk_bulk_prepare_enable(DWAPB_NR_CLOCKS, gpio->clks);
@@ -801,7 +813,8 @@ static int dwapb_gpio_resume(struct device *dev)
return err;
}
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(gen_gc);
+
for (i = 0; i < gpio->nr_ports; i++) {
unsigned int offset;
unsigned int idx = gpio->ports[i].idx;
@@ -828,7 +841,6 @@ static int dwapb_gpio_resume(struct device *dev)
dwapb_write(gpio, GPIO_PORTA_EOI, 0xffffffff);
}
}
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
return 0;
}
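
Note the two guard flavors in the dwapb hunks above: guard() holds the lock until the end of the enclosing scope, while scoped_guard() bounds the critical section explicitly. The latter is needed in dwapb_irq_mask(), where gpiochip_disable_irq() must run unlocked. Its shape, with hypothetical names:

static void foo_gpio_irq_mask(struct irq_data *d)
{
	struct foo_gpio *g = irq_data_get_irq_chip_data(d);
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	u32 val;

	scoped_guard(gpio_generic_lock_irqsave, &g->gen_gc) {
		val = gpio_generic_read_reg(&g->gen_gc, g->base + FOO_MASK_REG);
		gpio_generic_write_reg(&g->gen_gc, g->base + FOO_MASK_REG,
				       val | BIT(hwirq));
	}

	/* Must run without the lock held. */
	gpiochip_disable_irq(&g->gen_gc.gc, hwirq);
}
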
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index f2973d0b7138..50fafeda8d7e 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -663,7 +663,7 @@ static int sprd_eic_probe(struct platform_device *pdev)
sprd_eic->chip.request = sprd_eic_request;
sprd_eic->chip.free = sprd_eic_free;
sprd_eic->chip.set_config = sprd_eic_set_config;
- sprd_eic->chip.set_rv = sprd_eic_set;
+ sprd_eic->chip.set = sprd_eic_set;
fallthrough;
case SPRD_EIC_ASYNC:
case SPRD_EIC_SYNC:
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index 015f1ac32dd9..a214b0672726 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -306,7 +306,7 @@ static int em_gio_probe(struct platform_device *pdev)
gpio_chip->direction_input = em_gio_direction_input;
gpio_chip->get = em_gio_get;
gpio_chip->direction_output = em_gio_direction_output;
- gpio_chip->set_rv = em_gio_set;
+ gpio_chip->set = em_gio_set;
gpio_chip->to_irq = em_gio_to_irq;
gpio_chip->request = pinctrl_gpio_request;
gpio_chip->free = em_gio_free;
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index 58d2464c07bc..1f56e44ffc9a 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -9,16 +9,17 @@
* linux/arch/arm/mach-ep93xx/core.c
*/
+#include <linux/bitops.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
-#include <linux/slab.h>
-#include <linux/gpio/driver.h>
-#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/seq_file.h>
+#include <linux/slab.h>
struct ep93xx_gpio_irq_chip {
void __iomem *base;
@@ -31,11 +32,14 @@ struct ep93xx_gpio_irq_chip {
struct ep93xx_gpio_chip {
void __iomem *base;
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
struct ep93xx_gpio_irq_chip *eic;
};
-#define to_ep93xx_gpio_chip(x) container_of(x, struct ep93xx_gpio_chip, gc)
+static struct ep93xx_gpio_chip *to_ep93xx_gpio_chip(struct gpio_chip *gc)
+{
+ return container_of(to_gpio_generic_chip(gc), struct ep93xx_gpio_chip, chip);
+}
static struct ep93xx_gpio_irq_chip *to_ep93xx_gpio_irq_chip(struct gpio_chip *gc)
{
@@ -267,7 +271,7 @@ static const struct irq_chip gpio_eic_irq_chip = {
static int ep93xx_setup_irqs(struct platform_device *pdev,
struct ep93xx_gpio_chip *egc)
{
- struct gpio_chip *gc = &egc->gc;
+ struct gpio_chip *gc = &egc->chip.gc;
struct device *dev = &pdev->dev;
struct gpio_irq_chip *girq = &gc->irq;
int ret, irq, i;
@@ -327,6 +331,7 @@ static int ep93xx_setup_irqs(struct platform_device *pdev,
static int ep93xx_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct ep93xx_gpio_chip *egc;
struct gpio_chip *gc;
void __iomem *data;
@@ -345,8 +350,16 @@ static int ep93xx_gpio_probe(struct platform_device *pdev)
if (IS_ERR(dir))
return PTR_ERR(dir);
- gc = &egc->gc;
- ret = bgpio_init(gc, &pdev->dev, 1, data, NULL, NULL, dir, NULL, 0);
+ gc = &egc->chip.gc;
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = &pdev->dev,
+ .sz = 1,
+ .dat = data,
+ .dirout = dir,
+ };
+
+ ret = gpio_generic_chip_init(&egc->chip, &config);
if (ret)
return dev_err_probe(&pdev->dev, ret, "unable to init generic GPIO\n");
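
ep93xx, like dwapb earlier, trades its container_of() macro for a helper going through to_gpio_generic_chip(), since the gpio_chip is now nested one level deeper inside the driver state. The general shape, for a hypothetical driver:

struct foo_gpio {
	void __iomem *base;
	struct gpio_generic_chip chip;
};

static struct foo_gpio *to_foo_gpio(struct gpio_chip *gc)
{
	/* gpio_chip -> enclosing gpio_generic_chip -> driver data */
	return container_of(to_gpio_generic_chip(gc), struct foo_gpio, chip);
}
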
diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
index beb98286d13e..9053662f1817 100644
--- a/drivers/gpio/gpio-exar.c
+++ b/drivers/gpio/gpio-exar.c
@@ -211,7 +211,7 @@ static int gpio_exar_probe(struct platform_device *pdev)
exar_gpio->gpio_chip.direction_input = exar_direction_input;
exar_gpio->gpio_chip.get_direction = exar_get_direction;
exar_gpio->gpio_chip.get = exar_get_value;
- exar_gpio->gpio_chip.set_rv = exar_set_value;
+ exar_gpio->gpio_chip.set = exar_set_value;
exar_gpio->gpio_chip.base = -1;
exar_gpio->gpio_chip.ngpio = ngpios;
exar_gpio->index = index;
diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c
index dfcd3634f279..4d5b927ad70f 100644
--- a/drivers/gpio/gpio-f7188x.c
+++ b/drivers/gpio/gpio-f7188x.c
@@ -173,7 +173,7 @@ static int f7188x_gpio_set_config(struct gpio_chip *chip, unsigned offset,
.direction_input = f7188x_gpio_direction_in, \
.get = f7188x_gpio_get, \
.direction_output = f7188x_gpio_direction_out, \
- .set_rv = f7188x_gpio_set, \
+ .set = f7188x_gpio_set, \
.set_config = f7188x_gpio_set_config, \
.base = -1, \
.ngpio = _ngpio, \
diff --git a/drivers/gpio/gpio-ftgpio010.c b/drivers/gpio/gpio-ftgpio010.c
index c35eaa2851d8..11e6907c3b54 100644
--- a/drivers/gpio/gpio-ftgpio010.c
+++ b/drivers/gpio/gpio-ftgpio010.c
@@ -10,12 +10,14 @@
* MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de>
* Copyright 2008 Juergen Beisert, kernel@pengutronix.de
*/
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/gpio/driver.h>
-#include <linux/io.h>
+#include <linux/gpio/generic.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
-#include <linux/bitops.h>
-#include <linux/clk.h>
/* GPIO registers definition */
#define GPIO_DATA_OUT 0x00
@@ -40,13 +42,13 @@
/**
* struct ftgpio_gpio - Gemini GPIO state container
* @dev: containing device for this instance
- * @gc: gpiochip for this instance
+ * @chip: generic GPIO chip for this instance
* @base: remapped I/O-memory base
* @clk: silicon clock
*/
struct ftgpio_gpio {
struct device *dev;
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *base;
struct clk *clk;
};
@@ -233,6 +235,7 @@ static const struct irq_chip ftgpio_irq_chip = {
static int ftgpio_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct ftgpio_gpio *g;
struct gpio_irq_chip *girq;
@@ -261,27 +264,30 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
*/
return PTR_ERR(g->clk);
- ret = bgpio_init(&g->gc, dev, 4,
- g->base + GPIO_DATA_IN,
- g->base + GPIO_DATA_SET,
- g->base + GPIO_DATA_CLR,
- g->base + GPIO_DIR,
- NULL,
- 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = g->base + GPIO_DATA_IN,
+ .set = g->base + GPIO_DATA_SET,
+ .clr = g->base + GPIO_DATA_CLR,
+ .dirout = g->base + GPIO_DIR,
+ };
+
+ ret = gpio_generic_chip_init(&g->chip, &config);
if (ret)
return dev_err_probe(dev, ret, "unable to init generic GPIO\n");
- g->gc.label = dev_name(dev);
- g->gc.base = -1;
- g->gc.parent = dev;
- g->gc.owner = THIS_MODULE;
- /* ngpio is set by bgpio_init() */
+ g->chip.gc.label = dev_name(dev);
+ g->chip.gc.base = -1;
+ g->chip.gc.parent = dev;
+ g->chip.gc.owner = THIS_MODULE;
+ /* ngpio is set by gpio_generic_chip_init() */
/* We need a silicon clock to do debounce */
if (!IS_ERR(g->clk))
- g->gc.set_config = ftgpio_gpio_set_config;
+ g->chip.gc.set_config = ftgpio_gpio_set_config;
- girq = &g->gc.irq;
+ girq = &g->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &ftgpio_irq_chip);
girq->parent_handler = ftgpio_gpio_irq_handler;
girq->num_parents = 1;
@@ -302,7 +308,7 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
/* Clear any use of debounce */
writel(0x0, g->base + GPIO_DEBOUNCE_EN);
- return devm_gpiochip_add_data(dev, &g->gc, g);
+ return devm_gpiochip_add_data(dev, &g->chip.gc, g);
}
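
The updated ftgpio comment above points at a detail worth knowing when reviewing these conversions: gpio_generic_chip_init(), like bgpio_init() before it, derives ngpio from the register width (sz bytes, i.e. sz * 8 lines), so a driver exposing fewer lines overrides it after init, as hlwd does later in this series. A sketch, assuming a hypothetical 24-line controller:

	ret = gpio_generic_chip_init(&g->chip, &config);
	if (ret)
		return ret;

	/* .sz == 4 implied 32 lines; this hypothetical IP wires up only 24. */
	g->chip.gc.ngpio = 24;
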
static const struct of_device_id ftgpio_gpio_of_match[] = {
diff --git a/drivers/gpio/gpio-ge.c b/drivers/gpio/gpio-ge.c
index 5dc49648d8e3..66bdff36eb61 100644
--- a/drivers/gpio/gpio-ge.c
+++ b/drivers/gpio/gpio-ge.c
@@ -16,6 +16,7 @@
*/
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
@@ -51,24 +52,36 @@ MODULE_DEVICE_TABLE(of, gef_gpio_ids);
static int __init gef_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
+ struct gpio_generic_chip *chip;
struct gpio_chip *gc;
void __iomem *regs;
int ret;
- gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
- if (!gc)
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
return -ENOMEM;
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
- ret = bgpio_init(gc, dev, 4, regs + GEF_GPIO_IN, regs + GEF_GPIO_OUT,
- NULL, NULL, regs + GEF_GPIO_DIRECT,
- BGPIOF_BIG_ENDIAN_BYTE_ORDER);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = regs + GEF_GPIO_IN,
+ .set = regs + GEF_GPIO_OUT,
+ .dirin = regs + GEF_GPIO_DIRECT,
+ .flags = GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER,
+ };
+
+ ret = gpio_generic_chip_init(chip, &config);
if (ret)
- return dev_err_probe(dev, ret, "bgpio_init failed\n");
+ return dev_err_probe(dev, ret,
+ "failed to initialize the generic GPIO chip\n");
+
+ gc = &chip->gc;
/* Setup pointers to chip functions */
gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pfw", dev_fwnode(dev));
diff --git a/drivers/gpio/gpio-graniterapids.c b/drivers/gpio/gpio-graniterapids.c
index f25283e5239d..121bf29a27f5 100644
--- a/drivers/gpio/gpio-graniterapids.c
+++ b/drivers/gpio/gpio-graniterapids.c
@@ -159,7 +159,7 @@ static const struct gpio_chip gnr_gpio_chip = {
.owner = THIS_MODULE,
.request = gnr_gpio_request,
.get = gnr_gpio_get,
- .set_rv = gnr_gpio_set,
+ .set = gnr_gpio_set,
.get_direction = gnr_gpio_get_direction,
.direction_input = gnr_gpio_direction_input,
.direction_output = gnr_gpio_direction_output,
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index f3f8bab62f94..0c0f97fa14fc 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -19,6 +19,7 @@
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -59,7 +60,7 @@ struct grgpio_lirq {
};
struct grgpio_priv {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *regs;
struct device *dev;
@@ -91,13 +92,12 @@ struct grgpio_priv {
static void grgpio_set_imask(struct grgpio_priv *priv, unsigned int offset,
int val)
{
- struct gpio_chip *gc = &priv->gc;
-
if (val)
priv->imask |= BIT(offset);
else
priv->imask &= ~BIT(offset);
- gc->write_reg(priv->regs + GRGPIO_IMASK, priv->imask);
+
+ gpio_generic_write_reg(&priv->chip, priv->regs + GRGPIO_IMASK, priv->imask);
}
static int grgpio_to_irq(struct gpio_chip *gc, unsigned offset)
@@ -118,7 +118,6 @@ static int grgpio_to_irq(struct gpio_chip *gc, unsigned offset)
static int grgpio_irq_set_type(struct irq_data *d, unsigned int type)
{
struct grgpio_priv *priv = irq_data_get_irq_chip_data(d);
- unsigned long flags;
u32 mask = BIT(d->hwirq);
u32 ipol;
u32 iedge;
@@ -146,15 +145,13 @@ static int grgpio_irq_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&priv->chip);
- ipol = priv->gc.read_reg(priv->regs + GRGPIO_IPOL) & ~mask;
- iedge = priv->gc.read_reg(priv->regs + GRGPIO_IEDGE) & ~mask;
+ ipol = gpio_generic_read_reg(&priv->chip, priv->regs + GRGPIO_IPOL) & ~mask;
+ iedge = gpio_generic_read_reg(&priv->chip, priv->regs + GRGPIO_IEDGE) & ~mask;
- priv->gc.write_reg(priv->regs + GRGPIO_IPOL, ipol | pol);
- priv->gc.write_reg(priv->regs + GRGPIO_IEDGE, iedge | edge);
-
- raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
+ gpio_generic_write_reg(&priv->chip, priv->regs + GRGPIO_IPOL, ipol | pol);
+ gpio_generic_write_reg(&priv->chip, priv->regs + GRGPIO_IEDGE, iedge | edge);
return 0;
}
@@ -163,29 +160,23 @@ static void grgpio_irq_mask(struct irq_data *d)
{
struct grgpio_priv *priv = irq_data_get_irq_chip_data(d);
int offset = d->hwirq;
- unsigned long flags;
-
- raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
- grgpio_set_imask(priv, offset, 0);
+ scoped_guard(gpio_generic_lock_irqsave, &priv->chip)
+ grgpio_set_imask(priv, offset, 0);
- raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
-
- gpiochip_disable_irq(&priv->gc, d->hwirq);
+ gpiochip_disable_irq(&priv->chip.gc, d->hwirq);
}
static void grgpio_irq_unmask(struct irq_data *d)
{
struct grgpio_priv *priv = irq_data_get_irq_chip_data(d);
int offset = d->hwirq;
- unsigned long flags;
- gpiochip_enable_irq(&priv->gc, d->hwirq);
- raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
+ gpiochip_enable_irq(&priv->chip.gc, d->hwirq);
- grgpio_set_imask(priv, offset, 1);
+ guard(gpio_generic_lock_irqsave)(&priv->chip);
- raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
+ grgpio_set_imask(priv, offset, 1);
}
static const struct irq_chip grgpio_irq_chip = {
@@ -200,12 +191,11 @@ static const struct irq_chip grgpio_irq_chip = {
static irqreturn_t grgpio_irq_handler(int irq, void *dev)
{
struct grgpio_priv *priv = dev;
- int ngpio = priv->gc.ngpio;
- unsigned long flags;
+ int ngpio = priv->chip.gc.ngpio;
int i;
int match = 0;
- raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&priv->chip);
/*
* For each gpio line, call its interrupt handler if its underlying
@@ -221,8 +211,6 @@ static irqreturn_t grgpio_irq_handler(int irq, void *dev)
}
}
- raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
-
if (!match)
dev_warn(priv->dev, "No gpio line matched irq %d\n", irq);
@@ -253,13 +241,18 @@ static int grgpio_irq_map(struct irq_domain *d, unsigned int irq,
dev_dbg(priv->dev, "Mapping irq %d for gpio line %d\n",
irq, offset);
- raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
+ gpio_generic_chip_lock_irqsave(&priv->chip, flags);
/* Request underlying irq if not already requested */
lirq->irq = irq;
uirq = &priv->uirqs[lirq->index];
if (uirq->refcnt == 0) {
- raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
+ /*
+ * FIXME: This is not how locking works at all: you can't just
+ * release the lock for a moment, call something that can sleep,
+ * and then take it back...
+ */
+ gpio_generic_chip_unlock_irqrestore(&priv->chip, flags);
ret = request_irq(uirq->uirq, grgpio_irq_handler, 0,
dev_name(priv->dev), priv);
if (ret) {
@@ -268,11 +261,11 @@ static int grgpio_irq_map(struct irq_domain *d, unsigned int irq,
uirq->uirq);
return ret;
}
- raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
+ gpio_generic_chip_lock_irqsave(&priv->chip, flags);
}
uirq->refcnt++;
- raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
+ gpio_generic_chip_unlock_irqrestore(&priv->chip, flags);
/* Setup irq */
irq_set_chip_data(irq, priv);
@@ -290,13 +283,13 @@ static void grgpio_irq_unmap(struct irq_domain *d, unsigned int irq)
struct grgpio_lirq *lirq;
struct grgpio_uirq *uirq;
unsigned long flags;
- int ngpio = priv->gc.ngpio;
+ int ngpio = priv->chip.gc.ngpio;
int i;
irq_set_chip_and_handler(irq, NULL, NULL);
irq_set_chip_data(irq, NULL);
- raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
+ gpio_generic_chip_lock_irqsave(&priv->chip, flags);
/* Free underlying irq if last user unmapped */
index = -1;
@@ -315,13 +308,13 @@ static void grgpio_irq_unmap(struct irq_domain *d, unsigned int irq)
uirq = &priv->uirqs[lirq->index];
uirq->refcnt--;
if (uirq->refcnt == 0) {
- raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
+ gpio_generic_chip_unlock_irqrestore(&priv->chip, flags);
free_irq(uirq->uirq, priv);
return;
}
}
- raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
+ gpio_generic_chip_unlock_irqrestore(&priv->chip, flags);
}
static void grgpio_irq_domain_remove(void *data)
@@ -341,6 +334,7 @@ static const struct irq_domain_ops grgpio_irq_domain_ops = {
static int grgpio_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
+ struct gpio_generic_chip_config config;
struct device *dev = &ofdev->dev;
void __iomem *regs;
struct gpio_chip *gc;
@@ -359,17 +353,24 @@ static int grgpio_probe(struct platform_device *ofdev)
if (IS_ERR(regs))
return PTR_ERR(regs);
- gc = &priv->gc;
- err = bgpio_init(gc, dev, 4, regs + GRGPIO_DATA,
- regs + GRGPIO_OUTPUT, NULL, regs + GRGPIO_DIR, NULL,
- BGPIOF_BIG_ENDIAN_BYTE_ORDER);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = regs + GRGPIO_DATA,
+ .set = regs + GRGPIO_OUTPUT,
+ .dirout = regs + GRGPIO_DIR,
+ .flags = GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER,
+ };
+
+ gc = &priv->chip.gc;
+ err = gpio_generic_chip_init(&priv->chip, &config);
if (err) {
- dev_err(dev, "bgpio_init() failed\n");
+ dev_err(dev, "failed to initialize the generic GPIO chip\n");
return err;
}
priv->regs = regs;
- priv->imask = gc->read_reg(regs + GRGPIO_IMASK);
+ priv->imask = gpio_generic_read_reg(&priv->chip, regs + GRGPIO_IMASK);
priv->dev = dev;
gc->owner = THIS_MODULE;
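
grgpio is the one driver in this set that cannot use guards everywhere: grgpio_irq_map() and grgpio_irq_unmap() (see the FIXME above) drop the lock mid-function around request_irq()/free_irq(), so they use the explicit helper pair instead. Its shape, reduced to a hypothetical refcount bump:

static void foo_get_uirq(struct foo_gpio *g)	/* hypothetical */
{
	unsigned long flags;

	/* Explicit lock/unlock, for scopes a guard cannot express. */
	gpio_generic_chip_lock_irqsave(&g->chip, flags);
	g->uirq_refcnt++;
	gpio_generic_chip_unlock_irqrestore(&g->chip, flags);
}
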
diff --git a/drivers/gpio/gpio-gw-pld.c b/drivers/gpio/gpio-gw-pld.c
index a40ba99a3aea..2e5d97b7363f 100644
--- a/drivers/gpio/gpio-gw-pld.c
+++ b/drivers/gpio/gpio-gw-pld.c
@@ -86,7 +86,7 @@ static int gw_pld_probe(struct i2c_client *client)
gw->chip.direction_input = gw_pld_input8;
gw->chip.get = gw_pld_get8;
gw->chip.direction_output = gw_pld_output8;
- gw->chip.set_rv = gw_pld_set8;
+ gw->chip.set = gw_pld_set8;
gw->client = client;
/*
diff --git a/drivers/gpio/gpio-hisi.c b/drivers/gpio/gpio-hisi.c
index ef5cc654a24e..d26298c8351b 100644
--- a/drivers/gpio/gpio-hisi.c
+++ b/drivers/gpio/gpio-hisi.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 HiSilicon Limited. */
+
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
@@ -33,7 +35,7 @@
#define HISI_GPIO_DRIVER_NAME "gpio-hisi"
struct hisi_gpio {
- struct gpio_chip chip;
+ struct gpio_generic_chip chip;
struct device *dev;
void __iomem *reg_base;
unsigned int line_num;
@@ -43,8 +45,8 @@ struct hisi_gpio {
static inline u32 hisi_gpio_read_reg(struct gpio_chip *chip,
unsigned int off)
{
- struct hisi_gpio *hisi_gpio =
- container_of(chip, struct hisi_gpio, chip);
+ struct hisi_gpio *hisi_gpio = container_of(to_gpio_generic_chip(chip),
+ struct hisi_gpio, chip);
void __iomem *reg = hisi_gpio->reg_base + off;
return readl(reg);
@@ -53,8 +55,8 @@ static inline u32 hisi_gpio_read_reg(struct gpio_chip *chip,
static inline void hisi_gpio_write_reg(struct gpio_chip *chip,
unsigned int off, u32 val)
{
- struct hisi_gpio *hisi_gpio =
- container_of(chip, struct hisi_gpio, chip);
+ struct hisi_gpio *hisi_gpio = container_of(to_gpio_generic_chip(chip),
+ struct hisi_gpio, chip);
void __iomem *reg = hisi_gpio->reg_base + off;
writel(val, reg);
@@ -180,14 +182,14 @@ static void hisi_gpio_irq_disable(struct irq_data *d)
static void hisi_gpio_irq_handler(struct irq_desc *desc)
{
struct hisi_gpio *hisi_gpio = irq_desc_get_handler_data(desc);
- unsigned long irq_msk = hisi_gpio_read_reg(&hisi_gpio->chip,
+ unsigned long irq_msk = hisi_gpio_read_reg(&hisi_gpio->chip.gc,
HISI_GPIO_INTSTATUS_WX);
struct irq_chip *irq_c = irq_desc_get_chip(desc);
int hwirq;
chained_irq_enter(irq_c, desc);
for_each_set_bit(hwirq, &irq_msk, HISI_GPIO_LINE_NUM_MAX)
- generic_handle_domain_irq(hisi_gpio->chip.irq.domain,
+ generic_handle_domain_irq(hisi_gpio->chip.gc.irq.domain,
hwirq);
chained_irq_exit(irq_c, desc);
}
@@ -206,7 +208,7 @@ static const struct irq_chip hisi_gpio_irq_chip = {
static void hisi_gpio_init_irq(struct hisi_gpio *hisi_gpio)
{
- struct gpio_chip *chip = &hisi_gpio->chip;
+ struct gpio_chip *chip = &hisi_gpio->chip.gc;
struct gpio_irq_chip *girq_chip = &chip->irq;
gpio_irq_chip_set_chip(girq_chip, &hisi_gpio_irq_chip);
@@ -264,6 +266,7 @@ static void hisi_gpio_get_pdata(struct device *dev,
static int hisi_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct hisi_gpio *hisi_gpio;
int port_num;
@@ -289,27 +292,32 @@ static int hisi_gpio_probe(struct platform_device *pdev)
hisi_gpio->dev = dev;
- ret = bgpio_init(&hisi_gpio->chip, hisi_gpio->dev, 0x4,
- hisi_gpio->reg_base + HISI_GPIO_EXT_PORT_WX,
- hisi_gpio->reg_base + HISI_GPIO_SWPORT_DR_SET_WX,
- hisi_gpio->reg_base + HISI_GPIO_SWPORT_DR_CLR_WX,
- hisi_gpio->reg_base + HISI_GPIO_SWPORT_DDR_SET_WX,
- hisi_gpio->reg_base + HISI_GPIO_SWPORT_DDR_CLR_WX,
- BGPIOF_NO_SET_ON_INPUT);
+ config = (struct gpio_generic_chip_config) {
+ .dev = hisi_gpio->dev,
+ .sz = 4,
+ .dat = hisi_gpio->reg_base + HISI_GPIO_EXT_PORT_WX,
+ .set = hisi_gpio->reg_base + HISI_GPIO_SWPORT_DR_SET_WX,
+ .clr = hisi_gpio->reg_base + HISI_GPIO_SWPORT_DR_CLR_WX,
+ .dirout = hisi_gpio->reg_base + HISI_GPIO_SWPORT_DDR_SET_WX,
+ .dirin = hisi_gpio->reg_base + HISI_GPIO_SWPORT_DDR_CLR_WX,
+ .flags = GPIO_GENERIC_NO_SET_ON_INPUT |
+ GPIO_GENERIC_UNREADABLE_REG_DIR,
+ };
+
+ ret = gpio_generic_chip_init(&hisi_gpio->chip, &config);
if (ret) {
dev_err(dev, "failed to init, ret = %d\n", ret);
return ret;
}
- hisi_gpio->chip.set_config = hisi_gpio_set_config;
- hisi_gpio->chip.ngpio = hisi_gpio->line_num;
- hisi_gpio->chip.bgpio_dir_unreadable = 1;
- hisi_gpio->chip.base = -1;
+ hisi_gpio->chip.gc.set_config = hisi_gpio_set_config;
+ hisi_gpio->chip.gc.ngpio = hisi_gpio->line_num;
+ hisi_gpio->chip.gc.base = -1;
if (hisi_gpio->irq > 0)
hisi_gpio_init_irq(hisi_gpio);
- ret = devm_gpiochip_add_data(dev, &hisi_gpio->chip, hisi_gpio);
+ ret = devm_gpiochip_add_data(dev, &hisi_gpio->chip.gc, hisi_gpio);
if (ret) {
dev_err(dev, "failed to register gpiochip, ret = %d\n", ret);
return ret;
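
The hisi hunk above also shows how per-chip quirk fields fold into the config flags: the removed bgpio_dir_unreadable = 1 assignment becomes GPIO_GENERIC_UNREADABLE_REG_DIR, declared up front because the split set/clear DDR registers cannot be read back. Just the config, with hypothetical offsets:

	config = (struct gpio_generic_chip_config) {
		.dev = dev,
		.sz = 4,
		.dat = base + FOO_IN_REG,
		.set = base + FOO_OUT_SET_REG,
		.clr = base + FOO_OUT_CLR_REG,
		.dirout = base + FOO_DIR_SET_REG,
		.dirin = base + FOO_DIR_CLR_REG,
		/* Direction is written via set/clr but never read back. */
		.flags = GPIO_GENERIC_NO_SET_ON_INPUT |
			 GPIO_GENERIC_UNREADABLE_REG_DIR,
	};
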
diff --git a/drivers/gpio/gpio-hlwd.c b/drivers/gpio/gpio-hlwd.c
index 0580f6712bea..043ce5ef3b07 100644
--- a/drivers/gpio/gpio-hlwd.c
+++ b/drivers/gpio/gpio-hlwd.c
@@ -6,6 +6,7 @@
// Nintendo Wii (Hollywood) GPIO driver
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -48,7 +49,7 @@
#define HW_GPIO_OWNER 0x3c
struct hlwd_gpio {
- struct gpio_chip gpioc;
+ struct gpio_generic_chip gpioc;
struct device *dev;
void __iomem *regs;
int irq;
@@ -61,45 +62,44 @@ static void hlwd_gpio_irqhandler(struct irq_desc *desc)
struct hlwd_gpio *hlwd =
gpiochip_get_data(irq_desc_get_handler_data(desc));
struct irq_chip *chip = irq_desc_get_chip(desc);
- unsigned long flags;
unsigned long pending;
int hwirq;
u32 emulated_pending;
- raw_spin_lock_irqsave(&hlwd->gpioc.bgpio_lock, flags);
- pending = ioread32be(hlwd->regs + HW_GPIOB_INTFLAG);
- pending &= ioread32be(hlwd->regs + HW_GPIOB_INTMASK);
+ scoped_guard(gpio_generic_lock_irqsave, &hlwd->gpioc) {
+ pending = ioread32be(hlwd->regs + HW_GPIOB_INTFLAG);
+ pending &= ioread32be(hlwd->regs + HW_GPIOB_INTMASK);
- /* Treat interrupts due to edge trigger emulation separately */
- emulated_pending = hlwd->edge_emulation & pending;
- pending &= ~emulated_pending;
- if (emulated_pending) {
- u32 level, rising, falling;
+ /* Treat interrupts due to edge trigger emulation separately */
+ emulated_pending = hlwd->edge_emulation & pending;
+ pending &= ~emulated_pending;
+ if (emulated_pending) {
+ u32 level, rising, falling;
- level = ioread32be(hlwd->regs + HW_GPIOB_INTLVL);
- rising = level & emulated_pending;
- falling = ~level & emulated_pending;
+ level = ioread32be(hlwd->regs + HW_GPIOB_INTLVL);
+ rising = level & emulated_pending;
+ falling = ~level & emulated_pending;
- /* Invert the levels */
- iowrite32be(level ^ emulated_pending,
- hlwd->regs + HW_GPIOB_INTLVL);
+ /* Invert the levels */
+ iowrite32be(level ^ emulated_pending,
+ hlwd->regs + HW_GPIOB_INTLVL);
- /* Ack all emulated-edge interrupts */
- iowrite32be(emulated_pending, hlwd->regs + HW_GPIOB_INTFLAG);
+ /* Ack all emulated-edge interrupts */
+ iowrite32be(emulated_pending, hlwd->regs + HW_GPIOB_INTFLAG);
- /* Signal interrupts only on the correct edge */
- rising &= hlwd->rising_edge;
- falling &= hlwd->falling_edge;
+ /* Signal interrupts only on the correct edge */
+ rising &= hlwd->rising_edge;
+ falling &= hlwd->falling_edge;
- /* Mark emulated interrupts as pending */
- pending |= rising | falling;
+ /* Mark emulated interrupts as pending */
+ pending |= rising | falling;
+ }
}
- raw_spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags);
chained_irq_enter(chip, desc);
for_each_set_bit(hwirq, &pending, 32)
- generic_handle_domain_irq(hlwd->gpioc.irq.domain, hwirq);
+ generic_handle_domain_irq(hlwd->gpioc.gc.irq.domain, hwirq);
chained_irq_exit(chip, desc);
}
@@ -116,30 +116,29 @@ static void hlwd_gpio_irq_mask(struct irq_data *data)
{
struct hlwd_gpio *hlwd =
gpiochip_get_data(irq_data_get_irq_chip_data(data));
- unsigned long flags;
u32 mask;
- raw_spin_lock_irqsave(&hlwd->gpioc.bgpio_lock, flags);
- mask = ioread32be(hlwd->regs + HW_GPIOB_INTMASK);
- mask &= ~BIT(data->hwirq);
- iowrite32be(mask, hlwd->regs + HW_GPIOB_INTMASK);
- raw_spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags);
- gpiochip_disable_irq(&hlwd->gpioc, irqd_to_hwirq(data));
+ scoped_guard(gpio_generic_lock_irqsave, &hlwd->gpioc) {
+ mask = ioread32be(hlwd->regs + HW_GPIOB_INTMASK);
+ mask &= ~BIT(data->hwirq);
+ iowrite32be(mask, hlwd->regs + HW_GPIOB_INTMASK);
+ }
+ gpiochip_disable_irq(&hlwd->gpioc.gc, irqd_to_hwirq(data));
}
static void hlwd_gpio_irq_unmask(struct irq_data *data)
{
struct hlwd_gpio *hlwd =
gpiochip_get_data(irq_data_get_irq_chip_data(data));
- unsigned long flags;
u32 mask;
- gpiochip_enable_irq(&hlwd->gpioc, irqd_to_hwirq(data));
- raw_spin_lock_irqsave(&hlwd->gpioc.bgpio_lock, flags);
+ gpiochip_enable_irq(&hlwd->gpioc.gc, irqd_to_hwirq(data));
+
+ guard(gpio_generic_lock_irqsave)(&hlwd->gpioc);
+
mask = ioread32be(hlwd->regs + HW_GPIOB_INTMASK);
mask |= BIT(data->hwirq);
iowrite32be(mask, hlwd->regs + HW_GPIOB_INTMASK);
- raw_spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags);
}
static void hlwd_gpio_irq_enable(struct irq_data *data)
@@ -173,10 +172,9 @@ static int hlwd_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type)
{
struct hlwd_gpio *hlwd =
gpiochip_get_data(irq_data_get_irq_chip_data(data));
- unsigned long flags;
u32 level;
- raw_spin_lock_irqsave(&hlwd->gpioc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&hlwd->gpioc);
hlwd->edge_emulation &= ~BIT(data->hwirq);
@@ -197,11 +195,9 @@ static int hlwd_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type)
hlwd_gpio_irq_setup_emulation(hlwd, data->hwirq, flow_type);
break;
default:
- raw_spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags);
return -EINVAL;
}
- raw_spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags);
return 0;
}
@@ -225,6 +221,7 @@ static const struct irq_chip hlwd_gpio_irq_chip = {
static int hlwd_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct hlwd_gpio *hlwd;
u32 ngpios;
int res;
@@ -244,25 +241,31 @@ static int hlwd_gpio_probe(struct platform_device *pdev)
* systems where the AHBPROT memory firewall hasn't been configured to
* permit PPC access to HW_GPIO_*.
*
- * Note that this has to happen before bgpio_init reads the
- * HW_GPIOB_OUT and HW_GPIOB_DIR, because otherwise it reads the wrong
- * values.
+ * Note that this has to happen before gpio_generic_chip_init() reads
+ * the HW_GPIOB_OUT and HW_GPIOB_DIR, because otherwise it reads the
+ * wrong values.
*/
iowrite32be(0xffffffff, hlwd->regs + HW_GPIO_OWNER);
- res = bgpio_init(&hlwd->gpioc, &pdev->dev, 4,
- hlwd->regs + HW_GPIOB_IN, hlwd->regs + HW_GPIOB_OUT,
- NULL, hlwd->regs + HW_GPIOB_DIR, NULL,
- BGPIOF_BIG_ENDIAN_BYTE_ORDER);
+ config = (struct gpio_generic_chip_config) {
+ .dev = &pdev->dev,
+ .sz = 4,
+ .dat = hlwd->regs + HW_GPIOB_IN,
+ .set = hlwd->regs + HW_GPIOB_OUT,
+ .dirout = hlwd->regs + HW_GPIOB_DIR,
+ .flags = GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER,
+ };
+
+ res = gpio_generic_chip_init(&hlwd->gpioc, &config);
if (res < 0) {
- dev_warn(&pdev->dev, "bgpio_init failed: %d\n", res);
+ dev_warn(&pdev->dev, "failed to initialize generic GPIO chip: %d\n", res);
return res;
}
res = of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpios);
if (res)
ngpios = 32;
- hlwd->gpioc.ngpio = ngpios;
+ hlwd->gpioc.gc.ngpio = ngpios;
/* Mask and ack all interrupts */
iowrite32be(0, hlwd->regs + HW_GPIOB_INTMASK);
@@ -282,7 +285,7 @@ static int hlwd_gpio_probe(struct platform_device *pdev)
return hlwd->irq;
}
- girq = &hlwd->gpioc.irq;
+ girq = &hlwd->gpioc.gc.irq;
gpio_irq_chip_set_chip(girq, &hlwd_gpio_irq_chip);
girq->parent_handler = hlwd_gpio_irqhandler;
girq->num_parents = 1;
@@ -296,7 +299,7 @@ static int hlwd_gpio_probe(struct platform_device *pdev)
girq->handler = handle_level_irq;
}
- return devm_gpiochip_add_data(&pdev->dev, &hlwd->gpioc, hlwd);
+ return devm_gpiochip_add_data(&pdev->dev, &hlwd->gpioc.gc, hlwd);
}
static const struct of_device_id hlwd_gpio_match[] = {
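The hlwd hunks above are the template for the locking conversions throughout this series: open-coded raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() pairs on the old bgpio_lock become cleanup.h-style guards on the generic chip, which also removes the easy-to-miss unlocks on early-return paths. A minimal sketch of the pattern (the FOO_* register names are illustrative, not from any driver here):

static int foo_gpio_irq_set_type(struct foo_gpio *foo, irq_hw_number_t hwirq,
				 unsigned int type)
{
	u32 val;

	/* Lock dropped automatically wherever the function returns. */
	guard(gpio_generic_lock_irqsave)(&foo->chip);

	if (type == IRQ_TYPE_NONE)
		return -EINVAL;

	val = ioread32(foo->regs + FOO_INTTYPE);
	val |= BIT(hwirq);
	iowrite32(val, foo->regs + FOO_INTTYPE);

	return 0;
}

When only part of a function must run under the lock (e.g. the mask path, where the driver calls gpiochip_disable_irq() only after dropping the lock), the scoped form is used instead:

	scoped_guard(gpio_generic_lock_irqsave, &foo->chip) {
		/* register writes under the lock */
	}
	gpiochip_disable_irq(&foo->chip.gc, hwirq);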
diff --git a/drivers/gpio/gpio-htc-egpio.c b/drivers/gpio/gpio-htc-egpio.c
index b1844a676c7c..2eaed83214d8 100644
--- a/drivers/gpio/gpio-htc-egpio.c
+++ b/drivers/gpio/gpio-htc-egpio.c
@@ -324,7 +324,7 @@ static int __init egpio_probe(struct platform_device *pdev)
chip->parent = &pdev->dev;
chip->owner = THIS_MODULE;
chip->get = egpio_get;
- chip->set_rv = egpio_set;
+ chip->set = egpio_set;
chip->direction_input = egpio_direction_input;
chip->direction_output = egpio_direction_output;
chip->get_direction = egpio_get_direction;
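The one-line egpio hunk is the same mechanical rename applied to dozens of drivers below: the int-returning setter that was temporarily named set_rv (and set_multiple_rv) during the move away from the void-returning callbacks now takes back the plain set/set_multiple names. A sketch with a hypothetical callback, to make the before/after explicit:

static int foo_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
	/* Propagate bus errors instead of swallowing them. */
	return foo_write_bit(gc, offset, value);	/* hypothetical helper */
}

	gc->set = foo_gpio_set;				/* was: gc->set_rv */
	gc->set_multiple = foo_gpio_set_multiple;	/* was: gc->set_multiple_rv */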
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index 67089b2423d8..1802c9116ffe 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -273,7 +273,7 @@ static void ichx_gpiolib_setup(struct gpio_chip *chip)
chip->get = ichx_priv.desc->get ?
ichx_priv.desc->get : ichx_gpio_get;
- chip->set_rv = ichx_gpio_set;
+ chip->set = ichx_gpio_set;
chip->get_direction = ichx_gpio_get_direction;
chip->direction_input = ichx_gpio_direction_input;
chip->direction_output = ichx_gpio_direction_output;
diff --git a/drivers/gpio/gpio-idt3243x.c b/drivers/gpio/gpio-idt3243x.c
index 535f25514455..56f1f1e57b69 100644
--- a/drivers/gpio/gpio-idt3243x.c
+++ b/drivers/gpio/gpio-idt3243x.c
@@ -3,6 +3,7 @@
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
@@ -18,7 +19,7 @@
#define IDT_GPIO_ISTAT 0x0C
struct idt_gpio_ctrl {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *pic;
void __iomem *gpio;
u32 mask_cache;
@@ -50,14 +51,13 @@ static int idt_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct idt_gpio_ctrl *ctrl = gpiochip_get_data(gc);
unsigned int sense = flow_type & IRQ_TYPE_SENSE_MASK;
- unsigned long flags;
u32 ilevel;
/* hardware only supports level triggered */
if (sense == IRQ_TYPE_NONE || (sense & IRQ_TYPE_EDGE_BOTH))
return -EINVAL;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&ctrl->chip);
ilevel = readl(ctrl->gpio + IDT_GPIO_ILEVEL);
if (sense & IRQ_TYPE_LEVEL_HIGH)
@@ -68,7 +68,6 @@ static int idt_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
writel(ilevel, ctrl->gpio + IDT_GPIO_ILEVEL);
irq_set_handler_locked(d, handle_level_irq);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
return 0;
}
@@ -84,14 +83,11 @@ static void idt_gpio_mask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct idt_gpio_ctrl *ctrl = gpiochip_get_data(gc);
- unsigned long flags;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
-
- ctrl->mask_cache |= BIT(d->hwirq);
- writel(ctrl->mask_cache, ctrl->pic + IDT_PIC_IRQ_MASK);
-
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ scoped_guard(gpio_generic_lock_irqsave, &ctrl->chip) {
+ ctrl->mask_cache |= BIT(d->hwirq);
+ writel(ctrl->mask_cache, ctrl->pic + IDT_PIC_IRQ_MASK);
+ }
gpiochip_disable_irq(gc, irqd_to_hwirq(d));
}
@@ -100,15 +96,13 @@ static void idt_gpio_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct idt_gpio_ctrl *ctrl = gpiochip_get_data(gc);
- unsigned long flags;
gpiochip_enable_irq(gc, irqd_to_hwirq(d));
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+
+ guard(gpio_generic_lock_irqsave)(&ctrl->chip);
ctrl->mask_cache &= ~BIT(d->hwirq);
writel(ctrl->mask_cache, ctrl->pic + IDT_PIC_IRQ_MASK);
-
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
static int idt_gpio_irq_init_hw(struct gpio_chip *gc)
@@ -134,6 +128,7 @@ static const struct irq_chip idt_gpio_irqchip = {
static int idt_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct gpio_irq_chip *girq;
struct idt_gpio_ctrl *ctrl;
@@ -150,18 +145,24 @@ static int idt_gpio_probe(struct platform_device *pdev)
if (IS_ERR(ctrl->gpio))
return PTR_ERR(ctrl->gpio);
- ctrl->gc.parent = dev;
+ ctrl->chip.gc.parent = dev;
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = &pdev->dev,
+ .sz = 4,
+ .dat = ctrl->gpio + IDT_GPIO_DATA,
+ .dirout = ctrl->gpio + IDT_GPIO_DIR,
+ };
- ret = bgpio_init(&ctrl->gc, &pdev->dev, 4, ctrl->gpio + IDT_GPIO_DATA,
- NULL, NULL, ctrl->gpio + IDT_GPIO_DIR, NULL, 0);
+ ret = gpio_generic_chip_init(&ctrl->chip, &config);
if (ret) {
- dev_err(dev, "bgpio_init failed\n");
+ dev_err(dev, "failed to initialize the generic GPIO chip\n");
return ret;
}
ret = device_property_read_u32(dev, "ngpios", &ngpios);
if (!ret)
- ctrl->gc.ngpio = ngpios;
+ ctrl->chip.gc.ngpio = ngpios;
if (device_property_read_bool(dev, "interrupt-controller")) {
ctrl->pic = devm_platform_ioremap_resource_byname(pdev, "pic");
@@ -172,7 +173,7 @@ static int idt_gpio_probe(struct platform_device *pdev)
if (parent_irq < 0)
return parent_irq;
- girq = &ctrl->gc.irq;
+ girq = &ctrl->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &idt_gpio_irqchip);
girq->init_hw = idt_gpio_irq_init_hw;
girq->parent_handler = idt_gpio_dispatch;
@@ -188,7 +189,7 @@ static int idt_gpio_probe(struct platform_device *pdev)
girq->handler = handle_bad_irq;
}
- return devm_gpiochip_add_data(&pdev->dev, &ctrl->gc, ctrl);
+ return devm_gpiochip_add_data(&pdev->dev, &ctrl->chip.gc, ctrl);
}
static const struct of_device_id idt_gpio_of_match[] = {
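Both drivers above also show the probe-side half of the conversion: the positional bgpio_init() call, with its run of NULL placeholders, becomes a designated-initializer config passed to gpio_generic_chip_init(). A sketch under the same assumptions (illustrative base/REG_* names; only the registers a device actually has are filled in):

	struct gpio_generic_chip_config config = {
		.dev	= dev,
		.sz	= 4,			/* register width in bytes */
		.dat	= base + REG_IN,	/* data-in register */
		.set	= base + REG_OUT,	/* data-out register */
		.dirout	= base + REG_DIR,	/* direction, 1 = output */
	};

	ret = gpio_generic_chip_init(&priv->chip, &config);
	if (ret)
		return dev_err_probe(dev, ret,
				     "failed to initialize the generic GPIO chip\n");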
diff --git a/drivers/gpio/gpio-imx-scu.c b/drivers/gpio/gpio-imx-scu.c
index 1693dbf1b777..0a75afecf9f8 100644
--- a/drivers/gpio/gpio-imx-scu.c
+++ b/drivers/gpio/gpio-imx-scu.c
@@ -102,7 +102,7 @@ static int imx_scu_gpio_probe(struct platform_device *pdev)
gc->ngpio = ARRAY_SIZE(scu_rsrc_arr);
gc->label = dev_name(dev);
gc->get = imx_scu_gpio_get;
- gc->set_rv = imx_scu_gpio_set;
+ gc->set = imx_scu_gpio_set;
gc->get_direction = imx_scu_gpio_get_direction;
platform_set_drvdata(pdev, priv);
diff --git a/drivers/gpio/gpio-it87.c b/drivers/gpio/gpio-it87.c
index d8184b527bac..5d677bcfccf2 100644
--- a/drivers/gpio/gpio-it87.c
+++ b/drivers/gpio/gpio-it87.c
@@ -267,7 +267,7 @@ static const struct gpio_chip it87_template_chip = {
.request = it87_gpio_request,
.get = it87_gpio_get,
.direction_input = it87_gpio_direction_in,
- .set_rv = it87_gpio_set,
+ .set = it87_gpio_set,
.direction_output = it87_gpio_direction_out,
.base = -1
};
diff --git a/drivers/gpio/gpio-ixp4xx.c b/drivers/gpio/gpio-ixp4xx.c
index 28a8a6a8f05f..f34d87869c8b 100644
--- a/drivers/gpio/gpio-ixp4xx.c
+++ b/drivers/gpio/gpio-ixp4xx.c
@@ -8,6 +8,7 @@
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
@@ -53,14 +54,14 @@
/**
* struct ixp4xx_gpio - IXP4 GPIO state container
+ * @chip: generic GPIO chip for this instance
* @dev: containing device for this instance
- * @gc: gpiochip for this instance
* @base: remapped I/O-memory base
* @irq_edge: Each bit represents an IRQ: 1: edge-triggered,
* 0: level-triggered
*/
struct ixp4xx_gpio {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
struct device *dev;
void __iomem *base;
unsigned long long irq_edge;
@@ -100,7 +101,6 @@ static int ixp4xx_gpio_irq_set_type(struct irq_data *d, unsigned int type)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct ixp4xx_gpio *g = gpiochip_get_data(gc);
int line = d->hwirq;
- unsigned long flags;
u32 int_style;
u32 int_reg;
u32 val;
@@ -144,26 +144,24 @@ static int ixp4xx_gpio_irq_set_type(struct irq_data *d, unsigned int type)
int_reg = IXP4XX_REG_GPIT1;
}
- raw_spin_lock_irqsave(&g->gc.bgpio_lock, flags);
-
- /* Clear the style for the appropriate pin */
- val = __raw_readl(g->base + int_reg);
- val &= ~(IXP4XX_GPIO_STYLE_MASK << (line * IXP4XX_GPIO_STYLE_SIZE));
- __raw_writel(val, g->base + int_reg);
-
- __raw_writel(BIT(line), g->base + IXP4XX_REG_GPIS);
+ scoped_guard(gpio_generic_lock_irqsave, &g->chip) {
+ /* Clear the style for the appropriate pin */
+ val = __raw_readl(g->base + int_reg);
+ val &= ~(IXP4XX_GPIO_STYLE_MASK << (line * IXP4XX_GPIO_STYLE_SIZE));
+ __raw_writel(val, g->base + int_reg);
- /* Set the new style */
- val = __raw_readl(g->base + int_reg);
- val |= (int_style << (line * IXP4XX_GPIO_STYLE_SIZE));
- __raw_writel(val, g->base + int_reg);
+ __raw_writel(BIT(line), g->base + IXP4XX_REG_GPIS);
- /* Force-configure this line as an input */
- val = __raw_readl(g->base + IXP4XX_REG_GPOE);
- val |= BIT(d->hwirq);
- __raw_writel(val, g->base + IXP4XX_REG_GPOE);
+ /* Set the new style */
+ val = __raw_readl(g->base + int_reg);
+ val |= (int_style << (line * IXP4XX_GPIO_STYLE_SIZE));
+ __raw_writel(val, g->base + int_reg);
- raw_spin_unlock_irqrestore(&g->gc.bgpio_lock, flags);
+ /* Force-configure this line as an input */
+ val = __raw_readl(g->base + IXP4XX_REG_GPOE);
+ val |= BIT(d->hwirq);
+ __raw_writel(val, g->base + IXP4XX_REG_GPOE);
+ }
/* This parent only accepts level high (asserted) */
return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
@@ -206,6 +204,7 @@ static int ixp4xx_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
static int ixp4xx_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
unsigned long flags;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
@@ -290,35 +289,38 @@ static int ixp4xx_gpio_probe(struct platform_device *pdev)
* for big endian.
*/
#if defined(CONFIG_CPU_BIG_ENDIAN)
- flags = BGPIOF_BIG_ENDIAN_BYTE_ORDER;
+ flags = GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER;
#else
flags = 0;
#endif
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = g->base + IXP4XX_REG_GPIN,
+ .set = g->base + IXP4XX_REG_GPOUT,
+ .dirin = g->base + IXP4XX_REG_GPOE,
+ .flags = flags,
+ };
+
/* Populate and register gpio chip */
- ret = bgpio_init(&g->gc, dev, 4,
- g->base + IXP4XX_REG_GPIN,
- g->base + IXP4XX_REG_GPOUT,
- NULL,
- NULL,
- g->base + IXP4XX_REG_GPOE,
- flags);
+ ret = gpio_generic_chip_init(&g->chip, &config);
if (ret) {
dev_err(dev, "unable to init generic GPIO\n");
return ret;
}
- g->gc.ngpio = 16;
- g->gc.label = "IXP4XX_GPIO_CHIP";
+ g->chip.gc.ngpio = 16;
+ g->chip.gc.label = "IXP4XX_GPIO_CHIP";
/*
* TODO: when we have migrated to device tree and all GPIOs
* are fetched using phandles, set this to -1 to get rid of
* the fixed gpiochip base.
*/
- g->gc.base = 0;
- g->gc.parent = &pdev->dev;
- g->gc.owner = THIS_MODULE;
+ g->chip.gc.base = 0;
+ g->chip.gc.parent = &pdev->dev;
+ g->chip.gc.owner = THIS_MODULE;
- girq = &g->gc.irq;
+ girq = &g->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &ixp4xx_gpio_irqchip);
girq->fwnode = dev_fwnode(dev);
girq->parent_domain = parent;
@@ -326,7 +328,7 @@ static int ixp4xx_gpio_probe(struct platform_device *pdev)
girq->handler = handle_bad_irq;
girq->default_type = IRQ_TYPE_NONE;
- ret = devm_gpiochip_add_data(dev, &g->gc, g);
+ ret = devm_gpiochip_add_data(dev, &g->chip.gc, g);
if (ret) {
dev_err(dev, "failed to add SoC gpiochip\n");
return ret;
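The state-container change in ixp4xx is representative of the struct surgery: the driver now embeds struct gpio_generic_chip (which itself wraps struct gpio_chip as its gc member), so every direct gpio_chip access gains one level of indirection. A sketch of the resulting layout and registration, assuming the embedding shown above:

struct foo_gpio {
	struct gpio_generic_chip chip;	/* .gc is the embedded gpio_chip */
	void __iomem *base;
};

	foo->chip.gc.ngpio = 16;
	foo->chip.gc.owner = THIS_MODULE;
	girq = &foo->chip.gc.irq;

	ret = devm_gpiochip_add_data(dev, &foo->chip.gc, foo);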
diff --git a/drivers/gpio/gpio-janz-ttl.c b/drivers/gpio/gpio-janz-ttl.c
index 9f548eda3888..b0c4a3346e7d 100644
--- a/drivers/gpio/gpio-janz-ttl.c
+++ b/drivers/gpio/gpio-janz-ttl.c
@@ -171,7 +171,7 @@ static int ttl_probe(struct platform_device *pdev)
gpio->parent = &pdev->dev;
gpio->label = pdev->name;
gpio->get = ttl_get_value;
- gpio->set_rv = ttl_set_value;
+ gpio->set = ttl_set_value;
gpio->owner = THIS_MODULE;
/* request dynamic allocation */
diff --git a/drivers/gpio/gpio-kempld.c b/drivers/gpio/gpio-kempld.c
index e38e604baa22..923aad3ab4d4 100644
--- a/drivers/gpio/gpio-kempld.c
+++ b/drivers/gpio/gpio-kempld.c
@@ -169,7 +169,7 @@ static int kempld_gpio_probe(struct platform_device *pdev)
chip->direction_output = kempld_gpio_direction_output;
chip->get_direction = kempld_gpio_get_direction;
chip->get = kempld_gpio_get;
- chip->set_rv = kempld_gpio_set;
+ chip->set = kempld_gpio_set;
chip->ngpio = kempld_gpio_pincount(pld);
if (chip->ngpio == 0) {
dev_err(dev, "No GPIO pins detected\n");
diff --git a/drivers/gpio/gpio-latch.c b/drivers/gpio/gpio-latch.c
index 3d0ff09284fb..c64aaa896766 100644
--- a/drivers/gpio/gpio-latch.c
+++ b/drivers/gpio/gpio-latch.c
@@ -166,11 +166,11 @@ static int gpio_latch_probe(struct platform_device *pdev)
if (gpio_latch_can_sleep(priv, n_latches)) {
priv->gc.can_sleep = true;
- priv->gc.set_rv = gpio_latch_set_can_sleep;
+ priv->gc.set = gpio_latch_set_can_sleep;
mutex_init(&priv->mutex);
} else {
priv->gc.can_sleep = false;
- priv->gc.set_rv = gpio_latch_set;
+ priv->gc.set = gpio_latch_set;
spin_lock_init(&priv->spinlock);
}
diff --git a/drivers/gpio/gpio-ljca.c b/drivers/gpio/gpio-ljca.c
index 61524a9ba765..3b4f8830c741 100644
--- a/drivers/gpio/gpio-ljca.c
+++ b/drivers/gpio/gpio-ljca.c
@@ -437,7 +437,7 @@ static int ljca_gpio_probe(struct auxiliary_device *auxdev,
ljca_gpio->gc.direction_output = ljca_gpio_direction_output;
ljca_gpio->gc.get_direction = ljca_gpio_get_direction;
ljca_gpio->gc.get = ljca_gpio_get_value;
- ljca_gpio->gc.set_rv = ljca_gpio_set_value;
+ ljca_gpio->gc.set = ljca_gpio_set_value;
ljca_gpio->gc.set_config = ljca_gpio_set_config;
ljca_gpio->gc.init_valid_mask = ljca_gpio_init_valid_mask;
ljca_gpio->gc.can_sleep = true;
diff --git a/drivers/gpio/gpio-logicvc.c b/drivers/gpio/gpio-logicvc.c
index 19cd2847467c..cb9dbcc290ad 100644
--- a/drivers/gpio/gpio-logicvc.c
+++ b/drivers/gpio/gpio-logicvc.c
@@ -134,7 +134,7 @@ static int logicvc_gpio_probe(struct platform_device *pdev)
logicvc->chip.ngpio = LOGICVC_CTRL_GPIO_BITS +
LOGICVC_POWER_CTRL_GPIO_BITS;
logicvc->chip.get = logicvc_gpio_get;
- logicvc->chip.set_rv = logicvc_gpio_set;
+ logicvc->chip.set = logicvc_gpio_set;
logicvc->chip.direction_output = logicvc_gpio_direction_output;
return devm_gpiochip_add_data(dev, &logicvc->chip, logicvc);
diff --git a/drivers/gpio/gpio-loongson-64bit.c b/drivers/gpio/gpio-loongson-64bit.c
index add09971d26a..02f181cb219e 100644
--- a/drivers/gpio/gpio-loongson-64bit.c
+++ b/drivers/gpio/gpio-loongson-64bit.c
@@ -7,12 +7,16 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/platform_device.h>
#include <linux/bitops.h>
+#include <linux/reset.h>
#include <asm/types.h>
enum loongson_gpio_mode {
@@ -27,10 +31,18 @@ struct loongson_gpio_chip_data {
unsigned int out_offset;
unsigned int in_offset;
unsigned int inten_offset;
+ unsigned int intpol_offset;
+ unsigned int intedge_offset;
+ unsigned int intclr_offset;
+ unsigned int intsts_offset;
+ unsigned int intdual_offset;
+ unsigned int intr_num;
+ irq_flow_handler_t irq_handler;
+ const struct irq_chip *girqchip;
};
struct loongson_gpio_chip {
- struct gpio_chip chip;
+ struct gpio_generic_chip chip;
spinlock_t lock;
void __iomem *reg_base;
const struct loongson_gpio_chip_data *chip_data;
@@ -38,7 +50,8 @@ struct loongson_gpio_chip {
static inline struct loongson_gpio_chip *to_loongson_gpio_chip(struct gpio_chip *chip)
{
- return container_of(chip, struct loongson_gpio_chip, chip);
+ return container_of(to_gpio_generic_chip(chip),
+ struct loongson_gpio_chip, chip);
}
static inline void loongson_commit_direction(struct loongson_gpio_chip *lgpio, unsigned int pin,
@@ -135,39 +148,184 @@ static int loongson_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
return platform_get_irq(pdev, offset);
}
-static int loongson_gpio_init(struct device *dev, struct loongson_gpio_chip *lgpio,
+static void loongson_gpio_irq_ack(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct loongson_gpio_chip *lgpio = to_loongson_gpio_chip(chip);
+ irq_hw_number_t hwirq = irqd_to_hwirq(data);
+
+ writeb(0x1, lgpio->reg_base + lgpio->chip_data->intclr_offset + hwirq);
+}
+
+static void loongson_gpio_irq_mask(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct loongson_gpio_chip *lgpio = to_loongson_gpio_chip(chip);
+ irq_hw_number_t hwirq = irqd_to_hwirq(data);
+
+ writeb(0x0, lgpio->reg_base + lgpio->chip_data->inten_offset + hwirq);
+}
+
+static void loongson_gpio_irq_unmask(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct loongson_gpio_chip *lgpio = to_loongson_gpio_chip(chip);
+ irq_hw_number_t hwirq = irqd_to_hwirq(data);
+
+ writeb(0x1, lgpio->reg_base + lgpio->chip_data->inten_offset + hwirq);
+}
+
+static int loongson_gpio_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct loongson_gpio_chip *lgpio = to_loongson_gpio_chip(chip);
+ irq_hw_number_t hwirq = irqd_to_hwirq(data);
+ u8 pol = 0, edge = 0, dual = 0;
+
+ if ((type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
+ edge = 1;
+ dual = 1;
+ irq_set_handler_locked(data, handle_edge_irq);
+ } else {
+ switch (type) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ pol = 1;
+ fallthrough;
+ case IRQ_TYPE_LEVEL_LOW:
+ irq_set_handler_locked(data, handle_level_irq);
+ break;
+
+ case IRQ_TYPE_EDGE_RISING:
+ pol = 1;
+ fallthrough;
+ case IRQ_TYPE_EDGE_FALLING:
+ edge = 1;
+ irq_set_handler_locked(data, handle_edge_irq);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ writeb(pol, lgpio->reg_base + lgpio->chip_data->intpol_offset + hwirq);
+ writeb(edge, lgpio->reg_base + lgpio->chip_data->intedge_offset + hwirq);
+ writeb(dual, lgpio->reg_base + lgpio->chip_data->intdual_offset + hwirq);
+
+ return 0;
+}
+
+static void loongson_gpio_ls2k0300_irq_handler(struct irq_desc *desc)
+{
+ struct loongson_gpio_chip *lgpio = irq_desc_get_handler_data(desc);
+ struct irq_chip *girqchip = irq_desc_get_chip(desc);
+ int i;
+
+ chained_irq_enter(girqchip, desc);
+
+ for (i = 0; i < lgpio->chip.gc.ngpio; i++) {
+ /*
+ * For the GPIO controller of LS2K0300, interrupt status bits
+ * may be wrongly set even if the corresponding interrupt is
+ * disabled. Thus interrupt enable bits are checked along with
+ * status bits to detect interrupts reliably.
+ */
+ if (readb(lgpio->reg_base + lgpio->chip_data->intsts_offset + i) &&
+ readb(lgpio->reg_base + lgpio->chip_data->inten_offset + i))
+ generic_handle_domain_irq(lgpio->chip.gc.irq.domain, i);
+ }
+
+ chained_irq_exit(girqchip, desc);
+}
+
+static const struct irq_chip loongson_gpio_ls2k0300_irqchip = {
+ .irq_ack = loongson_gpio_irq_ack,
+ .irq_mask = loongson_gpio_irq_mask,
+ .irq_unmask = loongson_gpio_irq_unmask,
+ .irq_set_type = loongson_gpio_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE | IRQCHIP_SKIP_SET_WAKE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static int loongson_gpio_init_irqchip(struct platform_device *pdev,
+ struct loongson_gpio_chip *lgpio)
+{
+ const struct loongson_gpio_chip_data *data = lgpio->chip_data;
+ struct gpio_chip *chip = &lgpio->chip.gc;
+ int i;
+
+ chip->irq.default_type = IRQ_TYPE_NONE;
+ chip->irq.handler = handle_bad_irq;
+ chip->irq.parent_handler = data->irq_handler;
+ chip->irq.parent_handler_data = lgpio;
+ gpio_irq_chip_set_chip(&chip->irq, data->girqchip);
+
+ chip->irq.num_parents = data->intr_num;
+ chip->irq.parents = devm_kcalloc(&pdev->dev, data->intr_num,
+ sizeof(*chip->irq.parents), GFP_KERNEL);
+ if (!chip->irq.parents)
+ return -ENOMEM;
+
+ for (i = 0; i < data->intr_num; i++) {
+ int ret;
+
+ ret = platform_get_irq(pdev, i);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to get IRQ %d\n", i);
+ chip->irq.parents[i] = ret;
+ }
+
+ for (i = 0; i < data->intr_num; i++) {
+ writeb(0x0, lgpio->reg_base + data->inten_offset + i);
+ writeb(0x1, lgpio->reg_base + data->intclr_offset + i);
+ }
+
+ return 0;
+}
+
+static int loongson_gpio_init(struct platform_device *pdev, struct loongson_gpio_chip *lgpio,
void __iomem *reg_base)
{
+ struct gpio_generic_chip_config config;
int ret;
lgpio->reg_base = reg_base;
if (lgpio->chip_data->mode == BIT_CTRL_MODE) {
- ret = bgpio_init(&lgpio->chip, dev, 8,
- lgpio->reg_base + lgpio->chip_data->in_offset,
- lgpio->reg_base + lgpio->chip_data->out_offset,
- NULL, NULL,
- lgpio->reg_base + lgpio->chip_data->conf_offset,
- 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = &pdev->dev,
+ .sz = 8,
+ .dat = lgpio->reg_base + lgpio->chip_data->in_offset,
+ .set = lgpio->reg_base + lgpio->chip_data->out_offset,
+ .dirin = lgpio->reg_base + lgpio->chip_data->conf_offset,
+ };
+
+ ret = gpio_generic_chip_init(&lgpio->chip, &config);
if (ret) {
- dev_err(dev, "unable to init generic GPIO\n");
+ dev_err(&pdev->dev, "unable to init generic GPIO\n");
return ret;
}
} else {
- lgpio->chip.direction_input = loongson_gpio_direction_input;
- lgpio->chip.get = loongson_gpio_get;
- lgpio->chip.get_direction = loongson_gpio_get_direction;
- lgpio->chip.direction_output = loongson_gpio_direction_output;
- lgpio->chip.set_rv = loongson_gpio_set;
- lgpio->chip.parent = dev;
+ lgpio->chip.gc.direction_input = loongson_gpio_direction_input;
+ lgpio->chip.gc.get = loongson_gpio_get;
+ lgpio->chip.gc.get_direction = loongson_gpio_get_direction;
+ lgpio->chip.gc.direction_output = loongson_gpio_direction_output;
+ lgpio->chip.gc.set = loongson_gpio_set;
+ lgpio->chip.gc.parent = &pdev->dev;
spin_lock_init(&lgpio->lock);
}
- lgpio->chip.label = lgpio->chip_data->label;
- lgpio->chip.can_sleep = false;
- if (lgpio->chip_data->inten_offset)
- lgpio->chip.to_irq = loongson_gpio_to_irq;
+ lgpio->chip.gc.label = lgpio->chip_data->label;
+ lgpio->chip.gc.can_sleep = false;
+ if (lgpio->chip_data->girqchip) {
+ ret = loongson_gpio_init_irqchip(pdev, lgpio);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "failed to initialize irqchip\n");
+ } else if (lgpio->chip_data->inten_offset) {
+ lgpio->chip.gc.to_irq = loongson_gpio_to_irq;
+ }
- return devm_gpiochip_add_data(dev, &lgpio->chip, lgpio);
+ return devm_gpiochip_add_data(&pdev->dev, &lgpio->chip.gc, lgpio);
}
static int loongson_gpio_probe(struct platform_device *pdev)
@@ -175,6 +333,7 @@ static int loongson_gpio_probe(struct platform_device *pdev)
void __iomem *reg_base;
struct loongson_gpio_chip *lgpio;
struct device *dev = &pdev->dev;
+ struct reset_control *rst;
lgpio = devm_kzalloc(dev, sizeof(*lgpio), GFP_KERNEL);
if (!lgpio)
@@ -186,7 +345,11 @@ static int loongson_gpio_probe(struct platform_device *pdev)
if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
- return loongson_gpio_init(dev, lgpio, reg_base);
+ rst = devm_reset_control_get_optional_exclusive_deasserted(&pdev->dev, NULL);
+ if (IS_ERR(rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rst), "failed to get reset control\n");
+
+ return loongson_gpio_init(pdev, lgpio, reg_base);
}
static const struct loongson_gpio_chip_data loongson_gpio_ls2k_data = {
@@ -198,6 +361,23 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls2k_data = {
.inten_offset = 0x30,
};
+static const struct loongson_gpio_chip_data loongson_gpio_ls2k0300_data = {
+ .label = "ls2k0300_gpio",
+ .mode = BYTE_CTRL_MODE,
+ .conf_offset = 0x800,
+ .in_offset = 0xa00,
+ .out_offset = 0x900,
+ .inten_offset = 0xb00,
+ .intpol_offset = 0xc00,
+ .intedge_offset = 0xd00,
+ .intclr_offset = 0xe00,
+ .intsts_offset = 0xf00,
+ .intdual_offset = 0xf80,
+ .intr_num = 7,
+ .irq_handler = loongson_gpio_ls2k0300_irq_handler,
+ .girqchip = &loongson_gpio_ls2k0300_irqchip,
+};
+
static const struct loongson_gpio_chip_data loongson_gpio_ls2k0500_data0 = {
.label = "ls2k0500_gpio",
.mode = BIT_CTRL_MODE,
@@ -295,6 +475,10 @@ static const struct of_device_id loongson_gpio_of_match[] = {
.data = &loongson_gpio_ls2k_data,
},
{
+ .compatible = "loongson,ls2k0300-gpio",
+ .data = &loongson_gpio_ls2k0300_data,
+ },
+ {
.compatible = "loongson,ls2k0500-gpio0",
.data = &loongson_gpio_ls2k0500_data0,
},
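For readers tracing the new ls2k0300 interrupt support: the three byte-wide per-pin registers written by loongson_gpio_irq_set_type() combine as follows (derived from the switch above):

/*
 * pol / edge / dual encoding per trigger type:
 *
 *   IRQ_TYPE_LEVEL_LOW      pol=0  edge=0  dual=0   (handle_level_irq)
 *   IRQ_TYPE_LEVEL_HIGH     pol=1  edge=0  dual=0   (handle_level_irq)
 *   IRQ_TYPE_EDGE_FALLING   pol=0  edge=1  dual=0   (handle_edge_irq)
 *   IRQ_TYPE_EDGE_RISING    pol=1  edge=1  dual=0   (handle_edge_irq)
 *   IRQ_TYPE_EDGE_BOTH      pol=0  edge=1  dual=1   (handle_edge_irq)
 */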
diff --git a/drivers/gpio/gpio-loongson.c b/drivers/gpio/gpio-loongson.c
index 8f3668169ebf..f3e0559f969d 100644
--- a/drivers/gpio/gpio-loongson.c
+++ b/drivers/gpio/gpio-loongson.c
@@ -106,7 +106,7 @@ static int loongson_gpio_probe(struct platform_device *pdev)
gc->base = 0;
gc->ngpio = LOONGSON_N_GPIO;
gc->get = loongson_gpio_get_value;
- gc->set_rv = loongson_gpio_set_value;
+ gc->set = loongson_gpio_set_value;
gc->direction_input = loongson_gpio_direction_input;
gc->direction_output = loongson_gpio_direction_output;
diff --git a/drivers/gpio/gpio-loongson1.c b/drivers/gpio/gpio-loongson1.c
index 6ca3b969db4d..9750a7a17508 100644
--- a/drivers/gpio/gpio-loongson1.c
+++ b/drivers/gpio/gpio-loongson1.c
@@ -5,10 +5,11 @@
* Copyright (C) 2015-2023 Keguang Zhang <keguang.zhang@gmail.com>
*/
+#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/platform_device.h>
-#include <linux/bitops.h>
/* Loongson 1 GPIO Register Definitions */
#define GPIO_CFG 0x0
@@ -17,19 +18,18 @@
#define GPIO_OUTPUT 0x30
struct ls1x_gpio_chip {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *reg_base;
};
static int ls1x_gpio_request(struct gpio_chip *gc, unsigned int offset)
{
struct ls1x_gpio_chip *ls1x_gc = gpiochip_get_data(gc);
- unsigned long flags;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&ls1x_gc->chip);
+
__raw_writel(__raw_readl(ls1x_gc->reg_base + GPIO_CFG) | BIT(offset),
ls1x_gc->reg_base + GPIO_CFG);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
return 0;
}
@@ -37,16 +37,16 @@ static int ls1x_gpio_request(struct gpio_chip *gc, unsigned int offset)
static void ls1x_gpio_free(struct gpio_chip *gc, unsigned int offset)
{
struct ls1x_gpio_chip *ls1x_gc = gpiochip_get_data(gc);
- unsigned long flags;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&ls1x_gc->chip);
+
__raw_writel(__raw_readl(ls1x_gc->reg_base + GPIO_CFG) & ~BIT(offset),
ls1x_gc->reg_base + GPIO_CFG);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
static int ls1x_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct ls1x_gpio_chip *ls1x_gc;
int ret;
@@ -59,29 +59,35 @@ static int ls1x_gpio_probe(struct platform_device *pdev)
if (IS_ERR(ls1x_gc->reg_base))
return PTR_ERR(ls1x_gc->reg_base);
- ret = bgpio_init(&ls1x_gc->gc, dev, 4, ls1x_gc->reg_base + GPIO_DATA,
- ls1x_gc->reg_base + GPIO_OUTPUT, NULL,
- NULL, ls1x_gc->reg_base + GPIO_DIR, 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = ls1x_gc->reg_base + GPIO_DATA,
+ .set = ls1x_gc->reg_base + GPIO_OUTPUT,
+ .dirin = ls1x_gc->reg_base + GPIO_DIR,
+ };
+
+ ret = gpio_generic_chip_init(&ls1x_gc->chip, &config);
if (ret)
goto err;
- ls1x_gc->gc.owner = THIS_MODULE;
- ls1x_gc->gc.request = ls1x_gpio_request;
- ls1x_gc->gc.free = ls1x_gpio_free;
+ ls1x_gc->chip.gc.owner = THIS_MODULE;
+ ls1x_gc->chip.gc.request = ls1x_gpio_request;
+ ls1x_gc->chip.gc.free = ls1x_gpio_free;
/*
* Clear ngpio to let gpiolib get the correct number
* by reading ngpios property
*/
- ls1x_gc->gc.ngpio = 0;
+ ls1x_gc->chip.gc.ngpio = 0;
- ret = devm_gpiochip_add_data(dev, &ls1x_gc->gc, ls1x_gc);
+ ret = devm_gpiochip_add_data(dev, &ls1x_gc->chip.gc, ls1x_gc);
if (ret)
goto err;
platform_set_drvdata(pdev, ls1x_gc);
dev_info(dev, "GPIO controller registered with %d pins\n",
- ls1x_gc->gc.ngpio);
+ ls1x_gc->chip.gc.ngpio);
return 0;
err:
diff --git a/drivers/gpio/gpio-lp3943.c b/drivers/gpio/gpio-lp3943.c
index 52ab3ac4844c..e8e00daff7df 100644
--- a/drivers/gpio/gpio-lp3943.c
+++ b/drivers/gpio/gpio-lp3943.c
@@ -184,7 +184,7 @@ static const struct gpio_chip lp3943_gpio_chip = {
.direction_input = lp3943_gpio_direction_input,
.get = lp3943_gpio_get,
.direction_output = lp3943_gpio_direction_output,
- .set_rv = lp3943_gpio_set,
+ .set = lp3943_gpio_set,
.base = -1,
.ngpio = LP3943_MAX_GPIO,
.can_sleep = 1,
diff --git a/drivers/gpio/gpio-lp873x.c b/drivers/gpio/gpio-lp873x.c
index 1908ed302e92..5376708a81bf 100644
--- a/drivers/gpio/gpio-lp873x.c
+++ b/drivers/gpio/gpio-lp873x.c
@@ -124,7 +124,7 @@ static const struct gpio_chip template_chip = {
.direction_input = lp873x_gpio_direction_input,
.direction_output = lp873x_gpio_direction_output,
.get = lp873x_gpio_get,
- .set_rv = lp873x_gpio_set,
+ .set = lp873x_gpio_set,
.set_config = lp873x_gpio_set_config,
.base = -1,
.ngpio = 2,
diff --git a/drivers/gpio/gpio-lp87565.c b/drivers/gpio/gpio-lp87565.c
index 8ea687d5d028..0f337c1283b2 100644
--- a/drivers/gpio/gpio-lp87565.c
+++ b/drivers/gpio/gpio-lp87565.c
@@ -139,7 +139,7 @@ static const struct gpio_chip template_chip = {
.direction_input = lp87565_gpio_direction_input,
.direction_output = lp87565_gpio_direction_output,
.get = lp87565_gpio_get,
- .set_rv = lp87565_gpio_set,
+ .set = lp87565_gpio_set,
.set_config = lp87565_gpio_set_config,
.base = -1,
.ngpio = 3,
diff --git a/drivers/gpio/gpio-lpc18xx.c b/drivers/gpio/gpio-lpc18xx.c
index 2dbfbf90176c..37a2342eb2e6 100644
--- a/drivers/gpio/gpio-lpc18xx.c
+++ b/drivers/gpio/gpio-lpc18xx.c
@@ -327,7 +327,7 @@ static const struct gpio_chip lpc18xx_chip = {
.free = gpiochip_generic_free,
.direction_input = lpc18xx_gpio_direction_input,
.direction_output = lpc18xx_gpio_direction_output,
- .set_rv = lpc18xx_gpio_set,
+ .set = lpc18xx_gpio_set,
.get = lpc18xx_gpio_get,
.ngpio = LPC18XX_MAX_PORTS * LPC18XX_PINS_PER_PORT,
.owner = THIS_MODULE,
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index 6668b8bd9f1e..37fc54fc7385 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -407,7 +407,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.direction_input = lpc32xx_gpio_dir_input_p012,
.get = lpc32xx_gpio_get_value_p012,
.direction_output = lpc32xx_gpio_dir_output_p012,
- .set_rv = lpc32xx_gpio_set_value_p012,
+ .set = lpc32xx_gpio_set_value_p012,
.request = lpc32xx_gpio_request,
.to_irq = lpc32xx_gpio_to_irq_p01,
.base = LPC32XX_GPIO_P0_GRP,
@@ -423,7 +423,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.direction_input = lpc32xx_gpio_dir_input_p012,
.get = lpc32xx_gpio_get_value_p012,
.direction_output = lpc32xx_gpio_dir_output_p012,
- .set_rv = lpc32xx_gpio_set_value_p012,
+ .set = lpc32xx_gpio_set_value_p012,
.request = lpc32xx_gpio_request,
.to_irq = lpc32xx_gpio_to_irq_p01,
.base = LPC32XX_GPIO_P1_GRP,
@@ -439,7 +439,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.direction_input = lpc32xx_gpio_dir_input_p012,
.get = lpc32xx_gpio_get_value_p012,
.direction_output = lpc32xx_gpio_dir_output_p012,
- .set_rv = lpc32xx_gpio_set_value_p012,
+ .set = lpc32xx_gpio_set_value_p012,
.request = lpc32xx_gpio_request,
.base = LPC32XX_GPIO_P2_GRP,
.ngpio = LPC32XX_GPIO_P2_MAX,
@@ -454,7 +454,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.direction_input = lpc32xx_gpio_dir_input_p3,
.get = lpc32xx_gpio_get_value_p3,
.direction_output = lpc32xx_gpio_dir_output_p3,
- .set_rv = lpc32xx_gpio_set_value_p3,
+ .set = lpc32xx_gpio_set_value_p3,
.request = lpc32xx_gpio_request,
.to_irq = lpc32xx_gpio_to_irq_gpio_p3,
.base = LPC32XX_GPIO_P3_GRP,
@@ -482,7 +482,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.chip = {
.label = "gpo_p3",
.direction_output = lpc32xx_gpio_dir_out_always,
- .set_rv = lpc32xx_gpo_set_value,
+ .set = lpc32xx_gpo_set_value,
.get = lpc32xx_gpo_get_value,
.request = lpc32xx_gpio_request,
.base = LPC32XX_GPO_P3_GRP,
diff --git a/drivers/gpio/gpio-macsmc.c b/drivers/gpio/gpio-macsmc.c
index 7570d9e89adf..30ef258e7655 100644
--- a/drivers/gpio/gpio-macsmc.c
+++ b/drivers/gpio/gpio-macsmc.c
@@ -261,7 +261,7 @@ static int macsmc_gpio_probe(struct platform_device *pdev)
smcgp->gc.label = "macsmc-pmu-gpio";
smcgp->gc.owner = THIS_MODULE;
smcgp->gc.get = macsmc_gpio_get;
- smcgp->gc.set_rv = macsmc_gpio_set;
+ smcgp->gc.set = macsmc_gpio_set;
smcgp->gc.get_direction = macsmc_gpio_get_direction;
smcgp->gc.init_valid_mask = macsmc_gpio_init_valid_mask;
smcgp->gc.can_sleep = true;
diff --git a/drivers/gpio/gpio-madera.c b/drivers/gpio/gpio-madera.c
index e73e72d62bc8..551faf9655b2 100644
--- a/drivers/gpio/gpio-madera.c
+++ b/drivers/gpio/gpio-madera.c
@@ -109,7 +109,7 @@ static const struct gpio_chip madera_gpio_chip = {
.direction_input = madera_gpio_direction_in,
.get = madera_gpio_get,
.direction_output = madera_gpio_direction_out,
- .set_rv = madera_gpio_set,
+ .set = madera_gpio_set,
.set_config = gpiochip_generic_config,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-max730x.c b/drivers/gpio/gpio-max730x.c
index 75d414d8c992..84c7c2dca822 100644
--- a/drivers/gpio/gpio-max730x.c
+++ b/drivers/gpio/gpio-max730x.c
@@ -188,7 +188,7 @@ int __max730x_probe(struct max7301 *ts)
ts->chip.direction_input = max7301_direction_input;
ts->chip.get = max7301_get;
ts->chip.direction_output = max7301_direction_output;
- ts->chip.set_rv = max7301_set;
+ ts->chip.set = max7301_set;
ts->chip.ngpio = PIN_NUMBER;
ts->chip.can_sleep = true;
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index d5ffedb086af..a61d670ceeda 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -585,8 +585,8 @@ static int max732x_setup_gpio(struct max732x_chip *chip,
gc->direction_input = max732x_gpio_direction_input;
if (chip->dir_output) {
gc->direction_output = max732x_gpio_direction_output;
- gc->set_rv = max732x_gpio_set_value;
- gc->set_multiple_rv = max732x_gpio_set_multiple;
+ gc->set = max732x_gpio_set_value;
+ gc->set_multiple = max732x_gpio_set_multiple;
}
gc->get = max732x_gpio_get_value;
gc->can_sleep = true;
diff --git a/drivers/gpio/gpio-max7360.c b/drivers/gpio/gpio-max7360.c
new file mode 100644
index 000000000000..db92a43776a9
--- /dev/null
+++ b/drivers/gpio/gpio-max7360.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2025 Bootlin
+ *
+ * Author: Kamel BOUHARA <kamel.bouhara@bootlin.com>
+ * Author: Mathieu Dubois-Briand <mathieu.dubois-briand@bootlin.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/regmap.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/max7360.h>
+#include <linux/minmax.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+#define MAX7360_GPIO_PORT 1
+#define MAX7360_GPIO_COL 2
+
+struct max7360_gpio_plat_data {
+ unsigned int function;
+};
+
+static struct max7360_gpio_plat_data max7360_gpio_port_plat = { .function = MAX7360_GPIO_PORT };
+static struct max7360_gpio_plat_data max7360_gpio_col_plat = { .function = MAX7360_GPIO_COL };
+
+static int max7360_get_available_gpos(struct device *dev, unsigned int *available_gpios)
+{
+ u32 columns;
+ int ret;
+
+ ret = device_property_read_u32(dev->parent, "keypad,num-columns", &columns);
+ if (ret) {
+ dev_err(dev, "Failed to read columns count\n");
+ return ret;
+ }
+
+ *available_gpios = min(MAX7360_MAX_GPO, MAX7360_MAX_KEY_COLS - columns);
+
+ return 0;
+}
+
+static int max7360_gpo_init_valid_mask(struct gpio_chip *gc,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ unsigned int available_gpios;
+ int ret;
+
+ ret = max7360_get_available_gpos(gc->parent, &available_gpios);
+ if (ret)
+ return ret;
+
+ bitmap_clear(valid_mask, 0, MAX7360_MAX_KEY_COLS - available_gpios);
+
+ return 0;
+}
+
+static int max7360_set_gpos_count(struct device *dev, struct regmap *regmap)
+{
+ /*
+ * MAX7360 COL0 to COL7 pins can be used either as keypad columns,
+ * general purpose output or a mix of both.
+ * By default, all pins are used as keypad, here we update this
+ * configuration to allow to use some of them as GPIOs.
+ */
+ unsigned int available_gpios;
+ unsigned int val;
+ int ret;
+
+ ret = max7360_get_available_gpos(dev, &available_gpios);
+ if (ret)
+ return ret;
+
+ /*
+ * Configure which GPIOs will be used for keypad.
+ * MAX7360_REG_DEBOUNCE contains configuration both for keypad debounce
+ * timings and the GPO/keypad column split. Only the latter is
+ * modified here.
+ */
+ val = FIELD_PREP(MAX7360_PORTS, available_gpios);
+ ret = regmap_write_bits(regmap, MAX7360_REG_DEBOUNCE, MAX7360_PORTS, val);
+ if (ret)
+ dev_err(dev, "Failed to write max7360 columns/gpos configuration");
+
+ return ret;
+}
+
+static int max7360_gpio_reg_mask_xlate(struct gpio_regmap *gpio,
+ unsigned int base, unsigned int offset,
+ unsigned int *reg, unsigned int *mask)
+{
+ if (base == MAX7360_REG_PWMBASE) {
+ /*
+ * GPIO output is using PWM duty cycle registers: one register
+ * per line, with value being either 0 or 255.
+ */
+ *reg = base + offset;
+ *mask = GENMASK(7, 0);
+ } else {
+ *reg = base;
+ *mask = BIT(offset);
+ }
+
+ return 0;
+}
+
+static const struct regmap_irq max7360_regmap_irqs[MAX7360_MAX_GPIO] = {
+ REGMAP_IRQ_REG(0, 0, BIT(0)),
+ REGMAP_IRQ_REG(1, 0, BIT(1)),
+ REGMAP_IRQ_REG(2, 0, BIT(2)),
+ REGMAP_IRQ_REG(3, 0, BIT(3)),
+ REGMAP_IRQ_REG(4, 0, BIT(4)),
+ REGMAP_IRQ_REG(5, 0, BIT(5)),
+ REGMAP_IRQ_REG(6, 0, BIT(6)),
+ REGMAP_IRQ_REG(7, 0, BIT(7)),
+};
+
+static int max7360_handle_mask_sync(const int index,
+ const unsigned int mask_buf_def,
+ const unsigned int mask_buf,
+ void *const irq_drv_data)
+{
+ struct regmap *regmap = irq_drv_data;
+ int ret;
+
+ for (unsigned int i = 0; i < MAX7360_MAX_GPIO; i++) {
+ ret = regmap_assign_bits(regmap, MAX7360_REG_PWMCFG(i),
+ MAX7360_PORT_CFG_INTERRUPT_MASK, mask_buf & BIT(i));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int max7360_gpio_probe(struct platform_device *pdev)
+{
+ const struct max7360_gpio_plat_data *plat_data;
+ struct gpio_regmap_config gpio_config = { };
+ struct regmap_irq_chip *irq_chip;
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+ unsigned int outconf;
+ int ret;
+
+ regmap = dev_get_regmap(dev->parent, NULL);
+ if (!regmap)
+ return dev_err_probe(dev, -ENODEV, "could not get parent regmap\n");
+
+ plat_data = device_get_match_data(dev);
+ if (plat_data->function == MAX7360_GPIO_PORT) {
+ if (device_property_read_bool(dev, "interrupt-controller")) {
+ /*
+ * Port GPIOs with interrupt-controller property: add IRQ
+ * controller.
+ */
+ gpio_config.regmap_irq_flags = IRQF_ONESHOT | IRQF_SHARED;
+ gpio_config.regmap_irq_line =
+ fwnode_irq_get_byname(dev_fwnode(dev->parent), "inti");
+ if (gpio_config.regmap_irq_line < 0)
+ return dev_err_probe(dev, gpio_config.regmap_irq_line,
+ "Failed to get IRQ\n");
+
+ /* Create custom IRQ configuration. */
+ irq_chip = devm_kzalloc(dev, sizeof(*irq_chip), GFP_KERNEL);
+ if (!irq_chip)
+ return -ENOMEM;
+
+ gpio_config.regmap_irq_chip = irq_chip;
+
+ irq_chip->name = dev_name(dev);
+ irq_chip->status_base = MAX7360_REG_GPIOIN;
+ irq_chip->status_is_level = true;
+ irq_chip->num_regs = 1;
+ irq_chip->num_irqs = MAX7360_MAX_GPIO;
+ irq_chip->irqs = max7360_regmap_irqs;
+ irq_chip->handle_mask_sync = max7360_handle_mask_sync;
+ irq_chip->irq_drv_data = regmap;
+
+ for (unsigned int i = 0; i < MAX7360_MAX_GPIO; i++) {
+ ret = regmap_write_bits(regmap, MAX7360_REG_PWMCFG(i),
+ MAX7360_PORT_CFG_INTERRUPT_EDGES,
+ MAX7360_PORT_CFG_INTERRUPT_EDGES);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to enable interrupts\n");
+ }
+ }
+
+ /*
+ * Port GPIOs: set output mode configuration (constant-current or not).
+ * This property is optional.
+ */
+ ret = device_property_read_u32(dev, "maxim,constant-current-disable", &outconf);
+ if (!ret) {
+ ret = regmap_write(regmap, MAX7360_REG_GPIOOUTM, outconf);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to set constant-current configuration\n");
+ }
+ }
+
+ /* Add gpio device. */
+ gpio_config.parent = dev;
+ gpio_config.regmap = regmap;
+ if (plat_data->function == MAX7360_GPIO_PORT) {
+ gpio_config.ngpio = MAX7360_MAX_GPIO;
+ gpio_config.reg_dat_base = GPIO_REGMAP_ADDR(MAX7360_REG_GPIOIN);
+ gpio_config.reg_set_base = GPIO_REGMAP_ADDR(MAX7360_REG_PWMBASE);
+ gpio_config.reg_dir_out_base = GPIO_REGMAP_ADDR(MAX7360_REG_GPIOCTRL);
+ gpio_config.ngpio_per_reg = MAX7360_MAX_GPIO;
+ gpio_config.reg_mask_xlate = max7360_gpio_reg_mask_xlate;
+ } else {
+ ret = max7360_set_gpos_count(dev, regmap);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to set GPOS pin count\n");
+
+ gpio_config.reg_set_base = GPIO_REGMAP_ADDR(MAX7360_REG_PORTS);
+ gpio_config.ngpio = MAX7360_MAX_KEY_COLS;
+ gpio_config.init_valid_mask = max7360_gpo_init_valid_mask;
+ }
+
+ return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &gpio_config));
+}
+
+static const struct of_device_id max7360_gpio_of_match[] = {
+ {
+ .compatible = "maxim,max7360-gpo",
+ .data = &max7360_gpio_col_plat
+ }, {
+ .compatible = "maxim,max7360-gpio",
+ .data = &max7360_gpio_port_plat
+ }, {
+ }
+};
+MODULE_DEVICE_TABLE(of, max7360_gpio_of_match);
+
+static struct platform_driver max7360_gpio_driver = {
+ .driver = {
+ .name = "max7360-gpio",
+ .of_match_table = max7360_gpio_of_match,
+ },
+ .probe = max7360_gpio_probe,
+};
+module_platform_driver(max7360_gpio_driver);
+
+MODULE_DESCRIPTION("MAX7360 GPIO driver");
+MODULE_AUTHOR("Kamel BOUHARA <kamel.bouhara@bootlin.com>");
+MODULE_AUTHOR("Mathieu Dubois-Briand <mathieu.dubois-briand@bootlin.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c
index af7af8e40afe..02eca400b307 100644
--- a/drivers/gpio/gpio-max77620.c
+++ b/drivers/gpio/gpio-max77620.c
@@ -311,7 +311,7 @@ static int max77620_gpio_probe(struct platform_device *pdev)
mgpio->gpio_chip.direction_input = max77620_gpio_dir_input;
mgpio->gpio_chip.get = max77620_gpio_get;
mgpio->gpio_chip.direction_output = max77620_gpio_dir_output;
- mgpio->gpio_chip.set_rv = max77620_gpio_set;
+ mgpio->gpio_chip.set = max77620_gpio_set;
mgpio->gpio_chip.set_config = max77620_gpio_set_config;
mgpio->gpio_chip.ngpio = MAX77620_GPIO_NR;
mgpio->gpio_chip.can_sleep = 1;
diff --git a/drivers/gpio/gpio-max77650.c b/drivers/gpio/gpio-max77650.c
index a553e141059f..4540da4c1418 100644
--- a/drivers/gpio/gpio-max77650.c
+++ b/drivers/gpio/gpio-max77650.c
@@ -166,7 +166,7 @@ static int max77650_gpio_probe(struct platform_device *pdev)
chip->gc.direction_input = max77650_gpio_direction_input;
chip->gc.direction_output = max77650_gpio_direction_output;
- chip->gc.set_rv = max77650_gpio_set_value;
+ chip->gc.set = max77650_gpio_set_value;
chip->gc.get = max77650_gpio_get_value;
chip->gc.get_direction = max77650_gpio_get_direction;
chip->gc.set_config = max77650_gpio_set_config;
diff --git a/drivers/gpio/gpio-max77759.c b/drivers/gpio/gpio-max77759.c
index 7fe8e6f697d0..5e48eb03e7b3 100644
--- a/drivers/gpio/gpio-max77759.c
+++ b/drivers/gpio/gpio-max77759.c
@@ -469,7 +469,7 @@ static int max77759_gpio_probe(struct platform_device *pdev)
chip->gc.direction_input = max77759_gpio_direction_input;
chip->gc.direction_output = max77759_gpio_direction_output;
chip->gc.get = max77759_gpio_get_value;
- chip->gc.set_rv = max77759_gpio_set_value;
+ chip->gc.set = max77759_gpio_set_value;
girq = &chip->gc.irq;
gpio_irq_chip_set_chip(girq, &max77759_gpio_irq_chip);
diff --git a/drivers/gpio/gpio-mb86s7x.c b/drivers/gpio/gpio-mb86s7x.c
index 5ee2991ecdfd..581a71872eab 100644
--- a/drivers/gpio/gpio-mb86s7x.c
+++ b/drivers/gpio/gpio-mb86s7x.c
@@ -180,7 +180,7 @@ static int mb86s70_gpio_probe(struct platform_device *pdev)
gchip->gc.request = mb86s70_gpio_request;
gchip->gc.free = mb86s70_gpio_free;
gchip->gc.get = mb86s70_gpio_get;
- gchip->gc.set_rv = mb86s70_gpio_set;
+ gchip->gc.set = mb86s70_gpio_set;
gchip->gc.to_irq = mb86s70_gpio_to_irq;
gchip->gc.label = dev_name(&pdev->dev);
gchip->gc.ngpio = 32;
diff --git a/drivers/gpio/gpio-mc33880.c b/drivers/gpio/gpio-mc33880.c
index e68956104161..9a40e9579e95 100644
--- a/drivers/gpio/gpio-mc33880.c
+++ b/drivers/gpio/gpio-mc33880.c
@@ -103,7 +103,7 @@ static int mc33880_probe(struct spi_device *spi)
mc->spi = spi;
mc->chip.label = DRIVER_NAME;
- mc->chip.set_rv = mc33880_set;
+ mc->chip.set = mc33880_set;
mc->chip.base = pdata->base;
mc->chip.ngpio = PIN_NUMBER;
mc->chip.can_sleep = true;
diff --git a/drivers/gpio/gpio-menz127.c b/drivers/gpio/gpio-menz127.c
index ebe5da4933bc..da2bf9381cc4 100644
--- a/drivers/gpio/gpio-menz127.c
+++ b/drivers/gpio/gpio-menz127.c
@@ -12,6 +12,7 @@
#include <linux/mcb.h>
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#define MEN_Z127_CTRL 0x00
#define MEN_Z127_PSR 0x04
@@ -30,7 +31,7 @@
(db <= MEN_Z127_DB_MAX_US))
struct men_z127_gpio {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *reg_base;
struct resource *mem;
};
@@ -64,7 +65,7 @@ static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
debounce /= 50;
}
- raw_spin_lock(&gc->bgpio_lock);
+ guard(gpio_generic_lock)(&priv->chip);
db_en = readl(priv->reg_base + MEN_Z127_DBER);
@@ -79,8 +80,6 @@ static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
writel(db_en, priv->reg_base + MEN_Z127_DBER);
writel(db_cnt, priv->reg_base + GPIO_TO_DBCNT_REG(gpio));
- raw_spin_unlock(&gc->bgpio_lock);
-
return 0;
}
@@ -91,7 +90,8 @@ static int men_z127_set_single_ended(struct gpio_chip *gc,
struct men_z127_gpio *priv = gpiochip_get_data(gc);
u32 od_en;
- raw_spin_lock(&gc->bgpio_lock);
+ guard(gpio_generic_lock)(&priv->chip);
+
od_en = readl(priv->reg_base + MEN_Z127_ODER);
if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN)
@@ -101,7 +101,6 @@ static int men_z127_set_single_ended(struct gpio_chip *gc,
od_en &= ~BIT(offset);
writel(od_en, priv->reg_base + MEN_Z127_ODER);
- raw_spin_unlock(&gc->bgpio_lock);
return 0;
}
@@ -137,6 +136,7 @@ static void men_z127_release_mem(void *data)
static int men_z127_probe(struct mcb_device *mdev,
const struct mcb_device_id *id)
{
+ struct gpio_generic_chip_config config;
struct men_z127_gpio *men_z127_gpio;
struct device *dev = &mdev->dev;
int ret;
@@ -163,18 +163,21 @@ static int men_z127_probe(struct mcb_device *mdev,
mcb_set_drvdata(mdev, men_z127_gpio);
- ret = bgpio_init(&men_z127_gpio->gc, &mdev->dev, 4,
- men_z127_gpio->reg_base + MEN_Z127_PSR,
- men_z127_gpio->reg_base + MEN_Z127_CTRL,
- NULL,
- men_z127_gpio->reg_base + MEN_Z127_GPIODR,
- NULL, 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = &mdev->dev,
+ .sz = 4,
+ .dat = men_z127_gpio->reg_base + MEN_Z127_PSR,
+ .set = men_z127_gpio->reg_base + MEN_Z127_CTRL,
+ .dirout = men_z127_gpio->reg_base + MEN_Z127_GPIODR,
+ };
+
+ ret = gpio_generic_chip_init(&men_z127_gpio->chip, &config);
if (ret)
return ret;
- men_z127_gpio->gc.set_config = men_z127_set_config;
+ men_z127_gpio->chip.gc.set_config = men_z127_set_config;
- ret = devm_gpiochip_add_data(dev, &men_z127_gpio->gc, men_z127_gpio);
+ ret = devm_gpiochip_add_data(dev, &men_z127_gpio->chip.gc, men_z127_gpio);
if (ret)
return dev_err_probe(dev, ret,
"failed to register MEN 16Z127 GPIO controller");
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index 12cf36f9ca63..f6af81bf2b13 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -224,7 +224,7 @@ static void ioh_gpio_setup(struct ioh_gpio *chip, int num_port)
gpio->direction_input = ioh_gpio_direction_input;
gpio->get = ioh_gpio_get;
gpio->direction_output = ioh_gpio_direction_output;
- gpio->set_rv = ioh_gpio_set;
+ gpio->set = ioh_gpio_set;
gpio->dbg_show = NULL;
gpio->base = -1;
gpio->ngpio = num_port;
diff --git a/drivers/gpio/gpio-mlxbf.c b/drivers/gpio/gpio-mlxbf.c
index 1fa9973f55b9..a18fedbc463e 100644
--- a/drivers/gpio/gpio-mlxbf.c
+++ b/drivers/gpio/gpio-mlxbf.c
@@ -4,6 +4,7 @@
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -37,7 +38,7 @@ struct mlxbf_gpio_context_save_regs {
/* Device state structure. */
struct mlxbf_gpio_state {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
/* Memory Address */
void __iomem *base;
@@ -49,6 +50,7 @@ struct mlxbf_gpio_state {
static int mlxbf_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct mlxbf_gpio_state *gs;
struct device *dev = &pdev->dev;
struct gpio_chip *gc;
@@ -62,21 +64,24 @@ static int mlxbf_gpio_probe(struct platform_device *pdev)
if (IS_ERR(gs->base))
return PTR_ERR(gs->base);
- gc = &gs->gc;
- ret = bgpio_init(gc, dev, 8,
- gs->base + MLXBF_GPIO_PIN_STATE,
- NULL,
- NULL,
- gs->base + MLXBF_GPIO_PIN_DIR_O,
- gs->base + MLXBF_GPIO_PIN_DIR_I,
- 0);
+ gc = &gs->chip.gc;
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 8,
+ .dat = gs->base + MLXBF_GPIO_PIN_STATE,
+ .dirout = gs->base + MLXBF_GPIO_PIN_DIR_O,
+ .dirin = gs->base + MLXBF_GPIO_PIN_DIR_I,
+ };
+
+ ret = gpio_generic_chip_init(&gs->chip, &config);
if (ret)
return -ENODEV;
gc->owner = THIS_MODULE;
gc->ngpio = MLXBF_GPIO_NR;
- ret = devm_gpiochip_add_data(dev, &gs->gc, gs);
+ ret = devm_gpiochip_add_data(dev, &gs->chip.gc, gs);
if (ret) {
dev_err(&pdev->dev, "Failed adding memory mapped gpiochip\n");
return ret;
diff --git a/drivers/gpio/gpio-mlxbf2.c b/drivers/gpio/gpio-mlxbf2.c
index 6f3dda6b635f..abffce3894fc 100644
--- a/drivers/gpio/gpio-mlxbf2.c
+++ b/drivers/gpio/gpio-mlxbf2.c
@@ -6,8 +6,10 @@
#include <linux/bitfield.h>
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
@@ -65,7 +67,7 @@ struct mlxbf2_gpio_context_save_regs {
/* BlueField-2 gpio block context structure. */
struct mlxbf2_gpio_context {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
/* YU GPIO blocks address */
void __iomem *gpio_io;
@@ -132,7 +134,7 @@ static int mlxbf2_gpio_lock_acquire(struct mlxbf2_gpio_context *gs)
u32 arm_gpio_lock_val;
mutex_lock(yu_arm_gpio_lock_param.lock);
- raw_spin_lock(&gs->gc.bgpio_lock);
+ gpio_generic_chip_lock(&gs->chip);
arm_gpio_lock_val = readl(yu_arm_gpio_lock_param.io);
@@ -140,7 +142,7 @@ static int mlxbf2_gpio_lock_acquire(struct mlxbf2_gpio_context *gs)
* When lock active bit[31] is set, ModeX is write enabled
*/
if (YU_LOCK_ACTIVE_BIT(arm_gpio_lock_val)) {
- raw_spin_unlock(&gs->gc.bgpio_lock);
+ gpio_generic_chip_unlock(&gs->chip);
mutex_unlock(yu_arm_gpio_lock_param.lock);
return -EINVAL;
}
@@ -154,11 +156,11 @@ static int mlxbf2_gpio_lock_acquire(struct mlxbf2_gpio_context *gs)
* Release the YU arm_gpio_lock after changing the direction mode.
*/
static void mlxbf2_gpio_lock_release(struct mlxbf2_gpio_context *gs)
- __releases(&gs->gc.bgpio_lock)
+ __releases(&gs->chip.lock)
__releases(yu_arm_gpio_lock_param.lock)
{
writel(YU_ARM_GPIO_LOCK_RELEASE, yu_arm_gpio_lock_param.io);
- raw_spin_unlock(&gs->gc.bgpio_lock);
+ gpio_generic_chip_unlock(&gs->chip);
mutex_unlock(yu_arm_gpio_lock_param.lock);
}
@@ -235,11 +237,10 @@ static void mlxbf2_gpio_irq_enable(struct irq_data *irqd)
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct mlxbf2_gpio_context *gs = gpiochip_get_data(gc);
int offset = irqd_to_hwirq(irqd);
- unsigned long flags;
u32 val;
gpiochip_enable_irq(gc, irqd_to_hwirq(irqd));
- raw_spin_lock_irqsave(&gs->gc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&gs->chip);
val = readl(gs->gpio_io + YU_GPIO_CAUSE_OR_CLRCAUSE);
val |= BIT(offset);
writel(val, gs->gpio_io + YU_GPIO_CAUSE_OR_CLRCAUSE);
@@ -247,7 +248,6 @@ static void mlxbf2_gpio_irq_enable(struct irq_data *irqd)
val = readl(gs->gpio_io + YU_GPIO_CAUSE_OR_EVTEN0);
val |= BIT(offset);
writel(val, gs->gpio_io + YU_GPIO_CAUSE_OR_EVTEN0);
- raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);
}
static void mlxbf2_gpio_irq_disable(struct irq_data *irqd)
@@ -255,21 +255,21 @@ static void mlxbf2_gpio_irq_disable(struct irq_data *irqd)
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct mlxbf2_gpio_context *gs = gpiochip_get_data(gc);
int offset = irqd_to_hwirq(irqd);
- unsigned long flags;
u32 val;
- raw_spin_lock_irqsave(&gs->gc.bgpio_lock, flags);
- val = readl(gs->gpio_io + YU_GPIO_CAUSE_OR_EVTEN0);
- val &= ~BIT(offset);
- writel(val, gs->gpio_io + YU_GPIO_CAUSE_OR_EVTEN0);
- raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);
+ scoped_guard(gpio_generic_lock_irqsave, &gs->chip) {
+ val = readl(gs->gpio_io + YU_GPIO_CAUSE_OR_EVTEN0);
+ val &= ~BIT(offset);
+ writel(val, gs->gpio_io + YU_GPIO_CAUSE_OR_EVTEN0);
+ }
+
gpiochip_disable_irq(gc, irqd_to_hwirq(irqd));
}
static irqreturn_t mlxbf2_gpio_irq_handler(int irq, void *ptr)
{
struct mlxbf2_gpio_context *gs = ptr;
- struct gpio_chip *gc = &gs->gc;
+ struct gpio_chip *gc = &gs->chip.gc;
unsigned long pending;
u32 level;
@@ -288,7 +288,6 @@ mlxbf2_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct mlxbf2_gpio_context *gs = gpiochip_get_data(gc);
int offset = irqd_to_hwirq(irqd);
- unsigned long flags;
bool fall = false;
bool rise = false;
u32 val;
@@ -308,7 +307,8 @@ mlxbf2_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
return -EINVAL;
}
- raw_spin_lock_irqsave(&gs->gc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&gs->chip);
+
if (fall) {
val = readl(gs->gpio_io + YU_GPIO_CAUSE_FALL_EN);
val |= BIT(offset);
@@ -320,7 +320,6 @@ mlxbf2_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
val |= BIT(offset);
writel(val, gs->gpio_io + YU_GPIO_CAUSE_RISE_EN);
}
- raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);
return 0;
}
@@ -347,6 +346,7 @@ static const struct irq_chip mlxbf2_gpio_irq_chip = {
static int
mlxbf2_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct mlxbf2_gpio_context *gs;
struct device *dev = &pdev->dev;
struct gpio_irq_chip *girq;
@@ -369,37 +369,34 @@ mlxbf2_gpio_probe(struct platform_device *pdev)
return PTR_ERR(gs->gpio_io);
ret = mlxbf2_gpio_get_lock_res(pdev);
- if (ret) {
- dev_err(dev, "Failed to get yu_arm_gpio_lock resource\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get yu_arm_gpio_lock resource\n");
if (device_property_read_u32(dev, "npins", &npins))
npins = MLXBF2_GPIO_MAX_PINS_PER_BLOCK;
- gc = &gs->gc;
+ gc = &gs->chip.gc;
- ret = bgpio_init(gc, dev, 4,
- gs->gpio_io + YU_GPIO_DATAIN,
- gs->gpio_io + YU_GPIO_DATASET,
- gs->gpio_io + YU_GPIO_DATACLEAR,
- NULL,
- NULL,
- 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = gs->gpio_io + YU_GPIO_DATAIN,
+ .set = gs->gpio_io + YU_GPIO_DATASET,
+ .clr = gs->gpio_io + YU_GPIO_DATACLEAR,
+ };
- if (ret) {
- dev_err(dev, "bgpio_init failed\n");
- return ret;
- }
+ ret = gpio_generic_chip_init(&gs->chip, &config);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to initialize the generic GPIO chip\n");
gc->direction_input = mlxbf2_gpio_direction_input;
gc->direction_output = mlxbf2_gpio_direction_output;
gc->ngpio = npins;
gc->owner = THIS_MODULE;
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq >= 0) {
- girq = &gs->gc.irq;
+ girq = &gs->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &mlxbf2_gpio_irq_chip);
girq->handler = handle_simple_irq;
girq->default_type = IRQ_TYPE_NONE;
@@ -414,19 +411,15 @@ mlxbf2_gpio_probe(struct platform_device *pdev)
*/
ret = devm_request_irq(dev, irq, mlxbf2_gpio_irq_handler,
IRQF_SHARED, name, gs);
- if (ret) {
- dev_err(dev, "failed to request IRQ");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request IRQ");
}
platform_set_drvdata(pdev, gs);
- ret = devm_gpiochip_add_data(dev, &gs->gc, gs);
- if (ret) {
- dev_err(dev, "Failed adding memory mapped gpiochip\n");
- return ret;
- }
+ ret = devm_gpiochip_add_data(dev, &gs->chip.gc, gs);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed adding memory mapped gpiochip\n");
return 0;
}
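mlxbf2 is the one driver here whose locking cannot be a scope-based guard everywhere: the spinlock is taken in mlxbf2_gpio_lock_acquire() and released in mlxbf2_gpio_lock_release(), around a hardware lock handshake that spans several callers. For that case the series uses the explicit helpers; a minimal sketch of the acquire/bail-out path, mirroring the code above:

	mutex_lock(yu_arm_gpio_lock_param.lock);
	gpio_generic_chip_lock(&gs->chip);	/* was: raw_spin_lock(&gc->bgpio_lock) */

	if (YU_LOCK_ACTIVE_BIT(readl(yu_arm_gpio_lock_param.io))) {
		gpio_generic_chip_unlock(&gs->chip);
		mutex_unlock(yu_arm_gpio_lock_param.lock);
		return -EINVAL;
	}
	/* ...critical section continues; unlocked in the release path. */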
diff --git a/drivers/gpio/gpio-mlxbf3.c b/drivers/gpio/gpio-mlxbf3.c
index 9875e34bde72..4770578269ba 100644
--- a/drivers/gpio/gpio-mlxbf3.c
+++ b/drivers/gpio/gpio-mlxbf3.c
@@ -6,6 +6,7 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -42,7 +43,7 @@
#define MLXBF_GPIO_CLR_ALL_INTS GENMASK(31, 0)
struct mlxbf3_gpio_context {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
/* YU GPIO block address */
void __iomem *gpio_set_io;
@@ -58,18 +59,17 @@ static void mlxbf3_gpio_irq_enable(struct irq_data *irqd)
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct mlxbf3_gpio_context *gs = gpiochip_get_data(gc);
irq_hw_number_t offset = irqd_to_hwirq(irqd);
- unsigned long flags;
u32 val;
gpiochip_enable_irq(gc, offset);
- raw_spin_lock_irqsave(&gs->gc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&gs->chip);
+
writel(BIT(offset), gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_CLRCAUSE);
val = readl(gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
val |= BIT(offset);
writel(val, gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
- raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);
}
static void mlxbf3_gpio_irq_disable(struct irq_data *irqd)
@@ -77,16 +77,15 @@ static void mlxbf3_gpio_irq_disable(struct irq_data *irqd)
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct mlxbf3_gpio_context *gs = gpiochip_get_data(gc);
irq_hw_number_t offset = irqd_to_hwirq(irqd);
- unsigned long flags;
u32 val;
- raw_spin_lock_irqsave(&gs->gc.bgpio_lock, flags);
- val = readl(gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
- val &= ~BIT(offset);
- writel(val, gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
+ scoped_guard(gpio_generic_lock_irqsave, &gs->chip) {
+ val = readl(gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
+ val &= ~BIT(offset);
+ writel(val, gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
- writel(BIT(offset), gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_CLRCAUSE);
- raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);
+ writel(BIT(offset), gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_CLRCAUSE);
+ }
gpiochip_disable_irq(gc, offset);
}
@@ -94,7 +93,7 @@ static void mlxbf3_gpio_irq_disable(struct irq_data *irqd)
static irqreturn_t mlxbf3_gpio_irq_handler(int irq, void *ptr)
{
struct mlxbf3_gpio_context *gs = ptr;
- struct gpio_chip *gc = &gs->gc;
+ struct gpio_chip *gc = &gs->chip.gc;
unsigned long pending;
u32 level;
@@ -113,37 +112,33 @@ mlxbf3_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct mlxbf3_gpio_context *gs = gpiochip_get_data(gc);
irq_hw_number_t offset = irqd_to_hwirq(irqd);
- unsigned long flags;
u32 val;
- raw_spin_lock_irqsave(&gs->gc.bgpio_lock, flags);
-
- switch (type & IRQ_TYPE_SENSE_MASK) {
- case IRQ_TYPE_EDGE_BOTH:
- val = readl(gs->gpio_io + MLXBF_GPIO_CAUSE_FALL_EN);
- val |= BIT(offset);
- writel(val, gs->gpio_io + MLXBF_GPIO_CAUSE_FALL_EN);
- val = readl(gs->gpio_io + MLXBF_GPIO_CAUSE_RISE_EN);
- val |= BIT(offset);
- writel(val, gs->gpio_io + MLXBF_GPIO_CAUSE_RISE_EN);
- break;
- case IRQ_TYPE_EDGE_RISING:
- val = readl(gs->gpio_io + MLXBF_GPIO_CAUSE_RISE_EN);
- val |= BIT(offset);
- writel(val, gs->gpio_io + MLXBF_GPIO_CAUSE_RISE_EN);
- break;
- case IRQ_TYPE_EDGE_FALLING:
- val = readl(gs->gpio_io + MLXBF_GPIO_CAUSE_FALL_EN);
- val |= BIT(offset);
- writel(val, gs->gpio_io + MLXBF_GPIO_CAUSE_FALL_EN);
- break;
- default:
- raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);
- return -EINVAL;
+ scoped_guard(gpio_generic_lock_irqsave, &gs->chip) {
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_BOTH:
+ val = readl(gs->gpio_io + MLXBF_GPIO_CAUSE_FALL_EN);
+ val |= BIT(offset);
+ writel(val, gs->gpio_io + MLXBF_GPIO_CAUSE_FALL_EN);
+ val = readl(gs->gpio_io + MLXBF_GPIO_CAUSE_RISE_EN);
+ val |= BIT(offset);
+ writel(val, gs->gpio_io + MLXBF_GPIO_CAUSE_RISE_EN);
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ val = readl(gs->gpio_io + MLXBF_GPIO_CAUSE_RISE_EN);
+ val |= BIT(offset);
+ writel(val, gs->gpio_io + MLXBF_GPIO_CAUSE_RISE_EN);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ val = readl(gs->gpio_io + MLXBF_GPIO_CAUSE_FALL_EN);
+ val |= BIT(offset);
+ writel(val, gs->gpio_io + MLXBF_GPIO_CAUSE_FALL_EN);
+ break;
+ default:
+ return -EINVAL;
+ }
}
- raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);
-
irq_set_handler_locked(irqd, handle_edge_irq);
return 0;
@@ -186,13 +181,12 @@ static int mlxbf3_gpio_add_pin_ranges(struct gpio_chip *chip)
static int mlxbf3_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct mlxbf3_gpio_context *gs;
struct gpio_irq_chip *girq;
struct gpio_chip *gc;
- char *colon_ptr;
int ret, irq;
- long num;
gs = devm_kzalloc(dev, sizeof(*gs), GFP_KERNEL);
if (!gs)
@@ -213,60 +207,53 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
gs->gpio_clr_io = devm_platform_ioremap_resource(pdev, 3);
if (IS_ERR(gs->gpio_clr_io))
return PTR_ERR(gs->gpio_clr_io);
- gc = &gs->gc;
-
- ret = bgpio_init(gc, dev, 4,
- gs->gpio_io + MLXBF_GPIO_READ_DATA_IN,
- gs->gpio_set_io + MLXBF_GPIO_FW_DATA_OUT_SET,
- gs->gpio_clr_io + MLXBF_GPIO_FW_DATA_OUT_CLEAR,
- gs->gpio_set_io + MLXBF_GPIO_FW_OUTPUT_ENABLE_SET,
- gs->gpio_clr_io + MLXBF_GPIO_FW_OUTPUT_ENABLE_CLEAR, 0);
+ gc = &gs->chip.gc;
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = gs->gpio_io + MLXBF_GPIO_READ_DATA_IN,
+ .set = gs->gpio_set_io + MLXBF_GPIO_FW_DATA_OUT_SET,
+ .clr = gs->gpio_clr_io + MLXBF_GPIO_FW_DATA_OUT_CLEAR,
+ .dirout = gs->gpio_set_io + MLXBF_GPIO_FW_OUTPUT_ENABLE_SET,
+ .dirin = gs->gpio_clr_io + MLXBF_GPIO_FW_OUTPUT_ENABLE_CLEAR,
+ };
+
+ ret = gpio_generic_chip_init(&gs->chip, &config);
if (ret)
- return dev_err_probe(dev, ret, "%s: bgpio_init() failed", __func__);
+ return dev_err_probe(dev, ret,
+ "%s: failed to initialize the generic GPIO chip\n",
+ __func__);
gc->request = gpiochip_generic_request;
gc->free = gpiochip_generic_free;
gc->owner = THIS_MODULE;
gc->add_pin_ranges = mlxbf3_gpio_add_pin_ranges;
- colon_ptr = strchr(dev_name(dev), ':');
- if (!colon_ptr) {
- dev_err(dev, "invalid device name format\n");
- return -EINVAL;
- }
-
- ret = kstrtol(++colon_ptr, 16, &num);
- if (ret) {
- dev_err(dev, "invalid device instance\n");
- return ret;
- }
-
- if (!num) {
- irq = platform_get_irq(pdev, 0);
- if (irq >= 0) {
- girq = &gs->gc.irq;
- gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip);
- girq->default_type = IRQ_TYPE_NONE;
- /* This will let us handle the parent IRQ in the driver */
- girq->num_parents = 0;
- girq->parents = NULL;
- girq->parent_handler = NULL;
- girq->handler = handle_bad_irq;
-
- /*
- * Directly request the irq here instead of passing
- * a flow-handler because the irq is shared.
- */
- ret = devm_request_irq(dev, irq, mlxbf3_gpio_irq_handler,
- IRQF_SHARED, dev_name(dev), gs);
- if (ret)
- return dev_err_probe(dev, ret, "failed to request IRQ");
- }
+ irq = platform_get_irq_optional(pdev, 0);
+ if (irq >= 0) {
+ girq = &gs->chip.gc.irq;
+ gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip);
+ girq->default_type = IRQ_TYPE_NONE;
+ /* This will let us handle the parent IRQ in the driver */
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->parent_handler = NULL;
+ girq->handler = handle_bad_irq;
+
+ /*
+ * Directly request the irq here instead of passing
+ * a flow-handler because the irq is shared.
+ */
+ ret = devm_request_irq(dev, irq, mlxbf3_gpio_irq_handler,
+ IRQF_SHARED, dev_name(dev), gs);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request IRQ");
}
platform_set_drvdata(pdev, gs);
- ret = devm_gpiochip_add_data(dev, &gs->gc, gs);
+ ret = devm_gpiochip_add_data(dev, gc, gs);
if (ret)
dev_err_probe(dev, ret, "Failed adding memory mapped gpiochip\n");
diff --git a/drivers/gpio/gpio-mm-lantiq.c b/drivers/gpio/gpio-mm-lantiq.c
index 897a1e004681..8f1405733d98 100644
--- a/drivers/gpio/gpio-mm-lantiq.c
+++ b/drivers/gpio/gpio-mm-lantiq.c
@@ -111,7 +111,7 @@ static int ltq_mm_probe(struct platform_device *pdev)
chip->mmchip.gc.ngpio = 16;
chip->mmchip.gc.direction_output = ltq_mm_dir_out;
- chip->mmchip.gc.set_rv = ltq_mm_set;
+ chip->mmchip.gc.set = ltq_mm_set;
chip->mmchip.save_regs = ltq_mm_save_regs;
/* store the shadow value if one was passed by the devicetree */
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
index cf878c2ea6bf..7d6dd36cf1ae 100644
--- a/drivers/gpio/gpio-mmio.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -57,6 +57,7 @@ o ` ~~~~\___/~~~~ ` controller in FPGA is ,.`
#include <linux/types.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include "gpiolib.h"
@@ -124,20 +125,23 @@ static unsigned long bgpio_read32be(void __iomem *reg)
static unsigned long bgpio_line2mask(struct gpio_chip *gc, unsigned int line)
{
- if (gc->be_bits)
- return BIT(gc->bgpio_bits - 1 - line);
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+
+ if (chip->be_bits)
+ return BIT(chip->bits - 1 - line);
return BIT(line);
}
static int bgpio_get_set(struct gpio_chip *gc, unsigned int gpio)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
unsigned long pinmask = bgpio_line2mask(gc, gpio);
- bool dir = !!(gc->bgpio_dir & pinmask);
+ bool dir = !!(chip->sdir & pinmask);
if (dir)
- return !!(gc->read_reg(gc->reg_set) & pinmask);
- else
- return !!(gc->read_reg(gc->reg_dat) & pinmask);
+ return !!(chip->read_reg(chip->reg_set) & pinmask);
+
+ return !!(chip->read_reg(chip->reg_dat) & pinmask);
}
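
The accessors above recover the generic chip from the gpio_chip pointer that gpiolib hands back. A sketch of how such a to_gpio_generic_chip() downcast is conventionally built (assuming it is container_of()-based, as the mediatek helper later in this series suggests; the sketch names are hypothetical):

#include <linux/container_of.h>
#include <linux/gpio/driver.h>

struct generic_chip_sketch {
	struct gpio_chip gc;
	/* lock, shadow words, read_reg()/write_reg() hooks, ... */
};

#define to_generic_chip_sketch(gcp) \
	container_of(gcp, struct generic_chip_sketch, gc)
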
/*
@@ -147,26 +151,28 @@ static int bgpio_get_set(struct gpio_chip *gc, unsigned int gpio)
static int bgpio_get_set_multiple(struct gpio_chip *gc, unsigned long *mask,
unsigned long *bits)
{
- unsigned long get_mask = 0;
- unsigned long set_mask = 0;
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+ unsigned long get_mask = 0, set_mask = 0;
/* Make sure we first clear any bits that are zero when we read the register */
*bits &= ~*mask;
- set_mask = *mask & gc->bgpio_dir;
- get_mask = *mask & ~gc->bgpio_dir;
+ set_mask = *mask & chip->sdir;
+ get_mask = *mask & ~chip->sdir;
if (set_mask)
- *bits |= gc->read_reg(gc->reg_set) & set_mask;
+ *bits |= chip->read_reg(chip->reg_set) & set_mask;
if (get_mask)
- *bits |= gc->read_reg(gc->reg_dat) & get_mask;
+ *bits |= chip->read_reg(chip->reg_dat) & get_mask;
return 0;
}
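
A worked example of the mask split above, with hypothetical values: given *mask = 0b1111 and a shadow direction word chip->sdir = 0b0011 (lines 0-1 outputs, lines 2-3 inputs), set_mask = 0b1111 & 0b0011 = 0b0011 is read back from the SET register while get_mask = 0b1111 & ~0b0011 = 0b1100 comes from the DAT register, so output lines report the value they are driving rather than a possibly different sampled pin state.
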
static int bgpio_get(struct gpio_chip *gc, unsigned int gpio)
{
- return !!(gc->read_reg(gc->reg_dat) & bgpio_line2mask(gc, gpio));
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+
+ return !!(chip->read_reg(chip->reg_dat) & bgpio_line2mask(gc, gpio));
}
/*
@@ -175,9 +181,11 @@ static int bgpio_get(struct gpio_chip *gc, unsigned int gpio)
static int bgpio_get_multiple(struct gpio_chip *gc, unsigned long *mask,
unsigned long *bits)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+
/* Make sure we first clear any bits that are zero when we read the register */
*bits &= ~*mask;
- *bits |= gc->read_reg(gc->reg_dat) & *mask;
+ *bits |= chip->read_reg(chip->reg_dat) & *mask;
return 0;
}
@@ -187,6 +195,7 @@ static int bgpio_get_multiple(struct gpio_chip *gc, unsigned long *mask,
static int bgpio_get_multiple_be(struct gpio_chip *gc, unsigned long *mask,
unsigned long *bits)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
unsigned long readmask = 0;
unsigned long val;
int bit;
@@ -199,7 +208,7 @@ static int bgpio_get_multiple_be(struct gpio_chip *gc, unsigned long *mask,
readmask |= bgpio_line2mask(gc, bit);
/* Read the register */
- val = gc->read_reg(gc->reg_dat) & readmask;
+ val = chip->read_reg(chip->reg_dat) & readmask;
/*
* Mirror the result into the "bits" result, this will give line 0
@@ -218,19 +227,20 @@ static int bgpio_set_none(struct gpio_chip *gc, unsigned int gpio, int val)
static int bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
unsigned long mask = bgpio_line2mask(gc, gpio);
unsigned long flags;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->lock, flags);
if (val)
- gc->bgpio_data |= mask;
+ chip->sdata |= mask;
else
- gc->bgpio_data &= ~mask;
+ chip->sdata &= ~mask;
- gc->write_reg(gc->reg_dat, gc->bgpio_data);
+ chip->write_reg(chip->reg_dat, chip->sdata);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->lock, flags);
return 0;
}
@@ -238,31 +248,32 @@ static int bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
static int bgpio_set_with_clear(struct gpio_chip *gc, unsigned int gpio,
int val)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
unsigned long mask = bgpio_line2mask(gc, gpio);
if (val)
- gc->write_reg(gc->reg_set, mask);
+ chip->write_reg(chip->reg_set, mask);
else
- gc->write_reg(gc->reg_clr, mask);
+ chip->write_reg(chip->reg_clr, mask);
return 0;
}
static int bgpio_set_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
- unsigned long mask = bgpio_line2mask(gc, gpio);
- unsigned long flags;
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+ unsigned long mask = bgpio_line2mask(gc, gpio), flags;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->lock, flags);
if (val)
- gc->bgpio_data |= mask;
+ chip->sdata |= mask;
else
- gc->bgpio_data &= ~mask;
+ chip->sdata &= ~mask;
- gc->write_reg(gc->reg_set, gc->bgpio_data);
+ chip->write_reg(chip->reg_set, chip->sdata);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->lock, flags);
return 0;
}
@@ -272,12 +283,13 @@ static void bgpio_multiple_get_masks(struct gpio_chip *gc,
unsigned long *set_mask,
unsigned long *clear_mask)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
int i;
*set_mask = 0;
*clear_mask = 0;
- for_each_set_bit(i, mask, gc->bgpio_bits) {
+ for_each_set_bit(i, mask, chip->bits) {
if (test_bit(i, bits))
*set_mask |= bgpio_line2mask(gc, i);
else
@@ -290,25 +302,27 @@ static void bgpio_set_multiple_single_reg(struct gpio_chip *gc,
unsigned long *bits,
void __iomem *reg)
{
- unsigned long flags;
- unsigned long set_mask, clear_mask;
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+ unsigned long flags, set_mask, clear_mask;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->lock, flags);
bgpio_multiple_get_masks(gc, mask, bits, &set_mask, &clear_mask);
- gc->bgpio_data |= set_mask;
- gc->bgpio_data &= ~clear_mask;
+ chip->sdata |= set_mask;
+ chip->sdata &= ~clear_mask;
- gc->write_reg(reg, gc->bgpio_data);
+ chip->write_reg(reg, chip->sdata);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->lock, flags);
}
static int bgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
unsigned long *bits)
{
- bgpio_set_multiple_single_reg(gc, mask, bits, gc->reg_dat);
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+
+ bgpio_set_multiple_single_reg(gc, mask, bits, chip->reg_dat);
return 0;
}
@@ -316,7 +330,9 @@ static int bgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
static int bgpio_set_multiple_set(struct gpio_chip *gc, unsigned long *mask,
unsigned long *bits)
{
- bgpio_set_multiple_single_reg(gc, mask, bits, gc->reg_set);
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+
+ bgpio_set_multiple_single_reg(gc, mask, bits, chip->reg_set);
return 0;
}
@@ -325,21 +341,24 @@ static int bgpio_set_multiple_with_clear(struct gpio_chip *gc,
unsigned long *mask,
unsigned long *bits)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
unsigned long set_mask, clear_mask;
bgpio_multiple_get_masks(gc, mask, bits, &set_mask, &clear_mask);
if (set_mask)
- gc->write_reg(gc->reg_set, set_mask);
+ chip->write_reg(chip->reg_set, set_mask);
if (clear_mask)
- gc->write_reg(gc->reg_clr, clear_mask);
+ chip->write_reg(chip->reg_clr, clear_mask);
return 0;
}
static int bgpio_dir_return(struct gpio_chip *gc, unsigned int gpio, bool dir_out)
{
- if (!gc->bgpio_pinctrl)
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+
+ if (!chip->pinctrl)
return 0;
if (dir_out)
@@ -367,46 +386,49 @@ static int bgpio_dir_out_err(struct gpio_chip *gc, unsigned int gpio,
static int bgpio_simple_dir_out(struct gpio_chip *gc, unsigned int gpio,
int val)
{
- gc->set_rv(gc, gpio, val);
+ gc->set(gc, gpio, val);
return bgpio_dir_return(gc, gpio, true);
}
static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
unsigned long flags;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->lock, flags);
- gc->bgpio_dir &= ~bgpio_line2mask(gc, gpio);
+ chip->sdir &= ~bgpio_line2mask(gc, gpio);
- if (gc->reg_dir_in)
- gc->write_reg(gc->reg_dir_in, ~gc->bgpio_dir);
- if (gc->reg_dir_out)
- gc->write_reg(gc->reg_dir_out, gc->bgpio_dir);
+ if (chip->reg_dir_in)
+ chip->write_reg(chip->reg_dir_in, ~chip->sdir);
+ if (chip->reg_dir_out)
+ chip->write_reg(chip->reg_dir_out, chip->sdir);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->lock, flags);
return bgpio_dir_return(gc, gpio, false);
}
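
Note the polarity convention above: in the shadow word chip->sdir a set bit means output, while a set bit in the "dirin" register means input, so sdir is written to reg_dir_in inverted. With a hypothetical sdir = 0b0101, reg_dir_out receives 0b0101 and reg_dir_in receives ~0b0101, keeping both hardware views of the direction consistent.
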
static int bgpio_get_dir(struct gpio_chip *gc, unsigned int gpio)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+
/* Return 0 if output, 1 if input */
- if (gc->bgpio_dir_unreadable) {
- if (gc->bgpio_dir & bgpio_line2mask(gc, gpio))
+ if (chip->dir_unreadable) {
+ if (chip->sdir & bgpio_line2mask(gc, gpio))
return GPIO_LINE_DIRECTION_OUT;
return GPIO_LINE_DIRECTION_IN;
}
- if (gc->reg_dir_out) {
- if (gc->read_reg(gc->reg_dir_out) & bgpio_line2mask(gc, gpio))
+ if (chip->reg_dir_out) {
+ if (chip->read_reg(chip->reg_dir_out) & bgpio_line2mask(gc, gpio))
return GPIO_LINE_DIRECTION_OUT;
return GPIO_LINE_DIRECTION_IN;
}
- if (gc->reg_dir_in)
- if (!(gc->read_reg(gc->reg_dir_in) & bgpio_line2mask(gc, gpio)))
+ if (chip->reg_dir_in)
+ if (!(chip->read_reg(chip->reg_dir_in) & bgpio_line2mask(gc, gpio)))
return GPIO_LINE_DIRECTION_OUT;
return GPIO_LINE_DIRECTION_IN;
@@ -414,62 +436,62 @@ static int bgpio_get_dir(struct gpio_chip *gc, unsigned int gpio)
static void bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
unsigned long flags;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ raw_spin_lock_irqsave(&chip->lock, flags);
- gc->bgpio_dir |= bgpio_line2mask(gc, gpio);
+ chip->sdir |= bgpio_line2mask(gc, gpio);
- if (gc->reg_dir_in)
- gc->write_reg(gc->reg_dir_in, ~gc->bgpio_dir);
- if (gc->reg_dir_out)
- gc->write_reg(gc->reg_dir_out, gc->bgpio_dir);
+ if (chip->reg_dir_in)
+ chip->write_reg(chip->reg_dir_in, ~chip->sdir);
+ if (chip->reg_dir_out)
+ chip->write_reg(chip->reg_dir_out, chip->sdir);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ raw_spin_unlock_irqrestore(&chip->lock, flags);
}
static int bgpio_dir_out_dir_first(struct gpio_chip *gc, unsigned int gpio,
int val)
{
bgpio_dir_out(gc, gpio, val);
- gc->set_rv(gc, gpio, val);
+ gc->set(gc, gpio, val);
return bgpio_dir_return(gc, gpio, true);
}
static int bgpio_dir_out_val_first(struct gpio_chip *gc, unsigned int gpio,
int val)
{
- gc->set_rv(gc, gpio, val);
+ gc->set(gc, gpio, val);
bgpio_dir_out(gc, gpio, val);
return bgpio_dir_return(gc, gpio, true);
}
static int bgpio_setup_accessors(struct device *dev,
- struct gpio_chip *gc,
+ struct gpio_generic_chip *chip,
bool byte_be)
{
-
- switch (gc->bgpio_bits) {
+ switch (chip->bits) {
case 8:
- gc->read_reg = bgpio_read8;
- gc->write_reg = bgpio_write8;
+ chip->read_reg = bgpio_read8;
+ chip->write_reg = bgpio_write8;
break;
case 16:
if (byte_be) {
- gc->read_reg = bgpio_read16be;
- gc->write_reg = bgpio_write16be;
+ chip->read_reg = bgpio_read16be;
+ chip->write_reg = bgpio_write16be;
} else {
- gc->read_reg = bgpio_read16;
- gc->write_reg = bgpio_write16;
+ chip->read_reg = bgpio_read16;
+ chip->write_reg = bgpio_write16;
}
break;
case 32:
if (byte_be) {
- gc->read_reg = bgpio_read32be;
- gc->write_reg = bgpio_write32be;
+ chip->read_reg = bgpio_read32be;
+ chip->write_reg = bgpio_write32be;
} else {
- gc->read_reg = bgpio_read32;
- gc->write_reg = bgpio_write32;
+ chip->read_reg = bgpio_read32;
+ chip->write_reg = bgpio_write32;
}
break;
#if BITS_PER_LONG >= 64
@@ -479,13 +501,13 @@ static int bgpio_setup_accessors(struct device *dev,
"64 bit big endian byte order unsupported\n");
return -EINVAL;
} else {
- gc->read_reg = bgpio_read64;
- gc->write_reg = bgpio_write64;
+ chip->read_reg = bgpio_read64;
+ chip->write_reg = bgpio_write64;
}
break;
#endif /* BITS_PER_LONG >= 64 */
default:
- dev_err(dev, "unsupported data width %u bits\n", gc->bgpio_bits);
+ dev_err(dev, "unsupported data width %u bits\n", chip->bits);
return -EINVAL;
}
@@ -514,38 +536,36 @@ static int bgpio_setup_accessors(struct device *dev,
* - an input direction register (named "dirin") where a 1 bit indicates
* the GPIO is an input.
*/
-static int bgpio_setup_io(struct gpio_chip *gc,
- void __iomem *dat,
- void __iomem *set,
- void __iomem *clr,
- unsigned long flags)
+static int bgpio_setup_io(struct gpio_generic_chip *chip,
+ const struct gpio_generic_chip_config *cfg)
{
+ struct gpio_chip *gc = &chip->gc;
- gc->reg_dat = dat;
- if (!gc->reg_dat)
+ chip->reg_dat = cfg->dat;
+ if (!chip->reg_dat)
return -EINVAL;
- if (set && clr) {
- gc->reg_set = set;
- gc->reg_clr = clr;
- gc->set_rv = bgpio_set_with_clear;
- gc->set_multiple_rv = bgpio_set_multiple_with_clear;
- } else if (set && !clr) {
- gc->reg_set = set;
- gc->set_rv = bgpio_set_set;
- gc->set_multiple_rv = bgpio_set_multiple_set;
- } else if (flags & BGPIOF_NO_OUTPUT) {
- gc->set_rv = bgpio_set_none;
- gc->set_multiple_rv = NULL;
+ if (cfg->set && cfg->clr) {
+ chip->reg_set = cfg->set;
+ chip->reg_clr = cfg->clr;
+ gc->set = bgpio_set_with_clear;
+ gc->set_multiple = bgpio_set_multiple_with_clear;
+ } else if (cfg->set && !cfg->clr) {
+ chip->reg_set = cfg->set;
+ gc->set = bgpio_set_set;
+ gc->set_multiple = bgpio_set_multiple_set;
+ } else if (cfg->flags & GPIO_GENERIC_NO_OUTPUT) {
+ gc->set = bgpio_set_none;
+ gc->set_multiple = NULL;
} else {
- gc->set_rv = bgpio_set;
- gc->set_multiple_rv = bgpio_set_multiple;
+ gc->set = bgpio_set;
+ gc->set_multiple = bgpio_set_multiple;
}
- if (!(flags & BGPIOF_UNREADABLE_REG_SET) &&
- (flags & BGPIOF_READ_OUTPUT_REG_SET)) {
+ if (!(cfg->flags & GPIO_GENERIC_UNREADABLE_REG_SET) &&
+ (cfg->flags & GPIO_GENERIC_READ_OUTPUT_REG_SET)) {
gc->get = bgpio_get_set;
- if (!gc->be_bits)
+ if (!chip->be_bits)
gc->get_multiple = bgpio_get_set_multiple;
/*
* We deliberately avoid assigning the ->get_multiple() call
@@ -556,7 +576,7 @@ static int bgpio_setup_io(struct gpio_chip *gc,
*/
} else {
gc->get = bgpio_get;
- if (gc->be_bits)
+ if (chip->be_bits)
gc->get_multiple = bgpio_get_multiple_be;
else
gc->get_multiple = bgpio_get_multiple;
@@ -565,27 +585,27 @@ static int bgpio_setup_io(struct gpio_chip *gc,
return 0;
}
-static int bgpio_setup_direction(struct gpio_chip *gc,
- void __iomem *dirout,
- void __iomem *dirin,
- unsigned long flags)
+static int bgpio_setup_direction(struct gpio_generic_chip *chip,
+ const struct gpio_generic_chip_config *cfg)
{
- if (dirout || dirin) {
- gc->reg_dir_out = dirout;
- gc->reg_dir_in = dirin;
- if (flags & BGPIOF_NO_SET_ON_INPUT)
+ struct gpio_chip *gc = &chip->gc;
+
+ if (cfg->dirout || cfg->dirin) {
+ chip->reg_dir_out = cfg->dirout;
+ chip->reg_dir_in = cfg->dirin;
+ if (cfg->flags & GPIO_GENERIC_NO_SET_ON_INPUT)
gc->direction_output = bgpio_dir_out_dir_first;
else
gc->direction_output = bgpio_dir_out_val_first;
gc->direction_input = bgpio_dir_in;
gc->get_direction = bgpio_get_dir;
} else {
- if (flags & BGPIOF_NO_OUTPUT)
+ if (cfg->flags & GPIO_GENERIC_NO_OUTPUT)
gc->direction_output = bgpio_dir_out_err;
else
gc->direction_output = bgpio_simple_dir_out;
- if (flags & BGPIOF_NO_INPUT)
+ if (cfg->flags & GPIO_GENERIC_NO_INPUT)
gc->direction_input = bgpio_dir_in_err;
else
gc->direction_input = bgpio_simple_dir_in;
@@ -594,117 +614,101 @@ static int bgpio_setup_direction(struct gpio_chip *gc,
return 0;
}
-static int bgpio_request(struct gpio_chip *chip, unsigned gpio_pin)
+static int bgpio_request(struct gpio_chip *gc, unsigned int gpio_pin)
{
- if (gpio_pin >= chip->ngpio)
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+
+ if (gpio_pin >= gc->ngpio)
return -EINVAL;
- if (chip->bgpio_pinctrl)
- return gpiochip_generic_request(chip, gpio_pin);
+ if (chip->pinctrl)
+ return gpiochip_generic_request(gc, gpio_pin);
return 0;
}
/**
- * bgpio_init() - Initialize generic GPIO accessor functions
- * @gc: the GPIO chip to set up
- * @dev: the parent device of the new GPIO chip (compulsory)
- * @sz: the size (width) of the MMIO registers in bytes, typically 1, 2 or 4
- * @dat: MMIO address for the register to READ the value of the GPIO lines, it
- * is expected that a 1 in the corresponding bit in this register means the
- * line is asserted
- * @set: MMIO address for the register to SET the value of the GPIO lines, it is
- * expected that we write the line with 1 in this register to drive the GPIO line
- * high.
- * @clr: MMIO address for the register to CLEAR the value of the GPIO lines, it is
- * expected that we write the line with 1 in this register to drive the GPIO line
- * low. It is allowed to leave this address as NULL, in that case the SET register
- * will be assumed to also clear the GPIO lines, by actively writing the line
- * with 0.
- * @dirout: MMIO address for the register to set the line as OUTPUT. It is assumed
- * that setting a line to 1 in this register will turn that line into an
- * output line. Conversely, setting the line to 0 will turn that line into
- * an input.
- * @dirin: MMIO address for the register to set this line as INPUT. It is assumed
- * that setting a line to 1 in this register will turn that line into an
- * input line. Conversely, setting the line to 0 will turn that line into
- * an output.
- * @flags: Different flags that will affect the behaviour of the device, such as
- * endianness etc.
+ * gpio_generic_chip_init() - Initialize a generic GPIO chip.
+ * @chip: Generic GPIO chip to set up.
+ * @cfg: Generic GPIO chip configuration.
+ *
+ * Return: 0 on success, a negative error number on failure.
*/
-int bgpio_init(struct gpio_chip *gc, struct device *dev,
- unsigned long sz, void __iomem *dat, void __iomem *set,
- void __iomem *clr, void __iomem *dirout, void __iomem *dirin,
- unsigned long flags)
+int gpio_generic_chip_init(struct gpio_generic_chip *chip,
+ const struct gpio_generic_chip_config *cfg)
{
+ struct gpio_chip *gc = &chip->gc;
+ unsigned long flags = cfg->flags;
+ struct device *dev = cfg->dev;
int ret;
- if (!is_power_of_2(sz))
+ if (!is_power_of_2(cfg->sz))
return -EINVAL;
- gc->bgpio_bits = sz * 8;
- if (gc->bgpio_bits > BITS_PER_LONG)
+ chip->bits = cfg->sz * 8;
+ if (chip->bits > BITS_PER_LONG)
return -EINVAL;
- raw_spin_lock_init(&gc->bgpio_lock);
+ raw_spin_lock_init(&chip->lock);
gc->parent = dev;
gc->label = dev_name(dev);
gc->base = -1;
gc->request = bgpio_request;
- gc->be_bits = !!(flags & BGPIOF_BIG_ENDIAN);
+ chip->be_bits = !!(flags & GPIO_GENERIC_BIG_ENDIAN);
ret = gpiochip_get_ngpios(gc, dev);
if (ret)
- gc->ngpio = gc->bgpio_bits;
+ gc->ngpio = chip->bits;
- ret = bgpio_setup_io(gc, dat, set, clr, flags);
+ ret = bgpio_setup_io(chip, cfg);
if (ret)
return ret;
- ret = bgpio_setup_accessors(dev, gc, flags & BGPIOF_BIG_ENDIAN_BYTE_ORDER);
+ ret = bgpio_setup_accessors(dev, chip,
+ flags & GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER);
if (ret)
return ret;
- ret = bgpio_setup_direction(gc, dirout, dirin, flags);
+ ret = bgpio_setup_direction(chip, cfg);
if (ret)
return ret;
- if (flags & BGPIOF_PINCTRL_BACKEND) {
- gc->bgpio_pinctrl = true;
+ if (flags & GPIO_GENERIC_PINCTRL_BACKEND) {
+ chip->pinctrl = true;
/* Currently this callback is only used for pincontrol */
gc->free = gpiochip_generic_free;
}
- gc->bgpio_data = gc->read_reg(gc->reg_dat);
- if (gc->set_rv == bgpio_set_set &&
- !(flags & BGPIOF_UNREADABLE_REG_SET))
- gc->bgpio_data = gc->read_reg(gc->reg_set);
+ chip->sdata = chip->read_reg(chip->reg_dat);
+ if (gc->set == bgpio_set_set &&
+ !(flags & GPIO_GENERIC_UNREADABLE_REG_SET))
+ chip->sdata = chip->read_reg(chip->reg_set);
- if (flags & BGPIOF_UNREADABLE_REG_DIR)
- gc->bgpio_dir_unreadable = true;
+ if (flags & GPIO_GENERIC_UNREADABLE_REG_DIR)
+ chip->dir_unreadable = true;
/*
* Inspect hardware to find initial direction setting.
*/
- if ((gc->reg_dir_out || gc->reg_dir_in) &&
- !(flags & BGPIOF_UNREADABLE_REG_DIR)) {
- if (gc->reg_dir_out)
- gc->bgpio_dir = gc->read_reg(gc->reg_dir_out);
- else if (gc->reg_dir_in)
- gc->bgpio_dir = ~gc->read_reg(gc->reg_dir_in);
+ if ((chip->reg_dir_out || chip->reg_dir_in) &&
+ !(flags & GPIO_GENERIC_UNREADABLE_REG_DIR)) {
+ if (chip->reg_dir_out)
+ chip->sdir = chip->read_reg(chip->reg_dir_out);
+ else if (chip->reg_dir_in)
+ chip->sdir = ~chip->read_reg(chip->reg_dir_in);
/*
* If we have two direction registers, synchronise
* input setting to output setting; the library
* cannot handle a line being input and output at
* the same time.
*/
- if (gc->reg_dir_out && gc->reg_dir_in)
- gc->write_reg(gc->reg_dir_in, ~gc->bgpio_dir);
+ if (chip->reg_dir_out && chip->reg_dir_in)
+ chip->write_reg(chip->reg_dir_in, ~chip->sdir);
}
return ret;
}
-EXPORT_SYMBOL_GPL(bgpio_init);
+EXPORT_SYMBOL_GPL(gpio_generic_chip_init);
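
As a usage illustration of the flags handled above (register offsets and names are hypothetical): a controller with a write-only direction register would set GPIO_GENERIC_UNREADABLE_REG_DIR so the shadow direction word is trusted instead of a hardware read-back:

#include <linux/device.h>
#include <linux/gpio/generic.h>
#include <linux/io.h>

static int my_wo_dir_init(struct device *dev,
			  struct gpio_generic_chip *chip,
			  void __iomem *base)
{
	struct gpio_generic_chip_config cfg = {
		.dev = dev,
		.sz = 4,
		.dat = base + 0x00,
		.dirout = base + 0x04,	/* write-only in this sketch */
		.flags = GPIO_GENERIC_UNREADABLE_REG_DIR,
	};

	return gpio_generic_chip_init(chip, &cfg);
}
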
#if IS_ENABLED(CONFIG_GPIO_GENERIC_PLATFORM)
@@ -730,12 +734,15 @@ static const struct of_device_id bgpio_of_match[] = {
{ .compatible = "brcm,bcm6345-gpio" },
{ .compatible = "wd,mbl-gpio" },
{ .compatible = "ni,169445-nand-gpio" },
+ { .compatible = "intel,ixp4xx-expansion-bus-mmio-gpio" },
{ }
};
MODULE_DEVICE_TABLE(of, bgpio_of_match);
static int bgpio_pdev_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
+ struct gpio_generic_chip *gen_gc;
struct device *dev = &pdev->dev;
struct resource *r;
void __iomem *dat;
@@ -747,7 +754,6 @@ static int bgpio_pdev_probe(struct platform_device *pdev)
unsigned long flags = 0;
unsigned int base;
int err;
- struct gpio_chip *gc;
const char *label;
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dat");
@@ -776,23 +782,34 @@ static int bgpio_pdev_probe(struct platform_device *pdev)
if (IS_ERR(dirin))
return PTR_ERR(dirin);
- gc = devm_kzalloc(&pdev->dev, sizeof(*gc), GFP_KERNEL);
- if (!gc)
+ gen_gc = devm_kzalloc(&pdev->dev, sizeof(*gen_gc), GFP_KERNEL);
+ if (!gen_gc)
return -ENOMEM;
if (device_is_big_endian(dev))
- flags |= BGPIOF_BIG_ENDIAN_BYTE_ORDER;
+ flags |= GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER;
if (device_property_read_bool(dev, "no-output"))
- flags |= BGPIOF_NO_OUTPUT;
-
- err = bgpio_init(gc, dev, sz, dat, set, clr, dirout, dirin, flags);
+ flags |= GPIO_GENERIC_NO_OUTPUT;
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = sz,
+ .dat = dat,
+ .set = set,
+ .clr = clr,
+ .dirout = dirout,
+ .dirin = dirin,
+ .flags = flags,
+ };
+
+ err = gpio_generic_chip_init(gen_gc, &config);
if (err)
return err;
err = device_property_read_string(dev, "label", &label);
if (!err)
- gc->label = label;
+ gen_gc->gc.label = label;
/*
* This property *must not* be used in device-tree sources, it's only
@@ -800,11 +817,11 @@ static int bgpio_pdev_probe(struct platform_device *pdev)
*/
err = device_property_read_u32(dev, "gpio-mmio,base", &base);
if (!err && base <= INT_MAX)
- gc->base = base;
+ gen_gc->gc.base = base;
- platform_set_drvdata(pdev, gc);
+ platform_set_drvdata(pdev, &gen_gc->gc);
- return devm_gpiochip_add_data(&pdev->dev, gc, NULL);
+ return devm_gpiochip_add_data(&pdev->dev, &gen_gc->gc, NULL);
}
static const struct platform_device_id bgpio_id_table[] = {
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 266c0953d914..a7d69f3835c1 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -449,9 +449,9 @@ static int gpio_mockup_probe(struct platform_device *pdev)
gc->owner = THIS_MODULE;
gc->parent = dev;
gc->get = gpio_mockup_get;
- gc->set_rv = gpio_mockup_set;
+ gc->set = gpio_mockup_set;
gc->get_multiple = gpio_mockup_get_multiple;
- gc->set_multiple_rv = gpio_mockup_set_multiple;
+ gc->set_multiple = gpio_mockup_set_multiple;
gc->direction_output = gpio_mockup_dirout;
gc->direction_input = gpio_mockup_dirin;
gc->get_direction = gpio_mockup_get_direction;
diff --git a/drivers/gpio/gpio-moxtet.c b/drivers/gpio/gpio-moxtet.c
index 27dd9c3e7b77..4eb9f1a2779b 100644
--- a/drivers/gpio/gpio-moxtet.c
+++ b/drivers/gpio/gpio-moxtet.c
@@ -140,7 +140,7 @@ static int moxtet_gpio_probe(struct device *dev)
chip->gpio_chip.direction_input = moxtet_gpio_direction_input;
chip->gpio_chip.direction_output = moxtet_gpio_direction_output;
chip->gpio_chip.get = moxtet_gpio_get_value;
- chip->gpio_chip.set_rv = moxtet_gpio_set_value;
+ chip->gpio_chip.set = moxtet_gpio_set_value;
chip->gpio_chip.base = -1;
chip->gpio_chip.ngpio = MOXTET_GPIO_NGPIOS;
diff --git a/drivers/gpio/gpio-mpc5200.c b/drivers/gpio/gpio-mpc5200.c
index 40d587176a75..00f209157fd0 100644
--- a/drivers/gpio/gpio-mpc5200.c
+++ b/drivers/gpio/gpio-mpc5200.c
@@ -8,7 +8,7 @@
#include <linux/of.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/gpio/legacy-of-mm-gpiochip.h>
+#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/module.h>
@@ -19,7 +19,8 @@
static DEFINE_SPINLOCK(gpio_lock);
struct mpc52xx_gpiochip {
- struct of_mm_gpio_chip mmchip;
+ struct gpio_chip gc;
+ void __iomem *regs;
unsigned int shadow_dvo;
unsigned int shadow_gpioe;
unsigned int shadow_ddr;
@@ -43,8 +44,8 @@ struct mpc52xx_gpiochip {
*/
static int mpc52xx_wkup_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
+ struct mpc52xx_gpio_wkup __iomem *regs = chip->regs;
unsigned int ret;
ret = (in_8(&regs->wkup_ival) >> (7 - gpio)) & 1;
@@ -57,9 +58,8 @@ static int mpc52xx_wkup_gpio_get(struct gpio_chip *gc, unsigned int gpio)
static inline void
__mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
- struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpio_wkup __iomem *regs = chip->regs;
if (val)
chip->shadow_dvo |= 1 << (7 - gpio);
@@ -87,9 +87,8 @@ mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
static int mpc52xx_wkup_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
- struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpio_wkup __iomem *regs = chip->regs;
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
@@ -110,9 +109,8 @@ static int mpc52xx_wkup_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
static int
mpc52xx_wkup_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
+ struct mpc52xx_gpio_wkup __iomem *regs = chip->regs;
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
@@ -136,30 +134,41 @@ mpc52xx_wkup_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
static int mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev)
{
+ struct device *dev = &ofdev->dev;
+ struct device_node *np = dev->of_node;
struct mpc52xx_gpiochip *chip;
struct mpc52xx_gpio_wkup __iomem *regs;
struct gpio_chip *gc;
int ret;
- chip = devm_kzalloc(&ofdev->dev, sizeof(*chip), GFP_KERNEL);
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
platform_set_drvdata(ofdev, chip);
- gc = &chip->mmchip.gc;
+ gc = &chip->gc;
+ gc->base = -1;
gc->ngpio = 8;
gc->direction_input = mpc52xx_wkup_gpio_dir_in;
gc->direction_output = mpc52xx_wkup_gpio_dir_out;
gc->get = mpc52xx_wkup_gpio_get;
- gc->set_rv = mpc52xx_wkup_gpio_set;
+ gc->set = mpc52xx_wkup_gpio_set;
+
+ gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np);
+ if (!gc->label)
+ return -ENOMEM;
- ret = of_mm_gpiochip_add_data(ofdev->dev.of_node, &chip->mmchip, chip);
+ chip->regs = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(chip->regs))
+ return PTR_ERR(chip->regs);
+
+ ret = devm_gpiochip_add_data(dev, gc, chip);
if (ret)
return ret;
- regs = chip->mmchip.regs;
+ regs = chip->regs;
chip->shadow_gpioe = in_8(&regs->wkup_gpioe);
chip->shadow_ddr = in_8(&regs->wkup_ddr);
chip->shadow_dvo = in_8(&regs->wkup_dvo);
@@ -167,13 +176,6 @@ static int mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev)
return 0;
}
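
The mpc52xx conversion above drops the of_mm_gpio_chip wrapper in favour of plain devm helpers; managed registration is also what makes the .remove callback removed below redundant. A hedged sketch of the resulting probe tail (struct and function names are illustrative, devm_of_iomap() and devm_gpiochip_add_data() are the real helpers used):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/of_address.h>

struct my_of_gpio {
	struct gpio_chip gc;
	void __iomem *regs;
};

static int my_of_gpio_register(struct device *dev, struct my_of_gpio *chip)
{
	chip->gc.label = devm_kasprintf(dev, GFP_KERNEL, "%pOF",
					dev->of_node);
	if (!chip->gc.label)
		return -ENOMEM;

	chip->regs = devm_of_iomap(dev, dev->of_node, 0, NULL);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	/* Managed: the chip is unregistered automatically on detach. */
	return devm_gpiochip_add_data(dev, &chip->gc, chip);
}
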
-static void mpc52xx_gpiochip_remove(struct platform_device *ofdev)
-{
- struct mpc52xx_gpiochip *chip = platform_get_drvdata(ofdev);
-
- of_mm_gpiochip_remove(&chip->mmchip);
-}
-
static const struct of_device_id mpc52xx_wkup_gpiochip_match[] = {
{ .compatible = "fsl,mpc5200-gpio-wkup", },
{}
@@ -185,7 +187,6 @@ static struct platform_driver mpc52xx_wkup_gpiochip_driver = {
.of_match_table = mpc52xx_wkup_gpiochip_match,
},
.probe = mpc52xx_wkup_gpiochip_probe,
- .remove = mpc52xx_gpiochip_remove,
};
/*
@@ -207,8 +208,8 @@ static struct platform_driver mpc52xx_wkup_gpiochip_driver = {
*/
static int mpc52xx_simple_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
+ struct mpc52xx_gpio __iomem *regs = chip->regs;
unsigned int ret;
ret = (in_be32(&regs->simple_ival) >> (31 - gpio)) & 1;
@@ -219,9 +220,8 @@ static int mpc52xx_simple_gpio_get(struct gpio_chip *gc, unsigned int gpio)
static inline void
__mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
- struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpio __iomem *regs = chip->regs;
if (val)
chip->shadow_dvo |= 1 << (31 - gpio);
@@ -248,9 +248,8 @@ mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
static int mpc52xx_simple_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
- struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpio __iomem *regs = chip->regs;
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
@@ -271,9 +270,8 @@ static int mpc52xx_simple_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
static int
mpc52xx_simple_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
- struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpio __iomem *regs = chip->regs;
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
@@ -298,30 +296,41 @@ mpc52xx_simple_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
static int mpc52xx_simple_gpiochip_probe(struct platform_device *ofdev)
{
+ struct device *dev = &ofdev->dev;
+ struct device_node *np = dev->of_node;
struct mpc52xx_gpiochip *chip;
struct gpio_chip *gc;
struct mpc52xx_gpio __iomem *regs;
int ret;
- chip = devm_kzalloc(&ofdev->dev, sizeof(*chip), GFP_KERNEL);
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
platform_set_drvdata(ofdev, chip);
- gc = &chip->mmchip.gc;
+ gc = &chip->gc;
+ gc->base = -1;
gc->ngpio = 32;
gc->direction_input = mpc52xx_simple_gpio_dir_in;
gc->direction_output = mpc52xx_simple_gpio_dir_out;
gc->get = mpc52xx_simple_gpio_get;
- gc->set_rv = mpc52xx_simple_gpio_set;
+ gc->set = mpc52xx_simple_gpio_set;
+
+ gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np);
+ if (!gc->label)
+ return -ENOMEM;
+
+ chip->regs = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(chip->regs))
+ return PTR_ERR(chip->regs);
- ret = of_mm_gpiochip_add_data(ofdev->dev.of_node, &chip->mmchip, chip);
+ ret = devm_gpiochip_add_data(dev, gc, chip);
if (ret)
return ret;
- regs = chip->mmchip.regs;
+ regs = chip->regs;
chip->shadow_gpioe = in_be32(&regs->simple_gpioe);
chip->shadow_ddr = in_be32(&regs->simple_ddr);
chip->shadow_dvo = in_be32(&regs->simple_dvo);
@@ -340,7 +349,6 @@ static struct platform_driver mpc52xx_simple_gpiochip_driver = {
.of_match_table = mpc52xx_simple_gpiochip_match,
},
.probe = mpc52xx_simple_gpiochip_probe,
- .remove = mpc52xx_gpiochip_remove,
};
static struct platform_driver * const drivers[] = {
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 121efdd71e45..bfe828734ee1 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -9,6 +9,7 @@
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -34,7 +35,7 @@
#define GPIO_IBE 0x18
struct mpc8xxx_gpio_chip {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *regs;
raw_spinlock_t lock;
@@ -66,9 +67,11 @@ static int mpc8572_gpio_get(struct gpio_chip *gc, unsigned int gpio)
struct mpc8xxx_gpio_chip *mpc8xxx_gc = gpiochip_get_data(gc);
u32 out_mask, out_shadow;
- out_mask = gc->read_reg(mpc8xxx_gc->regs + GPIO_DIR);
- val = gc->read_reg(mpc8xxx_gc->regs + GPIO_DAT) & ~out_mask;
- out_shadow = gc->bgpio_data & out_mask;
+ out_mask = gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_DIR);
+ val = gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_DAT) & ~out_mask;
+ out_shadow = mpc8xxx_gc->chip.sdata & out_mask;
return !!((val | out_shadow) & mpc_pin2mask(gpio));
}
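
The accessor rewrite above replaces direct gc->read_reg()/gc->write_reg() calls with wrappers that take the generic chip. A minimal sketch of the pattern (GPIO_DIR and struct mpc8xxx_gpio_chip are assumed from the driver above; the unsigned long return type is an assumption):

static inline unsigned long my_read_dir(struct mpc8xxx_gpio_chip *mpc8xxx_gc)
{
	return gpio_generic_read_reg(&mpc8xxx_gc->chip,
				     mpc8xxx_gc->regs + GPIO_DIR);
}
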
@@ -108,12 +111,13 @@ static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
static irqreturn_t mpc8xxx_gpio_irq_cascade(int irq, void *data)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = data;
- struct gpio_chip *gc = &mpc8xxx_gc->gc;
unsigned long mask;
int i;
- mask = gc->read_reg(mpc8xxx_gc->regs + GPIO_IER)
- & gc->read_reg(mpc8xxx_gc->regs + GPIO_IMR);
+ mask = gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_IER) &
+ gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_IMR);
for_each_set_bit(i, &mask, 32)
generic_handle_domain_irq(mpc8xxx_gc->irq, 31 - i);
@@ -124,15 +128,17 @@ static void mpc8xxx_irq_unmask(struct irq_data *d)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- struct gpio_chip *gc = &mpc8xxx_gc->gc;
+ struct gpio_chip *gc = &mpc8xxx_gc->chip.gc;
unsigned long flags;
gpiochip_enable_irq(gc, hwirq);
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- gc->write_reg(mpc8xxx_gc->regs + GPIO_IMR,
- gc->read_reg(mpc8xxx_gc->regs + GPIO_IMR)
+ gpio_generic_write_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_IMR,
+ gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_IMR)
| mpc_pin2mask(irqd_to_hwirq(d)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
@@ -142,13 +148,14 @@ static void mpc8xxx_irq_mask(struct irq_data *d)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- struct gpio_chip *gc = &mpc8xxx_gc->gc;
+ struct gpio_chip *gc = &mpc8xxx_gc->chip.gc;
unsigned long flags;
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- gc->write_reg(mpc8xxx_gc->regs + GPIO_IMR,
- gc->read_reg(mpc8xxx_gc->regs + GPIO_IMR)
+ gpio_generic_write_reg(&mpc8xxx_gc->chip, mpc8xxx_gc->regs + GPIO_IMR,
+ gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_IMR)
& ~mpc_pin2mask(irqd_to_hwirq(d)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
@@ -159,32 +166,34 @@ static void mpc8xxx_irq_mask(struct irq_data *d)
static void mpc8xxx_irq_ack(struct irq_data *d)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
- struct gpio_chip *gc = &mpc8xxx_gc->gc;
- gc->write_reg(mpc8xxx_gc->regs + GPIO_IER,
+ gpio_generic_write_reg(&mpc8xxx_gc->chip, mpc8xxx_gc->regs + GPIO_IER,
mpc_pin2mask(irqd_to_hwirq(d)));
}
static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
- struct gpio_chip *gc = &mpc8xxx_gc->gc;
unsigned long flags;
switch (flow_type) {
case IRQ_TYPE_EDGE_FALLING:
case IRQ_TYPE_LEVEL_LOW:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- gc->write_reg(mpc8xxx_gc->regs + GPIO_ICR,
- gc->read_reg(mpc8xxx_gc->regs + GPIO_ICR)
+ gpio_generic_write_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_ICR,
+ gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_ICR)
| mpc_pin2mask(irqd_to_hwirq(d)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
case IRQ_TYPE_EDGE_BOTH:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- gc->write_reg(mpc8xxx_gc->regs + GPIO_ICR,
- gc->read_reg(mpc8xxx_gc->regs + GPIO_ICR)
+ gpio_generic_write_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_ICR,
+ gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_ICR)
& ~mpc_pin2mask(irqd_to_hwirq(d)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
@@ -199,7 +208,6 @@ static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
static int mpc512x_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
- struct gpio_chip *gc = &mpc8xxx_gc->gc;
unsigned long gpio = irqd_to_hwirq(d);
void __iomem *reg;
unsigned int shift;
@@ -217,7 +225,9 @@ static int mpc512x_irq_set_type(struct irq_data *d, unsigned int flow_type)
case IRQ_TYPE_EDGE_FALLING:
case IRQ_TYPE_LEVEL_LOW:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- gc->write_reg(reg, (gc->read_reg(reg) & ~(3 << shift))
+ gpio_generic_write_reg(&mpc8xxx_gc->chip, reg,
+ (gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ reg) & ~(3 << shift))
| (2 << shift));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
@@ -225,14 +235,18 @@ static int mpc512x_irq_set_type(struct irq_data *d, unsigned int flow_type)
case IRQ_TYPE_EDGE_RISING:
case IRQ_TYPE_LEVEL_HIGH:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- gc->write_reg(reg, (gc->read_reg(reg) & ~(3 << shift))
+ gpio_generic_write_reg(&mpc8xxx_gc->chip, reg,
+ (gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ reg) & ~(3 << shift))
| (1 << shift));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
case IRQ_TYPE_EDGE_BOTH:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- gc->write_reg(reg, (gc->read_reg(reg) & ~(3 << shift)));
+ gpio_generic_write_reg(&mpc8xxx_gc->chip, reg,
+ (gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ reg) & ~(3 << shift)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
@@ -309,6 +323,7 @@ static const struct of_device_id mpc8xxx_gpio_ids[] = {
static int mpc8xxx_probe(struct platform_device *pdev)
{
const struct mpc8xxx_gpio_devtype *devtype = NULL;
+ struct gpio_generic_chip_config config;
struct mpc8xxx_gpio_chip *mpc8xxx_gc;
struct device *dev = &pdev->dev;
struct fwnode_handle *fwnode;
@@ -327,26 +342,28 @@ static int mpc8xxx_probe(struct platform_device *pdev)
if (IS_ERR(mpc8xxx_gc->regs))
return PTR_ERR(mpc8xxx_gc->regs);
- gc = &mpc8xxx_gc->gc;
+ gc = &mpc8xxx_gc->chip.gc;
gc->parent = dev;
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = mpc8xxx_gc->regs + GPIO_DAT,
+ .dirout = mpc8xxx_gc->regs + GPIO_DIR,
+ .flags = GPIO_GENERIC_BIG_ENDIAN,
+ };
+
if (device_property_read_bool(dev, "little-endian")) {
- ret = bgpio_init(gc, dev, 4, mpc8xxx_gc->regs + GPIO_DAT,
- NULL, NULL, mpc8xxx_gc->regs + GPIO_DIR,
- NULL, BGPIOF_BIG_ENDIAN);
- if (ret)
- return ret;
dev_dbg(dev, "GPIO registers are LITTLE endian\n");
} else {
- ret = bgpio_init(gc, dev, 4, mpc8xxx_gc->regs + GPIO_DAT,
- NULL, NULL, mpc8xxx_gc->regs + GPIO_DIR,
- NULL, BGPIOF_BIG_ENDIAN
- | BGPIOF_BIG_ENDIAN_BYTE_ORDER);
- if (ret)
- return ret;
+ config.flags |= GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER;
dev_dbg(dev, "GPIO registers are BIG endian\n");
}
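
For clarity on the two flags combined above, as this conversion uses them: GPIO_GENERIC_BIG_ENDIAN selects big-endian bit numbering within a register (line 0 maps to the most significant bit, per bgpio_line2mask() earlier in this series), while GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER selects the big-endian register accessors (bgpio_read32be()/bgpio_write32be()); a "little-endian" property therefore keeps big-endian bit order but native byte order.
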
+ ret = gpio_generic_chip_init(&mpc8xxx_gc->chip, &config);
+ if (ret)
+ return ret;
+
mpc8xxx_gc->direction_output = gc->direction_output;
devtype = device_get_match_data(dev);
@@ -379,10 +396,14 @@ static int mpc8xxx_probe(struct platform_device *pdev)
device_is_compatible(dev, "fsl,ls1028a-gpio") ||
device_is_compatible(dev, "fsl,ls1088a-gpio") ||
is_acpi_node(fwnode)) {
- gc->write_reg(mpc8xxx_gc->regs + GPIO_IBE, 0xffffffff);
+ gpio_generic_write_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_IBE, 0xffffffff);
/* Also, latch state of GPIOs configured as output by bootloader. */
- gc->bgpio_data = gc->read_reg(mpc8xxx_gc->regs + GPIO_DAT) &
- gc->read_reg(mpc8xxx_gc->regs + GPIO_DIR);
+ mpc8xxx_gc->chip.sdata =
+ gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_DAT) &
+ gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_DIR);
}
ret = devm_gpiochip_add_data(dev, gc, mpc8xxx_gc);
@@ -405,8 +426,10 @@ static int mpc8xxx_probe(struct platform_device *pdev)
return 0;
/* ack and mask all irqs */
- gc->write_reg(mpc8xxx_gc->regs + GPIO_IER, 0xffffffff);
- gc->write_reg(mpc8xxx_gc->regs + GPIO_IMR, 0);
+ gpio_generic_write_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_IER, 0xffffffff);
+ gpio_generic_write_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_IMR, 0);
ret = devm_request_irq(dev, mpc8xxx_gc->irqn,
mpc8xxx_gpio_irq_cascade,
diff --git a/drivers/gpio/gpio-mpfs.c b/drivers/gpio/gpio-mpfs.c
index 3415cb7ebb0f..9468795b9634 100644
--- a/drivers/gpio/gpio-mpfs.c
+++ b/drivers/gpio/gpio-mpfs.c
@@ -69,7 +69,7 @@ static int mpfs_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio_in
struct mpfs_gpio_chip *mpfs_gpio = gpiochip_get_data(gc);
regmap_update_bits(mpfs_gpio->regs, MPFS_GPIO_CTRL(gpio_index),
- MPFS_GPIO_DIR_MASK, MPFS_GPIO_EN_IN);
+ MPFS_GPIO_DIR_MASK, MPFS_GPIO_EN_OUT | MPFS_GPIO_EN_OUT_BUF);
regmap_update_bits(mpfs_gpio->regs, mpfs_gpio->offsets->outp, BIT(gpio_index),
value << gpio_index);
@@ -150,7 +150,7 @@ static int mpfs_gpio_probe(struct platform_device *pdev)
mpfs_gpio->gc.direction_output = mpfs_gpio_direction_output;
mpfs_gpio->gc.get_direction = mpfs_gpio_get_direction;
mpfs_gpio->gc.get = mpfs_gpio_get;
- mpfs_gpio->gc.set_rv = mpfs_gpio_set;
+ mpfs_gpio->gc.set = mpfs_gpio_set;
mpfs_gpio->gc.base = -1;
mpfs_gpio->gc.ngpio = ngpios;
mpfs_gpio->gc.label = dev_name(dev);
diff --git a/drivers/gpio/gpio-mpsse.c b/drivers/gpio/gpio-mpsse.c
index b17de08e9e03..9f42bb30b4ec 100644
--- a/drivers/gpio/gpio-mpsse.c
+++ b/drivers/gpio/gpio-mpsse.c
@@ -448,9 +448,9 @@ static int gpio_mpsse_probe(struct usb_interface *interface,
priv->gpio.direction_input = gpio_mpsse_direction_input;
priv->gpio.direction_output = gpio_mpsse_direction_output;
priv->gpio.get = gpio_mpsse_gpio_get;
- priv->gpio.set_rv = gpio_mpsse_gpio_set;
+ priv->gpio.set = gpio_mpsse_gpio_set;
priv->gpio.get_multiple = gpio_mpsse_get_multiple;
- priv->gpio.set_multiple_rv = gpio_mpsse_set_multiple;
+ priv->gpio.set_multiple = gpio_mpsse_set_multiple;
priv->gpio.base = -1;
priv->gpio.ngpio = 16;
priv->gpio.offset = priv->intf_id * priv->gpio.ngpio;
diff --git a/drivers/gpio/gpio-msc313.c b/drivers/gpio/gpio-msc313.c
index 992339a89d19..b0cccd856840 100644
--- a/drivers/gpio/gpio-msc313.c
+++ b/drivers/gpio/gpio-msc313.c
@@ -658,7 +658,7 @@ static int msc313_gpio_probe(struct platform_device *pdev)
gpiochip->direction_input = msc313_gpio_direction_input;
gpiochip->direction_output = msc313_gpio_direction_output;
gpiochip->get = msc313_gpio_get;
- gpiochip->set_rv = msc313_gpio_set;
+ gpiochip->set = msc313_gpio_set;
gpiochip->base = -1;
gpiochip->ngpio = gpio->gpio_data->num;
gpiochip->names = gpio->gpio_data->names;
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
index 93facbebb80e..91230be51587 100644
--- a/drivers/gpio/gpio-mt7621.c
+++ b/drivers/gpio/gpio-mt7621.c
@@ -6,11 +6,11 @@
#include <linux/err.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/spinlock.h>
#define MTK_BANK_CNT 3
#define MTK_BANK_WIDTH 32
@@ -30,8 +30,7 @@
struct mtk_gc {
struct irq_chip irq_chip;
- struct gpio_chip chip;
- spinlock_t lock;
+ struct gpio_generic_chip chip;
int bank;
u32 rising;
u32 falling;
@@ -59,27 +58,29 @@ struct mtk {
static inline struct mtk_gc *
to_mediatek_gpio(struct gpio_chip *chip)
{
- return container_of(chip, struct mtk_gc, chip);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(chip);
+
+ return container_of(gen_gc, struct mtk_gc, chip);
}
static inline void
mtk_gpio_w32(struct mtk_gc *rg, u32 offset, u32 val)
{
- struct gpio_chip *gc = &rg->chip;
+ struct gpio_chip *gc = &rg->chip.gc;
struct mtk *mtk = gpiochip_get_data(gc);
offset = (rg->bank * GPIO_BANK_STRIDE) + offset;
- gc->write_reg(mtk->base + offset, val);
+ gpio_generic_write_reg(&rg->chip, mtk->base + offset, val);
}
static inline u32
mtk_gpio_r32(struct mtk_gc *rg, u32 offset)
{
- struct gpio_chip *gc = &rg->chip;
+ struct gpio_chip *gc = &rg->chip.gc;
struct mtk *mtk = gpiochip_get_data(gc);
offset = (rg->bank * GPIO_BANK_STRIDE) + offset;
- return gc->read_reg(mtk->base + offset);
+ return gpio_generic_read_reg(&rg->chip, mtk->base + offset);
}
static irqreturn_t
@@ -108,12 +109,12 @@ mediatek_gpio_irq_unmask(struct irq_data *d)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct mtk_gc *rg = to_mediatek_gpio(gc);
int pin = d->hwirq;
- unsigned long flags;
u32 rise, fall, high, low;
gpiochip_enable_irq(gc, d->hwirq);
- spin_lock_irqsave(&rg->lock, flags);
+ guard(gpio_generic_lock_irqsave)(&rg->chip);
+
rise = mtk_gpio_r32(rg, GPIO_REG_REDGE);
fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE);
high = mtk_gpio_r32(rg, GPIO_REG_HLVL);
@@ -122,7 +123,6 @@ mediatek_gpio_irq_unmask(struct irq_data *d)
mtk_gpio_w32(rg, GPIO_REG_FEDGE, fall | (BIT(pin) & rg->falling));
mtk_gpio_w32(rg, GPIO_REG_HLVL, high | (BIT(pin) & rg->hlevel));
mtk_gpio_w32(rg, GPIO_REG_LLVL, low | (BIT(pin) & rg->llevel));
- spin_unlock_irqrestore(&rg->lock, flags);
}
static void
@@ -131,19 +131,18 @@ mediatek_gpio_irq_mask(struct irq_data *d)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct mtk_gc *rg = to_mediatek_gpio(gc);
int pin = d->hwirq;
- unsigned long flags;
u32 rise, fall, high, low;
- spin_lock_irqsave(&rg->lock, flags);
- rise = mtk_gpio_r32(rg, GPIO_REG_REDGE);
- fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE);
- high = mtk_gpio_r32(rg, GPIO_REG_HLVL);
- low = mtk_gpio_r32(rg, GPIO_REG_LLVL);
- mtk_gpio_w32(rg, GPIO_REG_FEDGE, fall & ~BIT(pin));
- mtk_gpio_w32(rg, GPIO_REG_REDGE, rise & ~BIT(pin));
- mtk_gpio_w32(rg, GPIO_REG_HLVL, high & ~BIT(pin));
- mtk_gpio_w32(rg, GPIO_REG_LLVL, low & ~BIT(pin));
- spin_unlock_irqrestore(&rg->lock, flags);
+ scoped_guard(gpio_generic_lock_irqsave, &rg->chip) {
+ rise = mtk_gpio_r32(rg, GPIO_REG_REDGE);
+ fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE);
+ high = mtk_gpio_r32(rg, GPIO_REG_HLVL);
+ low = mtk_gpio_r32(rg, GPIO_REG_LLVL);
+ mtk_gpio_w32(rg, GPIO_REG_FEDGE, fall & ~BIT(pin));
+ mtk_gpio_w32(rg, GPIO_REG_REDGE, rise & ~BIT(pin));
+ mtk_gpio_w32(rg, GPIO_REG_HLVL, high & ~BIT(pin));
+ mtk_gpio_w32(rg, GPIO_REG_LLVL, low & ~BIT(pin));
+ }
gpiochip_disable_irq(gc, d->hwirq);
}
@@ -220,6 +219,7 @@ static const struct irq_chip mt7621_irq_chip = {
static int
mediatek_gpio_bank_probe(struct device *dev, int bank)
{
+ struct gpio_generic_chip_config config;
struct mtk *mtk = dev_get_drvdata(dev);
struct mtk_gc *rg;
void __iomem *dat, *set, *ctrl, *diro;
@@ -228,7 +228,6 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
rg = &mtk->gc_map[bank];
memset(rg, 0, sizeof(*rg));
- spin_lock_init(&rg->lock);
rg->bank = bank;
dat = mtk->base + GPIO_REG_DATA + (rg->bank * GPIO_BANK_STRIDE);
@@ -236,21 +235,30 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
ctrl = mtk->base + GPIO_REG_DCLR + (rg->bank * GPIO_BANK_STRIDE);
diro = mtk->base + GPIO_REG_CTRL + (rg->bank * GPIO_BANK_STRIDE);
- ret = bgpio_init(&rg->chip, dev, 4, dat, set, ctrl, diro, NULL,
- BGPIOF_NO_SET_ON_INPUT);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = dat,
+ .set = set,
+ .clr = ctrl,
+ .dirout = diro,
+ .flags = GPIO_GENERIC_NO_SET_ON_INPUT,
+ };
+
+ ret = gpio_generic_chip_init(&rg->chip, &config);
if (ret) {
- dev_err(dev, "bgpio_init() failed\n");
+ dev_err(dev, "failed to initialize generic GPIO chip\n");
return ret;
}
- rg->chip.of_gpio_n_cells = 2;
- rg->chip.of_xlate = mediatek_gpio_xlate;
- rg->chip.label = devm_kasprintf(dev, GFP_KERNEL, "%s-bank%d",
+ rg->chip.gc.of_gpio_n_cells = 2;
+ rg->chip.gc.of_xlate = mediatek_gpio_xlate;
+ rg->chip.gc.label = devm_kasprintf(dev, GFP_KERNEL, "%s-bank%d",
dev_name(dev), bank);
- if (!rg->chip.label)
+ if (!rg->chip.gc.label)
return -ENOMEM;
- rg->chip.offset = bank * MTK_BANK_WIDTH;
+ rg->chip.gc.offset = bank * MTK_BANK_WIDTH;
if (mtk->gpio_irq) {
struct gpio_irq_chip *girq;
@@ -261,7 +269,7 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
*/
ret = devm_request_irq(dev, mtk->gpio_irq,
mediatek_gpio_irq_handler, IRQF_SHARED,
- rg->chip.label, &rg->chip);
+ rg->chip.gc.label, &rg->chip.gc);
if (ret) {
dev_err(dev, "Error requesting IRQ %d: %d\n",
@@ -269,7 +277,7 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
return ret;
}
- girq = &rg->chip.irq;
+ girq = &rg->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &mt7621_irq_chip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
@@ -279,17 +287,17 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
girq->handler = handle_simple_irq;
}
- ret = devm_gpiochip_add_data(dev, &rg->chip, mtk);
+ ret = devm_gpiochip_add_data(dev, &rg->chip.gc, mtk);
if (ret < 0) {
dev_err(dev, "Could not register gpio %d, ret=%d\n",
- rg->chip.ngpio, ret);
+ rg->chip.gc.ngpio, ret);
return ret;
}
/* set polarity to low for all gpios */
mtk_gpio_w32(rg, GPIO_REG_POL, 0);
- dev_info(dev, "registering %d gpios\n", rg->chip.ngpio);
+ dev_info(dev, "registering %d gpios\n", rg->chip.gc.ngpio);
return 0;
}
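
The mediatek conversion above is the template repeated throughout this series: the positional bgpio_init() call becomes a designated-initializer struct gpio_generic_chip_config passed to gpio_generic_chip_init(), BGPIOF_* flags become GPIO_GENERIC_* flags, gpio_chip members move one level down to the embedded .gc, and open-coded spin_lock_irqsave()/spin_unlock_irqrestore() pairs become cleanup.h guard()/scoped_guard() sections on the generic chip's own lock. A minimal sketch of the resulting shape, with placeholder register offsets (0x0/0x4/0x8 are not real hardware):

	#include <linux/gpio/generic.h>

	struct foo_gpio {
		struct gpio_generic_chip chip;	/* embeds struct gpio_chip as .gc */
		void __iomem *base;
	};

	static int foo_gpio_init(struct device *dev, struct foo_gpio *fg)
	{
		struct gpio_generic_chip_config config = {
			.dev = dev,
			.sz = 4,			/* 32-bit registers */
			.dat = fg->base + 0x0,		/* input value */
			.set = fg->base + 0x4,		/* output value */
			.dirout = fg->base + 0x8,	/* direction, 1 = output */
		};
		int ret;

		ret = gpio_generic_chip_init(&fg->chip, &config);
		if (ret)
			return ret;

		/* gpio_chip fields now live under the embedded .gc */
		fg->chip.gc.label = dev_name(dev);

		/* read-modify-write sequences hold the chip lock for their scope */
		scoped_guard(gpio_generic_lock_irqsave, &fg->chip) {
			/* access shared registers here */
		}

		return 0;
	}
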
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 24792b8eb083..ac799fced950 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -602,7 +602,6 @@ static const struct regmap_config mvebu_gpio_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .fast_io = true,
};
/*
@@ -899,7 +898,7 @@ static void mvebu_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
msk = BIT(i);
is_out = !(io_conf & msk);
- seq_printf(s, " gpio-%-3d (%-20.20s)", chip->base + i, label);
+ seq_printf(s, " gpio-%-3d (%-20.20s)", i, label);
if (is_out) {
seq_printf(s, " out %s %s\n",
@@ -1168,7 +1167,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
mvchip->chip.direction_input = mvebu_gpio_direction_input;
mvchip->chip.get = mvebu_gpio_get;
mvchip->chip.direction_output = mvebu_gpio_direction_output;
- mvchip->chip.set_rv = mvebu_gpio_set;
+ mvchip->chip.set = mvebu_gpio_set;
if (have_irqs)
mvchip->chip.to_irq = mvebu_gpio_to_irq;
mvchip->chip.base = id * MVEBU_MAX_GPIO_PER_BANK;
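
The mvebu hunk above shows the other change that recurs across the series: with the int-returning setters now the only variant, the transitional set_rv/set_multiple_rv names collapse back into set/set_multiple. The callback signature is unchanged; a sketch with a hypothetical driver:

	/* Setter returns int so bus errors can propagate to callers. */
	static int foo_gpio_set(struct gpio_chip *gc, unsigned int offset,
				int value)
	{
		/* write the output register here */
		return 0;
	}

	/* Assignment site: .set_rv becomes .set (likewise for set_multiple). */
	gc->set = foo_gpio_set;
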
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 433cbadc3a4c..52060b3ec745 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -481,7 +481,7 @@ static int mxc_gpio_probe(struct platform_device *pdev)
config.dat = port->base + GPIO_PSR;
config.set = port->base + GPIO_DR;
config.dirout = port->base + GPIO_GDIR;
- config.flags = BGPIOF_READ_OUTPUT_REG_SET;
+ config.flags = GPIO_GENERIC_READ_OUTPUT_REG_SET;
err = gpio_generic_chip_init(&port->gen_gc, &config);
if (err)
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index 0ea46f3d04e1..5635694bf9f4 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -7,17 +7,18 @@
// Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved.
#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/gpio/driver.h>
-#include <linux/module.h>
#define MXS_SET 0x4
#define MXS_CLR 0x8
@@ -48,7 +49,7 @@ struct mxs_gpio_port {
int id;
int irq;
struct irq_domain *domain;
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
struct device *dev;
enum mxs_gpio_id devid;
u32 both_edges;
@@ -258,6 +259,7 @@ MODULE_DEVICE_TABLE(of, mxs_gpio_dt_ids);
static int mxs_gpio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
+ struct gpio_generic_chip_config config;
struct device_node *parent;
static void __iomem *base;
struct mxs_gpio_port *port;
@@ -319,19 +321,24 @@ static int mxs_gpio_probe(struct platform_device *pdev)
irq_set_chained_handler_and_data(port->irq, mxs_gpio_irq_handler,
port);
- err = bgpio_init(&port->gc, &pdev->dev, 4,
- port->base + PINCTRL_DIN(port),
- port->base + PINCTRL_DOUT(port) + MXS_SET,
- port->base + PINCTRL_DOUT(port) + MXS_CLR,
- port->base + PINCTRL_DOE(port), NULL, 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = &pdev->dev,
+ .sz = 4,
+ .dat = port->base + PINCTRL_DIN(port),
+ .set = port->base + PINCTRL_DOUT(port) + MXS_SET,
+ .clr = port->base + PINCTRL_DOUT(port) + MXS_CLR,
+ .dirout = port->base + PINCTRL_DOE(port),
+ };
+
+ err = gpio_generic_chip_init(&port->chip, &config);
if (err)
goto out_irqdomain_remove;
- port->gc.to_irq = mxs_gpio_to_irq;
- port->gc.get_direction = mxs_gpio_get_direction;
- port->gc.base = port->id * 32;
+ port->chip.gc.to_irq = mxs_gpio_to_irq;
+ port->chip.gc.get_direction = mxs_gpio_get_direction;
+ port->chip.gc.base = port->id * 32;
- err = gpiochip_add_data(&port->gc, port);
+ err = gpiochip_add_data(&port->chip.gc, port);
if (err)
goto out_irqdomain_remove;
diff --git a/drivers/gpio/gpio-nct6694.c b/drivers/gpio/gpio-nct6694.c
new file mode 100644
index 000000000000..a8607f0d9915
--- /dev/null
+++ b/drivers/gpio/gpio-nct6694.c
@@ -0,0 +1,499 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Nuvoton NCT6694 GPIO controller driver based on the USB interface.
+ *
+ * Copyright (C) 2025 Nuvoton Technology Corp.
+ */
+
+#include <linux/bits.h>
+#include <linux/gpio/driver.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/nct6694.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+/*
+ * USB command module type for the NCT6694 GPIO controller, used in the
+ * header of every command sent to the device over its USB interface.
+ */
+#define NCT6694_GPIO_MOD 0xFF
+
+#define NCT6694_GPIO_VER 0x90
+#define NCT6694_GPIO_VALID 0x110
+#define NCT6694_GPI_DATA 0x120
+#define NCT6694_GPO_DIR 0x170
+#define NCT6694_GPO_TYPE 0x180
+#define NCT6694_GPO_DATA 0x190
+
+#define NCT6694_GPI_STS 0x130
+#define NCT6694_GPI_CLR 0x140
+#define NCT6694_GPI_FALLING 0x150
+#define NCT6694_GPI_RISING 0x160
+
+#define NCT6694_NR_GPIO 8
+
+struct nct6694_gpio_data {
+ struct nct6694 *nct6694;
+ struct gpio_chip gpio;
+ struct mutex lock;
+ /* Protects IRQ operations */
+ struct mutex irq_lock;
+
+ unsigned char reg_val;
+ unsigned char irq_trig_falling;
+ unsigned char irq_trig_rising;
+
+ /* Current GPIO group */
+ unsigned char group;
+ int irq;
+};
+
+static int nct6694_get_direction(struct gpio_chip *gpio, unsigned int offset)
+{
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPO_DIR + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ return !(BIT(offset) & data->reg_val);
+}
+
+static int nct6694_direction_input(struct gpio_chip *gpio, unsigned int offset)
+{
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPO_DIR + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ data->reg_val &= ~BIT(offset);
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd, &data->reg_val);
+}
+
+static int nct6694_direction_output(struct gpio_chip *gpio,
+ unsigned int offset, int val)
+{
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPO_DIR + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ /* Set direction to output */
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ data->reg_val |= BIT(offset);
+ ret = nct6694_write_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ /* Then set output level */
+ cmd_hd.offset = cpu_to_le16(NCT6694_GPO_DATA + data->group);
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ if (val)
+ data->reg_val |= BIT(offset);
+ else
+ data->reg_val &= ~BIT(offset);
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd, &data->reg_val);
+}
+
+static int nct6694_get_value(struct gpio_chip *gpio, unsigned int offset)
+{
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPO_DIR + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ if (BIT(offset) & data->reg_val) {
+ cmd_hd.offset = cpu_to_le16(NCT6694_GPO_DATA + data->group);
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ return !!(BIT(offset) & data->reg_val);
+ }
+
+ cmd_hd.offset = cpu_to_le16(NCT6694_GPI_DATA + data->group);
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ return !!(BIT(offset) & data->reg_val);
+}
+
+static int nct6694_set_value(struct gpio_chip *gpio, unsigned int offset,
+ int val)
+{
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPO_DATA + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ if (val)
+ data->reg_val |= BIT(offset);
+ else
+ data->reg_val &= ~BIT(offset);
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd, &data->reg_val);
+}
+
+static int nct6694_set_config(struct gpio_chip *gpio, unsigned int offset,
+ unsigned long config)
+{
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPO_TYPE + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ data->reg_val |= BIT(offset);
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ data->reg_val &= ~BIT(offset);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd, &data->reg_val);
+}
+
+static int nct6694_init_valid_mask(struct gpio_chip *gpio,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPIO_VALID + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ *valid_mask = data->reg_val;
+
+ return ret;
+}
+
+static irqreturn_t nct6694_irq_handler(int irq, void *priv)
+{
+ struct nct6694_gpio_data *data = priv;
+ struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPI_STS + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ unsigned char status;
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret)
+ return IRQ_NONE;
+
+ status = data->reg_val;
+
+ while (status) {
+ int bit = __ffs(status);
+
+ data->reg_val = BIT(bit);
+ handle_nested_irq(irq_find_mapping(data->gpio.irq.domain, bit));
+ status &= ~BIT(bit);
+ cmd_hd.offset = cpu_to_le16(NCT6694_GPI_CLR + data->group);
+ nct6694_write_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int nct6694_get_irq_trig(struct nct6694_gpio_data *data)
+{
+ struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPI_FALLING + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->irq_trig_falling);
+ if (ret)
+ return ret;
+
+ cmd_hd.offset = cpu_to_le16(NCT6694_GPI_RISING + data->group);
+ return nct6694_read_msg(data->nct6694, &cmd_hd, &data->irq_trig_rising);
+}
+
+static void nct6694_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gpio = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ gpiochip_disable_irq(gpio, hwirq);
+}
+
+static void nct6694_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gpio = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ gpiochip_enable_irq(gpio, hwirq);
+}
+
+static int nct6694_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gpio = irq_data_get_irq_chip_data(d);
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ guard(mutex)(&data->lock);
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ data->irq_trig_rising |= BIT(hwirq);
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ data->irq_trig_falling |= BIT(hwirq);
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ data->irq_trig_rising |= BIT(hwirq);
+ data->irq_trig_falling |= BIT(hwirq);
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static void nct6694_irq_bus_lock(struct irq_data *d)
+{
+ struct gpio_chip *gpio = irq_data_get_irq_chip_data(d);
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+
+ mutex_lock(&data->irq_lock);
+}
+
+static void nct6694_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct gpio_chip *gpio = irq_data_get_irq_chip_data(d);
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPI_FALLING + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+
+ scoped_guard(mutex, &data->lock) {
+ nct6694_write_msg(data->nct6694, &cmd_hd, &data->irq_trig_falling);
+
+ cmd_hd.offset = cpu_to_le16(NCT6694_GPI_RISING + data->group);
+ nct6694_write_msg(data->nct6694, &cmd_hd, &data->irq_trig_rising);
+ }
+
+ mutex_unlock(&data->irq_lock);
+}
+
+static const struct irq_chip nct6694_irq_chip = {
+ .name = "gpio-nct6694",
+ .irq_mask = nct6694_irq_mask,
+ .irq_unmask = nct6694_irq_unmask,
+ .irq_set_type = nct6694_irq_set_type,
+ .irq_bus_lock = nct6694_irq_bus_lock,
+ .irq_bus_sync_unlock = nct6694_irq_bus_sync_unlock,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static void nct6694_irq_dispose_mapping(void *d)
+{
+ struct nct6694_gpio_data *data = d;
+
+ irq_dispose_mapping(data->irq);
+}
+
+static void nct6694_gpio_ida_free(void *d)
+{
+ struct nct6694_gpio_data *data = d;
+ struct nct6694 *nct6694 = data->nct6694;
+
+ ida_free(&nct6694->gpio_ida, data->group);
+}
+
+static int nct6694_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct nct6694 *nct6694 = dev_get_drvdata(dev->parent);
+ struct nct6694_gpio_data *data;
+ struct gpio_irq_chip *girq;
+ int ret, i;
+ char **names;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->nct6694 = nct6694;
+
+ ret = ida_alloc(&nct6694->gpio_ida, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+ data->group = ret;
+
+ ret = devm_add_action_or_reset(dev, nct6694_gpio_ida_free, data);
+ if (ret)
+ return ret;
+
+ names = devm_kcalloc(dev, NCT6694_NR_GPIO, sizeof(char *),
+ GFP_KERNEL);
+ if (!names)
+ return -ENOMEM;
+
+ for (i = 0; i < NCT6694_NR_GPIO; i++) {
+ names[i] = devm_kasprintf(dev, GFP_KERNEL, "GPIO%X%d",
+ data->group, i);
+ if (!names[i])
+ return -ENOMEM;
+ }
+
+ data->irq = irq_create_mapping(nct6694->domain,
+ NCT6694_IRQ_GPIO0 + data->group);
+ if (!data->irq)
+ return -EINVAL;
+
+ ret = devm_add_action_or_reset(dev, nct6694_irq_dispose_mapping, data);
+ if (ret)
+ return ret;
+
+ data->gpio.names = (const char * const *)names;
+ data->gpio.label = pdev->name;
+ data->gpio.direction_input = nct6694_direction_input;
+ data->gpio.get = nct6694_get_value;
+ data->gpio.direction_output = nct6694_direction_output;
+ data->gpio.set = nct6694_set_value;
+ data->gpio.get_direction = nct6694_get_direction;
+ data->gpio.set_config = nct6694_set_config;
+ data->gpio.init_valid_mask = nct6694_init_valid_mask;
+ data->gpio.base = -1;
+ data->gpio.can_sleep = false;
+ data->gpio.owner = THIS_MODULE;
+ data->gpio.ngpio = NCT6694_NR_GPIO;
+
+ platform_set_drvdata(pdev, data);
+
+ ret = devm_mutex_init(dev, &data->lock);
+ if (ret)
+ return ret;
+
+ ret = devm_mutex_init(dev, &data->irq_lock);
+ if (ret)
+ return ret;
+
+ ret = nct6694_get_irq_trig(data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get irq trigger type\n");
+
+ girq = &data->gpio.irq;
+ gpio_irq_chip_set_chip(girq, &nct6694_irq_chip);
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
+ girq->threaded = true;
+
+ ret = devm_request_threaded_irq(dev, data->irq, NULL, nct6694_irq_handler,
+ IRQF_ONESHOT | IRQF_SHARED,
+ "gpio-nct6694", data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request irq\n");
+
+ return devm_gpiochip_add_data(dev, &data->gpio, data);
+}
+
+static struct platform_driver nct6694_gpio_driver = {
+ .driver = {
+ .name = "nct6694-gpio",
+ },
+ .probe = nct6694_gpio_probe,
+};
+
+module_platform_driver(nct6694_gpio_driver);
+
+MODULE_DESCRIPTION("USB-GPIO controller driver for NCT6694");
+MODULE_AUTHOR("Ming Yu <tmyu0@nuvoton.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:nct6694-gpio");
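
Every callback in the driver above follows the same shape: build a struct nct6694_cmd_header addressing one register group, take data->lock with guard(mutex) so it drops on every return path, read the current byte into data->reg_val over USB, modify it, and write it back. Condensed into one hypothetical helper (nct6694_update_bits() does not exist in the driver; the calls it makes do):

	static int nct6694_update_bits(struct nct6694_gpio_data *data,
				       u16 reg, u8 mask, u8 val)
	{
		const struct nct6694_cmd_header cmd_hd = {
			.mod = NCT6694_GPIO_MOD,
			.offset = cpu_to_le16(reg + data->group),
			.len = cpu_to_le16(sizeof(data->reg_val)),
		};
		int ret;

		guard(mutex)(&data->lock);	/* released on every return */

		ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
		if (ret < 0)
			return ret;

		data->reg_val = (data->reg_val & ~mask) | (val & mask);

		return nct6694_write_msg(data->nct6694, &cmd_hd, &data->reg_val);
	}
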
diff --git a/drivers/gpio/gpio-nomadik.c b/drivers/gpio/gpio-nomadik.c
index 296d13845b30..97c5cd33279d 100644
--- a/drivers/gpio/gpio-nomadik.c
+++ b/drivers/gpio/gpio-nomadik.c
@@ -20,6 +20,7 @@
*/
#include <linux/cleanup.h>
#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -396,10 +397,12 @@ static int nmk_gpio_get_mode(struct nmk_gpio_chip *nmk_chip, int offset)
}
void nmk_gpio_dbg_show_one(struct seq_file *s, struct pinctrl_dev *pctldev,
- struct gpio_chip *chip, unsigned int offset,
- unsigned int gpio)
+ struct gpio_chip *chip, unsigned int offset)
{
struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
+#ifdef CONFIG_PINCTRL_NOMADIK
+ struct gpio_desc *desc;
+#endif
int mode;
bool is_out;
bool data_out;
@@ -425,15 +428,15 @@ void nmk_gpio_dbg_show_one(struct seq_file *s, struct pinctrl_dev *pctldev,
data_out = !!(readl(nmk_chip->addr + NMK_GPIO_DAT) & BIT(offset));
mode = nmk_gpio_get_mode(nmk_chip, offset);
#ifdef CONFIG_PINCTRL_NOMADIK
- if (mode == NMK_GPIO_ALT_C && pctldev)
- mode = nmk_prcm_gpiocr_get_mode(pctldev, gpio);
+ if (mode == NMK_GPIO_ALT_C && pctldev) {
+ desc = gpio_device_get_desc(chip->gpiodev, offset);
+ mode = nmk_prcm_gpiocr_get_mode(pctldev, desc_to_gpio(desc));
+ }
#endif
if (is_out) {
seq_printf(s, " gpio-%-3d (%-20.20s) out %s %s",
- gpio,
- label ?: "(none)",
- str_hi_lo(data_out),
+ offset, label ?: "(none)", str_hi_lo(data_out),
(mode < 0) ? "unknown" : modes[mode]);
} else {
int irq = chip->to_irq(chip, offset);
@@ -445,9 +448,7 @@ void nmk_gpio_dbg_show_one(struct seq_file *s, struct pinctrl_dev *pctldev,
};
seq_printf(s, " gpio-%-3d (%-20.20s) in %s %s",
- gpio,
- label ?: "(none)",
- pulls[pullidx],
+ offset, label ?: "(none)", pulls[pullidx],
(mode < 0) ? "unknown" : modes[mode]);
val = nmk_gpio_get_input(chip, offset);
@@ -479,10 +480,10 @@ void nmk_gpio_dbg_show_one(struct seq_file *s, struct pinctrl_dev *pctldev,
static void nmk_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
- unsigned int i, gpio = chip->base;
+ unsigned int i;
- for (i = 0; i < chip->ngpio; i++, gpio++) {
- nmk_gpio_dbg_show_one(s, NULL, chip, i, gpio);
+ for (i = 0; i < chip->ngpio; i++) {
+ nmk_gpio_dbg_show_one(s, NULL, chip, i);
seq_puts(s, "\n");
}
}
@@ -674,7 +675,7 @@ static int nmk_gpio_probe(struct platform_device *pdev)
chip->direction_input = nmk_gpio_make_input;
chip->get = nmk_gpio_get_input;
chip->direction_output = nmk_gpio_make_output;
- chip->set_rv = nmk_gpio_set_output;
+ chip->set = nmk_gpio_set_output;
chip->dbg_show = nmk_gpio_dbg_show;
chip->can_sleep = false;
chip->owner = THIS_MODULE;
diff --git a/drivers/gpio/gpio-npcm-sgpio.c b/drivers/gpio/gpio-npcm-sgpio.c
index 25b203a89e38..83c77a2c0623 100644
--- a/drivers/gpio/gpio-npcm-sgpio.c
+++ b/drivers/gpio/gpio-npcm-sgpio.c
@@ -211,7 +211,7 @@ static int npcm_sgpio_dir_in(struct gpio_chip *gc, unsigned int offset)
static int npcm_sgpio_dir_out(struct gpio_chip *gc, unsigned int offset, int val)
{
- return gc->set_rv(gc, offset, val);
+ return gc->set(gc, offset, val);
}
static int npcm_sgpio_get_direction(struct gpio_chip *gc, unsigned int offset)
@@ -546,7 +546,7 @@ static int npcm_sgpio_probe(struct platform_device *pdev)
gpio->chip.direction_output = npcm_sgpio_dir_out;
gpio->chip.get_direction = npcm_sgpio_get_direction;
gpio->chip.get = npcm_sgpio_get;
- gpio->chip.set_rv = npcm_sgpio_set;
+ gpio->chip.set = npcm_sgpio_set;
gpio->chip.label = dev_name(&pdev->dev);
gpio->chip.base = -1;
diff --git a/drivers/gpio/gpio-octeon.c b/drivers/gpio/gpio-octeon.c
index 24966161742a..777e20c608dc 100644
--- a/drivers/gpio/gpio-octeon.c
+++ b/drivers/gpio/gpio-octeon.c
@@ -108,7 +108,7 @@ static int octeon_gpio_probe(struct platform_device *pdev)
chip->direction_input = octeon_gpio_dir_in;
chip->get = octeon_gpio_get;
chip->direction_output = octeon_gpio_dir_out;
- chip->set_rv = octeon_gpio_set;
+ chip->set = octeon_gpio_set;
err = devm_gpiochip_add_data(&pdev->dev, chip, gpio);
if (err)
return err;
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index ed5c88a5c520..a268c76bdca6 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1046,8 +1046,8 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct device *pm_dev)
bank->chip.get_multiple = omap_gpio_get_multiple;
bank->chip.direction_output = omap_gpio_output;
bank->chip.set_config = omap_gpio_set_config;
- bank->chip.set_rv = omap_gpio_set;
- bank->chip.set_multiple_rv = omap_gpio_set_multiple;
+ bank->chip.set = omap_gpio_set;
+ bank->chip.set_multiple = omap_gpio_set_multiple;
if (bank->is_mpuio) {
bank->chip.label = "mpuio";
if (bank->regs->wkup_en)
diff --git a/drivers/gpio/gpio-palmas.c b/drivers/gpio/gpio-palmas.c
index 9329d8ce8f59..e377f6dd4ccf 100644
--- a/drivers/gpio/gpio-palmas.c
+++ b/drivers/gpio/gpio-palmas.c
@@ -166,7 +166,7 @@ static int palmas_gpio_probe(struct platform_device *pdev)
palmas_gpio->gpio_chip.direction_input = palmas_gpio_input;
palmas_gpio->gpio_chip.direction_output = palmas_gpio_output;
palmas_gpio->gpio_chip.to_irq = palmas_gpio_to_irq;
- palmas_gpio->gpio_chip.set_rv = palmas_gpio_set;
+ palmas_gpio->gpio_chip.set = palmas_gpio_set;
palmas_gpio->gpio_chip.get = palmas_gpio_get;
palmas_gpio->gpio_chip.parent = &pdev->dev;
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 69906a9af7e6..b46927f55038 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -789,10 +789,10 @@ static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
gc->direction_input = pca953x_gpio_direction_input;
gc->direction_output = pca953x_gpio_direction_output;
gc->get = pca953x_gpio_get_value;
- gc->set_rv = pca953x_gpio_set_value;
+ gc->set = pca953x_gpio_set_value;
gc->get_direction = pca953x_gpio_get_direction;
gc->get_multiple = pca953x_gpio_get_multiple;
- gc->set_multiple_rv = pca953x_gpio_set_multiple;
+ gc->set_multiple = pca953x_gpio_set_multiple;
gc->set_config = pca953x_gpio_set_config;
gc->can_sleep = true;
diff --git a/drivers/gpio/gpio-pca9570.c b/drivers/gpio/gpio-pca9570.c
index a33246f20fd8..c5a1287079a0 100644
--- a/drivers/gpio/gpio-pca9570.c
+++ b/drivers/gpio/gpio-pca9570.c
@@ -126,7 +126,7 @@ static int pca9570_probe(struct i2c_client *client)
gpio->chip.owner = THIS_MODULE;
gpio->chip.get_direction = pca9570_get_direction;
gpio->chip.get = pca9570_get;
- gpio->chip.set_rv = pca9570_set;
+ gpio->chip.set = pca9570_set;
gpio->chip.base = -1;
gpio->chip_data = device_get_match_data(&client->dev);
gpio->chip.ngpio = gpio->chip_data->ngpio;
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index a04203680333..3b9de8c3d924 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -295,8 +295,8 @@ static int pcf857x_probe(struct i2c_client *client)
gpio->chip.owner = THIS_MODULE;
gpio->chip.get = pcf857x_get;
gpio->chip.get_multiple = pcf857x_get_multiple;
- gpio->chip.set_rv = pcf857x_set;
- gpio->chip.set_multiple_rv = pcf857x_set_multiple;
+ gpio->chip.set = pcf857x_set;
+ gpio->chip.set_multiple = pcf857x_set_multiple;
gpio->chip.direction_input = pcf857x_input;
gpio->chip.direction_output = pcf857x_output;
gpio->chip.ngpio = (uintptr_t)i2c_get_match_data(client);
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index c6f313342ba0..9925687e05fb 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -219,7 +219,7 @@ static void pch_gpio_setup(struct pch_gpio *chip)
gpio->direction_input = pch_gpio_direction_input;
gpio->get = pch_gpio_get;
gpio->direction_output = pch_gpio_direction_output;
- gpio->set_rv = pch_gpio_set;
+ gpio->set = pch_gpio_set;
gpio->base = -1;
gpio->ngpio = gpio_pins[chip->ioh];
gpio->can_sleep = false;
diff --git a/drivers/gpio/gpio-pisosr.c b/drivers/gpio/gpio-pisosr.c
index a69b74866a13..7ec6a46ed600 100644
--- a/drivers/gpio/gpio-pisosr.c
+++ b/drivers/gpio/gpio-pisosr.c
@@ -108,11 +108,6 @@ static const struct gpio_chip template_chip = {
.can_sleep = true,
};
-static void pisosr_mutex_destroy(void *lock)
-{
- mutex_destroy(lock);
-}
-
static int pisosr_gpio_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
@@ -139,8 +134,7 @@ static int pisosr_gpio_probe(struct spi_device *spi)
return dev_err_probe(dev, PTR_ERR(gpio->load_gpio),
"Unable to allocate load GPIO\n");
- mutex_init(&gpio->lock);
- ret = devm_add_action_or_reset(dev, pisosr_mutex_destroy, &gpio->lock);
+ ret = devm_mutex_init(dev, &gpio->lock);
if (ret)
return ret;
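
The pisosr hunk above condenses a common devres idiom: mutex_init() followed by devm_add_action_or_reset() with a hand-written destructor is equivalent to a single devm_mutex_init() call, which registers the mutex destruction with devres itself. Side by side:

	/* Before: two steps plus a custom callback. */
	mutex_init(&gpio->lock);
	ret = devm_add_action_or_reset(dev, pisosr_mutex_destroy, &gpio->lock);

	/* After: one call, teardown managed by devres. */
	ret = devm_mutex_init(dev, &gpio->lock);
	if (ret)
		return ret;
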
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 98cfac4eac85..02e4ffcf5a6f 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -330,7 +330,7 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
pl061->gc.direction_input = pl061_direction_input;
pl061->gc.direction_output = pl061_direction_output;
pl061->gc.get = pl061_get_value;
- pl061->gc.set_rv = pl061_set_value;
+ pl061->gc.set = pl061_set_value;
pl061->gc.ngpio = PL061_GPIO_NR;
pl061->gc.label = dev_name(dev);
pl061->gc.parent = dev;
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 13f7da2a9486..fa22f3faa163 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -355,7 +355,7 @@ static int pxa_init_gpio_chip(struct pxa_gpio_chip *pchip, int ngpio, void __iom
pchip->chip.direction_input = pxa_gpio_direction_input;
pchip->chip.direction_output = pxa_gpio_direction_output;
pchip->chip.get = pxa_gpio_get;
- pchip->chip.set_rv = pxa_gpio_set;
+ pchip->chip.set = pxa_gpio_set;
pchip->chip.to_irq = pxa_gpio_to_irq;
pchip->chip.ngpio = ngpio;
pchip->chip.request = gpiochip_generic_request;
@@ -499,8 +499,6 @@ static void pxa_mask_muxed_gpio(struct irq_data *d)
gfer = readl_relaxed(base + GFER_OFFSET) & ~GPIO_bit(gpio);
writel_relaxed(grer, base + GRER_OFFSET);
writel_relaxed(gfer, base + GFER_OFFSET);
-
- gpiochip_disable_irq(&pchip->chip, gpio);
}
static int pxa_gpio_set_wake(struct irq_data *d, unsigned int on)
@@ -520,21 +518,17 @@ static void pxa_unmask_muxed_gpio(struct irq_data *d)
unsigned int gpio = irqd_to_hwirq(d);
struct pxa_gpio_bank *c = gpio_to_pxabank(&pchip->chip, gpio);
- gpiochip_enable_irq(&pchip->chip, gpio);
-
c->irq_mask |= GPIO_bit(gpio);
update_edge_detect(c);
}
-static const struct irq_chip pxa_muxed_gpio_chip = {
+static struct irq_chip pxa_muxed_gpio_chip = {
.name = "GPIO",
.irq_ack = pxa_ack_muxed_gpio,
.irq_mask = pxa_mask_muxed_gpio,
.irq_unmask = pxa_unmask_muxed_gpio,
.irq_set_type = pxa_gpio_irq_type,
.irq_set_wake = pxa_gpio_set_wake,
- .flags = IRQCHIP_IMMUTABLE,
- GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int pxa_gpio_nums(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-raspberrypi-exp.c b/drivers/gpio/gpio-raspberrypi-exp.c
index b4b607515a04..40413e06b69c 100644
--- a/drivers/gpio/gpio-raspberrypi-exp.c
+++ b/drivers/gpio/gpio-raspberrypi-exp.c
@@ -232,7 +232,7 @@ static int rpi_exp_gpio_probe(struct platform_device *pdev)
rpi_gpio->gc.direction_output = rpi_exp_gpio_dir_out;
rpi_gpio->gc.get_direction = rpi_exp_gpio_get_direction;
rpi_gpio->gc.get = rpi_exp_gpio_get;
- rpi_gpio->gc.set_rv = rpi_exp_gpio_set;
+ rpi_gpio->gc.set = rpi_exp_gpio_set;
rpi_gpio->gc.can_sleep = true;
return devm_gpiochip_add_data(dev, &rpi_gpio->gc, rpi_gpio);
diff --git a/drivers/gpio/gpio-rc5t583.c b/drivers/gpio/gpio-rc5t583.c
index cf3e91d235df..5a69e4534591 100644
--- a/drivers/gpio/gpio-rc5t583.c
+++ b/drivers/gpio/gpio-rc5t583.c
@@ -118,7 +118,7 @@ static int rc5t583_gpio_probe(struct platform_device *pdev)
rc5t583_gpio->gpio_chip.free = rc5t583_gpio_free,
rc5t583_gpio->gpio_chip.direction_input = rc5t583_gpio_dir_input,
rc5t583_gpio->gpio_chip.direction_output = rc5t583_gpio_dir_output,
- rc5t583_gpio->gpio_chip.set_rv = rc5t583_gpio_set,
+ rc5t583_gpio->gpio_chip.set = rc5t583_gpio_set,
rc5t583_gpio->gpio_chip.get = rc5t583_gpio_get,
rc5t583_gpio->gpio_chip.to_irq = rc5t583_gpio_to_irq,
rc5t583_gpio->gpio_chip.ngpio = RC5T583_MAX_GPIO,
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index cd31580effa9..86777e097fd8 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -535,8 +535,8 @@ static int gpio_rcar_probe(struct platform_device *pdev)
gpio_chip->get = gpio_rcar_get;
gpio_chip->get_multiple = gpio_rcar_get_multiple;
gpio_chip->direction_output = gpio_rcar_direction_output;
- gpio_chip->set_rv = gpio_rcar_set;
- gpio_chip->set_multiple_rv = gpio_rcar_set_multiple;
+ gpio_chip->set = gpio_rcar_set;
+ gpio_chip->set_multiple = gpio_rcar_set_multiple;
gpio_chip->label = name;
gpio_chip->parent = dev;
gpio_chip->owner = THIS_MODULE;
diff --git a/drivers/gpio/gpio-rda.c b/drivers/gpio/gpio-rda.c
index cb2f63eee2aa..7bbc6f0ce4c8 100644
--- a/drivers/gpio/gpio-rda.c
+++ b/drivers/gpio/gpio-rda.c
@@ -8,6 +8,7 @@
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -35,7 +36,7 @@
#define RDA_GPIO_BANK_NR 32
struct rda_gpio {
- struct gpio_chip chip;
+ struct gpio_generic_chip chip;
void __iomem *base;
spinlock_t lock;
int irq;
@@ -208,6 +209,7 @@ static const struct irq_chip rda_gpio_irq_chip = {
static int rda_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct gpio_irq_chip *girq;
struct rda_gpio *rda_gpio;
@@ -235,24 +237,29 @@ static int rda_gpio_probe(struct platform_device *pdev)
spin_lock_init(&rda_gpio->lock);
- ret = bgpio_init(&rda_gpio->chip, dev, 4,
- rda_gpio->base + RDA_GPIO_VAL,
- rda_gpio->base + RDA_GPIO_SET,
- rda_gpio->base + RDA_GPIO_CLR,
- rda_gpio->base + RDA_GPIO_OEN_SET_OUT,
- rda_gpio->base + RDA_GPIO_OEN_SET_IN,
- BGPIOF_READ_OUTPUT_REG_SET);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = rda_gpio->base + RDA_GPIO_VAL,
+ .set = rda_gpio->base + RDA_GPIO_SET,
+ .clr = rda_gpio->base + RDA_GPIO_CLR,
+ .dirout = rda_gpio->base + RDA_GPIO_OEN_SET_OUT,
+ .dirin = rda_gpio->base + RDA_GPIO_OEN_SET_IN,
+ .flags = GPIO_GENERIC_READ_OUTPUT_REG_SET,
+ };
+
+ ret = gpio_generic_chip_init(&rda_gpio->chip, &config);
if (ret) {
- dev_err(dev, "bgpio_init failed\n");
+ dev_err(dev, "failed to initialize the generic GPIO chip\n");
return ret;
}
- rda_gpio->chip.label = dev_name(dev);
- rda_gpio->chip.ngpio = ngpios;
- rda_gpio->chip.base = -1;
+ rda_gpio->chip.gc.label = dev_name(dev);
+ rda_gpio->chip.gc.ngpio = ngpios;
+ rda_gpio->chip.gc.base = -1;
if (rda_gpio->irq >= 0) {
- girq = &rda_gpio->chip.irq;
+ girq = &rda_gpio->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &rda_gpio_irq_chip);
girq->handler = handle_bad_irq;
girq->default_type = IRQ_TYPE_NONE;
@@ -269,7 +276,7 @@ static int rda_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rda_gpio);
- return devm_gpiochip_add_data(dev, &rda_gpio->chip, rda_gpio);
+ return devm_gpiochip_add_data(dev, &rda_gpio->chip.gc, rda_gpio);
}
static const struct of_device_id rda_gpio_of_match[] = {
diff --git a/drivers/gpio/gpio-rdc321x.c b/drivers/gpio/gpio-rdc321x.c
index a75ed8021de5..ba62b81aa8ae 100644
--- a/drivers/gpio/gpio-rdc321x.c
+++ b/drivers/gpio/gpio-rdc321x.c
@@ -159,7 +159,7 @@ static int rdc321x_gpio_probe(struct platform_device *pdev)
rdc321x_gpio_dev->chip.direction_input = rdc_gpio_direction_input;
rdc321x_gpio_dev->chip.direction_output = rdc_gpio_config;
rdc321x_gpio_dev->chip.get = rdc_gpio_get_value;
- rdc321x_gpio_dev->chip.set_rv = rdc_gpio_set_value;
+ rdc321x_gpio_dev->chip.set = rdc_gpio_set_value;
rdc321x_gpio_dev->chip.base = 0;
rdc321x_gpio_dev->chip.ngpio = pdata->max_gpios;
diff --git a/drivers/gpio/gpio-realtek-otto.c b/drivers/gpio/gpio-realtek-otto.c
index d6418f89d3f6..de527f4fc6c2 100644
--- a/drivers/gpio/gpio-realtek-otto.c
+++ b/drivers/gpio/gpio-realtek-otto.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/gpio/driver.h>
#include <linux/cpumask.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/irq.h>
#include <linux/minmax.h>
#include <linux/mod_devicetable.h>
@@ -41,7 +42,7 @@
/**
* realtek_gpio_ctrl - Realtek Otto GPIO driver data
*
- * @gc: Associated gpio_chip instance
+ * @chip: Associated gpio_generic_chip instance
* @base: Base address of the register block for a GPIO bank
* @lock: Lock for accessing the IRQ registers and values
* @intr_mask: Mask for interrupt lines
@@ -64,7 +65,7 @@
* IMR on changes.
*/
struct realtek_gpio_ctrl {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *base;
void __iomem *cpumask_base;
struct cpumask cpu_irq_maskable;
@@ -101,7 +102,7 @@ static struct realtek_gpio_ctrl *irq_data_to_ctrl(struct irq_data *data)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
- return container_of(gc, struct realtek_gpio_ctrl, gc);
+ return container_of(to_gpio_generic_chip(gc), struct realtek_gpio_ctrl, chip);
}
/*
@@ -194,7 +195,7 @@ static void realtek_gpio_irq_unmask(struct irq_data *data)
unsigned int line = irqd_to_hwirq(data);
unsigned long flags;
- gpiochip_enable_irq(&ctrl->gc, line);
+ gpiochip_enable_irq(&ctrl->chip.gc, line);
raw_spin_lock_irqsave(&ctrl->lock, flags);
ctrl->intr_mask[line] = REALTEK_GPIO_IMR_LINE_MASK;
@@ -213,7 +214,7 @@ static void realtek_gpio_irq_mask(struct irq_data *data)
realtek_gpio_update_line_imr(ctrl, line);
raw_spin_unlock_irqrestore(&ctrl->lock, flags);
- gpiochip_disable_irq(&ctrl->gc, line);
+ gpiochip_disable_irq(&ctrl->chip.gc, line);
}
static int realtek_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type)
@@ -356,8 +357,9 @@ MODULE_DEVICE_TABLE(of, realtek_gpio_of_match);
static int realtek_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
- unsigned long bgpio_flags;
+ unsigned long gen_gc_flags;
unsigned int dev_flags;
struct gpio_irq_chip *girq;
struct realtek_gpio_ctrl *ctrl;
@@ -388,32 +390,37 @@ static int realtek_gpio_probe(struct platform_device *pdev)
raw_spin_lock_init(&ctrl->lock);
if (dev_flags & GPIO_PORTS_REVERSED) {
- bgpio_flags = 0;
+ gen_gc_flags = 0;
ctrl->bank_read = realtek_gpio_bank_read;
ctrl->bank_write = realtek_gpio_bank_write;
ctrl->line_imr_pos = realtek_gpio_line_imr_pos;
} else {
- bgpio_flags = BGPIOF_BIG_ENDIAN_BYTE_ORDER;
+ gen_gc_flags = GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER;
ctrl->bank_read = realtek_gpio_bank_read_swapped;
ctrl->bank_write = realtek_gpio_bank_write_swapped;
ctrl->line_imr_pos = realtek_gpio_line_imr_pos_swapped;
}
- err = bgpio_init(&ctrl->gc, dev, 4,
- ctrl->base + REALTEK_GPIO_REG_DATA, NULL, NULL,
- ctrl->base + REALTEK_GPIO_REG_DIR, NULL,
- bgpio_flags);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = ctrl->base + REALTEK_GPIO_REG_DATA,
+ .dirout = ctrl->base + REALTEK_GPIO_REG_DIR,
+ .flags = gen_gc_flags,
+ };
+
+ err = gpio_generic_chip_init(&ctrl->chip, &config);
if (err) {
dev_err(dev, "unable to init generic GPIO");
return err;
}
- ctrl->gc.ngpio = ngpios;
- ctrl->gc.owner = THIS_MODULE;
+ ctrl->chip.gc.ngpio = ngpios;
+ ctrl->chip.gc.owner = THIS_MODULE;
irq = platform_get_irq_optional(pdev, 0);
if (!(dev_flags & GPIO_INTERRUPTS_DISABLED) && irq > 0) {
- girq = &ctrl->gc.irq;
+ girq = &ctrl->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &realtek_gpio_irq_chip);
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
@@ -442,7 +449,7 @@ static int realtek_gpio_probe(struct platform_device *pdev)
cpumask_set_cpu(cpu, &ctrl->cpu_irq_maskable);
}
- return devm_gpiochip_add_data(dev, &ctrl->gc, ctrl);
+ return devm_gpiochip_add_data(dev, &ctrl->chip.gc, ctrl);
}
static struct platform_driver realtek_gpio_driver = {
diff --git a/drivers/gpio/gpio-reg.c b/drivers/gpio/gpio-reg.c
index d8da99f97385..f2238196faf1 100644
--- a/drivers/gpio/gpio-reg.c
+++ b/drivers/gpio/gpio-reg.c
@@ -46,7 +46,7 @@ static int gpio_reg_direction_output(struct gpio_chip *gc, unsigned offset,
if (r->direction & BIT(offset))
return -ENOTSUPP;
- gc->set_rv(gc, offset, value);
+ gc->set(gc, offset, value);
return 0;
}
@@ -161,9 +161,9 @@ struct gpio_chip *gpio_reg_init(struct device *dev, void __iomem *reg,
r->gc.get_direction = gpio_reg_get_direction;
r->gc.direction_input = gpio_reg_direction_input;
r->gc.direction_output = gpio_reg_direction_output;
- r->gc.set_rv = gpio_reg_set;
+ r->gc.set = gpio_reg_set;
r->gc.get = gpio_reg_get;
- r->gc.set_multiple_rv = gpio_reg_set_multiple;
+ r->gc.set_multiple = gpio_reg_set_multiple;
if (irqs)
r->gc.to_irq = gpio_reg_to_irq;
r->gc.base = base;
diff --git a/drivers/gpio/gpio-regmap.c b/drivers/gpio/gpio-regmap.c
index 87c4225784cf..ab9e4077fa60 100644
--- a/drivers/gpio/gpio-regmap.c
+++ b/drivers/gpio/gpio-regmap.c
@@ -32,6 +32,11 @@ struct gpio_regmap {
unsigned int reg_dir_in_base;
unsigned int reg_dir_out_base;
+#ifdef CONFIG_REGMAP_IRQ
+ int regmap_irq_line;
+ struct regmap_irq_chip_data *irq_chip_data;
+#endif
+
int (*reg_mask_xlate)(struct gpio_regmap *gpio, unsigned int base,
unsigned int offset, unsigned int *reg,
unsigned int *mask);
@@ -215,6 +220,7 @@ EXPORT_SYMBOL_GPL(gpio_regmap_get_drvdata);
*/
struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config)
{
+ struct irq_domain *irq_domain;
struct gpio_regmap *gpio;
struct gpio_chip *chip;
int ret;
@@ -255,14 +261,15 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
chip->names = config->names;
chip->label = config->label ?: dev_name(config->parent);
chip->can_sleep = regmap_might_sleep(config->regmap);
+ chip->init_valid_mask = config->init_valid_mask;
chip->request = gpiochip_generic_request;
chip->free = gpiochip_generic_free;
chip->get = gpio_regmap_get;
if (gpio->reg_set_base && gpio->reg_clr_base)
- chip->set_rv = gpio_regmap_set_with_clear;
+ chip->set = gpio_regmap_set_with_clear;
else if (gpio->reg_set_base)
- chip->set_rv = gpio_regmap_set;
+ chip->set = gpio_regmap_set;
chip->get_direction = gpio_regmap_get_direction;
if (gpio->reg_dir_in_base || gpio->reg_dir_out_base) {
@@ -274,7 +281,7 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
if (!chip->ngpio) {
ret = gpiochip_get_ngpios(chip, chip->parent);
if (ret)
- return ERR_PTR(ret);
+ goto err_free_gpio;
}
/* if not set, assume there is only one register */
@@ -295,8 +302,22 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
if (ret < 0)
goto err_free_gpio;
- if (config->irq_domain) {
- ret = gpiochip_irqchip_add_domain(chip, config->irq_domain);
+#ifdef CONFIG_REGMAP_IRQ
+ if (config->regmap_irq_chip) {
+ gpio->regmap_irq_line = config->regmap_irq_line;
+ ret = regmap_add_irq_chip_fwnode(dev_fwnode(config->parent), config->regmap,
+ config->regmap_irq_line, config->regmap_irq_flags,
+ 0, config->regmap_irq_chip, &gpio->irq_chip_data);
+ if (ret)
+ goto err_free_gpio;
+
+ irq_domain = regmap_irq_get_domain(gpio->irq_chip_data);
+ } else
+#endif
+ irq_domain = config->irq_domain;
+
+ if (irq_domain) {
+ ret = gpiochip_irqchip_add_domain(chip, irq_domain);
if (ret)
goto err_remove_gpiochip;
}
@@ -317,6 +338,11 @@ EXPORT_SYMBOL_GPL(gpio_regmap_register);
*/
void gpio_regmap_unregister(struct gpio_regmap *gpio)
{
+#ifdef CONFIG_REGMAP_IRQ
+ if (gpio->irq_chip_data)
+ regmap_del_irq_chip(gpio->regmap_irq_line, gpio->irq_chip_data);
+#endif
+
gpiochip_remove(&gpio->gpio_chip);
kfree(gpio);
}
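
With the gpio-regmap changes above, a caller no longer has to build its own regmap-IRQ chip just to pass the resulting domain in via config->irq_domain: handing over a struct regmap_irq_chip lets gpio_regmap_register() add the regmap IRQ chip itself and gpio_regmap_unregister() tear it down. A sketch of the new call site, where my_regmap, my_regmap_irq_chip and my_irq are placeholders; the field names follow the usage in gpio_regmap_register() above:

	struct gpio_regmap_config config = {
		.parent = dev,
		.regmap = my_regmap,
		.ngpio = 16,
		/* instead of .irq_domain: */
		.regmap_irq_chip = &my_regmap_irq_chip,
		.regmap_irq_line = my_irq,
		.regmap_irq_flags = IRQF_ONESHOT,
	};
	struct gpio_regmap *gpio;

	gpio = gpio_regmap_register(&config);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);
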
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index ecd60ff9e1dd..47174eb3ba76 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -327,7 +327,7 @@ static int rockchip_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
static const struct gpio_chip rockchip_gpiolib_chip = {
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
- .set_rv = rockchip_gpio_set,
+ .set = rockchip_gpio_set,
.get = rockchip_gpio_get,
.get_direction = rockchip_gpio_get_direction,
.direction_input = rockchip_gpio_direction_input,
@@ -769,7 +769,7 @@ static int rockchip_gpio_probe(struct platform_device *pdev)
list_del(&cfg->head);
switch (cfg->param) {
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
ret = rockchip_gpio_direction_output(&bank->gpio_chip, cfg->pin, cfg->arg);
if (ret)
dev_warn(dev, "setting output pin %u to %u failed\n", cfg->pin,
diff --git a/drivers/gpio/gpio-rtd.c b/drivers/gpio/gpio-rtd.c
index 25bbd749b019..d46b40dd5283 100644
--- a/drivers/gpio/gpio-rtd.c
+++ b/drivers/gpio/gpio-rtd.c
@@ -565,7 +565,7 @@ static int rtd_gpio_probe(struct platform_device *pdev)
data->gpio_chip.get_direction = rtd_gpio_get_direction;
data->gpio_chip.direction_input = rtd_gpio_direction_input;
data->gpio_chip.direction_output = rtd_gpio_direction_output;
- data->gpio_chip.set_rv = rtd_gpio_set;
+ data->gpio_chip.set = rtd_gpio_set;
data->gpio_chip.get = rtd_gpio_get;
data->gpio_chip.set_config = rtd_gpio_set_config;
data->gpio_chip.parent = dev;
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index e9d054d78ccb..7f6a62f5d1ee 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -99,7 +99,7 @@ static struct sa1100_gpio_chip sa1100_gpio_chip = {
.get_direction = sa1100_get_direction,
.direction_input = sa1100_direction_input,
.direction_output = sa1100_direction_output,
- .set_rv = sa1100_gpio_set,
+ .set = sa1100_gpio_set,
.get = sa1100_gpio_get,
.to_irq = sa1100_to_irq,
.base = 0,
diff --git a/drivers/gpio/gpio-sama5d2-piobu.c b/drivers/gpio/gpio-sama5d2-piobu.c
index c31244cf5e89..5005688f6e67 100644
--- a/drivers/gpio/gpio-sama5d2-piobu.c
+++ b/drivers/gpio/gpio-sama5d2-piobu.c
@@ -196,7 +196,7 @@ static int sama5d2_piobu_probe(struct platform_device *pdev)
piobu->chip.direction_input = sama5d2_piobu_direction_input;
piobu->chip.direction_output = sama5d2_piobu_direction_output;
piobu->chip.get = sama5d2_piobu_get;
- piobu->chip.set_rv = sama5d2_piobu_set;
+ piobu->chip.set = sama5d2_piobu_set;
piobu->chip.base = -1;
piobu->chip.ngpio = PIOBU_NUM;
piobu->chip.can_sleep = 0;
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index 833ffdd98d74..966d16a6d515 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -167,7 +167,7 @@ static const struct gpio_chip sch_gpio_chip = {
.direction_input = sch_gpio_direction_in,
.get = sch_gpio_get,
.direction_output = sch_gpio_direction_out,
- .set_rv = sch_gpio_set,
+ .set = sch_gpio_set,
.get_direction = sch_gpio_get_direction,
};
diff --git a/drivers/gpio/gpio-sch311x.c b/drivers/gpio/gpio-sch311x.c
index 44fb5fc21fb8..f95566998d30 100644
--- a/drivers/gpio/gpio-sch311x.c
+++ b/drivers/gpio/gpio-sch311x.c
@@ -297,7 +297,7 @@ static int sch311x_gpio_probe(struct platform_device *pdev)
block->chip.get_direction = sch311x_gpio_get_direction;
block->chip.set_config = sch311x_gpio_set_config;
block->chip.get = sch311x_gpio_get;
- block->chip.set_rv = sch311x_gpio_set;
+ block->chip.set = sch311x_gpio_set;
block->chip.ngpio = 8;
block->chip.parent = &pdev->dev;
block->chip.base = sch311x_gpio_blocks[i].base;
diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c
index 067c8edb62e2..94ef2efbd14f 100644
--- a/drivers/gpio/gpio-sifive.c
+++ b/drivers/gpio/gpio-sifive.c
@@ -7,6 +7,7 @@
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/property.h>
@@ -32,7 +33,7 @@
struct sifive_gpio {
void __iomem *base;
- struct gpio_chip gc;
+ struct gpio_generic_chip gen_gc;
struct regmap *regs;
unsigned long irq_state;
unsigned int trigger[SIFIVE_GPIO_MAX];
@@ -41,10 +42,10 @@ struct sifive_gpio {
static void sifive_gpio_set_ie(struct sifive_gpio *chip, unsigned int offset)
{
- unsigned long flags;
unsigned int trigger;
- raw_spin_lock_irqsave(&chip->gc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&chip->gen_gc);
+
trigger = (chip->irq_state & BIT(offset)) ? chip->trigger[offset] : 0;
regmap_update_bits(chip->regs, SIFIVE_GPIO_RISE_IE, BIT(offset),
(trigger & IRQ_TYPE_EDGE_RISING) ? BIT(offset) : 0);
@@ -54,7 +55,6 @@ static void sifive_gpio_set_ie(struct sifive_gpio *chip, unsigned int offset)
(trigger & IRQ_TYPE_LEVEL_HIGH) ? BIT(offset) : 0);
regmap_update_bits(chip->regs, SIFIVE_GPIO_LOW_IE, BIT(offset),
(trigger & IRQ_TYPE_LEVEL_LOW) ? BIT(offset) : 0);
- raw_spin_unlock_irqrestore(&chip->gc.bgpio_lock, flags);
}
static int sifive_gpio_irq_set_type(struct irq_data *d, unsigned int trigger)
@@ -72,13 +72,12 @@ static int sifive_gpio_irq_set_type(struct irq_data *d, unsigned int trigger)
}
static void sifive_gpio_irq_enable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct sifive_gpio *chip = gpiochip_get_data(gc);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
int offset = hwirq % SIFIVE_GPIO_MAX;
u32 bit = BIT(offset);
- unsigned long flags;
gpiochip_enable_irq(gc, hwirq);
irq_chip_enable_parent(d);
@@ -86,13 +85,13 @@ static void sifive_gpio_irq_enable(struct irq_data *d)
/* Switch to input */
gc->direction_input(gc, offset);
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
- /* Clear any sticky pending interrupts */
- regmap_write(chip->regs, SIFIVE_GPIO_RISE_IP, bit);
- regmap_write(chip->regs, SIFIVE_GPIO_FALL_IP, bit);
- regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IP, bit);
- regmap_write(chip->regs, SIFIVE_GPIO_LOW_IP, bit);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ scoped_guard(gpio_generic_lock_irqsave, &chip->gen_gc) {
+ /* Clear any sticky pending interrupts */
+ regmap_write(chip->regs, SIFIVE_GPIO_RISE_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_FALL_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_LOW_IP, bit);
+ }
/* Enable interrupts */
assign_bit(offset, &chip->irq_state, 1);
@@ -118,15 +117,14 @@ static void sifive_gpio_irq_eoi(struct irq_data *d)
struct sifive_gpio *chip = gpiochip_get_data(gc);
int offset = irqd_to_hwirq(d) % SIFIVE_GPIO_MAX;
u32 bit = BIT(offset);
- unsigned long flags;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
- /* Clear all pending interrupts */
- regmap_write(chip->regs, SIFIVE_GPIO_RISE_IP, bit);
- regmap_write(chip->regs, SIFIVE_GPIO_FALL_IP, bit);
- regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IP, bit);
- regmap_write(chip->regs, SIFIVE_GPIO_LOW_IP, bit);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ scoped_guard(gpio_generic_lock_irqsave, &chip->gen_gc) {
+ /* Clear all pending interrupts */
+ regmap_write(chip->regs, SIFIVE_GPIO_RISE_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_FALL_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_LOW_IP, bit);
+ }
irq_chip_eoi_parent(d);
}
@@ -174,12 +172,12 @@ static const struct regmap_config sifive_gpio_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .fast_io = true,
.disable_locking = true,
};
static int sifive_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct irq_domain *parent;
struct gpio_irq_chip *girq;
@@ -218,13 +216,17 @@ static int sifive_gpio_probe(struct platform_device *pdev)
*/
parent = irq_get_irq_data(chip->irq_number[0])->domain;
- ret = bgpio_init(&chip->gc, dev, 4,
- chip->base + SIFIVE_GPIO_INPUT_VAL,
- chip->base + SIFIVE_GPIO_OUTPUT_VAL,
- NULL,
- chip->base + SIFIVE_GPIO_OUTPUT_EN,
- chip->base + SIFIVE_GPIO_INPUT_EN,
- BGPIOF_READ_OUTPUT_REG_SET);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = chip->base + SIFIVE_GPIO_INPUT_VAL,
+ .set = chip->base + SIFIVE_GPIO_OUTPUT_VAL,
+ .dirout = chip->base + SIFIVE_GPIO_OUTPUT_EN,
+ .dirin = chip->base + SIFIVE_GPIO_INPUT_EN,
+ .flags = GPIO_GENERIC_READ_OUTPUT_REG_SET,
+ };
+
+ ret = gpio_generic_chip_init(&chip->gen_gc, &config);
if (ret) {
dev_err(dev, "unable to init generic GPIO\n");
return ret;
@@ -237,12 +239,12 @@ static int sifive_gpio_probe(struct platform_device *pdev)
regmap_write(chip->regs, SIFIVE_GPIO_LOW_IE, 0);
chip->irq_state = 0;
- chip->gc.base = -1;
- chip->gc.ngpio = ngpio;
- chip->gc.label = dev_name(dev);
- chip->gc.parent = dev;
- chip->gc.owner = THIS_MODULE;
- girq = &chip->gc.irq;
+ chip->gen_gc.gc.base = -1;
+ chip->gen_gc.gc.ngpio = ngpio;
+ chip->gen_gc.gc.label = dev_name(dev);
+ chip->gen_gc.gc.parent = dev;
+ chip->gen_gc.gc.owner = THIS_MODULE;
+ girq = &chip->gen_gc.gc.irq;
gpio_irq_chip_set_chip(girq, &sifive_gpio_irqchip);
girq->fwnode = dev_fwnode(dev);
girq->parent_domain = parent;
@@ -250,7 +252,7 @@ static int sifive_gpio_probe(struct platform_device *pdev)
girq->handler = handle_bad_irq;
girq->default_type = IRQ_TYPE_NONE;
- return gpiochip_add_data(&chip->gc, chip);
+ return gpiochip_add_data(&chip->gen_gc.gc, chip);
}
static const struct of_device_id sifive_gpio_match[] = {
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index 9503296422fd..a83f5238427c 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -262,8 +262,7 @@ static void gpio_sim_dbg_show(struct seq_file *seq, struct gpio_chip *gc)
guard(mutex)(&chip->lock);
for_each_hwgpio(gc, i, label)
- seq_printf(seq, " gpio-%-3d (%s) %s,%s\n",
- gc->base + i,
+ seq_printf(seq, " gpio-%-3d (%s) %s,%s\n", i,
label ?: "<unused>",
test_bit(i, chip->direction_map) ? "input" :
test_bit(i, chip->value_map) ? "output-high" :
@@ -486,9 +485,9 @@ static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev)
gc->parent = dev;
gc->fwnode = swnode;
gc->get = gpio_sim_get;
- gc->set_rv = gpio_sim_set;
+ gc->set = gpio_sim_set;
gc->get_multiple = gpio_sim_get_multiple;
- gc->set_multiple_rv = gpio_sim_set_multiple;
+ gc->set_multiple = gpio_sim_set_multiple;
gc->direction_output = gpio_sim_direction_output;
gc->direction_input = gpio_sim_direction_input;
gc->get_direction = gpio_sim_get_direction;
diff --git a/drivers/gpio/gpio-siox.c b/drivers/gpio/gpio-siox.c
index 95355dda621b..958034b9f3f3 100644
--- a/drivers/gpio/gpio-siox.c
+++ b/drivers/gpio/gpio-siox.c
@@ -237,7 +237,7 @@ static int gpio_siox_probe(struct siox_device *sdevice)
gc->parent = dev;
gc->owner = THIS_MODULE;
gc->get = gpio_siox_get;
- gc->set_rv = gpio_siox_set;
+ gc->set = gpio_siox_set;
gc->direction_input = gpio_siox_direction_input;
gc->direction_output = gpio_siox_direction_output;
gc->get_direction = gpio_siox_get_direction;
diff --git a/drivers/gpio/gpio-sodaville.c b/drivers/gpio/gpio-sodaville.c
index abd13c79ace0..37c133837729 100644
--- a/drivers/gpio/gpio-sodaville.c
+++ b/drivers/gpio/gpio-sodaville.c
@@ -9,6 +9,7 @@
#include <linux/errno.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -39,7 +40,7 @@ struct sdv_gpio_chip_data {
void __iomem *gpio_pub_base;
struct irq_domain *id;
struct irq_chip_generic *gc;
- struct gpio_chip chip;
+ struct gpio_generic_chip gen_gc;
};
static int sdv_gpio_pub_set_type(struct irq_data *d, unsigned int type)
@@ -180,6 +181,7 @@ static int sdv_register_irqsupport(struct sdv_gpio_chip_data *sd,
static int sdv_gpio_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_id)
{
+ struct gpio_generic_chip_config config;
struct sdv_gpio_chip_data *sd;
int ret;
u32 mux_val;
@@ -206,15 +208,21 @@ static int sdv_gpio_probe(struct pci_dev *pdev,
if (!ret)
writel(mux_val, sd->gpio_pub_base + GPMUXCTL);
- ret = bgpio_init(&sd->chip, &pdev->dev, 4,
- sd->gpio_pub_base + GPINR, sd->gpio_pub_base + GPOUTR,
- NULL, sd->gpio_pub_base + GPOER, NULL, 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = &pdev->dev,
+ .sz = 4,
+ .dat = sd->gpio_pub_base + GPINR,
+ .set = sd->gpio_pub_base + GPOUTR,
+ .dirout = sd->gpio_pub_base + GPOER,
+ };
+
+ ret = gpio_generic_chip_init(&sd->gen_gc, &config);
if (ret)
return ret;
- sd->chip.ngpio = SDV_NUM_PUB_GPIOS;
+ sd->gen_gc.gc.ngpio = SDV_NUM_PUB_GPIOS;
- ret = devm_gpiochip_add_data(&pdev->dev, &sd->chip, sd);
+ ret = devm_gpiochip_add_data(&pdev->dev, &sd->gen_gc.gc, sd);
if (ret < 0) {
dev_err(&pdev->dev, "gpiochip_add() failed.\n");
return ret;
diff --git a/drivers/gpio/gpio-spacemit-k1.c b/drivers/gpio/gpio-spacemit-k1.c
index 3cc75c701ec4..eb66a15c002f 100644
--- a/drivers/gpio/gpio-spacemit-k1.c
+++ b/drivers/gpio/gpio-spacemit-k1.c
@@ -6,6 +6,7 @@
#include <linux/clk.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -38,7 +39,7 @@
struct spacemit_gpio;
struct spacemit_gpio_bank {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
struct spacemit_gpio *sg;
void __iomem *base;
u32 irq_mask;
@@ -72,7 +73,7 @@ static irqreturn_t spacemit_gpio_irq_handler(int irq, void *dev_id)
return IRQ_NONE;
for_each_set_bit(n, &pending, BITS_PER_LONG)
- handle_nested_irq(irq_find_mapping(gb->gc.irq.domain, n));
+ handle_nested_irq(irq_find_mapping(gb->chip.gc.irq.domain, n));
return IRQ_HANDLED;
}
@@ -143,7 +144,7 @@ static void spacemit_gpio_irq_print_chip(struct irq_data *data, struct seq_file
{
struct spacemit_gpio_bank *gb = irq_data_get_irq_chip_data(data);
- seq_printf(p, "%s-%d", dev_name(gb->gc.parent), spacemit_gpio_bank_index(gb));
+ seq_printf(p, "%s-%d", dev_name(gb->chip.gc.parent), spacemit_gpio_bank_index(gb));
}
static struct irq_chip spacemit_gpio_chip = {
@@ -165,7 +166,7 @@ static bool spacemit_of_node_instance_match(struct gpio_chip *gc, unsigned int i
if (i >= SPACEMIT_NR_BANKS)
return false;
- return (gc == &sg->sgb[i].gc);
+ return (gc == &sg->sgb[i].chip.gc);
}
static int spacemit_gpio_add_bank(struct spacemit_gpio *sg,
@@ -173,7 +174,8 @@ static int spacemit_gpio_add_bank(struct spacemit_gpio *sg,
int index, int irq)
{
struct spacemit_gpio_bank *gb = &sg->sgb[index];
- struct gpio_chip *gc = &gb->gc;
+ struct gpio_generic_chip_config config;
+ struct gpio_chip *gc = &gb->chip.gc;
struct device *dev = sg->dev;
struct gpio_irq_chip *girq;
void __iomem *dat, *set, *clr, *dirin, *dirout;
@@ -187,9 +189,20 @@ static int spacemit_gpio_add_bank(struct spacemit_gpio *sg,
dirin = gb->base + SPACEMIT_GCDR;
dirout = gb->base + SPACEMIT_GSDR;
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = dat,
+ .set = set,
+ .clr = clr,
+ .dirout = dirout,
+ .dirin = dirin,
+ .flags = GPIO_GENERIC_UNREADABLE_REG_SET |
+ GPIO_GENERIC_UNREADABLE_REG_DIR,
+ };
+
/* This registers 32 GPIO lines per bank */
- ret = bgpio_init(gc, dev, 4, dat, set, clr, dirout, dirin,
- BGPIOF_UNREADABLE_REG_SET | BGPIOF_UNREADABLE_REG_DIR);
+ ret = gpio_generic_chip_init(&gb->chip, &config);
if (ret)
return dev_err_probe(dev, ret, "failed to init gpio chip\n");
@@ -221,7 +234,7 @@ static int spacemit_gpio_add_bank(struct spacemit_gpio *sg,
ret = devm_request_threaded_irq(dev, irq, NULL,
spacemit_gpio_irq_handler,
IRQF_ONESHOT | IRQF_SHARED,
- gb->gc.label, gb);
+ gb->chip.gc.label, gb);
if (ret < 0)
return dev_err_probe(dev, ret, "failed to register IRQ\n");
diff --git a/drivers/gpio/gpio-spear-spics.c b/drivers/gpio/gpio-spear-spics.c
index 55f0e8afa291..96a0e1211500 100644
--- a/drivers/gpio/gpio-spear-spics.c
+++ b/drivers/gpio/gpio-spear-spics.c
@@ -140,7 +140,7 @@ static int spics_gpio_probe(struct platform_device *pdev)
spics->chip.request = spics_request;
spics->chip.free = spics_free;
spics->chip.direction_output = spics_direction_output;
- spics->chip.set_rv = spics_set_value;
+ spics->chip.set = spics_set_value;
spics->chip.label = dev_name(&pdev->dev);
spics->chip.parent = &pdev->dev;
spics->chip.owner = THIS_MODULE;
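The one-line renames in this and the following drivers finish the setter conversion: .set_rv and .set_multiple_rv were the transitional names for the int-returning callbacks, and with the old void-returning .set/.set_multiple gone they take over the short names. The callback keeps the int-returning signature, so a setter under the new name looks like this sketch (the foo driver, its map field and FOO_OUT_REG are hypothetical):

	static int foo_gpio_set(struct gpio_chip *gc, unsigned int offset,
				int value)
	{
		struct foo_gpio *foo = gpiochip_get_data(gc);

		/* Return 0 or a negative errno so gpiolib can propagate
		 * bus errors to the consumer. */
		return regmap_update_bits(foo->map, FOO_OUT_REG, BIT(offset),
					  value ? BIT(offset) : 0);
	}
	...
	foo->chip.set = foo_gpio_set;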
diff --git a/drivers/gpio/gpio-sprd.c b/drivers/gpio/gpio-sprd.c
index bbd5bf51c088..413bcd0a4240 100644
--- a/drivers/gpio/gpio-sprd.c
+++ b/drivers/gpio/gpio-sprd.c
@@ -245,7 +245,7 @@ static int sprd_gpio_probe(struct platform_device *pdev)
sprd_gpio->chip.request = sprd_gpio_request;
sprd_gpio->chip.free = sprd_gpio_free;
sprd_gpio->chip.get = sprd_gpio_get;
- sprd_gpio->chip.set_rv = sprd_gpio_set;
+ sprd_gpio->chip.set = sprd_gpio_set;
sprd_gpio->chip.direction_input = sprd_gpio_direction_input;
sprd_gpio->chip.direction_output = sprd_gpio_direction_output;
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index 0a270156e0be..6faf30347a36 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -136,7 +136,7 @@ static const struct gpio_chip template_chip = {
.direction_input = stmpe_gpio_direction_input,
.get = stmpe_gpio_get,
.direction_output = stmpe_gpio_direction_output,
- .set_rv = stmpe_gpio_set,
+ .set = stmpe_gpio_set,
.request = stmpe_gpio_request,
.can_sleep = true,
};
@@ -262,9 +262,8 @@ static void stmpe_gpio_irq_unmask(struct irq_data *d)
stmpe_gpio->regs[REG_IE][regoffset] |= mask;
}
-static void stmpe_dbg_show_one(struct seq_file *s,
- struct gpio_chip *gc,
- unsigned offset, unsigned gpio)
+static void stmpe_dbg_show_one(struct seq_file *s, struct gpio_chip *gc,
+ unsigned int offset)
{
struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(gc);
struct stmpe *stmpe = stmpe_gpio->stmpe;
@@ -286,7 +285,7 @@ static void stmpe_dbg_show_one(struct seq_file *s,
if (dir) {
seq_printf(s, " gpio-%-3d (%-20.20s) out %s",
- gpio, label ?: "(none)", str_hi_lo(val));
+ offset, label ?: "(none)", str_hi_lo(val));
} else {
u8 edge_det_reg;
u8 rise_reg;
@@ -354,7 +353,7 @@ static void stmpe_dbg_show_one(struct seq_file *s,
irqen = !!(ret & mask);
seq_printf(s, " gpio-%-3d (%-20.20s) in %s %13s %13s %25s %25s",
- gpio, label ?: "(none)",
+ offset, label ?: "(none)",
str_hi_lo(val),
edge_det_values[edge_det],
irqen ? "IRQ-enabled" : "IRQ-disabled",
@@ -366,10 +365,9 @@ static void stmpe_dbg_show_one(struct seq_file *s,
static void stmpe_dbg_show(struct seq_file *s, struct gpio_chip *gc)
{
unsigned i;
- unsigned gpio = gc->base;
- for (i = 0; i < gc->ngpio; i++, gpio++) {
- stmpe_dbg_show_one(s, gc, i, gpio);
+ for (i = 0; i < gc->ngpio; i++) {
+ stmpe_dbg_show_one(s, gc, i);
seq_putc(s, '\n');
}
}
@@ -534,10 +532,16 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
return devm_gpiochip_add_data(dev, &stmpe_gpio->chip, stmpe_gpio);
}
+static const struct of_device_id stmpe_gpio_of_matches[] = {
+ { .compatible = "st,stmpe-gpio", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, stmpe_gpio_of_matches);
+
static struct platform_driver stmpe_gpio_driver = {
.driver = {
- .suppress_bind_attrs = true,
- .name = "stmpe-gpio",
+ .name = "stmpe-gpio",
+ .of_match_table = stmpe_gpio_of_matches,
},
.probe = stmpe_gpio_probe,
};
@@ -547,3 +551,13 @@ static int __init stmpe_gpio_init(void)
return platform_driver_register(&stmpe_gpio_driver);
}
subsys_initcall(stmpe_gpio_init);
+
+static void __exit stmpe_gpio_exit(void)
+{
+ platform_driver_unregister(&stmpe_gpio_driver);
+}
+module_exit(stmpe_gpio_exit);
+
+MODULE_DESCRIPTION("STMPE expander GPIO");
+MODULE_AUTHOR("Rabin Vincent <rabin.vincent@stericsson.com>");
+MODULE_LICENSE("GPL");
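Note why the stmpe change adds an explicit module_exit() instead of switching to the module_platform_driver() helper: that macro expands to a module_init()/module_exit() pair, roughly

	static int __init stmpe_gpio_driver_init(void)
	{
		return platform_driver_register(&stmpe_gpio_driver);
	}
	module_init(stmpe_gpio_driver_init);

	static void __exit stmpe_gpio_driver_exit(void)
	{
		platform_driver_unregister(&stmpe_gpio_driver);
	}
	module_exit(stmpe_gpio_driver_exit);

but this driver registers at subsys_initcall() time, which runs earlier than module_init() in built-in kernels, so only the missing exit half is added by hand. The of_device_id table plus MODULE_DEVICE_TABLE() supply the alias userspace needs to autoload the module on a devicetree match.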
diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c
index fdda8de6ca36..493c027afdd6 100644
--- a/drivers/gpio/gpio-stp-xway.c
+++ b/drivers/gpio/gpio-stp-xway.c
@@ -249,7 +249,7 @@ static int xway_stp_probe(struct platform_device *pdev)
chip->gc.label = "stp-xway";
chip->gc.direction_output = xway_stp_dir_out;
chip->gc.get = xway_stp_get;
- chip->gc.set_rv = xway_stp_set;
+ chip->gc.set = xway_stp_set;
chip->gc.request = xway_stp_request;
chip->gc.base = -1;
chip->gc.owner = THIS_MODULE;
diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c
index f86f78655c24..40064d4cf47f 100644
--- a/drivers/gpio/gpio-syscon.c
+++ b/drivers/gpio/gpio-syscon.c
@@ -115,7 +115,7 @@ static int syscon_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int val)
BIT(offs % SYSCON_REG_BITS));
}
- return chip->set_rv(chip, offset, val);
+ return chip->set(chip, offset, val);
}
static const struct syscon_gpio_data clps711x_mctrl_gpio = {
@@ -251,7 +251,7 @@ static int syscon_gpio_probe(struct platform_device *pdev)
if (priv->data->flags & GPIO_SYSCON_FEAT_IN)
priv->chip.direction_input = syscon_gpio_dir_in;
if (priv->data->flags & GPIO_SYSCON_FEAT_OUT) {
- priv->chip.set_rv = priv->data->set ? : syscon_gpio_set;
+ priv->chip.set = priv->data->set ? : syscon_gpio_set;
priv->chip.direction_output = syscon_gpio_dir_out;
}
diff --git a/drivers/gpio/gpio-tangier.c b/drivers/gpio/gpio-tangier.c
index ce17b98e0623..ba5a8ede8912 100644
--- a/drivers/gpio/gpio-tangier.c
+++ b/drivers/gpio/gpio-tangier.c
@@ -430,7 +430,7 @@ int devm_tng_gpio_probe(struct device *dev, struct tng_gpio *gpio)
gpio->chip.direction_input = tng_gpio_direction_input;
gpio->chip.direction_output = tng_gpio_direction_output;
gpio->chip.get = tng_gpio_get;
- gpio->chip.set_rv = tng_gpio_set;
+ gpio->chip.set = tng_gpio_set;
gpio->chip.get_direction = tng_gpio_get_direction;
gpio->chip.set_config = tng_gpio_set_config;
gpio->chip.base = info->base;
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index 1869ee7f9423..09a448ce3eec 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -7,20 +7,20 @@
* Christian Ruppert <christian.ruppert@abilis.com>
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
+#include <linux/bitops.h>
#include <linux/gpio/driver.h>
-#include <linux/slab.h>
-#include <linux/irq.h>
-#include <linux/irqdomain.h>
+#include <linux/gpio/generic.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
-#include <linux/spinlock.h>
-#include <linux/bitops.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
#define TB10X_GPIO_DIR_IN (0x00000000)
#define TB10X_GPIO_DIR_OUT (0x00000001)
@@ -36,13 +36,13 @@
* @base: register base address
* @domain: IRQ domain of GPIO generated interrupts managed by this controller
* @irq: Interrupt line of parent interrupt controller
- * @gc: gpio_chip structure associated to this GPIO controller
+ * @chip: Generic GPIO chip structure associated with this GPIO controller
*/
struct tb10x_gpio {
void __iomem *base;
struct irq_domain *domain;
int irq;
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
};
static inline u32 tb10x_reg_read(struct tb10x_gpio *gpio, unsigned int offs)
@@ -60,16 +60,13 @@ static inline void tb10x_set_bits(struct tb10x_gpio *gpio, unsigned int offs,
u32 mask, u32 val)
{
u32 r;
- unsigned long flags;
- raw_spin_lock_irqsave(&gpio->gc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&gpio->chip);
r = tb10x_reg_read(gpio, offs);
r = (r & ~mask) | (val & mask);
tb10x_reg_write(gpio, offs, r);
-
- raw_spin_unlock_irqrestore(&gpio->gc.bgpio_lock, flags);
}
static int tb10x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
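The tb10x_set_bits() hunk above also swaps the open-coded raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() pair on the generic chip's lock for the scope-based guard() helper from <linux/cleanup.h>: the lock is released automatically when the guard goes out of scope, which is what lets the flags variable and the explicit unlock disappear. The same pattern for a hypothetical read-modify-write helper, using the register accessors this series introduces:

	static void foo_rmw(struct gpio_generic_chip *chip, void __iomem *reg,
			    u32 mask, u32 val)
	{
		u32 r;

		/* Dropped automatically at end of scope, including on any
		 * early return. */
		guard(gpio_generic_lock_irqsave)(chip);

		r = gpio_generic_read_reg(chip, reg);
		gpio_generic_write_reg(chip, reg, (r & ~mask) | (val & mask));
	}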
@@ -107,6 +104,7 @@ static irqreturn_t tb10x_gpio_irq_cascade(int irq, void *data)
static int tb10x_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct tb10x_gpio *tb10x_gpio;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
@@ -127,9 +125,9 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
if (IS_ERR(tb10x_gpio->base))
return PTR_ERR(tb10x_gpio->base);
- tb10x_gpio->gc.label =
+ tb10x_gpio->chip.gc.label =
devm_kasprintf(dev, GFP_KERNEL, "%pOF", pdev->dev.of_node);
- if (!tb10x_gpio->gc.label)
+ if (!tb10x_gpio->chip.gc.label)
return -ENOMEM;
/*
@@ -137,29 +135,30 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
* the lines, no special set or clear registers and a data direction register
* where 1 means "output".
*/
- ret = bgpio_init(&tb10x_gpio->gc, dev, 4,
- tb10x_gpio->base + OFFSET_TO_REG_DATA,
- NULL,
- NULL,
- tb10x_gpio->base + OFFSET_TO_REG_DDR,
- NULL,
- 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = tb10x_gpio->base + OFFSET_TO_REG_DATA,
+ .dirout = tb10x_gpio->base + OFFSET_TO_REG_DDR,
+ };
+
+ ret = gpio_generic_chip_init(&tb10x_gpio->chip, &config);
if (ret) {
dev_err(dev, "unable to init generic GPIO\n");
return ret;
}
- tb10x_gpio->gc.base = -1;
- tb10x_gpio->gc.parent = dev;
- tb10x_gpio->gc.owner = THIS_MODULE;
+ tb10x_gpio->chip.gc.base = -1;
+ tb10x_gpio->chip.gc.parent = dev;
+ tb10x_gpio->chip.gc.owner = THIS_MODULE;
/*
- * ngpio is set by bgpio_init() but we override it, this .request()
- * callback also overrides the one set up by generic GPIO.
+ * ngpio is set by gpio_generic_chip_init() but we override it, this
+ * .request() callback also overrides the one set up by generic GPIO.
*/
- tb10x_gpio->gc.ngpio = ngpio;
- tb10x_gpio->gc.request = gpiochip_generic_request;
- tb10x_gpio->gc.free = gpiochip_generic_free;
+ tb10x_gpio->chip.gc.ngpio = ngpio;
+ tb10x_gpio->chip.gc.request = gpiochip_generic_request;
+ tb10x_gpio->chip.gc.free = gpiochip_generic_free;
- ret = devm_gpiochip_add_data(dev, &tb10x_gpio->gc, tb10x_gpio);
+ ret = devm_gpiochip_add_data(dev, &tb10x_gpio->chip.gc, tb10x_gpio);
if (ret < 0) {
dev_err(dev, "Could not add gpiochip.\n");
return ret;
@@ -174,7 +173,7 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- tb10x_gpio->gc.to_irq = tb10x_gpio_to_irq;
+ tb10x_gpio->chip.gc.to_irq = tb10x_gpio_to_irq;
tb10x_gpio->irq = ret;
ret = devm_request_irq(dev, ret, tb10x_gpio_irq_cascade,
@@ -183,14 +182,15 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
if (ret != 0)
return ret;
- tb10x_gpio->domain = irq_domain_create_linear(dev_fwnode(dev), tb10x_gpio->gc.ngpio,
+ tb10x_gpio->domain = irq_domain_create_linear(dev_fwnode(dev),
+ tb10x_gpio->chip.gc.ngpio,
&irq_generic_chip_ops, NULL);
if (!tb10x_gpio->domain) {
return -ENOMEM;
}
ret = irq_alloc_domain_generic_chips(tb10x_gpio->domain,
- tb10x_gpio->gc.ngpio, 1, tb10x_gpio->gc.label,
+ tb10x_gpio->chip.gc.ngpio, 1, tb10x_gpio->chip.gc.label,
handle_edge_irq, IRQ_NOREQUEST, IRQ_NOPROBE,
IRQ_GC_INIT_MASK_CACHE);
if (ret)
@@ -218,9 +218,9 @@ static void tb10x_gpio_remove(struct platform_device *pdev)
{
struct tb10x_gpio *tb10x_gpio = platform_get_drvdata(pdev);
- if (tb10x_gpio->gc.to_irq) {
+ if (tb10x_gpio->chip.gc.to_irq) {
irq_remove_generic_chip(tb10x_gpio->domain->gc->gc[0],
- BIT(tb10x_gpio->gc.ngpio) - 1, 0, 0);
+ BIT(tb10x_gpio->chip.gc.ngpio) - 1, 0, 0);
kfree(tb10x_gpio->domain->gc);
irq_domain_remove(tb10x_gpio->domain);
}
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index 0bd32809fd68..90d048f9da08 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -149,7 +149,7 @@ static const struct gpio_chip template_chip = {
.label = "tc3589x",
.owner = THIS_MODULE,
.get = tc3589x_gpio_get,
- .set_rv = tc3589x_gpio_set,
+ .set = tc3589x_gpio_set,
.direction_output = tc3589x_gpio_direction_output,
.direction_input = tc3589x_gpio_direction_input,
.get_direction = tc3589x_gpio_get_direction,
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 126fd12550aa..15a5762a82c2 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -720,7 +720,7 @@ static int tegra_gpio_probe(struct platform_device *pdev)
tgi->gc.direction_input = tegra_gpio_direction_input;
tgi->gc.get = tegra_gpio_get;
tgi->gc.direction_output = tegra_gpio_direction_output;
- tgi->gc.set_rv = tegra_gpio_set;
+ tgi->gc.set = tegra_gpio_set;
tgi->gc.get_direction = tegra_gpio_get_direction;
tgi->gc.base = 0;
tgi->gc.ngpio = tgi->bank_count * 32;
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index f902da15c419..4d3db6e06eeb 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -20,6 +20,7 @@
#include <dt-bindings/gpio/tegra194-gpio.h>
#include <dt-bindings/gpio/tegra234-gpio.h>
#include <dt-bindings/gpio/tegra241-gpio.h>
+#include <dt-bindings/gpio/tegra256-gpio.h>
/* security registers */
#define TEGRA186_GPIO_CTL_SCR 0x0c
@@ -891,7 +892,7 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
gpio->gpio.direction_input = tegra186_gpio_direction_input;
gpio->gpio.direction_output = tegra186_gpio_direction_output;
gpio->gpio.get = tegra186_gpio_get;
- gpio->gpio.set_rv = tegra186_gpio_set;
+ gpio->gpio.set = tegra186_gpio_set;
gpio->gpio.set_config = tegra186_gpio_set_config;
gpio->gpio.add_pin_ranges = tegra186_gpio_add_pin_ranges;
gpio->gpio.init_valid_mask = tegra186_init_valid_mask;
@@ -1279,6 +1280,30 @@ static const struct tegra_gpio_soc tegra241_aon_soc = {
.has_vm_support = false,
};
+#define TEGRA256_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \
+ [TEGRA256_MAIN_GPIO_PORT_##_name] = { \
+ .name = #_name, \
+ .bank = _bank, \
+ .port = _port, \
+ .pins = _pins, \
+ }
+
+static const struct tegra_gpio_port tegra256_main_ports[] = {
+ TEGRA256_MAIN_GPIO_PORT(A, 0, 0, 8),
+ TEGRA256_MAIN_GPIO_PORT(B, 0, 1, 8),
+ TEGRA256_MAIN_GPIO_PORT(C, 0, 2, 8),
+ TEGRA256_MAIN_GPIO_PORT(D, 0, 3, 8),
+};
+
+static const struct tegra_gpio_soc tegra256_main_soc = {
+ .num_ports = ARRAY_SIZE(tegra256_main_ports),
+ .ports = tegra256_main_ports,
+ .name = "tegra256-gpio-main",
+ .instance = 1,
+ .num_irqs_per_bank = 8,
+ .has_vm_support = true,
+};
+
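For reference, the first entry of the tegra256_main_ports[] table expands, assuming TEGRA256_MAIN_GPIO_PORT_A comes from the newly included dt-bindings header, to:

	[TEGRA256_MAIN_GPIO_PORT_A] = {
		.name = "A",
		.bank = 0,
		.port = 0,
		.pins = 8,
	},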
static const struct of_device_id tegra186_gpio_of_match[] = {
{
.compatible = "nvidia,tegra186-gpio",
@@ -1299,6 +1324,9 @@ static const struct of_device_id tegra186_gpio_of_match[] = {
.compatible = "nvidia,tegra234-gpio-aon",
.data = &tegra234_aon_soc
}, {
+ .compatible = "nvidia,tegra256-gpio",
+ .data = &tegra256_main_soc
+ }, {
/* sentinel */
}
};
diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c
index eb6a1f0279c0..be96853063ba 100644
--- a/drivers/gpio/gpio-thunderx.c
+++ b/drivers/gpio/gpio-thunderx.c
@@ -533,8 +533,8 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
chip->direction_input = thunderx_gpio_dir_in;
chip->get = thunderx_gpio_get;
chip->direction_output = thunderx_gpio_dir_out;
- chip->set_rv = thunderx_gpio_set;
- chip->set_multiple_rv = thunderx_gpio_set_multiple;
+ chip->set = thunderx_gpio_set;
+ chip->set_multiple = thunderx_gpio_set_multiple;
chip->set_config = thunderx_gpio_set_config;
girq = &chip->irq;
gpio_irq_chip_set_chip(girq, &thunderx_gpio_irq_chip);
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index fbb883089189..f488939dd00a 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -137,7 +137,7 @@ static int timbgpio_irq_type(struct irq_data *d, unsigned trigger)
u32 ver;
int ret = 0;
- if (offset < 0 || offset > tgpio->gpio.ngpio)
+ if (offset < 0 || offset >= tgpio->gpio.ngpio)
return -EINVAL;
ver = ioread32(tgpio->membase + TGPIO_VER);
@@ -253,7 +253,7 @@ static int timbgpio_probe(struct platform_device *pdev)
gc->direction_input = timbgpio_gpio_direction_input;
gc->get = timbgpio_gpio_get;
gc->direction_output = timbgpio_gpio_direction_output;
- gc->set_rv = timbgpio_gpio_set;
+ gc->set = timbgpio_gpio_set;
gc->to_irq = (irq >= 0 && tgpio->irq_base > 0) ? timbgpio_to_irq : NULL;
gc->dbg_show = NULL;
gc->base = pdata->gpio_base;
diff --git a/drivers/gpio/gpio-tpic2810.c b/drivers/gpio/gpio-tpic2810.c
index d5b8568ab061..866ff2d436d5 100644
--- a/drivers/gpio/gpio-tpic2810.c
+++ b/drivers/gpio/gpio-tpic2810.c
@@ -80,8 +80,8 @@ static const struct gpio_chip template_chip = {
.owner = THIS_MODULE,
.get_direction = tpic2810_get_direction,
.direction_output = tpic2810_direction_output,
- .set_rv = tpic2810_set,
- .set_multiple_rv = tpic2810_set_multiple,
+ .set = tpic2810_set,
+ .set_multiple = tpic2810_set_multiple,
.base = -1,
.ngpio = 8,
.can_sleep = true,
diff --git a/drivers/gpio/gpio-tps65086.c b/drivers/gpio/gpio-tps65086.c
index 08fa061b73ef..84b17b83476f 100644
--- a/drivers/gpio/gpio-tps65086.c
+++ b/drivers/gpio/gpio-tps65086.c
@@ -69,7 +69,7 @@ static const struct gpio_chip template_chip = {
.direction_input = tps65086_gpio_direction_input,
.direction_output = tps65086_gpio_direction_output,
.get = tps65086_gpio_get,
- .set_rv = tps65086_gpio_set,
+ .set = tps65086_gpio_set,
.base = -1,
.ngpio = 4,
.can_sleep = true,
diff --git a/drivers/gpio/gpio-tps65218.c b/drivers/gpio/gpio-tps65218.c
index 49cd7754ed05..3b4c41f5ef55 100644
--- a/drivers/gpio/gpio-tps65218.c
+++ b/drivers/gpio/gpio-tps65218.c
@@ -169,7 +169,7 @@ static const struct gpio_chip template_chip = {
.request = tps65218_gpio_request,
.direction_output = tps65218_gpio_output,
.get = tps65218_gpio_get,
- .set_rv = tps65218_gpio_set,
+ .set = tps65218_gpio_set,
.set_config = tps65218_gpio_set_config,
.can_sleep = true,
.ngpio = 3,
diff --git a/drivers/gpio/gpio-tps65219.c b/drivers/gpio/gpio-tps65219.c
index c0177088c54c..158f63bcf10c 100644
--- a/drivers/gpio/gpio-tps65219.c
+++ b/drivers/gpio/gpio-tps65219.c
@@ -203,7 +203,7 @@ static const struct gpio_chip tps65214_template_chip = {
.direction_input = tps65219_gpio_direction_input,
.direction_output = tps65219_gpio_direction_output,
.get = tps65219_gpio_get,
- .set_rv = tps65219_gpio_set,
+ .set = tps65219_gpio_set,
.base = -1,
.ngpio = 2,
.can_sleep = true,
@@ -216,7 +216,7 @@ static const struct gpio_chip tps65219_template_chip = {
.direction_input = tps65219_gpio_direction_input,
.direction_output = tps65219_gpio_direction_output,
.get = tps65219_gpio_get,
- .set_rv = tps65219_gpio_set,
+ .set = tps65219_gpio_set,
.base = -1,
.ngpio = 3,
.can_sleep = true,
diff --git a/drivers/gpio/gpio-tps6586x.c b/drivers/gpio/gpio-tps6586x.c
index f1ced092f38a..aaacbb54bf5d 100644
--- a/drivers/gpio/gpio-tps6586x.c
+++ b/drivers/gpio/gpio-tps6586x.c
@@ -98,7 +98,7 @@ static int tps6586x_gpio_probe(struct platform_device *pdev)
/* FIXME: add handling of GPIOs as dedicated inputs */
tps6586x_gpio->gpio_chip.direction_output = tps6586x_gpio_output;
- tps6586x_gpio->gpio_chip.set_rv = tps6586x_gpio_set;
+ tps6586x_gpio->gpio_chip.set = tps6586x_gpio_set;
tps6586x_gpio->gpio_chip.get = tps6586x_gpio_get;
tps6586x_gpio->gpio_chip.to_irq = tps6586x_gpio_to_irq;
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c
index 3204f55394cf..25e9f41efe78 100644
--- a/drivers/gpio/gpio-tps65910.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -139,7 +139,7 @@ static int tps65910_gpio_probe(struct platform_device *pdev)
tps65910_gpio->gpio_chip.can_sleep = true;
tps65910_gpio->gpio_chip.direction_input = tps65910_gpio_input;
tps65910_gpio->gpio_chip.direction_output = tps65910_gpio_output;
- tps65910_gpio->gpio_chip.set_rv = tps65910_gpio_set;
+ tps65910_gpio->gpio_chip.set = tps65910_gpio_set;
tps65910_gpio->gpio_chip.get = tps65910_gpio_get;
tps65910_gpio->gpio_chip.parent = &pdev->dev;
diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c
index d586ccfbfc56..7a2c5685c2fd 100644
--- a/drivers/gpio/gpio-tps65912.c
+++ b/drivers/gpio/gpio-tps65912.c
@@ -92,7 +92,7 @@ static const struct gpio_chip template_chip = {
.direction_input = tps65912_gpio_direction_input,
.direction_output = tps65912_gpio_direction_output,
.get = tps65912_gpio_get,
- .set_rv = tps65912_gpio_set,
+ .set = tps65912_gpio_set,
.base = -1,
.ngpio = 5,
.can_sleep = true,
diff --git a/drivers/gpio/gpio-tps68470.c b/drivers/gpio/gpio-tps68470.c
index 3b8805c854f7..d4fbdf90e190 100644
--- a/drivers/gpio/gpio-tps68470.c
+++ b/drivers/gpio/gpio-tps68470.c
@@ -142,7 +142,7 @@ static int tps68470_gpio_probe(struct platform_device *pdev)
tps68470_gpio->gc.direction_output = tps68470_gpio_output;
tps68470_gpio->gc.get = tps68470_gpio_get;
tps68470_gpio->gc.get_direction = tps68470_gpio_get_direction;
- tps68470_gpio->gc.set_rv = tps68470_gpio_set;
+ tps68470_gpio->gc.set = tps68470_gpio_set;
tps68470_gpio->gc.can_sleep = true;
tps68470_gpio->gc.names = tps68470_names;
tps68470_gpio->gc.ngpio = TPS68470_N_GPIO;
diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c
index 056799ecce6a..27dd09273292 100644
--- a/drivers/gpio/gpio-tqmx86.c
+++ b/drivers/gpio/gpio-tqmx86.c
@@ -370,7 +370,7 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
chip->direction_output = tqmx86_gpio_direction_output;
chip->get_direction = tqmx86_gpio_get_direction;
chip->get = tqmx86_gpio_get;
- chip->set_rv = tqmx86_gpio_set;
+ chip->set = tqmx86_gpio_set;
chip->ngpio = TQMX86_NGPIO;
chip->parent = pdev->dev.parent;
diff --git a/drivers/gpio/gpio-ts4800.c b/drivers/gpio/gpio-ts4800.c
index 4748e3d47106..992ee231db9f 100644
--- a/drivers/gpio/gpio-ts4800.c
+++ b/drivers/gpio/gpio-ts4800.c
@@ -6,9 +6,10 @@
*/
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#define DEFAULT_PIN_NUMBER 16
#define INPUT_REG_OFFSET 0x00
@@ -17,13 +18,14 @@
static int ts4800_gpio_probe(struct platform_device *pdev)
{
- struct device_node *node;
- struct gpio_chip *chip;
+ struct gpio_generic_chip_config config;
+ struct device *dev = &pdev->dev;
+ struct gpio_generic_chip *chip;
void __iomem *base_addr;
int retval;
u32 ngpios;
- chip = devm_kzalloc(&pdev->dev, sizeof(struct gpio_chip), GFP_KERNEL);
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
@@ -31,29 +33,28 @@ static int ts4800_gpio_probe(struct platform_device *pdev)
if (IS_ERR(base_addr))
return PTR_ERR(base_addr);
- node = pdev->dev.of_node;
- if (!node)
- return -EINVAL;
-
- retval = of_property_read_u32(node, "ngpios", &ngpios);
+ retval = device_property_read_u32(dev, "ngpios", &ngpios);
if (retval == -EINVAL)
ngpios = DEFAULT_PIN_NUMBER;
else if (retval)
return retval;
- retval = bgpio_init(chip, &pdev->dev, 2, base_addr + INPUT_REG_OFFSET,
- base_addr + OUTPUT_REG_OFFSET, NULL,
- base_addr + DIRECTION_REG_OFFSET, NULL, 0);
- if (retval) {
- dev_err(&pdev->dev, "bgpio_init failed\n");
- return retval;
- }
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 2,
+ .dat = base_addr + INPUT_REG_OFFSET,
+ .set = base_addr + OUTPUT_REG_OFFSET,
+ .dirout = base_addr + DIRECTION_REG_OFFSET,
+ };
- chip->ngpio = ngpios;
+ retval = gpio_generic_chip_init(chip, &config);
+ if (retval)
+ return dev_err_probe(dev, retval,
+ "failed to initialize the generic GPIO chip\n");
- platform_set_drvdata(pdev, chip);
+ chip->gc.ngpio = ngpios;
- return devm_gpiochip_add_data(&pdev->dev, chip, NULL);
+ return devm_gpiochip_add_data(dev, &chip->gc, NULL);
}
static const struct of_device_id ts4800_gpio_of_match[] = {
diff --git a/drivers/gpio/gpio-ts4900.c b/drivers/gpio/gpio-ts4900.c
index 35dd2d09b4d4..d9ee8fc77ccd 100644
--- a/drivers/gpio/gpio-ts4900.c
+++ b/drivers/gpio/gpio-ts4900.c
@@ -119,7 +119,7 @@ static const struct gpio_chip template_chip = {
.direction_input = ts4900_gpio_direction_input,
.direction_output = ts4900_gpio_direction_output,
.get = ts4900_gpio_get,
- .set_rv = ts4900_gpio_set,
+ .set = ts4900_gpio_set,
.base = -1,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-ts5500.c b/drivers/gpio/gpio-ts5500.c
index bb432ed73698..3c7f2efe10fd 100644
--- a/drivers/gpio/gpio-ts5500.c
+++ b/drivers/gpio/gpio-ts5500.c
@@ -340,7 +340,7 @@ static int ts5500_dio_probe(struct platform_device *pdev)
priv->gpio_chip.direction_input = ts5500_gpio_input;
priv->gpio_chip.direction_output = ts5500_gpio_output;
priv->gpio_chip.get = ts5500_gpio_get;
- priv->gpio_chip.set_rv = ts5500_gpio_set;
+ priv->gpio_chip.set = ts5500_gpio_set;
priv->gpio_chip.to_irq = ts5500_gpio_to_irq;
priv->gpio_chip.base = -1;
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index e39e39e3ef85..a851702befde 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -419,7 +419,7 @@ static const struct gpio_chip template_chip = {
.direction_output = twl_direction_out,
.get_direction = twl_get_direction,
.get = twl_get,
- .set_rv = twl_set,
+ .set = twl_set,
.to_irq = twl_to_irq,
.can_sleep = true,
};
@@ -597,9 +597,7 @@ no_irqs:
ret = devm_add_action_or_reset(&pdev->dev, gpio_twl4030_power_off_action, d);
if (ret)
- return dev_err_probe(&pdev->dev, ret,
- "failed to install power off handler\n");
-
+ return ret;
}
return 0;
diff --git a/drivers/gpio/gpio-twl6040.c b/drivers/gpio/gpio-twl6040.c
index b2196b62b528..4ec9bcd40439 100644
--- a/drivers/gpio/gpio-twl6040.c
+++ b/drivers/gpio/gpio-twl6040.c
@@ -69,7 +69,7 @@ static struct gpio_chip twl6040gpo_chip = {
.get = twl6040gpo_get,
.direction_output = twl6040gpo_direction_out,
.get_direction = twl6040gpo_get_direction,
- .set_rv = twl6040gpo_set,
+ .set = twl6040gpo_set,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
index 8939556f42b6..197bb1d22b3c 100644
--- a/drivers/gpio/gpio-uniphier.c
+++ b/drivers/gpio/gpio-uniphier.c
@@ -386,8 +386,8 @@ static int uniphier_gpio_probe(struct platform_device *pdev)
chip->direction_input = uniphier_gpio_direction_input;
chip->direction_output = uniphier_gpio_direction_output;
chip->get = uniphier_gpio_get;
- chip->set_rv = uniphier_gpio_set;
- chip->set_multiple_rv = uniphier_gpio_set_multiple;
+ chip->set = uniphier_gpio_set;
+ chip->set_multiple = uniphier_gpio_set_multiple;
chip->to_irq = uniphier_gpio_to_irq;
chip->base = -1;
chip->ngpio = ngpios;
diff --git a/drivers/gpio/gpio-usbio.c b/drivers/gpio/gpio-usbio.c
new file mode 100644
index 000000000000..34d42c743d5b
--- /dev/null
+++ b/drivers/gpio/gpio-usbio.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2025 Intel Corporation.
+ * Copyright (c) 2025 Red Hat, Inc.
+ */
+
+#include <linux/acpi.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/gpio/driver.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/usb/usbio.h>
+
+struct usbio_gpio_bank {
+ u8 config[USBIO_GPIOSPERBANK];
+ u32 bitmap;
+};
+
+struct usbio_gpio {
+ struct mutex config_mutex; /* Protects banks[x].config */
+ struct usbio_gpio_bank banks[USBIO_MAX_GPIOBANKS];
+ struct gpio_chip gc;
+ struct auxiliary_device *adev;
+};
+
+static const struct acpi_device_id usbio_gpio_acpi_hids[] = {
+ { "INTC1007" }, /* MTL */
+ { "INTC10B2" }, /* ARL */
+ { "INTC10B5" }, /* LNL */
+ { "INTC10D1" }, /* MTL-CVF */
+ { "INTC10E2" }, /* PTL */
+ { }
+};
+
+static void usbio_gpio_get_bank_and_pin(struct gpio_chip *gc, unsigned int offset,
+ struct usbio_gpio_bank **bank_ret,
+ unsigned int *pin_ret)
+{
+ struct usbio_gpio *gpio = gpiochip_get_data(gc);
+ struct device *dev = &gpio->adev->dev;
+ struct usbio_gpio_bank *bank;
+ unsigned int pin;
+
+ bank = &gpio->banks[offset / USBIO_GPIOSPERBANK];
+ pin = offset % USBIO_GPIOSPERBANK;
+ if (~bank->bitmap & BIT(pin)) {
+ /* The FW bitmap is sometimes invalid; warn and continue */
+ dev_warn_once(dev, FW_BUG "GPIO %u is not in FW pins bitmap\n", offset);
+ }
+
+ *bank_ret = bank;
+ *pin_ret = pin;
+}
+
+static int usbio_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ struct usbio_gpio_bank *bank;
+ unsigned int pin;
+ u8 cfg;
+
+ usbio_gpio_get_bank_and_pin(gc, offset, &bank, &pin);
+
+ cfg = bank->config[pin] & USBIO_GPIO_PINMOD_MASK;
+
+ return (cfg == USBIO_GPIO_PINMOD_OUTPUT) ?
+ GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
+}
+
+static int usbio_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct usbio_gpio *gpio = gpiochip_get_data(gc);
+ struct usbio_gpio_bank *bank;
+ struct usbio_gpio_rw gbuf;
+ unsigned int pin;
+ int ret;
+
+ usbio_gpio_get_bank_and_pin(gc, offset, &bank, &pin);
+
+ gbuf.bankid = offset / USBIO_GPIOSPERBANK;
+ gbuf.pincount = 1;
+ gbuf.pin = pin;
+
+ ret = usbio_control_msg(gpio->adev, USBIO_PKTTYPE_GPIO, USBIO_GPIOCMD_READ,
+ &gbuf, sizeof(gbuf) - sizeof(gbuf.value),
+ &gbuf, sizeof(gbuf));
+ if (ret != sizeof(gbuf))
+ return (ret < 0) ? ret : -EPROTO;
+
+ return (le32_to_cpu(gbuf.value) >> pin) & 1;
+}
+
+static int usbio_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ struct usbio_gpio *gpio = gpiochip_get_data(gc);
+ struct usbio_gpio_bank *bank;
+ struct usbio_gpio_rw gbuf;
+ unsigned int pin;
+
+ usbio_gpio_get_bank_and_pin(gc, offset, &bank, &pin);
+
+ gbuf.bankid = offset / USBIO_GPIOSPERBANK;
+ gbuf.pincount = 1;
+ gbuf.pin = pin;
+ gbuf.value = cpu_to_le32(value << pin);
+
+ return usbio_control_msg(gpio->adev, USBIO_PKTTYPE_GPIO, USBIO_GPIOCMD_WRITE,
+ &gbuf, sizeof(gbuf), NULL, 0);
+}
+
+static int usbio_gpio_update_config(struct gpio_chip *gc, unsigned int offset,
+ u8 mask, u8 value)
+{
+ struct usbio_gpio *gpio = gpiochip_get_data(gc);
+ struct usbio_gpio_bank *bank;
+ struct usbio_gpio_init gbuf;
+ unsigned int pin;
+
+ usbio_gpio_get_bank_and_pin(gc, offset, &bank, &pin);
+
+ guard(mutex)(&gpio->config_mutex);
+
+ bank->config[pin] &= ~mask;
+ bank->config[pin] |= value;
+
+ gbuf.bankid = offset / USBIO_GPIOSPERBANK;
+ gbuf.config = bank->config[pin];
+ gbuf.pincount = 1;
+ gbuf.pin = pin;
+
+ return usbio_control_msg(gpio->adev, USBIO_PKTTYPE_GPIO, USBIO_GPIOCMD_INIT,
+ &gbuf, sizeof(gbuf), NULL, 0);
+}
+
+static int usbio_gpio_direction_input(struct gpio_chip *gc, unsigned int offset)
+{
+ return usbio_gpio_update_config(gc, offset, USBIO_GPIO_PINMOD_MASK,
+ USBIO_GPIO_SET_PINMOD(USBIO_GPIO_PINMOD_INPUT));
+}
+
+static int usbio_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ int ret;
+
+ ret = usbio_gpio_update_config(gc, offset, USBIO_GPIO_PINMOD_MASK,
+ USBIO_GPIO_SET_PINMOD(USBIO_GPIO_PINMOD_OUTPUT));
+ if (ret)
+ return ret;
+
+ return usbio_gpio_set(gc, offset, value);
+}
+
+static int usbio_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
+ unsigned long config)
+{
+ u8 value;
+
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
+ value = USBIO_GPIO_SET_PINCFG(USBIO_GPIO_PINCFG_DEFAULT);
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ value = USBIO_GPIO_SET_PINCFG(USBIO_GPIO_PINCFG_PULLUP);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ value = USBIO_GPIO_SET_PINCFG(USBIO_GPIO_PINCFG_PULLDOWN);
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ value = USBIO_GPIO_SET_PINCFG(USBIO_GPIO_PINCFG_PUSHPULL);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return usbio_gpio_update_config(gc, offset, USBIO_GPIO_PINCFG_MASK, value);
+}
+
+static int usbio_gpio_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *adev_id)
+{
+ struct usbio_gpio_bank_desc *bank_desc;
+ struct device *dev = &adev->dev;
+ struct usbio_gpio *gpio;
+ int bank, ret;
+
+ bank_desc = dev_get_platdata(dev);
+ if (!bank_desc)
+ return -EINVAL;
+
+ gpio = devm_kzalloc(dev, sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+ return -ENOMEM;
+
+ ret = devm_mutex_init(dev, &gpio->config_mutex);
+ if (ret)
+ return ret;
+
+ gpio->adev = adev;
+
+ usbio_acpi_bind(gpio->adev, usbio_gpio_acpi_hids);
+
+ for (bank = 0; bank < USBIO_MAX_GPIOBANKS && bank_desc[bank].bmap; bank++)
+ gpio->banks[bank].bitmap = le32_to_cpu(bank_desc[bank].bmap);
+
+ gpio->gc.label = ACPI_COMPANION(dev) ?
+ acpi_dev_name(ACPI_COMPANION(dev)) : dev_name(dev);
+ gpio->gc.parent = dev;
+ gpio->gc.owner = THIS_MODULE;
+ gpio->gc.get_direction = usbio_gpio_get_direction;
+ gpio->gc.direction_input = usbio_gpio_direction_input;
+ gpio->gc.direction_output = usbio_gpio_direction_output;
+ gpio->gc.get = usbio_gpio_get;
+ gpio->gc.set = usbio_gpio_set;
+ gpio->gc.set_config = usbio_gpio_set_config;
+ gpio->gc.base = -1;
+ gpio->gc.ngpio = bank * USBIO_GPIOSPERBANK;
+ gpio->gc.can_sleep = true;
+
+ ret = devm_gpiochip_add_data(dev, &gpio->gc, gpio);
+ if (ret)
+ return ret;
+
+ if (has_acpi_companion(dev))
+ acpi_dev_clear_dependencies(ACPI_COMPANION(dev));
+
+ return 0;
+}
+
+static const struct auxiliary_device_id usbio_gpio_id_table[] = {
+ { "usbio.usbio-gpio" },
+ { }
+};
+MODULE_DEVICE_TABLE(auxiliary, usbio_gpio_id_table);
+
+static struct auxiliary_driver usbio_gpio_driver = {
+ .name = USBIO_GPIO_CLIENT,
+ .probe = usbio_gpio_probe,
+ .id_table = usbio_gpio_id_table
+};
+module_auxiliary_driver(usbio_gpio_driver);
+
+MODULE_DESCRIPTION("Intel USBIO GPIO driver");
+MODULE_AUTHOR("Israel Cepeda <israel.a.cepeda.lopez@intel.com>");
+MODULE_AUTHOR("Hans de Goede <hansg@kernel.org>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("USBIO");
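Because the new usbio chip sets can_sleep (every access goes out over USB), consumers must use the _cansleep variants of the gpiod accessors. A minimal consumer sketch; the device and the "reset" line name are hypothetical, with the lookup resolved through the chip's ACPI companion:

	#include <linux/gpio/consumer.h>

	struct gpio_desc *reset;

	reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	/* USB transfers may sleep, hence the _cansleep accessor. */
	gpiod_set_value_cansleep(reset, 1);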
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 7de0d5b53d56..aa8586d8a787 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -28,7 +29,7 @@ struct fsl_gpio_soc_data {
};
struct vf610_gpio_port {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *base;
void __iomem *gpio_base;
const struct fsl_gpio_soc_data *sdata;
@@ -108,7 +109,7 @@ static void vf610_gpio_irq_handler(struct irq_desc *desc)
for_each_set_bit(pin, &irq_isfr, VF610_GPIO_PER_PORT) {
vf610_gpio_writel(BIT(pin), port->base + PORT_ISFR);
- generic_handle_domain_irq(port->gc.irq.domain, pin);
+ generic_handle_domain_irq(port->chip.gc.irq.domain, pin);
}
chained_irq_exit(chip, desc);
@@ -214,6 +215,7 @@ static void vf610_gpio_disable_clk(void *data)
static int vf610_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct vf610_gpio_port *port;
struct gpio_chip *gc;
@@ -293,22 +295,27 @@ static int vf610_gpio_probe(struct platform_device *pdev)
return ret;
}
- gc = &port->gc;
- flags = BGPIOF_PINCTRL_BACKEND;
+ gc = &port->chip.gc;
+ flags = GPIO_GENERIC_PINCTRL_BACKEND;
/*
* We only read the output register for current value on output
* lines if the direction register is available so we can switch
* direction.
*/
if (port->sdata->have_paddr)
- flags |= BGPIOF_READ_OUTPUT_REG_SET;
- ret = bgpio_init(gc, dev, 4,
- port->gpio_base + GPIO_PDIR,
- port->gpio_base + GPIO_PDOR,
- NULL,
- port->sdata->have_paddr ? port->gpio_base + GPIO_PDDR : NULL,
- NULL,
- flags);
+ flags |= GPIO_GENERIC_READ_OUTPUT_REG_SET;
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = port->gpio_base + GPIO_PDIR,
+ .set = port->gpio_base + GPIO_PDOR,
+ .dirout = port->sdata->have_paddr ?
+ port->gpio_base + GPIO_PDDR : NULL,
+ .flags = flags,
+ };
+
+ ret = gpio_generic_chip_init(&port->chip, &config);
if (ret)
return dev_err_probe(dev, ret, "unable to init generic GPIO\n");
gc->label = dev_name(dev);
diff --git a/drivers/gpio/gpio-viperboard.c b/drivers/gpio/gpio-viperboard.c
index e8e906b54d51..15e495c109d2 100644
--- a/drivers/gpio/gpio-viperboard.c
+++ b/drivers/gpio/gpio-viperboard.c
@@ -408,7 +408,7 @@ static int vprbrd_gpio_probe(struct platform_device *pdev)
vb_gpio->gpioa.base = -1;
vb_gpio->gpioa.ngpio = 16;
vb_gpio->gpioa.can_sleep = true;
- vb_gpio->gpioa.set_rv = vprbrd_gpioa_set;
+ vb_gpio->gpioa.set = vprbrd_gpioa_set;
vb_gpio->gpioa.get = vprbrd_gpioa_get;
vb_gpio->gpioa.direction_input = vprbrd_gpioa_direction_input;
vb_gpio->gpioa.direction_output = vprbrd_gpioa_direction_output;
@@ -424,7 +424,7 @@ static int vprbrd_gpio_probe(struct platform_device *pdev)
vb_gpio->gpiob.base = -1;
vb_gpio->gpiob.ngpio = 16;
vb_gpio->gpiob.can_sleep = true;
- vb_gpio->gpiob.set_rv = vprbrd_gpiob_set;
+ vb_gpio->gpiob.set = vprbrd_gpiob_set;
vb_gpio->gpiob.get = vprbrd_gpiob_get;
vb_gpio->gpiob.direction_input = vprbrd_gpiob_direction_input;
vb_gpio->gpiob.direction_output = vprbrd_gpiob_direction_output;
diff --git a/drivers/gpio/gpio-virtio.c b/drivers/gpio/gpio-virtio.c
index 07552611da98..17e040991e46 100644
--- a/drivers/gpio/gpio-virtio.c
+++ b/drivers/gpio/gpio-virtio.c
@@ -567,7 +567,7 @@ static int virtio_gpio_probe(struct virtio_device *vdev)
vgpio->gc.direction_input = virtio_gpio_direction_input;
vgpio->gc.direction_output = virtio_gpio_direction_output;
vgpio->gc.get = virtio_gpio_get;
- vgpio->gc.set_rv = virtio_gpio_set;
+ vgpio->gc.set = virtio_gpio_set;
vgpio->gc.ngpio = ngpio;
vgpio->gc.base = -1; /* Allocate base dynamically */
vgpio->gc.label = dev_name(dev);
diff --git a/drivers/gpio/gpio-visconti.c b/drivers/gpio/gpio-visconti.c
index 5bd965c18a46..6d5d829634ad 100644
--- a/drivers/gpio/gpio-visconti.c
+++ b/drivers/gpio/gpio-visconti.c
@@ -10,6 +10,7 @@
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -32,7 +33,7 @@
struct visconti_gpio {
void __iomem *base;
spinlock_t lock; /* protect gpio register */
- struct gpio_chip gpio_chip;
+ struct gpio_generic_chip chip;
struct device *dev;
};
@@ -158,6 +159,7 @@ static const struct irq_chip visconti_gpio_irq_chip = {
static int visconti_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct visconti_gpio *priv;
struct gpio_irq_chip *girq;
@@ -189,19 +191,22 @@ static int visconti_gpio_probe(struct platform_device *pdev)
return -ENODEV;
}
- ret = bgpio_init(&priv->gpio_chip, dev, 4,
- priv->base + GPIO_IDATA,
- priv->base + GPIO_OSET,
- priv->base + GPIO_OCLR,
- priv->base + GPIO_DIR,
- NULL,
- 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = priv->base + GPIO_IDATA,
+ .set = priv->base + GPIO_OSET,
+ .clr = priv->base + GPIO_OCLR,
+ .dirout = priv->base + GPIO_DIR,
+ };
+
+ ret = gpio_generic_chip_init(&priv->chip, &config);
if (ret) {
dev_err(dev, "unable to init generic GPIO\n");
return ret;
}
- girq = &priv->gpio_chip.irq;
+ girq = &priv->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &visconti_gpio_irq_chip);
girq->fwnode = dev_fwnode(dev);
girq->parent_domain = parent;
@@ -210,7 +215,7 @@ static int visconti_gpio_probe(struct platform_device *pdev)
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_level_irq;
- return devm_gpiochip_add_data(dev, &priv->gpio_chip, priv);
+ return devm_gpiochip_add_data(dev, &priv->chip.gc, priv);
}
static const struct of_device_id visconti_gpio_of_match[] = {
diff --git a/drivers/gpio/gpio-vx855.c b/drivers/gpio/gpio-vx855.c
index a3bceac7854c..84b3a973a503 100644
--- a/drivers/gpio/gpio-vx855.c
+++ b/drivers/gpio/gpio-vx855.c
@@ -216,7 +216,7 @@ static void vx855gpio_gpio_setup(struct vx855_gpio *vg)
c->direction_input = vx855gpio_direction_input;
c->direction_output = vx855gpio_direction_output;
c->get = vx855gpio_get;
- c->set_rv = vx855gpio_set;
+ c->set = vx855gpio_set;
c->set_config = vx855gpio_set_config;
c->dbg_show = NULL;
c->base = 0;
diff --git a/drivers/gpio/gpio-wcd934x.c b/drivers/gpio/gpio-wcd934x.c
index c89da9a22016..572b85e77370 100644
--- a/drivers/gpio/gpio-wcd934x.c
+++ b/drivers/gpio/gpio-wcd934x.c
@@ -98,12 +98,12 @@ static int wcd_gpio_probe(struct platform_device *pdev)
chip->direction_output = wcd_gpio_direction_output;
chip->get_direction = wcd_gpio_get_direction;
chip->get = wcd_gpio_get;
- chip->set_rv = wcd_gpio_set;
+ chip->set = wcd_gpio_set;
chip->parent = dev;
chip->base = -1;
chip->ngpio = WCD934X_NPINS;
chip->label = dev_name(dev);
- chip->can_sleep = false;
+ chip->can_sleep = true;
return devm_gpiochip_add_data(dev, chip, data);
}
diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c
index f7df3d5fc71c..4a5e20e936a9 100644
--- a/drivers/gpio/gpio-wcove.c
+++ b/drivers/gpio/gpio-wcove.c
@@ -439,7 +439,7 @@ static int wcove_gpio_probe(struct platform_device *pdev)
wg->chip.direction_output = wcove_gpio_dir_out;
wg->chip.get_direction = wcove_gpio_get_direction;
wg->chip.get = wcove_gpio_get;
- wg->chip.set_rv = wcove_gpio_set;
+ wg->chip.set = wcove_gpio_set;
wg->chip.set_config = wcove_gpio_set_config;
wg->chip.base = -1;
wg->chip.ngpio = WCOVE_VGPIO_NUM;
diff --git a/drivers/gpio/gpio-winbond.c b/drivers/gpio/gpio-winbond.c
index 421655b5d4c2..dcfda738fd69 100644
--- a/drivers/gpio/gpio-winbond.c
+++ b/drivers/gpio/gpio-winbond.c
@@ -494,7 +494,7 @@ static struct gpio_chip winbond_gpio_chip = {
.can_sleep = true,
.get = winbond_gpio_get,
.direction_input = winbond_gpio_direction_in,
- .set_rv = winbond_gpio_set,
+ .set = winbond_gpio_set,
.direction_output = winbond_gpio_direction_out,
};
diff --git a/drivers/gpio/gpio-wm831x.c b/drivers/gpio/gpio-wm831x.c
index ab58aa7c0b99..489479d6f32b 100644
--- a/drivers/gpio/gpio-wm831x.c
+++ b/drivers/gpio/gpio-wm831x.c
@@ -159,7 +159,6 @@ static void wm831x_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
int i, tristated;
for (i = 0; i < chip->ngpio; i++) {
- int gpio = i + chip->base;
int reg;
const char *pull, *powerdomain;
@@ -175,13 +174,13 @@ static void wm831x_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
}
seq_printf(s, " gpio-%-3d (%-20.20s) ",
- gpio, label ?: "Unrequested");
+ i, label ?: "Unrequested");
reg = wm831x_reg_read(wm831x, WM831X_GPIO1_CONTROL + i);
if (reg < 0) {
dev_err(wm831x->dev,
"GPIO control %d read failed: %d\n",
- gpio, reg);
+ i, reg);
seq_putc(s, '\n');
continue;
}
@@ -253,7 +252,7 @@ static const struct gpio_chip template_chip = {
.direction_input = wm831x_gpio_direction_in,
.get = wm831x_gpio_get,
.direction_output = wm831x_gpio_direction_out,
- .set_rv = wm831x_gpio_set,
+ .set = wm831x_gpio_set,
.to_irq = wm831x_gpio_to_irq,
.set_config = wm831x_set_config,
.dbg_show = wm831x_gpio_dbg_show,
diff --git a/drivers/gpio/gpio-wm8350.c b/drivers/gpio/gpio-wm8350.c
index 9a7677f841fc..46923b23a72e 100644
--- a/drivers/gpio/gpio-wm8350.c
+++ b/drivers/gpio/gpio-wm8350.c
@@ -93,7 +93,7 @@ static const struct gpio_chip template_chip = {
.direction_input = wm8350_gpio_direction_in,
.get = wm8350_gpio_get,
.direction_output = wm8350_gpio_direction_out,
- .set_rv = wm8350_gpio_set,
+ .set = wm8350_gpio_set,
.to_irq = wm8350_gpio_to_irq,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
index ccc005628dd2..a0665cf3ff2f 100644
--- a/drivers/gpio/gpio-wm8994.c
+++ b/drivers/gpio/gpio-wm8994.c
@@ -194,7 +194,6 @@ static void wm8994_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
int i;
for (i = 0; i < chip->ngpio; i++) {
- int gpio = i + chip->base;
int reg;
/* We report the GPIO even if it's not requested since
@@ -208,14 +207,13 @@ static void wm8994_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
continue;
}
- seq_printf(s, " gpio-%-3d (%-20.20s) ", gpio,
+ seq_printf(s, " gpio-%-3d (%-20.20s) ", i,
label ?: "Unrequested");
reg = wm8994_reg_read(wm8994, WM8994_GPIO_1 + i);
if (reg < 0) {
dev_err(wm8994->dev,
- "GPIO control %d read failed: %d\n",
- gpio, reg);
+ "GPIO control %d read failed: %d\n", i, reg);
seq_printf(s, "\n");
continue;
}
@@ -256,7 +254,7 @@ static const struct gpio_chip template_chip = {
.direction_input = wm8994_gpio_direction_in,
.get = wm8994_gpio_get,
.direction_output = wm8994_gpio_direction_out,
- .set_rv = wm8994_gpio_set,
+ .set = wm8994_gpio_set,
.set_config = wm8994_gpio_set_config,
.to_irq = wm8994_gpio_to_irq,
.dbg_show = wm8994_gpio_dbg_show,
diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c
index b51b1fa726bb..661259f026e1 100644
--- a/drivers/gpio/gpio-xgene-sb.c
+++ b/drivers/gpio/gpio-xgene-sb.c
@@ -21,6 +21,7 @@
#include <linux/types.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include "gpiolib-acpi.h"
@@ -40,7 +41,7 @@
/**
* struct xgene_gpio_sb - GPIO-Standby private data structure.
- * @gc: memory-mapped GPIO controllers.
+ * @chip: Generic GPIO chip data
* @regs: GPIO register base offset
* @irq_domain: GPIO interrupt domain
* @irq_start: GPIO pin that start support interrupt
@@ -48,7 +49,7 @@
* @parent_irq_base: Start parent HWIRQ
*/
struct xgene_gpio_sb {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *regs;
struct irq_domain *irq_domain;
u16 irq_start;
@@ -62,14 +63,15 @@ struct xgene_gpio_sb {
static void xgene_gpio_set_bit(struct gpio_chip *gc,
void __iomem *reg, u32 gpio, int val)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
u32 data;
- data = gc->read_reg(reg);
+ data = gpio_generic_read_reg(chip, reg);
if (val)
data |= GPIO_MASK(gpio);
else
data &= ~GPIO_MASK(gpio);
- gc->write_reg(reg, data);
+ gpio_generic_write_reg(chip, reg, data);
}
static int xgene_gpio_sb_irq_set_type(struct irq_data *d, unsigned int type)
@@ -91,9 +93,9 @@ static int xgene_gpio_sb_irq_set_type(struct irq_data *d, unsigned int type)
break;
}
- xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_SEL_LO,
+ xgene_gpio_set_bit(&priv->chip.gc, priv->regs + MPA_GPIO_SEL_LO,
gpio * 2, 1);
- xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_INT_LVL,
+ xgene_gpio_set_bit(&priv->chip.gc, priv->regs + MPA_GPIO_INT_LVL,
d->hwirq, lvl_type);
/* Propagate IRQ type setting to parent */
@@ -109,14 +111,14 @@ static void xgene_gpio_sb_irq_mask(struct irq_data *d)
irq_chip_mask_parent(d);
- gpiochip_disable_irq(&priv->gc, d->hwirq);
+ gpiochip_disable_irq(&priv->chip.gc, d->hwirq);
}
static void xgene_gpio_sb_irq_unmask(struct irq_data *d)
{
struct xgene_gpio_sb *priv = irq_data_get_irq_chip_data(d);
- gpiochip_enable_irq(&priv->gc, d->hwirq);
+ gpiochip_enable_irq(&priv->chip.gc, d->hwirq);
irq_chip_unmask_parent(d);
}
@@ -155,15 +157,15 @@ static int xgene_gpio_sb_domain_activate(struct irq_domain *d,
u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq);
int ret;
- ret = gpiochip_lock_as_irq(&priv->gc, gpio);
+ ret = gpiochip_lock_as_irq(&priv->chip.gc, gpio);
if (ret) {
- dev_err(priv->gc.parent,
+ dev_err(priv->chip.gc.parent,
"Unable to configure XGene GPIO standby pin %d as IRQ\n",
gpio);
return ret;
}
- xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_SEL_LO,
+ xgene_gpio_set_bit(&priv->chip.gc, priv->regs + MPA_GPIO_SEL_LO,
gpio * 2, 1);
return 0;
}
@@ -174,8 +176,8 @@ static void xgene_gpio_sb_domain_deactivate(struct irq_domain *d,
struct xgene_gpio_sb *priv = d->host_data;
u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq);
- gpiochip_unlock_as_irq(&priv->gc, gpio);
- xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_SEL_LO,
+ gpiochip_unlock_as_irq(&priv->chip.gc, gpio);
+ xgene_gpio_set_bit(&priv->chip.gc, priv->regs + MPA_GPIO_SEL_LO,
gpio * 2, 0);
}
@@ -237,6 +239,7 @@ static const struct irq_domain_ops xgene_gpio_sb_domain_ops = {
static int xgene_gpio_sb_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct xgene_gpio_sb *priv;
int ret;
void __iomem *regs;
@@ -263,14 +266,19 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
return -ENODEV;
}
- ret = bgpio_init(&priv->gc, &pdev->dev, 4,
- regs + MPA_GPIO_IN_ADDR,
- regs + MPA_GPIO_OUT_ADDR, NULL,
- regs + MPA_GPIO_OE_ADDR, NULL, 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = &pdev->dev,
+ .sz = 4,
+ .dat = regs + MPA_GPIO_IN_ADDR,
+ .set = regs + MPA_GPIO_OUT_ADDR,
+ .dirout = regs + MPA_GPIO_OE_ADDR,
+ };
+
+ ret = gpio_generic_chip_init(&priv->chip, &config);
if (ret)
return ret;
- priv->gc.to_irq = xgene_gpio_sb_to_irq;
+ priv->chip.gc.to_irq = xgene_gpio_sb_to_irq;
/* Retrieve start irq pin, use default if property not found */
priv->irq_start = XGENE_DFLT_IRQ_START_PIN;
@@ -283,12 +291,12 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
priv->nirq = val32;
/* Retrieve the number of GPIOs, use default if property not found */
- priv->gc.ngpio = XGENE_DFLT_MAX_NGPIO;
+ priv->chip.gc.ngpio = XGENE_DFLT_MAX_NGPIO;
if (!device_property_read_u32(&pdev->dev, "apm,nr-gpios", &val32))
- priv->gc.ngpio = val32;
+ priv->chip.gc.ngpio = val32;
dev_info(&pdev->dev, "Support %d gpios, %d irqs start from pin %d\n",
- priv->gc.ngpio, priv->nirq, priv->irq_start);
+ priv->chip.gc.ngpio, priv->nirq, priv->irq_start);
platform_set_drvdata(pdev, priv);
@@ -298,9 +306,9 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
if (!priv->irq_domain)
return -ENODEV;
- priv->gc.irq.domain = priv->irq_domain;
+ priv->chip.gc.irq.domain = priv->irq_domain;
- ret = devm_gpiochip_add_data(&pdev->dev, &priv->gc, priv);
+ ret = devm_gpiochip_add_data(&pdev->dev, &priv->chip.gc, priv);
if (ret) {
dev_err(&pdev->dev,
"failed to register X-Gene GPIO Standby driver\n");
@@ -311,7 +319,7 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "X-Gene GPIO Standby driver registered\n");
/* Register interrupt handlers for GPIO signaled ACPI Events */
- acpi_gpiochip_request_interrupts(&priv->gc);
+ acpi_gpiochip_request_interrupts(&priv->chip.gc);
return ret;
}
@@ -320,7 +328,7 @@ static void xgene_gpio_sb_remove(struct platform_device *pdev)
{
struct xgene_gpio_sb *priv = platform_get_drvdata(pdev);
- acpi_gpiochip_free_interrupts(&priv->gc);
+ acpi_gpiochip_free_interrupts(&priv->chip.gc);
irq_domain_remove(priv->irq_domain);
}
diff --git a/drivers/gpio/gpio-xgene.c b/drivers/gpio/gpio-xgene.c
index 28f794e5eb26..4f627de3f56c 100644
--- a/drivers/gpio/gpio-xgene.c
+++ b/drivers/gpio/gpio-xgene.c
@@ -178,7 +178,7 @@ static int xgene_gpio_probe(struct platform_device *pdev)
gpio->chip.direction_input = xgene_gpio_dir_in;
gpio->chip.direction_output = xgene_gpio_dir_out;
gpio->chip.get = xgene_gpio_get;
- gpio->chip.set_rv = xgene_gpio_set;
+ gpio->chip.set = xgene_gpio_set;
gpio->chip.label = dev_name(&pdev->dev);
gpio->chip.base = -1;
diff --git a/drivers/gpio/gpio-xgs-iproc.c b/drivers/gpio/gpio-xgs-iproc.c
index 93544e98ccbd..77eb29dcc217 100644
--- a/drivers/gpio/gpio-xgs-iproc.c
+++ b/drivers/gpio/gpio-xgs-iproc.c
@@ -3,11 +3,12 @@
* Copyright (C) 2017 Broadcom
*/
-#include <linux/gpio/driver.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -28,7 +29,7 @@
#define IPROC_GPIO_CCA_INT_EDGE 0x24
struct iproc_gpio_chip {
- struct gpio_chip gc;
+ struct gpio_generic_chip gen_gc;
spinlock_t lock;
struct device *dev;
void __iomem *base;
@@ -38,7 +39,7 @@ struct iproc_gpio_chip {
static inline struct iproc_gpio_chip *
to_iproc_gpio(struct gpio_chip *gc)
{
- return container_of(gc, struct iproc_gpio_chip, gc);
+ return container_of(to_gpio_generic_chip(gc), struct iproc_gpio_chip, gen_gc);
}
static void iproc_gpio_irq_ack(struct irq_data *d)
@@ -213,6 +214,7 @@ static const struct irq_chip iproc_gpio_irq_chip = {
static int iproc_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct device_node *dn = pdev->dev.of_node;
struct iproc_gpio_chip *chip;
@@ -231,21 +233,23 @@ static int iproc_gpio_probe(struct platform_device *pdev)
if (IS_ERR(chip->base))
return PTR_ERR(chip->base);
- ret = bgpio_init(&chip->gc, dev, 4,
- chip->base + IPROC_GPIO_CCA_DIN,
- chip->base + IPROC_GPIO_CCA_DOUT,
- NULL,
- chip->base + IPROC_GPIO_CCA_OUT_EN,
- NULL,
- 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = chip->base + IPROC_GPIO_CCA_DIN,
+ .set = chip->base + IPROC_GPIO_CCA_DOUT,
+ .dirout = chip->base + IPROC_GPIO_CCA_OUT_EN,
+ };
+
+ ret = gpio_generic_chip_init(&chip->gen_gc, &config);
if (ret) {
dev_err(dev, "unable to init GPIO chip\n");
return ret;
}
- chip->gc.label = dev_name(dev);
+ chip->gen_gc.gc.label = dev_name(dev);
if (!of_property_read_u32(dn, "ngpios", &num_gpios))
- chip->gc.ngpio = num_gpios;
+ chip->gen_gc.gc.ngpio = num_gpios;
irq = platform_get_irq(pdev, 0);
if (irq > 0) {
@@ -266,13 +270,13 @@ static int iproc_gpio_probe(struct platform_device *pdev)
* a flow-handler because the irq is shared.
*/
ret = devm_request_irq(dev, irq, iproc_gpio_irq_handler,
- IRQF_SHARED, chip->gc.label, &chip->gc);
+ IRQF_SHARED, chip->gen_gc.gc.label, &chip->gen_gc.gc);
if (ret) {
dev_err(dev, "Fail to request IRQ%d: %d\n", irq, ret);
return ret;
}
- girq = &chip->gc.irq;
+ girq = &chip->gen_gc.gc.irq;
gpio_irq_chip_set_chip(girq, &iproc_gpio_irq_chip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
@@ -282,7 +286,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
girq->handler = handle_simple_irq;
}
- ret = devm_gpiochip_add_data(dev, &chip->gc, chip);
+ ret = devm_gpiochip_add_data(dev, &chip->gen_gc.gc, chip);
if (ret) {
dev_err(dev, "unable to add GPIO chip\n");
return ret;
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index 36d91cacc2d9..83675ac81077 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -604,10 +604,10 @@ static int xgpio_probe(struct platform_device *pdev)
chip->gc.direction_input = xgpio_dir_in;
chip->gc.direction_output = xgpio_dir_out;
chip->gc.get = xgpio_get;
- chip->gc.set_rv = xgpio_set;
+ chip->gc.set = xgpio_set;
chip->gc.request = xgpio_request;
chip->gc.free = xgpio_free;
- chip->gc.set_multiple_rv = xgpio_set_multiple;
+ chip->gc.set_multiple = xgpio_set_multiple;
chip->gc.label = dev_name(dev);
diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c
index bcd2dfec462d..aede6324387f 100644
--- a/drivers/gpio/gpio-xlp.c
+++ b/drivers/gpio/gpio-xlp.c
@@ -274,7 +274,7 @@ static int xlp_gpio_probe(struct platform_device *pdev)
gc->ngpio = 70;
gc->direction_output = xlp_gpio_dir_output;
gc->direction_input = xlp_gpio_dir_input;
- gc->set_rv = xlp_gpio_set;
+ gc->set = xlp_gpio_set;
gc->get = xlp_gpio_get;
spin_lock_init(&priv->lock);
diff --git a/drivers/gpio/gpio-xra1403.c b/drivers/gpio/gpio-xra1403.c
index 70402c6b5407..7f3c98f9f902 100644
--- a/drivers/gpio/gpio-xra1403.c
+++ b/drivers/gpio/gpio-xra1403.c
@@ -135,8 +135,7 @@ static void xra1403_dbg_show(struct seq_file *s, struct gpio_chip *chip)
gcr = value[XRA_GCR + 1] << 8 | value[XRA_GCR];
gsr = value[XRA_GSR + 1] << 8 | value[XRA_GSR];
for_each_requested_gpio(chip, i, label) {
- seq_printf(s, " gpio-%-3d (%-12s) %s %s\n",
- chip->base + i, label,
+ seq_printf(s, " gpio-%-3d (%-12s) %s %s\n", i, label,
(gcr & BIT(i)) ? "in" : "out",
str_hi_lo(gsr & BIT(i)));
}
@@ -164,7 +163,7 @@ static int xra1403_probe(struct spi_device *spi)
xra->chip.direction_output = xra1403_direction_output;
xra->chip.get_direction = xra1403_get_direction;
xra->chip.get = xra1403_get;
- xra->chip.set_rv = xra1403_set;
+ xra->chip.set = xra1403_set;
xra->chip.dbg_show = xra1403_dbg_show;
diff --git a/drivers/gpio/gpio-xtensa.c b/drivers/gpio/gpio-xtensa.c
index e7ff3c60324d..4418947a10e5 100644
--- a/drivers/gpio/gpio-xtensa.c
+++ b/drivers/gpio/gpio-xtensa.c
@@ -132,7 +132,7 @@ static struct gpio_chip expstate_chip = {
.ngpio = 32,
.get_direction = xtensa_expstate_get_direction,
.get = xtensa_expstate_get_value,
- .set_rv = xtensa_expstate_set_value,
+ .set = xtensa_expstate_set_value,
};
static int xtensa_gpio_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-zevio.c b/drivers/gpio/gpio-zevio.c
index 0799f7976710..29375bea2289 100644
--- a/drivers/gpio/gpio-zevio.c
+++ b/drivers/gpio/gpio-zevio.c
@@ -161,7 +161,7 @@ static int zevio_gpio_to_irq(struct gpio_chip *chip, unsigned pin)
static const struct gpio_chip zevio_gpio_chip = {
.direction_input = zevio_gpio_direction_input,
.direction_output = zevio_gpio_direction_output,
- .set_rv = zevio_gpio_set,
+ .set = zevio_gpio_set,
.get = zevio_gpio_get,
.to_irq = zevio_gpio_to_irq,
.base = 0,
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index b22b4e25c68d..0ffd76e8951f 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -932,7 +932,7 @@ static int zynq_gpio_probe(struct platform_device *pdev)
chip->owner = THIS_MODULE;
chip->parent = &pdev->dev;
chip->get = zynq_gpio_get_value;
- chip->set_rv = zynq_gpio_set_value;
+ chip->set = zynq_gpio_set_value;
chip->request = zynq_gpio_request;
chip->free = zynq_gpio_free;
chip->direction_input = zynq_gpio_dir_in;
diff --git a/drivers/gpio/gpio-zynqmp-modepin.c b/drivers/gpio/gpio-zynqmp-modepin.c
index 6dc5d7acb89c..5e651482e985 100644
--- a/drivers/gpio/gpio-zynqmp-modepin.c
+++ b/drivers/gpio/gpio-zynqmp-modepin.c
@@ -130,7 +130,7 @@ static int modepin_gpio_probe(struct platform_device *pdev)
chip->owner = THIS_MODULE;
chip->parent = &pdev->dev;
chip->get = modepin_gpio_get_value;
- chip->set_rv = modepin_gpio_set_value;
+ chip->set = modepin_gpio_set_value;
chip->direction_input = modepin_gpio_dir_in;
chip->direction_output = modepin_gpio_dir_out;
chip->label = dev_name(&pdev->dev);
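These setter hunks are part of a tree-wide conversion: the transitional int-returning set_rv()/set_multiple_rv() callbacks take over the plain set()/set_multiple() names now that the old void variants are gone, and (per the gpiolib.c hunk further down) the core maps positive return values to -EBADE. A minimal sketch of a driver under the new convention; the foo_* names and register layout are invented for illustration:

#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>

struct foo_gpio {
	void __iomem *base;	/* hypothetical write-1-to-set/clear register pair */
};

static int foo_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
	struct foo_gpio *foo = gpiochip_get_data(gc);

	if (offset >= gc->ngpio)
		return -EINVAL;	/* failures now propagate to the core */

	writel(BIT(offset), foo->base + (value ? 0x0 : 0x4));
	return 0;
}

static void foo_gpio_init(struct gpio_chip *gc)
{
	gc->set = foo_gpio_set;	/* was gc->set_rv during the transition */
}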
diff --git a/drivers/gpio/gpiolib-acpi-core.c b/drivers/gpio/gpiolib-acpi-core.c
index 12b24a717e43..284e762d92c4 100644
--- a/drivers/gpio/gpiolib-acpi-core.c
+++ b/drivers/gpio/gpiolib-acpi-core.c
@@ -942,8 +942,9 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
{
struct acpi_device *adev = to_acpi_device_node(fwnode);
bool can_fallback = acpi_can_fallback_to_crs(adev, con_id);
- struct acpi_gpio_info info;
+ struct acpi_gpio_info info = {};
struct gpio_desc *desc;
+ int ret;
desc = __acpi_find_gpio(fwnode, con_id, idx, can_fallback, &info);
if (IS_ERR(desc))
@@ -957,6 +958,12 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
acpi_gpio_update_gpiod_flags(dflags, &info);
acpi_gpio_update_gpiod_lookup_flags(lookupflags, &info);
+
+	/* ACPI expresses debounce in hundredths of a millisecond */
+ ret = gpio_set_debounce_timeout(desc, info.debounce * 10);
+ if (ret)
+ return ERR_PTR(ret);
+
return desc;
}
@@ -992,7 +999,7 @@ int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id,
int ret;
for (i = 0, idx = 0; idx <= index; i++) {
- struct acpi_gpio_info info;
+ struct acpi_gpio_info info = {};
struct gpio_desc *desc;
/* Ignore -EPROBE_DEFER, it only matters if idx matches */
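The debounce hunk above relies on a unit conversion: ACPI GpioInt()/GpioIo() resources express debounce in hundredths of a millisecond, while gpio_set_debounce_timeout() takes microseconds, hence the factor of 10. A one-line sketch (the helper name is hypothetical):

/* hundredths of a millisecond -> microseconds: 1/100 ms == 10 us */
static inline unsigned int acpi_debounce_to_us(unsigned int acpi_debounce)
{
	return acpi_debounce * 10;	/* e.g. 500 (5 ms) -> 5000 us */
}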
diff --git a/drivers/gpio/gpiolib-acpi-quirks.c b/drivers/gpio/gpiolib-acpi-quirks.c
index c13545dce349..7b95d1b03361 100644
--- a/drivers/gpio/gpiolib-acpi-quirks.c
+++ b/drivers/gpio/gpiolib-acpi-quirks.c
@@ -319,6 +319,18 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
},
{
/*
+		 * Same quirk as the G1619-04; this is the newer model.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1619-05"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "PNP0C50:00@8",
+ },
+ },
+ {
+ /*
* Spurious wakeups from GPIO 11
* Found in BIOS 1.04
* https://gitlab.freedesktop.org/drm/amd/-/issues/3954
@@ -344,6 +356,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
.ignore_interrupt = "AMDI0030:00@8",
},
},
+ {
+ /*
+ * Spurious wakeups from TP_ATTN# pin
+ * Found in BIOS 5.35
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/4482
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "ProArt PX13"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "ASCP1A00:00@8",
+ },
+ },
{} /* Terminating entry */
};
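Both new entries follow the table's existing shape: DMI vendor/product strings key the match, and driver_data names the pin to ignore in "<controller>:<instance>@<pin>" form. A hedged sketch of consuming such a table; the wrapper function is invented, dmi_first_match() is the real DMI helper:

#include <linux/dmi.h>

static const struct acpi_gpiolib_dmi_quirk *gpiolib_acpi_find_quirk(void)
{
	const struct dmi_system_id *id;

	id = dmi_first_match(gpiolib_acpi_quirks);
	if (!id)
		return NULL;	/* machine not affected */

	/* e.g. .ignore_wake = "PNP0C50:00@8" on the GPD G1619-05 */
	return id->driver_data;
}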
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index e6a289fa0f8f..175836467f21 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -144,17 +144,17 @@ static void linehandle_flags_to_desc_flags(u32 lflags, unsigned long *flagsp)
{
unsigned long flags = READ_ONCE(*flagsp);
- assign_bit(FLAG_ACTIVE_LOW, &flags,
+ assign_bit(GPIOD_FLAG_ACTIVE_LOW, &flags,
lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW);
- assign_bit(FLAG_OPEN_DRAIN, &flags,
+ assign_bit(GPIOD_FLAG_OPEN_DRAIN, &flags,
lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN);
- assign_bit(FLAG_OPEN_SOURCE, &flags,
+ assign_bit(GPIOD_FLAG_OPEN_SOURCE, &flags,
lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE);
- assign_bit(FLAG_PULL_UP, &flags,
+ assign_bit(GPIOD_FLAG_PULL_UP, &flags,
lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP);
- assign_bit(FLAG_PULL_DOWN, &flags,
+ assign_bit(GPIOD_FLAG_PULL_DOWN, &flags,
lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN);
- assign_bit(FLAG_BIAS_DISABLE, &flags,
+ assign_bit(GPIOD_FLAG_BIAS_DISABLE, &flags,
lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE);
WRITE_ONCE(*flagsp, flags);
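The FLAG_* to GPIOD_FLAG_* rename throughout this hunk is purely mechanical. For reference, assign_bit(nr, addr, cond) sets the bit when cond is true and clears it otherwise; each call above is logically equivalent to this sketch (the constants come from uapi/linux/gpio.h and gpiolib.h):

#include <linux/bitops.h>

static void demo_assign_active_low(unsigned long *flags, u32 lflags)
{
	if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
		set_bit(GPIOD_FLAG_ACTIVE_LOW, flags);
	else
		clear_bit(GPIOD_FLAG_ACTIVE_LOW, flags);
}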
@@ -238,7 +238,7 @@ static long linehandle_ioctl(struct file *file, unsigned int cmd,
* All line descriptors were created at once with the same
* flags so just check if the first one is really output.
*/
- if (!test_bit(FLAG_IS_OUT, &lh->descs[0]->flags))
+ if (!test_bit(GPIOD_FLAG_IS_OUT, &lh->descs[0]->flags))
return -EPERM;
if (copy_from_user(&ghd, ip, sizeof(ghd)))
@@ -599,10 +599,10 @@ static void linereq_put_event(struct linereq *lr,
static u64 line_event_timestamp(struct line *line)
{
- if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
+ if (test_bit(GPIOD_FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
return ktime_get_real_ns();
else if (IS_ENABLED(CONFIG_HTE) &&
- test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
+ test_bit(GPIOD_FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
return line->timestamp_ns;
return ktime_get_ns();
@@ -725,11 +725,11 @@ static int hte_edge_setup(struct line *line, u64 eflags)
struct hte_ts_desc *hdesc = &line->hdesc;
if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
- flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
+ flags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &line->desc->flags) ?
HTE_FALLING_EDGE_TS :
HTE_RISING_EDGE_TS;
if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
- flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
+ flags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &line->desc->flags) ?
HTE_RISING_EDGE_TS :
HTE_FALLING_EDGE_TS;
@@ -831,7 +831,7 @@ static bool debounced_value(struct line *line)
*/
value = READ_ONCE(line->level);
- if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags))
+ if (test_bit(GPIOD_FLAG_ACTIVE_LOW, &line->desc->flags))
value = !value;
return value;
@@ -939,7 +939,7 @@ static int debounce_setup(struct line *line, unsigned int debounce_period_us)
return level;
if (!(IS_ENABLED(CONFIG_HTE) &&
- test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))) {
+ test_bit(GPIOD_FLAG_EVENT_CLOCK_HTE, &line->desc->flags))) {
irq = gpiod_to_irq(line->desc);
if (irq < 0)
return -ENXIO;
@@ -1061,10 +1061,10 @@ static int edge_detector_setup(struct line *line,
return -ENXIO;
if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
- irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
+ irqflags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &line->desc->flags) ?
IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
- irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
+ irqflags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &line->desc->flags) ?
IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
irqflags |= IRQF_ONESHOT;
@@ -1237,34 +1237,34 @@ static void gpio_v2_line_config_flags_to_desc_flags(u64 lflags,
{
unsigned long flags = READ_ONCE(*flagsp);
- assign_bit(FLAG_ACTIVE_LOW, &flags,
+ assign_bit(GPIOD_FLAG_ACTIVE_LOW, &flags,
lflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW);
if (lflags & GPIO_V2_LINE_FLAG_OUTPUT)
- set_bit(FLAG_IS_OUT, &flags);
+ set_bit(GPIOD_FLAG_IS_OUT, &flags);
else if (lflags & GPIO_V2_LINE_FLAG_INPUT)
- clear_bit(FLAG_IS_OUT, &flags);
+ clear_bit(GPIOD_FLAG_IS_OUT, &flags);
- assign_bit(FLAG_EDGE_RISING, &flags,
+ assign_bit(GPIOD_FLAG_EDGE_RISING, &flags,
lflags & GPIO_V2_LINE_FLAG_EDGE_RISING);
- assign_bit(FLAG_EDGE_FALLING, &flags,
+ assign_bit(GPIOD_FLAG_EDGE_FALLING, &flags,
lflags & GPIO_V2_LINE_FLAG_EDGE_FALLING);
- assign_bit(FLAG_OPEN_DRAIN, &flags,
+ assign_bit(GPIOD_FLAG_OPEN_DRAIN, &flags,
lflags & GPIO_V2_LINE_FLAG_OPEN_DRAIN);
- assign_bit(FLAG_OPEN_SOURCE, &flags,
+ assign_bit(GPIOD_FLAG_OPEN_SOURCE, &flags,
lflags & GPIO_V2_LINE_FLAG_OPEN_SOURCE);
- assign_bit(FLAG_PULL_UP, &flags,
+ assign_bit(GPIOD_FLAG_PULL_UP, &flags,
lflags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP);
- assign_bit(FLAG_PULL_DOWN, &flags,
+ assign_bit(GPIOD_FLAG_PULL_DOWN, &flags,
lflags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN);
- assign_bit(FLAG_BIAS_DISABLE, &flags,
+ assign_bit(GPIOD_FLAG_BIAS_DISABLE, &flags,
lflags & GPIO_V2_LINE_FLAG_BIAS_DISABLED);
- assign_bit(FLAG_EVENT_CLOCK_REALTIME, &flags,
+ assign_bit(GPIOD_FLAG_EVENT_CLOCK_REALTIME, &flags,
lflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME);
- assign_bit(FLAG_EVENT_CLOCK_HTE, &flags,
+ assign_bit(GPIOD_FLAG_EVENT_CLOCK_HTE, &flags,
lflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
WRITE_ONCE(*flagsp, flags);
@@ -2115,10 +2115,10 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
}
if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
- irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+ irqflags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags) ?
IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
- irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+ irqflags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags) ?
IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
irqflags |= IRQF_ONESHOT;
@@ -2253,7 +2253,7 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
scoped_guard(srcu, &desc->gdev->desc_srcu) {
label = gpiod_get_label(desc);
- if (label && test_bit(FLAG_REQUESTED, &dflags))
+ if (label && test_bit(GPIOD_FLAG_REQUESTED, &dflags))
strscpy(info->consumer, label,
sizeof(info->consumer));
}
@@ -2270,10 +2270,10 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
* The definitive test that a line is available to userspace is to
* request it.
*/
- if (test_bit(FLAG_REQUESTED, &dflags) ||
- test_bit(FLAG_IS_HOGGED, &dflags) ||
- test_bit(FLAG_EXPORT, &dflags) ||
- test_bit(FLAG_SYSFS, &dflags) ||
+ if (test_bit(GPIOD_FLAG_REQUESTED, &dflags) ||
+ test_bit(GPIOD_FLAG_IS_HOGGED, &dflags) ||
+ test_bit(GPIOD_FLAG_EXPORT, &dflags) ||
+ test_bit(GPIOD_FLAG_SYSFS, &dflags) ||
!gpiochip_line_is_valid(guard.gc, info->offset)) {
info->flags |= GPIO_V2_LINE_FLAG_USED;
} else if (!atomic) {
@@ -2281,34 +2281,34 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
info->flags |= GPIO_V2_LINE_FLAG_USED;
}
- if (test_bit(FLAG_IS_OUT, &dflags))
+ if (test_bit(GPIOD_FLAG_IS_OUT, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_OUTPUT;
else
info->flags |= GPIO_V2_LINE_FLAG_INPUT;
- if (test_bit(FLAG_ACTIVE_LOW, &dflags))
+ if (test_bit(GPIOD_FLAG_ACTIVE_LOW, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_ACTIVE_LOW;
- if (test_bit(FLAG_OPEN_DRAIN, &dflags))
+ if (test_bit(GPIOD_FLAG_OPEN_DRAIN, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_OPEN_DRAIN;
- if (test_bit(FLAG_OPEN_SOURCE, &dflags))
+ if (test_bit(GPIOD_FLAG_OPEN_SOURCE, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_OPEN_SOURCE;
- if (test_bit(FLAG_BIAS_DISABLE, &dflags))
+ if (test_bit(GPIOD_FLAG_BIAS_DISABLE, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_BIAS_DISABLED;
- if (test_bit(FLAG_PULL_DOWN, &dflags))
+ if (test_bit(GPIOD_FLAG_PULL_DOWN, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN;
- if (test_bit(FLAG_PULL_UP, &dflags))
+ if (test_bit(GPIOD_FLAG_PULL_UP, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_UP;
- if (test_bit(FLAG_EDGE_RISING, &dflags))
+ if (test_bit(GPIOD_FLAG_EDGE_RISING, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_EDGE_RISING;
- if (test_bit(FLAG_EDGE_FALLING, &dflags))
+ if (test_bit(GPIOD_FLAG_EDGE_FALLING, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_EDGE_FALLING;
- if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &dflags))
+ if (test_bit(GPIOD_FLAG_EVENT_CLOCK_REALTIME, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
- else if (test_bit(FLAG_EVENT_CLOCK_HTE, &dflags))
+ else if (test_bit(GPIOD_FLAG_EVENT_CLOCK_HTE, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
debounce_period_us = READ_ONCE(desc->debounce_period_us);
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 37ab78243fab..fad4edf9cc5c 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -878,7 +878,7 @@ static void of_gpiochip_remove_hog(struct gpio_chip *chip,
{
struct gpio_desc *desc;
- for_each_gpio_desc_with_flag(chip, desc, FLAG_IS_HOGGED)
+ for_each_gpio_desc_with_flag(chip, desc, GPIOD_FLAG_IS_HOGGED)
if (READ_ONCE(desc->hog) == hog)
gpiochip_free_own_desc(desc);
}
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index b64106f1cb7b..9a849245b358 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -131,7 +131,7 @@ static ssize_t direction_show(struct device *dev,
scoped_guard(mutex, &data->mutex) {
gpiod_get_direction(desc);
- value = !!test_bit(FLAG_IS_OUT, &desc->flags);
+ value = !!test_bit(GPIOD_FLAG_IS_OUT, &desc->flags);
}
return sysfs_emit(buf, "%s\n", value ? "out" : "in");
@@ -226,14 +226,14 @@ static int gpio_sysfs_request_irq(struct gpiod_data *data, unsigned char flags)
irq_flags = IRQF_SHARED;
if (flags & GPIO_IRQF_TRIGGER_FALLING) {
- irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+ irq_flags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags) ?
IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
- set_bit(FLAG_EDGE_FALLING, &desc->flags);
+ set_bit(GPIOD_FLAG_EDGE_FALLING, &desc->flags);
}
if (flags & GPIO_IRQF_TRIGGER_RISING) {
- irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+ irq_flags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags) ?
IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
- set_bit(FLAG_EDGE_RISING, &desc->flags);
+ set_bit(GPIOD_FLAG_EDGE_RISING, &desc->flags);
}
/*
@@ -260,8 +260,8 @@ static int gpio_sysfs_request_irq(struct gpiod_data *data, unsigned char flags)
err_unlock:
gpiochip_unlock_as_irq(guard.gc, gpio_chip_hwgpio(desc));
err_clr_bits:
- clear_bit(FLAG_EDGE_RISING, &desc->flags);
- clear_bit(FLAG_EDGE_FALLING, &desc->flags);
+ clear_bit(GPIOD_FLAG_EDGE_RISING, &desc->flags);
+ clear_bit(GPIOD_FLAG_EDGE_FALLING, &desc->flags);
return ret;
}
@@ -281,8 +281,8 @@ static void gpio_sysfs_free_irq(struct gpiod_data *data)
data->irq_flags = 0;
free_irq(data->irq, data);
gpiochip_unlock_as_irq(guard.gc, gpio_chip_hwgpio(desc));
- clear_bit(FLAG_EDGE_RISING, &desc->flags);
- clear_bit(FLAG_EDGE_FALLING, &desc->flags);
+ clear_bit(GPIOD_FLAG_EDGE_RISING, &desc->flags);
+ clear_bit(GPIOD_FLAG_EDGE_FALLING, &desc->flags);
}
static const char *const trigger_names[] = {
@@ -347,10 +347,10 @@ static int gpio_sysfs_set_active_low(struct gpiod_data *data, int value)
struct gpio_desc *desc = data->desc;
int status = 0;
- if (!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) == !!value)
+ if (!!test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags) == !!value)
return 0;
- assign_bit(FLAG_ACTIVE_LOW, &desc->flags, value);
+ assign_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags, value);
/* reconfigure poll(2) support if enabled on one edge only */
if (flags == GPIO_IRQF_TRIGGER_FALLING ||
@@ -373,7 +373,7 @@ static ssize_t active_low_show(struct device *dev,
int value;
scoped_guard(mutex, &data->mutex)
- value = !!test_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ value = !!test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags);
return sysfs_emit(buf, "%d\n", value);
}
@@ -418,7 +418,7 @@ static umode_t gpio_is_visible(struct kobject *kobj, struct attribute *attr,
mode = 0;
if (!data->direction_can_change &&
- test_bit(FLAG_IS_OUT, &data->desc->flags))
+ test_bit(GPIOD_FLAG_IS_OUT, &data->desc->flags))
mode = 0;
#endif /* CONFIG_GPIO_SYSFS_LEGACY */
}
@@ -486,7 +486,7 @@ static int export_gpio_desc(struct gpio_desc *desc)
}
/*
- * No extra locking here; FLAG_SYSFS just signifies that the
+ * No extra locking here; GPIOD_FLAG_SYSFS just signifies that the
 * request and export were done on behalf of userspace, so
* they may be undone on its behalf too.
*/
@@ -505,7 +505,7 @@ static int export_gpio_desc(struct gpio_desc *desc)
if (ret < 0) {
gpiod_free(desc);
} else {
- set_bit(FLAG_SYSFS, &desc->flags);
+ set_bit(GPIOD_FLAG_SYSFS, &desc->flags);
gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
}
@@ -515,11 +515,11 @@ static int export_gpio_desc(struct gpio_desc *desc)
static int unexport_gpio_desc(struct gpio_desc *desc)
{
/*
- * No extra locking here; FLAG_SYSFS just signifies that the
+ * No extra locking here; GPIOD_FLAG_SYSFS just signifies that the
 * request and export were done on behalf of userspace, so
* they may be undone on its behalf too.
*/
- if (!test_and_clear_bit(FLAG_SYSFS, &desc->flags))
+ if (!test_and_clear_bit(GPIOD_FLAG_SYSFS, &desc->flags))
return -EINVAL;
gpiod_unexport(desc);
@@ -748,14 +748,14 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
if (!guard.gc)
return -ENODEV;
- if (test_and_set_bit(FLAG_EXPORT, &desc->flags))
+ if (test_and_set_bit(GPIOD_FLAG_EXPORT, &desc->flags))
return -EPERM;
gdev = desc->gdev;
guard(mutex)(&sysfs_lock);
- if (!test_bit(FLAG_REQUESTED, &desc->flags)) {
+ if (!test_bit(GPIOD_FLAG_REQUESTED, &desc->flags)) {
gpiod_dbg(desc, "%s: unavailable (not requested)\n", __func__);
status = -EPERM;
goto err_clear_bit;
@@ -866,7 +866,7 @@ err_free_data:
#endif /* CONFIG_GPIO_SYSFS_LEGACY */
kfree(desc_data);
err_clear_bit:
- clear_bit(FLAG_EXPORT, &desc->flags);
+ clear_bit(GPIOD_FLAG_EXPORT, &desc->flags);
gpiod_dbg(desc, "%s: status %d\n", __func__, status);
return status;
}
@@ -937,7 +937,7 @@ void gpiod_unexport(struct gpio_desc *desc)
}
scoped_guard(mutex, &sysfs_lock) {
- if (!test_bit(FLAG_EXPORT, &desc->flags))
+ if (!test_bit(GPIOD_FLAG_EXPORT, &desc->flags))
return;
gdev = gpiod_to_gpio_device(desc);
@@ -956,7 +956,7 @@ void gpiod_unexport(struct gpio_desc *desc)
return;
list_del(&desc_data->list);
- clear_bit(FLAG_EXPORT, &desc->flags);
+ clear_bit(GPIOD_FLAG_EXPORT, &desc->flags);
#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
sysfs_put(desc_data->value_kn);
device_unregister(desc_data->dev);
@@ -1073,7 +1073,7 @@ void gpiochip_sysfs_unregister(struct gpio_device *gdev)
return;
/* unregister gpiod class devices owned by sysfs */
- for_each_gpio_desc_with_flag(chip, desc, FLAG_SYSFS) {
+ for_each_gpio_desc_with_flag(chip, desc, GPIOD_FLAG_SYSFS) {
gpiod_unexport(desc);
gpiod_free(desc);
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index a93d2a9355e2..9952e412da50 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -127,10 +127,10 @@ const char *gpiod_get_label(struct gpio_desc *desc)
label = srcu_dereference_check(desc->label, &desc->gdev->desc_srcu,
srcu_read_lock_held(&desc->gdev->desc_srcu));
- if (test_bit(FLAG_USED_AS_IRQ, &flags))
+ if (test_bit(GPIOD_FLAG_USED_AS_IRQ, &flags))
return label ? label->str : "interrupt";
- if (!test_bit(FLAG_REQUESTED, &flags))
+ if (!test_bit(GPIOD_FLAG_REQUESTED, &flags))
return NULL;
return label ? label->str : NULL;
@@ -450,8 +450,8 @@ int gpiod_get_direction(struct gpio_desc *desc)
* Open drain emulation using input mode may incorrectly report
* input here, fix that up.
*/
- if (test_bit(FLAG_OPEN_DRAIN, &flags) &&
- test_bit(FLAG_IS_OUT, &flags))
+ if (test_bit(GPIOD_FLAG_OPEN_DRAIN, &flags) &&
+ test_bit(GPIOD_FLAG_IS_OUT, &flags))
return 0;
if (!guard.gc->get_direction)
@@ -468,7 +468,7 @@ int gpiod_get_direction(struct gpio_desc *desc)
if (ret > 0)
ret = 1;
- assign_bit(FLAG_IS_OUT, &flags, !ret);
+ assign_bit(GPIOD_FLAG_IS_OUT, &flags, !ret);
WRITE_ONCE(desc->flags, flags);
return ret;
@@ -846,7 +846,7 @@ static void gpiochip_free_remaining_irqs(struct gpio_chip *gc)
{
struct gpio_desc *desc;
- for_each_gpio_desc_with_flag(gc, desc, FLAG_USED_AS_IRQ)
+ for_each_gpio_desc_with_flag(gc, desc, GPIOD_FLAG_USED_AS_IRQ)
gpiod_free_irqs(desc);
}
@@ -1037,11 +1037,6 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
int base = 0;
int ret;
- /* Only allow one set() and one set_multiple(). */
- if ((gc->set && gc->set_rv) ||
- (gc->set_multiple && gc->set_multiple_rv))
- return -EINVAL;
-
/*
* First: allocate and populate the internal stat container, and
* set up the struct device.
@@ -1174,10 +1169,10 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
* lock here.
*/
if (gc->get_direction && gpiochip_line_is_valid(gc, desc_index))
- assign_bit(FLAG_IS_OUT, &desc->flags,
+ assign_bit(GPIOD_FLAG_IS_OUT, &desc->flags,
!gc->get_direction(gc, desc_index));
else
- assign_bit(FLAG_IS_OUT,
+ assign_bit(GPIOD_FLAG_IS_OUT,
&desc->flags, !gc->direction_input);
}
@@ -2354,11 +2349,13 @@ int gpiochip_add_pingroup_range(struct gpio_chip *gc,
EXPORT_SYMBOL_GPL(gpiochip_add_pingroup_range);
/**
- * gpiochip_add_pin_range() - add a range for GPIO <-> pin mapping
+ * gpiochip_add_pin_range_with_pins() - add a range for GPIO <-> pin mapping
* @gc: the gpiochip to add the range for
* @pinctl_name: the dev_name() of the pin controller to map to
* @gpio_offset: the start offset in the current gpio_chip number space
* @pin_offset: the start offset in the pin controller number space
+ * @pins: the list of non-consecutive pins to accumulate in this range (if not
+ * NULL, pin_offset is ignored by pinctrl core)
* @npins: the number of pins from the offset of each pin space (GPIO and
* pin controller) to accumulate in this range
*
@@ -2370,9 +2367,12 @@ EXPORT_SYMBOL_GPL(gpiochip_add_pingroup_range);
* Returns:
* 0 on success, or a negative errno on failure.
*/
-int gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name,
- unsigned int gpio_offset, unsigned int pin_offset,
- unsigned int npins)
+int gpiochip_add_pin_range_with_pins(struct gpio_chip *gc,
+ const char *pinctl_name,
+ unsigned int gpio_offset,
+ unsigned int pin_offset,
+ unsigned int const *pins,
+ unsigned int npins)
{
struct gpio_pin_range *pin_range;
struct gpio_device *gdev = gc->gpiodev;
@@ -2390,6 +2390,7 @@ int gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name,
pin_range->range.name = gc->label;
pin_range->range.base = gdev->base + gpio_offset;
pin_range->range.pin_base = pin_offset;
+ pin_range->range.pins = pins;
pin_range->range.npins = npins;
pin_range->pctldev = pinctrl_find_and_add_gpio_range(pinctl_name,
&pin_range->range);
@@ -2399,16 +2400,21 @@ int gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name,
kfree(pin_range);
return ret;
}
- chip_dbg(gc, "created GPIO range %d->%d ==> %s PIN %d->%d\n",
- gpio_offset, gpio_offset + npins - 1,
- pinctl_name,
- pin_offset, pin_offset + npins - 1);
+ if (pin_range->range.pins)
+		chip_dbg(gc, "created GPIO range %d->%d ==> %s %d sparse PIN range { %d, ... }\n",
+ gpio_offset, gpio_offset + npins - 1,
+ pinctl_name, npins, pins[0]);
+ else
+ chip_dbg(gc, "created GPIO range %d->%d ==> %s PIN %d->%d\n",
+ gpio_offset, gpio_offset + npins - 1,
+ pinctl_name,
+ pin_offset, pin_offset + npins - 1);
list_add_tail(&pin_range->node, &gdev->pin_ranges);
return 0;
}
-EXPORT_SYMBOL_GPL(gpiochip_add_pin_range);
+EXPORT_SYMBOL_GPL(gpiochip_add_pin_range_with_pins);
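With a non-NULL pins[] array the pinctrl core ignores pin_offset and maps the GPIO range onto the listed, non-consecutive pins. A hedged usage sketch; the chip, pinctrl device name, and pin numbers are hypothetical:

static const unsigned int foo_pins[] = { 2, 5, 11, 12 };

static int foo_add_ranges(struct gpio_chip *gc)
{
	/* GPIOs 0..3 -> pins 2, 5, 11, 12 on "pinctrl-foo" */
	return gpiochip_add_pin_range_with_pins(gc, "pinctrl-foo",
						0,	/* gpio_offset */
						0,	/* pin_offset, ignored */
						foo_pins,
						ARRAY_SIZE(foo_pins));
}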
/**
* gpiochip_remove_pin_ranges() - remove all the GPIO <-> pin mappings
@@ -2443,7 +2449,7 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
if (!guard.gc)
return -ENODEV;
- if (test_and_set_bit(FLAG_REQUESTED, &desc->flags))
+ if (test_and_set_bit(GPIOD_FLAG_REQUESTED, &desc->flags))
return -EBUSY;
offset = gpio_chip_hwgpio(desc);
@@ -2472,7 +2478,7 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
return 0;
out_clear_bit:
- clear_bit(FLAG_REQUESTED, &desc->flags);
+ clear_bit(GPIOD_FLAG_REQUESTED, &desc->flags);
return ret;
}
@@ -2506,20 +2512,20 @@ static void gpiod_free_commit(struct gpio_desc *desc)
flags = READ_ONCE(desc->flags);
- if (guard.gc && test_bit(FLAG_REQUESTED, &flags)) {
+ if (guard.gc && test_bit(GPIOD_FLAG_REQUESTED, &flags)) {
if (guard.gc->free)
guard.gc->free(guard.gc, gpio_chip_hwgpio(desc));
- clear_bit(FLAG_ACTIVE_LOW, &flags);
- clear_bit(FLAG_REQUESTED, &flags);
- clear_bit(FLAG_OPEN_DRAIN, &flags);
- clear_bit(FLAG_OPEN_SOURCE, &flags);
- clear_bit(FLAG_PULL_UP, &flags);
- clear_bit(FLAG_PULL_DOWN, &flags);
- clear_bit(FLAG_BIAS_DISABLE, &flags);
- clear_bit(FLAG_EDGE_RISING, &flags);
- clear_bit(FLAG_EDGE_FALLING, &flags);
- clear_bit(FLAG_IS_HOGGED, &flags);
+ clear_bit(GPIOD_FLAG_ACTIVE_LOW, &flags);
+ clear_bit(GPIOD_FLAG_REQUESTED, &flags);
+ clear_bit(GPIOD_FLAG_OPEN_DRAIN, &flags);
+ clear_bit(GPIOD_FLAG_OPEN_SOURCE, &flags);
+ clear_bit(GPIOD_FLAG_PULL_UP, &flags);
+ clear_bit(GPIOD_FLAG_PULL_DOWN, &flags);
+ clear_bit(GPIOD_FLAG_BIAS_DISABLE, &flags);
+ clear_bit(GPIOD_FLAG_EDGE_RISING, &flags);
+ clear_bit(GPIOD_FLAG_EDGE_FALLING, &flags);
+ clear_bit(GPIOD_FLAG_IS_HOGGED, &flags);
#ifdef CONFIG_OF_DYNAMIC
WRITE_ONCE(desc->hog, NULL);
#endif
@@ -2562,7 +2568,7 @@ char *gpiochip_dup_line_label(struct gpio_chip *gc, unsigned int offset)
if (IS_ERR(desc))
return NULL;
- if (!test_bit(FLAG_REQUESTED, &desc->flags))
+ if (!test_bit(GPIOD_FLAG_REQUESTED, &desc->flags))
return NULL;
guard(srcu)(&desc->gdev->desc_srcu);
@@ -2730,11 +2736,11 @@ static int gpio_set_bias(struct gpio_desc *desc)
flags = READ_ONCE(desc->flags);
- if (test_bit(FLAG_BIAS_DISABLE, &flags))
+ if (test_bit(GPIOD_FLAG_BIAS_DISABLE, &flags))
bias = PIN_CONFIG_BIAS_DISABLE;
- else if (test_bit(FLAG_PULL_UP, &flags))
+ else if (test_bit(GPIOD_FLAG_PULL_UP, &flags))
bias = PIN_CONFIG_BIAS_PULL_UP;
- else if (test_bit(FLAG_PULL_DOWN, &flags))
+ else if (test_bit(GPIOD_FLAG_PULL_DOWN, &flags))
bias = PIN_CONFIG_BIAS_PULL_DOWN;
else
return 0;
@@ -2876,7 +2882,7 @@ int gpiod_direction_input_nonotify(struct gpio_desc *desc)
}
}
if (ret == 0) {
- clear_bit(FLAG_IS_OUT, &desc->flags);
+ clear_bit(GPIOD_FLAG_IS_OUT, &desc->flags);
ret = gpio_set_bias(desc);
}
@@ -2891,19 +2897,14 @@ static int gpiochip_set(struct gpio_chip *gc, unsigned int offset, int value)
lockdep_assert_held(&gc->gpiodev->srcu);
- if (WARN_ON(unlikely(!gc->set && !gc->set_rv)))
+ if (WARN_ON(unlikely(!gc->set)))
return -EOPNOTSUPP;
- if (gc->set_rv) {
- ret = gc->set_rv(gc, offset, value);
- if (ret > 0)
- ret = -EBADE;
-
- return ret;
- }
+ ret = gc->set(gc, offset, value);
+ if (ret > 0)
+ ret = -EBADE;
- gc->set(gc, offset, value);
- return 0;
+ return ret;
}
static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
@@ -2919,7 +2920,7 @@ static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
* output-only, but if there is then not even a .set() operation it
* is pretty tricky to drive the output line.
*/
- if (!guard.gc->set && !guard.gc->set_rv && !guard.gc->direction_output) {
+ if (!guard.gc->set && !guard.gc->direction_output) {
gpiod_warn(desc,
"%s: missing set() and direction_output() operations\n",
__func__);
@@ -2954,7 +2955,7 @@ static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
}
if (!ret)
- set_bit(FLAG_IS_OUT, &desc->flags);
+ set_bit(GPIOD_FLAG_IS_OUT, &desc->flags);
trace_gpio_value(desc_to_gpio(desc), 0, val);
trace_gpio_direction(desc_to_gpio(desc), 0, ret);
return ret;
@@ -3020,21 +3021,21 @@ int gpiod_direction_output_nonotify(struct gpio_desc *desc, int value)
flags = READ_ONCE(desc->flags);
- if (test_bit(FLAG_ACTIVE_LOW, &flags))
+ if (test_bit(GPIOD_FLAG_ACTIVE_LOW, &flags))
value = !value;
else
value = !!value;
/* GPIOs used for enabled IRQs shall not be set as output */
- if (test_bit(FLAG_USED_AS_IRQ, &flags) &&
- test_bit(FLAG_IRQ_IS_ENABLED, &flags)) {
+ if (test_bit(GPIOD_FLAG_USED_AS_IRQ, &flags) &&
+ test_bit(GPIOD_FLAG_IRQ_IS_ENABLED, &flags)) {
gpiod_err(desc,
"%s: tried to set a GPIO tied to an IRQ as output\n",
__func__);
return -EIO;
}
- if (test_bit(FLAG_OPEN_DRAIN, &flags)) {
+ if (test_bit(GPIOD_FLAG_OPEN_DRAIN, &flags)) {
/* First see if we can enable open drain in hardware */
ret = gpio_set_config(desc, PIN_CONFIG_DRIVE_OPEN_DRAIN);
if (!ret)
@@ -3042,7 +3043,7 @@ int gpiod_direction_output_nonotify(struct gpio_desc *desc, int value)
/* Emulate open drain by not actively driving the line high */
if (value)
goto set_output_flag;
- } else if (test_bit(FLAG_OPEN_SOURCE, &flags)) {
+ } else if (test_bit(GPIOD_FLAG_OPEN_SOURCE, &flags)) {
ret = gpio_set_config(desc, PIN_CONFIG_DRIVE_OPEN_SOURCE);
if (!ret)
goto set_output_value;
@@ -3069,7 +3070,7 @@ set_output_flag:
* set the IS_OUT flag or otherwise we won't be able to set the line
* value anymore.
*/
- set_bit(FLAG_IS_OUT, &desc->flags);
+ set_bit(GPIOD_FLAG_IS_OUT, &desc->flags);
return 0;
}
@@ -3209,10 +3210,10 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
{
VALIDATE_DESC(desc);
/*
- * Handle FLAG_TRANSITORY first, enabling queries to gpiolib for
+ * Handle GPIOD_FLAG_TRANSITORY first, enabling queries to gpiolib for
* persistence state.
*/
- assign_bit(FLAG_TRANSITORY, &desc->flags, transitory);
+ assign_bit(GPIOD_FLAG_TRANSITORY, &desc->flags, transitory);
/* If the driver supports it, set the persistence state now */
return gpio_set_config_with_argument_optional(desc,
@@ -3230,7 +3231,7 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
int gpiod_is_active_low(const struct gpio_desc *desc)
{
VALIDATE_DESC(desc);
- return test_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ return test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags);
}
EXPORT_SYMBOL_GPL(gpiod_is_active_low);
@@ -3241,7 +3242,7 @@ EXPORT_SYMBOL_GPL(gpiod_is_active_low);
void gpiod_toggle_active_low(struct gpio_desc *desc)
{
VALIDATE_DESC_VOID(desc);
- change_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ change_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags);
gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
}
EXPORT_SYMBOL_GPL(gpiod_toggle_active_low);
@@ -3447,7 +3448,7 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
int hwgpio = gpio_chip_hwgpio(desc);
int value = test_bit(hwgpio, bits);
- if (!raw && test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ if (!raw && test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
__assign_bit(j, value_bitmap, value);
trace_gpio_value(desc_to_gpio(desc), 1, value);
@@ -3509,7 +3510,7 @@ int gpiod_get_value(const struct gpio_desc *desc)
if (value < 0)
return value;
- if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ if (test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
return value;
@@ -3592,7 +3593,7 @@ static int gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
} else {
ret = gpiochip_direction_output(guard.gc, offset, 0);
if (!ret)
- set_bit(FLAG_IS_OUT, &desc->flags);
+ set_bit(GPIOD_FLAG_IS_OUT, &desc->flags);
}
trace_gpio_direction(desc_to_gpio(desc), value, ret);
if (ret < 0)
@@ -3619,7 +3620,7 @@ static int gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value)
if (value) {
ret = gpiochip_direction_output(guard.gc, offset, 1);
if (!ret)
- set_bit(FLAG_IS_OUT, &desc->flags);
+ set_bit(GPIOD_FLAG_IS_OUT, &desc->flags);
} else {
ret = gpiochip_direction_input(guard.gc, offset);
}
@@ -3634,7 +3635,7 @@ static int gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value)
static int gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value)
{
- if (unlikely(!test_bit(FLAG_IS_OUT, &desc->flags)))
+ if (unlikely(!test_bit(GPIOD_FLAG_IS_OUT, &desc->flags)))
return -EPERM;
CLASS(gpio_chip_guard, guard)(desc);
@@ -3665,19 +3666,14 @@ static int gpiochip_set_multiple(struct gpio_chip *gc,
lockdep_assert_held(&gc->gpiodev->srcu);
- if (gc->set_multiple_rv) {
- ret = gc->set_multiple_rv(gc, mask, bits);
+ if (gc->set_multiple) {
+ ret = gc->set_multiple(gc, mask, bits);
if (ret > 0)
ret = -EBADE;
return ret;
}
- if (gc->set_multiple) {
- gc->set_multiple(gc, mask, bits);
- return 0;
- }
-
/* set outputs if the corresponding mask bit is set */
for_each_set_bit(i, mask, gc->ngpio) {
ret = gpiochip_set(gc, i, test_bit(i, bits));
@@ -3709,7 +3705,7 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
WARN_ON(array_info->gdev->can_sleep);
for (i = 0; i < array_size; i++) {
- if (unlikely(!test_bit(FLAG_IS_OUT,
+ if (unlikely(!test_bit(GPIOD_FLAG_IS_OUT,
&desc_array[i]->flags)))
return -EPERM;
}
@@ -3773,7 +3769,7 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
int hwgpio = gpio_chip_hwgpio(desc);
int value = test_bit(i, value_bitmap);
- if (unlikely(!test_bit(FLAG_IS_OUT, &desc->flags)))
+ if (unlikely(!test_bit(GPIOD_FLAG_IS_OUT, &desc->flags)))
return -EPERM;
/*
@@ -3783,16 +3779,16 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
*/
if (!raw && !(array_info &&
test_bit(i, array_info->invert_mask)) &&
- test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
trace_gpio_value(desc_to_gpio(desc), 0, value);
/*
* collect all normal outputs belonging to the same chip
* open drain and open source outputs are set individually
*/
- if (test_bit(FLAG_OPEN_DRAIN, &desc->flags) && !raw) {
+ if (test_bit(GPIOD_FLAG_OPEN_DRAIN, &desc->flags) && !raw) {
gpio_set_open_drain_value_commit(desc, value);
- } else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags) && !raw) {
+ } else if (test_bit(GPIOD_FLAG_OPEN_SOURCE, &desc->flags) && !raw) {
gpio_set_open_source_value_commit(desc, value);
} else {
__set_bit(hwgpio, mask);
@@ -3858,12 +3854,12 @@ EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
*/
static int gpiod_set_value_nocheck(struct gpio_desc *desc, int value)
{
- if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ if (test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
- if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
+ if (test_bit(GPIOD_FLAG_OPEN_DRAIN, &desc->flags))
return gpio_set_open_drain_value_commit(desc, value);
- else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
+ else if (test_bit(GPIOD_FLAG_OPEN_SOURCE, &desc->flags))
return gpio_set_open_source_value_commit(desc, value);
return gpiod_set_raw_value_commit(desc, value);
@@ -4067,16 +4063,16 @@ int gpiochip_lock_as_irq(struct gpio_chip *gc, unsigned int offset)
}
/* To be valid for IRQ the line needs to be input or open drain */
- if (test_bit(FLAG_IS_OUT, &desc->flags) &&
- !test_bit(FLAG_OPEN_DRAIN, &desc->flags)) {
+ if (test_bit(GPIOD_FLAG_IS_OUT, &desc->flags) &&
+ !test_bit(GPIOD_FLAG_OPEN_DRAIN, &desc->flags)) {
chip_err(gc,
"%s: tried to flag a GPIO set as output for IRQ\n",
__func__);
return -EIO;
}
- set_bit(FLAG_USED_AS_IRQ, &desc->flags);
- set_bit(FLAG_IRQ_IS_ENABLED, &desc->flags);
+ set_bit(GPIOD_FLAG_USED_AS_IRQ, &desc->flags);
+ set_bit(GPIOD_FLAG_IRQ_IS_ENABLED, &desc->flags);
return 0;
}
@@ -4098,8 +4094,8 @@ void gpiochip_unlock_as_irq(struct gpio_chip *gc, unsigned int offset)
if (IS_ERR(desc))
return;
- clear_bit(FLAG_USED_AS_IRQ, &desc->flags);
- clear_bit(FLAG_IRQ_IS_ENABLED, &desc->flags);
+ clear_bit(GPIOD_FLAG_USED_AS_IRQ, &desc->flags);
+ clear_bit(GPIOD_FLAG_IRQ_IS_ENABLED, &desc->flags);
}
EXPORT_SYMBOL_GPL(gpiochip_unlock_as_irq);
@@ -4108,8 +4104,8 @@ void gpiochip_disable_irq(struct gpio_chip *gc, unsigned int offset)
struct gpio_desc *desc = gpiochip_get_desc(gc, offset);
if (!IS_ERR(desc) &&
- !WARN_ON(!test_bit(FLAG_USED_AS_IRQ, &desc->flags)))
- clear_bit(FLAG_IRQ_IS_ENABLED, &desc->flags);
+ !WARN_ON(!test_bit(GPIOD_FLAG_USED_AS_IRQ, &desc->flags)))
+ clear_bit(GPIOD_FLAG_IRQ_IS_ENABLED, &desc->flags);
}
EXPORT_SYMBOL_GPL(gpiochip_disable_irq);
@@ -4118,14 +4114,14 @@ void gpiochip_enable_irq(struct gpio_chip *gc, unsigned int offset)
struct gpio_desc *desc = gpiochip_get_desc(gc, offset);
if (!IS_ERR(desc) &&
- !WARN_ON(!test_bit(FLAG_USED_AS_IRQ, &desc->flags))) {
+ !WARN_ON(!test_bit(GPIOD_FLAG_USED_AS_IRQ, &desc->flags))) {
/*
* We must not be output when using IRQ UNLESS we are
* open drain.
*/
- WARN_ON(test_bit(FLAG_IS_OUT, &desc->flags) &&
- !test_bit(FLAG_OPEN_DRAIN, &desc->flags));
- set_bit(FLAG_IRQ_IS_ENABLED, &desc->flags);
+ WARN_ON(test_bit(GPIOD_FLAG_IS_OUT, &desc->flags) &&
+ !test_bit(GPIOD_FLAG_OPEN_DRAIN, &desc->flags));
+ set_bit(GPIOD_FLAG_IRQ_IS_ENABLED, &desc->flags);
}
}
EXPORT_SYMBOL_GPL(gpiochip_enable_irq);
@@ -4135,7 +4131,7 @@ bool gpiochip_line_is_irq(struct gpio_chip *gc, unsigned int offset)
if (offset >= gc->ngpio)
return false;
- return test_bit(FLAG_USED_AS_IRQ, &gc->gpiodev->descs[offset].flags);
+ return test_bit(GPIOD_FLAG_USED_AS_IRQ, &gc->gpiodev->descs[offset].flags);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_irq);
@@ -4168,7 +4164,7 @@ bool gpiochip_line_is_open_drain(struct gpio_chip *gc, unsigned int offset)
if (offset >= gc->ngpio)
return false;
- return test_bit(FLAG_OPEN_DRAIN, &gc->gpiodev->descs[offset].flags);
+ return test_bit(GPIOD_FLAG_OPEN_DRAIN, &gc->gpiodev->descs[offset].flags);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_open_drain);
@@ -4177,7 +4173,7 @@ bool gpiochip_line_is_open_source(struct gpio_chip *gc, unsigned int offset)
if (offset >= gc->ngpio)
return false;
- return test_bit(FLAG_OPEN_SOURCE, &gc->gpiodev->descs[offset].flags);
+ return test_bit(GPIOD_FLAG_OPEN_SOURCE, &gc->gpiodev->descs[offset].flags);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_open_source);
@@ -4186,7 +4182,7 @@ bool gpiochip_line_is_persistent(struct gpio_chip *gc, unsigned int offset)
if (offset >= gc->ngpio)
return false;
- return !test_bit(FLAG_TRANSITORY, &gc->gpiodev->descs[offset].flags);
+ return !test_bit(GPIOD_FLAG_TRANSITORY, &gc->gpiodev->descs[offset].flags);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_persistent);
@@ -4228,7 +4224,7 @@ int gpiod_get_value_cansleep(const struct gpio_desc *desc)
if (value < 0)
return value;
- if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ if (test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
return value;
@@ -4619,6 +4615,23 @@ static struct gpio_desc *gpiod_find_by_fwnode(struct fwnode_handle *fwnode,
return desc;
}
+static struct gpio_desc *gpiod_fwnode_lookup(struct fwnode_handle *fwnode,
+ struct device *consumer,
+ const char *con_id,
+ unsigned int idx,
+ enum gpiod_flags *flags,
+ unsigned long *lookupflags)
+{
+ struct gpio_desc *desc;
+
+ desc = gpiod_find_by_fwnode(fwnode, consumer, con_id, idx, flags, lookupflags);
+ if (gpiod_not_found(desc) && !IS_ERR_OR_NULL(fwnode))
+ desc = gpiod_find_by_fwnode(fwnode->secondary, consumer, con_id,
+ idx, flags, lookupflags);
+
+ return desc;
+}
+
struct gpio_desc *gpiod_find_and_request(struct device *consumer,
struct fwnode_handle *fwnode,
const char *con_id,
@@ -4637,8 +4650,8 @@ struct gpio_desc *gpiod_find_and_request(struct device *consumer,
int ret = 0;
scoped_guard(srcu, &gpio_devices_srcu) {
- desc = gpiod_find_by_fwnode(fwnode, consumer, con_id, idx,
- &flags, &lookupflags);
+ desc = gpiod_fwnode_lookup(fwnode, consumer, con_id, idx,
+ &flags, &lookupflags);
if (gpiod_not_found(desc) && platform_lookup_allowed) {
/*
* Either we are not using DT or ACPI, or their lookup
@@ -4810,10 +4823,10 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
int ret;
if (lflags & GPIO_ACTIVE_LOW)
- set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ set_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags);
if (lflags & GPIO_OPEN_DRAIN)
- set_bit(FLAG_OPEN_DRAIN, &desc->flags);
+ set_bit(GPIOD_FLAG_OPEN_DRAIN, &desc->flags);
else if (dflags & GPIOD_FLAGS_BIT_OPEN_DRAIN) {
/*
* This enforces open drain mode from the consumer side.
@@ -4821,13 +4834,13 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
* should *REALLY* have specified them as open drain in the
* first place, so print a little warning here.
*/
- set_bit(FLAG_OPEN_DRAIN, &desc->flags);
+ set_bit(GPIOD_FLAG_OPEN_DRAIN, &desc->flags);
gpiod_warn(desc,
"enforced open drain please flag it properly in DT/ACPI DSDT/board file\n");
}
if (lflags & GPIO_OPEN_SOURCE)
- set_bit(FLAG_OPEN_SOURCE, &desc->flags);
+ set_bit(GPIOD_FLAG_OPEN_SOURCE, &desc->flags);
if (((lflags & GPIO_PULL_UP) && (lflags & GPIO_PULL_DOWN)) ||
((lflags & GPIO_PULL_UP) && (lflags & GPIO_PULL_DISABLE)) ||
@@ -4838,11 +4851,11 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
}
if (lflags & GPIO_PULL_UP)
- set_bit(FLAG_PULL_UP, &desc->flags);
+ set_bit(GPIOD_FLAG_PULL_UP, &desc->flags);
else if (lflags & GPIO_PULL_DOWN)
- set_bit(FLAG_PULL_DOWN, &desc->flags);
+ set_bit(GPIOD_FLAG_PULL_DOWN, &desc->flags);
else if (lflags & GPIO_PULL_DISABLE)
- set_bit(FLAG_BIAS_DISABLE, &desc->flags);
+ set_bit(GPIOD_FLAG_BIAS_DISABLE, &desc->flags);
ret = gpiod_set_transitory(desc, (lflags & GPIO_TRANSITORY));
if (ret < 0)
@@ -4947,7 +4960,7 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
if (!guard.gc)
return -ENODEV;
- if (test_and_set_bit(FLAG_IS_HOGGED, &desc->flags))
+ if (test_and_set_bit(GPIOD_FLAG_IS_HOGGED, &desc->flags))
return 0;
hwnum = gpio_chip_hwgpio(desc);
@@ -4955,7 +4968,7 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
local_desc = gpiochip_request_own_desc(guard.gc, hwnum, name,
lflags, dflags);
if (IS_ERR(local_desc)) {
- clear_bit(FLAG_IS_HOGGED, &desc->flags);
+ clear_bit(GPIOD_FLAG_IS_HOGGED, &desc->flags);
ret = PTR_ERR(local_desc);
pr_err("requesting hog GPIO %s (chip %s, offset %d) failed, %d\n",
name, gdev->label, hwnum, ret);
@@ -4978,7 +4991,7 @@ static void gpiochip_free_hogs(struct gpio_chip *gc)
{
struct gpio_desc *desc;
- for_each_gpio_desc_with_flag(gc, desc, FLAG_IS_HOGGED)
+ for_each_gpio_desc_with_flag(gc, desc, GPIOD_FLAG_IS_HOGGED)
gpiochip_free_own_desc(desc);
}
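for_each_gpio_desc_with_flag() hides the filtering here; a loose sketch of the per-chip iteration it performs (not the exact macro expansion):

static void demo_free_hogs(struct gpio_chip *gc)
{
	struct gpio_desc *d;
	unsigned int i;

	for (i = 0; i < gc->ngpio; i++) {
		d = gpiochip_get_desc(gc, i);
		if (!IS_ERR(d) && test_bit(GPIOD_FLAG_IS_HOGGED, &d->flags))
			gpiochip_free_own_desc(d);
	}
}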
@@ -5093,8 +5106,8 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
} else {
dflags = READ_ONCE(desc->flags);
/* Exclude open drain or open source from fast output */
- if (test_bit(FLAG_OPEN_DRAIN, &dflags) ||
- test_bit(FLAG_OPEN_SOURCE, &dflags))
+ if (test_bit(GPIOD_FLAG_OPEN_DRAIN, &dflags) ||
+ test_bit(GPIOD_FLAG_OPEN_SOURCE, &dflags))
__clear_bit(descs->ndescs,
array_info->set_mask);
/* Identify 'fast' pins which require invertion */
@@ -5252,12 +5265,12 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev)
for_each_gpio_desc(gc, desc) {
guard(srcu)(&desc->gdev->desc_srcu);
flags = READ_ONCE(desc->flags);
- is_irq = test_bit(FLAG_USED_AS_IRQ, &flags);
- if (is_irq || test_bit(FLAG_REQUESTED, &flags)) {
+ is_irq = test_bit(GPIOD_FLAG_USED_AS_IRQ, &flags);
+ if (is_irq || test_bit(GPIOD_FLAG_REQUESTED, &flags)) {
gpiod_get_direction(desc);
- is_out = test_bit(FLAG_IS_OUT, &flags);
+ is_out = test_bit(GPIOD_FLAG_IS_OUT, &flags);
value = gpio_chip_get_value(gc, desc);
- active_low = test_bit(FLAG_ACTIVE_LOW, &flags);
+ active_low = test_bit(GPIOD_FLAG_ACTIVE_LOW, &flags);
seq_printf(s, " gpio-%-3u (%-20.20s|%-20.20s) %s %s %s%s\n",
gpio, desc->name ?: "", gpiod_get_label(desc),
is_out ? "out" : "in ",
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 9b74738a9ca5..2a003a7311e7 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -186,24 +186,24 @@ struct gpio_desc {
struct gpio_device *gdev;
unsigned long flags;
/* flag symbols are bit numbers */
-#define FLAG_REQUESTED 0
-#define FLAG_IS_OUT 1
-#define FLAG_EXPORT 2 /* protected by sysfs_lock */
-#define FLAG_SYSFS 3 /* exported via /sys/class/gpio/control */
-#define FLAG_ACTIVE_LOW 6 /* value has active low */
-#define FLAG_OPEN_DRAIN 7 /* Gpio is open drain type */
-#define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */
-#define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */
-#define FLAG_IRQ_IS_ENABLED 10 /* GPIO is connected to an enabled IRQ */
-#define FLAG_IS_HOGGED 11 /* GPIO is hogged */
-#define FLAG_TRANSITORY 12 /* GPIO may lose value in sleep or reset */
-#define FLAG_PULL_UP 13 /* GPIO has pull up enabled */
-#define FLAG_PULL_DOWN 14 /* GPIO has pull down enabled */
-#define FLAG_BIAS_DISABLE 15 /* GPIO has pull disabled */
-#define FLAG_EDGE_RISING 16 /* GPIO CDEV detects rising edge events */
-#define FLAG_EDGE_FALLING 17 /* GPIO CDEV detects falling edge events */
-#define FLAG_EVENT_CLOCK_REALTIME 18 /* GPIO CDEV reports REALTIME timestamps in events */
-#define FLAG_EVENT_CLOCK_HTE 19 /* GPIO CDEV reports hardware timestamps in events */
+#define GPIOD_FLAG_REQUESTED 0 /* GPIO is in use */
+#define GPIOD_FLAG_IS_OUT 1 /* GPIO is in output mode */
+#define GPIOD_FLAG_EXPORT 2 /* GPIO is exported to user-space */
+#define GPIOD_FLAG_SYSFS 3 /* GPIO is exported via /sys/class/gpio */
+#define GPIOD_FLAG_ACTIVE_LOW 6 /* GPIO is active-low */
+#define GPIOD_FLAG_OPEN_DRAIN 7 /* GPIO is open drain type */
+#define GPIOD_FLAG_OPEN_SOURCE 8 /* GPIO is open source type */
+#define GPIOD_FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */
+#define GPIOD_FLAG_IRQ_IS_ENABLED 10 /* GPIO is connected to an enabled IRQ */
+#define GPIOD_FLAG_IS_HOGGED 11 /* GPIO is hogged */
+#define GPIOD_FLAG_TRANSITORY 12 /* GPIO may lose value in sleep or reset */
+#define GPIOD_FLAG_PULL_UP 13 /* GPIO has pull up enabled */
+#define GPIOD_FLAG_PULL_DOWN 14 /* GPIO has pull down enabled */
+#define GPIOD_FLAG_BIAS_DISABLE 15 /* GPIO has pull disabled */
+#define GPIOD_FLAG_EDGE_RISING 16 /* GPIO CDEV detects rising edge events */
+#define GPIOD_FLAG_EDGE_FALLING 17 /* GPIO CDEV detects falling edge events */
+#define GPIOD_FLAG_EVENT_CLOCK_REALTIME 18 /* GPIO CDEV reports REALTIME timestamps in events */
+#define GPIOD_FLAG_EVENT_CLOCK_HTE 19 /* GPIO CDEV reports hardware timestamps in events */
/* Connection label */
struct gpio_desc_label __rcu *label;
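The GPIOD_FLAG_ prefix makes the namespace explicit, but the constants remain private to gpiolib: consumers only ever see accessor functions. A hedged sketch of the consumer-side view (the foo_* wrapper is invented):

#include <linux/gpio/consumer.h>

static bool foo_line_is_inverted(struct gpio_desc *desc)
{
	/* internally tests GPIOD_FLAG_ACTIVE_LOW, per the gpiolib.c hunk above */
	return gpiod_is_active_low(desc);
}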
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index f7ea8e895c0c..7e6bc0b3a589 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -396,9 +396,11 @@ source "drivers/gpu/drm/sprd/Kconfig"
source "drivers/gpu/drm/imagination/Kconfig"
+source "drivers/gpu/drm/tyr/Kconfig"
+
config DRM_HYPERV
tristate "DRM Support for Hyper-V synthetic video device"
- depends on DRM && PCI && HYPERV
+ depends on DRM && PCI && HYPERV_VMBUS
select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 4dafbdc8f86a..4b2f7d794275 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -220,6 +220,7 @@ obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
obj-$(CONFIG_DRM_LIMA) += lima/
obj-$(CONFIG_DRM_PANFROST) += panfrost/
obj-$(CONFIG_DRM_PANTHOR) += panthor/
+obj-$(CONFIG_DRM_TYR) += tyr/
obj-$(CONFIG_DRM_ASPEED_GFX) += aspeed/
obj-$(CONFIG_DRM_MCDE) += mcde/
obj-$(CONFIG_DRM_TIDSS) += tidss/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 930de203d533..64e7acff8f18 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -84,7 +84,8 @@ amdgpu-y += \
vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \
nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o soc21.o soc24.o \
sienna_cichlid.o smu_v13_0_10.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o \
- nbio_v7_9.o aqua_vanjaram.o nbio_v7_11.o lsdma_v7_0.o hdp_v7_0.o nbif_v6_3_1.o
+ nbio_v7_9.o aqua_vanjaram.o nbio_v7_11.o lsdma_v7_0.o hdp_v7_0.o nbif_v6_3_1.o \
+ cyan_skillfish_reg_init.o
# add DF block
amdgpu-y += \
@@ -137,7 +138,6 @@ amdgpu-y += \
# add DCE block
amdgpu-y += \
dce_v10_0.o \
- dce_v11_0.o \
amdgpu_vkms.o
# add GFX block
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index ef3af170dda4..2a0df4cabb99 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -63,6 +63,7 @@
#include "kgd_pp_interface.h"
#include "amd_shared.h"
+#include "amdgpu_utils.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
@@ -434,7 +435,6 @@ struct amdgpu_clock {
uint32_t default_mclk;
uint32_t default_sclk;
uint32_t default_dispclk;
- uint32_t current_dispclk;
uint32_t dp_extclk;
uint32_t max_pixel_clock;
};
@@ -545,7 +545,7 @@ struct amdgpu_wb {
* this value can be accessed directly by using the offset as an index.
* For the GPU address, it is necessary to use gpu_addr and the offset.
*/
- volatile uint32_t *wb;
+ uint32_t *wb;
/**
* @gpu_addr:
@@ -721,7 +721,7 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
/* VRAM scratch page for HDP bug, default vram page */
struct amdgpu_mem_scratch {
struct amdgpu_bo *robj;
- volatile uint32_t *ptr;
+ uint32_t *ptr;
u64 gpu_addr;
};
@@ -752,6 +752,7 @@ typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, u
struct amdgpu_mmio_remap {
u32 reg_offset;
resource_size_t bus_addr;
+ struct amdgpu_bo *bo;
};
/* Define the HW IP blocks that will be used in the driver; add more if necessary */
@@ -819,6 +820,20 @@ struct amdgpu_ip_map_info {
uint32_t mask);
};
+enum amdgpu_uid_type {
+ AMDGPU_UID_TYPE_XCD,
+ AMDGPU_UID_TYPE_AID,
+ AMDGPU_UID_TYPE_SOC,
+ AMDGPU_UID_TYPE_MAX
+};
+
+#define AMDGPU_UID_INST_MAX 8 /* max number of instances for each UID type */
+
+struct amdgpu_uid {
+ uint64_t uid[AMDGPU_UID_TYPE_MAX][AMDGPU_UID_INST_MAX];
+ struct amdgpu_device *adev;
+};
+
struct amd_powerplay {
void *pp_handle;
const struct amd_pm_funcs *pp_funcs;
@@ -896,6 +911,9 @@ struct amdgpu_pcie_reset_ctx {
bool in_link_reset;
bool occurs_dpc;
bool audio_suspended;
+ struct pci_dev *swus;
+ struct pci_saved_state *swus_pcistate;
+ struct pci_saved_state *swds_pcistate;
};
/*
@@ -929,12 +947,6 @@ enum amdgpu_enforce_isolation_mode {
AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER = 3,
};
-
-/*
- * Non-zero (true) if the GPU has VRAM. Zero (false) otherwise.
- */
-#define AMDGPU_HAS_VRAM(_adev) ((_adev)->gmc.real_vram_size)
-
struct amdgpu_device {
struct device *dev;
struct pci_dev *pdev;
@@ -1138,9 +1150,6 @@ struct amdgpu_device {
/* for userq and VM fences */
struct amdgpu_seq64 seq64;
- /* KFD */
- struct amdgpu_kfd_dev kfd;
-
/* UMC */
struct amdgpu_umc umc;
@@ -1302,6 +1311,12 @@ struct amdgpu_device {
struct list_head userq_mgr_list;
struct mutex userq_mutex;
bool userq_halt_for_enforce_isolation;
+ struct amdgpu_uid *uid_info;
+
+ /* KFD
+	 * Must be last; it ends in a flexible-array member.
+ */
+ struct amdgpu_kfd_dev kfd;
};
static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
@@ -1785,4 +1800,9 @@ static inline int amdgpu_device_bus_status_check(struct amdgpu_device *adev)
return 0;
}
+void amdgpu_device_set_uid(struct amdgpu_uid *uid_info,
+ enum amdgpu_uid_type type, uint8_t inst,
+ uint64_t uid);
+uint64_t amdgpu_device_get_uid(struct amdgpu_uid *uid_info,
+ enum amdgpu_uid_type type, uint8_t inst);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
index cbc40cad581b..9b3180449150 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
@@ -76,6 +76,7 @@ static void aca_banks_release(struct aca_banks *banks)
list_for_each_entry_safe(node, tmp, &banks->list, node) {
list_del(&node->node);
kvfree(node);
+ banks->nr_banks--;
}
}
@@ -130,6 +131,27 @@ static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, st
RAS_EVENT_LOG(adev, event_id, HW_ERR "hardware error logged by the scrubber\n");
}
+static bool aca_bank_hwip_is_matched(struct aca_bank *bank, enum aca_hwip_type type)
+{
+
+ struct aca_hwip *hwip;
+ int hwid, mcatype;
+ u64 ipid;
+
+ if (!bank || type == ACA_HWIP_TYPE_UNKNOW)
+ return false;
+
+ hwip = &aca_hwid_mcatypes[type];
+ if (!hwip->hwid)
+ return false;
+
+ ipid = bank->regs[ACA_REG_IDX_IPID];
+ hwid = ACA_REG__IPID__HARDWAREID(ipid);
+ mcatype = ACA_REG__IPID__MCATYPE(ipid);
+
+ return hwip->hwid == hwid && hwip->mcatype == mcatype;
+}
+
static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_type type,
int start, int count,
struct aca_banks *banks, struct ras_query_context *qctx)
@@ -168,6 +190,15 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_
bank.smu_err_type = type;
+ /*
+	 * A UE injection while background workloads are running can cause the
+	 * poison to be consumed, surfacing UE-type banks from non-UMC IPs.
+	 * Those are unexpected here, so skip them.
+ */
+ if (type == ACA_SMU_TYPE_UE &&
+ ACA_REG__STATUS__POISON(bank.regs[ACA_REG_IDX_STATUS]) &&
+ !aca_bank_hwip_is_matched(&bank, ACA_HWIP_TYPE_UMC))
+ continue;
+
aca_smu_bank_dump(adev, i, count, &bank, qctx);
ret = aca_banks_add_bank(banks, &bank);
@@ -178,27 +209,6 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_
return 0;
}
-static bool aca_bank_hwip_is_matched(struct aca_bank *bank, enum aca_hwip_type type)
-{
-
- struct aca_hwip *hwip;
- int hwid, mcatype;
- u64 ipid;
-
- if (!bank || type == ACA_HWIP_TYPE_UNKNOW)
- return false;
-
- hwip = &aca_hwid_mcatypes[type];
- if (!hwip->hwid)
- return false;
-
- ipid = bank->regs[ACA_REG_IDX_IPID];
- hwid = ACA_REG__IPID__HARDWAREID(ipid);
- mcatype = ACA_REG__IPID__MCATYPE(ipid);
-
- return hwip->hwid == hwid && hwip->mcatype == mcatype;
-}
-
static bool aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type)
{
const struct aca_bank_ops *bank_ops = handle->bank_ops;
@@ -229,6 +239,7 @@ static struct aca_bank_error *new_bank_error(struct aca_error *aerr, struct aca_
mutex_lock(&aerr->lock);
list_add_tail(&bank_error->node, &aerr->list);
+ aerr->nr_errors++;
mutex_unlock(&aerr->lock);
return bank_error;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index fbe7616555c8..a2879d2b7c8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -250,16 +250,24 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc)
{
- if (adev->kfd.dev)
- kgd2kfd_suspend(adev->kfd.dev, suspend_proc);
+ if (adev->kfd.dev) {
+ if (adev->in_s0ix)
+ kgd2kfd_stop_sched_all_nodes(adev->kfd.dev);
+ else
+ kgd2kfd_suspend(adev->kfd.dev, suspend_proc);
+ }
}
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc)
{
int r = 0;
- if (adev->kfd.dev)
- r = kgd2kfd_resume(adev->kfd.dev, resume_proc);
+ if (adev->kfd.dev) {
+ if (adev->in_s0ix)
+ r = kgd2kfd_start_sched_all_nodes(adev->kfd.dev);
+ else
+ r = kgd2kfd_resume(adev->kfd.dev, resume_proc);
+ }
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 33eb4826b58b..9e120c934cc1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -107,11 +107,13 @@ struct amdgpu_kfd_dev {
bool init_complete;
struct work_struct reset_work;
- /* HMM page migration MEMORY_DEVICE_PRIVATE mapping */
- struct dev_pagemap pgmap;
-
/* Client for KFD BO GEM handle allocations */
struct drm_client_dev client;
+
+ /* HMM page migration MEMORY_DEVICE_PRIVATE mapping
+	 * Must be last; it ends in a flexible-array member.
+ */
+ struct dev_pagemap pgmap;
};
enum kgd_engine_type {
@@ -426,7 +428,9 @@ void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd);
void kgd2kfd_unlock_kfd(struct kfd_dev *kfd);
int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id);
+int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd);
int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id);
+int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd);
bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id);
bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry,
bool retry_fault);
@@ -516,11 +520,21 @@ static inline int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
return 0;
}
+static inline int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd)
+{
+ return 0;
+}
+
static inline int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
{
return 0;
}
+static inline int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd)
+{
+ return 0;
+}
+
static inline bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
{
return false;
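The new *_all_nodes() entry points follow the header's stub convention: real declarations when KFD support is built in, static inline no-ops otherwise, so amdgpu callers need no #ifdefs. The pattern, sketched with a hypothetical op (the guard is presumably the same CONFIG_HSA_AMD test used elsewhere in this header):

#if IS_ENABLED(CONFIG_HSA_AMD)
int foo_kfd_op(struct kfd_dev *kfd);
#else
static inline int foo_kfd_op(struct kfd_dev *kfd)
{
	return 0;	/* nothing to do when KFD is compiled out */
}
#endif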
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 04ef0ca10541..0239114fb6c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -352,7 +352,7 @@ static int kgd_hqd_dump(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32_SOC15_IP(GC, addr); \
} while (0)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -449,7 +449,7 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
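In these dump helpers the output parameter is a uint32_t (**)[2] (an array of register/value pairs), so sizeof(**dump) is one 8-byte pair, and the kmalloc_array() form requests exactly the same HQD_N_REGS * 2 * sizeof(uint32_t) bytes as before, now with multiply-overflow checking. A hedged restatement:

#include <linux/build_bug.h>
#include <linux/slab.h>

static uint32_t (*demo_alloc_dump(size_t n_regs))[2]
{
	BUILD_BUG_ON(sizeof(uint32_t[2]) != 2 * sizeof(uint32_t));

	/* old form: kmalloc(n_regs * 2 * sizeof(uint32_t), GFP_KERNEL) */
	return kmalloc_array(n_regs, sizeof(uint32_t[2]), GFP_KERNEL);
}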
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
index 6d08bc2781a3..f2278a0937ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
@@ -338,7 +338,7 @@ static int hqd_dump_v10_3(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32_SOC15_IP(GC, addr); \
} while (0)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -435,7 +435,7 @@ static int hqd_sdma_dump_v10_3(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+12)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
index e0e6a6a49d90..aaccf0b9947d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
@@ -323,7 +323,7 @@ static int hqd_dump_v11(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32(addr); \
} while (0)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -420,7 +420,7 @@ static int hqd_sdma_dump_v11(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (7+11+1+12+12)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c
index 6f0dc23c901b..e0ceab400b2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c
@@ -115,7 +115,7 @@ static int hqd_dump_v12(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32(addr); \
} while (0)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -146,7 +146,7 @@ static int hqd_sdma_dump_v12(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (last_reg - first_reg + 1)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 260165bbe373..83020963dfde 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -213,19 +213,35 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
spin_lock(&kfd_mem_limit.mem_limit_lock);
if (kfd_mem_limit.system_mem_used + system_mem_needed >
- kfd_mem_limit.max_system_mem_limit)
+ kfd_mem_limit.max_system_mem_limit) {
pr_debug("Set no_system_mem_limit=1 if using shared memory\n");
+ if (!no_system_mem_limit) {
+ ret = -ENOMEM;
+ goto release;
+ }
+ }
- if ((kfd_mem_limit.system_mem_used + system_mem_needed >
- kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
- (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
- kfd_mem_limit.max_ttm_mem_limit) ||
- (adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] + vram_needed >
- vram_size - reserved_for_pt - reserved_for_ras - atomic64_read(&adev->vram_pin_size))) {
+ if (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
+ kfd_mem_limit.max_ttm_mem_limit) {
ret = -ENOMEM;
goto release;
}
+ /* If is_app_apu is false and apu_prefer_gtt is true, this is an APU with
+ * carveout < GTT. In that case, VRAM allocations go to the GTT domain; skip
+ * the VRAM check since the ttm_mem_limit check already covers the allocation.
+ */
+
+ if (adev && xcp_id >= 0 && (!adev->apu_prefer_gtt || adev->gmc.is_app_apu)) {
+ uint64_t vram_available =
+ vram_size - reserved_for_pt - reserved_for_ras -
+ atomic64_read(&adev->vram_pin_size);
+ if (adev->kfd.vram_used[xcp_id] + vram_needed > vram_available) {
+ ret = -ENOMEM;
+ goto release;
+ }
+ }
+
/* Update memory accounting by decreasing available system
* memory, TTM memory and GPU memory as computed above
*/
@@ -494,7 +510,8 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
return amdgpu_sync_fence(sync, vm->last_update, GFP_KERNEL);
}
-static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
+static uint64_t get_pte_flags(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct kgd_mem *mem)
{
uint32_t mapping_flags = AMDGPU_VM_PAGE_READABLE |
AMDGPU_VM_MTYPE_DEFAULT;
@@ -504,7 +521,7 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
- return amdgpu_gem_va_map_flags(adev, mapping_flags);
+ return mapping_flags;
}
/**
@@ -961,7 +978,7 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
goto unwind;
}
attachment[i]->va = va;
- attachment[i]->pte_flags = get_pte_flags(adev, mem);
+ attachment[i]->pte_flags = get_pte_flags(adev, vm, mem);
attachment[i]->adev = adev;
list_add(&attachment[i]->list, &mem->attachments);
@@ -1072,7 +1089,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
return 0;
}
- ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range);
+ ret = amdgpu_ttm_tt_get_user_pages(bo, &range);
if (ret) {
if (ret == -EAGAIN)
pr_debug("Failed to get user pages, try again\n");
@@ -1086,6 +1103,9 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
pr_err("%s: Failed to reserve BO\n", __func__);
goto release_out;
}
+
+ amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);
+
amdgpu_bo_placement_from_domain(bo, mem->domain);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret)
@@ -1626,11 +1646,15 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
uint64_t vram_available, system_mem_available, ttm_mem_available;
spin_lock(&kfd_mem_limit.mem_limit_lock);
- vram_available = KFD_XCP_MEMORY_SIZE(adev, xcp_id)
- - adev->kfd.vram_used_aligned[xcp_id]
- - atomic64_read(&adev->vram_pin_size)
- - reserved_for_pt
- - reserved_for_ras;
+ if (adev->apu_prefer_gtt && !adev->gmc.is_app_apu)
+ vram_available = KFD_XCP_MEMORY_SIZE(adev, xcp_id)
+ - adev->kfd.vram_used_aligned[xcp_id];
+ else
+ vram_available = KFD_XCP_MEMORY_SIZE(adev, xcp_id)
+ - adev->kfd.vram_used_aligned[xcp_id]
+ - atomic64_read(&adev->vram_pin_size)
+ - reserved_for_pt
+ - reserved_for_ras;
if (adev->apu_prefer_gtt) {
system_mem_available = no_system_mem_limit ?
@@ -2544,8 +2568,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
}
/* Get updated user pages */
- ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
- &mem->range);
+ ret = amdgpu_ttm_tt_get_user_pages(bo, &mem->range);
if (ret) {
pr_debug("Failed %d to get user pages\n", ret);
@@ -2563,17 +2586,24 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
* from the KFD, trigger a segmentation fault in VM debug mode.
*/
if (amdgpu_ttm_adev(bo->tbo.bdev)->debug_vm_userptr) {
+ struct kfd_process *p;
+
pr_err("Pid %d unmapped memory before destroying userptr at GPU addr 0x%llx\n",
pid_nr(process_info->pid), mem->va);
// Send GPU VM fault to user space
- kfd_signal_vm_fault_event_with_userptr(kfd_lookup_process_by_pid(process_info->pid),
- mem->va);
+ p = kfd_lookup_process_by_pid(process_info->pid);
+ if (p) {
+ kfd_signal_vm_fault_event_with_userptr(p, mem->va);
+ kfd_unref_process(p);
+ }
}
ret = 0;
}
+ amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, mem->range);
+
mutex_lock(&process_info->notifier_lock);
/* Mark the BO as valid unless it was invalidated
@@ -2969,9 +2999,22 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
struct amdgpu_device *adev = amdgpu_ttm_adev(
peer_vm->root.bo->tbo.bdev);
+ struct amdgpu_fpriv *fpriv =
+ container_of(peer_vm, struct amdgpu_fpriv, vm);
+
+ ret = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
+ if (ret) {
+ dev_dbg(adev->dev,
+ "Memory eviction: handle PRT moved failed, pid %8d. Try again.\n",
+ pid_nr(process_info->pid));
+ goto validate_map_fail;
+ }
+
ret = amdgpu_vm_handle_moved(adev, peer_vm, &exec.ticket);
if (ret) {
- pr_debug("Memory eviction: handle moved failed. Try again\n");
+ dev_dbg(adev->dev,
+ "Memory eviction: handle moved failed, pid %8d. Try again.\n",
+ pid_nr(process_info->pid));
goto validate_map_fail;
}
}
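One fix above takes a reference-counted process lookup that may return NULL and brackets the use with a NULL check and kfd_unref_process(). A small self-contained sketch of that lookup/use/unref discipline, with placeholder names rather than the real KFD API:

#include <stdio.h>
#include <stdlib.h>

/* A lookup that takes a reference may fail; callers must NULL-check
 * before use and always drop the reference afterwards. */
struct proc { int refcount; int pid; };

static struct proc *proc_lookup(int pid)
{
        struct proc *p = malloc(sizeof(*p));

        if (!p)
                return NULL;    /* lookup can fail: caller must check */
        p->refcount = 1;
        p->pid = pid;
        return p;
}

static void proc_unref(struct proc *p)
{
        if (--p->refcount == 0)
                free(p);
}

int main(void)
{
        struct proc *p = proc_lookup(42);

        if (p) {
                printf("signal fault to pid %d\n", p->pid);
                proc_unref(p);  /* balance the lookup reference */
        }
        return 0;
}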
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index e476e45b996a..763f2b8dcf13 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -706,7 +706,6 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
}
adev->clock.dp_extclk =
le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
- adev->clock.current_dispclk = adev->clock.default_dispclk;
adev->clock.max_pixel_clock = le16_to_cpu(firmware_info->info.usMaxPixelClock);
if (adev->clock.max_pixel_clock == 0)
@@ -1816,16 +1815,43 @@ static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
return sysfs_emit(buf, "%s\n", ctx->vbios_pn);
}
+static ssize_t amdgpu_atombios_get_vbios_build(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct atom_context *ctx = adev->mode_info.atom_context;
+
+ return sysfs_emit(buf, "%s\n", ctx->build_num);
+}
+
static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
NULL);
+static DEVICE_ATTR(vbios_build, 0444, amdgpu_atombios_get_vbios_build, NULL);
static struct attribute *amdgpu_vbios_version_attrs[] = {
- &dev_attr_vbios_version.attr,
- NULL
+ &dev_attr_vbios_version.attr, &dev_attr_vbios_build.attr, NULL
};
+static umode_t amdgpu_vbios_version_attrs_is_visible(struct kobject *kobj,
+ struct attribute *attr,
+ int index)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct atom_context *ctx = adev->mode_info.atom_context;
+
+ if (attr == &dev_attr_vbios_build.attr && !strlen(ctx->build_num))
+ return 0;
+
+ return attr->mode;
+}
+
const struct attribute_group amdgpu_vbios_version_attr_group = {
- .attrs = amdgpu_vbios_version_attrs
+ .attrs = amdgpu_vbios_version_attrs,
+ .is_visible = amdgpu_vbios_version_attrs_is_visible,
};
int amdgpu_atombios_sysfs_init(struct amdgpu_device *adev)
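The new is_visible callback above lets the attribute group hide vbios_build at registration time when the VBIOS provides no build number, instead of exposing an empty file. A compact userspace sketch of that visibility filtering; the names and the standalone main() are illustrative only:

#include <stdio.h>
#include <string.h>

/* At group registration time each attribute can be vetoed by returning
 * mode 0; here the vbios_build analogue is hidden when its backing
 * string is empty. */
struct attr { const char *name; unsigned mode; };

static unsigned is_visible(const struct attr *a, const char *build_num)
{
        if (!strcmp(a->name, "vbios_build") && build_num[0] == '\0')
                return 0;       /* hide: no build number available */
        return a->mode;
}

int main(void)
{
        const struct attr attrs[] = {
                { "vbios_version", 0444 },
                { "vbios_build",   0444 },
        };
        const char *build_num = "";     /* empty => vbios_build hidden */
        size_t i;

        for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++)
                printf("%s: mode %o\n", attrs[i].name,
                       is_visible(&attrs[i], build_num));
        return 0;
}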
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 702f6610d024..66fb37b64388 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -184,43 +184,36 @@ void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
struct drm_amdgpu_bo_list_entry **info_param)
{
- const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);
+ const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
+ const uint32_t bo_info_size = in->bo_info_size;
+ const uint32_t bo_number = in->bo_number;
struct drm_amdgpu_bo_list_entry *info;
- int r;
-
- info = kvmalloc_array(in->bo_number, info_size, GFP_KERNEL);
- if (!info)
- return -ENOMEM;
/* copy the handle array from userspace to a kernel buffer */
- r = -EFAULT;
- if (likely(info_size == in->bo_info_size)) {
- unsigned long bytes = in->bo_number *
- in->bo_info_size;
-
- if (copy_from_user(info, uptr, bytes))
- goto error_free;
-
+ if (likely(info_size == bo_info_size)) {
+ info = vmemdup_array_user(uptr, bo_number, info_size);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
} else {
- unsigned long bytes = min(in->bo_info_size, info_size);
+ const uint32_t bytes = min(bo_info_size, info_size);
unsigned i;
- memset(info, 0, in->bo_number * info_size);
- for (i = 0; i < in->bo_number; ++i) {
- if (copy_from_user(&info[i], uptr, bytes))
- goto error_free;
+ info = kvmalloc_array(bo_number, info_size, GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
- uptr += in->bo_info_size;
+ memset(info, 0, bo_number * info_size);
+ for (i = 0; i < bo_number; ++i, uptr += bo_info_size) {
+ if (copy_from_user(&info[i], uptr, bytes)) {
+ kvfree(info);
+ return -EFAULT;
+ }
}
}
*info_param = info;
return 0;
-
-error_free:
- kvfree(info);
- return r;
}
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
index 555cd6d877c3..a716c9886c74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -38,7 +38,6 @@ struct amdgpu_bo_list_entry {
struct amdgpu_bo *bo;
struct amdgpu_bo_va *bo_va;
uint32_t priority;
- struct page **user_pages;
struct hmm_range *range;
bool user_invalidated;
};
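The bo_list rework above uses vmemdup_array_user() for the fast path where the userspace entry size matches the kernel struct, and keeps a per-entry loop for the compatibility path where the strides differ. A userspace sketch of that stride-tolerant copy (copy min(src_stride, dst_size) per entry, zero-fill the rest); the helper name is illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Userspace declares its own entry stride (src_stride), which may be
 * larger or smaller than the kernel-side struct (dst_size); copy the
 * common prefix of each entry and zero-fill whatever the source does
 * not provide. */
static void copy_entries(void *dst, size_t dst_size,
                         const void *src, size_t src_stride, size_t n)
{
        size_t bytes = src_stride < dst_size ? src_stride : dst_size;
        size_t i;

        memset(dst, 0, dst_size * n);
        for (i = 0; i < n; i++)
                memcpy((char *)dst + i * dst_size,
                       (const char *)src + i * src_stride, bytes);
}

int main(void)
{
        /* "newer userspace" entries carry an extra field we ignore */
        struct { uint32_t handle, prio; uint64_t extra; } src[2] = {
                { 1, 10, 99 }, { 2, 20, 99 },
        };
        struct { uint32_t handle, prio; } dst[2];

        copy_entries(dst, sizeof(dst[0]), src, sizeof(src[0]), 2);
        printf("%u %u / %u %u\n", dst[0].handle, dst[0].prio,
               dst[1].handle, dst[1].prio);
        return 0;
}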
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 5e375e9c4f5d..47e9bfba0642 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -398,30 +398,28 @@ static void amdgpu_connector_add_common_modes(struct drm_encoder *encoder,
struct drm_display_mode *mode = NULL;
struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
int i;
- static const struct mode_size {
+ int n;
+ struct mode_size {
+ char name[DRM_DISPLAY_MODE_LEN];
int w;
int h;
- } common_modes[17] = {
- { 640, 480},
- { 720, 480},
- { 800, 600},
- { 848, 480},
- {1024, 768},
- {1152, 768},
- {1280, 720},
- {1280, 800},
- {1280, 854},
- {1280, 960},
- {1280, 1024},
- {1440, 900},
- {1400, 1050},
- {1680, 1050},
- {1600, 1200},
- {1920, 1080},
- {1920, 1200}
+ } common_modes[] = {
+ { "640x480", 640, 480},
+ { "800x600", 800, 600},
+ { "1024x768", 1024, 768},
+ { "1280x720", 1280, 720},
+ { "1280x800", 1280, 800},
+ {"1280x1024", 1280, 1024},
+ { "1440x900", 1440, 900},
+ {"1680x1050", 1680, 1050},
+ {"1600x1200", 1600, 1200},
+ {"1920x1080", 1920, 1080},
+ {"1920x1200", 1920, 1200}
};
- for (i = 0; i < 17; i++) {
+ n = ARRAY_SIZE(common_modes);
+
+ for (i = 0; i < n; i++) {
if (amdgpu_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) {
if (common_modes[i].w > 1024 ||
common_modes[i].h > 768)
@@ -434,12 +432,11 @@ static void amdgpu_connector_add_common_modes(struct drm_encoder *encoder,
common_modes[i].h == native_mode->vdisplay))
continue;
}
- if (common_modes[i].w < 320 || common_modes[i].h < 200)
- continue;
mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
if (!mode)
return;
+ strscpy(mode->name, common_modes[i].name, DRM_DISPLAY_MODE_LEN);
drm_mode_probed_add(connector, mode);
}
@@ -1195,29 +1192,69 @@ static void amdgpu_connector_dvi_force(struct drm_connector *connector)
amdgpu_connector->use_digital = true;
}
+/**
+ * amdgpu_max_hdmi_pixel_clock - Return max supported HDMI (TMDS) pixel clock
+ * @adev: pointer to amdgpu_device
+ *
+ * Return: maximum supported HDMI (TMDS) pixel clock in KHz.
+ */
+static int amdgpu_max_hdmi_pixel_clock(const struct amdgpu_device *adev)
+{
+ if (adev->asic_type >= CHIP_POLARIS10)
+ return 600000;
+ else if (adev->asic_type >= CHIP_TONGA)
+ return 300000;
+ else
+ return 297000;
+}
+
+/**
+ * amdgpu_connector_dvi_mode_valid - Validate a mode on DVI/HDMI connectors
+ * @connector: DRM connector to validate the mode on
+ * @mode: display mode to validate
+ *
+ * Validate the given display mode on DVI and HDMI connectors, including
+ * analog signals on DVI-I.
+ *
+ * Return: drm_mode_status indicating whether the mode is valid.
+ */
static enum drm_mode_status amdgpu_connector_dvi_mode_valid(struct drm_connector *connector,
const struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+ const int max_hdmi_pixel_clock = amdgpu_max_hdmi_pixel_clock(adev);
+ const int max_dvi_single_link_pixel_clock = 165000;
+ int max_digital_pixel_clock_khz;
/* XXX check mode bandwidth */
- if (amdgpu_connector->use_digital && (mode->clock > 165000)) {
- if ((amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) ||
- (amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
- (amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B)) {
- return MODE_OK;
- } else if (connector->display_info.is_hdmi) {
- /* HDMI 1.3+ supports max clock of 340 Mhz */
- if (mode->clock > 340000)
- return MODE_CLOCK_HIGH;
- else
- return MODE_OK;
- } else {
- return MODE_CLOCK_HIGH;
+ if (amdgpu_connector->use_digital) {
+ switch (amdgpu_connector->connector_object_id) {
+ case CONNECTOR_OBJECT_ID_HDMI_TYPE_A:
+ max_digital_pixel_clock_khz = max_hdmi_pixel_clock;
+ break;
+ case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I:
+ case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D:
+ max_digital_pixel_clock_khz = max_dvi_single_link_pixel_clock;
+ break;
+ case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I:
+ case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D:
+ case CONNECTOR_OBJECT_ID_HDMI_TYPE_B:
+ max_digital_pixel_clock_khz = max_dvi_single_link_pixel_clock * 2;
+ break;
}
+
+ /* When the display EDID claims that it's an HDMI display,
+ * we use the HDMI encoder mode of the display HW,
+ * so we should verify against the max HDMI clock here.
+ */
+ if (connector->display_info.is_hdmi)
+ max_digital_pixel_clock_khz = max_hdmi_pixel_clock;
+
+ if (mode->clock > max_digital_pixel_clock_khz)
+ return MODE_CLOCK_HIGH;
}
/* check against the max pixel clock */
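The rewritten validation above first derives a per-connector TMDS limit from the connector object ID, then lets an HDMI sink raise the limit to the ASIC's HDMI maximum before comparing the mode clock. A compact restatement of that selection logic, with hypothetical enum names standing in for the connector object IDs:

#include <stdbool.h>
#include <stdio.h>

enum conn { DVI_SINGLE, DVI_DUAL, HDMI_A, HDMI_B };

/* Pick a per-connector TMDS limit, then let an HDMI sink raise it to
 * the ASIC's HDMI maximum. All clocks are in kHz. */
static bool mode_clock_ok(enum conn type, bool sink_is_hdmi,
                          int mode_clock_khz, int max_hdmi_khz)
{
        const int single_link = 165000;
        int limit;

        switch (type) {
        case HDMI_A:
                limit = max_hdmi_khz;
                break;
        case DVI_SINGLE:
                limit = single_link;
                break;
        default:        /* dual-link DVI, HDMI type B */
                limit = 2 * single_link;
                break;
        }
        if (sink_is_hdmi)
                limit = max_hdmi_khz;
        return mode_clock_khz <= limit;
}

int main(void)
{
        /* 1080p@60 (~148.5 MHz) vs 4K@60 (~594 MHz) on single-link DVI */
        printf("%d %d\n",
               mode_clock_ok(DVI_SINGLE, false, 148500, 600000),
               mode_clock_ok(DVI_SINGLE, false, 594000, 600000));
        return 0;
}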
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
index 25252231a68a..ef996493115f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
@@ -68,7 +68,6 @@ void amdgpu_cper_entry_fill_hdr(struct amdgpu_device *adev,
hdr->error_severity = sev;
hdr->valid_bits.platform_id = 1;
- hdr->valid_bits.partition_id = 1;
hdr->valid_bits.timestamp = 1;
amdgpu_cper_get_timestamp(&hdr->timestamp);
@@ -174,7 +173,7 @@ int amdgpu_cper_entry_fill_runtime_section(struct amdgpu_device *adev,
struct cper_sec_nonstd_err *section;
bool poison;
- poison = (sev == CPER_SEV_NON_FATAL_CORRECTED) ? false : true;
+ poison = sev != CPER_SEV_NON_FATAL_CORRECTED;
section_desc = (struct cper_sec_desc *)((uint8_t *)hdr + SEC_DESC_OFFSET(idx));
section = (struct cper_sec_nonstd_err *)((uint8_t *)hdr +
NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
@@ -206,6 +205,7 @@ int amdgpu_cper_entry_fill_bad_page_threshold_section(struct amdgpu_device *adev
{
struct cper_sec_desc *section_desc;
struct cper_sec_nonstd_err *section;
+ uint32_t socket_id;
section_desc = (struct cper_sec_desc *)((uint8_t *)hdr + SEC_DESC_OFFSET(idx));
section = (struct cper_sec_nonstd_err *)((uint8_t *)hdr +
@@ -219,11 +219,17 @@ int amdgpu_cper_entry_fill_bad_page_threshold_section(struct amdgpu_device *adev
section->hdr.valid_bits.err_context_cnt = 1;
section->info.error_type = RUNTIME;
+ section->info.valid_bits.ms_chk = 1;
section->info.ms_chk_bits.err_type_valid = 1;
+ section->info.ms_chk_bits.err_type = 1;
+ section->info.ms_chk_bits.pcc = 1;
section->ctx.reg_ctx_type = CPER_CTX_TYPE_CRASH;
section->ctx.reg_arr_size = sizeof(section->ctx.reg_dump);
/* Hardcoded Reg dump for bad page threshold CPER */
+ socket_id = (adev->smuio.funcs && adev->smuio.funcs->get_socket_id) ?
+ adev->smuio.funcs->get_socket_id(adev) :
+ 0;
section->ctx.reg_dump[CPER_ACA_REG_CTL_LO] = 0x1;
section->ctx.reg_dump[CPER_ACA_REG_CTL_HI] = 0x0;
section->ctx.reg_dump[CPER_ACA_REG_STATUS_LO] = 0x137;
@@ -234,8 +240,8 @@ int amdgpu_cper_entry_fill_bad_page_threshold_section(struct amdgpu_device *adev
section->ctx.reg_dump[CPER_ACA_REG_MISC0_HI] = 0x0;
section->ctx.reg_dump[CPER_ACA_REG_CONFIG_LO] = 0x2;
section->ctx.reg_dump[CPER_ACA_REG_CONFIG_HI] = 0x1ff;
- section->ctx.reg_dump[CPER_ACA_REG_IPID_LO] = 0x0;
- section->ctx.reg_dump[CPER_ACA_REG_IPID_HI] = 0x96;
+ section->ctx.reg_dump[CPER_ACA_REG_IPID_LO] = (socket_id / 4) & 0x01;
+ section->ctx.reg_dump[CPER_ACA_REG_IPID_HI] = 0x096 | (((socket_id % 4) & 0x3) << 12);
section->ctx.reg_dump[CPER_ACA_REG_SYND_LO] = 0x0;
section->ctx.reg_dump[CPER_ACA_REG_SYND_HI] = 0x0;
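The register dump above now encodes the socket ID into the IPID pair: bit 0 of IPID_LO carries socket_id / 4 and bits 13:12 of IPID_HI carry socket_id % 4 on top of the fixed 0x096 base. A short standalone program showing the packing for socket IDs 0 through 7:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t socket_id;

        for (socket_id = 0; socket_id < 8; socket_id++) {
                /* same expressions as the hunk above */
                uint32_t lo = (socket_id / 4) & 0x1;
                uint32_t hi = 0x096 | (((socket_id % 4) & 0x3) << 12);

                printf("socket %u: IPID_LO=0x%x IPID_HI=0x%x\n",
                       socket_id, lo, hi);
        }
        return 0;
}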
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index a2adaacf6adb..9cd7741d2254 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -29,6 +29,7 @@
#include <linux/pagemap.h>
#include <linux/sync_file.h>
#include <linux/dma-buf.h>
+#include <linux/hmm.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
@@ -178,25 +179,17 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { };
struct amdgpu_vm *vm = &fpriv->vm;
- uint64_t *chunk_array_user;
uint64_t *chunk_array;
uint32_t uf_offset = 0;
size_t size;
int ret;
int i;
- chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t),
- GFP_KERNEL);
- if (!chunk_array)
- return -ENOMEM;
-
- /* get chunks */
- chunk_array_user = u64_to_user_ptr(cs->in.chunks);
- if (copy_from_user(chunk_array, chunk_array_user,
- sizeof(uint64_t)*cs->in.num_chunks)) {
- ret = -EFAULT;
- goto free_chunk;
- }
+ chunk_array = memdup_array_user(u64_to_user_ptr(cs->in.chunks),
+ cs->in.num_chunks,
+ sizeof(uint64_t));
+ if (IS_ERR(chunk_array))
+ return PTR_ERR(chunk_array);
p->nchunks = cs->in.num_chunks;
p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
@@ -209,7 +202,6 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
for (i = 0; i < p->nchunks; i++) {
struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL;
struct drm_amdgpu_cs_chunk user_chunk;
- uint32_t __user *cdata;
chunk_ptr = u64_to_user_ptr(chunk_array[i]);
if (copy_from_user(&user_chunk, chunk_ptr,
@@ -222,20 +214,16 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
p->chunks[i].length_dw = user_chunk.length_dw;
size = p->chunks[i].length_dw;
- cdata = u64_to_user_ptr(user_chunk.chunk_data);
- p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t),
- GFP_KERNEL);
- if (p->chunks[i].kdata == NULL) {
- ret = -ENOMEM;
+ p->chunks[i].kdata = vmemdup_array_user(u64_to_user_ptr(user_chunk.chunk_data),
+ size,
+ sizeof(uint32_t));
+ if (IS_ERR(p->chunks[i].kdata)) {
+ ret = PTR_ERR(p->chunks[i].kdata);
i--;
goto free_partial_kdata;
}
size *= sizeof(uint32_t);
- if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
- ret = -EFAULT;
- goto free_partial_kdata;
- }
/* Assume the worst on the following checks */
ret = -EINVAL;
@@ -286,7 +274,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
}
}
- if (!p->gang_size) {
+ if (!p->gang_size || (amdgpu_sriov_vf(p->adev) && p->gang_size > 1)) {
ret = -EINVAL;
goto free_all_kdata;
}
@@ -396,7 +384,7 @@ static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
chunk_ib->ib_bytes : 0,
AMDGPU_IB_POOL_DELAYED, ib);
if (r) {
- DRM_ERROR("Failed to get ib !\n");
+ drm_err(adev_to_drm(p->adev), "Failed to get ib !\n");
return r;
}
@@ -468,7 +456,7 @@ static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
if (r) {
- DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
+ drm_err(adev_to_drm(p->adev), "syncobj %u failed to find fence @ %llu (%d)!\n",
handle, point, r);
return r;
}
@@ -896,26 +884,13 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
bool userpage_invalidated = false;
struct amdgpu_bo *bo = e->bo;
- int i;
-
- e->user_pages = kvcalloc(bo->tbo.ttm->num_pages,
- sizeof(struct page *),
- GFP_KERNEL);
- if (!e->user_pages) {
- DRM_ERROR("kvmalloc_array failure\n");
- r = -ENOMEM;
- goto out_free_user_pages;
- }
- r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages, &e->range);
- if (r) {
- kvfree(e->user_pages);
- e->user_pages = NULL;
+ r = amdgpu_ttm_tt_get_user_pages(bo, &e->range);
+ if (r)
goto out_free_user_pages;
- }
for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
- if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
+ if (bo->tbo.ttm->pages[i] != hmm_pfn_to_page(e->range->hmm_pfns[i])) {
userpage_invalidated = true;
break;
}
@@ -959,7 +934,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
}
if (amdgpu_ttm_tt_is_userptr(e->bo->tbo.ttm) &&
- e->user_invalidated && e->user_pages) {
+ e->user_invalidated) {
amdgpu_bo_placement_from_domain(e->bo,
AMDGPU_GEM_DOMAIN_CPU);
r = ttm_bo_validate(&e->bo->tbo, &e->bo->placement,
@@ -968,11 +943,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
goto out_free_user_pages;
amdgpu_ttm_tt_set_user_pages(e->bo->tbo.ttm,
- e->user_pages);
+ e->range);
}
-
- kvfree(e->user_pages);
- e->user_pages = NULL;
}
amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
@@ -983,7 +955,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
r = amdgpu_vm_validate(p->adev, &fpriv->vm, NULL,
amdgpu_cs_bo_validate, p);
if (r) {
- DRM_ERROR("amdgpu_vm_validate() failed.\n");
+ drm_err(adev_to_drm(p->adev), "amdgpu_vm_validate() failed.\n");
goto out_free_user_pages;
}
@@ -1014,11 +986,7 @@ out_free_user_pages:
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
struct amdgpu_bo *bo = e->bo;
- if (!e->user_pages)
- continue;
amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
- kvfree(e->user_pages);
- e->user_pages = NULL;
e->range = NULL;
}
mutex_unlock(&p->bo_list->bo_list_mutex);
@@ -1061,13 +1029,13 @@ static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
va_start = ib->gpu_addr & AMDGPU_GMC_HOLE_MASK;
r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
if (r) {
- DRM_ERROR("IB va_start is invalid\n");
+ drm_err(adev_to_drm(p->adev), "IB va_start is invalid\n");
return r;
}
if ((va_start + ib->length_dw * 4) >
(m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
- DRM_ERROR("IB va_start+ib_bytes is invalid\n");
+ drm_err(adev_to_drm(p->adev), "IB va_start+ib_bytes is invalid\n");
return -EINVAL;
}
@@ -1139,6 +1107,9 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
}
}
+ if (!amdgpu_vm_ready(vm))
+ return -EINVAL;
+
r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (r)
return r;
@@ -1235,7 +1206,7 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
if (r) {
if (r != -ERESTARTSYS)
- DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
+ drm_err(adev_to_drm(p->adev), "amdgpu_ctx_wait_prev_fence failed.\n");
return r;
}
@@ -1448,7 +1419,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = amdgpu_cs_parser_init(&parser, adev, filp, data);
if (r) {
- DRM_ERROR_RATELIMITED("Failed to initialize parser %d!\n", r);
+ drm_err_ratelimited(dev, "Failed to initialize parser %d!\n", r);
return r;
}
@@ -1463,9 +1434,9 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = amdgpu_cs_parser_bos(&parser, data);
if (r) {
if (r == -ENOMEM)
- DRM_ERROR("Not enough memory for command submission!\n");
+ drm_err(dev, "Not enough memory for command submission!\n");
else if (r != -ERESTARTSYS && r != -EAGAIN)
- DRM_DEBUG("Failed to process the buffer list %d!\n", r);
+ drm_dbg(dev, "Failed to process the buffer list %d!\n", r);
goto error_fini;
}
@@ -1764,30 +1735,21 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
{
struct amdgpu_device *adev = drm_to_adev(dev);
union drm_amdgpu_wait_fences *wait = data;
- uint32_t fence_count = wait->in.fence_count;
- struct drm_amdgpu_fence *fences_user;
struct drm_amdgpu_fence *fences;
int r;
/* Get the fences from userspace */
- fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
- GFP_KERNEL);
- if (fences == NULL)
- return -ENOMEM;
-
- fences_user = u64_to_user_ptr(wait->in.fences);
- if (copy_from_user(fences, fences_user,
- sizeof(struct drm_amdgpu_fence) * fence_count)) {
- r = -EFAULT;
- goto err_free_fences;
- }
+ fences = memdup_array_user(u64_to_user_ptr(wait->in.fences),
+ wait->in.fence_count,
+ sizeof(struct drm_amdgpu_fence));
+ if (IS_ERR(fences))
+ return PTR_ERR(fences);
if (wait->in.wait_all)
r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
else
r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
-err_free_fences:
kfree(fences);
return r;
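Several hunks above collapse the kvmalloc_array()/kmalloc_array() plus copy_from_user() pairs into memdup_array_user() or vmemdup_array_user(), which size with an overflow check, allocate, and copy in one call, returning an ERR_PTR on failure. A userspace analogue of that helper, using errno in place of ERR_PTR:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Size with an overflow check, allocate, copy; NULL + errno stands in
 * for the kernel's ERR_PTR convention. */
static void *memdup_array(const void *src, size_t n, size_t size)
{
        size_t bytes;
        void *dst;

        if (__builtin_mul_overflow(n, size, &bytes)) {
                errno = EOVERFLOW;
                return NULL;
        }
        dst = malloc(bytes);
        if (!dst) {
                errno = ENOMEM;
                return NULL;
        }
        return memcpy(dst, src, bytes);
}

int main(void)
{
        uint64_t chunks[3] = { 0x1000, 0x2000, 0x3000 };
        uint64_t *copy = memdup_array(chunks, 3, sizeof(*chunks));

        if (!copy)
                return 1;
        printf("%llx\n", (unsigned long long)copy[1]);
        free(copy);
        return 0;
}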
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 0e6e2e2acf5b..a70651050acf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -2136,12 +2136,14 @@ static int amdgpu_pt_info_read(struct seq_file *m, void *unused)
struct drm_file *file;
struct amdgpu_fpriv *fpriv;
struct amdgpu_bo *root_bo;
+ struct amdgpu_device *adev;
int r;
file = m->private;
if (!file)
return -EINVAL;
+ adev = drm_to_adev(file->minor->dev);
fpriv = file->driver_priv;
if (!fpriv || !fpriv->vm.root.bo)
return -ENODEV;
@@ -2153,7 +2155,11 @@ static int amdgpu_pt_info_read(struct seq_file *m, void *unused)
return -EINVAL;
}
- seq_printf(m, "gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(fpriv->vm.root.bo));
+ seq_printf(m, "pd_address: 0x%llx\n", amdgpu_gmc_pd_addr(fpriv->vm.root.bo));
+ seq_printf(m, "max_pfn: 0x%llx\n", adev->vm_manager.max_pfn);
+ seq_printf(m, "num_level: 0x%x\n", adev->vm_manager.num_level);
+ seq_printf(m, "block_size: 0x%x\n", adev->vm_manager.block_size);
+ seq_printf(m, "fragment_size: 0x%x\n", adev->vm_manager.fragment_size);
amdgpu_bo_unreserve(root_bo);
amdgpu_bo_unref(&root_bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6f93473436be..7a899fb4de29 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -95,6 +95,7 @@ MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/cyan_skillfish_gpu_info.bin");
#define AMDGPU_RESUME_MS 2000
#define AMDGPU_MAX_RETRY_LIMIT 2
@@ -178,6 +179,8 @@ struct amdgpu_init_level amdgpu_init_minimal_xgmi = {
BIT(AMD_IP_BLOCK_TYPE_PSP)
};
+static void amdgpu_device_load_switch_state(struct amdgpu_device *adev);
+
static inline bool amdgpu_ip_member_of_hwini(struct amdgpu_device *adev,
enum amd_ip_block_type block)
{
@@ -2445,6 +2448,33 @@ int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
return 1;
}
+static const char *ip_block_names[] = {
+ [AMD_IP_BLOCK_TYPE_COMMON] = "common",
+ [AMD_IP_BLOCK_TYPE_GMC] = "gmc",
+ [AMD_IP_BLOCK_TYPE_IH] = "ih",
+ [AMD_IP_BLOCK_TYPE_SMC] = "smu",
+ [AMD_IP_BLOCK_TYPE_PSP] = "psp",
+ [AMD_IP_BLOCK_TYPE_DCE] = "dce",
+ [AMD_IP_BLOCK_TYPE_GFX] = "gfx",
+ [AMD_IP_BLOCK_TYPE_SDMA] = "sdma",
+ [AMD_IP_BLOCK_TYPE_UVD] = "uvd",
+ [AMD_IP_BLOCK_TYPE_VCE] = "vce",
+ [AMD_IP_BLOCK_TYPE_ACP] = "acp",
+ [AMD_IP_BLOCK_TYPE_VCN] = "vcn",
+ [AMD_IP_BLOCK_TYPE_MES] = "mes",
+ [AMD_IP_BLOCK_TYPE_JPEG] = "jpeg",
+ [AMD_IP_BLOCK_TYPE_VPE] = "vpe",
+ [AMD_IP_BLOCK_TYPE_UMSCH_MM] = "umsch_mm",
+ [AMD_IP_BLOCK_TYPE_ISP] = "isp",
+};
+
+static const char *ip_block_name(struct amdgpu_device *adev, enum amd_ip_block_type type)
+{
+ int idx = (int)type;
+
+ return idx < ARRAY_SIZE(ip_block_names) ? ip_block_names[idx] : "unknown";
+}
+
/**
* amdgpu_device_ip_block_add
*
@@ -2473,8 +2503,13 @@ int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
break;
}
- dev_info(adev->dev, "detected ip block number %d <%s>\n",
- adev->num_ip_blocks, ip_block_version->funcs->name);
+ dev_info(adev->dev, "detected ip block number %d <%s_v%d_%d_%d> (%s)\n",
+ adev->num_ip_blocks,
+ ip_block_name(adev, ip_block_version->type),
+ ip_block_version->major,
+ ip_block_version->minor,
+ ip_block_version->rev,
+ ip_block_version->funcs->name);
adev->ip_blocks[adev->num_ip_blocks].adev = adev;
@@ -2570,9 +2605,6 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
adev->firmware.gpu_info_fw = NULL;
- if (adev->mman.discovery_bin)
- return 0;
-
switch (adev->asic_type) {
default:
return 0;
@@ -2594,8 +2626,13 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
chip_name = "arcturus";
break;
case CHIP_NAVI12:
+ if (adev->mman.discovery_bin)
+ return 0;
chip_name = "navi12";
break;
+ case CHIP_CYAN_SKILLFISH:
+ chip_name = "cyan_skillfish";
+ break;
}
err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw,
@@ -2675,6 +2712,24 @@ out:
return err;
}
+static void amdgpu_uid_init(struct amdgpu_device *adev)
+{
+ /* Initialize the UID for the device */
+ adev->uid_info = kzalloc(sizeof(struct amdgpu_uid), GFP_KERNEL);
+ if (!adev->uid_info) {
+ dev_warn(adev->dev, "Failed to allocate memory for UID\n");
+ return;
+ }
+ adev->uid_info->adev = adev;
+}
+
+static void amdgpu_uid_fini(struct amdgpu_device *adev)
+{
+ /* Free the UID memory */
+ kfree(adev->uid_info);
+ adev->uid_info = NULL;
+}
+
/**
* amdgpu_device_ip_early_init - run early init for hardware IPs
*
@@ -2858,6 +2913,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
if (adev->gmc.xgmi.supported)
amdgpu_xgmi_early_init(adev);
+ if (amdgpu_is_multi_aid(adev))
+ amdgpu_uid_init(adev);
ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
if (ip_block->status.valid != false)
amdgpu_amdkfd_device_probe(adev);
@@ -3271,6 +3328,7 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
* always assumed to be lost.
*/
switch (amdgpu_asic_reset_method(adev)) {
+ case AMD_RESET_METHOD_LEGACY:
case AMD_RESET_METHOD_LINK:
case AMD_RESET_METHOD_BACO:
case AMD_RESET_METHOD_MODE1:
@@ -3389,7 +3447,7 @@ static int amdgpu_device_enable_mgpu_fan_boost(void)
for (i = 0; i < mgpu_info.num_dgpu; i++) {
gpu_ins = &(mgpu_info.gpu_ins[i]);
adev = gpu_ins->adev;
- if (!(adev->flags & AMD_IS_APU) &&
+ if (!(adev->flags & AMD_IS_APU || amdgpu_sriov_multi_vf_mode(adev)) &&
!gpu_ins->mgpu_fan_enabled) {
ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
if (ret)
@@ -3648,6 +3706,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
}
amdgpu_ras_fini(adev);
+ amdgpu_uid_fini(adev);
return 0;
}
@@ -4992,7 +5051,8 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
adev->reset_domain = NULL;
kfree(adev->pci_state);
-
+ kfree(adev->pcie_reset_ctx.swds_pcistate);
+ kfree(adev->pcie_reset_ctx.swus_pcistate);
}
/**
@@ -5012,6 +5072,10 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
if (!adev->in_s4 && (adev->flags & AMD_IS_APU))
return 0;
+ /* No need to evict when going to S5 through S4 callbacks */
+ if (system_state == SYSTEM_POWER_OFF)
+ return 0;
+
ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
if (ret) {
dev_warn(adev->dev, "evicting device resources failed\n");
@@ -5136,7 +5200,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
adev->in_suspend = true;
if (amdgpu_sriov_vf(adev)) {
- if (!adev->in_s0ix && !adev->in_runpm)
+ if (!adev->in_runpm)
amdgpu_amdkfd_suspend_process(adev);
amdgpu_virt_fini_data_exchange(adev);
r = amdgpu_virt_request_full_gpu(adev, false);
@@ -5156,10 +5220,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
amdgpu_device_ip_suspend_phase1(adev);
- if (!adev->in_s0ix) {
- amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
- amdgpu_userq_suspend(adev);
- }
+ amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
+ amdgpu_userq_suspend(adev);
r = amdgpu_device_evict_resources(adev);
if (r)
@@ -5254,15 +5316,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
goto exit;
}
- if (!adev->in_s0ix) {
- r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
- if (r)
- goto exit;
+ r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
+ if (r)
+ goto exit;
- r = amdgpu_userq_resume(adev);
- if (r)
- goto exit;
- }
+ r = amdgpu_userq_resume(adev);
+ if (r)
+ goto exit;
r = amdgpu_device_ip_late_init(adev);
if (r)
@@ -5275,7 +5335,7 @@ exit:
amdgpu_virt_init_data_exchange(adev);
amdgpu_virt_release_full_gpu(adev, true);
- if (!adev->in_s0ix && !r && !adev->in_runpm)
+ if (!r && !adev->in_runpm)
r = amdgpu_amdkfd_resume_process(adev);
}
@@ -5701,7 +5761,7 @@ int amdgpu_device_link_reset(struct amdgpu_device *adev)
dev_info(adev->dev, "GPU link reset\n");
- if (!adev->pcie_reset_ctx.occurs_dpc)
+ if (!amdgpu_reset_in_dpc(adev))
ret = amdgpu_dpm_link_reset(adev);
if (ret)
@@ -5830,6 +5890,7 @@ int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
amdgpu_set_init_level(tmp_adev, init_level);
if (full_reset) {
/* post card */
+ amdgpu_reset_set_dpc_status(tmp_adev, false);
amdgpu_ras_clear_err_state(tmp_adev);
r = amdgpu_device_asic_init(tmp_adev);
if (r) {
@@ -6136,12 +6197,11 @@ static int amdgpu_device_health_check(struct list_head *device_list_handle)
return ret;
}
-static int amdgpu_device_recovery_prepare(struct amdgpu_device *adev,
+static void amdgpu_device_recovery_prepare(struct amdgpu_device *adev,
struct list_head *device_list,
struct amdgpu_hive_info *hive)
{
struct amdgpu_device *tmp_adev = NULL;
- int r;
/*
* Build list of devices to reset.
@@ -6153,7 +6213,7 @@ static int amdgpu_device_recovery_prepare(struct amdgpu_device *adev,
list_add_tail(&tmp_adev->reset_list, device_list);
if (adev->shutdown)
tmp_adev->shutdown = true;
- if (adev->pcie_reset_ctx.occurs_dpc)
+ if (amdgpu_reset_in_dpc(adev))
tmp_adev->pcie_reset_ctx.in_link_reset = true;
}
if (!list_is_first(&adev->reset_list, device_list))
@@ -6161,14 +6221,6 @@ static int amdgpu_device_recovery_prepare(struct amdgpu_device *adev,
} else {
list_add_tail(&adev->reset_list, device_list);
}
-
- if (!amdgpu_sriov_vf(adev) && (!adev->pcie_reset_ctx.occurs_dpc)) {
- r = amdgpu_device_health_check(device_list);
- if (r)
- return r;
- }
-
- return 0;
}
static void amdgpu_device_recovery_get_reset_lock(struct amdgpu_device *adev,
@@ -6237,9 +6289,8 @@ static void amdgpu_device_halt_activities(struct amdgpu_device *adev,
drm_client_dev_suspend(adev_to_drm(tmp_adev), false);
/* disable ras on ALL IPs */
- if (!need_emergency_restart &&
- (!adev->pcie_reset_ctx.occurs_dpc) &&
- amdgpu_device_ip_need_full_reset(tmp_adev))
+ if (!need_emergency_restart && !amdgpu_reset_in_dpc(adev) &&
+ amdgpu_device_ip_need_full_reset(tmp_adev))
amdgpu_ras_suspend(tmp_adev);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -6267,11 +6318,7 @@ static int amdgpu_device_asic_reset(struct amdgpu_device *adev,
retry: /* Rest of adevs pre asic reset from XGMI hive. */
list_for_each_entry(tmp_adev, device_list, reset_list) {
- if (adev->pcie_reset_ctx.occurs_dpc)
- tmp_adev->no_hw_access = true;
r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
- if (adev->pcie_reset_ctx.occurs_dpc)
- tmp_adev->no_hw_access = false;
/*TODO Should we stop ?*/
if (r) {
dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
@@ -6342,23 +6389,28 @@ static int amdgpu_device_sched_resume(struct list_head *device_list,
if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
- if (tmp_adev->asic_reset_res)
- r = tmp_adev->asic_reset_res;
-
- tmp_adev->asic_reset_res = 0;
-
- if (r) {
+ if (tmp_adev->asic_reset_res) {
/* bad news, how to tell it to userspace ?
* for ras error, we should report GPU bad status instead of
* reset failure
*/
if (reset_context->src != AMDGPU_RESET_SRC_RAS ||
!amdgpu_ras_eeprom_check_err_threshold(tmp_adev))
- dev_info(tmp_adev->dev, "GPU reset(%d) failed\n",
- atomic_read(&tmp_adev->gpu_reset_counter));
- amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
+ dev_info(tmp_adev->dev,
+ "GPU reset(%d) failed with error %d\n",
+ atomic_read(&tmp_adev->gpu_reset_counter),
+ tmp_adev->asic_reset_res);
+ amdgpu_vf_error_put(tmp_adev,
+ AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0,
+ tmp_adev->asic_reset_res);
+ if (!r)
+ r = tmp_adev->asic_reset_res;
+ tmp_adev->asic_reset_res = 0;
} else {
- dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
+ dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n",
+ atomic_read(&tmp_adev->gpu_reset_counter));
if (amdgpu_acpi_smart_shift_update(tmp_adev,
AMDGPU_SS_DEV_D0))
dev_warn(tmp_adev->dev,
@@ -6449,8 +6501,9 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
emergency_restart();
}
- dev_info(adev->dev, "GPU %s begin!\n",
- need_emergency_restart ? "jobs stop":"reset");
+ dev_info(adev->dev, "GPU %s begin! Source: %d\n",
+ need_emergency_restart ? "jobs stop" : "reset",
+ reset_context->src);
if (!amdgpu_sriov_vf(adev))
hive = amdgpu_get_xgmi_hive(adev);
@@ -6461,8 +6514,13 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
reset_context->hive = hive;
INIT_LIST_HEAD(&device_list);
- if (amdgpu_device_recovery_prepare(adev, &device_list, hive))
- goto end_reset;
+ amdgpu_device_recovery_prepare(adev, &device_list, hive);
+
+ if (!amdgpu_sriov_vf(adev)) {
+ r = amdgpu_device_health_check(&device_list);
+ if (r)
+ goto end_reset;
+ }
/* We need to lock reset domain only once both for XGMI and single device */
amdgpu_device_recovery_get_reset_lock(adev, &device_list);
@@ -6884,17 +6942,13 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+ struct amdgpu_hive_info *hive __free(xgmi_put_hive) =
+ amdgpu_get_xgmi_hive(adev);
struct amdgpu_reset_context reset_context;
struct list_head device_list;
dev_info(adev->dev, "PCI error: detected callback!!\n");
- if (!amdgpu_dpm_is_link_reset_supported(adev)) {
- dev_warn(adev->dev, "No support for XGMI hive yet...\n");
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
adev->pci_channel_state = state;
switch (state) {
@@ -6904,10 +6958,23 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
case pci_channel_io_frozen:
/* Fatal error, prepare for slot reset */
dev_info(adev->dev, "pci_channel_io_frozen: state(%d)!!\n", state);
+ if (hive) {
+ /* Hive devices should be able to support FW based
+ * link reset on other devices, if not return.
+ */
+ if (!amdgpu_dpm_is_link_reset_supported(adev)) {
+ dev_warn(adev->dev,
+ "No support for XGMI hive yet...\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ /* Set dpc status only if device is part of hive
+ * Non-hive devices should be able to recover after
+ * link reset.
+ */
+ amdgpu_reset_set_dpc_status(adev, true);
- if (hive)
mutex_lock(&hive->hive_lock);
- adev->pcie_reset_ctx.occurs_dpc = true;
+ }
memset(&reset_context, 0, sizeof(reset_context));
INIT_LIST_HEAD(&device_list);
@@ -6915,10 +6982,8 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
amdgpu_device_recovery_get_reset_lock(adev, &device_list);
amdgpu_device_halt_activities(adev, NULL, &reset_context, &device_list,
hive, false);
- if (hive) {
+ if (hive)
mutex_unlock(&hive->hive_lock);
- amdgpu_put_xgmi_hive(hive);
- }
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
/* Permanent error, prepare for device removal */
@@ -6966,22 +7031,34 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
struct amdgpu_device *tmp_adev;
struct amdgpu_hive_info *hive;
struct list_head device_list;
- int r = 0, i;
+ struct pci_dev *link_dev;
+ int r = 0, i, timeout;
u32 memsize;
-
- /* PCI error slot reset should be skipped During RAS recovery */
- if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) &&
- amdgpu_ras_in_recovery(adev))
- return PCI_ERS_RESULT_RECOVERED;
+ u16 status;
dev_info(adev->dev, "PCI error: slot reset callback!!\n");
memset(&reset_context, 0, sizeof(reset_context));
- /* wait for asic to come out of reset */
- msleep(700);
+ if (adev->pcie_reset_ctx.swus)
+ link_dev = adev->pcie_reset_ctx.swus;
+ else
+ link_dev = adev->pdev;
+ /* wait for asic to come out of reset, timeout = 10s */
+ timeout = 10000;
+ do {
+ usleep_range(10000, 10500);
+ r = pci_read_config_word(link_dev, PCI_VENDOR_ID, &status);
+ timeout -= 10;
+ } while (timeout > 0 && (status != PCI_VENDOR_ID_ATI) &&
+ (status != PCI_VENDOR_ID_AMD));
+
+ if ((status != PCI_VENDOR_ID_ATI) && (status != PCI_VENDOR_ID_AMD)) {
+ r = -ETIME;
+ goto out;
+ }
+ amdgpu_device_load_switch_state(adev);
/* Restore PCI confspace */
amdgpu_device_load_pci_state(pdev);
@@ -7076,7 +7153,6 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
amdgpu_device_sched_resume(&device_list, NULL, NULL);
amdgpu_device_gpu_resume(adev, &device_list, false);
amdgpu_device_recovery_put_reset_lock(adev, &device_list);
- adev->pcie_reset_ctx.occurs_dpc = false;
if (hive) {
mutex_unlock(&hive->hive_lock);
@@ -7084,6 +7160,65 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
}
}
+static void amdgpu_device_cache_switch_state(struct amdgpu_device *adev)
+{
+ struct pci_dev *swus, *swds;
+ int r;
+
+ swds = pci_upstream_bridge(adev->pdev);
+ if (!swds || swds->vendor != PCI_VENDOR_ID_ATI ||
+ pci_pcie_type(swds) != PCI_EXP_TYPE_DOWNSTREAM)
+ return;
+ swus = pci_upstream_bridge(swds);
+ if (!swus ||
+ (swus->vendor != PCI_VENDOR_ID_ATI &&
+ swus->vendor != PCI_VENDOR_ID_AMD) ||
+ pci_pcie_type(swus) != PCI_EXP_TYPE_UPSTREAM)
+ return;
+
+ /* If already saved, return */
+ if (adev->pcie_reset_ctx.swus)
+ return;
+ /* Upstream bridge is ATI, assume it's SWUS/DS architecture */
+ r = pci_save_state(swds);
+ if (r)
+ return;
+ adev->pcie_reset_ctx.swds_pcistate = pci_store_saved_state(swds);
+
+ r = pci_save_state(swus);
+ if (r)
+ return;
+ adev->pcie_reset_ctx.swus_pcistate = pci_store_saved_state(swus);
+
+ adev->pcie_reset_ctx.swus = swus;
+}
+
+static void amdgpu_device_load_switch_state(struct amdgpu_device *adev)
+{
+ struct pci_dev *pdev;
+ int r;
+
+ if (!adev->pcie_reset_ctx.swds_pcistate ||
+ !adev->pcie_reset_ctx.swus_pcistate)
+ return;
+
+ pdev = adev->pcie_reset_ctx.swus;
+ r = pci_load_saved_state(pdev, adev->pcie_reset_ctx.swus_pcistate);
+ if (!r) {
+ pci_restore_state(pdev);
+ } else {
+ dev_warn(adev->dev, "Failed to load SWUS state, err:%d\n", r);
+ return;
+ }
+
+ pdev = pci_upstream_bridge(adev->pdev);
+ r = pci_load_saved_state(pdev, adev->pcie_reset_ctx.swds_pcistate);
+ if (!r)
+ pci_restore_state(pdev);
+ else
+ dev_warn(adev->dev, "Failed to load SWDS state, err:%d\n", r);
+}
+
bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
@@ -7108,6 +7243,8 @@ bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
return false;
}
+ amdgpu_device_cache_switch_state(adev);
+
return true;
}
@@ -7494,3 +7631,53 @@ ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset)
size += sysfs_emit_at(buf, size, "\n");
return size;
}
+
+void amdgpu_device_set_uid(struct amdgpu_uid *uid_info,
+ enum amdgpu_uid_type type, uint8_t inst,
+ uint64_t uid)
+{
+ if (!uid_info)
+ return;
+
+ if (type >= AMDGPU_UID_TYPE_MAX) {
+ dev_err_once(uid_info->adev->dev, "Invalid UID type %d\n",
+ type);
+ return;
+ }
+
+ if (inst >= AMDGPU_UID_INST_MAX) {
+ dev_err_once(uid_info->adev->dev, "Invalid UID instance %d\n",
+ inst);
+ return;
+ }
+
+ if (uid_info->uid[type][inst] != 0) {
+ dev_warn_once(
+ uid_info->adev->dev,
+ "Overwriting existing UID %llu for type %d instance %d\n",
+ uid_info->uid[type][inst], type, inst);
+ }
+
+ uid_info->uid[type][inst] = uid;
+}
+
+u64 amdgpu_device_get_uid(struct amdgpu_uid *uid_info,
+ enum amdgpu_uid_type type, uint8_t inst)
+{
+ if (!uid_info)
+ return 0;
+
+ if (type >= AMDGPU_UID_TYPE_MAX) {
+ dev_err_once(uid_info->adev->dev, "Invalid UID type %d\n",
+ type);
+ return 0;
+ }
+
+ if (inst >= AMDGPU_UID_INST_MAX) {
+ dev_err_once(uid_info->adev->dev, "Invalid UID instance %d\n",
+ inst);
+ return 0;
+ }
+
+ return uid_info->uid[type][inst];
+}
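amdgpu_device_set_uid()/amdgpu_device_get_uid() above guard a (type, instance) table with bounds checks on both indices and treat 0 as unset. A minimal sketch of that table shape; the *_MAX bounds here are placeholders for the real AMDGPU_UID_*_MAX constants:

#include <stdint.h>
#include <stdio.h>

#define UID_TYPE_MAX 4  /* placeholder for AMDGPU_UID_TYPE_MAX */
#define UID_INST_MAX 8  /* placeholder for AMDGPU_UID_INST_MAX */

/* 2D table indexed by (type, instance); both indices bounds-checked,
 * 0 meaning "unset". */
struct uid_table {
        uint64_t uid[UID_TYPE_MAX][UID_INST_MAX];
};

static int uid_set(struct uid_table *t, unsigned type, unsigned inst,
                   uint64_t uid)
{
        if (type >= UID_TYPE_MAX || inst >= UID_INST_MAX)
                return -1;      /* invalid type or instance */
        t->uid[type][inst] = uid;
        return 0;
}

static uint64_t uid_get(const struct uid_table *t, unsigned type,
                        unsigned inst)
{
        if (type >= UID_TYPE_MAX || inst >= UID_INST_MAX)
                return 0;
        return t->uid[type][inst];
}

int main(void)
{
        struct uid_table t = { 0 };

        uid_set(&t, 1, 2, 0xabcdef);
        printf("%llx %llx\n",
               (unsigned long long)uid_get(&t, 1, 2),
               (unsigned long long)uid_get(&t, 3, 7)); /* unset: 0 */
        return 0;
}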
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 81b3443c8d7f..73401f0aeb34 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -276,7 +276,7 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
u32 msg;
if (!amdgpu_sriov_vf(adev)) {
- /* It can take up to a second for IFWI init to complete on some dGPUs,
+ /* It can take up to two seconds for IFWI init to complete on some dGPUs,
* but generally it should be in the 60-100ms range. Normally this starts
* as soon as the device gets power so by the time the OS loads this has long
* completed. However, when a card is hotplugged via e.g., USB4, we need to
@@ -284,7 +284,7 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
* continue.
*/
- for (i = 0; i < 1000; i++) {
+ for (i = 0; i < 2000; i++) {
msg = RREG32(mmMP0_SMN_C2PMSG_33);
if (msg & 0x80000000)
break;
@@ -2124,7 +2124,6 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 5):
case IP_VERSION(11, 0, 9):
case IP_VERSION(11, 0, 7):
- case IP_VERSION(11, 0, 8):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
@@ -2132,6 +2131,10 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 5, 2):
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
break;
+ case IP_VERSION(11, 0, 8):
+ if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2)
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+ break;
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
@@ -2555,40 +2558,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_RAVEN:
- case CHIP_VEGA20:
- case CHIP_ARCTURUS:
- case CHIP_ALDEBARAN:
- /* this is not fatal. We have a fallback below
- * if the new firmwares are not present. some of
- * this will be overridden below to keep things
- * consistent with the current behavior.
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
*/
- r = amdgpu_discovery_reg_base_init(adev);
- if (!r) {
- amdgpu_discovery_harvest_ip(adev);
- amdgpu_discovery_get_gfx_info(adev);
- amdgpu_discovery_get_mall_info(adev);
- amdgpu_discovery_get_vcn_info(adev);
- }
- break;
- default:
- r = amdgpu_discovery_reg_base_init(adev);
- if (r) {
- drm_err(&adev->ddev, "discovery failed: %d\n", r);
- return r;
- }
-
- amdgpu_discovery_harvest_ip(adev);
- amdgpu_discovery_get_gfx_info(adev);
- amdgpu_discovery_get_mall_info(adev);
- amdgpu_discovery_get_vcn_info(adev);
- break;
- }
-
- switch (adev->asic_type) {
- case CHIP_VEGA10:
+ amdgpu_discovery_init(adev);
vega10_reg_base_init(adev);
adev->sdma.num_instances = 2;
adev->gmc.num_umc = 4;
@@ -2611,6 +2585,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
break;
case CHIP_VEGA12:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
vega10_reg_base_init(adev);
adev->sdma.num_instances = 2;
adev->gmc.num_umc = 4;
@@ -2633,6 +2612,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
break;
case CHIP_RAVEN:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
vega10_reg_base_init(adev);
adev->sdma.num_instances = 1;
adev->vcn.num_vcn_inst = 1;
@@ -2674,6 +2658,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
}
break;
case CHIP_VEGA20:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
vega20_reg_base_init(adev);
adev->sdma.num_instances = 2;
adev->gmc.num_umc = 8;
@@ -2697,6 +2686,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
break;
case CHIP_ARCTURUS:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
arct_reg_base_init(adev);
adev->sdma.num_instances = 8;
adev->vcn.num_vcn_inst = 2;
@@ -2725,6 +2719,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
break;
case CHIP_ALDEBARAN:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
aldebaran_reg_base_init(adev);
adev->sdma.num_instances = 5;
adev->vcn.num_vcn_inst = 2;
@@ -2750,7 +2749,47 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
break;
+ case CHIP_CYAN_SKILLFISH:
+ if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
+ r = amdgpu_discovery_reg_base_init(adev);
+ if (r)
+ return -EINVAL;
+
+ amdgpu_discovery_harvest_ip(adev);
+ amdgpu_discovery_get_gfx_info(adev);
+ amdgpu_discovery_get_mall_info(adev);
+ amdgpu_discovery_get_vcn_info(adev);
+ } else {
+ cyan_skillfish_reg_base_init(adev);
+ adev->sdma.num_instances = 2;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(2, 0, 3);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(2, 0, 3);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(5, 0, 1);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(5, 0, 1);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(5, 0, 1);
+ adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(5, 0, 1);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 5, 0);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(2, 1, 1);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(8, 1, 1);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 8);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 8);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 1);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 8);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(10, 1, 3);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 0, 3);
+ }
+ break;
default:
+ r = amdgpu_discovery_reg_base_init(adev);
+ if (r) {
+ drm_err(&adev->ddev, "discovery failed: %d\n", r);
+ return r;
+ }
+
+ amdgpu_discovery_harvest_ip(adev);
+ amdgpu_discovery_get_gfx_info(adev);
+ amdgpu_discovery_get_mall_info(adev);
+ amdgpu_discovery_get_vcn_info(adev);
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index ff98c87b2e0b..8561ad7f6180 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -285,6 +285,36 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
return ret;
}
+static int amdgpu_dma_buf_vmap(struct dma_buf *dma_buf, struct iosys_map *map)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ int ret;
+
+ /*
+ * Pin to keep buffer in place while it's vmap'ed. The actual
+ * domain is not that important as long as it's mapable. Using
+ * GTT and VRAM should be compatible with most use cases.
+ */
+ ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM);
+ if (ret)
+ return ret;
+ ret = drm_gem_dmabuf_vmap(dma_buf, map);
+ if (ret)
+ amdgpu_bo_unpin(bo);
+
+ return ret;
+}
+
+static void amdgpu_dma_buf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+ drm_gem_dmabuf_vunmap(dma_buf, map);
+ amdgpu_bo_unpin(bo);
+}
+
const struct dma_buf_ops amdgpu_dmabuf_ops = {
.attach = amdgpu_dma_buf_attach,
.pin = amdgpu_dma_buf_pin,
@@ -294,8 +324,8 @@ const struct dma_buf_ops amdgpu_dmabuf_ops = {
.release = drm_gem_dmabuf_release,
.begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
.mmap = drm_gem_dmabuf_mmap,
- .vmap = drm_gem_dmabuf_vmap,
- .vunmap = drm_gem_dmabuf_vunmap,
+ .vmap = amdgpu_dma_buf_vmap,
+ .vunmap = amdgpu_dma_buf_vunmap,
};
/**
@@ -313,11 +343,23 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
struct dma_buf *buf;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = true,
+ /* We opt to avoid OOM on system pages allocations */
+ .gfp_retry_mayfail = true,
+ .allow_res_evict = false,
+ };
+ int ret;
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
return ERR_PTR(-EPERM);
+ ret = ttm_bo_setup_export(&bo->tbo, &ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
buf = drm_gem_prime_export(gobj, flags);
if (!IS_ERR(buf))
buf->ops = &amdgpu_dmabuf_ops;
@@ -514,7 +556,7 @@ bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
return false;
if (drm_gem_is_imported(obj)) {
- struct dma_buf *dma_buf = obj->dma_buf;
+ struct dma_buf *dma_buf = obj->import_attach->dmabuf;
if (dma_buf->ops != &amdgpu_dmabuf_ops)
/* No XGMI with non AMD GPUs */
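The vmap/vunmap wrappers above pin the BO before mapping so it cannot move while CPU-visible, unwind the pin if the map fails, and release in strict reverse order (unmap, then unpin) on teardown. A toy model of that bracketing; the struct and helpers are placeholders, not the TTM/GEM API:

#include <stdio.h>

struct buf { int pin_count; int mapped; };

static int buf_pin(struct buf *b)    { b->pin_count++; return 0; }
static void buf_unpin(struct buf *b) { b->pin_count--; }
static int buf_map(struct buf *b)    { b->mapped = 1; return 0; }
static void buf_unmap(struct buf *b) { b->mapped = 0; }

static int vmap(struct buf *b)
{
        int ret = buf_pin(b);   /* pin first: buffer must not move */

        if (ret)
                return ret;
        ret = buf_map(b);
        if (ret)
                buf_unpin(b);   /* unwind the pin on map failure */
        return ret;
}

static void vunmap(struct buf *b)
{
        buf_unmap(b);           /* reverse order: unmap, then unpin */
        buf_unpin(b);
}

int main(void)
{
        struct buf b = { 0 };

        vmap(&b);
        printf("pinned=%d mapped=%d\n", b.pin_count, b.mapped);
        vunmap(&b);
        printf("pinned=%d mapped=%d\n", b.pin_count, b.mapped);
        return 0;
}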
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 395c6be901ce..bff25ef3e2d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -886,7 +886,7 @@ module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444);
/**
* DOC: dcdebugmask (uint)
- * Override display features enabled. See enum DC_DEBUG_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
+ * Display debug options. See enum DC_DEBUG_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
*/
MODULE_PARM_DESC(dcdebugmask, "all debug options disabled (default)");
module_param_named(dcdebugmask, amdgpu_dc_debug_mask, uint, 0444);
@@ -960,7 +960,7 @@ module_param_named(tmz, amdgpu_tmz, int, 0444);
*/
MODULE_PARM_DESC(
freesync_video,
- "Enable freesync modesetting optimization feature (0 = off (default), 1 = on)");
+ "Adds additional modes via VRR for refresh changes without a full modeset (0 = off (default), 1 = on)");
module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);
/**
@@ -2172,6 +2172,11 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x7410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN},
/* CYAN_SKILLFISH */
+ {0x1002, 0x13DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
+ {0x1002, 0x13F9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
+ {0x1002, 0x13FA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
+ {0x1002, 0x13FB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
+ {0x1002, 0x13FC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
{0x1002, 0x13FE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
{0x1002, 0x143F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
@@ -2597,6 +2602,7 @@ static int amdgpu_pmops_suspend(struct device *dev)
else if (amdgpu_acpi_is_s3_active(adev))
adev->in_s3 = true;
if (!adev->in_s0ix && !adev->in_s3) {
+#if IS_ENABLED(CONFIG_SUSPEND)
/* don't allow going deep first time followed by s2idle the next time */
if (adev->last_suspend_state != PM_SUSPEND_ON &&
adev->last_suspend_state != pm_suspend_target_state) {
@@ -2604,11 +2610,14 @@ static int amdgpu_pmops_suspend(struct device *dev)
pm_suspend_target_state);
return -EINVAL;
}
+#endif
return 0;
}
+#if IS_ENABLED(CONFIG_SUSPEND)
/* cache the state last used for suspend */
adev->last_suspend_state = pm_suspend_target_state;
+#endif
return amdgpu_device_suspend(drm_dev, true);
}
@@ -2665,7 +2674,7 @@ static int amdgpu_pmops_thaw(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
/* do not resume device if it's normal hibernation */
- if (!pm_hibernate_is_recovering())
+ if (!pm_hibernate_is_recovering() && !pm_hibernation_mode_is_suspend())
return 0;
return amdgpu_device_resume(drm_dev, true);
@@ -2933,11 +2942,14 @@ static int amdgpu_drm_release(struct inode *inode, struct file *filp)
{
struct drm_file *file_priv = filp->private_data;
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
+ struct drm_device *dev = file_priv->minor->dev;
+ int idx;
- if (fpriv) {
+ if (fpriv && drm_dev_enter(dev, &idx)) {
fpriv->evf_mgr.fd_closing = true;
amdgpu_eviction_fence_destroy(&fpriv->evf_mgr);
amdgpu_userq_mgr_fini(&fpriv->userq_mgr);
+ drm_dev_exit(idx);
}
return drm_release(inode, filp);
@@ -2964,15 +2976,15 @@ out:
}
static const struct dev_pm_ops amdgpu_pm_ops = {
- .prepare = amdgpu_pmops_prepare,
- .complete = amdgpu_pmops_complete,
- .suspend = amdgpu_pmops_suspend,
- .suspend_noirq = amdgpu_pmops_suspend_noirq,
- .resume = amdgpu_pmops_resume,
- .freeze = amdgpu_pmops_freeze,
- .thaw = amdgpu_pmops_thaw,
- .poweroff = amdgpu_pmops_poweroff,
- .restore = amdgpu_pmops_restore,
+ .prepare = pm_sleep_ptr(amdgpu_pmops_prepare),
+ .complete = pm_sleep_ptr(amdgpu_pmops_complete),
+ .suspend = pm_sleep_ptr(amdgpu_pmops_suspend),
+ .suspend_noirq = pm_sleep_ptr(amdgpu_pmops_suspend_noirq),
+ .resume = pm_sleep_ptr(amdgpu_pmops_resume),
+ .freeze = pm_sleep_ptr(amdgpu_pmops_freeze),
+ .thaw = pm_sleep_ptr(amdgpu_pmops_thaw),
+ .poweroff = pm_sleep_ptr(amdgpu_pmops_poweroff),
+ .restore = pm_sleep_ptr(amdgpu_pmops_restore),
.runtime_suspend = amdgpu_pmops_runtime_suspend,
.runtime_resume = amdgpu_pmops_runtime_resume,
.runtime_idle = amdgpu_pmops_runtime_idle,
@@ -3044,6 +3056,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_USERQ, amdgpu_userq_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_SIGNAL, amdgpu_userq_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_WAIT, amdgpu_userq_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_LIST_HANDLES, amdgpu_gem_list_handles_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct drm_driver amdgpu_kms_driver = {
@@ -3117,7 +3130,7 @@ static struct pci_driver amdgpu_kms_pci_driver = {
.probe = amdgpu_pci_probe,
.remove = amdgpu_pci_remove,
.shutdown = amdgpu_pci_shutdown,
- .driver.pm = &amdgpu_pm_ops,
+ .driver.pm = pm_ptr(&amdgpu_pm_ops),
.err_handler = &amdgpu_pci_err_handler,
.dev_groups = amdgpu_sysfs_groups,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
index 91d638098889..b349bb3676d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
@@ -70,6 +70,7 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
[AMDGPU_PL_GWS] = "gws",
[AMDGPU_PL_OA] = "oa",
[AMDGPU_PL_DOORBELL] = "doorbell",
+ [AMDGPU_PL_MMIO_REMAP] = "mmioremap",
};
unsigned int hw_ip, i;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 9e7506965cab..fd8cca241da6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -120,7 +120,6 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
am_fence = kzalloc(sizeof(*am_fence), GFP_KERNEL);
if (!am_fence)
return -ENOMEM;
- am_fence->context = 0;
} else {
am_fence = af;
}
@@ -738,7 +737,7 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
}
-/**
+/*
* Kernel queue reset handling
*
* The driver can reset individual queues for most engines, but those queues
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 6626a6e64ff5..b7ebae289bea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -317,7 +317,8 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
*/
if (!vm->is_compute_context || !vm->process_info)
return 0;
- if (!drm_gem_is_imported(obj) || !dma_buf_is_dynamic(obj->dma_buf))
+ if (!drm_gem_is_imported(obj) ||
+ !dma_buf_is_dynamic(obj->import_attach->dmabuf))
return 0;
mutex_lock_nested(&vm->process_info->lock, 1);
if (!WARN_ON(!vm->process_info->eviction_fence)) {
@@ -442,15 +443,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
int r;
/* reject invalid gem flags */
- if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
- AMDGPU_GEM_CREATE_CPU_GTT_USWC |
- AMDGPU_GEM_CREATE_VRAM_CLEARED |
- AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
- AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
- AMDGPU_GEM_CREATE_ENCRYPTED |
- AMDGPU_GEM_CREATE_GFX12_DCC |
- AMDGPU_GEM_CREATE_DISCARDABLE))
+ if (flags & ~AMDGPU_GEM_CREATE_SETTABLE_MASK)
return -EINVAL;
/* reject invalid gem domains */
@@ -465,6 +458,9 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
/* always clear VRAM */
flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
+ if (args->in.domains & AMDGPU_GEM_DOMAIN_MMIO_REMAP)
+ return -EINVAL;
+
/* create a gem object to contain this object in */
if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
@@ -576,8 +572,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
goto release_object;
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
- r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
- &range);
+ r = amdgpu_ttm_tt_get_user_pages(bo, &range);
if (r)
goto release_object;
@@ -585,6 +580,8 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
if (r)
goto user_pages_done;
+ amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);
+
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
amdgpu_bo_unreserve(bo);
@@ -790,36 +787,6 @@ error:
return fence;
}
-/**
- * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
- *
- * @adev: amdgpu_device pointer
- * @flags: GEM UAPI flags
- *
- * Returns the GEM UAPI flags mapped into hardware for the ASIC.
- */
-uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
-{
- uint64_t pte_flag = 0;
-
- if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
- pte_flag |= AMDGPU_PTE_EXECUTABLE;
- if (flags & AMDGPU_VM_PAGE_READABLE)
- pte_flag |= AMDGPU_PTE_READABLE;
- if (flags & AMDGPU_VM_PAGE_WRITEABLE)
- pte_flag |= AMDGPU_PTE_WRITEABLE;
- if (flags & AMDGPU_VM_PAGE_PRT)
- pte_flag |= AMDGPU_PTE_PRT_FLAG(adev);
- if (flags & AMDGPU_VM_PAGE_NOALLOC)
- pte_flag |= AMDGPU_PTE_NOALLOC;
-
- if (adev->gmc.gmc_funcs->map_mtype)
- pte_flag |= amdgpu_gmc_map_mtype(adev,
- flags & AMDGPU_VM_MTYPE_MASK);
-
- return pte_flag;
-}
-
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -840,7 +807,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct dma_fence_chain *timeline_chain = NULL;
struct dma_fence *fence;
struct drm_exec exec;
- uint64_t va_flags;
uint64_t vm_size;
int r = 0;
@@ -944,10 +910,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
switch (args->operation) {
case AMDGPU_VA_OP_MAP:
- va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
- va_flags);
+ args->flags);
break;
case AMDGPU_VA_OP_UNMAP:
r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
@@ -959,10 +924,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
args->map_size);
break;
case AMDGPU_VA_OP_REPLACE:
- va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
- va_flags);
+ args->flags);
break;
default:
break;
@@ -996,17 +960,34 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_gem_object *gobj;
struct amdgpu_vm_bo_base *base;
struct amdgpu_bo *robj;
+ struct drm_exec exec;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
int r;
+ if (args->padding)
+ return -EINVAL;
+
gobj = drm_gem_object_lookup(filp, args->handle);
if (!gobj)
return -ENOENT;
robj = gem_to_amdgpu_bo(gobj);
- r = amdgpu_bo_reserve(robj, false);
- if (unlikely(r))
- goto out;
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+ DRM_EXEC_IGNORE_DUPLICATES, 0);
+ drm_exec_until_all_locked(&exec) {
+ r = drm_exec_lock_obj(&exec, gobj);
+ drm_exec_retry_on_contention(&exec);
+ if (r)
+ goto out_exec;
+
+ if (args->op == AMDGPU_GEM_OP_GET_MAPPING_INFO) {
+ r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 0);
+ drm_exec_retry_on_contention(&exec);
+ if (r)
+ goto out_exec;
+ }
+ }
switch (args->op) {
case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
@@ -1017,7 +998,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
info.alignment = robj->tbo.page_alignment << PAGE_SHIFT;
info.domains = robj->preferred_domains;
info.domain_flags = robj->flags;
- amdgpu_bo_unreserve(robj);
+ drm_exec_fini(&exec);
if (copy_to_user(out, &info, sizeof(info)))
r = -EFAULT;
break;
@@ -1026,20 +1007,17 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
if (drm_gem_is_imported(&robj->tbo.base) &&
args->value & AMDGPU_GEM_DOMAIN_VRAM) {
r = -EINVAL;
- amdgpu_bo_unreserve(robj);
- break;
+ goto out_exec;
}
if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
r = -EPERM;
- amdgpu_bo_unreserve(robj);
- break;
+ goto out_exec;
}
for (base = robj->vm_bo; base; base = base->next)
if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) {
r = -EINVAL;
- amdgpu_bo_unreserve(robj);
- goto out;
+ goto out_exec;
}
@@ -1052,17 +1030,146 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
amdgpu_vm_bo_invalidate(robj, true);
+ drm_exec_fini(&exec);
+ break;
+ case AMDGPU_GEM_OP_GET_MAPPING_INFO: {
+ struct amdgpu_bo_va *bo_va = amdgpu_vm_bo_find(&fpriv->vm, robj);
+ struct drm_amdgpu_gem_vm_entry *vm_entries;
+ struct amdgpu_bo_va_mapping *mapping;
+ int num_mappings = 0;
+ /*
+	 * On input, num_entries is the size of the user-allocated array of
+	 * drm_amdgpu_gem_vm_entry stored at args->value. On output it is the
+	 * number of mappings the BO has; if that exceeds the array size, the
+	 * ioctl must be retried with a larger array.
+ */
+ vm_entries = kvcalloc(args->num_entries, sizeof(*vm_entries), GFP_KERNEL);
+ if (!vm_entries)
+ return -ENOMEM;
+
+ amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) {
+ if (num_mappings < args->num_entries) {
+ vm_entries[num_mappings].addr = mapping->start * AMDGPU_GPU_PAGE_SIZE;
+ vm_entries[num_mappings].size = (mapping->last - mapping->start + 1) * AMDGPU_GPU_PAGE_SIZE;
+ vm_entries[num_mappings].offset = mapping->offset;
+ vm_entries[num_mappings].flags = mapping->flags;
+ }
+ num_mappings += 1;
+ }
+
+ amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) {
+ if (num_mappings < args->num_entries) {
+ vm_entries[num_mappings].addr = mapping->start * AMDGPU_GPU_PAGE_SIZE;
+ vm_entries[num_mappings].size = (mapping->last - mapping->start + 1) * AMDGPU_GPU_PAGE_SIZE;
+ vm_entries[num_mappings].offset = mapping->offset;
+ vm_entries[num_mappings].flags = mapping->flags;
+ }
+ num_mappings += 1;
+ }
+
+ drm_exec_fini(&exec);
+
+	if (num_mappings > 0 && num_mappings <= args->num_entries &&
+	    copy_to_user(u64_to_user_ptr(args->value), vm_entries,
+			 num_mappings * sizeof(*vm_entries)))
+		r = -EFAULT;
+
+ args->num_entries = num_mappings;
- amdgpu_bo_unreserve(robj);
+ kvfree(vm_entries);
break;
+ }
default:
- amdgpu_bo_unreserve(robj);
+ drm_exec_fini(&exec);
r = -EINVAL;
}
-out:
drm_gem_object_put(gobj);
return r;
+out_exec:
+ drm_exec_fini(&exec);
+ drm_gem_object_put(gobj);
+ return r;
+}
+
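
The in/out num_entries contract above implies a retry loop on the userspace side. A rough sketch, assuming the uapi struct gains the num_entries/padding fields this patch uses (initial array size illustrative):

	/* Hypothetical userspace caller of AMDGPU_GEM_OP_GET_MAPPING_INFO. */
	struct drm_amdgpu_gem_vm_entry *entries;
	struct drm_amdgpu_gem_op op = {
		.handle = handle,
		.op = AMDGPU_GEM_OP_GET_MAPPING_INFO,
		.num_entries = 4,		/* initial guess */
	};

	for (;;) {
		uint32_t room = op.num_entries;

		entries = calloc(room, sizeof(*entries));
		op.value = (uintptr_t)entries;
		if (drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_OP, &op))
			break;		/* errno holds the error */
		if (op.num_entries <= room)
			break;		/* entries[] now holds every mapping */
		free(entries);		/* too small, retry with the real count */
	}
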
+/**
+ * amdgpu_gem_list_handles_ioctl - get information about a process' buffer objects
+ *
+ * @dev: drm device pointer
+ * @data: drm_amdgpu_gem_list_handles
+ * @filp: drm file pointer
+ *
+ * On input, num_entries is the size of the entries array; on output it is
+ * the number of BOs the process owns. If that exceeds the array size, the
+ * ioctl must be retried with a larger array.
+ *
+ * Returns:
+ * 0 for success, -errno for errors.
+ */
+int amdgpu_gem_list_handles_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct drm_amdgpu_gem_list_handles *args = data;
+ struct drm_amdgpu_gem_list_handles_entry *bo_entries;
+ struct drm_gem_object *gobj;
+ int id, ret = 0;
+ int bo_index = 0;
+ int num_bos = 0;
+
+ spin_lock(&filp->table_lock);
+ idr_for_each_entry(&filp->object_idr, gobj, id)
+ num_bos += 1;
+ spin_unlock(&filp->table_lock);
+
+ if (args->num_entries < num_bos) {
+ args->num_entries = num_bos;
+ return 0;
+ }
+
+ if (num_bos == 0) {
+ args->num_entries = 0;
+ return 0;
+ }
+
+ bo_entries = kvcalloc(num_bos, sizeof(*bo_entries), GFP_KERNEL);
+ if (!bo_entries)
+ return -ENOMEM;
+
+ spin_lock(&filp->table_lock);
+ idr_for_each_entry(&filp->object_idr, gobj, id) {
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
+ struct drm_amdgpu_gem_list_handles_entry *bo_entry;
+
+ if (bo_index >= num_bos) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ bo_entry = &bo_entries[bo_index];
+
+ bo_entry->size = amdgpu_bo_size(bo);
+ bo_entry->alloc_flags = bo->flags & AMDGPU_GEM_CREATE_SETTABLE_MASK;
+ bo_entry->preferred_domains = bo->preferred_domains;
+ bo_entry->gem_handle = id;
+ bo_entry->alignment = bo->tbo.page_alignment;
+
+ if (bo->tbo.base.import_attach)
+ bo_entry->flags |= AMDGPU_GEM_LIST_HANDLES_FLAG_IS_IMPORT;
+
+ bo_index += 1;
+ }
+ spin_unlock(&filp->table_lock);
+
+ args->num_entries = bo_index;
+
+	if (!ret &&
+	    copy_to_user(u64_to_user_ptr(args->entries), bo_entries,
+			 num_bos * sizeof(*bo_entries)))
+		ret = -EFAULT;
+
+ kvfree(bo_entries);
+
+ return ret;
}
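
Userspace would drive the new ioctl with the same grow-and-retry pattern; a hypothetical consumer probes with num_entries = 0 first and loops, since BOs may be created between the two calls:

	/* Hypothetical consumer of DRM_IOCTL_AMDGPU_GEM_LIST_HANDLES. */
	struct drm_amdgpu_gem_list_handles args = { .num_entries = 0 };
	struct drm_amdgpu_gem_list_handles_entry *bos = NULL;
	uint32_t room = 0;

	do {
		free(bos);
		room = args.num_entries;	/* count reported by the kernel */
		bos = calloc(room ? room : 1, sizeof(*bos));
		args.entries = (uintptr_t)bos;
		args.num_entries = room;
		if (drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_LIST_HANDLES, &args))
			break;
	} while (args.num_entries > room);	/* more BOs appeared, go again */
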
static int amdgpu_gem_align_pitch(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
index 3a8f57900a3a..b558336bc4c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
@@ -63,13 +63,28 @@ int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
-uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags);
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
+int amdgpu_gem_list_handles_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
+#define AMDGPU_GEM_CREATE_SETTABLE_MASK (AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | \
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS | \
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC | \
+ AMDGPU_GEM_CREATE_VRAM_CLEARED | \
+ AMDGPU_GEM_CREATE_VM_ALWAYS_VALID | \
+ AMDGPU_GEM_CREATE_EXPLICIT_SYNC | \
+ AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE | \
+ AMDGPU_GEM_CREATE_ENCRYPTED | \
+ AMDGPU_GEM_CREATE_GFX12_DCC | \
+ AMDGPU_GEM_CREATE_DISCARDABLE | \
+ AMDGPU_GEM_CREATE_COHERENT | \
+ AMDGPU_GEM_CREATE_UNCACHED | \
+ AMDGPU_GEM_CREATE_EXT_COHERENT)
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index c80c8f543532..ebe2b4c68b0f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -1102,6 +1102,9 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_
might_sleep();
while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+ if (amdgpu_in_reset(adev))
+ goto failed_kiq_read;
+
msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
}
@@ -1171,6 +1174,8 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint3
might_sleep();
while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+ if (amdgpu_in_reset(adev))
+ goto failed_kiq_write;
msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
@@ -1474,7 +1479,8 @@ static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
owner = (void *)(unsigned long)atomic_inc_return(&counter);
r = amdgpu_job_alloc_with_ib(ring->adev, &entity, owner,
- 64, 0, &job);
+ 64, 0, &job,
+ AMDGPU_KERNEL_JOB_ID_CLEANER_SHADER);
if (r)
goto err;
@@ -2279,7 +2285,7 @@ void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring)
* Return:
* return the latest index.
*/
-u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer)
+u32 amdgpu_gfx_csb_preamble_start(u32 *buffer)
{
u32 count = 0;
@@ -2303,7 +2309,7 @@ u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer)
* Return:
* return the latest index.
*/
-u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer, u32 count)
+u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, u32 *buffer, u32 count)
{
const struct cs_section_def *sect = NULL;
const struct cs_extent_def *ext = NULL;
@@ -2330,7 +2336,7 @@ u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer,
* @buffer: This is an output variable that gets the PACKET3 preamble end.
* @count: Index at which to set the preamble end.
*/
-void amdgpu_gfx_csb_preamble_end(volatile u32 *buffer, u32 count)
+void amdgpu_gfx_csb_preamble_end(u32 *buffer, u32 count)
{
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 08f268dab8f5..fb5f7a0ee029 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -642,9 +642,9 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring);
void amdgpu_gfx_profile_idle_work_handler(struct work_struct *work);
void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring);
-u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer);
-u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer, u32 count);
-void amdgpu_gfx_csb_preamble_end(volatile u32 *buffer, u32 count);
+u32 amdgpu_gfx_csb_preamble_start(u32 *buffer);
+u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, u32 *buffer, u32 count);
+void amdgpu_gfx_csb_preamble_end(u32 *buffer, u32 count);
void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev);
void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 97b562a79ea8..9dcf51991b5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -690,7 +690,7 @@ void amdgpu_gmc_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.high_pr,
AMDGPU_FENCE_OWNER_UNDEFINED,
16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_FLUSH_GPU_TLB);
if (r)
goto error_alloc;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 397c6ccdb903..55097ca10738 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -154,15 +154,15 @@ struct amdgpu_gmc_funcs {
unsigned pasid);
/* enable/disable PRT support */
void (*set_prt)(struct amdgpu_device *adev, bool enable);
- /* map mtype to hardware flags */
- uint64_t (*map_mtype)(struct amdgpu_device *adev, uint32_t flags);
/* get the pde for a given mc addr */
void (*get_vm_pde)(struct amdgpu_device *adev, int level,
u64 *dst, u64 *flags);
- /* get the pte flags to use for a BO VA mapping */
+ /* get the pte flags to use for PTEs */
void (*get_vm_pte)(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
- uint64_t *flags);
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
+ uint64_t *pte_flags);
/* override per-page pte flags */
void (*override_vm_pte_flags)(struct amdgpu_device *dev,
struct amdgpu_vm *vm,
@@ -356,9 +356,10 @@ struct amdgpu_gmc {
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
-#define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags))
#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
-#define amdgpu_gmc_get_vm_pte(adev, mapping, flags) (adev)->gmc.gmc_funcs->get_vm_pte((adev), (mapping), (flags))
+#define amdgpu_gmc_get_vm_pte(adev, vm, bo, vm_flags, pte_flags) \
+ ((adev)->gmc.gmc_funcs->get_vm_pte((adev), (vm), (bo), (vm_flags), \
+ (pte_flags)))
#define amdgpu_gmc_override_vm_pte_flags(adev, vm, addr, pte_flags) \
(adev)->gmc.gmc_funcs->override_vm_pte_flags \
((adev), (vm), (addr), (pte_flags))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
index e36fede7f74c..2c6a6b858112 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
@@ -167,13 +167,12 @@ void amdgpu_hmm_unregister(struct amdgpu_bo *bo)
int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
uint64_t start, uint64_t npages, bool readonly,
- void *owner, struct page **pages,
+ void *owner,
struct hmm_range **phmm_range)
{
struct hmm_range *hmm_range;
unsigned long end;
unsigned long timeout;
- unsigned long i;
unsigned long *pfns;
int r = 0;
@@ -222,14 +221,6 @@ retry:
hmm_range->start = start;
hmm_range->hmm_pfns = pfns;
- /*
- * Due to default_flags, all pages are HMM_PFN_VALID or
- * hmm_range_fault() fails. FIXME: The pages cannot be touched outside
- * the notifier_lock, and mmu_interval_read_retry() must be done first.
- */
- for (i = 0; pages && i < npages; i++)
- pages[i] = hmm_pfn_to_page(pfns[i]);
-
*phmm_range = hmm_range;
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
index e2edcd010ccc..953e1d06de20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
@@ -33,7 +33,7 @@
int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
uint64_t start, uint64_t npages, bool readonly,
- void *owner, struct page **pages,
+ void *owner,
struct hmm_range **phmm_range);
bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index 57101d24422f..9cb72f0c5277 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -184,7 +184,7 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
"AMDGPU i2c hw bus %s", name);
i2c->adapter.algo = &amdgpu_atombios_i2c_algo;
- ret = i2c_add_adapter(&i2c->adapter);
+ ret = devm_i2c_add_adapter(dev->dev, &i2c->adapter);
if (ret)
goto out_free;
} else {
@@ -215,15 +215,6 @@ out_free:
}
-void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c)
-{
- if (!i2c)
- return;
- WARN_ON(i2c->has_aux);
- i2c_del_adapter(&i2c->adapter);
- kfree(i2c);
-}
-
void amdgpu_i2c_init(struct amdgpu_device *adev)
{
if (!adev->is_atom_fw) {
@@ -248,12 +239,9 @@ void amdgpu_i2c_fini(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) {
- if (adev->i2c_bus[i]) {
- amdgpu_i2c_destroy(adev->i2c_bus[i]);
+ for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++)
+ if (adev->i2c_bus[i])
adev->i2c_bus[i] = NULL;
- }
- }
}
/* looks up bus based on id */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 5dd78a9cb12d..3ef5bc95642c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -275,13 +275,12 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->vm_hub;
- struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
uint64_t fence_context = adev->fence_context + ring->idx;
bool needs_flush = vm->use_cpu_for_update;
uint64_t updates = amdgpu_vm_tlb_seq(vm);
int r;
- *id = id_mgr->reserved;
+ *id = vm->reserved_vmid[vmhub];
if ((*id)->owner != vm->immediate.fence_context ||
!amdgpu_vmid_compatible(*id, job) ||
(*id)->flushed_updates < updates ||
@@ -474,40 +473,61 @@ bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub)
return vm->reserved_vmid[vmhub];
}
-int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
+/*
+ * amdgpu_vmid_alloc_reserved - reserve a specific VMID for this vm
+ * @adev: amdgpu device structure
+ * @vm: the VM to reserve an ID for
+ * @vmhub: the VMHUB which should be used
+ *
+ * Mostly used to have a reserved VMID for debugging and SPM.
+ *
+ * Returns: 0 for success, -ENOENT if an ID is already reserved.
+ */
+int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned vmhub)
{
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+ struct amdgpu_vmid *id;
+ int r = 0;
mutex_lock(&id_mgr->lock);
-
- ++id_mgr->reserved_use_count;
- if (!id_mgr->reserved) {
- struct amdgpu_vmid *id;
-
- id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid,
- list);
- /* Remove from normal round robin handling */
- list_del_init(&id->list);
- id_mgr->reserved = id;
+ if (vm->reserved_vmid[vmhub])
+ goto unlock;
+ if (id_mgr->reserved_vmid) {
+ r = -ENOENT;
+ goto unlock;
}
-
+ /* Remove from normal round robin handling */
+ id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
+ list_del_init(&id->list);
+ vm->reserved_vmid[vmhub] = id;
+ id_mgr->reserved_vmid = true;
mutex_unlock(&id_mgr->lock);
+
return 0;
+unlock:
+ mutex_unlock(&id_mgr->lock);
+ return r;
}
-void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
+/*
+ * amdgpu_vmid_free_reserved - free up a reserved VMID again
+ * @adev: amdgpu device structure
+ * @vm: the VM with the reserved ID
+ * @vmhub: the VMHUB which should be used
+ */
+void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned vmhub)
{
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
mutex_lock(&id_mgr->lock);
- if (!--id_mgr->reserved_use_count) {
- /* give the reserved ID back to normal round robin */
- list_add(&id_mgr->reserved->list, &id_mgr->ids_lru);
- id_mgr->reserved = NULL;
+ if (vm->reserved_vmid[vmhub]) {
+ list_add(&vm->reserved_vmid[vmhub]->list,
+ &id_mgr->ids_lru);
+ vm->reserved_vmid[vmhub] = NULL;
+ id_mgr->reserved_vmid = false;
}
-
mutex_unlock(&id_mgr->lock);
}
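
With the reservation now tracked per VM, a typical debugging/SPM user would pair the two helpers like this (hub index illustrative, error handling trimmed):

	/* Sketch: take a stable VMID for this VM on GFX hub 0, use it for
	 * SPM-style debugging, then hand it back to the round-robin LRU.
	 */
	r = amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB(0));
	if (!r) {
		/* ... submissions relying on the fixed VMID ... */
		amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB(0));
	}
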
@@ -574,7 +594,6 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
mutex_init(&id_mgr->lock);
INIT_LIST_HEAD(&id_mgr->ids_lru);
- id_mgr->reserved_use_count = 0;
/* for GC <10, SDMA uses MMHUB so use first_kfd_vmid for both GC and MM */
if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 0, 0))
@@ -594,11 +613,6 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
}
}
- /* alloc a default reserved vmid to enforce isolation */
- for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
- if (adev->enforce_isolation[i] != AMDGPU_ENFORCE_ISOLATION_DISABLE)
- amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
- }
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index 240fa6751260..b3649cd3af56 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -67,8 +67,7 @@ struct amdgpu_vmid_mgr {
unsigned num_ids;
struct list_head ids_lru;
struct amdgpu_vmid ids[AMDGPU_NUM_VMID];
- struct amdgpu_vmid *reserved;
- unsigned int reserved_use_count;
+ bool reserved_vmid;
};
int amdgpu_pasid_alloc(unsigned int bits);
@@ -79,10 +78,10 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
struct amdgpu_vmid *id);
bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub);
-int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
- unsigned vmhub);
-void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
- unsigned vmhub);
+int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ unsigned vmhub);
+void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ unsigned vmhub);
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_job *job, struct dma_fence **fence);
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 7f7ea046e209..f58b6be7fccc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -56,14 +56,14 @@ struct amdgpu_ih_ring {
bool use_bus_addr;
struct amdgpu_bo *ring_obj;
- volatile uint32_t *ring;
+ uint32_t *ring;
uint64_t gpu_addr;
uint64_t wptr_addr;
- volatile uint32_t *wptr_cpu;
+ uint32_t *wptr_cpu;
uint64_t rptr_addr;
- volatile uint32_t *rptr_cpu;
+ uint32_t *rptr_cpu;
bool enabled;
unsigned rptr;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index e6061d45f142..d020a890a0ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -209,11 +209,12 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
struct drm_sched_entity *entity, void *owner,
size_t size, enum amdgpu_ib_pool_type pool_type,
- struct amdgpu_job **job)
+ struct amdgpu_job **job, u64 k_job_id)
{
int r;
- r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job, 0);
+ r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job,
+ k_job_id);
if (r)
return r;
@@ -365,13 +366,6 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
dev_err(ring->adev->dev, "Error getting VM ID (%d)\n", r);
goto error;
}
- /*
- * The VM structure might be released after the VMID is
- * assigned, we had multiple problems with people trying to use
- * the VM pointer so better set it to NULL.
- */
- if (!fence)
- job->vm = NULL;
return fence;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index 2f302266662b..4a6487eb6cb5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -44,6 +44,22 @@
struct amdgpu_fence;
enum amdgpu_ib_pool_type;
+/* Internal kernel job IDs, allocated downwards from U64_MAX. */
+#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE			(U64_MAX)
+#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE_PDES		(U64_MAX - 1)
+#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE_RANGE		(U64_MAX - 2)
+#define AMDGPU_KERNEL_JOB_ID_VM_PT_CLEAR		(U64_MAX - 3)
+#define AMDGPU_KERNEL_JOB_ID_TTM_MAP_BUFFER		(U64_MAX - 4)
+#define AMDGPU_KERNEL_JOB_ID_TTM_ACCESS_MEMORY_SDMA	(U64_MAX - 5)
+#define AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER		(U64_MAX - 6)
+#define AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE		(U64_MAX - 7)
+#define AMDGPU_KERNEL_JOB_ID_MOVE_BLIT			(U64_MAX - 8)
+#define AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER		(U64_MAX - 9)
+#define AMDGPU_KERNEL_JOB_ID_CLEANER_SHADER		(U64_MAX - 10)
+#define AMDGPU_KERNEL_JOB_ID_FLUSH_GPU_TLB		(U64_MAX - 11)
+#define AMDGPU_KERNEL_JOB_ID_KFD_GART_MAP		(U64_MAX - 12)
+#define AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST		(U64_MAX - 13)
+
struct amdgpu_job {
struct drm_sched_job base;
struct amdgpu_vm *vm;
@@ -96,7 +112,8 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
struct drm_sched_entity *entity, void *owner,
size_t size, enum amdgpu_ib_pool_type pool_type,
- struct amdgpu_job **job);
+ struct amdgpu_job **job,
+ u64 k_job_id);
void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
struct amdgpu_bo *gws, struct amdgpu_bo *oa);
void amdgpu_job_free_resources(struct amdgpu_job *job);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index 82d58ac7afb0..6b7d66b6d4cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -121,10 +121,12 @@ static void amdgpu_jpeg_idle_work_handler(struct work_struct *work)
fences += amdgpu_fence_count_emitted(&adev->jpeg.inst[i].ring_dec[j]);
}
- if (!fences && !atomic_read(&adev->jpeg.total_submission_cnt))
+ if (!fences && !atomic_read(&adev->jpeg.total_submission_cnt)) {
+ mutex_lock(&adev->jpeg.jpeg_pg_lock);
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_JPEG,
AMD_PG_STATE_GATE);
- else
+ mutex_unlock(&adev->jpeg.jpeg_pg_lock);
+ } else
schedule_delayed_work(&adev->jpeg.idle_work, JPEG_IDLE_TIMEOUT);
}
@@ -194,7 +196,8 @@ static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle,
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ AMDGPU_IB_POOL_DIRECT, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -368,7 +371,7 @@ static int amdgpu_debugfs_jpeg_sched_mask_set(void *data, u64 val)
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
ring = &adev->jpeg.inst[i].ring_dec[j];
- if (val & (1 << ((i * adev->jpeg.num_jpeg_rings) + j)))
+ if (val & BIT_ULL((i * adev->jpeg.num_jpeg_rings) + j))
ring->sched.ready = true;
else
ring->sched.ready = false;
@@ -537,3 +540,68 @@ void amdgpu_jpeg_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_pri
drm_printf(p, "\nInactive Instance:JPEG%d\n", i);
}
}
+
+static inline bool amdgpu_jpeg_reg_valid(u32 reg)
+{
+	if (reg < JPEG_REG_RANGE_START || reg > JPEG_REG_RANGE_END ||
+	    (reg >= JPEG_ATOMIC_RANGE_START && reg <= JPEG_ATOMIC_RANGE_END))
+		return false;
+
+	return true;
+}
+
+/**
+ * amdgpu_jpeg_dec_parse_cs - command submission parser
+ *
+ * @parser: Command submission parser context
+ * @job: the job to parse
+ * @ib: the IB to parse
+ *
+ * Parse the command stream, return -EINVAL for invalid packet,
+ * 0 otherwise
+ */
+
+int amdgpu_jpeg_dec_parse_cs(struct amdgpu_cs_parser *parser,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib)
+{
+ u32 i, reg, res, cond, type;
+ struct amdgpu_device *adev = parser->adev;
+
+ for (i = 0; i < ib->length_dw; i += 2) {
+ reg = CP_PACKETJ_GET_REG(ib->ptr[i]);
+ res = CP_PACKETJ_GET_RES(ib->ptr[i]);
+ cond = CP_PACKETJ_GET_COND(ib->ptr[i]);
+ type = CP_PACKETJ_GET_TYPE(ib->ptr[i]);
+
+ if (res) /* only support 0 at the moment */
+ return -EINVAL;
+
+ switch (type) {
+ case PACKETJ_TYPE0:
+ if (cond != PACKETJ_CONDITION_CHECK0 ||
+ !amdgpu_jpeg_reg_valid(reg)) {
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ return -EINVAL;
+ }
+ break;
+ case PACKETJ_TYPE3:
+ if (cond != PACKETJ_CONDITION_CHECK3 ||
+ !amdgpu_jpeg_reg_valid(reg)) {
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ return -EINVAL;
+ }
+ break;
+ case PACKETJ_TYPE6:
+ if (ib->ptr[i] == CP_PACKETJ_NOP)
+ continue;
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ return -EINVAL;
+ default:
+ dev_err(adev->dev, "Unknown packet type %d !\n", type);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
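
A few worked register checks against the ranges added to amdgpu_jpeg.h below; the WARN_ON lines are a hypothetical self-test, not part of the patch:

	/* Hypothetical self-test of the range logic: */
	WARN_ON(!amdgpu_jpeg_reg_valid(0x40e1));	/* in range, outside atomic window */
	WARN_ON(amdgpu_jpeg_reg_valid(0x4123));		/* inside 0x4120..0x412a, rejected */
	WARN_ON(amdgpu_jpeg_reg_valid(0x5000));		/* past JPEG_REG_RANGE_END, rejected */
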
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
index 4f0775e39b54..346ae0ab09d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
@@ -25,11 +25,18 @@
#define __AMDGPU_JPEG_H__
#include "amdgpu_ras.h"
+#include "amdgpu_cs.h"
#define AMDGPU_MAX_JPEG_INSTANCES 4
#define AMDGPU_MAX_JPEG_RINGS 10
#define AMDGPU_MAX_JPEG_RINGS_4_0_3 8
+#define JPEG_REG_RANGE_START 0x4000
+#define JPEG_REG_RANGE_END 0x41c2
+#define JPEG_ATOMIC_RANGE_START 0x4120
+#define JPEG_ATOMIC_RANGE_END 0x412A
+
#define AMDGPU_JPEG_HARVEST_JPEG0 (1 << 0)
#define AMDGPU_JPEG_HARVEST_JPEG1 (1 << 1)
@@ -170,5 +177,8 @@ int amdgpu_jpeg_reg_dump_init(struct amdgpu_device *adev,
const struct amdgpu_hwip_reg_entry *reg, u32 count);
void amdgpu_jpeg_dump_ip_state(struct amdgpu_ip_block *ip_block);
void amdgpu_jpeg_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p);
+int amdgpu_jpeg_dec_parse_cs(struct amdgpu_cs_parser *parser,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib);
#endif /*__AMDGPU_JPEG_H__*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 8a76960803c6..a9327472c651 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -939,6 +939,10 @@ out:
if (adev->gfx.config.ta_cntl2_truncate_coord_mode)
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD;
+ /* Gang submit is not supported under SRIOV currently */
+ if (!amdgpu_sriov_vf(adev))
+ dev_info->ids_flags |= AMDGPU_IDS_FLAGS_GANG_SUBMIT;
+
if (amdgpu_passthrough(adev))
dev_info->ids_flags |= (AMDGPU_IDS_FLAGS_MODE_PT <<
AMDGPU_IDS_FLAGS_MODE_SHIFT) &
@@ -1417,14 +1421,10 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
amdgpu_debugfs_vm_init(file_priv);
- r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id);
+ r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id, pasid);
if (r)
goto error_pasid;
- r = amdgpu_vm_set_pasid(adev, &fpriv->vm, pasid);
- if (r)
- goto error_vm;
-
fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
if (!fpriv->prt_va) {
r = -ENOMEM;
@@ -1464,10 +1464,8 @@ error_vm:
amdgpu_vm_fini(adev, &fpriv->vm);
error_pasid:
- if (pasid) {
+ if (pasid)
amdgpu_pasid_free(pasid);
- amdgpu_vm_set_pasid(adev, &fpriv->vm, 0);
- }
kfree(fpriv);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 135598502c8d..5bf9be073cdd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -191,6 +191,20 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
if (r)
goto error_doorbell;
+ if (adev->mes.hung_queue_db_array_size) {
+ r = amdgpu_bo_create_kernel(adev,
+ adev->mes.hung_queue_db_array_size * sizeof(u32),
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->mes.hung_queue_db_array_gpu_obj,
+ &adev->mes.hung_queue_db_array_gpu_addr,
+ &adev->mes.hung_queue_db_array_cpu_addr);
+ if (r) {
+ dev_warn(adev->dev, "failed to create MES hung db array buffer (%d)\n", r);
+ goto error_doorbell;
+ }
+ }
+
return 0;
error_doorbell:
@@ -216,6 +230,10 @@ void amdgpu_mes_fini(struct amdgpu_device *adev)
{
int i;
+ amdgpu_bo_free_kernel(&adev->mes.hung_queue_db_array_gpu_obj,
+ &adev->mes.hung_queue_db_array_gpu_addr,
+ &adev->mes.hung_queue_db_array_cpu_addr);
+
amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
&adev->mes.event_log_gpu_addr,
&adev->mes.event_log_cpu_addr);
@@ -366,6 +384,53 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
return r;
}
+int amdgpu_mes_get_hung_queue_db_array_size(struct amdgpu_device *adev)
+{
+ return adev->mes.hung_queue_db_array_size;
+}
+
+int amdgpu_mes_detect_and_reset_hung_queues(struct amdgpu_device *adev,
+ int queue_type,
+ bool detect_only,
+ unsigned int *hung_db_num,
+ u32 *hung_db_array)
+
+{
+ struct mes_detect_and_reset_queue_input input;
+ u32 *db_array = adev->mes.hung_queue_db_array_cpu_addr;
+ int r, i;
+
+ if (!hung_db_num || !hung_db_array)
+ return -EINVAL;
+
+ if ((queue_type != AMDGPU_RING_TYPE_GFX) &&
+ (queue_type != AMDGPU_RING_TYPE_COMPUTE) &&
+ (queue_type != AMDGPU_RING_TYPE_SDMA))
+ return -EINVAL;
+
+ /* Clear the doorbell array before detection */
+ memset(adev->mes.hung_queue_db_array_cpu_addr, 0,
+ adev->mes.hung_queue_db_array_size * sizeof(u32));
+ input.queue_type = queue_type;
+ input.detect_only = detect_only;
+
+ r = adev->mes.funcs->detect_and_reset_hung_queues(&adev->mes,
+ &input);
+ if (r) {
+ dev_err(adev->dev, "failed to detect and reset\n");
+ } else {
+ *hung_db_num = 0;
+ for (i = 0; i < adev->mes.hung_queue_db_array_size; i++) {
+ if (db_array[i] != AMDGPU_MES_INVALID_DB_OFFSET) {
+ hung_db_array[i] = db_array[i];
+ *hung_db_num += 1;
+ }
+ }
+ }
+
+ return r;
+}
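
A hypothetical caller of the detect-only path, sized with the helper above (error handling trimmed):

	/* Hypothetical poll for hung compute queues without resetting them. */
	unsigned int hung = 0;
	int n = amdgpu_mes_get_hung_queue_db_array_size(adev);
	u32 *dbs = kcalloc(n, sizeof(*dbs), GFP_KERNEL);

	if (dbs && !amdgpu_mes_detect_and_reset_hung_queues(adev,
			AMDGPU_RING_TYPE_COMPUTE, true, &hung, dbs))
		dev_info(adev->dev, "%u hung compute queue(s) detected\n", hung);
	kfree(dbs);
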
+
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
struct mes_misc_op_input op_input;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index c0d2c195fe2e..6b506fc72f58 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -41,6 +41,7 @@
#define AMDGPU_MES_API_VERSION_MASK 0x00fff000
#define AMDGPU_MES_FEAT_VERSION_MASK 0xff000000
#define AMDGPU_MES_MSCRATCH_SIZE 0x40000
+#define AMDGPU_MES_INVALID_DB_OFFSET 0xffffffff
enum amdgpu_mes_priority_level {
AMDGPU_MES_PRIORITY_LEVEL_LOW = 0,
@@ -147,6 +148,10 @@ struct amdgpu_mes {
uint64_t resource_1_gpu_addr[AMDGPU_MAX_MES_PIPES];
void *resource_1_addr[AMDGPU_MAX_MES_PIPES];
+ int hung_queue_db_array_size;
+ struct amdgpu_bo *hung_queue_db_array_gpu_obj;
+ uint64_t hung_queue_db_array_gpu_addr;
+ void *hung_queue_db_array_cpu_addr;
};
struct amdgpu_mes_gang {
@@ -280,6 +285,18 @@ struct mes_reset_queue_input {
bool is_kq;
};
+struct mes_detect_and_reset_queue_input {
+ uint32_t queue_type;
+ bool detect_only;
+};
+
+struct mes_inv_tlbs_pasid_input {
+ uint32_t xcc_id;
+ uint16_t pasid;
+ uint8_t hub_id;
+ uint8_t flush_type;
+};
+
enum mes_misc_opcode {
MES_MISC_OP_WRITE_REG,
MES_MISC_OP_READ_REG,
@@ -367,6 +384,13 @@ struct amdgpu_mes_funcs {
int (*reset_hw_queue)(struct amdgpu_mes *mes,
struct mes_reset_queue_input *input);
+
+ int (*detect_and_reset_hung_queues)(struct amdgpu_mes *mes,
+ struct mes_detect_and_reset_queue_input *input);
+
+
+ int (*invalidate_tlbs_pasid)(struct amdgpu_mes *mes,
+ struct mes_inv_tlbs_pasid_input *input);
};
#define amdgpu_mes_kiq_hw_init(adev) (adev)->mes.kiq_hw_init((adev))
@@ -390,6 +414,13 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
unsigned int vmid,
bool use_mmio);
+int amdgpu_mes_get_hung_queue_db_array_size(struct amdgpu_device *adev);
+int amdgpu_mes_detect_and_reset_hung_queues(struct amdgpu_device *adev,
+ int queue_type,
+ bool detect_only,
+ unsigned int *hung_db_num,
+ u32 *hung_db_array);
+
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg);
int amdgpu_mes_wreg(struct amdgpu_device *adev,
uint32_t reg, uint32_t val);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 6da4f946cac0..20460cfd09bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -496,8 +496,6 @@ struct amdgpu_crtc {
struct drm_connector *connector;
/* for dpm */
u32 line_time;
- u32 wm_low;
- u32 wm_high;
u32 lb_vblank_lead_lines;
struct drm_display_mode hw_mode;
/* for virtual dce */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
index e56ba93a8df6..a974265837f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
@@ -55,7 +55,8 @@ u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev)
bool amdgpu_nbio_is_replay_cnt_supported(struct amdgpu_device *adev)
{
- if (amdgpu_sriov_vf(adev) || !adev->asic_funcs->get_pcie_replay_count ||
+ if (amdgpu_sriov_vf(adev) || !adev->asic_funcs ||
+ !adev->asic_funcs->get_pcie_replay_count ||
(!adev->nbio.funcs || !adev->nbio.funcs->get_pcie_replay_count))
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 122a88294883..e08f58de4b17 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -153,6 +153,14 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
c++;
}
+ if (domain & AMDGPU_GEM_DOMAIN_MMIO_REMAP) {
+ places[c].fpfn = 0;
+ places[c].lpfn = 0;
+ places[c].mem_type = AMDGPU_PL_MMIO_REMAP;
+ places[c].flags = 0;
+ c++;
+ }
+
if (domain & AMDGPU_GEM_DOMAIN_GTT) {
places[c].fpfn = 0;
places[c].lpfn = 0;
@@ -1313,7 +1321,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
if (r)
goto out;
- r = amdgpu_fill_buffer(abo, 0, &bo->base._resv, &fence, true);
+ r = amdgpu_fill_buffer(abo, 0, &bo->base._resv, &fence, true,
+ AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE);
if (WARN_ON(r))
goto out;
@@ -1545,6 +1554,8 @@ uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo)
return AMDGPU_PL_OA;
case AMDGPU_GEM_DOMAIN_DOORBELL:
return AMDGPU_PL_DOORBELL;
+ case AMDGPU_GEM_DOMAIN_MMIO_REMAP:
+ return AMDGPU_PL_MMIO_REMAP;
default:
return TTM_PL_SYSTEM;
}
@@ -1628,6 +1639,9 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
case AMDGPU_PL_DOORBELL:
placement = "DOORBELL";
break;
+ case AMDGPU_PL_MMIO_REMAP:
+ placement = "MMIO REMAP";
+ break;
case TTM_PL_SYSTEM:
default:
placement = "CPU";
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index c316920f3450..656b8a931dae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -69,7 +69,7 @@ struct amdgpu_bo_va_mapping {
uint64_t last;
uint64_t __subtree_last;
uint64_t offset;
- uint64_t flags;
+ uint32_t flags;
};
/* User space allocated BO in a VM */
@@ -167,6 +167,8 @@ static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
return AMDGPU_GEM_DOMAIN_OA;
case AMDGPU_PL_DOORBELL:
return AMDGPU_GEM_DOMAIN_DOORBELL;
+ case AMDGPU_PL_MMIO_REMAP:
+ return AMDGPU_GEM_DOMAIN_MMIO_REMAP;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 0bd51a04be79..8c0e5d03de50 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -448,7 +448,7 @@ static int psp_sw_init(struct amdgpu_ip_block *ip_block)
psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!psp->cmd) {
dev_err(adev->dev, "Failed to allocate memory to command buffer!\n");
- ret = -ENOMEM;
+ return -ENOMEM;
}
adev->psp.xgmi_context.supports_extended_data =
@@ -666,6 +666,10 @@ static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
return "FB_FW_RESERV_ADDR";
case GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR:
return "FB_FW_RESERV_EXT_ADDR";
+ case GFX_CMD_ID_SRIOV_SPATIAL_PART:
+ return "SPATIAL_PARTITION";
+ case GFX_CMD_ID_FB_NPS_MODE:
+ return "NPS_MODE_CHANGE";
default:
return "UNKNOWN CMD";
}
@@ -877,9 +881,7 @@ static int psp_tmr_init(struct psp_context *psp)
pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
PSP_TMR_ALIGNMENT,
- AMDGPU_HAS_VRAM(psp->adev) ?
- AMDGPU_GEM_DOMAIN_VRAM :
- AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM,
&psp->tmr_bo, &psp->tmr_mc_addr,
pptr);
}
@@ -1039,15 +1041,28 @@ int psp_update_fw_reservation(struct psp_context *psp)
{
int ret;
uint64_t reserv_addr, reserv_addr_ext;
- uint32_t reserv_size, reserv_size_ext;
+ uint32_t reserv_size, reserv_size_ext, mp0_ip_ver;
struct amdgpu_device *adev = psp->adev;
+ mp0_ip_ver = amdgpu_ip_version(adev, MP0_HWIP, 0);
+
if (amdgpu_sriov_vf(psp->adev))
return 0;
- if ((amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(14, 0, 2)) &&
- (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(14, 0, 3)))
+ switch (mp0_ip_ver) {
+ case IP_VERSION(14, 0, 2):
+ if (adev->psp.sos.fw_version < 0x3b0e0d)
+ return 0;
+ break;
+
+ case IP_VERSION(14, 0, 3):
+ if (adev->psp.sos.fw_version < 0x3a0e14)
+ return 0;
+ break;
+
+ default:
return 0;
+ }
ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_ADDR, &reserv_addr, &reserv_size);
if (ret)
@@ -2337,7 +2352,7 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
}
ret = psp_ta_load(psp, &psp->securedisplay_context.context);
- if (!ret) {
+ if (!ret && !psp->securedisplay_context.context.resp_status) {
psp->securedisplay_context.context.initialized = true;
mutex_init(&psp->securedisplay_context.mutex);
} else
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
index 38face981c3e..6e8aad91bcd3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
@@ -171,13 +171,9 @@ static ssize_t ta_if_load_debugfs_write(struct file *fp, const char *buf, size_t
copy_pos += sizeof(uint32_t);
- ta_bin = kzalloc(ta_bin_len, GFP_KERNEL);
- if (!ta_bin)
- return -ENOMEM;
- if (copy_from_user((void *)ta_bin, &buf[copy_pos], ta_bin_len)) {
- ret = -EFAULT;
- goto err_free_bin;
- }
+ ta_bin = memdup_user(&buf[copy_pos], ta_bin_len);
+ if (IS_ERR(ta_bin))
+ return PTR_ERR(ta_bin);
/* Set TA context and functions */
set_ta_context_funcs(psp, ta_type, &context);
@@ -327,13 +323,9 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
return -EFAULT;
copy_pos += sizeof(uint32_t);
- shared_buf = kzalloc(shared_buf_len, GFP_KERNEL);
- if (!shared_buf)
- return -ENOMEM;
- if (copy_from_user((void *)shared_buf, &buf[copy_pos], shared_buf_len)) {
- ret = -EFAULT;
- goto err_free_shared_buf;
- }
+ shared_buf = memdup_user(&buf[copy_pos], shared_buf_len);
+ if (IS_ERR(shared_buf))
+ return PTR_ERR(shared_buf);
set_ta_context_funcs(psp, ta_type, &context);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 540817e296da..e0ee21150860 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -122,12 +122,15 @@ const char *get_ras_block_str(struct ras_common_if *ras_block)
/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
#define RAS_BAD_PAGE_COVER (100 * 1024 * 1024ULL)
-#define MAX_UMC_POISON_POLLING_TIME_ASYNC 300 //ms
+#define MAX_UMC_POISON_POLLING_TIME_ASYNC 10
#define AMDGPU_RAS_RETIRE_PAGE_INTERVAL 100 //ms
#define MAX_FLUSH_RETIRE_DWORK_TIMES 100
+#define BYPASS_ALLOCATED_ADDRESS 0x0
+#define BYPASS_INITIALIZATION_ADDRESS 0x1
+
enum amdgpu_ras_retire_page_reservation {
AMDGPU_RAS_RETIRE_PAGE_RESERVED,
AMDGPU_RAS_RETIRE_PAGE_PENDING,
@@ -136,10 +139,14 @@ enum amdgpu_ras_retire_page_reservation {
atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
-static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
+static int amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
uint64_t addr);
-static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
+static int amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
uint64_t addr);
+
+static void amdgpu_ras_critical_region_init(struct amdgpu_device *adev);
+static void amdgpu_ras_critical_region_fini(struct amdgpu_device *adev);
+
#ifdef CONFIG_X86_MCE_AMD
static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
struct mce_notifier_adev_list {
@@ -169,18 +176,16 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre
struct eeprom_table_record err_rec;
int ret;
- if ((address >= adev->gmc.mc_vram_size) ||
- (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
+ ret = amdgpu_ras_check_bad_page(adev, address);
+ if (ret == -EINVAL) {
dev_warn(adev->dev,
- "RAS WARN: input address 0x%llx is invalid.\n",
- address);
+ "RAS WARN: input address 0x%llx is invalid.\n",
+ address);
return -EINVAL;
- }
-
- if (amdgpu_ras_check_bad_page(adev, address)) {
+ } else if (ret == 1) {
dev_warn(adev->dev,
- "RAS WARN: 0x%llx has already been marked as bad page!\n",
- address);
+ "RAS WARN: 0x%llx has already been marked as bad page!\n",
+ address);
return 0;
}
@@ -207,6 +212,56 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre
return 0;
}
+static int amdgpu_check_address_validity(struct amdgpu_device *adev,
+ uint64_t address, uint64_t flags)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_vram_block_info blk_info;
+ uint64_t page_pfns[32] = {0};
+ int i, ret, count;
+ bool hit = false;
+
+ if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0))
+ return 0;
+
+ if (amdgpu_sriov_vf(adev)) {
+ if (amdgpu_virt_check_vf_critical_region(adev, address, &hit))
+ return -EPERM;
+ return hit ? -EACCES : 0;
+ }
+
+ if ((address >= adev->gmc.mc_vram_size) ||
+ (address >= RAS_UMC_INJECT_ADDR_LIMIT))
+ return -EFAULT;
+
+ count = amdgpu_umc_lookup_bad_pages_in_a_row(adev,
+ address, page_pfns, ARRAY_SIZE(page_pfns));
+ if (count <= 0)
+ return -EPERM;
+
+ for (i = 0; i < count; i++) {
+ memset(&blk_info, 0, sizeof(blk_info));
+ ret = amdgpu_vram_mgr_query_address_block_info(&adev->mman.vram_mgr,
+ page_pfns[i] << AMDGPU_GPU_PAGE_SHIFT, &blk_info);
+ if (!ret) {
+			/* The address being checked must have been allocated
+			 * by the calling process itself; allocations owned by
+			 * any other task are rejected.
+			 */
+ if ((flags == BYPASS_ALLOCATED_ADDRESS) &&
+ ((blk_info.task.pid != task_pid_nr(current)) ||
+ strncmp(blk_info.task.comm, current->comm, TASK_COMM_LEN)))
+ return -EACCES;
+ else if ((flags == BYPASS_INITIALIZATION_ADDRESS) &&
+ (blk_info.task.pid == con->init_task_pid) &&
+ !strncmp(blk_info.task.comm, con->init_task_comm, TASK_COMM_LEN))
+ return -EACCES;
+ }
+ }
+
+ return 0;
+}
+
static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@@ -297,6 +352,8 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
op = 2;
else if (strstr(str, "retire_page") != NULL)
op = 3;
+ else if (strstr(str, "check_address") != NULL)
+ op = 4;
else if (str[0] && str[1] && str[2] && str[3])
/* ascii string, but commands are not matched. */
return -EINVAL;
@@ -311,6 +368,15 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
data->inject.address = address;
return 0;
+ } else if (op == 4) {
+ if (sscanf(str, "%*s 0x%llx 0x%llx", &address, &value) != 2 &&
+ sscanf(str, "%*s %llu %llu", &address, &value) != 2)
+ return -EINVAL;
+
+ data->op = op;
+ data->inject.address = address;
+ data->inject.value = value;
+ return 0;
}
if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
@@ -500,6 +566,9 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
return size;
else
return ret;
+ } else if (data.op == 4) {
+ ret = amdgpu_check_address_validity(adev, data.inject.address, data.inject.value);
+ return ret ? ret : size;
}
if (!amdgpu_ras_is_supported(adev, data.head.block))
@@ -513,22 +582,16 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
break;
case 2:
- if ((data.inject.address >= adev->gmc.mc_vram_size &&
- adev->gmc.mc_vram_size) ||
- (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
- dev_warn(adev->dev, "RAS WARN: input address "
- "0x%llx is invalid.",
+ /* umc ce/ue error injection for a bad page is not allowed */
+ if (data.head.block == AMDGPU_RAS_BLOCK__UMC)
+ ret = amdgpu_ras_check_bad_page(adev, data.inject.address);
+ if (ret == -EINVAL) {
+ dev_warn(adev->dev, "RAS WARN: input address 0x%llx is invalid.",
data.inject.address);
- ret = -EINVAL;
break;
- }
-
- /* umc ce/ue error injection for a bad page is not allowed */
- if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
- amdgpu_ras_check_bad_page(adev, data.inject.address)) {
- dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has "
- "already been marked as bad!\n",
- data.inject.address);
+ } else if (ret == 1) {
+ dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has already been marked as bad!\n",
+ data.inject.address);
break;
}
@@ -2566,18 +2629,26 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
goto out;
}
- *bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
+ *bps = kmalloc_array(data->count, sizeof(struct ras_badpage), GFP_KERNEL);
if (!*bps) {
ret = -ENOMEM;
goto out;
}
for (; i < data->count; i++) {
+ if (!data->bps[i].ts)
+ continue;
+
(*bps)[i] = (struct ras_badpage){
.bp = data->bps[i].retired_page,
.size = AMDGPU_GPU_PAGE_SIZE,
.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
};
+
+ if (amdgpu_ras_check_critical_address(adev,
+ data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
+ continue;
+
status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT);
if (status == -EBUSY)
@@ -2586,7 +2657,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
}
- *count = data->count;
+ *count = con->bad_page_num;
out:
mutex_unlock(&con->recovery_lock);
return ret;
@@ -2638,6 +2709,7 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
struct amdgpu_device *adev = ras->adev;
struct list_head device_list, *device_list_handle = NULL;
struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+ unsigned int error_query_mode;
enum ras_event_type type;
if (hive) {
@@ -2666,6 +2738,13 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
device_list_handle = &device_list;
}
+ if (amdgpu_ras_get_error_query_mode(adev, &error_query_mode)) {
+ if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY) {
+ /* wait 500ms for PMFW to finish polling MCA bank info */
+ msleep(500);
+ }
+ }
+
type = amdgpu_ras_get_fatal_error_event(adev);
list_for_each_entry(remote_adev,
device_list_handle, gmc.xgmi.head) {
@@ -2722,7 +2801,7 @@ static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
unsigned int old_space = data->count + data->space_left;
unsigned int new_space = old_space + pages;
unsigned int align_space = ALIGN(new_space, 512);
- void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
+ void *bps = kmalloc_array(align_space, sizeof(*data->bps), GFP_KERNEL);
if (!bps) {
return -ENOMEM;
@@ -2814,8 +2893,11 @@ static int __amdgpu_ras_restore_bad_pages(struct amdgpu_device *adev,
for (j = 0; j < count; j++) {
if (amdgpu_ras_check_bad_page_unlock(con,
- bps[j].retired_page << AMDGPU_GPU_PAGE_SHIFT))
+ bps[j].retired_page << AMDGPU_GPU_PAGE_SHIFT)) {
+ data->count++;
+ data->space_left--;
continue;
+ }
if (!data->space_left &&
amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
@@ -2828,6 +2910,7 @@ static int __amdgpu_ras_restore_bad_pages(struct amdgpu_device *adev,
sizeof(struct eeprom_table_record));
data->count++;
data->space_left--;
+ con->bad_page_num++;
}
return 0;
@@ -2974,7 +3057,7 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
ret = __amdgpu_ras_convert_rec_array_from_rom(adev,
&bps[i], &err_data, nps);
if (ret)
- control->ras_num_bad_pages -= adev->umc.retire_unit;
+ con->bad_page_num -= adev->umc.retire_unit;
i += (adev->umc.retire_unit - 1);
} else {
break;
@@ -2988,8 +3071,10 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
ret = __amdgpu_ras_convert_rec_from_rom(adev,
&bps[i], &err_data, nps);
if (ret)
- control->ras_num_bad_pages -= adev->umc.retire_unit;
+ con->bad_page_num -= adev->umc.retire_unit;
}
+
+ con->eh_data->count_saved = con->eh_data->count;
} else {
ret = __amdgpu_ras_restore_bad_pages(adev, bps, pages);
}
@@ -3012,7 +3097,7 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
struct amdgpu_ras_eeprom_control *control;
- int save_count, unit_num, bad_page_num, i;
+ int save_count, unit_num, i;
if (!con || !con->eh_data) {
if (new_cnt)
@@ -3033,27 +3118,26 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
mutex_lock(&con->recovery_lock);
control = &con->eeprom_control;
data = con->eh_data;
- bad_page_num = control->ras_num_bad_pages;
- save_count = data->count - bad_page_num;
+ unit_num = data->count / adev->umc.retire_unit - control->ras_num_recs;
+ save_count = con->bad_page_num - control->ras_num_bad_pages;
mutex_unlock(&con->recovery_lock);
- unit_num = save_count / adev->umc.retire_unit;
if (new_cnt)
*new_cnt = unit_num;
/* only new entries are saved */
- if (save_count > 0) {
+ if (unit_num > 0) {
/* old ASICs only save PAs to EEPROM, as before */
if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) {
if (amdgpu_ras_eeprom_append(control,
- &data->bps[bad_page_num], save_count)) {
+ &data->bps[data->count_saved], unit_num)) {
dev_err(adev->dev, "Failed to save EEPROM table data!");
return -EIO;
}
} else {
for (i = 0; i < unit_num; i++) {
if (amdgpu_ras_eeprom_append(control,
- &data->bps[bad_page_num +
+ &data->bps[data->count_saved +
i * adev->umc.retire_unit], 1)) {
dev_err(adev->dev, "Failed to save EEPROM table data!");
return -EIO;
@@ -3062,6 +3146,7 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
}
dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
+ data->count_saved = data->count;
}
return 0;
@@ -3116,17 +3201,17 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
}
}
+ ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs, true);
+ if (ret)
+ goto out;
+
ret = amdgpu_ras_eeprom_check(control);
if (ret)
goto out;
/* HW not usable */
- if (amdgpu_ras_is_rma(adev)) {
+ if (amdgpu_ras_is_rma(adev))
ret = -EHWPOISON;
- goto out;
- }
-
- ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs, true);
}
out:
@@ -3134,18 +3219,24 @@ out:
return ret;
}
-static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
+static int amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
uint64_t addr)
{
struct ras_err_handler_data *data = con->eh_data;
+ struct amdgpu_device *adev = con->adev;
int i;
+ if ((addr >= adev->gmc.mc_vram_size &&
+ adev->gmc.mc_vram_size) ||
+ (addr >= RAS_UMC_INJECT_ADDR_LIMIT))
+ return -EINVAL;
+
addr >>= AMDGPU_GPU_PAGE_SHIFT;
for (i = 0; i < data->count; i++)
if (addr == data->bps[i].retired_page)
- return true;
+ return 1;
- return false;
+ return 0;
}
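/*
 * Editor's note: amdgpu_ras_check_bad_page_unlock() now folds the
 * address-range check into its return value instead of returning a
 * bool. A standalone sketch of the tri-state contract the callers
 * above rely on (the size constants are illustrative):
 */
#include <errno.h>
#include <stdio.h>

static int check_bad_page(unsigned long long addr,
			  unsigned long long vram_size,
			  unsigned long long limit, int retired)
{
	if ((addr >= vram_size && vram_size) || addr >= limit)
		return -EINVAL;		/* address out of range */
	return retired ? 1 : 0;		/* 1: already retired, 0: clean */
}

int main(void)
{
	const unsigned long long vram = 1ULL << 34, limit = 1ULL << 37;

	printf("%d\n", check_bad_page(1ULL << 40, vram, limit, 0)); /* -22 */
	printf("%d\n", check_bad_page(0x1000, vram, limit, 1));     /* 1 */
	printf("%d\n", check_bad_page(0x1000, vram, limit, 0));     /* 0 */
	return 0;
}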
/*
@@ -3153,11 +3244,11 @@ static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
*
* Note: this check is only for umc block
*/
-static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
+static int amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
uint64_t addr)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- bool ret = false;
+ int ret = 0;
if (!con || !con->eh_data)
return ret;
@@ -3241,7 +3332,7 @@ static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log)
INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL);
ecc_log->de_queried_count = 0;
- ecc_log->prev_de_queried_count = 0;
+ ecc_log->consumption_q_count = 0;
}
static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log)
@@ -3261,7 +3352,7 @@ static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log)
mutex_destroy(&ecc_log->lock);
ecc_log->de_queried_count = 0;
- ecc_log->prev_de_queried_count = 0;
+ ecc_log->consumption_q_count = 0;
}
static bool amdgpu_ras_schedule_retirement_dwork(struct amdgpu_ras *con,
@@ -3287,7 +3378,6 @@ static void amdgpu_ras_do_page_retirement(struct work_struct *work)
page_retirement_dwork.work);
struct amdgpu_device *adev = con->adev;
struct ras_err_data err_data;
- unsigned long err_cnt;
/* If gpu reset is ongoing, delay retiring the bad pages */
if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) {
@@ -3299,13 +3389,9 @@ static void amdgpu_ras_do_page_retirement(struct work_struct *work)
amdgpu_ras_error_data_init(&err_data);
amdgpu_umc_handle_bad_pages(adev, &err_data);
- err_cnt = err_data.err_addr_cnt;
amdgpu_ras_error_data_fini(&err_data);
- if (err_cnt && amdgpu_ras_is_rma(adev))
- amdgpu_ras_reset_gpu(adev);
-
amdgpu_ras_schedule_retirement_dwork(con,
AMDGPU_RAS_RETIRE_PAGE_INTERVAL);
}
@@ -3316,49 +3402,39 @@ static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
int ret = 0;
struct ras_ecc_log_info *ecc_log;
struct ras_query_if info;
- uint32_t timeout = 0;
+ u32 timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- uint64_t de_queried_count;
- uint32_t new_detect_count, total_detect_count;
- uint32_t need_query_count = poison_creation_count;
+ u64 de_queried_count;
+ u64 consumption_q_count;
enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
memset(&info, 0, sizeof(info));
info.head.block = AMDGPU_RAS_BLOCK__UMC;
ecc_log = &ras->umc_ecc_log;
- total_detect_count = 0;
+ ecc_log->de_queried_count = 0;
+ ecc_log->consumption_q_count = 0;
+
do {
ret = amdgpu_ras_query_error_status_with_event(adev, &info, type);
if (ret)
return ret;
de_queried_count = ecc_log->de_queried_count;
- if (de_queried_count > ecc_log->prev_de_queried_count) {
- new_detect_count = de_queried_count - ecc_log->prev_de_queried_count;
- ecc_log->prev_de_queried_count = de_queried_count;
- timeout = 0;
- } else {
- new_detect_count = 0;
- }
+ consumption_q_count = ecc_log->consumption_q_count;
- if (new_detect_count) {
- total_detect_count += new_detect_count;
- } else {
- if (!timeout && need_query_count)
- timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC;
+ if (de_queried_count && consumption_q_count)
+ break;
- if (timeout) {
- if (!--timeout)
- break;
- msleep(1);
- }
- }
- } while (total_detect_count < need_query_count);
+ msleep(100);
+ } while (--timeout);
- if (total_detect_count)
+ if (de_queried_count)
schedule_delayed_work(&ras->page_retirement_dwork, 0);
+ if (amdgpu_ras_is_rma(adev) && atomic_cmpxchg(&ras->rma_in_recovery, 0, 1) == 0)
+ amdgpu_ras_reset_gpu(adev);
+
return 0;
}
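/*
 * Editor's note: the rewritten handler above polls until both the
 * creation and consumption counters are nonzero, or the retry budget
 * runs out. A standalone sketch of that bounded-poll shape (the
 * counters are simulated; msleep() becomes usleep()):
 */
#include <stdio.h>
#include <unistd.h>

static unsigned long long created, consumed;

static void query(void)
{
	/* Stand-in for the error-status query that updates the counters. */
	created++;
	if (created >= 3)
		consumed++;
}

int main(void)
{
	unsigned int timeout = 50;	/* retry budget, must start > 0 */

	do {
		query();
		if (created && consumed)
			break;		/* both events observed */
		usleep(100 * 1000);	/* ~msleep(100) */
	} while (--timeout);

	printf("%s, %u retries left\n",
	       created && consumed ? "done" : "timed out", timeout);
	return 0;
}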
@@ -3394,6 +3470,12 @@ static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
reset_flags |= msg.reset;
}
+ /*
+ * Try to ensure the poison creation handler has completed first,
+ * so that RMA is set if the bad page count exceeds the threshold.
+ */
+ flush_delayed_work(&con->page_retirement_dwork);
+
/* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */
if (reset_flags && !amdgpu_ras_is_rma(adev)) {
if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET)
@@ -3403,8 +3485,6 @@ static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
else
reset = reset_flags;
- flush_delayed_work(&con->page_retirement_dwork);
-
con->gpu_reset_flags |= reset;
amdgpu_ras_reset_gpu(adev);
@@ -3434,6 +3514,7 @@ static int amdgpu_ras_page_retirement_thread(void *param)
if (kthread_should_stop())
break;
+ mutex_lock(&con->poison_lock);
gpu_reset = 0;
do {
@@ -3446,7 +3527,8 @@ static int amdgpu_ras_page_retirement_thread(void *param)
atomic_sub(poison_creation_count, &con->poison_creation_count);
atomic_sub(poison_creation_count, &con->page_retirement_req_cnt);
}
- } while (atomic_read(&con->poison_creation_count));
+ } while (atomic_read(&con->poison_creation_count) &&
+ !atomic_read(&con->poison_consumption_count));
if (ret != -EIO) {
msg_count = kfifo_len(&con->poison_fifo);
@@ -3463,6 +3545,7 @@ static int amdgpu_ras_page_retirement_thread(void *param)
/* gpu mode-1 reset is ongoing or just completed ras mode-1 reset */
/* Clear poison creation request */
atomic_set(&con->poison_creation_count, 0);
+ atomic_set(&con->poison_consumption_count, 0);
/* Clear poison fifo */
amdgpu_ras_clear_poison_fifo(adev);
@@ -3487,9 +3570,12 @@ static int amdgpu_ras_page_retirement_thread(void *param)
atomic_sub(msg_count, &con->page_retirement_req_cnt);
}
+ atomic_set(&con->poison_consumption_count, 0);
+
/* Wake up work to save bad pages to eeprom */
schedule_delayed_work(&con->page_retirement_dwork, 0);
}
+ mutex_unlock(&con->poison_lock);
}
return 0;
@@ -3570,8 +3656,10 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
}
mutex_init(&con->recovery_lock);
+ mutex_init(&con->poison_lock);
INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
atomic_set(&con->in_recovery, 0);
+ atomic_set(&con->rma_in_recovery, 0);
con->eeprom_control.bad_channel_bitmap = 0;
max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
@@ -3589,6 +3677,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
init_waitqueue_head(&con->page_retirement_wq);
atomic_set(&con->page_retirement_req_cnt, 0);
atomic_set(&con->poison_creation_count, 0);
+ atomic_set(&con->poison_consumption_count, 0);
con->page_retirement_thread =
kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement");
if (IS_ERR(con->page_retirement_thread)) {
@@ -3661,6 +3750,8 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
kfree(data);
mutex_unlock(&con->recovery_lock);
+ amdgpu_ras_critical_region_init(adev);
+
return 0;
}
/* recovery end */
@@ -4087,6 +4178,12 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
goto release_con;
}
+ con->init_task_pid = task_pid_nr(current);
+ get_task_comm(con->init_task_comm, current);
+
+ mutex_init(&con->critical_region_lock);
+ INIT_LIST_HEAD(&con->critical_region_head);
+
dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
"hardware ability[%x] ras_mask[%x]\n",
adev->ras_hw_enabled, adev->ras_enabled);
@@ -4366,6 +4463,9 @@ int amdgpu_ras_fini(struct amdgpu_device *adev)
if (!adev->ras_enabled || !con)
return 0;
+ amdgpu_ras_critical_region_fini(adev);
+ mutex_destroy(&con->critical_region_lock);
+
list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
if (ras_node->ras_obj) {
obj = ras_node->ras_obj;
@@ -5274,6 +5374,9 @@ int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn)
uint64_t start = pfn << AMDGPU_GPU_PAGE_SHIFT;
int ret = 0;
+ if (amdgpu_ras_check_critical_address(adev, start))
+ return 0;
+
mutex_lock(&con->page_rsv_lock);
ret = amdgpu_vram_mgr_query_page_status(mgr, start);
if (ret == -ENOENT)
@@ -5310,3 +5413,80 @@ bool amdgpu_ras_is_rma(struct amdgpu_device *adev)
return con->is_rma;
}
+
+int amdgpu_ras_add_critical_region(struct amdgpu_device *adev,
+ struct amdgpu_bo *bo)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_vram_mgr_resource *vres;
+ struct ras_critical_region *region;
+ struct drm_buddy_block *block;
+ int ret = 0;
+
+ if (!bo || !bo->tbo.resource)
+ return -EINVAL;
+
+ vres = to_amdgpu_vram_mgr_resource(bo->tbo.resource);
+
+ mutex_lock(&con->critical_region_lock);
+
+ /* Check if the BO has already been recorded */
+ list_for_each_entry(region, &con->critical_region_head, node)
+ if (region->bo == bo)
+ goto out;
+
+ /* Record the new critical amdgpu BO */
+ list_for_each_entry(block, &vres->blocks, link) {
+ region = kzalloc(sizeof(*region), GFP_KERNEL);
+ if (!region) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ region->bo = bo;
+ region->start = amdgpu_vram_mgr_block_start(block);
+ region->size = amdgpu_vram_mgr_block_size(block);
+ list_add_tail(&region->node, &con->critical_region_head);
+ }
+
+out:
+ mutex_unlock(&con->critical_region_lock);
+
+ return ret;
+}
+
+static void amdgpu_ras_critical_region_init(struct amdgpu_device *adev)
+{
+ amdgpu_ras_add_critical_region(adev, adev->mman.fw_reserved_memory);
+}
+
+static void amdgpu_ras_critical_region_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_critical_region *region, *tmp;
+
+ mutex_lock(&con->critical_region_lock);
+ list_for_each_entry_safe(region, tmp, &con->critical_region_head, node) {
+ list_del(&region->node);
+ kfree(region);
+ }
+ mutex_unlock(&con->critical_region_lock);
+}
+
+bool amdgpu_ras_check_critical_address(struct amdgpu_device *adev, uint64_t addr)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_critical_region *region;
+ bool ret = false;
+
+ mutex_lock(&con->critical_region_lock);
+ list_for_each_entry(region, &con->critical_region_head, node) {
+ if ((region->start <= addr) &&
+ (addr < (region->start + region->size))) {
+ ret = true;
+ break;
+ }
+ }
+ mutex_unlock(&con->critical_region_lock);
+
+ return ret;
+}
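/*
 * Editor's note: the lookup above treats every critical region as a
 * half-open interval [start, start + size). A compact, testable
 * restatement; writing the check as "addr - start < size" also avoids
 * overflowing start + size near the top of the address space.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct region { uint64_t start, size; };

static bool region_contains(const struct region *r, uint64_t addr)
{
	return addr >= r->start && addr - r->start < r->size;
}

int main(void)
{
	const struct region r = { .start = 0x1000, .size = 0x1000 };

	assert(region_contains(&r, 0x1000));	/* first byte */
	assert(region_contains(&r, 0x1fff));	/* last byte */
	assert(!region_contains(&r, 0x2000));	/* one past the end */
	return 0;
}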
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 927d6bff734a..6cf0dfd38be8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -492,8 +492,15 @@ struct ras_ecc_err {
struct ras_ecc_log_info {
struct mutex lock;
struct radix_tree_root de_page_tree;
- uint64_t de_queried_count;
- uint64_t prev_de_queried_count;
+ uint64_t de_queried_count;
+ uint64_t consumption_q_count;
+};
+
+struct ras_critical_region {
+ struct list_head node;
+ struct amdgpu_bo *bo;
+ uint64_t start;
+ uint64_t size;
};
struct amdgpu_ras {
@@ -515,6 +522,7 @@ struct amdgpu_ras {
/* gpu recovery */
struct work_struct recovery_work;
atomic_t in_recovery;
+ atomic_t rma_in_recovery;
struct amdgpu_device *adev;
/* error handler data */
struct ras_err_handler_data *eh_data;
@@ -557,6 +565,7 @@ struct amdgpu_ras {
struct mutex page_retirement_lock;
atomic_t page_retirement_req_cnt;
atomic_t poison_creation_count;
+ atomic_t poison_consumption_count;
struct mutex page_rsv_lock;
DECLARE_KFIFO(poison_fifo, struct ras_poison_msg, 128);
struct ras_ecc_log_info umc_ecc_log;
@@ -570,6 +579,17 @@ struct amdgpu_ras {
struct ras_event_manager *event_mgr;
uint64_t reserved_pages_in_bytes;
+
+ pid_t init_task_pid;
+ char init_task_comm[TASK_COMM_LEN];
+
+ int bad_page_num;
+
+ struct list_head critical_region_head;
+ struct mutex critical_region_lock;
+
+ /* Protect poison injection */
+ struct mutex poison_lock;
};
struct ras_fs_data {
@@ -608,6 +628,7 @@ struct ras_err_handler_data {
struct eeprom_table_record *bps;
/* the count of entries */
int count;
+ int count_saved;
/* the space can place new entries */
int space_left;
};
@@ -973,6 +994,9 @@ int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_
int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn);
+int amdgpu_ras_add_critical_region(struct amdgpu_device *adev, struct amdgpu_bo *bo);
+bool amdgpu_ras_check_critical_address(struct amdgpu_device *adev, uint64_t addr);
+
int amdgpu_ras_put_poison_req(struct amdgpu_device *adev,
enum amdgpu_ras_block block, uint16_t pasid,
pasid_notify pasid_fn, void *data, uint32_t reset);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 9bda9ad13f88..3eb3fb55ccb0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -743,8 +743,7 @@ amdgpu_ras_eeprom_append_table(struct amdgpu_ras_eeprom_control *control,
else
control->ras_num_mca_recs += num;
- control->ras_num_bad_pages = control->ras_num_pa_recs +
- control->ras_num_mca_recs * adev->umc.retire_unit;
+ control->ras_num_bad_pages = con->bad_page_num;
Out:
kfree(buf);
return res;
@@ -766,6 +765,10 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
dev_warn(adev->dev,
"Saved bad pages %d reaches threshold value %d\n",
control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
+
+ if (adev->cper.enabled && amdgpu_cper_generate_bp_threshold_record(adev))
+ dev_warn(adev->dev, "fail to generate bad page threshold cper records\n");
+
if ((amdgpu_bad_page_threshold != -1) &&
(amdgpu_bad_page_threshold != -2)) {
control->tbl_hdr.header = RAS_TABLE_HDR_BAD;
@@ -774,9 +777,10 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
control->tbl_rai.health_percent = 0;
}
ras->is_rma = true;
- /* ignore the -ENOTSUPP return value */
- amdgpu_dpm_send_rma_reason(adev);
}
+
+ /* ignore the -ENOTSUPP return value */
+ amdgpu_dpm_send_rma_reason(adev);
}
if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
@@ -1457,8 +1461,7 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
if (!__get_eeprom_i2c_addr(adev, control))
return -EINVAL;
- control->ras_num_bad_pages = control->ras_num_pa_recs +
- control->ras_num_mca_recs * adev->umc.retire_unit;
+ control->ras_num_bad_pages = ras->bad_page_num;
if (hdr->header == RAS_TABLE_HDR_VAL) {
dev_dbg(adev->dev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
index 50fcd86e1033..be2e56ce1355 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -91,6 +91,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
break;
case TTM_PL_TT:
case AMDGPU_PL_DOORBELL:
+ case AMDGPU_PL_MMIO_REMAP:
node = to_ttm_range_mgr_node(res)->mm_nodes;
while (start >= node->size << PAGE_SHIFT)
start -= node++->size << PAGE_SHIFT;
@@ -153,6 +154,7 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
break;
case TTM_PL_TT:
case AMDGPU_PL_DOORBELL:
+ case AMDGPU_PL_MMIO_REMAP:
node = cur->node;
cur->node = ++node;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
index dabfbdf6f1ce..28c4ad62f50e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
@@ -340,6 +340,9 @@ void amdgpu_reset_get_desc(struct amdgpu_reset_context *rst_ctxt, char *buf,
case AMDGPU_RESET_SRC_USER:
strscpy(buf, "user trigger", len);
break;
+ case AMDGPU_RESET_SRC_USERQ:
+ strscpy(buf, "user queue trigger", len);
+ break;
default:
strscpy(buf, "unknown", len);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
index 4d9b9701139b..07b4d37f1db6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
@@ -43,6 +43,7 @@ enum AMDGPU_RESET_SRCS {
AMDGPU_RESET_SRC_MES,
AMDGPU_RESET_SRC_HWS,
AMDGPU_RESET_SRC_USER,
+ AMDGPU_RESET_SRC_USERQ,
};
struct amdgpu_reset_context {
@@ -160,4 +161,16 @@ int amdgpu_reset_do_xgmi_reset_on_init(
bool amdgpu_reset_in_recovery(struct amdgpu_device *adev);
+static inline void amdgpu_reset_set_dpc_status(struct amdgpu_device *adev,
+ bool status)
+{
+ adev->pcie_reset_ctx.occurs_dpc = status;
+ adev->no_hw_access = status;
+}
+
+static inline bool amdgpu_reset_in_dpc(struct amdgpu_device *adev)
+{
+ return adev->pcie_reset_ctx.occurs_dpc;
+}
+
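/*
 * Editor's note: a standalone mock (not kernel code) of what the two
 * inline helpers above do -- setting the DPC flag and fencing off HW
 * access are deliberately coupled in one call.
 */
#include <stdbool.h>
#include <stdio.h>

struct mock_dev {
	struct { bool occurs_dpc; } pcie_reset_ctx;
	bool no_hw_access;
};

static void set_dpc_status(struct mock_dev *adev, bool status)
{
	adev->pcie_reset_ctx.occurs_dpc = status;
	adev->no_hw_access = status;
}

int main(void)
{
	struct mock_dev dev = {0};

	set_dpc_status(&dev, true);	/* DPC window opens */
	printf("in_dpc=%d no_hw=%d\n",
	       dev.pcie_reset_ctx.occurs_dpc, dev.no_hw_access);
	set_dpc_status(&dev, false);	/* DPC window closes */
	return 0;
}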
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 6379bb25bf5c..8f6ce948c684 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -364,7 +364,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
- r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
+ r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_bytes,
+ PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT,
&ring->ring_obj,
&ring->gpu_addr,
@@ -421,8 +422,6 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
dma_fence_put(ring->vmid_wait);
ring->vmid_wait = NULL;
ring->me = 0;
-
- ring->adev->rings[ring->idx] = NULL;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 7670f5d82b9e..b6b649179776 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -114,7 +114,7 @@ struct amdgpu_sched {
*/
struct amdgpu_fence_driver {
uint64_t gpu_addr;
- volatile uint32_t *cpu_addr;
+ uint32_t *cpu_addr;
/* sync_seq is protected by ring emission lock */
uint32_t sync_seq;
atomic_t last_seq;
@@ -211,7 +211,18 @@ struct amdgpu_ring_funcs {
bool support_64bit_ptrs;
bool no_user_fence;
bool secure_submission_supported;
- unsigned extra_dw;
+
+ /**
+ * @extra_bytes:
+ *
+ * Optional extra space in bytes that is added to the ring size
+ * when allocating the BO that holds the contents of the ring.
+ * This space isn't used for command submission to the ring,
+ * but is just there to satisfy some hardware requirements or
+ * implement workarounds. It's up to the implementation of each
+ * specific ring to initialize this space.
+ */
+ unsigned extra_bytes;
/* ring read/write ptr handling */
u64 (*get_rptr)(struct amdgpu_ring *ring);
@@ -298,7 +309,7 @@ struct amdgpu_ring {
unsigned int ring_backup_entries_to_copy;
unsigned rptr_offs;
u64 rptr_gpu_addr;
- volatile u32 *rptr_cpu_addr;
+ u32 *rptr_cpu_addr;
/**
* @wptr:
@@ -378,19 +389,19 @@ struct amdgpu_ring {
* This is the CPU address pointer in the writeback slot. This is used
* to commit changes to the GPU.
*/
- volatile u32 *wptr_cpu_addr;
+ u32 *wptr_cpu_addr;
unsigned fence_offs;
u64 fence_gpu_addr;
- volatile u32 *fence_cpu_addr;
+ u32 *fence_cpu_addr;
uint64_t current_ctx;
char name[16];
u32 trail_seq;
unsigned trail_fence_offs;
u64 trail_fence_gpu_addr;
- volatile u32 *trail_fence_cpu_addr;
+ u32 *trail_fence_cpu_addr;
unsigned cond_exe_offs;
u64 cond_exe_gpu_addr;
- volatile u32 *cond_exe_cpu_addr;
+ u32 *cond_exe_cpu_addr;
unsigned int set_q_mode_offs;
u32 *set_q_mode_ptr;
u64 set_q_mode_token;
@@ -470,10 +481,7 @@ static inline void amdgpu_ring_set_preempt_cond_exec(struct amdgpu_ring *ring,
static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
{
- int i = 0;
- while (i <= ring->buf_mask)
- ring->ring[i++] = ring->funcs->nop;
-
+ memset32(ring->ring, ring->funcs->nop, ring->buf_mask + 1);
}
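/*
 * Editor's note: memset32() fills buf_mask + 1 dwords with the nop
 * opcode in one call, replacing the open-coded loop above. A userspace
 * equivalent of what it does (the opcode value is illustrative):
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static void fill32(uint32_t *s, uint32_t v, size_t count)
{
	while (count--)
		*s++ = v;	/* one dword at a time, like memset32() */
}

int main(void)
{
	const uint32_t nop = 0x80000000;	/* illustrative nop opcode */
	const uint32_t buf_mask = 7;		/* ring of 8 dwords */
	uint32_t ring[8];

	fill32(ring, nop, buf_mask + 1);
	for (size_t i = 0; i <= buf_mask; i++)
		assert(ring[i] == nop);
	return 0;
}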
static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
index db5791e1a7ce..5aa830a02d80 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
@@ -89,7 +89,7 @@ void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev, int xcc_id)
int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
{
const u32 *src_ptr;
- volatile u32 *dst_ptr;
+ u32 *dst_ptr;
u32 i;
int r;
@@ -189,7 +189,7 @@ int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev)
void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev)
{
const __le32 *fw_data;
- volatile u32 *dst_ptr;
+ u32 *dst_ptr;
int me, i, max_me;
u32 bo_offset = 0;
u32 table_offset, table_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
index c210625be220..2ce310b31942 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -251,7 +251,7 @@ struct amdgpu_rlc_funcs {
* and it also provides a pointer to it which is used by the firmware
* to load the clear state in some cases.
*/
- void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer);
+ void (*get_csb_buffer)(struct amdgpu_device *adev, u32 *buffer);
int (*get_cp_table_num)(struct amdgpu_device *adev);
int (*resume)(struct amdgpu_device *adev);
void (*stop)(struct amdgpu_device *adev);
@@ -275,19 +275,19 @@ struct amdgpu_rlc {
/* for power gating */
struct amdgpu_bo *save_restore_obj;
uint64_t save_restore_gpu_addr;
- volatile uint32_t *sr_ptr;
+ uint32_t *sr_ptr;
const u32 *reg_list;
u32 reg_list_size;
/* for clear state */
struct amdgpu_bo *clear_state_obj;
uint64_t clear_state_gpu_addr;
- volatile uint32_t *cs_ptr;
+ uint32_t *cs_ptr;
const struct cs_section_def *cs_data;
u32 clear_state_size;
/* for cp tables */
struct amdgpu_bo *cp_table_obj;
uint64_t cp_table_gpu_addr;
- volatile uint32_t *cp_table_ptr;
+ uint32_t *cp_table_ptr;
u32 cp_table_size;
/* safe mode for updating CG/PG state */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
index d45ebfb642ca..a0b479d5fff1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
@@ -67,9 +67,9 @@ static inline u64 amdgpu_seq64_get_va_base(struct amdgpu_device *adev)
int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo_va **bo_va)
{
- u64 seq64_addr, va_flags;
struct amdgpu_bo *bo;
struct drm_exec exec;
+ u64 seq64_addr;
int r;
bo = adev->seq64.sbo;
@@ -94,9 +94,9 @@ int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
seq64_addr = amdgpu_seq64_get_va_base(adev) & AMDGPU_GMC_HOLE_MASK;
- va_flags = amdgpu_gem_va_map_flags(adev, AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_MTYPE_UC);
- r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0, AMDGPU_VA_RESERVED_SEQ64_SIZE,
- va_flags);
+ r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0,
+ AMDGPU_VA_RESERVED_SEQ64_SIZE,
+ AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_MTYPE_UC);
if (r) {
DRM_ERROR("failed to do bo_map on userq sem, err=%d\n", r);
amdgpu_vm_bo_del(adev, *bo_va);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 27ab4e754b2a..aa9ee5dffa45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -123,6 +123,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
case AMDGPU_PL_GWS:
case AMDGPU_PL_OA:
case AMDGPU_PL_DOORBELL:
+ case AMDGPU_PL_MMIO_REMAP:
placement->num_placement = 0;
return;
@@ -226,7 +227,8 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
AMDGPU_FENCE_OWNER_UNDEFINED,
num_dw * 4 + num_bytes,
- AMDGPU_IB_POOL_DELAYED, &job);
+ AMDGPU_IB_POOL_DELAYED, &job,
+ AMDGPU_KERNEL_JOB_ID_TTM_MAP_BUFFER);
if (r)
return r;
@@ -406,7 +408,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
struct dma_fence *wipe_fence = NULL;
r = amdgpu_fill_buffer(abo, 0, NULL, &wipe_fence,
- false);
+ false, AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
if (r) {
goto error;
} else if (wipe_fence) {
@@ -447,7 +449,8 @@ bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
return false;
if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
- res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL)
+ res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL ||
+ res->mem_type == AMDGPU_PL_MMIO_REMAP)
return true;
if (res->mem_type != TTM_PL_VRAM)
@@ -538,10 +541,12 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
old_mem->mem_type == AMDGPU_PL_GWS ||
old_mem->mem_type == AMDGPU_PL_OA ||
old_mem->mem_type == AMDGPU_PL_DOORBELL ||
+ old_mem->mem_type == AMDGPU_PL_MMIO_REMAP ||
new_mem->mem_type == AMDGPU_PL_GDS ||
new_mem->mem_type == AMDGPU_PL_GWS ||
new_mem->mem_type == AMDGPU_PL_OA ||
- new_mem->mem_type == AMDGPU_PL_DOORBELL) {
+ new_mem->mem_type == AMDGPU_PL_DOORBELL ||
+ new_mem->mem_type == AMDGPU_PL_MMIO_REMAP) {
/* Nothing to save here */
amdgpu_bo_move_notify(bo, evict, new_mem);
ttm_bo_move_null(bo, new_mem);
@@ -629,6 +634,12 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
mem->bus.is_iomem = true;
mem->bus.caching = ttm_uncached;
break;
+ case AMDGPU_PL_MMIO_REMAP:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.offset += adev->rmmio_remap.bus_addr;
+ mem->bus.is_iomem = true;
+ mem->bus.caching = ttm_uncached;
+ break;
default:
return -EINVAL;
}
@@ -646,6 +657,8 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
if (bo->resource->mem_type == AMDGPU_PL_DOORBELL)
return ((uint64_t)(adev->doorbell.base + cursor.start)) >> PAGE_SHIFT;
+ else if (bo->resource->mem_type == AMDGPU_PL_MMIO_REMAP)
+ return ((uint64_t)(adev->rmmio_remap.bus_addr + cursor.start)) >> PAGE_SHIFT;
return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
}
@@ -695,7 +708,7 @@ struct amdgpu_ttm_tt {
* Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
* once afterwards to stop HMM tracking
*/
-int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
+int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
struct hmm_range **range)
{
struct ttm_tt *ttm = bo->tbo.ttm;
@@ -732,7 +745,7 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
readonly = amdgpu_ttm_tt_is_readonly(ttm);
r = amdgpu_hmm_range_get_pages(&bo->notifier, start, ttm->num_pages,
- readonly, NULL, pages, range);
+ readonly, NULL, range);
out_unlock:
mmap_read_unlock(mm);
if (r)
@@ -784,12 +797,12 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
* that backs user memory and will ultimately be mapped into the device
* address space.
*/
-void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
+void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct hmm_range *range)
{
unsigned long i;
for (i = 0; i < ttm->num_pages; ++i)
- ttm->pages[i] = pages ? pages[i] : NULL;
+ ttm->pages[i] = range ? hmm_pfn_to_page(range->hmm_pfns[i]) : NULL;
}
/*
@@ -1355,7 +1368,8 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
if (mem && (mem->mem_type == TTM_PL_TT ||
mem->mem_type == AMDGPU_PL_DOORBELL ||
- mem->mem_type == AMDGPU_PL_PREEMPT)) {
+ mem->mem_type == AMDGPU_PL_PREEMPT ||
+ mem->mem_type == AMDGPU_PL_MMIO_REMAP)) {
flags |= AMDGPU_PTE_SYSTEM;
if (ttm->caching == ttm_cached)
@@ -1510,7 +1524,8 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
AMDGPU_FENCE_OWNER_UNDEFINED,
num_dw * 4, AMDGPU_IB_POOL_DELAYED,
- &job);
+ &job,
+ AMDGPU_KERNEL_JOB_ID_TTM_ACCESS_MEMORY_SDMA);
if (r)
goto out;
@@ -1841,6 +1856,59 @@ static void amdgpu_ttm_pools_fini(struct amdgpu_device *adev)
adev->mman.ttm_pools = NULL;
}
+/**
+ * amdgpu_ttm_mmio_remap_bo_init - Allocate the singleton 4K MMIO_REMAP BO
+ * @adev: amdgpu device
+ *
+ * Allocates a one-page (4K) GEM BO in AMDGPU_GEM_DOMAIN_MMIO_REMAP when the
+ * hardware exposes a remap base (adev->rmmio_remap.bus_addr) and the host
+ * PAGE_SIZE is <= AMDGPU_GPU_PAGE_SIZE (4K). The BO is created as a regular
+ * GEM object (amdgpu_bo_create).
+ *
+ * Return:
+ * * 0 on success or intentional skip (feature not present/unsupported)
+ * * negative errno on allocation failure
+ */
+static int amdgpu_ttm_mmio_remap_bo_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_bo_param bp;
+ int r;
+
+ /* Skip if HW doesn't expose remap, or if PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE (4K). */
+ if (!adev->rmmio_remap.bus_addr || PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE)
+ return 0;
+
+ memset(&bp, 0, sizeof(bp));
+
+ /* Create exactly one GEM BO in the MMIO_REMAP domain. */
+ bp.type = ttm_bo_type_device; /* userspace-mappable GEM */
+ bp.size = AMDGPU_GPU_PAGE_SIZE; /* 4K */
+ bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
+ bp.domain = AMDGPU_GEM_DOMAIN_MMIO_REMAP;
+ bp.flags = 0;
+ bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
+ r = amdgpu_bo_create(adev, &bp, &adev->rmmio_remap.bo);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+/**
+ * amdgpu_ttm_mmio_remap_bo_fini - Free the singleton MMIO_REMAP BO
+ * @adev: amdgpu device
+ *
+ * Frees the kernel-owned MMIO_REMAP BO if it was allocated by
+ * amdgpu_ttm_mmio_remap_bo_init().
+ */
+static void amdgpu_ttm_mmio_remap_bo_fini(struct amdgpu_device *adev)
+{
+ amdgpu_bo_unref(&adev->rmmio_remap.bo);
+ adev->rmmio_remap.bo = NULL;
+}
+
/*
* amdgpu_ttm_init - Init the memory management (ttm) as well as various
* gtt/vram related fields.
@@ -1877,11 +1945,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
}
adev->mman.initialized = true;
- /* Initialize VRAM pool with all of VRAM divided into pages */
- r = amdgpu_vram_mgr_init(adev);
- if (r) {
- dev_err(adev->dev, "Failed initializing VRAM heap.\n");
- return r;
+ if (!adev->gmc.is_app_apu) {
+ /* Initialize VRAM pool with all of VRAM divided into pages */
+ r = amdgpu_vram_mgr_init(adev);
+ if (r) {
+ dev_err(adev->dev, "Failed initializing VRAM heap.\n");
+ return r;
+ }
}
/* Change the size here instead of the init above so only lpfn is affected */
@@ -2008,6 +2078,18 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r;
}
+ /* Initialize MMIO-remap pool (single page 4K) */
+ r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_MMIO_REMAP, 1);
+ if (r) {
+ dev_err(adev->dev, "Failed initializing MMIO-remap heap.\n");
+ return r;
+ }
+
+ /* Allocate the singleton MMIO_REMAP BO (4K) if supported */
+ r = amdgpu_ttm_mmio_remap_bo_init(adev);
+ if (r)
+ return r;
+
/* Initialize preemptible memory pool */
r = amdgpu_preempt_mgr_init(adev);
if (r) {
@@ -2070,6 +2152,8 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
}
amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
&adev->mman.sdma_access_ptr);
+
+ amdgpu_ttm_mmio_remap_bo_fini(adev);
amdgpu_ttm_fw_reserve_vram_fini(adev);
amdgpu_ttm_drv_reserve_vram_fini(adev);
@@ -2082,7 +2166,8 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
drm_dev_exit(idx);
}
- amdgpu_vram_mgr_fini(adev);
+ if (!adev->gmc.is_app_apu)
+ amdgpu_vram_mgr_fini(adev);
amdgpu_gtt_mgr_fini(adev);
amdgpu_preempt_mgr_fini(adev);
amdgpu_doorbell_fini(adev);
@@ -2091,6 +2176,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_DOORBELL);
+ ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_MMIO_REMAP);
ttm_device_fini(&adev->mman.bdev);
adev->mman.initialized = false;
dev_info(adev->dev, "amdgpu: ttm finalized\n");
@@ -2167,7 +2253,7 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
struct dma_resv *resv,
bool vm_needs_flush,
struct amdgpu_job **job,
- bool delayed)
+ bool delayed, u64 k_job_id)
{
enum amdgpu_ib_pool_type pool = direct_submit ?
AMDGPU_IB_POOL_DIRECT :
@@ -2177,7 +2263,7 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
&adev->mman.high_pr;
r = amdgpu_job_alloc_with_ib(adev, entity,
AMDGPU_FENCE_OWNER_UNDEFINED,
- num_dw * 4, pool, job);
+ num_dw * 4, pool, job, k_job_id);
if (r)
return r;
@@ -2217,7 +2303,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
num_loops = DIV_ROUND_UP(byte_count, max_bytes);
num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
r = amdgpu_ttm_prepare_job(adev, direct_submit, num_dw,
- resv, vm_needs_flush, &job, false);
+ resv, vm_needs_flush, &job, false,
+ AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER);
if (r)
return r;
@@ -2252,7 +2339,8 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
uint64_t dst_addr, uint32_t byte_count,
struct dma_resv *resv,
struct dma_fence **fence,
- bool vm_needs_flush, bool delayed)
+ bool vm_needs_flush, bool delayed,
+ u64 k_job_id)
{
struct amdgpu_device *adev = ring->adev;
unsigned int num_loops, num_dw;
@@ -2265,7 +2353,7 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes);
num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8);
r = amdgpu_ttm_prepare_job(adev, false, num_dw, resv, vm_needs_flush,
- &job, delayed);
+ &job, delayed, k_job_id);
if (r)
return r;
@@ -2335,7 +2423,8 @@ int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
goto err;
r = amdgpu_ttm_fill_mem(ring, 0, addr, size, resv,
- &next, true, true);
+ &next, true, true,
+ AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER);
if (r)
goto err;
@@ -2354,7 +2443,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data,
struct dma_resv *resv,
struct dma_fence **f,
- bool delayed)
+ bool delayed,
+ u64 k_job_id)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
@@ -2384,7 +2474,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
goto error;
r = amdgpu_ttm_fill_mem(ring, src_data, to, cur_size, resv,
- &next, true, delayed);
+ &next, true, delayed, k_job_id);
if (r)
goto error;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 2309df3f68a9..0be2728aa872 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -34,7 +34,8 @@
#define AMDGPU_PL_OA (TTM_PL_PRIV + 2)
#define AMDGPU_PL_PREEMPT (TTM_PL_PRIV + 3)
#define AMDGPU_PL_DOORBELL (TTM_PL_PRIV + 4)
-#define __AMDGPU_PL_NUM (TTM_PL_PRIV + 5)
+#define AMDGPU_PL_MMIO_REMAP (TTM_PL_PRIV + 5)
+#define __AMDGPU_PL_NUM (TTM_PL_PRIV + 6)
#define AMDGPU_GTT_MAX_TRANSFER_SIZE 512
#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2
@@ -182,14 +183,15 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data,
struct dma_resv *resv,
struct dma_fence **fence,
- bool delayed);
+ bool delayed,
+ u64 k_job_id);
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type);
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
-int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
+int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
struct hmm_range **range);
void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
struct hmm_range *range);
@@ -197,7 +199,6 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
struct hmm_range *range);
#else
static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
- struct page **pages,
struct hmm_range **range)
{
return -EPERM;
@@ -213,7 +214,7 @@ static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
}
#endif
-void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
+void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct hmm_range *range);
int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
uint64_t *user_addr);
int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index c92b8794aa73..2e039fb778ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -252,6 +252,7 @@ int amdgpu_umc_pasid_poison_handler(struct amdgpu_device *adev,
block, pasid, pasid_fn, data, reset);
if (!ret) {
atomic_inc(&con->page_retirement_req_cnt);
+ atomic_inc(&con->poison_consumption_count);
wake_up(&con->page_retirement_wq);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index c3ace8030530..1add21160d21 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -44,8 +44,41 @@ u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
return userq_ip_mask;
}
+int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr,
+ u64 expected_size)
+{
+ struct amdgpu_bo_va_mapping *va_map;
+ u64 user_addr;
+ u64 size;
+ int r = 0;
+
+ user_addr = (addr & AMDGPU_GMC_HOLE_MASK) >> AMDGPU_GPU_PAGE_SHIFT;
+ size = expected_size >> AMDGPU_GPU_PAGE_SHIFT;
+
+ r = amdgpu_bo_reserve(vm->root.bo, false);
+ if (r)
+ return r;
+
+ va_map = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
+ if (!va_map) {
+ r = -EINVAL;
+ goto out_err;
+ }
+ /* Only validate that the userq range is resident within the VM mapping */
+ if (user_addr >= va_map->start &&
+ va_map->last - user_addr + 1 >= size) {
+ amdgpu_bo_unreserve(vm->root.bo);
+ return 0;
+ }
+
+ r = -EINVAL;
+out_err:
+ amdgpu_bo_unreserve(vm->root.bo);
+ return r;
+}
+
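/*
 * Editor's note: the validation above works in GPU pages and treats
 * va_map->last as an inclusive page number, hence "last - start + 1 >=
 * size". A standalone restatement of that arithmetic:
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define GPU_PAGE_SHIFT 12	/* matches AMDGPU_GPU_PAGE_SHIFT (4K) */

static bool va_range_fits(uint64_t map_start, uint64_t map_last,
			  uint64_t addr, uint64_t bytes)
{
	uint64_t pfn = addr >> GPU_PAGE_SHIFT;
	uint64_t npages = bytes >> GPU_PAGE_SHIFT;

	return pfn >= map_start && map_last - pfn + 1 >= npages;
}

int main(void)
{
	/* A 16-page mapping covering page numbers 0x100..0x10f. */
	assert(va_range_fits(0x100, 0x10f, 0x100000, 16ULL << GPU_PAGE_SHIFT));
	assert(!va_range_fits(0x100, 0x10f, 0x100000, 17ULL << GPU_PAGE_SHIFT));
	return 0;
}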
static int
-amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
+amdgpu_userq_preempt_helper(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_usermode_queue *queue)
{
struct amdgpu_device *adev = uq_mgr->adev;
@@ -54,6 +87,49 @@ amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
int r = 0;
if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
+ r = userq_funcs->preempt(uq_mgr, queue);
+ if (r) {
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ } else {
+ queue->state = AMDGPU_USERQ_STATE_PREEMPTED;
+ }
+ }
+
+ return r;
+}
+
+static int
+amdgpu_userq_restore_helper(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *userq_funcs =
+ adev->userq_funcs[queue->queue_type];
+ int r = 0;
+
+ if (queue->state == AMDGPU_USERQ_STATE_PREEMPTED) {
+ r = userq_funcs->restore(uq_mgr, queue);
+ if (r) {
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ } else {
+ queue->state = AMDGPU_USERQ_STATE_MAPPED;
+ }
+ }
+
+ return r;
+}
+
+static int
+amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *userq_funcs =
+ adev->userq_funcs[queue->queue_type];
+ int r = 0;
+
+ if ((queue->state == AMDGPU_USERQ_STATE_MAPPED) ||
+ (queue->state == AMDGPU_USERQ_STATE_PREEMPTED)) {
r = userq_funcs->unmap(uq_mgr, queue);
if (r)
queue->state = AMDGPU_USERQ_STATE_HUNG;
@@ -112,22 +188,6 @@ amdgpu_userq_cleanup(struct amdgpu_userq_mgr *uq_mgr,
kfree(queue);
}
-int
-amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr)
-{
- struct amdgpu_usermode_queue *queue;
- int queue_id;
- int ret = 0;
-
- mutex_lock(&uq_mgr->userq_mutex);
- /* Resume all the queues for this process */
- idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id)
- ret += queue->state == AMDGPU_USERQ_STATE_MAPPED;
-
- mutex_unlock(&uq_mgr->userq_mutex);
- return ret;
-}
-
static struct amdgpu_usermode_queue *
amdgpu_userq_find(struct amdgpu_userq_mgr *uq_mgr, int qid)
{
@@ -323,6 +383,11 @@ amdgpu_userq_destroy(struct drm_file *filp, int queue_id)
debugfs_remove_recursive(queue->debugfs_queue);
#endif
r = amdgpu_userq_unmap_helper(uq_mgr, queue);
+ /* TODO: a userq HW unmap error requires a reset to recover */
+ if (unlikely(r != AMDGPU_USERQ_STATE_UNMAPPED)) {
+ drm_warn(adev_to_drm(uq_mgr->adev), "trying to destroy a HW mapping userq\n");
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ }
amdgpu_userq_cleanup(uq_mgr, queue, queue_id);
mutex_unlock(&uq_mgr->userq_mutex);
@@ -364,7 +429,7 @@ static int amdgpu_mqd_info_read(struct seq_file *m, void *unused)
return -EINVAL;
}
- seq_printf(m, "queue_type %d\n", queue->queue_type);
+ seq_printf(m, "queue_type: %d\n", queue->queue_type);
seq_printf(m, "mqd_gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(queue->mqd.obj));
amdgpu_bo_unreserve(bo);
@@ -404,27 +469,10 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
(args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >>
AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT;
- /* Usermode queues are only supported for GFX IP as of now */
- if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
- args->in.ip_type != AMDGPU_HW_IP_DMA &&
- args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
- drm_file_err(uq_mgr->file, "Usermode queue doesn't support IP type %u\n",
- args->in.ip_type);
- return -EINVAL;
- }
-
r = amdgpu_userq_priority_permit(filp, priority);
if (r)
return r;
- if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) &&
- (args->in.ip_type != AMDGPU_HW_IP_GFX) &&
- (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
- !amdgpu_is_tmz(adev)) {
- drm_file_err(uq_mgr->file, "Secure only supported on GFX/Compute queues\n");
- return -EINVAL;
- }
-
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
drm_file_err(uq_mgr->file, "pm_runtime_get_sync() failed for userqueue create\n");
@@ -456,6 +504,15 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
r = -ENOMEM;
goto unlock;
}
+
+ /* Validate the userq virtual addresses. */
+ if (amdgpu_userq_input_va_validate(&fpriv->vm, args->in.queue_va, args->in.queue_size) ||
+ amdgpu_userq_input_va_validate(&fpriv->vm, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
+ amdgpu_userq_input_va_validate(&fpriv->vm, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
+ r = -EINVAL;
+ kfree(queue);
+ goto unlock;
+ }
queue->doorbell_handle = args->in.doorbell_handle;
queue->queue_type = args->in.ip_type;
queue->vm = &fpriv->vm;
@@ -471,6 +528,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
if (index == (uint64_t)-EINVAL) {
drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
kfree(queue);
+ r = -EINVAL;
goto unlock;
}
@@ -542,22 +600,45 @@ unlock:
return r;
}
-int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
- struct drm_file *filp)
+static int amdgpu_userq_input_args_validate(struct drm_device *dev,
+ union drm_amdgpu_userq *args,
+ struct drm_file *filp)
{
- union drm_amdgpu_userq *args = data;
- int r;
+ struct amdgpu_device *adev = drm_to_adev(dev);
switch (args->in.op) {
case AMDGPU_USERQ_OP_CREATE:
if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK |
AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE))
return -EINVAL;
- r = amdgpu_userq_create(filp, args);
- if (r)
- drm_file_err(filp, "Failed to create usermode queue\n");
- break;
+ /* Usermode queues are only supported for GFX, compute and SDMA IPs for now */
+ if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
+ args->in.ip_type != AMDGPU_HW_IP_DMA &&
+ args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
+ drm_file_err(filp, "Usermode queue doesn't support IP type %u\n",
+ args->in.ip_type);
+ return -EINVAL;
+ }
+
+ if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) &&
+ (args->in.ip_type != AMDGPU_HW_IP_GFX) &&
+ (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
+ !amdgpu_is_tmz(adev)) {
+ drm_file_err(filp, "Secure only supported on GFX/Compute queues\n");
+ return -EINVAL;
+ }
+ if (args->in.queue_va == AMDGPU_BO_INVALID_OFFSET ||
+ args->in.queue_va == 0 ||
+ args->in.queue_size == 0) {
+ drm_file_err(filp, "invalidate userq queue va or size\n");
+ return -EINVAL;
+ }
+ if (!args->in.wptr_va || !args->in.rptr_va) {
+ drm_file_err(filp, "invalidate userq queue rptr or wptr\n");
+ return -EINVAL;
+ }
+ break;
case AMDGPU_USERQ_OP_FREE:
if (args->in.ip_type ||
args->in.doorbell_handle ||
@@ -567,10 +648,34 @@ int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
args->in.queue_size ||
args->in.rptr_va ||
args->in.wptr_va ||
- args->in.wptr_va ||
args->in.mqd ||
args->in.mqd_size)
return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ union drm_amdgpu_userq *args = data;
+ int r;
+
+ if (amdgpu_userq_input_args_validate(dev, args, filp) < 0)
+ return -EINVAL;
+
+ switch (args->in.op) {
+ case AMDGPU_USERQ_OP_CREATE:
+ r = amdgpu_userq_create(filp, args);
+ if (r)
+ drm_file_err(filp, "Failed to create usermode queue\n");
+ break;
+
+ case AMDGPU_USERQ_OP_FREE:
r = amdgpu_userq_destroy(filp, args->in.queue_id);
if (r)
drm_file_err(filp, "Failed to destroy usermode queue\n");
@@ -593,7 +698,7 @@ amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
/* Resume all the queues for this process */
idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
- r = amdgpu_userq_map_helper(uq_mgr, queue);
+ r = amdgpu_userq_restore_helper(uq_mgr, queue);
if (r)
ret = r;
}
@@ -603,108 +708,106 @@ amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
return ret;
}
+static int amdgpu_userq_validate_vm(void *param, struct amdgpu_bo *bo)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+
+ amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
+ return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+}
+
+/* Handle all BOs on the invalidated list, validate them and update the PTs */
static int
-amdgpu_userq_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
+amdgpu_userq_bo_validate(struct amdgpu_device *adev, struct drm_exec *exec,
+ struct amdgpu_vm *vm)
{
struct ttm_operation_ctx ctx = { false, false };
+ struct amdgpu_bo_va *bo_va;
+ struct amdgpu_bo *bo;
int ret;
- amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->invalidated)) {
+ bo_va = list_first_entry(&vm->invalidated,
+ struct amdgpu_bo_va,
+ base.vm_status);
+ spin_unlock(&vm->status_lock);
- ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (ret)
- DRM_ERROR("Fail to validate\n");
+ bo = bo_va->base.bo;
+ ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 2);
+ if (unlikely(ret))
+ return ret;
- return ret;
+ amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret)
+ return ret;
+
+ /* This moves the bo_va to the done list */
+ ret = amdgpu_vm_bo_update(adev, bo_va, false);
+ if (ret)
+ return ret;
+
+ spin_lock(&vm->status_lock);
+ }
+ spin_unlock(&vm->status_lock);
+
+ return 0;
}
+/* Make sure the whole VM is ready to be used */
static int
-amdgpu_userq_validate_bos(struct amdgpu_userq_mgr *uq_mgr)
+amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr)
{
struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
- struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va *bo_va;
- struct ww_acquire_ctx *ticket;
struct drm_exec exec;
- struct amdgpu_bo *bo;
- struct dma_resv *resv;
- bool clear, unlock;
- int ret = 0;
+ int ret;
drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
drm_exec_until_all_locked(&exec) {
- ret = amdgpu_vm_lock_pd(vm, &exec, 2);
+ ret = amdgpu_vm_lock_pd(vm, &exec, 1);
drm_exec_retry_on_contention(&exec);
- if (unlikely(ret)) {
- drm_file_err(uq_mgr->file, "Failed to lock PD\n");
+ if (unlikely(ret))
goto unlock_all;
- }
-
- /* Lock the done list */
- list_for_each_entry(bo_va, &vm->done, base.vm_status) {
- bo = bo_va->base.bo;
- if (!bo)
- continue;
-
- ret = drm_exec_lock_obj(&exec, &bo->tbo.base);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(ret))
- goto unlock_all;
- }
- }
-
- spin_lock(&vm->status_lock);
- while (!list_empty(&vm->moved)) {
- bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
- base.vm_status);
- spin_unlock(&vm->status_lock);
- /* Per VM BOs never need to bo cleared in the page tables */
- ret = amdgpu_vm_bo_update(adev, bo_va, false);
- if (ret)
+ ret = amdgpu_vm_lock_done_list(vm, &exec, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret))
goto unlock_all;
- spin_lock(&vm->status_lock);
- }
- ticket = &exec.ticket;
- while (!list_empty(&vm->invalidated)) {
- bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
- base.vm_status);
- resv = bo_va->base.bo->tbo.base.resv;
- spin_unlock(&vm->status_lock);
-
- bo = bo_va->base.bo;
- ret = amdgpu_userq_validate_vm_bo(NULL, bo);
- if (ret) {
- drm_file_err(uq_mgr->file, "Failed to validate BO\n");
+ /* This validates PDs, PTs and per VM BOs */
+ ret = amdgpu_vm_validate(adev, vm, NULL,
+ amdgpu_userq_validate_vm,
+ NULL);
+ if (unlikely(ret))
goto unlock_all;
- }
- /* Try to reserve the BO to avoid clearing its ptes */
- if (!adev->debug_vm && dma_resv_trylock(resv)) {
- clear = false;
- unlock = true;
- /* The caller is already holding the reservation lock */
- } else if (dma_resv_locking_ctx(resv) == ticket) {
- clear = false;
- unlock = false;
- /* Somebody else is using the BO right now */
- } else {
- clear = true;
- unlock = false;
- }
+ /* This locks and validates the remaining evicted BOs */
+ ret = amdgpu_userq_bo_validate(adev, &exec, vm);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret))
+ goto unlock_all;
+ }
- ret = amdgpu_vm_bo_update(adev, bo_va, clear);
+ ret = amdgpu_vm_handle_moved(adev, vm, NULL);
+ if (ret)
+ goto unlock_all;
- if (unlock)
- dma_resv_unlock(resv);
- if (ret)
- goto unlock_all;
+ ret = amdgpu_vm_update_pdes(adev, vm, false);
+ if (ret)
+ goto unlock_all;
- spin_lock(&vm->status_lock);
- }
- spin_unlock(&vm->status_lock);
+ /*
+ * We need to wait for all VM updates to finish before restarting the
+ * queues. Walking the done list like this is safe now since
+ * everything is locked in place.
+ */
+ list_for_each_entry(bo_va, &vm->done, base.vm_status)
+ dma_fence_wait(bo_va->last_pt_update, false);
+ dma_fence_wait(vm->last_update, false);
ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec);
if (ret)
@@ -725,7 +828,7 @@ static void amdgpu_userq_restore_worker(struct work_struct *work)
mutex_lock(&uq_mgr->userq_mutex);
- ret = amdgpu_userq_validate_bos(uq_mgr);
+ ret = amdgpu_userq_vm_validate(uq_mgr);
if (ret) {
drm_file_err(uq_mgr->file, "Failed to validate BOs to restore\n");
goto unlock;
@@ -750,7 +853,7 @@ amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
/* Try to unmap all the queues in this process ctx */
idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
- r = amdgpu_userq_unmap_helper(uq_mgr, queue);
+ r = amdgpu_userq_preempt_helper(uq_mgr, queue);
if (r)
ret = r;
}
@@ -876,7 +979,10 @@ int amdgpu_userq_suspend(struct amdgpu_device *adev)
cancel_delayed_work_sync(&uqm->resume_work);
mutex_lock(&uqm->userq_mutex);
idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
- r = amdgpu_userq_unmap_helper(uqm, queue);
+ if (adev->in_s0ix)
+ r = amdgpu_userq_preempt_helper(uqm, queue);
+ else
+ r = amdgpu_userq_unmap_helper(uqm, queue);
if (r)
ret = r;
}
@@ -901,7 +1007,10 @@ int amdgpu_userq_resume(struct amdgpu_device *adev)
list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
mutex_lock(&uqm->userq_mutex);
idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
- r = amdgpu_userq_map_helper(uqm, queue);
+ if (adev->in_s0ix)
+ r = amdgpu_userq_restore_helper(uqm, queue);
+ else
+ r = amdgpu_userq_map_helper(uqm, queue);
if (r)
ret = r;
}
@@ -935,7 +1044,7 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
(queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
(queue->xcp_id == idx)) {
- r = amdgpu_userq_unmap_helper(uqm, queue);
+ r = amdgpu_userq_preempt_helper(uqm, queue);
if (r)
ret = r;
}
@@ -969,7 +1078,7 @@ int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
(queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
(queue->xcp_id == idx)) {
- r = amdgpu_userq_map_helper(uqm, queue);
+ r = amdgpu_userq_restore_helper(uqm, queue);
if (r)
ret = r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
index b1ca91b7cda4..c027dd916672 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
@@ -78,6 +78,12 @@ struct amdgpu_userq_funcs {
struct amdgpu_usermode_queue *queue);
int (*map)(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_usermode_queue *queue);
+ int (*preempt)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue);
+ int (*restore)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue);
+ int (*detect_and_reset)(struct amdgpu_device *adev,
+ int queue_type);
};
/* Usermode queues for gfx */
@@ -114,8 +120,6 @@ void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
void amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_eviction_fence *ev_fence);
-int amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr);
-
void amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *userq_mgr,
struct amdgpu_eviction_fence_mgr *evf_mgr);
@@ -133,4 +137,6 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
u32 idx);
+int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr,
+ u64 expected_size);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
index c2a983ff23c9..761bad98da3e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
@@ -67,6 +67,14 @@ static u64 amdgpu_userq_fence_read(struct amdgpu_userq_fence_driver *fence_drv)
return le64_to_cpu(*fence_drv->cpu_addr);
}
+static void
+amdgpu_userq_fence_write(struct amdgpu_userq_fence_driver *fence_drv,
+ u64 seq)
+{
+ if (fence_drv->cpu_addr)
+ *fence_drv->cpu_addr = cpu_to_le64(seq);
+}
+
int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
struct amdgpu_usermode_queue *userq)
{
@@ -276,7 +284,7 @@ static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
/* Check if hardware has already processed the job */
spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
- if (!dma_fence_is_signaled_locked(fence))
+ if (!dma_fence_is_signaled(fence))
list_add_tail(&userq_fence->link, &fence_drv->fences);
else
dma_fence_put(fence);
@@ -408,6 +416,40 @@ static void amdgpu_userq_fence_cleanup(struct dma_fence *fence)
dma_fence_put(fence);
}
+static void
+amdgpu_userq_fence_driver_set_error(struct amdgpu_userq_fence *fence,
+ int error)
+{
+ struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
+ unsigned long flags;
+ struct dma_fence *f;
+
+ spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
+
+ f = rcu_dereference_protected(&fence->base,
+ lockdep_is_held(&fence_drv->fence_list_lock));
+ if (f && !dma_fence_is_signaled_locked(f))
+ dma_fence_set_error(f, error);
+ spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
+}
+
+void
+amdgpu_userq_fence_driver_force_completion(struct amdgpu_usermode_queue *userq)
+{
+ struct dma_fence *f = userq->last_fence;
+
+ if (f) {
+ struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
+ struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
+ u64 wptr = fence->base.seqno;
+
+ amdgpu_userq_fence_driver_set_error(fence, -ECANCELED);
+ amdgpu_userq_fence_write(fence_drv, wptr);
+ amdgpu_userq_fence_driver_process(fence_drv);
+ }
+}
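A hedged sketch of the intended call site (the surrounding hang-handling path is hypothetical): when a queue is torn down or reset with work still outstanding, force-completing the last fence wakes all waiters with an error instead of leaving them blocked.

/* Illustration only: cancel outstanding work on a dead user queue */
static void example_cancel_userq(struct amdgpu_usermode_queue *userq)
{
	/* marks the fence with -ECANCELED, bumps the seqno in the fence
	 * memory and kicks the normal processing path */
	amdgpu_userq_fence_driver_force_completion(userq);
}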
+
int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
index 97a125ab8a78..d76add2afc77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
@@ -67,6 +67,7 @@ int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
struct amdgpu_usermode_queue *userq);
void amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq);
void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv);
+void amdgpu_userq_fence_driver_force_completion(struct amdgpu_usermode_queue *userq);
void amdgpu_userq_fence_driver_destroy(struct kref *ref);
int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h
new file mode 100644
index 000000000000..1e40ca3b1584
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef AMDGPU_UTILS_H_
+#define AMDGPU_UTILS_H_
+
+/* ---------- Generic 2-bit capability attribute encoding ----------
+ * 00 INVALID, 01 RO, 10 WO, 11 RW
+ */
+enum amdgpu_cap_attr {
+ AMDGPU_CAP_ATTR_INVALID = 0,
+ AMDGPU_CAP_ATTR_RO = 1 << 0,
+ AMDGPU_CAP_ATTR_WO = 1 << 1,
+ AMDGPU_CAP_ATTR_RW = (AMDGPU_CAP_ATTR_RO | AMDGPU_CAP_ATTR_WO),
+};
+
+#define AMDGPU_CAP_ATTR_BITS 2
+#define AMDGPU_CAP_ATTR_MAX ((1U << AMDGPU_CAP_ATTR_BITS) - 1)
+
+/* Internal macro: generates the helper functions for a given enum NAME */
+#define DECLARE_ATTR_CAP_CLASS_HELPERS(NAME) \
+enum { NAME##_BITMAP_BITS = NAME##_COUNT * AMDGPU_CAP_ATTR_BITS }; \
+struct NAME##_caps { \
+ DECLARE_BITMAP(bmap, NAME##_BITMAP_BITS); \
+}; \
+static inline unsigned int NAME##_ATTR_START(enum NAME##_cap_id cap) \
+{ return (unsigned int)cap * AMDGPU_CAP_ATTR_BITS; } \
+static inline void NAME##_attr_init(struct NAME##_caps *c) \
+{ if (c) bitmap_zero(c->bmap, NAME##_BITMAP_BITS); } \
+static inline int NAME##_attr_set(struct NAME##_caps *c, \
+ enum NAME##_cap_id cap, enum amdgpu_cap_attr attr) \
+{ \
+ if (!c) \
+ return -EINVAL; \
+ if (cap >= NAME##_COUNT) \
+ return -EINVAL; \
+ if ((unsigned int)attr > AMDGPU_CAP_ATTR_MAX) \
+ return -EINVAL; \
+ bitmap_write(c->bmap, (unsigned long)attr, \
+ NAME##_ATTR_START(cap), AMDGPU_CAP_ATTR_BITS); \
+ return 0; \
+} \
+static inline int NAME##_attr_get(const struct NAME##_caps *c, \
+ enum NAME##_cap_id cap, enum amdgpu_cap_attr *out) \
+{ \
+ unsigned long v; \
+ if (!c || !out) \
+ return -EINVAL; \
+ if (cap >= NAME##_COUNT) \
+ return -EINVAL; \
+ v = bitmap_read(c->bmap, NAME##_ATTR_START(cap), AMDGPU_CAP_ATTR_BITS); \
+ *out = (enum amdgpu_cap_attr)v; \
+ return 0; \
+} \
+static inline bool NAME##_cap_is_ro(const struct NAME##_caps *c, enum NAME##_cap_id id) \
+{ enum amdgpu_cap_attr a; return !NAME##_attr_get(c, id, &a) && a == AMDGPU_CAP_ATTR_RO; } \
+static inline bool NAME##_cap_is_wo(const struct NAME##_caps *c, enum NAME##_cap_id id) \
+{ enum amdgpu_cap_attr a; return !NAME##_attr_get(c, id, &a) && a == AMDGPU_CAP_ATTR_WO; } \
+static inline bool NAME##_cap_is_rw(const struct NAME##_caps *c, enum NAME##_cap_id id) \
+{ enum amdgpu_cap_attr a; return !NAME##_attr_get(c, id, &a) && a == AMDGPU_CAP_ATTR_RW; }
+
+/* Element expander for enum creation */
+#define _CAP_ENUM_ELEM(x) x,
+
+/* Public macro: declare enum + helpers from an X-macro list */
+#define DECLARE_ATTR_CAP_CLASS(NAME, LIST_MACRO) \
+ enum NAME##_cap_id { LIST_MACRO(_CAP_ENUM_ELEM) NAME##_COUNT }; \
+ DECLARE_ATTR_CAP_CLASS_HELPERS(NAME)
+
+#endif /* AMDGPU_UTILS_H_ */
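This header is instantiated for SR-IOV capabilities further down in this patch (AMDGPU_VIRT_CAPS_LIST in amdgpu_virt.h). A minimal sketch of declaring and querying a class; all MY_BLOCK_*/my_block names are hypothetical:

#define MY_BLOCK_CAPS_LIST(X) X(MY_BLOCK_CAP_FOO) X(MY_BLOCK_CAP_BAR)
DECLARE_ATTR_CAP_CLASS(my_block, MY_BLOCK_CAPS_LIST);
/* expands to enum my_block_cap_id { MY_BLOCK_CAP_FOO, MY_BLOCK_CAP_BAR,
 * my_block_COUNT }, struct my_block_caps and the attr_init/set/get helpers */

static void my_block_caps_demo(void)
{
	struct my_block_caps caps;
	enum amdgpu_cap_attr attr;

	my_block_attr_init(&caps);	/* every capability starts INVALID */
	my_block_attr_set(&caps, MY_BLOCK_CAP_FOO, AMDGPU_CAP_ATTR_RW);

	if (!my_block_attr_get(&caps, MY_BLOCK_CAP_FOO, &attr) &&
	    attr == AMDGPU_CAP_ATTR_RW)
		/* FOO is readable and writable */;

	/* false: BAR was never set, so it is still INVALID */
	my_block_cap_is_ro(&caps, MY_BLOCK_CAP_BAR);
}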
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 74758b5ffc6c..5c38f0d30c87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1136,7 +1136,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
r = amdgpu_job_alloc_with_ib(ring->adev, &adev->uvd.entity,
AMDGPU_FENCE_OWNER_UNDEFINED,
64, direct ? AMDGPU_IB_POOL_DIRECT :
- AMDGPU_IB_POOL_DELAYED, &job);
+ AMDGPU_IB_POOL_DELAYED, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index b9060bcd4806..ce318f5de047 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -449,7 +449,7 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
AMDGPU_FENCE_OWNER_UNDEFINED,
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -540,7 +540,8 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
AMDGPU_FENCE_OWNER_UNDEFINED,
ib_size_dw * 4,
direct ? AMDGPU_IB_POOL_DIRECT :
- AMDGPU_IB_POOL_DELAYED, &job);
+ AMDGPU_IB_POOL_DELAYED, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index f1f67521c29c..5e0786ea911b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -92,6 +92,7 @@ MODULE_FIRMWARE(FIRMWARE_VCN5_0_0);
MODULE_FIRMWARE(FIRMWARE_VCN5_0_1);
static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
+static void amdgpu_vcn_reg_dump_fini(struct amdgpu_device *adev);
int amdgpu_vcn_early_init(struct amdgpu_device *adev, int i)
{
@@ -184,16 +185,16 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i)
dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
dev_info(adev->dev,
- "Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
- enc_major, enc_minor, dec_ver, vep, fw_rev);
+ "[VCN instance %d] Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
+ i, enc_major, enc_minor, dec_ver, vep, fw_rev);
} else {
unsigned int version_major, version_minor, family_id;
family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
- dev_info(adev->dev, "Found VCN firmware Version: %u.%u Family ID: %u\n",
- version_major, version_minor, family_id);
+ dev_info(adev->dev, "[VCN instance %d] Found VCN firmware Version: %u.%u Family ID: %u\n",
+ i, version_major, version_minor, family_id);
}
bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
@@ -256,12 +257,12 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i)
return 0;
}
-int amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i)
+void amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i)
{
int j;
if (adev->vcn.harvest_config & (1 << i))
- return 0;
+ return;
amdgpu_bo_free_kernel(
&adev->vcn.inst[i].dpg_sram_bo,
@@ -285,10 +286,12 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i)
amdgpu_ucode_release(&adev->vcn.inst[0].fw);
adev->vcn.inst[i].fw = NULL;
}
+
+ if (adev->vcn.reg_list)
+ amdgpu_vcn_reg_dump_fini(adev);
+
mutex_destroy(&adev->vcn.inst[i].vcn_pg_lock);
mutex_destroy(&adev->vcn.inst[i].vcn1_jpeg1_workaround);
-
- return 0;
}
bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
@@ -352,8 +355,6 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i)
if (adev->vcn.harvest_config & (1 << i))
return 0;
- cancel_delayed_work_sync(&adev->vcn.inst[i].idle_work);
-
/* err_event_athub and dpc recovery will corrupt VCPU buffer, so we need to
* restore fw data and clear buffer in amdgpu_vcn_resume() */
if (in_ras_intr || adev->pcie_reset_ctx.in_link_reset)
@@ -405,6 +406,54 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev, int i)
return 0;
}
+void amdgpu_vcn_get_profile(struct amdgpu_device *adev)
+{
+ int r;
+
+ mutex_lock(&adev->vcn.workload_profile_mutex);
+
+ if (adev->vcn.workload_profile_active) {
+ mutex_unlock(&adev->vcn.workload_profile_mutex);
+ return;
+ }
+ r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
+ true);
+ if (r)
+ dev_warn(adev->dev,
+ "(%d) failed to enable video power profile mode\n", r);
+ else
+ adev->vcn.workload_profile_active = true;
+ mutex_unlock(&adev->vcn.workload_profile_mutex);
+}
+
+void amdgpu_vcn_put_profile(struct amdgpu_device *adev)
+{
+ bool pg = true;
+ int r, i;
+
+ mutex_lock(&adev->vcn.workload_profile_mutex);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.inst[i].cur_state != AMD_PG_STATE_GATE) {
+ pg = false;
+ break;
+ }
+ }
+
+ if (pg) {
+ r = amdgpu_dpm_switch_power_profile(
+ adev, PP_SMC_POWER_PROFILE_VIDEO, false);
+ if (r)
+ dev_warn(
+ adev->dev,
+ "(%d) failed to disable video power profile mode\n",
+ r);
+ else
+ adev->vcn.workload_profile_active = false;
+ }
+
+ mutex_unlock(&adev->vcn.workload_profile_mutex);
+}
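Note that the pair is not reference counted: amdgpu_vcn_get_profile() is a no-op while the profile is already active, and amdgpu_vcn_put_profile() only drops it once every instance has power-gated. A hedged sketch of the pairing as it is wired up below:

/* Illustration of the expected pairing (see begin_use/idle_work below) */
static void example_vcn_submit(struct amdgpu_ring *ring)
{
	amdgpu_vcn_ring_begin_use(ring);  /* ungates PG, then get_profile() */
	/* ... emit commands ... */
	amdgpu_vcn_ring_end_use(ring);    /* schedules idle work; the idle
					   * handler gates PG and calls
					   * put_profile() when all instances
					   * are idle */
}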
+
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
struct amdgpu_vcn_inst *vcn_inst =
@@ -412,7 +461,6 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
struct amdgpu_device *adev = vcn_inst->adev;
unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
unsigned int i = vcn_inst->inst, j;
- int r = 0;
if (adev->vcn.harvest_config & (1 << i))
return;
@@ -438,16 +486,11 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
fences += fence[i];
if (!fences && !atomic_read(&vcn_inst->total_submission_cnt)) {
+ mutex_lock(&vcn_inst->vcn_pg_lock);
vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_GATE);
- mutex_lock(&adev->vcn.workload_profile_mutex);
- if (adev->vcn.workload_profile_active) {
- r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
- false);
- if (r)
- dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
- adev->vcn.workload_profile_active = false;
- }
- mutex_unlock(&adev->vcn.workload_profile_mutex);
+ mutex_unlock(&vcn_inst->vcn_pg_lock);
+ amdgpu_vcn_put_profile(adev);
} else {
schedule_delayed_work(&vcn_inst->idle_work, VCN_IDLE_TIMEOUT);
}
@@ -457,30 +500,11 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vcn_inst *vcn_inst = &adev->vcn.inst[ring->me];
- int r = 0;
atomic_inc(&vcn_inst->total_submission_cnt);
cancel_delayed_work_sync(&vcn_inst->idle_work);
- /* We can safely return early here because we've cancelled the
- * the delayed work so there is no one else to set it to false
- * and we don't care if someone else sets it to true.
- */
- if (adev->vcn.workload_profile_active)
- goto pg_lock;
-
- mutex_lock(&adev->vcn.workload_profile_mutex);
- if (!adev->vcn.workload_profile_active) {
- r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
- true);
- if (r)
- dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
- adev->vcn.workload_profile_active = true;
- }
- mutex_unlock(&adev->vcn.workload_profile_mutex);
-
-pg_lock:
mutex_lock(&vcn_inst->vcn_pg_lock);
vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_UNGATE);
@@ -508,6 +532,7 @@ pg_lock:
vcn_inst->pause_dpg_mode(vcn_inst, &new_state);
}
mutex_unlock(&vcn_inst->vcn_pg_lock);
+ amdgpu_vcn_get_profile(adev);
}
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
@@ -601,7 +626,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
64, AMDGPU_IB_POOL_DIRECT,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
goto err;
@@ -781,7 +806,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
goto err;
@@ -911,7 +936,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -978,7 +1003,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -1132,7 +1157,7 @@ static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
{
struct amdgpu_vcn_inst *vcn;
void *log_buf;
- volatile struct amdgpu_vcn_fwlog *plog;
+ struct amdgpu_vcn_fwlog *plog;
unsigned int read_pos, write_pos, available, i, read_bytes = 0;
unsigned int read_num[2] = {0};
@@ -1145,7 +1170,7 @@ static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
- plog = (volatile struct amdgpu_vcn_fwlog *)log_buf;
+ plog = (struct amdgpu_vcn_fwlog *)log_buf;
read_pos = plog->rptr;
write_pos = plog->wptr;
@@ -1212,11 +1237,11 @@ void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i,
void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn)
{
#if defined(CONFIG_DEBUG_FS)
- volatile uint32_t *flag = vcn->fw_shared.cpu_addr;
+ uint32_t *flag = vcn->fw_shared.cpu_addr;
void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size;
- volatile struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
- volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
+ struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
+ struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
+ vcn->fw_shared.log_offset;
*flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG);
fw_log->is_enabled = 1;
@@ -1527,3 +1552,86 @@ int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring,
return amdgpu_vcn_reset_engine(adev, ring->me);
}
+
+int amdgpu_vcn_reg_dump_init(struct amdgpu_device *adev,
+ const struct amdgpu_hwip_reg_entry *reg, u32 count)
+{
+ adev->vcn.ip_dump = kcalloc(adev->vcn.num_vcn_inst * count,
+ sizeof(uint32_t), GFP_KERNEL);
+ if (!adev->vcn.ip_dump)
+ return -ENOMEM;
+ adev->vcn.reg_list = reg;
+ adev->vcn.reg_count = count;
+
+ return 0;
+}
+
+static void amdgpu_vcn_reg_dump_fini(struct amdgpu_device *adev)
+{
+ kfree(adev->vcn.ip_dump);
+ adev->vcn.ip_dump = NULL;
+ adev->vcn.reg_list = NULL;
+ adev->vcn.reg_count = 0;
+}
+
+void amdgpu_vcn_dump_ip_state(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i, j;
+ bool is_powered;
+ u32 inst_off;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ inst_off = i * adev->vcn.reg_count;
+ /* mmUVD_POWER_STATUS is always readable and is the first in reg_list */
+ adev->vcn.ip_dump[inst_off] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->vcn.reg_list[0], i));
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF) !=
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
+
+ if (is_powered)
+ for (j = 1; j < adev->vcn.reg_count; j++)
+ adev->vcn.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->vcn.reg_list[j], i));
+ }
+}
+
+void amdgpu_vcn_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i, j;
+ bool is_powered;
+ u32 inst_off;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * adev->vcn.reg_count;
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF) !=
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
+
+ if (is_powered) {
+ drm_printf(p, "\nActive Instance:VCN%d\n", i);
+ for (j = 0; j < adev->vcn.reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", adev->vcn.reg_list[j].reg_name,
+ adev->vcn.ip_dump[inst_off + j]);
+ } else {
+ drm_printf(p, "\nInactive Instance:VCN%d\n", i);
+ }
+ }
+}
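A VCN variant is expected to hand in its register list once during sw_init; a hedged sketch (register selection illustrative, SOC15_REG_ENTRY_STR as defined in soc15.h):

/* UVD_POWER_STATUS must stay first: amdgpu_vcn_dump_ip_state() reads the
 * first entry to decide whether the rest of the instance is powered. */
static const struct amdgpu_hwip_reg_entry example_vcn_reg_list[] = {
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
	SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
};

static int example_vcn_sw_init(struct amdgpu_device *adev)
{
	/* allocates num_vcn_inst * count dwords and remembers the list */
	return amdgpu_vcn_reg_dump_init(adev, example_vcn_reg_list,
					ARRAY_SIZE(example_vcn_reg_list));
}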
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 0bc0a94d7cf0..dc8a17bcc3c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -237,6 +237,8 @@
#define AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING 2
+struct amdgpu_hwip_reg_entry;
+
enum amdgpu_vcn_caps {
AMDGPU_VCN_RRMT_ENABLED,
};
@@ -362,6 +364,8 @@ struct amdgpu_vcn {
bool workload_profile_active;
struct mutex workload_profile_mutex;
+ u32 reg_count;
+ const struct amdgpu_hwip_reg_entry *reg_list;
};
struct amdgpu_fw_shared_rb_ptrs_struct {
@@ -497,7 +501,7 @@ struct amdgpu_vcn5_fw_shared {
struct amdgpu_fw_shared_rb_setup rb_setup;
struct amdgpu_fw_shared_smu_interface_info smu_dpm_interface;
struct amdgpu_fw_shared_drm_key_wa drm_key_wa;
- uint8_t pad3[9];
+ uint8_t pad3[404];
};
#define VCN_BLOCK_ENCODE_DISABLE_MASK 0x80
@@ -512,7 +516,7 @@ enum vcn_ring_type {
int amdgpu_vcn_early_init(struct amdgpu_device *adev, int i);
int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i);
-int amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i);
+void amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i);
int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i);
int amdgpu_vcn_resume(struct amdgpu_device *adev, int i);
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring);
@@ -557,4 +561,11 @@ int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block,
int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring,
unsigned int vmid,
struct amdgpu_fence *guilty_fence);
+int amdgpu_vcn_reg_dump_init(struct amdgpu_device *adev,
+ const struct amdgpu_hwip_reg_entry *reg, u32 count);
+void amdgpu_vcn_dump_ip_state(struct amdgpu_ip_block *ip_block);
+void amdgpu_vcn_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p);
+void amdgpu_vcn_get_profile(struct amdgpu_device *adev);
+void amdgpu_vcn_put_profile(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 13f0cdeb59c4..3328ab63376b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -828,11 +828,14 @@ static void amdgpu_virt_init_ras(struct amdgpu_device *adev)
{
ratelimit_state_init(&adev->virt.ras.ras_error_cnt_rs, 5 * HZ, 1);
ratelimit_state_init(&adev->virt.ras.ras_cper_dump_rs, 5 * HZ, 1);
+ ratelimit_state_init(&adev->virt.ras.ras_chk_criti_rs, 5 * HZ, 1);
ratelimit_set_flags(&adev->virt.ras.ras_error_cnt_rs,
RATELIMIT_MSG_ON_RELEASE);
ratelimit_set_flags(&adev->virt.ras.ras_cper_dump_rs,
RATELIMIT_MSG_ON_RELEASE);
+ ratelimit_set_flags(&adev->virt.ras.ras_chk_criti_rs,
+ RATELIMIT_MSG_ON_RELEASE);
mutex_init(&adev->virt.ras.ras_telemetry_mutex);
@@ -1501,3 +1504,55 @@ void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev)
if (virt->ops && virt->ops->req_bad_pages)
virt->ops->req_bad_pages(adev);
}
+
+static int amdgpu_virt_cache_chk_criti_hit(struct amdgpu_device *adev,
+ struct amdsriov_ras_telemetry *host_telemetry,
+ bool *hit)
+{
+ struct amd_sriov_ras_chk_criti *tmp = NULL;
+ uint32_t checksum, used_size;
+
+ checksum = host_telemetry->header.checksum;
+ used_size = host_telemetry->header.used_size;
+
+ if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10))
+ return 0;
+
+ tmp = kmemdup(&host_telemetry->body.chk_criti, used_size, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ if (checksum != amd_sriov_msg_checksum(tmp, used_size, 0, 0))
+ goto out;
+
+ if (hit)
+ *hit = tmp->hit ? true : false;
+
+out:
+ kfree(tmp);
+
+ return 0;
+}
+
+int amdgpu_virt_check_vf_critical_region(struct amdgpu_device *adev, u64 addr, bool *hit)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+ int r = -EPERM;
+
+ if (!virt->ops || !virt->ops->req_ras_chk_criti)
+ return -EOPNOTSUPP;
+
+ /* The host allows 15 RAS telemetry requests per 60 seconds, after which it
+ * will ignore incoming guest messages. Rate-limit the guest messages to
+ * prevent a guest self-DoS.
+ */
+ if (__ratelimit(&virt->ras.ras_chk_criti_rs)) {
+ mutex_lock(&virt->ras.ras_telemetry_mutex);
+ if (!virt->ops->req_ras_chk_criti(adev, addr))
+ r = amdgpu_virt_cache_chk_criti_hit(
+ adev, virt->fw_reserve.ras_telemetry, hit);
+ mutex_unlock(&virt->ras.ras_telemetry_mutex);
+ }
+
+ return r;
+}
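A hedged sketch of a consumer (the wrapper is hypothetical): callers should treat a rate-limited or unsupported request as "no answer", not as a miss.

static bool example_vf_addr_is_critical(struct amdgpu_device *adev, u64 addr)
{
	bool hit = false;

	/* -EOPNOTSUPP without host support, -EPERM when rate-limited */
	if (amdgpu_virt_check_vf_critical_region(adev, addr, &hit))
		return false;	/* no reliable answer from the host */

	return hit;
}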
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 3da3ebb1d9a1..d1172c8e58c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -98,6 +98,7 @@ struct amdgpu_virt_ops {
int (*req_ras_err_count)(struct amdgpu_device *adev);
int (*req_ras_cper_dump)(struct amdgpu_device *adev, u64 vf_rptr);
int (*req_bad_pages)(struct amdgpu_device *adev);
+ int (*req_ras_chk_criti)(struct amdgpu_device *adev, u64 addr);
};
/*
@@ -252,10 +253,15 @@ struct amdgpu_virt_ras_err_handler_data {
struct amdgpu_virt_ras {
struct ratelimit_state ras_error_cnt_rs;
struct ratelimit_state ras_cper_dump_rs;
+ struct ratelimit_state ras_chk_criti_rs;
struct mutex ras_telemetry_mutex;
uint64_t cper_rptr;
};
+#define AMDGPU_VIRT_CAPS_LIST(X) X(AMDGPU_VIRT_CAP_POWER_LIMIT)
+
+DECLARE_ATTR_CAP_CLASS(amdgpu_virt, AMDGPU_VIRT_CAPS_LIST);
+
/* GPU virtualization */
struct amdgpu_virt {
uint32_t caps;
@@ -267,12 +273,14 @@ struct amdgpu_virt {
struct amdgpu_irq_src rcv_irq;
struct work_struct flr_work;
- struct work_struct bad_pages_work;
+ struct work_struct req_bad_pages_work;
+ struct work_struct handle_bad_pages_work;
struct amdgpu_mm_table mm_table;
const struct amdgpu_virt_ops *ops;
struct amdgpu_vf_error_buffer vf_errors;
struct amdgpu_virt_fw_reserve fw_reserve;
+ struct amdgpu_virt_caps virt_caps;
uint32_t gim_feature;
uint32_t reg_access_mode;
int req_init_data_ver;
@@ -447,4 +455,5 @@ int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev);
bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev,
enum amdgpu_ras_block block);
void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev);
+int amdgpu_virt_check_vf_critical_region(struct amdgpu_device *adev, u64 addr, bool *hit);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index 155bb9891a17..79bad9cbe2ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -14,7 +14,6 @@
#include "dce_v8_0.h"
#endif
#include "dce_v10_0.h"
-#include "dce_v11_0.h"
#include "ivsrcid/ivsrcid_vislands30.h"
#include "amdgpu_vkms.h"
#include "amdgpu_display.h"
@@ -581,13 +580,6 @@ static int amdgpu_vkms_hw_init(struct amdgpu_ip_block *ip_block)
case CHIP_TONGA:
dce_v10_0_disable_dce(adev);
break;
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- case CHIP_POLARIS10:
- case CHIP_POLARIS11:
- case CHIP_VEGAM:
- dce_v11_0_disable_dce(adev);
- break;
case CHIP_TOPAZ:
#ifdef CONFIG_DRM_AMDGPU_SI
case CHIP_HAINAN:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 5cacf5717016..c1a801203949 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -128,43 +128,14 @@ struct amdgpu_vm_tlb_seq_struct {
};
/**
- * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
- *
- * @adev: amdgpu_device pointer
- * @vm: amdgpu_vm pointer
- * @pasid: the pasid the VM is using on this GPU
- *
- * Set the pasid this VM is using on this GPU, can also be used to remove the
- * pasid by passing in zero.
+ * amdgpu_vm_assert_locked - check if VM is correctly locked
+ * @vm: the VM which should be tested
*
+ * Asserts that the VM root PD is locked.
*/
-int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- u32 pasid)
+static void amdgpu_vm_assert_locked(struct amdgpu_vm *vm)
{
- int r;
-
- if (vm->pasid == pasid)
- return 0;
-
- if (vm->pasid) {
- r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
- if (r < 0)
- return r;
-
- vm->pasid = 0;
- }
-
- if (pasid) {
- r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
- GFP_KERNEL));
- if (r < 0)
- return r;
-
- vm->pasid = pasid;
- }
-
-
- return 0;
+ dma_resv_assert_held(vm->root.bo->tbo.base.resv);
}
/**
@@ -181,6 +152,7 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
struct amdgpu_bo *bo = vm_bo->bo;
vm_bo->moved = true;
+ amdgpu_vm_assert_locked(vm);
spin_lock(&vm_bo->vm->status_lock);
if (bo->tbo.type == ttm_bo_type_kernel)
list_move(&vm_bo->vm_status, &vm->evicted);
@@ -198,6 +170,7 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
{
+ amdgpu_vm_assert_locked(vm_bo->vm);
spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
spin_unlock(&vm_bo->vm->status_lock);
@@ -213,6 +186,7 @@ static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
{
+ amdgpu_vm_assert_locked(vm_bo->vm);
spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
spin_unlock(&vm_bo->vm->status_lock);
@@ -260,6 +234,7 @@ static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
{
+ amdgpu_vm_assert_locked(vm_bo->vm);
if (vm_bo->bo->parent) {
spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
@@ -279,6 +254,7 @@ static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
{
+ amdgpu_vm_assert_locked(vm_bo->vm);
spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->done);
spin_unlock(&vm_bo->vm->status_lock);
@@ -295,10 +271,13 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
{
struct amdgpu_vm_bo_base *vm_bo, *tmp;
+ amdgpu_vm_assert_locked(vm);
+
spin_lock(&vm->status_lock);
list_splice_init(&vm->done, &vm->invalidated);
list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
vm_bo->moved = true;
+
list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
struct amdgpu_bo *bo = vm_bo->bo;
@@ -327,6 +306,7 @@ static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)
uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
bool shared;
+ dma_resv_assert_held(bo->tbo.base.resv);
spin_lock(&vm->status_lock);
shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
if (base->shared != shared) {
@@ -485,6 +465,42 @@ int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
}
/**
+ * amdgpu_vm_lock_done_list - lock all BOs on the done list
+ * @vm: vm providing the BOs
+ * @exec: drm execution context
+ * @num_fences: number of extra fences to reserve
+ *
+ * Lock the BOs on the done list in the DRM execution context.
+ */
+int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,
+ unsigned int num_fences)
+{
+ struct list_head *prev = &vm->done;
+ struct amdgpu_bo_va *bo_va;
+ struct amdgpu_bo *bo;
+ int ret;
+
+ /* We can only trust prev->next while holding the lock */
+ spin_lock(&vm->status_lock);
+ while (!list_is_head(prev->next, &vm->done)) {
+ bo_va = list_entry(prev->next, typeof(*bo_va), base.vm_status);
+ spin_unlock(&vm->status_lock);
+
+ bo = bo_va->base.bo;
+ if (bo) {
+ ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 1);
+ if (unlikely(ret))
+ return ret;
+ }
+ spin_lock(&vm->status_lock);
+ prev = prev->next;
+ }
+ spin_unlock(&vm->status_lock);
+
+ return 0;
+}
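The walk above is a drop-lock iteration: drm_exec_prepare_obj() can sleep, so the status_lock is released around it and the list position is re-derived from prev, which is only trusted while the lock is held. The same idiom in miniature (all names hypothetical):

struct list_head *prev = &head;

spin_lock(&lock);
while (!list_is_head(prev->next, &head)) {
	struct item *it = list_entry(prev->next, struct item, node);

	spin_unlock(&lock);
	sleeping_operation(it);		/* list may mutate while unlocked */
	spin_lock(&lock);
	prev = prev->next;		/* valid again only under the lock */
}
spin_unlock(&lock);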
+
+/**
* amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
*
* @adev: amdgpu device pointer
@@ -616,18 +632,7 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
spin_unlock(&vm->status_lock);
bo = bo_base->bo;
-
- if (dma_resv_locking_ctx(bo->tbo.base.resv) != ticket) {
- struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_vm(vm);
-
- pr_warn_ratelimited("Evicted user BO is not reserved\n");
- if (ti) {
- pr_warn_ratelimited("pid %d\n", ti->task.pid);
- amdgpu_vm_put_task_info(ti);
- }
-
- return -EINVAL;
- }
+ dma_resv_assert_held(bo->tbo.base.resv);
r = validate(param, bo);
if (r)
@@ -654,22 +659,31 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
* Check if all VM PDs/PTs are ready for updates
*
* Returns:
- * True if VM is not evicting.
+ * True if VM is not evicting and all VM entities are not stopped
*/
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
- bool empty;
bool ret;
+ amdgpu_vm_assert_locked(vm);
+
amdgpu_vm_eviction_lock(vm);
ret = !vm->evicting;
amdgpu_vm_eviction_unlock(vm);
spin_lock(&vm->status_lock);
- empty = list_empty(&vm->evicted);
+ ret &= list_empty(&vm->evicted);
spin_unlock(&vm->status_lock);
- return ret && empty;
+ spin_lock(&vm->immediate.lock);
+ ret &= !vm->immediate.stopped;
+ spin_unlock(&vm->immediate.lock);
+
+ spin_lock(&vm->delayed.lock);
+ ret &= !vm->delayed.stopped;
+ spin_unlock(&vm->delayed.lock);
+
+ return ret;
}
/**
@@ -955,6 +969,8 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
LIST_HEAD(relocated);
int r, idx;
+ amdgpu_vm_assert_locked(vm);
+
spin_lock(&vm->status_lock);
list_splice_init(&vm->relocated, &relocated);
spin_unlock(&vm->status_lock);
@@ -970,7 +986,8 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
params.vm = vm;
params.immediate = immediate;
- r = vm->update_funcs->prepare(&params, NULL);
+ r = vm->update_funcs->prepare(&params, NULL,
+ AMDGPU_KERNEL_JOB_ID_VM_UPDATE_PDES);
if (r)
goto error;
@@ -1139,7 +1156,8 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
dma_fence_put(tmp);
}
- r = vm->update_funcs->prepare(&params, sync);
+ r = vm->update_funcs->prepare(&params, sync,
+ AMDGPU_KERNEL_JOB_ID_VM_UPDATE_RANGE);
if (r)
goto error_free;
@@ -1276,7 +1294,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
struct drm_gem_object *obj = &bo->tbo.base;
if (drm_gem_is_imported(obj) && bo_va->is_xgmi) {
- struct dma_buf *dma_buf = obj->dma_buf;
+ struct dma_buf *dma_buf = obj->import_attach->dmabuf;
struct drm_gem_object *gobj = dma_buf->priv;
struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
@@ -1332,13 +1350,14 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
/* Normally bo_va->flags only contains the READABLE and WRITEABLE bits
* here, but just in case we filter the flags first
*/
- if (!(mapping->flags & AMDGPU_PTE_READABLE))
+ if (!(mapping->flags & AMDGPU_VM_PAGE_READABLE))
update_flags &= ~AMDGPU_PTE_READABLE;
- if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
+ if (!(mapping->flags & AMDGPU_VM_PAGE_WRITEABLE))
update_flags &= ~AMDGPU_PTE_WRITEABLE;
/* Apply ASIC specific mapping flags */
- amdgpu_gmc_get_vm_pte(adev, mapping, &update_flags);
+ amdgpu_gmc_get_vm_pte(adev, vm, bo, mapping->flags,
+ &update_flags);
trace_amdgpu_vm_bo_update(mapping);
@@ -1479,7 +1498,7 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping,
struct dma_fence *fence)
{
- if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
+ if (mapping->flags & AMDGPU_VM_PAGE_PRT)
amdgpu_vm_add_prt_cb(adev, fence);
kfree(mapping);
}
@@ -1758,7 +1777,7 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
list_add(&mapping->list, &bo_va->invalids);
amdgpu_vm_it_insert(mapping, &vm->va);
- if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
+ if (mapping->flags & AMDGPU_VM_PAGE_PRT)
amdgpu_vm_prt_get(adev);
if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved)
@@ -1818,7 +1837,7 @@ static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t saddr, uint64_t offset,
- uint64_t size, uint64_t flags)
+ uint64_t size, uint32_t flags)
{
struct amdgpu_bo_va_mapping *mapping, *tmp;
struct amdgpu_bo *bo = bo_va->base.bo;
@@ -1877,7 +1896,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t saddr, uint64_t offset,
- uint64_t size, uint64_t flags)
+ uint64_t size, uint32_t flags)
{
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_bo *bo = bo_va->base.bo;
@@ -2530,6 +2549,7 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
* @adev: amdgpu_device pointer
* @vm: requested vm
* @xcp_id: GPU partition selection id
+ * @pasid: the pasid the VM is using on this GPU
*
* Init @vm fields.
*
@@ -2537,7 +2557,7 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
* 0 for success, error for failure.
*/
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int32_t xcp_id)
+ int32_t xcp_id, uint32_t pasid)
{
struct amdgpu_bo *root_bo;
struct amdgpu_bo_vm *root;
@@ -2613,12 +2633,26 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (r)
dev_dbg(adev->dev, "Failed to create task info for VM\n");
+ /* Store new PASID in XArray (if non-zero) */
+ if (pasid != 0) {
+ r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm, GFP_KERNEL));
+ if (r < 0)
+ goto error_free_root;
+
+ vm->pasid = pasid;
+ }
+
amdgpu_bo_unreserve(vm->root.bo);
amdgpu_bo_unref(&root_bo);
return 0;
error_free_root:
+ /* If PASID was partially set, erase it from XArray before failing */
+ if (vm->pasid != 0) {
+ xa_erase_irq(&adev->vm_manager.pasids, vm->pasid);
+ vm->pasid = 0;
+ }
amdgpu_vm_pt_free_root(adev, vm);
amdgpu_bo_unreserve(vm->root.bo);
amdgpu_bo_unref(&root_bo);
@@ -2724,7 +2758,11 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
root = amdgpu_bo_ref(vm->root.bo);
amdgpu_bo_reserve(root, true);
- amdgpu_vm_set_pasid(adev, vm, 0);
+ /* Remove PASID mapping before destroying VM */
+ if (vm->pasid != 0) {
+ xa_erase_irq(&adev->vm_manager.pasids, vm->pasid);
+ vm->pasid = 0;
+ }
dma_fence_wait(vm->last_unlocked, false);
dma_fence_put(vm->last_unlocked);
dma_fence_wait(vm->last_tlb_flush, false);
@@ -2734,7 +2772,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
dma_fence_put(vm->last_tlb_flush);
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
- if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev) && prt_fini_needed) {
+ if (mapping->flags & AMDGPU_VM_PAGE_PRT && prt_fini_needed) {
amdgpu_vm_prt_fini(adev, vm);
prt_fini_needed = false;
}
@@ -2765,10 +2803,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
dma_fence_put(vm->last_update);
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) {
- if (vm->reserved_vmid[i]) {
- amdgpu_vmid_free_reserved(adev, i);
- vm->reserved_vmid[i] = false;
- }
+ amdgpu_vmid_free_reserved(adev, vm, i);
}
ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
@@ -2864,6 +2899,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
union drm_amdgpu_vm *args = data;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
/* No valid flags defined yet */
if (args->in.flags)
@@ -2872,17 +2908,10 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
switch (args->in.op) {
case AMDGPU_VM_OP_RESERVE_VMID:
/* We only have requirement to reserve vmid from gfxhub */
- if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
- amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
- fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true;
- }
-
+ amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB(0));
break;
case AMDGPU_VM_OP_UNRESERVE_VMID:
- if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
- amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(0));
- fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false;
- }
+ amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB(0));
break;
default:
return -EINVAL;
@@ -3020,6 +3049,8 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
unsigned int total_done_objs = 0;
unsigned int id = 0;
+ amdgpu_vm_assert_locked(vm);
+
spin_lock(&vm->status_lock);
seq_puts(m, "\tIdle BOs:\n");
list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index fd086efd8457..cf0ec94e8a07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -308,7 +308,7 @@ struct amdgpu_vm_update_params {
struct amdgpu_vm_update_funcs {
int (*map_table)(struct amdgpu_bo_vm *bo);
int (*prepare)(struct amdgpu_vm_update_params *p,
- struct amdgpu_sync *sync);
+ struct amdgpu_sync *sync, u64 k_job_id);
int (*update)(struct amdgpu_vm_update_params *p,
struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr, uint64_t flags);
@@ -349,12 +349,16 @@ struct amdgpu_vm {
/* Memory statistics for this vm, protected by status_lock */
struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM];
+ /*
+ * The following lists contain amdgpu_vm_bo_base objects for either
+ * PDs, PTs or per VM BOs. The state transitions are:
+ *
+ * evicted -> relocated (PDs, PTs) or moved (per VM BOs) -> idle
+ */
+
/* Per-VM and PT BOs who needs a validation */
struct list_head evicted;
- /* BOs for user mode queues that need a validation */
- struct list_head evicted_user;
-
/* PT BOs which relocated and their parent need an update */
struct list_head relocated;
@@ -364,15 +368,29 @@ struct amdgpu_vm {
/* All BOs of this VM not currently in the state machine */
struct list_head idle;
+ /*
+ * The following lists contain amdgpu_vm_bo_base objects for BOs which
+ * have their own dma_resv object and do not depend on the root PD. Their
+ * state transitions are:
+ *
+ * evicted_user or invalidated -> done
+ */
+
+ /* BOs for user mode queues that need a validation */
+ struct list_head evicted_user;
+
/* regular invalidated BOs, but not yet updated in the PT */
struct list_head invalidated;
- /* BO mappings freed, but not yet updated in the PT */
- struct list_head freed;
-
/* BOs which were invalidated and have been updated in the PTs */
struct list_head done;
+ /*
+ * This list contains amdgpu_bo_va_mapping objects which have been freed
+ * but not updated in the PTs
+ */
+ struct list_head freed;
+
/* contains the page directory */
struct amdgpu_vm_bo_base root;
struct dma_fence *last_update;
@@ -394,7 +412,7 @@ struct amdgpu_vm {
struct dma_fence *last_unlocked;
unsigned int pasid;
- bool reserved_vmid[AMDGPU_MAX_VMHUBS];
+ struct amdgpu_vmid *reserved_vmid[AMDGPU_MAX_VMHUBS];
/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
bool use_cpu_for_update;
@@ -482,15 +500,14 @@ extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
-int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- u32 pasid);
-
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id);
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id, uint32_t pasid);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
unsigned int num_fences);
+int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,
+ unsigned int num_fences);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
@@ -538,11 +555,11 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr, uint64_t offset,
- uint64_t size, uint64_t flags);
+ uint64_t size, uint32_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr, uint64_t offset,
- uint64_t size, uint64_t flags);
+ uint64_t size, uint32_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr);
@@ -670,4 +687,9 @@ void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev,
void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
struct amdgpu_task_info *task_info);
+#define amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) \
+ list_for_each_entry(mapping, &(bo_va)->valids, list)
+#define amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) \
+ list_for_each_entry(mapping, &(bo_va)->invalids, list)
+
#endif
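A hedged usage sketch for the two new iterators (the trace call mirrors existing users of the valids/invalids lists in amdgpu_vm.c):

struct amdgpu_bo_va_mapping *mapping;

/* mappings already reflected in the page tables */
amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping)
	trace_amdgpu_vm_bo_mapping(mapping);

/* mappings still waiting for a page table update */
amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping)
	trace_amdgpu_vm_bo_mapping(mapping);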
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index 0c1ef5850a5e..22e2e5b47341 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -40,12 +40,14 @@ static int amdgpu_vm_cpu_map_table(struct amdgpu_bo_vm *table)
*
* @p: see amdgpu_vm_update_params definition
* @sync: sync obj with fences to wait on
+ * @k_job_id: the id for tracing/debug purposes
*
* Returns:
* Negative errno, 0 for success.
*/
static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
- struct amdgpu_sync *sync)
+ struct amdgpu_sync *sync,
+ u64 k_job_id)
{
if (!sync)
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index 30022123b0bf..f794fb1cc06e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -26,6 +26,7 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_vm.h"
+#include "amdgpu_job.h"
/*
* amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
@@ -395,7 +396,8 @@ int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
params.vm = vm;
params.immediate = immediate;
- r = vm->update_funcs->prepare(&params, NULL);
+ r = vm->update_funcs->prepare(&params, NULL,
+ AMDGPU_KERNEL_JOB_ID_VM_PT_CLEAR);
if (r)
goto exit;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 46d9fb433ab2..36805dcfa159 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -40,7 +40,7 @@ static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table)
/* Allocate a new job for @count PTE updates */
static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
- unsigned int count)
+ unsigned int count, u64 k_job_id)
{
enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
: AMDGPU_IB_POOL_DELAYED;
@@ -56,7 +56,7 @@ static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
r = amdgpu_job_alloc_with_ib(p->adev, entity, AMDGPU_FENCE_OWNER_VM,
- ndw * 4, pool, &p->job);
+ ndw * 4, pool, &p->job, k_job_id);
if (r)
return r;
@@ -69,16 +69,17 @@ static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
*
* @p: see amdgpu_vm_update_params definition
* @sync: amdgpu_sync object with fences to wait for
+ * @k_job_id: identifier of the job, for tracing purposes
*
* Returns:
* Negative errno, 0 for success.
*/
static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
- struct amdgpu_sync *sync)
+ struct amdgpu_sync *sync, u64 k_job_id)
{
int r;
- r = amdgpu_vm_sdma_alloc_job(p, 0);
+ r = amdgpu_vm_sdma_alloc_job(p, 0, k_job_id);
if (r)
return r;
@@ -249,7 +250,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
if (r)
return r;
- r = amdgpu_vm_sdma_alloc_job(p, count);
+ r = amdgpu_vm_sdma_alloc_job(p, count,
+ AMDGPU_KERNEL_JOB_ID_VM_UPDATE);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
index 121ee17b522b..474bfe36c0c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
@@ -379,9 +379,10 @@ static int vpe_sw_init(struct amdgpu_ip_block *ip_block)
if (ret)
goto out;
- /* TODO: Add queue reset mask when FW fully supports it */
adev->vpe.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vpe.ring);
+ if (!amdgpu_sriov_vf(adev))
+ adev->vpe.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
ret = amdgpu_vpe_sysfs_reset_mask_init(adev);
if (ret)
goto out;
@@ -435,6 +436,8 @@ static int vpe_hw_fini(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_vpe *vpe = &adev->vpe;
+ cancel_delayed_work_sync(&adev->vpe.idle_work);
+
vpe_ring_stop(vpe);
/* Power off VPE */
@@ -445,10 +448,6 @@ static int vpe_hw_fini(struct amdgpu_ip_block *ip_block)
static int vpe_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- cancel_delayed_work_sync(&adev->vpe.idle_work);
-
return vpe_hw_fini(ip_block);
}
@@ -874,6 +873,27 @@ static void vpe_ring_end_use(struct amdgpu_ring *ring)
schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
}
+static int vpe_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
+ r = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE,
+ AMD_PG_STATE_GATE);
+ if (r)
+ return r;
+ r = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE,
+ AMD_PG_STATE_UNGATE);
+ if (r)
+ return r;
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
static ssize_t amdgpu_get_vpe_reset_mask(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -942,6 +962,7 @@ static const struct amdgpu_ring_funcs vpe_ring_funcs = {
.preempt_ib = vpe_ring_preempt_ib,
.begin_use = vpe_ring_begin_use,
.end_use = vpe_ring_end_use,
+ .reset = vpe_ring_reset,
};
static void vpe_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 07c936e90d8e..a5adb2ed9b3c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -396,43 +396,33 @@ out:
return ret;
}
-static void amdgpu_dummy_vram_mgr_debug(struct ttm_resource_manager *man,
- struct drm_printer *printer)
+int amdgpu_vram_mgr_query_address_block_info(struct amdgpu_vram_mgr *mgr,
+ uint64_t address, struct amdgpu_vram_block_info *info)
{
- DRM_DEBUG_DRIVER("Dummy vram mgr debug\n");
-}
-
-static bool amdgpu_dummy_vram_mgr_compatible(struct ttm_resource_manager *man,
- struct ttm_resource *res,
- const struct ttm_place *place,
- size_t size)
-{
- DRM_DEBUG_DRIVER("Dummy vram mgr compatible\n");
- return false;
-}
+ struct amdgpu_vram_mgr_resource *vres;
+ struct drm_buddy_block *block;
+ u64 start, size;
+ int ret = -ENOENT;
-static bool amdgpu_dummy_vram_mgr_intersects(struct ttm_resource_manager *man,
- struct ttm_resource *res,
- const struct ttm_place *place,
- size_t size)
-{
- DRM_DEBUG_DRIVER("Dummy vram mgr intersects\n");
- return true;
-}
+ mutex_lock(&mgr->lock);
+ list_for_each_entry(vres, &mgr->allocated_vres_list, vres_node) {
+ list_for_each_entry(block, &vres->blocks, link) {
+ start = amdgpu_vram_mgr_block_start(block);
+ size = amdgpu_vram_mgr_block_size(block);
+ if ((start <= address) && (address < (start + size))) {
+ info->start = start;
+ info->size = size;
+ memcpy(&info->task, &vres->task, sizeof(vres->task));
+ ret = 0;
+ goto out;
+ }
+ }
+ }
-static void amdgpu_dummy_vram_mgr_del(struct ttm_resource_manager *man,
- struct ttm_resource *res)
-{
- DRM_DEBUG_DRIVER("Dummy vram mgr deleted\n");
-}
+out:
+ mutex_unlock(&mgr->lock);
-static int amdgpu_dummy_vram_mgr_new(struct ttm_resource_manager *man,
- struct ttm_buffer_object *tbo,
- const struct ttm_place *place,
- struct ttm_resource **res)
-{
- DRM_DEBUG_DRIVER("Dummy vram mgr new\n");
- return -ENOSPC;
+ return ret;
}
/**
@@ -568,6 +558,10 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
remaining_size -= size;
}
+ vres->task.pid = task_pid_nr(current);
+ get_task_comm(vres->task.comm, current);
+ list_add_tail(&vres->vres_node, &mgr->allocated_vres_list);
+
if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) {
struct drm_buddy_block *dcc_block;
unsigned long dcc_start;
@@ -645,12 +639,15 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
uint64_t vis_usage = 0;
mutex_lock(&mgr->lock);
+
+ list_del(&vres->vres_node);
+ memset(&vres->task, 0, sizeof(vres->task));
+
list_for_each_entry(block, &vres->blocks, link)
vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
- amdgpu_vram_mgr_do_reserve(man);
-
drm_buddy_free_list(mm, &vres->blocks, vres->flags);
+ amdgpu_vram_mgr_do_reserve(man);
mutex_unlock(&mgr->lock);
atomic64_sub(vis_usage, &mgr->vis_usage);
@@ -896,14 +893,6 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
mutex_unlock(&mgr->lock);
}
-static const struct ttm_resource_manager_func amdgpu_dummy_vram_mgr_func = {
- .alloc = amdgpu_dummy_vram_mgr_new,
- .free = amdgpu_dummy_vram_mgr_del,
- .intersects = amdgpu_dummy_vram_mgr_intersects,
- .compatible = amdgpu_dummy_vram_mgr_compatible,
- .debug = amdgpu_dummy_vram_mgr_debug
-};
-
static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
.alloc = amdgpu_vram_mgr_new,
.free = amdgpu_vram_mgr_del,
@@ -934,18 +923,13 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
mutex_init(&mgr->lock);
INIT_LIST_HEAD(&mgr->reservations_pending);
INIT_LIST_HEAD(&mgr->reserved_pages);
+ INIT_LIST_HEAD(&mgr->allocated_vres_list);
mgr->default_page_size = PAGE_SIZE;
- if (!adev->gmc.is_app_apu) {
- man->func = &amdgpu_vram_mgr_func;
-
- err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
- if (err)
- return err;
- } else {
- man->func = &amdgpu_dummy_vram_mgr_func;
- DRM_INFO("Setup dummy vram mgr\n");
- }
+ man->func = &amdgpu_vram_mgr_func;
+ err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
+ if (err)
+ return err;
ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
ttm_resource_manager_set_used(man, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
index 2c88d5fd87da..5f5fd9a911c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
@@ -35,12 +35,26 @@ struct amdgpu_vram_mgr {
struct list_head reserved_pages;
atomic64_t vis_usage;
u64 default_page_size;
+ struct list_head allocated_vres_list;
+};
+
+struct amdgpu_vres_task {
+ pid_t pid;
+ char comm[TASK_COMM_LEN];
+};
+
+struct amdgpu_vram_block_info {
+ u64 start;
+ u64 size;
+ struct amdgpu_vres_task task;
};
struct amdgpu_vram_mgr_resource {
struct ttm_resource base;
struct list_head blocks;
unsigned long flags;
+ struct list_head vres_node;
+ struct amdgpu_vres_task task;
};
static inline u64 amdgpu_vram_mgr_block_start(struct drm_buddy_block *block)
@@ -72,4 +86,7 @@ static inline void amdgpu_vram_mgr_set_cleared(struct ttm_resource *res)
ares->flags |= DRM_BUDDY_CLEARED;
}
+int amdgpu_vram_mgr_query_address_block_info(struct amdgpu_vram_mgr *mgr,
+ uint64_t address, struct amdgpu_vram_block_info *info);
+
#endif
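
For context, a minimal sketch of how a consumer might use the new query helper; the RAS-style caller and its log format are illustrative assumptions, not part of the patch:

/* Hypothetical caller: report which task owns the VRAM block that
 * contains a faulting address. adev->mman.vram_mgr is the manager
 * instance the helper expects. */
static void report_vram_owner(struct amdgpu_device *adev, uint64_t bad_addr)
{
	struct amdgpu_vram_block_info info;

	/* Returns 0 only when the address falls inside a tracked block. */
	if (amdgpu_vram_mgr_query_address_block_info(&adev->mman.vram_mgr,
						     bad_addr, &info))
		return;

	dev_info(adev->dev,
		 "addr 0x%llx in block [0x%llx, +0x%llx), pid %d (%s)\n",
		 bad_addr, info.start, info.size, info.task.pid,
		 info.task.comm);
}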
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
index c417f8689220..1083db8cea2e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
@@ -120,6 +120,25 @@ static void __amdgpu_xcp_add_block(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
xcp->valid = true;
}
+static void __amdgpu_xcp_set_unique_id(struct amdgpu_xcp_mgr *xcp_mgr,
+ int xcp_id)
+{
+ struct amdgpu_xcp *xcp = &xcp_mgr->xcp[xcp_id];
+ struct amdgpu_device *adev = xcp_mgr->adev;
+ uint32_t inst_mask;
+ uint64_t uid;
+ int i;
+
+ if (!amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask) &&
+ inst_mask) {
+ i = GET_INST(GC, (ffs(inst_mask) - 1));
+ uid = amdgpu_device_get_uid(xcp_mgr->adev->uid_info,
+ AMDGPU_UID_TYPE_XCD, i);
+ if (uid)
+ xcp->unique_id = uid;
+ }
+}
+
int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode)
{
struct amdgpu_device *adev = xcp_mgr->adev;
@@ -158,6 +177,7 @@ int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode)
else
xcp_mgr->xcp[i].mem_id = mem_id;
}
+ __amdgpu_xcp_set_unique_id(xcp_mgr, i);
}
xcp_mgr->num_xcps = num_xcps;
@@ -406,6 +426,7 @@ void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev)
p_ddev->primary->dev = adev->xcp_mgr->xcp[i].pdev;
p_ddev->driver = adev->xcp_mgr->xcp[i].driver;
p_ddev->vma_offset_manager = adev->xcp_mgr->xcp[i].vma_offset_manager;
+ amdgpu_xcp_drm_dev_free(p_ddev);
}
}
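
A brief worked example of the instance selection in __amdgpu_xcp_set_unique_id() above:

/* Example: if the XCP's GFX inst_mask is 0x4 (only logical instance 2),
 * ffs(0x4) == 3, so i = GET_INST(GC, 3 - 1) resolves logical GFX
 * instance 2 to its physical XCD index before the UID lookup. */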
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
index 70a0f8400b57..1928d9e224fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
@@ -112,6 +112,7 @@ struct amdgpu_xcp {
struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
struct amdgpu_xcp_mgr *xcp_mgr;
struct kobject kobj;
+ uint64_t unique_id;
};
struct amdgpu_xcp_mgr {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index bba0b26fee8f..5f36aff17e79 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -126,4 +126,8 @@ uint32_t amdgpu_xgmi_get_max_bandwidth(struct amdgpu_device *adev);
void amgpu_xgmi_set_max_speed_width(struct amdgpu_device *adev,
uint16_t max_speed, uint8_t max_width);
+
+/* Cleanup macro for use with __free(xgmi_put_hive) */
+DEFINE_FREE(xgmi_put_hive, struct amdgpu_hive_info *, if (_T) amdgpu_put_xgmi_hive(_T))
+
#endif
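
The DEFINE_FREE() above enables scope-based cleanup via cleanup.h; a minimal sketch of the intended usage pattern (the caller shown is hypothetical):

static int xgmi_do_work(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive __free(xgmi_put_hive) =
		amdgpu_get_xgmi_hive(adev);

	if (!hive)
		return -ENODEV;

	/* ... use hive; amdgpu_put_xgmi_hive() runs on every exit path */
	return 0;
}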
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index 33edad1f9dcd..3a79ed7d8031 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -405,12 +405,17 @@ struct amd_sriov_ras_cper_dump {
uint32_t buf[];
};
+struct amd_sriov_ras_chk_criti {
+ uint32_t hit;
+};
+
struct amdsriov_ras_telemetry {
struct amd_sriov_ras_telemetry_header header;
union {
struct amd_sriov_ras_telemetry_error_count error_count;
struct amd_sriov_ras_cper_dump cper_dump;
+ struct amd_sriov_ras_chk_criti chk_criti;
} body;
};
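
A sketch of how the guest side might test the new telemetry member once the host has filled the buffer (the helper name is hypothetical, and the meaning of 'hit' is an assumption based on the field name):

static bool ras_telemetry_hit_critical(struct amdsriov_ras_telemetry *telem)
{
	/* 'hit' is assumed here to flag a critical error reported by the host. */
	return telem->body.chk_criti.hit != 0;
}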
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index 914cf4bfb033..811124ff88a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -227,6 +227,7 @@ static int __aqua_vanjaram_get_px_mode_info(struct amdgpu_xcp_mgr *xcp_mgr,
uint16_t *nps_modes)
{
struct amdgpu_device *adev = xcp_mgr->adev;
+ uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
if (!num_xcp || !nps_modes || !(xcp_mgr->supp_xcp_modes & BIT(px_mode)))
return -EINVAL;
@@ -250,12 +251,14 @@ static int __aqua_vanjaram_get_px_mode_info(struct amdgpu_xcp_mgr *xcp_mgr,
*num_xcp = 4;
*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
BIT(AMDGPU_NPS4_PARTITION_MODE);
+ if (gc_ver == IP_VERSION(9, 5, 0))
+ *nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE);
break;
case AMDGPU_CPX_PARTITION_MODE:
*num_xcp = NUM_XCC(adev->gfx.xcc_mask);
*nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
BIT(AMDGPU_NPS4_PARTITION_MODE);
- if (amdgpu_sriov_vf(adev))
+ if (gc_ver == IP_VERSION(9, 5, 0))
*nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE);
break;
default:
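
As a worked example of the masks above: for IP_VERSION(9, 5, 0) in CPX (or QPX) mode, the resulting value is

/* *nps_modes == BIT(AMDGPU_NPS1_PARTITION_MODE) |
 *               BIT(AMDGPU_NPS4_PARTITION_MODE) |
 *               BIT(AMDGPU_NPS2_PARTITION_MODE)
 */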
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index 427b073de2fc..7a063e44d429 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -1246,6 +1246,10 @@ static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index,
ectx.last_jump_jiffies = 0;
if (ws) {
ectx.ws = kcalloc(4, ws, GFP_KERNEL);
+ if (!ectx.ws) {
+ ret = -ENOMEM;
+ goto free;
+ }
ectx.ws_size = ws;
} else {
ectx.ws = NULL;
@@ -1494,6 +1498,28 @@ static void atom_get_vbios_version(struct atom_context *ctx)
}
}
+static void atom_get_vbios_build(struct atom_context *ctx)
+{
+ unsigned char *atom_rom_hdr;
+ unsigned char *str;
+ uint16_t base, len;
+
+ base = CU16(ATOM_ROM_TABLE_PTR);
+ atom_rom_hdr = CSTR(base);
+
+ str = CSTR(CU16(base + ATOM_ROM_CFG_PTR));
+ /* Skip config string */
+ while (str < atom_rom_hdr && *str++)
+ ;
+ /* Skip change list string */
+ while (str < atom_rom_hdr && *str++)
+ ;
+
+ len = min(atom_rom_hdr - str, STRLEN_NORMAL);
+ if (len)
+ strscpy(ctx->build_num, str, len);
+}
+
struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
{
int base;
@@ -1554,6 +1580,7 @@ struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
atom_get_vbios_pn(ctx);
atom_get_vbios_date(ctx);
atom_get_vbios_version(ctx);
+ atom_get_vbios_build(ctx);
return ctx;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.h b/drivers/gpu/drm/amd/amdgpu/atom.h
index b807f6639a4c..825ff28731f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.h
+++ b/drivers/gpu/drm/amd/amdgpu/atom.h
@@ -37,6 +37,7 @@ struct drm_device;
#define ATOM_ROM_MAGIC "ATOM"
#define ATOM_ROM_MAGIC_PTR 4
+#define ATOM_ROM_CFG_PTR 0xC
#define ATOM_ROM_MSG_PTR 0x10
#define ATOM_ROM_CMD_PTR 0x1E
#define ATOM_ROM_DATA_PTR 0x20
@@ -151,6 +152,7 @@ struct atom_context {
uint32_t version;
uint8_t vbios_ver_str[STRLEN_NORMAL];
uint8_t date[STRLEN_NORMAL];
+ uint8_t build_num[STRLEN_NORMAL];
};
extern int amdgpu_atom_debug;
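
A sketch of a consumer of the new field, assuming card/bios as in amdgpu_atom_parse(); the print is illustrative:

struct atom_context *ctx = amdgpu_atom_parse(card, bios);

if (ctx && ctx->build_num[0])
	DRM_INFO("VBIOS build number: %s\n", (const char *)ctx->build_num);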
diff --git a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
new file mode 100644
index 000000000000..96616a865aac
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "nv.h"
+
+#include "soc15_common.h"
+#include "soc15_hw_ip.h"
+#include "cyan_skillfish_ip_offset.h"
+
+int cyan_skillfish_reg_base_init(struct amdgpu_device *adev)
+{
+ /* HW has more IP blocks, only initialize the blocks needed by the driver */

+ uint32_t i;
+
+ adev->gfx.xcc_mask = 1;
+ for (i = 0; i < MAX_INSTANCE; ++i) {
+ adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
+ adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
+ adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
+ adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
+ adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
+ adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
+ adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(UVD0_BASE.instance[i]));
+ adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
+ adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DMU_BASE.instance[i]));
+ adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
+ adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
+ adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
+ adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
+ }
+ return 0;
+}
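
For orientation, the relationship between these tables and the register accessors (a rough sketch of the macro expansion, not the literal definition; mmFOO is a placeholder register):

/* The tables populated above back the SOC15 register accessors; e.g.
 * RREG32_SOC15(GC, 0, mmFOO) resolves (roughly) to
 * RREG32(adev->reg_offset[GC_HWIP][0][mmFOO_BASE_IDX] + mmFOO). */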
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index bf7c22f81cda..72ca6538b2e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1141,8 +1141,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
/* save values for DPM */
amdgpu_crtc->line_time = line_time;
- amdgpu_crtc->wm_high = latency_watermark_a;
- amdgpu_crtc->wm_low = latency_watermark_b;
+
/* Save number of lines the linebuffer leads before the scanout */
amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
@@ -1462,17 +1461,12 @@ static int dce_v10_0_audio_init(struct amdgpu_device *adev)
static void dce_v10_0_audio_fini(struct amdgpu_device *adev)
{
- int i;
-
if (!amdgpu_audio)
return;
if (!adev->mode_info.audio.enabled)
return;
- for (i = 0; i < adev->mode_info.audio.num_pins; i++)
- dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
-
adev->mode_info.audio.enabled = false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
deleted file mode 100644
index 47e05783c4a0..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ /dev/null
@@ -1,3823 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drm_edid.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_modeset_helper.h>
-#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_vblank.h>
-
-#include "amdgpu.h"
-#include "amdgpu_pm.h"
-#include "amdgpu_i2c.h"
-#include "vid.h"
-#include "atom.h"
-#include "amdgpu_atombios.h"
-#include "atombios_crtc.h"
-#include "atombios_encoders.h"
-#include "amdgpu_pll.h"
-#include "amdgpu_connectors.h"
-#include "amdgpu_display.h"
-#include "dce_v11_0.h"
-
-#include "dce/dce_11_0_d.h"
-#include "dce/dce_11_0_sh_mask.h"
-#include "dce/dce_11_0_enum.h"
-#include "oss/oss_3_0_d.h"
-#include "oss/oss_3_0_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-
-#include "ivsrcid/ivsrcid_vislands30.h"
-
-static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev);
-static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev);
-static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev, int hpd);
-
-static const u32 crtc_offsets[] =
-{
- CRTC0_REGISTER_OFFSET,
- CRTC1_REGISTER_OFFSET,
- CRTC2_REGISTER_OFFSET,
- CRTC3_REGISTER_OFFSET,
- CRTC4_REGISTER_OFFSET,
- CRTC5_REGISTER_OFFSET,
- CRTC6_REGISTER_OFFSET
-};
-
-static const u32 hpd_offsets[] =
-{
- HPD0_REGISTER_OFFSET,
- HPD1_REGISTER_OFFSET,
- HPD2_REGISTER_OFFSET,
- HPD3_REGISTER_OFFSET,
- HPD4_REGISTER_OFFSET,
- HPD5_REGISTER_OFFSET
-};
-
-static const uint32_t dig_offsets[] = {
- DIG0_REGISTER_OFFSET,
- DIG1_REGISTER_OFFSET,
- DIG2_REGISTER_OFFSET,
- DIG3_REGISTER_OFFSET,
- DIG4_REGISTER_OFFSET,
- DIG5_REGISTER_OFFSET,
- DIG6_REGISTER_OFFSET,
- DIG7_REGISTER_OFFSET,
- DIG8_REGISTER_OFFSET
-};
-
-static const struct {
- uint32_t reg;
- uint32_t vblank;
- uint32_t vline;
- uint32_t hpd;
-
-} interrupt_status_offsets[] = { {
- .reg = mmDISP_INTERRUPT_STATUS,
- .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
-}, {
- .reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
- .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
-}, {
- .reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
- .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
-}, {
- .reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
- .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
-}, {
- .reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
- .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
-}, {
- .reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
- .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
-} };
-
-static const u32 cz_golden_settings_a11[] =
-{
- mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000,
- mmFBC_MISC, 0x1f311fff, 0x14300000,
-};
-
-static const u32 cz_mgcg_cgcg_init[] =
-{
- mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
- mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
-};
-
-static const u32 stoney_golden_settings_a11[] =
-{
- mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000,
- mmFBC_MISC, 0x1f311fff, 0x14302000,
-};
-
-static const u32 polaris11_golden_settings_a11[] =
-{
- mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
- mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
- mmFBC_DEBUG1, 0xffffffff, 0x00000008,
- mmFBC_MISC, 0x9f313fff, 0x14302008,
- mmHDMI_CONTROL, 0x313f031f, 0x00000011,
-};
-
-static const u32 polaris10_golden_settings_a11[] =
-{
- mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
- mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
- mmFBC_MISC, 0x9f313fff, 0x14302008,
- mmHDMI_CONTROL, 0x313f031f, 0x00000011,
-};
-
-static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
-{
- switch (adev->asic_type) {
- case CHIP_CARRIZO:
- amdgpu_device_program_register_sequence(adev,
- cz_mgcg_cgcg_init,
- ARRAY_SIZE(cz_mgcg_cgcg_init));
- amdgpu_device_program_register_sequence(adev,
- cz_golden_settings_a11,
- ARRAY_SIZE(cz_golden_settings_a11));
- break;
- case CHIP_STONEY:
- amdgpu_device_program_register_sequence(adev,
- stoney_golden_settings_a11,
- ARRAY_SIZE(stoney_golden_settings_a11));
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- amdgpu_device_program_register_sequence(adev,
- polaris11_golden_settings_a11,
- ARRAY_SIZE(polaris11_golden_settings_a11));
- break;
- case CHIP_POLARIS10:
- case CHIP_VEGAM:
- amdgpu_device_program_register_sequence(adev,
- polaris10_golden_settings_a11,
- ARRAY_SIZE(polaris10_golden_settings_a11));
- break;
- default:
- break;
- }
-}
-
-static u32 dce_v11_0_audio_endpt_rreg(struct amdgpu_device *adev,
- u32 block_offset, u32 reg)
-{
- unsigned long flags;
- u32 r;
-
- spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
- WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
- r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
- spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
-
- return r;
-}
-
-static void dce_v11_0_audio_endpt_wreg(struct amdgpu_device *adev,
- u32 block_offset, u32 reg, u32 v)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
- WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
- WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
- spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
-}
-
-static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
-{
- if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
- return 0;
- else
- return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
-}
-
-static void dce_v11_0_pageflip_interrupt_init(struct amdgpu_device *adev)
-{
- unsigned i;
-
- /* Enable pflip interrupts */
- for (i = 0; i < adev->mode_info.num_crtc; i++)
- amdgpu_irq_get(adev, &adev->pageflip_irq, i);
-}
-
-static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
-{
- unsigned i;
-
- /* Disable pflip interrupts */
- for (i = 0; i < adev->mode_info.num_crtc; i++)
- amdgpu_irq_put(adev, &adev->pageflip_irq, i);
-}
-
-/**
- * dce_v11_0_page_flip - pageflip callback.
- *
- * @adev: amdgpu_device pointer
- * @crtc_id: crtc to cleanup pageflip on
- * @crtc_base: new address of the crtc (GPU MC address)
- * @async: asynchronous flip
- *
- * Triggers the actual pageflip by updating the primary
- * surface base address.
- */
-static void dce_v11_0_page_flip(struct amdgpu_device *adev,
- int crtc_id, u64 crtc_base, bool async)
-{
- struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
- struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;
- u32 tmp;
-
- /* flip immediate for async, default is vsync */
- tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
- GRPH_SURFACE_UPDATE_IMMEDIATE_EN, async ? 1 : 0);
- WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
- /* update pitch */
- WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
- fb->pitches[0] / fb->format->cpp[0]);
- /* update the scanout addresses */
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
- upper_32_bits(crtc_base));
- /* writing to the low address triggers the update */
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- lower_32_bits(crtc_base));
- /* post the write */
- RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
-}
-
-static int dce_v11_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
- u32 *vbl, u32 *position)
-{
- if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
- return -EINVAL;
-
- *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
- *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
-
- return 0;
-}
-
-/**
- * dce_v11_0_hpd_sense - hpd sense callback.
- *
- * @adev: amdgpu_device pointer
- * @hpd: hpd (hotplug detect) pin
- *
- * Checks if a digital monitor is connected (evergreen+).
- * Returns true if connected, false if not connected.
- */
-static bool dce_v11_0_hpd_sense(struct amdgpu_device *adev,
- enum amdgpu_hpd_id hpd)
-{
- bool connected = false;
-
- if (hpd >= adev->mode_info.num_hpd)
- return connected;
-
- if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) &
- DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
- connected = true;
-
- return connected;
-}
-
-/**
- * dce_v11_0_hpd_set_polarity - hpd set polarity callback.
- *
- * @adev: amdgpu_device pointer
- * @hpd: hpd (hotplug detect) pin
- *
- * Set the polarity of the hpd pin (evergreen+).
- */
-static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev,
- enum amdgpu_hpd_id hpd)
-{
- u32 tmp;
- bool connected = dce_v11_0_hpd_sense(adev, hpd);
-
- if (hpd >= adev->mode_info.num_hpd)
- return;
-
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
- if (connected)
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
- else
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
-}
-
-/**
- * dce_v11_0_hpd_init - hpd setup callback.
- *
- * @adev: amdgpu_device pointer
- *
- * Setup the hpd pins used by the card (evergreen+).
- * Enable the pin, set the polarity, and enable the hpd interrupts.
- */
-static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
-{
- struct drm_device *dev = adev_to_drm(adev);
- struct drm_connector *connector;
- struct drm_connector_list_iter iter;
- u32 tmp;
-
- drm_connector_list_iter_begin(dev, &iter);
- drm_for_each_connector_iter(connector, &iter) {
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
-
- if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
- continue;
-
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
- connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
- /* don't try to enable hpd on eDP or LVDS avoid breaking the
- * aux dp channel on imac and help (but not completely fix)
- * https://bugzilla.redhat.com/show_bug.cgi?id=726143
- * also avoid interrupt storms during dpms.
- */
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
- continue;
- }
-
- tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
- WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
-
- tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
- DC_HPD_CONNECT_INT_DELAY,
- AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
- tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
- DC_HPD_DISCONNECT_INT_DELAY,
- AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
- WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
-
- dce_v11_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd);
- dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
- amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
- }
- drm_connector_list_iter_end(&iter);
-}
-
-/**
- * dce_v11_0_hpd_fini - hpd tear down callback.
- *
- * @adev: amdgpu_device pointer
- *
- * Tear down the hpd pins used by the card (evergreen+).
- * Disable the hpd interrupts.
- */
-static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
-{
- struct drm_device *dev = adev_to_drm(adev);
- struct drm_connector *connector;
- struct drm_connector_list_iter iter;
- u32 tmp;
-
- drm_connector_list_iter_begin(dev, &iter);
- drm_for_each_connector_iter(connector, &iter) {
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
-
- if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
- continue;
-
- tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
- WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
-
- amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
- }
- drm_connector_list_iter_end(&iter);
-}
-
-static u32 dce_v11_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
-{
- return mmDC_GPIO_HPD_A;
-}
-
-static bool dce_v11_0_is_display_hung(struct amdgpu_device *adev)
-{
- u32 crtc_hung = 0;
- u32 crtc_status[6];
- u32 i, j, tmp;
-
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN)) {
- crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
- crtc_hung |= (1 << i);
- }
- }
-
- for (j = 0; j < 10; j++) {
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (crtc_hung & (1 << i)) {
- tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
- if (tmp != crtc_status[i])
- crtc_hung &= ~(1 << i);
- }
- }
- if (crtc_hung == 0)
- return false;
- udelay(100);
- }
-
- return true;
-}
-
-static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev,
- bool render)
-{
- u32 tmp;
-
- /* Lockout access through VGA aperture*/
- tmp = RREG32(mmVGA_HDP_CONTROL);
- if (render)
- tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
- else
- tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
- WREG32(mmVGA_HDP_CONTROL, tmp);
-
- /* disable VGA render */
- tmp = RREG32(mmVGA_RENDER_CONTROL);
- if (render)
- tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
- else
- tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
- WREG32(mmVGA_RENDER_CONTROL, tmp);
-}
-
-static int dce_v11_0_get_num_crtc (struct amdgpu_device *adev)
-{
- int num_crtc = 0;
-
- switch (adev->asic_type) {
- case CHIP_CARRIZO:
- num_crtc = 3;
- break;
- case CHIP_STONEY:
- num_crtc = 2;
- break;
- case CHIP_POLARIS10:
- case CHIP_VEGAM:
- num_crtc = 6;
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- num_crtc = 5;
- break;
- default:
- num_crtc = 0;
- }
- return num_crtc;
-}
-
-void dce_v11_0_disable_dce(struct amdgpu_device *adev)
-{
- /*Disable VGA render and enabled crtc, if has DCE engine*/
- if (amdgpu_atombios_has_dce_engine_info(adev)) {
- u32 tmp;
- int crtc_enabled, i;
-
- dce_v11_0_set_vga_render_state(adev, false);
-
- /*Disable crtc*/
- for (i = 0; i < dce_v11_0_get_num_crtc(adev); i++) {
- crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
- CRTC_CONTROL, CRTC_MASTER_EN);
- if (crtc_enabled) {
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
- tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
- tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
- WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- }
- }
- }
-}
-
-static void dce_v11_0_program_fmt(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
- struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
- int bpc = 0;
- u32 tmp = 0;
- enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
-
- if (connector) {
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- bpc = amdgpu_connector_get_monitor_bpc(connector);
- dither = amdgpu_connector->dither;
- }
-
- /* LVDS/eDP FMT is set up by atom */
- if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
- return;
-
- /* not needed for analog */
- if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
- (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
- return;
-
- if (bpc == 0)
- return;
-
- switch (bpc) {
- case 6:
- if (dither == AMDGPU_FMT_DITHER_ENABLE) {
- /* XXX sort out optimal dither settings */
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 0);
- } else {
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 0);
- }
- break;
- case 8:
- if (dither == AMDGPU_FMT_DITHER_ENABLE) {
- /* XXX sort out optimal dither settings */
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 1);
- } else {
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 1);
- }
- break;
- case 10:
- if (dither == AMDGPU_FMT_DITHER_ENABLE) {
- /* XXX sort out optimal dither settings */
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 2);
- } else {
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 2);
- }
- break;
- default:
- /* not needed */
- break;
- }
-
- WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-}
-
-
-/* display watermark setup */
-/**
- * dce_v11_0_line_buffer_adjust - Set up the line buffer
- *
- * @adev: amdgpu_device pointer
- * @amdgpu_crtc: the selected display controller
- * @mode: the current display mode on the selected display
- * controller
- *
- * Setup up the line buffer allocation for
- * the selected display controller (CIK).
- * Returns the line buffer size in pixels.
- */
-static u32 dce_v11_0_line_buffer_adjust(struct amdgpu_device *adev,
- struct amdgpu_crtc *amdgpu_crtc,
- struct drm_display_mode *mode)
-{
- u32 tmp, buffer_alloc, i, mem_cfg;
- u32 pipe_offset = amdgpu_crtc->crtc_id;
- /*
- * Line Buffer Setup
- * There are 6 line buffers, one for each display controllers.
- * There are 3 partitions per LB. Select the number of partitions
- * to enable based on the display width. For display widths larger
- * than 4096, you need use to use 2 display controllers and combine
- * them using the stereo blender.
- */
- if (amdgpu_crtc->base.enabled && mode) {
- if (mode->crtc_hdisplay < 1920) {
- mem_cfg = 1;
- buffer_alloc = 2;
- } else if (mode->crtc_hdisplay < 2560) {
- mem_cfg = 2;
- buffer_alloc = 2;
- } else if (mode->crtc_hdisplay < 4096) {
- mem_cfg = 0;
- buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
- } else {
- DRM_DEBUG_KMS("Mode too big for LB!\n");
- mem_cfg = 0;
- buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
- }
- } else {
- mem_cfg = 1;
- buffer_alloc = 0;
- }
-
- tmp = RREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mem_cfg);
- WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
- tmp = REG_SET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, buffer_alloc);
- WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, tmp);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
- if (REG_GET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED))
- break;
- udelay(1);
- }
-
- if (amdgpu_crtc->base.enabled && mode) {
- switch (mem_cfg) {
- case 0:
- default:
- return 4096 * 2;
- case 1:
- return 1920 * 2;
- case 2:
- return 2560 * 2;
- }
- }
-
- /* controller not enabled, so no lb used */
- return 0;
-}
-
-/**
- * cik_get_number_of_dram_channels - get the number of dram channels
- *
- * @adev: amdgpu_device pointer
- *
- * Look up the number of video ram channels (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the number of dram channels
- */
-static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
-{
- u32 tmp = RREG32(mmMC_SHARED_CHMAP);
-
- switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
- case 0:
- default:
- return 1;
- case 1:
- return 2;
- case 2:
- return 4;
- case 3:
- return 8;
- case 4:
- return 3;
- case 5:
- return 6;
- case 6:
- return 10;
- case 7:
- return 12;
- case 8:
- return 16;
- }
-}
-
-struct dce10_wm_params {
- u32 dram_channels; /* number of dram channels */
- u32 yclk; /* bandwidth per dram data pin in kHz */
- u32 sclk; /* engine clock in kHz */
- u32 disp_clk; /* display clock in kHz */
- u32 src_width; /* viewport width */
- u32 active_time; /* active display time in ns */
- u32 blank_time; /* blank time in ns */
- bool interlaced; /* mode is interlaced */
- fixed20_12 vsc; /* vertical scale ratio */
- u32 num_heads; /* number of active crtcs */
- u32 bytes_per_pixel; /* bytes per pixel display + overlay */
- u32 lb_size; /* line buffer allocated to pipe */
- u32 vtaps; /* vertical scaler taps */
-};
-
-/**
- * dce_v11_0_dram_bandwidth - get the dram bandwidth
- *
- * @wm: watermark calculation data
- *
- * Calculate the raw dram bandwidth (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the dram bandwidth in MBytes/s
- */
-static u32 dce_v11_0_dram_bandwidth(struct dce10_wm_params *wm)
-{
- /* Calculate raw DRAM Bandwidth */
- fixed20_12 dram_efficiency; /* 0.7 */
- fixed20_12 yclk, dram_channels, bandwidth;
- fixed20_12 a;
-
- a.full = dfixed_const(1000);
- yclk.full = dfixed_const(wm->yclk);
- yclk.full = dfixed_div(yclk, a);
- dram_channels.full = dfixed_const(wm->dram_channels * 4);
- a.full = dfixed_const(10);
- dram_efficiency.full = dfixed_const(7);
- dram_efficiency.full = dfixed_div(dram_efficiency, a);
- bandwidth.full = dfixed_mul(dram_channels, yclk);
- bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
-
- return dfixed_trunc(bandwidth);
-}
-
-/**
- * dce_v11_0_dram_bandwidth_for_display - get the dram bandwidth for display
- *
- * @wm: watermark calculation data
- *
- * Calculate the dram bandwidth used for display (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the dram bandwidth for display in MBytes/s
- */
-static u32 dce_v11_0_dram_bandwidth_for_display(struct dce10_wm_params *wm)
-{
- /* Calculate DRAM Bandwidth and the part allocated to display. */
- fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
- fixed20_12 yclk, dram_channels, bandwidth;
- fixed20_12 a;
-
- a.full = dfixed_const(1000);
- yclk.full = dfixed_const(wm->yclk);
- yclk.full = dfixed_div(yclk, a);
- dram_channels.full = dfixed_const(wm->dram_channels * 4);
- a.full = dfixed_const(10);
- disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
- disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
- bandwidth.full = dfixed_mul(dram_channels, yclk);
- bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
-
- return dfixed_trunc(bandwidth);
-}
-
-/**
- * dce_v11_0_data_return_bandwidth - get the data return bandwidth
- *
- * @wm: watermark calculation data
- *
- * Calculate the data return bandwidth used for display (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the data return bandwidth in MBytes/s
- */
-static u32 dce_v11_0_data_return_bandwidth(struct dce10_wm_params *wm)
-{
- /* Calculate the display Data return Bandwidth */
- fixed20_12 return_efficiency; /* 0.8 */
- fixed20_12 sclk, bandwidth;
- fixed20_12 a;
-
- a.full = dfixed_const(1000);
- sclk.full = dfixed_const(wm->sclk);
- sclk.full = dfixed_div(sclk, a);
- a.full = dfixed_const(10);
- return_efficiency.full = dfixed_const(8);
- return_efficiency.full = dfixed_div(return_efficiency, a);
- a.full = dfixed_const(32);
- bandwidth.full = dfixed_mul(a, sclk);
- bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
-
- return dfixed_trunc(bandwidth);
-}
-
-/**
- * dce_v11_0_dmif_request_bandwidth - get the dmif bandwidth
- *
- * @wm: watermark calculation data
- *
- * Calculate the dmif bandwidth used for display (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the dmif bandwidth in MBytes/s
- */
-static u32 dce_v11_0_dmif_request_bandwidth(struct dce10_wm_params *wm)
-{
- /* Calculate the DMIF Request Bandwidth */
- fixed20_12 disp_clk_request_efficiency; /* 0.8 */
- fixed20_12 disp_clk, bandwidth;
- fixed20_12 a, b;
-
- a.full = dfixed_const(1000);
- disp_clk.full = dfixed_const(wm->disp_clk);
- disp_clk.full = dfixed_div(disp_clk, a);
- a.full = dfixed_const(32);
- b.full = dfixed_mul(a, disp_clk);
-
- a.full = dfixed_const(10);
- disp_clk_request_efficiency.full = dfixed_const(8);
- disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
-
- bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
-
- return dfixed_trunc(bandwidth);
-}
-
-/**
- * dce_v11_0_available_bandwidth - get the min available bandwidth
- *
- * @wm: watermark calculation data
- *
- * Calculate the min available bandwidth used for display (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the min available bandwidth in MBytes/s
- */
-static u32 dce_v11_0_available_bandwidth(struct dce10_wm_params *wm)
-{
- /* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
- u32 dram_bandwidth = dce_v11_0_dram_bandwidth(wm);
- u32 data_return_bandwidth = dce_v11_0_data_return_bandwidth(wm);
- u32 dmif_req_bandwidth = dce_v11_0_dmif_request_bandwidth(wm);
-
- return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
-}
-
-/**
- * dce_v11_0_average_bandwidth - get the average available bandwidth
- *
- * @wm: watermark calculation data
- *
- * Calculate the average available bandwidth used for display (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the average available bandwidth in MBytes/s
- */
-static u32 dce_v11_0_average_bandwidth(struct dce10_wm_params *wm)
-{
- /* Calculate the display mode Average Bandwidth
- * DisplayMode should contain the source and destination dimensions,
- * timing, etc.
- */
- fixed20_12 bpp;
- fixed20_12 line_time;
- fixed20_12 src_width;
- fixed20_12 bandwidth;
- fixed20_12 a;
-
- a.full = dfixed_const(1000);
- line_time.full = dfixed_const(wm->active_time + wm->blank_time);
- line_time.full = dfixed_div(line_time, a);
- bpp.full = dfixed_const(wm->bytes_per_pixel);
- src_width.full = dfixed_const(wm->src_width);
- bandwidth.full = dfixed_mul(src_width, bpp);
- bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
- bandwidth.full = dfixed_div(bandwidth, line_time);
-
- return dfixed_trunc(bandwidth);
-}
-
-/**
- * dce_v11_0_latency_watermark - get the latency watermark
- *
- * @wm: watermark calculation data
- *
- * Calculate the latency watermark (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the latency watermark in ns
- */
-static u32 dce_v11_0_latency_watermark(struct dce10_wm_params *wm)
-{
- /* First calculate the latency in ns */
- u32 mc_latency = 2000; /* 2000 ns. */
- u32 available_bandwidth = dce_v11_0_available_bandwidth(wm);
- u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
- u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
- u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
- u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
- (wm->num_heads * cursor_line_pair_return_time);
- u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
- u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
- u32 tmp, dmif_size = 12288;
- fixed20_12 a, b, c;
-
- if (wm->num_heads == 0)
- return 0;
-
- a.full = dfixed_const(2);
- b.full = dfixed_const(1);
- if ((wm->vsc.full > a.full) ||
- ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
- (wm->vtaps >= 5) ||
- ((wm->vsc.full >= a.full) && wm->interlaced))
- max_src_lines_per_dst_line = 4;
- else
- max_src_lines_per_dst_line = 2;
-
- a.full = dfixed_const(available_bandwidth);
- b.full = dfixed_const(wm->num_heads);
- a.full = dfixed_div(a, b);
- tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
- tmp = min(dfixed_trunc(a), tmp);
-
- lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
-
- a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
- b.full = dfixed_const(1000);
- c.full = dfixed_const(lb_fill_bw);
- b.full = dfixed_div(c, b);
- a.full = dfixed_div(a, b);
- line_fill_time = dfixed_trunc(a);
-
- if (line_fill_time < wm->active_time)
- return latency;
- else
- return latency + (line_fill_time - wm->active_time);
-
-}
-
-/**
- * dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display - check
- * average and available dram bandwidth
- *
- * @wm: watermark calculation data
- *
- * Check if the display average bandwidth fits in the display
- * dram bandwidth (CIK).
- * Used for display watermark bandwidth calculations
- * Returns true if the display fits, false if not.
- */
-static bool dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce10_wm_params *wm)
-{
- if (dce_v11_0_average_bandwidth(wm) <=
- (dce_v11_0_dram_bandwidth_for_display(wm) / wm->num_heads))
- return true;
- else
- return false;
-}
-
-/**
- * dce_v11_0_average_bandwidth_vs_available_bandwidth - check
- * average and available bandwidth
- *
- * @wm: watermark calculation data
- *
- * Check if the display average bandwidth fits in the display
- * available bandwidth (CIK).
- * Used for display watermark bandwidth calculations
- * Returns true if the display fits, false if not.
- */
-static bool dce_v11_0_average_bandwidth_vs_available_bandwidth(struct dce10_wm_params *wm)
-{
- if (dce_v11_0_average_bandwidth(wm) <=
- (dce_v11_0_available_bandwidth(wm) / wm->num_heads))
- return true;
- else
- return false;
-}
-
-/**
- * dce_v11_0_check_latency_hiding - check latency hiding
- *
- * @wm: watermark calculation data
- *
- * Check latency hiding (CIK).
- * Used for display watermark bandwidth calculations
- * Returns true if the display fits, false if not.
- */
-static bool dce_v11_0_check_latency_hiding(struct dce10_wm_params *wm)
-{
- u32 lb_partitions = wm->lb_size / wm->src_width;
- u32 line_time = wm->active_time + wm->blank_time;
- u32 latency_tolerant_lines;
- u32 latency_hiding;
- fixed20_12 a;
-
- a.full = dfixed_const(1);
- if (wm->vsc.full > a.full)
- latency_tolerant_lines = 1;
- else {
- if (lb_partitions <= (wm->vtaps + 1))
- latency_tolerant_lines = 1;
- else
- latency_tolerant_lines = 2;
- }
-
- latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
-
- if (dce_v11_0_latency_watermark(wm) <= latency_hiding)
- return true;
- else
- return false;
-}
-
-/**
- * dce_v11_0_program_watermarks - program display watermarks
- *
- * @adev: amdgpu_device pointer
- * @amdgpu_crtc: the selected display controller
- * @lb_size: line buffer size
- * @num_heads: number of display controllers in use
- *
- * Calculate and program the display watermarks for the
- * selected display controller (CIK).
- */
-static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
- struct amdgpu_crtc *amdgpu_crtc,
- u32 lb_size, u32 num_heads)
-{
- struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
- struct dce10_wm_params wm_low, wm_high;
- u32 active_time;
- u32 line_time = 0;
- u32 latency_watermark_a = 0, latency_watermark_b = 0;
- u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
-
- if (amdgpu_crtc->base.enabled && num_heads && mode) {
- active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
- (u32)mode->clock);
- line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
- (u32)mode->clock);
- line_time = min_t(u32, line_time, 65535);
-
- /* watermark for high clocks */
- if (adev->pm.dpm_enabled) {
- wm_high.yclk =
- amdgpu_dpm_get_mclk(adev, false) * 10;
- wm_high.sclk =
- amdgpu_dpm_get_sclk(adev, false) * 10;
- } else {
- wm_high.yclk = adev->pm.current_mclk * 10;
- wm_high.sclk = adev->pm.current_sclk * 10;
- }
-
- wm_high.disp_clk = mode->clock;
- wm_high.src_width = mode->crtc_hdisplay;
- wm_high.active_time = active_time;
- wm_high.blank_time = line_time - wm_high.active_time;
- wm_high.interlaced = false;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- wm_high.interlaced = true;
- wm_high.vsc = amdgpu_crtc->vsc;
- wm_high.vtaps = 1;
- if (amdgpu_crtc->rmx_type != RMX_OFF)
- wm_high.vtaps = 2;
- wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
- wm_high.lb_size = lb_size;
- wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
- wm_high.num_heads = num_heads;
-
- /* set for high clocks */
- latency_watermark_a = min_t(u32, dce_v11_0_latency_watermark(&wm_high), 65535);
-
- /* possibly force display priority to high */
- /* should really do this at mode validation time... */
- if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
- !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
- !dce_v11_0_check_latency_hiding(&wm_high) ||
- (adev->mode_info.disp_priority == 2)) {
- DRM_DEBUG_KMS("force priority to high\n");
- }
-
- /* watermark for low clocks */
- if (adev->pm.dpm_enabled) {
- wm_low.yclk =
- amdgpu_dpm_get_mclk(adev, true) * 10;
- wm_low.sclk =
- amdgpu_dpm_get_sclk(adev, true) * 10;
- } else {
- wm_low.yclk = adev->pm.current_mclk * 10;
- wm_low.sclk = adev->pm.current_sclk * 10;
- }
-
- wm_low.disp_clk = mode->clock;
- wm_low.src_width = mode->crtc_hdisplay;
- wm_low.active_time = active_time;
- wm_low.blank_time = line_time - wm_low.active_time;
- wm_low.interlaced = false;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- wm_low.interlaced = true;
- wm_low.vsc = amdgpu_crtc->vsc;
- wm_low.vtaps = 1;
- if (amdgpu_crtc->rmx_type != RMX_OFF)
- wm_low.vtaps = 2;
- wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
- wm_low.lb_size = lb_size;
- wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
- wm_low.num_heads = num_heads;
-
- /* set for low clocks */
- latency_watermark_b = min_t(u32, dce_v11_0_latency_watermark(&wm_low), 65535);
-
- /* possibly force display priority to high */
- /* should really do this at mode validation time... */
- if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
- !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
- !dce_v11_0_check_latency_hiding(&wm_low) ||
- (adev->mode_info.disp_priority == 2)) {
- DRM_DEBUG_KMS("force priority to high\n");
- }
- lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
- }
-
- /* select wm A */
- wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 1);
- WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
- tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
- tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
- WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
- /* select wm B */
- tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
- WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
- tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
- tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
- WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
- /* restore original selection */
- WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);
-
- /* save values for DPM */
- amdgpu_crtc->line_time = line_time;
- amdgpu_crtc->wm_high = latency_watermark_a;
- amdgpu_crtc->wm_low = latency_watermark_b;
- /* Save number of lines the linebuffer leads before the scanout */
- amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
-}
-
-/**
- * dce_v11_0_bandwidth_update - program display watermarks
- *
- * @adev: amdgpu_device pointer
- *
- * Calculate and program the display watermarks and line
- * buffer allocation (CIK).
- */
-static void dce_v11_0_bandwidth_update(struct amdgpu_device *adev)
-{
- struct drm_display_mode *mode = NULL;
- u32 num_heads = 0, lb_size;
- int i;
-
- amdgpu_display_update_priority(adev);
-
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (adev->mode_info.crtcs[i]->base.enabled)
- num_heads++;
- }
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- mode = &adev->mode_info.crtcs[i]->base.mode;
- lb_size = dce_v11_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
- dce_v11_0_program_watermarks(adev, adev->mode_info.crtcs[i],
- lb_size, num_heads);
- }
-}
-
-static void dce_v11_0_audio_get_connected_pins(struct amdgpu_device *adev)
-{
- int i;
- u32 offset, tmp;
-
- for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
- offset = adev->mode_info.audio.pin[i].offset;
- tmp = RREG32_AUDIO_ENDPT(offset,
- ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
- if (((tmp &
- AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
- AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
- adev->mode_info.audio.pin[i].connected = false;
- else
- adev->mode_info.audio.pin[i].connected = true;
- }
-}
-
-static struct amdgpu_audio_pin *dce_v11_0_audio_get_pin(struct amdgpu_device *adev)
-{
- int i;
-
- dce_v11_0_audio_get_connected_pins(adev);
-
- for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
- if (adev->mode_info.audio.pin[i].connected)
- return &adev->mode_info.audio.pin[i];
- }
- DRM_ERROR("No connected audio pins found!\n");
- return NULL;
-}
-
-static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder)
-{
- struct amdgpu_device *adev = drm_to_adev(encoder->dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- u32 tmp;
-
- if (!dig || !dig->afmt || !dig->afmt->pin)
- return;
-
- tmp = RREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, dig->afmt->pin->id);
- WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, tmp);
-}
-
-static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- struct drm_connector *connector;
- struct drm_connector_list_iter iter;
- struct amdgpu_connector *amdgpu_connector = NULL;
- u32 tmp;
- int interlace = 0;
-
- if (!dig || !dig->afmt || !dig->afmt->pin)
- return;
-
- drm_connector_list_iter_begin(dev, &iter);
- drm_for_each_connector_iter(connector, &iter) {
- if (connector->encoder == encoder) {
- amdgpu_connector = to_amdgpu_connector(connector);
- break;
- }
- }
- drm_connector_list_iter_end(&iter);
-
- if (!amdgpu_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return;
- }
-
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- interlace = 1;
- if (connector->latency_present[interlace]) {
- tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
- VIDEO_LIPSYNC, connector->video_latency[interlace]);
- tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
- AUDIO_LIPSYNC, connector->audio_latency[interlace]);
- } else {
- tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
- VIDEO_LIPSYNC, 0);
- tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
- AUDIO_LIPSYNC, 0);
- }
- WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
- ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
-}
-
-static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- struct drm_connector *connector;
- struct drm_connector_list_iter iter;
- struct amdgpu_connector *amdgpu_connector = NULL;
- u32 tmp;
- u8 *sadb = NULL;
- int sad_count;
-
- if (!dig || !dig->afmt || !dig->afmt->pin)
- return;
-
- drm_connector_list_iter_begin(dev, &iter);
- drm_for_each_connector_iter(connector, &iter) {
- if (connector->encoder == encoder) {
- amdgpu_connector = to_amdgpu_connector(connector);
- break;
- }
- }
- drm_connector_list_iter_end(&iter);
-
- if (!amdgpu_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return;
- }
-
- sad_count = drm_edid_to_speaker_allocation(amdgpu_connector->edid, &sadb);
- if (sad_count < 0) {
- DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
- sad_count = 0;
- }
-
- /* program the speaker allocation */
- tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
- ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
- DP_CONNECTION, 0);
- /* set HDMI mode */
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
- HDMI_CONNECTION, 1);
- if (sad_count)
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
- SPEAKER_ALLOCATION, sadb[0]);
- else
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
- SPEAKER_ALLOCATION, 5); /* stereo */
- WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
- ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
-
- kfree(sadb);
-}
-
-static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- struct drm_connector *connector;
- struct drm_connector_list_iter iter;
- struct amdgpu_connector *amdgpu_connector = NULL;
- struct cea_sad *sads;
- int i, sad_count;
-
- static const u16 eld_reg_to_type[][2] = {
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
- };
-
- if (!dig || !dig->afmt || !dig->afmt->pin)
- return;
-
- drm_connector_list_iter_begin(dev, &iter);
- drm_for_each_connector_iter(connector, &iter) {
- if (connector->encoder == encoder) {
- amdgpu_connector = to_amdgpu_connector(connector);
- break;
- }
- }
- drm_connector_list_iter_end(&iter);
-
- if (!amdgpu_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return;
- }
-
- sad_count = drm_edid_to_sad(amdgpu_connector->edid, &sads);
- if (sad_count < 0)
- DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
- if (sad_count <= 0)
- return;
- BUG_ON(!sads);
-
- for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
- u32 tmp = 0;
- u8 stereo_freqs = 0;
- int max_channels = -1;
- int j;
-
- for (j = 0; j < sad_count; j++) {
- struct cea_sad *sad = &sads[j];
-
- if (sad->format == eld_reg_to_type[i][1]) {
- if (sad->channels > max_channels) {
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- MAX_CHANNELS, sad->channels);
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- DESCRIPTOR_BYTE_2, sad->byte2);
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- SUPPORTED_FREQUENCIES, sad->freq);
- max_channels = sad->channels;
- }
-
- if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
- stereo_freqs |= sad->freq;
- else
- break;
- }
- }
-
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
- WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
- }
-
- kfree(sads);
-}
-
-static void dce_v11_0_audio_enable(struct amdgpu_device *adev,
- struct amdgpu_audio_pin *pin,
- bool enable)
-{
- if (!pin)
- return;
-
- WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
- enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
-}
-
-static const u32 pin_offsets[] =
-{
- AUD0_REGISTER_OFFSET,
- AUD1_REGISTER_OFFSET,
- AUD2_REGISTER_OFFSET,
- AUD3_REGISTER_OFFSET,
- AUD4_REGISTER_OFFSET,
- AUD5_REGISTER_OFFSET,
- AUD6_REGISTER_OFFSET,
- AUD7_REGISTER_OFFSET,
-};
-
-static int dce_v11_0_audio_init(struct amdgpu_device *adev)
-{
- int i;
-
- if (!amdgpu_audio)
- return 0;
-
- adev->mode_info.audio.enabled = true;
-
- switch (adev->asic_type) {
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- adev->mode_info.audio.num_pins = 7;
- break;
- case CHIP_POLARIS10:
- case CHIP_VEGAM:
- adev->mode_info.audio.num_pins = 8;
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- adev->mode_info.audio.num_pins = 6;
- break;
- default:
- return -EINVAL;
- }
-
- for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
- adev->mode_info.audio.pin[i].channels = -1;
- adev->mode_info.audio.pin[i].rate = -1;
- adev->mode_info.audio.pin[i].bits_per_sample = -1;
- adev->mode_info.audio.pin[i].status_bits = 0;
- adev->mode_info.audio.pin[i].category_code = 0;
- adev->mode_info.audio.pin[i].connected = false;
- adev->mode_info.audio.pin[i].offset = pin_offsets[i];
- adev->mode_info.audio.pin[i].id = i;
- /* disable audio. it will be set up later */
- /* XXX remove once we switch to ip funcs */
- dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
- }
-
- return 0;
-}
-
-static void dce_v11_0_audio_fini(struct amdgpu_device *adev)
-{
- int i;
-
- if (!amdgpu_audio)
- return;
-
- if (!adev->mode_info.audio.enabled)
- return;
-
- for (i = 0; i < adev->mode_info.audio.num_pins; i++)
- dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
-
- adev->mode_info.audio.enabled = false;
-}
-
-/*
- * update the N and CTS parameters for a given pixel clock rate
- */
-static void dce_v11_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- u32 tmp;
-
- tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
- WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
- tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
- WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
- WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
- tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
- WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
- WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
- tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
- WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
-}
-
-/*
- * build an HDMI AVI infoframe
- */
-static void dce_v11_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
- void *buffer, size_t size)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- uint8_t *frame = buffer + 3;
- uint8_t *header = buffer;
-
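- /* frame points past the 3-byte infoframe header, so frame[0] is the
-  * packed checksum byte; the version byte (header[1]) is written into
-  * the top byte of AFMT_AVI_INFO3.
-  */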
- WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
- frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
- WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
- frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
- WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
- frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
- WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
- frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
-}
-
-static void dce_v11_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
- u32 dto_phase = 24 * 1000;
- u32 dto_modulo = clock;
- u32 tmp;
-
- if (!dig || !dig->afmt)
- return;
-
- /* XXX two dtos; generally use dto0 for hdmi */
- /* Express [24MHz / target pixel clock] as an exact rational
- * number (ratio of two integers): DCCG_AUDIO_DTOx_PHASE is the
- * numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
- */
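- /* e.g. a 148500 kHz (148.5 MHz) mode clock gives PHASE = 24000 and
-  * MODULE = 148500
-  */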
- tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
- tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL,
- amdgpu_crtc->crtc_id);
- WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
- WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
- WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
-}
-
-/*
- * update the info frames with the data from the current display mode
- */
-static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
- u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
- struct hdmi_avi_infoframe frame;
- ssize_t err;
- u32 tmp;
- int bpc = 8;
-
- if (!dig || !dig->afmt)
- return;
-
- /* Silent, r600_hdmi_enable will raise WARN for us */
- if (!dig->afmt->enabled)
- return;
-
- /* hdmi deep color mode general control packets setup, if bpc > 8 */
- if (encoder->crtc) {
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
- bpc = amdgpu_crtc->bpc;
- }
-
- /* disable audio prior to setting up hw */
- dig->afmt->pin = dce_v11_0_audio_get_pin(adev);
- dce_v11_0_audio_enable(adev, dig->afmt->pin, false);
-
- dce_v11_0_audio_set_dto(encoder, mode->clock);
-
- tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
- WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); /* send null packets when required */
-
- WREG32(mmAFMT_AUDIO_CRC_CONTROL + dig->afmt->offset, 0x1000);
-
- tmp = RREG32(mmHDMI_CONTROL + dig->afmt->offset);
- switch (bpc) {
- case 0:
- case 6:
- case 8:
- case 16:
- default:
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 0);
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
- DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
- connector->name, bpc);
- break;
- case 10:
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1);
- DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
- connector->name);
- break;
- case 12:
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2);
- DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
- connector->name);
- break;
- }
- WREG32(mmHDMI_CONTROL + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); /* send null packets when required */
- tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1); /* send general control packets */
- tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1); /* send general control packets every frame */
- WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
- /* enable audio info frames (frames won't be set until audio is enabled) */
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
- /* required for audio info values to be updated */
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
- WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
- /* required for audio info values to be updated */
- tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
- WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
- /* anything other than 0 */
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, 2);
- WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
-
- WREG32(mmHDMI_GC + dig->afmt->offset, 0); /* unset HDMI_GC_AVMUTE */
-
- tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
- /* set the default audio delay */
- tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
- /* should be sufficient for all audio modes and small enough for all hblanks */
- tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
- WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
- /* allow 60958 channel status fields to be updated */
- tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
- WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
- if (bpc > 8)
- /* clear SW CTS value */
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 0);
- else
- /* select SW CTS value */
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 1);
- /* allow hw to send ACR packets when required */
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
- WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);
-
- dce_v11_0_afmt_update_ACR(encoder, mode->clock);
-
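- /* program the IEC 60958 channel status channel numbers 1-8 (up to 7.1) */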
- tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
- WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
- WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
- WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);
-
- dce_v11_0_audio_write_speaker_allocation(encoder);
-
- WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset,
- (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));
-
- dce_v11_0_afmt_audio_select_pin(encoder);
- dce_v11_0_audio_write_sad_regs(encoder);
- dce_v11_0_audio_write_latency_fields(encoder, mode);
-
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
- if (err < 0) {
- DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
- return;
- }
-
- err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
- if (err < 0) {
- DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
- return;
- }
-
- dce_v11_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));
-
- tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
- /* enable AVI info frames */
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
- /* send AVI info frames every frame/field */
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
- WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
- WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
- /* send audio packets */
- tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
- WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
-
- WREG32(mmAFMT_RAMP_CONTROL0 + dig->afmt->offset, 0x00FFFFFF);
- WREG32(mmAFMT_RAMP_CONTROL1 + dig->afmt->offset, 0x007FFFFF);
- WREG32(mmAFMT_RAMP_CONTROL2 + dig->afmt->offset, 0x00000001);
- WREG32(mmAFMT_RAMP_CONTROL3 + dig->afmt->offset, 0x00000001);
-
- /* enable audio after setting up hw */
- dce_v11_0_audio_enable(adev, dig->afmt->pin, true);
-}
-
-static void dce_v11_0_afmt_enable(struct drm_encoder *encoder, bool enable)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
-
- if (!dig || !dig->afmt)
- return;
-
- /* Silent, r600_hdmi_enable will raise WARN for us */
- if (enable && dig->afmt->enabled)
- return;
- if (!enable && !dig->afmt->enabled)
- return;
-
- if (!enable && dig->afmt->pin) {
- dce_v11_0_audio_enable(adev, dig->afmt->pin, false);
- dig->afmt->pin = NULL;
- }
-
- dig->afmt->enabled = enable;
-
- DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
- enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
-}
-
-static int dce_v11_0_afmt_init(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < adev->mode_info.num_dig; i++)
- adev->mode_info.afmt[i] = NULL;
-
- /* DCE11 has audio blocks tied to DIG encoders */
- for (i = 0; i < adev->mode_info.num_dig; i++) {
- adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
- if (adev->mode_info.afmt[i]) {
- adev->mode_info.afmt[i]->offset = dig_offsets[i];
- adev->mode_info.afmt[i]->id = i;
- } else {
- int j;
- for (j = 0; j < i; j++) {
- kfree(adev->mode_info.afmt[j]);
- adev->mode_info.afmt[j] = NULL;
- }
- return -ENOMEM;
- }
- }
- return 0;
-}
-
-static void dce_v11_0_afmt_fini(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < adev->mode_info.num_dig; i++) {
- kfree(adev->mode_info.afmt[i]);
- adev->mode_info.afmt[i] = NULL;
- }
-}
-
-static const u32 vga_control_regs[6] =
-{
- mmD1VGA_CONTROL,
- mmD2VGA_CONTROL,
- mmD3VGA_CONTROL,
- mmD4VGA_CONTROL,
- mmD5VGA_CONTROL,
- mmD6VGA_CONTROL,
-};
-
-static void dce_v11_0_vga_enable(struct drm_crtc *crtc, bool enable)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- u32 vga_control;
-
- vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
- if (enable)
- WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
- else
- WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
-}
-
-static void dce_v11_0_grph_enable(struct drm_crtc *crtc, bool enable)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
-
- if (enable)
- WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
- else
- WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
-}
-
-static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y, int atomic)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct drm_framebuffer *target_fb;
- struct drm_gem_object *obj;
- struct amdgpu_bo *abo;
- uint64_t fb_location, tiling_flags;
- uint32_t fb_format, fb_pitch_pixels;
- u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
- u32 pipe_config;
- u32 tmp, viewport_w, viewport_h;
- int r;
- bool bypass_lut = false;
-
- /* no fb bound */
- if (!atomic && !crtc->primary->fb) {
- DRM_DEBUG_KMS("No FB bound\n");
- return 0;
- }
-
- if (atomic)
- target_fb = fb;
- else
- target_fb = crtc->primary->fb;
-
- /* If atomic, assume fb object is pinned & idle & fenced and
- * just update base pointers
- */
- obj = target_fb->obj[0];
- abo = gem_to_amdgpu_bo(obj);
- r = amdgpu_bo_reserve(abo, false);
- if (unlikely(r != 0))
- return r;
-
- if (!atomic) {
- abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
- if (unlikely(r != 0)) {
- amdgpu_bo_unreserve(abo);
- return -EINVAL;
- }
- }
- fb_location = amdgpu_bo_gpu_offset(abo);
-
- amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
- amdgpu_bo_unreserve(abo);
-
- pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
-
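- /* translate the DRM fourcc into GRPH_CONTROL depth/format and, on
-  * big-endian hosts, the matching byte-swap mode
-  */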
- switch (target_fb->format->format) {
- case DRM_FORMAT_C8:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
- break;
- case DRM_FORMAT_XRGB4444:
- case DRM_FORMAT_ARGB4444:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 2);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN16);
-#endif
- break;
- case DRM_FORMAT_XRGB1555:
- case DRM_FORMAT_ARGB1555:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN16);
-#endif
- break;
- case DRM_FORMAT_BGRX5551:
- case DRM_FORMAT_BGRA5551:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 5);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN16);
-#endif
- break;
- case DRM_FORMAT_RGB565:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN16);
-#endif
- break;
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN32);
-#endif
- break;
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_ARGB2101010:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN32);
-#endif
- /* Greater than 8 bpc fb needs to bypass the hw lut to retain precision */
- bypass_lut = true;
- break;
- case DRM_FORMAT_BGRX1010102:
- case DRM_FORMAT_BGRA1010102:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 4);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN32);
-#endif
- /* Greater than 8 bpc fb needs to bypass the hw lut to retain precision */
- bypass_lut = true;
- break;
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ABGR8888:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_RED_CROSSBAR, 2);
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_BLUE_CROSSBAR, 2);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN32);
-#endif
- break;
- default:
- DRM_ERROR("Unsupported screen format %p4cc\n",
- &target_fb->format->format);
- return -EINVAL;
- }
-
- if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
- unsigned bankw, bankh, mtaspect, tile_split, num_banks;
-
- bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
- bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
- mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
- tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
- num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
-
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, num_banks);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
- ARRAY_2D_TILED_THIN1);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_TILE_SPLIT,
- tile_split);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_WIDTH, bankw);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_HEIGHT, bankh);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT,
- mtaspect);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE,
- ADDR_SURF_MICRO_TILING_DISPLAY);
- } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
- ARRAY_1D_TILED_THIN1);
- }
-
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG,
- pipe_config);
-
- dce_v11_0_vga_enable(crtc, false);
-
- /* Make sure surface address is updated at vertical blank rather than
- * horizontal blank
- */
- tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
- GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
- WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
- upper_32_bits(fb_location));
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
- upper_32_bits(fb_location));
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
- WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
- WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
-
- /*
- * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
- * for > 8 bpc scanout to avoid truncating fb indices to their 8 MSBs and
- * retain full precision throughout the pipeline.
- */
- tmp = RREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset);
- if (bypass_lut)
- tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 1);
- else
- tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 0);
- WREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, tmp);
-
- if (bypass_lut)
- DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
-
- WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
- WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
-
- fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
- WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
-
- dce_v11_0_grph_enable(crtc, true);
-
- WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
- target_fb->height);
-
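- /* the viewport start must be 4-pixel / 2-line aligned */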
- x &= ~3;
- y &= ~1;
- WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
- (x << 16) | y);
- viewport_w = crtc->mode.hdisplay;
- viewport_h = (crtc->mode.vdisplay + 1) & ~1;
- WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
- (viewport_w << 16) | viewport_h);
-
- /* set pageflip to happen anywhere in vblank interval */
- WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
-
- if (!atomic && fb && fb != crtc->primary->fb) {
- abo = gem_to_amdgpu_bo(fb->obj[0]);
- r = amdgpu_bo_reserve(abo, true);
- if (unlikely(r != 0))
- return r;
- amdgpu_bo_unpin(abo);
- amdgpu_bo_unreserve(abo);
- }
-
- /* Bytes per pixel may have changed */
- dce_v11_0_bandwidth_update(adev);
-
- return 0;
-}
-
-static void dce_v11_0_set_interleave(struct drm_crtc *crtc,
- struct drm_display_mode *mode)
-{
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- u32 tmp;
-
- tmp = RREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset);
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 1);
- else
- tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 0);
- WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, tmp);
-}
-
-static void dce_v11_0_crtc_load_lut(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- u16 *r, *g, *b;
- int i;
- u32 tmp;
-
- DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
-
- tmp = RREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_GRPH_MODE, 0);
- WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1);
- WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0);
- WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
-
- WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
-
- WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
- WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
- WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
-
- WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
-
- WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
- r = crtc->gamma_store;
- g = r + crtc->gamma_size;
- b = g + crtc->gamma_size;
- for (i = 0; i < 256; i++) {
- WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
- ((*r++ & 0xffc0) << 14) |
- ((*g++ & 0xffc0) << 4) |
- (*b++ >> 6));
- }
-
- tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, 0);
- tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, 0);
- tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR2_DEGAMMA_MODE, 0);
- WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, 0);
- WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, GRPH_REGAMMA_MODE, 0);
- WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, 0);
- WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- /* XXX match this to the depth of the crtc fmt block, move to modeset? */
- WREG32(mmDENORM_CONTROL + amdgpu_crtc->crtc_offset, 0);
- /* XXX this only needs to be programmed once per crtc at startup,
- * not sure where the best place for it is
- */
- tmp = RREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, ALPHA_CONTROL, CURSOR_ALPHA_BLND_ENA, 1);
- WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-}
-
-static int dce_v11_0_pick_dig_encoder(struct drm_encoder *encoder)
-{
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
-
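- /* UNIPHY0-2 each host two links (A/B), each mapped to its own DIG */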
- switch (amdgpu_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- if (dig->linkb)
- return 1;
- else
- return 0;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- if (dig->linkb)
- return 3;
- else
- return 2;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- if (dig->linkb)
- return 5;
- else
- return 4;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
- return 6;
- default:
- DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
- return 0;
- }
-}
-
-/**
- * dce_v11_0_pick_pll - Allocate a PPLL for use by the crtc.
- *
- * @crtc: drm crtc
- *
- * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
- * a single PPLL can be used for all DP crtcs/encoders. For non-DP
- * monitors a dedicated PPLL must be used. If a particular board has
- * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
- * as there is no need to program the PLL itself. If we are not able to
- * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
- * avoid messing up an existing monitor.
- *
- * Asic specific PLL information
- *
- * DCE 11.x
- * Carrizo/Stoney (APU)
- * - PPLL0, PPLL1 are available for all UNIPHY (both DP and non-DP)
- * Polaris10/11/12, VEGAM
- * - COMBOPHY PLL0-PLL5 are used per UNIPHY link; DP uses the DP DTO
- *
- */
-static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- u32 pll_in_use;
- int pll;
-
- if ((adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_POLARIS11) ||
- (adev->asic_type == CHIP_POLARIS12) ||
- (adev->asic_type == CHIP_VEGAM)) {
- struct amdgpu_encoder *amdgpu_encoder =
- to_amdgpu_encoder(amdgpu_crtc->encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
-
- if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
- return ATOM_DP_DTO;
-
- switch (amdgpu_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- if (dig->linkb)
- return ATOM_COMBOPHY_PLL1;
- else
- return ATOM_COMBOPHY_PLL0;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- if (dig->linkb)
- return ATOM_COMBOPHY_PLL3;
- else
- return ATOM_COMBOPHY_PLL2;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- if (dig->linkb)
- return ATOM_COMBOPHY_PLL5;
- else
- return ATOM_COMBOPHY_PLL4;
- default:
- DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
- return ATOM_PPLL_INVALID;
- }
- }
-
- if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
- if (adev->clock.dp_extclk)
- /* skip PPLL programming if using ext clock */
- return ATOM_PPLL_INVALID;
- else {
- /* use the same PPLL for all DP monitors */
- pll = amdgpu_pll_get_shared_dp_ppll(crtc);
- if (pll != ATOM_PPLL_INVALID)
- return pll;
- }
- } else {
- /* use the same PPLL for all monitors with the same clock */
- pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
- if (pll != ATOM_PPLL_INVALID)
- return pll;
- }
-
- /* XXX need to determine what plls are available on each DCE11 part */
- pll_in_use = amdgpu_pll_get_use_mask(crtc);
- if (adev->flags & AMD_IS_APU) {
- if (!(pll_in_use & (1 << ATOM_PPLL1)))
- return ATOM_PPLL1;
- if (!(pll_in_use & (1 << ATOM_PPLL0)))
- return ATOM_PPLL0;
- DRM_ERROR("unable to allocate a PPLL\n");
- return ATOM_PPLL_INVALID;
- } else {
- if (!(pll_in_use & (1 << ATOM_PPLL2)))
- return ATOM_PPLL2;
- if (!(pll_in_use & (1 << ATOM_PPLL1)))
- return ATOM_PPLL1;
- if (!(pll_in_use & (1 << ATOM_PPLL0)))
- return ATOM_PPLL0;
- DRM_ERROR("unable to allocate a PPLL\n");
- return ATOM_PPLL_INVALID;
- }
- return ATOM_PPLL_INVALID;
-}
-
-static void dce_v11_0_lock_cursor(struct drm_crtc *crtc, bool lock)
-{
- struct amdgpu_device *adev = drm_to_adev(crtc->dev);
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- uint32_t cur_lock;
-
- cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
- if (lock)
- cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 1);
- else
- cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 0);
- WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
-}
-
-static void dce_v11_0_hide_cursor(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = drm_to_adev(crtc->dev);
- u32 tmp;
-
- tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
- WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-}
-
-static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = drm_to_adev(crtc->dev);
- u32 tmp;
-
- WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
- upper_32_bits(amdgpu_crtc->cursor_addr));
- WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- lower_32_bits(amdgpu_crtc->cursor_addr));
-
- tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
- tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
- WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-}
-
-static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
- int x, int y)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = drm_to_adev(crtc->dev);
- int xorigin = 0, yorigin = 0;
-
- amdgpu_crtc->cursor_x = x;
- amdgpu_crtc->cursor_y = y;
-
- /* avivo cursors are offset into the total surface */
- x += crtc->x;
- y += crtc->y;
- DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
-
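- /* if the cursor hangs off the top/left edge, clamp the hw position
-  * to 0 and push the overhang into the hotspot origin
-  */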
- if (x < 0) {
- xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
- x = 0;
- }
- if (y < 0) {
- yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
- y = 0;
- }
-
- WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
- WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
-
- return 0;
-}
-
-static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc,
- int x, int y)
-{
- int ret;
-
- dce_v11_0_lock_cursor(crtc, true);
- ret = dce_v11_0_cursor_move_locked(crtc, x, y);
- dce_v11_0_lock_cursor(crtc, false);
-
- return ret;
-}
-
-static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
- struct drm_file *file_priv,
- uint32_t handle,
- uint32_t width,
- uint32_t height,
- int32_t hot_x,
- int32_t hot_y)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_gem_object *obj;
- struct amdgpu_bo *aobj;
- int ret;
-
- if (!handle) {
- /* turn off cursor */
- dce_v11_0_hide_cursor(crtc);
- obj = NULL;
- goto unpin;
- }
-
- if ((width > amdgpu_crtc->max_cursor_width) ||
- (height > amdgpu_crtc->max_cursor_height)) {
- DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
- return -EINVAL;
- }
-
- obj = drm_gem_object_lookup(file_priv, handle);
- if (!obj) {
- DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
- return -ENOENT;
- }
-
- aobj = gem_to_amdgpu_bo(obj);
- ret = amdgpu_bo_reserve(aobj, false);
- if (ret != 0) {
- drm_gem_object_put(obj);
- return ret;
- }
-
- aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
- amdgpu_bo_unreserve(aobj);
- if (ret) {
- DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
- drm_gem_object_put(obj);
- return ret;
- }
- amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
-
- dce_v11_0_lock_cursor(crtc, true);
-
- if (width != amdgpu_crtc->cursor_width ||
- height != amdgpu_crtc->cursor_height ||
- hot_x != amdgpu_crtc->cursor_hot_x ||
- hot_y != amdgpu_crtc->cursor_hot_y) {
- int x, y;
-
- x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
- y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
-
- dce_v11_0_cursor_move_locked(crtc, x, y);
-
- amdgpu_crtc->cursor_width = width;
- amdgpu_crtc->cursor_height = height;
- amdgpu_crtc->cursor_hot_x = hot_x;
- amdgpu_crtc->cursor_hot_y = hot_y;
- }
-
- dce_v11_0_show_cursor(crtc);
- dce_v11_0_lock_cursor(crtc, false);
-
-unpin:
- if (amdgpu_crtc->cursor_bo) {
- struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
- ret = amdgpu_bo_reserve(aobj, true);
- if (likely(ret == 0)) {
- amdgpu_bo_unpin(aobj);
- amdgpu_bo_unreserve(aobj);
- }
- drm_gem_object_put(amdgpu_crtc->cursor_bo);
- }
-
- amdgpu_crtc->cursor_bo = obj;
- return 0;
-}
-
-static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
- if (amdgpu_crtc->cursor_bo) {
- dce_v11_0_lock_cursor(crtc, true);
-
- dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
- amdgpu_crtc->cursor_y);
-
- dce_v11_0_show_cursor(crtc);
-
- dce_v11_0_lock_cursor(crtc, false);
- }
-}
-
-static int dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t size,
- struct drm_modeset_acquire_ctx *ctx)
-{
- dce_v11_0_crtc_load_lut(crtc);
-
- return 0;
-}
-
-static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
- drm_crtc_cleanup(crtc);
- kfree(amdgpu_crtc);
-}
-
-static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
- .cursor_set2 = dce_v11_0_crtc_cursor_set2,
- .cursor_move = dce_v11_0_crtc_cursor_move,
- .gamma_set = dce_v11_0_crtc_gamma_set,
- .set_config = amdgpu_display_crtc_set_config,
- .destroy = dce_v11_0_crtc_destroy,
- .page_flip_target = amdgpu_display_crtc_page_flip_target,
- .get_vblank_counter = amdgpu_get_vblank_counter_kms,
- .enable_vblank = amdgpu_enable_vblank_kms,
- .disable_vblank = amdgpu_disable_vblank_kms,
- .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
-};
-
-static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- unsigned type;
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- amdgpu_crtc->enabled = true;
- amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
- dce_v11_0_vga_enable(crtc, true);
- amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
- dce_v11_0_vga_enable(crtc, false);
- /* Make sure VBLANK and PFLIP interrupts are still enabled */
- type = amdgpu_display_crtc_idx_to_irq_type(adev,
- amdgpu_crtc->crtc_id);
- amdgpu_irq_update(adev, &adev->crtc_irq, type);
- amdgpu_irq_update(adev, &adev->pageflip_irq, type);
- drm_crtc_vblank_on(crtc);
- dce_v11_0_crtc_load_lut(crtc);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- drm_crtc_vblank_off(crtc);
- if (amdgpu_crtc->enabled) {
- dce_v11_0_vga_enable(crtc, true);
- amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
- dce_v11_0_vga_enable(crtc, false);
- }
- amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
- amdgpu_crtc->enabled = false;
- break;
- }
- /* adjust pm to dpms */
- amdgpu_dpm_compute_clocks(adev);
-}
-
-static void dce_v11_0_crtc_prepare(struct drm_crtc *crtc)
-{
- /* disable crtc pair power gating before programming */
- amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
- amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
- dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
-}
-
-static void dce_v11_0_crtc_commit(struct drm_crtc *crtc)
-{
- dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
- amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
-}
-
-static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_atom_ss ss;
- int i;
-
- dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
- if (crtc->primary->fb) {
- int r;
- struct amdgpu_bo *abo;
-
- abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
- r = amdgpu_bo_reserve(abo, true);
- if (unlikely(r))
- DRM_ERROR("failed to reserve abo before unpin\n");
- else {
- amdgpu_bo_unpin(abo);
- amdgpu_bo_unreserve(abo);
- }
- }
- /* disable the GRPH */
- dce_v11_0_grph_enable(crtc, false);
-
- amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
-
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (adev->mode_info.crtcs[i] &&
- adev->mode_info.crtcs[i]->enabled &&
- i != amdgpu_crtc->crtc_id &&
- amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
- /* another crtc is using this pll; don't turn
- * it off
- */
- goto done;
- }
- }
-
- switch (amdgpu_crtc->pll_id) {
- case ATOM_PPLL0:
- case ATOM_PPLL1:
- case ATOM_PPLL2:
- /* disable the ppll */
- amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
- 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
- break;
- case ATOM_COMBOPHY_PLL0:
- case ATOM_COMBOPHY_PLL1:
- case ATOM_COMBOPHY_PLL2:
- case ATOM_COMBOPHY_PLL3:
- case ATOM_COMBOPHY_PLL4:
- case ATOM_COMBOPHY_PLL5:
- /* disable the ppll */
- amdgpu_atombios_crtc_program_pll(crtc, ATOM_CRTC_INVALID, amdgpu_crtc->pll_id,
- 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
- break;
- default:
- break;
- }
-done:
- amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
- amdgpu_crtc->adjusted_clock = 0;
- amdgpu_crtc->encoder = NULL;
- amdgpu_crtc->connector = NULL;
-}
-
-static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y, struct drm_framebuffer *old_fb)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
-
- if (!amdgpu_crtc->adjusted_clock)
- return -EINVAL;
-
- if ((adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_POLARIS11) ||
- (adev->asic_type == CHIP_POLARIS12) ||
- (adev->asic_type == CHIP_VEGAM)) {
- struct amdgpu_encoder *amdgpu_encoder =
- to_amdgpu_encoder(amdgpu_crtc->encoder);
- int encoder_mode =
- amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder);
-
- /* SetPixelClock calculates the plls and ss values now */
- amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id,
- amdgpu_crtc->pll_id,
- encoder_mode, amdgpu_encoder->encoder_id,
- adjusted_mode->clock, 0, 0, 0, 0,
- amdgpu_crtc->bpc, amdgpu_crtc->ss_enabled, &amdgpu_crtc->ss);
- } else {
- amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
- }
- amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
- dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
- amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
- amdgpu_atombios_crtc_scaler_setup(crtc);
- dce_v11_0_cursor_reset(crtc);
- /* update the hw mode for dpm */
- amdgpu_crtc->hw_mode = *adjusted_mode;
-
- return 0;
-}
-
-static bool dce_v11_0_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct drm_encoder *encoder;
-
- /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- amdgpu_crtc->encoder = encoder;
- amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
- break;
- }
- }
- if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
- amdgpu_crtc->encoder = NULL;
- amdgpu_crtc->connector = NULL;
- return false;
- }
- if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
- return false;
- if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
- return false;
- /* pick pll */
- amdgpu_crtc->pll_id = dce_v11_0_pick_pll(crtc);
- /* if we can't get a PPLL for a non-DP encoder, fail */
- if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
- !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
- return false;
-
- return true;
-}
-
-static int dce_v11_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- return dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
-}
-
-static int dce_v11_0_crtc_set_base_atomic(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y, enum mode_set_atomic state)
-{
- return dce_v11_0_crtc_do_set_base(crtc, fb, x, y, 1);
-}
-
-static const struct drm_crtc_helper_funcs dce_v11_0_crtc_helper_funcs = {
- .dpms = dce_v11_0_crtc_dpms,
- .mode_fixup = dce_v11_0_crtc_mode_fixup,
- .mode_set = dce_v11_0_crtc_mode_set,
- .mode_set_base = dce_v11_0_crtc_set_base,
- .mode_set_base_atomic = dce_v11_0_crtc_set_base_atomic,
- .prepare = dce_v11_0_crtc_prepare,
- .commit = dce_v11_0_crtc_commit,
- .disable = dce_v11_0_crtc_disable,
- .get_scanout_position = amdgpu_crtc_get_scanout_position,
-};
-
-static void dce_v11_0_panic_flush(struct drm_plane *plane)
-{
- struct drm_framebuffer *fb;
- struct amdgpu_crtc *amdgpu_crtc;
- struct amdgpu_device *adev;
- uint32_t fb_format;
-
- if (!plane->fb)
- return;
-
- fb = plane->fb;
- amdgpu_crtc = to_amdgpu_crtc(plane->crtc);
- adev = drm_to_adev(fb->dev);
-
- /* Disable DC tiling */
- fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset);
- fb_format &= ~GRPH_CONTROL__GRPH_ARRAY_MODE_MASK;
- WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
-}
-
-static const struct drm_plane_helper_funcs dce_v11_0_drm_primary_plane_helper_funcs = {
- .get_scanout_buffer = amdgpu_display_get_scanout_buffer,
- .panic_flush = dce_v11_0_panic_flush,
-};
-
-static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
-{
- struct amdgpu_crtc *amdgpu_crtc;
-
- amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
- (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
- if (amdgpu_crtc == NULL)
- return -ENOMEM;
-
- drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v11_0_crtc_funcs);
-
- drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
- amdgpu_crtc->crtc_id = index;
- adev->mode_info.crtcs[index] = amdgpu_crtc;
-
- amdgpu_crtc->max_cursor_width = 128;
- amdgpu_crtc->max_cursor_height = 128;
- adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
- adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
-
- switch (amdgpu_crtc->crtc_id) {
- case 0:
- default:
- amdgpu_crtc->crtc_offset = CRTC0_REGISTER_OFFSET;
- break;
- case 1:
- amdgpu_crtc->crtc_offset = CRTC1_REGISTER_OFFSET;
- break;
- case 2:
- amdgpu_crtc->crtc_offset = CRTC2_REGISTER_OFFSET;
- break;
- case 3:
- amdgpu_crtc->crtc_offset = CRTC3_REGISTER_OFFSET;
- break;
- case 4:
- amdgpu_crtc->crtc_offset = CRTC4_REGISTER_OFFSET;
- break;
- case 5:
- amdgpu_crtc->crtc_offset = CRTC5_REGISTER_OFFSET;
- break;
- }
-
- amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
- amdgpu_crtc->adjusted_clock = 0;
- amdgpu_crtc->encoder = NULL;
- amdgpu_crtc->connector = NULL;
- drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v11_0_crtc_helper_funcs);
- drm_plane_helper_add(amdgpu_crtc->base.primary, &dce_v11_0_drm_primary_plane_helper_funcs);
-
- return 0;
-}
-
-static int dce_v11_0_early_init(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
-
- adev->audio_endpt_rreg = &dce_v11_0_audio_endpt_rreg;
- adev->audio_endpt_wreg = &dce_v11_0_audio_endpt_wreg;
-
- dce_v11_0_set_display_funcs(adev);
-
- adev->mode_info.num_crtc = dce_v11_0_get_num_crtc(adev);
-
- switch (adev->asic_type) {
- case CHIP_CARRIZO:
- adev->mode_info.num_hpd = 6;
- adev->mode_info.num_dig = 9;
- break;
- case CHIP_STONEY:
- adev->mode_info.num_hpd = 6;
- adev->mode_info.num_dig = 9;
- break;
- case CHIP_POLARIS10:
- case CHIP_VEGAM:
- adev->mode_info.num_hpd = 6;
- adev->mode_info.num_dig = 6;
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- adev->mode_info.num_hpd = 5;
- adev->mode_info.num_dig = 5;
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
- }
-
- dce_v11_0_set_irq_funcs(adev);
-
- return 0;
-}
-
-static int dce_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
-{
- int r, i;
- struct amdgpu_device *adev = ip_block->adev;
-
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
- if (r)
- return r;
- }
-
- for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
- if (r)
- return r;
- }
-
- /* HPD hotplug */
- r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
- if (r)
- return r;
-
- adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
-
- adev_to_drm(adev)->mode_config.async_page_flip = true;
-
- adev_to_drm(adev)->mode_config.max_width = 16384;
- adev_to_drm(adev)->mode_config.max_height = 16384;
-
- adev_to_drm(adev)->mode_config.preferred_depth = 24;
- adev_to_drm(adev)->mode_config.prefer_shadow = 1;
-
- adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
-
- r = amdgpu_display_modeset_create_props(adev);
- if (r)
- return r;
-
- /* allocate crtcs */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- r = dce_v11_0_crtc_init(adev, i);
- if (r)
- return r;
- }
-
- if (amdgpu_atombios_get_connector_info_from_object_table(adev))
- amdgpu_display_print_display_setup(adev_to_drm(adev));
- else
- return -EINVAL;
-
- /* setup afmt */
- r = dce_v11_0_afmt_init(adev);
- if (r)
- return r;
-
- r = dce_v11_0_audio_init(adev);
- if (r)
- return r;
-
- /* Disable vblank IRQs aggressively for power-saving */
- /* XXX: can this be enabled for DC? */
- adev_to_drm(adev)->vblank_disable_immediate = true;
-
- r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
- if (r)
- return r;
-
- INIT_DELAYED_WORK(&adev->hotplug_work,
- amdgpu_display_hotplug_work_func);
-
- drm_kms_helper_poll_init(adev_to_drm(adev));
-
- adev->mode_info.mode_config_initialized = true;
- return 0;
-}
-
-static int dce_v11_0_sw_fini(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
-
- drm_edid_free(adev->mode_info.bios_hardcoded_edid);
-
- drm_kms_helper_poll_fini(adev_to_drm(adev));
-
- dce_v11_0_audio_fini(adev);
-
- dce_v11_0_afmt_fini(adev);
-
- drm_mode_config_cleanup(adev_to_drm(adev));
- adev->mode_info.mode_config_initialized = false;
-
- return 0;
-}
-
-static int dce_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
-{
- int i;
- struct amdgpu_device *adev = ip_block->adev;
-
- dce_v11_0_init_golden_registers(adev);
-
- /* disable vga render */
- dce_v11_0_set_vga_render_state(adev, false);
- /* init dig PHYs, disp eng pll */
- amdgpu_atombios_crtc_powergate_init(adev);
- amdgpu_atombios_encoder_init_dig(adev);
- if ((adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_POLARIS11) ||
- (adev->asic_type == CHIP_POLARIS12) ||
- (adev->asic_type == CHIP_VEGAM)) {
- amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk,
- DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS);
- amdgpu_atombios_crtc_set_dce_clock(adev, 0,
- DCE_CLOCK_TYPE_DPREFCLK, ATOM_GCK_DFS);
- } else {
- amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
- }
-
- /* initialize hpd */
- dce_v11_0_hpd_init(adev);
-
- for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
- dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
- }
-
- dce_v11_0_pageflip_interrupt_init(adev);
-
- return 0;
-}
-
-static int dce_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
-{
- int i;
- struct amdgpu_device *adev = ip_block->adev;
-
- dce_v11_0_hpd_fini(adev);
-
- for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
- dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
- }
-
- dce_v11_0_pageflip_interrupt_fini(adev);
-
- flush_delayed_work(&adev->hotplug_work);
-
- return 0;
-}
-
-static int dce_v11_0_suspend(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int r;
-
- r = amdgpu_display_suspend_helper(adev);
- if (r)
- return r;
-
- adev->mode_info.bl_level =
- amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
-
- return dce_v11_0_hw_fini(ip_block);
-}
-
-static int dce_v11_0_resume(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int ret;
-
- amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
- adev->mode_info.bl_level);
-
- ret = dce_v11_0_hw_init(ip_block);
-
- /* turn on the BL */
- if (adev->mode_info.bl_encoder) {
- u8 bl_level = amdgpu_display_backlight_get_level(adev,
- adev->mode_info.bl_encoder);
- amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
- bl_level);
- }
- if (ret)
- return ret;
-
- return amdgpu_display_resume_helper(adev);
-}
-
-static bool dce_v11_0_is_idle(struct amdgpu_ip_block *ip_block)
-{
- return true;
-}
-
-static int dce_v11_0_soft_reset(struct amdgpu_ip_block *ip_block)
-{
- u32 srbm_soft_reset = 0, tmp;
- struct amdgpu_device *adev = ip_block->adev;
-
- if (dce_v11_0_is_display_hung(adev))
- srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
-
- if (srbm_soft_reset) {
- tmp = RREG32(mmSRBM_SOFT_RESET);
- tmp |= srbm_soft_reset;
- dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(mmSRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmSRBM_SOFT_RESET);
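- /* posting read so the reset write lands before the delay */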
-
- udelay(50);
-
- tmp &= ~srbm_soft_reset;
- WREG32(mmSRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmSRBM_SOFT_RESET);
-
- /* Wait a little for things to settle down */
- udelay(50);
- }
- return 0;
-}
-
-static void dce_v11_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
- int crtc,
- enum amdgpu_interrupt_state state)
-{
- u32 lb_interrupt_mask;
-
- if (crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
- lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
- VBLANK_INTERRUPT_MASK, 0);
- WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
- lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
- VBLANK_INTERRUPT_MASK, 1);
- WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
- break;
- default:
- break;
- }
-}
-
-static void dce_v11_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
- int crtc,
- enum amdgpu_interrupt_state state)
-{
- u32 lb_interrupt_mask;
-
- if (crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
- lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
- VLINE_INTERRUPT_MASK, 0);
- WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
- lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
- VLINE_INTERRUPT_MASK, 1);
- WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
- break;
- default:
- break;
- }
-}
-
-static int dce_v11_0_set_hpd_irq_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- unsigned hpd,
- enum amdgpu_interrupt_state state)
-{
- u32 tmp;
-
- if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hpd %d\n", hpd);
- return 0;
- }
-
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 1);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-static int dce_v11_0_set_crtc_irq_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- switch (type) {
- case AMDGPU_CRTC_IRQ_VBLANK1:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 0, state);
- break;
- case AMDGPU_CRTC_IRQ_VBLANK2:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 1, state);
- break;
- case AMDGPU_CRTC_IRQ_VBLANK3:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 2, state);
- break;
- case AMDGPU_CRTC_IRQ_VBLANK4:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 3, state);
- break;
- case AMDGPU_CRTC_IRQ_VBLANK5:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 4, state);
- break;
- case AMDGPU_CRTC_IRQ_VBLANK6:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 5, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE1:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 0, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE2:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 1, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE3:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 2, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE4:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 3, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE5:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 4, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE6:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 5, state);
- break;
- default:
- break;
- }
- return 0;
-}
-
-static int dce_v11_0_set_pageflip_irq_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *src,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- u32 reg;
-
- if (type >= adev->mode_info.num_crtc) {
- DRM_ERROR("invalid pageflip crtc %d\n", type);
- return -EINVAL;
- }
-
- reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
- if (state == AMDGPU_IRQ_STATE_DISABLE)
- WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
- reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
- else
- WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
- reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
-
- return 0;
-}
-
-static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- unsigned long flags;
- unsigned crtc_id;
- struct amdgpu_crtc *amdgpu_crtc;
- struct amdgpu_flip_work *works;
-
- crtc_id = (entry->src_id - 8) >> 1;
- amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
-
- if (crtc_id >= adev->mode_info.num_crtc) {
- DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
- return -EINVAL;
- }
-
- if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
- GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
- WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
- GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
-
- /* IRQ could occur when in initial stage */
-	if (amdgpu_crtc == NULL)
- return 0;
-
- spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
- works = amdgpu_crtc->pflip_works;
-	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
- DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
- "AMDGPU_FLIP_SUBMITTED(%d)\n",
- amdgpu_crtc->pflip_status,
- AMDGPU_FLIP_SUBMITTED);
- spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
- return 0;
- }
-
- /* page flip completed. clean up */
- amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
- amdgpu_crtc->pflip_works = NULL;
-
-	/* wake up userspace */
-	if (works->event)
- drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
-
- spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
-
- drm_crtc_vblank_put(&amdgpu_crtc->base);
- schedule_work(&works->unpin_work);
-
- return 0;
-}
-
-static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev,
- int hpd)
-{
- u32 tmp;
-
- if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hpd %d\n", hpd);
- return;
- }
-
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_ACK, 1);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
-}
-
-static void dce_v11_0_crtc_vblank_int_ack(struct amdgpu_device *adev,
- int crtc)
-{
- u32 tmp;
-
- if (crtc < 0 || crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-
- tmp = RREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc]);
- tmp = REG_SET_FIELD(tmp, LB_VBLANK_STATUS, VBLANK_ACK, 1);
- WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], tmp);
-}
-
-static void dce_v11_0_crtc_vline_int_ack(struct amdgpu_device *adev,
- int crtc)
-{
- u32 tmp;
-
- if (crtc < 0 || crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-
- tmp = RREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc]);
- tmp = REG_SET_FIELD(tmp, LB_VLINE_STATUS, VLINE_ACK, 1);
- WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], tmp);
-}
-
-static int dce_v11_0_crtc_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- unsigned crtc = entry->src_id - 1;
- uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
- unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
- crtc);
-
- switch (entry->src_data[0]) {
- case 0: /* vblank */
- if (disp_int & interrupt_status_offsets[crtc].vblank)
- dce_v11_0_crtc_vblank_int_ack(adev, crtc);
- else
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- if (amdgpu_irq_enabled(adev, source, irq_type)) {
- drm_handle_vblank(adev_to_drm(adev), crtc);
- }
- DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
-
- break;
- case 1: /* vline */
- if (disp_int & interrupt_status_offsets[crtc].vline)
- dce_v11_0_crtc_vline_int_ack(adev, crtc);
- else
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- DRM_DEBUG("IH: D%d vline\n", crtc + 1);
-
- break;
- default:
- DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
- break;
- }
-
- return 0;
-}
-
-static int dce_v11_0_hpd_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- uint32_t disp_int, mask;
- unsigned hpd;
-
- if (entry->src_data[0] >= adev->mode_info.num_hpd) {
- DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
- return 0;
- }
-
- hpd = entry->src_data[0];
- disp_int = RREG32(interrupt_status_offsets[hpd].reg);
- mask = interrupt_status_offsets[hpd].hpd;
-
- if (disp_int & mask) {
- dce_v11_0_hpd_int_ack(adev, hpd);
- schedule_delayed_work(&adev->hotplug_work, 0);
- DRM_DEBUG("IH: HPD%d\n", hpd + 1);
- }
-
- return 0;
-}
-
-static int dce_v11_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
- enum amd_clockgating_state state)
-{
- return 0;
-}
-
-static int dce_v11_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state)
-{
- return 0;
-}
-
-static const struct amd_ip_funcs dce_v11_0_ip_funcs = {
- .name = "dce_v11_0",
- .early_init = dce_v11_0_early_init,
- .sw_init = dce_v11_0_sw_init,
- .sw_fini = dce_v11_0_sw_fini,
- .hw_init = dce_v11_0_hw_init,
- .hw_fini = dce_v11_0_hw_fini,
- .suspend = dce_v11_0_suspend,
- .resume = dce_v11_0_resume,
- .is_idle = dce_v11_0_is_idle,
- .soft_reset = dce_v11_0_soft_reset,
- .set_clockgating_state = dce_v11_0_set_clockgating_state,
- .set_powergating_state = dce_v11_0_set_powergating_state,
-};
-
-static void dce_v11_0_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
-
- amdgpu_encoder->pixel_clock = adjusted_mode->clock;
-
- /* need to call this here rather than in prepare() since we need some crtc info */
- amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-
- /* set scaler clears this on some chips */
- dce_v11_0_set_interleave(encoder->crtc, mode);
-
- if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
- dce_v11_0_afmt_enable(encoder, true);
- dce_v11_0_afmt_setmode(encoder, adjusted_mode);
- }
-}
-
-static void dce_v11_0_encoder_prepare(struct drm_encoder *encoder)
-{
- struct amdgpu_device *adev = drm_to_adev(encoder->dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
-
- if ((amdgpu_encoder->active_device &
- (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
- (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
- ENCODER_OBJECT_ID_NONE)) {
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- if (dig) {
- dig->dig_encoder = dce_v11_0_pick_dig_encoder(encoder);
- if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
- dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
- }
- }
-
- amdgpu_atombios_scratch_regs_lock(adev, true);
-
- if (connector) {
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
-
- /* select the clock/data port if it uses a router */
- if (amdgpu_connector->router.cd_valid)
- amdgpu_i2c_router_select_cd_port(amdgpu_connector);
-
- /* turn eDP panel on for mode set */
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
- amdgpu_atombios_encoder_set_edp_panel_power(connector,
- ATOM_TRANSMITTER_ACTION_POWER_ON);
- }
-
- /* this is needed for the pll/ss setup to work correctly in some cases */
- amdgpu_atombios_encoder_set_crtc_source(encoder);
- /* set up the FMT blocks */
- dce_v11_0_program_fmt(encoder);
-}
-
-static void dce_v11_0_encoder_commit(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
-
- /* need to call this here as we need the crtc set up */
- amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
- amdgpu_atombios_scratch_regs_lock(adev, false);
-}
-
-static void dce_v11_0_encoder_disable(struct drm_encoder *encoder)
-{
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig;
-
- amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-
- if (amdgpu_atombios_encoder_is_digital(encoder)) {
- if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
- dce_v11_0_afmt_enable(encoder, false);
- dig = amdgpu_encoder->enc_priv;
- dig->dig_encoder = -1;
- }
- amdgpu_encoder->active_device = 0;
-}
-
-/* these are handled by the primary encoders */
-static void dce_v11_0_ext_prepare(struct drm_encoder *encoder)
-{
-
-}
-
-static void dce_v11_0_ext_commit(struct drm_encoder *encoder)
-{
-
-}
-
-static void
-dce_v11_0_ext_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
-
-}
-
-static void dce_v11_0_ext_disable(struct drm_encoder *encoder)
-{
-
-}
-
-static void
-dce_v11_0_ext_dpms(struct drm_encoder *encoder, int mode)
-{
-
-}
-
-static const struct drm_encoder_helper_funcs dce_v11_0_ext_helper_funcs = {
- .dpms = dce_v11_0_ext_dpms,
- .prepare = dce_v11_0_ext_prepare,
- .mode_set = dce_v11_0_ext_mode_set,
- .commit = dce_v11_0_ext_commit,
- .disable = dce_v11_0_ext_disable,
- /* no detect for TMDS/LVDS yet */
-};
-
-static const struct drm_encoder_helper_funcs dce_v11_0_dig_helper_funcs = {
- .dpms = amdgpu_atombios_encoder_dpms,
- .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
- .prepare = dce_v11_0_encoder_prepare,
- .mode_set = dce_v11_0_encoder_mode_set,
- .commit = dce_v11_0_encoder_commit,
- .disable = dce_v11_0_encoder_disable,
- .detect = amdgpu_atombios_encoder_dig_detect,
-};
-
-static const struct drm_encoder_helper_funcs dce_v11_0_dac_helper_funcs = {
- .dpms = amdgpu_atombios_encoder_dpms,
- .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
- .prepare = dce_v11_0_encoder_prepare,
- .mode_set = dce_v11_0_encoder_mode_set,
- .commit = dce_v11_0_encoder_commit,
- .detect = amdgpu_atombios_encoder_dac_detect,
-};
-
-static void dce_v11_0_encoder_destroy(struct drm_encoder *encoder)
-{
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
- kfree(amdgpu_encoder->enc_priv);
- drm_encoder_cleanup(encoder);
- kfree(amdgpu_encoder);
-}
-
-static const struct drm_encoder_funcs dce_v11_0_encoder_funcs = {
- .destroy = dce_v11_0_encoder_destroy,
-};
-
-static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
- uint32_t encoder_enum,
- uint32_t supported_device,
- u16 caps)
-{
- struct drm_device *dev = adev_to_drm(adev);
- struct drm_encoder *encoder;
- struct amdgpu_encoder *amdgpu_encoder;
-
- /* see if we already added it */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- amdgpu_encoder = to_amdgpu_encoder(encoder);
- if (amdgpu_encoder->encoder_enum == encoder_enum) {
- amdgpu_encoder->devices |= supported_device;
- return;
- }
-
- }
-
- /* add a new one */
- amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
- if (!amdgpu_encoder)
- return;
-
- encoder = &amdgpu_encoder->base;
- switch (adev->mode_info.num_crtc) {
- case 1:
- encoder->possible_crtcs = 0x1;
- break;
- case 2:
- default:
- encoder->possible_crtcs = 0x3;
- break;
- case 3:
- encoder->possible_crtcs = 0x7;
- break;
- case 4:
- encoder->possible_crtcs = 0xf;
- break;
- case 5:
- encoder->possible_crtcs = 0x1f;
- break;
- case 6:
- encoder->possible_crtcs = 0x3f;
- break;
- }
-
- amdgpu_encoder->enc_priv = NULL;
-
- amdgpu_encoder->encoder_enum = encoder_enum;
- amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
- amdgpu_encoder->devices = supported_device;
- amdgpu_encoder->rmx_type = RMX_OFF;
- amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
- amdgpu_encoder->is_ext_encoder = false;
- amdgpu_encoder->caps = caps;
-
- switch (amdgpu_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
- drm_encoder_helper_add(encoder, &dce_v11_0_dac_helper_funcs);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
- if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- amdgpu_encoder->rmx_type = RMX_FULL;
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
- amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
- } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
- amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
- } else {
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
- amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
- }
- drm_encoder_helper_add(encoder, &dce_v11_0_dig_helper_funcs);
- break;
- case ENCODER_OBJECT_ID_SI170B:
- case ENCODER_OBJECT_ID_CH7303:
- case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
- case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
- case ENCODER_OBJECT_ID_TITFP513:
- case ENCODER_OBJECT_ID_VT1623:
- case ENCODER_OBJECT_ID_HDMI_SI1930:
- case ENCODER_OBJECT_ID_TRAVIS:
- case ENCODER_OBJECT_ID_NUTMEG:
- /* these are handled by the primary encoders */
- amdgpu_encoder->is_ext_encoder = true;
- if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
- else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
- else
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
- drm_encoder_helper_add(encoder, &dce_v11_0_ext_helper_funcs);
- break;
- }
-}
-
-static const struct amdgpu_display_funcs dce_v11_0_display_funcs = {
- .bandwidth_update = &dce_v11_0_bandwidth_update,
- .vblank_get_counter = &dce_v11_0_vblank_get_counter,
- .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
- .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
- .hpd_sense = &dce_v11_0_hpd_sense,
- .hpd_set_polarity = &dce_v11_0_hpd_set_polarity,
- .hpd_get_gpio_reg = &dce_v11_0_hpd_get_gpio_reg,
- .page_flip = &dce_v11_0_page_flip,
- .page_flip_get_scanoutpos = &dce_v11_0_crtc_get_scanoutpos,
- .add_encoder = &dce_v11_0_encoder_add,
- .add_connector = &amdgpu_connector_add,
-};
-
-static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev)
-{
- adev->mode_info.funcs = &dce_v11_0_display_funcs;
-}
-
-static const struct amdgpu_irq_src_funcs dce_v11_0_crtc_irq_funcs = {
- .set = dce_v11_0_set_crtc_irq_state,
- .process = dce_v11_0_crtc_irq,
-};
-
-static const struct amdgpu_irq_src_funcs dce_v11_0_pageflip_irq_funcs = {
- .set = dce_v11_0_set_pageflip_irq_state,
- .process = dce_v11_0_pageflip_irq,
-};
-
-static const struct amdgpu_irq_src_funcs dce_v11_0_hpd_irq_funcs = {
- .set = dce_v11_0_set_hpd_irq_state,
- .process = dce_v11_0_hpd_irq,
-};
-
-static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev)
-{
- if (adev->mode_info.num_crtc > 0)
- adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
- else
- adev->crtc_irq.num_types = 0;
- adev->crtc_irq.funcs = &dce_v11_0_crtc_irq_funcs;
-
- adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
- adev->pageflip_irq.funcs = &dce_v11_0_pageflip_irq_funcs;
-
- adev->hpd_irq.num_types = adev->mode_info.num_hpd;
- adev->hpd_irq.funcs = &dce_v11_0_hpd_irq_funcs;
-}
-
-const struct amdgpu_ip_block_version dce_v11_0_ip_block =
-{
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 11,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_v11_0_ip_funcs,
-};
-
-const struct amdgpu_ip_block_version dce_v11_2_ip_block =
-{
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 11,
- .minor = 2,
- .rev = 0,
- .funcs = &dce_v11_0_ip_funcs,
-};
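
Every vblank/vline/HPD interrupt-state helper deleted above follows the same read-modify-write shape: read the register, rewrite a single bit-field with REG_SET_FIELD(), and write the result back. Below is a minimal, self-contained C sketch of that masked field update; the VBLANK_INT_MASK_* macros and the reg_set_field() helper are illustrative stand-ins, not the real amdgpu register definitions.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative field layout: one enable bit inside a 32-bit mask register. */
    #define VBLANK_INT_MASK_SHIFT 4
    #define VBLANK_INT_MASK_MASK  (1u << VBLANK_INT_MASK_SHIFT)

    /* Analogue of REG_SET_FIELD(): clear the field, then insert the new value. */
    static uint32_t reg_set_field(uint32_t reg, uint32_t mask,
                                  unsigned int shift, uint32_t val)
    {
            return (reg & ~mask) | ((val << shift) & mask);
    }

    int main(void)
    {
            uint32_t reg = 0xffffffffu;     /* stands in for RREG32() */

            /* AMDGPU_IRQ_STATE_DISABLE: clear the field, keep all other bits. */
            reg = reg_set_field(reg, VBLANK_INT_MASK_MASK, VBLANK_INT_MASK_SHIFT, 0);
            printf("disabled: 0x%08x\n", (unsigned int)reg);

            /* AMDGPU_IRQ_STATE_ENABLE: set it back; the write would go via WREG32(). */
            reg = reg_set_field(reg, VBLANK_INT_MASK_MASK, VBLANK_INT_MASK_SHIFT, 1);
            printf("enabled:  0x%08x\n", (unsigned int)reg);
            return 0;
    }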
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 276c025c4c03..acc887a58518 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1034,7 +1034,6 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
/* save values for DPM */
amdgpu_crtc->line_time = line_time;
- amdgpu_crtc->wm_high = latency_watermark_a;
/* Save number of lines the linebuffer leads before the scanout */
amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
@@ -1451,17 +1450,12 @@ static int dce_v6_0_audio_init(struct amdgpu_device *adev)
static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
{
- int i;
-
if (!amdgpu_audio)
return;
if (!adev->mode_info.audio.enabled)
return;
- for (i = 0; i < adev->mode_info.audio.num_pins; i++)
- dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
-
adev->mode_info.audio.enabled = false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index e62ccf9eb73d..2ccd6aad8dd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1096,8 +1096,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
/* save values for DPM */
amdgpu_crtc->line_time = line_time;
- amdgpu_crtc->wm_high = latency_watermark_a;
- amdgpu_crtc->wm_low = latency_watermark_b;
+
/* Save number of lines the linebuffer leads before the scanout */
amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
@@ -1443,17 +1442,12 @@ static int dce_v8_0_audio_init(struct amdgpu_device *adev)
static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
{
- int i;
-
if (!amdgpu_audio)
return;
if (!adev->mode_info.audio.enabled)
return;
- for (i = 0; i < adev->mode_info.audio.num_pins; i++)
- dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
-
adev->mode_info.audio.enabled = false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 7bd506f06eb1..8841d7213de4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -4075,7 +4075,7 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct dma_fence *f = NULL;
unsigned int index;
uint64_t gpu_addr;
- volatile uint32_t *cpu_ptr;
+ uint32_t *cpu_ptr;
long r;
memset(&ib, 0, sizeof(ib));
@@ -4322,8 +4322,7 @@ static u32 gfx_v10_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
-static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0;
int ctx_reg_offset;
@@ -7668,19 +7667,17 @@ static int gfx_v10_0_soft_reset(struct amdgpu_ip_block *ip_block)
/* Disable MEC parsing/prefetching */
gfx_v10_0_cp_compute_enable(adev, false);
- if (grbm_soft_reset) {
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- tmp |= grbm_soft_reset;
- dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- udelay(50);
+ udelay(50);
- tmp &= ~grbm_soft_reset;
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- }
+ tmp &= ~grbm_soft_reset;
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
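
This hunk drops the `if (grbm_soft_reset)` guard and runs the assert/settle/deassert sequence unconditionally; the sequence itself is unchanged. A hedged stand-alone sketch of that toggle follows, with mock accessors in place of RREG32_SOC15()/WREG32_SOC15() and usleep() standing in for udelay():

    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>             /* usleep() stands in for udelay() */

    static uint32_t mock_grbm_soft_reset;   /* stands in for the MMIO register */

    static uint32_t rreg32(void) { return mock_grbm_soft_reset; }
    static void wreg32(uint32_t v) { mock_grbm_soft_reset = v; }

    /* Assert the requested reset bits, let them settle, then deassert them. */
    static void soft_reset_toggle(uint32_t reset_bits)
    {
            uint32_t tmp = rreg32();

            tmp |= reset_bits;
            wreg32(tmp);
            (void)rreg32();         /* read back to post the write */
            usleep(50);

            tmp &= ~reset_bits;
            wreg32(tmp);
            (void)rreg32();
            usleep(50);             /* wait a little for things to settle */
    }

    int main(void)
    {
            soft_reset_toggle(1u << 5);     /* illustrative reset bit */
            printf("GRBM_SOFT_RESET=0x%08x\n", (unsigned int)rreg32());
            return 0;
    }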
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index c01c241a1b06..66c47c466532 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -603,7 +603,7 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct dma_fence *f = NULL;
unsigned index;
uint64_t gpu_addr;
- volatile uint32_t *cpu_ptr;
+ uint32_t *cpu_ptr;
long r;
/* MES KIQ fw hasn't indirect buffer support for now */
@@ -850,8 +850,7 @@ static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
-static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0;
int ctx_reg_offset;
@@ -1612,9 +1611,9 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
if (!adev->gfx.disable_uq &&
- adev->gfx.me_fw_version >= 2390 &&
- adev->gfx.pfp_fw_version >= 2530 &&
- adev->gfx.mec_fw_version >= 2600 &&
+ adev->gfx.me_fw_version >= 2420 &&
+ adev->gfx.pfp_fw_version >= 2580 &&
+ adev->gfx.mec_fw_version >= 2650 &&
adev->mes.fw_version[0] >= 120) {
adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs;
adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs;
@@ -1654,6 +1653,21 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
}
}
break;
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 4):
+ adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
+ if (adev->gfx.pfp_fw_version >= 102 &&
+ adev->gfx.mec_fw_version >= 66 &&
+ adev->mes.fw_version[0] >= 128) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
@@ -4129,6 +4143,8 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
#endif
if (prop->tmz_queue)
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1);
+ if (!prop->kernel_queue)
+ tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_NON_PRIV, 1);
mqd->cp_gfx_hqd_cntl = tmp;
/* set up cp_doorbell_control */
@@ -4281,8 +4297,10 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH,
prop->allow_tunneling);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ if (prop->kernel_queue) {
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ }
if (prop->tmz_queue)
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1);
mqd->cp_hqd_pq_control = tmp;
@@ -4639,8 +4657,7 @@ static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev)
amdgpu_device_flush_hdp(adev, NULL);
- value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
- false : true;
+ value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
adev->gfxhub.funcs->set_fault_enable_default(adev, value);
/* TODO investigate why this and the hdp flush above is needed,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index 3e138527d534..710ec9c34e43 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -497,7 +497,7 @@ static int gfx_v12_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct dma_fence *f = NULL;
unsigned index;
uint64_t gpu_addr;
- volatile uint32_t *cpu_ptr;
+ uint32_t *cpu_ptr;
long r;
/* MES KIQ fw hasn't indirect buffer support for now */
@@ -685,8 +685,7 @@ static u32 gfx_v12_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
-static void gfx_v12_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v12_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0, clustercount = 0, i;
const struct cs_section_def *sect = NULL;
@@ -3026,6 +3025,8 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
#endif
if (prop->tmz_queue)
tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1);
+ if (!prop->kernel_queue)
+ tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_NON_PRIV, 1);
mqd->cp_gfx_hqd_cntl = tmp;
/* set up cp_doorbell_control */
@@ -3175,8 +3176,10 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
(order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ if (prop->kernel_queue) {
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ }
if (prop->tmz_queue)
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1);
mqd->cp_hqd_pq_control = tmp;
@@ -3520,8 +3523,7 @@ static int gfx_v12_0_gfxhub_enable(struct amdgpu_device *adev)
amdgpu_device_flush_hdp(adev, NULL);
- value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
- false : true;
+ value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
adev->gfxhub.funcs->set_fault_enable_default(adev, value);
/* TODO investigate why this and the hdp flush above is needed,
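
Several hunks in this series (here and in the gmc/ih changes below) replace the `(x == CONST) ? false : true` ternary with the comparison it encodes. The rewrite is mechanical; a tiny illustration with invented names:

    #include <stdbool.h>
    #include <stdio.h>

    #define FAULT_STOP_ALWAYS 2     /* illustrative constant */

    int main(void)
    {
            int fault_stop = 0;

            /* Before: value = (fault_stop == FAULT_STOP_ALWAYS) ? false : true;
             * After: the comparison already yields the bool directly. */
            bool value = fault_stop != FAULT_STOP_ALWAYS;

            printf("value = %d\n", value);
            return 0;
    }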
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 70d7a1f434c4..7693b7953426 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -86,7 +86,7 @@ MODULE_FIRMWARE("amdgpu/hainan_ce.bin");
MODULE_FIRMWARE("amdgpu/hainan_rlc.bin");
static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev);
-static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
+static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer);
//static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev);
static void gfx_v6_0_init_pg(struct amdgpu_device *adev);
@@ -2354,7 +2354,7 @@ static void gfx_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
{
const u32 *src_ptr;
- volatile u32 *dst_ptr;
+ u32 *dst_ptr;
u32 dws;
u64 reg_list_mc_addr;
const struct cs_section_def *cs_data;
@@ -2855,8 +2855,7 @@ static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
-static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 2aa323dab34e..5976ed55d9db 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -883,7 +883,7 @@ static const u32 kalindi_rlc_save_restore_register_list[] = {
};
static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
-static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
+static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer);
static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
@@ -3882,8 +3882,7 @@ static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
-static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 367449d8061b..0856ff65288c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1220,8 +1220,7 @@ out:
return err;
}
-static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 20b30f4b3c7d..dd19a97436db 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1648,8 +1648,7 @@ static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
-static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0;
@@ -2650,6 +2649,9 @@ static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
!READ_ONCE(adev->barrier_has_auto_waitcnt));
WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp);
break;
+ case IP_VERSION(9, 4, 2):
+ gfx_v9_4_2_init_sq(adev);
+ break;
default:
break;
}
@@ -4172,19 +4174,17 @@ static int gfx_v9_0_soft_reset(struct amdgpu_ip_block *ip_block)
/* Disable MEC parsing/prefetching */
gfx_v9_0_cp_compute_enable(adev, false);
- if (grbm_soft_reset) {
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- tmp |= grbm_soft_reset;
- dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- udelay(50);
+ udelay(50);
- tmp &= ~grbm_soft_reset;
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- }
+ tmp &= ~grbm_soft_reset;
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
index c48cd47b531f..8058ea91ecaf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
@@ -748,6 +748,18 @@ void gfx_v9_4_2_init_golden_registers(struct amdgpu_device *adev,
}
}
+void gfx_v9_4_2_init_sq(struct amdgpu_device *adev)
+{
+ uint32_t data;
+
+ if (adev->gfx.mec_fw_version >= 98) {
+ adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN;
+ data = RREG32_SOC15(GC, 0, regSQ_CONFIG1);
+ data = REG_SET_FIELD(data, SQ_CONFIG1, DISABLE_XNACK_CHECK_IN_RETRY_DISABLE, 1);
+ WREG32_SOC15(GC, 0, regSQ_CONFIG1, data);
+ }
+}
+
void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev,
uint32_t first_vmid,
uint32_t last_vmid)
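
The new gfx_v9_4_2_init_sq() is an instance of firmware-gated register programming: the SQ_CONFIG1 workaround and the XNACK chain flag are applied only when the loaded MEC firmware is new enough to honour them. A minimal sketch of the pattern, with invented flag and register stand-ins:

    #include <stdint.h>
    #include <stdio.h>

    #define XNACK_FLAG_CHAIN (1u << 0)      /* illustrative flag bit */

    static uint32_t xnack_flags;
    static uint32_t sq_config1;             /* stands in for the MMIO register */

    /* Apply a workaround only when the firmware supports it (>= 98 in the
     * hunk above); older firmware keeps the previous behaviour. */
    static void init_sq(uint32_t mec_fw_version)
    {
            if (mec_fw_version < 98)
                    return;

            xnack_flags |= XNACK_FLAG_CHAIN;
            sq_config1 |= 1u << 3;  /* stand-in for the REG_SET_FIELD() write */
    }

    int main(void)
    {
            init_sq(98);
            printf("xnack=0x%x sq_config1=0x%x\n", xnack_flags, sq_config1);
            return 0;
    }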
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h
index 7584624b641c..a603724c1dfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h
@@ -28,6 +28,7 @@ void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev,
uint32_t first_vmid, uint32_t last_vmid);
void gfx_v9_4_2_init_golden_registers(struct amdgpu_device *adev,
uint32_t die_id);
+void gfx_v9_4_2_init_sq(struct amdgpu_device *adev);
void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev);
int gfx_v9_4_2_do_edc_gpr_workarounds(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 51babf5c78c8..77f9d5b9a556 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -2461,19 +2461,17 @@ static int gfx_v9_4_3_soft_reset(struct amdgpu_ip_block *ip_block)
/* Disable MEC parsing/prefetching */
gfx_v9_4_3_xcc_cp_compute_enable(adev, false, 0);
- if (grbm_soft_reset) {
- tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
- tmp |= grbm_soft_reset;
- dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
-
- udelay(50);
-
- tmp &= ~grbm_soft_reset;
- WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
- }
+ tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
@@ -3562,6 +3560,7 @@ static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
struct amdgpu_device *adev = ring->adev;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[ring->xcc_id];
struct amdgpu_ring *kiq_ring = &kiq->ring;
+ int reset_mode = AMDGPU_RESET_TYPE_PER_QUEUE;
unsigned long flags;
int r;
@@ -3599,6 +3598,7 @@ pipe_reset:
if (!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_PIPE))
return -EOPNOTSUPP;
r = gfx_v9_4_3_reset_hw_pipe(ring);
+ reset_mode = AMDGPU_RESET_TYPE_PER_PIPE;
dev_info(adev->dev, "ring: %s pipe reset :%s\n", ring->name,
r ? "failed" : "successfully");
if (r)
@@ -3621,10 +3621,20 @@ pipe_reset:
r = amdgpu_ring_test_ring(kiq_ring);
spin_unlock_irqrestore(&kiq->ring_lock, flags);
if (r) {
+ if (reset_mode == AMDGPU_RESET_TYPE_PER_QUEUE)
+ goto pipe_reset;
+
dev_err(adev->dev, "fail to remap queue\n");
return r;
}
+ if (reset_mode == AMDGPU_RESET_TYPE_PER_QUEUE) {
+ r = amdgpu_ring_test_ring(ring);
+ if (r)
+ goto pipe_reset;
+ }
+
return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
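
The reset_kcq() change above records which reset mode was attempted so that a per-queue reset whose remap or ring test still fails can escalate to the per-pipe reset instead of returning an error. A hedged sketch of that escalation flow, with the KIQ plumbing reduced to stubs:

    #include <stdio.h>

    /* Stubs standing in for the real KIQ queue reset, pipe reset and ring test. */
    static int reset_queue(void) { return -1; }    /* pretend per-queue reset fails */
    static int reset_pipe(void)  { return 0; }
    static int ring_test(void)   { return 0; }

    static int reset_kcq(void)
    {
            int r = reset_queue();

            if (!r)
                    r = ring_test();
            if (!r) {
                    printf("recovered with a per-queue reset\n");
                    return 0;
            }

            /* The per-queue reset failed or the ring still does not respond:
             * escalate, exactly once, to the heavier per-pipe reset. */
            r = reset_pipe();
            if (!r)
                    r = ring_test();
            if (!r)
                    printf("recovered with a per-pipe reset\n");
            return r;
    }

    int main(void)
    {
            return reset_kcq();
    }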
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 7923f491cf73..d7499be8c4bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -466,24 +466,6 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int
* 0 valid
*/
-static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
-{
- switch (flags) {
- case AMDGPU_VM_MTYPE_DEFAULT:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_NC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_WC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_WC);
- case AMDGPU_VM_MTYPE_CC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_CC);
- case AMDGPU_VM_MTYPE_UC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_UC);
- default:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- }
-}
-
static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
@@ -508,21 +490,39 @@ static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
- struct amdgpu_bo *bo = mapping->bo_va->base.bo;
-
- *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ *flags |= AMDGPU_PTE_EXECUTABLE;
+ else
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
- *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
+ switch (vm_flags & AMDGPU_VM_MTYPE_MASK) {
+ case AMDGPU_VM_MTYPE_DEFAULT:
+ case AMDGPU_VM_MTYPE_NC:
+ default:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_NC);
+ break;
+ case AMDGPU_VM_MTYPE_WC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_WC);
+ break;
+ case AMDGPU_VM_MTYPE_CC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_CC);
+ break;
+ case AMDGPU_VM_MTYPE_UC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_UC);
+ break;
+ }
- *flags &= ~AMDGPU_PTE_NOALLOC;
- *flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);
+ if (vm_flags & AMDGPU_VM_PAGE_NOALLOC)
+ *flags |= AMDGPU_PTE_NOALLOC;
+ else
+ *flags &= ~AMDGPU_PTE_NOALLOC;
- if (mapping->flags & AMDGPU_PTE_PRT) {
+ if (vm_flags & AMDGPU_VM_PAGE_PRT) {
*flags |= AMDGPU_PTE_PRT;
*flags |= AMDGPU_PTE_SNOOPED;
*flags |= AMDGPU_PTE_LOG;
@@ -563,7 +563,6 @@ static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
.flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
- .map_mtype = gmc_v10_0_map_mtype,
.get_vm_pde = gmc_v10_0_get_vm_pde,
.get_vm_pte = gmc_v10_0_get_vm_pte,
.get_vbios_fb_size = gmc_v10_0_get_vbios_fb_size,
@@ -964,8 +963,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
/* Flush HDP after it is initialized */
amdgpu_device_flush_hdp(adev, NULL);
- value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
- false : true;
+ value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
if (!adev->in_s0ix)
adev->gfxhub.funcs->set_fault_enable_default(adev, value);
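
With gmc_v10_0_map_mtype() removed, get_vm_pte() now translates the AMDGPU_VM_MTYPE_* bits of the mapping flags straight into the PTE memory-type field, clearing the old field before inserting the new encoding. A self-contained sketch of that translation; the bit positions and constants are invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative encodings; not the real amdgpu flag values. */
    #define VM_MTYPE_MASK    0x7u
    #define VM_MTYPE_DEFAULT 0x0u
    #define VM_MTYPE_NC      0x1u
    #define VM_MTYPE_WC      0x2u
    #define VM_MTYPE_UC      0x3u

    #define PTE_MTYPE_SHIFT 48
    #define PTE_MTYPE_MASK  (3ull << PTE_MTYPE_SHIFT)

    enum mtype { MTYPE_NC, MTYPE_WC, MTYPE_UC };

    /* Analogue of AMDGPU_PTE_MTYPE_NV10(): replace the mtype field in flags. */
    static uint64_t pte_set_mtype(uint64_t flags, enum mtype m)
    {
            return (flags & ~PTE_MTYPE_MASK) | ((uint64_t)m << PTE_MTYPE_SHIFT);
    }

    static uint64_t get_vm_pte(uint32_t vm_flags, uint64_t flags)
    {
            switch (vm_flags & VM_MTYPE_MASK) {
            case VM_MTYPE_WC:
                    return pte_set_mtype(flags, MTYPE_WC);
            case VM_MTYPE_UC:
                    return pte_set_mtype(flags, MTYPE_UC);
            case VM_MTYPE_DEFAULT:
            case VM_MTYPE_NC:
            default:                        /* NC is the fallback, as above */
                    return pte_set_mtype(flags, MTYPE_NC);
            }
    }

    int main(void)
    {
            uint64_t pte = get_vm_pte(VM_MTYPE_WC, 0x1 /* valid bit */);

            printf("pte = 0x%016llx\n", (unsigned long long)pte);
            return 0;
    }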
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index f15d691e9a20..7bc389d9f5c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -430,24 +430,6 @@ static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int
* 0 valid
*/
-static uint64_t gmc_v11_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
-{
- switch (flags) {
- case AMDGPU_VM_MTYPE_DEFAULT:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_NC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_WC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_WC);
- case AMDGPU_VM_MTYPE_CC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_CC);
- case AMDGPU_VM_MTYPE_UC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_UC);
- default:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- }
-}
-
static void gmc_v11_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
@@ -472,21 +454,39 @@ static void gmc_v11_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
- struct amdgpu_bo *bo = mapping->bo_va->base.bo;
-
- *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ *flags |= AMDGPU_PTE_EXECUTABLE;
+ else
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
- *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
+ switch (vm_flags & AMDGPU_VM_MTYPE_MASK) {
+ case AMDGPU_VM_MTYPE_DEFAULT:
+ case AMDGPU_VM_MTYPE_NC:
+ default:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_NC);
+ break;
+ case AMDGPU_VM_MTYPE_WC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_WC);
+ break;
+ case AMDGPU_VM_MTYPE_CC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_CC);
+ break;
+ case AMDGPU_VM_MTYPE_UC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_UC);
+ break;
+ }
- *flags &= ~AMDGPU_PTE_NOALLOC;
- *flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);
+ if (vm_flags & AMDGPU_VM_PAGE_NOALLOC)
+ *flags |= AMDGPU_PTE_NOALLOC;
+ else
+ *flags &= ~AMDGPU_PTE_NOALLOC;
- if (mapping->flags & AMDGPU_PTE_PRT) {
+ if (vm_flags & AMDGPU_VM_PAGE_PRT) {
*flags |= AMDGPU_PTE_PRT;
*flags |= AMDGPU_PTE_SNOOPED;
*flags |= AMDGPU_PTE_LOG;
@@ -527,7 +527,6 @@ static const struct amdgpu_gmc_funcs gmc_v11_0_gmc_funcs = {
.flush_gpu_tlb_pasid = gmc_v11_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v11_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v11_0_emit_pasid_mapping,
- .map_mtype = gmc_v11_0_map_mtype,
.get_vm_pde = gmc_v11_0_get_vm_pde,
.get_vm_pte = gmc_v11_0_get_vm_pte,
.get_vbios_fb_size = gmc_v11_0_get_vbios_fb_size,
@@ -906,8 +905,7 @@ static int gmc_v11_0_gart_enable(struct amdgpu_device *adev)
/* Flush HDP after it is initialized */
amdgpu_device_flush_hdp(adev, NULL);
- value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
- false : true;
+ value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
adev->mmhub.funcs->set_fault_enable_default(adev, value);
gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index de763105fdfd..f4a19357ccbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -336,6 +336,22 @@ static void gmc_v12_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint16_t queried;
int vmid, i;
+ if (adev->enable_uni_mes && adev->mes.ring[AMDGPU_MES_SCHED_PIPE].sched.ready &&
+ (adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x84) {
+ struct mes_inv_tlbs_pasid_input input = {0};
+ input.pasid = pasid;
+ input.flush_type = flush_type;
+ input.hub_id = AMDGPU_GFXHUB(0);
+		/* MES will invalidate all gc_hubs for the device from the master */
+ adev->mes.funcs->invalidate_tlbs_pasid(&adev->mes, &input);
+ if (all_hub) {
+			/* Only need to invalidate the mmhub now; gfx12 only supports one mmhub */
+ input.hub_id = AMDGPU_MMHUB0(0);
+ adev->mes.funcs->invalidate_tlbs_pasid(&adev->mes, &input);
+ }
+ return;
+ }
+
for (vmid = 1; vmid < 16; vmid++) {
bool valid;
@@ -453,20 +469,6 @@ static void gmc_v12_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid
* 0 valid
*/
-static uint64_t gmc_v12_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
-{
- switch (flags) {
- case AMDGPU_VM_MTYPE_DEFAULT:
- return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_NC:
- return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_UC:
- return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_UC);
- default:
- return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC);
- }
-}
-
static void gmc_v12_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
@@ -490,18 +492,35 @@ static void gmc_v12_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
- struct amdgpu_bo *bo = mapping->bo_va->base.bo;
+ if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ *flags |= AMDGPU_PTE_EXECUTABLE;
+ else
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ switch (vm_flags & AMDGPU_VM_MTYPE_MASK) {
+ case AMDGPU_VM_MTYPE_DEFAULT:
+ *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC);
+ break;
+ case AMDGPU_VM_MTYPE_NC:
+ default:
+ *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC);
+ break;
+ case AMDGPU_VM_MTYPE_UC:
+ *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC);
+ break;
+ }
- *flags &= ~AMDGPU_PTE_MTYPE_GFX12_MASK;
- *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_GFX12_MASK);
+ if (vm_flags & AMDGPU_VM_PAGE_NOALLOC)
+ *flags |= AMDGPU_PTE_NOALLOC;
+ else
+ *flags &= ~AMDGPU_PTE_NOALLOC;
- if (mapping->flags & AMDGPU_PTE_PRT_GFX12) {
+ if (vm_flags & AMDGPU_VM_PAGE_PRT) {
*flags |= AMDGPU_PTE_PRT_GFX12;
*flags |= AMDGPU_PTE_SNOOPED;
*flags |= AMDGPU_PTE_SYSTEM;
@@ -543,7 +562,6 @@ static const struct amdgpu_gmc_funcs gmc_v12_0_gmc_funcs = {
.flush_gpu_tlb_pasid = gmc_v12_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v12_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v12_0_emit_pasid_mapping,
- .map_mtype = gmc_v12_0_map_mtype,
.get_vm_pde = gmc_v12_0_get_vm_pde,
.get_vm_pte = gmc_v12_0_get_vm_pte,
.get_vbios_fb_size = gmc_v12_0_get_vbios_fb_size,
@@ -876,8 +894,7 @@ static int gmc_v12_0_gart_enable(struct amdgpu_device *adev)
/* Flush HDP after it is initialized */
amdgpu_device_flush_hdp(adev, NULL);
- value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
- false : true;
+ value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
adev->mmhub.funcs->set_fault_enable_default(adev, value);
gmc_v12_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);
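
The flush_gpu_tlb_pasid() addition earlier in this file's hunk dispatches the whole invalidation to MES firmware when the scheduler ring is ready and the firmware is recent enough, and falls back to the per-VMID loop otherwise. A rough stand-alone sketch of that dispatch, with the firmware hooks stubbed out (names invented):

    #include <stdbool.h>
    #include <stdio.h>

    #define MES_VERSION_MASK 0xffu

    /* Stand-ins for the MES firmware hook and the legacy CPU-driven flush. */
    static void mes_invalidate_pasid(int hub_id, int pasid)
    {
            printf("MES flush: hub %d, pasid %d\n", hub_id, pasid);
    }

    static void flush_vmid_loop(int pasid)
    {
            printf("legacy per-VMID flush, pasid %d\n", pasid);
    }

    static void flush_gpu_tlb_pasid(bool mes_ready, unsigned int sched_version,
                                    int pasid, bool all_hub)
    {
            /* New path: hand the invalidation to the scheduler firmware when it
             * is running and new enough (>= 0x84 in the hunk above). */
            if (mes_ready && (sched_version & MES_VERSION_MASK) >= 0x84) {
                    mes_invalidate_pasid(0 /* gfxhub */, pasid);
                    if (all_hub)
                            mes_invalidate_pasid(1 /* the single mmhub */, pasid);
                    return;
            }

            /* Fallback: walk the VMIDs and flush from the CPU as before. */
            flush_vmid_loop(pasid);
    }

    int main(void)
    {
            flush_gpu_tlb_pasid(true, 0x90, 42, true);
            flush_gpu_tlb_pasid(false, 0x90, 42, true);
            return 0;
    }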
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 8030fcd64210..f6ad7911f1e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -382,7 +382,9 @@ static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
*flags &= ~AMDGPU_PTE_EXECUTABLE;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index a8d5795084fc..93d7ccb7d013 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -504,7 +504,9 @@ static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
*flags &= ~AMDGPU_PTE_EXECUTABLE;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index b45fa0cea9d2..c5e2a2c41e06 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -716,11 +716,15 @@ static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v8_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
- *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ *flags |= AMDGPU_PTE_EXECUTABLE;
+ else
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
*flags &= ~AMDGPU_PTE_PRT;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index c4d69cf4e06c..0d1dd587db5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1073,27 +1073,6 @@ static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int v
* 0 valid
*/
-static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
-
-{
- switch (flags) {
- case AMDGPU_VM_MTYPE_DEFAULT:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_NC:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_WC:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_WC);
- case AMDGPU_VM_MTYPE_RW:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_RW);
- case AMDGPU_VM_MTYPE_CC:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_CC);
- case AMDGPU_VM_MTYPE_UC:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_UC);
- default:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC);
- }
-}
-
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
@@ -1123,6 +1102,7 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
@@ -1236,25 +1216,43 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
}
static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
- struct amdgpu_bo *bo = mapping->bo_va->base.bo;
-
- *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ *flags |= AMDGPU_PTE_EXECUTABLE;
+ else
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
- *flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;
+ switch (vm_flags & AMDGPU_VM_MTYPE_MASK) {
+ case AMDGPU_VM_MTYPE_DEFAULT:
+ case AMDGPU_VM_MTYPE_NC:
+ default:
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_NC);
+ break;
+ case AMDGPU_VM_MTYPE_WC:
+		*flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_WC);
+		break;
+	case AMDGPU_VM_MTYPE_RW:
+		*flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_RW);
+		break;
+	case AMDGPU_VM_MTYPE_CC:
+		*flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_CC);
+		break;
+	case AMDGPU_VM_MTYPE_UC:
+		*flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_UC);
+ break;
+ }
- if (mapping->flags & AMDGPU_PTE_PRT) {
+ if (vm_flags & AMDGPU_VM_PAGE_PRT) {
*flags |= AMDGPU_PTE_PRT;
*flags &= ~AMDGPU_PTE_VALID;
}
if ((*flags & AMDGPU_PTE_VALID) && bo)
- gmc_v9_0_get_coherence_flags(adev, mapping->bo_va->base.vm, bo,
- flags);
+ gmc_v9_0_get_coherence_flags(adev, vm, bo, vm_flags, flags);
}
static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
@@ -1391,7 +1389,6 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
- .map_mtype = gmc_v9_0_map_mtype,
.get_vm_pde = gmc_v9_0_get_vm_pde,
.get_vm_pte = gmc_v9_0_get_vm_pte,
.override_vm_pte_flags = gmc_v9_0_override_vm_pte_flags,
@@ -1837,11 +1834,19 @@ static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
{
+ static const u32 regBIF_BIOS_SCRATCH_4 = 0x50;
+ u32 vram_info;
+
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
adev->gmc.vram_width = 128 * 64;
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;
+
+ if (!(adev->flags & AMD_IS_APU) && !amdgpu_sriov_vf(adev)) {
+ vram_info = RREG32(regBIF_BIOS_SCRATCH_4);
+ adev->gmc.vram_vendor = vram_info & 0xF;
+ }
}
static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
index 5900b560b7de..333e9c30c091 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
@@ -587,8 +587,7 @@ static int ih_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
/* use gpu virtual address for ih ring
* until ih_checken is programmed to allow
* use bus address for ih ring by psp bl */
- use_bus_addr =
- (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? false : true;
+ use_bus_addr = adev->firmware.load_type != AMDGPU_FW_LOAD_PSP;
r = amdgpu_ih_ring_init(adev, &adev->irq.ih, IH_RING_SIZE, use_bus_addr);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
index 068ed849dbad..95b3f4e55ec3 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
@@ -562,8 +562,7 @@ static int ih_v6_1_sw_init(struct amdgpu_ip_block *ip_block)
/* use gpu virtual address for ih ring
* until ih_checken is programmed to allow
* use bus address for ih ring by psp bl */
- use_bus_addr =
- (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? false : true;
+ use_bus_addr = adev->firmware.load_type != AMDGPU_FW_LOAD_PSP;
r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
index 40a3530e0453..b32ea4129c61 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
@@ -552,8 +552,7 @@ static int ih_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
/* use gpu virtual address for ih ring
* until ih_checken is programmed to allow
* use bus address for ih ring by psp bl */
- use_bus_addr =
- (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? false : true;
+ use_bus_addr = adev->firmware.load_type != AMDGPU_FW_LOAD_PSP;
r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
index a887df520414..4258d3e0b706 100644
--- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c
@@ -29,6 +29,8 @@
#include "amdgpu.h"
#include "isp_v4_1_1.h"
+MODULE_FIRMWARE("amdgpu/isp_4_1_1.bin");
+
#define ISP_PERFORMANCE_STATE_LOW 0
#define ISP_PERFORMANCE_STATE_HIGH 1
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
index 9e428e669ada..b5bb7f4d607c 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
@@ -557,7 +557,7 @@ static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = {
.nop = PACKET0(0x81ff, 0),
.support_64bit_ptrs = false,
.no_user_fence = true,
- .extra_dw = 64,
+ .extra_bytes = 256,
.get_rptr = jpeg_v1_0_decode_ring_get_rptr,
.get_wptr = jpeg_v1_0_decode_ring_get_wptr,
.set_wptr = jpeg_v1_0_decode_ring_set_wptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index 58239c405fda..27c76bd424cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -23,7 +23,6 @@
#include "amdgpu.h"
#include "amdgpu_jpeg.h"
-#include "amdgpu_cs.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
@@ -806,7 +805,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v2_0_dec_ring_get_rptr,
.get_wptr = jpeg_v2_0_dec_ring_get_wptr,
.set_wptr = jpeg_v2_0_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -854,58 +853,3 @@ const struct amdgpu_ip_block_version jpeg_v2_0_ip_block = {
.rev = 0,
.funcs = &jpeg_v2_0_ip_funcs,
};
-
-/**
- * jpeg_v2_dec_ring_parse_cs - command submission parser
- *
- * @parser: Command submission parser context
- * @job: the job to parse
- * @ib: the IB to parse
- *
- * Parse the command stream, return -EINVAL for invalid packet,
- * 0 otherwise
- */
-int jpeg_v2_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
- struct amdgpu_job *job,
- struct amdgpu_ib *ib)
-{
- u32 i, reg, res, cond, type;
- struct amdgpu_device *adev = parser->adev;
-
- for (i = 0; i < ib->length_dw ; i += 2) {
- reg = CP_PACKETJ_GET_REG(ib->ptr[i]);
- res = CP_PACKETJ_GET_RES(ib->ptr[i]);
- cond = CP_PACKETJ_GET_COND(ib->ptr[i]);
- type = CP_PACKETJ_GET_TYPE(ib->ptr[i]);
-
- if (res) /* only support 0 at the moment */
- return -EINVAL;
-
- switch (type) {
- case PACKETJ_TYPE0:
- if (cond != PACKETJ_CONDITION_CHECK0 || reg < JPEG_REG_RANGE_START ||
- reg > JPEG_REG_RANGE_END) {
- dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
- return -EINVAL;
- }
- break;
- case PACKETJ_TYPE3:
- if (cond != PACKETJ_CONDITION_CHECK3 || reg < JPEG_REG_RANGE_START ||
- reg > JPEG_REG_RANGE_END) {
- dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
- return -EINVAL;
- }
- break;
- case PACKETJ_TYPE6:
- if (ib->ptr[i] == CP_PACKETJ_NOP)
- continue;
- dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
- return -EINVAL;
- default:
- dev_err(adev->dev, "Unknown packet type %d !\n", type);
- return -EINVAL;
- }
- }
-
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
index 63fadda7a673..654e43e83e2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
@@ -45,9 +45,6 @@
#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000
-#define JPEG_REG_RANGE_START 0x4000
-#define JPEG_REG_RANGE_END 0x41c2
-
void jpeg_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring);
void jpeg_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring);
void jpeg_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
@@ -60,9 +57,6 @@ void jpeg_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr);
void jpeg_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
void jpeg_v2_0_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count);
-int jpeg_v2_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
- struct amdgpu_job *job,
- struct amdgpu_ib *ib);
extern const struct amdgpu_ip_block_version jpeg_v2_0_ip_block;
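The parser deleted above moves into common code as amdgpu_jpeg_dec_parse_cs, which the jpeg_v2_0 through jpeg_v5_0_0 ring funcs below now all reference. A sketch of the shared helper, assuming it keeps the body of the removed jpeg_v2_dec_ring_parse_cs with the JPEG_REG_RANGE_* bounds relocated to a common JPEG header:

	int amdgpu_jpeg_dec_parse_cs(struct amdgpu_cs_parser *parser,
				     struct amdgpu_job *job,
				     struct amdgpu_ib *ib)
	{
		u32 i, reg, res, cond, type;

		for (i = 0; i < ib->length_dw; i += 2) {
			reg  = CP_PACKETJ_GET_REG(ib->ptr[i]);
			res  = CP_PACKETJ_GET_RES(ib->ptr[i]);
			cond = CP_PACKETJ_GET_COND(ib->ptr[i]);
			type = CP_PACKETJ_GET_TYPE(ib->ptr[i]);

			if (res)	/* only res == 0 is supported */
				return -EINVAL;

			switch (type) {
			case PACKETJ_TYPE0:
				if (cond != PACKETJ_CONDITION_CHECK0 ||
				    reg < JPEG_REG_RANGE_START ||
				    reg > JPEG_REG_RANGE_END)
					return -EINVAL;
				break;
			case PACKETJ_TYPE3:
				if (cond != PACKETJ_CONDITION_CHECK3 ||
				    reg < JPEG_REG_RANGE_START ||
				    reg > JPEG_REG_RANGE_END)
					return -EINVAL;
				break;
			case PACKETJ_TYPE6:
				if (ib->ptr[i] != CP_PACKETJ_NOP)
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
		}
		return 0;
	}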
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index 3e2c389242db..20983f126b49 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -696,7 +696,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
.get_rptr = jpeg_v2_5_dec_ring_get_rptr,
.get_wptr = jpeg_v2_5_dec_ring_get_wptr,
.set_wptr = jpeg_v2_5_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -727,7 +727,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_6_dec_ring_vm_funcs = {
.get_rptr = jpeg_v2_5_dec_ring_get_rptr,
.get_wptr = jpeg_v2_5_dec_ring_get_wptr,
.set_wptr = jpeg_v2_5_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index a44eb2667664..d1a011c40ba2 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -597,7 +597,7 @@ static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v3_0_dec_ring_get_rptr,
.get_wptr = jpeg_v3_0_dec_ring_get_wptr,
.set_wptr = jpeg_v3_0_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
index da3ee69f1a3b..33db2c1ae6cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
@@ -762,7 +762,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v4_0_dec_ring_get_rptr,
.get_wptr = jpeg_v4_0_dec_ring_get_wptr,
.set_wptr = jpeg_v4_0_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index b86288a69e7b..aae7328973d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -444,7 +444,7 @@ static int jpeg_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
ret = jpeg_v4_0_3_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG) && !amdgpu_sriov_vf(adev))
amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);
return ret;
@@ -1177,7 +1177,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = {
.get_rptr = jpeg_v4_0_3_dec_ring_get_rptr,
.get_wptr = jpeg_v4_0_3_dec_ring_get_wptr,
.set_wptr = jpeg_v4_0_3_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
index 481d1a2dbe5a..54fd9c800c40 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
@@ -686,7 +686,7 @@ static int jpeg_v4_0_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = ip_block->adev;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = state == AMD_CG_STATE_GATE;
int i;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
@@ -807,7 +807,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = {
.get_rptr = jpeg_v4_0_5_dec_ring_get_rptr,
.get_wptr = jpeg_v4_0_5_dec_ring_get_wptr,
.set_wptr = jpeg_v4_0_5_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
index e0a71909252b..46bf15dce2bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
@@ -584,7 +584,7 @@ static int jpeg_v5_0_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = ip_block->adev;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = state == AMD_CG_STATE_GATE;
if (enable) {
if (!jpeg_v5_0_0_is_idle(ip_block))
@@ -683,7 +683,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v5_0_0_dec_ring_get_rptr,
.get_wptr = jpeg_v5_0_0_dec_ring_get_wptr,
.set_wptr = jpeg_v5_0_0_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
index 54523dc1f702..baf097d2e1ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
@@ -196,6 +196,14 @@ static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
}
}
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) {
+ r = amdgpu_jpeg_ras_sw_init(adev);
+ if (r) {
+ dev_err(adev->dev, "Failed to initialize jpeg ras block!\n");
+ return r;
+ }
+ }
+
r = amdgpu_jpeg_reg_dump_init(adev, jpeg_reg_list_5_0_1, ARRAY_SIZE(jpeg_reg_list_5_0_1));
if (r)
return r;
@@ -307,7 +315,7 @@ static int jpeg_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
ret = jpeg_v5_0_1_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG) && !amdgpu_sriov_vf(adev))
amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);
return ret;
@@ -689,7 +697,7 @@ static int jpeg_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = ip_block->adev;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = state == AMD_CG_STATE_GATE;
int i;
@@ -1016,8 +1024,9 @@ static int jpeg_v5_0_1_aca_bank_parser(struct aca_handle *handle, struct aca_ban
/* refer to the smu driver i/f header file */
static int jpeg_v5_0_1_err_codes[] = {
- 16, 17, 18, 19, 20, 21, 22, 23, /* JPEG[0-7][S|D] */
- 24, 25, 26, 27, 28, 29, 30, 31
+ 16, 17, 18, 19, 20, 21, 22, 23, /* JPEG[0-9][S|D] */
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 48, 49, 50, 51,
};
static bool jpeg_v5_0_1_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
@@ -1058,6 +1067,11 @@ static int jpeg_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_comm
if (r)
return r;
+ r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__JPEG,
+ &jpeg_v5_0_1_aca_info, NULL);
+ if (r)
+ goto late_fini;
+
if (amdgpu_ras_is_supported(adev, ras_block->block) &&
adev->jpeg.inst->ras_poison_irq.funcs) {
r = amdgpu_irq_get(adev, &adev->jpeg.inst->ras_poison_irq, 0);
@@ -1065,11 +1079,6 @@ static int jpeg_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_comm
goto late_fini;
}
- r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__JPEG,
- &jpeg_v5_0_1_aca_info, NULL);
- if (r)
- goto late_fini;
-
return 0;
late_fini:
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
index d6f50b13e2ba..2db9b2c63693 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
@@ -21,6 +21,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "mes_userqueue.h"
@@ -198,6 +199,53 @@ static int mes_userq_create_ctx_space(struct amdgpu_userq_mgr *uq_mgr,
return 0;
}
+static int mes_userq_detect_and_reset(struct amdgpu_device *adev,
+ int queue_type)
+{
+ int db_array_size = amdgpu_mes_get_hung_queue_db_array_size(adev);
+ struct mes_detect_and_reset_queue_input input;
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ unsigned int hung_db_num = 0;
+ int queue_id, r, i;
+ u32 db_array[4];
+
+ if (db_array_size > 4) {
+ dev_err(adev->dev, "DB array size (%d vs 4) too small\n",
+ db_array_size);
+ return -EINVAL;
+ }
+
+ memset(&input, 0x0, sizeof(struct mes_detect_and_reset_queue_input));
+
+ input.queue_type = queue_type;
+
+ amdgpu_mes_lock(&adev->mes);
+ r = amdgpu_mes_detect_and_reset_hung_queues(adev, queue_type, false,
+ &hung_db_num, db_array);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r) {
+ dev_err(adev->dev, "Failed to detect and reset queues, err (%d)\n", r);
+ } else if (hung_db_num) {
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ if (queue->queue_type == queue_type) {
+ for (i = 0; i < hung_db_num; i++) {
+ if (queue->doorbell_index == db_array[i]) {
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ atomic_inc(&adev->gpu_reset_counter);
+ amdgpu_userq_fence_driver_force_completion(queue);
+ drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, NULL);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return r;
+}
+
static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
struct drm_amdgpu_userq_in *args_in,
struct amdgpu_usermode_queue *queue)
@@ -206,6 +254,7 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type];
struct drm_amdgpu_userq_in *mqd_user = args_in;
struct amdgpu_mqd_prop *userq_props;
+ struct amdgpu_gfx_shadow_info shadow_info;
int r;
/* Structure to initialize MQD for userqueue using generic MQD init function */
@@ -215,13 +264,6 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
return -ENOMEM;
}
- if (!mqd_user->wptr_va || !mqd_user->rptr_va ||
- !mqd_user->queue_va || mqd_user->queue_size == 0) {
- DRM_ERROR("Invalid MQD parameters for userqueue\n");
- r = -EINVAL;
- goto free_props;
- }
-
r = amdgpu_userq_create_object(uq_mgr, &queue->mqd, mqd_hw_default->mqd_size);
if (r) {
DRM_ERROR("Failed to create MQD object for userqueue\n");
@@ -238,6 +280,8 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
userq_props->doorbell_index = queue->doorbell_index;
userq_props->fence_address = queue->fence_drv->gpu_addr;
+ if (adev->gfx.funcs->get_gfx_shadow_info)
+ adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true);
if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) {
struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd;
@@ -254,6 +298,10 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
goto free_mqd;
}
+	r = amdgpu_userq_input_va_validate(queue->vm, compute_mqd->eop_va,
+				max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE));
+	if (r) {
+		kfree(compute_mqd); /* don't leak the MQD copy, keep r's error */
+		goto free_mqd;
+	}
+
userq_props->eop_gpu_addr = compute_mqd->eop_va;
userq_props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
userq_props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
@@ -281,6 +329,11 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
userq_props->csa_addr = mqd_gfx_v11->csa_va;
userq_props->tmz_queue =
mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
+
+	r = amdgpu_userq_input_va_validate(queue->vm, mqd_gfx_v11->shadow_va,
+				shadow_info.shadow_size);
+	if (r) {
+		kfree(mqd_gfx_v11); /* don't leak the MQD copy, keep r's error */
+		goto free_mqd;
+	}
+
kfree(mqd_gfx_v11);
} else if (queue->queue_type == AMDGPU_HW_IP_DMA) {
struct drm_amdgpu_userq_mqd_sdma_gfx11 *mqd_sdma_v11;
@@ -298,6 +351,10 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
goto free_mqd;
}
+	r = amdgpu_userq_input_va_validate(queue->vm, mqd_sdma_v11->csa_va,
+				shadow_info.csa_size);
+	if (r) {
+		kfree(mqd_sdma_v11); /* don't leak the MQD copy, keep r's error */
+		goto free_mqd;
+	}
+
userq_props->csa_addr = mqd_sdma_v11->csa_va;
kfree(mqd_sdma_v11);
}
@@ -347,9 +404,82 @@ mes_userq_mqd_destroy(struct amdgpu_userq_mgr *uq_mgr,
amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);
}
+static int mes_userq_preempt(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct mes_suspend_gang_input queue_input;
+ struct amdgpu_userq_obj *ctx = &queue->fw_obj;
+ signed long timeout = 2100000; /* 2100 ms */
+ u64 fence_gpu_addr;
+ u32 fence_offset;
+ u64 *fence_ptr;
+ int i, r;
+
+ if (queue->state != AMDGPU_USERQ_STATE_MAPPED)
+ return 0;
+ r = amdgpu_device_wb_get(adev, &fence_offset);
+ if (r)
+ return r;
+
+ fence_gpu_addr = adev->wb.gpu_addr + (fence_offset * 4);
+ fence_ptr = (u64 *)&adev->wb.wb[fence_offset];
+ *fence_ptr = 0;
+
+ memset(&queue_input, 0x0, sizeof(struct mes_suspend_gang_input));
+ queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
+ queue_input.suspend_fence_addr = fence_gpu_addr;
+ queue_input.suspend_fence_value = 1;
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->suspend_gang(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r) {
+ DRM_ERROR("Failed to suspend gang: %d\n", r);
+ goto out;
+ }
+
+ for (i = 0; i < timeout; i++) {
+ if (*fence_ptr == 1)
+ goto out;
+ udelay(1);
+ }
+ r = -ETIMEDOUT;
+
+out:
+ amdgpu_device_wb_free(adev, fence_offset);
+ return r;
+}
+
+static int mes_userq_restore(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct mes_resume_gang_input queue_input;
+ struct amdgpu_userq_obj *ctx = &queue->fw_obj;
+ int r;
+
+ if (queue->state == AMDGPU_USERQ_STATE_HUNG)
+ return -EINVAL;
+ if (queue->state != AMDGPU_USERQ_STATE_PREEMPTED)
+ return 0;
+
+ memset(&queue_input, 0x0, sizeof(struct mes_resume_gang_input));
+ queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
+
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->resume_gang(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r)
+ dev_err(adev->dev, "Failed to resume queue, err (%d)\n", r);
+ return r;
+}
+
const struct amdgpu_userq_funcs userq_mes_funcs = {
.mqd_create = mes_userq_mqd_create,
.mqd_destroy = mes_userq_mqd_destroy,
.unmap = mes_userq_unmap,
.map = mes_userq_map,
+ .detect_and_reset = mes_userq_detect_and_reset,
+ .preempt = mes_userq_preempt,
+ .restore = mes_userq_restore,
};
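A hedged sketch of how a caller could drive the new preempt/restore hooks; the real call sites live elsewhere in the userq manager, and the helper name here is hypothetical:

	/* assumes adev->userq_funcs[...] points at userq_mes_funcs, as the
	 * sdma_v6_0/sdma_v7_0 hunks below set up */
	static int userq_cycle_queue(struct amdgpu_userq_mgr *uq_mgr,
				     struct amdgpu_usermode_queue *queue)
	{
		const struct amdgpu_userq_funcs *fns =
			uq_mgr->adev->userq_funcs[queue->queue_type];
		int r;

		r = fns->preempt(uq_mgr, queue);    /* suspend_gang + fence poll */
		if (r)
			return r;                   /* e.g. -ETIMEDOUT */

		return fns->restore(uq_mgr, queue); /* resume_gang; -EINVAL if hung */
	}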
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 28eb846280dd..e82188431f79 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -66,6 +66,8 @@ static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev);
#define GFX_MES_DRAM_SIZE 0x80000
#define MES11_HW_RESOURCE_1_SIZE (128 * AMDGPU_GPU_PAGE_SIZE)
+#define MES11_HUNG_DB_OFFSET_ARRAY_SIZE 4
+
static void mes_v11_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -641,8 +643,9 @@ static int mes_v11_0_misc_op(struct amdgpu_mes *mes,
break;
case MES_MISC_OP_CHANGE_CONFIG:
if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) < 0x63) {
- dev_err(mes->adev->dev, "MES FW version must be larger than 0x63 to support limit single process feature.\n");
- return -EINVAL;
+ dev_warn_once(mes->adev->dev,
+ "MES FW version must be larger than 0x63 to support limit single process feature.\n");
+ return 0;
}
misc_pkt.opcode = MESAPI_MISC__CHANGE_CONFIG;
misc_pkt.change_config.opcode =
@@ -710,6 +713,12 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
mes_set_hw_res_pkt.enable_reg_active_poll = 1;
mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
mes_set_hw_res_pkt.oversubscription_timer = 50;
+ if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x7f)
+ mes_set_hw_res_pkt.enable_lr_compute_wa = 1;
+ else
+ dev_info_once(mes->adev->dev,
+ "MES FW version must be >= 0x7f to enable LR compute workaround.\n");
+
if (amdgpu_mes_log_enable) {
mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr =
@@ -783,6 +792,32 @@ static int mes_v11_0_reset_hw_queue(struct amdgpu_mes *mes,
offsetof(union MESAPI__RESET, api_status));
}
+static int mes_v11_0_detect_and_reset_hung_queues(struct amdgpu_mes *mes,
+ struct mes_detect_and_reset_queue_input *input)
+{
+ union MESAPI__RESET mes_reset_queue_pkt;
+
+ memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
+
+ mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
+ mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_reset_queue_pkt.queue_type =
+ convert_to_mes_queue_type(input->queue_type);
+ mes_reset_queue_pkt.doorbell_offset_addr =
+ mes->hung_queue_db_array_gpu_addr;
+
+ if (input->detect_only)
+ mes_reset_queue_pkt.hang_detect_only = 1;
+ else
+ mes_reset_queue_pkt.hang_detect_then_reset = 1;
+
+ return mes_v11_0_submit_pkt_and_poll_completion(mes,
+ &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
+ offsetof(union MESAPI__RESET, api_status));
+}
+
static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
.add_hw_queue = mes_v11_0_add_hw_queue,
.remove_hw_queue = mes_v11_0_remove_hw_queue,
@@ -792,6 +827,7 @@ static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
.resume_gang = mes_v11_0_resume_gang,
.misc_op = mes_v11_0_misc_op,
.reset_hw_queue = mes_v11_0_reset_hw_queue,
+ .detect_and_reset_hung_queues = mes_v11_0_detect_and_reset_hung_queues,
};
static int mes_v11_0_allocate_ucode_buffer(struct amdgpu_device *adev,
@@ -1684,6 +1720,8 @@ static int mes_v11_0_early_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int pipe, r;
+ adev->mes.hung_queue_db_array_size =
+ MES11_HUNG_DB_OFFSET_ARRAY_SIZE;
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
continue;
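Several hunks in this series gate features on the MES scheduler firmware version with the same masked compare; a hypothetical helper capturing the pattern (sched_version and AMDGPU_MES_VERSION_MASK are the real fields used above):

	static inline bool mes_sched_fw_at_least(struct amdgpu_mes *mes, u32 min_ver)
	{
		return (mes->sched_version & AMDGPU_MES_VERSION_MASK) >= min_ver;
	}

With it, the 0x63, 0x7f and 0x82 checks would each read as a single call, e.g. if (!mes_sched_fw_at_least(mes, 0x7f)).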
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index 6b222630f3fa..aff06f06aeee 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -47,6 +47,8 @@ static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev);
#define MES_EOP_SIZE 2048
+#define MES12_HUNG_DB_OFFSET_ARRAY_SIZE 4
+
static void mes_v12_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -108,6 +110,7 @@ static const char *mes_v12_0_opcodes[] = {
"SET_SE_MODE",
"SET_GANG_SUBMIT",
"SET_HW_RSRC_1",
+ "INVALIDATE_TLBS",
};
static const char *mes_v12_0_misc_opcodes[] = {
@@ -567,13 +570,41 @@ static int mes_v12_0_unmap_legacy_queue(struct amdgpu_mes *mes,
static int mes_v12_0_suspend_gang(struct amdgpu_mes *mes,
struct mes_suspend_gang_input *input)
{
- return 0;
+ union MESAPI__SUSPEND mes_suspend_gang_pkt;
+
+ memset(&mes_suspend_gang_pkt, 0, sizeof(mes_suspend_gang_pkt));
+
+ mes_suspend_gang_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_suspend_gang_pkt.header.opcode = MES_SCH_API_SUSPEND;
+ mes_suspend_gang_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_suspend_gang_pkt.suspend_all_gangs = input->suspend_all_gangs;
+ mes_suspend_gang_pkt.gang_context_addr = input->gang_context_addr;
+ mes_suspend_gang_pkt.suspend_fence_addr = input->suspend_fence_addr;
+ mes_suspend_gang_pkt.suspend_fence_value = input->suspend_fence_value;
+
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, AMDGPU_MES_SCHED_PIPE,
+ &mes_suspend_gang_pkt, sizeof(mes_suspend_gang_pkt),
+ offsetof(union MESAPI__SUSPEND, api_status));
}
static int mes_v12_0_resume_gang(struct amdgpu_mes *mes,
struct mes_resume_gang_input *input)
{
- return 0;
+ union MESAPI__RESUME mes_resume_gang_pkt;
+
+ memset(&mes_resume_gang_pkt, 0, sizeof(mes_resume_gang_pkt));
+
+ mes_resume_gang_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_resume_gang_pkt.header.opcode = MES_SCH_API_RESUME;
+ mes_resume_gang_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_resume_gang_pkt.resume_all_gangs = input->resume_all_gangs;
+ mes_resume_gang_pkt.gang_context_addr = input->gang_context_addr;
+
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, AMDGPU_MES_SCHED_PIPE,
+ &mes_resume_gang_pkt, sizeof(mes_resume_gang_pkt),
+ offsetof(union MESAPI__RESUME, api_status));
}
static int mes_v12_0_query_sched_status(struct amdgpu_mes *mes, int pipe)
@@ -738,6 +769,11 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)
mes_set_hw_res_pkt.use_different_vmid_compute = 1;
mes_set_hw_res_pkt.enable_reg_active_poll = 1;
mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
+ if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x82)
+ mes_set_hw_res_pkt.enable_lr_compute_wa = 1;
+ else
+ dev_info_once(adev->dev,
+ "MES FW version must be >= 0x82 to enable LR compute workaround.\n");
/*
* Keep oversubscribe timer for sdma . When we have unmapped doorbell
@@ -879,6 +915,74 @@ static int mes_v12_0_reset_hw_queue(struct amdgpu_mes *mes,
offsetof(union MESAPI__RESET, api_status));
}
+static int mes_v12_0_detect_and_reset_hung_queues(struct amdgpu_mes *mes,
+ struct mes_detect_and_reset_queue_input *input)
+{
+ union MESAPI__RESET mes_reset_queue_pkt;
+
+ memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
+
+ mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
+ mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_reset_queue_pkt.queue_type =
+ convert_to_mes_queue_type(input->queue_type);
+ mes_reset_queue_pkt.doorbell_offset_addr =
+ mes->hung_queue_db_array_gpu_addr;
+
+ if (input->detect_only)
+ mes_reset_queue_pkt.hang_detect_only = 1;
+ else
+ mes_reset_queue_pkt.hang_detect_then_reset = 1;
+
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, AMDGPU_MES_SCHED_PIPE,
+ &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
+ offsetof(union MESAPI__RESET, api_status));
+}
+
+static int mes_v12_inv_tlb_convert_hub_id(uint8_t id)
+{
+ /*
+	 * MES doesn't support invalidating the gc_hub on a slave xcc
+	 * individually; the master xcc invalidates all gc_hubs for the partition
+ */
+ if (AMDGPU_IS_GFXHUB(id))
+ return 0;
+ else if (AMDGPU_IS_MMHUB0(id))
+ return 1;
+ else
+ return -EINVAL;
+}
+
+static int mes_v12_0_inv_tlbs_pasid(struct amdgpu_mes *mes,
+ struct mes_inv_tlbs_pasid_input *input)
+{
+ union MESAPI__INV_TLBS mes_inv_tlbs;
+ int ret;
+
+ memset(&mes_inv_tlbs, 0, sizeof(mes_inv_tlbs));
+
+ mes_inv_tlbs.header.type = MES_API_TYPE_SCHEDULER;
+ mes_inv_tlbs.header.opcode = MES_SCH_API_INV_TLBS;
+ mes_inv_tlbs.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_inv_tlbs.invalidate_tlbs.inv_sel = 0;
+ mes_inv_tlbs.invalidate_tlbs.flush_type = input->flush_type;
+ mes_inv_tlbs.invalidate_tlbs.inv_sel_id = input->pasid;
+
+	/* convert amdgpu_mes_hub_id to the MES-expected hub_id */
+ ret = mes_v12_inv_tlb_convert_hub_id(input->hub_id);
+ if (ret < 0)
+ return -EINVAL;
+ mes_inv_tlbs.invalidate_tlbs.hub_id = ret;
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, AMDGPU_MES_KIQ_PIPE,
+ &mes_inv_tlbs, sizeof(mes_inv_tlbs),
+ offsetof(union MESAPI__INV_TLBS, api_status));
+}
+
static const struct amdgpu_mes_funcs mes_v12_0_funcs = {
.add_hw_queue = mes_v12_0_add_hw_queue,
.remove_hw_queue = mes_v12_0_remove_hw_queue,
@@ -888,6 +992,8 @@ static const struct amdgpu_mes_funcs mes_v12_0_funcs = {
.resume_gang = mes_v12_0_resume_gang,
.misc_op = mes_v12_0_misc_op,
.reset_hw_queue = mes_v12_0_reset_hw_queue,
+ .invalidate_tlbs_pasid = mes_v12_0_inv_tlbs_pasid,
+ .detect_and_reset_hung_queues = mes_v12_0_detect_and_reset_hung_queues,
};
static int mes_v12_0_allocate_ucode_buffer(struct amdgpu_device *adev,
@@ -1793,6 +1899,8 @@ static int mes_v12_0_early_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int pipe, r;
+ adev->mes.hung_queue_db_array_size =
+ MES12_HUNG_DB_OFFSET_ARRAY_SIZE;
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
r = amdgpu_mes_init_microcode(adev, pipe);
if (r)
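A sketch of what a PASID TLB flush routed through the new MES op might look like from the caller's side; the wrapper is hypothetical, the input field types are assumptions, and the field names follow the usage above:

	static int flush_pasid_tlb_via_mes(struct amdgpu_device *adev,
					   u16 pasid, u8 flush_type, u8 hub_id)
	{
		struct mes_inv_tlbs_pasid_input input = {
			.pasid = pasid,
			.flush_type = flush_type,
			.hub_id = hub_id,   /* gfxhub or mmhub0; others -EINVAL */
		};

		return adev->mes.funcs->invalidate_tlbs_pasid(&adev->mes, &input);
	}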
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
index 134c4ec10887..910337dc28d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
@@ -36,40 +36,47 @@
static const char *mmhub_client_ids_v3_0_1[][2] = {
[0][0] = "VMC",
+ [1][0] = "ISPXT",
+ [2][0] = "ISPIXT",
[4][0] = "DCEDMC",
[5][0] = "DCEVGA",
[6][0] = "MP0",
[7][0] = "MP1",
- [8][0] = "MPIO",
- [16][0] = "HDP",
- [17][0] = "LSDMA",
- [18][0] = "JPEG",
- [19][0] = "VCNU0",
- [21][0] = "VSCH",
- [22][0] = "VCNU1",
- [23][0] = "VCN1",
- [32+20][0] = "VCN0",
- [2][1] = "DBGUNBIO",
+ [8][0] = "MPM",
+ [12][0] = "ISPTNR",
+ [14][0] = "ISPCRD0",
+ [15][0] = "ISPCRD1",
+ [16][0] = "ISPCRD2",
+ [22][0] = "HDP",
+ [23][0] = "LSDMA",
+ [24][0] = "JPEG",
+ [27][0] = "VSCH",
+ [28][0] = "VCNU",
+ [29][0] = "VCN",
+ [1][1] = "ISPXT",
+ [2][1] = "ISPIXT",
[3][1] = "DCEDWB",
[4][1] = "DCEDMC",
[5][1] = "DCEVGA",
[6][1] = "MP0",
[7][1] = "MP1",
- [8][1] = "MPIO",
- [10][1] = "DBGU0",
- [11][1] = "DBGU1",
- [12][1] = "DBGU2",
- [13][1] = "DBGU3",
- [14][1] = "XDP",
- [15][1] = "OSSSYS",
- [16][1] = "HDP",
- [17][1] = "LSDMA",
- [18][1] = "JPEG",
- [19][1] = "VCNU0",
- [20][1] = "VCN0",
- [21][1] = "VSCH",
- [22][1] = "VCNU1",
- [23][1] = "VCN1",
+ [8][1] = "MPM",
+ [10][1] = "ISPMWR0",
+ [11][1] = "ISPMWR1",
+ [12][1] = "ISPTNR",
+ [13][1] = "ISPSWR",
+ [14][1] = "ISPCWR0",
+ [15][1] = "ISPCWR1",
+ [16][1] = "ISPCWR2",
+ [17][1] = "ISPCWR3",
+ [18][1] = "XDP",
+ [21][1] = "OSSSYS",
+ [22][1] = "HDP",
+ [23][1] = "LSDMA",
+ [24][1] = "JPEG",
+ [27][1] = "VSCH",
+ [28][1] = "VCNU",
+ [29][1] = "VCN",
};
static uint32_t mmhub_v3_0_1_get_invalidate_req(unsigned int vmid,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
index bc3d6c2fc87a..f6fc9778bc30 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
@@ -40,30 +40,129 @@
static const char *mmhub_client_ids_v3_3[][2] = {
[0][0] = "VMC",
+ [1][0] = "ISPXT",
+ [2][0] = "ISPIXT",
[4][0] = "DCEDMC",
[6][0] = "MP0",
[7][0] = "MP1",
[8][0] = "MPM",
+ [9][0] = "ISPPDPRD",
+ [10][0] = "ISPCSTATRD",
+ [11][0] = "ISPBYRPRD",
+ [12][0] = "ISPRGBPRD",
+ [13][0] = "ISPMCFPRD",
+ [14][0] = "ISPMCFPRD1",
+ [15][0] = "ISPYUVPRD",
+ [16][0] = "ISPMCSCRD",
+ [17][0] = "ISPGDCRD",
+ [18][0] = "ISPLMERD",
+ [22][0] = "ISPXT1",
+ [23][0] = "ISPIXT1",
[24][0] = "HDP",
[25][0] = "LSDMA",
[26][0] = "JPEG",
[27][0] = "VPE",
+ [28][0] = "VSCH",
[29][0] = "VCNU",
[30][0] = "VCN",
+ [1][1] = "ISPXT",
+ [2][1] = "ISPIXT",
[3][1] = "DCEDWB",
[4][1] = "DCEDMC",
+ [5][1] = "ISPCSISWR",
[6][1] = "MP0",
[7][1] = "MP1",
[8][1] = "MPM",
+ [9][1] = "ISPPDPWR",
+ [10][1] = "ISPCSTATWR",
+ [11][1] = "ISPBYRPWR",
+ [12][1] = "ISPRGBPWR",
+ [13][1] = "ISPMCFPWR",
+ [14][1] = "ISPMWR0",
+ [15][1] = "ISPYUVPWR",
+ [16][1] = "ISPMCSCWR",
+ [17][1] = "ISPGDCWR",
+ [18][1] = "ISPLMEWR",
+ [20][1] = "ISPMWR2",
[21][1] = "OSSSYS",
+ [22][1] = "ISPXT1",
+ [23][1] = "ISPIXT1",
[24][1] = "HDP",
[25][1] = "LSDMA",
[26][1] = "JPEG",
[27][1] = "VPE",
+ [28][1] = "VSCH",
[29][1] = "VCNU",
[30][1] = "VCN",
};
+static const char *mmhub_client_ids_v3_3_1[][2] = {
+ [0][0] = "VMC",
+ [4][0] = "DCEDMC",
+ [6][0] = "MP0",
+ [7][0] = "MP1",
+ [8][0] = "MPM",
+ [24][0] = "HDP",
+ [25][0] = "LSDMA",
+ [26][0] = "JPEG0",
+ [27][0] = "VPE0",
+ [28][0] = "VSCH",
+ [29][0] = "VCNU0",
+ [30][0] = "VCN0",
+ [32+1][0] = "ISPXT",
+ [32+2][0] = "ISPIXT",
+ [32+9][0] = "ISPPDPRD",
+ [32+10][0] = "ISPCSTATRD",
+ [32+11][0] = "ISPBYRPRD",
+ [32+12][0] = "ISPRGBPRD",
+ [32+13][0] = "ISPMCFPRD",
+ [32+14][0] = "ISPMCFPRD1",
+ [32+15][0] = "ISPYUVPRD",
+ [32+16][0] = "ISPMCSCRD",
+ [32+17][0] = "ISPGDCRD",
+ [32+18][0] = "ISPLMERD",
+ [32+22][0] = "ISPXT1",
+ [32+23][0] = "ISPIXT1",
+ [32+26][0] = "JPEG1",
+ [32+27][0] = "VPE1",
+ [32+29][0] = "VCNU1",
+ [32+30][0] = "VCN1",
+ [3][1] = "DCEDWB",
+ [4][1] = "DCEDMC",
+ [6][1] = "MP0",
+ [7][1] = "MP1",
+ [8][1] = "MPM",
+ [21][1] = "OSSSYS",
+ [24][1] = "HDP",
+ [25][1] = "LSDMA",
+ [26][1] = "JPEG0",
+ [27][1] = "VPE0",
+ [28][1] = "VSCH",
+ [29][1] = "VCNU0",
+ [30][1] = "VCN0",
+ [32+1][1] = "ISPXT",
+ [32+2][1] = "ISPIXT",
+ [32+5][1] = "ISPCSISWR",
+ [32+9][1] = "ISPPDPWR",
+ [32+10][1] = "ISPCSTATWR",
+ [32+11][1] = "ISPBYRPWR",
+ [32+12][1] = "ISPRGBPWR",
+ [32+13][1] = "ISPMCFPWR",
+ [32+14][1] = "ISPMWR0",
+ [32+15][1] = "ISPYUVPWR",
+ [32+16][1] = "ISPMCSCWR",
+ [32+17][1] = "ISPGDCWR",
+ [32+18][1] = "ISPLMEWR",
+ [32+19][1] = "ISPMWR1",
+ [32+20][1] = "ISPMWR2",
+ [32+22][1] = "ISPXT1",
+ [32+23][1] = "ISPIXT1",
+ [32+26][1] = "JPEG1",
+ [32+27][1] = "VPE1",
+ [32+29][1] = "VCNU1",
+ [32+30][1] = "VCN1",
+};
+
static uint32_t mmhub_v3_3_get_invalidate_req(unsigned int vmid,
uint32_t flush_type)
{
@@ -102,12 +201,16 @@ mmhub_v3_3_print_l2_protection_fault_status(struct amdgpu_device *adev,
switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(3, 3, 0):
- case IP_VERSION(3, 3, 1):
case IP_VERSION(3, 3, 2):
mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_3) ?
mmhub_client_ids_v3_3[cid][rw] :
cid == 0x140 ? "UMSCH" : NULL;
break;
+ case IP_VERSION(3, 3, 1):
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_3_1) ?
+ mmhub_client_ids_v3_3_1[cid][rw] :
+ cid == 0x140 ? "UMSCH" : NULL;
+ break;
default:
mmhub_cid = NULL;
break;
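Column 0 of these tables names read clients and column 1 write clients, indexed by client id. The lookup all the mmhub variants share, reduced to a sketch (bounds check and the 0x140 special case taken from the handler above):

	static const char *mmhub_v3_3_1_decode_client(uint32_t cid, uint32_t rw)
	{
		if (cid < ARRAY_SIZE(mmhub_client_ids_v3_3_1))
			return mmhub_client_ids_v3_3_1[cid][rw]; /* NULL for gaps */
		return cid == 0x140 ? "UMSCH" : NULL;
	}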
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 48101a34e049..9a40107a0869 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -292,14 +292,32 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
}
}
-static void xgpu_ai_mailbox_bad_pages_work(struct work_struct *work)
+static void xgpu_ai_mailbox_req_bad_pages_work(struct work_struct *work)
{
- struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, bad_pages_work);
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, req_bad_pages_work);
struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
if (down_read_trylock(&adev->reset_domain->sem)) {
amdgpu_virt_fini_data_exchange(adev);
amdgpu_virt_request_bad_pages(adev);
+ up_read(&adev->reset_domain->sem);
+ }
+}
+
+/**
+ * xgpu_ai_mailbox_handle_bad_pages_work - Reinitialize the data exchange region to get fresh bad page information
+ * @work: pointer to the work_struct
+ *
+ * This work handler is triggered when bad pages are ready, and it reinitializes
+ * the data exchange region to retrieve updated bad page information from the host.
+ */
+static void xgpu_ai_mailbox_handle_bad_pages_work(struct work_struct *work)
+{
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, handle_bad_pages_work);
+ struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ amdgpu_virt_fini_data_exchange(adev);
amdgpu_virt_init_data_exchange(adev);
up_read(&adev->reset_domain->sem);
}
@@ -327,10 +345,15 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
switch (event) {
+ case IDH_RAS_BAD_PAGES_READY:
+ xgpu_ai_mailbox_send_ack(adev);
+ if (amdgpu_sriov_runtime(adev))
+ schedule_work(&adev->virt.handle_bad_pages_work);
+ break;
case IDH_RAS_BAD_PAGES_NOTIFICATION:
xgpu_ai_mailbox_send_ack(adev);
if (amdgpu_sriov_runtime(adev))
- schedule_work(&adev->virt.bad_pages_work);
+ schedule_work(&adev->virt.req_bad_pages_work);
break;
case IDH_UNRECOV_ERR_NOTIFICATION:
xgpu_ai_mailbox_send_ack(adev);
@@ -415,7 +438,8 @@ int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
}
INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);
- INIT_WORK(&adev->virt.bad_pages_work, xgpu_ai_mailbox_bad_pages_work);
+ INIT_WORK(&adev->virt.req_bad_pages_work, xgpu_ai_mailbox_req_bad_pages_work);
+ INIT_WORK(&adev->virt.handle_bad_pages_work, xgpu_ai_mailbox_handle_bad_pages_work);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index f6d8597452ed..e5282a5d05d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -202,8 +202,8 @@ send_request:
case IDH_REQ_RAS_CPER_DUMP:
event = IDH_RAS_CPER_DUMP_READY;
break;
- case IDH_REQ_RAS_BAD_PAGES:
- event = IDH_RAS_BAD_PAGES_READY;
+ case IDH_REQ_RAS_CHK_CRITI:
+ event = IDH_REQ_RAS_CHK_CRITI_READY;
break;
default:
break;
@@ -359,14 +359,32 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
}
}
-static void xgpu_nv_mailbox_bad_pages_work(struct work_struct *work)
+static void xgpu_nv_mailbox_req_bad_pages_work(struct work_struct *work)
{
- struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, bad_pages_work);
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, req_bad_pages_work);
struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
if (down_read_trylock(&adev->reset_domain->sem)) {
amdgpu_virt_fini_data_exchange(adev);
amdgpu_virt_request_bad_pages(adev);
+ up_read(&adev->reset_domain->sem);
+ }
+}
+
+/**
+ * xgpu_nv_mailbox_handle_bad_pages_work - Reinitialize the data exchange region to get fresh bad page information
+ * @work: pointer to the work_struct
+ *
+ * This work handler is triggered when bad pages are ready, and it reinitializes
+ * the data exchange region to retrieve updated bad page information from the host.
+ */
+static void xgpu_nv_mailbox_handle_bad_pages_work(struct work_struct *work)
+{
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, handle_bad_pages_work);
+ struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ amdgpu_virt_fini_data_exchange(adev);
amdgpu_virt_init_data_exchange(adev);
up_read(&adev->reset_domain->sem);
}
@@ -397,10 +415,15 @@ static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
switch (event) {
+ case IDH_RAS_BAD_PAGES_READY:
+ xgpu_nv_mailbox_send_ack(adev);
+ if (amdgpu_sriov_runtime(adev))
+ schedule_work(&adev->virt.handle_bad_pages_work);
+ break;
case IDH_RAS_BAD_PAGES_NOTIFICATION:
xgpu_nv_mailbox_send_ack(adev);
if (amdgpu_sriov_runtime(adev))
- schedule_work(&adev->virt.bad_pages_work);
+ schedule_work(&adev->virt.req_bad_pages_work);
break;
case IDH_UNRECOV_ERR_NOTIFICATION:
xgpu_nv_mailbox_send_ack(adev);
@@ -485,7 +508,8 @@ int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
}
INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);
- INIT_WORK(&adev->virt.bad_pages_work, xgpu_nv_mailbox_bad_pages_work);
+ INIT_WORK(&adev->virt.req_bad_pages_work, xgpu_nv_mailbox_req_bad_pages_work);
+ INIT_WORK(&adev->virt.handle_bad_pages_work, xgpu_nv_mailbox_handle_bad_pages_work);
return 0;
}
@@ -535,6 +559,16 @@ static int xgpu_nv_req_ras_bad_pages(struct amdgpu_device *adev)
return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_BAD_PAGES);
}
+static int xgpu_nv_check_vf_critical_region(struct amdgpu_device *adev, u64 addr)
+{
+ uint32_t addr_hi, addr_lo;
+
+ addr_hi = (uint32_t)(addr >> 32);
+ addr_lo = (uint32_t)(addr & 0xFFFFFFFF);
+ return xgpu_nv_send_access_requests_with_param(
+ adev, IDH_REQ_RAS_CHK_CRITI, addr_hi, addr_lo, 0);
+}
+
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.req_full_gpu = xgpu_nv_request_full_gpu_access,
.rel_full_gpu = xgpu_nv_release_full_gpu_access,
@@ -548,4 +582,5 @@ const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.req_ras_err_count = xgpu_nv_req_ras_err_count,
.req_ras_cper_dump = xgpu_nv_req_ras_cper_dump,
.req_bad_pages = xgpu_nv_req_ras_bad_pages,
+ .req_ras_chk_criti = xgpu_nv_check_vf_critical_region
};
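xgpu_nv_check_vf_critical_region splits the 64-bit address across two 32-bit mailbox parameters; the receiving side is presumably expected to reassemble it as:

	/* illustrative only; the host-side code is not part of this patch */
	static inline u64 mbox_params_to_addr(u32 addr_hi, u32 addr_lo)
	{
		return ((u64)addr_hi << 32) | addr_lo;
	}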
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
index 5808689562cc..c1083e5e41e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
@@ -43,6 +43,7 @@ enum idh_request {
IDH_REQ_RAS_ERROR_COUNT = 203,
IDH_REQ_RAS_CPER_DUMP = 204,
IDH_REQ_RAS_BAD_PAGES = 205,
+ IDH_REQ_RAS_CHK_CRITI = 206
};
enum idh_event {
@@ -62,6 +63,7 @@ enum idh_event {
IDH_RAS_BAD_PAGES_READY = 15,
IDH_RAS_BAD_PAGES_NOTIFICATION = 16,
IDH_UNRECOV_ERR_NOTIFICATION = 17,
+ IDH_REQ_RAS_CHK_CRITI_READY = 18,
IDH_TEXT_MESSAGE = 255,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index d5002ff931d8..860bc5cb03c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -151,9 +151,9 @@ static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instan
* BIF_SDMA0_DOORBELL_RANGE: 0x3bc0
* BIF_SDMA1_DOORBELL_RANGE: 0x3bc4
* BIF_SDMA2_DOORBELL_RANGE: 0x3bd8
-+ * BIF_SDMA4_DOORBELL_RANGE:
-+ * ARCTURUS: 0x3be0
-+ * ALDEBARAN: 0x3be4
+ * BIF_SDMA4_DOORBELL_RANGE:
+ * ARCTURUS: 0x3be0
+ * ALDEBARAN: 0x3be4
*/
if (adev->asic_type == CHIP_ALDEBARAN && instance == 4)
reg = instance + 0x4 + 0x1 +
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.h b/drivers/gpu/drm/amd/amdgpu/nv.h
index 83e9782aef39..8f4817404f10 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/nv.h
@@ -31,5 +31,6 @@ extern const struct amdgpu_ip_block_version nv_common_ip_block;
void nv_grbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
void nv_set_virt_ops(struct amdgpu_device *adev);
+int cyan_skillfish_reg_base_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 6cc05d36e359..64b240b51f1a 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -149,12 +149,12 @@ static int psp_v11_0_wait_for_bootloader(struct psp_context *psp)
int ret;
int retry_loop;
- for (retry_loop = 0; retry_loop < 10; retry_loop++) {
+ for (retry_loop = 0; retry_loop < 20; retry_loop++) {
/* Wait for the bootloader to signal that it is
ready by setting bit 31 of C2PMSG_35 to 1 */
ret = psp_wait_for(
psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
- 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE);
+ 0x80000000, 0x8000FFFF, PSP_WAITREG_NOVERBOSE);
if (ret == 0)
return 0;
@@ -397,18 +397,6 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp)
msleep(500);
- offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
-
- ret = psp_wait_for(psp, offset, MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK,
- 0);
-
- if (ret) {
- DRM_INFO("psp mode 1 reset failed!\n");
- return -EINVAL;
- }
-
- DRM_INFO("psp mode1 reset succeed \n");
-
return 0;
}
@@ -665,7 +653,8 @@ static const struct psp_funcs psp_v11_0_funcs = {
.ring_get_wptr = psp_v11_0_ring_get_wptr,
.ring_set_wptr = psp_v11_0_ring_set_wptr,
.load_usbc_pd_fw = psp_v11_0_load_usbc_pd_fw,
- .read_usbc_pd_fw = psp_v11_0_read_usbc_pd_fw
+ .read_usbc_pd_fw = psp_v11_0_read_usbc_pd_fw,
+ .wait_for_bootloader = psp_v11_0_wait_for_bootloader
};
void psp_v11_0_set_psp_funcs(struct psp_context *psp)
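Widening the wait mask from 0x80000000 to 0x8000FFFF makes the bootloader poll require not only that the ready bit is set but also that the low 16 status bits are clear, assuming psp_wait_for succeeds when (reg & mask) == value. The predicate, as a sketch:

	static bool psp_bl_ready(u32 c2pmsg_35)
	{
		/* bit 31 set, low 16 "status" bits zero */
		return (c2pmsg_35 & 0x8000FFFF) == 0x80000000;
	}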
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index e6d8eddda2bf..db6e41967f12 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -1377,7 +1377,7 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
case IP_VERSION(6, 0, 0):
- if ((adev->sdma.instance[0].fw_version >= 24) && !adev->sdma.disable_uq)
+ if ((adev->sdma.instance[0].fw_version >= 27) && !adev->sdma.disable_uq)
adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
break;
case IP_VERSION(6, 0, 1):
@@ -1385,11 +1385,11 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
break;
case IP_VERSION(6, 0, 2):
- if ((adev->sdma.instance[0].fw_version >= 21) && !adev->sdma.disable_uq)
+ if ((adev->sdma.instance[0].fw_version >= 23) && !adev->sdma.disable_uq)
adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
break;
case IP_VERSION(6, 0, 3):
- if ((adev->sdma.instance[0].fw_version >= 25) && !adev->sdma.disable_uq)
+ if ((adev->sdma.instance[0].fw_version >= 27) && !adev->sdma.disable_uq)
adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
break;
case IP_VERSION(6, 1, 0):
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
index b8b06d4c5882..326ecc8d37d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
@@ -1353,7 +1353,7 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
case IP_VERSION(7, 0, 0):
case IP_VERSION(7, 0, 1):
- if ((adev->sdma.instance[0].fw_version >= 7836028) && !adev->sdma.disable_uq)
+ if ((adev->sdma.instance[0].fw_version >= 7966358) && !adev->sdma.disable_uq)
adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
index dd2d66090d23..68aef47254a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
@@ -743,7 +743,7 @@ int smu_v11_0_i2c_control_init(struct amdgpu_device *adev)
adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
- res = i2c_add_adapter(control);
+ res = devm_i2c_add_adapter(adev->dev, control);
if (res)
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
@@ -752,9 +752,6 @@ int smu_v11_0_i2c_control_init(struct amdgpu_device *adev)
void smu_v11_0_i2c_control_fini(struct amdgpu_device *adev)
{
- struct i2c_adapter *control = adev->pm.ras_eeprom_i2c_bus;
-
- i2c_del_adapter(control);
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
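With devm_i2c_add_adapter() the adapter is unregistered automatically when the underlying device is unbound, which is why the explicit i2c_del_adapter() disappears from the fini path. The general shape of the pattern, independent of this driver:

	#include <linux/i2c.h>

	static int register_bus(struct device *dev, struct i2c_adapter *adap)
	{
		/* deleted automatically on driver unbind; no fini-side call */
		return devm_i2c_add_adapter(dev, adap);
	}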
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index c457be3a3c56..9785fada4fa7 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -741,7 +741,6 @@ static void soc15_reg_base_init(struct amdgpu_device *adev)
void soc15_set_virt_ops(struct amdgpu_device *adev)
{
adev->virt.ops = &xgpu_ai_virt_ops;
-
/* init soc15 reg base early enough so we can
* request full access for sriov before
* set_ip_blocks. */
@@ -1218,6 +1217,8 @@ static int soc15_common_early_init(struct amdgpu_ip_block *ip_block)
AMD_PG_SUPPORT_JPEG;
/* TODO: need a new external_rev_id for GC 9.4.4? */
adev->external_rev_id = adev->rev_id + 0x46;
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
+ adev->external_rev_id = adev->rev_id + 0x50;
break;
default:
/* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
index e590cbdd8de9..8dc32787d625 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
@@ -536,8 +536,11 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID);
mcatype = REG_GET_FIELD(ipid, MCMP1_IPIDT0, McaType);
- if ((hwid != MCA_UMC_HWID_V12_0) || (mcatype != MCA_UMC_MCATYPE_V12_0))
+ /* The IP block decode of consumption is SMU */
+ if (hwid != MCA_UMC_HWID_V12_0 || mcatype != MCA_UMC_MCATYPE_V12_0) {
+ con->umc_ecc_log.consumption_q_count++;
return 0;
+ }
if (!status)
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
index 5dbaebb592b3..2e79a3afc774 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
@@ -623,7 +623,22 @@ static void uvd_v3_1_enable_mgcg(struct amdgpu_device *adev,
*
* @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
- * Initialize the hardware, boot up the VCPU and do some testing
+ * Initialize the hardware, boot up the VCPU and do some testing.
+ *
+ * On SI, the UVD is meant to be used in a specific power state,
+ * or alternatively the driver can manually enable its clock.
+ * In amdgpu we use the dedicated UVD power state when DPM is enabled.
+ * Calling amdgpu_dpm_enable_uvd makes DPM select the UVD power state
+ * for the SMU and afterwards enables the UVD clock.
+ * This is automatically done by amdgpu_uvd_ring_begin_use when work
+ * is submitted to the UVD ring. Here, we have to call it manually
+ * in order to power up UVD before firmware validation.
+ *
+ * Note that we must not disable the UVD clock here, as that would
+ * cause the ring test to fail. However, UVD is powered off
+ * automatically after the ring test: amdgpu_uvd_ring_end_use calls
+ * the UVD idle work handler which will disable the UVD clock when
+ * all fences are signalled.
*/
static int uvd_v3_1_hw_init(struct amdgpu_ip_block *ip_block)
{
@@ -633,6 +648,15 @@ static int uvd_v3_1_hw_init(struct amdgpu_ip_block *ip_block)
int r;
uvd_v3_1_mc_resume(adev);
+ uvd_v3_1_enable_mgcg(adev, true);
+
+ /* Make sure UVD is powered during FW validation.
+ * It's going to be automatically powered off after the ring test.
+ */
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, true);
+ else
+ amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
r = uvd_v3_1_fw_validate(adev);
if (r) {
@@ -640,9 +664,6 @@ static int uvd_v3_1_hw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- uvd_v3_1_enable_mgcg(adev, true);
- amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
-
uvd_v3_1_start(adev);
r = amdgpu_ring_test_helper(ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 1c07b701d0e4..ceb94bbb03a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -217,7 +217,8 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ AMDGPU_IB_POOL_DIRECT, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -281,7 +282,8 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ AMDGPU_IB_POOL_DIRECT, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 9d237b5937fb..1f8866f3f63c 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -225,7 +225,8 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, u32 handle,
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ AMDGPU_IB_POOL_DIRECT, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -288,7 +289,8 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, u32 handle,
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ AMDGPU_IB_POOL_DIRECT, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index c74947705d77..a316797875a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -193,7 +193,7 @@ static int vcn_v1_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.inst[0].pause_dpg_mode = vcn_v1_0_pause_dpg_mode;
if (amdgpu_vcnfw_log) {
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
fw_shared->present_flag_0 = 0;
amdgpu_vcn_fwlog_init(adev->vcn.inst);
@@ -230,11 +230,11 @@ static int vcn_v1_0_sw_fini(struct amdgpu_ip_block *ip_block)
jpeg_v1_0_sw_fini(ip_block);
- r = amdgpu_vcn_sw_fini(adev, 0);
+ amdgpu_vcn_sw_fini(adev, 0);
kfree(adev->vcn.ip_dump);
- return r;
+ return 0;
}
/**
@@ -1338,7 +1338,6 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
- ring = &adev->vcn.inst->ring_dec;
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
@@ -1399,7 +1398,6 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
- ring = &adev->vcn.inst->ring_dec;
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 68b4371df0f1..8897dcc9c1a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -136,10 +136,8 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int i, r;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0);
- uint32_t *ptr;
struct amdgpu_device *adev = ip_block->adev;
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
/* VCN DEC TRAP */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
@@ -232,14 +230,9 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(adev->vcn.inst);
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_2_0, ARRAY_SIZE(vcn_reg_list_2_0));
+ if (r)
+ return r;
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
if (r)
@@ -259,7 +252,7 @@ static int vcn_v2_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int r, idx;
struct amdgpu_device *adev = ip_block->adev;
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
fw_shared->present_flag_0 = 0;
@@ -274,11 +267,9 @@ static int vcn_v2_0_sw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_vcn_sysfs_reset_mask_fini(adev);
- r = amdgpu_vcn_sw_fini(adev, 0);
-
- kfree(adev->vcn.ip_dump);
+ amdgpu_vcn_sw_fini(adev, 0);
- return r;
+ return 0;
}
/**
@@ -862,9 +853,10 @@ static void vcn_v2_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
static int vcn_v2_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
struct amdgpu_device *adev = vinst->adev;
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
+ int ret;
vcn_v2_0_enable_static_power_gating(vinst);
@@ -948,8 +940,13 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
UVD, 0, mmUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, 0, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, 0, 0);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
/* force RBC into idle state */
rb_bufsz = order_base_2(ring->ring_size);
@@ -1004,7 +1001,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
static int vcn_v2_0_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
uint32_t lmi_swap_cntl;
@@ -1311,7 +1308,7 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
if (!ret_code) {
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
/* pause DPG */
reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
@@ -2095,66 +2092,6 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table);
}
-static void vcn_v2_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_2_0[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v2_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_2_0[j], i));
- }
-}
-
static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
.name = "vcn_v2_0",
.early_init = vcn_v2_0_early_init,
@@ -2168,8 +2105,8 @@ static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
.wait_for_idle = vcn_v2_0_wait_for_idle,
.set_clockgating_state = vcn_v2_0_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v2_0_dump_ip_state,
- .print_ip_state = vcn_v2_0_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
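The dump/print pair deleted above is replaced by common amdgpu_vcn helpers seeded by amdgpu_vcn_reg_dump_init(). A sketch of the presumed dump core, reconstructed from the removed vcn_v2_0_dump_ip_state; the reg_list/reg_count fields are assumptions about where init stashes the list:

	void amdgpu_vcn_dump_ip_state(struct amdgpu_ip_block *ip_block)
	{
		struct amdgpu_device *adev = ip_block->adev;
		uint32_t inst_off, j;
		int i;

		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;
			inst_off = i * adev->vcn.reg_count;
			/* UVD_POWER_STATUS is always readable and sits first */
			adev->vcn.ip_dump[inst_off] =
				RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
			if ((adev->vcn.ip_dump[inst_off] &
			     UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) == 1)
				continue;	/* powered off: skip the rest */
			for (j = 1; j < adev->vcn.reg_count; j++)
				adev->vcn.ip_dump[inst_off + j] = RREG32(
					SOC15_REG_ENTRY_OFFSET_INST(adev->vcn.reg_list[j], i));
		}
	}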
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index bc30a5326866..cebee453871c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -116,7 +116,6 @@ static void vcn_v2_5_idle_work_handler(struct work_struct *work)
struct amdgpu_device *adev = vcn_inst->adev;
unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
unsigned int i, j;
- int r = 0;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
struct amdgpu_vcn_inst *v = &adev->vcn.inst[i];
@@ -149,15 +148,7 @@ static void vcn_v2_5_idle_work_handler(struct work_struct *work)
if (!fences && !atomic_read(&adev->vcn.inst[0].total_submission_cnt)) {
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_GATE);
- mutex_lock(&adev->vcn.workload_profile_mutex);
- if (adev->vcn.workload_profile_active) {
- r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
- false);
- if (r)
- dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
- adev->vcn.workload_profile_active = false;
- }
- mutex_unlock(&adev->vcn.workload_profile_mutex);
+ amdgpu_vcn_put_profile(adev);
} else {
schedule_delayed_work(&adev->vcn.inst[0].idle_work, VCN_IDLE_TIMEOUT);
}
@@ -167,7 +158,6 @@ static void vcn_v2_5_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vcn_inst *v = &adev->vcn.inst[ring->me];
- int r = 0;
atomic_inc(&adev->vcn.inst[0].total_submission_cnt);
@@ -177,20 +167,6 @@ static void vcn_v2_5_ring_begin_use(struct amdgpu_ring *ring)
* the delayed work so there is no one else to set it to false
* and we don't care if someone else sets it to true.
*/
- if (adev->vcn.workload_profile_active)
- goto pg_lock;
-
- mutex_lock(&adev->vcn.workload_profile_mutex);
- if (!adev->vcn.workload_profile_active) {
- r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
- true);
- if (r)
- dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
- adev->vcn.workload_profile_active = true;
- }
- mutex_unlock(&adev->vcn.workload_profile_mutex);
-
-pg_lock:
mutex_lock(&adev->vcn.inst[0].vcn_pg_lock);
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_UNGATE);
@@ -218,6 +194,7 @@ pg_lock:
v->pause_dpg_mode(v, &new_state);
}
mutex_unlock(&adev->vcn.inst[0].vcn_pg_lock);
+ amdgpu_vcn_get_profile(adev);
}
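[Note: the mutex-guarded profile switching deleted above is now centralized in amdgpu_vcn_get_profile()/amdgpu_vcn_put_profile(). A plausible sketch of those helpers, reconstructed purely from the removed lines — the actual upstream bodies may differ:]
/* Sketch only, inferred from the deleted open-coded logic: get enables
 * the VIDEO power profile on first use, put disables it when idle.
 */
void amdgpu_vcn_get_profile(struct amdgpu_device *adev)
{
	int r;

	mutex_lock(&adev->vcn.workload_profile_mutex);
	if (!adev->vcn.workload_profile_active) {
		r = amdgpu_dpm_switch_power_profile(adev,
				PP_SMC_POWER_PROFILE_VIDEO, true);
		if (r)
			dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
		adev->vcn.workload_profile_active = true;
	}
	mutex_unlock(&adev->vcn.workload_profile_mutex);
}

void amdgpu_vcn_put_profile(struct amdgpu_device *adev)
{
	int r;

	mutex_lock(&adev->vcn.workload_profile_mutex);
	if (adev->vcn.workload_profile_active) {
		r = amdgpu_dpm_switch_power_profile(adev,
				PP_SMC_POWER_PROFILE_VIDEO, false);
		if (r)
			dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
		adev->vcn.workload_profile_active = false;
	}
	mutex_unlock(&adev->vcn.workload_profile_mutex);
}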
static void vcn_v2_5_ring_end_use(struct amdgpu_ring *ring)
@@ -297,12 +274,10 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int i, j, r;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
- uint32_t *ptr;
struct amdgpu_device *adev = ip_block->adev;
for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << j))
continue;
@@ -423,14 +398,9 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_2_5, ARRAY_SIZE(vcn_reg_list_2_5));
+ if (r)
+ return r;
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
if (r)
@@ -450,7 +420,7 @@ static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block)
{
int i, r, idx;
struct amdgpu_device *adev = ip_block->adev;
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
@@ -472,13 +442,9 @@ static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block)
r = amdgpu_vcn_suspend(adev, i);
if (r)
return r;
- r = amdgpu_vcn_sw_fini(adev, i);
- if (r)
- return r;
+ amdgpu_vcn_sw_fini(adev, i);
}
- kfree(adev->vcn.ip_dump);
-
return 0;
}
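[Note: each per-version kcalloc of the dump buffer is folded into the common amdgpu_vcn_reg_dump_init(). A minimal sketch of the helper, inferred from the code it replaces; the reg_list/reg_count fields are assumptions about the shared state, not a verified upstream layout:]
/* Sketch: allocate one dump slot per register per instance and record
 * the register list so the shared amdgpu_vcn_dump_ip_state() /
 * amdgpu_vcn_print_ip_state() callbacks can walk it later.
 */
int amdgpu_vcn_reg_dump_init(struct amdgpu_device *adev,
			     const struct amdgpu_hwip_reg_entry *reg, u32 count)
{
	adev->vcn.ip_dump = kcalloc(adev->vcn.num_vcn_inst * count,
				    sizeof(u32), GFP_KERNEL);
	if (!adev->vcn.ip_dump)
		return -ENOMEM;
	adev->vcn.reg_list = reg;	/* assumed field name */
	adev->vcn.reg_count = count;	/* assumed field name */
	return 0;
}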
@@ -1032,9 +998,10 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
+ int ret;
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
@@ -1125,8 +1092,13 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
VCN, 0, mmUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_dec;
/* force RBC into idle state */
@@ -1183,7 +1155,7 @@ static int vcn_v2_5_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_fw_shared *fw_shared =
+ struct amdgpu_fw_shared *fw_shared =
adev->vcn.inst[i].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
@@ -1695,7 +1667,7 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
if (!ret_code) {
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
/* pause DPG */
reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
@@ -2127,66 +2099,6 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
}
}
-static void vcn_v2_5_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_2_5[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v2_5_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_2_5[j], i));
- }
-}
-
static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
.name = "vcn_v2_5",
.early_init = vcn_v2_5_early_init,
@@ -2200,8 +2112,8 @@ static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
.wait_for_idle = vcn_v2_5_wait_for_idle,
.set_clockgating_state = vcn_v2_5_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v2_5_dump_ip_state,
- .print_ip_state = vcn_v2_5_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
@@ -2217,8 +2129,8 @@ static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
.wait_for_idle = vcn_v2_5_wait_for_idle,
.set_clockgating_state = vcn_v2_5_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v2_5_dump_ip_state,
- .print_ip_state = vcn_v2_5_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 4b8f4407047f..d9cf8f0feeb3 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -175,8 +175,6 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_ring *ring;
int i, j, r;
int vcn_doorbell_index = 0;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
- uint32_t *ptr;
struct amdgpu_device *adev = ip_block->adev;
/*
@@ -193,7 +191,7 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
}
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -304,14 +302,9 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_3_0, ARRAY_SIZE(vcn_reg_list_3_0));
+ if (r)
+ return r;
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
if (r)
@@ -334,7 +327,7 @@ static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -356,12 +349,9 @@ static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- r = amdgpu_vcn_sw_fini(adev, i);
- if (r)
- return r;
+ amdgpu_vcn_sw_fini(adev, i);
}
- kfree(adev->vcn.ip_dump);
return 0;
}
@@ -1039,9 +1029,10 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
+ int ret;
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
@@ -1134,8 +1125,13 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_dec;
/* force RBC into idle state */
@@ -1198,7 +1194,7 @@ static int vcn_v3_0_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
int j, k, r;
@@ -1719,7 +1715,7 @@ static int vcn_v3_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t reg_data = 0;
int ret_code;
@@ -1838,7 +1834,7 @@ static uint64_t vcn_v3_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
/*whenever update RBC_RB_WPTR, we save the wptr in shared rb.wptr and scratch2 */
@@ -1888,15 +1884,19 @@ static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p,
struct amdgpu_job *job)
{
struct drm_gpu_scheduler **scheds;
-
- /* The create msg must be in the first IB submitted */
- if (atomic_read(&job->base.entity->fence_seq))
- return -EINVAL;
+ struct dma_fence *fence;
/* if VCN0 is harvested, we can't support AV1 */
if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
return -EINVAL;
+ /* wait for all jobs to finish before switching to instance 0 */
+ fence = amdgpu_ctx_get_fence(p->ctx, job->base.entity, ~0ull);
+ if (fence) {
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+ }
+
scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
[AMDGPU_RING_PRIO_DEFAULT].sched;
drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
@@ -2342,67 +2342,6 @@ static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev)
}
}
-static void vcn_v3_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
- uint32_t inst_off;
- bool is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_3_0[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v3_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_3_0[j], i));
- }
-}
-
static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
.name = "vcn_v3_0",
.early_init = vcn_v3_0_early_init,
@@ -2416,8 +2355,8 @@ static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
.wait_for_idle = vcn_v3_0_wait_for_idle,
.set_clockgating_state = vcn_v3_0_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v3_0_dump_ip_state,
- .print_ip_state = vcn_v3_0_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v3_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 1924e075b66f..3ae666522d57 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -148,7 +148,7 @@ static int vcn_v4_0_early_init(struct amdgpu_ip_block *ip_block)
static int vcn_v4_0_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
{
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
@@ -183,8 +183,6 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_ring *ring;
struct amdgpu_device *adev = ip_block->adev;
int i, r;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
- uint32_t *ptr;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
if (adev->vcn.harvest_config & (1 << i))
@@ -255,14 +253,9 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_4_0, ARRAY_SIZE(vcn_reg_list_4_0));
+ if (r)
+ return r;
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
if (r)
@@ -285,7 +278,7 @@ static int vcn_v4_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -309,13 +302,8 @@ static int vcn_v4_0_sw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_vcn_sysfs_reset_mask_fini(adev);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- r = amdgpu_vcn_sw_fini(adev, i);
- if (r)
- return r;
- }
-
- kfree(adev->vcn.ip_dump);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ amdgpu_vcn_sw_fini(adev, i);
return 0;
}
@@ -1009,9 +997,10 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t tmp;
+ int ret;
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
@@ -1094,8 +1083,13 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
@@ -1143,7 +1137,7 @@ static int vcn_v4_0_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
int j, k, r;
@@ -1360,8 +1354,8 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
struct mmsch_v4_0_cmd_end end = { {0} };
struct mmsch_v4_0_init_header header;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
- volatile struct amdgpu_fw_shared_rb_setup *rb_setup;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_fw_shared_rb_setup *rb_setup;
direct_wt.cmd_header.command_type =
MMSCH_COMMAND__DIRECT_REG_WRITE;
@@ -1612,7 +1606,7 @@ static int vcn_v4_0_stop(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
uint32_t tmp;
int r = 0;
@@ -1624,7 +1618,6 @@ static int vcn_v4_0_stop(struct amdgpu_vcn_inst *vinst)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
vcn_v4_0_stop_dpg_mode(vinst);
- r = 0;
goto done;
}
@@ -1808,15 +1801,19 @@ static int vcn_v4_0_limit_sched(struct amdgpu_cs_parser *p,
struct amdgpu_job *job)
{
struct drm_gpu_scheduler **scheds;
-
- /* The create msg must be in the first IB submitted */
- if (atomic_read(&job->base.entity->fence_seq))
- return -EINVAL;
+ struct dma_fence *fence;
/* if VCN0 is harvested, we can't support AV1 */
if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
return -EINVAL;
+ /* wait for all jobs to finish before switching to instance 0 */
+ fence = amdgpu_ctx_get_fence(p->ctx, job->base.entity, ~0ull);
+ if (fence) {
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+ }
+
scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_ENC]
[AMDGPU_RING_PRIO_0].sched;
drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
@@ -1907,22 +1904,16 @@ out:
#define RADEON_VCN_ENGINE_TYPE_ENCODE (0x00000002)
#define RADEON_VCN_ENGINE_TYPE_DECODE (0x00000003)
-
#define RADEON_VCN_ENGINE_INFO (0x30000001)
-#define RADEON_VCN_ENGINE_INFO_MAX_OFFSET 16
-
#define RENCODE_ENCODE_STANDARD_AV1 2
#define RENCODE_IB_PARAM_SESSION_INIT 0x00000003
-#define RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET 64
-/* return the offset in ib if id is found, -1 otherwise
- * to speed up the searching we only search upto max_offset
- */
-static int vcn_v4_0_enc_find_ib_param(struct amdgpu_ib *ib, uint32_t id, int max_offset)
+/* return the offset in ib if id is found, -1 otherwise */
+static int vcn_v4_0_enc_find_ib_param(struct amdgpu_ib *ib, uint32_t id, int start)
{
int i;
- for (i = 0; i < ib->length_dw && i < max_offset && ib->ptr[i] >= 8; i += ib->ptr[i]/4) {
+ for (i = start; i < ib->length_dw && ib->ptr[i] >= 8; i += ib->ptr[i] / 4) {
if (ib->ptr[i + 1] == id)
return i;
}
@@ -1937,33 +1928,29 @@ static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
struct amdgpu_vcn_decode_buffer *decode_buffer;
uint64_t addr;
uint32_t val;
- int idx;
+ int idx = 0, sidx;
/* The first instance can decode anything */
if (!ring->me)
return 0;
- /* RADEON_VCN_ENGINE_INFO is at the top of ib block */
- idx = vcn_v4_0_enc_find_ib_param(ib, RADEON_VCN_ENGINE_INFO,
- RADEON_VCN_ENGINE_INFO_MAX_OFFSET);
- if (idx < 0) /* engine info is missing */
- return 0;
-
- val = amdgpu_ib_get_value(ib, idx + 2); /* RADEON_VCN_ENGINE_TYPE */
- if (val == RADEON_VCN_ENGINE_TYPE_DECODE) {
- decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[idx + 6];
-
- if (!(decode_buffer->valid_buf_flag & 0x1))
- return 0;
-
- addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
- decode_buffer->msg_buffer_address_lo;
- return vcn_v4_0_dec_msg(p, job, addr);
- } else if (val == RADEON_VCN_ENGINE_TYPE_ENCODE) {
- idx = vcn_v4_0_enc_find_ib_param(ib, RENCODE_IB_PARAM_SESSION_INIT,
- RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET);
- if (idx >= 0 && ib->ptr[idx + 2] == RENCODE_ENCODE_STANDARD_AV1)
- return vcn_v4_0_limit_sched(p, job);
+ while ((idx = vcn_v4_0_enc_find_ib_param(ib, RADEON_VCN_ENGINE_INFO, idx)) >= 0) {
+ val = amdgpu_ib_get_value(ib, idx + 2); /* RADEON_VCN_ENGINE_TYPE */
+ if (val == RADEON_VCN_ENGINE_TYPE_DECODE) {
+ decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[idx + 6];
+
+ if (!(decode_buffer->valid_buf_flag & 0x1))
+ return 0;
+
+ addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
+ decode_buffer->msg_buffer_address_lo;
+ return vcn_v4_0_dec_msg(p, job, addr);
+ } else if (val == RADEON_VCN_ENGINE_TYPE_ENCODE) {
+ sidx = vcn_v4_0_enc_find_ib_param(ib, RENCODE_IB_PARAM_SESSION_INIT, idx);
+ if (sidx >= 0 && ib->ptr[sidx + 2] == RENCODE_ENCODE_STANDARD_AV1)
+ return vcn_v4_0_limit_sched(p, job);
+ }
+ idx += ib->ptr[idx] / 4;
}
return 0;
}
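[Note: the rewritten parser above visits every RADEON_VCN_ENGINE_INFO packet instead of only the first one. The traversal relies on each packet starting with its own size in bytes at ib->ptr[i], so stepping by ib->ptr[i] / 4 dwords lands on the next packet. A stripped-down illustration of that walk over a hypothetical buffer, not driver code:]
/* Toy version of the length-prefixed packet walk used above:
 * pkt[i] holds the packet size in bytes, pkt[i + 1] holds its id.
 */
static int find_packet(const u32 *pkt, int len_dw, u32 id, int start)
{
	int i;

	for (i = start; i < len_dw && pkt[i] >= 8; i += pkt[i] / 4)
		if (pkt[i + 1] == id)
			return i;	/* dword offset of the match */
	return -1;			/* not found */
}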
@@ -1990,7 +1977,7 @@ static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
.nop = VCN_ENC_CMD_NO_OP,
- .extra_dw = sizeof(struct amdgpu_vcn_rb_metadata),
+ .extra_bytes = sizeof(struct amdgpu_vcn_rb_metadata),
.get_rptr = vcn_v4_0_unified_ring_get_rptr,
.get_wptr = vcn_v4_0_unified_ring_get_wptr,
.set_wptr = vcn_v4_0_unified_ring_set_wptr,
@@ -2246,67 +2233,6 @@ static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev)
}
}
-static void vcn_v4_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v4_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0[j],
- i));
- }
-}
-
static const struct amd_ip_funcs vcn_v4_0_ip_funcs = {
.name = "vcn_v4_0",
.early_init = vcn_v4_0_early_init,
@@ -2320,8 +2246,8 @@ static const struct amd_ip_funcs vcn_v4_0_ip_funcs = {
.wait_for_idle = vcn_v4_0_wait_for_idle,
.set_clockgating_state = vcn_v4_0_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v4_0_dump_ip_state,
- .print_ip_state = vcn_v4_0_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v4_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 2a3663b551af..eacf4e93ba2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -134,6 +134,19 @@ static int vcn_v4_0_3_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
+static int vcn_v4_0_3_late_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+
+ if (amdgpu_dpm_reset_vcn_is_supported(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+
+ return 0;
+}
+
static int vcn_v4_0_3_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
{
struct amdgpu_vcn4_fw_shared *fw_shared;
@@ -160,8 +173,6 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int i, r, vcn_inst;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
- uint32_t *ptr;
/* VCN DEC TRAP */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
@@ -201,7 +212,11 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id);
- r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
+
+ /* There are no per-instance irq source IDs on 4.0.3; the IH
+ * packets use a separate field to differentiate instances.
+ */
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[0].irq, 0,
AMDGPU_RING_PRIO_DEFAULT,
&adev->vcn.inst[i].sched_score);
if (r)
@@ -213,10 +228,6 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.inst[i].pause_dpg_mode = vcn_v4_0_3_pause_dpg_mode;
}
- /* TODO: Add queue reset mask when FW fully supports it */
- adev->vcn.supported_reset =
- amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
-
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
@@ -231,20 +242,11 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
}
}
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
-
- r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_4_0_3, ARRAY_SIZE(vcn_reg_list_4_0_3));
if (r)
return r;
- return 0;
+ return amdgpu_vcn_sysfs_reset_mask_init(adev);
}
/**
@@ -261,7 +263,7 @@ static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(&adev->ddev, &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
fw_shared->present_flag_0 = 0;
@@ -281,13 +283,8 @@ static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_vcn_sysfs_reset_mask_fini(adev);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- r = amdgpu_vcn_sw_fini(adev, i);
- if (r)
- return r;
- }
-
- kfree(adev->vcn.ip_dump);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ amdgpu_vcn_sw_fini(adev, i);
return 0;
}
@@ -391,7 +388,7 @@ static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
}
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN) && !amdgpu_sriov_vf(adev))
amdgpu_irq_put(adev, &adev->vcn.inst->ras_poison_irq, 0);
return 0;
@@ -848,10 +845,10 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared =
+ struct amdgpu_vcn4_fw_shared *fw_shared =
adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
- int vcn_inst;
+ int vcn_inst, ret;
uint32_t tmp;
vcn_inst = GET_INST(VCN, inst_idx);
@@ -944,8 +941,13 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
VCN, 0, regUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
@@ -1010,8 +1012,8 @@ static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev)
struct mmsch_v4_0_cmd_end end = { {0} };
struct mmsch_v4_0_3_init_header header;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
- volatile struct amdgpu_fw_shared_rb_setup *rb_setup;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_fw_shared_rb_setup *rb_setup;
direct_wt.cmd_header.command_type =
MMSCH_COMMAND__DIRECT_REG_WRITE;
@@ -1185,7 +1187,7 @@ static int vcn_v4_0_3_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_ring *ring;
int j, k, r, vcn_inst;
uint32_t tmp;
@@ -1395,7 +1397,7 @@ static int vcn_v4_0_3_stop(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
int r = 0, vcn_inst;
uint32_t tmp;
@@ -1872,71 +1874,10 @@ static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
adev->vcn.inst->ras_poison_irq.funcs = &vcn_v4_0_3_ras_irq_funcs;
}
-static void vcn_v4_0_3_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0_3[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v4_0_3_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off, inst_id;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_id = GET_INST(VCN, i);
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, inst_id, regUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0_3[j],
- inst_id));
- }
-}
-
static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
.name = "vcn_v4_0_3",
.early_init = vcn_v4_0_3_early_init,
+ .late_init = vcn_v4_0_3_late_init,
.sw_init = vcn_v4_0_3_sw_init,
.sw_fini = vcn_v4_0_3_sw_fini,
.hw_init = vcn_v4_0_3_hw_init,
@@ -1947,8 +1888,8 @@ static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
.wait_for_idle = vcn_v4_0_3_wait_for_idle,
.set_clockgating_state = vcn_v4_0_3_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v4_0_3_dump_ip_state,
- .print_ip_state = vcn_v4_0_3_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index caf2d95a85d4..b107ee80e472 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -147,12 +147,9 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_ring *ring;
struct amdgpu_device *adev = ip_block->adev;
int i, r;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
- uint32_t *ptr;
-
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -233,15 +230,9 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
- return 0;
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_4_0_5, ARRAY_SIZE(vcn_reg_list_4_0_5));
+
+ return r;
}
/**
@@ -258,7 +249,7 @@ static int vcn_v4_0_5_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -279,13 +270,9 @@ static int vcn_v4_0_5_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- r = amdgpu_vcn_sw_fini(adev, i);
- if (r)
- return r;
+ amdgpu_vcn_sw_fini(adev, i);
}
- kfree(adev->vcn.ip_dump);
-
return 0;
}
@@ -923,9 +910,10 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t tmp;
+ int ret;
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
@@ -1006,8 +994,13 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
VCN, inst_idx, regUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
@@ -1054,7 +1047,7 @@ static int vcn_v4_0_5_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
int j, k, r;
@@ -1273,7 +1266,7 @@ static int vcn_v4_0_5_stop(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
uint32_t tmp;
int r = 0;
@@ -1596,7 +1589,7 @@ static int vcn_v4_0_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = ip_block->adev;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = state == AMD_CG_STATE_GATE;
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -1704,67 +1697,6 @@ static void vcn_v4_0_5_set_irq_funcs(struct amdgpu_device *adev)
}
}
-static void vcn_v4_0_5_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0_5[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v4_0_5_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0_5[j],
- i));
- }
-}
-
static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = {
.name = "vcn_v4_0_5",
.early_init = vcn_v4_0_5_early_init,
@@ -1778,8 +1710,8 @@ static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = {
.wait_for_idle = vcn_v4_0_5_wait_for_idle,
.set_clockgating_state = vcn_v4_0_5_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v4_0_5_dump_ip_state,
- .print_ip_state = vcn_v4_0_5_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v4_0_5_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
index 07a6e9582880..0202df5db1e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -115,21 +115,6 @@ static int vcn_v5_0_0_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
-void vcn_v5_0_0_alloc_ip_dump(struct amdgpu_device *adev)
-{
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
- uint32_t *ptr;
-
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
-}
-
/**
* vcn_v5_0_0_sw_init - sw init for VCN block
*
@@ -144,7 +129,7 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
int i, r;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -201,7 +186,9 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
if (!amdgpu_sriov_vf(adev))
adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
- vcn_v5_0_0_alloc_ip_dump(adev);
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_5_0, ARRAY_SIZE(vcn_reg_list_5_0));
+ if (r)
+ return r;
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
if (r)
@@ -224,7 +211,7 @@ static int vcn_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -245,13 +232,8 @@ static int vcn_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_vcn_sysfs_reset_mask_fini(adev);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- r = amdgpu_vcn_sw_fini(adev, i);
- if (r)
- return r;
- }
-
- kfree(adev->vcn.ip_dump);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ amdgpu_vcn_sw_fini(adev, i);
return 0;
}
@@ -710,9 +692,10 @@ static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
- volatile struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t tmp;
+ int ret;
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
@@ -766,8 +749,13 @@ static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
VCN, inst_idx, regUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (ret) {
+ dev_err(adev->dev, "%s: vcn sram load failed %d\n", __func__, ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
@@ -814,7 +802,7 @@ static int vcn_v5_0_0_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
int j, k, r;
@@ -1007,7 +995,7 @@ static int vcn_v5_0_0_stop(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
uint32_t tmp;
int r = 0;
@@ -1320,7 +1308,7 @@ static int vcn_v5_0_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = ip_block->adev;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = state == AMD_CG_STATE_GATE;
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -1428,67 +1416,6 @@ static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev)
}
}
-void vcn_v5_0_0_print_ip_state(struct amdgpu_ip_block *ip_block,
- struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_5_0[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-void vcn_v5_0_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_5_0[j], i));
- }
-}
-
static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
.name = "vcn_v5_0_0",
.early_init = vcn_v5_0_0_early_init,
@@ -1502,8 +1429,8 @@ static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
.wait_for_idle = vcn_v5_0_0_wait_for_idle,
.set_clockgating_state = vcn_v5_0_0_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v5_0_0_dump_ip_state,
- .print_ip_state = vcn_v5_0_0_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h
index b8927652bc50..51bbccd4360f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h
@@ -32,11 +32,6 @@
#define VCN_VID_IP_ADDRESS 0x0
#define VCN_AON_IP_ADDRESS 0x30000
-void vcn_v5_0_0_alloc_ip_dump(struct amdgpu_device *adev);
-void vcn_v5_0_0_print_ip_state(struct amdgpu_ip_block *ip_block,
- struct drm_printer *p);
-void vcn_v5_0_0_dump_ip_state(struct amdgpu_ip_block *ip_block);
-
extern const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block;
#endif /* __VCN_V5_0_0_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
index cdefd7fcb0da..714350cabf2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
@@ -40,6 +40,40 @@
#include <drm/drm_drv.h>
+static const struct amdgpu_hwip_reg_entry vcn_reg_list_5_0_1[] = {
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE)
+};
+
static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev);
static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
@@ -79,6 +113,25 @@ static int vcn_v5_0_1_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
+static int vcn_v5_0_1_late_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+
+ switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
+ case IP_VERSION(13, 0, 12):
+ if ((adev->psp.sos.fw_version >= 0x00450025) && amdgpu_dpm_reset_vcn_is_supported(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static void vcn_v5_0_1_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
{
struct amdgpu_vcn5_fw_shared *fw_shared;
@@ -153,17 +206,23 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
vcn_v5_0_1_fw_shared_init(adev, i);
}
- /* TODO: Add queue reset mask when FW fully supports it */
- adev->vcn.supported_reset =
- amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
-
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
return r;
}
- vcn_v5_0_0_alloc_ip_dump(adev);
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) {
+ r = amdgpu_vcn_ras_sw_init(adev);
+ if (r) {
+ dev_err(adev->dev, "Failed to initialize vcn ras block!\n");
+ return r;
+ }
+ }
+
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_5_0_1, ARRAY_SIZE(vcn_reg_list_5_0_1));
+ if (r)
+ return r;
return amdgpu_vcn_sysfs_reset_mask_init(adev);
}
@@ -182,7 +241,7 @@ static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
fw_shared->present_flag_0 = 0;
@@ -201,15 +260,27 @@ static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
return r;
}
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- r = amdgpu_vcn_sw_fini(adev, i);
- if (r)
- return r;
- }
-
amdgpu_vcn_sysfs_reset_mask_fini(adev);
- kfree(adev->vcn.ip_dump);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ amdgpu_vcn_sw_fini(adev, i);
+
+ return 0;
+}
+
+static int vcn_v5_0_1_hw_init_inst(struct amdgpu_device *adev, int i)
+{
+ struct amdgpu_ring *ring;
+ int vcn_inst;
+
+ vcn_inst = GET_INST(VCN, i);
+ ring = &adev->vcn.inst[i].ring_enc[0];
+
+ if (ring->use_doorbell)
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 11 * vcn_inst),
+ adev->vcn.inst[i].aid_id);
return 0;
}
@@ -225,7 +296,7 @@ static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
- int i, r, vcn_inst;
+ int i, r;
if (amdgpu_sriov_vf(adev)) {
r = vcn_v5_0_1_start_sriov(adev);
@@ -243,14 +314,8 @@ static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100)
adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- vcn_inst = GET_INST(VCN, i);
ring = &adev->vcn.inst[i].ring_enc[0];
-
- if (ring->use_doorbell)
- adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
- ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
- 11 * vcn_inst),
- adev->vcn.inst[i].aid_id);
+ vcn_v5_0_1_hw_init_inst(adev, i);
/* Re-init fw_shared, if required */
vcn_v5_0_1_fw_shared_init(adev, i);
@@ -284,7 +349,7 @@ static int vcn_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
}
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN) && !amdgpu_sriov_vf(adev))
amdgpu_irq_put(adev, &adev->vcn.inst->ras_poison_irq, 0);
return 0;
@@ -601,11 +666,11 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
- volatile struct amdgpu_vcn5_fw_shared *fw_shared =
+ struct amdgpu_vcn5_fw_shared *fw_shared =
adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__PAUSE};
- int vcn_inst;
+ int vcn_inst, ret;
uint32_t tmp;
vcn_inst = GET_INST(VCN, inst_idx);
@@ -666,8 +731,13 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
VCN, 0, regUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
/* resetting ring, fw should not check RB ring */
fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
@@ -732,8 +802,8 @@ static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev)
struct mmsch_v5_0_cmd_end end = { {0} };
struct mmsch_v5_0_init_header header;
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
- volatile struct amdgpu_fw_shared_rb_setup *rb_setup;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_fw_shared_rb_setup *rb_setup;
direct_wt.cmd_header.command_type =
MMSCH_COMMAND__DIRECT_REG_WRITE;
@@ -907,7 +977,7 @@ static int vcn_v5_0_1_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
int j, k, r, vcn_inst;
@@ -1099,7 +1169,7 @@ static int vcn_v5_0_1_stop(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
uint32_t tmp;
int r = 0, vcn_inst;
@@ -1229,6 +1299,31 @@ static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
+static int vcn_v5_0_1_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ int r = 0;
+ int vcn_inst;
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
+ vcn_inst = GET_INST(VCN, ring->me);
+ r = amdgpu_dpm_reset_vcn(adev, 1 << vcn_inst);
+
+ if (r) {
+ DRM_DEV_ERROR(adev->dev, "VCN reset failed: %d\n", r);
+ return r;
+ }
+
+ vcn_v5_0_1_hw_init_inst(adev, ring->me);
+ vcn_v5_0_1_start_dpg_mode(vinst, vinst->indirect_sram);
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
static const struct amdgpu_ring_funcs vcn_v5_0_1_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
@@ -1257,6 +1352,7 @@ static const struct amdgpu_ring_funcs vcn_v5_0_1_unified_ring_vm_funcs = {
.emit_wreg = vcn_v4_0_3_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v4_0_3_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = vcn_v5_0_1_ring_reset,
};
/**
@@ -1460,7 +1556,7 @@ static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev)
static const struct amd_ip_funcs vcn_v5_0_1_ip_funcs = {
.name = "vcn_v5_0_1",
.early_init = vcn_v5_0_1_early_init,
- .late_init = NULL,
+ .late_init = vcn_v5_0_1_late_init,
.sw_init = vcn_v5_0_1_sw_init,
.sw_fini = vcn_v5_0_1_sw_fini,
.hw_init = vcn_v5_0_1_hw_init,
@@ -1475,8 +1571,8 @@ static const struct amd_ip_funcs vcn_v5_0_1_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v5_0_1_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v5_0_0_dump_ip_state,
- .print_ip_state = vcn_v5_0_0_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block = {
@@ -1557,7 +1653,7 @@ static int vcn_v5_0_1_aca_bank_parser(struct aca_handle *handle, struct aca_bank
/* reference to smu driver if header file */
static int vcn_v5_0_1_err_codes[] = {
- 14, 15, /* VCN */
+ 14, 15, 47, /* VCN [D|V|S] */
};
static bool vcn_v5_0_1_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
@@ -1603,6 +1699,13 @@ static int vcn_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_commo
if (r)
goto late_fini;
+ if (amdgpu_ras_is_supported(adev, ras_block->block) &&
+ adev->vcn.inst->ras_poison_irq.funcs) {
+ r = amdgpu_irq_get(adev, &adev->vcn.inst->ras_poison_irq, 0);
+ if (r)
+ goto late_fini;
+ }
+
return 0;
late_fini:
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 9b3510e53112..a611a7345125 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -67,7 +67,6 @@
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
-#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
@@ -2124,8 +2123,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
- else
- amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
break;
@@ -2142,8 +2139,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
- else
- amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
@@ -2163,8 +2158,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
- else
- amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 828a9ceef1e7..0f0719528bcc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -521,15 +521,10 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
cu_mask_size = sizeof(uint32_t) * (max_num_cus/32);
}
- minfo.cu_mask.ptr = kzalloc(cu_mask_size, GFP_KERNEL);
- if (!minfo.cu_mask.ptr)
- return -ENOMEM;
-
- retval = copy_from_user(minfo.cu_mask.ptr, cu_mask_ptr, cu_mask_size);
- if (retval) {
+ minfo.cu_mask.ptr = memdup_user(cu_mask_ptr, cu_mask_size);
+ if (IS_ERR(minfo.cu_mask.ptr)) {
pr_debug("Could not copy CU mask from userspace");
- retval = -EFAULT;
- goto out;
+ return PTR_ERR(minfo.cu_mask.ptr);
}
mutex_lock(&p->mutex);
@@ -538,7 +533,6 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
mutex_unlock(&p->mutex);
-out:
kfree(minfo.cu_mask.ptr);
return retval;
}
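
For reference, the change above adopts the standard memdup_user() pattern: one call replaces the kzalloc() + copy_from_user() pair and encodes failure as an ERR_PTR() (-ENOMEM or -EFAULT), so no unwind label is needed. A minimal sketch, assuming a caller-supplied user pointer and size (function name hypothetical):

/* Hedged sketch of the memdup_user() pattern adopted above. */
static int copy_user_mask(const void __user *uptr, size_t size)
{
	u32 *buf = memdup_user(uptr, size);

	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -ENOMEM or -EFAULT */

	/* ... use buf ... */

	kfree(buf);
	return 0;
}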
@@ -1070,7 +1064,12 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
svm_range_list_lock_and_flush_work(&p->svms, current->mm);
mutex_lock(&p->svms.lock);
mmap_write_unlock(current->mm);
- if (interval_tree_iter_first(&p->svms.objects,
+
+ /* Skip the special case that allocates VRAM without a VA;
+ * in that case the VA is 0, which is otherwise invalid.
+ */
+ if (!(!args->va_addr && (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)) &&
+ interval_tree_iter_first(&p->svms.objects,
args->va_addr >> PAGE_SHIFT,
(args->va_addr + args->size - 1) >> PAGE_SHIFT)) {
pr_err("Address: 0x%llx already allocated by SVM\n",
@@ -2566,8 +2565,8 @@ static int criu_restore(struct file *filep,
pr_debug("CRIU restore (num_devices:%u num_bos:%u num_objects:%u priv_data_size:%llu)\n",
args->num_devices, args->num_bos, args->num_objects, args->priv_data_size);
- if (!args->bos || !args->devices || !args->priv_data || !args->priv_data_size ||
- !args->num_devices || !args->num_bos)
+ if ((args->num_bos > 0 && !args->bos) || !args->devices || !args->priv_data ||
+ !args->priv_data_size || !args->num_devices)
return -EINVAL;
mutex_lock(&p->mutex);
@@ -3252,8 +3251,10 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
int retcode = -EINVAL;
bool ptrace_attached = false;
- if (nr >= AMDKFD_CORE_IOCTL_COUNT)
+ if (nr >= AMDKFD_CORE_IOCTL_COUNT) {
+ retcode = -ENOTTY;
goto err_i1;
+ }
if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
u32 amdkfd_size;
@@ -3266,8 +3267,10 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
asize = amdkfd_size;
cmd = ioctl->cmd;
- } else
+ } else {
+ retcode = -ENOTTY;
goto err_i1;
+ }
dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg);
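
Returning -ENOTTY here follows the kernel convention that -ENOTTY means "unknown ioctl number", while -EINVAL is reserved for a recognized command with bad arguments. A minimal sketch of that convention (command value hypothetical):

static long example_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case 0x1234:				/* hypothetical known command */
		return arg ? 0 : -EINVAL;	/* known cmd, bad argument */
	default:
		return -ENOTTY;			/* unknown ioctl number */
	}
}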
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 7e749f9b6d69..e9cfb80bd436 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -495,6 +495,7 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
mutex_init(&kfd->doorbell_mutex);
ida_init(&kfd->doorbell_ida);
+ atomic_set(&kfd->kfd_processes_count, 0);
return kfd;
}
@@ -1133,7 +1134,15 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
}
for (i = 0; i < kfd->num_nodes; i++) {
- node = kfd->nodes[i];
+ /* Guard against a race with another thread running between
+ * kfd_cleanup_nodes() and kfree(kfd), where kfd->nodes[i] has
+ * already been set to NULL.
+ */
+ if (kfd->nodes[i])
+ node = kfd->nodes[i];
+ else
+ return;
+
spin_lock_irqsave(&node->interrupt_lock, flags);
if (node->interrupts_active
@@ -1485,6 +1494,15 @@ int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd)
mutex_lock(&kfd_processes_mutex);
+ /* kfd_processes_count is per kfd_dev; if any process still holds
+ * a pdd on this device, return -EBUSY without further checks.
+ */
+ if (atomic_read(&kfd->kfd_processes_count)) {
+ pr_debug("process_wq_release not finished\n");
+ r = -EBUSY;
+ goto out;
+ }
+
if (hash_empty(kfd_processes_table) && !kfd_is_locked(kfd))
goto out;
@@ -1550,6 +1568,25 @@ int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
return ret;
}
+int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd)
+{
+ struct kfd_node *node;
+ int i, r;
+
+ if (!kfd->init_complete)
+ return 0;
+
+ for (i = 0; i < kfd->num_nodes; i++) {
+ node = kfd->nodes[i];
+ r = node->dqm->ops.unhalt(node->dqm);
+ if (r) {
+ dev_err(kfd_device, "Failed to start scheduler\n");
+ return r;
+ }
+ }
+ return 0;
+}
+
int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
{
struct kfd_node *node;
@@ -1567,6 +1604,23 @@ int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
return node->dqm->ops.halt(node->dqm);
}
+int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd)
+{
+ struct kfd_node *node;
+ int i, r;
+
+ if (!kfd->init_complete)
+ return 0;
+
+ for (i = 0; i < kfd->num_nodes; i++) {
+ node = kfd->nodes[i];
+ r = node->dqm->ops.halt(node->dqm);
+ if (r)
+ return r;
+ }
+ return 0;
+}
+
bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
{
struct kfd_node *node;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 2d91027e2a74..6c5c7c1bf5ed 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -2725,7 +2725,7 @@ static void get_queue_checkpoint_info(struct device_queue_manager *dqm,
dqm_lock(dqm);
mqd_mgr = dqm->mqd_mgrs[mqd_type];
- *mqd_size = mqd_mgr->mqd_size;
+ *mqd_size = mqd_mgr->mqd_size * NUM_XCC(mqd_mgr->dev->xcc_mask);
*ctl_stack_size = 0;
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE && mqd_mgr->get_checkpoint_info)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 2b0a830f5b29..fb3129883a4c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -46,11 +46,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_node *dev,
int retval;
union PM4_MES_TYPE_3_HEADER nop;
- if (WARN_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ))
- return false;
-
- pr_debug("Initializing queue type %d size %d\n", KFD_QUEUE_TYPE_HIQ,
- queue_size);
+ pr_debug("Initializing queue type %d size %d\n", type, queue_size);
memset(&prop, 0, sizeof(prop));
memset(&nop, 0, sizeof(nop));
@@ -69,6 +65,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_node *dev,
kq->mqd_mgr = dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
break;
default:
+ WARN(1, "Invalid queue type %d\n", type);
dev_err(dev->adev->dev, "Invalid queue type %d\n", type);
return false;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 79251f22b702..59a5a3fea65d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -39,22 +39,22 @@
#endif
#define dev_fmt(fmt) "kfd_migrate: " fmt
-static uint64_t
-svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr)
+static u64
+svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, u64 addr)
{
return addr + amdgpu_ttm_domain_start(adev, TTM_PL_VRAM);
}
static int
-svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
- dma_addr_t *addr, uint64_t *gart_addr, uint64_t flags)
+svm_migrate_gart_map(struct amdgpu_ring *ring, u64 npages,
+ dma_addr_t *addr, u64 *gart_addr, u64 flags)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_job *job;
unsigned int num_dw, num_bytes;
struct dma_fence *fence;
- uint64_t src_addr, dst_addr;
- uint64_t pte_flags;
+ u64 src_addr, dst_addr;
+ u64 pte_flags;
void *cpu_addr;
int r;
@@ -68,7 +68,8 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
AMDGPU_FENCE_OWNER_UNDEFINED,
num_dw * 4 + num_bytes,
AMDGPU_IB_POOL_DELAYED,
- &job);
+ &job,
+ AMDGPU_KERNEL_JOB_ID_KFD_GART_MAP);
if (r)
return r;
@@ -122,15 +123,15 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
static int
svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
- uint64_t *vram, uint64_t npages,
+ u64 *vram, u64 npages,
enum MIGRATION_COPY_DIR direction,
struct dma_fence **mfence)
{
- const uint64_t GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
+ const u64 GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
- uint64_t gart_s, gart_d;
+ u64 gart_s, gart_d;
struct dma_fence *next;
- uint64_t size;
+ u64 size;
int r;
mutex_lock(&adev->mman.gtt_window_lock);
@@ -260,39 +261,39 @@ static void svm_migrate_put_sys_page(unsigned long addr)
put_page(page);
}
-static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
+static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
{
- unsigned long upages = 0;
+ unsigned long mpages = 0;
unsigned long i;
for (i = 0; i < migrate->npages; i++) {
- if (migrate->src[i] & MIGRATE_PFN_VALID &&
- !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
- upages++;
+ if (migrate->dst[i] & MIGRATE_PFN_VALID &&
+ migrate->src[i] & MIGRATE_PFN_MIGRATE)
+ mpages++;
}
- return upages;
+ return mpages;
}
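
The helper's polarity flips from counting failures to counting successes. Assuming the PFN flag encoding from <linux/migrate.h>, an entry counts as migrated only when both arrays agree:

/* Illustration (values hypothetical):
 *   src[i] = pfn | MIGRATE_PFN_VALID | MIGRATE_PFN_MIGRATE   collected page
 *   dst[i] = new_pfn | MIGRATE_PFN_VALID                     destination set
 * => counted by svm_migrate_successful_pages().
 * A collected src[i] whose dst[i] was never populated (no destination
 * page allocated) is no longer counted as migrated, a case the old
 * failure-counting helper missed.
 */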
static int
svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
- dma_addr_t *scratch, uint64_t ttm_res_offset)
+ dma_addr_t *scratch, u64 ttm_res_offset)
{
- uint64_t npages = migrate->npages;
+ u64 npages = migrate->npages;
struct amdgpu_device *adev = node->adev;
struct device *dev = adev->dev;
struct amdgpu_res_cursor cursor;
- uint64_t mpages = 0;
+ u64 mpages = 0;
dma_addr_t *src;
- uint64_t *dst;
- uint64_t i, j;
+ u64 *dst;
+ u64 i, j;
int r;
pr_debug("svms 0x%p [0x%lx 0x%lx 0x%llx]\n", prange->svms, prange->start,
prange->last, ttm_res_offset);
src = scratch;
- dst = (uint64_t *)(scratch + npages);
+ dst = (u64 *)(scratch + npages);
amdgpu_res_first(prange->ttm_res, ttm_res_offset,
npages << PAGE_SHIFT, &cursor);
@@ -385,11 +386,11 @@ out_free_vram_pages:
static long
svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
- struct vm_area_struct *vma, uint64_t start,
- uint64_t end, uint32_t trigger, uint64_t ttm_res_offset)
+ struct vm_area_struct *vma, u64 start,
+ u64 end, uint32_t trigger, u64 ttm_res_offset)
{
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
- uint64_t npages = (end - start) >> PAGE_SHIFT;
+ u64 npages = (end - start) >> PAGE_SHIFT;
struct amdgpu_device *adev = node->adev;
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
@@ -408,7 +409,7 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
buf = kvcalloc(npages,
- 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
+ 2 * sizeof(*migrate.src) + sizeof(u64) + sizeof(dma_addr_t),
GFP_KERNEL);
if (!buf)
goto out;
@@ -447,9 +448,9 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
svm_migrate_copy_done(adev, mfence);
migrate_vma_finalize(&migrate);
- mpages = cpages - svm_migrate_unsuccessful_pages(&migrate);
- pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
- mpages, cpages, migrate.npages);
+ mpages = svm_migrate_successful_pages(&migrate);
+ pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n",
+ mpages, cpages, migrate.npages);
svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);
@@ -490,7 +491,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
{
unsigned long addr, start, end;
struct vm_area_struct *vma;
- uint64_t ttm_res_offset;
+ u64 ttm_res_offset;
struct kfd_node *node;
unsigned long mpages = 0;
long r = 0;
@@ -580,14 +581,14 @@ static void svm_migrate_page_free(struct page *page)
static int
svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
- dma_addr_t *scratch, uint64_t npages)
+ dma_addr_t *scratch, u64 npages)
{
struct device *dev = adev->dev;
- uint64_t *src;
+ u64 *src;
dma_addr_t *dst;
struct page *dpage;
- uint64_t i = 0, j;
- uint64_t addr;
+ u64 i = 0, j;
+ u64 addr;
int r = 0;
pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
@@ -595,7 +596,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
addr = migrate->start;
- src = (uint64_t *)(scratch + npages);
+ src = (u64 *)(scratch + npages);
dst = scratch;
for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
@@ -683,12 +684,11 @@ out_oom:
*/
static long
svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
- struct vm_area_struct *vma, uint64_t start, uint64_t end,
+ struct vm_area_struct *vma, u64 start, u64 end,
uint32_t trigger, struct page *fault_page)
{
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
- uint64_t npages = (end - start) >> PAGE_SHIFT;
- unsigned long upages = npages;
+ u64 npages = (end - start) >> PAGE_SHIFT;
unsigned long cpages = 0;
unsigned long mpages = 0;
struct amdgpu_device *adev = node->adev;
@@ -710,7 +710,7 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
buf = kvcalloc(npages,
- 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
+ 2 * sizeof(*migrate.src) + sizeof(u64) + sizeof(dma_addr_t),
GFP_KERNEL);
if (!buf)
goto out;
@@ -736,7 +736,6 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
if (!cpages) {
pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
prange->start, prange->last);
- upages = svm_migrate_unsuccessful_pages(&migrate);
goto out_free;
}
if (cpages != npages)
@@ -749,9 +748,9 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
scratch, npages);
migrate_vma_pages(&migrate);
- upages = svm_migrate_unsuccessful_pages(&migrate);
- pr_debug("unsuccessful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
- upages, cpages, migrate.npages);
+ mpages = svm_migrate_successful_pages(&migrate);
+ pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n",
+ mpages, cpages, migrate.npages);
svm_migrate_copy_done(adev, mfence);
migrate_vma_finalize(&migrate);
@@ -764,8 +763,7 @@ out_free:
start >> PAGE_SHIFT, end >> PAGE_SHIFT,
node->id, 0, trigger, r);
out:
- if (!r && cpages) {
- mpages = cpages - upages;
+ if (!r && mpages) {
pdd = svm_range_get_pdd_by_node(prange, node);
if (pdd)
WRITE_ONCE(pdd->page_out, pdd->page_out + mpages);
@@ -848,6 +846,9 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
}
if (r >= 0) {
+ WARN_ONCE(prange->vram_pages < mpages,
+ "Recorded vram pages(0x%llx) should not be less than migration pages(0x%lx).",
+ prange->vram_pages, mpages);
prange->vram_pages -= mpages;
/* prange does not have vram page set its actual_loc to system
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index aee2212e52f6..33aa23450b3f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -78,8 +78,8 @@ err_ioctl:
static void kfd_exit(void)
{
kfd_cleanup_processes();
- kfd_debugfs_fini();
kfd_process_destroy_wq();
+ kfd_debugfs_fini();
kfd_procfs_shutdown();
kfd_topology_shutdown();
kfd_chardev_exit();
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 97933d2a3803..f2dee320fada 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -373,7 +373,7 @@ static void get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stac
{
struct v9_mqd *m = get_mqd(mqd);
- *ctl_stack_size = m->cp_hqd_cntl_stack_size;
+ *ctl_stack_size = m->cp_hqd_cntl_stack_size * NUM_XCC(mm->dev->xcc_mask);
}
static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
@@ -388,6 +388,24 @@ static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, voi
memcpy(ctl_stack_dst, ctl_stack, m->cp_hqd_cntl_stack_size);
}
+static void checkpoint_mqd_v9_4_3(struct mqd_manager *mm,
+ void *mqd,
+ void *mqd_dst,
+ void *ctl_stack_dst)
+{
+ struct v9_mqd *m;
+ int xcc;
+ uint64_t size = get_mqd(mqd)->cp_mqd_stride_size;
+
+ for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
+ m = get_mqd(mqd + size * xcc);
+
+ checkpoint_mqd(mm, m,
+ (uint8_t *)mqd_dst + sizeof(*m) * xcc,
+ (uint8_t *)ctl_stack_dst + m->cp_hqd_cntl_stack_size * xcc);
+ }
+}
+
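
The per-XCC checkpoint above walks a strided source and packs a dense destination. A hedged sketch of the offset arithmetic (helper names hypothetical; stride and sizes as used above):

/* Source MQDs are separated by the hardware stride; checkpoint copies
 * are packed back to back, followed by the per-XCC control stacks.
 */
static inline void *src_mqd_for_xcc(void *mqd, u64 stride, int xcc)
{
	return (u8 *)mqd + stride * xcc;
}

static inline void *dst_mqd_for_xcc(void *dst, size_t mqd_size, int xcc)
{
	return (u8 *)dst + mqd_size * xcc;
}

static inline void *dst_ctl_stack_for_xcc(void *dst, u32 ctl_size, int xcc)
{
	return (u8 *)dst + (size_t)ctl_size * xcc;
}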
static void restore_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *qp,
@@ -764,13 +782,35 @@ static void restore_mqd_v9_4_3(struct mqd_manager *mm, void **mqd,
const void *mqd_src,
const void *ctl_stack_src, u32 ctl_stack_size)
{
- restore_mqd(mm, mqd, mqd_mem_obj, gart_addr, qp, mqd_src, ctl_stack_src, ctl_stack_size);
- if (amdgpu_sriov_multi_vf_mode(mm->dev->adev)) {
- struct v9_mqd *m;
+ struct kfd_mem_obj xcc_mqd_mem_obj;
+ u32 mqd_ctl_stack_size;
+ struct v9_mqd *m;
+ u32 num_xcc;
+ int xcc;
- m = (struct v9_mqd *) mqd_mem_obj->cpu_ptr;
- m->cp_hqd_pq_doorbell_control |= 1 <<
- CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE__SHIFT;
+ uint64_t offset = mm->mqd_stride(mm, qp);
+
+ mm->dev->dqm->current_logical_xcc_start++;
+
+ num_xcc = NUM_XCC(mm->dev->xcc_mask);
+ mqd_ctl_stack_size = ctl_stack_size / num_xcc;
+
+ memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj));
+
+ /* Set the MQD pointer and gart address to XCC0 MQD */
+ *mqd = mqd_mem_obj->cpu_ptr;
+ if (gart_addr)
+ *gart_addr = mqd_mem_obj->gpu_addr;
+
+ for (xcc = 0; xcc < num_xcc; xcc++) {
+ get_xcc_mqd(mqd_mem_obj, &xcc_mqd_mem_obj, offset * xcc);
+ restore_mqd(mm, (void **)&m,
+ &xcc_mqd_mem_obj,
+ NULL,
+ qp,
+ (uint8_t *)mqd_src + xcc * sizeof(*m),
+ (uint8_t *)ctl_stack_src + xcc * mqd_ctl_stack_size,
+ mqd_ctl_stack_size);
}
}
static int destroy_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
@@ -906,7 +946,6 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
mqd->free_mqd = kfd_free_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->get_checkpoint_info = get_checkpoint_info;
- mqd->checkpoint_mqd = checkpoint_mqd;
mqd->mqd_size = sizeof(struct v9_mqd);
mqd->mqd_stride = mqd_stride_v9;
#if defined(CONFIG_DEBUG_FS)
@@ -918,16 +957,18 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
mqd->init_mqd = init_mqd_v9_4_3;
mqd->load_mqd = load_mqd_v9_4_3;
mqd->update_mqd = update_mqd_v9_4_3;
- mqd->restore_mqd = restore_mqd_v9_4_3;
mqd->destroy_mqd = destroy_mqd_v9_4_3;
mqd->get_wave_state = get_wave_state_v9_4_3;
+ mqd->checkpoint_mqd = checkpoint_mqd_v9_4_3;
+ mqd->restore_mqd = restore_mqd_v9_4_3;
} else {
mqd->init_mqd = init_mqd;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd;
- mqd->restore_mqd = restore_mqd;
mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->get_wave_state = get_wave_state;
+ mqd->checkpoint_mqd = checkpoint_mqd;
+ mqd->restore_mqd = restore_mqd;
}
break;
case KFD_MQD_TYPE_HIQ:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 67694bcd9464..70ef051511bb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -111,7 +111,14 @@
#define KFD_KERNEL_QUEUE_SIZE 2048
-#define KFD_UNMAP_LATENCY_MS (4000)
+/* KFD_UNMAP_LATENCY_MS is the timeout the CP waits for SDMA preemption. One
+ * XCC can be associated with two SDMA engines. queue_preemption_timeout_ms is
+ * the time the driver waits for the CP to return the UNMAP_QUEUE fence, so:
+ * queue_preemption_timeout_ms = sdma_preemption_time * 2 + CP workload
+ * The formula below reserves 10% of the total timeout for the CP workload.
+ */
+#define KFD_UNMAP_LATENCY_MS \
+ ((queue_preemption_timeout_ms - queue_preemption_timeout_ms / 10) >> 1)
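
As a numeric check, assuming the module parameter's default queue_preemption_timeout_ms of 9000:

/* KFD_UNMAP_LATENCY_MS = (9000 - 9000 / 10) >> 1
 *                      = 8100 >> 1
 *                      = 4050 ms
 * i.e. close to the old 4000 ms constant, with ~10% of the total budget
 * reserved for the CP and the remainder split across the two SDMA
 * engines an XCC can be associated with.
 */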
#define KFD_MAX_SDMA_QUEUES 128
@@ -375,6 +382,8 @@ struct kfd_dev {
/* for dynamic partitioning */
int kfd_dev_lock;
+
+ atomic_t kfd_processes_count;
};
enum kfd_mempool {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 5be28c6c4f6a..ddfe30c13e9d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -1088,6 +1088,8 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
pdd->runtime_inuse = false;
}
+ atomic_dec(&pdd->dev->kfd->kfd_processes_count);
+
kfree(pdd);
p->pdds[i] = NULL;
}
@@ -1649,6 +1651,8 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
/* Init idr used for memory handle translation */
idr_init(&pdd->alloc_idr);
+ atomic_inc(&dev->kfd->kfd_processes_count);
+
return pdd;
}
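
The new counter pairs across three sites; a hedged summary of its lifecycle as used in this series:

/* kfd_processes_count lifecycle (per kfd_dev):
 *   kfd_create_process_device_data() -> atomic_inc()
 *   kfd_process_destroy_pdds()       -> atomic_dec()
 *   kgd2kfd_check_and_lock_kfd()     -> atomic_read() != 0 => -EBUSY
 * so partition/mode changes are refused until the deferred process
 * release work has torn down every pdd on the device.
 */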
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index c643e0ccec52..7fbb5c274ccc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -914,7 +914,10 @@ static int criu_checkpoint_queues_device(struct kfd_process_device *pdd,
q_data = (struct kfd_criu_queue_priv_data *)q_private_data;
- /* data stored in this order: priv_data, mqd, ctl_stack */
+ /*
+ * data stored in this order:
+ * priv_data, mqd[xcc0], mqd[xcc1],..., ctl_stack[xcc0], ctl_stack[xcc1]...
+ */
q_data->mqd_size = mqd_size;
q_data->ctl_stack_size = ctl_stack_size;
@@ -963,7 +966,7 @@ int kfd_criu_checkpoint_queues(struct kfd_process *p,
}
static void set_queue_properties_from_criu(struct queue_properties *qp,
- struct kfd_criu_queue_priv_data *q_data)
+ struct kfd_criu_queue_priv_data *q_data, uint32_t num_xcc)
{
qp->is_interop = false;
qp->queue_percent = q_data->q_percent;
@@ -976,7 +979,11 @@ static void set_queue_properties_from_criu(struct queue_properties *qp,
qp->eop_ring_buffer_size = q_data->eop_ring_buffer_size;
qp->ctx_save_restore_area_address = q_data->ctx_save_restore_area_address;
qp->ctx_save_restore_area_size = q_data->ctx_save_restore_area_size;
- qp->ctl_stack_size = q_data->ctl_stack_size;
+ if (q_data->type == KFD_QUEUE_TYPE_COMPUTE)
+ qp->ctl_stack_size = q_data->ctl_stack_size / num_xcc;
+ else
+ qp->ctl_stack_size = q_data->ctl_stack_size;
+
qp->type = q_data->type;
qp->format = q_data->format;
}
@@ -1036,12 +1043,15 @@ int kfd_criu_restore_queue(struct kfd_process *p,
goto exit;
}
- /* data stored in this order: mqd, ctl_stack */
+ /*
+ * data stored in this order:
+ * mqd[xcc0], mqd[xcc1],..., ctl_stack[xcc0], ctl_stack[xcc1]...
+ */
mqd = q_extra_data;
ctl_stack = mqd + q_data->mqd_size;
memset(&qp, 0, sizeof(qp));
- set_queue_properties_from_criu(&qp, q_data);
+ set_queue_properties_from_criu(&qp, q_data, NUM_XCC(pdd->dev->adev->gfx.xcc_mask));
print_queue_properties(&qp);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index a0f22ea6d15a..9d72411c3379 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1189,7 +1189,7 @@ svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b)
}
static uint64_t
-svm_range_get_pte_flags(struct kfd_node *node,
+svm_range_get_pte_flags(struct kfd_node *node, struct amdgpu_vm *vm,
struct svm_range *prange, int domain)
{
struct kfd_node *bo_node;
@@ -1292,10 +1292,6 @@ svm_range_get_pte_flags(struct kfd_node *node,
AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
}
- mapping_flags |= AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
-
- if (flags & KFD_IOCTL_SVM_FLAG_GPU_RO)
- mapping_flags &= ~AMDGPU_VM_PAGE_WRITEABLE;
if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC)
mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
@@ -1305,7 +1301,10 @@ svm_range_get_pte_flags(struct kfd_node *node,
if (gc_ip_version >= IP_VERSION(12, 0, 0))
pte_flags |= AMDGPU_PTE_IS_PTE;
- pte_flags |= amdgpu_gem_va_map_flags(node->adev, mapping_flags);
+ amdgpu_gmc_get_vm_pte(node->adev, vm, NULL, mapping_flags, &pte_flags);
+ pte_flags |= AMDGPU_PTE_READABLE;
+ if (!(flags & KFD_IOCTL_SVM_FLAG_GPU_RO))
+ pte_flags |= AMDGPU_PTE_WRITEABLE;
return pte_flags;
}
@@ -1412,7 +1411,7 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
last_start, prange->start + i, last_domain ? "GPU" : "CPU");
- pte_flags = svm_range_get_pte_flags(pdd->dev, prange, last_domain);
+ pte_flags = svm_range_get_pte_flags(pdd->dev, vm, prange, last_domain);
if (readonly)
pte_flags &= ~AMDGPU_PTE_WRITEABLE;
@@ -1714,9 +1713,32 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
next = min(vma->vm_end, end);
npages = (next - addr) >> PAGE_SHIFT;
+ /* HMM requires at least READ permission. If the range is PROT_NONE,
+ * unmap the memory; if it is not already mapped, this is a no-op.
+ * If PROT_WRITE is given without READ, warn first, then unmap.
+ */
+ if (!(vma->vm_flags & VM_READ)) {
+ unsigned long e, s;
+
+ svm_range_lock(prange);
+ if (vma->vm_flags & VM_WRITE)
+ pr_debug("VM_WRITE without VM_READ is not supported");
+ s = max(start, prange->start);
+ e = min(end, prange->last);
+ if (e >= s)
+ r = svm_range_unmap_from_gpus(prange, s, e,
+ KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU);
+ svm_range_unlock(prange);
+ /* If unmap returns non-zero, we'll bail on the next for loop
+ * iteration, so just leave r and continue
+ */
+ addr = next;
+ continue;
+ }
+
WRITE_ONCE(p->svms.faulting_task, current);
r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
- readonly, owner, NULL,
+ readonly, owner,
&hmm_range);
WRITE_ONCE(p->svms.faulting_task, NULL);
if (r)
@@ -3023,6 +3045,8 @@ retry_write_locked:
if (svms->checkpoint_ts[gpuidx] != 0) {
if (amdgpu_ih_ts_after_or_equal(ts, svms->checkpoint_ts[gpuidx])) {
pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
+ if (write_locked)
+ mmap_write_downgrade(mm);
r = -EAGAIN;
goto out_unlock_svms;
} else {
@@ -4239,7 +4263,7 @@ svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
r = svm_range_get_attr(p, mm, start, size, nattrs, attrs);
break;
default:
- r = EINVAL;
+ r = -EINVAL;
break;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 4ec73f33535e..5c98746eb72d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -530,6 +530,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
sysfs_show_32bit_prop(buffer, offs, "sdma_fw_version",
dev->gpu->kfd->sdma_fw_version);
sysfs_show_64bit_prop(buffer, offs, "unique_id",
+ dev->gpu->xcp ?
+ dev->gpu->xcp->unique_id :
dev->gpu->adev->unique_id);
sysfs_show_32bit_prop(buffer, offs, "num_xcc",
NUM_XCC(dev->gpu->xcc_mask));
@@ -1587,7 +1589,8 @@ static int kfd_dev_create_p2p_links(void)
break;
if (!dev->gpu || !dev->gpu->adev ||
(dev->gpu->kfd->hive_id &&
- dev->gpu->kfd->hive_id == new_dev->gpu->kfd->hive_id))
+ dev->gpu->kfd->hive_id == new_dev->gpu->kfd->hive_id &&
+ amdgpu_xgmi_get_is_sharing_enabled(dev->gpu->adev, new_dev->gpu->adev)))
goto next;
/* check if node(s) is/are peer accessible in one direction or bi-direction */
diff --git a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c
index 8bc36f04b1b7..44009aa8216e 100644
--- a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c
+++ b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c
@@ -46,18 +46,29 @@ static const struct drm_driver amdgpu_xcp_driver = {
static int8_t pdev_num;
static struct xcp_device *xcp_dev[MAX_XCP_PLATFORM_DEVICE];
+static DEFINE_MUTEX(xcp_mutex);
int amdgpu_xcp_drm_dev_alloc(struct drm_device **ddev)
{
struct platform_device *pdev;
struct xcp_device *pxcp_dev;
char dev_name[20];
- int ret;
+ int ret, i;
+
+ guard(mutex)(&xcp_mutex);
if (pdev_num >= MAX_XCP_PLATFORM_DEVICE)
return -ENODEV;
- snprintf(dev_name, sizeof(dev_name), "amdgpu_xcp_%d", pdev_num);
+ for (i = 0; i < MAX_XCP_PLATFORM_DEVICE; i++) {
+ if (!xcp_dev[i])
+ break;
+ }
+
+ if (i >= MAX_XCP_PLATFORM_DEVICE)
+ return -ENODEV;
+
+ snprintf(dev_name, sizeof(dev_name), "amdgpu_xcp_%d", i);
pdev = platform_device_register_simple(dev_name, -1, NULL, 0);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
@@ -73,8 +84,8 @@ int amdgpu_xcp_drm_dev_alloc(struct drm_device **ddev)
goto out_devres;
}
- xcp_dev[pdev_num] = pxcp_dev;
- xcp_dev[pdev_num]->pdev = pdev;
+ xcp_dev[i] = pxcp_dev;
+ xcp_dev[i]->pdev = pdev;
*ddev = &pxcp_dev->drm;
pdev_num++;
@@ -89,16 +100,43 @@ out_unregister:
}
EXPORT_SYMBOL(amdgpu_xcp_drm_dev_alloc);
-void amdgpu_xcp_drv_release(void)
+static void free_xcp_dev(int8_t index)
{
- for (--pdev_num; pdev_num >= 0; --pdev_num) {
- struct platform_device *pdev = xcp_dev[pdev_num]->pdev;
+ if ((index < MAX_XCP_PLATFORM_DEVICE) && (xcp_dev[index])) {
+ struct platform_device *pdev = xcp_dev[index]->pdev;
devres_release_group(&pdev->dev, NULL);
platform_device_unregister(pdev);
- xcp_dev[pdev_num] = NULL;
+
+ xcp_dev[index] = NULL;
+ pdev_num--;
+ }
+}
+
+void amdgpu_xcp_drm_dev_free(struct drm_device *ddev)
+{
+ int8_t i;
+
+ guard(mutex)(&xcp_mutex);
+
+ for (i = 0; i < MAX_XCP_PLATFORM_DEVICE; i++) {
+ if ((xcp_dev[i]) && (&xcp_dev[i]->drm == ddev)) {
+ free_xcp_dev(i);
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL(amdgpu_xcp_drm_dev_free);
+
+void amdgpu_xcp_drv_release(void)
+{
+ int8_t i;
+
+ guard(mutex)(&xcp_mutex);
+
+ for (i = 0; pdev_num && i < MAX_XCP_PLATFORM_DEVICE; i++) {
+ free_xcp_dev(i);
}
- pdev_num = 0;
}
EXPORT_SYMBOL(amdgpu_xcp_drv_release);
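
Allocation now scans for the first free slot instead of appending at pdev_num, so a device freed from the middle of the array can be reused, and guard(mutex) from <linux/cleanup.h> drops xcp_mutex on every return path. A minimal sketch of that pattern, assuming the same globals:

static int xcp_find_free_slot(void)
{
	int i;

	guard(mutex)(&xcp_mutex);	/* auto-unlock at scope exit */

	for (i = 0; i < MAX_XCP_PLATFORM_DEVICE; i++)
		if (!xcp_dev[i])
			return i;	/* lock dropped automatically */
	return -ENODEV;
}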
diff --git a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h
index c1c4b679bf95..580a1602c8e3 100644
--- a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h
+++ b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h
@@ -25,5 +25,6 @@
#define _AMDGPU_XCP_DRV_H_
int amdgpu_xcp_drm_dev_alloc(struct drm_device **ddev);
+void amdgpu_xcp_drm_dev_free(struct drm_device *ddev);
void amdgpu_xcp_drv_release(void);
#endif /* _AMDGPU_XCP_DRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
index 89d605de0595..0084a8d55254 100644
--- a/drivers/gpu/drm/amd/display/Makefile
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -44,6 +44,7 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/mmhubbub
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/mpc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/opp
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/pg
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/soc_and_ip_translator
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 2a175fc0399c..0d03e324d5b9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
@@ -39,13 +40,11 @@
#include "dc/dc_stat.h"
#include "dc/dc_state.h"
#include "amdgpu_dm_trace.h"
-#include "dpcd_defs.h"
#include "link/protocols/link_dpcd.h"
#include "link_service_types.h"
#include "link/protocols/link_dp_capability.h"
#include "link/protocols/link_ddc.h"
-#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
@@ -56,7 +55,6 @@
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
#include "amdgpu_dm_wb.h"
-#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"
#include "amd_shared.h"
@@ -82,6 +80,7 @@
#include <linux/component.h>
#include <linux/sort.h>
+#include <drm/drm_privacy_screen_consumer.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
@@ -102,15 +101,6 @@
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
-#include "dcn/dcn_1_0_offset.h"
-#include "dcn/dcn_1_0_sh_mask.h"
-#include "soc15_hw_ip.h"
-#include "soc15_common.h"
-#include "vega10_ip_offset.h"
-
-#include "gc/gc_11_0_0_offset.h"
-#include "gc/gc_11_0_0_sh_mask.h"
-
#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
@@ -243,6 +233,7 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev,
static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
+static int amdgpu_dm_atomic_setup_commit(struct drm_atomic_state *state);
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
static int amdgpu_dm_atomic_check(struct drm_device *dev,
@@ -427,8 +418,7 @@ static inline bool update_planes_and_stream_adapter(struct dc *dc,
/*
* Previous frame finished and HW is ready for optimization.
*/
- if (update_type == UPDATE_TYPE_FAST)
- dc_post_update_surfaces_to_stream(dc);
+ dc_post_update_surfaces_to_stream(dc);
return dc_update_planes_and_stream(dc,
array_of_surface_update,
@@ -541,6 +531,50 @@ static void dm_pflip_high_irq(void *interrupt_params)
amdgpu_crtc->crtc_id, amdgpu_crtc, vrr_active, (int)!e);
}
+static void dm_handle_vmin_vmax_update(struct work_struct *offload_work)
+{
+ struct vupdate_offload_work *work = container_of(offload_work, struct vupdate_offload_work, work);
+ struct amdgpu_device *adev = work->adev;
+ struct dc_stream_state *stream = work->stream;
+ struct dc_crtc_timing_adjust *adjust = work->adjust;
+
+ mutex_lock(&adev->dm.dc_lock);
+ dc_stream_adjust_vmin_vmax(adev->dm.dc, stream, adjust);
+ mutex_unlock(&adev->dm.dc_lock);
+
+ dc_stream_release(stream);
+ kfree(work->adjust);
+ kfree(work);
+}
+
+static void schedule_dc_vmin_vmax(struct amdgpu_device *adev,
+ struct dc_stream_state *stream,
+ struct dc_crtc_timing_adjust *adjust)
+{
+ struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_KERNEL);
+ if (!offload_work) {
+ drm_dbg_driver(adev_to_drm(adev), "Failed to allocate vupdate_offload_work\n");
+ return;
+ }
+
+ struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_KERNEL);
+ if (!adjust_copy) {
+ drm_dbg_driver(adev_to_drm(adev), "Failed to allocate adjust_copy\n");
+ kfree(offload_work);
+ return;
+ }
+
+ dc_stream_retain(stream);
+ memcpy(adjust_copy, adjust, sizeof(*adjust_copy));
+
+ INIT_WORK(&offload_work->work, dm_handle_vmin_vmax_update);
+ offload_work->adev = adev;
+ offload_work->stream = stream;
+ offload_work->adjust = adjust_copy;
+
+ queue_work(system_wq, &offload_work->work);
+}
+
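
This is the usual IRQ-to-process-context deferral: the interrupt handler cannot take the dc_lock mutex, so it snapshots the timing adjustment, takes a stream reference, and lets a worker perform the locked update. A generic, hedged sketch of the shape (note an allocation made in atomic context would need GFP_ATOMIC):

struct deferred_work {
	struct work_struct work;
	int snapshot;			/* copy of the data the worker needs */
};

static void deferred_work_fn(struct work_struct *w)
{
	struct deferred_work *dw = container_of(w, struct deferred_work, work);

	/* process context: sleeping and mutexes are allowed here */
	kfree(dw);
}

static void schedule_deferred(int value)
{
	struct deferred_work *dw = kzalloc(sizeof(*dw), GFP_ATOMIC);

	if (!dw)
		return;			/* best effort: drop this update */

	dw->snapshot = value;
	INIT_WORK(&dw->work, deferred_work_fn);
	queue_work(system_wq, &dw->work);
}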
static void dm_vupdate_high_irq(void *interrupt_params)
{
struct common_irq_params *irq_params = interrupt_params;
@@ -578,22 +612,27 @@ static void dm_vupdate_high_irq(void *interrupt_params)
* page-flip completion events that have been queued to us
* if a pageflip happened inside front-porch.
*/
- if (vrr_active) {
+ if (vrr_active && acrtc->dm_irq_params.stream) {
+ bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled;
+ bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled;
+ bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state
+ == VRR_STATE_ACTIVE_VARIABLE;
+
amdgpu_dm_crtc_handle_vblank(acrtc);
/* BTR processing for pre-DCE12 ASICs */
- if (acrtc->dm_irq_params.stream &&
- adev->family < AMDGPU_FAMILY_AI) {
+ if (adev->family < AMDGPU_FAMILY_AI) {
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
mod_freesync_handle_v_update(
adev->dm.freesync_module,
acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params);
- dc_stream_adjust_vmin_vmax(
- adev->dm.dc,
- acrtc->dm_irq_params.stream,
- &acrtc->dm_irq_params.vrr_params.adjust);
+ if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) {
+ schedule_dc_vmin_vmax(adev,
+ acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params.adjust);
+ }
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
}
@@ -676,15 +715,20 @@ static void dm_crtc_high_irq(void *interrupt_params)
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
if (acrtc->dm_irq_params.stream &&
- acrtc->dm_irq_params.vrr_params.supported &&
- acrtc->dm_irq_params.freesync_config.state ==
- VRR_STATE_ACTIVE_VARIABLE) {
+ acrtc->dm_irq_params.vrr_params.supported) {
+ bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled;
+ bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled;
+ bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state == VRR_STATE_ACTIVE_VARIABLE;
+
mod_freesync_handle_v_update(adev->dm.freesync_module,
acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params);
- dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
- &acrtc->dm_irq_params.vrr_params.adjust);
+ /* update vmin/vmax only while FreeSync is actively variable, or when both PSR and Replay are disabled */
+ if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) {
+ schedule_dc_vmin_vmax(adev, acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params.adjust);
+ }
}
/*
@@ -1956,6 +2000,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_data.flags.disable_ips_in_vpb = 0;
+ /* DCN35 and above supports dynamic DTBCLK switch */
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 5, 0))
+ init_data.flags.allow_0_dtb_clk = true;
+
/* Enable DWB for tested platforms only */
if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
init_data.num_virtual_links = 1;
@@ -2037,6 +2085,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
dc_hardware_init(adev->dm.dc);
+ adev->dm.restore_backlight = true;
+
adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev);
if (!adev->dm.hpd_rx_offload_wq) {
drm_err(adev_to_drm(adev), "failed to create hpd rx offload workqueue.\n");
@@ -2142,7 +2192,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
drm_err(adev_to_drm(adev),
- "failed to initialize sw for display support.\n");
+ "failed to initialize vblank for display support.\n");
goto error;
}
@@ -2901,7 +2951,7 @@ static int dm_oem_i2c_hw_init(struct amdgpu_device *adev)
return -ENOMEM;
}
- r = i2c_add_adapter(&oem_i2c->base);
+ r = devm_i2c_add_adapter(adev->dev, &oem_i2c->base);
if (r) {
drm_info(adev_to_drm(adev), "Failed to register oem i2c\n");
kfree(oem_i2c);
@@ -2963,8 +3013,6 @@ static int dm_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- kfree(adev->dm.oem_i2c);
-
amdgpu_dm_hpd_fini(adev);
amdgpu_dm_irq_fini(adev);
@@ -2992,14 +3040,20 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
drm_warn(adev_to_drm(adev), "Failed to %s pflip interrupts\n",
enable ? "enable" : "disable");
- if (enable) {
- if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state)))
- rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true);
- } else
- rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false);
-
- if (rc)
- drm_warn(adev_to_drm(adev), "Failed to %sable vupdate interrupt\n", enable ? "en" : "dis");
+ if (dc_supports_vrr(adev->dm.dc->ctx->dce_version)) {
+ if (enable) {
+ if (amdgpu_dm_crtc_vrr_active(
+ to_dm_crtc_state(acrtc->base.state)))
+ rc = amdgpu_dm_crtc_set_vupdate_irq(
+ &acrtc->base, true);
+ } else
+ rc = amdgpu_dm_crtc_set_vupdate_irq(
+ &acrtc->base, false);
+
+ if (rc)
+ drm_warn(adev_to_drm(adev), "Failed to %sable vupdate interrupt\n",
+ enable ? "en" : "dis");
+ }
irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
/* During gpu-reset we disable and then enable vblank irq, so
@@ -3127,25 +3181,6 @@ static void dm_destroy_cached_state(struct amdgpu_device *adev)
dm->cached_state = NULL;
}
-static void dm_complete(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
-
- dm_destroy_cached_state(adev);
-}
-
-static int dm_prepare_suspend(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
-
- if (amdgpu_in_reset(adev))
- return 0;
-
- WARN_ON(adev->dm.cached_state);
-
- return dm_cache_state(adev);
-}
-
static int dm_suspend(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -3407,6 +3442,7 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
dc_resume(dm->dc);
+ adev->dm.restore_backlight = true;
amdgpu_dm_irq_resume_early(adev);
@@ -3571,10 +3607,8 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = {
.early_fini = amdgpu_dm_early_fini,
.hw_init = dm_hw_init,
.hw_fini = dm_hw_fini,
- .prepare_suspend = dm_prepare_suspend,
.suspend = dm_suspend,
.resume = dm_resume,
- .complete = dm_complete,
.is_idle = dm_is_idle,
.wait_for_idle = dm_wait_for_idle,
.check_soft_reset = dm_check_soft_reset,
@@ -3607,23 +3641,25 @@ static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
.atomic_commit_tail = amdgpu_dm_atomic_commit_tail,
- .atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
+ .atomic_commit_setup = amdgpu_dm_atomic_setup_commit,
};
static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
+ const struct drm_panel_backlight_quirk *panel_backlight_quirk;
struct amdgpu_dm_backlight_caps *caps;
struct drm_connector *conn_base;
struct amdgpu_device *adev;
struct drm_luminance_range_info *luminance_range;
- int min_input_signal_override;
+ struct drm_device *drm;
if (aconnector->bl_idx == -1 ||
aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP)
return;
conn_base = &aconnector->base;
- adev = drm_to_adev(conn_base->dev);
+ drm = conn_base->dev;
+ adev = drm_to_adev(drm);
caps = &adev->dm.backlight_caps[aconnector->bl_idx];
caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
@@ -3656,9 +3692,24 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
else
caps->aux_min_input_signal = 1;
- min_input_signal_override = drm_get_panel_min_brightness_quirk(aconnector->drm_edid);
- if (min_input_signal_override >= 0)
- caps->min_input_signal = min_input_signal_override;
+ panel_backlight_quirk =
+ drm_get_panel_backlight_quirk(aconnector->drm_edid);
+ if (!IS_ERR_OR_NULL(panel_backlight_quirk)) {
+ if (panel_backlight_quirk->min_brightness) {
+ caps->min_input_signal =
+ panel_backlight_quirk->min_brightness - 1;
+ drm_info(drm,
+ "Applying panel backlight quirk, min_brightness: %d\n",
+ caps->min_input_signal);
+ }
+ if (panel_backlight_quirk->brightness_mask) {
+ drm_info(drm,
+ "Applying panel backlight quirk, brightness_mask: 0x%X\n",
+ panel_backlight_quirk->brightness_mask);
+ caps->brightness_mask =
+ panel_backlight_quirk->brightness_mask;
+ }
+ }
}
DEFINE_FREE(sink_release, struct dc_sink *, if (_T) dc_sink_release(_T))
@@ -4756,16 +4807,16 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
return 1;
}
-/* Rescale from [min..max] to [0..MAX_BACKLIGHT_LEVEL] */
+/* Rescale from [min..max] to [0..AMDGPU_MAX_BL_LEVEL] */
static inline u32 scale_input_to_fw(int min, int max, u64 input)
{
- return DIV_ROUND_CLOSEST_ULL(input * MAX_BACKLIGHT_LEVEL, max - min);
+ return DIV_ROUND_CLOSEST_ULL(input * AMDGPU_MAX_BL_LEVEL, max - min);
}
-/* Rescale from [0..MAX_BACKLIGHT_LEVEL] to [min..max] */
+/* Rescale from [0..AMDGPU_MAX_BL_LEVEL] to [min..max] */
static inline u32 scale_fw_to_input(int min, int max, u64 input)
{
- return min + DIV_ROUND_CLOSEST_ULL(input * (max - min), MAX_BACKLIGHT_LEVEL);
+ return min + DIV_ROUND_CLOSEST_ULL(input * (max - min), AMDGPU_MAX_BL_LEVEL);
}
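
A worked evaluation of the rescale pair, assuming AMDGPU_MAX_BL_LEVEL is 255 and an example panel range of min = 12, max = 255:

/* scale_input_to_fw(12, 255, 100) = DIV_ROUND_CLOSEST(100 * 255, 243) = 105
 * scale_fw_to_input(12, 255, 105) = 12 + DIV_ROUND_CLOSEST(105 * 243, 255)
 *                                 = 12 + 100 = 112
 * The pair round-trips only up to rounding and the min offset, as the
 * example shows.
 */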
static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *caps,
@@ -4773,8 +4824,8 @@ static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *cap
uint32_t *user_brightness)
{
u32 brightness = scale_input_to_fw(min, max, *user_brightness);
- u8 prev_signal = 0, prev_lum = 0;
- int i = 0;
+ u8 lower_signal, upper_signal, upper_lum, lower_lum, lum;
+ int left, right;
if (amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)
return;
@@ -4782,32 +4833,54 @@ static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *cap
if (!caps->data_points)
return;
- /* choose start to run less interpolation steps */
- if (caps->luminance_data[caps->data_points/2].input_signal > brightness)
- i = caps->data_points/2;
- do {
- u8 signal = caps->luminance_data[i].input_signal;
- u8 lum = caps->luminance_data[i].luminance;
+ /*
+ * Handle the case where brightness is below the first data point
+ * Interpolate between (0,0) and (first_signal, first_lum)
+ */
+ if (brightness < caps->luminance_data[0].input_signal) {
+ lum = DIV_ROUND_CLOSEST(caps->luminance_data[0].luminance * brightness,
+ caps->luminance_data[0].input_signal);
+ goto scale;
+ }
- /*
- * brightness == signal: luminance is percent numerator
- * brightness < signal: interpolate between previous and current luminance numerator
- * brightness > signal: find next data point
- */
- if (brightness > signal) {
- prev_signal = signal;
- prev_lum = lum;
- i++;
- continue;
+ left = 0;
+ right = caps->data_points - 1;
+ while (left <= right) {
+ int mid = left + (right - left) / 2;
+ u8 signal = caps->luminance_data[mid].input_signal;
+
+ /* Exact match found */
+ if (signal == brightness) {
+ lum = caps->luminance_data[mid].luminance;
+ goto scale;
}
- if (brightness < signal)
- lum = prev_lum + DIV_ROUND_CLOSEST((lum - prev_lum) *
- (brightness - prev_signal),
- signal - prev_signal);
- *user_brightness = scale_fw_to_input(min, max,
- DIV_ROUND_CLOSEST(lum * brightness, 101));
- return;
- } while (i < caps->data_points);
+
+ if (signal < brightness)
+ left = mid + 1;
+ else
+ right = mid - 1;
+ }
+
+ /* clamp the search index into range */
+ if (left >= caps->data_points)
+ left = caps->data_points - 1;
+
+ /* At this point, left > right */
+ lower_signal = caps->luminance_data[right].input_signal;
+ upper_signal = caps->luminance_data[left].input_signal;
+ lower_lum = caps->luminance_data[right].luminance;
+ upper_lum = caps->luminance_data[left].luminance;
+
+ /* interpolate */
+ if (right == left || !lower_lum)
+ lum = upper_lum;
+ else
+ lum = lower_lum + DIV_ROUND_CLOSEST((upper_lum - lower_lum) *
+ (brightness - lower_signal),
+ upper_signal - lower_signal);
+scale:
+ *user_brightness = scale_fw_to_input(min, max,
+ DIV_ROUND_CLOSEST(lum * brightness, 101));
}
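
The rewrite replaces the linear scan with a binary search over the luminance curve plus linear interpolation between the bracketing points. A self-contained, hedged sketch of that lookup (array layout hypothetical; DIV_ROUND_CLOSEST as in <linux/math.h>):

static u8 interp_lookup(const u8 *xs, const u8 *ys, int n, u8 x)
{
	int left = 0, right = n - 1;

	if (x < xs[0])		/* below first point: interpolate from (0, 0) */
		return DIV_ROUND_CLOSEST(ys[0] * x, xs[0]);

	while (left <= right) {
		int mid = left + (right - left) / 2;

		if (xs[mid] == x)
			return ys[mid];		/* exact match */
		if (xs[mid] < x)
			left = mid + 1;
		else
			right = mid - 1;
	}

	if (left >= n)		/* above last point: clamp */
		left = n - 1;
	if (left == right)	/* degenerate bracket */
		return ys[left];

	/* xs[right] < x < xs[left]: linear interpolation */
	return ys[right] + DIV_ROUND_CLOSEST((ys[left] - ys[right]) *
					     (x - xs[right]),
					     xs[left] - xs[right]);
}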
static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
@@ -4858,6 +4931,10 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
brightness = convert_brightness_from_user(caps, dm->brightness[bl_idx]);
link = (struct dc_link *)dm->backlight_link[bl_idx];
+ /* Apply brightness quirk */
+ if (caps->brightness_mask)
+ brightness |= caps->brightness_mask;
+
/* Change brightness based on AUX property */
mutex_lock(&dm->dc_lock);
if (dm->dc->caps.ips_support && dm->dc->ctx->dmub_srv->idle_allowed) {
@@ -4926,10 +5003,8 @@ static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
if (caps.aux_support) {
u32 avg, peak;
- bool rc;
- rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
- if (!rc)
+ if (!dc_link_get_backlight_level_nits(link, &avg, &peak))
return dm->brightness[bl_idx];
return convert_brightness_to_user(&caps, avg);
}
@@ -4995,8 +5070,11 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
} else
props.brightness = props.max_brightness = MAX_BACKLIGHT_LEVEL;
- if (caps->data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE))
+ if (caps->data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)) {
drm_info(drm, "Using custom brightness curve\n");
+ props.scale = BACKLIGHT_SCALE_NON_LINEAR;
+ } else
+ props.scale = BACKLIGHT_SCALE_LINEAR;
props.type = BACKLIGHT_RAW;
snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
@@ -6359,6 +6437,10 @@ static void fill_stream_properties_from_drm_display_mode(
&& aconnector
&& aconnector->force_yuv420_output)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+ else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR422)
+ && aconnector
+ && aconnector->force_yuv422_output)
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR422;
else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
@@ -6390,13 +6472,15 @@ static void fill_stream_properties_from_drm_display_mode(
(struct drm_connector *)connector,
mode_in);
if (err < 0)
- drm_warn_once(connector->dev, "Failed to setup avi infoframe on connector %s: %zd \n", connector->name, err);
+ drm_warn_once(connector->dev, "Failed to setup avi infoframe on connector %s: %zd\n",
+ connector->name, err);
timing_out->vic = avi_frame.video_code;
err = drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame,
(struct drm_connector *)connector,
mode_in);
if (err < 0)
- drm_warn_once(connector->dev, "Failed to setup vendor infoframe on connector %s: %zd \n", connector->name, err);
+ drm_warn_once(connector->dev, "Failed to setup vendor infoframe on connector %s: %zd\n",
+ connector->name, err);
timing_out->hdmi_vic = hv_frame.vic;
}
@@ -7314,10 +7398,6 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
- if (aconnector->i2c) {
- i2c_del_adapter(&aconnector->i2c->base);
- kfree(aconnector->i2c);
- }
kfree(aconnector->dm_dp_aux.aux.name);
kfree(connector);
@@ -7617,6 +7697,7 @@ create_validate_stream_for_sink(struct drm_connector *connector,
bpc_limit = 8;
do {
+ drm_dbg_kms(connector->dev, "Trying with %d bpc\n", requested_bpc);
stream = create_stream_for_sink(connector, drm_mode,
dm_state, old_stream,
requested_bpc);
@@ -7652,16 +7733,41 @@ create_validate_stream_for_sink(struct drm_connector *connector,
} while (stream == NULL && requested_bpc >= bpc_limit);
- if ((dc_result == DC_FAIL_ENC_VALIDATE ||
- dc_result == DC_EXCEED_DONGLE_CAP) &&
- !aconnector->force_yuv420_output) {
- DRM_DEBUG_KMS("%s:%d Retry forcing yuv420 encoding\n",
- __func__, __LINE__);
-
- aconnector->force_yuv420_output = true;
+ switch (dc_result) {
+ /*
+ * If we failed to validate DP bandwidth stream with the requested RGB color depth,
+ * we try to fallback and configure in order:
+ * YUV422 (8bpc, 6bpc)
+ * YUV420 (8bpc, 6bpc)
+ */
+ case DC_FAIL_ENC_VALIDATE:
+ case DC_EXCEED_DONGLE_CAP:
+ case DC_NO_DP_LINK_BANDWIDTH:
+ /* recursively entered twice and already tried both YUV422 and YUV420 */
+ if (aconnector->force_yuv422_output && aconnector->force_yuv420_output)
+ break;
+ /* first failure; try YUV422 */
+ if (!aconnector->force_yuv422_output) {
+ drm_dbg_kms(connector->dev, "%s:%d Validation failed with %d, retrying w/ YUV422\n",
+ __func__, __LINE__, dc_result);
+ aconnector->force_yuv422_output = true;
+ /* recursively entered and YUV422 failed, try YUV420 */
+ } else if (!aconnector->force_yuv420_output) {
+ drm_dbg_kms(connector->dev, "%s:%d Validation failed with %d, retrying w/ YUV420\n",
+ __func__, __LINE__, dc_result);
+ aconnector->force_yuv420_output = true;
+ }
stream = create_validate_stream_for_sink(connector, drm_mode,
- dm_state, old_stream);
+ dm_state, old_stream);
+ aconnector->force_yuv422_output = false;
aconnector->force_yuv420_output = false;
+ break;
+ case DC_OK:
+ break;
+ default:
+ drm_dbg_kms(connector->dev, "%s:%d Unhandled validation failure %d\n",
+ __func__, __LINE__, dc_result);
+ break;
}
return stream;
@@ -7792,6 +7898,9 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn);
int ret;
+ if (WARN_ON(unlikely(!old_con_state || !new_con_state)))
+ return -EINVAL;
+
trace_amdgpu_dm_connector_atomic_check(new_con_state);
if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
@@ -7803,6 +7912,14 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
if (!crtc)
return 0;
+ if (new_con_state->privacy_screen_sw_state != old_con_state->privacy_screen_sw_state) {
+ new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(new_crtc_state))
+ return PTR_ERR(new_crtc_state);
+
+ new_crtc_state->mode_changed = true;
+ }
+
if (new_con_state->colorspace != old_con_state->colorspace) {
new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(new_crtc_state))
@@ -7934,7 +8051,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
if (IS_ERR(mst_state))
return PTR_ERR(mst_state);
- mst_state->pbn_div.full = dfixed_const(dm_mst_get_pbn_divider(aconnector->mst_root->dc_link));
+ mst_state->pbn_div.full = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link);
if (!state->duplicated) {
int max_bpc = conn_state->max_requested_bpc;
@@ -8158,6 +8275,10 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
{"1920x1200", 1920, 1200}
};
+ if ((connector->connector_type != DRM_MODE_CONNECTOR_eDP) &&
+ (connector->connector_type != DRM_MODE_CONNECTOR_LVDS))
+ return;
+
n = ARRAY_SIZE(common_modes);
for (i = 0; i < n; i++) {
@@ -8378,8 +8499,7 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
drm_add_modes_noedid(connector, 1920, 1080);
} else {
amdgpu_dm_connector_ddc_get_modes(connector, drm_edid);
- if (encoder && (connector->connector_type != DRM_MODE_CONNECTOR_eDP) &&
- (connector->connector_type != DRM_MODE_CONNECTOR_LVDS))
+ if (encoder)
amdgpu_dm_connector_add_common_modes(encoder, connector);
amdgpu_dm_connector_add_freesync_modes(connector, drm_edid);
}
@@ -8498,6 +8618,18 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
if (adev->dm.hdcp_workqueue)
drm_connector_attach_content_protection_property(&aconnector->base, true);
}
+
+ if (connector_type == DRM_MODE_CONNECTOR_eDP) {
+ struct drm_privacy_screen *privacy_screen;
+
+ privacy_screen = drm_privacy_screen_get(adev_to_drm(adev)->dev, NULL);
+ if (!IS_ERR(privacy_screen)) {
+ drm_connector_attach_privacy_screen_provider(&aconnector->base,
+ privacy_screen);
+ } else if (PTR_ERR(privacy_screen) != -ENODEV) {
+ drm_warn(adev_to_drm(adev), "Error getting privacy-screen\n");
+ }
+ }
}
static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
@@ -8627,7 +8759,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
}
aconnector->i2c = i2c;
- res = i2c_add_adapter(&i2c->base);
+ res = devm_i2c_add_adapter(dm->adev->dev, &i2c->base);
if (res) {
drm_err(adev_to_drm(dm->adev), "Failed to register hw i2c %d\n", link->link_index);
@@ -8725,7 +8857,16 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev,
static void manage_dm_interrupts(struct amdgpu_device *adev,
struct amdgpu_crtc *acrtc,
struct dm_crtc_state *acrtc_state)
-{
+{
+	/*
+	 * We cannot be sure that the frontend index maps to the same
+	 * backend index - some even map to more than one.
+	 * So we have to go through the CRTC to find the right IRQ.
+	 */
+ int irq_type = amdgpu_display_crtc_idx_to_irq_type(
+ adev,
+ acrtc->crtc_id);
+ struct drm_device *dev = adev_to_drm(adev);
+
struct drm_vblank_crtc_config config = {0};
struct dc_crtc_timing *timing;
int offdelay;
@@ -8778,7 +8919,35 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
drm_crtc_vblank_on_config(&acrtc->base,
&config);
+	/* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_get. */
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 2, 0):
+ if (amdgpu_irq_get(adev, &adev->pageflip_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Cannot get pageflip irq!\n");
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ if (amdgpu_irq_get(adev, &adev->vline0_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Cannot get vline0 irq!\n");
+#endif
+ }
+
} else {
+	/* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_put. */
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 2, 0):
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ if (amdgpu_irq_put(adev, &adev->vline0_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Cannot put vline0 irq!\n");
+#endif
+ if (amdgpu_irq_put(adev, &adev->pageflip_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Cannot put pageflip irq!\n");
+ }
+
drm_crtc_vblank_off(&acrtc->base);
}
}
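The enable and disable branches above must stay symmetric: amdgpu IRQ sources are reference counted per irq_type, so every amdgpu_irq_get() needs a matching amdgpu_irq_put(), taken in mirrored order. The core of the pairing, stripped of the IP-version gating:

/* Illustrative only: the refcounted enable/disable pair. */
int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);

if (enable)
	r = amdgpu_irq_get(adev, &adev->pageflip_irq, irq_type);	/* refcount++ */
else
	r = amdgpu_irq_put(adev, &adev->pageflip_irq, irq_type);	/* refcount-- */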
@@ -9800,7 +9969,6 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
bool mode_set_reset_required = false;
u32 i;
struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count};
- bool set_backlight_level = false;
/* Disable writeback */
for_each_old_connector_in_state(state, connector, old_con_state, i) {
@@ -9920,7 +10088,6 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
acrtc->hw_mode = new_crtc_state->mode;
crtc->hwmode = new_crtc_state->mode;
mode_set_reset_required = true;
- set_backlight_level = true;
} else if (modereset_required(new_crtc_state)) {
drm_dbg_atomic(dev,
"Atomic commit: RESET. crtc id %d:[%p]\n",
@@ -9977,13 +10144,16 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
* to fix a flicker issue.
* It will cause the dm->actual_brightness is not the current panel brightness
* level. (the dm->brightness is the correct panel level)
- * So we set the backlight level with dm->brightness value after set mode
+	 * So we set the backlight level with the dm->brightness value after the
+	 * initial mode set. The restore_backlight flag avoids setting the
+	 * backlight level again on every subsequent mode set.
*/
- if (set_backlight_level) {
+ if (dm->restore_backlight) {
for (i = 0; i < dm->num_of_edps; i++) {
if (dm->backlight_dev[i])
amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
}
+ dm->restore_backlight = false;
}
}
@@ -10086,69 +10256,40 @@ static void dm_set_writeback(struct amdgpu_display_manager *dm,
drm_writeback_queue_job(wb_conn, new_con_state);
}
-/**
- * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
- * @state: The atomic state to commit
- *
- * This will tell DC to commit the constructed DC state from atomic_check,
- * programming the hardware. Any failures here implies a hardware failure, since
- * atomic check should have filtered anything non-kosher.
- */
-static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+static void amdgpu_dm_update_hdcp(struct drm_atomic_state *state)
{
+ struct drm_connector_state *old_con_state, *new_con_state;
struct drm_device *dev = state->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_display_manager *dm = &adev->dm;
- struct dm_atomic_state *dm_state;
- struct dc_state *dc_state = NULL;
- u32 i, j;
- struct drm_crtc *crtc;
- struct drm_crtc_state *old_crtc_state, *new_crtc_state;
- unsigned long flags;
- bool wait_for_vblank = true;
struct drm_connector *connector;
- struct drm_connector_state *old_con_state, *new_con_state;
- struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
- int crtc_disable_count = 0;
-
- trace_amdgpu_dm_atomic_commit_tail_begin(state);
-
- drm_atomic_helper_update_legacy_modeset_state(dev, state);
- drm_dp_mst_atomic_wait_for_dependencies(state);
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int i;
- dm_state = dm_atomic_get_new_state(state);
- if (dm_state && dm_state->context) {
- dc_state = dm_state->context;
- amdgpu_dm_commit_streams(state, dc_state);
- }
+ if (!adev->dm.hdcp_workqueue)
+ return;
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct dm_crtc_state *dm_new_crtc_state;
struct amdgpu_dm_connector *aconnector;
- if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ if (!connector || connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
aconnector = to_amdgpu_dm_connector(connector);
- if (!adev->dm.hdcp_workqueue)
- continue;
+ drm_dbg(dev, "[HDCP_DM] -------------- i : %x ----------\n", i);
- pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i);
-
- if (!connector)
- continue;
-
- pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
+ drm_dbg(dev, "[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
connector->index, connector->status, connector->dpms);
- pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
+ drm_dbg(dev, "[HDCP_DM] state protection old: %x new: %x\n",
old_con_state->content_protection, new_con_state->content_protection);
if (aconnector->dc_sink) {
if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) {
- pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n",
+ drm_dbg(dev, "[HDCP_DM] pipe_ctx dispname=%s\n",
aconnector->dc_sink->edid_caps.display_name);
}
}
@@ -10162,7 +10303,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
}
if (old_crtc_state)
- pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ drm_dbg(dev, "old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
old_crtc_state->enable,
old_crtc_state->active,
old_crtc_state->mode_changed,
@@ -10170,29 +10311,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
old_crtc_state->connectors_changed);
if (new_crtc_state)
- pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ drm_dbg(dev, "NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
new_crtc_state->enable,
new_crtc_state->active,
new_crtc_state->mode_changed,
new_crtc_state->active_changed,
new_crtc_state->connectors_changed);
- }
-
- for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
- struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
- if (!adev->dm.hdcp_workqueue)
- continue;
-
- new_crtc_state = NULL;
- old_crtc_state = NULL;
-
- if (acrtc) {
- new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
- old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
- }
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
@@ -10236,7 +10361,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
enable_encryption = true;
- drm_info(adev_to_drm(adev), "[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
+ drm_info(dev, "[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
if (aconnector->dc_link)
hdcp_update_display(
@@ -10244,6 +10369,78 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
new_con_state->hdcp_content_type, enable_encryption);
}
}
+}
+
+static int amdgpu_dm_atomic_setup_commit(struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ int i, ret;
+
+ ret = drm_dp_mst_atomic_setup_commit(state);
+ if (ret)
+ return ret;
+
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ /*
+ * Color management settings. We also update color properties
+ * when a modeset is needed, to ensure it gets reprogrammed.
+ */
+ if (dm_new_crtc_state->base.active && dm_new_crtc_state->stream &&
+ (dm_new_crtc_state->base.color_mgmt_changed ||
+ dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))) {
+ ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
+ if (ret) {
+ drm_dbg_atomic(state->dev, "Failed to update color state\n");
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
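Splitting amdgpu_dm_atomic_setup_commit() out implies it is registered as the atomic_commit_setup hook, so color-state validation can fail the commit before any hardware is touched. The registration itself is not part of this hunk; the following is an assumption about the hookup (the DRM struct and its fields are real API, the amdgpu symbol name is guessed):

static const struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_setup = amdgpu_dm_atomic_setup_commit,
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail,
};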
+/**
+ * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
+ * @state: The atomic state to commit
+ *
+ * This will tell DC to commit the constructed DC state from atomic_check,
+ * programming the hardware. Any failure here implies a hardware failure, since
+ * atomic check should have filtered anything non-kosher.
+ */
+static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct dm_atomic_state *dm_state;
+ struct dc_state *dc_state = NULL;
+ u32 i, j;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ unsigned long flags;
+ bool wait_for_vblank = true;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state = NULL, *new_con_state = NULL;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ int crtc_disable_count = 0;
+
+ trace_amdgpu_dm_atomic_commit_tail_begin(state);
+
+ drm_atomic_helper_update_legacy_modeset_state(dev, state);
+ drm_dp_mst_atomic_wait_for_dependencies(state);
+
+ dm_state = dm_atomic_get_new_state(state);
+ if (dm_state && dm_state->context) {
+ dc_state = dm_state->context;
+ amdgpu_dm_commit_streams(state, dc_state);
+ }
+
+ amdgpu_dm_update_hdcp(state);
/* Handle connector state changes */
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
@@ -10346,6 +10543,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
&stream_update);
mutex_unlock(&dm->dc_lock);
kfree(dummy_updates);
+
+ drm_connector_update_privacy_screen(new_con_state);
}
/**
@@ -10397,6 +10596,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
if (amdgpu_dm_crc_window_is_activated(crtc)) {
uint8_t cnt;
+
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
for (cnt = 0; cnt < MAX_CRC_WINDOW_NUM; cnt++) {
if (acrtc->dm_irq_params.window_param[cnt].enable) {
@@ -10699,6 +10899,8 @@ static void get_freesync_config_for_crtc(
} else {
config.state = VRR_STATE_INACTIVE;
}
+ } else {
+ config.state = VRR_STATE_UNSUPPORTED;
}
out:
new_crtc_state->freesync_config = config;
@@ -11016,7 +11218,7 @@ skip_modeset:
if (dm_new_crtc_state->base.color_mgmt_changed ||
dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf ||
drm_atomic_crtc_needs_modeset(new_crtc_state)) {
- ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
+ ret = amdgpu_dm_check_crtc_color_mgmt(dm_new_crtc_state, true);
if (ret)
goto fail;
}
@@ -12600,7 +12802,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
dm_con_state = to_dm_connector_state(connector->state);
- if (!adev->dm.freesync_module)
+ if (!adev->dm.freesync_module || !dc_supports_vrr(sink->ctx->dce_version))
goto update;
edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw()
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index b937da0a4e4a..009f206226f0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright (C) 2015-2020 Advanced Micro Devices, Inc. All rights reserved.
*
@@ -152,6 +153,20 @@ struct idle_workqueue {
bool running;
};
+/**
+ * struct vupdate_offload_work - Work data for offloading a task from the vupdate handler
+ * @work: Kernel work data for the work event
+ * @adev: amdgpu_device back pointer
+ * @stream: DC stream associated with the crtc
+ * @adjust: DC CRTC timing adjust to be applied to the crtc
+ */
+struct vupdate_offload_work {
+ struct work_struct work;
+ struct amdgpu_device *adev;
+ struct dc_stream_state *stream;
+ struct dc_crtc_timing_adjust *adjust;
+};
+
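The vupdate interrupt handler cannot sleep, so this struct lets it hand the timing adjustment off to process context. A hedged sketch of the intended producer/consumer pair (the example_* names are invented for illustration):

static void example_vupdate_worker(struct work_struct *work)
{
	struct vupdate_offload_work *ow =
		container_of(work, struct vupdate_offload_work, work);

	/* ...apply ow->adjust to ow->stream under the DC lock... */
	kfree(ow->adjust);
	kfree(ow);
}

static void example_queue_from_irq(struct amdgpu_device *adev,
				   struct dc_stream_state *stream,
				   struct dc_crtc_timing_adjust *adjust)
{
	struct vupdate_offload_work *ow = kzalloc(sizeof(*ow), GFP_ATOMIC);

	if (!ow)
		return;

	INIT_WORK(&ow->work, example_vupdate_worker);
	ow->adev = adev;
	ow->stream = stream;
	ow->adjust = adjust;	/* caller-allocated copy, freed by the worker */
	schedule_work(&ow->work);
}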
#define MAX_LUMINANCE_DATA_POINTS 99
/**
@@ -201,6 +216,11 @@ struct amdgpu_dm_backlight_caps {
*/
bool aux_support;
/**
+	 * @brightness_mask: After deriving the brightness value, OR it with this
+	 * mask. Workaround for panels that misbehave at certain brightness values.
+ */
+ u32 brightness_mask;
+ /**
* @ac_level: the default brightness if booted on AC
*/
u8 ac_level;
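The effect of @brightness_mask above is easiest to see with numbers: with a mask of 0x1 every derived level is forced odd, so a computed 0x80 is programmed as 0x81 (the mask value is illustrative; real quirks are per panel):

static u32 example_apply_brightness_mask(u32 level, u32 brightness_mask)
{
	return level | brightness_mask;	/* e.g. 0x80 | 0x1 = 0x81 */
}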
@@ -611,6 +631,13 @@ struct amdgpu_display_manager {
u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP];
/**
+ * @restore_backlight:
+ *
+	 * Flag to indicate whether to restore the backlight level after a modeset.
+ */
+ bool restore_backlight;
+
+ /**
* @aux_hpd_discon_quirk:
*
* quirk for hpd discon while aux is on-going.
@@ -753,6 +780,9 @@ struct amdgpu_dm_connector {
uint16_t vc_full_pbn;
struct mutex handle_mst_msg_ready;
+ /* branch device specific data */
+ uint32_t branch_ieee_oui;
+
/* TODO see if we can merge with ddc_bus or make a dm_connector */
struct amdgpu_i2c_adapter *i2c;
@@ -776,6 +806,7 @@ struct amdgpu_dm_connector {
bool fake_enable;
bool force_yuv420_output;
+ bool force_yuv422_output;
struct dsc_preferred_settings dsc_settings;
union dp_downstream_port_present mst_downstream_port_present;
/* Cached display modes */
@@ -1023,6 +1054,8 @@ void amdgpu_dm_init_color_mod(void);
int amdgpu_dm_create_color_properties(struct amdgpu_device *adev);
int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state);
int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
+int amdgpu_dm_check_crtc_color_mgmt(struct dm_crtc_state *crtc,
+ bool check_only);
int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
struct drm_plane_state *plane_state,
struct dc_plane_state *dc_plane_state);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index ebabfe3a512f..a4ac6d442278 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
@@ -27,7 +28,6 @@
#include "amdgpu_dm.h"
#include "dc.h"
#include "modules/color/color_gamma.h"
-#include "basics/conversion.h"
/**
* DOC: overview
@@ -566,12 +566,11 @@ static int __set_output_tf(struct dc_transfer_func *func,
return res ? 0 : -ENOMEM;
}
-static int amdgpu_dm_set_atomic_regamma(struct dc_stream_state *stream,
+static int amdgpu_dm_set_atomic_regamma(struct dc_transfer_func *out_tf,
const struct drm_color_lut *regamma_lut,
uint32_t regamma_size, bool has_rom,
enum dc_transfer_func_predefined tf)
{
- struct dc_transfer_func *out_tf = &stream->out_transfer_func;
int ret = 0;
if (regamma_size || tf != TRANSFER_FUNCTION_LINEAR) {
@@ -821,7 +820,7 @@ int amdgpu_dm_verify_lut3d_size(struct amdgpu_device *adev,
struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
const struct drm_color_lut *shaper = NULL, *lut3d = NULL;
uint32_t exp_size, size, dim_size = MAX_COLOR_3DLUT_SIZE;
- bool has_3dlut = adev->dm.dc->caps.color.dpp.hw_3d_lut;
+ bool has_3dlut = adev->dm.dc->caps.color.dpp.hw_3d_lut || adev->dm.dc->caps.color.mpc.preblend;
/* shaper LUT is only available if 3D LUT color caps */
exp_size = has_3dlut ? MAX_COLOR_LUT_ENTRIES : 0;
@@ -885,33 +884,33 @@ int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state)
}
/**
- * amdgpu_dm_update_crtc_color_mgmt: Maps DRM color management to DC stream.
+ * amdgpu_dm_check_crtc_color_mgmt: Check if DRM color props are programmable by DC.
* @crtc: amdgpu_dm crtc state
+ * @check_only: only check the color state without updating the DC stream
*
- * With no plane level color management properties we're free to use any
- * of the HW blocks as long as the CRTC CTM always comes before the
- * CRTC RGM and after the CRTC DGM.
- *
- * - The CRTC RGM block will be placed in the RGM LUT block if it is non-linear.
- * - The CRTC DGM block will be placed in the DGM LUT block if it is non-linear.
- * - The CRTC CTM will be placed in the gamut remap block if it is non-linear.
+ * This function verifies the CRTC LUT sizes, checks that there is enough
+ * space for the output transfer function, and that its parameters can be
+ * calculated by the AMD color module. It also adjusts some settings for
+ * programming CRTC degamma at the plane stage, using the plane DGM block.
*
* The RGM block is typically more fully featured and accurate across
* all ASICs - DCE can't support a custom non-linear CRTC DGM.
*
* For supporting both plane level color management and CRTC level color
- * management at once we have to either restrict the usage of CRTC properties
- * or blend adjustments together.
+ * management at once we have to either restrict the usage of some CRTC
+ * properties or blend adjustments together.
*
* Returns:
- * 0 on success. Error code if setup fails.
+ * 0 on success. Error code if validation fails.
*/
-int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
+int amdgpu_dm_check_crtc_color_mgmt(struct dm_crtc_state *crtc,
+ bool check_only)
{
struct dc_stream_state *stream = crtc->stream;
struct amdgpu_device *adev = drm_to_adev(crtc->base.state->dev);
bool has_rom = adev->asic_type <= CHIP_RAVEN;
- struct drm_color_ctm *ctm = NULL;
+ struct dc_transfer_func *out_tf;
const struct drm_color_lut *degamma_lut, *regamma_lut;
uint32_t degamma_size, regamma_size;
bool has_regamma, has_degamma;
@@ -940,6 +939,14 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
crtc->cm_has_degamma = false;
crtc->cm_is_degamma_srgb = false;
+ if (check_only) {
+ out_tf = kvzalloc(sizeof(*out_tf), GFP_KERNEL);
+ if (!out_tf)
+ return -ENOMEM;
+ } else {
+ out_tf = &stream->out_transfer_func;
+ }
+
/* Setup regamma and degamma. */
if (is_legacy) {
/*
@@ -954,8 +961,8 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
* inverse color ramp in legacy userspace.
*/
crtc->cm_is_degamma_srgb = true;
- stream->out_transfer_func.type = TF_TYPE_DISTRIBUTED_POINTS;
- stream->out_transfer_func.tf = TRANSFER_FUNCTION_SRGB;
+ out_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+ out_tf->tf = TRANSFER_FUNCTION_SRGB;
/*
* Note: although we pass has_rom as parameter here, we never
* actually use ROM because the color module only takes the ROM
@@ -963,16 +970,12 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
*
* See more in mod_color_calculate_regamma_params()
*/
- r = __set_legacy_tf(&stream->out_transfer_func, regamma_lut,
+ r = __set_legacy_tf(out_tf, regamma_lut,
regamma_size, has_rom);
- if (r)
- return r;
} else {
regamma_size = has_regamma ? regamma_size : 0;
- r = amdgpu_dm_set_atomic_regamma(stream, regamma_lut,
+ r = amdgpu_dm_set_atomic_regamma(out_tf, regamma_lut,
regamma_size, has_rom, tf);
- if (r)
- return r;
}
/*
@@ -981,6 +984,43 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
* have to place the CTM in the OCSC in that case.
*/
crtc->cm_has_degamma = has_degamma;
+ if (check_only)
+ kvfree(out_tf);
+
+ return r;
+}
+
+/**
+ * amdgpu_dm_update_crtc_color_mgmt: Maps DRM color management to DC stream.
+ * @crtc: amdgpu_dm crtc state
+ *
+ * With no plane level color management properties we're free to use any
+ * of the HW blocks as long as the CRTC CTM always comes before the
+ * CRTC RGM and after the CRTC DGM.
+ *
+ * - The CRTC RGM block will be placed in the RGM LUT block if it is non-linear.
+ * - The CRTC DGM block will be placed in the DGM LUT block if it is non-linear.
+ * - The CRTC CTM will be placed in the gamut remap block if it is non-linear.
+ *
+ * The RGM block is typically more fully featured and accurate across
+ * all ASICs - DCE can't support a custom non-linear CRTC DGM.
+ *
+ * For supporting both plane level color management and CRTC level color
+ * management at once we have to either restrict the usage of CRTC properties
+ * or blend adjustments together.
+ *
+ * Returns:
+ * 0 on success. Error code if setup fails.
+ */
+int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
+{
+ struct dc_stream_state *stream = crtc->stream;
+ struct drm_color_ctm *ctm = NULL;
+ int ret;
+
+ ret = amdgpu_dm_check_crtc_color_mgmt(crtc, false);
+ if (ret)
+ return ret;
/* Setup CRTC CTM. */
if (crtc->base.ctm) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index 033bd817d871..e20aa7438066 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
index 3da056c8d20b..95bdb8699d7f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 2551823382f8..1ec9d03ad747 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -218,8 +218,10 @@ static void amdgpu_dm_idle_worker(struct work_struct *work)
break;
}
- if (idle_work->enable)
+ if (idle_work->enable) {
+ dc_post_update_surfaces_to_stream(idle_work->dm->dc);
dc_allow_idle_optimizations(idle_work->dm->dc, true);
+ }
mutex_unlock(&idle_work->dm->dc_lock);
}
idle_work->dm->idle_workqueue->running = false;
@@ -273,8 +275,10 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
vblank_work->acrtc->dm_irq_params.allow_sr_entry);
}
- if (dm->active_vblank_irq_count == 0)
+ if (dm->active_vblank_irq_count == 0) {
+ dc_post_update_surfaces_to_stream(dm->dc);
dc_allow_idle_optimizations(dm->dc, true);
+ }
mutex_unlock(&dm->dc_lock);
@@ -299,12 +303,35 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
if (enable) {
- /* vblank irq on -> Only need vupdate irq in vrr mode */
- if (amdgpu_dm_crtc_vrr_active(acrtc_state))
- rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true);
- } else {
- /* vblank irq off -> vupdate irq off */
- rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, false);
+ struct dc *dc = adev->dm.dc;
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
+ struct psr_settings *psr = &acrtc_state->stream->link->psr_settings;
+ struct replay_settings *pr = &acrtc_state->stream->link->replay_settings;
+ bool sr_supported = (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED) ||
+ pr->config.replay_supported;
+
+ /*
+	 * IPS & self-refresh can cause vblank counter resets between vblank
+	 * disable and enable.
+	 * This may leave the system stuck waiting on the vblank counter.
+	 * Call drm_crtc_vblank_restore() to estimate the missed vblanks from
+	 * timestamps and update the vblank counter in DRM.
+ */
+ if (dc->caps.ips_support &&
+ dc->config.disable_ips != DMUB_IPS_DISABLE_ALL &&
+ sr_supported && vblank->config.disable_immediate)
+ drm_crtc_vblank_restore(crtc);
+ }
+
+ if (dc_supports_vrr(dm->dc->ctx->dce_version)) {
+ if (enable) {
+ /* vblank irq on -> Only need vupdate irq in vrr mode */
+ if (amdgpu_dm_crtc_vrr_active(acrtc_state))
+ rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true);
+ } else {
+ /* vblank irq off -> vupdate irq off */
+ rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, false);
+ }
}
if (rc)
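The restore path referenced in the comment block above is a stock DRM helper that re-derives the software vblank count from timestamps; in isolation:

/* Sketch: after a period in which IPS/PSR may have reset the hardware
 * counter, rebuild the DRM vblank count from timestamps before waiters
 * are released. Only valid with immediate vblank disable. */
if (vblank->config.disable_immediate)
	drm_crtc_vblank_restore(crtc);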
@@ -661,6 +688,15 @@ static int amdgpu_dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
return -EINVAL;
}
+ if (!state->legacy_cursor_update && amdgpu_dm_crtc_vrr_active(dm_crtc_state)) {
+ struct drm_plane_state *primary_state;
+
+ /* Pull in primary plane for correct VRR handling */
+ primary_state = drm_atomic_get_plane_state(state, crtc->primary);
+ if (IS_ERR(primary_state))
+ return PTR_ERR(primary_state);
+ }
+
/* In some use cases, like reset, no stream is attached */
if (!dm_crtc_state->stream)
return 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index b726bcd18e29..f263e1a4537e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
@@ -3106,6 +3107,35 @@ static int replay_get_state(void *data, u64 *val)
}
/*
+ * Start / stop Replay residency capture
+ */
+static int replay_set_residency(void *data, u64 val)
+{
+ struct amdgpu_dm_connector *connector = data;
+ struct dc_link *link = connector->dc_link;
+ bool is_start = (val != 0);
+ u32 residency = 0;
+
+ link->dc->link_srv->edp_replay_residency(link, &residency, is_start, PR_RESIDENCY_MODE_PHY);
+ return 0;
+}
+
+/*
+ * Read Replay residency
+ */
+static int replay_get_residency(void *data, u64 *val)
+{
+ struct amdgpu_dm_connector *connector = data;
+ struct dc_link *link = connector->dc_link;
+ u32 residency = 0;
+
+ link->dc->link_srv->edp_replay_residency(link, &residency, false, PR_RESIDENCY_MODE_PHY);
+ *val = (u64)residency;
+
+ return 0;
+}
+
+/*
* Read PSR state
*/
static int psr_get(void *data, u64 *val)
@@ -3324,7 +3354,8 @@ DEFINE_DEBUGFS_ATTRIBUTE(dmcub_trace_event_state_fops, dmcub_trace_event_state_g
dmcub_trace_event_state_set, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(replay_state_fops, replay_get_state, NULL, "%llu\n");
-
+DEFINE_DEBUGFS_ATTRIBUTE(replay_residency_fops, replay_get_residency, replay_set_residency,
+ "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(psr_fops, psr_get, NULL, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(psr_residency_fops, psr_read_residency, NULL,
"%llu\n");
@@ -3502,6 +3533,8 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
debugfs_create_file("replay_capability", 0444, dir, connector,
&replay_capability_fops);
debugfs_create_file("replay_state", 0444, dir, connector, &replay_state_fops);
+ debugfs_create_file_unsafe("replay_residency", 0444, dir,
+ connector, &replay_residency_fops);
debugfs_create_file_unsafe("psr_capability", 0444, dir, connector, &psr_capability_fops);
debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops);
debugfs_create_file_unsafe("psr_residency", 0444, dir,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
index 071200473c27..122cdc124b3b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index b1d1897f5eaf..19038f336155 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
@@ -222,6 +223,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
display_adjust.disable = MOD_HDCP_DISPLAY_NOT_DISABLE;
link_adjust.auth_delay = 2;
+ link_adjust.retry_limit = MAX_NUM_OF_ATTEMPTS;
if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) {
link_adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
@@ -571,6 +573,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
link->dp.usb4_enabled = config->usb4_enabled;
display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION;
link->adjust.auth_delay = 2;
+ link->adjust.retry_limit = MAX_NUM_OF_ATTEMPTS;
link->adjust.hdcp1.disable = 0;
hdcp_w->encryption_status[display->index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
@@ -765,14 +768,18 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev,
struct mod_hdcp_ddc_funcs *ddc_funcs = &config->ddc.funcs;
config->psp.handle = &adev->psp;
- if (dc->ctx->dce_version == DCN_VERSION_3_1 ||
+ if (dc->ctx->dce_version == DCN_VERSION_3_1 ||
dc->ctx->dce_version == DCN_VERSION_3_14 ||
dc->ctx->dce_version == DCN_VERSION_3_15 ||
- dc->ctx->dce_version == DCN_VERSION_3_5 ||
+ dc->ctx->dce_version == DCN_VERSION_3_16 ||
+ dc->ctx->dce_version == DCN_VERSION_3_2 ||
+ dc->ctx->dce_version == DCN_VERSION_3_21 ||
+ dc->ctx->dce_version == DCN_VERSION_3_5 ||
dc->ctx->dce_version == DCN_VERSION_3_51 ||
- dc->ctx->dce_version == DCN_VERSION_3_6 ||
- dc->ctx->dce_version == DCN_VERSION_3_16)
+ dc->ctx->dce_version == DCN_VERSION_3_6 ||
+ dc->ctx->dce_version == DCN_VERSION_4_01)
config->psp.caps.dtm_v3_supported = 1;
+
config->ddc.handle = dc_get_link_at_index(dc, i);
ddc_funcs->write_i2c = lp_write_i2c;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
index 69b445b011c8..4faa344f196e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 9e3e51a2dc49..fe100e4c9801 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index b61e210f6246..a1c722112c22 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
index ba17c23b2706..4f6b58f4f90d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
index 6c9de834455b..3c9995275cbd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 7187d5aedf0a..5e92eaa67aa3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
@@ -329,6 +330,34 @@ static bool retrieve_downstream_port_device(struct amdgpu_dm_connector *aconnect
return true;
}
+static bool retrieve_branch_specific_data(struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_dp_mst_port *port = aconnector->mst_output_port;
+ struct drm_dp_mst_port *port_parent;
+ struct drm_dp_aux *immediate_upstream_aux;
+ struct drm_dp_desc branch_desc;
+
+ if (!port->parent)
+ return false;
+
+ port_parent = port->parent->port_parent;
+
+ immediate_upstream_aux = port_parent ? &port_parent->aux : port->mgr->aux;
+
+ if (drm_dp_read_desc(immediate_upstream_aux, &branch_desc, true))
+ return false;
+
+ aconnector->branch_ieee_oui = (branch_desc.ident.oui[0] << 16) +
+ (branch_desc.ident.oui[1] << 8) +
+ (branch_desc.ident.oui[2]);
+
+ drm_dbg_dp(port->aux.drm_dev, "MST branch oui 0x%x detected at %s\n",
+ aconnector->branch_ieee_oui, connector->name);
+
+ return true;
+}
+
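The OUI packing above is plain big-endian byte assembly; worked through, it also explains the constant used later in this file:

/* A branch whose ident.oui is {0x90, 0xCC, 0x24} packs to
 *   (0x90 << 16) + (0xCC << 8) + 0x24 = 0x90CC24,
 * i.e. DP_BRANCH_DEVICE_ID_90CC24, the device exempted from the
 * last-link bandwidth check in dm_dp_mst_is_port_support_mode(). */
u8 oui[3] = { 0x90, 0xCC, 0x24 };
u32 packed = (oui[0] << 16) + (oui[1] << 8) + oui[2];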
static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
@@ -668,6 +697,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
drm_connector_set_path_property(connector, pathprop);
+ if (!retrieve_branch_specific_data(aconnector))
+ aconnector->branch_ieee_oui = 0;
+
/*
* Initialize connector state before adding the connectror to drm and
* framebuffer lists
@@ -809,6 +841,7 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
drm_dp_aux_init(&aconnector->dm_dp_aux.aux);
drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
&aconnector->base);
+ drm_dp_dpcd_set_probe(&aconnector->dm_dp_aux.aux, false);
if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
return;
@@ -821,13 +854,20 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
drm_connector_attach_dp_subconnector_property(&aconnector->base);
}
-int dm_mst_get_pbn_divider(struct dc_link *link)
+uint32_t dm_mst_get_pbn_divider(struct dc_link *link)
{
+ uint32_t pbn_div_x100;
+ uint64_t dividend, divisor;
+
if (!link)
return 0;
- return dc_link_bandwidth_kbps(link,
- dc_link_get_link_cap(link)) / (8 * 1000 * 54);
+ dividend = (uint64_t)dc_link_bandwidth_kbps(link, dc_link_get_link_cap(link)) * 100;
+ divisor = 8 * 1000 * 54;
+
+ pbn_div_x100 = div64_u64(dividend, divisor);
+
+ return dfixed_const(pbn_div_x100) / 100;
}
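The x100 scaling keeps two fractional digits that the old integer division discarded before the conversion to 20.12 fixed point. Worked through for HBR2 x 4 lanes, where dc_link_bandwidth_kbps() yields 17,280,000:

/*   dividend     = 17,280,000 * 100         = 1,728,000,000
 *   pbn_div_x100 = 1,728,000,000 / 432,000  = 4,000
 *   return value = dfixed_const(4000) / 100 = 16,384,000 / 100
 *                = 163,840                  = 40.00 in 20.12 format
 * A bandwidth that does not divide evenly keeps its two decimals: an
 * illustrative 8,100,000 kbps link gives 1,875 -> 76,800 = 18.75. */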
struct dsc_mst_fairness_params {
@@ -1762,14 +1802,20 @@ static bool dp_get_link_current_set_bw(struct drm_dp_aux *aux, uint32_t *cur_lin
union lane_count_set lane_count;
u8 dp_link_encoding;
u8 link_bw_set = 0;
+ u8 data[16] = {0};
*cur_link_bw = 0;
- if (drm_dp_dpcd_read(aux, DP_MAIN_LINK_CHANNEL_CODING_SET, &dp_link_encoding, 1) != 1 ||
- drm_dp_dpcd_read(aux, DP_LANE_COUNT_SET, &lane_count.raw, 1) != 1 ||
- drm_dp_dpcd_read(aux, DP_LINK_BW_SET, &link_bw_set, 1) != 1)
+ if (drm_dp_dpcd_read(aux, DP_LINK_BW_SET, data, 16) != 16)
return false;
+ dp_link_encoding = data[DP_MAIN_LINK_CHANNEL_CODING_SET - DP_LINK_BW_SET];
+ link_bw_set = data[DP_LINK_BW_SET - DP_LINK_BW_SET];
+ lane_count.raw = data[DP_LANE_COUNT_SET - DP_LINK_BW_SET];
+
+ drm_dbg_dp(aux->drm_dev, "MST_DSC downlink setting: %d, 0x%x x %d\n",
+ dp_link_encoding, link_bw_set, lane_count.bits.LANE_COUNT_SET);
+
switch (dp_link_encoding) {
case DP_8b_10b_ENCODING:
link_rate = link_bw_set;
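The single 16-byte read in the hunk above works because all three registers sit inside one window starting at DP_LINK_BW_SET (DPCD address 0x100):

/*   data[DP_LINK_BW_SET                  - DP_LINK_BW_SET] = data[0] (0x100)
 *   data[DP_LANE_COUNT_SET               - DP_LINK_BW_SET] = data[1] (0x101)
 *   data[DP_MAIN_LINK_CHANNEL_CODING_SET - DP_LINK_BW_SET] = data[8] (0x108)
 * so one AUX transaction replaces the previous three 1-byte reads. */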
@@ -1866,8 +1912,10 @@ enum dc_status dm_dp_mst_is_port_support_mode(
end_link_bw = aconnector->mst_local_bw;
}
- if (end_link_bw > 0 && stream_kbps > end_link_bw) {
- DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link."
+ if (end_link_bw > 0 &&
+ stream_kbps > end_link_bw &&
+ aconnector->branch_ieee_oui != DP_BRANCH_DEVICE_ID_90CC24) {
+ DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link. "
"Mode required bw can't fit into last link\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index 600d6e221011..6f7ea684b555 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
@@ -59,7 +60,7 @@ enum mst_msg_ready_type {
struct amdgpu_display_manager;
struct amdgpu_dm_connector;
-int dm_mst_get_pbn_divider(struct dc_link *link);
+uint32_t dm_mst_get_pbn_divider(struct dc_link *link);
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index eef51652ca35..e027798ece03 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -146,7 +146,7 @@ static void amdgpu_dm_plane_add_modifier(uint64_t **mods, uint64_t *size, uint64
if (*cap - *size < 1) {
uint64_t new_cap = *cap * 2;
- uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
+ uint64_t *new_mods = kmalloc_array(new_cap, sizeof(uint64_t), GFP_KERNEL);
if (!new_mods) {
kfree(*mods);
@@ -732,7 +732,7 @@ static int amdgpu_dm_plane_get_plane_modifiers(struct amdgpu_device *adev, unsig
if (adev->family < AMDGPU_FAMILY_AI)
return 0;
- *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
+ *mods = kmalloc_array(capacity, sizeof(uint64_t), GFP_KERNEL);
if (plane_type == DRM_PLANE_TYPE_CURSOR) {
amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
@@ -1633,7 +1633,7 @@ dm_atomic_plane_attach_color_mgmt_properties(struct amdgpu_display_manager *dm,
drm_object_attach_property(&plane->base,
dm->adev->mode_info.plane_ctm_property, 0);
- if (dpp_color_caps.hw_3d_lut) {
+ if (dpp_color_caps.hw_3d_lut || dm->dc->caps.color.mpc.preblend) {
drm_object_attach_property(&plane->base,
mode_info.plane_shaper_lut_property, 0);
drm_object_attach_property(&plane->base,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index 848c5b4bb301..11b2ea6edf95 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
@@ -97,6 +98,7 @@ bool dm_pp_apply_display_requirements(
const struct dm_pp_single_disp_config *dc_cfg =
&pp_display_cfg->disp_configs[i];
adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
+ adev->pm.pm_display_cfg.displays[i].pixel_clock = dc_cfg->pixel_clock;
}
amdgpu_dpm_display_configuration_change(adev, &adev->pm.pm_display_cfg);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index ff7b867ae98b..fd491b7a3cd7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
@@ -26,7 +27,6 @@
#include "amdgpu_dm_psr.h"
#include "dc_dmub_srv.h"
#include "dc.h"
-#include "dm_helpers.h"
#include "amdgpu_dm.h"
#include "modules/power/power_helpers.h"
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
index e2366321a3c1..4fb8626913cf 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
index 41f07f13a7b5..80704d709e44 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
@@ -30,7 +31,7 @@
#include "amdgpu_dm.h"
#include "modules/power/power_helpers.h"
#include "dmub/inc/dmub_cmd.h"
-#include "dc/inc/link.h"
+#include "dc/inc/link_service.h"
/*
* amdgpu_dm_link_supports_replay() - check if the link supports replay
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
index 8126bdb1eb6b..73b6c67ae5e7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index 0005f5f8f34f..8550d5e8b753 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
@@ -52,11 +53,11 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line, struct dc
func_name, line);
}
-void dm_trace_smu_msg(uint32_t msg_id, uint32_t param_in, struct dc_context *ctx)
+void dm_trace_smu_enter(uint32_t msg_id, uint32_t param_in, unsigned int delay, struct dc_context *ctx)
{
}
-void dm_trace_smu_delay(uint32_t delay, struct dc_context *ctx)
+void dm_trace_smu_exit(bool success, uint32_t response, struct dc_context *ctx)
{
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
index 95f890fda8aa..aa56fd6d56c3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 3c9ecea7eebc..dc943abd6dba 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -37,6 +37,7 @@ DC_LIBS += dcn301
DC_LIBS += dcn31
DC_LIBS += dml
DC_LIBS += dml2
+DC_LIBS += soc_and_ip_translator
endif
DC_LIBS += dce120
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
index d897f8a30ede..4da5adab799c 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
@@ -1136,7 +1136,7 @@ static void calculate_bandwidth(
}
}
}
- data->total_dmifmc_urgent_trips = bw_ceil2(bw_div(data->total_requests_for_adjusted_dmif_size, (bw_add(dceip->dmif_request_buffer_size, bw_int_to_fixed(vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel * data->number_of_dram_channels)))), bw_int_to_fixed(1));
+ data->total_dmifmc_urgent_trips = bw_ceil2(bw_div(data->total_requests_for_adjusted_dmif_size, (bw_add(dceip->dmif_request_buffer_size, bw_int_to_fixed((uint64_t)vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel * data->number_of_dram_channels)))), bw_int_to_fixed(1));
data->total_dmifmc_urgent_latency = bw_mul(vbios->dmifmc_urgent_latency, data->total_dmifmc_urgent_trips);
data->total_display_reads_required_data = bw_int_to_fixed(0);
data->total_display_reads_required_dram_access_data = bw_int_to_fixed(0);
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
index 452206b5095e..6073cadde76c 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
@@ -284,7 +284,7 @@ struct fixed31_32 dc_fixpt_cos(struct fixed31_32 arg)
dc_fixpt_mul(
square,
res),
- n * (n - 1)));
+ (long long)n * (n - 1)));
n -= 2;
} while (n != 0);
diff --git a/drivers/gpu/drm/amd/display/dc/basics/vector.c b/drivers/gpu/drm/amd/display/dc/basics/vector.c
index 6d2924114a3e..b413a672c2c0 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/vector.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/vector.c
@@ -170,7 +170,7 @@ bool dal_vector_remove_at_index(
memmove(
vector->container + (index * vector->struct_size),
vector->container + ((index + 1) * vector->struct_size),
- (vector->count - index - 1) * vector->struct_size);
+ (size_t)(vector->count - index - 1) * vector->struct_size);
vector->count -= 1;
return true;
@@ -219,7 +219,7 @@ bool dal_vector_insert_at(
memmove(
insert_address + vector->struct_size,
insert_address,
- vector->struct_size * (vector->count - position));
+ (size_t)vector->struct_size * (vector->count - position));
memmove(
insert_address,
@@ -271,7 +271,7 @@ struct vector *dal_vector_clone(
/* copy vector's data */
memmove(vec_cloned->container, vector->container,
- vec_cloned->struct_size * vec_cloned->capacity);
+ (size_t)vec_cloned->struct_size * vec_cloned->capacity);
return vec_cloned;
}
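The (size_t) casts in this file all fix the same latent bug: both operands are 32-bit, so the multiplication is performed in 32-bit arithmetic and can wrap before memmove() ever sees the result. With illustrative sizes:

u32 count = 100000, struct_size = 65536;
size_t n_bad  = count * struct_size;		/* 32-bit multiply wraps */
size_t n_good = (size_t)count * struct_size;	/* 6,553,600,000 on 64-bit builds */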
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 67f08495b7e6..154fd2c18e88 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -174,11 +174,8 @@ static struct graphics_object_id bios_parser_get_connector_id(
return object_id;
}
- if (tbl->ucNumberOfObjects <= i) {
- dm_error("Can't find connector id %d in connector table of size %d.\n",
- i, tbl->ucNumberOfObjects);
+ if (tbl->ucNumberOfObjects <= i)
return object_id;
- }
id = le16_to_cpu(tbl->asObjects[i].usObjectID);
object_id = object_id_from_bios_object_id(id);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 2bcae0643e61..58e88778da7f 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -993,7 +993,7 @@ static enum bp_result set_pixel_clock_v3(
allocation.sPCLKInput.usFbDiv =
cpu_to_le16((uint16_t)bp_params->feedback_divider);
allocation.sPCLKInput.ucFracFbDiv =
- (uint8_t)bp_params->fractional_feedback_divider;
+ (uint8_t)(bp_params->fractional_feedback_divider / 100000);
allocation.sPCLKInput.ucPostDiv =
(uint8_t)bp_params->pixel_clock_post_divider;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 33b9d36619ff..15cf13ec5302 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -28,7 +28,7 @@
#include "dccg.h"
#include "clk_mgr_internal.h"
#include "dc_state_priv.h"
-#include "link.h"
+#include "link_service.h"
#include "dce100/dce_clk_mgr.h"
#include "dce110/dce110_clk_mgr.h"
@@ -158,7 +158,6 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
return NULL;
}
dce60_clk_mgr_construct(ctx, clk_mgr);
- dce_clk_mgr_construct(ctx, clk_mgr);
return &clk_mgr->base;
}
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
index 26feefbb8990..6131ede2db7a 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
@@ -72,9 +72,9 @@ static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
/* ClocksStateLow */
{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
/* ClocksStateNominal */
-{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
+{ .display_clk_khz = 625000, .pixel_clk_khz = 400000 },
/* ClocksStatePerformance */
-{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
+{ .display_clk_khz = 625000, .pixel_clk_khz = 400000 } };
int dentist_get_divider_from_did(int did)
{
@@ -245,6 +245,11 @@ int dce_set_clock(
pxl_clk_params.target_pixel_clock_100hz = requested_clk_khz * 10;
pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+ /* DCE 6.0, DCE 6.4: engine clock is the same as PLL0 */
+ if (clk_mgr_base->ctx->dce_version == DCE_VERSION_6_0 ||
+ clk_mgr_base->ctx->dce_version == DCE_VERSION_6_4)
+ pxl_clk_params.pll_id = CLOCK_SOURCE_ID_PLL0;
+
if (clk_mgr_dce->dfs_bypass_active)
pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;
@@ -386,8 +391,6 @@ static void dce_pplib_apply_display_requirements(
{
struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
- pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
-
dce110_fill_display_configs(context, pp_display_cfg);
if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
@@ -400,11 +403,9 @@ static void dce_update_clocks(struct clk_mgr *clk_mgr_base,
{
struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dm_pp_power_level_change_request level_change_req;
- int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
-
- /*TODO: W/A for dal3 linux, investigate why this works */
- if (!clk_mgr_dce->dfs_bypass_active)
- patched_disp_clk = patched_disp_clk * 115 / 100;
+ const int max_disp_clk =
+ clk_mgr_dce->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz;
+ int patched_disp_clk = MIN(max_disp_clk, context->bw_ctx.bw.dce.dispclk_khz);
level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context);
/* get max clock state from PPLIB */
@@ -462,6 +463,9 @@ void dce_clk_mgr_construct(
clk_mgr->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
clk_mgr->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;
+ base->clks.max_supported_dispclk_khz =
+ clk_mgr->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz;
+
dce_clock_read_integrated_info(clk_mgr);
dce_clock_read_ss_info(clk_mgr);
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
index f8409453434c..d50b9440210e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
@@ -120,9 +120,15 @@ void dce110_fill_display_configs(
const struct dc_state *context,
struct dm_pp_display_configuration *pp_display_cfg)
{
+ struct dc *dc = context->clk_mgr->ctx->dc;
int j;
int num_cfgs = 0;
+ pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
+ pp_display_cfg->disp_clk_khz = dc->clk_mgr->clks.dispclk_khz;
+ pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
+ pp_display_cfg->crtc_index = dc->res_pool->res_cap->num_timing_generator;
+
for (j = 0; j < context->stream_count; j++) {
int k;
@@ -158,12 +164,29 @@ void dce110_fill_display_configs(
stream->link->cur_link_settings.link_rate;
cfg->link_settings.link_spread =
stream->link->cur_link_settings.link_spread;
- cfg->sym_clock = stream->phy_pix_clk;
+ cfg->pixel_clock = stream->phy_pix_clk;
/* Round v_refresh*/
cfg->v_refresh = stream->timing.pix_clk_100hz * 100;
cfg->v_refresh /= stream->timing.h_total;
cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
/ stream->timing.v_total;
+
+		/* Find the first CRTC index and calculate its line time.
+ * This is necessary for DPM on SI GPUs.
+ */
+ if (cfg->pipe_idx < pp_display_cfg->crtc_index) {
+ const struct dc_crtc_timing *timing =
+ &context->streams[0]->timing;
+
+ pp_display_cfg->crtc_index = cfg->pipe_idx;
+ pp_display_cfg->line_time_in_us =
+ timing->h_total * 10000 / timing->pix_clk_100hz;
+ }
+ }
+
+ if (!num_cfgs) {
+ pp_display_cfg->crtc_index = 0;
+ pp_display_cfg->line_time_in_us = 0;
}
pp_display_cfg->display_count = num_cfgs;
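The line-time computation above, worked for a common 1080p timing (h_total = 2200, pix_clk_100hz = 1,485,000, i.e. 148.5 MHz):

/* line_time_in_us = h_total * 10000 / pix_clk_100hz
 *                 = 2200 * 10000 / 1,485,000
 *                 = 22,000,000 / 1,485,000
 *                 = 14 us (integer division; the exact value is ~14.81 us) */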
@@ -223,25 +246,8 @@ void dce11_pplib_apply_display_requirements(
pp_display_cfg->min_engine_clock_deep_sleep_khz
= context->bw_ctx.bw.dce.sclk_deep_sleep_khz;
- pp_display_cfg->avail_mclk_switch_time_us =
- dce110_get_min_vblank_time_us(context);
- /* TODO: dce11.2*/
- pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
-
- pp_display_cfg->disp_clk_khz = dc->clk_mgr->clks.dispclk_khz;
-
dce110_fill_display_configs(context, pp_display_cfg);
- /* TODO: is this still applicable?*/
- if (pp_display_cfg->display_count == 1) {
- const struct dc_crtc_timing *timing =
- &context->streams[0]->timing;
-
- pp_display_cfg->crtc_index =
- pp_display_cfg->disp_configs[0].pipe_idx;
- pp_display_cfg->line_time_in_us = timing->h_total * 10000 / timing->pix_clk_100hz;
- }
-
if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
index 0267644717b2..69dd80d9f738 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
@@ -83,22 +83,13 @@ static const struct state_dependent_clocks dce60_max_clks_by_state[] = {
static int dce60_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- int dprefclk_wdivider;
- int dp_ref_clk_khz;
- int target_div;
+ struct dc_context *ctx = clk_mgr_base->ctx;
+ int dp_ref_clk_khz = 0;
- /* DCE6 has no DPREFCLK_CNTL to read DP Reference Clock source */
-
- /* Read the mmDENTIST_DISPCLK_CNTL to get the currently
- * programmed DID DENTIST_DPREFCLK_WDIVIDER*/
- REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
-
- /* Convert DENTIST_DPREFCLK_WDIVIDERto actual divider*/
- target_div = dentist_get_divider_from_did(dprefclk_wdivider);
-
- /* Calculate the current DFS clock, in kHz.*/
- dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_mgr->base.dentist_vco_freq_khz) / target_div;
+ if (ASIC_REV_IS_TAHITI_P(ctx->asic_id.hw_internal_rev))
+ dp_ref_clk_khz = ctx->dc_bios->fw_info.default_display_engine_pll_frequency;
+ else
+ dp_ref_clk_khz = clk_mgr_base->clks.dispclk_khz;
return dce_adjust_dp_ref_freq_for_ss(clk_mgr, dp_ref_clk_khz);
}
@@ -109,8 +100,6 @@ static void dce60_pplib_apply_display_requirements(
{
struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
- pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
-
dce110_fill_display_configs(context, pp_display_cfg);
if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
@@ -123,11 +112,9 @@ static void dce60_update_clocks(struct clk_mgr *clk_mgr_base,
{
struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dm_pp_power_level_change_request level_change_req;
- int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
-
- /*TODO: W/A for dal3 linux, investigate why this works */
- if (!clk_mgr_dce->dfs_bypass_active)
- patched_disp_clk = patched_disp_clk * 115 / 100;
+ const int max_disp_clk =
+ clk_mgr_dce->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz;
+ int patched_disp_clk = MIN(max_disp_clk, context->bw_ctx.bw.dce.dispclk_khz);
level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context);
/* get max clock state from PPLIB */
@@ -160,6 +147,8 @@ void dce60_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_internal *clk_mgr)
{
+ struct clk_mgr *base = &clk_mgr->base;
+
dce_clk_mgr_construct(ctx, clk_mgr);
memcpy(clk_mgr->max_clks_by_state,
@@ -170,5 +159,8 @@ void dce60_clk_mgr_construct(
clk_mgr->clk_mgr_shift = &disp_clk_shift;
clk_mgr->clk_mgr_mask = &disp_clk_mask;
clk_mgr->base.funcs = &dce60_funcs;
+
+ base->clks.max_supported_dispclk_khz =
+ clk_mgr->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
index 3253115a153d..827bc2431d5d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
@@ -69,7 +69,7 @@ static uint32_t dcn30_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un
/* handle DALSMC_Result_CmdRejectedBusy? */
- TRACE_SMU_DELAY(delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx);
+ TRACE_SMU_MSG_DELAY(0, 0, delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx);
return reg;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index 9e2ef0e724fc..7aee02d56292 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -563,6 +563,7 @@ static void vg_clk_mgr_helper_populate_bw_params(
{
int i, j;
struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
+ uint32_t max_dispclk = 0, max_dppclk = 0;
j = -1;
@@ -584,6 +585,15 @@ static void vg_clk_mgr_helper_populate_bw_params(
return;
}
+ /* dispclk and dppclk can be max at any voltage, same number of levels for both */
+ if (clock_table->NumDispClkLevelsEnabled <= VG_NUM_DISPCLK_DPM_LEVELS &&
+ clock_table->NumDispClkLevelsEnabled <= VG_NUM_DPPCLK_DPM_LEVELS) {
+ max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled);
+ max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled);
+ } else {
+ ASSERT(0);
+ }
+
bw_params->clk_table.num_entries = j + 1;
for (i = 0; i < bw_params->clk_table.num_entries - 1; i++, j--) {
@@ -591,11 +601,17 @@ static void vg_clk_mgr_helper_populate_bw_params(
bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk;
bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage;
bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->DfPstateTable[j].voltage);
+
+ /* Now update clocks we do read */
+ bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
+ bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
}
bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].fclk;
bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk;
bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage;
bw_params->clk_table.entries[i].dcfclk_mhz = find_max_clk_value(clock_table->DcfClocks, VG_NUM_DCFCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dispclk_mhz = find_max_clk_value(clock_table->DispClocks, VG_NUM_DISPCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dppclk_mhz = find_max_clk_value(clock_table->DppClocks, VG_NUM_DPPCLK_DPM_LEVELS);
bw_params->vram_type = bios_info->memory_type;
bw_params->num_channels = bios_info->ma_channel_number;
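The populate path above leans on find_max_clk_value() to pick the top entry out of a fixed-size DPM table. A minimal sketch of the semantics assumed for that helper (the real implementation lives elsewhere in the clk_mgr code):

/* Hedged sketch: scan a DPM clock table and return the largest entry,
 * mirroring what the calls to find_max_clk_value() above assume. */
static uint32_t find_max_clk_value_sketch(const uint32_t *clocks,
					  uint32_t num_clocks)
{
	uint32_t max_clk = 0;
	uint32_t i;

	for (i = 0; i < num_clocks; i++) {
		if (clocks[i] > max_clk)
			max_clk = clocks[i];
	}

	return max_clk;
}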
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index bc123f1884da..051052bd10c9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -47,7 +47,7 @@
#include "dcn30/dcn30_clk_mgr.h"
#include "dc_dmub_srv.h"
-#include "link.h"
+#include "link_service.h"
#include "logger_types.h"
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index 91d872d6d392..9e63fa72101c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -48,7 +48,7 @@
#include "dcn31/dcn31_clk_mgr.h"
#include "dc_dmub_srv.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn314_smu.h"
@@ -77,6 +77,7 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0,
#undef DC_LOGGER
#define DC_LOGGER \
clk_mgr->base.base.ctx->logger
+
#define regCLK1_CLK_PLL_REQ 0x0237
#define regCLK1_CLK_PLL_REQ_BASE_IDX 0
@@ -87,8 +88,70 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0,
#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
+#define regCLK1_CLK0_DFS_CNTL 0x0269
+#define regCLK1_CLK0_DFS_CNTL_BASE_IDX 0
+#define regCLK1_CLK1_DFS_CNTL 0x026c
+#define regCLK1_CLK1_DFS_CNTL_BASE_IDX 0
+#define regCLK1_CLK2_DFS_CNTL 0x026f
+#define regCLK1_CLK2_DFS_CNTL_BASE_IDX 0
+#define regCLK1_CLK3_DFS_CNTL 0x0272
+#define regCLK1_CLK3_DFS_CNTL_BASE_IDX 0
+#define regCLK1_CLK4_DFS_CNTL 0x0275
+#define regCLK1_CLK4_DFS_CNTL_BASE_IDX 0
+#define regCLK1_CLK5_DFS_CNTL 0x0278
+#define regCLK1_CLK5_DFS_CNTL_BASE_IDX 0
+
+#define regCLK1_CLK0_CURRENT_CNT 0x02fb
+#define regCLK1_CLK0_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK1_CURRENT_CNT 0x02fc
+#define regCLK1_CLK1_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK2_CURRENT_CNT 0x02fd
+#define regCLK1_CLK2_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK3_CURRENT_CNT 0x02fe
+#define regCLK1_CLK3_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK4_CURRENT_CNT 0x02ff
+#define regCLK1_CLK4_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK5_CURRENT_CNT 0x0300
+#define regCLK1_CLK5_CURRENT_CNT_BASE_IDX 0
+
+#define regCLK1_CLK0_BYPASS_CNTL 0x028a
+#define regCLK1_CLK0_BYPASS_CNTL_BASE_IDX 0
+#define regCLK1_CLK1_BYPASS_CNTL 0x0293
+#define regCLK1_CLK1_BYPASS_CNTL_BASE_IDX 0
#define regCLK1_CLK2_BYPASS_CNTL 0x029c
#define regCLK1_CLK2_BYPASS_CNTL_BASE_IDX 0
+#define regCLK1_CLK3_BYPASS_CNTL 0x02a5
+#define regCLK1_CLK3_BYPASS_CNTL_BASE_IDX 0
+#define regCLK1_CLK4_BYPASS_CNTL 0x02ae
+#define regCLK1_CLK4_BYPASS_CNTL_BASE_IDX 0
+#define regCLK1_CLK5_BYPASS_CNTL 0x02b7
+#define regCLK1_CLK5_BYPASS_CNTL_BASE_IDX 0
+
+#define regCLK1_CLK0_DS_CNTL 0x0283
+#define regCLK1_CLK0_DS_CNTL_BASE_IDX 0
+#define regCLK1_CLK1_DS_CNTL 0x028c
+#define regCLK1_CLK1_DS_CNTL_BASE_IDX 0
+#define regCLK1_CLK2_DS_CNTL 0x0295
+#define regCLK1_CLK2_DS_CNTL_BASE_IDX 0
+#define regCLK1_CLK3_DS_CNTL 0x029e
+#define regCLK1_CLK3_DS_CNTL_BASE_IDX 0
+#define regCLK1_CLK4_DS_CNTL 0x02a7
+#define regCLK1_CLK4_DS_CNTL_BASE_IDX 0
+#define regCLK1_CLK5_DS_CNTL 0x02b0
+#define regCLK1_CLK5_DS_CNTL_BASE_IDX 0
+
+#define regCLK1_CLK0_ALLOW_DS 0x0284
+#define regCLK1_CLK0_ALLOW_DS_BASE_IDX 0
+#define regCLK1_CLK1_ALLOW_DS 0x028d
+#define regCLK1_CLK1_ALLOW_DS_BASE_IDX 0
+#define regCLK1_CLK2_ALLOW_DS 0x0296
+#define regCLK1_CLK2_ALLOW_DS_BASE_IDX 0
+#define regCLK1_CLK3_ALLOW_DS 0x029f
+#define regCLK1_CLK3_ALLOW_DS_BASE_IDX 0
+#define regCLK1_CLK4_ALLOW_DS 0x02a8
+#define regCLK1_CLK4_ALLOW_DS_BASE_IDX 0
+#define regCLK1_CLK5_ALLOW_DS 0x02b1
+#define regCLK1_CLK5_ALLOW_DS_BASE_IDX 0
#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL__SHIFT 0x0
#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV__SHIFT 0x10
@@ -185,6 +248,8 @@ void dcn314_init_clocks(struct clk_mgr *clk_mgr)
{
struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
+ struct clk_mgr_dcn314 *clk_mgr_dcn314 = TO_CLK_MGR_DCN314(clk_mgr_int);
+ struct clk_log_info log_info = {0};
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
// Assumption is that boot state always supports pstate
@@ -200,6 +265,9 @@ void dcn314_init_clocks(struct clk_mgr *clk_mgr)
dce_adjust_dp_ref_freq_for_ss(clk_mgr_int, clk_mgr->dprefclk_khz);
else
clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz;
+
+ dcn314_dump_clk_registers(&clk_mgr->boot_snapshot, &clk_mgr_dcn314->base.base, &log_info);
+ clk_mgr->clks.dispclk_khz = clk_mgr->boot_snapshot.dispclk * 1000;
}
void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
@@ -218,6 +286,8 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
if (dc->work_arounds.skip_clock_update)
return;
+ display_count = dcn314_get_active_display_cnt_wa(dc, context);
+
/*
* if it is safe to lower, but we are already in the lower state, we don't have to do anything
* also if safe to lower is false, we just go in the higher state
@@ -236,7 +306,6 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
}
/* check that we're not already in lower */
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
- display_count = dcn314_get_active_display_cnt_wa(dc, context);
/* if we can go lower, go lower */
if (display_count == 0) {
union display_idle_optimization_u idle_info = { 0 };
@@ -293,11 +362,19 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
update_dppclk = true;
}
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) &&
+ (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) {
+ int requested_dispclk_khz = new_clocks->dispclk_khz;
+
dcn314_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
+		/* Raise the request to the PMFW minimum dispclk limit, if one is set. */
+ if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz)
+ requested_dispclk_khz = dc->debug.min_disp_clk_khz;
+
+ dcn314_smu_set_dispclk(clk_mgr, requested_dispclk_khz);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
- dcn314_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
+
dcn314_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
update_dispclk = true;
@@ -385,10 +462,65 @@ bool dcn314_are_clock_states_equal(struct dc_clocks *a,
return true;
}
-static void dcn314_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
+
+static void dcn314_dump_clk_registers_internal(struct dcn35_clk_internal *internal, struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ // read dtbclk
+ internal->CLK1_CLK4_CURRENT_CNT = REG_READ(CLK1_CLK4_CURRENT_CNT);
+ internal->CLK1_CLK4_BYPASS_CNTL = REG_READ(CLK1_CLK4_BYPASS_CNTL);
+
+ // read dcfclk
+ internal->CLK1_CLK3_CURRENT_CNT = REG_READ(CLK1_CLK3_CURRENT_CNT);
+ internal->CLK1_CLK3_BYPASS_CNTL = REG_READ(CLK1_CLK3_BYPASS_CNTL);
+
+ // read dcf deep sleep divider
+ internal->CLK1_CLK3_DS_CNTL = REG_READ(CLK1_CLK3_DS_CNTL);
+ internal->CLK1_CLK3_ALLOW_DS = REG_READ(CLK1_CLK3_ALLOW_DS);
+
+ // read dppclk
+ internal->CLK1_CLK1_CURRENT_CNT = REG_READ(CLK1_CLK1_CURRENT_CNT);
+ internal->CLK1_CLK1_BYPASS_CNTL = REG_READ(CLK1_CLK1_BYPASS_CNTL);
+
+ // read dprefclk
+ internal->CLK1_CLK2_CURRENT_CNT = REG_READ(CLK1_CLK2_CURRENT_CNT);
+ internal->CLK1_CLK2_BYPASS_CNTL = REG_READ(CLK1_CLK2_BYPASS_CNTL);
+
+ // read dispclk
+ internal->CLK1_CLK0_CURRENT_CNT = REG_READ(CLK1_CLK0_CURRENT_CNT);
+ internal->CLK1_CLK0_BYPASS_CNTL = REG_READ(CLK1_CLK0_BYPASS_CNTL);
+}
+
+void dcn314_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
{
- return;
+
+ struct dcn35_clk_internal internal = {0};
+
+ dcn314_dump_clk_registers_internal(&internal, clk_mgr_base);
+
+ regs_and_bypass->dcfclk = internal.CLK1_CLK3_CURRENT_CNT / 10;
+ regs_and_bypass->dcf_deep_sleep_divider = internal.CLK1_CLK3_DS_CNTL / 10;
+ regs_and_bypass->dcf_deep_sleep_allow = internal.CLK1_CLK3_ALLOW_DS;
+ regs_and_bypass->dprefclk = internal.CLK1_CLK2_CURRENT_CNT / 10;
+ regs_and_bypass->dispclk = internal.CLK1_CLK0_CURRENT_CNT / 10;
+ regs_and_bypass->dppclk = internal.CLK1_CLK1_CURRENT_CNT / 10;
+ regs_and_bypass->dtbclk = internal.CLK1_CLK4_CURRENT_CNT / 10;
+
+ regs_and_bypass->dppclk_bypass = internal.CLK1_CLK1_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dppclk_bypass < 0 || regs_and_bypass->dppclk_bypass > 4)
+ regs_and_bypass->dppclk_bypass = 0;
+ regs_and_bypass->dcfclk_bypass = internal.CLK1_CLK3_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dcfclk_bypass < 0 || regs_and_bypass->dcfclk_bypass > 4)
+ regs_and_bypass->dcfclk_bypass = 0;
+ regs_and_bypass->dispclk_bypass = internal.CLK1_CLK0_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dispclk_bypass < 0 || regs_and_bypass->dispclk_bypass > 4)
+ regs_and_bypass->dispclk_bypass = 0;
+ regs_and_bypass->dprefclk_bypass = internal.CLK1_CLK2_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dprefclk_bypass < 0 || regs_and_bypass->dprefclk_bypass > 4)
+ regs_and_bypass->dprefclk_bypass = 0;
+
}
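The bypass decode above keeps only the low three bits of each BYPASS_CNTL value and discards anything outside 0-4; the dcn35 variant later in this series names those selects (DFS, REFCLK, ERROR, 400 FCH, 600 FCH). A hedged sketch of the same decode as a standalone helper:

/* Hedged sketch: decode a CLK1_CLKn_BYPASS_CNTL value the way the dump
 * function above does. The select lives in bits 2:0; values 0-4 name a
 * source and anything else falls back to 0 (DFS). */
static uint32_t decode_bypass_select(uint32_t bypass_cntl)
{
	uint32_t sel = bypass_cntl & 0x0007;

	return (sel > 4) ? 0 : sel;
}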
static struct clk_bw_params dcn314_bw_params = {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
index 002c28e80720..0577eb527bc3 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
@@ -65,4 +65,9 @@ void dcn314_clk_mgr_construct(struct dc_context *ctx,
void dcn314_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int);
+
+void dcn314_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
+ struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info);
+
+
#endif //__DCN314_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index e4d22f74f986..b315ed91e010 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -46,7 +46,7 @@
#define DC_LOGGER \
clk_mgr->base.base.ctx->logger
-#include "link.h"
+#include "link_service.h"
#define TO_CLK_MGR_DCN315(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_dcn315, base)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 49efea0c8fcf..1769b1f26e75 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -39,7 +39,7 @@
#include "dcn316_smu.h"
#include "dm_helpers.h"
#include "dc_dmub_srv.h"
-#include "link.h"
+#include "link_service.h"
// DCN316 this is CLK1 instance
#define MAX_INSTANCE 7
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index 084994c650c4..7da7b41bd092 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -33,7 +33,7 @@
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
-#include "link.h"
+#include "link_service.h"
#include "dc_state_priv.h"
#include "atomfirmware.h"
#include "dcn32_smu13_driver_if.h"
@@ -1047,11 +1047,8 @@ static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
&num_entries_per_clk->num_fclk_levels);
clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_FCLK);
- if (num_entries_per_clk->num_memclk_levels >= num_entries_per_clk->num_fclk_levels) {
- num_levels = num_entries_per_clk->num_memclk_levels;
- } else {
- num_levels = num_entries_per_clk->num_fclk_levels;
- }
+ num_levels = max(num_entries_per_clk->num_memclk_levels, num_entries_per_clk->num_fclk_levels);
+
clk_mgr_base->bw_params->max_memclk_mhz =
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_memclk_levels - 1].memclk_mhz;
clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
index cf2d35363e8b..5d80fdf63ffc 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
@@ -63,7 +63,8 @@ static uint32_t dcn32_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un
udelay(delay_us);
} while (max_retries--);
- TRACE_SMU_DELAY(delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx);
+ TRACE_SMU_MSG_DELAY(0, 0, delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx);
+
return reg;
}
@@ -120,7 +121,7 @@ static uint32_t dcn32_smu_wait_for_response_delay(struct clk_mgr_internal *clk_m
*total_delay_us += delay_us;
} while (max_retries--);
- TRACE_SMU_DELAY(*total_delay_us, clk_mgr->base.ctx);
+ TRACE_SMU_MSG_DELAY(0, 0, *total_delay_us, clk_mgr->base.ctx);
return reg;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index bb1ac12a2b09..b11383fba35f 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -44,7 +44,7 @@
#include "dcn31/dcn31_clk_mgr.h"
#include "dc_dmub_srv.h"
-#include "link.h"
+#include "link_service.h"
#include "logger_types.h"
#undef DC_LOGGER
@@ -587,9 +587,118 @@ bool dcn35_are_clock_states_equal(struct dc_clocks *a,
return true;
}
-static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
+static void dcn35_save_clk_registers_internal(struct dcn35_clk_internal *internal, struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ // read dtbclk
+ internal->CLK1_CLK4_CURRENT_CNT = REG_READ(CLK1_CLK4_CURRENT_CNT);
+ internal->CLK1_CLK4_BYPASS_CNTL = REG_READ(CLK1_CLK4_BYPASS_CNTL);
+
+ // read dcfclk
+ internal->CLK1_CLK3_CURRENT_CNT = REG_READ(CLK1_CLK3_CURRENT_CNT);
+ internal->CLK1_CLK3_BYPASS_CNTL = REG_READ(CLK1_CLK3_BYPASS_CNTL);
+
+ // read dcf deep sleep divider
+ internal->CLK1_CLK3_DS_CNTL = REG_READ(CLK1_CLK3_DS_CNTL);
+ internal->CLK1_CLK3_ALLOW_DS = REG_READ(CLK1_CLK3_ALLOW_DS);
+
+ // read dppclk
+ internal->CLK1_CLK1_CURRENT_CNT = REG_READ(CLK1_CLK1_CURRENT_CNT);
+ internal->CLK1_CLK1_BYPASS_CNTL = REG_READ(CLK1_CLK1_BYPASS_CNTL);
+
+ // read dprefclk
+ internal->CLK1_CLK2_CURRENT_CNT = REG_READ(CLK1_CLK2_CURRENT_CNT);
+ internal->CLK1_CLK2_BYPASS_CNTL = REG_READ(CLK1_CLK2_BYPASS_CNTL);
+
+ // read dispclk
+ internal->CLK1_CLK0_CURRENT_CNT = REG_READ(CLK1_CLK0_CURRENT_CNT);
+ internal->CLK1_CLK0_BYPASS_CNTL = REG_READ(CLK1_CLK0_BYPASS_CNTL);
+}
+
+static void dcn35_save_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
struct clk_mgr_dcn35 *clk_mgr)
{
+ struct dcn35_clk_internal internal = {0};
+ char *bypass_clks[5] = {"0x0 DFS", "0x1 REFCLK", "0x2 ERROR", "0x3 400 FCH", "0x4 600 FCH"};
+
+ dcn35_save_clk_registers_internal(&internal, &clk_mgr->base.base);
+
+ regs_and_bypass->dcfclk = internal.CLK1_CLK3_CURRENT_CNT / 10;
+ regs_and_bypass->dcf_deep_sleep_divider = internal.CLK1_CLK3_DS_CNTL / 10;
+ regs_and_bypass->dcf_deep_sleep_allow = internal.CLK1_CLK3_ALLOW_DS;
+ regs_and_bypass->dprefclk = internal.CLK1_CLK2_CURRENT_CNT / 10;
+ regs_and_bypass->dispclk = internal.CLK1_CLK0_CURRENT_CNT / 10;
+ regs_and_bypass->dppclk = internal.CLK1_CLK1_CURRENT_CNT / 10;
+ regs_and_bypass->dtbclk = internal.CLK1_CLK4_CURRENT_CNT / 10;
+
+ regs_and_bypass->dppclk_bypass = internal.CLK1_CLK1_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dppclk_bypass < 0 || regs_and_bypass->dppclk_bypass > 4)
+ regs_and_bypass->dppclk_bypass = 0;
+ regs_and_bypass->dcfclk_bypass = internal.CLK1_CLK3_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dcfclk_bypass < 0 || regs_and_bypass->dcfclk_bypass > 4)
+ regs_and_bypass->dcfclk_bypass = 0;
+ regs_and_bypass->dispclk_bypass = internal.CLK1_CLK0_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dispclk_bypass < 0 || regs_and_bypass->dispclk_bypass > 4)
+ regs_and_bypass->dispclk_bypass = 0;
+ regs_and_bypass->dprefclk_bypass = internal.CLK1_CLK2_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dprefclk_bypass < 0 || regs_and_bypass->dprefclk_bypass > 4)
+ regs_and_bypass->dprefclk_bypass = 0;
+
+ if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
+ DC_LOG_SMU("clk_type,clk_value,deepsleep_cntl,deepsleep_allow,bypass\n");
+
+ DC_LOG_SMU("dcfclk,%d,%d,%d,%s\n",
+ regs_and_bypass->dcfclk,
+ regs_and_bypass->dcf_deep_sleep_divider,
+ regs_and_bypass->dcf_deep_sleep_allow,
+ bypass_clks[(int) regs_and_bypass->dcfclk_bypass]);
+
+ DC_LOG_SMU("dprefclk,%d,N/A,N/A,%s\n",
+ regs_and_bypass->dprefclk,
+ bypass_clks[(int) regs_and_bypass->dprefclk_bypass]);
+
+ DC_LOG_SMU("dispclk,%d,N/A,N/A,%s\n",
+ regs_and_bypass->dispclk,
+ bypass_clks[(int) regs_and_bypass->dispclk_bypass]);
+
+ // REGISTER VALUES
+ DC_LOG_SMU("reg_name,value,clk_type");
+
+ DC_LOG_SMU("CLK1_CLK3_CURRENT_CNT,%d,dcfclk",
+ internal.CLK1_CLK3_CURRENT_CNT);
+
+ DC_LOG_SMU("CLK1_CLK4_CURRENT_CNT,%d,dtbclk",
+ internal.CLK1_CLK4_CURRENT_CNT);
+
+ DC_LOG_SMU("CLK1_CLK3_DS_CNTL,%d,dcf_deep_sleep_divider",
+ internal.CLK1_CLK3_DS_CNTL);
+
+ DC_LOG_SMU("CLK1_CLK3_ALLOW_DS,%d,dcf_deep_sleep_allow",
+ internal.CLK1_CLK3_ALLOW_DS);
+
+ DC_LOG_SMU("CLK1_CLK2_CURRENT_CNT,%d,dprefclk",
+ internal.CLK1_CLK2_CURRENT_CNT);
+
+ DC_LOG_SMU("CLK1_CLK0_CURRENT_CNT,%d,dispclk",
+ internal.CLK1_CLK0_CURRENT_CNT);
+
+ DC_LOG_SMU("CLK1_CLK1_CURRENT_CNT,%d,dppclk",
+ internal.CLK1_CLK1_CURRENT_CNT);
+
+ DC_LOG_SMU("CLK1_CLK3_BYPASS_CNTL,%d,dcfclk_bypass",
+ internal.CLK1_CLK3_BYPASS_CNTL);
+
+ DC_LOG_SMU("CLK1_CLK2_BYPASS_CNTL,%d,dprefclk_bypass",
+ internal.CLK1_CLK2_BYPASS_CNTL);
+
+ DC_LOG_SMU("CLK1_CLK0_BYPASS_CNTL,%d,dispclk_bypass",
+ internal.CLK1_CLK0_BYPASS_CNTL);
+
+ DC_LOG_SMU("CLK1_CLK1_BYPASS_CNTL,%d,dppclk_bypass",
+ internal.CLK1_CLK1_BYPASS_CNTL);
+
+ }
}
static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base)
@@ -623,6 +732,7 @@ static void init_clk_states(struct clk_mgr *clk_mgr)
void dcn35_init_clocks(struct clk_mgr *clk_mgr)
{
struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+ struct clk_mgr_dcn35 *clk_mgr_dcn35 = TO_CLK_MGR_DCN35(clk_mgr_int);
init_clk_states(clk_mgr);
@@ -633,6 +743,13 @@ void dcn35_init_clocks(struct clk_mgr *clk_mgr)
else
clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz;
+ dcn35_save_clk_registers(&clk_mgr->boot_snapshot, clk_mgr_dcn35);
+
+ clk_mgr->clks.ref_dtbclk_khz = clk_mgr->boot_snapshot.dtbclk * 10;
+ if (clk_mgr->boot_snapshot.dtbclk > 59000) {
+		/* dtbclk considered enabled when the boot value reads above ~590 MHz (59000 * 10 kHz) */
+ clk_mgr->clks.dtbclk_en = true;
+ }
}
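The 59000 threshold above is easiest to read with the unit conversion spelled out; a short worked example, assuming (as the ref_dtbclk_khz assignment implies) that the boot snapshot counts in 10 kHz steps:

/* Hedged arithmetic sketch: boot_snapshot.dtbclk * 10 yields kHz, so the
 * 59000 threshold corresponds to 590,000 kHz = 590 MHz -- just under the
 * 600 MHz DTBCLK reference programmed later in dcn35_clk_mgr_construct. */
unsigned int threshold_khz = 59000 * 10;	/* 590000 kHz == 590 MHz */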
static struct clk_bw_params dcn35_bw_params = {
.vram_type = Ddr4MemType,
@@ -1323,7 +1440,7 @@ void dcn35_clk_mgr_construct(
dcn35_bw_params.wm_table = ddr5_wm_table;
}
/* Saved clocks configured at boot for debug purposes */
- dcn35_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, clk_mgr);
+ dcn35_save_clk_registers(&clk_mgr->base.base.boot_snapshot, clk_mgr);
clk_mgr->base.base.dprefclk_khz = dcn35_smu_get_dprefclk(&clk_mgr->base);
clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
index b59703467128..306016c1f109 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
@@ -13,7 +13,7 @@
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
-#include "link.h"
+#include "link_service.h"
#include "dc_state_priv.h"
#include "atomfirmware.h"
@@ -162,7 +162,7 @@ static void dcn401_init_single_clock(struct clk_mgr_internal *clk_mgr, PPCLK_e c
unsigned int i;
char *entry_i = (char *)entry_0;
- uint32_t ret = dcn30_smu_get_dpm_freq_by_index(clk_mgr, clk, 0xFF);
+ uint32_t ret = dcn401_smu_get_dpm_freq_by_index(clk_mgr, clk, 0xFF);
if (ret & (1 << 31))
/* fine-grained, only min and max */
@@ -174,7 +174,7 @@ static void dcn401_init_single_clock(struct clk_mgr_internal *clk_mgr, PPCLK_e c
/* if the initial message failed, num_levels will be 0 */
for (i = 0; i < *num_levels && i < ARRAY_SIZE(clk_mgr->base.bw_params->clk_table.entries); i++) {
- *((unsigned int *)entry_i) = (dcn30_smu_get_dpm_freq_by_index(clk_mgr, clk, i) & 0xFFFF);
+ *((unsigned int *)entry_i) = (dcn401_smu_get_dpm_freq_by_index(clk_mgr, clk, i) & 0xFFFF);
entry_i += sizeof(clk_mgr->base.bw_params->clk_table.entries[0]);
}
}
@@ -231,20 +231,20 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
clk_mgr->smu_present = false;
clk_mgr->dpm_present = false;
- if (!clk_mgr_base->force_smu_not_present && dcn30_smu_get_smu_version(clk_mgr, &clk_mgr->smu_ver))
+ if (!clk_mgr_base->force_smu_not_present && dcn401_smu_get_smu_version(clk_mgr, &clk_mgr->smu_ver))
clk_mgr->smu_present = true;
if (!clk_mgr->smu_present)
return;
- dcn30_smu_check_driver_if_version(clk_mgr);
- dcn30_smu_check_msg_header_version(clk_mgr);
+ dcn401_smu_check_driver_if_version(clk_mgr);
+ dcn401_smu_check_msg_header_version(clk_mgr);
/* DCFCLK */
dcn401_init_single_clock(clk_mgr, PPCLK_DCFCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz,
&num_entries_per_clk->num_dcfclk_levels);
- clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DCFCLK);
+ clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DCFCLK);
if (num_entries_per_clk->num_dcfclk_levels && clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz ==
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_dcfclk_levels - 1].dcfclk_mhz)
clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz = 0;
@@ -253,7 +253,7 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
dcn401_init_single_clock(clk_mgr, PPCLK_SOCCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].socclk_mhz,
&num_entries_per_clk->num_socclk_levels);
- clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_SOCCLK);
+ clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_SOCCLK);
if (num_entries_per_clk->num_socclk_levels && clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz ==
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_socclk_levels - 1].socclk_mhz)
clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz = 0;
@@ -263,7 +263,7 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
dcn401_init_single_clock(clk_mgr, PPCLK_DTBCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz,
&num_entries_per_clk->num_dtbclk_levels);
- clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DTBCLK);
+ clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DTBCLK);
if (num_entries_per_clk->num_dtbclk_levels && clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz ==
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_dtbclk_levels - 1].dtbclk_mhz)
clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz = 0;
@@ -273,7 +273,7 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
dcn401_init_single_clock(clk_mgr, PPCLK_DISPCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz,
&num_entries_per_clk->num_dispclk_levels);
- clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DISPCLK);
+ clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DISPCLK);
if (num_entries_per_clk->num_dispclk_levels && clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz ==
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_dispclk_levels - 1].dispclk_mhz)
clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = 0;
@@ -1318,8 +1318,8 @@ static void dcn401_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
table->Watermarks.WatermarkRow[i].WmSetting = i;
table->Watermarks.WatermarkRow[i].Flags = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.wm_type;
}
- dcn30_smu_set_dram_addr_high(clk_mgr, clk_mgr->wm_range_table_addr >> 32);
- dcn30_smu_set_dram_addr_low(clk_mgr, clk_mgr->wm_range_table_addr & 0xFFFFFFFF);
+ dcn401_smu_set_dram_addr_high(clk_mgr, clk_mgr->wm_range_table_addr >> 32);
+ dcn401_smu_set_dram_addr_low(clk_mgr, clk_mgr->wm_range_table_addr & 0xFFFFFFFF);
dcn401_smu_transfer_wm_table_dram_2_smu(clk_mgr);
}
@@ -1390,7 +1390,7 @@ static void dcn401_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_memclk_levels - 1].memclk_mhz;
}
- clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK);
+ clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK);
if (num_entries_per_clk->num_memclk_levels && clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz ==
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_memclk_levels - 1].memclk_mhz)
clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz = 0;
@@ -1399,16 +1399,12 @@ static void dcn401_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
dcn401_init_single_clock(clk_mgr, PPCLK_FCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].fclk_mhz,
&num_entries_per_clk->num_fclk_levels);
- clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_FCLK);
+ clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_FCLK);
if (num_entries_per_clk->num_fclk_levels && clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz ==
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_fclk_levels - 1].fclk_mhz)
clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = 0;
- if (num_entries_per_clk->num_memclk_levels >= num_entries_per_clk->num_fclk_levels) {
- num_levels = num_entries_per_clk->num_memclk_levels;
- } else {
- num_levels = num_entries_per_clk->num_fclk_levels;
- }
+ num_levels = max(num_entries_per_clk->num_memclk_levels, num_entries_per_clk->num_fclk_levels);
clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c
index 21c35528f61f..3a263840893e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c
@@ -57,6 +57,8 @@ static bool dcn401_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uin
/* Wait for response register to be ready */
dcn401_smu_wait_for_response(clk_mgr, 10, 200000);
+ TRACE_SMU_MSG_ENTER(msg_id, param_in, clk_mgr->base.ctx);
+
/* Clear response register */
REG_WRITE(DAL_RESP_REG, 0);
@@ -71,9 +73,11 @@ static bool dcn401_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uin
if (param_out)
*param_out = REG_READ(DAL_ARG_REG);
+ TRACE_SMU_MSG_EXIT(true, param_out ? *param_out : 0, clk_mgr->base.ctx);
return true;
}
+ TRACE_SMU_MSG_EXIT(false, 0, clk_mgr->base.ctx);
return false;
}
@@ -102,8 +106,6 @@ static uint32_t dcn401_smu_wait_for_response_delay(struct clk_mgr_internal *clk_
*total_delay_us += delay_us;
} while (max_retries--);
- TRACE_SMU_DELAY(*total_delay_us, clk_mgr->base.ctx);
-
return reg;
}
@@ -115,6 +117,8 @@ static bool dcn401_smu_send_msg_with_param_delay(struct clk_mgr_internal *clk_mg
/* Wait for response register to be ready */
dcn401_smu_wait_for_response_delay(clk_mgr, 10, 200000, &delay1_us);
+ TRACE_SMU_MSG_ENTER(msg_id, param_in, clk_mgr->base.ctx);
+
/* Clear response register */
REG_WRITE(DAL_RESP_REG, 0);
@@ -124,18 +128,71 @@ static bool dcn401_smu_send_msg_with_param_delay(struct clk_mgr_internal *clk_mg
/* Trigger the message transaction by writing the message ID */
REG_WRITE(DAL_MSG_REG, msg_id);
- TRACE_SMU_MSG(msg_id, param_in, clk_mgr->base.ctx);
-
/* Wait for response */
if (dcn401_smu_wait_for_response_delay(clk_mgr, 10, 200000, &delay2_us) == DALSMC_Result_OK) {
if (param_out)
*param_out = REG_READ(DAL_ARG_REG);
*total_delay_us = delay1_us + delay2_us;
+ TRACE_SMU_MSG_EXIT(true, param_out ? *param_out : 0, clk_mgr->base.ctx);
return true;
}
*total_delay_us = delay1_us + 2000000;
+ TRACE_SMU_MSG_EXIT(false, 0, clk_mgr->base.ctx);
+ return false;
+}
+
+bool dcn401_smu_get_smu_version(struct clk_mgr_internal *clk_mgr, unsigned int *version)
+{
+ smu_print("SMU Get SMU version\n");
+
+ if (dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetSmuVersion, 0, version)) {
+
+ smu_print("SMU version: %d\n", *version);
+
+ return true;
+ }
+
+ return false;
+}
+
+/* Message output should match SMU14_DRIVER_IF_VERSION in smu14_driver_if.h */
+bool dcn401_smu_check_driver_if_version(struct clk_mgr_internal *clk_mgr)
+{
+ uint32_t response = 0;
+
+ smu_print("SMU Check driver if version\n");
+
+ if (dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetDriverIfVersion, 0, &response)) {
+
+ smu_print("SMU driver if version: %d\n", response);
+
+ if (response == SMU14_DRIVER_IF_VERSION)
+ return true;
+ }
+
+ return false;
+}
+
+/* Message output should match DALSMC_VERSION in dalsmc.h */
+bool dcn401_smu_check_msg_header_version(struct clk_mgr_internal *clk_mgr)
+{
+ uint32_t response = 0;
+
+ smu_print("SMU Check msg header version\n");
+
+ if (dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetMsgHeaderVersion, 0, &response)) {
+
+ smu_print("SMU msg header version: %d\n", response);
+
+ if (response == DALSMC_VERSION)
+ return true;
+ }
+
return false;
}
@@ -163,6 +220,22 @@ void dcn401_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsi
smu_print("Numways for SubVP : %d\n", num_ways);
}
+void dcn401_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high)
+{
+ smu_print("SMU Set DRAM addr high: %d\n", addr_high);
+
+ dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_SetDalDramAddrHigh, addr_high, NULL);
+}
+
+void dcn401_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low)
+{
+ smu_print("SMU Set DRAM addr low: %d\n", addr_low);
+
+ dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_SetDalDramAddrLow, addr_low, NULL);
+}
+
void dcn401_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr)
{
smu_print("SMU Transfer WM table DRAM 2 SMU\n");
@@ -348,3 +421,52 @@ unsigned int dcn401_smu_get_num_of_umc_channels(struct clk_mgr_internal *clk_mgr
return response;
}
+
+/*
+ * Frequency in MHz returned in lower 16 bits for valid DPM level
+ *
+ * Call with dpm_level = 0xFF to query features, return value will be:
+ * Bits 7:0 - number of DPM levels
+ * Bit 28 - 1 = auto DPM on
+ * Bit 29 - 1 = sweep DPM on
+ * Bit 30 - 1 = forced DPM on
+ * Bit 31 - 0 = discrete, 1 = fine-grained
+ *
+ * With fine-grained DPM, only min and max frequencies will be reported
+ *
+ * Returns 0 on failure
+ */
+unsigned int dcn401_smu_get_dpm_freq_by_index(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint8_t dpm_level)
+{
+ uint32_t response = 0;
+
+ /* bits 23:16 for clock type, lower 8 bits for DPM level */
+ uint32_t param = (clk << 16) | dpm_level;
+
+ smu_print("SMU Get dpm freq by index: clk = %d, dpm_level = %d\n", clk, dpm_level);
+
+ dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetDpmFreqByIndex, param, &response);
+
+ smu_print("SMU dpm freq: %d MHz\n", response);
+
+ return response;
+}
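The bit layout documented above is compact enough that a decode helper makes the contract explicit. A hedged sketch; the struct and helper names are illustrative, not part of the driver:

/* Hedged sketch: unpack the dpm_level == 0xFF feature query described
 * in the comment above. */
struct dpm_feature_query {
	unsigned int num_levels;	/* bits 7:0 */
	bool auto_dpm;			/* bit 28 */
	bool sweep_dpm;			/* bit 29 */
	bool forced_dpm;		/* bit 30 */
	bool fine_grained;		/* bit 31: 0 = discrete, 1 = fine-grained */
};

static void decode_dpm_feature_query(uint32_t response, struct dpm_feature_query *q)
{
	q->num_levels = response & 0xFF;
	q->auto_dpm = !!(response & (1u << 28));
	q->sweep_dpm = !!(response & (1u << 29));
	q->forced_dpm = !!(response & (1u << 30));
	q->fine_grained = !!(response & (1u << 31));
}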
+
+/* Returns the max DPM frequency in DC mode in MHz, 0 on failure */
+unsigned int dcn401_smu_get_dc_mode_max_dpm_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk)
+{
+ uint32_t response = 0;
+
+ /* bits 23:16 for clock type */
+ uint32_t param = clk << 16;
+
+ smu_print("SMU Get DC mode max DPM freq: clk = %d\n", clk);
+
+ dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetDcModeMaxDpmFreq, param, &response);
+
+ smu_print("SMU DC mode max DMP freq: %d MHz\n", response);
+
+ return response;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h
index e02eb1294b37..4f5ac603e822 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h
@@ -7,11 +7,17 @@
#include "os_types.h"
#include "core_types.h"
-#include "dcn32/dcn32_clk_mgr_smu_msg.h"
+struct clk_mgr_internal;
+
+bool dcn401_smu_get_smu_version(struct clk_mgr_internal *clk_mgr, unsigned int *version);
+bool dcn401_smu_check_driver_if_version(struct clk_mgr_internal *clk_mgr);
+bool dcn401_smu_check_msg_header_version(struct clk_mgr_internal *clk_mgr);
void dcn401_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool support);
void dcn401_smu_send_uclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool support);
void dcn401_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsigned int num_ways);
+void dcn401_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high);
+void dcn401_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low);
void dcn401_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr);
void dcn401_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr);
unsigned int dcn401_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz);
@@ -29,5 +35,7 @@ bool dcn401_smu_set_subvp_uclk_fclk_hardmin(struct clk_mgr_internal *clk_mgr,
void dcn401_smu_set_min_deep_sleep_dcef_clk(struct clk_mgr_internal *clk_mgr, uint32_t freq_mhz);
void dcn401_smu_set_num_of_displays(struct clk_mgr_internal *clk_mgr, uint32_t num_displays);
unsigned int dcn401_smu_get_num_of_umc_channels(struct clk_mgr_internal *clk_mgr);
+unsigned int dcn401_smu_get_dc_mode_max_dpm_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk);
+unsigned int dcn401_smu_get_dpm_freq_by_index(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint8_t dpm_level);
#endif /* __DCN401_CLK_MGR_SMU_MSG_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 28aca7017f0f..5f2d5638c819 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -60,7 +60,7 @@
#include "link_encoder.h"
#include "link_enc_cfg.h"
-#include "link.h"
+#include "link_service.h"
#include "dm_helpers.h"
#include "mem_input.h"
@@ -84,6 +84,7 @@
#if defined(CONFIG_DRM_AMD_DC_FP)
#include "dml2/dml2_internal_types.h"
+#include "soc_and_ip_translator.h"
#endif
#include "dce/dmub_outbox.h"
@@ -217,11 +218,24 @@ static bool create_links(
connectors_num,
num_virtual_links);
- // condition loop on link_count to allow skipping invalid indices
+ /* When getting the number of connectors, the VBIOS reports the number of valid indices,
+ * but it doesn't say which indices are valid, and not every index has an actual connector.
+ * So, if we don't find a connector on an index, that is not an error.
+ *
+ * - There is no guarantee that the first N indices will be valid
+ * - VBIOS may report a higher amount of valid indices than there are actual connectors
+ * - Some VBIOS have valid configurations for more connectors than there actually are
+ * on the card. This may be because the manufacturer used the same VBIOS for different
+ * variants of the same card.
+ */
for (i = 0; dc->link_count < connectors_num && i < MAX_LINKS; i++) {
+ struct graphics_object_id connector_id = bios->funcs->get_connector_id(bios, i);
struct link_init_data link_init_params = {0};
struct dc_link *link;
+ if (connector_id.id == CONNECTOR_ID_UNKNOWN)
+ continue;
+
DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);
link_init_params.ctx = dc->ctx;
@@ -446,7 +460,9 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
* avoid conflicting with firmware updates.
*/
if (dc->ctx->dce_version > DCE_VERSION_MAX) {
- if (dc->optimized_required || dc->wm_optimized_required) {
+ if (dc->optimized_required &&
+ (stream->adjust.v_total_max != adjust->v_total_max ||
+ stream->adjust.v_total_min != adjust->v_total_min)) {
stream->adjust.timing_adjust_pending = true;
return false;
}
@@ -934,21 +950,24 @@ static void dc_destruct(struct dc *dc)
}
dc_destroy_resource_pool(dc);
-
+#ifdef CONFIG_DRM_AMD_DC_FP
+ dc_destroy_soc_and_ip_translator(&dc->soc_and_ip_translator);
+#endif
if (dc->link_srv)
link_destroy_link_service(&dc->link_srv);
- if (dc->ctx->gpio_service)
- dal_gpio_service_destroy(&dc->ctx->gpio_service);
-
- if (dc->ctx->created_bios)
- dal_bios_parser_destroy(&dc->ctx->dc_bios);
+ if (dc->ctx) {
+ if (dc->ctx->gpio_service)
+ dal_gpio_service_destroy(&dc->ctx->gpio_service);
- kfree(dc->ctx->logger);
- dc_perf_trace_destroy(&dc->ctx->perf_trace);
+ if (dc->ctx->created_bios)
+ dal_bios_parser_destroy(&dc->ctx->dc_bios);
+ kfree(dc->ctx->logger);
+ dc_perf_trace_destroy(&dc->ctx->perf_trace);
- kfree(dc->ctx);
- dc->ctx = NULL;
+ kfree(dc->ctx);
+ dc->ctx = NULL;
+ }
kfree(dc->bw_vbios);
dc->bw_vbios = NULL;
@@ -1137,6 +1156,9 @@ static bool dc_construct(struct dc *dc,
dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
DC_FP_END();
}
+ dc->soc_and_ip_translator = dc_create_soc_and_ip_translator(dc_ctx->dce_version);
+ if (!dc->soc_and_ip_translator)
+ goto fail;
#endif
if (!create_links(dc, init_params->num_virtual_links))
@@ -2397,6 +2419,18 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params
goto fail;
}
+ /*
+ * If not already seamless, make transition seamless by inserting intermediate minimal transition
+ */
+ if (dc->hwss.is_pipe_topology_transition_seamless &&
+ !dc->hwss.is_pipe_topology_transition_seamless(dc, dc->current_state, context)) {
+ res = commit_minimal_transition_state(dc, context);
+ if (res != DC_OK) {
+ BREAK_TO_DEBUGGER();
+ goto fail;
+ }
+ }
+
res = dc_commit_state_no_check(dc, context);
for (i = 0; i < params->stream_count; i++) {
@@ -2543,7 +2577,6 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
}
dc->optimized_required = false;
- dc->wm_optimized_required = false;
}
bool dc_set_generic_gpio_for_stereo(bool enable,
@@ -3022,8 +3055,6 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
dc->optimized_required = true;
}
-
- dc->optimized_required |= dc->wm_optimized_required;
}
return type;
@@ -3279,6 +3310,9 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->adaptive_sync_infopacket)
stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket;
+ if (update->avi_infopacket)
+ stream->avi_infopacket = *update->avi_infopacket;
+
if (update->dither_option)
stream->dither_option = *update->dither_option;
@@ -3376,7 +3410,7 @@ static void update_seamless_boot_flags(struct dc *dc,
int surface_count,
struct dc_stream_state *stream)
{
- if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
+ if (get_seamless_boot_stream_count(context) > 0 && (surface_count > 0 || stream->dpms_off)) {
/* Optimize seamless boot flag keeps clocks and watermarks high until
* first flip. After first flip, optimization is required to lower
* bandwidth. Important to note that it is expected UEFI will
@@ -3573,7 +3607,8 @@ static void commit_planes_do_stream_update(struct dc *dc,
stream_update->vsp_infopacket ||
stream_update->hfvsif_infopacket ||
stream_update->adaptive_sync_infopacket ||
- stream_update->vtem_infopacket) {
+ stream_update->vtem_infopacket ||
+ stream_update->avi_infopacket) {
resource_build_info_frame(pipe_ctx);
dc->hwss.update_info_frame(pipe_ctx);
@@ -4135,7 +4170,7 @@ static void commit_planes_for_stream(struct dc *dc,
}
if (dc->hwseq->funcs.wait_for_pipe_update_if_needed)
- dc->hwseq->funcs.wait_for_pipe_update_if_needed(dc, top_pipe_to_program, update_type == UPDATE_TYPE_FAST);
+ dc->hwseq->funcs.wait_for_pipe_update_if_needed(dc, top_pipe_to_program, update_type < UPDATE_TYPE_FULL);
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
if (dc->hwss.subvp_pipe_control_lock)
@@ -5045,6 +5080,7 @@ static bool full_update_required(struct dc *dc,
stream_update->hfvsif_infopacket ||
stream_update->vtem_infopacket ||
stream_update->adaptive_sync_infopacket ||
+ stream_update->avi_infopacket ||
stream_update->dpms_off ||
stream_update->allow_freesync ||
stream_update->vrr_active_variable ||
@@ -5088,129 +5124,6 @@ static bool fast_update_only(struct dc *dc,
&& !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
}
-static bool update_planes_and_stream_v1(struct dc *dc,
- struct dc_surface_update *srf_updates, int surface_count,
- struct dc_stream_state *stream,
- struct dc_stream_update *stream_update,
- struct dc_state *state)
-{
- const struct dc_stream_status *stream_status;
- enum surface_update_type update_type;
- struct dc_state *context;
- struct dc_context *dc_ctx = dc->ctx;
- int i, j;
- struct dc_fast_update fast_update[MAX_SURFACES] = {0};
-
- dc_exit_ips_for_hw_access(dc);
-
- populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
- stream_status = dc_stream_get_status(stream);
- context = dc->current_state;
-
- update_type = dc_check_update_surfaces_for_stream(
- dc, srf_updates, surface_count, stream_update, stream_status);
- /* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream.
- * E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip
- * Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come.
- */
- force_immediate_gsl_plane_flip(dc, srf_updates, surface_count);
-
- if (update_type >= UPDATE_TYPE_FULL) {
-
- /* initialize scratch memory for building context */
- context = dc_state_create_copy(state);
- if (context == NULL) {
- DC_ERROR("Failed to allocate new validate context!\n");
- return false;
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
-
- if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
- new_pipe->plane_state->force_full_update = true;
- }
- } else if (update_type == UPDATE_TYPE_FAST) {
- /*
- * Previous frame finished and HW is ready for optimization.
- */
- dc_post_update_surfaces_to_stream(dc);
- }
-
- for (i = 0; i < surface_count; i++) {
- struct dc_plane_state *surface = srf_updates[i].surface;
-
- copy_surface_update_to_plane(surface, &srf_updates[i]);
-
- if (update_type >= UPDATE_TYPE_MED) {
- for (j = 0; j < dc->res_pool->pipe_count; j++) {
- struct pipe_ctx *pipe_ctx =
- &context->res_ctx.pipe_ctx[j];
-
- if (pipe_ctx->plane_state != surface)
- continue;
-
- resource_build_scaling_params(pipe_ctx);
- }
- }
- }
-
- copy_stream_update_to_stream(dc, context, stream, stream_update);
-
- if (update_type >= UPDATE_TYPE_FULL) {
- if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK) {
- DC_ERROR("Mode validation failed for stream update!\n");
- dc_state_release(context);
- return false;
- }
- }
-
- TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
-
- if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
- !dc->debug.enable_legacy_fast_update) {
- commit_planes_for_stream_fast(dc,
- srf_updates,
- surface_count,
- stream,
- stream_update,
- update_type,
- context);
- } else {
- commit_planes_for_stream(
- dc,
- srf_updates,
- surface_count,
- stream,
- stream_update,
- update_type,
- context);
- }
- /*update current_State*/
- if (dc->current_state != context) {
-
- struct dc_state *old = dc->current_state;
-
- dc->current_state = context;
- dc_state_release(old);
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
- if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
- pipe_ctx->plane_state->force_full_update = false;
- }
- }
-
- /* Legacy optimization path for DCE. */
- if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
- dc_post_update_surfaces_to_stream(dc);
- TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
- }
- return true;
-}
-
static bool update_planes_and_stream_v2(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
@@ -5468,12 +5381,10 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (dc->ctx->dce_version >= DCN_VERSION_4_01) {
ret = update_planes_and_stream_v3(dc, srf_updates, surface_count,
stream, stream_update);
- } else if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
+ } else {
ret = update_planes_and_stream_v2(dc, srf_updates, surface_count,
stream, stream_update);
- } else
- ret = update_planes_and_stream_v1(dc, srf_updates, surface_count, stream,
- stream_update, state);
+ }
if (ret && dc->ctx->dce_version >= DCN_VERSION_3_2)
clear_update_flags(srf_updates, surface_count, stream);
@@ -5713,8 +5624,8 @@ void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const
subvp_pipe_type[i] = dc_state_get_pipe_subvp_type(context, pipe);
}
}
-
- DC_LOG_DC("%s: allow_idle=%d\n HardMinUClk_Khz=%d HardMinDramclk_Khz=%d\n Pipe_0=%d Pipe_1=%d Pipe_2=%d Pipe_3=%d Pipe_4=%d Pipe_5=%d (caller=%s)\n",
+ if (!dc->caps.is_apu)
+ DC_LOG_DC("%s: allow_idle=%d\n HardMinUClk_Khz=%d HardMinDramclk_Khz=%d\n Pipe_0=%d Pipe_1=%d Pipe_2=%d Pipe_3=%d Pipe_4=%d Pipe_5=%d (caller=%s)\n",
__func__, allow, idle_fclk_khz, idle_dramclk_khz, subvp_pipe_type[0], subvp_pipe_type[1], subvp_pipe_type[2],
subvp_pipe_type[3], subvp_pipe_type[4], subvp_pipe_type[5], caller_name);
@@ -6449,3 +6360,21 @@ bool dc_can_clear_cursor_limit(struct dc *dc)
return false;
}
+
+void dc_get_underflow_debug_data_for_otg(struct dc *dc, int primary_otg_inst,
+ struct dc_underflow_debug_data *out_data)
+{
+ struct timing_generator *tg = NULL;
+
+ for (int i = 0; i < MAX_PIPES; i++) {
+ if (dc->res_pool->timing_generators[i] &&
+ dc->res_pool->timing_generators[i]->inst == primary_otg_inst) {
+ tg = dc->res_pool->timing_generators[i];
+ break;
+ }
+ }
+
+ dc_exit_ips_for_hw_access(dc);
+ if (dc->hwss.get_underflow_debug_data)
+ dc->hwss.get_underflow_debug_data(dc, tg, out_data);
+}
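A hedged usage sketch for the new export: look up the OTG by instance and hand a caller-owned buffer to the hw sequencer hook (the zero instance here is only an example):

/* Hedged usage sketch: collect underflow debug state for OTG instance 0,
 * assuming dc is already in scope. */
struct dc_underflow_debug_data debug_data = {0};

dc_get_underflow_debug_data_for_otg(dc, 0, &debug_data);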
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index ec4e80e5b6eb..d82b1cb467f4 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -1177,6 +1177,8 @@ void hwss_wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *c
tg = otg_master->stream_res.tg;
if (tg->funcs->wait_odm_doublebuffer_pending_clear)
tg->funcs->wait_odm_doublebuffer_pending_clear(tg);
+ if (tg->funcs->wait_otg_disable)
+ tg->funcs->wait_otg_disable(tg);
}
/* ODM update may require to reprogram blank pattern for each OPP */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
index 814f68d76257..a180f68f711c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
@@ -24,7 +24,7 @@
#include "link_enc_cfg.h"
#include "resource.h"
-#include "link.h"
+#include "link_service.h"
#define DC_LOGGER dc->ctx->logger
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
index 130455f2802a..9acd30019717 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
@@ -33,8 +33,9 @@
* dc.h with detail interface documentation, then add function implementation
* in this file which calls link functions.
*/
-#include "link.h"
+#include "link_service.h"
#include "dce/dce_i2c.h"
+
struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_index)
{
if (link_index >= MAX_LINKS)
@@ -520,3 +521,10 @@ enum dc_status dc_link_validate_dp_tunneling_bandwidth(const struct dc *dc, cons
return dc->link_srv->validate_dp_tunnel_bandwidth(dc, new_ctx);
}
+void dc_link_get_alpm_support(struct dc_link *link,
+ bool *auxless_support,
+ bool *auxwake_support)
+{
+ link->dc->link_srv->edp_get_alpm_support(link, auxless_support, auxwake_support);
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 4d6181e7c612..bc5dedf5f60c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -40,7 +40,7 @@
#include "virtual/virtual_stream_encoder.h"
#include "dpcd_defs.h"
#include "link_enc_cfg.h"
-#include "link.h"
+#include "link_service.h"
#include "clk_mgr.h"
#include "dc_state_priv.h"
#include "dc_stream_priv.h"
@@ -95,7 +95,6 @@
#define DC_LOGGER \
dc->ctx->logger
#define DC_LOGGER_INIT(logger)
-
#include "dml2/dml2_wrapper.h"
#define UNABLE_TO_SPLIT -1
@@ -165,7 +164,13 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
case FAMILY_NV:
dc_version = DCN_VERSION_2_0;
- if (asic_id.chip_id == DEVICE_ID_NV_13FE || asic_id.chip_id == DEVICE_ID_NV_143F) {
+ if (asic_id.chip_id == DEVICE_ID_NV_13FE ||
+ asic_id.chip_id == DEVICE_ID_NV_143F ||
+ asic_id.chip_id == DEVICE_ID_NV_13F9 ||
+ asic_id.chip_id == DEVICE_ID_NV_13FA ||
+ asic_id.chip_id == DEVICE_ID_NV_13FB ||
+ asic_id.chip_id == DEVICE_ID_NV_13FC ||
+ asic_id.chip_id == DEVICE_ID_NV_13DB) {
dc_version = DCN_VERSION_2_01;
break;
}
@@ -2143,7 +2148,7 @@ int resource_get_odm_slice_dst_width(struct pipe_ctx *otg_master,
h_active = timing->h_addressable +
timing->h_border_left +
timing->h_border_right +
- otg_master->hblank_borrow;
+ otg_master->dsc_padding_params.dsc_hactive_padding;
width = h_active / count;
if (otg_master->stream_res.tg)
@@ -4261,39 +4266,33 @@ fail:
return res;
}
+#if defined(CONFIG_DRM_AMD_DC_FP)
+#endif /* CONFIG_DRM_AMD_DC_FP */
+
/**
- * decide_hblank_borrow - Decides the horizontal blanking borrow value for a given pipe context.
+ * calculate_timing_params_for_dsc_with_padding - Calculates timing parameters for DSC with padding.
* @pipe_ctx: Pointer to the pipe context structure.
*
- * This function calculates the horizontal blanking borrow value for a given pipe context based on the
+ * This function calculates the timing parameters for a given pipe context based on the
* display stream compression (DSC) configuration. If the horizontal active pixels (hactive) are less
- * than the total width of the DSC slices, it sets the hblank_borrow value to the difference. If the
- * total horizontal timing minus the hblank_borrow value is less than 32, it resets the hblank_borrow
+ * than the total width of the DSC slices, it sets the dsc_hactive_padding value to the difference. If the
+ * total horizontal timing minus the dsc_hactive_padding value is less than 32, it resets the dsc_hactive_padding
* value to 0.
*/
-static void decide_hblank_borrow(struct pipe_ctx *pipe_ctx)
+static void calculate_timing_params_for_dsc_with_padding(struct pipe_ctx *pipe_ctx)
{
- uint32_t hactive;
- uint32_t ceil_slice_width;
struct dc_stream_state *stream = NULL;
if (!pipe_ctx)
return;
stream = pipe_ctx->stream;
+ pipe_ctx->dsc_padding_params.dsc_hactive_padding = 0;
+ pipe_ctx->dsc_padding_params.dsc_htotal_padding = 0;
- if (stream->timing.flags.DSC) {
- hactive = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
-
- /* Assume if determined slices does not divide Hactive evenly, Hborrow is needed for padding*/
- if (hactive % stream->timing.dsc_cfg.num_slices_h != 0) {
- ceil_slice_width = (hactive / stream->timing.dsc_cfg.num_slices_h) + 1;
- pipe_ctx->hblank_borrow = ceil_slice_width * stream->timing.dsc_cfg.num_slices_h - hactive;
+ if (stream)
+ pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz = stream->timing.pix_clk_100hz;
- if (stream->timing.h_total - hactive - pipe_ctx->hblank_borrow < 32)
- pipe_ctx->hblank_borrow = 0;
- }
- }
}
/**
@@ -4336,7 +4335,7 @@ enum dc_status dc_validate_global_state(
/* Decide whether hblank borrow is needed and save it in pipe_ctx */
if (dc->debug.enable_hblank_borrow)
- decide_hblank_borrow(pipe_ctx);
+ calculate_timing_params_for_dsc_with_padding(pipe_ctx);
if (dc->res_pool->funcs->patch_unknown_plane_state &&
pipe_ctx->plane_state &&
@@ -4411,8 +4410,14 @@ static void set_avi_info_frame(
unsigned int fr_ind = pipe_ctx->stream->timing.fr_index;
enum dc_timing_3d_format format;
+ if (stream->avi_infopacket.valid) {
+ *info_packet = stream->avi_infopacket;
+ return;
+ }
+
memset(&hdmi_info, 0, sizeof(union hdmi_info_packet));
+
color_space = pipe_ctx->stream->output_color_space;
if (color_space == COLOR_SPACE_UNKNOWN)
color_space = (stream->timing.pixel_encoding == PIXEL_ENCODING_RGB) ?
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
index fe9f99f1bdf9..f976ffd6d466 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
@@ -65,7 +65,7 @@ void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification
notify->type == DMUB_NOTIFICATION_DPIA_NOTIFICATION ||
notify->type == DMUB_NOTIFICATION_SET_CONFIG_REPLY) {
notify->link_index =
- get_link_index_from_dpia_port_index(dc, notify->link_index);
+ get_link_index_from_dpia_port_index(dc, notify->instance);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index 883054bb18e7..c61300a7cb1c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -211,7 +211,7 @@ struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *p
return NULL;
}
- if (!dml2_create(dc, &dc->dml2_dc_power_options, &state->bw_ctx.dml2_dc_power_source)) {
+ if (dc->caps.dcmode_power_limits_present && !dml2_create(dc, &dc->dml2_dc_power_options, &state->bw_ctx.dml2_dc_power_source)) {
dc_state_release(state);
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 4d6bc9fd4faa..9ac2d41f8fca 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -316,6 +316,9 @@ bool dc_stream_set_cursor_attributes(
{
bool result = false;
+ if (!stream)
+ return false;
+
if (dc_stream_check_cursor_attributes(stream, stream->ctx->dc->current_state, attributes)) {
stream->cursor_attributes = *attributes;
result = true;
@@ -331,7 +334,10 @@ bool dc_stream_program_cursor_attributes(
struct dc *dc;
bool reset_idle_optimizations = false;
- dc = stream ? stream->ctx->dc : NULL;
+ if (!stream)
+ return false;
+
+ dc = stream->ctx->dc;
if (dc_stream_set_cursor_attributes(stream, attributes)) {
dc_z10_restore(dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 59c07756130d..98f0b6b3c213 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -55,7 +55,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.340"
+#define DC_VER "3.2.351"
/**
* MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC
@@ -234,6 +234,7 @@ struct lut3d_caps {
* @ogam_ram: programmable out gamma LUT
* @ocsc: output color space conversion matrix
* @num_3dluts: MPC 3D LUT; always assumes a preceding shaper LUT
+ * @num_rmcm_3dluts: number of RMCM 3D LUTs; always assumes a preceding shaper LUT
* @shared_3d_lut: shared 3D LUT flag. Can be either DPP or MPC, but single
* instance
- * @ogam_rom_caps: pre-definied curve caps for regamma 1D LUT
+ * @ogam_rom_caps: pre-defined curve caps for regamma 1D LUT
@@ -694,6 +695,15 @@ struct dc_clocks {
int idle_fclk_khz;
int subvp_prefetch_dramclk_khz;
int subvp_prefetch_fclk_khz;
+
+ /* Stutter efficiencies are technically not clock values,
+ * but they are stored here so that they are part of the update_clocks call, similar to num_ways.
+ * Efficiencies are stored as a percentage (0-100).
+ */
+ struct {
+ uint8_t base_efficiency; //LP1
+ uint8_t low_power_efficiency; //LP2
+ } stutter_efficiency;
};
struct dc_bw_validation_profile {
@@ -839,7 +849,8 @@ union dpia_debug_options {
uint32_t enable_force_tbt3_work_around:1; /* bit 4 */
uint32_t disable_usb4_pm_support:1; /* bit 5 */
uint32_t enable_usb4_bw_zero_alloc_patch:1; /* bit 6 */
- uint32_t reserved:25;
+ uint32_t enable_bw_allocation_mode:1; /* bit 7 */
+ uint32_t reserved:24;
} bits;
uint32_t raw;
};
@@ -1072,6 +1083,7 @@ struct dc_debug_options {
unsigned int force_mall_ss_num_ways;
bool alloc_extra_way_for_cursor;
uint32_t subvp_extra_lines;
+ bool disable_force_pstate_allow_on_hw_release;
bool force_usr_allow;
/* uses value at boot and disables switch */
bool disable_dtb_ref_clk_switch;
@@ -1145,6 +1157,13 @@ struct dc_debug_options {
bool enable_hblank_borrow;
bool force_subvp_df_throttle;
uint32_t acpi_transition_bitmasks[MAX_PIPES];
+ bool enable_pg_cntl_debug_logs;
+ unsigned int auxless_alpm_lfps_setup_ns;
+ unsigned int auxless_alpm_lfps_period_ns;
+ unsigned int auxless_alpm_lfps_silence_ns;
+ unsigned int auxless_alpm_lfps_t1t2_us;
+ short auxless_alpm_lfps_t1t2_offset_us;
+ bool disable_stutter_for_wm_program;
};
@@ -1305,6 +1324,32 @@ union dc_3dlut_state {
};
+#define MATRIX_9C__DIM_128_ALIGNED_LEN 16 // 9 + 7 pad: 9 * 8 + 7 * 8 = 72 + 56 = 128 bytes; 128 % 128 == 0
+#define MATRIX_17C__DIM_128_ALIGNED_LEN 32 //17 + 15 pad: 17 * 8 + 15 * 8 = 136 + 120 = 256 bytes; 256 % 128 == 0
+#define MATRIX_33C__DIM_128_ALIGNED_LEN 64 //17 + 47 pad: 17 * 8 + 47 * 8 = 136 + 376 = 512 bytes; 512 % 128 == 0
+
+struct lut_rgb {
+ uint16_t b;
+ uint16_t g;
+ uint16_t r;
+ uint16_t padding;
+};
+
+// This structure maps directly to the layout the LUT hardware reads from memory
+struct lut_mem_mapping {
+ union {
+ //NATIVE MODE 1, 2
+ //RGB layout [b][g][r] //red is 128 byte aligned
+ //BGR layout [r][g][b] //blue is 128 byte aligned
+ struct lut_rgb rgb_17c[17][17][MATRIX_17C__DIM_128_ALIGNED_LEN];
+ struct lut_rgb rgb_33c[33][33][MATRIX_33C__DIM_128_ALIGNED_LEN];
+
+ //TRANSFORMED
+ uint16_t linear_rgb[(33*33*33*4/128+1)*128];
+ };
+ uint16_t size;
+};
+
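A quick way to sanity-check the 128-byte alignment claims above; a minimal sketch using compile-time asserts, which are illustrative and not part of the driver:

	/* Each lut_rgb entry is four 16-bit words = 8 bytes. */
	_Static_assert(sizeof(struct lut_rgb) == 8, "lut_rgb must be 8 bytes");
	/* One innermost row: 16/32/64 entries * 8 bytes = 128/256/512 bytes. */
	_Static_assert(MATRIX_9C__DIM_128_ALIGNED_LEN * 8 % 128 == 0, "9c row not 128B aligned");
	_Static_assert(MATRIX_17C__DIM_128_ALIGNED_LEN * 8 % 128 == 0, "17c row not 128B aligned");
	_Static_assert(MATRIX_33C__DIM_128_ALIGNED_LEN * 8 % 128 == 0, "33c row not 128B aligned");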
struct dc_rmcm_3dlut {
bool isInUse;
const struct dc_stream_state *stream;
@@ -1347,7 +1392,6 @@ union surface_update_flags {
uint32_t in_transfer_func_change:1;
uint32_t input_csc_change:1;
uint32_t coeff_reduction_change:1;
- uint32_t output_tf_change:1;
uint32_t pixel_format_change:1;
uint32_t plane_size_change:1;
uint32_t gamut_remap_change:1;
@@ -1691,7 +1735,6 @@ struct dc {
/* Require to optimize clocks and bandwidth for added/removed planes */
bool optimized_required;
- bool wm_optimized_required;
bool idle_optimizations_allowed;
bool enable_c20_dtm_b0;
@@ -1733,7 +1776,7 @@ struct dc {
struct dml2_configuration_options dml2_options;
struct dml2_configuration_options dml2_dc_power_options;
enum dc_acpi_cm_power_state power_state;
-
+ struct soc_and_ip_translator *soc_and_ip_translator;
};
struct dc_scaling_info {
@@ -1786,6 +1829,23 @@ struct dc_surface_update {
struct dc_bias_and_scale bias_and_scale;
};
+struct dc_underflow_debug_data {
+ uint32_t otg_inst;
+ uint32_t otg_underflow;
+ uint32_t h_position;
+ uint32_t v_position;
+ uint32_t otg_frame_count;
+ struct dc_underflow_per_hubp_debug_data {
+ uint32_t hubp_underflow;
+ uint32_t hubp_in_blank;
+ uint32_t hubp_readline;
+ uint32_t det_config_error;
+ } hubps[MAX_PIPES];
+ uint32_t curr_det_sizes[MAX_PIPES];
+ uint32_t target_det_sizes[MAX_PIPES];
+ uint32_t compbuf_config_error;
+};
+
/*
* Create a new surface with default parameters;
*/
@@ -1804,8 +1864,6 @@ void dc_3dlut_func_retain(struct dc_3dlut *lut);
void dc_post_update_surfaces_to_stream(
struct dc *dc);
-#include "dc_stream.h"
-
/**
* struct dc_validation_set - Struct to store surface/stream associations for validation
*/
@@ -2447,6 +2505,12 @@ void dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
*/
enum dc_status dc_link_validate_dp_tunneling_bandwidth(const struct dc *dc, const struct dc_state *new_ctx);
+/*
+ * Query whether auxless and aux-wake ALPM are supported by the link
+ */
+void dc_link_get_alpm_support(struct dc_link *link, bool *auxless_support,
+ bool *auxwake_support);
+
/* Sink Interfaces - A sink corresponds to a display output device */
struct dc_container_id {
@@ -2674,4 +2738,17 @@ bool dc_is_timing_changed(struct dc_stream_state *cur_stream,
bool dc_is_cursor_limit_pending(struct dc *dc);
bool dc_can_clear_cursor_limit(struct dc *dc);
+/**
+ * dc_get_underflow_debug_data_for_otg() - Retrieve underflow debug data.
+ *
+ * @dc: Pointer to the display core context.
+ * @primary_otg_inst: Instance index of the primary OTG that underflowed.
+ * @out_data: Pointer to a dc_underflow_debug_data struct to be filled with debug information.
+ *
+ * This function collects and logs underflow-related HW states when underflow happens,
+ * including OTG underflow status, current read positions, frame count, and per-HUBP debug data.
+ * The results are stored in the provided out_data structure for further analysis or logging.
+ */
+void dc_get_underflow_debug_data_for_otg(struct dc *dc, int primary_otg_inst, struct dc_underflow_debug_data *out_data);
+
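A hypothetical call site for the new debug helper, assuming the caller already knows which OTG instance underflowed; the log lines are illustrative:

	struct dc_underflow_debug_data data = {0};
	int i;

	dc_get_underflow_debug_data_for_otg(dc, otg_inst, &data);
	if (data.otg_underflow)
		DC_LOG_DEBUG("OTG%u underflow at line %u, frame %u\n",
			     data.otg_inst, data.v_position, data.otg_frame_count);
	for (i = 0; i < MAX_PIPES; i++)
		if (data.hubps[i].hubp_underflow)
			DC_LOG_DEBUG("HUBP%d underflow, det %u -> %u\n",
				     i, data.curr_det_sizes[i], data.target_det_sizes[i]);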
#endif /* DC_INTERFACE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index f5ef1a07078e..53a088ebddef 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -2010,11 +2010,12 @@ bool dmub_lsdma_init(struct dc_dmub_srv *dc_dmub_srv)
return result;
}
-bool dmub_lsdma_send_linear_copy_packet(
+bool dmub_lsdma_send_linear_copy_command(
struct dc_dmub_srv *dc_dmub_srv,
uint64_t src_addr,
uint64_t dst_addr,
- uint32_t count)
+ uint32_t count
+)
{
struct dc_context *dc_ctx = dc_dmub_srv->ctx;
union dmub_rb_cmd cmd;
@@ -2042,9 +2043,54 @@ bool dmub_lsdma_send_linear_copy_packet(
return result;
}
+bool dmub_lsdma_send_linear_sub_window_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ struct lsdma_linear_sub_window_copy_params copy_data
+)
+{
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_LINEAR_SUB_WINDOW_COPY;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
+ lsdma_data->u.linear_sub_window_copy_data.tmz = copy_data.tmz;
+ lsdma_data->u.linear_sub_window_copy_data.element_size = copy_data.element_size;
+ lsdma_data->u.linear_sub_window_copy_data.src_lo = copy_data.src_lo;
+ lsdma_data->u.linear_sub_window_copy_data.src_hi = copy_data.src_hi;
+ lsdma_data->u.linear_sub_window_copy_data.src_x = copy_data.src_x;
+ lsdma_data->u.linear_sub_window_copy_data.src_y = copy_data.src_y;
+ lsdma_data->u.linear_sub_window_copy_data.src_pitch = copy_data.src_pitch;
+ lsdma_data->u.linear_sub_window_copy_data.src_slice_pitch = copy_data.src_slice_pitch;
+ lsdma_data->u.linear_sub_window_copy_data.dst_lo = copy_data.dst_lo;
+ lsdma_data->u.linear_sub_window_copy_data.dst_hi = copy_data.dst_hi;
+ lsdma_data->u.linear_sub_window_copy_data.dst_x = copy_data.dst_x;
+ lsdma_data->u.linear_sub_window_copy_data.dst_y = copy_data.dst_y;
+ lsdma_data->u.linear_sub_window_copy_data.dst_pitch = copy_data.dst_pitch;
+ lsdma_data->u.linear_sub_window_copy_data.dst_slice_pitch = copy_data.dst_slice_pitch;
+ lsdma_data->u.linear_sub_window_copy_data.rect_x = copy_data.rect_x;
+ lsdma_data->u.linear_sub_window_copy_data.rect_y = copy_data.rect_y;
+ lsdma_data->u.linear_sub_window_copy_data.src_cache_policy = copy_data.src_cache_policy;
+ lsdma_data->u.linear_sub_window_copy_data.dst_cache_policy = copy_data.dst_cache_policy;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA Linear Sub Window Copy failed in DMUB");
+
+ return result;
+}
+
bool dmub_lsdma_send_tiled_to_tiled_copy_command(
struct dc_dmub_srv *dc_dmub_srv,
- struct lsdma_send_tiled_to_tiled_copy_command_params params)
+ struct lsdma_send_tiled_to_tiled_copy_command_params params
+)
{
struct dc_context *dc_ctx = dc_dmub_srv->ctx;
union dmub_rb_cmd cmd;
@@ -2066,8 +2112,8 @@ bool dmub_lsdma_send_tiled_to_tiled_copy_command(
lsdma_data->u.tiled_copy_data.src_y = params.src_y;
lsdma_data->u.tiled_copy_data.dst_x = params.dst_x;
lsdma_data->u.tiled_copy_data.dst_y = params.dst_y;
- lsdma_data->u.tiled_copy_data.src_width = params.src_width - 1; // LSDMA controller expects width -1
- lsdma_data->u.tiled_copy_data.dst_width = params.dst_width - 1; // LSDMA controller expects width -1
+ lsdma_data->u.tiled_copy_data.src_width = params.src_width;
+ lsdma_data->u.tiled_copy_data.dst_width = params.dst_width;
lsdma_data->u.tiled_copy_data.src_swizzle_mode = params.swizzle_mode;
lsdma_data->u.tiled_copy_data.dst_swizzle_mode = params.swizzle_mode;
lsdma_data->u.tiled_copy_data.src_element_size = params.element_size;
@@ -2078,8 +2124,8 @@ bool dmub_lsdma_send_tiled_to_tiled_copy_command(
lsdma_data->u.tiled_copy_data.tmz = params.tmz;
lsdma_data->u.tiled_copy_data.read_compress = params.read_compress;
lsdma_data->u.tiled_copy_data.write_compress = params.write_compress;
- lsdma_data->u.tiled_copy_data.src_height = params.src_height - 1; // LSDMA controller expects height -1
- lsdma_data->u.tiled_copy_data.dst_height = params.dst_height - 1; // LSDMA controller expects height -1
+ lsdma_data->u.tiled_copy_data.src_height = params.src_height;
+ lsdma_data->u.tiled_copy_data.dst_height = params.dst_height;
lsdma_data->u.tiled_copy_data.data_format = params.data_format;
lsdma_data->u.tiled_copy_data.max_com = params.max_com;
lsdma_data->u.tiled_copy_data.max_uncom = params.max_uncom;
@@ -2097,7 +2143,8 @@ bool dmub_lsdma_send_pio_copy_command(
uint64_t src_addr,
uint64_t dst_addr,
uint32_t byte_count,
- uint32_t overlap_disable)
+ uint32_t overlap_disable
+)
{
struct dc_context *dc_ctx = dc_dmub_srv->ctx;
union dmub_rb_cmd cmd;
@@ -2130,7 +2177,8 @@ bool dmub_lsdma_send_pio_constfill_command(
struct dc_dmub_srv *dc_dmub_srv,
uint64_t dst_addr,
uint32_t byte_count,
- uint32_t data)
+ uint32_t data
+)
{
struct dc_context *dc_ctx = dc_dmub_srv->ctx;
union dmub_rb_cmd cmd;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 8ea320f21269..7ef93444ef3c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -211,11 +211,45 @@ void dc_dmub_srv_fams2_passthrough_flip(
int surface_count);
bool dmub_lsdma_init(struct dc_dmub_srv *dc_dmub_srv);
-bool dmub_lsdma_send_linear_copy_packet(
+bool dmub_lsdma_send_linear_copy_command(
struct dc_dmub_srv *dc_dmub_srv,
uint64_t src_addr,
uint64_t dst_addr,
uint32_t count);
+
+struct lsdma_linear_sub_window_copy_params {
+ uint32_t src_lo;
+ uint32_t src_hi;
+
+ uint32_t dst_lo;
+ uint32_t dst_hi;
+
+ uint32_t src_x : 16;
+ uint32_t src_y : 16;
+
+ uint32_t dst_x : 16;
+ uint32_t dst_y : 16;
+
+ uint32_t rect_x : 16;
+ uint32_t rect_y : 16;
+
+ uint32_t src_pitch : 16;
+ uint32_t dst_pitch : 16;
+
+ uint32_t src_slice_pitch;
+ uint32_t dst_slice_pitch;
+
+ uint32_t tmz : 1;
+ uint32_t element_size : 3;
+ uint32_t src_cache_policy : 3;
+ uint32_t dst_cache_policy : 3;
+ uint32_t padding : 22;
+};
+
+bool dmub_lsdma_send_linear_sub_window_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ struct lsdma_linear_sub_window_copy_params copy_data
+);
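
A hypothetical caller of the new sub-window copy entry point, populating only the fields a simple top-left-to-top-left rectangle copy needs; the addresses, pitches, and the element_size encoding are assumptions for illustration:

	struct lsdma_linear_sub_window_copy_params p = {0};

	p.src_lo = lower_32_bits(src_gpu_addr);	/* src_gpu_addr: assumed local */
	p.src_hi = upper_32_bits(src_gpu_addr);
	p.dst_lo = lower_32_bits(dst_gpu_addr);
	p.dst_hi = upper_32_bits(dst_gpu_addr);
	p.src_pitch = 1920;
	p.dst_pitch = 1920;
	p.rect_x = 256;		/* copy a 256x128 sub-window */
	p.rect_y = 128;
	p.element_size = 2;	/* assumed log2 encoding for 4-byte pixels */

	dmub_lsdma_send_linear_sub_window_copy_command(dc_dmub_srv, p);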
bool dmub_lsdma_send_pio_copy_command(
struct dc_dmub_srv *dc_dmub_srv,
uint64_t src_addr,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 5ce1be362534..db669ccb1d58 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -1021,7 +1021,8 @@ union dp_128b_132b_supported_lttpr_link_rates {
union dp_alpm_lttpr_cap {
struct {
uint8_t AUX_LESS_ALPM_SUPPORTED :1;
- uint8_t RESERVED :7;
+ uint8_t ASSR_SUPPORTED :1;
+ uint8_t RESERVED :6;
} bits;
uint8_t raw;
};
@@ -1119,10 +1120,11 @@ union dp_128b_132b_training_aux_rd_interval {
union edp_alpm_caps {
struct {
- uint8_t AUX_WAKE_ALPM_CAP :1;
- uint8_t PM_STATE_2A_SUPPORT :1;
- uint8_t AUX_LESS_ALPM_CAP :1;
- uint8_t RESERVED :5;
+ uint8_t AUX_WAKE_ALPM_CAP :1;
+ uint8_t PM_STATE_2A_SUPPORT :1;
+ uint8_t AUX_LESS_ALPM_CAP :1;
+ uint8_t AUX_LESS_ALPM_ML_PHY_SLEEP_STATUS_SUPPORTED :1;
+ uint8_t RESERVED :4;
} bits;
uint8_t raw;
};
@@ -1282,6 +1284,7 @@ struct dpcd_caps {
union dp_receive_port0_cap receive_port0_cap;
/* Indicates the number of SST links supported by MSO (Multi-Stream Output) */
uint8_t mso_cap_sst_links_supported;
+ uint8_t dp_edp_general_cap_2;
};
union dpcd_sink_ext_caps {
@@ -1347,7 +1350,9 @@ union dpcd_alpm_configuration {
struct {
unsigned char ENABLE : 1;
unsigned char IRQ_HPD_ENABLE : 1;
- unsigned char RESERVED : 6;
+ unsigned char ALPM_MODE_SEL : 1;
+ unsigned char ACDS_PERIOD_DURATION : 1;
+ unsigned char RESERVED : 4;
} bits;
unsigned char raw;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 7217de258851..5a365bd19933 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -732,7 +732,7 @@ char *dce_version_to_string(const int version)
case DCN_VERSION_3_03:
return "DCN 3.0.3";
case DCN_VERSION_3_1:
- return "DCN 3.1";
+ return "DCN 3.1.2";
case DCN_VERSION_3_14:
return "DCN 3.1.4";
case DCN_VERSION_3_15:
@@ -755,3 +755,8 @@ char *dce_version_to_string(const int version)
return "Unknown";
}
}
+
+bool dc_supports_vrr(const enum dce_version v)
+{
+ return v >= DCE_VERSION_8_0;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
index 7f57661433eb..55704d4457ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
@@ -128,7 +128,7 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
spl_in->odm_slice_index = resource_get_odm_slice_index(pipe_ctx);
// Make spl input basic out info output_size width point to stream h active
spl_in->basic_out.output_size.width =
- stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right + pipe_ctx->hblank_borrow;
+ stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right + pipe_ctx->dsc_padding_params.dsc_hactive_padding;
// Make spl input basic out info output_size height point to v active
spl_in->basic_out.output_size.height =
stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 5fc6fea211de..76cf9fdedab0 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -203,6 +203,7 @@ struct dc_stream_state {
struct dc_info_packet hfvsif_infopacket;
struct dc_info_packet vtem_infopacket;
struct dc_info_packet adaptive_sync_infopacket;
+ struct dc_info_packet avi_infopacket;
uint8_t dsc_packed_pps[128];
struct rect src; /* composition area */
struct rect dst; /* stream addressable area */
@@ -335,6 +336,8 @@ struct dc_stream_update {
struct dc_info_packet *hfvsif_infopacket;
struct dc_info_packet *vtem_infopacket;
struct dc_info_packet *adaptive_sync_infopacket;
+ struct dc_info_packet *avi_infopacket;
+
bool *dpms_off;
bool integer_scaling_update;
bool *allow_freesync;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 375ca2f13b7a..b5aa03a3e39c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -563,6 +563,12 @@ struct dc_info_packet_128 {
uint8_t sb[128];
};
+struct dc_edid_read_policy {
+ uint32_t max_retry_count;
+ uint32_t delay_time_ms;
+ uint32_t ignore_checksum;
+};
+
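A minimal sketch of how such a policy could drive an EDID read retry loop; read_edid_block() and its result codes are hypothetical, not an existing DC helper:

	/* Hypothetical result codes for an EDID block read. */
	enum edid_read_result { EDID_READ_OK, EDID_READ_BAD_CHECKSUM, EDID_READ_FAILED };
	enum edid_read_result read_edid_block(void); /* hypothetical helper */

	static bool read_edid_with_policy(const struct dc_edid_read_policy *policy)
	{
		uint32_t attempt;

		for (attempt = 0; attempt < policy->max_retry_count; attempt++) {
			enum edid_read_result res = read_edid_block();

			if (res == EDID_READ_OK)
				return true;
			if (res == EDID_READ_BAD_CHECKSUM && policy->ignore_checksum)
				return true;
			msleep(policy->delay_time_ms);
		}
		return false;
	}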
#define DC_PLANE_UPDATE_TIMES_MAX 10
struct dc_plane_flip_time {
@@ -571,6 +577,12 @@ struct dc_plane_flip_time {
unsigned int prev_update_time_in_us;
};
+enum dc_alpm_mode {
+ DC_ALPM_AUXWAKE = 0,
+ DC_ALPM_AUXLESS = 1,
+ DC_ALPM_UNSUPPORTED = 0xF,
+};
+
enum dc_psr_state {
PSR_STATE0 = 0x0,
PSR_STATE1,
@@ -616,6 +628,7 @@ struct psr_config {
unsigned int line_time_in_us;
uint8_t rate_control_caps;
uint16_t dsc_slice_height;
+ bool os_request_force_ffu;
};
union dmcu_psr_level {
@@ -728,6 +741,7 @@ struct psr_context {
unsigned int line_time_in_us;
uint8_t rate_control_caps;
uint16_t dsc_slice_height;
+ bool os_request_force_ffu;
};
struct colorspace_transform {
@@ -1137,6 +1151,10 @@ struct replay_config {
bool low_rr_supported;
/* Replay Video Conferencing Optimization Enabled */
bool replay_video_conferencing_optimization_enabled;
+ /* Replay alpm mode */
+ enum dc_alpm_mode alpm_mode;
+ /* OS requests forcing full-frame updates (FFU) */
+ bool os_request_force_ffu;
};
/* Replay feature flags*/
@@ -1199,6 +1217,7 @@ struct dc_panel_config {
bool rc_disable;
bool rc_allow_static_screen;
bool rc_allow_fullscreen_VPB;
+ bool read_psrcap_again;
unsigned int replay_enable_option;
} psr;
/* ABM */
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
index 58c84f555c0f..de6d62401362 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
@@ -39,6 +39,7 @@
#define CTX \
dccg_dcn->base.ctx
+#include "logger_types.h"
#define DC_LOGGER \
dccg->ctx->logger
@@ -133,30 +134,34 @@ enum dsc_clk_source {
};
-static void dccg35_set_dsc_clk_rcg(struct dccg *dccg, int inst, bool enable)
+static void dccg35_set_dsc_clk_rcg(struct dccg *dccg, int inst, bool allow_rcg)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc && enable)
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc && allow_rcg)
return;
switch (inst) {
case 0:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1);
break;
case 1:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1);
break;
case 2:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1);
break;
case 3:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1);
break;
default:
BREAK_TO_DEBUGGER();
return;
}
+
+ /* Wait for clock to ramp */
+ if (!allow_rcg)
+ udelay(10);
}
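
The double negation in these register writes is easy to misread; the mapping, as a comment-style truth table derived from the code above:

	/*
	 * allow_rcg == true  -> write 0 to DSCCLKn_ROOT_GATE_DISABLE
	 *                       (gating not disabled, i.e. root clock gating allowed)
	 * allow_rcg == false -> write 1 to DSCCLKn_ROOT_GATE_DISABLE
	 *                       (gating disabled, clock kept running; then
	 *                        udelay(10) to let the clock ramp)
	 */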
static void dccg35_set_symclk32_se_rcg(
@@ -385,35 +390,34 @@ static void dccg35_set_dtbclk_p_rcg(struct dccg *dccg, int inst, bool enable)
}
}
-static void dccg35_set_dppclk_rcg(struct dccg *dccg,
- int inst, bool enable)
+static void dccg35_set_dppclk_rcg(struct dccg *dccg, int inst, bool allow_rcg)
{
-
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
-
- if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp && enable)
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp && allow_rcg)
return;
switch (inst) {
case 0:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1);
break;
case 1:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1);
break;
case 2:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1);
break;
case 3:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, allow_rcg ? 0 : 1);
break;
default:
BREAK_TO_DEBUGGER();
break;
}
- //DC_LOG_DEBUG("%s: inst(%d) DPPCLK rcg_disable: %d\n", __func__, inst, enable ? 0 : 1);
+ /* Wait for clock to ramp */
+ if (!allow_rcg)
+ udelay(10);
}
static void dccg35_set_dpstreamclk_rcg(
@@ -1133,7 +1137,7 @@ static void dcn35_set_dppclk_enable(struct dccg *dccg,
default:
break;
}
- //DC_LOG_DEBUG("%s: dpp_inst(%d) DPPCLK_EN = %d\n", __func__, dpp_inst, enable);
+ DC_LOG_DEBUG("%s: dpp_inst(%d) DPPCLK_EN = %d\n", __func__, dpp_inst, enable);
}
@@ -1177,32 +1181,34 @@ static void dccg35_update_dpp_dto(struct dccg *dccg, int dpp_inst,
}
static void dccg35_set_dppclk_root_clock_gating(struct dccg *dccg,
- uint32_t dpp_inst, uint32_t enable)
+ uint32_t dpp_inst, uint32_t disallow_rcg)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp && !disallow_rcg)
return;
switch (dpp_inst) {
case 0:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, disallow_rcg);
break;
case 1:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, enable);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, disallow_rcg);
break;
case 2:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, enable);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, disallow_rcg);
break;
case 3:
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, enable);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, disallow_rcg);
break;
default:
break;
}
- //DC_LOG_DEBUG("%s: dpp_inst(%d) rcg: %d\n", __func__, dpp_inst, enable);
+ /* Wait for clock to ramp */
+ if (disallow_rcg)
+ udelay(10);
}
static void dccg35_get_pixel_rate_div(
@@ -1401,6 +1407,10 @@ static void dccg35_set_dtbclk_dto(
* PIPEx_DTO_SRC_SEL should not be programmed during DTBCLK update since OTG may still be on, and the
* programming is handled in program_pix_clk() regardless, so it can be removed from here.
*/
+ DC_LOG_DEBUG("%s: OTG%d DTBCLK DTO enabled: pixclk_khz=%d, ref_dtbclk_khz=%d, req_dtbclk_khz=%d, phase=%d, modulo=%d\n",
+ __func__, params->otg_inst, params->pixclk_khz,
+ params->ref_dtbclk_khz, req_dtbclk_khz, phase, modulo);
+
} else {
switch (params->otg_inst) {
case 0:
@@ -1426,6 +1436,8 @@ static void dccg35_set_dtbclk_dto(
REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], 0);
REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], 0);
+
+ DC_LOG_DEBUG("%s: OTG%d DTBCLK DTO disabled\n", __func__, params->otg_inst);
}
}
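
A minimal sketch of the DTO relationship the new debug line exposes, assuming the usual phase/modulo divider arrangement where output = ref * phase / modulo; the variable names mirror the log but the derivation itself is illustrative:

	/* Sketch: derive DTO phase/modulo for a requested DTBCLK. */
	uint32_t phase  = req_dtbclk_khz;	/* assumed: phase carries the target */
	uint32_t modulo = params->ref_dtbclk_khz;

	/* Effective output frequency in kHz: */
	uint64_t out_khz = div_u64((uint64_t)params->ref_dtbclk_khz * phase, modulo);
	/* With phase == req and modulo == ref, out_khz == req_dtbclk_khz. */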
@@ -1470,6 +1482,8 @@ static void dccg35_set_dpstreamclk(
BREAK_TO_DEBUGGER();
return;
}
+ DC_LOG_DEBUG("%s: dp_hpo_inst(%d) DPSTREAMCLK_EN = %d, DPSTREAMCLK_SRC_SEL = %d\n",
+ __func__, dp_hpo_inst, (src == REFCLK) ? 0 : 1, otg_inst);
}
@@ -1509,6 +1523,8 @@ static void dccg35_set_dpstreamclk_root_clock_gating(
BREAK_TO_DEBUGGER();
return;
}
+ DC_LOG_DEBUG("%s: dp_hpo_inst(%d) DPSTREAMCLK_ROOT_GATE_DISABLE = %d\n",
+ __func__, dp_hpo_inst, enable ? 1 : 0);
}
@@ -1548,7 +1564,7 @@ static void dccg35_set_physymclk_root_clock_gating(
BREAK_TO_DEBUGGER();
return;
}
- //DC_LOG_DEBUG("%s: dpp_inst(%d) PHYESYMCLK_ROOT_GATE_DISABLE:\n", __func__, phy_inst, enable ? 0 : 1);
+ DC_LOG_DEBUG("%s: dpp_inst(%d) PHYESYMCLK_ROOT_GATE_DISABLE: %d\n", __func__, phy_inst, enable ? 0 : 1);
}
@@ -1621,6 +1637,8 @@ static void dccg35_set_physymclk(
BREAK_TO_DEBUGGER();
return;
}
+ DC_LOG_DEBUG("%s: phy_inst(%d) PHYxSYMCLK_EN = %d, PHYxSYMCLK_SRC_SEL = %d\n",
+ __func__, phy_inst, force_enable ? 1 : 0, clk_src);
}
static void dccg35_set_valid_pixel_rate(
@@ -1668,6 +1686,7 @@ static void dccg35_dpp_root_clock_control(
}
dccg->dpp_clock_gated[dpp_inst] = !clock_on;
+ DC_LOG_DEBUG("%s: dpp_inst(%d) clock_on = %d\n", __func__, dpp_inst, clock_on);
}
static void dccg35_disable_symclk32_se(
@@ -1726,6 +1745,7 @@ static void dccg35_disable_symclk32_se(
BREAK_TO_DEBUGGER();
return;
}
+
}
static void dccg35_init_cb(struct dccg *dccg)
@@ -1733,7 +1753,6 @@ static void dccg35_init_cb(struct dccg *dccg)
(void)dccg;
- /* Any RCG should be done when driver enter low power mode*/
+ /* Any RCG should be done when the driver enters low power mode */
}
-
void dccg35_init(struct dccg *dccg)
{
int otg_inst;
@@ -1748,6 +1767,8 @@ void dccg35_init(struct dccg *dccg)
for (otg_inst = 0; otg_inst < 2; otg_inst++) {
dccg31_disable_symclk32_le(dccg, otg_inst);
dccg31_set_symclk32_le_root_clock_gating(dccg, otg_inst, false);
+ DC_LOG_DEBUG("%s: OTG%d SYMCLK32_LE disabled and root clock gating disabled\n",
+ __func__, otg_inst);
}
// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
@@ -1760,6 +1781,8 @@ void dccg35_init(struct dccg *dccg)
dccg35_set_dpstreamclk(dccg, REFCLK, otg_inst,
otg_inst);
dccg35_set_dpstreamclk_root_clock_gating(dccg, otg_inst, false);
+ DC_LOG_DEBUG("%s: OTG%d DPSTREAMCLK disabled and root clock gating disabled\n",
+ __func__, otg_inst);
}
/*
@@ -1782,8 +1805,7 @@ static void dccg35_enable_dscclk(struct dccg *dccg, int inst)
//Disable DTO
switch (inst) {
case 0:
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, 1);
REG_UPDATE_2(DSCCLK0_DTO_PARAM,
DSCCLK0_DTO_PHASE, 0,
@@ -1791,8 +1813,7 @@ static void dccg35_enable_dscclk(struct dccg *dccg, int inst)
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK0_EN, 1);
break;
case 1:
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, 1);
REG_UPDATE_2(DSCCLK1_DTO_PARAM,
DSCCLK1_DTO_PHASE, 0,
@@ -1800,8 +1821,7 @@ static void dccg35_enable_dscclk(struct dccg *dccg, int inst)
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_EN, 1);
break;
case 2:
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, 1);
REG_UPDATE_2(DSCCLK2_DTO_PARAM,
DSCCLK2_DTO_PHASE, 0,
@@ -1809,8 +1829,7 @@ static void dccg35_enable_dscclk(struct dccg *dccg, int inst)
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_EN, 1);
break;
case 3:
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, 1);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, 1);
REG_UPDATE_2(DSCCLK3_DTO_PARAM,
DSCCLK3_DTO_PHASE, 0,
@@ -1821,6 +1840,9 @@ static void dccg35_enable_dscclk(struct dccg *dccg, int inst)
BREAK_TO_DEBUGGER();
return;
}
+
+ /* Wait for clock to ramp */
+ udelay(10);
}
static void dccg35_disable_dscclk(struct dccg *dccg,
@@ -1864,6 +1886,9 @@ static void dccg35_disable_dscclk(struct dccg *dccg,
default:
return;
}
+
+ /* Wait for clock to ramp */
+ udelay(10);
}
static void dccg35_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst)
@@ -2349,10 +2374,7 @@ static void dccg35_disable_symclk_se_cb(
void dccg35_root_gate_disable_control(struct dccg *dccg, uint32_t pipe_idx, uint32_t disable_clock_gating)
{
-
- if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) {
- dccg35_set_dppclk_root_clock_gating(dccg, pipe_idx, disable_clock_gating);
- }
+ dccg35_set_dppclk_root_clock_gating(dccg, pipe_idx, disable_clock_gating);
}
static const struct dccg_funcs dccg35_funcs_new = {
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
index 668ee2d405fd..0b8ed9b94d3c 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
@@ -619,7 +619,7 @@ void dccg401_set_dp_dto(
dto_integer = div_u64(params->pixclk_hz, dto_modulo_hz);
dto_phase_hz = params->pixclk_hz - dto_integer * dto_modulo_hz;
- if (dto_phase_hz <= 0) {
+ if (dto_phase_hz <= 0 && dto_integer <= 0) {
/* negative pixel rate should never happen */
BREAK_TO_DEBUGGER();
return;
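
Worked through with made-up numbers, the integer/phase split above behaves like this (a sketch, not driver code):

	/* Example: 620 MHz pixel clock against a 600 MHz DTO modulo. */
	uint64_t pixclk_hz = 620000000ULL;
	uint32_t dto_modulo_hz = 600000000U;
	uint64_t dto_integer = div_u64(pixclk_hz, dto_modulo_hz);	/* = 1 */
	uint64_t dto_phase_hz = pixclk_hz - dto_integer * dto_modulo_hz; /* = 20 MHz */

	/* A zero phase with a non-zero integer part is valid (pixclk is an
	 * exact multiple of the modulo), which is why the check now rejects
	 * only the case where both integer and phase are zero. */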
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index bb4ac5042c80..673bb87d2c17 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -725,14 +725,18 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
for (i = 0; i < AUX_MAX_RETRIES; i++) {
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
LOG_FLAG_I2cAux_DceAux,
- "dce_aux_transfer_with_retries: link_index=%u: START: retry %d of %d: address=0x%04x length=%u write=%d mot=%d",
+ "dce_aux_transfer_with_retries: link_index=%u: START: retry %d of %d: "
+ "address=0x%04x length=%u write=%d mot=%d is_i2c=%d is_dpia=%d ddc_hw_inst=%d",
ddc && ddc->link ? ddc->link->link_index : UINT_MAX,
i + 1,
(int)AUX_MAX_RETRIES,
payload->address,
payload->length,
(unsigned int) payload->write,
- (unsigned int) payload->mot);
+ (unsigned int) payload->mot,
+ payload->i2c_over_aux,
+ (ddc->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ? true : false,
+ ddc->link->ddc_hw_inst);
if (payload->write)
dce_aux_log_payload(" write", payload->data, payload->length, 16);
@@ -746,7 +750,9 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
LOG_FLAG_I2cAux_DceAux,
- "dce_aux_transfer_with_retries: link_index=%u: END: retry %d of %d: address=0x%04x length=%u write=%d mot=%d: ret=%d operation_result=%d payload->reply=%u",
+ "dce_aux_transfer_with_retries: link_index=%u: END: retry %d of %d: "
+ "address=0x%04x length=%u write=%d mot=%d: ret=%d operation_result=%d "
+ "payload->reply=%u is_i2c=%d is_dpia=%d ddc_hw_inst=%d",
ddc && ddc->link ? ddc->link->link_index : UINT_MAX,
i + 1,
(int)AUX_MAX_RETRIES,
@@ -756,7 +762,10 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
(unsigned int) payload->mot,
ret,
(int)operation_result,
- (unsigned int) *payload->reply);
+ (unsigned int) *payload->reply,
+ payload->i2c_over_aux,
+ (ddc->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ? true : false,
+ ddc->link->ddc_hw_inst);
if (!payload->write)
dce_aux_log_payload(" read", payload->data, ret > 0 ? ret : 0, 16);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index 0421b267a0b5..365dd2e37aea 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -591,7 +591,7 @@ static bool dce_i2c_hw_engine_submit_payload(struct dce_i2c_hw *dce_i2c_hw,
DCE_I2C_TRANSACTION_ACTION_I2C_WRITE;
- request.address = (uint8_t) ((payload->address << 1) | !payload->write);
+ request.address = (uint8_t) ((payload->address << 1) | (payload->write ? 0 : 1));
request.length = payload->length;
request.data = payload->data;
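
The address byte layout both I2C engines build, shown with a concrete 7-bit address; 0x50 is the usual EDID EEPROM address, used here only as an example:

	uint8_t addr7 = 0x50;			/* 7-bit slave address */
	uint8_t write_byte = (addr7 << 1) | 0;	/* 0xA0: LSB 0 = write */
	uint8_t read_byte  = (addr7 << 1) | 1;	/* 0xA1: LSB 1 = read  */
	/* (payload->write ? 0 : 1) selects that R/W bit explicitly instead of
	 * relying on the integer value of a negated bool. */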
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
index e188447c8156..2d73b94c515c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
@@ -451,7 +451,7 @@ static bool dce_i2c_sw_engine_submit_payload(struct dce_i2c_sw *engine,
DCE_I2C_TRANSACTION_ACTION_I2C_WRITE_MOT :
DCE_I2C_TRANSACTION_ACTION_I2C_WRITE;
- request.address = (uint8_t) ((payload->address << 1) | !payload->write);
+ request.address = (uint8_t) ((payload->address << 1) | (payload->write ? 0 : 1));
request.length = payload->length;
request.data = payload->data;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 4a9d07c31bc5..0c50fe266c8a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -896,13 +896,13 @@ void dce110_link_encoder_construct(
enc110->base.id, &bp_cap_info);
/* Override features with DCE-specific values */
- if (BP_RESULT_OK == result) {
+ if (result == BP_RESULT_OK) {
enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
bp_cap_info.DP_HBR2_EN;
enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
bp_cap_info.DP_HBR3_EN;
enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
- } else {
+ } else if (result != BP_RESULT_NORECORD) {
DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
__func__,
result);
@@ -1798,13 +1798,13 @@ void dce60_link_encoder_construct(
enc110->base.id, &bp_cap_info);
/* Override features with DCE-specific values */
- if (BP_RESULT_OK == result) {
+ if (result == BP_RESULT_OK) {
enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
bp_cap_info.DP_HBR2_EN;
enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
bp_cap_info.DP_HBR3_EN;
enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
- } else {
+ } else if (result != BP_RESULT_NORECORD) {
DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
__func__,
result);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
index 2b1673d69ea8..1ab5ae9b5ea5 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
@@ -154,10 +154,13 @@ static bool dce60_setup_scaling_configuration(
REG_SET(SCL_BYPASS_CONTROL, 0, SCL_BYPASS_MODE, 0);
if (data->taps.h_taps + data->taps.v_taps <= 2) {
- /* Set bypass */
-
- /* DCE6 has no SCL_MODE register, skip scale mode programming */
+ /* Disable scaler functionality */
+ REG_WRITE(SCL_SCALER_ENABLE, 0);
+ /* Clear registers that can cause glitches even when the scaler is off */
+ REG_WRITE(SCL_TAP_CONTROL, 0);
+ REG_WRITE(SCL_AUTOMATIC_MODE_CONTROL, 0);
+ REG_WRITE(SCL_F_SHARP_CONTROL, 0);
return false;
}
@@ -165,7 +168,7 @@ static bool dce60_setup_scaling_configuration(
SCL_H_NUM_OF_TAPS, data->taps.h_taps - 1,
SCL_V_NUM_OF_TAPS, data->taps.v_taps - 1);
- /* DCE6 has no SCL_MODE register, skip scale mode programming */
+ REG_WRITE(SCL_SCALER_ENABLE, 1);
/* DCE6 has no SCL_BOUNDARY_MODE bit, skip replace out of bound pixels */
@@ -502,6 +505,8 @@ static void dce60_transform_set_scaler(
REG_SET(DC_LB_MEM_SIZE, 0,
DC_LB_MEM_SIZE, xfm_dce->lb_memory_size);
+ REG_WRITE(SCL_UPDATE, 0x00010000);
+
/* Clear SCL_F_SHARP_CONTROL value to 0 */
REG_WRITE(SCL_F_SHARP_CONTROL, 0);
@@ -527,8 +532,7 @@ static void dce60_transform_set_scaler(
if (coeffs_v != xfm_dce->filter_v || coeffs_h != xfm_dce->filter_h) {
/* 4. Program vertical filters */
if (xfm_dce->filter_v == NULL)
- REG_SET(SCL_VERT_FILTER_CONTROL, 0,
- SCL_V_2TAP_HARDCODE_COEF_EN, 0);
+ REG_WRITE(SCL_VERT_FILTER_CONTROL, 0);
program_multi_taps_filter(
xfm_dce,
data->taps.v_taps,
@@ -542,8 +546,7 @@ static void dce60_transform_set_scaler(
/* 5. Program horizontal filters */
if (xfm_dce->filter_h == NULL)
- REG_SET(SCL_HORZ_FILTER_CONTROL, 0,
- SCL_H_2TAP_HARDCODE_COEF_EN, 0);
+ REG_WRITE(SCL_HORZ_FILTER_CONTROL, 0);
program_multi_taps_filter(
xfm_dce,
data->taps.h_taps,
@@ -566,6 +569,8 @@ static void dce60_transform_set_scaler(
/* DCE6 has no SCL_COEF_UPDATE_COMPLETE bit to flip to new coefficient memory */
/* DCE6 DATA_FORMAT register does not support ALPHA_EN */
+
+ REG_WRITE(SCL_UPDATE, 0);
}
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
index cbce194ec7b8..eb716e8337e2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
@@ -155,6 +155,9 @@
SRI(SCL_COEF_RAM_TAP_DATA, SCL, id), \
SRI(VIEWPORT_START, SCL, id), \
SRI(VIEWPORT_SIZE, SCL, id), \
+ SRI(SCL_SCALER_ENABLE, SCL, id), \
+ SRI(SCL_HORZ_FILTER_INIT_RGB_LUMA, SCL, id), \
+ SRI(SCL_HORZ_FILTER_INIT_CHROMA, SCL, id), \
SRI(SCL_HORZ_FILTER_SCALE_RATIO, SCL, id), \
SRI(SCL_VERT_FILTER_SCALE_RATIO, SCL, id), \
SRI(SCL_VERT_FILTER_INIT, SCL, id), \
@@ -590,6 +593,7 @@ struct dce_transform_registers {
uint32_t SCL_VERT_FILTER_SCALE_RATIO;
uint32_t SCL_HORZ_FILTER_INIT;
#if defined(CONFIG_DRM_AMD_DC_SI)
+ uint32_t SCL_SCALER_ENABLE;
uint32_t SCL_HORZ_FILTER_INIT_RGB_LUMA;
uint32_t SCL_HORZ_FILTER_INIT_CHROMA;
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index ff3b8244ba3d..87af4fdc04a6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -391,7 +391,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
sizeof(DP_SINK_DEVICE_STR_ID_1)))
link->psr_settings.force_ffu_mode = 1;
- copy_settings_data->force_ffu_mode = link->psr_settings.force_ffu_mode;
+ copy_settings_data->force_ffu_mode = link->psr_settings.force_ffu_mode || psr_context->os_request_force_ffu;
if (((link->dpcd_caps.fec_cap.bits.FEC_CAPABLE &&
!link->dc->debug.disable_fec) &&
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
index e7a318e26d38..f9542edff14b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
@@ -3,8 +3,8 @@
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dc.h"
+#include "link_service.h"
#include "dc_dmub_srv.h"
-#include "dc_dp_types.h"
#include "dmub/dmub_srv.h"
#include "core_types.h"
#include "dmub_replay.h"
@@ -44,45 +44,21 @@ static void dmub_replay_get_state(struct dmub_replay *dmub, enum replay_state *s
/*
* Enable/Disable Replay.
*/
-static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait, uint8_t panel_inst,
- struct dc_link *link)
+static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait, uint8_t panel_inst)
{
union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx;
uint32_t retry_count;
enum replay_state state = REPLAY_STATE_0;
- struct pipe_ctx *pipe_ctx = NULL;
- struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx;
- uint8_t i;
memset(&cmd, 0, sizeof(cmd));
cmd.replay_enable.header.type = DMUB_CMD__REPLAY;
cmd.replay_enable.data.panel_inst = panel_inst;
cmd.replay_enable.header.sub_type = DMUB_CMD__REPLAY_ENABLE;
- if (enable) {
+ if (enable)
cmd.replay_enable.data.enable = REPLAY_ENABLE;
- // hpo stream/link encoder assignments are not static, need to update everytime we try to enable replay
- if (link->cur_link_settings.link_rate >= LINK_RATE_UHBR10) {
- for (i = 0; i < MAX_PIPES; i++) {
- if (res_ctx &&
- res_ctx->pipe_ctx[i].stream &&
- res_ctx->pipe_ctx[i].stream->link &&
- res_ctx->pipe_ctx[i].stream->link == link &&
- res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) {
- pipe_ctx = &res_ctx->pipe_ctx[i];
- //TODO: refactor for multi edp support
- break;
- }
- }
-
- if (!pipe_ctx)
- return;
-
- cmd.replay_enable.data.hpo_stream_enc_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
- cmd.replay_enable.data.hpo_link_enc_inst = pipe_ctx->link_res.hpo_dp_link_enc->inst;
- }
- } else
+ else
cmd.replay_enable.data.enable = REPLAY_DISABLE;
cmd.replay_enable.header.payload_bytes = sizeof(struct dmub_rb_cmd_replay_enable_data);
@@ -174,17 +150,6 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
copy_settings_data->digbe_inst = replay_context->digbe_inst;
copy_settings_data->digfe_inst = replay_context->digfe_inst;
- if (link->cur_link_settings.link_rate >= LINK_RATE_UHBR10) {
- if (pipe_ctx->stream_res.hpo_dp_stream_enc)
- copy_settings_data->hpo_stream_enc_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
- else
- copy_settings_data->hpo_stream_enc_inst = 0;
- if (pipe_ctx->link_res.hpo_dp_link_enc)
- copy_settings_data->hpo_link_enc_inst = pipe_ctx->link_res.hpo_dp_link_enc->inst;
- else
- copy_settings_data->hpo_link_enc_inst = 0;
- }
-
if (pipe_ctx->plane_res.dpp)
copy_settings_data->dpp_inst = pipe_ctx->plane_res.dpp->inst;
else
@@ -204,6 +169,7 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
copy_settings_data->max_deviation_line = link->dpcd_caps.pr_info.max_deviation_line;
copy_settings_data->smu_optimizations_en = link->replay_settings.replay_smu_opt_enable;
copy_settings_data->replay_timing_sync_supported = link->replay_settings.config.replay_timing_sync_supported;
+ copy_settings_data->replay_support_fast_resync_in_ultra_sleep_mode = link->replay_settings.config.replay_support_fast_resync_in_ultra_sleep_mode;
copy_settings_data->debug.bitfields.enable_ips_visual_confirm = dc->dc->debug.enable_ips_visual_confirm;
@@ -225,6 +191,18 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
else
copy_settings_data->flags.bitfields.force_wakeup_by_tps3 = 0;
+ copy_settings_data->flags.bitfields.alpm_mode = (enum dmub_alpm_mode)link->replay_settings.config.alpm_mode;
+ if (link->replay_settings.config.alpm_mode == DC_ALPM_AUXLESS) {
+ copy_settings_data->auxless_alpm_data.lfps_setup_ns = dc->dc->debug.auxless_alpm_lfps_setup_ns;
+ copy_settings_data->auxless_alpm_data.lfps_period_ns = dc->dc->debug.auxless_alpm_lfps_period_ns;
+ copy_settings_data->auxless_alpm_data.lfps_silence_ns = dc->dc->debug.auxless_alpm_lfps_silence_ns;
+ copy_settings_data->auxless_alpm_data.lfps_t1_t2_override_us =
+ dc->dc->debug.auxless_alpm_lfps_t1t2_us;
+ copy_settings_data->auxless_alpm_data.lfps_t1_t2_offset_us =
+ dc->dc->debug.auxless_alpm_lfps_t1t2_offset_us;
+ copy_settings_data->auxless_alpm_data.lttpr_count = link->dc->link_srv->dp_get_lttpr_count(link);
+ }
+
dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
@@ -247,7 +225,6 @@ static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub,
pCmd->header.type = DMUB_CMD__REPLAY;
pCmd->header.sub_type = DMUB_CMD__REPLAY_SET_COASTING_VTOTAL;
pCmd->header.payload_bytes = sizeof(struct dmub_cmd_replay_set_coasting_vtotal_data);
- pCmd->replay_set_coasting_vtotal_data.panel_inst = panel_inst;
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF);
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal_high = (coasting_vtotal & 0xFFFF0000) >> 16;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
index ccbe385e132c..e6346c0ffc0e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
@@ -19,7 +19,7 @@ struct dmub_replay_funcs {
void (*replay_get_state)(struct dmub_replay *dmub, enum replay_state *state,
uint8_t panel_inst);
void (*replay_enable)(struct dmub_replay *dmub, bool enable, bool wait,
- uint8_t panel_inst, struct dc_link *link);
+ uint8_t panel_inst);
bool (*replay_copy_settings)(struct dmub_replay *dmub, struct dc_link *link,
struct replay_context *replay_context, uint8_t panel_inst);
void (*replay_set_power_opt)(struct dmub_replay *dmub, unsigned int power_opt,
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c
index e0558a78b11c..1c1228116487 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c
@@ -812,7 +812,7 @@ bool dcn10_link_encoder_validate_output_with_stream(
enc10, &stream->timing);
break;
case SIGNAL_TYPE_EDP:
- is_valid = (stream->timing.pixel_encoding == PIXEL_ENCODING_RGB) ? true : false;
+ is_valid = stream->timing.pixel_encoding == PIXEL_ENCODING_RGB;
break;
case SIGNAL_TYPE_VIRTUAL:
is_valid = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c
index 22e66b375a7f..d928b4dcf6b8 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c
@@ -28,7 +28,7 @@
#include "dcn10_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
#include "dcn30/dcn30_afmt.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c
index 0b47aeb60e79..bec0b4aaeb2b 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c
@@ -29,7 +29,7 @@
#include "dcn20_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
#define DC_LOGGER \
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c
index 9a92f73d5b7f..84cc2ddc52fe 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c
@@ -37,7 +37,7 @@
#include "link_enc_cfg.h"
#include "dc_dmub_srv.h"
#include "dal_asic_id.h"
-#include "link.h"
+#include "link_service.h"
#define CTX \
enc10->base.ctx
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
index ae81451a3a72..3e85e9c3d2cb 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
@@ -30,7 +30,7 @@
#include "dcn314_dio_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
#define DC_LOGGER \
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c
index 1a9bb614c41e..3523d1cdc1a3 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c
@@ -29,7 +29,7 @@
#include "dcn32_dio_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
#define DC_LOGGER \
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c
index 6ab2a218b769..fd5d1dbf9dc6 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c
@@ -29,7 +29,7 @@
#include "dcn35_dio_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
#define DC_LOGGER \
@@ -397,7 +397,7 @@ static bool enc35_is_fifo_enabled(struct stream_encoder *enc)
uint32_t reset_val;
REG_GET(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, &reset_val);
- return (reset_val == 0) ? false : true;
+ return reset_val != 0;
}
void enc35_disable_fifo(struct stream_encoder *enc)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
index d5fa551dd3c9..99aab70ef3e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
@@ -32,7 +32,7 @@
#include "dcn401_dio_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
#define DC_LOGGER \
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index 7b9c22c45453..fbbf9c757b3c 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -277,12 +277,13 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line, struct dc
/*
* SMU message tracing
*/
-void dm_trace_smu_msg(uint32_t msg_id, uint32_t param_in, struct dc_context *ctx);
-void dm_trace_smu_delay(uint32_t delay, struct dc_context *ctx);
-
-#define TRACE_SMU_MSG(msg_id, param_in, ctx) dm_trace_smu_msg(msg_id, param_in, ctx)
-#define TRACE_SMU_DELAY(response_delay, ctx) dm_trace_smu_delay(response_delay, ctx)
+void dm_trace_smu_enter(uint32_t msg_id, uint32_t param_in, unsigned int delay, struct dc_context *ctx);
+void dm_trace_smu_exit(bool success, uint32_t response, struct dc_context *ctx);
+#define TRACE_SMU_MSG_DELAY(msg_id, param_in, delay, ctx) dm_trace_smu_enter(msg_id, param_in, delay, ctx)
+#define TRACE_SMU_MSG(msg_id, param_in, ctx) dm_trace_smu_enter(msg_id, param_in, 0, ctx)
+#define TRACE_SMU_MSG_ENTER(msg_id, param_in, ctx) dm_trace_smu_enter(msg_id, param_in, 0, ctx)
+#define TRACE_SMU_MSG_EXIT(success, response, ctx) dm_trace_smu_exit(success, response, ctx)
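
A hypothetical call site showing the intended enter/exit pairing around an SMU message; send_smu_message() and the variable names are placeholders:

	TRACE_SMU_MSG_ENTER(msg_id, param_in, ctx);
	success = send_smu_message(msg_id, param_in, &response); /* hypothetical */
	TRACE_SMU_MSG_EXIT(success, response, ctx);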
/*
* DMUB Interfaces
@@ -311,4 +312,6 @@ void dm_dtn_log_end(struct dc_context *ctx,
char *dce_version_to_string(const int version);
+bool dc_supports_vrr(const enum dce_version v);
+
#endif /* __DM_SERVICES_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
index bf63da266a18..3b093b8699ab 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
@@ -127,7 +127,7 @@ struct dm_pp_single_disp_config {
uint32_t src_height;
uint32_t src_width;
uint32_t v_refresh;
- uint32_t sym_clock; /* HDMI only */
+ uint32_t pixel_clock; /* Pixel clock in kHz (normalized for HDMI only) */
struct dc_link_settings link_settings; /* DP only */
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index 2a2eaf6adf26..7aaf13bbd4e4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -30,8 +30,7 @@
#include "dcn20/dcn20_resource.h"
#include "dcn21/dcn21_resource.h"
#include "clk_mgr/dcn21/rn_clk_mgr.h"
-
-#include "link.h"
+#include "link_service.h"
#include "dcn20_fpu.h"
#include "dc_state_priv.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
index 390c1a77fda6..9c58ff1069d6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
@@ -646,7 +646,7 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
// the dpte_group_bytes is reduced for the specific case of vertical
// access of a tile surface that has dpte request of 8x1 ptes.
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
//full size
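
The operands in the fixed condition are already 0/1, so `&` and `&&` compute the same value here; the change matters for readability and for safety if any operand ever stops being a pure boolean. A sketch of the hazard:

	int surf_linear = 0, surf_vert = 1, log2_h = 0;

	/* Same result today, because each operand is exactly 0 or 1: */
	int a = !surf_linear & (log2_h == 0) & surf_vert;	/* 1 */
	int b = !surf_linear && (log2_h == 0) && surf_vert;	/* 1 */

	/* But with a multi-bit "truthy" flag the bitwise form silently breaks: */
	int flags = 0x2;
	int c = !surf_linear & (log2_h == 0) & flags;	/* 0: 1 & 1 & 2 == 0 */
	int d = !surf_linear && (log2_h == 0) && flags;	/* 1 */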
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
index 843d6004258c..570e6e39eb45 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
@@ -646,7 +646,7 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
// the dpte_group_bytes is reduced for the specific case of vertical
// access of a tile surface that has dpte request of 8x1 ptes.
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
//full size
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
index 5718000627b0..f549da082c01 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
@@ -652,7 +652,7 @@ static void get_meta_and_pte_attr(
if (hostvm_enable)
rq_sizing_param->dpte_group_bytes = 512;
else {
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
//full size
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
index 8d4873f80df0..4fb37df54d59 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
@@ -620,7 +620,7 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
if (hostvm_enable)
rq_sizing_param->dpte_group_bytes = 512;
else {
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
rq_sizing_param->dpte_group_bytes = 2048;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
index 0c0b2d67c9cd..1aaa77265eed 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
@@ -326,7 +326,7 @@ void dcn301_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
struct dcn301_resource_pool *pool = TO_DCN301_RES_POOL(dc->res_pool);
struct clk_limit_table *clk_table = &bw_params->clk_table;
unsigned int i, closest_clk_lvl;
- int j;
+ int j = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0;
dc_assert_fp_enabled();
@@ -338,6 +338,15 @@ void dcn301_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
dcn3_01_soc.num_chans = bw_params->num_channels;
ASSERT(clk_table->num_entries);
+
+ /* Prepass to find max clocks independent of voltage level. */
+ for (i = 0; i < clk_table->num_entries; ++i) {
+ if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
+ if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
+ }
+
for (i = 0; i < clk_table->num_entries; i++) {
/* loop backwards*/
for (closest_clk_lvl = 0, j = dcn3_01_soc.num_states - 1; j >= 0; j--) {
@@ -353,8 +362,13 @@ void dcn301_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
s[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
- s[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
- s[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+ /* Clocks independent of voltage level. */
+ s[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
+ dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+
+ s[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
+ dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+
s[i].dram_bw_per_chan_gbps =
dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
s[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
@@ -435,12 +449,12 @@ void dcn301_fpu_calculate_wm_and_dlg(struct dc *dc,
&context->bw_ctx.dml, pipes, pipe_cnt);
/* WM Set C */
table_entry = &bw_params->wm_table.entries[WM_C];
- vlevel = min(max(vlevel_req, 2), vlevel_max);
+ vlevel = clamp(vlevel_req, 2, vlevel_max);
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c,
&context->bw_ctx.dml, pipes, pipe_cnt);
/* WM Set B */
table_entry = &bw_params->wm_table.entries[WM_B];
- vlevel = min(max(vlevel_req, 1), vlevel_max);
+ vlevel = clamp(vlevel_req, 1, vlevel_max);
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b,
&context->bw_ctx.dml, pipes, pipe_cnt);
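The min(max()) → clamp() substitutions above are behavior-preserving: the kernel's clamp(val, lo, hi) is defined as min(max(val, lo), hi), so the change only states the intent directly. A standalone check of the equivalence, with plain functions standing in for the kernel macros:

#include <stdio.h>

static int max_i(int a, int b) { return a > b ? a : b; }
static int min_i(int a, int b) { return a < b ? a : b; }

/* Same shape as the kernel's clamp(): bound val to [lo, hi]. */
static int clamp_i(int val, int lo, int hi)
{
	return min_i(max_i(val, lo), hi);
}

int main(void)
{
	int vlevel_max = 3;

	for (int vlevel_req = 0; vlevel_req <= 5; vlevel_req++)
		printf("req=%d min(max)=%d clamp=%d\n", vlevel_req,
		       min_i(max_i(vlevel_req, 2), vlevel_max),
		       clamp_i(vlevel_req, 2, vlevel_max));
	return 0;
}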
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c
index 8da97a96b1ce..8d7c59ec701d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c
@@ -280,7 +280,7 @@ void dcn302_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
j = 0;
/* create the final dcfclk and uclk table */
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
- if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
index e968870a4b81..b5d3fd4c3694 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
@@ -285,7 +285,7 @@ void dcn303_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
j = 0;
/* create the final dcfclk and uclk table */
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
- if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
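The dropped "i < num_dcfclk_sta_targets" test in dcn302/dcn303 (and again in dcn32/dcn321 below) was dead logic: the enclosing while condition already guarantees it before the body runs. A reduced, runnable model of the same two-way merge shape, with hypothetical clock values:

#include <stdio.h>

#define MAX_STATES 8

int main(void)
{
	int sta[] = { 300, 600, 900 };	/* hypothetical DCFCLK STA targets */
	int optimal[] = { 400, 800 };	/* hypothetical per-UCLK optima */
	int out[MAX_STATES];
	int i = 0, j = 0, num_states = 0, n_sta = 3, n_opt = 2;

	while (i < n_sta && j < n_opt && num_states < MAX_STATES) {
		if (sta[i] < optimal[j])	/* i < n_sta is implied here */
			out[num_states++] = sta[i++];
		else
			out[num_states++] = optimal[j++];
	}
	for (int k = 0; k < num_states; k++)
		printf("%d ", out[k]);
	printf("\n");	/* 300 400 600 800 */
	return 0;
}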
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
index 17a21bcbde17..1a28061bb9ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
@@ -808,6 +808,8 @@ void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc)
{
+ dc_assert_fp_enabled();
+
return soc->clock_limits[0].dispclk_mhz * 10000.0 / (1.0 + soc->dcn_downspread_percent / 100.0);
}
@@ -815,6 +817,8 @@ int dcn_get_approx_det_segs_required_for_pstate(
struct _vcs_dpi_soc_bounding_box_st *soc,
int pix_clk_100hz, int bpp, int seg_size_kb)
{
+ dc_assert_fp_enabled();
+
/* Roughly calculate required crb to hide latency. In practice there is slightly
* more buffer available for latency hiding
*/
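The dc_assert_fp_enabled() insertions here, together with the dcn35/dcn351 hunks below that delete nested DC_FP_START()/DC_FP_END() pairs, follow one convention: the caller owns the kernel-FPU critical section and callees only assert that it is open. A standalone model of that calling convention (the stand-in macros below merely count depth; the real ones wrap kernel_fpu_begin()/kernel_fpu_end()):

#include <assert.h>
#include <stdio.h>

static int fp_depth;
#define DC_FP_START()	(fp_depth++)
#define DC_FP_END()	(fp_depth--)
static void dc_assert_fp_enabled(void) { assert(fp_depth > 0); }

static void fpu_helper(void)
{
	dc_assert_fp_enabled();	/* callee asserts; it never re-enters */
	printf("double-precision math at depth %d\n", fp_depth);
}

int main(void)
{
	DC_FP_START();	/* caller opens one FP section for all helpers */
	fpu_helper();
	DC_FP_END();
	return 0;
}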
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
index c46bda2141ac..bfeb01477f0c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
@@ -615,7 +615,7 @@ static void get_meta_and_pte_attr(
if (hostvm_enable)
rq_sizing_param->dpte_group_bytes = 512;
else {
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
rq_sizing_param->dpte_group_bytes = 2048;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
index b7d2a0caec11..04df263ff65e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
@@ -703,7 +703,7 @@ static void get_meta_and_pte_attr(
if (hostvm_enable)
rq_sizing_param->dpte_group_bytes = 512;
else {
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
rq_sizing_param->dpte_group_bytes = 2048;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 6160952245b4..8a0f128722b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -31,7 +31,7 @@
// We need this includes for WATERMARKS_* defines
#include "clk_mgr/dcn32/dcn32_smu13_driver_if.h"
#include "dcn30/dcn30_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "dc_state_priv.h"
#define DC_LOGGER_INIT(logger)
@@ -3229,7 +3229,7 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
j = 0;
// create the final dcfclk and uclk table
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
- if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
@@ -3401,7 +3401,7 @@ bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe)
uint32_t height = subvp_active_margin_list.res[i].height;
refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
- pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
+ (uint64_t)pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
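The (uint64_t) cast added above fixes a narrow-multiply bug: v_total * h_total was computed at 32-bit width before being added to the 64-bit numerator, so large timing products could wrap before widening. The "+ product - 1" term in the same expression is the usual ceiling-division idiom. Both effects in a standalone snippet (values exaggerated to force the wrap):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t v_total = 70000, h_total = 70000;	/* exaggerated on purpose */

	uint64_t wrapped = v_total * h_total;		/* 32-bit multiply wraps */
	uint64_t correct = (uint64_t)v_total * h_total;	/* widened multiply */
	printf("wrapped=%" PRIu64 " correct=%" PRIu64 "\n", wrapped, correct);

	/* (a + b - 1) / b == ceil(a / b) for positive integers */
	uint64_t a = 10, b = 4;
	printf("ceil(10/4) = %" PRIu64 "\n", (a + b - 1) / b);	/* 3 */
	return 0;
}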
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
index 9ba6cb67655f..6c75aa82327a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
@@ -139,7 +139,6 @@ void dml32_rq_dlg_get_rq_reg(display_rq_regs_st *rq_regs,
if (dual_plane) {
unsigned int p1_pte_row_height_linear = get_dpte_row_height_linear_c(mode_lib, e2e_pipe_param,
num_pipes, pipe_idx);
- ;
if (src->sw_mode == dm_sw_linear)
ASSERT(p1_pte_row_height_linear >= 8);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
index 8839faf42207..e0a1dc89ce43 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
@@ -779,7 +779,7 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
j = 0;
// create the final dcfclk and uclk table
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
- if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index 5d73efa2f0c9..817a370e80a7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -31,7 +31,7 @@
#include "dml/dcn31/dcn31_fpu.h"
#include "dml/dml_inline_defs.h"
-#include "link.h"
+#include "link_service.h"
#define DC_LOGGER_INIT(logger)
@@ -445,6 +445,8 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
bool upscaled = false;
const unsigned int max_allowed_vblank_nom = 1023;
+ dc_assert_fp_enabled();
+
dcn31_populate_dml_pipes_from_context(dc, context, pipes,
validate_mode);
@@ -498,9 +500,7 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
- DC_FP_START();
dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt);
- DC_FP_END();
pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
pipes[pipe_cnt].pipe.src.dcc_rate = 3;
@@ -581,6 +581,8 @@ void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context)
unsigned int i, plane_count = 0;
DC_LOGGER_INIT(dc->ctx->logger);
+ dc_assert_fp_enabled();
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (context->res_ctx.pipe_ctx[i].plane_state)
plane_count++;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
index 6f516af82956..77023b619f1e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
@@ -10,7 +10,7 @@
#include "dml/dcn35/dcn35_fpu.h"
#include "dml/dml_inline_defs.h"
-#include "link.h"
+#include "link_service.h"
#define DC_LOGGER_INIT(logger)
@@ -478,6 +478,8 @@ int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,
bool upscaled = false;
const unsigned int max_allowed_vblank_nom = 1023;
+ dc_assert_fp_enabled();
+
dcn31_populate_dml_pipes_from_context(dc, context, pipes,
validate_mode);
@@ -531,9 +533,7 @@ int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,
pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
- DC_FP_START();
dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt);
- DC_FP_END();
pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
pipes[pipe_cnt].pipe.src.dcc_rate = 3;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
index 715f9019a33e..4b9b2e84d381 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
@@ -6529,7 +6529,7 @@ static noinline_for_stack void dml_prefetch_check(struct display_mode_lib_st *mo
mode_lib->ms.TotImmediateFlipBytes = 0;
for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
if (!(mode_lib->ms.policy.ImmediateFlipRequirement[k] == dml_immediate_flip_not_required)) {
- mode_lib->ms.TotImmediateFlipBytes = mode_lib->ms.TotImmediateFlipBytes + mode_lib->ms.NoOfDPP[j][k] * mode_lib->ms.PDEAndMetaPTEBytesPerFrame[j][k] + mode_lib->ms.MetaRowBytes[j][k];
+ mode_lib->ms.TotImmediateFlipBytes = mode_lib->ms.TotImmediateFlipBytes + mode_lib->ms.NoOfDPP[j][k] * (mode_lib->ms.PDEAndMetaPTEBytesPerFrame[j][k] + mode_lib->ms.MetaRowBytes[j][k]);
if (mode_lib->ms.use_one_row_for_frame_flip[j][k]) {
mode_lib->ms.TotImmediateFlipBytes = mode_lib->ms.TotImmediateFlipBytes + mode_lib->ms.NoOfDPP[j][k] * (2 * mode_lib->ms.DPTEBytesPerRow[j][k]);
} else {
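The added parentheses in the TotImmediateFlipBytes line are a genuine arithmetic fix, not a cleanup: previously only the PDE/meta-PTE bytes were scaled by the DPP count and MetaRowBytes was added once per plane; the new grouping scales both. With, say, NoOfDPP = 2, PDEAndMetaPTEBytesPerFrame = 100 and MetaRowBytes = 10, the old form yields 2 * 100 + 10 = 210 while the corrected form yields 2 * (100 + 10) = 220.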
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
index a06217a9eef6..bf5e7f4e0416 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
@@ -8,7 +8,7 @@
#include "dml2_internal_types.h"
#include "dml21_utils.h"
#include "dml21_translation_helper.h"
-#include "bounding_boxes/dcn4_soc_bb.h"
+#include "soc_and_ip_translator.h"
static void dml21_populate_pmo_options(struct dml2_pmo_options *pmo_options,
const struct dc *in_dc,
@@ -38,375 +38,37 @@ static void dml21_populate_pmo_options(struct dml2_pmo_options *pmo_options,
pmo_options->disable_drr_clamped_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE;
}
-/*
- * Populate dml_init based on default static values in soc bb. The default
- * values are for reference and support at least minimal operation of current
- * SoC and DCN hardware. The values could be modifed by subsequent override
- * functions to reflect our true hardware capability.
- */
-static void populate_default_dml_init_params(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config,
- const struct dc *in_dc)
+static enum dml2_project_id dml21_dcn_revision_to_dml2_project_id(enum dce_version dcn_version)
{
- switch (in_dc->ctx->dce_version) {
+ enum dml2_project_id project_id;
+ switch (dcn_version) {
case DCN_VERSION_4_01:
- dml_init->options.project_id = dml2_project_dcn4x_stage2_auto_drr_svp;
- dml21_populate_pmo_options(&dml_init->options.pmo_options, in_dc, config);
- dml_init->soc_bb = dml2_socbb_dcn401;
- dml_init->soc_bb.qos_parameters = dml_dcn4_variant_a_soc_qos_params;
- dml_init->ip_caps = dml2_dcn401_max_ip_caps;
+ project_id = dml2_project_dcn4x_stage2_auto_drr_svp;
break;
default:
- memset(dml_init, 0, sizeof(*dml_init));
+ project_id = dml2_project_invalid;
DC_ERR("unsupported dcn version for DML21!");
- return;
- }
-}
-
-static void override_dml_init_with_values_from_hardware_default(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config,
- const struct dc *in_dc)
-{
- dml_init->soc_bb.dchub_refclk_mhz = in_dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
- dml_init->soc_bb.dprefclk_mhz = in_dc->clk_mgr->dprefclk_khz / 1000;
- dml_init->soc_bb.dispclk_dppclk_vco_speed_mhz = in_dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
-}
-
-/*
- * SMU stands for System Management Unit. It is a power management processor.
- * It owns the initialization of dc's clock table and programming of clock values
- * based on dc's requests.
- * Our clock values in base soc bb is a dummy placeholder. The real clock values
- * are retrieved from SMU firmware to dc clock table at runtime.
- * This function overrides our dummy placeholder values with real values in dc
- * clock table.
- */
-static void override_dml_init_with_values_from_smu(
- struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config,
- const struct dc *in_dc)
-{
- int i;
- const struct clk_bw_params *dc_bw_params = in_dc->clk_mgr->bw_params;
- const struct clk_limit_table *dc_clk_table = &dc_bw_params->clk_table;
- struct dml2_soc_state_table *dml_clk_table = &dml_init->soc_bb.clk_table;
-
- if (!in_dc->clk_mgr->funcs->is_smu_present ||
- !in_dc->clk_mgr->funcs->is_smu_present(in_dc->clk_mgr))
- /* skip if smu is not present */
- return;
-
- /* dcfclk */
- if (dc_clk_table->num_entries_per_clk.num_dcfclk_levels) {
- dml_clk_table->dcfclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dcfclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dcfclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dcfclk_mhz &&
- dc_clk_table->entries[i].dcfclk_mhz > dc_bw_params->dc_mode_limit.dcfclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dcfclk_mhz < dc_bw_params->dc_mode_limit.dcfclk_mhz) {
- dml_clk_table->dcfclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dcfclk_mhz * 1000;
- dml_clk_table->dcfclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dcfclk.clk_values_khz[i] = 0;
- dml_clk_table->dcfclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->dcfclk.clk_values_khz[i] = dc_clk_table->entries[i].dcfclk_mhz * 1000;
- }
- } else {
- dml_clk_table->dcfclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* fclk */
- if (dc_clk_table->num_entries_per_clk.num_fclk_levels) {
- dml_clk_table->fclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_fclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->fclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.fclk_mhz &&
- dc_clk_table->entries[i].fclk_mhz > dc_bw_params->dc_mode_limit.fclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].fclk_mhz < dc_bw_params->dc_mode_limit.fclk_mhz) {
- dml_clk_table->fclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.fclk_mhz * 1000;
- dml_clk_table->fclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->fclk.clk_values_khz[i] = 0;
- dml_clk_table->fclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->fclk.clk_values_khz[i] = dc_clk_table->entries[i].fclk_mhz * 1000;
- }
- } else {
- dml_clk_table->fclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* uclk */
- if (dc_clk_table->num_entries_per_clk.num_memclk_levels) {
- dml_clk_table->uclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_memclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->uclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.memclk_mhz &&
- dc_clk_table->entries[i].memclk_mhz > dc_bw_params->dc_mode_limit.memclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].memclk_mhz < dc_bw_params->dc_mode_limit.memclk_mhz) {
- dml_clk_table->uclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.memclk_mhz * 1000;
- dml_clk_table->uclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->uclk.clk_values_khz[i] = 0;
- dml_clk_table->uclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->uclk.clk_values_khz[i] = dc_clk_table->entries[i].memclk_mhz * 1000;
- }
- } else {
- dml_clk_table->uclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* dispclk */
- if (dc_clk_table->num_entries_per_clk.num_dispclk_levels) {
- dml_clk_table->dispclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dispclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dispclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dispclk_mhz &&
- dc_clk_table->entries[i].dispclk_mhz > dc_bw_params->dc_mode_limit.dispclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dispclk_mhz < dc_bw_params->dc_mode_limit.dispclk_mhz) {
- dml_clk_table->dispclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dispclk_mhz * 1000;
- dml_clk_table->dispclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dispclk.clk_values_khz[i] = 0;
- dml_clk_table->dispclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->dispclk.clk_values_khz[i] = dc_clk_table->entries[i].dispclk_mhz * 1000;
- }
- } else {
- dml_clk_table->dispclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* dppclk */
- if (dc_clk_table->num_entries_per_clk.num_dppclk_levels) {
- dml_clk_table->dppclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dppclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dppclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dppclk_mhz &&
- dc_clk_table->entries[i].dppclk_mhz > dc_bw_params->dc_mode_limit.dppclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dppclk_mhz < dc_bw_params->dc_mode_limit.dppclk_mhz) {
- dml_clk_table->dppclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dppclk_mhz * 1000;
- dml_clk_table->dppclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dppclk.clk_values_khz[i] = 0;
- dml_clk_table->dppclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->dppclk.clk_values_khz[i] = dc_clk_table->entries[i].dppclk_mhz * 1000;
- }
- } else {
- dml_clk_table->dppclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* dtbclk */
- if (dc_clk_table->num_entries_per_clk.num_dtbclk_levels) {
- dml_clk_table->dtbclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dtbclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dtbclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dtbclk_mhz &&
- dc_clk_table->entries[i].dtbclk_mhz > dc_bw_params->dc_mode_limit.dtbclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dtbclk_mhz < dc_bw_params->dc_mode_limit.dtbclk_mhz) {
- dml_clk_table->dtbclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dtbclk_mhz * 1000;
- dml_clk_table->dtbclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dtbclk.clk_values_khz[i] = 0;
- dml_clk_table->dtbclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->dtbclk.clk_values_khz[i] = dc_clk_table->entries[i].dtbclk_mhz * 1000;
- }
- } else {
- dml_clk_table->dtbclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* socclk */
- if (dc_clk_table->num_entries_per_clk.num_socclk_levels) {
- dml_clk_table->socclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_socclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->socclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.socclk_mhz &&
- dc_clk_table->entries[i].socclk_mhz > dc_bw_params->dc_mode_limit.socclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].socclk_mhz < dc_bw_params->dc_mode_limit.socclk_mhz) {
- dml_clk_table->socclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.socclk_mhz * 1000;
- dml_clk_table->socclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->socclk.clk_values_khz[i] = 0;
- dml_clk_table->socclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->socclk.clk_values_khz[i] = dc_clk_table->entries[i].socclk_mhz * 1000;
- }
- } else {
- dml_clk_table->socclk.clk_values_khz[i] = 0;
- }
- }
- }
-}
-
-static void override_dml_init_with_values_from_vbios(
- struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config,
- const struct dc *in_dc)
-{
- const struct clk_bw_params *dc_bw_params = in_dc->clk_mgr->bw_params;
- struct dml2_soc_bb *dml_soc_bb = &dml_init->soc_bb;
- struct dml2_soc_state_table *dml_clk_table = &dml_init->soc_bb.clk_table;
-
- if (in_dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns > 0)
- dml_soc_bb->power_management_parameters.dram_clk_change_blackout_us =
- (in_dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns + 9) / 10;
-
- if (in_dc->ctx->dc_bios->bb_info.dram_sr_enter_exit_latency_100ns > 0)
- dml_soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us =
- (in_dc->ctx->dc_bios->bb_info.dram_sr_enter_exit_latency_100ns + 9) / 10;
-
- if (in_dc->ctx->dc_bios->bb_info.dram_sr_exit_latency_100ns > 0)
- dml_soc_bb->power_management_parameters.stutter_exit_latency_us =
- (in_dc->ctx->dc_bios->bb_info.dram_sr_exit_latency_100ns + 9) / 10;
-
- if (dc_bw_params->num_channels) {
- dml_clk_table->dram_config.channel_count = dc_bw_params->num_channels;
- dml_soc_bb->mall_allocated_for_dcn_mbytes = in_dc->caps.mall_size_total / 1048576;
- } else if (in_dc->ctx->dc_bios->vram_info.num_chans) {
- dml_clk_table->dram_config.channel_count = in_dc->ctx->dc_bios->vram_info.num_chans;
- dml_soc_bb->mall_allocated_for_dcn_mbytes = in_dc->caps.mall_size_total / 1048576;
- }
-
- if (dc_bw_params->dram_channel_width_bytes) {
- dml_clk_table->dram_config.channel_width_bytes = dc_bw_params->dram_channel_width_bytes;
- } else if (in_dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) {
- dml_clk_table->dram_config.channel_width_bytes = in_dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
+ break;
}
- dml_init->soc_bb.xtalclk_mhz = in_dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000;
+ return project_id;
}
-
-static void override_dml_init_with_values_from_dmub(struct dml2_initialize_instance_in_out *dml_init,
+void dml21_populate_dml_init_params(struct dml2_initialize_instance_in_out *dml_init,
const struct dml2_configuration_options *config,
const struct dc *in_dc)
{
- /*
- * TODO - There seems to be overlaps between the values overriden from
- * dmub and vbios. Investigate and identify the values that DMUB needs
- * to own.
- */
-// const struct dmub_soc_bb_params *dmub_bb_params =
-// (const struct dmub_soc_bb_params *)config->bb_from_dmub;
-
-// if (dmub_bb_params == NULL)
-// return;
-
-// if (dmub_bb_params->dram_clk_change_blackout_ns > 0)
-// dml_init->soc_bb.power_management_parameters.dram_clk_change_blackout_us =
-// (double) dmub_bb_params->dram_clk_change_blackout_ns / 1000.0;
-// if (dmub_bb_params->dram_clk_change_read_only_ns > 0)
-// dml_init->soc_bb.power_management_parameters.dram_clk_change_read_only_us =
-// (double) dmub_bb_params->dram_clk_change_read_only_ns / 1000.0;
-// if (dmub_bb_params->dram_clk_change_write_only_ns > 0)
-// dml_init->soc_bb.power_management_parameters.dram_clk_change_write_only_us =
-// (double) dmub_bb_params->dram_clk_change_write_only_ns / 1000.0;
-// if (dmub_bb_params->fclk_change_blackout_ns > 0)
-// dml_init->soc_bb.power_management_parameters.fclk_change_blackout_us =
-// (double) dmub_bb_params->fclk_change_blackout_ns / 1000.0;
-// if (dmub_bb_params->g7_ppt_blackout_ns > 0)
-// dml_init->soc_bb.power_management_parameters.g7_ppt_blackout_us =
-// (double) dmub_bb_params->g7_ppt_blackout_ns / 1000.0;
-// if (dmub_bb_params->stutter_enter_plus_exit_latency_ns > 0)
-// dml_init->soc_bb.power_management_parameters.stutter_enter_plus_exit_latency_us =
-// (double) dmub_bb_params->stutter_enter_plus_exit_latency_ns / 1000.0;
-// if (dmub_bb_params->stutter_exit_latency_ns > 0)
-// dml_init->soc_bb.power_management_parameters.stutter_exit_latency_us =
-// (double) dmub_bb_params->stutter_exit_latency_ns / 1000.0;
-// if (dmub_bb_params->z8_stutter_enter_plus_exit_latency_ns > 0)
-// dml_init->soc_bb.power_management_parameters.z8_stutter_enter_plus_exit_latency_us =
-// (double) dmub_bb_params->z8_stutter_enter_plus_exit_latency_ns / 1000.0;
-// if (dmub_bb_params->z8_stutter_exit_latency_ns > 0)
-// dml_init->soc_bb.power_management_parameters.z8_stutter_exit_latency_us =
-// (double) dmub_bb_params->z8_stutter_exit_latency_ns / 1000.0;
-// if (dmub_bb_params->z8_min_idle_time_ns > 0)
-// dml_init->soc_bb.power_management_parameters.z8_min_idle_time =
-// (double) dmub_bb_params->z8_min_idle_time_ns / 1000.0;
-// #ifndef TRIM_DML2_DCN6B_IP_SENSITIVE
-// if (dmub_bb_params->type_b_dram_clk_change_blackout_ns > 0)
-// dml_init->soc_bb.power_management_parameters.lpddr5_dram_clk_change_blackout_us =
-// (double) dmub_bb_params->type_b_dram_clk_change_blackout_ns / 1000.0;
-// if (dmub_bb_params->type_b_ppt_blackout_ns > 0)
-// dml_init->soc_bb.power_management_parameters.lpddr5_ppt_blackout_us =
-// (double) dmub_bb_params->type_b_ppt_blackout_ns / 1000.0;
-// #else
-// if (dmub_bb_params->type_b_dram_clk_change_blackout_ns > 0)
-// dml_init->soc_bb.power_management_parameters.type_b_dram_clk_change_blackout_us =
-// (double) dmub_bb_params->type_b_dram_clk_change_blackout_ns / 1000.0;
-// if (dmub_bb_params->type_b_ppt_blackout_ns > 0)
-// dml_init->soc_bb.power_management_parameters.type_b_ppt_blackout_us =
-// (double) dmub_bb_params->type_b_ppt_blackout_ns / 1000.0;
-// #endif
-// if (dmub_bb_params->vmin_limit_dispclk_khz > 0)
-// dml_init->soc_bb.vmin_limit.dispclk_khz = dmub_bb_params->vmin_limit_dispclk_khz;
-// if (dmub_bb_params->vmin_limit_dcfclk_khz > 0)
-// dml_init->soc_bb.vmin_limit.dcfclk_khz = dmub_bb_params->vmin_limit_dcfclk_khz;
-// if (dmub_bb_params->g7_temperature_read_blackout_ns > 0)
-// dml_init->soc_bb.power_management_parameters.g7_temperature_read_blackout_us =
-// (double) dmub_bb_params->g7_temperature_read_blackout_ns / 1000.0;
-}
+ dml_init->options.project_id = dml21_dcn_revision_to_dml2_project_id(in_dc->ctx->dce_version);
-static void override_dml_init_with_values_from_software_policy(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config,
- const struct dc *in_dc)
-{
- if (!config->use_native_soc_bb_construction) {
+ if (config->use_native_soc_bb_construction) {
+ in_dc->soc_and_ip_translator->translator_funcs->get_soc_bb(&dml_init->soc_bb, in_dc, config);
+ in_dc->soc_and_ip_translator->translator_funcs->get_ip_caps(&dml_init->ip_caps);
+ } else {
dml_init->soc_bb = config->external_socbb_ip_params->soc_bb;
dml_init->ip_caps = config->external_socbb_ip_params->ip_params;
}
- if (in_dc->bb_overrides.sr_exit_time_ns)
- dml_init->soc_bb.power_management_parameters.stutter_exit_latency_us =
- in_dc->bb_overrides.sr_exit_time_ns / 1000.0;
-
- if (in_dc->bb_overrides.sr_enter_plus_exit_time_ns)
- dml_init->soc_bb.power_management_parameters.stutter_enter_plus_exit_latency_us =
- in_dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
-
- if (in_dc->bb_overrides.dram_clock_change_latency_ns)
- dml_init->soc_bb.power_management_parameters.dram_clk_change_blackout_us =
- in_dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
-
- if (in_dc->bb_overrides.fclk_clock_change_latency_ns)
- dml_init->soc_bb.power_management_parameters.fclk_change_blackout_us =
- in_dc->bb_overrides.fclk_clock_change_latency_ns / 1000.0;
-}
-
-void dml21_populate_dml_init_params(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config,
- const struct dc *in_dc)
-{
- populate_default_dml_init_params(dml_init, config, in_dc);
-
- override_dml_init_with_values_from_hardware_default(dml_init, config, in_dc);
-
- override_dml_init_with_values_from_smu(dml_init, config, in_dc);
-
- override_dml_init_with_values_from_vbios(dml_init, config, in_dc);
-
- override_dml_init_with_values_from_dmub(dml_init, config, in_dc);
-
- override_dml_init_with_values_from_software_policy(dml_init, config, in_dc);
+ dml21_populate_pmo_options(&dml_init->options.pmo_options, in_dc, config);
}
static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stream)
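Condensed, the rewritten dml21_populate_dml_init_params() above reduces to one decision: in the native path the per-SoC translator builds both the bounding box and the IP caps (replacing the old default + SMU + VBIOS + DMUB + software-policy override chain), otherwise both are copied from externally supplied parameters; PMO options are populated either way. The shape, restated as a fragment (names as in the hunk, not a drop-in copy):

	dml_init->options.project_id =
		dml21_dcn_revision_to_dml2_project_id(in_dc->ctx->dce_version);

	if (config->use_native_soc_bb_construction) {
		in_dc->soc_and_ip_translator->translator_funcs->get_soc_bb(
			&dml_init->soc_bb, in_dc, config);
		in_dc->soc_and_ip_translator->translator_funcs->get_ip_caps(
			&dml_init->ip_caps);
	} else {
		dml_init->soc_bb = config->external_socbb_ip_params->soc_bb;
		dml_init->ip_caps = config->external_socbb_ip_params->ip_params;
	}
	dml21_populate_pmo_options(&dml_init->options.pmo_options, in_dc, config);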
@@ -422,25 +84,29 @@ static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stre
static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cfg *timing,
struct dc_stream_state *stream,
+ struct pipe_ctx *pipe_ctx,
struct dml2_context *dml_ctx)
{
unsigned int hblank_start, vblank_start, min_hardware_refresh_in_uhz;
+ uint32_t pix_clk_100hz;
- timing->h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
+ timing->h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right + pipe_ctx->dsc_padding_params.dsc_hactive_padding;
timing->v_active = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
timing->h_front_porch = stream->timing.h_front_porch;
timing->v_front_porch = stream->timing.v_front_porch;
timing->pixel_clock_khz = stream->timing.pix_clk_100hz / 10;
+ if (pipe_ctx->dsc_padding_params.dsc_hactive_padding != 0)
+ timing->pixel_clock_khz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz / 10;
if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
timing->pixel_clock_khz *= 2;
- timing->h_total = stream->timing.h_total;
+ timing->h_total = stream->timing.h_total + pipe_ctx->dsc_padding_params.dsc_htotal_padding;
timing->v_total = stream->timing.v_total;
timing->h_sync_width = stream->timing.h_sync_width;
timing->interlaced = stream->timing.flags.INTERLACE;
hblank_start = stream->timing.h_total - stream->timing.h_front_porch;
- timing->h_blank_end = hblank_start - stream->timing.h_addressable
+ timing->h_blank_end = hblank_start - stream->timing.h_addressable - pipe_ctx->dsc_padding_params.dsc_hactive_padding
- stream->timing.h_border_left - stream->timing.h_border_right;
if (hblank_start < stream->timing.h_addressable)
@@ -459,15 +125,16 @@ static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cf
/* limit min refresh rate to DC cap */
min_hardware_refresh_in_uhz = stream->timing.min_refresh_in_uhz;
if (stream->ctx->dc->caps.max_v_total != 0) {
- min_hardware_refresh_in_uhz = div64_u64((stream->timing.pix_clk_100hz * 100000000ULL),
- (stream->timing.h_total * (long long)calc_max_hardware_v_total(stream)));
+ if (pipe_ctx->dsc_padding_params.dsc_hactive_padding != 0) {
+ pix_clk_100hz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz;
+ } else {
+ pix_clk_100hz = stream->timing.pix_clk_100hz;
+ }
+ min_hardware_refresh_in_uhz = div64_u64((pix_clk_100hz * 100000000ULL),
+ (timing->h_total * (long long)calc_max_hardware_v_total(stream)));
}
- if (stream->timing.min_refresh_in_uhz > min_hardware_refresh_in_uhz) {
- timing->drr_config.min_refresh_uhz = stream->timing.min_refresh_in_uhz;
- } else {
- timing->drr_config.min_refresh_uhz = min_hardware_refresh_in_uhz;
- }
+ timing->drr_config.min_refresh_uhz = max(stream->timing.min_refresh_in_uhz, min_hardware_refresh_in_uhz);
if (dml_ctx->config.callbacks.get_max_flickerless_instant_vtotal_increase &&
stream->ctx->dc->config.enable_fpo_flicker_detection == 1)
@@ -515,21 +182,6 @@ static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cf
timing->vblank_nom = timing->v_total - timing->v_active;
}
-/**
- * adjust_dml21_hblank_timing_config_from_pipe_ctx - Adjusts the horizontal blanking timing configuration
- * based on the pipe context.
- * @timing: Pointer to the dml2_timing_cfg structure to be adjusted.
- * @pipe: Pointer to the pipe_ctx structure containing the horizontal blanking borrow value.
- *
- * This function modifies the horizontal active and blank end timings by adding and subtracting
- * the horizontal blanking borrow value from the pipe context, respectively.
- */
-static void adjust_dml21_hblank_timing_config_from_pipe_ctx(struct dml2_timing_cfg *timing, struct pipe_ctx *pipe)
-{
- timing->h_active += pipe->hblank_borrow;
- timing->h_blank_end -= pipe->hblank_borrow;
-}
-
static void populate_dml21_output_config_from_stream_state(struct dml2_link_output_cfg *output,
struct dc_stream_state *stream, const struct pipe_ctx *pipe)
{
@@ -829,7 +481,9 @@ static const struct scaler_data *get_scaler_data_for_plane(
temp_pipe->plane_state = pipe->plane_state;
temp_pipe->plane_res.scl_data.taps = pipe->plane_res.scl_data.taps;
temp_pipe->stream_res = pipe->stream_res;
- temp_pipe->hblank_borrow = pipe->hblank_borrow;
+ temp_pipe->dsc_padding_params.dsc_hactive_padding = pipe->dsc_padding_params.dsc_hactive_padding;
+ temp_pipe->dsc_padding_params.dsc_htotal_padding = pipe->dsc_padding_params.dsc_htotal_padding;
+ temp_pipe->dsc_padding_params.dsc_pix_clk_100hz = pipe->dsc_padding_params.dsc_pix_clk_100hz;
dml_ctx->config.callbacks.build_scaling_params(temp_pipe);
break;
}
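Throughout this file the per-pipe hblank_borrow field is superseded by a dsc_padding_params triple (hactive padding, htotal padding, padded pixel clock), consumed directly when populating the DML timing instead of via a separate adjust pass. A standalone illustration of how those fields feed the timing, using hypothetical numbers:

#include <stdint.h>
#include <stdio.h>

struct dsc_padding_params {	/* mirrors the fields used in the hunks above */
	uint32_t dsc_hactive_padding;
	uint32_t dsc_htotal_padding;
	uint32_t dsc_pix_clk_100hz;
};

int main(void)
{
	struct dsc_padding_params pad = { 16, 16, 5940000 };	/* hypothetical */
	uint32_t h_addressable = 3840, h_total = 4400;
	uint32_t pix_clk_100hz = 5933000;

	uint32_t dml_h_active = h_addressable + pad.dsc_hactive_padding;
	uint32_t dml_h_total = h_total + pad.dsc_htotal_padding;
	/* padded pixel clock replaces the stream's when padding is active */
	uint32_t dml_pix_clk_khz = (pad.dsc_hactive_padding ?
			pad.dsc_pix_clk_100hz : pix_clk_100hz) / 10;

	printf("h_active=%u h_total=%u pix_clk_khz=%u\n",
	       dml_h_active, dml_h_total, dml_pix_clk_khz);
	return 0;
}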
@@ -1097,8 +751,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
disp_cfg_stream_location = dml_dispcfg->num_streams++;
ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__);
- populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], dml_ctx);
- adjust_dml21_hblank_timing_config_from_pipe_ctx(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, &context->res_ctx.pipe_ctx[stream_index]);
+ populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index], dml_ctx);
populate_dml21_output_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].output, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index]);
populate_dml21_stream_overrides_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location], context->streams[stream_index], &context->stream_status[stream_index]);
@@ -1165,6 +818,8 @@ void dml21_copy_clocks_to_dc_state(struct dml2_context *in_ctx, struct dc_state
context->bw_ctx.bw.dcn.clk.socclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.socclk_khz;
context->bw_ctx.bw.dcn.clk.subvp_prefetch_dramclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.svp_prefetch_no_throttle.uclk_khz;
context->bw_ctx.bw.dcn.clk.subvp_prefetch_fclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.svp_prefetch_no_throttle.fclk_khz;
+ context->bw_ctx.bw.dcn.clk.stutter_efficiency.base_efficiency = in_ctx->v21.mode_programming.programming->stutter.base_percent_efficiency;
+ context->bw_ctx.bw.dcn.clk.stutter_efficiency.low_power_efficiency = in_ctx->v21.mode_programming.programming->stutter.low_power_percent_efficiency;
}
static struct dml2_dchub_watermark_regs *wm_set_index_to_dc_wm_set(union dcn_watermark_set *watermarks, const enum dml2_dchub_watermark_reg_set_index wm_index)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
index 03de3cf06ae5..08f7f03b1023 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
@@ -60,7 +60,7 @@ static void dml21_init(const struct dc *in_dc, struct dml2_context *dml_ctx, con
DC_FP_START();
- dml21_populate_dml_init_params(&dml_ctx->v21.dml_init, config, in_dc);
+ dml21_populate_dml_init_params(&dml_ctx->v21.dml_init, &dml_ctx->config, in_dc);
dml2_initialize_instance(&dml_ctx->v21.dml_init);
@@ -224,7 +224,9 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s
dml_ctx->config.svp_pstate.callbacks.release_phantom_streams_and_planes(in_dc, context);
/* Populate stream, plane mappings and other fields in display config. */
+ DC_FP_START();
result = dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx);
+ DC_FP_END();
if (!result)
return false;
@@ -279,7 +281,9 @@ static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *co
dml_ctx->config.svp_pstate.callbacks.release_phantom_streams_and_planes(in_dc, context);
mode_support->dml2_instance = dml_init->dml2_instance;
+ DC_FP_START();
dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx);
+ DC_FP_END();
dml_ctx->v21.mode_programming.dml2_instance->scratch.build_mode_programming_locals.mode_programming_params.programming = dml_ctx->v21.mode_programming.programming;
DC_FP_START();
is_supported = dml2_check_mode_supported(mode_support);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
index b05030926ce8..91955bbe24b8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
@@ -159,6 +159,8 @@ struct dml2_dchub_watermark_regs {
uint32_t sr_exit;
uint32_t sr_enter_z8;
uint32_t sr_exit_z8;
+ uint32_t sr_enter_low_power;
+ uint32_t sr_exit_low_power;
uint32_t uclk_pstate;
uint32_t fclk_pstate;
uint32_t temp_read_or_ppt;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
index 8c9f414aa6bf..176f55947664 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
@@ -96,6 +96,8 @@ struct dml2_soc_power_management_parameters {
double g7_temperature_read_blackout_us;
double stutter_enter_plus_exit_latency_us;
double stutter_exit_latency_us;
+ double low_power_stutter_enter_plus_exit_latency_us;
+ double low_power_stutter_exit_latency_us;
double z8_stutter_enter_plus_exit_latency_us;
double z8_stutter_exit_latency_us;
double z8_min_idle_time;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
index 98c0234e2f47..41adb1104d0f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
@@ -16,9 +16,9 @@ struct dml2_instance;
enum dml2_project_id {
dml2_project_invalid = 0,
- dml2_project_dcn4x_stage1 = 1,
- dml2_project_dcn4x_stage2 = 2,
- dml2_project_dcn4x_stage2_auto_drr_svp = 3,
+ dml2_project_dcn4x_stage1,
+ dml2_project_dcn4x_stage2,
+ dml2_project_dcn4x_stage2_auto_drr_svp,
};
enum dml2_pstate_change_support {
@@ -417,6 +417,8 @@ struct dml2_display_cfg_programming {
struct {
bool supported_in_blank; // Changing to configurations where this is false requires stutter to be disabled during the transition
+ uint8_t base_percent_efficiency; //LP1
+ uint8_t low_power_percent_efficiency; //LP2
} stutter;
struct {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
index b9cff2198511..bf62d42b3f78 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
@@ -1238,18 +1238,27 @@ static void CalculateDETBufferSize(
static double CalculateRequiredDispclk(
enum dml2_odm_mode ODMMode,
- double PixelClock)
+ double PixelClock,
+ bool isTMDS420)
{
+ double DispClk;
if (ODMMode == dml2_odm_mode_combine_4to1) {
- return PixelClock / 4.0;
+ DispClk = PixelClock / 4.0;
} else if (ODMMode == dml2_odm_mode_combine_3to1) {
- return PixelClock / 3.0;
+ DispClk = PixelClock / 3.0;
} else if (ODMMode == dml2_odm_mode_combine_2to1) {
- return PixelClock / 2.0;
+ DispClk = PixelClock / 2.0;
} else {
- return PixelClock;
+ DispClk = PixelClock;
+ }
+
+ if (isTMDS420) {
+ double TMDS420MinPixClock = PixelClock / 2.0;
+ DispClk = math_max2(DispClk, TMDS420MinPixClock);
}
+
+ return DispClk;
}
static double TruncToValidBPP(
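The isTMDS420 parameter added above encodes a hardware floor: ODM combine divides the per-pipe DISPCLK requirement by the combine factor, but for native HDMI TMDS 4:2:0 output the clock must not drop below half the pixel clock, so the result is raised back with math_max2(). A standalone model with worked numbers:

#include <stdio.h>

/* Same logic as the amended CalculateRequiredDispclk(); a plain
 * comparison stands in for the DML math_max2() helper. */
static double required_dispclk(int odm_factor, double pix_clk, int is_tmds420)
{
	double dispclk = pix_clk / odm_factor;

	if (is_tmds420 && dispclk < pix_clk / 2.0)
		dispclk = pix_clk / 2.0;	/* TMDS 4:2:0 floor */
	return dispclk;
}

int main(void)
{
	double pix = 1188.0;	/* MHz, hypothetical 4k120-class mode */

	printf("4:1 combine, non-420: %.1f\n", required_dispclk(4, pix, 0)); /* 297.0 */
	printf("4:1 combine, TMDS420: %.1f\n", required_dispclk(4, pix, 1)); /* 594.0 */
	return 0;
}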
@@ -4122,11 +4131,12 @@ static noinline_for_stack void CalculateODMMode(
bool success;
bool UseDSC = DSCEnable && (NumberOfDSCSlices > 0);
enum dml2_odm_mode DecidedODMMode;
+ bool isTMDS420 = (OutFormat == dml2_420 && Output == dml2_hdmi);
- SurfaceRequiredDISPCLKWithoutODMCombine = CalculateRequiredDispclk(dml2_odm_mode_bypass, PixelClock);
- SurfaceRequiredDISPCLKWithODMCombineTwoToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_2to1, PixelClock);
- SurfaceRequiredDISPCLKWithODMCombineThreeToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_3to1, PixelClock);
- SurfaceRequiredDISPCLKWithODMCombineFourToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_4to1, PixelClock);
+ SurfaceRequiredDISPCLKWithoutODMCombine = CalculateRequiredDispclk(dml2_odm_mode_bypass, PixelClock, isTMDS420);
+ SurfaceRequiredDISPCLKWithODMCombineTwoToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_2to1, PixelClock, isTMDS420);
+ SurfaceRequiredDISPCLKWithODMCombineThreeToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_3to1, PixelClock, isTMDS420);
+ SurfaceRequiredDISPCLKWithODMCombineFourToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_4to1, PixelClock, isTMDS420);
#ifdef __DML_VBA_DEBUG__
DML_LOG_VERBOSE("DML::%s: ODMUse = %d\n", __func__, ODMUse);
DML_LOG_VERBOSE("DML::%s: Output = %d\n", __func__, Output);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c
index 28394de02885..640087e862f8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c
@@ -10,7 +10,7 @@ bool dml2_core_create(enum dml2_project_id project_id, struct dml2_core_instance
{
bool result = false;
- if (out == 0)
+ if (!out)
return false;
memset(out, 0, sizeof(struct dml2_core_instance));
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
index 28687565ac22..ffb8c09f37a5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
@@ -201,6 +201,8 @@ struct dml2_core_internal_watermarks {
double WritebackFCLKChangeWatermark;
double StutterExitWatermark;
double StutterEnterPlusExitWatermark;
+ double LowPowerStutterExitWatermark;
+ double LowPowerStutterEnterPlusExitWatermark;
double Z8StutterExitWatermark;
double Z8StutterEnterPlusExitWatermark;
double USRRetrainingWatermark;
@@ -877,6 +879,9 @@ struct dml2_core_internal_mode_program {
double Z8StutterEfficiency;
unsigned int Z8NumberOfStutterBurstsPerFrame;
double Z8StutterEfficiencyNotIncludingVBlank;
+ double LowPowerStutterEfficiency;
+ double LowPowerStutterEfficiencyNotIncludingVBlank;
+ unsigned int LowPowerNumberOfStutterBurstsPerFrame;
double StutterPeriod;
double Z8StutterEfficiencyBestCase;
unsigned int Z8NumberOfStutterBurstsPerFrameBestCase;
@@ -1016,6 +1021,8 @@ struct dml2_core_internal_SOCParametersList {
double FCLKChangeLatency;
double SRExitTime;
double SREnterPlusExitTime;
+ double SRExitTimeLowPower;
+ double SREnterPlusExitTimeLowPower;
double SRExitZ8Time;
double SREnterPlusExitZ8Time;
double USRRetrainingLatency;
@@ -1851,9 +1858,11 @@ struct dml2_core_calcs_CalculateStutterEfficiency_params {
unsigned int CompbufReservedSpaceZs;
bool hw_debug5;
double SRExitTime;
+ double SRExitTimeLowPower;
double SRExitZ8Time;
bool SynchronizeTimings;
double StutterEnterPlusExitWatermark;
+ double LowPowerStutterEnterPlusExitWatermark;
double Z8StutterEnterPlusExitWatermark;
bool ProgressiveToInterlaceUnitInOPP;
double *MinTTUVBlank;
@@ -1879,7 +1888,10 @@ struct dml2_core_calcs_CalculateStutterEfficiency_params {
// output
double *StutterEfficiencyNotIncludingVBlank;
double *StutterEfficiency;
+ double *LowPowerStutterEfficiencyNotIncludingVBlank;
+ double *LowPowerStutterEfficiency;
unsigned int *NumberOfStutterBurstsPerFrame;
+ unsigned int *LowPowerNumberOfStutterBurstsPerFrame;
double *Z8StutterEfficiencyNotIncludingVBlank;
double *Z8StutterEfficiency;
unsigned int *Z8NumberOfStutterBurstsPerFrame;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
index 3861bc6c9621..dfd01440737d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
@@ -20,7 +20,7 @@ bool dml2_dpmm_create(enum dml2_project_id project_id, struct dml2_dpmm_instance
{
bool result = false;
- if (out == 0)
+ if (!out)
return false;
memset(out, 0, sizeof(struct dml2_dpmm_instance));
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c
index cd3fbc0591d8..c60b8fe90819 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c
@@ -15,7 +15,7 @@ bool dml2_mcg_create(enum dml2_project_id project_id, struct dml2_mcg_instance *
{
bool result = false;
- if (out == 0)
+ if (!out)
return false;
memset(out, 0, sizeof(struct dml2_mcg_instance));
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
index e763c8e45da8..1b9579a32ff2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
@@ -48,18 +48,19 @@ static void set_reserved_time_on_all_planes_with_stream_index(struct display_con
static void remove_duplicates(double *list_a, int *list_a_size)
{
- int cur_element = 0;
- // For all elements b[i] in list_b[]
- while (cur_element < *list_a_size - 1) {
- if (list_a[cur_element] == list_a[cur_element + 1]) {
- for (int j = cur_element + 1; j < *list_a_size - 1; j++) {
- list_a[j] = list_a[j + 1];
- }
- *list_a_size = *list_a_size - 1;
- } else {
- cur_element++;
+ int j = 0;
+
+ if (*list_a_size == 0)
+ return;
+
+ for (int i = 1; i < *list_a_size; i++) {
+ if (list_a[j] != list_a[i]) {
+ j++;
+ list_a[j] = list_a[i];
}
}
+
+ *list_a_size = j + 1;
}
static bool increase_mpc_combine_factor(unsigned int *mpc_combine_factor, unsigned int limit)
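The remove_duplicates() rewrite above swaps an O(n^2) shift-on-match loop for the standard single-pass two-pointer compaction over a sorted list (the same shape as C++'s std::unique). Standalone version with a usage check:

#include <stdio.h>

/* Identical algorithm to the rewritten helper: equal values must be
 * adjacent (sorted input); duplicates are squeezed out in place. */
static void remove_duplicates(double *list, int *size)
{
	int j = 0;

	if (*size == 0)
		return;

	for (int i = 1; i < *size; i++) {
		if (list[j] != list[i]) {
			j++;
			list[j] = list[i];
		}
	}
	*size = j + 1;
}

int main(void)
{
	double a[] = { 1.0, 1.0, 2.0, 3.0, 3.0, 3.0, 4.0 };
	int n = 7;

	remove_duplicates(a, &n);
	for (int i = 0; i < n; i++)
		printf("%.0f ", a[i]);	/* 1 2 3 4 */
	printf("\n");
	return 0;
}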
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c
index 7ed0242a4b33..55d2464365d0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c
@@ -26,7 +26,7 @@ bool dml2_pmo_create(enum dml2_project_id project_id, struct dml2_pmo_instance *
{
bool result = false;
- if (out == 0)
+ if (!out)
return false;
memset(out, 0, sizeof(struct dml2_pmo_instance));
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
index 5f1b49a50049..4cfe64aa8492 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
@@ -473,7 +473,6 @@ static void sort_pipes_for_splitting(struct dc_plane_pipe_pool *pipes)
{
bool sorted, swapped;
unsigned int cur_index;
- unsigned int temp;
int odm_slice_index;
for (odm_slice_index = 0; odm_slice_index < pipes->num_pipes_assigned_to_plane_for_odm_combine; odm_slice_index++) {
@@ -489,9 +488,8 @@ static void sort_pipes_for_splitting(struct dc_plane_pipe_pool *pipes)
swapped = false;
while (!sorted) {
if (pipes->pipes_assigned_to_plane[odm_slice_index][cur_index] > pipes->pipes_assigned_to_plane[odm_slice_index][cur_index + 1]) {
- temp = pipes->pipes_assigned_to_plane[odm_slice_index][cur_index];
- pipes->pipes_assigned_to_plane[odm_slice_index][cur_index] = pipes->pipes_assigned_to_plane[odm_slice_index][cur_index + 1];
- pipes->pipes_assigned_to_plane[odm_slice_index][cur_index + 1] = temp;
+ swap(pipes->pipes_assigned_to_plane[odm_slice_index][cur_index + 1],
+ pipes->pipes_assigned_to_plane[odm_slice_index][cur_index]);
swapped = true;
}
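The swap() used above is the kernel's generic exchange macro; the hand-rolled three-assignment version it replaces is exactly what the macro expands to. Roughly (GNU C typeof, per include/linux/minmax.h):

#include <stdio.h>

#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

int main(void)
{
	unsigned int x = 3, y = 7;

	swap(x, y);
	printf("x=%u y=%u\n", x, y);	/* x=7 y=3 */
	return 0;
}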
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
index a56e75cdf712..c59f825cfae9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
@@ -654,14 +654,14 @@ static void set_phantom_stream_timing(struct dml2_context *ctx, struct dc_state
unsigned int svp_height,
unsigned int svp_vstartup)
{
- unsigned int i, pipe_idx;
+ unsigned int i;
double line_time, fp_and_sync_width_time;
struct pipe_ctx *pipe;
uint32_t phantom_vactive, phantom_bp, pstate_width_fw_delay_lines;
static const double cvt_rb_vblank_max = ((double) 460 / (1000 * 1000));
// Find DML pipe index (pipe_idx) using dc_pipe_idx
- for (i = 0, pipe_idx = 0; i < ctx->config.dcn_pipe_count; i++) {
+ for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
pipe = &state->res_ctx.pipe_ctx[i];
if (!pipe->stream)
@@ -669,8 +669,6 @@ static void set_phantom_stream_timing(struct dml2_context *ctx, struct dc_state
if (i == dc_pipe_idx)
break;
-
- pipe_idx++;
}
// Calculate lines required for pstate allow width and FW processing delays
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index 0318260370ed..9deb03a18ccc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -535,7 +535,7 @@ static bool dml2_validate_only(struct dc_state *context, enum dc_validate_mode v
if (result)
result = does_configuration_meet_sw_policies(dml2, &dml2->v20.scratch.cur_display_config, &dml2->v20.scratch.mode_support_info);
- return (result == 1) ? true : false;
+ return result == 1;
}
static void dml2_apply_debug_options(const struct dc *dc, struct dml2_context *dml2)
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
index 75fb77bca83b..01480a04f85e 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
@@ -520,6 +520,15 @@ void dpp1_dppclk_control(
REG_UPDATE(DPP_CONTROL, DPP_CLOCK_ENABLE, 0);
}
+void dpp_force_disable_cursor(struct dpp *dpp_base)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ /* Force disable cursor */
+ REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, 0);
+ dpp_base->pos.cur0_ctl.bits.cur0_enable = 0;
+}
+
static const struct dpp_funcs dcn10_dpp_funcs = {
.dpp_read_state = dpp_read_state,
.dpp_reset = dpp_reset,
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h
index c48139bed11f..f466182963f7 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h
@@ -1525,4 +1525,6 @@ void dpp1_construct(struct dcn10_dpp *dpp1,
void dpp1_cm_get_gamut_remap(struct dpp *dpp_base,
struct dpp_grph_csc_adjustment *adjust);
+void dpp_force_disable_cursor(struct dpp *dpp_base);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
index 2d70586cef40..09be2a90cc79 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
@@ -1494,6 +1494,7 @@ static struct dpp_funcs dcn30_dpp_funcs = {
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier,
.dpp_get_gamut_remap = dpp3_cm_get_gamut_remap,
+ .dpp_force_disable_cursor = dpp_force_disable_cursor,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h
index 5a6a861402b3..5f6b431ec398 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h
@@ -673,6 +673,16 @@ struct dcn401_dpp {
struct pwl_params pwl_data;
};
+enum dcn401_dscl_mode_sel {
+ DCN401_DSCL_MODE_SCALING_444_BYPASS = 0,
+ DCN401_DSCL_MODE_SCALING_444_RGB_ENABLE = 1,
+ DCN401_DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2,
+ DCN401_DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3,
+ DCN401_DSCL_MODE_SCALING_420_LUMA_BYPASS = 4,
+ DCN401_DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5,
+ DCN401_DSCL_MODE_DSCL_BYPASS = 6
+};
+
bool dpp401_construct(struct dcn401_dpp *dpp401,
struct dc_context *ctx,
uint32_t inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
index 2f92e7d4981b..6df3419f825f 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
@@ -78,16 +78,6 @@ enum dscl_autocal_mode {
AUTOCAL_MODE_AUTOREPLICATE = 3
};
-enum dscl_mode_sel {
- DSCL_MODE_SCALING_444_BYPASS = 0,
- DSCL_MODE_SCALING_444_RGB_ENABLE = 1,
- DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2,
- DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3,
- DSCL_MODE_SCALING_420_LUMA_BYPASS = 4,
- DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5,
- DSCL_MODE_DSCL_BYPASS = 6
-};
-
static int dpp401_dscl_get_pixel_depth_val(enum lb_pixel_depth depth)
{
if (depth == LB_PIXEL_DEPTH_30BPP)
@@ -122,7 +112,7 @@ static bool dpp401_dscl_is_420_format(enum pixel_format format)
return false;
}
-static enum dscl_mode_sel dpp401_dscl_get_dscl_mode(
+static enum dcn401_dscl_mode_sel dpp401_dscl_get_dscl_mode(
struct dpp *dpp_base,
const struct scaler_data *data,
bool dbg_always_scale)
@@ -132,7 +122,7 @@ static enum dscl_mode_sel dpp401_dscl_get_dscl_mode(
if (dpp_base->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) {
/* DSCL is processing data in fixed format */
if (data->format == PIXEL_FORMAT_FP16)
- return DSCL_MODE_DSCL_BYPASS;
+ return DCN401_DSCL_MODE_DSCL_BYPASS;
}
if (data->ratios.horz.value == one
@@ -140,20 +130,20 @@ static enum dscl_mode_sel dpp401_dscl_get_dscl_mode(
&& data->ratios.horz_c.value == one
&& data->ratios.vert_c.value == one
&& !dbg_always_scale)
- return DSCL_MODE_SCALING_444_BYPASS;
+ return DCN401_DSCL_MODE_SCALING_444_BYPASS;
if (!dpp401_dscl_is_420_format(data->format)) {
if (dpp401_dscl_is_video_format(data->format))
- return DSCL_MODE_SCALING_444_YCBCR_ENABLE;
+ return DCN401_DSCL_MODE_SCALING_444_YCBCR_ENABLE;
else
- return DSCL_MODE_SCALING_444_RGB_ENABLE;
+ return DCN401_DSCL_MODE_SCALING_444_RGB_ENABLE;
}
if (data->ratios.horz.value == one && data->ratios.vert.value == one)
- return DSCL_MODE_SCALING_420_LUMA_BYPASS;
+ return DCN401_DSCL_MODE_SCALING_420_LUMA_BYPASS;
if (data->ratios.horz_c.value == one && data->ratios.vert_c.value == one)
- return DSCL_MODE_SCALING_420_CHROMA_BYPASS;
+ return DCN401_DSCL_MODE_SCALING_420_CHROMA_BYPASS;
- return DSCL_MODE_SCALING_420_YCBCR_ENABLE;
+ return DCN401_DSCL_MODE_SCALING_420_YCBCR_ENABLE;
}
static void dpp401_power_on_dscl(
@@ -1071,7 +1061,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
uint32_t v_num_taps_c = scl_data->taps.v_taps_c - 1;
uint32_t h_num_taps = scl_data->taps.h_taps - 1;
uint32_t h_num_taps_c = scl_data->taps.h_taps_c - 1;
- enum dscl_mode_sel dscl_mode = dpp401_dscl_get_dscl_mode(
+ enum dcn401_dscl_mode_sel dscl_mode = dpp401_dscl_get_dscl_mode(
dpp_base, scl_data, dpp_base->ctx->dc->debug.always_scale);
bool ycbcr = scl_data->format >= PIXEL_FORMAT_VIDEO_BEGIN
&& scl_data->format <= PIXEL_FORMAT_VIDEO_END;
@@ -1102,7 +1092,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
dpp->scl_data = *scl_data;
if ((dpp->base.ctx->dc->config.use_spl) && (!dpp->base.ctx->dc->debug.disable_spl)) {
- dscl_mode = (enum dscl_mode_sel) scl_data->dscl_prog_data.dscl_mode;
+ dscl_mode = (enum dcn401_dscl_mode_sel) scl_data->dscl_prog_data.dscl_mode;
rect = (struct rect *)&scl_data->dscl_prog_data.recout;
mpc_width = scl_data->dscl_prog_data.mpc_size.width;
mpc_height = scl_data->dscl_prog_data.mpc_size.height;
@@ -1112,7 +1102,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
h_num_taps_c = scl_data->dscl_prog_data.taps.h_taps_c;
}
if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.dscl) {
- if (dscl_mode != DSCL_MODE_DSCL_BYPASS)
+ if (dscl_mode != DCN401_DSCL_MODE_DSCL_BYPASS)
dpp401_power_on_dscl(dpp_base, true);
}
@@ -1139,7 +1129,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
/* SCL mode */
REG_UPDATE(SCL_MODE, DSCL_MODE, dscl_mode);
- if (dscl_mode == DSCL_MODE_DSCL_BYPASS) {
+ if (dscl_mode == DCN401_DSCL_MODE_DSCL_BYPASS) {
if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.dscl)
dpp401_power_on_dscl(dpp_base, false);
return;
@@ -1149,7 +1139,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
lb_config = dpp401_dscl_find_lb_memory_config(dpp, scl_data);
dpp401_dscl_set_lb(dpp, &scl_data->lb_params, lb_config);
- if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS) {
+ if (dscl_mode == DCN401_DSCL_MODE_SCALING_444_BYPASS) {
if (dpp->base.ctx->dc->config.prefer_easf)
dpp401_dscl_disable_easf(dpp_base, scl_data);
dpp401_dscl_program_isharp(dpp_base, scl_data, program_isharp_1dlut, &bs_coeffs_updated);
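For reference, the selection logic of dpp401_dscl_get_dscl_mode() boils down to a small decision tree. The standalone sketch below mirrors it with simplified stand-in names; the enum and parameters are illustrative, not the driver's real types.

#include <stdbool.h>
#include <stdio.h>

enum dscl_mode {
	MODE_444_BYPASS, MODE_444_RGB, MODE_444_YCBCR,
	MODE_420_YCBCR, MODE_420_LUMA_BYPASS,
	MODE_420_CHROMA_BYPASS, MODE_DSCL_BYPASS
};

/* Simplified mirror of dpp401_dscl_get_dscl_mode()'s decision tree. */
static enum dscl_mode pick_mode(bool fp16_on_fixed_pipe, bool unity_all,
				bool is_420, bool is_video,
				bool unity_luma, bool unity_chroma)
{
	if (fp16_on_fixed_pipe)		/* fixed-format DSCL cannot scale FP16 */
		return MODE_DSCL_BYPASS;
	if (unity_all)			/* 1:1 in both planes: bypass scaling */
		return MODE_444_BYPASS;
	if (!is_420)
		return is_video ? MODE_444_YCBCR : MODE_444_RGB;
	if (unity_luma)			/* only chroma needs scaling */
		return MODE_420_LUMA_BYPASS;
	if (unity_chroma)		/* only luma needs scaling */
		return MODE_420_CHROMA_BYPASS;
	return MODE_420_YCBCR;
}

int main(void)
{
	/* 4:2:0 video where luma is 1:1 but chroma is scaled */
	printf("%d\n", pick_mode(false, false, true, true, true, false));
	return 0;
}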
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 1f53a9f0c0ac..e4144b244332 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -1157,6 +1157,11 @@ static bool setup_dsc_config(
if (!is_dsc_possible)
goto done;
+ /* increase minimum slice count to meet sink slice width limitations */
+ min_slices_h = dc_fixpt_ceil(dc_fixpt_max(
+ dc_fixpt_div_int(dc_fixpt_from_int(pic_width), dsc_common_caps.max_slice_width), // sink min
+ dc_fixpt_from_int(min_slices_h))); // source min
+
min_slices_h = fit_num_slices_up(dsc_common_caps.slice_caps, min_slices_h);
/* increase minimum slice count to meet sink throughput limitations */
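In plain integer terms, the added sink clamp is a ceiling division of pic_width by the sink's maximum slice width, combined with the existing source minimum via max(); the fixed-point dc_fixpt_* calls reduce to exactly this for whole numbers. A minimal sketch with example values:

#include <stdio.h>

/* Integer equivalent of the dc_fixpt_ceil/div pair. */
static int div_round_up(int a, int b) { return (a + b - 1) / b; }

int main(void)
{
	int pic_width = 3840;
	int max_slice_width = 2560;	/* sink limit (example value) */
	int min_slices_h = 1;		/* source minimum so far */
	int sink_min = div_round_up(pic_width, max_slice_width); /* = 2 */

	min_slices_h = sink_min > min_slices_h ? sink_min : min_slices_h;
	printf("min_slices_h = %d\n", min_slices_h);	/* 2 */
	return 0;
}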
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
index bd1b9aef6d5c..89f0d999bf35 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
@@ -406,9 +406,10 @@ bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values
dsc_reg_vals->alternate_ich_encoding_en = dsc_reg_vals->pps.dsc_version_minor == 1 ? 0 : 1;
dsc_reg_vals->ich_reset_at_eol = (dsc_cfg->is_odm || dsc_reg_vals->num_slices_h > 1) ? 0xF : 0;
+ // Round the slice width up so slice_width * num_slices_h always covers pic_width
+ dsc_reg_vals->pps.slice_width = (dsc_cfg->pic_width + dsc_cfg->dc_dsc_cfg.num_slices_h - 1) / dsc_cfg->dc_dsc_cfg.num_slices_h;
// TODO: in addition to validating slice height (pic height must be divisible by slice height),
// see what happens when the same condition doesn't apply for slice_width/pic_width.
- dsc_reg_vals->pps.slice_width = dsc_cfg->pic_width / dsc_cfg->dc_dsc_cfg.num_slices_h;
dsc_reg_vals->pps.slice_height = dsc_cfg->pic_height / dsc_cfg->dc_dsc_cfg.num_slices_v;
ASSERT(dsc_reg_vals->pps.slice_height * dsc_cfg->dc_dsc_cfg.num_slices_v == dsc_cfg->pic_height);
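The slice_width change swaps a floor division for the standard ceiling-division idiom, so the programmed slice width covers the full picture even when pic_width is not an exact multiple of the slice count. A worked example with arbitrary numbers:

#include <stdio.h>

int main(void)
{
	int pic_width = 1365, num_slices_h = 2;

	int floor_w = pic_width / num_slices_h;			/* 682: 2*682 < 1365 */
	int ceil_w = (pic_width + num_slices_h - 1) / num_slices_h; /* 683: 2*683 >= 1365 */

	printf("floor=%d ceil=%d\n", floor_w, ceil_w);
	return 0;
}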
diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
index 1313a7c5d87b..73a1e6a03719 100644
--- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
@@ -28,7 +28,7 @@
#include "include/hdcp_msg_types.h"
#include "include/signal_types.h"
#include "core_types.h"
-#include "link.h"
+#include "link_service.h"
#include "link_hwss.h"
#include "link/protocols/link_dpcd.h"
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
index d347bb06577a..e7e5f6d4778e 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
@@ -440,6 +440,35 @@ void hubbub3_init_watermarks(struct hubbub *hubbub)
REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, reg);
}
+void hubbub3_get_det_sizes(struct hubbub *hubbub, uint32_t *curr_det_sizes, uint32_t *target_det_sizes)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+
+ REG_GET_2(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, &curr_det_sizes[0],
+ DET0_SIZE, &target_det_sizes[0]);
+
+ REG_GET_2(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, &curr_det_sizes[1],
+ DET1_SIZE, &target_det_sizes[1]);
+
+ REG_GET_2(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, &curr_det_sizes[2],
+ DET2_SIZE, &target_det_sizes[2]);
+
+ REG_GET_2(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, &curr_det_sizes[3],
+ DET3_SIZE, &target_det_sizes[3]);
+}
+
+uint32_t hubbub3_compbuf_config_error(struct hubbub *hubbub)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ uint32_t compbuf_config_error = 0;
+
+ REG_GET(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR,
+ &compbuf_config_error);
+
+ return compbuf_config_error;
+}
+
static const struct hubbub_funcs hubbub30_funcs = {
.update_dchub = hubbub2_update_dchub,
.init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx,
@@ -457,6 +486,8 @@ static const struct hubbub_funcs hubbub30_funcs = {
.force_pstate_change_control = hubbub3_force_pstate_change_control,
.init_watermarks = hubbub3_init_watermarks,
.hubbub_read_state = hubbub2_read_state,
+ .get_det_sizes = hubbub3_get_det_sizes,
+ .compbuf_config_error = hubbub3_compbuf_config_error,
};
void hubbub3_construct(struct dcn20_hubbub *hubbub3,
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h
index ca6233e8f1f4..49a469969d36 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h
@@ -133,4 +133,10 @@ void hubbub3_force_pstate_change_control(struct hubbub *hubbub,
void hubbub3_init_watermarks(struct hubbub *hubbub);
+void hubbub3_get_det_sizes(struct hubbub *hubbub,
+ uint32_t *curr_det_sizes,
+ uint32_t *target_det_sizes);
+
+uint32_t hubbub3_compbuf_config_error(struct hubbub *hubbub);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
index b98505b240a7..cdb20251a154 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
@@ -1071,6 +1071,8 @@ static const struct hubbub_funcs hubbub31_funcs = {
.program_compbuf_size = dcn31_program_compbuf_size,
.init_crb = dcn31_init_crb,
.hubbub_read_state = hubbub2_read_state,
+ .get_det_sizes = hubbub3_get_det_sizes,
+ .compbuf_config_error = hubbub3_compbuf_config_error,
};
void hubbub31_construct(struct dcn20_hubbub *hubbub31,
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
index 32a6be543105..4d4ca6d77bbd 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
@@ -28,6 +28,7 @@
#include "dcn32_hubbub.h"
#include "dm_services.h"
#include "reg_helper.h"
+#include "dal_asic_id.h"
#define CTX \
@@ -72,6 +73,14 @@ static void dcn32_init_crb(struct hubbub *hubbub)
REG_UPDATE(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, 0x47F);
}
+static void hubbub32_set_sdp_control(struct hubbub *hubbub, bool dc_control)
+{
+ struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_CFG0,
+ SDPIF_PORT_CONTROL, dc_control);
+}
+
void hubbub32_set_request_limit(struct hubbub *hubbub, int memory_channel_count, int words_per_channel)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
@@ -754,8 +763,18 @@ static bool hubbub32_program_watermarks(
unsigned int refclk_mhz,
bool safe_to_lower)
{
+ struct dc *dc = hubbub->ctx->dc;
bool wm_pending = false;
+ if (!safe_to_lower && dc->debug.disable_stutter_for_wm_program &&
+ (ASICREV_IS_GC_11_0_0(dc->ctx->asic_id.hw_internal_rev) ||
+ ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev))) {
+ /* before raising watermarks, SDP control is given to DF and stutter must be disabled */
+ wm_pending = true;
+ hubbub32_set_sdp_control(hubbub, false);
+ hubbub1_allow_self_refresh_control(hubbub, false);
+ }
+
if (hubbub32_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
wm_pending = true;
@@ -786,10 +805,20 @@ static bool hubbub32_program_watermarks(
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/
- if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
- hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ if (safe_to_lower) {
+ /* after lowering watermarks, the stutter setting is restored and SDP control is given back to DC */
+ hubbub1_allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
+
+ if (dc->debug.disable_stutter_for_wm_program &&
+ (ASICREV_IS_GC_11_0_0(dc->ctx->asic_id.hw_internal_rev) ||
+ ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev))) {
+ hubbub32_set_sdp_control(hubbub, true);
+ }
+ } else if (dc->debug.disable_stutter) {
+ hubbub1_allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
+ }
- hubbub32_force_usr_retraining_allow(hubbub, hubbub->ctx->dc->debug.force_usr_allow);
+ hubbub32_force_usr_retraining_allow(hubbub, dc->debug.force_usr_allow);
return wm_pending;
}
@@ -974,8 +1003,7 @@ void hubbub32_init(struct hubbub *hubbub)
ignore the "df_pre_cstate_req" from the SDP port control.
only the DCN will determine when to connect the SDP port
*/
- REG_UPDATE(DCHUBBUB_SDPIF_CFG0,
- SDPIF_PORT_CONTROL, 1);
+ hubbub32_set_sdp_control(hubbub, true);
/*Set SDP's max outstanding request to 512
must set the register back to 0 (max outstanding = 256) in zero frame buffer mode*/
REG_UPDATE(DCHUBBUB_SDPIF_CFG1,
@@ -1009,6 +1037,8 @@ static const struct hubbub_funcs hubbub32_funcs = {
.force_usr_retraining_allow = hubbub32_force_usr_retraining_allow,
.set_request_limit = hubbub32_set_request_limit,
.get_mall_en = hubbub32_get_mall_en,
+ .get_det_sizes = hubbub3_get_det_sizes,
+ .compbuf_config_error = hubbub3_compbuf_config_error,
};
void hubbub32_construct(struct dcn20_hubbub *hubbub2,
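The new sequencing in hubbub32_program_watermarks amounts to an ordering contract: on GC 11.0.0/11.0.3 with the disable_stutter_for_wm_program debug flag set, raising watermarks first hands the SDP port to DF and disables stutter, while lowering restores stutter and only then returns SDP control to DC. A minimal sketch of that control flow; the helpers below are stand-ins, not the driver functions:

#include <stdbool.h>
#include <stdio.h>

static void set_sdp_dc_control(bool dc) { printf("SDP -> %s\n", dc ? "DC" : "DF"); }
static void allow_stutter(bool on)      { printf("stutter %s\n", on ? "on" : "off"); }

static void program_watermarks(bool safe_to_lower, bool wa_needed)
{
	if (!safe_to_lower && wa_needed) {	/* raising watermarks */
		set_sdp_dc_control(false);	/* give SDP to DF first */
		allow_stutter(false);
	}
	printf("program watermark registers\n");
	if (safe_to_lower) {			/* lowering watermarks */
		allow_stutter(true);		/* restore stutter ... */
		if (wa_needed)
			set_sdp_dc_control(true); /* ... then return SDP to DC */
	}
}

int main(void)
{
	program_watermarks(false, true);	/* prepare_bandwidth path */
	program_watermarks(true, true);		/* optimize_bandwidth path */
	return 0;
}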
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
index 6d41953011f5..a443722a8632 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
@@ -589,6 +589,8 @@ static const struct hubbub_funcs hubbub35_funcs = {
.hubbub_read_state = hubbub2_read_state,
.force_usr_retraining_allow = hubbub32_force_usr_retraining_allow,
.dchubbub_init = hubbub35_init,
+ .get_det_sizes = hubbub3_get_det_sizes,
+ .compbuf_config_error = hubbub3_compbuf_config_error,
};
void hubbub35_construct(struct dcn20_hubbub *hubbub2,
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
index 92fab471b183..a36273a52880 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
@@ -1247,6 +1247,8 @@ static const struct hubbub_funcs hubbub4_01_funcs = {
.program_compbuf_segments = dcn401_program_compbuf_segments,
.wait_for_det_update = dcn401_wait_for_det_update,
.program_arbiter = dcn401_program_arbiter,
+ .get_det_sizes = hubbub3_get_det_sizes,
+ .compbuf_config_error = hubbub3_compbuf_config_error,
};
void hubbub401_construct(struct dcn20_hubbub *hubbub2,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
index f8f991785d4f..cf2eb9793008 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
@@ -104,7 +104,8 @@
SRI(DCN_SURF1_TTU_CNTL1, HUBPREQ, id),\
SRI(DCN_CUR0_TTU_CNTL0, HUBPREQ, id),\
SRI(DCN_CUR0_TTU_CNTL1, HUBPREQ, id),\
- SRI(HUBP_CLK_CNTL, HUBP, id)
+ SRI(HUBP_CLK_CNTL, HUBP, id),\
+ SRI(HUBPRET_READ_LINE_VALUE, HUBPRET, id)
/* Register address initialization macro for ASICs with VM */
#define HUBP_REG_LIST_DCN_VM(id)\
@@ -249,7 +250,8 @@
uint32_t CURSOR_POSITION; \
uint32_t CURSOR_HOT_SPOT; \
uint32_t CURSOR_DST_OFFSET; \
- uint32_t HUBP_CLK_CNTL
+ uint32_t HUBP_CLK_CNTL; \
+ uint32_t HUBPRET_READ_LINE_VALUE
#define HUBP_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
@@ -622,6 +624,8 @@
type DCN_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM;\
type DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB;\
type DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;\
+ type PIPE_READ_LINE;\
+ type HUBP_SEG_ALLOC_ERR_STATUS;\
/* todo: get these from GVM instead of reading registers ourselves */\
type PAGE_DIRECTORY_ENTRY_HI32;\
type PAGE_DIRECTORY_ENTRY_LO32;\
@@ -671,6 +675,7 @@ struct dcn_fl_regs_st {
uint32_t lut_done;
uint32_t lut_addr_mode;
uint32_t lut_width;
+ uint32_t lut_mpc_width;
uint32_t lut_tmz;
uint32_t lut_crossbar_sel_r;
uint32_t lut_crossbar_sel_g;
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
index 62369be070ea..f325db555102 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
@@ -264,6 +264,7 @@
type HUBP_3DLUT_DONE;\
type HUBP_3DLUT_ADDRESSING_MODE;\
type HUBP_3DLUT_WIDTH;\
+ type HUBP_3DLUT_MPC_WIDTH;\
type HUBP_3DLUT_TMZ;\
type HUBP_3DLUT_CROSSBAR_SELECT_Y_G;\
type HUBP_3DLUT_CROSSBAR_SELECT_CB_B;\
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
index 0da70b50e86d..556214b2227d 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
@@ -505,6 +505,30 @@ void hubp3_init(struct hubp *hubp)
hubp_reset(hubp);
}
+uint32_t hubp3_get_current_read_line(struct hubp *hubp)
+{
+ uint32_t read_line = 0;
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_GET(HUBPRET_READ_LINE_VALUE,
+ PIPE_READ_LINE,
+ &read_line);
+
+ return read_line;
+}
+
+uint32_t hubp3_get_underflow_status(struct hubp *hubp)
+{
+ uint32_t hubp_underflow = 0;
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_GET(DCHUBP_CNTL,
+ HUBP_UNDERFLOW_STATUS,
+ &hubp_underflow);
+
+ return hubp_underflow;
+}
+
static struct hubp_funcs dcn30_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
@@ -534,6 +558,8 @@ static struct hubp_funcs dcn30_hubp_funcs = {
.hubp_soft_reset = hubp1_soft_reset,
.hubp_set_flip_int = hubp1_set_flip_int,
.hubp_clear_tiling = hubp3_clear_tiling,
+ .hubp_get_underflow_status = hubp3_get_underflow_status,
+ .hubp_get_current_read_line = hubp3_get_current_read_line,
};
bool hubp3_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h
index b7d7adf0b58c..842f4eb72cc8 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h
@@ -243,7 +243,8 @@
HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_6, REFCYC_PER_META_CHUNK_FLIP_C, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_5, REFCYC_PER_VM_GROUP_VBLANK, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_6, REFCYC_PER_VM_REQ_VBLANK, mask_sh),\
- HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, VM_GROUP_SIZE, mask_sh)
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, VM_GROUP_SIZE, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_READ_LINE_VALUE, PIPE_READ_LINE, mask_sh)
bool hubp3_construct(
struct dcn20_hubp *hubp2,
@@ -299,6 +300,11 @@ void hubp3_init(struct hubp *hubp);
void hubp3_clear_tiling(struct hubp *hubp);
+uint32_t hubp3_get_current_read_line(struct hubp *hubp);
+
+uint32_t hubp3_get_underflow_status(struct hubp *hubp);
+
#endif /* __DC_HUBP_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
index 7fd582a8a4ba..47101847c2b7 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
@@ -68,6 +68,18 @@ void hubp31_program_extended_blank_value(
hubp31_program_extended_blank(hubp, min_dst_y_next_start_optimized);
}
+uint32_t hubp31_get_det_config_error(struct hubp *hubp)
+{
+ uint32_t config_error = 0;
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_GET(DCHUBP_CNTL,
+ HUBP_SEG_ALLOC_ERR_STATUS,
+ &config_error);
+
+ return config_error;
+}
+
static struct hubp_funcs dcn31_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
@@ -98,6 +110,9 @@ static struct hubp_funcs dcn31_hubp_funcs = {
.hubp_in_blank = hubp1_in_blank,
.program_extended_blank = hubp31_program_extended_blank,
.hubp_clear_tiling = hubp3_clear_tiling,
+ .hubp_get_underflow_status = hubp3_get_underflow_status,
+ .hubp_get_current_read_line = hubp3_get_current_read_line,
+ .hubp_get_det_config_error = hubp31_get_det_config_error,
};
bool hubp31_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h
index d688db79b750..5952c4671507 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h
@@ -228,7 +228,9 @@
HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_6, REFCYC_PER_META_CHUNK_FLIP_C, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_5, REFCYC_PER_VM_GROUP_VBLANK, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_6, REFCYC_PER_VM_REQ_VBLANK, mask_sh),\
- HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, VM_GROUP_SIZE, mask_sh)
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, VM_GROUP_SIZE, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_READ_LINE_VALUE, PIPE_READ_LINE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_SEG_ALLOC_ERR_STATUS, mask_sh)
bool hubp31_construct(
@@ -246,4 +248,6 @@ void hubp31_set_unbounded_requesting(struct hubp *hubp, bool enable);
void hubp31_program_extended_blank_value(
struct hubp *hubp, unsigned int min_dst_y_next_start_optimized);
+uint32_t hubp31_get_det_config_error(struct hubp *hubp);
+
#endif /* __DC_HUBP_DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
index f3a21c623f44..a5f23bb2a76a 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
@@ -206,6 +206,9 @@ static struct hubp_funcs dcn32_hubp_funcs = {
.hubp_update_mall_sel = hubp32_update_mall_sel,
.hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering,
.hubp_clear_tiling = hubp3_clear_tiling,
+ .hubp_get_underflow_status = hubp3_get_underflow_status,
+ .hubp_get_current_read_line = hubp3_get_current_read_line,
+ .hubp_get_det_config_error = hubp31_get_det_config_error,
};
bool hubp32_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
index 6d060ba12da8..b140808f21af 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
@@ -218,6 +218,9 @@ static struct hubp_funcs dcn35_hubp_funcs = {
.hubp_in_blank = hubp1_in_blank,
.program_extended_blank = hubp31_program_extended_blank_value,
.hubp_clear_tiling = hubp3_clear_tiling,
+ .hubp_get_underflow_status = hubp3_get_underflow_status,
+ .hubp_get_current_read_line = hubp3_get_current_read_line,
+ .hubp_get_det_config_error = hubp31_get_det_config_error,
};
bool hubp35_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
index 705b98b1b6cc..0fcbc6a35be6 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
@@ -127,6 +127,43 @@ void hubp401_program_3dlut_fl_format(struct hubp *hubp, enum hubp_3dlut_fl_forma
REG_UPDATE(_3DLUT_FL_CONFIG, HUBP0_3DLUT_FL_FORMAT, format);
}
+void hubp401_program_3dlut_fl_config(
+ struct hubp *hubp,
+ struct hubp_fl_3dlut_config *cfg)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ uint32_t mpc_width = (cfg->width == 17) ? 0 : 1;
+ uint32_t width = cfg->width;
+
+ if (cfg->layout == DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR)
+ width = (cfg->width == 17) ? 4916 : 35940;
+
+ REG_UPDATE_2(_3DLUT_FL_CONFIG,
+ HUBP0_3DLUT_FL_MODE, cfg->mode,
+ HUBP0_3DLUT_FL_FORMAT, cfg->format);
+
+ REG_UPDATE_2(_3DLUT_FL_BIAS_SCALE,
+ HUBP0_3DLUT_FL_BIAS, cfg->bias,
+ HUBP0_3DLUT_FL_SCALE, cfg->scale);
+
+ REG_UPDATE(HUBP_3DLUT_ADDRESS_HIGH,
+ HUBP_3DLUT_ADDRESS_HIGH, cfg->address.lut3d.addr.high_part);
+ REG_UPDATE(HUBP_3DLUT_ADDRESS_LOW,
+ HUBP_3DLUT_ADDRESS_LOW, cfg->address.lut3d.addr.low_part);
+
+ //cross bar
+ REG_UPDATE_8(HUBP_3DLUT_CONTROL,
+ HUBP_3DLUT_MPC_WIDTH, mpc_width,
+ HUBP_3DLUT_WIDTH, width,
+ HUBP_3DLUT_CROSSBAR_SELECT_CR_R, cfg->crossbar_bit_slice_cr_r,
+ HUBP_3DLUT_CROSSBAR_SELECT_Y_G, cfg->crossbar_bit_slice_y_g,
+ HUBP_3DLUT_CROSSBAR_SELECT_CB_B, cfg->crossbar_bit_slice_cb_b,
+ HUBP_3DLUT_ADDRESSING_MODE, cfg->addr_mode,
+ HUBP_3DLUT_TMZ, cfg->protection_bits,
+ HUBP_3DLUT_ENABLE, cfg->enabled ? 1 : 0);
+}
+
void hubp401_update_mall_sel(struct hubp *hubp, uint32_t mall_sel, bool c_cursor)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
@@ -1033,6 +1070,10 @@ static struct hubp_funcs dcn401_hubp_funcs = {
.hubp_program_3dlut_fl_crossbar = hubp401_program_3dlut_fl_crossbar,
.hubp_get_3dlut_fl_done = hubp401_get_3dlut_fl_done,
.hubp_clear_tiling = hubp401_clear_tiling,
+ .hubp_program_3dlut_fl_config = hubp401_program_3dlut_fl_config,
+ .hubp_get_underflow_status = hubp3_get_underflow_status,
+ .hubp_get_current_read_line = hubp3_get_current_read_line,
+ .hubp_get_det_config_error = hubp31_get_det_config_error,
};
bool hubp401_construct(
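The packed-linear widths in hubp401_program_3dlut_fl_config line up with the LUT volume: 17^3 = 4913 and 33^3 = 35937, so 4916 and 35940 are each the cube plus three (presumably extra entries required by the layout; the register semantics are not spelled out in this patch). A quick check of the arithmetic:

#include <stdio.h>

int main(void)
{
	int sizes[] = { 17, 33 };

	for (int i = 0; i < 2; i++) {
		int n = sizes[i];
		printf("%d^3 + 3 = %d\n", n, n * n * n + 3);	/* 4916, 35940 */
	}
	return 0;
}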
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
index 608e6153fa68..fdabbeec8ffa 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
@@ -252,7 +252,9 @@
HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_1H_P0, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_2H_P0, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_1H_P1, mask_sh),\
- HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_2H_P1, mask_sh)
+ HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_2H_P1, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_READ_LINE_VALUE, PIPE_READ_LINE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_SEG_ALLOC_ERR_STATUS, mask_sh)
void hubp401_update_mall_sel(struct hubp *hubp, uint32_t mall_sel, bool c_cursor);
@@ -349,6 +351,10 @@ void hubp401_program_3dlut_fl_format(struct hubp *hubp, enum hubp_3dlut_fl_forma
void hubp401_program_3dlut_fl_mode(struct hubp *hubp, enum hubp_3dlut_fl_mode mode);
+void hubp401_program_3dlut_fl_config(
+ struct hubp *hubp,
+ struct hubp_fl_3dlut_config *cfg);
+
void hubp401_clear_tiling(struct hubp *hubp);
void hubp401_vready_at_or_After_vsync(struct hubp *hubp,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 4ea13d0bf815..24184b4eb352 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -48,7 +48,7 @@
#include "link_encoder.h"
#include "link_enc_cfg.h"
#include "link_hwss.h"
-#include "link.h"
+#include "link_service.h"
#include "dccg.h"
#include "clock_source.h"
#include "clk_mgr.h"
@@ -671,6 +671,7 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
uint32_t early_control = 0;
struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ link_hwss->setup_stream_attribute(pipe_ctx);
link_hwss->setup_stream_encoder(pipe_ctx);
dc->hwss.update_info_frame(pipe_ctx);
@@ -1269,7 +1270,7 @@ void dce110_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
pipe_ctx->stream_res.stream_enc->funcs->set_avmute(pipe_ctx->stream_res.stream_enc, enable);
}
-static enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id)
+enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id)
{
switch (crtc_id) {
case CONTROLLER_ID_D0:
@@ -1289,7 +1290,7 @@ static enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id)
}
}
-static void populate_audio_dp_link_info(
+void populate_audio_dp_link_info(
const struct pipe_ctx *pipe_ctx,
struct audio_dp_link_info *dp_link_info)
{
@@ -1924,10 +1925,8 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
get_edp_streams(context, edp_streams, &edp_stream_num);
- // Check fastboot support, disable on DCE8 because of blank screens
- if (edp_num && edp_stream_num && dc->ctx->dce_version != DCE_VERSION_8_0 &&
- dc->ctx->dce_version != DCE_VERSION_8_1 &&
- dc->ctx->dce_version != DCE_VERSION_8_3) {
+ /* Check fastboot support, disable on DCE 6-8 because of blank screens */
+ if (edp_num && edp_stream_num && dc->ctx->dce_version < DCE_VERSION_10_0) {
for (i = 0; i < edp_num; i++) {
edp_link = edp_links[i];
if (edp_link != edp_streams[0]->link)
@@ -2254,7 +2253,7 @@ static bool should_enable_fbc(struct dc *dc,
/*
* Enable FBC
*/
-static void enable_fbc(
+void enable_fbc(
struct dc *dc,
struct dc_state *context)
{
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
index 7cd8c1576988..9c032e449481 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
@@ -114,5 +114,12 @@ void build_audio_output(
struct dc_state *state,
const struct pipe_ctx *pipe_ctx,
struct audio_output *audio_output);
+enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id);
+void populate_audio_dp_link_info(
+ const struct pipe_ctx *pipe_ctx,
+ struct audio_dp_link_info *dp_link_info);
+void enable_fbc(
+ struct dc *dc,
+ struct dc_state *context);
#endif /* __DC_HWSS_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index 39910f73ecd0..e9fe97f0c4ea 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -55,7 +55,7 @@
#include "dce/dmub_hw_lock_mgr.h"
#include "dc_trace.h"
#include "dce/dmub_outbox.h"
-#include "link.h"
+#include "link_service.h"
#include "dc_state_priv.h"
#define DC_LOGGER \
@@ -328,19 +328,25 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
}
DTN_INFO("\n=======HUBP FL======\n");
- DTN_INFO(
- "HUBP FL: Enabled Done adr_mode width tmz xbar_sel_R xbar_sel_G xbar_sel_B adr_hi adr_low REFCYC Bias Scale Mode Format\n");
+ static const char * const pLabels[] = {
+ "inst", "Enabled ", "Done ", "adr_mode ", "width ", "mpc_width ",
+ "tmz", "xbar_sel_R", "xbar_sel_G", "xbar_sel_B", "adr_hi ",
+ "adr_low", "REFCYC", "Bias", "Scale", "Mode",
+ "Format", "prefetch"};
+
for (i = 0; i < pool->pipe_count; i++) {
struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
struct dcn_fl_regs_st *fl_regs = &s->fl_regs;
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
if (!s->blank_en) {
- DTN_INFO("[%2d]: %5xh %6xh %5d %6d %8xh %2xh %6xh %6d %8d %8d %7d %8xh %5x %5x %5x",
+ uint32_t values[] = {
pool->hubps[i]->inst,
fl_regs->lut_enable,
fl_regs->lut_done,
fl_regs->lut_addr_mode,
fl_regs->lut_width,
+ fl_regs->lut_mpc_width,
fl_regs->lut_tmz,
fl_regs->lut_crossbar_sel_r,
fl_regs->lut_crossbar_sel_g,
@@ -351,8 +357,13 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
fl_regs->lut_fl_bias,
fl_regs->lut_fl_scale,
fl_regs->lut_fl_mode,
- fl_regs->lut_fl_format);
- DTN_INFO("\n");
+ fl_regs->lut_fl_format,
+ dlg_regs->dst_y_prefetch};
+
+ int num_elements = 18;
+
+ for (int j = 0; j < num_elements; j++)
+ DTN_INFO("%s \t %8xh\n", pLabels[j], values[j]);
}
}
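The refactored logging keeps two parallel arrays plus a hardcoded element count (18 here, 19 below), all of which must stay in sync by hand. A sketch of the same pattern using the usual sizeof-based count instead (ARRAY_SIZE in kernel code), assuming both arrays always keep matching lengths:

#include <stdio.h>

#define COUNT_OF(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	static const char * const labels[] = { "inst", "Enabled", "Done" };
	unsigned int values[] = { 0, 1, 1 };

	/* COUNT_OF tracks the array automatically, unlike a literal 18/19 */
	for (unsigned int j = 0; j < COUNT_OF(values); j++)
		printf("%s \t %8xh\n", labels[j], values[j]);
	return 0;
}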
@@ -541,19 +552,43 @@ static void dcn10_log_color_state(struct dc *dc,
dc->caps.color.mpc.ogam_ram,
dc->caps.color.mpc.ocsc);
DTN_INFO("===== MPC RMCM 3DLUT =====\n");
- DTN_INFO("MPCC: SIZE MODE MODE_CUR RD_SEL 30BIT_EN WR_EN_MASK RAM_SEL OUT_NORM_FACTOR FL_SEL OUT_OFFSET OUT_SCALE FL_DONE SOFT_UNDERFLOW HARD_UNDERFLOW MEM_PWR_ST FORCE DIS MODE\n");
+ static const char * const pLabels[] = {
+ "MPCC", "SIZE", "MODE", "MODE_CUR", "RD_SEL",
+ "30BIT_EN", "WR_EN_MASK", "RAM_SEL", "OUT_NORM_FACTOR", "FL_SEL",
+ "OUT_OFFSET", "OUT_SCALE", "FL_DONE", "SOFT_UNDERFLOW", "HARD_UNDERFLOW",
+ "MEM_PWR_ST", "FORCE", "DIS", "MODE"};
+
for (i = 0; i < pool->mpcc_count; i++) {
struct mpcc_state s = {0};
pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
- if (s.opp_id != 0xf)
- DTN_INFO("[%2d]: %4xh %4xh %6xh %4x %4x %4x %4x %4x %4xh %4xh %6xh %4x %4x %4x %4x %4x %4x %4x\n",
- i, s.rmcm_regs.rmcm_3dlut_size, s.rmcm_regs.rmcm_3dlut_mode, s.rmcm_regs.rmcm_3dlut_mode_cur,
- s.rmcm_regs.rmcm_3dlut_read_sel, s.rmcm_regs.rmcm_3dlut_30bit_en, s.rmcm_regs.rmcm_3dlut_wr_en_mask,
- s.rmcm_regs.rmcm_3dlut_ram_sel, s.rmcm_regs.rmcm_3dlut_out_norm_factor, s.rmcm_regs.rmcm_3dlut_fl_sel,
- s.rmcm_regs.rmcm_3dlut_out_offset_r, s.rmcm_regs.rmcm_3dlut_out_scale_r, s.rmcm_regs.rmcm_3dlut_fl_done,
- s.rmcm_regs.rmcm_3dlut_fl_soft_underflow, s.rmcm_regs.rmcm_3dlut_fl_hard_underflow, s.rmcm_regs.rmcm_3dlut_mem_pwr_state,
- s.rmcm_regs.rmcm_3dlut_mem_pwr_force, s.rmcm_regs.rmcm_3dlut_mem_pwr_dis, s.rmcm_regs.rmcm_3dlut_mem_pwr_mode);
+ if (s.opp_id != 0xf) {
+ uint32_t values[] = {
+ i,
+ s.rmcm_regs.rmcm_3dlut_size,
+ s.rmcm_regs.rmcm_3dlut_mode,
+ s.rmcm_regs.rmcm_3dlut_mode_cur,
+ s.rmcm_regs.rmcm_3dlut_read_sel,
+ s.rmcm_regs.rmcm_3dlut_30bit_en,
+ s.rmcm_regs.rmcm_3dlut_wr_en_mask,
+ s.rmcm_regs.rmcm_3dlut_ram_sel,
+ s.rmcm_regs.rmcm_3dlut_out_norm_factor,
+ s.rmcm_regs.rmcm_3dlut_fl_sel,
+ s.rmcm_regs.rmcm_3dlut_out_offset_r,
+ s.rmcm_regs.rmcm_3dlut_out_scale_r,
+ s.rmcm_regs.rmcm_3dlut_fl_done,
+ s.rmcm_regs.rmcm_3dlut_fl_soft_underflow,
+ s.rmcm_regs.rmcm_3dlut_fl_hard_underflow,
+ s.rmcm_regs.rmcm_3dlut_mem_pwr_state,
+ s.rmcm_regs.rmcm_3dlut_mem_pwr_force,
+ s.rmcm_regs.rmcm_3dlut_mem_pwr_dis,
+ s.rmcm_regs.rmcm_3dlut_mem_pwr_mode};
+
+ int num_elements = 19;
+
+ for (int j = 0; j < num_elements; j++)
+ DTN_INFO("%s \t %8xh\n", pLabels[j], values[j]);
+ }
}
DTN_INFO("\n");
DTN_INFO("===== MPC RMCM Shaper =====\n");
@@ -3312,7 +3347,7 @@ void dcn10_prepare_bandwidth(
context,
false);
- dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
+ dc->optimized_required = hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
@@ -3628,6 +3663,8 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
int y_plane = pipe_ctx->plane_state->dst_rect.y;
int x_pos = pos_cpy.x;
int y_pos = pos_cpy.y;
+ int clip_x = pipe_ctx->plane_state->clip_rect.x;
+ int clip_width = pipe_ctx->plane_state->clip_rect.width;
if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
@@ -3646,7 +3683,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
*/
/**
- * Translate cursor from stream space to plane space.
+ * Translate cursor and clip offset from stream space to plane space.
*
* If the cursor is scaled then we need to scale the position
* to be in the approximately correct place. We can't do anything
@@ -3663,6 +3700,10 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_state->dst_rect.width;
y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
pipe_ctx->plane_state->dst_rect.height;
+ clip_x = (clip_x - x_plane) * pipe_ctx->plane_state->src_rect.width /
+ pipe_ctx->plane_state->dst_rect.width;
+ clip_width = clip_width * pipe_ctx->plane_state->src_rect.width /
+ pipe_ctx->plane_state->dst_rect.width;
}
/**
@@ -3709,30 +3750,18 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
if (param.rotation == ROTATION_ANGLE_0) {
- int viewport_width =
- pipe_ctx->plane_res.scl_data.viewport.width;
- int viewport_x =
- pipe_ctx->plane_res.scl_data.viewport.x;
if (param.mirror) {
- if (pipe_split_on || odm_combine_on) {
- if (pos_cpy.x >= viewport_width + viewport_x) {
- pos_cpy.x = 2 * viewport_width
- - pos_cpy.x + 2 * viewport_x;
- } else {
- uint32_t temp_x = pos_cpy.x;
-
- pos_cpy.x = 2 * viewport_x - pos_cpy.x;
- if (temp_x >= viewport_x +
- (int)hubp->curs_attr.width || pos_cpy.x
- <= (int)hubp->curs_attr.width +
- pipe_ctx->plane_state->src_rect.x) {
- pos_cpy.x = 2 * viewport_width - temp_x;
- }
- }
- } else {
- pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
- }
+ /*
+ * The plane is split into multiple viewports.
+ * The combination of all viewports spans the
+ * entirety of the clip rect.
+ *
+ * Without pipe_split, viewport_width represents
+ * the full width of the clip_rect, so we can just
+ * mirror it.
+ */
+ pos_cpy.x = clip_width - pos_cpy.x + 2 * clip_x;
}
}
// Swap axis and mirror horizontally
@@ -3802,30 +3831,17 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
}
// Mirror horizontally and vertically
else if (param.rotation == ROTATION_ANGLE_180) {
- int viewport_width =
- pipe_ctx->plane_res.scl_data.viewport.width;
- int viewport_x =
- pipe_ctx->plane_res.scl_data.viewport.x;
-
if (!param.mirror) {
- if (pipe_split_on || odm_combine_on) {
- if (pos_cpy.x >= viewport_width + viewport_x) {
- pos_cpy.x = 2 * viewport_width
- - pos_cpy.x + 2 * viewport_x;
- } else {
- uint32_t temp_x = pos_cpy.x;
-
- pos_cpy.x = 2 * viewport_x - pos_cpy.x;
- if (temp_x >= viewport_x +
- (int)hubp->curs_attr.width || pos_cpy.x
- <= (int)hubp->curs_attr.width +
- pipe_ctx->plane_state->src_rect.x) {
- pos_cpy.x = temp_x + viewport_width;
- }
- }
- } else {
- pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
- }
+ /*
+ * The plane is split into multiple viewports.
+ * The combination of all viewports spans the
+ * entirety of the clip rect.
+ *
+ * Without pipe_split, viewport_width represents
+ * the full width of the clip_rect, so we can just
+ * mirror it.
+ */
+ pos_cpy.x = clip_width - pos_cpy.x + 2 * clip_x;
}
/**
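The simplified mirroring collapses the old per-viewport special cases into one reflection across the clip rect: a cursor at offset d from the left clip edge ends up at offset d from the right edge. A standalone check of the formula, with arbitrary values:

#include <stdio.h>

int main(void)
{
	int clip_x = 100, clip_width = 800;
	int x = 150;	/* 50 px from the left clip edge */

	int mirrored = clip_width - x + 2 * clip_x;	/* the driver's formula */

	/* 850: 50 px from the right clip edge (clip ends at 900) */
	printf("mirrored = %d\n", mirrored);
	return 0;
}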
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index 3207addbd4eb..9477c9f9e196 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -54,7 +54,7 @@
#include "dpcd_defs.h"
#include "inc/link_enc_cfg.h"
#include "link_hwss.h"
-#include "link.h"
+#include "link_service.h"
#include "dc_state_priv.h"
#define DC_LOGGER \
@@ -955,7 +955,7 @@ enum dc_status dcn20_enable_stream_timing(
return DC_ERROR_UNEXPECTED;
}
- fsleep(stream->timing.v_total * (stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz));
+ udelay(stream->timing.v_total * (stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz));
params.vertical_total_min = stream->adjust.v_total_min;
params.vertical_total_max = stream->adjust.v_total_max;
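The delay argument works out to one frame time in microseconds: since pix_clk_100hz is the pixel clock in units of 100 Hz, h_total * 10000 / pix_clk_100hz is the line time in us, and multiplying by v_total gives the frame. A worked example for a 1080p60 CEA timing:

#include <stdio.h>

int main(void)
{
	unsigned int h_total = 2200, v_total = 1125;
	unsigned int pix_clk_100hz = 1485000;	/* 148.5 MHz */

	/* integer line time in us, then one full frame */
	unsigned int us = v_total * (h_total * 10000u / pix_clk_100hz);

	/* 15750 us; the exact frame is ~16667 us, truncation undershoots */
	printf("%u us\n", us);
	return 0;
}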
@@ -1982,10 +1982,8 @@ static void dcn20_program_pipe(
* updating on slave planes
*/
if (pipe_ctx->update_flags.bits.enable ||
- pipe_ctx->update_flags.bits.plane_changed ||
- pipe_ctx->stream->update_flags.bits.out_tf ||
- (pipe_ctx->plane_state &&
- pipe_ctx->plane_state->update_flags.bits.output_tf_change))
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->stream->update_flags.bits.out_tf)
hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
/* If the pipe has been enabled or has a different opp, we
@@ -2390,10 +2388,10 @@ void dcn20_prepare_bandwidth(
}
/* program dchubbub watermarks:
- * For assigning wm_optimized_required, use |= operator since we don't want
+ * For assigning optimized_required, use |= operator since we don't want
* to clear the value if the optimize has not happened yet
*/
- dc->wm_optimized_required |= hubbub->funcs->program_watermarks(hubbub,
+ dc->optimized_required |= hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
false);
@@ -2406,10 +2404,10 @@ void dcn20_prepare_bandwidth(
if (hubbub->funcs->program_compbuf_size) {
if (context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes) {
compbuf_size_kb = context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes;
- dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.dml.ip.min_comp_buffer_size_kbytes);
+ dc->optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.dml.ip.min_comp_buffer_size_kbytes);
} else {
compbuf_size_kb = context->bw_ctx.bw.dcn.compbuf_size_kb;
- dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.compbuf_size_kb);
+ dc->optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.compbuf_size_kb);
}
hubbub->funcs->program_compbuf_size(hubbub, compbuf_size_kb, false);
@@ -3054,6 +3052,8 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
link_enc->transmitter - TRANSMITTER_UNIPHY_A);
}
+ link_hwss->setup_stream_attribute(pipe_ctx);
+
if (dc->res_pool->dccg->funcs->set_pixel_rate_div)
dc->res_pool->dccg->funcs->set_pixel_rate_div(
dc->res_pool->dccg,
@@ -3129,7 +3129,8 @@ void dcn20_fpga_init_hw(struct dc *dc)
res_pool->dccg->funcs->dccg_init(res_pool->dccg);
//Enable ability to power gate / don't force power on permanently
- hws->funcs.enable_power_gating_plane(hws, true);
+ if (hws->funcs.enable_power_gating_plane)
+ hws->funcs.enable_power_gating_plane(hws, true);
// Specific to FPGA dccg and registers
REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
index 61efb15572ff..e2269211553c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
@@ -35,7 +35,7 @@
#include "hw/clk_mgr.h"
#include "dc_dmub_srv.h"
#include "abm.h"
-#include "link.h"
+#include "link_service.h"
#define DC_LOGGER_INIT(logger)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
index 37a239219dfe..e47ed5571dfd 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
@@ -50,7 +50,7 @@
#include "dpcd_defs.h"
#include "dcn20/dcn20_hwseq.h"
#include "dcn30/dcn30_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "dc_state_priv.h"
@@ -1228,3 +1228,51 @@ void dcn30_wait_for_all_pending_updates(const struct pipe_ctx *pipe_ctx)
}
}
}
+
+void dcn30_get_underflow_debug_data(const struct dc *dc,
+ struct timing_generator *tg,
+ struct dc_underflow_debug_data *out_data)
+{
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+
+ if (tg) {
+ uint32_t v_blank_start = 0, v_blank_end = 0;
+
+ out_data->otg_inst = tg->inst;
+
+ tg->funcs->get_scanoutpos(tg,
+ &v_blank_start,
+ &v_blank_end,
+ &out_data->h_position,
+ &out_data->v_position);
+
+ out_data->otg_frame_count = tg->funcs->get_frame_count(tg);
+
+ out_data->otg_underflow = tg->funcs->is_optc_underflow_occurred(tg);
+ }
+
+ for (int i = 0; i < MAX_PIPES; i++) {
+ struct hubp *hubp = dc->res_pool->hubps[i];
+
+ if (hubp) {
+ if (hubp->funcs->hubp_get_underflow_status)
+ out_data->hubps[i].hubp_underflow = hubp->funcs->hubp_get_underflow_status(hubp);
+
+ if (hubp->funcs->hubp_in_blank)
+ out_data->hubps[i].hubp_in_blank = hubp->funcs->hubp_in_blank(hubp);
+
+ if (hubp->funcs->hubp_get_current_read_line)
+ out_data->hubps[i].hubp_readline = hubp->funcs->hubp_get_current_read_line(hubp);
+
+ if (hubp->funcs->hubp_get_det_config_error)
+ out_data->hubps[i].det_config_error = hubp->funcs->hubp_get_det_config_error(hubp);
+ }
+ }
+
+ if (hubbub->funcs->get_det_sizes)
+ hubbub->funcs->get_det_sizes(hubbub, out_data->curr_det_sizes, out_data->target_det_sizes);
+
+ if (hubbub->funcs->compbuf_config_error)
+ out_data->compbuf_config_error = hubbub->funcs->compbuf_config_error(hubbub);
+
+}
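dcn30_get_underflow_debug_data only calls a hook after checking the function pointer, so generations that lack the corresponding register simply leave that field zeroed. A stripped-down illustration of the same optional-callback pattern; the structs here are invented for the sketch:

#include <stdio.h>

struct hubp_funcs { unsigned int (*get_read_line)(void); };
struct debug_data { unsigned int readline; };

static unsigned int fake_read_line(void) { return 42; }

static void collect(const struct hubp_funcs *funcs, struct debug_data *out)
{
	/* only dereference hooks that this generation implements */
	if (funcs->get_read_line)
		out->readline = funcs->get_read_line();
}

int main(void)
{
	struct hubp_funcs with = { .get_read_line = fake_read_line };
	struct hubp_funcs without = { 0 };
	struct debug_data d = { 0 };

	collect(&with, &d);
	printf("readline=%u\n", d.readline);	/* 42 */
	collect(&without, &d);			/* safely skipped */
	return 0;
}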
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
index 4b90b781c4f2..40afbbfb5b9c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
@@ -29,6 +29,7 @@
#include "hw_sequencer_private.h"
struct dc;
+struct dc_underflow_debug_data;
void dcn30_init_hw(struct dc *dc);
void dcn30_program_all_writeback_pipes_in_tree(
@@ -98,4 +99,8 @@ void dcn30_prepare_bandwidth(struct dc *dc,
void dcn30_wait_for_all_pending_updates(const struct pipe_ctx *pipe_ctx);
+void dcn30_get_underflow_debug_data(const struct dc *dc,
+ struct timing_generator *tg,
+ struct dc_underflow_debug_data *out_data);
+
#endif /* __DC_HWSS_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
index 2ac5d54d1626..d7ff55669bac 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
@@ -110,6 +110,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
.is_abm_supported = dcn21_is_abm_supported,
.wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn30_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
index 8ba934b83957..b822f2dffff0 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
@@ -45,7 +45,7 @@
#include "link_hwss.h"
#include "dpcd_defs.h"
#include "dce/dmub_outbox.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn10/dcn10_hwseq.h"
#include "dcn21/dcn21_hwseq.h"
#include "inc/link_enc_cfg.h"
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
index 556f4fe57eda..5a6a459da224 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
@@ -112,6 +112,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
.setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn31_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
index e68f21fd5f0f..f925f669f2a4 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
@@ -46,7 +46,7 @@
#include "link_hwss.h"
#include "dpcd_defs.h"
#include "dce/dmub_outbox.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn10/dcn10_hwseq.h"
#include "inc/link_enc_cfg.h"
#include "dcn30/dcn30_vpg.h"
@@ -528,3 +528,75 @@ void dcn314_disable_link_output(struct dc_link *link,
apply_symclk_on_tx_off_wa(link);
}
+
+/**
+ * dcn314_dpp_pg_control - DPP power gate control.
+ *
+ * @hws: dce_hwseq reference.
+ * @dpp_inst: DPP instance reference.
+ * @power_on: true to power the DPP instance on (ungate it), false to power gate it.
+ *
+ * Enable or disable power gating for the specified DPP instance.
+ * If power gating is disabled, the cursor in the DPP instance is force-disabled instead.
+ */
+void dcn314_dpp_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool power_on)
+{
+ uint32_t power_gate = power_on ? 0 : 1;
+ uint32_t pwr_status = power_on ? 0 : 2;
+
+ if (hws->ctx->dc->debug.disable_dpp_power_gate) {
+ /* Workaround for DCN314 with disabled power gating */
+ if (!power_on) {
+
+ /* Force disable cursor if power gating is disabled */
+ struct dpp *dpp = hws->ctx->dc->res_pool->dpps[dpp_inst];
+ if (dpp && dpp->funcs->dpp_force_disable_cursor)
+ dpp->funcs->dpp_force_disable_cursor(dpp);
+ }
+ return;
+ }
+ if (REG(DOMAIN1_PG_CONFIG) == 0)
+ return;
+
+ switch (dpp_inst) {
+ case 0: /* DPP0 */
+ REG_UPDATE(DOMAIN1_PG_CONFIG,
+ DOMAIN1_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN1_PG_STATUS,
+ DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 1: /* DPP1 */
+ REG_UPDATE(DOMAIN3_PG_CONFIG,
+ DOMAIN3_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN3_PG_STATUS,
+ DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 2: /* DPP2 */
+ REG_UPDATE(DOMAIN5_PG_CONFIG,
+ DOMAIN5_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN5_PG_STATUS,
+ DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 3: /* DPP3 */
+ REG_UPDATE(DOMAIN7_PG_CONFIG,
+ DOMAIN7_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN7_PG_STATUS,
+ DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+}
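The per-instance register choice follows a simple pattern: DPP instance n uses power-gating domain 2n+1 (DOMAIN1/3/5/7), POWER_GATE is written as 0 to ungate, and the PGFSM status is polled for 0 (on) or 2 (gated). A sketch of just that mapping, with the register access left out and the struct invented for illustration:

#include <stdbool.h>
#include <stdio.h>

struct pg_request {
	int domain;			/* DOMAINn_PG_CONFIG/STATUS index */
	unsigned int power_gate;	/* value for DOMAINn_POWER_GATE */
	unsigned int pwr_status;	/* expected DOMAINn_PGFSM_PWR_STATUS */
};

static struct pg_request dpp_pg_request(unsigned int dpp_inst, bool power_on)
{
	struct pg_request r = {
		.domain = 2 * dpp_inst + 1,	/* DPP0..3 -> DOMAIN1/3/5/7 */
		.power_gate = power_on ? 0 : 1,
		.pwr_status = power_on ? 0 : 2,
	};
	return r;
}

int main(void)
{
	struct pg_request r = dpp_pg_request(2, false);

	printf("DOMAIN%d gate=%u wait_status=%u\n",
	       r.domain, r.power_gate, r.pwr_status);	/* DOMAIN5 gate=1 wait=2 */
	return 0;
}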
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h
index 2305ad282f21..6c072d0274ea 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h
@@ -47,4 +47,6 @@ void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst,
void dcn314_disable_link_output(struct dc_link *link, const struct link_resource *link_res, enum signal_type signal);
+void dcn314_dpp_pg_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool power_on);
+
#endif /* __DC_HWSS_DCN314_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
index f5112742edf9..79faab1125d4 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
@@ -115,6 +115,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
.calculate_pix_rate_divider = dcn314_calculate_pix_rate_divider,
.setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn314_private_funcs = {
@@ -141,6 +142,7 @@ static const struct hwseq_private_funcs dcn314_private_funcs = {
.enable_power_gating_plane = dcn314_enable_power_gating_plane,
.dpp_root_clock_control = dcn314_dpp_root_clock_control,
.hubp_pg_control = dcn31_hubp_pg_control,
+ .dpp_pg_control = dcn314_dpp_pg_control,
.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
.update_odm = dcn314_update_odm,
.dsc_pg_control = dcn314_dsc_pg_control,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index 416b1dca3dac..f39292952702 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -49,7 +49,7 @@
#include "dcn20/dcn20_optc.h"
#include "dce/dmub_hw_lock_mgr.h"
#include "dcn32/dcn32_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "../dcn20/dcn20_hwseq.h"
#include "dc_state_priv.h"
@@ -1052,7 +1052,7 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
}
/* Enable DSC hw block */
- dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->hblank_borrow +
+ dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->dsc_padding_params.dsc_hactive_padding +
stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
index b971356d30b1..c19ef075c882 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
@@ -121,6 +121,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider,
.program_outstanding_updates = dcn32_program_outstanding_updates,
.wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn32_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index a267f574b619..05011061822c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -46,7 +46,7 @@
#include "link_hwss.h"
#include "dpcd_defs.h"
#include "dce/dmub_outbox.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn10/dcn10_hwseq.h"
#include "inc/link_enc_cfg.h"
#include "dcn30/dcn30_vpg.h"
@@ -113,6 +113,14 @@ static void enable_memory_low_power(struct dc *dc)
}
#endif
+static void print_pg_status(struct dc *dc, const char *debug_func, const char *debug_log)
+{
+ if (dc->debug.enable_pg_cntl_debug_logs && dc->res_pool->pg_cntl) {
+ if (dc->res_pool->pg_cntl->funcs->print_pg_status)
+ dc->res_pool->pg_cntl->funcs->print_pg_status(dc->res_pool->pg_cntl, debug_func, debug_log);
+ }
+}
+
void dcn35_set_dmu_fgcg(struct dce_hwseq *hws, bool enable)
{
REG_UPDATE_3(DMU_CLK_CNTL,
@@ -137,6 +145,8 @@ void dcn35_init_hw(struct dc *dc)
uint32_t user_level = MAX_BACKLIGHT_LEVEL;
int i;
+ print_pg_status(dc, __func__, ": start");
+
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
@@ -200,10 +210,7 @@ void dcn35_init_hw(struct dc *dc)
/* we want to turn off all dp displays before doing detection */
dc->link_srv->blank_all_dp_displays(dc);
-/*
- if (hws->funcs.enable_power_gating_plane)
- hws->funcs.enable_power_gating_plane(dc->hwseq, true);
-*/
+
if (res_pool->hubbub && res_pool->hubbub->funcs->dchubbub_init)
res_pool->hubbub->funcs->dchubbub_init(dc->res_pool->hubbub);
/* If taking control over from VBIOS, we may want to optimize our first
@@ -236,6 +243,8 @@ void dcn35_init_hw(struct dc *dc)
}
hws->funcs.init_pipes(dc, dc->current_state);
+ print_pg_status(dc, __func__, ": after init_pipes");
+
if (dc->res_pool->hubbub->funcs->allow_self_refresh_control &&
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter)
dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
@@ -312,6 +321,7 @@ void dcn35_init_hw(struct dc *dc)
if (dc->res_pool->pg_cntl->funcs->init_pg_status)
dc->res_pool->pg_cntl->funcs->init_pg_status(dc->res_pool->pg_cntl);
}
+ print_pg_status(dc, __func__, ": after init_pg_status");
}
static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
@@ -500,97 +510,6 @@ void dcn35_physymclk_root_clock_control(struct dce_hwseq *hws, unsigned int phy_
}
}
-void dcn35_dsc_pg_control(
- struct dce_hwseq *hws,
- unsigned int dsc_inst,
- bool power_on)
-{
- uint32_t power_gate = power_on ? 0 : 1;
- uint32_t pwr_status = power_on ? 0 : 2;
- uint32_t org_ip_request_cntl = 0;
-
- if (hws->ctx->dc->debug.disable_dsc_power_gate)
- return;
- if (hws->ctx->dc->debug.ignore_pg)
- return;
- REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
- if (org_ip_request_cntl == 0)
- REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
-
- switch (dsc_inst) {
- case 0: /* DSC0 */
- REG_UPDATE(DOMAIN16_PG_CONFIG,
- DOMAIN_POWER_GATE, power_gate);
-
- REG_WAIT(DOMAIN16_PG_STATUS,
- DOMAIN_PGFSM_PWR_STATUS, pwr_status,
- 1, 1000);
- break;
- case 1: /* DSC1 */
- REG_UPDATE(DOMAIN17_PG_CONFIG,
- DOMAIN_POWER_GATE, power_gate);
-
- REG_WAIT(DOMAIN17_PG_STATUS,
- DOMAIN_PGFSM_PWR_STATUS, pwr_status,
- 1, 1000);
- break;
- case 2: /* DSC2 */
- REG_UPDATE(DOMAIN18_PG_CONFIG,
- DOMAIN_POWER_GATE, power_gate);
-
- REG_WAIT(DOMAIN18_PG_STATUS,
- DOMAIN_PGFSM_PWR_STATUS, pwr_status,
- 1, 1000);
- break;
- case 3: /* DSC3 */
- REG_UPDATE(DOMAIN19_PG_CONFIG,
- DOMAIN_POWER_GATE, power_gate);
-
- REG_WAIT(DOMAIN19_PG_STATUS,
- DOMAIN_PGFSM_PWR_STATUS, pwr_status,
- 1, 1000);
- break;
- default:
- BREAK_TO_DEBUGGER();
- break;
- }
-
- if (org_ip_request_cntl == 0)
- REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
-}
-
-void dcn35_enable_power_gating_plane(struct dce_hwseq *hws, bool enable)
-{
- bool force_on = true; /* disable power gating */
- uint32_t org_ip_request_cntl = 0;
-
- if (hws->ctx->dc->debug.disable_hubp_power_gate)
- return;
- if (hws->ctx->dc->debug.ignore_pg)
- return;
- REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
- if (org_ip_request_cntl == 0)
- REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
- /* DCHUBP0/1/2/3/4/5 */
- REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- /* DPP0/1/2/3/4/5 */
- REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
-
- force_on = true; /* disable power gating */
- if (enable && !hws->ctx->dc->debug.disable_dsc_power_gate)
- force_on = false;
-
- /* DCS0/1/2/3/4 */
- REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
- REG_UPDATE(DOMAIN19_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
-
-
-}
-
/* In headless boot cases, DIG may be turned
* on which causes HW/SW discrepancies.
* To avoid this, power down hardware on boot
@@ -1453,6 +1372,8 @@ void dcn35_prepare_bandwidth(
}
dcn20_prepare_bandwidth(dc, context);
+
+ print_pg_status(dc, __func__, ": after rcg and power up");
}
void dcn35_optimize_bandwidth(
@@ -1461,6 +1382,8 @@ void dcn35_optimize_bandwidth(
{
struct pg_block_update pg_update_state;
+ print_pg_status(dc, __func__, ": before rcg and power up");
+
dcn20_optimize_bandwidth(dc, context);
if (dc->hwss.calc_blocks_to_gate) {
@@ -1472,6 +1395,8 @@ void dcn35_optimize_bandwidth(
if (dc->hwss.root_clock_control)
dc->hwss.root_clock_control(dc, &pg_update_state, false);
}
+
+ print_pg_status(dc, __func__, ": after rcg and power up");
}
void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
index a3ccf805bd16..f2f16a0bdb4f 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
@@ -115,7 +115,6 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
.apply_idle_power_optimizations = dcn35_apply_idle_power_optimizations,
- .update_dsc_pg = dcn32_update_dsc_pg,
.calc_blocks_to_gate = dcn35_calc_blocks_to_gate,
.calc_blocks_to_ungate = dcn35_calc_blocks_to_ungate,
.hw_block_power_up = dcn35_hw_block_power_up,
@@ -128,6 +127,7 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.enable_plane = dcn20_enable_plane,
.update_dchubp_dpp = dcn20_update_dchubp_dpp,
.post_unlock_reset_opp = dcn20_post_unlock_reset_opp,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn35_private_funcs = {
@@ -150,7 +150,6 @@ static const struct hwseq_private_funcs dcn35_private_funcs = {
.plane_atomic_disable = dcn35_plane_atomic_disable,
//.plane_atomic_disable = dcn20_plane_atomic_disable,/*todo*/
//.hubp_pg_control = dcn35_hubp_pg_control,
- .enable_power_gating_plane = dcn35_enable_power_gating_plane,
.dpp_root_clock_control = dcn35_dpp_root_clock_control,
.dpstream_root_clock_control = dcn35_dpstream_root_clock_control,
.physymclk_root_clock_control = dcn35_physymclk_root_clock_control,
@@ -165,7 +164,6 @@ static const struct hwseq_private_funcs dcn35_private_funcs = {
.calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values,
.resync_fifo_dccg_dio = dcn314_resync_fifo_dccg_dio,
.is_dp_dig_pixel_rate_div_policy = dcn35_is_dp_dig_pixel_rate_div_policy,
- .dsc_pg_control = dcn35_dsc_pg_control,
.dsc_pg_status = dcn32_dsc_pg_status,
.enable_plane = dcn35_enable_plane,
.wait_for_pipe_update_if_needed = dcn10_wait_for_pipe_update_if_needed,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
index 58f2be2a326b..09e60158f0b5 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
@@ -114,7 +114,6 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
.apply_idle_power_optimizations = dcn35_apply_idle_power_optimizations,
- .update_dsc_pg = dcn32_update_dsc_pg,
.calc_blocks_to_gate = dcn351_calc_blocks_to_gate,
.calc_blocks_to_ungate = dcn351_calc_blocks_to_ungate,
.hw_block_power_up = dcn351_hw_block_power_up,
@@ -123,6 +122,7 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
.set_long_vtotal = dcn35_set_long_vblank,
.calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider,
.setup_hpo_hw_control = dcn35_setup_hpo_hw_control,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn351_private_funcs = {
@@ -145,7 +145,6 @@ static const struct hwseq_private_funcs dcn351_private_funcs = {
.plane_atomic_disable = dcn35_plane_atomic_disable,
//.plane_atomic_disable = dcn20_plane_atomic_disable,/*todo*/
//.hubp_pg_control = dcn35_hubp_pg_control,
- .enable_power_gating_plane = dcn35_enable_power_gating_plane,
.dpp_root_clock_control = dcn35_dpp_root_clock_control,
.dpstream_root_clock_control = dcn35_dpstream_root_clock_control,
.physymclk_root_clock_control = dcn35_physymclk_root_clock_control,
@@ -159,7 +158,6 @@ static const struct hwseq_private_funcs dcn351_private_funcs = {
.setup_hpo_hw_control = dcn35_setup_hpo_hw_control,
.calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values,
.is_dp_dig_pixel_rate_div_policy = dcn35_is_dp_dig_pixel_rate_div_policy,
- .dsc_pg_control = dcn35_dsc_pg_control,
.dsc_pg_status = dcn32_dsc_pg_status,
.enable_plane = dcn35_enable_plane,
.wait_for_pipe_update_if_needed = dcn10_wait_for_pipe_update_if_needed,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index cc9f40d97af2..7c276c319086 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -25,7 +25,7 @@
#include "dpcd_defs.h"
#include "clk_mgr.h"
#include "dsc.h"
-#include "link.h"
+#include "link_service.h"
#include "dce/dmub_hw_lock_mgr.h"
#include "dcn10/dcn10_cm_common.h"
@@ -810,9 +810,12 @@ enum dc_status dcn401_enable_stream_timing(
if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal)))
dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx);
- /* if we are borrowing from hblank, h_addressable needs to be adjusted */
- if (dc->debug.enable_hblank_borrow)
- patched_crtc_timing.h_addressable = patched_crtc_timing.h_addressable + pipe_ctx->hblank_borrow;
+ /* if we are padding, h_addressable needs to be adjusted */
+ if (dc->debug.enable_hblank_borrow) {
+ patched_crtc_timing.h_addressable = patched_crtc_timing.h_addressable + pipe_ctx->dsc_padding_params.dsc_hactive_padding;
+ patched_crtc_timing.h_total = patched_crtc_timing.h_total + pipe_ctx->dsc_padding_params.dsc_htotal_padding;
+ patched_crtc_timing.pix_clk_100hz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz;
+ }
pipe_ctx->stream_res.tg->funcs->program_timing(
pipe_ctx->stream_res.tg,
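
The padding fix-up above grows the programmed timing by the pixels borrowed from hblank and swaps in the recomputed pixel clock. A hedged stand-alone mirror of that adjustment, with simplified struct names (the real fields live in the crtc timing and dsc_padding_params):

#include <stdint.h>

struct timing { uint32_t h_addressable, h_total, pix_clk_100hz; };
struct padding { uint8_t hactive; uint32_t htotal, pix_clk_100hz; };

static void apply_dsc_padding(struct timing *t, const struct padding *p)
{
        t->h_addressable += p->hactive;         /* pixels borrowed from hblank */
        t->h_total += p->htotal;                /* stretch the whole line */
        t->pix_clk_100hz = p->pix_clk_100hz;    /* precomputed padded clock */
}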
@@ -965,6 +968,8 @@ void dcn401_enable_stream(struct pipe_ctx *pipe_ctx)
}
}
+ link_hwss->setup_stream_attribute(pipe_ctx);
+
if (dc->res_pool->dccg->funcs->set_pixel_rate_div) {
dc->res_pool->dccg->funcs->set_pixel_rate_div(
dc->res_pool->dccg,
@@ -1378,22 +1383,22 @@ void dcn401_prepare_bandwidth(struct dc *dc,
false);
/* program dchubbub watermarks:
- * For assigning wm_optimized_required, use |= operator since we don't want
+ * For assigning optimized_required, use |= operator since we don't want
* to clear the value if the optimize has not happened yet
*/
- dc->wm_optimized_required |= hubbub->funcs->program_watermarks(hubbub,
+ dc->optimized_required |= hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
false);
/* update timeout thresholds */
if (hubbub->funcs->program_arbiter) {
- dc->wm_optimized_required |= hubbub->funcs->program_arbiter(hubbub, &context->bw_ctx.bw.dcn.arb_regs, false);
+ dc->optimized_required |= hubbub->funcs->program_arbiter(hubbub, &context->bw_ctx.bw.dcn.arb_regs, false);
}
/* decrease compbuf size */
if (hubbub->funcs->program_compbuf_segments) {
compbuf_size = context->bw_ctx.bw.dcn.arb_regs.compbuf_size;
- dc->wm_optimized_required |= (compbuf_size != dc->current_state->bw_ctx.bw.dcn.arb_regs.compbuf_size);
+ dc->optimized_required |= (compbuf_size != dc->current_state->bw_ctx.bw.dcn.arb_regs.compbuf_size);
hubbub->funcs->program_compbuf_segments(hubbub, compbuf_size, false);
}
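
The rename above keeps the accumulate-don't-clear idiom intact: each programming step ORs its own "optimize still needed" verdict into the flag, so a request raised by an earlier step survives later steps that return false. The idiom in isolation, with illustrative step names:

#include <stdbool.h>

static bool program_all(bool watermarks_dirty, bool arbiter_dirty, bool pending)
{
        pending |= watermarks_dirty;    /* e.g. program_watermarks() verdict */
        pending |= arbiter_dirty;       /* e.g. program_arbiter() verdict */
        return pending;                 /* cleared only by the optimize pass */
}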
@@ -1619,20 +1624,28 @@ void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx,
void dcn401_hardware_release(struct dc *dc)
{
- dc_dmub_srv_fams2_update_config(dc, dc->current_state, false);
-
- /* If pstate unsupported, or still supported
- * by firmware, force it supported by dcn
- */
- if (dc->current_state) {
- if ((!dc->clk_mgr->clks.p_state_change_support ||
- dc->current_state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) &&
- dc->res_pool->hubbub->funcs->force_pstate_change_control)
- dc->res_pool->hubbub->funcs->force_pstate_change_control(
- dc->res_pool->hubbub, true, true);
-
- dc->current_state->bw_ctx.bw.dcn.clk.p_state_change_support = true;
- dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true);
+ if (!dc->debug.disable_force_pstate_allow_on_hw_release) {
+ dc_dmub_srv_fams2_update_config(dc, dc->current_state, false);
+
+ /* If pstate unsupported, or still supported
+ * by firmware, force it supported by dcn
+ */
+ if (dc->current_state) {
+ if ((!dc->clk_mgr->clks.p_state_change_support ||
+ dc->current_state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) &&
+ dc->res_pool->hubbub->funcs->force_pstate_change_control)
+ dc->res_pool->hubbub->funcs->force_pstate_change_control(
+ dc->res_pool->hubbub, true, true);
+
+ dc->current_state->bw_ctx.bw.dcn.clk.p_state_change_support = true;
+ dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true);
+ }
+ } else {
+ if (dc->current_state) {
+ dc->clk_mgr->clks.p_state_change_support = false;
+ dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true);
+ }
+ dc_dmub_srv_fams2_update_config(dc, dc->current_state, false);
}
}
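
The restructured release path above differs in ordering and in the forced p-state value. A control-flow sketch, assuming the clk_state struct and puts() placeholders stand in for the firmware and clock calls:

#include <stdbool.h>
#include <stdio.h>

struct clk_state { bool p_state_change_support; };

static void hw_release(struct clk_state *clks, bool disable_force_allow)
{
        if (!disable_force_allow) {
                puts("fams2_update_config(false)");     /* firmware first */
                clks->p_state_change_support = true;    /* force supported */
                puts("update_clocks()");
        } else {
                clks->p_state_change_support = false;   /* report unsupported */
                puts("update_clocks()");
                puts("fams2_update_config(false)");     /* firmware last */
        }
}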
@@ -2019,10 +2032,8 @@ void dcn401_program_pipe(
* updating on slave planes
*/
if (pipe_ctx->update_flags.bits.enable ||
- pipe_ctx->update_flags.bits.plane_changed ||
- pipe_ctx->stream->update_flags.bits.out_tf ||
- (pipe_ctx->plane_state &&
- pipe_ctx->plane_state->update_flags.bits.output_tf_change))
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->stream->update_flags.bits.out_tf)
hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
/* If the pipe has been enabled or has a different opp, we
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
index fe7aceb2f510..d6e11b7e4fce 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
@@ -104,6 +104,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.enable_plane = dcn20_enable_plane,
.update_dchubp_dpp = dcn20_update_dchubp_dpp,
.post_unlock_reset_opp = dcn20_post_unlock_reset_opp,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn401_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index 9df8030e37f7..1723bbcf2c46 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -47,6 +47,7 @@ struct link_resource;
struct dc_dmub_cmd;
struct pg_block_update;
struct drr_params;
+struct dc_underflow_debug_data;
struct subvp_pipe_control_lock_fast_params {
struct dc *dc;
@@ -475,6 +476,9 @@ struct hw_sequencer_funcs {
struct dc_state *context);
void (*post_unlock_reset_opp)(struct dc *dc,
struct pipe_ctx *opp_head);
+ void (*get_underflow_debug_data)(const struct dc *dc,
+ struct timing_generator *tg,
+ struct dc_underflow_debug_data *out_data);
};
void color_space_to_black_color(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index f0d7185153b2..d11893f8c916 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -433,7 +433,14 @@ enum p_state_switch_method {
P_STATE_V_ACTIVE,
P_STATE_SUB_VP,
P_STATE_DRR_SUB_VP,
- P_STATE_V_BLANK_SUB_VP
+ P_STATE_V_BLANK_SUB_VP,
+};
+
+struct dsc_padding_params {
+ /* pixels borrowed from hblank to hactive */
+ uint8_t dsc_hactive_padding;
+ uint32_t dsc_htotal_padding;
+ uint32_t dsc_pix_clk_100hz;
};
struct pipe_ctx {
@@ -493,8 +500,7 @@ struct pipe_ctx {
/* subvp_index: only valid if the pipe is a SUBVP_MAIN*/
uint8_t subvp_index;
struct pixel_rate_divider pixel_rate_divider;
- /* pixels borrowed from hblank to hactive */
- uint8_t hblank_borrow;
+ struct dsc_padding_params dsc_padding_params;
/* next vupdate */
uint32_t next_vupdate;
uint32_t wait_frame_count;
@@ -683,6 +689,7 @@ struct replay_context {
/* Controller Id used for Dig Fe source select */
enum controller_id controllerId;
unsigned int line_time_in_ns;
+ bool os_request_force_ffu;
};
enum dc_replay_enable {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index 52b745667ef7..843a18287c83 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -137,6 +137,19 @@ struct dcn_hubbub_state {
uint32_t dram_state_cntl;
};
+struct hubbub_system_latencies {
+ uint32_t max_latency_ns;
+ uint32_t avg_latency_ns;
+ uint32_t min_latency_ns;
+};
+
+struct hubbub_urgent_latency_params {
+ uint32_t refclk_mhz;
+ uint32_t t_win_ns;
+ uint32_t bandwidth_mbps;
+ uint32_t bw_factor_x1000;
+};
+
struct hubbub_funcs {
void (*update_dchub)(
struct hubbub *hubbub,
@@ -229,6 +242,17 @@ struct hubbub_funcs {
void (*program_compbuf_segments)(struct hubbub *hubbub, unsigned compbuf_size_seg, bool safe_to_increase);
void (*wait_for_det_update)(struct hubbub *hubbub, int hubp_inst);
bool (*program_arbiter)(struct hubbub *hubbub, struct dml2_display_arb_regs *arb_regs, bool safe_to_lower);
+ void (*get_det_sizes)(struct hubbub *hubbub, uint32_t *curr_det_sizes, uint32_t *target_det_sizes);
+ uint32_t (*compbuf_config_error)(struct hubbub *hubbub);
+ struct hubbub_perfmon_funcs{
+ void (*start_system_latency_measurement)(struct hubbub *hubbub);
+ void (*get_system_latency_result)(struct hubbub *hubbub, uint32_t refclk_mhz, struct hubbub_system_latencies *latencies);
+ void (*start_in_order_bandwidth_measurement)(struct hubbub *hubbub);
+ void (*get_in_order_bandwidth_result)(struct hubbub *hubbub, uint32_t refclk_mhz, uint32_t *bandwidth_mbps);
+ void (*start_urgent_ramp_latency_measurement)(struct hubbub *hubbub, const struct hubbub_urgent_latency_params *params);
+ void (*get_urgent_ramp_latency_result)(struct hubbub *hubbub, uint32_t refclk_mhz, uint32_t *latency_ns);
+ void (*reset)(struct hubbub *hubbub);
+ } perfmon;
};
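
A hypothetical caller for the new perfmon hooks, assuming the header above is included and the ASIC-specific hubbub populated the pointers; the start/get pairing and refclk_mhz plumbing follow the declared prototypes, everything else is illustrative:

static void measure_system_latency(struct hubbub *hubbub, uint32_t refclk_mhz,
                                   struct hubbub_system_latencies *out)
{
        const struct hubbub_funcs *funcs = hubbub->funcs;

        if (!funcs->perfmon.start_system_latency_measurement ||
            !funcs->perfmon.get_system_latency_result)
                return;

        if (funcs->perfmon.reset)
                funcs->perfmon.reset(hubbub);           /* clean counters */
        funcs->perfmon.start_system_latency_measurement(hubbub);
        /* ... let display traffic run for the sampling window ... */
        funcs->perfmon.get_system_latency_result(hubbub, refclk_mhz, out);
}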
struct hubbub {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 0c5675d1c593..1b7c085dc2cc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -349,6 +349,9 @@ struct dpp_funcs {
struct dpp *dpp_base,
enum dc_color_space color_space,
struct dc_csc_transform cursor_csc_color_matrix);
+
+ void (*dpp_force_disable_cursor)(struct dpp *dpp_base);
+
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index cee29e89ec5c..2b874d2cc61c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -89,7 +89,7 @@ enum hubp_3dlut_fl_addressing_mode {
enum hubp_3dlut_fl_width {
hubp_3dlut_fl_width_17 = 17,
hubp_3dlut_fl_width_33 = 33,
- hubp_3dlut_fl_width_transformed = 4916
+ hubp_3dlut_fl_width_transformed = 4916, //mpc default
};
enum hubp_3dlut_fl_crossbar_bit_slice {
@@ -99,6 +99,22 @@ enum hubp_3dlut_fl_crossbar_bit_slice {
hubp_3dlut_fl_crossbar_bit_slice_48_63 = 3
};
+struct hubp_fl_3dlut_config {
+ bool enabled;
+ enum hubp_3dlut_fl_width width;
+ enum hubp_3dlut_fl_mode mode;
+ enum hubp_3dlut_fl_format format;
+ uint16_t bias;
+ uint16_t scale;
+ struct dc_plane_address address;
+ enum hubp_3dlut_fl_addressing_mode addr_mode;
+ enum dc_cm2_gpu_mem_layout layout;
+ uint8_t protection_bits;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r;
+};
+
struct hubp {
const struct hubp_funcs *funcs;
struct dc_context *ctx;
@@ -288,7 +304,10 @@ struct hubp_funcs {
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r);
int (*hubp_get_3dlut_fl_done)(struct hubp *hubp);
+ void (*hubp_program_3dlut_fl_config)(struct hubp *hubp, struct hubp_fl_3dlut_config *cfg);
void (*hubp_clear_tiling)(struct hubp *hubp);
+ uint32_t (*hubp_get_current_read_line)(struct hubp *hubp);
+ uint32_t (*hubp_get_det_config_error)(struct hubp *hubp);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 7641439f6ca0..22960ee03dee 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -115,6 +115,16 @@ enum MCM_LUT_ID {
MCM_LUT_SHAPER
};
+struct mpc_fl_3dlut_config {
+ bool enabled;
+ uint16_t width;
+ bool select_lut_bank_a;
+ uint16_t bit_depth;
+ int hubp_index;
+ uint16_t bias;
+ uint16_t scale;
+};
+
union mcm_lut_params {
const struct pwl_params *pwl;
const struct tetrahedral_params *lut3d;
@@ -1059,21 +1069,6 @@ struct mpc_funcs {
*/
void (*program_lut_mode)(struct mpc *mpc, const enum MCM_LUT_ID id, const enum MCM_LUT_XABLE xable,
bool lut_bank_a, int mpcc_id);
- /**
- * @program_3dlut_size:
- *
- * Program 3D LUT size.
- *
- * Parameters:
- * - [in/out] mpc - MPC context.
- * - [in] is_17x17x17 - is 3dlut 17x17x17
- * - [in] mpcc_id
- *
- * Return:
- *
- * void
- */
- void (*program_3dlut_size)(struct mpc *mpc, bool is_17x17x17, int mpcc_id);
/**
* @mcm:
@@ -1098,6 +1093,7 @@ struct mpc_funcs {
* MPC RMCM new HW sequential programming functions
*/
struct {
+ void (*fl_3dlut_configure)(struct mpc *mpc, struct mpc_fl_3dlut_config *cfg, int mpcc_id);
void (*enable_3dlut_fl)(struct mpc *mpc, bool enable, int mpcc_id);
void (*update_3dlut_fast_load_select)(struct mpc *mpc, int mpcc_id, int hubp_idx);
void (*program_lut_read_write_control)(struct mpc *mpc, const enum MCM_LUT_ID id,
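
The two new fast-load 3D LUT config structs split the programming into a HUBP (source surface) half and an MPC (consumer) half. A hedged sketch populating them: field values are placeholders, only names visible in the hunks above are used, and the MPC hook invocation is left as a comment since its container member is not shown here:

static void sketch_fl_3dlut_setup(struct hubp *hubp, int mpcc_id)
{
        struct hubp_fl_3dlut_config hcfg = {
                .enabled = true,
                .width = hubp_3dlut_fl_width_17,
                .bias = 0,                      /* placeholder fixed-point values */
                .scale = 0x1000,
                .crossbar_bit_slice_cr_r = hubp_3dlut_fl_crossbar_bit_slice_48_63,
        };
        struct mpc_fl_3dlut_config mcfg = {
                .enabled = true,
                .width = 17,
                .bit_depth = 12,                /* placeholder depth */
                .hubp_index = 0,                /* HUBP feeding the fast load */
                .select_lut_bank_a = true,
        };

        if (hubp->funcs->hubp_program_3dlut_fl_config)
                hubp->funcs->hubp_program_3dlut_fl_config(hubp, &hcfg);
        /* mcfg goes to the matching MPC hook:
         * fl_3dlut_configure(mpc, &mcfg, mpcc_id) */
        (void)mcfg;
        (void)mpcc_id;
}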
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h
index 44f86cc2d1d6..227e3f8d7e5f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h
@@ -49,6 +49,7 @@ struct pg_cntl_funcs {
void (*mem_pg_control)(struct pg_cntl *pg_cntl, bool power_on);
void (*dio_pg_control)(struct pg_cntl *pg_cntl, bool power_on);
void (*init_pg_status)(struct pg_cntl *pg_cntl);
+ void (*print_pg_status)(struct pg_cntl *pg_cntl, const char *debug_func, const char *debug_log);
};
#endif //__DC_PG_CNTL_H__
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 267ace4eef8a..f2de2cf23859 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -374,6 +374,7 @@ struct timing_generator_funcs {
void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg);
void (*set_long_vtotal)(struct timing_generator *optc, const struct long_vtotal_params *params);
void (*wait_odm_doublebuffer_pending_clear)(struct timing_generator *tg);
+ void (*wait_otg_disable)(struct timing_generator *optc);
bool (*get_optc_double_buffer_pending)(struct timing_generator *tg);
bool (*get_otg_double_buffer_pending)(struct timing_generator *tg);
bool (*get_pipe_update_pending)(struct timing_generator *tg);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link_service.h
index f2503402c10e..1e34e84160aa 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_service.h
@@ -42,8 +42,8 @@
* dc_link_exports.c or other dc files implement dc.h
*
* DC to Link:
- * dc_link_exports.c or other dc files include link.h
- * link_factory.c implements link.h
+ * dc_link_exports.c or other dc files include link_service.h
+ * link_factory.c implements link_service.h
*
* Link sub-component to Link sub-component:
* link_factory.c includes --> link_xxx.h
@@ -73,7 +73,7 @@
* 2. Implement your function in the suitable link_xxx.c file.
* 3. Assign the function to link_service in link_factory.c
* 4. NEVER include link_xxx.h headers outside link component.
- * 5. NEVER include link.h on DM side.
+ * 5. NEVER include link_service.h on DM side.
*/
#include "core_types.h"
@@ -218,7 +218,10 @@ struct link_service {
bool (*dp_overwrite_extended_receiver_cap)(struct dc_link *link);
enum lttpr_mode (*dp_decide_lttpr_mode)(struct dc_link *link,
struct dc_link_settings *link_setting);
-
+ uint8_t (*dp_get_lttpr_count)(struct dc_link *link);
+ void (*edp_get_alpm_support)(struct dc_link *link,
+ bool *auxless_support,
+ bool *auxwake_support);
/*************************** DP DPIA/PHY ******************************/
void (*dpia_handle_usb4_bandwidth_allocation_for_link)(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index a890f581f4e8..4e26a16a8743 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -45,6 +45,7 @@ enum dce_version resource_parse_asic_id(
struct resource_caps {
int num_timing_generator;
int num_opp;
+ int num_dpp;
int num_video_plane;
int num_audio;
int num_stream_encoder;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/soc_and_ip_translator.h b/drivers/gpu/drm/amd/display/dc/inc/soc_and_ip_translator.h
new file mode 100644
index 000000000000..23daf98b8aa8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/soc_and_ip_translator.h
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#ifndef __SOC_AND_IP_TRANSLATOR_H__
+#define __SOC_AND_IP_TRANSLATOR_H__
+
+#include "dc.h"
+#include "dml_top_soc_parameter_types.h"
+
+struct soc_and_ip_translator_funcs {
+ void (*get_soc_bb)(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config);
+ void (*get_ip_caps)(struct dml2_ip_capabilities *dml_ip_caps);
+};
+
+struct soc_and_ip_translator {
+ const struct soc_and_ip_translator_funcs *translator_funcs;
+};
+
+struct soc_and_ip_translator *dc_create_soc_and_ip_translator(enum dce_version dc_version);
+void dc_destroy_soc_and_ip_translator(struct soc_and_ip_translator **soc_and_ip_translator);
+
+
+#endif // __SOC_AND_IP_TRANSLATOR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
index 2956c2b3ad1a..9e33bf937a69 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -76,6 +76,8 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
uint8_t count;
int i;
struct audio_output audio_output[MAX_PIPES];
+ struct dc_stream_state *streams_on_link[MAX_PIPES];
+ int num_streams_on_link = 0;
needs_divider_update = (link->dc->link_srv->dp_get_encoding_format(link_setting) !=
link->dc->link_srv->dp_get_encoding_format((const struct dc_link_settings *) &link->cur_link_settings));
@@ -138,12 +140,19 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
pipes[i]->stream_res.tg->funcs->enable_crtc(pipes[i]->stream_res.tg);
// Set DPMS on with stream update
- for (i = 0; i < state->stream_count; i++)
- if (state->streams[i] && state->streams[i]->link && state->streams[i]->link == link) {
- stream_update.stream = state->streams[i];
+ // Cache all streams on current link since dc_update_planes_and_stream might kill current_state
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (state->streams[i] && state->streams[i]->link && state->streams[i]->link == link)
+ streams_on_link[num_streams_on_link++] = state->streams[i];
+ }
+
+ for (i = 0; i < num_streams_on_link; i++) {
+ if (streams_on_link[i] && streams_on_link[i]->link && streams_on_link[i]->link == link) {
+ stream_update.stream = streams_on_link[i];
stream_update.dpms_off = &dpms_off;
- dc_update_planes_and_stream(state->clk_mgr->ctx->dc, NULL, 0, state->streams[i], &stream_update);
+ dc_update_planes_and_stream(state->clk_mgr->ctx->dc, NULL, 0, streams_on_link[i], &stream_update);
}
+ }
}
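
The retrain fix above snapshots the link's streams before the first dc_update_planes_and_stream() call, because that call may rebuild current_state under the loop. The same cache-then-iterate pattern in isolation, with illustrative types:

#define N_PIPES 6                       /* illustrative bound, like MAX_PIPES */

struct stream;                          /* opaque here */

static void set_dpms_off_on_link(struct stream *streams[], int count,
                                 void (*update)(struct stream *))
{
        struct stream *cached[N_PIPES];
        int i, n = 0;

        for (i = 0; i < count && n < N_PIPES; i++)
                if (streams[i])
                        cached[n++] = streams[i];   /* snapshot first */

        for (i = 0; i < n; i++)
                update(cached[i]);      /* each call may rebuild the state */
}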
static void dp_test_send_link_training(struct dc_link *link)
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h
index eae23ea7f6ec..033650cdb811 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h
@@ -24,7 +24,7 @@
*/
#ifndef __LINK_DP_CTS_H__
#define __LINK_DP_CTS_H__
-#include "link.h"
+#include "link_service.h"
void dp_handle_automated_test(struct dc_link *link);
bool dp_set_test_pattern(
struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h
index ab437a0c9101..9ff4a6c46a2b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h
@@ -24,7 +24,7 @@
*/
#ifndef __LINK_DP_TRACE_H__
#define __LINK_DP_TRACE_H__
-#include "link.h"
+#include "link_service.h"
void dp_trace_init(struct dc_link *link);
void dp_trace_reset(struct dc_link *link);
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
index b68bcc9fca0a..892907991f91 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
@@ -138,8 +138,7 @@ void setup_dio_stream_attribute(struct pipe_ctx *pipe_ctx)
stream_encoder->funcs->dvi_set_stream_attribute(
stream_encoder,
&stream->timing,
- (stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) ?
- true : false);
+ stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK);
else if (dc_is_lvds_signal(stream->signal))
stream_encoder->funcs->lvds_set_stream_attribute(
stream_encoder,
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
index 45f0e091fcb0..4a25210a344f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
@@ -27,7 +27,7 @@
#define __LINK_HWSS_DIO_H__
#include "link_hwss.h"
-#include "link.h"
+#include "link_service.h"
const struct link_hwss *get_dio_link_hwss(void);
bool can_use_dio_link_hwss(const struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h
index 9ac08a332540..cf578a8662a4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h
@@ -25,7 +25,7 @@
#ifndef __LINK_HWSS_DIO_FIXED_VS_PE_RETIMER_H__
#define __LINK_HWSS_DIO_FIXED_VS_PE_RETIMER_H__
-#include "link.h"
+#include "link_service.h"
uint32_t dp_dio_fixed_vs_pe_retimer_get_lttpr_write_address(struct dc_link *link);
uint8_t dp_dio_fixed_vs_pe_retimer_lane_cfg_to_hw_cfg(struct dc_link *link);
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h
index 1d3ed8ca83b5..7c9005bc2587 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h
@@ -26,7 +26,7 @@
#define __LINK_HWSS_HPO_DP_H__
#include "link_hwss.h"
-#include "link.h"
+#include "link_service.h"
void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx,
struct fixed31_32 throttled_vcp_size);
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h
index 82301187bc7c..8bf36827ecfb 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h
@@ -25,7 +25,7 @@
#ifndef __LINK_HWSS_HPO_FIXED_VS_PE_RETIMER_DP_H__
#define __LINK_HWSS_HPO_FIXED_VS_PE_RETIMER_DP_H__
-#include "link.h"
+#include "link_service.h"
bool requires_fixed_vs_pe_retimer_hpo_link_hwss(const struct dc_link *link);
const struct link_hwss *get_hpo_fixed_vs_pe_retimer_dp_link_hwss(void);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index 827b630daf49..85303167a553 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -656,7 +656,7 @@ static bool wait_for_entering_dp_alt_mode(struct dc_link *link)
return true;
is_in_alt_mode = link->link_enc->funcs->is_in_alt_mode(link->link_enc);
- DC_LOG_DC("DP Alt mode state on HPD: %d\n", is_in_alt_mode);
+ DC_LOG_DC("DP Alt mode state on HPD: %d Link=%d\n", is_in_alt_mode, link->link_index);
if (is_in_alt_mode)
return true;
@@ -1140,6 +1140,10 @@ static bool detect_link_and_local_sink(struct dc_link *link,
if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
!sink->edid_caps.edid_hdmi)
sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ else if (dc_is_dvi_signal(sink->sink_signal) &&
+ aud_support->hdmi_audio_native &&
+ sink->edid_caps.edid_hdmi)
+ sink->sink_signal = SIGNAL_TYPE_HDMI_TYPE_A;
if (link->local_sink && dc_is_dp_signal(sink_caps.signal))
dp_trace_init(link);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.h b/drivers/gpu/drm/amd/display/dc/link/link_detection.h
index 7da05078721e..1ab29476060b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.h
@@ -25,7 +25,7 @@
#ifndef __DC_LINK_DETECTION_H__
#define __DC_LINK_DETECTION_H__
-#include "link.h"
+#include "link_service.h"
bool link_detect(struct dc_link *link, enum dc_detect_reason reason);
bool link_detect_connection_type(struct dc_link *link,
enum dc_connection_type *type);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index 8c8682f743d6..83419e1a9036 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -832,7 +832,7 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
enum optc_dsc_mode optc_dsc_mode;
/* Enable DSC hw block */
- dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->hblank_borrow +
+ dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->dsc_padding_params.dsc_hactive_padding +
stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
@@ -2358,9 +2358,9 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->stream->sink) {
if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) {
- DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__,
+ DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x link=%d\n", __func__,
pipe_ctx->stream->sink->edid_caps.display_name,
- pipe_ctx->stream->signal);
+ pipe_ctx->stream->signal, link->link_index);
}
}
@@ -2458,7 +2458,6 @@ void link_set_dpms_on(
struct link_encoder *link_enc = pipe_ctx->link_res.dio_link_enc;
enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO;
struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
- const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
bool apply_edp_fast_boot_optimization =
pipe_ctx->stream->apply_edp_fast_boot_optimization;
@@ -2474,9 +2473,10 @@ void link_set_dpms_on(
if (pipe_ctx->stream->sink) {
if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) {
- DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__,
+ DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x link=%d\n", __func__,
pipe_ctx->stream->sink->edid_caps.display_name,
- pipe_ctx->stream->signal);
+ pipe_ctx->stream->signal,
+ link->link_index);
}
}
@@ -2502,8 +2502,6 @@ void link_set_dpms_on(
pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, otg_out_dest);
}
- link_hwss->setup_stream_attribute(pipe_ctx);
-
pipe_ctx->stream->apply_edp_fast_boot_optimization = false;
// Enable VPG before building infoframe
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.h b/drivers/gpu/drm/amd/display/dc/link/link_dpms.h
index 9398f9c1666a..bd6fc63064a3 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.h
@@ -26,7 +26,7 @@
#ifndef __DC_LINK_DPMS_H__
#define __DC_LINK_DPMS_H__
-#include "link.h"
+#include "link_service.h"
void link_set_dpms_on(
struct dc_state *state,
struct pipe_ctx *pipe_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index de1143dbbd25..31a73867cd4c 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -165,6 +165,8 @@ static void construct_link_service_dp_capability(struct link_service *link_srv)
link_srv->dp_overwrite_extended_receiver_cap =
dp_overwrite_extended_receiver_cap;
link_srv->dp_decide_lttpr_mode = dp_decide_lttpr_mode;
+ link_srv->dp_get_lttpr_count = dp_get_lttpr_count;
+ link_srv->edp_get_alpm_support = edp_get_alpm_support;
}
/* link dp phy/dpia implements basic dp phy/dpia functionality such as
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.h b/drivers/gpu/drm/amd/display/dc/link/link_factory.h
index e96220d48d03..aad36ca1a31c 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.h
@@ -24,7 +24,7 @@
*/
#ifndef __LINK_FACTORY_H__
#define __LINK_FACTORY_H__
-#include "link.h"
+#include "link_service.h"
struct dc_link *link_create(const struct link_init_data *init_params);
void link_destroy(struct dc_link **link);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_resource.h b/drivers/gpu/drm/amd/display/dc/link/link_resource.h
index 1907bda3cb6e..f7aa3bc3a93a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_resource.h
@@ -24,7 +24,7 @@
*/
#ifndef __LINK_RESOURCE_H__
#define __LINK_RESOURCE_H__
-#include "link.h"
+#include "link_service.h"
void link_get_cur_res_map(const struct dc *dc, uint32_t *map);
void link_restore_res_map(const struct dc *dc, uint32_t *map);
void link_get_cur_link_res(const struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
index aecaf37eee35..acdc162de535 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
@@ -408,8 +408,10 @@ enum dc_status link_validate_dp_tunnel_bandwidth(const struct dc *dc, const stru
link = stream->link;
if (!(link && (stream->signal == SIGNAL_TYPE_DISPLAY_PORT
- || stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
- && link->hpd_status))
+ || stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)))
+ continue;
+
+ if ((link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) && (link->hpd_status == false))
continue;
dp_tunnel_settings = get_dp_tunnel_settings(new_ctx, stream);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.h b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
index 9553c81053fe..595774e76453 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
@@ -24,7 +24,7 @@
*/
#ifndef __LINK_VALIDATION_H__
#define __LINK_VALIDATION_H__
-#include "link.h"
+#include "link_service.h"
enum dc_status link_validate_mode_timing(
const struct dc_stream_state *stream,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
index a3e25e55bed6..d3e6f01a6a90 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
@@ -26,7 +26,7 @@
#ifndef __DAL_DDC_SERVICE_H__
#define __DAL_DDC_SERVICE_H__
-#include "link.h"
+#include "link_service.h"
#define AUX_POWER_UP_WA_DELAY 500
#define I2C_OVER_AUX_DEFER_WA_DELAY 70
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index 651926e547b9..b12c11bd6a14 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -1525,8 +1525,8 @@ bool read_is_mst_supported(struct dc_link *link)
return false;
}
- rev.raw = 0;
- cap.raw = 0;
+ rev.raw = 0;
+ cap.raw = 0;
st = core_link_read_dpcd(link, DP_DPCD_REV, &rev.raw,
sizeof(rev));
@@ -2125,13 +2125,13 @@ void detect_edp_sink_caps(struct dc_link *link)
&backlight_adj_cap, sizeof(backlight_adj_cap));
link->dpcd_caps.dynamic_backlight_capable_edp =
- (backlight_adj_cap & DP_EDP_DYNAMIC_BACKLIGHT_CAP) ? true:false;
+ (backlight_adj_cap & DP_EDP_DYNAMIC_BACKLIGHT_CAP) ? true : false;
core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_1,
&general_edp_cap, sizeof(general_edp_cap));
link->dpcd_caps.set_power_state_capable_edp =
- (general_edp_cap & DP_EDP_SET_POWER_CAP) ? true:false;
+ (general_edp_cap & DP_EDP_SET_POWER_CAP) ? true : false;
set_default_brightness_aux(link);
@@ -2195,6 +2195,12 @@ void detect_edp_sink_caps(struct dc_link *link)
DP_EDP_MSO_LINK_CAPABILITIES,
(uint8_t *)&link->dpcd_caps.mso_cap_sst_links_supported,
sizeof(link->dpcd_caps.mso_cap_sst_links_supported));
+ /*
+ * Read eDP general capability 2
+ */
+ core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_2,
+ (uint8_t *)&link->dpcd_caps.dp_edp_general_cap_2,
+ sizeof(link->dpcd_caps.dp_edp_general_cap_2));
}
bool dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap)
@@ -2506,3 +2512,40 @@ bool dp_is_sink_present(struct dc_link *link)
return present;
}
+
+uint8_t dp_get_lttpr_count(struct dc_link *link)
+{
+ if (dp_is_lttpr_present(link))
+ return dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
+ return 0;
+}
+
+void edp_get_alpm_support(struct dc_link *link,
+ bool *auxless_support,
+ bool *auxwake_support)
+{
+ bool lttpr_present = dp_is_lttpr_present(link);
+
+ if (auxless_support == NULL || auxwake_support == NULL)
+ return;
+
+ *auxless_support = false;
+ *auxwake_support = false;
+
+ if (!dc_is_embedded_signal(link->connector_signal))
+ return;
+
+ if (link->dpcd_caps.alpm_caps.bits.AUX_LESS_ALPM_CAP) {
+ if (lttpr_present) {
+ if (link->dpcd_caps.lttpr_caps.alpm.bits.AUX_LESS_ALPM_SUPPORTED)
+ *auxless_support = true;
+ } else
+ *auxless_support = true;
+ }
+
+ if (link->dpcd_caps.alpm_caps.bits.AUX_WAKE_ALPM_CAP) {
+ if (!lttpr_present)
+ *auxwake_support = true;
+ }
+}
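
A hypothetical DM-side consumer of the two new queries, wired through link_srv exactly as the link_factory.c hunk above assigns them; the mode-selection policy itself is illustrative:

static void pick_alpm_mode(struct dc_link *link)
{
        bool auxless = false, auxwake = false;
        uint8_t lttpr_count = link->dc->link_srv->dp_get_lttpr_count(link);

        link->dc->link_srv->edp_get_alpm_support(link, &auxless, &auxwake);

        if (auxless) {
                /* all LTTPRs (if any) support AUX-less: prefer it */
        } else if (auxwake) {
                /* only reported when lttpr_count is 0 */
        }
        (void)lttpr_count;
}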
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
index 940b147cc5d4..6e17f72a752f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
@@ -26,7 +26,7 @@
#ifndef __DC_LINK_DP_CAPABILITY_H__
#define __DC_LINK_DP_CAPABILITY_H__
-#include "link.h"
+#include "link_service.h"
bool detect_dp_sink_caps(struct dc_link *link);
@@ -108,4 +108,10 @@ uint32_t link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw);
bool dp_overwrite_extended_receiver_cap(struct dc_link *link);
+uint8_t dp_get_lttpr_count(struct dc_link *link);
+
+void edp_get_alpm_support(struct dc_link *link,
+ bool *auxless_support,
+ bool *auxwake_support);
+
#endif /* __DC_LINK_DP_CAPABILITY_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
index a61edfc9ca7a..7cd03fa4892b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
@@ -27,7 +27,7 @@
#ifndef __DC_LINK_DPIA_H__
#define __DC_LINK_DPIA_H__
-#include "link.h"
+#include "link_service.h"
/* Read tunneling device capability from DPCD and update link capability
* accordingly.
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
index 819bf2d8ba53..8a3c18ae97a7 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
@@ -48,8 +48,7 @@
*/
static bool link_dp_is_bw_alloc_available(struct dc_link *link)
{
- return (link && link->hpd_status
- && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling
+ return (link && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling
&& link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc
&& link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support);
}
@@ -226,35 +225,40 @@ bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link)
bool ret = false;
uint8_t val;
- if (link->hpd_status) {
- val = DPTX_BW_ALLOC_MODE_ENABLE | DPTX_BW_ALLOC_UNMASK_IRQ;
+ if (link->dc->debug.dpia_debug.bits.enable_bw_allocation_mode == false) {
+ DC_LOG_DEBUG("%s: link[%d] DPTX BW allocation mode disabled", __func__, link->link_index);
+ return false;
+ }
- if (core_link_write_dpcd(link, DPTX_BW_ALLOCATION_MODE_CONTROL, &val, sizeof(uint8_t)) == DC_OK) {
- DC_LOG_DEBUG("%s: link[%d] DPTX BW allocation mode enabled", __func__, link->link_index);
+ val = DPTX_BW_ALLOC_MODE_ENABLE | DPTX_BW_ALLOC_UNMASK_IRQ;
- retrieve_usb4_dp_bw_allocation_info(link);
+ if (core_link_write_dpcd(link, DPTX_BW_ALLOCATION_MODE_CONTROL, &val, sizeof(uint8_t)) == DC_OK) {
+ DC_LOG_DEBUG("%s: link[%d] DPTX BW allocation mode enabled", __func__, link->link_index);
- if (link->dpia_bw_alloc_config.nrd_max_link_rate && link->dpia_bw_alloc_config.nrd_max_lane_count) {
- link->reported_link_cap.link_rate = link->dpia_bw_alloc_config.nrd_max_link_rate;
- link->reported_link_cap.lane_count = link->dpia_bw_alloc_config.nrd_max_lane_count;
- }
+ retrieve_usb4_dp_bw_allocation_info(link);
- link->dpia_bw_alloc_config.bw_alloc_enabled = true;
- ret = true;
-
- if (link->dc->debug.dpia_debug.bits.enable_usb4_bw_zero_alloc_patch) {
- /*
- * During DP tunnel creation, the CM preallocates BW
- * and reduces the estimated BW of other DPIAs.
- * The CM releases the preallocation only when the allocation is complete.
- * Perform a zero allocation to make the CM release the preallocation
- * and correctly update the estimated BW for all DPIAs per host router.
- */
- link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0);
- }
- } else
- DC_LOG_DEBUG("%s: link[%d] failed to enable DPTX BW allocation mode", __func__, link->link_index);
- }
+ if (
+ link->dpia_bw_alloc_config.nrd_max_link_rate
+ && link->dpia_bw_alloc_config.nrd_max_lane_count) {
+ link->reported_link_cap.link_rate = link->dpia_bw_alloc_config.nrd_max_link_rate;
+ link->reported_link_cap.lane_count = link->dpia_bw_alloc_config.nrd_max_lane_count;
+ }
+
+ link->dpia_bw_alloc_config.bw_alloc_enabled = true;
+ ret = true;
+
+ if (link->dc->debug.dpia_debug.bits.enable_usb4_bw_zero_alloc_patch) {
+ /*
+ * During DP tunnel creation, the CM preallocates BW
+ * and reduces the estimated BW of other DPIAs.
+ * The CM releases the preallocation only when the allocation is complete.
+ * Perform a zero allocation to make the CM release the preallocation
+ * and correctly update the estimated BW for all DPIAs per host router.
+ */
+ link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0);
+ }
+ } else
+ DC_LOG_DEBUG("%s: link[%d] failed to enable DPTX BW allocation mode", __func__, link->link_index);
return ret;
}
@@ -297,15 +301,12 @@ void dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pe
{
if (link && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling
&& link->dpia_bw_alloc_config.bw_alloc_enabled) {
- //1. Hot Plug
- if (link->hpd_status && peak_bw > 0) {
+ if (peak_bw > 0) {
// If DP over USB4 then we need to check BW allocation
link->dpia_bw_alloc_config.link_max_bw = peak_bw;
link_dpia_send_bw_alloc_request(link, peak_bw);
- }
- //2. Cold Unplug
- else if (!link->hpd_status)
+ } else
dpia_bw_alloc_unplug(link);
}
}
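
With the HPD gating removed above, the sign of peak_bw alone selects the path. A minimal sketch of the simplified dispatch, with the real calls named in comments:

static void handle_bw_request(int peak_bw,
                              void (*request)(int), void (*release)(void))
{
        if (peak_bw > 0)
                request(peak_bw);       /* link_dpia_send_bw_alloc_request() */
        else
                release();              /* dpia_bw_alloc_unplug() */
}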
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
index 41efcb3e44e2..30cd8e2b9d35 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
@@ -26,7 +26,7 @@
#ifndef DC_INC_LINK_DP_DPIA_BW_H_
#define DC_INC_LINK_DP_DPIA_BW_H_
-#include "link.h"
+#include "link_service.h"
/*
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h
index ac33730fedd4..87516fb3b45a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h
@@ -26,7 +26,7 @@
#ifndef __DC_LINK_DP_IRQ_HANDLER_H__
#define __DC_LINK_DP_IRQ_HANDLER_H__
-#include "link.h"
+#include "link_service.h"
bool dp_parse_link_loss_status(
struct dc_link *link,
union hpd_irq_data *hpd_irq_dpcd_data);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h
index ab1c1f8f1f8b..58e154494582 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h
@@ -26,7 +26,7 @@
#ifndef __DC_LINK_DP_PHY_H__
#define __DC_LINK_DP_PHY_H__
-#include "link.h"
+#include "link_service.h"
void dp_enable_link_phy(
struct dc_link *link,
const struct link_resource *link_res,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index 2dc1a660e504..08e2b572e0ff 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -1018,7 +1018,12 @@ static enum link_training_result dpcd_exit_training_mode(struct dc_link *link, e
{
enum dc_status status;
uint8_t sink_status = 0;
- uint8_t i;
+ uint32_t i;
+ uint8_t lttpr_count = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ uint32_t intra_hop_disable_time_ms = (lttpr_count > 0 ? lttpr_count * 300 : 10);
+
+ // Each hop could theoretically take over 256ms (max 128b/132b AUX RD INTERVAL)
+ // To be safe, allow 300ms per LTTPR and 10ms for no LTTPR case
/* clear training pattern set */
status = dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE);
@@ -1028,7 +1033,7 @@ static enum link_training_result dpcd_exit_training_mode(struct dc_link *link, e
if (encoding == DP_128b_132b_ENCODING) {
/* poll for intra-hop disable */
- for (i = 0; i < 10; i++) {
+ for (i = 0; i < intra_hop_disable_time_ms; i++) {
if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) &&
(sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0)
break;
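
A worked version of the new poll budget: each loop iteration is one poll (with an assumed ~1 ms delay between polls), so the budget scales with the repeater count instead of the old fixed 10 iterations:

#include <stdint.h>

static uint32_t intra_hop_budget_ms(uint8_t lttpr_count)
{
        /* >256 ms worst case per hop (max 128b/132b AUX RD INTERVAL),
         * rounded up to 300 ms per LTTPR; 10 ms when none are present. */
        return lttpr_count > 0 ? lttpr_count * 300u : 10u;
}

/* e.g. 2 LTTPRs -> 600 one-millisecond polls instead of the old fixed 10 */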
@@ -1724,6 +1729,15 @@ bool perform_link_training_with_retries(
break;
}
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST &&
+ !link->dc->config.enable_dpia_pre_training) {
+ if (j == (attempts - 1))
+ do_fallback = true;
+ else
+ do_fallback = false;
+ }
+
if (j == (attempts - 1)) {
DC_LOG_WARNING(
"%s: Link(%d) training attempt %u of %d failed @ rate(%d) x lane(%d) @ spread = %x : fail reason:(%d)\n",
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
index 574b083e0936..ce52de22ab7a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
@@ -26,7 +26,7 @@
#ifndef __DC_LINK_DP_TRAINING_H__
#define __DC_LINK_DP_TRAINING_H__
-#include "link.h"
+#include "link_service.h"
bool perform_link_training_with_retries(
const struct dc_link_settings *link_setting,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h
index 08d787a1e451..c2717c678c72 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h
@@ -25,7 +25,7 @@
#ifndef __LINK_DPCD_H__
#define __LINK_DPCD_H__
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
enum dc_status core_link_read_dpcd(
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index e7927b8f5ba3..5e806edbb9f6 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -703,6 +703,20 @@ bool edp_setup_psr(struct dc_link *link,
if (!link)
return false;
+ /* This is a workaround: some vendors require the source to
+ * read the PSR cap; otherwise, the vendor's PSR feature will
+ * fall back to its default behavior, causing a misconfiguration
+ * of this feature.
+ */
+ if (link->panel_config.psr.read_psrcap_again) {
+ dm_helpers_dp_read_dpcd(
+ link->ctx,
+ link,
+ DP_PSR_SUPPORT,
+ &link->dpcd_caps.psr_info.psr_version,
+ sizeof(link->dpcd_caps.psr_info.psr_version));
+ }
+
//Clear PSR cfg
memset(&psr_configuration, 0, sizeof(psr_configuration));
dm_helpers_dp_write_dpcd(
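
A minimal sketch of the re-read workaround above: the cached PSR capability is refreshed from DPCD before programming, so vendor panels do not fall back to their default behavior. The helper signature is illustrative:

#include <stdint.h>
#include <stdbool.h>

static void setup_psr_with_reread(bool read_psrcap_again,
                                  uint8_t (*read_dpcd)(uint32_t addr),
                                  uint8_t *cached_psr_version)
{
        if (read_psrcap_again)
                *cached_psr_version = read_dpcd(0x070); /* DP_PSR_SUPPORT */
        /* ... proceed with PSR configuration using *cached_psr_version ... */
}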
@@ -870,6 +884,8 @@ bool edp_setup_psr(struct dc_link *link,
psr_context->dsc_slice_height = psr_config->dsc_slice_height;
+ psr_context->os_request_force_ffu = psr_config->os_request_force_ffu;
+
if (psr) {
link->psr_settings.psr_feature_enabled = psr->funcs->psr_copy_settings(psr,
link, psr_context, panel_inst);
@@ -944,7 +960,7 @@ bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active,
// TODO: Handle mux change case if force_static is set
// If force_static is set, just change the replay_allow_active state directly
if (replay != NULL && link->replay_settings.replay_feature_enabled)
- replay->funcs->replay_enable(replay, *allow_active, wait, panel_inst, link);
+ replay->funcs->replay_enable(replay, *allow_active, wait, panel_inst);
link->replay_settings.replay_allow_active = *allow_active;
}
@@ -1029,6 +1045,8 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
replay_context.line_time_in_ns = lineTimeInNs;
+ replay_context.os_request_force_ffu = link->replay_settings.config.os_request_force_ffu;
+
link->replay_settings.replay_feature_enabled =
replay->funcs->replay_copy_settings(replay, link, &replay_context, panel_inst);
if (link->replay_settings.replay_feature_enabled) {
@@ -1042,7 +1060,13 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
(uint8_t *)&(replay_config.raw), sizeof(uint8_t));
memset(&alpm_config, 0, sizeof(alpm_config));
- alpm_config.bits.ENABLE = 1;
+ alpm_config.bits.ENABLE = link->replay_settings.config.alpm_mode != DC_ALPM_UNSUPPORTED ? 1 : 0;
+
+ if (link->replay_settings.config.alpm_mode == DC_ALPM_AUXLESS) {
+ alpm_config.bits.ALPM_MODE_SEL = 1;
+ alpm_config.bits.ACDS_PERIOD_DURATION = 0;
+ }
+
dm_helpers_dp_write_dpcd(
link->ctx,
link,
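
The replay hunk above enables ALPM only when a mode is supported and selects AUX-less operation via ALPM_MODE_SEL. A hedged sketch of the register-value construction; the bitfield layout is illustrative, not the DPCD definition, and the enum values are passed in rather than invented:

#include <stdint.h>

union alpm_cfg {                        /* layout illustrative only */
        struct {
                uint8_t ENABLE : 1;
                uint8_t ALPM_MODE_SEL : 1;
                uint8_t ACDS_PERIOD_DURATION : 1;
        } bits;
        uint8_t raw;
};

static uint8_t build_alpm_cfg(int alpm_mode, int alpm_unsupported, int alpm_auxless)
{
        union alpm_cfg cfg = { .raw = 0 };

        cfg.bits.ENABLE = (alpm_mode != alpm_unsupported) ? 1 : 0;
        if (alpm_mode == alpm_auxless) {
                cfg.bits.ALPM_MODE_SEL = 1;     /* select AUX-less operation */
                cfg.bits.ACDS_PERIOD_DURATION = 0;
        }
        return cfg.raw;
}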
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
index 4a475d5b9dde..62a6344e613e 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
@@ -25,7 +25,7 @@
#ifndef __DC_LINK_EDP_PANEL_CONTROL_H__
#define __DC_LINK_EDP_PANEL_CONTROL_H__
-#include "link.h"
+#include "link_service.h"
enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h
index 4fb526b264f9..af529328ba17 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h
@@ -26,7 +26,7 @@
#ifndef __DC_LINK_HPD_H__
#define __DC_LINK_HPD_H__
-#include "link.h"
+#include "link_service.h"
enum hpd_source_id get_hpd_line(struct dc_link *link);
/*
diff --git a/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c
index 259a98e4ee2c..2a422e223bf2 100644
--- a/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c
@@ -284,7 +284,7 @@ void mcifwb2_dump_frame(struct mcif_wb *mcif_wb,
REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_LOCK, 0xf);
- memcpy(dest_luma_buffer, luma_buffer, mcif_params->luma_pitch * dest_height);
+ memcpy(dest_luma_buffer, luma_buffer, (size_t)mcif_params->luma_pitch * dest_height);
memcpy(dest_chroma_buffer, chroma_buffer, mcif_params->chroma_pitch * dest_height / 2);
REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_LOCK, 0x0);
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
index f3fb3fe13757..e1a0308dee57 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
@@ -287,13 +287,6 @@ void mpc401_program_lut_read_write_control(struct mpc *mpc, const enum MCM_LUT_I
}
}
-void mpc401_program_3dlut_size(struct mpc *mpc, bool is_17x17x17, int mpcc_id)
-{
- struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);
-
- REG_UPDATE(MPCC_MCM_3DLUT_MODE[mpcc_id], MPCC_MCM_3DLUT_SIZE, is_17x17x17 ? 0 : 1);
-}
-
void mpc_program_gamut_remap(
struct mpc *mpc,
unsigned int mpcc_id,
@@ -611,7 +604,6 @@ static const struct mpc_funcs dcn401_mpc_funcs = {
.populate_lut = mpc401_populate_lut,
.program_lut_read_write_control = mpc401_program_lut_read_write_control,
.program_lut_mode = mpc401_program_lut_mode,
- .program_3dlut_size = mpc401_program_3dlut_size,
};
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
index eb0c68d0b0c7..fdc42f8ab3ff 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
@@ -221,11 +221,6 @@ void mpc401_program_lut_read_write_control(
bool lut_bank_a,
int mpcc_id);
-void mpc401_program_3dlut_size(
- struct mpc *mpc,
- bool is_17x17x17,
- int mpcc_id);
-
void mpc401_set_gamut_remap(
struct mpc *mpc,
int mpcc_id,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
index d159e3ed3bb3..ead92ad78a23 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
@@ -62,6 +62,7 @@
SF(OTG0_OTG_CONTROL, OTG_DISABLE_POINT_CNTL, mask_sh),\
SF(OTG0_OTG_CONTROL, OTG_FIELD_NUMBER_CNTL, mask_sh),\
SF(OTG0_OTG_CONTROL, OTG_OUT_MUX, mask_sh),\
+ SF(OTG0_OTG_CONTROL, OTG_CURRENT_MASTER_EN_STATE, mask_sh),\
SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EN, mask_sh),\
SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_LINE_NUM, mask_sh),\
SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_POLARITY, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
index 72bff94cb57d..52d5ea98c86b 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
@@ -162,6 +162,8 @@ static bool optc35_disable_crtc(struct timing_generator *optc)
REG_WAIT(OTG_CLOCK_CONTROL,
OTG_BUSY, 0,
1, 100000);
+ REG_WAIT(OTG_CONTROL, OTG_CURRENT_MASTER_EN_STATE, 0, 1, 100000);
+
optc1_clear_optc_underflow(optc);
return true;
@@ -428,6 +430,21 @@ static void optc35_set_long_vtotal(
}
}
+static void optc35_wait_otg_disable(struct timing_generator *optc)
+{
+ struct optc *optc1;
+ uint32_t is_master_en;
+
+ if (!optc || !optc->ctx)
+ return;
+
+ optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_GET(OTG_CONTROL, OTG_MASTER_EN, &is_master_en);
+ if (!is_master_en)
+ REG_WAIT(OTG_CLOCK_CONTROL, OTG_CURRENT_MASTER_EN_STATE, 0, 1, 100000);
+}
+
static const struct timing_generator_funcs dcn35_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
@@ -479,6 +496,7 @@ static const struct timing_generator_funcs dcn35_tg_funcs = {
.set_odm_bypass = optc32_set_odm_bypass,
.set_odm_combine = optc35_set_odm_combine,
.get_optc_source = optc2_get_optc_source,
+ .wait_otg_disable = optc35_wait_otg_disable,
.set_h_timing_div_manual_mode = optc32_set_h_timing_div_manual_mode,
.set_out_mux = optc3_set_out_mux,
.set_drr_trigger_window = optc3_set_drr_trigger_window,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
index ff79c38287df..5af13706e601 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
@@ -226,6 +226,11 @@ bool optc401_disable_crtc(struct timing_generator *optc)
REG_UPDATE(CONTROL,
VTG0_ENABLE, 0);
+ // wait until OTG_CURRENT_MASTER_EN_STATE == 0
+ REG_WAIT(OTG_CONTROL,
+ OTG_CURRENT_MASTER_EN_STATE,
+ 0, 10, 15000);
+
/* CRTC disabled, so disable clock. */
REG_WAIT(OTG_CLOCK_CONTROL,
OTG_BUSY, 0,
diff --git a/drivers/gpu/drm/amd/display/dc/pg/dcn35/dcn35_pg_cntl.c b/drivers/gpu/drm/amd/display/dc/pg/dcn35/dcn35_pg_cntl.c
index af21c0a27f86..72bd43f9bbe2 100644
--- a/drivers/gpu/drm/amd/display/dc/pg/dcn35/dcn35_pg_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/pg/dcn35/dcn35_pg_cntl.c
@@ -79,16 +79,12 @@ void pg_cntl35_dsc_pg_control(struct pg_cntl *pg_cntl, unsigned int dsc_inst, bo
uint32_t power_gate = power_on ? 0 : 1;
uint32_t pwr_status = power_on ? 0 : 2;
uint32_t org_ip_request_cntl = 0;
- bool block_enabled;
-
- /*need to enable dscclk regardless DSC_PG*/
- if (pg_cntl->ctx->dc->res_pool->dccg->funcs->enable_dsc && power_on)
- pg_cntl->ctx->dc->res_pool->dccg->funcs->enable_dsc(
- pg_cntl->ctx->dc->res_pool->dccg, dsc_inst);
+ bool block_enabled = false;
+ bool skip_pg = pg_cntl->ctx->dc->debug.ignore_pg ||
+ pg_cntl->ctx->dc->debug.disable_dsc_power_gate ||
+ pg_cntl->ctx->dc->idle_optimizations_allowed;
- if (pg_cntl->ctx->dc->debug.ignore_pg ||
- pg_cntl->ctx->dc->debug.disable_dsc_power_gate ||
- pg_cntl->ctx->dc->idle_optimizations_allowed)
+ if (skip_pg && !power_on)
return;
block_enabled = pg_cntl35_dsc_pg_status(pg_cntl, dsc_inst);
@@ -111,7 +107,7 @@ void pg_cntl35_dsc_pg_control(struct pg_cntl *pg_cntl, unsigned int dsc_inst, bo
REG_WAIT(DOMAIN16_PG_STATUS,
DOMAIN_PGFSM_PWR_STATUS, pwr_status,
- 1, 1000);
+ 1, 10000);
break;
case 1: /* DSC1 */
REG_UPDATE(DOMAIN17_PG_CONFIG,
@@ -119,7 +115,7 @@ void pg_cntl35_dsc_pg_control(struct pg_cntl *pg_cntl, unsigned int dsc_inst, bo
REG_WAIT(DOMAIN17_PG_STATUS,
DOMAIN_PGFSM_PWR_STATUS, pwr_status,
- 1, 1000);
+ 1, 10000);
break;
case 2: /* DSC2 */
REG_UPDATE(DOMAIN18_PG_CONFIG,
@@ -127,7 +123,7 @@ void pg_cntl35_dsc_pg_control(struct pg_cntl *pg_cntl, unsigned int dsc_inst, bo
REG_WAIT(DOMAIN18_PG_STATUS,
DOMAIN_PGFSM_PWR_STATUS, pwr_status,
- 1, 1000);
+ 1, 10000);
break;
case 3: /* DSC3 */
REG_UPDATE(DOMAIN19_PG_CONFIG,
@@ -135,7 +131,7 @@ void pg_cntl35_dsc_pg_control(struct pg_cntl *pg_cntl, unsigned int dsc_inst, bo
REG_WAIT(DOMAIN19_PG_STATUS,
DOMAIN_PGFSM_PWR_STATUS, pwr_status,
- 1, 1000);
+ 1, 10000);
break;
default:
BREAK_TO_DEBUGGER();
@@ -144,12 +140,6 @@ void pg_cntl35_dsc_pg_control(struct pg_cntl *pg_cntl, unsigned int dsc_inst, bo
if (dsc_inst < MAX_PIPES)
pg_cntl->pg_pipe_res_enable[PG_DSC][dsc_inst] = power_on;
-
- if (pg_cntl->ctx->dc->res_pool->dccg->funcs->disable_dsc && !power_on) {
- /*this is to disable dscclk*/
- pg_cntl->ctx->dc->res_pool->dccg->funcs->disable_dsc(
- pg_cntl->ctx->dc->res_pool->dccg, dsc_inst);
- }
}
static bool pg_cntl35_hubp_dpp_pg_status(struct pg_cntl *pg_cntl, unsigned int hubp_dpp_inst)
@@ -189,11 +179,12 @@ void pg_cntl35_hubp_dpp_pg_control(struct pg_cntl *pg_cntl, unsigned int hubp_dp
uint32_t pwr_status = power_on ? 0 : 2;
uint32_t org_ip_request_cntl;
bool block_enabled;
+ bool skip_pg = pg_cntl->ctx->dc->debug.ignore_pg ||
+ pg_cntl->ctx->dc->debug.disable_hubp_power_gate ||
+ pg_cntl->ctx->dc->debug.disable_dpp_power_gate ||
+ pg_cntl->ctx->dc->idle_optimizations_allowed;
- if (pg_cntl->ctx->dc->debug.ignore_pg ||
- pg_cntl->ctx->dc->debug.disable_hubp_power_gate ||
- pg_cntl->ctx->dc->debug.disable_dpp_power_gate ||
- pg_cntl->ctx->dc->idle_optimizations_allowed)
+ if (skip_pg && !power_on)
return;
block_enabled = pg_cntl35_hubp_dpp_pg_status(pg_cntl, hubp_dpp_inst);
@@ -213,22 +204,22 @@ void pg_cntl35_hubp_dpp_pg_control(struct pg_cntl *pg_cntl, unsigned int hubp_dp
case 0:
/* DPP0 & HUBP0 */
REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN_POWER_GATE, power_gate);
- REG_WAIT(DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);
+ REG_WAIT(DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 10000);
break;
case 1:
/* DPP1 & HUBP1 */
REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN_POWER_GATE, power_gate);
- REG_WAIT(DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);
+ REG_WAIT(DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 10000);
break;
case 2:
/* DPP2 & HUBP2 */
REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN_POWER_GATE, power_gate);
- REG_WAIT(DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);
+ REG_WAIT(DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 10000);
break;
case 3:
/* DPP3 & HUBP3 */
REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN_POWER_GATE, power_gate);
- REG_WAIT(DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 1000);
+ REG_WAIT(DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, pwr_status, 1, 10000);
break;
default:
BREAK_TO_DEBUGGER();
@@ -501,6 +492,36 @@ void pg_cntl35_init_pg_status(struct pg_cntl *pg_cntl)
pg_cntl->pg_res_enable[PG_DWB] = block_enabled;
}
+static void pg_cntl35_print_pg_status(struct pg_cntl *pg_cntl, const char *debug_func, const char *debug_log)
+{
+ int i = 0;
+ bool block_enabled = false;
+
+ DC_LOG_DEBUG("%s: %s", debug_func, debug_log);
+
+ DC_LOG_DEBUG("PG_CNTL status:\n");
+
+ block_enabled = pg_cntl35_io_clk_status(pg_cntl);
+ DC_LOG_DEBUG("ONO0=%d (DCCG, DIO, DCIO)\n", block_enabled ? 1 : 0);
+
+ block_enabled = pg_cntl35_mem_status(pg_cntl);
+ DC_LOG_DEBUG("ONO1=%d (DCHUBBUB, DCHVM, DCHUBBUBMEM)\n", block_enabled ? 1 : 0);
+
+ block_enabled = pg_cntl35_plane_otg_status(pg_cntl);
+ DC_LOG_DEBUG("ONO2=%d (MPC, OPP, OPTC, DWB)\n", block_enabled ? 1 : 0);
+
+ block_enabled = pg_cntl35_hpo_pg_status(pg_cntl);
+ DC_LOG_DEBUG("ONO3=%d (HPO)\n", block_enabled ? 1 : 0);
+
+ for (i = 0; i < pg_cntl->ctx->dc->res_pool->pipe_count; i++) {
+ block_enabled = pg_cntl35_hubp_dpp_pg_status(pg_cntl, i);
+ DC_LOG_DEBUG("ONO%d=%d (DCHUBP%d, DPP%d)\n", 4 + i * 2, block_enabled ? 1 : 0, i, i);
+
+ block_enabled = pg_cntl35_dsc_pg_status(pg_cntl, i);
+ DC_LOG_DEBUG("ONO%d=%d (DSC%d)\n", 5 + i * 2, block_enabled ? 1 : 0, i);
+ }
+}
+
static const struct pg_cntl_funcs pg_cntl35_funcs = {
.init_pg_status = pg_cntl35_init_pg_status,
.dsc_pg_control = pg_cntl35_dsc_pg_control,
@@ -511,7 +532,8 @@ static const struct pg_cntl_funcs pg_cntl35_funcs = {
.mpcc_pg_control = pg_cntl35_mpcc_pg_control,
.opp_pg_control = pg_cntl35_opp_pg_control,
.optc_pg_control = pg_cntl35_optc_pg_control,
- .dwb_pg_control = pg_cntl35_dwb_pg_control
+ .dwb_pg_control = pg_cntl35_dwb_pg_control,
+ .print_pg_status = pg_cntl35_print_pg_status
};
struct pg_cntl *pg_cntl35_create(
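The guard rework in pg_cntl35_dsc_pg_control and pg_cntl35_hubp_dpp_pg_control changes what the debug knobs mean: ignore_pg, the disable_*_power_gate flags, and idle_optimizations_allowed used to suppress the sequence in both directions, whereas they now only suppress power-gating, so power-up requests always reach the PGFSM programming. The per-domain status waits also get a tenfold larger retry budget (1000 -> 10000 polls). A compact restatement of the new decision, using the skip_pg flag from the patch:

	/* skip_pg  power_on     action
	 * false    either   ->  program the PGFSM domain
	 * true     true     ->  program the PGFSM domain (power-up still runs)
	 * true     false    ->  return early (never gate power)
	 */
	if (skip_pg && !power_on)
		return;

The dscclk enable/disable calls that used to bracket the DSC sequence are dropped at the same time, leaving pg_cntl35_dsc_pg_control responsible only for the power domain itself.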
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
index 3a51be63f020..c4b4dc3ad8c9 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
@@ -29,6 +29,7 @@
#include "stream_encoder.h"
#include "resource.h"
+#include "clk_mgr.h"
#include "include/irq_service_interface.h"
#include "virtual/virtual_stream_encoder.h"
#include "dce110/dce110_resource.h"
@@ -836,17 +837,24 @@ static enum dc_status build_mapped_resource(
return DC_OK;
}
-static enum dc_status dce100_validate_bandwidth(
+enum dc_status dce100_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
enum dc_validate_mode validate_mode)
{
int i;
bool at_least_one_pipe = false;
+ struct dc_stream_state *stream = NULL;
+ const uint32_t max_pix_clk_khz = max(dc->clk_mgr->clks.max_supported_dispclk_khz, 400000);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (context->res_ctx.pipe_ctx[i].stream)
+ stream = context->res_ctx.pipe_ctx[i].stream;
+ if (stream) {
at_least_one_pipe = true;
+
+ if (stream->timing.pix_clk_100hz >= max_pix_clk_khz * 10)
+ return DC_FAIL_BANDWIDTH_VALIDATE;
+ }
}
if (at_least_one_pipe) {
@@ -854,7 +862,16 @@ static enum dc_status dce100_validate_bandwidth(
context->bw_ctx.bw.dce.dispclk_khz = 681000;
context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
} else {
- context->bw_ctx.bw.dce.dispclk_khz = 0;
+ /* On DCE 6.0 and 6.4, PLL0 is both the display engine clock and
+ * the DP clock, and shouldn't be turned off. Just select the display
+ * clock value from its low power mode.
+ */
+ if (dc->ctx->dce_version == DCE_VERSION_6_0 ||
+ dc->ctx->dce_version == DCE_VERSION_6_4)
+ context->bw_ctx.bw.dce.dispclk_khz = 352000;
+ else
+ context->bw_ctx.bw.dce.dispclk_khz = 0;
+
context->bw_ctx.bw.dce.yclk_khz = 0;
}
@@ -881,7 +898,7 @@ static bool dce100_validate_surface_sets(
return true;
}
-static enum dc_status dce100_validate_global(
+enum dc_status dce100_validate_global(
struct dc *dc,
struct dc_state *context)
{
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h
index fecab7c560f5..dd150a4b4610 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h
@@ -41,6 +41,15 @@ struct resource_pool *dce100_create_resource_pool(
enum dc_status dce100_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps);
+enum dc_status dce100_validate_global(
+ struct dc *dc,
+ struct dc_state *context);
+
+enum dc_status dce100_validate_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ enum dc_validate_mode validate_mode);
+
enum dc_status dce100_add_stream_to_ctx(
struct dc *dc,
struct dc_state *new_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
index 164ba796f64c..869a8e515fc0 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
@@ -1111,12 +1111,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
&clks);
dc->bw_vbios->low_yclk = bw_frc_to_fixed(
- clks.clocks_in_khz[0] * memory_type_multiplier, 1000);
+ (int64_t)clks.clocks_in_khz[0] * memory_type_multiplier, 1000);
dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
- clks.clocks_in_khz[clks.num_levels>>1] * memory_type_multiplier,
+ (int64_t)clks.clocks_in_khz[clks.num_levels>>1] * memory_type_multiplier,
1000);
dc->bw_vbios->high_yclk = bw_frc_to_fixed(
- clks.clocks_in_khz[clks.num_levels-1] * memory_type_multiplier,
+ (int64_t)clks.clocks_in_khz[clks.num_levels-1] * memory_type_multiplier,
1000);
return;
@@ -1152,12 +1152,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
* YCLK = UMACLK*m_memoryTypeMultiplier
*/
dc->bw_vbios->low_yclk = bw_frc_to_fixed(
- mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000);
+ (int64_t)mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000);
dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier,
+ (int64_t)mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier,
1000);
dc->bw_vbios->high_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier,
+ (int64_t)mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier,
1000);
/* Now notify PPLib/SMU about which Watermarks sets they should select
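The (int64_t) casts must sit on an operand, not on the product: in C a multiply of two 32-bit values is performed in 32 bits and wraps before any later widening can help. A self-contained illustration with made-up values:

#include <stdint.h>

static int64_t scaled_bad(uint32_t khz, uint32_t mult)
{
	return (int64_t)(khz * mult);	/* 32-bit multiply wraps first */
}

static int64_t scaled_good(uint32_t khz, uint32_t mult)
{
	return (int64_t)khz * mult;	/* multiply performed in 64 bits */
}

/* scaled_good(3000000, 4000) == 12000000000, which does not fit in
 * 32 bits; scaled_bad() would return the wrapped 32-bit product instead.
 */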
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
index eb1e158d3436..540e04ec1e2d 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
@@ -67,7 +67,7 @@
#include "reg_helper.h"
#include "dce100/dce100_resource.h"
-#include "link.h"
+#include "link_service.h"
#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
@@ -990,12 +990,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
memory_type_multiplier = MEMORY_TYPE_HBM;
dc->bw_vbios->low_yclk = bw_frc_to_fixed(
- mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000);
+ (int64_t)mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000);
dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier,
+ (int64_t)mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier,
1000);
dc->bw_vbios->high_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier,
+ (int64_t)mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier,
1000);
/* Now notify PPLib/SMU about which Watermarks sets they should select
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
index 58b59d52dc9d..b75be6ad64f6 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
@@ -34,6 +34,7 @@
#include "stream_encoder.h"
#include "resource.h"
+#include "clk_mgr.h"
#include "include/irq_service_interface.h"
#include "irq/dce60/irq_service_dce60.h"
#include "dce110/dce110_timing_generator.h"
@@ -373,7 +374,7 @@ static const struct resource_caps res_cap = {
.num_timing_generator = 6,
.num_audio = 6,
.num_stream_encoder = 6,
- .num_pll = 2,
+ .num_pll = 3,
.num_ddc = 6,
};
@@ -389,7 +390,7 @@ static const struct resource_caps res_cap_64 = {
.num_timing_generator = 2,
.num_audio = 2,
.num_stream_encoder = 2,
- .num_pll = 2,
+ .num_pll = 3,
.num_ddc = 2,
};
@@ -403,13 +404,13 @@ static const struct dc_plane_cap plane_cap = {
},
.max_upscale_factor = {
- .argb8888 = 16000,
+ .argb8888 = 1,
.nv12 = 1,
.fp16 = 1
},
.max_downscale_factor = {
- .argb8888 = 250,
+ .argb8888 = 1,
.nv12 = 1,
.fp16 = 1
}
@@ -863,61 +864,6 @@ static void dce60_resource_destruct(struct dce110_resource_pool *pool)
}
}
-static enum dc_status dce60_validate_bandwidth(
- struct dc *dc,
- struct dc_state *context,
- enum dc_validate_mode validate_mode)
-{
- int i;
- bool at_least_one_pipe = false;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (context->res_ctx.pipe_ctx[i].stream)
- at_least_one_pipe = true;
- }
-
- if (at_least_one_pipe) {
- /* TODO implement when needed but for now hardcode max value*/
- context->bw_ctx.bw.dce.dispclk_khz = 681000;
- context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
- } else {
- context->bw_ctx.bw.dce.dispclk_khz = 0;
- context->bw_ctx.bw.dce.yclk_khz = 0;
- }
-
- return DC_OK;
-}
-
-static bool dce60_validate_surface_sets(
- struct dc_state *context)
-{
- int i;
-
- for (i = 0; i < context->stream_count; i++) {
- if (context->stream_status[i].plane_count == 0)
- continue;
-
- if (context->stream_status[i].plane_count > 1)
- return false;
-
- if (context->stream_status[i].plane_states[0]->format
- >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
- return false;
- }
-
- return true;
-}
-
-static enum dc_status dce60_validate_global(
- struct dc *dc,
- struct dc_state *context)
-{
- if (!dce60_validate_surface_sets(context))
- return DC_FAIL_SURFACE_VALIDATE;
-
- return DC_OK;
-}
-
static void dce60_destroy_resource_pool(struct resource_pool **pool)
{
struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
@@ -931,10 +877,10 @@ static const struct resource_funcs dce60_res_pool_funcs = {
.destroy = dce60_destroy_resource_pool,
.link_enc_create = dce60_link_encoder_create,
.panel_cntl_create = dce60_panel_cntl_create,
- .validate_bandwidth = dce60_validate_bandwidth,
+ .validate_bandwidth = dce100_validate_bandwidth,
.validate_plane = dce100_validate_plane,
.add_stream_to_ctx = dce100_add_stream_to_ctx,
- .validate_global = dce60_validate_global,
+ .validate_global = dce100_validate_global,
.find_first_free_match_stream_enc_for_link = dce100_find_first_free_match_stream_enc_for_link
};
@@ -973,21 +919,24 @@ static bool dce60_construct(
if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) {
pool->base.dp_clock_source =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+ /* DCE 6.0 and 6.4: PLL0 can only be used with DP. Don't initialize it here. */
pool->base.clock_sources[0] =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
pool->base.clock_sources[1] =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
pool->base.clk_src_count = 2;
} else {
pool->base.dp_clock_source =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);
pool->base.clock_sources[0] =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
- pool->base.clk_src_count = 1;
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ pool->base.clock_sources[1] =
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
+ pool->base.clk_src_count = 2;
}
if (pool->base.dp_clock_source == NULL) {
@@ -1365,21 +1314,24 @@ static bool dce64_construct(
if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) {
pool->base.dp_clock_source =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+ /* DCE 6.0 and 6.4: PLL0 can only be used with DP. Don't initialize it here. */
pool->base.clock_sources[0] =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], false);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
pool->base.clock_sources[1] =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
pool->base.clk_src_count = 2;
} else {
pool->base.dp_clock_source =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], true);
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);
pool->base.clock_sources[0] =
- dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false);
- pool->base.clk_src_count = 1;
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ pool->base.clock_sources[1] =
+ dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
+ pool->base.clk_src_count = 2;
}
if (pool->base.dp_clock_source == NULL) {
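With num_pll raised to 3, the allocation in both constructs becomes uniform: when an external DP reference exists (external_clock_source_frequency_for_dp != 0), the DP clock source is EXTERNAL and PLL1/PLL2 back the two general-purpose clock sources while PLL0 is left untouched; otherwise PLL0 is claimed as the DP clock source and PLL1/PLL2 still back the general-purpose sources. Either way clk_src_count is now 2, and each PLL id pairs with its matching clk_src_regs index, which the old dce64 branch (PLL1 with clk_src_regs[0], PLL2 with clk_src_regs[1]) got wrong.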
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
index 3e8b0ac11d90..5b7769745202 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
@@ -32,6 +32,7 @@
#include "stream_encoder.h"
#include "resource.h"
+#include "clk_mgr.h"
#include "include/irq_service_interface.h"
#include "irq/dce80/irq_service_dce80.h"
#include "dce110/dce110_timing_generator.h"
@@ -869,61 +870,6 @@ static void dce80_resource_destruct(struct dce110_resource_pool *pool)
}
}
-static enum dc_status dce80_validate_bandwidth(
- struct dc *dc,
- struct dc_state *context,
- enum dc_validate_mode validate_mode)
-{
- int i;
- bool at_least_one_pipe = false;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (context->res_ctx.pipe_ctx[i].stream)
- at_least_one_pipe = true;
- }
-
- if (at_least_one_pipe) {
- /* TODO implement when needed but for now hardcode max value*/
- context->bw_ctx.bw.dce.dispclk_khz = 681000;
- context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
- } else {
- context->bw_ctx.bw.dce.dispclk_khz = 0;
- context->bw_ctx.bw.dce.yclk_khz = 0;
- }
-
- return DC_OK;
-}
-
-static bool dce80_validate_surface_sets(
- struct dc_state *context)
-{
- int i;
-
- for (i = 0; i < context->stream_count; i++) {
- if (context->stream_status[i].plane_count == 0)
- continue;
-
- if (context->stream_status[i].plane_count > 1)
- return false;
-
- if (context->stream_status[i].plane_states[0]->format
- >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
- return false;
- }
-
- return true;
-}
-
-static enum dc_status dce80_validate_global(
- struct dc *dc,
- struct dc_state *context)
-{
- if (!dce80_validate_surface_sets(context))
- return DC_FAIL_SURFACE_VALIDATE;
-
- return DC_OK;
-}
-
static void dce80_destroy_resource_pool(struct resource_pool **pool)
{
struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
@@ -937,10 +883,10 @@ static const struct resource_funcs dce80_res_pool_funcs = {
.destroy = dce80_destroy_resource_pool,
.link_enc_create = dce80_link_encoder_create,
.panel_cntl_create = dce80_panel_cntl_create,
- .validate_bandwidth = dce80_validate_bandwidth,
+ .validate_bandwidth = dce100_validate_bandwidth,
.validate_plane = dce100_validate_plane,
.add_stream_to_ctx = dce100_add_stream_to_ctx,
- .validate_global = dce80_validate_global,
+ .validate_global = dce100_validate_global,
.find_first_free_match_stream_enc_for_link = dce100_find_first_free_match_stream_enc_for_link
};
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index f9cbdad3ef37..84b38d2d6967 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -85,7 +85,7 @@
#include "vm_helper.h"
#include "link_enc_cfg.h"
-#include "link.h"
+#include "link_service.h"
#define DC_LOGGER_INIT(logger)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
index 895349d9ca07..ff63f59ff928 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
@@ -60,7 +60,7 @@
#include "dml/display_mode_vba.h"
#include "dcn30/dcn30_dccg.h"
#include "dcn10/dcn10_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "dce/dce_panel_cntl.h"
#include "dcn30/dcn30_dwb.h"
@@ -2192,7 +2192,7 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
j = 0;
// create the final dcfclk and uclk table
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
- if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
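The dropped `i < num_dcfclk_sta_targets` test was redundant: the enclosing while condition already guarantees it on every iteration, so removing it changes no behavior. The loop itself has the shape of a two-pointer merge over two ascending lists; a generic, self-contained sketch of that shape:

/* Merge two ascending lists into out[], capped at cap entries.
 * The per-element bound checks are implied by the loop condition,
 * just as in the dcfclk/uclk table merge above.
 */
static int merge_sorted(const int *a, int na, const int *b, int nb,
			int *out, int cap)
{
	int i = 0, j = 0, n = 0;

	while (i < na && j < nb && n < cap)
		out[n++] = (a[i] < b[j]) ? a[i++] : b[j++];
	return n;
}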
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
index 3345068a878c..61623cb518d9 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
@@ -47,7 +47,8 @@
#include "dcn10/dcn10_resource.h"
-#include "link.h"
+#include "link_service.h"
+
#include "dce/dce_abm.h"
#include "dce/dce_audio.h"
#include "dce/dce_aux.h"
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
index 3479e1eab4cd..02b9a84f2db3 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
@@ -47,7 +47,7 @@
#include "dcn10/dcn10_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "dce/dce_abm.h"
#include "dce/dce_audio.h"
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
index 663c49cce4aa..d4917a35b991 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
@@ -927,6 +927,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.enable_legacy_fast_update = true,
.using_dml2 = false,
.disable_dsc_power_gate = true,
+ .min_disp_clk_khz = 100000,
};
static const struct dc_panel_config panel_config_defaults = {
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index 9917b366f00c..3965a7f1b64b 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -69,7 +69,7 @@
#include "dml/display_mode_vba.h"
#include "dcn32/dcn32_dccg.h"
#include "dcn10/dcn10_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn31/dcn31_panel_cntl.h"
#include "dcn30/dcn30_dwb.h"
@@ -739,6 +739,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.fpo_vactive_min_active_margin_us = 200,
.fpo_vactive_max_blank_us = 1000,
.enable_legacy_fast_update = false,
+ .disable_stutter_for_wm_program = true
};
static struct dce_aux *dcn32_aux_engine_create(
@@ -2852,7 +2853,7 @@ struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_opp_head(
free_pipe->plane_res.xfm = pool->transforms[free_pipe_idx];
free_pipe->plane_res.dpp = pool->dpps[free_pipe_idx];
free_pipe->plane_res.mpcc_inst = pool->dpps[free_pipe_idx]->inst;
- free_pipe->hblank_borrow = otg_master->hblank_borrow;
+ free_pipe->dsc_padding_params = otg_master->dsc_padding_params;
if (free_pipe->stream->timing.flags.DSC == 1) {
dcn20_acquire_dsc(free_pipe->stream->ctx->dc,
&new_ctx->res_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
index 82f966cf4ed2..99f0432288b4 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
@@ -1141,7 +1141,8 @@ unsigned int dcn32_get_max_hw_cursor_size(const struct dc *dc,
SRI_ARR(DCN_SURF1_TTU_CNTL1, HUBPREQ, id), \
SRI_ARR(DCN_CUR0_TTU_CNTL0, HUBPREQ, id), \
SRI_ARR(DCN_CUR0_TTU_CNTL1, HUBPREQ, id), \
- SRI_ARR(HUBP_CLK_CNTL, HUBP, id)
+ SRI_ARR(HUBP_CLK_CNTL, HUBP, id), \
+ SRI_ARR(HUBPRET_READ_LINE_VALUE, HUBPRET, id)
#define HUBP_REG_LIST_DCN2_COMMON_RI(id) \
HUBP_REG_LIST_DCN_RI(id), HUBP_REG_LIST_DCN_VM_RI(id), \
SRI_ARR(PREFETCH_SETTINGS, HUBPREQ, id), \
@@ -1229,7 +1230,8 @@ unsigned int dcn32_get_max_hw_cursor_size(const struct dc *dc,
SR(DCHUBBUB_ARB_MALL_CNTL), \
SR(DCN_VM_FAULT_ADDR_MSB), SR(DCN_VM_FAULT_ADDR_LSB), \
SR(DCN_VM_FAULT_CNTL), SR(DCN_VM_FAULT_STATUS), \
- SR(SDPIF_REQUEST_RATE_LIMIT)
+ SR(SDPIF_REQUEST_RATE_LIMIT), \
+ SR(DCHUBBUB_SDPIF_CFG0)
/* DCCG */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index 061c0907d802..ad214986f7ac 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -72,7 +72,7 @@
#include "dml/display_mode_vba.h"
#include "dcn32/dcn32_dccg.h"
#include "dcn10/dcn10_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn31/dcn31_panel_cntl.h"
#include "dcn30/dcn30_dwb.h"
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index 8475c6eec547..fff57f23f4f7 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -61,7 +61,7 @@
#include "dcn31/dcn31_hpo_dp_stream_encoder.h"
#include "dcn31/dcn31_hpo_dp_link_encoder.h"
#include "dcn32/dcn32_hpo_dp_link_encoder.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn31/dcn31_apg.h"
#include "dcn32/dcn32_dio_link_encoder.h"
#include "dcn31/dcn31_vpg.h"
@@ -1760,6 +1760,20 @@ enum dc_status dcn35_patch_unknown_plane_state(struct dc_plane_state *plane_stat
}
+static int populate_dml_pipes_from_context_fpu(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ enum dc_validate_mode validate_mode)
+{
+ int ret;
+
+ DC_FP_START();
+ ret = dcn35_populate_dml_pipes_from_context_fpu(dc, context, pipes, validate_mode);
+ DC_FP_END();
+
+ return ret;
+}
+
static struct resource_funcs dcn35_res_pool_funcs = {
.destroy = dcn35_destroy_resource_pool,
.link_enc_create = dcn35_link_encoder_create,
@@ -1770,7 +1784,7 @@ static struct resource_funcs dcn35_res_pool_funcs = {
.validate_bandwidth = dcn35_validate_bandwidth,
.calculate_wm_and_dlg = NULL,
.update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
- .populate_dml_pipes = dcn35_populate_dml_pipes_from_context_fpu,
+ .populate_dml_pipes = populate_dml_pipes_from_context_fpu,
.acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.release_pipe = dcn20_release_pipe,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
@@ -1900,9 +1914,6 @@ static bool dcn35_resource_construct(
dc->caps.num_of_host_routers = 2;
dc->caps.num_of_dpias_per_host_router = 2;
- dc->caps.num_of_host_routers = 2;
- dc->caps.num_of_dpias_per_host_router = 2;
-
/* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order
* to provide some margin.
* It's expected for future ASICs to have an equal or higher value, in order to
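dcn35_populate_dml_pipes_from_context_fpu uses floating point, but callers reach it through the resource_funcs table and may not hold an FPU context; the new static wrapper brackets the call with DC_FP_START()/DC_FP_END() so every path through the table is covered. The same pattern is applied to dcn351 and dcn36 below. A sketch of the general shape, assuming DC's FPU helpers:

	/* Keep FP-using helpers off the function table; export a thin
	 * wrapper that owns the FPU region instead.
	 */
	static int fp_safe_call(int (*fp_fn)(void *ctx), void *ctx)
	{
		int ret;

		DC_FP_START();		/* claim the kernel FPU for this region */
		ret = fp_fn(ctx);	/* body may use float/double freely */
		DC_FP_END();
		return ret;
	}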
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
index 0971c0f74186..0abd163b425e 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
@@ -40,7 +40,7 @@
#include "dcn31/dcn31_hpo_dp_stream_encoder.h"
#include "dcn31/dcn31_hpo_dp_link_encoder.h"
#include "dcn32/dcn32_hpo_dp_link_encoder.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn31/dcn31_apg.h"
#include "dcn32/dcn32_dio_link_encoder.h"
#include "dcn31/dcn31_vpg.h"
@@ -1732,6 +1732,21 @@ static enum dc_status dcn351_validate_bandwidth(struct dc *dc,
return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
+static int populate_dml_pipes_from_context_fpu(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ enum dc_validate_mode validate_mode)
+{
+ int ret;
+
+ DC_FP_START();
+ ret = dcn351_populate_dml_pipes_from_context_fpu(dc, context, pipes, validate_mode);
+ DC_FP_END();
+
+ return ret;
+}
+
static struct resource_funcs dcn351_res_pool_funcs = {
.destroy = dcn351_destroy_resource_pool,
.link_enc_create = dcn35_link_encoder_create,
@@ -1742,7 +1757,7 @@ static struct resource_funcs dcn351_res_pool_funcs = {
.validate_bandwidth = dcn351_validate_bandwidth,
.calculate_wm_and_dlg = NULL,
.update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
- .populate_dml_pipes = dcn351_populate_dml_pipes_from_context_fpu,
+ .populate_dml_pipes = populate_dml_pipes_from_context_fpu,
.acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.release_pipe = dcn20_release_pipe,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
@@ -1872,9 +1887,6 @@ static bool dcn351_resource_construct(
dc->caps.num_of_host_routers = 2;
dc->caps.num_of_dpias_per_host_router = 2;
- dc->caps.num_of_host_routers = 2;
- dc->caps.num_of_dpias_per_host_router = 2;
-
/* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order
* to provide some margin.
* It's expected for future ASICs to have an equal or higher value, in order to
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
index 8bae7fcedc22..ca125ee6c2fb 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
@@ -40,7 +40,7 @@
#include "dcn31/dcn31_hpo_dp_stream_encoder.h"
#include "dcn31/dcn31_hpo_dp_link_encoder.h"
#include "dcn32/dcn32_hpo_dp_link_encoder.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn31/dcn31_apg.h"
#include "dcn32/dcn32_dio_link_encoder.h"
#include "dcn31/dcn31_vpg.h"
@@ -1734,6 +1734,20 @@ static enum dc_status dcn35_validate_bandwidth(struct dc *dc,
}
+static int populate_dml_pipes_from_context_fpu(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ enum dc_validate_mode validate_mode)
+{
+ int ret;
+
+ DC_FP_START();
+ ret = dcn35_populate_dml_pipes_from_context_fpu(dc, context, pipes, validate_mode);
+ DC_FP_END();
+
+ return ret;
+}
+
static struct resource_funcs dcn36_res_pool_funcs = {
.destroy = dcn36_destroy_resource_pool,
.link_enc_create = dcn35_link_encoder_create,
@@ -1744,7 +1758,7 @@ static struct resource_funcs dcn36_res_pool_funcs = {
.validate_bandwidth = dcn35_validate_bandwidth,
.calculate_wm_and_dlg = NULL,
.update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
- .populate_dml_pipes = dcn35_populate_dml_pipes_from_context_fpu,
+ .populate_dml_pipes = populate_dml_pipes_from_context_fpu,
.acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.release_pipe = dcn20_release_pipe,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
@@ -1873,9 +1887,6 @@ static bool dcn36_resource_construct(
dc->caps.num_of_host_routers = 2;
dc->caps.num_of_dpias_per_host_router = 2;
- dc->caps.num_of_host_routers = 2;
- dc->caps.num_of_dpias_per_host_router = 2;
-
/* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order
* to provide some margin.
* It's expected for future ASICs to have an equal or higher value, in order to
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
index b3988e38d0a6..1d18807e4749 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
@@ -50,7 +50,7 @@
#include "dml/display_mode_vba.h"
#include "dcn401/dcn401_dccg.h"
#include "dcn10/dcn10_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "link_enc_cfg.h"
#include "dcn31/dcn31_panel_cntl.h"
@@ -708,6 +708,7 @@ static const struct dc_debug_options debug_defaults_drv = {
},
.use_max_lb = true,
.force_disable_subvp = false,
+ .disable_force_pstate_allow_on_hw_release = false,
.exit_idle_opt_for_cursor_updates = true,
.using_dml2 = true,
.using_dml21 = true,
@@ -1698,6 +1699,9 @@ static void dcn401_build_pipe_pix_clk_params(struct pipe_ctx *pipe_ctx)
pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
+ if (pipe_ctx->dsc_padding_params.dsc_hactive_padding != 0)
+ pixel_clk_params->requested_pix_clk_100hz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz;
+
if (!pipe_ctx->stream->ctx->dc->config.unify_link_enc_assignment)
link_enc = link_enc_cfg_get_link_enc(link);
if (link_enc)
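The reading here: when DSC h-active padding is in effect (dsc_hactive_padding != 0), the stream timing's nominal pix_clk_100hz no longer matches the padded timing the OTG has to drive, so the pre-computed dsc_pix_clk_100hz overrides it. This pairs with the dcn32 change above that copies dsc_padding_params (in place of the removed hblank_borrow field) into secondary OPP-head pipes, keeping both pipes of a split on the same padded clock.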
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
index 2ae6831c31ef..0fc66487d800 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
@@ -140,7 +140,8 @@ void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context);
SRI_ARR(UCLK_PSTATE_FORCE, HUBPREQ, id), \
HUBP_3DLUT_FL_REG_LIST_DCN401(id), \
SRI_ARR(DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE, HUBP, id), \
- SRI_ARR(DCHUBP_MCACHEID_CONFIG, HUBP, id)
+ SRI_ARR(DCHUBP_MCACHEID_CONFIG, HUBP, id), \
+ SRI_ARR(HUBPRET_READ_LINE_VALUE, HUBPRET, id)
/* ABM */
#define ABM_DCN401_REG_LIST_RI(id) \
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile
new file mode 100644
index 000000000000..bc93356a0b5b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: MIT
+#
+# Copyright 2025 Advanced Micro Devices, Inc.
+# Makefile for the bounding box component.
+# Floating point is required due to the nature of bounding box values
+
+soc_and_ip_translator_ccflags := $(CC_FLAGS_FPU)
+soc_and_ip_translator_rcflags := $(CC_FLAGS_NO_FPU)
+
+CFLAGS_$(AMDDALPATH)/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.o := $(soc_and_ip_translator_ccflags)
+
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.o := $(soc_and_ip_translator_rcflags)
+
+soc_and_ip_translator := soc_and_ip_translator.o
+soc_and_ip_translator += dcn401/dcn401_soc_and_ip_translator.o
+
+AMD_DAL_soc_and_ip_translator := $(addprefix $(AMDDALPATH)/dc/soc_and_ip_translator/, $(soc_and_ip_translator))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_soc_and_ip_translator)
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c
new file mode 100644
index 000000000000..3190c76eb482
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#include "dcn401_soc_and_ip_translator.h"
+#include "bounding_boxes/dcn4_soc_bb.h"
+
+/* The soc_and_ip_translator component is used to get up-to-date bounding box values.
+ * Bounding box values are stored in several locations, and those locations can vary with DCN revision.
+ * This component provides an interface to get DCN-specific bounding box values.
+ */
+
+static void get_default_soc_bb(struct dml2_soc_bb *soc_bb)
+{
+ memcpy(soc_bb, &dml2_socbb_dcn401, sizeof(struct dml2_soc_bb));
+ memcpy(&soc_bb->qos_parameters, &dml_dcn4_variant_a_soc_qos_params, sizeof(struct dml2_soc_qos_parameters));
+}
+
+/*
+ * The DC clock table is obtained from the SMU at runtime.
+ * SMU stands for System Management Unit; it is a power management processor.
+ * It owns the initialization of dc's clock table and the programming of clock
+ * values based on dc's requests.
+ * The clock values in the base soc bb are dummy placeholders. The real clock
+ * values are retrieved from SMU firmware into dc's clock table at runtime.
+ * This function overrides the dummy placeholders with the real values from
+ * dc's clock table.
+ */
+static void dcn401_convert_dc_clock_table_to_soc_bb_clock_table(
+ struct dml2_soc_state_table *dml_clk_table,
+ const struct clk_bw_params *dc_bw_params,
+ bool use_clock_dc_limits)
+{
+ int i;
+ const struct clk_limit_table *dc_clk_table;
+
+ if (dc_bw_params == NULL)
+ /* skip if bw params could not be obtained from smu */
+ return;
+
+ dc_clk_table = &dc_bw_params->clk_table;
+
+ /* dcfclk */
+ if (dc_clk_table->num_entries_per_clk.num_dcfclk_levels) {
+ dml_clk_table->dcfclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dcfclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dcfclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.dcfclk_mhz &&
+ dc_clk_table->entries[i].dcfclk_mhz > dc_bw_params->dc_mode_limit.dcfclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].dcfclk_mhz < dc_bw_params->dc_mode_limit.dcfclk_mhz) {
+ dml_clk_table->dcfclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dcfclk_mhz * 1000;
+ dml_clk_table->dcfclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->dcfclk.clk_values_khz[i] = 0;
+ dml_clk_table->dcfclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->dcfclk.clk_values_khz[i] = dc_clk_table->entries[i].dcfclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->dcfclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* fclk */
+ if (dc_clk_table->num_entries_per_clk.num_fclk_levels) {
+ dml_clk_table->fclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_fclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->fclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.fclk_mhz &&
+ dc_clk_table->entries[i].fclk_mhz > dc_bw_params->dc_mode_limit.fclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].fclk_mhz < dc_bw_params->dc_mode_limit.fclk_mhz) {
+ dml_clk_table->fclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.fclk_mhz * 1000;
+ dml_clk_table->fclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->fclk.clk_values_khz[i] = 0;
+ dml_clk_table->fclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->fclk.clk_values_khz[i] = dc_clk_table->entries[i].fclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->fclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* uclk */
+ if (dc_clk_table->num_entries_per_clk.num_memclk_levels) {
+ dml_clk_table->uclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_memclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->uclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.memclk_mhz &&
+ dc_clk_table->entries[i].memclk_mhz > dc_bw_params->dc_mode_limit.memclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].memclk_mhz < dc_bw_params->dc_mode_limit.memclk_mhz) {
+ dml_clk_table->uclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.memclk_mhz * 1000;
+ dml_clk_table->uclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->uclk.clk_values_khz[i] = 0;
+ dml_clk_table->uclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->uclk.clk_values_khz[i] = dc_clk_table->entries[i].memclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->uclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* dispclk */
+ if (dc_clk_table->num_entries_per_clk.num_dispclk_levels) {
+ dml_clk_table->dispclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dispclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dispclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.dispclk_mhz &&
+ dc_clk_table->entries[i].dispclk_mhz > dc_bw_params->dc_mode_limit.dispclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].dispclk_mhz < dc_bw_params->dc_mode_limit.dispclk_mhz) {
+ dml_clk_table->dispclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dispclk_mhz * 1000;
+ dml_clk_table->dispclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->dispclk.clk_values_khz[i] = 0;
+ dml_clk_table->dispclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->dispclk.clk_values_khz[i] = dc_clk_table->entries[i].dispclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->dispclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* dppclk */
+ if (dc_clk_table->num_entries_per_clk.num_dppclk_levels) {
+ dml_clk_table->dppclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dppclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dppclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.dppclk_mhz &&
+ dc_clk_table->entries[i].dppclk_mhz > dc_bw_params->dc_mode_limit.dppclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].dppclk_mhz < dc_bw_params->dc_mode_limit.dppclk_mhz) {
+ dml_clk_table->dppclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dppclk_mhz * 1000;
+ dml_clk_table->dppclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->dppclk.clk_values_khz[i] = 0;
+ dml_clk_table->dppclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->dppclk.clk_values_khz[i] = dc_clk_table->entries[i].dppclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->dppclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* dtbclk */
+ if (dc_clk_table->num_entries_per_clk.num_dtbclk_levels) {
+ dml_clk_table->dtbclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dtbclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dtbclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.dtbclk_mhz &&
+ dc_clk_table->entries[i].dtbclk_mhz > dc_bw_params->dc_mode_limit.dtbclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].dtbclk_mhz < dc_bw_params->dc_mode_limit.dtbclk_mhz) {
+ dml_clk_table->dtbclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dtbclk_mhz * 1000;
+ dml_clk_table->dtbclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->dtbclk.clk_values_khz[i] = 0;
+ dml_clk_table->dtbclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->dtbclk.clk_values_khz[i] = dc_clk_table->entries[i].dtbclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->dtbclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* socclk */
+ if (dc_clk_table->num_entries_per_clk.num_socclk_levels) {
+ dml_clk_table->socclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_socclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->socclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.socclk_mhz &&
+ dc_clk_table->entries[i].socclk_mhz > dc_bw_params->dc_mode_limit.socclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].socclk_mhz < dc_bw_params->dc_mode_limit.socclk_mhz) {
+ dml_clk_table->socclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.socclk_mhz * 1000;
+ dml_clk_table->socclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->socclk.clk_values_khz[i] = 0;
+ dml_clk_table->socclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->socclk.clk_values_khz[i] = dc_clk_table->entries[i].socclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->socclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* dram config */
+ dml_clk_table->dram_config.channel_count = dc_bw_params->num_channels;
+ dml_clk_table->dram_config.channel_width_bytes = dc_bw_params->dram_channel_width_bytes;
+}
+
+void dcn401_update_soc_bb_with_values_from_clk_mgr(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config)
+{
+ soc_bb->dprefclk_mhz = dc->clk_mgr->dprefclk_khz / 1000;
+ soc_bb->dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+ soc_bb->mall_allocated_for_dcn_mbytes = dc->caps.mall_size_total / (1024 * 1024);
+
+ if (dc->clk_mgr->funcs->is_smu_present &&
+ dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr)) {
+ dcn401_convert_dc_clock_table_to_soc_bb_clock_table(&soc_bb->clk_table,
+ dc->clk_mgr->bw_params,
+ config->use_clock_dc_limits);
+ }
+}
+
+void dcn401_update_soc_bb_with_values_from_vbios(struct dml2_soc_bb *soc_bb, const struct dc *dc)
+{
+ soc_bb->dchub_refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
+ soc_bb->xtalclk_mhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000;
+
+ /* latencies in vbios are platform specific and should be used if provided */
+ if (dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns)
+ soc_bb->power_management_parameters.dram_clk_change_blackout_us =
+ dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns / 10.0;
+
+ if (dc->ctx->dc_bios->bb_info.dram_sr_enter_exit_latency_100ns)
+ soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us =
+ dc->ctx->dc_bios->bb_info.dram_sr_enter_exit_latency_100ns / 10.0;
+
+ if (dc->ctx->dc_bios->bb_info.dram_sr_exit_latency_100ns)
+ soc_bb->power_management_parameters.stutter_exit_latency_us =
+ dc->ctx->dc_bios->bb_info.dram_sr_exit_latency_100ns / 10.0;
+}
+
+void dcn401_update_soc_bb_with_values_from_software_policy(struct dml2_soc_bb *soc_bb, const struct dc *dc)
+{
+ /* set if the value is provided */
+ if (dc->bb_overrides.sr_exit_time_ns)
+ soc_bb->power_management_parameters.stutter_exit_latency_us =
+ dc->bb_overrides.sr_exit_time_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_enter_plus_exit_time_ns)
+ soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us =
+ dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
+
+ if (dc->bb_overrides.dram_clock_change_latency_ns)
+ soc_bb->power_management_parameters.dram_clk_change_blackout_us =
+ dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
+
+ if (dc->bb_overrides.fclk_clock_change_latency_ns)
+ soc_bb->power_management_parameters.fclk_change_blackout_us =
+ dc->bb_overrides.fclk_clock_change_latency_ns / 1000.0;
+
+ // Z8 values are neither expected nor used on DCN401, but are added for completeness
+ if (dc->bb_overrides.sr_exit_z8_time_ns)
+ soc_bb->power_management_parameters.z8_stutter_exit_latency_us =
+ dc->bb_overrides.sr_exit_z8_time_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_enter_plus_exit_z8_time_ns)
+ soc_bb->power_management_parameters.z8_stutter_enter_plus_exit_latency_us =
+ dc->bb_overrides.sr_enter_plus_exit_z8_time_ns / 1000.0;
+}
+
+static void apply_soc_bb_updates(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config)
+{
+ /* Individual modifications can be overwritten even if they were obtained by a previous function.
+ * Modifications are applied in order of priority (lowest to highest).
+ */
+ dc_assert_fp_enabled();
+
+ dcn401_update_soc_bb_with_values_from_clk_mgr(soc_bb, dc, config);
+ dcn401_update_soc_bb_with_values_from_vbios(soc_bb, dc);
+ dcn401_update_soc_bb_with_values_from_software_policy(soc_bb, dc);
+}
+
+void dcn401_get_soc_bb(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config)
+{
+ //get default soc_bb with static values
+ get_default_soc_bb(soc_bb);
+ //update soc_bb values with more accurate values
+ apply_soc_bb_updates(soc_bb, dc, config);
+}
+
+static void dcn401_get_ip_caps(struct dml2_ip_capabilities *ip_caps)
+{
+ *ip_caps = dml2_dcn401_max_ip_caps;
+}
+
+static struct soc_and_ip_translator_funcs dcn401_translator_funcs = {
+ .get_soc_bb = dcn401_get_soc_bb,
+ .get_ip_caps = dcn401_get_ip_caps,
+};
+
+void dcn401_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator)
+{
+ soc_and_ip_translator->translator_funcs = &dcn401_translator_funcs;
+}
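The seven per-clock blocks in dcn401_convert_dc_clock_table_to_soc_bb_clock_table are identical apart from the field names: each copies MHz DPM levels into a kHz table and, when a DC-mode cap applies, clamps the first over-cap level to the cap and truncates everything after it. The shared core, as a sketch over plain arrays (the zero-fill of unused trailing slots is omitted):

static int copy_levels_clamped(const int *src_mhz, int n_src,
			       int cap_mhz, int *dst_khz, int n_dst)
{
	int i, n = 0;

	for (i = 0; i < n_src && i < n_dst; i++) {
		if (cap_mhz && src_mhz[i] > cap_mhz) {
			/* The first over-cap level becomes the cap itself,
			 * unless the previous level already reached it.
			 */
			if (i == 0 || src_mhz[i - 1] < cap_mhz)
				dst_khz[n++] = cap_mhz * 1000;
			break;	/* remaining levels are dropped */
		}
		dst_khz[n++] = src_mhz[i] * 1000;
	}
	return n;	/* number of valid kHz entries */
}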
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h
new file mode 100644
index 000000000000..21d842857601
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#ifndef _DCN401_SOC_AND_IP_TRANSLATOR_H_
+#define _DCN401_SOC_AND_IP_TRANSLATOR_H_
+
+#include "core_types.h"
+#include "dc.h"
+#include "clk_mgr.h"
+#include "soc_and_ip_translator.h"
+#include "dml2/dml21/inc/dml_top_soc_parameter_types.h"
+
+void dcn401_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator);
+
+/* Functions that can be re-used by higher DCN revisions of this component */
+void dcn401_get_soc_bb(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config);
+void dcn401_update_soc_bb_with_values_from_clk_mgr(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config);
+void dcn401_update_soc_bb_with_values_from_vbios(struct dml2_soc_bb *soc_bb, const struct dc *dc);
+void dcn401_update_soc_bb_with_values_from_software_policy(struct dml2_soc_bb *soc_bb, const struct dc *dc);
+
+#endif /* _DCN401_SOC_AND_IP_TRANSLATOR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c
new file mode 100644
index 000000000000..c9e224d262c9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#include "dcn42_soc_and_ip_translator.h"
+#include "soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h"
+#include "bounding_boxes/dcn42_soc_bb.h"
+
+/* The soc_and_ip_translator component is used to get up-to-date bounding box values.
+ * Bounding box values are stored in several locations, and those locations can vary with DCN revision.
+ * This component provides an interface to get DCN-specific bounding box values.
+ */
+
+static void dcn42_get_ip_caps(struct dml2_ip_capabilities *ip_caps)
+{
+ *ip_caps = dml2_dcn42_max_ip_caps;
+}
+
+static struct soc_and_ip_translator_funcs dcn42_translator_funcs = {
+ .get_soc_bb = dcn401_get_soc_bb,
+ .get_ip_caps = dcn42_get_ip_caps,
+};
+
+void dcn42_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator)
+{
+ soc_and_ip_translator->translator_funcs = &dcn42_translator_funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h
new file mode 100644
index 000000000000..914dcbb369a7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#ifndef _DCN42_SOC_AND_IP_TRANSLATOR_H_
+#define _DCN42_SOC_AND_IP_TRANSLATOR_H_
+
+#include "core_types.h"
+#include "dc.h"
+#include "clk_mgr.h"
+#include "dml_top_soc_parameter_types.h"
+#include "soc_and_ip_translator.h"
+
+void dcn42_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator);
+
+#endif /* _DCN42_SOC_AND_IP_TRANSLATOR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c
new file mode 100644
index 000000000000..0fc0e5a6c171
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#include "soc_and_ip_translator.h"
+#include "soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h"
+
+static void dc_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator,
+ enum dce_version dc_version)
+{
+ switch (dc_version) {
+ case DCN_VERSION_4_01:
+ dcn401_construct_soc_and_ip_translator(soc_and_ip_translator);
+ break;
+ default:
+ break;
+ }
+}
+
+struct soc_and_ip_translator *dc_create_soc_and_ip_translator(enum dce_version dc_version)
+{
+ struct soc_and_ip_translator *soc_and_ip_translator;
+
+ soc_and_ip_translator = kzalloc(sizeof(*soc_and_ip_translator), GFP_KERNEL);
+ if (!soc_and_ip_translator)
+ return NULL;
+
+ dc_construct_soc_and_ip_translator(soc_and_ip_translator, dc_version);
+
+ return soc_and_ip_translator;
+}
+
+void dc_destroy_soc_and_ip_translator(struct soc_and_ip_translator **soc_and_ip_translator)
+{
+ kfree(*soc_and_ip_translator);
+ *soc_and_ip_translator = NULL;
+}
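Putting the pieces together, a hypothetical caller creates the translator for its DCE version and works through translator_funcs; note that apply_soc_bb_updates() asserts an active FPU context via dc_assert_fp_enabled(), so a real get_soc_bb() call must be bracketed with DC_FP_START()/DC_FP_END(). Illustrative only — dc, config, and soc_bb stand for caller-provided state:

	struct soc_and_ip_translator *xlat =
		dc_create_soc_and_ip_translator(DCN_VERSION_4_01);
	struct dml2_ip_capabilities ip_caps;

	if (xlat && xlat->translator_funcs) {
		xlat->translator_funcs->get_ip_caps(&ip_caps);

		DC_FP_START();	/* get_soc_bb requires an FPU context */
		xlat->translator_funcs->get_soc_bb(soc_bb, dc, config);
		DC_FP_END();
	}
	dc_destroy_soc_and_ip_translator(&xlat);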
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
index 55b929ca7982..b1fb0f8a253a 100644
--- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
@@ -641,16 +641,16 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in,
/* this gives the direction of the cositing (negative will move
* left, right otherwise)
*/
- int sign = 1;
+ int h_sign = flip_horz_scan_dir ? -1 : 1;
+ int v_sign = flip_vert_scan_dir ? -1 : 1;
switch (spl_in->basic_in.cositing) {
-
case CHROMA_COSITING_TOPLEFT:
- init_adj_h = spl_fixpt_from_fraction(sign, 4);
- init_adj_v = spl_fixpt_from_fraction(sign, 4);
+ init_adj_h = spl_fixpt_from_fraction(h_sign, 4);
+ init_adj_v = spl_fixpt_from_fraction(v_sign, 4);
break;
case CHROMA_COSITING_LEFT:
- init_adj_h = spl_fixpt_from_fraction(sign, 4);
+ init_adj_h = spl_fixpt_from_fraction(h_sign, 4);
init_adj_v = spl_fixpt_zero;
break;
case CHROMA_COSITING_NONE:
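Worked example of the fix: with 4:2:0 TOPLEFT cositing the chroma plane is offset by a quarter of a luma sample in each axis, so the scaler init normally gets a +1/4 nudge horizontally and vertically. When a scan direction is mirrored (flip_horz_scan_dir or flip_vert_scan_dir), that quarter-sample offset points the other way along the mirrored axis, so the nudge must become -1/4 there while the other axis keeps +1/4. The old fixed `sign = 1` applied +1/4 regardless, misplacing chroma on mirrored surfaces; the per-axis h_sign/v_sign now encode exactly this.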
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
index ad088d70e189..6ffc74fc9dcd 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
@@ -44,6 +44,11 @@ static void virtual_stream_encoder_dvi_set_stream_attribute(
struct dc_crtc_timing *crtc_timing,
bool is_dual_link) {}
+static void virtual_stream_encoder_lvds_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing)
+{}
+
static void virtual_stream_encoder_set_throttled_vcp_size(
struct stream_encoder *enc,
struct fixed31_32 avg_time_slots_per_mtp)
@@ -115,6 +120,8 @@ static const struct stream_encoder_funcs virtual_str_enc_funcs = {
virtual_stream_encoder_hdmi_set_stream_attribute,
.dvi_set_stream_attribute =
virtual_stream_encoder_dvi_set_stream_attribute,
+ .lvds_set_stream_attribute =
+ virtual_stream_encoder_lvds_set_stream_attribute,
.set_throttled_vcp_size =
virtual_stream_encoder_set_throttled_vcp_size,
.update_hdmi_info_packets =
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index 0bafb6710761..338fdc651f2c 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -316,6 +316,7 @@ struct dmub_srv_hw_params {
bool disable_sldo_opt;
bool enable_non_transparent_setconfig;
bool lower_hbr3_phy_ssc;
+ bool override_hbr3_pll_vco;
};
/**
@@ -567,6 +568,7 @@ struct dmub_srv {
bool sw_init;
bool hw_init;
+ bool dpia_supported;
uint64_t fb_base;
uint64_t fb_offset;
@@ -597,6 +599,8 @@ struct dmub_notification {
enum dmub_notification_type type;
uint8_t link_index;
uint8_t result;
+ /* notify instance from DMUB */
+ uint8_t instance;
bool pending_notification;
union {
struct aux_reply_data aux_reply;
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index c587b3441e07..92248224b713 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -843,7 +843,8 @@ union dmub_fw_boot_options {
uint32_t ips_sequential_ono: 1; /**< 1 to enable sequential ONO IPS sequence */
uint32_t disable_sldo_opt: 1; /**< 1 to disable SLDO optimizations */
uint32_t lower_hbr3_phy_ssc: 1; /**< 1 to lower hbr3 phy ssc to 0.125 percent */
- uint32_t reserved : 6; /**< reserved */
+ uint32_t override_hbr3_pll_vco: 1; /**< 1 to override the hbr3 pll vco to 0 */
+ uint32_t reserved : 5; /**< reserved */
} bits; /**< boot bits */
uint32_t all; /**< 32-bit access to bits */
};
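
The edit above follows the usual bitfield convention: each newly named flag is carved out of the reserved count so the struct still overlays the 32-bit all member exactly. A minimal illustrative sketch of the invariant (not the driver's actual union):

    union example_boot_options {
        struct {
            uint32_t lower_hbr3_phy_ssc : 1;
            uint32_t override_hbr3_pll_vco : 1; /* newly claimed bit */
            uint32_t reserved : 30;             /* shrunk to keep 32 bits total */
        } bits;
        uint32_t all;
    };

    _Static_assert(sizeof(union example_boot_options) == sizeof(uint32_t),
                   "boot options must stay exactly one dword");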
@@ -882,7 +883,7 @@ enum dmub_shared_state_feature_id {
/**
* struct dmub_shared_state_ips_fw - Firmware signals for IPS.
*/
- union dmub_shared_state_ips_fw_signals {
+union dmub_shared_state_ips_fw_signals {
struct {
uint32_t ips1_commit : 1; /**< 1 if in IPS1 or IPS0 RCG */
uint32_t ips2_commit : 1; /**< 1 if in IPS2 */
@@ -897,7 +898,7 @@ enum dmub_shared_state_feature_id {
/**
* struct dmub_shared_state_ips_signals - Firmware signals for IPS.
*/
- union dmub_shared_state_ips_driver_signals {
+union dmub_shared_state_ips_driver_signals {
struct {
uint32_t allow_pg : 1; /**< 1 if PG is allowed */
uint32_t allow_ips1 : 1; /**< 1 is IPS1 is allowed */
@@ -1990,18 +1991,19 @@ struct dmub_cmd_lsdma_data {
struct lsdma_tiled_copy_data {
uint32_t src_addr_lo;
uint32_t src_addr_hi;
+
uint32_t dst_addr_lo;
uint32_t dst_addr_hi;
uint32_t src_x : 16;
uint32_t src_y : 16;
- uint32_t src_width : 16;
- uint32_t src_height : 16;
-
uint32_t dst_x : 16;
uint32_t dst_y : 16;
+ uint32_t src_width : 16;
+ uint32_t src_height : 16;
+
uint32_t dst_width : 16;
uint32_t dst_height : 16;
@@ -2034,23 +2036,58 @@ struct dmub_cmd_lsdma_data {
uint32_t padding : 30;
} tiled_copy_data;
struct lsdma_linear_copy_data {
+ uint32_t src_lo;
+ uint32_t src_hi;
+
+ uint32_t dst_lo;
+ uint32_t dst_hi;
+
uint32_t count : 30;
uint32_t cache_policy_dst : 2;
uint32_t tmz : 1;
uint32_t cache_policy_src : 2;
uint32_t padding : 29;
-
+ } linear_copy_data;
+ struct lsdma_linear_sub_window_copy_data {
uint32_t src_lo;
uint32_t src_hi;
+
uint32_t dst_lo;
uint32_t dst_hi;
- } linear_copy_data;
+
+ uint32_t src_x : 16;
+ uint32_t src_y : 16;
+
+ uint32_t dst_x : 16;
+ uint32_t dst_y : 16;
+
+ uint32_t rect_x : 16;
+ uint32_t rect_y : 16;
+
+ uint32_t src_pitch : 16;
+ uint32_t dst_pitch : 16;
+
+ uint32_t src_slice_pitch;
+ uint32_t dst_slice_pitch;
+
+ uint32_t tmz : 1;
+ uint32_t element_size : 3;
+ uint32_t src_cache_policy : 3;
+ uint32_t dst_cache_policy : 3;
+ uint32_t reserved0 : 22;
+ } linear_sub_window_copy_data;
struct lsdma_reg_write_data {
uint32_t reg_addr;
uint32_t reg_data;
} reg_write_data;
struct lsdma_pio_copy_data {
+ uint32_t src_lo;
+ uint32_t src_hi;
+
+ uint32_t dst_lo;
+ uint32_t dst_hi;
+
union {
struct {
uint32_t byte_count : 26;
@@ -2063,12 +2100,11 @@ struct dmub_cmd_lsdma_data {
} fields;
uint32_t raw;
} packet;
- uint32_t src_lo;
- uint32_t src_hi;
- uint32_t dst_lo;
- uint32_t dst_hi;
} pio_copy_data;
struct lsdma_pio_constfill_data {
+ uint32_t dst_lo;
+ uint32_t dst_hi;
+
union {
struct {
uint32_t byte_count : 26;
@@ -2081,14 +2117,12 @@ struct dmub_cmd_lsdma_data {
} fields;
uint32_t raw;
} packet;
- uint32_t dst_lo;
- uint32_t dst_hi;
+
uint32_t data;
} pio_constfill_data;
uint32_t all[14];
} u;
-
};
struct dmub_rb_cmd_lsdma {
@@ -2330,6 +2364,7 @@ struct dmub_cmd_fams2_global_config {
union dmub_fams2_global_feature_config features;
uint32_t recovery_timeout_us;
uint32_t hwfq_flip_programming_delay_us;
+ uint32_t max_allow_to_target_delta_us; // how early DCN could assert P-State allow compared to the P-State target
};
union dmub_cmd_fams2_config {
@@ -3986,6 +4021,10 @@ enum dmub_cmd_replay_type {
*/
DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP = 8,
/**
+ * Set version
+ */
+ DMUB_CMD__REPLAY_SET_VERSION = 9,
+ /**
* Set Replay General command.
*/
DMUB_CMD__REPLAY_SET_GENERAL_CMD = 16,
@@ -4015,6 +4054,10 @@ struct dmub_alpm_auxless_data {
uint16_t lfps_t1_t2_override_us;
short lfps_t1_t2_offset_us;
uint8_t lttpr_count;
+ /*
+ * Padding to align structure to 4 byte boundary.
+ */
+ uint8_t pad[1];
};
/**
@@ -4048,14 +4091,6 @@ struct dmub_cmd_replay_copy_settings_data {
*/
uint8_t digbe_inst;
/**
- * @hpo_stream_enc_inst: HPO stream encoder instance
- */
- uint8_t hpo_stream_enc_inst;
- /**
- * @hpo_link_enc_inst: HPO link encoder instance
- */
- uint8_t hpo_link_enc_inst;
- /**
* AUX HW instance.
*/
uint8_t aux_inst;
@@ -4099,14 +4134,82 @@ struct dmub_cmd_replay_copy_settings_data {
* Use for AUX-less ALPM LFPS wake operation
*/
struct dmub_alpm_auxless_data auxless_alpm_data;
-
+ /**
+ * @hpo_stream_enc_inst: HPO stream encoder instance
+ */
+ uint8_t hpo_stream_enc_inst;
+ /**
+ * @hpo_link_enc_inst: HPO link encoder instance
+ */
+ uint8_t hpo_link_enc_inst;
+ /**
+ * Determines if fast resync in ultra sleep mode is enabled/disabled.
+ */
+ uint8_t replay_support_fast_resync_in_ultra_sleep_mode;
/**
* @pad: Align structure to 4 byte boundary.
*/
+ uint8_t pad[1];
+};
+
+
+/**
+ * Replay versions.
+ */
+enum replay_version {
+ /**
+ * FreeSync Replay
+ */
+ REPLAY_VERSION_FREESYNC_REPLAY = 0,
+ /**
+ * Panel Replay
+ */
+ REPLAY_VERSION_PANEL_REPLAY = 1,
+ /**
+ * Replay not supported.
+ */
+ REPLAY_VERSION_UNSUPPORTED = 0xFF,
+};
+
+/**
+ * Data passed from driver to FW in a DMUB_CMD__REPLAY_SET_VERSION command.
+ */
+struct dmub_cmd_replay_set_version_data {
+ /**
+ * Panel Instance.
+ * Panel instance to identify which psr_state to use
+ * Currently the support is only for 0 or 1
+ */
+ uint8_t panel_inst;
+ /**
+ * Replay version that FW should implement.
+ */
+ enum replay_version version;
+ /**
+ * Replay control version.
+ */
+ uint8_t cmd_version;
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
uint8_t pad[2];
};
/**
+ * Definition of a DMUB_CMD__REPLAY_SET_VERSION command.
+ */
+struct dmub_rb_cmd_replay_set_version {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Data passed from driver to FW in a DMUB_CMD__REPLAY_SET_VERSION command.
+ */
+ struct dmub_cmd_replay_set_version_data replay_set_version_data;
+};
+
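For orientation, a hypothetical driver-side fill of the new command; the header wiring shown here follows the usual dmub convention and is an assumption, not code from this patch:

    union dmub_rb_cmd cmd = {0};

    cmd.replay_set_version.header.type = DMUB_CMD__REPLAY;
    cmd.replay_set_version.header.sub_type = DMUB_CMD__REPLAY_SET_VERSION;
    cmd.replay_set_version.header.payload_bytes =
        sizeof(struct dmub_cmd_replay_set_version_data);
    cmd.replay_set_version.replay_set_version_data.panel_inst = 0;
    cmd.replay_set_version.replay_set_version_data.version =
        REPLAY_VERSION_PANEL_REPLAY;
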
+/**
* Definition of a DMUB_CMD__REPLAY_COPY_SETTINGS command.
*/
struct dmub_rb_cmd_replay_copy_settings {
@@ -4159,18 +4262,6 @@ struct dmub_rb_cmd_replay_enable_data {
* This does not support HDMI/DP2 for now.
*/
uint8_t phy_rate;
- /**
- * @hpo_stream_enc_inst: HPO stream encoder instance
- */
- uint8_t hpo_stream_enc_inst;
- /**
- * @hpo_link_enc_inst: HPO link encoder instance
- */
- uint8_t hpo_link_enc_inst;
- /**
- * @pad: Align structure to 4 byte boundary.
- */
- uint8_t pad[2];
};
/**
@@ -4470,6 +4561,10 @@ union dmub_replay_cmd_set {
*/
struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data disabled_adaptive_sync_sdp_data;
/**
+ * Definition of DMUB_CMD__REPLAY_SET_VERSION command data.
+ */
+ struct dmub_cmd_replay_set_version_data version_data;
+ /**
* Definition of DMUB_CMD__REPLAY_SET_GENERAL_CMD command data.
*/
struct dmub_cmd_replay_set_general_cmd_data set_general_cmd_data;
@@ -4685,21 +4780,25 @@ enum dmub_cmd_lsdma_type {
*/
DMUB_CMD__LSDMA_LINEAR_COPY = 1,
/**
+ * LSDMA copies data linearly from source to destination within a sub-window
+ */
+ DMUB_CMD__LSDMA_LINEAR_SUB_WINDOW_COPY = 2,
+ /**
* Send the tiled-to-tiled copy command
*/
- DMUB_CMD__LSDMA_TILED_TO_TILED_COPY = 2,
+ DMUB_CMD__LSDMA_TILED_TO_TILED_COPY = 3,
/**
* Send the poll reg write command
*/
- DMUB_CMD__LSDMA_POLL_REG_WRITE = 3,
+ DMUB_CMD__LSDMA_POLL_REG_WRITE = 4,
/**
* Send the pio copy command
*/
- DMUB_CMD__LSDMA_PIO_COPY = 4,
+ DMUB_CMD__LSDMA_PIO_COPY = 5,
/**
* Send the pio constfill command
*/
- DMUB_CMD__LSDMA_PIO_CONSTFILL = 5,
+ DMUB_CMD__LSDMA_PIO_CONSTFILL = 6,
};
struct abm_ace_curve {
@@ -5914,6 +6013,9 @@ enum ips_residency_mode {
IPS_RESIDENCY__IPS2,
IPS_RESIDENCY__IPS1_RCG,
IPS_RESIDENCY__IPS1_ONO2_ON,
+ IPS_RESIDENCY__IPS1_Z8_RETENTION,
+ IPS_RESIDENCY__PG_ONO_LAST_SEEN_IN_IPS,
+ IPS_RESIDENCY__PG_ONO_CURRENT_STATE
};
#define NUM_IPS_HISTOGRAM_BUCKETS 16
@@ -5927,6 +6029,8 @@ struct dmub_ips_residency_info {
uint32_t histogram[NUM_IPS_HISTOGRAM_BUCKETS];
uint64_t total_time_us;
uint64_t total_inactive_time_us;
+ uint32_t ono_pg_state_at_collection;
+ uint32_t ono_pg_state_last_seen_in_ips;
};
/**
@@ -6223,6 +6327,10 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE command.
*/
struct dmub_rb_cmd_idle_opt_set_dc_power_state idle_opt_set_dc_power_state;
+ /**
+ * Definition of a DMUB_CMD__REPLAY_SET_VERSION command.
+ */
+ struct dmub_rb_cmd_replay_set_version replay_set_version;
/*
* Definition of a DMUB_CMD__REPLAY_COPY_SETTINGS command.
*/
@@ -6431,15 +6539,18 @@ static inline bool dmub_rb_full(struct dmub_rb *rb)
static inline bool dmub_rb_push_front(struct dmub_rb *rb,
const union dmub_rb_cmd *cmd)
{
- uint64_t volatile *dst = (uint64_t volatile *)((uint8_t *)(rb->base_address) + rb->wrpt);
- const uint64_t *src = (const uint64_t *)cmd;
+ uint8_t *dst = (uint8_t *)(rb->base_address) + rb->wrpt;
+ const uint8_t *src = (const uint8_t *)cmd;
uint8_t i;
+ if (rb->capacity == 0)
+ return false;
+
if (dmub_rb_full(rb))
return false;
// copying data
- for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
+ for (i = 0; i < DMUB_RB_CMD_SIZE; i++)
*dst++ = *src++;
rb->wrpt += DMUB_RB_CMD_SIZE;
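
The new rb->capacity check (repeated in the accessors below) protects rings that were never initialized: the index wrap-around in these helpers reduces rptr/wrpt modulo rb->capacity, which is undefined for zero. A hypothetical consolidated guard expressing the same precondition:

    /* Illustrative only: validate a ring before any rptr/wrpt arithmetic. */
    static inline bool dmub_rb_is_valid(const struct dmub_rb *rb)
    {
        return rb->base_address && rb->capacity != 0;
    }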
@@ -6464,6 +6575,9 @@ static inline bool dmub_rb_out_push_front(struct dmub_rb *rb,
uint8_t *dst = (uint8_t *)(rb->base_address) + rb->wrpt;
const uint8_t *src = (const uint8_t *)cmd;
+ if (rb->capacity == 0)
+ return false;
+
if (dmub_rb_full(rb))
return false;
@@ -6509,6 +6623,9 @@ static inline void dmub_rb_get_rptr_with_offset(struct dmub_rb *rb,
uint32_t num_cmds,
uint32_t *next_rptr)
{
+ if (rb->capacity == 0)
+ return;
+
*next_rptr = rb->rptr + DMUB_RB_CMD_SIZE * num_cmds;
if (*next_rptr >= rb->capacity)
@@ -6572,6 +6689,9 @@ static inline bool dmub_rb_out_front(struct dmub_rb *rb,
*/
static inline bool dmub_rb_pop_front(struct dmub_rb *rb)
{
+ if (rb->capacity == 0)
+ return false;
+
if (dmub_rb_empty(rb))
return false;
@@ -6596,6 +6716,9 @@ static inline void dmub_rb_flush_pending(const struct dmub_rb *rb)
uint32_t rptr = rb->rptr;
uint32_t wptr = rb->wrpt;
+ if (rb->capacity == 0)
+ return;
+
while (rptr != wptr) {
uint64_t *data = (uint64_t *)((uint8_t *)(rb->base_address) + rptr);
uint8_t i;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index 3f38db752b84..4777c7203b2c 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -377,6 +377,7 @@ void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu
boot_options.bits.dpia_hpd_int_enable_supported = params->dpia_hpd_int_enable_supported;
boot_options.bits.power_optimization = params->power_optimization;
boot_options.bits.lower_hbr3_phy_ssc = params->lower_hbr3_phy_ssc;
+ boot_options.bits.override_hbr3_pll_vco = params->override_hbr3_pll_vco;
boot_options.bits.sel_mux_phy_c_d_phy_f_g = (dmub->asic == DMUB_ASIC_DCN31B) ? 1 : 0;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
index e7056205b050..ce041f6239dc 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
@@ -89,44 +89,50 @@ static inline void dmub_dcn32_translate_addr(const union dmub_addr *addr_in,
void dmub_dcn32_reset(struct dmub_srv *dmub)
{
union dmub_gpint_data_register cmd;
- const uint32_t timeout = 30;
- uint32_t in_reset, scratch, i;
+ const uint32_t timeout = 100000;
+ uint32_t in_reset, is_enabled, scratch, i, pwait_mode;
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
+ REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enabled);
- if (in_reset == 0) {
+ if (in_reset == 0 && is_enabled != 0) {
cmd.bits.status = 1;
cmd.bits.command_code = DMUB_GPINT__STOP_FW;
cmd.bits.param = 0;
dmub->hw_funcs.set_gpint(dmub, cmd);
- /**
- * Timeout covers both the ACK and the wait
- * for remaining work to finish.
- *
- * This is mostly bound by the PHY disable sequence.
- * Each register check will be greater than 1us, so
- * don't bother using udelay.
- */
-
for (i = 0; i < timeout; ++i) {
if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))
break;
+
+ udelay(1);
}
for (i = 0; i < timeout; ++i) {
- scratch = dmub->hw_funcs.get_gpint_response(dmub);
+ scratch = REG_READ(DMCUB_SCRATCH7);
if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
break;
+
+ udelay(1);
}
+ for (i = 0; i < timeout; ++i) {
+ REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &pwait_mode);
+ if (pwait_mode & (1 << 0))
+ break;
+
+ udelay(1);
+ }
/* Force reset in case we timed out, DMCUB is likely hung. */
}
- REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
- REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
- REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
+ if (is_enabled) {
+ REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
+ udelay(1);
+ REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
+ }
+
REG_WRITE(DMCUB_INBOX1_RPTR, 0);
REG_WRITE(DMCUB_INBOX1_WPTR, 0);
REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
@@ -135,7 +141,7 @@ void dmub_dcn32_reset(struct dmub_srv *dmub)
REG_WRITE(DMCUB_OUTBOX0_WPTR, 0);
REG_WRITE(DMCUB_SCRATCH0, 0);
- /* Clear the GPINT command manually so we don't reset again. */
+ /* Clear the GPINT command manually so we don't send anything during boot. */
cmd.all = 0;
dmub->hw_funcs.set_gpint(dmub, cmd);
}
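
The three loops above share one bounded-poll shape. A standalone sketch of that pattern, with a hypothetical read_status() standing in for the GPINT-ack, scratch and pwait-mode reads:

    static bool poll_for_status(struct dmub_srv *dmub, uint32_t expected,
                                uint32_t timeout_us)
    {
        uint32_t i;

        for (i = 0; i < timeout_us; ++i) {
            if (read_status(dmub) == expected) /* hypothetical accessor */
                return true;
            udelay(1); /* bounds the total wait to roughly timeout_us */
        }
        return false; /* timed out; the caller falls through to force reset */
    }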
@@ -419,8 +425,8 @@ uint32_t dmub_dcn32_get_current_time(struct dmub_srv *dmub)
void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub)
{
- uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset;
- uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled;
+ uint32_t is_dmub_enabled, is_soft_reset, is_pwait;
+ uint32_t is_traceport_enabled, is_cw6_enabled;
struct dmub_timeout_info timeout = {0};
if (!dmub)
@@ -470,18 +476,15 @@ void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub)
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
dmub->debug.is_dmcub_enabled = is_dmub_enabled;
+ REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait);
+ dmub->debug.is_pwait = is_pwait;
+
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
dmub->debug.is_dmcub_soft_reset = is_soft_reset;
- REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset);
- dmub->debug.is_dmcub_secure_reset = is_sec_reset;
-
REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
dmub->debug.is_traceport_en = is_traceport_enabled;
- REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled);
- dmub->debug.is_cw0_enabled = is_cw0_enabled;
-
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
dmub->debug.is_cw6_enabled = is_cw6_enabled;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
index 1a229450c53d..daf81027d663 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
@@ -89,6 +89,9 @@ struct dmub_srv;
DMUB_SR(DMCUB_REGION5_OFFSET) \
DMUB_SR(DMCUB_REGION5_OFFSET_HIGH) \
DMUB_SR(DMCUB_REGION5_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION6_OFFSET) \
+ DMUB_SR(DMCUB_REGION6_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION6_TOP_ADDRESS) \
DMUB_SR(DMCUB_SCRATCH0) \
DMUB_SR(DMCUB_SCRATCH1) \
DMUB_SR(DMCUB_SCRATCH2) \
@@ -155,6 +158,8 @@ struct dmub_srv;
DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_ENABLE) \
DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_TOP_ADDRESS) \
DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_ENABLE) \
+ DMUB_SF(DMCUB_REGION6_TOP_ADDRESS, DMCUB_REGION6_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION6_TOP_ADDRESS, DMCUB_REGION6_ENABLE) \
DMUB_SF(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE) \
DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET) \
DMUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE) \
@@ -162,7 +167,8 @@ struct dmub_srv;
DMUB_SF(DMCUB_INBOX0_WPTR, DMCUB_INBOX0_WPTR) \
DMUB_SF(DMCUB_REGION3_TMR_AXI_SPACE, DMCUB_REGION3_TMR_AXI_SPACE) \
DMUB_SF(DMCUB_INTERRUPT_ENABLE, DMCUB_GPINT_IH_INT_EN) \
- DMUB_SF(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK)
+ DMUB_SF(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK) \
+ DMUB_SF(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS)
struct dmub_srv_dcn32_reg_offset {
#define DMUB_SR(reg) uint32_t reg;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
index 2228d62adc7e..834e5434ccb8 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
@@ -400,13 +400,14 @@ union dmub_fw_boot_options dmub_dcn35_get_fw_boot_option(struct dmub_srv *dmub)
void dmub_dcn35_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmub_srv_hw_params *params)
{
union dmub_fw_boot_options boot_options = {0};
- union dmub_fw_boot_options cur_boot_options = {0};
- cur_boot_options = dmub_dcn35_get_fw_boot_option(dmub);
+ if (!dmub->dpia_supported) {
+ dmub->dpia_supported = dmub_dcn35_get_fw_boot_option(dmub).bits.enable_dpia;
+ }
boot_options.bits.z10_disable = params->disable_z10;
boot_options.bits.dpia_supported = params->dpia_supported;
- boot_options.bits.enable_dpia = cur_boot_options.bits.enable_dpia && !params->disable_dpia;
+ boot_options.bits.enable_dpia = dmub->dpia_supported && !params->disable_dpia;
boot_options.bits.usb4_cm_version = params->usb4_cm_version;
boot_options.bits.dpia_hpd_int_enable_supported = params->dpia_hpd_int_enable_supported;
boot_options.bits.power_optimization = params->power_optimization;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
index 567c5b1aeb7a..e7a58b140388 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
@@ -71,7 +71,7 @@ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub,
switch (cmd.cmd_common.header.type) {
case DMUB_OUT_CMD__DP_AUX_REPLY:
notify->type = DMUB_NOTIFICATION_AUX_REPLY;
- notify->link_index = cmd.dp_aux_reply.control.instance;
+ notify->instance = cmd.dp_aux_reply.control.instance;
notify->result = cmd.dp_aux_reply.control.result;
dmub_memcpy((void *)&notify->aux_reply,
(void *)&cmd.dp_aux_reply.reply_data, sizeof(struct aux_reply_data));
@@ -84,17 +84,17 @@ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub,
notify->type = DMUB_NOTIFICATION_HPD_IRQ;
}
- notify->link_index = cmd.dp_hpd_notify.hpd_data.instance;
+ notify->instance = cmd.dp_hpd_notify.hpd_data.instance;
notify->result = AUX_RET_SUCCESS;
break;
case DMUB_OUT_CMD__SET_CONFIG_REPLY:
notify->type = DMUB_NOTIFICATION_SET_CONFIG_REPLY;
- notify->link_index = cmd.set_config_reply.set_config_reply_control.instance;
+ notify->instance = cmd.set_config_reply.set_config_reply_control.instance;
notify->sc_status = cmd.set_config_reply.set_config_reply_control.status;
break;
case DMUB_OUT_CMD__DPIA_NOTIFICATION:
notify->type = DMUB_NOTIFICATION_DPIA_NOTIFICATION;
- notify->link_index = cmd.dpia_notification.payload.header.instance;
+ notify->instance = cmd.dpia_notification.payload.header.instance;
break;
case DMUB_OUT_CMD__HPD_SENSE_NOTIFY:
notify->type = DMUB_NOTIFICATION_HPD_SENSE_NOTIFY;
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 5fc29164e4b4..8aea50aa9533 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -213,6 +213,11 @@ enum {
#endif
#define DEVICE_ID_NV_13FE 0x13FE // CYAN_SKILLFISH
#define DEVICE_ID_NV_143F 0x143F
+#define DEVICE_ID_NV_13F9 0x13F9
+#define DEVICE_ID_NV_13FA 0x13FA
+#define DEVICE_ID_NV_13FB 0x13FB
+#define DEVICE_ID_NV_13FC 0x13FC
+#define DEVICE_ID_NV_13DB 0x13DB
#define FAMILY_VGH 144
#define DEVICE_ID_VGH_163F 0x163F
#define DEVICE_ID_VGH_1435 0x1435
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 71efd2770c99..ce421bcddcb0 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -226,8 +226,8 @@ static void update_v_total_for_static_ramp(
unsigned int target_duration_in_us =
calc_duration_in_us_from_refresh_in_uhz(
in_out_vrr->fixed.target_refresh_in_uhz);
- bool ramp_direction_is_up = (current_duration_in_us >
- target_duration_in_us) ? true : false;
+ bool ramp_direction_is_up = current_duration_in_us >
+ target_duration_in_us;
/* Calculate ratio between new and current frame duration with 3 digit */
unsigned int frame_duration_ratio = div64_u64(1000000,
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
index 5e01c6e24cbc..c760216a6240 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
@@ -29,6 +29,7 @@ static void push_error_status(struct mod_hdcp *hdcp,
enum mod_hdcp_status status)
{
struct mod_hdcp_trace *trace = &hdcp->connection.trace;
+ const uint8_t retry_limit = hdcp->connection.link.adjust.retry_limit;
if (trace->error_count < MAX_NUM_OF_ERROR_TRACE) {
trace->errors[trace->error_count].status = status;
@@ -39,11 +40,11 @@ static void push_error_status(struct mod_hdcp *hdcp,
if (is_hdcp1(hdcp)) {
hdcp->connection.hdcp1_retry_count++;
- if (hdcp->connection.hdcp1_retry_count == MAX_NUM_OF_ATTEMPTS)
+ if (hdcp->connection.hdcp1_retry_count == retry_limit)
hdcp->connection.link.adjust.hdcp1.disable = 1;
} else if (is_hdcp2(hdcp)) {
hdcp->connection.hdcp2_retry_count++;
- if (hdcp->connection.hdcp2_retry_count == MAX_NUM_OF_ATTEMPTS)
+ if (hdcp->connection.hdcp2_retry_count == retry_limit)
hdcp->connection.link.adjust.hdcp2.disable = 1;
}
}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index e58e7b93810b..6b7db8ec9a53 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -260,6 +260,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
return MOD_HDCP_STATUS_FAILURE;
}
+ if (!display)
+ return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
mutex_lock(&psp->hdcp_context.mutex);
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index c42468bb70ac..b51ddf2846df 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -220,6 +220,7 @@ struct mod_hdcp_link_adjustment_hdcp2 {
struct mod_hdcp_link_adjustment {
uint8_t auth_delay;
+ uint8_t retry_limit;
struct mod_hdcp_link_adjustment_hdcp1 hdcp1;
struct mod_hdcp_link_adjustment_hdcp2 hdcp2;
};
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index bfb446736ca8..75efda2969cf 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -239,18 +239,51 @@ enum amd_harvest_ip_mask {
AMD_HARVEST_IP_DMU_MASK = 0x4,
};
+/**
+ * enum DC_FEATURE_MASK - Bits that control DC feature defaults
+ */
enum DC_FEATURE_MASK {
//Default value can be found at "uint amdgpu_dc_feature_mask"
- DC_FBC_MASK = (1 << 0), //0x1, disabled by default
- DC_MULTI_MON_PP_MCLK_SWITCH_MASK = (1 << 1), //0x2, enabled by default
- DC_DISABLE_FRACTIONAL_PWM_MASK = (1 << 2), //0x4, disabled by default
- DC_PSR_MASK = (1 << 3), //0x8, disabled by default for dcn < 3.1
- DC_EDP_NO_POWER_SEQUENCING = (1 << 4), //0x10, disabled by default
- DC_DISABLE_LTTPR_DP1_4A = (1 << 5), //0x20, disabled by default
- DC_DISABLE_LTTPR_DP2_0 = (1 << 6), //0x40, disabled by default
- DC_PSR_ALLOW_SMU_OPT = (1 << 7), //0x80, disabled by default
- DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8), //0x100, disabled by default
- DC_REPLAY_MASK = (1 << 9), //0x200, disabled by default for dcn < 3.1.4
+ /**
+ * @DC_FBC_MASK: (0x1) disabled by default
+ */
+ DC_FBC_MASK = (1 << 0),
+ /**
+ * @DC_MULTI_MON_PP_MCLK_SWITCH_MASK: (0x2) enabled by default
+ */
+ DC_MULTI_MON_PP_MCLK_SWITCH_MASK = (1 << 1),
+ /**
+ * @DC_DISABLE_FRACTIONAL_PWM_MASK: (0x4) disabled by default
+ */
+ DC_DISABLE_FRACTIONAL_PWM_MASK = (1 << 2),
+ /**
+ * @DC_PSR_MASK: (0x8) disabled by default for DCN < 3.1
+ */
+ DC_PSR_MASK = (1 << 3),
+ /**
+ * @DC_EDP_NO_POWER_SEQUENCING: (0x10) disabled by default
+ */
+ DC_EDP_NO_POWER_SEQUENCING = (1 << 4),
+ /**
+ * @DC_DISABLE_LTTPR_DP1_4A: (0x20) disabled by default
+ */
+ DC_DISABLE_LTTPR_DP1_4A = (1 << 5),
+ /**
+ * @DC_DISABLE_LTTPR_DP2_0: (0x40) disabled by default
+ */
+ DC_DISABLE_LTTPR_DP2_0 = (1 << 6),
+ /**
+ * @DC_PSR_ALLOW_SMU_OPT: (0x80) disabled by default
+ */
+ DC_PSR_ALLOW_SMU_OPT = (1 << 7),
+ /**
+ * @DC_PSR_ALLOW_MULTI_DISP_OPT: (0x100) disabled by default
+ */
+ DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8),
+ /**
+ * @DC_REPLAY_MASK: (0x200) disabled by default for DCN < 3.1.4
+ */
+ DC_REPLAY_MASK = (1 << 9),
};
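
For example, booting with amdgpu.dcfeaturemask=0x208 on the kernel command line sets DC_PSR_MASK (0x8) together with DC_REPLAY_MASK (0x200); dcfeaturemask is the module option that feeds the amdgpu_dc_feature_mask default mentioned above.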
/**
@@ -258,64 +291,64 @@ enum DC_FEATURE_MASK {
*/
enum DC_DEBUG_MASK {
/**
- * @DC_DISABLE_PIPE_SPLIT: If set, disable pipe-splitting
+ * @DC_DISABLE_PIPE_SPLIT: (0x1) If set, disable pipe-splitting
*/
DC_DISABLE_PIPE_SPLIT = 0x1,
/**
- * @DC_DISABLE_STUTTER: If set, disable memory stutter mode
+ * @DC_DISABLE_STUTTER: (0x2) If set, disable memory stutter mode
*/
DC_DISABLE_STUTTER = 0x2,
/**
- * @DC_DISABLE_DSC: If set, disable display stream compression
+ * @DC_DISABLE_DSC: (0x4) If set, disable display stream compression
*/
DC_DISABLE_DSC = 0x4,
/**
- * @DC_DISABLE_CLOCK_GATING: If set, disable clock gating optimizations
+ * @DC_DISABLE_CLOCK_GATING: (0x8) If set, disable clock gating optimizations
*/
DC_DISABLE_CLOCK_GATING = 0x8,
/**
- * @DC_DISABLE_PSR: If set, disable Panel self refresh v1 and PSR-SU
+ * @DC_DISABLE_PSR: (0x10) If set, disable Panel self refresh v1 and PSR-SU
*/
DC_DISABLE_PSR = 0x10,
/**
- * @DC_FORCE_SUBVP_MCLK_SWITCH: If set, force mclk switch in subvp, even
+ * @DC_FORCE_SUBVP_MCLK_SWITCH: (0x20) If set, force mclk switch in subvp, even
* if mclk switch in vblank is possible
*/
DC_FORCE_SUBVP_MCLK_SWITCH = 0x20,
/**
- * @DC_DISABLE_MPO: If set, disable multi-plane offloading
+ * @DC_DISABLE_MPO: (0x40) If set, disable multi-plane offloading
*/
DC_DISABLE_MPO = 0x40,
/**
- * @DC_ENABLE_DPIA_TRACE: If set, enable trace logging for DPIA
+ * @DC_ENABLE_DPIA_TRACE: (0x80) If set, enable trace logging for DPIA
*/
DC_ENABLE_DPIA_TRACE = 0x80,
/**
- * @DC_ENABLE_DML2: If set, force usage of DML2, even if the DCN version
+ * @DC_ENABLE_DML2: (0x100) If set, force usage of DML2, even if the DCN version
* does not default to it.
*/
DC_ENABLE_DML2 = 0x100,
/**
- * @DC_DISABLE_PSR_SU: If set, disable PSR SU
+ * @DC_DISABLE_PSR_SU: (0x200) If set, disable PSR SU
*/
DC_DISABLE_PSR_SU = 0x200,
/**
- * @DC_DISABLE_REPLAY: If set, disable Panel Replay
+ * @DC_DISABLE_REPLAY: (0x400) If set, disable Panel Replay
*/
DC_DISABLE_REPLAY = 0x400,
/**
- * @DC_DISABLE_IPS: If set, disable all Idle Power States, all the time.
+ * @DC_DISABLE_IPS: (0x800) If set, disable all Idle Power States, all the time.
* If more than one IPS debug bit is set, the lowest bit takes
* precedence. For example, if DC_FORCE_IPS_ENABLE and
* DC_DISABLE_IPS_DYNAMIC are set, then DC_DISABLE_IPS_DYNAMIC takes
@@ -324,56 +357,57 @@ enum DC_DEBUG_MASK {
DC_DISABLE_IPS = 0x800,
/**
- * @DC_DISABLE_IPS_DYNAMIC: If set, disable all IPS, all the time,
+ * @DC_DISABLE_IPS_DYNAMIC: (0x1000) If set, disable all IPS, all the time,
* *except* when driver goes into suspend.
*/
DC_DISABLE_IPS_DYNAMIC = 0x1000,
/**
- * @DC_DISABLE_IPS2_DYNAMIC: If set, disable IPS2 (IPS1 allowed) if
+ * @DC_DISABLE_IPS2_DYNAMIC: (0x2000) If set, disable IPS2 (IPS1 allowed) if
* there is an enabled display. Otherwise, enable all IPS.
*/
DC_DISABLE_IPS2_DYNAMIC = 0x2000,
/**
- * @DC_FORCE_IPS_ENABLE: If set, force enable all IPS, all the time.
+ * @DC_FORCE_IPS_ENABLE: (0x4000) If set, force enable all IPS, all the time.
*/
DC_FORCE_IPS_ENABLE = 0x4000,
/**
- * @DC_DISABLE_ACPI_EDID: If set, don't attempt to fetch EDID for
+ * @DC_DISABLE_ACPI_EDID: (0x8000) If set, don't attempt to fetch EDID for
* eDP display from ACPI _DDC method.
*/
DC_DISABLE_ACPI_EDID = 0x8000,
/**
- * @DC_DISABLE_HDMI_CEC: If set, disable HDMI-CEC feature in amdgpu driver.
+ * @DC_DISABLE_HDMI_CEC: (0x10000) If set, disable HDMI-CEC feature in amdgpu driver.
*/
DC_DISABLE_HDMI_CEC = 0x10000,
/**
- * @DC_DISABLE_SUBVP_FAMS: If set, disable DCN Sub-Viewport & Firmware Assisted
+ * @DC_DISABLE_SUBVP_FAMS: (0x20000) If set, disable DCN Sub-Viewport & Firmware Assisted
* Memory Clock Switching (FAMS) feature in amdgpu driver.
*/
DC_DISABLE_SUBVP_FAMS = 0x20000,
/**
- * @DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE: If set, disable support for custom brightness curves
+ * @DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE: (0x40000) If set, disable support for custom
+ * brightness curves
*/
DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE = 0x40000,
/**
- * @DC_HDCP_LC_FORCE_FW_ENABLE: If set, use HDCP Locality Check FW
+ * @DC_HDCP_LC_FORCE_FW_ENABLE: (0x80000) If set, use HDCP Locality Check FW
* path regardless of reported HW capabilities.
*/
DC_HDCP_LC_FORCE_FW_ENABLE = 0x80000,
/**
- * @DC_HDCP_LC_ENABLE_SW_FALLBACK: If set, upon HDCP Locality Check FW
+ * @DC_HDCP_LC_ENABLE_SW_FALLBACK: (0x100000) If set, upon HDCP Locality Check FW
* path failure, retry using legacy SW path.
*/
DC_HDCP_LC_ENABLE_SW_FALLBACK = 0x100000,
/**
- * @DC_SKIP_DETECTION_LT: If set, skip detection link training
+ * @DC_SKIP_DETECTION_LT: (0x200000) If set, skip detection link training
*/
DC_SKIP_DETECTION_LT = 0x200000,
};
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h
index 9de01ae574c0..067eddd9c62d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h
@@ -4115,6 +4115,7 @@
#define mmSCL0_SCL_COEF_RAM_CONFLICT_STATUS 0x1B55
#define mmSCL0_SCL_COEF_RAM_SELECT 0x1B40
#define mmSCL0_SCL_COEF_RAM_TAP_DATA 0x1B41
+#define mmSCL0_SCL_SCALER_ENABLE 0x1B42
#define mmSCL0_SCL_CONTROL 0x1B44
#define mmSCL0_SCL_DEBUG 0x1B6A
#define mmSCL0_SCL_DEBUG2 0x1B69
@@ -4144,6 +4145,7 @@
#define mmSCL1_SCL_COEF_RAM_CONFLICT_STATUS 0x1E55
#define mmSCL1_SCL_COEF_RAM_SELECT 0x1E40
#define mmSCL1_SCL_COEF_RAM_TAP_DATA 0x1E41
+#define mmSCL1_SCL_SCALER_ENABLE 0x1E42
#define mmSCL1_SCL_CONTROL 0x1E44
#define mmSCL1_SCL_DEBUG 0x1E6A
#define mmSCL1_SCL_DEBUG2 0x1E69
@@ -4173,6 +4175,7 @@
#define mmSCL2_SCL_COEF_RAM_CONFLICT_STATUS 0x4155
#define mmSCL2_SCL_COEF_RAM_SELECT 0x4140
#define mmSCL2_SCL_COEF_RAM_TAP_DATA 0x4141
+#define mmSCL2_SCL_SCALER_ENABLE 0x4142
#define mmSCL2_SCL_CONTROL 0x4144
#define mmSCL2_SCL_DEBUG 0x416A
#define mmSCL2_SCL_DEBUG2 0x4169
@@ -4202,6 +4205,7 @@
#define mmSCL3_SCL_COEF_RAM_CONFLICT_STATUS 0x4455
#define mmSCL3_SCL_COEF_RAM_SELECT 0x4440
#define mmSCL3_SCL_COEF_RAM_TAP_DATA 0x4441
+#define mmSCL3_SCL_SCALER_ENABLE 0x4442
#define mmSCL3_SCL_CONTROL 0x4444
#define mmSCL3_SCL_DEBUG 0x446A
#define mmSCL3_SCL_DEBUG2 0x4469
@@ -4231,6 +4235,7 @@
#define mmSCL4_SCL_COEF_RAM_CONFLICT_STATUS 0x4755
#define mmSCL4_SCL_COEF_RAM_SELECT 0x4740
#define mmSCL4_SCL_COEF_RAM_TAP_DATA 0x4741
+#define mmSCL4_SCL_SCALER_ENABLE 0x4742
#define mmSCL4_SCL_CONTROL 0x4744
#define mmSCL4_SCL_DEBUG 0x476A
#define mmSCL4_SCL_DEBUG2 0x4769
@@ -4260,6 +4265,7 @@
#define mmSCL5_SCL_COEF_RAM_CONFLICT_STATUS 0x4A55
#define mmSCL5_SCL_COEF_RAM_SELECT 0x4A40
#define mmSCL5_SCL_COEF_RAM_TAP_DATA 0x4A41
+#define mmSCL5_SCL_SCALER_ENABLE 0x4A42
#define mmSCL5_SCL_CONTROL 0x4A44
#define mmSCL5_SCL_DEBUG 0x4A6A
#define mmSCL5_SCL_DEBUG2 0x4A69
@@ -4287,6 +4293,7 @@
#define mmSCL_COEF_RAM_CONFLICT_STATUS 0x1B55
#define mmSCL_COEF_RAM_SELECT 0x1B40
#define mmSCL_COEF_RAM_TAP_DATA 0x1B41
+#define mmSCL_SCALER_ENABLE 0x1B42
#define mmSCL_CONTROL 0x1B44
#define mmSCL_DEBUG 0x1B6A
#define mmSCL_DEBUG2 0x1B69
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
index 2d6a598a6c25..9317a7afa621 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
@@ -8650,6 +8650,8 @@
#define REGAMMA_LUT_INDEX__REGAMMA_LUT_INDEX__SHIFT 0x00000000
#define REGAMMA_LUT_WRITE_EN_MASK__REGAMMA_LUT_WRITE_EN_MASK_MASK 0x00000007L
#define REGAMMA_LUT_WRITE_EN_MASK__REGAMMA_LUT_WRITE_EN_MASK__SHIFT 0x00000000
+#define SCL_SCALER_ENABLE__SCL_SCALE_EN_MASK 0x00000001L
+#define SCL_SCALER_ENABLE__SCL_SCALE_EN__SHIFT 0x00000000
#define SCL_ALU_CONTROL__SCL_ALU_DISABLE_MASK 0x00000001L
#define SCL_ALU_CONTROL__SCL_ALU_DISABLE__SHIFT 0x00000000
#define SCL_BYPASS_CONTROL__SCL_BYPASS_MODE_MASK 0x00000003L
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 5c86423c2e92..3d083010e734 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -211,7 +211,7 @@ atom_bios_string = "ATOM"
};
*/
-#pragma pack(1) /* BIOS data must use byte aligment*/
+#pragma pack(1) /* BIOS data must use byte alignment */
enum atombios_image_offset{
OFFSET_TO_ATOM_ROM_HEADER_POINTER = 0x00000048,
@@ -255,8 +255,8 @@ struct atom_rom_header_v2_2
uint16_t subsystem_vendor_id;
uint16_t subsystem_id;
uint16_t pci_info_offset;
- uint16_t masterhwfunction_offset; //Offest for SW to get all command function offsets, Don't change the position
- uint16_t masterdatatable_offset; //Offest for SW to get all data table offsets, Don't change the position
+ uint16_t masterhwfunction_offset; //Offset for SW to get all command function offsets, Don't change the position
+ uint16_t masterdatatable_offset; //Offset for SW to get all data table offsets, Don't change the position
uint16_t reserved;
uint32_t pspdirtableoffset;
};
@@ -453,7 +453,7 @@ struct atom_dtd_format
uint8_t refreshrate;
};
-/* atom_dtd_format.modemiscinfo defintion */
+/* atom_dtd_format.modemiscinfo definition */
enum atom_dtd_format_modemiscinfo{
ATOM_HSYNC_POLARITY = 0x0002,
ATOM_VSYNC_POLARITY = 0x0004,
@@ -678,7 +678,7 @@ struct lcd_info_v2_1
uint32_t reserved1[8];
};
-/* lcd_info_v2_1.panel_misc defintion */
+/* lcd_info_v2_1.panel_misc definition */
enum atom_lcd_info_panel_misc{
ATOM_PANEL_MISC_FPDI =0x0002,
};
@@ -716,7 +716,7 @@ enum atom_gpio_pin_assignment_gpio_id {
/* gpio_id pre-define id for multiple usage */
/* GPIO use to control PCIE_VDDC in certain SLT board */
PCIE_VDDC_CONTROL_GPIO_PINID = 56,
- /* if PP_AC_DC_SWITCH_GPIO_PINID in Gpio_Pin_LutTable, AC/DC swithing feature is enable */
+ /* if PP_AC_DC_SWITCH_GPIO_PINID in Gpio_Pin_LutTable, AC/DC switching feature is enable */
PP_AC_DC_SWITCH_GPIO_PINID = 60,
/* VDDC_REGULATOR_VRHOT_GPIO_PINID in Gpio_Pin_LutTable, VRHot feature is enable */
VDDC_VRHOT_GPIO_PINID = 61,
@@ -734,7 +734,7 @@ enum atom_gpio_pin_assignment_gpio_id {
struct atom_gpio_pin_lut_v2_1
{
struct atom_common_table_header table_header;
- /*the real number of this included in the structure is calcualted by using the (whole structure size - the header size)/size of atom_gpio_pin_lut */
+ /*the real number of entries included in the structure is calculated by using the (whole structure size - the header size)/size of atom_gpio_pin_lut */
struct atom_gpio_pin_assignment gpio_pin[];
};
@@ -997,7 +997,7 @@ enum atom_connector_layout_info_mini_type_def {
enum atom_display_device_tag_def{
ATOM_DISPLAY_LCD1_SUPPORT = 0x0002, //an embedded display is either an LVDS or eDP signal type of display
- ATOM_DISPLAY_LCD2_SUPPORT = 0x0020, //second edp device tag 0x0020 for backward compability
+ ATOM_DISPLAY_LCD2_SUPPORT = 0x0020, //second edp device tag 0x0020 for backward compatibility
ATOM_DISPLAY_DFP1_SUPPORT = 0x0008,
ATOM_DISPLAY_DFP2_SUPPORT = 0x0080,
ATOM_DISPLAY_DFP3_SUPPORT = 0x0200,
@@ -1011,7 +1011,7 @@ struct atom_display_object_path_v2
{
uint16_t display_objid; //Connector Object ID or Misc Object ID
uint16_t disp_recordoffset;
- uint16_t encoderobjid; //first encoder closer to the connector, could be either an external or intenal encoder
+ uint16_t encoderobjid; //first encoder closer to the connector, could be either an external or internal encoder
uint16_t extencoderobjid; //2nd encoder after the first encoder, from the connector point of view;
uint16_t encoder_recordoffset;
uint16_t extencoder_recordoffset;
@@ -1023,7 +1023,7 @@ struct atom_display_object_path_v2
struct atom_display_object_path_v3 {
uint16_t display_objid; //Connector Object ID or Misc Object ID
uint16_t disp_recordoffset;
- uint16_t encoderobjid; //first encoder closer to the connector, could be either an external or intenal encoder
+ uint16_t encoderobjid; //first encoder closer to the connector, could be either an external or internal encoder
uint16_t reserved1; //only on USBC case, otherwise always = 0
uint16_t reserved2; //reserved and always = 0
uint16_t reserved3; //reserved and always = 0
@@ -3547,7 +3547,7 @@ struct atom_voltage_object_header_v4{
enum atom_voltage_object_mode
{
VOLTAGE_OBJ_GPIO_LUT = 0, //VOLTAGE and GPIO Lookup table ->atom_gpio_voltage_object_v4
- VOLTAGE_OBJ_VR_I2C_INIT_SEQ = 3, //VOLTAGE REGULATOR INIT sequece through I2C -> atom_i2c_voltage_object_v4
+ VOLTAGE_OBJ_VR_I2C_INIT_SEQ = 3, //VOLTAGE REGULATOR INIT sequence through I2C -> atom_i2c_voltage_object_v4
VOLTAGE_OBJ_PHASE_LUT = 4, //Set Vregulator Phase lookup table ->atom_gpio_voltage_object_v4
VOLTAGE_OBJ_SVID2 = 7, //Indicate voltage control by SVID2 ->atom_svid2_voltage_object_v4
VOLTAGE_OBJ_EVV = 8,
@@ -3585,7 +3585,7 @@ struct atom_gpio_voltage_object_v4
{
struct atom_voltage_object_header_v4 header; // voltage mode = VOLTAGE_OBJ_GPIO_LUT or VOLTAGE_OBJ_PHASE_LUT
uint8_t gpio_control_id; // default is 0 which indicate control through CG VID mode
- uint8_t gpio_entry_num; // indiate the entry numbers of Votlage/Gpio value Look up table
+ uint8_t gpio_entry_num; // indicates the number of entries in the Voltage/Gpio value look up table
uint8_t phase_delay_us; // phase delay in unit of micro second
uint8_t reserved;
uint32_t gpio_mask_val; // GPIO Mask value
@@ -4507,8 +4507,8 @@ struct amd_acpi_description_header{
struct uefi_acpi_vfct{
struct amd_acpi_description_header sheader;
uint8_t tableUUID[16]; //0x24
- uint32_t vbiosimageoffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the stucture.
- uint32_t lib1Imageoffset; //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the stucture.
+ uint32_t vbiosimageoffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the structure.
+ uint32_t lib1Imageoffset; //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the structure.
uint32_t reserved[4]; //0x3C
};
@@ -4540,7 +4540,7 @@ struct gop_lib1_content {
/*
***************************************************************************
Scratch Register definitions
- Each number below indicates which scratch regiser request, Active and
+ Each number below indicates which scratch register request, Active and
Connect all share the same definitions as display_device_tag defines
***************************************************************************
*/
diff --git a/drivers/gpu/drm/amd/include/dm_pp_interface.h b/drivers/gpu/drm/amd/include/dm_pp_interface.h
index acd1cef61b7c..349544504c93 100644
--- a/drivers/gpu/drm/amd/include/dm_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/dm_pp_interface.h
@@ -65,6 +65,7 @@ struct single_display_configuration {
uint32_t view_resolution_cy;
enum amd_pp_display_config_type displayconfigtype;
uint32_t vertical_refresh; /* for active display */
+ uint32_t pixel_clock; /* Pixel clock in KHz (for HDMI only: normalized) */
};
#define MAX_NUM_DISPLAY 32
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index e2b1ea7467b0..2b0cdb2a2775 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -30,6 +30,12 @@ extern const struct amdgpu_ip_block_version smu_v12_0_ip_block;
extern const struct amdgpu_ip_block_version smu_v13_0_ip_block;
extern const struct amdgpu_ip_block_version smu_v14_0_ip_block;
+enum smu_temp_metric_type {
+ SMU_TEMP_METRIC_BASEBOARD,
+ SMU_TEMP_METRIC_GPUBOARD,
+ SMU_TEMP_METRIC_MAX,
+};
+
enum smu_event_type {
SMU_EVENT_RESET_COMPLETE = 0,
};
@@ -156,6 +162,10 @@ enum amd_pp_sensors {
AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK,
AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK,
AMDGPU_PP_SENSOR_VCN_LOAD,
+ AMDGPU_PP_SENSOR_NODEPOWERLIMIT,
+ AMDGPU_PP_SENSOR_NODEPOWER,
+ AMDGPU_PP_SENSOR_GPPTRESIDENCY,
+ AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT,
};
enum amd_pp_task {
@@ -496,6 +506,8 @@ struct amd_pm_funcs {
int (*set_df_cstate)(void *handle, enum pp_df_cstate state);
int (*set_xgmi_pstate)(void *handle, uint32_t pstate);
ssize_t (*get_gpu_metrics)(void *handle, void **table);
+ ssize_t (*get_temp_metrics)(void *handle, enum smu_temp_metric_type type, void *table);
+ bool (*temp_metrics_is_supported)(void *handle, enum smu_temp_metric_type type);
ssize_t (*get_xcp_metrics)(void *handle, int xcp_id, void *table);
ssize_t (*get_pm_metrics)(void *handle, void *pmmetrics, size_t size);
int (*set_watermarks_for_clock_ranges)(void *handle,
@@ -1595,6 +1607,79 @@ struct amdgpu_pm_metrics {
uint8_t data[];
};
+enum amdgpu_vr_temp {
+ AMDGPU_VDDCR_VDD0_TEMP,
+ AMDGPU_VDDCR_VDD1_TEMP,
+ AMDGPU_VDDCR_VDD2_TEMP,
+ AMDGPU_VDDCR_VDD3_TEMP,
+ AMDGPU_VDDCR_SOC_A_TEMP,
+ AMDGPU_VDDCR_SOC_C_TEMP,
+ AMDGPU_VDDCR_SOCIO_A_TEMP,
+ AMDGPU_VDDCR_SOCIO_C_TEMP,
+ AMDGPU_VDD_085_HBM_TEMP,
+ AMDGPU_VDDCR_11_HBM_B_TEMP,
+ AMDGPU_VDDCR_11_HBM_D_TEMP,
+ AMDGPU_VDD_USR_TEMP,
+ AMDGPU_VDDIO_11_E32_TEMP,
+ AMDGPU_VR_MAX_TEMP_ENTRIES,
+};
+
+enum amdgpu_system_temp {
+ AMDGPU_UBB_FPGA_TEMP,
+ AMDGPU_UBB_FRONT_TEMP,
+ AMDGPU_UBB_BACK_TEMP,
+ AMDGPU_UBB_OAM7_TEMP,
+ AMDGPU_UBB_IBC_TEMP,
+ AMDGPU_UBB_UFPGA_TEMP,
+ AMDGPU_UBB_OAM1_TEMP,
+ AMDGPU_OAM_0_1_HSC_TEMP,
+ AMDGPU_OAM_2_3_HSC_TEMP,
+ AMDGPU_OAM_4_5_HSC_TEMP,
+ AMDGPU_OAM_6_7_HSC_TEMP,
+ AMDGPU_UBB_FPGA_0V72_VR_TEMP,
+ AMDGPU_UBB_FPGA_3V3_VR_TEMP,
+ AMDGPU_RETIMER_0_1_2_3_1V2_VR_TEMP,
+ AMDGPU_RETIMER_4_5_6_7_1V2_VR_TEMP,
+ AMDGPU_RETIMER_0_1_0V9_VR_TEMP,
+ AMDGPU_RETIMER_4_5_0V9_VR_TEMP,
+ AMDGPU_RETIMER_2_3_0V9_VR_TEMP,
+ AMDGPU_RETIMER_6_7_0V9_VR_TEMP,
+ AMDGPU_OAM_0_1_2_3_3V3_VR_TEMP,
+ AMDGPU_OAM_4_5_6_7_3V3_VR_TEMP,
+ AMDGPU_IBC_HSC_TEMP,
+ AMDGPU_IBC_TEMP,
+ AMDGPU_SYSTEM_MAX_TEMP_ENTRIES = 32,
+};
+
+enum amdgpu_node_temp {
+ AMDGPU_RETIMER_X_TEMP,
+ AMDGPU_OAM_X_IBC_TEMP,
+ AMDGPU_OAM_X_IBC_2_TEMP,
+ AMDGPU_OAM_X_VDD18_VR_TEMP,
+ AMDGPU_OAM_X_04_HBM_B_VR_TEMP,
+ AMDGPU_OAM_X_04_HBM_D_VR_TEMP,
+ AMDGPU_NODE_MAX_TEMP_ENTRIES = 12,
+};
+
+struct amdgpu_gpuboard_temp_metrics_v1_0 {
+ struct metrics_table_header common_header;
+ uint16_t label_version;
+ uint16_t node_id;
+ uint64_t accumulation_counter;
+ /* Encoded temperature in Celsius: bits 24:31 are the sensor id, bits 0:23 the temp value */
+ uint32_t node_temp[AMDGPU_NODE_MAX_TEMP_ENTRIES];
+ uint32_t vr_temp[AMDGPU_VR_MAX_TEMP_ENTRIES];
+};
+
+struct amdgpu_baseboard_temp_metrics_v1_0 {
+ struct metrics_table_header common_header;
+ uint16_t label_version;
+ uint16_t node_id;
+ uint64_t accumulation_counter;
+ /* Encoded temperature in Celsius: bits 24:31 are the sensor id, bits 0:23 the temp value */
+ uint32_t system_temp[AMDGPU_SYSTEM_MAX_TEMP_ENTRIES];
+};
+
struct amdgpu_partition_metrics_v1_0 {
struct metrics_table_header common_header;
/* Current clocks (Mhz) */
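
Given the encoding noted in the comments above, a consumer splits each packed temperature word with two small helpers; an illustrative sketch:

    /* bits 24:31 carry the sensor id, bits 0:23 the temperature value */
    static inline uint8_t temp_sensor_id(uint32_t packed)
    {
        return (packed >> 24) & 0xff;
    }

    static inline uint32_t temp_sensor_value(uint32_t packed)
    {
        return packed & 0xffffff;
    }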
diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
index 15680c3f4970..ab1cfc92dbeb 100644
--- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
@@ -238,7 +238,8 @@ union MESAPI_SET_HW_RESOURCES {
uint32_t enable_mes_sch_stb_log : 1;
uint32_t limit_single_process : 1;
uint32_t is_strix_tmz_wa_enabled :1;
- uint32_t reserved : 13;
+ uint32_t enable_lr_compute_wa : 1;
+ uint32_t reserved : 12;
};
uint32_t uint32_t_all;
};
diff --git a/drivers/gpu/drm/amd/include/mes_v12_api_def.h b/drivers/gpu/drm/amd/include/mes_v12_api_def.h
index d85ffab2aff9..69611c7e30e3 100644
--- a/drivers/gpu/drm/amd/include/mes_v12_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v12_api_def.h
@@ -66,6 +66,7 @@ enum MES_SCH_API_OPCODE {
MES_SCH_API_SET_SE_MODE = 17,
MES_SCH_API_SET_GANG_SUBMIT = 18,
MES_SCH_API_SET_HW_RSRC_1 = 19,
+ MES_SCH_API_INV_TLBS = 20,
MES_SCH_API_MAX = 0xFF
};
@@ -286,7 +287,8 @@ union MESAPI_SET_HW_RESOURCES {
uint32_t limit_single_process : 1;
uint32_t unmapped_doorbell_handling: 2;
uint32_t enable_mes_fence_int: 1;
- uint32_t reserved : 10;
+ uint32_t enable_lr_compute_wa : 1;
+ uint32_t reserved : 9;
};
uint32_t uint32_all;
};
@@ -870,6 +872,35 @@ union MESAPI__SET_GANG_SUBMIT {
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
};
+/*
+ * @inv_sel 0 - select pasid as input for the invalidation, 1 - select vmid
+ * @flush_type 0 - old style, 1 - light weight, 2 - heavyweight, 3 - heavyweight2
+ * @inv_sel_id the specific pasid when inv_sel is 0, the specific vmid when inv_sel is 1
+ * @hub_id 0 - gc_hub, 1 - mm_hub
+ */
+struct INV_TLBS {
+ uint8_t inv_sel;
+ uint8_t flush_type;
+ uint16_t inv_sel_id;
+ uint32_t hub_id;
+ /* If the following two inv_range settings are both 0, the whole VM will be
+ * invalidated; otherwise only the required range is invalidated
+ */
+ uint64_t inv_range_va_start;
+ uint64_t inv_range_size;
+ uint64_t reserved;
+};
+
+union MESAPI__INV_TLBS {
+ struct {
+ union MES_API_HEADER header;
+ struct MES_API_STATUS api_status;
+ struct INV_TLBS invalidate_tlbs;
+ };
+
+ uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
+};
+
#pragma pack(pop)
#endif
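
A hypothetical fill of the new invalidation request (one PASID, a 2 MiB range on the GC hub); pasid and va are caller-provided placeholders, and the MES header/API submission around it is elided:

    union MESAPI__INV_TLBS api = {0};

    api.invalidate_tlbs.inv_sel = 0;        /* select by pasid */
    api.invalidate_tlbs.flush_type = 2;     /* heavyweight */
    api.invalidate_tlbs.inv_sel_id = pasid;
    api.invalidate_tlbs.hub_id = 0;         /* gc_hub */
    api.invalidate_tlbs.inv_range_va_start = va;
    api.invalidate_tlbs.inv_range_size = 2ull << 20; /* non-zero: ranged flush */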
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 71d986dd7a6e..518d07afc7df 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -764,10 +764,6 @@ int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev)
ret = smu_send_rma_reason(smu);
mutex_unlock(&adev->pm.mutex);
- if (adev->cper.enabled)
- if (amdgpu_cper_generate_bp_threshold_record(adev))
- dev_warn(adev->dev, "fail to generate bad page threshold cper records\n");
-
return ret;
}
@@ -824,6 +820,21 @@ int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask)
return ret;
}
+bool amdgpu_dpm_reset_vcn_is_supported(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ bool ret;
+
+ if (!is_support_sw_smu(adev))
+ return false;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_reset_vcn_is_supported(smu);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
enum pp_clock_type type,
uint32_t *min,
@@ -2038,6 +2049,66 @@ int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
}
/**
+ * amdgpu_dpm_get_temp_metrics - Retrieve metrics for a specific temperature
+ * type
+ * @adev: Pointer to the device.
+ * @type: Identifier for the temperature type metrics to be fetched.
+ * @table: Pointer to a buffer where the metrics will be stored. If NULL, the
+ * function returns the size of the metrics structure.
+ *
+ * This function retrieves metrics for a specific temperature type. If the
+ * table parameter is NULL, the function returns the size of the metrics
+ * structure without populating it.
+ *
+ * Return: Size of the metrics structure on success, or a negative error code on failure.
+ */
+ssize_t amdgpu_dpm_get_temp_metrics(struct amdgpu_device *adev,
+ enum smu_temp_metric_type type, void *table)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret;
+
+ if (!pp_funcs->get_temp_metrics ||
+ !amdgpu_dpm_is_temp_metrics_supported(adev, type))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_temp_metrics(adev->powerplay.pp_handle, type, table);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
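A usage sketch of the two-call contract documented above: query the size with a NULL table, then allocate and fetch. The kvmalloc()/kvfree() pairing is an illustrative choice:

    ssize_t size;
    void *table;

    size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_GPUBOARD, NULL);
    if (size <= 0)
        return size;

    table = kvmalloc(size, GFP_KERNEL);
    if (!table)
        return -ENOMEM;

    size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_GPUBOARD, table);
    /* ... parse the metrics table ... */
    kvfree(table);
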
+/**
+ * amdgpu_dpm_is_temp_metrics_supported - Check whether a specific temperature
+ * metrics type is supported
+ * @adev: Pointer to the device.
+ * @type: Identifier for the temperature type metrics to be fetched.
+ *
+ * This function checks whether the given temperature metrics type is supported.
+ *
+ * Return: True if the metrics type is supported, false otherwise.
+ */
+bool amdgpu_dpm_is_temp_metrics_supported(struct amdgpu_device *adev,
+ enum smu_temp_metric_type type)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ bool support_temp_metrics = false;
+
+ if (!pp_funcs->temp_metrics_is_supported)
+ return support_temp_metrics;
+
+ if (is_support_sw_smu(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ support_temp_metrics =
+ pp_funcs->temp_metrics_is_supported(adev->powerplay.pp_handle, type);
+ mutex_unlock(&adev->pm.mutex);
+ }
+
+ return support_temp_metrics;
+}
+
+/**
* amdgpu_dpm_get_xcp_metrics - Retrieve metrics for a specific compute
* partition
* @adev: Pointer to the device.
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c
index 42efe838fa85..b5e9c3ecf703 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c
@@ -27,69 +27,69 @@
#include "amdgpu_smu.h"
#include "amdgpu_dpm_internal.h"
-void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
+void amdgpu_dpm_get_display_cfg(struct amdgpu_device *adev)
{
struct drm_device *ddev = adev_to_drm(adev);
+ struct amd_pp_display_configuration *cfg = &adev->pm.pm_display_cfg;
+ struct single_display_configuration *display_cfg;
struct drm_crtc *crtc;
struct amdgpu_crtc *amdgpu_crtc;
+ struct amdgpu_connector *conn;
+ int num_crtcs = 0;
+ int vrefresh;
+ u32 vblank_in_pixels, vblank_time_us;
+
+ cfg->min_vblank_time = 0xffffffff; /* if the displays are off, vblank time is max */
- adev->pm.dpm.new_active_crtcs = 0;
- adev->pm.dpm.new_active_crtc_count = 0;
if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc,
- &ddev->mode_config.crtc_list, head) {
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (amdgpu_crtc->enabled) {
- adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
- adev->pm.dpm.new_active_crtc_count++;
- }
- }
- }
-}
-u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
-{
- struct drm_device *dev = adev_to_drm(adev);
- struct drm_crtc *crtc;
- struct amdgpu_crtc *amdgpu_crtc;
- u32 vblank_in_pixels;
- u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
+ /* The array should only contain active displays. */
+ if (!amdgpu_crtc->enabled)
+ continue;
+
+ conn = to_amdgpu_connector(amdgpu_crtc->connector);
+ display_cfg = &adev->pm.pm_display_cfg.displays[num_crtcs++];
+
+ if (amdgpu_crtc->hw_mode.clock) {
+ vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
- if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
vblank_in_pixels =
amdgpu_crtc->hw_mode.crtc_htotal *
(amdgpu_crtc->hw_mode.crtc_vblank_end -
amdgpu_crtc->hw_mode.crtc_vdisplay +
(amdgpu_crtc->v_border * 2));
- vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
- break;
- }
- }
- }
+ vblank_time_us =
+ vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
- return vblank_time_us;
-}
+ /* The legacy (non-DC) code has issues with mclk switching
+ * with refresh rates over 120 Hz. Disable mclk switching.
+ */
+ if (vrefresh > 120)
+ vblank_time_us = 0;
-u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
-{
- struct drm_device *dev = adev_to_drm(adev);
- struct drm_crtc *crtc;
- struct amdgpu_crtc *amdgpu_crtc;
- u32 vrefresh = 0;
+ /* Find minimum vblank time. */
+ if (vblank_time_us < cfg->min_vblank_time)
+ cfg->min_vblank_time = vblank_time_us;
- if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
- vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
- break;
+ /* Find vertical refresh rate of first active display. */
+ if (!cfg->vrefresh)
+ cfg->vrefresh = vrefresh;
}
+
+ if (amdgpu_crtc->crtc_id < cfg->crtc_index) {
+ /* Find first active CRTC and its line time. */
+ cfg->crtc_index = amdgpu_crtc->crtc_id;
+ cfg->line_time_in_us = amdgpu_crtc->line_time;
+ }
+
+ display_cfg->controller_id = amdgpu_crtc->crtc_id;
+ display_cfg->pixel_clock = conn->pixelclock_for_modeset;
}
}
- return vrefresh;
+ cfg->display_clk = adev->clock.default_dispclk;
+ cfg->num_display = num_crtcs;
}
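
To make the arithmetic concrete: for a 1920x1080@60 CEA mode (htotal 2200, crtc_vblank_end 1125, crtc_vdisplay 1080, pixel clock 148500 kHz, no borders) the rewritten helper computes vblank_in_pixels = 2200 * (1125 - 1080) = 99000 and vblank_time_us = 99000 * 1000 / 148500 = 666 us; a mode above 120 Hz instead forces vblank_time_us to 0, which disables mclk switching on the legacy path.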
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 4b64851fdb42..b5fbb0fd1dc0 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -110,9 +110,10 @@ static int amdgpu_pm_dev_state_check(struct amdgpu_device *adev, bool runpm)
bool runpm_check = runpm ? adev->in_runpm : false;
if (amdgpu_in_reset(adev))
- return -EPERM;
+ return -EBUSY;
+
if (adev->in_suspend && !runpm_check)
- return -EPERM;
+ return -EBUSY;
return 0;
}
@@ -1420,9 +1421,9 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
return -EINVAL;
}
-static int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev,
- enum amd_pp_sensors sensor,
- void *query)
+static int amdgpu_pm_get_sensor_generic(struct amdgpu_device *adev,
+ enum amd_pp_sensors sensor,
+ void *query)
{
int r, size = sizeof(uint32_t);
@@ -1455,7 +1456,7 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
unsigned int value;
int r;
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value);
if (r)
return r;
@@ -1479,7 +1480,7 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
unsigned int value;
int r;
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_LOAD, &value);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_LOAD, &value);
if (r)
return r;
@@ -1503,7 +1504,7 @@ static ssize_t amdgpu_get_vcn_busy_percent(struct device *dev,
unsigned int value;
int r;
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VCN_LOAD, &value);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VCN_LOAD, &value);
if (r)
return r;
@@ -1782,7 +1783,7 @@ static int amdgpu_show_powershift_percent(struct device *dev,
uint32_t ss_power;
int r = 0, i;
- r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power);
+ r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&ss_power);
if (r == -EOPNOTSUPP) {
/* sensor not available on dGPU, try to read from APU */
adev = NULL;
@@ -1795,7 +1796,7 @@ static int amdgpu_show_powershift_percent(struct device *dev,
}
mutex_unlock(&mgpu_info.mutex);
if (adev)
- r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power);
+ r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&ss_power);
}
if (r)
@@ -1905,11 +1906,11 @@ static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
if (!amdgpu_device_supports_smart_shift(adev))
*states = ATTR_STATE_UNSUPPORTED;
- else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
- (void *)&ss_power))
+ else if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
+ (void *)&ss_power))
*states = ATTR_STATE_UNSUPPORTED;
- else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
- (void *)&ss_power))
+ else if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
+ (void *)&ss_power))
*states = ATTR_STATE_UNSUPPORTED;
return 0;
@@ -2073,6 +2074,265 @@ static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amd
return 0;
}
+/**
+ * DOC: board
+ *
+ * Certain SOCs can support various board attributes reporting. This is useful
+ * for user applications to monitor various board related attributes.
+ *
+ * The amdgpu driver provides a sysfs API for reporting board attributes. Presently,
+ * seven types of attributes are reported. Baseboard temperature and
+ * gpu board temperature are reported as binary files. Npm status, current node power limit,
+ * max node power limit, node power and global ppt residency are reported as ASCII text files.
+ *
+ * .. code-block:: console
+ *
+ * hexdump /sys/bus/pci/devices/.../board/baseboard_temp
+ *
+ * hexdump /sys/bus/pci/devices/.../board/gpuboard_temp
+ *
+ * hexdump /sys/bus/pci/devices/.../board/npm_status
+ *
+ * hexdump /sys/bus/pci/devices/.../board/cur_node_power_limit
+ *
+ * hexdump /sys/bus/pci/devices/.../board/max_node_power_limit
+ *
+ * hexdump /sys/bus/pci/devices/.../board/node_power
+ *
+ * hexdump /sys/bus/pci/devices/.../board/global_ppt_resid
+ */
+
+/**
+ * DOC: baseboard_temp
+ *
+ * The amdgpu driver provides a sysfs API for retrieving current baseboard
+ * temperature metrics data. The file baseboard_temp is used for this.
+ * Reading the file will dump all the current baseboard temperature metrics data.
+ */
+static ssize_t amdgpu_get_baseboard_temp_metrics(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ ssize_t size;
+ int ret;
+
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
+
+ size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_BASEBOARD, NULL);
+ if (size <= 0)
+ goto out;
+ if (size >= PAGE_SIZE) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_BASEBOARD, buf);
+
+out:
+ amdgpu_pm_put_access(adev);
+
+ if (ret)
+ return ret;
+
+ return size;
+}
+
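+ (Aside, not part of the patch: this handler, like the gpuboard one below, uses a
+ two-call convention: calling amdgpu_dpm_get_temp_metrics() with a NULL table
+ pointer returns only the size of the metrics table, and a second call with a real
+ buffer performs the actual read. A minimal caller sketch, with the buffer handling
+ assumed for illustration:
+
+	ssize_t size;
+
+	/* size query: a NULL table asks "how large is the metrics table?" */
+	size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_BASEBOARD, NULL);
+	if (size > 0 && size < PAGE_SIZE)
+		/* data fetch: same call, now with a buffer of at least 'size' bytes */
+		size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_BASEBOARD, buf);
+ )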
+/**
+ * DOC: gpuboard_temp
+ *
+ * The amdgpu driver provides a sysfs API for retrieving current gpuboard
+ * temperature metrics data. The file gpuboard_temp is used for this.
+ * Reading the file will dump all the current gpuboard temperature metrics data.
+ */
+static ssize_t amdgpu_get_gpuboard_temp_metrics(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ ssize_t size;
+ int ret;
+
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
+
+ size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_GPUBOARD, NULL);
+ if (size <= 0)
+ goto out;
+ if (size >= PAGE_SIZE) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_GPUBOARD, buf);
+
+out:
+ amdgpu_pm_put_access(adev);
+
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+/**
+ * DOC: cur_node_power_limit
+ *
+ * The amdgpu driver provides a sysfs API for retrieving current node power limit.
+ * The file cur_node_power_limit is used for this.
+ */
+static ssize_t amdgpu_show_cur_node_power_limit(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ u32 nplimit;
+ int r;
+
+ /* get the current node power limit */
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWERLIMIT,
+ (void *)&nplimit);
+ if (r)
+ return r;
+
+ return sysfs_emit(buf, "%u\n", nplimit);
+}
+
+/**
+ * DOC: node_power
+ *
+ * The amdgpu driver provides a sysfs API for retrieving current node power.
+ * The file node_power is used for this.
+ */
+static ssize_t amdgpu_show_node_power(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ u32 npower;
+ int r;
+
+ /* get the node power */
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWER,
+ (void *)&npower);
+ if (r)
+ return r;
+
+ return sysfs_emit(buf, "%u\n", npower);
+}
+
+/**
+ * DOC: npm_status
+ *
+ * The amdgpu driver provides a sysfs API for retrieving current node power management status.
+ * The file npm_status is used for this. It shows the status as enabled or disabled based on the
+ * current node power value. If node power is zero, the status is disabled; otherwise it is enabled.
+ */
+static ssize_t amdgpu_show_npm_status(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ u32 npower;
+ int r;
+
+ /* get the node power */
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWER,
+ (void *)&npower);
+ if (r)
+ return r;
+
+ return sysfs_emit(buf, "%s\n", npower ? "enabled" : "disabled");
+}
+
+/**
+ * DOC: global_ppt_resid
+ *
+ * The amdgpu driver provides a sysfs API for retrieving global ppt residency.
+ * The file global_ppt_resid is used for this.
+ */
+static ssize_t amdgpu_show_global_ppt_resid(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ u32 gpptresid;
+ int r;
+
+ /* get the global ppt residency */
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPPTRESIDENCY,
+ (void *)&gpptresid);
+ if (r)
+ return r;
+
+ return sysfs_emit(buf, "%u\n", gpptresid);
+}
+
+/**
+ * DOC: max_node_power_limit
+ *
+ * The amdgpu driver provides a sysfs API for retrieving maximum node power limit.
+ * The file max_node_power_limit is used for this.
+ */
+static ssize_t amdgpu_show_max_node_power_limit(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ u32 max_nplimit;
+ int r;
+
+ /* get the max node power limit */
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT,
+ (void *)&max_nplimit);
+ if (r)
+ return r;
+
+ return sysfs_emit(buf, "%u\n", max_nplimit);
+}
+
+static DEVICE_ATTR(baseboard_temp, 0444, amdgpu_get_baseboard_temp_metrics, NULL);
+static DEVICE_ATTR(gpuboard_temp, 0444, amdgpu_get_gpuboard_temp_metrics, NULL);
+static DEVICE_ATTR(cur_node_power_limit, 0444, amdgpu_show_cur_node_power_limit, NULL);
+static DEVICE_ATTR(node_power, 0444, amdgpu_show_node_power, NULL);
+static DEVICE_ATTR(global_ppt_resid, 0444, amdgpu_show_global_ppt_resid, NULL);
+static DEVICE_ATTR(max_node_power_limit, 0444, amdgpu_show_max_node_power_limit, NULL);
+static DEVICE_ATTR(npm_status, 0444, amdgpu_show_npm_status, NULL);
+
+static struct attribute *board_attrs[] = {
+ &dev_attr_baseboard_temp.attr,
+ &dev_attr_gpuboard_temp.attr,
+ NULL
+};
+
+static umode_t amdgpu_board_attr_visible(struct kobject *kobj, struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ if (attr == &dev_attr_baseboard_temp.attr) {
+ if (!amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_BASEBOARD))
+ return 0;
+ }
+
+ if (attr == &dev_attr_gpuboard_temp.attr) {
+ if (!amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_GPUBOARD))
+ return 0;
+ }
+
+ return attr->mode;
+}
+
+const struct attribute_group amdgpu_board_attr_group = {
+ .name = "board",
+ .attrs = board_attrs,
+ .is_visible = amdgpu_board_attr_visible,
+};
+
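+ (Aside, not part of the patch: for completeness, a small userspace sketch that
+ consumes one of the ASCII board attributes; the PCI path is a placeholder and must
+ match the actual device:
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		/* node_power is one of the ASCII files in the board group */
+		FILE *f = fopen("/sys/bus/pci/devices/0000:03:00.0/board/node_power", "r");
+		unsigned int npower;
+
+		if (f && fscanf(f, "%u", &npower) == 1)
+			printf("node power: %u\n", npower);
+		if (f)
+			fclose(f);
+		return 0;
+	}
+ )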
/* pm policy attributes */
struct amdgpu_pm_policy_attr {
struct device_attribute dev_attr;
@@ -2507,18 +2767,18 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
switch (channel) {
case PP_TEMP_JUNCTION:
/* get current junction temperature */
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
- (void *)&temp);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
+ (void *)&temp);
break;
case PP_TEMP_EDGE:
/* get current edge temperature */
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
- (void *)&temp);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
+ (void *)&temp);
break;
case PP_TEMP_MEM:
/* get current memory temperature */
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
- (void *)&temp);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
+ (void *)&temp);
break;
default:
r = -EINVAL;
@@ -2780,8 +3040,8 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
u32 min_rpm = 0;
int r;
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
- (void *)&min_rpm);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
+ (void *)&min_rpm);
if (r)
return r;
@@ -2797,8 +3057,8 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
u32 max_rpm = 0;
int r;
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
- (void *)&max_rpm);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
+ (void *)&max_rpm);
if (r)
return r;
@@ -2931,8 +3191,8 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
int r;
/* get the voltage */
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDGFX,
- (void *)&vddgfx);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDGFX,
+ (void *)&vddgfx);
if (r)
return r;
@@ -2948,8 +3208,8 @@ static ssize_t amdgpu_hwmon_show_vddboard(struct device *dev,
int r;
/* get the voltage */
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
- (void *)&vddboard);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
+ (void *)&vddboard);
if (r)
return r;
@@ -2982,8 +3242,8 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
return -EINVAL;
/* get the voltage */
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDNB,
- (void *)&vddnb);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDNB,
+ (void *)&vddnb);
if (r)
return r;
@@ -3005,7 +3265,7 @@ static int amdgpu_hwmon_get_power(struct device *dev,
u32 query = 0;
int r;
- r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&query);
+ r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&query);
if (r)
return r;
@@ -3125,9 +3385,6 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
int err;
u32 value;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
err = kstrtou32(buf, 10, &value);
if (err)
return err;
@@ -3158,8 +3415,8 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
int r;
/* get the sclk */
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
- (void *)&sclk);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
+ (void *)&sclk);
if (r)
return r;
@@ -3182,8 +3439,8 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
int r;
/* get the sclk */
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
- (void *)&mclk);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
+ (void *)&mclk);
if (r)
return r;
@@ -3458,14 +3715,20 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
effective_mode &= ~S_IWUSR;
/* not implemented yet for APUs other than GC 10.3.1 (vangogh) and 9.4.3 */
- if (((adev->family == AMDGPU_FAMILY_SI) ||
- ((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(10, 3, 1)) &&
- (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4)))) &&
- (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
- attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
- attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
- attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr))
- return 0;
+ if (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
+ attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr) {
+ if (adev->family == AMDGPU_FAMILY_SI ||
+ ((adev->flags & AMD_IS_APU) && gc_ver != IP_VERSION(10, 3, 1) &&
+ (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4))) ||
+ (amdgpu_sriov_vf(adev) && gc_ver == IP_VERSION(11, 0, 3)))
+ return 0;
+ }
+
+ if (attr == &sensor_dev_attr_power1_cap.dev_attr.attr &&
+ amdgpu_virt_cap_is_rw(&adev->virt.virt_caps, AMDGPU_VIRT_CAP_POWER_LIMIT))
+ effective_mode |= S_IWUSR;
/* not implemented yet for APUs having < GC 9.3.0 (Renoir) */
if (((adev->family == AMDGPU_FAMILY_SI) ||
@@ -3475,10 +3738,12 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
/* not all products support both average and instantaneous */
if (attr == &sensor_dev_attr_power1_average.dev_attr.attr &&
- amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&tmp) == -EOPNOTSUPP)
+ amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER,
+ (void *)&tmp) == -EOPNOTSUPP)
return 0;
if (attr == &sensor_dev_attr_power1_input.dev_attr.attr &&
- amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&tmp) == -EOPNOTSUPP)
+ amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER,
+ (void *)&tmp) == -EOPNOTSUPP)
return 0;
/* hide max/min values if we can't both query and manage the fan */
@@ -3517,8 +3782,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
/* only few boards support vddboard */
if ((attr == &sensor_dev_attr_in2_input.dev_attr.attr ||
attr == &sensor_dev_attr_in2_label.dev_attr.attr) &&
- amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
- (void *)&tmp) == -EOPNOTSUPP)
+ amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
+ (void *)&tmp) == -EOPNOTSUPP)
return 0;
/* no mclk on APUs other than gc 9,4,3*/
@@ -4400,6 +4665,7 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
enum amdgpu_sriov_vf_mode mode;
uint32_t mask = 0;
+ uint32_t tmp;
int ret;
if (adev->pm.sysfs_initialized)
@@ -4461,6 +4727,28 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
goto err_out0;
}
+ if (amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_GPUBOARD)) {
+ ret = devm_device_add_group(adev->dev,
+ &amdgpu_board_attr_group);
+ if (ret)
+ goto err_out0;
+ if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT,
+ (void *)&tmp) != -EOPNOTSUPP) {
+ sysfs_add_file_to_group(&adev->dev->kobj,
+ &dev_attr_cur_node_power_limit.attr,
+ amdgpu_board_attr_group.name);
+ sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_node_power.attr,
+ amdgpu_board_attr_group.name);
+ sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_global_ppt_resid.attr,
+ amdgpu_board_attr_group.name);
+ sysfs_add_file_to_group(&adev->dev->kobj,
+ &dev_attr_max_node_power_limit.attr,
+ amdgpu_board_attr_group.name);
+ sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_npm_status.attr,
+ amdgpu_board_attr_group.name);
+ }
+ }
+
adev->pm.sysfs_initialized = true;
return 0;
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 768317ee1486..65c1d98af26c 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -263,10 +263,6 @@ struct amdgpu_dpm {
u32 voltage_response_time;
u32 backbias_response_time;
void *priv;
- u32 new_active_crtcs;
- int new_active_crtc_count;
- u32 current_active_crtcs;
- int current_active_crtc_count;
struct amdgpu_dpm_dynamic_state dyn_state;
struct amdgpu_dpm_fan fan;
u32 tdp_limit;
@@ -526,6 +522,8 @@ int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table);
ssize_t amdgpu_dpm_get_xcp_metrics(struct amdgpu_device *adev, int xcp_id,
void *table);
+ssize_t amdgpu_dpm_get_temp_metrics(struct amdgpu_device *adev,
+ enum smu_temp_metric_type type, void *table);
/**
* @get_pm_metrics: Get one snapshot of power management metrics from PMFW. The
@@ -613,5 +611,8 @@ ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask);
bool amdgpu_dpm_reset_sdma_is_supported(struct amdgpu_device *adev);
int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask);
+bool amdgpu_dpm_reset_vcn_is_supported(struct amdgpu_device *adev);
+bool amdgpu_dpm_is_temp_metrics_supported(struct amdgpu_device *adev,
+ enum smu_temp_metric_type type);
#endif
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h
index 5c2a89f0d5d5..cc6d7ba040e9 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h
@@ -23,10 +23,6 @@
#ifndef __AMDGPU_DPM_INTERNAL_H__
#define __AMDGPU_DPM_INTERNAL_H__
-void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev);
-
-u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev);
-
-u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev);
+void amdgpu_dpm_get_display_cfg(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index 307ebf7e3226..33eb85dd68e9 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -2299,7 +2299,7 @@ static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
if (pi->sys_info.nb_dpm_enable) {
force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
- pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) ||
+ pi->video_start || (adev->pm.pm_display_cfg.num_display >= 3) ||
pi->disable_nb_ps3_in_battery;
ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
ps->dpm0_pg_nb_ps_hi = 0x2;
@@ -2358,7 +2358,7 @@ static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev)
return 0;
force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
- (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);
+ (adev->pm.pm_display_cfg.num_display >= 3) || pi->video_start);
if (force_high) {
for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
index ea3ace882a10..c7ed0b457129 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
@@ -771,8 +771,7 @@ static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
int i;
struct amdgpu_ps *ps;
u32 ui_class;
- bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
- true : false;
+ bool single_display = adev->pm.pm_display_cfg.num_display < 2;
/* check if the vblank period is too short to adjust the mclk */
if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
@@ -945,9 +944,6 @@ static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
amdgpu_dpm_post_set_power_state(adev);
- adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
- adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
-
if (pp_funcs->force_performance_level) {
if (adev->pm.dpm.thermal_active) {
enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
@@ -968,7 +964,8 @@ void amdgpu_legacy_dpm_compute_clocks(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_dpm_get_active_displays(adev);
+ if (!adev->dc_enabled)
+ amdgpu_dpm_get_display_cfg(adev);
amdgpu_dpm_change_power_state_locked(adev);
}
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index 52e732be59e3..cf9932e68055 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -3081,11 +3081,17 @@ static int si_get_vce_clock_voltage(struct amdgpu_device *adev,
static bool si_dpm_vblank_too_short(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
+ u32 vblank_time = adev->pm.pm_display_cfg.min_vblank_time;
/* we never hit the non-gddr5 limit so disable it */
u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
- if (vblank_time < switch_limit)
+ /* Consider zero vblank time too short and disable MCLK switching.
+ * Note that the vblank time is set to maximum when no displays are attached,
+ * so we'll still enable MCLK switching in that case.
+ */
+ if (vblank_time == 0)
+ return true;
+ else if (vblank_time < switch_limit)
return true;
else
return false;
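 (Aside, not part of the patch: since every branch simply returns a boolean, the
 same check can be written as one expression; an equivalent sketch:

	return vblank_time == 0 || vblank_time < switch_limit;
 )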
@@ -3441,6 +3447,8 @@ static void rv770_get_engine_memory_ss(struct amdgpu_device *adev)
static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
struct amdgpu_ps *rps)
{
+ const struct amd_pp_display_configuration *display_cfg =
+ &adev->pm.pm_display_cfg;
struct si_ps *ps = si_get_ps(rps);
struct amdgpu_clock_and_voltage_limits *max_limits;
bool disable_mclk_switching = false;
@@ -3449,6 +3457,7 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
u16 vddc, vddci, min_vce_voltage = 0;
u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
u32 max_sclk = 0, max_mclk = 0;
+ u32 high_pixelclock_count = 0;
int i;
if (adev->asic_type == CHIP_HAINAN) {
@@ -3476,6 +3485,30 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
}
}
+ /* We define "high pixelclock" for SI as higher than necessary for 4K 30Hz.
+ * For example, 4K 60Hz and 1080p 144Hz fall into this category.
+ * Find number of such displays connected.
+ */
+ for (i = 0; i < display_cfg->num_display; i++) {
+ /* The array only contains active displays. */
+ if (display_cfg->displays[i].pixel_clock > 297000)
+ high_pixelclock_count++;
+ }
+
+ /* These are some ad-hoc fixes to some issues observed with SI GPUs.
+ * They are necessary because we don't have something like dce_calcs
+ * for these GPUs to calculate bandwidth requirements.
+ */
+ if (high_pixelclock_count) {
+ /* On Oland, we observe some flickering when two 4K 60Hz
+ * displays are connected, possibly because voltage is too low.
+ * Raise the voltage by requiring a higher SCLK.
+	 * (Voltage cannot be adjusted independently without also adjusting SCLK.)
+ */
+ if (high_pixelclock_count > 1 && adev->asic_type == CHIP_OLAND)
+ disable_sclk_switching = true;
+ }
+
if (rps->vce_active) {
rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
@@ -3486,7 +3519,7 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
rps->ecclk = 0;
}
- if ((adev->pm.dpm.new_active_crtc_count > 1) ||
+ if ((adev->pm.pm_display_cfg.num_display > 1) ||
si_dpm_vblank_too_short(adev))
disable_mclk_switching = true;
@@ -3634,7 +3667,7 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
ps->performance_levels[i].mclk,
max_limits->vddc, &ps->performance_levels[i].vddc);
btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
- adev->clock.current_dispclk,
+ display_cfg->display_clk,
max_limits->vddc, &ps->performance_levels[i].vddc);
}
@@ -4159,16 +4192,16 @@ static void si_program_ds_registers(struct amdgpu_device *adev)
static void si_program_display_gap(struct amdgpu_device *adev)
{
+ const struct amd_pp_display_configuration *cfg = &adev->pm.pm_display_cfg;
u32 tmp, pipe;
- int i;
tmp = RREG32(mmCG_DISPLAY_GAP_CNTL) & ~(CG_DISPLAY_GAP_CNTL__DISP1_GAP_MASK | CG_DISPLAY_GAP_CNTL__DISP2_GAP_MASK);
- if (adev->pm.dpm.new_active_crtc_count > 0)
+ if (cfg->num_display > 0)
tmp |= R600_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT;
else
tmp |= R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT;
- if (adev->pm.dpm.new_active_crtc_count > 1)
+ if (cfg->num_display > 1)
tmp |= R600_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT;
else
tmp |= R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT;
@@ -4178,17 +4211,8 @@ static void si_program_display_gap(struct amdgpu_device *adev)
tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG);
pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT;
- if ((adev->pm.dpm.new_active_crtc_count > 0) &&
- (!(adev->pm.dpm.new_active_crtcs & (1 << pipe)))) {
- /* find the first active crtc */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (adev->pm.dpm.new_active_crtcs & (1 << i))
- break;
- }
- if (i == adev->mode_info.num_crtc)
- pipe = 0;
- else
- pipe = i;
+ if (cfg->num_display > 0 && pipe != cfg->crtc_index) {
+ pipe = cfg->crtc_index;
tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK;
tmp |= DCCG_DISP1_SLOW_SELECT(pipe);
@@ -4199,7 +4223,7 @@ static void si_program_display_gap(struct amdgpu_device *adev)
* This can be a problem on PowerXpress systems or if you want to use the card
* for offscreen rendering or compute if there are no crtcs enabled.
*/
- si_notify_smc_display_change(adev, adev->pm.dpm.new_active_crtc_count > 0);
+ si_notify_smc_display_change(adev, cfg->num_display > 0);
}
static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
@@ -5508,7 +5532,7 @@ static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
(pl->mclk <= pi->mclk_stutter_mode_threshold) &&
!eg_pi->uvd_enabled &&
(RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
- (adev->pm.dpm.new_active_crtc_count <= 2)) {
+ (adev->pm.pm_display_cfg.num_display <= 2)) {
level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN;
}
@@ -5637,14 +5661,10 @@ static int si_populate_smc_t(struct amdgpu_device *adev,
static int si_disable_ulv(struct amdgpu_device *adev)
{
- struct si_power_info *si_pi = si_get_pi(adev);
- struct si_ulv_param *ulv = &si_pi->ulv;
-
- if (ulv->supported)
- return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
+ PPSMC_Result r;
- return 0;
+ r = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV);
+ return (r == PPSMC_Result_OK) ? 0 : -EINVAL;
}
static bool si_is_state_ulv_compatible(struct amdgpu_device *adev,
@@ -5661,7 +5681,7 @@ static bool si_is_state_ulv_compatible(struct amdgpu_device *adev,
/* XXX validate against display requirements! */
for (i = 0; i < adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count; i++) {
- if (adev->clock.current_dispclk <=
+ if (adev->pm.pm_display_cfg.display_clk <=
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) {
if (ulv->pl.vddc <
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v)
@@ -5815,39 +5835,36 @@ static int si_upload_ulv_state(struct amdgpu_device *adev)
static int si_upload_smc_data(struct amdgpu_device *adev)
{
- struct amdgpu_crtc *amdgpu_crtc = NULL;
- int i;
+ const struct amd_pp_display_configuration *cfg = &adev->pm.pm_display_cfg;
+ u32 crtc_index = 0;
+ u32 mclk_change_block_cp_min = 0;
+ u32 mclk_change_block_cp_max = 0;
- if (adev->pm.dpm.new_active_crtc_count == 0)
- return 0;
+ /* When a display is plugged in, program these so that the SMC
+	 * performs MCLK switching only when it doesn't cause flickering.
+ * When no display is plugged in, there is no need to restrict
+ * MCLK switching, so program them to zero.
+ */
+ if (cfg->num_display) {
+ crtc_index = cfg->crtc_index;
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (adev->pm.dpm.new_active_crtcs & (1 << i)) {
- amdgpu_crtc = adev->mode_info.crtcs[i];
- break;
+ if (cfg->line_time_in_us) {
+ mclk_change_block_cp_min = 200 / cfg->line_time_in_us;
+ mclk_change_block_cp_max = 100 / cfg->line_time_in_us;
}
}
- if (amdgpu_crtc == NULL)
- return 0;
-
- if (amdgpu_crtc->line_time <= 0)
- return 0;
-
- if (si_write_smc_soft_register(adev,
- SI_SMC_SOFT_REGISTER_crtc_index,
- amdgpu_crtc->crtc_id) != PPSMC_Result_OK)
- return 0;
+ si_write_smc_soft_register(adev,
+ SI_SMC_SOFT_REGISTER_crtc_index,
+ crtc_index);
- if (si_write_smc_soft_register(adev,
- SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min,
- amdgpu_crtc->wm_high / amdgpu_crtc->line_time) != PPSMC_Result_OK)
- return 0;
+ si_write_smc_soft_register(adev,
+ SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min,
+ mclk_change_block_cp_min);
- if (si_write_smc_soft_register(adev,
- SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max,
- amdgpu_crtc->wm_low / amdgpu_crtc->line_time) != PPSMC_Result_OK)
- return 0;
+ si_write_smc_soft_register(adev,
+ SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max,
+ mclk_change_block_cp_max);
return 0;
}
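 (Aside, a worked example with illustrative numbers, not from the patch: for
 cfg->line_time_in_us = 20, the SMC soft registers are programmed with
 mclk_change_block_cp_min = 200 / 20 = 10 and mclk_change_block_cp_max =
 100 / 20 = 5, i.e. fixed 200 us and 100 us budgets converted into display
 line counts.)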
@@ -7954,6 +7971,7 @@ static void si_dpm_print_power_state(void *handle,
amdgpu_dpm_dbg_print_class_info(adev, rps->class, rps->class2);
amdgpu_dpm_dbg_print_cap_info(adev, rps->caps);
drm_dbg(adev_to_drm(adev), "\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+ drm_dbg(adev_to_drm(adev), "\tvce evclk: %d ecclk: %d\n", rps->evclk, rps->ecclk);
for (i = 0; i < ps->performance_level_count; i++) {
pl = &ps->performance_levels[i];
drm_dbg(adev_to_drm(adev), "\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c
index 4e65ab9e931c..281a5e377aee 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c
@@ -172,20 +172,42 @@ PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev,
{
u32 tmp;
int i;
+ int usec_timeout;
+
+ /* SMC seems to process some messages exceptionally slowly. */
+ switch (msg) {
+ case PPSMC_MSG_NoForcedLevel:
+ case PPSMC_MSG_SetEnabledLevels:
+ case PPSMC_MSG_SetForcedLevels:
+ case PPSMC_MSG_DisableULV:
+ case PPSMC_MSG_SwitchToSwState:
+ usec_timeout = 1000000; /* 1 sec */
+ break;
+ default:
+ usec_timeout = 200000; /* 200 ms */
+ break;
+ }
if (!amdgpu_si_is_smc_running(adev))
return PPSMC_Result_Failed;
WREG32(mmSMC_MESSAGE_0, msg);
- for (i = 0; i < adev->usec_timeout; i++) {
+ for (i = 0; i < usec_timeout; i++) {
tmp = RREG32(mmSMC_RESP_0);
if (tmp != 0)
break;
udelay(1);
}
- return (PPSMC_Result)RREG32(mmSMC_RESP_0);
+ tmp = RREG32(mmSMC_RESP_0);
+ if (tmp == 0) {
+ drm_warn(adev_to_drm(adev),
+ "%s timeout on message: %x (SMC_SCRATCH0: %x)\n",
+ __func__, msg, RREG32(mmSMC_SCRATCH0));
+ }
+
+ return (PPSMC_Result)tmp;
}
PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index b48a031cbba0..554492dfa3c0 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -1554,16 +1554,7 @@ static void pp_pm_compute_clocks(void *handle)
struct amdgpu_device *adev = hwmgr->adev;
if (!adev->dc_enabled) {
- amdgpu_dpm_get_active_displays(adev);
- adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
- adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
- adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
- /* we have issues with mclk switching with
- * refresh rates over 120 hz on the non-DC code.
- */
- if (adev->pm.pm_display_cfg.vrefresh > 120)
- adev->pm.pm_display_cfg.min_vblank_time = 0;
-
+ amdgpu_dpm_get_display_cfg(adev);
pp_display_configuration_change(handle,
&adev->pm.pm_display_cfg);
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
index 8d40ed0f0e83..ce166a7f8e42 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
@@ -563,8 +563,8 @@ bool atomctrl_is_voltage_controlled_by_gpio_v3(
PP_ASSERT_WITH_CODE((NULL != voltage_info),
"Could not find Voltage Table in BIOS.", return false;);
- ret = (NULL != atomctrl_lookup_voltage_type_v3
- (voltage_info, voltage_type, voltage_mode)) ? true : false;
+ ret = atomctrl_lookup_voltage_type_v3
+ (voltage_info, voltage_type, voltage_mode) != NULL;
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index 9a821563bc8e..14ccd743ca1d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -1032,7 +1032,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
data->clock_vol_info.vdd_dep_on_fclk;
uint32_t i, now, size = 0;
uint32_t min_freq, max_freq = 0;
- uint32_t ret = 0;
+ int ret = 0;
switch (type) {
case PP_SCLK:
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
index 5e43ad2b2956..d2dbd90bb427 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
@@ -2540,9 +2540,8 @@ static int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
static bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr)
{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
- ? true : false;
+ return PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) == 1;
}
static int fiji_update_dpm_settings(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
index 17d2f5bff4a7..1f50f1e74c48 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
@@ -2655,9 +2655,8 @@ static int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
static bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr)
{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
- ? true : false;
+ return PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) == 1;
}
const struct pp_smumgr_func iceland_smu_funcs = {
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
index ff6b563ecbf5..bf6d09572cfc 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
@@ -2578,9 +2578,8 @@ static int polaris10_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
static bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
- ? true : false;
+ return PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) == 1;
}
static int polaris10_update_dpm_settings(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
index baf51cd82a35..0d4cbe4113a0 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
@@ -401,7 +401,7 @@ failed:
int smu7_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t fw_type)
{
struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
- uint32_t ret;
+ int ret;
ret = phm_wait_on_indirect_register(hwmgr, mmSMC_IND_INDEX_11,
smu_data->soft_regs_start + smum_get_offsetof(hwmgr,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
index 6fe6e6abb5d8..2e21f9d066cb 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
@@ -3139,9 +3139,8 @@ static int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
static bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
- ? true : false;
+ return PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) == 1;
}
static int tonga_update_dpm_settings(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 756afe78a6e5..fb8086859857 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -77,6 +77,9 @@ static void smu_power_profile_mode_get(struct smu_context *smu,
static void smu_power_profile_mode_put(struct smu_context *smu,
enum PP_SMC_POWER_PROFILE profile_mode);
static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type);
+static int smu_od_edit_dpm_table(void *handle,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long *input, uint32_t size);
static int smu_sys_get_pp_feature_mask(void *handle,
char *buf)
@@ -1312,6 +1315,33 @@ static void smu_init_power_profile(struct smu_context *smu)
smu_power_profile_mode_get(smu, smu->power_profile_mode);
}
+void smu_feature_cap_set(struct smu_context *smu, enum smu_feature_cap_id fea_id)
+{
+ struct smu_feature_cap *fea_cap = &smu->fea_cap;
+
+ if (fea_id >= SMU_FEATURE_CAP_ID__COUNT)
+ return;
+
+ set_bit(fea_id, fea_cap->cap_map);
+}
+
+bool smu_feature_cap_test(struct smu_context *smu, enum smu_feature_cap_id fea_id)
+{
+ struct smu_feature_cap *fea_cap = &smu->fea_cap;
+
+ if (fea_id >= SMU_FEATURE_CAP_ID__COUNT)
+ return false;
+
+ return test_bit(fea_id, fea_cap->cap_map);
+}
+
+static void smu_feature_cap_init(struct smu_context *smu)
+{
+ struct smu_feature_cap *fea_cap = &smu->fea_cap;
+
+ bitmap_zero(fea_cap->cap_map, SMU_FEATURE_CAP_ID__COUNT);
+}
+
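+ (Aside, not part of the patch: a sketch of how the new capability bitmap is meant
+ to be used. The SOC-specific set call site is hypothetical, while the test side
+ mirrors the reworked smu_reset_sdma_is_supported() below:
+
+	/* in a SOC backend's init path, advertise support once */
+	smu_feature_cap_set(smu, SMU_FEATURE_CAP_ID__SDMA_RESET);
+
+	/* later, common code only has to test the bit */
+	if (smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__SDMA_RESET))
+		ret = smu_reset_sdma(smu, inst_mask);
+ )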
static int smu_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -1344,6 +1374,8 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block)
INIT_DELAYED_WORK(&smu->swctf_delayed_work,
smu_swctf_delayed_work_handler);
+ smu_feature_cap_init(smu);
+
ret = smu_smc_table_sw_init(smu);
if (ret) {
dev_err(adev->dev, "Failed to sw init smc table!\n");
@@ -1893,7 +1925,6 @@ static int smu_hw_init(struct amdgpu_ip_block *ip_block)
for (i = 0; i < adev->vcn.num_vcn_inst; i++)
smu_dpm_set_vcn_enable(smu, true, i);
smu_dpm_set_jpeg_enable(smu, true);
- smu_dpm_set_vpe_enable(smu, true);
smu_dpm_set_umsch_mm_enable(smu, true);
smu_set_mall_enable(smu);
smu_set_gfx_cgpg(smu, true);
@@ -2101,7 +2132,6 @@ static int smu_hw_fini(struct amdgpu_ip_block *ip_block)
}
smu_dpm_set_jpeg_enable(smu, false);
adev->jpeg.cur_state = AMD_PG_STATE_GATE;
- smu_dpm_set_vpe_enable(smu, false);
smu_dpm_set_umsch_mm_enable(smu, false);
if (!smu->pm_enabled)
@@ -2195,6 +2225,7 @@ static int smu_resume(struct amdgpu_ip_block *ip_block)
int ret;
struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
if (amdgpu_sriov_multi_vf_mode(adev))
return 0;
@@ -2226,6 +2257,18 @@ static int smu_resume(struct amdgpu_ip_block *ip_block)
adev->pm.dpm_enabled = true;
+ if (smu->current_power_limit) {
+ ret = smu_set_power_limit(smu, smu->current_power_limit);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+ }
+
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL && smu->od_enabled) {
+ ret = smu_od_edit_dpm_table(smu, PP_OD_COMMIT_DPM_TABLE, NULL, 0);
+ if (ret)
+ return ret;
+ }
+
dev_info(adev->dev, "SMU is resumed successfully!\n");
return 0;
@@ -3491,15 +3534,10 @@ bool smu_mode1_reset_is_support(struct smu_context *smu)
bool smu_link_reset_is_support(struct smu_context *smu)
{
- bool ret = false;
-
if (!smu->pm_enabled)
return false;
- if (smu->ppt_funcs && smu->ppt_funcs->link_reset_is_support)
- ret = smu->ppt_funcs->link_reset_is_support(smu);
-
- return ret;
+ return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__LINK_RESET);
}
int smu_mode1_reset(struct smu_context *smu)
@@ -3815,6 +3853,51 @@ int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
return ret;
}
+static ssize_t smu_sys_get_temp_metrics(void *handle, enum smu_temp_metric_type type, void *table)
+{
+ struct smu_context *smu = handle;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+ enum smu_table_id table_id;
+
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
+
+ if (!smu->smu_temp.temp_funcs || !smu->smu_temp.temp_funcs->get_temp_metrics)
+ return -EOPNOTSUPP;
+
+ table_id = smu_metrics_get_temp_table_id(type);
+
+ if (table_id == SMU_TABLE_COUNT)
+ return -EINVAL;
+
+ /* If the request is to get size alone, return the cached table size */
+ if (!table && tables[table_id].cache.size)
+ return tables[table_id].cache.size;
+
+ if (smu_table_cache_is_valid(&tables[table_id])) {
+ memcpy(table, tables[table_id].cache.buffer,
+ tables[table_id].cache.size);
+ return tables[table_id].cache.size;
+ }
+
+ return smu->smu_temp.temp_funcs->get_temp_metrics(smu, type, table);
+}
+
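+ (Aside, not part of the patch: a hypothetical wiring sketch, with names invented
+ for illustration, showing how a SOC backend would plug into the temperature
+ callback interface that smu_sys_get_temp_metrics() dispatches through:
+
+	static const struct smu_temp_funcs my_soc_temp_funcs = {
+		.get_temp_metrics = my_soc_get_temp_metrics,
+		.temp_metrics_is_supported = my_soc_temp_metrics_is_supported,
+	};
+
+	/* during SOC-specific setup */
+	smu->smu_temp.temp_funcs = &my_soc_temp_funcs;
+ )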
+static bool smu_temp_metrics_is_supported(void *handle, enum smu_temp_metric_type type)
+{
+ struct smu_context *smu = handle;
+ bool ret = false;
+
+ if (!smu->pm_enabled)
+ return false;
+
+ if (smu->smu_temp.temp_funcs && smu->smu_temp.temp_funcs->temp_metrics_is_supported)
+ ret = smu->smu_temp.temp_funcs->temp_metrics_is_supported(smu, type);
+
+ return ret;
+}
+
static ssize_t smu_sys_get_xcp_metrics(void *handle, int xcp_id, void *table)
{
struct smu_context *smu = handle;
@@ -3887,6 +3970,8 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
.get_dpm_clock_table = smu_get_dpm_clock_table,
.get_smu_prv_buf_details = smu_get_prv_buffer_details,
.get_xcp_metrics = smu_sys_get_xcp_metrics,
+ .get_temp_metrics = smu_sys_get_temp_metrics,
+ .temp_metrics_is_supported = smu_temp_metrics_is_supported,
};
int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
@@ -4042,12 +4127,7 @@ int smu_send_rma_reason(struct smu_context *smu)
*/
bool smu_reset_sdma_is_supported(struct smu_context *smu)
{
- bool ret = false;
-
- if (smu->ppt_funcs && smu->ppt_funcs->reset_sdma_is_supported)
- ret = smu->ppt_funcs->reset_sdma_is_supported(smu);
-
- return ret;
+ return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__SDMA_RESET);
}
int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
@@ -4060,6 +4140,11 @@ int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
return ret;
}
+bool smu_reset_vcn_is_supported(struct smu_context *smu)
+{
+ return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__VCN_RESET);
+}
+
int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
{
if (smu->ppt_funcs && smu->ppt_funcs->dpm_reset_vcn)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index b52e194397e2..582c186d8b62 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -249,6 +249,14 @@ struct smu_user_dpm_profile {
tables[table_id].domain = d; \
} while (0)
+struct smu_table_cache {
+ void *buffer;
+ size_t size;
+	/* interval in ms */
+ uint32_t interval;
+ unsigned long last_cache_time;
+};
+
struct smu_table {
uint64_t size;
uint32_t align;
@@ -257,6 +265,7 @@ struct smu_table {
void *cpu_addr;
struct amdgpu_bo *bo;
uint32_t version;
+ struct smu_table_cache cache;
};
enum smu_perf_level_designation {
@@ -322,6 +331,9 @@ enum smu_table_id {
SMU_TABLE_ECCINFO,
SMU_TABLE_COMBO_PPTABLE,
SMU_TABLE_WIFIBAND,
+ SMU_TABLE_GPUBOARD_TEMP_METRICS,
+ SMU_TABLE_BASEBOARD_TEMP_METRICS,
+ SMU_TABLE_PMFW_SYSTEM_METRICS,
SMU_TABLE_COUNT,
};
@@ -396,6 +408,10 @@ struct smu_dpm_context {
struct smu_dpm_policy_ctxt *dpm_policies;
};
+struct smu_temp_context {
+ const struct smu_temp_funcs *temp_funcs;
+};
+
struct smu_power_gate {
bool uvd_gated;
bool vce_gated;
@@ -512,6 +528,17 @@ enum smu_fw_status {
*/
#define SMU_WBRF_EVENT_HANDLING_PACE 10
+enum smu_feature_cap_id {
+ SMU_FEATURE_CAP_ID__LINK_RESET = 0,
+ SMU_FEATURE_CAP_ID__SDMA_RESET,
+ SMU_FEATURE_CAP_ID__VCN_RESET,
+ SMU_FEATURE_CAP_ID__COUNT,
+};
+
+struct smu_feature_cap {
+ DECLARE_BITMAP(cap_map, SMU_FEATURE_CAP_ID__COUNT);
+};
+
struct smu_context {
struct amdgpu_device *adev;
struct amdgpu_irq_src irq_source;
@@ -529,10 +556,12 @@ struct smu_context {
struct smu_table_context smu_table;
struct smu_dpm_context smu_dpm;
struct smu_power_context smu_power;
+ struct smu_temp_context smu_temp;
struct smu_feature smu_feature;
struct amd_pp_display_configuration *display_config;
struct smu_baco_context smu_baco;
struct smu_temperature_range thermal_range;
+ struct smu_feature_cap fea_cap;
void *od_settings;
struct smu_umd_pstate_table pstate_table;
@@ -624,6 +653,28 @@ struct smu_context {
struct i2c_adapter;
/**
+ * struct smu_temp_funcs - Callbacks used to get temperature data.
+ */
+struct smu_temp_funcs {
+ /**
+	 * @get_temp_metrics: Get the temperature metrics table for a given
+	 * board component.
+	 * @type: Temperature metrics type (baseboard/gpuboard)
+ * Return: Size of &table
+ */
+ ssize_t (*get_temp_metrics)(struct smu_context *smu,
+ enum smu_temp_metric_type type, void *table);
+
+ /**
+	 * @temp_metrics_is_supported: Check if a specific temperature metrics type is supported
+	 * @type: Temperature metrics type (baseboard/gpuboard)
+ * Return: true if supported else false
+ */
+ bool (*temp_metrics_is_supported)(struct smu_context *smu, enum smu_temp_metric_type type);
+
+};
+
+/**
* struct pptable_funcs - Callbacks used to interact with the SMU.
*/
struct pptable_funcs {
@@ -1234,11 +1285,6 @@ struct pptable_funcs {
bool (*mode1_reset_is_support)(struct smu_context *smu);
/**
- * @link_reset_is_support: Check if GPU supports link reset.
- */
- bool (*link_reset_is_support)(struct smu_context *smu);
-
- /**
* @mode1_reset: Perform mode1 reset.
*
* Complete GPU reset.
@@ -1388,10 +1434,6 @@ struct pptable_funcs {
* @reset_sdma: message SMU to soft reset sdma instance.
*/
int (*reset_sdma)(struct smu_context *smu, uint32_t inst_mask);
- /**
- * @reset_sdma_is_supported: Check if support resets the SDMA engine.
- */
- bool (*reset_sdma_is_supported)(struct smu_context *smu);
/**
* @reset_vcn: message SMU to soft reset vcn instance.
@@ -1622,6 +1664,71 @@ typedef struct {
struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu,
enum pp_pm_policy p_type);
+static inline enum smu_table_id
+smu_metrics_get_temp_table_id(enum smu_temp_metric_type type)
+{
+ switch (type) {
+ case SMU_TEMP_METRIC_BASEBOARD:
+ return SMU_TABLE_BASEBOARD_TEMP_METRICS;
+ case SMU_TEMP_METRIC_GPUBOARD:
+ return SMU_TABLE_GPUBOARD_TEMP_METRICS;
+ default:
+ return SMU_TABLE_COUNT;
+ }
+
+ return SMU_TABLE_COUNT;
+}
+
+static inline void smu_table_cache_update_time(struct smu_table *table,
+ unsigned long time)
+{
+ table->cache.last_cache_time = time;
+}
+
+static inline bool smu_table_cache_is_valid(struct smu_table *table)
+{
+ if (!table->cache.buffer || !table->cache.last_cache_time ||
+ !table->cache.interval || !table->cache.size ||
+ time_after(jiffies,
+ table->cache.last_cache_time +
+ msecs_to_jiffies(table->cache.interval)))
+ return false;
+
+ return true;
+}
+
+static inline int smu_table_cache_init(struct smu_context *smu,
+ enum smu_table_id table_id, size_t size,
+ uint32_t cache_interval)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+
+ tables[table_id].cache.buffer = kzalloc(size, GFP_KERNEL);
+ if (!tables[table_id].cache.buffer)
+ return -ENOMEM;
+
+ tables[table_id].cache.last_cache_time = 0;
+ tables[table_id].cache.interval = cache_interval;
+ tables[table_id].cache.size = size;
+
+ return 0;
+}
+
+static inline void smu_table_cache_fini(struct smu_context *smu,
+ enum smu_table_id table_id)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+
+ if (tables[table_id].cache.buffer) {
+ kfree(tables[table_id].cache.buffer);
+ tables[table_id].cache.buffer = NULL;
+ tables[table_id].cache.last_cache_time = 0;
+ tables[table_id].cache.interval = 0;
+ }
+}
+
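+ (Aside, not part of the patch: a minimal lifecycle sketch for the per-table cache,
+ assuming local variables tables and out; the consumer side matches what
+ smu_sys_get_temp_metrics() does in this patch:
+
+	/* allocate a 4 KiB cache considered fresh for up to one second */
+	ret = smu_table_cache_init(smu, SMU_TABLE_GPUBOARD_TEMP_METRICS,
+				   4096, 1000);
+
+	/* producer: after filling cache.buffer, stamp the entry */
+	smu_table_cache_update_time(&tables[SMU_TABLE_GPUBOARD_TEMP_METRICS],
+				    jiffies);
+
+	/* consumer: serve from the cache while it is still valid */
+	if (smu_table_cache_is_valid(&tables[SMU_TABLE_GPUBOARD_TEMP_METRICS]))
+		memcpy(out, tables[SMU_TABLE_GPUBOARD_TEMP_METRICS].cache.buffer,
+		       tables[SMU_TABLE_GPUBOARD_TEMP_METRICS].cache.size);
+
+	/* teardown */
+	smu_table_cache_fini(smu, SMU_TABLE_GPUBOARD_TEMP_METRICS);
+ )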
#if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4)
int smu_get_power_limit(void *handle,
uint32_t *limit,
@@ -1673,10 +1780,14 @@ int smu_send_rma_reason(struct smu_context *smu);
int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask);
bool smu_reset_sdma_is_supported(struct smu_context *smu);
int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask);
+bool smu_reset_vcn_is_supported(struct smu_context *smu);
int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
int level);
ssize_t smu_get_pm_policy_info(struct smu_context *smu,
enum pp_pm_policy p_type, char *sysbuf);
#endif
+
+void smu_feature_cap_set(struct smu_context *smu, enum smu_feature_cap_id fea_id);
+bool smu_feature_cap_test(struct smu_context *smu, enum smu_feature_cap_id fea_id);
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
index 0a2ca544f4e3..bf6aa9620911 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
@@ -135,7 +135,63 @@ typedef enum {
GFX_DVM_MARGIN_COUNT
} GFX_DVM_MARGIN_e;
-#define SMU_METRICS_TABLE_VERSION 0x13
+typedef enum{
+ SYSTEM_TEMP_UBB_FPGA,
+ SYSTEM_TEMP_UBB_FRONT,
+ SYSTEM_TEMP_UBB_BACK,
+ SYSTEM_TEMP_UBB_OAM7,
+ SYSTEM_TEMP_UBB_IBC,
+ SYSTEM_TEMP_UBB_UFPGA,
+ SYSTEM_TEMP_UBB_OAM1,
+ SYSTEM_TEMP_OAM_0_1_HSC,
+ SYSTEM_TEMP_OAM_2_3_HSC,
+ SYSTEM_TEMP_OAM_4_5_HSC,
+ SYSTEM_TEMP_OAM_6_7_HSC,
+ SYSTEM_TEMP_UBB_FPGA_0V72_VR,
+ SYSTEM_TEMP_UBB_FPGA_3V3_VR,
+ SYSTEM_TEMP_RETIMER_0_1_2_3_1V2_VR,
+ SYSTEM_TEMP_RETIMER_4_5_6_7_1V2_VR,
+ SYSTEM_TEMP_RETIMER_0_1_0V9_VR,
+ SYSTEM_TEMP_RETIMER_4_5_0V9_VR,
+ SYSTEM_TEMP_RETIMER_2_3_0V9_VR,
+ SYSTEM_TEMP_RETIMER_6_7_0V9_VR,
+ SYSTEM_TEMP_OAM_0_1_2_3_3V3_VR,
+ SYSTEM_TEMP_OAM_4_5_6_7_3V3_VR,
+ SYSTEM_TEMP_IBC_HSC,
+ SYSTEM_TEMP_IBC,
+ SYSTEM_TEMP_MAX_ENTRIES = 32
+} SYSTEM_TEMP_e;
+
+typedef enum{
+ NODE_TEMP_RETIMER,
+ NODE_TEMP_IBC_TEMP,
+ NODE_TEMP_IBC_2_TEMP,
+ NODE_TEMP_VDD18_VR_TEMP,
+ NODE_TEMP_04_HBM_B_VR_TEMP,
+ NODE_TEMP_04_HBM_D_VR_TEMP,
+ NODE_TEMP_MAX_TEMP_ENTRIES = 12
+} NODE_TEMP_e;
+
+typedef enum {
+ SVI_VDDCR_VDD0_TEMP,
+ SVI_VDDCR_VDD1_TEMP,
+ SVI_VDDCR_VDD2_TEMP,
+ SVI_VDDCR_VDD3_TEMP,
+ SVI_VDDCR_SOC_A_TEMP,
+ SVI_VDDCR_SOC_C_TEMP,
+ SVI_VDDCR_SOCIO_A_TEMP,
+ SVI_VDDCR_SOCIO_C_TEMP,
+ SVI_VDD_085_HBM_TEMP,
+ SVI_VDDCR_11_HBM_B_TEMP,
+ SVI_VDDCR_11_HBM_D_TEMP,
+ SVI_VDD_USR_TEMP,
+ SVI_VDDIO_11_E32_TEMP,
+ SVI_MAX_TEMP_ENTRIES, // 13
+} SVI_TEMP_e;
+
+#define SMU_METRICS_TABLE_VERSION 0x14
+
+#define SMU_SYSTEM_METRICS_TABLE_VERSION 0x1
typedef struct __attribute__((packed, aligned(4))) {
uint64_t AccumulationCounter;
@@ -231,11 +287,32 @@ typedef struct __attribute__((packed, aligned(4))) {
uint64_t GfxclkBelowHostLimitThmAcc[8];
uint64_t GfxclkBelowHostLimitTotalAcc[8];
uint64_t GfxclkLowUtilizationAcc[8];
+
+ uint32_t AidTemperature[4];
+ uint32_t XcdTemperature[8];
+ uint32_t HbmTemperature[8];
} MetricsTable_t;
#define SMU_VF_METRICS_TABLE_MASK (1 << 31)
#define SMU_VF_METRICS_TABLE_VERSION (0x6 | SMU_VF_METRICS_TABLE_MASK)
+#pragma pack(push, 4)
+typedef struct {
+ uint64_t AccumulationCounter; // Last update timestamp
+ uint16_t LabelVersion; // Defaults to 0.
+ uint16_t NodeIdentifier; // Unique identifier to each node on system.
+ int16_t SystemTemperatures[SYSTEM_TEMP_MAX_ENTRIES]; // Signed integer temperature value in Celsius, unused fields are set to 0xFFFF
+ int16_t NodeTemperatures[NODE_TEMP_MAX_TEMP_ENTRIES]; // Signed integer temperature value in Celsius, unused fields are set to 0xFFFF
+ int16_t VrTemperatures[SVI_MAX_TEMP_ENTRIES]; // Signed integer temperature value in Celsius
+ int16_t spare[7];
+
+	// NPM: NODE POWER MANAGEMENT
+ uint32_t NodePowerLimit;
+ uint32_t NodePower;
+ uint32_t GlobalPPTResidencyAcc;
+} SystemMetricsTable_t;
+#pragma pack(pop)
+
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
uint32_t InstGfxclk_TargFreq;
@@ -287,6 +364,9 @@ typedef struct {
// General info
uint32_t pldmVersion[2];
+
+ //Node Power Limit
+ uint32_t MaxNodePowerLimit;
} StaticMetricsTable_t;
#pragma pack(pop)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h
index e1f490b6ce64..4b066c42e0ec 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h
@@ -116,7 +116,12 @@
#define PPSMC_MSG_DumpErrorRecord 0x57
#define PPSMC_MSG_EraseRasTable 0x58
#define PPSMC_MSG_GetStaticMetricsTable 0x59
-#define PPSMC_Message_Count 0x5A
+#define PPSMC_MSG_ResetVfArbitersByIndex 0x5A
+#define PPSMC_MSG_GetBadPageSeverity 0x5B
+#define PPSMC_MSG_GetSystemMetricsTable 0x5C
+#define PPSMC_MSG_GetSystemMetricsVersion 0x5D
+#define PPSMC_MSG_ResetVCN 0x5E
+#define PPSMC_Message_Count 0x5F
//PPSMC Reset Types for driver msg argument
#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
index 41f268313613..63a088ef7169 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
@@ -94,9 +94,9 @@
#define PPSMC_MSG_RmaDueToBadPageThreshold 0x43
#define PPSMC_MSG_SetThrottlingPolicy 0x44
#define PPSMC_MSG_ResetSDMA 0x4D
-#define PPSMC_MSG_ResetVCN 0x4E
#define PPSMC_MSG_GetStaticMetricsTable 0x59
-#define PPSMC_Message_Count 0x5A
+#define PPSMC_MSG_ResetVCN 0x5B
+#define PPSMC_Message_Count 0x5C
//PPSMC Reset Types for driver msg argument
#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index d7a9e41820fa..2256c77da636 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -278,7 +278,8 @@
__SMU_DUMMY_MAP(MALLPowerState), \
__SMU_DUMMY_MAP(ResetSDMA), \
__SMU_DUMMY_MAP(ResetVCN), \
- __SMU_DUMMY_MAP(GetStaticMetricsTable),
+ __SMU_DUMMY_MAP(GetStaticMetricsTable), \
+ __SMU_DUMMY_MAP(GetSystemMetricsTable),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
@@ -469,6 +470,7 @@ enum smu_feature_mask {
/* Message category flags */
#define SMU_MSG_VF_FLAG (1U << 0)
#define SMU_MSG_RAS_PRI (1U << 1)
+#define SMU_MSG_NO_PRECHECK (1U << 2)
/* Firmware capability flags */
#define SMU_FW_CAP_RAS_PRI (1U << 0)
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_0_pptable.h
index 251ed011b3b0..251ed011b3b0 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_0_pptable.h
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 9ad46f545d15..4fff78da81ff 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -1745,10 +1745,10 @@ static int arcturus_i2c_control_init(struct smu_context *smu)
snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
i2c_set_adapdata(control, smu_i2c);
- res = i2c_add_adapter(control);
+ res = devm_i2c_add_adapter(adev->dev, control);
if (res) {
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
- goto Out_err;
+ return res;
}
}
@@ -1756,27 +1756,12 @@ static int arcturus_i2c_control_init(struct smu_context *smu)
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter;
return 0;
-Out_err:
- for ( ; i >= 0; i--) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
- return res;
}
static void arcturus_i2c_control_fini(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- int i;
-
- for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
- i2c_del_adapter(control);
- }
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
@@ -1897,7 +1882,7 @@ static ssize_t arcturus_get_gpu_metrics(struct smu_context *smu,
ret = smu_cmn_get_metrics_table(smu,
&metrics,
- true);
+ false);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index aac202d0c30e..0028f10ead42 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -3145,10 +3145,10 @@ static int navi10_i2c_control_init(struct smu_context *smu)
control->quirks = &navi10_i2c_control_quirks;
i2c_set_adapdata(control, smu_i2c);
- res = i2c_add_adapter(control);
+ res = devm_i2c_add_adapter(adev->dev, control);
if (res) {
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
- goto Out_err;
+ return res;
}
}
@@ -3156,27 +3156,12 @@ static int navi10_i2c_control_init(struct smu_context *smu)
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter;
return 0;
-Out_err:
- for ( ; i >= 0; i--) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
- return res;
}
static void navi10_i2c_control_fini(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- int i;
- for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index d57591509aed..31c2c0386b1f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -2648,10 +2648,10 @@ static int sienna_cichlid_i2c_control_init(struct smu_context *smu)
control->quirks = &sienna_cichlid_i2c_control_quirks;
i2c_set_adapdata(control, smu_i2c);
- res = i2c_add_adapter(control);
+ res = devm_i2c_add_adapter(adev->dev, control);
if (res) {
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
- goto Out_err;
+ return res;
}
}
/* assign the buses used for the FRU EEPROM and RAS EEPROM */
@@ -2660,27 +2660,12 @@ static int sienna_cichlid_i2c_control_init(struct smu_context *smu)
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
return 0;
-Out_err:
- for ( ; i >= 0; i--) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
- return res;
}
static void sienna_cichlid_i2c_control_fini(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- int i;
- for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index e97b0cf19197..3baf20f4c373 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -470,7 +470,7 @@ static int renoir_od_edit_dpm_table(struct smu_context *smu,
static int renoir_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
{
uint32_t min = 0, max = 0;
- uint32_t ret = 0;
+ int ret = 0;
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GetMinGfxclkFrequency,
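The renoir change is a one-word fix with real consequences: the SMU helpers return negative errno values, and storing one in a uint32_t turns, say, -EINVAL into 4294967274, so any `ret < 0` test can never fire and static checkers flag the truncation when the value is propagated as an int. A contrived sketch of the hazard:

	uint32_t ret = -EINVAL;		/* wraps to 4294967274 */

	if (ret < 0)			/* unsigned compare: never true */
		return -EIO;		/* unreachable */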
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index c63d2e28954d..18d5d0704509 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1641,33 +1641,22 @@ static int aldebaran_i2c_control_init(struct smu_context *smu)
control->quirks = &aldebaran_i2c_control_quirks;
i2c_set_adapdata(control, smu_i2c);
- res = i2c_add_adapter(control);
+ res = devm_i2c_add_adapter(adev->dev, control);
if (res) {
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
- goto Out_err;
+ return res;
}
adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
return 0;
-Out_err:
- i2c_del_adapter(control);
-
- return res;
}
static void aldebaran_i2c_control_fini(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- int i;
-
- for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
- i2c_del_adapter(control);
- }
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
@@ -1781,7 +1770,7 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
ret = smu_cmn_get_metrics_table(smu,
&metrics,
- true);
+ false);
if (ret)
return ret;
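Both get_gpu_metrics changes (arcturus above, aldebaran here) flip the bypass_cache argument of smu_cmn_get_metrics_table() from true to false, so back-to-back reads can be served from the recently fetched table instead of forcing a firmware round trip each time. The idea, reduced to a sketch — helper names here are illustrative, not the driver's exact internals:

	static int get_metrics(struct smu_context *smu, void *out, bool bypass_cache)
	{
		struct smu_table_context *st = &smu->smu_table;

		/* Refetch when forced, or when the cached copy has gone stale. */
		if (bypass_cache || !st->metrics_time ||
		    time_after(jiffies, st->metrics_time + msecs_to_jiffies(1))) {
			int ret = fetch_metrics_from_fw(smu);	/* hypothetical */

			if (ret)
				return ret;
			st->metrics_time = jiffies;
		}

		memcpy(out, st->metrics_table, METRICS_TABLE_SIZE);
		return 0;
	}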
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 1a1f2a6b2e52..a89075e25717 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -288,7 +288,8 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
* Considering above, we just leave user a verbal message instead
* of halt driver loading.
*/
- if (if_version != smu->smc_driver_if_version) {
+ if (smu->smc_driver_if_version != SMU_IGNORE_IF_VERSION &&
+ if_version != smu->smc_driver_if_version) {
dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
"smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
smu->smc_driver_if_version, if_version,
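SMU_IGNORE_IF_VERSION — defined further down in the smu_cmn.h hunk as 0xFFFFFFFF — is a sentinel that cannot collide with a real interface version, so a ppt implementation that sets it (as smu_v13_0_6_set_ppt_funcs now does) opts out of the mismatch warning entirely while every other SoC keeps the old behavior:

	/* Warn only when a real expected version is set and it differs. */
	if (smu->smc_driver_if_version != SMU_IGNORE_IF_VERSION &&
	    if_version != smu->smc_driver_if_version)
		dev_info(adev->dev, "if-version mismatch\n");	/* load anyway */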
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index e084ed99ec0e..c1062e5f0393 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2825,10 +2825,10 @@ static int smu_v13_0_0_i2c_control_init(struct smu_context *smu)
control->quirks = &smu_v13_0_0_i2c_control_quirks;
i2c_set_adapdata(control, smu_i2c);
- res = i2c_add_adapter(control);
+ res = devm_i2c_add_adapter(adev->dev, control);
if (res) {
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
- goto Out_err;
+ return res;
}
}
@@ -2838,27 +2838,12 @@ static int smu_v13_0_0_i2c_control_init(struct smu_context *smu)
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
return 0;
-Out_err:
- for ( ; i >= 0; i--) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
- return res;
}
static void smu_v13_0_0_i2c_control_fini(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- int i;
- for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
index 02a455a31c25..cb3fea9e8cf3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
@@ -83,7 +83,6 @@ const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[SMU_FEATURE_COUNT] =
SMU_13_0_12_FEA_MAP(SMU_FEATURE_PIT_BIT, FEATURE_PIT),
};
-// clang-format off
const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
@@ -106,7 +105,7 @@ const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1),
MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1),
- MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI),
+ MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI | SMU_MSG_NO_PRECHECK),
MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0),
MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0),
MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0),
@@ -137,9 +136,57 @@ const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0),
MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0),
MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0),
+ MSG_MAP(ResetVCN, PPSMC_MSG_ResetVCN, 0),
MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 1),
+ MSG_MAP(GetSystemMetricsTable, PPSMC_MSG_GetSystemMetricsTable, 1),
};
+int smu_v13_0_12_tables_init(struct smu_context *smu)
+{
+ struct amdgpu_baseboard_temp_metrics_v1_0 *baseboard_temp_metrics;
+ struct amdgpu_gpuboard_temp_metrics_v1_0 *gpuboard_temp_metrics;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+ struct smu_table_cache *cache;
+ int ret;
+
+	ret = smu_table_cache_init(smu, SMU_TABLE_PMFW_SYSTEM_METRICS,
+				   smu_v13_0_12_get_system_metrics_size(), 5);
+	if (ret)
+		return ret;
+
+	ret = smu_table_cache_init(smu, SMU_TABLE_BASEBOARD_TEMP_METRICS,
+				   sizeof(*baseboard_temp_metrics), 50);
+	if (ret) {
+		smu_table_cache_fini(smu, SMU_TABLE_PMFW_SYSTEM_METRICS);
+		return ret;
+	}
+ /* Initialize base board temperature metrics */
+ cache = &(tables[SMU_TABLE_BASEBOARD_TEMP_METRICS].cache);
+ baseboard_temp_metrics =
+ (struct amdgpu_baseboard_temp_metrics_v1_0 *) cache->buffer;
+ smu_cmn_init_baseboard_temp_metrics(baseboard_temp_metrics, 1, 0);
+ /* Initialize GPU board temperature metrics */
+ ret = smu_table_cache_init(smu, SMU_TABLE_GPUBOARD_TEMP_METRICS,
+ sizeof(*gpuboard_temp_metrics), 50);
+ if (ret) {
+ smu_table_cache_fini(smu, SMU_TABLE_PMFW_SYSTEM_METRICS);
+ smu_table_cache_fini(smu, SMU_TABLE_BASEBOARD_TEMP_METRICS);
+ return ret;
+ }
+ cache = &(tables[SMU_TABLE_GPUBOARD_TEMP_METRICS].cache);
+ gpuboard_temp_metrics = (struct amdgpu_gpuboard_temp_metrics_v1_0 *)cache->buffer;
+ smu_cmn_init_gpuboard_temp_metrics(gpuboard_temp_metrics, 1, 0);
+
+ return 0;
+}
+
+void smu_v13_0_12_tables_fini(struct smu_context *smu)
+{
+ smu_table_cache_fini(smu, SMU_TABLE_BASEBOARD_TEMP_METRICS);
+ smu_table_cache_fini(smu, SMU_TABLE_GPUBOARD_TEMP_METRICS);
+ smu_table_cache_fini(smu, SMU_TABLE_PMFW_SYSTEM_METRICS);
+}
+
static int smu_v13_0_12_get_enabled_mask(struct smu_context *smu,
uint64_t *feature_mask)
{
@@ -187,6 +234,11 @@ int smu_v13_0_12_get_max_metrics_size(void)
return max(sizeof(StaticMetricsTable_t), sizeof(MetricsTable_t));
}
+size_t smu_v13_0_12_get_system_metrics_size(void)
+{
+ return sizeof(SystemMetricsTable_t);
+}
+
static void smu_v13_0_12_init_xgmi_data(struct smu_context *smu,
StaticMetricsTable_t *static_metrics)
{
@@ -220,7 +272,7 @@ int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu)
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
uint32_t table_version;
- int ret, i;
+ int ret, i, n;
if (!pptable->Init) {
ret = smu_v13_0_6_get_static_metrics_table(smu);
@@ -259,6 +311,22 @@ int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu)
/* use AID0 serial number by default */
pptable->PublicSerialNumber_AID =
static_metrics->PublicSerialNumber_AID[0];
+
+ amdgpu_device_set_uid(smu->adev->uid_info, AMDGPU_UID_TYPE_SOC,
+ 0, pptable->PublicSerialNumber_AID);
+ n = ARRAY_SIZE(static_metrics->PublicSerialNumber_AID);
+ for (i = 0; i < n; i++) {
+ amdgpu_device_set_uid(
+ smu->adev->uid_info, AMDGPU_UID_TYPE_AID, i,
+ static_metrics->PublicSerialNumber_AID[i]);
+ }
+ n = ARRAY_SIZE(static_metrics->PublicSerialNumber_XCD);
+ for (i = 0; i < n; i++) {
+ amdgpu_device_set_uid(
+ smu->adev->uid_info, AMDGPU_UID_TYPE_XCD, i,
+ static_metrics->PublicSerialNumber_XCD[i]);
+ }
+
ret = smu_v13_0_12_fru_get_product_info(smu, static_metrics);
if (ret)
return ret;
@@ -274,6 +342,9 @@ int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu)
static_metrics->pldmVersion[0] != 0xFFFFFFFF)
smu->adev->firmware.pldm_version =
static_metrics->pldmVersion[0];
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(NPM_METRICS)))
+ pptable->MaxNodePowerLimit =
+ SMUQ10_ROUND(static_metrics->MaxNodePowerLimit);
smu_v13_0_12_init_xgmi_data(smu, static_metrics);
pptable->Init = true;
}
@@ -359,6 +430,292 @@ int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
return 0;
}
+static int smu_v13_0_12_get_system_metrics_table(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *table = &smu_table->driver_table;
+ struct smu_table *tables = smu_table->tables;
+ struct smu_table *sys_table;
+ int ret;
+
+ sys_table = &tables[SMU_TABLE_PMFW_SYSTEM_METRICS];
+ if (smu_table_cache_is_valid(sys_table))
+ return 0;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSystemMetricsTable, NULL);
+ if (ret) {
+		dev_err(smu->adev->dev,
+			"Failed to export system metrics table!\n");
+ return ret;
+ }
+
+ amdgpu_asic_invalidate_hdp(smu->adev, NULL);
+ smu_table_cache_update_time(sys_table, jiffies);
+ memcpy(sys_table->cache.buffer, table->cpu_addr,
+ smu_v13_0_12_get_system_metrics_size());
+
+ return 0;
+}
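This fetch helper encodes an ordering requirement that is easy to lose in refactors: the firmware DMA-writes the table into the driver buffer, so the HDP cache must be invalidated before the CPU touches cpu_addr, and the cache timestamp is bumped only once the message has succeeded. The skeleton, stripped of declarations:

	if (smu_table_cache_is_valid(sys_table))
		return 0;				/* serve the cached copy */

	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSystemMetricsTable, NULL);
	if (ret)
		return ret;

	amdgpu_asic_invalidate_hdp(smu->adev, NULL);	/* drop stale CPU view */
	smu_table_cache_update_time(sys_table, jiffies);
	memcpy(sys_table->cache.buffer, table->cpu_addr,
	       smu_v13_0_12_get_system_metrics_size());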
+
+static enum amdgpu_node_temp smu_v13_0_12_get_node_sensor_type(NODE_TEMP_e type)
+{
+ switch (type) {
+ case NODE_TEMP_RETIMER:
+ return AMDGPU_RETIMER_X_TEMP;
+ case NODE_TEMP_IBC_TEMP:
+ return AMDGPU_OAM_X_IBC_TEMP;
+ case NODE_TEMP_IBC_2_TEMP:
+ return AMDGPU_OAM_X_IBC_2_TEMP;
+ case NODE_TEMP_VDD18_VR_TEMP:
+ return AMDGPU_OAM_X_VDD18_VR_TEMP;
+ case NODE_TEMP_04_HBM_B_VR_TEMP:
+ return AMDGPU_OAM_X_04_HBM_B_VR_TEMP;
+ case NODE_TEMP_04_HBM_D_VR_TEMP:
+ return AMDGPU_OAM_X_04_HBM_D_VR_TEMP;
+ default:
+ return -EINVAL;
+ }
+}
+
+static enum amdgpu_vr_temp smu_v13_0_12_get_vr_sensor_type(SVI_TEMP_e type)
+{
+ switch (type) {
+ case SVI_VDDCR_VDD0_TEMP:
+ return AMDGPU_VDDCR_VDD0_TEMP;
+ case SVI_VDDCR_VDD1_TEMP:
+ return AMDGPU_VDDCR_VDD1_TEMP;
+ case SVI_VDDCR_VDD2_TEMP:
+ return AMDGPU_VDDCR_VDD2_TEMP;
+ case SVI_VDDCR_VDD3_TEMP:
+ return AMDGPU_VDDCR_VDD3_TEMP;
+ case SVI_VDDCR_SOC_A_TEMP:
+ return AMDGPU_VDDCR_SOC_A_TEMP;
+ case SVI_VDDCR_SOC_C_TEMP:
+ return AMDGPU_VDDCR_SOC_C_TEMP;
+ case SVI_VDDCR_SOCIO_A_TEMP:
+ return AMDGPU_VDDCR_SOCIO_A_TEMP;
+ case SVI_VDDCR_SOCIO_C_TEMP:
+ return AMDGPU_VDDCR_SOCIO_C_TEMP;
+ case SVI_VDD_085_HBM_TEMP:
+ return AMDGPU_VDD_085_HBM_TEMP;
+ case SVI_VDDCR_11_HBM_B_TEMP:
+ return AMDGPU_VDDCR_11_HBM_B_TEMP;
+ case SVI_VDDCR_11_HBM_D_TEMP:
+ return AMDGPU_VDDCR_11_HBM_D_TEMP;
+ case SVI_VDD_USR_TEMP:
+ return AMDGPU_VDD_USR_TEMP;
+ case SVI_VDDIO_11_E32_TEMP:
+ return AMDGPU_VDDIO_11_E32_TEMP;
+ default:
+ return -EINVAL;
+ }
+}
+
+static enum amdgpu_system_temp smu_v13_0_12_get_system_sensor_type(SYSTEM_TEMP_e type)
+{
+ switch (type) {
+ case SYSTEM_TEMP_UBB_FPGA:
+ return AMDGPU_UBB_FPGA_TEMP;
+ case SYSTEM_TEMP_UBB_FRONT:
+ return AMDGPU_UBB_FRONT_TEMP;
+ case SYSTEM_TEMP_UBB_BACK:
+ return AMDGPU_UBB_BACK_TEMP;
+ case SYSTEM_TEMP_UBB_OAM7:
+ return AMDGPU_UBB_OAM7_TEMP;
+ case SYSTEM_TEMP_UBB_IBC:
+ return AMDGPU_UBB_IBC_TEMP;
+ case SYSTEM_TEMP_UBB_UFPGA:
+ return AMDGPU_UBB_UFPGA_TEMP;
+ case SYSTEM_TEMP_UBB_OAM1:
+ return AMDGPU_UBB_OAM1_TEMP;
+ case SYSTEM_TEMP_OAM_0_1_HSC:
+ return AMDGPU_OAM_0_1_HSC_TEMP;
+ case SYSTEM_TEMP_OAM_2_3_HSC:
+ return AMDGPU_OAM_2_3_HSC_TEMP;
+ case SYSTEM_TEMP_OAM_4_5_HSC:
+ return AMDGPU_OAM_4_5_HSC_TEMP;
+ case SYSTEM_TEMP_OAM_6_7_HSC:
+ return AMDGPU_OAM_6_7_HSC_TEMP;
+ case SYSTEM_TEMP_UBB_FPGA_0V72_VR:
+ return AMDGPU_UBB_FPGA_0V72_VR_TEMP;
+ case SYSTEM_TEMP_UBB_FPGA_3V3_VR:
+ return AMDGPU_UBB_FPGA_3V3_VR_TEMP;
+ case SYSTEM_TEMP_RETIMER_0_1_2_3_1V2_VR:
+ return AMDGPU_RETIMER_0_1_2_3_1V2_VR_TEMP;
+ case SYSTEM_TEMP_RETIMER_4_5_6_7_1V2_VR:
+ return AMDGPU_RETIMER_4_5_6_7_1V2_VR_TEMP;
+ case SYSTEM_TEMP_RETIMER_0_1_0V9_VR:
+ return AMDGPU_RETIMER_0_1_0V9_VR_TEMP;
+ case SYSTEM_TEMP_RETIMER_4_5_0V9_VR:
+ return AMDGPU_RETIMER_4_5_0V9_VR_TEMP;
+ case SYSTEM_TEMP_RETIMER_2_3_0V9_VR:
+ return AMDGPU_RETIMER_2_3_0V9_VR_TEMP;
+ case SYSTEM_TEMP_RETIMER_6_7_0V9_VR:
+ return AMDGPU_RETIMER_6_7_0V9_VR_TEMP;
+ case SYSTEM_TEMP_OAM_0_1_2_3_3V3_VR:
+ return AMDGPU_OAM_0_1_2_3_3V3_VR_TEMP;
+ case SYSTEM_TEMP_OAM_4_5_6_7_3V3_VR:
+ return AMDGPU_OAM_4_5_6_7_3V3_VR_TEMP;
+ case SYSTEM_TEMP_IBC_HSC:
+ return AMDGPU_IBC_HSC_TEMP;
+ case SYSTEM_TEMP_IBC:
+ return AMDGPU_IBC_TEMP;
+ default:
+ return -EINVAL;
+ }
+}
+
+static bool smu_v13_0_12_is_temp_metrics_supported(struct smu_context *smu,
+ enum smu_temp_metric_type type)
+{
+ switch (type) {
+ case SMU_TEMP_METRIC_BASEBOARD:
+ if (smu->adev->gmc.xgmi.physical_node_id == 0 &&
+ smu->adev->gmc.xgmi.num_physical_nodes > 1 &&
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(TEMP_METRICS)))
+ return true;
+ break;
+ case SMU_TEMP_METRIC_GPUBOARD:
+ return smu_v13_0_6_cap_supported(smu, SMU_CAP(TEMP_METRICS));
+ default:
+ break;
+ }
+
+ return false;
+}
+
+int smu_v13_0_12_get_npm_data(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ uint32_t *value)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct PPTable_t *pptable =
+ (struct PPTable_t *)smu_table->driver_pptable;
+ struct smu_table *tables = smu_table->tables;
+ SystemMetricsTable_t *metrics;
+ struct smu_table *sys_table;
+ int ret;
+
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(NPM_METRICS)))
+ return -EOPNOTSUPP;
+
+ if (sensor == AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT) {
+ *value = pptable->MaxNodePowerLimit;
+ return 0;
+ }
+
+ ret = smu_v13_0_12_get_system_metrics_table(smu);
+ if (ret)
+ return ret;
+
+ sys_table = &tables[SMU_TABLE_PMFW_SYSTEM_METRICS];
+ metrics = (SystemMetricsTable_t *)sys_table->cache.buffer;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_NODEPOWERLIMIT:
+ *value = SMUQ10_ROUND(metrics->NodePowerLimit);
+ break;
+ case AMDGPU_PP_SENSOR_NODEPOWER:
+ *value = SMUQ10_ROUND(metrics->NodePower);
+ break;
+ case AMDGPU_PP_SENSOR_GPPTRESIDENCY:
+ *value = SMUQ10_ROUND(metrics->GlobalPPTResidencyAcc);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
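The SMUQ10_ROUND() calls reflect that PMFW reports these values in Q10 fixed point: ten fractional bits, so 1.0 is 1024. Rounding to the nearest integer amounts to checking the half bit before shifting the fraction away — a sketch equivalent in spirit to the driver's helpers:

	/* Q10: value = raw / 1024.  Round to nearest integer. */
	#define Q10_INT(x)	((x) >> 10)
	#define Q10_FRAC(x)	((x) & 0x3ff)
	#define Q10_ROUND(x)	(Q10_INT(x) + (Q10_FRAC(x) >= 0x200))

	/* Example: raw 0x1A00 = 6656 -> 6656/1024 = 6.5 -> rounds to 7. */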
+
+static ssize_t smu_v13_0_12_get_temp_metrics(struct smu_context *smu,
+ enum smu_temp_metric_type type, void *table)
+{
+ struct amdgpu_baseboard_temp_metrics_v1_0 *baseboard_temp_metrics;
+ struct amdgpu_gpuboard_temp_metrics_v1_0 *gpuboard_temp_metrics;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+ SystemMetricsTable_t *metrics;
+ struct smu_table *data_table;
+ struct smu_table *sys_table;
+ int ret, sensor_type;
+ u32 idx, sensors;
+ ssize_t size;
+
+ if (type == SMU_TEMP_METRIC_BASEBOARD) {
+ /* Initialize base board temperature metrics */
+ data_table =
+ &smu->smu_table.tables[SMU_TABLE_BASEBOARD_TEMP_METRICS];
+ baseboard_temp_metrics =
+ (struct amdgpu_baseboard_temp_metrics_v1_0 *)
+ data_table->cache.buffer;
+ size = sizeof(*baseboard_temp_metrics);
+ } else {
+ data_table =
+ &smu->smu_table.tables[SMU_TABLE_GPUBOARD_TEMP_METRICS];
+ gpuboard_temp_metrics =
+ (struct amdgpu_gpuboard_temp_metrics_v1_0 *)
+ data_table->cache.buffer;
+		size = sizeof(*gpuboard_temp_metrics);
+ }
+
+ ret = smu_v13_0_12_get_system_metrics_table(smu);
+ if (ret)
+ return ret;
+
+ sys_table = &tables[SMU_TABLE_PMFW_SYSTEM_METRICS];
+ metrics = (SystemMetricsTable_t *)sys_table->cache.buffer;
+ smu_table_cache_update_time(data_table, jiffies);
+
+ if (type == SMU_TEMP_METRIC_GPUBOARD) {
+ gpuboard_temp_metrics->accumulation_counter = metrics->AccumulationCounter;
+ gpuboard_temp_metrics->label_version = metrics->LabelVersion;
+ gpuboard_temp_metrics->node_id = metrics->NodeIdentifier;
+
+ idx = 0;
+ for (sensors = 0; sensors < NODE_TEMP_MAX_TEMP_ENTRIES; sensors++) {
+ if (metrics->NodeTemperatures[sensors] != -1) {
+ sensor_type = smu_v13_0_12_get_node_sensor_type(sensors);
+ gpuboard_temp_metrics->node_temp[idx] =
+ ((int)metrics->NodeTemperatures[sensors]) & 0xFFFFFF;
+ gpuboard_temp_metrics->node_temp[idx] |= (sensor_type << 24);
+ idx++;
+ }
+ }
+
+ idx = 0;
+
+ for (sensors = 0; sensors < SVI_MAX_TEMP_ENTRIES; sensors++) {
+ if (metrics->VrTemperatures[sensors] != -1) {
+ sensor_type = smu_v13_0_12_get_vr_sensor_type(sensors);
+ gpuboard_temp_metrics->vr_temp[idx] =
+ ((int)metrics->VrTemperatures[sensors]) & 0xFFFFFF;
+ gpuboard_temp_metrics->vr_temp[idx] |= (sensor_type << 24);
+ idx++;
+ }
+ }
+ } else if (type == SMU_TEMP_METRIC_BASEBOARD) {
+ baseboard_temp_metrics->accumulation_counter = metrics->AccumulationCounter;
+ baseboard_temp_metrics->label_version = metrics->LabelVersion;
+ baseboard_temp_metrics->node_id = metrics->NodeIdentifier;
+
+ idx = 0;
+ for (sensors = 0; sensors < SYSTEM_TEMP_MAX_ENTRIES; sensors++) {
+ if (metrics->SystemTemperatures[sensors] != -1) {
+ sensor_type = smu_v13_0_12_get_system_sensor_type(sensors);
+ baseboard_temp_metrics->system_temp[idx] =
+ ((int)metrics->SystemTemperatures[sensors]) & 0xFFFFFF;
+ baseboard_temp_metrics->system_temp[idx] |= (sensor_type << 24);
+ idx++;
+ }
+ }
+ }
+
+ memcpy(table, data_table->cache.buffer, size);
+
+ return size;
+}
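Each entry emitted above packs the sensor type into the top byte and the signed reading into the low 24 bits. A consumer-side decode, assuming exactly that layout (the field split is read off the encode path, not from a published header):

	static inline u8 temp_entry_type(u32 entry)
	{
		return entry >> 24;
	}

	static inline s32 temp_entry_value(u32 entry)
	{
		return (s32)(entry << 8) >> 8;	/* sign-extend 24-bit field */
	}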
+
ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp *xcp, void *table, void *smu_metrics)
{
const u8 num_jpeg_rings = NUM_JPEG_RINGS_FW;
@@ -572,3 +929,8 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void
return sizeof(*gpu_metrics);
}
+
+const struct smu_temp_funcs smu_v13_0_12_temp_funcs = {
+ .temp_metrics_is_supported = smu_v13_0_12_is_temp_metrics_supported,
+ .get_temp_metrics = smu_v13_0_12_get_temp_metrics,
+};
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 9cc294f4708b..285cf7979693 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -143,9 +143,9 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1),
MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1),
MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1),
- MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
+ MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 1),
MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1),
- MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI),
+ MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI | SMU_MSG_NO_PRECHECK),
MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0),
MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0),
MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0),
@@ -177,7 +177,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0),
MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0),
MSG_MAP(ResetVCN, PPSMC_MSG_ResetVCN, 0),
- MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 0),
+ MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 1),
};
// clang-format on
@@ -312,6 +312,8 @@ static void smu_v13_0_14_init_caps(struct smu_context *smu)
smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS));
if (fw_ver >= 0x5551200)
smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
+ if (fw_ver >= 0x5551800)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET));
if (fw_ver >= 0x5551600) {
smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS));
smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE));
@@ -350,6 +352,20 @@ static void smu_v13_0_12_init_caps(struct smu_context *smu)
smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE));
smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
}
+
+ if (fw_ver > 0x04560900)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET));
+
+ if (fw_ver >= 0x04560700) {
+ if (fw_ver >= 0x04560900) {
+ smu_v13_0_6_cap_set(smu, SMU_CAP(TEMP_METRICS));
+ if (smu->adev->gmc.xgmi.physical_node_id == 0)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(NPM_METRICS));
+ } else if (!amdgpu_sriov_vf(smu->adev))
+ smu_v13_0_6_cap_set(smu, SMU_CAP(TEMP_METRICS));
+ } else {
+ smu_v13_0_12_tables_fini(smu);
+ }
}
static void smu_v13_0_6_init_caps(struct smu_context *smu)
@@ -402,19 +418,40 @@ static void smu_v13_0_6_init_caps(struct smu_context *smu)
if ((pgm == 7 && fw_ver >= 0x7550E00) ||
(pgm == 0 && fw_ver >= 0x00557E00))
smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
- if ((pgm == 0 && fw_ver >= 0x00557F01) ||
- (pgm == 7 && fw_ver >= 0x7551000)) {
- smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS));
- smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE));
+
+ if (amdgpu_sriov_vf(adev)) {
+ if (fw_ver >= 0x00558200)
+ amdgpu_virt_attr_set(&adev->virt.virt_caps,
+ AMDGPU_VIRT_CAP_POWER_LIMIT,
+ AMDGPU_CAP_ATTR_RW);
+ if ((pgm == 0 && fw_ver >= 0x00558000) ||
+ (pgm == 7 && fw_ver >= 0x7551000)) {
+ smu_v13_0_6_cap_set(smu,
+ SMU_CAP(STATIC_METRICS));
+ smu_v13_0_6_cap_set(smu,
+ SMU_CAP(BOARD_VOLTAGE));
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
+ }
+ } else {
+ if ((pgm == 0 && fw_ver >= 0x00557F01) ||
+ (pgm == 7 && fw_ver >= 0x7551000)) {
+ smu_v13_0_6_cap_set(smu,
+ SMU_CAP(STATIC_METRICS));
+ smu_v13_0_6_cap_set(smu,
+ SMU_CAP(BOARD_VOLTAGE));
+ }
+ if ((pgm == 0 && fw_ver >= 0x00558000) ||
+ (pgm == 7 && fw_ver >= 0x7551000))
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
}
- if ((pgm == 0 && fw_ver >= 0x00558000) ||
- (pgm == 7 && fw_ver >= 0x7551000))
- smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
}
if (((pgm == 7) && (fw_ver >= 0x7550700)) ||
((pgm == 0) && (fw_ver >= 0x00557900)) ||
((pgm == 4) && (fw_ver >= 0x4557000)))
smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
+
+ if ((pgm == 0) && (fw_ver >= 0x00558200))
+ smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET));
}
static void smu_v13_0_x_init_caps(struct smu_context *smu)
@@ -511,8 +548,12 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
+ void *gpu_metrics_table __free(kfree) = NULL;
+ void *driver_pptable __free(kfree) = NULL;
+ void *metrics_table __free(kfree) = NULL;
struct amdgpu_device *adev = smu->adev;
int gpu_metrcs_size = METRICS_TABLE_SIZE;
+ int ret;
if (!(adev->flags & AMD_IS_APU))
SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
@@ -528,27 +569,36 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
- smu_table->metrics_table = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL);
- if (!smu_table->metrics_table)
+ SMU_TABLE_INIT(tables, SMU_TABLE_PMFW_SYSTEM_METRICS,
+ smu_v13_0_12_get_system_metrics_size(), PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
+
+ metrics_table = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL);
+ if (!metrics_table)
return -ENOMEM;
smu_table->metrics_time = 0;
smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_8);
- smu_table->gpu_metrics_table =
+ gpu_metrics_table =
kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
- if (!smu_table->gpu_metrics_table) {
- kfree(smu_table->metrics_table);
+ if (!gpu_metrics_table)
return -ENOMEM;
- }
- smu_table->driver_pptable =
- kzalloc(sizeof(struct PPTable_t), GFP_KERNEL);
- if (!smu_table->driver_pptable) {
- kfree(smu_table->metrics_table);
- kfree(smu_table->gpu_metrics_table);
+ driver_pptable = kzalloc(sizeof(struct PPTable_t), GFP_KERNEL);
+ if (!driver_pptable)
return -ENOMEM;
+
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==
+ IP_VERSION(13, 0, 12)) {
+ ret = smu_v13_0_12_tables_init(smu);
+ if (ret)
+ return ret;
}
+ smu_table->gpu_metrics_table = no_free_ptr(gpu_metrics_table);
+ smu_table->metrics_table = no_free_ptr(metrics_table);
+ smu_table->driver_pptable = no_free_ptr(driver_pptable);
+
return 0;
}
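The tables_init rewrite leans on the scope-based cleanup helpers from <linux/cleanup.h>: a local declared with __free(kfree) is freed automatically when it goes out of scope, so each early return sheds all prior allocations without kfree() ladders, and no_free_ptr() disarms the cleanup once ownership is handed over. The pattern in isolation, with illustrative names:

	#include <linux/cleanup.h>
	#include <linux/slab.h>

	static int init_buffers(struct my_ctx *c)	/* hypothetical type */
	{
		void *a __free(kfree) = kzalloc(SZ_4K, GFP_KERNEL);
		void *b __free(kfree) = kzalloc(SZ_4K, GFP_KERNEL);

		if (!a || !b)
			return -ENOMEM;	/* both freed automatically */

		c->a = no_free_ptr(a);	/* ownership moved; auto-free off */
		c->b = no_free_ptr(b);
		return 0;
	}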
@@ -677,6 +727,13 @@ static int smu_v13_0_6_init_smc_tables(struct smu_context *smu)
return ret;
}
+static int smu_v13_0_6_fini_smc_tables(struct smu_context *smu)
+{
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12))
+ smu_v13_0_12_tables_fini(smu);
+ return smu_v13_0_fini_smc_tables(smu);
+}
+
static int smu_v13_0_6_get_allowed_feature_mask(struct smu_context *smu,
uint32_t *feature_mask,
uint32_t num)
@@ -803,7 +860,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
int version = smu_v13_0_6_get_metrics_version(smu);
- int ret, i, retry = 100;
+ int ret, i, retry = 100, n;
uint32_t table_version;
uint16_t max_speed;
uint8_t max_width;
@@ -865,6 +922,23 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
pptable->PublicSerialNumber_AID =
GET_METRIC_FIELD(PublicSerialNumber_AID, version)[0];
+ amdgpu_device_set_uid(smu->adev->uid_info, AMDGPU_UID_TYPE_SOC,
+ 0, pptable->PublicSerialNumber_AID);
+ n = ARRAY_SIZE(metrics_v0->PublicSerialNumber_AID);
+ for (i = 0; i < n; i++) {
+ amdgpu_device_set_uid(
+ smu->adev->uid_info, AMDGPU_UID_TYPE_AID, i,
+ GET_METRIC_FIELD(PublicSerialNumber_AID,
+ version)[i]);
+ }
+ n = ARRAY_SIZE(metrics_v0->PublicSerialNumber_XCD);
+ for (i = 0; i < n; i++) {
+ amdgpu_device_set_uid(
+ smu->adev->uid_info, AMDGPU_UID_TYPE_XCD, i,
+ GET_METRIC_FIELD(PublicSerialNumber_XCD,
+ version)[i]);
+ }
+
pptable->Init = true;
if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
ret = smu_v13_0_6_get_static_metrics_table(smu);
@@ -1731,6 +1805,15 @@ static int smu_v13_0_6_read_sensor(struct smu_context *smu,
ret = -EOPNOTSUPP;
break;
}
+ case AMDGPU_PP_SENSOR_NODEPOWERLIMIT:
+ case AMDGPU_PP_SENSOR_NODEPOWER:
+ case AMDGPU_PP_SENSOR_GPPTRESIDENCY:
+ case AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT:
+ ret = smu_v13_0_12_get_npm_data(smu, sensor, (uint32_t *)data);
+ if (ret)
+ return ret;
+ *size = 4;
+ break;
case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
default:
ret = -EOPNOTSUPP;
@@ -2426,10 +2509,10 @@ static int smu_v13_0_6_i2c_control_init(struct smu_context *smu)
control->quirks = &smu_v13_0_6_i2c_control_quirks;
i2c_set_adapdata(control, smu_i2c);
- res = i2c_add_adapter(control);
+ res = devm_i2c_add_adapter(adev->dev, control);
if (res) {
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
- goto Out_err;
+ return res;
}
}
@@ -2437,27 +2520,12 @@ static int smu_v13_0_6_i2c_control_init(struct smu_context *smu)
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
return 0;
-Out_err:
- for ( ; i >= 0; i--) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
- return res;
}
static void smu_v13_0_6_i2c_control_fini(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- int i;
- for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
@@ -2560,9 +2628,9 @@ static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id,
const u8 num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3;
int version = smu_v13_0_6_get_metrics_version(smu);
struct amdgpu_partition_metrics_v1_0 *xcp_metrics;
+ MetricsTableV0_t *metrics_v0 __free(kfree) = NULL;
struct amdgpu_device *adev = smu->adev;
int ret, inst, i, j, k, idx;
- MetricsTableV0_t *metrics_v0;
MetricsTableV1_t *metrics_v1;
MetricsTableV2_t *metrics_v2;
struct amdgpu_xcp *xcp;
@@ -2587,17 +2655,14 @@ static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id,
return -ENOMEM;
ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, false);
- if (ret) {
- kfree(metrics_v0);
+ if (ret)
return ret;
- }
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==
IP_VERSION(13, 0, 12) &&
- smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
- ret = smu_v13_0_12_get_xcp_metrics(smu, xcp, table, metrics_v0);
- goto out;
- }
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
+ return smu_v13_0_12_get_xcp_metrics(smu, xcp, table,
+ metrics_v0);
metrics_v1 = (MetricsTableV1_t *)metrics_v0;
metrics_v2 = (MetricsTableV2_t *)metrics_v0;
@@ -2668,8 +2733,6 @@ static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id,
idx++;
}
}
-out:
- kfree(metrics_v0);
return sizeof(*xcp_metrics);
}
@@ -2680,31 +2743,26 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
struct gpu_metrics_v1_8 *gpu_metrics =
(struct gpu_metrics_v1_8 *)smu_table->gpu_metrics_table;
int version = smu_v13_0_6_get_metrics_version(smu);
+ MetricsTableV0_t *metrics_v0 __free(kfree) = NULL;
int ret = 0, xcc_id, inst, i, j, k, idx;
struct amdgpu_device *adev = smu->adev;
- MetricsTableV0_t *metrics_v0;
MetricsTableV1_t *metrics_v1;
MetricsTableV2_t *metrics_v2;
struct amdgpu_xcp *xcp;
u16 link_width_level;
- ssize_t num_bytes;
u8 num_jpeg_rings;
u32 inst_mask;
bool per_inst;
metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL);
ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, false);
- if (ret) {
- kfree(metrics_v0);
+ if (ret)
return ret;
- }
- if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) &&
- smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
- num_bytes = smu_v13_0_12_get_gpu_metrics(smu, table, metrics_v0);
- kfree(metrics_v0);
- return num_bytes;
- }
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==
+ IP_VERSION(13, 0, 12) &&
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
+ return smu_v13_0_12_get_gpu_metrics(smu, table, metrics_v0);
metrics_v1 = (MetricsTableV1_t *)metrics_v0;
metrics_v2 = (MetricsTableV2_t *)metrics_v0;
@@ -2890,7 +2948,6 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp, version);
*table = (void *)gpu_metrics;
- kfree(metrics_v0);
return sizeof(*gpu_metrics);
}
@@ -3076,7 +3133,7 @@ static inline bool smu_v13_0_6_is_link_reset_supported(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
int var = (adev->pdev->device & 0xF);
- if (var == 0x1)
+ if (var == 0x0 || var == 0x1 || var == 0x3)
return true;
return false;
@@ -3152,6 +3209,11 @@ static int smu_v13_0_6_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
return ret;
}
+static bool smu_v13_0_6_reset_vcn_is_supported(struct smu_context *smu)
+{
+ return smu_v13_0_6_cap_supported(smu, SMU_CAP(VCN_RESET));
+}
+
static int smu_v13_0_6_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
{
int ret = 0;
@@ -3165,6 +3227,20 @@ static int smu_v13_0_6_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
}
+static int smu_v13_0_6_post_init(struct smu_context *smu)
+{
+ if (smu_v13_0_6_is_link_reset_supported(smu))
+ smu_feature_cap_set(smu, SMU_FEATURE_CAP_ID__LINK_RESET);
+
+ if (smu_v13_0_6_reset_sdma_is_supported(smu))
+ smu_feature_cap_set(smu, SMU_FEATURE_CAP_ID__SDMA_RESET);
+
+ if (smu_v13_0_6_reset_vcn_is_supported(smu))
+ smu_feature_cap_set(smu, SMU_FEATURE_CAP_ID__VCN_RESET);
+
+ return 0;
+}
+
static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
{
struct smu_context *smu = adev->powerplay.pp_handle;
@@ -3781,6 +3857,12 @@ static const struct aca_smu_funcs smu_v13_0_6_aca_smu_funcs = {
.parse_error_code = aca_smu_parse_error_code,
};
+static void smu_v13_0_6_set_temp_funcs(struct smu_context *smu)
+{
+ smu->smu_temp.temp_funcs = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)
+ == IP_VERSION(13, 0, 12)) ? &smu_v13_0_12_temp_funcs : NULL;
+}
+
static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
/* init dpm */
.get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask,
@@ -3797,7 +3879,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.init_microcode = smu_v13_0_6_init_microcode,
.fini_microcode = smu_v13_0_fini_microcode,
.init_smc_tables = smu_v13_0_6_init_smc_tables,
- .fini_smc_tables = smu_v13_0_fini_smc_tables,
+ .fini_smc_tables = smu_v13_0_6_fini_smc_tables,
.init_power = smu_v13_0_init_power,
.fini_power = smu_v13_0_fini_power,
.check_fw_status = smu_v13_0_6_check_fw_status,
@@ -3828,7 +3910,6 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.get_xcp_metrics = smu_v13_0_6_get_xcp_metrics,
.get_thermal_temperature_range = smu_v13_0_6_get_thermal_temperature_range,
.mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported,
- .link_reset_is_support = smu_v13_0_6_is_link_reset_supported,
.mode1_reset = smu_v13_0_6_mode1_reset,
.mode2_reset = smu_v13_0_6_mode2_reset,
.link_reset = smu_v13_0_6_link_reset,
@@ -3838,8 +3919,8 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num,
.send_rma_reason = smu_v13_0_6_send_rma_reason,
.reset_sdma = smu_v13_0_6_reset_sdma,
- .reset_sdma_is_supported = smu_v13_0_6_reset_sdma_is_supported,
.dpm_reset_vcn = smu_v13_0_6_reset_vcn,
+ .post_init = smu_v13_0_6_post_init,
};
void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
@@ -3851,9 +3932,11 @@ void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
smu->feature_map = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) ?
smu_v13_0_12_feature_mask_map : smu_v13_0_6_feature_mask_map;
smu->table_map = smu_v13_0_6_table_map;
- smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION;
+ smu->smc_driver_if_version = SMU_IGNORE_IF_VERSION;
smu->smc_fw_caps |= SMU_FW_CAP_RAS_PRI;
smu_v13_0_set_smu_mailbox_registers(smu);
+ smu_v13_0_6_set_temp_funcs(smu);
amdgpu_mca_smu_init_funcs(smu->adev, &smu_v13_0_6_mca_smu_funcs);
amdgpu_aca_set_smu_funcs(smu->adev, &smu_v13_0_6_aca_smu_funcs);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
index 67b30674fd31..7ef5f3e66c27 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
@@ -49,6 +49,7 @@ struct PPTable_t {
uint32_t MaxLclkDpmRange;
uint32_t MinLclkDpmRange;
uint64_t PublicSerialNumber_AID;
+ uint32_t MaxNodePowerLimit;
bool Init;
};
@@ -64,10 +65,13 @@ enum smu_v13_0_6_caps {
SMU_CAP(RMA_MSG),
SMU_CAP(ACA_SYND),
SMU_CAP(SDMA_RESET),
+ SMU_CAP(VCN_RESET),
SMU_CAP(STATIC_METRICS),
SMU_CAP(HST_LIMIT_METRICS),
SMU_CAP(BOARD_VOLTAGE),
SMU_CAP(PLDM_VERSION),
+ SMU_CAP(TEMP_METRICS),
+ SMU_CAP(NPM_METRICS),
SMU_CAP(ALL),
};
@@ -79,6 +83,7 @@ int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table,
bool smu_v13_0_12_is_dpm_running(struct smu_context *smu);
int smu_v13_0_12_get_max_metrics_size(void);
+size_t smu_v13_0_12_get_system_metrics_size(void);
int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu);
int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
MetricsMember_t member, uint32_t *value);
@@ -86,6 +91,12 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void
ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu,
struct amdgpu_xcp *xcp, void *table,
void *smu_metrics);
+int smu_v13_0_12_tables_init(struct smu_context *smu);
+void smu_v13_0_12_tables_fini(struct smu_context *smu);
+int smu_v13_0_12_get_npm_data(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ uint32_t *value);
extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[];
extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[];
+extern const struct smu_temp_funcs smu_v13_0_12_temp_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 3aea32baea3d..086501cc5213 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -1697,9 +1697,11 @@ static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
uint32_t *min_power_limit)
{
struct smu_table_context *table_context = &smu->smu_table;
+ struct smu_14_0_2_powerplay_table *powerplay_table =
+ table_context->power_play_table;
PPTable_t *pptable = table_context->driver_pptable;
CustomSkuTable_t *skutable = &pptable->CustomSkuTable;
- uint32_t power_limit;
+ uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
uint32_t msg_limit = pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
if (smu_v14_0_get_current_power_limit(smu, &power_limit))
@@ -1712,11 +1714,29 @@ static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (max_power_limit)
- *max_power_limit = msg_limit;
+ if (powerplay_table) {
+ if (smu->od_enabled &&
+ smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
+ od_percent_upper = pptable->SkuTable.OverDriveLimitsBasicMax.Ppt;
+ od_percent_lower = pptable->SkuTable.OverDriveLimitsBasicMin.Ppt;
+ } else if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
+ od_percent_upper = 0;
+ od_percent_lower = pptable->SkuTable.OverDriveLimitsBasicMin.Ppt;
+ }
+ }
+
+ dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
+ od_percent_upper, od_percent_lower, power_limit);
- if (min_power_limit)
- *min_power_limit = 0;
+ if (max_power_limit) {
+ *max_power_limit = msg_limit * (100 + od_percent_upper);
+ *max_power_limit /= 100;
+ }
+
+ if (min_power_limit) {
+ *min_power_limit = power_limit * (100 + od_percent_lower);
+ *min_power_limit /= 100;
+ }
return 0;
}
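The new bounds are plain percentage scalings of the firmware limits. With illustrative numbers — msg_limit = 366, od_percent_upper = 15, power_limit = 300, and od_percent_lower = -10 (the OverDriveLimitsBasicMin field being negative for a lower bound is an assumption of this example):

	/* max = 366 * (100 + 15) / 100 = 420  (integer division truncates) */
	*max_power_limit = msg_limit * (100 + od_percent_upper) / 100;

	/* min = 300 * (100 + (-10)) / 100 = 270 */
	*min_power_limit = power_limit * (100 + od_percent_lower) / 100;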
@@ -2067,10 +2087,10 @@ static int smu_v14_0_2_i2c_control_init(struct smu_context *smu)
control->quirks = &smu_v14_0_2_i2c_control_quirks;
i2c_set_adapdata(control, smu_i2c);
- res = i2c_add_adapter(control);
+ res = devm_i2c_add_adapter(adev->dev, control);
if (res) {
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
- goto Out_err;
+ return res;
}
}
@@ -2080,27 +2100,12 @@ static int smu_v14_0_2_i2c_control_init(struct smu_context *smu)
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
return 0;
-Out_err:
- for ( ; i >= 0; i--) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
- return res;
}
static void smu_v14_0_2_i2c_control_fini(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- int i;
- for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 59f9abd0f7b8..f532f7c69259 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -256,11 +256,12 @@ static int __smu_cmn_ras_filter_msg(struct smu_context *smu,
{
struct amdgpu_device *adev = smu->adev;
uint32_t flags, resp;
- bool fed_status;
+ bool fed_status, pri;
flags = __smu_cmn_get_msg_flags(smu, msg);
*poll = true;
+ pri = !!(flags & SMU_MSG_NO_PRECHECK);
/* When there is RAS fatal error, FW won't process non-RAS priority
* messages. Don't allow any messages other than RAS priority messages.
*/
@@ -272,15 +273,18 @@ static int __smu_cmn_ras_filter_msg(struct smu_context *smu,
smu_get_message_name(smu, msg));
return -EACCES;
}
+ }
+ if (pri || fed_status) {
/* FW will ignore non-priority messages when a RAS fatal error
- * is detected. Hence it is possible that a previous message
- * wouldn't have got response. Allow to continue without polling
- * for response status for priority messages.
+		 * or reset condition is detected. Hence it is possible that a
+		 * previous message didn't get a response. Allow continuing
+		 * without polling for the response status of priority
+		 * messages.
*/
resp = RREG32(smu->resp_reg);
dev_dbg(adev->dev,
- "Sending RAS priority message %s response status: %x",
+ "Sending priority message %s response status: %x",
smu_get_message_name(smu, msg), resp);
if (resp == 0)
*poll = false;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index a608cdbdada4..0ae91c8b6d72 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -40,6 +40,8 @@
#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8
#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9
+#define SMU_IGNORE_IF_VERSION 0xFFFFFFFF
+
#define smu_cmn_init_soft_gpu_metrics(ptr, frev, crev) \
do { \
typecheck(struct gpu_metrics_v##frev##_##crev *, (ptr)); \
@@ -65,6 +67,32 @@
header->structure_size = sizeof(*tmp); \
} while (0)
+#define smu_cmn_init_baseboard_temp_metrics(ptr, fr, cr) \
+ do { \
+ typecheck(struct amdgpu_baseboard_temp_metrics_v##fr##_##cr *, \
+ (ptr)); \
+ struct amdgpu_baseboard_temp_metrics_v##fr##_##cr *tmp = (ptr); \
+ struct metrics_table_header *header = \
+ (struct metrics_table_header *)tmp; \
+ memset(header, 0xFF, sizeof(*tmp)); \
+ header->format_revision = fr; \
+ header->content_revision = cr; \
+ header->structure_size = sizeof(*tmp); \
+ } while (0)
+
+#define smu_cmn_init_gpuboard_temp_metrics(ptr, fr, cr) \
+ do { \
+ typecheck(struct amdgpu_gpuboard_temp_metrics_v##fr##_##cr *, \
+ (ptr)); \
+ struct amdgpu_gpuboard_temp_metrics_v##fr##_##cr *tmp = (ptr); \
+ struct metrics_table_header *header = \
+ (struct metrics_table_header *)tmp; \
+ memset(header, 0xFF, sizeof(*tmp)); \
+ header->format_revision = fr; \
+ header->content_revision = cr; \
+ header->structure_size = sizeof(*tmp); \
+ } while (0)
+
extern const int link_speed[];
/* Helper to Convert from PCIE Gen 1/2/3/4/5/6 to 0.1 GT/s speed units */
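Both new macros memset the whole structure to 0xFF before filling in the header, so a consumer can treat an all-ones field as "not populated" — consistent with the temp-metrics loops earlier in this series, which skip firmware readings of -1. A usage sketch:

	struct amdgpu_gpuboard_temp_metrics_v1_0 m;

	smu_cmn_init_gpuboard_temp_metrics(&m, 1, 0);
	/* Header now carries revision 1.0 and sizeof(m); every payload
	 * field reads back as all-ones until explicitly written. */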
diff --git a/drivers/gpu/drm/amd/ras/rascore/Makefile b/drivers/gpu/drm/amd/ras/rascore/Makefile
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/Makefile
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_core_status.h
index 0d878ca3acba..144fbe4ceb9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_core_status.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright 2014 Advanced Micro Devices, Inc.
+ * Copyright 2025 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -21,12 +22,16 @@
*
*/
-#ifndef __DCE_V11_0_H__
-#define __DCE_V11_0_H__
-
-extern const struct amdgpu_ip_block_version dce_v11_0_ip_block;
-extern const struct amdgpu_ip_block_version dce_v11_2_ip_block;
-
-void dce_v11_0_disable_dce(struct amdgpu_device *adev);
+#ifndef __RAS_CORE_STATUS_H__
+#define __RAS_CORE_STATUS_H__
+#define RAS_CORE_OK 0
+#define RAS_CORE_NOT_SUPPORTED 248
+#define RAS_CORE_FAIL_ERROR_QUERY 249
+#define RAS_CORE_FAIL_ERROR_INJECTION 250
+#define RAS_CORE_FAIL_FATAL_RECOVERY 251
+#define RAS_CORE_FAIL_POISON_CONSUMPTION 252
+#define RAS_CORE_FAIL_POISON_CREATION 253
+#define RAS_CORE_FAIL_NO_VALID_BANKS 254
+#define RAS_CORE_GPU_IN_MODE1_RESET 255
#endif
diff --git a/drivers/gpu/drm/ast/ast_2100.c b/drivers/gpu/drm/ast/ast_2100.c
index 477ee15eff5d..829e3b8b0d19 100644
--- a/drivers/gpu/drm/ast/ast_2100.c
+++ b/drivers/gpu/drm/ast/ast_2100.c
@@ -32,6 +32,39 @@
#include "ast_post.h"
/*
+ * DRAM type
+ */
+
+static enum ast_dram_layout ast_2100_get_dram_layout_p2a(struct ast_device *ast)
+{
+ u32 mcr_cfg;
+ enum ast_dram_layout dram_layout;
+
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ mcr_cfg = ast_read32(ast, 0x10004);
+
+ switch (mcr_cfg & 0x0c) {
+ case 0:
+ case 4:
+ default:
+ dram_layout = AST_DRAM_512Mx16;
+ break;
+ case 8:
+ if (mcr_cfg & 0x40)
+ dram_layout = AST_DRAM_1Gx16;
+ else
+ dram_layout = AST_DRAM_512Mx32;
+ break;
+ case 0xc:
+ dram_layout = AST_DRAM_1Gx32;
+ break;
+ }
+
+ return dram_layout;
+}
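Spelled out as a table, the decode implemented by the switch above (bit positions read directly from the code):

	/*
	 * mcr_cfg[3:2]   mcr_cfg[6]   layout
	 * 00 or 01       any          AST_DRAM_512Mx16 (also the default)
	 * 10             1            AST_DRAM_1Gx16
	 * 10             0            AST_DRAM_512Mx32
	 * 11             any          AST_DRAM_1Gx32
	 */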
+
+/*
* POST
*/
@@ -266,6 +299,7 @@ static void ast_post_chip_2100(struct ast_device *ast)
u8 j;
u32 data, temp, i;
const struct ast_dramstruct *dram_reg_info;
+ enum ast_dram_layout dram_layout = ast_2100_get_dram_layout_p2a(ast);
j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
@@ -292,11 +326,17 @@ static void ast_post_chip_2100(struct ast_device *ast)
for (i = 0; i < 15; i++)
udelay(dram_reg_info->data);
} else if (AST_DRAMSTRUCT_IS(dram_reg_info, DRAM_TYPE)) {
- data = dram_reg_info->data;
- if (ast->dram_type == AST_DRAM_1Gx16)
+ switch (dram_layout) {
+ case AST_DRAM_1Gx16:
data = 0x00000d89;
- else if (ast->dram_type == AST_DRAM_1Gx32)
+ break;
+ case AST_DRAM_1Gx32:
data = 0x00000c8d;
+ break;
+ default:
+ data = dram_reg_info->data;
+ break;
+ }
temp = ast_read32(ast, 0x12070);
temp &= 0xc;
diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
index 19c04687b0fe..8e650a02c528 100644
--- a/drivers/gpu/drm/ast/ast_dp.c
+++ b/drivers/gpu/drm/ast/ast_dp.c
@@ -134,7 +134,7 @@ static int ast_astdp_read_edid_block(void *data, u8 *buf, unsigned int block, si
* 3. The Delays are often longer a lot when system resume from S3/S4.
*/
if (j)
- mdelay(j + 1);
+ msleep(j + 1);
/* Wait for EDID offset to show up in mirror register */
vgacrd7 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd7);
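Swapping mdelay() for msleep() in the EDID retry loop turns a busy-wait that could pin a CPU for tens of milliseconds into a sleep that yields to the scheduler — safe here because the function runs in process context. The usual rule of thumb from Documentation/timers/timers-howto.rst:

	/* Atomic context: must busy-wait. */
	udelay(50);			/* sub-millisecond delays */
	mdelay(5);			/* avoid where possible */

	/* Process context: sleep instead. */
	usleep_range(100, 200);		/* short, bounded waits */
	msleep(20);			/* ~10 ms and up, as in the EDID retry */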
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index e37a55295ed7..c15aef014f69 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -98,13 +98,15 @@ enum ast_config_mode {
ast_use_defaults
};
-#define AST_DRAM_512Mx16 0
-#define AST_DRAM_1Gx16 1
-#define AST_DRAM_512Mx32 2
-#define AST_DRAM_1Gx32 3
-#define AST_DRAM_2Gx16 6
-#define AST_DRAM_4Gx16 7
-#define AST_DRAM_8Gx16 8
+enum ast_dram_layout {
+ AST_DRAM_512Mx16 = 0,
+ AST_DRAM_1Gx16 = 1,
+ AST_DRAM_512Mx32 = 2,
+ AST_DRAM_1Gx32 = 3,
+ AST_DRAM_2Gx16 = 6,
+ AST_DRAM_4Gx16 = 7,
+ AST_DRAM_8Gx16 = 8,
+};
/*
* Hardware cursor
@@ -172,10 +174,6 @@ struct ast_device {
enum ast_config_mode config_mode;
enum ast_chip chip;
- uint32_t dram_bus_width;
- uint32_t dram_type;
- uint32_t mclk;
-
void __iomem *vram;
unsigned long vram_base;
unsigned long vram_size;
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 44b9b5f659fc..3eea6a6cdacd 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -210,126 +210,6 @@ static void ast_detect_tx_chip(struct ast_device *ast, bool need_post)
drm_info(dev, "Using %s\n", info_str[ast->tx_chip]);
}
-static int ast_get_dram_info(struct ast_device *ast)
-{
- struct drm_device *dev = &ast->base;
- struct device_node *np = dev->dev->of_node;
- uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap;
- uint32_t denum, num, div, ref_pll, dsel;
-
- switch (ast->config_mode) {
- case ast_use_dt:
- /*
- * If some properties are missing, use reasonable
- * defaults for GEN5
- */
- if (of_property_read_u32(np, "aspeed,mcr-configuration",
- &mcr_cfg))
- mcr_cfg = 0x00000577;
- if (of_property_read_u32(np, "aspeed,mcr-scu-mpll",
- &mcr_scu_mpll))
- mcr_scu_mpll = 0x000050C0;
- if (of_property_read_u32(np, "aspeed,mcr-scu-strap",
- &mcr_scu_strap))
- mcr_scu_strap = 0;
- break;
- case ast_use_p2a:
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
- mcr_cfg = ast_read32(ast, 0x10004);
- mcr_scu_mpll = ast_read32(ast, 0x10120);
- mcr_scu_strap = ast_read32(ast, 0x10170);
- break;
- case ast_use_defaults:
- default:
- ast->dram_bus_width = 16;
- ast->dram_type = AST_DRAM_1Gx16;
- if (IS_AST_GEN6(ast))
- ast->mclk = 800;
- else
- ast->mclk = 396;
- return 0;
- }
-
- if (mcr_cfg & 0x40)
- ast->dram_bus_width = 16;
- else
- ast->dram_bus_width = 32;
-
- if (IS_AST_GEN6(ast)) {
- switch (mcr_cfg & 0x03) {
- case 0:
- ast->dram_type = AST_DRAM_1Gx16;
- break;
- default:
- case 1:
- ast->dram_type = AST_DRAM_2Gx16;
- break;
- case 2:
- ast->dram_type = AST_DRAM_4Gx16;
- break;
- case 3:
- ast->dram_type = AST_DRAM_8Gx16;
- break;
- }
- } else if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast)) {
- switch (mcr_cfg & 0x03) {
- case 0:
- ast->dram_type = AST_DRAM_512Mx16;
- break;
- default:
- case 1:
- ast->dram_type = AST_DRAM_1Gx16;
- break;
- case 2:
- ast->dram_type = AST_DRAM_2Gx16;
- break;
- case 3:
- ast->dram_type = AST_DRAM_4Gx16;
- break;
- }
- } else {
- switch (mcr_cfg & 0x0c) {
- case 0:
- case 4:
- ast->dram_type = AST_DRAM_512Mx16;
- break;
- case 8:
- if (mcr_cfg & 0x40)
- ast->dram_type = AST_DRAM_1Gx16;
- else
- ast->dram_type = AST_DRAM_512Mx32;
- break;
- case 0xc:
- ast->dram_type = AST_DRAM_1Gx32;
- break;
- }
- }
-
- if (mcr_scu_strap & 0x2000)
- ref_pll = 14318;
- else
- ref_pll = 12000;
-
- denum = mcr_scu_mpll & 0x1f;
- num = (mcr_scu_mpll & 0x3fe0) >> 5;
- dsel = (mcr_scu_mpll & 0xc000) >> 14;
- switch (dsel) {
- case 3:
- div = 0x4;
- break;
- case 2:
- case 1:
- div = 0x2;
- break;
- default:
- div = 0x1;
- break;
- }
- ast->mclk = ref_pll * (num + 2) / ((denum + 2) * (div * 1000));
- return 0;
-}
-
struct drm_device *ast_device_create(struct pci_dev *pdev,
const struct drm_driver *drv,
enum ast_chip chip,
@@ -352,12 +232,6 @@ struct drm_device *ast_device_create(struct pci_dev *pdev,
ast->regs = regs;
ast->ioregs = ioregs;
- ret = ast_get_dram_info(ast);
- if (ret)
- return ERR_PTR(ret);
- drm_info(dev, "dram MCLK=%u Mhz type=%d bus_width=%d\n",
- ast->mclk, ast->dram_type, ast->dram_bus_width);
-
ast_detect_tx_chip(ast, need_post);
switch (ast->tx_chip) {
case AST_TX_ASTDP:
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index b9e0ca85226a..a250afd8d662 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -120,8 +120,8 @@ config DRM_ITE_IT6505
select DRM_DISPLAY_DP_AUX_BUS
select DRM_KMS_HELPER
select EXTCON
- select CRYPTO
- select CRYPTO_HASH
+ select CRYPTO_LIB_SHA1
+ select REGMAP_I2C
help
ITE IT6505 DisplayPort bridge chip driver.
@@ -316,6 +316,19 @@ config DRM_SIMPLE_BRIDGE
Support for non-programmable DRM bridges, such as ADI ADV7123, TI
THS8134 and THS8135 or passive resistor ladder DACs.
+config DRM_SOLOMON_SSD2825
+ tristate "SSD2825 RGB/DSI bridge"
+ depends on SPI_MASTER && OF
+ select DRM_MIPI_DSI
+ select DRM_KMS_HELPER
+ select DRM_PANEL
+ help
+ Say Y here if you want support for the Solomon SSD2825 RGB/DSI
+ SPI bridge driver.
+
+ Say M here if you want to support this hardware as a module.
+ The module will be named "ssd2825".
+
config DRM_THINE_THC63LVD1024
tristate "Thine THC63LVD1024 LVDS decoder bridge"
depends on OF
@@ -438,6 +451,18 @@ config DRM_TI_TPD12S015
Texas Instruments TPD12S015 HDMI level shifter and ESD protection
driver.
+config DRM_WAVESHARE_BRIDGE
+ tristate "Waveshare DSI bridge"
+ depends on OF
+ depends on BACKLIGHT_CLASS_DEVICE
+ select DRM_PANEL_BRIDGE
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select REGMAP_I2C
+ help
+	  Driver for the Waveshare DSI to DPI bridge board.
+	  Say Y here if you have such hardware.
+
source "drivers/gpu/drm/bridge/analogix/Kconfig"
source "drivers/gpu/drm/bridge/adv7511/Kconfig"
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 245e8a27e3fc..c7dc03182e59 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o
obj-$(CONFIG_DRM_SII902X) += sii902x.o
obj-$(CONFIG_DRM_SII9234) += sii9234.o
obj-$(CONFIG_DRM_SIMPLE_BRIDGE) += simple-bridge.o
+obj-$(CONFIG_DRM_SOLOMON_SSD2825) += ssd2825.o
obj-$(CONFIG_DRM_THINE_THC63LVD1024) += thc63lvd1024.o
obj-$(CONFIG_DRM_TOSHIBA_TC358762) += tc358762.o
obj-$(CONFIG_DRM_TOSHIBA_TC358764) += tc358764.o
@@ -40,6 +41,7 @@ obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o
obj-$(CONFIG_DRM_TI_TDP158) += ti-tdp158.o
obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
obj-$(CONFIG_DRM_TI_TPD12S015) += ti-tpd12s015.o
+obj-$(CONFIG_DRM_WAVESHARE_BRIDGE) += waveshare-dsi.o
obj-$(CONFIG_DRM_NWL_MIPI_DSI) += nwl-dsi.o
obj-$(CONFIG_DRM_ITE_IT66121) += ite-it66121.o
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index 85ebead9809c..8be7266fd4f4 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -195,13 +195,14 @@
#define ADV7511_I2S_IEC958_DIRECT 3
#define ADV7511_PACKET(p, x) ((p) * 0x20 + (x))
-#define ADV7511_PACKET_SDP(x) ADV7511_PACKET(0, x)
+#define ADV7511_PACKET_SPD(x) ADV7511_PACKET(0, x)
#define ADV7511_PACKET_MPEG(x) ADV7511_PACKET(1, x)
#define ADV7511_PACKET_ACP(x) ADV7511_PACKET(2, x)
#define ADV7511_PACKET_ISRC1(x) ADV7511_PACKET(3, x)
#define ADV7511_PACKET_ISRC2(x) ADV7511_PACKET(4, x)
#define ADV7511_PACKET_GM(x) ADV7511_PACKET(5, x)
-#define ADV7511_PACKET_SPARE(x) ADV7511_PACKET(6, x)
+#define ADV7511_PACKET_SPARE1(x) ADV7511_PACKET(6, x)
+#define ADV7511_PACKET_SPARE2(x) ADV7511_PACKET(7, x)
#define ADV7511_REG_CEC_TX_FRAME_HDR 0x00
#define ADV7511_REG_CEC_TX_FRAME_DATA0 0x01
@@ -348,6 +349,7 @@ struct adv7511 {
struct i2c_client *i2c_cec;
struct regmap *regmap;
+ struct regmap *regmap_packet;
struct regmap *regmap_cec;
enum drm_connector_status status;
bool powered;
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
index 766b1c96bc88..87e7e820810a 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
@@ -12,6 +12,8 @@
#include <sound/soc.h>
#include <linux/of_graph.h>
+#include <drm/display/drm_hdmi_state_helper.h>
+
#include "adv7511.h"
static void adv7511_calc_cts_n(unsigned int f_tmds, unsigned int fs,
@@ -155,17 +157,8 @@ int adv7511_hdmi_audio_prepare(struct drm_bridge *bridge,
regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG,
ADV7511_I2C_FREQ_ID_CFG_RATE_MASK, rate << 4);
- /* send current Audio infoframe values while updating */
- regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
- BIT(5), BIT(5));
-
- regmap_write(adv7511->regmap, ADV7511_REG_AUDIO_INFOFRAME(0), 0x1);
-
- /* use Audio infoframe updated info */
- regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
- BIT(5), 0);
-
- return 0;
+ return drm_atomic_helper_connector_hdmi_update_audio_infoframe(connector,
+ &hparms->cea);
}
int adv7511_hdmi_audio_startup(struct drm_bridge *bridge,
@@ -188,15 +181,9 @@ int adv7511_hdmi_audio_startup(struct drm_bridge *bridge,
/* not copyrighted */
regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CFG1,
BIT(5), BIT(5));
- /* enable audio infoframes */
- regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
- BIT(3), BIT(3));
/* AV mute disable */
regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(0),
BIT(7) | BIT(6), BIT(7));
- /* use Audio infoframe updated info */
- regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
- BIT(5), 0);
/* enable SPDIF receiver */
if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
@@ -214,4 +201,6 @@ void adv7511_hdmi_audio_shutdown(struct drm_bridge *bridge,
if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
BIT(7), 0);
+
+ drm_atomic_helper_connector_hdmi_clear_audio_infoframe(connector);
}
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 00d6417c177b..b9be86541307 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -132,6 +132,13 @@ static const struct regmap_config adv7511_regmap_config = {
.volatile_reg = adv7511_register_volatile,
};
+static const struct regmap_config adv7511_packet_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+};
+
/* -----------------------------------------------------------------------------
* Hardware configuration
*/
@@ -886,9 +893,18 @@ static int adv7511_bridge_hdmi_clear_infoframe(struct drm_bridge *bridge,
struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
switch (type) {
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_AUDIO_INFOFRAME);
+ break;
case HDMI_INFOFRAME_TYPE_AVI:
adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
break;
+ case HDMI_INFOFRAME_TYPE_SPD:
+ adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_SPD);
+ break;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_SPARE1);
+ break;
default:
drm_dbg_driver(adv7511->bridge.dev, "Unsupported HDMI InfoFrame %x\n", type);
break;
@@ -903,16 +919,52 @@ static int adv7511_bridge_hdmi_write_infoframe(struct drm_bridge *bridge,
{
struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
- adv7511_bridge_hdmi_clear_infoframe(bridge, type);
-
switch (type) {
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ /* send current Audio infoframe values while updating */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+ BIT(5), BIT(5));
+
+ /* The Audio infoframe id is not configurable */
+ regmap_bulk_write(adv7511->regmap, ADV7511_REG_AUDIO_INFOFRAME_VERSION,
+ buffer + 1, len - 1);
+
+ /* use Audio infoframe updated info */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+ BIT(5), 0);
+
+ adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_AUDIO_INFOFRAME);
+ break;
case HDMI_INFOFRAME_TYPE_AVI:
+ /* send current AVI infoframe values while updating */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+ BIT(6), BIT(6));
+
/* The AVI infoframe id is not configurable */
regmap_bulk_write(adv7511->regmap, ADV7511_REG_AVI_INFOFRAME_VERSION,
buffer + 1, len - 1);
+ regmap_write(adv7511->regmap, ADV7511_REG_AUDIO_INFOFRAME_LENGTH, 0x2);
+ regmap_write(adv7511->regmap, ADV7511_REG_AUDIO_INFOFRAME(1), 0x1);
+
+ /* use AVI infoframe updated info */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+ BIT(6), 0);
+
adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
break;
+ case HDMI_INFOFRAME_TYPE_SPD:
+ adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_SPD);
+ regmap_bulk_write(adv7511->regmap_packet, ADV7511_PACKET_SPD(0),
+ buffer, len);
+ adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_SPD);
+ break;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_SPARE1);
+ regmap_bulk_write(adv7511->regmap_packet, ADV7511_PACKET_SPARE1(0),
+ buffer, len);
+ adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_SPARE1);
+ break;
default:
drm_dbg_driver(adv7511->bridge.dev, "Unsupported HDMI InfoFrame %x\n", type);
break;
@@ -1242,6 +1294,13 @@ static int adv7511_probe(struct i2c_client *i2c)
goto err_i2c_unregister_edid;
}
+ adv7511->regmap_packet = devm_regmap_init_i2c(adv7511->i2c_packet,
+ &adv7511_packet_config);
+ if (IS_ERR(adv7511->regmap_packet)) {
+ ret = PTR_ERR(adv7511->regmap_packet);
+ goto err_i2c_unregister_packet;
+ }
+
regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR,
adv7511->i2c_packet->addr << 1);
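
With the infoframe handling moved behind the generic DRM HDMI connector helpers, the bridge no longer packs frames itself: the helpers pack each infoframe into the standard linux/hdmi.h byte layout (HB0-HB2 header, PB0 checksum, then payload) and hand it to the bridge's .hdmi_write_infoframe as (type, buffer, len) — which is why the AVI and audio paths above write buffer + 1, skipping the type byte the hardware supplies itself. A minimal sketch of how such a buffer is produced; the vendor/product strings are illustrative, not from this patch:

	struct hdmi_spd_infoframe spd;
	u8 buf[HDMI_INFOFRAME_SIZE(SPD)];
	ssize_t len;

	hdmi_spd_infoframe_init(&spd, "Vendor", "Product");
	len = hdmi_spd_infoframe_pack(&spd, buf, sizeof(buf));
	if (len > 0)
		/* roughly what the connector helpers do for the bridge */
		bridge->funcs->hdmi_write_infoframe(bridge, HDMI_INFOFRAME_TYPE_SPD,
						    buf, len);
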
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index ed35e567d117..efe534977d12 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1474,8 +1474,8 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
dp = devm_drm_bridge_alloc(dev, struct analogix_dp_device, bridge,
&analogix_dp_bridge_funcs);
- if (!dp)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(dp))
+ return ERR_CAST(dp);
dp->dev = &pdev->dev;
dp->dpms_mode = DRM_MODE_DPMS_OFF;
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index c0ad8f59e483..6f3fdcb6afdb 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -2604,6 +2604,7 @@ static int anx7625_link_bridge(struct drm_dp_aux *aux)
platform->bridge.type = platform->pdata.panel_bridge ?
DRM_MODE_CONNECTOR_eDP :
DRM_MODE_CONNECTOR_DisplayPort;
+ platform->bridge.support_hdcp = true;
drm_bridge_add(&platform->bridge);
@@ -2677,7 +2678,7 @@ static int anx7625_i2c_probe(struct i2c_client *client)
ret = devm_request_threaded_irq(dev, platform->pdata.intp_irq,
NULL, anx7625_intr_hpd_isr,
IRQF_TRIGGER_FALLING |
- IRQF_ONESHOT,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
"anx7625-intp", platform);
if (ret) {
DRM_DEV_ERROR(dev, "fail to request irq\n");
@@ -2746,8 +2747,10 @@ static int anx7625_i2c_probe(struct i2c_client *client)
}
/* Add work function */
- if (platform->pdata.intp_irq)
+ if (platform->pdata.intp_irq) {
+ enable_irq(platform->pdata.intp_irq);
queue_work(platform->workqueue, &platform->work);
+ }
if (platform->pdata.audio_en)
anx7625_register_audio(dev, platform);
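
Requesting the interrupt with IRQF_NO_AUTOEN keeps it masked until probe has finished setting up everything the handler depends on, closing the window where a pending HPD interrupt could fire before the workqueue exists. The general pattern, with illustrative names:

	ret = devm_request_threaded_irq(dev, irq, NULL, my_isr,
					IRQF_ONESHOT | IRQF_NO_AUTOEN,
					"my-dev", priv);
	if (ret)
		return ret;

	/* ... finish initialising state the handler touches ... */

	enable_irq(irq);	/* the handler can run only from here on */
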
diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c
index b63304d3a80f..b3e4cdff61d6 100644
--- a/drivers/gpu/drm/bridge/aux-bridge.c
+++ b/drivers/gpu/drm/bridge/aux-bridge.c
@@ -18,6 +18,7 @@ static void drm_aux_bridge_release(struct device *dev)
{
struct auxiliary_device *adev = to_auxiliary_dev(dev);
+ of_node_put(dev->of_node);
ida_free(&drm_aux_bridge_ida, adev->id);
kfree(adev);
@@ -65,6 +66,7 @@ int drm_aux_bridge_register(struct device *parent)
ret = auxiliary_device_init(adev);
if (ret) {
+ of_node_put(adev->dev.of_node);
ida_free(&drm_aux_bridge_ida, adev->id);
kfree(adev);
return ret;
diff --git a/drivers/gpu/drm/bridge/cadence/Kconfig b/drivers/gpu/drm/bridge/cadence/Kconfig
index cced81633ddc..f1d8a8a151d8 100644
--- a/drivers/gpu/drm/bridge/cadence/Kconfig
+++ b/drivers/gpu/drm/bridge/cadence/Kconfig
@@ -6,6 +6,7 @@ config DRM_CDNS_DSI
select DRM_PANEL_BRIDGE
select GENERIC_PHY
select GENERIC_PHY_MIPI_DPHY
+ select VIDEOMODE_HELPERS
depends on OF
help
Support Cadence DPI to DSI bridge. This is an internal
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
index a57ca8c3bdae..09b289f0fcbf 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
@@ -9,6 +9,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_probe_helper.h>
#include <video/mipi_display.h>
+#include <video/videomode.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
@@ -417,7 +418,8 @@
#define DSI_OUTPUT_PORT 0
#define DSI_INPUT_PORT(inputid) (1 + (inputid))
-#define DSI_HBP_FRAME_OVERHEAD 12
+#define DSI_HBP_FRAME_PULSE_OVERHEAD 12
+#define DSI_HBP_FRAME_EVENT_OVERHEAD 16
#define DSI_HSA_FRAME_OVERHEAD 14
#define DSI_HFP_FRAME_OVERHEAD 6
#define DSI_HSS_VSS_VSE_FRAME_OVERHEAD 4
@@ -452,15 +454,6 @@ bridge_to_cdns_dsi_input(struct drm_bridge *bridge)
return container_of(bridge, struct cdns_dsi_input, bridge);
}
-static unsigned int mode_to_dpi_hfp(const struct drm_display_mode *mode,
- bool mode_valid_check)
-{
- if (mode_valid_check)
- return mode->hsync_start - mode->hdisplay;
-
- return mode->crtc_hsync_start - mode->crtc_hdisplay;
-}
-
static unsigned int dpi_to_dsi_timing(unsigned int dpi_timing,
unsigned int dpi_bpp,
unsigned int dsi_pkt_overhead)
@@ -476,145 +469,77 @@ static unsigned int dpi_to_dsi_timing(unsigned int dpi_timing,
}
static int cdns_dsi_mode2cfg(struct cdns_dsi *dsi,
- const struct drm_display_mode *mode,
- struct cdns_dsi_cfg *dsi_cfg,
- bool mode_valid_check)
+ const struct videomode *vm,
+ struct cdns_dsi_cfg *dsi_cfg)
{
struct cdns_dsi_output *output = &dsi->output;
- unsigned int tmp;
- bool sync_pulse = false;
+ u32 dpi_hsa, dpi_hbp, dpi_hfp, dpi_hact;
+ bool sync_pulse;
int bpp;
+ dpi_hsa = vm->hsync_len;
+ dpi_hbp = vm->hback_porch;
+ dpi_hfp = vm->hfront_porch;
+ dpi_hact = vm->hactive;
+
memset(dsi_cfg, 0, sizeof(*dsi_cfg));
- if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
- sync_pulse = true;
+ sync_pulse = output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
bpp = mipi_dsi_pixel_format_to_bpp(output->dev->format);
- if (mode_valid_check)
- tmp = mode->htotal -
- (sync_pulse ? mode->hsync_end : mode->hsync_start);
- else
- tmp = mode->crtc_htotal -
- (sync_pulse ?
- mode->crtc_hsync_end : mode->crtc_hsync_start);
-
- dsi_cfg->hbp = dpi_to_dsi_timing(tmp, bpp, DSI_HBP_FRAME_OVERHEAD);
-
if (sync_pulse) {
- if (mode_valid_check)
- tmp = mode->hsync_end - mode->hsync_start;
- else
- tmp = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ dsi_cfg->hbp = dpi_to_dsi_timing(dpi_hbp, bpp,
+ DSI_HBP_FRAME_PULSE_OVERHEAD);
- dsi_cfg->hsa = dpi_to_dsi_timing(tmp, bpp,
+ dsi_cfg->hsa = dpi_to_dsi_timing(dpi_hsa, bpp,
DSI_HSA_FRAME_OVERHEAD);
- }
-
- dsi_cfg->hact = dpi_to_dsi_timing(mode_valid_check ?
- mode->hdisplay : mode->crtc_hdisplay,
- bpp, 0);
- dsi_cfg->hfp = dpi_to_dsi_timing(mode_to_dpi_hfp(mode, mode_valid_check),
- bpp, DSI_HFP_FRAME_OVERHEAD);
-
- return 0;
-}
-
-static int cdns_dsi_adjust_phy_config(struct cdns_dsi *dsi,
- struct cdns_dsi_cfg *dsi_cfg,
- struct phy_configure_opts_mipi_dphy *phy_cfg,
- const struct drm_display_mode *mode,
- bool mode_valid_check)
-{
- struct cdns_dsi_output *output = &dsi->output;
- unsigned long long dlane_bps;
- unsigned long adj_dsi_htotal;
- unsigned long dsi_htotal;
- unsigned long dpi_htotal;
- unsigned long dpi_hz;
- unsigned int dsi_hfp_ext;
- unsigned int lanes = output->dev->lanes;
-
- dsi_htotal = dsi_cfg->hbp + DSI_HBP_FRAME_OVERHEAD;
- if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
- dsi_htotal += dsi_cfg->hsa + DSI_HSA_FRAME_OVERHEAD;
+ } else {
+ dsi_cfg->hbp = dpi_to_dsi_timing(dpi_hbp + dpi_hsa, bpp,
+ DSI_HBP_FRAME_EVENT_OVERHEAD);
- dsi_htotal += dsi_cfg->hact;
- dsi_htotal += dsi_cfg->hfp + DSI_HFP_FRAME_OVERHEAD;
-
- /*
- * Make sure DSI htotal is aligned on a lane boundary when calculating
- * the expected data rate. This is done by extending HFP in case of
- * misalignment.
- */
- adj_dsi_htotal = dsi_htotal;
- if (dsi_htotal % lanes)
- adj_dsi_htotal += lanes - (dsi_htotal % lanes);
+ dsi_cfg->hsa = 0;
+ }
- dpi_hz = (mode_valid_check ? mode->clock : mode->crtc_clock) * 1000;
- dlane_bps = (unsigned long long)dpi_hz * adj_dsi_htotal;
+ dsi_cfg->hact = dpi_to_dsi_timing(dpi_hact, bpp, 0);
- /* data rate in bytes/sec is not an integer, refuse the mode. */
- dpi_htotal = mode_valid_check ? mode->htotal : mode->crtc_htotal;
- if (do_div(dlane_bps, lanes * dpi_htotal))
- return -EINVAL;
+ dsi_cfg->hfp = dpi_to_dsi_timing(dpi_hfp, bpp, DSI_HFP_FRAME_OVERHEAD);
- /* data rate was in bytes/sec, convert to bits/sec. */
- phy_cfg->hs_clk_rate = dlane_bps * 8;
+ dsi_cfg->htotal = dsi_cfg->hact + dsi_cfg->hfp + DSI_HFP_FRAME_OVERHEAD;
- dsi_hfp_ext = adj_dsi_htotal - dsi_htotal;
- dsi_cfg->hfp += dsi_hfp_ext;
- dsi_cfg->htotal = dsi_htotal + dsi_hfp_ext;
+ if (sync_pulse) {
+ dsi_cfg->htotal += dsi_cfg->hbp + DSI_HBP_FRAME_PULSE_OVERHEAD;
+ dsi_cfg->htotal += dsi_cfg->hsa + DSI_HSA_FRAME_OVERHEAD;
+ } else {
+ dsi_cfg->htotal += dsi_cfg->hbp + DSI_HBP_FRAME_EVENT_OVERHEAD;
+ }
return 0;
}
static int cdns_dsi_check_conf(struct cdns_dsi *dsi,
- const struct drm_display_mode *mode,
- struct cdns_dsi_cfg *dsi_cfg,
- bool mode_valid_check)
+ const struct videomode *vm,
+ struct cdns_dsi_cfg *dsi_cfg)
{
struct cdns_dsi_output *output = &dsi->output;
struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy;
- unsigned long dsi_hss_hsa_hse_hbp;
unsigned int nlanes = output->dev->lanes;
- int mode_clock = (mode_valid_check ? mode->clock : mode->crtc_clock);
int ret;
- ret = cdns_dsi_mode2cfg(dsi, mode, dsi_cfg, mode_valid_check);
+ ret = cdns_dsi_mode2cfg(dsi, vm, dsi_cfg);
if (ret)
return ret;
- ret = phy_mipi_dphy_get_default_config(mode_clock * 1000,
+ ret = phy_mipi_dphy_get_default_config(vm->pixelclock,
mipi_dsi_pixel_format_to_bpp(output->dev->format),
nlanes, phy_cfg);
if (ret)
return ret;
- ret = cdns_dsi_adjust_phy_config(dsi, dsi_cfg, phy_cfg, mode, mode_valid_check);
- if (ret)
- return ret;
-
ret = phy_validate(dsi->dphy, PHY_MODE_MIPI_DPHY, 0, &output->phy_opts);
if (ret)
return ret;
- dsi_hss_hsa_hse_hbp = dsi_cfg->hbp + DSI_HBP_FRAME_OVERHEAD;
- if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
- dsi_hss_hsa_hse_hbp += dsi_cfg->hsa + DSI_HSA_FRAME_OVERHEAD;
-
- /*
- * Make sure DPI(HFP) > DSI(HSS+HSA+HSE+HBP) to guarantee that the FIFO
- * is empty before we start a receiving a new line on the DPI
- * interface.
- */
- if ((u64)phy_cfg->hs_clk_rate *
- mode_to_dpi_hfp(mode, mode_valid_check) * nlanes <
- (u64)dsi_hss_hsa_hse_hbp *
- (mode_valid_check ? mode->clock : mode->crtc_clock) * 1000)
- return -EINVAL;
-
return 0;
}
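
For orientation, dpi_to_dsi_timing() converts a DPI interval expressed in pixels into a DSI blanking size in bytes, subtracting the per-packet protocol overhead (the helper's body sits outside this hunk, so the exact rounding is not shown here). A rough worked example for a 24 bpp mode in sync-pulse mode: a 40-pixel back porch is 40 * 24 / 8 = 120 bytes on the wire, minus the 12-byte DSI_HBP_FRAME_PULSE_OVERHEAD, giving hbp = 108. In event mode the HSA pixels are folded into HBP, as above, and the 16-byte event overhead is subtracted instead.
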
@@ -644,8 +569,7 @@ cdns_dsi_bridge_mode_valid(struct drm_bridge *bridge,
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
struct cdns_dsi_output *output = &dsi->output;
- struct cdns_dsi_cfg dsi_cfg;
- int bpp, ret;
+ int bpp;
/*
* VFP_DSI should be less than VFP_DPI and VFP_DSI should be at
@@ -663,10 +587,6 @@ cdns_dsi_bridge_mode_valid(struct drm_bridge *bridge,
if ((mode->hdisplay * bpp) % 32)
return MODE_H_ILLEGAL;
- ret = cdns_dsi_check_conf(dsi, mode, &dsi_cfg, true);
- if (ret)
- return MODE_BAD;
-
return MODE_OK;
}
@@ -882,7 +802,13 @@ static void cdns_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
tx_byte_period = DIV_ROUND_DOWN_ULL((u64)NSEC_PER_SEC * 8,
phy_cfg->hs_clk_rate);
- reg_wakeup = (phy_cfg->hs_prepare + phy_cfg->hs_zero) / tx_byte_period;
+
+ /*
+ * Estimated time [in clock cycles] to perform LP->HS on D-PHY.
+ * It is not clear how to calculate this, so for now,
+ * set it to 1/10 of the total number of clocks in a line.
+ */
+ reg_wakeup = dsi_cfg.htotal / nlanes / 10;
writel(REG_WAKEUP_TIME(reg_wakeup) | REG_LINE_DURATION(tmp),
dsi->regs + VID_DPHY_TIME);
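
With the heuristic above, a mode whose DSI htotal works out to 2200 bytes on a 4-lane link gets reg_wakeup = 2200 / 4 / 10 = 55 byte-clock cycles; as the comment admits, the 1/10-of-a-line figure is an estimate rather than a datasheet-derived value.
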
@@ -989,6 +915,28 @@ static u32 *cdns_dsi_bridge_get_input_bus_fmts(struct drm_bridge *bridge,
return input_fmts;
}
+static long cdns_dsi_round_pclk(struct cdns_dsi *dsi, unsigned long pclk)
+{
+ struct cdns_dsi_output *output = &dsi->output;
+ unsigned int nlanes = output->dev->lanes;
+ union phy_configure_opts phy_opts = { 0 };
+ u32 bitspp;
+ int ret;
+
+ bitspp = mipi_dsi_pixel_format_to_bpp(output->dev->format);
+
+ ret = phy_mipi_dphy_get_default_config(pclk, bitspp, nlanes,
+ &phy_opts.mipi_dphy);
+ if (ret)
+ return ret;
+
+ ret = phy_validate(dsi->dphy, PHY_MODE_MIPI_DPHY, 0, &phy_opts);
+ if (ret)
+ return ret;
+
+ return div_u64((u64)phy_opts.mipi_dphy.hs_clk_rate * nlanes, bitspp);
+}
+
static int cdns_dsi_bridge_atomic_check(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
@@ -997,10 +945,32 @@ static int cdns_dsi_bridge_atomic_check(struct drm_bridge *bridge,
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
struct cdns_dsi_bridge_state *dsi_state = to_cdns_dsi_bridge_state(bridge_state);
- const struct drm_display_mode *mode = &crtc_state->mode;
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
struct cdns_dsi_cfg *dsi_cfg = &dsi_state->dsi_cfg;
+ struct videomode vm;
+ long pclk;
+
+ /* cdns-dsi requires negative syncs */
+ adjusted_mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+ adjusted_mode->flags |= DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC;
- return cdns_dsi_check_conf(dsi, mode, dsi_cfg, false);
+ /*
+ * The DPHY PLL has quite coarse-grained clock rate options. Figure out
+ * what hsclk rate we can achieve based on the pixel clock, convert it
+ * back to a pixel clock, and set that as adjusted_mode->clock. This is
+ * all in the hope that the CRTC will be able to provide the requested
+ * clock, as otherwise the DPI and DSI clocks will be out of sync.
+ */
+
+ pclk = cdns_dsi_round_pclk(dsi, adjusted_mode->clock * 1000);
+ if (pclk < 0)
+ return (int)pclk;
+
+ adjusted_mode->clock = pclk / 1000;
+
+ drm_display_mode_to_videomode(adjusted_mode, &vm);
+
+ return cdns_dsi_check_conf(dsi, &vm, dsi_cfg);
}
static struct drm_bridge_state *
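
The round trip in cdns_dsi_round_pclk() is: pixel clock -> default D-PHY config -> PHY-validated hs_clk_rate -> back to a pixel clock via pclk = hs_clk_rate * nlanes / bitspp. As an illustrative example, with 4 lanes and RGB888 (24 bpp), a validated hs_clk_rate of 891 MHz rounds the pixel clock to 891000000 * 4 / 24 = 148.5 MHz, so adjusted_mode->clock becomes 148500 kHz.
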
@@ -1082,10 +1052,6 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
if (output->dev)
return -EBUSY;
- /* We do not support burst mode yet. */
- if (dev->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
- return -ENOTSUPP;
-
/*
* The host <-> device link might be described using an OF-graph
* representation, in this case we extract the device of_node from
@@ -1442,4 +1408,3 @@ MODULE_AUTHOR("Boris Brezillon <boris.brezillon@bootlin.com>");
MODULE_DESCRIPTION("Cadence DSI driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:cdns-dsi");
-
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index a614d1384f71..38726ae1bf15 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -1984,8 +1984,10 @@ static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
mhdp_state = to_cdns_mhdp_bridge_state(new_state);
mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode);
- if (!mhdp_state->current_mode)
- return;
+ if (!mhdp_state->current_mode) {
+ ret = -EINVAL;
+ goto out;
+ }
drm_mode_set_name(mhdp_state->current_mode);
diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c
index 52b7b5889e6f..e9f16dbc9535 100644
--- a/drivers/gpu/drm/bridge/display-connector.c
+++ b/drivers/gpu/drm/bridge/display-connector.c
@@ -108,7 +108,7 @@ static u32 *display_connector_get_output_bus_fmts(struct drm_bridge *bridge,
struct drm_connector_state *conn_state,
unsigned int *num_output_fmts)
{
- struct drm_bridge *prev_bridge = drm_bridge_get_prev_bridge(bridge);
+ struct drm_bridge *prev_bridge __free(drm_bridge_put) = drm_bridge_get_prev_bridge(bridge);
struct drm_bridge_state *prev_bridge_state;
if (!prev_bridge || !prev_bridge->funcs->atomic_get_output_bus_fmts) {
@@ -151,7 +151,7 @@ static u32 *display_connector_get_input_bus_fmts(struct drm_bridge *bridge,
u32 output_fmt,
unsigned int *num_input_fmts)
{
- struct drm_bridge *prev_bridge = drm_bridge_get_prev_bridge(bridge);
+ struct drm_bridge *prev_bridge __free(drm_bridge_put) = drm_bridge_get_prev_bridge(bridge);
struct drm_bridge_state *prev_bridge_state;
if (!prev_bridge || !prev_bridge->funcs->atomic_get_input_bus_fmts) {
@@ -373,7 +373,8 @@ static int display_connector_probe(struct platform_device *pdev)
if (conn->bridge.ddc)
conn->bridge.ops |= DRM_BRIDGE_OP_EDID
| DRM_BRIDGE_OP_DETECT;
- if (conn->hpd_gpio)
+ /* Detecting the monitor requires reading DPCD */
+ if (conn->hpd_gpio && type != DRM_MODE_CONNECTOR_DisplayPort)
conn->bridge.ops |= DRM_BRIDGE_OP_DETECT;
if (conn->hpd_irq >= 0)
conn->bridge.ops |= DRM_BRIDGE_OP_HPD;
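
The __free(drm_bridge_put) annotation (linux/cleanup.h plus the DRM bridge refcounting helpers) drops the reference taken by drm_bridge_get_prev_bridge() automatically when the variable goes out of scope, so every early return in these helpers stays balanced. A minimal sketch of the pattern, with illustrative names and assuming the cleanup helper tolerates a NULL bridge:

	struct drm_bridge *prev __free(drm_bridge_put) =
		drm_bridge_get_prev_bridge(bridge);

	if (!prev)
		return NULL;	/* cleanup runs (as a no-op) on this path too */

	/* ... use prev ... */
	return fmts;		/* reference dropped automatically here */
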
diff --git a/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c b/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c
index bea8346515b8..8f7a0d46601a 100644
--- a/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c
@@ -492,14 +492,12 @@ static int imx93_dsi_get_phy_configure_opts(struct imx93_dsi *dsi,
static enum drm_mode_status
imx93_dsi_validate_mode(struct imx93_dsi *dsi, const struct drm_display_mode *mode)
{
- struct drm_bridge *bridge = dw_mipi_dsi_get_bridge(dsi->dmd);
+ struct drm_bridge *dmd_bridge = dw_mipi_dsi_get_bridge(dsi->dmd);
+ struct drm_bridge *last_bridge __free(drm_bridge_put) =
+ drm_bridge_chain_get_last_bridge(dmd_bridge->encoder);
- /* Get the last bridge */
- while (drm_bridge_get_next_bridge(bridge))
- bridge = drm_bridge_get_next_bridge(bridge);
-
- if ((bridge->ops & DRM_BRIDGE_OP_DETECT) &&
- (bridge->ops & DRM_BRIDGE_OP_EDID)) {
+ if ((last_bridge->ops & DRM_BRIDGE_OP_DETECT) &&
+ (last_bridge->ops & DRM_BRIDGE_OP_EDID)) {
unsigned long pixel_clock_rate = mode->clock * 1000;
unsigned long rounded_rate;
diff --git a/drivers/gpu/drm/bridge/ite-it6263.c b/drivers/gpu/drm/bridge/ite-it6263.c
index cf813672b4ff..2eb8fba7016c 100644
--- a/drivers/gpu/drm/bridge/ite-it6263.c
+++ b/drivers/gpu/drm/bridge/ite-it6263.c
@@ -146,6 +146,7 @@
#define HDMI_COLOR_DEPTH_24 FIELD_PREP(HDMI_COLOR_DEPTH, 4)
#define HDMI_REG_PKT_GENERAL_CTRL 0xc6
+#define HDMI_REG_PKT_NULL_CTRL 0xc9
#define HDMI_REG_AVI_INFOFRM_CTRL 0xcd
#define ENABLE_PKT BIT(0)
#define REPEAT_PKT BIT(1)
@@ -154,6 +155,12 @@
* 3) HDMI register bank1: 0x130 ~ 0x1ff (HDMI packet registers)
*/
+/* NULL packet registers */
+/* Header Byte(HB): n = 0 ~ 2 */
+#define HDMI_REG_PKT_HB(n) (0x138 + (n))
+/* Packet Byte(PB): n = 0 ~ 27(HDMI_MAX_INFOFRAME_SIZE), n = 0 for checksum */
+#define HDMI_REG_PKT_PB(n) (0x13b + (n))
+
/* AVI packet registers */
#define HDMI_REG_AVI_DB1 0x158
#define HDMI_REG_AVI_DB2 0x159
@@ -224,7 +231,9 @@ static bool it6263_hdmi_writeable_reg(struct device *dev, unsigned int reg)
case HDMI_REG_HDMI_MODE:
case HDMI_REG_GCP:
case HDMI_REG_PKT_GENERAL_CTRL:
+ case HDMI_REG_PKT_NULL_CTRL:
case HDMI_REG_AVI_INFOFRM_CTRL:
+ case HDMI_REG_PKT_HB(0) ... HDMI_REG_PKT_PB(HDMI_MAX_INFOFRAME_SIZE):
case HDMI_REG_AVI_DB1:
case HDMI_REG_AVI_DB2:
case HDMI_REG_AVI_DB3:
@@ -755,10 +764,16 @@ static int it6263_hdmi_clear_infoframe(struct drm_bridge *bridge,
{
struct it6263 *it = bridge_to_it6263(bridge);
- if (type == HDMI_INFOFRAME_TYPE_AVI)
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
regmap_write(it->hdmi_regmap, HDMI_REG_AVI_INFOFRM_CTRL, 0);
- else
+ break;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ regmap_write(it->hdmi_regmap, HDMI_REG_PKT_NULL_CTRL, 0);
+ break;
+ default:
dev_dbg(it->dev, "unsupported HDMI infoframe 0x%x\n", type);
+ }
return 0;
}
@@ -770,27 +785,36 @@ static int it6263_hdmi_write_infoframe(struct drm_bridge *bridge,
struct it6263 *it = bridge_to_it6263(bridge);
struct regmap *regmap = it->hdmi_regmap;
- if (type != HDMI_INFOFRAME_TYPE_AVI) {
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ /* write the first AVI infoframe data byte chunk(DB1-DB5) */
+ regmap_bulk_write(regmap, HDMI_REG_AVI_DB1,
+ &buffer[HDMI_INFOFRAME_HEADER_SIZE],
+ HDMI_AVI_DB_CHUNK1_SIZE);
+
+ /* write the second AVI infoframe data byte chunk(DB6-DB13) */
+ regmap_bulk_write(regmap, HDMI_REG_AVI_DB6,
+ &buffer[HDMI_INFOFRAME_HEADER_SIZE +
+ HDMI_AVI_DB_CHUNK1_SIZE],
+ HDMI_AVI_DB_CHUNK2_SIZE);
+
+ /* write checksum */
+ regmap_write(regmap, HDMI_REG_AVI_CSUM, buffer[3]);
+
+ regmap_write(regmap, HDMI_REG_AVI_INFOFRM_CTRL,
+ ENABLE_PKT | REPEAT_PKT);
+ break;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ /* write header and payload */
+ regmap_bulk_write(regmap, HDMI_REG_PKT_HB(0), buffer, len);
+
+ regmap_write(regmap, HDMI_REG_PKT_NULL_CTRL,
+ ENABLE_PKT | REPEAT_PKT);
+ break;
+ default:
dev_dbg(it->dev, "unsupported HDMI infoframe 0x%x\n", type);
- return 0;
}
- /* write the first AVI infoframe data byte chunk(DB1-DB5) */
- regmap_bulk_write(regmap, HDMI_REG_AVI_DB1,
- &buffer[HDMI_INFOFRAME_HEADER_SIZE],
- HDMI_AVI_DB_CHUNK1_SIZE);
-
- /* write the second AVI infoframe data byte chunk(DB6-DB13) */
- regmap_bulk_write(regmap, HDMI_REG_AVI_DB6,
- &buffer[HDMI_INFOFRAME_HEADER_SIZE +
- HDMI_AVI_DB_CHUNK1_SIZE],
- HDMI_AVI_DB_CHUNK2_SIZE);
-
- /* write checksum */
- regmap_write(regmap, HDMI_REG_AVI_CSUM, buffer[3]);
-
- regmap_write(regmap, HDMI_REG_AVI_INFOFRM_CTRL, ENABLE_PKT | REPEAT_PKT);
-
return 0;
}
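
Because HDMI_REG_PKT_HB(0)..HB(2) (0x138-0x13a) are immediately followed by HDMI_REG_PKT_PB(0) (0x13b), a single bulk write of the packed infoframe buffer lands each byte in its register: header first, then PB0 (the checksum) and the payload. A hedged sketch of producing such a buffer with the linux/hdmi.h helpers; the VIC value and buffer size are illustrative:

	struct hdmi_vendor_infoframe vendor;
	u8 buf[32];	/* generously sized for header + payload */
	ssize_t len;

	hdmi_vendor_infoframe_init(&vendor);
	vendor.vic = 1;	/* illustrative HDMI 1.4 VIC */
	len = hdmi_vendor_infoframe_pack(&vendor, buf, sizeof(buf));
	if (len > 0)
		regmap_bulk_write(regmap, HDMI_REG_PKT_HB(0), buf, len);
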
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 89649c17ffad..a094803ba7aa 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -21,7 +21,7 @@
#include <linux/wait.h>
#include <linux/bitfield.h>
-#include <crypto/hash.h>
+#include <crypto/sha1.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_hdcp_helper.h>
@@ -2107,35 +2107,6 @@ static void it6505_hdcp_part1_auth(struct it6505 *it6505)
it6505->hdcp_status = HDCP_AUTH_GOING;
}
-static int it6505_sha1_digest(struct it6505 *it6505, u8 *sha1_input,
- unsigned int size, u8 *output_av)
-{
- struct shash_desc *desc;
- struct crypto_shash *tfm;
- int err;
- struct device *dev = it6505->dev;
-
- tfm = crypto_alloc_shash("sha1", 0, 0);
- if (IS_ERR(tfm)) {
- dev_err(dev, "crypto_alloc_shash sha1 failed");
- return PTR_ERR(tfm);
- }
- desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
- if (!desc) {
- crypto_free_shash(tfm);
- return -ENOMEM;
- }
-
- desc->tfm = tfm;
- err = crypto_shash_digest(desc, sha1_input, size, output_av);
- if (err)
- dev_err(dev, "crypto_shash_digest sha1 failed");
-
- crypto_free_shash(tfm);
- kfree(desc);
- return err;
-}
-
static int it6505_setup_sha1_input(struct it6505 *it6505, u8 *sha1_input)
{
struct device *dev = it6505->dev;
@@ -2205,7 +2176,7 @@ static bool it6505_hdcp_part2_ksvlist_check(struct it6505 *it6505)
return false;
}
- it6505_sha1_digest(it6505, it6505->sha1_input, i, (u8 *)av);
+ sha1(it6505->sha1_input, i, (u8 *)av);
/*1B-05 V' must retry 3 times */
for (retry = 0; retry < 3; retry++) {
err = it6505_get_dpcd(it6505, DP_AUX_HDCP_V_PRIME(0), (u8 *)bv,
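
The sha1() library call (crypto/sha1.h, backed by lib/crypto) is synchronous and cannot fail, which is what lets the whole crypto_shash allocation, error handling, and teardown removed above collapse into a single line:

	#include <crypto/sha1.h>

	u8 digest[SHA1_DIGEST_SIZE];

	sha1(data, len, digest);	/* one-shot: no tfm, no error path */
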
diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
index b5dd71f6a990..eabc4c32f6ab 100644
--- a/drivers/gpu/drm/bridge/samsung-dsim.c
+++ b/drivers/gpu/drm/bridge/samsung-dsim.c
@@ -31,11 +31,10 @@
/* returns true iff both arguments logically differs */
#define NEQV(a, b) (!(a) ^ !(b))
-/* DSIM_STATUS */
+/* DSIM_STATUS or DSIM_DPHY_STATUS */
#define DSIM_STOP_STATE_DAT(x) (((x) & 0xf) << 0)
#define DSIM_STOP_STATE_CLK BIT(8)
#define DSIM_TX_READY_HS_CLK BIT(10)
-#define DSIM_PLL_STABLE BIT(31)
/* DSIM_SWRST */
#define DSIM_FUNCRST BIT(16)
@@ -46,17 +45,13 @@
#define DSIM_BTA_TIMEOUT(x) ((x) << 16)
/* DSIM_CLKCTRL */
-#define DSIM_ESC_PRESCALER(x) (((x) & 0xffff) << 0)
-#define DSIM_ESC_PRESCALER_MASK (0xffff << 0)
-#define DSIM_LANE_ESC_CLK_EN_CLK BIT(19)
-#define DSIM_LANE_ESC_CLK_EN_DATA(x) (((x) & 0xf) << 20)
-#define DSIM_LANE_ESC_CLK_EN_DATA_MASK (0xf << 20)
-#define DSIM_BYTE_CLKEN BIT(24)
-#define DSIM_BYTE_CLK_SRC(x) (((x) & 0x3) << 25)
-#define DSIM_BYTE_CLK_SRC_MASK (0x3 << 25)
-#define DSIM_PLL_BYPASS BIT(27)
-#define DSIM_ESC_CLKEN BIT(28)
-#define DSIM_TX_REQUEST_HSCLK BIT(31)
+#define DSIM_ESC_PRESCALER(x) (((x) & 0xffff) << 0)
+#define DSIM_ESC_PRESCALER_MASK (0xffff << 0)
+#define DSIM_LANE_ESC_CLK_EN_DATA(x, offset) (((x) & 0xf) << (offset))
+#define DSIM_LANE_ESC_CLK_EN_DATA_MASK(offset) (0xf << (offset))
+#define DSIM_BYTE_CLK_SRC(x) (((x) & 0x3) << 25)
+#define DSIM_BYTE_CLK_SRC_MASK (0x3 << 25)
+#define DSIM_PLL_BYPASS BIT(27)
/* DSIM_CONFIG */
#define DSIM_LANE_EN_CLK BIT(0)
@@ -91,7 +86,6 @@
*/
#define DSIM_HSE_DISABLE_MODE BIT(23)
#define DSIM_AUTO_MODE BIT(24)
-#define DSIM_VIDEO_MODE BIT(25)
#define DSIM_BURST_MODE BIT(26)
#define DSIM_SYNC_INFORM BIT(27)
#define DSIM_EOT_DISABLE BIT(28)
@@ -129,9 +123,9 @@
#define DSIM_MAIN_HBP_MASK ((0xffff) << 0)
/* DSIM_MSYNC */
-#define DSIM_MAIN_VSA(x) ((x) << 22)
+#define DSIM_MAIN_VSA(x, offset) ((x) << (offset))
#define DSIM_MAIN_HSA(x) ((x) << 0)
-#define DSIM_MAIN_VSA_MASK ((0x3ff) << 22)
+#define DSIM_MAIN_VSA_MASK(offset) ((0x3ff) << (offset))
#define DSIM_MAIN_HSA_MASK ((0xffff) << 0)
/* DSIM_SDRESOL */
@@ -157,6 +151,11 @@
#define DSIM_INT_RX_ECC_ERR BIT(15)
#define DSIM_INT_RX_CRC_ERR BIT(14)
+/* DSIM_SFRCTRL */
+#define DSIM_SFR_CTRL_STAND_BY BIT(4)
+#define DSIM_SFR_CTRL_SHADOW_UPDATE BIT(1)
+#define DSIM_SFR_CTRL_SHADOW_EN BIT(0)
+
/* DSIM_FIFOCTRL */
#define DSIM_RX_DATA_FULL BIT(25)
#define DSIM_RX_DATA_EMPTY BIT(24)
@@ -191,9 +190,7 @@
#define DSIM_PLL_DPDNSWAP_DAT (1 << 24)
#define DSIM_FREQ_BAND(x) ((x) << 24)
#define DSIM_PLL_EN BIT(23)
-#define DSIM_PLL_P(x, offset) ((x) << (offset))
-#define DSIM_PLL_M(x) ((x) << 4)
-#define DSIM_PLL_S(x) ((x) << 1)
+#define DSIM_PLL(x, offset) ((x) << (offset))
/* DSIM_PHYCTRL */
#define DSIM_PHYCTRL_ULPS_EXIT(x) (((x) & 0x1ff) << 0)
@@ -222,25 +219,42 @@
#define DSI_XFER_TIMEOUT_MS 100
#define DSI_RX_FIFO_EMPTY 0x30800002
-#define OLD_SCLK_MIPI_CLK_NAME "pll_clk"
-
#define PS_TO_CYCLE(ps, hz) DIV64_U64_ROUND_CLOSEST(((ps) * (hz)), 1000000000000ULL)
-static const char *const clk_names[5] = {
- "bus_clk",
- "sclk_mipi",
- "phyclk_mipidphy0_bitclkdiv8",
- "phyclk_mipidphy0_rxclkesc0",
- "sclk_rgb_vclk_to_dsim0"
-};
-
enum samsung_dsim_transfer_type {
EXYNOS_DSI_TX,
EXYNOS_DSI_RX,
};
+static struct clk_bulk_data exynos3_clk_bulk_data[] = {
+ { .id = "bus_clk" },
+ { .id = "pll_clk" },
+};
+
+static struct clk_bulk_data exynos4_clk_bulk_data[] = {
+ { .id = "bus_clk" },
+ { .id = "sclk_mipi" },
+};
+
+static struct clk_bulk_data exynos5433_clk_bulk_data[] = {
+ { .id = "bus_clk" },
+ { .id = "sclk_mipi" },
+ { .id = "phyclk_mipidphy0_bitclkdiv8" },
+ { .id = "phyclk_mipidphy0_rxclkesc0" },
+ { .id = "sclk_rgb_vclk_to_dsim0" },
+};
+
+static struct clk_bulk_data exynos7870_clk_bulk_data[] = {
+ { .id = "bus" },
+ { .id = "pll" },
+ { .id = "byte" },
+ { .id = "esc" },
+};
+
enum reg_idx {
- DSIM_STATUS_REG, /* Status register */
+ DSIM_STATUS_REG, /* Status register (legacy) */
+ DSIM_LINK_STATUS_REG, /* Link status register */
+ DSIM_DPHY_STATUS_REG, /* D-PHY status register */
DSIM_SWRST_REG, /* Software reset register */
DSIM_CLKCTRL_REG, /* Clock control register */
DSIM_TIMEOUT_REG, /* Time out register */
@@ -255,6 +269,7 @@ enum reg_idx {
DSIM_PKTHDR_REG, /* Packet Header FIFO register */
DSIM_PAYLOAD_REG, /* Payload FIFO register */
DSIM_RXFIFO_REG, /* Read FIFO register */
+ DSIM_SFRCTRL_REG, /* SFR standby and shadow control register */
DSIM_FIFOCTRL_REG, /* FIFO status and control register */
DSIM_PLLCTRL_REG, /* PLL control register */
DSIM_PHYCTRL_REG,
@@ -312,6 +327,32 @@ static const unsigned int exynos5433_reg_ofs[] = {
[DSIM_PHYTIMING2_REG] = 0xBC,
};
+static const unsigned int exynos7870_reg_ofs[] = {
+ [DSIM_LINK_STATUS_REG] = 0x04,
+ [DSIM_DPHY_STATUS_REG] = 0x08,
+ [DSIM_SWRST_REG] = 0x0C,
+ [DSIM_CLKCTRL_REG] = 0x10,
+ [DSIM_TIMEOUT_REG] = 0x14,
+ [DSIM_ESCMODE_REG] = 0x1C,
+ [DSIM_MDRESOL_REG] = 0x20,
+ [DSIM_MVPORCH_REG] = 0x24,
+ [DSIM_MHPORCH_REG] = 0x28,
+ [DSIM_MSYNC_REG] = 0x2C,
+ [DSIM_CONFIG_REG] = 0x30,
+ [DSIM_INTSRC_REG] = 0x34,
+ [DSIM_INTMSK_REG] = 0x38,
+ [DSIM_PKTHDR_REG] = 0x3C,
+ [DSIM_PAYLOAD_REG] = 0x40,
+ [DSIM_RXFIFO_REG] = 0x44,
+ [DSIM_SFRCTRL_REG] = 0x48,
+ [DSIM_FIFOCTRL_REG] = 0x4C,
+ [DSIM_PLLCTRL_REG] = 0x94,
+ [DSIM_PHYCTRL_REG] = 0xA4,
+ [DSIM_PHYTIMING_REG] = 0xB4,
+ [DSIM_PHYTIMING1_REG] = 0xB8,
+ [DSIM_PHYTIMING2_REG] = 0xBC,
+};
+
enum reg_value_idx {
RESET_TYPE,
PLL_TIMER,
@@ -384,6 +425,24 @@ static const unsigned int exynos5433_reg_values[] = {
[PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0c),
};
+static const unsigned int exynos7870_reg_values[] = {
+ [RESET_TYPE] = DSIM_SWRST,
+ [PLL_TIMER] = 80000,
+ [STOP_STATE_CNT] = 0xa,
+ [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0x177),
+ [PHYCTRL_VREG_LP] = 0,
+ [PHYCTRL_SLEW_UP] = 0,
+ [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x07),
+ [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0c),
+ [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x08),
+ [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x2b),
+ [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0d),
+ [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x09),
+ [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x09),
+ [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x0f),
+ [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0c),
+};
+
static const unsigned int imx8mm_dsim_reg_values[] = {
[RESET_TYPE] = DSIM_SWRST,
[PLL_TIMER] = 500,
@@ -405,13 +464,26 @@ static const unsigned int imx8mm_dsim_reg_values[] = {
static const struct samsung_dsim_driver_data exynos3_dsi_driver_data = {
.reg_ofs = exynos_reg_ofs,
.plltmr_reg = 0x50,
+ .has_legacy_status_reg = 1,
.has_freqband = 1,
.has_clklane_stop = 1,
- .num_clks = 2,
+ .clk_data = exynos3_clk_bulk_data,
+ .num_clks = ARRAY_SIZE(exynos3_clk_bulk_data),
.max_freq = 1000,
+ .wait_for_hdr_fifo = 1,
.wait_for_reset = 1,
.num_bits_resol = 11,
+ .video_mode_bit = 25,
+ .pll_stable_bit = 31,
+ .esc_clken_bit = 28,
+ .byte_clken_bit = 24,
+ .tx_req_hsclk_bit = 31,
+ .lane_esc_clk_bit = 19,
+ .lane_esc_data_offset = 20,
.pll_p_offset = 13,
+ .pll_m_offset = 4,
+ .pll_s_offset = 1,
+ .main_vsa_offset = 22,
.reg_values = reg_values,
.pll_fin_min = 6,
.pll_fin_max = 12,
@@ -424,13 +496,26 @@ static const struct samsung_dsim_driver_data exynos3_dsi_driver_data = {
static const struct samsung_dsim_driver_data exynos4_dsi_driver_data = {
.reg_ofs = exynos_reg_ofs,
.plltmr_reg = 0x50,
+ .has_legacy_status_reg = 1,
.has_freqband = 1,
.has_clklane_stop = 1,
- .num_clks = 2,
+ .clk_data = exynos4_clk_bulk_data,
+ .num_clks = ARRAY_SIZE(exynos4_clk_bulk_data),
.max_freq = 1000,
+ .wait_for_hdr_fifo = 1,
.wait_for_reset = 1,
.num_bits_resol = 11,
+ .video_mode_bit = 25,
+ .pll_stable_bit = 31,
+ .esc_clken_bit = 28,
+ .byte_clken_bit = 24,
+ .tx_req_hsclk_bit = 31,
+ .lane_esc_clk_bit = 19,
+ .lane_esc_data_offset = 20,
.pll_p_offset = 13,
+ .pll_m_offset = 4,
+ .pll_s_offset = 1,
+ .main_vsa_offset = 22,
.reg_values = reg_values,
.pll_fin_min = 6,
.pll_fin_max = 12,
@@ -443,11 +528,24 @@ static const struct samsung_dsim_driver_data exynos4_dsi_driver_data = {
static const struct samsung_dsim_driver_data exynos5_dsi_driver_data = {
.reg_ofs = exynos_reg_ofs,
.plltmr_reg = 0x58,
- .num_clks = 2,
+ .has_legacy_status_reg = 1,
+ .clk_data = exynos3_clk_bulk_data,
+ .num_clks = ARRAY_SIZE(exynos3_clk_bulk_data),
.max_freq = 1000,
+ .wait_for_hdr_fifo = 1,
.wait_for_reset = 1,
.num_bits_resol = 11,
+ .video_mode_bit = 25,
+ .pll_stable_bit = 31,
+ .esc_clken_bit = 28,
+ .byte_clken_bit = 24,
+ .tx_req_hsclk_bit = 31,
+ .lane_esc_clk_bit = 19,
+ .lane_esc_data_offset = 20,
.pll_p_offset = 13,
+ .pll_m_offset = 4,
+ .pll_s_offset = 1,
+ .main_vsa_offset = 22,
.reg_values = reg_values,
.pll_fin_min = 6,
.pll_fin_max = 12,
@@ -459,12 +557,25 @@ static const struct samsung_dsim_driver_data exynos5_dsi_driver_data = {
static const struct samsung_dsim_driver_data exynos5433_dsi_driver_data = {
.reg_ofs = exynos5433_reg_ofs,
.plltmr_reg = 0xa0,
+ .has_legacy_status_reg = 1,
.has_clklane_stop = 1,
- .num_clks = 5,
+ .clk_data = exynos5433_clk_bulk_data,
+ .num_clks = ARRAY_SIZE(exynos5433_clk_bulk_data),
.max_freq = 1500,
+ .wait_for_hdr_fifo = 1,
.wait_for_reset = 0,
.num_bits_resol = 12,
+ .video_mode_bit = 25,
+ .pll_stable_bit = 31,
+ .esc_clken_bit = 28,
+ .byte_clken_bit = 24,
+ .tx_req_hsclk_bit = 31,
+ .lane_esc_clk_bit = 19,
+ .lane_esc_data_offset = 20,
.pll_p_offset = 13,
+ .pll_m_offset = 4,
+ .pll_s_offset = 1,
+ .main_vsa_offset = 22,
.reg_values = exynos5433_reg_values,
.pll_fin_min = 6,
.pll_fin_max = 12,
@@ -476,12 +587,25 @@ static const struct samsung_dsim_driver_data exynos5433_dsi_driver_data = {
static const struct samsung_dsim_driver_data exynos5422_dsi_driver_data = {
.reg_ofs = exynos5433_reg_ofs,
.plltmr_reg = 0xa0,
+ .has_legacy_status_reg = 1,
.has_clklane_stop = 1,
- .num_clks = 2,
+ .clk_data = exynos3_clk_bulk_data,
+ .num_clks = ARRAY_SIZE(exynos3_clk_bulk_data),
.max_freq = 1500,
+ .wait_for_hdr_fifo = 1,
.wait_for_reset = 1,
.num_bits_resol = 12,
+ .video_mode_bit = 25,
+ .pll_stable_bit = 31,
+ .esc_clken_bit = 28,
+ .byte_clken_bit = 24,
+ .tx_req_hsclk_bit = 31,
+ .lane_esc_clk_bit = 19,
+ .lane_esc_data_offset = 20,
.pll_p_offset = 13,
+ .pll_m_offset = 4,
+ .pll_s_offset = 1,
+ .main_vsa_offset = 22,
.reg_values = exynos5422_reg_values,
.pll_fin_min = 6,
.pll_fin_max = 12,
@@ -490,19 +614,62 @@ static const struct samsung_dsim_driver_data exynos5422_dsi_driver_data = {
.min_freq = 500,
};
+static const struct samsung_dsim_driver_data exynos7870_dsi_driver_data = {
+ .reg_ofs = exynos7870_reg_ofs,
+ .plltmr_reg = 0xa0,
+ .has_clklane_stop = 1,
+ .has_sfrctrl = 1,
+ .clk_data = exynos7870_clk_bulk_data,
+ .num_clks = ARRAY_SIZE(exynos7870_clk_bulk_data),
+ .max_freq = 1500,
+ .wait_for_hdr_fifo = 0,
+ .wait_for_reset = 1,
+ .num_bits_resol = 12,
+ .video_mode_bit = 18,
+ .pll_stable_bit = 24,
+ .esc_clken_bit = 16,
+ .byte_clken_bit = 17,
+ .tx_req_hsclk_bit = 20,
+ .lane_esc_clk_bit = 8,
+ .lane_esc_data_offset = 9,
+ .pll_p_offset = 13,
+ .pll_m_offset = 3,
+ .pll_s_offset = 0,
+ .main_vsa_offset = 16,
+ .reg_values = exynos7870_reg_values,
+ .pll_fin_min = 6,
+ .pll_fin_max = 12,
+ .m_min = 41,
+ .m_max = 125,
+ .min_freq = 500,
+};
+
static const struct samsung_dsim_driver_data imx8mm_dsi_driver_data = {
.reg_ofs = exynos5433_reg_ofs,
.plltmr_reg = 0xa0,
+ .has_legacy_status_reg = 1,
.has_clklane_stop = 1,
- .num_clks = 2,
+ .clk_data = exynos4_clk_bulk_data,
+ .num_clks = ARRAY_SIZE(exynos4_clk_bulk_data),
.max_freq = 2100,
+ .wait_for_hdr_fifo = 1,
.wait_for_reset = 0,
.num_bits_resol = 12,
+ .video_mode_bit = 25,
+ .pll_stable_bit = 31,
+ .esc_clken_bit = 28,
+ .byte_clken_bit = 24,
+ .tx_req_hsclk_bit = 31,
+ .lane_esc_clk_bit = 19,
+ .lane_esc_data_offset = 20,
/*
* Unlike Exynos, PLL_P(PMS_P) offset 14 is used in i.MX8M Mini/Nano/Plus
* downstream driver - drivers/gpu/drm/bridge/sec-dsim.c
*/
.pll_p_offset = 14,
+ .pll_m_offset = 4,
+ .pll_s_offset = 1,
+ .main_vsa_offset = 22,
.reg_values = imx8mm_dsim_reg_values,
.pll_fin_min = 2,
.pll_fin_max = 30,
@@ -518,6 +685,7 @@ samsung_dsim_types[DSIM_TYPE_COUNT] = {
[DSIM_TYPE_EXYNOS5410] = &exynos5_dsi_driver_data,
[DSIM_TYPE_EXYNOS5422] = &exynos5422_dsi_driver_data,
[DSIM_TYPE_EXYNOS5433] = &exynos5433_dsi_driver_data,
+ [DSIM_TYPE_EXYNOS7870] = &exynos7870_dsi_driver_data,
[DSIM_TYPE_IMX8MM] = &imx8mm_dsi_driver_data,
[DSIM_TYPE_IMX8MP] = &imx8mm_dsi_driver_data,
};
@@ -653,8 +821,9 @@ static unsigned long samsung_dsim_set_pll(struct samsung_dsim *dsi,
writel(driver_data->reg_values[PLL_TIMER],
dsi->reg_base + driver_data->plltmr_reg);
- reg = DSIM_PLL_EN | DSIM_PLL_P(p, driver_data->pll_p_offset) |
- DSIM_PLL_M(m) | DSIM_PLL_S(s);
+ reg = DSIM_PLL_EN | DSIM_PLL(p, driver_data->pll_p_offset)
+ | DSIM_PLL(m, driver_data->pll_m_offset)
+ | DSIM_PLL(s, driver_data->pll_s_offset);
if (driver_data->has_freqband) {
static const unsigned long freq_bands[] = {
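
With the per-SoC field offsets in the driver data, the single DSIM_PLL() macro now serves both register layouts. Illustrative values (not taken from a real mode), p = 3, m = 90, s = 1, compose as:

	/* Exynos 3/4/5 layout: pll_p_offset = 13, pll_m_offset = 4, pll_s_offset = 1 */
	reg = DSIM_PLL_EN | DSIM_PLL(3, 13) | DSIM_PLL(90, 4) | DSIM_PLL(1, 1);

	/* Exynos 7870 layout: pll_p_offset = 13, pll_m_offset = 3, pll_s_offset = 0 */
	reg = DSIM_PLL_EN | DSIM_PLL(3, 13) | DSIM_PLL(90, 3) | DSIM_PLL(1, 0);
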
@@ -682,14 +851,17 @@ static unsigned long samsung_dsim_set_pll(struct samsung_dsim *dsi,
samsung_dsim_write(dsi, DSIM_PLLCTRL_REG, reg);
- timeout = 1000;
+ timeout = 3000;
do {
if (timeout-- == 0) {
dev_err(dsi->dev, "PLL failed to stabilize\n");
return 0;
}
- reg = samsung_dsim_read(dsi, DSIM_STATUS_REG);
- } while ((reg & DSIM_PLL_STABLE) == 0);
+ if (driver_data->has_legacy_status_reg)
+ reg = samsung_dsim_read(dsi, DSIM_STATUS_REG);
+ else
+ reg = samsung_dsim_read(dsi, DSIM_LINK_STATUS_REG);
+ } while ((reg & BIT(driver_data->pll_stable_bit)) == 0);
dsi->hs_clock = fout;
@@ -698,6 +870,7 @@ static unsigned long samsung_dsim_set_pll(struct samsung_dsim *dsi,
static int samsung_dsim_enable_clock(struct samsung_dsim *dsi)
{
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
unsigned long hs_clk, byte_clk, esc_clk, pix_clk;
unsigned long esc_div;
u32 reg;
@@ -731,15 +904,17 @@ static int samsung_dsim_enable_clock(struct samsung_dsim *dsi)
hs_clk, byte_clk, esc_clk);
reg = samsung_dsim_read(dsi, DSIM_CLKCTRL_REG);
- reg &= ~(DSIM_ESC_PRESCALER_MASK | DSIM_LANE_ESC_CLK_EN_CLK
- | DSIM_LANE_ESC_CLK_EN_DATA_MASK | DSIM_PLL_BYPASS
- | DSIM_BYTE_CLK_SRC_MASK);
- reg |= DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN
- | DSIM_ESC_PRESCALER(esc_div)
- | DSIM_LANE_ESC_CLK_EN_CLK
- | DSIM_LANE_ESC_CLK_EN_DATA(BIT(dsi->lanes) - 1)
- | DSIM_BYTE_CLK_SRC(0)
- | DSIM_TX_REQUEST_HSCLK;
+ reg &= ~(DSIM_ESC_PRESCALER_MASK | BIT(driver_data->lane_esc_clk_bit)
+ | DSIM_LANE_ESC_CLK_EN_DATA_MASK(driver_data->lane_esc_data_offset)
+ | DSIM_PLL_BYPASS
+ | DSIM_BYTE_CLK_SRC_MASK);
+ reg |= BIT(driver_data->esc_clken_bit) | BIT(driver_data->byte_clken_bit)
+ | DSIM_ESC_PRESCALER(esc_div)
+ | BIT(driver_data->lane_esc_clk_bit)
+ | DSIM_LANE_ESC_CLK_EN_DATA(BIT(dsi->lanes) - 1,
+ driver_data->lane_esc_data_offset)
+ | DSIM_BYTE_CLK_SRC(0)
+ | BIT(driver_data->tx_req_hsclk_bit);
samsung_dsim_write(dsi, DSIM_CLKCTRL_REG, reg);
return 0;
@@ -843,11 +1018,14 @@ static void samsung_dsim_set_phy_ctrl(struct samsung_dsim *dsi)
static void samsung_dsim_disable_clock(struct samsung_dsim *dsi)
{
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
u32 reg;
reg = samsung_dsim_read(dsi, DSIM_CLKCTRL_REG);
- reg &= ~(DSIM_LANE_ESC_CLK_EN_CLK | DSIM_LANE_ESC_CLK_EN_DATA_MASK
- | DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN);
+ reg &= ~(BIT(driver_data->lane_esc_clk_bit)
+ | DSIM_LANE_ESC_CLK_EN_DATA_MASK(driver_data->lane_esc_data_offset)
+ | BIT(driver_data->esc_clken_bit)
+ | BIT(driver_data->byte_clken_bit));
samsung_dsim_write(dsi, DSIM_CLKCTRL_REG, reg);
reg = samsung_dsim_read(dsi, DSIM_PLLCTRL_REG);
@@ -891,7 +1069,7 @@ static int samsung_dsim_init_link(struct samsung_dsim *dsi)
* mode, otherwise it will support command mode.
*/
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
- reg |= DSIM_VIDEO_MODE;
+ reg |= BIT(driver_data->video_mode_bit);
/*
* The user manual describes that following bits are ignored in
@@ -962,7 +1140,10 @@ static int samsung_dsim_init_link(struct samsung_dsim *dsi)
return -EFAULT;
}
- reg = samsung_dsim_read(dsi, DSIM_STATUS_REG);
+ if (driver_data->has_legacy_status_reg)
+ reg = samsung_dsim_read(dsi, DSIM_STATUS_REG);
+ else
+ reg = samsung_dsim_read(dsi, DSIM_DPHY_STATUS_REG);
if ((reg & DSIM_STOP_STATE_DAT(lanes_mask))
!= DSIM_STOP_STATE_DAT(lanes_mask))
continue;
@@ -983,6 +1164,7 @@ static void samsung_dsim_set_display_mode(struct samsung_dsim *dsi)
{
struct drm_display_mode *m = &dsi->mode;
unsigned int num_bits_resol = dsi->driver_data->num_bits_resol;
+ unsigned int main_vsa_offset = dsi->driver_data->main_vsa_offset;
u32 reg;
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
@@ -1009,7 +1191,7 @@ static void samsung_dsim_set_display_mode(struct samsung_dsim *dsi)
reg = DSIM_MAIN_HFP(hfp) | DSIM_MAIN_HBP(hbp);
samsung_dsim_write(dsi, DSIM_MHPORCH_REG, reg);
- reg = DSIM_MAIN_VSA(m->vsync_end - m->vsync_start)
+ reg = DSIM_MAIN_VSA(m->vsync_end - m->vsync_start, main_vsa_offset)
| DSIM_MAIN_HSA(hsa);
samsung_dsim_write(dsi, DSIM_MSYNC_REG, reg);
}
@@ -1023,6 +1205,7 @@ static void samsung_dsim_set_display_mode(struct samsung_dsim *dsi)
static void samsung_dsim_set_display_enable(struct samsung_dsim *dsi, bool enable)
{
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
u32 reg;
reg = samsung_dsim_read(dsi, DSIM_MDRESOL_REG);
@@ -1031,6 +1214,15 @@ static void samsung_dsim_set_display_enable(struct samsung_dsim *dsi, bool enabl
else
reg &= ~DSIM_MAIN_STAND_BY;
samsung_dsim_write(dsi, DSIM_MDRESOL_REG, reg);
+
+ if (driver_data->has_sfrctrl) {
+ reg = samsung_dsim_read(dsi, DSIM_SFRCTRL_REG);
+ if (enable)
+ reg |= DSIM_SFR_CTRL_STAND_BY;
+ else
+ reg &= ~DSIM_SFR_CTRL_STAND_BY;
+ samsung_dsim_write(dsi, DSIM_SFRCTRL_REG, reg);
+ }
}
static int samsung_dsim_wait_for_hdr_fifo(struct samsung_dsim *dsi)
@@ -1087,6 +1279,7 @@ static void samsung_dsim_send_to_fifo(struct samsung_dsim *dsi,
{
struct device *dev = dsi->dev;
struct mipi_dsi_packet *pkt = &xfer->packet;
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
const u8 *payload = pkt->payload + xfer->tx_done;
u16 length = pkt->payload_length - xfer->tx_done;
bool first = !xfer->tx_done;
@@ -1127,9 +1320,11 @@ static void samsung_dsim_send_to_fifo(struct samsung_dsim *dsi,
return;
reg = get_unaligned_le32(pkt->header);
- if (samsung_dsim_wait_for_hdr_fifo(dsi)) {
- dev_err(dev, "waiting for header FIFO timed out\n");
- return;
+ if (driver_data->wait_for_hdr_fifo) {
+ if (samsung_dsim_wait_for_hdr_fifo(dsi)) {
+ dev_err(dev, "waiting for header FIFO timed out\n");
+ return;
+ }
}
if (NEQV(xfer->flags & MIPI_DSI_MSG_USE_LPM,
@@ -1922,7 +2117,7 @@ int samsung_dsim_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct samsung_dsim *dsi;
- int ret, i;
+ int ret;
dsi = devm_drm_bridge_alloc(dev, struct samsung_dsim, bridge, &samsung_dsim_bridge_funcs);
if (IS_ERR(dsi))
@@ -1946,23 +2141,11 @@ int samsung_dsim_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "failed to get regulators\n");
- dsi->clks = devm_kcalloc(dev, dsi->driver_data->num_clks,
- sizeof(*dsi->clks), GFP_KERNEL);
- if (!dsi->clks)
- return -ENOMEM;
-
- for (i = 0; i < dsi->driver_data->num_clks; i++) {
- dsi->clks[i] = devm_clk_get(dev, clk_names[i]);
- if (IS_ERR(dsi->clks[i])) {
- if (strcmp(clk_names[i], "sclk_mipi") == 0) {
- dsi->clks[i] = devm_clk_get(dev, OLD_SCLK_MIPI_CLK_NAME);
- if (!IS_ERR(dsi->clks[i]))
- continue;
- }
-
- dev_info(dev, "failed to get the clock: %s\n", clk_names[i]);
- return PTR_ERR(dsi->clks[i]);
- }
+ ret = devm_clk_bulk_get(dev, dsi->driver_data->num_clks,
+ dsi->driver_data->clk_data);
+ if (ret) {
+ dev_err(dev, "failed to get clocks in bulk (%d)\n", ret);
+ return ret;
}
dsi->reg_base = devm_platform_ioremap_resource(pdev, 0);
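
The bulk conversion replaces the hand-rolled clk_names[] loop (and its "pll_clk" fallback) with per-variant clk_bulk_data tables and the standard helpers; the whole lifecycle then follows the usual pattern (names illustrative):

	static struct clk_bulk_data clks[] = {
		{ .id = "bus" },
		{ .id = "pll" },
	};

	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(clks), clks);	/* probe */
	if (ret)
		return ret;

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(clks), clks);	/* resume */
	if (ret)
		return ret;

	clk_bulk_disable_unprepare(ARRAY_SIZE(clks), clks);	/* suspend */
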
@@ -2035,7 +2218,7 @@ static int samsung_dsim_suspend(struct device *dev)
{
struct samsung_dsim *dsi = dev_get_drvdata(dev);
const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
- int ret, i;
+ int ret;
usleep_range(10000, 20000);
@@ -2051,8 +2234,7 @@ static int samsung_dsim_suspend(struct device *dev)
phy_power_off(dsi->phy);
- for (i = driver_data->num_clks - 1; i > -1; i--)
- clk_disable_unprepare(dsi->clks[i]);
+ clk_bulk_disable_unprepare(driver_data->num_clks, driver_data->clk_data);
ret = regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
if (ret < 0)
@@ -2065,7 +2247,7 @@ static int samsung_dsim_resume(struct device *dev)
{
struct samsung_dsim *dsi = dev_get_drvdata(dev);
const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
- int ret, i;
+ int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
if (ret < 0) {
@@ -2073,11 +2255,9 @@ static int samsung_dsim_resume(struct device *dev)
return ret;
}
- for (i = 0; i < driver_data->num_clks; i++) {
- ret = clk_prepare_enable(dsi->clks[i]);
- if (ret < 0)
- goto err_clk;
- }
+ ret = clk_bulk_prepare_enable(driver_data->num_clks, driver_data->clk_data);
+ if (ret < 0)
+ goto err_clk;
ret = phy_power_on(dsi->phy);
if (ret < 0) {
@@ -2088,8 +2268,7 @@ static int samsung_dsim_resume(struct device *dev)
return 0;
err_clk:
- while (--i > -1)
- clk_disable_unprepare(dsi->clks[i]);
+ clk_bulk_disable_unprepare(driver_data->num_clks, driver_data->clk_data);
regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
return ret;
diff --git a/drivers/gpu/drm/bridge/simple-bridge.c b/drivers/gpu/drm/bridge/simple-bridge.c
index 3d15ddd39470..e4d0bc2200f8 100644
--- a/drivers/gpu/drm/bridge/simple-bridge.c
+++ b/drivers/gpu/drm/bridge/simple-bridge.c
@@ -262,6 +262,16 @@ static const struct of_device_id simple_bridge_match[] = {
.connector_type = DRM_MODE_CONNECTOR_VGA,
},
}, {
+ .compatible = "radxa,ra620",
+ .data = &(const struct simple_bridge_info) {
+ .connector_type = DRM_MODE_CONNECTOR_HDMIA,
+ },
+ }, {
+ .compatible = "realtek,rtd2171",
+ .data = &(const struct simple_bridge_info) {
+ .connector_type = DRM_MODE_CONNECTOR_HDMIA,
+ },
+ }, {
.compatible = "ti,opa362",
.data = &(const struct simple_bridge_info) {
.connector_type = DRM_MODE_CONNECTOR_Composite,
diff --git a/drivers/gpu/drm/bridge/ssd2825.c b/drivers/gpu/drm/bridge/ssd2825.c
new file mode 100644
index 000000000000..f2fdbf7c117d
--- /dev/null
+++ b/drivers/gpu/drm/bridge/ssd2825.c
@@ -0,0 +1,775 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/units.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <video/mipi_display.h>
+
+#define SSD2825_DEVICE_ID_REG 0xb0
+#define SSD2825_RGB_INTERFACE_CTRL_REG_1 0xb1
+#define SSD2825_RGB_INTERFACE_CTRL_REG_2 0xb2
+#define SSD2825_RGB_INTERFACE_CTRL_REG_3 0xb3
+#define SSD2825_RGB_INTERFACE_CTRL_REG_4 0xb4
+#define SSD2825_RGB_INTERFACE_CTRL_REG_5 0xb5
+#define SSD2825_RGB_INTERFACE_CTRL_REG_6 0xb6
+#define SSD2825_NON_BURST_EV BIT(2)
+#define SSD2825_BURST BIT(3)
+#define SSD2825_PCKL_HIGH BIT(13)
+#define SSD2825_HSYNC_HIGH BIT(14)
+#define SSD2825_VSYNC_HIGH BIT(15)
+#define SSD2825_CONFIGURATION_REG 0xb7
+#define SSD2825_CONF_REG_HS BIT(0)
+#define SSD2825_CONF_REG_CKE BIT(1)
+#define SSD2825_CONF_REG_SLP BIT(2)
+#define SSD2825_CONF_REG_VEN BIT(3)
+#define SSD2825_CONF_REG_HCLK BIT(4)
+#define SSD2825_CONF_REG_CSS BIT(5)
+#define SSD2825_CONF_REG_DCS BIT(6)
+#define SSD2825_CONF_REG_REN BIT(7)
+#define SSD2825_CONF_REG_ECD BIT(8)
+#define SSD2825_CONF_REG_EOT BIT(9)
+#define SSD2825_CONF_REG_LPE BIT(10)
+#define SSD2825_VC_CTRL_REG 0xb8
+#define SSD2825_PLL_CTRL_REG 0xb9
+#define SSD2825_PLL_CONFIGURATION_REG 0xba
+#define SSD2825_CLOCK_CTRL_REG 0xbb
+#define SSD2825_PACKET_SIZE_CTRL_REG_1 0xbc
+#define SSD2825_PACKET_SIZE_CTRL_REG_2 0xbd
+#define SSD2825_PACKET_SIZE_CTRL_REG_3 0xbe
+#define SSD2825_PACKET_DROP_REG 0xbf
+#define SSD2825_OPERATION_CTRL_REG 0xc0
+#define SSD2825_MAX_RETURN_SIZE_REG 0xc1
+#define SSD2825_RETURN_DATA_COUNT_REG 0xc2
+#define SSD2825_ACK_RESPONSE_REG 0xc3
+#define SSD2825_LINE_CTRL_REG 0xc4
+#define SSD2825_INTERRUPT_CTRL_REG 0xc5
+#define SSD2825_INTERRUPT_STATUS_REG 0xc6
+#define SSD2825_ERROR_STATUS_REG 0xc7
+#define SSD2825_DATA_FORMAT_REG 0xc8
+#define SSD2825_DELAY_ADJ_REG_1 0xc9
+#define SSD2825_DELAY_ADJ_REG_2 0xca
+#define SSD2825_DELAY_ADJ_REG_3 0xcb
+#define SSD2825_DELAY_ADJ_REG_4 0xcc
+#define SSD2825_DELAY_ADJ_REG_5 0xcd
+#define SSD2825_DELAY_ADJ_REG_6 0xce
+#define SSD2825_HS_TX_TIMER_REG_1 0xcf
+#define SSD2825_HS_TX_TIMER_REG_2 0xd0
+#define SSD2825_LP_RX_TIMER_REG_1 0xd1
+#define SSD2825_LP_RX_TIMER_REG_2 0xd2
+#define SSD2825_TE_STATUS_REG 0xd3
+#define SSD2825_SPI_READ_REG 0xd4
+#define SSD2825_SPI_READ_REG_RESET 0xfa
+#define SSD2825_PLL_LOCK_REG 0xd5
+#define SSD2825_TEST_REG 0xd6
+#define SSD2825_TE_COUNT_REG 0xd7
+#define SSD2825_ANALOG_CTRL_REG_1 0xd8
+#define SSD2825_ANALOG_CTRL_REG_2 0xd9
+#define SSD2825_ANALOG_CTRL_REG_3 0xda
+#define SSD2825_ANALOG_CTRL_REG_4 0xdb
+#define SSD2825_INTERRUPT_OUT_CTRL_REG 0xdc
+#define SSD2825_RGB_INTERFACE_CTRL_REG_7 0xdd
+#define SSD2825_LANE_CONFIGURATION_REG 0xde
+#define SSD2825_DELAY_ADJ_REG_7 0xdf
+#define SSD2825_INPUT_PIN_CTRL_REG_1 0xe0
+#define SSD2825_INPUT_PIN_CTRL_REG_2 0xe1
+#define SSD2825_BIDIR_PIN_CTRL_REG_1 0xe2
+#define SSD2825_BIDIR_PIN_CTRL_REG_2 0xe3
+#define SSD2825_BIDIR_PIN_CTRL_REG_3 0xe4
+#define SSD2825_BIDIR_PIN_CTRL_REG_4 0xe5
+#define SSD2825_BIDIR_PIN_CTRL_REG_5 0xe6
+#define SSD2825_BIDIR_PIN_CTRL_REG_6 0xe7
+#define SSD2825_BIDIR_PIN_CTRL_REG_7 0xe8
+#define SSD2825_CABC_BRIGHTNESS_CTRL_REG_1 0xe9
+#define SSD2825_CABC_BRIGHTNESS_CTRL_REG_2 0xea
+#define SSD2825_CABC_BRIGHTNESS_STATUS_REG 0xeb
+#define SSD2825_READ_REG 0xff
+
+#define SSD2825_COM_BYTE 0x00
+#define SSD2825_DAT_BYTE 0x01
+
+#define SSD2828_LP_CLOCK_DIVIDER(n) (((n) - 1) & 0x3f)
+#define SSD2825_LP_MIN_CLK 5000 /* KHz */
+#define SSD2825_REF_MIN_CLK 2000 /* KHz */
+
+static const struct regulator_bulk_data ssd2825_supplies[] = {
+ { .supply = "dvdd" },
+ { .supply = "avdd" },
+ { .supply = "vddio" },
+};
+
+struct ssd2825_dsi_output {
+ struct mipi_dsi_device *dev;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
+};
+
+struct ssd2825_priv {
+ struct spi_device *spi;
+ struct device *dev;
+
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data *supplies;
+
+ struct clk *tx_clk;
+
+ struct mipi_dsi_host dsi_host;
+ struct drm_bridge bridge;
+ struct ssd2825_dsi_output output;
+
+ struct mutex mlock; /* for host transfer operations */
+
+ u32 pd_lines; /* number of Parallel Port Input Data Lines */
+ u32 dsi_lanes; /* number of DSI Lanes */
+
+ /* Parameters for PLL programming */
+ u32 pll_freq_kbps; /* PLL in kbps */
+ u32 nibble_freq_khz; /* PLL div by 4 */
+
+ u32 hzd; /* HS Zero Delay in ns */
+ u32 hpd; /* HS Prepare Delay in ns */
+};
+
+static inline struct ssd2825_priv *dsi_host_to_ssd2825(struct mipi_dsi_host *host)
+{
+ return container_of(host, struct ssd2825_priv, dsi_host);
+}
+
+static inline struct ssd2825_priv *bridge_to_ssd2825(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct ssd2825_priv, bridge);
+}
+
+static int ssd2825_write_raw(struct ssd2825_priv *priv, u8 high_byte, u8 low_byte)
+{
+ struct spi_device *spi = priv->spi;
+ u8 tx_buf[2];
+
+ /*
+ * Low byte is the value, high byte defines type of
+ * write cycle, 0 for command and 1 for data.
+ */
+ tx_buf[0] = low_byte;
+ tx_buf[1] = high_byte;
+
+ return spi_write(spi, tx_buf, 2);
+}
+
+static int ssd2825_write_reg(struct ssd2825_priv *priv, u8 reg, u16 command)
+{
+ u8 datal = (command & 0x00FF);
+ u8 datah = (command & 0xFF00) >> 8;
+ int ret;
+
+ /* Command write cycle */
+ ret = ssd2825_write_raw(priv, SSD2825_COM_BYTE, reg);
+ if (ret)
+ return ret;
+
+ /* Data write cycle bits 7-0 */
+ ret = ssd2825_write_raw(priv, SSD2825_DAT_BYTE, datal);
+ if (ret)
+ return ret;
+
+ /* Data write cycle bits 15-8 */
+ ret = ssd2825_write_raw(priv, SSD2825_DAT_BYTE, datah);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
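+/*
+ * Send a DSI packet: program the payload length into
+ * SSD2825_PACKET_SIZE_CTRL_REG_1, issue the PACKET_DROP command cycle,
+ * then stream the payload one data write cycle per byte.
+ */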
+static int ssd2825_write_dsi(struct ssd2825_priv *priv, const u8 *command, int len)
+{
+ int ret, i;
+
+ ret = ssd2825_write_reg(priv, SSD2825_PACKET_SIZE_CTRL_REG_1, len);
+ if (ret)
+ return ret;
+
+ ret = ssd2825_write_raw(priv, SSD2825_COM_BYTE, SSD2825_PACKET_DROP_REG);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < len; i++) {
+ ret = ssd2825_write_raw(priv, SSD2825_DAT_BYTE, command[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
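+/*
+ * Register reads use a 9-bit command cycle followed by a single 16-bit
+ * read cycle; ssd2825_read_reg() below resets and arms the SPI read
+ * register before calling this helper.
+ */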
+static int ssd2825_read_raw(struct ssd2825_priv *priv, u8 cmd, u16 *data)
+{
+ struct spi_device *spi = priv->spi;
+ struct spi_message msg;
+ struct spi_transfer xfer[2];
+ u8 tx_buf[2];
+ u8 rx_buf[2];
+ int ret;
+
+ memset(&xfer, 0, sizeof(xfer));
+
+ tx_buf[1] = (cmd & 0xFF00) >> 8;
+ tx_buf[0] = (cmd & 0x00FF);
+
+ xfer[0].tx_buf = tx_buf;
+ xfer[0].bits_per_word = 9;
+ xfer[0].len = 2;
+
+ xfer[1].rx_buf = rx_buf;
+ xfer[1].bits_per_word = 16;
+ xfer[1].len = 2;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer[0], &msg);
+ spi_message_add_tail(&xfer[1], &msg);
+
+ ret = spi_sync(spi, &msg);
+ if (ret) {
+ dev_err(&spi->dev, "ssd2825 read raw failed %d\n", ret);
+ return ret;
+ }
+
+ *data = rx_buf[1] | (rx_buf[0] << 8);
+
+ return 0;
+}
+
+static int ssd2825_read_reg(struct ssd2825_priv *priv, u8 reg, u16 *data)
+{
+ int ret;
+
+ /* Reset the read register */
+ ret = ssd2825_write_reg(priv, SSD2825_SPI_READ_REG, SSD2825_SPI_READ_REG_RESET);
+ if (ret)
+ return ret;
+
+ /* Push the address to read */
+ ret = ssd2825_write_raw(priv, SSD2825_COM_BYTE, reg);
+ if (ret)
+ return ret;
+
+ /* Perform a reading cycle */
+ ret = ssd2825_read_raw(priv, SSD2825_SPI_READ_REG_RESET, data);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ssd2825_dsi_host_attach(struct mipi_dsi_host *host, struct mipi_dsi_device *dev)
+{
+ struct ssd2825_priv *priv = dsi_host_to_ssd2825(host);
+ struct drm_bridge *bridge;
+ struct drm_panel *panel;
+ struct device_node *ep;
+ int ret;
+
+ if (dev->lanes > 4) {
+ dev_err(priv->dev, "unsupported number of data lanes (%u)\n", dev->lanes);
+ return -EINVAL;
+ }
+
+ /*
+ * ssd2825 supports both Video and Pulse mode, but the driver only
+ * implements Video (event) mode currently
+ */
+ if (!(dev->mode_flags & MIPI_DSI_MODE_VIDEO)) {
+ dev_err(priv->dev, "Only MIPI_DSI_MODE_VIDEO is supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = drm_of_find_panel_or_bridge(host->dev->of_node, 1, 0, &panel, &bridge);
+ if (ret)
+ return ret;
+
+ if (panel) {
+ bridge = drm_panel_bridge_add_typed(panel, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+ }
+
+ priv->output.dev = dev;
+ priv->output.bridge = bridge;
+ priv->output.panel = panel;
+
+ priv->dsi_lanes = dev->lanes;
+
+ /* get input ep (port0/endpoint0) */
+ ret = -EINVAL;
+ ep = of_graph_get_endpoint_by_regs(host->dev->of_node, 0, 0);
+ if (ep) {
+ ret = of_property_read_u32(ep, "bus-width", &priv->pd_lines);
+ of_node_put(ep);
+ }
+
+ if (ret)
+ priv->pd_lines = mipi_dsi_pixel_format_to_bpp(dev->format);
+
+ drm_bridge_add(&priv->bridge);
+
+ return 0;
+}
+
+static int ssd2825_dsi_host_detach(struct mipi_dsi_host *host, struct mipi_dsi_device *dev)
+{
+ struct ssd2825_priv *priv = dsi_host_to_ssd2825(host);
+
+ drm_bridge_remove(&priv->bridge);
+ if (priv->output.panel)
+ drm_panel_bridge_remove(priv->output.bridge);
+
+ return 0;
+}
+
+static ssize_t ssd2825_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct ssd2825_priv *priv = dsi_host_to_ssd2825(host);
+ u16 config;
+ int ret;
+
+ if (msg->rx_len) {
+ dev_warn(priv->dev, "MIPI rx is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ guard(mutex)(&priv->mlock);
+
+ ret = ssd2825_read_reg(priv, SSD2825_CONFIGURATION_REG, &config);
+ if (ret)
+ return ret;
+
+ switch (msg->type) {
+ case MIPI_DSI_DCS_SHORT_WRITE:
+ case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
+ case MIPI_DSI_DCS_LONG_WRITE:
+ config |= SSD2825_CONF_REG_DCS;
+ break;
+ case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
+ case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
+ case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
+ case MIPI_DSI_GENERIC_LONG_WRITE:
+ config &= ~SSD2825_CONF_REG_DCS;
+ break;
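+ /* Reads were already rejected above via the rx_len check */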
+ case MIPI_DSI_DCS_READ:
+ case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
+ case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
+ case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
+ default:
+ return 0;
+ }
+
+ ret = ssd2825_write_reg(priv, SSD2825_CONFIGURATION_REG, config);
+ if (ret)
+ return ret;
+
+ ret = ssd2825_write_reg(priv, SSD2825_VC_CTRL_REG, 0x0000);
+ if (ret)
+ return ret;
+
+ ret = ssd2825_write_dsi(priv, msg->tx_buf, msg->tx_len);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct mipi_dsi_host_ops ssd2825_dsi_host_ops = {
+ .attach = ssd2825_dsi_host_attach,
+ .detach = ssd2825_dsi_host_detach,
+ .transfer = ssd2825_dsi_host_transfer,
+};
+
+static void ssd2825_hw_reset(struct ssd2825_priv *priv)
+{
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(priv->reset_gpio, 0);
+ usleep_range(5000, 6000);
+}
+
+/*
+ * PLL configuration register settings.
+ *
+ * See the "PLL Configuration Register Description" in the SSD2825 datasheet.
+ */
+static u16 construct_pll_config(struct ssd2825_priv *priv,
+ u32 desired_pll_freq_kbps, u32 reference_freq_khz)
+{
+ u32 div_factor = 1, mul_factor, fr = 0;
+
+ while (reference_freq_khz / (div_factor + 1) >= SSD2825_REF_MIN_CLK)
+ div_factor++;
+ if (div_factor > 31)
+ div_factor = 31;
+
+ mul_factor = DIV_ROUND_UP(desired_pll_freq_kbps * div_factor,
+ reference_freq_khz);
+
+ priv->pll_freq_kbps = reference_freq_khz * mul_factor / div_factor;
+ priv->nibble_freq_khz = priv->pll_freq_kbps / 4;
+
+ if (priv->pll_freq_kbps >= 501000)
+ fr = 3;
+ else if (priv->pll_freq_kbps >= 251000)
+ fr = 2;
+ else if (priv->pll_freq_kbps >= 126000)
+ fr = 1;
+
+ return (fr << 14) | (div_factor << 8) | mul_factor;
+}
+
+static int ssd2825_setup_pll(struct ssd2825_priv *priv,
+ const struct drm_display_mode *mode)
+{
+ u16 pll_config, lp_div;
+ u32 nibble_delay, pclk_mult, tx_freq_khz;
+ u8 hzd, hpd;
+
+ tx_freq_khz = clk_get_rate(priv->tx_clk) / KILO;
+ if (!tx_freq_khz)
+ tx_freq_khz = SSD2825_REF_MIN_CLK;
+
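+ /*
+ * Each pixel clock must move pd_lines bits over dsi_lanes lanes; the
+ * +1 apparently adds headroom above that minimum bit rate.
+ */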
+ pclk_mult = priv->pd_lines / priv->dsi_lanes + 1;
+ pll_config = construct_pll_config(priv, pclk_mult * mode->clock,
+ tx_freq_khz);
+
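+ /* Keep the LP-mode clock (PLL / 8 / lp_div) at or above the minimum */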
+ lp_div = priv->pll_freq_kbps / (SSD2825_LP_MIN_CLK * 8);
+
+ /* nibble_delay in nanoseconds */
+ nibble_delay = MICRO / priv->nibble_freq_khz;
+
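+ /*
+ * Convert the HS zero/prepare delays from nanoseconds into
+ * nibble-clock periods; hpd apparently accounts for a fixed
+ * four-cycle overhead.
+ */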
+ hzd = priv->hzd / nibble_delay;
+ hpd = (priv->hpd - 4 * nibble_delay) / nibble_delay;
+
+ /* Disable PLL */
+ ssd2825_write_reg(priv, SSD2825_PLL_CTRL_REG, 0x0000);
+ ssd2825_write_reg(priv, SSD2825_LINE_CTRL_REG, 0x0001);
+
+ /* Set delays */
+ ssd2825_write_reg(priv, SSD2825_DELAY_ADJ_REG_1, (hzd << 8) | hpd);
+
+ /* Set PLL coefficients */
+ ssd2825_write_reg(priv, SSD2825_PLL_CONFIGURATION_REG, pll_config);
+
+ /* Clock Control Register */
+ ssd2825_write_reg(priv, SSD2825_CLOCK_CTRL_REG,
+ SSD2828_LP_CLOCK_DIVIDER(lp_div));
+
+ /* Enable PLL */
+ ssd2825_write_reg(priv, SSD2825_PLL_CTRL_REG, 0x0001);
+ ssd2825_write_reg(priv, SSD2825_VC_CTRL_REG, 0);
+
+ return 0;
+}
+
+static void ssd2825_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct ssd2825_priv *priv = bridge_to_ssd2825(bridge);
+ struct mipi_dsi_device *dsi_dev = priv->output.dev;
+ const struct drm_crtc_state *crtc_state;
+ const struct drm_display_mode *mode;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc;
+ u32 input_bus_flags = bridge->timings->input_bus_flags;
+ u16 flags = 0, config;
+ u8 pixel_format;
+ int ret;
+
+ /* Power Sequence */
+ ret = clk_prepare_enable(priv->tx_clk);
+ if (ret)
+ dev_err(priv->dev, "error enabling tx_clk (%d)\n", ret);
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ssd2825_supplies), priv->supplies);
+ if (ret)
+ dev_err(priv->dev, "error enabling regulators (%d)\n", ret);
+
+ usleep_range(1000, 2000);
+
+ ssd2825_hw_reset(priv);
+
+ /* Perform SW reset */
+ ssd2825_write_reg(priv, SSD2825_OPERATION_CTRL_REG, 0x0100);
+
+ /* Set pixel format */
+ switch (dsi_dev->format) {
+ case MIPI_DSI_FMT_RGB565:
+ pixel_format = 0x00;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ pixel_format = 0x01;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ pixel_format = 0x02;
+ break;
+ case MIPI_DSI_FMT_RGB888:
+ default:
+ pixel_format = 0x03;
+ break;
+ }
+
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ crtc = drm_atomic_get_new_connector_state(state, connector)->crtc;
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ mode = &crtc_state->adjusted_mode;
+
+ /* Set panel timings */
+ ssd2825_write_reg(priv, SSD2825_RGB_INTERFACE_CTRL_REG_1,
+ ((mode->vtotal - mode->vsync_end) << 8) |
+ (mode->htotal - mode->hsync_end));
+ ssd2825_write_reg(priv, SSD2825_RGB_INTERFACE_CTRL_REG_2,
+ ((mode->vtotal - mode->vsync_start) << 8) |
+ (mode->htotal - mode->hsync_start));
+ ssd2825_write_reg(priv, SSD2825_RGB_INTERFACE_CTRL_REG_3,
+ ((mode->vsync_start - mode->vdisplay) << 8) |
+ (mode->hsync_start - mode->hdisplay));
+ ssd2825_write_reg(priv, SSD2825_RGB_INTERFACE_CTRL_REG_4, mode->hdisplay);
+ ssd2825_write_reg(priv, SSD2825_RGB_INTERFACE_CTRL_REG_5, mode->vdisplay);
+
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ flags |= SSD2825_HSYNC_HIGH;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ flags |= SSD2825_VSYNC_HIGH;
+
+ if (dsi_dev->mode_flags & MIPI_DSI_MODE_VIDEO)
+ flags |= SSD2825_NON_BURST_EV;
+
+ if (input_bus_flags & DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE)
+ flags |= SSD2825_PCKL_HIGH;
+
+ ssd2825_write_reg(priv, SSD2825_RGB_INTERFACE_CTRL_REG_6, flags | pixel_format);
+ ssd2825_write_reg(priv, SSD2825_LANE_CONFIGURATION_REG, dsi_dev->lanes - 1);
+ ssd2825_write_reg(priv, SSD2825_TEST_REG, 0x0004);
+
+ /* Configure the PLL */
+ ssd2825_setup_pll(priv, mode);
+
+ usleep_range(10000, 11000);
+
+ config = SSD2825_CONF_REG_HS | SSD2825_CONF_REG_CKE | SSD2825_CONF_REG_DCS |
+ SSD2825_CONF_REG_ECD | SSD2825_CONF_REG_EOT;
+
+ if (dsi_dev->mode_flags & MIPI_DSI_MODE_LPM)
+ config &= ~SSD2825_CONF_REG_HS;
+
+ if (dsi_dev->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
+ config &= ~SSD2825_CONF_REG_EOT;
+
+ /* Initial DSI configuration register set */
+ ssd2825_write_reg(priv, SSD2825_CONFIGURATION_REG, config);
+ ssd2825_write_reg(priv, SSD2825_VC_CTRL_REG, 0);
+
+ if (priv->output.panel)
+ drm_panel_enable(priv->output.panel);
+}
+
+static void ssd2825_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct ssd2825_priv *priv = bridge_to_ssd2825(bridge);
+ struct mipi_dsi_device *dsi_dev = priv->output.dev;
+ u16 config;
+
+ config = SSD2825_CONF_REG_HS | SSD2825_CONF_REG_DCS |
+ SSD2825_CONF_REG_ECD | SSD2825_CONF_REG_EOT;
+
+ if (dsi_dev->mode_flags & MIPI_DSI_MODE_VIDEO)
+ config |= SSD2825_CONF_REG_VEN;
+
+ if (dsi_dev->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
+ config &= ~SSD2825_CONF_REG_EOT;
+
+ /* Complete the configuration after DSI commands have been sent */
+ ssd2825_write_reg(priv, SSD2825_CONFIGURATION_REG, config);
+ ssd2825_write_reg(priv, SSD2825_PLL_CTRL_REG, 0x0001);
+ ssd2825_write_reg(priv, SSD2825_VC_CTRL_REG, 0x0000);
+}
+
+static void ssd2825_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct ssd2825_priv *priv = bridge_to_ssd2825(bridge);
+ int ret;
+
+ msleep(100);
+
+ /* Exit DSI configuration register set */
+ ssd2825_write_reg(priv, SSD2825_CONFIGURATION_REG,
+ SSD2825_CONF_REG_ECD | SSD2825_CONF_REG_EOT);
+ ssd2825_write_reg(priv, SSD2825_VC_CTRL_REG, 0);
+
+ /* HW disable */
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ usleep_range(5000, 6000);
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(ssd2825_supplies),
+ priv->supplies);
+ if (ret < 0)
+ dev_err(priv->dev, "error disabling regulators (%d)\n", ret);
+
+ clk_disable_unprepare(priv->tx_clk);
+}
+
+static int ssd2825_bridge_attach(struct drm_bridge *bridge, struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
+{
+ struct ssd2825_priv *priv = bridge_to_ssd2825(bridge);
+
+ return drm_bridge_attach(bridge->encoder, priv->output.bridge, bridge,
+ flags);
+}
+
+static enum drm_mode_status
+ssd2825_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ if (mode->hdisplay > 1366)
+ return MODE_H_ILLEGAL;
+
+ if (mode->vdisplay > 1366)
+ return MODE_V_ILLEGAL;
+
+ return MODE_OK;
+}
+
+static bool ssd2825_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* Default to positive sync */
+
+ if (!(adjusted_mode->flags &
+ (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
+ adjusted_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+
+ if (!(adjusted_mode->flags &
+ (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
+ adjusted_mode->flags |= DRM_MODE_FLAG_PVSYNC;
+
+ return true;
+}
+
+static const struct drm_bridge_funcs ssd2825_bridge_funcs = {
+ .attach = ssd2825_bridge_attach,
+ .mode_valid = ssd2825_bridge_mode_valid,
+ .mode_fixup = ssd2825_mode_fixup,
+
+ .atomic_pre_enable = ssd2825_bridge_atomic_pre_enable,
+ .atomic_enable = ssd2825_bridge_atomic_enable,
+ .atomic_disable = ssd2825_bridge_atomic_disable,
+
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+};
+
+static const struct drm_bridge_timings default_ssd2825_timings = {
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE
+ | DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE
+ | DRM_BUS_FLAG_DE_HIGH,
+};
+
+static int ssd2825_probe(struct spi_device *spi)
+{
+ struct ssd2825_priv *priv;
+ struct device *dev = &spi->dev;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ /* Driver supports only 9 bit 3-Wire mode */
+ spi->bits_per_word = 9;
+
+ ret = spi_setup(spi);
+ if (ret)
+ return ret;
+
+ priv = devm_drm_bridge_alloc(dev, struct ssd2825_priv, bridge, &ssd2825_bridge_funcs);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
+
+ spi_set_drvdata(spi, priv);
+
+ priv->spi = spi;
+ priv->dev = dev;
+
+ mutex_init(&priv->mlock);
+
+ priv->tx_clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(priv->tx_clk))
+ return dev_err_probe(dev, PTR_ERR(priv->tx_clk),
+ "can't retrieve bridge tx_clk\n");
+
+ priv->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(priv->reset_gpio),
+ "failed to get reset GPIO\n");
+
+ ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(ssd2825_supplies),
+ ssd2825_supplies, &priv->supplies);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get regulators\n");
+
+ priv->hzd = 133; /* ns */
+ device_property_read_u32(dev, "solomon,hs-zero-delay-ns", &priv->hzd);
+
+ priv->hpd = 40; /* ns */
+ device_property_read_u32(dev, "solomon,hs-prep-delay-ns", &priv->hpd);
+
+ priv->dsi_host.dev = dev;
+ priv->dsi_host.ops = &ssd2825_dsi_host_ops;
+
+ priv->bridge.timings = &default_ssd2825_timings;
+ priv->bridge.of_node = np;
+
+ return mipi_dsi_host_register(&priv->dsi_host);
+}
+
+static void ssd2825_remove(struct spi_device *spi)
+{
+ struct ssd2825_priv *priv = spi_get_drvdata(spi);
+
+ mipi_dsi_host_unregister(&priv->dsi_host);
+}
+
+static const struct of_device_id ssd2825_of_match[] = {
+ { .compatible = "solomon,ssd2825" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ssd2825_of_match);
+
+static struct spi_driver ssd2825_driver = {
+ .driver = {
+ .name = "ssd2825",
+ .of_match_table = ssd2825_of_match,
+ },
+ .probe = ssd2825_probe,
+ .remove = ssd2825_remove,
+};
+module_spi_driver(ssd2825_driver);
+
+MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>");
+MODULE_DESCRIPTION("Solomon SSD2825 RGB to MIPI-DSI bridge driver SPI");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/synopsys/Kconfig b/drivers/gpu/drm/bridge/synopsys/Kconfig
index f3ab2f985f8c..2c5e532410de 100644
--- a/drivers/gpu/drm/bridge/synopsys/Kconfig
+++ b/drivers/gpu/drm/bridge/synopsys/Kconfig
@@ -1,4 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
+config DRM_DW_DP
+ tristate
+ select DRM_DISPLAY_HELPER
+ select DRM_DISPLAY_DP_HELPER
+ select DRM_KMS_HELPER
+ select REGMAP_MMIO
+
config DRM_DW_HDMI
tristate
select DRM_DISPLAY_HDMI_HELPER
diff --git a/drivers/gpu/drm/bridge/synopsys/Makefile b/drivers/gpu/drm/bridge/synopsys/Makefile
index 9dc376d220ad..4dada44029ac 100644
--- a/drivers/gpu/drm/bridge/synopsys/Makefile
+++ b/drivers/gpu/drm/bridge/synopsys/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_DRM_DW_DP) += dw-dp.o
obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o
obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o
obj-$(CONFIG_DRM_DW_HDMI_GP_AUDIO) += dw-hdmi-gp-audio.o
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-dp.c b/drivers/gpu/drm/bridge/synopsys/dw-dp.c
new file mode 100644
index 000000000000..9bbfe8da3de0
--- /dev/null
+++ b/drivers/gpu/drm/bridge/synopsys/dw-dp.c
@@ -0,0 +1,2095 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synopsys DesignWare Cores DisplayPort Transmitter Controller
+ *
+ * Copyright (c) 2025 Rockchip Electronics Co., Ltd.
+ *
+ * Author: Andy Yan <andy.yan@rock-chips.com>
+ */
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/media-bus-format.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/phy/phy.h>
+#include <linux/unaligned.h>
+
+#include <drm/bridge/dw_dp.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/display/drm_dp_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#define DW_DP_VERSION_NUMBER 0x0000
+#define DW_DP_VERSION_TYPE 0x0004
+#define DW_DP_ID 0x0008
+
+#define DW_DP_CONFIG_REG1 0x0100
+#define DW_DP_CONFIG_REG2 0x0104
+#define DW_DP_CONFIG_REG3 0x0108
+
+#define DW_DP_CCTL 0x0200
+#define FORCE_HPD BIT(4)
+#define DEFAULT_FAST_LINK_TRAIN_EN BIT(2)
+#define ENHANCE_FRAMING_EN BIT(1)
+#define SCRAMBLE_DIS BIT(0)
+#define DW_DP_SOFT_RESET_CTRL 0x0204
+#define VIDEO_RESET BIT(5)
+#define AUX_RESET BIT(4)
+#define AUDIO_SAMPLER_RESET BIT(3)
+#define HDCP_MODULE_RESET BIT(2)
+#define PHY_SOFT_RESET BIT(1)
+#define CONTROLLER_RESET BIT(0)
+
+#define DW_DP_VSAMPLE_CTRL 0x0300
+#define PIXEL_MODE_SELECT GENMASK(22, 21)
+#define VIDEO_MAPPING GENMASK(20, 16)
+#define VIDEO_STREAM_ENABLE BIT(5)
+
+#define DW_DP_VSAMPLE_STUFF_CTRL1 0x0304
+
+#define DW_DP_VSAMPLE_STUFF_CTRL2 0x0308
+
+#define DW_DP_VINPUT_POLARITY_CTRL 0x030c
+#define DE_IN_POLARITY BIT(2)
+#define HSYNC_IN_POLARITY BIT(1)
+#define VSYNC_IN_POLARITY BIT(0)
+
+#define DW_DP_VIDEO_CONFIG1 0x0310
+#define HACTIVE GENMASK(31, 16)
+#define HBLANK GENMASK(15, 2)
+#define I_P BIT(1)
+#define R_V_BLANK_IN_OSC BIT(0)
+
+#define DW_DP_VIDEO_CONFIG2 0x0314
+#define VBLANK GENMASK(31, 16)
+#define VACTIVE GENMASK(15, 0)
+
+#define DW_DP_VIDEO_CONFIG3 0x0318
+#define H_SYNC_WIDTH GENMASK(31, 16)
+#define H_FRONT_PORCH GENMASK(15, 0)
+
+#define DW_DP_VIDEO_CONFIG4 0x031c
+#define V_SYNC_WIDTH GENMASK(31, 16)
+#define V_FRONT_PORCH GENMASK(15, 0)
+
+#define DW_DP_VIDEO_CONFIG5 0x0320
+#define INIT_THRESHOLD_HI GENMASK(22, 21)
+#define AVERAGE_BYTES_PER_TU_FRAC GENMASK(19, 16)
+#define INIT_THRESHOLD GENMASK(13, 7)
+#define AVERAGE_BYTES_PER_TU GENMASK(6, 0)
+
+#define DW_DP_VIDEO_MSA1 0x0324
+#define VSTART GENMASK(31, 16)
+#define HSTART GENMASK(15, 0)
+
+#define DW_DP_VIDEO_MSA2 0x0328
+#define MISC0 GENMASK(31, 24)
+
+#define DW_DP_VIDEO_MSA3 0x032c
+#define MISC1 GENMASK(31, 24)
+
+#define DW_DP_VIDEO_HBLANK_INTERVAL 0x0330
+#define HBLANK_INTERVAL_EN BIT(16)
+#define HBLANK_INTERVAL GENMASK(15, 0)
+
+#define DW_DP_AUD_CONFIG1 0x0400
+#define AUDIO_TIMESTAMP_VERSION_NUM GENMASK(29, 24)
+#define AUDIO_PACKET_ID GENMASK(23, 16)
+#define AUDIO_MUTE BIT(15)
+#define NUM_CHANNELS GENMASK(14, 12)
+#define HBR_MODE_ENABLE BIT(10)
+#define AUDIO_DATA_WIDTH GENMASK(9, 5)
+#define AUDIO_DATA_IN_EN GENMASK(4, 1)
+#define AUDIO_INF_SELECT BIT(0)
+
+#define DW_DP_SDP_VERTICAL_CTRL 0x0500
+#define EN_VERTICAL_SDP BIT(2)
+#define EN_AUDIO_STREAM_SDP BIT(1)
+#define EN_AUDIO_TIMESTAMP_SDP BIT(0)
+#define DW_DP_SDP_HORIZONTAL_CTRL 0x0504
+#define EN_HORIZONTAL_SDP BIT(2)
+#define DW_DP_SDP_STATUS_REGISTER 0x0508
+#define DW_DP_SDP_MANUAL_CTRL 0x050c
+#define DW_DP_SDP_STATUS_EN 0x0510
+
+#define DW_DP_SDP_REGISTER_BANK 0x0600
+#define SDP_REGS GENMASK(31, 0)
+
+#define DW_DP_PHYIF_CTRL 0x0a00
+#define PHY_WIDTH BIT(25)
+#define PHY_POWERDOWN GENMASK(20, 17)
+#define PHY_BUSY GENMASK(15, 12)
+#define SSC_DIS BIT(16)
+#define XMIT_ENABLE GENMASK(11, 8)
+#define PHY_LANES GENMASK(7, 6)
+#define PHY_RATE GENMASK(5, 4)
+#define TPS_SEL GENMASK(3, 0)
+
+#define DW_DP_PHY_TX_EQ 0x0a04
+#define DW_DP_CUSTOMPAT0 0x0a08
+#define DW_DP_CUSTOMPAT1 0x0a0c
+#define DW_DP_CUSTOMPAT2 0x0a10
+#define DW_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET 0x0a14
+#define DW_DP_PHYIF_PWRDOWN_CTRL 0x0a18
+
+#define DW_DP_AUX_CMD 0x0b00
+#define AUX_CMD_TYPE GENMASK(31, 28)
+#define AUX_ADDR GENMASK(27, 8)
+#define I2C_ADDR_ONLY BIT(4)
+#define AUX_LEN_REQ GENMASK(3, 0)
+
+#define DW_DP_AUX_STATUS 0x0b04
+#define AUX_TIMEOUT BIT(17)
+#define AUX_BYTES_READ GENMASK(23, 19)
+#define AUX_STATUS GENMASK(7, 4)
+
+#define DW_DP_AUX_DATA0 0x0b08
+#define DW_DP_AUX_DATA1 0x0b0c
+#define DW_DP_AUX_DATA2 0x0b10
+#define DW_DP_AUX_DATA3 0x0b14
+
+#define DW_DP_GENERAL_INTERRUPT 0x0d00
+#define VIDEO_FIFO_OVERFLOW_STREAM0 BIT(6)
+#define AUDIO_FIFO_OVERFLOW_STREAM0 BIT(5)
+#define SDP_EVENT_STREAM0 BIT(4)
+#define AUX_CMD_INVALID BIT(3)
+#define HDCP_EVENT BIT(2)
+#define AUX_REPLY_EVENT BIT(1)
+#define HPD_EVENT BIT(0)
+
+#define DW_DP_GENERAL_INTERRUPT_ENABLE 0x0d04
+#define HDCP_EVENT_EN BIT(2)
+#define AUX_REPLY_EVENT_EN BIT(1)
+#define HPD_EVENT_EN BIT(0)
+
+#define DW_DP_HPD_STATUS 0x0d08
+#define HPD_STATE GENMASK(11, 9)
+#define HPD_STATUS BIT(8)
+#define HPD_HOT_UNPLUG BIT(2)
+#define HPD_HOT_PLUG BIT(1)
+#define HPD_IRQ BIT(0)
+
+#define DW_DP_HPD_INTERRUPT_ENABLE 0x0d0c
+#define HPD_UNPLUG_ERR_EN BIT(3)
+#define HPD_UNPLUG_EN BIT(2)
+#define HPD_PLUG_EN BIT(1)
+#define HPD_IRQ_EN BIT(0)
+
+#define DW_DP_HDCP_CFG 0x0e00
+#define DPCD12PLUS BIT(7)
+#define CP_IRQ BIT(6)
+#define BYPENCRYPTION BIT(5)
+#define HDCP_LOCK BIT(4)
+#define ENCRYPTIONDISABLE BIT(3)
+#define ENABLE_HDCP_13 BIT(2)
+#define ENABLE_HDCP BIT(1)
+
+#define DW_DP_HDCP_OBS 0x0e04
+#define HDCP22_RE_AUTHENTICATION_REQ BIT(31)
+#define HDCP22_AUTHENTICATION_FAILED BIT(30)
+#define HDCP22_AUTHENTICATION_SUCCESS BIT(29)
+#define HDCP22_CAPABLE_SINK BIT(28)
+#define HDCP22_SINK_CAP_CHECK_COMPLETE BIT(27)
+#define HDCP22_STATE GENMASK(26, 24)
+#define HDCP22_BOOTED BIT(23)
+#define HDCP13_BSTATUS GENMASK(22, 19)
+#define REPEATER BIT(18)
+#define HDCP_CAPABLE BIT(17)
+#define STATEE GENMASK(16, 14)
+#define STATEOEG GENMASK(13, 11)
+#define STATER GENMASK(10, 8)
+#define STATEA GENMASK(7, 4)
+#define SUBSTATEA GENMASK(3, 1)
+#define HDCPENGAGED BIT(0)
+
+#define DW_DP_HDCP_APIINTCLR 0x0e08
+#define DW_DP_HDCP_APIINTSTAT 0x0e0c
+#define DW_DP_HDCP_APIINTMSK 0x0e10
+#define HDCP22_GPIOINT BIT(8)
+#define HDCP_ENGAGED BIT(7)
+#define HDCP_FAILED BIT(6)
+#define KSVSHA1CALCDONEINT BIT(5)
+#define AUXRESPNACK7TIMES BIT(4)
+#define AUXRESPTIMEOUT BIT(3)
+#define AUXRESPDEFER7TIMES BIT(2)
+#define KSVACCESSINT BIT(0)
+
+#define DW_DP_HDCP_KSVMEMCTRL 0x0e18
+#define KSVSHA1STATUS BIT(4)
+#define KSVMEMACCESS BIT(1)
+#define KSVMEMREQUEST BIT(0)
+
+#define DW_DP_HDCP_REG_BKSV0 0x3600
+#define DW_DP_HDCP_REG_BKSV1 0x3604
+#define DW_DP_HDCP_REG_ANCONF 0x3608
+#define AN_BYPASS BIT(0)
+
+#define DW_DP_HDCP_REG_AN0 0x360c
+#define DW_DP_HDCP_REG_AN1 0x3610
+#define DW_DP_HDCP_REG_RMLCTL 0x3614
+#define ODPK_DECRYPT_ENABLE BIT(0)
+
+#define DW_DP_HDCP_REG_RMLSTS 0x3618
+#define IDPK_WR_OK_STS BIT(6)
+#define IDPK_DATA_INDEX GENMASK(5, 0)
+#define DW_DP_HDCP_REG_SEED 0x361c
+#define DW_DP_HDCP_REG_DPK0 0x3620
+#define DW_DP_HDCP_REG_DPK1 0x3624
+#define DW_DP_HDCP22_GPIOSTS 0x3628
+#define DW_DP_HDCP22_GPIOCHNGSTS 0x362c
+#define DW_DP_HDCP_REG_DPK_CRC 0x3630
+
+#define DW_DP_MAX_REGISTER DW_DP_HDCP_REG_DPK_CRC
+
+#define SDP_REG_BANK_SIZE 16
+
+struct dw_dp_link_caps {
+ bool enhanced_framing;
+ bool tps3_supported;
+ bool tps4_supported;
+ bool fast_training;
+ bool channel_coding;
+ bool ssc;
+};
+
+struct dw_dp_link_train_set {
+ unsigned int voltage_swing[4];
+ unsigned int pre_emphasis[4];
+ bool voltage_max_reached[4];
+ bool pre_max_reached[4];
+};
+
+struct dw_dp_link_train {
+ struct dw_dp_link_train_set adjust;
+ bool clock_recovered;
+ bool channel_equalized;
+};
+
+struct dw_dp_link {
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ unsigned char revision;
+ unsigned int rate;
+ unsigned int lanes;
+ u8 sink_count;
+ u8 vsc_sdp_supported;
+ struct dw_dp_link_caps caps;
+ struct dw_dp_link_train train;
+ struct drm_dp_desc desc;
+};
+
+struct dw_dp_bridge_state {
+ struct drm_bridge_state base;
+ struct drm_display_mode mode;
+ u8 video_mapping;
+ u8 color_format;
+ u8 bpc;
+ u8 bpp;
+};
+
+struct dw_dp_sdp {
+ struct dp_sdp base;
+ unsigned long flags;
+};
+
+struct dw_dp_hotplug {
+ bool long_hpd;
+};
+
+struct dw_dp {
+ struct drm_bridge bridge;
+ struct device *dev;
+ struct regmap *regmap;
+ struct phy *phy;
+ struct clk *apb_clk;
+ struct clk *aux_clk;
+ struct clk *i2s_clk;
+ struct clk *spdif_clk;
+ struct clk *hdcp_clk;
+ struct reset_control *rstc;
+ struct completion complete;
+ int irq;
+ struct work_struct hpd_work;
+ struct dw_dp_hotplug hotplug;
+ /* Serialize hpd status access */
+ struct mutex irq_lock;
+
+ struct drm_dp_aux aux;
+
+ struct dw_dp_link link;
+ struct dw_dp_plat_data plat_data;
+ u8 pixel_mode;
+
+ DECLARE_BITMAP(sdp_reg_bank, SDP_REG_BANK_SIZE);
+};
+
+enum {
+ DW_DP_RGB_6BIT,
+ DW_DP_RGB_8BIT,
+ DW_DP_RGB_10BIT,
+ DW_DP_RGB_12BIT,
+ DW_DP_RGB_16BIT,
+ DW_DP_YCBCR444_8BIT,
+ DW_DP_YCBCR444_10BIT,
+ DW_DP_YCBCR444_12BIT,
+ DW_DP_YCBCR444_16BIT,
+ DW_DP_YCBCR422_8BIT,
+ DW_DP_YCBCR422_10BIT,
+ DW_DP_YCBCR422_12BIT,
+ DW_DP_YCBCR422_16BIT,
+ DW_DP_YCBCR420_8BIT,
+ DW_DP_YCBCR420_10BIT,
+ DW_DP_YCBCR420_12BIT,
+ DW_DP_YCBCR420_16BIT,
+};
+
+enum {
+ DW_DP_MP_SINGLE_PIXEL,
+ DW_DP_MP_DUAL_PIXEL,
+ DW_DP_MP_QUAD_PIXEL,
+};
+
+enum {
+ DW_DP_SDP_VERTICAL_INTERVAL = BIT(0),
+ DW_DP_SDP_HORIZONTAL_INTERVAL = BIT(1),
+};
+
+enum {
+ DW_DP_HPD_STATE_IDLE,
+ DW_DP_HPD_STATE_UNPLUG,
+ DW_DP_HPD_STATE_TIMEOUT = 4,
+ DW_DP_HPD_STATE_PLUG = 7
+};
+
+enum {
+ DW_DP_PHY_PATTERN_NONE,
+ DW_DP_PHY_PATTERN_TPS_1,
+ DW_DP_PHY_PATTERN_TPS_2,
+ DW_DP_PHY_PATTERN_TPS_3,
+ DW_DP_PHY_PATTERN_TPS_4,
+ DW_DP_PHY_PATTERN_SERM,
+ DW_DP_PHY_PATTERN_PBRS7,
+ DW_DP_PHY_PATTERN_CUSTOM_80BIT,
+ DW_DP_PHY_PATTERN_CP2520_1,
+ DW_DP_PHY_PATTERN_CP2520_2,
+};
+
+struct dw_dp_output_format {
+ u32 bus_format;
+ u32 color_format;
+ u8 video_mapping;
+ u8 bpc;
+ u8 bpp;
+};
+
+#define to_dw_dp_bridge_state(s) container_of(s, struct dw_dp_bridge_state, base)
+
+static const struct dw_dp_output_format dw_dp_output_formats[] = {
+ { MEDIA_BUS_FMT_RGB101010_1X30, DRM_COLOR_FORMAT_RGB444, DW_DP_RGB_10BIT, 10, 30 },
+ { MEDIA_BUS_FMT_RGB888_1X24, DRM_COLOR_FORMAT_RGB444, DW_DP_RGB_8BIT, 8, 24 },
+ { MEDIA_BUS_FMT_YUV10_1X30, DRM_COLOR_FORMAT_YCBCR444, DW_DP_YCBCR444_10BIT, 10, 30 },
+ { MEDIA_BUS_FMT_YUV8_1X24, DRM_COLOR_FORMAT_YCBCR444, DW_DP_YCBCR444_8BIT, 8, 24},
+ { MEDIA_BUS_FMT_YUYV10_1X20, DRM_COLOR_FORMAT_YCBCR422, DW_DP_YCBCR422_10BIT, 10, 20 },
+ { MEDIA_BUS_FMT_YUYV8_1X16, DRM_COLOR_FORMAT_YCBCR422, DW_DP_YCBCR422_8BIT, 8, 16 },
+ { MEDIA_BUS_FMT_UYYVYY10_0_5X30, DRM_COLOR_FORMAT_YCBCR420, DW_DP_YCBCR420_10BIT, 10, 15 },
+ { MEDIA_BUS_FMT_UYYVYY8_0_5X24, DRM_COLOR_FORMAT_YCBCR420, DW_DP_YCBCR420_8BIT, 8, 12 },
+ { MEDIA_BUS_FMT_RGB666_1X24_CPADHI, DRM_COLOR_FORMAT_RGB444, DW_DP_RGB_6BIT, 6, 18 },
+};
+
+static const struct dw_dp_output_format *dw_dp_get_output_format(u32 bus_format)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(dw_dp_output_formats); i++)
+ if (dw_dp_output_formats[i].bus_format == bus_format)
+ return &dw_dp_output_formats[i];
+
+ return NULL;
+}
+
+static inline struct dw_dp *bridge_to_dp(struct drm_bridge *b)
+{
+ return container_of(b, struct dw_dp, bridge);
+}
+
+static struct dw_dp_bridge_state *dw_dp_get_bridge_state(struct dw_dp *dp)
+{
+ struct dw_dp_bridge_state *dw_bridge_state;
+ struct drm_bridge_state *state;
+
+ state = drm_priv_to_bridge_state(dp->bridge.base.state);
+ if (!state)
+ return NULL;
+
+ dw_bridge_state = to_dw_dp_bridge_state(state);
+ if (!dw_bridge_state)
+ return NULL;
+
+ return dw_bridge_state;
+}
+
+static inline void dw_dp_phy_set_pattern(struct dw_dp *dp, u32 pattern)
+{
+ regmap_update_bits(dp->regmap, DW_DP_PHYIF_CTRL, TPS_SEL,
+ FIELD_PREP(TPS_SEL, pattern));
+}
+
+static void dw_dp_phy_xmit_enable(struct dw_dp *dp, u32 lanes)
+{
+ u32 xmit_enable;
+
+ switch (lanes) {
+ case 4:
+ case 2:
+ case 1:
+ xmit_enable = GENMASK(lanes - 1, 0);
+ break;
+ case 0:
+ default:
+ xmit_enable = 0;
+ break;
+ }
+
+ regmap_update_bits(dp->regmap, DW_DP_PHYIF_CTRL, XMIT_ENABLE,
+ FIELD_PREP(XMIT_ENABLE, xmit_enable));
+}
+
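+/*
+ * Both sides of the comparison are in kB/s: the link rate is stored in
+ * kHz of symbol clock and each 8b/10b symbol carries one payload byte
+ * per lane. For example, 1080p60 (148500 kHz) at 24 bpp needs
+ * 445500 kB/s, which one RBR lane (162000 kB/s) cannot carry but four
+ * can.
+ */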
+static bool dw_dp_bandwidth_ok(struct dw_dp *dp,
+ const struct drm_display_mode *mode, u32 bpp,
+ unsigned int lanes, unsigned int rate)
+{
+ u32 max_bw, req_bw;
+
+ req_bw = mode->clock * bpp / 8;
+ max_bw = lanes * rate;
+ if (req_bw > max_bw)
+ return false;
+
+ return true;
+}
+
+static bool dw_dp_hpd_detect(struct dw_dp *dp)
+{
+ u32 value;
+
+ regmap_read(dp->regmap, DW_DP_HPD_STATUS, &value);
+
+ return FIELD_GET(HPD_STATE, value) == DW_DP_HPD_STATE_PLUG;
+}
+
+static void dw_dp_link_caps_reset(struct dw_dp_link_caps *caps)
+{
+ caps->enhanced_framing = false;
+ caps->tps3_supported = false;
+ caps->tps4_supported = false;
+ caps->fast_training = false;
+ caps->channel_coding = false;
+}
+
+static void dw_dp_link_reset(struct dw_dp_link *link)
+{
+ link->vsc_sdp_supported = 0;
+ link->sink_count = 0;
+ link->revision = 0;
+ link->rate = 0;
+ link->lanes = 0;
+
+ dw_dp_link_caps_reset(&link->caps);
+ memset(link->dpcd, 0, sizeof(link->dpcd));
+}
+
+static int dw_dp_link_parse(struct dw_dp *dp, struct drm_connector *connector)
+{
+ struct dw_dp_link *link = &dp->link;
+ int ret;
+
+ dw_dp_link_reset(link);
+
+ ret = drm_dp_read_dpcd_caps(&dp->aux, link->dpcd);
+ if (ret < 0)
+ return ret;
+
+ drm_dp_read_desc(&dp->aux, &link->desc, drm_dp_is_branch(link->dpcd));
+
+ if (drm_dp_read_sink_count_cap(connector, link->dpcd, &link->desc)) {
+ ret = drm_dp_read_sink_count(&dp->aux);
+ if (ret < 0)
+ return ret;
+
+ link->sink_count = ret;
+
+ /* Dongle connected, but no display */
+ if (!link->sink_count)
+ return -ENODEV;
+ }
+
+ link->vsc_sdp_supported = drm_dp_vsc_sdp_supported(&dp->aux, link->dpcd);
+
+ link->revision = link->dpcd[DP_DPCD_REV];
+ link->rate = min_t(u32, min(dp->plat_data.max_link_rate,
+ dp->phy->attrs.max_link_rate * 100),
+ drm_dp_max_link_rate(link->dpcd));
+ link->lanes = min_t(u8, phy_get_bus_width(dp->phy),
+ drm_dp_max_lane_count(link->dpcd));
+
+ link->caps.enhanced_framing = drm_dp_enhanced_frame_cap(link->dpcd);
+ link->caps.tps3_supported = drm_dp_tps3_supported(link->dpcd);
+ link->caps.tps4_supported = drm_dp_tps4_supported(link->dpcd);
+ link->caps.fast_training = drm_dp_fast_training_cap(link->dpcd);
+ link->caps.channel_coding = drm_dp_channel_coding_supported(link->dpcd);
+ link->caps.ssc = !!(link->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5);
+
+ return 0;
+}
+
+static int dw_dp_link_train_update_vs_emph(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ struct dw_dp_link_train_set *train_set = &link->train.adjust;
+ unsigned int lanes = dp->link.lanes;
+ union phy_configure_opts phy_cfg;
+ unsigned int *vs, *pe;
+ int i, ret;
+ u8 buf[4];
+
+ vs = train_set->voltage_swing;
+ pe = train_set->pre_emphasis;
+
+ for (i = 0; i < lanes; i++) {
+ phy_cfg.dp.voltage[i] = vs[i];
+ phy_cfg.dp.pre[i] = pe[i];
+ }
+
+ phy_cfg.dp.set_lanes = false;
+ phy_cfg.dp.set_rate = false;
+ phy_cfg.dp.set_voltages = true;
+
+ ret = phy_configure(dp->phy, &phy_cfg);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < lanes; i++) {
+ buf[i] = (vs[i] << DP_TRAIN_VOLTAGE_SWING_SHIFT) |
+ (pe[i] << DP_TRAIN_PRE_EMPHASIS_SHIFT);
+ if (train_set->voltage_max_reached[i])
+ buf[i] |= DP_TRAIN_MAX_SWING_REACHED;
+ if (train_set->pre_max_reached[i])
+ buf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+ }
+
+ ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, buf, lanes);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int dw_dp_phy_configure(struct dw_dp *dp, unsigned int rate,
+ unsigned int lanes, bool ssc)
+{
+ union phy_configure_opts phy_cfg;
+ int ret;
+
+ /* Move PHY to P3 */
+ regmap_update_bits(dp->regmap, DW_DP_PHYIF_CTRL, PHY_POWERDOWN,
+ FIELD_PREP(PHY_POWERDOWN, 0x3));
+
+ phy_cfg.dp.lanes = lanes;
+ phy_cfg.dp.link_rate = rate / 100;
+ phy_cfg.dp.ssc = ssc;
+ phy_cfg.dp.set_lanes = true;
+ phy_cfg.dp.set_rate = true;
+ phy_cfg.dp.set_voltages = false;
+ ret = phy_configure(dp->phy, &phy_cfg);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(dp->regmap, DW_DP_PHYIF_CTRL, PHY_LANES,
+ FIELD_PREP(PHY_LANES, lanes / 2));
+
+ /* Move PHY to P0 */
+ regmap_update_bits(dp->regmap, DW_DP_PHYIF_CTRL, PHY_POWERDOWN,
+ FIELD_PREP(PHY_POWERDOWN, 0x0));
+
+ dw_dp_phy_xmit_enable(dp, lanes);
+
+ return 0;
+}
+
+static int dw_dp_link_configure(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ u8 buf[2];
+ int ret;
+
+ ret = dw_dp_phy_configure(dp, link->rate, link->lanes, link->caps.ssc);
+ if (ret)
+ return ret;
+
+ buf[0] = drm_dp_link_rate_to_bw_code(link->rate);
+ buf[1] = link->lanes;
+
+ if (link->caps.enhanced_framing) {
+ buf[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ regmap_update_bits(dp->regmap, DW_DP_CCTL, ENHANCE_FRAMING_EN,
+ FIELD_PREP(ENHANCE_FRAMING_EN, 1));
+ } else {
+ regmap_update_bits(dp->regmap, DW_DP_CCTL, ENHANCE_FRAMING_EN,
+ FIELD_PREP(ENHANCE_FRAMING_EN, 0));
+ }
+
+ ret = drm_dp_dpcd_write(&dp->aux, DP_LINK_BW_SET, buf, sizeof(buf));
+ if (ret < 0)
+ return ret;
+
+ buf[0] = link->caps.ssc ? DP_SPREAD_AMP_0_5 : 0;
+ buf[1] = link->caps.channel_coding ? DP_SET_ANSI_8B10B : 0;
+
+ ret = drm_dp_dpcd_write(&dp->aux, DP_DOWNSPREAD_CTRL, buf, sizeof(buf));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void dw_dp_link_train_init(struct dw_dp_link_train *train)
+{
+ struct dw_dp_link_train_set *adj = &train->adjust;
+ unsigned int i;
+
+ for (i = 0; i < 4; i++) {
+ adj->voltage_swing[i] = 0;
+ adj->pre_emphasis[i] = 0;
+ adj->voltage_max_reached[i] = false;
+ adj->pre_max_reached[i] = false;
+ }
+
+ train->clock_recovered = false;
+ train->channel_equalized = false;
+}
+
+static bool dw_dp_link_train_valid(const struct dw_dp_link_train *train)
+{
+ return train->clock_recovered && train->channel_equalized;
+}
+
+static int dw_dp_link_train_set_pattern(struct dw_dp *dp, u32 pattern)
+{
+ u8 buf = 0;
+ int ret;
+
+ if (pattern && pattern != DP_TRAINING_PATTERN_4) {
+ buf |= DP_LINK_SCRAMBLING_DISABLE;
+
+ regmap_update_bits(dp->regmap, DW_DP_CCTL, SCRAMBLE_DIS,
+ FIELD_PREP(SCRAMBLE_DIS, 1));
+ } else {
+ regmap_update_bits(dp->regmap, DW_DP_CCTL, SCRAMBLE_DIS,
+ FIELD_PREP(SCRAMBLE_DIS, 0));
+ }
+
+ switch (pattern) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ dw_dp_phy_set_pattern(dp, DW_DP_PHY_PATTERN_NONE);
+ break;
+ case DP_TRAINING_PATTERN_1:
+ dw_dp_phy_set_pattern(dp, DW_DP_PHY_PATTERN_TPS_1);
+ break;
+ case DP_TRAINING_PATTERN_2:
+ dw_dp_phy_set_pattern(dp, DW_DP_PHY_PATTERN_TPS_2);
+ break;
+ case DP_TRAINING_PATTERN_3:
+ dw_dp_phy_set_pattern(dp, DW_DP_PHY_PATTERN_TPS_3);
+ break;
+ case DP_TRAINING_PATTERN_4:
+ dw_dp_phy_set_pattern(dp, DW_DP_PHY_PATTERN_TPS_4);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+ buf | pattern);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static u8 dw_dp_voltage_max(u8 preemph)
+{
+ switch (preemph & DP_TRAIN_PRE_EMPHASIS_MASK) {
+ case DP_TRAIN_PRE_EMPH_LEVEL_0:
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
+ case DP_TRAIN_PRE_EMPH_LEVEL_1:
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+ case DP_TRAIN_PRE_EMPH_LEVEL_2:
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
+ case DP_TRAIN_PRE_EMPH_LEVEL_3:
+ default:
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
+ }
+}
+
+static bool dw_dp_link_get_adjustments(struct dw_dp_link *link,
+ u8 status[DP_LINK_STATUS_SIZE])
+{
+ struct dw_dp_link_train_set *adj = &link->train.adjust;
+ unsigned int i;
+ bool changed = false;
+ u8 v = 0;
+ u8 p = 0;
+
+ for (i = 0; i < link->lanes; i++) {
+ v = drm_dp_get_adjust_request_voltage(status, i);
+ v >>= DP_TRAIN_VOLTAGE_SWING_SHIFT;
+ p = drm_dp_get_adjust_request_pre_emphasis(status, i);
+ p >>= DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+ if (v != adj->voltage_swing[i] || p != adj->pre_emphasis[i])
+ changed = true;
+
+ if (p >= (DP_TRAIN_PRE_EMPH_LEVEL_3 >> DP_TRAIN_PRE_EMPHASIS_SHIFT)) {
+ adj->pre_emphasis[i] = DP_TRAIN_PRE_EMPH_LEVEL_3 >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT;
+ adj->pre_max_reached[i] = true;
+ } else {
+ adj->pre_emphasis[i] = p;
+ adj->pre_max_reached[i] = false;
+ }
+
+ v = min(v, dw_dp_voltage_max(p));
+ if (v >= (DP_TRAIN_VOLTAGE_SWING_LEVEL_3 >> DP_TRAIN_VOLTAGE_SWING_SHIFT)) {
+ adj->voltage_swing[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL_3 >>
+ DP_TRAIN_VOLTAGE_SWING_SHIFT;
+ adj->voltage_max_reached[i] = true;
+ } else {
+ adj->voltage_swing[i] = v;
+ adj->voltage_max_reached[i] = false;
+ }
+ }
+
+ return changed;
+}
+
+static int dw_dp_link_clock_recovery(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ u8 status[DP_LINK_STATUS_SIZE];
+ unsigned int tries = 0;
+ int ret;
+ bool adj_changed;
+
+ ret = dw_dp_link_train_set_pattern(dp, DP_TRAINING_PATTERN_1);
+ if (ret)
+ return ret;
+
+ for (;;) {
+ ret = dw_dp_link_train_update_vs_emph(dp);
+ if (ret)
+ return ret;
+
+ drm_dp_link_train_clock_recovery_delay(&dp->aux, link->dpcd);
+
+ ret = drm_dp_dpcd_read_link_status(&dp->aux, status);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to read link status: %d\n", ret);
+ return ret;
+ }
+
+ if (drm_dp_clock_recovery_ok(status, link->lanes)) {
+ link->train.clock_recovered = true;
+ break;
+ }
+
+ /*
+ * According to the DP 1.4 spec, if the requested adjustment is
+ * the same as the previous one, retry with unchanged settings
+ * at most 5 times before giving up.
+ */
+ adj_changed = dw_dp_link_get_adjustments(link, status);
+ if (!adj_changed)
+ tries++;
+ else
+ tries = 0;
+
+ if (tries == 5)
+ break;
+ }
+
+ return 0;
+}
+
+static int dw_dp_link_channel_equalization(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ u8 status[DP_LINK_STATUS_SIZE], pattern;
+ unsigned int tries;
+ int ret;
+
+ if (link->caps.tps4_supported)
+ pattern = DP_TRAINING_PATTERN_4;
+ else if (link->caps.tps3_supported)
+ pattern = DP_TRAINING_PATTERN_3;
+ else
+ pattern = DP_TRAINING_PATTERN_2;
+ ret = dw_dp_link_train_set_pattern(dp, pattern);
+ if (ret)
+ return ret;
+
+ for (tries = 1; tries < 5; tries++) {
+ ret = dw_dp_link_train_update_vs_emph(dp);
+ if (ret)
+ return ret;
+
+ drm_dp_link_train_channel_eq_delay(&dp->aux, link->dpcd);
+
+ ret = drm_dp_dpcd_read_link_status(&dp->aux, status);
+ if (ret < 0)
+ return ret;
+
+ if (!drm_dp_clock_recovery_ok(status, link->lanes)) {
+ dev_err(dp->dev, "clock recovery lost while equalizing channel\n");
+ link->train.clock_recovered = false;
+ break;
+ }
+
+ if (drm_dp_channel_eq_ok(status, link->lanes)) {
+ link->train.channel_equalized = true;
+ break;
+ }
+
+ dw_dp_link_get_adjustments(link, status);
+ }
+
+ return 0;
+}
+
+static int dw_dp_link_downgrade(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ struct dw_dp_bridge_state *state;
+
+ state = dw_dp_get_bridge_state(dp);
+
+ switch (link->rate) {
+ case 162000:
+ return -EINVAL;
+ case 270000:
+ link->rate = 162000;
+ break;
+ case 540000:
+ link->rate = 270000;
+ break;
+ case 810000:
+ link->rate = 540000;
+ break;
+ }
+
+ if (!dw_dp_bandwidth_ok(dp, &state->mode, state->bpp, link->lanes,
+ link->rate))
+ return -E2BIG;
+
+ return 0;
+}
+
+static int dw_dp_link_train_full(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ int ret;
+
+retry:
+ dw_dp_link_train_init(&link->train);
+
+ dev_dbg(dp->dev, "full-training link: %u lane%s at %u MHz\n",
+ link->lanes, (link->lanes > 1) ? "s" : "", link->rate / 100);
+
+ ret = dw_dp_link_configure(dp);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to configure DP link: %d\n", ret);
+ return ret;
+ }
+
+ ret = dw_dp_link_clock_recovery(dp);
+ if (ret < 0) {
+ dev_err(dp->dev, "clock recovery failed: %d\n", ret);
+ goto out;
+ }
+
+ if (!link->train.clock_recovered) {
+ dev_err(dp->dev, "clock recovery failed, downgrading link\n");
+
+ ret = dw_dp_link_downgrade(dp);
+ if (ret < 0)
+ goto out;
+ else
+ goto retry;
+ }
+
+ dev_dbg(dp->dev, "clock recovery succeeded\n");
+
+ ret = dw_dp_link_channel_equalization(dp);
+ if (ret < 0) {
+ dev_err(dp->dev, "channel equalization failed: %d\n", ret);
+ goto out;
+ }
+
+ if (!link->train.channel_equalized) {
+ dev_err(dp->dev, "channel equalization failed, downgrading link\n");
+
+ ret = dw_dp_link_downgrade(dp);
+ if (ret < 0)
+ goto out;
+ else
+ goto retry;
+ }
+
+ dev_dbg(dp->dev, "channel equalization succeeded\n");
+
+out:
+ dw_dp_link_train_set_pattern(dp, DP_TRAINING_PATTERN_DISABLE);
+ return ret;
+}
+
+static int dw_dp_link_train_fast(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ int ret;
+ u8 status[DP_LINK_STATUS_SIZE];
+ u8 pattern;
+
+ dw_dp_link_train_init(&link->train);
+
+ dev_dbg(dp->dev, "fast-training link: %u lane%s at %u MHz\n",
+ link->lanes, (link->lanes > 1) ? "s" : "", link->rate / 100);
+
+ ret = dw_dp_link_configure(dp);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to configure DP link: %d\n", ret);
+ return ret;
+ }
+
+ ret = dw_dp_link_train_set_pattern(dp, DP_TRAINING_PATTERN_1);
+ if (ret)
+ goto out;
+
+ usleep_range(500, 1000);
+
+ if (link->caps.tps4_supported)
+ pattern = DP_TRAINING_PATTERN_4;
+ else if (link->caps.tps3_supported)
+ pattern = DP_TRAINING_PATTERN_3;
+ else
+ pattern = DP_TRAINING_PATTERN_2;
+ ret = dw_dp_link_train_set_pattern(dp, pattern);
+ if (ret)
+ goto out;
+
+ usleep_range(500, 1000);
+
+ ret = drm_dp_dpcd_read_link_status(&dp->aux, status);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to read link status: %d\n", ret);
+ goto out;
+ }
+
+ if (!drm_dp_clock_recovery_ok(status, link->lanes)) {
+ dev_err(dp->dev, "clock recovery failed\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ if (!drm_dp_channel_eq_ok(status, link->lanes)) {
+ dev_err(dp->dev, "channel equalization failed\n");
+ ret = -EIO;
+ goto out;
+ }
+
+out:
+ dw_dp_link_train_set_pattern(dp, DP_TRAINING_PATTERN_DISABLE);
+ return ret;
+}
+
+static int dw_dp_link_train(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ int ret;
+
+ if (link->caps.fast_training) {
+ if (dw_dp_link_train_valid(&link->train)) {
+ ret = dw_dp_link_train_fast(dp);
+ if (ret < 0)
+ dev_err(dp->dev, "fast link training failed: %d\n", ret);
+ else
+ return 0;
+ }
+ }
+
+ ret = dw_dp_link_train_full(dp);
+ if (ret < 0) {
+ dev_err(dp->dev, "full link training failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dw_dp_send_sdp(struct dw_dp *dp, struct dw_dp_sdp *sdp)
+{
+ const u8 *payload = sdp->base.db;
+ u32 reg;
+ int i, nr;
+
+ nr = find_first_zero_bit(dp->sdp_reg_bank, SDP_REG_BANK_SIZE);
+ if (nr < SDP_REG_BANK_SIZE)
+ set_bit(nr, dp->sdp_reg_bank);
+ else
+ return -EBUSY;
+
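+ /*
+ * Each slot in the SDP register bank is nine dwords: one header word
+ * followed by the 32-byte payload.
+ */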
+ reg = DW_DP_SDP_REGISTER_BANK + nr * 9 * 4;
+
+ /* SDP header */
+ regmap_write(dp->regmap, reg, get_unaligned_le32(&sdp->base.sdp_header));
+
+ /* SDP data payload */
+ for (i = 1; i < 9; i++, payload += 4)
+ regmap_write(dp->regmap, reg + i * 4,
+ FIELD_PREP(SDP_REGS, get_unaligned_le32(payload)));
+
+ if (sdp->flags & DW_DP_SDP_VERTICAL_INTERVAL)
+ regmap_update_bits(dp->regmap, DW_DP_SDP_VERTICAL_CTRL,
+ EN_VERTICAL_SDP << nr,
+ EN_VERTICAL_SDP << nr);
+
+ if (sdp->flags & DW_DP_SDP_HORIZONTAL_INTERVAL)
+ regmap_update_bits(dp->regmap, DW_DP_SDP_HORIZONTAL_CTRL,
+ EN_HORIZONTAL_SDP << nr,
+ EN_HORIZONTAL_SDP << nr);
+
+ return 0;
+}
+
+static int dw_dp_send_vsc_sdp(struct dw_dp *dp)
+{
+ struct dw_dp_bridge_state *state;
+ struct dw_dp_sdp sdp = {};
+ struct drm_dp_vsc_sdp vsc = {};
+
+ state = dw_dp_get_bridge_state(dp);
+ if (!state)
+ return -EINVAL;
+
+ vsc.bpc = state->bpc;
+
+ vsc.sdp_type = DP_SDP_VSC;
+ vsc.revision = 0x5;
+ vsc.length = 0x13;
+ vsc.content_type = DP_CONTENT_TYPE_NOT_DEFINED;
+
+ sdp.flags = DW_DP_SDP_VERTICAL_INTERVAL;
+
+ switch (state->color_format) {
+ case DRM_COLOR_FORMAT_YCBCR444:
+ vsc.pixelformat = DP_PIXELFORMAT_YUV444;
+ break;
+ case DRM_COLOR_FORMAT_YCBCR420:
+ vsc.pixelformat = DP_PIXELFORMAT_YUV420;
+ break;
+ case DRM_COLOR_FORMAT_YCBCR422:
+ vsc.pixelformat = DP_PIXELFORMAT_YUV422;
+ break;
+ case DRM_COLOR_FORMAT_RGB444:
+ default:
+ vsc.pixelformat = DP_PIXELFORMAT_RGB;
+ break;
+ }
+
+ if (state->color_format == DRM_COLOR_FORMAT_RGB444) {
+ vsc.colorimetry = DP_COLORIMETRY_DEFAULT;
+ vsc.dynamic_range = DP_DYNAMIC_RANGE_VESA;
+ } else {
+ vsc.colorimetry = DP_COLORIMETRY_BT709_YCC;
+ vsc.dynamic_range = DP_DYNAMIC_RANGE_CTA;
+ }
+
+ drm_dp_vsc_sdp_pack(&vsc, &sdp.base);
+
+ return dw_dp_send_sdp(dp, &sdp);
+}
+
+static int dw_dp_video_set_pixel_mode(struct dw_dp *dp)
+{
+ switch (dp->pixel_mode) {
+ case DW_DP_MP_SINGLE_PIXEL:
+ case DW_DP_MP_DUAL_PIXEL:
+ case DW_DP_MP_QUAD_PIXEL:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(dp->regmap, DW_DP_VSAMPLE_CTRL, PIXEL_MODE_SELECT,
+ FIELD_PREP(PIXEL_MODE_SELECT, dp->pixel_mode));
+
+ return 0;
+}
+
+static bool dw_dp_video_need_vsc_sdp(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ struct dw_dp_bridge_state *state;
+
+ state = dw_dp_get_bridge_state(dp);
+ if (!state)
+ return false;
+
+ if (!link->vsc_sdp_supported)
+ return false;
+
+ if (state->color_format == DRM_COLOR_FORMAT_YCBCR420)
+ return true;
+
+ return false;
+}
+
+static int dw_dp_video_set_msa(struct dw_dp *dp, u8 color_format, u8 bpc,
+ u16 vstart, u16 hstart)
+{
+ u16 misc = 0;
+
+ if (dw_dp_video_need_vsc_sdp(dp))
+ misc |= DP_MSA_MISC_COLOR_VSC_SDP;
+
+ switch (color_format) {
+ case DRM_COLOR_FORMAT_RGB444:
+ misc |= DP_MSA_MISC_COLOR_RGB;
+ break;
+ case DRM_COLOR_FORMAT_YCBCR444:
+ misc |= DP_MSA_MISC_COLOR_YCBCR_444_BT709;
+ break;
+ case DRM_COLOR_FORMAT_YCBCR422:
+ misc |= DP_MSA_MISC_COLOR_YCBCR_422_BT709;
+ break;
+ case DRM_COLOR_FORMAT_YCBCR420:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (bpc) {
+ case 6:
+ misc |= DP_MSA_MISC_6_BPC;
+ break;
+ case 8:
+ misc |= DP_MSA_MISC_8_BPC;
+ break;
+ case 10:
+ misc |= DP_MSA_MISC_10_BPC;
+ break;
+ case 12:
+ misc |= DP_MSA_MISC_12_BPC;
+ break;
+ case 16:
+ misc |= DP_MSA_MISC_16_BPC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_write(dp->regmap, DW_DP_VIDEO_MSA1,
+ FIELD_PREP(VSTART, vstart) | FIELD_PREP(HSTART, hstart));
+ regmap_write(dp->regmap, DW_DP_VIDEO_MSA2, FIELD_PREP(MISC0, misc));
+ regmap_write(dp->regmap, DW_DP_VIDEO_MSA3, FIELD_PREP(MISC1, misc >> 8));
+
+ return 0;
+}
+
+static void dw_dp_video_disable(struct dw_dp *dp)
+{
+ regmap_update_bits(dp->regmap, DW_DP_VSAMPLE_CTRL, VIDEO_STREAM_ENABLE,
+ FIELD_PREP(VIDEO_STREAM_ENABLE, 0));
+}
+
+static int dw_dp_video_enable(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ struct dw_dp_bridge_state *state;
+ struct drm_display_mode *mode;
+ u8 color_format, bpc, bpp;
+ u8 init_threshold, vic;
+ u32 hstart, hactive, hblank, h_sync_width, h_front_porch;
+ u32 vstart, vactive, vblank, v_sync_width, v_front_porch;
+ u32 peak_stream_bandwidth, link_bandwidth;
+ u32 average_bytes_per_tu, average_bytes_per_tu_frac;
+ u32 ts, hblank_interval;
+ u32 value;
+ int ret;
+
+ state = dw_dp_get_bridge_state(dp);
+ if (!state)
+ return -EINVAL;
+
+ bpc = state->bpc;
+ bpp = state->bpp;
+ color_format = state->color_format;
+ mode = &state->mode;
+
+ vstart = mode->vtotal - mode->vsync_start;
+ hstart = mode->htotal - mode->hsync_start;
+
+ ret = dw_dp_video_set_pixel_mode(dp);
+ if (ret)
+ return ret;
+
+ ret = dw_dp_video_set_msa(dp, color_format, bpc, vstart, hstart);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(dp->regmap, DW_DP_VSAMPLE_CTRL, VIDEO_MAPPING,
+ FIELD_PREP(VIDEO_MAPPING, state->video_mapping));
+
+ /* Configure DW_DP_VINPUT_POLARITY_CTRL register */
+ value = 0;
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ value |= FIELD_PREP(HSYNC_IN_POLARITY, 1);
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ value |= FIELD_PREP(VSYNC_IN_POLARITY, 1);
+ regmap_write(dp->regmap, DW_DP_VINPUT_POLARITY_CTRL, value);
+
+ /* Configure DW_DP_VIDEO_CONFIG1 register */
+ hactive = mode->hdisplay;
+ hblank = mode->htotal - mode->hdisplay;
+ value = FIELD_PREP(HACTIVE, hactive) | FIELD_PREP(HBLANK, hblank);
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ value |= FIELD_PREP(I_P, 1);
+ vic = drm_match_cea_mode(mode);
+ if (vic == 5 || vic == 6 || vic == 7 ||
+ vic == 10 || vic == 11 || vic == 20 ||
+ vic == 21 || vic == 22 || vic == 39 ||
+ vic == 25 || vic == 26 || vic == 40 ||
+ vic == 44 || vic == 45 || vic == 46 ||
+ vic == 50 || vic == 51 || vic == 54 ||
+ vic == 55 || vic == 58 || vic == 59)
+ value |= R_V_BLANK_IN_OSC;
+ regmap_write(dp->regmap, DW_DP_VIDEO_CONFIG1, value);
+
+ /* Configure DW_DP_VIDEO_CONFIG2 register */
+ vblank = mode->vtotal - mode->vdisplay;
+ vactive = mode->vdisplay;
+ regmap_write(dp->regmap, DW_DP_VIDEO_CONFIG2,
+ FIELD_PREP(VBLANK, vblank) | FIELD_PREP(VACTIVE, vactive));
+
+ /* Configure DW_DP_VIDEO_CONFIG3 register */
+ h_sync_width = mode->hsync_end - mode->hsync_start;
+ h_front_porch = mode->hsync_start - mode->hdisplay;
+ regmap_write(dp->regmap, DW_DP_VIDEO_CONFIG3,
+ FIELD_PREP(H_SYNC_WIDTH, h_sync_width) |
+ FIELD_PREP(H_FRONT_PORCH, h_front_porch));
+
+ /* Configure DW_DP_VIDEO_CONFIG4 register */
+ v_sync_width = mode->vsync_end - mode->vsync_start;
+ v_front_porch = mode->vsync_start - mode->vdisplay;
+ regmap_write(dp->regmap, DW_DP_VIDEO_CONFIG4,
+ FIELD_PREP(V_SYNC_WIDTH, v_sync_width) |
+ FIELD_PREP(V_FRONT_PORCH, v_front_porch));
+
+ /* Configure DW_DP_VIDEO_CONFIG5 register */
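+ /*
+ * A transfer unit is 64 symbols per lane; the average payload per TU
+ * is stream_bandwidth / link_bandwidth * 64, computed here in fixed
+ * point with one fractional decimal digit.
+ */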
+ peak_stream_bandwidth = mode->clock * bpp / 8;
+ link_bandwidth = (link->rate / 1000) * link->lanes;
+ ts = peak_stream_bandwidth * 64 / link_bandwidth;
+ average_bytes_per_tu = ts / 1000;
+ average_bytes_per_tu_frac = ts / 100 - average_bytes_per_tu * 10;
+ if (dp->pixel_mode == DW_DP_MP_SINGLE_PIXEL) {
+ if (average_bytes_per_tu < 6)
+ init_threshold = 32;
+ else if (hblank <= 80 && color_format != DRM_COLOR_FORMAT_YCBCR420)
+ init_threshold = 12;
+ else if (hblank <= 40 && color_format == DRM_COLOR_FORMAT_YCBCR420)
+ init_threshold = 3;
+ else
+ init_threshold = 16;
+ } else {
+ u32 t1 = 0, t2 = 0, t3 = 0;
+
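+ /*
+ * Multi-pixel modes scale the threshold by a format-dependent
+ * factor (t1), the link-to-pixel clock ratio (t2) and the
+ * rounded-up TU payload (t3); the constants presumably follow
+ * the DesignWare databook.
+ */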
+ switch (bpc) {
+ case 6:
+ t1 = (4 * 1000 / 9) * link->lanes;
+ break;
+ case 8:
+ if (color_format == DRM_COLOR_FORMAT_YCBCR422) {
+ t1 = (1000 / 2) * link->lanes;
+ } else {
+ if (dp->pixel_mode == DW_DP_MP_DUAL_PIXEL)
+ t1 = (1000 / 3) * link->lanes;
+ else
+ t1 = (3000 / 16) * link->lanes;
+ }
+ break;
+ case 10:
+ if (color_format == DRM_COLOR_FORMAT_YCBCR422)
+ t1 = (2000 / 5) * link->lanes;
+ else
+ t1 = (4000 / 15) * link->lanes;
+ break;
+ case 12:
+ if (color_format == DRM_COLOR_FORMAT_YCBCR422) {
+ if (dp->pixel_mode == DW_DP_MP_DUAL_PIXEL)
+ t1 = (1000 / 6) * link->lanes;
+ else
+ t1 = (1000 / 3) * link->lanes;
+ } else {
+ t1 = (2000 / 9) * link->lanes;
+ }
+ break;
+ case 16:
+ if (color_format != DRM_COLOR_FORMAT_YCBCR422 &&
+ dp->pixel_mode == DW_DP_MP_DUAL_PIXEL)
+ t1 = (1000 / 6) * link->lanes;
+ else
+ t1 = (1000 / 4) * link->lanes;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (color_format == DRM_COLOR_FORMAT_YCBCR420)
+ t2 = (link->rate / 4) * 1000 / (mode->clock / 2);
+ else
+ t2 = (link->rate / 4) * 1000 / mode->clock;
+
+ if (average_bytes_per_tu_frac)
+ t3 = average_bytes_per_tu + 1;
+ else
+ t3 = average_bytes_per_tu;
+ init_threshold = t1 * t2 * t3 / (1000 * 1000);
+ if (init_threshold <= 16 || average_bytes_per_tu < 10)
+ init_threshold = 40;
+ }
+
+ regmap_write(dp->regmap, DW_DP_VIDEO_CONFIG5,
+ FIELD_PREP(INIT_THRESHOLD_HI, init_threshold >> 6) |
+ FIELD_PREP(AVERAGE_BYTES_PER_TU_FRAC, average_bytes_per_tu_frac) |
+ FIELD_PREP(INIT_THRESHOLD, init_threshold) |
+ FIELD_PREP(AVERAGE_BYTES_PER_TU, average_bytes_per_tu));
+
+ /* Configure DW_DP_VIDEO_HBLANK_INTERVAL register */
+ hblank_interval = hblank * (link->rate / 4) / mode->clock;
+ regmap_write(dp->regmap, DW_DP_VIDEO_HBLANK_INTERVAL,
+ FIELD_PREP(HBLANK_INTERVAL_EN, 1) |
+ FIELD_PREP(HBLANK_INTERVAL, hblank_interval));
+
+ /* Video stream enable */
+ regmap_update_bits(dp->regmap, DW_DP_VSAMPLE_CTRL, VIDEO_STREAM_ENABLE,
+ FIELD_PREP(VIDEO_STREAM_ENABLE, 1));
+
+ if (dw_dp_video_need_vsc_sdp(dp))
+ dw_dp_send_vsc_sdp(dp);
+
+ return 0;
+}
+
+static void dw_dp_hpd_init(struct dw_dp *dp)
+{
+ /* Enable all HPD interrupts */
+ regmap_update_bits(dp->regmap, DW_DP_HPD_INTERRUPT_ENABLE,
+ HPD_UNPLUG_EN | HPD_PLUG_EN | HPD_IRQ_EN,
+ FIELD_PREP(HPD_UNPLUG_EN, 1) |
+ FIELD_PREP(HPD_PLUG_EN, 1) |
+ FIELD_PREP(HPD_IRQ_EN, 1));
+
+ /* Enable all top-level interrupts */
+ regmap_update_bits(dp->regmap, DW_DP_GENERAL_INTERRUPT_ENABLE,
+ HPD_EVENT_EN, FIELD_PREP(HPD_EVENT_EN, 1));
+}
+
+static void dw_dp_aux_init(struct dw_dp *dp)
+{
+ regmap_update_bits(dp->regmap, DW_DP_GENERAL_INTERRUPT_ENABLE,
+ AUX_REPLY_EVENT_EN, FIELD_PREP(AUX_REPLY_EVENT_EN, 1));
+}
+
+static void dw_dp_init_hw(struct dw_dp *dp)
+{
+ regmap_update_bits(dp->regmap, DW_DP_CCTL, DEFAULT_FAST_LINK_TRAIN_EN,
+ FIELD_PREP(DEFAULT_FAST_LINK_TRAIN_EN, 0));
+
+ dw_dp_hpd_init(dp);
+ dw_dp_aux_init(dp);
+}
+
+static int dw_dp_aux_write_data(struct dw_dp *dp, const u8 *buffer, size_t size)
+{
+ size_t i, j;
+
+ for (i = 0; i < DIV_ROUND_UP(size, 4); i++) {
+ size_t num = min_t(size_t, size - i * 4, 4);
+ u32 value = 0;
+
+ for (j = 0; j < num; j++)
+ value |= buffer[i * 4 + j] << (j * 8);
+
+ regmap_write(dp->regmap, DW_DP_AUX_DATA0 + i * 4, value);
+ }
+
+ return size;
+}
+
+static int dw_dp_aux_read_data(struct dw_dp *dp, u8 *buffer, size_t size)
+{
+ size_t i, j;
+
+ for (i = 0; i < DIV_ROUND_UP(size, 4); i++) {
+ size_t num = min_t(size_t, size - i * 4, 4);
+ u32 value;
+
+ regmap_read(dp->regmap, DW_DP_AUX_DATA0 + i * 4, &value);
+
+ for (j = 0; j < num; j++)
+ buffer[i * 4 + j] = value >> (j * 8);
+ }
+
+ return size;
+}
+
+static ssize_t dw_dp_aux_transfer(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ struct dw_dp *dp = container_of(aux, struct dw_dp, aux);
+ unsigned long timeout = msecs_to_jiffies(10);
+ u32 status, value;
+ ssize_t ret = 0;
+
+ if (WARN_ON(msg->size > 16))
+ return -E2BIG;
+
+ switch (msg->request & ~DP_AUX_I2C_MOT) {
+ case DP_AUX_NATIVE_WRITE:
+ case DP_AUX_I2C_WRITE:
+ case DP_AUX_I2C_WRITE_STATUS_UPDATE:
+ ret = dw_dp_aux_write_data(dp, msg->buffer, msg->size);
+ if (ret < 0)
+ return ret;
+ break;
+ case DP_AUX_NATIVE_READ:
+ case DP_AUX_I2C_READ:
+ break;
+ default:
+ return -EINVAL;
+ }
+
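+ /*
+ * AUX_LEN_REQ holds the request length minus one; a zero-length
+ * message is issued as an address-only I2C transaction instead.
+ */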
+ if (msg->size > 0)
+ value = FIELD_PREP(AUX_LEN_REQ, msg->size - 1);
+ else
+ value = FIELD_PREP(I2C_ADDR_ONLY, 1);
+ value |= FIELD_PREP(AUX_CMD_TYPE, msg->request);
+ value |= FIELD_PREP(AUX_ADDR, msg->address);
+ regmap_write(dp->regmap, DW_DP_AUX_CMD, value);
+
+ status = wait_for_completion_timeout(&dp->complete, timeout);
+ if (!status) {
+ dev_err(dp->dev, "timeout waiting for AUX reply\n");
+ return -ETIMEDOUT;
+ }
+
+ regmap_read(dp->regmap, DW_DP_AUX_STATUS, &value);
+ if (value & AUX_TIMEOUT)
+ return -ETIMEDOUT;
+
+ msg->reply = FIELD_GET(AUX_STATUS, value);
+
+ if (msg->size > 0 && msg->reply == DP_AUX_NATIVE_REPLY_ACK) {
+ if (msg->request & DP_AUX_I2C_READ) {
+ size_t count = FIELD_GET(AUX_BYTES_READ, value) - 1;
+
+ if (count != msg->size)
+ return -EBUSY;
+
+ ret = dw_dp_aux_read_data(dp, msg->buffer, count);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Limits on the video timing for DP:
+ * 1. the hfp must be 2-pixel aligned;
+ * 2. the hsync must be at least 9 pixels;
+ * 3. the hbp must be at least 16 pixels.
+ */
+static int dw_dp_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ struct dw_dp *dp = bridge_to_dp(bridge);
+ struct dw_dp_bridge_state *state;
+ const struct dw_dp_output_format *fmt;
+ struct drm_display_mode *mode;
+ int min_hbp = 16;
+ int min_hsync = 9;
+
+ state = to_dw_dp_bridge_state(bridge_state);
+ mode = &state->mode;
+
+ fmt = dw_dp_get_output_format(bridge_state->output_bus_cfg.format);
+ if (!fmt)
+ return -EINVAL;
+
+ state->video_mapping = fmt->video_mapping;
+ state->color_format = fmt->color_format;
+ state->bpc = fmt->bpc;
+ state->bpp = fmt->bpp;
+
+ if ((adjusted_mode->hsync_start - adjusted_mode->hdisplay) & 0x1) {
+ adjusted_mode->hsync_start += 1;
+ dev_warn(dp->dev, "hfp is not 2 pixeel aligned, fixup to aligned hfp\n");
+ }
+
+ if (adjusted_mode->hsync_end - adjusted_mode->hsync_start < min_hsync) {
+ adjusted_mode->hsync_end = adjusted_mode->hsync_start + min_hsync;
+ dev_warn(dp->dev, "hsync is too narrow, fixup to min hsync:%d\n", min_hsync);
+ }
+
+ if (adjusted_mode->htotal - adjusted_mode->hsync_end < min_hbp) {
+ adjusted_mode->htotal = adjusted_mode->hsync_end + min_hbp;
+ dev_warn(dp->dev, "hbp is too narrow, fixup to min hbp:%d\n", min_hbp);
+ }
+
+ drm_mode_copy(mode, adjusted_mode);
+
+ return 0;
+}
+
+static enum drm_mode_status dw_dp_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ struct dw_dp *dp = bridge_to_dp(bridge);
+ struct dw_dp_link *link = &dp->link;
+ u32 min_bpp;
+
+ if (info->color_formats & DRM_COLOR_FORMAT_YCBCR420 &&
+ link->vsc_sdp_supported &&
+ (drm_mode_is_420_only(info, mode) || drm_mode_is_420_also(info, mode)))
+ min_bpp = 12;
+ else if (info->color_formats & DRM_COLOR_FORMAT_YCBCR422)
+ min_bpp = 16;
+ else if (info->color_formats & DRM_COLOR_FORMAT_RGB444)
+ min_bpp = 18;
+ else
+ min_bpp = 24;
+
+ if (!link->vsc_sdp_supported &&
+ drm_mode_is_420_only(info, mode))
+ return MODE_NO_420;
+
+ if (!dw_dp_bandwidth_ok(dp, mode, min_bpp, link->lanes, link->rate))
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static bool dw_dp_needs_link_retrain(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ u8 link_status[DP_LINK_STATUS_SIZE];
+
+ if (!dw_dp_link_train_valid(&link->train))
+ return false;
+
+ if (drm_dp_dpcd_read_link_status(&dp->aux, link_status) < 0)
+ return false;
+
+ /* Retrain if Channel EQ or CR not ok */
+ return !drm_dp_channel_eq_ok(link_status, dp->link.lanes);
+}
+
+static void dw_dp_link_disable(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+
+ if (dw_dp_hpd_detect(dp))
+ drm_dp_link_power_down(&dp->aux, dp->link.revision);
+
+ dw_dp_phy_xmit_enable(dp, 0);
+
+ phy_power_off(dp->phy);
+
+ link->train.clock_recovered = false;
+ link->train.channel_equalized = false;
+}
+
+static int dw_dp_link_enable(struct dw_dp *dp)
+{
+ int ret;
+
+ ret = phy_power_on(dp->phy);
+ if (ret)
+ return ret;
+
+ ret = drm_dp_link_power_up(&dp->aux, dp->link.revision);
+ if (ret < 0)
+ return ret;
+
+ ret = dw_dp_link_train(dp);
+
+ return ret;
+}
+
+static void dw_dp_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct dw_dp *dp = bridge_to_dp(bridge);
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ int ret;
+
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ if (!connector) {
+ dev_err(dp->dev, "failed to get connector\n");
+ return;
+ }
+
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (!conn_state) {
+ dev_err(dp->dev, "failed to get connector state\n");
+ return;
+ }
+
+ set_bit(0, dp->sdp_reg_bank);
+
+ ret = dw_dp_link_enable(dp);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to enable link: %d\n", ret);
+ return;
+ }
+
+ ret = dw_dp_video_enable(dp);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to enable video: %d\n", ret);
+ return;
+ }
+}
+
+static void dw_dp_reset(struct dw_dp *dp)
+{
+ u32 val;
+
+ disable_irq(dp->irq);
+ regmap_update_bits(dp->regmap, DW_DP_SOFT_RESET_CTRL, CONTROLLER_RESET,
+ FIELD_PREP(CONTROLLER_RESET, 1));
+ usleep_range(10, 20);
+ regmap_update_bits(dp->regmap, DW_DP_SOFT_RESET_CTRL, CONTROLLER_RESET,
+ FIELD_PREP(CONTROLLER_RESET, 0));
+
+ dw_dp_init_hw(dp);
+ regmap_read_poll_timeout(dp->regmap, DW_DP_HPD_STATUS, val,
+ FIELD_GET(HPD_HOT_PLUG, val), 200, 200000);
+ regmap_write(dp->regmap, DW_DP_HPD_STATUS, HPD_HOT_PLUG);
+ enable_irq(dp->irq);
+}
+
+static void dw_dp_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct dw_dp *dp = bridge_to_dp(bridge);
+
+ dw_dp_video_disable(dp);
+ dw_dp_link_disable(dp);
+ bitmap_zero(dp->sdp_reg_bank, SDP_REG_BANK_SIZE);
+ dw_dp_reset(dp);
+}
+
+static bool dw_dp_hpd_detect_link(struct dw_dp *dp, struct drm_connector *connector)
+{
+ int ret;
+
+ ret = phy_power_on(dp->phy);
+ if (ret < 0)
+ return false;
+ ret = dw_dp_link_parse(dp, connector);
+ phy_power_off(dp->phy);
+
+ return !ret;
+}
+
+static enum drm_connector_status dw_dp_bridge_detect(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct dw_dp *dp = bridge_to_dp(bridge);
+
+ if (!dw_dp_hpd_detect(dp))
+ return connector_status_disconnected;
+
+ if (!dw_dp_hpd_detect_link(dp, connector))
+ return connector_status_disconnected;
+
+ return connector_status_connected;
+}
+
+static const struct drm_edid *dw_dp_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct dw_dp *dp = bridge_to_dp(bridge);
+ const struct drm_edid *edid;
+ int ret;
+
+ ret = phy_power_on(dp->phy);
+ if (ret)
+ return NULL;
+
+ edid = drm_edid_read_ddc(connector, &dp->aux.ddc);
+
+ phy_power_off(dp->phy);
+
+ return edid;
+}
+
+static u32 *dw_dp_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ unsigned int *num_output_fmts)
+{
+ struct dw_dp *dp = bridge_to_dp(bridge);
+ struct dw_dp_link *link = &dp->link;
+ struct drm_display_info *di = &conn_state->connector->display_info;
+ struct drm_display_mode mode = crtc_state->mode;
+ const struct dw_dp_output_format *fmt;
+ u32 i, j = 0;
+ u32 *output_fmts;
+
+ *num_output_fmts = 0;
+
+ output_fmts = kcalloc(ARRAY_SIZE(dw_dp_output_formats), sizeof(*output_fmts), GFP_KERNEL);
+ if (!output_fmts)
+ return NULL;
+
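+ /*
+ * Keep only formats within the connector's max_bpc that the sink
+ * advertises and that fit the link's current lane count and rate.
+ */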
+ for (i = 0; i < ARRAY_SIZE(dw_dp_output_formats); i++) {
+ fmt = &dw_dp_output_formats[i];
+
+ if (fmt->bpc > conn_state->max_bpc)
+ continue;
+
+ if (!(fmt->color_format & di->color_formats))
+ continue;
+
+ if (fmt->color_format == DRM_COLOR_FORMAT_YCBCR420 &&
+ !link->vsc_sdp_supported)
+ continue;
+
+ if (fmt->color_format != DRM_COLOR_FORMAT_YCBCR420 &&
+ drm_mode_is_420_only(di, &mode))
+ continue;
+
+ if (!dw_dp_bandwidth_ok(dp, &mode, fmt->bpp, link->lanes, link->rate))
+ continue;
+
+ output_fmts[j++] = fmt->bus_format;
+ }
+
+ *num_output_fmts = j;
+
+ return output_fmts;
+}
+
+static struct drm_bridge_state *dw_dp_bridge_atomic_duplicate_state(struct drm_bridge *bridge)
+{
+ struct dw_dp_bridge_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_bridge_duplicate_state(bridge, &state->base);
+
+ return &state->base;
+}
+
+static const struct drm_bridge_funcs dw_dp_bridge_funcs = {
+ .atomic_duplicate_state = dw_dp_bridge_atomic_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt,
+ .atomic_get_output_bus_fmts = dw_dp_bridge_atomic_get_output_bus_fmts,
+ .atomic_check = dw_dp_bridge_atomic_check,
+ .mode_valid = dw_dp_bridge_mode_valid,
+ .atomic_enable = dw_dp_bridge_atomic_enable,
+ .atomic_disable = dw_dp_bridge_atomic_disable,
+ .detect = dw_dp_bridge_detect,
+ .edid_read = dw_dp_bridge_edid_read,
+};
+
+static int dw_dp_link_retrain(struct dw_dp *dp)
+{
+ struct drm_device *dev = dp->bridge.dev;
+ struct drm_modeset_acquire_ctx ctx;
+ int ret;
+
+ if (!dw_dp_needs_link_retrain(dp))
+ return 0;
+
+ dev_dbg(dp->dev, "Retraining link\n");
+
+ drm_modeset_acquire_init(&ctx, 0);
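+ /*
+ * Usual modeset deadlock avoidance: back off and retry until
+ * connection_mutex is acquired without -EDEADLK.
+ */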
+ for (;;) {
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
+ if (ret != -EDEADLK)
+ break;
+
+ drm_modeset_backoff(&ctx);
+ }
+
+ if (!ret)
+ ret = dw_dp_link_train(dp);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ return ret;
+}
+
+static void dw_dp_hpd_work(struct work_struct *work)
+{
+ struct dw_dp *dp = container_of(work, struct dw_dp, hpd_work);
+ bool long_hpd;
+ int ret;
+
+ mutex_lock(&dp->irq_lock);
+ long_hpd = dp->hotplug.long_hpd;
+ mutex_unlock(&dp->irq_lock);
+
+ dev_dbg(dp->dev, "[drm] Get hpd irq - %s\n", long_hpd ? "long" : "short");
+
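+ /*
+ * A short pulse signals a sink event such as a degraded link,
+ * a long pulse signals (un)plug and needs a full reprobe.
+ */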
+ if (!long_hpd) {
+ if (dw_dp_needs_link_retrain(dp)) {
+ ret = dw_dp_link_retrain(dp);
+ if (ret)
+ dev_warn(dp->dev, "Retrain link failed\n");
+ }
+ } else {
+ drm_helper_hpd_irq_event(dp->bridge.dev);
+ }
+}
+
+static void dw_dp_handle_hpd_event(struct dw_dp *dp)
+{
+ u32 value;
+
+ mutex_lock(&dp->irq_lock);
+ regmap_read(dp->regmap, DW_DP_HPD_STATUS, &value);
+
+ if (value & HPD_IRQ) {
+ dev_dbg(dp->dev, "IRQ from the HPD\n");
+ dp->hotplug.long_hpd = false;
+ regmap_write(dp->regmap, DW_DP_HPD_STATUS, HPD_IRQ);
+ }
+
+ if (value & HPD_HOT_PLUG) {
+ dev_dbg(dp->dev, "Hot plug detected\n");
+ dp->hotplug.long_hpd = true;
+ regmap_write(dp->regmap, DW_DP_HPD_STATUS, HPD_HOT_PLUG);
+ }
+
+ if (value & HPD_HOT_UNPLUG) {
+ dev_dbg(dp->dev, "Unplug detected\n");
+ dp->hotplug.long_hpd = true;
+ regmap_write(dp->regmap, DW_DP_HPD_STATUS, HPD_HOT_UNPLUG);
+ }
+ mutex_unlock(&dp->irq_lock);
+
+ schedule_work(&dp->hpd_work);
+}
+
+static irqreturn_t dw_dp_irq(int irq, void *data)
+{
+ struct dw_dp *dp = data;
+ u32 value;
+
+ regmap_read(dp->regmap, DW_DP_GENERAL_INTERRUPT, &value);
+ if (!value)
+ return IRQ_NONE;
+
+ if (value & HPD_EVENT)
+ dw_dp_handle_hpd_event(dp);
+
+ if (value & AUX_REPLY_EVENT) {
+ regmap_write(dp->regmap, DW_DP_GENERAL_INTERRUPT, AUX_REPLY_EVENT);
+ complete(&dp->complete);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static const struct regmap_range dw_dp_readable_ranges[] = {
+ regmap_reg_range(DW_DP_VERSION_NUMBER, DW_DP_ID),
+ regmap_reg_range(DW_DP_CONFIG_REG1, DW_DP_CONFIG_REG3),
+ regmap_reg_range(DW_DP_CCTL, DW_DP_SOFT_RESET_CTRL),
+ regmap_reg_range(DW_DP_VSAMPLE_CTRL, DW_DP_VIDEO_HBLANK_INTERVAL),
+ regmap_reg_range(DW_DP_AUD_CONFIG1, DW_DP_AUD_CONFIG1),
+ regmap_reg_range(DW_DP_SDP_VERTICAL_CTRL, DW_DP_SDP_STATUS_EN),
+ regmap_reg_range(DW_DP_PHYIF_CTRL, DW_DP_PHYIF_PWRDOWN_CTRL),
+ regmap_reg_range(DW_DP_AUX_CMD, DW_DP_AUX_DATA3),
+ regmap_reg_range(DW_DP_GENERAL_INTERRUPT, DW_DP_HPD_INTERRUPT_ENABLE),
+};
+
+static const struct regmap_access_table dw_dp_readable_table = {
+ .yes_ranges = dw_dp_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dw_dp_readable_ranges),
+};
+
+static const struct regmap_config dw_dp_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .max_register = DW_DP_MAX_REGISTER,
+ .rd_table = &dw_dp_readable_table,
+};
+
+static void dw_dp_phy_exit(void *data)
+{
+ struct dw_dp *dp = data;
+
+ phy_exit(dp->phy);
+}
+
+struct dw_dp *dw_dp_bind(struct device *dev, struct drm_encoder *encoder,
+ const struct dw_dp_plat_data *plat_data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dw_dp *dp;
+ struct drm_bridge *bridge;
+ void __iomem *res;
+ int ret;
+
+ dp = devm_drm_bridge_alloc(dev, struct dw_dp, bridge, &dw_dp_bridge_funcs);
+ if (IS_ERR(dp))
+ return ERR_CAST(dp);
+
+ dp->dev = dev;
+ dp->pixel_mode = DW_DP_MP_QUAD_PIXEL;
+
+ dp->plat_data.max_link_rate = plat_data->max_link_rate;
+ bridge = &dp->bridge;
+ mutex_init(&dp->irq_lock);
+ INIT_WORK(&dp->hpd_work, dw_dp_hpd_work);
+ init_completion(&dp->complete);
+
+ res = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(res))
+ return ERR_CAST(res);
+
+ dp->regmap = devm_regmap_init_mmio(dev, res, &dw_dp_regmap_config);
+ if (IS_ERR(dp->regmap)) {
+ dev_err_probe(dev, PTR_ERR(dp->regmap), "failed to create regmap\n");
+ return ERR_CAST(dp->regmap);
+ }
+
+ dp->phy = devm_of_phy_get(dev, dev->of_node, NULL);
+ if (IS_ERR(dp->phy)) {
+ dev_err_probe(dev, PTR_ERR(dp->phy), "failed to get phy\n");
+ return ERR_CAST(dp->phy);
+ }
+
+ dp->apb_clk = devm_clk_get_enabled(dev, "apb");
+ if (IS_ERR(dp->apb_clk)) {
+ dev_err_probe(dev, PTR_ERR(dp->apb_clk), "failed to get apb clock\n");
+ return ERR_CAST(dp->apb_clk);
+ }
+
+ dp->aux_clk = devm_clk_get_enabled(dev, "aux");
+ if (IS_ERR(dp->aux_clk)) {
+ dev_err_probe(dev, PTR_ERR(dp->aux_clk), "failed to get aux clock\n");
+ return ERR_CAST(dp->aux_clk);
+ }
+
+ dp->i2s_clk = devm_clk_get(dev, "i2s");
+ if (IS_ERR(dp->i2s_clk)) {
+ dev_err_probe(dev, PTR_ERR(dp->i2s_clk), "failed to get i2s clock\n");
+ return ERR_CAST(dp->i2s_clk);
+ }
+
+ dp->spdif_clk = devm_clk_get(dev, "spdif");
+ if (IS_ERR(dp->spdif_clk)) {
+ dev_err_probe(dev, PTR_ERR(dp->spdif_clk), "failed to get spdif clock\n");
+ return ERR_CAST(dp->spdif_clk);
+ }
+
+ dp->hdcp_clk = devm_clk_get(dev, "hdcp");
+ if (IS_ERR(dp->hdcp_clk)) {
+ dev_err_probe(dev, PTR_ERR(dp->hdcp_clk), "failed to get hdcp clock\n");
+ return ERR_CAST(dp->hdcp_clk);
+ }
+
+ dp->rstc = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(dp->rstc)) {
+ dev_err_probe(dev, PTR_ERR(dp->rstc), "failed to get reset control\n");
+ return ERR_CAST(dp->rstc);
+ }
+
+ bridge->of_node = dev->of_node;
+ bridge->ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD;
+ bridge->type = DRM_MODE_CONNECTOR_DisplayPort;
+ bridge->ycbcr_420_allowed = true;
+
+ dp->aux.dev = dev;
+ dp->aux.drm_dev = encoder->dev;
+ dp->aux.name = dev_name(dev);
+ dp->aux.transfer = dw_dp_aux_transfer;
+ ret = drm_dp_aux_register(&dp->aux);
+ if (ret) {
+ dev_err_probe(dev, ret, "Aux register failed\n");
+ return ERR_PTR(ret);
+ }
+
+ ret = drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to attach bridge\n");
+ return ERR_PTR(ret);
+ }
+
+ dw_dp_init_hw(dp);
+
+ ret = phy_init(dp->phy);
+ if (ret) {
+ dev_err_probe(dev, ret, "phy init failed\n");
+ return ERR_PTR(ret);
+ }
+
+ ret = devm_add_action_or_reset(dev, dw_dp_phy_exit, dp);
+ if (ret)
+ return ERR_PTR(ret);
+
+ dp->irq = platform_get_irq(pdev, 0);
+ if (dp->irq < 0)
+ return ERR_PTR(dp->irq);
+
+ ret = devm_request_threaded_irq(dev, dp->irq, NULL, dw_dp_irq,
+ IRQF_ONESHOT, dev_name(dev), dp);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to request irq\n");
+ return ERR_PTR(ret);
+ }
+
+ return dp;
+}
+EXPORT_SYMBOL_GPL(dw_dp_bind);
+
+MODULE_AUTHOR("Andy Yan <andyshrk@163.com>");
+MODULE_DESCRIPTION("DW DP Core Library");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index e3a8c0c0c945..ae0d08e5e960 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -393,6 +393,17 @@ static int __maybe_unused ti_sn65dsi86_resume(struct device *dev)
gpiod_set_value_cansleep(pdata->enable_gpio, 1);
/*
+ * After EN is deasserted and an external clock is detected, the bridge
+ * will sample GPIO3:1 to determine its frequency. The driver will
+ * overwrite this setting in ti_sn_bridge_set_refclk_freq(). But this is
+ * racy. Thus we have to wait a couple of microseconds. According to the
+ * datasheet the GPIO lines have to be stable for at least 5 us (td5), but
+ * that seems not to be enough and the refclk frequency value is still lost or
+ * overwritten by the bridge itself. Waiting for 20us seems to work.
+ */
+ usleep_range(20, 30);
+
+ /*
* If we have a reference clock we can enable communication w/ the
* panel (including the aux channel) w/out any need for an input clock
* so we can do it in resume which lets us read the EDID before
@@ -1836,7 +1847,7 @@ static int ti_sn_gpio_probe(struct auxiliary_device *adev,
pdata->gchip.direction_input = ti_sn_bridge_gpio_direction_input;
pdata->gchip.direction_output = ti_sn_bridge_gpio_direction_output;
pdata->gchip.get = ti_sn_bridge_gpio_get;
- pdata->gchip.set_rv = ti_sn_bridge_gpio_set;
+ pdata->gchip.set = ti_sn_bridge_gpio_set;
pdata->gchip.can_sleep = true;
pdata->gchip.names = ti_sn_bridge_gpio_names;
pdata->gchip.ngpio = SN_NUM_GPIOS;
diff --git a/drivers/gpu/drm/bridge/waveshare-dsi.c b/drivers/gpu/drm/bridge/waveshare-dsi.c
new file mode 100644
index 000000000000..43f4e7412d72
--- /dev/null
+++ b/drivers/gpu/drm/bridge/waveshare-dsi.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 NXP
+ * Based on panel-raspberrypi-touchscreen by Broadcom
+ */
+
+#include <linux/backlight.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+
+struct ws_bridge {
+ struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
+ struct backlight_device *backlight;
+ struct device *dev;
+ struct regmap *reg_map;
+};
+
+static const struct regmap_config ws_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+};
+
+static struct ws_bridge *bridge_to_ws_bridge(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct ws_bridge, bridge);
+}
+
+static int ws_bridge_attach_dsi(struct ws_bridge *ws)
+{
+ const struct mipi_dsi_device_info info = {
+ .type = "ws-bridge",
+ .channel = 0,
+ .node = NULL,
+ };
+ struct device_node *dsi_host_node;
+ struct device *dev = ws->dev;
+ struct mipi_dsi_device *dsi;
+ struct mipi_dsi_host *host;
+ int ret;
+
+ dsi_host_node = of_graph_get_remote_node(dev->of_node, 0, 0);
+ if (!dsi_host_node) {
+ dev_err(dev, "Failed to get remote port\n");
+ return -ENODEV;
+ }
+ host = of_find_mipi_dsi_host_by_node(dsi_host_node);
+ of_node_put(dsi_host_node);
+ if (!host)
+ return dev_err_probe(dev, -EPROBE_DEFER, "Failed to find dsi_host\n");
+
+ dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
+ if (IS_ERR(dsi))
+ return dev_err_probe(dev, PTR_ERR(dsi), "Failed to create dsi device\n");
+
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->lanes = 2;
+
+ ret = devm_mipi_dsi_attach(dev, dsi);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to attach dsi to host\n");
+
+ return 0;
+}
+
+static int ws_bridge_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
+{
+ struct ws_bridge *ws = bridge_to_ws_bridge(bridge);
+ int ret;
+
+ ret = ws_bridge_attach_dsi(ws);
+ if (ret)
+ return ret;
+
+ return drm_bridge_attach(encoder, ws->next_bridge,
+ &ws->bridge, flags);
+}
+
+static void ws_bridge_bridge_enable(struct drm_bridge *bridge)
+{
+ struct ws_bridge *ws = bridge_to_ws_bridge(bridge);
+
+ regmap_write(ws->reg_map, 0xad, 0x01);
+ backlight_enable(ws->backlight);
+}
+
+static void ws_bridge_bridge_disable(struct drm_bridge *bridge)
+{
+ struct ws_bridge *ws = bridge_to_ws_bridge(bridge);
+
+ backlight_disable(ws->backlight);
+ regmap_write(ws->reg_map, 0xad, 0x00);
+}
+
+static const struct drm_bridge_funcs ws_bridge_bridge_funcs = {
+ .enable = ws_bridge_bridge_enable,
+ .disable = ws_bridge_bridge_disable,
+ .attach = ws_bridge_bridge_attach,
+};
+
+static int ws_bridge_bl_update_status(struct backlight_device *bl)
+{
+ struct ws_bridge *ws = bl_get_data(bl);
+
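+ /* The brightness register appears to be inverted: 0x00 is full on, 0xff is off */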
+ regmap_write(ws->reg_map, 0xab, 0xff - backlight_get_brightness(bl));
+ regmap_write(ws->reg_map, 0xaa, 0x01);
+
+ return 0;
+}
+
+static const struct backlight_ops ws_bridge_bl_ops = {
+ .update_status = ws_bridge_bl_update_status,
+};
+
+static struct backlight_device *ws_bridge_create_backlight(struct ws_bridge *ws)
+{
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 255,
+ .max_brightness = 255,
+ };
+ struct device *dev = ws->dev;
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, ws,
+ &ws_bridge_bl_ops, &props);
+}
+
+static int ws_bridge_probe(struct i2c_client *i2c)
+{
+ struct device *dev = &i2c->dev;
+ struct drm_panel *panel;
+ struct ws_bridge *ws;
+ int ret;
+
+ ws = devm_drm_bridge_alloc(dev, struct ws_bridge, bridge, &ws_bridge_bridge_funcs);
+ if (IS_ERR(ws))
+ return PTR_ERR(ws);
+
+ ws->dev = dev;
+
+ ws->reg_map = devm_regmap_init_i2c(i2c, &ws_regmap_config);
+ if (IS_ERR(ws->reg_map))
+ return dev_err_probe(dev, PTR_ERR(ws->reg_map), "Failed to allocate regmap\n");
+
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 1, -1, &panel, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to find remote panel\n");
+
+ ws->next_bridge = devm_drm_panel_bridge_add(dev, panel);
+ if (IS_ERR(ws->next_bridge))
+ return PTR_ERR(ws->next_bridge);
+
+ ws->backlight = ws_bridge_create_backlight(ws);
+ if (IS_ERR(ws->backlight)) {
+ ret = PTR_ERR(ws->backlight);
+ dev_err(dev, "Failed to create backlight: %d\n", ret);
+ return ret;
+ }
+
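+ /*
+ * Bring-up writes inherited from the reference driver; the I2C
+ * register map of this bridge is not publicly documented.
+ */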
+ regmap_write(ws->reg_map, 0xc0, 0x01);
+ regmap_write(ws->reg_map, 0xc2, 0x01);
+ regmap_write(ws->reg_map, 0xac, 0x01);
+
+ ws->bridge.type = DRM_MODE_CONNECTOR_DPI;
+ ws->bridge.of_node = dev->of_node;
+ devm_drm_bridge_add(dev, &ws->bridge);
+
+ return 0;
+}
+
+static const struct of_device_id ws_bridge_of_ids[] = {
+ {.compatible = "waveshare,dsi2dpi",},
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, ws_bridge_of_ids);
+
+static struct i2c_driver ws_bridge_driver = {
+ .driver = {
+ .name = "ws_dsi2dpi",
+ .of_match_table = ws_bridge_of_ids,
+ },
+ .probe = ws_bridge_probe,
+};
+module_i2c_driver(ws_bridge_driver);
+
+MODULE_AUTHOR("Joseph Guo <qijian.guo@nxp.com>");
+MODULE_DESCRIPTION("Waveshare DSI2DPI bridge driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c
index 5eb7e9bfe361..baacd21e7341 100644
--- a/drivers/gpu/drm/display/drm_bridge_connector.c
+++ b/drivers/gpu/drm/display/drm_bridge_connector.c
@@ -20,6 +20,7 @@
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/display/drm_hdcp_helper.h>
#include <drm/display/drm_hdmi_audio_helper.h>
#include <drm/display/drm_hdmi_cec_helper.h>
#include <drm/display/drm_hdmi_helper.h>
@@ -641,6 +642,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
struct drm_bridge *bridge, *panel_bridge = NULL;
unsigned int supported_formats = BIT(HDMI_COLORSPACE_RGB);
unsigned int max_bpc = 8;
+ bool support_hdcp = false;
int connector_type;
int ret;
@@ -749,12 +751,11 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
return ERR_PTR(-EINVAL);
}
- if (!drm_bridge_get_next_bridge(bridge))
+ if (drm_bridge_is_last(bridge))
connector_type = bridge->type;
#ifdef CONFIG_OF
- if (!drm_bridge_get_next_bridge(bridge) &&
- bridge->of_node)
+ if (drm_bridge_is_last(bridge) && bridge->of_node)
connector->fwnode = fwnode_handle_get(of_fwnode_handle(bridge->of_node));
#endif
@@ -763,6 +764,9 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (drm_bridge_is_panel(bridge))
panel_bridge = bridge;
+
+ if (bridge->support_hdcp)
+ support_hdcp = true;
}
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -772,8 +776,6 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (!connector->ycbcr_420_allowed)
supported_formats &= ~BIT(HDMI_COLORSPACE_YUV420);
- bridge = bridge_connector->bridge_hdmi;
-
ret = drmm_connector_hdmi_init(drm, connector,
bridge_connector->bridge_hdmi->vendor,
bridge_connector->bridge_hdmi->product,
@@ -816,6 +818,8 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (bridge_connector->bridge_hdmi_cec &&
bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER) {
+ bridge = bridge_connector->bridge_hdmi_cec;
+
ret = drmm_connector_hdmi_cec_notifier_register(connector,
NULL,
bridge->hdmi_cec_dev);
@@ -825,6 +829,8 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (bridge_connector->bridge_hdmi_cec &&
bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_ADAPTER) {
+ bridge = bridge_connector->bridge_hdmi_cec;
+
ret = drmm_connector_hdmi_cec_register(connector,
&drm_bridge_connector_hdmi_cec_funcs,
bridge->hdmi_cec_adapter_name,
@@ -845,6 +851,10 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (panel_bridge)
drm_panel_bridge_set_orientation(connector, panel_bridge);
+ if (support_hdcp && IS_REACHABLE(CONFIG_DRM_DISPLAY_HELPER) &&
+ IS_ENABLED(CONFIG_DRM_DISPLAY_HDCP_HELPER))
+ drm_connector_attach_content_protection_property(connector, true);
+
return connector;
}
EXPORT_SYMBOL_GPL(drm_bridge_connector_init);
diff --git a/drivers/gpu/drm/display/drm_dp_cec.c b/drivers/gpu/drm/display/drm_dp_cec.c
index 3b50d817c839..436bfe9f9081 100644
--- a/drivers/gpu/drm/display/drm_dp_cec.c
+++ b/drivers/gpu/drm/display/drm_dp_cec.c
@@ -42,7 +42,7 @@
*
* https://hverkuil.home.xs4all.nl/cec-status.txt
*
- * Please mail me (hverkuil@xs4all.nl) if you find an adapter that works
+ * Please mail me (hverkuil@kernel.org) if you find an adapter that works
* and is not yet listed there.
*
* Note that the current implementation does not support CEC over an MST hub.
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index 1ecc3df7e316..4aaeae4fa03c 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -3962,6 +3962,7 @@ int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_bac
int ret;
unsigned int offset = DP_EDP_BACKLIGHT_BRIGHTNESS_MSB;
u8 buf[3] = { 0 };
+ size_t len = 2;
/* The panel uses the PWM for controlling brightness levels */
if (!(bl->aux_set || bl->luminance_set))
@@ -3974,6 +3975,7 @@ int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_bac
buf[1] = (level & 0x00ff00) >> 8;
buf[2] = (level & 0xff0000) >> 16;
offset = DP_EDP_PANEL_TARGET_LUMINANCE_VALUE;
+ len = 3;
} else if (bl->lsb_reg_used) {
buf[0] = (level & 0xff00) >> 8;
buf[1] = (level & 0x00ff);
@@ -3981,7 +3983,7 @@ int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_bac
buf[0] = level;
}
- ret = drm_dp_dpcd_write_data(aux, offset, buf, sizeof(buf));
+ ret = drm_dp_dpcd_write_data(aux, offset, buf, len);
if (ret < 0) {
drm_err(aux->drm_dev,
"%s: Failed to write aux backlight level: %d\n",
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index ef56b474acf5..d5ebe6ea0acb 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -456,6 +456,7 @@ mode_fixup(struct drm_atomic_state *state)
ret = drm_atomic_bridge_chain_check(bridge,
new_crtc_state,
new_conn_state);
+ drm_bridge_put(bridge);
if (ret) {
drm_dbg_atomic(encoder->dev, "Bridge atomic check failed\n");
return ret;
@@ -527,6 +528,7 @@ static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
bridge = drm_bridge_chain_get_first_bridge(encoder);
ret = drm_bridge_chain_mode_valid(bridge, &connector->display_info,
mode);
+ drm_bridge_put(bridge);
if (ret != MODE_OK) {
drm_dbg_atomic(encoder->dev, "[BRIDGE] mode_valid() failed\n");
return ret;
@@ -1212,6 +1214,7 @@ encoder_bridge_disable(struct drm_device *dev, struct drm_atomic_state *state)
*/
bridge = drm_bridge_chain_get_first_bridge(encoder);
drm_atomic_bridge_chain_disable(bridge, state);
+ drm_bridge_put(bridge);
/* Right function depends upon target state. */
if (funcs) {
@@ -1329,6 +1332,7 @@ encoder_bridge_post_disable(struct drm_device *dev, struct drm_atomic_state *sta
*/
bridge = drm_bridge_chain_get_first_bridge(encoder);
drm_atomic_bridge_chain_post_disable(bridge, state);
+ drm_bridge_put(bridge);
}
}
@@ -1501,6 +1505,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *state)
bridge = drm_bridge_chain_get_first_bridge(encoder);
drm_bridge_chain_mode_set(bridge, mode, adjusted_mode);
+ drm_bridge_put(bridge);
}
}
@@ -1580,6 +1585,7 @@ encoder_bridge_pre_enable(struct drm_device *dev, struct drm_atomic_state *state
*/
bridge = drm_bridge_chain_get_first_bridge(encoder);
drm_atomic_bridge_chain_pre_enable(bridge, state);
+ drm_bridge_put(bridge);
}
}
@@ -1655,6 +1661,7 @@ encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state)
}
drm_atomic_bridge_chain_enable(bridge, state);
+ drm_bridge_put(bridge);
}
}
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index ecc73d52bfae..85dbdaa4a2e2 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -1078,19 +1078,20 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
}
if (async_flip) {
- /* check if the prop does a nop change */
- if ((prop != config->prop_fb_id &&
- prop != config->prop_in_fence_fd &&
- prop != config->prop_fb_damage_clips)) {
- ret = drm_atomic_plane_get_property(plane, plane_state,
- prop, &old_val);
- ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
- }
+ /* no-op changes are always allowed */
+ ret = drm_atomic_plane_get_property(plane, plane_state,
+ prop, &old_val);
+ ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
- /* ask the driver if this non-primary plane is supported */
- if (plane->type != DRM_PLANE_TYPE_PRIMARY) {
- ret = -EINVAL;
+ /* fail everything that isn't no-op or a pure flip */
+ if (ret && prop != config->prop_fb_id &&
+ prop != config->prop_in_fence_fd &&
+ prop != config->prop_fb_damage_clips) {
+ break;
+ }
+ if (ret && plane->type != DRM_PLANE_TYPE_PRIMARY) {
+ /* ask the driver if this non-primary plane is supported */
if (plane_funcs && plane_funcs->atomic_async_check)
ret = plane_funcs->atomic_async_check(plane, state, true);
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index dd45d9b504d8..d031447eebc9 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -941,11 +941,11 @@ static int select_bus_fmt_recursive(struct drm_bridge *first_bridge,
{
unsigned int i, num_in_bus_fmts = 0;
struct drm_bridge_state *cur_state;
- struct drm_bridge *prev_bridge;
+ struct drm_bridge *prev_bridge __free(drm_bridge_put) =
+ drm_bridge_get_prev_bridge(cur_bridge);
u32 *in_bus_fmts;
int ret;
- prev_bridge = drm_bridge_get_prev_bridge(cur_bridge);
cur_state = drm_atomic_get_new_bridge_state(crtc_state->state,
cur_bridge);
@@ -1227,6 +1227,7 @@ EXPORT_SYMBOL(drm_atomic_bridge_chain_check);
/**
* drm_bridge_detect - check if anything is attached to the bridge output
* @bridge: bridge control structure
+ * @connector: attached connector
*
* If the bridge supports output detection, as reported by the
* DRM_BRIDGE_OP_DETECT bridge ops flag, call &drm_bridge_funcs.detect for the
@@ -1434,6 +1435,9 @@ static void drm_bridge_debugfs_show_bridge(struct drm_printer *p,
unsigned int idx)
{
drm_printf(p, "bridge[%u]: %ps\n", idx, bridge->funcs);
+
+ drm_printf(p, "\trefcount: %u\n", kref_read(&bridge->refcount));
+
drm_printf(p, "\ttype: [%d] %s\n",
bridge->type,
drm_get_connector_type_name(bridge->type));
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index 37a3270bc3c2..131c1c9ae92f 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -817,6 +817,40 @@ void drm_crtc_load_palette_8(struct drm_crtc *crtc, const struct drm_color_lut *
}
EXPORT_SYMBOL(drm_crtc_load_palette_8);
+static void fill_palette_332(struct drm_crtc *crtc, u16 r, u16 g, u16 b,
+ drm_crtc_set_lut_func set_palette)
+{
+ unsigned int i = (r << 5) | (g << 2) | b; /* 8-bit palette index */
+
+ /* Expand R (3-bit) G (3-bit) and B (2-bit) values to 16-bit values */
+ r = (r << 13) | (r << 10) | (r << 7) | (r << 4) | (r << 1) | (r >> 2);
+ g = (g << 13) | (g << 10) | (g << 7) | (g << 4) | (g << 1) | (g >> 2);
+ b = (b << 14) | (b << 12) | (b << 10) | (b << 8) | (b << 6) | (b << 4) | (b << 2) | b;
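+ /* e.g. a 3-bit r of 0b111 expands to 0xffff, while 0 stays 0x0000 */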
+
+ set_palette(crtc, i, r, g, b);
+}
+
+/**
+ * drm_crtc_fill_palette_332 - Programs a default palette for R332-like formats
+ * @crtc: The displaying CRTC
+ * @set_palette: Callback for programming the hardware gamma LUT
+ *
+ * Programs an RGB332 palette to hardware.
+ */
+void drm_crtc_fill_palette_332(struct drm_crtc *crtc, drm_crtc_set_lut_func set_palette)
+{
+ unsigned int r, g, b;
+
+ /* The 8-8-4 loop bounds are the number of levels per channel (3-3-2 bits). */
+ for (r = 0; r < 8; ++r) {
+ for (g = 0; g < 8; ++g) {
+ for (b = 0; b < 4; ++b)
+ fill_palette_332(crtc, r, g, b, set_palette);
+ }
+ }
+}
+EXPORT_SYMBOL(drm_crtc_fill_palette_332);
+
static void fill_palette_8(struct drm_crtc *crtc, unsigned int i,
drm_crtc_set_lut_func set_palette)
{
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index cdd591b11488..8e3cb08241c8 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -532,6 +532,8 @@ static const char *drm_get_wedge_recovery(unsigned int opt)
return "rebind";
case DRM_WEDGE_RECOVERY_BUS_RESET:
return "bus-reset";
+ case DRM_WEDGE_RECOVERY_VENDOR:
+ return "vendor-specific";
default:
return NULL;
}
@@ -694,7 +696,6 @@ static void drm_dev_init_release(struct drm_device *dev, void *res)
mutex_destroy(&dev->master_mutex);
mutex_destroy(&dev->clientlist_mutex);
mutex_destroy(&dev->filelist_mutex);
- mutex_destroy(&dev->struct_mutex);
}
static int drm_dev_init(struct drm_device *dev,
@@ -735,7 +736,6 @@ static int drm_dev_init(struct drm_device *dev,
INIT_LIST_HEAD(&dev->vblank_event_list);
spin_lock_init(&dev->event_lock);
- mutex_init(&dev->struct_mutex);
mutex_init(&dev->filelist_mutex);
mutex_init(&dev->clientlist_mutex);
mutex_init(&dev->master_mutex);
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index 8f3daf38ca63..006836554cc2 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -1243,6 +1243,9 @@ int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t d
} else if (dst_format == DRM_FORMAT_BGRX8888) {
drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state);
return 0;
+ } else if (dst_format == DRM_FORMAT_RGB332) {
+ drm_fb_xrgb8888_to_rgb332(dst, dst_pitch, src, fb, clip, state);
+ return 0;
}
}
@@ -1253,6 +1256,25 @@ int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t d
}
EXPORT_SYMBOL(drm_fb_blit);
+static void drm_fb_gray8_to_gray2_line(void *dbuf, const void *sbuf, unsigned int pixels)
+{
+ u8 *dbuf8 = dbuf;
+ const u8 *sbuf8 = sbuf;
+ u8 px;
+
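+ /*
+ * Pack four pixels per byte, scaling each 8-bit gray value to
+ * 2 bits with rounding; for a full byte the leftmost pixel ends
+ * up in the two least significant bits.
+ */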
+ while (pixels) {
+ unsigned int i, bits = min(pixels, 4U);
+ u8 byte = 0;
+
+ for (i = 0; i < bits; i++, pixels--) {
+ byte >>= 2;
+ px = (*sbuf8++ * 3 + 127) / 255;
+ byte |= (px & 0x03) << 6;
+ }
+ *dbuf8++ = byte;
+ }
+}
+
static void drm_fb_gray8_to_mono_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
u8 *dbuf8 = dbuf;
@@ -1359,3 +1381,92 @@ void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitc
}
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_mono);
+
+/**
+ * drm_fb_xrgb8888_to_gray2 - Convert XRGB8888 to gray2
+ * @dst: Array of gray2 destination buffer
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffers
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
+ *
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. Destination and framebuffer formats must match. The
+ * parameters @dst, @dst_pitch and @src refer to arrays. Each array must have at
+ * least as many entries as there are planes in @fb's format. Each entry stores the
+ * value for the format's respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner). The first pixel (upper left corner of the clip rectangle) will
+ * be converted and copied to the two first bits (LSB) in the first byte of the gray2
+ * destination buffer. If the caller requires that the first pixel in a byte must
+ * be located at an x-coordinate that is a multiple of 8, then the caller must take
+ * care itself of supplying a suitable clip rectangle.
+ *
+ * DRM doesn't have native gray2 support. Drivers can use this function for
+ * gray2 devices that don't support XRGB8888 natively. Such drivers can
+ * announce the commonly supported XR24 format to userspace and use this function
+ * to convert to the native format.
+ */
+void drm_fb_xrgb8888_to_gray2(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
+{
+ static const unsigned int default_dst_pitch[DRM_FORMAT_MAX_PLANES] = {
+ 0, 0, 0, 0
+ };
+ unsigned int linepixels = drm_rect_width(clip);
+ unsigned int lines = drm_rect_height(clip);
+ unsigned int cpp = fb->format->cpp[0];
+ unsigned int len_src32 = linepixels * cpp;
+ struct drm_device *dev = fb->dev;
+ void *vaddr = src[0].vaddr;
+ unsigned int dst_pitch_0;
+ unsigned int y;
+ u8 *gray2 = dst[0].vaddr, *gray8;
+ u32 *src32;
+
+ if (drm_WARN_ON(dev, fb->format->format != DRM_FORMAT_XRGB8888))
+ return;
+
+ if (!dst_pitch)
+ dst_pitch = default_dst_pitch;
+ dst_pitch_0 = dst_pitch[0];
+
+ /*
+ * The gray2 destination buffer contains 2 bits per pixel
+ */
+ if (!dst_pitch_0)
+ dst_pitch_0 = DIV_ROUND_UP(linepixels, 4);
+
+ /*
+ * The dma memory is write-combined so reads are uncached.
+ * Speed up by fetching one line at a time.
+ *
+ * Also, format conversion from XR24 to gray2 is done
+ * line-by-line but are converted to 8-bit grayscale as an
+ * intermediate step.
+ *
+ * Allocate a buffer to be used for both copying from the cma
+ * memory and to store the intermediate grayscale line pixels.
+ */
+ src32 = drm_format_conv_state_reserve(state, len_src32 + linepixels, GFP_KERNEL);
+ if (!src32)
+ return;
+
+ gray8 = (u8 *)src32 + len_src32;
+
+ vaddr += clip_offset(clip, fb->pitches[0], cpp);
+ for (y = 0; y < lines; y++) {
+ src32 = memcpy(src32, vaddr, len_src32);
+ drm_fb_xrgb8888_to_gray8_line(gray8, src32, linepixels);
+ drm_fb_gray8_to_gray2_line(gray2, gray8, linepixels);
+ vaddr += fb->pitches[0];
+ gray2 += dst_pitch_0;
+ }
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray2);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 6a44351e58b7..f884d155a832 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -187,6 +187,7 @@ void drm_gem_private_object_init(struct drm_device *dev,
kref_init(&obj->refcount);
obj->handle_count = 0;
obj->size = size;
+ mutex_init(&obj->gpuva.lock);
dma_resv_init(&obj->_resv);
if (!obj->resv)
obj->resv = &obj->_resv;
@@ -210,6 +211,7 @@ void drm_gem_private_object_fini(struct drm_gem_object *obj)
WARN_ON(obj->dma_buf);
dma_resv_fini(&obj->_resv);
+ mutex_destroy(&obj->gpuva.lock);
}
EXPORT_SYMBOL(drm_gem_private_object_fini);
@@ -332,7 +334,12 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
if (obj->funcs->close)
obj->funcs->close(obj, file_priv);
+ mutex_lock(&file_priv->prime.lock);
+
drm_prime_remove_buf_handle(&file_priv->prime, id);
+
+ mutex_unlock(&file_priv->prime.lock);
+
drm_vma_node_revoke(&obj->vma_node, file_priv);
drm_gem_object_handle_put_unlocked(obj);
@@ -621,7 +628,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
struct page **pages;
struct folio *folio;
struct folio_batch fbatch;
- long i, j, npages;
+ unsigned long i, j, npages;
if (WARN_ON(!obj->filp))
return ERR_PTR(-EINVAL);
@@ -645,7 +652,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
i = 0;
while (i < npages) {
- long nr;
+ unsigned long nr;
folio = shmem_read_folio_gfp(mapping, i,
mapping_gfp_mask(mapping));
if (IS_ERR(folio))
@@ -870,14 +877,6 @@ long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
-/**
- * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
- * @dev: drm_device
- * @data: ioctl data
- * @file_priv: drm file-private structure
- *
- * Releases the handle to an mm object.
- */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -893,17 +892,6 @@ drm_gem_close_ioctl(struct drm_device *dev, void *data,
return ret;
}
-/**
- * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
- * @dev: drm_device
- * @data: ioctl data
- * @file_priv: drm file-private structure
- *
- * Create a global name for an object, returning the name.
- *
- * Note that the name does not hold a reference; when the object
- * is freed, the name goes away.
- */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -943,17 +931,6 @@ err:
return ret;
}
-/**
- * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
- * @dev: drm_device
- * @data: ioctl data
- * @file_priv: drm file-private structure
- *
- * Open an object using the global name, returning a handle and the size.
- *
- * This handle (of course) holds a reference to the object, so the object
- * will not go away until the handle is deleted.
- */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -988,6 +965,57 @@ err:
return ret;
}
+int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_gem_change_handle *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_GEM))
+ return -EOPNOTSUPP;
+
+ obj = drm_gem_object_lookup(file_priv, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ if (args->handle == args->new_handle) {
+ drm_gem_object_put(obj);
+ return 0;
+ }
+
+ mutex_lock(&file_priv->prime.lock);
+
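+ /* Reserve exactly new_handle; idr_alloc() fails if it is already in use */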
+ spin_lock(&file_priv->table_lock);
+ ret = idr_alloc(&file_priv->object_idr, obj,
+ args->new_handle, args->new_handle + 1, GFP_NOWAIT);
+ spin_unlock(&file_priv->table_lock);
+
+ if (ret < 0)
+ goto out_unlock;
+
+ if (obj->dma_buf) {
+ ret = drm_prime_add_buf_handle(&file_priv->prime, obj->dma_buf, args->new_handle);
+ if (ret < 0) {
+ spin_lock(&file_priv->table_lock);
+ idr_remove(&file_priv->object_idr, args->new_handle);
+ spin_unlock(&file_priv->table_lock);
+ goto out_unlock;
+ }
+
+ drm_prime_remove_buf_handle(&file_priv->prime, args->handle);
+ }
+
+ ret = 0;
+
+ spin_lock(&file_priv->table_lock);
+ idr_remove(&file_priv->object_idr, args->handle);
+ spin_unlock(&file_priv->table_lock);
+
+out_unlock:
+ mutex_unlock(&file_priv->prime.lock);
+
+ drm_gem_object_put(obj);
+
+ return ret;
+}
+
/**
* drm_gem_open - initializes GEM file-private structures at devnode open time
* @dev: drm_device which is being opened by userspace
diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
index 5bb4c77db2c3..cb906765897e 100644
--- a/drivers/gpu/drm/drm_gpusvm.c
+++ b/drivers/gpu/drm/drm_gpusvm.c
@@ -271,107 +271,50 @@ npages_in_range(unsigned long start, unsigned long end)
}
/**
- * drm_gpusvm_range_find() - Find GPU SVM range from GPU SVM notifier
- * @notifier: Pointer to the GPU SVM notifier structure.
- * @start: Start address of the range
- * @end: End address of the range
+ * drm_gpusvm_notifier_find() - Find GPU SVM notifier from GPU SVM
+ * @gpusvm: Pointer to the GPU SVM structure.
+ * @start: Start address of the notifier
+ * @end: End address of the notifier
*
- * Return: A pointer to the drm_gpusvm_range if found or NULL
+ * Return: A pointer to the drm_gpusvm_notifier if found or NULL
*/
-struct drm_gpusvm_range *
-drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start,
- unsigned long end)
+struct drm_gpusvm_notifier *
+drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm, unsigned long start,
+ unsigned long end)
{
struct interval_tree_node *itree;
- itree = interval_tree_iter_first(&notifier->root, start, end - 1);
+ itree = interval_tree_iter_first(&gpusvm->root, start, end - 1);
if (itree)
- return container_of(itree, struct drm_gpusvm_range, itree);
+ return container_of(itree, struct drm_gpusvm_notifier, itree);
else
return NULL;
}
-EXPORT_SYMBOL_GPL(drm_gpusvm_range_find);
+EXPORT_SYMBOL_GPL(drm_gpusvm_notifier_find);
/**
- * drm_gpusvm_for_each_range_safe() - Safely iterate over GPU SVM ranges in a notifier
- * @range__: Iterator variable for the ranges
- * @next__: Iterator variable for the ranges temporay storage
- * @notifier__: Pointer to the GPU SVM notifier
- * @start__: Start address of the range
- * @end__: End address of the range
- *
- * This macro is used to iterate over GPU SVM ranges in a notifier while
- * removing ranges from it.
- */
-#define drm_gpusvm_for_each_range_safe(range__, next__, notifier__, start__, end__) \
- for ((range__) = drm_gpusvm_range_find((notifier__), (start__), (end__)), \
- (next__) = __drm_gpusvm_range_next(range__); \
- (range__) && (drm_gpusvm_range_start(range__) < (end__)); \
- (range__) = (next__), (next__) = __drm_gpusvm_range_next(range__))
-
-/**
- * __drm_gpusvm_notifier_next() - get the next drm_gpusvm_notifier in the list
- * @notifier: a pointer to the current drm_gpusvm_notifier
+ * drm_gpusvm_range_find() - Find GPU SVM range from GPU SVM notifier
+ * @notifier: Pointer to the GPU SVM notifier structure.
+ * @start: Start address of the range
+ * @end: End address of the range
*
- * Return: A pointer to the next drm_gpusvm_notifier if available, or NULL if
- * the current notifier is the last one or if the input notifier is
- * NULL.
+ * Return: A pointer to the drm_gpusvm_range if found or NULL
*/
-static struct drm_gpusvm_notifier *
-__drm_gpusvm_notifier_next(struct drm_gpusvm_notifier *notifier)
-{
- if (notifier && !list_is_last(&notifier->entry,
- &notifier->gpusvm->notifier_list))
- return list_next_entry(notifier, entry);
-
- return NULL;
-}
-
-static struct drm_gpusvm_notifier *
-notifier_iter_first(struct rb_root_cached *root, unsigned long start,
- unsigned long last)
+struct drm_gpusvm_range *
+drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start,
+ unsigned long end)
{
struct interval_tree_node *itree;
- itree = interval_tree_iter_first(root, start, last);
+ itree = interval_tree_iter_first(&notifier->root, start, end - 1);
if (itree)
- return container_of(itree, struct drm_gpusvm_notifier, itree);
+ return container_of(itree, struct drm_gpusvm_range, itree);
else
return NULL;
}
-
-/**
- * drm_gpusvm_for_each_notifier() - Iterate over GPU SVM notifiers in a gpusvm
- * @notifier__: Iterator variable for the notifiers
- * @notifier__: Pointer to the GPU SVM notifier
- * @start__: Start address of the notifier
- * @end__: End address of the notifier
- *
- * This macro is used to iterate over GPU SVM notifiers in a gpusvm.
- */
-#define drm_gpusvm_for_each_notifier(notifier__, gpusvm__, start__, end__) \
- for ((notifier__) = notifier_iter_first(&(gpusvm__)->root, (start__), (end__) - 1); \
- (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
- (notifier__) = __drm_gpusvm_notifier_next(notifier__))
-
-/**
- * drm_gpusvm_for_each_notifier_safe() - Safely iterate over GPU SVM notifiers in a gpusvm
- * @notifier__: Iterator variable for the notifiers
- * @next__: Iterator variable for the notifiers temporay storage
- * @notifier__: Pointer to the GPU SVM notifier
- * @start__: Start address of the notifier
- * @end__: End address of the notifier
- *
- * This macro is used to iterate over GPU SVM notifiers in a gpusvm while
- * removing notifiers from it.
- */
-#define drm_gpusvm_for_each_notifier_safe(notifier__, next__, gpusvm__, start__, end__) \
- for ((notifier__) = notifier_iter_first(&(gpusvm__)->root, (start__), (end__) - 1), \
- (next__) = __drm_gpusvm_notifier_next(notifier__); \
- (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
- (notifier__) = (next__), (next__) = __drm_gpusvm_notifier_next(notifier__))
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_find);
/**
* drm_gpusvm_notifier_invalidate() - Invalidate a GPU SVM notifier.
@@ -418,7 +361,6 @@ static const struct mmu_interval_notifier_ops drm_gpusvm_notifier_ops = {
* @name: Name of the GPU SVM.
* @drm: Pointer to the DRM device structure.
* @mm: Pointer to the mm_struct for the address space.
- * @device_private_page_owner: Device private pages owner.
* @mm_start: Start address of GPU SVM.
* @mm_range: Range of the GPU SVM.
* @notifier_size: Size of individual notifiers.
@@ -430,23 +372,35 @@ static const struct mmu_interval_notifier_ops drm_gpusvm_notifier_ops = {
*
* This function initializes the GPU SVM.
*
+ * Note: If only the simple drm_gpusvm_pages API (get/unmap/free) is used,
+ * then only @gpusvm, @name, and @drm are required. The same base @gpusvm
+ * can also be used with both modes together, in which case the full setup
+ * is needed; the core drm_gpusvm_pages API then simply never uses the
+ * other fields.
+ *
* Return: 0 on success, a negative error code on failure.
*/
int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
const char *name, struct drm_device *drm,
- struct mm_struct *mm, void *device_private_page_owner,
+ struct mm_struct *mm,
unsigned long mm_start, unsigned long mm_range,
unsigned long notifier_size,
const struct drm_gpusvm_ops *ops,
const unsigned long *chunk_sizes, int num_chunks)
{
- if (!ops->invalidate || !num_chunks)
- return -EINVAL;
+ if (mm) {
+ if (!ops->invalidate || !num_chunks)
+ return -EINVAL;
+ mmgrab(mm);
+ } else {
+ /* No full SVM mode, only core drm_gpusvm_pages API. */
+ if (ops || num_chunks || mm_range || notifier_size)
+ return -EINVAL;
+ }
gpusvm->name = name;
gpusvm->drm = drm;
gpusvm->mm = mm;
- gpusvm->device_private_page_owner = device_private_page_owner;
gpusvm->mm_start = mm_start;
gpusvm->mm_range = mm_range;
gpusvm->notifier_size = notifier_size;
@@ -454,7 +408,6 @@ int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
gpusvm->chunk_sizes = chunk_sizes;
gpusvm->num_chunks = num_chunks;
- mmgrab(mm);
gpusvm->root = RB_ROOT_CACHED;
INIT_LIST_HEAD(&gpusvm->notifier_list);
@@ -473,22 +426,6 @@ int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
EXPORT_SYMBOL_GPL(drm_gpusvm_init);
/**
- * drm_gpusvm_notifier_find() - Find GPU SVM notifier
- * @gpusvm: Pointer to the GPU SVM structure
- * @fault_addr: Fault address
- *
- * This function finds the GPU SVM notifier associated with the fault address.
- *
- * Return: Pointer to the GPU SVM notifier on success, NULL otherwise.
- */
-static struct drm_gpusvm_notifier *
-drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm,
- unsigned long fault_addr)
-{
- return notifier_iter_first(&gpusvm->root, fault_addr, fault_addr + 1);
-}
-
-/**
* to_drm_gpusvm_notifier() - retrieve the container struct for a given rbtree node
* @node: a pointer to the rbtree node embedded within a drm_gpusvm_notifier struct
*
@@ -562,7 +499,8 @@ void drm_gpusvm_fini(struct drm_gpusvm *gpusvm)
drm_gpusvm_range_remove(gpusvm, range);
}
- mmdrop(gpusvm->mm);
+ if (gpusvm->mm)
+ mmdrop(gpusvm->mm);
WARN_ON(!RB_EMPTY_ROOT(&gpusvm->root.rb_root));
}
EXPORT_SYMBOL_GPL(drm_gpusvm_fini);
@@ -702,18 +640,48 @@ drm_gpusvm_range_alloc(struct drm_gpusvm *gpusvm,
range->itree.start = ALIGN_DOWN(fault_addr, chunk_size);
range->itree.last = ALIGN(fault_addr + 1, chunk_size) - 1;
INIT_LIST_HEAD(&range->entry);
- range->notifier_seq = LONG_MAX;
- range->flags.migrate_devmem = migrate_devmem ? 1 : 0;
+ range->pages.notifier_seq = LONG_MAX;
+ range->pages.flags.migrate_devmem = migrate_devmem ? 1 : 0;
return range;
}
/**
+ * drm_gpusvm_hmm_pfn_to_order() - Get the largest CPU mapping order.
+ * @hmm_pfn: The current hmm_pfn.
+ * @hmm_pfn_index: Index of the @hmm_pfn within the pfn array.
+ * @npages: Number of pages within the pfn array i.e the hmm range size.
+ *
+ * To allow skipping PFNs with the same flags (like when they belong to
+ * the same huge PTE) when looping over the pfn array, take a given a hmm_pfn,
+ * and return the largest order that will fit inside the CPU PTE, but also
+ * crucially accounting for the original hmm range boundaries.
+ *
+ * Return: The largest order that will safely fit within the size of the hmm_pfn
+ * CPU PTE.
+ */
+static unsigned int drm_gpusvm_hmm_pfn_to_order(unsigned long hmm_pfn,
+ unsigned long hmm_pfn_index,
+ unsigned long npages)
+{
+ unsigned long size;
+
+ size = 1UL << hmm_pfn_to_map_order(hmm_pfn);
+ size -= (hmm_pfn & ~HMM_PFN_FLAGS) & (size - 1);
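+ /*
+ * Worked example (assumed figures): a 2MiB huge PTE maps 512 4K
+ * pages, i.e. order 9. If hmm_pfn sits 3 pages into that PTE, the
+ * line above trims size to 509 so the returned order cannot span
+ * the PTE boundary; the check below clips it to the range end.
+ */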
+ hmm_pfn_index += size;
+ if (hmm_pfn_index > npages)
+ size -= (hmm_pfn_index - npages);
+
+ return ilog2(size);
+}
+
+/**
* drm_gpusvm_check_pages() - Check pages
* @gpusvm: Pointer to the GPU SVM structure
* @notifier: Pointer to the GPU SVM notifier structure
* @start: Start address
* @end: End address
+ * @dev_private_owner: The device private page owner
*
* Check if pages between start and end have been faulted in on the CPU. Use to
* prevent migration of pages without CPU backing store.
@@ -722,14 +690,15 @@ drm_gpusvm_range_alloc(struct drm_gpusvm *gpusvm,
*/
static bool drm_gpusvm_check_pages(struct drm_gpusvm *gpusvm,
struct drm_gpusvm_notifier *notifier,
- unsigned long start, unsigned long end)
+ unsigned long start, unsigned long end,
+ void *dev_private_owner)
{
struct hmm_range hmm_range = {
.default_flags = 0,
.notifier = &notifier->notifier,
.start = start,
.end = end,
- .dev_private_owner = gpusvm->device_private_page_owner,
+ .dev_private_owner = dev_private_owner,
};
unsigned long timeout =
jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
@@ -766,7 +735,7 @@ static bool drm_gpusvm_check_pages(struct drm_gpusvm *gpusvm,
err = -EFAULT;
goto err_free;
}
- i += 0x1 << hmm_pfn_to_map_order(pfns[i]);
+ i += 0x1 << drm_gpusvm_hmm_pfn_to_order(pfns[i], i, npages);
}
err_free:
@@ -783,6 +752,7 @@ err_free:
* @gpuva_start: Start address of GPUVA which mirrors CPU
* @gpuva_end: End address of GPUVA which mirrors CPU
* @check_pages_threshold: Check CPU pages for present threshold
+ * @dev_private_owner: The device private page owner
*
* This function determines the chunk size for the GPU SVM range based on the
* fault address, GPU SVM chunk sizes, existing GPU SVM ranges, and the virtual
@@ -797,7 +767,8 @@ drm_gpusvm_range_chunk_size(struct drm_gpusvm *gpusvm,
unsigned long fault_addr,
unsigned long gpuva_start,
unsigned long gpuva_end,
- unsigned long check_pages_threshold)
+ unsigned long check_pages_threshold,
+ void *dev_private_owner)
{
unsigned long start, end;
int i = 0;
@@ -844,7 +815,7 @@ retry:
* process-many-malloc' mallocs at least 64k at a time.
*/
if (end - start <= check_pages_threshold &&
- !drm_gpusvm_check_pages(gpusvm, notifier, start, end)) {
+ !drm_gpusvm_check_pages(gpusvm, notifier, start, end, dev_private_owner)) {
++i;
goto retry;
}
@@ -943,7 +914,7 @@ drm_gpusvm_range_find_or_insert(struct drm_gpusvm *gpusvm,
if (!mmget_not_zero(mm))
return ERR_PTR(-EFAULT);
- notifier = drm_gpusvm_notifier_find(gpusvm, fault_addr);
+ notifier = drm_gpusvm_notifier_find(gpusvm, fault_addr, fault_addr + 1);
if (!notifier) {
notifier = drm_gpusvm_notifier_alloc(gpusvm, fault_addr);
if (IS_ERR(notifier)) {
@@ -987,7 +958,8 @@ drm_gpusvm_range_find_or_insert(struct drm_gpusvm *gpusvm,
chunk_size = drm_gpusvm_range_chunk_size(gpusvm, notifier, vas,
fault_addr, gpuva_start,
gpuva_end,
- ctx->check_pages_threshold);
+ ctx->check_pages_threshold,
+ ctx->device_private_page_owner);
if (chunk_size == LONG_MAX) {
err = -EINVAL;
goto err_notifier_remove;
@@ -1024,31 +996,31 @@ err_mmunlock:
EXPORT_SYMBOL_GPL(drm_gpusvm_range_find_or_insert);
/**
- * __drm_gpusvm_range_unmap_pages() - Unmap pages associated with a GPU SVM range (internal)
+ * __drm_gpusvm_unmap_pages() - Unmap pages associated with GPU SVM pages (internal)
* @gpusvm: Pointer to the GPU SVM structure
- * @range: Pointer to the GPU SVM range structure
+ * @svm_pages: Pointer to the GPU SVM pages structure
* @npages: Number of pages to unmap
*
- * This function unmap pages associated with a GPU SVM range. Assumes and
+ * This function unmaps pages associated with a GPU SVM pages struct. Assumes and
* asserts correct locking is in place when called.
*/
-static void __drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
- struct drm_gpusvm_range *range,
- unsigned long npages)
+static void __drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ unsigned long npages)
{
- unsigned long i, j;
- struct drm_pagemap *dpagemap = range->dpagemap;
+ struct drm_pagemap *dpagemap = svm_pages->dpagemap;
struct device *dev = gpusvm->drm->dev;
+ unsigned long i, j;
lockdep_assert_held(&gpusvm->notifier_lock);
- if (range->flags.has_dma_mapping) {
- struct drm_gpusvm_range_flags flags = {
- .__flags = range->flags.__flags,
+ if (svm_pages->flags.has_dma_mapping) {
+ struct drm_gpusvm_pages_flags flags = {
+ .__flags = svm_pages->flags.__flags,
};
for (i = 0, j = 0; i < npages; j++) {
- struct drm_pagemap_device_addr *addr = &range->dma_addr[j];
+ struct drm_pagemap_addr *addr = &svm_pages->dma_addr[j];
if (addr->proto == DRM_INTERCONNECT_SYSTEM)
dma_unmap_page(dev,
@@ -1064,31 +1036,52 @@ static void __drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
/* WRITE_ONCE pairs with READ_ONCE for opportunistic checks */
flags.has_devmem_pages = false;
flags.has_dma_mapping = false;
- WRITE_ONCE(range->flags.__flags, flags.__flags);
+ WRITE_ONCE(svm_pages->flags.__flags, flags.__flags);
- range->dpagemap = NULL;
+ svm_pages->dpagemap = NULL;
}
}
/**
- * drm_gpusvm_range_free_pages() - Free pages associated with a GPU SVM range
+ * __drm_gpusvm_free_pages() - Free dma array associated with GPU SVM pages
* @gpusvm: Pointer to the GPU SVM structure
- * @range: Pointer to the GPU SVM range structure
+ * @svm_pages: Pointer to the GPU SVM pages structure
*
* This function frees the dma address array associated with a GPU SVM range.
*/
-static void drm_gpusvm_range_free_pages(struct drm_gpusvm *gpusvm,
- struct drm_gpusvm_range *range)
+static void __drm_gpusvm_free_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages)
{
lockdep_assert_held(&gpusvm->notifier_lock);
- if (range->dma_addr) {
- kvfree(range->dma_addr);
- range->dma_addr = NULL;
+ if (svm_pages->dma_addr) {
+ kvfree(svm_pages->dma_addr);
+ svm_pages->dma_addr = NULL;
}
}
/**
+ * drm_gpusvm_free_pages() - Free dma-mapping associated with GPU SVM pages
+ * struct
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @svm_pages: Pointer to the GPU SVM pages structure
+ * @npages: Number of mapped pages
+ *
+ * This function unmaps and frees the dma address array associated with a GPU
+ * SVM pages struct.
+ */
+void drm_gpusvm_free_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ unsigned long npages)
+{
+ drm_gpusvm_notifier_lock(gpusvm);
+ __drm_gpusvm_unmap_pages(gpusvm, svm_pages, npages);
+ __drm_gpusvm_free_pages(gpusvm, svm_pages);
+ drm_gpusvm_notifier_unlock(gpusvm);
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_free_pages);
+
+/**
* drm_gpusvm_range_remove() - Remove GPU SVM range
* @gpusvm: Pointer to the GPU SVM structure
* @range: Pointer to the GPU SVM range to be removed
@@ -1107,13 +1100,14 @@ void drm_gpusvm_range_remove(struct drm_gpusvm *gpusvm,
drm_gpusvm_driver_lock_held(gpusvm);
notifier = drm_gpusvm_notifier_find(gpusvm,
- drm_gpusvm_range_start(range));
+ drm_gpusvm_range_start(range),
+ drm_gpusvm_range_start(range) + 1);
if (WARN_ON_ONCE(!notifier))
return;
drm_gpusvm_notifier_lock(gpusvm);
- __drm_gpusvm_range_unmap_pages(gpusvm, range, npages);
- drm_gpusvm_range_free_pages(gpusvm, range);
+ __drm_gpusvm_unmap_pages(gpusvm, &range->pages, npages);
+ __drm_gpusvm_free_pages(gpusvm, &range->pages);
__drm_gpusvm_range_remove(notifier, range);
drm_gpusvm_notifier_unlock(gpusvm);
@@ -1179,6 +1173,28 @@ void drm_gpusvm_range_put(struct drm_gpusvm_range *range)
EXPORT_SYMBOL_GPL(drm_gpusvm_range_put);
/**
+ * drm_gpusvm_pages_valid() - GPU SVM pages valid
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @svm_pages: Pointer to the GPU SVM pages structure
+ *
+ * This function determines if the pages backing a GPU SVM pages struct are
+ * valid. It is expected to be called while holding gpusvm->notifier_lock and
+ * as the last step before committing a GPU binding. This is akin to a
+ * notifier seqno check in the HMM documentation, but because notifiers can be
+ * wider (i.e., span multiple ranges), this function is required for finer
+ * grained checking (i.e., per range) of whether pages are valid.
+ *
+ * Return: True if the GPU SVM pages are valid, False otherwise
+ */
+static bool drm_gpusvm_pages_valid(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages)
+{
+ lockdep_assert_held(&gpusvm->notifier_lock);
+
+ return svm_pages->flags.has_devmem_pages || svm_pages->flags.has_dma_mapping;
+}
+
+/**
* drm_gpusvm_range_pages_valid() - GPU SVM range pages valid
* @gpusvm: Pointer to the GPU SVM structure
* @range: Pointer to the GPU SVM range structure
@@ -1195,9 +1211,7 @@ EXPORT_SYMBOL_GPL(drm_gpusvm_range_put);
bool drm_gpusvm_range_pages_valid(struct drm_gpusvm *gpusvm,
struct drm_gpusvm_range *range)
{
- lockdep_assert_held(&gpusvm->notifier_lock);
-
- return range->flags.has_devmem_pages || range->flags.has_dma_mapping;
+ return drm_gpusvm_pages_valid(gpusvm, &range->pages);
}
EXPORT_SYMBOL_GPL(drm_gpusvm_range_pages_valid);
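
A sketch of the intended call pattern around a bind, with driver_commit_bind()
as a hypothetical driver helper; the check must be the last step before the
binding is committed:

    drm_gpusvm_notifier_lock(gpusvm);
    if (!drm_gpusvm_range_pages_valid(gpusvm, range)) {
            drm_gpusvm_notifier_unlock(gpusvm);
            return -EAGAIN; /* restart the get_pages/bind loop */
    }
    err = driver_commit_bind(range); /* hypothetical */
    drm_gpusvm_notifier_unlock(gpusvm);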
@@ -1211,66 +1225,71 @@ EXPORT_SYMBOL_GPL(drm_gpusvm_range_pages_valid);
*
* Return: True if GPU SVM range has valid pages, False otherwise
*/
-static bool
-drm_gpusvm_range_pages_valid_unlocked(struct drm_gpusvm *gpusvm,
- struct drm_gpusvm_range *range)
+static bool drm_gpusvm_pages_valid_unlocked(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages)
{
bool pages_valid;
- if (!range->dma_addr)
+ if (!svm_pages->dma_addr)
return false;
drm_gpusvm_notifier_lock(gpusvm);
- pages_valid = drm_gpusvm_range_pages_valid(gpusvm, range);
+ pages_valid = drm_gpusvm_pages_valid(gpusvm, svm_pages);
if (!pages_valid)
- drm_gpusvm_range_free_pages(gpusvm, range);
+ __drm_gpusvm_free_pages(gpusvm, svm_pages);
drm_gpusvm_notifier_unlock(gpusvm);
return pages_valid;
}
/**
- * drm_gpusvm_range_get_pages() - Get pages for a GPU SVM range
+ * drm_gpusvm_get_pages() - Get pages and populate GPU SVM pages struct
* @gpusvm: Pointer to the GPU SVM structure
- * @range: Pointer to the GPU SVM range structure
+ * @svm_pages: The SVM pages to populate. This will contain the DMA addresses.
+ * @mm: The mm corresponding to the CPU range
+ * @notifier: The corresponding notifier for the given CPU range
+ * @pages_start: Start CPU address for the pages
+ * @pages_end: End CPU address for the pages (exclusive)
* @ctx: GPU SVM context
*
- * This function gets pages for a GPU SVM range and ensures they are mapped for
- * DMA access.
+ * This function gets and maps pages for a CPU virtual address range and
+ * ensures they are mapped for DMA access.
*
* Return: 0 on success, negative error code on failure.
*/
-int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
- struct drm_gpusvm_range *range,
- const struct drm_gpusvm_ctx *ctx)
+int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ struct mm_struct *mm,
+ struct mmu_interval_notifier *notifier,
+ unsigned long pages_start, unsigned long pages_end,
+ const struct drm_gpusvm_ctx *ctx)
{
- struct mmu_interval_notifier *notifier = &range->notifier->notifier;
struct hmm_range hmm_range = {
.default_flags = HMM_PFN_REQ_FAULT | (ctx->read_only ? 0 :
HMM_PFN_REQ_WRITE),
.notifier = notifier,
- .start = drm_gpusvm_range_start(range),
- .end = drm_gpusvm_range_end(range),
- .dev_private_owner = gpusvm->device_private_page_owner,
+ .start = pages_start,
+ .end = pages_end,
+ .dev_private_owner = ctx->device_private_page_owner,
};
- struct mm_struct *mm = gpusvm->mm;
void *zdd;
unsigned long timeout =
jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
unsigned long i, j;
- unsigned long npages = npages_in_range(drm_gpusvm_range_start(range),
- drm_gpusvm_range_end(range));
+ unsigned long npages = npages_in_range(pages_start, pages_end);
unsigned long num_dma_mapped;
unsigned int order = 0;
unsigned long *pfns;
int err = 0;
struct dev_pagemap *pagemap;
struct drm_pagemap *dpagemap;
- struct drm_gpusvm_range_flags flags;
+ struct drm_gpusvm_pages_flags flags;
+ enum dma_data_direction dma_dir = ctx->read_only ? DMA_TO_DEVICE :
+ DMA_BIDIRECTIONAL;
retry:
hmm_range.notifier_seq = mmu_interval_read_begin(notifier);
- if (drm_gpusvm_range_pages_valid_unlocked(gpusvm, range))
+ if (drm_gpusvm_pages_valid_unlocked(gpusvm, svm_pages))
goto set_seqno;
pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
@@ -1310,7 +1329,7 @@ map_pages:
*/
drm_gpusvm_notifier_lock(gpusvm);
- flags.__flags = range->flags.__flags;
+ flags.__flags = svm_pages->flags.__flags;
if (flags.unmapped) {
drm_gpusvm_notifier_unlock(gpusvm);
err = -EFAULT;
@@ -1323,13 +1342,12 @@ map_pages:
goto retry;
}
- if (!range->dma_addr) {
+ if (!svm_pages->dma_addr) {
/* Unlock and restart mapping to allocate memory. */
drm_gpusvm_notifier_unlock(gpusvm);
- range->dma_addr = kvmalloc_array(npages,
- sizeof(*range->dma_addr),
- GFP_KERNEL);
- if (!range->dma_addr) {
+ svm_pages->dma_addr =
+ kvmalloc_array(npages, sizeof(*svm_pages->dma_addr), GFP_KERNEL);
+ if (!svm_pages->dma_addr) {
err = -ENOMEM;
goto err_free;
}
@@ -1342,7 +1360,7 @@ map_pages:
for (i = 0, j = 0; i < npages; ++j) {
struct page *page = hmm_pfn_to_page(pfns[i]);
- order = hmm_pfn_to_map_order(pfns[i]);
+ order = drm_gpusvm_hmm_pfn_to_order(pfns[i], i, npages);
if (is_device_private_page(page) ||
is_device_coherent_page(page)) {
if (zdd != page->zone_device_data && i > 0) {
@@ -1368,13 +1386,13 @@ map_pages:
goto err_unmap;
}
}
- range->dma_addr[j] =
+ svm_pages->dma_addr[j] =
dpagemap->ops->device_map(dpagemap,
gpusvm->drm->dev,
page, order,
- DMA_BIDIRECTIONAL);
+ dma_dir);
if (dma_mapping_error(gpusvm->drm->dev,
- range->dma_addr[j].addr)) {
+ svm_pages->dma_addr[j].addr)) {
err = -EFAULT;
goto err_unmap;
}
@@ -1394,15 +1412,15 @@ map_pages:
addr = dma_map_page(gpusvm->drm->dev,
page, 0,
PAGE_SIZE << order,
- DMA_BIDIRECTIONAL);
+ dma_dir);
if (dma_mapping_error(gpusvm->drm->dev, addr)) {
err = -EFAULT;
goto err_unmap;
}
- range->dma_addr[j] = drm_pagemap_device_addr_encode
+ svm_pages->dma_addr[j] = drm_pagemap_addr_encode
(addr, DRM_INTERCONNECT_SYSTEM, order,
- DMA_BIDIRECTIONAL);
+ dma_dir);
}
i += 1 << order;
num_dma_mapped = i;
@@ -1411,21 +1429,21 @@ map_pages:
if (pagemap) {
flags.has_devmem_pages = true;
- range->dpagemap = dpagemap;
+ svm_pages->dpagemap = dpagemap;
}
/* WRITE_ONCE pairs with READ_ONCE for opportunistic checks */
- WRITE_ONCE(range->flags.__flags, flags.__flags);
+ WRITE_ONCE(svm_pages->flags.__flags, flags.__flags);
drm_gpusvm_notifier_unlock(gpusvm);
kvfree(pfns);
set_seqno:
- range->notifier_seq = hmm_range.notifier_seq;
+ svm_pages->notifier_seq = hmm_range.notifier_seq;
return 0;
err_unmap:
- __drm_gpusvm_range_unmap_pages(gpusvm, range, num_dma_mapped);
+ __drm_gpusvm_unmap_pages(gpusvm, svm_pages, num_dma_mapped);
drm_gpusvm_notifier_unlock(gpusvm);
err_free:
kvfree(pfns);
@@ -1433,11 +1451,62 @@ err_free:
goto retry;
return err;
}
+EXPORT_SYMBOL_GPL(drm_gpusvm_get_pages);
+
+/**
+ * drm_gpusvm_range_get_pages() - Get pages for a GPU SVM range
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @range: Pointer to the GPU SVM range structure
+ * @ctx: GPU SVM context
+ *
+ * This function gets pages for a GPU SVM range and ensures they are mapped for
+ * DMA access.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ return drm_gpusvm_get_pages(gpusvm, &range->pages, gpusvm->mm,
+ &range->notifier->notifier,
+ drm_gpusvm_range_start(range),
+ drm_gpusvm_range_end(range), ctx);
+}
EXPORT_SYMBOL_GPL(drm_gpusvm_range_get_pages);
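
A sketch of calling the lower-level drm_gpusvm_get_pages() directly for a CPU
range that is not tracked by a drm_gpusvm_range; my_notifier, my_owner, start
and end are assumptions supplied by the driver:

    struct drm_gpusvm_ctx ctx = {
            .read_only = false,
            .device_private_page_owner = my_owner, /* hypothetical */
    };
    int err;

    err = drm_gpusvm_get_pages(gpusvm, &svm_pages, current->mm,
                               &my_notifier, start, end, &ctx);
    if (err)
            return err;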
/**
+ * drm_gpusvm_unmap_pages() - Unmap GPU svm pages
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @svm_pages: Pointer to the GPU SVM pages structure
+ * @npages: Number of pages in @svm_pages.
+ * @ctx: GPU SVM context
+ *
+ * This function unmaps pages associated with a GPU SVM pages struct. If
+ * @ctx->in_notifier is set, it is assumed that gpusvm->notifier_lock is held
+ * in write mode; if it is clear, it acquires gpusvm->notifier_lock in read
+ * mode. Must be called from the invalidate() callback of the corresponding
+ * notifier for the IOMMU security model.
+ */
+void drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ unsigned long npages,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ if (ctx->in_notifier)
+ lockdep_assert_held_write(&gpusvm->notifier_lock);
+ else
+ drm_gpusvm_notifier_lock(gpusvm);
+
+ __drm_gpusvm_unmap_pages(gpusvm, svm_pages, npages);
+
+ if (!ctx->in_notifier)
+ drm_gpusvm_notifier_unlock(gpusvm);
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_unmap_pages);
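
A sketch of the in-notifier path, assuming a hypothetical invalidate()
callback in which gpusvm->notifier_lock is already held in write mode:

    static bool my_invalidate(struct mmu_interval_notifier *mni,
                              const struct mmu_notifier_range *mmu_range,
                              unsigned long cur_seq)
    {
            struct drm_gpusvm_ctx ctx = { .in_notifier = true };

            /* Looking up gpusvm, svm_pages and npages is driver specific. */
            drm_gpusvm_unmap_pages(gpusvm, svm_pages, npages, &ctx);
            return true;
    }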
+
+/**
* drm_gpusvm_range_unmap_pages() - Unmap pages associated with a GPU SVM range
- * drm_gpusvm_range_evict() - Evict GPU SVM range
* @gpusvm: Pointer to the GPU SVM structure
* @range: Pointer to the GPU SVM range structure
* @ctx: GPU SVM context
@@ -1455,15 +1524,7 @@ void drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
unsigned long npages = npages_in_range(drm_gpusvm_range_start(range),
drm_gpusvm_range_end(range));
- if (ctx->in_notifier)
- lockdep_assert_held_write(&gpusvm->notifier_lock);
- else
- drm_gpusvm_notifier_lock(gpusvm);
-
- __drm_gpusvm_range_unmap_pages(gpusvm, range, npages);
-
- if (!ctx->in_notifier)
- drm_gpusvm_notifier_unlock(gpusvm);
+ return drm_gpusvm_unmap_pages(gpusvm, &range->pages, npages, ctx);
}
EXPORT_SYMBOL_GPL(drm_gpusvm_range_unmap_pages);
@@ -1561,10 +1622,10 @@ void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range,
{
lockdep_assert_held_write(&range->gpusvm->notifier_lock);
- range->flags.unmapped = true;
+ range->pages.flags.unmapped = true;
if (drm_gpusvm_range_start(range) < mmu_range->start ||
drm_gpusvm_range_end(range) > mmu_range->end)
- range->flags.partial_unmap = true;
+ range->pages.flags.partial_unmap = true;
}
EXPORT_SYMBOL_GPL(drm_gpusvm_range_set_unmapped);
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index bbc7fecb6f4a..af63f4d00315 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -40,7 +40,7 @@
* mapping's backing &drm_gem_object buffers.
*
* &drm_gem_object buffers maintain a list of &drm_gpuva objects representing
- * all existent GPU VA mappings using this &drm_gem_object as backing buffer.
+ * all existing GPU VA mappings using this &drm_gem_object as backing buffer.
*
* GPU VAs can be flagged as sparse, such that drivers may use GPU VAs to also
* keep track of sparse PTEs in order to support Vulkan 'Sparse Resources'.
@@ -72,7 +72,7 @@
* but it can also be a 'dummy' object, which can be allocated with
* drm_gpuvm_resv_object_alloc().
*
- * In order to connect a struct drm_gpuva its backing &drm_gem_object each
+ * In order to connect a struct drm_gpuva to its backing &drm_gem_object each
* &drm_gem_object maintains a list of &drm_gpuvm_bo structures, and each
* &drm_gpuvm_bo contains a list of &drm_gpuva structures.
*
@@ -81,7 +81,7 @@
* This is ensured by the API through drm_gpuvm_bo_obtain() and
* drm_gpuvm_bo_obtain_prealloc() which first look into the corresponding
* &drm_gem_object list of &drm_gpuvm_bos for an existing instance of this
- * particular combination. If not existent a new instance is created and linked
+ * particular combination. If not present, a new instance is created and linked
* to the &drm_gem_object.
*
* &drm_gpuvm_bo structures, since unique for a given &drm_gpuvm, are also used
@@ -108,7 +108,7 @@
* sequence of operations to satisfy a given map or unmap request.
*
* Therefore the DRM GPU VA manager provides an algorithm implementing splitting
- * and merging of existent GPU VA mappings with the ones that are requested to
+ * and merging of existing GPU VA mappings with the ones that are requested to
* be mapped or unmapped. This feature is required by the Vulkan API to
* implement Vulkan 'Sparse Memory Bindings' - drivers UAPIs often refer to this
* as VM BIND.
@@ -119,7 +119,7 @@
* execute in order to integrate the new mapping cleanly into the current state
* of the GPU VA space.
*
- * Depending on how the new GPU VA mapping intersects with the existent mappings
+ * Depending on how the new GPU VA mapping intersects with the existing mappings
* of the GPU VA space the &drm_gpuvm_ops callbacks contain an arbitrary amount
* of unmap operations, a maximum of two remap operations and a single map
* operation. The caller might receive no callback at all if no operation is
@@ -139,16 +139,16 @@
* one unmap operation and one or two map operations, such that drivers can
* derive the page table update delta accordingly.
*
- * Note that there can't be more than two existent mappings to split up, one at
+ * Note that there can't be more than two existing mappings to split up, one at
* the beginning and one at the end of the new mapping, hence there is a
* maximum of two remap operations.
*
* Analogous to drm_gpuvm_sm_map() drm_gpuvm_sm_unmap() uses &drm_gpuvm_ops to
* call back into the driver in order to unmap a range of GPU VA space. The
- * logic behind this function is way simpler though: For all existent mappings
+ * logic behind this function is way simpler though: For all existing mappings
* enclosed by the given range unmap operations are created. For mappings which
- * are only partically located within the given range, remap operations are
- * created such that those mappings are split up and re-mapped partically.
+ * are only partially located within the given range, remap operations are
+ * created such that those mappings are split up and re-mapped partially.
*
* As an alternative to drm_gpuvm_sm_map() and drm_gpuvm_sm_unmap(),
* drm_gpuvm_sm_map_ops_create() and drm_gpuvm_sm_unmap_ops_create() can be used
@@ -168,7 +168,7 @@
* provided helper functions drm_gpuva_map(), drm_gpuva_remap() and
* drm_gpuva_unmap() instead.
*
- * The following diagram depicts the basic relationships of existent GPU VA
+ * The following diagram depicts the basic relationships of existing GPU VA
* mappings, a newly requested mapping and the resulting mappings as implemented
* by drm_gpuvm_sm_map() - it doesn't cover any arbitrary combinations of these.
*
@@ -218,7 +218,7 @@
*
*
* 4) Existent mapping is a left aligned subset of the requested one, hence
- * replace the existent one.
+ * replace the existing one.
*
* ::
*
@@ -236,9 +236,9 @@
* and/or non-contiguous BO offset.
*
*
- * 5) Requested mapping's range is a left aligned subset of the existent one,
+ * 5) Requested mapping's range is a left aligned subset of the existing one,
* but backed by a different BO. Hence, map the requested mapping and split
- * the existent one adjusting its BO offset.
+ * the existing one adjusting its BO offset.
*
* ::
*
@@ -271,9 +271,9 @@
* new: |-----|-----| (a.bo_offset=n, a'.bo_offset=n+1)
*
*
- * 7) Requested mapping's range is a right aligned subset of the existent one,
+ * 7) Requested mapping's range is a right aligned subset of the existing one,
* but backed by a different BO. Hence, map the requested mapping and split
- * the existent one, without adjusting the BO offset.
+ * the existing one, without adjusting the BO offset.
*
* ::
*
@@ -304,7 +304,7 @@
*
* 9) Existent mapping is overlapped at the end by the requested mapping backed
* by a different BO. Hence, map the requested mapping and split up the
- * existent one, without adjusting the BO offset.
+ * existing one, without adjusting the BO offset.
*
* ::
*
@@ -334,9 +334,9 @@
* new: |-----|-----------| (a'.bo_offset=n, a.bo_offset=n+1)
*
*
- * 11) Requested mapping's range is a centered subset of the existent one
+ * 11) Requested mapping's range is a centered subset of the existing one
* having a different backing BO. Hence, map the requested mapping and split
- * up the existent one in two mappings, adjusting the BO offset of the right
+ * up the existing one in two mappings, adjusting the BO offset of the right
* one accordingly.
*
* ::
@@ -351,7 +351,7 @@
* new: |-----|-----|-----| (a.bo_offset=n,b.bo_offset=m,a'.bo_offset=n+2)
*
*
- * 12) Requested mapping is a contiguous subset of the existent one. Split it
+ * 12) Requested mapping is a contiguous subset of the existing one. Split it
* up, but indicate that the backing PTEs could be kept.
*
* ::
@@ -367,7 +367,7 @@
*
*
* 13) Existent mapping is a right aligned subset of the requested one, hence
- * replace the existent one.
+ * replace the existing one.
*
* ::
*
@@ -386,7 +386,7 @@
*
*
* 14) Existent mapping is a centered subset of the requested one, hence
- * replace the existent one.
+ * replace the existing one.
*
* ::
*
@@ -406,7 +406,7 @@
*
* 15) Existent mappings is overlapped at the beginning by the requested mapping
* backed by a different BO. Hence, map the requested mapping and split up
- * the existent one, adjusting its BO offset accordingly.
+ * the existing one, adjusting its BO offset accordingly.
*
* ::
*
@@ -421,6 +421,71 @@
*/
/**
+ * DOC: Madvise Logic - Splitting and Traversal
+ *
+ * This logic handles GPU VA range updates by generating remap and map operations
+ * without performing unmaps or merging existing mappings.
+ *
+ * 1) The requested range lies entirely within a single drm_gpuva. The logic
+ *    splits the existing mapping at the start and end boundaries and inserts
+ *    a new map.
+ *
+ * ::
+ *
+ *              a     start     end     b
+ *         pre: |-----------------------|
+ *                     drm_gpuva1
+ *
+ *              a     start     end     b
+ *         new: |-----|=========|-------|
+ *               remap    map      remap
+ *
+ *    one REMAP and one MAP: same behaviour as SPLIT and MERGE
+ *
+ * 2) The requested range spans multiple drm_gpuva regions. The logic traverses
+ *    across boundaries, remapping the start and end segments, and inserting
+ *    two map operations to cover the full range.
+ *
+ * ::
+ *
+ *              a       start      b              c        end       d
+ *         pre: |------------------|--------------|------------------|
+ *                   drm_gpuva1       drm_gpuva2       drm_gpuva3
+ *
+ *              a       start      b              c        end       d
+ *         new: |-------|==========|--------------|========|---------|
+ *               remap1    map1       drm_gpuva2    map2     remap2
+ *
+ *    two REMAPs and two MAPs
+ *
+ * 3) Either start or end lies within a drm_gpuva. A single remap and a single
+ *    map operation are generated to update the affected portion.
+ *
+ * ::
+ *
+ *              a/start            b              c        end       d
+ *         pre: |------------------|--------------|------------------|
+ *                   drm_gpuva1       drm_gpuva2       drm_gpuva3
+ *
+ *              a/start            b              c        end       d
+ *         new: |------------------|--------------|========|---------|
+ *                   drm_gpuva1       drm_gpuva2    map1     remap1
+ *
+ * ::
+ *
+ *              a       start      b              c/end              d
+ *         pre: |------------------|--------------|------------------|
+ *                   drm_gpuva1       drm_gpuva2       drm_gpuva3
+ *
+ *              a       start      b              c/end              d
+ *         new: |-------|==========|--------------|------------------|
+ *               remap1    map1       drm_gpuva2     drm_gpuva3
+ *
+ *    one REMAP and one MAP
+ *
+ * 4) Both start and end align with existing drm_gpuva boundaries. No
+ *    operations are needed, as the range is already covered.
+ *
+ * 5) There are no existing drm_gpuvas. No operations.
+ *
+ * Unlike drm_gpuvm_sm_map_ops_create(), this logic avoids unmaps and merging,
+ * focusing solely on remap and map operations for efficient traversal and
+ * update.
+ */
+
+/**
* DOC: Locking
*
* In terms of managing &drm_gpuva entries DRM GPUVM does not take care of
@@ -432,8 +497,7 @@
* DRM GPUVM also does not take care of the locking of the backing
* &drm_gem_object buffers GPU VA lists and &drm_gpuvm_bo abstractions by
* itself; drivers are responsible to enforce mutual exclusion using either the
- * GEMs dma_resv lock or alternatively a driver specific external lock. For the
- * latter see also drm_gem_gpuva_set_lock().
+ * GEMs dma_resv lock or the GEMs gpuva.lock mutex.
*
* However, DRM GPUVM contains lockdep checks to ensure callers of its API hold
* the corresponding lock whenever the &drm_gem_objects GPU VA list is accessed
@@ -469,8 +533,8 @@
* make use of them.
*
* The below code is strictly limited to illustrate the generic usage pattern.
- * To maintain simplicitly, it doesn't make use of any abstractions for common
- * code, different (asyncronous) stages with fence signalling critical paths,
+ * To maintain simplicity, it doesn't make use of any abstractions for common
+ * code, different (asynchronous) stages with fence signalling critical paths,
* any other helpers or error handling in terms of freeing memory and dropping
* previously taken locks.
*
@@ -479,20 +543,25 @@
* // Allocates a new &drm_gpuva.
* struct drm_gpuva * driver_gpuva_alloc(void);
*
- * // Typically drivers would embedd the &drm_gpuvm and &drm_gpuva
+ * // Typically drivers would embed the &drm_gpuvm and &drm_gpuva
* // structure in individual driver structures and lock the dma-resv with
* // drm_exec or similar helpers.
* int driver_mapping_create(struct drm_gpuvm *gpuvm,
* u64 addr, u64 range,
* struct drm_gem_object *obj, u64 offset)
* {
+ * struct drm_gpuvm_map_req map_req = {
+ * .map.va.addr = addr,
+ * .map.va.range = range,
+ * .map.gem.obj = obj,
+ * .map.gem.offset = offset,
+ * };
* struct drm_gpuva_ops *ops;
* struct drm_gpuva_op *op
* struct drm_gpuvm_bo *vm_bo;
*
* driver_lock_va_space();
- * ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range,
- * obj, offset);
+ * ops = drm_gpuvm_sm_map_ops_create(gpuvm, &map_req);
* if (IS_ERR(ops))
* return PTR_ERR(ops);
*
@@ -582,7 +651,7 @@
* .sm_step_unmap = driver_gpuva_unmap,
* };
*
- * // Typically drivers would embedd the &drm_gpuvm and &drm_gpuva
+ * // Typically drivers would embed the &drm_gpuvm and &drm_gpuva
* // structure in individual driver structures and lock the dma-resv with
* // drm_exec or similar helpers.
* int driver_mapping_create(struct drm_gpuvm *gpuvm,
@@ -680,7 +749,7 @@
*
* This helper is here to provide lockless list iteration. Lockless as in, the
* iterator releases the lock immediately after picking the first element from
- * the list, so list insertion deletion can happen concurrently.
+ * the list, so list insertion and deletion can happen concurrently.
*
* Elements popped from the original list are kept in a local list, so removal
* and is_empty checks can still happen while we're iterating the list.
@@ -1160,7 +1229,7 @@ drm_gpuvm_prepare_objects_locked(struct drm_gpuvm *gpuvm,
}
/**
- * drm_gpuvm_prepare_objects() - prepare all assoiciated BOs
+ * drm_gpuvm_prepare_objects() - prepare all associated BOs
* @gpuvm: the &drm_gpuvm
* @exec: the &drm_exec locking context
* @num_fences: the amount of &dma_fences to reserve
@@ -1230,13 +1299,13 @@ drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_range);
/**
- * drm_gpuvm_exec_lock() - lock all dma-resv of all assoiciated BOs
+ * drm_gpuvm_exec_lock() - lock all dma-resv of all associated BOs
* @vm_exec: the &drm_gpuvm_exec wrapper
*
* Acquires all dma-resv locks of all &drm_gem_objects the given
* &drm_gpuvm contains mappings of.
*
- * Addionally, when calling this function with struct drm_gpuvm_exec::extra
+ * Additionally, when calling this function with struct drm_gpuvm_exec::extra
* being set the driver receives the given @fn callback to lock additional
* dma-resv in the context of the &drm_gpuvm_exec instance. Typically, drivers
* would call drm_exec_prepare_obj() from within this callback.
@@ -1293,7 +1362,7 @@ fn_lock_array(struct drm_gpuvm_exec *vm_exec)
}
/**
- * drm_gpuvm_exec_lock_array() - lock all dma-resv of all assoiciated BOs
+ * drm_gpuvm_exec_lock_array() - lock all dma-resv of all associated BOs
* @vm_exec: the &drm_gpuvm_exec wrapper
* @objs: additional &drm_gem_objects to lock
* @num_objs: the number of additional &drm_gem_objects to lock
@@ -1512,7 +1581,7 @@ drm_gpuvm_bo_destroy(struct kref *kref)
drm_gpuvm_bo_list_del(vm_bo, extobj, lock);
drm_gpuvm_bo_list_del(vm_bo, evict, lock);
- drm_gem_gpuva_assert_lock_held(obj);
+ drm_gem_gpuva_assert_lock_held(gpuvm, obj);
list_del(&vm_bo->list.entry.gem);
if (ops && ops->vm_bo_free)
@@ -1533,7 +1602,8 @@ drm_gpuvm_bo_destroy(struct kref *kref)
* If the reference count drops to zero, the &gpuvm_bo is destroyed, which
* includes removing it from the GEMs gpuva list. Hence, if a call to this
* function can potentially let the reference count drop to zero the caller must
- * hold the dma-resv or driver specific GEM gpuva lock.
+ * hold the lock that the GEM uses for its gpuva list (either the GEM's
+ * dma-resv or gpuva.lock mutex).
*
* This function may only be called from non-atomic context.
*
@@ -1557,7 +1627,7 @@ __drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
{
struct drm_gpuvm_bo *vm_bo;
- drm_gem_gpuva_assert_lock_held(obj);
+ drm_gem_gpuva_assert_lock_held(gpuvm, obj);
drm_gem_for_each_gpuvm_bo(vm_bo, obj)
if (vm_bo->vm == gpuvm)
return vm_bo;
@@ -1588,7 +1658,7 @@ drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
EXPORT_SYMBOL_GPL(drm_gpuvm_bo_find);
/**
- * drm_gpuvm_bo_obtain() - obtains and instance of the &drm_gpuvm_bo for the
+ * drm_gpuvm_bo_obtain() - obtains an instance of the &drm_gpuvm_bo for the
* given &drm_gpuvm and &drm_gem_object
* @gpuvm: The &drm_gpuvm the @obj is mapped in.
* @obj: The &drm_gem_object being mapped in the @gpuvm.
@@ -1616,7 +1686,7 @@ drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
if (!vm_bo)
return ERR_PTR(-ENOMEM);
- drm_gem_gpuva_assert_lock_held(obj);
+ drm_gem_gpuva_assert_lock_held(gpuvm, obj);
list_add_tail(&vm_bo->list.entry.gem, &obj->gpuva.list);
return vm_bo;
@@ -1624,7 +1694,7 @@ drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain);
/**
- * drm_gpuvm_bo_obtain_prealloc() - obtains and instance of the &drm_gpuvm_bo
+ * drm_gpuvm_bo_obtain_prealloc() - obtains an instance of the &drm_gpuvm_bo
* for the given &drm_gpuvm and &drm_gem_object
* @__vm_bo: A pre-allocated struct drm_gpuvm_bo.
*
@@ -1652,7 +1722,7 @@ drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *__vm_bo)
return vm_bo;
}
- drm_gem_gpuva_assert_lock_held(obj);
+ drm_gem_gpuva_assert_lock_held(gpuvm, obj);
list_add_tail(&__vm_bo->list.entry.gem, &obj->gpuva.list);
return __vm_bo;
@@ -1688,7 +1758,7 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_bo_extobj_add);
* @vm_bo: the &drm_gpuvm_bo to add or remove
* @evict: indicates whether the object is evicted
*
- * Adds a &drm_gpuvm_bo to or removes it from the &drm_gpuvms evicted list.
+ * Adds a &drm_gpuvm_bo to or removes it from the &drm_gpuvm's evicted list.
*/
void
drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict)
@@ -1790,7 +1860,7 @@ __drm_gpuva_remove(struct drm_gpuva *va)
* drm_gpuva_remove() - remove a &drm_gpuva
* @va: the &drm_gpuva to remove
*
- * This removes the given &va from the underlaying tree.
+ * This removes the given &va from the underlying tree.
*
* It is safe to use this function using the safe versions of iterating the GPU
* VA space, such as drm_gpuvm_for_each_va_safe() and
@@ -1824,8 +1894,7 @@ EXPORT_SYMBOL_GPL(drm_gpuva_remove);
* reference of the latter is taken.
*
* This function expects the caller to protect the GEM's GPUVA list against
- * concurrent access using either the GEMs dma_resv lock or a driver specific
- * lock set through drm_gem_gpuva_set_lock().
+ * concurrent access using either the GEM's dma-resv or gpuva.lock mutex.
*/
void
drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo)
@@ -1840,7 +1909,7 @@ drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo)
va->vm_bo = drm_gpuvm_bo_get(vm_bo);
- drm_gem_gpuva_assert_lock_held(obj);
+ drm_gem_gpuva_assert_lock_held(gpuvm, obj);
list_add_tail(&va->gem.entry, &vm_bo->list.gpuva);
}
EXPORT_SYMBOL_GPL(drm_gpuva_link);
@@ -1860,8 +1929,7 @@ EXPORT_SYMBOL_GPL(drm_gpuva_link);
* the latter is dropped.
*
* This function expects the caller to protect the GEM's GPUVA list against
- * concurrent access using either the GEMs dma_resv lock or a driver specific
- * lock set through drm_gem_gpuva_set_lock().
+ * concurrent access using either the GEM's dma-resv or gpuva.lock mutex.
*/
void
drm_gpuva_unlink(struct drm_gpuva *va)
@@ -1872,7 +1940,7 @@ drm_gpuva_unlink(struct drm_gpuva *va)
if (unlikely(!obj))
return;
- drm_gem_gpuva_assert_lock_held(obj);
+ drm_gem_gpuva_assert_lock_held(va->vm, obj);
list_del_init(&va->gem.entry);
va->vm_bo = NULL;
@@ -2054,16 +2122,18 @@ EXPORT_SYMBOL_GPL(drm_gpuva_unmap);
static int
op_map_cb(const struct drm_gpuvm_ops *fn, void *priv,
- u64 addr, u64 range,
- struct drm_gem_object *obj, u64 offset)
+ const struct drm_gpuvm_map_req *req)
{
struct drm_gpuva_op op = {};
+ if (!req)
+ return 0;
+
op.op = DRM_GPUVA_OP_MAP;
- op.map.va.addr = addr;
- op.map.va.range = range;
- op.map.gem.obj = obj;
- op.map.gem.offset = offset;
+ op.map.va.addr = req->map.va.addr;
+ op.map.va.range = req->map.va.range;
+ op.map.gem.obj = req->map.gem.obj;
+ op.map.gem.offset = req->map.gem.offset;
return fn->sm_step_map(&op, priv);
}
@@ -2088,10 +2158,13 @@ op_remap_cb(const struct drm_gpuvm_ops *fn, void *priv,
static int
op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv,
- struct drm_gpuva *va, bool merge)
+ struct drm_gpuva *va, bool merge, bool madvise)
{
struct drm_gpuva_op op = {};
+ if (madvise)
+ return 0;
+
op.op = DRM_GPUVA_OP_UNMAP;
op.unmap.va = va;
op.unmap.keep = merge;
@@ -2102,10 +2175,15 @@ op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv,
static int
__drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
const struct drm_gpuvm_ops *ops, void *priv,
- u64 req_addr, u64 req_range,
- struct drm_gem_object *req_obj, u64 req_offset)
+ const struct drm_gpuvm_map_req *req,
+ bool madvise)
{
+ struct drm_gem_object *req_obj = req->map.gem.obj;
+ const struct drm_gpuvm_map_req *op_map = madvise ? NULL : req;
struct drm_gpuva *va, *next;
+ u64 req_offset = req->map.gem.offset;
+ u64 req_range = req->map.va.range;
+ u64 req_addr = req->map.va.addr;
u64 req_end = req_addr + req_range;
int ret;
@@ -2120,19 +2198,22 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
u64 end = addr + range;
bool merge = !!va->gem.obj;
+ if (madvise && obj)
+ continue;
+
if (addr == req_addr) {
merge &= obj == req_obj &&
offset == req_offset;
if (end == req_end) {
- ret = op_unmap_cb(ops, priv, va, merge);
+ ret = op_unmap_cb(ops, priv, va, merge, madvise);
if (ret)
return ret;
break;
}
if (end < req_end) {
- ret = op_unmap_cb(ops, priv, va, merge);
+ ret = op_unmap_cb(ops, priv, va, merge, madvise);
if (ret)
return ret;
continue;
@@ -2153,6 +2234,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, NULL, &n, &u);
if (ret)
return ret;
+
+ if (madvise)
+ op_map = req;
break;
}
} else if (addr < req_addr) {
@@ -2173,6 +2257,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, &p, NULL, &u);
if (ret)
return ret;
+
+ if (madvise)
+ op_map = req;
break;
}
@@ -2180,6 +2267,18 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, &p, NULL, &u);
if (ret)
return ret;
+
+ if (madvise) {
+ struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = req_addr,
+ .map.va.range = end - req_addr,
+ };
+
+ ret = op_map_cb(ops, priv, &map_req);
+ if (ret)
+ return ret;
+ }
+
continue;
}
@@ -2195,6 +2294,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, &p, &n, &u);
if (ret)
return ret;
+
+ if (madvise)
+ op_map = req;
break;
}
} else if (addr > req_addr) {
@@ -2203,16 +2305,18 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
(addr - req_addr);
if (end == req_end) {
- ret = op_unmap_cb(ops, priv, va, merge);
+ ret = op_unmap_cb(ops, priv, va, merge, madvise);
if (ret)
return ret;
+
break;
}
if (end < req_end) {
- ret = op_unmap_cb(ops, priv, va, merge);
+ ret = op_unmap_cb(ops, priv, va, merge, madvise);
if (ret)
return ret;
+
continue;
}
@@ -2231,14 +2335,20 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, NULL, &n, &u);
if (ret)
return ret;
+
+ if (madvise) {
+ struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = addr,
+ .map.va.range = req_end - addr,
+ };
+
+ return op_map_cb(ops, priv, &map_req);
+ }
break;
}
}
}
-
- return op_map_cb(ops, priv,
- req_addr, req_range,
- req_obj, req_offset);
+ return op_map_cb(ops, priv, op_map);
}
static int
@@ -2290,7 +2400,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
if (ret)
return ret;
} else {
- ret = op_unmap_cb(ops, priv, va, false);
+ ret = op_unmap_cb(ops, priv, va, false, false);
if (ret)
return ret;
}
@@ -2303,10 +2413,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
* drm_gpuvm_sm_map() - calls the &drm_gpuva_op split/merge steps
* @gpuvm: the &drm_gpuvm representing the GPU VA space
* @priv: pointer to a driver private data structure
- * @req_addr: the start address of the new mapping
- * @req_range: the range of the new mapping
- * @req_obj: the &drm_gem_object to map
- * @req_offset: the offset within the &drm_gem_object
+ * @req: pointer to struct drm_gpuvm_map_req
*
* This function iterates the given range of the GPU VA space. It utilizes the
* &drm_gpuvm_ops to call back into the driver providing the split and merge
@@ -2333,8 +2440,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
*/
int
drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
- u64 req_addr, u64 req_range,
- struct drm_gem_object *req_obj, u64 req_offset)
+ const struct drm_gpuvm_map_req *req)
{
const struct drm_gpuvm_ops *ops = gpuvm->ops;
@@ -2343,9 +2449,7 @@ drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
ops->sm_step_unmap)))
return -EINVAL;
- return __drm_gpuvm_sm_map(gpuvm, ops, priv,
- req_addr, req_range,
- req_obj, req_offset);
+ return __drm_gpuvm_sm_map(gpuvm, ops, priv, req, false);
}
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
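
With the new signature, the former four scalar arguments travel in a struct
drm_gpuvm_map_req; a minimal sketch of an updated caller:

    struct drm_gpuvm_map_req req = {
            .map.va.addr = addr,
            .map.va.range = range,
            .map.gem.obj = obj,
            .map.gem.offset = offset,
    };

    err = drm_gpuvm_sm_map(gpuvm, driver_priv, &req);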
@@ -2358,7 +2462,7 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
*
* This function iterates the given range of the GPU VA space. It utilizes the
* &drm_gpuvm_ops to call back into the driver providing the operations to
- * unmap and, if required, split existent mappings.
+ * unmap and, if required, split existing mappings.
*
* Drivers may use these callbacks to update the GPU VA space right away within
* the callback. In case the driver decides to copy and store the operations for
@@ -2421,16 +2525,13 @@ static const struct drm_gpuvm_ops lock_ops = {
* @gpuvm: the &drm_gpuvm representing the GPU VA space
* @exec: the &drm_exec locking context
* @num_fences: for newly mapped objects, the # of fences to reserve
- * @req_addr: the start address of the range to unmap
- * @req_range: the range of the mappings to unmap
- * @req_obj: the &drm_gem_object to map
- * @req_offset: the offset within the &drm_gem_object
+ * @req: pointer to struct drm_gpuvm_map_req
*
* This function locks (drm_exec_lock_obj()) objects that will be unmapped/
* remapped, and locks+prepares (drm_exec_prepare_object()) objects that
* will be newly mapped.
*
- * The expected usage is:
+ * The expected usage is::
*
* vm_bind {
* struct drm_exec exec;
@@ -2445,9 +2546,7 @@ static const struct drm_gpuvm_ops lock_ops = {
* ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->addr, op->range);
* break;
* case DRIVER_OP_MAP:
- * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences,
- * op->addr, op->range,
- * obj, op->obj_offset);
+ * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences, &req);
* break;
* }
*
@@ -2473,23 +2572,22 @@ static const struct drm_gpuvm_ops lock_ops = {
* required without the earlier DRIVER_OP_MAP. This is safe because we've
* already locked the GEM object in the earlier DRIVER_OP_MAP step.
*
- * Returns: 0 on success or a negative error codec
+ * Returns: 0 on success or a negative error code
*/
int
drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
struct drm_exec *exec, unsigned int num_fences,
- u64 req_addr, u64 req_range,
- struct drm_gem_object *req_obj, u64 req_offset)
+ struct drm_gpuvm_map_req *req)
{
+ struct drm_gem_object *req_obj = req->map.gem.obj;
+
if (req_obj) {
int ret = drm_exec_prepare_obj(exec, req_obj, num_fences);
if (ret)
return ret;
}
- return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec,
- req_addr, req_range,
- req_obj, req_offset);
+ return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec, req, false);
}
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_exec_lock);
@@ -2608,21 +2706,50 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
.sm_step_unmap = drm_gpuva_sm_step,
};
+static struct drm_gpuva_ops *
+__drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
+ const struct drm_gpuvm_map_req *req,
+ bool madvise)
+{
+ struct drm_gpuva_ops *ops;
+ struct {
+ struct drm_gpuvm *vm;
+ struct drm_gpuva_ops *ops;
+ } args;
+ int ret;
+
+ ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+ if (unlikely(!ops))
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&ops->list);
+
+ args.vm = gpuvm;
+ args.ops = ops;
+
+ ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args, req, madvise);
+ if (ret)
+ goto err_free_ops;
+
+ return ops;
+
+err_free_ops:
+ drm_gpuva_ops_free(gpuvm, ops);
+ return ERR_PTR(ret);
+}
+
/**
* drm_gpuvm_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge
* @gpuvm: the &drm_gpuvm representing the GPU VA space
- * @req_addr: the start address of the new mapping
- * @req_range: the range of the new mapping
- * @req_obj: the &drm_gem_object to map
- * @req_offset: the offset within the &drm_gem_object
+ * @req: map request arguments
*
* This function creates a list of operations to perform splitting and merging
- * of existent mapping(s) with the newly requested one.
+ * of existing mapping(s) with the newly requested one.
*
* The list can be iterated with &drm_gpuva_for_each_op and must be processed
* in the given order. It can contain map, unmap and remap operations, but it
* also can be empty if no operation is required, e.g. if the requested mapping
- * already exists is the exact same way.
+ * already exists in the exact same way.
*
* There can be an arbitrary amount of unmap operations, a maximum of two remap
* operations and a single map operation. The latter one represents the original
@@ -2642,40 +2769,50 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
*/
struct drm_gpuva_ops *
drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
- u64 req_addr, u64 req_range,
- struct drm_gem_object *req_obj, u64 req_offset)
+ const struct drm_gpuvm_map_req *req)
{
- struct drm_gpuva_ops *ops;
- struct {
- struct drm_gpuvm *vm;
- struct drm_gpuva_ops *ops;
- } args;
- int ret;
-
- ops = kzalloc(sizeof(*ops), GFP_KERNEL);
- if (unlikely(!ops))
- return ERR_PTR(-ENOMEM);
-
- INIT_LIST_HEAD(&ops->list);
-
- args.vm = gpuvm;
- args.ops = ops;
-
- ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args,
- req_addr, req_range,
- req_obj, req_offset);
- if (ret)
- goto err_free_ops;
-
- return ops;
-
-err_free_ops:
- drm_gpuva_ops_free(gpuvm, ops);
- return ERR_PTR(ret);
+ return __drm_gpuvm_sm_map_ops_create(gpuvm, req, false);
}
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_ops_create);
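
A sketch of the updated calling convention, assuming the request struct from
the caller's map operation; processing of the returned ops follows the DOC
examples earlier in this file:

    struct drm_gpuva_ops *ops;
    struct drm_gpuva_op *op;

    ops = drm_gpuvm_sm_map_ops_create(gpuvm, &req);
    if (IS_ERR(ops))
            return PTR_ERR(ops);

    drm_gpuva_for_each_op(op, ops) {
            /* apply op->op: map, remap or unmap */
    }

    drm_gpuva_ops_free(gpuvm, ops);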
/**
+ * drm_gpuvm_madvise_ops_create() - creates the &drm_gpuva_ops to split
+ * @gpuvm: the &drm_gpuvm representing the GPU VA space
+ * @req: map request arguments
+ *
+ * This function creates a list of operations to perform splitting
+ * of existing mapping(s) at start or end, based on the map request.
+ *
+ * The list can be iterated with &drm_gpuva_for_each_op and must be processed
+ * in the given order. It can contain map and remap operations, but it
+ * also can be empty if no operation is required, e.g. if the requested mapping
+ * already exists in the exact same way.
+ *
+ * There will be no unmap operations, a maximum of two remap operations and two
+ * map operations. The two map operations correspond to: one from start to the
+ * end of the drm_gpuva containing start, and another from the start of the
+ * drm_gpuva containing end to end.
+ *
+ * Note that before calling this function again with another mapping request it
+ * is necessary to update the &drm_gpuvm's view of the GPU VA space. The
+ * previously obtained operations must be either processed or abandoned. To
+ * update the &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
+ * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
+ * used.
+ *
+ * After the caller finished processing the returned &drm_gpuva_ops, they must
+ * be freed with &drm_gpuva_ops_free.
+ *
+ * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
+ */
+struct drm_gpuva_ops *
+drm_gpuvm_madvise_ops_create(struct drm_gpuvm *gpuvm,
+ const struct drm_gpuvm_map_req *req)
+{
+ return __drm_gpuvm_sm_map_ops_create(gpuvm, req, true);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_madvise_ops_create);
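
A sketch for madvise-style splitting over [start, end); no GEM object is
supplied since this path only splits existing mappings and never unmaps:

    struct drm_gpuvm_map_req req = {
            .map.va.addr = start,
            .map.va.range = end - start,
    };
    struct drm_gpuva_ops *ops;
    struct drm_gpuva_op *op;

    ops = drm_gpuvm_madvise_ops_create(gpuvm, &req);
    if (IS_ERR(ops))
            return PTR_ERR(ops);

    drm_gpuva_for_each_op(op, ops) {
            /* only DRM_GPUVA_OP_MAP and DRM_GPUVA_OP_REMAP appear here */
    }

    drm_gpuva_ops_free(gpuvm, ops);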
+
+/**
* drm_gpuvm_sm_unmap_ops_create() - creates the &drm_gpuva_ops to split on
* unmap
* @gpuvm: the &drm_gpuvm representing the GPU VA space
@@ -2804,8 +2941,8 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_prefetch_ops_create);
* After the caller finished processing the returned &drm_gpuva_ops, they must
* be freed with &drm_gpuva_ops_free.
*
- * It is the callers responsibility to protect the GEMs GPUVA list against
- * concurrent access using the GEMs dma_resv lock.
+ * This function expects the caller to protect the GEM's GPUVA list against
+ * concurrent access using either the GEM's dma-resv or gpuva.lock mutex.
*
* Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
*/
@@ -2817,7 +2954,7 @@ drm_gpuvm_bo_unmap_ops_create(struct drm_gpuvm_bo *vm_bo)
struct drm_gpuva *va;
int ret;
- drm_gem_gpuva_assert_lock_held(vm_bo->obj);
+ drm_gem_gpuva_assert_lock_held(vm_bo->vm, vm_bo->obj);
ops = kzalloc(sizeof(*ops), GFP_KERNEL);
if (!ops)
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index e79c3c623c9a..5a3bed48ab1f 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -85,6 +85,8 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
+int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
+ struct dma_buf *dma_buf, uint32_t handle);
void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
uint32_t handle);
@@ -170,6 +172,8 @@ int drm_gem_close_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
int drm_gem_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index f593dc569d31..d8a24875a7ba 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -653,6 +653,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_CHANGE_HANDLE, drm_gem_change_handle_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, 0),
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 3a9b3278a6e3..a712e177b350 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -773,41 +773,13 @@ ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload,
EXPORT_SYMBOL(mipi_dsi_generic_write);
/**
- * mipi_dsi_generic_write_chatty() - mipi_dsi_generic_write() w/ an error log
- * @dsi: DSI peripheral device
- * @payload: buffer containing the payload
- * @size: size of payload buffer
- *
- * Like mipi_dsi_generic_write() but includes a dev_err()
- * call for you and returns 0 upon success, not the number of bytes sent.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-int mipi_dsi_generic_write_chatty(struct mipi_dsi_device *dsi,
- const void *payload, size_t size)
-{
- struct device *dev = &dsi->dev;
- ssize_t ret;
-
- ret = mipi_dsi_generic_write(dsi, payload, size);
- if (ret < 0) {
- dev_err(dev, "sending generic data %*ph failed: %zd\n",
- (int)size, payload, ret);
- return ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(mipi_dsi_generic_write_chatty);
-
-/**
- * mipi_dsi_generic_write_multi() - mipi_dsi_generic_write_chatty() w/ accum_err
+ * mipi_dsi_generic_write_multi() - mipi_dsi_generic_write() w/ accum_err
* @ctx: Context for multiple DSI transactions
* @payload: buffer containing the payload
* @size: size of payload buffer
*
- * Like mipi_dsi_generic_write_chatty() but deals with errors in a way that
- * makes it convenient to make several calls in a row.
+ * A wrapper around mipi_dsi_generic_write() that deals with errors in a way
+ * that makes it convenient to make several calls in a row.
*/
void mipi_dsi_generic_write_multi(struct mipi_dsi_multi_context *ctx,
const void *payload, size_t size)
@@ -829,6 +801,30 @@ void mipi_dsi_generic_write_multi(struct mipi_dsi_multi_context *ctx,
EXPORT_SYMBOL(mipi_dsi_generic_write_multi);
/**
+ * mipi_dsi_dual_generic_write_multi() - mipi_dsi_generic_write_multi() for
+ * two dsi channels, one after the other
+ * @ctx: Context for multiple DSI transactions
+ * @dsi1: First dsi channel to write buffer to
+ * @dsi2: Second dsi channel to write buffer to
+ * @payload: Buffer containing the payload
+ * @size: Size of payload buffer
+ *
+ * A wrapper around mipi_dsi_generic_write_multi() that allows the user to
+ * conveniently write to two dsi channels, one after the other.
+ */
+void mipi_dsi_dual_generic_write_multi(struct mipi_dsi_multi_context *ctx,
+ struct mipi_dsi_device *dsi1,
+ struct mipi_dsi_device *dsi2,
+ const void *payload, size_t size)
+{
+ ctx->dsi = dsi1;
+ mipi_dsi_generic_write_multi(ctx, payload, size);
+ ctx->dsi = dsi2;
+ mipi_dsi_generic_write_multi(ctx, payload, size);
+}
+EXPORT_SYMBOL(mipi_dsi_dual_generic_write_multi);
+
+/**
* mipi_dsi_generic_read() - receive data using a generic read packet
* @dsi: DSI peripheral device
* @params: buffer containing the request parameters
@@ -1008,6 +1004,30 @@ void mipi_dsi_dcs_write_buffer_multi(struct mipi_dsi_multi_context *ctx,
EXPORT_SYMBOL(mipi_dsi_dcs_write_buffer_multi);
/**
+ * mipi_dsi_dual_dcs_write_buffer_multi - mipi_dsi_dcs_write_buffer_multi() for
+ * two dsi channels, one after the other
+ * @ctx: Context for multiple DSI transactions
+ * @dsi1: First dsi channel to write buffer to
+ * @dsi2: Second dsi channel to write buffer to
+ * @data: Buffer containing data to be transmitted
+ * @len: Size of transmission buffer
+ *
+ * A wrapper around mipi_dsi_dcs_write_buffer_multi() that allows the user to
+ * conveniently write to two dsi channels, one after the other.
+ */
+void mipi_dsi_dual_dcs_write_buffer_multi(struct mipi_dsi_multi_context *ctx,
+ struct mipi_dsi_device *dsi1,
+ struct mipi_dsi_device *dsi2,
+ const void *data, size_t len)
+{
+ ctx->dsi = dsi1;
+ mipi_dsi_dcs_write_buffer_multi(ctx, data, len);
+ ctx->dsi = dsi2;
+ mipi_dsi_dcs_write_buffer_multi(ctx, data, len);
+}
+EXPORT_SYMBOL(mipi_dsi_dual_dcs_write_buffer_multi);
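
A sketch of a hypothetical dual-channel panel sequence built on the new
helpers; dsi1/dsi2 and the payload bytes are illustrative only:

    struct mipi_dsi_multi_context ctx = { .dsi = dsi1 };
    static const u8 unlock[] = { 0xb0, 0x04 };

    mipi_dsi_dual_dcs_write_buffer_multi(&ctx, dsi1, dsi2,
                                         unlock, sizeof(unlock));
    if (ctx.accum_err)
            return ctx.accum_err;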
+
+/**
* mipi_dsi_dcs_write() - send DCS write command
* @dsi: DSI peripheral device
* @cmd: DCS command
@@ -1077,6 +1097,43 @@ ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
EXPORT_SYMBOL(mipi_dsi_dcs_read);
/**
+ * mipi_dsi_dcs_read_multi() - mipi_dsi_dcs_read() w/ accum_err
+ * @ctx: Context for multiple DSI transactions
+ * @cmd: DCS command
+ * @data: buffer in which to receive data
+ * @len: size of receive buffer
+ *
+ * Like mipi_dsi_dcs_read() but deals with errors in a way that makes it
+ * convenient to make several calls in a row.
+ */
+void mipi_dsi_dcs_read_multi(struct mipi_dsi_multi_context *ctx, u8 cmd,
+ void *data, size_t len)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ struct mipi_dsi_msg msg = {
+ .channel = dsi->channel,
+ .type = MIPI_DSI_DCS_READ,
+ .tx_buf = &cmd,
+ .tx_len = 1,
+ .rx_buf = data,
+ .rx_len = len
+ };
+ ssize_t ret;
+
+ if (ctx->accum_err)
+ return;
+
+ ret = mipi_dsi_device_transfer(dsi, &msg);
+ if (ret < 0) {
+ ctx->accum_err = ret;
+ dev_err(dev, "dcs read with command %#x failed: %d\n", cmd,
+ ctx->accum_err);
+ }
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_read_multi);
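
A sketch pairing the new read helper with the accumulated-error convention;
MIPI_DCS_GET_POWER_MODE is the standard DCS command from
include/video/mipi_display.h:

    struct mipi_dsi_multi_context ctx = { .dsi = dsi };
    u8 power_mode;

    mipi_dsi_dcs_read_multi(&ctx, MIPI_DCS_GET_POWER_MODE,
                            &power_mode, sizeof(power_mode));
    if (ctx.accum_err)
            return ctx.accum_err;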
+
+/**
* mipi_dsi_dcs_nop() - send DCS nop packet
* @dsi: DSI peripheral device
*
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index d0183dea7703..4f65ce729a47 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -55,7 +55,8 @@ EXPORT_SYMBOL(drm_of_crtc_port_mask);
* and generate the DRM mask of CRTCs which may be attached to this
* encoder.
*
- * See Documentation/devicetree/bindings/graph.txt for the bindings.
+ * See https://github.com/devicetree-org/dt-schema/blob/main/dtschema/schemas/graph.yaml
+ * for the bindings.
*/
uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
struct device_node *port)
@@ -106,7 +107,9 @@ EXPORT_SYMBOL_GPL(drm_of_component_match_add);
* Parse the platform device OF node and bind all the components associated
* with the master. Interface ports are added before the encoders in order to
* satisfy their .bind requirements
- * See Documentation/devicetree/bindings/graph.txt for the bindings.
+ *
+ * See https://github.com/devicetree-org/dt-schema/blob/main/dtschema/schemas/graph.yaml
+ * for the bindings.
*
* Returns zero if successful, or one of the standard error codes if it fails.
*/
diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c
index 1da55322af12..22c44807e3fe 100644
--- a/drivers/gpu/drm/drm_pagemap.c
+++ b/drivers/gpu/drm/drm_pagemap.c
@@ -202,7 +202,7 @@ static void drm_pagemap_get_devmem_page(struct page *page,
/**
* drm_pagemap_migrate_map_pages() - Map migration pages for GPU SVM migration
* @dev: The device for which the pages are being mapped
- * @dma_addr: Array to store DMA addresses corresponding to mapped pages
+ * @pagemap_addr: Array to store DMA information corresponding to mapped pages
* @migrate_pfn: Array of migrate page frame numbers to map
* @npages: Number of pages to map
* @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
@@ -215,25 +215,39 @@ static void drm_pagemap_get_devmem_page(struct page *page,
* Returns: 0 on success, -EFAULT if an error occurs during mapping.
*/
static int drm_pagemap_migrate_map_pages(struct device *dev,
- dma_addr_t *dma_addr,
+ struct drm_pagemap_addr *pagemap_addr,
unsigned long *migrate_pfn,
unsigned long npages,
enum dma_data_direction dir)
{
unsigned long i;
- for (i = 0; i < npages; ++i) {
+ for (i = 0; i < npages;) {
struct page *page = migrate_pfn_to_page(migrate_pfn[i]);
+ dma_addr_t dma_addr;
+ struct folio *folio;
+ unsigned int order = 0;
if (!page)
- continue;
+ goto next;
if (WARN_ON_ONCE(is_zone_device_page(page)))
return -EFAULT;
- dma_addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
- if (dma_mapping_error(dev, dma_addr[i]))
+ folio = page_folio(page);
+ order = folio_order(folio);
+
+ dma_addr = dma_map_page(dev, page, 0, page_size(page), dir);
+ if (dma_mapping_error(dev, dma_addr))
return -EFAULT;
+
+ pagemap_addr[i] =
+ drm_pagemap_addr_encode(dma_addr,
+ DRM_INTERCONNECT_SYSTEM,
+ order, dir);
+
+next:
+ i += NR_PAGES(order);
}
return 0;
@@ -242,7 +256,7 @@ static int drm_pagemap_migrate_map_pages(struct device *dev,
/**
* drm_pagemap_migrate_unmap_pages() - Unmap pages previously mapped for GPU SVM migration
* @dev: The device for which the pages were mapped
- * @dma_addr: Array of DMA addresses corresponding to mapped pages
+ * @pagemap_addr: Array of DMA information corresponding to mapped pages
* @npages: Number of pages to unmap
* @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
*
@@ -251,17 +265,20 @@ static int drm_pagemap_migrate_map_pages(struct device *dev,
* if it's valid and not already unmapped, and unmaps the corresponding page.
*/
static void drm_pagemap_migrate_unmap_pages(struct device *dev,
- dma_addr_t *dma_addr,
+ struct drm_pagemap_addr *pagemap_addr,
unsigned long npages,
enum dma_data_direction dir)
{
unsigned long i;
- for (i = 0; i < npages; ++i) {
- if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i]))
- continue;
+ for (i = 0; i < npages;) {
+ if (!pagemap_addr[i].addr || dma_mapping_error(dev, pagemap_addr[i].addr))
+ goto next;
- dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
+ dma_unmap_page(dev, pagemap_addr[i].addr, PAGE_SIZE << pagemap_addr[i].order, dir);
+
+next:
+ i += NR_PAGES(pagemap_addr[i].order);
}
}
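
Both loops above now stride by folio size rather than single pages; the
pattern, assuming NR_PAGES(order) expands to the page count of a folio of
that order:

    for (i = 0; i < npages;) {
            unsigned int order = pagemap_addr[i].order;

            /* operate on one 2^order page chunk starting at index i */
            i += NR_PAGES(order);
    }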
@@ -314,7 +331,7 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
struct vm_area_struct *vas;
struct drm_pagemap_zdd *zdd = NULL;
struct page **pages;
- dma_addr_t *dma_addr;
+ struct drm_pagemap_addr *pagemap_addr;
void *buf;
int err;
@@ -340,14 +357,14 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
goto err_out;
}
- buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
+ buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*pagemap_addr) +
sizeof(*pages), GFP_KERNEL);
if (!buf) {
err = -ENOMEM;
goto err_out;
}
- dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
- pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;
+ pagemap_addr = buf + (2 * sizeof(*migrate.src) * npages);
+ pages = buf + (2 * sizeof(*migrate.src) + sizeof(*pagemap_addr)) * npages;
zdd = drm_pagemap_zdd_alloc(pgmap_owner);
if (!zdd) {
@@ -377,8 +394,9 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
if (err)
goto err_finalize;
- err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, dma_addr,
+ err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, pagemap_addr,
migrate.src, npages, DMA_TO_DEVICE);
+
if (err)
goto err_finalize;
@@ -390,7 +408,7 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
drm_pagemap_get_devmem_page(page, zdd);
}
- err = ops->copy_to_devmem(pages, dma_addr, npages);
+ err = ops->copy_to_devmem(pages, pagemap_addr, npages);
if (err)
goto err_finalize;
@@ -404,7 +422,7 @@ err_finalize:
drm_pagemap_migration_unlock_put_pages(npages, migrate.dst);
migrate_vma_pages(&migrate);
migrate_vma_finalize(&migrate);
- drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
+ drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, pagemap_addr, npages,
DMA_TO_DEVICE);
err_free:
if (zdd)
@@ -442,54 +460,80 @@ static int drm_pagemap_migrate_populate_ram_pfn(struct vm_area_struct *vas,
{
unsigned long i;
- for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
- struct page *page, *src_page;
+ for (i = 0; i < npages;) {
+ struct page *page = NULL, *src_page;
+ struct folio *folio;
+ unsigned int order = 0;
if (!(src_mpfn[i] & MIGRATE_PFN_MIGRATE))
- continue;
+ goto next;
src_page = migrate_pfn_to_page(src_mpfn[i]);
if (!src_page)
- continue;
+ goto next;
if (fault_page) {
if (src_page->zone_device_data !=
fault_page->zone_device_data)
- continue;
+ goto next;
}
+ order = folio_order(page_folio(src_page));
+
+ /* TODO: Support fallback to single pages if THP allocation fails */
if (vas)
- page = alloc_page_vma(GFP_HIGHUSER, vas, addr);
+ folio = vma_alloc_folio(GFP_HIGHUSER, order, vas, addr);
else
- page = alloc_page(GFP_HIGHUSER);
+ folio = folio_alloc(GFP_HIGHUSER, order);
- if (!page)
+ if (!folio)
goto free_pages;
+ page = folio_page(folio, 0);
mpfn[i] = migrate_pfn(page_to_pfn(page));
+
+next:
+ if (page)
+ addr += page_size(page);
+ else
+ addr += PAGE_SIZE;
+
+ i += NR_PAGES(order);
}
- for (i = 0; i < npages; ++i) {
+ for (i = 0; i < npages;) {
struct page *page = migrate_pfn_to_page(mpfn[i]);
+ unsigned int order = 0;
if (!page)
- continue;
+ goto next_lock;
- WARN_ON_ONCE(!trylock_page(page));
- ++*mpages;
+ WARN_ON_ONCE(!folio_trylock(page_folio(page)));
+
+ order = folio_order(page_folio(page));
+ *mpages += NR_PAGES(order);
+
+next_lock:
+ i += NR_PAGES(order);
}
return 0;
free_pages:
- for (i = 0; i < npages; ++i) {
+ for (i = 0; i < npages;) {
struct page *page = migrate_pfn_to_page(mpfn[i]);
+ unsigned int order = 0;
if (!page)
- continue;
+ goto next_put;
put_page(page);
mpfn[i] = 0;
+
+ order = folio_order(page_folio(page));
+
+next_put:
+ i += NR_PAGES(order);
}
return -ENOMEM;
}
@@ -509,7 +553,7 @@ int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation)
unsigned long npages, mpages = 0;
struct page **pages;
unsigned long *src, *dst;
- dma_addr_t *dma_addr;
+ struct drm_pagemap_addr *pagemap_addr;
void *buf;
int i, err = 0;
unsigned int retry_count = 2;
@@ -520,7 +564,7 @@ retry:
if (!mmget_not_zero(devmem_allocation->mm))
return -EFAULT;
- buf = kvcalloc(npages, 2 * sizeof(*src) + sizeof(*dma_addr) +
+ buf = kvcalloc(npages, 2 * sizeof(*src) + sizeof(*pagemap_addr) +
sizeof(*pages), GFP_KERNEL);
if (!buf) {
err = -ENOMEM;
@@ -528,8 +572,8 @@ retry:
}
src = buf;
dst = buf + (sizeof(*src) * npages);
- dma_addr = buf + (2 * sizeof(*src) * npages);
- pages = buf + (2 * sizeof(*src) + sizeof(*dma_addr)) * npages;
+ pagemap_addr = buf + (2 * sizeof(*src) * npages);
+ pages = buf + (2 * sizeof(*src) + sizeof(*pagemap_addr)) * npages;
err = ops->populate_devmem_pfn(devmem_allocation, npages, src);
if (err)
@@ -544,7 +588,7 @@ retry:
if (err || !mpages)
goto err_finalize;
- err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, dma_addr,
+ err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, pagemap_addr,
dst, npages, DMA_FROM_DEVICE);
if (err)
goto err_finalize;
@@ -552,7 +596,7 @@ retry:
for (i = 0; i < npages; ++i)
pages[i] = migrate_pfn_to_page(src[i]);
- err = ops->copy_to_ram(pages, dma_addr, npages);
+ err = ops->copy_to_ram(pages, pagemap_addr, npages);
if (err)
goto err_finalize;
@@ -561,7 +605,7 @@ err_finalize:
drm_pagemap_migration_unlock_put_pages(npages, dst);
migrate_device_pages(src, dst, npages);
migrate_device_finalize(src, dst, npages);
- drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
+ drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, pagemap_addr, npages,
DMA_FROM_DEVICE);
err_free:
kvfree(buf);
@@ -612,7 +656,7 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
struct device *dev = NULL;
unsigned long npages, mpages = 0;
struct page **pages;
- dma_addr_t *dma_addr;
+ struct drm_pagemap_addr *pagemap_addr;
unsigned long start, end;
void *buf;
int i, err = 0;
@@ -637,14 +681,14 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
migrate.end = end;
npages = npages_in_range(start, end);
- buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
+ buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*pagemap_addr) +
sizeof(*pages), GFP_KERNEL);
if (!buf) {
err = -ENOMEM;
goto err_out;
}
- dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
- pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;
+ pagemap_addr = buf + (2 * sizeof(*migrate.src) * npages);
+ pages = buf + (2 * sizeof(*migrate.src) + sizeof(*pagemap_addr)) * npages;
migrate.vma = vas;
migrate.src = buf;
@@ -680,7 +724,7 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
if (err)
goto err_finalize;
- err = drm_pagemap_migrate_map_pages(dev, dma_addr, migrate.dst, npages,
+ err = drm_pagemap_migrate_map_pages(dev, pagemap_addr, migrate.dst, npages,
DMA_FROM_DEVICE);
if (err)
goto err_finalize;
@@ -688,7 +732,7 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
for (i = 0; i < npages; ++i)
pages[i] = migrate_pfn_to_page(migrate.src[i]);
- err = ops->copy_to_ram(pages, dma_addr, npages);
+ err = ops->copy_to_ram(pages, pagemap_addr, npages);
if (err)
goto err_finalize;
@@ -698,7 +742,7 @@ err_finalize:
migrate_vma_pages(&migrate);
migrate_vma_finalize(&migrate);
if (dev)
- drm_pagemap_migrate_unmap_pages(dev, dma_addr, npages,
+ drm_pagemap_migrate_unmap_pages(dev, pagemap_addr, npages,
DMA_FROM_DEVICE);
err_free:
kvfree(buf);
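
The dma_addr_t → struct drm_pagemap_addr conversion leaves the single-allocation layout intact: one kvcalloc() is carved into four npages-sized arrays, and because every offset is expressed through sizeof(*pagemap_addr), the arithmetic stays correct whatever size the new struct has. The carve-out pattern, lifted from the hunks above:

	buf = kvcalloc(npages, 2 * sizeof(*src) + sizeof(*pagemap_addr) +
		       sizeof(*pages), GFP_KERNEL);

	src          = buf;
	dst          = buf + (sizeof(*src) * npages);
	pagemap_addr = buf + (2 * sizeof(*src) * npages);
	pages        = buf + (2 * sizeof(*src) + sizeof(*pagemap_addr)) * npages;
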
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index c8bb28dccdc1..d1e6598ea3bc 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -134,6 +134,9 @@ void drm_panel_prepare(struct drm_panel *panel)
panel->prepared = true;
list_for_each_entry(follower, &panel->followers, list) {
+ if (!follower->funcs->panel_prepared)
+ continue;
+
ret = follower->funcs->panel_prepared(follower);
if (ret < 0)
dev_info(panel->dev, "%ps failed: %d\n",
@@ -179,6 +182,9 @@ void drm_panel_unprepare(struct drm_panel *panel)
mutex_lock(&panel->follower_lock);
list_for_each_entry(follower, &panel->followers, list) {
+ if (!follower->funcs->panel_unpreparing)
+ continue;
+
ret = follower->funcs->panel_unpreparing(follower);
if (ret < 0)
dev_info(panel->dev, "%ps failed: %d\n",
@@ -209,6 +215,7 @@ EXPORT_SYMBOL(drm_panel_unprepare);
*/
void drm_panel_enable(struct drm_panel *panel)
{
+ struct drm_panel_follower *follower;
int ret;
if (!panel)
@@ -219,10 +226,12 @@ void drm_panel_enable(struct drm_panel *panel)
return;
}
+ mutex_lock(&panel->follower_lock);
+
if (panel->funcs && panel->funcs->enable) {
ret = panel->funcs->enable(panel);
if (ret < 0)
- return;
+ goto exit;
}
panel->enabled = true;
@@ -230,6 +239,19 @@ void drm_panel_enable(struct drm_panel *panel)
if (ret < 0)
DRM_DEV_INFO(panel->dev, "failed to enable backlight: %d\n",
ret);
+
+ list_for_each_entry(follower, &panel->followers, list) {
+ if (!follower->funcs->panel_enabled)
+ continue;
+
+ ret = follower->funcs->panel_enabled(follower);
+ if (ret < 0)
+ dev_info(panel->dev, "%ps failed: %d\n",
+ follower->funcs->panel_enabled, ret);
+ }
+
+exit:
+ mutex_unlock(&panel->follower_lock);
}
EXPORT_SYMBOL(drm_panel_enable);
@@ -243,6 +265,7 @@ EXPORT_SYMBOL(drm_panel_enable);
*/
void drm_panel_disable(struct drm_panel *panel)
{
+ struct drm_panel_follower *follower;
int ret;
if (!panel)
@@ -262,6 +285,18 @@ void drm_panel_disable(struct drm_panel *panel)
return;
}
+ mutex_lock(&panel->follower_lock);
+
+ list_for_each_entry(follower, &panel->followers, list) {
+ if (!follower->funcs->panel_disabling)
+ continue;
+
+ ret = follower->funcs->panel_disabling(follower);
+ if (ret < 0)
+ dev_info(panel->dev, "%ps failed: %d\n",
+ follower->funcs->panel_disabling, ret);
+ }
+
ret = backlight_disable(panel->backlight);
if (ret < 0)
DRM_DEV_INFO(panel->dev, "failed to disable backlight: %d\n",
@@ -270,9 +305,12 @@ void drm_panel_disable(struct drm_panel *panel)
if (panel->funcs && panel->funcs->disable) {
ret = panel->funcs->disable(panel);
if (ret < 0)
- return;
+ goto exit;
}
panel->enabled = false;
+
+exit:
+ mutex_unlock(&panel->follower_lock);
}
EXPORT_SYMBOL(drm_panel_disable);
@@ -539,13 +577,13 @@ EXPORT_SYMBOL(drm_is_panel_follower);
* @follower_dev: The 'struct device' for the follower.
* @follower: The panel follower descriptor for the follower.
*
- * A panel follower is called right after preparing the panel and right before
- * unpreparing the panel. It's primary intention is to power on an associated
- * touchscreen, though it could be used for any similar devices. Multiple
- * devices are allowed the follow the same panel.
+ * A panel follower is called right after preparing/enabling the panel and right
+ * before unpreparing/disabling the panel. Its primary intention is to power on
+ * an associated touchscreen, though it could be used for any similar devices.
+ * Multiple devices are allowed to follow the same panel.
*
- * If a follower is added to a panel that's already been turned on, the
- * follower's prepare callback is called right away.
+ * If a follower is added to a panel that's already been prepared/enabled, the
+ * follower's prepared/enabled callback is called right away.
*
* The "panel" property of the follower points to the panel to be followed.
*
@@ -569,12 +607,18 @@ int drm_panel_add_follower(struct device *follower_dev,
mutex_lock(&panel->follower_lock);
list_add_tail(&follower->list, &panel->followers);
- if (panel->prepared) {
+ if (panel->prepared && follower->funcs->panel_prepared) {
ret = follower->funcs->panel_prepared(follower);
if (ret < 0)
dev_info(panel->dev, "%ps failed: %d\n",
follower->funcs->panel_prepared, ret);
}
+ if (panel->enabled && follower->funcs->panel_enabled) {
+ ret = follower->funcs->panel_enabled(follower);
+ if (ret < 0)
+ dev_info(panel->dev, "%ps failed: %d\n",
+ follower->funcs->panel_enabled, ret);
+ }
mutex_unlock(&panel->follower_lock);
@@ -587,7 +631,8 @@ EXPORT_SYMBOL(drm_panel_add_follower);
* @follower: The panel follower descriptor for the follower.
*
* Undo drm_panel_add_follower(). This includes calling the follower's
- * unprepare function if we're removed from a panel that's currently prepared.
+ * unpreparing/disabling function if we're removed from a panel that's currently
+ * prepared/enabled.
*
* Return: 0 or an error code.
*/
@@ -598,7 +643,13 @@ void drm_panel_remove_follower(struct drm_panel_follower *follower)
mutex_lock(&panel->follower_lock);
- if (panel->prepared) {
+ if (panel->enabled && follower->funcs->panel_disabling) {
+ ret = follower->funcs->panel_disabling(follower);
+ if (ret < 0)
+ dev_info(panel->dev, "%ps failed: %d\n",
+ follower->funcs->panel_disabling, ret);
+ }
+ if (panel->prepared && follower->funcs->panel_unpreparing) {
ret = follower->funcs->panel_unpreparing(follower);
if (ret < 0)
dev_info(panel->dev, "%ps failed: %d\n",
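
With NULL checks now in place at every call site, each drm_panel_follower hook is optional, so a follower implements only the transitions it cares about. A minimal sketch of a touchscreen-style follower (names hypothetical; the hook names come from the call sites above):

	static int ts_panel_prepared(struct drm_panel_follower *follower)
	{
		/* power up the device tied to this panel */
		return 0;
	}

	static int ts_panel_unpreparing(struct drm_panel_follower *follower)
	{
		/* power it down before the panel is unprepared */
		return 0;
	}

	static const struct drm_panel_follower_funcs ts_follower_funcs = {
		.panel_prepared    = ts_panel_prepared,
		.panel_unpreparing = ts_panel_unpreparing,
		/* .panel_enabled and .panel_disabling left NULL: skipped */
	};
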
diff --git a/drivers/gpu/drm/drm_panel_backlight_quirks.c b/drivers/gpu/drm/drm_panel_backlight_quirks.c
index 598f812b7cb3..537dc6dd0534 100644
--- a/drivers/gpu/drm/drm_panel_backlight_quirks.c
+++ b/drivers/gpu/drm/drm_panel_backlight_quirks.c
@@ -8,23 +8,26 @@
#include <drm/drm_edid.h>
#include <drm/drm_utils.h>
-struct drm_panel_min_backlight_quirk {
- struct {
- enum dmi_field field;
- const char * const value;
- } dmi_match;
+struct drm_panel_match {
+ enum dmi_field field;
+ const char * const value;
+};
+
+struct drm_get_panel_backlight_quirk {
+ struct drm_panel_match dmi_match;
+ struct drm_panel_match dmi_match_other;
struct drm_edid_ident ident;
- u8 min_brightness;
+ struct drm_panel_backlight_quirk quirk;
};
-static const struct drm_panel_min_backlight_quirk drm_panel_min_backlight_quirks[] = {
+static const struct drm_get_panel_backlight_quirk drm_panel_min_backlight_quirks[] = {
/* 13 inch matte panel */
{
.dmi_match.field = DMI_BOARD_VENDOR,
.dmi_match.value = "Framework",
.ident.panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x0bca),
.ident.name = "NE135FBM-N41",
- .min_brightness = 0,
+ .quirk = { .min_brightness = 1, },
},
/* 13 inch glossy panel */
{
@@ -32,7 +35,7 @@ static const struct drm_panel_min_backlight_quirk drm_panel_min_backlight_quirks
.dmi_match.value = "Framework",
.ident.panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x095f),
.ident.name = "NE135FBM-N41",
- .min_brightness = 0,
+ .quirk = { .min_brightness = 1, },
},
/* 13 inch 2.8k panel */
{
@@ -40,56 +43,114 @@ static const struct drm_panel_min_backlight_quirk drm_panel_min_backlight_quirks
.dmi_match.value = "Framework",
.ident.panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x0cb4),
.ident.name = "NE135A1M-NY1",
- .min_brightness = 0,
+ .quirk = { .min_brightness = 1, },
+ },
+ /* Steam Deck models */
+ {
+ .dmi_match.field = DMI_SYS_VENDOR,
+ .dmi_match.value = "Valve",
+ .dmi_match_other.field = DMI_PRODUCT_NAME,
+ .dmi_match_other.value = "Jupiter",
+ .quirk = { .min_brightness = 1, },
+ },
+ {
+ .dmi_match.field = DMI_SYS_VENDOR,
+ .dmi_match.value = "Valve",
+ .dmi_match_other.field = DMI_PRODUCT_NAME,
+ .dmi_match_other.value = "Galileo",
+ .quirk = { .min_brightness = 1, },
+ },
+ /* OLED panels with a brightness issue when the last byte is 0/1 */
+ {
+ .dmi_match.field = DMI_SYS_VENDOR,
+ .dmi_match.value = "AYANEO",
+ .dmi_match_other.field = DMI_PRODUCT_NAME,
+ .dmi_match_other.value = "AYANEO 3",
+ .quirk = { .brightness_mask = 3, },
+ },
+ {
+ .dmi_match.field = DMI_SYS_VENDOR,
+ .dmi_match.value = "ZOTAC",
+ .dmi_match_other.field = DMI_BOARD_NAME,
+ .dmi_match_other.value = "G0A1W",
+ .quirk = { .brightness_mask = 3, },
+ },
+ {
+ .dmi_match.field = DMI_SYS_VENDOR,
+ .dmi_match.value = "ZOTAC",
+ .dmi_match_other.field = DMI_BOARD_NAME,
+ .dmi_match_other.value = "G1A1W",
+ .quirk = { .brightness_mask = 3, },
+ },
+ {
+ .dmi_match.field = DMI_SYS_VENDOR,
+ .dmi_match.value = "ONE-NETBOOK",
+ .dmi_match_other.field = DMI_PRODUCT_NAME,
+ .dmi_match_other.value = "ONEXPLAYER F1Pro",
+ .quirk = { .brightness_mask = 3, },
+ },
+ {
+ .dmi_match.field = DMI_SYS_VENDOR,
+ .dmi_match.value = "ONE-NETBOOK",
+ .dmi_match_other.field = DMI_PRODUCT_NAME,
+ .dmi_match_other.value = "ONEXPLAYER F1 EVA-02",
+ .quirk = { .brightness_mask = 3, },
},
};
-static bool drm_panel_min_backlight_quirk_matches(const struct drm_panel_min_backlight_quirk *quirk,
- const struct drm_edid *edid)
+static bool drm_panel_min_backlight_quirk_matches(
+ const struct drm_get_panel_backlight_quirk *quirk,
+ const struct drm_edid *edid)
{
- if (!dmi_match(quirk->dmi_match.field, quirk->dmi_match.value))
+ if (quirk->dmi_match.field &&
+ !dmi_match(quirk->dmi_match.field, quirk->dmi_match.value))
+ return false;
+
+ if (quirk->dmi_match_other.field &&
+ !dmi_match(quirk->dmi_match_other.field,
+ quirk->dmi_match_other.value))
return false;
- if (!drm_edid_match(edid, &quirk->ident))
+ if (quirk->ident.panel_id && !drm_edid_match(edid, &quirk->ident))
return false;
return true;
}
/**
- * drm_get_panel_min_brightness_quirk - Get minimum supported brightness level for a panel.
+ * drm_get_panel_backlight_quirk - Get backlight quirks for a panel
* @edid: EDID of the panel to check
*
* This function checks for platform specific (e.g. DMI based) quirks
* providing info on the minimum backlight brightness for systems where this
- * cannot be probed correctly from the hard-/firm-ware.
+ * cannot be probed correctly from the hardware, firmware, or other sources.
*
* Returns:
- * A negative error value or
- * an override value in the range [0, 255] representing 0-100% to be scaled to
- * the drivers target range.
+ * a drm_panel_backlight_quirk struct if a quirk was found, otherwise an
+ * error pointer.
*/
-int drm_get_panel_min_brightness_quirk(const struct drm_edid *edid)
+const struct drm_panel_backlight_quirk *
+drm_get_panel_backlight_quirk(const struct drm_edid *edid)
{
- const struct drm_panel_min_backlight_quirk *quirk;
+ const struct drm_get_panel_backlight_quirk *quirk;
size_t i;
if (!IS_ENABLED(CONFIG_DMI))
- return -ENODATA;
+ return ERR_PTR(-ENODATA);
if (!edid)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
for (i = 0; i < ARRAY_SIZE(drm_panel_min_backlight_quirks); i++) {
quirk = &drm_panel_min_backlight_quirks[i];
if (drm_panel_min_backlight_quirk_matches(quirk, edid))
- return quirk->min_brightness;
+ return &quirk->quirk;
}
- return -ENODATA;
+ return ERR_PTR(-ENODATA);
}
-EXPORT_SYMBOL(drm_get_panel_min_brightness_quirk);
+EXPORT_SYMBOL(drm_get_panel_backlight_quirk);
MODULE_DESCRIPTION("Quirks for panel backlight overrides");
MODULE_LICENSE("GPL");
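
Callers migrate from a sign check on the returned int to IS_ERR() on the returned pointer, then pick whichever quirk field is set. A hedged caller sketch (the driver call sites are outside this excerpt, and the surrounding variables are illustrative):

	const struct drm_panel_backlight_quirk *quirk;

	quirk = drm_get_panel_backlight_quirk(edid);
	if (!IS_ERR(quirk)) {
		if (quirk->min_brightness)
			min_brightness = quirk->min_brightness;
		if (quirk->brightness_mask)
			/* steer programmed values away from bits the panel mishandles */
			bad_bits = quirk->brightness_mask;
	}
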
diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs
index 09a9b452e8b7..ac27e86c601c 100644
--- a/drivers/gpu/drm/drm_panic_qr.rs
+++ b/drivers/gpu/drm/drm_panic_qr.rs
@@ -381,6 +381,26 @@ struct DecFifo {
len: usize,
}
+// On the arm32 architecture, dividing a `u64` by a constant generates a call
+// to `__aeabi_uldivmod`, which is not present in the kernel.
+// So use the multiply-by-inverse method for this architecture.
+fn div10(val: u64) -> u64 {
+ if cfg!(target_arch = "arm") {
+ let val_h = val >> 32;
+ let val_l = val & 0xFFFFFFFF;
+ let b_h: u64 = 0x66666666;
+ let b_l: u64 = 0x66666667;
+
+ let tmp1 = val_h * b_l + ((val_l * b_l) >> 32);
+ let tmp2 = val_l * b_h + (tmp1 & 0xffffffff);
+ let tmp3 = val_h * b_h + (tmp1 >> 32) + (tmp2 >> 32);
+
+ tmp3 >> 2
+ } else {
+ val / 10
+ }
+}
+
impl DecFifo {
fn push(&mut self, data: u64, len: usize) {
let mut chunk = data;
@@ -389,7 +409,7 @@ impl DecFifo {
}
for i in 0..len {
self.decimals[i] = (chunk % 10) as u8;
- chunk /= 10;
+ chunk = div10(chunk);
}
self.len += len;
}
@@ -948,7 +968,7 @@ pub unsafe extern "C" fn drm_panic_qr_generate(
// nul-terminated string.
let url_cstr: &CStr = unsafe { CStr::from_char_ptr(url) };
let segments = &[
- &Segment::Binary(url_cstr.as_bytes()),
+ &Segment::Binary(url_cstr.to_bytes()),
&Segment::Numeric(&data_slice[0..data_len]),
];
match EncodedMsg::new(segments, tmp_slice) {
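
div10() computes the high 64 bits of val * 0x6666666666666667 and then shifts right twice more; since that constant is (2^65 + 3)/5, roughly 2^66/10, the whole operation approximates floor(val / 10) without emitting a division. A host-side C spot check of the same identity (a demonstration over sample values, not an exhaustive proof; the kernel code splits the multiply into 32-bit halves precisely to avoid needing 128-bit arithmetic):

	#include <assert.h>
	#include <stdint.h>

	static uint64_t div10_host(uint64_t v)
	{
		/* high bits of a 128-bit product, shifted by 66 in total */
		unsigned __int128 p = (unsigned __int128)v * 0x6666666666666667ULL;

		return (uint64_t)(p >> 66);
	}

	int main(void)
	{
		const uint64_t samples[] = { 0, 9, 10, 12345, UINT64_MAX };

		for (unsigned int i = 0; i < 5; i++)
			assert(div10_host(samples[i]) == samples[i] / 10);
		return 0;
	}
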
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index a23fc712a8b7..43a10b4af43a 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -93,7 +93,7 @@ struct drm_prime_member {
struct rb_node handle_rb;
};
-static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
+int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
struct dma_buf *dma_buf, uint32_t handle)
{
struct drm_prime_member *member;
@@ -190,8 +190,6 @@ void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
{
struct rb_node *rb;
- mutex_lock(&prime_fpriv->lock);
-
rb = prime_fpriv->handles.rb_node;
while (rb) {
struct drm_prime_member *member;
@@ -210,8 +208,6 @@ void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
rb = rb->rb_left;
}
}
-
- mutex_unlock(&prime_fpriv->lock);
}
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
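
Dropping the mutex_lock()/mutex_unlock() pair from drm_prime_remove_buf_handle() (and un-static-ing drm_prime_add_buf_handle()) moves lock ownership to the caller, which can now batch several rbtree operations under one critical section. The expected calling pattern, sketched (the actual call sites are outside this excerpt):

	mutex_lock(&prime_fpriv->lock);
	drm_prime_remove_buf_handle(prime_fpriv, handle);
	mutex_unlock(&prime_fpriv->lock);
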
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 6b3541159c0f..09b12c30df69 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -119,6 +119,7 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
*status = drm_bridge_chain_mode_valid(bridge,
&connector->display_info,
mode);
+ drm_bridge_put(bridge);
if (*status != MODE_OK) {
/* There is also no point in continuing for crtc check
* here. */
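
The added drm_bridge_put() reflects bridges becoming refcounted: a lookup that yields a bridge pointer also yields a reference, which must be dropped once the bridge has been used. The resulting pattern, assuming `bridge` carries such a reference:

	if (bridge) {
		*status = drm_bridge_chain_mode_valid(bridge,
						      &connector->display_info,
						      mode);
		drm_bridge_put(bridge);	/* drop the lookup's reference */
	}
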
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index a455c56dbbeb..b01ffa4d6509 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -18,6 +18,7 @@
#include <linux/gfp.h>
#include <linux/i2c.h>
#include <linux/kdev_t.h>
+#include <linux/pci.h>
#include <linux/property.h>
#include <linux/slab.h>
@@ -30,6 +31,8 @@
#include <drm/drm_property.h>
#include <drm/drm_sysfs.h>
+#include <asm/video.h>
+
#include "drm_internal.h"
#include "drm_crtc_internal.h"
@@ -508,6 +511,43 @@ void drm_sysfs_connector_property_event(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_sysfs_connector_property_event);
+static ssize_t boot_display_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "1\n");
+}
+static DEVICE_ATTR_RO(boot_display);
+
+static struct attribute *display_attrs[] = {
+ &dev_attr_boot_display.attr,
+ NULL
+};
+
+static umode_t boot_display_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj)->parent;
+
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (video_is_primary_device(&pdev->dev))
+ return a->mode;
+ }
+
+ return 0;
+}
+
+static const struct attribute_group display_attr_group = {
+ .attrs = display_attrs,
+ .is_visible = boot_display_visible,
+};
+
+static const struct attribute_group *card_dev_groups[] = {
+ &display_attr_group,
+ NULL
+};
+
struct device *drm_sysfs_minor_alloc(struct drm_minor *minor)
{
const char *minor_str;
@@ -531,6 +571,7 @@ struct device *drm_sysfs_minor_alloc(struct drm_minor *minor)
kdev->devt = MKDEV(DRM_MAJOR, minor->index);
kdev->class = drm_class;
+ kdev->groups = card_dev_groups;
kdev->type = &drm_sysfs_device_minor;
}
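
Attaching card_dev_groups to the minor's kobject surfaces the attribute as /sys/class/drm/cardN/boot_display, visible only when the parent PCI device is the primary video device. A userspace spot check (card index and path assumed, not taken from this patch):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/class/drm/card0/boot_display", "r");

		/* fopen() fails if card0 is absent or the attribute is hidden */
		if (!f)
			return 1;
		printf("card0 drove the firmware boot display\n");
		fclose(f);
		return 0;
	}

Since boot_display_show() always emits "1\n" when the attribute is visible, the file's presence alone answers the question; reading it is optional.
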
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 805aa28c1723..b8d9b7251319 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -69,7 +69,6 @@ struct decon_context {
void __iomem *regs;
unsigned long irq_flags;
bool i80_if;
- bool suspended;
wait_queue_head_t wait_vsync_queue;
atomic_t wait_vsync_event;
@@ -132,9 +131,6 @@ static void decon_shadow_protect_win(struct decon_context *ctx,
static void decon_wait_for_vblank(struct decon_context *ctx)
{
- if (ctx->suspended)
- return;
-
atomic_set(&ctx->wait_vsync_event, 1);
/*
@@ -210,9 +206,6 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
u32 val, clkdiv;
- if (ctx->suspended)
- return;
-
/* nothing to do if we haven't set the mode yet */
if (mode->htotal == 0 || mode->vtotal == 0)
return;
@@ -274,9 +267,6 @@ static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
u32 val;
- if (ctx->suspended)
- return -EPERM;
-
if (!test_and_set_bit(0, &ctx->irq_flags)) {
val = readl(ctx->regs + VIDINTCON0);
@@ -299,9 +289,6 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
u32 val;
- if (ctx->suspended)
- return;
-
if (test_and_clear_bit(0, &ctx->irq_flags)) {
val = readl(ctx->regs + VIDINTCON0);
@@ -404,9 +391,6 @@ static void decon_atomic_begin(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
int i;
- if (ctx->suspended)
- return;
-
for (i = 0; i < WINDOWS_NR; i++)
decon_shadow_protect_win(ctx, i, true);
}
@@ -427,9 +411,6 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
unsigned int pitch = fb->pitches[0];
unsigned int vidw_addr0_base = ctx->data->vidw_buf_start_base;
- if (ctx->suspended)
- return;
-
/*
* SHADOWCON/PRTCON register is used for enabling timing.
*
@@ -517,9 +498,6 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
unsigned int win = plane->index;
u32 val;
- if (ctx->suspended)
- return;
-
/* protect windows */
decon_shadow_protect_win(ctx, win, true);
@@ -538,9 +516,6 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
int i;
- if (ctx->suspended)
- return;
-
for (i = 0; i < WINDOWS_NR; i++)
decon_shadow_protect_win(ctx, i, false);
exynos_crtc_handle_event(crtc);
@@ -568,9 +543,6 @@ static void decon_atomic_enable(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
int ret;
- if (!ctx->suspended)
- return;
-
ret = pm_runtime_resume_and_get(ctx->dev);
if (ret < 0) {
DRM_DEV_ERROR(ctx->dev, "failed to enable DECON device.\n");
@@ -584,8 +556,6 @@ static void decon_atomic_enable(struct exynos_drm_crtc *crtc)
decon_enable_vblank(ctx->crtc);
decon_commit(ctx->crtc);
-
- ctx->suspended = false;
}
static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
@@ -593,9 +563,6 @@ static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
int i;
- if (ctx->suspended)
- return;
-
/*
* We need to make sure that all windows are disabled before we
* suspend that connector. Otherwise we might try to scan from
@@ -605,8 +572,6 @@ static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
decon_disable_plane(crtc, &ctx->planes[i]);
pm_runtime_put_sync(ctx->dev);
-
- ctx->suspended = true;
}
static const struct exynos_drm_crtc_ops decon_crtc_ops = {
@@ -727,7 +692,6 @@ static int decon_probe(struct platform_device *pdev)
return -ENOMEM;
ctx->dev = dev;
- ctx->suspended = true;
ctx->data = of_device_get_match_data(dev);
i80_if_timings = of_get_child_by_name(dev->of_node, "i80-if-timings");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 896a03639e2d..c4d098ab7863 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -154,6 +154,11 @@ static const struct samsung_dsim_plat_data exynos5433_dsi_pdata = {
.host_ops = &exynos_dsi_exynos_host_ops,
};
+static const struct samsung_dsim_plat_data exynos7870_dsi_pdata = {
+ .hw_type = DSIM_TYPE_EXYNOS7870,
+ .host_ops = &exynos_dsi_exynos_host_ops,
+};
+
static const struct of_device_id exynos_dsi_of_match[] = {
{
.compatible = "samsung,exynos3250-mipi-dsi",
@@ -175,6 +180,10 @@ static const struct of_device_id exynos_dsi_of_match[] = {
.compatible = "samsung,exynos5433-mipi-dsi",
.data = &exynos5433_dsi_pdata,
},
+ {
+ .compatible = "samsung,exynos7870-mipi-dsi",
+ .data = &exynos7870_dsi_pdata,
+ },
{ /* sentinel. */ }
};
MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
diff --git a/drivers/gpu/drm/gma500/fbdev.c b/drivers/gpu/drm/gma500/fbdev.c
index 4a37136f90f4..32d31e5f5f1a 100644
--- a/drivers/gpu/drm/gma500/fbdev.c
+++ b/drivers/gpu/drm/gma500/fbdev.c
@@ -120,7 +120,6 @@ static void psb_fbdev_fb_destroy(struct fb_info *info)
drm_fb_helper_fini(fb_helper);
drm_framebuffer_unregister_private(fb);
- fb->obj[0] = NULL;
drm_framebuffer_cleanup(fb);
kfree(fb);
@@ -245,7 +244,6 @@ int psb_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
err_drm_framebuffer_unregister_private:
drm_framebuffer_unregister_private(fb);
- fb->obj[0] = NULL;
drm_framebuffer_cleanup(fb);
kfree(fb);
err_drm_gem_object_put:
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 1cf394369127..c0feca58511d 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -726,8 +726,8 @@ void oaktrail_hdmi_teardown(struct drm_device *dev)
if (hdmi_dev) {
pdev = hdmi_dev->dev;
- pci_set_drvdata(pdev, NULL);
oaktrail_hdmi_i2c_exit(pdev);
+ pci_set_drvdata(pdev, NULL);
iounmap(hdmi_dev->regs);
kfree(hdmi_dev);
pci_dev_put(pdev);
diff --git a/drivers/gpu/drm/gud/gud_connector.c b/drivers/gpu/drm/gud/gud_connector.c
index 0f07d77c5d52..4a15695fa933 100644
--- a/drivers/gpu/drm/gud/gud_connector.c
+++ b/drivers/gpu/drm/gud/gud_connector.c
@@ -16,7 +16,6 @@
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
#include <drm/gud.h>
#include "gud_internal.h"
@@ -607,13 +606,16 @@ int gud_connector_fill_properties(struct drm_connector_state *connector_state,
return gconn->num_properties;
}
+static const struct drm_encoder_funcs gud_drm_simple_encoder_funcs_cleanup = {
+ .destroy = drm_encoder_cleanup,
+};
+
static int gud_connector_create(struct gud_device *gdrm, unsigned int index,
struct gud_connector_descriptor_req *desc)
{
struct drm_device *drm = &gdrm->drm;
struct gud_connector *gconn;
struct drm_connector *connector;
- struct drm_encoder *encoder;
int ret, connector_type;
u32 flags;
@@ -681,20 +683,13 @@ static int gud_connector_create(struct gud_device *gdrm, unsigned int index,
return ret;
}
- /* The first connector is attached to the existing simple pipe encoder */
- if (!connector->index) {
- encoder = &gdrm->pipe.encoder;
- } else {
- encoder = &gconn->encoder;
-
- ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_NONE);
- if (ret)
- return ret;
-
- encoder->possible_crtcs = 1;
- }
+ gconn->encoder.possible_crtcs = drm_crtc_mask(&gdrm->crtc);
+ ret = drm_encoder_init(drm, &gconn->encoder, &gud_drm_simple_encoder_funcs_cleanup,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret)
+ return ret;
- return drm_connector_attach_encoder(connector, encoder);
+ return drm_connector_attach_encoder(connector, &gconn->encoder);
}
int gud_get_connectors(struct gud_device *gdrm)
diff --git a/drivers/gpu/drm/gud/gud_drv.c b/drivers/gpu/drm/gud/gud_drv.c
index 5385a2126e45..b7345c8d823d 100644
--- a/drivers/gpu/drm/gud/gud_drv.c
+++ b/drivers/gpu/drm/gud/gud_drv.c
@@ -16,6 +16,7 @@
#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
@@ -27,7 +28,6 @@
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
#include <drm/gud.h>
#include "gud_internal.h"
@@ -289,7 +289,7 @@ static int gud_get_properties(struct gud_device *gdrm)
* but mask out any additions on future devices.
*/
val &= GUD_ROTATION_MASK;
- ret = drm_plane_create_rotation_property(&gdrm->pipe.plane,
+ ret = drm_plane_create_rotation_property(&gdrm->plane,
DRM_MODE_ROTATE_0, val);
break;
default:
@@ -338,10 +338,30 @@ static int gud_stats_debugfs(struct seq_file *m, void *data)
return 0;
}
-static const struct drm_simple_display_pipe_funcs gud_pipe_funcs = {
- .check = gud_pipe_check,
- .update = gud_pipe_update,
- DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS
+static const struct drm_crtc_helper_funcs gud_crtc_helper_funcs = {
+ .atomic_check = drm_crtc_helper_atomic_check
+};
+
+static const struct drm_crtc_funcs gud_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static const struct drm_plane_helper_funcs gud_plane_helper_funcs = {
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = gud_plane_atomic_check,
+ .atomic_update = gud_plane_atomic_update,
+};
+
+static const struct drm_plane_funcs gud_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
};
static const struct drm_mode_config_funcs gud_mode_config_funcs = {
@@ -350,7 +370,7 @@ static const struct drm_mode_config_funcs gud_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-static const u64 gud_pipe_modifiers[] = {
+static const u64 gud_plane_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
@@ -567,12 +587,17 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
return -ENOMEM;
}
- ret = drm_simple_display_pipe_init(drm, &gdrm->pipe, &gud_pipe_funcs,
- formats, num_formats,
- gud_pipe_modifiers, NULL);
+ ret = drm_universal_plane_init(drm, &gdrm->plane, 0,
+ &gud_plane_funcs,
+ formats, num_formats,
+ gud_plane_modifiers,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret)
return ret;
+ drm_plane_helper_add(&gdrm->plane, &gud_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(&gdrm->plane);
+
devm_kfree(dev, formats);
devm_kfree(dev, formats_dev);
@@ -582,7 +607,12 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
return ret;
}
- drm_plane_enable_fb_damage_clips(&gdrm->pipe.plane);
+ ret = drm_crtc_init_with_planes(drm, &gdrm->crtc, &gdrm->plane, NULL,
+ &gud_crtc_funcs, NULL);
+ if (ret)
+ return ret;
+
+ drm_crtc_helper_add(&gdrm->crtc, &gud_crtc_helper_funcs);
ret = gud_get_connectors(gdrm);
if (ret) {
@@ -620,8 +650,6 @@ static void gud_disconnect(struct usb_interface *interface)
struct gud_device *gdrm = usb_get_intfdata(interface);
struct drm_device *drm = &gdrm->drm;
- drm_dbg(drm, "%s:\n", __func__);
-
drm_kms_helper_poll_fini(drm);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
diff --git a/drivers/gpu/drm/gud/gud_internal.h b/drivers/gpu/drm/gud/gud_internal.h
index d6fb25388722..d27c31648341 100644
--- a/drivers/gpu/drm/gud/gud_internal.h
+++ b/drivers/gpu/drm/gud/gud_internal.h
@@ -11,11 +11,11 @@
#include <uapi/drm/drm_fourcc.h>
#include <drm/drm_modes.h>
-#include <drm/drm_simple_kms_helper.h>
struct gud_device {
struct drm_device drm;
- struct drm_simple_display_pipe pipe;
+ struct drm_plane plane;
+ struct drm_crtc crtc;
struct work_struct work;
u32 flags;
const struct drm_format_info *xrgb8888_emulation_format;
@@ -62,11 +62,10 @@ int gud_usb_set_u8(struct gud_device *gdrm, u8 request, u8 val);
void gud_clear_damage(struct gud_device *gdrm);
void gud_flush_work(struct work_struct *work);
-int gud_pipe_check(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *new_plane_state,
- struct drm_crtc_state *new_crtc_state);
-void gud_pipe_update(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *old_state);
+int gud_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+void gud_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *atomic_state);
int gud_connector_fill_properties(struct drm_connector_state *connector_state,
struct gud_property_req *properties);
int gud_get_connectors(struct gud_device *gdrm);
diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index 8d548d08f127..54d9aa9998e5 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -20,7 +20,6 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_rect.h>
-#include <drm/drm_simple_kms_helper.h>
#include <drm/gud.h>
#include "gud_internal.h"
@@ -451,14 +450,15 @@ static void gud_fb_handle_damage(struct gud_device *gdrm, struct drm_framebuffer
gud_flush_damage(gdrm, fb, src, !fb->obj[0]->import_attach, damage);
}
-int gud_pipe_check(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *new_plane_state,
- struct drm_crtc_state *new_crtc_state)
+int gud_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
- struct gud_device *gdrm = to_gud_device(pipe->crtc.dev);
- struct drm_plane_state *old_plane_state = pipe->plane.state;
- const struct drm_display_mode *mode = &new_crtc_state->mode;
- struct drm_atomic_state *state = new_plane_state->state;
+ struct gud_device *gdrm = to_gud_device(plane->dev);
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc *crtc = new_plane_state->crtc;
+ struct drm_crtc_state *crtc_state;
+ const struct drm_display_mode *mode;
struct drm_framebuffer *old_fb = old_plane_state->fb;
struct drm_connector_state *connector_state = NULL;
struct drm_framebuffer *fb = new_plane_state->fb;
@@ -469,20 +469,37 @@ int gud_pipe_check(struct drm_simple_display_pipe *pipe,
int idx, ret;
size_t len;
- if (WARN_ON_ONCE(!fb))
+ if (drm_WARN_ON_ONCE(plane->dev, !fb))
return -EINVAL;
+ if (drm_WARN_ON_ONCE(plane->dev, !crtc))
+ return -EINVAL;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ mode = &crtc_state->mode;
+
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+ if (ret)
+ return ret;
+
+ if (!new_plane_state->visible)
+ return 0;
+
if (old_plane_state->rotation != new_plane_state->rotation)
- new_crtc_state->mode_changed = true;
+ crtc_state->mode_changed = true;
if (old_fb && old_fb->format != format)
- new_crtc_state->mode_changed = true;
+ crtc_state->mode_changed = true;
- if (!new_crtc_state->mode_changed && !new_crtc_state->connectors_changed)
+ if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
return 0;
/* Only one connector is supported */
- if (hweight32(new_crtc_state->connector_mask) != 1)
+ if (hweight32(crtc_state->connector_mask) != 1)
return -EINVAL;
if (format->format == DRM_FORMAT_XRGB8888 && gdrm->xrgb8888_emulation_format)
@@ -500,7 +517,7 @@ int gud_pipe_check(struct drm_simple_display_pipe *pipe,
if (!connector_state) {
struct drm_connector_list_iter conn_iter;
- drm_connector_list_iter_begin(pipe->crtc.dev, &conn_iter);
+ drm_connector_list_iter_begin(plane->dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->state->crtc) {
connector_state = connector->state;
@@ -567,16 +584,18 @@ out:
return ret;
}
-void gud_pipe_update(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *old_state)
+void gud_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *atomic_state)
{
- struct drm_device *drm = pipe->crtc.dev;
+ struct drm_device *drm = plane->dev;
struct gud_device *gdrm = to_gud_device(drm);
- struct drm_plane_state *state = pipe->plane.state;
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
- struct drm_framebuffer *fb = state->fb;
- struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(atomic_state, plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(atomic_state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(new_state);
+ struct drm_framebuffer *fb = new_state->fb;
+ struct drm_crtc *crtc = new_state->crtc;
struct drm_rect damage;
+ struct drm_atomic_helper_damage_iter iter;
int ret, idx;
if (crtc->state->mode_changed || !crtc->state->enable) {
@@ -611,7 +630,8 @@ void gud_pipe_update(struct drm_simple_display_pipe *pipe,
if (ret)
goto ctrl_disable;
- if (drm_atomic_helper_damage_merged(old_state, state, &damage))
+ drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage)
gud_fb_handle_damage(gdrm, fb, &shadow_plane_state->data[0], &damage);
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c
index 74f7832ea53e..0726cb5b736e 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c
@@ -325,6 +325,17 @@ static int hibmc_dp_link_downgrade_training_eq(struct hibmc_dp_dev *dp)
return hibmc_dp_link_reduce_rate(dp);
}
+static void hibmc_dp_update_caps(struct hibmc_dp_dev *dp)
+{
+ dp->link.cap.link_rate = dp->dpcd[DP_MAX_LINK_RATE];
+ if (dp->link.cap.link_rate > DP_LINK_BW_8_1 || !dp->link.cap.link_rate)
+ dp->link.cap.link_rate = DP_LINK_BW_8_1;
+
+ dp->link.cap.lanes = dp->dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+ if (dp->link.cap.lanes > HIBMC_DP_LANE_NUM_MAX)
+ dp->link.cap.lanes = HIBMC_DP_LANE_NUM_MAX;
+}
+
int hibmc_dp_link_training(struct hibmc_dp_dev *dp)
{
struct hibmc_dp_link *link = &dp->link;
@@ -334,8 +345,7 @@ int hibmc_dp_link_training(struct hibmc_dp_dev *dp)
if (ret)
drm_err(dp->dev, "dp aux read dpcd failed, ret: %d\n", ret);
- dp->link.cap.link_rate = dp->dpcd[DP_MAX_LINK_RATE];
- dp->link.cap.lanes = 0x2;
+ hibmc_dp_update_caps(dp);
ret = hibmc_dp_get_serdes_rate_cfg(dp);
if (ret < 0)
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index 768b97f9e74a..289304500ab0 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -32,7 +32,7 @@
DEFINE_DRM_GEM_FOPS(hibmc_fops);
-static const char *g_irqs_names_map[HIBMC_MAX_VECTORS] = { "vblank", "hpd" };
+static const char *g_irqs_names_map[HIBMC_MAX_VECTORS] = { "hibmc-vblank", "hibmc-hpd" };
static irqreturn_t hibmc_interrupt(int irq, void *arg)
{
@@ -115,6 +115,8 @@ static const struct drm_mode_config_funcs hibmc_mode_funcs = {
static int hibmc_kms_init(struct hibmc_drm_private *priv)
{
struct drm_device *dev = &priv->dev;
+ struct drm_encoder *encoder;
+ u32 clone_mask = 0;
int ret;
ret = drmm_mode_config_init(dev);
@@ -154,6 +156,12 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv)
return ret;
}
+ drm_for_each_encoder(encoder, dev)
+ clone_mask |= drm_encoder_mask(encoder);
+
+ drm_for_each_encoder(encoder, dev)
+ encoder->possible_clones = clone_mask;
+
return 0;
}
@@ -277,7 +285,6 @@ static void hibmc_unload(struct drm_device *dev)
static int hibmc_msi_init(struct drm_device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
- char name[32] = {0};
int valid_irq_num;
int irq;
int ret;
@@ -292,9 +299,6 @@ static int hibmc_msi_init(struct drm_device *dev)
valid_irq_num = ret;
for (int i = 0; i < valid_irq_num; i++) {
- snprintf(name, ARRAY_SIZE(name) - 1, "%s-%s-%s",
- dev->driver->name, pci_name(pdev), g_irqs_names_map[i]);
-
irq = pci_irq_vector(pdev, i);
if (i)
@@ -302,10 +306,10 @@ static int hibmc_msi_init(struct drm_device *dev)
ret = devm_request_threaded_irq(&pdev->dev, irq,
hibmc_dp_interrupt,
hibmc_dp_hpd_isr,
- IRQF_SHARED, name, dev);
+ IRQF_SHARED, g_irqs_names_map[i], dev);
else
ret = devm_request_irq(&pdev->dev, irq, hibmc_interrupt,
- IRQF_SHARED, name, dev);
+ IRQF_SHARED, g_irqs_names_map[i], dev);
if (ret) {
drm_err(dev, "install irq failed: %d\n", ret);
return ret;
@@ -323,13 +327,13 @@ static int hibmc_load(struct drm_device *dev)
ret = hibmc_hw_init(priv);
if (ret)
- goto err;
+ return ret;
ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
if (ret) {
drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
- goto err;
+ return ret;
}
ret = hibmc_kms_init(priv);
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index 274feabe7df0..ca8502e2760c 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -69,6 +69,7 @@ int hibmc_de_init(struct hibmc_drm_private *priv);
int hibmc_vdac_init(struct hibmc_drm_private *priv);
int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *connector);
+void hibmc_ddc_del(struct hibmc_vdac *vdac);
int hibmc_dp_init(struct hibmc_drm_private *priv);
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
index 99b3b77b5445..44860011855e 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
@@ -95,3 +95,8 @@ int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *vdac)
return i2c_bit_add_bus(&vdac->adapter);
}
+
+void hibmc_ddc_del(struct hibmc_vdac *vdac)
+{
+ i2c_del_adapter(&vdac->adapter);
+}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index e8a527ede854..841e81f47b68 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -53,7 +53,7 @@ static void hibmc_connector_destroy(struct drm_connector *connector)
{
struct hibmc_vdac *vdac = to_hibmc_vdac(connector);
- i2c_del_adapter(&vdac->adapter);
+ hibmc_ddc_del(vdac);
drm_connector_cleanup(connector);
}
@@ -110,7 +110,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
ret = drmm_encoder_init(dev, encoder, NULL, DRM_MODE_ENCODER_DAC, NULL);
if (ret) {
drm_err(dev, "failed to init encoder: %d\n", ret);
- return ret;
+ goto err;
}
drm_encoder_helper_add(encoder, &hibmc_encoder_helper_funcs);
@@ -121,7 +121,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
&vdac->adapter);
if (ret) {
drm_err(dev, "failed to init connector: %d\n", ret);
- return ret;
+ goto err;
}
drm_connector_helper_add(connector, &hibmc_connector_helper_funcs);
@@ -131,4 +131,9 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
return 0;
+
+err:
+ hibmc_ddc_del(vdac);
+
+ return ret;
}
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 1852e0804942..3562a02ef7ad 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -50,7 +50,7 @@ config DRM_I915_DEBUG
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
select DRM_DEBUG_MM if DRM=y
select DRM_EXPORT_FOR_TESTS if m
- select DRM_DEBUG_SELFTEST
+ select DRM_KUNIT_TEST if KUNIT
select DMABUF_SELFTESTS
select SW_SYNC # signaling validation framework (igt/syncobj*)
select DRM_I915_WERROR
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 853543443072..e58c0c158b3a 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -32,6 +32,7 @@ i915-y += \
i915_scatterlist.o \
i915_switcheroo.o \
i915_sysfs.o \
+ i915_timer_util.o \
i915_utils.o \
intel_clock_gating.o \
intel_cpu_info.o \
@@ -280,6 +281,7 @@ i915-y += \
display/intel_modeset_setup.o \
display/intel_modeset_verify.o \
display/intel_overlay.o \
+ display/intel_panic.o \
display/intel_pch.o \
display/intel_pch_display.o \
display/intel_pch_refclk.o \
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index 87f6b9602b16..aa159f9ce12f 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -424,17 +424,6 @@ intel_dp_link_down(struct intel_encoder *encoder,
drm_dbg_kms(display->drm, "\n");
- if ((display->platform.ivybridge && port == PORT_A) ||
- (HAS_PCH_CPT(display) && port != PORT_A)) {
- intel_dp->DP &= ~DP_LINK_TRAIN_MASK_CPT;
- intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
- } else {
- intel_dp->DP &= ~DP_LINK_TRAIN_MASK;
- intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE;
- }
- intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(display, intel_dp->output_reg);
-
intel_dp->DP &= ~DP_PORT_EN;
intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(display, intel_dp->output_reg);
@@ -612,6 +601,19 @@ cpt_set_link_train(struct intel_dp *intel_dp,
}
static void
+cpt_set_idle_link_train(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ intel_dp->DP &= ~DP_LINK_TRAIN_MASK_CPT;
+ intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
+
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
+}
+
+static void
g4x_set_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 dp_train_pat)
@@ -639,6 +641,19 @@ g4x_set_link_train(struct intel_dp *intel_dp,
intel_de_posting_read(display, intel_dp->output_reg);
}
+static void
+g4x_set_idle_link_train(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ intel_dp->DP &= ~DP_LINK_TRAIN_MASK;
+ intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE;
+
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
+}
+
static void intel_dp_enable_port(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
@@ -1285,12 +1300,10 @@ bool g4x_dp_init(struct intel_display *display,
drm_dbg_kms(display->drm, "No VBT child device for DP-%c\n",
port_name(port));
- dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
+ dig_port = intel_dig_port_alloc();
if (!dig_port)
return false;
- dig_port->aux_ch = AUX_CH_NONE;
-
intel_connector = intel_connector_alloc();
if (!intel_connector)
goto err_connector_alloc;
@@ -1300,8 +1313,6 @@ bool g4x_dp_init(struct intel_display *display,
intel_encoder->devdata = devdata;
- mutex_init(&dig_port->hdcp.mutex);
-
if (drm_encoder_init(display->drm, &intel_encoder->base,
&intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
"DP %c", port_name(port)))
@@ -1342,10 +1353,13 @@ bool g4x_dp_init(struct intel_display *display,
intel_encoder->audio_disable = g4x_dp_audio_disable;
if ((display->platform.ivybridge && port == PORT_A) ||
- (HAS_PCH_CPT(display) && port != PORT_A))
+ (HAS_PCH_CPT(display) && port != PORT_A)) {
dig_port->dp.set_link_train = cpt_set_link_train;
- else
+ dig_port->dp.set_idle_link_train = cpt_set_idle_link_train;
+ } else {
dig_port->dp.set_link_train = g4x_set_link_train;
+ dig_port->dp.set_idle_link_train = g4x_set_idle_link_train;
+ }
if (display->platform.cherryview)
intel_encoder->set_signal_levels = chv_set_signal_levels;
@@ -1368,7 +1382,6 @@ bool g4x_dp_init(struct intel_display *display,
}
dig_port->dp.output_reg = output_reg;
- dig_port->max_lanes = 4;
intel_encoder->type = INTEL_OUTPUT_DP;
intel_encoder->power_domain = intel_display_power_ddi_lanes_domain(display, port);
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 2610f5702fb9..f6e2d1ed5639 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -19,7 +19,7 @@
#include "intel_display_types.h"
#include "intel_dp_aux.h"
#include "intel_dpio_phy.h"
-#include "intel_fdi.h"
+#include "intel_encoder.h"
#include "intel_fifo_underrun.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
@@ -135,11 +135,8 @@ static int g4x_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- if (HAS_PCH_SPLIT(display)) {
+ if (HAS_PCH_SPLIT(display))
crtc_state->has_pch_encoder = true;
- if (!intel_fdi_compute_pipe_bpp(crtc_state))
- return -EINVAL;
- }
if (display->platform.g4x)
crtc_state->has_hdmi_sink = g4x_compute_has_hdmi_sink(state, crtc);
@@ -690,12 +687,10 @@ bool g4x_hdmi_init(struct intel_display *display,
drm_dbg_kms(display->drm, "No VBT child device for HDMI-%c\n",
port_name(port));
- dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
+ dig_port = intel_dig_port_alloc();
if (!dig_port)
return false;
- dig_port->aux_ch = AUX_CH_NONE;
-
intel_connector = intel_connector_alloc();
if (!intel_connector)
goto err_connector_alloc;
@@ -704,8 +699,6 @@ bool g4x_hdmi_init(struct intel_display *display,
intel_encoder->devdata = devdata;
- mutex_init(&dig_port->hdcp.mutex);
-
if (drm_encoder_init(display->drm, &intel_encoder->base,
&intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
"HDMI %c", port_name(port)))
@@ -767,8 +760,6 @@ bool g4x_hdmi_init(struct intel_display *display,
intel_encoder->cloneable |= BIT(INTEL_OUTPUT_HDMI);
dig_port->hdmi.hdmi_reg = hdmi_reg;
- dig_port->dp.output_reg = INVALID_MMIO_REG;
- dig_port->max_lanes = 4;
intel_infoframe_init(dig_port);
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index f291ced989dc..407deb5dfb57 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -15,7 +15,6 @@
#include "i9xx_plane.h"
#include "i9xx_plane_regs.h"
#include "intel_atomic.h"
-#include "intel_bo.h"
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_regs.h"
@@ -23,6 +22,7 @@
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
+#include "intel_panic.h"
#include "intel_plane.h"
#include "intel_sprite.h"
@@ -155,8 +155,7 @@ static bool i9xx_plane_has_windowing(struct intel_plane *plane)
i9xx_plane == PLANE_C;
}
-static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 i9xx_plane_ctl(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
@@ -355,11 +354,24 @@ i9xx_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
+ plane_state->ctl = i9xx_plane_ctl(plane_state);
return 0;
}
+static u32 i8xx_plane_surf_offset(const struct intel_plane_state *plane_state)
+{
+ int x = plane_state->view.color_plane[0].x;
+ int y = plane_state->view.color_plane[0].y;
+
+ return intel_fb_xy_to_linear(x, y, plane_state, 0);
+}
+
+u32 i965_plane_surf_offset(const struct intel_plane_state *plane_state)
+{
+ return plane_state->view.color_plane[0].offset;
+}
+
static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
@@ -463,7 +475,7 @@ static void i9xx_plane_update_arm(struct intel_dsb *dsb,
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
int x = plane_state->view.color_plane[0].x;
int y = plane_state->view.color_plane[0].y;
- u32 dspcntr, dspaddr_offset, linear_offset;
+ u32 dspcntr;
dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
@@ -472,13 +484,6 @@ static void i9xx_plane_update_arm(struct intel_dsb *dsb,
crtc_state->async_flip_planes & BIT(plane->id))
dspcntr |= DISP_ASYNC_FLIP;
- linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
-
- if (DISPLAY_VER(display) >= 4)
- dspaddr_offset = plane_state->view.color_plane[0].offset;
- else
- dspaddr_offset = linear_offset;
-
if (display->platform.cherryview && i9xx_plane == PLANE_B) {
int crtc_x = plane_state->uapi.dst.x1;
int crtc_y = plane_state->uapi.dst.y1;
@@ -498,7 +503,7 @@ static void i9xx_plane_update_arm(struct intel_dsb *dsb,
DISP_OFFSET_Y(y) | DISP_OFFSET_X(x));
} else if (DISPLAY_VER(display) >= 4) {
intel_de_write_fw(display, DSPLINOFF(display, i9xx_plane),
- linear_offset);
+ intel_fb_xy_to_linear(x, y, plane_state, 0));
intel_de_write_fw(display, DSPTILEOFF(display, i9xx_plane),
DISP_OFFSET_Y(y) | DISP_OFFSET_X(x));
}
@@ -511,11 +516,9 @@ static void i9xx_plane_update_arm(struct intel_dsb *dsb,
intel_de_write_fw(display, DSPCNTR(display, i9xx_plane), dspcntr);
if (DISPLAY_VER(display) >= 4)
- intel_de_write_fw(display, DSPSURF(display, i9xx_plane),
- intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+ intel_de_write_fw(display, DSPSURF(display, i9xx_plane), plane_state->surf);
else
- intel_de_write_fw(display, DSPADDR(display, i9xx_plane),
- intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+ intel_de_write_fw(display, DSPADDR(display, i9xx_plane), plane_state->surf);
}
static void i830_plane_update_arm(struct intel_dsb *dsb,
@@ -604,16 +607,13 @@ g4x_primary_async_flip(struct intel_dsb *dsb,
{
struct intel_display *display = to_intel_display(plane);
u32 dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
- u32 dspaddr_offset = plane_state->view.color_plane[0].offset;
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
if (async_flip)
dspcntr |= DISP_ASYNC_FLIP;
intel_de_write_fw(display, DSPCNTR(display, i9xx_plane), dspcntr);
-
- intel_de_write_fw(display, DSPSURF(display, i9xx_plane),
- intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+ intel_de_write_fw(display, DSPSURF(display, i9xx_plane), plane_state->surf);
}
static void
@@ -624,11 +624,9 @@ vlv_primary_async_flip(struct intel_dsb *dsb,
bool async_flip)
{
struct intel_display *display = to_intel_display(plane);
- u32 dspaddr_offset = plane_state->view.color_plane[0].offset;
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- intel_de_write_fw(display, DSPADDR_VLV(display, i9xx_plane),
- intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+ intel_de_write_fw(display, DSPADDR_VLV(display, i9xx_plane), plane_state->surf);
}
static void
@@ -1037,6 +1035,11 @@ intel_primary_plane_create(struct intel_display *display, enum pipe pipe)
plane->get_hw_state = i9xx_plane_get_hw_state;
plane->check_plane = i9xx_plane_check;
+ if (DISPLAY_VER(display) >= 4)
+ plane->surf_offset = i965_plane_surf_offset;
+ else
+ plane->surf_offset = i8xx_plane_surf_offset;
+
if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
plane->capture_error = g4x_primary_capture_error;
else if (DISPLAY_VER(display) >= 4)
@@ -1175,7 +1178,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
drm_WARN_ON(display->drm, pipe != crtc->pipe);
- intel_fb = intel_bo_alloc_framebuffer();
+ intel_fb = intel_framebuffer_alloc();
if (!intel_fb) {
drm_dbg_kms(display->drm, "failed to alloc fb\n");
return;
@@ -1254,24 +1257,21 @@ bool i9xx_fixup_initial_plane_config(struct intel_crtc *crtc,
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- u32 base;
if (!plane_state->uapi.visible)
return false;
- base = intel_plane_ggtt_offset(plane_state);
-
/*
* We may have moved the surface to a different
* part of ggtt, make the plane aware of that.
*/
- if (plane_config->base == base)
+ if (plane_config->base == plane_state->surf)
return false;
if (DISPLAY_VER(display) >= 4)
- intel_de_write(display, DSPSURF(display, i9xx_plane), base);
+ intel_de_write(display, DSPSURF(display, i9xx_plane), plane_state->surf);
else
- intel_de_write(display, DSPADDR(display, i9xx_plane), base);
+ intel_de_write(display, DSPADDR(display, i9xx_plane), plane_state->surf);
return true;
}
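
Every DSPSURF/DSPADDR write above now takes a precomputed plane_state->surf instead of re-deriving the address inline. The computation itself is not in this hunk; presumably common plane code combines the GGTT offset with the new per-platform hook along the lines of:

	plane_state->surf = intel_plane_ggtt_offset(plane_state) +
			    plane->surf_offset(plane_state);
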
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.h b/drivers/gpu/drm/i915/display/i9xx_plane.h
index d90546d60855..565dab751301 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.h
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.h
@@ -24,6 +24,7 @@ unsigned int vlv_plane_min_alignment(struct intel_plane *plane,
const struct drm_framebuffer *fb,
int colot_plane);
int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
+u32 i965_plane_surf_offset(const struct intel_plane_state *plane_state);
struct intel_plane *
intel_primary_plane_create(struct intel_display *display, enum pipe pipe);
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
index 1f9db5118777..fd3b7b35f351 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.c
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -3,6 +3,10 @@
* Copyright © 2023 Intel Corporation
*/
+#include <linux/iopoll.h>
+
+#include "soc/intel_dram.h"
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "i9xx_wm.h"
@@ -85,7 +89,8 @@ static const struct cxsr_latency cxsr_latency_table[] = {
static const struct cxsr_latency *pnv_get_cxsr_latency(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
+ const struct dram_info *dram_info = intel_dram_info(display->drm);
+ bool is_ddr3 = dram_info->type == INTEL_DRAM_DDR3;
int i;
for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
@@ -93,15 +98,16 @@ static const struct cxsr_latency *pnv_get_cxsr_latency(struct intel_display *dis
bool is_desktop = !display->platform.mobile;
if (is_desktop == latency->is_desktop &&
- i915->is_ddr3 == latency->is_ddr3 &&
- DIV_ROUND_CLOSEST(i915->fsb_freq, 1000) == latency->fsb_freq &&
- DIV_ROUND_CLOSEST(i915->mem_freq, 1000) == latency->mem_freq)
+ is_ddr3 == latency->is_ddr3 &&
+ DIV_ROUND_CLOSEST(dram_info->fsb_freq, 1000) == latency->fsb_freq &&
+ DIV_ROUND_CLOSEST(dram_info->mem_freq, 1000) == latency->mem_freq)
return latency;
}
drm_dbg_kms(display->drm,
- "Could not find CxSR latency for DDR%s, FSB %u kHz, MEM %u kHz\n",
- i915->is_ddr3 ? "3" : "2", i915->fsb_freq, i915->mem_freq);
+ "Could not find CxSR latency for %s, FSB %u kHz, MEM %u kHz\n",
+ intel_dram_type_str(dram_info->type),
+ dram_info->fsb_freq, dram_info->mem_freq);
return NULL;
}
@@ -109,6 +115,7 @@ static const struct cxsr_latency *pnv_get_cxsr_latency(struct intel_display *dis
static void chv_set_memory_dvfs(struct intel_display *display, bool enable)
{
u32 val;
+ int ret;
vlv_punit_get(display->drm);
@@ -121,8 +128,10 @@ static void chv_set_memory_dvfs(struct intel_display *display, bool enable)
val |= FORCE_DDR_FREQ_REQ_ACK;
vlv_punit_write(display->drm, PUNIT_REG_DDR_SETUP2, val);
- if (wait_for((vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2) &
- FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
+ ret = poll_timeout_us(val = vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2),
+ (val & FORCE_DDR_FREQ_REQ_ACK) == 0,
+ 500, 3000, false);
+ if (ret)
drm_err(display->drm,
"timed out waiting for Punit DDR DVFS request\n");
@@ -3902,6 +3911,7 @@ static void vlv_wm_get_hw_state(struct intel_display *display)
struct vlv_wm_values *wm = &display->wm.vlv;
struct intel_crtc *crtc;
u32 val;
+ int ret;
vlv_read_wm_values(display, wm);
@@ -3928,8 +3938,10 @@ static void vlv_wm_get_hw_state(struct intel_display *display)
val |= FORCE_DDR_FREQ_REQ_ACK;
vlv_punit_write(display->drm, PUNIT_REG_DDR_SETUP2, val);
- if (wait_for((vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2) &
- FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
+ ret = poll_timeout_us(val = vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2),
+ (val & FORCE_DDR_FREQ_REQ_ACK) == 0,
+ 500, 3000, false);
+ if (ret) {
drm_dbg_kms(display->drm,
"Punit not acking DDR DVFS request, "
"assuming DDR DVFS is disabled\n");
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 8d9cb73a93a7..37faa8f19f6e 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -25,6 +25,8 @@
* Jani Nikula <jani.nikula@intel.com>
*/
+#include <linux/iopoll.h>
+
#include <drm/display/drm_dsc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fixed.h>
@@ -72,8 +74,12 @@ static int payload_credits_available(struct intel_display *display,
static bool wait_for_header_credits(struct intel_display *display,
enum transcoder dsi_trans, int hdr_credit)
{
- if (wait_for_us(header_credits_available(display, dsi_trans) >=
- hdr_credit, 100)) {
+ int ret, available;
+
+ ret = poll_timeout_us(available = header_credits_available(display, dsi_trans),
+ available >= hdr_credit,
+ 10, 100, false);
+ if (ret) {
drm_err(display->drm, "DSI header credits not released\n");
return false;
}
@@ -84,8 +90,12 @@ static bool wait_for_header_credits(struct intel_display *display,
static bool wait_for_payload_credits(struct intel_display *display,
enum transcoder dsi_trans, int payld_credit)
{
- if (wait_for_us(payload_credits_available(display, dsi_trans) >=
- payld_credit, 100)) {
+ int ret, available;
+
+ ret = poll_timeout_us(available = payload_credits_available(display, dsi_trans),
+ available >= payld_credit,
+ 10, 100, false);
+ if (ret) {
drm_err(display->drm, "DSI payload credits not released\n");
return false;
}
@@ -137,8 +147,11 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
/* wait for LP TX in progress bit to be cleared */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- if (wait_for_us(!(intel_de_read(display, DSI_LP_MSG(dsi_trans)) &
- LPTX_IN_PROGRESS), 20))
+
+ ret = intel_de_wait_custom(display, DSI_LP_MSG(dsi_trans),
+ LPTX_IN_PROGRESS, 0,
+ 20, 0, NULL);
+ if (ret)
drm_err(display->drm, "LPTX bit not cleared\n");
}
}
@@ -516,13 +529,15 @@ static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
+ int ret;
for_each_dsi_port(port, intel_dsi->ports) {
intel_de_rmw(display, DDI_BUF_CTL(port), 0, DDI_BUF_CTL_ENABLE);
- if (wait_for_us(!(intel_de_read(display, DDI_BUF_CTL(port)) &
- DDI_BUF_IS_IDLE),
- 500))
+ ret = intel_de_wait_custom(display, DDI_BUF_CTL(port),
+ DDI_BUF_IS_IDLE, 0,
+ 500, 0, NULL);
+ if (ret)
drm_err(display->drm, "DDI port:%c buffer idle\n",
port_name(port));
}
@@ -838,9 +853,14 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
/* wait for link ready */
for_each_dsi_port(port, intel_dsi->ports) {
+ int ret;
+
dsi_trans = dsi_port_to_transcoder(port);
- if (wait_for_us((intel_de_read(display, DSI_TRANS_FUNC_CONF(dsi_trans)) &
- LINK_READY), 2500))
+
+ ret = intel_de_wait_custom(display, DSI_TRANS_FUNC_CONF(dsi_trans),
+ LINK_READY, LINK_READY,
+ 2500, 0, NULL);
+ if (ret)
drm_err(display->drm, "DSI link not ready\n");
}
}
@@ -1321,6 +1341,7 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
enum port port;
enum transcoder dsi_trans;
u32 tmp;
+ int ret;
/* disable periodic update mode */
if (is_cmd_mode(intel_dsi)) {
@@ -1337,9 +1358,10 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
tmp &= ~LINK_ULPS_TYPE_LP11;
intel_de_write(display, DSI_LP_MSG(dsi_trans), tmp);
- if (wait_for_us((intel_de_read(display, DSI_LP_MSG(dsi_trans)) &
- LINK_IN_ULPS),
- 10))
+ ret = intel_de_wait_custom(display, DSI_LP_MSG(dsi_trans),
+ LINK_IN_ULPS, LINK_IN_ULPS,
+ 10, 0, NULL);
+ if (ret)
drm_err(display->drm, "DSI link not in ULPS\n");
}
@@ -1367,14 +1389,17 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
+ int ret;
gen11_dsi_ungate_clocks(encoder);
for_each_dsi_port(port, intel_dsi->ports) {
intel_de_rmw(display, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0);
- if (wait_for_us((intel_de_read(display, DDI_BUF_CTL(port)) &
- DDI_BUF_IS_IDLE),
- 8))
+ ret = intel_de_wait_custom(display, DDI_BUF_CTL(port),
+ DDI_BUF_IS_IDLE, DDI_BUF_IS_IDLE,
+ 8, 0, NULL);
+
+ if (ret)
drm_err(display->drm,
"DDI port:%c buffer not idle\n",
port_name(port));
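
The icl_dsi.c waits that poll display registers move to intel_de_wait_custom(), whose arguments are (display, reg, mask, expected value, fast timeout in us, slow timeout in ms, out_value). The mask/value pair encodes the polarity; a short sketch with an illustrative register bit:

	/* Wait for a bit to be set: value == mask. */
	ret = intel_de_wait_custom(display, reg, SOME_BIT, SOME_BIT,
				   100, 0, NULL);

	/* Wait for the same bit to clear: value == 0 under the mask. */
	ret = intel_de_wait_custom(display, reg, SOME_BIT, 0,
				   100, 0, NULL);

That is why the buffer-enable path above waits on (DDI_BUF_IS_IDLE, 0) while the disable path waits on (DDI_BUF_IS_IDLE, DDI_BUF_IS_IDLE).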
diff --git a/drivers/gpu/drm/i915/display/intel_alpm.c b/drivers/gpu/drm/i915/display/intel_alpm.c
index dfdde8e4eabe..ed7a7ed486b5 100644
--- a/drivers/gpu/drm/i915/display/intel_alpm.c
+++ b/drivers/gpu/drm/i915/display/intel_alpm.c
@@ -16,6 +16,14 @@
#include "intel_psr.h"
#include "intel_psr_regs.h"
+#define SILENCE_PERIOD_MIN_TIME 80
+#define SILENCE_PERIOD_MAX_TIME 180
+#define SILENCE_PERIOD_TIME (SILENCE_PERIOD_MIN_TIME + \
+ (SILENCE_PERIOD_MAX_TIME - \
+ SILENCE_PERIOD_MIN_TIME) / 2)
+
+#define LFPS_CYCLE_COUNT 10
+
bool intel_alpm_aux_wake_supported(struct intel_dp *intel_dp)
{
return intel_dp->alpm_dpcd & DP_ALPM_CAP;
@@ -44,72 +52,49 @@ void intel_alpm_init(struct intel_dp *intel_dp)
mutex_init(&intel_dp->alpm_parameters.lock);
}
-/*
- * See Bspec: 71632 for the table
- *
- * Silence_period = tSilence,Min + ((tSilence,Max - tSilence,Min) / 2)
- *
- * Half cycle duration:
- *
- * Link rates 1.62 - 4.32 and tLFPS_Cycle = 70 ns
- * FLOOR( (Link Rate * tLFPS_Cycle) / (2 * 10) )
- *
- * Link rates 5.4 - 8.1
- * PORT_ALPM_LFPS_CTL[ LFPS Cycle Count ] = 10
- * LFPS Period chosen is the mid-point of the min:max values from the table
- * FLOOR( LFPS Period in Symbol clocks /
- * (2 * PORT_ALPM_LFPS_CTL[ LFPS Cycle Count ]) )
- */
-static bool _lnl_get_silence_period_and_lfps_half_cycle(int link_rate,
- int *silence_period,
- int *lfps_half_cycle)
+static int get_silence_period_symbols(const struct intel_crtc_state *crtc_state)
{
- switch (link_rate) {
- case 162000:
- *silence_period = 20;
- *lfps_half_cycle = 5;
- break;
- case 216000:
- *silence_period = 27;
- *lfps_half_cycle = 7;
- break;
- case 243000:
- *silence_period = 31;
- *lfps_half_cycle = 8;
- break;
- case 270000:
- *silence_period = 34;
- *lfps_half_cycle = 9;
- break;
- case 324000:
- *silence_period = 41;
- *lfps_half_cycle = 11;
- break;
- case 432000:
- *silence_period = 56;
- *lfps_half_cycle = 15;
- break;
- case 540000:
- *silence_period = 69;
- *lfps_half_cycle = 12;
- break;
- case 648000:
- *silence_period = 84;
- *lfps_half_cycle = 15;
- break;
- case 675000:
- *silence_period = 87;
- *lfps_half_cycle = 15;
- break;
- case 810000:
- *silence_period = 104;
- *lfps_half_cycle = 19;
- break;
- default:
- *silence_period = *lfps_half_cycle = -1;
- return false;
+ return SILENCE_PERIOD_TIME * intel_dp_link_symbol_clock(crtc_state->port_clock) /
+ 1000 / 1000;
+}
+
+static int get_lfps_cycle_min_max_time(const struct intel_crtc_state *crtc_state,
+ int *min, int *max)
+{
+ if (crtc_state->port_clock < 540000) {
+ *min = 65 * LFPS_CYCLE_COUNT;
+ *max = 75 * LFPS_CYCLE_COUNT;
+ } else if (crtc_state->port_clock <= 810000) {
+ *min = 140;
+ *max = 800;
+ } else {
+ *min = *max = -1;
+ return -1;
}
- return true;
+
+ return 0;
+}
+
+static int get_lfps_cycle_time(const struct intel_crtc_state *crtc_state)
+{
+ int tlfps_cycle_min, tlfps_cycle_max, ret;
+
+ ret = get_lfps_cycle_min_max_time(crtc_state, &tlfps_cycle_min,
+ &tlfps_cycle_max);
+ if (ret)
+ return ret;
+
+ return tlfps_cycle_min + (tlfps_cycle_max - tlfps_cycle_min) / 2;
+}
+
+static int get_lfps_half_cycle_clocks(const struct intel_crtc_state *crtc_state)
+{
+ int lfps_cycle_time = get_lfps_cycle_time(crtc_state);
+
+ if (lfps_cycle_time < 0)
+ return -1;
+
+ return lfps_cycle_time * crtc_state->port_clock / 1000 / 1000 / (2 * LFPS_CYCLE_COUNT);
}
/*
@@ -131,21 +116,19 @@ static bool _lnl_get_silence_period_and_lfps_half_cycle(int link_rate,
* tML_PHY_LOCK = TPS4 Length * ( 10 / (Link Rate in MHz) )
* TPS4 Length = 252 Symbols
*/
-static int _lnl_compute_aux_less_wake_time(int port_clock)
+static int _lnl_compute_aux_less_wake_time(const struct intel_crtc_state *crtc_state)
{
int tphy2_p2_to_p0 = 12 * 1000;
- int tlfps_period_max = 800;
- int tsilence_max = 180;
int t1 = 50 * 1000;
int tps4 = 252;
/* port_clock is link rate in 10kbit/s units */
- int tml_phy_lock = 1000 * 1000 * tps4 / port_clock;
+ int tml_phy_lock = 1000 * 1000 * tps4 / crtc_state->port_clock;
int num_ml_phy_lock = 7 + DIV_ROUND_UP(6500, tml_phy_lock) + 1;
int t2 = num_ml_phy_lock * tml_phy_lock;
int tcds = 1 * t2;
- return DIV_ROUND_UP(tphy2_p2_to_p0 + tlfps_period_max + tsilence_max +
- t1 + tcds, 1000);
+ return DIV_ROUND_UP(tphy2_p2_to_p0 + get_lfps_cycle_time(crtc_state) +
+ SILENCE_PERIOD_TIME + t1 + tcds, 1000);
}
static int
@@ -157,13 +140,13 @@ _lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp,
lfps_half_cycle;
aux_less_wake_time =
- _lnl_compute_aux_less_wake_time(crtc_state->port_clock);
+ _lnl_compute_aux_less_wake_time(crtc_state);
aux_less_wake_lines = intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode,
aux_less_wake_time);
+ silence_period = get_silence_period_symbols(crtc_state);
- if (!_lnl_get_silence_period_and_lfps_half_cycle(crtc_state->port_clock,
- &silence_period,
- &lfps_half_cycle))
+ lfps_half_cycle = get_lfps_half_cycle_clocks(crtc_state);
+ if (lfps_half_cycle < 0)
return false;
if (aux_less_wake_lines > ALPM_CTL_AUX_LESS_WAKE_TIME_MASK ||
@@ -406,7 +389,7 @@ void intel_alpm_port_configure(struct intel_dp *intel_dp,
PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(0) |
PORT_ALPM_CTL_SILENCE_PERIOD(
intel_dp->alpm_parameters.silence_period_sym_clocks);
- lfps_ctl_val = PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(10) |
+ lfps_ctl_val = PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(LFPS_CYCLE_COUNT) |
PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION(
intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms) |
PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(
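
The per-rate lookup table is replaced by the arithmetic its comment used to describe. Working one old table entry by hand, for port_clock = 270000 (2.7 Gb/s, 8b/10b, so intel_dp_link_symbol_clock() yields a 270000 kHz symbol clock):

	tSilence mid-point    = 80 + (180 - 80) / 2             = 130 ns
	silence period        = 130 * 270000 / 1000 / 1000      = 35 symbols
	tLFPS cycle mid-point = 650 + (750 - 650) / 2           = 700 ns
	LFPS half cycle       = 700 * 270000 / 10^6 / (2 * 10)  = 9 clocks

The half-cycle count matches the old table entry (9) exactly; the silence period lands within one symbol of the old 34, since the table was rounded slightly differently. These are sketches of the integer math above, not authoritative Bspec numbers.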
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index e007380e9a63..3b14f929825a 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -236,7 +236,8 @@ static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32
struct intel_panel *panel = &connector->panel;
u32 tmp, mask;
- drm_WARN_ON(display->drm, panel->backlight.pwm_level_max == 0);
+ if (drm_WARN_ON(display->drm, panel->backlight.pwm_level_max == 0))
+ return;
if (panel->backlight.combination_mode) {
struct pci_dev *pdev = to_pci_dev(display->drm->dev);
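
drm_WARN_ON() evaluates to its condition, so the rewrite above both warns and bails out, keeping a zero pwm_level_max from reaching the scaling math later in the function:

	/* One-line guard pattern: warn once, then refuse to continue. */
	if (drm_WARN_ON(display->drm, panel->backlight.pwm_level_max == 0))
		return;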
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 9c268bed091d..3596dce84c28 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -36,6 +36,7 @@
#include "soc/intel_rom.h"
#include "i915_drv.h"
+#include "i915_utils.h"
#include "intel_display.h"
#include "intel_display_core.h"
#include "intel_display_rpm.h"
@@ -1566,10 +1567,7 @@ parse_psr(struct intel_display *display,
panel->vbt.psr.full_link = psr_table->full_link;
panel->vbt.psr.require_aux_wakeup = psr_table->require_aux_to_wakeup;
-
- /* Allowed VBT values goes from 0 to 15 */
- panel->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 :
- psr_table->idle_frames > 15 ? 15 : psr_table->idle_frames;
+ panel->vbt.psr.idle_frames = psr_table->idle_frames;
/*
* New psr options 0=500us, 1=100us, 2=2500us, 3=0us
@@ -2480,6 +2478,25 @@ static int parse_bdb_216_dp_max_link_rate(const int vbt_max_link_rate)
}
}
+static u32 edp_rate_override_mask(int rate)
+{
+ switch (rate) {
+ case 2000000: return BDB_263_VBT_EDP_LINK_RATE_20;
+ case 1350000: return BDB_263_VBT_EDP_LINK_RATE_13_5;
+ case 1000000: return BDB_263_VBT_EDP_LINK_RATE_10;
+ case 810000: return BDB_263_VBT_EDP_LINK_RATE_8_1;
+ case 675000: return BDB_263_VBT_EDP_LINK_RATE_6_75;
+ case 540000: return BDB_263_VBT_EDP_LINK_RATE_5_4;
+ case 432000: return BDB_263_VBT_EDP_LINK_RATE_4_32;
+ case 324000: return BDB_263_VBT_EDP_LINK_RATE_3_24;
+ case 270000: return BDB_263_VBT_EDP_LINK_RATE_2_7;
+ case 243000: return BDB_263_VBT_EDP_LINK_RATE_2_43;
+ case 216000: return BDB_263_VBT_EDP_LINK_RATE_2_16;
+ case 162000: return BDB_263_VBT_EDP_LINK_RATE_1_62;
+ default: return 0;
+ }
+}
+
int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata)
{
if (!devdata || devdata->display->vbt.version < 216)
@@ -2499,6 +2516,19 @@ int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata)
return devdata->child.dp_max_lane_count + 1;
}
+bool
+intel_bios_encoder_reject_edp_rate(const struct intel_bios_encoder_data *devdata,
+ int rate)
+{
+ if (!devdata || devdata->display->vbt.version < 263)
+ return false;
+
+ if (devdata->child.edp_data_rate_override == BDB_263_VBT_EDP_RATES_MASK)
+ return false;
+
+ return devdata->child.edp_data_rate_override & edp_rate_override_mask(rate);
+}
+
static void sanitize_device_type(struct intel_bios_encoder_data *devdata,
enum port port)
{
@@ -2747,8 +2777,10 @@ static int child_device_expected_size(u16 version)
{
BUILD_BUG_ON(sizeof(struct child_device_config) < 40);
- if (version > 256)
+ if (version > 263)
return -ENOENT;
+ else if (version >= 263)
+ return 44;
else if (version >= 256)
return 40;
else if (version >= 216)
@@ -3743,8 +3775,6 @@ DEFINE_SHOW_ATTRIBUTE(intel_bios_vbt);
void intel_bios_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
-
- debugfs_create_file("i915_vbt", 0444, minor->debugfs_root,
+ debugfs_create_file("i915_vbt", 0444, display->drm->debugfs_root,
display, &intel_bios_vbt_fops);
}
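
intel_bios_encoder_reject_edp_rate() is a per-rate predicate rather than a rate list, so a caller is expected to filter its own table. A hypothetical sketch (the helper name and array here are illustrative, not part of this patch):

	/* Hypothetical: compact an eDP rate array in place. */
	static int filter_vbt_edp_rates(const struct intel_bios_encoder_data *devdata,
					int *rates, int num_rates)
	{
		int i, n = 0;

		for (i = 0; i < num_rates; i++) {
			if (intel_bios_encoder_reject_edp_rate(devdata, rates[i]))
				continue;
			rates[n++] = rates[i];
		}

		return n; /* rates that survived the VBT override mask */
	}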
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index 6cd7a011b8c4..f9e438b2787b 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -50,180 +50,6 @@ enum intel_backlight_type {
INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE,
};
-/*
- * MIPI Sequence Block definitions
- *
- * Note the VBT spec has AssertReset / DeassertReset swapped from their
- * usual naming, we use the proper names here to avoid confusion when
- * reading the code.
- */
-enum mipi_seq {
- MIPI_SEQ_END = 0,
- MIPI_SEQ_DEASSERT_RESET, /* Spec says MipiAssertResetPin */
- MIPI_SEQ_INIT_OTP,
- MIPI_SEQ_DISPLAY_ON,
- MIPI_SEQ_DISPLAY_OFF,
- MIPI_SEQ_ASSERT_RESET, /* Spec says MipiDeassertResetPin */
- MIPI_SEQ_BACKLIGHT_ON, /* sequence block v2+ */
- MIPI_SEQ_BACKLIGHT_OFF, /* sequence block v2+ */
- MIPI_SEQ_TEAR_ON, /* sequence block v2+ */
- MIPI_SEQ_TEAR_OFF, /* sequence block v3+ */
- MIPI_SEQ_POWER_ON, /* sequence block v3+ */
- MIPI_SEQ_POWER_OFF, /* sequence block v3+ */
- MIPI_SEQ_MAX
-};
-
-enum mipi_seq_element {
- MIPI_SEQ_ELEM_END = 0,
- MIPI_SEQ_ELEM_SEND_PKT,
- MIPI_SEQ_ELEM_DELAY,
- MIPI_SEQ_ELEM_GPIO,
- MIPI_SEQ_ELEM_I2C, /* sequence block v2+ */
- MIPI_SEQ_ELEM_SPI, /* sequence block v3+ */
- MIPI_SEQ_ELEM_PMIC, /* sequence block v3+ */
- MIPI_SEQ_ELEM_MAX
-};
-
-#define MIPI_DSI_UNDEFINED_PANEL_ID 0
-#define MIPI_DSI_GENERIC_PANEL_ID 1
-
-struct mipi_config {
- u16 panel_id;
-
- /* General Params */
- u32 enable_dithering:1;
- u32 rsvd1:1;
- u32 is_bridge:1;
-
- u32 panel_arch_type:2;
- u32 is_cmd_mode:1;
-
-#define NON_BURST_SYNC_PULSE 0x1
-#define NON_BURST_SYNC_EVENTS 0x2
-#define BURST_MODE 0x3
- u32 video_transfer_mode:2;
-
- u32 cabc_supported:1;
-#define PPS_BLC_PMIC 0
-#define PPS_BLC_SOC 1
- u32 pwm_blc:1;
-
- /* Bit 13:10 */
-#define PIXEL_FORMAT_RGB565 0x1
-#define PIXEL_FORMAT_RGB666 0x2
-#define PIXEL_FORMAT_RGB666_LOOSELY_PACKED 0x3
-#define PIXEL_FORMAT_RGB888 0x4
- u32 videomode_color_format:4;
-
- /* Bit 15:14 */
-#define ENABLE_ROTATION_0 0x0
-#define ENABLE_ROTATION_90 0x1
-#define ENABLE_ROTATION_180 0x2
-#define ENABLE_ROTATION_270 0x3
- u32 rotation:2;
- u32 bta_enabled:1;
- u32 rsvd2:15;
-
- /* 2 byte Port Description */
-#define DUAL_LINK_NOT_SUPPORTED 0
-#define DUAL_LINK_FRONT_BACK 1
-#define DUAL_LINK_PIXEL_ALT 2
- u16 dual_link:2;
- u16 lane_cnt:2;
- u16 pixel_overlap:3;
- u16 rgb_flip:1;
-#define DL_DCS_PORT_A 0x00
-#define DL_DCS_PORT_C 0x01
-#define DL_DCS_PORT_A_AND_C 0x02
- u16 dl_dcs_cabc_ports:2;
- u16 dl_dcs_backlight_ports:2;
- u16 rsvd3:4;
-
- u16 rsvd4;
-
- u8 rsvd5;
- u32 target_burst_mode_freq;
- u32 dsi_ddr_clk;
- u32 bridge_ref_clk;
-
-#define BYTE_CLK_SEL_20MHZ 0
-#define BYTE_CLK_SEL_10MHZ 1
-#define BYTE_CLK_SEL_5MHZ 2
- u8 byte_clk_sel:2;
-
- u8 rsvd6:6;
-
- /* DPHY Flags */
- u16 dphy_param_valid:1;
- u16 eot_pkt_disabled:1;
- u16 enable_clk_stop:1;
- u16 rsvd7:13;
-
- u32 hs_tx_timeout;
- u32 lp_rx_timeout;
- u32 turn_around_timeout;
- u32 device_reset_timer;
- u32 master_init_timer;
- u32 dbi_bw_timer;
- u32 lp_byte_clk_val;
-
- /* 4 byte Dphy Params */
- u32 prepare_cnt:6;
- u32 rsvd8:2;
- u32 clk_zero_cnt:8;
- u32 trail_cnt:5;
- u32 rsvd9:3;
- u32 exit_zero_cnt:6;
- u32 rsvd10:2;
-
- u32 clk_lane_switch_cnt;
- u32 hl_switch_cnt;
-
- u32 rsvd11[6];
-
- /* timings based on dphy spec */
- u8 tclk_miss;
- u8 tclk_post;
- u8 rsvd12;
- u8 tclk_pre;
- u8 tclk_prepare;
- u8 tclk_settle;
- u8 tclk_term_enable;
- u8 tclk_trail;
- u16 tclk_prepare_clkzero;
- u8 rsvd13;
- u8 td_term_enable;
- u8 teot;
- u8 ths_exit;
- u8 ths_prepare;
- u16 ths_prepare_hszero;
- u8 rsvd14;
- u8 ths_settle;
- u8 ths_skip;
- u8 ths_trail;
- u8 tinit;
- u8 tlpx;
- u8 rsvd15[3];
-
- /* GPIOs */
- u8 panel_enable;
- u8 bl_enable;
- u8 pwm_enable;
- u8 reset_r_n;
- u8 pwr_down_r;
- u8 stdby_r_n;
-
-} __packed;
-
-/* all delays have a unit of 100us */
-struct mipi_pps_data {
- u16 panel_on_delay;
- u16 bl_enable_delay;
- u16 bl_disable_delay;
- u16 panel_off_delay;
- u16 panel_power_cycle_delay;
-} __packed;
-
void intel_bios_init(struct intel_display *display);
void intel_bios_init_panel_early(struct intel_display *display,
struct intel_panel *panel,
@@ -259,6 +85,8 @@ bool intel_bios_encoder_is_lspcon(const struct intel_bios_encoder_data *devdata)
bool intel_bios_encoder_lane_reversal(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_hpd_invert(const struct intel_bios_encoder_data *devdata);
enum port intel_bios_encoder_port(const struct intel_bios_encoder_data *devdata);
+bool intel_bios_encoder_reject_edp_rate(const struct intel_bios_encoder_data *devdata,
+ int rate);
enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata);
int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata);
int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata);
diff --git a/drivers/gpu/drm/i915/display/intel_bo.c b/drivers/gpu/drm/i915/display/intel_bo.c
index 65d64f79a4bd..6ae1374d5c2b 100644
--- a/drivers/gpu/drm/i915/display/intel_bo.c
+++ b/drivers/gpu/drm/i915/display/intel_bo.c
@@ -2,7 +2,7 @@
/* Copyright © 2024 Intel Corporation */
#include <drm/drm_panic.h>
-#include "display/intel_display_types.h"
+
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_object.h"
#include "gem/i915_gem_object_frontbuffer.h"
@@ -59,18 +59,3 @@ void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
{
i915_debugfs_describe_obj(m, to_intel_bo(obj));
}
-
-struct intel_framebuffer *intel_bo_alloc_framebuffer(void)
-{
- return i915_gem_object_alloc_framebuffer();
-}
-
-int intel_bo_panic_setup(struct drm_scanout_buffer *sb)
-{
- return i915_gem_object_panic_setup(sb);
-}
-
-void intel_bo_panic_finish(struct intel_framebuffer *fb)
-{
- return i915_gem_object_panic_finish(fb);
-}
diff --git a/drivers/gpu/drm/i915/display/intel_bo.h b/drivers/gpu/drm/i915/display/intel_bo.h
index 97087a64d23b..48d87019e48a 100644
--- a/drivers/gpu/drm/i915/display/intel_bo.h
+++ b/drivers/gpu/drm/i915/display/intel_bo.h
@@ -25,8 +25,5 @@ struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj,
struct intel_frontbuffer *front);
void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj);
-struct intel_framebuffer *intel_bo_alloc_framebuffer(void);
-int intel_bo_panic_setup(struct drm_scanout_buffer *sb);
-void intel_bo_panic_finish(struct intel_framebuffer *fb);
#endif /* __INTEL_BO__ */
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index d29a755612de..ac6da20d9529 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -359,7 +359,7 @@ static int icl_get_qgv_points(struct intel_display *display,
for (i = 0; i < qi->num_psf_points; i++)
drm_dbg_kms(display->drm,
- "PSF GV %d: CLK=%d \n",
+ "PSF GV %d: CLK=%d\n",
i, qi->psf_points[i].clk);
}
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 228aa64c1349..9725eebe5706 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -22,6 +22,7 @@
*/
#include <linux/debugfs.h>
+#include <linux/iopoll.h>
#include <linux/time.h>
#include <drm/drm_fixed.h>
@@ -31,6 +32,7 @@
#include "hsw_ips.h"
#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_bw.h"
@@ -672,6 +674,7 @@ static void vlv_set_cdclk(struct intel_display *display,
int cdclk = cdclk_config->cdclk;
u32 val, cmd = cdclk_config->voltage_level;
intel_wakeref_t wakeref;
+ int ret;
switch (cdclk) {
case 400000:
@@ -702,12 +705,12 @@ static void vlv_set_cdclk(struct intel_display *display,
val &= ~DSPFREQGUAR_MASK;
val |= (cmd << DSPFREQGUAR_SHIFT);
vlv_punit_write(display->drm, PUNIT_REG_DSPSSPM, val);
- if (wait_for((vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) &
- DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
- 50)) {
- drm_err(display->drm,
- "timed out waiting for CDclk change\n");
- }
+
+ ret = poll_timeout_us(val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM),
+ (val & DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
+ 500, 50 * 1000, false);
+ if (ret)
+ drm_err(display->drm, "timed out waiting for CDCLK change\n");
if (cdclk == 400000) {
u32 divider;
@@ -721,11 +724,11 @@ static void vlv_set_cdclk(struct intel_display *display,
val |= divider;
vlv_cck_write(display->drm, CCK_DISPLAY_CLOCK_CONTROL, val);
- if (wait_for((vlv_cck_read(display->drm, CCK_DISPLAY_CLOCK_CONTROL) &
- CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
- 50))
- drm_err(display->drm,
- "timed out waiting for CDclk change\n");
+ ret = poll_timeout_us(val = vlv_cck_read(display->drm, CCK_DISPLAY_CLOCK_CONTROL),
+ (val & CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
+ 500, 50 * 1000, false);
+ if (ret)
+ drm_err(display->drm, "timed out waiting for CDCLK change\n");
}
/* adjust self-refresh exit latency value */
@@ -761,6 +764,7 @@ static void chv_set_cdclk(struct intel_display *display,
int cdclk = cdclk_config->cdclk;
u32 val, cmd = cdclk_config->voltage_level;
intel_wakeref_t wakeref;
+ int ret;
switch (cdclk) {
case 333333:
@@ -786,12 +790,12 @@ static void chv_set_cdclk(struct intel_display *display,
val &= ~DSPFREQGUAR_MASK_CHV;
val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
vlv_punit_write(display->drm, PUNIT_REG_DSPSSPM, val);
- if (wait_for((vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) &
- DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
- 50)) {
- drm_err(display->drm,
- "timed out waiting for CDclk change\n");
- }
+
+ ret = poll_timeout_us(val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM),
+ (val & DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
+ 500, 50 * 1000, false);
+ if (ret)
+ drm_err(display->drm, "timed out waiting for CDCLK change\n");
vlv_punit_put(display->drm);
@@ -903,8 +907,10 @@ static void bdw_set_cdclk(struct intel_display *display,
* According to the spec, it should be enough to poll for this 1 us.
* However, extensive testing shows that this can take longer.
*/
- if (wait_for_us(intel_de_read(display, LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE, 100))
+ ret = intel_de_wait_custom(display, LCPLL_CTL,
+ LCPLL_CD_SOURCE_FCLK_DONE, LCPLL_CD_SOURCE_FCLK_DONE,
+ 100, 0, NULL);
+ if (ret)
drm_err(display->drm, "Switching to FCLK failed\n");
intel_de_rmw(display, LCPLL_CTL,
@@ -913,8 +919,10 @@ static void bdw_set_cdclk(struct intel_display *display,
intel_de_rmw(display, LCPLL_CTL,
LCPLL_CD_SOURCE_FCLK, 0);
- if (wait_for_us((intel_de_read(display, LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+ ret = intel_de_wait_custom(display, LCPLL_CTL,
+ LCPLL_CD_SOURCE_FCLK_DONE, 0,
+ 1, 0, NULL);
+ if (ret)
drm_err(display->drm, "Switching back to LCPLL failed\n");
intel_pcode_write(display->drm, HSW_PCODE_DE_WRITE_FREQ_REQ,
@@ -3569,7 +3577,7 @@ static int i9xx_hrawclk(struct intel_display *display)
struct drm_i915_private *i915 = to_i915(display->drm);
/* hrawclock is 1/4 the FSB frequency */
- return DIV_ROUND_CLOSEST(i9xx_fsb_freq(i915), 4);
+ return DIV_ROUND_CLOSEST(intel_fsb_freq(i915), 4);
}
/**
@@ -3622,9 +3630,7 @@ DEFINE_SHOW_ATTRIBUTE(i915_cdclk_info);
void intel_cdclk_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
-
- debugfs_create_file("i915_cdclk_info", 0444, minor->debugfs_root,
+ debugfs_create_file("i915_cdclk_info", 0444, display->drm->debugfs_root,
display, &i915_cdclk_info_fops);
}
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 42c923f416b3..6a55854db5b6 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -77,7 +77,7 @@ void intel_connector_cancel_modeset_retry_work(struct intel_connector *connector
drm_connector_put(&connector->base);
}
-int intel_connector_init(struct intel_connector *connector)
+static int intel_connector_init(struct intel_connector *connector)
{
struct intel_digital_connector_state *conn_state;
diff --git a/drivers/gpu/drm/i915/display/intel_connector.h b/drivers/gpu/drm/i915/display/intel_connector.h
index aafb25a814fa..0aa86626e646 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.h
+++ b/drivers/gpu/drm/i915/display/intel_connector.h
@@ -14,7 +14,6 @@ struct i2c_adapter;
struct intel_connector;
struct intel_encoder;
-int intel_connector_init(struct intel_connector *connector);
struct intel_connector *intel_connector_alloc(void);
void intel_connector_free(struct intel_connector *connector);
void intel_connector_destroy(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 898c5d9e8f7a..31e68047f217 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -50,6 +50,7 @@
#include "intel_gmbus.h"
#include "intel_hotplug.h"
#include "intel_hotplug_irq.h"
+#include "intel_link_bw.h"
#include "intel_load_detect.h"
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
@@ -421,7 +422,7 @@ static int pch_crt_compute_config(struct intel_encoder *encoder,
return -EINVAL;
crtc_state->has_pch_encoder = true;
- if (!intel_fdi_compute_pipe_bpp(crtc_state))
+ if (!intel_link_bw_compute_pipe_bpp(crtc_state))
return -EINVAL;
crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
@@ -446,7 +447,7 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
return -EINVAL;
crtc_state->has_pch_encoder = true;
- if (!intel_fdi_compute_pipe_bpp(crtc_state))
+ if (!intel_link_bw_compute_pipe_bpp(crtc_state))
return -EINVAL;
crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index 198e69efe9ac..d4d181f9dca5 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -33,17 +33,9 @@ static const u32 intel_cursor_formats[] = {
DRM_FORMAT_ARGB8888,
};
-static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
+static u32 intel_cursor_surf_offset(const struct intel_plane_state *plane_state)
{
- struct intel_display *display = to_intel_display(plane_state);
- u32 base;
-
- if (DISPLAY_INFO(display)->cursor_needs_physical)
- base = plane_state->phys_dma_addr;
- else
- base = intel_plane_ggtt_offset(plane_state);
-
- return base + plane_state->view.color_plane[0].offset;
+ return plane_state->view.color_plane[0].offset;
}
static u32 intel_cursor_position(const struct intel_crtc_state *crtc_state,
@@ -213,8 +205,7 @@ static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
return cntl;
}
-static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 i845_cursor_ctl(const struct intel_plane_state *plane_state)
{
return CURSOR_ENABLE |
CURSOR_FORMAT_ARGB |
@@ -274,7 +265,7 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
return -EINVAL;
}
- plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
+ plane_state->ctl = i845_cursor_ctl(plane_state);
return 0;
}
@@ -297,7 +288,7 @@ static void i845_cursor_update_arm(struct intel_dsb *dsb,
size = CURSOR_HEIGHT(height) | CURSOR_WIDTH(width);
- base = intel_cursor_base(plane_state);
+ base = plane_state->surf;
pos = intel_cursor_position(crtc_state, plane_state, false);
}
@@ -406,8 +397,7 @@ static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
return cntl;
}
-static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 i9xx_cursor_ctl(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state);
u32 cntl = 0;
@@ -534,7 +524,7 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
return -EINVAL;
}
- plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
+ plane_state->ctl = i9xx_cursor_ctl(plane_state);
return 0;
}
@@ -675,7 +665,7 @@ static void i9xx_cursor_update_arm(struct intel_dsb *dsb,
if (width != height)
fbc_ctl = CUR_FBC_EN | CUR_FBC_HEIGHT(height - 1);
- base = intel_cursor_base(plane_state);
+ base = plane_state->surf;
pos = intel_cursor_position(crtc_state, plane_state, false);
}
@@ -1051,6 +1041,8 @@ intel_cursor_plane_create(struct intel_display *display,
cursor->check_plane = i9xx_check_cursor;
}
+ cursor->surf_offset = intel_cursor_surf_offset;
+
if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
cursor->capture_error = g4x_cursor_capture_error;
else
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index ed8e640b96b0..801235a5bc0a 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -3239,14 +3239,22 @@ void intel_lnl_mac_transmit_lfps(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
- u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder);
- bool enable = intel_alpm_is_alpm_aux_less(enc_to_intel_dp(encoder),
- crtc_state);
+ intel_wakeref_t wakeref;
int i;
+ u8 owned_lane_mask;
- if (DISPLAY_VER(display) < 20)
+ if (DISPLAY_VER(display) < 20 ||
+ !intel_alpm_is_alpm_aux_less(enc_to_intel_dp(encoder), crtc_state))
return;
+ owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder);
+
+ wakeref = intel_cx0_phy_transaction_begin(encoder);
+
+ if (intel_encoder_is_c10phy(encoder))
+ intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_CONTROL(1), 0,
+ C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED);
+
for (i = 0; i < 4; i++) {
int tx = i % 2 + 1;
u8 lane_mask = i < 2 ? INTEL_CX0_LANE0 : INTEL_CX0_LANE1;
@@ -3256,9 +3264,10 @@ void intel_lnl_mac_transmit_lfps(struct intel_encoder *encoder,
intel_cx0_rmw(encoder, lane_mask, PHY_CMN1_CONTROL(tx, 0),
CONTROL0_MAC_TRANSMIT_LFPS,
- enable ? CONTROL0_MAC_TRANSMIT_LFPS : 0,
- MB_WRITE_COMMITTED);
+ CONTROL0_MAC_TRANSMIT_LFPS, MB_WRITE_COMMITTED);
}
+
+ intel_cx0_phy_transaction_end(encoder, wakeref);
}
static u8 cx0_power_control_disable_val(struct intel_encoder *encoder)
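
intel_lnl_mac_transmit_lfps() now takes its own PHY transaction and, for C10 PHYs, enables message-bus access before touching the lanes. The bracket pattern, as used above:

	intel_wakeref_t wakeref;

	wakeref = intel_cx0_phy_transaction_begin(encoder);

	/* ... intel_cx0_rmw() message-bus writes go here ... */

	intel_cx0_phy_transaction_end(encoder, wakeref);

Folding the aux-less ALPM check into the early return also means this function only ever sets CONTROL0_MAC_TRANSMIT_LFPS, never clears it.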
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 0405396c7750..c09aa759f4d4 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -26,6 +26,7 @@
*/
#include <linux/iopoll.h>
+#include <linux/seq_buf.h>
#include <linux/string_helpers.h>
#include <drm/display/drm_dp_helper.h>
@@ -596,8 +597,9 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
enum transcoder master;
master = crtc_state->mst_master_transcoder;
- drm_WARN_ON(display->drm,
- master == INVALID_TRANSCODER);
+ if (drm_WARN_ON(display->drm,
+ master == INVALID_TRANSCODER))
+ master = TRANSCODER_A;
temp |= TRANS_DDI_MST_TRANSPORT_SELECT(master);
}
} else {
@@ -2166,7 +2168,8 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
{
struct intel_display *display = to_intel_display(crtc_state);
enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
- u32 ln0, ln1, pin_assignment;
+ enum intel_tc_pin_assignment pin_assignment;
+ u32 ln0, ln1;
u8 width;
if (DISPLAY_VER(display) >= 14)
@@ -2188,11 +2191,11 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
/* DPPATC */
- pin_assignment = intel_tc_port_get_pin_assignment_mask(dig_port);
+ pin_assignment = intel_tc_port_get_pin_assignment(dig_port);
width = crtc_state->lane_count;
switch (pin_assignment) {
- case 0x0:
+ case INTEL_TC_PIN_ASSIGNMENT_NONE:
drm_WARN_ON(display->drm,
!intel_tc_port_in_legacy_mode(dig_port));
if (width == 1) {
@@ -2202,20 +2205,20 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
}
break;
- case 0x1:
+ case INTEL_TC_PIN_ASSIGNMENT_A:
if (width == 4) {
ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
}
break;
- case 0x2:
+ case INTEL_TC_PIN_ASSIGNMENT_B:
if (width == 2) {
ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
}
break;
- case 0x3:
- case 0x5:
+ case INTEL_TC_PIN_ASSIGNMENT_C:
+ case INTEL_TC_PIN_ASSIGNMENT_E:
if (width == 1) {
ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
@@ -2224,8 +2227,8 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
}
break;
- case 0x4:
- case 0x6:
+ case INTEL_TC_PIN_ASSIGNMENT_D:
+ case INTEL_TC_PIN_ASSIGNMENT_F:
if (width == 1) {
ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
@@ -2339,34 +2342,24 @@ static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
drm_dbg_kms(display->drm, "Failed to clear FEC detected flags\n");
}
-static int read_fec_detected_status(struct drm_dp_aux *aux)
-{
- int ret;
- u8 status;
-
- ret = drm_dp_dpcd_readb(aux, DP_FEC_STATUS, &status);
- if (ret < 0)
- return ret;
-
- return status;
-}
-
static int wait_for_fec_detected(struct drm_dp_aux *aux, bool enabled)
{
struct intel_display *display = to_intel_display(aux->drm_dev);
int mask = enabled ? DP_FEC_DECODE_EN_DETECTED : DP_FEC_DECODE_DIS_DETECTED;
- int status;
- int err;
+ u8 status = 0;
+ int ret, err;
- err = readx_poll_timeout(read_fec_detected_status, aux, status,
- status & mask || status < 0,
- 10000, 200000);
+ ret = poll_timeout_us(err = drm_dp_dpcd_read_byte(aux, DP_FEC_STATUS, &status),
+ err || (status & mask),
+ 10 * 1000, 200 * 1000, false);
- if (err || status < 0) {
+ /* Either can be non-zero, but not both */
+ ret = ret ?: err;
+ if (ret) {
drm_dbg_kms(display->drm,
- "Failed waiting for FEC %s to get detected: %d (status %d)\n",
- str_enabled_disabled(enabled), err, status);
- return err ? err : status;
+ "Failed waiting for FEC %s to get detected: %d (status 0x%02x)\n",
+ str_enabled_disabled(enabled), ret, status);
+ return ret;
}
return 0;
@@ -2561,6 +2554,7 @@ mtl_ddi_enable_d2d(struct intel_encoder *encoder)
enum port port = encoder->port;
i915_reg_t reg;
u32 set_bits, wait_bits;
+ int ret;
if (DISPLAY_VER(display) < 14)
return;
@@ -2576,7 +2570,11 @@ mtl_ddi_enable_d2d(struct intel_encoder *encoder)
}
intel_de_rmw(display, reg, 0, set_bits);
- if (wait_for_us(intel_de_read(display, reg) & wait_bits, 100)) {
+
+ ret = intel_de_wait_custom(display, reg,
+ wait_bits, wait_bits,
+ 100, 0, NULL);
+ if (ret) {
drm_err(display->drm, "Timeout waiting for D2D Link enable for DDI/PORT_BUF_CTL %c\n",
port_name(port));
}
@@ -3058,6 +3056,7 @@ mtl_ddi_disable_d2d(struct intel_encoder *encoder)
enum port port = encoder->port;
i915_reg_t reg;
u32 clr_bits, wait_bits;
+ int ret;
if (DISPLAY_VER(display) < 14)
return;
@@ -3073,7 +3072,11 @@ mtl_ddi_disable_d2d(struct intel_encoder *encoder)
}
intel_de_rmw(display, reg, clr_bits, 0);
- if (wait_for_us(!(intel_de_read(display, reg) & wait_bits), 100))
+
+ ret = intel_de_wait_custom(display, reg,
+ wait_bits, 0,
+ 100, 0, NULL);
+ if (ret)
drm_err(display->drm, "Timeout waiting for D2D Link disable for DDI/PORT_BUF_CTL %c\n",
port_name(port));
}
@@ -5066,11 +5069,45 @@ static bool port_in_use(struct intel_display *display, enum port port)
return false;
}
+static const char *intel_ddi_encoder_name(struct intel_display *display,
+ enum port port, enum phy phy,
+ struct seq_buf *s)
+{
+ if (DISPLAY_VER(display) >= 13 && port >= PORT_D_XELPD) {
+ seq_buf_printf(s, "DDI %c/PHY %c",
+ port_name(port - PORT_D_XELPD + PORT_D),
+ phy_name(phy));
+ } else if (DISPLAY_VER(display) >= 12) {
+ enum tc_port tc_port = intel_port_to_tc(display, port);
+
+ seq_buf_printf(s, "DDI %s%c/PHY %s%c",
+ port >= PORT_TC1 ? "TC" : "",
+ port >= PORT_TC1 ? port_tc_name(port) : port_name(port),
+ tc_port != TC_PORT_NONE ? "TC" : "",
+ tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy));
+ } else if (DISPLAY_VER(display) >= 11) {
+ enum tc_port tc_port = intel_port_to_tc(display, port);
+
+ seq_buf_printf(s, "DDI %c%s/PHY %s%c",
+ port_name(port),
+ port >= PORT_C ? " (TC)" : "",
+ tc_port != TC_PORT_NONE ? "TC" : "",
+ tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy));
+ } else {
+ seq_buf_printf(s, "DDI %c/PHY %c", port_name(port), phy_name(phy));
+ }
+
+ drm_WARN_ON(display->drm, seq_buf_has_overflowed(s));
+
+ return seq_buf_str(s);
+}
+
void intel_ddi_init(struct intel_display *display,
const struct intel_bios_encoder_data *devdata)
{
struct intel_digital_port *dig_port;
struct intel_encoder *encoder;
+ DECLARE_SEQ_BUF(encoder_name, 20);
bool init_hdmi, init_dp;
enum port port;
enum phy phy;
@@ -5148,52 +5185,19 @@ void intel_ddi_init(struct intel_display *display,
phy_name(phy));
}
- dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
+ dig_port = intel_dig_port_alloc();
if (!dig_port)
return;
- dig_port->aux_ch = AUX_CH_NONE;
-
encoder = &dig_port->base;
encoder->devdata = devdata;
- if (DISPLAY_VER(display) >= 13 && port >= PORT_D_XELPD) {
- drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
- DRM_MODE_ENCODER_TMDS,
- "DDI %c/PHY %c",
- port_name(port - PORT_D_XELPD + PORT_D),
- phy_name(phy));
- } else if (DISPLAY_VER(display) >= 12) {
- enum tc_port tc_port = intel_port_to_tc(display, port);
-
- drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
- DRM_MODE_ENCODER_TMDS,
- "DDI %s%c/PHY %s%c",
- port >= PORT_TC1 ? "TC" : "",
- port >= PORT_TC1 ? port_tc_name(port) : port_name(port),
- tc_port != TC_PORT_NONE ? "TC" : "",
- tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy));
- } else if (DISPLAY_VER(display) >= 11) {
- enum tc_port tc_port = intel_port_to_tc(display, port);
-
- drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
- DRM_MODE_ENCODER_TMDS,
- "DDI %c%s/PHY %s%c",
- port_name(port),
- port >= PORT_C ? " (TC)" : "",
- tc_port != TC_PORT_NONE ? "TC" : "",
- tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy));
- } else {
- drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
- DRM_MODE_ENCODER_TMDS,
- "DDI %c/PHY %c", port_name(port), phy_name(phy));
- }
+ drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
+ DRM_MODE_ENCODER_TMDS, "%s",
+ intel_ddi_encoder_name(display, port, phy, &encoder_name));
intel_encoder_link_check_init(encoder, intel_ddi_link_check);
- mutex_init(&dig_port->hdcp.mutex);
- dig_port->hdcp.num_streams = 0;
-
encoder->hotplug = intel_ddi_hotplug;
encoder->compute_output_type = intel_ddi_compute_output_type;
encoder->compute_config = intel_ddi_compute_config;
@@ -5331,7 +5335,6 @@ void intel_ddi_init(struct intel_display *display,
dig_port->ddi_a_4_lanes = DISPLAY_VER(display) < 11 && ddi_buf_ctl & DDI_A_4_LANES;
- dig_port->dp.output_reg = INVALID_MMIO_REG;
dig_port->max_lanes = intel_ddi_max_lanes(dig_port);
if (need_aux_ch(encoder, init_dp)) {
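
With the name formatting pulled out into intel_ddi_encoder_name(), drm_encoder_init() is called once with a plain "%s". The helper builds the string in an on-stack seq_buf; a minimal usage sketch of that API:

	#include <linux/seq_buf.h>

	DECLARE_SEQ_BUF(name, 20);	/* char buffer + seq_buf bookkeeping */

	seq_buf_printf(&name, "DDI %c/PHY %c", 'A', 'A');

	/* seq_buf_str() NUL-terminates and returns the accumulated text. */
	pr_info("%s\n", seq_buf_str(&name));

The seq_buf_has_overflowed() check in the helper catches the case where the 20-byte buffer turns out to be too small for a future name format.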
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 7035c1fc9033..5dca7f96b425 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -76,6 +76,7 @@
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_display_wa.h"
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
@@ -1081,6 +1082,11 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
if (audio_enabling(old_crtc_state, new_crtc_state))
intel_encoders_audio_enable(state, crtc);
+ if (intel_display_wa(display, 14011503117)) {
+ if (old_crtc_state->pch_pfit.enabled != new_crtc_state->pch_pfit.enabled)
+ adl_scaler_ecc_unmask(new_crtc_state);
+ }
+
intel_alpm_post_plane_update(state, crtc);
intel_psr_post_plane_update(state, crtc);
@@ -7265,6 +7271,9 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
intel_psr_trigger_frame_change_event(new_crtc_state->dsb_commit,
state, crtc);
+ intel_psr_wait_for_idle_dsb(new_crtc_state->dsb_commit,
+ new_crtc_state);
+
if (new_crtc_state->use_dsb)
intel_dsb_vblank_evade(state, new_crtc_state->dsb_commit);
diff --git a/drivers/gpu/drm/i915/display/intel_display_conversion.c b/drivers/gpu/drm/i915/display/intel_display_conversion.c
index 4d565935e2cc..d56065f22655 100644
--- a/drivers/gpu/drm/i915/display/intel_display_conversion.c
+++ b/drivers/gpu/drm/i915/display/intel_display_conversion.c
@@ -4,7 +4,7 @@
#include "i915_drv.h"
#include "intel_display_conversion.h"
-struct intel_display *__i915_to_display(struct drm_i915_private *i915)
+static struct intel_display *__i915_to_display(struct drm_i915_private *i915)
{
return i915->display;
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_conversion.h b/drivers/gpu/drm/i915/display/intel_display_conversion.h
index 46c7208d42ba..d497bc58a73f 100644
--- a/drivers/gpu/drm/i915/display/intel_display_conversion.h
+++ b/drivers/gpu/drm/i915/display/intel_display_conversion.h
@@ -9,20 +9,8 @@
#define __INTEL_DISPLAY_CONVERSION__
struct drm_device;
-struct drm_i915_private;
struct intel_display;
-struct intel_display *__i915_to_display(struct drm_i915_private *i915);
struct intel_display *__drm_to_display(struct drm_device *drm);
-/*
- * Transitional macro to optionally convert struct drm_i915_private * to struct
- * intel_display *, also accepting the latter.
- */
-#define __to_intel_display(p) \
- _Generic(p, \
- const struct drm_i915_private *: __i915_to_display((struct drm_i915_private *)(p)), \
- struct drm_i915_private *: __i915_to_display((struct drm_i915_private *)(p)), \
- const struct intel_display *: (p), \
- struct intel_display *: (p))
#endif /* __INTEL_DISPLAY_CONVERSION__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index ce3f9810c42d..10dddec3796f 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -820,14 +820,14 @@ static const struct drm_info_list intel_display_debugfs_list[] = {
void intel_display_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
+ struct dentry *debugfs_root = display->drm->debugfs_root;
- debugfs_create_file("i915_fifo_underrun_reset", 0644, minor->debugfs_root,
+ debugfs_create_file("i915_fifo_underrun_reset", 0644, debugfs_root,
display, &i915_fifo_underrun_reset_ops);
drm_debugfs_create_files(intel_display_debugfs_list,
ARRAY_SIZE(intel_display_debugfs_list),
- minor->debugfs_root, minor);
+ debugfs_root, display->drm->primary);
intel_bios_debugfs_register(display);
intel_cdclk_debugfs_register(display);
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
index 88914a1f3f62..de62b774272d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
@@ -7,7 +7,6 @@
#include <linux/kernel.h>
#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
#include "intel_display_core.h"
#include "intel_display_debugfs_params.h"
@@ -154,14 +153,14 @@ intel_display_debugfs_create_uint(const char *name, umode_t mode,
/* add a subdirectory with files for each intel display param */
void intel_display_debugfs_params(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
+ struct dentry *debugfs_root = display->drm->debugfs_root;
struct dentry *dir;
char dirname[16];
snprintf(dirname, sizeof(dirname), "%s_params", display->drm->driver->name);
- dir = debugfs_lookup(dirname, minor->debugfs_root);
+ dir = debugfs_lookup(dirname, debugfs_root);
if (!dir)
- dir = debugfs_create_dir(dirname, minor->debugfs_root);
+ dir = debugfs_create_dir(dirname, debugfs_root);
if (IS_ERR(dir))
return;
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index 089cffabbad5..a002bc6ce7b0 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -1354,6 +1354,19 @@ static const struct intel_display_device_info xe2_lpd_display = {
.__runtime_defaults.has_dbuf_overlap_detection = true,
};
+static const struct intel_display_device_info wcl_display = {
+ XE_LPDP_FEATURES,
+
+ .__runtime_defaults.cpu_transcoder_mask =
+ BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
+ .__runtime_defaults.pipe_mask =
+ BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .__runtime_defaults.fbc_mask =
+ BIT(INTEL_FBC_A) | BIT(INTEL_FBC_B) | BIT(INTEL_FBC_C),
+ .__runtime_defaults.port_mask =
+ BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_TC1) | BIT(PORT_TC2),
+};
+
static const struct intel_display_device_info xe2_hpd_display = {
XE_LPDP_FEATURES,
.__runtime_defaults.port_mask = BIT(PORT_A) |
@@ -1480,7 +1493,7 @@ static const struct {
{ 14, 1, &xe2_hpd_display },
{ 20, 0, &xe2_lpd_display },
{ 30, 0, &xe2_lpd_display },
- { 30, 2, &xe2_lpd_display },
+ { 30, 2, &wcl_display },
};
static const struct intel_display_device_info *
@@ -1931,6 +1944,11 @@ void intel_display_device_info_print(const struct intel_display_device_info *inf
drm_printf(p, "rawclk rate: %u kHz\n", runtime->rawclk_freq);
}
+bool intel_display_device_present(struct intel_display *display)
+{
+ return display && HAS_DISPLAY(display);
+}
+
/*
* Assuming the device has display hardware, should it be enabled?
*
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index 4308822f0415..f329f1beafef 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -9,7 +9,6 @@
#include <linux/bitops.h>
#include <linux/types.h>
-#include "intel_display_conversion.h"
#include "intel_display_limits.h"
struct drm_printer;
@@ -224,8 +223,8 @@ struct intel_display_platforms {
(IS_DISPLAY_VERx100((__display), (ipver), (ipver)) && \
IS_DISPLAY_STEP((__display), (from), (until)))
-#define DISPLAY_INFO(__display) (__to_intel_display(__display)->info.__device_info)
-#define DISPLAY_RUNTIME_INFO(__display) (&__to_intel_display(__display)->info.__runtime_info)
+#define DISPLAY_INFO(__display) ((__display)->info.__device_info)
+#define DISPLAY_RUNTIME_INFO(__display) (&(__display)->info.__runtime_info)
#define DISPLAY_VER(__display) (DISPLAY_RUNTIME_INFO(__display)->ip.ver)
#define DISPLAY_VERx100(__display) (DISPLAY_RUNTIME_INFO(__display)->ip.ver * 100 + \
@@ -236,7 +235,7 @@ struct intel_display_platforms {
#define INTEL_DISPLAY_STEP(__display) (DISPLAY_RUNTIME_INFO(__display)->step)
#define IS_DISPLAY_STEP(__display, since, until) \
- (drm_WARN_ON(__to_intel_display(__display)->drm, INTEL_DISPLAY_STEP(__display) == STEP_NONE), \
+ (drm_WARN_ON((__display)->drm, INTEL_DISPLAY_STEP(__display) == STEP_NONE), \
INTEL_DISPLAY_STEP(__display) >= (since) && INTEL_DISPLAY_STEP(__display) < (until))
#define ARLS_HOST_BRIDGE_PCI_ID1 0x7D1C
@@ -307,6 +306,7 @@ struct intel_display_device_info {
} color;
};
+bool intel_display_device_present(struct intel_display *display);
bool intel_display_device_enabled(struct intel_display *display);
struct intel_display *intel_display_device_probe(struct pci_dev *pdev);
void intel_display_device_remove(struct intel_display *display);
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 8586ba102605..cf1c14412abe 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -18,6 +18,7 @@
#include <drm/drm_vblank.h>
#include "i915_drv.h"
+#include "i915_utils.h"
#include "i9xx_wm.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index fb25ec8adae3..123e054affbe 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -1506,10 +1506,14 @@ u32 gen11_gu_misc_irq_ack(struct intel_display *display, const u32 master_ctl)
if (!(master_ctl & GEN11_GU_MISC_IRQ))
return 0;
+ intel_display_rpm_assert_block(display);
+
iir = intel_de_read(display, GEN11_GU_MISC_IIR);
if (likely(iir))
intel_de_write(display, GEN11_GU_MISC_IIR, iir);
+ intel_display_rpm_assert_unblock(display);
+
return iir;
}
@@ -1986,20 +1990,17 @@ void vlv_display_irq_postinstall(struct intel_display *display)
void ibx_display_irq_reset(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- if (HAS_PCH_NOP(i915))
+ if (HAS_PCH_NOP(display))
return;
gen2_irq_reset(to_intel_uncore(display->drm), SDE_IRQ_REGS);
- if (HAS_PCH_CPT(i915) || HAS_PCH_LPT(i915))
+ if (HAS_PCH_CPT(display) || HAS_PCH_LPT(display))
intel_de_write(display, SERR_INT, 0xffffffff);
}
void gen8_display_irq_reset(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
enum pipe pipe;
if (!HAS_DISPLAY(display))
@@ -2016,7 +2017,7 @@ void gen8_display_irq_reset(struct intel_display *display)
intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS);
intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS);
- if (HAS_PCH_SPLIT(i915))
+ if (HAS_PCH_SPLIT(display))
ibx_display_irq_reset(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.c b/drivers/gpu/drm/i915/display/intel_display_params.c
index 75316247ee8a..2aed110c5b09 100644
--- a/drivers/gpu/drm/i915/display/intel_display_params.c
+++ b/drivers/gpu/drm/i915/display/intel_display_params.c
@@ -120,6 +120,9 @@ intel_display_param_named_unsafe(enable_psr, int, 0400,
"(0=disabled, 1=enable up to PSR1, 2=enable up to PSR2) "
"Default: -1 (use per-chip default)");
+intel_display_param_named_unsafe(enable_panel_replay, int, 0400,
+ "Enable Panel Replay (0=disabled, 1=enabled). Default: -1 (use per-chip default)");
+
intel_display_param_named(psr_safest_params, bool, 0400,
"Replace PSR VBT parameters by the safest and not optimal ones. This "
"is helpful to detect if PSR issues are related to bad values set in "
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.h b/drivers/gpu/drm/i915/display/intel_display_params.h
index 784e6bae8615..b01bc5700c52 100644
--- a/drivers/gpu/drm/i915/display/intel_display_params.h
+++ b/drivers/gpu/drm/i915/display/intel_display_params.h
@@ -46,6 +46,7 @@ struct drm_printer;
param(bool, enable_dp_mst, true, 0600) \
param(int, enable_fbc, -1, 0600) \
param(int, enable_psr, -1, 0600) \
+ param(int, enable_panel_replay, -1, 0600) \
param(bool, psr_safest_params, false, 0400) \
param(bool, enable_psr2_sel_fetch, true, 0400) \
param(int, enable_dmc_wl, -1, 0400) \
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 273054c22325..da4babfd6bcb 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -3,6 +3,7 @@
* Copyright © 2019 Intel Corporation
*/
+#include <linux/iopoll.h>
#include <linux/string_helpers.h>
#include "soc/intel_dram.h"
@@ -10,6 +11,7 @@
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_backlight_regs.h"
#include "intel_cdclk.h"
#include "intel_clock_gating.h"
@@ -1172,7 +1174,7 @@ static void icl_mbus_init(struct intel_display *display)
if (DISPLAY_VER(display) == 12)
abox_regs |= BIT(0);
- for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
+ for_each_set_bit(i, &abox_regs, BITS_PER_TYPE(abox_regs))
intel_de_rmw(display, MBUS_ABOX_CTL(i), mask, val);
}
@@ -1278,6 +1280,7 @@ static void hsw_disable_lcpll(struct intel_display *display,
bool switch_to_fclk, bool allow_power_down)
{
u32 val;
+ int ret;
assert_can_disable_lcpll(display);
@@ -1287,8 +1290,10 @@ static void hsw_disable_lcpll(struct intel_display *display,
val |= LCPLL_CD_SOURCE_FCLK;
intel_de_write(display, LCPLL_CTL, val);
- if (wait_for_us(intel_de_read(display, LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE, 1))
+ ret = intel_de_wait_custom(display, LCPLL_CTL,
+ LCPLL_CD_SOURCE_FCLK_DONE, LCPLL_CD_SOURCE_FCLK_DONE,
+ 1, 0, NULL);
+ if (ret)
drm_err(display->drm, "Switching to FCLK failed\n");
val = intel_de_read(display, LCPLL_CTL);
@@ -1306,8 +1311,10 @@ static void hsw_disable_lcpll(struct intel_display *display,
hsw_write_dcomp(display, val);
ndelay(100);
- if (wait_for((hsw_read_dcomp(display) &
- D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
+ ret = poll_timeout_us(val = hsw_read_dcomp(display),
+ (val & D_COMP_RCOMP_IN_PROGRESS) == 0,
+ 100, 1000, false);
+ if (ret)
drm_err(display->drm, "D_COMP RCOMP still in progress\n");
if (allow_power_down) {
@@ -1324,6 +1331,7 @@ static void hsw_restore_lcpll(struct intel_display *display)
{
struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
u32 val;
+ int ret;
val = intel_de_read(display, LCPLL_CTL);
@@ -1358,8 +1366,10 @@ static void hsw_restore_lcpll(struct intel_display *display)
if (val & LCPLL_CD_SOURCE_FCLK) {
intel_de_rmw(display, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0);
- if (wait_for_us((intel_de_read(display, LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+ ret = intel_de_wait_custom(display, LCPLL_CTL,
+ LCPLL_CD_SOURCE_FCLK_DONE, 0,
+ 1, 0, NULL);
+ if (ret)
drm_err(display->drm,
"Switching back to LCPLL failed\n");
}
@@ -1629,11 +1639,11 @@ static void tgl_bw_buddy_init(struct intel_display *display)
if (table[config].page_mask == 0) {
drm_dbg_kms(display->drm,
"Unknown memory configuration; disabling address buddy logic.\n");
- for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
+ for_each_set_bit(i, &abox_mask, BITS_PER_TYPE(abox_mask))
intel_de_write(display, BW_BUDDY_CTL(i),
BW_BUDDY_DISABLE);
} else {
- for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
+ for_each_set_bit(i, &abox_mask, BITS_PER_TYPE(abox_mask)) {
intel_de_write(display, BW_BUDDY_PAGE_MASK(i),
table[config].page_mask);
@@ -2155,8 +2165,6 @@ void intel_power_domains_resume(struct intel_display *display)
power_domains->init_wakeref =
intel_display_power_get(display, POWER_DOMAIN_INIT);
}
-
- intel_power_domains_verify_state(display);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
index 77268802b55e..39b71fffa2cd 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_map.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -1717,6 +1717,59 @@ static const struct i915_power_well_desc_list xe3lpd_power_wells[] = {
I915_PW_DESCRIPTORS(xe2lpd_power_wells_pica),
};
+static const struct i915_power_well_desc wcl_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_2", &xe3lpd_pwdoms_pw_2,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+ .id = SKL_DISP_PW_2),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_vga = true,
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_A", &xelpd_pwdoms_pw_a,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_A),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_A),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_B", &xe3lpd_pwdoms_pw_b,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_B),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_B),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_C", &xe3lpd_pwdoms_pw_c,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_C),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_C),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("AUX_A", &icl_pwdoms_aux_a, .xelpdp.aux_ch = AUX_CH_A),
+ I915_PW("AUX_B", &icl_pwdoms_aux_b, .xelpdp.aux_ch = AUX_CH_B),
+ I915_PW("AUX_TC1", &xelpdp_pwdoms_aux_tc1, .xelpdp.aux_ch = AUX_CH_USBC1),
+ I915_PW("AUX_TC2", &xelpdp_pwdoms_aux_tc2, .xelpdp.aux_ch = AUX_CH_USBC2),
+ ),
+ .ops = &xelpdp_aux_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_desc_list wcl_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(xe3lpd_power_wells_dcoff),
+ I915_PW_DESCRIPTORS(wcl_power_wells_main),
+ I915_PW_DESCRIPTORS(xe2lpd_power_wells_pica),
+};
+
static void init_power_well_domains(const struct i915_power_well_instance *inst,
struct i915_power_well *power_well)
{
@@ -1824,7 +1877,9 @@ int intel_display_power_map_init(struct i915_power_domains *power_domains)
return 0;
}
- if (DISPLAY_VER(display) >= 30)
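+	/* DISPLAY_VERx100() is major * 100 + minor, so 3002 matches display IP
+	 * version 30.02 specifically (the WCL wells added above) */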
+ if (DISPLAY_VERx100(display) == 3002)
+ return set_power_wells(power_domains, wcl_power_wells);
+ else if (DISPLAY_VER(display) >= 30)
return set_power_wells(power_domains, xe3lpd_power_wells);
else if (DISPLAY_VER(display) >= 20)
return set_power_wells(power_domains, xe2lpd_power_wells);
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 48cac225a809..5e88b930f5aa 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -3,6 +3,8 @@
* Copyright © 2022 Intel Corporation
*/
+#include <linux/iopoll.h>
+
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
@@ -499,7 +501,6 @@ static void icl_tc_port_assert_ref_held(struct intel_display *display,
static void icl_tc_cold_exit(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
int ret, tries = 0;
while (1) {
@@ -514,7 +515,7 @@ static void icl_tc_cold_exit(struct intel_display *display)
msleep(1);
/* TODO: turn failure into an error as soon as i915 CI updates the ICL IFWI */
- drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
+ drm_dbg_kms(display->drm, "TC cold block %s\n", ret ? "failed" :
"succeeded");
}
@@ -527,6 +528,8 @@ icl_tc_phy_aux_power_well_enable(struct intel_display *display,
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
bool is_tbt = power_well->desc->is_tc_tbt;
bool timeout_expected;
+ u32 val;
+ int ret;
icl_tc_port_assert_ref_held(display, power_well, dig_port);
@@ -553,10 +556,11 @@ icl_tc_phy_aux_power_well_enable(struct intel_display *display,
tc_port = TGL_AUX_PW_TO_TC_PORT(i915_power_well_instance(power_well)->hsw.idx);
- if (wait_for(intel_dkl_phy_read(display, DKL_CMN_UC_DW_27(tc_port)) &
- DKL_CMN_UC_DW27_UC_HEALTH, 1))
- drm_warn(display->drm,
- "Timeout waiting TC uC health\n");
+ ret = poll_timeout_us(val = intel_dkl_phy_read(display, DKL_CMN_UC_DW_27(tc_port)),
+ val & DKL_CMN_UC_DW27_UC_HEALTH,
+ 100, 1000, false);
+ if (ret)
+ drm_warn(display->drm, "Timeout waiting TC uC health\n");
}
}
@@ -1122,6 +1126,8 @@ static void vlv_set_power_well(struct intel_display *display,
u32 mask;
u32 state;
u32 ctrl;
+ u32 val;
+ int ret;
mask = PUNIT_PWRGT_MASK(pw_idx);
state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
@@ -1129,10 +1135,8 @@ static void vlv_set_power_well(struct intel_display *display,
vlv_punit_get(display->drm);
-#define COND \
- ((vlv_punit_read(display->drm, PUNIT_REG_PWRGT_STATUS) & mask) == state)
-
- if (COND)
+ val = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_STATUS);
+ if ((val & mask) == state)
goto out;
ctrl = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_CTRL);
@@ -1140,14 +1144,15 @@ static void vlv_set_power_well(struct intel_display *display,
ctrl |= state;
vlv_punit_write(display->drm, PUNIT_REG_PWRGT_CTRL, ctrl);
- if (wait_for(COND, 100))
+ ret = poll_timeout_us(val = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_STATUS),
+ (val & mask) == state,
+ 500, 100 * 1000, false);
+ if (ret)
drm_err(display->drm,
"timeout setting power well state %08x (%08x)\n",
state,
vlv_punit_read(display->drm, PUNIT_REG_PWRGT_CTRL));
-#undef COND
-
out:
vlv_punit_put(display->drm);
}
@@ -1208,7 +1213,7 @@ static void vlv_init_display_clock_gating(struct intel_display *display)
* (and never recovering) in this case. intel_dsi_post_disable() will
* clear it when we turn off the display.
*/
- intel_de_rmw(display, DSPCLK_GATE_D(display),
+ intel_de_rmw(display, VLV_DSPCLK_GATE_D,
~DPOUNIT_CLOCK_GATE_DISABLE, VRHUNIT_CLOCK_GATE_DISABLE);
/*
@@ -1711,23 +1716,24 @@ static void chv_set_pipe_power_well(struct intel_display *display,
enum pipe pipe = PIPE_A;
u32 state;
u32 ctrl;
+ int ret;
state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
vlv_punit_get(display->drm);
-#define COND \
- ((vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
-
- if (COND)
+ ctrl = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
+ if ((ctrl & DP_SSS_MASK(pipe)) == state)
goto out;
- ctrl = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
ctrl &= ~DP_SSC_MASK(pipe);
ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
vlv_punit_write(display->drm, PUNIT_REG_DSPSSPM, ctrl);
- if (wait_for(COND, 100))
+ ret = poll_timeout_us(ctrl = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM),
+ (ctrl & DP_SSS_MASK(pipe)) == state,
+ 500, 100 * 1000, false);
+ if (ret)
drm_err(display->drm,
"timeout setting power well state %08x (%08x)\n",
state,
@@ -1765,7 +1771,6 @@ static void chv_pipe_power_well_disable(struct intel_display *display,
static void
tgl_tc_cold_request(struct intel_display *display, bool block)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
u8 tries = 0;
int ret;
@@ -1798,10 +1803,9 @@ tgl_tc_cold_request(struct intel_display *display, bool block)
}
if (ret)
- drm_err(&i915->drm, "TC cold %sblock failed\n",
- block ? "" : "un");
+ drm_err(display->drm, "TC cold %sblock failed\n", block ? "" : "un");
else
- drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
+ drm_dbg_kms(display->drm, "TC cold %sblock succeeded\n",
block ? "" : "un");
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_regs.h b/drivers/gpu/drm/i915/display/intel_display_regs.h
index 7bd09d981cd2..9d71e26a4fa2 100644
--- a/drivers/gpu/drm/i915/display/intel_display_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_display_regs.h
@@ -2890,6 +2890,7 @@ enum skl_power_gate {
#define DP_PIN_ASSIGNMENT_SHIFT(idx) ((idx) * 4)
#define DP_PIN_ASSIGNMENT_MASK(idx) (0xf << ((idx) * 4))
#define DP_PIN_ASSIGNMENT(idx, x) ((x) << ((idx) * 4))
+/* See enum intel_tc_pin_assignment for the pin assignment field values. */
#define _TCSS_DDI_STATUS_1 0x161500
#define _TCSS_DDI_STATUS_2 0x161504
@@ -2897,6 +2898,7 @@ enum skl_power_gate {
_TCSS_DDI_STATUS_1, \
_TCSS_DDI_STATUS_2))
#define TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK REG_GENMASK(28, 25)
+/* See enum intel_tc_pin_assignment for the pin assignment field values. */
#define TCSS_DDI_STATUS_READY REG_BIT(2)
#define TCSS_DDI_STATUS_HPD_LIVE_STATUS_TBT REG_BIT(1)
#define TCSS_DDI_STATUS_HPD_LIVE_STATUS_ALT REG_BIT(0)
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index ce45261c4a8f..358ab922d7a7 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -50,15 +50,17 @@
#include "intel_display_limits.h"
#include "intel_display_power.h"
#include "intel_dpll_mgr.h"
+#include "intel_dsi_vbt_defs.h"
#include "intel_wm_types.h"
struct cec_notifier;
struct drm_printer;
-struct __intel_global_objs_state;
struct intel_connector;
struct intel_ddi_buf_trans;
struct intel_fbc;
+struct intel_global_objs_state;
struct intel_hdcp_shim;
+struct intel_panic;
struct intel_tc_port;
/*
@@ -148,6 +150,7 @@ struct intel_framebuffer {
unsigned int vtd_guard;
unsigned int (*panic_tiling)(unsigned int x, unsigned int y, unsigned int width);
+ struct intel_panic *panic;
};
enum intel_hotplug_state {
@@ -593,7 +596,7 @@ struct intel_atomic_state {
struct ref_tracker *wakeref;
- struct __intel_global_objs_state *global_objs;
+ struct intel_global_objs_state *global_objs;
int num_global_objs;
/* Internal commit, as opposed to userspace/client initiated one */
@@ -642,7 +645,6 @@ struct intel_plane_state {
#define PLANE_HAS_FENCE BIT(0)
struct intel_fb_view view;
- u32 phys_dma_addr; /* for cursor_needs_physical */
/* for legacy cursor fb unpin */
struct drm_vblank_work unpin_work;
@@ -665,6 +667,9 @@ struct intel_plane_state {
/* chroma upsampler control register */
u32 cus_ctl;
+ /* surface address register */
+ u32 surf;
+
/*
* scaler_id
* = -1 : not using a scaler
@@ -941,10 +946,6 @@ struct intel_csc_matrix {
u16 postoff[3];
};
-void intel_io_mmio_fw_write(void *ctx, i915_reg_t reg, u32 val);
-
-typedef void (*intel_io_reg_write)(void *ctx, i915_reg_t reg, u32 val);
-
struct intel_crtc_state {
/*
* uapi (drm) state. This is the software state shown to userspace.
@@ -1122,6 +1123,7 @@ struct intel_crtc_state {
bool req_psr2_sdp_prior_scanline;
bool has_panel_replay;
bool wm_level_disabled;
+ bool pkg_c_latency_used;
u32 dc3co_exitline;
u16 su_y_granularity;
u8 active_non_psr_pipes;
@@ -1534,6 +1536,7 @@ struct intel_plane {
bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
int (*check_plane)(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state);
+ u32 (*surf_offset)(const struct intel_plane_state *plane_state);
int (*min_cdclk)(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void (*async_flip)(struct intel_dsb *dsb,
@@ -1683,6 +1686,7 @@ struct intel_psr {
u8 entry_setup_frames;
bool link_ok;
+ bool pkg_c_latency_used;
u8 active_non_psr_pipes;
};
diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.c b/drivers/gpu/drm/i915/display/intel_display_wa.c
index f57280e9d041..31cd2c9cd488 100644
--- a/drivers/gpu/drm/i915/display/intel_display_wa.c
+++ b/drivers/gpu/drm/i915/display/intel_display_wa.c
@@ -3,6 +3,8 @@
* Copyright © 2023 Intel Corporation
*/
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_core.h"
@@ -39,3 +41,36 @@ void intel_display_wa_apply(struct intel_display *display)
else if (DISPLAY_VER(display) == 11)
gen11_display_wa_apply(display);
}
+
+/*
+ * Wa_16025573575:
+ * Fixes: Issue with bitbashing on Xe3-based platforms.
+ * Workaround: Set mask bits in GPIO CTL and preserve them during the bitbashing sequence.
+ */
+static bool intel_display_needs_wa_16025573575(struct intel_display *display)
+{
+ return DISPLAY_VERx100(display) == 3000 || DISPLAY_VERx100(display) == 3002;
+}
+
+/*
+ * Wa_14011503117:
+ * Fixes: The DE fatal error is masked before the scaler is enabled.
+ * Workaround: Unmask the DE fatal error register after enabling the scaler
+ * and waiting for at least one frame.
+ */
+bool __intel_display_wa(struct intel_display *display, enum intel_display_wa wa, const char *name)
+{
+ switch (wa) {
+ case INTEL_DISPLAY_WA_16023588340:
+ return intel_display_needs_wa_16023588340(display);
+ case INTEL_DISPLAY_WA_16025573575:
+ return intel_display_needs_wa_16025573575(display);
+ case INTEL_DISPLAY_WA_14011503117:
+ return DISPLAY_VER(display) == 13;
+ default:
+ drm_WARN(display->drm, 1, "Missing Wa number: %s\n", name);
+ break;
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.h b/drivers/gpu/drm/i915/display/intel_display_wa.h
index babd9d16603d..abc1df83f066 100644
--- a/drivers/gpu/drm/i915/display/intel_display_wa.h
+++ b/drivers/gpu/drm/i915/display/intel_display_wa.h
@@ -21,4 +21,15 @@ static inline bool intel_display_needs_wa_16023588340(struct intel_display *disp
bool intel_display_needs_wa_16023588340(struct intel_display *display);
#endif
+enum intel_display_wa {
+ INTEL_DISPLAY_WA_16023588340,
+ INTEL_DISPLAY_WA_16025573575,
+ INTEL_DISPLAY_WA_14011503117,
+};
+
+bool __intel_display_wa(struct intel_display *display, enum intel_display_wa wa, const char *name);
+
+#define intel_display_wa(__display, __wa) \
+ __intel_display_wa((__display), INTEL_DISPLAY_WA_##__wa, __stringify(__wa))
+
#endif
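
A usage sketch of the new wrapper, matching the converted call site in
intel_fbc.c later in this series (apply_workaround() is a placeholder):

	if (intel_display_wa(display, 16023588340))
		/* expands to __intel_display_wa(display,
		 *	INTEL_DISPLAY_WA_16023588340, "16023588340");
		 * the string is only used for the "Missing Wa" warning */
		apply_workaround();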
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 744f51c0eab8..77a0199f9ea5 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -1603,9 +1603,7 @@ DEFINE_SHOW_ATTRIBUTE(intel_dmc_debugfs_status);
void intel_dmc_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
-
- debugfs_create_file("i915_dmc_info", 0444, minor->debugfs_root,
+ debugfs_create_file("i915_dmc_info", 0444, display->drm->debugfs_root,
display, &intel_dmc_debugfs_status_fops);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 7976fec88606..2eab591a8ef5 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -27,6 +27,7 @@
#include <linux/export.h>
#include <linux/i2c.h>
+#include <linux/iopoll.h>
#include <linux/log2.h>
#include <linux/math.h>
#include <linux/notifier.h>
@@ -174,7 +175,6 @@ int intel_dp_link_symbol_clock(int rate)
static int max_dprx_rate(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
int max_rate;
if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
@@ -183,16 +183,13 @@ static int max_dprx_rate(struct intel_dp *intel_dp)
max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
/*
- * Some broken eDP sinks illegally declare support for
- * HBR3 without TPS4, and are unable to produce a stable
- * output. Reject HBR3 when TPS4 is not available.
+ * Some platforms + eDP panels may not reliably support HBR3
+ * due to signal integrity limitations, despite advertising it.
+ * Cap the link rate to HBR2 to avoid unstable configurations on the
+ * affected machines.
*/
- if (max_rate >= 810000 && !drm_dp_tps4_supported(intel_dp->dpcd)) {
- drm_dbg_kms(display->drm,
- "[ENCODER:%d:%s] Rejecting HBR3 due to missing TPS4 support\n",
- encoder->base.base.id, encoder->base.name);
- max_rate = 540000;
- }
+ if (intel_dp_is_edp(intel_dp) && intel_has_quirk(display, QUIRK_EDP_LIMIT_RATE_HBR2))
+ max_rate = min(max_rate, 540000);
return max_rate;
}
@@ -1418,6 +1415,7 @@ intel_dp_mode_valid(struct drm_connector *_connector,
struct intel_display *display = to_intel_display(_connector->dev);
struct intel_connector *connector = to_intel_connector(_connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
+ enum intel_output_format sink_format, output_format;
const struct drm_display_mode *fixed_mode;
int target_clock = mode->clock;
int max_rate, mode_rate, max_lanes, max_link_clock;
@@ -1451,6 +1449,13 @@ intel_dp_mode_valid(struct drm_connector *_connector,
mode->hdisplay, target_clock);
max_dotclk *= num_joined_pipes;
+ sink_format = intel_dp_sink_format(connector, mode);
+ output_format = intel_dp_output_format(connector, sink_format);
+
+ status = intel_pfit_mode_valid(display, mode, output_format, num_joined_pipes);
+ if (status != MODE_OK)
+ return status;
+
if (target_clock > max_dotclk)
return MODE_CLOCK_HIGH;
@@ -1466,11 +1471,8 @@ intel_dp_mode_valid(struct drm_connector *_connector,
intel_dp_mode_min_output_bpp(connector, mode));
if (intel_dp_has_dsc(connector)) {
- enum intel_output_format sink_format, output_format;
int pipe_bpp;
- sink_format = intel_dp_sink_format(connector, mode);
- output_format = intel_dp_output_format(connector, sink_format);
/*
* TBD pass the connector BPC,
* for now U8_MAX so that max BPC on that platform would be picked
@@ -2535,13 +2537,15 @@ intel_dp_dsc_compute_pipe_bpp_limits(struct intel_dp *intel_dp,
bool
intel_dp_compute_config_limits(struct intel_dp *intel_dp,
- struct intel_connector *connector,
+ struct drm_connector_state *conn_state,
struct intel_crtc_state *crtc_state,
bool respect_downstream_limits,
bool dsc,
struct link_config_limits *limits)
{
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
limits->min_rate = intel_dp_min_link_rate(intel_dp);
limits->max_rate = intel_dp_max_link_rate(intel_dp);
@@ -2551,7 +2555,8 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp,
limits->min_lane_count = intel_dp_min_lane_count(intel_dp);
limits->max_lane_count = intel_dp_max_lane_count(intel_dp);
- limits->pipe.min_bpp = intel_dp_min_bpp(crtc_state->output_format);
+ limits->pipe.min_bpp = intel_dp_in_hdr_mode(conn_state) ? 30 :
+ intel_dp_min_bpp(crtc_state->output_format);
if (is_mst) {
/*
* FIXME: If all the streams can't fit into the link with their
@@ -2650,7 +2655,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
joiner_needs_dsc = intel_dp_joiner_needs_dsc(display, num_joined_pipes);
dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
- !intel_dp_compute_config_limits(intel_dp, connector, pipe_config,
+ !intel_dp_compute_config_limits(intel_dp, conn_state, pipe_config,
respect_downstream_limits,
false,
&limits);
@@ -2684,7 +2689,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
str_yes_no(ret), str_yes_no(joiner_needs_dsc),
str_yes_no(intel_dp->force_dsc_en));
- if (!intel_dp_compute_config_limits(intel_dp, connector, pipe_config,
+ if (!intel_dp_compute_config_limits(intel_dp, conn_state, pipe_config,
respect_downstream_limits,
true,
&limits))
@@ -2916,6 +2921,19 @@ static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
}
}
+bool
+intel_dp_in_hdr_mode(const struct drm_connector_state *conn_state)
+{
+ struct hdr_output_metadata *hdr_metadata;
+
+ if (!conn_state->hdr_output_metadata)
+ return false;
+
+ hdr_metadata = conn_state->hdr_output_metadata->data;
+
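+	/* "HDR mode" here means userspace requested the PQ (SMPTE ST 2084)
+	 * EOTF through the HDR_OUTPUT_METADATA connector property. */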
+ return hdr_metadata->hdmi_metadata_type1.eotf == HDMI_EOTF_SMPTE_ST2084;
+}
+
static void
intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
@@ -3181,7 +3199,26 @@ int intel_dp_compute_min_hblank(struct intel_crtc_state *crtc_state,
*/
min_hblank = min_hblank - 2;
- min_hblank = min(10, min_hblank);
+ /*
+ * The min_hblank formula is undergoing a change. To avoid underruns, compare
+ * the calculated value with the value recommended in the spec and use the
+ * smaller of the two.
+ */
+ if (intel_dp_is_uhbr(crtc_state)) {
+ /*
+ * Note: Bspec requires a min_hblank of 2 for YCBCR420
+ * with compressed bpp 6, but the minimum compressed bpp
+ * supported by the driver is 8.
+ */
+ drm_WARN_ON(display->drm,
+ (crtc_state->dsc.compression_enable &&
+ crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
+ crtc_state->dsc.compressed_bpp_x16 < fxp_q4_from_int(8)));
+ min_hblank = min(3, min_hblank);
+ } else {
+ min_hblank = min(10, min_hblank);
+ }
+
crtc_state->min_hblank = min_hblank;
return 0;
@@ -3842,10 +3879,11 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
if (ret < 0)
return ret;
/* Wait for PCON to be FRL Ready */
- wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux) == true, TIMEOUT_FRL_READY_MS);
-
- if (!is_active)
- return -ETIMEDOUT;
+ ret = poll_timeout_us(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux),
+ is_active,
+ 1000, TIMEOUT_FRL_READY_MS * 1000, false);
+ if (ret)
+ return ret;
ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw,
DP_PCON_ENABLE_SEQUENTIAL_LINK);
@@ -3862,12 +3900,11 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
* Wait for FRL to be completed
* Check if the HDMI Link is up and active.
*/
- wait_for(is_active =
- intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask),
- TIMEOUT_HDMI_LINK_ACTIVE_MS);
-
- if (!is_active)
- return -ETIMEDOUT;
+ ret = poll_timeout_us(is_active = intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask),
+ is_active,
+ 1000, TIMEOUT_HDMI_LINK_ACTIVE_MS * 1000, false);
+ if (ret)
+ return ret;
frl_trained:
drm_dbg(display->drm, "FRL_TRAINED_MASK = %u\n", frl_trained_mask);
@@ -4277,10 +4314,26 @@ static void intel_edp_mso_init(struct intel_dp *intel_dp)
}
static void
+intel_edp_set_data_override_rates(struct intel_dp *intel_dp)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ int *sink_rates = intel_dp->sink_rates;
+ int i, count = 0;
+
+ for (i = 0; i < intel_dp->num_sink_rates; i++) {
+ if (intel_bios_encoder_reject_edp_rate(encoder->devdata,
+ intel_dp->sink_rates[i]))
+ continue;
+
+ sink_rates[count++] = intel_dp->sink_rates[i];
+ }
+ intel_dp->num_sink_rates = count;
+}
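
The override filter compacts intel_dp->sink_rates in place; the write index
count can never overtake the read index i, so no temporary array is needed. A
worked example with a hypothetical VBT entry rejecting 540000 (rates in kHz):

	/* before: sink_rates = { 162000, 270000, 540000, 810000 }, num_sink_rates = 4
	 * after:  sink_rates = { 162000, 270000, 810000 },         num_sink_rates = 3 */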
+
+static void
intel_edp_set_sink_rates(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
intel_dp->num_sink_rates = 0;
@@ -4306,16 +4359,13 @@ intel_edp_set_sink_rates(struct intel_dp *intel_dp)
break;
/*
- * Some broken eDP sinks illegally declare support for
- * HBR3 without TPS4, and are unable to produce a stable
- * output. Reject HBR3 when TPS4 is not available.
+ * Some platforms cannot reliably drive HBR3 rates due to PHY limitations,
+ * even if the sink advertises support. Reject any sink rates above HBR2 on
+ * the affected machines to keep the output stable.
*/
- if (rate >= 810000 && !drm_dp_tps4_supported(intel_dp->dpcd)) {
- drm_dbg_kms(display->drm,
- "[ENCODER:%d:%s] Rejecting HBR3 due to missing TPS4 support\n",
- encoder->base.base.id, encoder->base.name);
+ if (rate > 540000 &&
+ intel_has_quirk(display, QUIRK_EDP_LIMIT_RATE_HBR2))
break;
- }
intel_dp->sink_rates[i] = rate;
}
@@ -4330,6 +4380,8 @@ intel_edp_set_sink_rates(struct intel_dp *intel_dp)
intel_dp->use_rate_select = true;
else
intel_dp_set_sink_rates(intel_dp);
+
+ intel_edp_set_data_override_rates(intel_dp);
}
static bool
@@ -5611,14 +5663,9 @@ bool intel_digital_port_connected_locked(struct intel_encoder *encoder)
intel_wakeref_t wakeref;
with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
- unsigned long wait_expires = jiffies + msecs_to_jiffies_timeout(4);
-
- do {
- is_connected = dig_port->connected(encoder);
- if (is_connected || is_glitch_free)
- break;
- usleep_range(10, 30);
- } while (time_before(jiffies, wait_expires));
+ poll_timeout_us(is_connected = dig_port->connected(encoder),
+ is_connected || is_glitch_free,
+ 30, 4000, false);
}
return is_connected;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 0657f5681196..f90cfd1dbbd0 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -193,7 +193,7 @@ void intel_dp_wait_source_oui(struct intel_dp *intel_dp);
int intel_dp_output_bpp(enum intel_output_format output_format, int bpp);
bool intel_dp_compute_config_limits(struct intel_dp *intel_dp,
- struct intel_connector *connector,
+ struct drm_connector_state *conn_state,
struct intel_crtc_state *crtc_state,
bool respect_downstream_limits,
bool dsc,
@@ -214,5 +214,6 @@ int intel_dp_compute_min_hblank(struct intel_crtc_state *crtc_state,
int intel_dp_dsc_bpp_step_x16(const struct intel_connector *connector);
void intel_dp_dpcd_set_probe(struct intel_dp *intel_dp, bool force_on_external);
+bool intel_dp_in_hdr_mode(const struct drm_connector_state *conn_state);
#endif /* __INTEL_DP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 41228478b21c..eb05ef4bd9f6 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -225,19 +225,6 @@ intel_dp_aux_hdr_set_aux_backlight(const struct drm_connector_state *conn_state,
connector->base.base.id, connector->base.name);
}
-static bool
-intel_dp_in_hdr_mode(const struct drm_connector_state *conn_state)
-{
- struct hdr_output_metadata *hdr_metadata;
-
- if (!conn_state->hdr_output_metadata)
- return false;
-
- hdr_metadata = conn_state->hdr_output_metadata->data;
-
- return hdr_metadata->hdmi_metadata_type1.eotf == HDMI_EOTF_SMPTE_ST2084;
-}
-
static void
intel_dp_aux_hdr_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
@@ -521,9 +508,6 @@ static void intel_dp_aux_vesa_disable_backlight(const struct drm_connector_state
struct intel_panel *panel = &connector->panel;
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
- if (panel->backlight.edp.vesa.luminance_control_support)
- return;
-
drm_edp_backlight_disable(&intel_dp->aux, &panel->backlight.edp.vesa.info);
if (!panel->backlight.edp.vesa.info.aux_enable)
@@ -546,7 +530,7 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
luminance_range->max_luminance,
panel->vbt.backlight.pwm_freq_hz,
intel_dp->edp_dpcd, &current_level, &current_mode,
- false);
+ panel->backlight.edp.vesa.luminance_control_support);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index a479b63112ea..27f3716bdc1f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -22,6 +22,7 @@
*/
#include <linux/debugfs.h>
+#include <linux/iopoll.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_print.h>
@@ -478,12 +479,13 @@ static u8 intel_dp_get_lane_adjust_train(struct intel_dp *intel_dp,
_TRAIN_REQ_TX_FFE_ARGS(link_status, 2), \
_TRAIN_REQ_TX_FFE_ARGS(link_status, 3)
-void
+bool
intel_dp_get_adjust_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy,
const u8 link_status[DP_LINK_STATUS_SIZE])
{
+ bool changed = false;
int lane;
if (intel_dp_is_uhbr(crtc_state)) {
@@ -502,10 +504,17 @@ intel_dp_get_adjust_train(struct intel_dp *intel_dp,
TRAIN_REQ_PREEMPH_ARGS(link_status));
}
- for (lane = 0; lane < 4; lane++)
- intel_dp->train_set[lane] =
- intel_dp_get_lane_adjust_train(intel_dp, crtc_state,
- dp_phy, link_status, lane);
+ for (lane = 0; lane < 4; lane++) {
+ u8 new = intel_dp_get_lane_adjust_train(intel_dp, crtc_state,
+ dp_phy, link_status, lane);
+ if (intel_dp->train_set[lane] == new)
+ continue;
+
+ intel_dp->train_set[lane] = new;
+ changed = true;
+ }
+
+ return changed;
}
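
intel_dp_get_adjust_train() now reports whether any lane's requested drive
settings actually changed. A hypothetical caller sketch, with
reprogram_lane_settings() as a placeholder; the real callers live elsewhere in
this file:

	/* only rewrite the drive settings if the request changed */
	if (intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy, link_status))
		reprogram_lane_settings();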
static int intel_dp_training_pattern_set_reg(struct intel_dp *intel_dp,
@@ -758,6 +767,63 @@ void intel_dp_link_training_set_bw(struct intel_dp *intel_dp,
}
}
+/*
+ * Pick Training Pattern Sequence (TPS) for channel equalization. 128b/132b TPS2
+ * for UHBR+, TPS4 for HBR3 or for 1.4 devices that support it, TPS3 for HBR2 or
+ * 1.2 devices that support it, TPS2 otherwise.
+ */
+static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ bool source_tps3, sink_tps3, source_tps4, sink_tps4;
+
+ /* UHBR+ use separate 128b/132b TPS2 */
+ if (intel_dp_is_uhbr(crtc_state))
+ return DP_TRAINING_PATTERN_2;
+
+ /*
+ * TPS4 support is mandatory for all downstream devices that
+ * support HBR3. There are no known eDP panels that support
+ * TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1 specification.
+ * LTTPRs must support TPS4.
+ */
+ source_tps4 = intel_dp_source_supports_tps4(display);
+ sink_tps4 = dp_phy != DP_PHY_DPRX ||
+ drm_dp_tps4_supported(intel_dp->dpcd);
+ if (source_tps4 && sink_tps4) {
+ return DP_TRAINING_PATTERN_4;
+ } else if (crtc_state->port_clock == 810000) {
+ if (!source_tps4)
+ lt_dbg(intel_dp, dp_phy,
+ "8.1 Gbps link rate without source TPS4 support\n");
+ if (!sink_tps4)
+ lt_dbg(intel_dp, dp_phy,
+ "8.1 Gbps link rate without sink TPS4 support\n");
+ }
+
+ /*
+ * TPS3 support is mandatory for downstream devices that
+ * support HBR2. However, not all sinks follow the spec.
+ */
+ source_tps3 = intel_dp_source_supports_tps3(display);
+ sink_tps3 = dp_phy != DP_PHY_DPRX ||
+ drm_dp_tps3_supported(intel_dp->dpcd);
+ if (source_tps3 && sink_tps3) {
+ return DP_TRAINING_PATTERN_3;
+ } else if (crtc_state->port_clock >= 540000) {
+ if (!source_tps3)
+ lt_dbg(intel_dp, dp_phy,
+ ">=5.4/6.48 Gbps link rate without source TPS3 support\n");
+ if (!sink_tps3)
+ lt_dbg(intel_dp, dp_phy,
+ ">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
+ }
+
+ return DP_TRAINING_PATTERN_2;
+}
+
static void intel_dp_update_link_bw_set(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 link_bw, u8 rate_select)
@@ -950,63 +1016,6 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
}
/*
- * Pick Training Pattern Sequence (TPS) for channel equalization. 128b/132b TPS2
- * for UHBR+, TPS4 for HBR3 or for 1.4 devices that support it, TPS3 for HBR2 or
- * 1.2 devices that support it, TPS2 otherwise.
- */
-static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- enum drm_dp_phy dp_phy)
-{
- struct intel_display *display = to_intel_display(intel_dp);
- bool source_tps3, sink_tps3, source_tps4, sink_tps4;
-
- /* UHBR+ use separate 128b/132b TPS2 */
- if (intel_dp_is_uhbr(crtc_state))
- return DP_TRAINING_PATTERN_2;
-
- /*
- * TPS4 support is mandatory for all downstream devices that
- * support HBR3. There are no known eDP panels that support
- * TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1 specification.
- * LTTPRs must support TPS4.
- */
- source_tps4 = intel_dp_source_supports_tps4(display);
- sink_tps4 = dp_phy != DP_PHY_DPRX ||
- drm_dp_tps4_supported(intel_dp->dpcd);
- if (source_tps4 && sink_tps4) {
- return DP_TRAINING_PATTERN_4;
- } else if (crtc_state->port_clock == 810000) {
- if (!source_tps4)
- lt_dbg(intel_dp, dp_phy,
- "8.1 Gbps link rate without source TPS4 support\n");
- if (!sink_tps4)
- lt_dbg(intel_dp, dp_phy,
- "8.1 Gbps link rate without sink TPS4 support\n");
- }
-
- /*
- * TPS3 support is mandatory for downstream devices that
- * support HBR2. However, not all sinks follow the spec.
- */
- source_tps3 = intel_dp_source_supports_tps3(display);
- sink_tps3 = dp_phy != DP_PHY_DPRX ||
- drm_dp_tps3_supported(intel_dp->dpcd);
- if (source_tps3 && sink_tps3) {
- return DP_TRAINING_PATTERN_3;
- } else if (crtc_state->port_clock >= 540000) {
- if (!source_tps3)
- lt_dbg(intel_dp, dp_phy,
- ">=5.4/6.48 Gbps link rate without source TPS3 support\n");
- if (!sink_tps3)
- lt_dbg(intel_dp, dp_phy,
- ">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
- }
-
- return DP_TRAINING_PATTERN_2;
-}
-
-/*
* Perform the link training channel equalization phase on the given DP PHY
* using one of training pattern 2, 3 or 4 depending on the source and
* sink capabilities.
@@ -1127,16 +1136,19 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp,
{
struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ int ret;
intel_dp->link.active = true;
- intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
intel_dp_program_link_training_pattern(intel_dp, crtc_state, DP_PHY_DPRX,
DP_TRAINING_PATTERN_DISABLE);
- if (intel_dp_is_uhbr(crtc_state) &&
- wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) {
- lt_dbg(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clearing\n");
+ if (intel_dp_is_uhbr(crtc_state)) {
+ ret = poll_timeout_us(ret = intel_dp_128b132b_intra_hop(intel_dp, crtc_state),
+ ret == 0,
+ 500, 500 * 1000, false);
+ if (ret)
+ lt_dbg(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clearing\n");
}
intel_hpd_unblock(encoder);
@@ -1371,8 +1383,8 @@ intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
if (ret)
ret = intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
- if (intel_dp->set_idle_link_train)
- intel_dp->set_idle_link_train(intel_dp, crtc_state);
+ intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
+ intel_dp->set_idle_link_train(intel_dp, crtc_state);
return ret;
}
@@ -1574,8 +1586,12 @@ intel_dp_128b132b_link_train(struct intel_dp *intel_dp,
int lttpr_count)
{
bool passed = false;
+ int ret;
- if (wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) {
+ ret = poll_timeout_us(ret = intel_dp_128b132b_intra_hop(intel_dp, crtc_state),
+ ret == 0,
+ 500, 500 * 1000, false);
+ if (ret) {
lt_err(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clear\n");
goto out;
}
@@ -1602,6 +1618,8 @@ out:
intel_dp_program_link_training_pattern(intel_dp, crtc_state,
DP_PHY_DPRX, DP_TRAINING_PATTERN_2);
+ intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
+
return passed;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
index 46614124569f..1ba22ed6db08 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
@@ -23,7 +23,7 @@ void intel_dp_link_training_set_bw(struct intel_dp *intel_dp,
int link_bw, int rate_select, int lane_count,
bool enhanced_framing);
-void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
+bool intel_dp_get_adjust_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy,
const u8 link_status[DP_LINK_STATUS_SIZE]);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 74497c9a0554..352f7ef29c28 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -611,12 +611,15 @@ adjust_limits_for_dsc_hblank_expansion_quirk(struct intel_dp *intel_dp,
static bool
mst_stream_compute_config_limits(struct intel_dp *intel_dp,
- struct intel_connector *connector,
+ struct drm_connector_state *conn_state,
struct intel_crtc_state *crtc_state,
bool dsc,
struct link_config_limits *limits)
{
- if (!intel_dp_compute_config_limits(intel_dp, connector,
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+
+ if (!intel_dp_compute_config_limits(intel_dp, conn_state,
crtc_state, false, dsc,
limits))
return false;
@@ -665,7 +668,7 @@ static int mst_stream_compute_config(struct intel_encoder *encoder,
joiner_needs_dsc = intel_dp_joiner_needs_dsc(display, num_joined_pipes);
dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
- !mst_stream_compute_config_limits(intel_dp, connector,
+ !mst_stream_compute_config_limits(intel_dp, conn_state,
pipe_config, false, &limits);
if (!dsc_needed) {
@@ -691,7 +694,7 @@ static int mst_stream_compute_config(struct intel_encoder *encoder,
str_yes_no(intel_dp->force_dsc_en));
- if (!mst_stream_compute_config_limits(intel_dp, connector,
+ if (!mst_stream_compute_config_limits(intel_dp, conn_state,
pipe_config, true,
&limits))
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_test.c b/drivers/gpu/drm/i915/display/intel_dp_test.c
index 6ed5012c5fac..5cfa1dd411da 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_test.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_test.c
@@ -6,7 +6,6 @@
#include <drm/display/drm_dp.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_edid.h>
-#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -753,13 +752,12 @@ static const struct {
void intel_dp_test_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
int i;
for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
debugfs_create_file(intel_display_debugfs_files[i].name,
0644,
- minor->debugfs_root,
+ display->drm->debugfs_root,
display,
intel_display_debugfs_files[i].fops);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 33e0398120c8..8ea96cc524a1 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -2046,6 +2046,7 @@ static void bxt_ddi_pll_enable(struct intel_display *display,
enum dpio_phy phy = DPIO_PHY0;
enum dpio_channel ch = DPIO_CH0;
u32 temp;
+ int ret;
bxt_port_to_phy_channel(display, port, &phy, &ch);
@@ -2056,8 +2057,10 @@ static void bxt_ddi_pll_enable(struct intel_display *display,
intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port),
0, PORT_PLL_POWER_ENABLE);
- if (wait_for_us((intel_de_read(display, BXT_PORT_PLL_ENABLE(port)) &
- PORT_PLL_POWER_STATE), 200))
+ ret = intel_de_wait_custom(display, BXT_PORT_PLL_ENABLE(port),
+ PORT_PLL_POWER_STATE, PORT_PLL_POWER_STATE,
+ 200, 0, NULL);
+ if (ret)
drm_err(display->drm,
"Power state not set for PLL:%d\n", port);
}
@@ -2119,8 +2122,10 @@ static void bxt_ddi_pll_enable(struct intel_display *display,
intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
intel_de_posting_read(display, BXT_PORT_PLL_ENABLE(port));
- if (wait_for_us((intel_de_read(display, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
- 200))
+ ret = intel_de_wait_custom(display, BXT_PORT_PLL_ENABLE(port),
+ PORT_PLL_LOCK, PORT_PLL_LOCK,
+ 200, 0, NULL);
+ if (ret)
drm_err(display->drm, "PLL %d not locked\n", port);
if (display->platform.geminilake) {
@@ -2144,6 +2149,7 @@ static void bxt_ddi_pll_disable(struct intel_display *display,
struct intel_dpll *pll)
{
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
+ int ret;
intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
intel_de_posting_read(display, BXT_PORT_PLL_ENABLE(port));
@@ -2152,8 +2158,10 @@ static void bxt_ddi_pll_disable(struct intel_display *display,
intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port),
PORT_PLL_POWER_ENABLE, 0);
- if (wait_for_us(!(intel_de_read(display, BXT_PORT_PLL_ENABLE(port)) &
- PORT_PLL_POWER_STATE), 200))
+ ret = intel_de_wait_custom(display, BXT_PORT_PLL_ENABLE(port),
+ PORT_PLL_POWER_STATE, 0,
+ 200, 0, NULL);
+ if (ret)
drm_err(display->drm,
"Power state not reset for PLL:%d\n", port);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index aea249e2699f..c0a817018d08 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -33,8 +33,6 @@ i915_vm_to_dpt(struct i915_address_space *vm)
return container_of(vm, struct i915_dpt, vm);
}
-#define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT)
-
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
writeq(pte, addr);
@@ -322,5 +320,5 @@ void intel_dpt_destroy(struct i915_address_space *vm)
u64 intel_dpt_offset(struct i915_vma *dpt_vma)
{
- return dpt_vma->node.start;
+ return i915_vma_offset(dpt_vma);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 53d8ae3a70e9..dee44d45b668 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -4,10 +4,11 @@
*
*/
+#include <linux/iopoll.h>
+
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
-#include "i915_utils.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_regs.h"
@@ -871,8 +872,13 @@ void intel_dsb_wait(struct intel_dsb *dsb)
struct intel_crtc *crtc = dsb->crtc;
struct intel_display *display = to_intel_display(crtc->base.dev);
enum pipe pipe = crtc->pipe;
+ bool is_busy;
+ int ret;
- if (wait_for(!is_dsb_busy(display, pipe, dsb->id), 1)) {
+ ret = poll_timeout_us(is_busy = is_dsb_busy(display, pipe, dsb->id),
+ !is_busy,
+ 100, 1000, false);
+ if (ret) {
u32 offset = intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf);
intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id),
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index e6a851d276f8..23402408e172 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -777,7 +777,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
intel_dsi->init_count = mipi_config->master_init_timer;
intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
intel_dsi->video_frmt_cfg_bits =
- mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
+ mipi_config->bta_disable ? DISABLE_VIDEO_BTA : 0;
intel_dsi->bgr_enabled = mipi_config->rgb_flip;
/* Starting point, adjusted depending on dual link and burst mode */
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_dsi_vbt_defs.h
new file mode 100644
index 000000000000..edc7331dcca2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt_defs.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __INTEL_DSI_VBT_DEFS_H__
+#define __INTEL_DSI_VBT_DEFS_H__
+
+#include <linux/types.h>
+
+/*
+ * MIPI Sequence Block definitions
+ *
+ * Note the VBT spec has AssertReset / DeassertReset swapped from their
+ * usual naming; we use the proper names here to avoid confusion when
+ * reading the code.
+ */
+enum mipi_seq {
+ MIPI_SEQ_END = 0,
+ MIPI_SEQ_DEASSERT_RESET, /* Spec says MipiAssertResetPin */
+ MIPI_SEQ_INIT_OTP,
+ MIPI_SEQ_DISPLAY_ON,
+ MIPI_SEQ_DISPLAY_OFF,
+ MIPI_SEQ_ASSERT_RESET, /* Spec says MipiDeassertResetPin */
+ MIPI_SEQ_BACKLIGHT_ON, /* sequence block v2+ */
+ MIPI_SEQ_BACKLIGHT_OFF, /* sequence block v2+ */
+ MIPI_SEQ_TEAR_ON, /* sequence block v2+ */
+ MIPI_SEQ_TEAR_OFF, /* sequence block v3+ */
+ MIPI_SEQ_POWER_ON, /* sequence block v3+ */
+ MIPI_SEQ_POWER_OFF, /* sequence block v3+ */
+ MIPI_SEQ_MAX
+};
+
+enum mipi_seq_element {
+ MIPI_SEQ_ELEM_END = 0,
+ MIPI_SEQ_ELEM_SEND_PKT,
+ MIPI_SEQ_ELEM_DELAY,
+ MIPI_SEQ_ELEM_GPIO,
+ MIPI_SEQ_ELEM_I2C, /* sequence block v2+ */
+ MIPI_SEQ_ELEM_SPI, /* sequence block v3+ */
+ MIPI_SEQ_ELEM_PMIC, /* sequence block v3+ */
+ MIPI_SEQ_ELEM_MAX
+};
+
+#define MIPI_DSI_UNDEFINED_PANEL_ID 0
+#define MIPI_DSI_GENERIC_PANEL_ID 1
+
+struct mipi_config {
+ u16 panel_id;
+
+ /* General Params */
+ struct {
+ u32 enable_dithering:1;
+ u32 rsvd1:1;
+ u32 is_bridge:1;
+
+ u32 panel_arch_type:2;
+ u32 is_cmd_mode:1;
+
+#define NON_BURST_SYNC_PULSE 0x1
+#define NON_BURST_SYNC_EVENTS 0x2
+#define BURST_MODE 0x3
+ u32 video_transfer_mode:2;
+
+ u32 cabc_supported:1;
+#define PPS_BLC_PMIC 0
+#define PPS_BLC_SOC 1
+ u32 pwm_blc:1;
+
+#define PIXEL_FORMAT_RGB565 0x1
+#define PIXEL_FORMAT_RGB666 0x2
+#define PIXEL_FORMAT_RGB666_LOOSELY_PACKED 0x3
+#define PIXEL_FORMAT_RGB888 0x4
+ u32 videomode_color_format:4;
+
+#define ENABLE_ROTATION_0 0x0
+#define ENABLE_ROTATION_90 0x1
+#define ENABLE_ROTATION_180 0x2
+#define ENABLE_ROTATION_270 0x3
+ u32 rotation:2;
+ u32 bta_disable:1;
+ u32 rsvd2:15;
+ } __packed;
+
+ /* Port Desc */
+ struct {
+#define DUAL_LINK_NOT_SUPPORTED 0
+#define DUAL_LINK_FRONT_BACK 1
+#define DUAL_LINK_PIXEL_ALT 2
+ u16 dual_link:2;
+ u16 lane_cnt:2;
+ u16 pixel_overlap:3;
+ u16 rgb_flip:1;
+#define DL_DCS_PORT_A 0x00
+#define DL_DCS_PORT_C 0x01
+#define DL_DCS_PORT_A_AND_C 0x02
+ u16 dl_dcs_cabc_ports:2;
+ u16 dl_dcs_backlight_ports:2;
+ u16 port_sync:1; /* 219-230 */
+ u16 rsvd3:3;
+ } __packed;
+
+ /* DSI Controller Parameters */
+ struct {
+ u16 dsi_usage:1;
+ u16 rsvd4:15;
+ } __packed;
+
+ u8 rsvd5;
+ u32 target_burst_mode_freq;
+ u32 dsi_ddr_clk;
+ u32 bridge_ref_clk;
+
+ /* LP Byte Clock */
+ struct {
+#define BYTE_CLK_SEL_20MHZ 0
+#define BYTE_CLK_SEL_10MHZ 1
+#define BYTE_CLK_SEL_5MHZ 2
+ u8 byte_clk_sel:2;
+ u8 rsvd6:6;
+ } __packed;
+
+ /* DPhy Flags */
+ struct {
+ u16 dphy_param_valid:1;
+ u16 eot_pkt_disabled:1;
+ u16 enable_clk_stop:1;
+ u16 blanking_packets_during_bllp:1; /* 219+ */
+ u16 lp_clock_during_lpm:1; /* 219+ */
+ u16 rsvd7:11;
+ } __packed;
+
+ u32 hs_tx_timeout;
+ u32 lp_rx_timeout;
+ u32 turn_around_timeout;
+ u32 device_reset_timer;
+ u32 master_init_timer;
+ u32 dbi_bw_timer;
+ u32 lp_byte_clk_val;
+
+ /* DPhy Params */
+ struct {
+ u32 prepare_cnt:6;
+ u32 rsvd8:2;
+ u32 clk_zero_cnt:8;
+ u32 trail_cnt:5;
+ u32 rsvd9:3;
+ u32 exit_zero_cnt:6;
+ u32 rsvd10:2;
+ } __packed;
+
+ u32 clk_lane_switch_cnt;
+ u32 hl_switch_cnt;
+
+ u32 rsvd11[6];
+
+ /* timings based on dphy spec */
+ u8 tclk_miss;
+ u8 tclk_post;
+ u8 rsvd12;
+ u8 tclk_pre;
+ u8 tclk_prepare;
+ u8 tclk_settle;
+ u8 tclk_term_enable;
+ u8 tclk_trail;
+ u16 tclk_prepare_clkzero;
+ u8 rsvd13;
+ u8 td_term_enable;
+ u8 teot;
+ u8 ths_exit;
+ u8 ths_prepare;
+ u16 ths_prepare_hszero;
+ u8 rsvd14;
+ u8 ths_settle;
+ u8 ths_skip;
+ u8 ths_trail;
+ u8 tinit;
+ u8 tlpx;
+ u8 rsvd15[3];
+
+ /* GPIOs */
+ u8 panel_enable;
+ u8 bl_enable;
+ u8 pwm_enable;
+ u8 reset_r_n;
+ u8 pwr_down_r;
+ u8 stdby_r_n;
+} __packed;
+
+/* all delays have a unit of 100us */
+struct mipi_pps_data {
+ u16 panel_on_delay;
+ u16 bl_enable_delay;
+ u16 bl_disable_delay;
+ u16 panel_off_delay;
+ u16 panel_power_cycle_delay;
+} __packed;
+
+#endif /* __INTEL_DSI_VBT_DEFS_H__ */
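
A minimal consumer sketch for the relocated definitions, based on how
intel_dsi_vbt.c reads these fields (configure_burst() is a placeholder; the
bta_disable line mirrors the fix earlier in this patch):

	if (mipi_config->video_transfer_mode == BURST_MODE)
		configure_burst(mipi_config->target_burst_mode_freq);

	intel_dsi->video_frmt_cfg_bits =
		mipi_config->bta_disable ? DISABLE_VIDEO_BTA : 0;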
diff --git a/drivers/gpu/drm/i915/display/intel_encoder.c b/drivers/gpu/drm/i915/display/intel_encoder.c
index 0b7bd26f4339..2ffe1f251ef8 100644
--- a/drivers/gpu/drm/i915/display/intel_encoder.c
+++ b/drivers/gpu/drm/i915/display/intel_encoder.c
@@ -8,6 +8,7 @@
#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_encoder.h"
+#include "intel_hotplug.h"
static void intel_encoder_link_check_work_fn(struct work_struct *work)
{
@@ -37,6 +38,28 @@ void intel_encoder_link_check_queue_work(struct intel_encoder *encoder, int dela
&encoder->link_check_work, msecs_to_jiffies(delay_ms));
}
+void intel_encoder_unblock_all_hpds(struct intel_display *display)
+{
+ struct intel_encoder *encoder;
+
+ if (!HAS_DISPLAY(display))
+ return;
+
+ for_each_intel_encoder(display->drm, encoder)
+ intel_hpd_unblock(encoder);
+}
+
+void intel_encoder_block_all_hpds(struct intel_display *display)
+{
+ struct intel_encoder *encoder;
+
+ if (!HAS_DISPLAY(display))
+ return;
+
+ for_each_intel_encoder(display->drm, encoder)
+ intel_hpd_block(encoder);
+}
+
void intel_encoder_suspend_all(struct intel_display *display)
{
struct intel_encoder *encoder;
@@ -80,3 +103,21 @@ void intel_encoder_shutdown_all(struct intel_display *display)
if (encoder->shutdown_complete)
encoder->shutdown_complete(encoder);
}
+
+struct intel_digital_port *intel_dig_port_alloc(void)
+{
+ struct intel_digital_port *dig_port;
+
+ dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
+ if (!dig_port)
+ return NULL;
+
+ dig_port->hdmi.hdmi_reg = INVALID_MMIO_REG;
+ dig_port->dp.output_reg = INVALID_MMIO_REG;
+ dig_port->aux_ch = AUX_CH_NONE;
+ dig_port->max_lanes = 4;
+
+ mutex_init(&dig_port->hdcp.mutex);
+
+ return dig_port;
+}
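
Callers are expected to fill in the port-specific fields after allocation; a
sketch with a placeholder register value:

	struct intel_digital_port *dig_port = intel_dig_port_alloc();

	if (!dig_port)
		return -ENOMEM;

	/* the helper pre-sets invalid hdmi/dp registers, AUX_CH_NONE and
	 * max_lanes = 4; encoders then override what they actually use */
	dig_port->dp.output_reg = output_reg;	/* placeholder */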
diff --git a/drivers/gpu/drm/i915/display/intel_encoder.h b/drivers/gpu/drm/i915/display/intel_encoder.h
index 3fa5589f0b1c..ace0fe1a8f27 100644
--- a/drivers/gpu/drm/i915/display/intel_encoder.h
+++ b/drivers/gpu/drm/i915/display/intel_encoder.h
@@ -6,6 +6,7 @@
#ifndef __INTEL_ENCODER_H__
#define __INTEL_ENCODER_H__
+struct intel_digital_port;
struct intel_display;
struct intel_encoder;
@@ -17,4 +18,9 @@ void intel_encoder_link_check_flush_work(struct intel_encoder *encoder);
void intel_encoder_suspend_all(struct intel_display *display);
void intel_encoder_shutdown_all(struct intel_display *display);
+void intel_encoder_block_all_hpds(struct intel_display *display);
+void intel_encoder_unblock_all_hpds(struct intel_display *display);
+
+struct intel_digital_port *intel_dig_port_alloc(void);
+
#endif /* __INTEL_ENCODER_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 0da842bd2f2f..22a4a1575d22 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -11,6 +11,7 @@
#include <drm/drm_modeset_helper.h>
#include "i915_drv.h"
+#include "i915_utils.h"
#include "intel_bo.h"
#include "intel_display.h"
#include "intel_display_core.h"
@@ -19,6 +20,7 @@
#include "intel_fb.h"
#include "intel_fb_bo.h"
#include "intel_frontbuffer.h"
+#include "intel_panic.h"
#include "intel_plane.h"
#define check_array_bounds(display, a, i) drm_WARN_ON((display)->drm, (i) >= ARRAY_SIZE(a))
@@ -2342,6 +2344,26 @@ intel_user_framebuffer_create(struct drm_device *dev,
return fb;
}
+struct intel_framebuffer *intel_framebuffer_alloc(void)
+{
+ struct intel_framebuffer *intel_fb;
+ struct intel_panic *panic;
+
+ intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ if (!intel_fb)
+ return NULL;
+
+ panic = intel_panic_alloc();
+ if (!panic) {
+ kfree(intel_fb);
+ return NULL;
+ }
+
+ intel_fb->panic = panic;
+
+ return intel_fb;
+}
+
struct drm_framebuffer *
intel_framebuffer_create(struct drm_gem_object *obj,
const struct drm_format_info *info,
@@ -2350,7 +2372,7 @@ intel_framebuffer_create(struct drm_gem_object *obj,
struct intel_framebuffer *intel_fb;
int ret;
- intel_fb = intel_bo_alloc_framebuffer();
+ intel_fb = intel_framebuffer_alloc();
if (!intel_fb)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h
index 403b8b63721a..22514d5f2bb6 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.h
+++ b/drivers/gpu/drm/i915/display/intel_fb.h
@@ -104,6 +104,9 @@ int intel_framebuffer_init(struct intel_framebuffer *ifb,
struct drm_gem_object *obj,
const struct drm_format_info *info,
struct drm_mode_fb_cmd2 *mode_cmd);
+
+struct intel_framebuffer *intel_framebuffer_alloc(void);
+
struct drm_framebuffer *
intel_framebuffer_create(struct drm_gem_object *obj,
const struct drm_format_info *info,
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index 5a0151775a3a..45af04cb0fb2 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -11,6 +11,7 @@
#include "gem/i915_gem_object.h"
#include "i915_drv.h"
+#include "i915_vma.h"
#include "intel_display_core.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
@@ -151,7 +152,7 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
* happy to scanout from anywhere within its global aperture.
*/
pinctl = 0;
- if (HAS_GMCH(dev_priv))
+ if (HAS_GMCH(display))
pinctl |= PIN_MAPPABLE;
i915_gem_ww_ctx_init(&ww, true);
@@ -192,7 +193,7 @@ retry:
* mode that matches the user configuration.
*/
ret = i915_vma_pin_fence(vma);
- if (ret != 0 && DISPLAY_VER(dev_priv) < 4) {
+ if (ret != 0 && DISPLAY_VER(display) < 4) {
i915_vma_unpin(vma);
goto err_unpin;
}
@@ -260,6 +261,7 @@ intel_plane_fb_vtd_guard(const struct intel_plane_state *plane_state)
int intel_plane_pin_fb(struct intel_plane_state *plane_state,
const struct intel_plane_state *old_plane_state)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
const struct intel_framebuffer *fb =
to_intel_framebuffer(plane_state->hw.fb);
@@ -277,17 +279,6 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state,
plane_state->ggtt_vma = vma;
- /*
- * Pre-populate the dma address before we enter the vblank
- * evade critical section as i915_gem_object_get_dma_address()
- * will trigger might_sleep() even if it won't actually sleep,
- * which is the case when the fb has already been pinned.
- */
- if (intel_plane_needs_physical(plane)) {
- struct drm_i915_gem_object *obj = to_intel_bo(intel_fb_bo(&fb->base));
-
- plane_state->phys_dma_addr = i915_gem_object_get_dma_address(obj, 0);
- }
} else {
unsigned int alignment = intel_plane_fb_min_alignment(plane_state);
@@ -309,6 +300,28 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state,
plane_state->dpt_vma = vma;
WARN_ON(plane_state->ggtt_vma == plane_state->dpt_vma);
+
+ /*
+ * The DPT object contains only one vma, and there is no VT-d
+ * guard, so the VMA's offset within the DPT is always 0.
+ */
+ drm_WARN_ON(display->drm, intel_dpt_offset(plane_state->dpt_vma));
+ }
+
+ /*
+ * Pre-populate the dma address before we enter the vblank
+ * evade critical section as i915_gem_object_get_dma_address()
+ * will trigger might_sleep() even if it won't actually sleep,
+ * which is the case when the fb has already been pinned.
+ */
+ if (intel_plane_needs_physical(plane)) {
+ struct drm_i915_gem_object *obj = to_intel_bo(intel_fb_bo(&fb->base));
+
+ plane_state->surf = i915_gem_object_get_dma_address(obj, 0) +
+ plane->surf_offset(plane_state);
+ } else {
+ plane_state->surf = i915_ggtt_offset(plane_state->ggtt_vma) +
+ plane->surf_offset(plane_state);
}
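+	/* At this point plane_state->surf holds the final surface address:
+	 * base (physical DMA address or GGTT offset) plus the plane's
+	 * surf_offset(), precomputed so the vblank-evade critical section
+	 * only has to write it out. */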
return 0;
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 6e26cb4c5724..0d380c825791 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -98,11 +98,7 @@ struct intel_fbc {
struct intel_display *display;
const struct intel_fbc_funcs *funcs;
- /*
- * This is always the inner lock when overlapping with
- * struct_mutex and it's the outer lock when overlapping
- * with stolen_lock.
- */
+ /* This is always the outer lock when overlapping with stolen_lock */
struct mutex lock;
unsigned int busy_bits;
@@ -383,11 +379,11 @@ static void i8xx_fbc_program_cfb(struct intel_fbc *fbc)
struct drm_i915_private *i915 = to_i915(display->drm);
drm_WARN_ON(display->drm,
- range_overflows_end_t(u64, i915_gem_stolen_area_address(i915),
+ range_end_overflows_t(u64, i915_gem_stolen_area_address(i915),
i915_gem_stolen_node_offset(&fbc->compressed_fb),
U32_MAX));
drm_WARN_ON(display->drm,
- range_overflows_end_t(u64, i915_gem_stolen_area_address(i915),
+ range_end_overflows_t(u64, i915_gem_stolen_area_address(i915),
i915_gem_stolen_node_offset(&fbc->compressed_llb),
U32_MAX));
intel_de_write(display, FBC_CFB_BASE,
@@ -552,10 +548,6 @@ static void ilk_fbc_deactivate(struct intel_fbc *fbc)
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
intel_de_write(display, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);
-
- /* wa_18038517565 Enable DPFC clock gating after FBC disable */
- if (display->platform.dg2 || DISPLAY_VER(display) >= 14)
- fbc_compressor_clkgate_disable_wa(fbc, false);
}
}
@@ -1464,7 +1456,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
- if (intel_display_needs_wa_16023588340(display)) {
+ if (intel_display_wa(display, 16023588340)) {
plane_state->no_fbc_reason = "Wa_16023588340";
return 0;
}
@@ -1554,14 +1546,14 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
* having a Y offset that isn't divisible by 4 causes FIFO underrun
* and screen flicker.
*/
- if (DISPLAY_VER(display) >= 9 &&
+ if (IS_DISPLAY_VER(display, 9, 12) &&
plane_state->view.color_plane[0].y & 3) {
plane_state->no_fbc_reason = "plane start Y offset misaligned";
return 0;
}
/* Wa_22010751166: icl, ehl, tgl, dg1, rkl */
- if (DISPLAY_VER(display) >= 11 &&
+ if (IS_DISPLAY_VER(display, 9, 12) &&
(plane_state->view.color_plane[0].y +
(drm_rect_height(&plane_state->uapi.src) >> 16)) & 3) {
plane_state->no_fbc_reason = "plane end Y offset misaligned";
@@ -1710,6 +1702,10 @@ static void __intel_fbc_disable(struct intel_fbc *fbc)
__intel_fbc_cleanup_cfb(fbc);
+ /* wa_18038517565 Enable DPFC clock gating after FBC disable */
+ if (display->platform.dg2 || DISPLAY_VER(display) >= 14)
+ fbc_compressor_clkgate_disable_wa(fbc, false);
+
fbc->state.plane = NULL;
fbc->flip_pending = false;
fbc->busy_bits = 0;
@@ -2240,10 +2236,9 @@ void intel_fbc_crtc_debugfs_add(struct intel_crtc *crtc)
/* FIXME: remove this once igt is on board with per-crtc stuff */
void intel_fbc_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
struct intel_fbc *fbc;
fbc = display->fbc[INTEL_FBC_A];
if (fbc)
- intel_fbc_debugfs_add(fbc, minor->debugfs_root);
+ intel_fbc_debugfs_add(fbc, display->drm->debugfs_root);
}
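
This file and several others below (intel_hotplug.c, intel_opregion.c,
intel_psr.c) repeat one mechanical conversion: debugfs files are created
under the drm_device's own debugfs_root instead of being reached through the
primary DRM minor. A before/after sketch with a hypothetical "foo" file:

    /* before: indirect through the primary minor */
    struct drm_minor *minor = display->drm->primary;
    debugfs_create_file("foo", 0644, minor->debugfs_root,
                        display, &foo_fops);

    /* after: use the device-level debugfs root directly */
    debugfs_create_file("foo", 0644, display->drm->debugfs_root,
                        display, &foo_fops);
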
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index 8039a84671cc..59a36b3a22c1 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -292,34 +292,6 @@ int intel_fdi_link_freq(struct intel_display *display,
return display->fdi.pll_freq;
}
-/**
- * intel_fdi_compute_pipe_bpp - compute pipe bpp limited by max link bpp
- * @crtc_state: the crtc state
- *
- * Compute the pipe bpp limited by the CRTC's maximum link bpp. Encoders can
- * call this function during state computation in the simple case where the
- * link bpp will always match the pipe bpp. This is the case for all non-DP
- * encoders, while DP encoders will use a link bpp lower than pipe bpp in case
- * of DSC compression.
- *
- * Returns %true in case of success, %false if pipe bpp would need to be
- * reduced below its valid range.
- */
-bool intel_fdi_compute_pipe_bpp(struct intel_crtc_state *crtc_state)
-{
- int pipe_bpp = min(crtc_state->pipe_bpp,
- fxp_q4_to_int(crtc_state->max_link_bpp_x16));
-
- pipe_bpp = rounddown(pipe_bpp, 2 * 3);
-
- if (pipe_bpp < 6 * 3)
- return false;
-
- crtc_state->pipe_bpp = pipe_bpp;
-
- return true;
-}
-
int ilk_fdi_compute_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.h b/drivers/gpu/drm/i915/display/intel_fdi.h
index ad5e103c38a8..1cd08df9b0c2 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.h
+++ b/drivers/gpu/drm/i915/display/intel_fdi.h
@@ -20,7 +20,6 @@ struct intel_link_bw_limits;
int intel_fdi_add_affected_crtcs(struct intel_atomic_state *state);
int intel_fdi_link_freq(struct intel_display *display,
const struct intel_crtc_state *pipe_config);
-bool intel_fdi_compute_pipe_bpp(struct intel_crtc_state *crtc_state);
int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
struct intel_crtc_state *pipe_config);
int intel_fdi_atomic_check_link(struct intel_atomic_state *state,
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.c b/drivers/gpu/drm/i915/display/intel_global_state.c
index 000a898c9480..30eff6009e87 100644
--- a/drivers/gpu/drm/i915/display/intel_global_state.c
+++ b/drivers/gpu/drm/i915/display/intel_global_state.c
@@ -13,6 +13,36 @@
#include "intel_display_types.h"
#include "intel_global_state.h"
+#define for_each_new_global_obj_in_state(__state, obj, new_obj_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_global_objs && \
+ ((obj) = (__state)->global_objs[__i].ptr, \
+ (new_obj_state) = (__state)->global_objs[__i].new_state, 1); \
+ (__i)++) \
+ for_each_if(obj)
+
+#define for_each_old_global_obj_in_state(__state, obj, old_obj_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_global_objs && \
+ ((obj) = (__state)->global_objs[__i].ptr, \
+ (old_obj_state) = (__state)->global_objs[__i].old_state, 1); \
+ (__i)++) \
+ for_each_if(obj)
+
+#define for_each_oldnew_global_obj_in_state(__state, obj, old_obj_state, new_obj_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_global_objs && \
+ ((obj) = (__state)->global_objs[__i].ptr, \
+ (old_obj_state) = (__state)->global_objs[__i].old_state, \
+ (new_obj_state) = (__state)->global_objs[__i].new_state, 1); \
+ (__i)++) \
+ for_each_if(obj)
+
+struct intel_global_objs_state {
+ struct intel_global_obj *ptr;
+ struct intel_global_state *state, *old_state, *new_state;
+};
+
struct intel_global_commit {
struct kref ref;
struct completion done;
@@ -148,7 +178,7 @@ intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(state);
int index, num_objs, i;
size_t size;
- struct __intel_global_objs_state *arr;
+ struct intel_global_objs_state *arr;
struct intel_global_state *obj_state;
for (i = 0; i < state->num_global_objs; i++)
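
The three for_each_*_global_obj_in_state() iterators move from the header
into this file, where their only users live, and the double-underscore
prefix is dropped from the objs-state struct at the same time. A minimal
usage sketch, assuming an intel_atomic_state *state already populated during
the atomic check phase:

    struct intel_global_obj *obj;
    struct intel_global_state *new_obj_state;
    int i;

    for_each_new_global_obj_in_state(state, obj, new_obj_state, i) {
            /* obj walks each tracked global object; new_obj_state is
             * its state after this commit */
    }
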
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.h b/drivers/gpu/drm/i915/display/intel_global_state.h
index d42fb2547ee9..e1efa530cc86 100644
--- a/drivers/gpu/drm/i915/display/intel_global_state.h
+++ b/drivers/gpu/drm/i915/display/intel_global_state.h
@@ -11,6 +11,7 @@
struct intel_atomic_state;
struct intel_display;
+struct intel_global_commit;
struct intel_global_obj;
struct intel_global_state;
@@ -26,36 +27,6 @@ struct intel_global_obj {
const struct intel_global_state_funcs *funcs;
};
-#define intel_for_each_global_obj(obj, dev_priv) \
- list_for_each_entry(obj, &(dev_priv)->display.global.obj_list, head)
-
-#define for_each_new_global_obj_in_state(__state, obj, new_obj_state, __i) \
- for ((__i) = 0; \
- (__i) < (__state)->num_global_objs && \
- ((obj) = (__state)->global_objs[__i].ptr, \
- (new_obj_state) = (__state)->global_objs[__i].new_state, 1); \
- (__i)++) \
- for_each_if(obj)
-
-#define for_each_old_global_obj_in_state(__state, obj, old_obj_state, __i) \
- for ((__i) = 0; \
- (__i) < (__state)->num_global_objs && \
- ((obj) = (__state)->global_objs[__i].ptr, \
- (old_obj_state) = (__state)->global_objs[__i].old_state, 1); \
- (__i)++) \
- for_each_if(obj)
-
-#define for_each_oldnew_global_obj_in_state(__state, obj, old_obj_state, new_obj_state, __i) \
- for ((__i) = 0; \
- (__i) < (__state)->num_global_objs && \
- ((obj) = (__state)->global_objs[__i].ptr, \
- (old_obj_state) = (__state)->global_objs[__i].old_state, \
- (new_obj_state) = (__state)->global_objs[__i].new_state, 1); \
- (__i)++) \
- for_each_if(obj)
-
-struct intel_global_commit;
-
struct intel_global_state {
struct intel_global_obj *obj;
struct intel_atomic_state *state;
@@ -64,11 +35,6 @@ struct intel_global_state {
bool changed, serialized;
};
-struct __intel_global_objs_state {
- struct intel_global_obj *ptr;
- struct intel_global_state *state, *old_state, *new_state;
-};
-
void intel_atomic_global_obj_init(struct intel_display *display,
struct intel_global_obj *obj,
struct intel_global_state *state,
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index 0d73f32fe7f1..358210adb8f8 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -30,6 +30,7 @@
#include <linux/export.h>
#include <linux/i2c-algo-bit.h>
#include <linux/i2c.h>
+#include <linux/iopoll.h>
#include <drm/display/drm_hdcp_helper.h>
@@ -39,6 +40,7 @@
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_wa.h"
#include "intel_gmbus.h"
#include "intel_gmbus_regs.h"
@@ -217,7 +219,7 @@ static void pnv_gmbus_clock_gating(struct intel_display *display,
bool enable)
{
/* When using bit bashing for I2C, this bit needs to be set to 1 */
- intel_de_rmw(display, DSPCLK_GATE_D(display),
+ intel_de_rmw(display, DSPCLK_GATE_D,
PNV_GMBUSUNIT_CLOCK_GATE_DISABLE,
!enable ? PNV_GMBUSUNIT_CLOCK_GATE_DISABLE : 0);
}
@@ -240,14 +242,20 @@ static void bxt_gmbus_clock_gating(struct intel_display *display,
static u32 get_reserved(struct intel_gmbus *bus)
{
struct intel_display *display = bus->display;
- u32 reserved = 0;
+ u32 preserve_bits = 0;
+
+ if (display->platform.i830 || display->platform.i845g)
+ return 0;
/* On most chips, these bits must be preserved in software. */
- if (!display->platform.i830 && !display->platform.i845g)
- reserved = intel_de_read_notrace(display, bus->gpio_reg) &
- (GPIO_DATA_PULLUP_DISABLE | GPIO_CLOCK_PULLUP_DISABLE);
+ preserve_bits |= GPIO_DATA_PULLUP_DISABLE | GPIO_CLOCK_PULLUP_DISABLE;
+
+ /* Wa_16025573575: the mask bits need to be preserved throughout */
+ if (intel_display_wa(display, 16025573575))
+ preserve_bits |= GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_VAL_MASK |
+ GPIO_DATA_DIR_MASK | GPIO_DATA_VAL_MASK;
- return reserved;
+ return intel_de_read_notrace(display, bus->gpio_reg) & preserve_bits;
}
static int get_clock(void *data)
@@ -308,6 +316,22 @@ static void set_data(void *data, int state_high)
intel_de_posting_read(display, bus->gpio_reg);
}
+static void
+ptl_handle_mask_bits(struct intel_gmbus *bus, bool set)
+{
+ struct intel_display *display = bus->display;
+ u32 reg_val = intel_de_read_notrace(display, bus->gpio_reg);
+ u32 mask_bits = GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_VAL_MASK |
+ GPIO_DATA_DIR_MASK | GPIO_DATA_VAL_MASK;
+
+ if (set)
+ reg_val |= mask_bits;
+ else
+ reg_val &= ~mask_bits;
+
+ intel_de_write_notrace(display, bus->gpio_reg, reg_val);
+ intel_de_posting_read(display, bus->gpio_reg);
+}
+
static int
intel_gpio_pre_xfer(struct i2c_adapter *adapter)
{
@@ -319,6 +343,9 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter)
if (display->platform.pineview)
pnv_gmbus_clock_gating(display, false);
+ if (intel_display_wa(display, 16025573575))
+ ptl_handle_mask_bits(bus, true);
+
set_data(bus, 1);
set_clock(bus, 1);
udelay(I2C_RISEFALL_TIME);
@@ -336,6 +363,9 @@ intel_gpio_post_xfer(struct i2c_adapter *adapter)
if (display->platform.pineview)
pnv_gmbus_clock_gating(display, true);
+
+ if (intel_display_wa(display, 16025573575))
+ ptl_handle_mask_bits(bus, false);
}
static void
@@ -385,11 +415,14 @@ static int gmbus_wait(struct intel_display *display, u32 status, u32 irq_en)
intel_de_write_fw(display, GMBUS4(display), irq_en);
status |= GMBUS_SATOER;
- ret = wait_for_us((gmbus2 = intel_de_read_fw(display, GMBUS2(display))) & status,
- 2);
+
+ ret = poll_timeout_us_atomic(gmbus2 = intel_de_read_fw(display, GMBUS2(display)),
+ gmbus2 & status,
+ 0, 2, false);
if (ret)
- ret = wait_for((gmbus2 = intel_de_read_fw(display, GMBUS2(display))) & status,
- 50);
+ ret = poll_timeout_us(gmbus2 = intel_de_read_fw(display, GMBUS2(display)),
+ gmbus2 & status,
+ 500, 50 * 1000, false);
intel_de_write_fw(display, GMBUS4(display), 0);
remove_wait_queue(&display->gmbus.wait_queue, &wait);
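
The gmbus_wait() conversion above shows the two-stage pattern used throughout
this series: a short non-sleeping poll first (poll_timeout_us_atomic(), here
a 2 us busy-wait), then a sleeping fallback (poll_timeout_us(), here a 500 us
period within a 50 ms budget). As inferred from these call sites, the
arguments are a read expression, the success condition, the poll interval in
microseconds, the total timeout in microseconds, and whether to sleep before
the first read; the helpers return 0 on success and a negative errno on
timeout, which is why explicit follow-ups like the "if (!ksv_ready) return
-ETIMEDOUT;" removed from intel_hdcp.c below become redundant. A sketch with
a hypothetical read_status()/DONE pair:

    u32 val;
    int ret;

    /* stage 1: busy-wait up to 2 us, safe in atomic context */
    ret = poll_timeout_us_atomic(val = read_status(), val & DONE,
                                 0, 2, false);
    if (ret)
            /* stage 2: sleeping poll, 500 us period, 50 ms total */
            ret = poll_timeout_us(val = read_status(), val & DONE,
                                  500, 50 * 1000, false);
    if (ret)
            return ret;     /* typically -ETIMEDOUT */
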
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 42202c8bb066..531ee122bf82 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -11,6 +11,7 @@
#include <linux/component.h>
#include <linux/debugfs.h>
#include <linux/i2c.h>
+#include <linux/iopoll.h>
#include <linux/random.h>
#include <drm/display/drm_hdcp_helper.h>
@@ -326,16 +327,13 @@ static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
bool ksv_ready;
/* Poll for ksv list ready (spec says max time allowed is 5s) */
- ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
- &ksv_ready),
- read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
- 100 * 1000);
+ ret = poll_timeout_us(read_ret = shim->read_ksv_ready(dig_port, &ksv_ready),
+ read_ret || ksv_ready,
+ 100 * 1000, 5 * 1000 * 1000, false);
if (ret)
return ret;
if (read_ret)
return read_ret;
- if (!ksv_ready)
- return -ETIMEDOUT;
return 0;
}
@@ -817,6 +815,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
enum port port = dig_port->base.port;
unsigned long r0_prime_gen_start;
int ret, i, tries = 2;
+ u32 val;
union {
u32 reg[2];
u8 shim[DRM_HDCP_AN_LEN];
@@ -905,8 +904,10 @@ static int intel_hdcp_auth(struct intel_connector *connector)
HDCP_CONF_AUTH_AND_ENC);
/* Wait for R0 ready */
- if (wait_for(intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) &
- (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
+ ret = poll_timeout_us(val = intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)),
+ val & (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC),
+ 100, 1000, false);
+ if (ret) {
drm_err(display->drm, "Timed out waiting for R0 ready\n");
return -ETIMEDOUT;
}
@@ -938,16 +939,16 @@ static int intel_hdcp_auth(struct intel_connector *connector)
ri.reg);
/* Wait for Ri prime match */
- if (!wait_for(intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) &
- (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
+ ret = poll_timeout_us(val = intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)),
+ val & (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC),
+ 100, 1000, false);
+ if (!ret)
break;
}
if (i == tries) {
drm_dbg_kms(display->drm,
- "Timed out waiting for Ri prime match (%x)\n",
- intel_de_read(display,
- HDCP_STATUS(display, cpu_transcoder, port)));
+ "Timed out waiting for Ri prime match (%x)\n", val);
return -ETIMEDOUT;
}
@@ -2446,12 +2447,6 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state,
if (!hdcp->shim)
return -ENOENT;
- if (!connector->encoder) {
- drm_err(display->drm, "[CONNECTOR:%d:%s] encoder is not initialized\n",
- connector->base.base.id, connector->base.name);
- return -ENODEV;
- }
-
mutex_lock(&hdcp->mutex);
mutex_lock(&dig_port->hdcp.mutex);
drm_WARN_ON(display->drm,
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 9961ff259298..4ab7e2e3bfd4 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -29,6 +29,7 @@
#include <linux/delay.h>
#include <linux/hdmi.h>
#include <linux/i2c.h>
+#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
@@ -60,6 +61,7 @@
#include "intel_hdcp_regs.h"
#include "intel_hdcp_shim.h"
#include "intel_hdmi.h"
+#include "intel_link_bw.h"
#include "intel_lspcon.h"
#include "intel_panel.h"
#include "intel_pfit.h"
@@ -1582,9 +1584,9 @@ bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port,
intel_de_write(display, HDCP_RPRIME(display, cpu_transcoder, port), ri.reg);
/* Wait for Ri prime match */
- if (wait_for((intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) &
- (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC)) ==
- (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
+ ret = intel_de_wait_for_set(display, HDCP_STATUS(display, cpu_transcoder, port),
+ HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC, 1);
+ if (ret) {
drm_dbg_kms(display->drm, "Ri' mismatch detected (%x)\n",
intel_de_read(display, HDCP_STATUS(display, cpu_transcoder,
port)));
@@ -1689,11 +1691,10 @@ intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *dig_port,
if (timeout < 0)
return timeout;
- ret = __wait_for(ret = hdcp2_detect_msg_availability(dig_port,
- msg_id, &msg_ready,
- &msg_sz),
- !ret && msg_ready && msg_sz, timeout * 1000,
- 1000, 5 * 1000);
+ ret = poll_timeout_us(ret = hdcp2_detect_msg_availability(dig_port, msg_id,
+ &msg_ready, &msg_sz),
+ !ret && msg_ready && msg_sz,
+ 4000, timeout * 1000, false);
if (ret)
drm_dbg_kms(display->drm,
"msg_id: %d, ret: %d, timeout: %d\n",
@@ -2053,6 +2054,10 @@ intel_hdmi_mode_valid(struct drm_connector *_connector,
else
sink_format = INTEL_OUTPUT_FORMAT_RGB;
+ status = intel_pfit_mode_valid(display, mode, sink_format, 0);
+ if (status != MODE_OK)
+ return status;
+
status = intel_hdmi_mode_clock_valid(&connector->base, clock, has_hdmi_sink, sink_format);
if (status != MODE_OK) {
if (ycbcr_420_only ||
@@ -2341,6 +2346,9 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
pipe_config->pixel_multiplier = 2;
+ if (!intel_link_bw_compute_pipe_bpp(pipe_config))
+ return -EINVAL;
+
pipe_config->has_audio =
intel_hdmi_has_audio(encoder, pipe_config, conn_state) &&
intel_audio_compute_config(encoder, pipe_config, conn_state);
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 265aa97fcc75..4451a792600a 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -28,6 +28,7 @@
#include "i915_drv.h"
#include "i915_irq.h"
+#include "i915_utils.h"
#include "intel_connector.h"
#include "intel_display_power.h"
#include "intel_display_core.h"
@@ -971,8 +972,6 @@ void intel_hpd_cancel_work(struct intel_display *display)
spin_lock_irq(&display->irq.lock);
- drm_WARN_ON(display->drm, get_blocked_hpd_pin_mask(display));
-
display->hotplug.long_hpd_pin_mask = 0;
display->hotplug.short_hpd_pin_mask = 0;
display->hotplug.event_bits = 0;
@@ -1333,12 +1332,12 @@ static const struct file_operations i915_hpd_short_storm_ctl_fops = {
void intel_hpd_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
+ struct dentry *debugfs_root = display->drm->debugfs_root;
- debugfs_create_file("i915_hpd_storm_ctl", 0644, minor->debugfs_root,
+ debugfs_create_file("i915_hpd_storm_ctl", 0644, debugfs_root,
display, &i915_hpd_storm_ctl_fops);
- debugfs_create_file("i915_hpd_short_storm_ctl", 0644, minor->debugfs_root,
+ debugfs_create_file("i915_hpd_short_storm_ctl", 0644, debugfs_root,
display, &i915_hpd_short_storm_ctl_fops);
- debugfs_create_bool("i915_ignore_long_hpd", 0644, minor->debugfs_root,
+ debugfs_create_bool("i915_ignore_long_hpd", 0644, debugfs_root,
&display->hotplug.ignore_long_hpd);
}
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
index 43aee70597bf..4f72f3fb9af5 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
@@ -1025,7 +1025,7 @@ static void mtp_tc_hpd_enable_detection(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- intel_de_rmw(display, SHOTPLUG_CTL_DDI,
+ intel_de_rmw(display, SHOTPLUG_CTL_TC,
mtp_tc_hotplug_mask(encoder->hpd_pin),
mtp_tc_hotplug_enables(encoder));
}
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.c b/drivers/gpu/drm/i915/display/intel_link_bw.c
index 3caef7f9c7c4..f52dee0ea412 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.c
@@ -165,6 +165,34 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
}
/**
+ * intel_link_bw_compute_pipe_bpp - compute pipe bpp limited by max link bpp
+ * @crtc_state: the crtc state
+ *
+ * Compute the pipe bpp limited by the CRTC's maximum link bpp. Encoders can
+ * call this function during state computation in the simple case where the
+ * link bpp will always match the pipe bpp. This is the case for all non-DP
+ * encoders, while DP encoders will use a link bpp lower than pipe bpp in case
+ * of DSC compression.
+ *
+ * Returns %true in case of success, %false if pipe bpp would need to be
+ * reduced below its valid range.
+ */
+bool intel_link_bw_compute_pipe_bpp(struct intel_crtc_state *crtc_state)
+{
+ int pipe_bpp = min(crtc_state->pipe_bpp,
+ fxp_q4_to_int(crtc_state->max_link_bpp_x16));
+
+ pipe_bpp = rounddown(pipe_bpp, 2 * 3);
+
+ if (pipe_bpp < 6 * 3)
+ return false;
+
+ crtc_state->pipe_bpp = pipe_bpp;
+
+ return true;
+}
+
+/**
* intel_link_bw_set_bpp_limit_for_pipe - set link bpp limit for a pipe to its minimum
* @state: atomic state
* @old_limits: link BW limits
@@ -449,6 +477,7 @@ void intel_link_bw_connector_debugfs_add(struct intel_connector *connector)
switch (connector->base.connector_type) {
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
+ case DRM_MODE_CONNECTOR_HDMIA:
break;
case DRM_MODE_CONNECTOR_VGA:
case DRM_MODE_CONNECTOR_SVIDEO:
@@ -458,11 +487,6 @@ void intel_link_bw_connector_debugfs_add(struct intel_connector *connector)
break;
return;
- case DRM_MODE_CONNECTOR_HDMIA:
- if (HAS_FDI(display) && !HAS_DDI(display))
- break;
-
- return;
default:
return;
}
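
A worked example of the clamping math in intel_link_bw_compute_pipe_bpp():
max_link_bpp_x16 is a .4 fixed-point value, and rounddown(..., 2 * 3) keeps
the pipe bpp a multiple of 6 so each of the three color components ends up
with an even bpc. Assuming pipe_bpp = 30 (10 bpc) and max_link_bpp_x16 = 424
(26.5 bpp):

    pipe_bpp = min(30, fxp_q4_to_int(424)); /* 424 >> 4 = 26 */
    pipe_bpp = rounddown(26, 2 * 3);        /* 24, i.e. 8 bpc */
    /* 24 >= 6 * 3 (the 6 bpc floor), so the function returns
     * true and the pipe runs at 24 bpp */
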
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.h b/drivers/gpu/drm/i915/display/intel_link_bw.h
index b499042e62b1..95ab7c50c61d 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.h
@@ -27,6 +27,7 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
struct intel_link_bw_limits *limits,
u8 pipe_mask,
const char *reason);
+bool intel_link_bw_compute_pipe_bpp(struct intel_crtc_state *crtc_state);
bool intel_link_bw_set_bpp_limit_for_pipe(struct intel_atomic_state *state,
const struct intel_link_bw_limits *old_limits,
struct intel_link_bw_limits *new_limits,
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index 666148a14522..42284e9928f2 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -68,9 +68,9 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_print.h>
#include <drm/intel/intel_lpe_audio.h>
-#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_audio_regs.h"
#include "intel_de.h"
@@ -170,14 +170,11 @@ static struct irq_chip lpe_audio_irqchip = {
static int lpe_audio_irq_init(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int irq = display->audio.lpe.irq;
- drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv));
- irq_set_chip_and_handler_name(irq,
- &lpe_audio_irqchip,
- handle_simple_irq,
- "hdmi_lpe_audio_irq_handler");
+ irq_set_chip_and_handler_name(irq, &lpe_audio_irqchip,
+ handle_simple_irq,
+ "hdmi_lpe_audio_irq_handler");
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index abc4b562083d..d56026c4efdd 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/iopoll.h>
+
#include <drm/display/drm_dp_dual_mode_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic_helper.h>
@@ -181,6 +183,8 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon,
struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
struct intel_display *display = to_intel_display(intel_dp);
enum drm_lspcon_mode current_mode;
+ int timeout_us;
+ int ret;
current_mode = lspcon_get_current_mode(lspcon);
if (current_mode == mode)
@@ -189,9 +193,12 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon,
drm_dbg_kms(display->drm, "Waiting for LSPCON mode %s to settle\n",
lspcon_mode_name(mode));
- wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode,
- lspcon_get_mode_settle_timeout(lspcon));
- if (current_mode != mode)
+ timeout_us = lspcon_get_mode_settle_timeout(lspcon) * 1000;
+
+ ret = poll_timeout_us(current_mode = lspcon_get_current_mode(lspcon),
+ current_mode == mode,
+ 5000, timeout_us, false);
+ if (ret)
drm_err(display->drm, "LSPCON mode hasn't settled\n");
out:
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 7e48a235c99f..48f4d8ed4f15 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -48,6 +48,7 @@
#include "intel_dpll.h"
#include "intel_fdi.h"
#include "intel_gmbus.h"
+#include "intel_link_bw.h"
#include "intel_lvds.h"
#include "intel_lvds_regs.h"
#include "intel_panel.h"
@@ -433,7 +434,7 @@ static int intel_lvds_compute_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(display)) {
crtc_state->has_pch_encoder = true;
- if (!intel_fdi_compute_pipe_bpp(crtc_state))
+ if (!intel_link_bw_compute_pipe_bpp(crtc_state))
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index 81efdb17fc0c..cbc220310813 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -28,13 +28,13 @@
#include <linux/acpi.h>
#include <linux/debugfs.h>
#include <linux/dmi.h>
+#include <linux/iopoll.h>
#include <acpi/video.h>
#include <drm/drm_edid.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_acpi.h"
#include "intel_backlight.h"
#include "intel_display_core.h"
@@ -357,10 +357,12 @@ static int swsci(struct intel_display *display,
pci_write_config_word(pdev, SWSCI, swsci_val);
/* Poll for the result. */
-#define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0)
- if (wait_for(C, dslp)) {
+ ret = poll_timeout_us(scic = swsci->scic,
+ (scic & SWSCI_SCIC_INDICATOR) == 0,
+ 1000, dslp * 1000, false);
+ if (ret) {
drm_dbg(display->drm, "SWSCI request timed out\n");
- return -ETIMEDOUT;
+ return ret;
}
scic = (scic & SWSCI_SCIC_EXIT_STATUS_MASK) >>
@@ -1299,8 +1301,6 @@ DEFINE_SHOW_ATTRIBUTE(intel_opregion);
void intel_opregion_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
-
- debugfs_create_file("i915_opregion", 0444, minor->debugfs_root,
+ debugfs_create_file("i915_opregion", 0444, display->drm->debugfs_root,
display, &intel_opregion_fops);
}
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 159a5f998ea0..272f9e7af4d4 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -217,10 +217,9 @@ static void i830_overlay_clock_gating(struct intel_display *display,
/* WA_OVERLAY_CLKGATE:alm */
if (enable)
- intel_de_write(display, DSPCLK_GATE_D(display), 0);
+ intel_de_write(display, DSPCLK_GATE_D, 0);
else
- intel_de_write(display, DSPCLK_GATE_D(display),
- OVRUNIT_CLOCK_GATE_DISABLE);
+ intel_de_write(display, DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
/* WA_DISABLE_L2CACHE_CLOCK_GATING:alm */
pci_bus_read_config_byte(pdev->bus,
diff --git a/drivers/gpu/drm/i915/display/intel_panic.c b/drivers/gpu/drm/i915/display/intel_panic.c
new file mode 100644
index 000000000000..7311ce4e8b6c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_panic.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include <drm/drm_panic.h>
+
+#include "gem/i915_gem_object.h"
+#include "intel_display_types.h"
+#include "intel_fb.h"
+#include "intel_panic.h"
+
+struct intel_panic *intel_panic_alloc(void)
+{
+ return i915_gem_object_alloc_panic();
+}
+
+int intel_panic_setup(struct intel_panic *panic, struct drm_scanout_buffer *sb)
+{
+ struct intel_framebuffer *fb = sb->private;
+ struct drm_gem_object *obj = intel_fb_bo(&fb->base);
+
+ return i915_gem_object_panic_setup(panic, sb, obj, fb->panic_tiling);
+}
+
+void intel_panic_finish(struct intel_panic *panic)
+{
+ return i915_gem_object_panic_finish(panic);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_panic.h b/drivers/gpu/drm/i915/display/intel_panic.h
new file mode 100644
index 000000000000..afb472e924aa
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_panic.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __INTEL_PANIC_H__
+#define __INTEL_PANIC_H__
+
+struct drm_scanout_buffer;
+struct intel_panic;
+
+struct intel_panic *intel_panic_alloc(void);
+int intel_panic_setup(struct intel_panic *panic, struct drm_scanout_buffer *sb);
+void intel_panic_finish(struct intel_panic *panic);
+
+#endif /* __INTEL_PANIC_H__ */
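
The two new files introduce a thin display-side wrapper around the GEM panic
helpers, so intel_plane.c (see the hunks further down) no longer calls
through intel_bo_*. The call flow, mirroring those hunks; whether
intel_panic_alloc() runs at framebuffer init is an assumption here, as only
the setup/finish call sites are visible in this diff:

    /* allocation: reserve the panic state (may allocate memory) */
    intel_fb->panic = intel_panic_alloc();

    /* .get_scanout_buffer(): map the fb for the panic handler */
    sb->private = intel_fb;
    ret = intel_panic_setup(intel_fb->panic, sb);

    /* .panic_flush(): flush once the panic screen has been drawn */
    intel_panic_finish(intel_fb->panic);
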
diff --git a/drivers/gpu/drm/i915/display/intel_pch.h b/drivers/gpu/drm/i915/display/intel_pch.h
index cf4dab1b98bf..19cac7412d0a 100644
--- a/drivers/gpu/drm/i915/display/intel_pch.h
+++ b/drivers/gpu/drm/i915/display/intel_pch.h
@@ -6,8 +6,6 @@
#ifndef __INTEL_PCH__
#define __INTEL_PCH__
-#include "intel_display_conversion.h"
-
struct intel_display;
/*
@@ -36,7 +34,7 @@ enum intel_pch {
PCH_LNL,
};
-#define INTEL_PCH_TYPE(_display) (__to_intel_display(_display)->pch_type)
+#define INTEL_PCH_TYPE(_display) ((_display)->pch_type)
#define HAS_PCH_DG2(display) (INTEL_PCH_TYPE(display) == PCH_DG2)
#define HAS_PCH_ADP(display) (INTEL_PCH_TYPE(display) == PCH_ADP)
#define HAS_PCH_DG1(display) (INTEL_PCH_TYPE(display) == PCH_DG1)
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
index d3c5255bf1a8..9ae53679a041 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
@@ -17,16 +17,22 @@
static void lpt_fdi_reset_mphy(struct intel_display *display)
{
+ int ret;
+
intel_de_rmw(display, SOUTH_CHICKEN2, 0, FDI_MPHY_IOSFSB_RESET_CTL);
- if (wait_for_us(intel_de_read(display, SOUTH_CHICKEN2) &
- FDI_MPHY_IOSFSB_RESET_STATUS, 100))
+ ret = intel_de_wait_custom(display, SOUTH_CHICKEN2,
+ FDI_MPHY_IOSFSB_RESET_STATUS, FDI_MPHY_IOSFSB_RESET_STATUS,
+ 100, 0, NULL);
+ if (ret)
drm_err(display->drm, "FDI mPHY reset assert timeout\n");
intel_de_rmw(display, SOUTH_CHICKEN2, FDI_MPHY_IOSFSB_RESET_CTL, 0);
- if (wait_for_us((intel_de_read(display, SOUTH_CHICKEN2) &
- FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
+ ret = intel_de_wait_custom(display, SOUTH_CHICKEN2,
+ FDI_MPHY_IOSFSB_RESET_STATUS, 0,
+ 100, 0, NULL);
+ if (ret)
drm_err(display->drm, "FDI mPHY reset de-assert timeout\n");
}
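
Both waits above poll until (SOUTH_CHICKEN2 & mask) == value: the assert case
passes the status bit itself as the expected value (wait for it to latch),
the de-assert case passes 0 (wait for it to clear). As used here, the
trailing arguments are a fast timeout in microseconds (100), a slow timeout
in milliseconds (0, i.e. none), and an optional pointer for the final
register value:

    /* wait up to 100 us for the reset status bit to clear */
    ret = intel_de_wait_custom(display, SOUTH_CHICKEN2,
                               FDI_MPHY_IOSFSB_RESET_STATUS, 0,
                               100, 0, NULL);
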
diff --git a/drivers/gpu/drm/i915/display/intel_pfit.c b/drivers/gpu/drm/i915/display/intel_pfit.c
index 13541be4d6df..68539e7c2a24 100644
--- a/drivers/gpu/drm/i915/display/intel_pfit.c
+++ b/drivers/gpu/drm/i915/display/intel_pfit.c
@@ -14,6 +14,7 @@
#include "intel_lvds_regs.h"
#include "intel_pfit.h"
#include "intel_pfit_regs.h"
+#include "skl_scaler.h"
static int intel_pch_pfit_check_dst_window(const struct intel_crtc_state *crtc_state)
{
@@ -546,6 +547,16 @@ out:
return intel_gmch_pfit_check_timings(crtc_state);
}
+enum drm_mode_status
+intel_pfit_mode_valid(struct intel_display *display,
+ const struct drm_display_mode *mode,
+ enum intel_output_format output_format,
+ int num_joined_pipes)
+{
+ return skl_scaler_mode_valid(display, mode, output_format,
+ num_joined_pipes);
+}
+
int intel_pfit_compute_config(struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
diff --git a/drivers/gpu/drm/i915/display/intel_pfit.h b/drivers/gpu/drm/i915/display/intel_pfit.h
index ef34f9b49d09..c1bb0d1f344e 100644
--- a/drivers/gpu/drm/i915/display/intel_pfit.h
+++ b/drivers/gpu/drm/i915/display/intel_pfit.h
@@ -6,8 +6,12 @@
#ifndef __INTEL_PFIT_H__
#define __INTEL_PFIT_H__
+enum drm_mode_status;
+struct drm_display_mode;
struct drm_connector_state;
struct intel_crtc_state;
+struct intel_display;
+enum intel_output_format;
int intel_pfit_compute_config(struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
@@ -17,5 +21,9 @@ void ilk_pfit_get_config(struct intel_crtc_state *crtc_state);
void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state);
void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state);
void i9xx_pfit_get_config(struct intel_crtc_state *crtc_state);
-
+enum drm_mode_status
+intel_pfit_mode_valid(struct intel_display *display,
+ const struct drm_display_mode *mode,
+ enum intel_output_format output_format,
+ int num_joined_pipes);
#endif /* __INTEL_PFIT_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_plane.c b/drivers/gpu/drm/i915/display/intel_plane.c
index 36fb07471deb..2329f09d413d 100644
--- a/drivers/gpu/drm/i915/display/intel_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_plane.c
@@ -46,9 +46,7 @@
#include "gem/i915_gem_object.h"
#include "i915_scheduler_types.h"
-#include "i915_vma.h"
#include "i9xx_plane_regs.h"
-#include "intel_bo.h"
#include "intel_cdclk.h"
#include "intel_cursor.h"
#include "intel_display_rps.h"
@@ -57,6 +55,7 @@
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_fbdev.h"
+#include "intel_panic.h"
#include "intel_plane.h"
#include "intel_psr.h"
#include "skl_scaler.h"
@@ -1327,7 +1326,7 @@ static void intel_panic_flush(struct drm_plane *plane)
struct drm_framebuffer *fb = plane_state->hw.fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- intel_bo_panic_finish(intel_fb);
+ intel_panic_finish(intel_fb->panic);
if (crtc_state->enable_psr2_sel_fetch) {
/* Force a full update for psr2 */
@@ -1410,7 +1409,7 @@ static int intel_get_scanout_buffer(struct drm_plane *plane,
return -EOPNOTSUPP;
}
sb->private = intel_fb;
- ret = intel_bo_panic_setup(sb);
+ ret = intel_panic_setup(intel_fb->panic, sb);
if (ret)
return ret;
}
@@ -1749,8 +1748,3 @@ int intel_plane_atomic_check(struct intel_atomic_state *state)
return 0;
}
-
-u32 intel_plane_ggtt_offset(const struct intel_plane_state *plane_state)
-{
- return i915_ggtt_offset(plane_state->ggtt_vma);
-}
diff --git a/drivers/gpu/drm/i915/display/intel_plane.h b/drivers/gpu/drm/i915/display/intel_plane.h
index 4ef012c08fa4..8af41ccc0a69 100644
--- a/drivers/gpu/drm/i915/display/intel_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_plane.h
@@ -87,7 +87,6 @@ int intel_plane_add_affected(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int intel_plane_atomic_check(struct intel_atomic_state *state);
-u32 intel_plane_ggtt_offset(const struct intel_plane_state *plane_state);
bool intel_plane_format_mod_supported_async(struct drm_plane *plane,
u32 format,
u64 modifier);
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
index 4246173ed311..a9f36b1b50c1 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.c
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -360,6 +360,8 @@ valid_fb:
i915_vma_pin_fence(vma) == 0 && vma->fence)
plane_state->flags |= PLANE_HAS_FENCE;
+ plane_state->surf = i915_ggtt_offset(plane_state->ggtt_vma);
+
plane_state->uapi.src_x = 0;
plane_state->uapi.src_y = 0;
plane_state->uapi.src_w = fb->width << 16;
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index b64d0b30f5b1..327e0de86f1e 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -4,6 +4,7 @@
*/
#include <linux/debugfs.h>
+#include <linux/iopoll.h>
#include <drm/drm_print.h>
@@ -608,6 +609,8 @@ static void wait_panel_status(struct intel_dp *intel_dp,
struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
i915_reg_t pp_stat_reg, pp_ctrl_reg;
+ int ret;
+ u32 val;
lockdep_assert_held(&display->pps.mutex);
@@ -624,13 +627,18 @@ static void wait_panel_status(struct intel_dp *intel_dp,
intel_de_read(display, pp_stat_reg),
intel_de_read(display, pp_ctrl_reg));
- if (intel_de_wait(display, pp_stat_reg, mask, value, 5000))
+ ret = poll_timeout_us(val = intel_de_read(display, pp_stat_reg),
+ (val & mask) == value,
+ 10 * 1000, 5000 * 1000, true);
+ if (ret) {
drm_err(display->drm,
"[ENCODER:%d:%s] %s panel status timeout: PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
dig_port->base.base.base.id, dig_port->base.base.name,
pps_name(intel_dp),
intel_de_read(display, pp_stat_reg),
intel_de_read(display, pp_ctrl_reg));
+ return;
+ }
drm_dbg_kms(display->drm, "Wait complete\n");
}
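
Note the final poll_timeout_us() argument is true here, unlike the other
conversions in this series: the flag selects sleeping once before the first
read, a reasonable choice for wait_panel_status() since panel power sequencer
transitions take milliseconds anyway. The call shape, condensed:

    /* 10 ms poll period, 5 s budget, sleep before the first read */
    ret = poll_timeout_us(val = intel_de_read(display, pp_stat_reg),
                          (val & mask) == value,
                          10 * 1000, 5000 * 1000, true);
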
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index ae9053919211..01bf304c705f 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -42,6 +42,7 @@
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
+#include "intel_dsb.h"
#include "intel_frontbuffer.h"
#include "intel_hdmi.h"
#include "intel_psr.h"
@@ -233,16 +234,12 @@ bool intel_psr_needs_aux_io_power(struct intel_encoder *encoder,
static bool psr_global_enabled(struct intel_dp *intel_dp)
{
- struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DEFAULT:
- if (display->params.enable_psr == -1)
- return intel_dp_is_edp(intel_dp) ?
- connector->panel.vbt.psr.enable :
- true;
- return display->params.enable_psr;
+ return intel_dp_is_edp(intel_dp) ?
+ connector->panel.vbt.psr.enable : true;
case I915_PSR_DEBUG_DISABLE:
return false;
default:
@@ -250,39 +247,23 @@ static bool psr_global_enabled(struct intel_dp *intel_dp)
}
}
-static bool psr2_global_enabled(struct intel_dp *intel_dp)
+static bool sel_update_global_enabled(struct intel_dp *intel_dp)
{
- struct intel_display *display = to_intel_display(intel_dp);
-
switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DISABLE:
case I915_PSR_DEBUG_FORCE_PSR1:
return false;
default:
- if (display->params.enable_psr == 1)
- return false;
return true;
}
}
-static bool psr2_su_region_et_global_enabled(struct intel_dp *intel_dp)
-{
- struct intel_display *display = to_intel_display(intel_dp);
-
- if (display->params.enable_psr != -1)
- return false;
-
- return true;
-}
-
static bool panel_replay_global_enabled(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- if ((display->params.enable_psr != -1) ||
- (intel_dp->psr.debug & I915_PSR_DEBUG_PANEL_REPLAY_DISABLE))
- return false;
- return true;
+ return !(intel_dp->psr.debug & I915_PSR_DEBUG_PANEL_REPLAY_DISABLE) &&
+ display->params.enable_panel_replay;
}
static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
@@ -514,12 +495,14 @@ static u8 intel_dp_get_su_capability(struct intel_dp *intel_dp)
{
u8 su_capability = 0;
- if (intel_dp->psr.sink_panel_replay_su_support)
- drm_dp_dpcd_readb(&intel_dp->aux,
- DP_PANEL_REPLAY_CAP_CAPABILITY,
- &su_capability);
- else
+ if (intel_dp->psr.sink_panel_replay_su_support) {
+ if (drm_dp_dpcd_read_byte(&intel_dp->aux,
+ DP_PANEL_REPLAY_CAP_CAPABILITY,
+ &su_capability) < 0)
+ return 0;
+ } else {
su_capability = intel_dp->psr_dpcd[1];
+ }
return su_capability;
}
@@ -600,6 +583,16 @@ exit:
static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
+ int ret;
+
+ ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PANEL_REPLAY_CAP_SUPPORT,
+ &intel_dp->pr_dpcd, sizeof(intel_dp->pr_dpcd));
+ if (ret < 0)
+ return;
+
+ if (!(intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
+ DP_PANEL_REPLAY_SUPPORT))
+ return;
if (intel_dp_is_edp(intel_dp)) {
if (!intel_alpm_aux_less_wake_supported(intel_dp)) {
@@ -631,6 +624,15 @@ static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
static void _psr_init_dpcd(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
+ int ret;
+
+ ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
+ sizeof(intel_dp->psr_dpcd));
+ if (ret < 0)
+ return;
+
+ if (!intel_dp->psr_dpcd[0])
+ return;
drm_dbg_kms(display->drm, "eDP panel supports PSR version %x\n",
intel_dp->psr_dpcd[0]);
@@ -676,18 +678,9 @@ static void _psr_init_dpcd(struct intel_dp *intel_dp)
void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
- drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
- sizeof(intel_dp->psr_dpcd));
-
- drm_dp_dpcd_read(&intel_dp->aux, DP_PANEL_REPLAY_CAP_SUPPORT,
- &intel_dp->pr_dpcd, sizeof(intel_dp->pr_dpcd));
+ _psr_init_dpcd(intel_dp);
- if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
- DP_PANEL_REPLAY_SUPPORT)
- _panel_replay_init_dpcd(intel_dp);
-
- if (intel_dp->psr_dpcd[0])
- _psr_init_dpcd(intel_dp);
+ _panel_replay_init_dpcd(intel_dp);
if (intel_dp->psr.sink_psr2_support ||
intel_dp->psr.sink_panel_replay_su_support)
@@ -742,8 +735,7 @@ static bool psr2_su_region_et_valid(struct intel_dp *intel_dp, bool panel_replay
return panel_replay ?
intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT :
- intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED &&
- psr2_su_region_et_global_enabled(intel_dp);
+ intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED;
}
static void _panel_replay_enable_sink(struct intel_dp *intel_dp,
@@ -936,7 +928,7 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
/* Wa_16025596647 */
if ((DISPLAY_VER(display) == 20 ||
IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
- is_dc5_dc6_blocked(intel_dp))
+ is_dc5_dc6_blocked(intel_dp) && intel_dp->psr.pkg_c_latency_used)
intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
intel_dp->psr.pipe,
true);
@@ -1026,7 +1018,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
/* Wa_16025596647 */
if ((DISPLAY_VER(display) == 20 ||
IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
- is_dc5_dc6_blocked(intel_dp))
+ is_dc5_dc6_blocked(intel_dp) && intel_dp->psr.pkg_c_latency_used)
idle_frames = 0;
else
idle_frames = psr_compute_idle_frames(intel_dp);
@@ -1423,7 +1415,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
- if (!intel_dp->psr.sink_psr2_support)
+ if (!intel_dp->psr.sink_psr2_support || display->params.enable_psr == 1)
return false;
/* JSL and EHL only supports eDP 1.3 */
@@ -1528,7 +1520,7 @@ static bool intel_sel_update_config_valid(struct intel_dp *intel_dp,
goto unsupported;
}
- if (!psr2_global_enabled(intel_dp)) {
+ if (!sel_update_global_enabled(intel_dp)) {
drm_dbg_kms(display->drm,
"Selective update disabled by flag\n");
goto unsupported;
@@ -1576,7 +1568,7 @@ static bool _psr_compute_config(struct intel_dp *intel_dp,
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
int entry_setup_frames;
- if (!CAN_PSR(intel_dp))
+ if (!CAN_PSR(intel_dp) || !display->params.enable_psr)
return false;
/*
@@ -1808,6 +1800,8 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
drm_WARN_ON(display->drm, intel_dp->psr.active);
+ drm_WARN_ON(display->drm, !intel_dp->psr.enabled);
+
lockdep_assert_held(&intel_dp->psr.lock);
/* psr1, psr2 and panel-replay are mutually exclusive.*/
@@ -2027,6 +2021,7 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
intel_dp->psr.req_psr2_sdp_prior_scanline =
crtc_state->req_psr2_sdp_prior_scanline;
intel_dp->psr.active_non_psr_pipes = crtc_state->active_non_psr_pipes;
+ intel_dp->psr.pkg_c_latency_used = crtc_state->pkg_c_latency_used;
if (!psr_interrupt_error_check(intel_dp))
return;
@@ -2103,8 +2098,9 @@ static void intel_psr_exit(struct intel_dp *intel_dp)
drm_WARN_ON(display->drm, !(val & EDP_PSR2_ENABLE));
} else {
- if (DISPLAY_VER(display) == 20 ||
- IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ if ((DISPLAY_VER(display) == 20 ||
+ IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
+ intel_dp->psr.pkg_c_latency_used)
intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
intel_dp->psr.pipe,
false);
@@ -2207,6 +2203,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
intel_dp->psr.su_region_et_enabled = false;
intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
intel_dp->psr.active_non_psr_pipes = 0;
+ intel_dp->psr.pkg_c_latency_used = 0;
}
/**
@@ -3003,35 +3000,57 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state,
}
}
-static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
+/*
+ * From bspec: Panel Self Refresh (BDW+)
+ * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
+ * exit training time + 1.5 ms of aux channel handshake. 50 ms is
+ * defensive enough to cover everything.
+ */
+#define PSR_IDLE_TIMEOUT_MS 50
+
+static int
+_psr2_ready_for_pipe_update_locked(const struct intel_crtc_state *new_crtc_state,
+ struct intel_dsb *dsb)
{
- struct intel_display *display = to_intel_display(intel_dp);
- enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
+ struct intel_display *display = to_intel_display(new_crtc_state);
+ enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
/*
* Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
* As all higher states have bit 4 of PSR2 state set we can just wait for
* EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
*/
+ if (dsb) {
+ intel_dsb_poll(dsb, EDP_PSR2_STATUS(display, cpu_transcoder),
+ EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 0, 200,
+ PSR_IDLE_TIMEOUT_MS * 1000 / 200);
+ return 0;
+ }
+
return intel_de_wait_for_clear(display,
EDP_PSR2_STATUS(display, cpu_transcoder),
- EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
+ EDP_PSR2_STATUS_STATE_DEEP_SLEEP,
+ PSR_IDLE_TIMEOUT_MS);
}
-static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
+static int
+_psr1_ready_for_pipe_update_locked(const struct intel_crtc_state *new_crtc_state,
+ struct intel_dsb *dsb)
{
- struct intel_display *display = to_intel_display(intel_dp);
- enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
+ struct intel_display *display = to_intel_display(new_crtc_state);
+ enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
+
+ if (dsb) {
+ intel_dsb_poll(dsb, psr_status_reg(display, cpu_transcoder),
+ EDP_PSR_STATUS_STATE_MASK, 0, 200,
+ PSR_IDLE_TIMEOUT_MS * 1000 / 200);
+ return 0;
+ }
- /*
- * From bspec: Panel Self Refresh (BDW+)
- * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
- * exit training time + 1.5 ms of aux channel handshake. 50 ms is
- * defensive enough to cover everything.
- */
return intel_de_wait_for_clear(display,
psr_status_reg(display, cpu_transcoder),
- EDP_PSR_STATUS_STATE_MASK, 50);
+ EDP_PSR_STATUS_STATE_MASK,
+ PSR_IDLE_TIMEOUT_MS);
}
/**
@@ -3060,9 +3079,11 @@ void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_stat
continue;
if (intel_dp->psr.sel_update_enabled)
- ret = _psr2_ready_for_pipe_update_locked(intel_dp);
+ ret = _psr2_ready_for_pipe_update_locked(new_crtc_state,
+ NULL);
else
- ret = _psr1_ready_for_pipe_update_locked(intel_dp);
+ ret = _psr1_ready_for_pipe_update_locked(new_crtc_state,
+ NULL);
if (ret)
drm_err(display->drm,
@@ -3070,6 +3091,18 @@ void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_stat
}
}
+void intel_psr_wait_for_idle_dsb(struct intel_dsb *dsb,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ if (!new_crtc_state->has_psr || new_crtc_state->has_panel_replay)
+ return;
+
+ if (new_crtc_state->has_sel_update)
+ _psr2_ready_for_pipe_update_locked(new_crtc_state, dsb);
+ else
+ _psr1_ready_for_pipe_update_locked(new_crtc_state, dsb);
+}
+
static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
@@ -3099,7 +3132,7 @@ static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
/* After the unlocked wait, verify that PSR is still wanted! */
mutex_lock(&intel_dp->psr.lock);
- return err == 0 && intel_dp->psr.enabled;
+ return err == 0 && intel_dp->psr.enabled && !intel_dp->psr.pause_counter;
}
static int intel_psr_fastset_force(struct intel_display *display)
@@ -3228,8 +3261,13 @@ static void intel_psr_work(struct work_struct *work)
if (!intel_dp->psr.enabled)
goto unlock;
- if (READ_ONCE(intel_dp->psr.irq_aux_error))
+ if (READ_ONCE(intel_dp->psr.irq_aux_error)) {
intel_psr_handle_irq(intel_dp);
+ goto unlock;
+ }
+
+ if (intel_dp->psr.pause_counter)
+ goto unlock;
/*
* We have to make sure PSR is ready for re-enable
@@ -3275,7 +3313,9 @@ static void intel_psr_configure_full_frame_update(struct intel_dp *intel_dp)
static void _psr_invalidate_handle(struct intel_dp *intel_dp)
{
- if (intel_dp->psr.psr2_sel_fetch_enabled) {
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) {
if (!intel_dp->psr.psr2_sel_fetch_cff_enabled) {
intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
intel_psr_configure_full_frame_update(intel_dp);
@@ -3361,7 +3401,7 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- if (intel_dp->psr.psr2_sel_fetch_enabled) {
+ if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) {
if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
/* can we turn CFF off? */
if (intel_dp->psr.busy_frontbuffer_bits == 0)
@@ -3378,11 +3418,13 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
* existing SU configuration
*/
intel_psr_configure_full_frame_update(intel_dp);
- }
- intel_psr_force_update(intel_dp);
+ intel_psr_force_update(intel_dp);
+ } else {
+ intel_psr_exit(intel_dp);
+ }
- if (!intel_dp->psr.psr2_sel_fetch_enabled && !intel_dp->psr.active &&
+ if ((!intel_dp->psr.psr2_sel_fetch_enabled || DISPLAY_VER(display) >= 20) &&
!intel_dp->psr.busy_frontbuffer_bits)
queue_work(display->wq.unordered, &intel_dp->psr.work);
}
@@ -3719,7 +3761,7 @@ static void intel_psr_apply_underrun_on_idle_wa_locked(struct intel_dp *intel_dp
struct intel_display *display = to_intel_display(intel_dp);
bool dc5_dc6_blocked;
- if (!intel_dp->psr.active)
+ if (!intel_dp->psr.active || !intel_dp->psr.pkg_c_latency_used)
return;
dc5_dc6_blocked = is_dc5_dc6_blocked(intel_dp);
@@ -3744,7 +3786,8 @@ static void psr_dc5_dc6_wa_work(struct work_struct *work)
mutex_lock(&intel_dp->psr.lock);
- if (intel_dp->psr.enabled && !intel_dp->psr.panel_replay_enabled)
+ if (intel_dp->psr.enabled && !intel_dp->psr.panel_replay_enabled &&
+ !intel_dp->psr.pkg_c_latency_used)
intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
mutex_unlock(&intel_dp->psr.lock);
@@ -3822,7 +3865,8 @@ void intel_psr_notify_pipe_change(struct intel_atomic_state *state,
goto unlock;
if ((enable && intel_dp->psr.active_non_psr_pipes) ||
- (!enable && !intel_dp->psr.active_non_psr_pipes)) {
+ (!enable && !intel_dp->psr.active_non_psr_pipes) ||
+ !intel_dp->psr.pkg_c_latency_used) {
intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes;
goto unlock;
}
@@ -3857,7 +3901,7 @@ void intel_psr_notify_vblank_enable_disable(struct intel_display *display,
break;
}
- if (intel_dp->psr.enabled)
+ if (intel_dp->psr.enabled && intel_dp->psr.pkg_c_latency_used)
intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
mutex_unlock(&intel_dp->psr.lock);
@@ -4153,12 +4197,12 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
void intel_psr_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
+ struct dentry *debugfs_root = display->drm->debugfs_root;
- debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
+ debugfs_create_file("i915_edp_psr_debug", 0644, debugfs_root,
display, &i915_edp_psr_debug_fops);
- debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
+ debugfs_create_file("i915_edp_psr_status", 0444, debugfs_root,
display, &i915_edp_psr_status_fops);
}
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index 9b061a22361f..077751aa599f 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -52,6 +52,8 @@ void intel_psr_get_config(struct intel_encoder *encoder,
void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir);
void intel_psr_short_pulse(struct intel_dp *intel_dp);
void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state);
+void intel_psr_wait_for_idle_dsb(struct intel_dsb *dsb,
+ const struct intel_crtc_state *new_crtc_state);
bool intel_psr_enabled(struct intel_dp *intel_dp);
int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
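
Taken together, the intel_psr.c hunks relocate the enable_psr module
parameter checks rather than delete them. The effective gating after the
refactor, as read from the new call sites (a summary, not driver code):

    /* enable_psr == 0 -> _psr_compute_config() fails: no PSR at all
     * enable_psr == 1 -> intel_psr2_config_valid() fails: PSR1 only
     * other values    -> PSR1 and selective update both allowed
     * panel replay    -> gated by enable_panel_replay plus the
     *                    I915_PSR_DEBUG_PANEL_REPLAY_DISABLE flag */
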
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index a32fae510ed2..d2e16b79d6be 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -80,6 +80,12 @@ static void quirk_fw_sync_len(struct intel_dp *intel_dp)
drm_info(display->drm, "Applying Fast Wake sync pulse count quirk\n");
}
+static void quirk_edp_limit_rate_hbr2(struct intel_display *display)
+{
+ intel_set_quirk(display, QUIRK_EDP_LIMIT_RATE_HBR2);
+ drm_info(display->drm, "Applying eDP Limit rate to HBR2 quirk\n");
+}
+
struct intel_quirk {
int device;
int subsystem_vendor;
@@ -231,6 +237,9 @@ static struct intel_quirk intel_quirks[] = {
{ 0x3184, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
/* HP Notebook - 14-r206nv */
{ 0x0f31, 0x103c, 0x220f, quirk_invert_brightness },
+
+ /* Dell XPS 13 7390 2-in-1 */
+ { 0x8a12, 0x1028, 0x08b0, quirk_edp_limit_rate_hbr2 },
};
static const struct intel_dpcd_quirk intel_dpcd_quirks[] = {
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.h b/drivers/gpu/drm/i915/display/intel_quirks.h
index cafdebda7535..06da0e286c67 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.h
+++ b/drivers/gpu/drm/i915/display/intel_quirks.h
@@ -20,6 +20,7 @@ enum intel_quirk_id {
QUIRK_LVDS_SSC_DISABLE,
QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK,
QUIRK_FW_SYNC_LEN,
+ QUIRK_EDP_LIMIT_RATE_HBR2,
};
void intel_init_quirks(struct intel_display *display);
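
intel_quirks entries match on PCI device, subsystem vendor and subsystem
device, so { 0x8a12, 0x1028, 0x08b0, ... } pins the new hook to that exact
Dell XPS 13 7390 2-in-1 board. A hedged sketch of the consumer side,
assuming the existing intel_has_quirk() accessor and the kernel's kHz
link-rate convention:

    /* hypothetical eDP link-rate selection call site */
    if (intel_has_quirk(display, QUIRK_EDP_LIMIT_RATE_HBR2))
            max_rate = min(max_rate, 540000); /* cap at HBR2, 5.4 Gbps */
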
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 87aff2754f69..6c032d81e7ee 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -47,11 +47,11 @@
#include "intel_display_driver.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
-#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
+#include "intel_link_bw.h"
#include "intel_panel.h"
#include "intel_sdvo.h"
#include "intel_sdvo_regs.h"
@@ -1367,7 +1367,7 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(display)) {
pipe_config->has_pch_encoder = true;
- if (!intel_fdi_compute_pipe_bpp(pipe_config))
+ if (!intel_link_bw_compute_pipe_bpp(pipe_config))
return -EINVAL;
}
@@ -2052,8 +2052,10 @@ static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
{
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
- intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG,
- &intel_sdvo->hotplug_active, 2);
+ if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG,
+ &intel_sdvo->hotplug_active, 2))
+ drm_warn(intel_sdvo->base.base.dev,
+ "Failed to enable hotplug on SDVO encoder\n");
}
static enum intel_hotplug_state
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index e6844df837af..75bbaa923204 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -264,8 +264,7 @@ static u32 vlv_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
return sprctl;
}
-static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 vlv_sprite_ctl(const struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int rotation = plane_state->hw.rotation;
@@ -395,15 +394,12 @@ vlv_sprite_update_arm(struct intel_dsb *dsb,
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- u32 sprsurf_offset = plane_state->view.color_plane[0].offset;
u32 x = plane_state->view.color_plane[0].x;
u32 y = plane_state->view.color_plane[0].y;
- u32 sprctl, linear_offset;
+ u32 sprctl;
sprctl = plane_state->ctl | vlv_sprite_ctl_crtc(crtc_state);
- linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
-
if (display->platform.cherryview && pipe == PIPE_B)
chv_sprite_update_csc(plane_state);
@@ -418,7 +414,8 @@ vlv_sprite_update_arm(struct intel_dsb *dsb,
intel_de_write_fw(display, SPCONSTALPHA(pipe, plane_id), 0);
- intel_de_write_fw(display, SPLINOFF(pipe, plane_id), linear_offset);
+ intel_de_write_fw(display, SPLINOFF(pipe, plane_id),
+ intel_fb_xy_to_linear(x, y, plane_state, 0));
intel_de_write_fw(display, SPTILEOFF(pipe, plane_id),
SP_OFFSET_Y(y) | SP_OFFSET_X(x));
@@ -428,8 +425,7 @@ vlv_sprite_update_arm(struct intel_dsb *dsb,
* the control register just before the surface register.
*/
intel_de_write_fw(display, SPCNTR(pipe, plane_id), sprctl);
- intel_de_write_fw(display, SPSURF(pipe, plane_id),
- intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
+ intel_de_write_fw(display, SPSURF(pipe, plane_id), plane_state->surf);
vlv_sprite_update_clrc(plane_state);
vlv_sprite_update_gamma(plane_state);
@@ -663,8 +659,7 @@ static bool ivb_need_sprite_gamma(const struct intel_plane_state *plane_state)
(display->platform.ivybridge || display->platform.haswell);
}
-static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 ivb_sprite_ctl(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
@@ -830,15 +825,12 @@ ivb_sprite_update_arm(struct intel_dsb *dsb,
struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- u32 sprsurf_offset = plane_state->view.color_plane[0].offset;
u32 x = plane_state->view.color_plane[0].x;
u32 y = plane_state->view.color_plane[0].y;
- u32 sprctl, linear_offset;
+ u32 sprctl;
sprctl = plane_state->ctl | ivb_sprite_ctl_crtc(crtc_state);
- linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
-
if (key->flags) {
intel_de_write_fw(display, SPRKEYVAL(pipe), key->min_value);
intel_de_write_fw(display, SPRKEYMSK(pipe),
@@ -852,7 +844,8 @@ ivb_sprite_update_arm(struct intel_dsb *dsb,
intel_de_write_fw(display, SPROFFSET(pipe),
SPRITE_OFFSET_Y(y) | SPRITE_OFFSET_X(x));
} else {
- intel_de_write_fw(display, SPRLINOFF(pipe), linear_offset);
+ intel_de_write_fw(display, SPRLINOFF(pipe),
+ intel_fb_xy_to_linear(x, y, plane_state, 0));
intel_de_write_fw(display, SPRTILEOFF(pipe),
SPRITE_OFFSET_Y(y) | SPRITE_OFFSET_X(x));
}
@@ -863,8 +856,7 @@ ivb_sprite_update_arm(struct intel_dsb *dsb,
* the control register just before the surface register.
*/
intel_de_write_fw(display, SPRCTL(pipe), sprctl);
- intel_de_write_fw(display, SPRSURF(pipe),
- intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
+ intel_de_write_fw(display, SPRSURF(pipe), plane_state->surf);
ivb_sprite_update_gamma(plane_state);
}
@@ -1016,8 +1008,7 @@ static u32 g4x_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
return dvscntr;
}
-static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 g4x_sprite_ctl(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
@@ -1181,15 +1172,12 @@ g4x_sprite_update_arm(struct intel_dsb *dsb,
struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- u32 dvssurf_offset = plane_state->view.color_plane[0].offset;
u32 x = plane_state->view.color_plane[0].x;
u32 y = plane_state->view.color_plane[0].y;
- u32 dvscntr, linear_offset;
+ u32 dvscntr;
dvscntr = plane_state->ctl | g4x_sprite_ctl_crtc(crtc_state);
- linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
-
if (key->flags) {
intel_de_write_fw(display, DVSKEYVAL(pipe), key->min_value);
intel_de_write_fw(display, DVSKEYMSK(pipe),
@@ -1197,7 +1185,8 @@ g4x_sprite_update_arm(struct intel_dsb *dsb,
intel_de_write_fw(display, DVSKEYMAX(pipe), key->max_value);
}
- intel_de_write_fw(display, DVSLINOFF(pipe), linear_offset);
+ intel_de_write_fw(display, DVSLINOFF(pipe),
+ intel_fb_xy_to_linear(x, y, plane_state, 0));
intel_de_write_fw(display, DVSTILEOFF(pipe),
DVS_OFFSET_Y(y) | DVS_OFFSET_X(x));
@@ -1207,8 +1196,7 @@ g4x_sprite_update_arm(struct intel_dsb *dsb,
* the control register just before the surface register.
*/
intel_de_write_fw(display, DVSCNTR(pipe), dvscntr);
- intel_de_write_fw(display, DVSSURF(pipe),
- intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
+ intel_de_write_fw(display, DVSSURF(pipe), plane_state->surf);
if (display->platform.g4x)
g4x_sprite_update_gamma(plane_state);
@@ -1387,9 +1375,9 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
return ret;
if (DISPLAY_VER(display) >= 7)
- plane_state->ctl = ivb_sprite_ctl(crtc_state, plane_state);
+ plane_state->ctl = ivb_sprite_ctl(plane_state);
else
- plane_state->ctl = g4x_sprite_ctl(crtc_state, plane_state);
+ plane_state->ctl = g4x_sprite_ctl(plane_state);
return 0;
}
@@ -1439,7 +1427,7 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- plane_state->ctl = vlv_sprite_ctl(crtc_state, plane_state);
+ plane_state->ctl = vlv_sprite_ctl(plane_state);
return 0;
}
@@ -1624,6 +1612,7 @@ intel_sprite_plane_create(struct intel_display *display,
plane->capture_error = vlv_sprite_capture_error;
plane->get_hw_state = vlv_sprite_get_hw_state;
plane->check_plane = vlv_sprite_check;
+ plane->surf_offset = i965_plane_surf_offset;
plane->max_stride = i965_plane_max_stride;
plane->min_alignment = vlv_plane_min_alignment;
plane->min_cdclk = vlv_plane_min_cdclk;
@@ -1648,6 +1637,7 @@ intel_sprite_plane_create(struct intel_display *display,
plane->capture_error = ivb_sprite_capture_error;
plane->get_hw_state = ivb_sprite_get_hw_state;
plane->check_plane = g4x_sprite_check;
+ plane->surf_offset = i965_plane_surf_offset;
if (display->platform.broadwell || display->platform.haswell) {
plane->max_stride = hsw_sprite_max_stride;
@@ -1673,6 +1663,7 @@ intel_sprite_plane_create(struct intel_display *display,
plane->capture_error = g4x_sprite_capture_error;
plane->get_hw_state = g4x_sprite_get_hw_state;
plane->check_plane = g4x_sprite_check;
+ plane->surf_offset = i965_plane_surf_offset;
plane->max_stride = g4x_sprite_max_stride;
plane->min_alignment = g4x_sprite_min_alignment;
plane->min_cdclk = g4x_sprite_min_cdclk;
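
The sprite hunks above stop computing the surface address at arm time (intel_plane_ggtt_offset() + view.color_plane[0].offset) and instead consume a precomputed plane_state->surf, produced via the new plane->surf_offset hook. A minimal sketch of the assumed split; i965_plane_surf_offset()'s body and the precompute call site are not shown in this series, so both are reconstructed here:

static u32 i965_plane_surf_offset_sketch(const struct intel_plane_state *plane_state)
{
        /* Matches the offset the removed update_arm() code added by hand. */
        return plane_state->view.color_plane[0].offset;
}

static void plane_precompute_surf_sketch(struct intel_plane *plane,
                                         struct intel_plane_state *plane_state)
{
        /* Assumed to run once at check time, so the arm-time path only
         * performs register writes.
         */
        plane_state->surf = intel_plane_ggtt_offset(plane_state) +
                plane->surf_offset(plane_state);
}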
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 3bc57579fe53..c4a5601c5107 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -3,6 +3,8 @@
* Copyright © 2019 Intel Corporation
*/
+#include <linux/iopoll.h>
+
#include <drm/drm_print.h>
#include "i915_reg.h"
@@ -23,10 +25,6 @@
#include "intel_modeset_lock.h"
#include "intel_tc.h"
-#define DP_PIN_ASSIGNMENT_C 0x3
-#define DP_PIN_ASSIGNMENT_D 0x4
-#define DP_PIN_ASSIGNMENT_E 0x5
-
enum tc_port_mode {
TC_PORT_DISCONNECTED,
TC_PORT_TBT_ALT,
@@ -65,7 +63,9 @@ struct intel_tc_port {
enum tc_port_mode mode;
enum tc_port_mode init_mode;
enum phy_fia phy_fia;
+ enum intel_tc_pin_assignment pin_assignment;
u8 phy_fia_idx;
+ u8 max_lane_count;
};
static enum intel_display_power_domain
@@ -251,6 +251,9 @@ tc_port_power_domain(struct intel_tc_port *tc)
{
enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
+ if (tc_port == TC_PORT_NONE)
+ return POWER_DOMAIN_INVALID;
+
return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1;
}
@@ -263,13 +266,14 @@ assert_tc_port_power_enabled(struct intel_tc_port *tc)
!intel_display_power_is_enabled(display, tc_port_power_domain(tc)));
}
-static u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
+static u32 get_lane_mask(struct intel_tc_port *tc)
{
- struct intel_display *display = to_intel_display(dig_port);
- struct intel_tc_port *tc = to_tc_port(dig_port);
+ struct intel_display *display = to_intel_display(tc->dig_port);
+ intel_wakeref_t wakeref;
u32 lane_mask;
- lane_mask = intel_de_read(display, PORT_TX_DFLEXDPSP(tc->phy_fia));
+ with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref)
+ lane_mask = intel_de_read(display, PORT_TX_DFLEXDPSP(tc->phy_fia));
drm_WARN_ON(display->drm, lane_mask == 0xffffffff);
assert_tc_cold_blocked(tc);
@@ -278,75 +282,87 @@ static u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
}
-u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
+static char pin_assignment_name(enum intel_tc_pin_assignment pin_assignment)
{
- struct intel_display *display = to_intel_display(dig_port);
- struct intel_tc_port *tc = to_tc_port(dig_port);
- u32 pin_mask;
-
- pin_mask = intel_de_read(display, PORT_TX_DFLEXPA1(tc->phy_fia));
-
- drm_WARN_ON(display->drm, pin_mask == 0xffffffff);
- assert_tc_cold_blocked(tc);
+ if (pin_assignment == INTEL_TC_PIN_ASSIGNMENT_NONE)
+ return '-';
- return (pin_mask & DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx)) >>
- DP_PIN_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
+ return 'A' + pin_assignment - INTEL_TC_PIN_ASSIGNMENT_A;
}
-static int lnl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
+static enum intel_tc_pin_assignment
+get_pin_assignment(struct intel_tc_port *tc)
{
- struct intel_display *display = to_intel_display(dig_port);
- enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
+ struct intel_display *display = to_intel_display(tc->dig_port);
+ enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
+ enum intel_tc_pin_assignment pin_assignment;
intel_wakeref_t wakeref;
- u32 val, pin_assignment;
+ i915_reg_t reg;
+ u32 mask;
+ u32 val;
+
+ if (tc->mode == TC_PORT_TBT_ALT)
+ return INTEL_TC_PIN_ASSIGNMENT_NONE;
+
+ if (DISPLAY_VER(display) >= 20) {
+ reg = TCSS_DDI_STATUS(tc_port);
+ mask = TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK;
+ } else {
+ reg = PORT_TX_DFLEXPA1(tc->phy_fia);
+ mask = DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx);
+ }
with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref)
- val = intel_de_read(display, TCSS_DDI_STATUS(tc_port));
+ val = intel_de_read(display, reg);
- pin_assignment =
- REG_FIELD_GET(TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK, val);
+ drm_WARN_ON(display->drm, val == 0xffffffff);
+ assert_tc_cold_blocked(tc);
+
+ pin_assignment = (val & mask) >> (ffs(mask) - 1);
switch (pin_assignment) {
+ case INTEL_TC_PIN_ASSIGNMENT_A:
+ case INTEL_TC_PIN_ASSIGNMENT_B:
+ case INTEL_TC_PIN_ASSIGNMENT_F:
+ drm_WARN_ON(display->drm, DISPLAY_VER(display) > 11);
+ break;
+ case INTEL_TC_PIN_ASSIGNMENT_NONE:
+ case INTEL_TC_PIN_ASSIGNMENT_C:
+ case INTEL_TC_PIN_ASSIGNMENT_D:
+ case INTEL_TC_PIN_ASSIGNMENT_E:
+ break;
default:
MISSING_CASE(pin_assignment);
- fallthrough;
- case DP_PIN_ASSIGNMENT_D:
- return 2;
- case DP_PIN_ASSIGNMENT_C:
- case DP_PIN_ASSIGNMENT_E:
- return 4;
}
+
+ return pin_assignment;
}
-static int mtl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
+static int mtl_get_max_lane_count(struct intel_tc_port *tc)
{
- struct intel_display *display = to_intel_display(dig_port);
- intel_wakeref_t wakeref;
- u32 pin_mask;
+ enum intel_tc_pin_assignment pin_assignment;
- with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref)
- pin_mask = intel_tc_port_get_pin_assignment_mask(dig_port);
+ pin_assignment = get_pin_assignment(tc);
- switch (pin_mask) {
+ switch (pin_assignment) {
+ case INTEL_TC_PIN_ASSIGNMENT_NONE:
+ return 0;
default:
- MISSING_CASE(pin_mask);
+ MISSING_CASE(pin_assignment);
fallthrough;
- case DP_PIN_ASSIGNMENT_D:
+ case INTEL_TC_PIN_ASSIGNMENT_D:
return 2;
- case DP_PIN_ASSIGNMENT_C:
- case DP_PIN_ASSIGNMENT_E:
+ case INTEL_TC_PIN_ASSIGNMENT_C:
+ case INTEL_TC_PIN_ASSIGNMENT_E:
return 4;
}
}
-static int intel_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
+static int icl_get_max_lane_count(struct intel_tc_port *tc)
{
- struct intel_display *display = to_intel_display(dig_port);
- intel_wakeref_t wakeref;
u32 lane_mask = 0;
- with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref)
- lane_mask = intel_tc_port_get_lane_mask(dig_port);
+ lane_mask = get_lane_mask(tc);
switch (lane_mask) {
default:
@@ -365,23 +381,44 @@ static int intel_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
}
}
+static int get_max_lane_count(struct intel_tc_port *tc)
+{
+ struct intel_display *display = to_intel_display(tc->dig_port);
+
+ if (tc->mode != TC_PORT_DP_ALT)
+ return 4;
+
+ if (DISPLAY_VER(display) >= 14)
+ return mtl_get_max_lane_count(tc);
+
+ return icl_get_max_lane_count(tc);
+}
+
+static void read_pin_configuration(struct intel_tc_port *tc)
+{
+ tc->pin_assignment = get_pin_assignment(tc);
+ tc->max_lane_count = get_max_lane_count(tc);
+}
+
int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
{
- struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
- if (!intel_encoder_is_tc(&dig_port->base) || tc->mode != TC_PORT_DP_ALT)
+ if (!intel_encoder_is_tc(&dig_port->base))
return 4;
- assert_tc_cold_blocked(tc);
+ return tc->max_lane_count;
+}
- if (DISPLAY_VER(display) >= 20)
- return lnl_tc_port_get_max_lane_count(dig_port);
+enum intel_tc_pin_assignment
+intel_tc_port_get_pin_assignment(struct intel_digital_port *dig_port)
+{
+ struct intel_tc_port *tc = to_tc_port(dig_port);
- if (DISPLAY_VER(display) >= 14)
- return mtl_tc_port_get_max_lane_count(dig_port);
+ if (!intel_encoder_is_tc(&dig_port->base))
+ return INTEL_TC_PIN_ASSIGNMENT_NONE;
- return intel_tc_port_get_max_lane_count(dig_port);
+ return tc->pin_assignment;
}
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
@@ -596,9 +633,12 @@ static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc)
tc_cold_wref = __tc_cold_block(tc, &domain);
tc->mode = tc_phy_get_current_mode(tc);
- if (tc->mode != TC_PORT_DISCONNECTED)
+ if (tc->mode != TC_PORT_DISCONNECTED) {
tc->lock_wakeref = tc_cold_block(tc);
+ read_pin_configuration(tc);
+ }
+
__tc_cold_unblock(tc, domain, tc_cold_wref);
}
@@ -656,8 +696,11 @@ static bool icl_tc_phy_connect(struct intel_tc_port *tc,
tc->lock_wakeref = tc_cold_block(tc);
- if (tc->mode == TC_PORT_TBT_ALT)
+ if (tc->mode == TC_PORT_TBT_ALT) {
+ read_pin_configuration(tc);
+
return true;
+ }
if ((!tc_phy_is_ready(tc) ||
!icl_tc_phy_take_ownership(tc, true)) &&
@@ -668,6 +711,7 @@ static bool icl_tc_phy_connect(struct intel_tc_port *tc,
goto out_unblock_tc_cold;
}
+ read_pin_configuration(tc);
if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
goto out_release_phy;
@@ -858,9 +902,12 @@ static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc)
port_wakeref = intel_display_power_get(display, port_power_domain);
tc->mode = tc_phy_get_current_mode(tc);
- if (tc->mode != TC_PORT_DISCONNECTED)
+ if (tc->mode != TC_PORT_DISCONNECTED) {
tc->lock_wakeref = tc_cold_block(tc);
+ read_pin_configuration(tc);
+ }
+
intel_display_power_put(display, port_power_domain, port_wakeref);
}
@@ -873,6 +920,9 @@ static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
if (tc->mode == TC_PORT_TBT_ALT) {
tc->lock_wakeref = tc_cold_block(tc);
+
+ read_pin_configuration(tc);
+
return true;
}
@@ -894,6 +944,8 @@ static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
tc->lock_wakeref = tc_cold_block(tc);
+ read_pin_configuration(tc);
+
if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
goto out_unblock_tc_cold;
@@ -1000,8 +1052,13 @@ static bool
xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled)
{
struct intel_display *display = to_intel_display(tc->dig_port);
+ bool is_enabled;
+ int ret;
- if (wait_for(xelpdp_tc_phy_tcss_power_is_enabled(tc) == enabled, 5)) {
+ ret = poll_timeout_us(is_enabled = xelpdp_tc_phy_tcss_power_is_enabled(tc),
+ is_enabled == enabled,
+ 200, 5000, false);
+ if (ret) {
drm_dbg_kms(display->drm,
"Port %s: timeout waiting for TCSS power to get %s\n",
str_enabled_disabled(enabled),
@@ -1124,9 +1181,18 @@ static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc)
tc_cold_wref = __tc_cold_block(tc, &domain);
tc->mode = tc_phy_get_current_mode(tc);
- if (tc->mode != TC_PORT_DISCONNECTED)
+ if (tc->mode != TC_PORT_DISCONNECTED) {
tc->lock_wakeref = tc_cold_block(tc);
+ read_pin_configuration(tc);
+ /*
+ * Set a valid lane count value for a DP-alt sink which got
+ * disconnected. The driver can only disable the output on this PHY.
+ */
+ if (tc->max_lane_count == 0)
+ tc->max_lane_count = 4;
+ }
+
drm_WARN_ON(display->drm,
(tc->mode == TC_PORT_DP_ALT || tc->mode == TC_PORT_LEGACY) &&
!xelpdp_tc_phy_tcss_power_is_enabled(tc));
@@ -1138,14 +1204,19 @@ static bool xelpdp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
tc->lock_wakeref = tc_cold_block(tc);
- if (tc->mode == TC_PORT_TBT_ALT)
+ if (tc->mode == TC_PORT_TBT_ALT) {
+ read_pin_configuration(tc);
+
return true;
+ }
if (!xelpdp_tc_phy_enable_tcss_power(tc, true))
goto out_unblock_tccold;
xelpdp_tc_phy_take_ownership(tc, true);
+ read_pin_configuration(tc);
+
if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
goto out_release_phy;
@@ -1226,14 +1297,19 @@ static void tc_phy_get_hw_state(struct intel_tc_port *tc)
tc->phy_ops->get_hw_state(tc);
}
-static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc,
- bool phy_is_ready, bool phy_is_owned)
+/* Is the PHY owned by display, i.e. is it in legacy or DP-alt mode? */
+static bool tc_phy_owned_by_display(struct intel_tc_port *tc,
+ bool phy_is_ready, bool phy_is_owned)
{
struct intel_display *display = to_intel_display(tc->dig_port);
- drm_WARN_ON(display->drm, phy_is_owned && !phy_is_ready);
+ if (DISPLAY_VER(display) < 20) {
+ drm_WARN_ON(display->drm, phy_is_owned && !phy_is_ready);
- return phy_is_ready && phy_is_owned;
+ return phy_is_ready && phy_is_owned;
+ } else {
+ return phy_is_owned;
+ }
}
static bool tc_phy_is_connected(struct intel_tc_port *tc,
@@ -1244,7 +1320,7 @@ static bool tc_phy_is_connected(struct intel_tc_port *tc,
bool phy_is_owned = tc_phy_is_owned(tc);
bool is_connected;
- if (tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned))
+ if (tc_phy_owned_by_display(tc, phy_is_ready, phy_is_owned))
is_connected = port_pll_type == ICL_PORT_DPLL_MG_PHY;
else
is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT;
@@ -1263,8 +1339,13 @@ static bool tc_phy_is_connected(struct intel_tc_port *tc,
static bool tc_phy_wait_for_ready(struct intel_tc_port *tc)
{
struct intel_display *display = to_intel_display(tc->dig_port);
+ bool is_ready;
+ int ret;
- if (wait_for(tc_phy_is_ready(tc), 500)) {
+ ret = poll_timeout_us(is_ready = tc_phy_is_ready(tc),
+ is_ready,
+ 1000, 500 * 1000, false);
+ if (ret) {
drm_err(display->drm, "Port %s: timeout waiting for PHY ready\n",
tc->port_name);
@@ -1352,7 +1433,7 @@ tc_phy_get_current_mode(struct intel_tc_port *tc)
phy_is_ready = tc_phy_is_ready(tc);
phy_is_owned = tc_phy_is_owned(tc);
- if (!tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) {
+ if (!tc_phy_owned_by_display(tc, phy_is_ready, phy_is_owned)) {
mode = get_tc_mode_in_phy_not_owned_state(tc, live_mode);
} else {
drm_WARN_ON(display->drm, live_mode == TC_PORT_TBT_ALT);
@@ -1441,21 +1522,24 @@ static void intel_tc_port_reset_mode(struct intel_tc_port *tc,
intel_display_power_flush_work(display);
if (!intel_tc_cold_requires_aux_pw(dig_port)) {
enum intel_display_power_domain aux_domain;
- bool aux_powered;
aux_domain = intel_aux_power_domain(dig_port);
- aux_powered = intel_display_power_is_enabled(display, aux_domain);
- drm_WARN_ON(display->drm, aux_powered);
+ if (intel_display_power_is_enabled(display, aux_domain))
+ drm_dbg_kms(display->drm, "Port %s: AUX unexpectedly powered\n",
+ tc->port_name);
}
tc_phy_disconnect(tc);
if (!force_disconnect)
tc_phy_connect(tc, required_lanes);
- drm_dbg_kms(display->drm, "Port %s: TC port mode reset (%s -> %s)\n",
+ drm_dbg_kms(display->drm,
+ "Port %s: TC port mode reset (%s -> %s) pin assignment: %c max lanes: %d\n",
tc->port_name,
tc_port_mode_name(old_tc_mode),
- tc_port_mode_name(tc->mode));
+ tc_port_mode_name(tc->mode),
+ pin_assignment_name(tc->pin_assignment),
+ tc->max_lane_count);
}
static bool intel_tc_port_needs_reset(struct intel_tc_port *tc)
@@ -1610,9 +1694,11 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
__intel_tc_port_put_link(tc);
}
- drm_dbg_kms(display->drm, "Port %s: sanitize mode (%s)\n",
+ drm_dbg_kms(display->drm, "Port %s: sanitize mode (%s) pin assignment: %c max lanes: %d\n",
tc->port_name,
- tc_port_mode_name(tc->mode));
+ tc_port_mode_name(tc->mode),
+ pin_assignment_name(tc->pin_assignment),
+ tc->max_lane_count);
mutex_unlock(&tc->lock);
}
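
The wait_for() conversions in this file (and in intel_vblank.c and vlv_dsi_pll.c below) all use the same poll_timeout_us() shape. A minimal sketch, assuming the iopoll-style contract that the helper re-evaluates the read expression and condition every sleep interval and returns 0 on success or -ETIMEDOUT if the condition never holds:

static int tc_wait_phy_ready_sketch(struct intel_tc_port *tc)
{
        bool ready;
        int ret;

        /* args: read op, condition, sleep 1000 us between reads,
         * 500 ms total timeout, false = sample before the first sleep
         */
        ret = poll_timeout_us(ready = tc_phy_is_ready(tc),
                              ready,
                              1000, 500 * 1000, false);

        return ret; /* 0 on success, -ETIMEDOUT otherwise (assumed) */
}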
diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h
index 26c4265368c1..fff8b96e4972 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.h
+++ b/drivers/gpu/drm/i915/display/intel_tc.h
@@ -12,6 +12,75 @@ struct intel_crtc_state;
struct intel_digital_port;
struct intel_encoder;
+/*
+ * The following enum values must stay fixed, as they match the corresponding
+ * pin assignment fields in the PORT_TX_DFLEXPA1 and TCSS_DDI_STATUS registers.
+ */
+enum intel_tc_pin_assignment { /* Lanes (a) Signal/ Cable Notes */
+ /* DP USB Rate (b) type */
+ INTEL_TC_PIN_ASSIGNMENT_NONE = 0, /* 4 - - - (c) */
+ INTEL_TC_PIN_ASSIGNMENT_A, /* 2/4 0 GEN2 TC->TC (d,e) */
+ INTEL_TC_PIN_ASSIGNMENT_B, /* 1/2 1 GEN2 TC->TC (d,f,g) */
+ INTEL_TC_PIN_ASSIGNMENT_C, /* 4 0 DP2 TC->TC (h) */
+ INTEL_TC_PIN_ASSIGNMENT_D, /* 2 1 DP2 TC->TC (h,g) */
+ INTEL_TC_PIN_ASSIGNMENT_E, /* 4 0 DP2 TC->DP */
+ INTEL_TC_PIN_ASSIGNMENT_F, /* 2 1 GEN1/DP1 TC->DP (d,g,i) */
+ /*
+ * (a) - DP unidirectional lanes, each lane using 1 differential signal
+ * pair.
+ * - USB SuperSpeed bidirectional lane, using 2 differential (TX and
+ * RX) signal pairs.
+ * - USB 2.0 (HighSpeed) unidirectional lane, using 1 differential
+ * signal pair. Not indicated, this lane is always present on pin
+ * assignments A-D and never present on pin assignments E/F.
+ * (b) - GEN1: USB 3.1 GEN1 bit rate (5 Gbps) and signaling. This
+ * is used for transferring only a USB stream.
+ * - GEN2: USB 3.1 GEN2 bit rate (10 Gbps) and signaling. This
+ * allows transferring an HBR3 (8.1 Gbps) DP stream.
+ * - DP1: Display Port signaling defined by the DP v1.3 Standard,
+ * with a maximum bit rate of HBR3.
+ * - DP2: Display Port signaling defined by the DP v2.1 Standard,
+ * with a maximum bit rate defined by the DP Alt Mode
+ * v2.1a Standard depending on the cable type as follows:
+ * - Passive (Full-Featured) USB 3.2 GEN1
+ * TC->TC cables (CC3G1-X) : UHBR10
+ * - Passive (Full-Featured) USB 3.2/4 GEN2 and
+ * Thunderbolt Alt Mode GEN2
+ * TC->TC cables (CC3G2-X) all : UHBR10
+ * DP54 logo : UHBR13.5
+ * - Passive (Full-Featured) USB4 GEN3+ and
+ * Thunderbolt Alt Mode GEN3+
+ * TC->TC cables (CC4G3-X) all : UHBR13.5
+ * DP80 logo : UHBR20
+ * - Active Re-Timed or
+ * Active Linear Re-driven (LRD)
+ * USB3.2 GEN1/2 and USB4 GEN2+
+ * TC->TC cables all : HBR3
+ * with DP_BR CTS : UHBR10
+ * DP54 logo : UHBR13.5
+ * DP80 logo : UHBR20
+ * - Passive/Active Re-Timed or
+ * Active Linear Re-driven (LRD)
+ * TC->DP cables with DP_BR CTS/DP8K logo : HBR3
+ * with DP_BR CTS : UHBR10
+ * DP54 logo : UHBR13.5
+ * DP80 logo : UHBR20
+ * (c) Used in TBT-alt/legacy modes and on LNL+ after the sink
+ * disconnected in DP-alt mode.
+ * (d) Only defined by the DP Alt Standard v1.0a, deprecated by v1.0b,
+ * only supported on ICL.
+ * (e) GEN2 passive 1 m cable: 4 DP lanes, GEN2 active cable: 2 DP lanes.
+ * (f) GEN2 passive 1 m cable: 2 DP lanes, GEN2 active cable: 1 DP lane.
+ * (g) These pin assignments are also referred to as (USB/DP)
+ * multifunction or Multifunction Display Port (MFD) modes.
+ * (h) Also used where one end of the cable is a captive connector,
+ * attached to a DP->HDMI/DVI/VGA converter.
+ * (i) The DP end of the cable is a captive connector attached to a
+ * (DP/USB) multifunction dock as defined by the DockPort v1.0a
+ * specification.
+ */
+};
+
bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port);
bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port);
bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port);
@@ -19,7 +88,8 @@ bool intel_tc_port_handles_hpd_glitches(struct intel_digital_port *dig_port);
bool intel_tc_port_connected(struct intel_encoder *encoder);
-u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port);
+enum intel_tc_pin_assignment
+intel_tc_port_get_pin_assignment(struct intel_digital_port *dig_port);
int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port);
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
int required_lanes);
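
get_pin_assignment() above extracts the register field with (val & mask) >> (ffs(mask) - 1), a runtime-mask variant of REG_FIELD_GET() (which needs a compile-time constant mask), so one code path can serve both the DFLEXPA1 and TCSS_DDI_STATUS layouts. The arithmetic, with illustrative values:

#include <linux/bitops.h>

static u32 field_get_runtime(u32 val, u32 mask)
{
        return (val & mask) >> (ffs(mask) - 1); /* ffs() is 1-based */
}

/* e.g. mask = 0x0000f000, val = 0x00003000:
 * (0x3000 & 0xf000) >> 12 = 0x3 = INTEL_TC_PIN_ASSIGNMENT_C,
 * valid because the enum values match the register encoding, as the
 * comment above the enum requires. Mask/val here are illustrative only.
 */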
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c
index 70ba7aa26bf4..c15234c1d96e 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.c
+++ b/drivers/gpu/drm/i915/display/intel_vblank.c
@@ -3,9 +3,12 @@
* Copyright © 2022-2023 Intel Corporation
*/
+#include <linux/iopoll.h>
+
#include <drm/drm_vblank.h>
#include "i915_drv.h"
+#include "i915_utils.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_de.h"
@@ -492,9 +495,14 @@ static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
+ bool is_moving;
+ int ret;
/* Wait for the display line to settle/start moving */
- if (wait_for(pipe_scanline_is_moving(display, pipe) == state, 100))
+ ret = poll_timeout_us(is_moving = pipe_scanline_is_moving(display, pipe),
+ is_moving == state,
+ 500, 100 * 1000, false);
+ if (ret)
drm_err(display->drm,
"pipe %c scanline %s wait timed out\n",
pipe_name(pipe), str_on_off(state));
@@ -724,9 +732,9 @@ int intel_vblank_evade(struct intel_vblank_evade_ctx *evade)
break;
if (!timeout) {
- drm_err(display->drm,
- "Potential atomic update failure on pipe %c\n",
- pipe_name(crtc->pipe));
+ drm_dbg_kms(display->drm,
+ "Potential atomic update failure on pipe %c\n",
+ pipe_name(crtc->pipe));
break;
}
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 92c04811aa28..70e31520c560 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -37,7 +37,7 @@
#ifndef _INTEL_VBT_DEFS_H_
#define _INTEL_VBT_DEFS_H_
-#include "intel_bios.h"
+#include "intel_dsi_vbt_defs.h"
/* EDID derived structures */
struct bdb_edid_pnp_id {
@@ -437,6 +437,22 @@ enum vbt_gmbus_ddi {
#define BDB_230_VBT_DP_MAX_LINK_RATE_UHBR13P5 6
#define BDB_230_VBT_DP_MAX_LINK_RATE_UHBR20 7
+/* eDP link rate 263+ */
+#define BDB_263_VBT_EDP_LINK_RATE_1_62 BIT_U32(0)
+#define BDB_263_VBT_EDP_LINK_RATE_2_16 BIT_U32(1)
+#define BDB_263_VBT_EDP_LINK_RATE_2_43 BIT_U32(2)
+#define BDB_263_VBT_EDP_LINK_RATE_2_7 BIT_U32(3)
+#define BDB_263_VBT_EDP_LINK_RATE_3_24 BIT_U32(4)
+#define BDB_263_VBT_EDP_LINK_RATE_4_32 BIT_U32(5)
+#define BDB_263_VBT_EDP_LINK_RATE_5_4 BIT_U32(6)
+#define BDB_263_VBT_EDP_LINK_RATE_6_75 BIT_U32(7)
+#define BDB_263_VBT_EDP_LINK_RATE_8_1 BIT_U32(8)
+#define BDB_263_VBT_EDP_LINK_RATE_10 BIT_U32(9)
+#define BDB_263_VBT_EDP_LINK_RATE_13_5 BIT_U32(10)
+#define BDB_263_VBT_EDP_LINK_RATE_20 BIT_U32(11)
+#define BDB_263_VBT_EDP_NUM_RATES 12
+#define BDB_263_VBT_EDP_RATES_MASK GENMASK(BDB_263_VBT_EDP_NUM_RATES - 1, 0)
+
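
Unlike the older dp_max_link_rate field, the 263+ field is a bitmask of supported rates rather than a single enum, so decoding walks the set bits. A hedged sketch; the kHz table is inferred from the define names and is not part of this excerpt:

static const int bdb_263_edp_rates_khz[BDB_263_VBT_EDP_NUM_RATES] = {
        162000, 216000, 243000, 270000, 324000, 432000,
        540000, 675000, 810000, 1000000, 1350000, 2000000,
};

static int decode_edp_rates_sketch(u32 override_bits, int *rates_khz)
{
        int i, n = 0;

        for (i = 0; i < BDB_263_VBT_EDP_NUM_RATES; i++)
                if (override_bits & BIT(i))
                        rates_khz[n++] = bdb_263_edp_rates_khz[i];

        return n; /* number of rates enabled by the VBT override */
}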
/*
* The child device config, aka the display device data structure, provides a
* description of a port and its configuration on the platform.
@@ -547,6 +563,8 @@ struct child_device_config {
u8 dp_max_link_rate:3; /* 216+ */
u8 dp_max_link_rate_reserved:5; /* 216+ */
u8 efp_index; /* 256+ */
+ u32 edp_data_rate_override:12; /* 263+ */
+ u32 edp_data_rate_override_reserved:20; /* 263+ */
} __packed;
struct bdb_general_definitions {
diff --git a/drivers/gpu/drm/i915/display/intel_wm.c b/drivers/gpu/drm/i915/display/intel_wm.c
index bba82e888db2..f887a664fe22 100644
--- a/drivers/gpu/drm/i915/display/intel_wm.c
+++ b/drivers/gpu/drm/i915/display/intel_wm.c
@@ -5,7 +5,6 @@
#include <linux/debugfs.h>
-#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include "i9xx_wm.h"
@@ -390,15 +389,15 @@ static const struct file_operations i915_cur_wm_latency_fops = {
void intel_wm_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
+ struct dentry *debugfs_root = display->drm->debugfs_root;
- debugfs_create_file("i915_pri_wm_latency", 0644, minor->debugfs_root,
+ debugfs_create_file("i915_pri_wm_latency", 0644, debugfs_root,
display, &i915_pri_wm_latency_fops);
- debugfs_create_file("i915_spr_wm_latency", 0644, minor->debugfs_root,
+ debugfs_create_file("i915_spr_wm_latency", 0644, debugfs_root,
display, &i915_spr_wm_latency_fops);
- debugfs_create_file("i915_cur_wm_latency", 0644, minor->debugfs_root,
+ debugfs_create_file("i915_cur_wm_latency", 0644, debugfs_root,
display, &i915_cur_wm_latency_fops);
skl_watermark_debugfs_register(display);
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index d77798499c57..c6cccf170ff1 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -10,6 +10,7 @@
#include "intel_display_regs.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
+#include "intel_display_wa.h"
#include "intel_fb.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
@@ -91,11 +92,9 @@ static void skl_scaler_min_src_size(const struct drm_format_info *format,
}
}
-static void skl_scaler_max_src_size(struct intel_crtc *crtc,
+static void skl_scaler_max_src_size(struct intel_display *display,
int *max_w, int *max_h)
{
- struct intel_display *display = to_intel_display(crtc);
-
if (DISPLAY_VER(display) >= 14) {
*max_w = 4096;
*max_h = 8192;
@@ -134,6 +133,23 @@ static void skl_scaler_max_dst_size(struct intel_crtc *crtc,
}
}
+enum drm_mode_status
+skl_scaler_mode_valid(struct intel_display *display,
+ const struct drm_display_mode *mode,
+ enum intel_output_format output_format,
+ int num_joined_pipes)
+{
+ int max_h, max_w;
+
+ if (num_joined_pipes < 2 && output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
+ skl_scaler_max_src_size(display, &max_w, &max_h);
+ if (mode->hdisplay > max_w)
+ return MODE_NO_420;
+ }
+
+ return MODE_OK;
+}
+
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
unsigned int scaler_user, int *scaler_id,
@@ -201,7 +217,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
}
skl_scaler_min_src_size(format, modifier, &min_src_w, &min_src_h);
- skl_scaler_max_src_size(crtc, &max_src_w, &max_src_h);
+ skl_scaler_max_src_size(display, &max_src_w, &max_src_h);
skl_scaler_min_dst_size(&min_dst_w, &min_dst_h);
skl_scaler_max_dst_size(crtc, &max_dst_w, &max_dst_h);
@@ -747,6 +763,9 @@ void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
crtc_state->scaler_state.scaler_id < 0))
return;
+ if (intel_display_wa(display, 14011503117))
+ adl_scaler_ecc_mask(crtc_state);
+
drm_rect_init(&src, 0, 0,
drm_rect_width(&crtc_state->pipe_src) << 16,
drm_rect_height(&crtc_state->pipe_src) << 16);
@@ -923,3 +942,29 @@ void skl_scaler_get_config(struct intel_crtc_state *crtc_state)
else
scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
+
+void adl_scaler_ecc_mask(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (!crtc_state->pch_pfit.enabled)
+ return;
+
+ intel_de_write(display, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
+}
+
+void adl_scaler_ecc_unmask(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+
+ if (scaler_state->scaler_id < 0)
+ return;
+
+ intel_de_write_fw(display,
+ SKL_PS_ECC_STAT(crtc->pipe, scaler_state->scaler_id),
+ 1);
+ intel_de_write(display, XELPD_DISPLAY_ERR_FATAL_MASK, 0);
+}
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.h b/drivers/gpu/drm/i915/display/skl_scaler.h
index 355ea15260ca..12a19016c5f6 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.h
+++ b/drivers/gpu/drm/i915/display/skl_scaler.h
@@ -5,10 +5,14 @@
#ifndef INTEL_SCALER_H
#define INTEL_SCALER_H
+enum drm_mode_status;
+struct drm_display_mode;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
struct intel_dsb;
+enum intel_output_format;
struct intel_plane;
struct intel_plane_state;
@@ -32,4 +36,13 @@ void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state);
void skl_scaler_get_config(struct intel_crtc_state *crtc_state);
+enum drm_mode_status
+skl_scaler_mode_valid(struct intel_display *display,
+ const struct drm_display_mode *mode,
+ enum intel_output_format output_format,
+ int num_joined_pipes);
+
+void adl_scaler_ecc_mask(const struct intel_crtc_state *crtc_state);
+
+void adl_scaler_ecc_unmask(const struct intel_crtc_state *crtc_state);
#endif
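
A sketch of how an encoder's .mode_valid() path might consume the new helper; the real call sites (the DP/HDMI mode_valid hooks) are not part of this excerpt, so the caller below is hypothetical:

static enum drm_mode_status
dp_mode_valid_sketch(struct intel_display *display,
                     const struct drm_display_mode *mode,
                     bool ycbcr420_only, int num_joined_pipes)
{
        enum intel_output_format format = ycbcr420_only ?
                INTEL_OUTPUT_FORMAT_YCBCR420 : INTEL_OUTPUT_FORMAT_RGB;

        /* MODE_NO_420 reports that only the YCbCr420 variant fails:
         * without pipe joining, a single scaler must cover the full
         * width for the 4:2:0 chroma downsampling.
         */
        return skl_scaler_mode_valid(display, mode, format,
                                     num_joined_pipes);
}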
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index e20972ddfa09..e13fb781e7b2 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -10,6 +10,7 @@
#include "pxp/intel_pxp.h"
#include "i915_drv.h"
+#include "i915_utils.h"
#include "intel_bo.h"
#include "intel_de.h"
#include "intel_display_irq.h"
@@ -19,6 +20,7 @@
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
+#include "intel_panic.h"
#include "intel_plane.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
@@ -1166,8 +1168,7 @@ static u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
return plane_ctl;
}
-static u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 skl_plane_ctl(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
@@ -1225,8 +1226,7 @@ static u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
return plane_color_ctl;
}
-static u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 glk_plane_color_ctl(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
@@ -1271,12 +1271,6 @@ static u32 skl_surf_address(const struct intel_plane_state *plane_state,
u32 offset = plane_state->view.color_plane[color_plane].offset;
if (intel_fb_uses_dpt(fb)) {
- /*
- * The DPT object contains only one vma, so the VMA's offset
- * within the DPT is always 0.
- */
- drm_WARN_ON(display->drm, plane_state->dpt_vma &&
- intel_dpt_offset(plane_state->dpt_vma));
drm_WARN_ON(display->drm, offset & 0x1fffff);
return offset >> 9;
} else {
@@ -1285,13 +1279,20 @@ static u32 skl_surf_address(const struct intel_plane_state *plane_state,
}
}
-static u32 skl_plane_surf(const struct intel_plane_state *plane_state,
- int color_plane)
+static int icl_plane_color_plane(const struct intel_plane_state *plane_state)
+{
+ if (plane_state->planar_linked_plane && !plane_state->is_y_plane)
+ return 1;
+ else
+ return 0;
+}
+
+static u32 skl_plane_surf_offset(const struct intel_plane_state *plane_state)
{
+ int color_plane = icl_plane_color_plane(plane_state);
u32 plane_surf;
- plane_surf = intel_plane_ggtt_offset(plane_state) +
- skl_surf_address(plane_state, color_plane);
+ plane_surf = skl_surf_address(plane_state, color_plane);
if (plane_state->decrypt)
plane_surf |= PLANE_SURF_DECRYPT;
@@ -1373,14 +1374,6 @@ static void icl_plane_csc_load_black(struct intel_dsb *dsb,
intel_de_write_dsb(display, dsb, PLANE_CSC_POSTOFF(pipe, plane_id, 2), 0);
}
-static int icl_plane_color_plane(const struct intel_plane_state *plane_state)
-{
- if (plane_state->planar_linked_plane && !plane_state->is_y_plane)
- return 1;
- else
- return 0;
-}
-
static void
skl_plane_update_noarm(struct intel_dsb *dsb,
struct intel_plane *plane,
@@ -1476,7 +1469,7 @@ skl_plane_update_arm(struct intel_dsb *dsb,
intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id),
plane_ctl);
intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id),
- skl_plane_surf(plane_state, 0));
+ plane_state->surf);
}
static void icl_plane_update_sel_fetch_noarm(struct intel_dsb *dsb,
@@ -1632,7 +1625,6 @@ icl_plane_update_arm(struct intel_dsb *dsb,
struct intel_display *display = to_intel_display(plane);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
- int color_plane = icl_plane_color_plane(plane_state);
u32 plane_ctl;
plane_ctl = plane_state->ctl |
@@ -1658,7 +1650,7 @@ icl_plane_update_arm(struct intel_dsb *dsb,
intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id),
plane_ctl);
intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id),
- skl_plane_surf(plane_state, color_plane));
+ plane_state->surf);
}
static void skl_plane_capture_error(struct intel_crtc *crtc,
@@ -1682,10 +1674,10 @@ skl_plane_async_flip(struct intel_dsb *dsb,
struct intel_display *display = to_intel_display(plane);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
- u32 plane_ctl = plane_state->ctl, plane_surf;
+ u32 plane_ctl = plane_state->ctl;
+ u32 plane_surf = plane_state->surf;
plane_ctl |= skl_plane_ctl_crtc(crtc_state);
- plane_surf = skl_plane_surf(plane_state, 0);
if (async_flip) {
if (DISPLAY_VER(display) >= 30)
@@ -2363,11 +2355,10 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
plane_state->damage = DRM_RECT_INIT(0, 0, 0, 0);
}
- plane_state->ctl = skl_plane_ctl(crtc_state, plane_state);
+ plane_state->ctl = skl_plane_ctl(plane_state);
if (DISPLAY_VER(display) >= 10)
- plane_state->color_ctl = glk_plane_color_ctl(crtc_state,
- plane_state);
+ plane_state->color_ctl = glk_plane_color_ctl(plane_state);
if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
icl_is_hdr_plane(display, plane->id))
@@ -2814,7 +2805,7 @@ static void skl_disable_tiling(struct intel_plane *plane)
intel_de_write_fw(display, PLANE_CTL(plane->pipe, plane->id), plane_ctl);
intel_de_write_fw(display, PLANE_SURF(plane->pipe, plane->id),
- skl_plane_surf(state, 0));
+ state->surf);
}
struct intel_plane *
@@ -2865,6 +2856,8 @@ skl_universal_plane_create(struct intel_display *display,
}
plane->disable_tiling = skl_disable_tiling;
+ plane->surf_offset = skl_plane_surf_offset;
+
if (DISPLAY_VER(display) >= 13)
plane->max_stride = adl_plane_max_stride;
else
@@ -3036,7 +3029,7 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
return;
}
- intel_fb = intel_bo_alloc_framebuffer();
+ intel_fb = intel_framebuffer_alloc();
if (!intel_fb) {
drm_dbg_kms(display->drm, "failed to alloc fb\n");
return;
@@ -3191,21 +3184,18 @@ bool skl_fixup_initial_plane_config(struct intel_crtc *crtc,
to_intel_plane_state(plane->base.state);
enum plane_id plane_id = plane->id;
enum pipe pipe = crtc->pipe;
- u32 base;
if (!plane_state->uapi.visible)
return false;
- base = intel_plane_ggtt_offset(plane_state);
-
/*
* We may have moved the surface to a different
* part of ggtt, make the plane aware of that.
*/
- if (plane_config->base == base)
+ if (plane_config->base == plane_state->surf)
return false;
- intel_de_write(display, PLANE_SURF(pipe, plane_id), base);
+ intel_de_write(display, PLANE_SURF(pipe, plane_id), plane_state->surf);
return true;
}
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index 222c069fdadb..d74cbb43ae6f 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -6,7 +6,6 @@
#include <linux/debugfs.h>
#include <drm/drm_blend.h>
-#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include "soc/intel_dram.h"
@@ -1389,7 +1388,7 @@ skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
{
u16 size, extra = 0;
- if (data_rate) {
+ if (data_rate && iter->data_rate) {
extra = min_t(u16, iter->size,
DIV64_U64_ROUND_UP(iter->size * data_rate,
iter->data_rate));
@@ -2273,6 +2272,11 @@ static int skl_max_wm0_lines(const struct intel_crtc_state *crtc_state)
return wm0_lines;
}
+/*
+ * TODO: If we use PKG_C_LATENCY to allow C-states when the delayed vblank
+ * size is too small for the package C exit latency, we need to notify PSR
+ * about the scenario so that it can apply Wa_16025596647.
+ */
static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state,
int wm0_lines)
{
@@ -3205,12 +3209,12 @@ adjust_wm_latency(struct intel_display *display,
}
/*
- * WA Level-0 adjustment for 16GB DIMMs: SKL+
+ * WA Level-0 adjustment for 16Gb DIMMs: SKL+
* If we could not get dimm info enable this WA to prevent from
- * any underrun. If not able to get Dimm info assume 16GB dimm
+ * any underrun. If not able to get DIMM info assume 16Gb DIMM
* to avoid any underrun.
*/
- if (!display->platform.dg2 && dram_info->wm_lv_0_adjust_needed)
+ if (!display->platform.dg2 && dram_info->has_16gb_dimms)
wm[0] += 1;
}
@@ -4033,14 +4037,14 @@ DEFINE_SHOW_ATTRIBUTE(intel_sagv_status);
void skl_watermark_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
+ struct dentry *debugfs_root = display->drm->debugfs_root;
if (HAS_IPC(display))
- debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root,
+ debugfs_create_file("i915_ipc_status", 0644, debugfs_root,
display, &skl_watermark_ipc_status_fops);
if (HAS_SAGV(display))
- debugfs_create_file("i915_sagv_status", 0444, minor->debugfs_root,
+ debugfs_create_file("i915_sagv_status", 0444, debugfs_root,
display, &intel_sagv_status_fops);
}
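
The extra iter->data_rate check in skl_allocate_plane_ddb() guards the division inside DIV64_U64_ROUND_UP(); worked numbers for the proportional split:

/* Example: iter->size = 100 DDB blocks, plane data_rate = 30,
 * iter->data_rate (total) = 120:
 *   extra = min(100, DIV64_U64_ROUND_UP(100 * 30, 120)) = min(100, 25) = 25,
 * i.e. the plane receives spare blocks in proportion to its share of the
 * total data rate. With iter->data_rate == 0 the old 'if (data_rate)'
 * check alone would have divided by zero, hence the added guard.
 */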
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 6d9f3312de7e..c9a53fde79c4 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -761,7 +761,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
if (display->platform.valleyview || display->platform.cherryview) {
/* Disable DPOunit clock gating, can stall pipe */
- intel_de_rmw(display, DSPCLK_GATE_D(display),
+ intel_de_rmw(display, VLV_DSPCLK_GATE_D,
0, DPOUNIT_CLOCK_GATE_DISABLE);
}
@@ -918,7 +918,7 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
} else {
vlv_dsi_pll_disable(encoder);
- intel_de_rmw(display, DSPCLK_GATE_D(display),
+ intel_de_rmw(display, VLV_DSPCLK_GATE_D,
DPOUNIT_CLOCK_GATE_DISABLE, 0);
}
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index d42b61e6f076..f078b9cda96c 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -25,12 +25,12 @@
* Yogesh Mohan Marimuthu <yogesh.mohan.marimuthu@intel.com>
*/
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/string_helpers.h>
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
@@ -142,11 +142,9 @@ static int vlv_dsi_pclk(struct intel_encoder *encoder,
pll_div &= DSI_PLL_M1_DIV_MASK;
pll_div = pll_div >> DSI_PLL_M1_DIV_SHIFT;
- while (pll_ctl) {
- pll_ctl = pll_ctl >> 1;
- p++;
- }
- p--;
+ p = fls(pll_ctl);
+ if (p)
+ p--;
if (!p) {
drm_err(display->drm, "wrong P1 divisor\n");
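
Worked example for the fls() conversion above (fls() returns the 1-based index of the most significant set bit, or 0 for 0):

/* pll_ctl P1 field = 0b00010000:
 *   old loop: 5 right-shifts until zero -> p = 5, then p-- -> 4
 *   new code: fls(0b00010000) = 5, p-- -> 4 (same result, no loop)
 * pll_ctl = 0:
 *   old loop left p = -1, silently skipping the !p sanity check;
 *   new code keeps p = 0, so the "wrong P1 divisor" path triggers.
 */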
@@ -216,6 +214,8 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *config)
{
struct intel_display *display = to_intel_display(encoder);
+ u32 val;
+ int ret;
drm_dbg_kms(display->drm, "\n");
@@ -233,9 +233,10 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder,
vlv_cck_write(display->drm, CCK_REG_DSI_PLL_CONTROL, config->dsi_pll.ctrl);
- if (wait_for(vlv_cck_read(display->drm, CCK_REG_DSI_PLL_CONTROL) &
- DSI_PLL_LOCK, 20)) {
-
+ ret = poll_timeout_us(val = vlv_cck_read(display->drm, CCK_REG_DSI_PLL_CONTROL),
+ val & DSI_PLL_LOCK,
+ 500, 20 * 1000, false);
+ if (ret) {
vlv_cck_put(display->drm);
drm_err(display->drm, "DSI PLL lock failed\n");
return;
@@ -262,6 +263,11 @@ void vlv_dsi_pll_disable(struct intel_encoder *encoder)
vlv_cck_put(display->drm);
}
+static bool has_dsic_clock(struct intel_display *display)
+{
+ return display->platform.broxton;
+}
+
bool bxt_dsi_pll_is_enabled(struct intel_display *display)
{
bool enabled;
@@ -284,7 +290,7 @@ bool bxt_dsi_pll_is_enabled(struct intel_display *display)
* causes a system hang.
*/
val = intel_de_read(display, BXT_DSI_PLL_CTL);
- if (display->platform.geminilake) {
+ if (!has_dsic_clock(display)) {
if (!(val & BXT_DSIA_16X_MASK)) {
drm_dbg_kms(display->drm,
"Invalid PLL divider (%08x)\n", val);
@@ -358,6 +364,8 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
u32 pclk;
config->dsi_pll.ctrl = intel_de_read(display, BXT_DSI_PLL_CTL);
+ if (!has_dsic_clock(display))
+ config->dsi_pll.ctrl &= ~BXT_DSIC_16X_MASK;
pclk = bxt_dsi_pclk(encoder, config);
@@ -514,7 +522,9 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
* Spec says both have to be programmed, even if one is not getting
* used. Configure MIPI_CLOCK_CTL dividers in modeset
*/
- config->dsi_pll.ctrl = dsi_ratio | BXT_DSIA_16X_BY2 | BXT_DSIC_16X_BY2;
+ config->dsi_pll.ctrl = dsi_ratio | BXT_DSIA_16X_BY2;
+ if (has_dsic_clock(display))
+ config->dsi_pll.ctrl |= BXT_DSIC_16X_BY2;
/* As per recommendation from hardware team,
* Prog PVD ratio =1 if dsi ratio <= 50
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 15835952352e..ed6599694835 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -2158,18 +2158,12 @@ static int set_context_image(struct i915_gem_context *ctx,
goto out_ce;
}
- state = kmalloc(ce->engine->context_size, GFP_KERNEL);
- if (!state) {
- ret = -ENOMEM;
+ state = memdup_user(u64_to_user_ptr(user.image), ce->engine->context_size);
+ if (IS_ERR(state)) {
+ ret = PTR_ERR(state);
goto out_ce;
}
- if (copy_from_user(state, u64_to_user_ptr(user.image),
- ce->engine->context_size)) {
- ret = -EFAULT;
- goto out_state;
- }
-
shmem_state = shmem_create_from_data(ce->engine->name,
state, ce->engine->context_size);
if (IS_ERR(shmem_state)) {
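
The set_context_image() change above is the standard memdup_user() idiom, collapsing the allocate-then-copy error handling into one step; a generic sketch:

static int copy_user_blob_sketch(u64 user_ptr, size_t size)
{
        void *buf;

        /* memdup_user() = kmalloc(GFP_KERNEL) + copy_from_user(), returning
         * ERR_PTR(-ENOMEM) or ERR_PTR(-EFAULT) on failure.
         */
        buf = memdup_user(u64_to_user_ptr(user_ptr), size);
        if (IS_ERR(buf))
                return PTR_ERR(buf);

        /* ... consume buf ... */

        kfree(buf);
        return 0;
}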
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index ca7e9216934a..39c7c32e1e74 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -182,7 +182,7 @@ enum {
* the object. Simple! ... The relocation entries are stored in user memory
* and so to access them we have to copy them into a local buffer. That copy
* has to avoid taking any pagefaults as they may lead back to a GEM object
- * requiring the struct_mutex (i.e. recursive deadlock). So once again we split
+ * requiring the vm->mutex (i.e. recursive deadlock). So once again we split
* the relocation into multiple passes. First we try to do everything within an
* atomic context (avoid the pagefaults) which requires that we never wait. If
* we detect that we may wait, or if we need to fault, then we have to fallback
@@ -1382,8 +1382,9 @@ static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
*/
if (flushes & CLFLUSH_AFTER)
drm_clflush_virt_range(addr, sizeof(*addr));
- } else
+ } else {
*addr = value;
+ }
}
static u64
@@ -1567,36 +1568,36 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
do {
u64 offset = eb_relocate_entry(eb, ev, r);
- if (likely(offset == 0)) {
- } else if ((s64)offset < 0) {
+ if (likely(offset == 0))
+ continue;
+
+ if ((s64)offset < 0) {
remain = (int)offset;
goto out;
- } else {
- /*
- * Note that reporting an error now
- * leaves everything in an inconsistent
- * state as we have *already* changed
- * the relocation value inside the
- * object. As we have not changed the
- * reloc.presumed_offset or will not
- * change the execobject.offset, on the
- * call we may not rewrite the value
- * inside the object, leaving it
- * dangling and causing a GPU hang. Unless
- * userspace dynamically rebuilds the
- * relocations on each execbuf rather than
- * presume a static tree.
- *
- * We did previously check if the relocations
- * were writable (access_ok), an error now
- * would be a strange race with mprotect,
- * having already demonstrated that we
- * can read from this userspace address.
- */
- offset = gen8_canonical_addr(offset & ~UPDATE);
- __put_user(offset,
- &urelocs[r - stack].presumed_offset);
}
+ /*
+ * Note that reporting an error now
+ * leaves everything in an inconsistent
+ * state as we have *already* changed
+ * the relocation value inside the
+ * object. As we have not changed the
+ * reloc.presumed_offset or will not
+ * change the execobject.offset, on the
+ * call we may not rewrite the value
+ * inside the object, leaving it
+ * dangling and causing a GPU hang. Unless
+ * userspace dynamically rebuilds the
+ * relocations on each execbuf rather than
+ * presume a static tree.
+ *
+ * We did previously check if the relocations
+ * were writable (access_ok), an error now
+ * would be a strange race with mprotect,
+ * having already demonstrated that we
+ * can read from this userspace address.
+ */
+ offset = gen8_canonical_addr(offset & ~UPDATE);
+ __put_user(offset, &urelocs[r - stack].presumed_offset);
} while (r++, --count);
urelocs += ARRAY_SIZE(stack);
} while (remain);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 1f38e367c60b..478011e5ecb3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -459,8 +459,8 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
atomic_inc(&i915->mm.free_count);
/*
- * Since we require blocking on struct_mutex to unbind the freed
- * object from the GPU before releasing resources back to the
+ * Since we require blocking on drm_i915_gem_object->vma.lock to unbind
+ * the freed object from the GPU before releasing resources back to the
* system, we can not do that directly from the RCU callback (which may
* be a softirq context), but must instead then defer that work onto a
* kthread. We use the RCU callback rather than move the freed object
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 565f8fa330db..148034ef504d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -16,9 +16,9 @@
#include "i915_gem_ww.h"
#include "i915_vma_types.h"
-struct drm_scanout_buffer;
enum intel_region_id;
-struct intel_framebuffer;
+struct drm_scanout_buffer;
+struct intel_panic;
#define obj_to_i915(obj__) to_i915((obj__)->base.dev)
@@ -693,9 +693,10 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
int i915_gem_object_truncate(struct drm_i915_gem_object *obj);
-struct intel_framebuffer *i915_gem_object_alloc_framebuffer(void);
-int i915_gem_object_panic_setup(struct drm_scanout_buffer *sb);
-void i915_gem_object_panic_finish(struct intel_framebuffer *fb);
+struct intel_panic *i915_gem_object_alloc_panic(void);
+int i915_gem_object_panic_setup(struct intel_panic *panic, struct drm_scanout_buffer *sb,
+ struct drm_gem_object *_obj, bool panic_tiling);
+void i915_gem_object_panic_finish(struct intel_panic *panic);
/**
* i915_gem_object_pin_map - return a contiguous mapping of the entire object
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index c16a57160b26..3f09cbce05bb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -357,23 +357,13 @@ static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
return vaddr ?: ERR_PTR(-ENOMEM);
}
-struct i915_panic_data {
+struct intel_panic {
struct page **pages;
int page;
void *vaddr;
};
-struct i915_framebuffer {
- struct intel_framebuffer base;
- struct i915_panic_data panic;
-};
-
-static inline struct i915_panic_data *to_i915_panic_data(struct intel_framebuffer *fb)
-{
- return &container_of_const(fb, struct i915_framebuffer, base)->panic;
-}
-
-static void i915_panic_kunmap(struct i915_panic_data *panic)
+static void i915_panic_kunmap(struct intel_panic *panic)
{
if (panic->vaddr) {
drm_clflush_virt_range(panic->vaddr, PAGE_SIZE);
@@ -420,7 +410,7 @@ static void i915_gem_object_panic_page_set_pixel(struct drm_scanout_buffer *sb,
unsigned int new_page;
unsigned int offset;
struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
- struct i915_panic_data *panic = to_i915_panic_data(fb);
+ struct intel_panic *panic = fb->panic;
if (fb->panic_tiling)
offset = fb->panic_tiling(sb->width, x, y);
@@ -441,14 +431,13 @@ static void i915_gem_object_panic_page_set_pixel(struct drm_scanout_buffer *sb,
}
}
-struct intel_framebuffer *i915_gem_object_alloc_framebuffer(void)
+struct intel_panic *i915_gem_object_alloc_panic(void)
{
- struct i915_framebuffer *i915_fb;
+ struct intel_panic *panic;
+
+ panic = kzalloc(sizeof(*panic), GFP_KERNEL);
- i915_fb = kzalloc(sizeof(*i915_fb), GFP_KERNEL);
- if (i915_fb)
- return &i915_fb->base;
- return NULL;
+ return panic;
}
/*
@@ -456,12 +445,11 @@ struct intel_framebuffer *i915_gem_object_alloc_framebuffer(void)
* Use current vaddr if it exists, or setup a list of pages.
* pfn is not supported yet.
*/
-int i915_gem_object_panic_setup(struct drm_scanout_buffer *sb)
+int i915_gem_object_panic_setup(struct intel_panic *panic, struct drm_scanout_buffer *sb,
+ struct drm_gem_object *_obj, bool panic_tiling)
{
enum i915_map_type has_type;
- struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
- struct i915_panic_data *panic = to_i915_panic_data(fb);
- struct drm_i915_gem_object *obj = to_intel_bo(intel_fb_bo(&fb->base));
+ struct drm_i915_gem_object *obj = to_intel_bo(_obj);
void *ptr;
ptr = page_unpack_bits(obj->mm.mapping, &has_type);
@@ -471,7 +459,7 @@ int i915_gem_object_panic_setup(struct drm_scanout_buffer *sb)
else
iosys_map_set_vaddr(&sb->map[0], ptr);
- if (fb->panic_tiling)
+ if (panic_tiling)
sb->set_pixel = i915_gem_object_panic_map_set_pixel;
return 0;
}
@@ -486,10 +474,8 @@ int i915_gem_object_panic_setup(struct drm_scanout_buffer *sb)
return -EOPNOTSUPP;
}
-void i915_gem_object_panic_finish(struct intel_framebuffer *fb)
+void i915_gem_object_panic_finish(struct intel_panic *panic)
{
- struct i915_panic_data *panic = to_i915_panic_data(fb);
-
i915_panic_kunmap(panic);
panic->page = -1;
kfree(panic->pages);
@@ -779,7 +765,7 @@ __i915_gem_object_get_page(struct drm_i915_gem_object *obj, pgoff_t n)
GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
sg = i915_gem_object_get_sg(obj, n, &offset);
- return nth_page(sg_page(sg), offset);
+ return sg_page(sg) + offset;
}
/* Like i915_gem_object_get_page(), but mark the returned page dirty */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index e3d188455f67..b9dae15c1d16 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -514,6 +514,13 @@ static int __create_shmem(struct drm_i915_private *i915,
if (IS_ERR(filp))
return PTR_ERR(filp);
+ /*
+ * Set O_LARGEFILE so that writes beyond MAX_NON_LFS on shmem
+ * objects don't fail with -EFBIG.
+ */
+ if (force_o_largefile())
+ filp->f_flags |= O_LARGEFILE;
+
obj->filp = filp;
return 0;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index b81e67504bbe..7a3e74a6676e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -170,7 +170,7 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
* Also note that although these lists do not hold a reference to
* the object we can safely grab one here: The final object
* unreferencing and the bound_list are both protected by the
- * dev->struct_mutex and so we won't ever be able to observe an
+ * i915->mm.obj_lock and so we won't ever be able to observe an
* object on the bound_list with a reference count equals 0.
*/
for (phase = phases; phase->list; phase++) {
@@ -185,7 +185,7 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
/*
* We serialize our access to unreferenced objects through
- * the use of the struct_mutex. While the objects are not
+ * the use of the obj_lock. While the objects are not
* yet freed (due to RCU then a workqueue) we still want
* to be able to shrink their pages, so they remain on
* the unbound/bound list until actually freed.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index 991666fd9f85..54829801d3f7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -217,10 +217,10 @@ static unsigned long to_wait_timeout(s64 timeout_ns)
*
* The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
* non-zero timeout parameter the wait ioctl will wait for the given number of
- * nanoseconds on an object becoming unbusy. Since the wait itself does so
- * without holding struct_mutex the object may become re-busied before this
- * function completes. A similar but shorter * race condition exists in the busy
- * ioctl
+ * nanoseconds on an object becoming unbusy. Since the wait occurs without
+ * holding a global or exclusive lock, the object may become re-busied before
+ * this function completes. A similar but shorter race condition exists
+ * in the busy ioctl.
*/
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.c b/drivers/gpu/drm/i915/gem/i915_gemfs.c
index a09e2eb47175..8f13ec4ff0d0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gemfs.c
+++ b/drivers/gpu/drm/i915/gem/i915_gemfs.c
@@ -11,11 +11,6 @@
#include "i915_gemfs.h"
#include "i915_utils.h"
-static int add_param(struct fs_context *fc, const char *key, const char *val)
-{
- return vfs_parse_fs_string(fc, key, val, strlen(val));
-}
-
void i915_gemfs_init(struct drm_i915_private *i915)
{
struct file_system_type *type;
@@ -48,9 +43,9 @@ void i915_gemfs_init(struct drm_i915_private *i915)
fc = fs_context_for_mount(type, SB_KERNMOUNT);
if (IS_ERR(fc))
goto err;
- ret = add_param(fc, "source", "tmpfs");
+ ret = vfs_parse_fs_string(fc, "source", "tmpfs");
if (!ret)
- ret = add_param(fc, "huge", "within_size");
+ ret = vfs_parse_fs_string(fc, "huge", "within_size");
if (!ret)
gemfs = fc_mount_longterm(fc);
put_fs_context(fc);
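
For context, the dropped add_param() wrapper existed only to supply the string length that vfs_parse_fs_string() used to require; this series assumes the newer three-argument form that takes the NUL-terminated value directly. A hedged sketch of the overall kern-mount pattern (fc_mount() stands in for fc_mount_longterm(), whose availability depends on kernel version):

#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/mount.h>

/* Sketch: mount a kernel-internal tmpfs instance, assuming the
 * 3-argument vfs_parse_fs_string(). Error handling abbreviated. */
static struct vfsmount *sketch_mount_tmpfs(void)
{
	struct file_system_type *type = get_fs_type("tmpfs");
	struct fs_context *fc;
	struct vfsmount *mnt;
	int err;

	if (!type)
		return ERR_PTR(-ENODEV);

	fc = fs_context_for_mount(type, SB_KERNMOUNT);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	err = vfs_parse_fs_string(fc, "source", "tmpfs");
	mnt = err ? ERR_PTR(err) : fc_mount(fc);
	put_fs_context(fc);
	return mnt;
}
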
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 86d9d2fcb6a6..539c620364e3 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -5,7 +5,7 @@
#include "i915_selftest.h"
-#include "display/intel_display_core.h"
+#include "display/intel_display_device.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_engine_user.h"
@@ -110,6 +110,7 @@ struct tiled_blits {
static bool fastblit_supports_x_tiling(const struct drm_i915_private *i915)
{
+ struct intel_display *display = i915->display;
int gen = GRAPHICS_VER(i915);
/* XY_FAST_COPY_BLT does not exist on pre-gen9 platforms */
@@ -121,7 +122,7 @@ static bool fastblit_supports_x_tiling(const struct drm_i915_private *i915)
if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
return false;
- return HAS_DISPLAY(i915);
+ return intel_display_device_present(display);
}
static bool fast_blit_ok(const struct blit_buffer *buf)
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 9c3f17e51885..78734c404a6d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -1096,32 +1096,20 @@ static int ___igt_mmap_migrate(struct drm_i915_private *i915,
unsigned long addr,
bool unfaultable)
{
- struct vm_area_struct *area;
- int err = 0, i;
+ int i;
pr_info("igt_mmap(%s, %d) @ %lx\n",
obj->mm.region->name, I915_MMAP_TYPE_FIXED, addr);
- mmap_read_lock(current->mm);
- area = vma_lookup(current->mm, addr);
- mmap_read_unlock(current->mm);
- if (!area) {
- pr_err("%s: Did not create a vm_area_struct for the mmap\n",
- obj->mm.region->name);
- err = -EINVAL;
- goto out_unmap;
- }
-
for (i = 0; i < obj->base.size / sizeof(u32); i++) {
u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
u32 x;
if (get_user(x, ux)) {
- err = -EFAULT;
if (!unfaultable) {
pr_err("%s: Unable to read from mmap, offset:%zd\n",
obj->mm.region->name, i * sizeof(x));
- goto out_unmap;
+ return -EFAULT;
}
continue;
@@ -1130,37 +1118,29 @@ static int ___igt_mmap_migrate(struct drm_i915_private *i915,
if (unfaultable) {
pr_err("%s: Faulted unmappable memory\n",
obj->mm.region->name);
- err = -EINVAL;
- goto out_unmap;
+ return -EINVAL;
}
if (x != expand32(POISON_INUSE)) {
pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
obj->mm.region->name,
i * sizeof(x), x, expand32(POISON_INUSE));
- err = -EINVAL;
- goto out_unmap;
+ return -EINVAL;
}
x = expand32(POISON_FREE);
if (put_user(x, ux)) {
pr_err("%s: Unable to write to mmap, offset:%zd\n",
obj->mm.region->name, i * sizeof(x));
- err = -EFAULT;
- goto out_unmap;
+ return -EFAULT;
}
}
- if (unfaultable) {
- if (err == -EFAULT)
- err = 0;
- } else {
- obj->flags &= ~I915_BO_ALLOC_GPU_ONLY;
- err = wc_check(obj);
- }
-out_unmap:
- vm_munmap(addr, obj->base.size);
- return err;
+ if (unfaultable)
+ return 0;
+
+ obj->flags &= ~I915_BO_ALLOC_GPU_ONLY;
+ return wc_check(obj);
}
#define IGT_MMAP_MIGRATE_TOPDOWN (1 << 0)
@@ -1176,6 +1156,7 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
struct drm_i915_private *i915 = placements[0]->i915;
struct drm_i915_gem_object *obj;
struct i915_request *rq = NULL;
+ struct vm_area_struct *area;
unsigned long addr;
LIST_HEAD(objects);
u64 offset;
@@ -1207,20 +1188,30 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
goto out_put;
}
+ mmap_read_lock(current->mm);
+ area = vma_lookup(current->mm, addr);
+ mmap_read_unlock(current->mm);
+ if (!area) {
+ pr_err("%s: Did not create a vm_area_struct for the mmap\n",
+ obj->mm.region->name);
+ err = -EINVAL;
+ goto out_addr;
+ }
+
if (flags & IGT_MMAP_MIGRATE_FILL) {
err = igt_fill_mappable(placements[0], &objects);
if (err)
- goto out_put;
+ goto out_addr;
}
err = i915_gem_object_lock(obj, NULL);
if (err)
- goto out_put;
+ goto out_addr;
err = i915_gem_object_pin_pages(obj);
if (err) {
i915_gem_object_unlock(obj);
- goto out_put;
+ goto out_addr;
}
err = intel_context_migrate_clear(to_gt(i915)->migrate.context, NULL,
@@ -1228,7 +1219,7 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
i915_gem_object_is_lmem(obj),
expand32(POISON_INUSE), &rq);
i915_gem_object_unpin_pages(obj);
- if (rq) {
+ if (rq && !err) {
err = dma_resv_reserve_fences(obj->base.resv, 1);
if (!err)
dma_resv_add_fence(obj->base.resv, &rq->fence,
@@ -1237,7 +1228,7 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
}
i915_gem_object_unlock(obj);
if (err)
- goto out_put;
+ goto out_addr;
if (flags & IGT_MMAP_MIGRATE_EVICTABLE)
igt_make_evictable(&objects);
@@ -1245,16 +1236,16 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
if (flags & IGT_MMAP_MIGRATE_FAIL_GPU) {
err = i915_gem_object_lock(obj, NULL);
if (err)
- goto out_put;
+ goto out_addr;
/*
- * Ensure we only simulate the gpu failuire when faulting the
+ * Ensure we only simulate the gpu failure when faulting the
* pages.
*/
err = i915_gem_object_wait_moving_fence(obj, true);
i915_gem_object_unlock(obj);
if (err)
- goto out_put;
+ goto out_addr;
i915_ttm_migrate_set_failure_modes(true, false);
}
@@ -1298,6 +1289,9 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
}
}
+out_addr:
+ vm_munmap(addr, obj->base.size);
+
out_put:
i915_gem_object_put(obj);
igt_close_objects(i915, &objects);
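
The selftest refactor above hoists the one-time vma_lookup() sanity check out of the per-pass helper into __igt_mmap_migrate() and funnels every failure path through the new out_addr label, so vm_munmap() is no longer skipped on early errors. A minimal sketch of the lookup idiom, which requires the mmap lock held at least for read:

#include <linux/mm.h>
#include <linux/sched.h>

/* Sketch: check whether an address returned by mmap actually got a
 * vm_area_struct. vma_lookup() must run under the mmap read lock. */
static bool sketch_addr_is_mapped(unsigned long addr)
{
	struct vm_area_struct *vma;

	mmap_read_lock(current->mm);
	vma = vma_lookup(current->mm, addr);
	mmap_read_unlock(current->mm);

	return vma != NULL;
}
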
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 98c7f6052069..10070ee4d74c 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -14,7 +14,6 @@
#include "i915_active_types.h"
#include "i915_sw_fence.h"
-#include "i915_utils.h"
#include "intel_engine_types.h"
#include "intel_sseu.h"
#include "intel_wakeref.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 03baa7fa0a27..7f389cb0bde4 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -106,14 +106,18 @@
* preemption, but just sampling the new tail pointer).
*
*/
+
#include <linux/interrupt.h>
#include <linux/string_helpers.h>
+#include "gen8_engine_cs.h"
#include "i915_drv.h"
+#include "i915_list_util.h"
#include "i915_reg.h"
+#include "i915_timer_util.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
-#include "gen8_engine_cs.h"
+#include "i915_wait_util.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine_heartbeat.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
index 86b5a9ba323d..c7befc5c20d0 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
@@ -7,6 +7,7 @@
#include "gem/i915_gem_object.h"
#include "i915_drv.h"
+#include "i915_list_util.h"
#include "intel_engine_pm.h"
#include "intel_gt_buffer_pool.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
index 6c499692d61e..88b147fa5cb1 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
@@ -148,7 +148,7 @@ static u32 gen4_read_clock_frequency(struct intel_uncore *uncore)
*
* Testing on actual hardware has shown there is no /16.
*/
- return DIV_ROUND_CLOSEST(i9xx_fsb_freq(uncore->i915), 4) * 1000;
+ return DIV_ROUND_CLOSEST(intel_fsb_freq(uncore->i915), 4) * 1000;
}
static u32 read_clock_frequency(struct intel_uncore *uncore)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
index 4dc23b8d3aa2..dcd40b30a96b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
@@ -82,14 +82,15 @@ static void gt_debugfs_register(struct intel_gt *gt, struct dentry *root)
void intel_gt_debugfs_register(struct intel_gt *gt)
{
+ struct dentry *debugfs_root = gt->i915->drm.debugfs_root;
struct dentry *root;
char gtname[4];
- if (!gt->i915->drm.primary->debugfs_root)
+ if (!debugfs_root)
return;
snprintf(gtname, sizeof(gtname), "gt%u", gt->info.id);
- root = debugfs_create_dir(gtname, gt->i915->drm.primary->debugfs_root);
+ root = debugfs_create_dir(gtname, debugfs_root);
if (IS_ERR(root))
return;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
index a60822e2b5d4..c3afa321fe30 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
@@ -4,6 +4,7 @@
*/
#include "i915_drv.h"
+#include "i915_wait_util.h"
#include "intel_gt.h"
#include "intel_gt_mcr.h"
#include "intel_gt_print.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 9ca42589da4d..bf38cc5fe872 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -341,7 +341,7 @@ static int vlv_rc6_init(struct intel_rc6 *rc6)
return PTR_ERR(pctx);
}
- GEM_BUG_ON(range_overflows_end_t(u64,
+ GEM_BUG_ON(range_end_overflows_t(u64,
i915->dsm.stolen.start,
pctx->stolen->start,
U32_MAX));
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 4a1675dea1c7..41b5036dc538 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -9,18 +9,17 @@
#include "display/intel_display_reset.h"
#include "display/intel_overlay.h"
-
#include "gem/i915_gem_context.h"
-
#include "gt/intel_gt_regs.h"
-
#include "gt/uc/intel_gsc_fw.h"
+#include "uc/intel_guc.h"
#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_gpu_error.h"
#include "i915_irq.h"
#include "i915_reg.h"
+#include "i915_wait_util.h"
#include "intel_breadcrumbs.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
@@ -32,8 +31,6 @@
#include "intel_pci_config.h"
#include "intel_reset.h"
-#include "uc/intel_guc.h"
-
#define RESET_MAX_RETRIES 3
static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
diff --git a/drivers/gpu/drm/i915/gt/intel_reset_types.h b/drivers/gpu/drm/i915/gt/intel_reset_types.h
index 4f5fd393af6f..ee4eb574a219 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset_types.h
@@ -20,7 +20,7 @@ struct intel_reset {
* FENCE registers).
*
* #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
- * acquire the struct_mutex to reset an engine, we need an explicit
+ * acquire a global lock to reset an engine, we need an explicit
* flag to prevent two concurrent reset attempts in the same engine.
* As the number of engines continues to grow, allocate the flags from
* the most significant bits.
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 2a6d79abf25b..8314a4b0505e 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -15,18 +15,19 @@
#include "i915_irq.h"
#include "i915_mitigations.h"
#include "i915_reg.h"
+#include "i915_wait_util.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
+#include "intel_engine_heartbeat.h"
+#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
+#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_reset.h"
#include "intel_ring.h"
#include "shmem_utils.h"
-#include "intel_engine_heartbeat.h"
-#include "intel_engine_pm.h"
-#include "intel_gt_print.h"
/* Rough estimate of the typical request size, performing a flush,
* set-context and then emitting the batch.
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 0b35fdd461d4..4da94098bd3e 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -9,9 +9,12 @@
#include "display/intel_display.h"
#include "display/intel_display_rps.h"
+#include "soc/intel_dram.h"
+
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
+#include "i915_wait_util.h"
#include "intel_breadcrumbs.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
@@ -276,20 +279,24 @@ static void gen5_rps_init(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
struct intel_uncore *uncore = rps_to_uncore(rps);
+ unsigned int fsb_freq, mem_freq;
u8 fmax, fmin, fstart;
u32 rgvmodectl;
int c_m, i;
- if (i915->fsb_freq <= 3200000)
+ fsb_freq = intel_fsb_freq(i915);
+ mem_freq = intel_mem_freq(i915);
+
+ if (fsb_freq <= 3200000)
c_m = 0;
- else if (i915->fsb_freq <= 4800000)
+ else if (fsb_freq <= 4800000)
c_m = 1;
else
c_m = 2;
for (i = 0; i < ARRAY_SIZE(cparams); i++) {
if (cparams[i].i == c_m &&
- cparams[i].t == DIV_ROUND_CLOSEST(i915->mem_freq, 1000)) {
+ cparams[i].t == DIV_ROUND_CLOSEST(mem_freq, 1000)) {
rps->ips.m = cparams[i].m;
rps->ips.c = cparams[i].c;
break;
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.h b/drivers/gpu/drm/i915/gt/intel_timeline.h
index 57308c4d664a..85b43f9b9d95 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.h
@@ -9,6 +9,7 @@
#include <linux/lockdep.h>
#include "i915_active.h"
+#include "i915_list_util.h"
#include "i915_syncmap.h"
#include "intel_timeline_types.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index b37e400f74e5..7d486dfa2fc1 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -337,12 +337,26 @@ static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
+
+ /* WaDisable_RenderCache_OperationalFlush:snb */
+ wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
}
static void gen7_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
+ /* WaDisable_RenderCache_OperationalFlush:ivb,vlv,hsw */
+ wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
+
+ /*
+ * WaDisable4x2SubspanOptimization:ivb,hsw
+ *
+ * BSpec says this must be set even though the workaround
+ * isn't listed for VLV.
+ */
+ wa_masked_en(wal,
+ CACHE_MODE_1,
+ PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
}
static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -634,6 +648,8 @@ static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
+ struct drm_i915_private *i915 = engine->i915;
+
/* Wa_1406697149 (WaDisableBankHangMode:icl) */
wa_write(wal, GEN8_L3CNTLREG, GEN8_ERRDETBCTRL);
@@ -669,6 +685,15 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
/* Wa_1406306137:icl,ehl */
wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);
+
+ if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) {
+ /*
+ * Disable Repacking for Compression (masked R/W access)
+ * before rendering compressed surfaces for display.
+ */
+ wa_masked_en(wal, CACHE_MODE_0_GEN7,
+ DISABLE_REPACKING_FOR_COMPRESSION);
+ }
}
/*
@@ -2306,15 +2331,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN8_RC_SEMA_IDLE_MSG_DISABLE);
}
- if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) {
- /*
- * "Disable Repacking for Compression (masked R/W access)
- * before rendering compressed surfaces for display."
- */
- wa_masked_en(wal, CACHE_MODE_0_GEN7,
- DISABLE_REPACKING_FOR_COMPRESSION);
- }
-
if (GRAPHICS_VER(i915) == 11) {
/* This is not a Wa. Enable for better image quality */
wa_masked_en(wal,
@@ -2565,18 +2581,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
RING_MODE_GEN7(RENDER_RING_BASE),
GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE);
- /* WaDisable_RenderCache_OperationalFlush:ivb,vlv,hsw */
- wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
-
- /*
- * BSpec says this must be set, even though
- * WaDisable4x2SubspanOptimization:ivb,hsw
- * WaDisable4x2SubspanOptimization isn't listed for VLV.
- */
- wa_masked_en(wal,
- CACHE_MODE_1,
- PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
-
/*
* BSpec recommends 8x4 when MSAA is used,
* however in practice 16x4 seems fastest.
@@ -2643,9 +2647,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN6_WIZ_HASHING_MASK,
GEN6_WIZ_HASHING_16x4);
- /* WaDisable_RenderCache_OperationalFlush:snb */
- wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
-
/*
* From the Sandybridge PRM, volume 1 part 3, page 24:
* "If this bit is set, STCunit will have LRA as replacement
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index f057c16410e7..4f252f704975 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -904,7 +904,7 @@ static void active_engine(struct kthread_work *work)
arg->result = PTR_ERR(ce[count]);
pr_err("[%s] Create context #%ld failed: %d!\n",
engine->name, count, arg->result);
- while (--count)
+ while (count--)
intel_context_put(ce[count]);
return;
}
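
The unwind fix above matters because the contexts created so far live in ce[0]..ce[count - 1]: "while (--count)" releases ce[count - 1] down to ce[1] and leaks ce[0], whereas "while (count--)" walks all the way to ce[0]. A standalone demonstration of the two loop shapes:

#include <stdio.h>

/* Demo: unwinding 'count' successfully created items. The --count
 * form stops before index 0 and leaks it; count-- releases them all. */
static void put(int i)
{
	printf("put %d\n", i);
}

int main(void)
{
	int count = 3;	/* items 0..2 exist; creating item 3 failed */

	while (count--)	/* prints: put 2, put 1, put 0 */
		put(count);
	return 0;
}
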
diff --git a/drivers/gpu/drm/i915/gt/selftest_tlb.c b/drivers/gpu/drm/i915/gt/selftest_tlb.c
index 69ed946a39e5..a5184f09d1de 100644
--- a/drivers/gpu/drm/i915/gt/selftest_tlb.c
+++ b/drivers/gpu/drm/i915/gt/selftest_tlb.c
@@ -3,17 +3,17 @@
* Copyright © 2022 Intel Corporation
*/
-#include "i915_selftest.h"
-
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gen8_engine_cs.h"
#include "i915_gem_ww.h"
+#include "i915_selftest.h"
+#include "i915_wait_util.h"
+#include "intel_context.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
-#include "intel_context.h"
#include "intel_gt.h"
#include "intel_ring.h"
diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.c b/drivers/gpu/drm/i915/gt/sysfs_engines.c
index aab2759067d2..4a81bc396b21 100644
--- a/drivers/gpu/drm/i915/gt/sysfs_engines.c
+++ b/drivers/gpu/drm/i915/gt/sysfs_engines.c
@@ -7,6 +7,7 @@
#include <linux/sysfs.h>
#include "i915_drv.h"
+#include "i915_timer_util.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "sysfs_engines.h"
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
index d8edd7c054c8..e7444ebc373e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
@@ -10,11 +10,13 @@
#include "gt/intel_gt.h"
#include "gt/intel_gt_print.h"
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "i915_wait_util.h"
#include "intel_gsc_proxy.h"
#include "intel_gsc_uc.h"
#include "intel_gsc_uc_heci_cmd_submit.h"
-#include "i915_drv.h"
-#include "i915_reg.h"
/*
* GSC proxy:
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
index 2fde5c360cff..9bd29be7656f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
@@ -8,6 +8,8 @@
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_ring.h"
+
+#include "i915_wait_util.h"
#include "intel_gsc_uc_heci_cmd_submit.h"
struct gsc_heci_pkt {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index f360f020d8f1..52ec4421a211 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -8,15 +8,17 @@
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_gt_regs.h"
+
+#include "i915_drv.h"
+#include "i915_irq.h"
+#include "i915_reg.h"
+#include "i915_wait_util.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_capture.h"
#include "intel_guc_print.h"
#include "intel_guc_slpc.h"
#include "intel_guc_submission.h"
-#include "i915_drv.h"
-#include "i915_irq.h"
-#include "i915_reg.h"
/**
* DOC: GuC
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index 0d5197c0824a..3e7e5badcc2b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -5,15 +5,16 @@
#include <linux/circ_buf.h>
#include <linux/ktime.h>
-#include <linux/time64.h>
#include <linux/string_helpers.h>
+#include <linux/time64.h>
#include <linux/timekeeping.h>
#include "i915_drv.h"
+#include "i915_wait_util.h"
#include "intel_guc_ct.h"
#include "intel_guc_print.h"
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
enum {
CT_DEAD_ALIVE = 0,
CT_DEAD_SETUP,
@@ -144,7 +145,7 @@ void intel_guc_ct_init_early(struct intel_guc_ct *ct)
spin_lock_init(&ct->requests.lock);
INIT_LIST_HEAD(&ct->requests.pending);
INIT_LIST_HEAD(&ct->requests.incoming);
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
INIT_WORK(&ct->dead_ct_worker, ct_dead_ct_worker_func);
#endif
INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
@@ -373,7 +374,7 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
ct->enabled = true;
ct->stall_time = KTIME_MAX;
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
ct->dead_ct_reported = false;
ct->dead_ct_reason = CT_DEAD_ALIVE;
#endif
@@ -1377,7 +1378,7 @@ void intel_guc_ct_print_info(struct intel_guc_ct *ct,
ct->ctbs.recv.desc->tail);
}
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
static void ct_dead_ct_worker_func(struct work_struct *w)
{
struct intel_guc_ct *ct = container_of(w, struct intel_guc_ct, dead_ct_worker);
@@ -1386,6 +1387,9 @@ static void ct_dead_ct_worker_func(struct work_struct *w)
if (ct->dead_ct_reported)
return;
+ if (i915_error_injected())
+ return;
+
ct->dead_ct_reported = true;
guc_info(guc, "CTB is dead - reason=0x%X\n", ct->dead_ct_reason);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
index 2c4bb9a941be..e9a6ec4e6d38 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
@@ -97,7 +97,7 @@ struct intel_guc_ct {
/** @stall_time: time of first time a CTB submission is stalled */
ktime_t stall_time;
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
int dead_ct_reason;
bool dead_ct_reported;
struct work_struct dead_ct_worker;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index e7ccfa520df3..b1bda1b84f0a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -13,9 +13,11 @@
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_rps.h"
+
+#include "i915_drv.h"
+#include "i915_wait_util.h"
#include "intel_guc_fw.h"
#include "intel_guc_print.h"
-#include "i915_drv.h"
static void guc_prepare_xfer(struct intel_gt *gt)
{
@@ -46,6 +48,14 @@ static void guc_prepare_xfer(struct intel_gt *gt)
/* allows for 5us (in 10ns units) before GT can go to RC6 */
intel_uncore_write(uncore, GUC_ARAT_C6DIS, 0x1FF);
}
+
+ /*
+ * Starting from IP 12.50 we need to enable the mirroring of GuC
+ * internal state to debug registers. This is always enabled on previous
+ * IPs.
+ */
+ if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 50))
+ intel_uncore_rmw(uncore, GUC_SHIM_CONTROL2, 0, GUC_ENABLE_DEBUG_REG);
}
static int guc_xfer_rsa_mmio(struct intel_uc_fw *guc_fw,
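
intel_uncore_rmw() above is a read-modify-write: the register is read once, the clear mask removed, the set mask ORed in, and the result written back. Passing 0 as the clear mask, as done for GUC_ENABLE_DEBUG_REG, simply sets the bit while preserving everything else. The value computation amounts to:

#include <linux/types.h>

/* Sketch of the value computation behind intel_uncore_rmw():
 * the written-back value is (old & ~clear) | set, so a zero clear
 * mask just sets bits. */
static inline u32 sketch_rmw(u32 old, u32 clear, u32 set)
{
	return (old & ~clear) | set;
}
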
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index 09a64f224c49..cdff48920ee6 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -6,6 +6,8 @@
#include <linux/debugfs.h>
#include <linux/string_helpers.h>
+#include <drm/drm_managed.h>
+
#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_irq.h"
@@ -511,7 +513,11 @@ static void guc_log_relay_unmap(struct intel_guc_log *log)
void intel_guc_log_init_early(struct intel_guc_log *log)
{
- mutex_init(&log->relay.lock);
+ struct intel_guc *guc = log_to_guc(log);
+ struct drm_i915_private *i915 = guc_to_i915(guc);
+
+ drmm_mutex_init(&i915->drm, &log->relay.lock);
+ drmm_mutex_init(&i915->drm, &log->guc_lock);
INIT_WORK(&log->relay.flush_work, copy_debug_logs_work);
log->relay.started = false;
}
@@ -677,7 +683,7 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX)
return -EINVAL;
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&log->guc_lock);
if (log->level == level)
goto out_unlock;
@@ -695,7 +701,7 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
log->level = level;
out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&log->guc_lock);
return ret;
}
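
drmm_mutex_init() ties mutex_destroy() to the drm_device's managed-release list, so neither relay.lock nor the new guc_lock needs explicit teardown, and the log-level setter now serializes on a lock scoped to the GuC log instead of the device-wide struct_mutex. A minimal sketch of the managed-mutex pattern (names illustrative):

#include <drm/drm_device.h>
#include <drm/drm_managed.h>
#include <linux/mutex.h>

struct sketch_log {
	struct mutex lock;	/* protects 'level' */
	u32 level;
};

/* Sketch: the mutex is torn down automatically when 'drm' goes away. */
static int sketch_log_init(struct drm_device *drm, struct sketch_log *log)
{
	return drmm_mutex_init(drm, &log->lock);
}
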
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
index 02127703be80..13cb93ad0710 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
@@ -42,6 +42,14 @@ enum {
struct intel_guc_log {
u32 level;
+ /*
+ * Protects concurrent access and modification of intel_guc_log->level.
+ *
+ * This lock replaces the legacy struct_mutex usage in the
+ * intel_guc_log subsystem.
+ */
+ struct mutex guc_lock;
+
/* Allocation settings */
struct {
s32 bytes; /* Size in bytes */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
index 3fd798837502..f73dab527547 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
@@ -96,6 +96,7 @@
#define GUC_GEN10_SHIM_WC_ENABLE (1<<21)
#define GUC_SHIM_CONTROL2 _MMIO(0xc068)
+#define GUC_ENABLE_DEBUG_REG (1<<11)
#define GUC_IS_PRIVILEGED (1<<29)
#define GSC_LOADS_HUC (1<<30)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index d5ee6e5e1443..fa9af08f9708 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -3,17 +3,20 @@
* Copyright © 2021 Intel Corporation
*/
-#include <drm/drm_cache.h>
#include <linux/string_helpers.h>
+#include <drm/drm_cache.h>
+
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_regs.h"
+#include "gt/intel_rps.h"
+
#include "i915_drv.h"
#include "i915_reg.h"
-#include "intel_guc_slpc.h"
+#include "i915_wait_util.h"
#include "intel_guc_print.h"
+#include "intel_guc_slpc.h"
#include "intel_mchbar_regs.h"
-#include "gt/intel_gt.h"
-#include "gt/intel_gt_regs.h"
-#include "gt/intel_rps.h"
/**
* DOC: SLPC - Dynamic Frequency management
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 127316d2c8aa..68f2b8d363ac 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -25,16 +25,16 @@
#include "gt/intel_mocs.h"
#include "gt/intel_ring.h"
+#include "i915_drv.h"
+#include "i915_irq.h"
+#include "i915_reg.h"
+#include "i915_trace.h"
+#include "i915_wait_util.h"
#include "intel_guc_ads.h"
#include "intel_guc_capture.h"
#include "intel_guc_print.h"
#include "intel_guc_submission.h"
-#include "i915_drv.h"
-#include "i915_reg.h"
-#include "i915_irq.h"
-#include "i915_trace.h"
-
/**
* DOC: GuC-based command submission
*
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index a91e23c22ea1..d432fdd69833 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1921,7 +1921,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
if (!bb)
return -ENOMEM;
- bb->ppgtt = (s->buf_addr_type == GTT_BUFFER) ? false : true;
+ bb->ppgtt = s->buf_addr_type != GTT_BUFFER;
/*
* The start_offset stores the batch buffer's start gma's
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index 673534f061ef..415422b5943c 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -194,9 +194,9 @@ void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_minor *minor = gvt->gt->i915->drm.primary;
+ struct dentry *debugfs_root = gvt->gt->i915->drm.debugfs_root;
- if (minor->debugfs_root && gvt->debugfs_root) {
+ if (debugfs_root && gvt->debugfs_root) {
debugfs_remove_recursive(vgpu->debugfs);
vgpu->debugfs = NULL;
}
@@ -208,9 +208,9 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu)
*/
void intel_gvt_debugfs_init(struct intel_gvt *gvt)
{
- struct drm_minor *minor = gvt->gt->i915->drm.primary;
+ struct dentry *debugfs_root = gvt->gt->i915->drm.debugfs_root;
- gvt->debugfs_root = debugfs_create_dir("gvt", minor->debugfs_root);
+ gvt->debugfs_root = debugfs_create_dir("gvt", debugfs_root);
debugfs_create_ulong("num_tracked_mmio", 0444, gvt->debugfs_root,
&gvt->mmio.num_tracked_mmio);
@@ -222,9 +222,9 @@ void intel_gvt_debugfs_init(struct intel_gvt *gvt)
*/
void intel_gvt_debugfs_clean(struct intel_gvt *gvt)
{
- struct drm_minor *minor = gvt->gt->i915->drm.primary;
+ struct dentry *debugfs_root = gvt->gt->i915->drm.debugfs_root;
- if (minor->debugfs_root) {
+ if (debugfs_root) {
debugfs_remove_recursive(gvt->debugfs_root);
gvt->debugfs_root = NULL;
}
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 2f7208843367..0b810baad20a 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -33,14 +33,16 @@
*
*/
-#include "i915_drv.h"
-#include "i915_reg.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_ring.h"
+
#include "gvt.h"
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "i915_wait_util.h"
#include "trace.h"
#define GEN9_MOCS_SIZE 64
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 0dbc4e289300..6b0c1162505a 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -257,10 +257,9 @@ static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
* claimed the cache and we know that it does not match our
* idx. If, and only if, the timeline is currently zero is it
* worth competing to claim it atomically for ourselves (for
- * only the winner of that race will cmpxchg return the old
- * value of 0).
+ * only the winner of that race will cmpxchg succeed).
*/
- if (!cached && !cmpxchg64(&it->timeline, 0, idx))
+ if (!cached && try_cmpxchg64(&it->timeline, &cached, idx))
return it;
}
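
cmpxchg64() returns the value it found, so success has to be inferred from comparing against the expected old value; try_cmpxchg64() instead returns a success boolean and writes the observed value back through its expected-value argument, which maps directly onto flag-returning compare-exchange instructions. In the hunk above 'cached' is already known to be zero, so it doubles as the expected value. A side-by-side sketch of the two idioms:

#include <linux/atomic.h>
#include <linux/types.h>

/* Sketch: claim a zeroed u64 slot by installing a non-zero id. */
static bool claim_slot_cmpxchg(u64 *slot, u64 id)
{
	/* cmpxchg64() hands back the prior value; 0 means we won. */
	return cmpxchg64(slot, 0, id) == 0;
}

static bool claim_slot_try(u64 *slot, u64 id)
{
	u64 expected = 0;

	/* Returns true on success; on failure 'expected' now holds
	 * the value that was actually in the slot. */
	return try_cmpxchg64(slot, &expected, id);
}
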
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 967c0501e91e..c2e38d4bcd01 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -26,11 +26,11 @@
*
*/
+#include <linux/debugfs.h>
#include <linux/sched/mm.h>
#include <linux/sort.h>
#include <linux/string_helpers.h>
-#include <linux/debugfs.h>
#include <drm/drm_debugfs.h>
#include "gem/i915_gem_context.h"
@@ -54,6 +54,7 @@
#include "i915_irq.h"
#include "i915_reg.h"
#include "i915_scheduler.h"
+#include "i915_wait_util.h"
#include "intel_mchbar_regs.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
@@ -720,26 +721,24 @@ static const struct i915_debugfs_files {
{"i915_gem_drop_caches", &i915_drop_caches_fops},
};
-void i915_debugfs_register(struct drm_i915_private *dev_priv)
+void i915_debugfs_register(struct drm_i915_private *i915)
{
- struct drm_minor *minor = dev_priv->drm.primary;
+ struct dentry *debugfs_root = i915->drm.debugfs_root;
int i;
- i915_debugfs_params(dev_priv);
+ i915_debugfs_params(i915);
- debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
- to_i915(minor->dev), &i915_forcewake_fops);
+ debugfs_create_file("i915_forcewake_user", S_IRUSR, debugfs_root,
+ i915, &i915_forcewake_fops);
for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
- debugfs_create_file(i915_debugfs_files[i].name,
- S_IRUGO | S_IWUSR,
- minor->debugfs_root,
- to_i915(minor->dev),
+ debugfs_create_file(i915_debugfs_files[i].name, S_IRUGO | S_IWUSR,
+ debugfs_root, i915,
i915_debugfs_files[i].fops);
}
drm_debugfs_create_files(i915_debugfs_list,
ARRAY_SIZE(i915_debugfs_list),
- minor->debugfs_root, minor);
+ debugfs_root, i915->drm.primary);
- i915_gpu_error_debugfs_register(dev_priv);
+ i915_gpu_error_debugfs_register(i915);
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs_params.c b/drivers/gpu/drm/i915/i915_debugfs_params.c
index 33d2dcb0de65..89ab5eb14779 100644
--- a/drivers/gpu/drm/i915/i915_debugfs_params.c
+++ b/drivers/gpu/drm/i915/i915_debugfs_params.c
@@ -248,11 +248,11 @@ i915_debugfs_create_charp(const char *name, umode_t mode,
/* add a subdirectory with files for each i915 param */
struct dentry *i915_debugfs_params(struct drm_i915_private *i915)
{
- struct drm_minor *minor = i915->drm.primary;
+ struct dentry *debugfs_root = i915->drm.debugfs_root;
struct i915_params *params = &i915->params;
struct dentry *dir;
- dir = debugfs_create_dir("i915_params", minor->debugfs_root);
+ dir = debugfs_create_dir("i915_params", debugfs_root);
if (IS_ERR(dir))
return dir;
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index c6263c6d3384..a28c3710c4d5 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -51,13 +51,15 @@
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
#include "display/intel_crtc.h"
-#include "display/intel_display_core.h"
+#include "display/intel_display_device.h"
#include "display/intel_display_driver.h"
+#include "display/intel_display_power.h"
#include "display/intel_dmc.h"
#include "display/intel_dp.h"
#include "display/intel_dpt.h"
#include "display/intel_encoder.h"
#include "display/intel_fbdev.h"
+#include "display/intel_gmbus.h"
#include "display/intel_hotplug.h"
#include "display/intel_opregion.h"
#include "display/intel_overlay.h"
@@ -977,7 +979,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_power_domains_disable(display);
drm_client_dev_suspend(&i915->drm, false);
- if (HAS_DISPLAY(i915)) {
+ if (intel_display_device_present(display)) {
drm_kms_helper_poll_disable(&i915->drm);
intel_display_driver_disable_user_access(display);
@@ -989,7 +991,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_irq_suspend(i915);
intel_hpd_cancel_work(display);
- if (HAS_DISPLAY(i915))
+ if (intel_display_device_present(display))
intel_display_driver_suspend_access(display);
intel_encoder_suspend_all(display);
@@ -1060,7 +1062,7 @@ static int i915_drm_suspend(struct drm_device *dev)
* properly. */
intel_power_domains_disable(display);
drm_client_dev_suspend(dev, false);
- if (HAS_DISPLAY(dev_priv)) {
+ if (intel_display_device_present(display)) {
drm_kms_helper_poll_disable(dev);
intel_display_driver_disable_user_access(display);
}
@@ -1072,7 +1074,7 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_irq_suspend(dev_priv);
intel_hpd_cancel_work(display);
- if (HAS_DISPLAY(dev_priv))
+ if (intel_display_device_present(display))
intel_display_driver_suspend_access(display);
intel_encoder_suspend_all(display);
@@ -1219,7 +1221,7 @@ static int i915_drm_resume(struct drm_device *dev)
*/
intel_irq_resume(dev_priv);
- if (HAS_DISPLAY(dev_priv))
+ if (intel_display_device_present(display))
drm_mode_config_reset(dev);
i915_gem_resume(dev_priv);
@@ -1228,14 +1230,14 @@ static int i915_drm_resume(struct drm_device *dev)
intel_clock_gating_init(dev_priv);
- if (HAS_DISPLAY(dev_priv))
+ if (intel_display_device_present(display))
intel_display_driver_resume_access(display);
intel_hpd_init(display);
intel_display_driver_resume(display);
- if (HAS_DISPLAY(dev_priv)) {
+ if (intel_display_device_present(display)) {
intel_display_driver_enable_user_access(display);
drm_kms_helper_poll_enable(dev);
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4e4e89746aa6..6a768aad8edd 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -114,8 +114,7 @@ struct i915_gem_mm {
struct intel_memory_region *stolen_region;
/** Memory allocator for GTT stolen memory */
struct drm_mm stolen;
- /** Protects the usage of the GTT stolen memory allocator. This is
- * always the inner lock when overlapping with struct_mutex. */
+ /** Protects the usage of the GTT stolen memory allocator */
struct mutex stolen_lock;
/* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
@@ -222,6 +221,9 @@ struct drm_i915_private {
bool irqs_enabled;
+ /* LPT/WPT IOSF sideband protection */
+ struct mutex sbi_lock;
+
/* VLV/CHV IOSF sideband */
struct {
struct mutex lock; /* protect sideband access */
@@ -237,8 +239,6 @@ struct drm_i915_private {
bool preserve_bios_swizzle;
- unsigned int fsb_freq, mem_freq, is_ddr3;
-
unsigned int hpll_freq;
unsigned int czclk_freq;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8c8d43451f35..e14a0c3db999 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -847,8 +847,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
/*
* Only called during RPM suspend. All users of the userfault_list
* must be holding an RPM wakeref to ensure that this can not
- * run concurrently with themselves (and use the struct_mutex for
- * protection between themselves).
+ * run concurrently with themselves.
*/
list_for_each_entry_safe(obj, on,
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 0e4b832dff84..7582ef34bf3f 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -685,6 +685,74 @@ static void err_print_guc_ctb(struct drm_i915_error_state_buf *m,
ctb->head, ctb->tail, ctb->desc_offset, ctb->cmds_offset, ctb->size);
}
+/* This list includes registers that are useful in debugging GuC hangs. */
+static const struct {
+ u32 start;
+ u32 count;
+} guc_hw_reg_state[] = {
+ { 0xc0b0, 2 },
+ { 0xc000, 65 },
+ { 0xc140, 1 },
+ { 0xc180, 16 },
+ { 0xc1dc, 10 },
+ { 0xc300, 79 },
+ { 0xc4b4, 47 },
+ { 0xc574, 1 },
+ { 0xc57c, 1 },
+ { 0xc584, 11 },
+ { 0xc5c0, 8 },
+ { 0xc5e4, 1 },
+ { 0xc5ec, 103 },
+ { 0xc7c0, 1 },
+ { 0xc0b0, 2 }
+};
+
+static u32 print_range_line(struct drm_i915_error_state_buf *m, u32 start, u32 *dump, u32 count)
+{
+ if (count >= 8) {
+ err_printf(m, "[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ start, dump[0], dump[1], dump[2], dump[3],
+ dump[4], dump[5], dump[6], dump[7]);
+ return 8;
+ } else if (count >= 4) {
+ err_printf(m, "[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ start, dump[0], dump[1], dump[2], dump[3]);
+ return 4;
+ } else if (count >= 2) {
+ err_printf(m, "[0x%04x] 0x%08x 0x%08x\n", start, dump[0], dump[1]);
+ return 2;
+ }
+
+ err_printf(m, "[0x%04x] 0x%08x\n", start, dump[0]);
+ return 1;
+}
+
+static void err_print_guc_hw_state(struct drm_i915_error_state_buf *m, u32 *hw_state)
+{
+ u32 total = 0;
+ int i;
+
+ if (!hw_state)
+ return;
+
+ err_printf(m, "GuC Register State:\n");
+
+ for (i = 0; i < ARRAY_SIZE(guc_hw_reg_state); i++) {
+ u32 entry = 0;
+
+ while (entry < guc_hw_reg_state[i].count) {
+ u32 start = guc_hw_reg_state[i].start + entry * sizeof(u32);
+ u32 count = guc_hw_reg_state[i].count - entry;
+ u32 *values = hw_state + total + entry;
+
+ entry += print_range_line(m, start, values, count);
+ }
+
+ GEM_BUG_ON(entry != guc_hw_reg_state[i].count);
+ total += entry;
+ }
+}
+
static void err_print_uc(struct drm_i915_error_state_buf *m,
const struct intel_uc_coredump *error_uc)
{
@@ -693,6 +761,7 @@ static void err_print_uc(struct drm_i915_error_state_buf *m,
intel_uc_fw_dump(&error_uc->guc_fw, &p);
intel_uc_fw_dump(&error_uc->huc_fw, &p);
err_printf(m, "GuC timestamp: 0x%08x\n", error_uc->guc.timestamp);
+ err_print_guc_hw_state(m, error_uc->guc.hw_state);
intel_gpu_error_print_vma(m, NULL, error_uc->guc.vma_log);
err_printf(m, "GuC CTB fence: %d\n", error_uc->guc.last_fence);
err_print_guc_ctb(m, "Send", error_uc->guc.ctb + 0);
@@ -1025,6 +1094,7 @@ static void cleanup_uc(struct intel_uc_coredump *uc)
kfree(uc->huc_fw.file_wanted.path);
i915_vma_coredump_free(uc->guc.vma_log);
i915_vma_coredump_free(uc->guc.vma_ctb);
+ kfree(uc->guc.hw_state);
kfree(uc);
}
@@ -1721,6 +1791,37 @@ static void gt_record_guc_ctb(struct intel_ctb_coredump *saved,
saved->cmds_offset = ((void *)ctb->cmds) - blob_ptr;
}
+static u32 read_guc_state_reg(struct intel_uncore *uncore, int range, int count)
+{
+ GEM_BUG_ON(range >= ARRAY_SIZE(guc_hw_reg_state));
+ GEM_BUG_ON(count >= guc_hw_reg_state[range].count);
+
+ return intel_uncore_read(uncore,
+ _MMIO(guc_hw_reg_state[range].start + count * sizeof(u32)));
+}
+
+static void gt_record_guc_hw_state(struct intel_uncore *uncore,
+ struct intel_uc_coredump *error_uc)
+{
+ u32 *hw_state;
+ u32 count = 0;
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(guc_hw_reg_state); i++)
+ count += guc_hw_reg_state[i].count;
+
+ hw_state = kcalloc(count, sizeof(u32), ALLOW_FAIL);
+ if (!hw_state)
+ return;
+
+ count = 0;
+ for (i = 0; i < ARRAY_SIZE(guc_hw_reg_state); i++)
+ for (j = 0; j < guc_hw_reg_state[i].count; j++)
+ hw_state[count++] = read_guc_state_reg(uncore, i, j);
+
+ error_uc->guc.hw_state = hw_state;
+}
+
static struct intel_uc_coredump *
gt_record_uc(struct intel_gt_coredump *gt,
struct i915_vma_compress *compress)
@@ -1755,6 +1856,7 @@ gt_record_uc(struct intel_gt_coredump *gt,
uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc);
gt_record_guc_ctb(error_uc->guc.ctb + 1, &uc->guc.ct.ctbs.recv,
uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc);
+ gt_record_guc_hw_state(gt->_gt->uncore, error_uc);
return error_uc;
}
@@ -2445,11 +2547,11 @@ static const struct file_operations i915_error_state_fops = {
void i915_gpu_error_debugfs_register(struct drm_i915_private *i915)
{
- struct drm_minor *minor = i915->drm.primary;
+ struct dentry *debugfs_root = i915->drm.debugfs_root;
- debugfs_create_file("i915_error_state", 0644, minor->debugfs_root, i915,
+ debugfs_create_file("i915_error_state", 0644, debugfs_root, i915,
&i915_error_state_fops);
- debugfs_create_file("i915_gpu_info", 0644, minor->debugfs_root, i915,
+ debugfs_create_file("i915_gpu_info", 0644, debugfs_root, i915,
&i915_gpu_info_fops);
}
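
print_range_line() above greedily emits the widest line (8, then 4, 2, 1 registers) that still fits the remaining count, so e.g. the 47-register range at 0xc4b4 prints as five 8-wide lines followed by one 4-wide, one 2-wide and one single line. A quick standalone check of that decomposition:

#include <stdio.h>

/* Demo of the greedy line widths used by print_range_line():
 * 47 decomposes as 8+8+8+8+8+4+2+1. */
int main(void)
{
	unsigned int count = 47;

	while (count) {
		unsigned int chunk = count >= 8 ? 8 :
				     count >= 4 ? 4 :
				     count >= 2 ? 2 : 1;

		printf("line of %u registers\n", chunk);
		count -= chunk;
	}
	return 0;
}
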
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 182324979278..91b3df621a49 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -177,6 +177,7 @@ struct intel_gt_coredump {
struct intel_ctb_coredump ctb[2];
struct i915_vma_coredump *vma_ctb;
struct i915_vma_coredump *vma_log;
+ u32 *hw_state;
u32 timestamp;
u16 last_fence;
bool is_guc_capture;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 191ed8bb1d9c..8d5da222a187 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -163,11 +163,6 @@ static void ivb_parity_work(struct work_struct *work)
u32 misccpctl;
u8 slice = 0;
- /* We must turn off DOP level clock gating to access the L3 registers.
- * In order to prevent a get/put style interface, acquire struct mutex
- * any time we access those registers.
- */
- mutex_lock(&dev_priv->drm.struct_mutex);
/* If we've screwed up tracking, just let the interrupt fire again */
if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
@@ -225,7 +220,6 @@ out:
gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
spin_unlock_irq(gt->irq_lock);
- mutex_unlock(&dev_priv->drm.struct_mutex);
}
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
@@ -439,7 +433,7 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
* able to process them after we restore SDEIER (as soon as we restore
* it, we'll get an interrupt if SDEIIR still has something to process
* due to its back queue). */
- if (!HAS_PCH_NOP(i915)) {
+ if (!HAS_PCH_NOP(display)) {
sde_ier = raw_reg_read(regs, SDEIER);
raw_reg_write(regs, SDEIER, 0);
}
@@ -459,7 +453,7 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
de_iir = raw_reg_read(regs, DEIIR);
if (de_iir) {
raw_reg_write(regs, DEIIR, de_iir);
- if (DISPLAY_VER(i915) >= 7)
+ if (DISPLAY_VER(display) >= 7)
ivb_display_irq_handler(display, de_iir);
else
ilk_display_irq_handler(display, de_iir);
@@ -834,6 +828,7 @@ static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
static u32 i9xx_error_mask(struct drm_i915_private *i915)
{
+ struct intel_display *display = i915->display;
/*
* On gen2/3 FBC generates (seemingly spurious)
* display INVALID_GTT/INVALID_GTT_PTE table errors.
@@ -846,7 +841,7 @@ static u32 i9xx_error_mask(struct drm_i915_private *i915)
* Unfortunately we can't mask off individual PGTBL_ER bits,
* so we just have to mask off all page table errors via EMR.
*/
- if (HAS_FBC(i915))
+ if (HAS_FBC(display))
return I915_ERROR_MEMORY_REFRESH;
else
return I915_ERROR_PAGE_TABLE |
@@ -924,12 +919,12 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
I915_MASTER_ERROR_INTERRUPT |
I915_USER_INTERRUPT;
- if (DISPLAY_VER(dev_priv) >= 3) {
+ if (DISPLAY_VER(display) >= 3) {
dev_priv->irq_mask &= ~I915_ASLE_INTERRUPT;
enable_mask |= I915_ASLE_INTERRUPT;
}
- if (HAS_HOTPLUG(dev_priv)) {
+ if (HAS_HOTPLUG(display)) {
dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
}
@@ -963,7 +958,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
ret = IRQ_HANDLED;
- if (HAS_HOTPLUG(dev_priv) &&
+ if (HAS_HOTPLUG(display) &&
iir & I915_DISPLAY_PORT_INTERRUPT)
hotplug_status = i9xx_hpd_irq_ack(display);
diff --git a/drivers/gpu/drm/i915/i915_list_util.h b/drivers/gpu/drm/i915/i915_list_util.h
new file mode 100644
index 000000000000..4e515dc8a3e0
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_list_util.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __I915_LIST_UTIL_H__
+#define __I915_LIST_UTIL_H__
+
+#include <linux/list.h>
+#include <asm/rwonce.h>
+
+static inline void __list_del_many(struct list_head *head,
+ struct list_head *first)
+{
+ first->prev = head;
+ WRITE_ONCE(head->next, first);
+}
+
+static inline int list_is_last_rcu(const struct list_head *list,
+ const struct list_head *head)
+{
+ return READ_ONCE(list->next) == head;
+}
+
+#endif /* __I915_LIST_UTIL_H__ */
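
These two helpers, moved here from i915_utils.h later in the series, support lockless list readers: __list_del_many() detaches every entry between head and first in a single WRITE_ONCE store (the caller still owns and must free the detached entries), and list_is_last_rcu() is the READ_ONCE counterpart for checking the tail. A hedged usage sketch:

#include "i915_list_util.h"

/* Sketch: retire the leading run of a list up to (not including)
 * 'first'; concurrent readers see head->next move in one store. */
static void sketch_retire_prefix(struct list_head *head,
				 struct list_head *first)
{
	__list_del_many(head, first);
	/* entries before 'first' are now only reachable by the caller */
}

/* Sketch: tail check safe against concurrent removal of 'list'. */
static bool sketch_is_tail(const struct list_head *list,
			   const struct list_head *head)
{
	return list_is_last_rcu(list, head);
}
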
diff --git a/drivers/gpu/drm/i915/i915_ptr_util.h b/drivers/gpu/drm/i915/i915_ptr_util.h
new file mode 100644
index 000000000000..9f8931d7d99b
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_ptr_util.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __I915_PTR_UTIL_H__
+#define __I915_PTR_UTIL_H__
+
+#include <linux/types.h>
+
+#define ptr_mask_bits(ptr, n) ({ \
+ unsigned long __v = (unsigned long)(ptr); \
+ (typeof(ptr))(__v & -BIT(n)); \
+})
+
+#define ptr_unmask_bits(ptr, n) ((unsigned long)(ptr) & (BIT(n) - 1))
+
+#define ptr_unpack_bits(ptr, bits, n) ({ \
+ unsigned long __v = (unsigned long)(ptr); \
+ *(bits) = __v & (BIT(n) - 1); \
+ (typeof(ptr))(__v & -BIT(n)); \
+})
+
+#define ptr_pack_bits(ptr, bits, n) ({ \
+ unsigned long __bits = (bits); \
+ GEM_BUG_ON(__bits & -BIT(n)); \
+ ((typeof(ptr))((unsigned long)(ptr) | __bits)); \
+})
+
+#define ptr_dec(ptr) ({ \
+ unsigned long __v = (unsigned long)(ptr); \
+ (typeof(ptr))(__v - 1); \
+})
+
+#define ptr_inc(ptr) ({ \
+ unsigned long __v = (unsigned long)(ptr); \
+ (typeof(ptr))(__v + 1); \
+})
+
+#define page_mask_bits(ptr) ptr_mask_bits(ptr, PAGE_SHIFT)
+#define page_unmask_bits(ptr) ptr_unmask_bits(ptr, PAGE_SHIFT)
+#define page_pack_bits(ptr, bits) ptr_pack_bits(ptr, bits, PAGE_SHIFT)
+#define page_unpack_bits(ptr, bits) ptr_unpack_bits(ptr, bits, PAGE_SHIFT)
+
+static __always_inline ptrdiff_t ptrdiff(const void *a, const void *b)
+{
+ return a - b;
+}
+
+#define u64_to_ptr(T, x) ({ \
+ typecheck(u64, x); \
+ (T *)(uintptr_t)(x); \
+})
+
+/*
+ * container_of_user: Extract the superclass from a pointer to a member.
+ *
+ * Exactly like container_of() with the exception that it plays nicely
+ * with sparse for __user @ptr.
+ */
+#define container_of_user(ptr, type, member) ({ \
+ void __user *__mptr = (void __user *)(ptr); \
+ BUILD_BUG_ON_MSG(!__same_type(*(ptr), typeof_member(type, member)) && \
+ !__same_type(*(ptr), void), \
+ "pointer type mismatch in container_of()"); \
+ ((type __user *)(__mptr - offsetof(type, member))); })
+
+#endif /* __I915_PTR_UTIL_H__ */
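
The pointer-packing macros moved into this header stash small values in the alignment bits of a pointer: ptr_pack_bits() ORs up to n low bits into the pointer (GEM_BUG_ON guards against overflow, so it must be in scope as in the i915 sources), and ptr_unpack_bits() recovers both halves. A hedged usage sketch with a hypothetical 4-byte-aligned object:

#include "i915_ptr_util.h"

/* Hypothetical object; 4-byte alignment leaves 2 low bits free. */
struct sketch_obj {
	u32 payload;
};

static void *sketch_pack(struct sketch_obj *obj, unsigned long state)
{
	return ptr_pack_bits(obj, state, 2);	/* state must fit 2 bits */
}

static struct sketch_obj *sketch_unpack(void *packed, unsigned long *state)
{
	return ptr_unpack_bits(packed, state, 2);
}
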
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 03b895897f60..354ef75ef6a5 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -412,9 +412,9 @@
#define FW_BLC _MMIO(0x20d8)
#define FW_BLC2 _MMIO(0x20dc)
#define FW_BLC_SELF _MMIO(0x20e0) /* 915+ only */
-#define FW_BLC_SELF_EN_MASK (1 << 31)
-#define FW_BLC_SELF_FIFO_MASK (1 << 16) /* 945 only */
-#define FW_BLC_SELF_EN (1 << 15) /* 945 only */
+#define FW_BLC_SELF_EN_MASK REG_BIT(31)
+#define FW_BLC_SELF_FIFO_MASK REG_BIT(16) /* 945 only */
+#define FW_BLC_SELF_EN REG_BIT(15) /* 945 only */
#define MM_BURST_LENGTH 0x00700000
#define MM_FIFO_WATERMARK 0x0001F000
#define LM_BURST_LENGTH 0x00000700
@@ -613,7 +613,8 @@
#define DSTATE_GFX_CLOCK_GATING (1 << 1)
#define DSTATE_DOT_CLOCK_GATING (1 << 0)
-#define DSPCLK_GATE_D(__i915) _MMIO(DISPLAY_MMIO_BASE(__i915) + 0x6200)
+#define DSPCLK_GATE_D _MMIO(0x6200)
+#define VLV_DSPCLK_GATE_D _MMIO(VLV_DISPLAY_BASE + 0x6200)
# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 5f7e8138ec14..b09135301f39 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -31,19 +31,20 @@
#include <linux/llist.h>
#include <linux/lockdep.h>
+#include <uapi/drm/i915_drm.h>
+
#include "gem/i915_gem_context_types.h"
#include "gt/intel_context_types.h"
#include "gt/intel_engine_types.h"
#include "gt/intel_timeline_types.h"
#include "i915_gem.h"
+#include "i915_ptr_util.h"
#include "i915_scheduler.h"
#include "i915_selftest.h"
#include "i915_sw_fence.h"
#include "i915_vma_resource.h"
-#include <uapi/drm/i915_drm.h>
-
struct drm_file;
struct drm_i915_gem_object;
struct drm_printer;
diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c
index 4c02a04be681..d5b6d8ab31a2 100644
--- a/drivers/gpu/drm/i915/i915_switcheroo.c
+++ b/drivers/gpu/drm/i915/i915_switcheroo.c
@@ -5,7 +5,7 @@
#include <linux/vga_switcheroo.h>
-#include "display/intel_display_core.h"
+#include "display/intel_display_device.h"
#include "i915_driver.h"
#include "i915_drv.h"
@@ -15,13 +15,14 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev,
enum vga_switcheroo_state state)
{
struct drm_i915_private *i915 = pdev_to_i915(pdev);
+ struct intel_display *display = i915 ? i915->display : NULL;
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
if (!i915) {
dev_err(&pdev->dev, "DRM not initialized, aborting switch.\n");
return;
}
- if (!HAS_DISPLAY(i915)) {
+ if (!intel_display_device_present(display)) {
dev_err(&pdev->dev, "Device state not initialized, aborting switch.\n");
return;
}
@@ -44,13 +45,15 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev,
static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
struct drm_i915_private *i915 = pdev_to_i915(pdev);
+ struct intel_display *display = i915 ? i915->display : NULL;
/*
* FIXME: open_count is protected by drm_global_mutex but that would lead to
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
- return i915 && HAS_DISPLAY(i915) && atomic_read(&i915->drm.open_count) == 0;
+ return i915 && intel_display_device_present(display) &&
+ atomic_read(&i915->drm.open_count) == 0;
}
static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
diff --git a/drivers/gpu/drm/i915/i915_timer_util.c b/drivers/gpu/drm/i915/i915_timer_util.c
new file mode 100644
index 000000000000..ee4cfd8b3c07
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_timer_util.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include <linux/jiffies.h>
+
+#include "i915_timer_util.h"
+
+void cancel_timer(struct timer_list *t)
+{
+ if (!timer_active(t))
+ return;
+
+ timer_delete(t);
+ WRITE_ONCE(t->expires, 0);
+}
+
+void set_timer_ms(struct timer_list *t, unsigned long timeout)
+{
+ if (!timeout) {
+ cancel_timer(t);
+ return;
+ }
+
+ timeout = msecs_to_jiffies(timeout);
+
+ /*
+ * Paranoia to make sure the compiler computes the timeout before
+ * loading 'jiffies' as jiffies is volatile and may be updated in
+ * the background by a timer tick. All to reduce the complexity
+ * of the addition and reduce the risk of losing a jiffy.
+ */
+ barrier();
+
+ /* Keep t->expires = 0 reserved to indicate a canceled timer. */
+ mod_timer(t, jiffies + timeout ?: 1);
+}
diff --git a/drivers/gpu/drm/i915/i915_timer_util.h b/drivers/gpu/drm/i915/i915_timer_util.h
new file mode 100644
index 000000000000..f35ad730820c
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_timer_util.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __I915_TIMER_UTIL_H__
+#define __I915_TIMER_UTIL_H__
+
+#include <linux/timer.h>
+#include <asm/rwonce.h>
+
+void cancel_timer(struct timer_list *t);
+void set_timer_ms(struct timer_list *t, unsigned long timeout);
+
+static inline bool timer_active(const struct timer_list *t)
+{
+ return READ_ONCE(t->expires);
+}
+
+static inline bool timer_expired(const struct timer_list *t)
+{
+ return timer_active(t) && !timer_pending(t);
+}
+
+#endif /* __I915_TIMER_UTIL_H__ */
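
One subtlety carried over with set_timer_ms(): expires == 0 is reserved to mean "cancelled", and since GNU C's binary ?: binds looser than +, "jiffies + timeout ?: 1" parses as "(jiffies + timeout) ?: 1", nudging the expiry to 1 only in the rare case the sum lands exactly on zero. A hedged usage sketch of the helpers split out of i915_utils:

#include <linux/timer.h>
#include "i915_timer_util.h"

/* Sketch: arm a timer for ~100 ms and poll its state. */
static void sketch_arm(struct timer_list *t)
{
	set_timer_ms(t, 100);		/* a timeout of 0 cancels instead */
}

static bool sketch_fired(const struct timer_list *t)
{
	/* true only once the timer was armed and has since expired */
	return timer_expired(t);
}
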
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
index b60c28fbd207..49f7ed413132 100644
--- a/drivers/gpu/drm/i915/i915_utils.c
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -47,36 +47,6 @@ bool i915_error_injected(void)
#endif
-void cancel_timer(struct timer_list *t)
-{
- if (!timer_active(t))
- return;
-
- timer_delete(t);
- WRITE_ONCE(t->expires, 0);
-}
-
-void set_timer_ms(struct timer_list *t, unsigned long timeout)
-{
- if (!timeout) {
- cancel_timer(t);
- return;
- }
-
- timeout = msecs_to_jiffies(timeout);
-
- /*
- * Paranoia to make sure the compiler computes the timeout before
- * loading 'jiffies' as jiffies is volatile and may be updated in
- * the background by a timer tick. All to reduce the complexity
- * of the addition and reduce the risk of losing a jiffy.
- */
- barrier();
-
- /* Keep t->expires = 0 reserved to indicate a canceled timer. */
- mod_timer(t, jiffies + timeout ?: 1);
-}
-
bool i915_vtd_active(struct drm_i915_private *i915)
{
if (device_iommu_mapped(i915->drm.dev))
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index f7fb40cfdb70..a0c892e4c40d 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -25,7 +25,6 @@
#ifndef __I915_UTILS_H
#define __I915_UTILS_H
-#include <linux/list.h>
#include <linux/overflow.h>
#include <linux/sched.h>
#include <linux/string_helpers.h>
@@ -38,7 +37,6 @@
#endif
struct drm_i915_private;
-struct timer_list;
#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
__stringify(x), (long)(x))
@@ -67,88 +65,12 @@ bool i915_error_injected(void);
drm_err(&(i915)->drm, fmt, ##__VA_ARGS__); \
})
-#define range_overflows(start, size, max) ({ \
- typeof(start) start__ = (start); \
- typeof(size) size__ = (size); \
- typeof(max) max__ = (max); \
- (void)(&start__ == &size__); \
- (void)(&start__ == &max__); \
- start__ >= max__ || size__ > max__ - start__; \
-})
-
-#define range_overflows_t(type, start, size, max) \
- range_overflows((type)(start), (type)(size), (type)(max))
-
-#define range_overflows_end(start, size, max) ({ \
- typeof(start) start__ = (start); \
- typeof(size) size__ = (size); \
- typeof(max) max__ = (max); \
- (void)(&start__ == &size__); \
- (void)(&start__ == &max__); \
- start__ > max__ || size__ > max__ - start__; \
-})
-
-#define range_overflows_end_t(type, start, size, max) \
- range_overflows_end((type)(start), (type)(size), (type)(max))
-
-#define ptr_mask_bits(ptr, n) ({ \
- unsigned long __v = (unsigned long)(ptr); \
- (typeof(ptr))(__v & -BIT(n)); \
-})
-
-#define ptr_unmask_bits(ptr, n) ((unsigned long)(ptr) & (BIT(n) - 1))
-
-#define ptr_unpack_bits(ptr, bits, n) ({ \
- unsigned long __v = (unsigned long)(ptr); \
- *(bits) = __v & (BIT(n) - 1); \
- (typeof(ptr))(__v & -BIT(n)); \
-})
-
-#define ptr_pack_bits(ptr, bits, n) ({ \
- unsigned long __bits = (bits); \
- GEM_BUG_ON(__bits & -BIT(n)); \
- ((typeof(ptr))((unsigned long)(ptr) | __bits)); \
-})
-
-#define ptr_dec(ptr) ({ \
- unsigned long __v = (unsigned long)(ptr); \
- (typeof(ptr))(__v - 1); \
-})
-
-#define ptr_inc(ptr) ({ \
- unsigned long __v = (unsigned long)(ptr); \
- (typeof(ptr))(__v + 1); \
-})
-
-#define page_mask_bits(ptr) ptr_mask_bits(ptr, PAGE_SHIFT)
-#define page_unmask_bits(ptr) ptr_unmask_bits(ptr, PAGE_SHIFT)
-#define page_pack_bits(ptr, bits) ptr_pack_bits(ptr, bits, PAGE_SHIFT)
-#define page_unpack_bits(ptr, bits) ptr_unpack_bits(ptr, bits, PAGE_SHIFT)
-
#define fetch_and_zero(ptr) ({ \
typeof(*ptr) __T = *(ptr); \
*(ptr) = (typeof(*ptr))0; \
__T; \
})
-static __always_inline ptrdiff_t ptrdiff(const void *a, const void *b)
-{
- return a - b;
-}
-
-/*
- * container_of_user: Extract the superclass from a pointer to a member.
- *
- * Exactly like container_of() with the exception that it plays nicely
- * with sparse for __user @ptr.
- */
-#define container_of_user(ptr, type, member) ({ \
- void __user *__mptr = (void __user *)(ptr); \
- BUILD_BUG_ON_MSG(!__same_type(*(ptr), typeof_member(type, member)) && \
- !__same_type(*(ptr), void), \
- "pointer type mismatch in container_of()"); \
- ((type __user *)(__mptr - offsetof(type, member))); })
-
/*
* check_user_mbz: Check that a user value exists and is zero
*
@@ -167,11 +89,6 @@ static __always_inline ptrdiff_t ptrdiff(const void *a, const void *b)
get_user(mbz__, (U)) ? -EFAULT : mbz__ ? -EINVAL : 0; \
})
-#define u64_to_ptr(T, x) ({ \
- typecheck(u64, x); \
- (T *)(uintptr_t)(x); \
-})
-
#define __mask_next_bit(mask) ({ \
int __idx = ffs(mask) - 1; \
mask &= ~BIT(__idx); \
@@ -183,19 +100,6 @@ static inline bool is_power_of_2_u64(u64 n)
return (n != 0 && ((n & (n - 1)) == 0));
}
-static inline void __list_del_many(struct list_head *head,
- struct list_head *first)
-{
- first->prev = head;
- WRITE_ONCE(head->next, first);
-}
-
-static inline int list_is_last_rcu(const struct list_head *list,
- const struct list_head *head)
-{
- return READ_ONCE(list->next) == head;
-}
-
static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
unsigned long j = msecs_to_jiffies(m);
@@ -230,107 +134,6 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
}
}
-/*
- * __wait_for - magic wait macro
- *
- * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
- * important that we check the condition again after having timed out, since the
- * timeout could be due to preemption or similar and we've never had a chance to
- * check the condition before the timeout.
- */
-#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
- const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
- long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
- int ret__; \
- might_sleep(); \
- for (;;) { \
- const bool expired__ = ktime_after(ktime_get_raw(), end__); \
- OP; \
- /* Guarantee COND check prior to timeout */ \
- barrier(); \
- if (COND) { \
- ret__ = 0; \
- break; \
- } \
- if (expired__) { \
- ret__ = -ETIMEDOUT; \
- break; \
- } \
- usleep_range(wait__, wait__ * 2); \
- if (wait__ < (Wmax)) \
- wait__ <<= 1; \
- } \
- ret__; \
-})
-
-#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
- (Wmax))
-#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
-
-/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG) && IS_ENABLED(CONFIG_PREEMPT_COUNT)
-# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
-#else
-# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
-#endif
-
-#define _wait_for_atomic(COND, US, ATOMIC) \
-({ \
- int cpu, ret, timeout = (US) * 1000; \
- u64 base; \
- _WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
- if (!(ATOMIC)) { \
- preempt_disable(); \
- cpu = smp_processor_id(); \
- } \
- base = local_clock(); \
- for (;;) { \
- u64 now = local_clock(); \
- if (!(ATOMIC)) \
- preempt_enable(); \
- /* Guarantee COND check prior to timeout */ \
- barrier(); \
- if (COND) { \
- ret = 0; \
- break; \
- } \
- if (now - base >= timeout) { \
- ret = -ETIMEDOUT; \
- break; \
- } \
- cpu_relax(); \
- if (!(ATOMIC)) { \
- preempt_disable(); \
- if (unlikely(cpu != smp_processor_id())) { \
- timeout -= now - base; \
- cpu = smp_processor_id(); \
- base = local_clock(); \
- } \
- } \
- } \
- ret; \
-})
-
-#define wait_for_us(COND, US) \
-({ \
- int ret__; \
- BUILD_BUG_ON(!__builtin_constant_p(US)); \
- if ((US) > 10) \
- ret__ = _wait_for((COND), (US), 10, 10); \
- else \
- ret__ = _wait_for_atomic((COND), (US), 0); \
- ret__; \
-})
-
-#define wait_for_atomic_us(COND, US) \
-({ \
- BUILD_BUG_ON(!__builtin_constant_p(US)); \
- BUILD_BUG_ON((US) > 50000); \
- _wait_for_atomic((COND), (US), 1); \
-})
-
-#define wait_for_atomic(COND, MS) wait_for_atomic_us((COND), (MS) * 1000)
-
#define KHz(x) (1000 * (x))
#define MHz(x) KHz(1000 * (x))
@@ -346,19 +149,6 @@ static inline void __add_taint_for_CI(unsigned int taint)
add_taint(taint, LOCKDEP_STILL_OK);
}
-void cancel_timer(struct timer_list *t);
-void set_timer_ms(struct timer_list *t, unsigned long timeout);
-
-static inline bool timer_active(const struct timer_list *t)
-{
- return READ_ONCE(t->expires);
-}
-
-static inline bool timer_expired(const struct timer_list *t)
-{
- return timer_active(t) && !timer_pending(t);
-}
-
static inline bool i915_run_as_guest(void)
{
#if IS_ENABLED(CONFIG_X86)
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 0f9eee6d18d2..8054047840aa 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -30,12 +30,12 @@
#include <drm/drm_mm.h>
-#include "gt/intel_ggtt_fencing.h"
#include "gem/i915_gem_object.h"
-
-#include "i915_gem_gtt.h"
+#include "gt/intel_ggtt_fencing.h"
#include "i915_active.h"
+#include "i915_gem_gtt.h"
+#include "i915_ptr_util.h"
#include "i915_request.h"
#include "i915_vma_resource.h"
#include "i915_vma_types.h"
diff --git a/drivers/gpu/drm/i915/i915_wait_util.h b/drivers/gpu/drm/i915/i915_wait_util.h
new file mode 100644
index 000000000000..7376898e3bf8
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_wait_util.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __I915_WAIT_UTIL_H__
+#define __I915_WAIT_UTIL_H__
+
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/sched/clock.h>
+#include <linux/smp.h>
+
+/*
+ * __wait_for - magic wait macro
+ *
+ * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
+ * important that we check the condition again after having timed out, since the
+ * timeout could be due to preemption or similar and we've never had a chance to
+ * check the condition before the timeout.
+ */
+#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
+ const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
+ long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
+ int ret__; \
+ might_sleep(); \
+ for (;;) { \
+ const bool expired__ = ktime_after(ktime_get_raw(), end__); \
+ OP; \
+ /* Guarantee COND check prior to timeout */ \
+ barrier(); \
+ if (COND) { \
+ ret__ = 0; \
+ break; \
+ } \
+ if (expired__) { \
+ ret__ = -ETIMEDOUT; \
+ break; \
+ } \
+ usleep_range(wait__, wait__ * 2); \
+ if (wait__ < (Wmax)) \
+ wait__ <<= 1; \
+ } \
+ ret__; \
+})
+
+#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
+ (Wmax))
+#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
+
+/*
+ * If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false.
+ * On PREEMPT_RT the context does not become atomic merely because it runs
+ * in an interrupt handler or because a spinlock_t is acquired. This leads
+ * to warnings that do not occur otherwise, so the check is disabled there.
+ */
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG) && IS_ENABLED(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT)
+# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
+#else
+# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
+#endif
+
+#define _wait_for_atomic(COND, US, ATOMIC) \
+({ \
+ int cpu, ret, timeout = (US) * 1000; \
+ u64 base; \
+ _WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
+ if (!(ATOMIC)) { \
+ preempt_disable(); \
+ cpu = smp_processor_id(); \
+ } \
+ base = local_clock(); \
+ for (;;) { \
+ u64 now = local_clock(); \
+ if (!(ATOMIC)) \
+ preempt_enable(); \
+ /* Guarantee COND check prior to timeout */ \
+ barrier(); \
+ if (COND) { \
+ ret = 0; \
+ break; \
+ } \
+ if (now - base >= timeout) { \
+ ret = -ETIMEDOUT; \
+ break; \
+ } \
+ cpu_relax(); \
+ if (!(ATOMIC)) { \
+ preempt_disable(); \
+ if (unlikely(cpu != smp_processor_id())) { \
+ timeout -= now - base; \
+ cpu = smp_processor_id(); \
+ base = local_clock(); \
+ } \
+ } \
+ } \
+ ret; \
+})
+
+#define wait_for_us(COND, US) \
+({ \
+ int ret__; \
+ BUILD_BUG_ON(!__builtin_constant_p(US)); \
+ if ((US) > 10) \
+ ret__ = _wait_for((COND), (US), 10, 10); \
+ else \
+ ret__ = _wait_for_atomic((COND), (US), 0); \
+ ret__; \
+})
+
+#define wait_for_atomic_us(COND, US) \
+({ \
+ BUILD_BUG_ON(!__builtin_constant_p(US)); \
+ BUILD_BUG_ON((US) > 50000); \
+ _wait_for_atomic((COND), (US), 1); \
+})
+
+#define wait_for_atomic(COND, MS) wait_for_atomic_us((COND), (MS) * 1000)
+
+#endif /* __I915_WAIT_UTIL_H__ */
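
A usage sketch (EXAMPLE_STATUS_REG and EXAMPLE_READY_BIT are hypothetical names): poll a register until a ready bit is set or 10 ms elapse. wait_for() may sleep, so atomic callers must use wait_for_atomic()/wait_for_atomic_us() instead:

	static int example_wait_ready(struct intel_uncore *uncore)
	{
		/* returns 0 once the bit is set, -ETIMEDOUT after 10 ms */
		return wait_for(intel_uncore_read(uncore, EXAMPLE_STATUS_REG) &
				EXAMPLE_READY_BIT, 10);
	}
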
diff --git a/drivers/gpu/drm/i915/intel_clock_gating.c b/drivers/gpu/drm/i915/intel_clock_gating.c
index f86a3629ae9e..467740969431 100644
--- a/drivers/gpu/drm/i915/intel_clock_gating.c
+++ b/drivers/gpu/drm/i915/intel_clock_gating.c
@@ -132,16 +132,17 @@ static void ibx_init_clock_gating(struct drm_i915_private *i915)
static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = dev_priv->display;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
- intel_uncore_rmw(&dev_priv->uncore, DSPCNTR(dev_priv, pipe),
+ for_each_pipe(display, pipe) {
+ intel_uncore_rmw(&dev_priv->uncore, DSPCNTR(display, pipe),
0, DISP_TRICKLE_FEED_DISABLE);
- intel_uncore_rmw(&dev_priv->uncore, DSPSURF(dev_priv, pipe),
+ intel_uncore_rmw(&dev_priv->uncore, DSPSURF(display, pipe),
0, 0);
intel_uncore_posting_read(&dev_priv->uncore,
- DSPSURF(dev_priv, pipe));
+ DSPSURF(display, pipe));
}
}
@@ -218,7 +219,7 @@ static void cpt_init_clock_gating(struct drm_i915_private *i915)
/* The below fixes the weird display corruption, a few pixels shifted
* downward, on (only) LVDS of some HP laptops with IVY.
*/
- for_each_pipe(i915, pipe) {
+ for_each_pipe(display, pipe) {
val = intel_uncore_read(&i915->uncore, TRANS_CHICKEN2(pipe));
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
@@ -229,7 +230,7 @@ static void cpt_init_clock_gating(struct drm_i915_private *i915)
intel_uncore_write(&i915->uncore, TRANS_CHICKEN2(pipe), val);
}
/* WADP0ClockGatingDisable */
- for_each_pipe(i915, pipe) {
+ for_each_pipe(display, pipe) {
intel_uncore_write(&i915->uncore, TRANS_CHICKEN1(pipe),
TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
@@ -307,11 +308,13 @@ static void gen6_init_clock_gating(struct drm_i915_private *i915)
static void lpt_init_clock_gating(struct drm_i915_private *i915)
{
+ struct intel_display *display = i915->display;
+
/*
* TODO: this bit should only be enabled when really needed, then
* disabled when not needed anymore in order to save power.
*/
- if (HAS_PCH_LPT_LP(i915))
+ if (HAS_PCH_LPT_LP(display))
intel_uncore_rmw(&i915->uncore, SOUTH_DSPCLK_GATE_D,
0, PCH_LP_PARTITION_LEVEL_DISABLE);
@@ -355,7 +358,9 @@ static void dg2_init_clock_gating(struct drm_i915_private *i915)
static void cnp_init_clock_gating(struct drm_i915_private *i915)
{
- if (!HAS_PCH_CNP(i915))
+ struct intel_display *display = i915->display;
+
+ if (!HAS_PCH_CNP(display))
return;
/* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */
@@ -421,6 +426,7 @@ static void skl_init_clock_gating(struct drm_i915_private *i915)
static void bdw_init_clock_gating(struct drm_i915_private *i915)
{
+ struct intel_display *display = i915->display;
enum pipe pipe;
/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
@@ -432,7 +438,7 @@ static void bdw_init_clock_gating(struct drm_i915_private *i915)
/* WaPsrDPAMaskVBlankInSRD:bdw */
intel_uncore_rmw(&i915->uncore, CHICKEN_PAR1_1, 0, HSW_MASK_VBL_TO_PIPE_IN_SRD);
- for_each_pipe(i915, pipe) {
+ for_each_pipe(display, pipe) {
/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
intel_uncore_rmw(&i915->uncore, CHICKEN_PIPESL_1(pipe),
0, BDW_UNMASK_VBL_TO_REGS_IN_SRD);
@@ -468,6 +474,7 @@ static void bdw_init_clock_gating(struct drm_i915_private *i915)
static void hsw_init_clock_gating(struct drm_i915_private *i915)
{
+ struct intel_display *display = i915->display;
enum pipe pipe;
/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
@@ -476,7 +483,7 @@ static void hsw_init_clock_gating(struct drm_i915_private *i915)
/* WaPsrDPAMaskVBlankInSRD:hsw */
intel_uncore_rmw(&i915->uncore, CHICKEN_PAR1_1, 0, HSW_MASK_VBL_TO_PIPE_IN_SRD);
- for_each_pipe(i915, pipe) {
+ for_each_pipe(display, pipe) {
/* WaPsrDPRSUnmaskVBlankInSRD:hsw */
intel_uncore_rmw(&i915->uncore, CHICKEN_PIPESL_1(pipe),
0, HSW_UNMASK_VBL_TO_REGS_IN_SRD);
@@ -494,6 +501,8 @@ static void hsw_init_clock_gating(struct drm_i915_private *i915)
static void ivb_init_clock_gating(struct drm_i915_private *i915)
{
+ struct intel_display *display = i915->display;
+
intel_uncore_write(&i915->uncore, ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
/* WaFbcAsynchFlipDisableFbcQueue:ivb */
@@ -531,7 +540,7 @@ static void ivb_init_clock_gating(struct drm_i915_private *i915)
intel_uncore_rmw(&i915->uncore, GEN6_MBCUNIT_SNPCR, GEN6_MBC_SNPCR_MASK,
GEN6_MBC_SNPCR_MED);
- if (!HAS_PCH_NOP(i915))
+ if (!HAS_PCH_NOP(display))
cpt_init_clock_gating(i915);
gen6_check_mch_setup(i915);
@@ -611,7 +620,7 @@ static void g4x_init_clock_gating(struct drm_i915_private *i915)
OVCUNIT_CLOCK_GATE_DISABLE;
if (IS_GM45(i915))
dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
- intel_uncore_write(&i915->uncore, DSPCLK_GATE_D(i915), dspclk_gate);
+ intel_uncore_write(&i915->uncore, DSPCLK_GATE_D, dspclk_gate);
g4x_disable_trickle_feed(i915);
}
@@ -622,7 +631,7 @@ static void i965gm_init_clock_gating(struct drm_i915_private *i915)
intel_uncore_write(uncore, RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
intel_uncore_write(uncore, RENCLK_GATE_D2, 0);
- intel_uncore_write(uncore, DSPCLK_GATE_D(i915), 0);
+ intel_uncore_write(uncore, DSPCLK_GATE_D, 0);
intel_uncore_write(uncore, RAMCLK_GATE_D, 0);
intel_uncore_write16(uncore, DEUC, 0);
intel_uncore_write(uncore,
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
index 87ac4446d306..ca57a3dd3148 100644
--- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -62,6 +62,7 @@
static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter)
{
struct drm_i915_private *dev_priv = iter->i915;
+ struct intel_display *display = dev_priv->display;
MMIO_RING_D(RING_IMR);
MMIO_D(SDEIMR);
@@ -133,38 +134,38 @@ static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(_MMIO(0x650b4));
MMIO_D(_MMIO(0xc4040));
MMIO_D(DERRMR);
- MMIO_D(PIPEDSL(dev_priv, PIPE_A));
- MMIO_D(PIPEDSL(dev_priv, PIPE_B));
- MMIO_D(PIPEDSL(dev_priv, PIPE_C));
- MMIO_D(PIPEDSL(dev_priv, _PIPE_EDP));
- MMIO_D(TRANSCONF(dev_priv, TRANSCODER_A));
- MMIO_D(TRANSCONF(dev_priv, TRANSCODER_B));
- MMIO_D(TRANSCONF(dev_priv, TRANSCODER_C));
- MMIO_D(TRANSCONF(dev_priv, TRANSCODER_EDP));
- MMIO_D(PIPESTAT(dev_priv, PIPE_A));
- MMIO_D(PIPESTAT(dev_priv, PIPE_B));
- MMIO_D(PIPESTAT(dev_priv, PIPE_C));
- MMIO_D(PIPESTAT(dev_priv, _PIPE_EDP));
- MMIO_D(PIPE_FLIPCOUNT_G4X(dev_priv, PIPE_A));
- MMIO_D(PIPE_FLIPCOUNT_G4X(dev_priv, PIPE_B));
- MMIO_D(PIPE_FLIPCOUNT_G4X(dev_priv, PIPE_C));
- MMIO_D(PIPE_FLIPCOUNT_G4X(dev_priv, _PIPE_EDP));
- MMIO_D(PIPE_FRMCOUNT_G4X(dev_priv, PIPE_A));
- MMIO_D(PIPE_FRMCOUNT_G4X(dev_priv, PIPE_B));
- MMIO_D(PIPE_FRMCOUNT_G4X(dev_priv, PIPE_C));
- MMIO_D(PIPE_FRMCOUNT_G4X(dev_priv, _PIPE_EDP));
- MMIO_D(CURCNTR(dev_priv, PIPE_A));
- MMIO_D(CURCNTR(dev_priv, PIPE_B));
- MMIO_D(CURCNTR(dev_priv, PIPE_C));
- MMIO_D(CURPOS(dev_priv, PIPE_A));
- MMIO_D(CURPOS(dev_priv, PIPE_B));
- MMIO_D(CURPOS(dev_priv, PIPE_C));
- MMIO_D(CURBASE(dev_priv, PIPE_A));
- MMIO_D(CURBASE(dev_priv, PIPE_B));
- MMIO_D(CURBASE(dev_priv, PIPE_C));
- MMIO_D(CUR_FBC_CTL(dev_priv, PIPE_A));
- MMIO_D(CUR_FBC_CTL(dev_priv, PIPE_B));
- MMIO_D(CUR_FBC_CTL(dev_priv, PIPE_C));
+ MMIO_D(PIPEDSL(display, PIPE_A));
+ MMIO_D(PIPEDSL(display, PIPE_B));
+ MMIO_D(PIPEDSL(display, PIPE_C));
+ MMIO_D(PIPEDSL(display, _PIPE_EDP));
+ MMIO_D(TRANSCONF(display, TRANSCODER_A));
+ MMIO_D(TRANSCONF(display, TRANSCODER_B));
+ MMIO_D(TRANSCONF(display, TRANSCODER_C));
+ MMIO_D(TRANSCONF(display, TRANSCODER_EDP));
+ MMIO_D(PIPESTAT(display, PIPE_A));
+ MMIO_D(PIPESTAT(display, PIPE_B));
+ MMIO_D(PIPESTAT(display, PIPE_C));
+ MMIO_D(PIPESTAT(display, _PIPE_EDP));
+ MMIO_D(PIPE_FLIPCOUNT_G4X(display, PIPE_A));
+ MMIO_D(PIPE_FLIPCOUNT_G4X(display, PIPE_B));
+ MMIO_D(PIPE_FLIPCOUNT_G4X(display, PIPE_C));
+ MMIO_D(PIPE_FLIPCOUNT_G4X(display, _PIPE_EDP));
+ MMIO_D(PIPE_FRMCOUNT_G4X(display, PIPE_A));
+ MMIO_D(PIPE_FRMCOUNT_G4X(display, PIPE_B));
+ MMIO_D(PIPE_FRMCOUNT_G4X(display, PIPE_C));
+ MMIO_D(PIPE_FRMCOUNT_G4X(display, _PIPE_EDP));
+ MMIO_D(CURCNTR(display, PIPE_A));
+ MMIO_D(CURCNTR(display, PIPE_B));
+ MMIO_D(CURCNTR(display, PIPE_C));
+ MMIO_D(CURPOS(display, PIPE_A));
+ MMIO_D(CURPOS(display, PIPE_B));
+ MMIO_D(CURPOS(display, PIPE_C));
+ MMIO_D(CURBASE(display, PIPE_A));
+ MMIO_D(CURBASE(display, PIPE_B));
+ MMIO_D(CURBASE(display, PIPE_C));
+ MMIO_D(CUR_FBC_CTL(display, PIPE_A));
+ MMIO_D(CUR_FBC_CTL(display, PIPE_B));
+ MMIO_D(CUR_FBC_CTL(display, PIPE_C));
MMIO_D(_MMIO(0x700ac));
MMIO_D(_MMIO(0x710ac));
MMIO_D(_MMIO(0x720ac));
@@ -172,32 +173,32 @@ static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(_MMIO(0x70094));
MMIO_D(_MMIO(0x70098));
MMIO_D(_MMIO(0x7009c));
- MMIO_D(DSPCNTR(dev_priv, PIPE_A));
- MMIO_D(DSPADDR(dev_priv, PIPE_A));
- MMIO_D(DSPSTRIDE(dev_priv, PIPE_A));
- MMIO_D(DSPPOS(dev_priv, PIPE_A));
- MMIO_D(DSPSIZE(dev_priv, PIPE_A));
- MMIO_D(DSPSURF(dev_priv, PIPE_A));
- MMIO_D(DSPOFFSET(dev_priv, PIPE_A));
- MMIO_D(DSPSURFLIVE(dev_priv, PIPE_A));
+ MMIO_D(DSPCNTR(display, PIPE_A));
+ MMIO_D(DSPADDR(display, PIPE_A));
+ MMIO_D(DSPSTRIDE(display, PIPE_A));
+ MMIO_D(DSPPOS(display, PIPE_A));
+ MMIO_D(DSPSIZE(display, PIPE_A));
+ MMIO_D(DSPSURF(display, PIPE_A));
+ MMIO_D(DSPOFFSET(display, PIPE_A));
+ MMIO_D(DSPSURFLIVE(display, PIPE_A));
MMIO_D(REG_50080(PIPE_A, PLANE_PRIMARY));
- MMIO_D(DSPCNTR(dev_priv, PIPE_B));
- MMIO_D(DSPADDR(dev_priv, PIPE_B));
- MMIO_D(DSPSTRIDE(dev_priv, PIPE_B));
- MMIO_D(DSPPOS(dev_priv, PIPE_B));
- MMIO_D(DSPSIZE(dev_priv, PIPE_B));
- MMIO_D(DSPSURF(dev_priv, PIPE_B));
- MMIO_D(DSPOFFSET(dev_priv, PIPE_B));
- MMIO_D(DSPSURFLIVE(dev_priv, PIPE_B));
+ MMIO_D(DSPCNTR(display, PIPE_B));
+ MMIO_D(DSPADDR(display, PIPE_B));
+ MMIO_D(DSPSTRIDE(display, PIPE_B));
+ MMIO_D(DSPPOS(display, PIPE_B));
+ MMIO_D(DSPSIZE(display, PIPE_B));
+ MMIO_D(DSPSURF(display, PIPE_B));
+ MMIO_D(DSPOFFSET(display, PIPE_B));
+ MMIO_D(DSPSURFLIVE(display, PIPE_B));
MMIO_D(REG_50080(PIPE_B, PLANE_PRIMARY));
- MMIO_D(DSPCNTR(dev_priv, PIPE_C));
- MMIO_D(DSPADDR(dev_priv, PIPE_C));
- MMIO_D(DSPSTRIDE(dev_priv, PIPE_C));
- MMIO_D(DSPPOS(dev_priv, PIPE_C));
- MMIO_D(DSPSIZE(dev_priv, PIPE_C));
- MMIO_D(DSPSURF(dev_priv, PIPE_C));
- MMIO_D(DSPOFFSET(dev_priv, PIPE_C));
- MMIO_D(DSPSURFLIVE(dev_priv, PIPE_C));
+ MMIO_D(DSPCNTR(display, PIPE_C));
+ MMIO_D(DSPADDR(display, PIPE_C));
+ MMIO_D(DSPSTRIDE(display, PIPE_C));
+ MMIO_D(DSPPOS(display, PIPE_C));
+ MMIO_D(DSPSIZE(display, PIPE_C));
+ MMIO_D(DSPSURF(display, PIPE_C));
+ MMIO_D(DSPOFFSET(display, PIPE_C));
+ MMIO_D(DSPSURFLIVE(display, PIPE_C));
MMIO_D(REG_50080(PIPE_C, PLANE_PRIMARY));
MMIO_D(SPRCTL(PIPE_A));
MMIO_D(SPRLINOFF(PIPE_A));
@@ -238,73 +239,73 @@ static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(SPRSCALE(PIPE_C));
MMIO_D(SPRSURFLIVE(PIPE_C));
MMIO_D(REG_50080(PIPE_C, PLANE_SPRITE0));
- MMIO_D(TRANS_HTOTAL(dev_priv, TRANSCODER_A));
- MMIO_D(TRANS_HBLANK(dev_priv, TRANSCODER_A));
- MMIO_D(TRANS_HSYNC(dev_priv, TRANSCODER_A));
- MMIO_D(TRANS_VTOTAL(dev_priv, TRANSCODER_A));
- MMIO_D(TRANS_VBLANK(dev_priv, TRANSCODER_A));
- MMIO_D(TRANS_VSYNC(dev_priv, TRANSCODER_A));
- MMIO_D(BCLRPAT(dev_priv, TRANSCODER_A));
- MMIO_D(TRANS_VSYNCSHIFT(dev_priv, TRANSCODER_A));
- MMIO_D(PIPESRC(dev_priv, TRANSCODER_A));
- MMIO_D(TRANS_HTOTAL(dev_priv, TRANSCODER_B));
- MMIO_D(TRANS_HBLANK(dev_priv, TRANSCODER_B));
- MMIO_D(TRANS_HSYNC(dev_priv, TRANSCODER_B));
- MMIO_D(TRANS_VTOTAL(dev_priv, TRANSCODER_B));
- MMIO_D(TRANS_VBLANK(dev_priv, TRANSCODER_B));
- MMIO_D(TRANS_VSYNC(dev_priv, TRANSCODER_B));
- MMIO_D(BCLRPAT(dev_priv, TRANSCODER_B));
- MMIO_D(TRANS_VSYNCSHIFT(dev_priv, TRANSCODER_B));
- MMIO_D(PIPESRC(dev_priv, TRANSCODER_B));
- MMIO_D(TRANS_HTOTAL(dev_priv, TRANSCODER_C));
- MMIO_D(TRANS_HBLANK(dev_priv, TRANSCODER_C));
- MMIO_D(TRANS_HSYNC(dev_priv, TRANSCODER_C));
- MMIO_D(TRANS_VTOTAL(dev_priv, TRANSCODER_C));
- MMIO_D(TRANS_VBLANK(dev_priv, TRANSCODER_C));
- MMIO_D(TRANS_VSYNC(dev_priv, TRANSCODER_C));
- MMIO_D(BCLRPAT(dev_priv, TRANSCODER_C));
- MMIO_D(TRANS_VSYNCSHIFT(dev_priv, TRANSCODER_C));
- MMIO_D(PIPESRC(dev_priv, TRANSCODER_C));
- MMIO_D(TRANS_HTOTAL(dev_priv, TRANSCODER_EDP));
- MMIO_D(TRANS_HBLANK(dev_priv, TRANSCODER_EDP));
- MMIO_D(TRANS_HSYNC(dev_priv, TRANSCODER_EDP));
- MMIO_D(TRANS_VTOTAL(dev_priv, TRANSCODER_EDP));
- MMIO_D(TRANS_VBLANK(dev_priv, TRANSCODER_EDP));
- MMIO_D(TRANS_VSYNC(dev_priv, TRANSCODER_EDP));
- MMIO_D(BCLRPAT(dev_priv, TRANSCODER_EDP));
- MMIO_D(TRANS_VSYNCSHIFT(dev_priv, TRANSCODER_EDP));
- MMIO_D(PIPE_DATA_M1(dev_priv, TRANSCODER_A));
- MMIO_D(PIPE_DATA_N1(dev_priv, TRANSCODER_A));
- MMIO_D(PIPE_DATA_M2(dev_priv, TRANSCODER_A));
- MMIO_D(PIPE_DATA_N2(dev_priv, TRANSCODER_A));
- MMIO_D(PIPE_LINK_M1(dev_priv, TRANSCODER_A));
- MMIO_D(PIPE_LINK_N1(dev_priv, TRANSCODER_A));
- MMIO_D(PIPE_LINK_M2(dev_priv, TRANSCODER_A));
- MMIO_D(PIPE_LINK_N2(dev_priv, TRANSCODER_A));
- MMIO_D(PIPE_DATA_M1(dev_priv, TRANSCODER_B));
- MMIO_D(PIPE_DATA_N1(dev_priv, TRANSCODER_B));
- MMIO_D(PIPE_DATA_M2(dev_priv, TRANSCODER_B));
- MMIO_D(PIPE_DATA_N2(dev_priv, TRANSCODER_B));
- MMIO_D(PIPE_LINK_M1(dev_priv, TRANSCODER_B));
- MMIO_D(PIPE_LINK_N1(dev_priv, TRANSCODER_B));
- MMIO_D(PIPE_LINK_M2(dev_priv, TRANSCODER_B));
- MMIO_D(PIPE_LINK_N2(dev_priv, TRANSCODER_B));
- MMIO_D(PIPE_DATA_M1(dev_priv, TRANSCODER_C));
- MMIO_D(PIPE_DATA_N1(dev_priv, TRANSCODER_C));
- MMIO_D(PIPE_DATA_M2(dev_priv, TRANSCODER_C));
- MMIO_D(PIPE_DATA_N2(dev_priv, TRANSCODER_C));
- MMIO_D(PIPE_LINK_M1(dev_priv, TRANSCODER_C));
- MMIO_D(PIPE_LINK_N1(dev_priv, TRANSCODER_C));
- MMIO_D(PIPE_LINK_M2(dev_priv, TRANSCODER_C));
- MMIO_D(PIPE_LINK_N2(dev_priv, TRANSCODER_C));
- MMIO_D(PIPE_DATA_M1(dev_priv, TRANSCODER_EDP));
- MMIO_D(PIPE_DATA_N1(dev_priv, TRANSCODER_EDP));
- MMIO_D(PIPE_DATA_M2(dev_priv, TRANSCODER_EDP));
- MMIO_D(PIPE_DATA_N2(dev_priv, TRANSCODER_EDP));
- MMIO_D(PIPE_LINK_M1(dev_priv, TRANSCODER_EDP));
- MMIO_D(PIPE_LINK_N1(dev_priv, TRANSCODER_EDP));
- MMIO_D(PIPE_LINK_M2(dev_priv, TRANSCODER_EDP));
- MMIO_D(PIPE_LINK_N2(dev_priv, TRANSCODER_EDP));
+ MMIO_D(TRANS_HTOTAL(display, TRANSCODER_A));
+ MMIO_D(TRANS_HBLANK(display, TRANSCODER_A));
+ MMIO_D(TRANS_HSYNC(display, TRANSCODER_A));
+ MMIO_D(TRANS_VTOTAL(display, TRANSCODER_A));
+ MMIO_D(TRANS_VBLANK(display, TRANSCODER_A));
+ MMIO_D(TRANS_VSYNC(display, TRANSCODER_A));
+ MMIO_D(BCLRPAT(display, TRANSCODER_A));
+ MMIO_D(TRANS_VSYNCSHIFT(display, TRANSCODER_A));
+ MMIO_D(PIPESRC(display, TRANSCODER_A));
+ MMIO_D(TRANS_HTOTAL(display, TRANSCODER_B));
+ MMIO_D(TRANS_HBLANK(display, TRANSCODER_B));
+ MMIO_D(TRANS_HSYNC(display, TRANSCODER_B));
+ MMIO_D(TRANS_VTOTAL(display, TRANSCODER_B));
+ MMIO_D(TRANS_VBLANK(display, TRANSCODER_B));
+ MMIO_D(TRANS_VSYNC(display, TRANSCODER_B));
+ MMIO_D(BCLRPAT(display, TRANSCODER_B));
+ MMIO_D(TRANS_VSYNCSHIFT(display, TRANSCODER_B));
+ MMIO_D(PIPESRC(display, TRANSCODER_B));
+ MMIO_D(TRANS_HTOTAL(display, TRANSCODER_C));
+ MMIO_D(TRANS_HBLANK(display, TRANSCODER_C));
+ MMIO_D(TRANS_HSYNC(display, TRANSCODER_C));
+ MMIO_D(TRANS_VTOTAL(display, TRANSCODER_C));
+ MMIO_D(TRANS_VBLANK(display, TRANSCODER_C));
+ MMIO_D(TRANS_VSYNC(display, TRANSCODER_C));
+ MMIO_D(BCLRPAT(display, TRANSCODER_C));
+ MMIO_D(TRANS_VSYNCSHIFT(display, TRANSCODER_C));
+ MMIO_D(PIPESRC(display, TRANSCODER_C));
+ MMIO_D(TRANS_HTOTAL(display, TRANSCODER_EDP));
+ MMIO_D(TRANS_HBLANK(display, TRANSCODER_EDP));
+ MMIO_D(TRANS_HSYNC(display, TRANSCODER_EDP));
+ MMIO_D(TRANS_VTOTAL(display, TRANSCODER_EDP));
+ MMIO_D(TRANS_VBLANK(display, TRANSCODER_EDP));
+ MMIO_D(TRANS_VSYNC(display, TRANSCODER_EDP));
+ MMIO_D(BCLRPAT(display, TRANSCODER_EDP));
+ MMIO_D(TRANS_VSYNCSHIFT(display, TRANSCODER_EDP));
+ MMIO_D(PIPE_DATA_M1(display, TRANSCODER_A));
+ MMIO_D(PIPE_DATA_N1(display, TRANSCODER_A));
+ MMIO_D(PIPE_DATA_M2(display, TRANSCODER_A));
+ MMIO_D(PIPE_DATA_N2(display, TRANSCODER_A));
+ MMIO_D(PIPE_LINK_M1(display, TRANSCODER_A));
+ MMIO_D(PIPE_LINK_N1(display, TRANSCODER_A));
+ MMIO_D(PIPE_LINK_M2(display, TRANSCODER_A));
+ MMIO_D(PIPE_LINK_N2(display, TRANSCODER_A));
+ MMIO_D(PIPE_DATA_M1(display, TRANSCODER_B));
+ MMIO_D(PIPE_DATA_N1(display, TRANSCODER_B));
+ MMIO_D(PIPE_DATA_M2(display, TRANSCODER_B));
+ MMIO_D(PIPE_DATA_N2(display, TRANSCODER_B));
+ MMIO_D(PIPE_LINK_M1(display, TRANSCODER_B));
+ MMIO_D(PIPE_LINK_N1(display, TRANSCODER_B));
+ MMIO_D(PIPE_LINK_M2(display, TRANSCODER_B));
+ MMIO_D(PIPE_LINK_N2(display, TRANSCODER_B));
+ MMIO_D(PIPE_DATA_M1(display, TRANSCODER_C));
+ MMIO_D(PIPE_DATA_N1(display, TRANSCODER_C));
+ MMIO_D(PIPE_DATA_M2(display, TRANSCODER_C));
+ MMIO_D(PIPE_DATA_N2(display, TRANSCODER_C));
+ MMIO_D(PIPE_LINK_M1(display, TRANSCODER_C));
+ MMIO_D(PIPE_LINK_N1(display, TRANSCODER_C));
+ MMIO_D(PIPE_LINK_M2(display, TRANSCODER_C));
+ MMIO_D(PIPE_LINK_N2(display, TRANSCODER_C));
+ MMIO_D(PIPE_DATA_M1(display, TRANSCODER_EDP));
+ MMIO_D(PIPE_DATA_N1(display, TRANSCODER_EDP));
+ MMIO_D(PIPE_DATA_M2(display, TRANSCODER_EDP));
+ MMIO_D(PIPE_DATA_N2(display, TRANSCODER_EDP));
+ MMIO_D(PIPE_LINK_M1(display, TRANSCODER_EDP));
+ MMIO_D(PIPE_LINK_N1(display, TRANSCODER_EDP));
+ MMIO_D(PIPE_LINK_M2(display, TRANSCODER_EDP));
+ MMIO_D(PIPE_LINK_N2(display, TRANSCODER_EDP));
MMIO_D(PF_CTL(PIPE_A));
MMIO_D(PF_WIN_SZ(PIPE_A));
MMIO_D(PF_WIN_POS(PIPE_A));
@@ -513,12 +514,12 @@ static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(GAMMA_MODE(PIPE_A));
MMIO_D(GAMMA_MODE(PIPE_B));
MMIO_D(GAMMA_MODE(PIPE_C));
- MMIO_D(TRANS_MULT(dev_priv, TRANSCODER_A));
- MMIO_D(TRANS_MULT(dev_priv, TRANSCODER_B));
- MMIO_D(TRANS_MULT(dev_priv, TRANSCODER_C));
- MMIO_D(HSW_TVIDEO_DIP_CTL(dev_priv, TRANSCODER_A));
- MMIO_D(HSW_TVIDEO_DIP_CTL(dev_priv, TRANSCODER_B));
- MMIO_D(HSW_TVIDEO_DIP_CTL(dev_priv, TRANSCODER_C));
+ MMIO_D(TRANS_MULT(display, TRANSCODER_A));
+ MMIO_D(TRANS_MULT(display, TRANSCODER_B));
+ MMIO_D(TRANS_MULT(display, TRANSCODER_C));
+ MMIO_D(HSW_TVIDEO_DIP_CTL(display, TRANSCODER_A));
+ MMIO_D(HSW_TVIDEO_DIP_CTL(display, TRANSCODER_B));
+ MMIO_D(HSW_TVIDEO_DIP_CTL(display, TRANSCODER_C));
MMIO_D(SFUSE_STRAP);
MMIO_D(SBI_ADDR);
MMIO_D(SBI_DATA);
@@ -1111,6 +1112,7 @@ static int iterate_skl_plus_mmio(struct intel_gvt_mmio_table_iter *iter)
static int iterate_bxt_mmio(struct intel_gvt_mmio_table_iter *iter)
{
struct drm_i915_private *dev_priv = iter->i915;
+ struct intel_display *display = dev_priv->display;
MMIO_F(_MMIO(0x80000), 0x3000);
MMIO_D(GEN7_SAMPLER_INSTDONE);
@@ -1242,9 +1244,9 @@ static int iterate_bxt_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(BXT_DSI_PLL_ENABLE);
MMIO_D(GEN9_CLKGATE_DIS_0);
MMIO_D(GEN9_CLKGATE_DIS_4);
- MMIO_D(HSW_TVIDEO_DIP_GCP(dev_priv, TRANSCODER_A));
- MMIO_D(HSW_TVIDEO_DIP_GCP(dev_priv, TRANSCODER_B));
- MMIO_D(HSW_TVIDEO_DIP_GCP(dev_priv, TRANSCODER_C));
+ MMIO_D(HSW_TVIDEO_DIP_GCP(display, TRANSCODER_A));
+ MMIO_D(HSW_TVIDEO_DIP_GCP(display, TRANSCODER_B));
+ MMIO_D(HSW_TVIDEO_DIP_GCP(display, TRANSCODER_C));
MMIO_D(RC6_CTX_BASE);
MMIO_D(GEN8_PUSHBUS_CONTROL);
MMIO_D(GEN8_PUSHBUS_ENABLE);
diff --git a/drivers/gpu/drm/i915/intel_pcode.c b/drivers/gpu/drm/i915/intel_pcode.c
index 81da75108c60..55ffedad2490 100644
--- a/drivers/gpu/drm/i915/intel_pcode.c
+++ b/drivers/gpu/drm/i915/intel_pcode.c
@@ -5,6 +5,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_wait_util.h"
#include "intel_pcode.h"
static int gen6_check_mailbox_status(u32 mbox)
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index c8e29fd72290..8cb59f8d1f4c 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -21,19 +21,20 @@
* IN THE SOFTWARE.
*/
-#include <drm/drm_managed.h>
#include <linux/pm_runtime.h>
-#include "display/intel_display_core.h"
+#include <drm/drm_managed.h>
-#include "gt/intel_gt.h"
+#include "display/intel_display_core.h"
#include "gt/intel_engine_regs.h"
+#include "gt/intel_gt.h"
#include "gt/intel_gt_regs.h"
#include "i915_drv.h"
#include "i915_iosf_mbi.h"
#include "i915_reg.h"
#include "i915_vgpu.h"
+#include "i915_wait_util.h"
#include "intel_uncore_trace.h"
#define FORCEWAKE_ACK_TIMEOUT_MS 50
@@ -2502,6 +2503,7 @@ static int sanity_check_mmio_access(struct intel_uncore *uncore)
int intel_uncore_init_mmio(struct intel_uncore *uncore)
{
struct drm_i915_private *i915 = uncore->i915;
+ struct intel_display *display = i915->display;
int ret;
ret = sanity_check_mmio_access(uncore);
@@ -2536,7 +2538,7 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);
- if (HAS_FPGA_DBG_UNCLAIMED(i915))
+ if (HAS_FPGA_DBG_UNCLAIMED(display))
uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c
index f8da693ad3ce..27d545c4e6a5 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c
@@ -2,15 +2,15 @@
/*
* Copyright(c) 2020 Intel Corporation.
*/
+
#include <linux/workqueue.h>
#include "gem/i915_gem_context.h"
-
#include "gt/intel_context.h"
#include "gt/intel_gt.h"
#include "i915_drv.h"
-
+#include "i915_wait_util.h"
#include "intel_pxp.h"
#include "intel_pxp_gsccs.h"
#include "intel_pxp_irq.h"
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
index e07c5b380789..545f79eb0cc5 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
@@ -69,17 +69,17 @@ DEFINE_SIMPLE_ATTRIBUTE(pxp_terminate_fops, pxp_terminate_get, pxp_terminate_set
void intel_pxp_debugfs_register(struct intel_pxp *pxp)
{
- struct drm_minor *minor;
+ struct dentry *debugfs_root;
struct dentry *pxproot;
if (!intel_pxp_is_supported(pxp))
return;
- minor = pxp->ctrl_gt->i915->drm.primary;
- if (!minor->debugfs_root)
+ debugfs_root = pxp->ctrl_gt->i915->drm.debugfs_root;
+ if (!debugfs_root)
return;
- pxproot = debugfs_create_dir("pxp", minor->debugfs_root);
+ pxproot = debugfs_create_dir("pxp", debugfs_root);
if (IS_ERR(pxproot))
return;
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 2fb7a9e7efec..48cd617247d1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -22,14 +22,13 @@
*
*/
-#include <linux/prime_numbers.h>
#include <linux/pm_qos.h>
+#include <linux/prime_numbers.h>
#include <linux/sort.h>
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_pm.h"
#include "gem/selftests/mock_context.h"
-
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
@@ -40,11 +39,11 @@
#include "i915_random.h"
#include "i915_selftest.h"
+#include "i915_wait_util.h"
#include "igt_flush_test.h"
#include "igt_live_test.h"
#include "igt_spinner.h"
#include "lib_sw_fence.h"
-
#include "mock_drm.h"
#include "mock_gem_device.h"
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index 889281819c5b..9c276c9d0a75 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -31,7 +31,7 @@
#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_selftest.h"
-
+#include "i915_wait_util.h"
#include "igt_flush_test.h"
struct i915_selftest i915_selftest __read_mostly = {
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 8c3e1f20e5a1..820364171ebe 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -3,12 +3,13 @@
*
* Copyright © 2018 Intel Corporation
*/
-#include "gt/intel_gpu_commands.h"
-#include "gt/intel_gt.h"
#include "gem/i915_gem_internal.h"
#include "gem/selftests/igt_gem_utils.h"
+#include "gt/intel_gpu_commands.h"
+#include "gt/intel_gt.h"
+#include "i915_wait_util.h"
#include "igt_spinner.h"
int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index 41eaa9b7f67d..58bcbdcef563 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -277,13 +277,15 @@ static int live_forcewake_domains(void *arg)
#define FW_RANGE 0x40000
struct intel_gt *gt = arg;
struct intel_uncore *uncore = gt->uncore;
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_display *display = i915->display;
unsigned long *valid;
u32 offset;
int err;
- if (!HAS_FPGA_DBG_UNCLAIMED(gt->i915) &&
- !IS_VALLEYVIEW(gt->i915) &&
- !IS_CHERRYVIEW(gt->i915))
+ if (!HAS_FPGA_DBG_UNCLAIMED(display) &&
+ !IS_VALLEYVIEW(i915) &&
+ !IS_CHERRYVIEW(i915))
return 0;
/*
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c
index deb159548a09..edffaed8f9a7 100644
--- a/drivers/gpu/drm/i915/soc/intel_dram.c
+++ b/drivers/gpu/drm/i915/soc/intel_dram.c
@@ -11,6 +11,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_dram.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
@@ -30,10 +31,11 @@ struct dram_channel_info {
#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type
-static const char *intel_dram_type_str(enum intel_dram_type type)
+const char *intel_dram_type_str(enum intel_dram_type type)
{
static const char * const str[] = {
DRAM_TYPE_STR(UNKNOWN),
+ DRAM_TYPE_STR(DDR2),
DRAM_TYPE_STR(DDR3),
DRAM_TYPE_STR(DDR4),
DRAM_TYPE_STR(LPDDR3),
@@ -54,9 +56,10 @@ static const char *intel_dram_type_str(enum intel_dram_type type)
#undef DRAM_TYPE_STR
-static bool pnv_is_ddr3(struct drm_i915_private *i915)
+static enum intel_dram_type pnv_dram_type(struct drm_i915_private *i915)
{
- return intel_uncore_read(&i915->uncore, CSHRDDR3CTL) & CSHRDDR3CTL_DDR3;
+ return intel_uncore_read(&i915->uncore, CSHRDDR3CTL) & CSHRDDR3CTL_DDR3 ?
+ INTEL_DRAM_DDR3 : INTEL_DRAM_DDR2;
}
static unsigned int pnv_mem_freq(struct drm_i915_private *dev_priv)
@@ -135,25 +138,21 @@ static unsigned int vlv_mem_freq(struct drm_i915_private *i915)
return 0;
}
-static void detect_mem_freq(struct drm_i915_private *i915)
+unsigned int intel_mem_freq(struct drm_i915_private *i915)
{
if (IS_PINEVIEW(i915))
- i915->mem_freq = pnv_mem_freq(i915);
+ return pnv_mem_freq(i915);
else if (GRAPHICS_VER(i915) == 5)
- i915->mem_freq = ilk_mem_freq(i915);
+ return ilk_mem_freq(i915);
else if (IS_CHERRYVIEW(i915))
- i915->mem_freq = chv_mem_freq(i915);
+ return chv_mem_freq(i915);
else if (IS_VALLEYVIEW(i915))
- i915->mem_freq = vlv_mem_freq(i915);
-
- if (IS_PINEVIEW(i915))
- i915->is_ddr3 = pnv_is_ddr3(i915);
-
- if (i915->mem_freq)
- drm_dbg(&i915->drm, "DDR speed: %d kHz\n", i915->mem_freq);
+ return vlv_mem_freq(i915);
+ else
+ return 0;
}
-unsigned int i9xx_fsb_freq(struct drm_i915_private *i915)
+static unsigned int i9xx_fsb_freq(struct drm_i915_private *i915)
{
u32 fsb;
@@ -235,15 +234,30 @@ static unsigned int ilk_fsb_freq(struct drm_i915_private *dev_priv)
}
}
-static void detect_fsb_freq(struct drm_i915_private *i915)
+unsigned int intel_fsb_freq(struct drm_i915_private *i915)
{
if (GRAPHICS_VER(i915) == 5)
- i915->fsb_freq = ilk_fsb_freq(i915);
+ return ilk_fsb_freq(i915);
else if (GRAPHICS_VER(i915) == 3 || GRAPHICS_VER(i915) == 4)
- i915->fsb_freq = i9xx_fsb_freq(i915);
+ return i9xx_fsb_freq(i915);
+ else
+ return 0;
+}
- if (i915->fsb_freq)
- drm_dbg(&i915->drm, "FSB frequency: %d kHz\n", i915->fsb_freq);
+static int i915_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
+{
+ dram_info->fsb_freq = intel_fsb_freq(i915);
+ if (dram_info->fsb_freq)
+ drm_dbg(&i915->drm, "FSB frequency: %d kHz\n", dram_info->fsb_freq);
+
+ dram_info->mem_freq = intel_mem_freq(i915);
+ if (dram_info->mem_freq)
+ drm_dbg(&i915->drm, "DDR speed: %d kHz\n", dram_info->mem_freq);
+
+ if (IS_PINEVIEW(i915))
+ dram_info->type = pnv_dram_type(i915);
+
+ return 0;
}
static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
@@ -392,6 +406,9 @@ skl_dram_get_channels_info(struct drm_i915_private *i915, struct dram_info *dram
u32 val;
int ret;
+ /* Assume 16Gb DIMMs are present until proven otherwise */
+ dram_info->has_16gb_dimms = true;
+
val = intel_uncore_read(&i915->uncore,
SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
ret = skl_dram_get_channel_info(i915, &ch0, 0, val);
@@ -414,13 +431,16 @@ skl_dram_get_channels_info(struct drm_i915_private *i915, struct dram_info *dram
return -EINVAL;
}
- dram_info->wm_lv_0_adjust_needed = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
+ dram_info->has_16gb_dimms = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);
drm_dbg_kms(&i915->drm, "Memory configuration is symmetric? %s\n",
str_yes_no(dram_info->symmetric_memory));
+ drm_dbg_kms(&i915->drm, "16Gb DIMMs: %s\n",
+ str_yes_no(dram_info->has_16gb_dimms));
+
return 0;
}
@@ -649,8 +669,9 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv,
static int gen11_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
{
- int ret = skl_get_dram_info(i915, dram_info);
+ int ret;
+ ret = skl_dram_get_channels_info(i915, dram_info);
if (ret)
return ret;
@@ -659,8 +680,6 @@ static int gen11_get_dram_info(struct drm_i915_private *i915, struct dram_info *
static int gen12_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
{
- dram_info->wm_lv_0_adjust_needed = false;
-
return icl_pcode_read_mem_global_info(i915, dram_info);
}
@@ -709,13 +728,11 @@ static int xelpdp_get_dram_info(struct drm_i915_private *i915, struct dram_info
int intel_dram_detect(struct drm_i915_private *i915)
{
+ struct intel_display *display = i915->display;
struct dram_info *dram_info;
int ret;
- detect_fsb_freq(i915);
- detect_mem_freq(i915);
-
- if (GRAPHICS_VER(i915) < 9 || IS_DG2(i915) || !HAS_DISPLAY(i915))
+ if (IS_DG2(i915) || !intel_display_device_present(display))
return 0;
dram_info = drmm_kzalloc(&i915->drm, sizeof(*dram_info), GFP_KERNEL);
@@ -724,13 +741,7 @@ int intel_dram_detect(struct drm_i915_private *i915)
i915->dram_info = dram_info;
- /*
- * Assume level 0 watermark latency adjustment is needed until proven
- * otherwise, this w/a is not needed by bxt/glk.
- */
- dram_info->wm_lv_0_adjust_needed = !IS_BROXTON(i915) && !IS_GEMINILAKE(i915);
-
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
ret = xelpdp_get_dram_info(i915, dram_info);
else if (GRAPHICS_VER(i915) >= 12)
ret = gen12_get_dram_info(i915, dram_info);
@@ -738,23 +749,23 @@ int intel_dram_detect(struct drm_i915_private *i915)
ret = gen11_get_dram_info(i915, dram_info);
else if (IS_BROXTON(i915) || IS_GEMINILAKE(i915))
ret = bxt_get_dram_info(i915, dram_info);
- else
+ else if (GRAPHICS_VER(i915) >= 9)
ret = skl_get_dram_info(i915, dram_info);
+ else
+ ret = i915_get_dram_info(i915, dram_info);
drm_dbg_kms(&i915->drm, "DRAM type: %s\n",
intel_dram_type_str(dram_info->type));
+ drm_dbg_kms(&i915->drm, "DRAM channels: %u\n", dram_info->num_channels);
+
+ drm_dbg_kms(&i915->drm, "Num QGV points %u\n", dram_info->num_qgv_points);
+ drm_dbg_kms(&i915->drm, "Num PSF GV points %u\n", dram_info->num_psf_gv_points);
+
/* TODO: Do we want to abort probe on dram detection failures? */
if (ret)
return 0;
- drm_dbg_kms(&i915->drm, "Num qgv points %u\n", dram_info->num_qgv_points);
-
- drm_dbg_kms(&i915->drm, "DRAM channels: %u\n", dram_info->num_channels);
-
- drm_dbg_kms(&i915->drm, "Watermark level 0 adjustment needed: %s\n",
- str_yes_no(dram_info->wm_lv_0_adjust_needed));
-
return 0;
}
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.h b/drivers/gpu/drm/i915/soc/intel_dram.h
index 2a696e03aad4..03a973f1c941 100644
--- a/drivers/gpu/drm/i915/soc/intel_dram.h
+++ b/drivers/gpu/drm/i915/soc/intel_dram.h
@@ -12,11 +12,9 @@ struct drm_i915_private;
struct drm_device;
struct dram_info {
- bool wm_lv_0_adjust_needed;
- u8 num_channels;
- bool symmetric_memory;
enum intel_dram_type {
INTEL_DRAM_UNKNOWN,
+ INTEL_DRAM_DDR2,
INTEL_DRAM_DDR3,
INTEL_DRAM_DDR4,
INTEL_DRAM_LPDDR3,
@@ -27,13 +25,20 @@ struct dram_info {
INTEL_DRAM_GDDR_ECC,
__INTEL_DRAM_TYPE_MAX,
} type;
+ unsigned int fsb_freq;
+ unsigned int mem_freq;
+ u8 num_channels;
u8 num_qgv_points;
u8 num_psf_gv_points;
+ bool symmetric_memory;
+ bool has_16gb_dimms;
};
void intel_dram_edram_detect(struct drm_i915_private *i915);
int intel_dram_detect(struct drm_i915_private *i915);
-unsigned int i9xx_fsb_freq(struct drm_i915_private *i915);
+unsigned int intel_fsb_freq(struct drm_i915_private *i915);
+unsigned int intel_mem_freq(struct drm_i915_private *i915);
const struct dram_info *intel_dram_info(struct drm_device *drm);
+const char *intel_dram_type_str(enum intel_dram_type type);
#endif /* __INTEL_DRAM_H__ */
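
A sketch of a consumer of the reworked struct (hypothetical caller; this assumes intel_dram_info() can return NULL on platforms where detection is skipped, e.g. DG2 or no display present):

	static void example_log_dram(struct drm_i915_private *i915)
	{
		const struct dram_info *dram = intel_dram_info(&i915->drm);

		if (!dram)
			return; /* detection was skipped on this platform */

		drm_dbg(&i915->drm, "%s, %u channels, FSB %u kHz, DDR %u kHz\n",
			intel_dram_type_str(dram->type), dram->num_channels,
			dram->fsb_freq, dram->mem_freq);
	}
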
diff --git a/drivers/gpu/drm/i915/soc/intel_gmch.c b/drivers/gpu/drm/i915/soc/intel_gmch.c
index 5346b8dda79a..f210c9655b53 100644
--- a/drivers/gpu/drm/i915/soc/intel_gmch.c
+++ b/drivers/gpu/drm/i915/soc/intel_gmch.c
@@ -148,7 +148,8 @@ void intel_gmch_bar_teardown(struct drm_i915_private *i915)
int intel_gmch_vga_set_state(struct drm_i915_private *i915, bool enable_decode)
{
- unsigned int reg = DISPLAY_VER(i915) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
+ struct intel_display *display = i915->display;
+ unsigned int reg = DISPLAY_VER(display) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
u16 gmch_ctrl;
if (pci_read_config_word(i915->gmch.pdev, reg, &gmch_ctrl)) {
diff --git a/drivers/gpu/drm/i915/vlv_suspend.c b/drivers/gpu/drm/i915/vlv_suspend.c
index fc9f311ea1db..221e4c0b2c58 100644
--- a/drivers/gpu/drm/i915/vlv_suspend.c
+++ b/drivers/gpu/drm/i915/vlv_suspend.c
@@ -8,16 +8,17 @@
#include <drm/drm_print.h>
+#include "gt/intel_gt_regs.h"
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_trace.h"
#include "i915_utils.h"
+#include "i915_wait_util.h"
#include "intel_clock_gating.h"
#include "intel_uncore_trace.h"
#include "vlv_suspend.h"
-#include "gt/intel_gt_regs.h"
-
struct vlv_s0ix_state {
/* GAM */
u32 wr_watermark;
diff --git a/drivers/gpu/drm/imagination/Kconfig b/drivers/gpu/drm/imagination/Kconfig
index 3bfa2ac212dc..682dd2633d0c 100644
--- a/drivers/gpu/drm/imagination/Kconfig
+++ b/drivers/gpu/drm/imagination/Kconfig
@@ -3,8 +3,9 @@
config DRM_POWERVR
tristate "Imagination Technologies PowerVR (Series 6 and later) & IMG Graphics"
- depends on ARM64
+ depends on (ARM64 || RISCV && 64BIT)
depends on DRM
+ depends on MMU
depends on PM
select DRM_EXEC
select DRM_GEM_SHMEM_HELPER
diff --git a/drivers/gpu/drm/imagination/pvr_device.c b/drivers/gpu/drm/imagination/pvr_device.c
index 8b9ba4983c4c..294b6019b415 100644
--- a/drivers/gpu/drm/imagination/pvr_device.c
+++ b/drivers/gpu/drm/imagination/pvr_device.c
@@ -23,6 +23,7 @@
#include <linux/firmware.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
@@ -121,21 +122,6 @@ static int pvr_device_clk_init(struct pvr_device *pvr_dev)
return 0;
}
-static int pvr_device_reset_init(struct pvr_device *pvr_dev)
-{
- struct drm_device *drm_dev = from_pvr_device(pvr_dev);
- struct reset_control *reset;
-
- reset = devm_reset_control_get_optional_exclusive(drm_dev->dev, NULL);
- if (IS_ERR(reset))
- return dev_err_probe(drm_dev->dev, PTR_ERR(reset),
- "failed to get gpu reset line\n");
-
- pvr_dev->reset = reset;
-
- return 0;
-}
-
/**
* pvr_device_process_active_queues() - Process all queue related events.
* @pvr_dev: PowerVR device to check
@@ -618,6 +604,9 @@ pvr_device_init(struct pvr_device *pvr_dev)
struct device *dev = drm_dev->dev;
int err;
+ /* Get the platform-specific data based on the compatible string. */
+ pvr_dev->device_data = of_device_get_match_data(dev);
+
/*
* Setup device parameters. We do this first in case other steps
* depend on them.
@@ -631,8 +620,7 @@ pvr_device_init(struct pvr_device *pvr_dev)
if (err)
return err;
- /* Get the reset line for the GPU */
- err = pvr_device_reset_init(pvr_dev);
+ err = pvr_dev->device_data->pwr_ops->init(pvr_dev);
if (err)
return err;
diff --git a/drivers/gpu/drm/imagination/pvr_device.h b/drivers/gpu/drm/imagination/pvr_device.h
index 7cb01c38d2a9..ab8f56ae15df 100644
--- a/drivers/gpu/drm/imagination/pvr_device.h
+++ b/drivers/gpu/drm/imagination/pvr_device.h
@@ -37,6 +37,9 @@ struct clk;
/* Forward declaration from <linux/firmware.h>. */
struct firmware;
+/* Forward declaration from <linux/pwrseq/consumer.h> */
+struct pwrseq_desc;
+
/**
* struct pvr_gpu_id - Hardware GPU ID information for a PowerVR device
* @b: Branch ID.
@@ -58,6 +61,14 @@ struct pvr_fw_version {
};
/**
+ * struct pvr_device_data - Platform specific data associated with a compatible string.
+ * @pwr_ops: Pointer to a structure with platform-specific power functions.
+ */
+struct pvr_device_data {
+ const struct pvr_power_sequence_ops *pwr_ops;
+};
+
+/**
* struct pvr_device - powervr-specific wrapper for &struct drm_device
*/
struct pvr_device {
@@ -98,6 +109,9 @@ struct pvr_device {
/** @fw_version: Firmware version detected at runtime. */
struct pvr_fw_version fw_version;
+ /** @device_data: Pointer to platform-specific data. */
+ const struct pvr_device_data *device_data;
+
/** @regs_resource: Resource representing device control registers. */
struct resource *regs_resource;
@@ -148,6 +162,9 @@ struct pvr_device {
*/
struct reset_control *reset;
+ /** @pwrseq: Pointer to a power sequencer, if one is used. */
+ struct pwrseq_desc *pwrseq;
+
/** @irq: IRQ number. */
int irq;
diff --git a/drivers/gpu/drm/imagination/pvr_drv.c b/drivers/gpu/drm/imagination/pvr_drv.c
index b058ec183bb3..916b40ced7eb 100644
--- a/drivers/gpu/drm/imagination/pvr_drv.c
+++ b/drivers/gpu/drm/imagination/pvr_drv.c
@@ -1480,15 +1480,33 @@ static void pvr_remove(struct platform_device *plat_dev)
pvr_power_domains_fini(pvr_dev);
}
+static const struct pvr_device_data pvr_device_data_manual = {
+ .pwr_ops = &pvr_power_sequence_ops_manual,
+};
+
+static const struct pvr_device_data pvr_device_data_pwrseq = {
+ .pwr_ops = &pvr_power_sequence_ops_pwrseq,
+};
+
static const struct of_device_id dt_match[] = {
- { .compatible = "img,img-rogue", .data = NULL },
+ {
+ .compatible = "thead,th1520-gpu",
+ .data = &pvr_device_data_pwrseq,
+ },
+ {
+ .compatible = "img,img-rogue",
+ .data = &pvr_device_data_manual,
+ },
/*
* This legacy compatible string was introduced early on before the more generic
* "img,img-rogue" was added. Keep it around here for compatibility, but never use
* "img,img-axe" in new devicetrees.
*/
- { .compatible = "img,img-axe", .data = NULL },
+ {
+ .compatible = "img,img-axe",
+ .data = &pvr_device_data_manual,
+ },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
@@ -1513,4 +1531,5 @@ MODULE_DESCRIPTION(PVR_DRIVER_DESC);
MODULE_LICENSE("Dual MIT/GPL");
MODULE_IMPORT_NS("DMA_BUF");
MODULE_FIRMWARE("powervr/rogue_33.15.11.3_v1.fw");
+MODULE_FIRMWARE("powervr/rogue_36.52.104.182_v1.fw");
MODULE_FIRMWARE("powervr/rogue_36.53.104.796_v1.fw");
diff --git a/drivers/gpu/drm/imagination/pvr_power.c b/drivers/gpu/drm/imagination/pvr_power.c
index 187a07e0bd9a..c6e7ff9e935d 100644
--- a/drivers/gpu/drm/imagination/pvr_power.c
+++ b/drivers/gpu/drm/imagination/pvr_power.c
@@ -18,6 +18,7 @@
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include <linux/pwrseq/consumer.h>
#include <linux/reset.h>
#include <linux/timer.h>
#include <linux/types.h>
@@ -234,6 +235,118 @@ pvr_watchdog_init(struct pvr_device *pvr_dev)
return 0;
}
+static int pvr_power_init_manual(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ struct reset_control *reset;
+
+ reset = devm_reset_control_get_optional_exclusive(drm_dev->dev, NULL);
+ if (IS_ERR(reset))
+ return dev_err_probe(drm_dev->dev, PTR_ERR(reset),
+ "failed to get gpu reset line\n");
+
+ pvr_dev->reset = reset;
+
+ return 0;
+}
+
+static int pvr_power_on_sequence_manual(struct pvr_device *pvr_dev)
+{
+ int err;
+
+ err = clk_prepare_enable(pvr_dev->core_clk);
+ if (err)
+ return err;
+
+ err = clk_prepare_enable(pvr_dev->sys_clk);
+ if (err)
+ goto err_core_clk_disable;
+
+ err = clk_prepare_enable(pvr_dev->mem_clk);
+ if (err)
+ goto err_sys_clk_disable;
+
+ /*
+ * According to the hardware manual, a delay of at least 32 clock
+ * cycles is required between de-asserting the clkgen reset and
+ * de-asserting the GPU reset. Assuming a worst-case scenario with
+ * a very high GPU clock frequency, a delay of 1 microsecond is
+ * sufficient to ensure this requirement is met across all
+ * feasible GPU clock speeds.
+ */
+ udelay(1);
+
+ err = reset_control_deassert(pvr_dev->reset);
+ if (err)
+ goto err_mem_clk_disable;
+
+ return 0;
+
+err_mem_clk_disable:
+ clk_disable_unprepare(pvr_dev->mem_clk);
+
+err_sys_clk_disable:
+ clk_disable_unprepare(pvr_dev->sys_clk);
+
+err_core_clk_disable:
+ clk_disable_unprepare(pvr_dev->core_clk);
+
+ return err;
+}
+
+static int pvr_power_off_sequence_manual(struct pvr_device *pvr_dev)
+{
+ int err;
+
+ err = reset_control_assert(pvr_dev->reset);
+
+ clk_disable_unprepare(pvr_dev->mem_clk);
+ clk_disable_unprepare(pvr_dev->sys_clk);
+ clk_disable_unprepare(pvr_dev->core_clk);
+
+ return err;
+}
+
+const struct pvr_power_sequence_ops pvr_power_sequence_ops_manual = {
+ .init = pvr_power_init_manual,
+ .power_on = pvr_power_on_sequence_manual,
+ .power_off = pvr_power_off_sequence_manual,
+};
+
+static int pvr_power_init_pwrseq(struct pvr_device *pvr_dev)
+{
+ struct device *dev = from_pvr_device(pvr_dev)->dev;
+
+ pvr_dev->pwrseq = devm_pwrseq_get(dev, "gpu-power");
+ if (IS_ERR(pvr_dev->pwrseq)) {
+ /*
+		 * This platform requires a power sequencer. If we cannot get
+		 * one, we must return the error (including -EPROBE_DEFER,
+		 * which waits for the provider to appear).
+ */
+ return dev_err_probe(dev, PTR_ERR(pvr_dev->pwrseq),
+ "Failed to get required power sequencer\n");
+ }
+
+ return 0;
+}
+
+static int pvr_power_on_sequence_pwrseq(struct pvr_device *pvr_dev)
+{
+ return pwrseq_power_on(pvr_dev->pwrseq);
+}
+
+static int pvr_power_off_sequence_pwrseq(struct pvr_device *pvr_dev)
+{
+ return pwrseq_power_off(pvr_dev->pwrseq);
+}
+
+const struct pvr_power_sequence_ops pvr_power_sequence_ops_pwrseq = {
+ .init = pvr_power_init_pwrseq,
+ .power_on = pvr_power_on_sequence_pwrseq,
+ .power_off = pvr_power_off_sequence_pwrseq,
+};
+
int
pvr_power_device_suspend(struct device *dev)
{
@@ -252,11 +365,7 @@ pvr_power_device_suspend(struct device *dev)
goto err_drm_dev_exit;
}
- clk_disable_unprepare(pvr_dev->mem_clk);
- clk_disable_unprepare(pvr_dev->sys_clk);
- clk_disable_unprepare(pvr_dev->core_clk);
-
- err = reset_control_assert(pvr_dev->reset);
+ err = pvr_dev->device_data->pwr_ops->power_off(pvr_dev);
err_drm_dev_exit:
drm_dev_exit(idx);
@@ -276,53 +385,22 @@ pvr_power_device_resume(struct device *dev)
if (!drm_dev_enter(drm_dev, &idx))
return -EIO;
- err = clk_prepare_enable(pvr_dev->core_clk);
+ err = pvr_dev->device_data->pwr_ops->power_on(pvr_dev);
if (err)
goto err_drm_dev_exit;
- err = clk_prepare_enable(pvr_dev->sys_clk);
- if (err)
- goto err_core_clk_disable;
-
- err = clk_prepare_enable(pvr_dev->mem_clk);
- if (err)
- goto err_sys_clk_disable;
-
- /*
- * According to the hardware manual, a delay of at least 32 clock
- * cycles is required between de-asserting the clkgen reset and
- * de-asserting the GPU reset. Assuming a worst-case scenario with
- * a very high GPU clock frequency, a delay of 1 microsecond is
- * sufficient to ensure this requirement is met across all
- * feasible GPU clock speeds.
- */
- udelay(1);
-
- err = reset_control_deassert(pvr_dev->reset);
- if (err)
- goto err_mem_clk_disable;
-
if (pvr_dev->fw_dev.booted) {
err = pvr_power_fw_enable(pvr_dev);
if (err)
- goto err_reset_assert;
+ goto err_power_off;
}
drm_dev_exit(idx);
return 0;
-err_reset_assert:
- reset_control_assert(pvr_dev->reset);
-
-err_mem_clk_disable:
- clk_disable_unprepare(pvr_dev->mem_clk);
-
-err_sys_clk_disable:
- clk_disable_unprepare(pvr_dev->sys_clk);
-
-err_core_clk_disable:
- clk_disable_unprepare(pvr_dev->core_clk);
+err_power_off:
+ pvr_dev->device_data->pwr_ops->power_off(pvr_dev);
err_drm_dev_exit:
drm_dev_exit(idx);
diff --git a/drivers/gpu/drm/imagination/pvr_power.h b/drivers/gpu/drm/imagination/pvr_power.h
index ada85674a7ca..b853d092242c 100644
--- a/drivers/gpu/drm/imagination/pvr_power.h
+++ b/drivers/gpu/drm/imagination/pvr_power.h
@@ -41,4 +41,19 @@ pvr_power_put(struct pvr_device *pvr_dev)
int pvr_power_domains_init(struct pvr_device *pvr_dev);
void pvr_power_domains_fini(struct pvr_device *pvr_dev);
+/**
+ * struct pvr_power_sequence_ops - Platform specific power sequence operations.
+ * @init: Pointer to the platform-specific initialization function.
+ * @power_on: Pointer to the platform-specific power on function.
+ * @power_off: Pointer to the platform-specific power off function.
+ */
+struct pvr_power_sequence_ops {
+ int (*init)(struct pvr_device *pvr_dev);
+ int (*power_on)(struct pvr_device *pvr_dev);
+ int (*power_off)(struct pvr_device *pvr_dev);
+};
+
+extern const struct pvr_power_sequence_ops pvr_power_sequence_ops_manual;
+extern const struct pvr_power_sequence_ops pvr_power_sequence_ops_pwrseq;
+
#endif /* PVR_POWER_H */
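
The ops table is the extension point for new platforms. As an illustration (entirely hypothetical, not part of the patch), a platform whose firmware keeps the GPU powered could plug in with no-op callbacks:

	static int pvr_power_noop(struct pvr_device *pvr_dev)
	{
		return 0; /* firmware manages GPU power; nothing to do here */
	}

	static const struct pvr_power_sequence_ops pvr_power_sequence_ops_noop = {
		.init = pvr_power_noop,
		.power_on = pvr_power_noop,
		.power_off = pvr_power_noop,
	};

It would then be referenced through a struct pvr_device_data from the matching of_device_id .data field, as done for pvr_device_data_pwrseq above.
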
diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c
index 2896fa7501b1..3d97990170bf 100644
--- a/drivers/gpu/drm/imagination/pvr_vm.c
+++ b/drivers/gpu/drm/imagination/pvr_vm.c
@@ -185,12 +185,17 @@ struct pvr_vm_bind_op {
static int pvr_vm_bind_op_exec(struct pvr_vm_bind_op *bind_op)
{
switch (bind_op->type) {
- case PVR_VM_BIND_TYPE_MAP:
+ case PVR_VM_BIND_TYPE_MAP: {
+ const struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = bind_op->device_addr,
+ .map.va.range = bind_op->size,
+ .map.gem.obj = gem_from_pvr_gem(bind_op->pvr_obj),
+ .map.gem.offset = bind_op->offset,
+ };
+
return drm_gpuvm_sm_map(&bind_op->vm_ctx->gpuvm_mgr,
- bind_op, bind_op->device_addr,
- bind_op->size,
- gem_from_pvr_gem(bind_op->pvr_obj),
- bind_op->offset);
+ bind_op, &map_req);
+ }
case PVR_VM_BIND_TYPE_UNMAP:
return drm_gpuvm_sm_unmap(&bind_op->vm_ctx->gpuvm_mgr,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index d5e6bab36414..eb5537f0ac90 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -394,10 +394,12 @@ static bool mtk_drm_get_all_drm_priv(struct device *dev)
continue;
drm_dev = device_find_child(&pdev->dev, NULL, mtk_drm_match);
+ put_device(&pdev->dev);
if (!drm_dev)
continue;
temp_drm_priv = dev_get_drvdata(drm_dev);
+ put_device(drm_dev);
if (!temp_drm_priv)
continue;
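
Both added put_device() calls drop references taken implicitly by the lookups: device_find_child() returns its match with the refcount raised, and the pdev used for the search was itself obtained from a reference-taking lookup. The general pattern looks like this (match_fn and use_drvdata are hypothetical):

	struct device *child;

	child = device_find_child(parent, NULL, match_fn);
	if (child) {
		use_drvdata(dev_get_drvdata(child)); /* use while ref is held */
		put_device(child); /* balance the get from device_find_child() */
	}
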
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index d7726091819c..0e2bcd5f67b7 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -1002,6 +1002,12 @@ static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
return PTR_ERR(dsi->next_bridge);
}
+ /*
+ * Set the flag to request that the DSI host bridge be pre-enabled before the
+ * device bridge in the chain, so the DSI host is ready when the device bridge is pre-enabled
+ */
+ dsi->next_bridge->pre_enable_prev_first = true;
+
drm_bridge_add(&dsi->bridge);
ret = component_add(host->dev, &mtk_dsi_component_ops);
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 845fd8aa43c3..b766dd5e6c8d 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -182,8 +182,8 @@ static inline struct mtk_hdmi *hdmi_ctx_from_bridge(struct drm_bridge *b)
static void mtk_hdmi_hw_vid_black(struct mtk_hdmi *hdmi, bool black)
{
- regmap_update_bits(hdmi->regs, VIDEO_SOURCE_SEL,
- VIDEO_CFG_4, black ? GEN_RGB : NORMAL_PATH);
+ regmap_update_bits(hdmi->regs, VIDEO_CFG_4,
+ VIDEO_SOURCE_SEL, black ? GEN_RGB : NORMAL_PATH);
}
static void mtk_hdmi_hw_make_reg_writable(struct mtk_hdmi *hdmi, bool enable)
@@ -310,8 +310,8 @@ static void mtk_hdmi_hw_send_info_frame(struct mtk_hdmi *hdmi, u8 *buffer,
static void mtk_hdmi_hw_send_aud_packet(struct mtk_hdmi *hdmi, bool enable)
{
- regmap_update_bits(hdmi->regs, AUDIO_PACKET_OFF,
- GRL_SHIFT_R2, enable ? 0 : AUDIO_PACKET_OFF);
+ regmap_update_bits(hdmi->regs, GRL_SHIFT_R2,
+ AUDIO_PACKET_OFF, enable ? 0 : AUDIO_PACKET_OFF);
}
static void mtk_hdmi_hw_config_sys(struct mtk_hdmi *hdmi)
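Both hunks above fix swapped arguments: regmap_update_bits() is (map, reg, mask, val), so passing the mask where the register offset belongs compiles cleanly (both are unsigned int) but writes to the wrong register. A hedged sketch with hypothetical register and mask names:

/* HYPOTHETICAL_CFG_REG and MUX_SEL_MASK are illustrative names only */
static void set_mux(struct regmap *map, bool bypass)
{
        /* argument order: map, register offset, bit mask, new bit value */
        regmap_update_bits(map, HYPOTHETICAL_CFG_REG, MUX_SEL_MASK,
                           bypass ? MUX_SEL_MASK : 0);
}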
diff --git a/drivers/gpu/drm/mediatek/mtk_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c
index cbc4f37da8ba..02349bd44001 100644
--- a/drivers/gpu/drm/mediatek/mtk_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_plane.c
@@ -292,7 +292,8 @@ static void mtk_plane_atomic_disable(struct drm_plane *plane,
wmb(); /* Make sure the above parameter is set before update */
mtk_plane_state->pending.dirty = true;
- mtk_crtc_plane_disable(old_state->crtc, plane);
+ if (old_state && old_state->crtc)
+ mtk_crtc_plane_disable(old_state->crtc, plane);
}
static void mtk_plane_atomic_update(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
index 00e1afd46b81..44df6410bce1 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
@@ -913,6 +913,11 @@ static const struct adreno_info a6xx_gpus[] = {
{ /* sentinel */ },
},
},
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 185, 0 },
+ { 127, 1 },
+ ),
}, {
.chip_ids = ADRENO_CHIP_IDS(
0x06030001,
@@ -1024,6 +1029,11 @@ static const struct adreno_info a6xx_gpus[] = {
.gmu_cgc_mode = 0x00020200,
.prim_fifo_threshold = 0x00300200,
},
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 169, 0 },
+ { 113, 1 },
+ ),
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06030500),
.family = ADRENO_6XX_GEN4,
@@ -1343,6 +1353,69 @@ static const uint32_t a7xx_pwrup_reglist_regs[] = {
DECLARE_ADRENO_REGLIST_LIST(a7xx_pwrup_reglist);
+/* Applicable for X185, A750 */
+static const u32 a750_ifpc_reglist_regs[] = {
+ REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(0),
+ REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(1),
+ REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(2),
+ REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(3),
+ REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(4),
+ REG_A6XX_TPL1_NC_MODE_CNTL,
+ REG_A6XX_SP_NC_MODE_CNTL,
+ REG_A6XX_CP_DBG_ECO_CNTL,
+ REG_A6XX_CP_PROTECT_CNTL,
+ REG_A6XX_CP_PROTECT(0),
+ REG_A6XX_CP_PROTECT(1),
+ REG_A6XX_CP_PROTECT(2),
+ REG_A6XX_CP_PROTECT(3),
+ REG_A6XX_CP_PROTECT(4),
+ REG_A6XX_CP_PROTECT(5),
+ REG_A6XX_CP_PROTECT(6),
+ REG_A6XX_CP_PROTECT(7),
+ REG_A6XX_CP_PROTECT(8),
+ REG_A6XX_CP_PROTECT(9),
+ REG_A6XX_CP_PROTECT(10),
+ REG_A6XX_CP_PROTECT(11),
+ REG_A6XX_CP_PROTECT(12),
+ REG_A6XX_CP_PROTECT(13),
+ REG_A6XX_CP_PROTECT(14),
+ REG_A6XX_CP_PROTECT(15),
+ REG_A6XX_CP_PROTECT(16),
+ REG_A6XX_CP_PROTECT(17),
+ REG_A6XX_CP_PROTECT(18),
+ REG_A6XX_CP_PROTECT(19),
+ REG_A6XX_CP_PROTECT(20),
+ REG_A6XX_CP_PROTECT(21),
+ REG_A6XX_CP_PROTECT(22),
+ REG_A6XX_CP_PROTECT(23),
+ REG_A6XX_CP_PROTECT(24),
+ REG_A6XX_CP_PROTECT(25),
+ REG_A6XX_CP_PROTECT(26),
+ REG_A6XX_CP_PROTECT(27),
+ REG_A6XX_CP_PROTECT(28),
+ REG_A6XX_CP_PROTECT(29),
+ REG_A6XX_CP_PROTECT(30),
+ REG_A6XX_CP_PROTECT(31),
+ REG_A6XX_CP_PROTECT(32),
+ REG_A6XX_CP_PROTECT(33),
+ REG_A6XX_CP_PROTECT(34),
+ REG_A6XX_CP_PROTECT(35),
+ REG_A6XX_CP_PROTECT(36),
+ REG_A6XX_CP_PROTECT(37),
+ REG_A6XX_CP_PROTECT(38),
+ REG_A6XX_CP_PROTECT(39),
+ REG_A6XX_CP_PROTECT(40),
+ REG_A6XX_CP_PROTECT(41),
+ REG_A6XX_CP_PROTECT(42),
+ REG_A6XX_CP_PROTECT(43),
+ REG_A6XX_CP_PROTECT(44),
+ REG_A6XX_CP_PROTECT(45),
+ REG_A6XX_CP_PROTECT(46),
+ REG_A6XX_CP_PROTECT(47),
+};
+
+DECLARE_ADRENO_REGLIST_LIST(a750_ifpc_reglist);
+
static const struct adreno_info a7xx_gpus[] = {
{
.chip_ids = ADRENO_CHIP_IDS(0x07000200),
@@ -1432,14 +1505,27 @@ static const struct adreno_info a7xx_gpus[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV |
- ADRENO_QUIRK_PREEMPTION,
+ ADRENO_QUIRK_PREEMPTION |
+ ADRENO_QUIRK_IFPC,
.init = a6xx_gpu_init,
.a6xx = &(const struct a6xx_info) {
.hwcg = a740_hwcg,
.protect = &a730_protect,
.pwrup_reglist = &a7xx_pwrup_reglist,
+ .ifpc_reglist = &a750_ifpc_reglist,
.gmu_chipid = 0x7050001,
.gmu_cgc_mode = 0x00020202,
+ .bcms = (const struct a6xx_bcm[]) {
+ { .name = "SH0", .buswidth = 16 },
+ { .name = "MC0", .buswidth = 4 },
+ {
+ .name = "ACV",
+ .fixed = true,
+ .perfmode = BIT(3),
+ .perfmode_bw = 16500000,
+ },
+ { /* sentinel */ },
+ },
},
.preempt_record_size = 4192 * SZ_1K,
.speedbins = ADRENO_SPEEDBINS(
@@ -1460,12 +1546,14 @@ static const struct adreno_info a7xx_gpus[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV |
- ADRENO_QUIRK_PREEMPTION,
+ ADRENO_QUIRK_PREEMPTION |
+ ADRENO_QUIRK_IFPC,
.init = a6xx_gpu_init,
.zapfw = "gen70900_zap.mbn",
.a6xx = &(const struct a6xx_info) {
.protect = &a730_protect,
.pwrup_reglist = &a7xx_pwrup_reglist,
+ .ifpc_reglist = &a750_ifpc_reglist,
.gmu_chipid = 0x7090100,
.gmu_cgc_mode = 0x00020202,
.bcms = (const struct a6xx_bcm[]) {
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 28e6705c6da6..fc62fef2fed8 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -93,14 +93,25 @@ bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
/* Check to see if the GX rail is still powered */
bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
u32 val;
/* This can be called from gpu state code so make sure GMU is valid */
if (!gmu->initialized)
return false;
+ /* If GMU is absent, then GX power domain is ON as long as GPU is in active state */
+ if (adreno_has_gmu_wrapper(adreno_gpu))
+ return true;
+
val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
+ if (adreno_is_a7xx(adreno_gpu))
+ return !(val &
+ (A7XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
+ A7XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
+
return !(val &
(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
@@ -272,6 +283,8 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
if (ret)
DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");
+ set_bit(GMU_STATUS_FW_START, &gmu->status);
+
return ret;
}
@@ -403,7 +416,10 @@ int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
int ret;
u32 val;
- if (!gmu->legacy)
+ WARN_ON(!gmu->legacy);
+
+ /* Nothing to do if GMU does the power management */
+ if (gmu->idle_level > GMU_IDLE_STATE_ACTIVE)
return 0;
gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);
@@ -518,6 +534,9 @@ static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
int ret;
u32 val;
+ if (!test_and_clear_bit(GMU_STATUS_PDC_SLEEP, &gmu->status))
+ return 0;
+
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, BIT(1));
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
@@ -545,6 +564,9 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
int ret;
u32 val;
+ if (test_and_clear_bit(GMU_STATUS_FW_START, &gmu->status))
+ return;
+
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);
ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
@@ -553,6 +575,8 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
+
+ set_bit(GMU_STATUS_PDC_SLEEP, &gmu->status);
}
static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
@@ -681,8 +705,6 @@ setup_pdc:
/* ensure no writes happen before the uCode is fully written */
wmb();
- a6xx_rpmh_stop(gmu);
-
err:
if (!IS_ERR_OR_NULL(pdcptr))
iounmap(pdcptr);
@@ -842,19 +864,15 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
else
gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
- if (state == GMU_WARM_BOOT) {
- ret = a6xx_rpmh_start(gmu);
- if (ret)
- return ret;
- } else {
+ ret = a6xx_rpmh_start(gmu);
+ if (ret)
+ return ret;
+
+ if (state == GMU_COLD_BOOT) {
if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
"GMU firmware is not loaded\n"))
return -ENOENT;
- ret = a6xx_rpmh_start(gmu);
- if (ret)
- return ret;
-
ret = a6xx_gmu_fw_load(gmu);
if (ret)
return ret;
@@ -925,10 +943,7 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
ret = a6xx_gmu_gfx_rail_on(gmu);
if (ret)
return ret;
- }
- /* Enable SPTP_PC if the CPU is responsible for it */
- if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
ret = a6xx_sptprac_enable(gmu);
if (ret)
return ret;
@@ -980,6 +995,22 @@ static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
val, (val & 1), 100, 10000);
gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS + seqmem_off,
val, (val & 1), 100, 1000);
+
+ if (!adreno_is_a740_family(adreno_gpu))
+ return;
+
+ gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS4_DRV0_STATUS + seqmem_off,
+ val, (val & 1), 100, 10000);
+ gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS5_DRV0_STATUS + seqmem_off,
+ val, (val & 1), 100, 10000);
+ gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS6_DRV0_STATUS + seqmem_off,
+ val, (val & 1), 100, 10000);
+ gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS7_DRV0_STATUS + seqmem_off,
+ val, (val & 1), 100, 1000);
+ gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS8_DRV0_STATUS + seqmem_off,
+ val, (val & 1), 100, 10000);
+ gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS9_DRV0_STATUS + seqmem_off,
+ val, (val & 1), 100, 1000);
}
/* Force the GMU off in case it isn't responsive */
@@ -1023,6 +1054,8 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
/* Reset GPU core blocks */
a6xx_gpu_sw_reset(gpu, true);
+
+ a6xx_rpmh_stop(gmu);
}
static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
@@ -1128,6 +1161,11 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
/* Set the GPU to the current freq */
a6xx_gmu_set_initial_freq(gpu, gmu);
+ if (refcount_read(&gpu->sysprof_active) > 1) {
+ ret = a6xx_gmu_set_oob(gmu, GMU_OOB_PERFCOUNTER_SET);
+ if (!ret)
+ set_bit(GMU_STATUS_OOB_PERF_SET, &gmu->status);
+ }
out:
/* On failure, shut down the GMU to leave it in a good state */
if (ret) {
@@ -1175,6 +1213,9 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
}
+ if (test_and_clear_bit(GMU_STATUS_OOB_PERF_SET, &gmu->status))
+ a6xx_gmu_clear_oob(gmu, GMU_OOB_PERFCOUNTER_SET);
+
ret = a6xx_gmu_wait_for_idle(gmu);
/* If the GMU isn't responding assume it is hung */
@@ -1318,8 +1359,6 @@ static int a6xx_gmu_memory_probe(struct drm_device *drm, struct a6xx_gmu *gmu)
struct msm_mmu *mmu;
mmu = msm_iommu_new(gmu->dev, 0);
- if (!mmu)
- return -ENODEV;
if (IS_ERR(mmu))
return PTR_ERR(mmu);
@@ -1692,6 +1731,7 @@ static int a6xx_gmu_acd_probe(struct a6xx_gmu *gmu)
u32 val;
freq = gmu->gpu_freqs[i];
+ /* This is unlikely to fail because we are passing back a known freq */
opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, freq, true);
np = dev_pm_opp_get_of_node(opp);
@@ -1790,6 +1830,35 @@ static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
return irq;
}
+void a6xx_gmu_sysprof_setup(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ unsigned int sysprof_active;
+
+ /* Nothing to do if GPU is suspended. We will handle this during GMU resume */
+ if (!pm_runtime_get_if_active(&gpu->pdev->dev))
+ return;
+
+ mutex_lock(&gmu->lock);
+
+ sysprof_active = refcount_read(&gpu->sysprof_active);
+
+ /*
+ * 'Perfcounter select' register values are lost during IFPC collapse. To avoid that,
+ * use the currently unused perfcounter oob vote to block IFPC when sysprof is active
+ */
+ if ((sysprof_active > 1) && !test_and_set_bit(GMU_STATUS_OOB_PERF_SET, &gmu->status))
+ a6xx_gmu_set_oob(gmu, GMU_OOB_PERFCOUNTER_SET);
+ else if ((sysprof_active == 1) && test_and_clear_bit(GMU_STATUS_OOB_PERF_SET, &gmu->status))
+ a6xx_gmu_clear_oob(gmu, GMU_OOB_PERFCOUNTER_SET);
+
+ mutex_unlock(&gmu->lock);
+
+ pm_runtime_put(&gpu->pdev->dev);
+}
+
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
@@ -1932,8 +2001,9 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
if (ret)
return ret;
- /* Fow now, don't do anything fancy until we get our feet under us */
- gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
+ /* Set GMU idle level */
+ gmu->idle_level = (adreno_gpu->info->quirks & ADRENO_QUIRK_IFPC) ?
+ GMU_IDLE_STATE_IFPC : GMU_IDLE_STATE_ACTIVE;
pm_runtime_enable(gmu->dev);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index d1ce11131ba6..06cfc294016f 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -50,6 +50,9 @@ struct a6xx_bcm {
/* The GMU does not do any idle state management */
#define GMU_IDLE_STATE_ACTIVE 0
+/* Unknown power state. Not exposed by the firmware. For documentation purposes only */
+#define GMU_IDLE_STATE_RESERVED 1
+
/* The GMU manages SPTP power collapse */
#define GMU_IDLE_STATE_SPTP 2
@@ -117,6 +120,14 @@ struct a6xx_gmu {
struct qmp *qmp;
struct a6xx_hfi_msg_bw_table *bw_table;
+
+/* To check if we can trigger sleep seq at PDC. Cleared in a6xx_rpmh_stop() */
+#define GMU_STATUS_FW_START 0
+/* To track if PDC sleep seq was done */
+#define GMU_STATUS_PDC_SLEEP 1
+/* To track Perfcounter OOB set status */
+#define GMU_STATUS_OOB_PERF_SET 2
+ unsigned long status;
};
static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
@@ -158,6 +169,9 @@ static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi)
#define gmu_poll_timeout(gmu, addr, val, cond, interval, timeout) \
readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \
interval, timeout)
+#define gmu_poll_timeout_atomic(gmu, addr, val, cond, interval, timeout) \
+ readl_poll_timeout_atomic((gmu)->mmio + ((addr) << 2), val, cond, \
+ interval, timeout)
static inline u32 gmu_read_rscc(struct a6xx_gmu *gmu, u32 offset)
{
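gmu_poll_timeout_atomic() mirrors gmu_poll_timeout() but wraps readl_poll_timeout_atomic(), which busy-waits with udelay() instead of sleeping and is therefore safe in IRQ context. A sketch of the usage this series introduces in the interrupt path, with the same interval/timeout values:

/* poll from atomic context: 1 us between reads, 100 us total budget */
u32 val;
int ret;

ret = gmu_poll_timeout_atomic(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL,
                              val, !val, 1, 100);
if (ret)
        pr_err_ratelimited("fence did not clear: 0x%x\n", val);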
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 45dd5fd1c2bf..b8f8ae940b55 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -16,6 +16,97 @@
#define GPU_PAS_ID 13
+static u64 read_gmu_ao_counter(struct a6xx_gpu *a6xx_gpu)
+{
+ u64 count_hi, count_lo, temp;
+
+ do {
+ count_hi = gmu_read(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_H);
+ count_lo = gmu_read(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L);
+ temp = gmu_read(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_H);
+ } while (unlikely(count_hi != temp));
+
+ return (count_hi << 32) | count_lo;
+}
+
+static bool fence_status_check(struct msm_gpu *gpu, u32 offset, u32 value, u32 status, u32 mask)
+{
+ /* Success if !writedropped0/1 */
+ if (!(status & mask))
+ return true;
+
+ udelay(10);
+
+ /* Try to update fenced register again */
+ gpu_write(gpu, offset, value);
+
+ /* We can't do a posted write here because the power domain could be
+ * in collapse state. So use the heaviest barrier instead
+ */
+ mb();
+ return false;
+}
+
+static int fenced_write(struct a6xx_gpu *a6xx_gpu, u32 offset, u32 value, u32 mask)
+{
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ u32 status;
+
+ gpu_write(gpu, offset, value);
+
+ /* Nothing else to be done in the case of no-GMU */
+ if (adreno_has_gmu_wrapper(adreno_gpu))
+ return 0;
+
+ /* We can't do a posted write here because the power domain could be
+ * in collapse state. So use the heaviest barrier instead
+ */
+ mb();
+
+ if (!gmu_poll_timeout(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS, status,
+ fence_status_check(gpu, offset, value, status, mask), 0, 1000))
+ return 0;
+
+ /* Try again for another 1ms before failing */
+ gpu_write(gpu, offset, value);
+ mb();
+
+ if (!gmu_poll_timeout(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS, status,
+ fence_status_check(gpu, offset, value, status, mask), 0, 1000)) {
+ /*
+ * The 'delay' warning is here because the pause to print this
+ * warning will allow the GPU to move to power collapse, which
+ * defeats the purpose of continuous polling for 2 ms
+ */
+ dev_err_ratelimited(gmu->dev, "delay in fenced register write (0x%x)\n",
+ offset);
+ return 0;
+ }
+
+ dev_err_ratelimited(gmu->dev, "fenced register write (0x%x) fail\n",
+ offset);
+
+ return -ETIMEDOUT;
+}
+
+int a6xx_fenced_write(struct a6xx_gpu *a6xx_gpu, u32 offset, u64 value, u32 mask, bool is_64b)
+{
+ int ret;
+
+ ret = fenced_write(a6xx_gpu, offset, lower_32_bits(value), mask);
+ if (ret)
+ return ret;
+
+ if (!is_64b)
+ return 0;
+
+ ret = fenced_write(a6xx_gpu, offset + 1, upper_32_bits(value), mask);
+
+ return ret;
+}
+
static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -86,7 +177,7 @@ static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
/* Update HW if this is the current ring and we are not in preempt*/
if (!a6xx_in_preempt(a6xx_gpu)) {
if (a6xx_gpu->cur_ring == ring)
- gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
+ a6xx_fenced_write(a6xx_gpu, REG_A6XX_CP_RB_WPTR, wptr, BIT(0), false);
else
ring->restore_wptr = true;
} else {
@@ -173,8 +264,8 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
* Needed for preemption
*/
OUT_PKT7(ring, CP_MEM_WRITE, 5);
- OUT_RING(ring, CP_MEM_WRITE_0_ADDR_LO(lower_32_bits(memptr)));
- OUT_RING(ring, CP_MEM_WRITE_1_ADDR_HI(upper_32_bits(memptr)));
+ OUT_RING(ring, A5XX_CP_MEM_WRITE_ADDR_LO(lower_32_bits(memptr)));
+ OUT_RING(ring, A5XX_CP_MEM_WRITE_ADDR_HI(upper_32_bits(memptr)));
OUT_RING(ring, lower_32_bits(ttbr));
OUT_RING(ring, upper_32_bits(ttbr));
OUT_RING(ring, ctx->seqno);
@@ -204,9 +295,9 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
*/
OUT_PKT7(ring, CP_WAIT_REG_MEM, 6);
OUT_RING(ring, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ));
- OUT_RING(ring, CP_WAIT_REG_MEM_1_POLL_ADDR_LO(
+ OUT_RING(ring, CP_WAIT_REG_MEM_POLL_ADDR_LO(
REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS));
- OUT_RING(ring, CP_WAIT_REG_MEM_2_POLL_ADDR_HI(0));
+ OUT_RING(ring, CP_WAIT_REG_MEM_POLL_ADDR_HI(0));
OUT_RING(ring, CP_WAIT_REG_MEM_3_REF(0x1));
OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(0x1));
OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(0));
@@ -298,8 +389,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
OUT_RING(ring, submit->seqno);
- trace_msm_gpu_submit_flush(submit,
- gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER));
+ trace_msm_gpu_submit_flush(submit, read_gmu_ao_counter(a6xx_gpu));
a6xx_flush(gpu, ring);
}
@@ -499,8 +589,7 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
}
- trace_msm_gpu_submit_flush(submit,
- gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER));
+ trace_msm_gpu_submit_flush(submit, read_gmu_ao_counter(a6xx_gpu));
a6xx_flush(gpu, ring);
@@ -739,11 +828,10 @@ static void a7xx_patch_pwrup_reglist(struct msm_gpu *gpu)
u32 *dest = (u32 *)&lock->regs[0];
int i;
- reglist = adreno_gpu->info->a6xx->pwrup_reglist;
-
lock->gpu_req = lock->cpu_req = lock->turn = 0;
- lock->ifpc_list_len = 0;
- lock->preemption_list_len = reglist->count;
+
+ reglist = adreno_gpu->info->a6xx->ifpc_reglist;
+ lock->ifpc_list_len = reglist->count;
/*
* For each entry in each of the lists, write the offset and the current
@@ -754,6 +842,14 @@ static void a7xx_patch_pwrup_reglist(struct msm_gpu *gpu)
*dest++ = gpu_read(gpu, reglist->regs[i]);
}
+ reglist = adreno_gpu->info->a6xx->pwrup_reglist;
+ lock->preemption_list_len = reglist->count;
+
+ for (i = 0; i < reglist->count; i++) {
+ *dest++ = reglist->regs[i];
+ *dest++ = gpu_read(gpu, reglist->regs[i]);
+ }
+
/*
* The overall register list is composed of
* 1. Static IFPC-only registers
@@ -1241,14 +1337,14 @@ static int hw_init(struct msm_gpu *gpu)
/* Set weights for bicubic filtering */
if (adreno_is_a650_family(adreno_gpu) || adreno_is_x185(adreno_gpu)) {
- gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0, 0);
- gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1,
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(0), 0);
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(1),
0x3fe05ff4);
- gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2,
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(2),
0x3fa0ebee);
- gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3,
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(3),
0x3f5193ed);
- gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4,
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(4),
0x3f0243f0);
}
@@ -1448,21 +1544,25 @@ static void a6xx_recover(struct msm_gpu *gpu)
adreno_dump_info(gpu);
- for (i = 0; i < 8; i++)
- DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
- gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i)));
+ if (a6xx_gmu_gx_is_on(&a6xx_gpu->gmu)) {
+ /* Sometimes crashstate capture is skipped, so SQE should be halted here again */
+ gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3);
+
+ for (i = 0; i < 8; i++)
+ DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i)));
- if (hang_debug)
- a6xx_dump(gpu);
+ if (hang_debug)
+ a6xx_dump(gpu);
+
+ }
/*
* To handle recovery specific sequences during the rpm suspend we are
* about to trigger
*/
- a6xx_gpu->hung = true;
- /* Halt SQE first */
- gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3);
+ a6xx_gpu->hung = true;
pm_runtime_dont_use_autosuspend(&gpu->pdev->dev);
@@ -1693,8 +1793,6 @@ static void a6xx_cp_hw_err_irq(struct msm_gpu *gpu)
static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
/*
@@ -1706,13 +1804,6 @@ static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
if (gpu_read(gpu, REG_A6XX_RBBM_STATUS3) & A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT)
return;
- /*
- * Force the GPU to stay on until after we finish
- * collecting information
- */
- if (!adreno_has_gmu_wrapper(adreno_gpu))
- gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1);
-
DRM_DEV_ERROR(&gpu->pdev->dev,
"gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0,
@@ -1727,6 +1818,9 @@ static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
/* Turn off the hangcheck timer to keep it from bothering us */
timer_delete(&gpu->hangcheck_timer);
+ /* Turn off interrupts to avoid triggering recovery again */
+ gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, 0);
+
kthread_queue_work(gpu->worker, &gpu->recover_work);
}
@@ -1751,9 +1845,49 @@ static void a7xx_sw_fuse_violation_irq(struct msm_gpu *gpu)
}
}
+static void a6xx_gpu_keepalive_vote(struct msm_gpu *gpu, bool on)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ if (adreno_has_gmu_wrapper(adreno_gpu))
+ return;
+
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, on);
+}
+
+static int irq_poll_fence(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ u32 status;
+
+ if (adreno_has_gmu_wrapper(adreno_gpu))
+ return 0;
+
+ if (gmu_poll_timeout_atomic(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, status, !status, 1, 100)) {
+ u32 rbbm_unmasked = gmu_read(gmu, REG_A6XX_GMU_RBBM_INT_UNMASKED_STATUS);
+
+ dev_err_ratelimited(&gpu->pdev->dev,
+ "irq fence poll timeout, fence_ctrl=0x%x, unmasked_status=0x%x\n",
+ status, rbbm_unmasked);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
{
struct msm_drm_private *priv = gpu->dev->dev_private;
+
+ /* Set keepalive vote to avoid power collapse after RBBM_INT_0_STATUS is read */
+ a6xx_gpu_keepalive_vote(gpu, true);
+
+ if (irq_poll_fence(gpu))
+ goto done;
+
u32 status = gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS);
gpu_write(gpu, REG_A6XX_RBBM_INT_CLEAR_CMD, status);
@@ -1790,6 +1924,9 @@ static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
if (status & A6XX_RBBM_INT_0_MASK_CP_SW)
a6xx_preempt_irq(gpu);
+done:
+ a6xx_gpu_keepalive_vote(gpu, false);
+
return IRQ_HANDLED;
}
@@ -2179,16 +2316,7 @@ static int a6xx_gmu_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
- mutex_lock(&a6xx_gpu->gmu.lock);
-
- /* Force the GPU power on so we can read this register */
- a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
-
- *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER);
-
- a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
-
- mutex_unlock(&a6xx_gpu->gmu.lock);
+ *value = read_gmu_ao_counter(a6xx_gpu);
return 0;
}
@@ -2298,18 +2426,36 @@ static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami)
return a6xx_gpu->shadow[ring->id];
+ /*
+ * This is true only on an A6XX_GEN1 with a GMU, IFPC enabled, and a super old
+ * SQE firmware without 'whereami' support
+ */
+ WARN_ONCE((to_adreno_gpu(gpu)->info->quirks & ADRENO_QUIRK_IFPC),
+ "Can't read CP_RB_RPTR register reliably\n");
+
return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR);
}
static bool a6xx_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
- struct msm_cp_state cp_state = {
+ struct msm_cp_state cp_state;
+ bool progress;
+
+ /*
+ * With IFPC, KMD doesn't know whether GX power domain is collapsed
+ * or not. So, we can't blindly read the below registers in GX domain.
+ * Lets trust the hang detection in HW and lie to the caller that
+ * there was progress.
+ */
+ if (to_adreno_gpu(gpu)->info->quirks & ADRENO_QUIRK_IFPC)
+ return true;
+
+ cp_state = (struct msm_cp_state) {
.ib1_base = gpu_read64(gpu, REG_A6XX_CP_IB1_BASE),
.ib2_base = gpu_read64(gpu, REG_A6XX_CP_IB2_BASE),
.ib1_rem = gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE),
.ib2_rem = gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE),
};
- bool progress;
/*
* Adjust the remaining data to account for what has already been
@@ -2408,6 +2554,7 @@ static const struct adreno_gpu_funcs funcs = {
.create_private_vm = a6xx_create_private_vm,
.get_rptr = a6xx_get_rptr,
.progress = a6xx_progress,
+ .sysprof_setup = a6xx_gmu_sysprof_setup,
},
.get_timestamp = a6xx_gmu_get_timestamp,
};
@@ -2468,6 +2615,7 @@ static const struct adreno_gpu_funcs funcs_a7xx = {
.create_private_vm = a6xx_create_private_vm,
.get_rptr = a6xx_get_rptr,
.progress = a6xx_progress,
+ .sysprof_setup = a6xx_gmu_sysprof_setup,
},
.get_timestamp = a6xx_gmu_get_timestamp,
};
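read_gmu_ao_counter() above uses the classic hi/lo/hi sequence so that a carry out of the low word between the two 32-bit reads cannot produce a torn 64-bit value. The technique in isolation, with read_hi()/read_lo() standing in for the register accessors:

/* Re-read the high word until it is stable across the low-word read. */
static u64 read_split_counter64(u32 (*read_hi)(void), u32 (*read_lo)(void))
{
        u64 hi, lo;

        do {
                hi = read_hi();
                lo = read_lo();
        } while (hi != read_hi());      /* low word wrapped: retry */

        return (hi << 32) | lo;
}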
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 6e71f617fc3d..0b17d36c36a9 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -45,6 +45,7 @@ struct a6xx_info {
const struct adreno_reglist *hwcg;
const struct adreno_protect *protect;
const struct adreno_reglist_list *pwrup_reglist;
+ const struct adreno_reglist_list *ifpc_reglist;
u32 gmu_chipid;
u32 gmu_cgc_mode;
u32 prim_fifo_threshold;
@@ -254,6 +255,7 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);
+void a6xx_gmu_sysprof_setup(struct msm_gpu *gpu);
void a6xx_preempt_init(struct msm_gpu *gpu);
void a6xx_preempt_hw_init(struct msm_gpu *gpu);
@@ -295,5 +297,6 @@ int a6xx_gpu_state_put(struct msm_gpu_state *state);
void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_off);
void a6xx_gpu_sw_reset(struct msm_gpu *gpu, bool assert);
+int a6xx_fenced_write(struct a6xx_gpu *gpu, u32 offset, u64 value, u32 mask, bool is_64b);
#endif /* __A6XX_GPU_H__ */
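a6xx_fenced_write() retries a GX-domain register write until the GMU AHB fence status confirms it was not dropped while the power domain was collapsing. A sketch of a caller following the a6xx_flush() usage above; per the fence_status_check() comment, the mask selects the writedropped bit this register class reports:

static void flush_wptr(struct a6xx_gpu *a6xx_gpu, u32 wptr)
{
        /* BIT(0) = writedropped0, checked in REG_A6XX_GMU_AHB_FENCE_STATUS */
        if (a6xx_fenced_write(a6xx_gpu, REG_A6XX_CP_RB_WPTR, wptr,
                              BIT(0), false /* 32-bit register */))
                pr_err_ratelimited("CP_RB_WPTR fenced write failed\n");
}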
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index faca2a0243ab..4c7f3c642f6a 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -11,7 +11,7 @@
static const unsigned int *gen7_0_0_external_core_regs[] __always_unused;
static const unsigned int *gen7_2_0_external_core_regs[] __always_unused;
static const unsigned int *gen7_9_0_external_core_regs[] __always_unused;
-static struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] __always_unused;
+static const struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] __always_unused;
static const u32 gen7_9_0_cx_debugbus_blocks[] __always_unused;
#include "adreno_gen7_0_0_snapshot.h"
@@ -174,8 +174,15 @@ static int a6xx_crashdumper_run(struct msm_gpu *gpu,
static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset,
u32 *data)
{
- u32 reg = A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(offset) |
- A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(block);
+ u32 reg;
+
+ if (to_adreno_gpu(gpu)->info->family >= ADRENO_7XX_GEN1) {
+ reg = A7XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(offset) |
+ A7XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(block);
+ } else {
+ reg = A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(offset) |
+ A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(block);
+ }
gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);
@@ -198,11 +205,18 @@ static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset,
readl((ptr) + ((offset) << 2))
/* read a value from the CX debug bus */
-static int cx_debugbus_read(void __iomem *cxdbg, u32 block, u32 offset,
+static int cx_debugbus_read(struct msm_gpu *gpu, void __iomem *cxdbg, u32 block, u32 offset,
u32 *data)
{
- u32 reg = A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) |
- A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
+ u32 reg;
+
+ if (to_adreno_gpu(gpu)->info->family >= ADRENO_7XX_GEN1) {
+ reg = A7XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) |
+ A7XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
+ } else {
+ reg = A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) |
+ A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
+ }
cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
@@ -315,7 +329,8 @@ static void a6xx_get_debugbus_block(struct msm_gpu *gpu,
ptr += debugbus_read(gpu, block->id, i, ptr);
}
-static void a6xx_get_cx_debugbus_block(void __iomem *cxdbg,
+static void a6xx_get_cx_debugbus_block(struct msm_gpu *gpu,
+ void __iomem *cxdbg,
struct a6xx_gpu_state *a6xx_state,
const struct a6xx_debugbus_block *block,
struct a6xx_gpu_state_obj *obj)
@@ -330,7 +345,7 @@ static void a6xx_get_cx_debugbus_block(void __iomem *cxdbg,
obj->handle = block;
for (ptr = obj->data, i = 0; i < block->count; i++)
- ptr += cx_debugbus_read(cxdbg, block->id, i, ptr);
+ ptr += cx_debugbus_read(gpu, cxdbg, block->id, i, ptr);
}
static void a6xx_get_debugbus_blocks(struct msm_gpu *gpu,
@@ -423,8 +438,9 @@ static void a7xx_get_debugbus_blocks(struct msm_gpu *gpu,
a6xx_state, &a7xx_debugbus_blocks[gbif_debugbus_blocks[i]],
&a6xx_state->debugbus[i + debugbus_blocks_count]);
}
- }
+ a6xx_state->nr_debugbus = total_debugbus_blocks;
+ }
}
static void a6xx_get_debugbus(struct msm_gpu *gpu,
@@ -526,7 +542,8 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
int i;
for (i = 0; i < nr_cx_debugbus_blocks; i++)
- a6xx_get_cx_debugbus_block(cxdbg,
+ a6xx_get_cx_debugbus_block(gpu,
+ cxdbg,
a6xx_state,
&cx_debugbus_blocks[i],
&a6xx_state->cx_debugbus[i]);
@@ -759,15 +776,15 @@ static void a7xx_get_cluster(struct msm_gpu *gpu,
size_t datasize;
int i, regcount = 0;
- /* Some clusters need a selector register to be programmed too */
- if (cluster->sel)
- in += CRASHDUMP_WRITE(in, cluster->sel->cd_reg, cluster->sel->val);
-
in += CRASHDUMP_WRITE(in, REG_A7XX_CP_APERTURE_CNTL_CD,
A7XX_CP_APERTURE_CNTL_CD_PIPE(cluster->pipe_id) |
A7XX_CP_APERTURE_CNTL_CD_CLUSTER(cluster->cluster_id) |
A7XX_CP_APERTURE_CNTL_CD_CONTEXT(cluster->context_id));
+ /* Some clusters need a selector register to be programmed too */
+ if (cluster->sel)
+ in += CRASHDUMP_WRITE(in, cluster->sel->cd_reg, cluster->sel->val);
+
for (i = 0; cluster->regs[i] != UINT_MAX; i += 2) {
int count = RANGE(cluster->regs, i);
@@ -1569,8 +1586,7 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct a6xx_gpu_state *a6xx_state = kzalloc(sizeof(*a6xx_state),
GFP_KERNEL);
- bool stalled = !!(gpu_read(gpu, REG_A6XX_RBBM_STATUS3) &
- A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT);
+ bool stalled;
if (!a6xx_state)
return ERR_PTR(-ENOMEM);
@@ -1591,15 +1607,20 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
}
/* If GX isn't on the rest of the data isn't going to be accessible */
- if (!adreno_has_gmu_wrapper(adreno_gpu) && !a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
+ if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
return &a6xx_state->base;
+ /* Halt SQE first */
+ gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3);
+
/* Get the banks of indexed registers */
if (adreno_is_a7xx(adreno_gpu))
a7xx_get_indexed_registers(gpu, a6xx_state);
else
a6xx_get_indexed_registers(gpu, a6xx_state);
+ stalled = !!(gpu_read(gpu, REG_A6XX_RBBM_STATUS3) &
+ A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT);
/*
* Try to initialize the crashdumper, if we are not dumping state
* with the SMMU stalled. The crashdumper needs memory access to
@@ -1796,6 +1817,7 @@ static void a7xx_show_shader(struct a6xx_gpu_state_obj *obj,
print_name(p, " - type: ", a7xx_statetype_names[block->statetype]);
print_name(p, " - pipe: ", a7xx_pipe_names[block->pipeid]);
+ drm_printf(p, " - location: %d\n", block->location);
for (i = 0; i < block->num_sps; i++) {
drm_printf(p, " - sp: %d\n", i);
@@ -1873,6 +1895,7 @@ static void a7xx_show_dbgahb_cluster(struct a6xx_gpu_state_obj *obj,
print_name(p, " - pipe: ", a7xx_pipe_names[dbgahb->pipe_id]);
print_name(p, " - cluster-name: ", a7xx_cluster_names[dbgahb->cluster_id]);
drm_printf(p, " - context: %d\n", dbgahb->context_id);
+ drm_printf(p, " - location: %d\n", dbgahb->location_id);
a7xx_show_registers_indented(dbgahb->regs, obj->data, p, 4);
}
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
index 95d93ac6812a..1c18499b60bb 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -419,47 +419,47 @@ static const struct a6xx_indexed_registers a6xx_indexed_reglist[] = {
REG_A6XX_CP_SQE_STAT_DATA, 0x33, NULL },
{ "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
REG_A6XX_CP_DRAW_STATE_DATA, 0x100, NULL },
- { "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
+ { "CP_SQE_UCODE_DBG", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x8000, NULL },
- { "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR,
+ { "CP_ROQ_DBG", REG_A6XX_CP_ROQ_DBG_ADDR,
REG_A6XX_CP_ROQ_DBG_DATA, 0, a6xx_get_cp_roq_size},
};
static const struct a6xx_indexed_registers a7xx_indexed_reglist[] = {
{ "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
- REG_A6XX_CP_SQE_STAT_DATA, 0x33, NULL },
+ REG_A6XX_CP_SQE_STAT_DATA, 0x40, NULL },
{ "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
REG_A6XX_CP_DRAW_STATE_DATA, 0x100, NULL },
- { "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
+ { "CP_SQE_UCODE_DBG", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x8000, NULL },
- { "CP_BV_SQE_STAT_ADDR", REG_A7XX_CP_BV_SQE_STAT_ADDR,
- REG_A7XX_CP_BV_SQE_STAT_DATA, 0x33, NULL },
- { "CP_BV_DRAW_STATE_ADDR", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
+ { "CP_BV_SQE_STAT", REG_A7XX_CP_BV_SQE_STAT_ADDR,
+ REG_A7XX_CP_BV_SQE_STAT_DATA, 0x40, NULL },
+ { "CP_BV_DRAW_STATE", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
REG_A7XX_CP_BV_DRAW_STATE_DATA, 0x100, NULL },
- { "CP_BV_SQE_UCODE_DBG_ADDR", REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR,
+ { "CP_BV_SQE_UCODE_DBG", REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR,
REG_A7XX_CP_BV_SQE_UCODE_DBG_DATA, 0x8000, NULL },
- { "CP_SQE_AC_STAT_ADDR", REG_A7XX_CP_SQE_AC_STAT_ADDR,
- REG_A7XX_CP_SQE_AC_STAT_DATA, 0x33, NULL },
- { "CP_LPAC_DRAW_STATE_ADDR", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR,
+ { "CP_SQE_AC_STAT", REG_A7XX_CP_SQE_AC_STAT_ADDR,
+ REG_A7XX_CP_SQE_AC_STAT_DATA, 0x40, NULL },
+ { "CP_LPAC_DRAW_STATE", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR,
REG_A7XX_CP_LPAC_DRAW_STATE_DATA, 0x100, NULL },
- { "CP_SQE_AC_UCODE_DBG_ADDR", REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR,
+ { "CP_SQE_AC_UCODE_DBG", REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR,
REG_A7XX_CP_SQE_AC_UCODE_DBG_DATA, 0x8000, NULL },
- { "CP_LPAC_FIFO_DBG_ADDR", REG_A7XX_CP_LPAC_FIFO_DBG_ADDR,
+ { "CP_LPAC_FIFO_DBG", REG_A7XX_CP_LPAC_FIFO_DBG_ADDR,
REG_A7XX_CP_LPAC_FIFO_DBG_DATA, 0x40, NULL },
- { "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR,
+ { "CP_ROQ_DBG", REG_A6XX_CP_ROQ_DBG_ADDR,
REG_A6XX_CP_ROQ_DBG_DATA, 0, a7xx_get_cp_roq_size },
};
static const struct a6xx_indexed_registers a6xx_cp_mempool_indexed = {
- "CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
+ "CP_MEM_POOL_DBG", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060, NULL,
};
static const struct a6xx_indexed_registers a7xx_cp_bv_mempool_indexed[] = {
- { "CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
- REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2100, NULL },
- { "CP_BV_MEMPOOL", REG_A7XX_CP_BV_MEM_POOL_DBG_ADDR,
- REG_A7XX_CP_BV_MEM_POOL_DBG_DATA, 0x2100, NULL },
+ { "CP_MEM_POOL_DBG", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
+ REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2200, NULL },
+ { "CP_BV_MEM_POOL_DBG", REG_A7XX_CP_BV_MEM_POOL_DBG_ADDR,
+ REG_A7XX_CP_BV_MEM_POOL_DBG_DATA, 0x2200, NULL },
};
#define DEBUGBUS(_id, _count) { .id = _id, .name = #_id, .count = _count }
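Each entry above pairs an ADDR/DATA register with a word count. A sketch of how such an indexed bank is typically walked, assuming addr/data/count field names for the struct (the exact layout lives elsewhere in this file); successive reads of the data register auto-increment through the SRAM:

static void dump_indexed(struct msm_gpu *gpu,
                         const struct a6xx_indexed_registers *ind, u32 *out)
{
        int i;

        gpu_write(gpu, ind->addr, 0);           /* start at index 0 */
        for (i = 0; i < ind->count; i++)
                out[i] = gpu_read(gpu, ind->data);
}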
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index 8e69b1e84657..550de6ad68ef 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -21,6 +21,7 @@ static const char * const a6xx_hfi_msg_id[] = {
HFI_MSG_ID(HFI_H2F_MSG_PERF_TABLE),
HFI_MSG_ID(HFI_H2F_MSG_TEST),
HFI_MSG_ID(HFI_H2F_MSG_START),
+ HFI_MSG_ID(HFI_H2F_FEATURE_CTRL),
HFI_MSG_ID(HFI_H2F_MSG_CORE_FW_START),
HFI_MSG_ID(HFI_H2F_MSG_GX_BW_PERF_VOTE),
HFI_MSG_ID(HFI_H2F_MSG_PREPARE_SLUMBER),
@@ -765,23 +766,40 @@ send:
NULL, 0);
}
+static int a6xx_hfi_feature_ctrl_msg(struct a6xx_gmu *gmu, u32 feature, u32 enable, u32 data)
+{
+ struct a6xx_hfi_msg_feature_ctrl msg = {
+ .feature = feature,
+ .enable = enable,
+ .data = data,
+ };
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_FEATURE_CTRL, &msg, sizeof(msg), NULL, 0);
+}
+
+#define HFI_FEATURE_IFPC 9
+#define IFPC_LONG_HYST 0x1680
+
+static int a6xx_hfi_enable_ifpc(struct a6xx_gmu *gmu)
+{
+ if (gmu->idle_level != GMU_IDLE_STATE_IFPC)
+ return 0;
+
+ return a6xx_hfi_feature_ctrl_msg(gmu, HFI_FEATURE_IFPC, 1, IFPC_LONG_HYST);
+}
+
#define HFI_FEATURE_ACD 12
static int a6xx_hfi_enable_acd(struct a6xx_gmu *gmu)
{
struct a6xx_hfi_acd_table *acd_table = &gmu->acd_table;
- struct a6xx_hfi_msg_feature_ctrl msg = {
- .feature = HFI_FEATURE_ACD,
- .enable = 1,
- .data = 0,
- };
int ret;
if (!acd_table->enable_by_level)
return 0;
/* Enable ACD feature at GMU */
- ret = a6xx_hfi_send_msg(gmu, HFI_H2F_FEATURE_CTRL, &msg, sizeof(msg), NULL, 0);
+ ret = a6xx_hfi_feature_ctrl_msg(gmu, HFI_FEATURE_ACD, 1, 0);
if (ret) {
DRM_DEV_ERROR(gmu->dev, "Unable to enable ACD (%d)\n", ret);
return ret;
@@ -898,6 +916,10 @@ int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
if (ret)
return ret;
+ ret = a6xx_hfi_enable_ifpc(gmu);
+ if (ret)
+ return ret;
+
ret = a6xx_hfi_send_core_fw_start(gmu);
if (ret)
return ret;
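With the message construction factored into a6xx_hfi_feature_ctrl_msg(), enabling a GMU feature becomes a one-liner. A sketch combining the two users above, assuming only the helper and constants from this hunk:

static int example_enable_features(struct a6xx_gmu *gmu)
{
        int ret;

        ret = a6xx_hfi_feature_ctrl_msg(gmu, HFI_FEATURE_ACD, 1, 0);
        if (ret)
                return ret;

        /* long-hysteresis IFPC, as a6xx_hfi_enable_ifpc() sends it */
        return a6xx_hfi_feature_ctrl_msg(gmu, HFI_FEATURE_IFPC, 1,
                                         IFPC_LONG_HYST);
}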
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
index 6a12a35dabff..afc5f4aa3b17 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
@@ -41,7 +41,7 @@ static inline void set_preempt_state(struct a6xx_gpu *gpu,
}
/* Write the most recent wptr for the given ring into the hardware */
-static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+static inline void update_wptr(struct a6xx_gpu *a6xx_gpu, struct msm_ringbuffer *ring)
{
unsigned long flags;
uint32_t wptr;
@@ -51,7 +51,7 @@ static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
if (ring->restore_wptr) {
wptr = get_wptr(ring);
- gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
+ a6xx_fenced_write(a6xx_gpu, REG_A6XX_CP_RB_WPTR, wptr, BIT(0), false);
ring->restore_wptr = false;
}
@@ -111,9 +111,9 @@ static void preempt_prepare_postamble(struct a6xx_gpu *a6xx_gpu)
postamble[count++] = PKT7(CP_WAIT_REG_MEM, 6);
postamble[count++] = CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ);
- postamble[count++] = CP_WAIT_REG_MEM_1_POLL_ADDR_LO(
+ postamble[count++] = CP_WAIT_REG_MEM_POLL_ADDR_LO(
REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS);
- postamble[count++] = CP_WAIT_REG_MEM_2_POLL_ADDR_HI(0);
+ postamble[count++] = CP_WAIT_REG_MEM_POLL_ADDR_HI(0);
postamble[count++] = CP_WAIT_REG_MEM_3_REF(0x1);
postamble[count++] = CP_WAIT_REG_MEM_4_MASK(0x1);
postamble[count++] = CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(0);
@@ -136,6 +136,21 @@ static void preempt_disable_postamble(struct a6xx_gpu *a6xx_gpu)
a6xx_gpu->postamble_enabled = false;
}
+/*
+ * Set preemption keepalive vote. Please note that this vote is different from the one used in
+ * a6xx_irq()
+ */
+static void a6xx_preempt_keepalive_vote(struct msm_gpu *gpu, bool on)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ if (adreno_has_gmu_wrapper(adreno_gpu))
+ return;
+
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_PWR_COL_PREEMPT_KEEPALIVE, on);
+}
+
void a6xx_preempt_irq(struct msm_gpu *gpu)
{
uint32_t status;
@@ -172,10 +187,12 @@ void a6xx_preempt_irq(struct msm_gpu *gpu)
set_preempt_state(a6xx_gpu, PREEMPT_FINISH);
- update_wptr(gpu, a6xx_gpu->cur_ring);
+ update_wptr(a6xx_gpu, a6xx_gpu->cur_ring);
set_preempt_state(a6xx_gpu, PREEMPT_NONE);
+ a6xx_preempt_keepalive_vote(gpu, false);
+
trace_msm_gpu_preemption_irq(a6xx_gpu->cur_ring->id);
/*
@@ -268,7 +285,7 @@ void a6xx_preempt_trigger(struct msm_gpu *gpu)
*/
if (!ring || (a6xx_gpu->cur_ring == ring)) {
set_preempt_state(a6xx_gpu, PREEMPT_FINISH);
- update_wptr(gpu, a6xx_gpu->cur_ring);
+ update_wptr(a6xx_gpu, a6xx_gpu->cur_ring);
set_preempt_state(a6xx_gpu, PREEMPT_NONE);
spin_unlock_irqrestore(&a6xx_gpu->eval_lock, flags);
return;
@@ -302,13 +319,16 @@ void a6xx_preempt_trigger(struct msm_gpu *gpu)
spin_unlock_irqrestore(&ring->preempt_lock, flags);
- gpu_write64(gpu,
- REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO,
- a6xx_gpu->preempt_smmu_iova[ring->id]);
+ /* Set the keepalive bit to keep the GPU ON until preemption is complete */
+ a6xx_preempt_keepalive_vote(gpu, true);
+
+ a6xx_fenced_write(a6xx_gpu,
+ REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO, a6xx_gpu->preempt_smmu_iova[ring->id],
+ BIT(1), true);
- gpu_write64(gpu,
+ a6xx_fenced_write(a6xx_gpu,
REG_A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR,
- a6xx_gpu->preempt_iova[ring->id]);
+ a6xx_gpu->preempt_iova[ring->id], BIT(1), true);
a6xx_gpu->next_ring = ring;
@@ -328,7 +348,7 @@ void a6xx_preempt_trigger(struct msm_gpu *gpu)
set_preempt_state(a6xx_gpu, PREEMPT_TRIGGERED);
/* Trigger the preemption */
- gpu_write(gpu, REG_A6XX_CP_CONTEXT_SWITCH_CNTL, cntl);
+ a6xx_fenced_write(a6xx_gpu, REG_A6XX_CP_CONTEXT_SWITCH_CNTL, cntl, BIT(1), false);
}
static int preempt_init_ring(struct a6xx_gpu *a6xx_gpu,
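The preemption keepalive vote has to span from arming the context switch to the completion IRQ; otherwise IFPC could collapse GX mid-switch. The vote's lifetime condensed into one sketch; in the driver the two halves live in a6xx_preempt_trigger() and a6xx_preempt_irq() as the hunks above show:

static void preempt_window_example(struct msm_gpu *gpu,
                                   struct a6xx_gpu *a6xx_gpu, u32 cntl)
{
        /* hold GX up across the whole switch */
        a6xx_preempt_keepalive_vote(gpu, true);

        /* arm and trigger the context switch through the fenced path */
        a6xx_fenced_write(a6xx_gpu, REG_A6XX_CP_CONTEXT_SWITCH_CNTL, cntl,
                          BIT(1), false);

        /* ...CP raises the preemption IRQ; once state is PREEMPT_NONE: */
        a6xx_preempt_keepalive_vote(gpu, false);
}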
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 50945bfe9b49..28f744f3caf7 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -24,6 +24,10 @@ bool disable_acd;
MODULE_PARM_DESC(disable_acd, "Forcefully disable GPU ACD");
module_param_unsafe(disable_acd, bool, 0400);
+static bool skip_gpu;
+MODULE_PARM_DESC(skip_gpu, "Disable GPU driver registration (0=register GPU driver (default), 1=skip GPU driver registration)");
+module_param(skip_gpu, bool, 0400);
+
extern const struct adreno_gpulist a2xx_gpulist;
extern const struct adreno_gpulist a3xx_gpulist;
extern const struct adreno_gpulist a4xx_gpulist;
@@ -184,6 +188,9 @@ bool adreno_has_gpu(struct device_node *node)
uint32_t chip_id;
int ret;
+ if (skip_gpu)
+ return false;
+
ret = find_chipid(node, &chip_id);
if (ret)
return false;
@@ -404,10 +411,16 @@ static struct platform_driver adreno_driver = {
void __init adreno_register(void)
{
+ if (skip_gpu)
+ return;
+
platform_driver_register(&adreno_driver);
}
void __exit adreno_unregister(void)
{
+ if (skip_gpu)
+ return;
+
platform_driver_unregister(&adreno_driver);
}
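skip_gpu gates both device matching and platform driver registration, so the GPU can be kept out of the picture without a rebuild. The generic shape of such a gate, with hypothetical names:

static struct platform_driver example_driver;   /* hypothetical */

static bool skip_example;
module_param(skip_example, bool, 0400);
MODULE_PARM_DESC(skip_example, "Skip driver registration (default: off)");

static int __init example_init(void)
{
        if (skip_example)
                return 0;       /* leave the driver unregistered entirely */

        return platform_driver_register(&example_driver);
}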
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h
index cb66ece6606b..04b49d385f9d 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h
@@ -81,7 +81,7 @@ static const u32 gen7_0_0_debugbus_blocks[] = {
A7XX_DBGBUS_USPTP_7,
};
-static struct gen7_shader_block gen7_0_0_shader_blocks[] = {
+static const struct gen7_shader_block gen7_0_0_shader_blocks[] = {
{A7XX_TP0_TMO_DATA, 0x200, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
{A7XX_TP0_SMO_DATA, 0x80, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
{A7XX_TP0_MIPMAP_BASE_DATA, 0x3c0, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
@@ -668,12 +668,19 @@ static const u32 gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers), 8));
-/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_BR */
-static const u32 gen7_0_0_tpl1_noncontext_pipe_br_registers[] = {
+/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_NONE */
+static const u32 gen7_0_0_tpl1_noncontext_pipe_none_registers[] = {
0x0b600, 0x0b600, 0x0b602, 0x0b602, 0x0b604, 0x0b604, 0x0b608, 0x0b60c,
0x0b60f, 0x0b621, 0x0b630, 0x0b633,
UINT_MAX, UINT_MAX,
};
+static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_noncontext_pipe_none_registers), 8));
+
+/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_BR */
+static const u32 gen7_0_0_tpl1_noncontext_pipe_br_registers[] = {
+ 0x0b600, 0x0b600,
+ UINT_MAX, UINT_MAX,
+};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_noncontext_pipe_br_registers), 8));
/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_LPAC */
@@ -695,7 +702,7 @@ static const struct gen7_sel_reg gen7_0_0_rb_rbp_sel = {
.val = 0x9,
};
-static struct gen7_cluster_registers gen7_0_0_clusters[] = {
+static const struct gen7_cluster_registers gen7_0_0_clusters[] = {
{ A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
gen7_0_0_noncontext_pipe_br_registers, },
{ A7XX_CLUSTER_NONE, A7XX_PIPE_BV, STATE_NON_CONTEXT,
@@ -764,7 +771,7 @@ static struct gen7_cluster_registers gen7_0_0_clusters[] = {
gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers, },
};
-static struct gen7_sptp_cluster_registers gen7_0_0_sptp_clusters[] = {
+static const struct gen7_sptp_cluster_registers gen7_0_0_sptp_clusters[] = {
{ A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
gen7_0_0_sp_noncontext_pipe_br_hlsq_state_registers, 0xae00 },
{ A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
@@ -914,7 +921,7 @@ static const u32 gen7_0_0_dpm_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_dpm_registers), 8));
-static struct gen7_reg_list gen7_0_0_reg_list[] = {
+static const struct gen7_reg_list gen7_0_0_reg_list[] = {
{ gen7_0_0_gpu_registers, NULL },
{ gen7_0_0_cx_misc_registers, NULL },
{ gen7_0_0_dpm_registers, NULL },
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h
index 6f8ad50f32ce..772652eb61f3 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h
@@ -95,7 +95,7 @@ static const u32 gen7_2_0_debugbus_blocks[] = {
A7XX_DBGBUS_CCHE_2,
};
-static struct gen7_shader_block gen7_2_0_shader_blocks[] = {
+static const struct gen7_shader_block gen7_2_0_shader_blocks[] = {
{A7XX_TP0_TMO_DATA, 0x200, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
{A7XX_TP0_SMO_DATA, 0x80, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
{A7XX_TP0_MIPMAP_BASE_DATA, 0x3c0, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
@@ -489,7 +489,7 @@ static const struct gen7_sel_reg gen7_2_0_rb_rbp_sel = {
.val = 0x9,
};
-static struct gen7_cluster_registers gen7_2_0_clusters[] = {
+static const struct gen7_cluster_registers gen7_2_0_clusters[] = {
{ A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
gen7_2_0_noncontext_pipe_br_registers, },
{ A7XX_CLUSTER_NONE, A7XX_PIPE_BV, STATE_NON_CONTEXT,
@@ -558,7 +558,7 @@ static struct gen7_cluster_registers gen7_2_0_clusters[] = {
gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers, },
};
-static struct gen7_sptp_cluster_registers gen7_2_0_sptp_clusters[] = {
+static const struct gen7_sptp_cluster_registers gen7_2_0_sptp_clusters[] = {
{ A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
gen7_0_0_sp_noncontext_pipe_br_hlsq_state_registers, 0xae00 },
{ A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
@@ -573,6 +573,8 @@ static struct gen7_sptp_cluster_registers gen7_2_0_sptp_clusters[] = {
gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers, 0xaf80 },
{ A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
gen7_0_0_tpl1_noncontext_pipe_br_registers, 0xb600 },
+ { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_NONE, 0, A7XX_USPTP,
+ gen7_0_0_tpl1_noncontext_pipe_none_registers, 0xb600 },
{ A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
gen7_0_0_tpl1_noncontext_pipe_lpac_registers, 0xb780 },
{ A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
@@ -737,7 +739,7 @@ static const u32 gen7_2_0_dpm_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_2_0_dpm_registers), 8));
-static struct gen7_reg_list gen7_2_0_reg_list[] = {
+static const struct gen7_reg_list gen7_2_0_reg_list[] = {
{ gen7_2_0_gpu_registers, NULL },
{ gen7_2_0_cx_misc_registers, NULL },
{ gen7_2_0_dpm_registers, NULL },
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
index e02cabb39f19..0956dfca1f05 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
@@ -117,7 +117,7 @@ static const u32 gen7_9_0_cx_debugbus_blocks[] = {
A7XX_DBGBUS_GBIF_CX,
};
-static struct gen7_shader_block gen7_9_0_shader_blocks[] = {
+static const struct gen7_shader_block gen7_9_0_shader_blocks[] = {
{ A7XX_TP0_TMO_DATA, 0x0200, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
{ A7XX_TP0_SMO_DATA, 0x0080, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
{ A7XX_TP0_MIPMAP_BASE_DATA, 0x03C0, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
@@ -1116,7 +1116,7 @@ static const struct gen7_sel_reg gen7_9_0_rb_rbp_sel = {
.val = 0x9,
};
-static struct gen7_cluster_registers gen7_9_0_clusters[] = {
+static const struct gen7_cluster_registers gen7_9_0_clusters[] = {
{ A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
gen7_9_0_non_context_pipe_br_registers, },
{ A7XX_CLUSTER_NONE, A7XX_PIPE_BV, STATE_NON_CONTEXT,
@@ -1185,7 +1185,7 @@ static struct gen7_cluster_registers gen7_9_0_clusters[] = {
gen7_9_0_vpc_pipe_bv_cluster_vpc_ps_registers, },
};
-static struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] = {
+static const struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] = {
{ A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
gen7_9_0_non_context_sp_pipe_br_hlsq_state_registers, 0xae00},
{ A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
@@ -1294,34 +1294,34 @@ static struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] = {
gen7_9_0_tpl1_pipe_br_cluster_sp_ps_usptp_registers, 0xb000},
};
-static struct a6xx_indexed_registers gen7_9_0_cp_indexed_reg_list[] = {
+static const struct a6xx_indexed_registers gen7_9_0_cp_indexed_reg_list[] = {
{ "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
REG_A6XX_CP_SQE_STAT_DATA, 0x00040},
{ "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
REG_A6XX_CP_DRAW_STATE_DATA, 0x00200},
- { "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR,
+ { "CP_ROQ_DBG", REG_A6XX_CP_ROQ_DBG_ADDR,
REG_A6XX_CP_ROQ_DBG_DATA, 0x00800},
- { "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
+ { "CP_SQE_UCODE_DBG", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x08000},
- { "CP_BV_DRAW_STATE_ADDR", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
+ { "CP_BV_DRAW_STATE", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
REG_A7XX_CP_BV_DRAW_STATE_DATA, 0x00200},
- { "CP_BV_ROQ_DBG_ADDR", REG_A7XX_CP_BV_ROQ_DBG_ADDR,
+ { "CP_BV_ROQ_DBG", REG_A7XX_CP_BV_ROQ_DBG_ADDR,
REG_A7XX_CP_BV_ROQ_DBG_DATA, 0x00800},
- { "CP_BV_SQE_UCODE_DBG_ADDR", REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR,
+ { "CP_BV_SQE_UCODE_DBG", REG_A7XX_CP_BV_SQE_UCODE_DBG_ADDR,
REG_A7XX_CP_BV_SQE_UCODE_DBG_DATA, 0x08000},
- { "CP_BV_SQE_STAT_ADDR", REG_A7XX_CP_BV_SQE_STAT_ADDR,
+ { "CP_BV_SQE_STAT", REG_A7XX_CP_BV_SQE_STAT_ADDR,
REG_A7XX_CP_BV_SQE_STAT_DATA, 0x00040},
- { "CP_RESOURCE_TBL", REG_A7XX_CP_RESOURCE_TABLE_DBG_ADDR,
+ { "CP_RESOURCE_TABLE_DBG", REG_A7XX_CP_RESOURCE_TABLE_DBG_ADDR,
REG_A7XX_CP_RESOURCE_TABLE_DBG_DATA, 0x04100},
- { "CP_LPAC_DRAW_STATE_ADDR", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR,
+ { "CP_LPAC_DRAW_STATE", REG_A7XX_CP_LPAC_DRAW_STATE_ADDR,
REG_A7XX_CP_LPAC_DRAW_STATE_DATA, 0x00200},
- { "CP_LPAC_ROQ", REG_A7XX_CP_LPAC_ROQ_DBG_ADDR,
+ { "CP_LPAC_ROQ_DBG", REG_A7XX_CP_LPAC_ROQ_DBG_ADDR,
REG_A7XX_CP_LPAC_ROQ_DBG_DATA, 0x00200},
- { "CP_SQE_AC_UCODE_DBG_ADDR", REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR,
+ { "CP_SQE_AC_UCODE_DBG", REG_A7XX_CP_SQE_AC_UCODE_DBG_ADDR,
REG_A7XX_CP_SQE_AC_UCODE_DBG_DATA, 0x08000},
- { "CP_SQE_AC_STAT_ADDR", REG_A7XX_CP_SQE_AC_STAT_ADDR,
+ { "CP_SQE_AC_STAT", REG_A7XX_CP_SQE_AC_STAT_ADDR,
REG_A7XX_CP_SQE_AC_STAT_DATA, 0x00040},
- { "CP_LPAC_FIFO_DBG_ADDR", REG_A7XX_CP_LPAC_FIFO_DBG_ADDR,
+ { "CP_LPAC_FIFO_DBG", REG_A7XX_CP_LPAC_FIFO_DBG_ADDR,
REG_A7XX_CP_LPAC_FIFO_DBG_DATA, 0x00040},
{ "CP_AQE_ROQ_0", REG_A7XX_CP_AQE_ROQ_DBG_ADDR_0,
REG_A7XX_CP_AQE_ROQ_DBG_DATA_0, 0x00100},
@@ -1337,7 +1337,7 @@ static struct a6xx_indexed_registers gen7_9_0_cp_indexed_reg_list[] = {
REG_A7XX_CP_AQE_STAT_DATA_1, 0x00040},
};
-static struct gen7_reg_list gen7_9_0_reg_list[] = {
+static const struct gen7_reg_list gen7_9_0_reg_list[] = {
{ gen7_9_0_gpu_registers, NULL},
{ gen7_9_0_cx_misc_registers, NULL},
{ gen7_9_0_cx_dbgc_registers, NULL},
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index f1230465bf0d..afaa3cfefd35 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -10,7 +10,7 @@
#include <linux/interconnect.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/kernel.h>
-#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/soc/qcom/mdt_loader.h>
@@ -33,7 +33,7 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
struct device *dev = &gpu->pdev->dev;
const struct firmware *fw;
const char *signed_fwname = NULL;
- struct device_node *np, *mem_np;
+ struct device_node *np;
struct resource r;
phys_addr_t mem_phys;
ssize_t mem_size;
@@ -51,18 +51,11 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
return -ENODEV;
}
- mem_np = of_parse_phandle(np, "memory-region", 0);
- of_node_put(np);
- if (!mem_np) {
+ ret = of_reserved_mem_region_to_resource(np, 0, &r);
+ if (ret) {
zap_available = false;
- return -EINVAL;
- }
-
- ret = of_address_to_resource(mem_np, 0, &r);
- of_node_put(mem_np);
- if (ret)
return ret;
-
+ }
mem_phys = r.start;
/*
@@ -209,9 +202,7 @@ adreno_iommu_create_vm(struct msm_gpu *gpu,
u64 start, size;
mmu = msm_iommu_gpu_new(&pdev->dev, gpu, quirks);
- if (!mmu)
- return ERR_PTR(-ENODEV);
- else if (IS_ERR_OR_NULL(mmu))
+ if (IS_ERR(mmu))
return ERR_CAST(mmu);
geometry = msm_iommu_get_geometry(mmu);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 9dc93c247196..390fa6720d9b 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -59,6 +59,7 @@ enum adreno_family {
#define ADRENO_QUIRK_HAS_CACHED_COHERENT BIT(4)
#define ADRENO_QUIRK_PREEMPTION BIT(5)
#define ADRENO_QUIRK_4GB_VA BIT(6)
+#define ADRENO_QUIRK_IFPC BIT(7)
/* Helper for formatting the chip_id in the way that userspace tools like
* crashdec expect.
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
index 0fb5789c60d0..13cc658065c5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -32,6 +32,26 @@ enum dpu_perf_mode {
};
/**
+ * dpu_core_perf_adjusted_mode_clk() - Adjust the given mode clock rate
+ * according to the perf clock inefficiency factor.
+ * @mode_clk_rate: Unadjusted mode clock rate
+ * @perf_cfg: performance configuration
+ */
+u64 dpu_core_perf_adjusted_mode_clk(u64 mode_clk_rate,
+ const struct dpu_perf_cfg *perf_cfg)
+{
+ u32 clk_factor;
+
+ clk_factor = perf_cfg->clk_inefficiency_factor;
+ if (clk_factor) {
+ mode_clk_rate *= clk_factor;
+ do_div(mode_clk_rate, 100);
+ }
+
+ return mode_clk_rate;
+}
+
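+/*
+ * Worked example (illustrative numbers): with clk_inefficiency_factor =
+ * 105, a 600000 kHz mode clock is adjusted to 600000 * 105 / 100 =
+ * 630000 kHz; a factor of 0 leaves the rate unchanged.
+ */
+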
+/**
* _dpu_core_perf_calc_bw() - to calculate BW per crtc
* @perf_cfg: performance configuration
* @crtc: pointer to a crtc
@@ -75,28 +95,21 @@ static u64 _dpu_core_perf_calc_clk(const struct dpu_perf_cfg *perf_cfg,
struct drm_plane *plane;
struct dpu_plane_state *pstate;
struct drm_display_mode *mode;
- u64 crtc_clk;
- u32 clk_factor;
+ u64 mode_clk;
mode = &state->adjusted_mode;
- crtc_clk = (u64)mode->vtotal * mode->hdisplay * drm_mode_vrefresh(mode);
+ mode_clk = (u64)mode->vtotal * mode->hdisplay * drm_mode_vrefresh(mode);
drm_atomic_crtc_for_each_plane(plane, crtc) {
pstate = to_dpu_plane_state(plane->state);
if (!pstate)
continue;
- crtc_clk = max(pstate->plane_clk, crtc_clk);
- }
-
- clk_factor = perf_cfg->clk_inefficiency_factor;
- if (clk_factor) {
- crtc_clk *= clk_factor;
- do_div(crtc_clk, 100);
+ mode_clk = max(pstate->plane_clk, mode_clk);
}
- return crtc_clk;
+ return dpu_core_perf_adjusted_mode_clk(mode_clk, perf_cfg);
}
static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
index d2f21d34e501..3740bc97422c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
@@ -54,6 +54,9 @@ struct dpu_core_perf {
u32 fix_core_ab_vote;
};
+u64 dpu_core_perf_adjusted_mode_clk(u64 mode_clk_rate,
+ const struct dpu_perf_cfg *perf_cfg);
+
int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
struct drm_crtc_state *state);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index d4b545448d74..4b970a59deaf 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -377,11 +377,10 @@ static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
{
struct dpu_crtc_state *crtc_state;
- int lm_idx, lm_horiz_position;
+ int lm_idx;
crtc_state = to_dpu_crtc_state(crtc->state);
- lm_horiz_position = 0;
for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
@@ -392,7 +391,7 @@ static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
cfg.out_width = drm_rect_width(lm_roi);
cfg.out_height = drm_rect_height(lm_roi);
- cfg.right_mixer = lm_horiz_position++;
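+ /* mixers at odd indices act as the right mixer of an LM pair */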
+ cfg.right_mixer = lm_idx & 0x1;
cfg.flags = 0;
hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
}
@@ -596,7 +595,7 @@ static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
spin_lock_irqsave(&dev->event_lock, flags);
if (dpu_crtc->event) {
- DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
+ DRM_DEBUG_VBL("%s: send event: %p\n", dpu_crtc->name,
dpu_crtc->event);
trace_dpu_crtc_complete_flip(DRMID(crtc));
drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
@@ -1534,6 +1533,7 @@ static enum drm_mode_status dpu_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
{
struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
+ u64 adjusted_mode_clk;
/* if there is no 3d_mux block we cannot merge LMs so we cannot
* split the large layer into 2 LMs, filter out such modes
@@ -1541,6 +1541,17 @@ static enum drm_mode_status dpu_crtc_mode_valid(struct drm_crtc *crtc,
if (!dpu_kms->catalog->caps->has_3d_merge &&
mode->hdisplay > dpu_kms->catalog->caps->max_mixer_width)
return MODE_BAD_HVALUE;
+
+ adjusted_mode_clk = dpu_core_perf_adjusted_mode_clk(mode->clock,
+ dpu_kms->perf.perf_cfg);
+
+ /*
+ * The given mode clock, adjusted for the perf clock factor, must not
+ * exceed the max core clock rate; mode->clock is in kHz while
+ * max_core_clk_rate is in Hz, hence the multiplication by 1000
+ */
+ if (dpu_kms->perf.max_core_clk_rate < adjusted_mode_clk * 1000)
+ return MODE_CLOCK_HIGH;
+
/*
* max crtc width is equal to the max mixer width * 2 and max height is 4K
*/
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 05e5f3463e30..258edaa18fc0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -730,6 +730,8 @@ bool dpu_encoder_needs_modeset(struct drm_encoder *drm_enc, struct drm_atomic_st
return false;
conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (!conn_state)
+ return false;
/**
* These checks are duplicated from dpu_encoder_update_topology() since
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
index 56a5b596554d..46f348972a97 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
@@ -446,7 +446,7 @@ static void _dpu_encoder_phys_wb_handle_wbdone_timeout(
static int dpu_encoder_phys_wb_wait_for_commit_done(
struct dpu_encoder_phys *phys_enc)
{
- unsigned long ret;
+ int ret;
struct dpu_encoder_wait_info wait_info;
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index e824cd64fd3f..6641455c4ec6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -338,7 +338,6 @@ static const struct dpu_sspp_sub_blks dpu_dma_sblk = _DMA_SBLK();
*************************************************************/
static const struct dpu_lm_sub_blks msm8998_lm_sblk = {
- .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.maxblendstages = 7, /* excluding base layer */
.blendstage_base = { /* offsets relative to mixer base */
0x20, 0x50, 0x80, 0xb0, 0x230,
@@ -347,7 +346,6 @@ static const struct dpu_lm_sub_blks msm8998_lm_sblk = {
};
static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
- .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.maxblendstages = 11, /* excluding base layer */
.blendstage_base = { /* offsets relative to mixer base */
0x20, 0x38, 0x50, 0x68, 0x80, 0x98,
@@ -356,7 +354,6 @@ static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
};
static const struct dpu_lm_sub_blks sc7180_lm_sblk = {
- .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.maxblendstages = 7, /* excluding base layer */
.blendstage_base = { /* offsets relative to mixer base */
0x20, 0x38, 0x50, 0x68, 0x80, 0x98, 0xb0
@@ -364,7 +361,6 @@ static const struct dpu_lm_sub_blks sc7180_lm_sblk = {
};
static const struct dpu_lm_sub_blks sm8750_lm_sblk = {
- .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.maxblendstages = 11, /* excluding base layer */
.blendstage_base = { /* offsets relative to mixer base */
/* 0x40 + n*0x30 */
@@ -374,7 +370,6 @@ static const struct dpu_lm_sub_blks sm8750_lm_sblk = {
};
static const struct dpu_lm_sub_blks qcm2290_lm_sblk = {
- .maxwidth = DEFAULT_DPU_LINE_WIDTH,
.maxblendstages = 4, /* excluding base layer */
.blendstage_base = { /* offsets relative to mixer base */
0x20, 0x38, 0x50, 0x68
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index a78bb2c334e3..f0768f54e9b3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -307,7 +307,6 @@ struct dpu_sspp_sub_blks {
* @blendstage_base: Blend-stage register base offset
*/
struct dpu_lm_sub_blks {
- u32 maxwidth;
u32 maxblendstages;
u32 blendstage_base[MAX_BLOCKS];
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
index 11fb1bc54fa9..54b20faa0b69 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
@@ -31,14 +31,14 @@ static void dpu_setup_dspp_pcc(struct dpu_hw_dspp *ctx,
u32 base;
if (!ctx) {
- DRM_ERROR("invalid ctx %pK\n", ctx);
+ DRM_ERROR("invalid ctx %p\n", ctx);
return;
}
base = ctx->cap->sblk->pcc.base;
if (!base) {
- DRM_ERROR("invalid ctx %pK pcc base 0x%x\n", ctx, base);
+ DRM_ERROR("invalid ctx %p pcc base 0x%x\n", ctx, base);
return;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 12dcb32b4724..4e5a8ecd31f7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -1110,7 +1110,7 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
{
struct drm_gpuvm *vm;
- vm = msm_kms_init_vm(dpu_kms->dev);
+ vm = msm_kms_init_vm(dpu_kms->dev, dpu_kms->dev->dev->parent);
if (IS_ERR(vm))
return PTR_ERR(vm);
@@ -1345,7 +1345,7 @@ static int dpu_kms_mmap_mdp5(struct dpu_kms *dpu_kms)
dpu_kms->mmio = NULL;
return ret;
}
- DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
+ DRM_DEBUG("mapped dpu address space @%p\n", dpu_kms->mmio);
dpu_kms->vbif[VBIF_RT] = msm_ioremap_mdss(mdss_dev,
dpu_kms->pdev,
@@ -1380,7 +1380,7 @@ static int dpu_kms_mmap_dpu(struct dpu_kms *dpu_kms)
dpu_kms->mmio = NULL;
return ret;
}
- DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
+ DRM_DEBUG("mapped dpu address space @%p\n", dpu_kms->mmio);
dpu_kms->vbif[VBIF_RT] = msm_ioremap(pdev, "vbif");
if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index 01171c535a27..f54cf0faa1c7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -922,6 +922,9 @@ static int dpu_plane_is_multirect_capable(struct dpu_hw_sspp *sspp,
if (MSM_FORMAT_IS_YUV(fmt))
return false;
+ if (!sspp)
+ return true;
+
if (!test_bit(DPU_SSPP_SMART_DMA_V1, &sspp->cap->features) &&
!test_bit(DPU_SSPP_SMART_DMA_V2, &sspp->cap->features))
return false;
@@ -1028,6 +1031,7 @@ static int dpu_plane_try_multirect_shared(struct dpu_plane_state *pstate,
prev_pipe->multirect_mode != DPU_SSPP_MULTIRECT_NONE)
return false;
+ /* Skip SSPP validation while the current plane's SSPP is not yet assigned */
if (!dpu_plane_is_multirect_capable(pipe->sspp, pipe_cfg, fmt) ||
!dpu_plane_is_multirect_capable(prev_pipe->sspp, prev_pipe_cfg, prev_fmt))
return false;
@@ -1129,7 +1133,7 @@ static int dpu_plane_virtual_atomic_check(struct drm_plane *plane,
struct drm_plane_state *old_plane_state =
drm_atomic_get_old_plane_state(state, plane);
struct dpu_plane_state *pstate = to_dpu_plane_state(plane_state);
- struct drm_crtc_state *crtc_state;
+ struct drm_crtc_state *crtc_state = NULL;
int ret;
if (IS_ERR(plane_state))
@@ -1162,7 +1166,7 @@ static int dpu_plane_virtual_atomic_check(struct drm_plane *plane,
if (!old_plane_state || !old_plane_state->fb ||
old_plane_state->src_w != plane_state->src_w ||
old_plane_state->src_h != plane_state->src_h ||
- old_plane_state->src_w != plane_state->src_w ||
+ old_plane_state->crtc_w != plane_state->crtc_w ||
old_plane_state->crtc_h != plane_state->crtc_h ||
msm_framebuffer_format(old_plane_state->fb) !=
msm_framebuffer_format(plane_state->fb))
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 25382120cb1a..2c77c74fac0f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -865,6 +865,21 @@ void dpu_rm_release_all_sspp(struct dpu_global_state *global_state,
ARRAY_SIZE(global_state->sspp_to_crtc_id), crtc_id);
}
+static char *dpu_hw_blk_type_name[] = {
+ [DPU_HW_BLK_TOP] = "TOP",
+ [DPU_HW_BLK_SSPP] = "SSPP",
+ [DPU_HW_BLK_LM] = "LM",
+ [DPU_HW_BLK_CTL] = "CTL",
+ [DPU_HW_BLK_PINGPONG] = "pingpong",
+ [DPU_HW_BLK_INTF] = "INTF",
+ [DPU_HW_BLK_WB] = "WB",
+ [DPU_HW_BLK_DSPP] = "DSPP",
+ [DPU_HW_BLK_MERGE_3D] = "merge_3d",
+ [DPU_HW_BLK_DSC] = "DSC",
+ [DPU_HW_BLK_CDM] = "CDM",
+ [DPU_HW_BLK_MAX] = "unknown",
+};
+
/**
* dpu_rm_get_assigned_resources - Get hw resources of the given type that are
* assigned to this encoder
@@ -946,13 +961,13 @@ int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
}
if (num_blks == blks_size) {
- DPU_ERROR("More than %d resources assigned to crtc %d\n",
- blks_size, crtc_id);
+ DPU_ERROR("More than %d %s assigned to crtc %d\n",
+ blks_size, dpu_hw_blk_type_name[type], crtc_id);
break;
}
if (!hw_blks[i]) {
- DPU_ERROR("Allocated resource %d unavailable to assign to crtc %d\n",
- type, crtc_id);
+ DPU_ERROR("%s unavailable to assign to crtc %d\n",
+ dpu_hw_blk_type_name[type], crtc_id);
break;
}
blks[num_blks++] = hw_blks[i];
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
index 8ff496082902..cd73468e369a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
@@ -80,7 +80,6 @@ static int dpu_wb_conn_atomic_check(struct drm_connector *connector,
static const struct drm_connector_funcs dpu_wb_conn_funcs = {
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
@@ -131,12 +130,9 @@ int dpu_writeback_init(struct drm_device *dev, struct drm_encoder *enc,
drm_connector_helper_add(&dpu_wb_conn->base.base, &dpu_wb_conn_helper_funcs);
- /* DPU initializes the encoder and sets it up completely for writeback
- * cases and hence should use the new API drm_writeback_connector_init_with_encoder
- * to initialize the writeback connector
- */
- rc = drm_writeback_connector_init_with_encoder(dev, &dpu_wb_conn->base, enc,
- &dpu_wb_conn_funcs, format_list, num_formats);
+ rc = drmm_writeback_connector_init(dev, &dpu_wb_conn->base,
+ &dpu_wb_conn_funcs, enc,
+ format_list, num_formats);
if (!rc)
dpu_wb_conn->wb_enc = enc;
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index 0952c7f18abd..809ca191e9de 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -391,11 +391,9 @@ static void read_mdp_hw_revision(struct mdp4_kms *mdp4_kms,
static int mdp4_kms_init(struct drm_device *dev)
{
- struct platform_device *pdev = to_platform_device(dev->dev);
struct msm_drm_private *priv = dev->dev_private;
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(priv->kms));
struct msm_kms *kms = NULL;
- struct msm_mmu *mmu;
struct drm_gpuvm *vm;
int ret;
u32 major, minor;
@@ -458,29 +456,14 @@ static int mdp4_kms_init(struct drm_device *dev)
mdp4_disable(mdp4_kms);
mdelay(16);
- mmu = msm_iommu_new(&pdev->dev, 0);
- if (IS_ERR(mmu)) {
- ret = PTR_ERR(mmu);
+ vm = msm_kms_init_vm(mdp4_kms->dev, NULL);
+ if (IS_ERR(vm)) {
+ ret = PTR_ERR(vm);
goto fail;
- } else if (!mmu) {
- DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
- "contig buffers for scanout\n");
- vm = NULL;
- } else {
- vm = msm_gem_vm_create(dev, mmu, "mdp4",
- 0x1000, 0x100000000 - 0x1000,
- true);
-
- if (IS_ERR(vm)) {
- if (!IS_ERR(mmu))
- mmu->funcs->destroy(mmu);
- ret = PTR_ERR(vm);
- goto fail;
- }
-
- kms->vm = vm;
}
+ kms->vm = vm;
+
ret = modeset_init(mdp4_kms);
if (ret) {
DRM_DEV_ERROR(dev->dev, "modeset_init failed: %d\n", ret);
@@ -529,7 +512,7 @@ static int mdp4_probe(struct platform_device *pdev)
mdp4_kms = devm_kzalloc(dev, sizeof(*mdp4_kms), GFP_KERNEL);
if (!mdp4_kms)
- return dev_err_probe(dev, -ENOMEM, "failed to allocate kms\n");
+ return -ENOMEM;
mdp4_kms->mmio = msm_ioremap(pdev, NULL);
if (IS_ERR(mdp4_kms->mmio))
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
index fb348583dc84..06458d4ee48c 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
@@ -202,6 +202,6 @@ static inline struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev)
}
#endif
-struct clk *mpd4_get_lcdc_clock(struct drm_device *dev);
+struct clk *mdp4_get_lcdc_clock(struct drm_device *dev);
#endif /* __MDP4_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
index 06a307c1272d..1051873057f6 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
@@ -375,7 +375,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev)
drm_encoder_helper_add(encoder, &mdp4_lcdc_encoder_helper_funcs);
- mdp4_lcdc_encoder->lcdc_clk = mpd4_get_lcdc_clock(dev);
+ mdp4_lcdc_encoder->lcdc_clk = mdp4_get_lcdc_clock(dev);
if (IS_ERR(mdp4_lcdc_encoder->lcdc_clk)) {
DRM_DEV_ERROR(dev->dev, "failed to get lvds_clk\n");
return ERR_CAST(mdp4_lcdc_encoder->lcdc_clk);
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
index fa2c29470510..04c49bf3d854 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
@@ -54,7 +54,7 @@ static const struct pll_rate *find_rate(unsigned long rate)
return &freqtbl[i-1];
}
-static int mpd4_lvds_pll_enable(struct clk_hw *hw)
+static int mdp4_lvds_pll_enable(struct clk_hw *hw)
{
struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw);
struct mdp4_kms *mdp4_kms = get_kms(lvds_pll);
@@ -80,7 +80,7 @@ static int mpd4_lvds_pll_enable(struct clk_hw *hw)
return 0;
}
-static void mpd4_lvds_pll_disable(struct clk_hw *hw)
+static void mdp4_lvds_pll_disable(struct clk_hw *hw)
{
struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw);
struct mdp4_kms *mdp4_kms = get_kms(lvds_pll);
@@ -91,21 +91,24 @@ static void mpd4_lvds_pll_disable(struct clk_hw *hw)
mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_PLL_CTRL_0, 0x0);
}
-static unsigned long mpd4_lvds_pll_recalc_rate(struct clk_hw *hw,
+static unsigned long mdp4_lvds_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw);
return lvds_pll->pixclk;
}
-static long mpd4_lvds_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int mdp4_lvds_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- const struct pll_rate *pll_rate = find_rate(rate);
- return pll_rate->rate;
+ const struct pll_rate *pll_rate = find_rate(req->rate);
+
+ req->rate = pll_rate->rate;
+
+ return 0;
}
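+
+/*
+ * Unlike the old .round_rate callback, which returned the chosen rate
+ * as a long, .determine_rate writes the selected rate back into
+ * req->rate (and may adjust req->best_parent_rate) and returns 0 on
+ * success or a negative errno.
+ */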
-static int mpd4_lvds_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+static int mdp4_lvds_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw);
@@ -114,26 +117,26 @@ static int mpd4_lvds_pll_set_rate(struct clk_hw *hw, unsigned long rate,
}
-static const struct clk_ops mpd4_lvds_pll_ops = {
- .enable = mpd4_lvds_pll_enable,
- .disable = mpd4_lvds_pll_disable,
- .recalc_rate = mpd4_lvds_pll_recalc_rate,
- .round_rate = mpd4_lvds_pll_round_rate,
- .set_rate = mpd4_lvds_pll_set_rate,
+static const struct clk_ops mdp4_lvds_pll_ops = {
+ .enable = mdp4_lvds_pll_enable,
+ .disable = mdp4_lvds_pll_disable,
+ .recalc_rate = mdp4_lvds_pll_recalc_rate,
+ .determine_rate = mdp4_lvds_pll_determine_rate,
+ .set_rate = mdp4_lvds_pll_set_rate,
};
-static const struct clk_parent_data mpd4_lvds_pll_parents[] = {
+static const struct clk_parent_data mdp4_lvds_pll_parents[] = {
{ .fw_name = "pxo", .name = "pxo", },
};
static struct clk_init_data pll_init = {
- .name = "mpd4_lvds_pll",
- .ops = &mpd4_lvds_pll_ops,
- .parent_data = mpd4_lvds_pll_parents,
- .num_parents = ARRAY_SIZE(mpd4_lvds_pll_parents),
+ .name = "mdp4_lvds_pll",
+ .ops = &mdp4_lvds_pll_ops,
+ .parent_data = mdp4_lvds_pll_parents,
+ .num_parents = ARRAY_SIZE(mdp4_lvds_pll_parents),
};
-static struct clk_hw *mpd4_lvds_pll_init(struct drm_device *dev)
+static struct clk_hw *mdp4_lvds_pll_init(struct drm_device *dev)
{
struct mdp4_lvds_pll *lvds_pll;
int ret;
@@ -156,14 +159,14 @@ static struct clk_hw *mpd4_lvds_pll_init(struct drm_device *dev)
return &lvds_pll->pll_hw;
}
-struct clk *mpd4_get_lcdc_clock(struct drm_device *dev)
+struct clk *mdp4_get_lcdc_clock(struct drm_device *dev)
{
struct clk_hw *hw;
struct clk *clk;
/* TODO: do we need different pll in other cases? */
- hw = mpd4_lvds_pll_init(dev);
+ hw = mdp4_lvds_pll_init(dev);
if (IS_ERR(hw)) {
DRM_DEV_ERROR(dev->dev, "failed to register LVDS PLL\n");
return ERR_CAST(hw);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 5b6ca8dd929e..61edf6864092 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -534,7 +534,7 @@ static int mdp5_kms_init(struct drm_device *dev)
}
mdelay(16);
- vm = msm_kms_init_vm(mdp5_kms->dev);
+ vm = msm_kms_init_vm(mdp5_kms->dev, pdev->dev.parent);
if (IS_ERR(vm)) {
ret = PTR_ERR(vm);
goto fail;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 221f12db5f8b..4ea681130dba 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -5,6 +5,8 @@
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
#include <dt-bindings/phy/phy.h>
#include "dsi_phy.h"
@@ -511,30 +513,6 @@ int msm_dsi_cphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
return 0;
}
-static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
-{
- struct device *dev = &phy->pdev->dev;
- int ret;
-
- ret = pm_runtime_resume_and_get(dev);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(phy->ahb_clk);
- if (ret) {
- DRM_DEV_ERROR(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
- pm_runtime_put_sync(dev);
- }
-
- return ret;
-}
-
-static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
-{
- clk_disable_unprepare(phy->ahb_clk);
- pm_runtime_put(&phy->pdev->dev);
-}
-
static const struct of_device_id dsi_phy_dt_match[] = {
#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
{ .compatible = "qcom,dsi-phy-28nm-hpm",
@@ -698,22 +676,20 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
if (ret)
return ret;
- phy->ahb_clk = msm_clk_get(pdev, "iface");
- if (IS_ERR(phy->ahb_clk))
- return dev_err_probe(dev, PTR_ERR(phy->ahb_clk),
- "Unable to get ahb clk\n");
+ platform_set_drvdata(pdev, phy);
- ret = devm_pm_runtime_enable(&pdev->dev);
+ ret = devm_pm_runtime_enable(dev);
if (ret)
return ret;
- /* PLL init will call into clk_register which requires
- * register access, so we need to enable power and ahb clock.
- */
- ret = dsi_phy_enable_resource(phy);
+ ret = devm_pm_clk_create(dev);
if (ret)
return ret;
+ ret = pm_clk_add(dev, "iface");
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Unable to get iface clk\n");
+
if (phy->cfg->ops.pll_init) {
ret = phy->cfg->ops.pll_init(phy);
if (ret)
@@ -727,18 +703,19 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
return dev_err_probe(dev, ret,
"Failed to register clk provider\n");
- dsi_phy_disable_resource(phy);
-
- platform_set_drvdata(pdev, phy);
-
return 0;
}
+static const struct dev_pm_ops dsi_phy_pm_ops = {
+ SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
+};
+
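+/*
+ * With pm_clk, the "iface" clock is gated and ungated automatically by
+ * the runtime PM callbacks above, replacing the hand-rolled
+ * dsi_phy_enable_resource()/dsi_phy_disable_resource() helpers.
+ */
+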
static struct platform_driver dsi_phy_platform_driver = {
.probe = dsi_phy_driver_probe,
.driver = {
.name = "msm_dsi_phy",
.of_match_table = dsi_phy_dt_match,
+ .pm = &dsi_phy_pm_ops,
},
};
@@ -764,9 +741,9 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
dev = &phy->pdev->dev;
- ret = dsi_phy_enable_resource(phy);
+ ret = pm_runtime_resume_and_get(dev);
if (ret) {
- DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n",
+ DRM_DEV_ERROR(dev, "%s: resume failed, %d\n",
__func__, ret);
goto res_en_fail;
}
@@ -810,7 +787,7 @@ pll_restor_fail:
phy_en_fail:
regulator_bulk_disable(phy->cfg->num_regulators, phy->supplies);
reg_en_fail:
- dsi_phy_disable_resource(phy);
+ pm_runtime_put(dev);
res_en_fail:
return ret;
}
@@ -823,7 +800,7 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
phy->cfg->ops.disable(phy);
regulator_bulk_disable(phy->cfg->num_regulators, phy->supplies);
- dsi_phy_disable_resource(phy);
+ pm_runtime_put(&phy->pdev->dev);
}
void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index c558f8df1684..e391505fdaf0 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -104,12 +104,12 @@ struct msm_dsi_phy {
phys_addr_t lane_size;
int id;
- struct clk *ahb_clk;
struct regulator_bulk_data *supplies;
struct msm_dsi_dphy_timing timing;
const struct msm_dsi_phy_cfg *cfg;
void *tuning_cfg;
+ void *pll_data;
enum msm_dsi_phy_usecase usecase;
bool regulator_ldo_mode;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
index af2e30f3f842..ec486ff02c9b 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -444,21 +444,19 @@ static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
return (unsigned long)vco_rate;
}
-static long dsi_pll_10nm_clk_round_rate(struct clk_hw *hw,
- unsigned long rate, unsigned long *parent_rate)
+static int dsi_pll_10nm_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
- if (rate < pll_10nm->phy->cfg->min_pll_rate)
- return pll_10nm->phy->cfg->min_pll_rate;
- else if (rate > pll_10nm->phy->cfg->max_pll_rate)
- return pll_10nm->phy->cfg->max_pll_rate;
- else
- return rate;
+ req->rate = clamp_t(unsigned long, req->rate,
+ pll_10nm->phy->cfg->min_pll_rate, pll_10nm->phy->cfg->max_pll_rate);
+
+ return 0;
}
static const struct clk_ops clk_ops_dsi_pll_10nm_vco = {
- .round_rate = dsi_pll_10nm_clk_round_rate,
+ .determine_rate = dsi_pll_10nm_clk_determine_rate,
.set_rate = dsi_pll_10nm_vco_set_rate,
.recalc_rate = dsi_pll_10nm_vco_recalc_rate,
.prepare = dsi_pll_10nm_vco_prepare,
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
index 3a1c8ece6657..fdefcbd9c284 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -578,21 +578,19 @@ static void dsi_pll_14nm_vco_unprepare(struct clk_hw *hw)
pll_14nm->phy->pll_on = false;
}
-static long dsi_pll_14nm_clk_round_rate(struct clk_hw *hw,
- unsigned long rate, unsigned long *parent_rate)
+static int dsi_pll_14nm_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct dsi_pll_14nm *pll_14nm = to_pll_14nm(hw);
- if (rate < pll_14nm->phy->cfg->min_pll_rate)
- return pll_14nm->phy->cfg->min_pll_rate;
- else if (rate > pll_14nm->phy->cfg->max_pll_rate)
- return pll_14nm->phy->cfg->max_pll_rate;
- else
- return rate;
+ req->rate = clamp_t(unsigned long, req->rate,
+ pll_14nm->phy->cfg->min_pll_rate, pll_14nm->phy->cfg->max_pll_rate);
+
+ return 0;
}
static const struct clk_ops clk_ops_dsi_pll_14nm_vco = {
- .round_rate = dsi_pll_14nm_clk_round_rate,
+ .determine_rate = dsi_pll_14nm_clk_determine_rate,
.set_rate = dsi_pll_14nm_vco_set_rate,
.recalc_rate = dsi_pll_14nm_vco_recalc_rate,
.prepare = dsi_pll_14nm_vco_prepare,
@@ -622,18 +620,20 @@ static unsigned long dsi_pll_14nm_postdiv_recalc_rate(struct clk_hw *hw,
postdiv->flags, width);
}
-static long dsi_pll_14nm_postdiv_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *prate)
+static int dsi_pll_14nm_postdiv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
struct dsi_pll_14nm *pll_14nm = postdiv->pll;
- DBG("DSI%d PLL parent rate=%lu", pll_14nm->phy->id, rate);
+ DBG("DSI%d PLL parent rate=%lu", pll_14nm->phy->id, req->rate);
- return divider_round_rate(hw, rate, prate, NULL,
- postdiv->width,
- postdiv->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ NULL,
+ postdiv->width,
+ postdiv->flags);
+
+ return 0;
}
static int dsi_pll_14nm_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -680,7 +680,7 @@ static int dsi_pll_14nm_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_ops_dsi_pll_14nm_postdiv = {
.recalc_rate = dsi_pll_14nm_postdiv_recalc_rate,
- .round_rate = dsi_pll_14nm_postdiv_round_rate,
+ .determine_rate = dsi_pll_14nm_postdiv_determine_rate,
.set_rate = dsi_pll_14nm_postdiv_set_rate,
};
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
index 90348a2af3e9..d00e415b9a99 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -533,21 +533,20 @@ static void dsi_pll_28nm_vco_unprepare(struct clk_hw *hw)
pll_28nm->phy->pll_on = false;
}
-static long dsi_pll_28nm_clk_round_rate(struct clk_hw *hw,
- unsigned long rate, unsigned long *parent_rate)
+static int dsi_pll_28nm_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
- if (rate < pll_28nm->phy->cfg->min_pll_rate)
- return pll_28nm->phy->cfg->min_pll_rate;
- else if (rate > pll_28nm->phy->cfg->max_pll_rate)
- return pll_28nm->phy->cfg->max_pll_rate;
- else
- return rate;
+ req->rate = clamp_t(unsigned long, req->rate,
+ pll_28nm->phy->cfg->min_pll_rate,
+ pll_28nm->phy->cfg->max_pll_rate);
+
+ return 0;
}
static const struct clk_ops clk_ops_dsi_pll_28nm_vco_hpm = {
- .round_rate = dsi_pll_28nm_clk_round_rate,
+ .determine_rate = dsi_pll_28nm_clk_determine_rate,
.set_rate = dsi_pll_28nm_clk_set_rate,
.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
.prepare = dsi_pll_28nm_vco_prepare_hpm,
@@ -556,7 +555,7 @@ static const struct clk_ops clk_ops_dsi_pll_28nm_vco_hpm = {
};
static const struct clk_ops clk_ops_dsi_pll_28nm_vco_lp = {
- .round_rate = dsi_pll_28nm_clk_round_rate,
+ .determine_rate = dsi_pll_28nm_clk_determine_rate,
.set_rate = dsi_pll_28nm_clk_set_rate,
.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
.prepare = dsi_pll_28nm_vco_prepare_lp,
@@ -565,7 +564,7 @@ static const struct clk_ops clk_ops_dsi_pll_28nm_vco_lp = {
};
static const struct clk_ops clk_ops_dsi_pll_28nm_vco_8226 = {
- .round_rate = dsi_pll_28nm_clk_round_rate,
+ .determine_rate = dsi_pll_28nm_clk_determine_rate,
.set_rate = dsi_pll_28nm_clk_set_rate,
.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
.prepare = dsi_pll_28nm_vco_prepare_8226,
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
index f3643320ff2f..8dcce9581dc3 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -231,21 +231,19 @@ static void dsi_pll_28nm_vco_unprepare(struct clk_hw *hw)
pll_28nm->phy->pll_on = false;
}
-static long dsi_pll_28nm_clk_round_rate(struct clk_hw *hw,
- unsigned long rate, unsigned long *parent_rate)
+static int dsi_pll_28nm_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
- if (rate < pll_28nm->phy->cfg->min_pll_rate)
- return pll_28nm->phy->cfg->min_pll_rate;
- else if (rate > pll_28nm->phy->cfg->max_pll_rate)
- return pll_28nm->phy->cfg->max_pll_rate;
- else
- return rate;
+ req->rate = clamp_t(unsigned long, req->rate,
+ pll_28nm->phy->cfg->min_pll_rate, pll_28nm->phy->cfg->max_pll_rate);
+
+ return 0;
}
static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {
- .round_rate = dsi_pll_28nm_clk_round_rate,
+ .determine_rate = dsi_pll_28nm_clk_determine_rate,
.set_rate = dsi_pll_28nm_clk_set_rate,
.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
.prepare = dsi_pll_28nm_vco_prepare,
@@ -296,18 +294,20 @@ static unsigned int get_vco_mul_factor(unsigned long byte_clk_rate)
return 8;
}
-static long clk_bytediv_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_bytediv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long best_parent;
unsigned int factor;
- factor = get_vco_mul_factor(rate);
+ factor = get_vco_mul_factor(req->rate);
+
+ best_parent = req->rate * factor;
+ req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
- best_parent = rate * factor;
- *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
+ req->rate = req->best_parent_rate / factor;
- return *prate / factor;
+ return 0;
}
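+
+/*
+ * Illustrative example: if get_vco_mul_factor() returns 8 for the
+ * requested byte clock, a 60 MHz request asks the parent PLL for
+ * 480 MHz and then reports back the parent's rounded rate divided by 8.
+ */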
static int clk_bytediv_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -328,7 +328,7 @@ static int clk_bytediv_set_rate(struct clk_hw *hw, unsigned long rate,
/* Our special byte clock divider ops */
static const struct clk_ops clk_bytediv_ops = {
- .round_rate = clk_bytediv_round_rate,
+ .determine_rate = clk_bytediv_determine_rate,
.set_rate = clk_bytediv_set_rate,
.recalc_rate = clk_bytediv_recalc_rate,
};
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index 8c98f91a5930..32f06edd21a9 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -90,6 +90,13 @@ struct dsi_pll_7nm {
/* protects REG_DSI_7nm_PHY_CMN_CLK_CFG1 register */
spinlock_t pclk_mux_lock;
+ /*
+ * protects REG_DSI_7nm_PHY_CMN_CTRL_0 register and pll_enable_cnt
+ * member
+ */
+ spinlock_t pll_enable_lock;
+ int pll_enable_cnt;
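+ /*
+ * The bias is switched on only on the 0 -> 1 transition of
+ * pll_enable_cnt and back off on the 1 -> 0 transition, so nested
+ * enable/disable pairs reduce to plain refcount updates.
+ */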
+
struct pll_7nm_cached_state cached_state;
struct dsi_pll_7nm *slave;
@@ -103,6 +110,9 @@ struct dsi_pll_7nm {
*/
static struct dsi_pll_7nm *pll_7nm_list[DSI_MAX];
+static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll);
+static void dsi_pll_disable_pll_bias(struct dsi_pll_7nm *pll);
+
static void dsi_pll_setup_config(struct dsi_pll_config *config)
{
config->ssc_freq = 31500;
@@ -340,6 +350,7 @@ static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(hw);
struct dsi_pll_config config;
+ dsi_pll_enable_pll_bias(pll_7nm);
DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_7nm->phy->id, rate,
parent_rate);
@@ -357,6 +368,7 @@ static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
dsi_pll_ssc_commit(pll_7nm, &config);
+ dsi_pll_disable_pll_bias(pll_7nm);
/* flush, ensure all register writes are done */
wmb();
@@ -385,19 +397,47 @@ static int dsi_pll_7nm_lock_status(struct dsi_pll_7nm *pll)
static void dsi_pll_disable_pll_bias(struct dsi_pll_7nm *pll)
{
- u32 data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+ unsigned long flags;
+ u32 data;
+ spin_lock_irqsave(&pll->pll_enable_lock, flags);
+ --pll->pll_enable_cnt;
+ if (pll->pll_enable_cnt < 0) {
+ spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
+ DRM_DEV_ERROR_RATELIMITED(&pll->phy->pdev->dev,
+ "bug: imbalance in disabling PLL bias\n");
+ return;
+ } else if (pll->pll_enable_cnt > 0) {
+ spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
+ return;
+ } /* else: == 0 */
+
+ data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+ data &= ~DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB;
writel(0, pll->phy->pll_base + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES);
- writel(data & ~BIT(5), pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+ writel(data, pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+ spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
ndelay(250);
}
static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)
{
- u32 data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+ unsigned long flags;
+ u32 data;
+
+ spin_lock_irqsave(&pll->pll_enable_lock, flags);
+ if (pll->pll_enable_cnt++) {
+ spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
+ WARN_ON(pll->pll_enable_cnt == INT_MAX);
+ return;
+ }
+
+ data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+ data |= DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB;
+ writel(data, pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
- writel(data | BIT(5), pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
writel(0xc0, pll->phy->pll_base + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES);
+ spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
ndelay(250);
}
@@ -491,6 +531,10 @@ static int dsi_pll_7nm_vco_prepare(struct clk_hw *hw)
if (pll_7nm->slave)
dsi_pll_enable_global_clk(pll_7nm->slave);
+ writel(0x1, pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL);
+ if (pll_7nm->slave)
+ writel(0x1, pll_7nm->slave->phy->base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL);
+
error:
return rc;
}
@@ -534,6 +578,7 @@ static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
u32 dec;
u64 pll_freq, tmp64;
+ dsi_pll_enable_pll_bias(pll_7nm);
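+ /* reading the PLL registers below requires the PLL bias to be on */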
dec = readl(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1);
dec &= 0xff;
@@ -558,24 +603,24 @@ static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
pll_7nm->phy->id, (unsigned long)vco_rate, dec, frac);
+ dsi_pll_disable_pll_bias(pll_7nm);
+
return (unsigned long)vco_rate;
}
-static long dsi_pll_7nm_clk_round_rate(struct clk_hw *hw,
- unsigned long rate, unsigned long *parent_rate)
+static int dsi_pll_7nm_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(hw);
- if (rate < pll_7nm->phy->cfg->min_pll_rate)
- return pll_7nm->phy->cfg->min_pll_rate;
- else if (rate > pll_7nm->phy->cfg->max_pll_rate)
- return pll_7nm->phy->cfg->max_pll_rate;
- else
- return rate;
+ req->rate = clamp_t(unsigned long, req->rate,
+ pll_7nm->phy->cfg->min_pll_rate, pll_7nm->phy->cfg->max_pll_rate);
+
+ return 0;
}
static const struct clk_ops clk_ops_dsi_pll_7nm_vco = {
- .round_rate = dsi_pll_7nm_clk_round_rate,
+ .determine_rate = dsi_pll_7nm_clk_determine_rate,
.set_rate = dsi_pll_7nm_vco_set_rate,
.recalc_rate = dsi_pll_7nm_vco_recalc_rate,
.prepare = dsi_pll_7nm_vco_prepare,
@@ -593,6 +638,7 @@ static void dsi_7nm_pll_save_state(struct msm_dsi_phy *phy)
void __iomem *phy_base = pll_7nm->phy->base;
u32 cmn_clk_cfg0, cmn_clk_cfg1;
+ dsi_pll_enable_pll_bias(pll_7nm);
cached->pll_out_div = readl(pll_7nm->phy->pll_base +
REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
cached->pll_out_div &= 0x3;
@@ -604,6 +650,7 @@ static void dsi_7nm_pll_save_state(struct msm_dsi_phy *phy)
cmn_clk_cfg1 = readl(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
cached->pll_mux = FIELD_GET(DSI_7nm_PHY_CMN_CLK_CFG1_DSICLK_SEL__MASK, cmn_clk_cfg1);
+ dsi_pll_disable_pll_bias(pll_7nm);
DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
pll_7nm->phy->id, cached->pll_out_div, cached->bit_clk_div,
cached->pix_clk_div, cached->pll_mux);
@@ -826,8 +873,10 @@ static int dsi_pll_7nm_init(struct msm_dsi_phy *phy)
spin_lock_init(&pll_7nm->postdiv_lock);
spin_lock_init(&pll_7nm->pclk_mux_lock);
+ spin_lock_init(&pll_7nm->pll_enable_lock);
pll_7nm->phy = phy;
+ phy->pll_data = pll_7nm;
ret = pll_7nm_register(pll_7nm, phy->provided_clocks->hws);
if (ret) {
@@ -839,6 +888,12 @@ static int dsi_pll_7nm_init(struct msm_dsi_phy *phy)
/* TODO: Remove this when we have proper display handover support */
msm_dsi_phy_pll_save_state(phy);
+ /*
+ * Also store a proper vco_current_rate, because its value will be
+ * used in dsi_7nm_pll_restore_state().
+ */
+ if (!dsi_pll_7nm_vco_recalc_rate(&pll_7nm->clk_hw, VCO_REF_CLK_RATE))
+ pll_7nm->vco_current_rate = pll_7nm->phy->cfg->min_pll_rate;
return 0;
}
@@ -910,8 +965,10 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
u32 const delay_us = 5;
u32 const timeout_us = 1000;
struct msm_dsi_dphy_timing *timing = &phy->timing;
+ struct dsi_pll_7nm *pll = phy->pll_data;
void __iomem *base = phy->base;
bool less_than_1500_mhz;
+ unsigned long flags;
u32 vreg_ctrl_0, vreg_ctrl_1, lane_ctrl0;
u32 glbl_pemph_ctrl_0;
u32 glbl_str_swi_cal_sel_ctrl, glbl_hstx_str_ctrl_0;
@@ -1033,9 +1090,13 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
glbl_rescode_bot_ctrl = 0x3c;
}
+ spin_lock_irqsave(&pll->pll_enable_lock, flags);
+ pll->pll_enable_cnt = 1;
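+ /* the PLL_SHUTDOWNB write below turns the bias on; seed the refcount */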
/* de-assert digital and pll power down */
- data = BIT(6) | BIT(5);
+ data = DSI_7nm_PHY_CMN_CTRL_0_DIGTOP_PWRDN_B |
+ DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB;
writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+ spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
/* Assert PLL core reset */
writel(0x00, base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL);
@@ -1148,7 +1209,9 @@ static bool dsi_7nm_set_continuous_clock(struct msm_dsi_phy *phy, bool enable)
static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
{
+ struct dsi_pll_7nm *pll = phy->pll_data;
void __iomem *base = phy->base;
+ unsigned long flags;
u32 data;
DBG("");
@@ -1175,8 +1238,12 @@ static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
writel(0, base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0);
+ spin_lock_irqsave(&pll->pll_enable_lock, flags);
+ pll->pll_enable_cnt = 0;
/* Turn off all PHY blocks */
writel(0x00, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+ spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
+
/* make sure phy is turned off */
wmb();
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
index 8c8d80b59573..36e928b0fd5a 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
@@ -629,16 +629,12 @@ static int hdmi_8996_pll_prepare(struct clk_hw *hw)
return 0;
}
-static long hdmi_8996_pll_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *parent_rate)
+static int hdmi_8996_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- if (rate < HDMI_PCLK_MIN_FREQ)
- return HDMI_PCLK_MIN_FREQ;
- else if (rate > HDMI_PCLK_MAX_FREQ)
- return HDMI_PCLK_MAX_FREQ;
- else
- return rate;
+ req->rate = clamp_t(unsigned long, req->rate, HDMI_PCLK_MIN_FREQ, HDMI_PCLK_MAX_FREQ);
+
+ return 0;
}
static unsigned long hdmi_8996_pll_recalc_rate(struct clk_hw *hw,
@@ -684,7 +680,7 @@ static int hdmi_8996_pll_is_enabled(struct clk_hw *hw)
static const struct clk_ops hdmi_8996_pll_ops = {
.set_rate = hdmi_8996_pll_set_clk_rate,
- .round_rate = hdmi_8996_pll_round_rate,
+ .determine_rate = hdmi_8996_pll_determine_rate,
.recalc_rate = hdmi_8996_pll_recalc_rate,
.prepare = hdmi_8996_pll_prepare,
.unprepare = hdmi_8996_pll_unprepare,
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
index 33bb48ae58a2..a86ff3706369 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
@@ -646,16 +646,12 @@ static int hdmi_8998_pll_prepare(struct clk_hw *hw)
return 0;
}
-static long hdmi_8998_pll_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *parent_rate)
+static int hdmi_8998_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- if (rate < HDMI_PCLK_MIN_FREQ)
- return HDMI_PCLK_MIN_FREQ;
- else if (rate > HDMI_PCLK_MAX_FREQ)
- return HDMI_PCLK_MAX_FREQ;
- else
- return rate;
+ req->rate = clamp_t(unsigned long, req->rate, HDMI_PCLK_MIN_FREQ, HDMI_PCLK_MAX_FREQ);
+
+ return 0;
}
static unsigned long hdmi_8998_pll_recalc_rate(struct clk_hw *hw,
@@ -688,7 +684,7 @@ static int hdmi_8998_pll_is_enabled(struct clk_hw *hw)
static const struct clk_ops hdmi_8998_pll_ops = {
.set_rate = hdmi_8998_pll_set_clk_rate,
- .round_rate = hdmi_8998_pll_round_rate,
+ .determine_rate = hdmi_8998_pll_determine_rate,
.recalc_rate = hdmi_8998_pll_recalc_rate,
.prepare = hdmi_8998_pll_prepare,
.unprepare = hdmi_8998_pll_unprepare,
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
index 83c8781fcc3f..6ba6bbdb7e05 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
@@ -373,12 +373,14 @@ static unsigned long hdmi_pll_recalc_rate(struct clk_hw *hw,
return pll->pixclk;
}
-static long hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int hdmi_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- const struct pll_rate *pll_rate = find_rate(rate);
+ const struct pll_rate *pll_rate = find_rate(req->rate);
+
+ req->rate = pll_rate->rate;
- return pll_rate->rate;
+ return 0;
}
static int hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -402,7 +404,7 @@ static const struct clk_ops hdmi_pll_ops = {
.enable = hdmi_pll_enable,
.disable = hdmi_pll_disable,
.recalc_rate = hdmi_pll_recalc_rate,
- .round_rate = hdmi_pll_round_rate,
+ .determine_rate = hdmi_pll_determine_rate,
.set_rate = hdmi_pll_set_rate,
};
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index bbda865addae..97dc70876442 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -325,25 +325,28 @@ static struct drm_info_list msm_debugfs_list[] = {
static int late_init_minor(struct drm_minor *minor)
{
- struct drm_device *dev = minor->dev;
- struct msm_drm_private *priv = dev->dev_private;
+ struct drm_device *dev;
+ struct msm_drm_private *priv;
int ret;
if (!minor)
return 0;
+ dev = minor->dev;
+ priv = dev->dev_private;
+
if (!priv->gpu_pdev)
return 0;
ret = msm_rd_debugfs_init(minor);
if (ret) {
- DRM_DEV_ERROR(minor->dev->dev, "could not install rd debugfs\n");
+ DRM_DEV_ERROR(dev->dev, "could not install rd debugfs\n");
return ret;
}
ret = msm_perf_debugfs_init(minor);
if (ret) {
- DRM_DEV_ERROR(minor->dev->dev, "could not install perf debugfs\n");
+ DRM_DEV_ERROR(dev->dev, "could not install perf debugfs\n");
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 9dcc7a596a11..7e977fec4100 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -826,6 +826,7 @@ static const struct file_operations fops = {
#define DRIVER_FEATURES_KMS ( \
DRIVER_GEM | \
+ DRIVER_GEM_GPUVA | \
DRIVER_ATOMIC | \
DRIVER_MODESET | \
0 )
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 985db9febd98..6d847d593f1a 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -229,7 +229,7 @@ void msm_crtc_disable_vblank(struct drm_crtc *crtc);
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
-struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev);
+struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev, struct device *mdss_dev);
bool msm_use_mmu(struct drm_device *dev);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 7ff994d4f91a..07d8cdd6bb2e 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -95,7 +95,6 @@ void msm_gem_vma_get(struct drm_gem_object *obj)
void msm_gem_vma_put(struct drm_gem_object *obj)
{
struct msm_drm_private *priv = obj->dev->dev_private;
- struct drm_exec exec;
if (atomic_dec_return(&to_msm_bo(obj)->vma_ref))
return;
@@ -103,9 +102,13 @@ void msm_gem_vma_put(struct drm_gem_object *obj)
if (!priv->kms)
return;
+#ifdef CONFIG_DRM_MSM_KMS
+ struct drm_exec exec;
+
msm_gem_lock_vm_and_obj(&exec, obj, priv->kms->vm);
put_iova_spaces(obj, priv->kms->vm, true, "vma_put");
drm_exec_fini(&exec); /* drop locks */
+#endif
}
/*
@@ -188,7 +191,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
if (!msm_obj->pages) {
struct drm_device *dev = obj->dev;
struct page **p;
- int npages = obj->size >> PAGE_SHIFT;
+ size_t npages = obj->size >> PAGE_SHIFT;
p = drm_gem_get_pages(obj);
@@ -663,9 +666,13 @@ int msm_gem_set_iova(struct drm_gem_object *obj,
static bool is_kms_vm(struct drm_gpuvm *vm)
{
+#ifdef CONFIG_DRM_MSM_KMS
struct msm_drm_private *priv = vm->drm->dev_private;
return priv->kms && (priv->kms->vm == vm);
+#else
+ return false;
+#endif
}
/*
@@ -1113,10 +1120,12 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
put_pages(obj);
}
- if (msm_obj->flags & MSM_BO_NO_SHARE) {
+ if (obj->resv != &obj->_resv) {
struct drm_gem_object *r_obj =
container_of(obj->resv, struct drm_gem_object, _resv);
+ WARN_ON(!(msm_obj->flags & MSM_BO_NO_SHARE));
+
/* Drop reference we hold to shared resv obj: */
drm_gem_object_put(r_obj);
}
@@ -1139,7 +1148,7 @@ static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct
/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
- uint32_t size, uint32_t flags, uint32_t *handle,
+ size_t size, uint32_t flags, uint32_t *handle,
char *name)
{
struct drm_gem_object *obj;
@@ -1205,9 +1214,8 @@ static const struct drm_gem_object_funcs msm_gem_object_funcs = {
.vm_ops = &vm_ops,
};
-static int msm_gem_new_impl(struct drm_device *dev,
- uint32_t size, uint32_t flags,
- struct drm_gem_object **obj)
+static int msm_gem_new_impl(struct drm_device *dev, uint32_t flags,
+ struct drm_gem_object **obj)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
@@ -1241,7 +1249,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
return 0;
}
-struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
+struct drm_gem_object *msm_gem_new(struct drm_device *dev, size_t size, uint32_t flags)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
@@ -1256,7 +1264,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32
if (size == 0)
return ERR_PTR(-EINVAL);
- ret = msm_gem_new_impl(dev, size, flags, &obj);
+ ret = msm_gem_new_impl(dev, flags, &obj);
if (ret)
return ERR_PTR(ret);
@@ -1296,12 +1304,12 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
struct drm_gem_object *obj;
- uint32_t size;
- int ret, npages;
+ size_t size, npages;
+ int ret;
size = PAGE_ALIGN(dmabuf->size);
- ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
+ ret = msm_gem_new_impl(dev, MSM_BO_WC, &obj);
if (ret)
return ERR_PTR(ret);
@@ -1344,7 +1352,7 @@ fail:
return ERR_PTR(ret);
}
-void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, uint32_t flags,
+void *msm_gem_kernel_new(struct drm_device *dev, size_t size, uint32_t flags,
struct drm_gpuvm *vm, struct drm_gem_object **bo,
uint64_t *iova)
{
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 88239da1cd72..a4cf31853c50 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -100,7 +100,7 @@ struct msm_gem_vm {
*
* Only used for kernel managed VMs, unused for user managed VMs.
*
- * Protected by @mm_lock.
+ * Protected by vm lock. See msm_gem_lock_vm_and_obj(), for ex.
*/
struct drm_mm mm;
@@ -297,10 +297,10 @@ bool msm_gem_active(struct drm_gem_object *obj);
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
int msm_gem_cpu_fini(struct drm_gem_object *obj);
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
- uint32_t size, uint32_t flags, uint32_t *handle, char *name);
+ size_t size, uint32_t flags, uint32_t *handle, char *name);
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
- uint32_t size, uint32_t flags);
-void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, uint32_t flags,
+ size_t size, uint32_t flags);
+void *msm_gem_kernel_new(struct drm_device *dev, size_t size, uint32_t flags,
struct drm_gpuvm *vm, struct drm_gem_object **bo,
uint64_t *iova);
void msm_gem_kernel_put(struct drm_gem_object *bo, struct drm_gpuvm *vm);
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index c0a33ac839cb..036d34c674d9 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -15,7 +15,7 @@
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- int npages = obj->size >> PAGE_SHIFT;
+ size_t npages = obj->size >> PAGE_SHIFT;
if (msm_obj->flags & MSM_BO_NO_SHARE)
return ERR_PTR(-EINVAL);
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 5f8e939a5906..3ab3b27134f9 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -271,32 +271,37 @@ out:
return ret;
}
-/* This is where we make sure all the bo's are reserved and pin'd: */
-static int submit_lock_objects(struct msm_gem_submit *submit)
+static int submit_lock_objects_vmbind(struct msm_gem_submit *submit)
{
- unsigned flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
+ unsigned flags = DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES;
struct drm_exec *exec = &submit->exec;
- int ret;
+ int ret = 0;
- if (msm_context_is_vmbind(submit->queue->ctx)) {
- flags |= DRM_EXEC_IGNORE_DUPLICATES;
+ drm_exec_init(&submit->exec, flags, submit->nr_bos);
- drm_exec_init(&submit->exec, flags, submit->nr_bos);
+ drm_exec_until_all_locked (&submit->exec) {
+ ret = drm_gpuvm_prepare_vm(submit->vm, exec, 1);
+ drm_exec_retry_on_contention(exec);
+ if (ret)
+ break;
- drm_exec_until_all_locked (&submit->exec) {
- ret = drm_gpuvm_prepare_vm(submit->vm, exec, 1);
- drm_exec_retry_on_contention(exec);
- if (ret)
- return ret;
+ ret = drm_gpuvm_prepare_objects(submit->vm, exec, 1);
+ drm_exec_retry_on_contention(exec);
+ if (ret)
+ break;
+ }
- ret = drm_gpuvm_prepare_objects(submit->vm, exec, 1);
- drm_exec_retry_on_contention(exec);
- if (ret)
- return ret;
- }
+ return ret;
+}
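+
+/*
+ * drm_exec_until_all_locked() re-runs its body whenever
+ * drm_exec_retry_on_contention() hits a contended reservation, after
+ * backing off the locks taken so far; leaving the loop with a nonzero
+ * ret therefore only happens for real errors.
+ */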
- return 0;
- }
+/* This is where we make sure all the bo's are reserved and pin'd: */
+static int submit_lock_objects(struct msm_gem_submit *submit)
+{
+ unsigned flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
+ int ret = 0;
+
+ if (msm_context_is_vmbind(submit->queue->ctx))
+ return submit_lock_objects_vmbind(submit);
drm_exec_init(&submit->exec, flags, submit->nr_bos);
@@ -305,17 +310,17 @@ static int submit_lock_objects(struct msm_gem_submit *submit)
drm_gpuvm_resv_obj(submit->vm));
drm_exec_retry_on_contention(&submit->exec);
if (ret)
- return ret;
+ break;
for (unsigned i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = submit->bos[i].obj;
ret = drm_exec_prepare_obj(&submit->exec, obj, 1);
drm_exec_retry_on_contention(&submit->exec);
if (ret)
- return ret;
+ break;
}
}
- return 0;
+ return ret;
}
static int submit_fence_sync(struct msm_gem_submit *submit)
@@ -514,14 +519,15 @@ out:
*/
static void submit_cleanup(struct msm_gem_submit *submit, bool error)
{
+ if (error)
+ submit_unpin_objects(submit);
+
if (submit->exec.objects)
drm_exec_fini(&submit->exec);
- if (error) {
- submit_unpin_objects(submit);
- /* job wasn't enqueued to scheduler, so early retirement: */
+ /* if job wasn't enqueued to scheduler, early retirement: */
+ if (error)
msm_submit_retire(submit);
- }
}
void msm_submit_retire(struct msm_gem_submit *submit)
@@ -769,12 +775,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
sync_file = sync_file_create(submit->user_fence);
- if (!sync_file) {
+ if (!sync_file)
ret = -ENOMEM;
- } else {
- fd_install(out_fence_fd, sync_file->file);
- args->fence_fd = out_fence_fd;
- }
}
if (ret)
@@ -812,10 +814,14 @@ out:
out_unlock:
mutex_unlock(&queue->lock);
out_post_unlock:
- if (ret && (out_fence_fd >= 0)) {
- put_unused_fd(out_fence_fd);
+ if (ret) {
+ if (out_fence_fd >= 0)
+ put_unused_fd(out_fence_fd);
if (sync_file)
fput(sync_file->file);
+ } else if (sync_file) {
+ fd_install(out_fence_fd, sync_file->file);
+ args->fence_fd = out_fence_fd;
}
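/*
 * Note: fd_install() is deferred until every failure path has been
 * ruled out; once installed, the fd is owned by userspace and cannot
 * be safely revoked.
 */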
if (!IS_ERR_OR_NULL(submit)) {
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 3cd8562a5109..8316af1723c2 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -319,13 +319,10 @@ msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt)
mutex_lock(&vm->mmu_lock);
/*
- * NOTE: iommu/io-pgtable can allocate pages, so we cannot hold
+ * NOTE: if not using pgtable preallocation, we cannot hold
* a lock across map/unmap which is also used in the job_run()
* path, as this can cause deadlock in job_run() vs shrinker/
* reclaim.
- *
- * Revisit this if we can come up with a scheme to pre-alloc pages
- * for the pgtable in map/unmap ops.
*/
ret = vm_map_op(vm, &(struct msm_vm_map_op){
.iova = vma->va.addr,
@@ -399,7 +396,14 @@ msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj,
if (obj)
GEM_WARN_ON((range_end - range_start) > obj->size);
- drm_gpuva_init(&vma->base, range_start, range_end - range_start, obj, offset);
+ struct drm_gpuva_op_map op_map = {
+ .va.addr = range_start,
+ .va.range = range_end - range_start,
+ .gem.obj = obj,
+ .gem.offset = offset,
+ };
+
+ drm_gpuva_init_from_op(&vma->base, &op_map);
vma->mapped = false;
ret = drm_gpuva_insert(&vm->base, &vma->base);
@@ -454,6 +458,8 @@ msm_gem_vm_bo_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
struct op_arg {
unsigned flags;
struct msm_vm_bind_job *job;
+ const struct msm_vm_bind_op *op;
+ bool kept;
};
static void
@@ -475,14 +481,18 @@ vma_from_op(struct op_arg *arg, struct drm_gpuva_op_map *op)
}
static int
-msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *arg)
+msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *_arg)
{
- struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job;
+ struct op_arg *arg = _arg;
+ struct msm_vm_bind_job *job = arg->job;
struct drm_gem_object *obj = op->map.gem.obj;
struct drm_gpuva *vma;
struct sg_table *sgt;
unsigned prot;
+ if (arg->kept)
+ return 0;
+
vma = vma_from_op(arg, &op->map);
if (WARN_ON(IS_ERR(vma)))
return PTR_ERR(vma);
@@ -602,15 +612,41 @@ msm_gem_vm_sm_step_remap(struct drm_gpuva_op *op, void *arg)
}
static int
-msm_gem_vm_sm_step_unmap(struct drm_gpuva_op *op, void *arg)
+msm_gem_vm_sm_step_unmap(struct drm_gpuva_op *op, void *_arg)
{
- struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job;
+ struct op_arg *arg = _arg;
+ struct msm_vm_bind_job *job = arg->job;
struct drm_gpuva *vma = op->unmap.va;
struct msm_gem_vma *msm_vma = to_msm_vma(vma);
vm_dbg("%p:%p:%p: %016llx %016llx", vma->vm, vma, vma->gem.obj,
vma->va.addr, vma->va.range);
+ /*
+ * Detect in-place remap. Turnip does this to change the vma flags,
+ * in particular MSM_VMA_DUMP. In this case we want to avoid actually
+ * touching the page tables, as that would require synchronization
+ * against SUBMIT jobs running on the GPU.
+ */
+ if (op->unmap.keep &&
+ (arg->op->op == MSM_VM_BIND_OP_MAP) &&
+ (vma->gem.obj == arg->op->obj) &&
+ (vma->gem.offset == arg->op->obj_offset) &&
+ (vma->va.addr == arg->op->iova) &&
+ (vma->va.range == arg->op->range)) {
+ /* We are only expecting a single in-place unmap+map cb pair: */
+ WARN_ON(arg->kept);
+
+ /* Leave the existing VMA in place, but signal that to the map cb: */
+ arg->kept = true;
+
+ /* Only flags are changing, so update that in-place: */
+ unsigned orig_flags = vma->flags & (DRM_GPUVA_USERBITS - 1);
+ vma->flags = orig_flags | arg->flags;
+
+ return 0;
+ }
+
if (!msm_vma->mapped)
goto out_close;
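Context for the callback changes here: drm_gpuvm runs a split/merge state machine over the VA space and calls back into the driver for each map/remap/unmap step it computes. Schematically the wiring looks like the following (the drm_gpuvm_ops field names are the real API; the ops-struct name is abbreviated from this file):

	static const struct drm_gpuvm_ops msm_gem_vm_ops = {
		.sm_step_map   = msm_gem_vm_sm_step_map,
		.sm_step_remap = msm_gem_vm_sm_step_remap,
		.sm_step_unmap = msm_gem_vm_sm_step_unmap,
		/* vm_free and friends elided */
	};

An in-place rebind of an identical mapping arrives as exactly one unmap (with op->unmap.keep set) followed by one map, which is what the new kept flag shuttles between the two callbacks above.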
@@ -994,6 +1030,7 @@ vm_bind_job_lookup_ops(struct msm_vm_bind_job *job, struct drm_msm_vm_bind *args
struct drm_device *dev = job->vm->drm;
int ret = 0;
int cnt = 0;
+ int i = -1;
if (args->nr_ops == 1) {
/* Single op case, the op is inlined: */
@@ -1027,11 +1064,12 @@ vm_bind_job_lookup_ops(struct msm_vm_bind_job *job, struct drm_msm_vm_bind *args
spin_lock(&file->table_lock);
- for (unsigned i = 0; i < args->nr_ops; i++) {
+ for (i = 0; i < args->nr_ops; i++) {
+ struct msm_vm_bind_op *op = &job->ops[i];
struct drm_gem_object *obj;
- if (!job->ops[i].handle) {
- job->ops[i].obj = NULL;
+ if (!op->handle) {
+ op->obj = NULL;
continue;
}
@@ -1039,16 +1077,22 @@ vm_bind_job_lookup_ops(struct msm_vm_bind_job *job, struct drm_msm_vm_bind *args
* normally use drm_gem_object_lookup(), but for bulk lookup
* all under single table_lock just hit object_idr directly:
*/
- obj = idr_find(&file->object_idr, job->ops[i].handle);
+ obj = idr_find(&file->object_idr, op->handle);
if (!obj) {
- ret = UERR(EINVAL, dev, "invalid handle %u at index %u\n", job->ops[i].handle, i);
+ ret = UERR(EINVAL, dev, "invalid handle %u at index %u\n", op->handle, i);
goto out_unlock;
}
drm_gem_object_get(obj);
- job->ops[i].obj = obj;
+ op->obj = obj;
cnt++;
+
+ if ((op->range + op->obj_offset) > obj->size) {
+ ret = UERR(EINVAL, dev, "invalid range: %016llx + %016llx > %016zx\n",
+ op->range, op->obj_offset, obj->size);
+ goto out_unlock;
+ }
}
*nr_bos = cnt;
@@ -1056,6 +1100,17 @@ vm_bind_job_lookup_ops(struct msm_vm_bind_job *job, struct drm_msm_vm_bind *args
out_unlock:
spin_unlock(&file->table_lock);
+ if (ret) {
+ for (; i >= 0; i--) {
+ struct msm_vm_bind_op *op = &job->ops[i];
+
+ if (!op->obj)
+ continue;
+
+ drm_gem_object_put(op->obj);
+ op->obj = NULL;
+ }
+ }
out:
return ret;
}
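The reason i moved out of the for statement and starts at -1: after a failure it still indexes the op being processed, so the unwind walks back over exactly the references taken so far (and over none if the failure hit before the loop). The idiom in the abstract, with acquire()/release() as placeholders:

	int i = -1, ret = 0;

	for (i = 0; i < n; i++) {
		ret = acquire(&objs[i]);	/* may leave objs[i] == NULL */
		if (ret)
			break;
	}

	if (ret) {
		for (; i >= 0; i--) {
			if (!objs[i])
				continue;	/* slot that never took a reference */
			release(objs[i]);
			objs[i] = NULL;
		}
	}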
@@ -1171,11 +1226,17 @@ vm_bind_job_lock_objects(struct msm_vm_bind_job *job, struct drm_exec *exec)
op->obj_offset);
break;
case MSM_VM_BIND_OP_MAP:
- case MSM_VM_BIND_OP_MAP_NULL:
- ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1,
- op->iova, op->range,
- op->obj, op->obj_offset);
+ case MSM_VM_BIND_OP_MAP_NULL: {
+ struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = op->iova,
+ .map.va.range = op->range,
+ .map.gem.obj = op->obj,
+ .map.gem.offset = op->obj_offset,
+ };
+
+ ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1, &map_req);
break;
+ }
default:
/*
* lookup_op() should have already thrown an error for
@@ -1271,6 +1332,7 @@ vm_bind_job_prepare(struct msm_vm_bind_job *job)
const struct msm_vm_bind_op *op = &job->ops[i];
struct op_arg arg = {
.job = job,
+ .op = op,
};
switch (op->op) {
@@ -1282,10 +1344,17 @@ vm_bind_job_prepare(struct msm_vm_bind_job *job)
if (op->flags & MSM_VM_BIND_OP_DUMP)
arg.flags |= MSM_VMA_DUMP;
fallthrough;
- case MSM_VM_BIND_OP_MAP_NULL:
- ret = drm_gpuvm_sm_map(job->vm, &arg, op->iova,
- op->range, op->obj, op->obj_offset);
+ case MSM_VM_BIND_OP_MAP_NULL: {
+ struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = op->iova,
+ .map.va.range = op->range,
+ .map.gem.obj = op->obj,
+ .map.gem.offset = op->obj_offset,
+ };
+
+ ret = drm_gpuvm_sm_map(job->vm, &arg, &map_req);
break;
+ }
default:
/*
* lookup_op() should have already thrown an error for
@@ -1460,12 +1529,8 @@ msm_ioctl_vm_bind(struct drm_device *dev, void *data, struct drm_file *file)
if (args->flags & MSM_VM_BIND_FENCE_FD_OUT) {
sync_file = sync_file_create(job->fence);
- if (!sync_file) {
+ if (!sync_file)
ret = -ENOMEM;
- } else {
- fd_install(out_fence_fd, sync_file->file);
- args->fence_fd = out_fence_fd;
- }
}
if (ret)
@@ -1494,10 +1559,14 @@ out:
out_unlock:
mutex_unlock(&queue->lock);
out_post_unlock:
- if (ret && (out_fence_fd >= 0)) {
- put_unused_fd(out_fence_fd);
+ if (ret) {
+ if (out_fence_fd >= 0)
+ put_unused_fd(out_fence_fd);
if (sync_file)
fput(sync_file->file);
+ } else if (sync_file) {
+ fd_install(out_fence_fd, sync_file->file);
+ args->fence_fd = out_fence_fd;
}
if (!IS_ERR_OR_NULL(job)) {
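Both the drm_gpuvm_sm_map() and drm_gpuvm_sm_map_exec_lock() conversions above track an upstream drm_gpuvm interface change: the map parameters now travel in a struct drm_gpuvm_map_req rather than as four positional arguments, presumably so new request fields can be added without touching every driver. The conversion is mechanical:

	struct drm_gpuvm_map_req map_req = {
		.map.va.addr    = op->iova,
		.map.va.range   = op->range,
		.map.gem.obj    = op->obj,
		.map.gem.offset = op->obj_offset,
	};

	/* was: drm_gpuvm_sm_map(job->vm, &arg, op->iova, op->range, op->obj, op->obj_offset); */
	ret = drm_gpuvm_sm_map(job->vm, &arg, &map_req);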
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index c317b25a8162..17759abc46d7 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -304,7 +304,7 @@ static void crashstate_get_bos(struct msm_gpu_state *state, struct msm_gem_submi
sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
for (int i = 0; state->bos && i < submit->nr_bos; i++) {
- struct drm_gem_object *obj = submit->bos[i].obj;;
+ struct drm_gem_object *obj = submit->bos[i].obj;
bool dump = rd_full || (submit->bos[i].flags & MSM_SUBMIT_BO_DUMP);
msm_gem_lock(obj);
@@ -465,6 +465,7 @@ static void recover_worker(struct kthread_work *work)
struct msm_gem_submit *submit;
struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
char *comm = NULL, *cmd = NULL;
+ struct task_struct *task;
int i;
mutex_lock(&gpu->lock);
@@ -482,16 +483,20 @@ static void recover_worker(struct kthread_work *work)
/* Increment the fault counts */
submit->queue->faults++;
- if (submit->vm) {
+
+ task = get_pid_task(submit->pid, PIDTYPE_PID);
+ if (!task)
+ gpu->global_faults++;
+ else {
struct msm_gem_vm *vm = to_msm_vm(submit->vm);
vm->faults++;
/*
* If userspace has opted-in to VM_BIND (and therefore userspace
- * management of the VM), faults mark the VM as unusuable. This
+ * management of the VM), faults mark the VM as unusable. This
* matches vulkan expectations (vulkan is the main target for
- * VM_BIND)
+ * VM_BIND).
*/
if (!vm->managed)
msm_gem_vm_unusable(submit->vm);
@@ -553,8 +558,15 @@ static void recover_worker(struct kthread_work *work)
unsigned long flags;
spin_lock_irqsave(&ring->submit_lock, flags);
- list_for_each_entry(submit, &ring->submits, node)
+ list_for_each_entry(submit, &ring->submits, node) {
+ /*
+ * If the submit uses an unusable VM, make sure
+ * we don't actually run it.
+ */
+ if (to_msm_vm(submit->vm)->unusable)
+ submit->nr_cmds = 0;
gpu->funcs->submit(gpu, submit);
+ }
spin_unlock_irqrestore(&ring->submit_lock, flags);
}
}
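The hang-recovery fix relies on how the ring-level submit hooks consume nr_cmds: they iterate the command buffers and then emit the fence write, so a submit zeroed to nr_cmds == 0 still flows through the ring and retires in order while executing nothing. Schematically (the emit_* helpers are illustrative, not msm functions):

	for (i = 0; i < submit->nr_cmds; i++)	/* zeroed: no IBs emitted */
		emit_ib(ring, &submit->cmd[i]);

	emit_fence(ring, submit->seqno);	/* still signals, so retirement stays ordered */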
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index b2a96544f92a..a597f2bee30b 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -16,6 +16,7 @@
#include "msm_drv.h"
#include "msm_fence.h"
+#include "msm_gpu_trace.h"
#include "msm_ringbuffer.h"
#include "msm_gem.h"
@@ -91,6 +92,7 @@ struct msm_gpu_funcs {
* for cmdstream that is buffered in this FIFO upstream of the CP fw.
*/
bool (*progress)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+ void (*sysprof_setup)(struct msm_gpu *gpu);
};
/* Additional state for iommu faults: */
@@ -613,16 +615,19 @@ struct msm_gpu_state {
static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
{
+ trace_msm_gpu_regaccess(reg);
writel(data, gpu->mmio + (reg << 2));
}
static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
{
+ trace_msm_gpu_regaccess(reg);
return readl(gpu->mmio + (reg << 2));
}
static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
{
+ trace_msm_gpu_regaccess(reg);
msm_rmw(gpu->mmio + (reg << 2), mask, or);
}
@@ -644,7 +649,9 @@ static inline u64 gpu_read64(struct msm_gpu *gpu, u32 reg)
* when the lo is read, so make sure to read the lo first to trigger
* that
*/
+ trace_msm_gpu_regaccess(reg);
val = (u64) readl(gpu->mmio + (reg << 2));
+ trace_msm_gpu_regaccess(reg+1);
val |= ((u64) readl(gpu->mmio + ((reg + 1) << 2)) << 32);
return val;
@@ -652,8 +659,10 @@ static inline u64 gpu_read64(struct msm_gpu *gpu, u32 reg)
static inline void gpu_write64(struct msm_gpu *gpu, u32 reg, u64 val)
{
+ trace_msm_gpu_regaccess(reg);
/* Why not a writeq here? Read the screed above */
writel(lower_32_bits(val), gpu->mmio + (reg << 2));
+ trace_msm_gpu_regaccess(reg+1);
writel(upper_32_bits(val), gpu->mmio + ((reg + 1) << 2));
}
diff --git a/drivers/gpu/drm/msm/msm_gpu_trace.h b/drivers/gpu/drm/msm/msm_gpu_trace.h
index 781bbe5540bd..5417f8d389a3 100644
--- a/drivers/gpu/drm/msm/msm_gpu_trace.h
+++ b/drivers/gpu/drm/msm/msm_gpu_trace.h
@@ -219,6 +219,18 @@ TRACE_EVENT(msm_mmu_prealloc_cleanup,
TP_printk("count=%u, remaining=%u", __entry->count, __entry->remaining)
);
+TRACE_EVENT(msm_gpu_regaccess,
+ TP_PROTO(u32 offset),
+ TP_ARGS(offset),
+ TP_STRUCT__entry(
+ __field(u32, offset)
+ ),
+ TP_fast_assign(
+ __entry->offset = offset;
+ ),
+ TP_printk("offset=0x%x", __entry->offset)
+);
+
#endif
#undef TRACE_INCLUDE_PATH
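On the cost of tracing every register access: a tracepoint call site compiles down to a static-branch-guarded no-op until the event is enabled, so gpu_read()/gpu_write() only pay when someone is listening. Each accessor now behaves approximately like this (a sketch of what the TRACE_EVENT machinery generates; the slow-path function name is invented for illustration):

	if (static_branch_unlikely(&__tracepoint_msm_gpu_regaccess.key))
		emit_msm_gpu_regaccess_record(reg);	/* slow path: log the offset */
	writel(data, gpu->mmio + (reg << 2));

Once enabled via tracefs like any other tracepoint, the event streams one record per register access.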
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 55c29f49b788..0e18619f96cb 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -14,7 +14,9 @@
struct msm_iommu {
struct msm_mmu base;
struct iommu_domain *domain;
- atomic_t pagetables;
+
+ struct mutex init_lock; /* protects pagetables counter and prr_page */
+ int pagetables;
struct page *prr_page;
struct kmem_cache *pt_cache;
@@ -227,7 +229,8 @@ static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
* If this is the last attached pagetable for the parent,
* disable TTBR0 in the arm-smmu driver
*/
- if (atomic_dec_return(&iommu->pagetables) == 0) {
+ mutex_lock(&iommu->init_lock);
+ if (--iommu->pagetables == 0) {
adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL);
if (adreno_smmu->set_prr_bit) {
@@ -236,6 +239,7 @@ static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
iommu->prr_page = NULL;
}
}
+ mutex_unlock(&iommu->init_lock);
free_io_pgtable_ops(pagetable->pgtbl_ops);
kfree(pagetable);
@@ -568,9 +572,12 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_m
* If this is the first pagetable that we've allocated, send it back to
* the arm-smmu driver as a trigger to set up TTBR0
*/
- if (atomic_inc_return(&iommu->pagetables) == 1) {
+ mutex_lock(&iommu->init_lock);
+ if (iommu->pagetables++ == 0) {
ret = adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, &ttbr0_cfg);
if (ret) {
+ iommu->pagetables--;
+ mutex_unlock(&iommu->init_lock);
free_io_pgtable_ops(pagetable->pgtbl_ops);
kfree(pagetable);
return ERR_PTR(ret);
@@ -595,6 +602,7 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_m
adreno_smmu->set_prr_bit(adreno_smmu->cookie, true);
}
}
+ mutex_unlock(&iommu->init_lock);
/* Needed later for TLB flush */
pagetable->parent = parent;
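The switch from atomic_t to a mutex-guarded plain int is not about the arithmetic; it is that the counter gates non-atomic work (set_ttbr0_cfg(), prr_page setup/teardown), and the test and the action must be indivisible. The window the mutex closes, as I read it:

	/*
	 * CPU0: destroy the last pagetable    CPU1: create the first pagetable
	 *
	 * atomic_dec_return() == 0
	 *                                      atomic_inc_return() == 1
	 *                                      set_ttbr0_cfg(cookie, &ttbr0_cfg)
	 * set_ttbr0_cfg(cookie, NULL)
	 *
	 * TTBR0 ends up disabled even though CPU1's pagetable is live.
	 * Holding init_lock across both the count update and the smmu
	 * call makes each sequence atomic as a unit.
	 */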
@@ -713,7 +721,7 @@ struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks)
int ret;
if (!device_iommu_mapped(dev))
- return NULL;
+ return ERR_PTR(-ENODEV);
domain = iommu_paging_domain_alloc(dev);
if (IS_ERR(domain))
@@ -730,7 +738,7 @@ struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks)
iommu->domain = domain;
msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU);
- atomic_set(&iommu->pagetables, 0);
+ mutex_init(&iommu->init_lock);
ret = iommu_attach_device(iommu->domain, dev);
if (ret) {
@@ -748,7 +756,7 @@ struct msm_mmu *msm_iommu_disp_new(struct device *dev, unsigned long quirks)
struct msm_mmu *mmu;
mmu = msm_iommu_new(dev, quirks);
- if (IS_ERR_OR_NULL(mmu))
+ if (IS_ERR(mmu))
return mmu;
iommu = to_msm_iommu(mmu);
@@ -764,11 +772,11 @@ struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsig
struct msm_mmu *mmu;
mmu = msm_iommu_new(dev, quirks);
- if (IS_ERR_OR_NULL(mmu))
+ if (IS_ERR(mmu))
return mmu;
iommu = to_msm_iommu(mmu);
- if (adreno_smmu && adreno_smmu->cookie) {
+ if (adreno_smmu->cookie) {
const struct io_pgtable_cfg *cfg =
adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
size_t tblsz = get_tblsz(cfg);
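The NULL-to-ERR_PTR(-ENODEV) change in msm_iommu_new() is what allows the two wrappers above to relax IS_ERR_OR_NULL() to IS_ERR(): a NULL return was ambiguous, whereas an ERR_PTR gives every caller one uniform check:

	struct msm_mmu *mmu = msm_iommu_new(dev, quirks);

	if (IS_ERR(mmu))
		return mmu;	/* -ENODEV now covers the no-IOMMU case */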
diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c
index 6889f1c1e721..6e5e94f5c9a7 100644
--- a/drivers/gpu/drm/msm/msm_kms.c
+++ b/drivers/gpu/drm/msm/msm_kms.c
@@ -177,12 +177,11 @@ static int msm_kms_fault_handler(void *arg, unsigned long iova, int flags, void
return -ENOSYS;
}
-struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev)
+struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev, struct device *mdss_dev)
{
struct drm_gpuvm *vm;
struct msm_mmu *mmu;
struct device *mdp_dev = dev->dev;
- struct device *mdss_dev = mdp_dev->parent;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
struct device *iommu_dev;
@@ -193,18 +192,17 @@ struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev)
*/
if (device_iommu_mapped(mdp_dev))
iommu_dev = mdp_dev;
- else
+ else if (mdss_dev && device_iommu_mapped(mdss_dev))
iommu_dev = mdss_dev;
+ else {
+ drm_info(dev, "no IOMMU, bailing out\n");
+ return ERR_PTR(-ENODEV);
+ }
mmu = msm_iommu_disp_new(iommu_dev, 0);
if (IS_ERR(mmu))
return ERR_CAST(mmu);
- if (!mmu) {
- drm_info(dev, "no IOMMU, fallback to phys contig buffers for scanout\n");
- return NULL;
- }
-
vm = msm_gem_vm_create(dev, mmu, "mdp_kms",
0x1000, 0x100000000 - 0x1000, true);
if (IS_ERR(vm)) {
@@ -275,6 +273,12 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
if (ret)
return ret;
+ ret = msm_disp_snapshot_init(ddev);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
+ return ret;
+ }
+
ret = priv->kms_init(ddev);
if (ret) {
DRM_DEV_ERROR(dev, "failed to load kms\n");
@@ -327,10 +331,6 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
goto err_msm_uninit;
}
- ret = msm_disp_snapshot_init(ddev);
- if (ret)
- DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
-
drm_mode_config_reset(ddev);
return 0;
diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
index 1f5fe7811e01..2d0e3e784c04 100644
--- a/drivers/gpu/drm/msm/msm_mdss.c
+++ b/drivers/gpu/drm/msm/msm_mdss.c
@@ -154,8 +154,7 @@ static int _msm_mdss_irq_domain_add(struct msm_mdss *msm_mdss)
dev = msm_mdss->dev;
- domain = irq_domain_create_linear(of_fwnode_handle(dev->of_node), 32,
- &msm_mdss_irqdomain_ops, msm_mdss);
+ domain = irq_domain_create_linear(dev_fwnode(dev), 32, &msm_mdss_irqdomain_ops, msm_mdss);
if (!domain) {
dev_err(dev, "failed to add irq_domain\n");
return -EINVAL;
@@ -423,7 +422,7 @@ static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5
if (IS_ERR(msm_mdss->mmio))
return ERR_CAST(msm_mdss->mmio);
- dev_dbg(&pdev->dev, "mapped mdss address space @%pK\n", msm_mdss->mmio);
+ dev_dbg(&pdev->dev, "mapped mdss address space @%p\n", msm_mdss->mmio);
ret = msm_mdss_parse_data_bus_icc_path(&pdev->dev, msm_mdss);
if (ret)
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
index 8617a82cd6b3..d53dfad16bde 100644
--- a/drivers/gpu/drm/msm/msm_submitqueue.c
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -40,6 +40,10 @@ int msm_context_set_sysprof(struct msm_context *ctx, struct msm_gpu *gpu, int sy
break;
}
+ /* Some gpu families require additional setup for sysprof */
+ if (gpu->funcs->sysprof_setup)
+ gpu->funcs->sysprof_setup(gpu);
+
ctx->sysprof = sysprof;
return 0;
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
index d860fd94feae..9459b6038217 100644
--- a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
@@ -594,10 +594,14 @@ by a particular renderpass/blit.
<reg32 offset="0x0600" name="DBGC_CFG_DBGBUS_SEL_A"/>
<reg32 offset="0x0601" name="DBGC_CFG_DBGBUS_SEL_B"/>
<reg32 offset="0x0602" name="DBGC_CFG_DBGBUS_SEL_C"/>
- <reg32 offset="0x0603" name="DBGC_CFG_DBGBUS_SEL_D">
+ <reg32 offset="0x0603" name="DBGC_CFG_DBGBUS_SEL_D" variants="A6XX">
<bitfield high="7" low="0" name="PING_INDEX"/>
<bitfield high="15" low="8" name="PING_BLK_SEL"/>
</reg32>
+ <reg32 offset="0x0603" name="DBGC_CFG_DBGBUS_SEL_D" variants="A7XX-">
+ <bitfield high="7" low="0" name="PING_INDEX"/>
+ <bitfield high="24" low="16" name="PING_BLK_SEL"/>
+ </reg32>
<reg32 offset="0x0604" name="DBGC_CFG_DBGBUS_CNTLT">
<bitfield high="5" low="0" name="TRACEEN"/>
<bitfield high="14" low="12" name="GRANU"/>
@@ -810,7 +814,7 @@ by a particular renderpass/blit.
<bitfield name="Y" low="16" high="29" type="uint"/>
</bitset>
- <reg32 offset="0x8000" name="GRAS_CL_CNTL" usage="rp_blit">
+ <bitset name="a6xx_gras_cl_cntl" inline="yes">
<bitfield name="CLIP_DISABLE" pos="0" type="boolean"/>
<bitfield name="ZNEAR_CLIP_DISABLE" pos="1" type="boolean"/>
<bitfield name="ZFAR_CLIP_DISABLE" pos="2" type="boolean"/>
@@ -822,18 +826,20 @@ by a particular renderpass/blit.
<bitfield name="VP_CLIP_CODE_IGNORE" pos="7" type="boolean"/>
<bitfield name="VP_XFORM_DISABLE" pos="8" type="boolean"/>
<bitfield name="PERSP_DIVISION_DISABLE" pos="9" type="boolean"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x8000" name="GRAS_CL_CNTL" type="a6xx_gras_cl_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
<bitset name="a6xx_gras_xs_clip_cull_distance" inline="yes">
<bitfield name="CLIP_MASK" low="0" high="7"/>
<bitfield name="CULL_MASK" low="8" high="15"/>
</bitset>
- <reg32 offset="0x8001" name="GRAS_CL_VS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit"/>
- <reg32 offset="0x8002" name="GRAS_CL_DS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit"/>
- <reg32 offset="0x8003" name="GRAS_CL_GS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit"/>
- <reg32 offset="0x8004" name="GRAS_CL_ARRAY_SIZE" low="0" high="10" type="uint" usage="rp_blit"/>
+ <reg32 offset="0x8001" name="GRAS_CL_VS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit" variants="A6XX-A7XX" />
+ <reg32 offset="0x8002" name="GRAS_CL_DS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit" variants="A6XX-A7XX" />
+ <reg32 offset="0x8003" name="GRAS_CL_GS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit" variants="A6XX-A7XX" />
+ <reg32 offset="0x8004" name="GRAS_CL_ARRAY_SIZE" low="0" high="10" type="uint" usage="rp_blit" variants="A6XX-A7XX" />
- <reg32 offset="0x8005" name="GRAS_CL_INTERP_CNTL" usage="rp_blit">
+ <bitset name="a6xx_gras_cl_interp_cntl" inline="yes">
<!-- see also RB_INTERP_CNTL -->
<bitfield name="IJ_PERSP_PIXEL" pos="0" type="boolean"/>
<bitfield name="IJ_PERSP_CENTROID" pos="1" type="boolean"/>
@@ -844,26 +850,69 @@ by a particular renderpass/blit.
<bitfield name="COORD_MASK" low="6" high="9" type="hex"/>
<bitfield name="UNK10" pos="10" type="boolean" variants="A7XX-"/>
<bitfield name="UNK11" pos="11" type="boolean" variants="A7XX-"/>
- </reg32>
- <reg32 offset="0x8006" name="GRAS_CL_GUARDBAND_CLIP_ADJ" usage="rp_blit">
+ </bitset>
+
+ <reg32 offset="0x8005" name="GRAS_CL_INTERP_CNTL" type="a6xx_gras_cl_interp_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <bitset name="a6xx_gras_cl_guardband_clip_adj" inline="true">
<bitfield name="HORZ" low="0" high="8" type="uint"/>
<bitfield name="VERT" low="10" high="18" type="uint"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x8006" name="GRAS_CL_GUARDBAND_CLIP_ADJ" type="a6xx_gras_cl_guardband_clip_adj" variants="A6XX-A7XX" usage="rp_blit"/>
<!-- Something connected to depth-stencil attachment size -->
<reg32 offset="0x8007" name="GRAS_UNKNOWN_8007" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0x8008" name="GRAS_UNKNOWN_8008" variants="A7XX-" usage="cmd"/>
+ <!-- the scale/offset is per view, with up to 6 views -->
+ <bitset name="a6xx_gras_bin_foveat" inline="yes">
+ <bitfield name="BINSCALEEN" pos="6" type="boolean"/>
+ <enum name="a7xx_bin_scale">
+ <value value="0" name="NOSCALE"/>
+ <value value="1" name="SCALE2X"/>
+ <value value="2" name="SCALE4X"/>
+ </enum>
+ <bitfield name="XSCALE_0" low="8" high="9" type="a7xx_bin_scale"/>
+ <bitfield name="YSCALE_0" low="10" high="11" type="a7xx_bin_scale"/>
+ <bitfield name="XSCALE_1" low="12" high="13" type="a7xx_bin_scale"/>
+ <bitfield name="YSCALE_1" low="14" high="15" type="a7xx_bin_scale"/>
+ <bitfield name="XSCALE_2" low="16" high="17" type="a7xx_bin_scale"/>
+ <bitfield name="YSCALE_2" low="18" high="19" type="a7xx_bin_scale"/>
+ <bitfield name="XSCALE_3" low="20" high="21" type="a7xx_bin_scale"/>
+ <bitfield name="YSCALE_3" low="22" high="23" type="a7xx_bin_scale"/>
+ <bitfield name="XSCALE_4" low="24" high="25" type="a7xx_bin_scale"/>
+ <bitfield name="YSCALE_4" low="26" high="27" type="a7xx_bin_scale"/>
+ <bitfield name="XSCALE_5" low="28" high="29" type="a7xx_bin_scale"/>
+ <bitfield name="YSCALE_5" low="30" high="31" type="a7xx_bin_scale"/>
+ </bitset>
- <reg32 offset="0x8009" name="GRAS_UNKNOWN_8009" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0x800a" name="GRAS_UNKNOWN_800A" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0x800b" name="GRAS_UNKNOWN_800B" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0x800c" name="GRAS_UNKNOWN_800C" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x8008" name="GRAS_BIN_FOVEAT" type="a6xx_gras_bin_foveat" variants="A7XX" usage="cmd"/>
+
+ <reg32 offset="0x8009" name="GRAS_BIN_FOVEAT_OFFSET_0" variants="A7XX-" usage="cmd">
+ <bitfield name="XOFFSET_0" low="0" high="9" shr="2" type="uint"/>
+ <bitfield name="XOFFSET_1" low="10" high="19" shr="2" type="uint"/>
+ <bitfield name="XOFFSET_2" low="20" high="29" shr="2" type="uint"/>
+ </reg32>
+ <reg32 offset="0x800a" name="GRAS_BIN_FOVEAT_OFFSET_1" variants="A7XX-" usage="cmd">
+ <bitfield name="XOFFSET_3" low="0" high="9" shr="2" type="uint"/>
+ <bitfield name="XOFFSET_4" low="10" high="19" shr="2" type="uint"/>
+ <bitfield name="XOFFSET_5" low="20" high="29" shr="2" type="uint"/>
+ </reg32>
+ <reg32 offset="0x800b" name="GRAS_BIN_FOVEAT_OFFSET_2" variants="A7XX-" usage="cmd">
+ <bitfield name="YOFFSET_0" low="0" high="9" shr="2" type="uint"/>
+ <bitfield name="YOFFSET_1" low="10" high="19" shr="2" type="uint"/>
+ <bitfield name="YOFFSET_2" low="20" high="29" shr="2" type="uint"/>
+ </reg32>
+ <reg32 offset="0x800c" name="GRAS_BIN_FOVEAT_OFFSET_3" variants="A7XX-" usage="cmd">
+ <bitfield name="YOFFSET_3" low="0" high="9" shr="2" type="uint"/>
+ <bitfield name="YOFFSET_4" low="10" high="19" shr="2" type="uint"/>
+ <bitfield name="YOFFSET_5" low="20" high="29" shr="2" type="uint"/>
+ </reg32>
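Reading these packed registers: each holds three 10-bit fields, and shr="2" means the hardware stores the value pre-shifted, so the real pixel offset is the field value << 2. A hypothetical decode helper, purely for illustration and not from the tree:

	/* decode field 0..2 of a GRAS_BIN_FOVEAT_OFFSET_n register value */
	static inline u32 gras_bin_foveat_offset(u32 regval, unsigned int field)
	{
		return ((regval >> (field * 10)) & 0x3ff) << 2;
	}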
<!-- <reg32 offset="0x80f0" name="GRAS_UNKNOWN_80F0" type="a6xx_reg_xy"/> -->
<!-- 0x8006-0x800f invalid -->
- <array offset="0x8010" name="GRAS_CL_VIEWPORT" stride="6" length="16" usage="rp_blit">
+ <array offset="0x8010" name="GRAS_CL_VIEWPORT" stride="6" length="16" variants="A6XX-A7XX" usage="rp_blit">
<reg32 offset="0" name="XOFFSET" type="float"/>
<reg32 offset="1" name="XSCALE" type="float"/>
<reg32 offset="2" name="YOFFSET" type="float"/>
@@ -871,12 +920,13 @@ by a particular renderpass/blit.
<reg32 offset="4" name="ZOFFSET" type="float"/>
<reg32 offset="5" name="ZSCALE" type="float"/>
</array>
- <array offset="0x8070" name="GRAS_CL_VIEWPORT_ZCLAMP" stride="2" length="16" usage="rp_blit">
+
+ <array offset="0x8070" name="GRAS_CL_VIEWPORT_ZCLAMP" stride="2" length="16" variants="A6XX-A7XX" usage="rp_blit">
<reg32 offset="0" name="MIN" type="float"/>
<reg32 offset="1" name="MAX" type="float"/>
</array>
- <reg32 offset="0x8090" name="GRAS_SU_CNTL" usage="rp_blit">
+ <bitset name="a6xx_gras_su_cntl" varset="chip">
<bitfield name="CULL_FRONT" pos="0" type="boolean"/>
<bitfield name="CULL_BACK" pos="1" type="boolean"/>
<bitfield name="FRONT_CW" pos="2" type="boolean"/>
@@ -886,39 +936,66 @@ by a particular renderpass/blit.
<bitfield name="LINE_MODE" pos="13" type="a5xx_line_mode"/>
<bitfield name="UNK15" low="15" high="16"/>
<!--
- On gen1 only MULTIVIEW_ENABLE exists. On gen3 we have
- the ability to add the view index to either the RT array
- index or the viewport index, and it seems that
- MULTIVIEW_ENABLE doesn't do anything, instead we need to
- set at least one of RENDERTARGETINDEXINCR or
- VIEWPORTINDEXINCR to enable multiview. The blob still
- sets MULTIVIEW_ENABLE regardless.
- TODO: what about gen2 (a640)?
+ On gen1 only MULTIVIEW_ENABLE exists. On gen3 we have
+ the ability to add the view index to either the RT array
+ index or the viewport index, and it seems that
+ MULTIVIEW_ENABLE doesn't do anything, instead we need to
+ set at least one of RENDERTARGETINDEXINCR or
+ VIEWPORTINDEXINCR to enable multiview. The blob still
+ sets MULTIVIEW_ENABLE regardless.
+ TODO: what about gen2 (a640)?
-->
<bitfield name="MULTIVIEW_ENABLE" pos="17" type="boolean"/>
- <bitfield name="RENDERTARGETINDEXINCR" pos="18" type="boolean"/>
- <bitfield name="VIEWPORTINDEXINCR" pos="19" type="boolean"/>
- <bitfield name="UNK20" low="20" high="22"/>
- </reg32>
- <reg32 offset="0x8091" name="GRAS_SU_POINT_MINMAX" usage="rp_blit">
+ <bitfield name="RENDERTARGETINDEXINCR" pos="18" type="boolean" variants="A6XX-A7XX"/>
+ <bitfield name="VIEWPORTINDEXINCR" pos="19" type="boolean" variants="A6XX-A7XX"/>
+ <bitfield name="UNK20" low="20" high="22" variants="A6XX-A7XX"/>
+ </bitset>
+ <reg32 offset="0x8090" name="GRAS_SU_CNTL" type="a6xx_gras_su_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <bitset name="a6xx_gras_su_point_minmax" inline="yes">
<bitfield name="MIN" low="0" high="15" type="ufixed" radix="4"/>
<bitfield name="MAX" low="16" high="31" type="ufixed" radix="4"/>
- </reg32>
- <reg32 offset="0x8092" name="GRAS_SU_POINT_SIZE" low="0" high="15" type="fixed" radix="4" usage="rp_blit"/>
+ </bitset>
+
+ <reg32 offset="0x8091" name="GRAS_SU_POINT_MINMAX" type="a6xx_gras_su_point_minmax" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8092" name="GRAS_SU_POINT_SIZE" low="0" high="15" type="fixed" radix="4" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <bitset name="a6xx_gras_su_depth_cntl" inline="yes">
+ <bitfield name="Z_TEST_ENABLE" pos="0" type="boolean"/>
+ </bitset>
+
+ <reg32 offset="0x8114" name="GRAS_SU_DEPTH_CNTL" variants="A6XX-A7XX" type="a6xx_gras_su_depth_cntl" usage="rp_blit"/>
+
+ <bitset name="a6xx_gras_su_stencil_cntl" inline="yes">
+ <bitfield name="STENCIL_ENABLE" pos="0" type="boolean"/>
+ </bitset>
+
+ <reg32 offset="0x8115" name="GRAS_SU_STENCIL_CNTL" type="a6xx_gras_su_stencil_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <bitset name="a6xx_gras_su_render_cntl" inline="yes">
+ <bitfield name="FS_DISABLE" pos="7" type="boolean"/>
+ </bitset>
+
+ <reg32 offset="0x8116" name="GRAS_SU_RENDER_CNTL" type="a6xx_gras_su_render_cntl" variants="A7XX" usage="rp_blit"/>
+
<!-- 0x8093 invalid -->
- <reg32 offset="0x8094" name="GRAS_SU_DEPTH_PLANE_CNTL" usage="rp_blit">
+ <bitset name="a6xx_depth_plane_cntl" inline="yes">
<bitfield name="Z_MODE" low="0" high="1" type="a6xx_ztest_mode"/>
- </reg32>
- <reg32 offset="0x8095" name="GRAS_SU_POLY_OFFSET_SCALE" type="float" usage="rp_blit"/>
- <reg32 offset="0x8096" name="GRAS_SU_POLY_OFFSET_OFFSET" type="float" usage="rp_blit"/>
- <reg32 offset="0x8097" name="GRAS_SU_POLY_OFFSET_OFFSET_CLAMP" type="float" usage="rp_blit"/>
- <!-- duplicates RB_DEPTH_BUFFER_INFO: -->
- <reg32 offset="0x8098" name="GRAS_SU_DEPTH_BUFFER_INFO" usage="rp_blit">
+ </bitset>
+
+ <reg32 offset="0x8094" name="GRAS_SU_DEPTH_PLANE_CNTL" type="a6xx_depth_plane_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8095" name="GRAS_SU_POLY_OFFSET_SCALE" type="float" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8096" name="GRAS_SU_POLY_OFFSET_OFFSET" type="float" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8097" name="GRAS_SU_POLY_OFFSET_OFFSET_CLAMP" type="float" variants="A6XX-A7XX" usage="rp_blit"/>
+ <bitset name="a6xx_depth_buffer_info" inline="yes">
<bitfield name="DEPTH_FORMAT" low="0" high="2" type="a6xx_depth_format"/>
<bitfield name="UNK3" pos="3"/>
- </reg32>
+ </bitset>
+
+ <!-- duplicates RB_DEPTH_BUFFER_INFO: -->
+ <reg32 offset="0x8098" name="GRAS_SU_DEPTH_BUFFER_INFO" type="a6xx_depth_buffer_info" variants="A6XX-A7XX" usage="rp_blit"/>
- <reg32 offset="0x8099" name="GRAS_SU_CONSERVATIVE_RAS_CNTL" usage="cmd">
+ <bitset name="a6xx_gras_su_conservative_ras_cntl" inline="yes">
<bitfield name="CONSERVATIVERASEN" pos="0" type="boolean"/>
<enum name="a6xx_shift_amount">
<value value="0" name="NO_SHIFT"/>
@@ -928,7 +1005,10 @@ by a particular renderpass/blit.
<bitfield name="SHIFTAMOUNT" low="1" high="2" type="a6xx_shift_amount"/>
<bitfield name="INNERCONSERVATIVERASEN" pos="3" type="boolean"/>
<bitfield name="UNK4" low="4" high="5"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x8099" name="GRAS_SU_CONSERVATIVE_RAS_CNTL" type="a6xx_gras_su_conservative_ras_cntl" variants="A6XX-A7XX" usage="cmd"/>
+
<reg32 offset="0x809a" name="GRAS_SU_PATH_RENDERING_CNTL">
<bitfield name="UNK0" pos="0" type="boolean"/>
<bitfield name="LINELENGTHEN" pos="1" type="boolean"/>
@@ -938,10 +1018,13 @@ by a particular renderpass/blit.
<bitfield name="WRITES_LAYER" pos="0" type="boolean"/>
<bitfield name="WRITES_VIEW" pos="1" type="boolean"/>
</bitset>
- <reg32 offset="0x809b" name="GRAS_SU_VS_SIV_CNTL" type="a6xx_gras_us_xs_siv_cntl" usage="rp_blit"/>
- <reg32 offset="0x809c" name="GRAS_SU_GS_SIV_CNTL" type="a6xx_gras_us_xs_siv_cntl" usage="rp_blit"/>
- <reg32 offset="0x809d" name="GRAS_SU_DS_SIV_CNTL" type="a6xx_gras_us_xs_siv_cntl" usage="rp_blit"/>
- <!-- 0x809e/0x809f invalid -->
+ <reg32 offset="0x809b" name="GRAS_SU_VS_SIV_CNTL" type="a6xx_gras_us_xs_siv_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x809c" name="GRAS_SU_GS_SIV_CNTL" type="a6xx_gras_us_xs_siv_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x809d" name="GRAS_SU_DS_SIV_CNTL" type="a6xx_gras_us_xs_siv_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <bitset name="a6xx_rast_cntl" inline="yes">
+ <bitfield name="MODE" low="0" high="1" type="a6xx_polygon_mode"/>
+ </bitset>
<enum name="a6xx_sequenced_thread_dist">
<value value="0x0" name="DIST_SCREEN_COORD"/>
@@ -989,7 +1072,7 @@ by a particular renderpass/blit.
<value value="0x3" name="RB_BT"/>
</enum>
- <reg32 offset="0x80a0" name="GRAS_SC_CNTL" usage="rp_blit">
+ <bitset name="a6xx_gras_sc_cntl" inline="yes">
<bitfield name="CCUSINGLECACHELINESIZE" low="0" high="2"/>
<bitfield name="SINGLE_PRIM_MODE" low="3" high="4" type="a6xx_single_prim_mode"/>
<bitfield name="RASTER_MODE" pos="5" type="a6xx_raster_mode"/>
@@ -999,7 +1082,9 @@ by a particular renderpass/blit.
<bitfield name="UNK9" pos="9" type="boolean"/>
<bitfield name="ROTATION" low="10" high="11" type="uint"/>
<bitfield name="EARLYVIZOUTEN" pos="12" type="boolean"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x80a0" name="GRAS_SC_CNTL" type="a6xx_gras_sc_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
<enum name="a6xx_render_mode">
<value value="0x0" name="RENDERING_PASS"/>
@@ -1020,7 +1105,7 @@ by a particular renderpass/blit.
<value value="0x4" name="LRZ_FEEDBACK_LATE_Z"/>
</enum>
- <reg32 offset="0x80a1" name="GRAS_SC_BIN_CNTL" usage="rp_blit">
+ <bitset name="a6xx_bin_cntl" inline="yes">
<bitfield name="BINW" low="0" high="5" shr="5" type="uint"/>
<bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
<bitfield name="RENDER_MODE" low="18" high="20" type="a6xx_render_mode"/>
@@ -1033,18 +1118,25 @@ by a particular renderpass/blit.
In sysmem mode GRAS_LRZ_CNTL.LRZ_WRITE is not considered.
</doc>
<bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="24" high="26" type="a6xx_lrz_feedback_mask"/>
- <bitfield name="UNK27" pos="27"/>
- </reg32>
+ <bitfield name="FORCE_LRZ_DIS" pos="27" type="boolean"/>
+ </bitset>
+
+ <reg32 offset="0x80a1" name="GRAS_SC_BIN_CNTL" type="a6xx_bin_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
- <reg32 offset="0x80a2" name="GRAS_SC_RAS_MSAA_CNTL" usage="rp_blit">
+ <bitset name="a6xx_gras_sc_ras_msaa_cntl" inline="yes">
<bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
<bitfield name="UNK2" pos="2"/>
<bitfield name="UNK3" pos="3"/>
- </reg32>
- <reg32 offset="0x80a3" name="GRAS_SC_DEST_MSAA_CNTL" usage="rp_blit">
+ </bitset>
+
+ <reg32 offset="0x80a2" name="GRAS_SC_RAS_MSAA_CNTL" type="a6xx_gras_sc_ras_msaa_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <bitset name="a6xx_gras_sc_dest_msaa_cntl" inline="yes">
<bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
<bitfield name="MSAA_DISABLE" pos="2" type="boolean"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x80a3" name="GRAS_SC_DEST_MSAA_CNTL" type="a6xx_gras_sc_dest_msaa_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
<bitset name="a6xx_msaa_sample_pos_cntl" inline="yes">
<bitfield name="UNK0" pos="0"/>
@@ -1062,30 +1154,35 @@ by a particular renderpass/blit.
<bitfield name="SAMPLE_3_Y" low="28" high="31" radix="4" type="fixed"/>
</bitset>
- <reg32 offset="0x80a4" name="GRAS_SC_MSAA_SAMPLE_POS_CNTL" type="a6xx_msaa_sample_pos_cntl" usage="rp_blit"/>
- <reg32 offset="0x80a5" name="GRAS_SC_PROGRAMMABLE_MSAA_POS_0" type="a6xx_programmable_msaa_pos" usage="rp_blit"/>
- <reg32 offset="0x80a6" name="GRAS_SC_PROGRAMMABLE_MSAA_POS_1" type="a6xx_programmable_msaa_pos" usage="rp_blit"/>
+ <reg32 offset="0x80a4" name="GRAS_SC_MSAA_SAMPLE_POS_CNTL" type="a6xx_msaa_sample_pos_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x80a5" name="GRAS_SC_PROGRAMMABLE_MSAA_POS_0" type="a6xx_programmable_msaa_pos" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x80a6" name="GRAS_SC_PROGRAMMABLE_MSAA_POS_1" type="a6xx_programmable_msaa_pos" variants="A6XX-A7XX" usage="rp_blit"/>
- <reg32 offset="0x80a7" name="GRAS_UNKNOWN_80A7" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x80a7" name="GRAS_ROTATION_CNTL" variants="A7XX" usage="cmd"/>
- <!-- 0x80a7-0x80ae invalid -->
- <reg32 offset="0x80af" name="GRAS_UNKNOWN_80AF" pos="0" usage="cmd"/>
+ <bitset name="a6xx_screen_scissor_cntl" inline="yes">
+ <bitfield name="SCISSOR_DISABLE" pos="0" type="boolean"/>
+ </bitset>
+
+ <reg32 offset="0x80af" name="GRAS_SC_SCREEN_SCISSOR_CNTL" type="a6xx_screen_scissor_cntl" variants="A6XX-A7XX" pos="0" usage="cmd"/>
<bitset name="a6xx_scissor_xy" inline="yes">
<bitfield name="X" low="0" high="15" type="uint"/>
<bitfield name="Y" low="16" high="31" type="uint"/>
</bitset>
- <array offset="0x80b0" name="GRAS_SC_SCREEN_SCISSOR" stride="2" length="16" usage="rp_blit">
+
+ <array offset="0x80b0" name="GRAS_SC_SCREEN_SCISSOR" stride="2" length="16" variants="A6XX-A7XX" usage="rp_blit">
<reg32 offset="0" name="TL" type="a6xx_scissor_xy"/>
<reg32 offset="1" name="BR" type="a6xx_scissor_xy"/>
</array>
- <array offset="0x80d0" name="GRAS_SC_VIEWPORT_SCISSOR" stride="2" length="16" usage="rp_blit">
+
+ <array offset="0x80d0" name="GRAS_SC_VIEWPORT_SCISSOR" stride="2" length="16" variants="A6XX-A7XX" usage="rp_blit">
<reg32 offset="0" name="TL" type="a6xx_scissor_xy"/>
<reg32 offset="1" name="BR" type="a6xx_scissor_xy"/>
</array>
- <reg32 offset="0x80f0" name="GRAS_SC_WINDOW_SCISSOR_TL" type="a6xx_reg_xy" usage="rp_blit"/>
- <reg32 offset="0x80f1" name="GRAS_SC_WINDOW_SCISSOR_BR" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x80f0" name="GRAS_SC_WINDOW_SCISSOR_TL" type="a6xx_reg_xy" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x80f1" name="GRAS_SC_WINDOW_SCISSOR_BR" type="a6xx_reg_xy" variants="A6XX-A7XX" usage="rp_blit"/>
<enum name="a6xx_fsr_combiner">
<value value="0" name="FSR_COMBINER_OP_KEEP"/>
@@ -1095,7 +1192,7 @@ by a particular renderpass/blit.
<value value="4" name="FSR_COMBINER_OP_MUL"/>
</enum>
- <reg32 offset="0x80f4" name="GRAS_VRS_CONFIG" variants="A7XX-" usage="rp_blit">
+ <bitset name="a6xx_gras_vrs_config">
<bitfield name="PIPELINE_FSR_ENABLE" pos="0" type="boolean"/>
<bitfield name="FRAG_SIZE_X" low="1" high="2" type="uint"/>
<bitfield name="FRAG_SIZE_Y" low="3" high="4" type="uint"/>
@@ -1103,20 +1200,32 @@ by a particular renderpass/blit.
<bitfield name="COMBINER_OP_2" low="8" high="10" type="a6xx_fsr_combiner"/>
<bitfield name="ATTACHMENT_FSR_ENABLE" pos="13" type="boolean"/>
<bitfield name="PRIMITIVE_FSR_ENABLE" pos="20" type="boolean"/>
- </reg32>
- <reg32 offset="0x80f5" name="GRAS_QUALITY_BUFFER_INFO" variants="A7XX-" usage="rp_blit">
+ </bitset>
+
+ <reg32 offset="0x80f4" name="GRAS_VRS_CONFIG" type="a6xx_gras_vrs_config" variants="A7XX" usage="rp_blit"/>
+
+ <bitset name="a6xx_gras_quality_buffer_info" inline="yes">
<bitfield name="LAYERED" pos="0" type="boolean"/>
<bitfield name="TILE_MODE" low="1" high="2" type="a6xx_tile_mode"/>
- </reg32>
- <reg32 offset="0x80f6" name="GRAS_QUALITY_BUFFER_DIMENSION" variants="A7XX-" usage="rp_blit">
+ </bitset>
+
+ <reg32 offset="0x80f5" name="GRAS_QUALITY_BUFFER_INFO" type="a6xx_gras_quality_buffer_info" variants="A7XX" usage="rp_blit"/>
+
+ <bitset name="a6xx_gras_quality_buffer_dimension" inline="yes">
<bitfield name="WIDTH" low="0" high="15" type="uint"/>
<bitfield name="HEIGHT" low="16" high="31" type="uint"/>
- </reg32>
- <reg64 offset="0x80f8" name="GRAS_QUALITY_BUFFER_BASE" variants="A7XX-" type="waddress" usage="rp_blit"/>
- <reg32 offset="0x80fa" name="GRAS_QUALITY_BUFFER_PITCH" variants="A7XX-" usage="rp_blit">
+ </bitset>
+
+ <reg32 offset="0x80f6" name="GRAS_QUALITY_BUFFER_DIMENSION" type="a6xx_gras_quality_buffer_dimension" variants="A7XX" usage="rp_blit"/>
+
+ <reg64 offset="0x80f8" name="GRAS_QUALITY_BUFFER_BASE" variants="A7XX" type="waddress" usage="rp_blit"/>
+
+ <bitset name="a6xx_gras_quality_buffer_pitch" inline="yes">
<bitfield name="PITCH" shr="6" low="0" high="7" type="uint"/>
<bitfield name="ARRAY_PITCH" shr="6" low="10" high="28" type="uint"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x80fa" name="GRAS_QUALITY_BUFFER_PITCH" type="a6xx_gras_quality_buffer_pitch" variants="A7XX" usage="rp_blit"/>
<enum name="a6xx_lrz_dir_status">
<value value="0x1" name="LRZ_DIR_LE"/>
@@ -1124,7 +1233,7 @@ by a particular renderpass/blit.
<value value="0x3" name="LRZ_DIR_INVALID"/>
</enum>
- <reg32 offset="0x8100" name="GRAS_LRZ_CNTL" usage="rp_blit">
+ <bitset name="a6xx_gras_lrz_cntl" inline="yes">
<bitfield name="ENABLE" pos="0" type="boolean"/>
<doc>LRZ write also disabled for blend/etc.</doc>
<bitfield name="LRZ_WRITE" pos="1" type="boolean"/>
@@ -1151,26 +1260,36 @@ by a particular renderpass/blit.
</doc>
<bitfield name="DISABLE_ON_WRONG_DIR" pos="9" type="boolean" variants="A6XX"/>
<bitfield name="Z_FUNC" low="11" high="13" type="adreno_compare_func" variants="A7XX-"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x8100" name="GRAS_LRZ_CNTL" type="a6xx_gras_lrz_cntl" usage="rp_blit" variants="A6XX-A7XX"/>
<enum name="a6xx_fragcoord_sample_mode">
<value value="0" name="FRAGCOORD_CENTER"/>
<value value="3" name="FRAGCOORD_SAMPLE"/>
</enum>
- <reg32 offset="0x8101" name="GRAS_LRZ_PS_INPUT_CNTL" low="0" high="2" usage="rp_blit">
+ <bitset name="a6xx_gras_lrz_ps_input_cntl" inline="yes">
<bitfield name="SAMPLEID" pos="0" type="boolean"/>
<bitfield name="FRAGCOORDSAMPLEMODE" low="1" high="2" type="a6xx_fragcoord_sample_mode"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x8101" name="GRAS_LRZ_PS_INPUT_CNTL" type="a6xx_gras_lrz_ps_input_cntl" usage="rp_blit" variants="A6XX-A7XX"/>
- <reg32 offset="0x8102" name="GRAS_LRZ_MRT_BUFFER_INFO_0" usage="rp_blit">
+ <bitset name="a6xx_gras_lrz_mrt_buffer_info_0" inline="yes">
<bitfield name="COLOR_FORMAT" low="0" high="7" type="a6xx_format"/>
- </reg32>
- <reg64 offset="0x8103" name="GRAS_LRZ_BUFFER_BASE" align="256" type="waddress" usage="rp_blit"/>
- <reg32 offset="0x8105" name="GRAS_LRZ_BUFFER_PITCH" usage="rp_blit">
+ </bitset>
+
+ <reg32 offset="0x8102" name="GRAS_LRZ_MRT_BUFFER_INFO_0" type="a6xx_gras_lrz_mrt_buffer_info_0" usage="rp_blit" variants="A6XX-A7XX"/>
+
+ <reg64 offset="0x8103" name="GRAS_LRZ_BUFFER_BASE" align="256" type="waddress" usage="rp_blit" variants="A6XX-A7XX"/>
+
+ <bitset name="a6xx_gras_lrz_buffer_pitch" inline="yes">
<bitfield name="PITCH" low="0" high="7" shr="5" type="uint"/>
<bitfield name="ARRAY_PITCH" low="10" high="28" shr="8" type="uint"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x8105" name="GRAS_LRZ_BUFFER_PITCH" type="a6xx_gras_lrz_buffer_pitch" usage="rp_blit" variants="A6XX-A7XX"/>
<!--
The LRZ "fast clear" buffer is initialized to zero's by blob, and
@@ -1203,7 +1322,6 @@ by a particular renderpass/blit.
not.
-->
<reg64 offset="0x8106" name="GRAS_LRZ_FAST_CLEAR_BUFFER_BASE" align="64" type="waddress" usage="rp_blit"/>
- <!-- 0x8108 invalid -->
<reg32 offset="0x8109" name="GRAS_LRZ_PS_SAMPLEFREQ_CNTL" usage="rp_blit">
<bitfield name="PER_SAMP_MODE" pos="0" type="boolean"/>
</reg32>
@@ -1228,19 +1346,20 @@ by a particular renderpass/blit.
<!-- 0x810c-0x810f invalid -->
- <reg32 offset="0x8110" name="GRAS_UNKNOWN_8110" low="0" high="1" usage="cmd"/>
+ <reg32 offset="0x8110" name="GRAS_MODE_CNTL" low="0" high="1" variants="A6XX-A7XX" usage="cmd"/>
<!-- A bit tentative but it's a color and it is followed by LRZ_CLEAR -->
- <reg32 offset="0x8111" name="GRAS_LRZ_DEPTH_CLEAR" type="float" variants="A7XX-"/>
+ <reg32 offset="0x8111" name="GRAS_LRZ_DEPTH_CLEAR" type="float" variants="A7XX"/>
- <reg32 offset="0x8113" name="GRAS_LRZ_DEPTH_BUFFER_INFO" variants="A7XX-" usage="rp_blit">
+ <bitset name="a6xx_gras_lrz_depth_buffer_info" inline="yes">
<bitfield name="DEPTH_FORMAT" low="0" high="2" type="a6xx_depth_format"/>
<bitfield name="UNK3" pos="3"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x8113" name="GRAS_LRZ_DEPTH_BUFFER_INFO" type="a6xx_gras_lrz_depth_buffer_info" variants="A7XX" usage="rp_blit"/>
- <!-- Always written together and always equal 09510840 00000a62 -->
- <reg32 offset="0x8120" name="GRAS_UNKNOWN_8120" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0x8121" name="GRAS_UNKNOWN_8121" variants="A7XX-" usage="cmd"/>
+ <doc>LUT used to convert quality buffer values to HW shading rate values. An array of 4-bit values.</doc>
+ <array offset="0x8120" name="GRAS_LRZ_QUALITY_LOOKUP_TABLE" variants="A7XX-" stride="1" length="2"/>
<!-- 0x8112-0x83ff invalid -->
@@ -1265,28 +1384,29 @@ by a particular renderpass/blit.
<bitfield name="D24S8" pos="19" type="boolean"/>
<!-- some sort of channel mask, disabled channels are set to zero ? -->
<bitfield name="MASK" low="20" high="23"/>
- <bitfield name="IFMT" low="24" high="28" type="a6xx_2d_ifmt"/>
+ <bitfield name="IFMT" low="24" high="26" type="a6xx_2d_ifmt"/>
+ <bitfield name="UNK27" pos="27" type="boolean"/>
+ <bitfield name="UNK28" pos="28" type="boolean"/>
<bitfield name="RASTER_MODE" pos="29" type="a6xx_raster_mode"/>
- <bitfield name="UNK30" pos="30" type="boolean" variants="A7XX-"/>
+ <bitfield name="COPY" pos="30" type="boolean" variants="A7XX-"/>
</bitset>
- <reg32 offset="0x8400" name="GRAS_A2D_BLT_CNTL" type="a6xx_a2d_bit_cntl" usage="rp_blit"/>
+ <reg32 offset="0x8400" name="GRAS_A2D_BLT_CNTL" type="a6xx_a2d_bit_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
<!-- note: the low 8 bits for src coords are valid, probably fixed point
it would be a bit weird though, since we subtract 1 from BR coords
apparently signed, gallium driver uses negative coords and it works?
-->
- <reg32 offset="0x8401" name="GRAS_A2D_SRC_XMIN" low="8" high="24" type="int" usage="rp_blit"/>
- <reg32 offset="0x8402" name="GRAS_A2D_SRC_XMAX" low="8" high="24" type="int" usage="rp_blit"/>
- <reg32 offset="0x8403" name="GRAS_A2D_SRC_YMIN" low="8" high="24" type="int" usage="rp_blit"/>
- <reg32 offset="0x8404" name="GRAS_A2D_SRC_YMAX" low="8" high="24" type="int" usage="rp_blit"/>
- <reg32 offset="0x8405" name="GRAS_A2D_DEST_TL" type="a6xx_reg_xy" usage="rp_blit"/>
- <reg32 offset="0x8406" name="GRAS_A2D_DEST_BR" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x8401" name="GRAS_A2D_SRC_XMIN" low="8" high="24" type="int" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8402" name="GRAS_A2D_SRC_XMAX" low="8" high="24" type="int" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8403" name="GRAS_A2D_SRC_YMIN" low="8" high="24" type="int" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8404" name="GRAS_A2D_SRC_YMAX" low="8" high="24" type="int" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8405" name="GRAS_A2D_DEST_TL" type="a6xx_reg_xy" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8406" name="GRAS_A2D_DEST_BR" type="a6xx_reg_xy" variants="A6XX-A7XX" usage="rp_blit"/>
<reg32 offset="0x8407" name="GRAS_2D_UNKNOWN_8407" low="0" high="31"/>
<reg32 offset="0x8408" name="GRAS_2D_UNKNOWN_8408" low="0" high="31"/>
<reg32 offset="0x8409" name="GRAS_2D_UNKNOWN_8409" low="0" high="31"/>
- <reg32 offset="0x840a" name="GRAS_A2D_SCISSOR_TL" type="a6xx_reg_xy" usage="rp_blit"/>
- <reg32 offset="0x840b" name="GRAS_A2D_SCISSOR_BR" type="a6xx_reg_xy" usage="rp_blit"/>
- <!-- 0x840c-0x85ff invalid -->
+ <reg32 offset="0x840a" name="GRAS_A2D_SCISSOR_TL" type="a6xx_reg_xy" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x840b" name="GRAS_A2D_SCISSOR_BR" type="a6xx_reg_xy" variants="A6XX-A7XX" usage="rp_blit"/>
<!-- always 0x880 ? (and 0 in a640/a650 traces?) -->
<reg32 offset="0x8600" name="GRAS_DBG_ECO_CNTL" usage="cmd">
@@ -1304,22 +1424,7 @@ by a particular renderpass/blit.
-->
<!-- same as GRAS_BIN_CONTROL, but without bit 27: -->
- <reg32 offset="0x8800" name="RB_CNTL" variants="A6XX" usage="rp_blit">
- <bitfield name="BINW" low="0" high="5" shr="5" type="uint"/>
- <bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
- <bitfield name="RENDER_MODE" low="18" high="20" type="a6xx_render_mode"/>
- <bitfield name="FORCE_LRZ_WRITE_DIS" pos="21" type="boolean"/>
- <bitfield name="BUFFERS_LOCATION" low="22" high="23" type="a6xx_buffers_location"/>
- <bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="24" high="26" type="a6xx_lrz_feedback_mask"/>
- </reg32>
-
- <reg32 offset="0x8800" name="RB_CNTL" variants="A7XX-" usage="rp_blit">
- <bitfield name="BINW" low="0" high="5" shr="5" type="uint"/>
- <bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
- <bitfield name="RENDER_MODE" low="18" high="20" type="a6xx_render_mode"/>
- <bitfield name="FORCE_LRZ_WRITE_DIS" pos="21" type="boolean"/>
- <bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="24" high="26" type="a6xx_lrz_feedback_mask"/>
- </reg32>
+ <reg32 offset="0x8800" name="RB_CNTL" variants="A6XX-A7XX" type="a6xx_bin_cntl" usage="rp_blit"/>
<reg32 offset="0x8801" name="RB_RENDER_CNTL" variants="A6XX" usage="rp_blit">
<bitfield name="CCUSINGLECACHELINESIZE" low="3" high="5"/>
@@ -1343,9 +1448,6 @@ by a particular renderpass/blit.
<bitfield name="CONSERVATIVERASEN" pos="11" type="boolean"/>
<bitfield name="INNERCONSERVATIVERASEN" pos="12" type="boolean"/>
</reg32>
- <reg32 offset="0x8116" name="GRAS_SU_RENDER_CNTL" variants="A7XX-" usage="rp_blit">
- <bitfield name="FS_DISABLE" pos="7" type="boolean"/>
- </reg32>
<reg32 offset="0x8802" name="RB_RAS_MSAA_CNTL" usage="rp_blit">
<bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
@@ -1512,9 +1614,7 @@ by a particular renderpass/blit.
<bitfield name="SAMPLE_MASK" low="16" high="31"/>
</reg32>
<!-- 0x8866-0x886f invalid -->
- <reg32 offset="0x8870" name="RB_DEPTH_PLANE_CNTL" usage="rp_blit">
- <bitfield name="Z_MODE" low="0" high="1" type="a6xx_ztest_mode"/>
- </reg32>
+ <reg32 offset="0x8870" name="RB_DEPTH_PLANE_CNTL" type="a6xx_depth_plane_cntl" usage="rp_blit"/>
<reg32 offset="0x8871" name="RB_DEPTH_CNTL" usage="rp_blit">
<bitfield name="Z_TEST_ENABLE" pos="0" type="boolean"/>
@@ -1528,14 +1628,9 @@ by a particular renderpass/blit.
<bitfield name="Z_READ_ENABLE" pos="6" type="boolean"/>
<bitfield name="Z_BOUNDS_ENABLE" pos="7" type="boolean"/>
</reg32>
- <reg32 offset="0x8114" name="GRAS_SU_DEPTH_CNTL" usage="rp_blit">
- <bitfield name="Z_TEST_ENABLE" pos="0" type="boolean"/>
- </reg32>
+
<!-- duplicates GRAS_SU_DEPTH_BUFFER_INFO: -->
- <reg32 offset="0x8872" name="RB_DEPTH_BUFFER_INFO" variants="A6XX" usage="rp_blit">
- <bitfield name="DEPTH_FORMAT" low="0" high="2" type="a6xx_depth_format"/>
- <bitfield name="UNK3" low="3" high="4"/>
- </reg32>
+ <reg32 offset="0x8872" name="RB_DEPTH_BUFFER_INFO" variants="A6XX" type="a6xx_depth_buffer_info" usage="rp_blit"/>
<!-- first 4 bits duplicates GRAS_SU_DEPTH_BUFFER_INFO -->
<reg32 offset="0x8872" name="RB_DEPTH_BUFFER_INFO" variants="A7XX-" usage="rp_blit">
<bitfield name="DEPTH_FORMAT" low="0" high="2" type="a6xx_depth_format"/>
@@ -1571,9 +1666,7 @@ by a particular renderpass/blit.
<bitfield name="ZPASS_BF" low="26" high="28" type="adreno_stencil_op"/>
<bitfield name="ZFAIL_BF" low="29" high="31" type="adreno_stencil_op"/>
</reg32>
- <reg32 offset="0x8115" name="GRAS_SU_STENCIL_CNTL" usage="rp_blit">
- <bitfield name="STENCIL_ENABLE" pos="0" type="boolean"/>
- </reg32>
+
<reg32 offset="0x8881" name="RB_STENCIL_BUFFER_INFO" variants="A6XX" usage="rp_blit">
<bitfield name="SEPARATE_STENCIL" pos="0" type="boolean"/>
<bitfield name="UNK1" pos="1" type="boolean"/>
@@ -1612,8 +1705,9 @@ by a particular renderpass/blit.
<reg32 offset="0x8899" name="RB_UNKNOWN_8899" variants="A7XX-" usage="cmd"/>
<!-- 0x8899-0x88bf invalid -->
<!-- clamps depth value for depth test/write -->
- <reg32 offset="0x88c0" name="RB_VIEWPORT_ZCLAMP_MIN" type="float" usage="rp_blit"/>
- <reg32 offset="0x88c1" name="RB_VIEWPORT_ZCLAMP_MAX" type="float" usage="rp_blit"/>
+ <reg32 offset="0x88c0" name="RB_VIEWPORT_ZCLAMP_MIN" type="float" usage="rp_blit" variants="A6XX-A7XX"/>
+ <reg32 offset="0x88c1" name="RB_VIEWPORT_ZCLAMP_MAX" type="float" usage="rp_blit" variants="A6XX-A7XX"/>
+
<!-- 0x88c2-0x88cf invalid-->
<reg32 offset="0x88d0" name="RB_RESOLVE_CNTL_0" usage="rp_blit">
<bitfield name="UNK0" low="0" high="12"/>
@@ -1622,7 +1716,7 @@ by a particular renderpass/blit.
<reg32 offset="0x88d1" name="RB_RESOLVE_CNTL_1" type="a6xx_reg_xy" usage="rp_blit"/>
<reg32 offset="0x88d2" name="RB_RESOLVE_CNTL_2" type="a6xx_reg_xy" usage="rp_blit"/>
<!-- weird to duplicate other regs from same block?? -->
- <reg32 offset="0x88d3" name="RB_RESOLVE_CNTL_3" usage="rp_blit">
+ <reg32 offset="0x88d3" name="RB_RESOLVE_CNTL_3" variants="A6XX-A7XX" usage="rp_blit">
<bitfield name="BINW" low="0" high="5" shr="5" type="uint"/>
<bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
</reg32>
@@ -1646,10 +1740,13 @@ by a particular renderpass/blit.
<!-- array-pitch is size of layer -->
<reg32 offset="0x88db" name="RB_RESOLVE_SYSTEM_BUFFER_ARRAY_PITCH" low="0" high="28" shr="6" type="uint" usage="rp_blit"/>
<reg64 offset="0x88dc" name="RB_RESOLVE_SYSTEM_FLAG_BUFFER_BASE" type="waddress" align="64" usage="rp_blit"/>
- <reg32 offset="0x88de" name="RB_RESOLVE_SYSTEM_FLAG_BUFFER_PITCH" usage="rp_blit">
+
+ <bitset name="a6xx_flag_buffer_pitch" inline="yes">
<bitfield name="PITCH" low="0" high="10" shr="6" type="uint"/>
- <bitfield name="ARRAY_PITCH" low="11" high="27" shr="7" type="uint"/>
- </reg32>
+ <bitfield name="ARRAY_PITCH" low="11" high="28" shr="7" type="uint"/>
+ </bitset>
+
+ <reg32 offset="0x88de" name="RB_RESOLVE_SYSTEM_FLAG_BUFFER_PITCH" type="a6xx_flag_buffer_pitch" usage="rp_blit"/>
<reg32 offset="0x88df" name="RB_RESOLVE_CLEAR_COLOR_DW0" usage="rp_blit"/>
<reg32 offset="0x88e0" name="RB_RESOLVE_CLEAR_COLOR_DW1" usage="rp_blit"/>
@@ -1722,10 +1819,7 @@ by a particular renderpass/blit.
<reg32 offset="0x88f0" name="RB_UNKNOWN_88F0" low="0" high="11" usage="cmd"/>
<!-- could be for separate stencil? (or may not be a flag buffer at all) -->
<reg64 offset="0x88f1" name="RB_UNK_FLAG_BUFFER_BASE" type="waddress" align="64"/>
- <reg32 offset="0x88f3" name="RB_UNK_FLAG_BUFFER_PITCH">
- <bitfield name="PITCH" low="0" high="10" shr="6" type="uint"/>
- <bitfield name="ARRAY_PITCH" low="11" high="23" shr="7" type="uint"/>
- </reg32>
+ <reg32 offset="0x88f3" name="RB_UNK_FLAG_BUFFER_PITCH" type="a6xx_flag_buffer_pitch"/>
<reg32 offset="0x88f4" name="RB_VRS_CONFIG" usage="rp_blit">
<bitfield name="UNK2" pos="2" type="boolean"/>
@@ -1733,8 +1827,9 @@ by a particular renderpass/blit.
<bitfield name="ATTACHMENT_FSR_ENABLE" pos="5" type="boolean"/>
<bitfield name="PRIMITIVE_FSR_ENABLE" pos="18" type="boolean"/>
</reg32>
- <!-- Connected to VK_EXT_fragment_density_map? -->
- <reg32 offset="0x88f5" name="RB_UNKNOWN_88F5" variants="A7XX-"/>
+ <reg32 offset="0x88f5" name="RB_BIN_FOVEAT" variants="A7XX-" usage="cmd">
+ <bitfield name="BINSCALEEN" pos="6" type="boolean"/>
+ </reg32>
<!-- 0x88f6-0x88ff invalid -->
<reg64 offset="0x8900" name="RB_DEPTH_FLAG_BUFFER_BASE" type="waddress" align="64" usage="rp_blit"/>
<reg32 offset="0x8902" name="RB_DEPTH_FLAG_BUFFER_PITCH" usage="rp_blit">
@@ -1743,12 +1838,10 @@ by a particular renderpass/blit.
<bitfield name="UNK8" low="8" high="10"/>
<bitfield name="ARRAY_PITCH" low="11" high="27" shr="7" type="uint"/>
</reg32>
+
<array offset="0x8903" name="RB_COLOR_FLAG_BUFFER" stride="3" length="8" usage="rp_blit">
<reg64 offset="0" name="ADDR" type="waddress" align="64"/>
- <reg32 offset="2" name="PITCH">
- <bitfield name="PITCH" low="0" high="10" shr="6" type="uint"/>
- <bitfield name="ARRAY_PITCH" low="11" high="28" shr="7" type="uint"/>
- </reg32>
+ <reg32 offset="2" name="PITCH" type="a6xx_flag_buffer_pitch"/>
</array>
<!-- 0x891b-0x8926 invalid -->
<doc>
@@ -1811,7 +1904,7 @@ by a particular renderpass/blit.
<reg64 offset="0x8c1e" name="RB_A2D_DEST_BUFFER_BASE_2" type="waddress" align="64" usage="rp_blit"/>
<reg64 offset="0x8c20" name="RB_A2D_DEST_FLAG_BUFFER_BASE" type="waddress" align="64" usage="rp_blit"/>
- <reg32 offset="0x8c22" name="RB_A2D_DEST_FLAG_BUFFER_PITCH" low="0" high="7" shr="6" type="uint" usage="rp_blit"/>
+ <reg32 offset="0x8c22" name="RB_A2D_DEST_FLAG_BUFFER_PITCH" type="a6xx_flag_buffer_pitch" usage="rp_blit"/>
<!-- this is a guess but seems likely (for NV12 with UBWC): -->
<reg64 offset="0x8c23" name="RB_A2D_DEST_FLAG_BUFFER_BASE_1" type="waddress" align="64" usage="rp_blit"/>
<reg32 offset="0x8c25" name="RB_A2D_DEST_FLAG_BUFFER_PITCH_1" low="0" high="7" shr="6" type="uint" usage="rp_blit"/>
@@ -1917,13 +2010,13 @@ by a particular renderpass/blit.
<bitfield name="CLIP_DIST_03_LOC" low="8" high="15" type="uint"/>
<bitfield name="CLIP_DIST_47_LOC" low="16" high="23" type="uint"/>
</bitset>
- <reg32 offset="0x9101" name="VPC_VS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
- <reg32 offset="0x9102" name="VPC_GS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
- <reg32 offset="0x9103" name="VPC_DS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9101" name="VPC_VS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9102" name="VPC_GS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9103" name="VPC_DS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
- <reg32 offset="0x9311" name="VPC_VS_CLIP_CULL_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
- <reg32 offset="0x9312" name="VPC_GS_CLIP_CULL_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
- <reg32 offset="0x9313" name="VPC_DS_CLIP_CULL_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9311" name="VPC_VS_CLIP_CULL_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9312" name="VPC_GS_CLIP_CULL_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9313" name="VPC_DS_CLIP_CULL_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
<bitset name="a6xx_vpc_xs_siv_cntl" inline="yes">
<bitfield name="LAYERLOC" low="0" high="7" type="uint"/>
@@ -1931,23 +2024,33 @@ by a particular renderpass/blit.
<bitfield name="SHADINGRATELOC" low="16" high="23" type="uint" variants="A7XX-"/>
</bitset>
- <reg32 offset="0x9104" name="VPC_VS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
- <reg32 offset="0x9105" name="VPC_GS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
- <reg32 offset="0x9106" name="VPC_DS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9104" name="VPC_VS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9105" name="VPC_GS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9106" name="VPC_DS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <reg32 offset="0x9314" name="VPC_VS_SIV_CNTL_V2" type="a6xx_vpc_xs_siv_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9315" name="VPC_GS_SIV_CNTL_V2" type="a6xx_vpc_xs_siv_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9316" name="VPC_DS_SIV_CNTL_V2" type="a6xx_vpc_xs_siv_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <bitset name="a6xx_vpc_rast_stream_cntl" inline="yes">
+ <!-- which stream to send to GRAS -->
+ <bitfield name="STREAM" low="0" high="1" type="uint"/>
+ <!-- discard primitives before rasterization -->
+ <bitfield name="DISCARD" pos="2" type="boolean"/>
+ </bitset>
- <reg32 offset="0x9314" name="VPC_VS_SIV_CNTL_V2" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
- <reg32 offset="0x9315" name="VPC_GS_SIV_CNTL_V2" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
- <reg32 offset="0x9316" name="VPC_DS_SIV_CNTL_V2" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9980" name="VPC_RAST_STREAM_CNTL" type="a6xx_vpc_rast_stream_cntl" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0x9107" name="VPC_RAST_STREAM_CNTL" type="a6xx_vpc_rast_stream_cntl" variants="A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9317" name="VPC_RAST_STREAM_CNTL_V2" type="a6xx_vpc_rast_stream_cntl" variants="A7XX" usage="rp_blit"/>
<reg32 offset="0x9107" name="VPC_UNKNOWN_9107" variants="A6XX" usage="rp_blit">
<!-- this mirrors VPC_RAST_STREAM_CNTL::DISCARD, although it seems it's unused -->
<bitfield name="RASTER_DISCARD" pos="0" type="boolean"/>
<bitfield name="UNK2" pos="2" type="boolean"/>
</reg32>
- <reg32 offset="0x9108" name="VPC_RAST_CNTL" usage="rp_blit">
- <bitfield name="MODE" low="0" high="1" type="a6xx_polygon_mode"/>
- </reg32>
+ <reg32 offset="0x9108" name="VPC_RAST_CNTL" type="a6xx_rast_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
<bitset name="a6xx_pc_cntl" inline="yes">
<bitfield name="PRIMITIVE_RESTART" pos="0" type="boolean"/>
<bitfield name="PROVOKING_VTX_LAST" pos="1" type="boolean"/>
@@ -1987,10 +2090,10 @@ by a particular renderpass/blit.
<bitfield name="VIEWS" low="2" high="6" type="uint"/>
</bitset>
- <reg32 offset="0x9109" name="VPC_PC_CNTL" type="a6xx_pc_cntl" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0x910a" name="VPC_GS_PARAM_0" type="a6xx_gs_param_0" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0x910b" name="VPC_STEREO_RENDERING_VIEWMASK" type="hex" low="0" high="15" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0x910c" name="VPC_STEREO_RENDERING_CNTL" type="a6xx_stereo_rendering_cntl" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0x9109" name="VPC_PC_CNTL" type="a6xx_pc_cntl" variants="A7XX" usage="rp_blit"/>
+ <reg32 offset="0x910a" name="VPC_GS_PARAM_0" type="a6xx_gs_param_0" variants="A7XX" usage="rp_blit"/>
+ <reg32 offset="0x910b" name="VPC_STEREO_RENDERING_VIEWMASK" type="hex" low="0" high="15" variants="A7XX" usage="rp_blit"/>
+ <reg32 offset="0x910c" name="VPC_STEREO_RENDERING_CNTL" type="a6xx_stereo_rendering_cntl" variants="A7XX" usage="rp_blit"/>
<enum name="a6xx_varying_interp_mode">
<value value="0" name="INTERP_SMOOTH"/>
@@ -2007,11 +2110,11 @@ by a particular renderpass/blit.
</enum>
<!-- 0x9109-0x91ff invalid -->
- <array offset="0x9200" name="VPC_VARYING_INTERP_MODE" stride="1" length="8" usage="rp_blit">
+ <array offset="0x9200" name="VPC_VARYING_INTERP_MODE" stride="1" length="8" variants="A6XX-A7XX" usage="rp_blit">
<doc>Packed array of a6xx_varying_interp_mode</doc>
<reg32 offset="0x0" name="MODE"/>
</array>
- <array offset="0x9208" name="VPC_VARYING_REPLACE_MODE_0" stride="1" length="8" usage="rp_blit">
+ <array offset="0x9208" name="VPC_VARYING_REPLACE_MODE" stride="1" length="8" variants="A6XX-A7XX" usage="rp_blit">
<doc>Packed array of a6xx_varying_ps_repl_mode</doc>
<reg32 offset="0x0" name="MODE"/>
</array>
@@ -2020,12 +2123,12 @@ by a particular renderpass/blit.
<reg32 offset="0x9210" name="VPC_UNKNOWN_9210" low="0" high="31" variants="A6XX" usage="cmd"/>
<reg32 offset="0x9211" name="VPC_UNKNOWN_9211" low="0" high="31" variants="A6XX" usage="cmd"/>
- <array offset="0x9212" name="VPC_VARYING_LM_TRANSFER_CNTL_0" stride="1" length="4" usage="rp_blit">
+ <array offset="0x9212" name="VPC_VARYING_LM_TRANSFER_CNTL" stride="1" length="4" variants="A6XX-A7XX" usage="rp_blit">
<!-- one bit per varying component: -->
<reg32 offset="0" name="DISABLE"/>
</array>
- <reg32 offset="0x9216" name="VPC_SO_MAPPING_WPTR" usage="rp_blit">
+ <bitset name="a6xx_vpc_so_mapping_wptr" inline="yes">
<!--
Choose which DWORD to write to. There is an array of
(4 * 64) DWORDs, dumped in the devcoredump at
@@ -2052,20 +2155,25 @@ by a particular renderpass/blit.
<bitfield name="ADDR" low="0" high="7" type="hex"/>
<!-- clear all A_EN and B_EN bits for all DWORDs -->
<bitfield name="RESET" pos="16" type="boolean"/>
- </reg32>
- <!-- special register, write multiple times to load SO program (not readable) -->
- <reg32 offset="0x9217" name="VPC_SO_MAPPING_PORT" usage="rp_blit">
+ </bitset>
+
+ <reg32 offset="0x9216" name="VPC_SO_MAPPING_WPTR" type="a6xx_vpc_so_mapping_wptr" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <bitset name="a6xx_vpc_so_mapping_port" inline="yes">
<bitfield name="A_BUF" low="0" high="1" type="uint"/>
<bitfield name="A_OFF" low="2" high="10" shr="2" type="uint"/>
<bitfield name="A_EN" pos="11" type="boolean"/>
<bitfield name="B_BUF" low="12" high="13" type="uint"/>
<bitfield name="B_OFF" low="14" high="22" shr="2" type="uint"/>
<bitfield name="B_EN" pos="23" type="boolean"/>
- </reg32>
+ </bitset>
+
+ <!-- special register, write multiple times to load SO program (not readable) -->
+ <reg32 offset="0x9217" name="VPC_SO_MAPPING_PORT" type="a6xx_vpc_so_mapping_port" variants="A6XX-A7XX" usage="rp_blit"/>
- <reg64 offset="0x9218" name="VPC_SO_QUERY_BASE" type="waddress" align="32" usage="cmd"/>
+ <reg64 offset="0x9218" name="VPC_SO_QUERY_BASE" type="waddress" align="32" variants="A6XX-A7XX" usage="cmd"/>
- <array offset="0x921a" name="VPC_SO" stride="7" length="4" usage="cmd">
+ <array offset="0x921a" name="VPC_SO" stride="7" length="4" variants="A6XX-A7XX" usage="cmd">
<reg64 offset="0" name="BUFFER_BASE" type="waddress" align="32"/>
<reg32 offset="2" name="BUFFER_SIZE" low="2" high="31" shr="2"/>
<reg32 offset="3" name="BUFFER_STRIDE" low="0" high="9" shr="2"/>
@@ -2073,12 +2181,13 @@ by a particular renderpass/blit.
<reg64 offset="5" name="FLUSH_BASE" type="waddress" align="32"/>
</array>
- <reg32 offset="0x9236" name="VPC_REPLACE_MODE_CNTL" usage="cmd">
+ <bitset name="a6xx_vpc_replace_mode_cntl" inline="yes">
<bitfield name="INVERT" pos="0" type="boolean"/>
- </reg32>
- <!-- 0x9237-0x92ff invalid -->
- <!-- always 0x0 ? -->
- <reg32 offset="0x9300" name="VPC_UNKNOWN_9300" low="0" high="2" usage="cmd"/>
+ </bitset>
+
+ <reg32 offset="0x9236" name="VPC_REPLACE_MODE_CNTL" type="a6xx_vpc_replace_mode_cntl" variants="A6XX-A7XX" usage="cmd"/>
+
+ <reg32 offset="0x9300" name="VPC_ROTATION_CNTL" low="0" high="2" variants="A6XX-A7XX" usage="cmd"/>
<bitset name="a6xx_vpc_xs_cntl" inline="yes">
<doc>
@@ -2097,11 +2206,12 @@ by a particular renderpass/blit.
</doc>
</bitfield>
</bitset>
- <reg32 offset="0x9301" name="VPC_VS_CNTL" type="a6xx_vpc_xs_cntl" usage="rp_blit"/>
- <reg32 offset="0x9302" name="VPC_GS_CNTL" type="a6xx_vpc_xs_cntl" usage="rp_blit"/>
- <reg32 offset="0x9303" name="VPC_DS_CNTL" type="a6xx_vpc_xs_cntl" usage="rp_blit"/>
- <reg32 offset="0x9304" name="VPC_PS_CNTL" usage="rp_blit">
+ <reg32 offset="0x9301" name="VPC_VS_CNTL" type="a6xx_vpc_xs_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9302" name="VPC_GS_CNTL" type="a6xx_vpc_xs_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9303" name="VPC_DS_CNTL" type="a6xx_vpc_xs_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <bitset name="a6xx_vpc_ps_cntl" inline="yes">
<bitfield name="NUMNONPOSVAR" low="0" high="7" type="uint"/>
<!-- for fixed-function (i.e. no GS) gl_PrimitiveID in FS -->
<bitfield name="PRIMIDLOC" low="8" high="15" type="uint"/>
@@ -2118,9 +2228,11 @@ by a particular renderpass/blit.
ViewID through the VS.
</doc>
</bitfield>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x9304" name="VPC_PS_CNTL" type="a6xx_vpc_ps_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
- <reg32 offset="0x9305" name="VPC_SO_CNTL" usage="rp_blit">
+ <bitset name="a6xx_vpc_so_cntl" inline="yes">
<!--
It's offset by 1, and 0 means "disabled"
-->
@@ -2129,22 +2241,28 @@ by a particular renderpass/blit.
<bitfield name="BUF2_STREAM" low="6" high="8" type="uint"/>
<bitfield name="BUF3_STREAM" low="9" high="11" type="uint"/>
<bitfield name="STREAM_ENABLE" low="15" high="18" type="hex"/>
- </reg32>
- <reg32 offset="0x9306" name="VPC_SO_OVERRIDE" usage="rp_blit">
+ </bitset>
+
+ <reg32 offset="0x9305" name="VPC_SO_CNTL" type="a6xx_vpc_so_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <bitset name="a6xx_so_override" inline="yes">
<bitfield name="DISABLE" pos="0" type="boolean"/>
- </reg32>
- <reg32 offset="0x9307" name="VPC_PS_RAST_CNTL" variants="A6XX-" usage="rp_blit"> <!-- A702 + A7xx -->
- <bitfield name="MODE" low="0" high="1" type="a6xx_polygon_mode"/>
- </reg32>
- <reg32 offset="0x9308" name="VPC_ATTR_BUF_GMEM_SIZE" variants="A7XX-" usage="rp_blit">
- <bitfield name="SIZE_GMEM" low="0" high="31"/>
- </reg32>
- <reg32 offset="0x9309" name="VPC_ATTR_BUF_GMEM_BASE" variants="A7XX-" usage="rp_blit">
- <bitfield name="BASE_GMEM" low="0" high="31"/>
- </reg32>
- <reg32 offset="0x9b09" name="PC_ATTR_BUF_GMEM_SIZE" variants="A7XX-" usage="rp_blit">
- <bitfield name="SIZE_GMEM" low="0" high="31"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x9306" name="VPC_SO_OVERRIDE" type="a6xx_so_override" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <reg32 offset="0x9807" name="PC_DGEN_SO_OVERRIDE" type="a6xx_so_override" variants="A7XX" usage="rp_blit"/>
+
+ <reg32 offset="0x9307" name="VPC_PS_RAST_CNTL" type="a6xx_rast_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <reg32 offset="0x9308" name="VPC_ATTR_BUF_GMEM_SIZE" variants="A7XX" type="uint" usage="rp_blit"/>
+ <reg32 offset="0x9309" name="VPC_ATTR_BUF_GMEM_BASE" variants="A7XX" type="uint" usage="rp_blit"/>
+
+ <reg32 offset="0x9b09" name="PC_ATTR_BUF_GMEM_SIZE" variants="A7XX" type="uint" usage="rp_blit"/>
+
+ <reg32 offset="0x930a" name="VPC_UNKNOWN_930A" variants="A7XX"/>
+
+ <reg32 offset="0x960a" name="VPC_FLATSHADE_MODE_CNTL" variants="A7XX"/>
<!-- 0x9307-0x95ff invalid -->
@@ -2159,52 +2277,62 @@ by a particular renderpass/blit.
<!-- TODO: regs from 0x9624-0x963a -->
<!-- 0x963b-0x97ff invalid -->
- <reg32 offset="0x9800" name="PC_HS_PARAM_0" low="0" high="5" type="uint" usage="rp_blit"/>
+ <reg32 offset="0x9800" name="PC_HS_PARAM_0" low="0" high="5" type="uint" variants="A6XX-A7XX" usage="rp_blit"/>
- <!-- always 0x0 ? -->
- <reg32 offset="0x9801" name="PC_HS_PARAM_1" usage="rp_blit">
+ <bitset name="a6xx_pc_hs_param_1" inline="yes">
<bitfield name="SIZE" low="0" high="10" type="uint"/>
<bitfield name="UNK13" pos="13"/>
- </reg32>
+ </bitset>
- <reg32 offset="0x9802" name="PC_DS_PARAM" usage="rp_blit">
+ <reg32 offset="0x9801" name="PC_HS_PARAM_1" type="a6xx_pc_hs_param_1" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <bitset name="a6xx_pc_ds_param" inline="yes">
<bitfield name="SPACING" low="0" high="1" type="a6xx_tess_spacing"/>
<bitfield name="OUTPUT" low="2" high="3" type="a6xx_tess_output"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x9802" name="PC_DS_PARAM" type="a6xx_pc_ds_param" variants="A6XX-A7XX" usage="rp_blit"/>
- <reg32 offset="0x9803" name="PC_RESTART_INDEX" low="0" high="31" type="uint" usage="rp_blit"/>
- <reg32 offset="0x9804" name="PC_MODE_CNTL" low="0" high="7" usage="rp_blit"/>
+ <reg32 offset="0x9803" name="PC_RESTART_INDEX" low="0" high="31" type="uint" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <reg32 offset="0x9804" name="PC_MODE_CNTL" low="0" high="7" variants="A6XX-A7XX" usage="rp_blit"/>
<reg32 offset="0x9805" name="PC_POWER_CNTL" low="0" high="2" usage="rp_blit"/>
- <reg32 offset="0x9806" name="PC_PS_CNTL" usage="rp_blit">
+ <bitset name="a6xx_pc_ps_cntl" inline="yes">
<bitfield name="PRIMITIVEIDEN" pos="0" type="boolean"/>
- </reg32>
+ </bitset>
- <!-- New in a6xx gen3+ -->
- <reg32 offset="0x9808" name="PC_DGEN_SO_CNTL" usage="rp_blit">
+ <reg32 offset="0x9806" name="PC_PS_CNTL" type="a6xx_pc_ps_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <bitset name="a6xx_pc_dgen_so_cntl" inline="yes">
<bitfield name="STREAM_ENABLE" low="15" high="18" type="hex"/>
- </reg32>
+ </bitset>
- <reg32 offset="0x980a" name="PC_DGEN_SU_CONSERVATIVE_RAS_CNTL">
+ <!-- New in a6xx gen3+ -->
+ <reg32 offset="0x9808" name="PC_DGEN_SO_CNTL" type="a6xx_pc_dgen_so_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <bitset name="a6xx_pc_dgen_su_conservative_ras_cntl" inline="yes">
<bitfield name="CONSERVATIVERASEN" pos="0" type="boolean"/>
- </reg32>
- <!-- 0x980b-0x983f invalid -->
+ </bitset>
+
+ <reg32 offset="0x980a" name="PC_DGEN_SU_CONSERVATIVE_RAS_CNTL" type="a6xx_pc_dgen_su_conservative_ras_cntl" variants="A6XX-A7XX"/>
<!-- 0x9840 - 0x9842 are not readable -->
- <reg32 offset="0x9840" name="PC_DRAW_INITIATOR">
+ <bitset name="a6xx_draw_initiator" inline="yes">
<bitfield name="STATE_ID" low="0" high="7"/>
- </reg32>
+ </bitset>
- <reg32 offset="0x9841" name="PC_KERNEL_INITIATOR">
- <bitfield name="STATE_ID" low="0" high="7"/>
- </reg32>
+ <reg32 offset="0x9840" name="PC_DRAW_INITIATOR" type="a6xx_draw_initiator" variants="A6XX-A7XX"/>
+ <reg32 offset="0x9841" name="PC_KERNEL_INITIATOR" type="a6xx_draw_initiator" variants="A6XX-A7XX"/>
- <reg32 offset="0x9842" name="PC_EVENT_INITIATOR">
+ <bitset name="a6xx_event_initiator" inline="yes">
<!-- I think only the low bit is actually used? -->
<bitfield name="STATE_ID" low="16" high="23"/>
<bitfield name="EVENT" low="0" high="6" type="vgt_event_type"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x9842" name="PC_EVENT_INITIATOR" type="a6xx_event_initiator" variants="A6XX-A7XX"/>
<!--
0x9880 written in a lot of places by SQE, same value gets written
@@ -2215,45 +2343,21 @@ by a particular renderpass/blit.
<!-- 0x9843-0x997f invalid -->
- <reg32 offset="0x9981" name="PC_DGEN_RAST_CNTL" variants="A6XX" usage="rp_blit">
- <bitfield name="MODE" low="0" high="1" type="a6xx_polygon_mode"/>
- </reg32>
- <reg32 offset="0x9809" name="PC_DGEN_RAST_CNTL" variants="A7XX-" usage="rp_blit">
- <bitfield name="MODE" low="0" high="1" type="a6xx_polygon_mode"/>
- </reg32>
-
- <reg32 offset="0x9980" name="VPC_RAST_STREAM_CNTL" variants="A6XX" usage="rp_blit">
- <!-- which stream to send to GRAS -->
- <bitfield name="STREAM" low="0" high="1" type="uint"/>
- <!-- discard primitives before rasterization -->
- <bitfield name="DISCARD" pos="2" type="boolean"/>
- </reg32>
- <!-- VPC_RAST_STREAM_CNTL -->
- <reg32 offset="0x9107" name="VPC_RAST_STREAM_CNTL" variants="A7XX-" usage="rp_blit">
- <!-- which stream to send to GRAS -->
- <bitfield name="STREAM" low="0" high="1" type="uint"/>
- <!-- discard primitives before rasterization -->
- <bitfield name="DISCARD" pos="2" type="boolean"/>
- </reg32>
- <reg32 offset="0x9317" name="VPC_RAST_STREAM_CNTL_V2" variants="A7XX-" usage="rp_blit">
- <!-- which stream to send to GRAS -->
- <bitfield name="STREAM" low="0" high="1" type="uint"/>
- <!-- discard primitives before rasterization -->
- <bitfield name="DISCARD" pos="2" type="boolean"/>
- </reg32>
+ <reg32 offset="0x9981" name="PC_DGEN_RAST_CNTL" type="a6xx_rast_cntl" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0x9809" name="PC_DGEN_RAST_CNTL" type="a6xx_rast_cntl" variants="A7XX" usage="rp_blit"/>
<!-- Both are a750+.
Probably needed to correctly overlap execution of several draws.
-->
- <reg32 offset="0x9885" name="PC_HS_BUFFER_SIZE" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x9885" name="PC_HS_BUFFER_SIZE" variants="A7XX" usage="cmd"/>
<!-- The blob adds a bit more space ({0x10, 0x20, 0x30, 0x40} bytes), but the meaning of
this additional space is not known.
-->
- <reg32 offset="0x9886" name="PC_TF_BUFFER_SIZE" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x9886" name="PC_TF_BUFFER_SIZE" variants="A7XX" usage="cmd"/>
<!-- 0x9982-0x9aff invalid -->
- <reg32 offset="0x9b00" name="PC_CNTL" type="a6xx_pc_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9b00" name="PC_CNTL" type="a6xx_pc_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
<bitset name="a6xx_pc_xs_cntl" inline="yes">
<doc>
@@ -2266,18 +2370,18 @@ by a particular renderpass/blit.
<bitfield name="LAYER" pos="9" type="boolean"/>
<bitfield name="VIEW" pos="10" type="boolean"/>
<!-- note: PC_VS_CNTL doesn't have the PRIMITIVE_ID bit -->
+ <!-- since HS can't output anything, only PRIMITIVE_ID is valid -->
<bitfield name="PRIMITIVE_ID" pos="11" type="boolean"/>
<bitfield name="CLIP_MASK" low="16" high="23" type="uint"/>
<bitfield name="SHADINGRATE" pos="24" type="boolean" variants="A7XX-"/>
</bitset>
- <reg32 offset="0x9b01" name="PC_VS_CNTL" type="a6xx_pc_xs_cntl" usage="rp_blit"/>
- <reg32 offset="0x9b02" name="PC_GS_CNTL" type="a6xx_pc_xs_cntl" usage="rp_blit"/>
- <!-- since HS can't output anything, only PRIMITIVE_ID is valid -->
- <reg32 offset="0x9b03" name="PC_HS_CNTL" type="a6xx_pc_xs_cntl" usage="rp_blit"/>
- <reg32 offset="0x9b04" name="PC_DS_CNTL" type="a6xx_pc_xs_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9b01" name="PC_VS_CNTL" type="a6xx_pc_xs_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9b02" name="PC_GS_CNTL" type="a6xx_pc_xs_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9b03" name="PC_HS_CNTL" type="a6xx_pc_xs_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9b04" name="PC_DS_CNTL" type="a6xx_pc_xs_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
- <reg32 offset="0x9b05" name="PC_GS_PARAM_0" type="a6xx_gs_param_0" usage="rp_blit"/>
+ <reg32 offset="0x9b05" name="PC_GS_PARAM_0" type="a6xx_gs_param_0" variants="A6XX-A7XX" usage="rp_blit"/>
<reg32 offset="0x9b06" name="PC_PRIMITIVE_CNTL_6" variants="A6XX" usage="rp_blit">
<doc>
@@ -2286,9 +2390,9 @@ by a particular renderpass/blit.
<bitfield name="STRIDE_IN_VPC" low="0" high="10" type="uint"/>
</reg32>
- <reg32 offset="0x9b07" name="PC_STEREO_RENDERING_CNTL" type="a6xx_stereo_rendering_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9b07" name="PC_STEREO_RENDERING_CNTL" type="a6xx_stereo_rendering_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
<!-- mask of enabled views, doesn't exist on A630 -->
- <reg32 offset="0x9b08" name="PC_STEREO_RENDERING_VIEWMASK" type="hex" low="0" high="15" usage="rp_blit"/>
+ <reg32 offset="0x9b08" name="PC_STEREO_RENDERING_VIEWMASK" type="hex" low="0" high="15" variants="A6XX-A7XX" usage="rp_blit"/>
<!-- 0x9b09-0x9bff invalid -->
<reg32 offset="0x9c00" name="PC_2D_EVENT_CMD">
<!-- special register (but note first 8 bits can be written/read) -->
@@ -2299,34 +2403,39 @@ by a particular renderpass/blit.
<!-- TODO: 0x9e00-0xa000 range incomplete -->
<reg32 offset="0x9e00" name="PC_DBG_ECO_CNTL"/>
<reg32 offset="0x9e01" name="PC_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
- <reg64 offset="0x9e04" name="PC_DMA_BASE"/>
- <reg32 offset="0x9e06" name="PC_DMA_OFFSET" type="uint"/>
- <reg32 offset="0x9e07" name="PC_DMA_SIZE" type="uint"/>
+ <reg64 offset="0x9e04" name="PC_DMA_BASE" type="address" variants="A6XX-A7XX"/>
+ <reg32 offset="0x9e06" name="PC_DMA_OFFSET" type="uint" variants="A6XX-A7XX"/>
+ <reg32 offset="0x9e07" name="PC_DMA_SIZE" type="uint" variants="A6XX-A7XX"/>
+
<reg64 offset="0x9e08" name="PC_TESS_BASE" variants="A6XX" type="waddress" align="32" usage="cmd"/>
- <reg64 offset="0x9810" name="PC_TESS_BASE" variants="A7XX-" type="waddress" align="32" usage="cmd"/>
+ <reg64 offset="0x9810" name="PC_TESS_BASE" variants="A7XX" type="waddress" align="32" usage="cmd"/>
- <reg32 offset="0x9e0b" name="PC_DRAWCALL_CNTL" type="vgt_draw_initiator_a4xx">
+ <reg32 offset="0x9e0b" name="PC_DRAWCALL_CNTL" type="vgt_draw_initiator_a4xx" variants="A6XX-A7XX">
<doc>
Possibly not really "initiating" the draw but the layout is similar
to VGT_DRAW_INITIATOR on older gens
</doc>
</reg32>
- <reg32 offset="0x9e0c" name="PC_DRAWCALL_INSTANCE_NUM" type="uint"/>
- <reg32 offset="0x9e0d" name="PC_DRAWCALL_SIZE" type="uint"/>
+ <reg32 offset="0x9e0c" name="PC_DRAWCALL_INSTANCE_NUM" type="uint" variants="A6XX-A7XX"/>
+ <reg32 offset="0x9e0d" name="PC_DRAWCALL_SIZE" type="uint" variants="A6XX-A7XX"/>
<!-- These match the contents of CP_SET_BIN_DATA (not written directly) -->
- <reg32 offset="0x9e11" name="PC_VIS_STREAM_CNTL">
+ <bitset name="a6xx_pc_vis_stream_cntl" inline="yes">
<bitfield name="UNK0" low="0" high="15"/>
<bitfield name="VSC_SIZE" low="16" high="21" type="uint"/>
<bitfield name="VSC_N" low="22" high="26" type="uint"/>
- </reg32>
- <reg64 offset="0x9e12" name="PC_PVIS_STREAM_BIN_BASE" type="waddress" align="32"/>
- <reg64 offset="0x9e14" name="PC_DVIS_STREAM_BIN_BASE" type="waddress" align="32"/>
+ </bitset>
+
+ <reg32 offset="0x9e11" name="PC_VIS_STREAM_CNTL" type="a6xx_pc_vis_stream_cntl" variants="A6XX-A7XX"/>
+ <reg64 offset="0x9e12" name="PC_PVIS_STREAM_BIN_BASE" type="waddress" align="32" variants="A6XX-A7XX"/>
+ <reg64 offset="0x9e14" name="PC_DVIS_STREAM_BIN_BASE" type="waddress" align="32" variants="A6XX-A7XX"/>
- <reg32 offset="0x9e1c" name="PC_DRAWCALL_CNTL_OVERRIDE">
+ <bitset name="a6xx_pc_drawcall_cntl_override" inline="yes">
<doc>Written by CP_SET_VISIBILITY_OVERRIDE handler</doc>
<bitfield name="OVERRIDE" pos="0" type="boolean"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x9e1c" name="PC_DRAWCALL_CNTL_OVERRIDE" type="a6xx_pc_drawcall_cntl_override" variants="A6XX-A7XX"/>
<reg32 offset="0x9e24" name="PC_UNKNOWN_9E24" variants="A7XX-" usage="cmd"/>
@@ -2932,7 +3041,7 @@ by a particular renderpass/blit.
<reg32 offset="0xa9b3" name="SP_CS_PROGRAM_COUNTER_OFFSET" type="uint" usage="cmd"/>
<reg64 offset="0xa9b4" name="SP_CS_BASE" type="address" align="32" usage="cmd"/>
<reg32 offset="0xa9b6" name="SP_CS_PVT_MEM_PARAM" type="a6xx_sp_xs_pvt_mem_param" usage="cmd"/>
- <reg64 offset="0xa9b7" name="SP_CS_PVT_MEM_BASE" align="32" usage="cmd"/>
+ <reg64 offset="0xa9b7" name="SP_CS_PVT_MEM_BASE" type="waddress" align="32" usage="cmd"/>
<reg32 offset="0xa9b9" name="SP_CS_PVT_MEM_SIZE" type="a6xx_sp_xs_pvt_mem_size" usage="cmd"/>
<reg32 offset="0xa9ba" name="SP_CS_TSIZE" low="0" high="7" type="uint" usage="cmd"/>
<reg32 offset="0xa9bb" name="SP_CS_CONFIG" type="a6xx_sp_xs_config" usage="cmd"/>
@@ -3017,7 +3126,7 @@ by a particular renderpass/blit.
UAV state for compute shader:
-->
<reg64 offset="0xa9f2" name="SP_CS_UAV_BASE" type="address" align="16" variants="A6XX"/>
- <reg64 offset="0xa9f8" name="SP_CS_UAV_BASE" type="address" align="16" variants="A7XX"/>
+ <reg64 offset="0xa9f8" name="SP_CS_UAV_BASE" type="address" align="16" variants="A7XX-"/>
<reg32 offset="0xaa00" name="SP_CS_USIZE" low="0" high="6" type="uint"/>
<!-- Correlated with avgs/uvgs usage in FS -->
@@ -3100,14 +3209,19 @@ by a particular renderpass/blit.
instructions VS/HS/DS/GS/FS. See SP_CS_UAV_BASE_* for compute shaders.
-->
<reg64 offset="0xab1a" name="SP_GFX_UAV_BASE" type="address" align="16" usage="cmd"/>
- <reg32 offset="0xab20" name="SP_GFX_USIZE" low="0" high="6" type="uint" usage="cmd"/>
+ <reg32 offset="0xab20" name="SP_GFX_USIZE" low="0" high="6" type="uint" variants="A6XX-A7XX" usage="cmd"/>
- <reg32 offset="0xab22" name="SP_UNKNOWN_AB22" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xab22" name="SP_UNKNOWN_AB22" variants="A7XX" usage="cmd"/>
+
+ <enum name="a6xx_sp_a2d_output_ifmt_type">
+ <value name="OUTPUT_IFMT_2D_FLOAT" value="0"/>
+ <value name="OUTPUT_IFMT_2D_SINT" value="1"/>
+ <value name="OUTPUT_IFMT_2D_UINT" value="2"/>
+ </enum>
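The rework just below replaces the old NORM/SINT/UINT booleans with bit 0 as HALF_PRECISION and a two-bit IFMT_TYPE drawn from this enum. A sketch of the new packing (field positions from the bitfields below; the helper itself is illustrative):

OUTPUT_IFMT_2D_FLOAT, OUTPUT_IFMT_2D_SINT, OUTPUT_IFMT_2D_UINT = 0, 1, 2

def a2d_output_info(half_precision, ifmt_type, color_format):
    # HALF_PRECISION pos=0, IFMT_TYPE bits 1..2, COLOR_FORMAT bits 3..10
    return int(half_precision) | (ifmt_type << 1) | ((color_format & 0xff) << 3)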
<bitset name="a6xx_sp_a2d_output_info" inline="yes">
- <bitfield name="NORM" pos="0" type="boolean"/>
- <bitfield name="SINT" pos="1" type="boolean"/>
- <bitfield name="UINT" pos="2" type="boolean"/>
+ <bitfield name="HALF_PRECISION" pos="0" type="boolean"/>
+ <bitfield name="IFMT_TYPE" low="1" high="2" type="a6xx_sp_a2d_output_ifmt_type"/>
<!-- looks like HW only cares about the base type of this format,
which matches the ifmt? -->
<bitfield name="COLOR_FORMAT" low="3" high="10" type="a6xx_format"/>
@@ -3152,7 +3266,7 @@ by a particular renderpass/blit.
<reg32 offset="0xae6b" name="SP_UNKNOWN_AE6B" variants="A7XX-" usage="cmd"/>
<reg32 offset="0xae6c" name="SP_HLSQ_DBG_ECO_CNTL" variants="A7XX-" usage="cmd"/>
<reg32 offset="0xae6d" name="SP_READ_SEL" variants="A7XX-">
- <bitfield name="LOCATION" low="18" high="19" type="a7xx_state_location"/>
+ <bitfield name="LOCATION" low="18" high="20" type="a7xx_state_location"/>
<bitfield name="PIPE" low="16" high="17" type="a7xx_pipe"/>
<bitfield name="STATETYPE" low="8" high="15" type="a7xx_statetype_id"/>
<bitfield name="USPTP" low="4" high="7"/>
@@ -3188,7 +3302,7 @@ by a particular renderpass/blit.
<!-- looks to work in the same way as a5xx: -->
<reg64 offset="0xb302" name="TPL1_GFX_BORDER_COLOR_BASE" type="address" align="128" usage="cmd"/>
- <reg32 offset="0xb304" name="TPL1_MSAA_SAMPLE_POS_CNTL" type="a6xx_msaa_sample_pos_cntl" usage="rp_blit"/>
+ <reg32 offset="0xb304" name="TPL1_MSAA_SAMPLE_POS_CNTL" type="a6xx_msaa_sample_pos_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
<reg32 offset="0xb305" name="TPL1_PROGRAMMABLE_MSAA_POS_0" type="a6xx_programmable_msaa_pos" usage="rp_blit"/>
<reg32 offset="0xb306" name="TPL1_PROGRAMMABLE_MSAA_POS_1" type="a6xx_programmable_msaa_pos" usage="rp_blit"/>
<reg32 offset="0xb307" name="TPL1_WINDOW_OFFSET" type="a6xx_reg_xy" usage="rp_blit"/>
@@ -3228,12 +3342,12 @@ by a particular renderpass/blit.
</reg32>
<reg32 offset="0xb2c0" name="TPL1_A2D_SRC_TEXTURE_INFO" type="a6xx_a2d_src_texture_info" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xb2c1" name="TPL1_A2D_SRC_TEXTURE_SIZE" variants="A7XX">
+ <reg32 offset="0xb2c1" name="TPL1_A2D_SRC_TEXTURE_SIZE" variants="A7XX-">
<bitfield name="WIDTH" low="0" high="14" type="uint"/>
<bitfield name="HEIGHT" low="15" high="29" type="uint"/>
</reg32>
<reg64 offset="0xb2c2" name="TPL1_A2D_SRC_TEXTURE_BASE" type="address" align="16" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xb2c4" name="TPL1_A2D_SRC_TEXTURE_PITCH" variants="A7XX">
+ <reg32 offset="0xb2c4" name="TPL1_A2D_SRC_TEXTURE_PITCH" variants="A7XX-">
<!--
Bits from 3..9 must be zero unless 'TPL1_A2D_BLT_CNTL::TYPE'
is A6XX_TEX_IMG_BUFFER, which allows for lower alignment.
@@ -3266,13 +3380,13 @@ by a particular renderpass/blit.
<reg32 offset="0xb2ce" name="SP_PS_UNKNOWN_B4CE" low="0" high="31" variants="A7XX"/>
<reg32 offset="0xb2cf" name="SP_PS_UNKNOWN_B4CF" low="0" high="30" variants="A7XX"/>
<reg32 offset="0xb2d0" name="SP_PS_UNKNOWN_B4D0" low="0" high="29" variants="A7XX"/>
- <reg32 offset="0xb2d1" name="TPL1_A2D_WINDOW_OFFSET" type="a6xx_reg_xy" variants="A7XX"/>
+ <reg32 offset="0xb2d1" name="TPL1_A2D_WINDOW_OFFSET" type="a6xx_reg_xy" variants="A7XX-"/>
<reg32 offset="0xb2d2" name="TPL1_A2D_BLT_CNTL" variants="A7XX-" usage="rp_blit">
<bitfield name="RAW_COPY" pos="0" type="boolean"/>
<bitfield name="START_OFFSET_TEXELS" low="16" high="21"/>
<bitfield name="TYPE" low="29" high="31" type="a6xx_tex_type"/>
</reg32>
- <reg32 offset="0xab21" name="SP_WINDOW_OFFSET" type="a6xx_reg_xy" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xab21" name="SP_WINDOW_OFFSET" type="a6xx_reg_xy" variants="A7XX" usage="rp_blit"/>
<!-- always 0x100000 or 0x1000000? -->
<reg32 offset="0xb600" name="TPL1_DBG_ECO_CNTL" low="0" high="25" usage="cmd"/>
@@ -3292,17 +3406,13 @@ by a particular renderpass/blit.
</reg32>
<reg32 offset="0xb605" name="TPL1_UNKNOWN_B605" low="0" high="7" type="uint" variants="A6XX" usage="cmd"/> <!-- always 0x0 or 0x44 ? -->
- <reg32 offset="0xb608" name="TPL1_BICUBIC_WEIGHTS_TABLE_0" low="0" high="29" variants="A6XX"/>
- <reg32 offset="0xb609" name="TPL1_BICUBIC_WEIGHTS_TABLE_1" low="0" high="29" variants="A6XX"/>
- <reg32 offset="0xb60a" name="TPL1_BICUBIC_WEIGHTS_TABLE_2" low="0" high="29" variants="A6XX"/>
- <reg32 offset="0xb60b" name="TPL1_BICUBIC_WEIGHTS_TABLE_3" low="0" high="29" variants="A6XX"/>
- <reg32 offset="0xb60c" name="TPL1_BICUBIC_WEIGHTS_TABLE_4" low="0" high="29" variants="A6XX"/>
+ <array offset="0xb608" name="TPL1_BICUBIC_WEIGHTS_TABLE" stride="1" length="5" variants="A6XX">
+ <reg32 offset="0" name="REG" low="0" high="29"/>
+ </array>
- <reg32 offset="0xb608" name="TPL1_BICUBIC_WEIGHTS_TABLE_0" low="0" high="29" variants="A7XX" usage="cmd"/>
- <reg32 offset="0xb609" name="TPL1_BICUBIC_WEIGHTS_TABLE_1" low="0" high="29" variants="A7XX" usage="cmd"/>
- <reg32 offset="0xb60a" name="TPL1_BICUBIC_WEIGHTS_TABLE_2" low="0" high="29" variants="A7XX" usage="cmd"/>
- <reg32 offset="0xb60b" name="TPL1_BICUBIC_WEIGHTS_TABLE_3" low="0" high="29" variants="A7XX" usage="cmd"/>
- <reg32 offset="0xb60c" name="TPL1_BICUBIC_WEIGHTS_TABLE_4" low="0" high="29" variants="A7XX" usage="cmd"/>
+ <array offset="0xb608" name="TPL1_BICUBIC_WEIGHTS_TABLE" stride="1" length="5" variants="A7XX">
+ <reg32 offset="0" name="REG" low="0" high="29" usage="cmd"/>
+ </array>
<array offset="0xb610" name="TPL1_PERFCTR_TP_SEL" stride="1" length="12" variants="A6XX"/>
<array offset="0xb610" name="TPL1_PERFCTR_TP_SEL" stride="1" length="18" variants="A7XX"/>
@@ -3634,7 +3744,7 @@ by a particular renderpass/blit.
<reg32 offset="0xbb10" name="SP_PS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A6XX" usage="rp_blit"/>
<reg32 offset="0xab03" name="SP_PS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A7XX-" usage="rp_blit"/>
- <array offset="0xab40" name="SP_SHARED_CONSTANT_GFX_0" stride="1" length="64" variants="A7XX-"/>
+ <array offset="0xab40" name="SP_SHARED_CONSTANT_GFX" stride="1" length="64" variants="A7XX"/>
<reg32 offset="0xbb11" name="HLSQ_SHARED_CONSTS" variants="A6XX" usage="cmd">
<doc>
@@ -3796,6 +3906,14 @@ by a particular renderpass/blit.
<reg32 offset="0x0030" name="CFG_DBGBUS_TRACE_BUF2"/>
</domain>
+<domain name="A7XX_CX_DBGC" width="32" varset="chip">
+ <!-- Bitfields shifted, but otherwise the same: -->
+ <reg32 offset="0x0000" name="CFG_DBGBUS_SEL_A" variants="A7XX-">
+ <bitfield high="7" low="0" name="PING_INDEX"/>
+ <bitfield high="24" low="16" name="PING_BLK_SEL"/>
+ </reg32>
+</domain>
+
<domain name="A6XX_CX_MISC" width="32" prefix="variant" varset="chip">
<reg32 offset="0x0001" name="SYSTEM_CACHE_CNTL_0"/>
<reg32 offset="0x0002" name="SYSTEM_CACHE_CNTL_1"/>
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx_descriptors.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx_descriptors.xml
index 307d43dda8a2..56cfaff614a4 100644
--- a/drivers/gpu/drm/msm/registers/adreno/a6xx_descriptors.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx_descriptors.xml
@@ -9,38 +9,6 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<domain name="A6XX_TEX_SAMP" width="32">
<doc>Texture sampler dwords</doc>
- <enum name="a6xx_tex_filter"> <!-- same as a4xx? -->
- <value name="A6XX_TEX_NEAREST" value="0"/>
- <value name="A6XX_TEX_LINEAR" value="1"/>
- <value name="A6XX_TEX_ANISO" value="2"/>
- <value name="A6XX_TEX_CUBIC" value="3"/> <!-- a650 only -->
- </enum>
- <enum name="a6xx_tex_clamp"> <!-- same as a4xx? -->
- <value name="A6XX_TEX_REPEAT" value="0"/>
- <value name="A6XX_TEX_CLAMP_TO_EDGE" value="1"/>
- <value name="A6XX_TEX_MIRROR_REPEAT" value="2"/>
- <value name="A6XX_TEX_CLAMP_TO_BORDER" value="3"/>
- <value name="A6XX_TEX_MIRROR_CLAMP" value="4"/>
- </enum>
- <enum name="a6xx_tex_aniso"> <!-- same as a4xx? -->
- <value name="A6XX_TEX_ANISO_1" value="0"/>
- <value name="A6XX_TEX_ANISO_2" value="1"/>
- <value name="A6XX_TEX_ANISO_4" value="2"/>
- <value name="A6XX_TEX_ANISO_8" value="3"/>
- <value name="A6XX_TEX_ANISO_16" value="4"/>
- </enum>
- <enum name="a6xx_reduction_mode">
- <value name="A6XX_REDUCTION_MODE_AVERAGE" value="0"/>
- <value name="A6XX_REDUCTION_MODE_MIN" value="1"/>
- <value name="A6XX_REDUCTION_MODE_MAX" value="2"/>
- </enum>
- <enum name="a6xx_fast_border_color">
- <!-- R B G A -->
- <value name="A6XX_BORDER_COLOR_0_0_0_0" value="0"/>
- <value name="A6XX_BORDER_COLOR_0_0_0_1" value="1"/>
- <value name="A6XX_BORDER_COLOR_1_1_1_0" value="2"/>
- <value name="A6XX_BORDER_COLOR_1_1_1_1" value="3"/>
- </enum>
<reg32 offset="0" name="0">
<bitfield name="MIPFILTER_LINEAR_NEAR" pos="0" type="boolean"/>
@@ -79,14 +47,6 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<domain name="A6XX_TEX_CONST" width="32" varset="chip">
<doc>Texture constant dwords</doc>
- <enum name="a6xx_tex_swiz"> <!-- same as a4xx? -->
- <value name="A6XX_TEX_X" value="0"/>
- <value name="A6XX_TEX_Y" value="1"/>
- <value name="A6XX_TEX_Z" value="2"/>
- <value name="A6XX_TEX_W" value="3"/>
- <value name="A6XX_TEX_ZERO" value="4"/>
- <value name="A6XX_TEX_ONE" value="5"/>
- </enum>
<reg32 offset="0" name="0">
<bitfield name="TILE_MODE" low="0" high="1" type="a6xx_tile_mode"/>
<bitfield name="SRGB" pos="2" type="boolean"/>
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml
index 665539b098c6..4e42f055b85f 100644
--- a/drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml
@@ -320,14 +320,14 @@ to upconvert to 32b float internally?
16b float: 3
-->
<enum name="a6xx_2d_ifmt">
- <value value="0x10" name="R2D_UNORM8"/>
<value value="0x7" name="R2D_INT32"/>
<value value="0x6" name="R2D_INT16"/>
<value value="0x5" name="R2D_INT8"/>
<value value="0x4" name="R2D_FLOAT32"/>
<value value="0x3" name="R2D_FLOAT16"/>
+ <value value="0x2" name="R2D_SNORM8"/>
<value value="0x1" name="R2D_UNORM8_SRGB"/>
- <value value="0x0" name="R2D_RAW"/>
+ <value value="0x0" name="R2D_UNORM8"/>
</enum>
<enum name="a6xx_tex_type">
@@ -380,4 +380,50 @@ to upconvert to 32b float internally?
<value value="0x3" name="TESS_CCW_TRIS"/>
</enum>
+<enum name="a6xx_tex_filter"> <!-- same as a4xx? -->
+ <value name="A6XX_TEX_NEAREST" value="0"/>
+ <value name="A6XX_TEX_LINEAR" value="1"/>
+ <value name="A6XX_TEX_ANISO" value="2"/>
+ <value name="A6XX_TEX_CUBIC" value="3"/> <!-- a650 only -->
+</enum>
+
+<enum name="a6xx_tex_clamp"> <!-- same as a4xx? -->
+ <value name="A6XX_TEX_REPEAT" value="0"/>
+ <value name="A6XX_TEX_CLAMP_TO_EDGE" value="1"/>
+ <value name="A6XX_TEX_MIRROR_REPEAT" value="2"/>
+ <value name="A6XX_TEX_CLAMP_TO_BORDER" value="3"/>
+ <value name="A6XX_TEX_MIRROR_CLAMP" value="4"/>
+</enum>
+
+<enum name="a6xx_tex_aniso"> <!-- same as a4xx? -->
+ <value name="A6XX_TEX_ANISO_1" value="0"/>
+ <value name="A6XX_TEX_ANISO_2" value="1"/>
+ <value name="A6XX_TEX_ANISO_4" value="2"/>
+ <value name="A6XX_TEX_ANISO_8" value="3"/>
+ <value name="A6XX_TEX_ANISO_16" value="4"/>
+</enum>
+
+<enum name="a6xx_reduction_mode">
+ <value name="A6XX_REDUCTION_MODE_AVERAGE" value="0"/>
+ <value name="A6XX_REDUCTION_MODE_MIN" value="1"/>
+ <value name="A6XX_REDUCTION_MODE_MAX" value="2"/>
+</enum>
+
+<enum name="a6xx_fast_border_color">
+ <!-- R B G A -->
+ <value name="A6XX_BORDER_COLOR_0_0_0_0" value="0"/>
+ <value name="A6XX_BORDER_COLOR_0_0_0_1" value="1"/>
+ <value name="A6XX_BORDER_COLOR_1_1_1_0" value="2"/>
+ <value name="A6XX_BORDER_COLOR_1_1_1_1" value="3"/>
+</enum>
+
+<enum name="a6xx_tex_swiz"> <!-- same as a4xx? -->
+ <value name="A6XX_TEX_X" value="0"/>
+ <value name="A6XX_TEX_Y" value="1"/>
+ <value name="A6XX_TEX_Z" value="2"/>
+ <value name="A6XX_TEX_W" value="3"/>
+ <value name="A6XX_TEX_ZERO" value="4"/>
+ <value name="A6XX_TEX_ONE" value="5"/>
+</enum>
+
</database>
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml
index 3d2cc339b8f1..b15a242d974d 100644
--- a/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml
@@ -99,6 +99,10 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<bitfield name="GX_HM_GDSC_POWER_OFF" pos="6" type="boolean"/>
<bitfield name="GX_HM_CLK_OFF" pos="7" type="boolean"/>
</reg32>
+ <reg32 offset="0x50d0" name="GMU_SPTPRAC_PWR_CLK_STATUS" variants="A7XX">
+ <bitfield name="GX_HM_GDSC_POWER_OFF" pos="0" type="boolean"/>
+ <bitfield name="GX_HM_CLK_OFF" pos="1" type="boolean"/>
+ </reg32>
<reg32 offset="0x50e4" name="GMU_GPU_NAP_CTRL">
<bitfield name="HW_NAP_ENABLE" pos="0"/>
<bitfield name="SID" low="4" high="8"/>
@@ -127,6 +131,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<reg32 offset="0x5088" name="GMU_ALWAYS_ON_COUNTER_L"/>
<reg32 offset="0x5089" name="GMU_ALWAYS_ON_COUNTER_H"/>
<reg32 offset="0x50c3" name="GMU_GMU_PWR_COL_KEEPALIVE"/>
+ <reg32 offset="0x50c4" name="GMU_PWR_COL_PREEMPT_KEEPALIVE"/>
<reg32 offset="0x5180" name="GMU_HFI_CTRL_STATUS"/>
<reg32 offset="0x5181" name="GMU_HFI_VERSION_INFO"/>
<reg32 offset="0x5182" name="GMU_HFI_SFR_ADDR"/>
@@ -228,6 +233,12 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<reg32 offset="0x03ee" name="RSCC_TCS1_DRV0_STATUS"/>
<reg32 offset="0x0496" name="RSCC_TCS2_DRV0_STATUS"/>
<reg32 offset="0x053e" name="RSCC_TCS3_DRV0_STATUS"/>
+ <reg32 offset="0x05e6" name="RSCC_TCS4_DRV0_STATUS" variants="A7XX"/>
+ <reg32 offset="0x068e" name="RSCC_TCS5_DRV0_STATUS" variants="A7XX"/>
+ <reg32 offset="0x0736" name="RSCC_TCS6_DRV0_STATUS" variants="A7XX"/>
+ <reg32 offset="0x07de" name="RSCC_TCS7_DRV0_STATUS" variants="A7XX"/>
+ <reg32 offset="0x0886" name="RSCC_TCS8_DRV0_STATUS" variants="A7XX"/>
+ <reg32 offset="0x092e" name="RSCC_TCS9_DRV0_STATUS" variants="A7XX"/>
</domain>
</database>
diff --git a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
index 7abc08635495..0e10e1c6d263 100644
--- a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
@@ -120,12 +120,12 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<value name="LRZ_FLUSH" value="38" variants="A5XX-"/>
<value name="BLIT_OP_FILL_2D" value="39" variants="A5XX-"/>
<value name="BLIT_OP_COPY_2D" value="40" variants="A5XX-A6XX"/>
- <value name="UNK_40" value="40" variants="A7XX"/>
+ <value name="LRZ_CACHE_INVALIDATE" value="40" variants="A7XX"/>
<value name="LRZ_Q_CACHE_INVALIDATE" value="41" variants="A7XX"/>
<value name="BLIT_OP_SCALE_2D" value="42" variants="A5XX-"/>
<value name="CONTEXT_DONE_2D" value="43" variants="A5XX-"/>
- <value name="UNK_2C" value="44" variants="A5XX-"/>
- <value name="UNK_2D" value="45" variants="A5XX-"/>
+ <value name="VSC_BINNING_START" value="44" variants="A5XX-"/>
+ <value name="VSC_BINNING_END" value="45" variants="A5XX-"/>
<!-- a6xx events -->
<doc>
@@ -523,7 +523,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<!--
Seems to set the mode flags which control which CP_SET_DRAW_STATE
packets are executed, based on their ENABLE_MASK values
-
+
CP_SET_MODE w/ payload of 0x1 seems to cause CP_SET_DRAW_STATE
packets w/ ENABLE_MASK & 0x6 to execute immediately
-->
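Reading that description literally (with the same hedging as the doc itself, which only says "seems"): a CP_SET_MODE payload of 0x1 makes CP_SET_DRAW_STATE groups whose ENABLE_MASK intersects 0x6 run immediately. As a sketch:

def draw_state_runs_immediately(set_mode_payload, enable_mask):
    # per the doc above; both names here are illustrative
    return set_mode_payload == 0x1 and (enable_mask & 0x6) != 0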
@@ -640,8 +640,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<value name="CP_BV_BR_COUNT_OPS" value="0x1b" variants="A7XX-"/>
<doc> Clears, adds to local, or adds to global timestamp </doc>
<value name="CP_MODIFY_TIMESTAMP" value="0x1c" variants="A7XX-"/>
- <!-- similar to CP_CONTEXT_REG_BUNCH, but discards first two dwords?? -->
- <value name="CP_CONTEXT_REG_BUNCH2" value="0x5d" variants="A7XX-"/>
+ <value name="CP_NON_CONTEXT_REG_BUNCH" value="0x5d" variants="A7XX-"/>
<doc>
Write to a scratch memory that is read by CP_REG_TEST with
SOURCE_SCRATCH_MEM set. It's not the same scratch as scratch registers.
@@ -918,12 +917,6 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</reg32>
<stripe varset="chip" variants="A5XX-">
- <reg32 offset="4" name="4">
- <bitfield name="INDX_BASE_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="5" name="5">
- <bitfield name="INDX_BASE_HI" low="0" high="31"/>
- </reg32>
<reg64 offset="4" name="INDX_BASE" type="address"/>
<reg32 offset="6" name="6">
<!-- max # of elements in index buffer -->
@@ -1099,8 +1092,10 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<bitfield name="BINNING" pos="20" varset="chip" variants="A6XX-" type="boolean"/>
<bitfield name="GMEM" pos="21" varset="chip" variants="A6XX-" type="boolean"/>
<bitfield name="SYSMEM" pos="22" varset="chip" variants="A6XX-" type="boolean"/>
- <bitfield name="GROUP_ID" low="24" high="28" type="uint"/>
+ <!-- high bit is 28 until a750: -->
+ <bitfield name="GROUP_ID" low="24" high="29" type="uint"/>
</reg32>
+ <reg64 offset="1" name="ADDR" type="address"/>
<reg32 offset="1" name="1">
<bitfield name="ADDR_LO" low="0" high="31" type="hex"/>
</reg32>
@@ -1166,26 +1161,11 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</reg32>
<stripe varset="a7xx_abs_mask_mode" variants="NO_ABS_MASK">
<!-- BIN_DATA_ADDR -> VSC_PIPE[p].DATA_ADDRESS -->
- <reg32 offset="1" name="1">
- <bitfield name="BIN_DATA_ADDR_LO" low="0" high="31" type="hex"/>
- </reg32>
- <reg32 offset="2" name="2">
- <bitfield name="BIN_DATA_ADDR_HI" low="0" high="31" type="hex"/>
- </reg32>
+ <reg64 offset="1" name="BIN_DATA_ADDR" type="address"/>
<!-- BIN_SIZE_ADDRESS -> VSC_SIZE_ADDRESS + (p * 4)-->
- <reg32 offset="3" name="3">
- <bitfield name="BIN_SIZE_ADDRESS_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="4" name="4">
- <bitfield name="BIN_SIZE_ADDRESS_HI" low="0" high="31"/>
- </reg32>
+ <reg64 offset="3" name="BIN_SIZE_ADDR" type="address"/>
<!-- new on a6xx, where BIN_DATA_ADDR is the DRAW_STRM: -->
- <reg32 offset="5" name="5">
- <bitfield name="BIN_PRIM_STRM_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="6" name="6">
- <bitfield name="BIN_PRIM_STRM_HI" low="0" high="31"/>
- </reg32>
+ <reg64 offset="5" name="BIN_PRIM_STRM" type="address"/>
<!--
a7xx adds a few more addresses to the end of the pkt
-->
@@ -1195,26 +1175,11 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<stripe varset="a7xx_abs_mask_mode" variants="ABS_MASK">
<reg32 offset="1" name="ABS_MASK"/>
<!-- BIN_DATA_ADDR -> VSC_PIPE[p].DATA_ADDRESS -->
- <reg32 offset="2" name="2">
- <bitfield name="BIN_DATA_ADDR_LO" low="0" high="31" type="hex"/>
- </reg32>
- <reg32 offset="3" name="3">
- <bitfield name="BIN_DATA_ADDR_HI" low="0" high="31" type="hex"/>
- </reg32>
+ <reg64 offset="2" name="BIN_DATA_ADDR" type="address"/>
<!-- BIN_SIZE_ADDRESS -> VSC_SIZE_ADDRESS + (p * 4)-->
- <reg32 offset="4" name="4">
- <bitfield name="BIN_SIZE_ADDRESS_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="5" name="5">
- <bitfield name="BIN_SIZE_ADDRESS_HI" low="0" high="31"/>
- </reg32>
+ <reg64 offset="4" name="BIN_SIZE_ADDR" type="address"/>
<!-- new on a6xx, where BIN_DATA_ADDR is the DRAW_STRM: -->
- <reg32 offset="6" name="6">
- <bitfield name="BIN_PRIM_STRM_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="7" name="7">
- <bitfield name="BIN_PRIM_STRM_HI" low="0" high="31"/>
- </reg32>
+ <reg64 offset="6" name="BIN_PRIM_STRM" type="address"/>
<!--
a7xx adds a few more addresses to the end of the pkt
-->
@@ -1300,7 +1265,7 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</reg32>
</domain>
-<domain name="CP_REG_TO_MEM" width="32">
+<domain name="CP_REG_TO_MEM" width="32" prefix="chip">
<reg32 offset="0" name="0">
<bitfield name="REG" low="0" high="17" type="hex"/>
<!-- number of registers/dwords copied is max(CNT, 1). -->
@@ -1308,12 +1273,12 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<bitfield name="64B" pos="30" type="boolean"/>
<bitfield name="ACCUMULATE" pos="31" type="boolean"/>
</reg32>
- <reg32 offset="1" name="1">
- <bitfield name="DEST" low="0" high="31"/>
- </reg32>
- <reg32 offset="2" name="2" varset="chip" variants="A5XX-">
- <bitfield name="DEST_HI" low="0" high="31"/>
- </reg32>
+ <stripe varset="chip" variants="A2XX-A4XX">
+ <reg32 offset="1" name="DEST" type="address"/>
+ </stripe>
+ <stripe varset="chip" variants="A5XX-">
+ <reg64 offset="1" name="DEST" type="address"/>
+ </stripe>
</domain>
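The stripes above make the generational split explicit: DEST is a single dword through a4xx and a lo/hi pair from a5xx on. A sketch of the resulting payload, assuming the usual little-endian lo/hi dword order for a reg64 (the integer gen parameter is an illustrative stand-in for the varset):

def cp_reg_to_mem_payload(gen, header, dest):
    if gen < 5:  # A2XX-A4XX stripe: 32-bit DEST
        return [header, dest & 0xffffffff]
    return [header, dest & 0xffffffff, (dest >> 32) & 0xffffffff]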
<domain name="CP_REG_TO_MEM_OFFSET_REG" width="32">
@@ -1329,12 +1294,7 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<bitfield name="64B" pos="30" type="boolean"/>
<bitfield name="ACCUMULATE" pos="31" type="boolean"/>
</reg32>
- <reg32 offset="1" name="1">
- <bitfield name="DEST" low="0" high="31"/>
- </reg32>
- <reg32 offset="2" name="2" varset="chip" variants="A5XX-">
- <bitfield name="DEST_HI" low="0" high="31"/>
- </reg32>
+ <reg64 offset="1" name="DEST" type="waddress"/>
<reg32 offset="3" name="3">
<bitfield name="OFFSET0" low="0" high="17" type="hex"/>
<bitfield name="OFFSET0_SCRATCH" pos="19" type="boolean"/>
@@ -1354,18 +1314,8 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<bitfield name="64B" pos="30" type="boolean"/>
<bitfield name="ACCUMULATE" pos="31" type="boolean"/>
</reg32>
- <reg32 offset="1" name="1">
- <bitfield name="DEST" low="0" high="31"/>
- </reg32>
- <reg32 offset="2" name="2" varset="chip" variants="A5XX-">
- <bitfield name="DEST_HI" low="0" high="31"/>
- </reg32>
- <reg32 offset="3" name="3">
- <bitfield name="OFFSET_LO" low="0" high="31" type="hex"/>
- </reg32>
- <reg32 offset="4" name="4">
- <bitfield name="OFFSET_HI" low="0" high="31" type="hex"/>
- </reg32>
+ <reg64 offset="1" name="DEST" type="waddress"/>
+ <reg64 offset="3" name="OFFSET" type="waddress"/>
</domain>
<domain name="CP_MEM_TO_REG" width="32">
@@ -1378,12 +1328,12 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<!-- does the same thing as CP_MEM_TO_MEM::UNK31 -->
<bitfield name="UNK31" pos="31" type="boolean"/>
</reg32>
- <reg32 offset="1" name="1">
- <bitfield name="SRC" low="0" high="31"/>
- </reg32>
- <reg32 offset="2" name="2" varset="chip" variants="A5XX-">
- <bitfield name="SRC_HI" low="0" high="31"/>
- </reg32>
+ <stripe varset="chip" variants="A2XX-A4XX">
+ <reg32 offset="1" name="SRC" type="address"/>
+ </stripe>
+ <stripe varset="chip" variants="A5XX-">
+ <reg64 offset="1" name="SRC" type="address"/>
+ </stripe>
</domain>
<domain name="CP_MEM_TO_MEM" width="32">
@@ -1403,6 +1353,10 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<!-- some other kind of wait -->
<bitfield name="UNK31" pos="31" type="boolean"/>
</reg32>
+ <reg64 offset="1" name="DST" type="waddress"/>
+ <reg64 offset="3" name="SRC_A" type="address"/>
+ <reg64 offset="5" name="SRC_B" type="address"/>
+ <reg64 offset="7" name="SRC_C" type="address"/>
<!--
followed by a sequence of addresses.. the first is the
destination and the rest are N src addresses which are
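With those reg64s the fixed prefix of the packet is pinned down: header at dword 0, DST at dword 1, SRC_A/B/C at dwords 3/5/7, each address split lo then hi; the comment above suggests the source list can continue past that. A sketch of the prefix layout:

def cp_mem_to_mem_prefix(header, dst, src_a, src_b, src_c):
    words = [header]
    for addr in (dst, src_a, src_b, src_c):
        words += [addr & 0xffffffff, (addr >> 32) & 0xffffffff]
    return words  # lo halves land at dwords 1, 3, 5, 7, matching the offsets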
@@ -1461,12 +1415,12 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</domain>
<domain name="CP_MEM_WRITE" width="32">
- <reg32 offset="0" name="0">
- <bitfield name="ADDR_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="1" name="1">
- <bitfield name="ADDR_HI" low="0" high="31"/>
- </reg32>
+ <stripe varset="chip" variants="A2XX-A4XX">
+ <reg32 offset="0" name="ADDR" type="address"/>
+ </stripe>
+ <stripe varset="chip" variants="A5XX-">
+ <reg64 offset="0" name="ADDR" type="address"/>
+ </stripe>
<!-- followed by the DWORDs to write -->
</domain>
@@ -1518,24 +1472,14 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<bitfield name="POLL" low="4" high="5" type="poll_memory_type"/>
<bitfield name="WRITE_MEMORY" pos="8" type="boolean"/>
</reg32>
- <reg32 offset="1" name="1">
- <bitfield name="POLL_ADDR_LO" low="0" high="31" type="hex"/>
- </reg32>
- <reg32 offset="2" name="2">
- <bitfield name="POLL_ADDR_HI" low="0" high="31" type="hex"/>
- </reg32>
+ <reg64 offset="1" name="POLL_ADDR" type="address"/>
<reg32 offset="3" name="3">
<bitfield name="REF" low="0" high="31"/>
</reg32>
<reg32 offset="4" name="4">
<bitfield name="MASK" low="0" high="31"/>
</reg32>
- <reg32 offset="5" name="5">
- <bitfield name="WRITE_ADDR_LO" low="0" high="31" type="hex"/>
- </reg32>
- <reg32 offset="6" name="6">
- <bitfield name="WRITE_ADDR_HI" low="0" high="31" type="hex"/>
- </reg32>
+ <reg64 offset="5" name="WRITE_ADDR" type="waddress"/>
<reg32 offset="7" name="7">
<bitfield name="WRITE_DATA" low="0" high="31"/>
</reg32>
@@ -1550,12 +1494,7 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<!-- Reserved for flags, presumably? Unused in FW -->
<bitfield name="RESERVED" low="0" high="31" type="hex"/>
</reg32>
- <reg32 offset="1" name="1">
- <bitfield name="POLL_ADDR_LO" low="0" high="31" type="hex"/>
- </reg32>
- <reg32 offset="2" name="2">
- <bitfield name="POLL_ADDR_HI" low="0" high="31" type="hex"/>
- </reg32>
+ <reg64 offset="1" name="POLL_ADDR" type="address"/>
<reg32 offset="3" name="3">
<bitfield name="REF" low="0" high="31"/>
</reg32>
@@ -1573,12 +1512,7 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<bitfield name="POLL" low="4" high="5" type="poll_memory_type"/>
<bitfield name="WRITE_MEMORY" pos="8" type="boolean"/>
</reg32>
- <reg32 offset="1" name="1">
- <bitfield name="POLL_ADDR_LO" low="0" high="31" type="hex"/>
- </reg32>
- <reg32 offset="2" name="2">
- <bitfield name="POLL_ADDR_HI" low="0" high="31" type="hex"/>
- </reg32>
+ <reg64 offset="1" name="POLL_ADDR" type="address"/>
<reg32 offset="3" name="3">
<bitfield name="REF" low="0" high="31"/>
</reg32>
@@ -1712,12 +1646,7 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
TODO what is gpuaddr for, seems to be all 0's.. maybe needed for
context switch?
-->
- <reg32 offset="1" name="1">
- <bitfield name="ADDR_0_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="2" name="2">
- <bitfield name="ADDR_0_HI" low="0" high="31"/>
- </reg32>
+ <reg64 offset="1" name="ADDR" type="waddress"/>
<reg32 offset="3" name="3">
<!-- ??? -->
</reg32>
@@ -1832,9 +1761,7 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<reg32 offset="0" name="0">
</reg32>
<stripe varset="chip" variants="A4XX">
- <reg32 offset="1" name="1">
- <bitfield name="ADDR" low="0" high="31"/>
- </reg32>
+ <reg32 offset="1" name="ADDR" type="address"/>
<reg32 offset="2" name="2">
<!-- localsize is value minus one: -->
<bitfield name="LOCALSIZEX" low="2" high="11" type="uint"/>
@@ -1843,12 +1770,7 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</reg32>
</stripe>
<stripe varset="chip" variants="A5XX-">
- <reg32 offset="1" name="1">
- <bitfield name="ADDR_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="2" name="2">
- <bitfield name="ADDR_HI" low="0" high="31"/>
- </reg32>
+ <reg64 offset="1" name="ADDR" type="address"/>
<reg32 offset="3" name="3">
<!-- localsize is value minus one: -->
<bitfield name="LOCALSIZEX" low="2" high="11" type="uint"/>
@@ -2161,12 +2083,7 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</doc>
</value>
</enum>
- <reg32 offset="0" name="0">
- <bitfield name="ADDR_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="1" name="1">
- <bitfield name="ADDR_HI" low="0" high="31"/>
- </reg32>
+ <reg64 offset="0" name="ADDR" type="address"/>
<reg32 offset="2" name="2">
<bitfield name="DWORDS" low="0" high="19" type="uint"/>
<bitfield name="TYPE" low="20" high="21" type="amble_type"/>
diff --git a/drivers/gpu/drm/msm/registers/display/dsi.xml b/drivers/gpu/drm/msm/registers/display/dsi.xml
index 501ffc585a9f..c7a7b633d747 100644
--- a/drivers/gpu/drm/msm/registers/display/dsi.xml
+++ b/drivers/gpu/drm/msm/registers/display/dsi.xml
@@ -159,28 +159,28 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<bitfield name="RGB_SWAP" low="12" high="14" type="dsi_rgb_swap"/>
</reg32>
<reg32 offset="0x00020" name="ACTIVE_H">
- <bitfield name="START" low="0" high="11" type="uint"/>
- <bitfield name="END" low="16" high="27" type="uint"/>
+ <bitfield name="START" low="0" high="15" type="uint"/>
+ <bitfield name="END" low="16" high="31" type="uint"/>
</reg32>
<reg32 offset="0x00024" name="ACTIVE_V">
- <bitfield name="START" low="0" high="11" type="uint"/>
- <bitfield name="END" low="16" high="27" type="uint"/>
+ <bitfield name="START" low="0" high="15" type="uint"/>
+ <bitfield name="END" low="16" high="31" type="uint"/>
</reg32>
<reg32 offset="0x00028" name="TOTAL">
- <bitfield name="H_TOTAL" low="0" high="11" type="uint"/>
- <bitfield name="V_TOTAL" low="16" high="27" type="uint"/>
+ <bitfield name="H_TOTAL" low="0" high="15" type="uint"/>
+ <bitfield name="V_TOTAL" low="16" high="31" type="uint"/>
</reg32>
<reg32 offset="0x0002c" name="ACTIVE_HSYNC">
- <bitfield name="START" low="0" high="11" type="uint"/>
- <bitfield name="END" low="16" high="27" type="uint"/>
+ <bitfield name="START" low="0" high="15" type="uint"/>
+ <bitfield name="END" low="16" high="31" type="uint"/>
</reg32>
<reg32 offset="0x00030" name="ACTIVE_VSYNC_HPOS">
- <bitfield name="START" low="0" high="11" type="uint"/>
- <bitfield name="END" low="16" high="27" type="uint"/>
+ <bitfield name="START" low="0" high="15" type="uint"/>
+ <bitfield name="END" low="16" high="31" type="uint"/>
</reg32>
<reg32 offset="0x00034" name="ACTIVE_VSYNC_VPOS">
- <bitfield name="START" low="0" high="11" type="uint"/>
- <bitfield name="END" low="16" high="27" type="uint"/>
+ <bitfield name="START" low="0" high="15" type="uint"/>
+ <bitfield name="END" low="16" high="31" type="uint"/>
</reg32>
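All of the timing registers in this hunk share the widened layout: the first field in bits 0..15 and the second in bits 16..31, up from 12 bits each. Packing one of them is then just (hypothetical helper):

def dsi_timing(first, second):
    # e.g. ACTIVE_H: first=START, second=END; TOTAL: first=H_TOTAL, second=V_TOTAL
    assert 0 <= first <= 0xffff and 0 <= second <= 0xffff
    return first | (second << 16)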
<reg32 offset="0x00038" name="CMD_DMA_CTRL">
@@ -209,8 +209,8 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<bitfield name="WORD_COUNT" low="16" high="31" type="uint"/>
</reg32>
<reg32 offset="0x00058" name="CMD_MDP_STREAM0_TOTAL">
- <bitfield name="H_TOTAL" low="0" high="11" type="uint"/>
- <bitfield name="V_TOTAL" low="16" high="27" type="uint"/>
+ <bitfield name="H_TOTAL" low="0" high="15" type="uint"/>
+ <bitfield name="V_TOTAL" low="16" high="31" type="uint"/>
</reg32>
<reg32 offset="0x0005c" name="CMD_MDP_STREAM1_CTRL">
<bitfield name="DATA_TYPE" low="0" high="5" type="uint"/>
diff --git a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
index 4e5ac0f25dea..f41516dd0567 100644
--- a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
+++ b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
@@ -22,7 +22,16 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<reg32 offset="0x00018" name="GLBL_CTRL"/>
<reg32 offset="0x0001c" name="RBUF_CTRL"/>
<reg32 offset="0x00020" name="VREG_CTRL_0"/>
- <reg32 offset="0x00024" name="CTRL_0"/>
+ <reg32 offset="0x00024" name="CTRL_0">
+ <bitfield name="CLKSL_SHUTDOWNB" pos="7" type="boolean"/>
+ <bitfield name="DIGTOP_PWRDN_B" pos="6" type="boolean"/>
+ <bitfield name="PLL_SHUTDOWNB" pos="5" type="boolean"/>
+ <bitfield name="DLN3_SHUTDOWNB" pos="4" type="boolean"/>
+ <bitfield name="DLN2_SHUTDOWNB" pos="3" type="boolean"/>
+ <bitfield name="CLK_SHUTDOWNB" pos="2" type="boolean"/>
+ <bitfield name="DLN1_SHUTDOWNB" pos="1" type="boolean"/>
+ <bitfield name="DLN0_SHUTDOWNB" pos="0" type="boolean"/>
+ </reg32>
<reg32 offset="0x00028" name="CTRL_1"/>
<reg32 offset="0x0002c" name="CTRL_2"/>
<reg32 offset="0x00030" name="CTRL_3"/>
diff --git a/drivers/gpu/drm/msm/registers/gen_header.py b/drivers/gpu/drm/msm/registers/gen_header.py
index a409404627c7..1d603dadfabd 100644
--- a/drivers/gpu/drm/msm/registers/gen_header.py
+++ b/drivers/gpu/drm/msm/registers/gen_header.py
@@ -11,7 +11,6 @@ import collections
import argparse
import time
import datetime
-import re
class Error(Exception):
def __init__(self, message):
@@ -31,7 +30,7 @@ class Enum(object):
def names(self):
return [n for (n, value) in self.values]
- def dump(self):
+ def dump(self, is_deprecated):
use_hex = False
for (name, value) in self.values:
if value > 0x1000:
@@ -45,7 +44,7 @@ class Enum(object):
print("\t%s = %d," % (name, value))
print("};\n")
- def dump_pack_struct(self):
+ def dump_pack_struct(self, is_deprecated):
pass
class Field(object):
@@ -70,11 +69,11 @@ class Field(object):
raise parser.error("booleans should be 1 bit fields")
elif self.type == "float" and not (high - low == 31 or high - low == 15):
raise parser.error("floats should be 16 or 32 bit fields")
- elif not self.type in builtin_types and not self.type in parser.enums:
+ elif self.type not in builtin_types and self.type not in parser.enums:
raise parser.error("unknown type '%s'" % self.type)
def ctype(self, var_name):
- if self.type == None:
+ if self.type is None:
type = "uint32_t"
val = var_name
elif self.type == "boolean":
@@ -124,7 +123,7 @@ def field_name(reg, f):
name = f.name.lower()
else:
# We hit this path when a reg is defined with no bitset fields, ie.
- # <reg32 offset="0x88db" name="RB_BLIT_DST_ARRAY_PITCH" low="0" high="28" shr="6" type="uint"/>
+ # <reg32 offset="0x88db" name="RB_RESOLVE_SYSTEM_BUFFER_ARRAY_PITCH" low="0" high="28" shr="6" type="uint"/>
name = reg.name.lower()
if (name in [ "double", "float", "int" ]) or not (name[0].isalpha()):
@@ -146,10 +145,23 @@ def indices_strides(indices):
"%s(i%d)" % (offset, idx)
for (idx, (ctype, stride, offset)) in enumerate(indices)])
+def is_number(str):
+ try:
+ int(str)
+ return True
+ except ValueError:
+ return False
+
+def sanitize_variant(variant):
+ if variant and "-" in variant:
+ return variant[:variant.index("-")]
+ return variant
+
class Bitset(object):
def __init__(self, name, template):
self.name = name
self.inline = False
+ self.reg = None
if template:
self.fields = template.fields[:]
else:
@@ -175,11 +187,7 @@ class Bitset(object):
print("#endif\n")
print(" return (struct fd_reg_pair) {")
- if reg.array:
- print(" .reg = REG_%s(__i)," % reg.full_name)
- else:
- print(" .reg = REG_%s," % reg.full_name)
-
+ print(" .reg = (uint32_t)%s," % reg.reg_offset())
print(" .value =")
for f in self.fields:
if f.type in [ "address", "waddress" ]:
@@ -204,7 +212,7 @@ class Bitset(object):
print(" };")
- def dump_pack_struct(self, reg=None):
+ def dump_pack_struct(self, is_deprecated, reg=None):
if not reg:
return
@@ -229,12 +237,15 @@ class Bitset(object):
tab_to(" uint32_t", "dword;")
print("};\n")
+ depcrstr = ""
+ if is_deprecated:
+ depcrstr = " FD_DEPRECATED"
if reg.array:
- print("static inline struct fd_reg_pair\npack_%s(uint32_t __i, struct %s fields)\n{" %
- (prefix, prefix))
+ print("static inline%s struct fd_reg_pair\npack_%s(uint32_t __i, struct %s fields)\n{" %
+ (depcrstr, prefix, prefix))
else:
- print("static inline struct fd_reg_pair\npack_%s(struct %s fields)\n{" %
- (prefix, prefix))
+ print("static inline%s struct fd_reg_pair\npack_%s(struct %s fields)\n{" %
+ (depcrstr, prefix, prefix))
self.dump_regpair_builder(reg)
@@ -253,18 +264,23 @@ class Bitset(object):
(prefix, prefix, prefix, skip))
- def dump(self, prefix=None):
- if prefix == None:
+ def dump(self, is_deprecated, prefix=None):
+ if prefix is None:
prefix = self.name
+ if self.reg and self.reg.bit_size == 64:
+ print("static inline uint32_t %s_LO(uint32_t val)\n{" % prefix)
+ print("\treturn val;\n}")
+ print("static inline uint32_t %s_HI(uint32_t val)\n{" % prefix)
+ print("\treturn val;\n}")
for f in self.fields:
if f.name:
name = prefix + "_" + f.name
else:
name = prefix
- if not f.name and f.low == 0 and f.shr == 0 and not f.type in ["float", "fixed", "ufixed"]:
+ if not f.name and f.low == 0 and f.shr == 0 and f.type not in ["float", "fixed", "ufixed"]:
pass
- elif f.type == "boolean" or (f.type == None and f.low == f.high):
+ elif f.type == "boolean" or (f.type is None and f.low == f.high):
tab_to("#define %s" % name, "0x%08x" % (1 << f.low))
else:
tab_to("#define %s__MASK" % name, "0x%08x" % mask(f.low, f.high))
@@ -286,6 +302,7 @@ class Array(object):
self.domain = domain
self.variant = variant
self.parent = parent
+ self.children = []
if self.parent:
self.name = self.parent.name + "_" + self.local_name
else:
@@ -337,12 +354,15 @@ class Array(object):
offset += self.parent.total_offset()
return offset
- def dump(self):
+ def dump(self, is_deprecated):
+ depcrstr = ""
+ if is_deprecated:
+ depcrstr = " FD_DEPRECATED"
proto = indices_varlist(self.indices())
strides = indices_strides(self.indices())
array_offset = self.total_offset()
if self.fixed_offsets:
- print("static inline uint32_t __offset_%s(%s idx)" % (self.local_name, self.index_ctype()))
+ print("static inline%s uint32_t __offset_%s(%s idx)" % (depcrstr, self.local_name, self.index_ctype()))
print("{\n\tswitch (idx) {")
if self.index_type:
for val, offset in zip(self.index_type.names(), self.offsets):
@@ -357,7 +377,7 @@ class Array(object):
else:
tab_to("#define REG_%s_%s(%s)" % (self.domain, self.name, proto), "(0x%08x + %s )\n" % (array_offset, strides))
- def dump_pack_struct(self):
+ def dump_pack_struct(self, is_deprecated):
pass
def dump_regpair_builder(self):
@@ -373,6 +393,7 @@ class Reg(object):
self.bit_size = bit_size
if array:
self.name = array.name + "_" + self.name
+ array.children.append(self)
self.full_name = self.domain + "_" + self.name
if "stride" in attrs:
self.stride = int(attrs["stride"], 0)
@@ -397,25 +418,34 @@ class Reg(object):
else:
return self.offset
- def dump(self):
+ def reg_offset(self):
+ if self.array:
+ offset = self.array.offset + self.offset
+ return "(0x%08x + 0x%x*__i)" % (offset, self.array.stride)
+ return "0x%08x" % self.offset
+
+ def dump(self, is_deprecated):
+ depcrstr = ""
+ if is_deprecated:
+ depcrstr = " FD_DEPRECATED "
proto = indices_prototype(self.indices())
strides = indices_strides(self.indices())
offset = self.total_offset()
if proto == '':
tab_to("#define REG_%s" % self.full_name, "0x%08x" % offset)
else:
- print("static inline uint32_t REG_%s(%s) { return 0x%08x + %s; }" % (self.full_name, proto, offset, strides))
+ print("static inline%s uint32_t REG_%s(%s) { return 0x%08x + %s; }" % (depcrstr, self.full_name, proto, offset, strides))
if self.bitset.inline:
- self.bitset.dump(self.full_name)
+ self.bitset.dump(is_deprecated, self.full_name)
+ print("")
- def dump_pack_struct(self):
+ def dump_pack_struct(self, is_deprecated):
if self.bitset.inline:
- self.bitset.dump_pack_struct(self)
+ self.bitset.dump_pack_struct(is_deprecated, self)
def dump_regpair_builder(self):
- if self.bitset.inline:
- self.bitset.dump_regpair_builder(self)
+ self.bitset.dump_regpair_builder(self)
def dump_py(self):
print("\tREG_%s = 0x%08x" % (self.full_name, self.offset))
@@ -444,9 +474,6 @@ class Parser(object):
self.variants = set()
self.file = []
self.xml_files = []
- self.copyright_year = None
- self.authors = []
- self.license = None
def error(self, message):
parser, filename = self.stack[-1]
@@ -454,7 +481,7 @@ class Parser(object):
def prefix(self, variant=None):
if self.current_prefix_type == "variant" and variant:
- return variant
+ return sanitize_variant(variant)
elif self.current_stripe:
return self.current_stripe + "_" + self.current_domain
elif self.current_prefix:
@@ -500,15 +527,22 @@ class Parser(object):
return varset
def parse_variants(self, attrs):
- if not "variants" in attrs:
+ if "variants" not in attrs:
return None
- variant = attrs["variants"].split(",")[0]
- if "-" in variant:
- variant = variant[:variant.index("-")]
+ variant = attrs["variants"].split(",")[0]
varset = self.parse_varset(attrs)
- assert varset.has_name(variant)
+ if "-" in variant:
+ # if we have a range, validate that both the start and end
+ # of the range are valid enums:
+ start = variant[:variant.index("-")]
+ end = variant[variant.index("-") + 1:]
+ assert varset.has_name(start)
+ if end != "":
+ assert varset.has_name(end)
+ else:
+ assert varset.has_name(variant)
return variant
@@ -572,9 +606,6 @@ class Parser(object):
error_str = str(xmlschema.error_log.filter_from_errors()[0])
raise self.error("Schema validation failed for: " + filename + "\n" + error_str)
except ImportError as e:
- if self.validate:
- raise e
-
print("lxml not found, skipping validation", file=sys.stderr)
def do_parse(self, filename):
@@ -620,6 +651,7 @@ class Parser(object):
self.current_reg = Reg(attrs, self.prefix(variant), self.current_array, bit_size)
self.current_reg.bitset = self.current_bitset
+ self.current_bitset.reg = self.current_reg
if len(self.stack) == 1:
self.file.append(self.current_reg)
@@ -643,7 +675,7 @@ class Parser(object):
elif name == "domain":
self.current_domain = attrs["name"]
if "prefix" in attrs:
- self.current_prefix = self.parse_variants(attrs)
+ self.current_prefix = sanitize_variant(self.parse_variants(attrs))
self.current_prefix_type = attrs["prefix"]
else:
self.current_prefix = None
@@ -651,7 +683,7 @@ class Parser(object):
if "varset" in attrs:
self.current_varset = self.enums[attrs["varset"]]
elif name == "stripe":
- self.current_stripe = self.parse_variants(attrs)
+ self.current_stripe = sanitize_variant(self.parse_variants(attrs))
elif name == "enum":
self.current_enum_value = 0
self.current_enum = Enum(attrs["name"])
@@ -686,10 +718,6 @@ class Parser(object):
self.parse_field(attrs["name"], attrs)
elif name == "database":
self.do_validate(attrs["xsi:schemaLocation"])
- elif name == "copyright":
- self.copyright_year = attrs["year"]
- elif name == "author":
- self.authors.append(attrs["name"] + " <" + attrs["email"] + "> " + attrs["name"])
def end_element(self, name):
if name == "domain":
@@ -703,11 +731,16 @@ class Parser(object):
elif name == "reg32":
self.current_reg = None
elif name == "array":
+ # if the array has no Reg children, push an implicit reg32:
+ if len(self.current_array.children) == 0:
+ attrs = {
+ "name": "REG",
+ "offset": "0",
+ }
+ self.parse_reg(attrs, 32)
self.current_array = self.current_array.parent
elif name == "enum":
self.current_enum = None
- elif name == "license":
- self.license = self.cdata
def character_data(self, data):
self.cdata += data
@@ -720,10 +753,10 @@ class Parser(object):
if variants:
for variant, vreg in variants.items():
if reg == vreg:
- d[(usage, variant)].append(reg)
+ d[(usage, sanitize_variant(variant))].append(reg)
else:
for variant in self.variants:
- d[(usage, variant)].append(reg)
+ d[(usage, sanitize_variant(variant))].append(reg)
print("#ifdef __cplusplus")
@@ -753,6 +786,9 @@ class Parser(object):
print("#endif")
+ def has_variants(self, reg):
+ return reg.name in self.variant_regs and not is_number(reg.name) and not is_number(reg.name[1:])
+
def dump(self):
enums = []
bitsets = []
@@ -766,7 +802,7 @@ class Parser(object):
regs.append(e)
for e in enums + bitsets + regs:
- e.dump()
+ e.dump(self.has_variants(e))
self.dump_reg_usages()
@@ -782,8 +818,7 @@ class Parser(object):
def dump_reg_variants(self, regname, variants):
- # Don't bother for things that only have a single variant:
- if len(variants) == 1:
+ if is_number(regname) or is_number(regname[1:]):
return
print("#ifdef __cplusplus")
print("struct __%s {" % regname)
@@ -834,11 +869,20 @@ class Parser(object):
xtravar = "__i, "
print("__%s(%sstruct __%s fields) {" % (regname, xtra, regname))
for variant in variants.keys():
- print(" if (%s == %s) {" % (varenum.upper(), variant))
+ if "-" in variant:
+ start = variant[:variant.index("-")]
+ end = variant[variant.index("-") + 1:]
+ if end != "":
+ print(" if ((%s >= %s) && (%s <= %s)) {" % (varenum.upper(), start, varenum.upper(), end))
+ else:
+ print(" if (%s >= %s) {" % (varenum.upper(), start))
+ else:
+ print(" if (%s == %s) {" % (varenum.upper(), variant))
reg = variants[variant]
reg.dump_regpair_builder()
print(" } else")
print(" assert(!\"invalid variant\");")
+ print(" return (struct fd_reg_pair){};")
print("}")
if bit_size == 64:
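A sketch of the C this range handling emits, with illustrative variant names (the real ones come from the XML varset): a closed range such as "A6XX-A7XX" checks both ends, an open-ended range such as "A8XX-" checks only the lower bound, and the new trailing return quiets the compiler's end-of-non-void-function warning:

    if ((CHIP >= A6XX) && (CHIP <= A7XX)) {   /* closed range "A6XX-A7XX" */
        /* regpair builder for this variant */
    } else if (CHIP >= A8XX) {                /* open-ended range "A8XX-" */
        /* regpair builder for this variant */
    } else
        assert(!"invalid variant");
    return (struct fd_reg_pair){};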
@@ -851,7 +895,7 @@ class Parser(object):
def dump_structs(self):
for e in self.file:
- e.dump_pack_struct()
+ e.dump_pack_struct(self.has_variants(e))
for regname in self.variant_regs:
self.dump_reg_variants(regname, self.variant_regs[regname])
@@ -868,33 +912,7 @@ def dump_c(args, guard, func):
print("#ifndef %s\n#define %s\n" % (guard, guard))
- print("""/* Autogenerated file, DO NOT EDIT manually!
-
-This file was generated by the rules-ng-ng gen_header.py tool in this git repository:
-http://gitlab.freedesktop.org/mesa/mesa/
-git clone https://gitlab.freedesktop.org/mesa/mesa.git
-
-The rules-ng-ng source files this header was generated from are:
-""")
- maxlen = 0
- for filepath in p.xml_files:
- new_filepath = re.sub("^.+drivers","drivers",filepath)
- maxlen = max(maxlen, len(new_filepath))
- for filepath in p.xml_files:
- pad = " " * (maxlen - len(new_filepath))
- filesize = str(os.path.getsize(filepath))
- filesize = " " * (7 - len(filesize)) + filesize
- filetime = time.ctime(os.path.getmtime(filepath))
- print("- " + new_filepath + pad + " (" + filesize + " bytes, from <stripped>)")
- if p.copyright_year:
- current_year = str(datetime.date.today().year)
- print()
- print("Copyright (C) %s-%s by the following authors:" % (p.copyright_year, current_year))
- for author in p.authors:
- print("- " + author)
- if p.license:
- print(p.license)
- print("*/")
+ print("/* Autogenerated file, DO NOT EDIT manually! */")
print()
print("#ifdef __KERNEL__")
@@ -912,9 +930,20 @@ The rules-ng-ng source files this header was generated from are:
print("#endif")
print()
+ print("#ifndef FD_NO_DEPRECATED_PACK")
+ print("#define FD_DEPRECATED __attribute__((deprecated))")
+ print("#else")
+ print("#define FD_DEPRECATED")
+ print("#endif")
+ print()
+
func(p)
- print("\n#endif /* %s */" % guard)
+ print()
+ print("#undef FD_DEPRECATED")
+ print()
+
+ print("#endif /* %s */" % guard)
def dump_c_defines(args):
@@ -931,7 +960,7 @@ def dump_py_defines(args):
p = Parser()
try:
- p.parse(args.rnn, args.xml)
+ p.parse(args.rnn, args.xml, args.validate)
except Error as e:
print(e, file=sys.stderr)
exit(1)
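The net effect on the generated header is a deprecation guard around the old pack helpers. A sketch of the emitted C, with a made-up register name and offset:

    #ifndef FD_NO_DEPRECATED_PACK
    #define FD_DEPRECATED __attribute__((deprecated))
    #else
    #define FD_DEPRECATED
    #endif

    /* pack helpers for registers whose layout varies per chip are tagged,
     * steering callers toward the variant-aware builders: */
    static inline FD_DEPRECATED struct fd_reg_pair
    pack_A6XX_EXAMPLE(struct A6XX_EXAMPLE fields)
    {
        return (struct fd_reg_pair) {
            .reg   = (uint32_t)0x00008800, /* reg_offset() emits the raw offset now */
            .value = fields.dword,
        };
    }

A consumer that still wants the old packers without warnings can define FD_NO_DEPRECATED_PACK before including the header, which turns FD_DEPRECATED into a no-op; the #undef emitted after func(p) keeps the macro from leaking out of the generated header.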
diff --git a/drivers/gpu/drm/mxsfb/lcdif_kms.c b/drivers/gpu/drm/mxsfb/lcdif_kms.c
index dbd42cc1da87..1c3b33be6c40 100644
--- a/drivers/gpu/drm/mxsfb/lcdif_kms.c
+++ b/drivers/gpu/drm/mxsfb/lcdif_kms.c
@@ -433,7 +433,6 @@ static int lcdif_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_bridge_state *bridge_state;
- struct drm_bridge *bridge;
u32 bus_format, bus_flags;
bool format_set = false, flags_set = false;
int ret, i;
@@ -453,7 +452,8 @@ static int lcdif_crtc_atomic_check(struct drm_crtc *crtc,
encoder = connector_state->best_encoder;
- bridge = drm_bridge_chain_get_first_bridge(encoder);
+ struct drm_bridge *bridge __free(drm_bridge_put) =
+ drm_bridge_chain_get_first_bridge(encoder);
if (!bridge)
continue;
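This hunk (and the omapdrm one further below) relies on the scope-based cleanup support from <linux/cleanup.h>. A minimal sketch of the mechanism, assuming the DEFINE_FREE() for drm_bridge_put is provided alongside the drm_bridge get/put helpers:

    #include <linux/cleanup.h>

    /* illustrative; the real definition lives with drm_bridge_get/put */
    DEFINE_FREE(drm_bridge_put, struct drm_bridge *, if (_T) drm_bridge_put(_T))

    void example(struct drm_encoder *encoder)
    {
        struct drm_bridge *bridge __free(drm_bridge_put) =
            drm_bridge_chain_get_first_bridge(encoder);

        if (!bridge)
            return;
        /* use bridge; the reference is dropped automatically on
         * every exit from this scope */
    }

In the lcdif loop above, the declaration sits inside the loop body, so the reference is dropped at the end of every iteration, including the early continue.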
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index d1587639ebb0..c88776d1e784 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -102,14 +102,6 @@ config DRM_NOUVEAU_SVM
Say Y here if you want to enable experimental support for
Shared Virtual Memory (SVM).
-config DRM_NOUVEAU_GSP_DEFAULT
- bool "Use GSP firmware for Turing/Ampere (needs firmware installed)"
- depends on DRM_NOUVEAU
- default n
- help
- Say Y here if you want to use the GSP codepaths by default on
- Turing and Ampere GPUs.
-
config DRM_NOUVEAU_CH7006
tristate "Chrontel ch7006 TV encoder"
depends on DRM_NOUVEAU
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 11d5b923d6e7..e2c55f4b9c5a 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -795,6 +795,10 @@ static bool nv50_plane_format_mod_supported(struct drm_plane *plane,
struct nouveau_drm *drm = nouveau_drm(plane->dev);
uint8_t i;
+ /* All chipsets can display all formats in linear layout */
+ if (modifier == DRM_FORMAT_MOD_LINEAR)
+ return true;
+
if (drm->client.device.info.chipset < 0xc0) {
const struct drm_format_info *info = drm_format_info(format);
const uint8_t kind = (modifier >> 12) & 0xff;
diff --git a/drivers/gpu/drm/nouveau/gv100_fence.c b/drivers/gpu/drm/nouveau/gv100_fence.c
index cccdeca72002..317e516c4ec7 100644
--- a/drivers/gpu/drm/nouveau/gv100_fence.c
+++ b/drivers/gpu/drm/nouveau/gv100_fence.c
@@ -18,7 +18,7 @@ gv100_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
struct nvif_push *push = &chan->chan.push;
int ret;
- ret = PUSH_WAIT(push, 8);
+ ret = PUSH_WAIT(push, 13);
if (ret)
return ret;
@@ -32,6 +32,11 @@ gv100_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
NVDEF(NVC36F, SEM_EXECUTE, PAYLOAD_SIZE, 32BIT) |
NVDEF(NVC36F, SEM_EXECUTE, RELEASE_TIMESTAMP, DIS));
+ PUSH_MTHD(push, NVC36F, MEM_OP_A, 0,
+ MEM_OP_B, 0,
+ MEM_OP_C, NVDEF(NVC36F, MEM_OP_C, MEMBAR_TYPE, SYS_MEMBAR),
+ MEM_OP_D, NVDEF(NVC36F, MEM_OP_D, OPERATION, MEMBAR));
+
PUSH_MTHD(push, NVC36F, NON_STALL_INTERRUPT, 0);
PUSH_KICK(push);
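The PUSH_WAIT bump from 8 to 13 matches the new method exactly: assuming the usual encoding of one header dword per PUSH_MTHD, MEM_OP_A..D costs 1 header plus 4 payload dwords, i.e. the 5 extra dwords now reserved.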
diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h
index 8735dda4c8a7..338f74b9f501 100644
--- a/drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h
+++ b/drivers/gpu/drm/nouveau/include/nvhw/class/clc36f.h
@@ -7,6 +7,91 @@
#define NVC36F_NON_STALL_INTERRUPT (0x00000020)
#define NVC36F_NON_STALL_INTERRUPT_HANDLE 31:0
+// NOTE - MEM_OP_A and MEM_OP_B have been replaced in gp100 with methods for
+// specifying the page address for a targeted TLB invalidate and the uTLB for
+// a targeted REPLAY_CANCEL for UVM.
+// The previous MEM_OP_A/B functionality is in MEM_OP_C/D, with slightly
+// rearranged fields.
+#define NVC36F_MEM_OP_A (0x00000028)
+#define NVC36F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_CLIENT_UNIT_ID 5:0 // only relevant for REPLAY_CANCEL_TARGETED
+#define NVC36F_MEM_OP_A_TLB_INVALIDATE_INVALIDATION_SIZE 5:0 // Used to specify size of invalidate, used for invalidates which are not of the REPLAY_CANCEL_TARGETED type
+#define NVC36F_MEM_OP_A_TLB_INVALIDATE_CANCEL_TARGET_GPC_ID 10:6 // only relevant for REPLAY_CANCEL_TARGETED
+#define NVC36F_MEM_OP_A_TLB_INVALIDATE_CANCEL_MMU_ENGINE_ID 6:0 // only relevant for REPLAY_CANCEL_VA_GLOBAL
+#define NVC36F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR 11:11
+#define NVC36F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_EN 0x00000001
+#define NVC36F_MEM_OP_A_TLB_INVALIDATE_SYSMEMBAR_DIS 0x00000000
+#define NVC36F_MEM_OP_A_TLB_INVALIDATE_TARGET_ADDR_LO 31:12
+#define NVC36F_MEM_OP_B (0x0000002c)
+#define NVC36F_MEM_OP_B_TLB_INVALIDATE_TARGET_ADDR_HI 31:0
+#define NVC36F_MEM_OP_C (0x00000030)
+#define NVC36F_MEM_OP_C_MEMBAR_TYPE 2:0
+#define NVC36F_MEM_OP_C_MEMBAR_TYPE_SYS_MEMBAR 0x00000000
+#define NVC36F_MEM_OP_C_MEMBAR_TYPE_MEMBAR 0x00000001
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB 0:0
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_ONE 0x00000000
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_ALL 0x00000001 // Probably nonsensical for MMU_TLB_INVALIDATE_TARGETED
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_GPC 1:1
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_GPC_ENABLE 0x00000000
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_GPC_DISABLE 0x00000001
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY 4:2 // only relevant if GPC ENABLE
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_NONE 0x00000000
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START 0x00000001
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_START_ACK_ALL 0x00000002
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_TARGETED 0x00000003
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_GLOBAL 0x00000004
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_REPLAY_CANCEL_VA_GLOBAL 0x00000005
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE 6:5 // only relevant if GPC ENABLE
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_NONE 0x00000000
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_GLOBALLY 0x00000001
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACK_TYPE_INTRANODE 0x00000002
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE 9:7 //only relevant for REPLAY_CANCEL_VA_GLOBAL
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_READ 0
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_WRITE 1
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_STRONG 2
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_RSVRVD 3
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_WEAK 4
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ATOMIC_ALL 5
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_WRITE_AND_ATOMIC 6
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_ACCESS_TYPE_VIRT_ALL 7
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL 9:7 // Invalidate affects this level and all below
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_ALL 0x00000000 // Invalidate tlb caches at all levels of the page table
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_PTE_ONLY 0x00000001
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE0 0x00000002
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE1 0x00000003
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE2 0x00000004
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE3 0x00000005
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE4 0x00000006
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PAGE_TABLE_LEVEL_UP_TO_PDE5 0x00000007
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE 11:10 // only relevant if PDB_ONE
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_VID_MEM 0x00000000
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_COHERENT 0x00000002
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_APERTURE_SYS_MEM_NONCOHERENT 0x00000003
+#define NVC36F_MEM_OP_C_TLB_INVALIDATE_PDB_ADDR_LO 31:12 // only relevant if PDB_ONE
+#define NVC36F_MEM_OP_C_ACCESS_COUNTER_CLR_TARGETED_NOTIFY_TAG 19:0
+// MEM_OP_D MUST be preceded by MEM_OPs A-C.
+#define NVC36F_MEM_OP_D (0x00000034)
+#define NVC36F_MEM_OP_D_TLB_INVALIDATE_PDB_ADDR_HI 26:0 // only relevant if PDB_ONE
+#define NVC36F_MEM_OP_D_OPERATION 31:27
+#define NVC36F_MEM_OP_D_OPERATION_MEMBAR 0x00000005
+#define NVC36F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE 0x00000009
+#define NVC36F_MEM_OP_D_OPERATION_MMU_TLB_INVALIDATE_TARGETED 0x0000000a
+#define NVC36F_MEM_OP_D_OPERATION_L2_PEERMEM_INVALIDATE 0x0000000d
+#define NVC36F_MEM_OP_D_OPERATION_L2_SYSMEM_INVALIDATE 0x0000000e
+// CLEAN_LINES is an alias for Tegra/GPU IP usage
+#define NVC36F_MEM_OP_B_OPERATION_L2_INVALIDATE_CLEAN_LINES 0x0000000e
+#define NVC36F_MEM_OP_D_OPERATION_L2_CLEAN_COMPTAGS 0x0000000f
+#define NVC36F_MEM_OP_D_OPERATION_L2_FLUSH_DIRTY 0x00000010
+#define NVC36F_MEM_OP_D_OPERATION_L2_WAIT_FOR_SYS_PENDING_READS 0x00000015
+#define NVC36F_MEM_OP_D_OPERATION_ACCESS_COUNTER_CLR 0x00000016
+#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE 1:0
+#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_MIMC 0x00000000
+#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_MOMC 0x00000001
+#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_ALL 0x00000002
+#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TYPE_TARGETED 0x00000003
+#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE 2:2
+#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE_MIMC 0x00000000
+#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_TYPE_MOMC 0x00000001
+#define NVC36F_MEM_OP_D_ACCESS_COUNTER_CLR_TARGETED_BANK 6:3
#define NVC36F_SEM_ADDR_LO (0x0000005c)
#define NVC36F_SEM_ADDR_LO_OFFSET 31:2
#define NVC36F_SEM_ADDR_HI (0x00000060)
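The HI:LO pairs above are bit ranges consumed by the NVDEF()/NVVAL() helpers, as in the gv100_fence.c hunk earlier. Per the defines here, the membar push builds roughly:

    u32 op_c = NVDEF(NVC36F, MEM_OP_C, MEMBAR_TYPE, SYS_MEMBAR); /* bits 2:0  = 0 */
    u32 op_d = NVDEF(NVC36F, MEM_OP_D, OPERATION, MEMBAR);       /* bits 31:27 = 5 */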
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index b96f0555ca14..f26562eafffc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -929,7 +929,7 @@ done:
nvif_vmm_put(vmm, &old_mem->vma[1]);
nvif_vmm_put(vmm, &old_mem->vma[0]);
}
- return 0;
+ return ret;
}
static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 561877725aac..bb34b0a6082d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -31,8 +31,6 @@ struct nouveau_channel {
u64 addr;
} push;
- /* TODO: this will be reworked in the near future */
- bool accel_done;
void *fence;
struct {
int max;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index e1e542126310..805d0a87aa54 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -253,6 +253,7 @@ nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo,
int
nouveau_framebuffer_new(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *gem,
struct drm_framebuffer **pfb)
@@ -260,7 +261,6 @@ nouveau_framebuffer_new(struct drm_device *dev,
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct drm_framebuffer *fb;
- const struct drm_format_info *info;
unsigned int height, i;
uint32_t tile_mode;
uint8_t kind;
@@ -295,9 +295,6 @@ nouveau_framebuffer_new(struct drm_device *dev,
kind = nvbo->kind;
}
- info = drm_get_format_info(dev, mode_cmd->pixel_format,
- mode_cmd->modifier[0]);
-
for (i = 0; i < info->num_planes; i++) {
height = drm_format_info_plane_height(info,
mode_cmd->height,
@@ -321,7 +318,7 @@ nouveau_framebuffer_new(struct drm_device *dev,
if (!(fb = *pfb = kzalloc(sizeof(*fb), GFP_KERNEL)))
return -ENOMEM;
- drm_helper_mode_fill_fb_struct(dev, fb, NULL, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
fb->obj[0] = gem;
ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
@@ -344,7 +341,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
if (!gem)
return ERR_PTR(-ENOENT);
- ret = nouveau_framebuffer_new(dev, mode_cmd, gem, &fb);
+ ret = nouveau_framebuffer_new(dev, info, mode_cmd, gem, &fb);
if (ret == 0)
return fb;
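Threading the drm_format_info pointer in from the caller lets drm_helper_mode_fill_fb_struct() receive a real info instead of NULL and drops the duplicate drm_get_format_info() lookup; the omapdrm hunks below apply the same conversion.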
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index e45f211501f6..470e0910d484 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -8,8 +8,11 @@
#include <drm/drm_framebuffer.h>
+struct drm_format_info;
+
int
nouveau_framebuffer_new(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *gem,
struct drm_framebuffer **pfb);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 0e27b76d1e1c..c25ef9a54b9f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -90,7 +90,6 @@ FIRE_RING(struct nouveau_channel *chan)
{
if (chan->dma.cur == chan->dma.put)
return;
- chan->accel_done = true;
WRITE_PUT(chan->dma.cur);
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c
index edbbda78bac9..c4949e815eb3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_exec.c
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.c
@@ -60,14 +60,14 @@
* virtual address in the GPU's VA space there is no guarantee that the actual
* mappings are created in the GPU's MMU. If the given memory is swapped out
* at the time the bind operation is executed the kernel will stash the mapping
- * details into it's internal alloctor and create the actual MMU mappings once
+ * details into its internal allocator and create the actual MMU mappings once
* the memory is swapped back in. While this is transparent for userspace, it is
* guaranteed that all the backing memory is swapped back in and all the memory
* mappings, as requested by userspace previously, are actually mapped once the
* DRM_NOUVEAU_EXEC ioctl is called to submit an exec job.
*
* A VM_BIND job can be executed either synchronously or asynchronously. If
- * exectued asynchronously, userspace may provide a list of syncobjs this job
+ * executed asynchronously, userspace may provide a list of syncobjs this job
* will wait for and/or a list of syncobj the kernel will signal once the
* VM_BIND job finished execution. If executed synchronously the ioctl will
* block until the bind job is finished. For synchronous jobs the kernel will
@@ -82,7 +82,7 @@
* Since VM_BIND jobs update the GPU's VA space on job submit, EXEC jobs do have
* an up to date view of the VA space. However, the actual mappings might still
* be pending. Hence, EXEC jobs require to have the particular fences - of
- * the corresponding VM_BIND jobs they depent on - attached to them.
+ * the corresponding VM_BIND jobs they depend on - attached to them.
*/
static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 9f345a008717..869d4335c0f4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -240,21 +240,6 @@ nouveau_fence_emit(struct nouveau_fence *fence)
return ret;
}
-void
-nouveau_fence_cancel(struct nouveau_fence *fence)
-{
- struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
- unsigned long flags;
-
- spin_lock_irqsave(&fctx->lock, flags);
- if (!dma_fence_is_signaled_locked(&fence->base)) {
- dma_fence_set_error(&fence->base, -ECANCELED);
- if (nouveau_fence_signal(fence))
- nvif_event_block(&fctx->event);
- }
- spin_unlock_irqrestore(&fctx->lock, flags);
-}
-
bool
nouveau_fence_done(struct nouveau_fence *fence)
{
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 9957a919bd38..183dd43ecfff 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -29,7 +29,6 @@ void nouveau_fence_unref(struct nouveau_fence **);
int nouveau_fence_emit(struct nouveau_fence *);
bool nouveau_fence_done(struct nouveau_fence *);
-void nouveau_fence_cancel(struct nouveau_fence *fence);
int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr);
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index a5ce8eb4a3be..8d5853deeee4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -30,10 +30,7 @@ static int nouveau_platform_probe(struct platform_device *pdev)
func = of_device_get_match_data(&pdev->dev);
drm = nouveau_platform_device_create(func, pdev, &device);
- if (IS_ERR(drm))
- return PTR_ERR(drm);
-
- return 0;
+ return PTR_ERR_OR_ZERO(drm);
}
static void nouveau_platform_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index cd95446d6851..caab60fc62f6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -108,9 +108,21 @@ struct dma_buf *nouveau_gem_prime_export(struct drm_gem_object *gobj,
int flags)
{
struct nouveau_bo *nvbo = nouveau_gem_object(gobj);
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = true,
+ /* We opt to avoid OOM on system page allocations */
+ .gfp_retry_mayfail = true,
+ .allow_res_evict = false,
+ };
+ int ret;
if (nvbo->no_share)
return ERR_PTR(-EPERM);
+ ret = ttm_bo_setup_export(&nvbo->bo, &ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
return drm_gem_prime_export(gobj, flags);
}
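The ttm_operation_ctx is the interesting part here: no_wait_gpu plus gfp_retry_mayfail make the export-time setup fail with an error instead of blocking on the GPU or triggering the OOM killer, and allow_res_evict = false keeps it from evicting other resources. ttm_bo_setup_export() appears to be a TTM helper introduced alongside this series.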
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c
index 0cc0bc9f9952..e60f7892f5ce 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.c
@@ -11,7 +11,6 @@
#include "nouveau_exec.h"
#include "nouveau_abi16.h"
#include "nouveau_sched.h"
-#include "nouveau_chan.h"
#define NOUVEAU_SCHED_JOB_TIMEOUT_MS 10000
@@ -122,9 +121,11 @@ nouveau_job_done(struct nouveau_job *job)
{
struct nouveau_sched *sched = job->sched;
- spin_lock(&sched->job_list.lock);
+ spin_lock(&sched->job.list.lock);
list_del(&job->entry);
- spin_unlock(&sched->job_list.lock);
+ spin_unlock(&sched->job.list.lock);
+
+ wake_up(&sched->job.wq);
}
void
@@ -305,9 +306,9 @@ nouveau_job_submit(struct nouveau_job *job)
}
/* Submit was successful; add the job to the schedulers job list. */
- spin_lock(&sched->job_list.lock);
- list_add(&job->entry, &sched->job_list.head);
- spin_unlock(&sched->job_list.lock);
+ spin_lock(&sched->job.list.lock);
+ list_add(&job->entry, &sched->job.list.head);
+ spin_unlock(&sched->job.list.lock);
drm_sched_job_arm(&job->base);
job->done_fence = dma_fence_get(&job->base.s_fence->finished);
@@ -392,23 +393,10 @@ nouveau_sched_free_job(struct drm_sched_job *sched_job)
nouveau_job_fini(job);
}
-static void
-nouveau_sched_cancel_job(struct drm_sched_job *sched_job)
-{
- struct nouveau_fence *fence;
- struct nouveau_job *job;
-
- job = to_nouveau_job(sched_job);
- fence = to_nouveau_fence(job->done_fence);
-
- nouveau_fence_cancel(fence);
-}
-
static const struct drm_sched_backend_ops nouveau_sched_ops = {
.run_job = nouveau_sched_run_job,
.timedout_job = nouveau_sched_timedout_job,
.free_job = nouveau_sched_free_job,
- .cancel_job = nouveau_sched_cancel_job,
};
static int
@@ -458,8 +446,9 @@ nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
goto fail_sched;
mutex_init(&sched->mutex);
- spin_lock_init(&sched->job_list.lock);
- INIT_LIST_HEAD(&sched->job_list.head);
+ spin_lock_init(&sched->job.list.lock);
+ INIT_LIST_HEAD(&sched->job.list.head);
+ init_waitqueue_head(&sched->job.wq);
return 0;
@@ -493,12 +482,16 @@ nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm,
return 0;
}
+
static void
nouveau_sched_fini(struct nouveau_sched *sched)
{
struct drm_gpu_scheduler *drm_sched = &sched->base;
struct drm_sched_entity *entity = &sched->entity;
+ rmb(); /* for list_empty to work without lock */
+ wait_event(sched->job.wq, list_empty(&sched->job.list.head));
+
drm_sched_entity_fini(entity);
drm_sched_fini(drm_sched);
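With the .cancel_job callback (and nouveau_fence_cancel() above) removed, teardown can no longer cancel in-flight jobs, so fini drains them instead. Condensed from the two sides of this diff:

    /* completion side, nouveau_job_done(): */
    spin_lock(&sched->job.list.lock);
    list_del(&job->entry);
    spin_unlock(&sched->job.list.lock);
    wake_up(&sched->job.wq);        /* unblocks a sleeping fini */

    /* teardown side, nouveau_sched_fini(): */
    wait_event(sched->job.wq, list_empty(&sched->job.list.head));

wait_event() re-evaluates its condition after every wake_up(), so the lockless list_empty() check only needs the read barrier noted in the code.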
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.h b/drivers/gpu/drm/nouveau/nouveau_sched.h
index b98c3f0bef30..20cd1da8db73 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.h
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.h
@@ -103,9 +103,12 @@ struct nouveau_sched {
struct mutex mutex;
struct {
- struct list_head head;
- spinlock_t lock;
- } job_list;
+ struct {
+ struct list_head head;
+ spinlock_t lock;
+ } list;
+ struct wait_queue_head wq;
+ } job;
};
int nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm,
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index ddfc46bc1b3e..79eefdfd08a2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -1019,8 +1019,8 @@ bind_validate_map_sparse(struct nouveau_job *job, u64 addr, u64 range)
u64 end = addr + range;
again:
- spin_lock(&sched->job_list.lock);
- list_for_each_entry(__job, &sched->job_list.head, entry) {
+ spin_lock(&sched->job.list.lock);
+ list_for_each_entry(__job, &sched->job.list.head, entry) {
struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(__job);
list_for_each_op(op, &bind_job->ops) {
@@ -1030,7 +1030,7 @@ again:
if (!(end <= op_addr || addr >= op_end)) {
nouveau_uvmm_bind_job_get(bind_job);
- spin_unlock(&sched->job_list.lock);
+ spin_unlock(&sched->job.list.lock);
wait_for_completion(&bind_job->complete);
nouveau_uvmm_bind_job_put(bind_job);
goto again;
@@ -1038,7 +1038,7 @@ again:
}
}
}
- spin_unlock(&sched->job_list.lock);
+ spin_unlock(&sched->job.list.lock);
}
static int
@@ -1276,6 +1276,12 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
break;
case OP_MAP: {
struct nouveau_uvma_region *reg;
+ struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = op->va.addr,
+ .map.va.range = op->va.range,
+ .map.gem.obj = op->gem.obj,
+ .map.gem.offset = op->gem.offset,
+ };
reg = nouveau_uvma_region_find_first(uvmm,
op->va.addr,
@@ -1301,10 +1307,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
}
op->ops = drm_gpuvm_sm_map_ops_create(&uvmm->base,
- op->va.addr,
- op->va.range,
- op->gem.obj,
- op->gem.offset);
+ &map_req);
if (IS_ERR(op->ops)) {
ret = PTR_ERR(op->ops);
goto unwind_continue;
diff --git a/drivers/gpu/drm/nouveau/nvif/vmm.c b/drivers/gpu/drm/nouveau/nvif/vmm.c
index 99296f03371a..07c1ebc2a941 100644
--- a/drivers/gpu/drm/nouveau/nvif/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvif/vmm.c
@@ -219,7 +219,8 @@ nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass,
case RAW: args->type = NVIF_VMM_V0_TYPE_RAW; break;
default:
WARN_ON(1);
- return -EINVAL;
+ ret = -EINVAL;
+ goto done;
}
memcpy(args->data, argv, argc);
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/enum.c b/drivers/gpu/drm/nouveau/nvkm/core/enum.c
index b9581feb24cc..a23b40b27b81 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/enum.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/enum.c
@@ -44,7 +44,7 @@ nvkm_snprintbf(char *data, int size, const struct nvkm_bitfield *bf, u32 value)
bool space = false;
while (size >= 1 && bf->name) {
if (value & bf->mask) {
- int this = snprintf(data, size, "%s%s",
+ int this = scnprintf(data, size, "%s%s",
space ? " " : "", bf->name);
size -= this;
data += this;
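The snprintf()/scnprintf() distinction matters in this loop: snprintf() returns the length the output would have had, so on truncation size can go negative and data can be advanced past the end of the buffer, while scnprintf() returns the number of bytes actually written, keeping the pointer arithmetic in bounds.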
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
index fdffa0391b31..6fd4e60634fb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -350,6 +350,8 @@ nvkm_fifo_dtor(struct nvkm_engine *engine)
nvkm_chid_unref(&fifo->chid);
nvkm_event_fini(&fifo->nonstall.event);
+ if (fifo->func->nonstall_dtor)
+ fifo->func->nonstall_dtor(fifo);
mutex_destroy(&fifo->mutex);
if (fifo->func->dtor)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c
index e74493a4569e..6848a56f20c0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c
@@ -517,19 +517,11 @@ ga100_fifo_nonstall_intr(struct nvkm_inth *inth)
static void
ga100_fifo_nonstall_block(struct nvkm_event *event, int type, int index)
{
- struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), nonstall.event);
- struct nvkm_runl *runl = nvkm_runl_get(fifo, index, 0);
-
- nvkm_inth_block(&runl->nonstall.inth);
}
static void
ga100_fifo_nonstall_allow(struct nvkm_event *event, int type, int index)
{
- struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), nonstall.event);
- struct nvkm_runl *runl = nvkm_runl_get(fifo, index, 0);
-
- nvkm_inth_allow(&runl->nonstall.inth);
}
const struct nvkm_event_func
@@ -564,12 +556,26 @@ ga100_fifo_nonstall_ctor(struct nvkm_fifo *fifo)
if (ret)
return ret;
+ nvkm_inth_allow(&runl->nonstall.inth);
+
nr = max(nr, runl->id + 1);
}
return nr;
}
+void
+ga100_fifo_nonstall_dtor(struct nvkm_fifo *fifo)
+{
+ struct nvkm_runl *runl;
+
+ nvkm_runl_foreach(runl, fifo) {
+ if (runl->nonstall.vector < 0)
+ continue;
+ nvkm_inth_block(&runl->nonstall.inth);
+ }
+}
+
int
ga100_fifo_runl_ctor(struct nvkm_fifo *fifo)
{
@@ -599,6 +605,7 @@ ga100_fifo = {
.runl_ctor = ga100_fifo_runl_ctor,
.mmu_fault = &tu102_fifo_mmu_fault,
.nonstall_ctor = ga100_fifo_nonstall_ctor,
+ .nonstall_dtor = ga100_fifo_nonstall_dtor,
.nonstall = &ga100_fifo_nonstall,
.runl = &ga100_runl,
.runq = &ga100_runq,
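The allow/block pair moves out of the event callbacks, which are now no-ops: the nonstall interrupt is enabled once when the vectors are set up in the ctor and disabled symmetrically in the new dtor, which skips any runlist that never got a vector (nonstall.vector < 0).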
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c
index 755235f55b3a..18a0b1f4eab7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c
@@ -30,6 +30,7 @@ ga102_fifo = {
.runl_ctor = ga100_fifo_runl_ctor,
.mmu_fault = &tu102_fifo_mmu_fault,
.nonstall_ctor = ga100_fifo_nonstall_ctor,
+ .nonstall_dtor = ga100_fifo_nonstall_dtor,
.nonstall = &ga100_fifo_nonstall,
.runl = &ga100_runl,
.runq = &ga100_runq,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
index 5e81ae195329..fff1428ef267 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
@@ -41,6 +41,7 @@ struct nvkm_fifo_func {
void (*start)(struct nvkm_fifo *, unsigned long *);
int (*nonstall_ctor)(struct nvkm_fifo *);
+ void (*nonstall_dtor)(struct nvkm_fifo *);
const struct nvkm_event_func *nonstall;
const struct nvkm_runl_func *runl;
@@ -200,6 +201,7 @@ u32 tu102_chan_doorbell_handle(struct nvkm_chan *);
int ga100_fifo_runl_ctor(struct nvkm_fifo *);
int ga100_fifo_nonstall_ctor(struct nvkm_fifo *);
+void ga100_fifo_nonstall_dtor(struct nvkm_fifo *);
extern const struct nvkm_event_func ga100_fifo_nonstall;
extern const struct nvkm_runl_func ga100_runl;
extern const struct nvkm_runq_func ga100_runq;
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c b/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c
index b7da3ab44c27..7c43397c19e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c
@@ -103,7 +103,7 @@ gm200_flcn_pio_imem_wr_init(struct nvkm_falcon *falcon, u8 port, bool sec, u32 i
static void
gm200_flcn_pio_imem_wr(struct nvkm_falcon *falcon, u8 port, const u8 *img, int len, u16 tag)
{
- nvkm_falcon_wr32(falcon, 0x188 + (port * 0x10), tag++);
+ nvkm_falcon_wr32(falcon, 0x188 + (port * 0x10), tag);
while (len >= 4) {
nvkm_falcon_wr32(falcon, 0x184 + (port * 0x10), *(u32 *)img);
img += 4;
@@ -249,9 +249,11 @@ int
gm200_flcn_fw_load(struct nvkm_falcon_fw *fw)
{
struct nvkm_falcon *falcon = fw->falcon;
- int target, ret;
+ int ret;
if (fw->inst) {
+ int target;
+
nvkm_falcon_mask(falcon, 0x048, 0x00000001, 0x00000001);
switch (nvkm_memory_target(fw->inst)) {
@@ -285,15 +287,6 @@ gm200_flcn_fw_load(struct nvkm_falcon_fw *fw)
}
if (fw->boot) {
- switch (nvkm_memory_target(&fw->fw.mem.memory)) {
- case NVKM_MEM_TARGET_VRAM: target = 4; break;
- case NVKM_MEM_TARGET_HOST: target = 5; break;
- case NVKM_MEM_TARGET_NCOH: target = 6; break;
- default:
- WARN_ON(1);
- return -EINVAL;
- }
-
ret = nvkm_falcon_pio_wr(falcon, fw->boot, 0, 0,
IMEM, falcon->code.limit - fw->boot_size, fw->boot_size,
fw->boot_addr >> 8, false);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
index eb765da0876e..35d1fcef520b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
@@ -41,8 +41,8 @@ ad102_gsp = {
static struct nvkm_gsp_fwif
ad102_gsps[] = {
- { 1, tu102_gsp_load, &ad102_gsp, &r570_rm_ga102, "570.144", true },
- { 0, tu102_gsp_load, &ad102_gsp, &r535_rm_ga102, "535.113.01", true },
+ { 1, tu102_gsp_load, &ad102_gsp, &r570_rm_ga102, "570.144" },
+ { 0, tu102_gsp_load, &ad102_gsp, &r535_rm_ga102, "535.113.01" },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
index d23243a83a4c..7ccb41761066 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
@@ -138,8 +138,10 @@ nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device,
nvkm_subdev_ctor(&nvkm_gsp, device, type, inst, &gsp->subdev);
fwif = nvkm_firmware_load(&gsp->subdev, fwif, "Gsp", gsp);
- if (IS_ERR(fwif))
+ if (IS_ERR(fwif)) {
+ nvkm_error(&gsp->subdev, "Failed to load required firmware for device.\n");
return PTR_ERR(fwif);
+ }
gsp->func = fwif->func;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
index 52412965fac1..5b721bd9d799 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
@@ -209,11 +209,12 @@ nvkm_gsp_fwsec_v2(struct nvkm_gsp *gsp, const char *name,
fw->boot_addr = bld->start_tag << 8;
fw->boot_size = bld->code_size;
fw->boot = kmemdup(bl->data + hdr->data_offset + bld->code_off, fw->boot_size, GFP_KERNEL);
- if (!fw->boot)
- ret = -ENOMEM;
nvkm_firmware_put(bl);
+ if (!fw->boot)
+ return -ENOMEM;
+
/* Patch in interface data. */
return nvkm_gsp_fwsec_patch(gsp, fw, desc->InterfaceOffset, init_cmd);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c
index 12a3f2c1ed82..1b3b31b95ce4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c
@@ -20,7 +20,7 @@ gb100_gsp = {
static struct nvkm_gsp_fwif
gb100_gsps[] = {
- { 0, gh100_gsp_load, &gb100_gsp, &r570_rm_gb10x, "570.144", true },
+ { 0, gh100_gsp_load, &gb100_gsp, &r570_rm_gb10x, "570.144" },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c
index c1d718172ddf..51384c63148c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c
@@ -20,7 +20,7 @@ gb202_gsp = {
static struct nvkm_gsp_fwif
gb202_gsps[] = {
- { 0, gh100_gsp_load, &gb202_gsp, &r570_rm_gb20x, "570.144", true },
+ { 0, gh100_gsp_load, &gb202_gsp, &r570_rm_gb20x, "570.144" },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c
index ce31e8248807..b0dd5fce7bad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c
@@ -344,7 +344,7 @@ done:
static struct nvkm_gsp_fwif
gh100_gsps[] = {
- { 0, gh100_gsp_load, &gh100_gsp, &r570_rm_gh100, "570.144", true },
+ { 0, gh100_gsp_load, &gh100_gsp, &r570_rm_gh100, "570.144" },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
index 4f14e85fc69e..c3494b7ac572 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
@@ -14,7 +14,6 @@ struct nvkm_gsp_fwif {
const struct nvkm_gsp_func *func;
const struct nvkm_rm_impl *rm;
const char *ver;
- bool enable;
};
int nvkm_gsp_load_fw(struct nvkm_gsp *, const char *name, const char *ver,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c
index 1ac5628c5140..4ed54b386a60 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fifo.c
@@ -601,6 +601,7 @@ r535_fifo_new(const struct nvkm_fifo_func *hw, struct nvkm_device *device,
rm->chan.func = &r535_chan;
rm->nonstall = &ga100_fifo_nonstall;
rm->nonstall_ctor = ga100_fifo_nonstall_ctor;
+ rm->nonstall_dtor = ga100_fifo_nonstall_dtor;
return nvkm_fifo_new_(rm, device, type, inst, pfifo);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
index 588cb4ab85cb..32e6a065d6d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
@@ -582,10 +582,13 @@ struct nv_gsp_registry_entries {
* RMSecBusResetEnable - enables PCI secondary bus reset
* RMForcePcieConfigSave - forces GSP-RM to preserve PCI configuration
* registers on any PCI reset.
+ * RMDevidCheckIgnore - allows GSP-RM to boot even if the PCI dev ID
+ * is not found in the internal product name database.
*/
static const struct nv_gsp_registry_entries r535_registry_entries[] = {
{ "RMSecBusResetEnable", 1 },
{ "RMForcePcieConfigSave", 1 },
+ { "RMDevidCheckIgnore", 1 },
};
#define NV_GSP_REG_NUM_ENTRIES ARRAY_SIZE(r535_registry_entries)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c
index 9d06ff722fea..0dc4782df8c0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c
@@ -325,7 +325,7 @@ r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), info.retries);
if (IS_ERR_OR_NULL(rpc)) {
- kfree(buf);
+ kvfree(buf);
return rpc;
}
@@ -334,7 +334,7 @@ r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries)
rpc = r535_gsp_msgq_recv_one_elem(gsp, &info);
if (IS_ERR_OR_NULL(rpc)) {
- kfree(buf);
+ kvfree(buf);
return rpc;
}
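kvfree() is the required counterpart for memory obtained with kvmalloc(), which may fall back to vmalloc; passing a vmalloc address to kfree() corrupts memory, so these error paths presumably free a kvmalloc'd buf.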
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
index 58e233bc53b1..81e56da0474a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
@@ -383,13 +383,9 @@ int
tu102_gsp_load_rm(struct nvkm_gsp *gsp, const struct nvkm_gsp_fwif *fwif)
{
struct nvkm_subdev *subdev = &gsp->subdev;
- bool enable_gsp = fwif->enable;
int ret;
-#if IS_ENABLED(CONFIG_DRM_NOUVEAU_GSP_DEFAULT)
- enable_gsp = true;
-#endif
- if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", enable_gsp))
+ if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", true))
return -EINVAL;
ret = nvkm_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm);
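With the Kconfig knob removed, GSP-RM now defaults to enabled wherever suitable firmware exists. The runtime opt-out survives: assuming the standard nvkm cfgopt plumbing, booting with

    nouveau.config=NvGspRm=0

still forces the pre-GSP paths.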
diff --git a/drivers/gpu/drm/nova/driver.rs b/drivers/gpu/drm/nova/driver.rs
index b28b2e05cc15..91b7380f83ab 100644
--- a/drivers/gpu/drm/nova/driver.rs
+++ b/drivers/gpu/drm/nova/driver.rs
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-use kernel::{auxiliary, c_str, device::Core, drm, drm::gem, drm::ioctl, prelude::*, types::ARef};
+use kernel::{
+ auxiliary, c_str, device::Core, drm, drm::gem, drm::ioctl, prelude::*, sync::aref::ARef,
+};
use crate::file::File;
use crate::gem::NovaObject;
diff --git a/drivers/gpu/drm/nova/file.rs b/drivers/gpu/drm/nova/file.rs
index 7e59a34b830d..90b9d2d0ec4a 100644
--- a/drivers/gpu/drm/nova/file.rs
+++ b/drivers/gpu/drm/nova/file.rs
@@ -2,13 +2,11 @@
use crate::driver::{NovaDevice, NovaDriver};
use crate::gem::NovaObject;
-use crate::uapi::{GemCreate, GemInfo, Getparam};
use kernel::{
alloc::flags::*,
drm::{self, gem::BaseObject},
pci,
prelude::*,
- types::Opaque,
uapi,
};
@@ -26,20 +24,19 @@ impl File {
/// IOCTL: get_param: Query GPU / driver metadata.
pub(crate) fn get_param(
dev: &NovaDevice,
- getparam: &Opaque<uapi::drm_nova_getparam>,
+ getparam: &mut uapi::drm_nova_getparam,
_file: &drm::File<File>,
) -> Result<u32> {
let adev = &dev.adev;
let parent = adev.parent().ok_or(ENOENT)?;
let pdev: &pci::Device = parent.try_into()?;
- let getparam: &Getparam = getparam.into();
- let value = match getparam.param() as u32 {
+ let value = match getparam.param as u32 {
uapi::NOVA_GETPARAM_VRAM_BAR_SIZE => pdev.resource_len(1)?,
_ => return Err(EINVAL),
};
- getparam.set_value(value);
+ getparam.value = Into::<u64>::into(value);
Ok(0)
}
@@ -47,13 +44,12 @@ impl File {
/// IOCTL: gem_create: Create a new DRM GEM object.
pub(crate) fn gem_create(
dev: &NovaDevice,
- req: &Opaque<uapi::drm_nova_gem_create>,
+ req: &mut uapi::drm_nova_gem_create,
file: &drm::File<File>,
) -> Result<u32> {
- let req: &GemCreate = req.into();
- let obj = NovaObject::new(dev, req.size().try_into()?)?;
+ let obj = NovaObject::new(dev, req.size.try_into()?)?;
- req.set_handle(obj.create_handle(file)?);
+ req.handle = obj.create_handle(file)?;
Ok(0)
}
@@ -61,13 +57,12 @@ impl File {
/// IOCTL: gem_info: Query GEM metadata.
pub(crate) fn gem_info(
_dev: &NovaDevice,
- req: &Opaque<uapi::drm_nova_gem_info>,
+ req: &mut uapi::drm_nova_gem_info,
file: &drm::File<File>,
) -> Result<u32> {
- let req: &GemInfo = req.into();
- let bo = NovaObject::lookup_handle(file, req.handle())?;
+ let bo = NovaObject::lookup_handle(file, req.handle)?;
- req.set_size(bo.size().try_into()?);
+ req.size = bo.size().try_into()?;
Ok(0)
}
diff --git a/drivers/gpu/drm/nova/gem.rs b/drivers/gpu/drm/nova/gem.rs
index 33b62d21400c..2760ba4f3450 100644
--- a/drivers/gpu/drm/nova/gem.rs
+++ b/drivers/gpu/drm/nova/gem.rs
@@ -4,7 +4,7 @@ use kernel::{
drm,
drm::{gem, gem::BaseObject},
prelude::*,
- types::ARef,
+ sync::aref::ARef,
};
use crate::{
@@ -16,16 +16,14 @@ use crate::{
#[pin_data]
pub(crate) struct NovaObject {}
-impl gem::BaseDriverObject<gem::Object<NovaObject>> for NovaObject {
+impl gem::DriverObject for NovaObject {
+ type Driver = NovaDriver;
+
fn new(_dev: &NovaDevice, _size: usize) -> impl PinInit<Self, Error> {
try_pin_init!(NovaObject {})
}
}
-impl gem::DriverObject for NovaObject {
- type Driver = NovaDriver;
-}
-
impl NovaObject {
/// Create a new DRM GEM object.
pub(crate) fn new(dev: &NovaDevice, size: usize) -> Result<ARef<gem::Object<Self>>> {
diff --git a/drivers/gpu/drm/nova/nova.rs b/drivers/gpu/drm/nova/nova.rs
index 64fd670e99e1..8893e58ee0db 100644
--- a/drivers/gpu/drm/nova/nova.rs
+++ b/drivers/gpu/drm/nova/nova.rs
@@ -5,7 +5,6 @@
mod driver;
mod file;
mod gem;
-mod uapi;
use crate::driver::NovaDriver;
diff --git a/drivers/gpu/drm/nova/uapi.rs b/drivers/gpu/drm/nova/uapi.rs
deleted file mode 100644
index eb228a58d423..000000000000
--- a/drivers/gpu/drm/nova/uapi.rs
+++ /dev/null
@@ -1,61 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-use kernel::uapi;
-
-// TODO Work out some common infrastructure to avoid boilerplate code for uAPI abstractions.
-
-macro_rules! define_uapi_abstraction {
- ($name:ident <= $inner:ty) => {
- #[repr(transparent)]
- pub struct $name(::kernel::types::Opaque<$inner>);
-
- impl ::core::convert::From<&::kernel::types::Opaque<$inner>> for &$name {
- fn from(value: &::kernel::types::Opaque<$inner>) -> Self {
- // SAFETY: `Self` is a transparent wrapper of `$inner`.
- unsafe { ::core::mem::transmute(value) }
- }
- }
- };
-}
-
-define_uapi_abstraction!(Getparam <= uapi::drm_nova_getparam);
-
-impl Getparam {
- pub fn param(&self) -> u64 {
- // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_getparam`.
- unsafe { (*self.0.get()).param }
- }
-
- pub fn set_value(&self, v: u64) {
- // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_getparam`.
- unsafe { (*self.0.get()).value = v };
- }
-}
-
-define_uapi_abstraction!(GemCreate <= uapi::drm_nova_gem_create);
-
-impl GemCreate {
- pub fn size(&self) -> u64 {
- // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_gem_create`.
- unsafe { (*self.0.get()).size }
- }
-
- pub fn set_handle(&self, handle: u32) {
- // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_gem_create`.
- unsafe { (*self.0.get()).handle = handle };
- }
-}
-
-define_uapi_abstraction!(GemInfo <= uapi::drm_nova_gem_info);
-
-impl GemInfo {
- pub fn handle(&self) -> u32 {
- // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_gem_info`.
- unsafe { (*self.0.get()).handle }
- }
-
- pub fn set_size(&self, size: u64) {
- // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_gem_info`.
- unsafe { (*self.0.get()).size = size };
- }
-}
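With the DRM Rust abstractions now handing IOCTL handlers a plain &mut uapi struct, the Opaque-based wrapper types and their per-field unsafe accessors become dead code, which is why the whole uapi.rs shim (and all of its unsafe blocks) can be deleted.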
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 054b71dba6a7..794267f0f007 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -378,10 +378,8 @@ static int omap_display_id(struct omap_dss_device *output)
struct device_node *node = NULL;
if (output->bridge) {
- struct drm_bridge *bridge = output->bridge;
-
- while (drm_bridge_get_next_bridge(bridge))
- bridge = drm_bridge_get_next_bridge(bridge);
+ struct drm_bridge *bridge __free(drm_bridge_put) =
+ drm_bridge_chain_get_last_bridge(output->bridge->encoder);
node = bridge->of_node;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 30c81e2e5d6b..bb3105556f19 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -351,7 +351,7 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
}
}
- fb = omap_framebuffer_init(dev, mode_cmd, bos);
+ fb = omap_framebuffer_init(dev, info, mode_cmd, bos);
if (IS_ERR(fb))
goto error;
@@ -365,9 +365,9 @@ error:
}
struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
{
- const struct drm_format_info *format = NULL;
struct omap_framebuffer *omap_fb = NULL;
struct drm_framebuffer *fb = NULL;
unsigned int pitch = mode_cmd->pitches[0];
@@ -377,15 +377,12 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
dev, mode_cmd, mode_cmd->width, mode_cmd->height,
(char *)&mode_cmd->pixel_format);
- format = drm_get_format_info(dev, mode_cmd->pixel_format,
- mode_cmd->modifier[0]);
-
for (i = 0; i < ARRAY_SIZE(formats); i++) {
if (formats[i] == mode_cmd->pixel_format)
break;
}
- if (!format || i == ARRAY_SIZE(formats)) {
+ if (i == ARRAY_SIZE(formats)) {
dev_dbg(dev->dev, "unsupported pixel format: %4.4s\n",
(char *)&mode_cmd->pixel_format);
ret = -EINVAL;
@@ -399,7 +396,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
}
fb = &omap_fb->base;
- omap_fb->format = format;
+ omap_fb->format = info;
mutex_init(&omap_fb->lock);
/*
@@ -407,23 +404,23 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
* that the two planes of multiplane formats need the same number of
* bytes per pixel.
*/
- if (format->num_planes == 2 && pitch != mode_cmd->pitches[1]) {
+ if (info->num_planes == 2 && pitch != mode_cmd->pitches[1]) {
dev_dbg(dev->dev, "pitches differ between planes 0 and 1\n");
ret = -EINVAL;
goto fail;
}
- if (pitch % format->cpp[0]) {
+ if (pitch % info->cpp[0]) {
dev_dbg(dev->dev,
"buffer pitch (%u bytes) is not a multiple of pixel size (%u bytes)\n",
- pitch, format->cpp[0]);
+ pitch, info->cpp[0]);
ret = -EINVAL;
goto fail;
}
- for (i = 0; i < format->num_planes; i++) {
+ for (i = 0; i < info->num_planes; i++) {
struct plane *plane = &omap_fb->planes[i];
- unsigned int vsub = i == 0 ? 1 : format->vsub;
+ unsigned int vsub = i == 0 ? 1 : info->vsub;
unsigned int size;
size = pitch * mode_cmd->height / vsub;
@@ -440,7 +437,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
plane->dma_addr = 0;
}
- drm_helper_mode_fill_fb_struct(dev, fb, NULL, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs);
if (ret) {
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.h b/drivers/gpu/drm/omapdrm/omap_fb.h
index 0873f953cf1d..e6010302a22b 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.h
+++ b/drivers/gpu/drm/omapdrm/omap_fb.h
@@ -13,6 +13,7 @@ struct drm_connector;
struct drm_device;
struct drm_file;
struct drm_framebuffer;
+struct drm_format_info;
struct drm_gem_object;
struct drm_mode_fb_cmd2;
struct drm_plane_state;
@@ -23,6 +24,7 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
struct drm_file *file, const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
int omap_framebuffer_pin(struct drm_framebuffer *fb);
void omap_framebuffer_unpin(struct drm_framebuffer *fb);
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 7b6396890681..948af7ec1130 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -197,7 +197,10 @@ int omap_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
goto fail;
}
- fb = omap_framebuffer_init(dev, &mode_cmd, &bo);
+ fb = omap_framebuffer_init(dev,
+ drm_get_format_info(dev, mode_cmd.pixel_format,
+ mode_cmd.modifier[0]),
+ &mode_cmd, &bo);
if (IS_ERR(fb)) {
dev_err(dev->dev, "failed to allocate fb\n");
/* note: if fb creation failed, we can't rely on fb destroy
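
The omapdrm hunks above all follow one pattern: omap_framebuffer_init() no longer looks up the pixel format itself, so every caller resolves the struct drm_format_info once and threads it through. A sketch of the caller side, mirroring the fbdev hunk (the error value is illustrative):

	const struct drm_format_info *info;

	info = drm_get_format_info(dev, mode_cmd->pixel_format,
				   mode_cmd->modifier[0]);
	if (!info)
		return ERR_PTR(-EINVAL);

	fb = omap_framebuffer_init(dev, info, mode_cmd, bos);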
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 09b9f7ff9340..407c5f6a268b 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -215,6 +215,19 @@ config DRM_PANEL_HIMAX_HX8394
If M is selected the module will be called panel-himax-hx8394.
+config DRM_PANEL_HYDIS_HV101HD1
+ tristate "Hydis HV101HD1 panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the Hydis HV101HD1
+ 2-lane 1366x768 MIPI DSI panel found in the ASUS VivoTab RT TF600T.
+ The HV101HD1 is a color active-matrix TFT LCD module using amorphous
+ silicon TFTs (thin-film transistors) as active switching devices.
+
+ If M is selected the module will be called panel-hydis-hv101hd1.
+
config DRM_PANEL_ILITEK_IL9322
tristate "Ilitek ILI9322 320x240 QVGA panels"
depends on OF && SPI
@@ -843,6 +856,17 @@ config DRM_PANEL_SAMSUNG_S6E8AA0
select DRM_MIPI_DSI
select VIDEOMODE_HELPERS
+config DRM_PANEL_SAMSUNG_S6E8AA5X01_AMS561RA01
+ tristate "Samsung AMS561RA01 panel with S6E8AA5X01 controller"
+ depends on GPIOLIB && OF && REGULATOR
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the Samsung AMS561RA01
+ panel, which uses Samsung's S6E8AA5X01 controller. The panel has a
+ ~5.6-inch AMOLED display, and the controller is driven by the MIPI
+ DSI protocol with 4 lanes.
+
+ If M is selected the module will be called
+ panel-samsung-s6e8aa5x01-ams561ra01.
+
config DRM_PANEL_SAMSUNG_SOFEF00
tristate "Samsung sofef00/s6e3fc2x01 OnePlus 6/6T DSI cmd mode panels"
depends on OF
@@ -971,7 +995,7 @@ config DRM_PANEL_STARTEK_KD070FHFID015
depends on BACKLIGHT_CLASS_DEVICE
help
Say Y here if you want to enable support for STARTEK KD070FHFID015 DSI panel
- based on RENESAS-R69429 controller. The pannel is a 7-inch TFT LCD display
+ based on RENESAS-R69429 controller. The panel is a 7-inch TFT LCD display
with a resolution of 1024 x 600 pixels. It provides a MIPI DSI interface to
the host, a built-in LED backlight and touch controller.
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 957555b49996..3615a761b44f 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_DRM_PANEL_HIMAX_HX83102) += panel-himax-hx83102.o
obj-$(CONFIG_DRM_PANEL_HIMAX_HX83112A) += panel-himax-hx83112a.o
obj-$(CONFIG_DRM_PANEL_HIMAX_HX83112B) += panel-himax-hx83112b.o
obj-$(CONFIG_DRM_PANEL_HIMAX_HX8394) += panel-himax-hx8394.o
+obj-$(CONFIG_DRM_PANEL_HYDIS_HV101HD1) += panel-hydis-hv101hd1.o
obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o
obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9341) += panel-ilitek-ili9341.o
obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9805) += panel-ilitek-ili9805.o
@@ -87,6 +88,7 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0_DSI) += panel-samsung-s6e63m0-dsi.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS427AP24) += panel-samsung-s6e88a0-ams427ap24.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01) += panel-samsung-s6e88a0-ams452ef01.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA5X01_AMS561RA01) += panel-samsung-s6e8aa5x01-ams561ra01.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_SOFEF00) += panel-samsung-sofef00.o
obj-$(CONFIG_DRM_PANEL_SEIKO_43WVF1G) += panel-seiko-43wvf1g.o
obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index 9a56e208cbdd..62435e3cd9f4 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -1736,10 +1736,11 @@ static const struct panel_delay delay_200_500_e50 = {
.enable = 50,
};
-static const struct panel_delay delay_200_500_e50_p2e200 = {
+static const struct panel_delay delay_200_500_e50_d50_p2e200 = {
.hpd_absent = 200,
.unprepare = 500,
.enable = 50,
+ .disable = 50,
.prepare_to_enable = 200,
};
@@ -1795,6 +1796,13 @@ static const struct panel_delay delay_200_500_e200_d10 = {
.disable = 10,
};
+static const struct panel_delay delay_200_500_e200_d50 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .enable = 200,
+ .disable = 50,
+};
+
static const struct panel_delay delay_200_150_e200 = {
.hpd_absent = 200,
.unprepare = 150,
@@ -1828,6 +1836,20 @@ static const struct panel_delay delay_50_500_e200_d200_po2e335 = {
.powered_on_to_enable = 335,
};
+static const struct panel_delay delay_200_500_e50_d100 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .enable = 50,
+ .disable = 100,
+};
+
+static const struct panel_delay delay_80_500_e50_d50 = {
+ .hpd_absent = 80,
+ .unprepare = 500,
+ .enable = 50,
+ .disable = 50,
+};
+
#define EDP_PANEL_ENTRY(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name) \
{ \
.ident = { \
@@ -1857,6 +1879,7 @@ static const struct panel_delay delay_50_500_e200_d200_po2e335 = {
* Sort first by vendor, then by product ID.
*/
static const struct edp_panel_entry edp_panels[] = {
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x04a4, &delay_200_500_e50, "B122UAN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x105c, &delay_200_500_e50, "B116XTN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x1062, &delay_200_500_e50, "B120XAN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x125c, &delay_200_500_e50, "Unknown"),
@@ -1875,6 +1898,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY2('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0",
&auo_b116xa3_mode),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x435c, &delay_200_500_e50, "Unknown"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x52b0, &delay_200_500_e50, "B116XAK02.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x582d, &delay_200_500_e50, "B133UAN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x635c, &delay_200_500_e50, "B116XAN06.3"),
@@ -1882,10 +1906,12 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('A', 'U', 'O', 0x723c, &delay_200_500_e50, "B140XTN07.2"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x73aa, &delay_200_500_e50, "B116XTN02.3"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x8bba, &delay_200_500_e50, "B140UAN08.5"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xa199, &delay_200_500_e50, "B116XAN06.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xa7b3, &delay_200_500_e50, "B140UAN04.4"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xc4b4, &delay_200_500_e50, "B116XAT04.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xc9a8, &delay_200_500_e50, "B140QAN08.H"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0xcdba, &delay_200_500_e50, "B140UAX01.2"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xd497, &delay_200_500_e50, "B120XAN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xf390, &delay_200_500_e50, "B140XTN07.7"),
@@ -1934,21 +1960,25 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x09dd, &delay_200_500_e50, "NT116WHM-N21"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a1b, &delay_200_500_e50, "NV133WUM-N63"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a36, &delay_200_500_e200, "Unknown"),
- EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a3e, &delay_200_500_e80, "NV116WHM-N49"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a3e, &delay_200_500_e80_d50, "NV116WHM-N49"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a5d, &delay_200_500_e50, "NV116WHM-N45"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a6a, &delay_200_500_e80, "NV140WUM-N44"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ac5, &delay_200_500_e50, "NV116WHM-N4C"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ae8, &delay_200_500_e50_p2e80, "NV140WUM-N41"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b09, &delay_200_500_e50_po2e200, "NV140FHM-NZ"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b1e, &delay_200_500_e80, "NE140QDM-N6A"),
- EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b34, &delay_200_500_e80, "NV122WUM-N41"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b34, &delay_200_500_e80_d50, "NV122WUM-N41"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b43, &delay_200_500_e200, "NV140FHM-T09"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b56, &delay_200_500_e80, "NT140FHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b66, &delay_200_500_e80, "NE140WUM-N6G"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c20, &delay_200_500_e80, "NT140FHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c93, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cb6, &delay_200_500_e200, "NT116WHM-N44"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cf6, &delay_200_500_e200, "NV140WUM-N64"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cfa, &delay_200_500_e50, "NV116WHM-A4D"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0d45, &delay_200_500_e80, "NV116WHM-N4B"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0d73, &delay_200_500_e80, "NE140WUM-N6S"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ddf, &delay_200_500_e80, "NV116WHM-T01"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1130, &delay_200_500_e50, "N116BGE-EB2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1132, &delay_200_500_e80_d50, "N116BGE-EA2"),
@@ -1966,27 +1996,36 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'M', 'N', 0x115b, &delay_200_500_e80_d50, "N116BCN-EB1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x115d, &delay_200_500_e80_d50, "N116BCA-EA2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x115e, &delay_200_500_e80_d50, "N116BCA-EA1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x115f, &delay_200_500_e80_d50, "N116BCL-EAK"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1160, &delay_200_500_e80_d50, "N116BCJ-EAK"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1161, &delay_200_500_e80, "N116BCP-EA2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1163, &delay_200_500_e80_d50, "N116BCJ-EAK"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x124c, &delay_200_500_e80_d50, "N122JCA-ENK"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x142b, &delay_200_500_e80_d50, "N140HCA-EAC"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x142e, &delay_200_500_e80_d50, "N140BGA-EA4"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1441, &delay_200_500_e80_d50, "N140JCA-ELK"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x144f, &delay_200_500_e80_d50, "N140HGA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1468, &delay_200_500_e80, "N140HGA-EA1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x14a8, &delay_200_500_e80, "N140JCA-ELP"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d4, &delay_200_500_e80_d50, "N140HCA-EAC"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d6, &delay_200_500_e80_d50, "N140BGA-EA4"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x162b, &delay_200_500_e80_d50, "N160JCE-ELL"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x7402, &delay_200_500_e200_d50, "N116BCA-EAK"),
- EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50_p2e200, "MNC207QS1-1"),
- EDP_PANEL_ENTRY('C', 'S', 'O', 0x1413, &delay_200_500_e50_p2e200, "MNE007JA1-2"),
+ EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50_d50_p2e200, "MNC207QS1-1"),
+ EDP_PANEL_ENTRY('C', 'S', 'O', 0x1413, &delay_200_500_e50_d50_p2e200, "MNE007JA1-2"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1100, &delay_200_500_e80_d50, "MNB601LS1-1"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1103, &delay_200_500_e80_d50, "MNB601LS1-3"),
- EDP_PANEL_ENTRY('C', 'S', 'W', 0x1104, &delay_200_500_e50, "MNB601LS1-4"),
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x1104, &delay_200_500_e50_d100, "MNB601LS1-4"),
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x143f, &delay_200_500_e50, "MNE007QS3-6"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1448, &delay_200_500_e50, "MNE007QS3-7"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1457, &delay_80_500_e80_p2e200, "MNE007QS3-8"),
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x1462, &delay_200_500_e50, "MNE007QS5-2"),
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x1468, &delay_200_500_e50, "MNE007QB2-2"),
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x146e, &delay_80_500_e50_d50, "MNE007QB3-1"),
EDP_PANEL_ENTRY('E', 'T', 'C', 0x0000, &delay_50_500_e200_d200_po2e335, "LP079QX1-SP0V"),
@@ -2027,12 +2066,16 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('S', 'H', 'P', 0x1523, &delay_80_500_e50, "LQ140M1JW46"),
EDP_PANEL_ENTRY('S', 'H', 'P', 0x153a, &delay_200_500_e50, "LQ140T1JH01"),
EDP_PANEL_ENTRY('S', 'H', 'P', 0x154c, &delay_200_500_p2e100, "LQ116M1JW10"),
+ EDP_PANEL_ENTRY('S', 'H', 'P', 0x158f, &delay_200_500_p2e100, "LQ134Z1"),
EDP_PANEL_ENTRY('S', 'H', 'P', 0x1593, &delay_200_500_p2e100, "LQ134N1"),
EDP_PANEL_ENTRY('S', 'T', 'A', 0x0004, &delay_200_500_e200, "116KHD024006"),
EDP_PANEL_ENTRY('S', 'T', 'A', 0x0009, &delay_200_500_e250, "116QHD024002"),
EDP_PANEL_ENTRY('S', 'T', 'A', 0x0100, &delay_100_500_e200, "2081116HHD028001-51D"),
+ EDP_PANEL_ENTRY('T', 'M', 'A', 0x0811, &delay_200_500_e80_d50, "TM140VDXP01-04"),
+ EDP_PANEL_ENTRY('T', 'M', 'A', 0x2094, &delay_200_500_e50_d100, "TL140VDMS03-01"),
+
{ /* sentinel */ }
};
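
The panel_delay structures added above follow the file's naming convention: delay_<hpd_absent>_<unprepare>, plus optional e<enable>, d<disable>, p2e<prepare_to_enable> and po2e<powered_on_to_enable> suffixes, all in milliseconds. Reading one of the new entries against that scheme:

/* delay_200_500_e50_d100: HPD absent 200 ms, unprepare 500 ms,
 * enable 50 ms, disable 100 ms. */
static const struct panel_delay delay_200_500_e50_d100 = {
	.hpd_absent = 200,
	.unprepare = 500,
	.enable = 50,
	.disable = 100,
};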
diff --git a/drivers/gpu/drm/panel/panel-himax-hx8279.c b/drivers/gpu/drm/panel/panel-himax-hx8279.c
index fb302d1f91b9..9e443c719843 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx8279.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx8279.c
@@ -935,7 +935,7 @@ static int hx8279_check_dig_gamma(struct hx8279 *hx, struct device *dev, const u
j++;
x++;
} while (x < 4);
- };
+ }
return 0;
}
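
The hx8279 hunk removes a stray semicolon after a closing brace. The extra ';' is merely an empty statement in C, so the code was functionally fine, but static checkers warn about it. A minimal illustration:

	int i, sum = 0;

	for (i = 0; i < 4; i++) {
		sum += i;
	};	/* stray ';': an extra empty statement, drop it */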
diff --git a/drivers/gpu/drm/panel/panel-hydis-hv101hd1.c b/drivers/gpu/drm/panel/panel-hydis-hv101hd1.c
new file mode 100644
index 000000000000..46426c388932
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-hydis-hv101hd1.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/array_size.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct hv101hd1 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct regulator_bulk_data *supplies;
+};
+
+static const struct regulator_bulk_data hv101hd1_supplies[] = {
+ { .supply = "vdd" },
+ { .supply = "vio" },
+};
+
+static inline struct hv101hd1 *to_hv101hd1(struct drm_panel *panel)
+{
+ return container_of(panel, struct hv101hd1, panel);
+}
+
+static int hv101hd1_prepare(struct drm_panel *panel)
+{
+ struct hv101hd1 *hv = to_hv101hd1(panel);
+ struct mipi_dsi_multi_context ctx = { .dsi = hv->dsi };
+ struct device *dev = &hv->dsi->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(hv101hd1_supplies), hv->supplies);
+ if (ret) {
+ dev_err(dev, "error enabling regulators (%d)\n", ret);
+ return ret;
+ }
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 20);
+
+ mipi_dsi_dcs_set_display_on_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 20);
+
+ return 0;
+}
+
+static int hv101hd1_disable(struct drm_panel *panel)
+{
+ struct hv101hd1 *hv = to_hv101hd1(panel);
+ struct mipi_dsi_multi_context ctx = { .dsi = hv->dsi };
+
+ mipi_dsi_dcs_set_display_off_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 120);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 20);
+
+ return 0;
+}
+
+static int hv101hd1_unprepare(struct drm_panel *panel)
+{
+ struct hv101hd1 *hv = to_hv101hd1(panel);
+
+ return regulator_bulk_disable(ARRAY_SIZE(hv101hd1_supplies),
+ hv->supplies);
+}
+
+static const struct drm_display_mode hv101hd1_mode = {
+ .clock = (1366 + 74 + 36 + 24) * (768 + 21 + 7 + 4) * 60 / 1000,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 74,
+ .hsync_end = 1366 + 74 + 36,
+ .htotal = 1366 + 74 + 36 + 24,
+ .vdisplay = 768,
+ .vsync_start = 768 + 21,
+ .vsync_end = 768 + 21 + 7,
+ .vtotal = 768 + 21 + 7 + 4,
+ .width_mm = 140,
+ .height_mm = 220,
+};
+
+static int hv101hd1_get_modes(struct drm_panel *panel, struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &hv101hd1_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs hv101hd1_panel_funcs = {
+ .prepare = hv101hd1_prepare,
+ .disable = hv101hd1_disable,
+ .unprepare = hv101hd1_unprepare,
+ .get_modes = hv101hd1_get_modes,
+};
+
+static int hv101hd1_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct hv101hd1 *hv;
+ int ret;
+
+ hv = devm_drm_panel_alloc(dev, struct hv101hd1, panel,
+ &hv101hd1_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(hv))
+ return PTR_ERR(hv);
+
+ ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(hv101hd1_supplies),
+ hv101hd1_supplies, &hv->supplies);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get regulators\n");
+
+ hv->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, hv);
+
+ dsi->lanes = 2;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM;
+
+ ret = drm_panel_of_backlight(&hv->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ drm_panel_add(&hv->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret) {
+ drm_panel_remove(&hv->panel);
+ return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void hv101hd1_remove(struct mipi_dsi_device *dsi)
+{
+ struct hv101hd1 *hv = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev,
+ "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&hv->panel);
+}
+
+static const struct of_device_id hv101hd1_of_match[] = {
+ { .compatible = "hydis,hv101hd1" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, hv101hd1_of_match);
+
+static struct mipi_dsi_driver hv101hd1_driver = {
+ .driver = {
+ .name = "panel-hv101hd1",
+ .of_match_table = hv101hd1_of_match,
+ },
+ .probe = hv101hd1_probe,
+ .remove = hv101hd1_remove,
+};
+module_mipi_dsi_driver(hv101hd1_driver);
+
+MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>");
+MODULE_DESCRIPTION("DRM driver for Hydis HV101HD1 panel");
+MODULE_LICENSE("GPL");
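
The new driver is written entirely against the mipi_dsi_multi_context helpers from <drm/drm_mipi_dsi.h>: each _multi call records the first failure in ctx.accum_err, and subsequent calls (including mipi_dsi_msleep()) become no-ops once an error is pending, which is why the sequences above need no per-call error checks. A sketch of the idiom:

#include <drm/drm_mipi_dsi.h>

static int example_power_up(struct mipi_dsi_device *dsi)
{
	struct mipi_dsi_multi_context ctx = { .dsi = dsi };

	mipi_dsi_dcs_exit_sleep_mode_multi(&ctx); /* failure lands in ctx.accum_err */
	mipi_dsi_msleep(&ctx, 20);                /* skipped once accum_err is set */
	mipi_dsi_dcs_set_display_on_multi(&ctx);  /* no-op after an earlier error */

	return ctx.accum_err;                     /* 0 on success, -errno otherwise */
}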
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index ac433345a179..ad4993b2f92a 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -1417,6 +1417,200 @@ static const struct ili9881c_instr rpi_7inch_init[] = {
ILI9881C_COMMAND_INSTR(0xD3, 0x39),
};
+static const struct ili9881c_instr bsd1218_a101kl68_init[] = {
+ ILI9881C_SWITCH_PAGE_INSTR(3),
+ ILI9881C_COMMAND_INSTR(0x01, 0x00),
+ ILI9881C_COMMAND_INSTR(0x02, 0x00),
+ ILI9881C_COMMAND_INSTR(0x03, 0x55),
+ ILI9881C_COMMAND_INSTR(0x04, 0x55),
+ ILI9881C_COMMAND_INSTR(0x05, 0x03),
+ ILI9881C_COMMAND_INSTR(0x06, 0x06),
+ ILI9881C_COMMAND_INSTR(0x07, 0x00),
+ ILI9881C_COMMAND_INSTR(0x08, 0x07),
+ ILI9881C_COMMAND_INSTR(0x09, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x10, 0x00),
+ ILI9881C_COMMAND_INSTR(0x11, 0x00),
+ ILI9881C_COMMAND_INSTR(0x12, 0x00),
+ ILI9881C_COMMAND_INSTR(0x13, 0x00),
+ ILI9881C_COMMAND_INSTR(0x14, 0x00),
+ ILI9881C_COMMAND_INSTR(0x15, 0x00),
+ ILI9881C_COMMAND_INSTR(0x16, 0x00),
+ ILI9881C_COMMAND_INSTR(0x17, 0x00),
+ ILI9881C_COMMAND_INSTR(0x18, 0x00),
+ ILI9881C_COMMAND_INSTR(0x19, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1e, 0xc0),
+ ILI9881C_COMMAND_INSTR(0x1f, 0x80),
+ ILI9881C_COMMAND_INSTR(0x20, 0x04),
+ ILI9881C_COMMAND_INSTR(0x21, 0x03),
+ ILI9881C_COMMAND_INSTR(0x22, 0x00),
+ ILI9881C_COMMAND_INSTR(0x23, 0x00),
+ ILI9881C_COMMAND_INSTR(0x24, 0x00),
+ ILI9881C_COMMAND_INSTR(0x25, 0x00),
+ ILI9881C_COMMAND_INSTR(0x26, 0x00),
+ ILI9881C_COMMAND_INSTR(0x27, 0x00),
+ ILI9881C_COMMAND_INSTR(0x28, 0x33),
+ ILI9881C_COMMAND_INSTR(0x29, 0x33),
+ ILI9881C_COMMAND_INSTR(0x2a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x30, 0x00),
+ ILI9881C_COMMAND_INSTR(0x31, 0x00),
+ ILI9881C_COMMAND_INSTR(0x32, 0x00),
+ ILI9881C_COMMAND_INSTR(0x33, 0x00),
+ ILI9881C_COMMAND_INSTR(0x34, 0x04),
+ ILI9881C_COMMAND_INSTR(0x35, 0x00),
+ ILI9881C_COMMAND_INSTR(0x36, 0x00),
+ ILI9881C_COMMAND_INSTR(0x37, 0x00),
+ ILI9881C_COMMAND_INSTR(0x38, 0x3c),
+ ILI9881C_COMMAND_INSTR(0x39, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x40, 0x00),
+ ILI9881C_COMMAND_INSTR(0x41, 0x00),
+ ILI9881C_COMMAND_INSTR(0x42, 0x00),
+ ILI9881C_COMMAND_INSTR(0x43, 0x00),
+ ILI9881C_COMMAND_INSTR(0x44, 0x00),
+ ILI9881C_COMMAND_INSTR(0x50, 0x00),
+ ILI9881C_COMMAND_INSTR(0x51, 0x11),
+ ILI9881C_COMMAND_INSTR(0x52, 0x44),
+ ILI9881C_COMMAND_INSTR(0x53, 0x55),
+ ILI9881C_COMMAND_INSTR(0x54, 0x88),
+ ILI9881C_COMMAND_INSTR(0x55, 0xab),
+ ILI9881C_COMMAND_INSTR(0x56, 0x00),
+ ILI9881C_COMMAND_INSTR(0x57, 0x11),
+ ILI9881C_COMMAND_INSTR(0x58, 0x22),
+ ILI9881C_COMMAND_INSTR(0x59, 0x33),
+ ILI9881C_COMMAND_INSTR(0x5a, 0x44),
+ ILI9881C_COMMAND_INSTR(0x5b, 0x55),
+ ILI9881C_COMMAND_INSTR(0x5c, 0x66),
+ ILI9881C_COMMAND_INSTR(0x5d, 0x77),
+ ILI9881C_COMMAND_INSTR(0x5e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x5f, 0x02),
+ ILI9881C_COMMAND_INSTR(0x60, 0x02),
+ ILI9881C_COMMAND_INSTR(0x61, 0x0a),
+ ILI9881C_COMMAND_INSTR(0x62, 0x09),
+ ILI9881C_COMMAND_INSTR(0x63, 0x08),
+ ILI9881C_COMMAND_INSTR(0x64, 0x13),
+ ILI9881C_COMMAND_INSTR(0x65, 0x12),
+ ILI9881C_COMMAND_INSTR(0x66, 0x11),
+ ILI9881C_COMMAND_INSTR(0x67, 0x10),
+ ILI9881C_COMMAND_INSTR(0x68, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x69, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x6a, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x6b, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x06),
+ ILI9881C_COMMAND_INSTR(0x6d, 0x07),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x02),
+ ILI9881C_COMMAND_INSTR(0x70, 0x02),
+ ILI9881C_COMMAND_INSTR(0x71, 0x02),
+ ILI9881C_COMMAND_INSTR(0x72, 0x02),
+ ILI9881C_COMMAND_INSTR(0x73, 0x02),
+ ILI9881C_COMMAND_INSTR(0x74, 0x02),
+ ILI9881C_COMMAND_INSTR(0x75, 0x02),
+ ILI9881C_COMMAND_INSTR(0x76, 0x02),
+ ILI9881C_COMMAND_INSTR(0x77, 0x0a),
+ ILI9881C_COMMAND_INSTR(0x78, 0x06),
+ ILI9881C_COMMAND_INSTR(0x79, 0x07),
+ ILI9881C_COMMAND_INSTR(0x7a, 0x10),
+ ILI9881C_COMMAND_INSTR(0x7b, 0x11),
+ ILI9881C_COMMAND_INSTR(0x7c, 0x12),
+ ILI9881C_COMMAND_INSTR(0x7d, 0x13),
+ ILI9881C_COMMAND_INSTR(0x7e, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x7f, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x80, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x81, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x82, 0x09),
+ ILI9881C_COMMAND_INSTR(0x83, 0x08),
+ ILI9881C_COMMAND_INSTR(0x84, 0x02),
+ ILI9881C_COMMAND_INSTR(0x85, 0x02),
+ ILI9881C_COMMAND_INSTR(0x86, 0x02),
+ ILI9881C_COMMAND_INSTR(0x87, 0x02),
+ ILI9881C_COMMAND_INSTR(0x88, 0x02),
+ ILI9881C_COMMAND_INSTR(0x89, 0x02),
+ ILI9881C_COMMAND_INSTR(0x8a, 0x02),
+
+ ILI9881C_SWITCH_PAGE_INSTR(4),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x2a),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x37),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x24),
+ ILI9881C_COMMAND_INSTR(0x8d, 0x19),
+ ILI9881C_COMMAND_INSTR(0x87, 0xba),
+ ILI9881C_COMMAND_INSTR(0xb2, 0xd1),
+ ILI9881C_COMMAND_INSTR(0x88, 0x0b),
+ ILI9881C_COMMAND_INSTR(0x38, 0x01),
+ ILI9881C_COMMAND_INSTR(0x39, 0x00),
+ ILI9881C_COMMAND_INSTR(0xb5, 0x02),
+ ILI9881C_COMMAND_INSTR(0x31, 0x25),
+ ILI9881C_COMMAND_INSTR(0x3b, 0x98),
+
+ ILI9881C_SWITCH_PAGE_INSTR(1),
+ ILI9881C_COMMAND_INSTR(0x22, 0x0a),
+ ILI9881C_COMMAND_INSTR(0x31, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x53, 0x40),
+ ILI9881C_COMMAND_INSTR(0x55, 0x45),
+ ILI9881C_COMMAND_INSTR(0x50, 0xb7),
+ ILI9881C_COMMAND_INSTR(0x51, 0xb2),
+ ILI9881C_COMMAND_INSTR(0x60, 0x07),
+ ILI9881C_COMMAND_INSTR(0xa0, 0x22),
+ ILI9881C_COMMAND_INSTR(0xa1, 0x3f),
+ ILI9881C_COMMAND_INSTR(0xa2, 0x4e),
+ ILI9881C_COMMAND_INSTR(0xa3, 0x17),
+ ILI9881C_COMMAND_INSTR(0xa4, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xa5, 0x2d),
+ ILI9881C_COMMAND_INSTR(0xa6, 0x21),
+ ILI9881C_COMMAND_INSTR(0xa7, 0x22),
+ ILI9881C_COMMAND_INSTR(0xa8, 0xc4),
+ ILI9881C_COMMAND_INSTR(0xa9, 0x1b),
+ ILI9881C_COMMAND_INSTR(0xaa, 0x25),
+ ILI9881C_COMMAND_INSTR(0xab, 0xa7),
+ ILI9881C_COMMAND_INSTR(0xac, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xad, 0x19),
+ ILI9881C_COMMAND_INSTR(0xae, 0x4b),
+ ILI9881C_COMMAND_INSTR(0xaf, 0x1f),
+ ILI9881C_COMMAND_INSTR(0xb0, 0x2a),
+ ILI9881C_COMMAND_INSTR(0xb1, 0x59),
+ ILI9881C_COMMAND_INSTR(0xb2, 0x64),
+ ILI9881C_COMMAND_INSTR(0xb3, 0x3f),
+ ILI9881C_COMMAND_INSTR(0xc0, 0x22),
+ ILI9881C_COMMAND_INSTR(0xc1, 0x48),
+ ILI9881C_COMMAND_INSTR(0xc2, 0x59),
+ ILI9881C_COMMAND_INSTR(0xc3, 0x15),
+ ILI9881C_COMMAND_INSTR(0xc4, 0x15),
+ ILI9881C_COMMAND_INSTR(0xc5, 0x28),
+ ILI9881C_COMMAND_INSTR(0xc6, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xc7, 0x1e),
+ ILI9881C_COMMAND_INSTR(0xc8, 0xc4),
+ ILI9881C_COMMAND_INSTR(0xc9, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xca, 0x2b),
+ ILI9881C_COMMAND_INSTR(0xcb, 0xa3),
+ ILI9881C_COMMAND_INSTR(0xcc, 0x1f),
+ ILI9881C_COMMAND_INSTR(0xcd, 0x1e),
+ ILI9881C_COMMAND_INSTR(0xce, 0x52),
+ ILI9881C_COMMAND_INSTR(0xcf, 0x24),
+ ILI9881C_COMMAND_INSTR(0xd0, 0x2a),
+ ILI9881C_COMMAND_INSTR(0xd1, 0x58),
+ ILI9881C_COMMAND_INSTR(0xd2, 0x68),
+ ILI9881C_COMMAND_INSTR(0xd3, 0x3f),
+};
+
static inline struct ili9881c *panel_to_ili9881c(struct drm_panel *panel)
{
return container_of(panel, struct ili9881c, panel);
@@ -1433,33 +1627,24 @@ static inline struct ili9881c *panel_to_ili9881c(struct drm_panel *panel)
* So before any attempt at sending a command or data, we have to be
* sure if we're in the right page or not.
*/
-static int ili9881c_switch_page(struct ili9881c *ctx, u8 page)
+static void ili9881c_switch_page(struct mipi_dsi_multi_context *mctx, u8 page)
{
u8 buf[4] = { 0xff, 0x98, 0x81, page };
- int ret;
-
- ret = mipi_dsi_dcs_write_buffer(ctx->dsi, buf, sizeof(buf));
- if (ret < 0)
- return ret;
- return 0;
+ mipi_dsi_dcs_write_buffer_multi(mctx, buf, sizeof(buf));
}
-static int ili9881c_send_cmd_data(struct ili9881c *ctx, u8 cmd, u8 data)
+static void ili9881c_send_cmd_data(struct mipi_dsi_multi_context *mctx, u8 cmd, u8 data)
{
u8 buf[2] = { cmd, data };
- int ret;
-
- ret = mipi_dsi_dcs_write_buffer(ctx->dsi, buf, sizeof(buf));
- if (ret < 0)
- return ret;
- return 0;
+ mipi_dsi_dcs_write_buffer_multi(mctx, buf, sizeof(buf));
}
static int ili9881c_prepare(struct drm_panel *panel)
{
struct ili9881c *ctx = panel_to_ili9881c(panel);
+ struct mipi_dsi_multi_context mctx = { .dsi = ctx->dsi };
unsigned int i;
int ret;
@@ -1480,61 +1665,39 @@ static int ili9881c_prepare(struct drm_panel *panel)
const struct ili9881c_instr *instr = &ctx->desc->init[i];
if (instr->op == ILI9881C_SWITCH_PAGE)
- ret = ili9881c_switch_page(ctx, instr->arg.page);
+ ili9881c_switch_page(&mctx, instr->arg.page);
else if (instr->op == ILI9881C_COMMAND)
- ret = ili9881c_send_cmd_data(ctx, instr->arg.cmd.cmd,
- instr->arg.cmd.data);
-
- if (ret)
- return ret;
+ ili9881c_send_cmd_data(&mctx, instr->arg.cmd.cmd,
+ instr->arg.cmd.data);
}
- ret = ili9881c_switch_page(ctx, 0);
- if (ret)
- return ret;
-
- if (ctx->address_mode) {
- ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_SET_ADDRESS_MODE,
- &ctx->address_mode,
- sizeof(ctx->address_mode));
- if (ret < 0)
- return ret;
- }
-
- ret = mipi_dsi_dcs_set_tear_on(ctx->dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (ret)
- return ret;
-
- ret = mipi_dsi_dcs_exit_sleep_mode(ctx->dsi);
- if (ret)
- return ret;
+ ili9881c_switch_page(&mctx, 0);
- return 0;
-}
+ if (ctx->address_mode)
+ ili9881c_send_cmd_data(&mctx, MIPI_DCS_SET_ADDRESS_MODE,
+ ctx->address_mode);
-static int ili9881c_enable(struct drm_panel *panel)
-{
- struct ili9881c *ctx = panel_to_ili9881c(panel);
-
- msleep(120);
-
- mipi_dsi_dcs_set_display_on(ctx->dsi);
+ mipi_dsi_dcs_set_tear_on_multi(&mctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&mctx);
+ mipi_dsi_msleep(&mctx, 120);
+ mipi_dsi_dcs_set_display_on_multi(&mctx);
+ if (mctx.accum_err)
+ goto disable_power;
return 0;
-}
-static int ili9881c_disable(struct drm_panel *panel)
-{
- struct ili9881c *ctx = panel_to_ili9881c(panel);
-
- return mipi_dsi_dcs_set_display_off(ctx->dsi);
+disable_power:
+ regulator_disable(ctx->power);
+ return mctx.accum_err;
}
static int ili9881c_unprepare(struct drm_panel *panel)
{
struct ili9881c *ctx = panel_to_ili9881c(panel);
+ struct mipi_dsi_multi_context mctx = { .dsi = ctx->dsi };
- mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
+ mipi_dsi_dcs_set_display_off_multi(&mctx);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&mctx);
regulator_disable(ctx->power);
gpiod_set_value_cansleep(ctx->reset, 1);
@@ -1660,6 +1823,23 @@ static const struct drm_display_mode rpi_7inch_default_mode = {
.height_mm = 151,
};
+static const struct drm_display_mode bsd1218_a101kl68_default_mode = {
+ .clock = 70000,
+
+ .hdisplay = 800,
+ .hsync_start = 800 + 40,
+ .hsync_end = 800 + 40 + 20,
+ .htotal = 800 + 40 + 20 + 20,
+
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 20,
+ .vsync_end = 1280 + 20 + 4,
+ .vtotal = 1280 + 20 + 4 + 20,
+
+ .width_mm = 120,
+ .height_mm = 170,
+};
+
static int ili9881c_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
@@ -1706,8 +1886,6 @@ static enum drm_panel_orientation ili9881c_get_orientation(struct drm_panel *pan
static const struct drm_panel_funcs ili9881c_funcs = {
.prepare = ili9881c_prepare,
.unprepare = ili9881c_unprepare,
- .enable = ili9881c_enable,
- .disable = ili9881c_disable,
.get_modes = ili9881c_get_modes,
.get_orientation = ili9881c_get_orientation,
};
@@ -1830,8 +2008,18 @@ static const struct ili9881c_desc rpi_7inch_desc = {
.lanes = 2,
};
+static const struct ili9881c_desc bsd1218_a101kl68_desc = {
+ .init = bsd1218_a101kl68_init,
+ .init_length = ARRAY_SIZE(bsd1218_a101kl68_init),
+ .mode = &bsd1218_a101kl68_default_mode,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET,
+ .lanes = 4,
+};
+
static const struct of_device_id ili9881c_of_match[] = {
{ .compatible = "bananapi,lhr050h41", .data = &lhr050h41_desc },
+ { .compatible = "bestar,bsd1218-a101kl68", .data = &bsd1218_a101kl68_desc },
{ .compatible = "feixin,k101-im2byl02", .data = &k101_im2byl02_desc },
{ .compatible = "startek,kd050hdfia020", .data = &kd050hdfia020_desc },
{ .compatible = "tdo,tl050hdv35", .data = &tl050hdv35_desc },
diff --git a/drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c b/drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c
index 5f897e143758..23462065d726 100644
--- a/drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c
+++ b/drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c
@@ -81,25 +81,25 @@ static int jdi_panel_disable(struct drm_panel *panel)
static int jdi_panel_unprepare(struct drm_panel *panel)
{
struct jdi_panel *jdi = to_panel_jdi(panel);
- int ret;
- ret = mipi_dsi_dcs_set_display_off(jdi->link1);
- if (ret < 0)
- dev_err(panel->dev, "failed to set display off: %d\n", ret);
+ /*
+ * One context per panel since we'll continue trying to shut down the
+ * other panel even if one isn't responding.
+ */
+ struct mipi_dsi_multi_context dsi_ctx1 = { .dsi = jdi->link1 };
+ struct mipi_dsi_multi_context dsi_ctx2 = { .dsi = jdi->link2 };
- ret = mipi_dsi_dcs_set_display_off(jdi->link2);
- if (ret < 0)
- dev_err(panel->dev, "failed to set display off: %d\n", ret);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx1);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx2);
/* Specified by JDI @ 50ms, subject to change */
msleep(50);
- ret = mipi_dsi_dcs_enter_sleep_mode(jdi->link1);
- if (ret < 0)
- dev_err(panel->dev, "failed to enter sleep mode: %d\n", ret);
- ret = mipi_dsi_dcs_enter_sleep_mode(jdi->link2);
- if (ret < 0)
- dev_err(panel->dev, "failed to enter sleep mode: %d\n", ret);
+ /* Doesn't hurt to try sleep mode even if display off fails */
+ dsi_ctx1.accum_err = 0;
+ dsi_ctx2.accum_err = 0;
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx1);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx2);
/* Specified by JDI @ 150ms, subject to change */
msleep(150);
@@ -123,72 +123,46 @@ static int jdi_panel_unprepare(struct drm_panel *panel)
/* Specified by JDI @ 20ms, subject to change */
msleep(20);
- return ret;
+ return 0;
}
-static int jdi_setup_symmetrical_split(struct mipi_dsi_device *left,
- struct mipi_dsi_device *right,
- const struct drm_display_mode *mode)
+static void jdi_setup_symmetrical_split(struct mipi_dsi_multi_context *dsi_ctx,
+ struct mipi_dsi_device *left,
+ struct mipi_dsi_device *right,
+ const struct drm_display_mode *mode)
{
- int err;
-
- err = mipi_dsi_dcs_set_column_address(left, 0, mode->hdisplay / 2 - 1);
- if (err < 0) {
- dev_err(&left->dev, "failed to set column address: %d\n", err);
- return err;
- }
-
- err = mipi_dsi_dcs_set_column_address(right, 0, mode->hdisplay / 2 - 1);
- if (err < 0) {
- dev_err(&right->dev, "failed to set column address: %d\n", err);
- return err;
- }
-
- err = mipi_dsi_dcs_set_page_address(left, 0, mode->vdisplay - 1);
- if (err < 0) {
- dev_err(&left->dev, "failed to set page address: %d\n", err);
- return err;
- }
-
- err = mipi_dsi_dcs_set_page_address(right, 0, mode->vdisplay - 1);
- if (err < 0) {
- dev_err(&right->dev, "failed to set page address: %d\n", err);
- return err;
- }
-
- return 0;
+ mipi_dsi_dual(mipi_dsi_dcs_set_column_address_multi,
+ dsi_ctx, left, right,
+ 0, mode->hdisplay / 2 - 1);
+ mipi_dsi_dual(mipi_dsi_dcs_set_page_address_multi,
+ dsi_ctx, left, right,
+ 0, mode->vdisplay - 1);
}
-static int jdi_write_dcdc_registers(struct jdi_panel *jdi)
+static void jdi_write_dcdc_registers(struct mipi_dsi_multi_context *dsi_ctx,
+ struct jdi_panel *jdi)
{
/* Clear the manufacturer command access protection */
- mipi_dsi_generic_write_seq(jdi->link1, MCS_CMD_ACS_PROT,
- MCS_CMD_ACS_PROT_OFF);
- mipi_dsi_generic_write_seq(jdi->link2, MCS_CMD_ACS_PROT,
- MCS_CMD_ACS_PROT_OFF);
+ mipi_dsi_dual_generic_write_seq_multi(dsi_ctx, jdi->link1, jdi->link2,
+ MCS_CMD_ACS_PROT,
+ MCS_CMD_ACS_PROT_OFF);
/*
- * Change the VGH/VGL divide rations to move the noise generated by the
+ * Change the VGH/VGL divide ratios to move the noise generated by the
* TCONN. This should hopefully avoid interaction with the backlight
* controller.
*/
- mipi_dsi_generic_write_seq(jdi->link1, MCS_PWR_CTRL_FUNC,
- MCS_PWR_CTRL_PARAM1_VGH_330_DIV |
- MCS_PWR_CTRL_PARAM1_DEFAULT,
- MCS_PWR_CTRL_PARAM2_VGL_410_DIV |
- MCS_PWR_CTRL_PARAM2_DEFAULT);
-
- mipi_dsi_generic_write_seq(jdi->link2, MCS_PWR_CTRL_FUNC,
- MCS_PWR_CTRL_PARAM1_VGH_330_DIV |
- MCS_PWR_CTRL_PARAM1_DEFAULT,
- MCS_PWR_CTRL_PARAM2_VGL_410_DIV |
- MCS_PWR_CTRL_PARAM2_DEFAULT);
-
- return 0;
+ mipi_dsi_dual_generic_write_seq_multi(dsi_ctx, jdi->link1, jdi->link2,
+ MCS_PWR_CTRL_FUNC,
+ MCS_PWR_CTRL_PARAM1_VGH_330_DIV |
+ MCS_PWR_CTRL_PARAM1_DEFAULT,
+ MCS_PWR_CTRL_PARAM2_VGL_410_DIV |
+ MCS_PWR_CTRL_PARAM2_DEFAULT);
}
static int jdi_panel_prepare(struct drm_panel *panel)
{
struct jdi_panel *jdi = to_panel_jdi(panel);
+ struct mipi_dsi_multi_context dsi_ctx = {};
int err;
/* Disable backlight to avoid showing random pixels
@@ -231,86 +205,36 @@ static int jdi_panel_prepare(struct drm_panel *panel)
* put in place to communicate the configuration back to the DSI host
* controller.
*/
- err = jdi_setup_symmetrical_split(jdi->link1, jdi->link2,
- jdi->mode);
- if (err < 0) {
- dev_err(panel->dev, "failed to set up symmetrical split: %d\n",
- err);
- goto poweroff;
- }
+ jdi_setup_symmetrical_split(&dsi_ctx, jdi->link1, jdi->link2,
+ jdi->mode);
- err = mipi_dsi_dcs_set_tear_scanline(jdi->link1,
- jdi->mode->vdisplay - 16);
- if (err < 0) {
- dev_err(panel->dev, "failed to set tear scanline: %d\n", err);
- goto poweroff;
- }
+ mipi_dsi_dual(mipi_dsi_dcs_set_tear_scanline_multi,
+ &dsi_ctx, jdi->link1, jdi->link2,
+ jdi->mode->vdisplay - 16);
- err = mipi_dsi_dcs_set_tear_scanline(jdi->link2,
- jdi->mode->vdisplay - 16);
- if (err < 0) {
- dev_err(panel->dev, "failed to set tear scanline: %d\n", err);
- goto poweroff;
- }
-
- err = mipi_dsi_dcs_set_tear_on(jdi->link1,
- MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (err < 0) {
- dev_err(panel->dev, "failed to set tear on: %d\n", err);
- goto poweroff;
- }
-
- err = mipi_dsi_dcs_set_tear_on(jdi->link2,
- MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (err < 0) {
- dev_err(panel->dev, "failed to set tear on: %d\n", err);
- goto poweroff;
- }
+ mipi_dsi_dual(mipi_dsi_dcs_set_tear_on_multi,
+ &dsi_ctx, jdi->link1, jdi->link2,
+ MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- err = mipi_dsi_dcs_set_pixel_format(jdi->link1, MIPI_DCS_PIXEL_FMT_24BIT);
- if (err < 0) {
- dev_err(panel->dev, "failed to set pixel format: %d\n", err);
- goto poweroff;
- }
+ mipi_dsi_dual(mipi_dsi_dcs_set_pixel_format_multi,
+ &dsi_ctx, jdi->link1, jdi->link2,
+ MIPI_DCS_PIXEL_FMT_24BIT);
- err = mipi_dsi_dcs_set_pixel_format(jdi->link2, MIPI_DCS_PIXEL_FMT_24BIT);
- if (err < 0) {
- dev_err(panel->dev, "failed to set pixel format: %d\n", err);
- goto poweroff;
- }
+ mipi_dsi_dual(mipi_dsi_dcs_exit_sleep_mode_multi,
+ &dsi_ctx, jdi->link1, jdi->link2);
- err = mipi_dsi_dcs_exit_sleep_mode(jdi->link1);
- if (err < 0) {
- dev_err(panel->dev, "failed to exit sleep mode: %d\n", err);
- goto poweroff;
- }
-
- err = mipi_dsi_dcs_exit_sleep_mode(jdi->link2);
- if (err < 0) {
- dev_err(panel->dev, "failed to exit sleep mode: %d\n", err);
- goto poweroff;
- }
-
- err = jdi_write_dcdc_registers(jdi);
- if (err < 0) {
- dev_err(panel->dev, "failed to write dcdc registers: %d\n", err);
- goto poweroff;
- }
+ jdi_write_dcdc_registers(&dsi_ctx, jdi);
/*
- * We need to wait 150ms between mipi_dsi_dcs_exit_sleep_mode() and
- * mipi_dsi_dcs_set_display_on().
+ * We need to wait 150ms between mipi_dsi_dcs_exit_sleep_mode_multi()
+ * and mipi_dsi_dcs_set_display_on_multi().
*/
- msleep(150);
+ mipi_dsi_msleep(&dsi_ctx, 150);
- err = mipi_dsi_dcs_set_display_on(jdi->link1);
- if (err < 0) {
- dev_err(panel->dev, "failed to set display on: %d\n", err);
- goto poweroff;
- }
+ mipi_dsi_dual(mipi_dsi_dcs_set_display_on_multi,
+ &dsi_ctx, jdi->link1, jdi->link2);
- err = mipi_dsi_dcs_set_display_on(jdi->link2);
- if (err < 0) {
- dev_err(panel->dev, "failed to set display on: %d\n", err);
+ if (dsi_ctx.accum_err < 0) {
+ err = dsi_ctx.accum_err;
goto poweroff;
}
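
The jdi conversion leans on the mipi_dsi_dual() helper, which replays a single _multi call on each of the two DSI links while sharing one context, so errors from either link accumulate in the same accum_err. A sketch of what the macro does (the real definition lives in <drm/drm_mipi_dsi.h>):

#define mipi_dsi_dual(_func, _ctx, _dsi1, _dsi2, ...)	\
	do {						\
		(_ctx)->dsi = (_dsi1);			\
		_func((_ctx), ##__VA_ARGS__);		\
		(_ctx)->dsi = (_dsi2);			\
		_func((_ctx), ##__VA_ARGS__);		\
	} while (0)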
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
index 23fd535d8f47..46b07f38559f 100644
--- a/drivers/gpu/drm/panel/panel-lvds.c
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -28,8 +28,6 @@ struct panel_lvds {
struct device *dev;
const char *label;
- unsigned int width;
- unsigned int height;
struct drm_display_mode dmode;
u32 bus_flags;
unsigned int bus_format;
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35560.c b/drivers/gpu/drm/panel/panel-novatek-nt35560.c
index 98f0782c8411..561e6643dcbb 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35560.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35560.c
@@ -148,24 +148,20 @@ static inline struct nt35560 *panel_to_nt35560(struct drm_panel *panel)
static int nt35560_set_brightness(struct backlight_device *bl)
{
struct nt35560 *nt = bl_get_data(bl);
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
- int period_ns = 1023;
+ struct mipi_dsi_multi_context dsi_ctx = {
+ .dsi = to_mipi_dsi_device(nt->dev)
+ };
int duty_ns = bl->props.brightness;
+ int period_ns = 1023;
u8 pwm_ratio;
u8 pwm_div;
- u8 par;
- int ret;
if (backlight_is_blank(bl)) {
/* Disable backlight */
- par = 0x00;
- ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
- &par, 1);
- if (ret) {
- dev_err(nt->dev, "failed to disable display backlight (%d)\n", ret);
- return ret;
- }
- return 0;
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx,
+ MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ 0x00);
+ return dsi_ctx.accum_err;
}
/* Calculate the PWM duty cycle in n/256's */
@@ -176,12 +172,6 @@ static int nt35560_set_brightness(struct backlight_device *bl)
/* Set up PWM dutycycle ONE byte (differs from the standard) */
dev_dbg(nt->dev, "calculated duty cycle %02x\n", pwm_ratio);
- ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
- &pwm_ratio, 1);
- if (ret < 0) {
- dev_err(nt->dev, "failed to set display PWM ratio (%d)\n", ret);
- return ret;
- }
/*
* Sequence to write PWMDIV:
@@ -192,46 +182,23 @@ static int nt35560_set_brightness(struct backlight_device *bl)
* 0x22 PWMDIV
* 0x7F 0xAA CMD2 page 1 lock
*/
- par = 0xaa;
- ret = mipi_dsi_dcs_write(dsi, 0xf3, &par, 1);
- if (ret < 0) {
- dev_err(nt->dev, "failed to unlock CMD 2 (%d)\n", ret);
- return ret;
- }
- par = 0x01;
- ret = mipi_dsi_dcs_write(dsi, 0x00, &par, 1);
- if (ret < 0) {
- dev_err(nt->dev, "failed to enter page 1 (%d)\n", ret);
- return ret;
- }
- par = 0x01;
- ret = mipi_dsi_dcs_write(dsi, 0x7d, &par, 1);
- if (ret < 0) {
- dev_err(nt->dev, "failed to disable MTP reload (%d)\n", ret);
- return ret;
- }
- ret = mipi_dsi_dcs_write(dsi, 0x22, &pwm_div, 1);
- if (ret < 0) {
- dev_err(nt->dev, "failed to set PWM divisor (%d)\n", ret);
- return ret;
- }
- par = 0xaa;
- ret = mipi_dsi_dcs_write(dsi, 0x7f, &par, 1);
- if (ret < 0) {
- dev_err(nt->dev, "failed to lock CMD 2 (%d)\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_write_var_seq_multi(&dsi_ctx,
+ MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
+ pwm_ratio);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf3, 0xaa);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7d, 0x01);
+
+ mipi_dsi_dcs_write_var_seq_multi(&dsi_ctx, 0x22, pwm_div);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7f, 0xaa);
/* Enable backlight */
- par = 0x24;
- ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
- &par, 1);
- if (ret < 0) {
- dev_err(nt->dev, "failed to enable display backlight (%d)\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ 0x24);
- return 0;
+ return dsi_ctx.accum_err;
}
static const struct backlight_ops nt35560_bl_ops = {
@@ -244,32 +211,23 @@ static const struct backlight_properties nt35560_bl_props = {
.max_brightness = 1023,
};
-static int nt35560_read_id(struct nt35560 *nt)
+static void nt35560_read_id(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ struct device dev = dsi_ctx->dsi->dev;
u8 vendor, version, panel;
u16 val;
- int ret;
- ret = mipi_dsi_dcs_read(dsi, NT35560_DCS_READ_ID1, &vendor, 1);
- if (ret < 0) {
- dev_err(nt->dev, "could not vendor ID byte\n");
- return ret;
- }
- ret = mipi_dsi_dcs_read(dsi, NT35560_DCS_READ_ID2, &version, 1);
- if (ret < 0) {
- dev_err(nt->dev, "could not read device version byte\n");
- return ret;
- }
- ret = mipi_dsi_dcs_read(dsi, NT35560_DCS_READ_ID3, &panel, 1);
- if (ret < 0) {
- dev_err(nt->dev, "could not read panel ID byte\n");
- return ret;
- }
+ mipi_dsi_dcs_read_multi(dsi_ctx, NT35560_DCS_READ_ID1, &vendor, 1);
+ mipi_dsi_dcs_read_multi(dsi_ctx, NT35560_DCS_READ_ID2, &version, 1);
+ mipi_dsi_dcs_read_multi(dsi_ctx, NT35560_DCS_READ_ID3, &panel, 1);
+
+ if (dsi_ctx->accum_err < 0)
+ return;
if (vendor == 0x00) {
- dev_err(nt->dev, "device vendor ID is zero\n");
- return -ENODEV;
+ dev_err(&dev, "device vendor ID is zero\n");
+ dsi_ctx->accum_err = -ENODEV;
+ return;
}
val = (vendor << 8) | panel;
@@ -278,16 +236,16 @@ static int nt35560_read_id(struct nt35560 *nt)
case DISPLAY_SONY_ACX424AKP_ID2:
case DISPLAY_SONY_ACX424AKP_ID3:
case DISPLAY_SONY_ACX424AKP_ID4:
- dev_info(nt->dev, "MTP vendor: %02x, version: %02x, panel: %02x\n",
+ dev_info(&dev,
+ "MTP vendor: %02x, version: %02x, panel: %02x\n",
vendor, version, panel);
break;
default:
- dev_info(nt->dev, "unknown vendor: %02x, version: %02x, panel: %02x\n",
+ dev_info(&dev,
+ "unknown vendor: %02x, version: %02x, panel: %02x\n",
vendor, version, panel);
break;
}
-
- return 0;
}
static int nt35560_power_on(struct nt35560 *nt)
@@ -322,92 +280,56 @@ static void nt35560_power_off(struct nt35560 *nt)
static int nt35560_prepare(struct drm_panel *panel)
{
struct nt35560 *nt = panel_to_nt35560(panel);
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
- const u8 mddi = 3;
+ struct mipi_dsi_multi_context dsi_ctx = {
+ .dsi = to_mipi_dsi_device(nt->dev)
+ };
int ret;
ret = nt35560_power_on(nt);
if (ret)
return ret;
- ret = nt35560_read_id(nt);
- if (ret) {
- dev_err(nt->dev, "failed to read panel ID (%d)\n", ret);
- goto err_power_off;
- }
+ nt35560_read_id(&dsi_ctx);
- /* Enabe tearing mode: send TE (tearing effect) at VBLANK */
- ret = mipi_dsi_dcs_set_tear_on(dsi,
+ /* Enable tearing mode: send TE (tearing effect) at VBLANK */
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx,
MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (ret) {
- dev_err(nt->dev, "failed to enable vblank TE (%d)\n", ret);
- goto err_power_off;
- }
/*
* Set MDDI
*
* This presumably deactivates the Qualcomm MDDI interface and
* selects DSI, similar code is found in other drivers such as the
- * Sharp LS043T1LE01 which makes us suspect that this panel may be
- * using a Novatek NT35565 or similar display driver chip that shares
- * this command. Due to the lack of documentation we cannot know for
- * sure.
+ * Sharp LS043T1LE01.
*/
- ret = mipi_dsi_dcs_write(dsi, NT35560_DCS_SET_MDDI,
- &mddi, sizeof(mddi));
- if (ret < 0) {
- dev_err(nt->dev, "failed to set MDDI (%d)\n", ret);
- goto err_power_off;
- }
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, NT35560_DCS_SET_MDDI, 3);
- /* Exit sleep mode */
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret) {
- dev_err(nt->dev, "failed to exit sleep mode (%d)\n", ret);
- goto err_power_off;
- }
- msleep(140);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 140);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret) {
- dev_err(nt->dev, "failed to turn display on (%d)\n", ret);
- goto err_power_off;
- }
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
if (nt->video_mode) {
- /* In video mode turn peripheral on */
- ret = mipi_dsi_turn_on_peripheral(dsi);
- if (ret) {
- dev_err(nt->dev, "failed to turn on peripheral\n");
- goto err_power_off;
- }
+ mipi_dsi_turn_on_peripheral_multi(&dsi_ctx);
}
- return 0;
-
-err_power_off:
- nt35560_power_off(nt);
- return ret;
+ if (dsi_ctx.accum_err < 0)
+ nt35560_power_off(nt);
+ return dsi_ctx.accum_err;
}
static int nt35560_unprepare(struct drm_panel *panel)
{
struct nt35560 *nt = panel_to_nt35560(panel);
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = {
+ .dsi = to_mipi_dsi_device(nt->dev)
+ };
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret) {
- dev_err(nt->dev, "failed to turn display off (%d)\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+
+ if (dsi_ctx.accum_err < 0)
+ return dsi_ctx.accum_err;
- /* Enter sleep mode */
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret) {
- dev_err(nt->dev, "failed to enter sleep mode (%d)\n", ret);
- return ret;
- }
msleep(85);
nt35560_power_off(nt);
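
The nt35560 conversion uses both flavours of the sequence helpers: mipi_dsi_dcs_write_seq_multi() wants compile-time payload bytes (it builds a static const buffer internally), while mipi_dsi_dcs_write_var_seq_multi() also accepts values computed at runtime, such as the PWM ratio above. A sketch of the distinction (function and parameter names hypothetical):

static void example_backlight_setup(struct mipi_dsi_multi_context *ctx,
				    u8 pwm_ratio)
{
	/* Payload is constant: the plain _seq_ helper is fine. */
	mipi_dsi_dcs_write_seq_multi(ctx, 0xf3, 0xaa);

	/* Payload computed at runtime: use the _var_seq_ helper. */
	mipi_dsi_dcs_write_var_seq_multi(ctx, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
					 pwm_ratio);
}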
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36523.c b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
index 32cf64c7c18b..226d91daf8c7 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36523.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
@@ -23,14 +23,6 @@
#define DSI_NUM_MIN 1
-#define mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, cmd, seq...) \
- do { \
- dsi_ctx.dsi = dsi0; \
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, cmd, seq); \
- dsi_ctx.dsi = dsi1; \
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, cmd, seq); \
- } while (0)
-
struct panel_info {
struct drm_panel panel;
struct mipi_dsi_device *dsi[2];
@@ -71,217 +63,217 @@ static int elish_boe_init_sequence(struct panel_info *pinfo)
struct mipi_dsi_device *dsi1 = pinfo->dsi[1];
struct mipi_dsi_multi_context dsi_ctx = { .dsi = NULL };
/* No datasheet, so write magic init sequence directly */
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x05);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x18, 0x40);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x02);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x23);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x80);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0x84);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x05, 0x2d);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x06, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x07, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x08, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0x45);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11, 0x02);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x12, 0x80);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x15, 0x83);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x16, 0x0c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29, 0x0a);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0xff);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x31, 0xfe);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x32, 0xfd);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x33, 0xfb);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x34, 0xf8);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0xf5);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x36, 0xf3);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x37, 0xf2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x38, 0xf2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0xf2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0xef);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0xec);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3d, 0xe9);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3f, 0xe5);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x40, 0xe5);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x41, 0xe5);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x13);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x45, 0xff);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x46, 0xf4);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x47, 0xe7);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x48, 0xda);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x49, 0xcd);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4a, 0xc0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4b, 0xb3);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4c, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4d, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4e, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x99);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x50, 0x80);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x68);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x52, 0x66);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x66);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x54, 0x66);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0x0e);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x58, 0xff);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x59, 0xfb);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5a, 0xf7);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5b, 0xf3);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5c, 0xef);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5d, 0xe3);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5e, 0xda);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5f, 0xd8);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x60, 0xd8);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x61, 0xd8);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x62, 0xcb);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x63, 0xbf);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x64, 0xb3);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x65, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x66, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x67, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x25, 0x47);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0x47);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0x47);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1a, 0xe0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0xe0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x84, 0x08);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x85, 0x0c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x91, 0x1f);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x92, 0x0f);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x93, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x94, 0x18);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x95, 0x03);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x96, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb0, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x1f);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x1b);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x24);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x28);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x27);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x31);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd1, 0x20);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd2, 0x30);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x08);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xde, 0x80);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdf, 0x02);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x81);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0xb0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x22);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9f, 0x50);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x6f, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x70, 0x11);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x73, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x74, 0x49);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x76, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x77, 0x49);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa0, 0x3f);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa9, 0x50);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xaa, 0x28);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xab, 0x28);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xad, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x49);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xba, 0x49);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x49);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbe, 0x04);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbf, 0x49);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc0, 0x04);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc1, 0x59);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc2, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc5, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc6, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc7, 0x48);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xca, 0x43);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcb, 0x3c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xce, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcf, 0x43);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x3c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd3, 0x43);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x3c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd7, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdc, 0x43);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdd, 0x3c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xe1, 0x43);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xe2, 0x3c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xf2, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xf3, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xf4, 0x48);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x13, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x14, 0x23);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbc, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbd, 0x23);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x97, 0x3c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x98, 0x02);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x99, 0x95);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9a, 0x03);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9b, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9c, 0x0b);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9d, 0x0a);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9e, 0x90);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x22);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9f, 0x50);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x23);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa3, 0x50);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xe0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x14, 0x60);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x16, 0xc0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x02);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0x08);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xd0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x02, 0xaf);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0xee);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x99);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1d, 0x09);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x0f, 0xff);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x2c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x13);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb9, 0x05);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x18, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb9, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x00, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x01, 0x84);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x05, 0x2d);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x06, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x07, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x08, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x09, 0x45);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x11, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x12, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x15, 0x83);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x16, 0x0c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x29, 0x0a);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x30, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x31, 0xfe);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x32, 0xfd);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x33, 0xfb);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x34, 0xf8);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x35, 0xf5);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x36, 0xf3);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x37, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x38, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x39, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3a, 0xef);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3b, 0xec);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3d, 0xe9);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3f, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x40, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x41, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x2a, 0x13);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x45, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x46, 0xf4);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x47, 0xe7);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x48, 0xda);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x49, 0xcd);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4a, 0xc0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4b, 0xb3);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4c, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4d, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4e, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4f, 0x99);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x50, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x51, 0x68);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x52, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x53, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x54, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x2b, 0x0e);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x58, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x59, 0xfb);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5a, 0xf7);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5b, 0xf3);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5c, 0xef);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5d, 0xe3);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5e, 0xda);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5f, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x60, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x61, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x62, 0xcb);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x63, 0xbf);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x64, 0xb3);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x65, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x66, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x67, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x25, 0x47);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x30, 0x47);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x39, 0x47);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x19, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1a, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1b, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1c, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x2a, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x2b, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x84, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x85, 0x0c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x51, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x91, 0x1f);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x92, 0x0f);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x93, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x94, 0x18);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x95, 0x03);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x96, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb0, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x19, 0x1f);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1b, 0x1b);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x24);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb8, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x27);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd0, 0x31);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd1, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd2, 0x30);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd4, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xde, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xdf, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x00, 0x81);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x01, 0xb0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x22);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x9f, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x6f, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x70, 0x11);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x73, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x74, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x76, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x77, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xa0, 0x3f);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xa9, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xaa, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xab, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xad, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb8, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb9, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xba, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbb, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbe, 0x04);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbf, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc0, 0x04);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc1, 0x59);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc2, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc5, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc6, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc7, 0x48);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xca, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xcb, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xce, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xcf, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd0, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd3, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd4, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd7, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xdc, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xdd, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xe1, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xe2, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xf2, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xf3, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xf4, 0x48);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x13, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x14, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbc, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbd, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x97, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x98, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x99, 0x95);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x9a, 0x03);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x9b, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x9c, 0x0b);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x9d, 0x0a);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x9e, 0x90);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x22);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x9f, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xa3, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x14, 0x60);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x16, 0xc0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4f, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3a, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0xd0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x02, 0xaf);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x09, 0xee);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1c, 0x99);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1d, 0x09);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x51, 0x0f, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x53, 0x2c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x35, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbb, 0x13);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x11);
mipi_dsi_msleep(&dsi_ctx, 70);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x29);
return dsi_ctx.accum_err;
}
@@ -292,195 +284,195 @@ static int elish_csot_init_sequence(struct panel_info *pinfo)
struct mipi_dsi_device *dsi1 = pinfo->dsi[1];
struct mipi_dsi_multi_context dsi_ctx = { .dsi = NULL };
/* No datasheet, so write magic init sequence directly */
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x05);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x18, 0x40);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x02);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xd0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x02, 0xaf);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x30);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0xee);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x99);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1d, 0x09);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0x08);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xe0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x02);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x58, 0x40);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x23);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x80);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0x84);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x05, 0x2d);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x06, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x07, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x08, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0x45);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11, 0x02);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x12, 0x80);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x15, 0x83);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x16, 0x0c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29, 0x0a);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0xff);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x31, 0xfe);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x32, 0xfd);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x33, 0xfb);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x34, 0xf8);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0xf5);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x36, 0xf3);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x37, 0xf2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x38, 0xf2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0xf2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0xef);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0xec);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3d, 0xe9);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3f, 0xe5);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x40, 0xe5);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x41, 0xe5);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x13);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x45, 0xff);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x46, 0xf4);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x47, 0xe7);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x48, 0xda);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x49, 0xcd);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4a, 0xc0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4b, 0xb3);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4c, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4d, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4e, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x99);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x50, 0x80);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x68);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x52, 0x66);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x66);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x54, 0x66);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0x0e);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x58, 0xff);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x59, 0xfb);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5a, 0xf7);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5b, 0xf3);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5c, 0xef);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5d, 0xe3);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5e, 0xda);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5f, 0xd8);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x60, 0xd8);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x61, 0xd8);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x62, 0xcb);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x63, 0xbf);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x64, 0xb3);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x65, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x66, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x67, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x0f, 0xff);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x2c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x55, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x13);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x25, 0x46);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0x46);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0x46);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0xb0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1a, 0xe0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0xe0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x84, 0x08);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x85, 0x0c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x91, 0x1f);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x92, 0x0f);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x93, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x94, 0x18);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x95, 0x03);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x96, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb0, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x1f);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x1b);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x24);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x28);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x27);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x31);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd1, 0x20);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x08);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xde, 0x80);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdf, 0x02);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x81);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0xb0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x22);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x6f, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x70, 0x11);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x73, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x74, 0x4d);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa0, 0x3f);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa9, 0x50);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xaa, 0x28);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xab, 0x28);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xad, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x4b);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xba, 0x96);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x4b);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbe, 0x07);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbf, 0x4b);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc0, 0x07);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc1, 0x5c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc2, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc5, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc6, 0x3f);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc7, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xca, 0x08);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcb, 0x40);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xce, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcf, 0x08);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x40);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd3, 0x08);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x40);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbc, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbd, 0x1c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9a, 0x03);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb9, 0x05);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x18, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb9, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0xd0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x02, 0xaf);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x00, 0x30);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x09, 0xee);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1c, 0x99);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1d, 0x09);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3a, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4f, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x58, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x35, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x00, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x01, 0x84);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x05, 0x2d);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x06, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x07, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x08, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x09, 0x45);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x11, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x12, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x15, 0x83);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x16, 0x0c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x29, 0x0a);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x30, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x31, 0xfe);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x32, 0xfd);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x33, 0xfb);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x34, 0xf8);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x35, 0xf5);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x36, 0xf3);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x37, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x38, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x39, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3a, 0xef);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3b, 0xec);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3d, 0xe9);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3f, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x40, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x41, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x2a, 0x13);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x45, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x46, 0xf4);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x47, 0xe7);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x48, 0xda);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x49, 0xcd);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4a, 0xc0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4b, 0xb3);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4c, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4d, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4e, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4f, 0x99);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x50, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x51, 0x68);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x52, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x53, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x54, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x2b, 0x0e);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x58, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x59, 0xfb);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5a, 0xf7);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5b, 0xf3);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5c, 0xef);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5d, 0xe3);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5e, 0xda);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5f, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x60, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x61, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x62, 0xcb);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x63, 0xbf);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x64, 0xb3);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x65, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x66, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x67, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x51, 0x0f, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x53, 0x2c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x55, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbb, 0x13);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x25, 0x46);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x30, 0x46);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x39, 0x46);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x01, 0xb0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x19, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1a, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1b, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1c, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x2a, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x2b, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x84, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x85, 0x0c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x51, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x91, 0x1f);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x92, 0x0f);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x93, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x94, 0x18);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x95, 0x03);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x96, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb0, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x19, 0x1f);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1b, 0x1b);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x24);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb8, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x27);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd0, 0x31);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd1, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd4, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xde, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xdf, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x00, 0x81);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x01, 0xb0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x22);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x6f, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x70, 0x11);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x73, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x74, 0x4d);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xa0, 0x3f);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xa9, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xaa, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xab, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xad, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb8, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb9, 0x4b);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xba, 0x96);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbb, 0x4b);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbe, 0x07);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbf, 0x4b);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc0, 0x07);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc1, 0x5c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc2, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc5, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc6, 0x3f);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc7, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xca, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xcb, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xce, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xcf, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd0, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd3, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd4, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbc, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbd, 0x1c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x9a, 0x03);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x11);
mipi_dsi_msleep(&dsi_ctx, 70);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x29);
return dsi_ctx.accum_err;
}
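
Both init sequences above lean on the mipi_dsi_multi_context error-accumulation pattern: the context carries an accum_err field, every *_multi helper takes a pointer to the context so it can both consult and update that field, each write becomes a no-op once a previous one has failed, and the function checks the result exactly once by returning dsi_ctx.accum_err. A minimal sketch of a dual-link helper built on that pattern — the name and exact shape here are illustrative, not the driver's actual macro:

	/*
	 * Illustrative dual-link write helper over struct
	 * mipi_dsi_multi_context: point the context at each link in
	 * turn and issue the same DCS write; the *_multi helper skips
	 * the transfer and keeps the first error once accum_err is set.
	 */
	#define example_dual_dcs_write_seq_multi(ctx, d0, d1, cmd...)	\
		do {							\
			(ctx)->dsi = (d0);				\
			mipi_dsi_dcs_write_seq_multi(ctx, cmd);		\
			(ctx)->dsi = (d1);				\
			mipi_dsi_dcs_write_seq_multi(ctx, cmd);		\
		} while (0)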
diff --git a/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c b/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
index 3231e84dc66c..8a608972fc41 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
@@ -276,11 +276,8 @@ static int ota5601a_probe(struct spi_device *spi)
}
err = drm_panel_of_backlight(&panel->drm_panel);
- if (err) {
- if (err != -EPROBE_DEFER)
- dev_err(dev, "Failed to get backlight handle\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err, "Failed to get backlight handle\n");
drm_panel_add(&panel->drm_panel);
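
dev_err_probe(), used above, always returns the error it is handed; it logs at error level for ordinary failures, while for -EPROBE_DEFER it records the deferral reason (readable from debugfs devices_deferred) at debug level instead, which is why no open-coded EPROBE_DEFER check is needed. A hedged sketch of the idiom, with hypothetical names (example_probe, example_get_resource):

	/* Hypothetical probe fragment showing the dev_err_probe() idiom. */
	static int example_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		int err;

		/*
		 * example_get_resource() stands in for any acquisition
		 * that may legitimately return -EPROBE_DEFER.
		 */
		err = example_get_resource(dev);
		if (err)
			return dev_err_probe(dev, err, "failed to get resource\n");

		return 0;
	}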
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c
index e91f50662997..7e2f4e043d62 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c
@@ -7,7 +7,9 @@
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
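
On the header pairing above (hedged, since the rest of this hunk is not shown here): linux/mod_devicetable.h defines struct of_device_id, and linux/property.h declares device_get_match_data(), which hands back the .data pointer of the table entry that matched a device's firmware node. A minimal sketch with hypothetical names (example_panel_desc, example_of_match):

	/* Hypothetical per-variant data selected via the match table. */
	struct example_panel_desc {
		const char *name;
	};

	static const struct example_panel_desc example_desc = {
		.name = "example",
	};

	static const struct of_device_id example_of_match[] = {
		{ .compatible = "vendor,example-panel", .data = &example_desc },
		{ /* sentinel */ }
	};

	/*
	 * device_get_match_data() returns the .data of the entry that
	 * matched this device, or NULL if nothing matched.
	 */
	static const struct example_panel_desc *example_desc_for(struct device *dev)
	{
		return device_get_match_data(dev);
	}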
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa5x01-ams561ra01.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa5x01-ams561ra01.c
new file mode 100644
index 000000000000..56e10c7c3a76
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa5x01-ams561ra01.c
@@ -0,0 +1,981 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Samsung AMS561RA01 panel with S6E8AA5X01 controller.
+ *
+ * Copyright (C) 2025 Kaustabh Chakraborty <kauschluss@disroot.org>
+ */
+
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+/* Manufacturer Command Set */
+#define MCS_AIDCTL 0xb2
+#define MCS_ADAPTIVECTL 0xb5
+#define MCS_ELVSS 0xb6
+#define MCS_TEMPERCTL 0xb8
+#define MCS_PENTILE 0xc0
+#define MCS_GAMMACTL 0xca
+#define MCS_LTPSCTL 0xcb
+#define MCS_PCD 0xcc
+#define MCS_ERRFLAG 0xe7
+#define MCS_ACCESSPROT 0xf0
+#define MCS_DISPCTL 0xf2
+#define MCS_GAMMAUPD 0xf7
+
+#define GAMMA_CMD_LEN 34
+#define AID_CMD_LEN 3
+
+static const struct {
+ u8 gamma[GAMMA_CMD_LEN];
+ u8 aid[AID_CMD_LEN];
+} s6e8aa5x01_ams561ra01_cmds[] = {
+ {
+ /* 5 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x94,
+ 0x88, 0x89, 0x8a, 0x87, 0x87, 0x89,
+ 0x8d, 0x8c, 0x8d, 0x89, 0x8c, 0x8e,
+ 0x8e, 0x8f, 0x90, 0xa3, 0xa2, 0x9a,
+ 0xcf, 0xca, 0x9f, 0xe6, 0xff, 0xb4,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0xa5 },
+ }, {
+ /* 6 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x95,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x8c, 0x8a, 0x8c, 0x85, 0x88, 0x8c,
+ 0x8b, 0x8c, 0x8e, 0xa2, 0xa2, 0x9a,
+ 0xd0, 0xcc, 0xa2, 0xed, 0xff, 0xb7,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x95 },
+ }, {
+ /* 7 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x95,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x8c, 0x8a, 0x8c, 0x85, 0x88, 0x8c,
+ 0x8b, 0x8c, 0x8e, 0xa2, 0xa2, 0x99,
+ 0xc8, 0xc4, 0x9d, 0xed, 0xff, 0xb7,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x89 },
+ }, {
+ /* 8 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8a, 0x87, 0x87, 0x89,
+ 0x8a, 0x88, 0x8b, 0x83, 0x86, 0x8b,
+ 0x8c, 0x8b, 0x8d, 0x9d, 0x9f, 0x97,
+ 0xc7, 0xc3, 0x9c, 0xf5, 0xff, 0xbb,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x7e },
+ }, {
+ /* 9 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8a, 0x87, 0x87, 0x89,
+ 0x89, 0x86, 0x8a, 0x82, 0x84, 0x88,
+ 0x90, 0x8f, 0x91, 0x95, 0x97, 0x94,
+ 0xc6, 0xc2, 0x9d, 0xf5, 0xff, 0xbb,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x73 },
+ }, {
+ /* 10 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8a, 0x87, 0x87, 0x89,
+ 0x89, 0x86, 0x8a, 0x82, 0x84, 0x88,
+ 0x90, 0x8f, 0x91, 0x94, 0x97, 0x93,
+ 0xc6, 0xc2, 0x9e, 0xec, 0xff, 0xb7,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x67 },
+ }, {
+ /* 11 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8a, 0x87, 0x87, 0x89,
+ 0x89, 0x86, 0x8a, 0x82, 0x84, 0x88,
+ 0x8b, 0x8b, 0x8d, 0x90, 0x93, 0x92,
+ 0xc5, 0xc1, 0x9c, 0xf5, 0xff, 0xbb,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x56 },
+ }, {
+ /* 12 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x89, 0x86, 0x89, 0x82, 0x84, 0x88,
+ 0x87, 0x86, 0x8a, 0x8c, 0x90, 0x8f,
+ 0xcd, 0xc9, 0xa1, 0xec, 0xff, 0xb7,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x4a },
+ }, {
+ /* 13 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x89, 0x86, 0x89, 0x82, 0x84, 0x88,
+ 0x87, 0x86, 0x8a, 0x8c, 0x90, 0x8e,
+ 0xc4, 0xbf, 0x9c, 0xf5, 0xff, 0xbb,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x3b },
+ }, {
+ /* 14 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x89, 0x86, 0x89, 0x82, 0x84, 0x88,
+ 0x87, 0x86, 0x89, 0x8c, 0x90, 0x8f,
+ 0xc2, 0xbf, 0x9c, 0xec, 0xff, 0xb7,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x35 },
+ }, {
+ /* 15 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x89, 0x86, 0x89, 0x82, 0x84, 0x88,
+ 0x87, 0x86, 0x89, 0x8c, 0x90, 0x8f,
+ 0xb7, 0xb6, 0x96, 0xec, 0xff, 0xb7,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x25 },
+ }, {
+ /* 16 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x89, 0x86, 0x89, 0x82, 0x84, 0x88,
+ 0x88, 0x86, 0x89, 0x8c, 0x90, 0x8f,
+ 0xb7, 0xb6, 0x96, 0xec, 0xff, 0xb7,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x20 },
+ }, {
+ /* 17 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x89, 0x86, 0x89, 0x7f, 0x80, 0x86,
+ 0x86, 0x85, 0x89, 0x88, 0x8c, 0x8e,
+ 0xbf, 0xbe, 0x9c, 0xec, 0xff, 0xb7,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x11 },
+ }, {
+ /* 19 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x89, 0x86, 0x89, 0x7f, 0x80, 0x86,
+ 0x87, 0x85, 0x89, 0x88, 0x8c, 0x8e,
+ 0xb3, 0xb4, 0x97, 0xeb, 0xff, 0xb7,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x04, 0xf2 },
+ }, {
+ /* 20 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x95,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x89, 0x86, 0x89, 0x7f, 0x80, 0x86,
+ 0x87, 0x85, 0x89, 0x89, 0x8c, 0x8e,
+ 0xb3, 0xb4, 0x97, 0xeb, 0xff, 0xb7,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x04, 0xe4 },
+ }, {
+ /* 21 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x8a, 0x88, 0x8b, 0x7d, 0x7e, 0x84,
+ 0x8c, 0x8a, 0x8c, 0x8e, 0x90, 0x8f,
+ 0xb6, 0xb6, 0x97, 0xe3, 0xff, 0xb3,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x04, 0xd5 },
+ }, {
+ /* 22 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x97,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x8a, 0x88, 0x8b, 0x81, 0x82, 0x86,
+ 0x87, 0x86, 0x88, 0x8e, 0x90, 0x8f,
+ 0xb6, 0xb6, 0x95, 0xe3, 0xff, 0xb3,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x04, 0xc5 },
+ }, {
+ /* 24 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x97,
+ 0x88, 0x89, 0x8b, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8a, 0x81, 0x82, 0x86,
+ 0x87, 0x86, 0x88, 0x8e, 0x90, 0x8f,
+ 0xb6, 0xb6, 0x94, 0xe3, 0xff, 0xb3,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x04, 0xa7 },
+ }, {
+ /* 25 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x98,
+ 0x88, 0x89, 0x8b, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8a, 0x81, 0x82, 0x86,
+ 0x87, 0x86, 0x87, 0x8e, 0x90, 0x8f,
+ 0xbf, 0xbf, 0x9a, 0xda, 0xfa, 0xaf,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x04, 0x95 },
+ }, {
+ /* 27 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x99,
+ 0x88, 0x89, 0x8b, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8a, 0x83, 0x86, 0x8a,
+ 0x88, 0x87, 0x87, 0x88, 0x8b, 0x8c,
+ 0xbf, 0xbf, 0x9a, 0xda, 0xfa, 0xaf,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x04, 0x76 },
+ }, {
+ /* 29 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x99,
+ 0x88, 0x89, 0x8b, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8b, 0x83, 0x86, 0x89,
+ 0x88, 0x87, 0x88, 0x88, 0x8b, 0x8b,
+ 0xbf, 0xbf, 0x9a, 0xda, 0xfa, 0xaf,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x04, 0x54 },
+ }, {
+ /* 30 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9a,
+ 0x88, 0x89, 0x8b, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8a, 0x84, 0x86, 0x8a,
+ 0x87, 0x87, 0x87, 0x88, 0x8b, 0x8b,
+ 0xbf, 0xbf, 0x99, 0xda, 0xfa, 0xaf,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x04, 0x44 },
+ }, {
+ /* 32 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9a,
+ 0x89, 0x89, 0x8c, 0x88, 0x88, 0x8a,
+ 0x89, 0x87, 0x8a, 0x84, 0x86, 0x8a,
+ 0x87, 0x87, 0x87, 0x89, 0x8b, 0x8b,
+ 0xbf, 0xbf, 0x98, 0xd2, 0xf2, 0xac,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x04, 0x1f },
+ }, {
+ /* 34 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9b,
+ 0x88, 0x89, 0x8b, 0x88, 0x88, 0x8a,
+ 0x8b, 0x87, 0x8b, 0x83, 0x86, 0x89,
+ 0x87, 0x87, 0x88, 0x88, 0x8b, 0x8a,
+ 0xbf, 0xbf, 0x98, 0xd2, 0xf2, 0xac,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x03, 0xff },
+ }, {
+ /* 37 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9b,
+ 0x89, 0x89, 0x8c, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8a, 0x81, 0x82, 0x86,
+ 0x86, 0x86, 0x86, 0x8d, 0x90, 0x8d,
+ 0xc0, 0xbf, 0x9a, 0xd2, 0xf2, 0xac,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x03, 0xd3 },
+ }, {
+ /* 39 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9b,
+ 0x89, 0x89, 0x8c, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8a, 0x81, 0x82, 0x86,
+ 0x87, 0x86, 0x87, 0x8d, 0x90, 0x8d,
+ 0xb6, 0xb6, 0x93, 0xda, 0xf9, 0xaf,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x03, 0xb3 },
+ }, {
+ /* 41 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9b,
+ 0x89, 0x89, 0x8c, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8b, 0x81, 0x82, 0x85,
+ 0x87, 0x86, 0x87, 0x8d, 0x90, 0x8d,
+ 0xb6, 0xb6, 0x94, 0xda, 0xf9, 0xaf,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x03, 0x93 },
+ }, {
+ /* 44 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9b,
+ 0x89, 0x89, 0x8c, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8b, 0x81, 0x82, 0x86,
+ 0x87, 0x86, 0x86, 0x85, 0x87, 0x8a,
+ 0xbe, 0xbe, 0x99, 0xda, 0xf9, 0xaf,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x03, 0x66 },
+ }, {
+ /* 47 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9b,
+ 0x89, 0x89, 0x8c, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8b, 0x81, 0x82, 0x86,
+ 0x88, 0x86, 0x87, 0x84, 0x87, 0x89,
+ 0xb4, 0xb4, 0x94, 0xe2, 0xff, 0xb3,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x03, 0x40 },
+ }, {
+ /* 50 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9c,
+ 0x89, 0x89, 0x8b, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8b, 0x81, 0x82, 0x86,
+ 0x88, 0x86, 0x87, 0x84, 0x87, 0x89,
+ 0xb4, 0xb4, 0x95, 0xe2, 0xff, 0xb3,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x03, 0x0e },
+ }, {
+ /* 53 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9c,
+ 0x89, 0x89, 0x8b, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8b, 0x81, 0x82, 0x86,
+ 0x88, 0x86, 0x87, 0x85, 0x87, 0x8a,
+ 0xb4, 0xb4, 0x96, 0xe2, 0xff, 0xb3,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0xe2 },
+ }, {
+ /* 56 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9c,
+ 0x89, 0x89, 0x8b, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8b, 0x81, 0x82, 0x86,
+ 0x88, 0x86, 0x87, 0x85, 0x87, 0x8a,
+ 0xab, 0xab, 0x90, 0xdd, 0xf7, 0xaf,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0xb5 },
+ }, {
+ /* 60 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9c,
+ 0x89, 0x89, 0x8b, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8b, 0x82, 0x82, 0x87,
+ 0x83, 0x81, 0x84, 0x81, 0x84, 0x88,
+ 0xb3, 0xb3, 0x96, 0xcf, 0xe5, 0xa8,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x77 },
+ }, {
+ /* 64 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9c,
+ 0x89, 0x89, 0x8b, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8b, 0x82, 0x82, 0x87,
+ 0x83, 0x81, 0x84, 0x82, 0x84, 0x88,
+ 0xb2, 0xb3, 0x97, 0xcf, 0xe5, 0xa8,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x36 },
+ }, {
+ /* 68 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x9b, 0x00, 0xa6, 0x00, 0x9d,
+ 0x88, 0x88, 0x89, 0x89, 0x89, 0x8b,
+ 0x8a, 0x88, 0x8b, 0x7f, 0x80, 0x86,
+ 0x88, 0x86, 0x87, 0x7d, 0x7f, 0x85,
+ 0xb2, 0xb3, 0x97, 0xcf, 0xe5, 0xa8,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x15 },
+ }, {
+ /* 72 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x9c, 0x00, 0xa9, 0x00, 0xa0,
+ 0x88, 0x88, 0x89, 0x88, 0x88, 0x8a,
+ 0x8c, 0x8a, 0x8d, 0x7f, 0x81, 0x85,
+ 0x84, 0x82, 0x84, 0x85, 0x87, 0x8a,
+ 0xaa, 0xab, 0x93, 0xcf, 0xe5, 0xa8,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x15 },
+ }, {
+ /* 77 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xa1, 0x00, 0xad, 0x00, 0xa5,
+ 0x89, 0x89, 0x8a, 0x88, 0x87, 0x89,
+ 0x8c, 0x89, 0x8d, 0x7f, 0x81, 0x85,
+ 0x84, 0x83, 0x84, 0x81, 0x83, 0x86,
+ 0xaa, 0xab, 0x93, 0xc0, 0xd3, 0xa1,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x15 },
+ }, {
+ /* 82 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xa5, 0x00, 0xb0, 0x00, 0xa9,
+ 0x88, 0x89, 0x89, 0x85, 0x86, 0x89,
+ 0x8a, 0x88, 0x8b, 0x82, 0x82, 0x87,
+ 0x81, 0x80, 0x82, 0x89, 0x8b, 0x8b,
+ 0xa2, 0xa3, 0x8e, 0xc0, 0xd3, 0xa1,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x15 },
+ }, {
+ /* 87 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xab, 0x00, 0xb4, 0x00, 0xad,
+ 0x88, 0x89, 0x8a, 0x84, 0x86, 0x88,
+ 0x8a, 0x88, 0x8b, 0x7f, 0x7f, 0x84,
+ 0x86, 0x84, 0x85, 0x85, 0x86, 0x88,
+ 0xa2, 0xa3, 0x8f, 0xc0, 0xd3, 0xa1,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x15 },
+ }, {
+ /* 93 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xaf, 0x00, 0xb9, 0x00, 0xb1,
+ 0x88, 0x89, 0x8a, 0x84, 0x85, 0x87,
+ 0x8a, 0x89, 0x8b, 0x7e, 0x7e, 0x83,
+ 0x87, 0x86, 0x86, 0x88, 0x8a, 0x89,
+ 0x9c, 0x9c, 0x8b, 0xc0, 0xd3, 0xa1,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x15 },
+ }, {
+ /* 98 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xb3, 0x00, 0xbc, 0x00, 0xb5,
+ 0x88, 0x88, 0x88, 0x84, 0x84, 0x86,
+ 0x8a, 0x88, 0x8a, 0x7f, 0x7f, 0x84,
+ 0x84, 0x83, 0x84, 0x88, 0x8a, 0x89,
+ 0x9c, 0x9c, 0x8b, 0xc0, 0xd3, 0xa1,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x15 },
+ }, {
+ /* 105 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xb7, 0x00, 0xc0, 0x00, 0xba,
+ 0x87, 0x87, 0x88, 0x85, 0x85, 0x87,
+ 0x89, 0x88, 0x89, 0x7f, 0x7f, 0x83,
+ 0x81, 0x80, 0x82, 0x88, 0x8a, 0x89,
+ 0x9c, 0x9c, 0x8c, 0xb2, 0xc2, 0x9a,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x15 },
+ }, {
+ /* 111 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xbb, 0x00, 0xc3, 0x00, 0xbe,
+ 0x87, 0x87, 0x88, 0x85, 0x85, 0x88,
+ 0x88, 0x87, 0x89, 0x80, 0x80, 0x84,
+ 0x81, 0x81, 0x82, 0x85, 0x86, 0x87,
+ 0x9c, 0x9c, 0x8b, 0xb2, 0xc2, 0x9a,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x15 },
+ }, {
+ /* 119 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xc0, 0x00, 0xc8, 0x00, 0xc4,
+ 0x87, 0x87, 0x88, 0x82, 0x84, 0x86,
+ 0x87, 0x85, 0x87, 0x82, 0x81, 0x84,
+ 0x83, 0x82, 0x83, 0x80, 0x81, 0x84,
+ 0x9c, 0x9c, 0x8c, 0xb2, 0xc2, 0x9a,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x14 },
+ }, {
+ /* 126 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xc0, 0x00, 0xc8, 0x00, 0xc4,
+ 0x87, 0x87, 0x88, 0x82, 0x84, 0x86,
+ 0x87, 0x85, 0x87, 0x82, 0x81, 0x84,
+ 0x83, 0x82, 0x83, 0x80, 0x81, 0x84,
+ 0x9c, 0x9c, 0x8d, 0xb2, 0xc2, 0x9a,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x01, 0xde },
+ }, {
+ /* 134 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xc0, 0x00, 0xc8, 0x00, 0xc4,
+ 0x87, 0x87, 0x88, 0x82, 0x84, 0x86,
+ 0x87, 0x85, 0x87, 0x82, 0x81, 0x84,
+ 0x83, 0x82, 0x83, 0x80, 0x81, 0x84,
+ 0x9c, 0x9c, 0x8d, 0xa4, 0xb0, 0x92,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x01, 0x94 },
+ }, {
+ /* 143 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xc0, 0x00, 0xc8, 0x00, 0xc3,
+ 0x87, 0x87, 0x88, 0x82, 0x84, 0x86,
+ 0x87, 0x85, 0x87, 0x82, 0x81, 0x85,
+ 0x83, 0x82, 0x83, 0x80, 0x81, 0x84,
+ 0x92, 0x92, 0x89, 0xab, 0xb6, 0x96,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x01, 0x46 },
+ }, {
+ /* 152 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xc0, 0x00, 0xc8, 0x00, 0xc3,
+ 0x87, 0x87, 0x88, 0x83, 0x84, 0x86,
+ 0x87, 0x85, 0x87, 0x81, 0x81, 0x85,
+ 0x84, 0x82, 0x83, 0x80, 0x81, 0x83,
+ 0x92, 0x92, 0x8b, 0xab, 0xb6, 0x96,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0xfa },
+ }, {
+ /* 162 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xc0, 0x00, 0xc8, 0x00, 0xc3,
+ 0x87, 0x87, 0x88, 0x83, 0x84, 0x86,
+ 0x87, 0x85, 0x87, 0x81, 0x81, 0x84,
+ 0x84, 0x82, 0x84, 0x80, 0x81, 0x83,
+ 0x92, 0x92, 0x8b, 0x9d, 0xa4, 0x8e,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0xac },
+ }, {
+ /* 172 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xc0, 0x00, 0xc8, 0x00, 0xc3,
+ 0x87, 0x87, 0x88, 0x83, 0x84, 0x86,
+ 0x87, 0x85, 0x87, 0x81, 0x81, 0x84,
+ 0x84, 0x82, 0x83, 0x80, 0x81, 0x84,
+ 0x93, 0x92, 0x8c, 0x9d, 0xa4, 0x8e,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x57 },
+ }, {
+ /* 183 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xc2, 0x00, 0xca, 0x00, 0xc5,
+ 0x86, 0x86, 0x87, 0x85, 0x84, 0x87,
+ 0x87, 0x86, 0x88, 0x7e, 0x80, 0x83,
+ 0x84, 0x82, 0x83, 0x80, 0x81, 0x83,
+ 0x93, 0x92, 0x8c, 0x9d, 0xa4, 0x8e,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 195 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xc7, 0x00, 0xce, 0x00, 0xc9,
+ 0x86, 0x87, 0x86, 0x83, 0x83, 0x85,
+ 0x85, 0x84, 0x86, 0x82, 0x82, 0x85,
+ 0x80, 0x80, 0x81, 0x81, 0x81, 0x84,
+ 0x93, 0x92, 0x8c, 0x9d, 0xa4, 0x8e,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 207 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xcc, 0x00, 0xd2, 0x00, 0xce,
+ 0x86, 0x86, 0x87, 0x81, 0x83, 0x84,
+ 0x84, 0x82, 0x84, 0x83, 0x83, 0x85,
+ 0x81, 0x81, 0x82, 0x7c, 0x7d, 0x81,
+ 0x93, 0x92, 0x8c, 0x9d, 0xa4, 0x8e,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 220 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xd1, 0x00, 0xd6, 0x00, 0xd3,
+ 0x86, 0x86, 0x86, 0x81, 0x83, 0x84,
+ 0x84, 0x82, 0x84, 0x80, 0x80, 0x83,
+ 0x81, 0x81, 0x82, 0x7c, 0x7d, 0x81,
+ 0x93, 0x92, 0x8c, 0x9d, 0xa4, 0x8e,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 234 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xd6, 0x00, 0xdb, 0x00, 0xd8,
+ 0x85, 0x85, 0x85, 0x81, 0x83, 0x84,
+ 0x83, 0x82, 0x83, 0x80, 0x80, 0x82,
+ 0x84, 0x82, 0x83, 0x79, 0x79, 0x7e,
+ 0x93, 0x92, 0x8d, 0x9d, 0xa4, 0x8e,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 249 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xdc, 0x00, 0xe0, 0x00, 0xdd,
+ 0x84, 0x84, 0x84, 0x81, 0x82, 0x83,
+ 0x84, 0x82, 0x84, 0x7f, 0x7f, 0x82,
+ 0x81, 0x80, 0x81, 0x80, 0x81, 0x82,
+ 0x8c, 0x8c, 0x86, 0x9d, 0xa4, 0x8e,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 265 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xe2, 0x00, 0xe5, 0x00, 0xe3,
+ 0x83, 0x83, 0x83, 0x81, 0x82, 0x83,
+ 0x82, 0x82, 0x83, 0x82, 0x81, 0x83,
+ 0x7f, 0x7e, 0x80, 0x7c, 0x7d, 0x80,
+ 0x8c, 0x8c, 0x86, 0x8e, 0x92, 0x87,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 282 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xe8, 0x00, 0xea, 0x00, 0xe9,
+ 0x83, 0x83, 0x83, 0x80, 0x82, 0x82,
+ 0x81, 0x82, 0x82, 0x82, 0x81, 0x82,
+ 0x81, 0x80, 0x81, 0x80, 0x80, 0x81,
+ 0x85, 0x85, 0x83, 0x8e, 0x92, 0x87,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 300 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xed, 0x00, 0xef, 0x00, 0xed,
+ 0x81, 0x82, 0x81, 0x81, 0x81, 0x82,
+ 0x82, 0x82, 0x83, 0x80, 0x80, 0x81,
+ 0x81, 0x81, 0x82, 0x83, 0x83, 0x83,
+ 0x80, 0x80, 0x7f, 0x8e, 0x92, 0x87,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 316 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xf3, 0x00, 0xf4, 0x00, 0xf3,
+ 0x80, 0x81, 0x80, 0x81, 0x81, 0x81,
+ 0x82, 0x82, 0x82, 0x81, 0x80, 0x81,
+ 0x82, 0x82, 0x83, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x7f, 0x80, 0x80, 0x80,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 333 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xf8, 0x00, 0xf8, 0x00, 0xf8,
+ 0x80, 0x81, 0x80, 0x81, 0x80, 0x81,
+ 0x81, 0x82, 0x82, 0x81, 0x80, 0x81,
+ 0x83, 0x83, 0x83, 0x7e, 0x7d, 0x7e,
+ 0x80, 0x80, 0x7f, 0x80, 0x80, 0x80,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 360 nits */
+ { MCS_GAMMACTL,
+ 0x01, 0x00, 0x01, 0x00, 0x01, 0x00,
+ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 378 nits */
+ { MCS_GAMMACTL,
+ 0x01, 0x04, 0x01, 0x03, 0x01, 0x04,
+ 0x7f, 0x7f, 0x80, 0x7f, 0x7f, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x80, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x80, 0x80, 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 395 nits */
+ { MCS_GAMMACTL,
+ 0x01, 0x09, 0x01, 0x07, 0x01, 0x08,
+ 0x7e, 0x7f, 0x80, 0x7f, 0x7f, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x80, 0x7f, 0x7f, 0x7e, 0x7e, 0x7e,
+ 0x80, 0x80, 0x7f, 0x7e, 0x7e, 0x7f,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 413 nits */
+ { MCS_GAMMACTL,
+ 0x01, 0x0e, 0x01, 0x0b, 0x01, 0x0c,
+ 0x7e, 0x7f, 0x80, 0x7e, 0x7e, 0x7e,
+ 0x7e, 0x7e, 0x7e, 0x7f, 0x7f, 0x7f,
+ 0x80, 0x7f, 0x7f, 0x7d, 0x7d, 0x7d,
+ 0x80, 0x80, 0x7f, 0x7d, 0x7e, 0x7e,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 430 nits */
+ { MCS_GAMMACTL,
+ 0x01, 0x13, 0x01, 0x0f, 0x01, 0x10,
+ 0x7d, 0x7f, 0x80, 0x7e, 0x7e, 0x7e,
+ 0x7e, 0x7e, 0x7e, 0x7f, 0x7f, 0x7f,
+ 0x80, 0x7f, 0x7f, 0x7d, 0x7d, 0x7d,
+ 0x80, 0x80, 0x7f, 0x7c, 0x7d, 0x7e,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 448 nits */
+ { MCS_GAMMACTL,
+ 0x01, 0x18, 0x01, 0x13, 0x01, 0x14,
+ 0x7c, 0x7e, 0x80, 0x7e, 0x7e, 0x7e,
+ 0x7e, 0x7e, 0x7d, 0x7e, 0x7f, 0x7e,
+ 0x80, 0x7f, 0x7f, 0x7c, 0x7c, 0x7c,
+ 0x80, 0x80, 0x7e, 0x7b, 0x7c, 0x7d,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 465 nits */
+ { MCS_GAMMACTL,
+ 0x01, 0x1d, 0x01, 0x17, 0x01, 0x18,
+ 0x7c, 0x7e, 0x80, 0x7d, 0x7d, 0x7d,
+ 0x7d, 0x7d, 0x7d, 0x7e, 0x7f, 0x7e,
+ 0x80, 0x7f, 0x7f, 0x7b, 0x7b, 0x7b,
+ 0x80, 0x80, 0x7e, 0x7a, 0x7c, 0x7d,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 483 nits */
+ { MCS_GAMMACTL,
+ 0x01, 0x22, 0x01, 0x1b, 0x01, 0x1c,
+ 0x7b, 0x7e, 0x80, 0x7d, 0x7d, 0x7d,
+ 0x7d, 0x7d, 0x7c, 0x7e, 0x7f, 0x7e,
+ 0x80, 0x7f, 0x7f, 0x7a, 0x7a, 0x7a,
+ 0x80, 0x80, 0x7e, 0x79, 0x7b, 0x7c,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 500 nits */
+ { MCS_GAMMACTL,
+ 0x01, 0x27, 0x01, 0x1f, 0x01, 0x20,
+ 0x7b, 0x7e, 0x80, 0x7d, 0x7d, 0x7d,
+ 0x7d, 0x7d, 0x7c, 0x7e, 0x7f, 0x7e,
+ 0x80, 0x7f, 0x7f, 0x7a, 0x7a, 0x7a,
+ 0x81, 0x80, 0x7e, 0x79, 0x7b, 0x7c,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ },
+};
+
+struct s6e8aa5x01_ams561ra01_ctx {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct backlight_device *bl;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data *supplies;
+ u32 nr_supplies;
+};
+
+static const struct regulator_bulk_data s6e8aa5x01_ams561ra01_supplies[] = {
+ { .supply = "vdd" },
+ { .supply = "vci" },
+};
+
+static inline struct s6e8aa5x01_ams561ra01_ctx *to_ctx(struct drm_panel *panel)
+{
+ return container_of(panel, struct s6e8aa5x01_ams561ra01_ctx, panel);
+}
+
+static int s6e8aa5x01_ams561ra01_update_status(struct backlight_device *bl)
+{
+ struct s6e8aa5x01_ams561ra01_ctx *ctx = bl_get_data(bl);
+ struct mipi_dsi_multi_context dsi = { .dsi = ctx->dsi };
+ u16 lvl = backlight_get_brightness(bl);
+
+ if (!ctx->panel.enabled)
+ return 0;
+
+ mipi_dsi_dcs_write_seq_multi(&dsi, MCS_ACCESSPROT, 0x5a, 0x5a);
+
+ mipi_dsi_dcs_write_buffer_multi(&dsi,
+ s6e8aa5x01_ams561ra01_cmds[lvl].gamma,
+ GAMMA_CMD_LEN);
+ mipi_dsi_dcs_write_buffer_multi(&dsi,
+ s6e8aa5x01_ams561ra01_cmds[lvl].aid,
+ AID_CMD_LEN);
+ mipi_dsi_dcs_write_seq_multi(&dsi, MCS_GAMMAUPD, 0x03);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi, MCS_ACCESSPROT, 0xa5, 0xa5);
+
+ return dsi.accum_err;
+}
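
[editor's note] For orientation: update_status() above uses the backlight level as a direct index into s6e8aa5x01_ams561ra01_cmds[], sending one gamma and one AID command per level. A minimal sketch of the per-entry layout implied by the table (the real definitions live earlier in the driver; the lengths below are counted from the table excerpt, not quoted from the source):

	#define GAMMA_CMD_LEN	34	/* MCS_GAMMACTL opcode + 33 payload bytes */
	#define AID_CMD_LEN	3	/* MCS_AIDCTL opcode + 2 payload bytes */

	struct s6e8aa5x01_ams561ra01_cmd {
		u8 gamma[GAMMA_CMD_LEN];	/* per-level gamma curve */
		u8 aid[AID_CMD_LEN];		/* per-level AID/AOR dimming value */
	};
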
+
+static int s6e8aa5x01_ams561ra01_prepare(struct drm_panel *panel)
+{
+ struct s6e8aa5x01_ams561ra01_ctx *ctx = to_ctx(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(ctx->nr_supplies, ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 11000);
+
+ return 0;
+}
+
+static int s6e8aa5x01_ams561ra01_unprepare(struct drm_panel *panel)
+{
+ struct s6e8aa5x01_ams561ra01_ctx *ctx = to_ctx(panel);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(5000, 6000);
+
+ regulator_bulk_disable(ctx->nr_supplies, ctx->supplies);
+
+ return 0;
+}
+
+static int s6e8aa5x01_ams561ra01_enable(struct drm_panel *panel)
+{
+ struct s6e8aa5x01_ams561ra01_ctx *ctx = to_ctx(panel);
+ struct mipi_dsi_multi_context dsi = { .dsi = ctx->dsi };
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi);
+ mipi_dsi_msleep(&dsi, 100);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi, MCS_ACCESSPROT, 0x5a, 0x5a);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi, MCS_PENTILE, 0xd8, 0xd8, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi, MCS_PCD, 0x5c);
+ mipi_dsi_dcs_write_seq_multi(&dsi, MCS_ERRFLAG, 0xed, 0xc7, 0x23, 0x67);
+ mipi_dsi_dcs_write_seq_multi(&dsi, MCS_DISPCTL, 0x0c, 0x0c, 0xb9, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi, MCS_LTPSCTL,
+ 0x00, 0x45, 0x10, 0x10, 0x08, 0x32, 0x54, 0x00,
+ 0x00, 0x00, 0x00, 0x07, 0x06, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x48, 0x5e, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x03, 0x00, 0x00, 0x00, 0xad, 0x00, 0x00,
+ 0x08, 0x05, 0x2a, 0x54, 0x03, 0xcc, 0x00, 0xff,
+ 0xfb, 0x03, 0x0d, 0x00, 0x11, 0x0f, 0x02, 0x03,
+ 0x0b, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
+ 0x13, 0x13, 0x13, 0x13, 0x00, 0x02, 0x03, 0x0b,
+ 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
+ 0x13, 0x13);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi, MCS_ACCESSPROT, 0xa5, 0xa5);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi);
+
+ return dsi.accum_err;
+}
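
[editor's note] Note the bracketing here and in update_status(): MCS_ACCESSPROT 0x5a 0x5a unlocks Samsung's manufacturer command set and 0xa5 0xa5 relocks it, so the vendor-specific writes in between only take effect inside that window. This reading is inferred from the command names and the unlock/relock symmetry, not from a datasheet.
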
+
+static int s6e8aa5x01_ams561ra01_disable(struct drm_panel *panel)
+{
+ struct s6e8aa5x01_ams561ra01_ctx *ctx = to_ctx(panel);
+ struct mipi_dsi_multi_context dsi = { .dsi = ctx->dsi };
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi);
+ mipi_dsi_msleep(&dsi, 100);
+
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi);
+ mipi_dsi_msleep(&dsi, 150);
+
+ return dsi.accum_err;
+}
+
+static const struct drm_display_mode s6e8aa5x01_ams561ra01_mode = {
+ .clock = (720 + 62 + 2 + 26) * (1480 + 12 + 2 + 10) * 60 / 1000,
+ .hdisplay = 720,
+ .hsync_start = 720 + 62,
+ .hsync_end = 720 + 62 + 2,
+ .htotal = 720 + 62 + 2 + 26,
+ .vdisplay = 1480,
+ .vsync_start = 1480 + 12,
+ .vsync_end = 1480 + 12 + 2,
+ .vtotal = 1480 + 12 + 2 + 10,
+ .width_mm = 62,
+ .height_mm = 128,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
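
[editor's note] A quick sanity check on the mode above, using only the numbers already listed:

	/*
	 * htotal = 720 + 62 + 2 + 26  = 810
	 * vtotal = 1480 + 12 + 2 + 10 = 1504
	 * clock  = 810 * 1504 * 60 / 1000 = 73094 kHz (~73.1 MHz at 60 Hz)
	 */
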
+
+static int s6e8aa5x01_ams561ra01_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector,
+ &s6e8aa5x01_ams561ra01_mode);
+}
+
+static const struct backlight_ops s6e8aa5x01_ams561ra01_bl_ops = {
+ .update_status = s6e8aa5x01_ams561ra01_update_status,
+};
+
+static const struct drm_panel_funcs s6e8aa5x01_ams561ra01_panel_funcs = {
+ .prepare = s6e8aa5x01_ams561ra01_prepare,
+ .unprepare = s6e8aa5x01_ams561ra01_unprepare,
+ .enable = s6e8aa5x01_ams561ra01_enable,
+ .disable = s6e8aa5x01_ams561ra01_disable,
+ .get_modes = s6e8aa5x01_ams561ra01_get_modes,
+};
+
+static int s6e8aa5x01_ams561ra01_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct s6e8aa5x01_ams561ra01_ctx *ctx;
+ int ret;
+
+ ctx = devm_drm_panel_alloc(dev, struct s6e8aa5x01_ams561ra01_ctx, panel,
+ &s6e8aa5x01_ams561ra01_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->nr_supplies = ARRAY_SIZE(s6e8aa5x01_ams561ra01_supplies);
+ ret = devm_regulator_bulk_get_const(dev, ctx->nr_supplies,
+ s6e8aa5x01_ams561ra01_supplies,
+ &ctx->supplies);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to get regulators\n");
+
+ ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "failed to get reset-gpios\n");
+
+ ctx->bl = devm_backlight_device_register(dev, dev_name(dev), dev, ctx,
+ &s6e8aa5x01_ams561ra01_bl_ops,
+ NULL);
+ if (IS_ERR(ctx->bl))
+ return dev_err_probe(dev, PTR_ERR(ctx->bl),
+ "failed to register backlight device\n");
+
+ ctx->bl->props.type = BACKLIGHT_PLATFORM;
+ ctx->bl->props.brightness = ARRAY_SIZE(s6e8aa5x01_ams561ra01_cmds) - 1;
+ ctx->bl->props.max_brightness = ctx->bl->props.brightness;
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_VIDEO_NO_HFP;
+
+ ctx->panel.prepare_prev_first = true;
+ drm_panel_add(&ctx->panel);
+
+ ret = devm_mipi_dsi_attach(dev, dsi);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return dev_err_probe(dev, ret, "failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void s6e8aa5x01_ams561ra01_remove(struct mipi_dsi_device *dsi)
+{
+ struct s6e8aa5x01_ams561ra01_ctx *ctx = mipi_dsi_get_drvdata(dsi);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id s6e8aa5x01_ams561ra01_of_device_id[] = {
+ { .compatible = "samsung,s6e8aa5x01-ams561ra01" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, s6e8aa5x01_ams561ra01_of_device_id);
+
+static struct mipi_dsi_driver s6e8aa5x01_ams561ra01_dsi_driver = {
+ .probe = s6e8aa5x01_ams561ra01_probe,
+ .remove = s6e8aa5x01_ams561ra01_remove,
+ .driver = {
+ .name = "panel-samsung-s6e8aa5x01-ams561ra01",
+ .of_match_table = s6e8aa5x01_ams561ra01_of_device_id,
+ },
+};
+module_mipi_dsi_driver(s6e8aa5x01_ams561ra01_dsi_driver);
+
+MODULE_AUTHOR("Kaustabh Chakraborty <kauschluss@disroot.org>");
+MODULE_DESCRIPTION("Samsung AMS561RA01 Panel with S6E8AA5X01 Controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 3333d4a07504..0019de93be1b 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -3716,6 +3716,29 @@ static const struct panel_desc olimex_lcd_olinuxino_43ts = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct drm_display_mode olimex_lcd_olinuxino_5cts_mode = {
+ .clock = 33300,
+ .hdisplay = 800,
+ .hsync_start = 800 + 210,
+ .hsync_end = 800 + 210 + 20,
+ .htotal = 800 + 210 + 20 + 26,
+ .vdisplay = 480,
+ .vsync_start = 480 + 22,
+ .vsync_end = 480 + 22 + 10,
+ .vtotal = 480 + 22 + 10 + 13,
+};
+
+static const struct panel_desc olimex_lcd_olinuxino_5cts = {
+ .modes = &olimex_lcd_olinuxino_5cts_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 154,
+ .height = 86,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+};
+
static const struct display_timing ontat_kd50g21_40nt_a1_timing = {
.pixelclock = { 30000000, 30000000, 50000000 },
.hactive = { 800, 800, 800 },
@@ -5279,6 +5302,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "olimex,lcd-olinuxino-43-ts",
.data = &olimex_lcd_olinuxino_43ts,
}, {
+ .compatible = "olimex,lcd-olinuxino-5-cts",
+ .data = &olimex_lcd_olinuxino_5cts,
+ }, {
.compatible = "ontat,kd50g21-40nt-a1",
.data = &ontat_kd50g21_40nt_a1,
}, {
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
index 1a007a244d84..6c348fe28955 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Driver for panels based on Sitronix ST7703 controller, souch as:
+ * Driver for panels based on Sitronix ST7703 controller, such as:
*
* - Rocktech jh057n00900 5.5" MIPI-DSI panel
*
diff --git a/drivers/gpu/drm/panel/panel-summit.c b/drivers/gpu/drm/panel/panel-summit.c
index 4854437e2899..6d40b9ddfe02 100644
--- a/drivers/gpu/drm/panel/panel-summit.c
+++ b/drivers/gpu/drm/panel/panel-summit.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/backlight.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
#include <drm/drm_device.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_mode.h>
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index bb73f2a68a12..85d6289a6eda 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -432,7 +432,7 @@ static void panfrost_gem_debugfs_bo_print(struct panfrost_gem_object *bo,
if (!refcount)
return;
- resident_size = bo->base.pages ? bo->base.base.size : 0;
+ resident_size = panfrost_gem_rss(&bo->base.base);
snprintf(creator_info, sizeof(creator_info),
"%s/%d", bo->debugfs.creator.process_name, bo->debugfs.creator.tgid);
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
index 563f16bae543..0dd62e8b2fa7 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
@@ -203,7 +203,6 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu);
panfrost_gem_mapping_put(perfcnt->mapping);
perfcnt->mapping = NULL;
- pm_runtime_mark_last_busy(pfdev->dev);
pm_runtime_put_autosuspend(pfdev->dev);
return 0;
@@ -279,7 +278,6 @@ void panfrost_perfcnt_close(struct drm_file *file_priv)
if (perfcnt->user == pfile)
panfrost_perfcnt_disable_locked(pfdev, file_priv);
mutex_unlock(&perfcnt->lock);
- pm_runtime_mark_last_busy(pfdev->dev);
pm_runtime_put_autosuspend(pfdev->dev);
}
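
[editor's note] Both deletions look like fallout from the runtime-PM core change that made pm_runtime_put_autosuspend() mark the last-busy timestamp itself, turning the explicit pm_runtime_mark_last_busy() calls into dead code.
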
diff --git a/drivers/gpu/drm/panthor/Makefile b/drivers/gpu/drm/panthor/Makefile
index 15294719b09c..02db21748c12 100644
--- a/drivers/gpu/drm/panthor/Makefile
+++ b/drivers/gpu/drm/panthor/Makefile
@@ -8,6 +8,7 @@ panthor-y := \
panthor_gem.o \
panthor_gpu.o \
panthor_heap.o \
+ panthor_hw.o \
panthor_mmu.o \
panthor_sched.o
diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c
index f0b2da5b2b96..81df49880bd8 100644
--- a/drivers/gpu/drm/panthor/panthor_device.c
+++ b/drivers/gpu/drm/panthor/panthor_device.c
@@ -18,6 +18,7 @@
#include "panthor_device.h"
#include "panthor_fw.h"
#include "panthor_gpu.h"
+#include "panthor_hw.h"
#include "panthor_mmu.h"
#include "panthor_regs.h"
#include "panthor_sched.h"
@@ -244,6 +245,10 @@ int panthor_device_init(struct panthor_device *ptdev)
return ret;
}
+ ret = panthor_hw_init(ptdev);
+ if (ret)
+ goto err_rpm_put;
+
ret = panthor_gpu_init(ptdev);
if (ret)
goto err_rpm_put;
diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
index 1116f2d2826e..4c202fc5ce05 100644
--- a/drivers/gpu/drm/panthor/panthor_drv.c
+++ b/drivers/gpu/drm/panthor/panthor_drv.c
@@ -1094,7 +1094,7 @@ static int panthor_ioctl_group_create(struct drm_device *ddev, void *data,
struct drm_panthor_queue_create *queue_args;
int ret;
- if (!args->queues.count)
+ if (!args->queues.count || args->queues.count > MAX_CS_PER_CSG)
return -EINVAL;
ret = PANTHOR_UOBJ_GET_ARRAY(queue_args, &args->queues);
@@ -1103,14 +1103,15 @@ static int panthor_ioctl_group_create(struct drm_device *ddev, void *data,
ret = group_priority_permit(file, args->priority);
if (ret)
- return ret;
+ goto out;
ret = panthor_group_create(pfile, args, queue_args);
- if (ret >= 0) {
- args->group_handle = ret;
- ret = 0;
- }
+ if (ret < 0)
+ goto out;
+ args->group_handle = ret;
+ ret = 0;
+out:
kvfree(queue_args);
return ret;
}
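
[editor's note] Two fixes ride in this hunk: queues.count is now bounded by MAX_CS_PER_CSG before the array is copied from userspace, and the error path after a failed group_priority_permit() now goes through kvfree(queue_args) instead of leaking it.
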
@@ -1400,14 +1401,9 @@ panthor_open(struct drm_device *ddev, struct drm_file *file)
struct panthor_file *pfile;
int ret;
- if (!try_module_get(THIS_MODULE))
- return -EINVAL;
-
pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
- if (!pfile) {
- ret = -ENOMEM;
- goto err_put_mod;
- }
+ if (!pfile)
+ return -ENOMEM;
pfile->ptdev = ptdev;
pfile->user_mmio.offset = DRM_PANTHOR_USER_MMIO_OFFSET;
@@ -1439,9 +1435,6 @@ err_destroy_vm_pool:
err_free_file:
kfree(pfile);
-
-err_put_mod:
- module_put(THIS_MODULE);
return ret;
}
@@ -1454,7 +1447,6 @@ panthor_postclose(struct drm_device *ddev, struct drm_file *file)
panthor_vm_pool_destroy(pfile);
kfree(pfile);
- module_put(THIS_MODULE);
}
static const struct drm_ioctl_desc panthor_drm_driver_ioctls[] = {
@@ -1555,6 +1547,7 @@ static void panthor_show_fdinfo(struct drm_printer *p, struct drm_file *file)
}
static const struct file_operations panthor_drm_driver_fops = {
+ .owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
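
[editor's note] With .owner set, the VFS pins the module for as long as a file stays open, which is exactly what the removed try_module_get()/module_put() pair in panthor_open()/panthor_postclose() was emulating by hand.
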
diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c
index 36f1034839c2..9bf06e55eaee 100644
--- a/drivers/gpu/drm/panthor/panthor_fw.c
+++ b/drivers/gpu/drm/panthor/panthor_fw.c
@@ -1402,3 +1402,8 @@ err_unplug_fw:
}
MODULE_FIRMWARE("arm/mali/arch10.8/mali_csffw.bin");
+MODULE_FIRMWARE("arm/mali/arch10.10/mali_csffw.bin");
+MODULE_FIRMWARE("arm/mali/arch10.12/mali_csffw.bin");
+MODULE_FIRMWARE("arm/mali/arch11.8/mali_csffw.bin");
+MODULE_FIRMWARE("arm/mali/arch12.8/mali_csffw.bin");
+MODULE_FIRMWARE("arm/mali/arch13.8/mali_csffw.bin");
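
[editor's note] Listing the additional CSF firmware blobs via MODULE_FIRMWARE() exposes them in the module's metadata, so tooling that assembles initramfs images from modinfo output knows to include them.
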
diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c
index a123bc740ba1..156c7a0b62a2 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.c
+++ b/drivers/gpu/drm/panthor/panthor_gem.c
@@ -74,7 +74,6 @@ static void panthor_gem_free_object(struct drm_gem_object *obj)
mutex_destroy(&bo->label.lock);
drm_gem_free_mmap_offset(&bo->base.base);
- mutex_destroy(&bo->gpuva_list_lock);
drm_gem_shmem_free(&bo->base);
drm_gem_object_put(vm_root_gem);
}
@@ -246,8 +245,6 @@ struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t
obj->base.base.funcs = &panthor_gem_funcs;
obj->base.map_wc = !ptdev->coherent;
- mutex_init(&obj->gpuva_list_lock);
- drm_gem_gpuva_set_lock(&obj->base.base, &obj->gpuva_list_lock);
mutex_init(&obj->label.lock);
panthor_gem_debugfs_bo_init(obj);
diff --git a/drivers/gpu/drm/panthor/panthor_gem.h b/drivers/gpu/drm/panthor/panthor_gem.h
index 8fc7215e9b90..80c6e24112d0 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.h
+++ b/drivers/gpu/drm/panthor/panthor_gem.h
@@ -79,18 +79,6 @@ struct panthor_gem_object {
*/
struct drm_gem_object *exclusive_vm_root_gem;
- /**
- * @gpuva_list_lock: Custom GPUVA lock.
- *
- * Used to protect insertion of drm_gpuva elements to the
- * drm_gem_object.gpuva.list list.
- *
- * We can't use the GEM resv for that, because drm_gpuva_link() is
- * called in a dma-signaling path, where we're not allowed to take
- * resv locks.
- */
- struct mutex gpuva_list_lock;
-
/** @flags: Combination of drm_panthor_bo_flags flags. */
u32 flags;
diff --git a/drivers/gpu/drm/panthor/panthor_gpu.c b/drivers/gpu/drm/panthor/panthor_gpu.c
index cb7a335e07d7..db69449a5be0 100644
--- a/drivers/gpu/drm/panthor/panthor_gpu.c
+++ b/drivers/gpu/drm/panthor/panthor_gpu.c
@@ -35,40 +35,9 @@ struct panthor_gpu {
/** @reqs_acked: GPU request wait queue. */
wait_queue_head_t reqs_acked;
-};
-
-/**
- * struct panthor_model - GPU model description
- */
-struct panthor_model {
- /** @name: Model name. */
- const char *name;
-
- /** @arch_major: Major version number of architecture. */
- u8 arch_major;
- /** @product_major: Major version number of product. */
- u8 product_major;
-};
-
-/**
- * GPU_MODEL() - Define a GPU model. A GPU product can be uniquely identified
- * by a combination of the major architecture version and the major product
- * version.
- * @_name: Name for the GPU model.
- * @_arch_major: Architecture major.
- * @_product_major: Product major.
- */
-#define GPU_MODEL(_name, _arch_major, _product_major) \
-{\
- .name = __stringify(_name), \
- .arch_major = _arch_major, \
- .product_major = _product_major, \
-}
-
-static const struct panthor_model gpu_models[] = {
- GPU_MODEL(g610, 10, 7),
- {},
+ /** @cache_flush_lock: Lock to serialize cache flushes */
+ struct mutex cache_flush_lock;
};
#define GPU_INTERRUPTS_MASK \
@@ -83,66 +52,6 @@ static void panthor_gpu_coherency_set(struct panthor_device *ptdev)
ptdev->coherent ? GPU_COHERENCY_PROT_BIT(ACE_LITE) : GPU_COHERENCY_NONE);
}
-static void panthor_gpu_init_info(struct panthor_device *ptdev)
-{
- const struct panthor_model *model;
- u32 arch_major, product_major;
- u32 major, minor, status;
- unsigned int i;
-
- ptdev->gpu_info.gpu_id = gpu_read(ptdev, GPU_ID);
- ptdev->gpu_info.csf_id = gpu_read(ptdev, GPU_CSF_ID);
- ptdev->gpu_info.gpu_rev = gpu_read(ptdev, GPU_REVID);
- ptdev->gpu_info.core_features = gpu_read(ptdev, GPU_CORE_FEATURES);
- ptdev->gpu_info.l2_features = gpu_read(ptdev, GPU_L2_FEATURES);
- ptdev->gpu_info.tiler_features = gpu_read(ptdev, GPU_TILER_FEATURES);
- ptdev->gpu_info.mem_features = gpu_read(ptdev, GPU_MEM_FEATURES);
- ptdev->gpu_info.mmu_features = gpu_read(ptdev, GPU_MMU_FEATURES);
- ptdev->gpu_info.thread_features = gpu_read(ptdev, GPU_THREAD_FEATURES);
- ptdev->gpu_info.max_threads = gpu_read(ptdev, GPU_THREAD_MAX_THREADS);
- ptdev->gpu_info.thread_max_workgroup_size = gpu_read(ptdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
- ptdev->gpu_info.thread_max_barrier_size = gpu_read(ptdev, GPU_THREAD_MAX_BARRIER_SIZE);
- ptdev->gpu_info.coherency_features = gpu_read(ptdev, GPU_COHERENCY_FEATURES);
- for (i = 0; i < 4; i++)
- ptdev->gpu_info.texture_features[i] = gpu_read(ptdev, GPU_TEXTURE_FEATURES(i));
-
- ptdev->gpu_info.as_present = gpu_read(ptdev, GPU_AS_PRESENT);
-
- ptdev->gpu_info.shader_present = gpu_read64(ptdev, GPU_SHADER_PRESENT);
- ptdev->gpu_info.tiler_present = gpu_read64(ptdev, GPU_TILER_PRESENT);
- ptdev->gpu_info.l2_present = gpu_read64(ptdev, GPU_L2_PRESENT);
-
- arch_major = GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id);
- product_major = GPU_PROD_MAJOR(ptdev->gpu_info.gpu_id);
- major = GPU_VER_MAJOR(ptdev->gpu_info.gpu_id);
- minor = GPU_VER_MINOR(ptdev->gpu_info.gpu_id);
- status = GPU_VER_STATUS(ptdev->gpu_info.gpu_id);
-
- for (model = gpu_models; model->name; model++) {
- if (model->arch_major == arch_major &&
- model->product_major == product_major)
- break;
- }
-
- drm_info(&ptdev->base,
- "mali-%s id 0x%x major 0x%x minor 0x%x status 0x%x",
- model->name ?: "unknown", ptdev->gpu_info.gpu_id >> 16,
- major, minor, status);
-
- drm_info(&ptdev->base,
- "Features: L2:%#x Tiler:%#x Mem:%#x MMU:%#x AS:%#x",
- ptdev->gpu_info.l2_features,
- ptdev->gpu_info.tiler_features,
- ptdev->gpu_info.mem_features,
- ptdev->gpu_info.mmu_features,
- ptdev->gpu_info.as_present);
-
- drm_info(&ptdev->base,
- "shader_present=0x%0llx l2_present=0x%0llx tiler_present=0x%0llx",
- ptdev->gpu_info.shader_present, ptdev->gpu_info.l2_present,
- ptdev->gpu_info.tiler_present);
-}
-
static void panthor_gpu_irq_handler(struct panthor_device *ptdev, u32 status)
{
gpu_write(ptdev, GPU_INT_CLEAR, status);
@@ -204,8 +113,8 @@ int panthor_gpu_init(struct panthor_device *ptdev)
spin_lock_init(&gpu->reqs_lock);
init_waitqueue_head(&gpu->reqs_acked);
+ mutex_init(&gpu->cache_flush_lock);
ptdev->gpu = gpu;
- panthor_gpu_init_info(ptdev);
dma_set_max_seg_size(ptdev->base.dev, UINT_MAX);
pa_bits = GPU_MMU_FEATURES_PA_BITS(ptdev->gpu_info.mmu_features);
@@ -353,6 +262,9 @@ int panthor_gpu_flush_caches(struct panthor_device *ptdev,
bool timedout = false;
unsigned long flags;
+ /* Serialize cache flush operations. */
+ guard(mutex)(&ptdev->gpu->cache_flush_lock);
+
spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
if (!drm_WARN_ON(&ptdev->base,
ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED)) {
diff --git a/drivers/gpu/drm/panthor/panthor_hw.c b/drivers/gpu/drm/panthor/panthor_hw.c
new file mode 100644
index 000000000000..4f2858114e5e
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_hw.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+/* Copyright 2025 ARM Limited. All rights reserved. */
+
+#include "panthor_device.h"
+#include "panthor_hw.h"
+#include "panthor_regs.h"
+
+#define GPU_PROD_ID_MAKE(arch_major, prod_major) \
+ (((arch_major) << 24) | (prod_major))
+
+static char *get_gpu_model_name(struct panthor_device *ptdev)
+{
+ const u32 gpu_id = ptdev->gpu_info.gpu_id;
+ const u32 product_id = GPU_PROD_ID_MAKE(GPU_ARCH_MAJOR(gpu_id),
+ GPU_PROD_MAJOR(gpu_id));
+ const bool ray_intersection = !!(ptdev->gpu_info.gpu_features &
+ GPU_FEATURES_RAY_INTERSECTION);
+ const u8 shader_core_count = hweight64(ptdev->gpu_info.shader_present);
+
+ switch (product_id) {
+ case GPU_PROD_ID_MAKE(10, 2):
+ return "Mali-G710";
+ case GPU_PROD_ID_MAKE(10, 3):
+ return "Mali-G510";
+ case GPU_PROD_ID_MAKE(10, 4):
+ return "Mali-G310";
+ case GPU_PROD_ID_MAKE(10, 7):
+ return "Mali-G610";
+ case GPU_PROD_ID_MAKE(11, 2):
+ if (shader_core_count > 10 && ray_intersection)
+ return "Mali-G715-Immortalis";
+ else if (shader_core_count >= 7)
+ return "Mali-G715";
+
+ fallthrough;
+ case GPU_PROD_ID_MAKE(11, 3):
+ return "Mali-G615";
+ case GPU_PROD_ID_MAKE(12, 0):
+ if (shader_core_count >= 10 && ray_intersection)
+ return "Mali-G720-Immortalis";
+ else if (shader_core_count >= 6)
+ return "Mali-G720";
+
+ fallthrough;
+ case GPU_PROD_ID_MAKE(12, 1):
+ return "Mali-G620";
+ case GPU_PROD_ID_MAKE(13, 0):
+ if (shader_core_count >= 10 && ray_intersection)
+ return "Mali-G925-Immortalis";
+ else if (shader_core_count >= 6)
+ return "Mali-G725";
+
+ fallthrough;
+ case GPU_PROD_ID_MAKE(13, 1):
+ return "Mali-G625";
+ }
+
+ return "(Unknown Mali GPU)";
+}
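
[editor's note] To make the packing concrete (arithmetic only; the field positions come from the macro above):

	/*
	 * Example: Mali-G610 is arch_major 10, prod_major 7, so
	 * GPU_PROD_ID_MAKE(10, 7) == (10 << 24) | 7 == 0x0a000007.
	 */
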
+
+static void panthor_gpu_info_init(struct panthor_device *ptdev)
+{
+ unsigned int i;
+
+ ptdev->gpu_info.gpu_id = gpu_read(ptdev, GPU_ID);
+ ptdev->gpu_info.csf_id = gpu_read(ptdev, GPU_CSF_ID);
+ ptdev->gpu_info.gpu_rev = gpu_read(ptdev, GPU_REVID);
+ ptdev->gpu_info.core_features = gpu_read(ptdev, GPU_CORE_FEATURES);
+ ptdev->gpu_info.l2_features = gpu_read(ptdev, GPU_L2_FEATURES);
+ ptdev->gpu_info.tiler_features = gpu_read(ptdev, GPU_TILER_FEATURES);
+ ptdev->gpu_info.mem_features = gpu_read(ptdev, GPU_MEM_FEATURES);
+ ptdev->gpu_info.mmu_features = gpu_read(ptdev, GPU_MMU_FEATURES);
+ ptdev->gpu_info.thread_features = gpu_read(ptdev, GPU_THREAD_FEATURES);
+ ptdev->gpu_info.max_threads = gpu_read(ptdev, GPU_THREAD_MAX_THREADS);
+ ptdev->gpu_info.thread_max_workgroup_size = gpu_read(ptdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
+ ptdev->gpu_info.thread_max_barrier_size = gpu_read(ptdev, GPU_THREAD_MAX_BARRIER_SIZE);
+ ptdev->gpu_info.coherency_features = gpu_read(ptdev, GPU_COHERENCY_FEATURES);
+ for (i = 0; i < 4; i++)
+ ptdev->gpu_info.texture_features[i] = gpu_read(ptdev, GPU_TEXTURE_FEATURES(i));
+
+ ptdev->gpu_info.as_present = gpu_read(ptdev, GPU_AS_PRESENT);
+
+ ptdev->gpu_info.shader_present = gpu_read64(ptdev, GPU_SHADER_PRESENT);
+ ptdev->gpu_info.tiler_present = gpu_read64(ptdev, GPU_TILER_PRESENT);
+ ptdev->gpu_info.l2_present = gpu_read64(ptdev, GPU_L2_PRESENT);
+
+ /* Introduced in arch 11.x */
+ ptdev->gpu_info.gpu_features = gpu_read64(ptdev, GPU_FEATURES);
+}
+
+static void panthor_hw_info_init(struct panthor_device *ptdev)
+{
+ u32 major, minor, status;
+
+ panthor_gpu_info_init(ptdev);
+
+ major = GPU_VER_MAJOR(ptdev->gpu_info.gpu_id);
+ minor = GPU_VER_MINOR(ptdev->gpu_info.gpu_id);
+ status = GPU_VER_STATUS(ptdev->gpu_info.gpu_id);
+
+ drm_info(&ptdev->base,
+ "%s id 0x%x major 0x%x minor 0x%x status 0x%x",
+ get_gpu_model_name(ptdev), ptdev->gpu_info.gpu_id >> 16,
+ major, minor, status);
+
+ drm_info(&ptdev->base,
+ "Features: L2:%#x Tiler:%#x Mem:%#x MMU:%#x AS:%#x",
+ ptdev->gpu_info.l2_features,
+ ptdev->gpu_info.tiler_features,
+ ptdev->gpu_info.mem_features,
+ ptdev->gpu_info.mmu_features,
+ ptdev->gpu_info.as_present);
+
+ drm_info(&ptdev->base,
+ "shader_present=0x%0llx l2_present=0x%0llx tiler_present=0x%0llx",
+ ptdev->gpu_info.shader_present, ptdev->gpu_info.l2_present,
+ ptdev->gpu_info.tiler_present);
+}
+
+int panthor_hw_init(struct panthor_device *ptdev)
+{
+ panthor_hw_info_init(ptdev);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/panthor/panthor_hw.h b/drivers/gpu/drm/panthor/panthor_hw.h
new file mode 100644
index 000000000000..0af6acc6aa6a
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_hw.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
+/* Copyright 2025 ARM Limited. All rights reserved. */
+
+#ifndef __PANTHOR_HW_H__
+#define __PANTHOR_HW_H__
+
+struct panthor_device;
+
+int panthor_hw_init(struct panthor_device *ptdev);
+
+#endif /* __PANTHOR_HW_H__ */
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index 4140f697ba5a..6dec4354e378 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -29,6 +29,7 @@
#include "panthor_device.h"
#include "panthor_gem.h"
+#include "panthor_gpu.h"
#include "panthor_heap.h"
#include "panthor_mmu.h"
#include "panthor_regs.h"
@@ -571,8 +572,24 @@ static void lock_region(struct panthor_device *ptdev, u32 as_nr,
static int mmu_hw_do_operation_locked(struct panthor_device *ptdev, int as_nr,
u64 iova, u64 size, u32 op)
{
+ const u32 l2_flush_op = CACHE_CLEAN | CACHE_INV;
+ u32 lsc_flush_op;
+ int ret;
+
lockdep_assert_held(&ptdev->mmu->as.slots_lock);
+ switch (op) {
+ case AS_COMMAND_FLUSH_MEM:
+ lsc_flush_op = CACHE_CLEAN | CACHE_INV;
+ break;
+ case AS_COMMAND_FLUSH_PT:
+ lsc_flush_op = 0;
+ break;
+ default:
+ drm_WARN(&ptdev->base, 1, "Unexpected AS_COMMAND: %d", op);
+ return -EINVAL;
+ }
+
if (as_nr < 0)
return 0;
@@ -582,13 +599,24 @@ static int mmu_hw_do_operation_locked(struct panthor_device *ptdev, int as_nr,
* power it up
*/
- if (op != AS_COMMAND_UNLOCK)
- lock_region(ptdev, as_nr, iova, size);
+ lock_region(ptdev, as_nr, iova, size);
- /* Run the MMU operation */
- write_cmd(ptdev, as_nr, op);
+ ret = wait_ready(ptdev, as_nr);
+ if (ret)
+ return ret;
- /* Wait for the flush to complete */
+ ret = panthor_gpu_flush_caches(ptdev, l2_flush_op, lsc_flush_op, 0);
+ if (ret)
+ return ret;
+
+ /*
+ * Explicitly unlock the region as the AS is not unlocked automatically
+ * at the end of the GPU_CONTROL cache flush command, unlike
+ * AS_COMMAND_FLUSH_MEM or AS_COMMAND_FLUSH_PT.
+ */
+ write_cmd(ptdev, as_nr, AS_COMMAND_UNLOCK);
+
+ /* Wait for the unlock command to complete */
return wait_ready(ptdev, as_nr);
}
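
[editor's note] The lsc_flush_op mapping mirrors what the retired AS commands did: FLUSH_MEM cleaned and invalidated both L2 and the load/store caches, while FLUSH_PT only touched L2, page-table walks not going through the LSC. This is inferred from the switch above rather than from the Mali TRM.
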
@@ -1074,9 +1102,9 @@ static void panthor_vm_bo_put(struct drm_gpuvm_bo *vm_bo)
* GEM vm_bo list.
*/
dma_resv_lock(drm_gpuvm_resv(vm), NULL);
- mutex_lock(&bo->gpuva_list_lock);
+ mutex_lock(&bo->base.base.gpuva.lock);
unpin = drm_gpuvm_bo_put(vm_bo);
- mutex_unlock(&bo->gpuva_list_lock);
+ mutex_unlock(&bo->base.base.gpuva.lock);
dma_resv_unlock(drm_gpuvm_resv(vm));
/* If the vm_bo object was destroyed, release the pin reference that
@@ -1194,7 +1222,7 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
(flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) != DRM_PANTHOR_VM_BIND_OP_TYPE_MAP)
return -EINVAL;
- /* Make sure the VA and size are aligned and in-bounds. */
+ /* Make sure the VA and size are in-bounds. */
if (size > bo->base.base.size || offset > bo->base.base.size - size)
return -EINVAL;
@@ -1249,9 +1277,9 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
* calling this function.
*/
dma_resv_lock(panthor_vm_resv(vm), NULL);
- mutex_lock(&bo->gpuva_list_lock);
+ mutex_lock(&bo->base.base.gpuva.lock);
op_ctx->map.vm_bo = drm_gpuvm_bo_obtain_prealloc(preallocated_vm_bo);
- mutex_unlock(&bo->gpuva_list_lock);
+ mutex_unlock(&bo->base.base.gpuva.lock);
dma_resv_unlock(panthor_vm_resv(vm));
/* If a vm_bo for this <VM,BO> combination exists, it already
@@ -2003,10 +2031,10 @@ static void panthor_vma_link(struct panthor_vm *vm,
{
struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);
- mutex_lock(&bo->gpuva_list_lock);
+ mutex_lock(&bo->base.base.gpuva.lock);
drm_gpuva_link(&vma->base, vm_bo);
drm_WARN_ON(&vm->ptdev->base, drm_gpuvm_bo_put(vm_bo));
- mutex_unlock(&bo->gpuva_list_lock);
+ mutex_unlock(&bo->base.base.gpuva.lock);
}
static void panthor_vma_unlink(struct panthor_vm *vm,
@@ -2015,9 +2043,9 @@ static void panthor_vma_unlink(struct panthor_vm *vm,
struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);
struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_get(vma->base.vm_bo);
- mutex_lock(&bo->gpuva_list_lock);
+ mutex_lock(&bo->base.base.gpuva.lock);
drm_gpuva_unlink(&vma->base);
- mutex_unlock(&bo->gpuva_list_lock);
+ mutex_unlock(&bo->base.base.gpuva.lock);
/* drm_gpuva_unlink() release the vm_bo, but we manually retained it
* when entering this function, so we can implement deferred VMA
@@ -2169,15 +2197,22 @@ panthor_vm_exec_op(struct panthor_vm *vm, struct panthor_vm_op_ctx *op,
mutex_lock(&vm->op_lock);
vm->op_ctx = op;
switch (op_type) {
- case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
+ case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: {
+ const struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = op->va.addr,
+ .map.va.range = op->va.range,
+ .map.gem.obj = op->map.vm_bo->obj,
+ .map.gem.offset = op->map.bo_offset,
+ };
+
if (vm->unusable) {
ret = -EINVAL;
break;
}
- ret = drm_gpuvm_sm_map(&vm->base, vm, op->va.addr, op->va.range,
- op->map.vm_bo->obj, op->map.bo_offset);
+ ret = drm_gpuvm_sm_map(&vm->base, vm, &map_req);
break;
+ }
case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
ret = drm_gpuvm_sm_unmap(&vm->base, vm, op->va.addr, op->va.range);
@@ -2380,8 +2415,9 @@ panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,
* to be handled the same way user VMAs are.
*/
drm_gpuvm_init(&vm->base, for_mcu ? "panthor-MCU-VM" : "panthor-GPU-VM",
- DRM_GPUVM_RESV_PROTECTED, &ptdev->base, dummy_gem,
- min_va, va_range, 0, 0, &panthor_gpuvm_ops);
+ DRM_GPUVM_RESV_PROTECTED | DRM_GPUVM_IMMEDIATE_MODE,
+ &ptdev->base, dummy_gem, min_va, va_range, 0, 0,
+ &panthor_gpuvm_ops);
drm_gem_object_put(dummy_gem);
return vm;
@@ -2411,7 +2447,7 @@ panthor_vm_bind_prepare_op_ctx(struct drm_file *file,
int ret;
/* Aligned on page size. */
- if (!IS_ALIGNED(op->va | op->size, vm_pgsz))
+ if (!IS_ALIGNED(op->va | op->size | op->bo_offset, vm_pgsz))
return -EINVAL;
switch (op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) {
diff --git a/drivers/gpu/drm/panthor/panthor_regs.h b/drivers/gpu/drm/panthor/panthor_regs.h
index 48bbfd40138c..8bee76d01bf8 100644
--- a/drivers/gpu/drm/panthor/panthor_regs.h
+++ b/drivers/gpu/drm/panthor/panthor_regs.h
@@ -70,6 +70,9 @@
#define GPU_PWR_OVERRIDE0 0x54
#define GPU_PWR_OVERRIDE1 0x58
+#define GPU_FEATURES 0x60
+#define GPU_FEATURES_RAY_INTERSECTION BIT(2)
+
#define GPU_TIMESTAMP_OFFSET 0x88
#define GPU_CYCLE_COUNT 0x90
#define GPU_TIMESTAMP 0x98
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 8f17394cc82a..3d1f57e3990f 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -641,6 +641,15 @@ struct panthor_group {
size_t kbo_sizes;
} fdinfo;
+ /** @task_info: Info of current->group_leader that created the group. */
+ struct {
+ /** @task_info.pid: pid of current->group_leader */
+ pid_t pid;
+
+ /** @task_info.comm: comm of current->group_leader */
+ char comm[TASK_COMM_LEN];
+ } task_info;
+
/** @state: Group state. */
enum panthor_group_state state;
@@ -886,8 +895,7 @@ static void group_free_queue(struct panthor_group *group, struct panthor_queue *
if (IS_ERR_OR_NULL(queue))
return;
- if (queue->entity.fence_context)
- drm_sched_entity_destroy(&queue->entity);
+ drm_sched_entity_destroy(&queue->entity);
if (queue->scheduler.ops)
drm_sched_fini(&queue->scheduler);
@@ -1355,8 +1363,12 @@ cs_slot_process_fatal_event_locked(struct panthor_device *ptdev,
fatal = cs_iface->output->fatal;
info = cs_iface->output->fatal_info;
- if (group)
+ if (group) {
+ drm_warn(&ptdev->base, "CS_FATAL: pid=%d, comm=%s\n",
+ group->task_info.pid, group->task_info.comm);
+
group->fatal_queues |= BIT(cs_id);
+ }
if (CS_EXCEPTION_TYPE(fatal) == DRM_PANTHOR_EXCEPTION_CS_UNRECOVERABLE) {
/* If this exception is unrecoverable, queue a reset, and make
@@ -1416,6 +1428,11 @@ cs_slot_process_fault_event_locked(struct panthor_device *ptdev,
spin_unlock(&queue->fence_ctx.lock);
}
+ if (group) {
+ drm_warn(&ptdev->base, "CS_FAULT: pid=%d, comm=%s\n",
+ group->task_info.pid, group->task_info.comm);
+ }
+
drm_warn(&ptdev->base,
"CSG slot %d CS slot: %d\n"
"CS_FAULT.EXCEPTION_TYPE: 0x%x (%s)\n"
@@ -1632,11 +1649,15 @@ csg_slot_process_progress_timer_event_locked(struct panthor_device *ptdev, u32 c
lockdep_assert_held(&sched->lock);
- drm_warn(&ptdev->base, "CSG slot %d progress timeout\n", csg_id);
-
group = csg_slot->group;
- if (!drm_WARN_ON(&ptdev->base, !group))
+ if (!drm_WARN_ON(&ptdev->base, !group)) {
+ drm_warn(&ptdev->base, "CSG_PROGRESS_TIMER_EVENT: pid=%d, comm=%s\n",
+ group->task_info.pid, group->task_info.comm);
+
group->timedout = true;
+ }
+
+ drm_warn(&ptdev->base, "CSG slot %d progress timeout\n", csg_id);
sched_queue_delayed_work(sched, tick, 0);
}
@@ -3218,7 +3239,8 @@ queue_timedout_job(struct drm_sched_job *sched_job)
struct panthor_scheduler *sched = ptdev->scheduler;
struct panthor_queue *queue = group->queues[job->queue_idx];
- drm_warn(&ptdev->base, "job timeout\n");
+ drm_warn(&ptdev->base, "job timeout: pid=%d, comm=%s, seqno=%llu\n",
+ group->task_info.pid, group->task_info.comm, job->done_fence->seqno);
drm_WARN_ON(&ptdev->base, atomic_read(&sched->reset.in_progress));
@@ -3389,6 +3411,14 @@ err_free_queue:
return ERR_PTR(ret);
}
+static void group_init_task_info(struct panthor_group *group)
+{
+ struct task_struct *task = current->group_leader;
+
+ group->task_info.pid = task->pid;
+ get_task_comm(group->task_info.comm, task);
+}
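
[editor's note] get_task_comm() copies at most TASK_COMM_LEN bytes including the terminating NUL (and build-asserts the buffer size), which is why task_info.comm in struct panthor_group is sized with TASK_COMM_LEN rather than a driver-local constant.
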
+
static void add_group_kbo_sizes(struct panthor_device *ptdev,
struct panthor_group *group)
{
@@ -3540,6 +3570,8 @@ int panthor_group_create(struct panthor_file *pfile,
add_group_kbo_sizes(group->ptdev, group);
spin_lock_init(&group->fdinfo.lock);
+ group_init_task_info(group);
+
return gid;
err_put_group:
@@ -3558,11 +3590,6 @@ int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle)
if (!group)
return -EINVAL;
- for (u32 i = 0; i < group->queue_count; i++) {
- if (group->queues[i])
- drm_sched_entity_destroy(&group->queues[i]->entity);
- }
-
mutex_lock(&sched->reset.lock);
mutex_lock(&sched->lock);
group->destroyed = true;
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index d1c5e471bdca..3d9f47bc807a 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1832,7 +1832,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
return;
}
- radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ radeon_atombios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
}
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 7c3a960f486a..ba8db1d07c07 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -2457,7 +2457,7 @@ static void ci_register_patching_mc_arb(struct radeon_device *rdev,
u32 tmp, tmp2;
tmp = RREG32(MC_SEQ_MISC0);
- patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
+ patch = (tmp & 0x0000f00) == 0x300;
if (patch &&
((rdev->pdev->device == 0x67B0) ||
@@ -3238,7 +3238,8 @@ static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
SMU7_MAX_LEVELS_GRAPHICS;
SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
- u32 i, ret;
+ int ret;
+ u32 i;
memset(levels, 0, level_array_size);
@@ -3285,7 +3286,8 @@ static int ci_populate_all_memory_levels(struct radeon_device *rdev)
u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
SMU7_MAX_LEVELS_MEMORY;
SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
- u32 i, ret;
+ int ret;
+ u32 i;
memset(levels, 0, level_array_size);
@@ -3436,7 +3438,7 @@ static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
allowed_sclk_vddc_table->entries[i].clk;
pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
- (i == 0) ? true : false;
+ i == 0;
pi->dpm_table.sclk_table.count++;
}
}
@@ -3449,7 +3451,7 @@ static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
allowed_mclk_table->entries[i].clk;
pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
- (i == 0) ? true : false;
+ i == 0;
pi->dpm_table.mclk_table.count++;
}
}
@@ -4487,7 +4489,7 @@ static int ci_register_patching_mc_seq(struct radeon_device *rdev,
bool patch;
tmp = RREG32(MC_SEQ_MISC0);
- patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
+ patch = (tmp & 0x0000f00) == 0x300;
if (patch &&
((rdev->pdev->device == 0x67B0) ||
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 266c57733136..1162cb5d75ed 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -951,13 +951,13 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p)
u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
(u64)track->vgt_strmout_size[i];
if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
- DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
- i, offset,
- radeon_bo_size(track->vgt_strmout_bo[i]));
+ dev_warn_once(p->dev, "streamout %d bo too small: 0x%llx, 0x%lx\n",
+ i, offset,
+ radeon_bo_size(track->vgt_strmout_bo[i]));
return -EINVAL;
}
} else {
- dev_warn(p->dev, "No buffer for streamout %d\n", i);
+ dev_warn_once(p->dev, "No buffer for streamout %d\n", i);
return -EINVAL;
}
}
@@ -979,8 +979,8 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p)
(tmp >> (i * 4)) & 0xF) {
/* at least one component is enabled */
if (track->cb_color_bo[i] == NULL) {
- dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
- __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
+ dev_warn_once(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
+ __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
return -EINVAL;
}
/* check cb */
@@ -1056,8 +1056,8 @@ static int evergreen_packet0_check(struct radeon_cs_parser *p,
case EVERGREEN_VLINE_START_END:
r = evergreen_cs_packet_parse_vline(p);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
return r;
}
break;
@@ -1143,8 +1143,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case SQ_VSTMP_RING_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -1155,15 +1155,15 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
break;
case CAYMAN_DB_EQAA:
if (p->rdev->family < CHIP_CAYMAN) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
break;
case CAYMAN_DB_DEPTH_INFO:
if (p->rdev->family < CHIP_CAYMAN) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
break;
@@ -1172,8 +1172,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] &= ~Z_ARRAY_MODE(0xf);
@@ -1214,8 +1214,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case DB_Z_READ_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
track->db_z_read_offset = radeon_get_ib_value(p, idx);
@@ -1226,8 +1226,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case DB_Z_WRITE_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
track->db_z_write_offset = radeon_get_ib_value(p, idx);
@@ -1238,8 +1238,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case DB_STENCIL_READ_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
track->db_s_read_offset = radeon_get_ib_value(p, idx);
@@ -1250,8 +1250,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case DB_STENCIL_WRITE_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
track->db_s_write_offset = radeon_get_ib_value(p, idx);
@@ -1273,8 +1273,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case VGT_STRMOUT_BUFFER_BASE_3:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
@@ -1295,8 +1295,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CP_COHER_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "missing reloc for CP_COHER_BASE "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -1311,8 +1311,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
break;
case PA_SC_AA_CONFIG:
if (p->rdev->family >= CHIP_CAYMAN) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
@@ -1320,8 +1320,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
break;
case CAYMAN_PA_SC_AA_CONFIG:
if (p->rdev->family < CHIP_CAYMAN) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
tmp = radeon_get_ib_value(p, idx) & CAYMAN_MSAA_NUM_SAMPLES_MASK;
@@ -1360,8 +1360,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
@@ -1378,8 +1378,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
@@ -1439,8 +1439,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR7_ATTRIB:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
@@ -1467,8 +1467,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR11_ATTRIB:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
@@ -1555,8 +1555,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR7_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
tmp = (reg - CB_COLOR0_BASE) / 0x3c;
@@ -1571,8 +1571,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR11_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
@@ -1584,8 +1584,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case DB_HTILE_DATA_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
track->htile_offset = radeon_get_ib_value(p, idx);
@@ -1702,36 +1702,36 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case SQ_ALU_CONST_CACHE_LS_15:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
break;
case SX_MEMORY_EXPORT_BASE:
if (p->rdev->family >= CHIP_CAYMAN) {
- dev_warn(p->dev, "bad SET_CONFIG_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONFIG_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONFIG_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONFIG_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
break;
case CAYMAN_SX_SCATTER_EXPORT_BASE:
if (p->rdev->family < CHIP_CAYMAN) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -1740,7 +1740,7 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
break;
default:
- dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return -EINVAL;
}
return 0;
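/*
 * Note: evergreen_cs_handle_reg() acts as a register whitelist. Each
 * permitted register is either passed through, rewritten with the
 * buffer address from its reloc (reloc->gpu_offset), or gated on the
 * GPU family; anything reaching the default arm is rejected as a
 * forbidden register with -EINVAL.
 */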
@@ -1795,7 +1795,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
uint64_t offset;
if (pkt->count != 1) {
- DRM_ERROR("bad SET PREDICATION\n");
+ dev_warn_once(p->dev, "bad SET PREDICATION\n");
return -EINVAL;
}
@@ -1807,13 +1807,13 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return 0;
if (pred_op > 2) {
- DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
+ dev_warn_once(p->dev, "bad SET PREDICATION operation %d\n", pred_op);
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad SET PREDICATION\n");
+ dev_warn_once(p->dev, "bad SET PREDICATION\n");
return -EINVAL;
}
@@ -1827,7 +1827,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
break;
case PACKET3_CONTEXT_CONTROL:
if (pkt->count != 1) {
- DRM_ERROR("bad CONTEXT_CONTROL\n");
+ dev_warn_once(p->dev, "bad CONTEXT_CONTROL\n");
return -EINVAL;
}
break;
@@ -1835,17 +1835,17 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
case PACKET3_NUM_INSTANCES:
case PACKET3_CLEAR_STATE:
if (pkt->count) {
- DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
+ dev_warn_once(p->dev, "bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
return -EINVAL;
}
break;
case CAYMAN_PACKET3_DEALLOC_STATE:
if (p->rdev->family < CHIP_CAYMAN) {
- DRM_ERROR("bad PACKET3_DEALLOC_STATE\n");
+ dev_warn_once(p->dev, "bad PACKET3_DEALLOC_STATE\n");
return -EINVAL;
}
if (pkt->count) {
- DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
+ dev_warn_once(p->dev, "bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
return -EINVAL;
}
break;
@@ -1854,12 +1854,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
uint64_t offset;
if (pkt->count != 1) {
- DRM_ERROR("bad INDEX_BASE\n");
+ dev_warn_once(p->dev, "bad INDEX_BASE\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad INDEX_BASE\n");
+ dev_warn_once(p->dev, "bad INDEX_BASE\n");
return -EINVAL;
}
@@ -1872,7 +1872,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
r = evergreen_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
@@ -1880,7 +1880,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
case PACKET3_INDEX_BUFFER_SIZE:
{
if (pkt->count != 0) {
- DRM_ERROR("bad INDEX_BUFFER_SIZE\n");
+ dev_warn_once(p->dev, "bad INDEX_BUFFER_SIZE\n");
return -EINVAL;
}
break;
@@ -1889,12 +1889,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
{
uint64_t offset;
if (pkt->count != 3) {
- DRM_ERROR("bad DRAW_INDEX\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad DRAW_INDEX\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX\n");
return -EINVAL;
}
@@ -1907,7 +1907,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
r = evergreen_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
@@ -1917,12 +1917,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
uint64_t offset;
if (pkt->count != 4) {
- DRM_ERROR("bad DRAW_INDEX_2\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX_2\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad DRAW_INDEX_2\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX_2\n");
return -EINVAL;
}
@@ -1935,63 +1935,63 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
r = evergreen_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
}
case PACKET3_DRAW_INDEX_AUTO:
if (pkt->count != 1) {
- DRM_ERROR("bad DRAW_INDEX_AUTO\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX_AUTO\n");
return -EINVAL;
}
r = evergreen_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
return r;
}
break;
case PACKET3_DRAW_INDEX_MULTI_AUTO:
if (pkt->count != 2) {
- DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX_MULTI_AUTO\n");
return -EINVAL;
}
r = evergreen_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
return r;
}
break;
case PACKET3_DRAW_INDEX_IMMD:
if (pkt->count < 2) {
- DRM_ERROR("bad DRAW_INDEX_IMMD\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX_IMMD\n");
return -EINVAL;
}
r = evergreen_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
case PACKET3_DRAW_INDEX_OFFSET:
if (pkt->count != 2) {
- DRM_ERROR("bad DRAW_INDEX_OFFSET\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX_OFFSET\n");
return -EINVAL;
}
r = evergreen_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
case PACKET3_DRAW_INDEX_OFFSET_2:
if (pkt->count != 3) {
- DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX_OFFSET_2\n");
return -EINVAL;
}
r = evergreen_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
@@ -2005,19 +2005,19 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
4 ADDRESS_HI Bits [31:8] - Reserved. Bits [7:0] - Upper bits of Address [47:32]
*/
if (pkt->count != 2) {
- DRM_ERROR("bad SET_BASE\n");
+ dev_warn_once(p->dev, "bad SET_BASE\n");
return -EINVAL;
}
/* currently only supporting setting indirect draw buffer base address */
if (idx_value != 1) {
- DRM_ERROR("bad SET_BASE\n");
+ dev_warn_once(p->dev, "bad SET_BASE\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad SET_BASE\n");
+ dev_warn_once(p->dev, "bad SET_BASE\n");
return -EINVAL;
}
@@ -2039,54 +2039,54 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
3 DRAW_INITIATOR Draw Initiator Register. Written to the VGT_DRAW_INITIATOR register for the assigned context
*/
if (pkt->count != 1) {
- DRM_ERROR("bad DRAW_INDIRECT\n");
+ dev_warn_once(p->dev, "bad DRAW_INDIRECT\n");
return -EINVAL;
}
if (idx_value + size > track->indirect_draw_buffer_size) {
- dev_warn(p->dev, "DRAW_INDIRECT buffer too small %u + %llu > %lu\n",
- idx_value, size, track->indirect_draw_buffer_size);
+ dev_warn_once(p->dev, "DRAW_INDIRECT buffer too small %u + %llu > %lu\n",
+ idx_value, size, track->indirect_draw_buffer_size);
return -EINVAL;
}
r = evergreen_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
}
case PACKET3_DISPATCH_DIRECT:
if (pkt->count != 3) {
- DRM_ERROR("bad DISPATCH_DIRECT\n");
+ dev_warn_once(p->dev, "bad DISPATCH_DIRECT\n");
return -EINVAL;
}
r = evergreen_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
return r;
}
break;
case PACKET3_DISPATCH_INDIRECT:
if (pkt->count != 1) {
- DRM_ERROR("bad DISPATCH_INDIRECT\n");
+ dev_warn_once(p->dev, "bad DISPATCH_INDIRECT\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad DISPATCH_INDIRECT\n");
+ dev_warn_once(p->dev, "bad DISPATCH_INDIRECT\n");
return -EINVAL;
}
ib[idx+0] = idx_value + (u32)(reloc->gpu_offset & 0xffffffff);
r = evergreen_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
case PACKET3_WAIT_REG_MEM:
if (pkt->count != 5) {
- DRM_ERROR("bad WAIT_REG_MEM\n");
+ dev_warn_once(p->dev, "bad WAIT_REG_MEM\n");
return -EINVAL;
}
/* bit 4 is reg (0) or mem (1) */
@@ -2095,7 +2095,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad WAIT_REG_MEM\n");
+ dev_warn_once(p->dev, "bad WAIT_REG_MEM\n");
return -EINVAL;
}
@@ -2106,7 +2106,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc);
ib[idx+2] = upper_32_bits(offset) & 0xff;
} else if (idx_value & 0x100) {
- DRM_ERROR("cannot use PFP on REG wait\n");
+ dev_warn_once(p->dev, "cannot use PFP on REG wait\n");
return -EINVAL;
}
break;
@@ -2115,7 +2115,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
u32 command, size, info;
u64 offset, tmp;
if (pkt->count != 4) {
- DRM_ERROR("bad CP DMA\n");
+ dev_warn_once(p->dev, "bad CP DMA\n");
return -EINVAL;
}
command = radeon_get_ib_value(p, idx+4);
@@ -2129,7 +2129,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
(command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
/* non mem to mem copies requires dw aligned count */
if (size % 4) {
- DRM_ERROR("CP DMA command requires dw count alignment\n");
+ dev_warn_once(p->dev, "CP DMA command requires dw count alignment\n");
return -EINVAL;
}
}
@@ -2137,19 +2137,19 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* src address space is register */
/* GDS is ok */
if (((info & 0x60000000) >> 29) != 1) {
- DRM_ERROR("CP DMA SAS not supported\n");
+ dev_warn_once(p->dev, "CP DMA SAS not supported\n");
return -EINVAL;
}
} else {
if (command & PACKET3_CP_DMA_CMD_SAIC) {
- DRM_ERROR("CP DMA SAIC only supported for registers\n");
+ dev_warn_once(p->dev, "CP DMA SAIC only supported for registers\n");
return -EINVAL;
}
/* src address space is memory */
if (((info & 0x60000000) >> 29) == 0) {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad CP DMA SRC\n");
+ dev_warn_once(p->dev, "bad CP DMA SRC\n");
return -EINVAL;
}
@@ -2159,15 +2159,15 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
offset = reloc->gpu_offset + tmp;
if ((tmp + size) > radeon_bo_size(reloc->robj)) {
- dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
- tmp + size, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
+ tmp + size, radeon_bo_size(reloc->robj));
return -EINVAL;
}
ib[idx] = offset;
ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
} else if (((info & 0x60000000) >> 29) != 2) {
- DRM_ERROR("bad CP DMA SRC_SEL\n");
+ dev_warn_once(p->dev, "bad CP DMA SRC_SEL\n");
return -EINVAL;
}
}
@@ -2175,19 +2175,19 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* dst address space is register */
/* GDS is ok */
if (((info & 0x00300000) >> 20) != 1) {
- DRM_ERROR("CP DMA DAS not supported\n");
+ dev_warn_once(p->dev, "CP DMA DAS not supported\n");
return -EINVAL;
}
} else {
/* dst address space is memory */
if (command & PACKET3_CP_DMA_CMD_DAIC) {
- DRM_ERROR("CP DMA DAIC only supported for registers\n");
+ dev_warn_once(p->dev, "CP DMA DAIC only supported for registers\n");
return -EINVAL;
}
if (((info & 0x00300000) >> 20) == 0) {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad CP DMA DST\n");
+ dev_warn_once(p->dev, "bad CP DMA DST\n");
return -EINVAL;
}
@@ -2197,15 +2197,15 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
offset = reloc->gpu_offset + tmp;
if ((tmp + size) > radeon_bo_size(reloc->robj)) {
- dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
- tmp + size, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
+ tmp + size, radeon_bo_size(reloc->robj));
return -EINVAL;
}
ib[idx+2] = offset;
ib[idx+3] = upper_32_bits(offset) & 0xff;
} else {
- DRM_ERROR("bad CP DMA DST_SEL\n");
+ dev_warn_once(p->dev, "bad CP DMA DST_SEL\n");
return -EINVAL;
}
}
@@ -2213,13 +2213,13 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
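/*
 * Note: the CP DMA checks above decode the source address space from
 * info bits [30:29] and the destination space from bits [21:20]. A
 * reloc is consumed and bounds-checked against radeon_bo_size() only
 * when the selector says memory; the register/GDS paths are validated
 * without a reloc, and unsupported selector values fail with -EINVAL.
 * (The bit-field meanings are inferred from the masks and shifts in
 * this hunk.)
 */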
case PACKET3_PFP_SYNC_ME:
if (pkt->count) {
- DRM_ERROR("bad PFP_SYNC_ME\n");
+ dev_warn_once(p->dev, "bad PFP_SYNC_ME\n");
return -EINVAL;
}
break;
case PACKET3_SURFACE_SYNC:
if (pkt->count != 3) {
- DRM_ERROR("bad SURFACE_SYNC\n");
+ dev_warn_once(p->dev, "bad SURFACE_SYNC\n");
return -EINVAL;
}
/* 0xffffffff/0x0 is flush all cache flag */
@@ -2227,7 +2227,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
radeon_get_ib_value(p, idx + 2) != 0) {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad SURFACE_SYNC\n");
+ dev_warn_once(p->dev, "bad SURFACE_SYNC\n");
return -EINVAL;
}
ib[idx+2] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -2235,7 +2235,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
break;
case PACKET3_EVENT_WRITE:
if (pkt->count != 2 && pkt->count != 0) {
- DRM_ERROR("bad EVENT_WRITE\n");
+ dev_warn_once(p->dev, "bad EVENT_WRITE\n");
return -EINVAL;
}
if (pkt->count) {
@@ -2243,7 +2243,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad EVENT_WRITE\n");
+ dev_warn_once(p->dev, "bad EVENT_WRITE\n");
return -EINVAL;
}
offset = reloc->gpu_offset +
@@ -2259,12 +2259,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
uint64_t offset;
if (pkt->count != 4) {
- DRM_ERROR("bad EVENT_WRITE_EOP\n");
+ dev_warn_once(p->dev, "bad EVENT_WRITE_EOP\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad EVENT_WRITE_EOP\n");
+ dev_warn_once(p->dev, "bad EVENT_WRITE_EOP\n");
return -EINVAL;
}
@@ -2281,12 +2281,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
uint64_t offset;
if (pkt->count != 3) {
- DRM_ERROR("bad EVENT_WRITE_EOS\n");
+ dev_warn_once(p->dev, "bad EVENT_WRITE_EOS\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad EVENT_WRITE_EOS\n");
+ dev_warn_once(p->dev, "bad EVENT_WRITE_EOS\n");
return -EINVAL;
}
@@ -2304,7 +2304,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
(start_reg >= PACKET3_SET_CONFIG_REG_END) ||
(end_reg >= PACKET3_SET_CONFIG_REG_END)) {
- DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+ dev_warn_once(p->dev, "bad PACKET3_SET_CONFIG_REG\n");
return -EINVAL;
}
for (reg = start_reg, idx++; reg <= end_reg; reg += 4, idx++) {
@@ -2321,7 +2321,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_CONTEXT_REG_START) ||
(start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
(end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
- DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
+ dev_warn_once(p->dev, "bad PACKET3_SET_CONTEXT_REG\n");
return -EINVAL;
}
for (reg = start_reg, idx++; reg <= end_reg; reg += 4, idx++) {
@@ -2334,7 +2334,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
break;
case PACKET3_SET_RESOURCE:
if (pkt->count % 8) {
- DRM_ERROR("bad SET_RESOURCE\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE\n");
return -EINVAL;
}
start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START;
@@ -2342,7 +2342,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_RESOURCE_START) ||
(start_reg >= PACKET3_SET_RESOURCE_END) ||
(end_reg >= PACKET3_SET_RESOURCE_END)) {
- DRM_ERROR("bad SET_RESOURCE\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE\n");
return -EINVAL;
}
for (i = 0; i < (pkt->count / 8); i++) {
@@ -2355,7 +2355,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* tex base */
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad SET_RESOURCE (tex)\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE (tex)\n");
return -EINVAL;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
@@ -2392,7 +2392,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
} else {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad SET_RESOURCE (tex)\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE (tex)\n");
return -EINVAL;
}
moffset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -2411,14 +2411,15 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* vtx base */
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad SET_RESOURCE (vtx)\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE (vtx)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
size = radeon_get_ib_value(p, idx+1+(i*8)+1);
if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
/* force size to size of the buffer */
- dev_warn_ratelimited(p->dev, "vbo resource seems too big for the bo\n");
+ dev_warn_once(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
+ size + offset, radeon_bo_size(reloc->robj));
ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset;
}
@@ -2431,7 +2432,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
case SQ_TEX_VTX_INVALID_TEXTURE:
case SQ_TEX_VTX_INVALID_BUFFER:
default:
- DRM_ERROR("bad SET_RESOURCE\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE\n");
return -EINVAL;
}
}
@@ -2445,7 +2446,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_BOOL_CONST_START) ||
(start_reg >= PACKET3_SET_BOOL_CONST_END) ||
(end_reg >= PACKET3_SET_BOOL_CONST_END)) {
- DRM_ERROR("bad SET_BOOL_CONST\n");
+ dev_warn_once(p->dev, "bad SET_BOOL_CONST\n");
return -EINVAL;
}
break;
@@ -2455,7 +2456,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_LOOP_CONST_START) ||
(start_reg >= PACKET3_SET_LOOP_CONST_END) ||
(end_reg >= PACKET3_SET_LOOP_CONST_END)) {
- DRM_ERROR("bad SET_LOOP_CONST\n");
+ dev_warn_once(p->dev, "bad SET_LOOP_CONST\n");
return -EINVAL;
}
break;
@@ -2465,13 +2466,13 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_CTL_CONST_START) ||
(start_reg >= PACKET3_SET_CTL_CONST_END) ||
(end_reg >= PACKET3_SET_CTL_CONST_END)) {
- DRM_ERROR("bad SET_CTL_CONST\n");
+ dev_warn_once(p->dev, "bad SET_CTL_CONST\n");
return -EINVAL;
}
break;
case PACKET3_SET_SAMPLER:
if (pkt->count % 3) {
- DRM_ERROR("bad SET_SAMPLER\n");
+ dev_warn_once(p->dev, "bad SET_SAMPLER\n");
return -EINVAL;
}
start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START;
@@ -2479,13 +2480,13 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_SAMPLER_START) ||
(start_reg >= PACKET3_SET_SAMPLER_END) ||
(end_reg >= PACKET3_SET_SAMPLER_END)) {
- DRM_ERROR("bad SET_SAMPLER\n");
+ dev_warn_once(p->dev, "bad SET_SAMPLER\n");
return -EINVAL;
}
break;
case PACKET3_STRMOUT_BUFFER_UPDATE:
if (pkt->count != 4) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
+ dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
return -EINVAL;
}
/* Updating memory at DST_ADDRESS. */
@@ -2493,14 +2494,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
u64 offset;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
+ dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+1);
offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2512,14 +2513,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
u64 offset;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
+ dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+3);
offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2532,23 +2533,23 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
u64 offset;
if (pkt->count != 3) {
- DRM_ERROR("bad MEM_WRITE (invalid count)\n");
+ dev_warn_once(p->dev, "bad MEM_WRITE (invalid count)\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
+ dev_warn_once(p->dev, "bad MEM_WRITE (missing reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+0);
offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
if (offset & 0x7) {
- DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
+ dev_warn_once(p->dev, "bad MEM_WRITE (address not qwords aligned)\n");
return -EINVAL;
}
if ((offset + 8) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
- offset + 8, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2558,7 +2559,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
case PACKET3_COPY_DW:
if (pkt->count != 4) {
- DRM_ERROR("bad COPY_DW (invalid count)\n");
+ dev_warn_once(p->dev, "bad COPY_DW (invalid count)\n");
return -EINVAL;
}
if (idx_value & 0x1) {
@@ -2566,14 +2567,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* SRC is memory. */
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad COPY_DW (missing src reloc)\n");
+ dev_warn_once(p->dev, "bad COPY_DW (missing src reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+1);
offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2583,8 +2584,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* SRC is a reg. */
reg = radeon_get_ib_value(p, idx+1) << 2;
if (!evergreen_is_safe_reg(p, reg)) {
- dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
- reg, idx + 1);
+ dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n",
+ reg, idx + 1);
return -EINVAL;
}
}
@@ -2593,14 +2594,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* DST is memory. */
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
+ dev_warn_once(p->dev, "bad COPY_DW (missing dst reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+3);
offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2610,8 +2611,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* DST is a reg. */
reg = radeon_get_ib_value(p, idx+3) << 2;
if (!evergreen_is_safe_reg(p, reg)) {
- dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
- reg, idx + 3);
+ dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n",
+ reg, idx + 3);
return -EINVAL;
}
}
@@ -2622,7 +2623,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
uint32_t allowed_reg_base;
uint32_t source_sel;
if (pkt->count != 2) {
- DRM_ERROR("bad SET_APPEND_CNT (invalid count)\n");
+ dev_warn_once(p->dev, "bad SET_APPEND_CNT (invalid count)\n");
return -EINVAL;
}
@@ -2632,8 +2633,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
areg = idx_value >> 16;
if (areg < allowed_reg_base || areg > (allowed_reg_base + 11)) {
- dev_warn(p->dev, "forbidden register for append cnt 0x%08x at %d\n",
- areg, idx);
+ dev_warn_once(p->dev, "forbidden register for append cnt 0x%08x at %d\n",
+ areg, idx);
return -EINVAL;
}
@@ -2643,7 +2644,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
uint32_t swap;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad SET_APPEND_CNT (missing reloc)\n");
+ dev_warn_once(p->dev, "bad SET_APPEND_CNT (missing reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx + 1);
@@ -2656,7 +2657,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
ib[idx+1] = (offset & 0xfffffffc) | swap;
ib[idx+2] = upper_32_bits(offset) & 0xff;
} else {
- DRM_ERROR("bad SET_APPEND_CNT (unsupported operation)\n");
+ dev_warn_once(p->dev, "bad SET_APPEND_CNT (unsupported operation)\n");
return -EINVAL;
}
break;
@@ -2666,23 +2667,23 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
u64 offset;
if (pkt->count != 2) {
- DRM_ERROR("bad COND_EXEC (invalid count)\n");
+ dev_warn_once(p->dev, "bad COND_EXEC (invalid count)\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad COND_EXEC (missing reloc)\n");
+ dev_warn_once(p->dev, "bad COND_EXEC (missing reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx + 0);
offset += ((u64)(radeon_get_ib_value(p, idx + 1) & 0xff)) << 32UL;
if (offset & 0x7) {
- DRM_ERROR("bad COND_EXEC (address not qwords aligned)\n");
+ dev_warn_once(p->dev, "bad COND_EXEC (address not qwords aligned)\n");
return -EINVAL;
}
if ((offset + 8) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad COND_EXEC bo too small: 0x%llx, 0x%lx\n",
- offset + 8, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "bad COND_EXEC bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2692,7 +2693,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
case PACKET3_COND_WRITE:
if (pkt->count != 7) {
- DRM_ERROR("bad COND_WRITE (invalid count)\n");
+ dev_warn_once(p->dev, "bad COND_WRITE (invalid count)\n");
return -EINVAL;
}
if (idx_value & 0x10) {
@@ -2700,14 +2701,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* POLL is memory. */
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad COND_WRITE (missing src reloc)\n");
+ dev_warn_once(p->dev, "bad COND_WRITE (missing src reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx + 1);
offset += ((u64)(radeon_get_ib_value(p, idx + 2) & 0xff)) << 32;
if ((offset + 8) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad COND_WRITE src bo too small: 0x%llx, 0x%lx\n",
- offset + 8, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "bad COND_WRITE src bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2717,8 +2718,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* POLL is a reg. */
reg = radeon_get_ib_value(p, idx + 1) << 2;
if (!evergreen_is_safe_reg(p, reg)) {
- dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
- reg, idx + 1);
+ dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n",
+ reg, idx + 1);
return -EINVAL;
}
}
@@ -2727,14 +2728,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* WRITE is memory. */
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad COND_WRITE (missing dst reloc)\n");
+ dev_warn_once(p->dev, "bad COND_WRITE (missing dst reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx + 5);
offset += ((u64)(radeon_get_ib_value(p, idx + 6) & 0xff)) << 32;
if ((offset + 8) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad COND_WRITE dst bo too small: 0x%llx, 0x%lx\n",
- offset + 8, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "bad COND_WRITE dst bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2744,8 +2745,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* WRITE is a reg. */
reg = radeon_get_ib_value(p, idx + 5) << 2;
if (!evergreen_is_safe_reg(p, reg)) {
- dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
- reg, idx + 5);
+ dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n",
+ reg, idx + 5);
return -EINVAL;
}
}
@@ -2753,7 +2754,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
case PACKET3_NOP:
break;
default:
- DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+ dev_warn_once(p->dev, "Packet3 opcode %x not supported\n", pkt->opcode);
return -EINVAL;
}
return 0;
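/*
 * Note: a recurring pattern above is the 40-bit address assembly from
 * two dwords, a full low word plus the bottom 8 bits of the high word:
 *
 *	offset = radeon_get_ib_value(p, idx+1);
 *	offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
 *
 * The result is range-checked against radeon_bo_size(reloc->robj)
 * before reloc->gpu_offset is folded in and written back to ib[].
 */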
@@ -2853,7 +2854,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
r = evergreen_packet3_check(p, &pkt);
break;
default:
- DRM_ERROR("Unknown packet type %d !\n", pkt.type);
+ dev_warn_once(p->dev, "Unknown packet type %d !\n", pkt.type);
kfree(p->track);
p->track = NULL;
return -EINVAL;
@@ -2896,8 +2897,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
do {
if (p->idx >= ib_chunk->length_dw) {
- DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
- p->idx, ib_chunk->length_dw);
+ dev_warn_once(p->dev, "Can not parse packet at %d after CS end %d !\n",
+ p->idx, ib_chunk->length_dw);
return -EINVAL;
}
idx = p->idx;
@@ -2910,7 +2911,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
case DMA_PACKET_WRITE:
r = r600_dma_cs_next_reloc(p, &dst_reloc);
if (r) {
- DRM_ERROR("bad DMA_PACKET_WRITE\n");
+ dev_warn_once(p->dev, "bad DMA_PACKET_WRITE\n");
return -EINVAL;
}
switch (sub_cmd) {
@@ -2932,24 +2933,24 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
p->idx += count + 3;
break;
default:
- DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, header);
+ dev_warn_once(p->dev, "bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, header);
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
- dst_offset, radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA write buffer too small (%llu %lu)\n",
+ dst_offset, radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
break;
case DMA_PACKET_COPY:
r = r600_dma_cs_next_reloc(p, &src_reloc);
if (r) {
- DRM_ERROR("bad DMA_PACKET_COPY\n");
+ dev_warn_once(p->dev, "bad DMA_PACKET_COPY\n");
return -EINVAL;
}
r = r600_dma_cs_next_reloc(p, &dst_reloc);
if (r) {
- DRM_ERROR("bad DMA_PACKET_COPY\n");
+ dev_warn_once(p->dev, "bad DMA_PACKET_COPY\n");
return -EINVAL;
}
switch (sub_cmd) {
@@ -2961,13 +2962,13 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
@@ -3001,13 +3002,13 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
}
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
p->idx += 9;
@@ -3020,13 +3021,13 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
- src_offset + count, radeon_bo_size(src_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
+ src_offset + count, radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
- dst_offset + count, radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
+ dst_offset + count, radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xffffffff);
@@ -3039,7 +3040,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
case 0x41:
/* L2L, partial */
if (p->family < CHIP_CAYMAN) {
- DRM_ERROR("L2L Partial is cayman only !\n");
+ dev_warn_once(p->dev, "L2L Partial is cayman only !\n");
return -EINVAL;
}
ib[idx+1] += (u32)(src_reloc->gpu_offset & 0xffffffff);
@@ -3054,7 +3055,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
/* L2L, dw, broadcast */
r = r600_dma_cs_next_reloc(p, &dst2_reloc);
if (r) {
- DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
+ dev_warn_once(p->dev, "bad L2L, dw, broadcast DMA_PACKET_COPY\n");
return -EINVAL;
}
dst_offset = radeon_get_ib_value(p, idx+1);
@@ -3064,18 +3065,18 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
src_offset = radeon_get_ib_value(p, idx+3);
src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
- dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
return -EINVAL;
}
ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
@@ -3089,12 +3090,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
/* Copy L2T Frame to Field */
case 0x48:
if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
- DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+ dev_warn_once(p->dev, "bad L2T, frame to fields DMA_PACKET_COPY\n");
return -EINVAL;
}
r = r600_dma_cs_next_reloc(p, &dst2_reloc);
if (r) {
- DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+ dev_warn_once(p->dev, "bad L2T, frame to fields DMA_PACKET_COPY\n");
return -EINVAL;
}
dst_offset = radeon_get_ib_value(p, idx+1);
@@ -3104,18 +3105,18 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
src_offset = radeon_get_ib_value(p, idx+8);
src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
- dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
return -EINVAL;
}
ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
@@ -3128,7 +3129,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
case 0x49:
/* L2T, T2L partial */
if (p->family < CHIP_CAYMAN) {
- DRM_ERROR("L2T, T2L Partial is cayman only !\n");
+ dev_warn_once(p->dev, "L2T, T2L Partial is cayman only !\n");
return -EINVAL;
}
/* detile bit */
@@ -3151,12 +3152,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
case 0x4b:
/* L2T, broadcast */
if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
- DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ dev_warn_once(p->dev, "bad L2T, broadcast DMA_PACKET_COPY\n");
return -EINVAL;
}
r = r600_dma_cs_next_reloc(p, &dst2_reloc);
if (r) {
- DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ dev_warn_once(p->dev, "bad L2T, broadcast DMA_PACKET_COPY\n");
return -EINVAL;
}
dst_offset = radeon_get_ib_value(p, idx+1);
@@ -3166,18 +3167,18 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
src_offset = radeon_get_ib_value(p, idx+8);
src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
- dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
return -EINVAL;
}
ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
@@ -3212,13 +3213,13 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
}
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
p->idx += 9;
@@ -3227,7 +3228,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
case 0x4d:
/* T2T partial */
if (p->family < CHIP_CAYMAN) {
- DRM_ERROR("L2T, T2L Partial is cayman only !\n");
+ dev_warn_once(p->dev, "L2T, T2L Partial is cayman only !\n");
return -EINVAL;
}
ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
@@ -3238,12 +3239,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
case 0x4f:
/* L2T, broadcast */
if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
- DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ dev_warn_once(p->dev, "bad L2T, broadcast DMA_PACKET_COPY\n");
return -EINVAL;
}
r = r600_dma_cs_next_reloc(p, &dst2_reloc);
if (r) {
- DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ dev_warn_once(p->dev, "bad L2T, broadcast DMA_PACKET_COPY\n");
return -EINVAL;
}
dst_offset = radeon_get_ib_value(p, idx+1);
@@ -3253,18 +3254,18 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
src_offset = radeon_get_ib_value(p, idx+8);
src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
- dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
return -EINVAL;
}
ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
@@ -3274,21 +3275,21 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
p->idx += 10;
break;
default:
- DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, header);
+ dev_warn_once(p->dev, "bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, header);
return -EINVAL;
}
break;
case DMA_PACKET_CONSTANT_FILL:
r = r600_dma_cs_next_reloc(p, &dst_reloc);
if (r) {
- DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
+ dev_warn_once(p->dev, "bad DMA_PACKET_CONSTANT_FILL\n");
return -EINVAL;
}
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
- dst_offset, radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
+ dst_offset, radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
@@ -3299,7 +3300,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
p->idx += 1;
break;
default:
- DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+ dev_warn_once(p->dev, "Unknown packet type %d at %d !\n", cmd, idx);
return -EINVAL;
}
} while (p->idx < p->chunk_ib->length_dw);
@@ -3430,7 +3431,7 @@ static bool evergreen_vm_reg_valid(u32 reg)
case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
return true;
default:
- DRM_ERROR("Invalid register 0x%x in CS\n", reg);
+ DRM_DEBUG("Invalid register 0x%x in CS\n", reg);
return false;
}
}
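/*
 * Note: evergreen_vm_reg_valid() is the one site demoted to DRM_DEBUG
 * rather than dev_warn_once(), presumably because the helper only
 * receives the register number and has no device pointer to pass to
 * dev_warn_once(). (The rationale is not stated in the hunk itself.)
 */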
@@ -3448,7 +3449,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
break;
case PACKET3_SET_BASE:
if (idx_value != 1) {
- DRM_ERROR("bad SET_BASE");
+ dev_warn_once(rdev->dev, "bad SET_BASE");
return -EINVAL;
}
break;
@@ -3519,7 +3520,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
(start_reg >= PACKET3_SET_CONFIG_REG_END) ||
(end_reg >= PACKET3_SET_CONFIG_REG_END)) {
- DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+ dev_warn_once(rdev->dev, "bad PACKET3_SET_CONFIG_REG\n");
return -EINVAL;
}
for (i = 0; i < pkt->count; i++) {
@@ -3539,7 +3540,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
(command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
/* non mem to mem copies requires dw aligned count */
if ((command & 0x1fffff) % 4) {
- DRM_ERROR("CP DMA command requires dw count alignment\n");
+ dev_warn_once(rdev->dev, "CP DMA command requires dw count alignment\n");
return -EINVAL;
}
}
@@ -3550,14 +3551,14 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
if (command & PACKET3_CP_DMA_CMD_SAIC) {
reg = start_reg;
if (!evergreen_vm_reg_valid(reg)) {
- DRM_ERROR("CP DMA Bad SRC register\n");
+ dev_warn_once(rdev->dev, "CP DMA Bad SRC register\n");
return -EINVAL;
}
} else {
for (i = 0; i < (command & 0x1fffff); i++) {
reg = start_reg + (4 * i);
if (!evergreen_vm_reg_valid(reg)) {
- DRM_ERROR("CP DMA Bad SRC register\n");
+ dev_warn_once(rdev->dev, "CP DMA Bad SRC register\n");
return -EINVAL;
}
}
@@ -3571,14 +3572,14 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
if (command & PACKET3_CP_DMA_CMD_DAIC) {
reg = start_reg;
if (!evergreen_vm_reg_valid(reg)) {
- DRM_ERROR("CP DMA Bad DST register\n");
+ dev_warn_once(rdev->dev, "CP DMA Bad DST register\n");
return -EINVAL;
}
} else {
for (i = 0; i < (command & 0x1fffff); i++) {
reg = start_reg + (4 * i);
if (!evergreen_vm_reg_valid(reg)) {
- DRM_ERROR("CP DMA Bad DST register\n");
+ dev_warn_once(rdev->dev, "CP DMA Bad DST register\n");
return -EINVAL;
}
}
@@ -3591,7 +3592,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
uint32_t allowed_reg_base;
if (pkt->count != 2) {
- DRM_ERROR("bad SET_APPEND_CNT (invalid count)\n");
+ dev_warn_once(rdev->dev, "bad SET_APPEND_CNT (invalid count)\n");
return -EINVAL;
}
@@ -3601,8 +3602,8 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
areg = idx_value >> 16;
if (areg < allowed_reg_base || areg > (allowed_reg_base + 11)) {
- DRM_ERROR("forbidden register for append cnt 0x%08x at %d\n",
- areg, idx);
+ dev_warn_once(rdev->dev, "forbidden register for append cnt 0x%08x at %d\n",
+ areg, idx);
return -EINVAL;
}
break;
@@ -3681,7 +3682,9 @@ int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
idx += count + 3;
break;
default:
- DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, ib->ptr[idx]);
+ dev_warn_once(rdev->dev,
+ "bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n",
+ idx, ib->ptr[idx]);
return -EINVAL;
}
break;
@@ -3732,7 +3735,9 @@ int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
idx += 10;
break;
default:
- DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, ib->ptr[idx]);
+ dev_warn_once(rdev->dev,
+ "bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n",
+ idx, ib->ptr[idx]);
return -EINVAL;
}
break;
@@ -3743,7 +3748,7 @@ int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
idx += 1;
break;
default:
- DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+ dev_warn_once(rdev->dev, "Unknown packet type %d at %d !\n", cmd, idx);
return -EINVAL;
}
} while (idx < ib->length_dw);
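/*
 * Note: evergreen_dma_ib_parse() walks the same DMA packet encodings
 * as evergreen_dma_cs_parse() above, but only validates packet
 * structure and sub-command codes and advances idx; it consumes no
 * relocs and rewrites no addresses.
 */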
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index e08559c44a5c..82edbfb259bf 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -3397,7 +3397,7 @@ static int ni_enable_smc_cac(struct radeon_device *rdev,
if (PPSMC_Result_OK != smc_result)
ret = -EINVAL;
- ni_pi->cac_enabled = (PPSMC_Result_OK == smc_result) ? true : false;
+ ni_pi->cac_enabled = PPSMC_Result_OK == smc_result;
}
} else if (ni_pi->cac_enabled) {
smc_result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
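/*
 * Note: the ni_dpm.c hunk is purely cosmetic; the comparison already
 * yields bool, so the "? true : false" ternary was redundant.
 */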
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 80703417d8a1..07a9c523a17a 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1298,8 +1298,8 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1313,7 +1313,7 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
tile_flags |= RADEON_DST_TILE_MACRO;
if (reloc->tiling_flags & RADEON_TILING_MICRO) {
if (reg == RADEON_SRC_PITCH_OFFSET) {
- DRM_ERROR("Cannot src blit from microtiled surface\n");
+ dev_warn_once(p->dev, "Cannot src blit from microtiled surface\n");
radeon_cs_dump_packet(p, pkt);
return -EINVAL;
}
@@ -1342,8 +1342,8 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
track = (struct r100_cs_track *)p->track;
c = radeon_get_ib_value(p, idx++) & 0x1F;
if (c > 16) {
- DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
- pkt->opcode);
+ dev_warn_once(p->dev, "Only 16 vertex buffers are allowed %d\n",
+ pkt->opcode);
radeon_cs_dump_packet(p, pkt);
return -EINVAL;
}
@@ -1351,8 +1351,8 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
for (i = 0; i < (c - 1); i += 2, idx += 3) {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for packet3 %d\n",
- pkt->opcode);
+ dev_warn_once(p->dev, "No reloc for packet3 %d\n",
+ pkt->opcode);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1364,8 +1364,8 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
track->arrays[i + 0].esize &= 0x7F;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for packet3 %d\n",
- pkt->opcode);
+ dev_warn_once(p->dev, "No reloc for packet3 %d\n",
+ pkt->opcode);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1377,8 +1377,8 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
if (c & 1) {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for packet3 %d\n",
- pkt->opcode);
+ dev_warn_once(p->dev, "No reloc for packet3 %d\n",
+ pkt->opcode);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1470,12 +1470,12 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
/* check its a wait until and only 1 count */
if (waitreloc.reg != RADEON_WAIT_UNTIL ||
waitreloc.count != 0) {
- DRM_ERROR("vline wait had illegal wait until segment\n");
+ dev_warn_once(p->dev, "vline wait had illegal wait until segment\n");
return -EINVAL;
}
if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
- DRM_ERROR("vline wait had illegal wait until\n");
+ dev_warn_once(p->dev, "vline wait had illegal wait until\n");
return -EINVAL;
}
@@ -1493,7 +1493,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
reg = R100_CP_PACKET0_GET_REG(header);
crtc = drm_crtc_find(rdev_to_drm(p->rdev), p->filp, crtc_id);
if (!crtc) {
- DRM_ERROR("cannot find crtc %d\n", crtc_id);
+ dev_warn_once(p->dev, "cannot find crtc %d\n", crtc_id);
return -ENOENT;
}
radeon_crtc = to_radeon_crtc(crtc);
@@ -1514,7 +1514,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
break;
default:
- DRM_ERROR("unknown crtc reloc\n");
+ dev_warn_once(p->dev, "unknown crtc reloc\n");
return -EINVAL;
}
ib[h_idx] = header;
@@ -1599,7 +1599,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_CRTC_GUI_TRIG_VLINE:
r = r100_cs_packet_parse_vline(p);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
@@ -1616,8 +1616,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_RB3D_DEPTHOFFSET:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1629,8 +1629,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_RB3D_COLOROFFSET:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1645,8 +1645,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
i = (reg - RADEON_PP_TXOFFSET_0) / 24;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1672,8 +1672,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1690,8 +1690,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1708,8 +1708,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1726,8 +1726,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_RB3D_COLORPITCH:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1768,8 +1768,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
track->cb[0].cpp = 4;
break;
default:
- DRM_ERROR("Invalid color buffer format (%d) !\n",
- ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
+ dev_warn_once(p->dev, "Invalid color buffer format (%d) !\n",
+ ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
return -EINVAL;
}
track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
@@ -1797,8 +1797,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_RB3D_ZPASS_ADDR:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1927,10 +1927,10 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
idx = pkt->idx + 1;
value = radeon_get_ib_value(p, idx + 2);
if ((value + 1) > radeon_bo_size(robj)) {
- DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
- "(need %u have %lu) !\n",
- value + 1,
- radeon_bo_size(robj));
+ dev_warn_once(p->dev, "[drm] Buffer too small for PACKET3 INDX_BUFFER "
+ "(need %u have %lu) !\n",
+ value + 1,
+ radeon_bo_size(robj));
return -EINVAL;
}
return 0;
@@ -1957,7 +1957,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
case PACKET3_INDX_BUFFER:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
+ dev_warn_once(p->dev, "No reloc for packet3 %d\n", pkt->opcode);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1971,7 +1971,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
/* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
+ dev_warn_once(p->dev, "No reloc for packet3 %d\n", pkt->opcode);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1992,7 +1992,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
break;
case PACKET3_3D_DRAW_IMMD:
if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
- DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
+ dev_warn_once(p->dev, "PRIM_WALK must be 3 for IMMD draw\n");
return -EINVAL;
}
track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
@@ -2005,7 +2005,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
/* triggers drawing using in-packet vertex data */
case PACKET3_3D_DRAW_IMMD_2:
if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
- DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
+ dev_warn_once(p->dev, "PRIM_WALK must be 3 for IMMD draw\n");
return -EINVAL;
}
track->vap_vf_cntl = radeon_get_ib_value(p, idx);
@@ -2051,7 +2051,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
case PACKET3_NOP:
break;
default:
- DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+ dev_warn_once(p->dev, "Packet3 opcode %x not supported\n", pkt->opcode);
return -EINVAL;
}
return 0;
@@ -2093,8 +2093,8 @@ int r100_cs_parse(struct radeon_cs_parser *p)
r = r100_packet3_check(p, &pkt);
break;
default:
- DRM_ERROR("Unknown packet type %d !\n",
- pkt.type);
+ dev_warn_once(p->dev, "Unknown packet type %d !\n",
+ pkt.type);
return -EINVAL;
}
if (r)
@@ -2105,19 +2105,19 @@ int r100_cs_parse(struct radeon_cs_parser *p)
static void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
{
- DRM_ERROR("pitch %d\n", t->pitch);
- DRM_ERROR("use_pitch %d\n", t->use_pitch);
- DRM_ERROR("width %d\n", t->width);
- DRM_ERROR("width_11 %d\n", t->width_11);
- DRM_ERROR("height %d\n", t->height);
- DRM_ERROR("height_11 %d\n", t->height_11);
- DRM_ERROR("num levels %d\n", t->num_levels);
- DRM_ERROR("depth %d\n", t->txdepth);
- DRM_ERROR("bpp %d\n", t->cpp);
- DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
- DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
- DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
- DRM_ERROR("compress format %d\n", t->compress_format);
+ DRM_DEBUG("pitch %d\n", t->pitch);
+ DRM_DEBUG("use_pitch %d\n", t->use_pitch);
+ DRM_DEBUG("width %d\n", t->width);
+ DRM_DEBUG("width_11 %d\n", t->width_11);
+ DRM_DEBUG("height %d\n", t->height);
+ DRM_DEBUG("height_11 %d\n", t->height_11);
+ DRM_DEBUG("num levels %d\n", t->num_levels);
+ DRM_DEBUG("depth %d\n", t->txdepth);
+ DRM_DEBUG("bpp %d\n", t->cpp);
+ DRM_DEBUG("coordinate type %d\n", t->tex_coord_type);
+ DRM_DEBUG("width round to power of 2 %d\n", t->roundup_w);
+ DRM_DEBUG("height round to power of 2 %d\n", t->roundup_h);
+ DRM_DEBUG("compress format %d\n", t->compress_format);
}
static int r100_track_compress_size(int compress_format, int w, int h)
@@ -2172,8 +2172,9 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
size += track->textures[idx].cube_info[face].offset;
if (size > radeon_bo_size(cube_robj)) {
- DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
- size, radeon_bo_size(cube_robj));
+ dev_warn_once(rdev->dev,
+ "Cube texture offset greater than object size %lu %lu\n",
+ size, radeon_bo_size(cube_robj));
r100_cs_track_texture_print(&track->textures[idx]);
return -1;
}
@@ -2196,7 +2197,7 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
continue;
robj = track->textures[u].robj;
if (robj == NULL) {
- DRM_ERROR("No texture bound to unit %u\n", u);
+ dev_warn_once(rdev->dev, "No texture bound to unit %u\n", u);
return -EINVAL;
}
size = 0;
@@ -2249,13 +2250,13 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
size *= 6;
break;
default:
- DRM_ERROR("Invalid texture coordinate type %u for unit "
- "%u\n", track->textures[u].tex_coord_type, u);
+ dev_warn_once(rdev->dev, "Invalid texture coordinate type %u for unit "
+ "%u\n", track->textures[u].tex_coord_type, u);
return -EINVAL;
}
if (size > radeon_bo_size(robj)) {
- DRM_ERROR("Texture of unit %u needs %lu bytes but is "
- "%lu\n", u, size, radeon_bo_size(robj));
+ dev_warn_once(rdev->dev, "Texture of unit %u needs %lu bytes but is "
+ "%lu\n", u, size, radeon_bo_size(robj));
r100_cs_track_texture_print(&track->textures[u]);
return -EINVAL;
}
@@ -2277,18 +2278,18 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
for (i = 0; i < num_cb; i++) {
if (track->cb[i].robj == NULL) {
- DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
+ dev_warn_once(rdev->dev, "[drm] No buffer for color buffer %d !\n", i);
return -EINVAL;
}
size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
size += track->cb[i].offset;
if (size > radeon_bo_size(track->cb[i].robj)) {
- DRM_ERROR("[drm] Buffer too small for color buffer %d "
- "(need %lu have %lu) !\n", i, size,
- radeon_bo_size(track->cb[i].robj));
- DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
- i, track->cb[i].pitch, track->cb[i].cpp,
- track->cb[i].offset, track->maxy);
+ dev_warn_once(rdev->dev, "[drm] Buffer too small for color buffer %d "
+ "(need %lu have %lu) !\n", i, size,
+ radeon_bo_size(track->cb[i].robj));
+ dev_warn_once(rdev->dev, "[drm] color buffer %d (%u %u %u %u)\n",
+ i, track->cb[i].pitch, track->cb[i].cpp,
+ track->cb[i].offset, track->maxy);
return -EINVAL;
}
}
@@ -2296,18 +2297,18 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
if (track->zb_dirty && track->z_enabled) {
if (track->zb.robj == NULL) {
- DRM_ERROR("[drm] No buffer for z buffer !\n");
+ dev_warn_once(rdev->dev, "[drm] No buffer for z buffer !\n");
return -EINVAL;
}
size = track->zb.pitch * track->zb.cpp * track->maxy;
size += track->zb.offset;
if (size > radeon_bo_size(track->zb.robj)) {
- DRM_ERROR("[drm] Buffer too small for z buffer "
- "(need %lu have %lu) !\n", size,
- radeon_bo_size(track->zb.robj));
- DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
- track->zb.pitch, track->zb.cpp,
- track->zb.offset, track->maxy);
+ dev_warn_once(rdev->dev, "[drm] Buffer too small for z buffer "
+ "(need %lu have %lu) !\n", size,
+ radeon_bo_size(track->zb.robj));
+ dev_warn_once(rdev->dev, "[drm] zbuffer (%u %u %u %u)\n",
+ track->zb.pitch, track->zb.cpp,
+ track->zb.offset, track->maxy);
return -EINVAL;
}
}
@@ -2315,19 +2316,19 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
if (track->aa_dirty && track->aaresolve) {
if (track->aa.robj == NULL) {
- DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i);
+ dev_warn_once(rdev->dev, "[drm] No buffer for AA resolve buffer %d !\n", i);
return -EINVAL;
}
/* I believe the format comes from colorbuffer0. */
size = track->aa.pitch * track->cb[0].cpp * track->maxy;
size += track->aa.offset;
if (size > radeon_bo_size(track->aa.robj)) {
- DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "
- "(need %lu have %lu) !\n", i, size,
- radeon_bo_size(track->aa.robj));
- DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",
- i, track->aa.pitch, track->cb[0].cpp,
- track->aa.offset, track->maxy);
+ dev_warn_once(rdev->dev, "[drm] Buffer too small for AA resolve buffer %d "
+ "(need %lu have %lu) !\n", i, size,
+ radeon_bo_size(track->aa.robj));
+ dev_warn_once(rdev->dev, "[drm] AA resolve buffer %d (%u %u %u %u)\n",
+ i, track->aa.pitch, track->cb[0].cpp,
+ track->aa.offset, track->maxy);
return -EINVAL;
}
}
@@ -2344,17 +2345,17 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
for (i = 0; i < track->num_arrays; i++) {
size = track->arrays[i].esize * track->max_indx * 4UL;
if (track->arrays[i].robj == NULL) {
- DRM_ERROR("(PW %u) Vertex array %u no buffer "
- "bound\n", prim_walk, i);
+ dev_warn_once(rdev->dev, "(PW %u) Vertex array %u no buffer "
+ "bound\n", prim_walk, i);
return -EINVAL;
}
if (size > radeon_bo_size(track->arrays[i].robj)) {
- dev_err(rdev->dev, "(PW %u) Vertex array %u "
- "need %lu dwords have %lu dwords\n",
- prim_walk, i, size >> 2,
- radeon_bo_size(track->arrays[i].robj)
- >> 2);
- DRM_ERROR("Max indices %u\n", track->max_indx);
+ dev_warn_once(rdev->dev, "(PW %u) Vertex array %u "
+ "need %lu dwords have %lu dwords\n",
+ prim_walk, i, size >> 2,
+ radeon_bo_size(track->arrays[i].robj)
+ >> 2);
+ dev_warn_once(rdev->dev, "Max indices %u\n", track->max_indx);
return -EINVAL;
}
}
@@ -2363,16 +2364,16 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
for (i = 0; i < track->num_arrays; i++) {
size = track->arrays[i].esize * (nverts - 1) * 4UL;
if (track->arrays[i].robj == NULL) {
- DRM_ERROR("(PW %u) Vertex array %u no buffer "
- "bound\n", prim_walk, i);
+ dev_warn_once(rdev->dev, "(PW %u) Vertex array %u no buffer "
+ "bound\n", prim_walk, i);
return -EINVAL;
}
if (size > radeon_bo_size(track->arrays[i].robj)) {
- dev_err(rdev->dev, "(PW %u) Vertex array %u "
- "need %lu dwords have %lu dwords\n",
- prim_walk, i, size >> 2,
- radeon_bo_size(track->arrays[i].robj)
- >> 2);
+ dev_warn_once(rdev->dev, "(PW %u) Vertex array %u "
+ "need %lu dwords have %lu dwords\n",
+ prim_walk, i, size >> 2,
+ radeon_bo_size(track->arrays[i].robj)
+ >> 2);
return -EINVAL;
}
}
@@ -2380,16 +2381,16 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
case 3:
size = track->vtx_size * nverts;
if (size != track->immd_dwords) {
- DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n",
- track->immd_dwords, size);
- DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
- nverts, track->vtx_size);
+ dev_warn_once(rdev->dev, "IMMD draw %u dwors but needs %lu dwords\n",
+ track->immd_dwords, size);
+ dev_warn_once(rdev->dev, "VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
+ nverts, track->vtx_size);
return -EINVAL;
}
break;
default:
- DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
- prim_walk);
+ dev_warn_once(rdev->dev, "[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
+ prim_walk);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index f5f2ffea5ab2..10a65a71de31 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -163,8 +163,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case RADEON_CRTC_GUI_TRIG_VLINE:
r = r100_cs_packet_parse_vline(p);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -180,8 +180,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case RADEON_RB3D_DEPTHOFFSET:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -193,8 +193,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case RADEON_RB3D_COLOROFFSET:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -212,8 +212,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
i = (reg - R200_PP_TXOFFSET_0) / 24;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -265,8 +265,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
face = (reg - ((i * 24) + R200_PP_TXOFFSET_0)) / 4;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -283,8 +283,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case RADEON_RB3D_COLORPITCH:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -326,12 +326,12 @@ int r200_packet0_check(struct radeon_cs_parser *p,
track->cb[0].cpp = 4;
break;
default:
- DRM_ERROR("Invalid color buffer format (%d) !\n",
- ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
+ dev_warn_once(p->dev, "Invalid color buffer format (%d) !\n",
+ ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
return -EINVAL;
}
if (idx_value & RADEON_DEPTHXY_OFFSET_ENABLE) {
- DRM_ERROR("No support for depth xy offset in kms\n");
+ dev_warn_once(p->dev, "No support for depth xy offset in kms\n");
return -EINVAL;
}
@@ -360,8 +360,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case RADEON_RB3D_ZPASS_ADDR:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index d22889fbfa9c..d2ee6deec039 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -645,8 +645,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case RADEON_CRTC_GUI_TRIG_VLINE:
r = r100_cs_packet_parse_vline(p);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -664,8 +664,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -677,8 +677,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_ZB_DEPTHOFFSET:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -706,8 +706,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
i = (reg - R300_TX_OFFSET_0) >> 2;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -762,7 +762,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
/* RB3D_CCTL */
if ((idx_value & (1 << 10)) && /* CMASK_ENABLE */
p->rdev->cmask_filp != p->filp) {
- DRM_ERROR("Invalid RB3D_CCTL: Cannot enable CMASK.\n");
+ dev_warn_once(p->dev, "Invalid RB3D_CCTL: Cannot enable CMASK.\n");
return -EINVAL;
}
track->num_cb = ((idx_value >> 5) & 0x3) + 1;
@@ -779,8 +779,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -812,8 +812,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
break;
case 5:
if (p->rdev->family < CHIP_RV515) {
- DRM_ERROR("Invalid color buffer format (%d)!\n",
- ((idx_value >> 21) & 0xF));
+ dev_warn_once(p->dev, "Invalid color buffer format (%d)!\n",
+ ((idx_value >> 21) & 0xF));
return -EINVAL;
}
fallthrough;
@@ -827,8 +827,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->cb[i].cpp = 16;
break;
default:
- DRM_ERROR("Invalid color buffer format (%d) !\n",
- ((idx_value >> 21) & 0xF));
+ dev_warn_once(p->dev, "Invalid color buffer format (%d) !\n",
+ ((idx_value >> 21) & 0xF));
return -EINVAL;
}
track->cb_dirty = true;
@@ -853,8 +853,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->zb.cpp = 4;
break;
default:
- DRM_ERROR("Invalid z buffer format (%d) !\n",
- (idx_value & 0xF));
+ dev_warn_once(p->dev, "Invalid z buffer format (%d) !\n",
+ (idx_value & 0xF));
return -EINVAL;
}
track->zb_dirty = true;
@@ -864,8 +864,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -962,8 +962,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
break;
case R300_TX_FORMAT_ATI2N:
if (p->rdev->family < CHIP_R420) {
- DRM_ERROR("Invalid texture format %u\n",
- (idx_value & 0x1F));
+ dev_warn_once(p->dev, "Invalid texture format %u\n",
+ (idx_value & 0x1F));
return -EINVAL;
}
/* The same rules apply as for DXT3/5. */
@@ -974,8 +974,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
break;
default:
- DRM_ERROR("Invalid texture format %u\n",
- (idx_value & 0x1F));
+ dev_warn_once(p->dev, "Invalid texture format %u\n",
+ (idx_value & 0x1F));
return -EINVAL;
}
track->tex_dirty = true;
@@ -1041,7 +1041,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
R100_TRACK_COMP_DXT1;
}
} else if (idx_value & (1 << 14)) {
- DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
+ dev_warn_once(p->dev, "Forbidden bit TXFORMAT_MSB\n");
return -EINVAL;
}
track->tex_dirty = true;
@@ -1079,8 +1079,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_ZB_ZPASS_ADDR:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1121,8 +1121,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_RB3D_AARESOLVE_OFFSET:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1191,7 +1191,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
case PACKET3_INDX_BUFFER:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
+ dev_warn_once(p->dev, "No reloc for packet3 %d\n", pkt->opcode);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1207,7 +1207,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
* PRIM_WALK must be equal to 3 vertex data in embedded
* in cmd stream */
if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
- DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
+ dev_warn_once(p->dev, "PRIM_WALK must be 3 for IMMD draw\n");
return -EINVAL;
}
track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
@@ -1222,7 +1222,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
* PRIM_WALK must be equal to 3 vertex data in embedded
* in cmd stream */
if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
- DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
+ dev_warn_once(p->dev, "PRIM_WALK must be 3 for IMMD draw\n");
return -EINVAL;
}
track->vap_vf_cntl = radeon_get_ib_value(p, idx);
@@ -1272,7 +1272,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
case PACKET3_NOP:
break;
default:
- DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+ dev_warn_once(p->dev, "Packet3 opcode %x not supported\n", pkt->opcode);
return -EINVAL;
}
return 0;
@@ -1308,7 +1308,7 @@ int r300_cs_parse(struct radeon_cs_parser *p)
r = r300_packet3_check(p, &pkt);
break;
default:
- DRM_ERROR("Unknown packet type %d !\n", pkt.type);
+ dev_warn_once(p->dev, "Unknown packet type %d !\n", pkt.type);
return -EINVAL;
}
if (r) {
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index ac77d1246b94..8eeceeeca362 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -361,9 +361,9 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
format = G_0280A0_FORMAT(track->cb_color_info[i]);
if (!r600_fmt_is_valid_color(format)) {
- dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
- __func__, __LINE__, format,
- i, track->cb_color_info[i]);
+ dev_warn_once(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
+ __func__, __LINE__, format,
+ i, track->cb_color_info[i]);
return -EINVAL;
}
/* pitch in pixels */
@@ -384,9 +384,9 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
array_check.blocksize = r600_fmt_get_blocksize(format);
if (r600_get_array_mode_alignment(&array_check,
&pitch_align, &height_align, &depth_align, &base_align)) {
- dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
- G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
- track->cb_color_info[i]);
+ dev_warn_once(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
+ G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
+ track->cb_color_info[i]);
return -EINVAL;
}
switch (array_mode) {
@@ -402,25 +402,26 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
case V_0280A0_ARRAY_2D_TILED_THIN1:
break;
default:
- dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
- G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
- track->cb_color_info[i]);
+ dev_warn_once(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
+ G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
+ track->cb_color_info[i]);
return -EINVAL;
}
if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
- __func__, __LINE__, pitch, pitch_align, array_mode);
+ dev_warn_once(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
+ __func__, __LINE__, pitch, pitch_align, array_mode);
return -EINVAL;
}
if (!IS_ALIGNED(height, height_align)) {
- dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
- __func__, __LINE__, height, height_align, array_mode);
+ dev_warn_once(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
+ __func__, __LINE__, height, height_align, array_mode);
return -EINVAL;
}
if (!IS_ALIGNED(base_offset, base_align)) {
- dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
- base_offset, base_align, array_mode);
+ dev_warn_once(p->dev,
+ "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
+ base_offset, base_align, array_mode);
return -EINVAL;
}
@@ -447,13 +448,14 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
* broken userspace.
*/
} else {
- dev_warn(p->dev, "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n",
- __func__, i, array_mode,
- track->cb_color_bo_offset[i], tmp,
- radeon_bo_size(track->cb_color_bo[i]),
- pitch, height, r600_fmt_get_nblocksx(format, pitch),
- r600_fmt_get_nblocksy(format, height),
- r600_fmt_get_blocksize(format));
+ dev_warn_once(p->dev,
+ "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n",
+ __func__, i, array_mode,
+ track->cb_color_bo_offset[i], tmp,
+ radeon_bo_size(track->cb_color_bo[i]),
+ pitch, height, r600_fmt_get_nblocksx(format, pitch),
+ r600_fmt_get_nblocksy(format, height),
+ r600_fmt_get_blocksize(format));
return -EINVAL;
}
}
@@ -478,11 +480,11 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
if (bytes + track->cb_color_frag_offset[i] >
radeon_bo_size(track->cb_color_frag_bo[i])) {
- dev_warn(p->dev, "%s FMASK_TILE_MAX too large "
- "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
- __func__, tile_max, bytes,
- track->cb_color_frag_offset[i],
- radeon_bo_size(track->cb_color_frag_bo[i]));
+ dev_warn_once(p->dev, "%s FMASK_TILE_MAX too large "
+ "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
+ __func__, tile_max, bytes,
+ track->cb_color_frag_offset[i],
+ radeon_bo_size(track->cb_color_frag_bo[i]));
return -EINVAL;
}
}
@@ -496,17 +498,17 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
if (bytes + track->cb_color_tile_offset[i] >
radeon_bo_size(track->cb_color_tile_bo[i])) {
- dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large "
- "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
- __func__, block_max, bytes,
- track->cb_color_tile_offset[i],
- radeon_bo_size(track->cb_color_tile_bo[i]));
+ dev_warn_once(p->dev, "%s CMASK_BLOCK_MAX too large "
+ "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
+ __func__, block_max, bytes,
+ track->cb_color_tile_offset[i],
+ radeon_bo_size(track->cb_color_tile_bo[i]));
return -EINVAL;
}
break;
}
default:
- dev_warn(p->dev, "%s invalid tile mode\n", __func__);
+ dev_warn_once(p->dev, "%s invalid tile mode\n", __func__);
return -EINVAL;
}
return 0;
@@ -526,7 +528,7 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
if (track->db_bo == NULL) {
- dev_warn(p->dev, "z/stencil with no depth buffer\n");
+ dev_warn_once(p->dev, "z/stencil with no depth buffer\n");
return -EINVAL;
}
switch (G_028010_FORMAT(track->db_depth_info)) {
@@ -544,20 +546,22 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
bpe = 8;
break;
default:
- dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
+ dev_warn_once(p->dev,
+ "z/stencil with invalid format %d\n",
+ G_028010_FORMAT(track->db_depth_info));
return -EINVAL;
}
if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
if (!track->db_depth_size_idx) {
- dev_warn(p->dev, "z/stencil buffer size not set\n");
+ dev_warn_once(p->dev, "z/stencil buffer size not set\n");
return -EINVAL;
}
tmp = radeon_bo_size(track->db_bo) - track->db_offset;
tmp = (tmp / bpe) >> 6;
if (!tmp) {
- dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
- track->db_depth_size, bpe, track->db_offset,
- radeon_bo_size(track->db_bo));
+ dev_warn_once(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
+ track->db_depth_size, bpe, track->db_offset,
+ radeon_bo_size(track->db_bo));
return -EINVAL;
}
ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
@@ -579,9 +583,9 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
array_check.blocksize = bpe;
if (r600_get_array_mode_alignment(&array_check,
&pitch_align, &height_align, &depth_align, &base_align)) {
- dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
- G_028010_ARRAY_MODE(track->db_depth_info),
- track->db_depth_info);
+ dev_warn_once(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
+ G_028010_ARRAY_MODE(track->db_depth_info),
+ track->db_depth_info);
return -EINVAL;
}
switch (array_mode) {
@@ -592,24 +596,24 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
case V_028010_ARRAY_2D_TILED_THIN1:
break;
default:
- dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
- G_028010_ARRAY_MODE(track->db_depth_info),
- track->db_depth_info);
+ dev_warn_once(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
+ G_028010_ARRAY_MODE(track->db_depth_info),
+ track->db_depth_info);
return -EINVAL;
}
if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
+ dev_warn_once(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
__func__, __LINE__, pitch, pitch_align, array_mode);
return -EINVAL;
}
if (!IS_ALIGNED(height, height_align)) {
- dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
+ dev_warn_once(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
__func__, __LINE__, height, height_align, array_mode);
return -EINVAL;
}
if (!IS_ALIGNED(base_offset, base_align)) {
- dev_warn(p->dev, "%s offset 0x%llx, 0x%llx, %d not aligned\n", __func__,
+ dev_warn_once(p->dev, "%s offset 0x%llx, 0x%llx, %d not aligned\n", __func__,
base_offset, base_align, array_mode);
return -EINVAL;
}
@@ -618,10 +622,11 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
tmp = ntiles * bpe * 64 * nviews * track->nsamples;
if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
- dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
- array_mode,
- track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
- radeon_bo_size(track->db_bo));
+ dev_warn_once(p->dev,
+ "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
+ array_mode,
+ track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
+ radeon_bo_size(track->db_bo));
return -EINVAL;
}
}
@@ -632,13 +637,13 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
unsigned nbx, nby;
if (track->htile_bo == NULL) {
- dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
- __func__, __LINE__, track->db_depth_info);
+ dev_warn_once(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
+ __func__, __LINE__, track->db_depth_info);
return -EINVAL;
}
if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
- dev_warn(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n",
- __func__, __LINE__, track->db_depth_size);
+ dev_warn_once(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n",
+ __func__, __LINE__, track->db_depth_size);
return -EINVAL;
}
@@ -676,8 +681,8 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
nby = round_up(nby, 16 * 8);
break;
default:
- dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
- __func__, __LINE__, track->npipes);
+ dev_warn_once(p->dev, "%s:%d invalid num pipes %d\n",
+ __func__, __LINE__, track->npipes);
return -EINVAL;
}
}
@@ -689,9 +694,9 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
size += track->htile_offset;
if (size > radeon_bo_size(track->htile_bo)) {
- dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
- __func__, __LINE__, radeon_bo_size(track->htile_bo),
- size, nbx, nby);
+ dev_warn_once(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
+ __func__, __LINE__, radeon_bo_size(track->htile_bo),
+ size, nbx, nby);
return -EINVAL;
}
}
@@ -718,13 +723,13 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
(u64)track->vgt_strmout_size[i];
if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
- DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
- i, offset,
- radeon_bo_size(track->vgt_strmout_bo[i]));
+ dev_warn_once(p->dev, "streamout %d bo too small: 0x%llx, 0x%lx\n",
+ i, offset,
+ radeon_bo_size(track->vgt_strmout_bo[i]));
return -EINVAL;
}
} else {
- dev_warn(p->dev, "No buffer for streamout %d\n", i);
+ dev_warn_once(p->dev, "No buffer for streamout %d\n", i);
return -EINVAL;
}
}
@@ -753,8 +758,8 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
(tmp >> (i * 4)) & 0xF) {
/* at least one component is enabled */
if (track->cb_color_bo[i] == NULL) {
- dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
- __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
+ dev_warn_once(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
+ __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
return -EINVAL;
}
/* perform rewrite of CB_COLOR[0-7]_SIZE */
@@ -841,33 +846,33 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
/* check its a WAIT_REG_MEM */
if (wait_reg_mem.type != RADEON_PACKET_TYPE3 ||
wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
- DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
+ dev_warn_once(p->dev, "vline wait missing WAIT_REG_MEM segment\n");
return -EINVAL;
}
wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
/* bit 4 is reg (0) or mem (1) */
if (wait_reg_mem_info & 0x10) {
- DRM_ERROR("vline WAIT_REG_MEM waiting on MEM instead of REG\n");
+ dev_warn_once(p->dev, "vline WAIT_REG_MEM waiting on MEM instead of REG\n");
return -EINVAL;
}
/* bit 8 is me (0) or pfp (1) */
if (wait_reg_mem_info & 0x100) {
- DRM_ERROR("vline WAIT_REG_MEM waiting on PFP instead of ME\n");
+ dev_warn_once(p->dev, "vline WAIT_REG_MEM waiting on PFP instead of ME\n");
return -EINVAL;
}
/* waiting for value to be equal */
if ((wait_reg_mem_info & 0x7) != 0x3) {
- DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
+ dev_warn_once(p->dev, "vline WAIT_REG_MEM function not equal\n");
return -EINVAL;
}
if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != vline_status[0]) {
- DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
+ dev_warn_once(p->dev, "vline WAIT_REG_MEM bad reg\n");
return -EINVAL;
}
if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != RADEON_VLINE_STAT) {
- DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
+ dev_warn_once(p->dev, "vline WAIT_REG_MEM bad bit mask\n");
return -EINVAL;
}
@@ -886,7 +891,7 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
crtc = drm_crtc_find(rdev_to_drm(p->rdev), p->filp, crtc_id);
if (!crtc) {
- DRM_ERROR("cannot find crtc %d\n", crtc_id);
+ dev_warn_once(p->dev, "cannot find crtc %d\n", crtc_id);
return -ENOENT;
}
radeon_crtc = to_radeon_crtc(crtc);
@@ -907,7 +912,7 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
ib[h_idx] = header;
ib[h_idx + 4] = vline_status[crtc_id] >> 2;
} else {
- DRM_ERROR("unknown crtc reloc\n");
+ dev_warn_once(p->dev, "unknown crtc reloc\n");
return -EINVAL;
}
return 0;
@@ -923,8 +928,8 @@ static int r600_packet0_check(struct radeon_cs_parser *p,
case AVIVO_D1MODE_VLINE_START_END:
r = r600_cs_packet_parse_vline(p);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
return r;
}
break;
@@ -972,7 +977,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
i = (reg >> 7);
if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
- dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return -EINVAL;
}
m = 1 << ((reg >> 2) & 31);
@@ -1013,8 +1018,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case SQ_VSTMP_RING_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -1031,8 +1036,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
radeon_cs_packet_next_is_pkt3_nop(p)) {
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
track->db_depth_info = radeon_get_ib_value(p, idx);
@@ -1073,8 +1078,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case VGT_STRMOUT_BUFFER_BASE_3:
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
@@ -1096,8 +1101,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CP_COHER_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "missing reloc for CP_COHER_BASE "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -1270,8 +1275,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR7_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
tmp = (reg - CB_COLOR0_BASE) / 4;
@@ -1285,8 +1290,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case DB_DEPTH_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
track->db_offset = radeon_get_ib_value(p, idx) << 8;
@@ -1298,8 +1303,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case DB_HTILE_DATA_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
track->htile_offset = (u64)radeon_get_ib_value(p, idx) << 8;
@@ -1368,8 +1373,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case SQ_ALU_CONST_CACHE_VS_15:
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -1377,8 +1382,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case SX_MEMORY_EXPORT_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- dev_warn(p->dev, "bad SET_CONFIG_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONFIG_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -1387,7 +1392,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
break;
default:
- dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return -EINVAL;
}
return 0;
@@ -1408,7 +1413,7 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
unsigned block_align, unsigned height_align, unsigned base_align,
unsigned *l0_size, unsigned *mipmap_size)
{
- unsigned offset, i, level;
+ unsigned offset, i;
unsigned width, height, depth, size;
unsigned blocksize;
unsigned nbx, nby;
@@ -1420,7 +1425,7 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
w0 = r600_mip_minify(w0, 0);
h0 = r600_mip_minify(h0, 0);
d0 = r600_mip_minify(d0, 0);
- for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
+ for (i = 0, offset = 0; i < nlevels; i++) {
width = r600_mip_minify(w0, i);
nbx = r600_fmt_get_nblocksx(format, width);
@@ -1543,43 +1548,43 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
llevel = 0;
break;
default:
- dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
+ dev_warn_once(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
return -EINVAL;
}
if (!r600_fmt_is_valid_texture(format, p->family)) {
- dev_warn(p->dev, "%s:%d texture invalid format %d\n",
- __func__, __LINE__, format);
+ dev_warn_once(p->dev, "%s:%d texture invalid format %d\n",
+ __func__, __LINE__, format);
return -EINVAL;
}
if (r600_get_array_mode_alignment(&array_check,
&pitch_align, &height_align, &depth_align, &base_align)) {
- dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
- __func__, __LINE__, G_038000_TILE_MODE(word0));
+ dev_warn_once(p->dev, "%s:%d tex array mode (%d) invalid\n",
+ __func__, __LINE__, G_038000_TILE_MODE(word0));
return -EINVAL;
}
/* XXX check height as well... */
if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
- __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
+ dev_warn_once(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
+ __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
return -EINVAL;
}
if (!IS_ALIGNED(base_offset, base_align)) {
- dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
- __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
+ dev_warn_once(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
+ __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
return -EINVAL;
}
if (!IS_ALIGNED(mip_offset, base_align)) {
- dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
- __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
+ dev_warn_once(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
+ __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
return -EINVAL;
}
if (blevel > llevel) {
- dev_warn(p->dev, "texture blevel %d > llevel %d\n",
- blevel, llevel);
+ dev_warn_once(p->dev, "texture blevel %d > llevel %d\n",
+ blevel, llevel);
}
if (is_array) {
barray = G_038014_BASE_ARRAY(word5);
@@ -1592,16 +1597,16 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
&l0_size, &mipmap_size);
/* using get ib will give us the offset into the texture bo */
if ((l0_size + word2) > radeon_bo_size(texture)) {
- dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n",
- w0, h0, pitch_align, height_align,
- array_check.array_mode, format, word2,
- l0_size, radeon_bo_size(texture));
- dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align);
+ dev_warn_once(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n",
+ w0, h0, pitch_align, height_align,
+ array_check.array_mode, format, word2,
+ l0_size, radeon_bo_size(texture));
+ dev_warn_once(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align);
return -EINVAL;
}
/* using get ib will give us the offset into the mipmap bo */
if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
- /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
+ /*dev_warn_once(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/
}
return 0;
@@ -1613,13 +1618,13 @@ static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
i = (reg >> 7);
if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
- dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return false;
}
m = 1 << ((reg >> 2) & 31);
if (!(r600_reg_safe_bm[i] & m))
return true;
- dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return false;
}
@@ -1648,7 +1653,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
uint64_t offset;
if (pkt->count != 1) {
- DRM_ERROR("bad SET PREDICATION\n");
+ dev_warn_once(p->dev, "bad SET PREDICATION\n");
return -EINVAL;
}
@@ -1660,13 +1665,13 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return 0;
if (pred_op > 2) {
- DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
+ dev_warn_once(p->dev, "bad SET PREDICATION operation %d\n", pred_op);
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad SET PREDICATION\n");
+ dev_warn_once(p->dev, "bad SET PREDICATION\n");
return -EINVAL;
}
@@ -1681,20 +1686,20 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
case PACKET3_START_3D_CMDBUF:
if (p->family >= CHIP_RV770 || pkt->count) {
- DRM_ERROR("bad START_3D\n");
+ dev_warn_once(p->dev, "bad START_3D\n");
return -EINVAL;
}
break;
case PACKET3_CONTEXT_CONTROL:
if (pkt->count != 1) {
- DRM_ERROR("bad CONTEXT_CONTROL\n");
+ dev_warn_once(p->dev, "bad CONTEXT_CONTROL\n");
return -EINVAL;
}
break;
case PACKET3_INDEX_TYPE:
case PACKET3_NUM_INSTANCES:
if (pkt->count) {
- DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
+ dev_warn_once(p->dev, "bad INDEX_TYPE/NUM_INSTANCES\n");
return -EINVAL;
}
break;
@@ -1702,12 +1707,12 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
{
uint64_t offset;
if (pkt->count != 3) {
- DRM_ERROR("bad DRAW_INDEX\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad DRAW_INDEX\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX\n");
return -EINVAL;
}
@@ -1720,37 +1725,37 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
r = r600_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
}
case PACKET3_DRAW_INDEX_AUTO:
if (pkt->count != 1) {
- DRM_ERROR("bad DRAW_INDEX_AUTO\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX_AUTO\n");
return -EINVAL;
}
r = r600_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
return r;
}
break;
case PACKET3_DRAW_INDEX_IMMD_BE:
case PACKET3_DRAW_INDEX_IMMD:
if (pkt->count < 2) {
- DRM_ERROR("bad DRAW_INDEX_IMMD\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX_IMMD\n");
return -EINVAL;
}
r = r600_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
case PACKET3_WAIT_REG_MEM:
if (pkt->count != 5) {
- DRM_ERROR("bad WAIT_REG_MEM\n");
+ dev_warn_once(p->dev, "bad WAIT_REG_MEM\n");
return -EINVAL;
}
/* bit 4 is reg (0) or mem (1) */
@@ -1759,7 +1764,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad WAIT_REG_MEM\n");
+ dev_warn_once(p->dev, "bad WAIT_REG_MEM\n");
return -EINVAL;
}
@@ -1770,7 +1775,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
ib[idx+2] = upper_32_bits(offset) & 0xff;
} else if (idx_value & 0x100) {
- DRM_ERROR("cannot use PFP on REG wait\n");
+ dev_warn_once(p->dev, "cannot use PFP on REG wait\n");
return -EINVAL;
}
break;
@@ -1779,24 +1784,24 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
u32 command, size;
u64 offset, tmp;
if (pkt->count != 4) {
- DRM_ERROR("bad CP DMA\n");
+ dev_warn_once(p->dev, "bad CP DMA\n");
return -EINVAL;
}
command = radeon_get_ib_value(p, idx+4);
size = command & 0x1fffff;
if (command & PACKET3_CP_DMA_CMD_SAS) {
/* src address space is register */
- DRM_ERROR("CP DMA SAS not supported\n");
+ dev_warn_once(p->dev, "CP DMA SAS not supported\n");
return -EINVAL;
} else {
if (command & PACKET3_CP_DMA_CMD_SAIC) {
- DRM_ERROR("CP DMA SAIC only supported for registers\n");
+ dev_warn_once(p->dev, "CP DMA SAIC only supported for registers\n");
return -EINVAL;
}
/* src address space is memory */
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad CP DMA SRC\n");
+ dev_warn_once(p->dev, "bad CP DMA SRC\n");
return -EINVAL;
}
@@ -1806,8 +1811,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
offset = reloc->gpu_offset + tmp;
if ((tmp + size) > radeon_bo_size(reloc->robj)) {
- dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
- tmp + size, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
+ tmp + size, radeon_bo_size(reloc->robj));
return -EINVAL;
}
@@ -1816,17 +1821,17 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
if (command & PACKET3_CP_DMA_CMD_DAS) {
/* dst address space is register */
- DRM_ERROR("CP DMA DAS not supported\n");
+ dev_warn_once(p->dev, "CP DMA DAS not supported\n");
return -EINVAL;
} else {
/* dst address space is memory */
if (command & PACKET3_CP_DMA_CMD_DAIC) {
- DRM_ERROR("CP DMA DAIC only supported for registers\n");
+ dev_warn_once(p->dev, "CP DMA DAIC only supported for registers\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad CP DMA DST\n");
+ dev_warn_once(p->dev, "bad CP DMA DST\n");
return -EINVAL;
}
@@ -1836,8 +1841,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
offset = reloc->gpu_offset + tmp;
if ((tmp + size) > radeon_bo_size(reloc->robj)) {
- dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
- tmp + size, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
+ tmp + size, radeon_bo_size(reloc->robj));
return -EINVAL;
}
@@ -1848,7 +1853,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
case PACKET3_SURFACE_SYNC:
if (pkt->count != 3) {
- DRM_ERROR("bad SURFACE_SYNC\n");
+ dev_warn_once(p->dev, "bad SURFACE_SYNC\n");
return -EINVAL;
}
/* 0xffffffff/0x0 is flush all cache flag */
@@ -1856,7 +1861,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
radeon_get_ib_value(p, idx + 2) != 0) {
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad SURFACE_SYNC\n");
+ dev_warn_once(p->dev, "bad SURFACE_SYNC\n");
return -EINVAL;
}
ib[idx+2] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -1864,7 +1869,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
break;
case PACKET3_EVENT_WRITE:
if (pkt->count != 2 && pkt->count != 0) {
- DRM_ERROR("bad EVENT_WRITE\n");
+ dev_warn_once(p->dev, "bad EVENT_WRITE\n");
return -EINVAL;
}
if (pkt->count) {
@@ -1872,7 +1877,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad EVENT_WRITE\n");
+ dev_warn_once(p->dev, "bad EVENT_WRITE\n");
return -EINVAL;
}
offset = reloc->gpu_offset +
@@ -1888,12 +1893,12 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
uint64_t offset;
if (pkt->count != 4) {
- DRM_ERROR("bad EVENT_WRITE_EOP\n");
+ dev_warn_once(p->dev, "bad EVENT_WRITE_EOP\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad EVENT_WRITE\n");
+ dev_warn_once(p->dev, "bad EVENT_WRITE\n");
return -EINVAL;
}
@@ -1911,7 +1916,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
(start_reg >= PACKET3_SET_CONFIG_REG_END) ||
(end_reg >= PACKET3_SET_CONFIG_REG_END)) {
- DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+ dev_warn_once(p->dev, "bad PACKET3_SET_CONFIG_REG\n");
return -EINVAL;
}
for (i = 0; i < pkt->count; i++) {
@@ -1927,7 +1932,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
(start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
(end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
- DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
+ dev_warn_once(p->dev, "bad PACKET3_SET_CONTEXT_REG\n");
return -EINVAL;
}
for (i = 0; i < pkt->count; i++) {
@@ -1939,7 +1944,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
break;
case PACKET3_SET_RESOURCE:
if (pkt->count % 7) {
- DRM_ERROR("bad SET_RESOURCE\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE\n");
return -EINVAL;
}
start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
@@ -1947,7 +1952,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
(start_reg >= PACKET3_SET_RESOURCE_END) ||
(end_reg >= PACKET3_SET_RESOURCE_END)) {
- DRM_ERROR("bad SET_RESOURCE\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE\n");
return -EINVAL;
}
for (i = 0; i < (pkt->count / 7); i++) {
@@ -1959,7 +1964,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* tex base */
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad SET_RESOURCE\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE\n");
return -EINVAL;
}
base_offset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -1973,7 +1978,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* tex mip base */
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad SET_RESOURCE\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE\n");
return -EINVAL;
}
mip_offset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -1994,15 +1999,15 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* vtx base */
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad SET_RESOURCE\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
/* force size to size of the buffer */
- dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
- size + offset, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
+ size + offset, radeon_bo_size(reloc->robj));
ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset;
}
@@ -2015,7 +2020,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
case SQ_TEX_VTX_INVALID_TEXTURE:
case SQ_TEX_VTX_INVALID_BUFFER:
default:
- DRM_ERROR("bad SET_RESOURCE\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE\n");
return -EINVAL;
}
}
@@ -2027,7 +2032,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
(start_reg >= PACKET3_SET_ALU_CONST_END) ||
(end_reg >= PACKET3_SET_ALU_CONST_END)) {
- DRM_ERROR("bad SET_ALU_CONST\n");
+ dev_warn_once(p->dev, "bad SET_ALU_CONST\n");
return -EINVAL;
}
}
@@ -2038,7 +2043,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
(start_reg >= PACKET3_SET_BOOL_CONST_END) ||
(end_reg >= PACKET3_SET_BOOL_CONST_END)) {
- DRM_ERROR("bad SET_BOOL_CONST\n");
+ dev_warn_once(p->dev, "bad SET_BOOL_CONST\n");
return -EINVAL;
}
break;
@@ -2048,7 +2053,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
(start_reg >= PACKET3_SET_LOOP_CONST_END) ||
(end_reg >= PACKET3_SET_LOOP_CONST_END)) {
- DRM_ERROR("bad SET_LOOP_CONST\n");
+ dev_warn_once(p->dev, "bad SET_LOOP_CONST\n");
return -EINVAL;
}
break;
@@ -2058,13 +2063,13 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
(start_reg >= PACKET3_SET_CTL_CONST_END) ||
(end_reg >= PACKET3_SET_CTL_CONST_END)) {
- DRM_ERROR("bad SET_CTL_CONST\n");
+ dev_warn_once(p->dev, "bad SET_CTL_CONST\n");
return -EINVAL;
}
break;
case PACKET3_SET_SAMPLER:
if (pkt->count % 3) {
- DRM_ERROR("bad SET_SAMPLER\n");
+ dev_warn_once(p->dev, "bad SET_SAMPLER\n");
return -EINVAL;
}
start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
@@ -2072,22 +2077,22 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
(start_reg >= PACKET3_SET_SAMPLER_END) ||
(end_reg >= PACKET3_SET_SAMPLER_END)) {
- DRM_ERROR("bad SET_SAMPLER\n");
+ dev_warn_once(p->dev, "bad SET_SAMPLER\n");
return -EINVAL;
}
break;
case PACKET3_STRMOUT_BASE_UPDATE:
/* RS780 and RS880 also need this */
if (p->family < CHIP_RS780) {
- DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n");
+ dev_warn_once(p->dev, "STRMOUT_BASE_UPDATE only supported on 7xx\n");
return -EINVAL;
}
if (pkt->count != 1) {
- DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n");
+ dev_warn_once(p->dev, "bad STRMOUT_BASE_UPDATE packet count\n");
return -EINVAL;
}
if (idx_value > 3) {
- DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n");
+ dev_warn_once(p->dev, "bad STRMOUT_BASE_UPDATE index\n");
return -EINVAL;
}
{
@@ -2095,25 +2100,27 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
+ dev_warn_once(p->dev, "bad STRMOUT_BASE_UPDATE reloc\n");
return -EINVAL;
}
if (reloc->robj != track->vgt_strmout_bo[idx_value]) {
- DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n");
+ dev_warn_once(p->dev, "bad STRMOUT_BASE_UPDATE, bo does not match\n");
return -EINVAL;
}
offset = (u64)radeon_get_ib_value(p, idx+1) << 8;
if (offset != track->vgt_strmout_bo_offset[idx_value]) {
- DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
- offset, track->vgt_strmout_bo_offset[idx_value]);
+ dev_warn_once(p->dev,
+ "bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
+ offset, track->vgt_strmout_bo_offset[idx_value]);
return -EINVAL;
}
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev,
+ "bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
ib[idx+1] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -2121,17 +2128,17 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
break;
case PACKET3_SURFACE_BASE_UPDATE:
if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
- DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
+ dev_warn_once(p->dev, "bad SURFACE_BASE_UPDATE\n");
return -EINVAL;
}
if (pkt->count) {
- DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
+ dev_warn_once(p->dev, "bad SURFACE_BASE_UPDATE\n");
return -EINVAL;
}
break;
case PACKET3_STRMOUT_BUFFER_UPDATE:
if (pkt->count != 4) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
+ dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
return -EINVAL;
}
/* Updating memory at DST_ADDRESS. */
@@ -2139,14 +2146,15 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
u64 offset;
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
+ dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+1);
offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev,
+ "bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2158,14 +2166,15 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
u64 offset;
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
+ dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+3);
offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev,
+ "bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2178,23 +2187,23 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
u64 offset;
if (pkt->count != 3) {
- DRM_ERROR("bad MEM_WRITE (invalid count)\n");
+ dev_warn_once(p->dev, "bad MEM_WRITE (invalid count)\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
+ dev_warn_once(p->dev, "bad MEM_WRITE (missing reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+0);
offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
if (offset & 0x7) {
- DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
+ dev_warn_once(p->dev, "bad MEM_WRITE (address not qwords aligned)\n");
return -EINVAL;
}
if ((offset + 8) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
- offset + 8, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2204,7 +2213,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
case PACKET3_COPY_DW:
if (pkt->count != 4) {
- DRM_ERROR("bad COPY_DW (invalid count)\n");
+ dev_warn_once(p->dev, "bad COPY_DW (invalid count)\n");
return -EINVAL;
}
if (idx_value & 0x1) {
@@ -2212,14 +2221,14 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* SRC is memory. */
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad COPY_DW (missing src reloc)\n");
+ dev_warn_once(p->dev, "bad COPY_DW (missing src reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+1);
offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2236,14 +2245,14 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* DST is memory. */
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
+ dev_warn_once(p->dev, "bad COPY_DW (missing dst reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+3);
offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2259,7 +2268,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
case PACKET3_NOP:
break;
default:
- DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+ dev_warn_once(p->dev, "Packet3 opcode %x not supported\n", pkt->opcode);
return -EINVAL;
}
return 0;
@@ -2306,7 +2315,7 @@ int r600_cs_parse(struct radeon_cs_parser *p)
r = r600_packet3_check(p, &pkt);
break;
default:
- DRM_ERROR("Unknown packet type %d !\n", pkt.type);
+ dev_warn_once(p->dev, "Unknown packet type %d !\n", pkt.type);
kfree(p->track);
p->track = NULL;
return -EINVAL;
@@ -2346,13 +2355,13 @@ int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
*cs_reloc = NULL;
if (p->chunk_relocs == NULL) {
- DRM_ERROR("No relocation chunk !\n");
+ dev_warn_once(p->dev, "No relocation chunk !\n");
return -EINVAL;
}
idx = p->dma_reloc_idx;
if (idx >= p->nrelocs) {
- DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
- idx, p->nrelocs);
+ dev_warn_once(p->dev, "Relocs at %d after relocations chunk end %d !\n",
+ idx, p->nrelocs);
return -EINVAL;
}
*cs_reloc = &p->relocs[idx];
@@ -2385,8 +2394,8 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
do {
if (p->idx >= ib_chunk->length_dw) {
- DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
- p->idx, ib_chunk->length_dw);
+ dev_warn_once(p->dev, "Can not parse packet at %d after CS end %d !\n",
+ p->idx, ib_chunk->length_dw);
return -EINVAL;
}
idx = p->idx;
@@ -2399,7 +2408,7 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
case DMA_PACKET_WRITE:
r = r600_dma_cs_next_reloc(p, &dst_reloc);
if (r) {
- DRM_ERROR("bad DMA_PACKET_WRITE\n");
+ dev_warn_once(p->dev, "bad DMA_PACKET_WRITE\n");
return -EINVAL;
}
if (tiled) {
@@ -2417,20 +2426,20 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
p->idx += count + 3;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA write buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
break;
case DMA_PACKET_COPY:
r = r600_dma_cs_next_reloc(p, &src_reloc);
if (r) {
- DRM_ERROR("bad DMA_PACKET_COPY\n");
+ dev_warn_once(p->dev, "bad DMA_PACKET_COPY\n");
return -EINVAL;
}
r = r600_dma_cs_next_reloc(p, &dst_reloc);
if (r) {
- DRM_ERROR("bad DMA_PACKET_COPY\n");
+ dev_warn_once(p->dev, "bad DMA_PACKET_COPY\n");
return -EINVAL;
}
if (tiled) {
@@ -2484,31 +2493,31 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
}
}
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ dev_warn_once(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA write dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA write dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
break;
case DMA_PACKET_CONSTANT_FILL:
if (p->family < CHIP_RV770) {
- DRM_ERROR("Constant Fill is 7xx only !\n");
+ dev_warn_once(p->dev, "Constant Fill is 7xx only !\n");
return -EINVAL;
}
r = r600_dma_cs_next_reloc(p, &dst_reloc);
if (r) {
- DRM_ERROR("bad DMA_PACKET_WRITE\n");
+ dev_warn_once(p->dev, "bad DMA_PACKET_WRITE\n");
return -EINVAL;
}
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
@@ -2519,7 +2528,7 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
p->idx += 1;
break;
default:
- DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+ dev_warn_once(p->dev, "Unknown packet type %d at %d !\n", cmd, idx);
return -EINVAL;
}
} while (p->idx < p->chunk_ib->length_dw);
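The DRM_ERROR() to dev_warn_once() conversion above serves two purposes: the message is prefixed with the specific device, and each callsite logs at most once, so userspace submitting a stream of malformed command buffers can no longer flood the kernel log. A minimal sketch of the pattern, with a hypothetical validate function and MAX_OPCODE constant (not from the patch):

    /* Logs on the first bad packet per callsite, silent afterwards. */
    static int validate_opcode(struct device *dev, u32 opcode)
    {
    	if (opcode > MAX_OPCODE) {
    		dev_warn_once(dev, "bad opcode %u\n", opcode);
    		return -EINVAL;
    	}
    	return 0;
    }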
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index b8e6202f1d5b..3f9c0011244f 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -834,7 +834,7 @@ void radeon_cs_dump_packet(struct radeon_cs_parser *p,
ib = p->ib.ptr;
idx = pkt->idx;
for (i = 0; i <= (pkt->count + 1); i++, idx++)
- DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
+ dev_dbg(p->dev, "ib[%d]=0x%08X\n", idx, ib[idx]);
}
/**
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 7a3e510327b7..9e35b14e2bf0 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -554,7 +554,7 @@ int radeon_wb_init(struct radeon_device *rdev)
* cover the whole aperture even if VRAM size is inferior to aperture size
* Novell bug 204882 + along with lots of ubuntu ones
*
- * Note 3: when limiting vram it's safe to overwritte real_vram_size because
+ * Note 3: when limiting vram it's safe to overwrite real_vram_size because
* we are not in case where real_vram_size is inferior to mc_vram_size (ie
* not affected by bogus hw of Novell bug 204882 + along with lots of ubuntu
* ones)
@@ -562,7 +562,7 @@ int radeon_wb_init(struct radeon_device *rdev)
* Note 4: IGP TOM addr should be the same as the aperture addr, we don't
* explicitly check for that thought.
*
- * FIXME: when reducing VRAM size align new size on power of 2.
+ * FIXME: when reducing VRAM size, align new size on power of 2.
*/
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index b4bf5dfeea2d..351b9dfcdad8 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -926,10 +926,10 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
unsigned *fb_div, unsigned *ref_div)
{
/* limit reference * post divider to a maximum */
- ref_div_max = max(min(100 / post_div, ref_div_max), 1u);
+ ref_div_max = clamp(100 / post_div, 1u, ref_div_max);
/* get matching reference and feedback divider */
- *ref_div = min(max(den/post_div, 1u), ref_div_max);
+ *ref_div = clamp(den / post_div, 1u, ref_div_max);
*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
/* limit fb divider to its maximum */
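The avivo_get_fb_ref_div() hunk above replaces nested min()/max() pairs with clamp(), which states the intent directly: bound a value to [lo, hi]. The two forms are equivalent whenever lo <= hi; a standalone illustration with sample values (not taken from the driver):

    #include <linux/minmax.h>

    u32 post_div = 7, ref_div_max = 128;
    /* old: max(min(100 / post_div, ref_div_max), 1u) */
    u32 bounded = clamp(100 / post_div, 1u, ref_div_max); /* 14 */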
@@ -1297,12 +1297,13 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = {
int
radeon_framebuffer_init(struct drm_device *dev,
struct drm_framebuffer *fb,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
int ret;
fb->obj[0] = obj;
- drm_helper_mode_fill_fb_struct(dev, fb, NULL, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
ret = drm_framebuffer_init(dev, fb, &radeon_fb_funcs);
if (ret) {
fb->obj[0] = NULL;
@@ -1341,7 +1342,7 @@ radeon_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOMEM);
}
- ret = radeon_framebuffer_init(dev, fb, mode_cmd, obj);
+ ret = radeon_framebuffer_init(dev, fb, info, mode_cmd, obj);
if (ret) {
kfree(fb);
drm_gem_object_put(obj);
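radeon_framebuffer_init() now receives the const struct drm_format_info * its callers already hold, rather than passing NULL and letting drm_helper_mode_fill_fb_struct() look the format up again. The lookup happens once at the call site; a sketch of the resulting flow (identifiers as in the diff):

    const struct drm_format_info *info;

    /* Resolve format metadata (cpp, plane count, ...) once, up front. */
    info = drm_get_format_info(dev, mode_cmd->pixel_format,
    			       mode_cmd->modifier[0]);
    ret = radeon_framebuffer_init(dev, fb, info, mode_cmd, obj);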
diff --git a/drivers/gpu/drm/radeon/radeon_fbdev.c b/drivers/gpu/drm/radeon/radeon_fbdev.c
index e3a481bbee7b..dc81b0c2dbff 100644
--- a/drivers/gpu/drm/radeon/radeon_fbdev.c
+++ b/drivers/gpu/drm/radeon/radeon_fbdev.c
@@ -53,10 +53,10 @@ static void radeon_fbdev_destroy_pinned_object(struct drm_gem_object *gobj)
}
static int radeon_fbdev_create_pinned_object(struct drm_fb_helper *fb_helper,
+ const struct drm_format_info *info,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object **gobj_p)
{
- const struct drm_format_info *info;
struct radeon_device *rdev = fb_helper->dev->dev_private;
struct drm_gem_object *gobj = NULL;
struct radeon_bo *rbo = NULL;
@@ -67,8 +67,6 @@ static int radeon_fbdev_create_pinned_object(struct drm_fb_helper *fb_helper,
int height = mode_cmd->height;
u32 cpp;
- info = drm_get_format_info(rdev_to_drm(rdev), mode_cmd->pixel_format,
- mode_cmd->modifier[0]);
cpp = info->cpp[0];
/* need to align pitch with crtc limits */
@@ -206,6 +204,7 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct radeon_device *rdev = fb_helper->dev->dev_private;
+ const struct drm_format_info *format_info;
struct drm_mode_fb_cmd2 mode_cmd = { };
struct fb_info *info;
struct drm_gem_object *gobj;
@@ -224,7 +223,9 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
- ret = radeon_fbdev_create_pinned_object(fb_helper, &mode_cmd, &gobj);
+ format_info = drm_get_format_info(rdev_to_drm(rdev), mode_cmd.pixel_format,
+ mode_cmd.modifier[0]);
+ ret = radeon_fbdev_create_pinned_object(fb_helper, format_info, &mode_cmd, &gobj);
if (ret) {
DRM_ERROR("failed to create fbcon object %d\n", ret);
return ret;
@@ -236,7 +237,7 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
ret = -ENOMEM;
goto err_radeon_fbdev_destroy_pinned_object;
}
- ret = radeon_framebuffer_init(rdev_to_drm(rdev), fb, &mode_cmd, gobj);
+ ret = radeon_framebuffer_init(rdev_to_drm(rdev), fb, format_info, &mode_cmd, gobj);
if (ret) {
DRM_ERROR("failed to initialize framebuffer %d\n", ret);
goto err_kfree;
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 4bb242437ff6..acd89a20f272 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -346,14 +346,14 @@ int radeon_gart_init(struct radeon_device *rdev)
DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
/* Allocate pages table */
- rdev->gart.pages = vzalloc(array_size(sizeof(void *),
- rdev->gart.num_cpu_pages));
+ rdev->gart.pages = vcalloc(rdev->gart.num_cpu_pages,
+ sizeof(void *));
if (rdev->gart.pages == NULL) {
radeon_gart_fini(rdev);
return -ENOMEM;
}
- rdev->gart.pages_entry = vmalloc(array_size(sizeof(uint64_t),
- rdev->gart.num_gpu_pages));
+ rdev->gart.pages_entry = vmalloc_array(rdev->gart.num_gpu_pages,
+ sizeof(uint64_t));
if (rdev->gart.pages_entry == NULL) {
radeon_gart_fini(rdev);
return -ENOMEM;
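The radeon_gart.c hunk swaps vzalloc(array_size(...)) for vcalloc() and vmalloc(array_size(...)) for vmalloc_array(). The two-argument allocators take element count and element size separately and fail with NULL on multiplication overflow, so the explicit array_size() wrapper becomes unnecessary. The shape of the conversion:

    /* before */
    pages = vzalloc(array_size(sizeof(void *), num_pages));
    /* after: overflow-checked and zeroing */
    pages = vcalloc(num_pages, sizeof(void *));

    /* before */
    entries = vmalloc(array_size(sizeof(uint64_t), num_entries));
    /* after: overflow-checked, contents left uninitialized */
    entries = vmalloc_array(num_entries, sizeof(uint64_t));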
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index d6aa1a3012a8..d1e8b9757a65 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -136,9 +136,9 @@ static void radeon_legacy_lvds_update(struct drm_encoder *encoder, int mode)
}
if (rdev->is_atom_bios)
- radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ radeon_atombios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
else
- radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ radeon_combios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
}
@@ -545,9 +545,9 @@ static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode
WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl);
if (rdev->is_atom_bios)
- radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ radeon_atombios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
else
- radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ radeon_combios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
}
@@ -742,9 +742,9 @@ static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl);
if (rdev->is_atom_bios)
- radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ radeon_atombios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
else
- radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ radeon_combios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
}
@@ -908,9 +908,9 @@ static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
if (rdev->is_atom_bios)
- radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ radeon_atombios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
else
- radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ radeon_combios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
}
@@ -1113,9 +1113,9 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
}
if (rdev->is_atom_bios)
- radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ radeon_atombios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
else
- radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ radeon_combios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
}
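The (mode == DRM_MODE_DPMS_ON) ? true : false ternaries removed above are redundant: a C comparison already yields 0 or 1, which converts to bool without help. The radeon_pm.c hunk below applies the same simplification to single_display. In isolation:

    bool on;

    on = (mode == DRM_MODE_DPMS_ON) ? true : false; /* before */
    on = mode == DRM_MODE_DPMS_ON;                  /* after, identical */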
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 3102f6c2d055..9e34da2cacef 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -40,6 +40,7 @@
struct drm_fb_helper;
struct drm_fb_helper_surface_size;
+struct drm_format_info;
struct edid;
struct drm_edid;
@@ -890,6 +891,7 @@ extern void
radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on);
int radeon_framebuffer_init(struct drm_device *dev,
struct drm_framebuffer *rfb,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index b4fb7e70320b..a855a96dd2ea 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -907,8 +907,7 @@ static void radeon_dpm_thermal_work_handler(struct work_struct *work)
static bool radeon_dpm_single_display(struct radeon_device *rdev)
{
- bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ?
- true : false;
+ bool single_display = rdev->pm.dpm.new_active_crtc_count < 2;
/* check if the vblank period is too short to adjust the mclk */
if (single_display && rdev->asic->dpm.vblank_too_short) {
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index c9fef9b61ced..818554e60537 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -455,7 +455,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
r = radeon_ring_lock(rdev, ringC, 64);
if (r) {
- DRM_ERROR("Failed to lock ring B %p\n", ringC);
+ DRM_ERROR("Failed to lock ring C %p\n", ringC);
goto out_cleanup;
}
radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
@@ -481,7 +481,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
r = radeon_ring_lock(rdev, ringC, 64);
if (r) {
- DRM_ERROR("Failed to lock ring B %p\n", ringC);
+ DRM_ERROR("Failed to lock ring C %p\n", ringC);
goto out_cleanup;
}
radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 2355a78e1b69..bdbc1bbe8a9b 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -86,7 +86,7 @@ int radeon_vce_init(struct radeon_device *rdev)
r = request_firmware(&rdev->vce_fw, fw_name, rdev->dev);
if (r) {
- dev_err(rdev->dev, "radeon_vce: Can't load firmware \"%s\"\n",
+ dev_err(rdev->dev, "radeon_vce: can't load firmware \"%s\"\n",
fw_name);
return r;
}
@@ -126,7 +126,7 @@ int radeon_vce_init(struct radeon_device *rdev)
rdev->vce.fw_version = (start << 24) | (mid << 16) | (end << 8);
- /* we can only work with this fw version for now */
+ /* we can only work with these fw versions for now */
if ((rdev->vce.fw_version != ((40 << 24) | (2 << 16) | (2 << 8))) &&
(rdev->vce.fw_version != ((50 << 24) | (0 << 16) | (1 << 8))) &&
(rdev->vce.fw_version != ((50 << 24) | (1 << 16) | (2 << 8))))
@@ -281,7 +281,7 @@ static void radeon_vce_idle_work_handler(struct work_struct *work)
*
* @rdev: radeon_device pointer
*
- * Make sure VCE is powerd up when we want to use it
+ * Make sure VCE is powered up when we want to use it
*/
void radeon_vce_note_usage(struct radeon_device *rdev)
{
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
index af58b814e588..001b3543924a 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
@@ -1013,7 +1013,7 @@ err_reset_assert:
}
static const struct dev_pm_ops rcar_lvds_pm_ops = {
- SET_RUNTIME_PM_OPS(rcar_lvds_runtime_suspend, rcar_lvds_runtime_resume, NULL)
+ RUNTIME_PM_OPS(rcar_lvds_runtime_suspend, rcar_lvds_runtime_resume, NULL)
};
static struct platform_driver rcar_lvds_platform_driver = {
@@ -1021,7 +1021,7 @@ static struct platform_driver rcar_lvds_platform_driver = {
.remove = rcar_lvds_remove,
.driver = {
.name = "rcar-lvds",
- .pm = &rcar_lvds_pm_ops,
+ .pm = pm_ptr(&rcar_lvds_pm_ops),
.of_match_table = rcar_lvds_of_table,
},
};
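SET_RUNTIME_PM_OPS() references its callbacks only when CONFIG_PM is set, which is why handlers traditionally carried __maybe_unused; RUNTIME_PM_OPS() always references them (letting the rzg2l patch further down drop that annotation), while pm_ptr() turns the ops pointer into NULL under !CONFIG_PM so the structure can be discarded. The resulting pattern, sketched for a hypothetical "foo" driver:

    static int foo_runtime_suspend(struct device *dev) { /* ... */ return 0; }
    static int foo_runtime_resume(struct device *dev)  { /* ... */ return 0; }

    static const struct dev_pm_ops foo_pm_ops = {
    	RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
    };

    static struct platform_driver foo_driver = {
    	.driver = {
    		.name = "foo",
    		.pm = pm_ptr(&foo_pm_ops), /* NULL when !CONFIG_PM */
    	},
    };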
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
index 1af4c73f7a88..5c73a513f678 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
@@ -576,7 +576,10 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi,
udelay(10);
rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_UPDATEPLL);
- ppisetr = PPISETR_DLEN_3 | PPISETR_CLEN;
+ rcar_mipi_dsi_clr(dsi, TXSETR, TXSETR_LANECNT_MASK);
+ rcar_mipi_dsi_set(dsi, TXSETR, dsi->lanes - 1);
+
+ ppisetr = ((BIT(dsi->lanes) - 1) & PPISETR_DLEN_MASK) | PPISETR_CLEN;
rcar_mipi_dsi_write(dsi, PPISETR, ppisetr);
rcar_mipi_dsi_set(dsi, PHYSETUP, PHYSETUP_SHUTDOWNZ);
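Rather than hard-coding PPISETR_DLEN_3 (all four data lanes), the startup code now builds a contiguous low bitmask from the negotiated lane count: BIT(n) - 1 sets the n least significant bits, so one lane gives 0x1 and four lanes give 0xf. Checking the arithmetic in isolation:

    unsigned int lanes;

    for (lanes = 1; lanes <= 4; lanes++)
    	/* 1 -> 0x1, 2 -> 0x3, 3 -> 0x7, 4 -> 0xf */
    	pr_info("lanes=%u mask=0x%lx\n", lanes, BIT(lanes) - 1);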
@@ -934,9 +937,234 @@ static int rcar_mipi_dsi_host_detach(struct mipi_dsi_host *host,
return 0;
}
+static ssize_t rcar_mipi_dsi_host_tx_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg,
+ bool is_rx_xfer)
+{
+ const bool is_tx_long = mipi_dsi_packet_format_is_long(msg->type);
+ struct rcar_mipi_dsi *dsi = host_to_rcar_mipi_dsi(host);
+ struct mipi_dsi_packet packet;
+ u8 payload[16] = { 0 };
+ u32 status;
+ int ret;
+
+ ret = mipi_dsi_create_packet(&packet, msg);
+ if (ret)
+ return ret;
+
+ /* Configure LP or HS command transfer. */
+ rcar_mipi_dsi_write(dsi, TXCMSETR, (msg->flags & MIPI_DSI_MSG_USE_LPM) ?
+ TXCMSETR_SPDTYP : 0);
+
+ /* Register access mode for RX transfer. */
+ if (is_rx_xfer)
+ rcar_mipi_dsi_write(dsi, RXPSETR, 0);
+
+ /* Do not use IRQ, poll for completion, the completion is quick. */
+ rcar_mipi_dsi_write(dsi, TXCMIER, 0);
+
+ /*
+ * Send the header:
+ * header[0] = Virtual Channel + Data Type
+ * header[1] = Word Count LSB (LP) or first param (SP)
+ * header[2] = Word Count MSB (LP) or second param (SP)
+ */
+ rcar_mipi_dsi_write(dsi, TXCMPHDR,
+ (is_tx_long ? TXCMPHDR_FMT : 0) |
+ TXCMPHDR_VC(msg->channel) |
+ TXCMPHDR_DT(msg->type) |
+ TXCMPHDR_DATA1(packet.header[2]) |
+ TXCMPHDR_DATA0(packet.header[1]));
+
+ if (is_tx_long) {
+ memcpy(payload, packet.payload,
+ min(msg->tx_len, sizeof(payload)));
+
+ rcar_mipi_dsi_write(dsi, TXCMPPD0R,
+ (payload[3] << 24) | (payload[2] << 16) |
+ (payload[1] << 8) | payload[0]);
+ rcar_mipi_dsi_write(dsi, TXCMPPD1R,
+ (payload[7] << 24) | (payload[6] << 16) |
+ (payload[5] << 8) | payload[4]);
+ rcar_mipi_dsi_write(dsi, TXCMPPD2R,
+ (payload[11] << 24) | (payload[10] << 16) |
+ (payload[9] << 8) | payload[8]);
+ rcar_mipi_dsi_write(dsi, TXCMPPD3R,
+ (payload[15] << 24) | (payload[14] << 16) |
+ (payload[13] << 8) | payload[12]);
+ }
+
+ /* Start the transfer, RX with BTA, TX without BTA. */
+ if (is_rx_xfer) {
+ rcar_mipi_dsi_write(dsi, TXCMCR, TXCMCR_BTAREQ);
+
+ /* Wait until the transmission, BTA, reception completed. */
+ ret = read_poll_timeout(rcar_mipi_dsi_read, status,
+ (status & RXPSR_BTAREQEND),
+ 2000, 50000, false, dsi, RXPSR);
+ } else {
+ rcar_mipi_dsi_write(dsi, TXCMCR, TXCMCR_TXREQ);
+
+ /* Wait until the transmission completed. */
+ ret = read_poll_timeout(rcar_mipi_dsi_read, status,
+ (status & TXCMSR_TXREQEND),
+ 2000, 50000, false, dsi, TXCMSR);
+ }
+
+ if (ret < 0) {
+ dev_err(dsi->dev, "Command transfer timeout (0x%08x)\n",
+ status);
+ return ret;
+ }
+
+ return packet.size;
+}
+
+static ssize_t rcar_mipi_dsi_host_rx_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct rcar_mipi_dsi *dsi = host_to_rcar_mipi_dsi(host);
+ u8 *rx_buf = (u8 *)(msg->rx_buf);
+ u32 reg, data, status, wc;
+ int i, ret;
+
+	/* Validation and parsing of the received RX data starts here. */
+ reg = rcar_mipi_dsi_read(dsi, TOSR);
+ if (reg & TOSR_TATO) { /* Turn-Around TimeOut. */
+ /* Clear TATO Turn-Around TimeOut bit. */
+ rcar_mipi_dsi_write(dsi, TOSR, TOSR_TATO);
+ return -ETIMEDOUT;
+ }
+
+ reg = rcar_mipi_dsi_read(dsi, RXPSR);
+
+ if (msg->flags & MIPI_DSI_MSG_REQ_ACK) {
+ /* Transfer with zero-length RX. */
+ if (!(reg & RXPSR_RCVACK)) {
+ /* No ACK on RX response received. */
+ return -EINVAL;
+ }
+ } else {
+ /* Transfer with non-zero-length RX. */
+ if (!(reg & RXPSR_RCVRESP)) {
+ /* No packet header of RX response received. */
+ return -EINVAL;
+ }
+
+ if (reg & (RXPSR_CRCERR | RXPSR_WCERR | RXPSR_AXIERR | RXPSR_OVRERR)) {
+ /* Incorrect response payload. */
+ return -ENODATA;
+ }
+
+ data = rcar_mipi_dsi_read(dsi, RXPHDR);
+ if (data & RXPHDR_FMT) { /* Long Packet Response. */
+ /* Read Long Packet Response length from packet header. */
+ wc = data & 0xffff;
+ if (wc > msg->rx_len) {
+ dev_warn(dsi->dev,
+ "Long Packet Response longer than RX buffer (%d), limited to %zu Bytes\n",
+ wc, msg->rx_len);
+ wc = msg->rx_len;
+ }
+
+ if (wc > 16) {
+ dev_warn(dsi->dev,
+ "Long Packet Response too long (%d), limited to 16 Bytes\n",
+ wc);
+ wc = 16;
+ }
+
+			for (i = 0; i < wc; i++) {
+ if (!(i % 4))
+ data = rcar_mipi_dsi_read(dsi, RXPPD0R + i);
+
+ rx_buf[i] = data & 0xff;
+ data >>= 8;
+ }
+ } else { /* Short Packet Response. */
+ if (msg->rx_len >= 1)
+ rx_buf[0] = data & 0xff;
+ if (msg->rx_len >= 2)
+ rx_buf[1] = (data >> 8) & 0xff;
+ if (msg->rx_len >= 3) {
+ dev_warn(dsi->dev,
+ "Expected Short Packet Response too long (%zu), limited to 2 Bytes\n",
+ msg->rx_len);
+ }
+ }
+ }
+
+ if (reg & RXPSR_RCVAKE) {
+ /* Acknowledge and Error report received. */
+ return -EFAULT;
+ }
+
+ /* Wait until the bus handover to host processor completed. */
+ ret = read_poll_timeout(rcar_mipi_dsi_read, status,
+ !(status & PPIDL0SR_DIR),
+ 2000, 50000, false, dsi, PPIDL0SR);
+ if (ret < 0) {
+ dev_err(dsi->dev, "Command RX DIR timeout (0x%08x)\n", status);
+ return ret;
+ }
+
+ /* Wait until the data lane is in LP11 stop state. */
+ ret = read_poll_timeout(rcar_mipi_dsi_read, status,
+ status & PPIDL0SR_STPST,
+ 2000, 50000, false, dsi, PPIDL0SR);
+ if (ret < 0) {
+ dev_err(dsi->dev, "Command RX STPST timeout (0x%08x)\n", status);
+ return ret;
+ }
+
+ return 0;
+}
+
+static ssize_t rcar_mipi_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ const bool is_rx_xfer = (msg->flags & MIPI_DSI_MSG_REQ_ACK) || msg->rx_len;
+ struct rcar_mipi_dsi *dsi = host_to_rcar_mipi_dsi(host);
+ int ret;
+
+ if (msg->tx_len > 16 || msg->rx_len > 16) {
+		/* TODO: Implement Memory on AXI bus command mode. */
+		dev_warn(dsi->dev,
+			 "Register-based command mode supports only payloads up to 16 bytes long\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = rcar_mipi_dsi_host_tx_transfer(host, msg, is_rx_xfer);
+
+ /* If TX transfer succeeded and this transfer has RX part. */
+ if (ret >= 0 && is_rx_xfer) {
+ ret = rcar_mipi_dsi_host_rx_transfer(host, msg);
+ if (ret)
+ return ret;
+
+ ret = msg->rx_len;
+ }
+
+ /*
+ * Wait a bit between commands, otherwise panels based on ILI9881C
+ * TCON may fail to correctly receive all commands sent to them.
+ * Until we can actually test with another DSI device, keep the
+ * delay here, but eventually this delay might have to be moved
+ * into the ILI9881C panel driver.
+ */
+ usleep_range(1000, 2000);
+
+ /* Clear the completion interrupt. */
+ if (!msg->rx_len)
+ rcar_mipi_dsi_write(dsi, TXCMSR, TXCMSR_TXREQEND);
+
+ return ret;
+}
+
static const struct mipi_dsi_host_ops rcar_mipi_dsi_host_ops = {
.attach = rcar_mipi_dsi_host_attach,
.detach = rcar_mipi_dsi_host_detach,
+	.transfer = rcar_mipi_dsi_host_transfer,
};
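With .transfer wired up, the standard DCS helpers work against this host: a panel driver's mipi_dsi_dcs_read() builds a mipi_dsi_msg, and the DRM core routes it into rcar_mipi_dsi_host_transfer(), which programs the TXCM header and up to 16 payload bytes, polls for completion, and parses the BTA response for reads. A panel-side usage sketch (generic DCS command, not part of this patch):

    #include <drm/drm_mipi_dsi.h>
    #include <video/mipi_display.h>

    static int panel_read_power_mode(struct mipi_dsi_device *dsi)
    {
    	u8 mode;
    	ssize_t ret;

    	/* Short read; serviced by rcar_mipi_dsi_host_transfer(). */
    	ret = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_POWER_MODE,
    				&mode, sizeof(mode));
    	if (ret < 0)
    		return ret;
    	return mode;
    }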
/* -----------------------------------------------------------------------------
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
index a6b276f1d6ee..76521276e2af 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
@@ -12,6 +12,130 @@
#define LINKSR_LPBUSY (1 << 1)
#define LINKSR_HSBUSY (1 << 0)
+#define TXSETR 0x100
+#define TXSETR_LANECNT_MASK (0x3 << 0)
+
+/*
+ * DSI Command Transfer Registers
+ */
+#define TXCMSETR 0x110
+#define TXCMSETR_SPDTYP (1 << 8) /* 0:HS 1:LP */
+#define TXCMSETR_LPPDACC (1 << 0)
+#define TXCMCR 0x120
+#define TXCMCR_BTATYP (1 << 2)
+#define TXCMCR_BTAREQ (1 << 1)
+#define TXCMCR_TXREQ (1 << 0)
+#define TXCMSR 0x130
+#define TXCMSR_CLSNERR (1 << 18)
+#define TXCMSR_AXIERR (1 << 16)
+#define TXCMSR_TXREQEND (1 << 0)
+#define TXCMSCR 0x134
+#define TXCMSCR_CLSNERR (1 << 18)
+#define TXCMSCR_AXIERR (1 << 16)
+#define TXCMSCR_TXREQEND (1 << 0)
+#define TXCMIER 0x138
+#define TXCMIER_CLSNERR (1 << 18)
+#define TXCMIER_AXIERR (1 << 16)
+#define TXCMIER_TXREQEND (1 << 0)
+#define TXCMADDRSET0R 0x140
+#define TXCMPHDR 0x150
+#define TXCMPHDR_FMT (1 << 24) /* 0:SP 1:LP */
+#define TXCMPHDR_VC(n) (((n) & 0x3) << 22)
+#define TXCMPHDR_DT(n) (((n) & 0x3f) << 16)
+#define TXCMPHDR_DATA1(n) (((n) & 0xff) << 8)
+#define TXCMPHDR_DATA0(n) (((n) & 0xff) << 0)
+#define TXCMPPD0R 0x160
+#define TXCMPPD1R 0x164
+#define TXCMPPD2R 0x168
+#define TXCMPPD3R 0x16c
+
+#define RXSETR 0x200
+#define RXSETR_CRCEN(n) (((n) & 0xf) << 24)
+#define RXSETR_ECCEN(n) (((n) & 0xf) << 16)
+#define RXPSETR 0x210
+#define RXPSETR_LPPDACC (1 << 0)
+#define RXPSR 0x220
+#define RXPSR_ECCERR1B (1 << 28)
+#define RXPSR_UEXTRGERR (1 << 25)
+#define RXPSR_RESPTOERR (1 << 24)
+#define RXPSR_OVRERR (1 << 23)
+#define RXPSR_AXIERR (1 << 22)
+#define RXPSR_CRCERR (1 << 21)
+#define RXPSR_WCERR (1 << 20)
+#define RXPSR_UEXDTERR (1 << 19)
+#define RXPSR_UEXPKTERR (1 << 18)
+#define RXPSR_ECCERR (1 << 17)
+#define RXPSR_MLFERR (1 << 16)
+#define RXPSR_RCVACK (1 << 14)
+#define RXPSR_RCVEOT (1 << 10)
+#define RXPSR_RCVAKE (1 << 9)
+#define RXPSR_RCVRESP (1 << 8)
+#define RXPSR_BTAREQEND (1 << 0)
+#define RXPSCR 0x224
+#define RXPSCR_ECCERR1B (1 << 28)
+#define RXPSCR_UEXTRGERR (1 << 25)
+#define RXPSCR_RESPTOERR (1 << 24)
+#define RXPSCR_OVRERR (1 << 23)
+#define RXPSCR_AXIERR (1 << 22)
+#define RXPSCR_CRCERR (1 << 21)
+#define RXPSCR_WCERR (1 << 20)
+#define RXPSCR_UEXDTERR (1 << 19)
+#define RXPSCR_UEXPKTERR (1 << 18)
+#define RXPSCR_ECCERR (1 << 17)
+#define RXPSCR_MLFERR (1 << 16)
+#define RXPSCR_RCVACK (1 << 14)
+#define RXPSCR_RCVEOT (1 << 10)
+#define RXPSCR_RCVAKE (1 << 9)
+#define RXPSCR_RCVRESP (1 << 8)
+#define RXPSCR_BTAREQEND (1 << 0)
+#define RXPIER 0x228
+#define RXPIER_ECCERR1B (1 << 28)
+#define RXPIER_UEXTRGERR (1 << 25)
+#define RXPIER_RESPTOERR (1 << 24)
+#define RXPIER_OVRERR (1 << 23)
+#define RXPIER_AXIERR (1 << 22)
+#define RXPIER_CRCERR (1 << 21)
+#define RXPIER_WCERR (1 << 20)
+#define RXPIER_UEXDTERR (1 << 19)
+#define RXPIER_UEXPKTERR (1 << 18)
+#define RXPIER_ECCERR (1 << 17)
+#define RXPIER_MLFERR (1 << 16)
+#define RXPIER_RCVACK (1 << 14)
+#define RXPIER_RCVEOT (1 << 10)
+#define RXPIER_RCVAKE (1 << 9)
+#define RXPIER_RCVRESP (1 << 8)
+#define RXPIER_BTAREQEND (1 << 0)
+#define RXPADDRSET0R 0x230
+#define RXPSIZESETR 0x238
+#define RXPSIZESETR_SIZE(n) (((n) & 0xf) << 3)
+#define RXPHDR 0x240
+#define RXPHDR_FMT (1 << 24) /* 0:SP 1:LP */
+#define RXPHDR_VC(n) (((n) & 0x3) << 22)
+#define RXPHDR_DT(n) (((n) & 0x3f) << 16)
+#define RXPHDR_DATA1(n) (((n) & 0xff) << 8)
+#define RXPHDR_DATA0(n) (((n) & 0xff) << 0)
+#define RXPPD0R 0x250
+#define RXPPD1R 0x254
+#define RXPPD2R 0x258
+#define RXPPD3R 0x25c
+#define AKEPR 0x300
+#define AKEPR_VC(n) (((n) & 0x3) << 22)
+#define AKEPR_DT(n) (((n) & 0x3f) << 16)
+#define AKEPR_ERRRPT(n) (((n) & 0xffff) << 0)
+#define RXRESPTOSETR 0x400
+#define TACR 0x500
+#define TASR 0x510
+#define TASCR 0x514
+#define TAIER 0x518
+#define TOSR 0x610
+#define TOSR_TATO (1 << 2)
+#define TOSR_LRXHTO (1 << 1)
+#define TOSR_HRXTO (1 << 0)
+#define TOSCR 0x614
+#define TOSCR_TATO (1 << 2)
+#define TOSCR_LRXHTO (1 << 1)
+#define TOSCR_HRXTO (1 << 0)
+
/*
* Video Mode Register
*/
@@ -80,10 +204,7 @@
* PHY-Protocol Interface (PPI) Registers
*/
#define PPISETR 0x700
-#define PPISETR_DLEN_0 (0x1 << 0)
-#define PPISETR_DLEN_1 (0x3 << 0)
-#define PPISETR_DLEN_2 (0x7 << 0)
-#define PPISETR_DLEN_3 (0xf << 0)
+#define PPISETR_DLEN_MASK (0xf << 0)
#define PPISETR_CLEN (1 << 8)
#define PPICLCR 0x710
@@ -100,6 +221,10 @@
#define PPICLSCR_HSTOLP (1 << 27)
#define PPICLSCR_TOHS (1 << 26)
+#define PPIDL0SR 0x740
+#define PPIDL0SR_DIR (1 << 10)
+#define PPIDL0SR_STPST (1 << 6)
+
#define PPIDLSR 0x760
#define PPIDLSR_STPST (0xf << 0)
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
index f87337c3cbb5..3b52dfc0ea1e 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
@@ -913,7 +913,7 @@ static const struct mipi_dsi_host_ops rzg2l_mipi_dsi_host_ops = {
* Power Management
*/
-static int __maybe_unused rzg2l_mipi_pm_runtime_suspend(struct device *dev)
+static int rzg2l_mipi_pm_runtime_suspend(struct device *dev)
{
struct rzg2l_mipi_dsi *dsi = dev_get_drvdata(dev);
@@ -923,7 +923,7 @@ static int __maybe_unused rzg2l_mipi_pm_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused rzg2l_mipi_pm_runtime_resume(struct device *dev)
+static int rzg2l_mipi_pm_runtime_resume(struct device *dev)
{
struct rzg2l_mipi_dsi *dsi = dev_get_drvdata(dev);
int ret;
@@ -940,7 +940,7 @@ static int __maybe_unused rzg2l_mipi_pm_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops rzg2l_mipi_pm_ops = {
- SET_RUNTIME_PM_OPS(rzg2l_mipi_pm_runtime_suspend, rzg2l_mipi_pm_runtime_resume, NULL)
+ RUNTIME_PM_OPS(rzg2l_mipi_pm_runtime_suspend, rzg2l_mipi_pm_runtime_resume, NULL)
};
/* -----------------------------------------------------------------------------
@@ -1072,7 +1072,7 @@ static struct platform_driver rzg2l_mipi_dsi_platform_driver = {
.remove = rzg2l_mipi_dsi_remove,
.driver = {
.name = "rzg2l-mipi-dsi",
- .pm = &rzg2l_mipi_pm_ops,
+ .pm = pm_ptr(&rzg2l_mipi_pm_ops),
.of_match_table = rzg2l_mipi_dsi_of_table,
},
};
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index ab525668939a..b7b025814e72 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -10,6 +10,7 @@ config DRM_ROCKCHIP
select VIDEOMODE_HELPERS
select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP
select DRM_DISPLAY_DP_AUX_BUS if ROCKCHIP_ANALOGIX_DP
+ select DRM_DW_DP if ROCKCHIP_DW_DP
select DRM_DW_HDMI if ROCKCHIP_DW_HDMI
select DRM_DW_HDMI_QP if ROCKCHIP_DW_HDMI_QP
select DRM_DW_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI
@@ -53,6 +54,7 @@ config ROCKCHIP_CDN_DP
bool "Rockchip cdn DP"
depends on EXTCON=y || (EXTCON=m && DRM_ROCKCHIP=m)
select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
select DRM_DISPLAY_DP_HELPER
help
This selects support for Rockchip SoC specific extensions
@@ -60,6 +62,14 @@ config ROCKCHIP_CDN_DP
RK3399 based SoC, you should select this
option.
+config ROCKCHIP_DW_DP
+ bool "Rockchip specific extensions for Synopsys DW DP"
+ help
+	  This selects support for Rockchip SoC specific extensions
+	  to enable the Synopsys DesignWare Cores based DisplayPort
+	  transmit controller on Rockchip SoCs. If you want to enable
+	  DP on RK3588 based SoCs, you should select this option.
+
config ROCKCHIP_DW_HDMI
bool "Rockchip specific extensions for Synopsys DW HDMI"
help
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index 2b867cebbc12..097f062399c7 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -14,6 +14,7 @@ rockchipdrm-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_DW_HDMI_QP) += dw_hdmi_qp-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI2) += dw-mipi-dsi2-rockchip.o
+rockchipdrm-$(CONFIG_ROCKCHIP_DW_DP) += dw_dp-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o
rockchipdrm-$(CONFIG_ROCKCHIP_LVDS) += rockchip_lvds.o
rockchipdrm-$(CONFIG_ROCKCHIP_RGB) += rockchip_rgb.o
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index 3398160ad75e..5523911b990d 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -7,6 +7,7 @@
*/
#include <linux/clk.h>
+#include <linux/hw_bitfield.h>
#include <linux/iopoll.h>
#include <linux/math64.h>
#include <linux/mfd/syscon.h>
@@ -148,7 +149,7 @@
#define DW_MIPI_NEEDS_GRF_CLK BIT(1)
#define PX30_GRF_PD_VO_CON1 0x0438
-#define PX30_DSI_FORCETXSTOPMODE (0xf << 7)
+#define PX30_DSI_FORCETXSTOPMODE (0xfUL << 7)
#define PX30_DSI_FORCERXMODE BIT(6)
#define PX30_DSI_TURNDISABLE BIT(5)
#define PX30_DSI_LCDC_SEL BIT(0)
@@ -167,16 +168,16 @@
#define RK3399_DSI1_LCDC_SEL BIT(4)
#define RK3399_GRF_SOC_CON22 0x6258
-#define RK3399_DSI0_TURNREQUEST (0xf << 12)
-#define RK3399_DSI0_TURNDISABLE (0xf << 8)
-#define RK3399_DSI0_FORCETXSTOPMODE (0xf << 4)
-#define RK3399_DSI0_FORCERXMODE (0xf << 0)
+#define RK3399_DSI0_TURNREQUEST (0xfUL << 12)
+#define RK3399_DSI0_TURNDISABLE (0xfUL << 8)
+#define RK3399_DSI0_FORCETXSTOPMODE (0xfUL << 4)
+#define RK3399_DSI0_FORCERXMODE (0xfUL << 0)
#define RK3399_GRF_SOC_CON23 0x625c
-#define RK3399_DSI1_TURNDISABLE (0xf << 12)
-#define RK3399_DSI1_FORCETXSTOPMODE (0xf << 8)
-#define RK3399_DSI1_FORCERXMODE (0xf << 4)
-#define RK3399_DSI1_ENABLE (0xf << 0)
+#define RK3399_DSI1_TURNDISABLE (0xfUL << 12)
+#define RK3399_DSI1_FORCETXSTOPMODE (0xfUL << 8)
+#define RK3399_DSI1_FORCERXMODE (0xfUL << 4)
+#define RK3399_DSI1_ENABLE (0xfUL << 0)
#define RK3399_GRF_SOC_CON24 0x6260
#define RK3399_TXRX_MASTERSLAVEZ BIT(7)
@@ -186,8 +187,8 @@
#define RK3399_TXRX_TURNREQUEST GENMASK(3, 0)
#define RK3568_GRF_VO_CON2 0x0368
-#define RK3568_DSI0_SKEWCALHS (0x1f << 11)
-#define RK3568_DSI0_FORCETXSTOPMODE (0xf << 4)
+#define RK3568_DSI0_SKEWCALHS (0x1fUL << 11)
+#define RK3568_DSI0_FORCETXSTOPMODE (0xfUL << 4)
#define RK3568_DSI0_TURNDISABLE BIT(2)
#define RK3568_DSI0_FORCERXMODE BIT(0)
@@ -197,18 +198,16 @@
* come from. Name GRF_VO_CON3 is assumed.
*/
#define RK3568_GRF_VO_CON3 0x36c
-#define RK3568_DSI1_SKEWCALHS (0x1f << 11)
-#define RK3568_DSI1_FORCETXSTOPMODE (0xf << 4)
+#define RK3568_DSI1_SKEWCALHS (0x1fUL << 11)
+#define RK3568_DSI1_FORCETXSTOPMODE (0xfUL << 4)
#define RK3568_DSI1_TURNDISABLE BIT(2)
#define RK3568_DSI1_FORCERXMODE BIT(0)
#define RV1126_GRF_DSIPHY_CON 0x10220
-#define RV1126_DSI_FORCETXSTOPMODE (0xf << 4)
+#define RV1126_DSI_FORCETXSTOPMODE (0xfUL << 4)
#define RV1126_DSI_TURNDISABLE BIT(2)
#define RV1126_DSI_FORCERXMODE BIT(0)
-#define HIWORD_UPDATE(val, mask) (val | (mask) << 16)
-
enum {
DW_DSI_USAGE_IDLE,
DW_DSI_USAGE_DSI,
@@ -1484,14 +1483,13 @@ static const struct rockchip_dw_dsi_chip_data px30_chip_data[] = {
{
.reg = 0xff450000,
.lcdsel_grf_reg = PX30_GRF_PD_VO_CON1,
- .lcdsel_big = HIWORD_UPDATE(0, PX30_DSI_LCDC_SEL),
- .lcdsel_lit = HIWORD_UPDATE(PX30_DSI_LCDC_SEL,
- PX30_DSI_LCDC_SEL),
+ .lcdsel_big = FIELD_PREP_WM16_CONST(PX30_DSI_LCDC_SEL, 0),
+ .lcdsel_lit = FIELD_PREP_WM16_CONST(PX30_DSI_LCDC_SEL, 1),
.lanecfg1_grf_reg = PX30_GRF_PD_VO_CON1,
- .lanecfg1 = HIWORD_UPDATE(0, PX30_DSI_TURNDISABLE |
- PX30_DSI_FORCERXMODE |
- PX30_DSI_FORCETXSTOPMODE),
+ .lanecfg1 = FIELD_PREP_WM16_CONST((PX30_DSI_TURNDISABLE |
+ PX30_DSI_FORCERXMODE |
+ PX30_DSI_FORCETXSTOPMODE), 0),
.max_data_lanes = 4,
},
@@ -1502,9 +1500,9 @@ static const struct rockchip_dw_dsi_chip_data rk3128_chip_data[] = {
{
.reg = 0x10110000,
.lanecfg1_grf_reg = RK3128_GRF_LVDS_CON0,
- .lanecfg1 = HIWORD_UPDATE(0, RK3128_DSI_TURNDISABLE |
- RK3128_DSI_FORCERXMODE |
- RK3128_DSI_FORCETXSTOPMODE),
+ .lanecfg1 = FIELD_PREP_WM16_CONST((RK3128_DSI_TURNDISABLE |
+ RK3128_DSI_FORCERXMODE |
+ RK3128_DSI_FORCETXSTOPMODE), 0),
.max_data_lanes = 4,
},
{ /* sentinel */ }
@@ -1514,16 +1512,16 @@ static const struct rockchip_dw_dsi_chip_data rk3288_chip_data[] = {
{
.reg = 0xff960000,
.lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
- .lcdsel_big = HIWORD_UPDATE(0, RK3288_DSI0_LCDC_SEL),
- .lcdsel_lit = HIWORD_UPDATE(RK3288_DSI0_LCDC_SEL, RK3288_DSI0_LCDC_SEL),
+ .lcdsel_big = FIELD_PREP_WM16_CONST(RK3288_DSI0_LCDC_SEL, 0),
+ .lcdsel_lit = FIELD_PREP_WM16_CONST(RK3288_DSI0_LCDC_SEL, 1),
.max_data_lanes = 4,
},
{
.reg = 0xff964000,
.lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
- .lcdsel_big = HIWORD_UPDATE(0, RK3288_DSI1_LCDC_SEL),
- .lcdsel_lit = HIWORD_UPDATE(RK3288_DSI1_LCDC_SEL, RK3288_DSI1_LCDC_SEL),
+ .lcdsel_big = FIELD_PREP_WM16_CONST(RK3288_DSI1_LCDC_SEL, 0),
+ .lcdsel_lit = FIELD_PREP_WM16_CONST(RK3288_DSI1_LCDC_SEL, 1),
.max_data_lanes = 4,
},
@@ -1539,13 +1537,13 @@ static int rk3399_dphy_tx1rx1_init(struct phy *phy)
* Assume ISP0 is supplied by the RX0 dphy.
*/
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON24,
- HIWORD_UPDATE(0, RK3399_TXRX_SRC_SEL_ISP0));
+ FIELD_PREP_WM16(RK3399_TXRX_SRC_SEL_ISP0, 0));
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON24,
- HIWORD_UPDATE(0, RK3399_TXRX_MASTERSLAVEZ));
+ FIELD_PREP_WM16(RK3399_TXRX_MASTERSLAVEZ, 0));
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON24,
- HIWORD_UPDATE(0, RK3399_TXRX_BASEDIR));
+ FIELD_PREP_WM16(RK3399_TXRX_BASEDIR, 0));
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON23,
- HIWORD_UPDATE(0, RK3399_DSI1_ENABLE));
+ FIELD_PREP_WM16(RK3399_DSI1_ENABLE, 0));
return 0;
}
@@ -1559,21 +1557,20 @@ static int rk3399_dphy_tx1rx1_power_on(struct phy *phy)
usleep_range(100, 150);
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON24,
- HIWORD_UPDATE(0, RK3399_TXRX_MASTERSLAVEZ));
+ FIELD_PREP_WM16(RK3399_TXRX_MASTERSLAVEZ, 0));
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON24,
- HIWORD_UPDATE(RK3399_TXRX_BASEDIR, RK3399_TXRX_BASEDIR));
+ FIELD_PREP_WM16(RK3399_TXRX_BASEDIR, 1));
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON23,
- HIWORD_UPDATE(0, RK3399_DSI1_FORCERXMODE));
+ FIELD_PREP_WM16(RK3399_DSI1_FORCERXMODE, 0));
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON23,
- HIWORD_UPDATE(0, RK3399_DSI1_FORCETXSTOPMODE));
+ FIELD_PREP_WM16(RK3399_DSI1_FORCETXSTOPMODE, 0));
/* Disable lane turn around, which is ignored in receive mode */
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON24,
- HIWORD_UPDATE(0, RK3399_TXRX_TURNREQUEST));
+ FIELD_PREP_WM16(RK3399_TXRX_TURNREQUEST, 0));
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON23,
- HIWORD_UPDATE(RK3399_DSI1_TURNDISABLE,
- RK3399_DSI1_TURNDISABLE));
+ FIELD_PREP_WM16(RK3399_DSI1_TURNDISABLE, 0xf));
usleep_range(100, 150);
dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_TESTCLK | PHY_UNTESTCLR);
@@ -1581,8 +1578,8 @@ static int rk3399_dphy_tx1rx1_power_on(struct phy *phy)
/* Enable dphy lanes */
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON23,
- HIWORD_UPDATE(GENMASK(dsi->dphy_config.lanes - 1, 0),
- RK3399_DSI1_ENABLE));
+ FIELD_PREP_WM16(RK3399_DSI1_ENABLE,
+ GENMASK(dsi->dphy_config.lanes - 1, 0)));
usleep_range(100, 150);
@@ -1594,7 +1591,7 @@ static int rk3399_dphy_tx1rx1_power_off(struct phy *phy)
struct dw_mipi_dsi_rockchip *dsi = phy_get_drvdata(phy);
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON23,
- HIWORD_UPDATE(0, RK3399_DSI1_ENABLE));
+ FIELD_PREP_WM16(RK3399_DSI1_ENABLE, 0));
return 0;
}
@@ -1603,15 +1600,14 @@ static const struct rockchip_dw_dsi_chip_data rk3399_chip_data[] = {
{
.reg = 0xff960000,
.lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
- .lcdsel_big = HIWORD_UPDATE(0, RK3399_DSI0_LCDC_SEL),
- .lcdsel_lit = HIWORD_UPDATE(RK3399_DSI0_LCDC_SEL,
- RK3399_DSI0_LCDC_SEL),
+ .lcdsel_big = FIELD_PREP_WM16_CONST(RK3399_DSI0_LCDC_SEL, 0),
+ .lcdsel_lit = FIELD_PREP_WM16_CONST(RK3399_DSI0_LCDC_SEL, 1),
.lanecfg1_grf_reg = RK3399_GRF_SOC_CON22,
- .lanecfg1 = HIWORD_UPDATE(0, RK3399_DSI0_TURNREQUEST |
- RK3399_DSI0_TURNDISABLE |
- RK3399_DSI0_FORCETXSTOPMODE |
- RK3399_DSI0_FORCERXMODE),
+ .lanecfg1 = FIELD_PREP_WM16_CONST((RK3399_DSI0_TURNREQUEST |
+ RK3399_DSI0_TURNDISABLE |
+ RK3399_DSI0_FORCETXSTOPMODE |
+ RK3399_DSI0_FORCERXMODE), 0),
.flags = DW_MIPI_NEEDS_PHY_CFG_CLK | DW_MIPI_NEEDS_GRF_CLK,
.max_data_lanes = 4,
@@ -1619,25 +1615,23 @@ static const struct rockchip_dw_dsi_chip_data rk3399_chip_data[] = {
{
.reg = 0xff968000,
.lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
- .lcdsel_big = HIWORD_UPDATE(0, RK3399_DSI1_LCDC_SEL),
- .lcdsel_lit = HIWORD_UPDATE(RK3399_DSI1_LCDC_SEL,
- RK3399_DSI1_LCDC_SEL),
+ .lcdsel_big = FIELD_PREP_WM16_CONST(RK3399_DSI1_LCDC_SEL, 0),
+ .lcdsel_lit = FIELD_PREP_WM16_CONST(RK3399_DSI1_LCDC_SEL, 1),
+
.lanecfg1_grf_reg = RK3399_GRF_SOC_CON23,
- .lanecfg1 = HIWORD_UPDATE(0, RK3399_DSI1_TURNDISABLE |
- RK3399_DSI1_FORCETXSTOPMODE |
- RK3399_DSI1_FORCERXMODE |
- RK3399_DSI1_ENABLE),
+ .lanecfg1 = FIELD_PREP_WM16_CONST((RK3399_DSI1_TURNDISABLE |
+ RK3399_DSI1_FORCETXSTOPMODE |
+ RK3399_DSI1_FORCERXMODE |
+ RK3399_DSI1_ENABLE), 0),
.lanecfg2_grf_reg = RK3399_GRF_SOC_CON24,
- .lanecfg2 = HIWORD_UPDATE(RK3399_TXRX_MASTERSLAVEZ |
- RK3399_TXRX_ENABLECLK,
- RK3399_TXRX_MASTERSLAVEZ |
- RK3399_TXRX_ENABLECLK |
- RK3399_TXRX_BASEDIR),
+ .lanecfg2 = (FIELD_PREP_WM16_CONST(RK3399_TXRX_MASTERSLAVEZ, 1) |
+ FIELD_PREP_WM16_CONST(RK3399_TXRX_ENABLECLK, 1) |
+ FIELD_PREP_WM16_CONST(RK3399_TXRX_BASEDIR, 0)),
.enable_grf_reg = RK3399_GRF_SOC_CON23,
- .enable = HIWORD_UPDATE(RK3399_DSI1_ENABLE, RK3399_DSI1_ENABLE),
+ .enable = FIELD_PREP_WM16_CONST(RK3399_DSI1_ENABLE, RK3399_DSI1_ENABLE),
.flags = DW_MIPI_NEEDS_PHY_CFG_CLK | DW_MIPI_NEEDS_GRF_CLK,
.max_data_lanes = 4,
@@ -1653,19 +1647,19 @@ static const struct rockchip_dw_dsi_chip_data rk3568_chip_data[] = {
{
.reg = 0xfe060000,
.lanecfg1_grf_reg = RK3568_GRF_VO_CON2,
- .lanecfg1 = HIWORD_UPDATE(0, RK3568_DSI0_SKEWCALHS |
- RK3568_DSI0_FORCETXSTOPMODE |
- RK3568_DSI0_TURNDISABLE |
- RK3568_DSI0_FORCERXMODE),
+ .lanecfg1 = (FIELD_PREP_WM16_CONST(RK3568_DSI0_SKEWCALHS, 0) |
+ FIELD_PREP_WM16_CONST(RK3568_DSI0_FORCETXSTOPMODE, 0) |
+ FIELD_PREP_WM16_CONST(RK3568_DSI0_TURNDISABLE, 0) |
+ FIELD_PREP_WM16_CONST(RK3568_DSI0_FORCERXMODE, 0)),
.max_data_lanes = 4,
},
{
.reg = 0xfe070000,
.lanecfg1_grf_reg = RK3568_GRF_VO_CON3,
- .lanecfg1 = HIWORD_UPDATE(0, RK3568_DSI1_SKEWCALHS |
- RK3568_DSI1_FORCETXSTOPMODE |
- RK3568_DSI1_TURNDISABLE |
- RK3568_DSI1_FORCERXMODE),
+ .lanecfg1 = (FIELD_PREP_WM16_CONST(RK3568_DSI1_SKEWCALHS, 0) |
+ FIELD_PREP_WM16_CONST(RK3568_DSI1_FORCETXSTOPMODE, 0) |
+ FIELD_PREP_WM16_CONST(RK3568_DSI1_TURNDISABLE, 0) |
+ FIELD_PREP_WM16_CONST(RK3568_DSI1_FORCERXMODE, 0)),
.max_data_lanes = 4,
},
{ /* sentinel */ }
@@ -1675,9 +1669,9 @@ static const struct rockchip_dw_dsi_chip_data rv1126_chip_data[] = {
{
.reg = 0xffb30000,
.lanecfg1_grf_reg = RV1126_GRF_DSIPHY_CON,
- .lanecfg1 = HIWORD_UPDATE(0, RV1126_DSI_TURNDISABLE |
- RV1126_DSI_FORCERXMODE |
- RV1126_DSI_FORCETXSTOPMODE),
+ .lanecfg1 = (FIELD_PREP_WM16_CONST(RV1126_DSI_TURNDISABLE, 0) |
+ FIELD_PREP_WM16_CONST(RV1126_DSI_FORCERXMODE, 0) |
+ FIELD_PREP_WM16_CONST(RV1126_DSI_FORCETXSTOPMODE, 0)),
.max_data_lanes = 4,
},
{ /* sentinel */ }
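Rockchip GRF registers use a hiword-mask layout: the upper 16 bits of a write select which of the lower 16 bits are actually updated. The removed driver-local HIWORD_UPDATE(val, mask) expected a pre-shifted value, whereas FIELD_PREP_WM16(mask, val) from <linux/hw_bitfield.h> shifts the raw field value into place and sets the matching write-enable bits itself, which is why HIWORD_UPDATE(RK3288_DSI0_LCDC_SEL, RK3288_DSI0_LCDC_SEL) becomes FIELD_PREP_WM16_CONST(RK3288_DSI0_LCDC_SEL, 1). A sketch of the equivalence, assuming the usual expansion of the macro:

    #define MY_FIELD (0xf << 4) /* bits 7:4, illustrative only */

    /* old helper: caller pre-shifts the value into the mask */
    u32 val_old = (0x3 << 4) | (MY_FIELD << 16);
    /* new helper: raw field value, shifted internally */
    u32 val_new = FIELD_PREP_WM16(MY_FIELD, 0x3);
    /* val_old == val_new == 0x00f00030 */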
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi2-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi2-rockchip.c
index cdd490778756..0aea764e29b2 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi2-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi2-rockchip.c
@@ -437,6 +437,15 @@ static void dw_mipi_dsi2_rockchip_remove(struct platform_device *pdev)
dw_mipi_dsi2_remove(dsi2->dmd);
}
+static const struct dsigrf_reg rk3576_dsi_grf_reg_fields[MAX_FIELDS] = {
+ [TXREQCLKHS_EN] = { 0x0028, 1, 1 },
+ [GATING_EN] = { 0x0028, 0, 0 },
+ [IPI_SHUTDN] = { 0x0028, 3, 3 },
+ [IPI_COLORM] = { 0x0028, 2, 2 },
+ [IPI_COLOR_DEPTH] = { 0x0028, 8, 11 },
+ [IPI_FORMAT] = { 0x0028, 4, 7 },
+};
+
static const struct dsigrf_reg rk3588_dsi0_grf_reg_fields[MAX_FIELDS] = {
[TXREQCLKHS_EN] = { 0x0000, 11, 11 },
[GATING_EN] = { 0x0000, 10, 10 },
@@ -455,6 +464,15 @@ static const struct dsigrf_reg rk3588_dsi1_grf_reg_fields[MAX_FIELDS] = {
[IPI_FORMAT] = { 0x0004, 0, 3 },
};
+static const struct rockchip_dw_dsi2_chip_data rk3576_chip_data[] = {
+ {
+ .reg = 0x27d80000,
+ .grf_regs = rk3576_dsi_grf_reg_fields,
+ .max_bit_rate_per_lane = 2500000ULL,
+ },
+ { /* sentinel */ }
+};
+
static const struct rockchip_dw_dsi2_chip_data rk3588_chip_data[] = {
{
.reg = 0xfde20000,
@@ -470,6 +488,9 @@ static const struct rockchip_dw_dsi2_chip_data rk3588_chip_data[] = {
static const struct of_device_id dw_mipi_dsi2_rockchip_dt_ids[] = {
{
+ .compatible = "rockchip,rk3576-mipi-dsi2",
+ .data = &rk3576_chip_data,
+ }, {
.compatible = "rockchip,rk3588-mipi-dsi2",
.data = &rk3588_chip_data,
},
diff --git a/drivers/gpu/drm/rockchip/dw_dp-rockchip.c b/drivers/gpu/drm/rockchip/dw_dp-rockchip.c
new file mode 100644
index 000000000000..25ab4e46301e
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/dw_dp-rockchip.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Rockchip Electronics Co., Ltd.
+ *
+ * Author: Zhang Yubing <yubing.zhang@rock-chips.com>
+ * Author: Andy Yan <andy.yan@rock-chips.com>
+ */
+
+#include <linux/component.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <drm/bridge/dw_dp.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_of.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#include <linux/media-bus-format.h>
+#include <linux/videodev2.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_vop.h"
+
+struct rockchip_dw_dp {
+ struct dw_dp *base;
+ struct device *dev;
+ struct rockchip_encoder encoder;
+};
+
+static int dw_dp_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+ struct drm_atomic_state *state = conn_state->state;
+ struct drm_display_info *di = &conn_state->connector->display_info;
+ struct drm_bridge *bridge = drm_bridge_chain_get_first_bridge(encoder);
+ struct drm_bridge_state *bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
+ u32 bus_format = bridge_state->input_bus_cfg.format;
+
+ switch (bus_format) {
+ case MEDIA_BUS_FMT_UYYVYY10_0_5X30:
+ case MEDIA_BUS_FMT_UYYVYY8_0_5X24:
+ s->output_mode = ROCKCHIP_OUT_MODE_YUV420;
+ break;
+ case MEDIA_BUS_FMT_YUYV10_1X20:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ s->output_mode = ROCKCHIP_OUT_MODE_S888_DUMMY;
+ break;
+ case MEDIA_BUS_FMT_RGB101010_1X30:
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
+ case MEDIA_BUS_FMT_YUV10_1X30:
+ case MEDIA_BUS_FMT_YUV8_1X24:
+ default:
+ s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
+ break;
+ }
+
+ s->output_type = DRM_MODE_CONNECTOR_DisplayPort;
+ s->bus_format = bus_format;
+ s->bus_flags = di->bus_flags;
+ s->color_space = V4L2_COLORSPACE_DEFAULT;
+
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs dw_dp_encoder_helper_funcs = {
+ .atomic_check = dw_dp_encoder_atomic_check,
+};
+
+static int dw_dp_rockchip_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dw_dp_plat_data plat_data;
+ struct drm_device *drm_dev = data;
+ struct rockchip_dw_dp *dp;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int ret;
+
+ dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
+ if (!dp)
+ return -ENOMEM;
+
+ dp->dev = dev;
+ platform_set_drvdata(pdev, dp);
+
+ plat_data.max_link_rate = 810000;
+ encoder = &dp->encoder.encoder;
+ encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev, dev->of_node);
+ rockchip_drm_encoder_set_crtc_endpoint_id(&dp->encoder, dev->of_node, 0, 0);
+
+ ret = drmm_encoder_init(drm_dev, encoder, NULL, DRM_MODE_ENCODER_TMDS, NULL);
+ if (ret)
+ return ret;
+ drm_encoder_helper_add(encoder, &dw_dp_encoder_helper_funcs);
+
+ dp->base = dw_dp_bind(dev, encoder, &plat_data);
+ if (IS_ERR(dp->base))
+ return PTR_ERR(dp->base);
+
+ connector = drm_bridge_connector_init(drm_dev, encoder);
+ if (IS_ERR(connector)) {
+ ret = PTR_ERR(connector);
+ return dev_err_probe(dev, ret, "Failed to init bridge connector\n");
+ }
+
+ drm_connector_attach_encoder(connector, encoder);
+
+ return 0;
+}
+
+static const struct component_ops dw_dp_rockchip_component_ops = {
+ .bind = dw_dp_rockchip_bind,
+};
+
+static int dw_dp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ return component_add(dev, &dw_dp_rockchip_component_ops);
+}
+
+static void dw_dp_remove(struct platform_device *pdev)
+{
+ struct rockchip_dw_dp *dp = platform_get_drvdata(pdev);
+
+ component_del(dp->dev, &dw_dp_rockchip_component_ops);
+}
+
+static const struct of_device_id dw_dp_of_match[] = {
+ { .compatible = "rockchip,rk3588-dp", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dw_dp_of_match);
+
+struct platform_driver dw_dp_driver = {
+ .probe = dw_dp_probe,
+ .remove = dw_dp_remove,
+ .driver = {
+ .name = "dw-dp",
+ .of_match_table = dw_dp_of_match,
+ },
+};
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index acb59b25d928..7b613997bb50 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk.h>
+#include <linux/hw_bitfield.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -54,8 +55,6 @@
#define RK3568_HDMI_SDAIN_MSK BIT(15)
#define RK3568_HDMI_SCLIN_MSK BIT(14)
-#define HIWORD_UPDATE(val, mask) (val | (mask) << 16)
-
/**
* struct rockchip_hdmi_chip_data - splite the grf setting of kind of chips
* @lcdsel_grf_reg: grf register offset of lcdc select
@@ -355,17 +354,14 @@ static void dw_hdmi_rk3228_setup_hpd(struct dw_hdmi *dw_hdmi, void *data)
dw_hdmi_phy_setup_hpd(dw_hdmi, data);
- regmap_write(hdmi->regmap,
- RK3228_GRF_SOC_CON6,
- HIWORD_UPDATE(RK3228_HDMI_HPD_VSEL | RK3228_HDMI_SDA_VSEL |
- RK3228_HDMI_SCL_VSEL,
- RK3228_HDMI_HPD_VSEL | RK3228_HDMI_SDA_VSEL |
- RK3228_HDMI_SCL_VSEL));
-
- regmap_write(hdmi->regmap,
- RK3228_GRF_SOC_CON2,
- HIWORD_UPDATE(RK3228_HDMI_SDAIN_MSK | RK3228_HDMI_SCLIN_MSK,
- RK3228_HDMI_SDAIN_MSK | RK3228_HDMI_SCLIN_MSK));
+ regmap_write(hdmi->regmap, RK3228_GRF_SOC_CON6,
+ FIELD_PREP_WM16(RK3228_HDMI_HPD_VSEL, 1) |
+ FIELD_PREP_WM16(RK3228_HDMI_SDA_VSEL, 1) |
+ FIELD_PREP_WM16(RK3228_HDMI_SCL_VSEL, 1));
+
+ regmap_write(hdmi->regmap, RK3228_GRF_SOC_CON2,
+ FIELD_PREP_WM16(RK3228_HDMI_SDAIN_MSK, 1) |
+ FIELD_PREP_WM16(RK3228_HDMI_SCLIN_MSK, 1));
}
static enum drm_connector_status
@@ -377,15 +373,13 @@ dw_hdmi_rk3328_read_hpd(struct dw_hdmi *dw_hdmi, void *data)
status = dw_hdmi_phy_read_hpd(dw_hdmi, data);
if (status == connector_status_connected)
- regmap_write(hdmi->regmap,
- RK3328_GRF_SOC_CON4,
- HIWORD_UPDATE(RK3328_HDMI_SDA_5V | RK3328_HDMI_SCL_5V,
- RK3328_HDMI_SDA_5V | RK3328_HDMI_SCL_5V));
+ regmap_write(hdmi->regmap, RK3328_GRF_SOC_CON4,
+ FIELD_PREP_WM16(RK3328_HDMI_SDA_5V, 1) |
+ FIELD_PREP_WM16(RK3328_HDMI_SCL_5V, 1));
else
- regmap_write(hdmi->regmap,
- RK3328_GRF_SOC_CON4,
- HIWORD_UPDATE(0, RK3328_HDMI_SDA_5V |
- RK3328_HDMI_SCL_5V));
+ regmap_write(hdmi->regmap, RK3328_GRF_SOC_CON4,
+ FIELD_PREP_WM16(RK3328_HDMI_SDA_5V, 0) |
+ FIELD_PREP_WM16(RK3328_HDMI_SCL_5V, 0));
return status;
}
@@ -396,21 +390,21 @@ static void dw_hdmi_rk3328_setup_hpd(struct dw_hdmi *dw_hdmi, void *data)
dw_hdmi_phy_setup_hpd(dw_hdmi, data);
/* Enable and map pins to 3V grf-controlled io-voltage */
- regmap_write(hdmi->regmap,
- RK3328_GRF_SOC_CON4,
- HIWORD_UPDATE(0, RK3328_HDMI_HPD_SARADC | RK3328_HDMI_CEC_5V |
- RK3328_HDMI_SDA_5V | RK3328_HDMI_SCL_5V |
- RK3328_HDMI_HPD_5V));
- regmap_write(hdmi->regmap,
- RK3328_GRF_SOC_CON3,
- HIWORD_UPDATE(0, RK3328_HDMI_SDA5V_GRF | RK3328_HDMI_SCL5V_GRF |
- RK3328_HDMI_HPD5V_GRF |
- RK3328_HDMI_CEC5V_GRF));
- regmap_write(hdmi->regmap,
- RK3328_GRF_SOC_CON2,
- HIWORD_UPDATE(RK3328_HDMI_SDAIN_MSK | RK3328_HDMI_SCLIN_MSK,
- RK3328_HDMI_SDAIN_MSK | RK3328_HDMI_SCLIN_MSK |
- RK3328_HDMI_HPD_IOE));
+ regmap_write(hdmi->regmap, RK3328_GRF_SOC_CON4,
+ FIELD_PREP_WM16(RK3328_HDMI_HPD_SARADC, 0) |
+ FIELD_PREP_WM16(RK3328_HDMI_CEC_5V, 0) |
+ FIELD_PREP_WM16(RK3328_HDMI_SDA_5V, 0) |
+ FIELD_PREP_WM16(RK3328_HDMI_SCL_5V, 0) |
+ FIELD_PREP_WM16(RK3328_HDMI_HPD_5V, 0));
+ regmap_write(hdmi->regmap, RK3328_GRF_SOC_CON3,
+ FIELD_PREP_WM16(RK3328_HDMI_SDA5V_GRF, 0) |
+ FIELD_PREP_WM16(RK3328_HDMI_SCL5V_GRF, 0) |
+ FIELD_PREP_WM16(RK3328_HDMI_HPD5V_GRF, 0) |
+ FIELD_PREP_WM16(RK3328_HDMI_CEC5V_GRF, 0));
+ regmap_write(hdmi->regmap, RK3328_GRF_SOC_CON2,
+ FIELD_PREP_WM16(RK3328_HDMI_SDAIN_MSK, 1) |
+ FIELD_PREP_WM16(RK3328_HDMI_SCLIN_MSK, 1) |
+ FIELD_PREP_WM16(RK3328_HDMI_HPD_IOE, 0));
dw_hdmi_rk3328_read_hpd(dw_hdmi, data);
}
@@ -438,8 +432,8 @@ static const struct dw_hdmi_plat_data rk3228_hdmi_drv_data = {
static struct rockchip_hdmi_chip_data rk3288_chip_data = {
.lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
- .lcdsel_big = HIWORD_UPDATE(0, RK3288_HDMI_LCDC_SEL),
- .lcdsel_lit = HIWORD_UPDATE(RK3288_HDMI_LCDC_SEL, RK3288_HDMI_LCDC_SEL),
+ .lcdsel_big = FIELD_PREP_WM16_CONST(RK3288_HDMI_LCDC_SEL, 0),
+ .lcdsel_lit = FIELD_PREP_WM16_CONST(RK3288_HDMI_LCDC_SEL, 1),
.max_tmds_clock = 340000,
};
@@ -475,8 +469,8 @@ static const struct dw_hdmi_plat_data rk3328_hdmi_drv_data = {
static struct rockchip_hdmi_chip_data rk3399_chip_data = {
.lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
- .lcdsel_big = HIWORD_UPDATE(0, RK3399_HDMI_LCDC_SEL),
- .lcdsel_lit = HIWORD_UPDATE(RK3399_HDMI_LCDC_SEL, RK3399_HDMI_LCDC_SEL),
+ .lcdsel_big = FIELD_PREP_WM16_CONST(RK3399_HDMI_LCDC_SEL, 0),
+ .lcdsel_lit = FIELD_PREP_WM16_CONST(RK3399_HDMI_LCDC_SEL, 1),
.max_tmds_clock = 594000,
};
@@ -589,10 +583,8 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
if (hdmi->chip_data == &rk3568_chip_data) {
regmap_write(hdmi->regmap, RK3568_GRF_VO_CON1,
- HIWORD_UPDATE(RK3568_HDMI_SDAIN_MSK |
- RK3568_HDMI_SCLIN_MSK,
- RK3568_HDMI_SDAIN_MSK |
- RK3568_HDMI_SCLIN_MSK));
+ FIELD_PREP_WM16(RK3568_HDMI_SDAIN_MSK, 1) |
+ FIELD_PREP_WM16(RK3568_HDMI_SCLIN_MSK, 1));
}
drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs);
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
index 7d531b6f4c09..ed6e8f036f4b 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
+#include <linux/hw_bitfield.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -66,7 +67,8 @@
#define RK3588_HDMI1_HPD_INT_MSK BIT(15)
#define RK3588_HDMI1_HPD_INT_CLR BIT(14)
#define RK3588_GRF_SOC_CON7 0x031c
-#define RK3588_SET_HPD_PATH_MASK GENMASK(13, 12)
+#define RK3588_HPD_HDMI0_IO_EN_MASK BIT(12)
+#define RK3588_HPD_HDMI1_IO_EN_MASK BIT(13)
#define RK3588_GRF_SOC_STATUS1 0x0384
#define RK3588_HDMI0_LEVEL_INT BIT(16)
#define RK3588_HDMI1_LEVEL_INT BIT(24)
@@ -80,7 +82,6 @@
#define RK3588_HDMI0_GRANT_SEL BIT(10)
#define RK3588_HDMI1_GRANT_SEL BIT(12)
-#define HIWORD_UPDATE(val, mask) ((val) | (mask) << 16)
#define HOTPLUG_DEBOUNCE_MS 150
#define MAX_HDMI_PORT_NUM 2
@@ -185,11 +186,11 @@ static void dw_hdmi_qp_rk3588_setup_hpd(struct dw_hdmi_qp *dw_hdmi, void *data)
u32 val;
if (hdmi->port_id)
- val = HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_CLR,
- RK3588_HDMI1_HPD_INT_CLR | RK3588_HDMI1_HPD_INT_MSK);
+ val = (FIELD_PREP_WM16(RK3588_HDMI1_HPD_INT_CLR, 1) |
+ FIELD_PREP_WM16(RK3588_HDMI1_HPD_INT_MSK, 0));
else
- val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_CLR,
- RK3588_HDMI0_HPD_INT_CLR | RK3588_HDMI0_HPD_INT_MSK);
+ val = (FIELD_PREP_WM16(RK3588_HDMI0_HPD_INT_CLR, 1) |
+ FIELD_PREP_WM16(RK3588_HDMI0_HPD_INT_MSK, 0));
regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
}
@@ -218,8 +219,8 @@ static void dw_hdmi_qp_rk3576_setup_hpd(struct dw_hdmi_qp *dw_hdmi, void *data)
struct rockchip_hdmi_qp *hdmi = (struct rockchip_hdmi_qp *)data;
u32 val;
- val = HIWORD_UPDATE(RK3576_HDMI_HPD_INT_CLR,
- RK3576_HDMI_HPD_INT_CLR | RK3576_HDMI_HPD_INT_MSK);
+ val = (FIELD_PREP_WM16(RK3576_HDMI_HPD_INT_CLR, 1) |
+ FIELD_PREP_WM16(RK3576_HDMI_HPD_INT_MSK, 0));
regmap_write(hdmi->regmap, RK3576_IOC_MISC_CON0, val);
regmap_write(hdmi->regmap, 0xa404, 0xffff0102);
@@ -254,7 +255,7 @@ static irqreturn_t dw_hdmi_qp_rk3576_hardirq(int irq, void *dev_id)
regmap_read(hdmi->regmap, RK3576_IOC_HDMI_HPD_STATUS, &intr_stat);
if (intr_stat) {
- val = HIWORD_UPDATE(RK3576_HDMI_HPD_INT_MSK, RK3576_HDMI_HPD_INT_MSK);
+ val = FIELD_PREP_WM16(RK3576_HDMI_HPD_INT_MSK, 1);
regmap_write(hdmi->regmap, RK3576_IOC_MISC_CON0, val);
return IRQ_WAKE_THREAD;
@@ -273,12 +274,12 @@ static irqreturn_t dw_hdmi_qp_rk3576_irq(int irq, void *dev_id)
if (!intr_stat)
return IRQ_NONE;
- val = HIWORD_UPDATE(RK3576_HDMI_HPD_INT_CLR, RK3576_HDMI_HPD_INT_CLR);
+ val = FIELD_PREP_WM16(RK3576_HDMI_HPD_INT_CLR, 1);
regmap_write(hdmi->regmap, RK3576_IOC_MISC_CON0, val);
mod_delayed_work(system_wq, &hdmi->hpd_work,
msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
- val = HIWORD_UPDATE(0, RK3576_HDMI_HPD_INT_MSK);
+ val = FIELD_PREP_WM16(RK3576_HDMI_HPD_INT_MSK, 0);
regmap_write(hdmi->regmap, RK3576_IOC_MISC_CON0, val);
return IRQ_HANDLED;
@@ -293,11 +294,9 @@ static irqreturn_t dw_hdmi_qp_rk3588_hardirq(int irq, void *dev_id)
if (intr_stat) {
if (hdmi->port_id)
- val = HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_MSK,
- RK3588_HDMI1_HPD_INT_MSK);
+ val = FIELD_PREP_WM16(RK3588_HDMI1_HPD_INT_MSK, 1);
else
- val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_MSK,
- RK3588_HDMI0_HPD_INT_MSK);
+ val = FIELD_PREP_WM16(RK3588_HDMI0_HPD_INT_MSK, 1);
regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
return IRQ_WAKE_THREAD;
}
@@ -315,20 +314,18 @@ static irqreturn_t dw_hdmi_qp_rk3588_irq(int irq, void *dev_id)
return IRQ_NONE;
if (hdmi->port_id)
- val = HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_CLR,
- RK3588_HDMI1_HPD_INT_CLR);
+ val = FIELD_PREP_WM16(RK3588_HDMI1_HPD_INT_CLR, 1);
else
- val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_CLR,
- RK3588_HDMI0_HPD_INT_CLR);
+ val = FIELD_PREP_WM16(RK3588_HDMI0_HPD_INT_CLR, 1);
regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
mod_delayed_work(system_wq, &hdmi->hpd_work,
msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
if (hdmi->port_id)
- val |= HIWORD_UPDATE(0, RK3588_HDMI1_HPD_INT_MSK);
+ val |= FIELD_PREP_WM16(RK3588_HDMI1_HPD_INT_MSK, 0);
else
- val |= HIWORD_UPDATE(0, RK3588_HDMI0_HPD_INT_MSK);
+ val |= FIELD_PREP_WM16(RK3588_HDMI0_HPD_INT_MSK, 0);
regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
return IRQ_HANDLED;
@@ -338,14 +335,14 @@ static void dw_hdmi_qp_rk3576_io_init(struct rockchip_hdmi_qp *hdmi)
{
u32 val;
- val = HIWORD_UPDATE(RK3576_SCLIN_MASK, RK3576_SCLIN_MASK) |
- HIWORD_UPDATE(RK3576_SDAIN_MASK, RK3576_SDAIN_MASK) |
- HIWORD_UPDATE(RK3576_HDMI_GRANT_SEL, RK3576_HDMI_GRANT_SEL) |
- HIWORD_UPDATE(RK3576_I2S_SEL_MASK, RK3576_I2S_SEL_MASK);
+ val = FIELD_PREP_WM16(RK3576_SCLIN_MASK, 1) |
+ FIELD_PREP_WM16(RK3576_SDAIN_MASK, 1) |
+ FIELD_PREP_WM16(RK3576_HDMI_GRANT_SEL, 1) |
+ FIELD_PREP_WM16(RK3576_I2S_SEL_MASK, 1);
regmap_write(hdmi->vo_regmap, RK3576_VO0_GRF_SOC_CON14, val);
- val = HIWORD_UPDATE(0, RK3576_HDMI_HPD_INT_MSK);
+ val = FIELD_PREP_WM16(RK3576_HDMI_HPD_INT_MSK, 0);
regmap_write(hdmi->regmap, RK3576_IOC_MISC_CON0, val);
}
@@ -353,27 +350,28 @@ static void dw_hdmi_qp_rk3588_io_init(struct rockchip_hdmi_qp *hdmi)
{
u32 val;
- val = HIWORD_UPDATE(RK3588_SCLIN_MASK, RK3588_SCLIN_MASK) |
- HIWORD_UPDATE(RK3588_SDAIN_MASK, RK3588_SDAIN_MASK) |
- HIWORD_UPDATE(RK3588_MODE_MASK, RK3588_MODE_MASK) |
- HIWORD_UPDATE(RK3588_I2S_SEL_MASK, RK3588_I2S_SEL_MASK);
+ val = FIELD_PREP_WM16(RK3588_SCLIN_MASK, 1) |
+ FIELD_PREP_WM16(RK3588_SDAIN_MASK, 1) |
+ FIELD_PREP_WM16(RK3588_MODE_MASK, 1) |
+ FIELD_PREP_WM16(RK3588_I2S_SEL_MASK, 1);
regmap_write(hdmi->vo_regmap,
hdmi->port_id ? RK3588_GRF_VO1_CON6 : RK3588_GRF_VO1_CON3,
val);
- val = HIWORD_UPDATE(RK3588_SET_HPD_PATH_MASK, RK3588_SET_HPD_PATH_MASK);
+ val = FIELD_PREP_WM16(RK3588_HPD_HDMI0_IO_EN_MASK, 1) |
+ FIELD_PREP_WM16(RK3588_HPD_HDMI1_IO_EN_MASK, 1);
regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON7, val);
if (hdmi->port_id)
- val = HIWORD_UPDATE(RK3588_HDMI1_GRANT_SEL, RK3588_HDMI1_GRANT_SEL);
+ val = FIELD_PREP_WM16(RK3588_HDMI1_GRANT_SEL, 1);
else
- val = HIWORD_UPDATE(RK3588_HDMI0_GRANT_SEL, RK3588_HDMI0_GRANT_SEL);
+ val = FIELD_PREP_WM16(RK3588_HDMI0_GRANT_SEL, 1);
regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON9, val);
if (hdmi->port_id)
- val = HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_MSK, RK3588_HDMI1_HPD_INT_MSK);
+ val = FIELD_PREP_WM16(RK3588_HDMI1_HPD_INT_MSK, 1);
else
- val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_MSK, RK3588_HDMI0_HPD_INT_MSK);
+ val = FIELD_PREP_WM16(RK3588_HDMI0_HPD_INT_MSK, 1);
regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
}
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index 1ab3ad4bde9e..f24827dc1421 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/hdmi.h>
+#include <linux/hw_bitfield.h>
#include <linux/mfd/syscon.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
@@ -382,8 +383,6 @@ enum {
#define HDMI_CEC_BUSFREETIME_H 0xdd
#define HDMI_CEC_LOGICADDR 0xde
-#define HIWORD_UPDATE(val, mask) ((val) | (mask) << 16)
-
#define RK3036_GRF_SOC_CON2 0x148
#define RK3036_HDMI_PHSYNC BIT(4)
#define RK3036_HDMI_PVSYNC BIT(5)
@@ -756,10 +755,10 @@ static int inno_hdmi_config_video_timing(struct inno_hdmi *hdmi,
int value, psync;
if (hdmi->variant->dev_type == RK3036_HDMI) {
- psync = mode->flags & DRM_MODE_FLAG_PHSYNC ? RK3036_HDMI_PHSYNC : 0;
- value = HIWORD_UPDATE(psync, RK3036_HDMI_PHSYNC);
- psync = mode->flags & DRM_MODE_FLAG_PVSYNC ? RK3036_HDMI_PVSYNC : 0;
- value |= HIWORD_UPDATE(psync, RK3036_HDMI_PVSYNC);
+ psync = mode->flags & DRM_MODE_FLAG_PHSYNC ? 1 : 0;
+ value = FIELD_PREP_WM16(RK3036_HDMI_PHSYNC, psync);
+ psync = mode->flags & DRM_MODE_FLAG_PVSYNC ? 1 : 0;
+ value |= FIELD_PREP_WM16(RK3036_HDMI_PVSYNC, psync);
regmap_write(hdmi->grf, RK3036_GRF_SOC_CON2, value);
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 180fad5d49ad..eb77bde9f628 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -529,6 +529,7 @@ static int __init rockchip_drm_init(void)
ADD_ROCKCHIP_SUB_DRIVER(rockchip_dp_driver,
CONFIG_ROCKCHIP_ANALOGIX_DP);
ADD_ROCKCHIP_SUB_DRIVER(cdn_dp_driver, CONFIG_ROCKCHIP_CDN_DP);
+ ADD_ROCKCHIP_SUB_DRIVER(dw_dp_driver, CONFIG_ROCKCHIP_DW_DP);
ADD_ROCKCHIP_SUB_DRIVER(dw_hdmi_rockchip_pltfm_driver,
CONFIG_ROCKCHIP_DW_HDMI);
ADD_ROCKCHIP_SUB_DRIVER(dw_hdmi_qp_rockchip_pltfm_driver,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index c183e82a42a5..2e86ad00979c 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -87,6 +87,7 @@ int rockchip_drm_encoder_set_crtc_endpoint_id(struct rockchip_encoder *rencoder,
struct device_node *np, int port, int reg);
int rockchip_drm_endpoint_is_subdriver(struct device_node *ep);
extern struct platform_driver cdn_dp_driver;
+extern struct platform_driver dw_dp_driver;
extern struct platform_driver dw_hdmi_rockchip_pltfm_driver;
extern struct platform_driver dw_hdmi_qp_rockchip_pltfm_driver;
extern struct platform_driver dw_mipi_dsi_rockchip_driver;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index 186f6452a7d3..b50927a824b4 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -2579,12 +2579,13 @@ static int vop2_win_init(struct vop2 *vop2)
}
/*
- * The window registers are only updated when config done is written.
- * Until that they read back the old value. As we read-modify-write
- * these registers mark them as non-volatile. This makes sure we read
- * the new values from the regmap register cache.
+ * The window and video port registers are only updated when config
+ * done is written. Until then, they read back the old value. As we
+ * read-modify-write these registers, mark them as non-volatile. This
+ * makes sure we read the new values from the regmap register cache.
*/
static const struct regmap_range vop2_nonvolatile_range[] = {
+ regmap_reg_range(RK3568_VP0_CTRL_BASE, RK3588_VP3_CTRL_BASE + 255),
regmap_reg_range(0x1000, 0x23ff),
};
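Marking a range non-volatile only has an effect when the regmap is backed by a cache that reads can be served from. A hedged sketch of how such a range can feed the regmap setup; the names and the cache type are illustrative, not necessarily vop2's actual configuration:

#include <linux/regmap.h>

/* Registers that latch on "config done": serve reads from the cache. */
static const struct regmap_range example_nonvolatile_ranges[] = {
	regmap_reg_range(0x1000, 0x23ff),
};

static const struct regmap_access_table example_volatile_table = {
	/* no yes_ranges: everything not listed below counts as volatile */
	.no_ranges = example_nonvolatile_ranges,
	.n_no_ranges = ARRAY_SIZE(example_nonvolatile_ranges),
};

static const struct regmap_config example_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.volatile_table = &example_volatile_table,
	.cache_type = REGCACHE_MAPLE,	/* a cache is what makes this matter */
};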
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
index fa5c56f16047..9124191899ba 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
@@ -33,7 +33,6 @@
#define WIN_FEATURE_AFBDC BIT(0)
#define WIN_FEATURE_CLUSTER BIT(1)
-#define HIWORD_UPDATE(v, h, l) ((GENMASK(h, l) << 16) | ((v) << (l)))
/*
* the delay number of a window in different mode.
*/
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.h b/drivers/gpu/drm/rockchip/rockchip_lvds.h
index ca83d7b6bea7..2d92447d819b 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.h
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.h
@@ -9,6 +9,9 @@
#ifndef _ROCKCHIP_LVDS_
#define _ROCKCHIP_LVDS_
+#include <linux/bits.h>
+#include <linux/hw_bitfield.h>
+
#define RK3288_LVDS_CH0_REG0 0x00
#define RK3288_LVDS_CH0_REG0_LVDS_EN BIT(7)
#define RK3288_LVDS_CH0_REG0_TTL_EN BIT(6)
@@ -106,18 +109,16 @@
#define LVDS_VESA_18 2
#define LVDS_JEIDA_18 3
-#define HIWORD_UPDATE(v, h, l) ((GENMASK(h, l) << 16) | ((v) << (l)))
-
#define PX30_LVDS_GRF_PD_VO_CON0 0x434
-#define PX30_LVDS_TIE_CLKS(val) HIWORD_UPDATE(val, 8, 8)
-#define PX30_LVDS_INVERT_CLKS(val) HIWORD_UPDATE(val, 9, 9)
-#define PX30_LVDS_INVERT_DCLK(val) HIWORD_UPDATE(val, 5, 5)
+#define PX30_LVDS_TIE_CLKS(val) FIELD_PREP_WM16(BIT(8), (val))
+#define PX30_LVDS_INVERT_CLKS(val) FIELD_PREP_WM16(BIT(9), (val))
+#define PX30_LVDS_INVERT_DCLK(val) FIELD_PREP_WM16(BIT(5), (val))
#define PX30_LVDS_GRF_PD_VO_CON1 0x438
-#define PX30_LVDS_FORMAT(val) HIWORD_UPDATE(val, 14, 13)
-#define PX30_LVDS_MODE_EN(val) HIWORD_UPDATE(val, 12, 12)
-#define PX30_LVDS_MSBSEL(val) HIWORD_UPDATE(val, 11, 11)
-#define PX30_LVDS_P2S_EN(val) HIWORD_UPDATE(val, 6, 6)
-#define PX30_LVDS_VOP_SEL(val) HIWORD_UPDATE(val, 1, 1)
+#define PX30_LVDS_FORMAT(val) FIELD_PREP_WM16(GENMASK(14, 13), (val))
+#define PX30_LVDS_MODE_EN(val) FIELD_PREP_WM16(BIT(12), (val))
+#define PX30_LVDS_MSBSEL(val) FIELD_PREP_WM16(BIT(11), (val))
+#define PX30_LVDS_P2S_EN(val) FIELD_PREP_WM16(BIT(6), (val))
+#define PX30_LVDS_VOP_SEL(val) FIELD_PREP_WM16(BIT(1), (val))
#endif /* _ROCKCHIP_LVDS_ */
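For multi-bit fields the same macro takes a GENMASK(); a worked expansion of one of the definitions above, with an illustrative format value of 2:

/*
 *   PX30_LVDS_FORMAT(2)
 *     == FIELD_PREP_WM16(GENMASK(14, 13), 2)
 *     == (2 << 13) | (0x6000 << 16)      GENMASK(14, 13) == 0x6000
 *     == 0x4000 | 0x60000000
 *     == 0x60004000
 *
 * Writing that to PX30_LVDS_GRF_PD_VO_CON1 updates only bits 14:13.
 */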
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
index 45c5e3987813..38c49030c7ab 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
@@ -7,6 +7,7 @@
#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/component.h>
+#include <linux/hw_bitfield.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/of.h>
@@ -1695,8 +1696,9 @@ static unsigned long rk3588_set_intf_mux(struct vop2_video_port *vp, int id, u32
die |= RK3588_SYS_DSP_INFACE_EN_HDMI0 |
FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX, vp->id);
val = rk3588_get_hdmi_pol(polflags);
- regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 1, 1));
- regmap_write(vop2->vo1_grf, RK3588_GRF_VO1_CON0, HIWORD_UPDATE(val, 6, 5));
+ regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, FIELD_PREP_WM16(BIT(1), 1));
+ regmap_write(vop2->vo1_grf, RK3588_GRF_VO1_CON0,
+ FIELD_PREP_WM16(GENMASK(6, 5), val));
break;
case ROCKCHIP_VOP2_EP_HDMI1:
div &= ~RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV;
@@ -1707,8 +1709,9 @@ static unsigned long rk3588_set_intf_mux(struct vop2_video_port *vp, int id, u32
die |= RK3588_SYS_DSP_INFACE_EN_HDMI1 |
FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX, vp->id);
val = rk3588_get_hdmi_pol(polflags);
- regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 4, 4));
- regmap_write(vop2->vo1_grf, RK3588_GRF_VO1_CON0, HIWORD_UPDATE(val, 8, 7));
+ regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, FIELD_PREP_WM16(BIT(4), 1));
+ regmap_write(vop2->vo1_grf, RK3588_GRF_VO1_CON0,
+ FIELD_PREP_WM16(GENMASK(8, 7), val));
break;
case ROCKCHIP_VOP2_EP_EDP0:
div &= ~RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV;
@@ -1718,7 +1721,7 @@ static unsigned long rk3588_set_intf_mux(struct vop2_video_port *vp, int id, u32
die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX;
die |= RK3588_SYS_DSP_INFACE_EN_EDP0 |
FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX, vp->id);
- regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 0, 0));
+ regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, FIELD_PREP_WM16(BIT(0), 1));
break;
case ROCKCHIP_VOP2_EP_EDP1:
div &= ~RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV;
@@ -1728,7 +1731,7 @@ static unsigned long rk3588_set_intf_mux(struct vop2_video_port *vp, int id, u32
die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX;
die |= RK3588_SYS_DSP_INFACE_EN_EDP1 |
FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX, vp->id);
- regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 3, 3));
+ regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, FIELD_PREP_WM16(BIT(3), 1));
break;
case ROCKCHIP_VOP2_EP_MIPI0:
div &= ~RK3588_DSP_IF_MIPI0_PCLK_DIV;
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 8867b95ab089..5a4697f636f2 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -285,9 +285,9 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
return 0;
sched = entity->rq->sched;
- /**
- * The client will not queue more IBs during this fini, consume existing
- * queued IBs or discard them on SIGKILL
+ /*
+ * The client will not queue more jobs during this fini - consume
+ * existing queued ones, or discard them on SIGKILL.
*/
if (current->flags & PF_EXITING) {
if (timeout)
@@ -300,7 +300,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
drm_sched_entity_is_idle(entity));
}
- /* For killed process disable any more IBs enqueue right now */
+ /* For a killed process disallow further enqueueing of jobs. */
last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
if ((!last_user || last_user == current->group_leader) &&
(current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
@@ -324,9 +324,9 @@ EXPORT_SYMBOL(drm_sched_entity_flush);
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
/*
- * If consumption of existing IBs wasn't completed. Forcefully remove
- * them here. Also makes sure that the scheduler won't touch this entity
- * any more.
+ * If consumption of existing jobs wasn't completed forcefully remove
+ * them. Also makes sure that the scheduler won't touch this entity any
+ * more.
*/
drm_sched_entity_kill(entity);
@@ -391,7 +391,8 @@ EXPORT_SYMBOL(drm_sched_entity_set_priority);
* Add a callback to the current dependency of the entity to wake up the
* scheduler when the entity becomes available.
*/
-static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
+static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity,
+ struct drm_sched_job *sched_job)
{
struct drm_gpu_scheduler *sched = entity->rq->sched;
struct dma_fence *fence = entity->dependency;
@@ -421,6 +422,10 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
entity->dependency = fence;
}
+ if (trace_drm_sched_job_unschedulable_enabled() &&
+ !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &entity->dependency->flags))
+ trace_drm_sched_job_unschedulable(sched_job, entity->dependency);
+
if (!dma_fence_add_callback(entity->dependency, &entity->cb,
drm_sched_entity_wakeup))
return true;
@@ -461,10 +466,8 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
while ((entity->dependency =
drm_sched_job_dependency(sched_job, entity))) {
- if (drm_sched_entity_add_dependency_cb(entity)) {
- trace_drm_sched_job_unschedulable(sched_job, entity->dependency);
+ if (drm_sched_entity_add_dependency_cb(entity, sched_job))
return NULL;
- }
}
/* skip jobs from entity that marked guilty */
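Threading sched_job through lets the helper use the generated trace_<event>_enabled() static-key check, so the extra DMA_FENCE_FLAG_SIGNALED_BIT test is only paid while the tracepoint is actually on. The general idiom, with a made-up event name:

	/*
	 * trace_<name>_enabled() is generated for every tracepoint and
	 * folds to a static branch; guard costly argument preparation
	 * with it. "my_event" here is illustrative, not a real event.
	 */
	if (trace_my_event_enabled() && object_needs_tracing(obj))
		trace_my_event(obj);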
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index e2cda28a1af4..46119aacb809 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -349,37 +349,16 @@ static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched)
}
/**
- * __drm_sched_run_free_queue - enqueue free-job work
+ * drm_sched_run_free_queue - enqueue free-job work
* @sched: scheduler instance
*/
-static void __drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
+static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
{
if (!READ_ONCE(sched->pause_submit))
queue_work(sched->submit_wq, &sched->work_free_job);
}
/**
- * drm_sched_run_free_queue - enqueue free-job work if ready
- * @sched: scheduler instance
- */
-static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
-{
- struct drm_sched_job *job;
-
- job = list_first_entry_or_null(&sched->pending_list,
- struct drm_sched_job, list);
- if (job && dma_fence_is_signaled(&job->s_fence->finished))
- __drm_sched_run_free_queue(sched);
-}
-
-static void drm_sched_run_free_queue_unlocked(struct drm_gpu_scheduler *sched)
-{
- spin_lock(&sched->job_list_lock);
- drm_sched_run_free_queue(sched);
- spin_unlock(&sched->job_list_lock);
-}
-
-/**
* drm_sched_job_done - complete a job
* @s_job: pointer to the job which is done
*
@@ -398,7 +377,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
dma_fence_get(&s_fence->finished);
drm_sched_fence_finished(s_fence, result);
dma_fence_put(&s_fence->finished);
- __drm_sched_run_free_queue(sched);
+ drm_sched_run_free_queue(sched);
}
/**
@@ -1134,12 +1113,16 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
* drm_sched_get_finished_job - fetch the next finished job to be destroyed
*
* @sched: scheduler instance
+ * @have_more: are there more finished jobs on the list
+ *
+ * Informs the caller through @have_more whether there are more finished jobs
+ * besides the returned one.
*
* Returns the next finished job from the pending list (if there is one)
* ready for it to be destroyed.
*/
static struct drm_sched_job *
-drm_sched_get_finished_job(struct drm_gpu_scheduler *sched)
+drm_sched_get_finished_job(struct drm_gpu_scheduler *sched, bool *have_more)
{
struct drm_sched_job *job, *next;
@@ -1147,22 +1130,25 @@ drm_sched_get_finished_job(struct drm_gpu_scheduler *sched)
job = list_first_entry_or_null(&sched->pending_list,
struct drm_sched_job, list);
-
if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
/* remove job from pending_list */
list_del_init(&job->list);
/* cancel this job's TO timer */
cancel_delayed_work(&sched->work_tdr);
- /* make the scheduled timestamp more accurate */
+
+ *have_more = false;
next = list_first_entry_or_null(&sched->pending_list,
typeof(*next), list);
-
if (next) {
+ /* make the scheduled timestamp more accurate */
if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
&next->s_fence->scheduled.flags))
next->s_fence->scheduled.timestamp =
dma_fence_timestamp(&job->s_fence->finished);
+
+ *have_more = dma_fence_is_signaled(&next->s_fence->finished);
+
/* start TO timer for next job */
drm_sched_start_timeout(sched);
}
@@ -1221,12 +1207,15 @@ static void drm_sched_free_job_work(struct work_struct *w)
struct drm_gpu_scheduler *sched =
container_of(w, struct drm_gpu_scheduler, work_free_job);
struct drm_sched_job *job;
+ bool have_more;
- job = drm_sched_get_finished_job(sched);
- if (job)
+ job = drm_sched_get_finished_job(sched, &have_more);
+ if (job) {
sched->ops->free_job(job);
+ if (have_more)
+ drm_sched_run_free_queue(sched);
+ }
- drm_sched_run_free_queue_unlocked(sched);
drm_sched_run_job_queue(sched);
}
@@ -1435,6 +1424,22 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
* Prevents reinsertion and marks job_queue as idle,
* it will be removed from the rq in drm_sched_entity_fini()
* eventually
+ *
+ * FIXME:
+ * This lacks the proper spin_lock(&s_entity->lock) and
+ * is, therefore, a race condition. Most notably, it
+ * can race with drm_sched_entity_push_job(). The lock
+ * cannot be taken here, however, because this would
+ * lead to lock inversion -> deadlock.
+ *
+ * The best solution probably is to enforce the
+ * lifetime rule of all entities having to be torn
+ * down before their scheduler. Then, however, locking
+ * could be dropped altogether from this function.
+ *
+ * For now, this remains a potential race in all
+ * drivers that keep entities alive for longer than
+ * the scheduler.
*/
s_entity->stopped = true;
spin_unlock(&rq->lock);
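The reworked free path above boils down to a common workqueue shape: handle one finished job per invocation, and re-queue the work only when popping the job already proved another signaled one is waiting, instead of re-taking the list lock afterwards as drm_sched_run_free_queue_unlocked() did. Schematically, with illustrative names rather than the scheduler's real types:

static void free_job_work(struct work_struct *w)
{
	struct sched *sched = container_of(w, struct sched, work_free_job);
	bool have_more;
	struct job *job;

	job = get_finished_job(sched, &have_more);	/* locks internally */
	if (job) {
		free_job(job);
		if (have_more)		/* no second locked list walk */
			queue_work(sched->wq, &sched->work_free_job);
	}
}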
diff --git a/drivers/gpu/drm/scheduler/tests/mock_scheduler.c b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
index 65acffc3fea8..8e9ae7d980eb 100644
--- a/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
+++ b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
@@ -219,7 +219,7 @@ mock_sched_timedout_job(struct drm_sched_job *sched_job)
unsigned long flags;
if (job->flags & DRM_MOCK_SCHED_JOB_DONT_RESET) {
- job->flags &= ~DRM_MOCK_SCHED_JOB_DONT_RESET;
+ job->flags |= DRM_MOCK_SCHED_JOB_RESET_SKIPPED;
return DRM_GPU_SCHED_STAT_NO_HANG;
}
diff --git a/drivers/gpu/drm/scheduler/tests/sched_tests.h b/drivers/gpu/drm/scheduler/tests/sched_tests.h
index 63d4f2ac7074..7f31d35780cc 100644
--- a/drivers/gpu/drm/scheduler/tests/sched_tests.h
+++ b/drivers/gpu/drm/scheduler/tests/sched_tests.h
@@ -11,7 +11,6 @@
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/list.h>
-#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/types.h>
@@ -95,9 +94,10 @@ struct drm_mock_sched_job {
struct completion done;
-#define DRM_MOCK_SCHED_JOB_DONE 0x1
-#define DRM_MOCK_SCHED_JOB_TIMEDOUT 0x2
-#define DRM_MOCK_SCHED_JOB_DONT_RESET 0x4
+#define DRM_MOCK_SCHED_JOB_DONE 0x1
+#define DRM_MOCK_SCHED_JOB_TIMEDOUT 0x2
+#define DRM_MOCK_SCHED_JOB_DONT_RESET 0x4
+#define DRM_MOCK_SCHED_JOB_RESET_SKIPPED 0x8
unsigned long flags;
struct list_head link;
diff --git a/drivers/gpu/drm/scheduler/tests/tests_basic.c b/drivers/gpu/drm/scheduler/tests/tests_basic.c
index 55eb142bd7c5..82a41a456b0a 100644
--- a/drivers/gpu/drm/scheduler/tests/tests_basic.c
+++ b/drivers/gpu/drm/scheduler/tests/tests_basic.c
@@ -317,8 +317,8 @@ static void drm_sched_skip_reset(struct kunit *test)
KUNIT_ASSERT_FALSE(test, done);
KUNIT_ASSERT_EQ(test,
- job->flags & DRM_MOCK_SCHED_JOB_DONT_RESET,
- 0);
+ job->flags & DRM_MOCK_SCHED_JOB_RESET_SKIPPED,
+ DRM_MOCK_SCHED_JOB_RESET_SKIPPED);
i = drm_mock_sched_advance(sched, 1);
KUNIT_ASSERT_EQ(test, i, 1);
diff --git a/drivers/gpu/drm/sitronix/st7571-i2c.c b/drivers/gpu/drm/sitronix/st7571-i2c.c
index 453eb7e045e5..a6c4a6738ded 100644
--- a/drivers/gpu/drm/sitronix/st7571-i2c.c
+++ b/drivers/gpu/drm/sitronix/st7571-i2c.c
@@ -151,6 +151,7 @@ struct st7571_device {
bool ignore_nak;
bool grayscale;
+ bool inverted;
u32 height_mm;
u32 width_mm;
u32 startline;
@@ -218,10 +219,11 @@ static int st7571_send_command_list(struct st7571_device *st7571,
return ret;
}
-static inline u8 st7571_transform_xy(const char *p, int x, int y)
+static inline u8 st7571_transform_xy(const char *p, int x, int y, u8 bpp)
{
int xrest = x % 8;
u8 result = 0;
+ u8 row_len = 16 * bpp;
/*
* Transforms an (x, y) pixel coordinate into a vertical 8-bit
@@ -236,7 +238,7 @@ static inline u8 st7571_transform_xy(const char *p, int x, int y)
for (int i = 0; i < 8; i++) {
int row_idx = y + i;
- u8 byte = p[row_idx * 16 + x];
+ u8 byte = p[row_idx * row_len + x];
u8 bit = (byte >> xrest) & 1;
result |= (bit << i);
@@ -303,11 +305,11 @@ static void st7571_prepare_buffer_grayscale(struct st7571_device *st7571,
struct iosys_map dst;
switch (fb->format->format) {
- case DRM_FORMAT_XRGB8888: /* Only support XRGB8888 in monochrome mode */
- dst_pitch = DIV_ROUND_UP(drm_rect_width(rect), 8);
+ case DRM_FORMAT_XRGB8888:
+ dst_pitch = DIV_ROUND_UP(drm_rect_width(rect), 4);
iosys_map_set_vaddr(&dst, st7571->hwbuf);
- drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state);
+ drm_fb_xrgb8888_to_gray2(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state);
break;
case DRM_FORMAT_R1:
@@ -333,7 +335,7 @@ static int st7571_fb_update_rect_monochrome(struct drm_framebuffer *fb, struct d
for (int y = rect->y1; y < rect->y2; y += ST7571_PAGE_HEIGHT) {
for (int x = rect->x1; x < rect->x2; x++)
- row[x] = st7571_transform_xy(st7571->hwbuf, x, y);
+ row[x] = st7571_transform_xy(st7571->hwbuf, x, y, 1);
st7571_set_position(st7571, rect->x1, y);
@@ -358,14 +360,13 @@ static int st7571_fb_update_rect_grayscale(struct drm_framebuffer *fb, struct dr
rect->y2 = min_t(unsigned int, round_up(rect->y2, ST7571_PAGE_HEIGHT), st7571->nlines);
switch (format) {
- case DRM_FORMAT_XRGB8888:
- /* Threated as monochrome (R1) */
- fallthrough;
case DRM_FORMAT_R1:
- x1 = rect->x1;
- x2 = rect->x2;
+ x1 = rect->x1 * 1;
+ x2 = rect->x2 * 1;
break;
case DRM_FORMAT_R2:
+ fallthrough;
+ case DRM_FORMAT_XRGB8888:
x1 = rect->x1 * 2;
x2 = rect->x2 * 2;
break;
@@ -373,7 +374,7 @@ static int st7571_fb_update_rect_grayscale(struct drm_framebuffer *fb, struct dr
for (int y = rect->y1; y < rect->y2; y += ST7571_PAGE_HEIGHT) {
for (int x = x1; x < x2; x++)
- row[x] = st7571_transform_xy(st7571->hwbuf, x, y);
+ row[x] = st7571_transform_xy(st7571->hwbuf, x, y, 2);
st7571_set_position(st7571, rect->x1, y);
@@ -386,15 +387,15 @@ static int st7571_fb_update_rect_grayscale(struct drm_framebuffer *fb, struct dr
* even if the format is monochrome.
*
* The bit values maps to the following grayscale:
- * 0 0 = White
- * 0 1 = Light gray
- * 1 0 = Dark gray
- * 1 1 = Black
+ * 0 0 = Black
+ * 0 1 = Dark gray
+ * 1 0 = Light gray
+ * 1 1 = White
*
* For monochrome formats, write the same value twice to get
* either a black or white pixel.
*/
- if (format == DRM_FORMAT_R1 || format == DRM_FORMAT_XRGB8888)
+ if (format == DRM_FORMAT_R1)
regmap_bulk_write(st7571->regmap, ST7571_DATA_MODE, row + x, 1);
}
}
@@ -792,6 +793,7 @@ static int st7567_parse_dt(struct st7571_device *st7567)
of_property_read_u32(np, "width-mm", &st7567->width_mm);
of_property_read_u32(np, "height-mm", &st7567->height_mm);
+ st7567->inverted = of_property_read_bool(np, "sitronix,inverted");
st7567->pformat = &st7571_monochrome;
st7567->bpp = 1;
@@ -819,6 +821,7 @@ static int st7571_parse_dt(struct st7571_device *st7571)
of_property_read_u32(np, "width-mm", &st7571->width_mm);
of_property_read_u32(np, "height-mm", &st7571->height_mm);
st7571->grayscale = of_property_read_bool(np, "sitronix,grayscale");
+ st7571->inverted = of_property_read_bool(np, "sitronix,inverted");
if (st7571->grayscale) {
st7571->pformat = &st7571_grayscale;
@@ -873,7 +876,7 @@ static int st7567_lcd_init(struct st7571_device *st7567)
ST7571_SET_POWER(0x6), /* Power Control, VC: ON, VR: ON, VF: OFF */
ST7571_SET_POWER(0x7), /* Power Control, VC: ON, VR: ON, VF: ON */
- ST7571_SET_REVERSE(0),
+ ST7571_SET_REVERSE(st7567->inverted ? 1 : 0),
ST7571_SET_ENTIRE_DISPLAY_ON(0),
};
@@ -917,7 +920,7 @@ static int st7571_lcd_init(struct st7571_device *st7571)
ST7571_SET_COLOR_MODE(st7571->pformat->mode),
ST7571_COMMAND_SET_NORMAL,
- ST7571_SET_REVERSE(0),
+ ST7571_SET_REVERSE(st7571->inverted ? 1 : 0),
ST7571_SET_ENTIRE_DISPLAY_ON(0),
};
@@ -1024,7 +1027,7 @@ static void st7571_remove(struct i2c_client *client)
drm_dev_unplug(&st7571->dev);
}
-struct st7571_panel_data st7567_config = {
+static const struct st7571_panel_data st7567_config = {
.init = st7567_lcd_init,
.parse_dt = st7567_parse_dt,
.constraints = {
@@ -1036,7 +1039,7 @@ struct st7571_panel_data st7567_config = {
},
};
-struct st7571_panel_data st7571_config = {
+static const struct st7571_panel_data st7571_config = {
.init = st7571_lcd_init,
.parse_dt = st7571_parse_dt,
.constraints = {
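st7571_transform_xy() above gathers one bit from each of eight consecutive rows into a vertical byte, matching the controller's page layout, and the new bpp argument scales the row stride for 2-bit grayscale. A simplified standalone sketch of the underlying transform (not the driver's exact indexing):

#include <stdint.h>
#include <stdio.h>

/* Gather bit (x % 8) of byte column (x / 8) from rows y..y+7 into one
 * vertical byte, LSB = topmost row, as page-addressed LCDs expect. */
static uint8_t pack_vertical_byte(const uint8_t *fb, int pitch, int x, int y)
{
	uint8_t out = 0;

	for (int i = 0; i < 8; i++) {
		uint8_t byte = fb[(y + i) * pitch + x / 8];

		out |= (uint8_t)(((byte >> (x % 8)) & 1) << i);
	}
	return out;
}

int main(void)
{
	uint8_t fb[8 * 16] = { 0 };	/* 128x8 px, 1 bpp, pitch 16 */

	fb[0 * 16] |= 1 << 3;		/* pixel (3, 0) */
	fb[7 * 16] |= 1 << 3;		/* pixel (3, 7) */
	printf("0x%02x\n", pack_vertical_byte(fb, 16, 3, 0));	/* 0x81 */
	return 0;
}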
diff --git a/drivers/gpu/drm/solomon/ssd130x-spi.c b/drivers/gpu/drm/solomon/ssd130x-spi.c
index 7c935870f7d2..b52f5fd592a1 100644
--- a/drivers/gpu/drm/solomon/ssd130x-spi.c
+++ b/drivers/gpu/drm/solomon/ssd130x-spi.c
@@ -74,8 +74,7 @@ static int ssd130x_spi_probe(struct spi_device *spi)
t = devm_kzalloc(dev, sizeof(*t), GFP_KERNEL);
if (!t)
- return dev_err_probe(dev, -ENOMEM,
- "Failed to allocate SPI transport data\n");
+ return -ENOMEM;
t->spi = spi;
t->dc = dc;
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 03684062309b..b76606e9a82d 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -744,7 +744,7 @@ static bool sti_hqvdp_check_hw_scaling(struct sti_hqvdp *hqvdp,
inv_zy = DIV_ROUND_UP(src_h, dst_h);
- return (inv_zy <= lfw) ? true : false;
+ return inv_zy <= lfw;
}
/**
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index 8ebcaf953782..ab00d1a6140c 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -236,8 +236,18 @@ static void stm_drm_platform_shutdown(struct platform_device *pdev)
drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
}
+static const struct ltdc_plat_data stm_drm_plat_data = {
+ .pad_max_freq_hz = 90000000,
+};
+
+static const struct ltdc_plat_data stm_drm_plat_data_mp25 = {
+ .pad_max_freq_hz = 150000000,
+};
+
static const struct of_device_id drv_dt_ids[] = {
- { .compatible = "st,stm32-ltdc"},
+ { .compatible = "st,stm32-ltdc", .data = &stm_drm_plat_data, },
+ { .compatible = "st,stm32mp251-ltdc", .data = &stm_drm_plat_data_mp25, },
+ { .compatible = "st,stm32mp255-ltdc", .data = &stm_drm_plat_data_mp25, },
{ /* end node */ },
};
MODULE_DEVICE_TABLE(of, drv_dt_ids);
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index ba315c66a04d..d1501e86a5b1 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -14,6 +14,7 @@
#include <linux/interrupt.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
@@ -51,6 +52,7 @@
#define HWVER_10300 0x010300
#define HWVER_20101 0x020101
#define HWVER_40100 0x040100
+#define HWVER_40101 0x040101
/*
* The address of some registers depends on the HW version: such registers have
@@ -641,7 +643,7 @@ static inline void ltdc_set_ycbcr_config(struct drm_plane *plane, u32 drm_pix_fm
break;
default:
/* RGB or not a YCbCr supported format */
- DRM_ERROR("Unsupported pixel format: %u\n", drm_pix_fmt);
+ drm_err(plane->dev, "Unsupported pixel format: %u\n", drm_pix_fmt);
return;
}
@@ -664,18 +666,19 @@ static inline void ltdc_set_ycbcr_coeffs(struct drm_plane *plane)
u32 lofs = plane->index * LAY_OFS;
if (enc != DRM_COLOR_YCBCR_BT601 && enc != DRM_COLOR_YCBCR_BT709) {
- DRM_ERROR("color encoding %d not supported, use bt601 by default\n", enc);
+ drm_err(plane->dev, "color encoding %d not supported, use bt601 by default\n", enc);
/* set by default color encoding to DRM_COLOR_YCBCR_BT601 */
enc = DRM_COLOR_YCBCR_BT601;
}
if (ran != DRM_COLOR_YCBCR_LIMITED_RANGE && ran != DRM_COLOR_YCBCR_FULL_RANGE) {
- DRM_ERROR("color range %d not supported, use limited range by default\n", ran);
+ drm_err(plane->dev,
+ "color range %d not supported, use limited range by default\n", ran);
/* set by default color range to DRM_COLOR_YCBCR_LIMITED_RANGE */
ran = DRM_COLOR_YCBCR_LIMITED_RANGE;
}
- DRM_DEBUG_DRIVER("Color encoding=%d, range=%d\n", enc, ran);
+ drm_err(plane->dev, "Color encoding=%d, range=%d\n", enc, ran);
regmap_write(ldev->regmap, LTDC_L1CYR0R + lofs,
ltdc_ycbcr2rgb_coeffs[enc][ran][0]);
regmap_write(ldev->regmap, LTDC_L1CYR1R + lofs,
@@ -774,7 +777,7 @@ static void ltdc_crtc_atomic_enable(struct drm_crtc *crtc,
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
struct drm_device *ddev = crtc->dev;
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(crtc->dev, "\n");
pm_runtime_get_sync(ddev->dev);
@@ -798,7 +801,7 @@ static void ltdc_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_device *ddev = crtc->dev;
int layer_index = 0;
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(crtc->dev, "\n");
drm_crtc_vblank_off(crtc);
@@ -835,9 +838,15 @@ ltdc_crtc_mode_valid(struct drm_crtc *crtc,
int target_max = target + CLK_TOLERANCE_HZ;
int result;
+ if (ldev->lvds_clk) {
+ result = clk_round_rate(ldev->lvds_clk, target);
+ drm_dbg_driver(crtc->dev, "lvds pixclk rate target %d, available %d\n",
+ target, result);
+ }
+
result = clk_round_rate(ldev->pixel_clk, target);
- DRM_DEBUG_DRIVER("clk rate target %d, available %d\n", target, result);
+ drm_dbg_driver(crtc->dev, "clk rate target %d, available %d\n", target, result);
/* Filter modes according to the max frequency supported by the pads */
if (result > ldev->caps.pad_max_freq_hz)
@@ -872,14 +881,14 @@ static bool ltdc_crtc_mode_fixup(struct drm_crtc *crtc,
int rate = mode->clock * 1000;
if (clk_set_rate(ldev->pixel_clk, rate) < 0) {
- DRM_ERROR("Cannot set rate (%dHz) for pixel clk\n", rate);
+ drm_err(crtc->dev, "Cannot set rate (%dHz) for pixel clk\n", rate);
return false;
}
adjusted_mode->clock = clk_get_rate(ldev->pixel_clk) / 1000;
- DRM_DEBUG_DRIVER("requested clock %dkHz, adjusted clock %dkHz\n",
- mode->clock, adjusted_mode->clock);
+ drm_dbg_driver(crtc->dev, "requested clock %dkHz, adjusted clock %dkHz\n",
+ mode->clock, adjusted_mode->clock);
return true;
}
@@ -934,20 +943,20 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
if (!pm_runtime_active(ddev->dev)) {
ret = pm_runtime_get_sync(ddev->dev);
if (ret) {
- DRM_ERROR("Failed to set mode, cannot get sync\n");
+ drm_err(crtc->dev, "Failed to set mode, cannot get sync\n");
return;
}
}
- DRM_DEBUG_DRIVER("CRTC:%d mode:%s\n", crtc->base.id, mode->name);
- DRM_DEBUG_DRIVER("Video mode: %dx%d", mode->hdisplay, mode->vdisplay);
- DRM_DEBUG_DRIVER(" hfp %d hbp %d hsl %d vfp %d vbp %d vsl %d\n",
- mode->hsync_start - mode->hdisplay,
- mode->htotal - mode->hsync_end,
- mode->hsync_end - mode->hsync_start,
- mode->vsync_start - mode->vdisplay,
- mode->vtotal - mode->vsync_end,
- mode->vsync_end - mode->vsync_start);
+ drm_dbg_driver(crtc->dev, "CRTC:%d mode:%s\n", crtc->base.id, mode->name);
+ drm_dbg_driver(crtc->dev, "Video mode: %dx%d", mode->hdisplay, mode->vdisplay);
+ drm_dbg_driver(crtc->dev, " hfp %d hbp %d hsl %d vfp %d vbp %d vsl %d\n",
+ mode->hsync_start - mode->hdisplay,
+ mode->htotal - mode->hsync_end,
+ mode->hsync_end - mode->hsync_start,
+ mode->vsync_start - mode->vdisplay,
+ mode->vtotal - mode->vsync_end,
+ mode->vsync_end - mode->vsync_start);
/* Convert video timings to ltdc timings */
hsync = mode->hsync_end - mode->hsync_start - 1;
@@ -1033,7 +1042,7 @@ static void ltdc_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_device *ddev = crtc->dev;
struct drm_pending_vblank_event *event = crtc->state->event;
- DRM_DEBUG_ATOMIC("\n");
+ drm_dbg_atomic(crtc->dev, "\n");
ltdc_crtc_update_clut(crtc);
@@ -1121,7 +1130,7 @@ static int ltdc_crtc_enable_vblank(struct drm_crtc *crtc)
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
struct drm_crtc_state *state = crtc->state;
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(crtc->dev, "\n");
if (state->enable)
regmap_set_bits(ldev->regmap, LTDC_IER, IER_LIE);
@@ -1135,7 +1144,7 @@ static void ltdc_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(crtc->dev, "\n");
regmap_clear_bits(ldev->regmap, LTDC_IER, IER_LIE);
}
@@ -1144,11 +1153,11 @@ static int ltdc_crtc_set_crc_source(struct drm_crtc *crtc, const char *source)
struct ltdc_device *ldev;
int ret;
- DRM_DEBUG_DRIVER("\n");
-
if (!crtc)
return -ENODEV;
+ drm_dbg_driver(crtc->dev, "\n");
+
ldev = crtc_to_ltdc(crtc);
if (source && strcmp(source, "auto") == 0) {
@@ -1168,14 +1177,14 @@ static int ltdc_crtc_set_crc_source(struct drm_crtc *crtc, const char *source)
static int ltdc_crtc_verify_crc_source(struct drm_crtc *crtc,
const char *source, size_t *values_cnt)
{
- DRM_DEBUG_DRIVER("\n");
-
if (!crtc)
return -ENODEV;
+ drm_dbg_driver(crtc->dev, "\n");
+
if (source && strcmp(source, "auto") != 0) {
- DRM_DEBUG_DRIVER("Unknown CRC source %s for %s\n",
- source, crtc->name);
+ drm_dbg_driver(crtc->dev, "Unknown CRC source %s for %s\n",
+ source, crtc->name);
return -EINVAL;
}
@@ -1233,7 +1242,7 @@ static int ltdc_plane_atomic_check(struct drm_plane *plane,
struct drm_framebuffer *fb = new_plane_state->fb;
u32 src_w, src_h;
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(plane->dev, "\n");
if (!fb)
return 0;
@@ -1244,7 +1253,7 @@ static int ltdc_plane_atomic_check(struct drm_plane *plane,
/* Reject scaling */
if (src_w != new_plane_state->crtc_w || src_h != new_plane_state->crtc_h) {
- DRM_DEBUG_DRIVER("Scaling is not supported");
+ drm_dbg_driver(plane->dev, "Scaling is not supported");
return -EINVAL;
}
@@ -1270,7 +1279,7 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
enum ltdc_pix_fmt pf;
if (!newstate->crtc || !fb) {
- DRM_DEBUG_DRIVER("fb or crtc NULL");
+ drm_dbg_driver(plane->dev, "fb or crtc NULL");
return;
}
@@ -1280,11 +1289,11 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
src_w = newstate->src_w >> 16;
src_h = newstate->src_h >> 16;
- DRM_DEBUG_DRIVER("plane:%d fb:%d (%dx%d)@(%d,%d) -> (%dx%d)@(%d,%d)\n",
- plane->base.id, fb->base.id,
- src_w, src_h, src_x, src_y,
- newstate->crtc_w, newstate->crtc_h,
- newstate->crtc_x, newstate->crtc_y);
+ drm_dbg_driver(plane->dev, "plane:%d fb:%d (%dx%d)@(%d,%d) -> (%dx%d)@(%d,%d)\n",
+ plane->base.id, fb->base.id,
+ src_w, src_h, src_x, src_y,
+ newstate->crtc_w, newstate->crtc_h,
+ newstate->crtc_x, newstate->crtc_y);
regmap_read(ldev->regmap, LTDC_BPCR, &bpcr);
@@ -1312,8 +1321,8 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
val = ltdc_set_flexible_pixel_format(plane, pf);
if (val == NB_PF) {
- DRM_ERROR("Pixel format %.4s not supported\n",
- (char *)&fb->format->format);
+ drm_err(fb->dev, "Pixel format %.4s not supported\n",
+ (char *)&fb->format->format);
val = 0; /* set by default ARGB 32 bits */
}
regmap_write_bits(ldev->regmap, LTDC_L1PFCR + lofs, LXPFCR_PF, val);
@@ -1350,7 +1359,7 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
if (newstate->rotation & DRM_MODE_REFLECT_Y)
paddr += (fb->pitches[0] * (y1 - y0));
- DRM_DEBUG_DRIVER("fb: phys 0x%08x", paddr);
+ drm_dbg_driver(fb->dev, "fb: phys 0x%08x", paddr);
regmap_write(ldev->regmap, LTDC_L1CFBAR + lofs, paddr);
/* Configures the color frame buffer pitch in bytes & line length */
@@ -1517,8 +1526,8 @@ static void ltdc_plane_atomic_disable(struct drm_plane *plane,
regmap_write_bits(ldev->regmap, LTDC_L1RCR + lofs,
LXRCR_IMR | LXRCR_VBR | LXRCR_GRMSK, LXRCR_VBR);
- DRM_DEBUG_DRIVER("CRTC:%d plane:%d\n",
- oldstate->crtc->base.id, plane->base.id);
+ drm_dbg_driver(plane->dev, "CRTC:%d plane:%d\n",
+ oldstate->crtc->base.id, plane->base.id);
}
static void ltdc_plane_atomic_print_state(struct drm_printer *p,
@@ -1632,7 +1641,7 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
drm_plane_create_alpha_property(plane);
- DRM_DEBUG_DRIVER("plane:%d created\n", plane->base.id);
+ drm_dbg_driver(plane->dev, "plane:%d created\n", plane->base.id);
return plane;
}
@@ -1647,7 +1656,7 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
primary = ltdc_plane_create(ddev, DRM_PLANE_TYPE_PRIMARY, 0);
if (!primary) {
- DRM_ERROR("Can not create primary plane\n");
+ drm_err(ddev, "Can not create primary plane\n");
return -EINVAL;
}
@@ -1668,7 +1677,7 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
ret = drmm_crtc_init_with_planes(ddev, crtc, primary, NULL,
&ltdc_crtc_funcs, NULL);
if (ret) {
- DRM_ERROR("Can not initialize CRTC\n");
+ drm_err(ddev, "Can not initialize CRTC\n");
return ret;
}
@@ -1677,13 +1686,13 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
drm_mode_crtc_set_gamma_size(crtc, CLUT_SIZE);
drm_crtc_enable_color_mgmt(crtc, 0, false, CLUT_SIZE);
- DRM_DEBUG_DRIVER("CRTC:%d created\n", crtc->base.id);
+ drm_dbg_driver(ddev, "CRTC:%d created\n", crtc->base.id);
/* Add planes. Note : the first layer is used by primary plane */
for (i = 1; i < ldev->caps.nb_layers; i++) {
overlay = ltdc_plane_create(ddev, DRM_PLANE_TYPE_OVERLAY, i);
if (!overlay) {
- DRM_ERROR("Can not create overlay plane %d\n", i);
+ drm_err(ddev, "Can not create overlay plane %d\n", i);
return -ENOMEM;
}
if (ldev->caps.dynamic_zorder)
@@ -1704,7 +1713,7 @@ static void ltdc_encoder_disable(struct drm_encoder *encoder)
struct drm_device *ddev = encoder->dev;
struct ltdc_device *ldev = ddev->dev_private;
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(encoder->dev, "\n");
/* Disable LTDC */
regmap_clear_bits(ldev->regmap, LTDC_GCR, GCR_LTDCEN);
@@ -1718,7 +1727,7 @@ static void ltdc_encoder_enable(struct drm_encoder *encoder)
struct drm_device *ddev = encoder->dev;
struct ltdc_device *ldev = ddev->dev_private;
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(encoder->dev, "\n");
/* set fifo underrun threshold register */
if (ldev->caps.fifo_threshold)
@@ -1734,7 +1743,7 @@ static void ltdc_encoder_mode_set(struct drm_encoder *encoder,
{
struct drm_device *ddev = encoder->dev;
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(encoder->dev, "\n");
/*
* Set to default state the pinctrl only with DPI type.
@@ -1770,7 +1779,7 @@ static int ltdc_encoder_init(struct drm_device *ddev, struct drm_bridge *bridge)
if (ret)
return ret;
- DRM_DEBUG_DRIVER("Bridge encoder:%d created\n", encoder->base.id);
+ drm_dbg_driver(encoder->dev, "Bridge encoder:%d created\n", encoder->base.id);
return 0;
}
@@ -1779,6 +1788,7 @@ static int ltdc_get_caps(struct drm_device *ddev)
{
struct ltdc_device *ldev = ddev->dev_private;
u32 bus_width_log2, lcr, gc2r;
+ const struct ltdc_plat_data *pdata = of_device_get_match_data(ddev->dev);
/*
* at least 1 layer must be managed & the number of layers
@@ -1794,6 +1804,8 @@ static int ltdc_get_caps(struct drm_device *ddev)
ldev->caps.bus_width = 8 << bus_width_log2;
regmap_read(ldev->regmap, LTDC_IDR, &ldev->caps.hw_version);
+ ldev->caps.pad_max_freq_hz = pdata->pad_max_freq_hz;
+
switch (ldev->caps.hw_version) {
case HWVER_10200:
case HWVER_10300:
@@ -1811,7 +1823,6 @@ static int ltdc_get_caps(struct drm_device *ddev)
* does not work on 2nd layer.
*/
ldev->caps.non_alpha_only_l1 = true;
- ldev->caps.pad_max_freq_hz = 90000000;
if (ldev->caps.hw_version == HWVER_10200)
ldev->caps.pad_max_freq_hz = 65000000;
ldev->caps.nb_irq = 2;
@@ -1842,6 +1853,7 @@ static int ltdc_get_caps(struct drm_device *ddev)
ldev->caps.fifo_threshold = false;
break;
case HWVER_40100:
+ case HWVER_40101:
ldev->caps.layer_ofs = LAY_OFS_1;
ldev->caps.layer_regs = ltdc_layer_regs_a2;
ldev->caps.pix_fmt_hw = ltdc_pix_fmt_a2;
@@ -1849,7 +1861,6 @@ static int ltdc_get_caps(struct drm_device *ddev)
ldev->caps.pix_fmt_nb = ARRAY_SIZE(ltdc_drm_fmt_a2);
ldev->caps.pix_fmt_flex = true;
ldev->caps.non_alpha_only_l1 = false;
- ldev->caps.pad_max_freq_hz = 90000000;
ldev->caps.nb_irq = 2;
ldev->caps.ycbcr_input = true;
ldev->caps.ycbcr_output = true;
@@ -1870,8 +1881,12 @@ void ltdc_suspend(struct drm_device *ddev)
{
struct ltdc_device *ldev = ddev->dev_private;
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(ddev, "\n");
clk_disable_unprepare(ldev->pixel_clk);
+ if (ldev->bus_clk)
+ clk_disable_unprepare(ldev->bus_clk);
+ if (ldev->lvds_clk)
+ clk_disable_unprepare(ldev->lvds_clk);
}
int ltdc_resume(struct drm_device *ddev)
@@ -1879,15 +1894,29 @@ int ltdc_resume(struct drm_device *ddev)
struct ltdc_device *ldev = ddev->dev_private;
int ret;
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(ddev, "\n");
ret = clk_prepare_enable(ldev->pixel_clk);
if (ret) {
- DRM_ERROR("failed to enable pixel clock (%d)\n", ret);
+ drm_err(ddev, "failed to enable pixel clock (%d)\n", ret);
return ret;
}
- return 0;
+ if (ldev->bus_clk) {
+ ret = clk_prepare_enable(ldev->bus_clk);
+ if (ret) {
+ drm_err(ddev, "failed to enable bus clock (%d)\n", ret);
+ return ret;
+ }
+ }
+
+ if (ldev->lvds_clk) {
+ ret = clk_prepare_enable(ldev->lvds_clk);
+ if (ret)
+ drm_err(ddev, "failed to prepare lvds clock\n");
+ }
+
+ return ret;
}
int ltdc_load(struct drm_device *ddev)
@@ -1903,7 +1932,7 @@ int ltdc_load(struct drm_device *ddev)
int irq, i, nb_endpoints;
int ret = -ENODEV;
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(ddev, "\n");
/* Get number of endpoints */
nb_endpoints = of_graph_get_endpoint_count(np);
@@ -1913,15 +1942,29 @@ int ltdc_load(struct drm_device *ddev)
ldev->pixel_clk = devm_clk_get(dev, "lcd");
if (IS_ERR(ldev->pixel_clk)) {
if (PTR_ERR(ldev->pixel_clk) != -EPROBE_DEFER)
- DRM_ERROR("Unable to get lcd clock\n");
+ drm_err(ddev, "Unable to get lcd clock\n");
return PTR_ERR(ldev->pixel_clk);
}
if (clk_prepare_enable(ldev->pixel_clk)) {
- DRM_ERROR("Unable to prepare pixel clock\n");
+ drm_err(ddev, "Unable to prepare pixel clock\n");
return -ENODEV;
}
+ if (of_device_is_compatible(np, "st,stm32mp251-ltdc") ||
+ of_device_is_compatible(np, "st,stm32mp255-ltdc")) {
+ ldev->bus_clk = devm_clk_get(dev, "bus");
+ if (IS_ERR(ldev->bus_clk))
+ return dev_err_probe(dev, PTR_ERR(ldev->bus_clk),
+ "Unable to get bus clock\n");
+
+ ret = clk_prepare_enable(ldev->bus_clk);
+ if (ret) {
+ drm_err(ddev, "Unable to prepare bus clock\n");
+ return ret;
+ }
+ }
+
/* Get endpoints if any */
for (i = 0; i < nb_endpoints; i++) {
ret = drm_of_find_panel_or_bridge(np, 0, i, &panel, &bridge);
@@ -1939,7 +1982,7 @@ int ltdc_load(struct drm_device *ddev)
if (panel) {
bridge = drmm_panel_bridge_add(ddev, panel);
if (IS_ERR(bridge)) {
- DRM_ERROR("panel-bridge endpoint %d\n", i);
+ drm_err(ddev, "panel-bridge endpoint %d\n", i);
ret = PTR_ERR(bridge);
goto err;
}
@@ -1949,12 +1992,16 @@ int ltdc_load(struct drm_device *ddev)
ret = ltdc_encoder_init(ddev, bridge);
if (ret) {
if (ret != -EPROBE_DEFER)
- DRM_ERROR("init encoder endpoint %d\n", i);
+ drm_err(ddev, "init encoder endpoint %d\n", i);
goto err;
}
}
}
+ ldev->lvds_clk = devm_clk_get(dev, "lvds");
+ if (IS_ERR(ldev->lvds_clk))
+ ldev->lvds_clk = NULL;
+
rstc = devm_reset_control_get_exclusive(dev, NULL);
mutex_init(&ldev->err_lock);
@@ -1967,29 +2014,29 @@ int ltdc_load(struct drm_device *ddev)
ldev->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ldev->regs)) {
- DRM_ERROR("Unable to get ltdc registers\n");
+ drm_err(ddev, "Unable to get ltdc registers\n");
ret = PTR_ERR(ldev->regs);
goto err;
}
ldev->regmap = devm_regmap_init_mmio(&pdev->dev, ldev->regs, &stm32_ltdc_regmap_cfg);
if (IS_ERR(ldev->regmap)) {
- DRM_ERROR("Unable to regmap ltdc registers\n");
+ drm_err(ddev, "Unable to regmap ltdc registers\n");
ret = PTR_ERR(ldev->regmap);
goto err;
}
ret = ltdc_get_caps(ddev);
if (ret) {
- DRM_ERROR("hardware identifier (0x%08x) not supported!\n",
- ldev->caps.hw_version);
+ drm_err(ddev, "hardware identifier (0x%08x) not supported!\n",
+ ldev->caps.hw_version);
goto err;
}
/* Disable all interrupts */
regmap_clear_bits(ldev->regmap, LTDC_IER, IER_MASK);
- DRM_DEBUG_DRIVER("ltdc hw version 0x%08x\n", ldev->caps.hw_version);
+ drm_dbg_driver(ddev, "ltdc hw version 0x%08x\n", ldev->caps.hw_version);
/* initialize default value for fifo underrun threshold & clear interrupt error counters */
ldev->transfer_err = 0;
@@ -2008,32 +2055,35 @@ int ltdc_load(struct drm_device *ddev)
ltdc_irq_thread, IRQF_ONESHOT,
dev_name(dev), ddev);
if (ret) {
- DRM_ERROR("Failed to register LTDC interrupt\n");
+ drm_err(ddev, "Failed to register LTDC interrupt\n");
goto err;
}
}
crtc = drmm_kzalloc(ddev, sizeof(*crtc), GFP_KERNEL);
if (!crtc) {
- DRM_ERROR("Failed to allocate crtc\n");
+ drm_err(ddev, "Failed to allocate crtc\n");
ret = -ENOMEM;
goto err;
}
ret = ltdc_crtc_init(ddev, crtc);
if (ret) {
- DRM_ERROR("Failed to init crtc\n");
+ drm_err(ddev, "Failed to init crtc\n");
goto err;
}
ret = drm_vblank_init(ddev, NB_CRTC);
if (ret) {
- DRM_ERROR("Failed calling drm_vblank_init()\n");
+ drm_err(ddev, "Failed calling drm_vblank_init()\n");
goto err;
}
clk_disable_unprepare(ldev->pixel_clk);
+ if (ldev->bus_clk)
+ clk_disable_unprepare(ldev->bus_clk);
+
pinctrl_pm_select_sleep_state(ddev->dev);
pm_runtime_enable(ddev->dev);
@@ -2042,12 +2092,15 @@ int ltdc_load(struct drm_device *ddev)
err:
clk_disable_unprepare(ldev->pixel_clk);
+ if (ldev->bus_clk)
+ clk_disable_unprepare(ldev->bus_clk);
+
return ret;
}
void ltdc_unload(struct drm_device *ddev)
{
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(ddev, "\n");
pm_runtime_disable(ddev->dev);
}
diff --git a/drivers/gpu/drm/stm/ltdc.h b/drivers/gpu/drm/stm/ltdc.h
index 9d488043ffdb..17b51a7ce28e 100644
--- a/drivers/gpu/drm/stm/ltdc.h
+++ b/drivers/gpu/drm/stm/ltdc.h
@@ -40,10 +40,16 @@ struct fps_info {
ktime_t last_timestamp;
};
+struct ltdc_plat_data {
+ int pad_max_freq_hz; /* max frequency supported by pad */
+};
+
struct ltdc_device {
void __iomem *regs;
struct regmap *regmap;
struct clk *pixel_clk; /* lcd pixel clock */
+ struct clk *lvds_clk; /* lvds pixel clock */
+ struct clk *bus_clk; /* bus clock */
struct mutex err_lock; /* protecting error_status */
struct ltdc_caps caps;
u32 irq_status;
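The new pad_max_freq_hz capability is looked up through
of_device_get_match_data() in ltdc_get_caps() above. A minimal sketch of the
wiring, with an illustrative table (the compatible string and the 90000000
value mirror the removed hardcoded default; the actual per-SoC tables live
elsewhere in ltdc.c):

	static const struct ltdc_plat_data ltdc_pdata_example = {
		.pad_max_freq_hz = 90000000,	/* illustrative pad limit */
	};

	static const struct of_device_id ltdc_example_match[] = {
		{ .compatible = "st,stm32-ltdc", .data = &ltdc_pdata_example },
		{ /* sentinel */ }
	};
	/* probe then reads it back via of_device_get_match_data(dev) */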
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_helper.h b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
index 1424b63dde99..89633e30ca62 100644
--- a/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
@@ -132,7 +132,7 @@ int drm_sysfb_plane_helper_get_scanout_buffer(struct drm_plane *plane,
struct drm_sysfb_crtc_state {
struct drm_crtc_state base;
- /* Primary-plane format; required for color mgmt. */
+ /* CRTC input color format; required for color mgmt. */
const struct drm_format_info *format;
};
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c b/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
index 1bcdb5ee8f09..ddb4a7523ee6 100644
--- a/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
@@ -210,7 +210,12 @@ int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane,
else if (!new_plane_state->visible)
return 0;
- if (new_fb->format != sysfb->fb_format) {
+ new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
+
+ new_sysfb_crtc_state = to_drm_sysfb_crtc_state(new_crtc_state);
+ new_sysfb_crtc_state->format = sysfb->fb_format;
+
+ if (new_fb->format != new_sysfb_crtc_state->format) {
void *buf;
/* format conversion necessary; reserve buffer */
@@ -220,11 +225,6 @@ int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane,
return -ENOMEM;
}
- new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
-
- new_sysfb_crtc_state = to_drm_sysfb_crtc_state(new_crtc_state);
- new_sysfb_crtc_state->format = new_fb->format;
-
return 0;
}
EXPORT_SYMBOL(drm_sysfb_plane_helper_atomic_check);
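The atomic_check above now stashes the CRTC's input format in the subclassed
CRTC state rather than sampling the device-global format at update time. The
to_drm_sysfb_crtc_state() downcast used throughout is the usual container_of
pattern; a sketch assuming the helper's upstream shape:

	static inline struct drm_sysfb_crtc_state *
	to_drm_sysfb_crtc_state(struct drm_crtc_state *base)
	{
		return container_of(base, struct drm_sysfb_crtc_state, base);
	}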
@@ -238,7 +238,9 @@ void drm_sysfb_plane_helper_atomic_update(struct drm_plane *plane, struct drm_at
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
unsigned int dst_pitch = sysfb->fb_pitch;
- const struct drm_format_info *dst_format = sysfb->fb_format;
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);
+ struct drm_sysfb_crtc_state *sysfb_crtc_state = to_drm_sysfb_crtc_state(crtc_state);
+ const struct drm_format_info *dst_format = sysfb_crtc_state->format;
struct drm_atomic_helper_damage_iter iter;
struct drm_rect damage;
int ret, idx;
@@ -370,16 +372,19 @@ EXPORT_SYMBOL(drm_sysfb_crtc_helper_atomic_check);
void drm_sysfb_crtc_reset(struct drm_crtc *crtc)
{
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(crtc->dev);
struct drm_sysfb_crtc_state *sysfb_crtc_state;
if (crtc->state)
drm_sysfb_crtc_state_destroy(to_drm_sysfb_crtc_state(crtc->state));
sysfb_crtc_state = kzalloc(sizeof(*sysfb_crtc_state), GFP_KERNEL);
- if (sysfb_crtc_state)
+ if (sysfb_crtc_state) {
+ sysfb_crtc_state->format = sysfb->fb_format;
__drm_atomic_helper_crtc_reset(crtc, &sysfb_crtc_state->base);
- else
+ } else {
__drm_atomic_helper_crtc_reset(crtc, NULL);
+ }
}
EXPORT_SYMBOL(drm_sysfb_crtc_reset);
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c b/drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c
index 0b3fb874a51f..885864168c54 100644
--- a/drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c
@@ -79,22 +79,19 @@ const struct drm_format_info *drm_sysfb_get_format_si(struct drm_device *dev,
const struct screen_info *si)
{
const struct drm_format_info *format = NULL;
- u32 bits_per_pixel;
+ struct pixel_format pixel;
size_t i;
+ int ret;
- bits_per_pixel = __screen_info_lfb_bits_per_pixel(si);
+ ret = screen_info_pixel_format(si, &pixel);
+ if (ret)
+ return NULL;
for (i = 0; i < nformats; ++i) {
- const struct pixel_format *f = &formats[i].pixel;
-
- if (bits_per_pixel == f->bits_per_pixel &&
- si->red_size == f->red.length &&
- si->red_pos == f->red.offset &&
- si->green_size == f->green.length &&
- si->green_pos == f->green.offset &&
- si->blue_size == f->blue.length &&
- si->blue_pos == f->blue.offset) {
- format = drm_format_info(formats[i].fourcc);
+ const struct drm_sysfb_format *f = &formats[i];
+
+ if (pixel_format_equal(&pixel, &f->pixel)) {
+ format = drm_format_info(f->fourcc);
break;
}
}
diff --git a/drivers/gpu/drm/sysfb/simpledrm.c b/drivers/gpu/drm/sysfb/simpledrm.c
index 8530a3ef8a7a..0358164a623c 100644
--- a/drivers/gpu/drm/sysfb/simpledrm.c
+++ b/drivers/gpu/drm/sysfb/simpledrm.c
@@ -4,7 +4,7 @@
#include <linux/clk.h>
#include <linux/of_clk.h>
#include <linux/minmax.h>
-#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_data/simplefb.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
@@ -179,22 +179,17 @@ simplefb_get_format_of(struct drm_device *dev, struct device_node *of_node)
static struct resource *
simplefb_get_memory_of(struct drm_device *dev, struct device_node *of_node)
{
- struct device_node *np;
- struct resource *res;
+ struct resource r, *res;
int err;
- np = of_parse_phandle(of_node, "memory-region", 0);
- if (!np)
+ err = of_reserved_mem_region_to_resource(of_node, 0, &r);
+ if (err)
return NULL;
- res = devm_kzalloc(dev->dev, sizeof(*res), GFP_KERNEL);
+ res = devm_kmemdup(dev->dev, &r, sizeof(r), GFP_KERNEL);
if (!res)
return ERR_PTR(-ENOMEM);
- err = of_address_to_resource(np, 0, res);
- if (err)
- return ERR_PTR(err);
-
if (of_property_present(of_node, "reg"))
drm_warn(dev, "preferring \"memory-region\" over \"reg\" property\n");
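of_reserved_mem_region_to_resource() resolves the node's "memory-region"
phandle and fills a struct resource in one call, which is what lets the
of_parse_phandle()/of_address_to_resource() pair be dropped. Minimal usage
sketch (dev is assumed to be the probing device):

	struct resource r;

	if (!of_reserved_mem_region_to_resource(dev->of_node, 0, &r))
		dev_info(dev, "reserved region: %pR\n", &r);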
diff --git a/drivers/gpu/drm/sysfb/vesadrm.c b/drivers/gpu/drm/sysfb/vesadrm.c
index 90615e9ac86b..16a4b52d45c6 100644
--- a/drivers/gpu/drm/sysfb/vesadrm.c
+++ b/drivers/gpu/drm/sysfb/vesadrm.c
@@ -46,6 +46,7 @@ static const struct drm_format_info *vesadrm_get_format_si(struct drm_device *de
{ PIXEL_FORMAT_RGB888, DRM_FORMAT_RGB888, },
{ PIXEL_FORMAT_XRGB8888, DRM_FORMAT_XRGB8888, },
{ PIXEL_FORMAT_XBGR8888, DRM_FORMAT_XBGR8888, },
+ { PIXEL_FORMAT_C8, DRM_FORMAT_C8, },
};
return drm_sysfb_get_format_si(dev, formats, ARRAY_SIZE(formats), si);
@@ -82,7 +83,7 @@ static struct vesadrm_device *to_vesadrm_device(struct drm_device *dev)
}
/*
- * Palette
+ * Color LUT
*/
static void vesadrm_vga_cmap_write(struct vesadrm_device *vesa, unsigned int index,
@@ -128,7 +129,7 @@ static void vesadrm_pmi_cmap_write(struct vesadrm_device *vesa, unsigned int ind
}
#endif
-static void vesadrm_set_gamma_lut(struct drm_crtc *crtc, unsigned int index,
+static void vesadrm_set_color_lut(struct drm_crtc *crtc, unsigned int index,
u16 red, u16 green, u16 blue)
{
struct drm_device *dev = crtc->dev;
@@ -149,15 +150,15 @@ static void vesadrm_fill_gamma_lut(struct vesadrm_device *vesa,
switch (format->format) {
case DRM_FORMAT_XRGB1555:
- drm_crtc_fill_gamma_555(crtc, vesadrm_set_gamma_lut);
+ drm_crtc_fill_gamma_555(crtc, vesadrm_set_color_lut);
break;
case DRM_FORMAT_RGB565:
- drm_crtc_fill_gamma_565(crtc, vesadrm_set_gamma_lut);
+ drm_crtc_fill_gamma_565(crtc, vesadrm_set_color_lut);
break;
case DRM_FORMAT_RGB888:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_BGRX8888:
- drm_crtc_fill_gamma_888(crtc, vesadrm_set_gamma_lut);
+ drm_crtc_fill_gamma_888(crtc, vesadrm_set_color_lut);
break;
default:
drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
@@ -175,15 +176,53 @@ static void vesadrm_load_gamma_lut(struct vesadrm_device *vesa,
switch (format->format) {
case DRM_FORMAT_XRGB1555:
- drm_crtc_load_gamma_555_from_888(crtc, lut, vesadrm_set_gamma_lut);
+ drm_crtc_load_gamma_555_from_888(crtc, lut, vesadrm_set_color_lut);
break;
case DRM_FORMAT_RGB565:
- drm_crtc_load_gamma_565_from_888(crtc, lut, vesadrm_set_gamma_lut);
+ drm_crtc_load_gamma_565_from_888(crtc, lut, vesadrm_set_color_lut);
break;
case DRM_FORMAT_RGB888:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_BGRX8888:
- drm_crtc_load_gamma_888(crtc, lut, vesadrm_set_gamma_lut);
+ drm_crtc_load_gamma_888(crtc, lut, vesadrm_set_color_lut);
+ break;
+ default:
+ drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
+ &format->format);
+ break;
+ }
+}
+
+static void vesadrm_fill_palette_lut(struct vesadrm_device *vesa,
+ const struct drm_format_info *format)
+{
+ struct drm_device *dev = &vesa->sysfb.dev;
+ struct drm_crtc *crtc = &vesa->crtc;
+
+ switch (format->format) {
+ case DRM_FORMAT_C8:
+ drm_crtc_fill_palette_8(crtc, vesadrm_set_color_lut);
+ break;
+ case DRM_FORMAT_RGB332:
+ drm_crtc_fill_palette_332(crtc, vesadrm_set_color_lut);
+ break;
+ default:
+ drm_warn_once(dev, "Unsupported format %p4cc for palette\n",
+ &format->format);
+ break;
+ }
+}
+
+static void vesadrm_load_palette_lut(struct vesadrm_device *vesa,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut)
+{
+ struct drm_device *dev = &vesa->sysfb.dev;
+ struct drm_crtc *crtc = &vesa->crtc;
+
+ switch (format->format) {
+ case DRM_FORMAT_C8:
+ drm_crtc_load_palette_8(crtc, lut, vesadrm_set_color_lut);
break;
default:
drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
@@ -200,8 +239,67 @@ static const u64 vesadrm_primary_plane_format_modifiers[] = {
DRM_SYSFB_PLANE_FORMAT_MODIFIERS,
};
+static int vesadrm_primary_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *new_state)
+{
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(plane->dev);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(new_state, plane);
+ struct drm_framebuffer *new_fb = new_plane_state->fb;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_sysfb_crtc_state *new_sysfb_crtc_state;
+ int ret;
+
+ ret = drm_sysfb_plane_helper_atomic_check(plane, new_state);
+ if (ret)
+ return ret;
+ else if (!new_plane_state->visible)
+ return 0;
+
+ /*
+ * Fix up format conversion for specific cases
+ */
+
+ switch (sysfb->fb_format->format) {
+ case DRM_FORMAT_C8:
+ new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
+ new_sysfb_crtc_state = to_drm_sysfb_crtc_state(new_crtc_state);
+
+ switch (new_fb->format->format) {
+ case DRM_FORMAT_XRGB8888:
+ /*
+ * Reduce XRGB8888 to RGB332. Each resulting pixel is an index
+ * into the C8 hardware palette, which stores RGB332 colors.
+ */
+ if (new_sysfb_crtc_state->format->format != DRM_FORMAT_RGB332) {
+ new_sysfb_crtc_state->format =
+ drm_format_info(DRM_FORMAT_RGB332);
+ new_crtc_state->color_mgmt_changed = true;
+ }
+ break;
+ case DRM_FORMAT_C8:
+ /*
+ * Restore original output. Emulation of XRGB8888 set RGB332
+ * output format and hardware palette. This needs to be undone
+ * when we switch back to DRM_FORMAT_C8.
+ */
+ if (new_sysfb_crtc_state->format->format == DRM_FORMAT_RGB332) {
+ new_sysfb_crtc_state->format = sysfb->fb_format;
+ new_crtc_state->color_mgmt_changed = true;
+ }
+ break;
+ }
+ break;
+ }
+
+ return 0;
+}
+
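The RGB332 reduction referenced in the comments above maps each 32-bit XRGB
pixel to an 8-bit palette index by keeping the top 3/3/2 bits per channel.
A sketch of that mapping (the helper name is hypothetical; the driver relies
on the shared format-conversion helpers for the actual blit):

	/* Keep R[7:5], G[7:5] and B[7:6] of an XRGB8888 pixel. */
	static u8 xrgb8888_to_rgb332(u32 px)
	{
		return ((px >> 16) & 0xe0) |		/* red   -> bits 7:5 */
		       (((px >> 8) & 0xe0) >> 3) |	/* green -> bits 4:2 */
		       ((px & 0xc0) >> 6);		/* blue  -> bits 1:0 */
	}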
static const struct drm_plane_helper_funcs vesadrm_primary_plane_helper_funcs = {
- DRM_SYSFB_PLANE_HELPER_FUNCS,
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = vesadrm_primary_plane_helper_atomic_check,
+ .atomic_update = drm_sysfb_plane_helper_atomic_update,
+ .atomic_disable = drm_sysfb_plane_helper_atomic_disable,
+ .get_scanout_buffer = drm_sysfb_plane_helper_get_scanout_buffer,
};
static const struct drm_plane_funcs vesadrm_primary_plane_funcs = {
@@ -223,15 +321,36 @@ static void vesadrm_crtc_helper_atomic_flush(struct drm_crtc *crtc,
* plane's color format.
*/
if (crtc_state->enable && crtc_state->color_mgmt_changed) {
- if (sysfb_crtc_state->format == sysfb->fb_format) {
- if (crtc_state->gamma_lut)
- vesadrm_load_gamma_lut(vesa,
- sysfb_crtc_state->format,
- crtc_state->gamma_lut->data);
- else
+ switch (sysfb->fb_format->format) {
+ /*
+ * Index formats
+ */
+ case DRM_FORMAT_C8:
+ if (sysfb_crtc_state->format->format == DRM_FORMAT_RGB332) {
+ vesadrm_fill_palette_lut(vesa, sysfb_crtc_state->format);
+ } else if (crtc->state->gamma_lut) {
+ vesadrm_load_palette_lut(vesa,
+ sysfb_crtc_state->format,
+ crtc_state->gamma_lut->data);
+ } else {
+ vesadrm_fill_palette_lut(vesa, sysfb_crtc_state->format);
+ }
+ break;
+ /*
+ * Component formats
+ */
+ default:
+ if (sysfb_crtc_state->format == sysfb->fb_format) {
+ if (crtc_state->gamma_lut)
+ vesadrm_load_gamma_lut(vesa,
+ sysfb_crtc_state->format,
+ crtc_state->gamma_lut->data);
+ else
+ vesadrm_fill_gamma_lut(vesa, sysfb_crtc_state->format);
+ } else {
vesadrm_fill_gamma_lut(vesa, sysfb_crtc_state->format);
- } else {
- vesadrm_fill_gamma_lut(vesa, sysfb_crtc_state->format);
+ }
+ break;
}
}
}
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 41a285ec889f..8ede07fb7a21 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -526,7 +526,7 @@ void tegra_bo_free_object(struct drm_gem_object *gem)
if (drm_gem_is_imported(gem)) {
dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt,
DMA_TO_DEVICE);
- dma_buf_detach(gem->dma_buf, gem->import_attach);
+ dma_buf_detach(gem->import_attach->dmabuf, gem->import_attach);
}
}
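The fix relies on every attachment keeping a back-pointer to the buffer it is
attached to, so the gem->dma_buf pointer is not needed here. Paraphrasing the
relevant part of include/linux/dma-buf.h:

	struct dma_buf_attachment {
		struct dma_buf *dmabuf;	/* buffer this attachment belongs to */
		struct device *dev;	/* device attached to the buffer */
		/* ... */
	};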
diff --git a/drivers/gpu/drm/tests/drm_exec_test.c b/drivers/gpu/drm/tests/drm_exec_test.c
index d6c4dd1194a0..3a20c788c51f 100644
--- a/drivers/gpu/drm/tests/drm_exec_test.c
+++ b/drivers/gpu/drm/tests/drm_exec_test.c
@@ -150,14 +150,22 @@ static void test_prepare(struct kunit *test)
static void test_prepare_array(struct kunit *test)
{
struct drm_exec_priv *priv = test->priv;
- struct drm_gem_object gobj1 = { };
- struct drm_gem_object gobj2 = { };
- struct drm_gem_object *array[] = { &gobj1, &gobj2 };
+ struct drm_gem_object *gobj1;
+ struct drm_gem_object *gobj2;
+ struct drm_gem_object *array[] = {
+ (gobj1 = kunit_kzalloc(test, sizeof(*gobj1), GFP_KERNEL)),
+ (gobj2 = kunit_kzalloc(test, sizeof(*gobj2), GFP_KERNEL)),
+ };
struct drm_exec exec;
int ret;
- drm_gem_private_object_init(priv->drm, &gobj1, PAGE_SIZE);
- drm_gem_private_object_init(priv->drm, &gobj2, PAGE_SIZE);
+ if (!gobj1 || !gobj2) {
+ KUNIT_FAIL(test, "Failed to allocate GEM objects.\n");
+ return;
+ }
+
+ drm_gem_private_object_init(priv->drm, gobj1, PAGE_SIZE);
+ drm_gem_private_object_init(priv->drm, gobj2, PAGE_SIZE);
drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&exec)
@@ -166,8 +174,8 @@ static void test_prepare_array(struct kunit *test)
KUNIT_EXPECT_EQ(test, ret, 0);
drm_exec_fini(&exec);
- drm_gem_private_object_fini(&gobj1);
- drm_gem_private_object_fini(&gobj2);
+ drm_gem_private_object_fini(gobj1);
+ drm_gem_private_object_fini(gobj2);
}
static void test_multiple_loops(struct kunit *test)
diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c
index 7299fa8971ce..981dada8f3a8 100644
--- a/drivers/gpu/drm/tests/drm_format_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_format_helper_test.c
@@ -1033,13 +1033,14 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
NULL : &result->dst_pitch;
drm_fb_xrgb8888_to_xrgb2101010(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
- buf = le32buf_to_cpu(test, buf, dst_size / sizeof(u32));
+ buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
drm_fb_xrgb8888_to_xrgb2101010(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
+ buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c
index a2f40a5c7703..da89fd01c337 100644
--- a/drivers/gpu/drm/tidss/tidss_crtc.c
+++ b/drivers/gpu/drm/tidss/tidss_crtc.c
@@ -91,7 +91,7 @@ static int tidss_crtc_atomic_check(struct drm_crtc *crtc,
struct dispc_device *dispc = tidss->dispc;
struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
u32 hw_videoport = tcrtc->hw_videoport;
- const struct drm_display_mode *mode;
+ struct drm_display_mode *mode;
enum drm_mode_status ok;
dev_dbg(ddev->dev, "%s\n", __func__);
@@ -108,6 +108,9 @@ static int tidss_crtc_atomic_check(struct drm_crtc *crtc,
return -EINVAL;
}
+ if (drm_atomic_crtc_needs_modeset(crtc_state))
+ drm_mode_set_crtcinfo(mode, 0);
+
return dispc_vp_bus_check(dispc, hw_videoport, crtc_state);
}
@@ -225,7 +228,7 @@ static void tidss_crtc_atomic_enable(struct drm_crtc *crtc,
tidss_runtime_get(tidss);
r = dispc_vp_set_clk_rate(tidss->dispc, tcrtc->hw_videoport,
- mode->clock * 1000);
+ mode->crtc_clock * 1000);
if (r != 0)
return;
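mode->crtc_clock and the other crtc_-prefixed fields are the hardware-facing
timings that drm_mode_set_crtcinfo() derives from the user-visible mode, which
is why the check above normalizes them on every modeset. In comment form:

	/*
	 * After drm_mode_set_crtcinfo(mode, 0):
	 *   mode->crtc_clock == mode->clock      for ordinary modes
	 *   mode->crtc_clock == mode->clock * 2  for DRM_MODE_FLAG_DBLCLK modes
	 * and the crtc_h/crtc_v timing fields hold what the hardware scans
	 * out, which is what DISPC_VP_TIMING_H/V are programmed from below.
	 */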
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
index c0277fa36425..7c8c15a5c39b 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.c
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -4,6 +4,7 @@
* Author: Jyri Sarha <jsarha@ti.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
@@ -594,79 +595,53 @@ void tidss_disable_oldi(struct tidss_device *tidss, u32 hw_videoport)
* number. For example 7:0
*/
-static u32 FLD_MASK(u32 start, u32 end)
-{
- return ((1 << (start - end + 1)) - 1) << end;
-}
-
-static u32 FLD_VAL(u32 val, u32 start, u32 end)
-{
- return (val << end) & FLD_MASK(start, end);
-}
-
-static u32 FLD_GET(u32 val, u32 start, u32 end)
-{
- return (val & FLD_MASK(start, end)) >> end;
-}
-
-static u32 FLD_MOD(u32 orig, u32 val, u32 start, u32 end)
-{
- return (orig & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end);
-}
-
-static u32 REG_GET(struct dispc_device *dispc, u32 idx, u32 start, u32 end)
-{
- return FLD_GET(dispc_read(dispc, idx), start, end);
-}
-
-static void REG_FLD_MOD(struct dispc_device *dispc, u32 idx, u32 val,
- u32 start, u32 end)
-{
- dispc_write(dispc, idx, FLD_MOD(dispc_read(dispc, idx), val,
- start, end));
-}
-
-static u32 VID_REG_GET(struct dispc_device *dispc, u32 hw_plane, u32 idx,
- u32 start, u32 end)
-{
- return FLD_GET(dispc_vid_read(dispc, hw_plane, idx), start, end);
-}
-
-static void VID_REG_FLD_MOD(struct dispc_device *dispc, u32 hw_plane, u32 idx,
- u32 val, u32 start, u32 end)
-{
- dispc_vid_write(dispc, hw_plane, idx,
- FLD_MOD(dispc_vid_read(dispc, hw_plane, idx),
- val, start, end));
-}
-
-static u32 VP_REG_GET(struct dispc_device *dispc, u32 vp, u32 idx,
- u32 start, u32 end)
-{
- return FLD_GET(dispc_vp_read(dispc, vp, idx), start, end);
-}
-
-static void VP_REG_FLD_MOD(struct dispc_device *dispc, u32 vp, u32 idx, u32 val,
- u32 start, u32 end)
-{
- dispc_vp_write(dispc, vp, idx, FLD_MOD(dispc_vp_read(dispc, vp, idx),
- val, start, end));
-}
-
-__maybe_unused
-static u32 OVR_REG_GET(struct dispc_device *dispc, u32 ovr, u32 idx,
- u32 start, u32 end)
-{
- return FLD_GET(dispc_ovr_read(dispc, ovr, idx), start, end);
-}
-
-static void OVR_REG_FLD_MOD(struct dispc_device *dispc, u32 ovr, u32 idx,
- u32 val, u32 start, u32 end)
-{
- dispc_ovr_write(dispc, ovr, idx,
- FLD_MOD(dispc_ovr_read(dispc, ovr, idx),
- val, start, end));
-}
+#define REG_GET(dispc, idx, mask) \
+ ((u32)FIELD_GET((mask), dispc_read((dispc), (idx))))
+
+#define REG_FLD_MOD(dispc, idx, val, mask) \
+ ({ \
+ struct dispc_device *_dispc = (dispc); \
+ u32 _idx = (idx); \
+ u32 _reg = dispc_read(_dispc, _idx); \
+ FIELD_MODIFY((mask), &_reg, (val)); \
+ dispc_write(_dispc, _idx, _reg); \
+ })
+
+#define VID_REG_GET(dispc, hw_plane, idx, mask) \
+ ((u32)FIELD_GET((mask), dispc_vid_read((dispc), (hw_plane), (idx))))
+
+#define VID_REG_FLD_MOD(dispc, hw_plane, idx, val, mask) \
+ ({ \
+ struct dispc_device *_dispc = (dispc); \
+ u32 _hw_plane = (hw_plane); \
+ u32 _idx = (idx); \
+ u32 _reg = dispc_vid_read(_dispc, _hw_plane, _idx); \
+ FIELD_MODIFY((mask), &_reg, (val)); \
+ dispc_vid_write(_dispc, _hw_plane, _idx, _reg); \
+ })
+
+#define VP_REG_GET(dispc, vp, idx, mask) \
+ ((u32)FIELD_GET((mask), dispc_vp_read((dispc), (vp), (idx))))
+
+#define VP_REG_FLD_MOD(dispc, vp, idx, val, mask) \
+ ({ \
+ struct dispc_device *_dispc = (dispc); \
+ u32 _vp = (vp); \
+ u32 _idx = (idx); \
+ u32 _reg = dispc_vp_read(_dispc, _vp, _idx); \
+ FIELD_MODIFY((mask), &_reg, (val)); \
+ dispc_vp_write(_dispc, _vp, _idx, _reg); \
+ })
+
+#define OVR_REG_FLD_MOD(dispc, ovr, idx, val, mask) \
+ ({ \
+ struct dispc_device *_dispc = (dispc); \
+ u32 _ovr = (ovr); \
+ u32 _idx = (idx); \
+ u32 _reg = dispc_ovr_read(_dispc, _ovr, _idx); \
+ FIELD_MODIFY((mask), &_reg, (val)); \
+ dispc_ovr_write(_dispc, _ovr, _idx, _reg); \
+ })
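The conversion from the driver-local FLD_* helpers to <linux/bitfield.h> is
value-preserving. A worked check for a field occupying bits 10:8 (e.g. the
DATALINES field of DISPC_VP_CONTROL):

	/*
	 * GENMASK(10, 8)                                  == 0x700
	 * FLD_VAL(3, 10, 8)  == (3 << 8) & 0x700          == 0x300
	 * FIELD_PREP(GENMASK(10, 8), 3)                   == 0x300
	 * FLD_GET(0x700, 10, 8) == (0x700 & 0x700) >> 8   == 7
	 * FIELD_GET(GENMASK(10, 8), 0x700)                == 7
	 */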
static dispc_irq_t dispc_vp_irq_from_raw(u32 stat, u32 hw_videoport)
{
@@ -1139,7 +1114,8 @@ static void dispc_set_num_datalines(struct dispc_device *dispc,
v = 3;
}
- VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, v, 10, 8);
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, v,
+ DISPC_VP_CONTROL_DATALINES_MASK);
}
static void dispc_enable_am65x_oldi(struct dispc_device *dispc, u32 hw_videoport,
@@ -1162,7 +1138,8 @@ static void dispc_enable_am65x_oldi(struct dispc_device *dispc, u32 hw_videoport
oldi_cfg |= BIT(7); /* DEPOL */
- oldi_cfg = FLD_MOD(oldi_cfg, fmt->am65x_oldi_mode_reg_val, 3, 1);
+ FIELD_MODIFY(DISPC_VP_DSS_OLDI_CFG_MAP_MASK, &oldi_cfg,
+ fmt->am65x_oldi_mode_reg_val);
oldi_cfg |= BIT(12); /* SOFTRST */
@@ -1215,23 +1192,23 @@ void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport,
dispc_set_num_datalines(dispc, hw_videoport, fmt->data_width);
- hfp = mode->hsync_start - mode->hdisplay;
- hsw = mode->hsync_end - mode->hsync_start;
- hbp = mode->htotal - mode->hsync_end;
+ hfp = mode->crtc_hsync_start - mode->crtc_hdisplay;
+ hsw = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ hbp = mode->crtc_htotal - mode->crtc_hsync_end;
- vfp = mode->vsync_start - mode->vdisplay;
- vsw = mode->vsync_end - mode->vsync_start;
- vbp = mode->vtotal - mode->vsync_end;
+ vfp = mode->crtc_vsync_start - mode->crtc_vdisplay;
+ vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ vbp = mode->crtc_vtotal - mode->crtc_vsync_end;
dispc_vp_write(dispc, hw_videoport, DISPC_VP_TIMING_H,
- FLD_VAL(hsw - 1, 7, 0) |
- FLD_VAL(hfp - 1, 19, 8) |
- FLD_VAL(hbp - 1, 31, 20));
+ FIELD_PREP(DISPC_VP_TIMING_H_SYNC_PULSE_MASK, hsw - 1) |
+ FIELD_PREP(DISPC_VP_TIMING_H_FRONT_PORCH_MASK, hfp - 1) |
+ FIELD_PREP(DISPC_VP_TIMING_H_BACK_PORCH_MASK, hbp - 1));
dispc_vp_write(dispc, hw_videoport, DISPC_VP_TIMING_V,
- FLD_VAL(vsw - 1, 7, 0) |
- FLD_VAL(vfp, 19, 8) |
- FLD_VAL(vbp, 31, 20));
+ FIELD_PREP(DISPC_VP_TIMING_V_SYNC_PULSE_MASK, vsw - 1) |
+ FIELD_PREP(DISPC_VP_TIMING_V_FRONT_PORCH_MASK, vfp) |
+ FIELD_PREP(DISPC_VP_TIMING_V_BACK_PORCH_MASK, vbp));
ivs = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
@@ -1254,24 +1231,28 @@ void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport,
ieo = false;
dispc_vp_write(dispc, hw_videoport, DISPC_VP_POL_FREQ,
- FLD_VAL(align, 18, 18) |
- FLD_VAL(onoff, 17, 17) |
- FLD_VAL(rf, 16, 16) |
- FLD_VAL(ieo, 15, 15) |
- FLD_VAL(ipc, 14, 14) |
- FLD_VAL(ihs, 13, 13) |
- FLD_VAL(ivs, 12, 12));
+ FIELD_PREP(DISPC_VP_POL_FREQ_ALIGN_MASK, align) |
+ FIELD_PREP(DISPC_VP_POL_FREQ_ONOFF_MASK, onoff) |
+ FIELD_PREP(DISPC_VP_POL_FREQ_RF_MASK, rf) |
+ FIELD_PREP(DISPC_VP_POL_FREQ_IEO_MASK, ieo) |
+ FIELD_PREP(DISPC_VP_POL_FREQ_IPC_MASK, ipc) |
+ FIELD_PREP(DISPC_VP_POL_FREQ_IHS_MASK, ihs) |
+ FIELD_PREP(DISPC_VP_POL_FREQ_IVS_MASK, ivs));
dispc_vp_write(dispc, hw_videoport, DISPC_VP_SIZE_SCREEN,
- FLD_VAL(mode->hdisplay - 1, 11, 0) |
- FLD_VAL(mode->vdisplay - 1, 27, 16));
+ FIELD_PREP(DISPC_VP_SIZE_SCREEN_HDISPLAY_MASK,
+ mode->crtc_hdisplay - 1) |
+ FIELD_PREP(DISPC_VP_SIZE_SCREEN_VDISPLAY_MASK,
+ mode->crtc_vdisplay - 1));
- VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1, 0, 0);
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1,
+ DISPC_VP_CONTROL_ENABLE_MASK);
}
void dispc_vp_disable(struct dispc_device *dispc, u32 hw_videoport)
{
- VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 0, 0, 0);
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 0,
+ DISPC_VP_CONTROL_ENABLE_MASK);
}
void dispc_vp_unprepare(struct dispc_device *dispc, u32 hw_videoport)
@@ -1285,13 +1266,16 @@ void dispc_vp_unprepare(struct dispc_device *dispc, u32 hw_videoport)
bool dispc_vp_go_busy(struct dispc_device *dispc, u32 hw_videoport)
{
- return VP_REG_GET(dispc, hw_videoport, DISPC_VP_CONTROL, 5, 5);
+ return VP_REG_GET(dispc, hw_videoport, DISPC_VP_CONTROL,
+ DISPC_VP_CONTROL_GOBIT_MASK);
}
void dispc_vp_go(struct dispc_device *dispc, u32 hw_videoport)
{
- WARN_ON(VP_REG_GET(dispc, hw_videoport, DISPC_VP_CONTROL, 5, 5));
- VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1, 5, 5);
+ WARN_ON(VP_REG_GET(dispc, hw_videoport, DISPC_VP_CONTROL,
+ DISPC_VP_CONTROL_GOBIT_MASK));
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1,
+ DISPC_VP_CONTROL_GOBIT_MASK);
}
enum c8_to_c12_mode { C8_TO_C12_REPLICATE, C8_TO_C12_MAX, C8_TO_C12_MIN };
@@ -1491,11 +1475,11 @@ static void dispc_am65x_ovr_set_plane(struct dispc_device *dispc,
u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id;
OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
- hw_id, 4, 1);
- OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
- x, 17, 6);
- OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
- y, 30, 19);
+ hw_id, DISPC_OVR_ATTRIBUTES_CHANNELIN_MASK);
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer), x,
+ DISPC_OVR_ATTRIBUTES_POSX_MASK);
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer), y,
+ DISPC_OVR_ATTRIBUTES_POSY_MASK);
}
static void dispc_j721e_ovr_set_plane(struct dispc_device *dispc,
@@ -1505,11 +1489,11 @@ static void dispc_j721e_ovr_set_plane(struct dispc_device *dispc,
u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id;
OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
- hw_id, 4, 1);
- OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer),
- x, 13, 0);
- OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer),
- y, 29, 16);
+ hw_id, DISPC_OVR_ATTRIBUTES_CHANNELIN_MASK);
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer), x,
+ DISPC_OVR_ATTRIBUTES2_POSX_MASK);
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer), y,
+ DISPC_OVR_ATTRIBUTES2_POSY_MASK);
}
void dispc_ovr_set_plane(struct dispc_device *dispc, u32 hw_plane,
@@ -1544,7 +1528,7 @@ void dispc_ovr_enable_layer(struct dispc_device *dispc,
return;
OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
- !!enable, 0, 0);
+ !!enable, DISPC_OVR_ATTRIBUTES_ENABLE_MASK);
}
/* CSC */
@@ -1580,14 +1564,14 @@ struct dispc_csc_coef {
static
void dispc_csc_offset_regval(const struct dispc_csc_coef *csc, u32 *regval)
{
-#define OVAL(x, y) (FLD_VAL(x, 15, 3) | FLD_VAL(y, 31, 19))
+#define OVAL(x, y) (FIELD_PREP(GENMASK(15, 3), x) | FIELD_PREP(GENMASK(31, 19), y))
regval[5] = OVAL(csc->preoffset[0], csc->preoffset[1]);
regval[6] = OVAL(csc->preoffset[2], csc->postoffset[0]);
regval[7] = OVAL(csc->postoffset[1], csc->postoffset[2]);
#undef OVAL
}
-#define CVAL(x, y) (FLD_VAL(x, 10, 0) | FLD_VAL(y, 26, 16))
+#define CVAL(x, y) (FIELD_PREP(GENMASK(10, 0), x) | FIELD_PREP(GENMASK(26, 16), y))
static
void dispc_csc_yuv2rgb_regval(const struct dispc_csc_coef *csc, u32 *regval)
{
@@ -1767,7 +1751,8 @@ static void dispc_vid_csc_setup(struct dispc_device *dispc, u32 hw_plane,
static void dispc_vid_csc_enable(struct dispc_device *dispc, u32 hw_plane,
bool enable)
{
- VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable, 9, 9);
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable,
+ DISPC_VID_ATTRIBUTES_COLORCONVENABLE_MASK);
}
/* SCALER */
@@ -1826,7 +1811,8 @@ static void dispc_vid_write_fir_coefs(struct dispc_device *dispc,
c1 = coefs->c1[phase];
c2 = coefs->c2[phase];
- c12 = FLD_VAL(c1, 19, 10) | FLD_VAL(c2, 29, 20);
+ c12 = FIELD_PREP(GENMASK(19, 10), c1) | FIELD_PREP(GENMASK(29, 20),
+ c2);
dispc_vid_write(dispc, hw_plane, reg, c12);
}
@@ -2023,20 +2009,20 @@ static void dispc_vid_set_scaling(struct dispc_device *dispc,
u32 fourcc)
{
/* HORIZONTAL RESIZE ENABLE */
- VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES,
- sp->scale_x, 7, 7);
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, sp->scale_x,
+ DISPC_VID_ATTRIBUTES_HRESIZEENABLE_MASK);
/* VERTICAL RESIZE ENABLE */
- VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES,
- sp->scale_y, 8, 8);
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, sp->scale_y,
+ DISPC_VID_ATTRIBUTES_VRESIZEENABLE_MASK);
/* Skip the rest if no scaling is used */
if (!sp->scale_x && !sp->scale_y)
return;
/* VERTICAL 5-TAPS */
- VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES,
- sp->five_taps, 21, 21);
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, sp->five_taps,
+ DISPC_VID_ATTRIBUTES_VERTICALTAPS_MASK);
if (dispc_fourcc_is_yuv(fourcc)) {
if (sp->scale_x) {
@@ -2126,7 +2112,7 @@ static void dispc_plane_set_pixel_format(struct dispc_device *dispc,
if (dispc_color_formats[i].fourcc == fourcc) {
VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES,
dispc_color_formats[i].dss_code,
- 6, 1);
+ DISPC_VID_ATTRIBUTES_FORMAT_MASK);
return;
}
}
@@ -2248,7 +2234,8 @@ void dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_EXT_1, (u64)dma_addr >> 32);
dispc_vid_write(dispc, hw_plane, DISPC_VID_PICTURE_SIZE,
- (scale.in_w - 1) | ((scale.in_h - 1) << 16));
+ FIELD_PREP(DISPC_VID_PICTURE_SIZE_MEMSIZEY_MASK, scale.in_h - 1) |
+ FIELD_PREP(DISPC_VID_PICTURE_SIZE_MEMSIZEX_MASK, scale.in_w - 1));
/* For YUV422 format we use the macropixel size for pixel inc */
if (fourcc == DRM_FORMAT_YUYV || fourcc == DRM_FORMAT_UYVY)
@@ -2285,8 +2272,10 @@ void dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
if (!lite) {
dispc_vid_write(dispc, hw_plane, DISPC_VID_SIZE,
- (state->crtc_w - 1) |
- ((state->crtc_h - 1) << 16));
+ FIELD_PREP(DISPC_VID_SIZE_SIZEY_MASK,
+ state->crtc_h - 1) |
+ FIELD_PREP(DISPC_VID_SIZE_SIZEX_MASK,
+ state->crtc_w - 1));
dispc_vid_set_scaling(dispc, hw_plane, &scale, fourcc);
}
@@ -2300,38 +2289,45 @@ void dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
}
dispc_vid_write(dispc, hw_plane, DISPC_VID_GLOBAL_ALPHA,
- 0xFF & (state->alpha >> 8));
+ FIELD_PREP(DISPC_VID_GLOBAL_ALPHA_GLOBALALPHA_MASK,
+ state->alpha >> 8));
if (state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI)
VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 1,
- 28, 28);
+ DISPC_VID_ATTRIBUTES_PREMULTIPLYALPHA_MASK);
else
VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 0,
- 28, 28);
+ DISPC_VID_ATTRIBUTES_PREMULTIPLYALPHA_MASK);
}
void dispc_plane_enable(struct dispc_device *dispc, u32 hw_plane, bool enable)
{
- VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable, 0, 0);
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable,
+ DISPC_VID_ATTRIBUTES_ENABLE_MASK);
}
static u32 dispc_vid_get_fifo_size(struct dispc_device *dispc, u32 hw_plane)
{
- return VID_REG_GET(dispc, hw_plane, DISPC_VID_BUF_SIZE_STATUS, 15, 0);
+ return VID_REG_GET(dispc, hw_plane, DISPC_VID_BUF_SIZE_STATUS,
+ DISPC_VID_BUF_SIZE_STATUS_BUFSIZE_MASK);
}
static void dispc_vid_set_mflag_threshold(struct dispc_device *dispc,
u32 hw_plane, u32 low, u32 high)
{
dispc_vid_write(dispc, hw_plane, DISPC_VID_MFLAG_THRESHOLD,
- FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0));
+ FIELD_PREP(DISPC_VID_MFLAG_THRESHOLD_HT_MFLAG_MASK, high) |
+ FIELD_PREP(DISPC_VID_MFLAG_THRESHOLD_LT_MFLAG_MASK, low));
}
static void dispc_vid_set_buf_threshold(struct dispc_device *dispc,
u32 hw_plane, u32 low, u32 high)
{
dispc_vid_write(dispc, hw_plane, DISPC_VID_BUF_THRESHOLD,
- FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0));
+ FIELD_PREP(DISPC_VID_BUF_THRESHOLD_BUFHIGHTHRESHOLD_MASK,
+ high) |
+ FIELD_PREP(DISPC_VID_BUF_THRESHOLD_BUFLOWTHRESHOLD_MASK,
+ low));
}
static void dispc_k2g_plane_init(struct dispc_device *dispc)
@@ -2341,9 +2337,11 @@ static void dispc_k2g_plane_init(struct dispc_device *dispc)
dev_dbg(dispc->dev, "%s()\n", __func__);
/* MFLAG_CTRL = ENABLED */
- REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2, 1, 0);
+ REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2,
+ DISPC_GLOBAL_MFLAG_ATTRIBUTE_MFLAG_CTRL_MASK);
/* MFLAG_START = MFLAGNORMALSTARTMODE */
- REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6);
+ REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0,
+ DISPC_GLOBAL_MFLAG_ATTRIBUTE_MFLAG_START_MASK);
for (hw_plane = 0; hw_plane < dispc->feat->num_vids; hw_plane++) {
u32 size = dispc_vid_get_fifo_size(dispc, hw_plane);
@@ -2380,7 +2378,7 @@ static void dispc_k2g_plane_init(struct dispc_device *dispc)
* register is ignored.
*/
VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 1,
- 19, 19);
+ DISPC_VID_ATTRIBUTES_BUFPRELOAD_MASK);
}
}
@@ -2392,13 +2390,15 @@ static void dispc_k3_plane_init(struct dispc_device *dispc)
dev_dbg(dispc->dev, "%s()\n", __func__);
- REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_lo_pri, 2, 0);
- REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_hi_pri, 5, 3);
+ REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_lo_pri, DSS_CBA_CFG_PRI_LO_MASK);
+ REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_hi_pri, DSS_CBA_CFG_PRI_HI_MASK);
/* MFLAG_CTRL = ENABLED */
- REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2, 1, 0);
+ REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2,
+ DISPC_GLOBAL_MFLAG_ATTRIBUTE_MFLAG_CTRL_MASK);
/* MFLAG_START = MFLAGNORMALSTARTMODE */
- REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6);
+ REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0,
+ DISPC_GLOBAL_MFLAG_ATTRIBUTE_MFLAG_START_MASK);
for (hw_plane = 0; hw_plane < dispc->feat->num_vids; hw_plane++) {
u32 size = dispc_vid_get_fifo_size(dispc, hw_plane);
@@ -2431,7 +2431,7 @@ static void dispc_k3_plane_init(struct dispc_device *dispc)
/* Prefetch up to PRELOAD value */
VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 0,
- 19, 19);
+ DISPC_VID_ATTRIBUTES_BUFPRELOAD_MASK);
}
}
@@ -2461,7 +2461,8 @@ static void dispc_vp_init(struct dispc_device *dispc)
/* Enable the gamma shadow bit-field for all VPs */
for (i = 0; i < dispc->feat->num_vps; i++)
- VP_REG_FLD_MOD(dispc, i, DISPC_VP_CONFIG, 1, 2, 2);
+ VP_REG_FLD_MOD(dispc, i, DISPC_VP_CONFIG, 1,
+ DISPC_VP_CONFIG_GAMMAENABLE_MASK);
}
static void dispc_initial_config(struct dispc_device *dispc)
@@ -2472,8 +2473,8 @@ static void dispc_initial_config(struct dispc_device *dispc)
/* Note: Hardcoded DPI routing on J721E for now */
if (dispc->feat->subrev == DISPC_J721E) {
dispc_write(dispc, DISPC_CONNECTIONS,
- FLD_VAL(2, 3, 0) | /* VP1 to DPI0 */
- FLD_VAL(8, 7, 4) /* VP3 to DPI1 */
+ FIELD_PREP(DISPC_CONNECTIONS_DPI_0_CONN_MASK, 2) | /* VP1 to DPI0 */
+ FIELD_PREP(DISPC_CONNECTIONS_DPI_1_CONN_MASK, 8) /* VP3 to DPI1 */
);
}
}
@@ -2651,8 +2652,8 @@ static void dispc_k2g_cpr_from_ctm(const struct drm_color_ctm *ctm,
cpr->m[CSC_BB] = dispc_S31_32_to_s2_8(ctm->matrix[8]);
}
-#define CVAL(xR, xG, xB) (FLD_VAL(xR, 9, 0) | FLD_VAL(xG, 20, 11) | \
- FLD_VAL(xB, 31, 22))
+#define CVAL(xR, xG, xB) (FIELD_PREP(GENMASK(9, 0), xR) | FIELD_PREP(GENMASK(20, 11), xG) | \
+ FIELD_PREP(GENMASK(31, 22), xB))
static void dispc_k2g_vp_csc_cpr_regval(const struct dispc_csc_coef *csc,
u32 *regval)
@@ -2694,8 +2695,8 @@ static void dispc_k2g_vp_set_ctm(struct dispc_device *dispc, u32 hw_videoport,
cprenable = 1;
}
- VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONFIG,
- cprenable, 15, 15);
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONFIG, cprenable,
+ DISPC_VP_CONFIG_CPR_MASK);
}
static s16 dispc_S31_32_to_s3_8(s64 coef)
@@ -2760,8 +2761,8 @@ static void dispc_k3_vp_set_ctm(struct dispc_device *dispc, u32 hw_videoport,
colorconvenable = 1;
}
- VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONFIG,
- colorconvenable, 24, 24);
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONFIG, colorconvenable,
+ DISPC_VP_CONFIG_COLORCONVENABLE_MASK);
}
static void dispc_vp_set_color_mgmt(struct dispc_device *dispc,
@@ -2816,26 +2817,26 @@ int dispc_runtime_resume(struct dispc_device *dispc)
clk_prepare_enable(dispc->fclk);
- if (REG_GET(dispc, DSS_SYSSTATUS, 0, 0) == 0)
+ if (REG_GET(dispc, DSS_SYSSTATUS, DSS_SYSSTATUS_DISPC_FUNC_RESETDONE) == 0)
dev_warn(dispc->dev, "DSS FUNC RESET not done!\n");
dev_dbg(dispc->dev, "OMAP DSS7 rev 0x%x\n",
dispc_read(dispc, DSS_REVISION));
dev_dbg(dispc->dev, "VP RESETDONE %d,%d,%d\n",
- REG_GET(dispc, DSS_SYSSTATUS, 1, 1),
- REG_GET(dispc, DSS_SYSSTATUS, 2, 2),
- REG_GET(dispc, DSS_SYSSTATUS, 3, 3));
+ REG_GET(dispc, DSS_SYSSTATUS, GENMASK(1, 1)),
+ REG_GET(dispc, DSS_SYSSTATUS, GENMASK(2, 2)),
+ REG_GET(dispc, DSS_SYSSTATUS, GENMASK(3, 3)));
if (dispc->feat->subrev == DISPC_AM625 ||
dispc->feat->subrev == DISPC_AM65X)
dev_dbg(dispc->dev, "OLDI RESETDONE %d,%d,%d\n",
- REG_GET(dispc, DSS_SYSSTATUS, 5, 5),
- REG_GET(dispc, DSS_SYSSTATUS, 6, 6),
- REG_GET(dispc, DSS_SYSSTATUS, 7, 7));
+ REG_GET(dispc, DSS_SYSSTATUS, GENMASK(5, 5)),
+ REG_GET(dispc, DSS_SYSSTATUS, GENMASK(6, 6)),
+ REG_GET(dispc, DSS_SYSSTATUS, GENMASK(7, 7)));
dev_dbg(dispc->dev, "DISPC IDLE %d\n",
- REG_GET(dispc, DSS_SYSSTATUS, 9, 9));
+ REG_GET(dispc, DSS_SYSSTATUS, DSS_SYSSTATUS_DISPC_IDLE_STATUS));
dispc_initial_config(dispc);
@@ -2912,7 +2913,8 @@ static void dispc_softreset_k2g(struct dispc_device *dispc)
spin_unlock_irqrestore(&dispc->tidss->irq_lock, flags);
for (unsigned int vp_idx = 0; vp_idx < dispc->feat->num_vps; ++vp_idx)
- VP_REG_FLD_MOD(dispc, vp_idx, DISPC_VP_CONTROL, 0, 0, 0);
+ VP_REG_FLD_MOD(dispc, vp_idx, DISPC_VP_CONTROL, 0,
+ DISPC_VP_CONTROL_ENABLE_MASK);
}
static int dispc_softreset(struct dispc_device *dispc)
@@ -2926,7 +2928,7 @@ static int dispc_softreset(struct dispc_device *dispc)
}
/* Soft reset */
- REG_FLD_MOD(dispc, DSS_SYSCONFIG, 1, 1, 1);
+ REG_FLD_MOD(dispc, DSS_SYSCONFIG, 1, DSS_SYSCONFIG_SOFTRESET_MASK);
/* Wait for reset to complete */
ret = readl_poll_timeout(dispc->base_common + DSS_SYSSTATUS,
val, val & 1, 100, 5000);
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.h b/drivers/gpu/drm/tidss/tidss_dispc.h
index b8614f62186c..60c1b400eb89 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.h
+++ b/drivers/gpu/drm/tidss/tidss_dispc.h
@@ -7,11 +7,14 @@
#ifndef __TIDSS_DISPC_H__
#define __TIDSS_DISPC_H__
+#include <drm/drm_color_mgmt.h>
+
#include "tidss_drv.h"
struct dispc_device;
struct drm_crtc_state;
+struct drm_plane_state;
enum tidss_gamma_type { TIDSS_GAMMA_8BIT, TIDSS_GAMMA_10BIT };
diff --git a/drivers/gpu/drm/tidss/tidss_dispc_regs.h b/drivers/gpu/drm/tidss/tidss_dispc_regs.h
index 50a3f28250ef..382027dddce8 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc_regs.h
+++ b/drivers/gpu/drm/tidss/tidss_dispc_regs.h
@@ -56,7 +56,12 @@ enum dispc_common_regs {
#define DSS_REVISION REG(DSS_REVISION)
#define DSS_SYSCONFIG REG(DSS_SYSCONFIG)
+#define DSS_SYSCONFIG_SOFTRESET_MASK GENMASK(1, 1)
+
#define DSS_SYSSTATUS REG(DSS_SYSSTATUS)
+#define DSS_SYSSTATUS_DISPC_IDLE_STATUS GENMASK(9, 9)
+#define DSS_SYSSTATUS_DISPC_FUNC_RESETDONE GENMASK(0, 0)
+
#define DISPC_IRQ_EOI REG(DISPC_IRQ_EOI)
#define DISPC_IRQSTATUS_RAW REG(DISPC_IRQSTATUS_RAW)
#define DISPC_IRQSTATUS REG(DISPC_IRQSTATUS)
@@ -70,9 +75,15 @@ enum dispc_common_regs {
#define WB_IRQSTATUS REG(WB_IRQSTATUS)
#define DISPC_GLOBAL_MFLAG_ATTRIBUTE REG(DISPC_GLOBAL_MFLAG_ATTRIBUTE)
+#define DISPC_GLOBAL_MFLAG_ATTRIBUTE_MFLAG_START_MASK GENMASK(6, 6)
+#define DISPC_GLOBAL_MFLAG_ATTRIBUTE_MFLAG_CTRL_MASK GENMASK(1, 0)
+
#define DISPC_GLOBAL_OUTPUT_ENABLE REG(DISPC_GLOBAL_OUTPUT_ENABLE)
#define DISPC_GLOBAL_BUFFER REG(DISPC_GLOBAL_BUFFER)
#define DSS_CBA_CFG REG(DSS_CBA_CFG)
+#define DSS_CBA_CFG_PRI_HI_MASK GENMASK(5, 3)
+#define DSS_CBA_CFG_PRI_LO_MASK GENMASK(2, 0)
+
#define DISPC_DBG_CONTROL REG(DISPC_DBG_CONTROL)
#define DISPC_DBG_STATUS REG(DISPC_DBG_STATUS)
#define DISPC_CLKGATING_DISABLE REG(DISPC_CLKGATING_DISABLE)
@@ -88,6 +99,9 @@ enum dispc_common_regs {
#define FBDC_CONSTANT_COLOR_0 REG(FBDC_CONSTANT_COLOR_0)
#define FBDC_CONSTANT_COLOR_1 REG(FBDC_CONSTANT_COLOR_1)
#define DISPC_CONNECTIONS REG(DISPC_CONNECTIONS)
+#define DISPC_CONNECTIONS_DPI_1_CONN_MASK GENMASK(7, 4)
+#define DISPC_CONNECTIONS_DPI_0_CONN_MASK GENMASK(3, 0)
+
#define DISPC_MSS_VP1 REG(DISPC_MSS_VP1)
#define DISPC_MSS_VP3 REG(DISPC_MSS_VP3)
@@ -102,13 +116,27 @@ enum dispc_common_regs {
#define DISPC_VID_ACCUV2_0 0x18
#define DISPC_VID_ACCUV2_1 0x1c
#define DISPC_VID_ATTRIBUTES 0x20
+#define DISPC_VID_ATTRIBUTES_PREMULTIPLYALPHA_MASK GENMASK(28, 28)
+#define DISPC_VID_ATTRIBUTES_VERTICALTAPS_MASK GENMASK(21, 21)
+#define DISPC_VID_ATTRIBUTES_BUFPRELOAD_MASK GENMASK(19, 19)
+#define DISPC_VID_ATTRIBUTES_COLORCONVENABLE_MASK GENMASK(9, 9)
+#define DISPC_VID_ATTRIBUTES_VRESIZEENABLE_MASK GENMASK(8, 8)
+#define DISPC_VID_ATTRIBUTES_HRESIZEENABLE_MASK GENMASK(7, 7)
+#define DISPC_VID_ATTRIBUTES_FORMAT_MASK GENMASK(6, 1)
+#define DISPC_VID_ATTRIBUTES_ENABLE_MASK GENMASK(0, 0)
+
#define DISPC_VID_ATTRIBUTES2 0x24
#define DISPC_VID_BA_0 0x28
#define DISPC_VID_BA_1 0x2c
#define DISPC_VID_BA_UV_0 0x30
#define DISPC_VID_BA_UV_1 0x34
#define DISPC_VID_BUF_SIZE_STATUS 0x38
+#define DISPC_VID_BUF_SIZE_STATUS_BUFSIZE_MASK GENMASK(15, 0)
+
#define DISPC_VID_BUF_THRESHOLD 0x3c
+#define DISPC_VID_BUF_THRESHOLD_BUFHIGHTHRESHOLD_MASK GENMASK(31, 16)
+#define DISPC_VID_BUF_THRESHOLD_BUFLOWTHRESHOLD_MASK GENMASK(15, 0)
+
#define DISPC_VID_CSC_COEF(n) (0x40 + (n) * 4)
#define DISPC_VID_FIRH 0x5c
@@ -137,15 +165,26 @@ enum dispc_common_regs {
#define DISPC_VID_FIR_COEF_V12_C(phase) (0x1bc + (phase) * 4)
#define DISPC_VID_GLOBAL_ALPHA 0x1fc
+#define DISPC_VID_GLOBAL_ALPHA_GLOBALALPHA_MASK GENMASK(7, 0)
+
#define DISPC_VID_K2G_IRQENABLE 0x200 /* K2G */
#define DISPC_VID_K2G_IRQSTATUS 0x204 /* K2G */
#define DISPC_VID_MFLAG_THRESHOLD 0x208
+#define DISPC_VID_MFLAG_THRESHOLD_HT_MFLAG_MASK GENMASK(31, 16)
+#define DISPC_VID_MFLAG_THRESHOLD_LT_MFLAG_MASK GENMASK(15, 0)
+
#define DISPC_VID_PICTURE_SIZE 0x20c
+#define DISPC_VID_PICTURE_SIZE_MEMSIZEY_MASK GENMASK(27, 16)
+#define DISPC_VID_PICTURE_SIZE_MEMSIZEX_MASK GENMASK(11, 0)
+
#define DISPC_VID_PIXEL_INC 0x210
#define DISPC_VID_K2G_POSITION 0x214 /* K2G */
#define DISPC_VID_PRELOAD 0x218
#define DISPC_VID_ROW_INC 0x21c
#define DISPC_VID_SIZE 0x220
+#define DISPC_VID_SIZE_SIZEY_MASK GENMASK(27, 16)
+#define DISPC_VID_SIZE_SIZEX_MASK GENMASK(11, 0)
+
#define DISPC_VID_BA_EXT_0 0x22c
#define DISPC_VID_BA_EXT_1 0x230
#define DISPC_VID_BA_UV_EXT_0 0x234
@@ -173,11 +212,27 @@ enum dispc_common_regs {
#define DISPC_OVR_TRANS_COLOR_MIN 0x18
#define DISPC_OVR_TRANS_COLOR_MIN2 0x1c
#define DISPC_OVR_ATTRIBUTES(n) (0x20 + (n) * 4)
+#define DISPC_OVR_ATTRIBUTES_POSY_MASK GENMASK(30, 19)
+#define DISPC_OVR_ATTRIBUTES_POSX_MASK GENMASK(17, 6)
+#define DISPC_OVR_ATTRIBUTES_CHANNELIN_MASK GENMASK(4, 1)
+#define DISPC_OVR_ATTRIBUTES_ENABLE_MASK GENMASK(0, 0)
+
#define DISPC_OVR_ATTRIBUTES2(n) (0x34 + (n) * 4) /* J721E */
+#define DISPC_OVR_ATTRIBUTES2_POSY_MASK GENMASK(29, 16)
+#define DISPC_OVR_ATTRIBUTES2_POSX_MASK GENMASK(13, 0)
+
/* VP */
#define DISPC_VP_CONFIG 0x0
+#define DISPC_VP_CONFIG_COLORCONVENABLE_MASK GENMASK(24, 24)
+#define DISPC_VP_CONFIG_CPR_MASK GENMASK(15, 15)
+#define DISPC_VP_CONFIG_GAMMAENABLE_MASK GENMASK(2, 2)
+
#define DISPC_VP_CONTROL 0x4
+#define DISPC_VP_CONTROL_DATALINES_MASK GENMASK(10, 8)
+#define DISPC_VP_CONTROL_GOBIT_MASK GENMASK(5, 5)
+#define DISPC_VP_CONTROL_ENABLE_MASK GENMASK(0, 0)
+
#define DISPC_VP_CSC_COEF0 0x8
#define DISPC_VP_CSC_COEF1 0xc
#define DISPC_VP_CSC_COEF2 0x10
@@ -189,9 +244,28 @@ enum dispc_common_regs {
#define DISPC_VP_DATA_CYCLE_2 0x1c
#define DISPC_VP_LINE_NUMBER 0x44
#define DISPC_VP_POL_FREQ 0x4c
+#define DISPC_VP_POL_FREQ_ALIGN_MASK GENMASK(18, 18)
+#define DISPC_VP_POL_FREQ_ONOFF_MASK GENMASK(17, 17)
+#define DISPC_VP_POL_FREQ_RF_MASK GENMASK(16, 16)
+#define DISPC_VP_POL_FREQ_IEO_MASK GENMASK(15, 15)
+#define DISPC_VP_POL_FREQ_IPC_MASK GENMASK(14, 14)
+#define DISPC_VP_POL_FREQ_IHS_MASK GENMASK(13, 13)
+#define DISPC_VP_POL_FREQ_IVS_MASK GENMASK(12, 12)
+
#define DISPC_VP_SIZE_SCREEN 0x50
+#define DISPC_VP_SIZE_SCREEN_HDISPLAY_MASK GENMASK(11, 0)
+#define DISPC_VP_SIZE_SCREEN_VDISPLAY_MASK GENMASK(27, 16)
+
#define DISPC_VP_TIMING_H 0x54
+#define DISPC_VP_TIMING_H_SYNC_PULSE_MASK GENMASK(7, 0)
+#define DISPC_VP_TIMING_H_FRONT_PORCH_MASK GENMASK(19, 8)
+#define DISPC_VP_TIMING_H_BACK_PORCH_MASK GENMASK(31, 20)
+
#define DISPC_VP_TIMING_V 0x58
+#define DISPC_VP_TIMING_V_SYNC_PULSE_MASK GENMASK(7, 0)
+#define DISPC_VP_TIMING_V_FRONT_PORCH_MASK GENMASK(19, 8)
+#define DISPC_VP_TIMING_V_BACK_PORCH_MASK GENMASK(31, 20)
+
#define DISPC_VP_CSC_COEF3 0x5c
#define DISPC_VP_CSC_COEF4 0x60
#define DISPC_VP_CSC_COEF5 0x64
@@ -220,6 +294,8 @@ enum dispc_common_regs {
#define DISPC_VP_SAFETY_LFSR_SEED 0x110
#define DISPC_VP_GAMMA_TABLE 0x120
#define DISPC_VP_DSS_OLDI_CFG 0x160
+#define DISPC_VP_DSS_OLDI_CFG_MAP_MASK GENMASK(3, 1)
+
#define DISPC_VP_DSS_OLDI_STATUS 0x164
#define DISPC_VP_DSS_OLDI_LB 0x168
#define DISPC_VP_DSS_MERGE_SPLIT 0x16c /* J721E */
diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
index a1b12e52aca4..27d9a8fd541f 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.c
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -8,6 +8,7 @@
#include <linux/of.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/aperture.h>
#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
@@ -192,12 +193,20 @@ static int tidss_probe(struct platform_device *pdev)
goto err_irq_uninstall;
}
+ /* Remove possible early fb before setting up the fbdev */
+ ret = aperture_remove_all_conflicting_devices(tidss_driver.name);
+ if (ret)
+ goto err_drm_dev_unreg;
+
drm_client_setup(ddev, NULL);
dev_dbg(dev, "%s done\n", __func__);
return 0;
+err_drm_dev_unreg:
+ drm_dev_unregister(ddev);
+
err_irq_uninstall:
tidss_irq_uninstall(ddev);
diff --git a/drivers/gpu/drm/tidss/tidss_drv.h b/drivers/gpu/drm/tidss/tidss_drv.h
index d14d5d28f0a3..84454a4855d1 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.h
+++ b/drivers/gpu/drm/tidss/tidss_drv.h
@@ -9,6 +9,8 @@
#include <linux/spinlock.h>
+#include <drm/drm_device.h>
+
#define TIDSS_MAX_PORTS 4
#define TIDSS_MAX_PLANES 4
#define TIDSS_MAX_OLDI_TXES 2
diff --git a/drivers/gpu/drm/tidss/tidss_oldi.c b/drivers/gpu/drm/tidss/tidss_oldi.c
index 8f25159d0666..7688251beba2 100644
--- a/drivers/gpu/drm/tidss/tidss_oldi.c
+++ b/drivers/gpu/drm/tidss/tidss_oldi.c
@@ -464,7 +464,6 @@ int tidss_oldi_init(struct tidss_device *tidss)
* which may still be connected.
* Continue to search for that.
*/
- ret = 0;
continue;
}
goto err_put_node;
diff --git a/drivers/gpu/drm/tidss/tidss_plane.h b/drivers/gpu/drm/tidss/tidss_plane.h
index aecaf2728406..92c560c3a621 100644
--- a/drivers/gpu/drm/tidss/tidss_plane.h
+++ b/drivers/gpu/drm/tidss/tidss_plane.h
@@ -7,6 +7,8 @@
#ifndef __TIDSS_PLANE_H__
#define __TIDSS_PLANE_H__
+#include <drm/drm_plane.h>
+
#define to_tidss_plane(p) container_of((p), struct tidss_plane, plane)
struct tidss_device;
diff --git a/drivers/gpu/drm/tidss/tidss_scale_coefs.h b/drivers/gpu/drm/tidss/tidss_scale_coefs.h
index 9c560d0fdac0..9824d02d9d1f 100644
--- a/drivers/gpu/drm/tidss/tidss_scale_coefs.h
+++ b/drivers/gpu/drm/tidss/tidss_scale_coefs.h
@@ -9,6 +9,8 @@
#include <linux/types.h>
+struct device;
+
struct tidss_scale_coefs {
s16 c2[16];
s16 c1[16];
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index 06e54694a7f2..7d9e85e932d7 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -82,6 +82,21 @@ config DRM_PANEL_MIPI_DBI
https://github.com/notro/panel-mipi-dbi/wiki.
To compile this driver as a module, choose M here.
+config DRM_PIXPAPER
+ tristate "DRM support for PIXPAPER display panels"
+ depends on DRM && SPI
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
+ help
+ DRM driver for the Mayqueen Pixpaper e-ink display panel.
+
+ This driver supports small e-paper displays connected over SPI,
+ with a resolution of 122x250 and XRGB8888 framebuffer format.
+ It is intended for low-power embedded applications.
+
+ If M is selected, the module will be built as pixpaper.ko.
+
config TINYDRM_HX8357D
tristate "DRM support for HX8357D display panels"
depends on DRM && SPI
diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile
index 4a9ff61ec254..48d30bf6152f 100644
--- a/drivers/gpu/drm/tiny/Makefile
+++ b/drivers/gpu/drm/tiny/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_DRM_BOCHS) += bochs.o
obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus-qemu.o
obj-$(CONFIG_DRM_GM12U320) += gm12u320.o
obj-$(CONFIG_DRM_PANEL_MIPI_DBI) += panel-mipi-dbi.o
+obj-$(CONFIG_DRM_PIXPAPER) += pixpaper.o
obj-$(CONFIG_TINYDRM_HX8357D) += hx8357d.o
obj-$(CONFIG_TINYDRM_ILI9163) += ili9163.o
obj-$(CONFIG_TINYDRM_ILI9225) += ili9225.o
diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c
index 8d3b7c4fa6a4..d2d5e9f1269f 100644
--- a/drivers/gpu/drm/tiny/bochs.c
+++ b/drivers/gpu/drm/tiny/bochs.c
@@ -252,7 +252,7 @@ static int bochs_hw_init(struct bochs_device *bochs)
}
bochs->ioports = 1;
} else {
- dev_err(dev->dev, "I/O ports are not supported\n");
+ drm_err(dev, "I/O ports are not supported\n");
return -EIO;
}
diff --git a/drivers/gpu/drm/tiny/pixpaper.c b/drivers/gpu/drm/tiny/pixpaper.c
new file mode 100644
index 000000000000..32598fb2fee7
--- /dev/null
+++ b/drivers/gpu/drm/tiny/pixpaper.c
@@ -0,0 +1,1165 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DRM driver for PIXPAPER e-ink panel
+ *
+ * Author: LiangCheng Wang <zaq14760@gmail.com>
+ */
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+
+#include <drm/clients/drm_client_setup.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fbdev_shmem.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_probe_helper.h>
+
+/*
+ * Note on Undocumented Commands/Registers:
+ *
+ * Several commands and register parameters defined in this header are not
+ * documented in the datasheet. Their values and usage have been derived
+ * through analysis of existing userspace example programs.
+ *
+ * These 'unknown' definitions are crucial for the proper initialization
+ * and stable operation of the panel. Modifying these values without
+ * thorough understanding may lead to display anomalies, panel damage,
+ * or unexpected behavior.
+ */
+
+/* Command definitions */
+#define PIXPAPER_CMD_PANEL_SETTING 0x00 /* R00H: Panel settings */
+#define PIXPAPER_CMD_POWER_SETTING 0x01 /* R01H: Power settings */
+#define PIXPAPER_CMD_POWER_OFF 0x02 /* R02H: Power off */
+#define PIXPAPER_CMD_POWER_OFF_SEQUENCE 0x03 /* R03H: Power off sequence */
+#define PIXPAPER_CMD_POWER_ON 0x04 /* R04H: Power on */
+#define PIXPAPER_CMD_BOOSTER_SOFT_START 0x06 /* R06H: Booster soft start */
+#define PIXPAPER_CMD_DEEP_SLEEP 0x07 /* R07H: Deep sleep */
+#define PIXPAPER_CMD_DATA_START_TRANSMISSION 0x10
+/* R10H: Data transmission start */
+#define PIXPAPER_CMD_DISPLAY_REFRESH 0x12 /* R12H: Display refresh */
+#define PIXPAPER_CMD_PLL_CONTROL 0x30 /* R30H: PLL control */
+#define PIXPAPER_CMD_TEMP_SENSOR_CALIB 0x41
+/* R41H: Temperature sensor calibration */
+#define PIXPAPER_CMD_UNKNOWN_4D 0x4D /* R4DH: Unknown command */
+#define PIXPAPER_CMD_VCOM_INTERVAL 0x50 /* R50H: VCOM interval */
+#define PIXPAPER_CMD_UNKNOWN_60 0x60 /* R60H: Unknown command */
+#define PIXPAPER_CMD_RESOLUTION_SETTING 0x61 /* R61H: Resolution settings */
+#define PIXPAPER_CMD_GATE_SOURCE_START 0x65 /* R65H: Gate/source start */
+#define PIXPAPER_CMD_UNKNOWN_B4 0xB4 /* RB4H: Unknown command */
+#define PIXPAPER_CMD_UNKNOWN_B5 0xB5 /* RB5H: Unknown command */
+#define PIXPAPER_CMD_UNKNOWN_E0 0xE0 /* RE0H: Unknown command */
+#define PIXPAPER_CMD_POWER_SAVING 0xE3 /* RE3H: Power saving */
+#define PIXPAPER_CMD_UNKNOWN_E7 0xE7 /* RE7H: Unknown command */
+#define PIXPAPER_CMD_UNKNOWN_E9 0xE9 /* RE9H: Unknown command */
+
+/* R00H PSR - First Parameter */
+#define PIXPAPER_PSR_RST_N BIT(0)
+/* Bit 0: RST_N, 1=no effect (default), 0=reset with booster OFF */
+#define PIXPAPER_PSR_SHD_N BIT(1)
+/* Bit 1: SHD_N, 1=booster ON (default), 0=booster OFF */
+#define PIXPAPER_PSR_SHL BIT(2)
+/* Bit 2: SHL, 1=shift right (default), 0=shift left */
+#define PIXPAPER_PSR_UD BIT(3)
+/* Bit 3: UD, 1=scan up (default), 0=scan down */
+#define PIXPAPER_PSR_PST_MODE BIT(5)
+/* Bit 5: PST_MODE, 0=frame scanning (default), 1=external */
+#define PIXPAPER_PSR_RES_MASK (3 << 6)
+/* Bits 7-6: RES[1:0], resolution setting */
+#define PIXPAPER_PSR_RES_176x296 (0x0 << 6) /* 00: 176x296 */
+#define PIXPAPER_PSR_RES_128x296 (0x1 << 6) /* 01: 128x296 */
+#define PIXPAPER_PSR_RES_128x250 (0x2 << 6) /* 10: 128x250 */
+#define PIXPAPER_PSR_RES_112x204 (0x3 << 6) /* 11: 112x204 */
+#define PIXPAPER_PSR_CONFIG \
+ (PIXPAPER_PSR_RST_N | PIXPAPER_PSR_SHD_N | PIXPAPER_PSR_SHL | \
+ PIXPAPER_PSR_UD)
+/* 0x0F: Default settings, resolution set by R61H */
+
+/* R00H PSR - Second Parameter */
+#define PIXPAPER_PSR2_VC_LUTZ \
+ (1 << 0) /* Bit 0: VC_LUTZ, 1=VCOM float after refresh (default), 0=no effect */
+#define PIXPAPER_PSR2_NORG \
+ (1 << 1) /* Bit 1: NORG, 1=VCOM to GND before power off, 0=no effect (default) */
+#define PIXPAPER_PSR2_TIEG \
+ (1 << 2) /* Bit 2: TIEG, 1=VGN to GND on power off, 0=no effect (default) */
+#define PIXPAPER_PSR2_TS_AUTO \
+ (1 << 3) /* Bit 3: TS_AUTO, 1=sensor on RST_N low to high (default), 0=on booster */
+#define PIXPAPER_PSR2_VCMZ \
+ (1 << 4) /* Bit 4: VCMZ, 1=VCOM always floating, 0=no effect (default) */
+#define PIXPAPER_PSR2_FOPT \
+ (1 << 5) /* Bit 5: FOPT, 0=scan 1 frame (default), 1=no scan, HiZ */
+#define PIXPAPER_PSR_CONFIG2 \
+ (PIXPAPER_PSR2_VC_LUTZ | \
+ PIXPAPER_PSR2_TS_AUTO) /* 0x09: Default VCOM and temp sensor settings */
+
+/* R01H PWR - Power Setting Register */
+/* First Parameter */
+#define PIXPAPER_PWR_VDG_EN \
+ (1 << 0) /* Bit 0: VDG_EN, 1=internal DCDC for VGP/VGN (default), 0=external */
+#define PIXPAPER_PWR_VDS_EN \
+ (1 << 1) /* Bit 1: VDS_EN, 1=internal regulator for VSP/VSN (default), 0=external */
+#define PIXPAPER_PWR_VSC_EN \
+ (1 << 2) /* Bit 2: VSC_EN, 1=internal regulator for VSPL (default), 0=external */
+#define PIXPAPER_PWR_V_MODE \
+ (1 << 3) /* Bit 3: V_MODE, 0=Mode0 (default), 1=Mode1 */
+#define PIXPAPER_PWR_CONFIG1 \
+ (PIXPAPER_PWR_VDG_EN | PIXPAPER_PWR_VDS_EN | \
+ PIXPAPER_PWR_VSC_EN) /* 0x07: Internal power for VGP/VGN, VSP/VSN, VSPL */
+
+/* Second Parameter */
+#define PIXPAPER_PWR_VGPN_MASK \
+ (3 << 0) /* Bits 1-0: VGPN, VGP/VGN voltage levels */
+#define PIXPAPER_PWR_VGPN_20V (0x0 << 0) /* 00: VGP=20V, VGN=-20V (default) */
+#define PIXPAPER_PWR_VGPN_17V (0x1 << 0) /* 01: VGP=17V, VGN=-17V */
+#define PIXPAPER_PWR_VGPN_15V (0x2 << 0) /* 10: VGP=15V, VGN=-15V */
+#define PIXPAPER_PWR_VGPN_10V (0x3 << 0) /* 11: VGP=10V, VGN=-10V */
+#define PIXPAPER_PWR_CONFIG2 PIXPAPER_PWR_VGPN_20V /* 0x00: VGP=20V, VGN=-20V */
+
+/* Third, Fourth, Sixth Parameters (VSP_1, VSPL_0, VSPL_1) */
+#define PIXPAPER_PWR_VSP_8_2V 0x22 /* VSP_1/VSPL_1: 8.2V (34 decimal) */
+#define PIXPAPER_PWR_VSPL_15V 0x78 /* VSPL_0: 15V (120 decimal) */
+
+/* Fifth Parameter (VSN_1) */
+#define PIXPAPER_PWR_VSN_4V 0x0A /* VSN_1: -4V (10 decimal) */
+
+/* R03H PFS - Power Off Sequence Setting Register */
+/* First Parameter */
+#define PIXPAPER_PFS_T_VDS_OFF_MASK \
+ (3 << 0) /* Bits 1-0: T_VDS_OFF, VSP/VSN power-off sequence */
+#define PIXPAPER_PFS_T_VDS_OFF_20MS (0x0 << 0) /* 00: 20 ms (default) */
+#define PIXPAPER_PFS_T_VDS_OFF_40MS (0x1 << 0) /* 01: 40 ms */
+#define PIXPAPER_PFS_T_VDS_OFF_60MS (0x2 << 0) /* 10: 60 ms */
+#define PIXPAPER_PFS_T_VDS_OFF_80MS (0x3 << 0) /* 11: 80 ms */
+#define PIXPAPER_PFS_T_VDPG_OFF_MASK \
+ (3 << 4) /* Bits 5-4: T_VDPG_OFF, VGP/VGN power-off sequence */
+#define PIXPAPER_PFS_T_VDPG_OFF_20MS (0x0 << 4) /* 00: 20 ms (default) */
+#define PIXPAPER_PFS_T_VDPG_OFF_40MS (0x1 << 4) /* 01: 40 ms */
+#define PIXPAPER_PFS_T_VDPG_OFF_60MS (0x2 << 4) /* 10: 60 ms */
+#define PIXPAPER_PFS_T_VDPG_OFF_80MS (0x3 << 4) /* 11: 80 ms */
+#define PIXPAPER_PFS_CONFIG1 \
+	(PIXPAPER_PFS_T_VDS_OFF_20MS | \
+	 PIXPAPER_PFS_T_VDPG_OFF_20MS) /* 0x00: Default 20 ms for VSP/VSN and VGP/VGN */
+
+/* Second Parameter */
+#define PIXPAPER_PFS_VGP_EXT_MASK \
+ (0xF << 0) /* Bits 3-0: VGP_EXT, VGP extension time */
+#define PIXPAPER_PFS_VGP_EXT_0MS (0x0 << 0) /* 0000: 0 ms */
+#define PIXPAPER_PFS_VGP_EXT_500MS (0x1 << 0) /* 0001: 500 ms */
+#define PIXPAPER_PFS_VGP_EXT_1000MS (0x2 << 0) /* 0010: 1000 ms */
+#define PIXPAPER_PFS_VGP_EXT_1500MS (0x3 << 0) /* 0011: 1500 ms */
+#define PIXPAPER_PFS_VGP_EXT_2000MS (0x4 << 0) /* 0100: 2000 ms (default) */
+#define PIXPAPER_PFS_VGP_EXT_2500MS (0x5 << 0) /* 0101: 2500 ms */
+#define PIXPAPER_PFS_VGP_EXT_3000MS (0x6 << 0) /* 0110: 3000 ms */
+#define PIXPAPER_PFS_VGP_EXT_3500MS (0x7 << 0) /* 0111: 3500 ms */
+#define PIXPAPER_PFS_VGP_EXT_4000MS (0x8 << 0) /* 1000: 4000 ms */
+#define PIXPAPER_PFS_VGP_EXT_4500MS (0x9 << 0) /* 1001: 4500 ms */
+#define PIXPAPER_PFS_VGP_EXT_5000MS (0xA << 0) /* 1010: 5000 ms */
+#define PIXPAPER_PFS_VGP_EXT_5500MS (0xB << 0) /* 1011: 5500 ms */
+#define PIXPAPER_PFS_VGP_EXT_6000MS (0xC << 0) /* 1100: 6000 ms */
+#define PIXPAPER_PFS_VGP_EXT_6500MS (0xD << 0) /* 1101: 6500 ms */
+#define PIXPAPER_PFS_VGP_LEN_MASK \
+ (0xF << 4) /* Bits 7-4: VGP_LEN, VGP at 10V during power-off */
+#define PIXPAPER_PFS_VGP_LEN_0MS (0x0 << 4) /* 0000: 0 ms */
+#define PIXPAPER_PFS_VGP_LEN_500MS (0x1 << 4) /* 0001: 500 ms */
+#define PIXPAPER_PFS_VGP_LEN_1000MS (0x2 << 4) /* 0010: 1000 ms */
+#define PIXPAPER_PFS_VGP_LEN_1500MS (0x3 << 4) /* 0011: 1500 ms */
+#define PIXPAPER_PFS_VGP_LEN_2000MS (0x4 << 4) /* 0100: 2000 ms */
+#define PIXPAPER_PFS_VGP_LEN_2500MS (0x5 << 4) /* 0101: 2500 ms (default) */
+#define PIXPAPER_PFS_VGP_LEN_3000MS (0x6 << 4) /* 0110: 3000 ms */
+#define PIXPAPER_PFS_VGP_LEN_3500MS (0x7 << 4) /* 0111: 3500 ms */
+#define PIXPAPER_PFS_VGP_LEN_4000MS (0x8 << 4) /* 1000: 4000 ms */
+#define PIXPAPER_PFS_VGP_LEN_4500MS (0x9 << 4) /* 1001: 4500 ms */
+#define PIXPAPER_PFS_VGP_LEN_5000MS (0xA << 4) /* 1010: 5000 ms */
+#define PIXPAPER_PFS_VGP_LEN_5500MS (0xB << 4) /* 1011: 5500 ms */
+#define PIXPAPER_PFS_VGP_LEN_6000MS (0xC << 4) /* 1100: 6000 ms */
+#define PIXPAPER_PFS_VGP_LEN_6500MS (0xD << 4) /* 1101: 6500 ms */
+#define PIXPAPER_PFS_CONFIG2 \
+	(PIXPAPER_PFS_VGP_EXT_1000MS | \
+	 PIXPAPER_PFS_VGP_LEN_2500MS) /* 0x52: VGP extension 1000 ms, VGP at 10V for 2500 ms */
+
+/* Third Parameter */
+#define PIXPAPER_PFS_XON_LEN_MASK \
+ (0xF << 0) /* Bits 3-0: XON_LEN, XON enable time */
+#define PIXPAPER_PFS_XON_LEN_0MS (0x0 << 0) /* 0000: 0 ms */
+#define PIXPAPER_PFS_XON_LEN_500MS (0x1 << 0) /* 0001: 500 ms */
+#define PIXPAPER_PFS_XON_LEN_1000MS (0x2 << 0) /* 0010: 1000 ms */
+#define PIXPAPER_PFS_XON_LEN_1500MS (0x3 << 0) /* 0011: 1500 ms */
+#define PIXPAPER_PFS_XON_LEN_2000MS (0x4 << 0) /* 0100: 2000 ms (default) */
+#define PIXPAPER_PFS_XON_LEN_2500MS (0x5 << 0) /* 0101: 2500 ms */
+#define PIXPAPER_PFS_XON_LEN_3000MS (0x6 << 0) /* 0110: 3000 ms */
+#define PIXPAPER_PFS_XON_LEN_3500MS (0x7 << 0) /* 0111: 3500 ms */
+#define PIXPAPER_PFS_XON_LEN_4000MS (0x8 << 0) /* 1000: 4000 ms */
+#define PIXPAPER_PFS_XON_LEN_4500MS (0x9 << 0) /* 1001: 4500 ms */
+#define PIXPAPER_PFS_XON_LEN_5000MS (0xA << 0) /* 1010: 5000 ms */
+#define PIXPAPER_PFS_XON_LEN_5500MS (0xB << 0) /* 1011: 5500 ms */
+#define PIXPAPER_PFS_XON_LEN_6000MS (0xC << 0) /* 1100: 6000 ms */
+#define PIXPAPER_PFS_XON_DLY_MASK \
+ (0xF << 4) /* Bits 7-4: XON_DLY, XON delay time */
+#define PIXPAPER_PFS_XON_DLY_0MS (0x0 << 4) /* 0000: 0 ms */
+#define PIXPAPER_PFS_XON_DLY_500MS (0x1 << 4) /* 0001: 500 ms */
+#define PIXPAPER_PFS_XON_DLY_1000MS (0x2 << 4) /* 0010: 1000 ms */
+#define PIXPAPER_PFS_XON_DLY_1500MS (0x3 << 4) /* 0011: 1500 ms */
+#define PIXPAPER_PFS_XON_DLY_2000MS (0x4 << 4) /* 0100: 2000 ms (default) */
+#define PIXPAPER_PFS_XON_DLY_2500MS (0x5 << 4) /* 0101: 2500 ms */
+#define PIXPAPER_PFS_XON_DLY_3000MS (0x6 << 4) /* 0110: 3000 ms */
+#define PIXPAPER_PFS_XON_DLY_3500MS (0x7 << 4) /* 0111: 3500 ms */
+#define PIXPAPER_PFS_XON_DLY_4000MS (0x8 << 4) /* 1000: 4000 ms */
+#define PIXPAPER_PFS_XON_DLY_4500MS (0x9 << 4) /* 1001: 4500 ms */
+#define PIXPAPER_PFS_XON_DLY_5000MS (0xA << 4) /* 1010: 5000 ms */
+#define PIXPAPER_PFS_XON_DLY_5500MS (0xB << 4) /* 1011: 5500 ms */
+#define PIXPAPER_PFS_XON_DLY_6000MS (0xC << 4) /* 1100: 6000 ms */
+#define PIXPAPER_PFS_CONFIG3 \
+ (PIXPAPER_PFS_XON_LEN_2000MS | \
+ PIXPAPER_PFS_XON_DLY_2000MS) /* 0x44: XON enable and delay at 2000 ms */
+
+/* R06H BTST - Booster Soft Start Command */
+/* First Parameter */
+#define PIXPAPER_BTST_PHA_SFT_MASK \
+ (3 << 0) /* Bits 1-0: PHA_SFT, soft start period for phase A */
+#define PIXPAPER_BTST_PHA_SFT_10MS (0x0 << 0) /* 00: 10 ms (default) */
+#define PIXPAPER_BTST_PHA_SFT_20MS (0x1 << 0) /* 01: 20 ms */
+#define PIXPAPER_BTST_PHA_SFT_30MS (0x2 << 0) /* 10: 30 ms */
+#define PIXPAPER_BTST_PHA_SFT_40MS (0x3 << 0) /* 11: 40 ms */
+#define PIXPAPER_BTST_PHB_SFT_MASK \
+ (3 << 2) /* Bits 3-2: PHB_SFT, soft start period for phase B */
+#define PIXPAPER_BTST_PHB_SFT_10MS (0x0 << 2) /* 00: 10 ms (default) */
+#define PIXPAPER_BTST_PHB_SFT_20MS (0x1 << 2) /* 01: 20 ms */
+#define PIXPAPER_BTST_PHB_SFT_30MS (0x2 << 2) /* 10: 30 ms */
+#define PIXPAPER_BTST_PHB_SFT_40MS (0x3 << 2) /* 11: 40 ms */
+#define PIXPAPER_BTST_CONFIG1 \
+ (PIXPAPER_BTST_PHA_SFT_40MS | \
+ PIXPAPER_BTST_PHB_SFT_40MS) /* 0x0F: 40 ms for phase A and B */
+
+/* Second to Seventh Parameters (Driving Strength or Minimum OFF Time) */
+#define PIXPAPER_BTST_CONFIG2 0x0A /* Strength11 */
+#define PIXPAPER_BTST_CONFIG3 0x2F /* Period48 */
+#define PIXPAPER_BTST_CONFIG4 0x25 /* Strength38 */
+#define PIXPAPER_BTST_CONFIG5 0x22 /* Period35 */
+#define PIXPAPER_BTST_CONFIG6 0x2E /* Strength47 */
+#define PIXPAPER_BTST_CONFIG7 0x21 /* Period34 */
+
+/* R12H: DRF (Display Refresh) */
+#define PIXPAPER_DRF_VCOM_AC 0x00 /* AC VCOM: VCOM follows LUTC (default) */
+#define PIXPAPER_DRF_VCOM_DC 0x01 /* DC VCOM: VCOM fixed to VCOMDC */
+
+/* R30H PLL - PLL Control Register */
+/* First Parameter */
+#define PIXPAPER_PLL_FR_MASK (0x7 << 0) /* Bits 2-0: FR, frame rate */
+#define PIXPAPER_PLL_FR_12_5HZ (0x0 << 0) /* 000: 12.5 Hz */
+#define PIXPAPER_PLL_FR_25HZ (0x1 << 0) /* 001: 25 Hz */
+#define PIXPAPER_PLL_FR_50HZ (0x2 << 0) /* 010: 50 Hz (default) */
+#define PIXPAPER_PLL_FR_65HZ (0x3 << 0) /* 011: 65 Hz */
+#define PIXPAPER_PLL_FR_75HZ (0x4 << 0) /* 100: 75 Hz */
+#define PIXPAPER_PLL_FR_85HZ (0x5 << 0) /* 101: 85 Hz */
+#define PIXPAPER_PLL_FR_100HZ (0x6 << 0) /* 110: 100 Hz */
+#define PIXPAPER_PLL_FR_120HZ (0x7 << 0) /* 111: 120 Hz */
+#define PIXPAPER_PLL_DFR \
+ (1 << 3) /* Bit 3: Dynamic frame rate, 0=disabled (default), 1=enabled */
+#define PIXPAPER_PLL_CONFIG \
+ (PIXPAPER_PLL_FR_50HZ) /* 0x02: 50 Hz, dynamic frame rate disabled */
+
+/* R41H TSE - Temperature Sensor Calibration Register */
+/* First Parameter */
+#define PIXPAPER_TSE_TO_MASK \
+ (0xF << 0) /* Bits 3-0: TO[3:0], temperature offset */
+#define PIXPAPER_TSE_TO_POS_0C (0x0 << 0) /* 0000: +0°C (default) */
+#define PIXPAPER_TSE_TO_POS_0_5C (0x1 << 0) /* 0001: +0.5°C */
+#define PIXPAPER_TSE_TO_POS_1C (0x2 << 0) /* 0010: +1°C */
+#define PIXPAPER_TSE_TO_POS_1_5C (0x3 << 0) /* 0011: +1.5°C */
+#define PIXPAPER_TSE_TO_POS_2C (0x4 << 0) /* 0100: +2°C */
+#define PIXPAPER_TSE_TO_POS_2_5C (0x5 << 0) /* 0101: +2.5°C */
+#define PIXPAPER_TSE_TO_POS_3C (0x6 << 0) /* 0110: +3°C */
+#define PIXPAPER_TSE_TO_POS_3_5C (0x7 << 0) /* 0111: +3.5°C */
+#define PIXPAPER_TSE_TO_NEG_4C (0x8 << 0) /* 1000: -4°C */
+#define PIXPAPER_TSE_TO_NEG_3_5C (0x9 << 0) /* 1001: -3.5°C */
+#define PIXPAPER_TSE_TO_NEG_3C (0xA << 0) /* 1010: -3°C */
+#define PIXPAPER_TSE_TO_NEG_2_5C (0xB << 0) /* 1011: -2.5°C */
+#define PIXPAPER_TSE_TO_NEG_2C (0xC << 0) /* 1100: -2°C */
+#define PIXPAPER_TSE_TO_NEG_1_5C (0xD << 0) /* 1101: -1.5°C */
+#define PIXPAPER_TSE_TO_NEG_1C (0xE << 0) /* 1110: -1°C */
+#define PIXPAPER_TSE_TO_NEG_0_5C (0xF << 0) /* 1111: -0.5°C */
+#define PIXPAPER_TSE_TO_FINE_MASK \
+ (0x3 << 4) /* Bits 5-4: TO[5:4], fine adjustment for positive offsets */
+#define PIXPAPER_TSE_TO_FINE_0C (0x0 << 4) /* 00: +0.0°C (default) */
+#define PIXPAPER_TSE_TO_FINE_0_25C (0x1 << 4) /* 01: +0.25°C */
+#define PIXPAPER_TSE_ENABLE \
+ (0 << 7) /* Bit 7: TSE, 0=internal sensor enabled (default), 1=disabled (external) */
+#define PIXPAPER_TSE_DISABLE \
+ (1 << 7) /* Bit 7: TSE, 1=internal sensor disabled, use external */
+#define PIXPAPER_TSE_CONFIG \
+ (PIXPAPER_TSE_TO_POS_0C | PIXPAPER_TSE_TO_FINE_0C | \
+ PIXPAPER_TSE_ENABLE) /* 0x00: Internal sensor enabled, +0°C offset */
+
+/* R4DH */
+#define PIXPAPER_UNKNOWN_4D_CONFIG \
+ 0x78 /* This value is essential for initialization, derived from userspace examples. */
+
+/* R50H CDI - VCOM and DATA Interval Setting Register */
+/* First Parameter */
+#define PIXPAPER_CDI_INTERVAL_MASK \
+ (0xF << 0) /* Bits 3-0: CDI[3:0], VCOM and data interval (hsync) */
+#define PIXPAPER_CDI_17_HSYNC (0x0 << 0) /* 0000: 17 hsync */
+#define PIXPAPER_CDI_16_HSYNC (0x1 << 0) /* 0001: 16 hsync */
+#define PIXPAPER_CDI_15_HSYNC (0x2 << 0) /* 0010: 15 hsync */
+#define PIXPAPER_CDI_14_HSYNC (0x3 << 0) /* 0011: 14 hsync */
+#define PIXPAPER_CDI_13_HSYNC (0x4 << 0) /* 0100: 13 hsync */
+#define PIXPAPER_CDI_12_HSYNC (0x5 << 0) /* 0101: 12 hsync */
+#define PIXPAPER_CDI_11_HSYNC (0x6 << 0) /* 0110: 11 hsync */
+#define PIXPAPER_CDI_10_HSYNC (0x7 << 0) /* 0111: 10 hsync (default) */
+#define PIXPAPER_CDI_9_HSYNC (0x8 << 0) /* 1000: 9 hsync */
+#define PIXPAPER_CDI_8_HSYNC (0x9 << 0) /* 1001: 8 hsync */
+#define PIXPAPER_CDI_7_HSYNC (0xA << 0) /* 1010: 7 hsync */
+#define PIXPAPER_CDI_6_HSYNC (0xB << 0) /* 1011: 6 hsync */
+#define PIXPAPER_CDI_5_HSYNC (0xC << 0) /* 1100: 5 hsync */
+#define PIXPAPER_CDI_4_HSYNC (0xD << 0) /* 1101: 4 hsync */
+#define PIXPAPER_CDI_3_HSYNC (0xE << 0) /* 1110: 3 hsync */
+#define PIXPAPER_CDI_2_HSYNC (0xF << 0) /* 1111: 2 hsync */
+#define PIXPAPER_CDI_DDX \
+ (1 << 4) /* Bit 4: DDX, 0=grayscale mapping 0, 1=grayscale mapping 1 (default) */
+#define PIXPAPER_CDI_VBD_MASK \
+ (0x7 << 5) /* Bits 7-5: VBD[2:0], border data selection */
+#define PIXPAPER_CDI_VBD_FLOAT (0x0 << 5) /* 000: Floating (DDX=0 or 1) */
+#define PIXPAPER_CDI_VBD_GRAY3_DDX0 \
+ (0x1 << 5) /* 001: Gray3 (border_buf=011) when DDX=0 */
+#define PIXPAPER_CDI_VBD_GRAY2_DDX0 \
+ (0x2 << 5) /* 010: Gray2 (border_buf=010) when DDX=0 */
+#define PIXPAPER_CDI_VBD_GRAY1_DDX0 \
+ (0x3 << 5) /* 011: Gray1 (border_buf=001) when DDX=0 */
+#define PIXPAPER_CDI_VBD_GRAY0_DDX0 \
+ (0x4 << 5) /* 100: Gray0 (border_buf=000) when DDX=0 */
+#define PIXPAPER_CDI_VBD_GRAY0_DDX1 \
+ (0x0 << 5) /* 000: Gray0 (border_buf=000) when DDX=1 */
+#define PIXPAPER_CDI_VBD_GRAY1_DDX1 \
+ (0x1 << 5) /* 001: Gray1 (border_buf=001) when DDX=1 */
+#define PIXPAPER_CDI_VBD_GRAY2_DDX1 \
+ (0x2 << 5) /* 010: Gray2 (border_buf=010) when DDX=1 */
+#define PIXPAPER_CDI_VBD_GRAY3_DDX1 \
+ (0x3 << 5) /* 011: Gray3 (border_buf=011) when DDX=1 */
+#define PIXPAPER_CDI_VBD_FLOAT_DDX1 (0x4 << 5) /* 100: Floating when DDX=1 */
+#define PIXPAPER_CDI_CONFIG \
+ (PIXPAPER_CDI_10_HSYNC | PIXPAPER_CDI_DDX | \
+ PIXPAPER_CDI_VBD_GRAY1_DDX1) /* 0x37: 10 hsync, DDX=1, border Gray1 */
+
+/* R60H */
+#define PIXPAPER_UNKNOWN_60_CONFIG1 \
+ 0x02 /* This value is essential for initialization, derived from userspace examples. */
+#define PIXPAPER_UNKNOWN_60_CONFIG2 \
+ 0x02 /* This value is essential for initialization, derived from userspace examples. */
+
+/* R61H TRES - Resolution Setting Register */
+#define PIXPAPER_TRES_HRES_H \
+ ((PIXPAPER_PANEL_BUFFER_WIDTH >> 8) & \
+ 0xFF) /* HRES[9:8]: High byte of horizontal resolution (128) */
+#define PIXPAPER_TRES_HRES_L \
+ (PIXPAPER_PANEL_BUFFER_WIDTH & \
+ 0xFF) /* HRES[7:0]: Low byte of horizontal resolution (128 = 0x80) */
+#define PIXPAPER_TRES_VRES_H \
+ ((PIXPAPER_HEIGHT >> 8) & \
+ 0xFF) /* VRES[9:8]: High byte of vertical resolution (250) */
+#define PIXPAPER_TRES_VRES_L \
+ (PIXPAPER_HEIGHT & \
+ 0xFF) /* VRES[7:0]: Low byte of vertical resolution (250 = 0xFA) */
+
+/* R65H GSST - Gate/Source Start Setting Register */
+#define PIXPAPER_GSST_S_START 0x00 /* S_Start[7:0]: First source line (S0) */
+#define PIXPAPER_GSST_RESERVED 0x00 /* Reserved byte */
+#define PIXPAPER_GSST_G_START_H \
+ 0x00 /* G_Start[8]: High bit of first gate line (G0) */
+#define PIXPAPER_GSST_G_START_L \
+ 0x00 /* G_Start[7:0]: Low byte of first gate line (G0) */
+
+/* RB4H */
+#define PIXPAPER_UNKNOWN_B4_CONFIG \
+ 0xD0 /* This value is essential for initialization, derived from userspace examples. */
+
+/* RB5H */
+#define PIXPAPER_UNKNOWN_B5_CONFIG \
+ 0x03 /* This value is essential for initialization, derived from userspace examples. */
+
+/* RE0H */
+#define PIXPAPER_UNKNOWN_E0_CONFIG \
+ 0x00 /* This value is essential for initialization, derived from userspace examples. */
+
+/* RE3H PWS - Power Saving Register */
+/* First Parameter */
+#define PIXPAPER_PWS_VCOM_W_MASK \
+	(0xF << 4) /* Bits 7-4: VCOM_W[3:0], VCOM power-saving width (line periods) */
+#define PIXPAPER_PWS_VCOM_W_0 (0x0 << 4) /* 0000: 0 line periods */
+#define PIXPAPER_PWS_VCOM_W_1 (0x1 << 4) /* 0001: 1 line period */
+#define PIXPAPER_PWS_VCOM_W_2 (0x2 << 4) /* 0010: 2 line periods */
+#define PIXPAPER_PWS_VCOM_W_3 (0x3 << 4) /* 0011: 3 line periods */
+#define PIXPAPER_PWS_VCOM_W_4 (0x4 << 4) /* 0100: 4 line periods */
+#define PIXPAPER_PWS_VCOM_W_5 (0x5 << 4) /* 0101: 5 line periods */
+#define PIXPAPER_PWS_VCOM_W_6 (0x6 << 4) /* 0110: 6 line periods */
+#define PIXPAPER_PWS_VCOM_W_7 (0x7 << 4) /* 0111: 7 line periods */
+#define PIXPAPER_PWS_VCOM_W_8 (0x8 << 4) /* 1000: 8 line periods */
+#define PIXPAPER_PWS_VCOM_W_9 (0x9 << 4) /* 1001: 9 line periods */
+#define PIXPAPER_PWS_VCOM_W_10 (0xA << 4) /* 1010: 10 line periods */
+#define PIXPAPER_PWS_VCOM_W_11 (0xB << 4) /* 1011: 11 line periods */
+#define PIXPAPER_PWS_VCOM_W_12 (0xC << 4) /* 1100: 12 line periods */
+#define PIXPAPER_PWS_VCOM_W_13 (0xD << 4) /* 1101: 13 line periods */
+#define PIXPAPER_PWS_VCOM_W_14 (0xE << 4) /* 1110: 14 line periods */
+#define PIXPAPER_PWS_VCOM_W_15 (0xF << 4) /* 1111: 15 line periods */
+#define PIXPAPER_PWS_SD_W_MASK \
+ (0xF << 0) /* Bits 3-0: SD_W[3:0], source power-saving width (660 ns units) */
+#define PIXPAPER_PWS_SD_W_0 (0x0 << 0) /* 0000: 0 ns */
+#define PIXPAPER_PWS_SD_W_1 (0x1 << 0) /* 0001: 660 ns */
+#define PIXPAPER_PWS_SD_W_2 (0x2 << 0) /* 0010: 1320 ns */
+#define PIXPAPER_PWS_SD_W_3 (0x3 << 0) /* 0011: 1980 ns */
+#define PIXPAPER_PWS_SD_W_4 (0x4 << 0) /* 0100: 2640 ns */
+#define PIXPAPER_PWS_SD_W_5 (0x5 << 0) /* 0101: 3300 ns */
+#define PIXPAPER_PWS_SD_W_6 (0x6 << 0) /* 0110: 3960 ns */
+#define PIXPAPER_PWS_SD_W_7 (0x7 << 0) /* 0111: 4620 ns */
+#define PIXPAPER_PWS_SD_W_8 (0x8 << 0) /* 1000: 5280 ns */
+#define PIXPAPER_PWS_SD_W_9 (0x9 << 0) /* 1001: 5940 ns */
+#define PIXPAPER_PWS_SD_W_10 (0xA << 0) /* 1010: 6600 ns */
+#define PIXPAPER_PWS_SD_W_11 (0xB << 0) /* 1011: 7260 ns */
+#define PIXPAPER_PWS_SD_W_12 (0xC << 0) /* 1100: 7920 ns */
+#define PIXPAPER_PWS_SD_W_13 (0xD << 0) /* 1101: 8580 ns */
+#define PIXPAPER_PWS_SD_W_14 (0xE << 0) /* 1110: 9240 ns */
+#define PIXPAPER_PWS_SD_W_15 (0xF << 0) /* 1111: 9900 ns */
+#define PIXPAPER_PWS_CONFIG \
+ (PIXPAPER_PWS_VCOM_W_2 | \
+ PIXPAPER_PWS_SD_W_2) /* 0x22: VCOM 2 line periods (160 µs), source 1320 ns */
+
+/* RE7H */
+#define PIXPAPER_UNKNOWN_E7_CONFIG \
+ 0x1C /* This value is essential for initialization, derived from userspace examples. */
+
+/* RE9H */
+#define PIXPAPER_UNKNOWN_E9_CONFIG \
+ 0x01 /* This value is essential for initialization, derived from userspace examples. */
+
+MODULE_IMPORT_NS("DMA_BUF");
+
+/*
+ * The panel has a visible resolution of 122x250. However, the
+ * controller requires the horizontal resolution to be aligned to 128
+ * pixels. No porch or sync timing values are provided in the
+ * datasheet, so we define minimal placeholder values to satisfy the
+ * DRM framework.
+ */
+
+/* Panel visible resolution */
+#define PIXPAPER_WIDTH 122
+#define PIXPAPER_HEIGHT 250
+
+/* Controller requires 128 horizontal pixels total (for memory alignment) */
+#define PIXPAPER_HTOTAL 128
+#define PIXPAPER_HFP 2
+#define PIXPAPER_HSYNC 2
+#define PIXPAPER_HBP (PIXPAPER_HTOTAL - PIXPAPER_WIDTH - PIXPAPER_HFP - PIXPAPER_HSYNC)
+
+/*
+ * According to the datasheet, the total vertical blanking must be 55 lines,
+ * regardless of how the vertical back porch is set.
+ * Here we allocate VFP=2, VSYNC=2, and VBP=51 to sum up to 55 lines.
+ * Total vertical lines = 250 (visible) + 55 (blanking) = 305.
+ */
+#define PIXPAPER_VTOTAL (250 + 55)
+#define PIXPAPER_VFP 2
+#define PIXPAPER_VSYNC 2
+#define PIXPAPER_VBP (55 - PIXPAPER_VFP - PIXPAPER_VSYNC)
+
+/*
+ * Pixel clock calculation:
+ * pixel_clock = htotal * vtotal * refresh_rate
+ * = 128 * 305 * 50
+ * = 1,952,000 Hz = 1952 kHz
+ */
+#define PIXPAPER_PIXEL_CLOCK 1952
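+/* struct drm_display_mode.clock is expressed in kHz, hence 1952. */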
+
+#define PIXPAPER_WIDTH_MM 24 /* approximate from 23.7046mm */
+#define PIXPAPER_HEIGHT_MM 49 /* approximate from 48.55mm */
+
+#define PIXPAPER_SPI_BITS_PER_WORD 8
+#define PIXPAPER_SPI_SPEED_DEFAULT 1000000
+
+#define PIXPAPER_PANEL_BUFFER_WIDTH 128
+#define PIXPAPER_PANEL_BUFFER_TWO_BYTES_PER_ROW (PIXPAPER_PANEL_BUFFER_WIDTH / 4)
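+/*
+ * At 2 bits per pixel a byte carries 4 pixels, so each of the 250 rows
+ * is transmitted as 128 / 4 = 32 bytes. The 6 pixels of padding beyond
+ * the 122 visible columns are sent as white (0b01) by
+ * pack_pixels_to_byte().
+ */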
+
+#define PIXPAPER_COLOR_THRESHOLD_LOW_CHANNEL 60
+#define PIXPAPER_COLOR_THRESHOLD_HIGH_CHANNEL 200
+#define PIXPAPER_COLOR_THRESHOLD_YELLOW_MIN_GREEN 180
+
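+/*
+ * Sticky error context threaded through the SPI helpers: once
+ * errno_code is set, every later pixpaper_send_cmd() or
+ * pixpaper_send_data() call is a no-op, so long command sequences need
+ * only one check, e.g.:
+ *
+ *	pixpaper_send_cmd(panel, cmd, &err);
+ *	pixpaper_send_data(panel, arg, &err);
+ *	if (err.errno_code)
+ *		goto fail;
+ */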
+struct pixpaper_error_ctx {
+ int errno_code;
+};
+
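+/*
+ * Driver state. Besides the DRM mode objects, the driver owns the SPI
+ * device and three GPIOs: "reset" (panel reset line), "busy" (status
+ * input polled by pixpaper_wait_for_panel()) and "dc" (data/command
+ * select, driven low for commands and high for data).
+ */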
+struct pixpaper_panel {
+ struct drm_device drm;
+ struct drm_plane plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+
+ struct spi_device *spi;
+ struct gpio_desc *reset;
+ struct gpio_desc *busy;
+ struct gpio_desc *dc;
+};
+
+static inline struct pixpaper_panel *to_pixpaper_panel(struct drm_device *drm)
+{
+ return container_of(drm, struct pixpaper_panel, drm);
+}
+
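+/*
+ * Block until the controller reports idle: give the panel ~1 ms to
+ * assert busy, then poll the "busy" GPIO every 100-200 us until it
+ * reads 1, warning and bailing out after 10 seconds.
+ */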
+static void pixpaper_wait_for_panel(struct pixpaper_panel *panel)
+{
+ unsigned int timeout_ms = 10000;
+ unsigned long timeout_jiffies = jiffies + msecs_to_jiffies(timeout_ms);
+
+ usleep_range(1000, 1500);
+ while (gpiod_get_value_cansleep(panel->busy) != 1) {
+ if (time_after(jiffies, timeout_jiffies)) {
+ drm_warn(&panel->drm, "Busy wait timed out\n");
+ return;
+ }
+ usleep_range(100, 200);
+ }
+}
+
+static void pixpaper_spi_sync(struct spi_device *spi, struct spi_message *msg,
+ struct pixpaper_error_ctx *err)
+{
+ if (err->errno_code)
+ return;
+
+ int ret = spi_sync(spi, msg);
+
+ if (ret < 0)
+ err->errno_code = ret;
+}
+
+static void pixpaper_send_cmd(struct pixpaper_panel *panel, u8 cmd,
+ struct pixpaper_error_ctx *err)
+{
+ if (err->errno_code)
+ return;
+
+ struct spi_transfer xfer = {
+ .tx_buf = &cmd,
+ .len = 1,
+ };
+ struct spi_message msg;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ gpiod_set_value_cansleep(panel->dc, 0);
+ usleep_range(1, 5);
+ pixpaper_spi_sync(panel->spi, &msg, err);
+}
+
+static void pixpaper_send_data(struct pixpaper_panel *panel, u8 data,
+ struct pixpaper_error_ctx *err)
+{
+ if (err->errno_code)
+ return;
+
+ struct spi_transfer xfer = {
+ .tx_buf = &data,
+ .len = 1,
+ };
+ struct spi_message msg;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ gpiod_set_value_cansleep(panel->dc, 1);
+ usleep_range(1, 5);
+ pixpaper_spi_sync(panel->spi, &msg, err);
+}
+
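+/*
+ * Full panel bring-up as derived from the vendor's userspace examples:
+ * pulse the reset line, then send each register's command/parameter
+ * group in sequence, waiting on the busy line between groups. Any SPI
+ * error sticks in 'err' and aborts the sequence at the next check.
+ */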
+static int pixpaper_panel_hw_init(struct pixpaper_panel *panel)
+{
+ struct pixpaper_error_ctx err = { .errno_code = 0 };
+
+ gpiod_set_value_cansleep(panel->reset, 0);
+ msleep(50);
+ gpiod_set_value_cansleep(panel->reset, 1);
+ msleep(50);
+
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_UNKNOWN_4D, &err);
+ pixpaper_send_data(panel, PIXPAPER_UNKNOWN_4D_CONFIG, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_PANEL_SETTING, &err);
+ pixpaper_send_data(panel, PIXPAPER_PSR_CONFIG, &err);
+ pixpaper_send_data(panel, PIXPAPER_PSR_CONFIG2, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_POWER_SETTING, &err);
+ pixpaper_send_data(panel, PIXPAPER_PWR_CONFIG1, &err);
+ pixpaper_send_data(panel, PIXPAPER_PWR_CONFIG2, &err);
+ pixpaper_send_data(panel, PIXPAPER_PWR_VSP_8_2V, &err);
+ pixpaper_send_data(panel, PIXPAPER_PWR_VSPL_15V, &err);
+ pixpaper_send_data(panel, PIXPAPER_PWR_VSN_4V, &err);
+ pixpaper_send_data(panel, PIXPAPER_PWR_VSP_8_2V, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_POWER_OFF_SEQUENCE, &err);
+ pixpaper_send_data(panel, PIXPAPER_PFS_CONFIG1, &err);
+ pixpaper_send_data(panel, PIXPAPER_PFS_CONFIG2, &err);
+ pixpaper_send_data(panel, PIXPAPER_PFS_CONFIG3, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_BOOSTER_SOFT_START, &err);
+ pixpaper_send_data(panel, PIXPAPER_BTST_CONFIG1, &err);
+ pixpaper_send_data(panel, PIXPAPER_BTST_CONFIG2, &err);
+ pixpaper_send_data(panel, PIXPAPER_BTST_CONFIG3, &err);
+ pixpaper_send_data(panel, PIXPAPER_BTST_CONFIG4, &err);
+ pixpaper_send_data(panel, PIXPAPER_BTST_CONFIG5, &err);
+ pixpaper_send_data(panel, PIXPAPER_BTST_CONFIG6, &err);
+ pixpaper_send_data(panel, PIXPAPER_BTST_CONFIG7, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_PLL_CONTROL, &err);
+ pixpaper_send_data(panel, PIXPAPER_PLL_CONFIG, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_TEMP_SENSOR_CALIB, &err);
+ pixpaper_send_data(panel, PIXPAPER_TSE_CONFIG, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_VCOM_INTERVAL, &err);
+ pixpaper_send_data(panel, PIXPAPER_CDI_CONFIG, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_UNKNOWN_60, &err);
+ pixpaper_send_data(panel, PIXPAPER_UNKNOWN_60_CONFIG1, &err);
+ pixpaper_send_data(panel, PIXPAPER_UNKNOWN_60_CONFIG2, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_RESOLUTION_SETTING, &err);
+ pixpaper_send_data(panel, PIXPAPER_TRES_HRES_H, &err);
+ pixpaper_send_data(panel, PIXPAPER_TRES_HRES_L, &err);
+ pixpaper_send_data(panel, PIXPAPER_TRES_VRES_H, &err);
+ pixpaper_send_data(panel, PIXPAPER_TRES_VRES_L, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_GATE_SOURCE_START, &err);
+ pixpaper_send_data(panel, PIXPAPER_GSST_S_START, &err);
+ pixpaper_send_data(panel, PIXPAPER_GSST_RESERVED, &err);
+ pixpaper_send_data(panel, PIXPAPER_GSST_G_START_H, &err);
+ pixpaper_send_data(panel, PIXPAPER_GSST_G_START_L, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_UNKNOWN_E7, &err);
+ pixpaper_send_data(panel, PIXPAPER_UNKNOWN_E7_CONFIG, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_POWER_SAVING, &err);
+ pixpaper_send_data(panel, PIXPAPER_PWS_CONFIG, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_UNKNOWN_E0, &err);
+ pixpaper_send_data(panel, PIXPAPER_UNKNOWN_E0_CONFIG, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_UNKNOWN_B4, &err);
+ pixpaper_send_data(panel, PIXPAPER_UNKNOWN_B4_CONFIG, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_UNKNOWN_B5, &err);
+ pixpaper_send_data(panel, PIXPAPER_UNKNOWN_B5_CONFIG, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_UNKNOWN_E9, &err);
+ pixpaper_send_data(panel, PIXPAPER_UNKNOWN_E9_CONFIG, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ return 0;
+
+init_fail:
+ drm_err(&panel->drm, "Hardware initialization failed (err=%d)\n",
+ err.errno_code);
+ return err.errno_code;
+}
+
+/*
+ * Convert framebuffer pixels to 2-bit e-paper format:
+ * 00 - Black
+ * 01 - White
+ * 10 - Yellow
+ * 11 - Red
+ */
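+/*
+ * For example, a group of four pixels classified white, black, yellow,
+ * red packs MSB-first into 0b01001011 = 0x4B.
+ */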
+static u8 pack_pixels_to_byte(__le32 *src_pixels, int i, int j,
+ struct drm_framebuffer *fb)
+{
+ u8 packed_byte = 0;
+ int k;
+
+ for (k = 0; k < 4; k++) {
+ int current_pixel_x = j * 4 + k;
+ u8 two_bit_val;
+
+ if (current_pixel_x < PIXPAPER_WIDTH) {
+ u32 pixel_offset =
+ (i * (fb->pitches[0] / 4)) + current_pixel_x;
+ u32 pixel = le32_to_cpu(src_pixels[pixel_offset]);
+ u32 r = (pixel >> 16) & 0xFF;
+ u32 g = (pixel >> 8) & 0xFF;
+ u32 b = pixel & 0xFF;
+
+ if (r < PIXPAPER_COLOR_THRESHOLD_LOW_CHANNEL &&
+ g < PIXPAPER_COLOR_THRESHOLD_LOW_CHANNEL &&
+ b < PIXPAPER_COLOR_THRESHOLD_LOW_CHANNEL) {
+ two_bit_val = 0b00;
+ } else if (r > PIXPAPER_COLOR_THRESHOLD_HIGH_CHANNEL &&
+ g > PIXPAPER_COLOR_THRESHOLD_HIGH_CHANNEL &&
+ b > PIXPAPER_COLOR_THRESHOLD_HIGH_CHANNEL) {
+ two_bit_val = 0b01;
+ } else if (r > PIXPAPER_COLOR_THRESHOLD_HIGH_CHANNEL &&
+ g < PIXPAPER_COLOR_THRESHOLD_LOW_CHANNEL &&
+ b < PIXPAPER_COLOR_THRESHOLD_LOW_CHANNEL) {
+ two_bit_val = 0b11;
+ } else if (r > PIXPAPER_COLOR_THRESHOLD_HIGH_CHANNEL &&
+ g > PIXPAPER_COLOR_THRESHOLD_YELLOW_MIN_GREEN &&
+ b < PIXPAPER_COLOR_THRESHOLD_LOW_CHANNEL) {
+ two_bit_val = 0b10;
+ } else {
+ two_bit_val = 0b01;
+ }
+ } else {
+ two_bit_val = 0b01;
+ }
+
+ packed_byte |= two_bit_val << ((3 - k) * 2);
+ }
+
+ return packed_byte;
+}
+
+static int pixpaper_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state =
+ drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc *new_crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state = NULL;
+ int ret;
+
+ if (new_crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
+
+ ret = drm_atomic_helper_check_plane_state(new_plane_state,
+ new_crtc_state, DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING, false, false);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int pixpaper_crtc_helper_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_new_crtc_state(state, crtc);
+
+ if (!crtc_state->enable)
+ return 0;
+
+ return drm_atomic_helper_check_crtc_primary_plane(crtc_state);
+}
+
+static void pixpaper_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct pixpaper_panel *panel = to_pixpaper_panel(crtc->dev);
+ struct drm_device *drm = &panel->drm;
+ int idx;
+ struct pixpaper_error_ctx err = { .errno_code = 0 };
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_POWER_ON, &err);
+ if (err.errno_code) {
+ drm_err_once(drm, "Failed to send PON command: %d\n", err.errno_code);
+ goto exit_drm_dev;
+ }
+
+ pixpaper_wait_for_panel(panel);
+
+ drm_dbg(drm, "Panel enabled and powered on\n");
+
+exit_drm_dev:
+ drm_dev_exit(idx);
+}
+
+static void pixpaper_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct pixpaper_panel *panel = to_pixpaper_panel(crtc->dev);
+ struct drm_device *drm = &panel->drm;
+ struct pixpaper_error_ctx err = { .errno_code = 0 };
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_POWER_OFF, &err);
+ if (err.errno_code) {
+ drm_err_once(drm, "Failed to send POF command: %d\n", err.errno_code);
+ goto exit_drm_dev;
+ }
+ pixpaper_wait_for_panel(panel);
+
+ drm_dbg(drm, "Panel disabled\n");
+
+exit_drm_dev:
+ drm_dev_exit(idx);
+}
+
+static void pixpaper_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *plane_state =
+ drm_atomic_get_new_plane_state(state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state =
+ to_drm_shadow_plane_state(plane_state);
+ struct drm_crtc *crtc = plane_state->crtc;
+ struct pixpaper_panel *panel = to_pixpaper_panel(crtc->dev);
+
+ struct drm_device *drm = &panel->drm;
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct iosys_map map = shadow_plane_state->data[0];
+ void *vaddr = map.vaddr;
+ int i, j, idx;
+ __le32 *src_pixels = NULL;
+ struct pixpaper_error_ctx err = { .errno_code = 0 };
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
+ drm_dbg(drm, "Starting frame update (phys=%dx%d, buf_w=%d)\n",
+ PIXPAPER_WIDTH, PIXPAPER_HEIGHT, PIXPAPER_PANEL_BUFFER_WIDTH);
+
+ if (!fb || !plane_state->visible) {
+		drm_err_once(drm, "Framebuffer missing or plane not visible, skipping update\n");
+ goto update_cleanup;
+ }
+
+ src_pixels = (__le32 *)vaddr;
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_DATA_START_TRANSMISSION, &err);
+ if (err.errno_code)
+ goto update_cleanup;
+
+ pixpaper_wait_for_panel(panel);
+
+ for (i = 0; i < PIXPAPER_HEIGHT; i++) {
+ for (j = 0; j < PIXPAPER_PANEL_BUFFER_TWO_BYTES_PER_ROW; j++) {
+ u8 packed_byte =
+ pack_pixels_to_byte(src_pixels, i, j, fb);
+
+ pixpaper_wait_for_panel(panel);
+ pixpaper_send_data(panel, packed_byte, &err);
+ }
+ }
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_POWER_ON, &err);
+ if (err.errno_code) {
+ drm_err_once(drm, "Failed to send PON command: %d\n", err.errno_code);
+ goto update_cleanup;
+ }
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_DISPLAY_REFRESH, &err);
+ pixpaper_send_data(panel, PIXPAPER_DRF_VCOM_AC, &err);
+ if (err.errno_code) {
+		drm_err_once(drm, "Failed to send DRF command: %d\n", err.errno_code);
+ goto update_cleanup;
+ }
+ pixpaper_wait_for_panel(panel);
+
+update_cleanup:
+ if (err.errno_code && err.errno_code != -ETIMEDOUT)
+ drm_err_once(drm, "Frame update function failed with error %d\n", err.errno_code);
+
+ drm_dev_exit(idx);
+}
+
+static const struct drm_display_mode pixpaper_mode = {
+ .clock = PIXPAPER_PIXEL_CLOCK,
+ .hdisplay = PIXPAPER_WIDTH,
+ .hsync_start = PIXPAPER_WIDTH + PIXPAPER_HFP,
+ .hsync_end = PIXPAPER_WIDTH + PIXPAPER_HFP + PIXPAPER_HSYNC,
+ .htotal = PIXPAPER_HTOTAL,
+ .vdisplay = PIXPAPER_HEIGHT,
+ .vsync_start = PIXPAPER_HEIGHT + PIXPAPER_VFP,
+ .vsync_end = PIXPAPER_HEIGHT + PIXPAPER_VFP + PIXPAPER_VSYNC,
+ .vtotal = PIXPAPER_VTOTAL,
+ .width_mm = PIXPAPER_WIDTH_MM,
+ .height_mm = PIXPAPER_HEIGHT_MM,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
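+/*
+ * The mode advertises the visible 122x250 area, while htotal/vtotal
+ * carry the 128-pixel buffer alignment and the mandatory 55-line
+ * vertical blanking, giving the 1952 kHz pixel clock at 50 Hz.
+ */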
+
+static int pixpaper_connector_get_modes(struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector, &pixpaper_mode);
+}
+
+static const struct drm_plane_funcs pixpaper_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
+};
+
+static const struct drm_plane_helper_funcs pixpaper_plane_helper_funcs = {
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = pixpaper_plane_helper_atomic_check,
+ .atomic_update = pixpaper_plane_atomic_update,
+};
+
+static const struct drm_crtc_funcs pixpaper_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static enum drm_mode_status
+pixpaper_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
+{
+ if (mode->hdisplay == PIXPAPER_WIDTH &&
+ mode->vdisplay == PIXPAPER_HEIGHT) {
+ return MODE_OK;
+ }
+ return MODE_BAD;
+}
+
+static const struct drm_crtc_helper_funcs pixpaper_crtc_helper_funcs = {
+ .mode_valid = pixpaper_mode_valid,
+ .atomic_check = pixpaper_crtc_helper_atomic_check,
+ .atomic_enable = pixpaper_crtc_atomic_enable,
+ .atomic_disable = pixpaper_crtc_atomic_disable,
+};
+
+static const struct drm_encoder_funcs pixpaper_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static const struct drm_connector_funcs pixpaper_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_connector_helper_funcs pixpaper_connector_helper_funcs = {
+ .get_modes = pixpaper_connector_get_modes,
+};
+
+DEFINE_DRM_GEM_FOPS(pixpaper_fops);
+
+static struct drm_driver pixpaper_drm_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ .fops = &pixpaper_fops,
+ .name = "pixpaper",
+ .desc = "DRM driver for PIXPAPER e-ink",
+ .major = 1,
+ .minor = 0,
+ DRM_GEM_SHMEM_DRIVER_OPS,
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
+};
+
+static const struct drm_mode_config_funcs pixpaper_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static int pixpaper_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct pixpaper_panel *panel;
+ struct drm_device *drm;
+ int ret;
+
+ panel = devm_drm_dev_alloc(dev, &pixpaper_drm_driver,
+ struct pixpaper_panel, drm);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
+
+ drm = &panel->drm;
+ panel->spi = spi;
+ spi_set_drvdata(spi, panel);
+
+ spi->mode = SPI_MODE_0;
+ spi->bits_per_word = PIXPAPER_SPI_BITS_PER_WORD;
+
+ if (!spi->max_speed_hz) {
+ drm_warn(drm,
+ "spi-max-frequency not specified in DT, using default %u Hz\n",
+ PIXPAPER_SPI_SPEED_DEFAULT);
+ spi->max_speed_hz = PIXPAPER_SPI_SPEED_DEFAULT;
+ }
+
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ drm_err(drm, "SPI setup failed: %d\n", ret);
+ return ret;
+ }
+
+ if (!dev->dma_mask)
+ dev->dma_mask = &dev->coherent_dma_mask;
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ drm_err(drm, "Failed to set DMA mask: %d\n", ret);
+ return ret;
+ }
+
+ panel->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(panel->reset))
+ return PTR_ERR(panel->reset);
+
+ panel->busy = devm_gpiod_get(dev, "busy", GPIOD_IN);
+ if (IS_ERR(panel->busy))
+ return PTR_ERR(panel->busy);
+
+ panel->dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_HIGH);
+ if (IS_ERR(panel->dc))
+ return PTR_ERR(panel->dc);
+
+ ret = pixpaper_panel_hw_init(panel);
+ if (ret) {
+ drm_err(drm, "Panel hardware initialization failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
+ drm->mode_config.funcs = &pixpaper_mode_config_funcs;
+ drm->mode_config.min_width = PIXPAPER_WIDTH;
+ drm->mode_config.max_width = PIXPAPER_WIDTH;
+ drm->mode_config.min_height = PIXPAPER_HEIGHT;
+ drm->mode_config.max_height = PIXPAPER_HEIGHT;
+
+ ret = drm_universal_plane_init(drm, &panel->plane, 1,
+ &pixpaper_plane_funcs,
+ (const uint32_t[]){ DRM_FORMAT_XRGB8888 },
+ 1, NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ return ret;
+ drm_plane_helper_add(&panel->plane, &pixpaper_plane_helper_funcs);
+
+ ret = drm_crtc_init_with_planes(drm, &panel->crtc, &panel->plane, NULL,
+ &pixpaper_crtc_funcs, NULL);
+ if (ret)
+ return ret;
+ drm_crtc_helper_add(&panel->crtc, &pixpaper_crtc_helper_funcs);
+
+ ret = drm_encoder_init(drm, &panel->encoder, &pixpaper_encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret)
+ return ret;
+ panel->encoder.possible_crtcs = drm_crtc_mask(&panel->crtc);
+
+ ret = drm_connector_init(drm, &panel->connector,
+ &pixpaper_connector_funcs,
+ DRM_MODE_CONNECTOR_SPI);
+ if (ret)
+ return ret;
+
+ drm_connector_helper_add(&panel->connector,
+ &pixpaper_connector_helper_funcs);
+ drm_connector_attach_encoder(&panel->connector, &panel->encoder);
+
+ drm_mode_config_reset(drm);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ return ret;
+
+ drm_client_setup(drm, NULL);
+
+ return 0;
+}
+
+static void pixpaper_remove(struct spi_device *spi)
+{
+ struct pixpaper_panel *panel = spi_get_drvdata(spi);
+
+ if (!panel)
+ return;
+
+ drm_dev_unplug(&panel->drm);
+ drm_atomic_helper_shutdown(&panel->drm);
+}
+
+static const struct spi_device_id pixpaper_ids[] = { { "pixpaper", 0 }, {} };
+MODULE_DEVICE_TABLE(spi, pixpaper_ids);
+
+static const struct of_device_id pixpaper_dt_ids[] = {
+ { .compatible = "mayqueen,pixpaper" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, pixpaper_dt_ids);
+
+static struct spi_driver pixpaper_spi_driver = {
+ .driver = {
+ .name = "pixpaper",
+ .of_match_table = pixpaper_dt_ids,
+ },
+ .id_table = pixpaper_ids,
+ .probe = pixpaper_probe,
+ .remove = pixpaper_remove,
+};
+
+module_spi_driver(pixpaper_spi_driver);
+
+MODULE_AUTHOR("LiangCheng Wang");
+MODULE_DESCRIPTION("DRM SPI driver for PIXPAPER e-ink panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c
index 5c3b51eb0a97..4824f863fdba 100644
--- a/drivers/gpu/drm/tiny/repaper.c
+++ b/drivers/gpu/drm/tiny/repaper.c
@@ -510,13 +510,12 @@ static void repaper_get_temperature(struct repaper_epd *epd)
epd->factored_stage_time = epd->stage_time * factor10x / 10;
}
-static int repaper_fb_dirty(struct drm_framebuffer *fb,
+static int repaper_fb_dirty(struct drm_framebuffer *fb, const struct iosys_map *vmap,
struct drm_format_conv_state *fmtcnv_state)
{
- struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
struct repaper_epd *epd = drm_to_epd(fb->dev);
unsigned int dst_pitch = 0;
- struct iosys_map dst, vmap;
+ struct iosys_map dst;
struct drm_rect clip;
int idx, ret = 0;
u8 *buf = NULL;
@@ -546,8 +545,7 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb,
goto out_free;
iosys_map_set_vaddr(&dst, buf);
- iosys_map_set_vaddr(&vmap, dma_obj->vaddr);
- drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, &vmap, fb, &clip, fmtcnv_state);
+ drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, &clip, fmtcnv_state);
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
@@ -832,16 +830,15 @@ static void repaper_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
- struct drm_format_conv_state fmtcnv_state = DRM_FORMAT_CONV_STATE_INIT;
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
struct drm_rect rect;
if (!pipe->crtc.state->active)
return;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
- repaper_fb_dirty(state->fb, &fmtcnv_state);
-
- drm_format_conv_state_release(&fmtcnv_state);
+ repaper_fb_dirty(state->fb, shadow_plane_state->data,
+ &shadow_plane_state->fmtcnv_state);
}
static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = {
@@ -849,6 +846,7 @@ static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = {
.enable = repaper_pipe_enable,
.disable = repaper_pipe_disable,
.update = repaper_pipe_update,
+ DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
};
static int repaper_connector_get_modes(struct drm_connector *connector)
diff --git a/drivers/gpu/drm/tiny/sharp-memory.c b/drivers/gpu/drm/tiny/sharp-memory.c
index 03d2850310c4..64272cd0f6e2 100644
--- a/drivers/gpu/drm/tiny/sharp-memory.c
+++ b/drivers/gpu/drm/tiny/sharp-memory.c
@@ -126,28 +126,28 @@ static inline void sharp_memory_set_tx_buffer_addresses(u8 *buffer,
static void sharp_memory_set_tx_buffer_data(u8 *buffer,
struct drm_framebuffer *fb,
+ const struct iosys_map *vmap,
struct drm_rect clip,
u32 pitch,
struct drm_format_conv_state *fmtcnv_state)
{
int ret;
- struct iosys_map dst, vmap;
- struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
+ struct iosys_map dst;
ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
if (ret)
return;
iosys_map_set_vaddr(&dst, buffer);
- iosys_map_set_vaddr(&vmap, dma_obj->vaddr);
- drm_fb_xrgb8888_to_mono(&dst, &pitch, &vmap, fb, &clip, fmtcnv_state);
+ drm_fb_xrgb8888_to_mono(&dst, &pitch, vmap, fb, &clip, fmtcnv_state);
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
}
static int sharp_memory_update_display(struct sharp_memory_device *smd,
struct drm_framebuffer *fb,
+ const struct iosys_map *vmap,
struct drm_rect clip,
struct drm_format_conv_state *fmtcnv_state)
{
@@ -163,7 +163,7 @@ static int sharp_memory_update_display(struct sharp_memory_device *smd,
sharp_memory_set_tx_buffer_mode(&tx_buffer[0],
SHARP_MEMORY_DISPLAY_UPDATE_MODE, vcom);
sharp_memory_set_tx_buffer_addresses(&tx_buffer[1], clip, pitch);
- sharp_memory_set_tx_buffer_data(&tx_buffer[2], fb, clip, pitch, fmtcnv_state);
+ sharp_memory_set_tx_buffer_data(&tx_buffer[2], fb, vmap, clip, pitch, fmtcnv_state);
ret = sharp_memory_spi_write(smd->spi, tx_buffer, tx_buffer_size);
@@ -206,7 +206,8 @@ static int sharp_memory_clear_display(struct sharp_memory_device *smd)
return ret;
}
-static void sharp_memory_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect,
+static void sharp_memory_fb_dirty(struct drm_framebuffer *fb, const struct iosys_map *vmap,
+ struct drm_rect *rect,
struct drm_format_conv_state *fmtconv_state)
{
struct drm_rect clip;
@@ -218,7 +219,7 @@ static void sharp_memory_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *r
clip.y1 = rect->y1;
clip.y2 = rect->y2;
- sharp_memory_update_display(smd, fb, clip, fmtconv_state);
+ sharp_memory_update_display(smd, fb, vmap, clip, fmtconv_state);
}
static int sharp_memory_plane_atomic_check(struct drm_plane *plane,
@@ -242,7 +243,7 @@ static void sharp_memory_plane_atomic_update(struct drm_plane *plane,
{
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
struct drm_plane_state *plane_state = plane->state;
- struct drm_format_conv_state fmtcnv_state = DRM_FORMAT_CONV_STATE_INIT;
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct sharp_memory_device *smd;
struct drm_rect rect;
@@ -251,15 +252,15 @@ static void sharp_memory_plane_atomic_update(struct drm_plane *plane,
return;
if (drm_atomic_helper_damage_merged(old_state, plane_state, &rect))
- sharp_memory_fb_dirty(plane_state->fb, &rect, &fmtcnv_state);
-
- drm_format_conv_state_release(&fmtcnv_state);
+ sharp_memory_fb_dirty(plane_state->fb, shadow_plane_state->data,
+ &rect, &shadow_plane_state->fmtcnv_state);
}
static const struct drm_plane_helper_funcs sharp_memory_plane_helper_funcs = {
.prepare_fb = drm_gem_plane_helper_prepare_fb,
.atomic_check = sharp_memory_plane_atomic_check,
.atomic_update = sharp_memory_plane_atomic_update,
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
};
static bool sharp_memory_format_mod_supported(struct drm_plane *plane,
@@ -273,9 +274,7 @@ static const struct drm_plane_funcs sharp_memory_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
- .reset = drm_atomic_helper_plane_reset,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
.format_mod_supported = sharp_memory_format_mod_supported,
};
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index f4d9e68b21e7..29423ceeec5c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1283,3 +1283,18 @@ int ttm_bo_populate(struct ttm_buffer_object *bo,
return 0;
}
EXPORT_SYMBOL(ttm_bo_populate);
+
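+/**
+ * ttm_bo_setup_export - Prepare a buffer object for export
+ * @bo: The buffer object.
+ * @ctx: Operation context for the population.
+ *
+ * Reserves @bo, populates its backing storage and unreserves it again.
+ *
+ * Returns: 0 on success, or a negative errno code otherwise.
+ */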
+int ttm_bo_setup_export(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(bo, false, false, NULL);
+ if (ret != 0)
+ return ret;
+
+ ret = ttm_bo_populate(bo, ctx);
+ ttm_bo_unreserve(bo);
+ return ret;
+}
+EXPORT_SYMBOL(ttm_bo_setup_export);
diff --git a/drivers/gpu/drm/tyr/Kconfig b/drivers/gpu/drm/tyr/Kconfig
new file mode 100644
index 000000000000..4b55308fd2eb
--- /dev/null
+++ b/drivers/gpu/drm/tyr/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0 OR MIT
+
+config DRM_TYR
+ tristate "Tyr (Rust DRM support for ARM Mali CSF-based GPUs)"
+ depends on DRM=y
+ depends on RUST
+ depends on ARM || ARM64 || COMPILE_TEST
+ depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
+ default n
+ help
+ Rust DRM driver for ARM Mali CSF-based GPUs.
+
+ This driver is for Mali (or Immortalis) Valhall Gxxx GPUs.
+
+ Note that the Mali-G68 and Mali-G78, while Valhall architecture, will
+ be supported with the panfrost driver as they are not CSF GPUs.
+
+	  If M is selected, the module will be called tyr. This driver is a
+	  work in progress and may not be functional.
diff --git a/drivers/gpu/drm/tyr/Makefile b/drivers/gpu/drm/tyr/Makefile
new file mode 100644
index 000000000000..ba545f65f2c0
--- /dev/null
+++ b/drivers/gpu/drm/tyr/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0 OR MIT
+
+obj-$(CONFIG_DRM_TYR) += tyr.o
diff --git a/drivers/gpu/drm/tyr/driver.rs b/drivers/gpu/drm/tyr/driver.rs
new file mode 100644
index 000000000000..d5625dd1e41c
--- /dev/null
+++ b/drivers/gpu/drm/tyr/driver.rs
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+use kernel::c_str;
+use kernel::clk::Clk;
+use kernel::clk::OptionalClk;
+use kernel::device::Bound;
+use kernel::device::Core;
+use kernel::device::Device;
+use kernel::devres::Devres;
+use kernel::drm;
+use kernel::drm::ioctl;
+use kernel::new_mutex;
+use kernel::of;
+use kernel::platform;
+use kernel::prelude::*;
+use kernel::regulator;
+use kernel::regulator::Regulator;
+use kernel::sizes::SZ_2M;
+use kernel::sync::Arc;
+use kernel::sync::Mutex;
+use kernel::time;
+use kernel::types::ARef;
+
+use crate::file::File;
+use crate::gem::TyrObject;
+use crate::gpu;
+use crate::gpu::GpuInfo;
+use crate::regs;
+
+pub(crate) type IoMem = kernel::io::mem::IoMem<SZ_2M>;
+
+/// Convenience type alias for the DRM device type for this driver.
+pub(crate) type TyrDevice = drm::Device<TyrDriver>;
+
+#[pin_data(PinnedDrop)]
+pub(crate) struct TyrDriver {
+ device: ARef<TyrDevice>,
+}
+
+#[pin_data(PinnedDrop)]
+pub(crate) struct TyrData {
+ pub(crate) pdev: ARef<platform::Device>,
+
+ #[pin]
+ clks: Mutex<Clocks>,
+
+ #[pin]
+ regulators: Mutex<Regulators>,
+
+ /// Some information on the GPU.
+ ///
+ /// This is mainly queried by userspace, i.e.: Mesa.
+ pub(crate) gpu_info: GpuInfo,
+}
+
+// Neither `Clk` nor `Regulator` implements `Send` or `Sync`, but they
+// should. There are patches on the mailing list to address this, but they have
+// not landed yet.
+//
+// For now, add this workaround so that this patch compiles with the promise
+// that it will be removed in a future patch.
+//
+// SAFETY: This will be removed in a future patch.
+unsafe impl Send for TyrData {}
+// SAFETY: This will be removed in a future patch.
+unsafe impl Sync for TyrData {}
+
+fn issue_soft_reset(dev: &Device<Bound>, iomem: &Devres<IoMem>) -> Result {
+ regs::GPU_CMD.write(dev, iomem, regs::GPU_CMD_SOFT_RESET)?;
+
+ // TODO: We cannot poll, as there is no support in Rust currently, so we
+ // sleep. Change this when read_poll_timeout() is implemented in Rust.
+ kernel::time::delay::fsleep(time::Delta::from_millis(100));
+
+ if regs::GPU_IRQ_RAWSTAT.read(dev, iomem)? & regs::GPU_IRQ_RAWSTAT_RESET_COMPLETED == 0 {
+        dev_err!(dev, "GPU soft reset failed\n");
+ dev_err!(
+ dev,
+            "GPU_IRQ_RAWSTAT is {}\n",
+ regs::GPU_IRQ_RAWSTAT.read(dev, iomem)?
+ );
+
+ return Err(EIO);
+ }
+
+ Ok(())
+}
+
+kernel::of_device_table!(
+ OF_TABLE,
+ MODULE_OF_TABLE,
+ <TyrDriver as platform::Driver>::IdInfo,
+ [
+ (of::DeviceId::new(c_str!("rockchip,rk3588-mali")), ()),
+ (of::DeviceId::new(c_str!("arm,mali-valhall-csf")), ())
+ ]
+);
+
+impl platform::Driver for TyrDriver {
+ type IdInfo = ();
+ const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = Some(&OF_TABLE);
+
+ fn probe(
+ pdev: &platform::Device<Core>,
+ _info: Option<&Self::IdInfo>,
+ ) -> Result<Pin<KBox<Self>>> {
+ let core_clk = Clk::get(pdev.as_ref(), Some(c_str!("core")))?;
+ let stacks_clk = OptionalClk::get(pdev.as_ref(), Some(c_str!("stacks")))?;
+ let coregroup_clk = OptionalClk::get(pdev.as_ref(), Some(c_str!("coregroup")))?;
+
+ core_clk.prepare_enable()?;
+ stacks_clk.prepare_enable()?;
+ coregroup_clk.prepare_enable()?;
+
+ let mali_regulator = Regulator::<regulator::Enabled>::get(pdev.as_ref(), c_str!("mali"))?;
+ let sram_regulator = Regulator::<regulator::Enabled>::get(pdev.as_ref(), c_str!("sram"))?;
+
+ let request = pdev.io_request_by_index(0).ok_or(ENODEV)?;
+ let iomem = Arc::pin_init(request.iomap_sized::<SZ_2M>(), GFP_KERNEL)?;
+
+ issue_soft_reset(pdev.as_ref(), &iomem)?;
+ gpu::l2_power_on(pdev.as_ref(), &iomem)?;
+
+ let gpu_info = GpuInfo::new(pdev.as_ref(), &iomem)?;
+ gpu_info.log(pdev);
+
+ let platform: ARef<platform::Device> = pdev.into();
+
+ let data = try_pin_init!(TyrData {
+ pdev: platform.clone(),
+ clks <- new_mutex!(Clocks {
+ core: core_clk,
+ stacks: stacks_clk,
+ coregroup: coregroup_clk,
+ }),
+ regulators <- new_mutex!(Regulators {
+ mali: mali_regulator,
+ sram: sram_regulator,
+ }),
+ gpu_info,
+ });
+
+ let tdev: ARef<TyrDevice> = drm::Device::new(pdev.as_ref(), data)?;
+ drm::driver::Registration::new_foreign_owned(&tdev, pdev.as_ref(), 0)?;
+
+ let driver = KBox::pin_init(try_pin_init!(TyrDriver { device: tdev }), GFP_KERNEL)?;
+
+ // We need this to be dev_info!() because dev_dbg!() does not work at
+ // all in Rust for now, and we need to see whether probe succeeded.
+ dev_info!(pdev.as_ref(), "Tyr initialized correctly.\n");
+ Ok(driver)
+ }
+}
+
+#[pinned_drop]
+impl PinnedDrop for TyrDriver {
+ fn drop(self: Pin<&mut Self>) {}
+}
+
+#[pinned_drop]
+impl PinnedDrop for TyrData {
+ fn drop(self: Pin<&mut Self>) {
+ // TODO: the type-state pattern for Clks will fix this.
+ let clks = self.clks.lock();
+ clks.core.disable_unprepare();
+ clks.stacks.disable_unprepare();
+ clks.coregroup.disable_unprepare();
+ }
+}
+
+// We need to retain the name "panthor" to achieve drop-in compatibility with
+// the C driver in the userspace stack.
+const INFO: drm::DriverInfo = drm::DriverInfo {
+ major: 1,
+ minor: 5,
+ patchlevel: 0,
+ name: c_str!("panthor"),
+ desc: c_str!("ARM Mali Tyr DRM driver"),
+};
+
+#[vtable]
+impl drm::Driver for TyrDriver {
+ type Data = TyrData;
+ type File = File;
+ type Object = drm::gem::Object<TyrObject>;
+
+ const INFO: drm::DriverInfo = INFO;
+
+ kernel::declare_drm_ioctls! {
+ (PANTHOR_DEV_QUERY, drm_panthor_dev_query, ioctl::RENDER_ALLOW, File::dev_query),
+ }
+}
+
+#[pin_data]
+struct Clocks {
+ core: Clk,
+ stacks: OptionalClk,
+ coregroup: OptionalClk,
+}
+
+#[pin_data]
+struct Regulators {
+ mali: Regulator<regulator::Enabled>,
+ sram: Regulator<regulator::Enabled>,
+}
diff --git a/drivers/gpu/drm/tyr/file.rs b/drivers/gpu/drm/tyr/file.rs
new file mode 100644
index 000000000000..0ef432947b73
--- /dev/null
+++ b/drivers/gpu/drm/tyr/file.rs
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+use kernel::drm;
+use kernel::prelude::*;
+use kernel::uaccess::UserSlice;
+use kernel::uapi;
+
+use crate::driver::TyrDevice;
+use crate::TyrDriver;
+
+#[pin_data]
+pub(crate) struct File {}
+
+/// Convenience type alias for our DRM `File` type
+pub(crate) type DrmFile = drm::file::File<File>;
+
+impl drm::file::DriverFile for File {
+ type Driver = TyrDriver;
+
+ fn open(_dev: &drm::Device<Self::Driver>) -> Result<Pin<KBox<Self>>> {
+ KBox::try_pin_init(try_pin_init!(Self {}), GFP_KERNEL)
+ }
+}
+
+impl File {
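+    /// Handles `DRM_IOCTL_PANTHOR_DEV_QUERY`.
+    ///
+    /// Per the panthor uAPI, a call with a null `pointer` only reports
+    /// the size of the requested blob, letting userspace allocate a
+    /// buffer and call again to receive the data.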
+ pub(crate) fn dev_query(
+ tdev: &TyrDevice,
+ devquery: &mut uapi::drm_panthor_dev_query,
+ _file: &DrmFile,
+ ) -> Result<u32> {
+ if devquery.pointer == 0 {
+ match devquery.type_ {
+ uapi::drm_panthor_dev_query_type_DRM_PANTHOR_DEV_QUERY_GPU_INFO => {
+ devquery.size = core::mem::size_of_val(&tdev.gpu_info) as u32;
+ Ok(0)
+ }
+ _ => Err(EINVAL),
+ }
+ } else {
+ match devquery.type_ {
+ uapi::drm_panthor_dev_query_type_DRM_PANTHOR_DEV_QUERY_GPU_INFO => {
+ let mut writer = UserSlice::new(
+ UserPtr::from_addr(devquery.pointer as usize),
+ devquery.size as usize,
+ )
+ .writer();
+
+ writer.write(&tdev.gpu_info)?;
+
+ Ok(0)
+ }
+ _ => Err(EINVAL),
+ }
+ }
+ }
+}
diff --git a/drivers/gpu/drm/tyr/gem.rs b/drivers/gpu/drm/tyr/gem.rs
new file mode 100644
index 000000000000..1273bf89dbd5
--- /dev/null
+++ b/drivers/gpu/drm/tyr/gem.rs
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+use crate::driver::TyrDevice;
+use crate::driver::TyrDriver;
+use kernel::drm::gem;
+use kernel::prelude::*;
+
+/// GEM Object inner driver data
+#[pin_data]
+pub(crate) struct TyrObject {}
+
+impl gem::DriverObject for TyrObject {
+ type Driver = TyrDriver;
+
+ fn new(_dev: &TyrDevice, _size: usize) -> impl PinInit<Self, Error> {
+ try_pin_init!(TyrObject {})
+ }
+}
diff --git a/drivers/gpu/drm/tyr/gpu.rs b/drivers/gpu/drm/tyr/gpu.rs
new file mode 100644
index 000000000000..6c582910dd5d
--- /dev/null
+++ b/drivers/gpu/drm/tyr/gpu.rs
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+use kernel::bits::genmask_u32;
+use kernel::device::Bound;
+use kernel::device::Device;
+use kernel::devres::Devres;
+use kernel::platform;
+use kernel::prelude::*;
+use kernel::time;
+use kernel::transmute::AsBytes;
+
+use crate::driver::IoMem;
+use crate::regs;
+
+/// Struct containing information that can be queried by userspace. This is read from
+/// the GPU's registers.
+///
+/// # Invariants
+///
+/// - The layout of this struct is identical to the C `struct drm_panthor_gpu_info`.
+#[repr(C)]
+pub(crate) struct GpuInfo {
+ pub(crate) gpu_id: u32,
+ pub(crate) gpu_rev: u32,
+ pub(crate) csf_id: u32,
+ pub(crate) l2_features: u32,
+ pub(crate) tiler_features: u32,
+ pub(crate) mem_features: u32,
+ pub(crate) mmu_features: u32,
+ pub(crate) thread_features: u32,
+ pub(crate) max_threads: u32,
+ pub(crate) thread_max_workgroup_size: u32,
+ pub(crate) thread_max_barrier_size: u32,
+ pub(crate) coherency_features: u32,
+ pub(crate) texture_features: [u32; 4],
+ pub(crate) as_present: u32,
+ pub(crate) pad0: u32,
+ pub(crate) shader_present: u64,
+ pub(crate) l2_present: u64,
+ pub(crate) tiler_present: u64,
+ pub(crate) core_features: u32,
+ pub(crate) pad: u32,
+}
+
+impl GpuInfo {
+ pub(crate) fn new(dev: &Device<Bound>, iomem: &Devres<IoMem>) -> Result<Self> {
+ let gpu_id = regs::GPU_ID.read(dev, iomem)?;
+ let csf_id = regs::GPU_CSF_ID.read(dev, iomem)?;
+ let gpu_rev = regs::GPU_REVID.read(dev, iomem)?;
+ let core_features = regs::GPU_CORE_FEATURES.read(dev, iomem)?;
+ let l2_features = regs::GPU_L2_FEATURES.read(dev, iomem)?;
+ let tiler_features = regs::GPU_TILER_FEATURES.read(dev, iomem)?;
+ let mem_features = regs::GPU_MEM_FEATURES.read(dev, iomem)?;
+ let mmu_features = regs::GPU_MMU_FEATURES.read(dev, iomem)?;
+ let thread_features = regs::GPU_THREAD_FEATURES.read(dev, iomem)?;
+ let max_threads = regs::GPU_THREAD_MAX_THREADS.read(dev, iomem)?;
+ let thread_max_workgroup_size = regs::GPU_THREAD_MAX_WORKGROUP_SIZE.read(dev, iomem)?;
+ let thread_max_barrier_size = regs::GPU_THREAD_MAX_BARRIER_SIZE.read(dev, iomem)?;
+ let coherency_features = regs::GPU_COHERENCY_FEATURES.read(dev, iomem)?;
+
+ let texture_features = regs::GPU_TEXTURE_FEATURES0.read(dev, iomem)?;
+
+ let as_present = regs::GPU_AS_PRESENT.read(dev, iomem)?;
+
+ let shader_present = u64::from(regs::GPU_SHADER_PRESENT_LO.read(dev, iomem)?);
+ let shader_present =
+ shader_present | u64::from(regs::GPU_SHADER_PRESENT_HI.read(dev, iomem)?) << 32;
+
+ let tiler_present = u64::from(regs::GPU_TILER_PRESENT_LO.read(dev, iomem)?);
+ let tiler_present =
+ tiler_present | u64::from(regs::GPU_TILER_PRESENT_HI.read(dev, iomem)?) << 32;
+
+ let l2_present = u64::from(regs::GPU_L2_PRESENT_LO.read(dev, iomem)?);
+ let l2_present = l2_present | u64::from(regs::GPU_L2_PRESENT_HI.read(dev, iomem)?) << 32;
+
+ Ok(Self {
+ gpu_id,
+ gpu_rev,
+ csf_id,
+ l2_features,
+ tiler_features,
+ mem_features,
+ mmu_features,
+ thread_features,
+ max_threads,
+ thread_max_workgroup_size,
+ thread_max_barrier_size,
+ coherency_features,
+ // TODO: Add texture_features_{1,2,3}.
+ texture_features: [texture_features, 0, 0, 0],
+ as_present,
+ pad0: 0,
+ shader_present,
+ l2_present,
+ tiler_present,
+ core_features,
+ pad: 0,
+ })
+ }
+
+ pub(crate) fn log(&self, pdev: &platform::Device) {
+ let major = (self.gpu_id >> 16) & 0xff;
+ let minor = (self.gpu_id >> 8) & 0xff;
+ let status = self.gpu_id & 0xff;
+
+ let model_name = if let Some(model) = GPU_MODELS
+ .iter()
+ .find(|&f| f.major == major && f.minor == minor)
+ {
+ model.name
+ } else {
+ "unknown"
+ };
+
+ dev_info!(
+ pdev.as_ref(),
+ "mali-{} id 0x{:x} major 0x{:x} minor 0x{:x} status 0x{:x}",
+ model_name,
+ self.gpu_id >> 16,
+ major,
+ minor,
+ status
+ );
+
+ dev_info!(
+ pdev.as_ref(),
+ "Features: L2:{:#x} Tiler:{:#x} Mem:{:#x} MMU:{:#x} AS:{:#x}",
+ self.l2_features,
+ self.tiler_features,
+ self.mem_features,
+ self.mmu_features,
+ self.as_present
+ );
+
+ dev_info!(
+ pdev.as_ref(),
+ "shader_present=0x{:016x} l2_present=0x{:016x} tiler_present=0x{:016x}",
+ self.shader_present,
+ self.l2_present,
+ self.tiler_present
+ );
+ }
+
+ /// Returns the number of virtual address bits supported by the GPU.
+ #[expect(dead_code)]
+ pub(crate) fn va_bits(&self) -> u32 {
+ self.mmu_features & genmask_u32(0..=7)
+ }
+
+ /// Returns the number of physical address bits supported by the GPU.
+ #[expect(dead_code)]
+ pub(crate) fn pa_bits(&self) -> u32 {
+ (self.mmu_features >> 8) & genmask_u32(0..=7)
+ }
+}
+
+// SAFETY: `GpuInfo`'s invariant guarantees that it is the same type that is
+// already exposed to userspace by the C driver. This implies that it fulfills
+// the requirements for `AsBytes`.
+//
+// This means:
+//
+// - No implicit padding,
+// - No kernel pointers,
+// - No interior mutability.
+unsafe impl AsBytes for GpuInfo {}
+
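Given the layout invariant the safety comment relies on, a compile-time size check is cheap insurance. A hedged sketch (not part of the patch, and assuming the uAPI binding for `drm_panthor_gpu_info` is in scope):

    // Fails the build if the Rust and C layouts ever diverge in size.
    const _: () = assert!(
        core::mem::size_of::<GpuInfo>()
            == core::mem::size_of::<kernel::uapi::drm_panthor_gpu_info>()
    );
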
+struct GpuModels {
+ name: &'static str,
+ major: u32,
+ minor: u32,
+}
+
+const GPU_MODELS: [GpuModels; 1] = [GpuModels {
+ name: "g610",
+ major: 10,
+ minor: 7,
+}];
+
+#[allow(dead_code)]
+pub(crate) struct GpuId {
+ pub(crate) arch_major: u32,
+ pub(crate) arch_minor: u32,
+ pub(crate) arch_rev: u32,
+ pub(crate) prod_major: u32,
+ pub(crate) ver_major: u32,
+ pub(crate) ver_minor: u32,
+ pub(crate) ver_status: u32,
+}
+
+impl From<u32> for GpuId {
+ fn from(value: u32) -> Self {
+ GpuId {
+ arch_major: (value & genmask_u32(28..=31)) >> 28,
+ arch_minor: (value & genmask_u32(24..=27)) >> 24,
+ arch_rev: (value & genmask_u32(20..=23)) >> 20,
+ prod_major: (value & genmask_u32(16..=19)) >> 16,
+ ver_major: (value & genmask_u32(12..=15)) >> 12,
+ ver_minor: (value & genmask_u32(4..=11)) >> 4,
+ ver_status: value & genmask_u32(0..=3),
+ }
+ }
+}
+
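For reference, a worked decode of a hypothetical GPU_ID value (the constant below is made up for illustration):

    // 0xa8671234 decodes field by field, most-significant first:
    let id = GpuId::from(0xa867_1234);
    assert_eq!(id.arch_major, 0xa);  // bits 31..28
    assert_eq!(id.arch_minor, 0x8);  // bits 27..24
    assert_eq!(id.arch_rev, 0x6);    // bits 23..20
    assert_eq!(id.prod_major, 0x7);  // bits 19..16
    assert_eq!(id.ver_major, 0x1);   // bits 15..12
    assert_eq!(id.ver_minor, 0x23);  // bits 11..4
    assert_eq!(id.ver_status, 0x4);  // bits 3..0
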
+/// Powers on the l2 block.
+pub(crate) fn l2_power_on(dev: &Device<Bound>, iomem: &Devres<IoMem>) -> Result {
+ regs::L2_PWRON_LO.write(dev, iomem, 1)?;
+
+ // TODO: We cannot poll, as there is no support in Rust currently, so we
+ // sleep. Change this when read_poll_timeout() is implemented in Rust.
+ kernel::time::delay::fsleep(time::Delta::from_millis(100));
+
+ if regs::L2_READY_LO.read(dev, iomem)? != 1 {
+ dev_err!(dev, "Failed to power on the GPU\n");
+ return Err(EIO);
+ }
+
+ Ok(())
+}
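
Once a read_poll_timeout() equivalent lands, the fixed 100ms sleep above could become a bounded poll. A rough sketch under that assumption (retry count and sleep quantum are illustrative, not taken from the patch):

    // Poll L2_READY_LO instead of sleeping for a fixed interval.
    for _ in 0..100 {
        if regs::L2_READY_LO.read(dev, iomem)? == 1 {
            return Ok(());
        }
        kernel::time::delay::fsleep(time::Delta::from_millis(1));
    }
    dev_err!(dev, "Failed to power on the GPU\n");
    Err(ETIMEDOUT)
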
diff --git a/drivers/gpu/drm/tyr/regs.rs b/drivers/gpu/drm/tyr/regs.rs
new file mode 100644
index 000000000000..f46933aaa221
--- /dev/null
+++ b/drivers/gpu/drm/tyr/regs.rs
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+
+// Not all of these registers and fields are expected to be used, even in the
+// future.
+//
+// Nevertheless, it is useful to have most of them defined, as the C driver
+// does.
+#![allow(dead_code)]
+
+use kernel::bits::bit_u32;
+use kernel::device::Bound;
+use kernel::device::Device;
+use kernel::devres::Devres;
+use kernel::prelude::*;
+
+use crate::driver::IoMem;
+
+/// Represents a register in the Register Set
+///
+/// TODO: Replace this with the Nova `register!()` macro when it is available.
+/// In particular, it will automatically give us 64-bit register reads and
+/// writes.
+pub(crate) struct Register<const OFFSET: usize>;
+
+impl<const OFFSET: usize> Register<OFFSET> {
+ #[inline]
+ pub(crate) fn read(&self, dev: &Device<Bound>, iomem: &Devres<IoMem>) -> Result<u32> {
+ let value = (*iomem).access(dev)?.read32(OFFSET);
+ Ok(value)
+ }
+
+ #[inline]
+ pub(crate) fn write(&self, dev: &Device<Bound>, iomem: &Devres<IoMem>, value: u32) -> Result {
+ (*iomem).access(dev)?.write32(value, OFFSET);
+ Ok(())
+ }
+}
+
+pub(crate) const GPU_ID: Register<0x0> = Register;
+pub(crate) const GPU_L2_FEATURES: Register<0x4> = Register;
+pub(crate) const GPU_CORE_FEATURES: Register<0x8> = Register;
+pub(crate) const GPU_CSF_ID: Register<0x1c> = Register;
+pub(crate) const GPU_REVID: Register<0x280> = Register;
+pub(crate) const GPU_TILER_FEATURES: Register<0xc> = Register;
+pub(crate) const GPU_MEM_FEATURES: Register<0x10> = Register;
+pub(crate) const GPU_MMU_FEATURES: Register<0x14> = Register;
+pub(crate) const GPU_AS_PRESENT: Register<0x18> = Register;
+pub(crate) const GPU_IRQ_RAWSTAT: Register<0x20> = Register;
+
+pub(crate) const GPU_IRQ_RAWSTAT_FAULT: u32 = bit_u32(0);
+pub(crate) const GPU_IRQ_RAWSTAT_PROTECTED_FAULT: u32 = bit_u32(1);
+pub(crate) const GPU_IRQ_RAWSTAT_RESET_COMPLETED: u32 = bit_u32(8);
+pub(crate) const GPU_IRQ_RAWSTAT_POWER_CHANGED_SINGLE: u32 = bit_u32(9);
+pub(crate) const GPU_IRQ_RAWSTAT_POWER_CHANGED_ALL: u32 = bit_u32(10);
+pub(crate) const GPU_IRQ_RAWSTAT_CLEAN_CACHES_COMPLETED: u32 = bit_u32(17);
+pub(crate) const GPU_IRQ_RAWSTAT_DOORBELL_STATUS: u32 = bit_u32(18);
+pub(crate) const GPU_IRQ_RAWSTAT_MCU_STATUS: u32 = bit_u32(19);
+
+pub(crate) const GPU_IRQ_CLEAR: Register<0x24> = Register;
+pub(crate) const GPU_IRQ_MASK: Register<0x28> = Register;
+pub(crate) const GPU_IRQ_STAT: Register<0x2c> = Register;
+pub(crate) const GPU_CMD: Register<0x30> = Register;
+pub(crate) const GPU_CMD_SOFT_RESET: u32 = 1 | (1 << 8);
+pub(crate) const GPU_CMD_HARD_RESET: u32 = 1 | (2 << 8);
+pub(crate) const GPU_THREAD_FEATURES: Register<0xac> = Register;
+pub(crate) const GPU_THREAD_MAX_THREADS: Register<0xa0> = Register;
+pub(crate) const GPU_THREAD_MAX_WORKGROUP_SIZE: Register<0xa4> = Register;
+pub(crate) const GPU_THREAD_MAX_BARRIER_SIZE: Register<0xa8> = Register;
+pub(crate) const GPU_TEXTURE_FEATURES0: Register<0xb0> = Register;
+pub(crate) const GPU_SHADER_PRESENT_LO: Register<0x100> = Register;
+pub(crate) const GPU_SHADER_PRESENT_HI: Register<0x104> = Register;
+pub(crate) const GPU_TILER_PRESENT_LO: Register<0x110> = Register;
+pub(crate) const GPU_TILER_PRESENT_HI: Register<0x114> = Register;
+pub(crate) const GPU_L2_PRESENT_LO: Register<0x120> = Register;
+pub(crate) const GPU_L2_PRESENT_HI: Register<0x124> = Register;
+pub(crate) const L2_READY_LO: Register<0x160> = Register;
+pub(crate) const L2_READY_HI: Register<0x164> = Register;
+pub(crate) const L2_PWRON_LO: Register<0x1a0> = Register;
+pub(crate) const L2_PWRON_HI: Register<0x1a4> = Register;
+pub(crate) const L2_PWRTRANS_LO: Register<0x220> = Register;
+pub(crate) const L2_PWRTRANS_HI: Register<0x224> = Register;
+pub(crate) const L2_PWRACTIVE_LO: Register<0x260> = Register;
+pub(crate) const L2_PWRACTIVE_HI: Register<0x264> = Register;
+
+pub(crate) const MCU_CONTROL: Register<0x700> = Register;
+pub(crate) const MCU_CONTROL_ENABLE: u32 = 1;
+pub(crate) const MCU_CONTROL_AUTO: u32 = 2;
+pub(crate) const MCU_CONTROL_DISABLE: u32 = 0;
+
+pub(crate) const MCU_STATUS: Register<0x704> = Register;
+pub(crate) const MCU_STATUS_DISABLED: u32 = 0;
+pub(crate) const MCU_STATUS_ENABLED: u32 = 1;
+pub(crate) const MCU_STATUS_HALT: u32 = 2;
+pub(crate) const MCU_STATUS_FATAL: u32 = 3;
+
+pub(crate) const GPU_COHERENCY_FEATURES: Register<0x300> = Register;
+
+pub(crate) const JOB_IRQ_RAWSTAT: Register<0x1000> = Register;
+pub(crate) const JOB_IRQ_CLEAR: Register<0x1004> = Register;
+pub(crate) const JOB_IRQ_MASK: Register<0x1008> = Register;
+pub(crate) const JOB_IRQ_STAT: Register<0x100c> = Register;
+
+pub(crate) const JOB_IRQ_GLOBAL_IF: u32 = bit_u32(31);
+
+pub(crate) const MMU_IRQ_RAWSTAT: Register<0x2000> = Register;
+pub(crate) const MMU_IRQ_CLEAR: Register<0x2004> = Register;
+pub(crate) const MMU_IRQ_MASK: Register<0x2008> = Register;
+pub(crate) const MMU_IRQ_STAT: Register<0x200c> = Register;
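
Typical call sites then read naturally. An illustrative (not from the patch) IRQ-acknowledge sequence using the constants above:

    // Read raw status and clear one acknowledged bit.
    let pending = regs::GPU_IRQ_RAWSTAT.read(dev, iomem)?;
    if pending & regs::GPU_IRQ_RAWSTAT_RESET_COMPLETED != 0 {
        regs::GPU_IRQ_CLEAR.write(dev, iomem, regs::GPU_IRQ_RAWSTAT_RESET_COMPLETED)?;
    }
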
diff --git a/drivers/gpu/drm/tyr/tyr.rs b/drivers/gpu/drm/tyr/tyr.rs
new file mode 100644
index 000000000000..861d1db43072
--- /dev/null
+++ b/drivers/gpu/drm/tyr/tyr.rs
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+
+//! Arm Mali Tyr DRM driver.
+//!
+//! The name "Tyr" is inspired by Norse mythology, reflecting Arm's tradition of
+//! naming their GPUs after Nordic mythological figures and places.
+
+use crate::driver::TyrDriver;
+
+mod driver;
+mod file;
+mod gem;
+mod gpu;
+mod regs;
+
+kernel::module_platform_driver! {
+ type: TyrDriver,
+ name: "tyr",
+ authors: ["The Tyr driver authors"],
+ description: "Arm Mali Tyr DRM driver",
+ license: "Dual MIT/GPL",
+}
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 5e997ae8bc9c..c5a3bbbc74c5 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -46,6 +46,7 @@ MODULE_PARM_DESC(super_pages, "Enable/Disable Super Pages support.");
static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
struct v3d_dev *v3d = to_v3d_dev(dev);
struct drm_v3d_get_param *args = data;
static const u32 reg_map[] = {
@@ -107,6 +108,16 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
case DRM_V3D_PARAM_SUPPORTS_SUPER_PAGES:
args->value = !!v3d->gemfs;
return 0;
+ case DRM_V3D_PARAM_GLOBAL_RESET_COUNTER:
+ mutex_lock(&v3d->reset_lock);
+ args->value = v3d->reset_counter;
+ mutex_unlock(&v3d->reset_lock);
+ return 0;
+ case DRM_V3D_PARAM_CONTEXT_RESET_COUNTER:
+ mutex_lock(&v3d->reset_lock);
+ args->value = v3d_priv->reset_counter;
+ mutex_unlock(&v3d->reset_lock);
+ return 0;
default:
DRM_DEBUG("Unknown parameter %d\n", args->param);
return -EINVAL;
@@ -146,12 +157,24 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
static void
v3d_postclose(struct drm_device *dev, struct drm_file *file)
{
+ struct v3d_dev *v3d = to_v3d_dev(dev);
struct v3d_file_priv *v3d_priv = file->driver_priv;
+ unsigned long irqflags;
enum v3d_queue q;
- for (q = 0; q < V3D_MAX_QUEUES; q++)
+ for (q = 0; q < V3D_MAX_QUEUES; q++) {
+ struct v3d_queue_state *queue = &v3d->queue[q];
+ struct v3d_job *job = queue->active_job;
+
drm_sched_entity_destroy(&v3d_priv->sched_entity[q]);
+ if (job && job->base.entity == &v3d_priv->sched_entity[q]) {
+ spin_lock_irqsave(&queue->queue_lock, irqflags);
+ job->file_priv = NULL;
+ spin_unlock_irqrestore(&queue->queue_lock, irqflags);
+ }
+ }
+
v3d_perfmon_close_file(v3d_priv);
kfree(v3d_priv);
}
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index 411e47702f8a..1884686985b8 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -58,6 +58,12 @@ struct v3d_queue_state {
/* Stores the GPU stats for this queue in the global context. */
struct v3d_stats stats;
+
+ /* Currently active job for this queue */
+ struct v3d_job *active_job;
+ spinlock_t queue_lock;
+ /* Protect dma fence for signalling job completion */
+ spinlock_t fence_lock;
};
/* Performance monitor object. The perfmon lifetime is controlled by userspace
@@ -159,18 +165,8 @@ struct v3d_dev {
struct work_struct overflow_mem_work;
- struct v3d_bin_job *bin_job;
- struct v3d_render_job *render_job;
- struct v3d_tfu_job *tfu_job;
- struct v3d_csd_job *csd_job;
-
struct v3d_queue_state queue[V3D_MAX_QUEUES];
- /* Spinlock used to synchronize the overflow memory
- * management against bin job submission.
- */
- spinlock_t job_lock;
-
/* Used to track the active perfmon if any. */
struct v3d_perfmon *active_perfmon;
@@ -204,6 +200,11 @@ struct v3d_dev {
* all jobs.
*/
struct v3d_perfmon *global_perfmon;
+
+ /* Global reset counter. The counter must be incremented when
+ * a GPU reset happens. It must be protected by @reset_lock.
+ */
+ unsigned int reset_counter;
};
static inline struct v3d_dev *
@@ -233,6 +234,12 @@ struct v3d_file_priv {
/* Stores the GPU stats for a specific queue for this fd. */
struct v3d_stats stats[V3D_MAX_QUEUES];
+
+ /* Per-fd reset counter, must be incremented when a job submitted
+ * by this fd causes a GPU reset. It must be protected by
+ * &struct v3d_dev->reset_lock.
+ */
+ unsigned int reset_counter;
};
struct v3d_bo {
@@ -316,9 +323,9 @@ struct v3d_job {
struct v3d_perfmon *perfmon;
/* File descriptor of the process that submitted the job that could be used
- * for collecting stats by process of GPU usage.
+ * to collect per-process information about the GPU.
*/
- struct drm_file *file;
+ struct v3d_file_priv *file_priv;
/* Callback for the freeing of the job on refcount going to 0. */
void (*free)(struct kref *ref);
@@ -559,7 +566,7 @@ void v3d_get_stats(const struct v3d_stats *stats, u64 timestamp,
/* v3d_fence.c */
extern const struct dma_fence_ops v3d_fence_ops;
-struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue);
+struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue q);
/* v3d_gem.c */
int v3d_gem_init(struct drm_device *dev);
@@ -603,7 +610,7 @@ void v3d_timestamp_query_info_free(struct v3d_timestamp_query_info *query_info,
unsigned int count);
void v3d_performance_query_info_free(struct v3d_performance_query_info *query_info,
unsigned int count);
-void v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue);
+void v3d_job_update_stats(struct v3d_job *job, enum v3d_queue q);
int v3d_sched_init(struct v3d_dev *v3d);
void v3d_sched_fini(struct v3d_dev *v3d);
diff --git a/drivers/gpu/drm/v3d/v3d_fence.c b/drivers/gpu/drm/v3d/v3d_fence.c
index 89840ed212c0..c82500a1df73 100644
--- a/drivers/gpu/drm/v3d/v3d_fence.c
+++ b/drivers/gpu/drm/v3d/v3d_fence.c
@@ -3,8 +3,9 @@
#include "v3d_drv.h"
-struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue)
+struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue q)
{
+ struct v3d_queue_state *queue = &v3d->queue[q];
struct v3d_fence *fence;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
@@ -12,10 +13,10 @@ struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue)
return ERR_PTR(-ENOMEM);
fence->dev = &v3d->drm;
- fence->queue = queue;
- fence->seqno = ++v3d->queue[queue].emit_seqno;
- dma_fence_init(&fence->base, &v3d_fence_ops, &v3d->job_lock,
- v3d->queue[queue].fence_context, fence->seqno);
+ fence->queue = q;
+ fence->seqno = ++queue->emit_seqno;
+ dma_fence_init(&fence->base, &v3d_fence_ops, &queue->fence_lock,
+ queue->fence_context, fence->seqno);
return &fence->base;
}
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 37bf5eecdd2c..bb110d35f749 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -271,10 +271,12 @@ v3d_gem_init(struct drm_device *dev)
queue->fence_context = dma_fence_context_alloc(1);
memset(&queue->stats, 0, sizeof(queue->stats));
seqcount_init(&queue->stats.lock);
+
+ spin_lock_init(&queue->queue_lock);
+ spin_lock_init(&queue->fence_lock);
}
spin_lock_init(&v3d->mm_lock);
- spin_lock_init(&v3d->job_lock);
ret = drmm_mutex_init(dev, &v3d->bo_lock);
if (ret)
return ret;
@@ -324,6 +326,7 @@ void
v3d_gem_destroy(struct drm_device *dev)
{
struct v3d_dev *v3d = to_v3d_dev(dev);
+ enum v3d_queue q;
v3d_sched_fini(v3d);
v3d_gemfs_fini(v3d);
@@ -331,10 +334,8 @@ v3d_gem_destroy(struct drm_device *dev)
/* Waiting for jobs to finish would need to be done before
* unregistering V3D.
*/
- WARN_ON(v3d->bin_job);
- WARN_ON(v3d->render_job);
- WARN_ON(v3d->tfu_job);
- WARN_ON(v3d->csd_job);
+ for (q = 0; q < V3D_MAX_QUEUES; q++)
+ WARN_ON(v3d->queue[q].active_job);
drm_mm_takedown(&v3d->mm);
diff --git a/drivers/gpu/drm/v3d/v3d_gemfs.c b/drivers/gpu/drm/v3d/v3d_gemfs.c
index 8ec6ed82b3d9..c1a30166c099 100644
--- a/drivers/gpu/drm/v3d/v3d_gemfs.c
+++ b/drivers/gpu/drm/v3d/v3d_gemfs.c
@@ -7,11 +7,6 @@
#include "v3d_drv.h"
-static int add_param(struct fs_context *fc, const char *key, const char *val)
-{
- return vfs_parse_fs_string(fc, key, val, strlen(val));
-}
-
void v3d_gemfs_init(struct v3d_dev *v3d)
{
struct file_system_type *type;
@@ -38,9 +33,9 @@ void v3d_gemfs_init(struct v3d_dev *v3d)
fc = fs_context_for_mount(type, SB_KERNMOUNT);
if (IS_ERR(fc))
goto err;
- ret = add_param(fc, "source", "tmpfs");
+ ret = vfs_parse_fs_string(fc, "source", "tmpfs");
if (!ret)
- ret = add_param(fc, "huge", "within_size");
+ ret = vfs_parse_fs_string(fc, "huge", "within_size");
if (!ret)
gemfs = fc_mount_longterm(fc);
put_fs_context(fc);
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index a515a301e480..31ecc5b4ba5a 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -42,6 +42,8 @@ v3d_overflow_mem_work(struct work_struct *work)
container_of(work, struct v3d_dev, overflow_mem_work);
struct drm_device *dev = &v3d->drm;
struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024);
+ struct v3d_queue_state *queue = &v3d->queue[V3D_BIN];
+ struct v3d_bin_job *bin_job;
struct drm_gem_object *obj;
unsigned long irqflags;
@@ -60,15 +62,17 @@ v3d_overflow_mem_work(struct work_struct *work)
* bin job got scheduled, that's fine. We'll just give them
* some binner pool anyway.
*/
- spin_lock_irqsave(&v3d->job_lock, irqflags);
- if (!v3d->bin_job) {
- spin_unlock_irqrestore(&v3d->job_lock, irqflags);
+ spin_lock_irqsave(&queue->queue_lock, irqflags);
+ bin_job = (struct v3d_bin_job *)queue->active_job;
+
+ if (!bin_job) {
+ spin_unlock_irqrestore(&queue->queue_lock, irqflags);
goto out;
}
drm_gem_object_get(obj);
- list_add_tail(&bo->unref_head, &v3d->bin_job->render->unref_list);
- spin_unlock_irqrestore(&v3d->job_lock, irqflags);
+ list_add_tail(&bo->unref_head, &bin_job->render->unref_list);
+ spin_unlock_irqrestore(&queue->queue_lock, irqflags);
v3d_mmu_flush_all(v3d);
@@ -79,6 +83,20 @@ out:
drm_gem_object_put(obj);
}
+static void
+v3d_irq_signal_fence(struct v3d_dev *v3d, enum v3d_queue q,
+ void (*trace_irq)(struct drm_device *, uint64_t))
+{
+ struct v3d_queue_state *queue = &v3d->queue[q];
+ struct v3d_fence *fence = to_v3d_fence(queue->active_job->irq_fence);
+
+ v3d_job_update_stats(queue->active_job, q);
+ trace_irq(&v3d->drm, fence->seqno);
+
+ queue->active_job = NULL;
+ dma_fence_signal(&fence->base);
+}
+
static irqreturn_t
v3d_irq(int irq, void *arg)
{
@@ -102,41 +120,17 @@ v3d_irq(int irq, void *arg)
}
if (intsts & V3D_INT_FLDONE) {
- struct v3d_fence *fence =
- to_v3d_fence(v3d->bin_job->base.irq_fence);
-
- v3d_job_update_stats(&v3d->bin_job->base, V3D_BIN);
- trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
-
- v3d->bin_job = NULL;
- dma_fence_signal(&fence->base);
-
+ v3d_irq_signal_fence(v3d, V3D_BIN, trace_v3d_bcl_irq);
status = IRQ_HANDLED;
}
if (intsts & V3D_INT_FRDONE) {
- struct v3d_fence *fence =
- to_v3d_fence(v3d->render_job->base.irq_fence);
-
- v3d_job_update_stats(&v3d->render_job->base, V3D_RENDER);
- trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
-
- v3d->render_job = NULL;
- dma_fence_signal(&fence->base);
-
+ v3d_irq_signal_fence(v3d, V3D_RENDER, trace_v3d_rcl_irq);
status = IRQ_HANDLED;
}
if (intsts & V3D_INT_CSDDONE(v3d->ver)) {
- struct v3d_fence *fence =
- to_v3d_fence(v3d->csd_job->base.irq_fence);
-
- v3d_job_update_stats(&v3d->csd_job->base, V3D_CSD);
- trace_v3d_csd_irq(&v3d->drm, fence->seqno);
-
- v3d->csd_job = NULL;
- dma_fence_signal(&fence->base);
-
+ v3d_irq_signal_fence(v3d, V3D_CSD, trace_v3d_csd_irq);
status = IRQ_HANDLED;
}
@@ -168,15 +162,7 @@ v3d_hub_irq(int irq, void *arg)
V3D_WRITE(V3D_HUB_INT_CLR, intsts);
if (intsts & V3D_HUB_INT_TFUC) {
- struct v3d_fence *fence =
- to_v3d_fence(v3d->tfu_job->base.irq_fence);
-
- v3d_job_update_stats(&v3d->tfu_job->base, V3D_TFU);
- trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
-
- v3d->tfu_job = NULL;
- dma_fence_signal(&fence->base);
-
+ v3d_irq_signal_fence(v3d, V3D_TFU, trace_v3d_tfu_irq);
status = IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index cb9df8822472..0ec06bfbbebb 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -139,7 +139,7 @@ static void
v3d_job_start_stats(struct v3d_job *job, enum v3d_queue queue)
{
struct v3d_dev *v3d = job->v3d;
- struct v3d_file_priv *file = job->file->driver_priv;
+ struct v3d_file_priv *file = job->file_priv;
struct v3d_stats *global_stats = &v3d->queue[queue].stats;
struct v3d_stats *local_stats = &file->stats[queue];
u64 now = local_clock();
@@ -194,11 +194,11 @@ v3d_stats_update(struct v3d_stats *stats, u64 now)
}
void
-v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue)
+v3d_job_update_stats(struct v3d_job *job, enum v3d_queue q)
{
struct v3d_dev *v3d = job->v3d;
- struct v3d_file_priv *file = job->file->driver_priv;
- struct v3d_stats *global_stats = &v3d->queue[queue].stats;
+ struct v3d_queue_state *queue = &v3d->queue[q];
+ struct v3d_stats *global_stats = &queue->stats;
u64 now = local_clock();
unsigned long flags;
@@ -209,10 +209,10 @@ v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue)
preempt_disable();
/* Don't update the local stats if the file context has already closed */
- if (file)
- v3d_stats_update(&file->stats[queue], now);
- else
- drm_dbg(&v3d->drm, "The file descriptor was closed before job completion\n");
+ spin_lock(&queue->queue_lock);
+ if (job->file_priv)
+ v3d_stats_update(&job->file_priv->stats[q], now);
+ spin_unlock(&queue->queue_lock);
v3d_stats_update(global_stats, now);
@@ -226,27 +226,28 @@ static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
{
struct v3d_bin_job *job = to_bin_job(sched_job);
struct v3d_dev *v3d = job->base.v3d;
+ struct v3d_queue_state *queue = &v3d->queue[V3D_BIN];
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
unsigned long irqflags;
if (unlikely(job->base.base.s_fence->finished.error)) {
- spin_lock_irqsave(&v3d->job_lock, irqflags);
- v3d->bin_job = NULL;
- spin_unlock_irqrestore(&v3d->job_lock, irqflags);
+ spin_lock_irqsave(&queue->queue_lock, irqflags);
+ queue->active_job = NULL;
+ spin_unlock_irqrestore(&queue->queue_lock, irqflags);
return NULL;
}
/* Lock required around bin_job update vs
* v3d_overflow_mem_work().
*/
- spin_lock_irqsave(&v3d->job_lock, irqflags);
- v3d->bin_job = job;
+ spin_lock_irqsave(&queue->queue_lock, irqflags);
+ queue->active_job = &job->base;
/* Clear out the overflow allocation, so we don't
* reuse the overflow attached to a previous job.
*/
V3D_CORE_WRITE(0, V3D_PTB_BPOS, 0);
- spin_unlock_irqrestore(&v3d->job_lock, irqflags);
+ spin_unlock_irqrestore(&queue->queue_lock, irqflags);
v3d_invalidate_caches(v3d);
@@ -290,11 +291,11 @@ static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job)
struct dma_fence *fence;
if (unlikely(job->base.base.s_fence->finished.error)) {
- v3d->render_job = NULL;
+ v3d->queue[V3D_RENDER].active_job = NULL;
return NULL;
}
- v3d->render_job = job;
+ v3d->queue[V3D_RENDER].active_job = &job->base;
/* Can we avoid this flush? We need to be careful of
* scheduling, though -- imagine job0 rendering to texture and
@@ -338,11 +339,11 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
struct dma_fence *fence;
if (unlikely(job->base.base.s_fence->finished.error)) {
- v3d->tfu_job = NULL;
+ v3d->queue[V3D_TFU].active_job = NULL;
return NULL;
}
- v3d->tfu_job = job;
+ v3d->queue[V3D_TFU].active_job = &job->base;
fence = v3d_fence_create(v3d, V3D_TFU);
if (IS_ERR(fence))
@@ -386,11 +387,11 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
int i, csd_cfg0_reg;
if (unlikely(job->base.base.s_fence->finished.error)) {
- v3d->csd_job = NULL;
+ v3d->queue[V3D_CSD].active_job = NULL;
return NULL;
}
- v3d->csd_job = job;
+ v3d->queue[V3D_CSD].active_job = &job->base;
v3d_invalidate_caches(v3d);
@@ -574,7 +575,7 @@ static void
v3d_reset_performance_queries(struct v3d_cpu_job *job)
{
struct v3d_performance_query_info *performance_query = &job->performance_query;
- struct v3d_file_priv *v3d_priv = job->base.file->driver_priv;
+ struct v3d_file_priv *v3d_priv = job->base.file_priv;
struct v3d_dev *v3d = job->base.v3d;
struct v3d_perfmon *perfmon;
@@ -604,7 +605,7 @@ v3d_write_performance_query_result(struct v3d_cpu_job *job, void *data,
{
struct v3d_performance_query_info *performance_query =
&job->performance_query;
- struct v3d_file_priv *v3d_priv = job->base.file->driver_priv;
+ struct v3d_file_priv *v3d_priv = job->base.file_priv;
struct v3d_performance_query *perf_query =
&performance_query->queries[query];
struct v3d_dev *v3d = job->base.v3d;
@@ -700,6 +701,7 @@ v3d_cpu_job_run(struct drm_sched_job *sched_job)
trace_v3d_cpu_job_end(&v3d->drm, job->job_type);
v3d_job_update_stats(&job->base, V3D_CPU);
+ /* Synchronous operation, so no fence to wait on. */
return NULL;
}
@@ -715,19 +717,24 @@ v3d_cache_clean_job_run(struct drm_sched_job *sched_job)
v3d_job_update_stats(job, V3D_CACHE_CLEAN);
+ /* Synchronous operation, so no fence to wait on. */
return NULL;
}
static enum drm_gpu_sched_stat
-v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
+v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job,
+ enum v3d_queue q)
{
- enum v3d_queue q;
+ struct v3d_job *job = to_v3d_job(sched_job);
+ struct v3d_file_priv *v3d_priv = job->file_priv;
+ unsigned long irqflags;
+ enum v3d_queue i;
mutex_lock(&v3d->reset_lock);
/* block scheduler */
- for (q = 0; q < V3D_MAX_QUEUES; q++)
- drm_sched_stop(&v3d->queue[q].sched, sched_job);
+ for (i = 0; i < V3D_MAX_QUEUES; i++)
+ drm_sched_stop(&v3d->queue[i].sched, sched_job);
if (sched_job)
drm_sched_increase_karma(sched_job);
@@ -735,13 +742,18 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
/* get the GPU back into the init state */
v3d_reset(v3d);
- for (q = 0; q < V3D_MAX_QUEUES; q++)
- drm_sched_resubmit_jobs(&v3d->queue[q].sched);
+ v3d->reset_counter++;
+ spin_lock_irqsave(&v3d->queue[q].queue_lock, irqflags);
+ if (v3d_priv)
+ v3d_priv->reset_counter++;
+ spin_unlock_irqrestore(&v3d->queue[q].queue_lock, irqflags);
+
+ for (i = 0; i < V3D_MAX_QUEUES; i++)
+ drm_sched_resubmit_jobs(&v3d->queue[i].sched);
/* Unblock schedulers and restart their jobs. */
- for (q = 0; q < V3D_MAX_QUEUES; q++) {
- drm_sched_start(&v3d->queue[q].sched, 0);
- }
+ for (i = 0; i < V3D_MAX_QUEUES; i++)
+ drm_sched_start(&v3d->queue[i].sched, 0);
mutex_unlock(&v3d->reset_lock);
@@ -769,7 +781,7 @@ v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
return DRM_GPU_SCHED_STAT_NO_HANG;
}
- return v3d_gpu_reset_for_timeout(v3d, sched_job);
+ return v3d_gpu_reset_for_timeout(v3d, sched_job, q);
}
static enum drm_gpu_sched_stat
@@ -791,11 +803,11 @@ v3d_render_job_timedout(struct drm_sched_job *sched_job)
}
static enum drm_gpu_sched_stat
-v3d_generic_job_timedout(struct drm_sched_job *sched_job)
+v3d_tfu_job_timedout(struct drm_sched_job *sched_job)
{
struct v3d_job *job = to_v3d_job(sched_job);
- return v3d_gpu_reset_for_timeout(job->v3d, sched_job);
+ return v3d_gpu_reset_for_timeout(job->v3d, sched_job, V3D_TFU);
}
static enum drm_gpu_sched_stat
@@ -814,7 +826,7 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job)
return DRM_GPU_SCHED_STAT_NO_HANG;
}
- return v3d_gpu_reset_for_timeout(v3d, sched_job);
+ return v3d_gpu_reset_for_timeout(v3d, sched_job, V3D_CSD);
}
static const struct drm_sched_backend_ops v3d_bin_sched_ops = {
@@ -831,7 +843,7 @@ static const struct drm_sched_backend_ops v3d_render_sched_ops = {
static const struct drm_sched_backend_ops v3d_tfu_sched_ops = {
.run_job = v3d_tfu_job_run,
- .timedout_job = v3d_generic_job_timedout,
+ .timedout_job = v3d_tfu_job_timedout,
.free_job = v3d_sched_job_free,
};
@@ -843,13 +855,11 @@ static const struct drm_sched_backend_ops v3d_csd_sched_ops = {
static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops = {
.run_job = v3d_cache_clean_job_run,
- .timedout_job = v3d_generic_job_timedout,
.free_job = v3d_sched_job_free
};
static const struct drm_sched_backend_ops v3d_cpu_sched_ops = {
.run_job = v3d_cpu_job_run,
- .timedout_job = v3d_generic_job_timedout,
.free_job = v3d_cpu_job_free
};
diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c
index 5171ffe9012d..f3652e90683c 100644
--- a/drivers/gpu/drm/v3d/v3d_submit.c
+++ b/drivers/gpu/drm/v3d/v3d_submit.c
@@ -166,7 +166,7 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
job->v3d = v3d;
job->free = free;
- job->file = file_priv;
+ job->file_priv = v3d_priv;
ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
1, v3d_priv, file_priv->client_id);
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index e5805ca646c7..c3315935d8bc 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -131,9 +131,8 @@ static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
* in the plane update callback, and here we just check
* whenever we must force the modeset.
*/
- if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+ if (drm_atomic_crtc_needs_modeset(crtc_state))
output->needs_modeset = true;
- }
}
static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 7dfb2006c561..1c15cbf326b7 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -162,18 +162,18 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
vgdev->has_virgl_3d = true;
#endif
- if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID))
vgdev->has_edid = true;
- }
- if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
+
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC))
vgdev->has_indirect = true;
- }
- if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID)) {
+
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID))
vgdev->has_resource_assign_uuid = true;
- }
- if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB)) {
+
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB))
vgdev->has_resource_blob = true;
- }
+
if (virtio_get_shm_region(vgdev->vdev, &vgdev->host_visible_region,
VIRTIO_GPU_SHM_ID_HOST_VISIBLE)) {
if (!devm_request_mem_region(&vgdev->vdev->dev,
@@ -193,9 +193,9 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
(unsigned long)vgdev->host_visible_region.addr,
(unsigned long)vgdev->host_visible_region.len);
}
- if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_CONTEXT_INIT)) {
+
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_CONTEXT_INIT))
vgdev->has_context_init = true;
- }
DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible",
vgdev->has_virgl_3d ? '+' : '-',
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 5517cff8715c..e6363c887500 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -47,6 +47,7 @@ int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid)
*resid = handle + 1;
} else {
int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
+
if (handle < 0)
return handle;
*resid = handle + 1;
@@ -56,9 +57,8 @@ int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid)
static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
- if (!virtio_gpu_virglrenderer_workaround) {
+ if (!virtio_gpu_virglrenderer_workaround)
ida_free(&vgdev->resource_ida, id - 1);
- }
}
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 698ea7adb951..29e4b458ae57 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -120,7 +120,7 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
crtc_state = drm_atomic_get_crtc_state(state,
new_plane_state->crtc);
if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
+ return PTR_ERR(crtc_state);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
DRM_PLANE_NO_SCALING,
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 55a15e247dd1..8181b22b9b46 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -248,6 +248,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
struct virtio_gpu_ctrl_hdr *cmd;
+
cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
le32_to_cpu(resp->type),
@@ -468,6 +469,7 @@ static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
if (vbuf->data_size) {
if (is_vmalloc_addr(vbuf->data_buf)) {
int sg_ents;
+
sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
&sg_ents);
if (!sgt) {
diff --git a/drivers/gpu/drm/vkms/tests/vkms_config_test.c b/drivers/gpu/drm/vkms/tests/vkms_config_test.c
index ff4566cf9925..b0d78a81d2df 100644
--- a/drivers/gpu/drm/vkms/tests/vkms_config_test.c
+++ b/drivers/gpu/drm/vkms/tests/vkms_config_test.c
@@ -200,6 +200,7 @@ static void vkms_config_test_get_planes(struct kunit *test)
KUNIT_ASSERT_EQ(test, n_planes, 0);
plane_cfg1 = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg1);
vkms_config_for_each_plane(config, plane_cfg) {
n_planes++;
if (plane_cfg != plane_cfg1)
@@ -209,6 +210,7 @@ static void vkms_config_test_get_planes(struct kunit *test)
n_planes = 0;
plane_cfg2 = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg2);
vkms_config_for_each_plane(config, plane_cfg) {
n_planes++;
if (plane_cfg != plane_cfg1 && plane_cfg != plane_cfg2)
@@ -242,6 +244,7 @@ static void vkms_config_test_get_crtcs(struct kunit *test)
KUNIT_FAIL(test, "Unexpected CRTC");
crtc_cfg1 = vkms_config_create_crtc(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg1);
KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 1);
vkms_config_for_each_crtc(config, crtc_cfg) {
if (crtc_cfg != crtc_cfg1)
@@ -249,6 +252,7 @@ static void vkms_config_test_get_crtcs(struct kunit *test)
}
crtc_cfg2 = vkms_config_create_crtc(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg2);
KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 2);
vkms_config_for_each_crtc(config, crtc_cfg) {
if (crtc_cfg != crtc_cfg1 && crtc_cfg != crtc_cfg2)
@@ -280,6 +284,7 @@ static void vkms_config_test_get_encoders(struct kunit *test)
KUNIT_ASSERT_EQ(test, n_encoders, 0);
encoder_cfg1 = vkms_config_create_encoder(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg1);
vkms_config_for_each_encoder(config, encoder_cfg) {
n_encoders++;
if (encoder_cfg != encoder_cfg1)
@@ -289,6 +294,7 @@ static void vkms_config_test_get_encoders(struct kunit *test)
n_encoders = 0;
encoder_cfg2 = vkms_config_create_encoder(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg2);
vkms_config_for_each_encoder(config, encoder_cfg) {
n_encoders++;
if (encoder_cfg != encoder_cfg1 && encoder_cfg != encoder_cfg2)
@@ -324,6 +330,7 @@ static void vkms_config_test_get_connectors(struct kunit *test)
KUNIT_ASSERT_EQ(test, n_connectors, 0);
connector_cfg1 = vkms_config_create_connector(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, connector_cfg1);
vkms_config_for_each_connector(config, connector_cfg) {
n_connectors++;
if (connector_cfg != connector_cfg1)
@@ -333,6 +340,7 @@ static void vkms_config_test_get_connectors(struct kunit *test)
n_connectors = 0;
connector_cfg2 = vkms_config_create_connector(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, connector_cfg2);
vkms_config_for_each_connector(config, connector_cfg) {
n_connectors++;
if (connector_cfg != connector_cfg1 &&
@@ -370,7 +378,7 @@ static void vkms_config_test_invalid_plane_number(struct kunit *test)
/* Invalid: Too many planes */
for (n = 0; n <= 32; n++)
- vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vkms_config_create_plane(config));
KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
@@ -395,6 +403,7 @@ static void vkms_config_test_valid_plane_type(struct kunit *test)
/* Invalid: No primary plane */
plane_cfg = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg);
vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_OVERLAY);
err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
KUNIT_EXPECT_EQ(test, err, 0);
@@ -402,11 +411,13 @@ static void vkms_config_test_valid_plane_type(struct kunit *test)
/* Invalid: Multiple primary planes */
plane_cfg = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg);
vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
KUNIT_EXPECT_EQ(test, err, 0);
plane_cfg = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg);
vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
KUNIT_EXPECT_EQ(test, err, 0);
@@ -419,11 +430,13 @@ static void vkms_config_test_valid_plane_type(struct kunit *test)
/* Invalid: Multiple cursor planes */
plane_cfg = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg);
vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR);
err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
KUNIT_EXPECT_EQ(test, err, 0);
plane_cfg = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg);
vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR);
err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
KUNIT_EXPECT_EQ(test, err, 0);
@@ -437,12 +450,16 @@ static void vkms_config_test_valid_plane_type(struct kunit *test)
/* Invalid: Second CRTC without primary plane */
crtc_cfg = vkms_config_create_crtc(config);
encoder_cfg = vkms_config_create_encoder(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg);
+
err = vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg);
KUNIT_EXPECT_EQ(test, err, 0);
KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
/* Valid: Second CRTC with a primary plane */
plane_cfg = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg);
vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
KUNIT_EXPECT_EQ(test, err, 0);
@@ -486,7 +503,7 @@ static void vkms_config_test_invalid_crtc_number(struct kunit *test)
/* Invalid: Too many CRTCs */
for (n = 0; n <= 32; n++)
- vkms_config_create_crtc(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vkms_config_create_crtc(config));
KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
@@ -509,7 +526,7 @@ static void vkms_config_test_invalid_encoder_number(struct kunit *test)
/* Invalid: Too many encoders */
for (n = 0; n <= 32; n++)
- vkms_config_create_encoder(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vkms_config_create_encoder(config));
KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
@@ -531,12 +548,15 @@ static void vkms_config_test_valid_encoder_possible_crtcs(struct kunit *test)
/* Invalid: Encoder without a possible CRTC */
encoder_cfg = vkms_config_create_encoder(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg);
KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
/* Valid: Second CRTC with shared encoder */
crtc_cfg2 = vkms_config_create_crtc(config);
-
plane_cfg = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg2);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg);
+
vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg2);
KUNIT_EXPECT_EQ(test, err, 0);
@@ -577,7 +597,7 @@ static void vkms_config_test_invalid_connector_number(struct kunit *test)
/* Invalid: Too many connectors */
for (n = 0; n <= 32; n++)
- connector_cfg = vkms_config_create_connector(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vkms_config_create_connector(config));
KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
@@ -669,13 +689,19 @@ static void vkms_config_test_plane_attach_crtc(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
overlay_cfg = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, overlay_cfg);
vkms_config_plane_set_type(overlay_cfg, DRM_PLANE_TYPE_OVERLAY);
+
primary_cfg = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, primary_cfg);
vkms_config_plane_set_type(primary_cfg, DRM_PLANE_TYPE_PRIMARY);
+
cursor_cfg = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cursor_cfg);
vkms_config_plane_set_type(cursor_cfg, DRM_PLANE_TYPE_CURSOR);
crtc_cfg = vkms_config_create_crtc(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg);
/* No primary or cursor planes */
KUNIT_EXPECT_NULL(test, vkms_config_crtc_primary_plane(config, crtc_cfg));
@@ -735,6 +761,11 @@ static void vkms_config_test_plane_get_possible_crtcs(struct kunit *test)
crtc_cfg1 = vkms_config_create_crtc(config);
crtc_cfg2 = vkms_config_create_crtc(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg2);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg2);
+
/* No possible CRTCs */
vkms_config_plane_for_each_possible_crtc(plane_cfg1, idx, possible_crtc)
KUNIT_FAIL(test, "Unexpected possible CRTC");
@@ -799,6 +830,11 @@ static void vkms_config_test_encoder_get_possible_crtcs(struct kunit *test)
crtc_cfg1 = vkms_config_create_crtc(config);
crtc_cfg2 = vkms_config_create_crtc(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg2);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg2);
+
/* No possible CRTCs */
vkms_config_encoder_for_each_possible_crtc(encoder_cfg1, idx, possible_crtc)
KUNIT_FAIL(test, "Unexpected possible CRTC");
@@ -863,6 +899,11 @@ static void vkms_config_test_connector_get_possible_encoders(struct kunit *test)
encoder_cfg1 = vkms_config_create_encoder(config);
encoder_cfg2 = vkms_config_create_encoder(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, connector_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, connector_cfg2);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg2);
+
/* No possible encoders */
vkms_config_connector_for_each_possible_encoder(connector_cfg1, idx,
possible_encoder)
diff --git a/drivers/gpu/drm/vkms/tests/vkms_format_test.c b/drivers/gpu/drm/vkms/tests/vkms_format_test.c
index 2e1daef94831..a7788fbc45dc 100644
--- a/drivers/gpu/drm/vkms/tests/vkms_format_test.c
+++ b/drivers/gpu/drm/vkms/tests/vkms_format_test.c
@@ -14,20 +14,20 @@
MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
/**
- * struct pixel_yuv_u8 - Internal representation of a pixel color.
- * @y: Luma value, stored in 8 bits, without padding, using
+ * struct pixel_yuv_u16 - Internal representation of a pixel color.
+ * @y: Luma value, stored in 16 bits, without padding, using
* machine endianness
- * @u: Blue difference chroma value, stored in 8 bits, without padding, using
+ * @u: Blue difference chroma value, stored in 16 bits, without padding, using
* machine endianness
- * @v: Red difference chroma value, stored in 8 bits, without padding, using
+ * @v: Red difference chroma value, stored in 16 bits, without padding, using
* machine endianness
*/
-struct pixel_yuv_u8 {
- u8 y, u, v;
+struct pixel_yuv_u16 {
+ u16 y, u, v;
};
/*
- * struct yuv_u8_to_argb_u16_case - Reference values to test the color
+ * struct yuv_u16_to_argb_u16_case - Reference values to test the color
* conversions in VKMS between YUV to ARGB
*
* @encoding: Encoding used to convert RGB to YUV
@@ -39,13 +39,13 @@ struct pixel_yuv_u8 {
* @format_pair.yuv: Same color as @format_pair.rgb, but converted to
* YUV using @encoding and @range.
*/
-struct yuv_u8_to_argb_u16_case {
+struct yuv_u16_to_argb_u16_case {
enum drm_color_encoding encoding;
enum drm_color_range range;
size_t n_colors;
struct format_pair {
char *name;
- struct pixel_yuv_u8 yuv;
+ struct pixel_yuv_u16 yuv;
struct pixel_argb_u16 argb;
} colors[TEST_BUFF_SIZE];
};
@@ -57,14 +57,14 @@ struct yuv_u8_to_argb_u16_case {
 * For more information go to the docs:
* https://colour.readthedocs.io/en/master/generated/colour.RGB_to_YCbCr.html
*/
-static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
+static struct yuv_u16_to_argb_u16_case yuv_u16_to_argb_u16_cases[] = {
/*
* colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
* K=colour.WEIGHTS_YCBCR["ITU-R BT.601"],
* in_bits = 16,
* in_legal = False,
* in_int = True,
- * out_bits = 8,
+ * out_bits = 16,
* out_legal = False,
* out_int = True)
*
@@ -76,13 +76,13 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
.range = DRM_COLOR_YCBCR_FULL_RANGE,
.n_colors = 6,
.colors = {
- { "white", { 0xff, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
- { "gray", { 0x80, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
- { "black", { 0x00, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
- { "red", { 0x4c, 0x55, 0xff }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
- { "green", { 0x96, 0x2c, 0x15 }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
- { "blue", { 0x1d, 0xff, 0x6b }, { 0xffff, 0x0000, 0x0000, 0xffff }},
- },
+ { "white", { 0xffff, 0x8000, 0x8000 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x8080, 0x8000, 0x8000 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x0000, 0x8000, 0x8000 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x4c8b, 0x54ce, 0xffff }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0x9645, 0x2b33, 0x14d1 }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x1d2f, 0xffff, 0x6b2f }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ }
},
/*
* colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
@@ -90,7 +90,7 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
* in_bits = 16,
* in_legal = False,
* in_int = True,
- * out_bits = 8,
+ * out_bits = 16,
* out_legal = True,
* out_int = True)
* Tests cases for color conversion generated by converting RGB
@@ -101,13 +101,13 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
.range = DRM_COLOR_YCBCR_LIMITED_RANGE,
.n_colors = 6,
.colors = {
- { "white", { 0xeb, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
- { "gray", { 0x7e, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
- { "black", { 0x10, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
- { "red", { 0x51, 0x5a, 0xf0 }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
- { "green", { 0x91, 0x36, 0x22 }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
- { "blue", { 0x29, 0xf0, 0x6e }, { 0xffff, 0x0000, 0x0000, 0xffff }},
- },
+ { "white", { 0xeb00, 0x8000, 0x8000 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x7dee, 0x8000, 0x8000 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x1000, 0x8000, 0x8000 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x517b, 0x5a34, 0xf000 }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0x908e, 0x35cc, 0x2237 }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x28f7, 0xf000, 0x6dc9 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ }
},
/*
* colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
@@ -115,7 +115,7 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
* in_bits = 16,
* in_legal = False,
* in_int = True,
- * out_bits = 8,
+ * out_bits = 16,
* out_legal = False,
* out_int = True)
* Tests cases for color conversion generated by converting RGB
@@ -126,21 +126,21 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
.range = DRM_COLOR_YCBCR_FULL_RANGE,
.n_colors = 6,
.colors = {
- { "white", { 0xff, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
- { "gray", { 0x80, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
- { "black", { 0x00, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
- { "red", { 0x36, 0x63, 0xff }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
- { "green", { 0xb6, 0x1e, 0x0c }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
- { "blue", { 0x12, 0xff, 0x74 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
- },
+ { "white", { 0xffff, 0x8000, 0x8000 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x8080, 0x8000, 0x8000 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x0000, 0x8000, 0x8000 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x366d, 0x62ac, 0xffff }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0xb717, 0x1d55, 0x0bbd }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x127c, 0xffff, 0x7443 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ }
},
/*
* colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
* K=colour.WEIGHTS_YCBCR["ITU-R BT.709"],
* in_bits = 16,
- * int_legal = False,
+ * in_legal = False,
* in_int = True,
- * out_bits = 8,
+ * out_bits = 16,
* out_legal = True,
* out_int = True)
* Tests cases for color conversion generated by converting RGB
@@ -151,13 +151,13 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
.range = DRM_COLOR_YCBCR_LIMITED_RANGE,
.n_colors = 6,
.colors = {
- { "white", { 0xeb, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
- { "gray", { 0x7e, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
- { "black", { 0x10, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
- { "red", { 0x3f, 0x66, 0xf0 }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
- { "green", { 0xad, 0x2a, 0x1a }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
- { "blue", { 0x20, 0xf0, 0x76 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
- },
+ { "white", { 0xeb00, 0x8000, 0x8000 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x7dee, 0x8000, 0x8000 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x1000, 0x8000, 0x8000 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x3e8f, 0x6656, 0xf000 }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0xaca1, 0x29aa, 0x1a45 }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x1fd0, 0xf000, 0x75bb }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ }
},
/*
* colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
@@ -165,7 +165,7 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
* in_bits = 16,
* in_legal = False,
* in_int = True,
- * out_bits = 8,
+ * out_bits = 16,
* out_legal = False,
* out_int = True)
* Tests cases for color conversion generated by converting RGB
@@ -176,13 +176,13 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
.range = DRM_COLOR_YCBCR_FULL_RANGE,
.n_colors = 6,
.colors = {
- { "white", { 0xff, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
- { "gray", { 0x80, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
- { "black", { 0x00, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
- { "red", { 0x43, 0x5c, 0xff }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
- { "green", { 0xad, 0x24, 0x0b }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
- { "blue", { 0x0f, 0xff, 0x76 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
- },
+ { "white", { 0xffff, 0x8000, 0x8000 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x8080, 0x8000, 0x8000 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x0000, 0x8000, 0x8000 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x4340, 0x5c41, 0xffff }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0xad91, 0x23bf, 0x0a4c }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x0f2e, 0xffff, 0x75b5 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ }
},
/*
* colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
@@ -190,7 +190,7 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
* in_bits = 16,
* in_legal = False,
* in_int = True,
- * out_bits = 8,
+ * out_bits = 16,
* out_legal = True,
* out_int = True)
* Tests cases for color conversion generated by converting RGB
@@ -201,32 +201,30 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
.range = DRM_COLOR_YCBCR_LIMITED_RANGE,
.n_colors = 6,
.colors = {
- { "white", { 0xeb, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
- { "gray", { 0x7e, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
- { "black", { 0x10, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
- { "red", { 0x4a, 0x61, 0xf0 }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
- { "green", { 0xa4, 0x2f, 0x19 }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
- { "blue", { 0x1d, 0xf0, 0x77 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
- },
+ { "white", { 0xeb00, 0x8000, 0x8000 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x7dee, 0x8000, 0x8000 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x1000, 0x8000, 0x8000 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x4988, 0x60b9, 0xf000 }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0xa47b, 0x2f47, 0x1902 }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x1cfd, 0xf000, 0x76fe }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ }
},
};
/*
- * vkms_format_test_yuv_u8_to_argb_u16 - Testing the conversion between YUV
+ * vkms_format_test_yuv_u16_to_argb_u16 - Testing the conversion between YUV
* colors to ARGB colors in VKMS
*
* This test will use the functions get_conversion_matrix_to_argb_u16 and
- * argb_u16_from_yuv888 to convert YUV colors (stored in
- * yuv_u8_to_argb_u16_cases) into ARGB colors.
+ * argb_u16_from_yuv161616 to convert YUV colors (stored in
+ * yuv_u16_to_argb_u16_cases) into ARGB colors.
*
* The conversion between YUV and RGB is not totally reversible, so there may be
* some difference between the expected value and the result.
- * In addition, there may be some rounding error as the input color is 8 bits
- * and output color is 16 bits.
*/
-static void vkms_format_test_yuv_u8_to_argb_u16(struct kunit *test)
+static void vkms_format_test_yuv_u16_to_argb_u16(struct kunit *test)
{
- const struct yuv_u8_to_argb_u16_case *param = test->param_value;
+ const struct yuv_u16_to_argb_u16_case *param = test->param_value;
struct pixel_argb_u16 argb;
for (size_t i = 0; i < param->n_colors; i++) {
@@ -236,7 +234,8 @@ static void vkms_format_test_yuv_u8_to_argb_u16(struct kunit *test)
get_conversion_matrix_to_argb_u16
(DRM_FORMAT_NV12, param->encoding, param->range, &matrix);
- argb = argb_u16_from_yuv888(color->yuv.y, color->yuv.u, color->yuv.v, &matrix);
+ argb = argb_u16_from_yuv161616(&matrix, color->yuv.y, color->yuv.u,
+ color->yuv.v);
KUNIT_EXPECT_LE_MSG(test, abs_diff(argb.a, color->argb.a), 0x1ff,
"On the A channel of the color %s expected 0x%04x, got 0x%04x",
@@ -253,19 +252,19 @@ static void vkms_format_test_yuv_u8_to_argb_u16(struct kunit *test)
}
}
-static void vkms_format_test_yuv_u8_to_argb_u16_case_desc(struct yuv_u8_to_argb_u16_case *t,
- char *desc)
+static void vkms_format_test_yuv_u16_to_argb_u16_case_desc(struct yuv_u16_to_argb_u16_case *t,
+ char *desc)
{
snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s - %s",
drm_get_color_encoding_name(t->encoding), drm_get_color_range_name(t->range));
}
-KUNIT_ARRAY_PARAM(yuv_u8_to_argb_u16, yuv_u8_to_argb_u16_cases,
- vkms_format_test_yuv_u8_to_argb_u16_case_desc
+KUNIT_ARRAY_PARAM(yuv_u16_to_argb_u16, yuv_u16_to_argb_u16_cases,
+ vkms_format_test_yuv_u16_to_argb_u16_case_desc
);
static struct kunit_case vkms_format_test_cases[] = {
- KUNIT_CASE_PARAM(vkms_format_test_yuv_u8_to_argb_u16, yuv_u8_to_argb_u16_gen_params),
+ KUNIT_CASE_PARAM(vkms_format_test_yuv_u16_to_argb_u16, yuv_u16_to_argb_u16_gen_params),
{}
};
diff --git a/drivers/gpu/drm/vkms/vkms_formats.c b/drivers/gpu/drm/vkms/vkms_formats.c
index 6d0227c6635a..dfb8e13cba87 100644
--- a/drivers/gpu/drm/vkms/vkms_formats.c
+++ b/drivers/gpu/drm/vkms/vkms_formats.c
@@ -259,16 +259,27 @@ static struct pixel_argb_u16 argb_u16_from_grayu16(u16 gray)
return argb_u16_from_u16161616(0xFFFF, gray, gray, gray);
}
-VISIBLE_IF_KUNIT struct pixel_argb_u16 argb_u16_from_yuv888(u8 y, u8 channel_1, u8 channel_2,
- const struct conversion_matrix *matrix)
+static struct pixel_argb_u16 argb_u16_from_BGR565(const __le16 *pixel)
+{
+ struct pixel_argb_u16 out_pixel;
+
+ out_pixel = argb_u16_from_RGB565(pixel);
+ swap(out_pixel.r, out_pixel.b);
+
+ return out_pixel;
+}
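Rather than duplicating the 5:6:5 bit extraction, the BGR565 helper above reuses the RGB565
path and swaps the red and blue results. A standalone sketch of the same idea, assuming the
usual 5:6:5 layout (the struct and helper names here are illustrative only):

#include <stdint.h>

struct rgb { uint8_t r, g, b; };

static struct rgb from_rgb565(uint16_t px)
{
	/* [15:11] red, [10:5] green, [4:0] blue for RGB565. */
	return (struct rgb){ px >> 11, (px >> 5) & 0x3f, px & 0x1f };
}

static struct rgb from_bgr565(uint16_t px)
{
	struct rgb out = from_rgb565(px);
	uint8_t tmp = out.r;

	/* Same extraction, then swap R and B, as the helper above does. */
	out.r = out.b;
	out.b = tmp;
	return out;
}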
+
+VISIBLE_IF_KUNIT
+struct pixel_argb_u16 argb_u16_from_yuv161616(const struct conversion_matrix *matrix,
+ u16 y, u16 channel_1, u16 channel_2)
{
u16 r, g, b;
s64 fp_y, fp_channel_1, fp_channel_2;
s64 fp_r, fp_g, fp_b;
- fp_y = drm_int2fixp(((int)y - matrix->y_offset) * 257);
- fp_channel_1 = drm_int2fixp(((int)channel_1 - 128) * 257);
- fp_channel_2 = drm_int2fixp(((int)channel_2 - 128) * 257);
+ fp_y = drm_int2fixp((int)y - matrix->y_offset * 257);
+ fp_channel_1 = drm_int2fixp((int)channel_1 - 128 * 257);
+ fp_channel_2 = drm_int2fixp((int)channel_2 - 128 * 257);
fp_r = drm_fixp_mul(matrix->matrix[0][0], fp_y) +
drm_fixp_mul(matrix->matrix[0][1], fp_channel_1) +
@@ -290,7 +301,65 @@ VISIBLE_IF_KUNIT struct pixel_argb_u16 argb_u16_from_yuv888(u8 y, u8 channel_1,
return argb_u16_from_u16161616(0xffff, r, g, b);
}
-EXPORT_SYMBOL_IF_KUNIT(argb_u16_from_yuv888);
+EXPORT_SYMBOL_IF_KUNIT(argb_u16_from_yuv161616);
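Two details in the hunk above are easy to misread: the samples are now full 16-bit values, so
only matrix->y_offset and the 128 chroma bias (both still expressed in 8-bit terms) are widened
by 257, and 257 itself is the exact byte-replication widening factor (0xff * 257 == 0xffff). A
standalone sketch of that identity, independent of the kernel code:

#include <assert.h>
#include <stdint.h>

/* Widening by 257 replicates the byte into both halves: v * 257 == (v << 8) | v. */
static uint16_t widen_u8_to_u16(uint8_t v)
{
	return (uint16_t)(v * 257);
}

int main(void)
{
	for (unsigned int v = 0; v <= 0xff; v++)
		assert(widen_u8_to_u16((uint8_t)v) == (uint16_t)((v << 8) | v));
	/* Endpoints map exactly: 0x00 -> 0x0000 and 0xff -> 0xffff. */
	return 0;
}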
+
+/**
+ * READ_LINE() - Generic generator for a read_line function which can be used for formats with a
+ * single plane and block_h == block_w == 1.
+ *
+ * @function_name: Function name to generate
+ * @pixel_name: Temporary pixel name used in the @__VA_ARGS__ parameters
+ * @pixel_type: Type to cast the pixel pointer to
+ * @callback: Callback to call for each pixel. This function should take @__VA_ARGS__ as
+ * parameters and return a struct pixel_argb_u16
+ * __VA_ARGS__: Arguments to pass to the callback. You can use @pixel_name to access the current
+ * pixel.
+ */
+#define READ_LINE(function_name, pixel_name, pixel_type, callback, ...) \
+static void function_name(const struct vkms_plane_state *plane, int x_start, \
+ int y_start, enum pixel_read_direction direction, int count, \
+ struct pixel_argb_u16 out_pixel[]) \
+{ \
+ struct pixel_argb_u16 *end = out_pixel + count; \
+ int step = get_block_step_bytes(plane->frame_info->fb, direction, 0); \
+ u8 *src_pixels; \
+ \
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels); \
+ \
+ while (out_pixel < end) { \
+ pixel_type *(pixel_name) = (pixel_type *)src_pixels; \
+ *out_pixel = (callback)(__VA_ARGS__); \
+ out_pixel += 1; \
+ src_pixels += step; \
+ } \
+}
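Because the generator replaces several near-identical hand-written loops (removed further down
in this patch), one concrete expansion helps. A sketch of approximately what
READ_LINE(R8_read_line, px, u8, argb_u16_from_gray8, *px) preprocesses into, matching the
deleted hand-written R8_read_line:

static void R8_read_line(const struct vkms_plane_state *plane, int x_start,
			 int y_start, enum pixel_read_direction direction, int count,
			 struct pixel_argb_u16 out_pixel[])
{
	struct pixel_argb_u16 *end = out_pixel + count;
	int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
	u8 *src_pixels;

	packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);

	while (out_pixel < end) {
		u8 *px = (u8 *)src_pixels;		/* pixel_name bound to the cast pointer */
		*out_pixel = argb_u16_from_gray8(*px);	/* callback(__VA_ARGS__) */
		out_pixel += 1;
		src_pixels += step;
	}
}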
+
+/**
+ * READ_LINE_ARGB8888() - Generic generator for ARGB8888 formats.
+ * The pixel type used is u8, so pixel_name[0]..pixel_name[n] are the n components of the pixel.
+ *
+ * @function_name: Function name to generate
+ * @pixel_name: temporary pixel to use in @a, @r, @g and @b parameters
+ * @a: alpha value
+ * @r: red value
+ * @g: green value
+ * @b: blue value
+ */
+#define READ_LINE_ARGB8888(function_name, pixel_name, a, r, g, b) \
+ READ_LINE(function_name, pixel_name, u8, argb_u16_from_u8888, a, r, g, b)
+/**
+ * READ_LINE_le16161616() - Generic generator for ARGB16161616 formats.
+ * The pixel type used is __le16, so pixel_name[0]..pixel_name[n] are the n components of the pixel.
+ *
+ * @function_name: Function name to generate
+ * @pixel_name: temporary pixel to use in @a, @r, @g and @b parameters
+ * @a: alpha value
+ * @r: red value
+ * @g: green value
+ * @b: blue value
+ */
+#define READ_LINE_le16161616(function_name, pixel_name, a, r, g, b) \
+ READ_LINE(function_name, pixel_name, __le16, argb_u16_from_le16161616, a, r, g, b)
/*
* The following functions are the read_line functions for each pixel format supported by VKMS.
@@ -378,138 +447,27 @@ static void R4_read_line(const struct vkms_plane_state *plane, int x_start,
Rx_read_line(plane, x_start, y_start, direction, count, out_pixel);
}
-static void R8_read_line(const struct vkms_plane_state *plane, int x_start,
- int y_start, enum pixel_read_direction direction, int count,
- struct pixel_argb_u16 out_pixel[])
-{
- struct pixel_argb_u16 *end = out_pixel + count;
- u8 *src_pixels;
- int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
-
- packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
-
- while (out_pixel < end) {
- *out_pixel = argb_u16_from_gray8(*src_pixels);
- src_pixels += step;
- out_pixel += 1;
- }
-}
-
-static void ARGB8888_read_line(const struct vkms_plane_state *plane, int x_start, int y_start,
- enum pixel_read_direction direction, int count,
- struct pixel_argb_u16 out_pixel[])
-{
- struct pixel_argb_u16 *end = out_pixel + count;
- u8 *src_pixels;
-
- packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
-
- int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
-
- while (out_pixel < end) {
- u8 *px = (u8 *)src_pixels;
- *out_pixel = argb_u16_from_u8888(px[3], px[2], px[1], px[0]);
- out_pixel += 1;
- src_pixels += step;
- }
-}
-
-static void XRGB8888_read_line(const struct vkms_plane_state *plane, int x_start, int y_start,
- enum pixel_read_direction direction, int count,
- struct pixel_argb_u16 out_pixel[])
-{
- struct pixel_argb_u16 *end = out_pixel + count;
- u8 *src_pixels;
-
- packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
-
- int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
-
- while (out_pixel < end) {
- u8 *px = (u8 *)src_pixels;
- *out_pixel = argb_u16_from_u8888(255, px[2], px[1], px[0]);
- out_pixel += 1;
- src_pixels += step;
- }
-}
-
-static void ABGR8888_read_line(const struct vkms_plane_state *plane, int x_start, int y_start,
- enum pixel_read_direction direction, int count,
- struct pixel_argb_u16 out_pixel[])
-{
- struct pixel_argb_u16 *end = out_pixel + count;
- u8 *src_pixels;
-
- packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
-
- int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
-
- while (out_pixel < end) {
- u8 *px = (u8 *)src_pixels;
- /* Switch blue and red pixels. */
- *out_pixel = argb_u16_from_u8888(px[3], px[0], px[1], px[2]);
- out_pixel += 1;
- src_pixels += step;
- }
-}
-
-static void ARGB16161616_read_line(const struct vkms_plane_state *plane, int x_start,
- int y_start, enum pixel_read_direction direction, int count,
- struct pixel_argb_u16 out_pixel[])
-{
- struct pixel_argb_u16 *end = out_pixel + count;
- u8 *src_pixels;
-
- packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
-
- int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
- while (out_pixel < end) {
- u16 *px = (u16 *)src_pixels;
- *out_pixel = argb_u16_from_u16161616(px[3], px[2], px[1], px[0]);
- out_pixel += 1;
- src_pixels += step;
- }
-}
+READ_LINE_ARGB8888(XRGB8888_read_line, px, 0xFF, px[2], px[1], px[0])
+READ_LINE_ARGB8888(XBGR8888_read_line, px, 0xFF, px[0], px[1], px[2])
-static void XRGB16161616_read_line(const struct vkms_plane_state *plane, int x_start,
- int y_start, enum pixel_read_direction direction, int count,
- struct pixel_argb_u16 out_pixel[])
-{
- struct pixel_argb_u16 *end = out_pixel + count;
- u8 *src_pixels;
-
- packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
-
- int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
-
- while (out_pixel < end) {
- __le16 *px = (__le16 *)src_pixels;
- *out_pixel = argb_u16_from_le16161616(cpu_to_le16(0xFFFF), px[2], px[1], px[0]);
- out_pixel += 1;
- src_pixels += step;
- }
-}
-
-static void RGB565_read_line(const struct vkms_plane_state *plane, int x_start,
- int y_start, enum pixel_read_direction direction, int count,
- struct pixel_argb_u16 out_pixel[])
-{
- struct pixel_argb_u16 *end = out_pixel + count;
- u8 *src_pixels;
+READ_LINE_ARGB8888(ARGB8888_read_line, px, px[3], px[2], px[1], px[0])
+READ_LINE_ARGB8888(ABGR8888_read_line, px, px[3], px[0], px[1], px[2])
+READ_LINE_ARGB8888(RGBA8888_read_line, px, px[0], px[3], px[2], px[1])
+READ_LINE_ARGB8888(BGRA8888_read_line, px, px[0], px[1], px[2], px[3])
- packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
+READ_LINE_ARGB8888(RGB888_read_line, px, 0xFF, px[2], px[1], px[0])
+READ_LINE_ARGB8888(BGR888_read_line, px, 0xFF, px[0], px[1], px[2])
- int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
+READ_LINE_le16161616(ARGB16161616_read_line, px, px[3], px[2], px[1], px[0])
+READ_LINE_le16161616(ABGR16161616_read_line, px, px[3], px[0], px[1], px[2])
+READ_LINE_le16161616(XRGB16161616_read_line, px, cpu_to_le16(0xFFFF), px[2], px[1], px[0])
+READ_LINE_le16161616(XBGR16161616_read_line, px, cpu_to_le16(0xFFFF), px[0], px[1], px[2])
- while (out_pixel < end) {
- __le16 *px = (__le16 *)src_pixels;
+READ_LINE(RGB565_read_line, px, __le16, argb_u16_from_RGB565, px)
+READ_LINE(BGR565_read_line, px, __le16, argb_u16_from_BGR565, px)
- *out_pixel = argb_u16_from_RGB565(px);
- out_pixel += 1;
- src_pixels += step;
- }
-}
+READ_LINE(R8_read_line, px, u8, argb_u16_from_gray8, *px)
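The component orderings in these instantiations follow the DRM fourcc convention that formats
are little-endian: for DRM_FORMAT_ARGB8888 the 32-bit A:R:G:B word lands in memory as bytes
{ B, G, R, A }, hence px[3] = alpha and px[0] = blue. A small host-side sketch of that layout
(it holds on a little-endian machine, as the assertion notes):

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint32_t argb = 0xAA112233u;	/* A=0xAA R=0x11 G=0x22 B=0x33 */
	uint8_t px[4];

	memcpy(px, &argb, sizeof(px));	/* byte order as seen by the read_line helpers */
	/* Holds on little-endian hosts only. */
	assert(px[0] == 0x33 && px[1] == 0x22 && px[2] == 0x11 && px[3] == 0xAA);
	return 0;
}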
/*
* This callback can be used for YUV formats where U and V values are
@@ -521,35 +479,57 @@ static void RGB565_read_line(const struct vkms_plane_state *plane, int x_start,
* - Convert YUV and YVU with the same function (a column swap is needed when setting up
* plane->conversion_matrix)
*/
-static void semi_planar_yuv_read_line(const struct vkms_plane_state *plane, int x_start,
- int y_start, enum pixel_read_direction direction, int count,
- struct pixel_argb_u16 out_pixel[])
-{
- u8 *y_plane;
- u8 *uv_plane;
-
- packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0,
- &y_plane);
- packed_pixels_addr_1x1(plane->frame_info,
- x_start / plane->frame_info->fb->format->hsub,
- y_start / plane->frame_info->fb->format->vsub, 1,
- &uv_plane);
- int step_y = get_block_step_bytes(plane->frame_info->fb, direction, 0);
- int step_uv = get_block_step_bytes(plane->frame_info->fb, direction, 1);
- int subsampling = get_subsampling(plane->frame_info->fb->format, direction);
- int subsampling_offset = get_subsampling_offset(direction, x_start, y_start);
- const struct conversion_matrix *conversion_matrix = &plane->conversion_matrix;
-
- for (int i = 0; i < count; i++) {
- *out_pixel = argb_u16_from_yuv888(y_plane[0], uv_plane[0], uv_plane[1],
- conversion_matrix);
- out_pixel += 1;
- y_plane += step_y;
- if ((i + subsampling_offset + 1) % subsampling == 0)
- uv_plane += step_uv;
- }
-}
+/**
+ * READ_LINE_YUV_SEMIPLANAR() - Generic generator for a read_line function which can be used
+ * for YUV formats with two planes and block_w == block_h == 1.
+ *
+ * @function_name: Function name to generate
+ * @pixel_1_name: Temporary pixel name for the first plane used in the @__VA_ARGS__ parameters
+ * @pixel_2_name: Temporary pixel name for the second plane used in the @__VA_ARGS__ parameters
+ * @pixel_1_type: Type to cast the plane 1 pixel pointer to
+ * @pixel_2_type: Type to cast the plane 2 pixel pointer to
+ * @callback: Callback to call for each pixel. This function should take
+ * (struct conversion_matrix *, @__VA_ARGS__) as parameters and return a struct pixel_argb_u16
+ * __VA_ARGS__: Arguments to pass to the callback. You can use @pixel_1_name and @pixel_2_name
+ * to access the current pixel values
+ */
+#define READ_LINE_YUV_SEMIPLANAR(function_name, pixel_1_name, pixel_2_name, pixel_1_type, \
+ pixel_2_type, callback, ...) \
+static void function_name(const struct vkms_plane_state *plane, int x_start, \
+ int y_start, enum pixel_read_direction direction, int count, \
+ struct pixel_argb_u16 out_pixel[]) \
+{ \
+ u8 *plane_1; \
+ u8 *plane_2; \
+ \
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, \
+ &plane_1); \
+ packed_pixels_addr_1x1(plane->frame_info, \
+ x_start / plane->frame_info->fb->format->hsub, \
+ y_start / plane->frame_info->fb->format->vsub, 1, \
+ &plane_2); \
+ int step_1 = get_block_step_bytes(plane->frame_info->fb, direction, 0); \
+ int step_2 = get_block_step_bytes(plane->frame_info->fb, direction, 1); \
+ int subsampling = get_subsampling(plane->frame_info->fb->format, direction); \
+ int subsampling_offset = get_subsampling_offset(direction, x_start, y_start); \
+ const struct conversion_matrix *conversion_matrix = &plane->conversion_matrix; \
+ \
+ for (int i = 0; i < count; i++) { \
+ pixel_1_type *(pixel_1_name) = (pixel_1_type *)plane_1; \
+ pixel_2_type *(pixel_2_name) = (pixel_2_type *)plane_2; \
+ *out_pixel = (callback)(conversion_matrix, __VA_ARGS__); \
+ out_pixel += 1; \
+ plane_1 += step_1; \
+ if ((i + subsampling_offset + 1) % subsampling == 0) \
+ plane_2 += step_2; \
+ } \
+}
+
+READ_LINE_YUV_SEMIPLANAR(YUV888_semiplanar_read_line, y, uv, u8, u8, argb_u16_from_yuv161616,
+ y[0] * 257, uv[0] * 257, uv[1] * 257)
+READ_LINE_YUV_SEMIPLANAR(YUV161616_semiplanar_read_line, y, uv, u16, u16, argb_u16_from_yuv161616,
+ y[0], uv[0], uv[1])
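The chroma pointer in the generated loop advances only once per subsampling luma samples, with
subsampling_offset keeping the pairing correct when the line does not start on a subsampling
boundary. A minimal sketch of that stepping for horizontal 2x subsampling, as in NV12:

#include <stdio.h>

int main(void)
{
	int subsampling = 2, subsampling_offset = 0, uv_index = 0;

	for (int i = 0; i < 8; i++) {
		printf("luma %d pairs with uv %d\n", i, uv_index);
		/* Same condition as the generator: step plane 2 every 2nd sample. */
		if ((i + subsampling_offset + 1) % subsampling == 0)
			uv_index++;
	}
	return 0;	/* luma 0,1 -> uv 0; luma 2,3 -> uv 1; ... */
}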
/*
* This callback can be used for YUV format where each color component is
* stored in a different plane (often called planar formats). It will
@@ -586,8 +566,9 @@ static void planar_yuv_read_line(const struct vkms_plane_state *plane, int x_sta
const struct conversion_matrix *conversion_matrix = &plane->conversion_matrix;
for (int i = 0; i < count; i++) {
- *out_pixel = argb_u16_from_yuv888(*y_plane, *channel_1_plane, *channel_2_plane,
- conversion_matrix);
+ *out_pixel = argb_u16_from_yuv161616(conversion_matrix,
+ *y_plane * 257, *channel_1_plane * 257,
+ *channel_2_plane * 257);
out_pixel += 1;
y_plane += step_y;
if ((i + subsampling_offset + 1) % subsampling == 0) {
@@ -712,23 +693,43 @@ pixel_read_line_t get_pixel_read_line_function(u32 format)
switch (format) {
case DRM_FORMAT_ARGB8888:
return &ARGB8888_read_line;
- case DRM_FORMAT_XRGB8888:
- return &XRGB8888_read_line;
case DRM_FORMAT_ABGR8888:
return &ABGR8888_read_line;
+ case DRM_FORMAT_BGRA8888:
+ return &BGRA8888_read_line;
+ case DRM_FORMAT_RGBA8888:
+ return &RGBA8888_read_line;
+ case DRM_FORMAT_XRGB8888:
+ return &XRGB8888_read_line;
+ case DRM_FORMAT_XBGR8888:
+ return &XBGR8888_read_line;
+ case DRM_FORMAT_RGB888:
+ return &RGB888_read_line;
+ case DRM_FORMAT_BGR888:
+ return &BGR888_read_line;
case DRM_FORMAT_ARGB16161616:
return &ARGB16161616_read_line;
+ case DRM_FORMAT_ABGR16161616:
+ return &ABGR16161616_read_line;
case DRM_FORMAT_XRGB16161616:
return &XRGB16161616_read_line;
+ case DRM_FORMAT_XBGR16161616:
+ return &XBGR16161616_read_line;
case DRM_FORMAT_RGB565:
return &RGB565_read_line;
+ case DRM_FORMAT_BGR565:
+ return &BGR565_read_line;
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV24:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV61:
case DRM_FORMAT_NV42:
- return &semi_planar_yuv_read_line;
+ return &YUV888_semiplanar_read_line;
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ return &YUV161616_semiplanar_read_line;
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YUV444:
diff --git a/drivers/gpu/drm/vkms/vkms_formats.h b/drivers/gpu/drm/vkms/vkms_formats.h
index b4fe62ab9c65..eeb208cdd6b1 100644
--- a/drivers/gpu/drm/vkms/vkms_formats.h
+++ b/drivers/gpu/drm/vkms/vkms_formats.h
@@ -14,8 +14,8 @@ void get_conversion_matrix_to_argb_u16(u32 format, enum drm_color_encoding encod
struct conversion_matrix *matrix);
#if IS_ENABLED(CONFIG_KUNIT)
-struct pixel_argb_u16 argb_u16_from_yuv888(u8 y, u8 channel_1, u8 channel_2,
- const struct conversion_matrix *matrix);
+struct pixel_argb_u16 argb_u16_from_yuv161616(const struct conversion_matrix *matrix,
+ u16 y, u16 channel_1, u16 channel_2);
#endif
#endif /* _VKMS_FORMATS_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index 8d7ca0cdd79f..2ee3749e2b28 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -77,9 +77,22 @@ int vkms_output_init(struct vkms_device *vkmsdev)
return ret;
}
+ encoder_cfg->encoder->possible_clones |=
+ drm_encoder_mask(encoder_cfg->encoder);
+
vkms_config_encoder_for_each_possible_crtc(encoder_cfg, idx, possible_crtc) {
encoder_cfg->encoder->possible_crtcs |=
drm_crtc_mask(&possible_crtc->crtc->crtc);
+
+ if (vkms_config_crtc_get_writeback(possible_crtc)) {
+ struct drm_encoder *wb_encoder =
+ &possible_crtc->crtc->wb_encoder;
+
+ encoder_cfg->encoder->possible_clones |=
+ drm_encoder_mask(wb_encoder);
+ wb_encoder->possible_clones |=
+ drm_encoder_mask(encoder_cfg->encoder);
+ }
}
}
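The hunk above encodes the DRM cloning rule: every encoder's possible_clones mask must include
its own bit, and two encoders that may be active on the same CRTC at once (here, a regular
encoder and the writeback encoder) must each carry the other's bit. A hedged sketch of the
symmetric relationship, assuming one bit per encoder index as drm_encoder_mask() provides:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: mirrors the one-bit-per-encoder-index mask convention. */
static bool encoders_can_clone(uint32_t clones_a, uint32_t clones_b,
			       unsigned int idx_a, unsigned int idx_b)
{
	return (clones_a & (1u << idx_b)) && (clones_b & (1u << idx_a));
}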
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index e3fdd161d0f0..e592e47a5736 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -14,11 +14,19 @@
static const u32 vkms_formats[] = {
DRM_FORMAT_ARGB8888,
- DRM_FORMAT_XRGB8888,
DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
DRM_FORMAT_XRGB16161616,
+ DRM_FORMAT_XBGR16161616,
DRM_FORMAT_ARGB16161616,
+ DRM_FORMAT_ABGR16161616,
DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
DRM_FORMAT_NV12,
DRM_FORMAT_NV16,
DRM_FORMAT_NV24,
@@ -31,6 +39,9 @@ static const u32 vkms_formats[] = {
DRM_FORMAT_YVU420,
DRM_FORMAT_YVU422,
DRM_FORMAT_YVU444,
+ DRM_FORMAT_P010,
+ DRM_FORMAT_P012,
+ DRM_FORMAT_P016,
DRM_FORMAT_R1,
DRM_FORMAT_R2,
DRM_FORMAT_R4,
diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c
index fe163271d5b5..45d69a3b85f6 100644
--- a/drivers/gpu/drm/vkms/vkms_writeback.c
+++ b/drivers/gpu/drm/vkms/vkms_writeback.c
@@ -174,6 +174,8 @@ int vkms_enable_writeback_connector(struct vkms_device *vkmsdev,
if (ret)
return ret;
vkms_output->wb_encoder.possible_crtcs |= drm_crtc_mask(&vkms_output->crtc);
+ vkms_output->wb_encoder.possible_clones |=
+ drm_encoder_mask(&vkms_output->wb_encoder);
drm_connector_helper_add(&wb->base, &vkms_wb_conn_helper_funcs);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 819704ac675d..d539f25b5fbe 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1497,6 +1497,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
struct vmw_bo *vmw_bo = NULL;
+ struct vmw_resource *res;
struct vmw_surface *srf = NULL;
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
int ret;
@@ -1532,18 +1533,24 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
VMW_RES_DIRTY_SET : 0;
- ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- dirty, user_surface_converter,
- &cmd->body.host.sid, NULL);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, dirty,
+ user_surface_converter, &cmd->body.host.sid,
+ NULL);
if (unlikely(ret != 0)) {
if (unlikely(ret != -ERESTARTSYS))
VMW_DEBUG_USER("could not find surface for DMA.\n");
return ret;
}
- srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
+ res = sw_context->res_cache[vmw_res_surface].res;
+ if (!res) {
+ VMW_DEBUG_USER("Invalid DMA surface.\n");
+ return -EINVAL;
+ }
- vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo, header);
+ srf = vmw_res_to_srf(res);
+ vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo,
+ header);
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index c2294abbe753..00be92da5509 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -538,7 +538,7 @@ static void vmw_event_fence_action_seq_passed(struct dma_fence *f,
if (likely(eaction->tv_sec != NULL)) {
struct timespec64 ts;
- ktime_to_timespec64(f->timestamp);
+ ts = ktime_to_timespec64(f->timestamp);
/* monotonic time, so no y2038 overflow */
*eaction->tv_sec = ts.tv_sec;
*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index 7ee93e7191c7..35dc94c3db39 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -308,8 +308,10 @@ int vmw_validation_add_resource(struct vmw_validation_context *ctx,
hash_add_rcu(ctx->sw_context->res_ht, &node->hash.head, node->hash.key);
}
node->res = vmw_resource_reference_unless_doomed(res);
- if (!node->res)
+ if (!node->res) {
+ hash_del_rcu(&node->hash.head);
return -ESRCH;
+ }
node->first_usage = 1;
if (!res->dev_priv->has_mob) {
@@ -636,7 +638,7 @@ void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
hash_del_rcu(&val->hash.head);
list_for_each_entry(val, &ctx->resource_ctx_list, head)
- hash_del_rcu(&entry->hash.head);
+ hash_del_rcu(&val->hash.head);
ctx->sw_context = NULL;
}
diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
index 2bb2bc052120..7219f6b884b6 100644
--- a/drivers/gpu/drm/xe/Kconfig
+++ b/drivers/gpu/drm/xe/Kconfig
@@ -5,6 +5,7 @@ config DRM_XE
depends on KUNIT || !KUNIT
depends on INTEL_VSEC || !INTEL_VSEC
depends on X86_PLATFORM_DEVICES || !(X86 && ACPI)
+ depends on PAGE_SIZE_4KB || COMPILE_TEST || BROKEN
select INTERVAL_TREE
# we need shmfs for the swappable backing store, and in particular
# the shmem_readpage() which depends upon tmpfs
@@ -39,12 +40,12 @@ config DRM_XE
select DRM_TTM
select DRM_TTM_HELPER
select DRM_EXEC
+ select DRM_GPUSVM if !UML && DEVICE_PRIVATE
select DRM_GPUVM
select DRM_SCHED
select MMU_NOTIFIER
select WANT_DEV_COREDUMP
select AUXILIARY_BUS
- select HMM_MIRROR
select REGMAP if I2C
help
Driver for Intel Xe2 series GPUs and later. Experimental support
diff --git a/drivers/gpu/drm/xe/Kconfig.debug b/drivers/gpu/drm/xe/Kconfig.debug
index 01735c6ece8b..87902b4bd6d3 100644
--- a/drivers/gpu/drm/xe/Kconfig.debug
+++ b/drivers/gpu/drm/xe/Kconfig.debug
@@ -104,6 +104,7 @@ config DRM_XE_DEBUG_GUC
config DRM_XE_USERPTR_INVAL_INJECT
bool "Inject userptr invalidation -EINVAL errors"
+ depends on DRM_GPUSVM
default n
help
Choose this option when debugging error paths that
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 07c71a29963d..d9c6cf0f189e 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -35,6 +35,7 @@ $(obj)/generated/%_device_wa_oob.c $(obj)/generated/%_device_wa_oob.h: $(obj)/xe
xe-y += xe_bb.o \
xe_bo.o \
xe_bo_evict.o \
+ xe_dep_scheduler.o \
xe_devcoredump.o \
xe_device.o \
xe_device_sysfs.o \
@@ -60,7 +61,6 @@ xe-y += xe_bb.o \
xe_gt_pagefault.o \
xe_gt_sysfs.o \
xe_gt_throttle.o \
- xe_gt_tlb_invalidation.o \
xe_gt_topology.o \
xe_guc.o \
xe_guc_ads.o \
@@ -75,16 +75,20 @@ xe-y += xe_bb.o \
xe_guc_log.o \
xe_guc_pc.o \
xe_guc_submit.o \
+ xe_guc_tlb_inval.o \
xe_heci_gsc.o \
xe_huc.o \
xe_hw_engine.o \
xe_hw_engine_class_sysfs.o \
xe_hw_engine_group.o \
+ xe_hw_error.o \
xe_hw_fence.o \
xe_irq.o \
+ xe_late_bind_fw.o \
xe_lrc.o \
xe_migrate.o \
xe_mmio.o \
+ xe_mmio_gem.o \
xe_mocs.o \
xe_module.o \
xe_nvm.o \
@@ -95,6 +99,7 @@ xe-y += xe_bb.o \
xe_pcode.o \
xe_pm.o \
xe_preempt_fence.o \
+ xe_psmi.o \
xe_pt.o \
xe_pt_walk.o \
xe_pxp.o \
@@ -114,6 +119,8 @@ xe-y += xe_bb.o \
xe_sync.o \
xe_tile.o \
xe_tile_sysfs.o \
+ xe_tlb_inval.o \
+ xe_tlb_inval_job.o \
xe_trace.o \
xe_trace_bo.o \
xe_trace_guc.o \
@@ -124,7 +131,9 @@ xe-y += xe_bb.o \
xe_tuning.o \
xe_uc.o \
xe_uc_fw.o \
+ xe_validation.o \
xe_vm.o \
+ xe_vm_madvise.o \
xe_vram.o \
xe_vram_freq.o \
xe_vsec.o \
@@ -133,8 +142,8 @@ xe-y += xe_bb.o \
xe_wopcm.o
xe-$(CONFIG_I2C) += xe_i2c.o
-xe-$(CONFIG_HMM_MIRROR) += xe_hmm.o
xe-$(CONFIG_DRM_XE_GPUSVM) += xe_svm.o
+xe-$(CONFIG_DRM_GPUSVM) += xe_userptr.o
# graphics hardware monitoring (HWMON) support
xe-$(CONFIG_HWMON) += xe_hwmon.o
@@ -149,6 +158,7 @@ xe-y += \
xe_memirq.o \
xe_sriov.o \
xe_sriov_vf.o \
+ xe_sriov_vf_ccs.o \
xe_tile_sriov_vf.o
xe-$(CONFIG_PCI_IOV) += \
@@ -202,6 +212,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
display/xe_dsb_buffer.o \
display/xe_fb_pin.o \
display/xe_hdcp_gsc.o \
+ display/xe_panic.o \
display/xe_plane_initial.o \
display/xe_tdf.o
@@ -317,6 +328,7 @@ ifeq ($(CONFIG_DEBUG_FS),y)
xe_gt_stats.o \
xe_guc_debugfs.o \
xe_huc_debugfs.o \
+ xe_tile_debugfs.o \
xe_uc_debugfs.o
xe-$(CONFIG_PCI_IOV) += xe_gt_sriov_pf_debugfs.o
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
index 81eb046aeebf..31090c69dfbe 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
@@ -117,6 +117,7 @@ enum xe_guc_action {
XE_GUC_ACTION_ENTER_S_STATE = 0x501,
XE_GUC_ACTION_EXIT_S_STATE = 0x502,
XE_GUC_ACTION_GLOBAL_SCHED_POLICY_CHANGE = 0x506,
+ XE_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV = 0x509,
XE_GUC_ACTION_SCHED_CONTEXT = 0x1000,
XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET = 0x1001,
XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE = 0x1002,
@@ -154,6 +155,8 @@ enum xe_guc_action {
XE_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE = 0x8003,
XE_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED = 0x8004,
XE_GUC_ACTION_NOTIFY_EXCEPTION = 0x8005,
+ XE_GUC_ACTION_TEST_G2G_SEND = 0xF001,
+ XE_GUC_ACTION_TEST_G2G_RECV = 0xF002,
XE_GUC_ACTION_LIMIT
};
@@ -193,6 +196,14 @@ enum xe_guc_register_context_multi_lrc_param_offsets {
XE_GUC_REGISTER_CONTEXT_MULTI_LRC_MSG_MIN_LEN = 11,
};
+enum xe_guc_context_wq_item_offsets {
+ XE_GUC_CONTEXT_WQ_HEADER_DATA_0_TYPE_LEN = 0,
+ XE_GUC_CONTEXT_WQ_EL_INFO_DATA_1_CTX_DESC_LOW,
+ XE_GUC_CONTEXT_WQ_EL_INFO_DATA_2_GUCCTX_RINGTAIL_FREEZEPOCS,
+ XE_GUC_CONTEXT_WQ_EL_INFO_DATA_3_WI_FENCE_ID,
+ XE_GUC_CONTEXT_WQ_EL_CHILD_LIST_DATA_4_RINGTAIL,
+};
+
enum xe_guc_report_status {
XE_GUC_REPORT_STATUS_UNKNOWN = 0x0,
XE_GUC_REPORT_STATUS_ACKED = 0x1,
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h
index b28c8fa061f7..ce5c59517528 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h
@@ -210,6 +210,11 @@ struct slpc_shared_data {
u8 reserved_mode_definition[4096];
} __packed;
+enum slpc_power_profile {
+ SLPC_POWER_PROFILE_BASE = 0x0,
+ SLPC_POWER_PROFILE_POWER_SAVING = 0x1
+};
+
/**
* DOC: SLPC H2G MESSAGE FORMAT
*
diff --git a/drivers/gpu/drm/xe/abi/guc_errors_abi.h b/drivers/gpu/drm/xe/abi/guc_errors_abi.h
index ecf748fd87df..ad76b4baf42e 100644
--- a/drivers/gpu/drm/xe/abi/guc_errors_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_errors_abi.h
@@ -63,6 +63,7 @@ enum xe_guc_load_status {
XE_GUC_LOAD_STATUS_HWCONFIG_START = 0x05,
XE_GUC_LOAD_STATUS_HWCONFIG_DONE = 0x06,
XE_GUC_LOAD_STATUS_HWCONFIG_ERROR = 0x07,
+ XE_GUC_LOAD_STATUS_BOOTROM_VERSION_MISMATCH = 0x08,
XE_GUC_LOAD_STATUS_GDT_DONE = 0x10,
XE_GUC_LOAD_STATUS_IDT_DONE = 0x20,
XE_GUC_LOAD_STATUS_LAPIC_DONE = 0x30,
@@ -75,6 +76,8 @@ enum xe_guc_load_status {
XE_GUC_LOAD_STATUS_INVALID_INIT_DATA_RANGE_START,
XE_GUC_LOAD_STATUS_MPU_DATA_INVALID = 0x73,
XE_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID = 0x74,
+ XE_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR = 0x75,
+ XE_GUC_LOAD_STATUS_INVALID_FTR_FLAG = 0x76,
XE_GUC_LOAD_STATUS_INVALID_INIT_DATA_RANGE_END,
XE_GUC_LOAD_STATUS_READY = 0xF0,
diff --git a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
index 0366a9da5977..265a135e7061 100644
--- a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
@@ -17,6 +17,7 @@
* | 0 | 31:16 | **KEY** - KLV key identifier |
* | | | - `GuC Self Config KLVs`_ |
* | | | - `GuC Opt In Feature KLVs`_ |
+ * | | | - `GuC Scheduling Policies KLVs`_ |
* | | | - `GuC VGT Policy KLVs`_ |
* | | | - `GuC VF Configuration KLVs`_ |
* | | | |
@@ -153,6 +154,30 @@ enum {
#define GUC_KLV_OPT_IN_FEATURE_DYNAMIC_INHIBIT_CONTEXT_SWITCH_LEN 0u
/**
+ * DOC: GuC Scheduling Policies KLVs
+ *
+ * `GuC KLV`_ keys available for use with UPDATE_SCHEDULING_POLICIES_KLV.
+ *
+ * _`GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD` : 0x1001
+ * Some platforms do not allow concurrent execution of RCS and CCS
+ * workloads from different address spaces. By default, the GuC prioritizes
+ * RCS submissions over CCS ones, which can lead to CCS workloads being
+ * significantly (or completely) starved of execution time. This KLV allows
+ * the driver to specify a quantum (in ms) and a ratio (percentage value
+ * between 0 and 100), and the GuC will prioritize the CCS for that
+ * percentage of each quantum. For example, specifying 100ms and 30% will
+ * make the GuC prioritize the CCS for 30ms of every 100ms.
+ * Note that this does not necessarily mean that RCS and CCS engines will
+ * only be active for their percentage of the quantum, as the restriction
+ * only kicks in if both classes are fully busy with non-compatible address
+ * spaces; i.e., if one engine is idle or running the same address space,
+ * a pending job on the other engine will still be submitted to the HW no
+ * matter what the ratio is.
+ */
+#define GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD_KEY 0x1001
+#define GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD_LEN 2u
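Following the KLV layout documented at the top of this header (KEY in bits 31:16 of dword 0,
LEN in bits 15:0, then LEN value dwords), a client would pack this policy roughly as sketched
below. The ordering of the two payload dwords (quantum before ratio) is an assumption made for
illustration, not something this header specifies:

#include <stdint.h>

#define YIELD_KEY 0x1001u	/* GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD_KEY */
#define YIELD_LEN 2u		/* two value dwords */

static void pack_rcs_ccs_yield_klv(uint32_t klv[3], uint32_t quantum_ms, uint32_t ratio_pct)
{
	klv[0] = (YIELD_KEY << 16) | YIELD_LEN;	/* header dword */
	klv[1] = quantum_ms;			/* assumed dword order */
	klv[2] = ratio_pct;			/* percentage, 0..100 */
}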
+
+/**
* DOC: GuC VGT Policy KLVs
*
* `GuC KLV`_ keys available for use with PF2GUC_UPDATE_VGT_POLICY.
@@ -390,12 +415,14 @@ enum {
*/
enum xe_guc_klv_ids {
GUC_WORKAROUND_KLV_BLOCK_INTERRUPTS_WHEN_MGSR_BLOCKED = 0x9002,
+ GUC_WORKAROUND_KLV_DISABLE_PSMI_INTERRUPTS_AT_C6_ENTRY_RESTORE_AT_EXIT = 0x9004,
GUC_WORKAROUND_KLV_ID_GAM_PFQ_SHADOW_TAIL_POLLING = 0x9005,
GUC_WORKAROUND_KLV_ID_DISABLE_MTP_DURING_ASYNC_COMPUTE = 0x9007,
GUC_WA_KLV_NP_RD_WRITE_TO_CLEAR_RCSM_AT_CGP_LATE_RESTORE = 0x9008,
GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET = 0x9009,
GUC_WA_KLV_WAKE_POWER_DOMAINS_FOR_OUTBOUND_MMIO = 0x900a,
GUC_WA_KLV_RESET_BB_STACK_PTR_ON_VF_SWITCH = 0x900b,
+ GUC_WA_KLV_RESTORE_UNSAVED_MEDIA_CONTROL_REG = 0x900c,
};
#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
index 41d39d67817a..f097fc6d5127 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
@@ -8,6 +8,7 @@
#include "xe_ttm_stolen_mgr.h"
#include "xe_res_cursor.h"
+#include "xe_validation.h"
struct xe_bo;
@@ -21,7 +22,7 @@ static inline int i915_gem_stolen_insert_node_in_range(struct xe_device *xe,
u32 start, u32 end)
{
struct xe_bo *bo;
- int err;
+ int err = 0;
u32 flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_STOLEN;
if (start < SZ_4K)
@@ -32,21 +33,13 @@ static inline int i915_gem_stolen_insert_node_in_range(struct xe_device *xe,
start = ALIGN(start, align);
}
- bo = xe_bo_create_locked_range(xe, xe_device_get_root_tile(xe),
- NULL, size, start, end,
- ttm_bo_type_kernel, flags, 0);
+ bo = xe_bo_create_pin_range_novm(xe, xe_device_get_root_tile(xe),
+ size, start, end, ttm_bo_type_kernel, flags);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
bo = NULL;
return err;
}
- err = xe_bo_pin(bo);
- xe_bo_unlock_vm_held(bo);
-
- if (err) {
- xe_bo_put(fb->bo);
- bo = NULL;
- }
fb->bo = bo;
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
index 9b7572e06f34..b8269391bc69 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
@@ -12,7 +12,6 @@
#include <drm/drm_drv.h>
-#include "i915_utils.h"
#include "xe_device.h" /* for xe_device_has_flat_ccs() */
#include "xe_device_types.h"
@@ -26,34 +25,13 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
#define IS_I915G(dev_priv) (dev_priv && 0)
#define IS_I915GM(dev_priv) (dev_priv && 0)
#define IS_PINEVIEW(dev_priv) (dev_priv && 0)
-#define IS_IVYBRIDGE(dev_priv) (dev_priv && 0)
#define IS_VALLEYVIEW(dev_priv) (dev_priv && 0)
#define IS_CHERRYVIEW(dev_priv) (dev_priv && 0)
#define IS_HASWELL(dev_priv) (dev_priv && 0)
#define IS_BROADWELL(dev_priv) (dev_priv && 0)
-#define IS_SKYLAKE(dev_priv) (dev_priv && 0)
#define IS_BROXTON(dev_priv) (dev_priv && 0)
-#define IS_KABYLAKE(dev_priv) (dev_priv && 0)
#define IS_GEMINILAKE(dev_priv) (dev_priv && 0)
-#define IS_COFFEELAKE(dev_priv) (dev_priv && 0)
-#define IS_COMETLAKE(dev_priv) (dev_priv && 0)
-#define IS_ICELAKE(dev_priv) (dev_priv && 0)
-#define IS_JASPERLAKE(dev_priv) (dev_priv && 0)
-#define IS_ELKHARTLAKE(dev_priv) (dev_priv && 0)
-#define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_TIGERLAKE)
-#define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_ROCKETLAKE)
-#define IS_DG1(dev_priv) IS_PLATFORM(dev_priv, XE_DG1)
-#define IS_ALDERLAKE_S(dev_priv) IS_PLATFORM(dev_priv, XE_ALDERLAKE_S)
-#define IS_ALDERLAKE_P(dev_priv) (IS_PLATFORM(dev_priv, XE_ALDERLAKE_P) || \
- IS_PLATFORM(dev_priv, XE_ALDERLAKE_N))
#define IS_DG2(dev_priv) IS_PLATFORM(dev_priv, XE_DG2)
-#define IS_METEORLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_METEORLAKE)
-#define IS_LUNARLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_LUNARLAKE)
-#define IS_BATTLEMAGE(dev_priv) IS_PLATFORM(dev_priv, XE_BATTLEMAGE)
-#define IS_PANTHERLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_PANTHERLAKE)
-
-#define IS_HASWELL_ULT(dev_priv) (dev_priv && 0)
-#define IS_BROADWELL_ULT(dev_priv) (dev_priv && 0)
#define IS_MOBILE(xe) (xe && 0)
diff --git a/drivers/gpu/drm/xe/display/ext/i915_utils.c b/drivers/gpu/drm/xe/display/ext/i915_utils.c
index 43b10a2cc508..1421c2a7b64d 100644
--- a/drivers/gpu/drm/xe/display/ext/i915_utils.c
+++ b/drivers/gpu/drm/xe/display/ext/i915_utils.c
@@ -4,6 +4,7 @@
*/
#include "i915_drv.h"
+#include "i915_utils.h"
bool i915_vtd_active(struct drm_i915_private *i915)
{
diff --git a/drivers/gpu/drm/xe/display/intel_bo.c b/drivers/gpu/drm/xe/display/intel_bo.c
index 910632f57c3d..27437c22bd70 100644
--- a/drivers/gpu/drm/xe/display/intel_bo.c
+++ b/drivers/gpu/drm/xe/display/intel_bo.c
@@ -1,12 +1,7 @@
// SPDX-License-Identifier: MIT
/* Copyright © 2024 Intel Corporation */
-#include <drm/drm_cache.h>
#include <drm/drm_gem.h>
-#include <drm/drm_panic.h>
-
-#include "intel_fb.h"
-#include "intel_display_types.h"
#include "xe_bo.h"
#include "intel_bo.h"
@@ -64,89 +59,3 @@ void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
{
/* FIXME */
}
-
-struct xe_panic_data {
- struct page **pages;
- int page;
- void *vaddr;
-};
-
-struct xe_framebuffer {
- struct intel_framebuffer base;
- struct xe_panic_data panic;
-};
-
-static inline struct xe_panic_data *to_xe_panic_data(struct intel_framebuffer *fb)
-{
- return &container_of_const(fb, struct xe_framebuffer, base)->panic;
-}
-
-static void xe_panic_kunmap(struct xe_panic_data *panic)
-{
- if (panic->vaddr) {
- drm_clflush_virt_range(panic->vaddr, PAGE_SIZE);
- kunmap_local(panic->vaddr);
- panic->vaddr = NULL;
- }
-}
-
-/*
- * The scanout buffer pages are not mapped, so for each pixel,
- * use kmap_local_page_try_from_panic() to map the page, and write the pixel.
- * Try to keep the map from the previous pixel, to avoid too much map/unmap.
- */
-static void xe_panic_page_set_pixel(struct drm_scanout_buffer *sb, unsigned int x,
- unsigned int y, u32 color)
-{
- struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
- struct xe_panic_data *panic = to_xe_panic_data(fb);
- struct xe_bo *bo = gem_to_xe_bo(intel_fb_bo(&fb->base));
- unsigned int new_page;
- unsigned int offset;
-
- if (fb->panic_tiling)
- offset = fb->panic_tiling(sb->width, x, y);
- else
- offset = y * sb->pitch[0] + x * sb->format->cpp[0];
-
- new_page = offset >> PAGE_SHIFT;
- offset = offset % PAGE_SIZE;
- if (new_page != panic->page) {
- xe_panic_kunmap(panic);
- panic->page = new_page;
- panic->vaddr = ttm_bo_kmap_try_from_panic(&bo->ttm,
- panic->page);
- }
- if (panic->vaddr) {
- u32 *pix = panic->vaddr + offset;
- *pix = color;
- }
-}
-
-struct intel_framebuffer *intel_bo_alloc_framebuffer(void)
-{
- struct xe_framebuffer *xe_fb;
-
- xe_fb = kzalloc(sizeof(*xe_fb), GFP_KERNEL);
- if (xe_fb)
- return &xe_fb->base;
- return NULL;
-}
-
-int intel_bo_panic_setup(struct drm_scanout_buffer *sb)
-{
- struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
- struct xe_panic_data *panic = to_xe_panic_data(fb);
-
- panic->page = -1;
- sb->set_pixel = xe_panic_page_set_pixel;
- return 0;
-}
-
-void intel_bo_panic_finish(struct intel_framebuffer *fb)
-{
- struct xe_panic_data *panic = to_xe_panic_data(fb);
-
- xe_panic_kunmap(panic);
- panic->page = -1;
-}
diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
index fba9617a75a5..8ea9a472113c 100644
--- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
@@ -41,12 +41,12 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
size = PAGE_ALIGN(size);
obj = ERR_PTR(-ENODEV);
- if (!IS_DGFX(xe) && !XE_WA(xe_root_mmio_gt(xe), 22019338487_display)) {
- obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe),
- NULL, size,
- ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
- XE_BO_FLAG_STOLEN |
- XE_BO_FLAG_GGTT);
+ if (!IS_DGFX(xe) && !XE_GT_WA(xe_root_mmio_gt(xe), 22019338487_display)) {
+ obj = xe_bo_create_pin_map_novm(xe, xe_device_get_root_tile(xe),
+ size,
+ ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
+ XE_BO_FLAG_STOLEN |
+ XE_BO_FLAG_GGTT, false);
if (!IS_ERR(obj))
drm_info(&xe->drm, "Allocated fbdev into stolen\n");
else
@@ -54,10 +54,10 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
}
if (IS_ERR(obj)) {
- obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, size,
- ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
- XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
- XE_BO_FLAG_GGTT);
+ obj = xe_bo_create_pin_map_novm(xe, xe_device_get_root_tile(xe), size,
+ ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
+ XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
+ XE_BO_FLAG_GGTT, false);
}
if (IS_ERR(obj)) {
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index e2e0771cf274..19e691fccf8c 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -20,7 +20,7 @@
#include "intel_audio.h"
#include "intel_bw.h"
#include "intel_display.h"
-#include "intel_display_core.h"
+#include "intel_display_device.h"
#include "intel_display_driver.h"
#include "intel_display_irq.h"
#include "intel_display_types.h"
@@ -37,13 +37,6 @@
/* Xe device functions */
-static bool has_display(struct xe_device *xe)
-{
- struct intel_display *display = xe->display;
-
- return HAS_DISPLAY(display);
-}
-
/**
* xe_display_driver_probe_defer - Detect if we need to wait for other drivers
* early on
@@ -96,6 +89,7 @@ static void xe_display_fini_early(void *arg)
if (!xe->info.probe_display)
return;
+ intel_hpd_cancel_work(display);
intel_display_driver_remove_nogem(display);
intel_display_driver_remove_noirq(display);
intel_opregion_cleanup(display);
@@ -289,7 +283,7 @@ static void xe_display_enable_d3cold(struct xe_device *xe)
intel_dmc_suspend(display);
- if (has_display(xe))
+ if (intel_display_device_present(display))
intel_hpd_poll_enable(display);
}
@@ -302,14 +296,14 @@ static void xe_display_disable_d3cold(struct xe_device *xe)
intel_dmc_resume(display);
- if (has_display(xe))
+ if (intel_display_device_present(display))
drm_mode_config_reset(&xe->drm);
intel_display_driver_init_hw(display);
intel_hpd_init(display);
- if (has_display(xe))
+ if (intel_display_device_present(display))
intel_hpd_poll_disable(display);
intel_opregion_resume(display);
@@ -332,7 +326,7 @@ void xe_display_pm_suspend(struct xe_device *xe)
intel_power_domains_disable(display);
drm_client_dev_suspend(&xe->drm, false);
- if (has_display(xe)) {
+ if (intel_display_device_present(display)) {
drm_kms_helper_poll_disable(&xe->drm);
intel_display_driver_disable_user_access(display);
intel_display_driver_suspend(display);
@@ -340,9 +334,11 @@ void xe_display_pm_suspend(struct xe_device *xe)
xe_display_flush_cleanup_work(xe);
+ intel_encoder_block_all_hpds(display);
+
intel_hpd_cancel_work(display);
- if (has_display(xe)) {
+ if (intel_display_device_present(display)) {
intel_display_driver_suspend_access(display);
intel_encoder_suspend_all(display);
}
@@ -362,7 +358,7 @@ void xe_display_pm_shutdown(struct xe_device *xe)
intel_power_domains_disable(display);
drm_client_dev_suspend(&xe->drm, false);
- if (has_display(xe)) {
+ if (intel_display_device_present(display)) {
drm_kms_helper_poll_disable(&xe->drm);
intel_display_driver_disable_user_access(display);
intel_display_driver_suspend(display);
@@ -370,9 +366,10 @@ void xe_display_pm_shutdown(struct xe_device *xe)
xe_display_flush_cleanup_work(xe);
intel_dp_mst_suspend(display);
+ intel_encoder_block_all_hpds(display);
intel_hpd_cancel_work(display);
- if (has_display(xe))
+ if (intel_display_device_present(display))
intel_display_driver_suspend_access(display);
intel_encoder_suspend_all(display);
@@ -461,23 +458,25 @@ void xe_display_pm_resume(struct xe_device *xe)
intel_dmc_resume(display);
- if (has_display(xe))
+ if (intel_display_device_present(display))
drm_mode_config_reset(&xe->drm);
intel_display_driver_init_hw(display);
- if (has_display(xe))
+ if (intel_display_device_present(display))
intel_display_driver_resume_access(display);
intel_hpd_init(display);
- if (has_display(xe)) {
+ intel_encoder_unblock_all_hpds(display);
+
+ if (intel_display_device_present(display)) {
intel_display_driver_resume(display);
drm_kms_helper_poll_enable(&xe->drm);
intel_display_driver_enable_user_access(display);
}
- if (has_display(xe))
+ if (intel_display_device_present(display))
intel_hpd_poll_disable(display);
intel_opregion_resume(display);
@@ -542,7 +541,7 @@ int xe_display_probe(struct xe_device *xe)
xe->display = display;
- if (has_display(xe))
+ if (intel_display_device_present(display))
return 0;
no_display:
diff --git a/drivers/gpu/drm/xe/display/xe_display_wa.c b/drivers/gpu/drm/xe/display/xe_display_wa.c
index 68d1387d81a0..8ada1cbcb16c 100644
--- a/drivers/gpu/drm/xe/display/xe_display_wa.c
+++ b/drivers/gpu/drm/xe/display/xe_display_wa.c
@@ -14,5 +14,5 @@ bool intel_display_needs_wa_16023588340(struct intel_display *display)
{
struct xe_device *xe = to_xe_device(display->drm);
- return XE_WA(xe_root_mmio_gt(xe), 16023588340);
+ return XE_GT_WA(xe_root_mmio_gt(xe), 16023588340);
}
diff --git a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
index 9f941fc2e36b..58581d7aaae6 100644
--- a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
+++ b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
@@ -43,11 +43,11 @@ bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *d
return false;
/* Set scanout flag for WC mapping */
- obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe),
- NULL, PAGE_ALIGN(size),
- ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
- XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT);
+ obj = xe_bo_create_pin_map_novm(xe, xe_device_get_root_tile(xe),
+ PAGE_ALIGN(size),
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
+ XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT, false);
if (IS_ERR(obj)) {
kfree(vma);
return false;
diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
index c38fba18effe..1fd4a815e784 100644
--- a/drivers/gpu/drm/xe/display/xe_fb_pin.c
+++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c
@@ -16,6 +16,7 @@
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_pm.h"
+#include "xe_vram_types.h"
static void
write_dpt_rotated(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, u32 bo_ofs,
@@ -101,29 +102,29 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb,
XE_PAGE_SIZE);
if (IS_DGFX(xe))
- dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL,
- dpt_size, ~0ull,
- ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM0 |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_PAGETABLE,
- alignment);
+ dpt = xe_bo_create_pin_map_at_novm(xe, tile0,
+ dpt_size, ~0ull,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_VRAM0 |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_PAGETABLE,
+ alignment, false);
else
- dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL,
- dpt_size, ~0ull,
- ttm_bo_type_kernel,
- XE_BO_FLAG_STOLEN |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_PAGETABLE,
- alignment);
+ dpt = xe_bo_create_pin_map_at_novm(xe, tile0,
+ dpt_size, ~0ull,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_STOLEN |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_PAGETABLE,
+ alignment, false);
if (IS_ERR(dpt))
- dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL,
- dpt_size, ~0ull,
- ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_PAGETABLE,
- alignment);
+ dpt = xe_bo_create_pin_map_at_novm(xe, tile0,
+ dpt_size, ~0ull,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_PAGETABLE,
+ alignment, false);
if (IS_ERR(dpt))
return PTR_ERR(dpt);
@@ -280,7 +281,9 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
struct drm_gem_object *obj = intel_fb_bo(&fb->base);
struct xe_bo *bo = gem_to_xe_bo(obj);
- int ret;
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
+ int ret = 0;
if (!vma)
return ERR_PTR(-ENODEV);
@@ -289,7 +292,7 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
if (IS_DGFX(to_xe_device(bo->ttm.base.dev)) &&
intel_fb_rc_ccs_cc_plane(&fb->base) >= 0 &&
!(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS)) {
- struct xe_tile *tile = xe_device_get_root_tile(xe);
+ struct xe_vram_region *vram = xe_device_get_root_tile(xe)->mem.vram;
/*
* If we need to be able to access the clear-color value stored in
@@ -297,7 +300,7 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
* accessible. This is important on small-bar systems where
* only some subset of VRAM is CPU accessible.
*/
- if (tile->mem.vram.io_size < tile->mem.vram.usable_size) {
+ if (xe_vram_region_io_size(vram) < xe_vram_region_usable_size(vram)) {
ret = -EINVAL;
goto err;
}
@@ -307,17 +310,22 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
* Pin the framebuffer, we can't use xe_bo_(un)pin functions as the
* assumptions are incorrect for framebuffers
*/
- ret = ttm_bo_reserve(&bo->ttm, false, false, NULL);
- if (ret)
- goto err;
-
- if (IS_DGFX(xe))
- ret = xe_bo_migrate(bo, XE_PL_VRAM0);
- else
- ret = xe_bo_validate(bo, NULL, true);
- if (!ret)
- ttm_bo_pin(&bo->ttm);
- ttm_bo_unreserve(&bo->ttm);
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = true},
+ ret) {
+ ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
+ drm_exec_retry_on_contention(&exec);
+ if (ret)
+ break;
+
+ if (IS_DGFX(xe))
+ ret = xe_bo_migrate(bo, XE_PL_VRAM0, NULL, &exec);
+ else
+ ret = xe_bo_validate(bo, NULL, true, &exec);
+ drm_exec_retry_on_contention(&exec);
+ xe_validation_retry_on_oom(&ctx, &ret);
+ if (!ret)
+ ttm_bo_pin(&bo->ttm);
+ }
if (ret)
goto err;
@@ -382,6 +390,7 @@ static bool reuse_vma(struct intel_plane_state *new_plane_state,
const struct intel_plane_state *old_plane_state)
{
struct intel_framebuffer *fb = to_intel_framebuffer(new_plane_state->hw.fb);
+ struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
struct xe_device *xe = to_xe_device(fb->base.dev);
struct intel_display *display = xe->display;
struct i915_vma *vma;
@@ -405,6 +414,10 @@ static bool reuse_vma(struct intel_plane_state *new_plane_state,
found:
refcount_inc(&vma->ref);
new_plane_state->ggtt_vma = vma;
+
+ new_plane_state->surf = i915_ggtt_offset(new_plane_state->ggtt_vma) +
+ plane->surf_offset(new_plane_state);
+
return true;
}
@@ -431,6 +444,10 @@ int intel_plane_pin_fb(struct intel_plane_state *new_plane_state,
return PTR_ERR(vma);
new_plane_state->ggtt_vma = vma;
+
+ new_plane_state->surf = i915_ggtt_offset(new_plane_state->ggtt_vma) +
+ plane->surf_offset(new_plane_state);
+
return 0;
}
diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
index 30f1073141fc..4ae847b628e2 100644
--- a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
+++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
@@ -72,10 +72,10 @@ static int intel_hdcp_gsc_initialize_message(struct xe_device *xe,
int ret = 0;
/* allocate object of two page for HDCP command memory and store it */
- bo = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, PAGE_SIZE * 2,
- ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM |
- XE_BO_FLAG_GGTT);
+ bo = xe_bo_create_pin_map_novm(xe, xe_device_get_root_tile(xe), PAGE_SIZE * 2,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT, false);
if (IS_ERR(bo)) {
drm_err(&xe->drm, "Failed to allocate bo for HDCP streaming command!\n");
diff --git a/drivers/gpu/drm/xe/display/xe_panic.c b/drivers/gpu/drm/xe/display/xe_panic.c
new file mode 100644
index 000000000000..f32b23338331
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_panic.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include <drm/drm_cache.h>
+#include <drm/drm_panic.h>
+
+#include "intel_display_types.h"
+#include "intel_fb.h"
+#include "intel_panic.h"
+#include "xe_bo.h"
+
+struct intel_panic {
+ struct page **pages;
+ int page;
+ void *vaddr;
+};
+
+static void xe_panic_kunmap(struct intel_panic *panic)
+{
+ if (panic->vaddr) {
+ drm_clflush_virt_range(panic->vaddr, PAGE_SIZE);
+ kunmap_local(panic->vaddr);
+ panic->vaddr = NULL;
+ }
+}
+
+/*
+ * The scanout buffer pages are not mapped, so for each pixel,
+ * use kmap_local_page_try_from_panic() to map the page, and write the pixel.
+ * Try to keep the map from the previous pixel, to avoid too much map/unmap.
+ */
+static void xe_panic_page_set_pixel(struct drm_scanout_buffer *sb, unsigned int x,
+ unsigned int y, u32 color)
+{
+ struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
+ struct intel_panic *panic = fb->panic;
+ struct xe_bo *bo = gem_to_xe_bo(intel_fb_bo(&fb->base));
+ unsigned int new_page;
+ unsigned int offset;
+
+ if (fb->panic_tiling)
+ offset = fb->panic_tiling(sb->width, x, y);
+ else
+ offset = y * sb->pitch[0] + x * sb->format->cpp[0];
+
+ new_page = offset >> PAGE_SHIFT;
+ offset = offset % PAGE_SIZE;
+ if (new_page != panic->page) {
+ xe_panic_kunmap(panic);
+ panic->page = new_page;
+ panic->vaddr = ttm_bo_kmap_try_from_panic(&bo->ttm,
+ panic->page);
+ }
+ if (panic->vaddr) {
+ u32 *pix = panic->vaddr + offset;
+ *pix = color;
+ }
+}
+
+struct intel_panic *intel_panic_alloc(void)
+{
+ struct intel_panic *panic;
+
+ panic = kzalloc(sizeof(*panic), GFP_KERNEL);
+
+ return panic;
+}
+
+int intel_panic_setup(struct intel_panic *panic, struct drm_scanout_buffer *sb)
+{
+ panic->page = -1;
+ sb->set_pixel = xe_panic_page_set_pixel;
+ return 0;
+}
+
+void intel_panic_finish(struct intel_panic *panic)
+{
+ xe_panic_kunmap(panic);
+ panic->page = -1;
+}
diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c
index dcbc4b2d3fd9..94f00def811b 100644
--- a/drivers/gpu/drm/xe/display/xe_plane_initial.c
+++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c
@@ -10,6 +10,7 @@
#include "xe_ggtt.h"
#include "xe_mmio.h"
+#include "i915_vma.h"
#include "intel_crtc.h"
#include "intel_display.h"
#include "intel_display_core.h"
@@ -21,6 +22,7 @@
#include "intel_plane.h"
#include "intel_plane_initial.h"
#include "xe_bo.h"
+#include "xe_vram_types.h"
#include "xe_wa.h"
#include <generated/xe_wa_oob.h>
@@ -103,7 +105,7 @@ initial_plane_bo(struct xe_device *xe,
* We don't currently expect this to ever be placed in the
* stolen portion.
*/
- if (phys_base >= tile0->mem.vram.usable_size) {
+ if (phys_base >= xe_vram_region_usable_size(tile0->mem.vram)) {
drm_err(&xe->drm,
"Initial plane programming using invalid range, phys_base=%pa\n",
&phys_base);
@@ -121,7 +123,7 @@ initial_plane_bo(struct xe_device *xe,
phys_base = base;
flags |= XE_BO_FLAG_STOLEN;
- if (XE_WA(xe_root_mmio_gt(xe), 22019338487_display))
+ if (XE_GT_WA(xe_root_mmio_gt(xe), 22019338487_display))
return NULL;
/*
@@ -138,8 +140,8 @@ initial_plane_bo(struct xe_device *xe,
page_size);
size -= base;
- bo = xe_bo_create_pin_map_at(xe, tile0, NULL, size, phys_base,
- ttm_bo_type_kernel, flags);
+ bo = xe_bo_create_pin_map_at_novm(xe, tile0, size, phys_base,
+ ttm_bo_type_kernel, flags, 0, false);
if (IS_ERR(bo)) {
drm_dbg(&xe->drm,
"Failed to create bo phys_base=%pa size %u with flags %x: %li\n",
@@ -234,6 +236,9 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
goto nofb;
plane_state->ggtt_vma = vma;
+
+ plane_state->surf = i915_ggtt_offset(plane_state->ggtt_vma);
+
plane_state->uapi.src_x = 0;
plane_state->uapi.src_y = 0;
plane_state->uapi.src_w = fb->width << 16;
diff --git a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
index e3f5e8bb3ebc..c47b290e0e9f 100644
--- a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
+++ b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
@@ -65,6 +65,7 @@
#define MI_LOAD_REGISTER_MEM (__MI_INSTR(0x29) | XE_INSTR_NUM_DW(4))
#define MI_LRM_USE_GGTT REG_BIT(22)
+#define MI_LRM_ASYNC REG_BIT(21)
#define MI_LOAD_REGISTER_REG (__MI_INSTR(0x2a) | XE_INSTR_NUM_DW(3))
#define MI_LRR_DST_CS_MMIO REG_BIT(19)
diff --git a/drivers/gpu/drm/xe/regs/xe_bars.h b/drivers/gpu/drm/xe/regs/xe_bars.h
index ce05b6ae832f..880140d6ccdc 100644
--- a/drivers/gpu/drm/xe/regs/xe_bars.h
+++ b/drivers/gpu/drm/xe/regs/xe_bars.h
@@ -7,5 +7,6 @@
#define GTTMMADR_BAR 0 /* MMIO + GTT */
#define LMEM_BAR 2 /* VRAM */
+#define VF_LMEM_BAR 9 /* VF VRAM */
#endif
diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
index 7ade41e2b7b3..f4c3e1187a00 100644
--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
@@ -111,6 +111,9 @@
#define PPHWSP_CSB_AND_TIMESTAMP_REPORT_DIS REG_BIT(14)
#define CS_PRIORITY_MEM_READ REG_BIT(7)
+#define CS_DEBUG_MODE2(base) XE_REG((base) + 0xd8, XE_REG_OPTION_MASKED)
+#define INSTRUCTION_STATE_CACHE_INVALIDATE REG_BIT(6)
+
#define FF_SLICE_CS_CHICKEN1(base) XE_REG((base) + 0xe0, XE_REG_OPTION_MASKED)
#define FFSC_PERCTX_PREEMPT_CTRL REG_BIT(14)
diff --git a/drivers/gpu/drm/xe/regs/xe_gsc_regs.h b/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
index 9b66cc972a63..180be82672ab 100644
--- a/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
@@ -13,6 +13,8 @@
/* Definitions of GSC H/W registers, bits, etc */
+#define BMG_GSC_HECI1_BASE 0x373000
+
#define MTL_GSC_HECI1_BASE 0x00116000
#define MTL_GSC_HECI2_BASE 0x00117000
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index 5cd5ab8529c5..06cb6b02ec64 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -42,7 +42,7 @@
#define FORCEWAKE_ACK_GSC XE_REG(0xdf8)
#define FORCEWAKE_ACK_GT_MTL XE_REG(0xdfc)
-#define MCFG_MCR_SELECTOR XE_REG(0xfd0)
+#define STEER_SEMAPHORE XE_REG(0xfd0)
#define MTL_MCR_SELECTOR XE_REG(0xfd4)
#define SF_MCR_SELECTOR XE_REG(0xfd8)
#define MCR_SELECTOR XE_REG(0xfdc)
@@ -522,6 +522,7 @@
#define TDL_CHICKEN XE_REG_MCR(0xe5f4, XE_REG_OPTION_MASKED)
#define QID_WAIT_FOR_THREAD_NOT_RUN_DISABLE REG_BIT(12)
+#define EUSTALL_PERF_SAMPLING_DISABLE REG_BIT(5)
#define LSC_CHICKEN_BIT_0 XE_REG_MCR(0xe7c8)
#define DISABLE_D8_D16_COASLESCE REG_BIT(30)
diff --git a/drivers/gpu/drm/xe/regs/xe_hw_error_regs.h b/drivers/gpu/drm/xe/regs/xe_hw_error_regs.h
new file mode 100644
index 000000000000..c146b9ef44eb
--- /dev/null
+++ b/drivers/gpu/drm/xe/regs/xe_hw_error_regs.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_HW_ERROR_REGS_H_
+#define _XE_HW_ERROR_REGS_H_
+
+#define HEC_UNCORR_ERR_STATUS(base) XE_REG((base) + 0x118)
+#define UNCORR_FW_REPORTED_ERR BIT(6)
+
+#define HEC_UNCORR_FW_ERR_DW0(base) XE_REG((base) + 0x124)
+
+#define DEV_ERR_STAT_NONFATAL 0x100178
+#define DEV_ERR_STAT_CORRECTABLE 0x10017c
+#define DEV_ERR_STAT_REG(x) XE_REG(_PICK_EVEN((x), \
+ DEV_ERR_STAT_CORRECTABLE, \
+ DEV_ERR_STAT_NONFATAL))
+#define XE_CSC_ERROR BIT(17)
+#endif
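
For reference, DEV_ERR_STAT_REG() relies on the driver's _PICK_EVEN() helper to interpolate between the two register offsets. A minimal user-space sketch, assuming _PICK_EVEN(idx, a, b) expands to (a) + (idx) * ((b) - (a)) as in the xe/i915 register headers:

    #include <assert.h>

    /* Assumed expansion of the driver's _PICK_EVEN() helper. */
    #define _PICK_EVEN(idx, a, b) ((a) + (idx) * ((b) - (a)))

    #define DEV_ERR_STAT_NONFATAL       0x100178
    #define DEV_ERR_STAT_CORRECTABLE    0x10017c

    int main(void)
    {
        /* Index 0 selects the correctable register, index 1 the non-fatal one. */
        assert(_PICK_EVEN(0, DEV_ERR_STAT_CORRECTABLE, DEV_ERR_STAT_NONFATAL) == 0x10017c);
        assert(_PICK_EVEN(1, DEV_ERR_STAT_CORRECTABLE, DEV_ERR_STAT_NONFATAL) == 0x100178);
        return 0;
    }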
diff --git a/drivers/gpu/drm/xe/regs/xe_irq_regs.h b/drivers/gpu/drm/xe/regs/xe_irq_regs.h
index 13635e4331d4..7c2a3a140142 100644
--- a/drivers/gpu/drm/xe/regs/xe_irq_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_irq_regs.h
@@ -18,6 +18,7 @@
#define GFX_MSTR_IRQ XE_REG(0x190010, XE_REG_OPTION_VF)
#define MASTER_IRQ REG_BIT(31)
#define GU_MISC_IRQ REG_BIT(29)
+#define ERROR_IRQ(x) REG_BIT(26 + (x))
#define DISPLAY_IRQ REG_BIT(16)
#define I2C_IRQ REG_BIT(12)
#define GT_DW_IRQ(x) REG_BIT(x)
diff --git a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
index 1b101edb838b..b5eff383902c 100644
--- a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
+++ b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
@@ -40,7 +40,4 @@
#define INDIRECT_CTX_RING_START_UDW (0x08 + 1)
#define INDIRECT_CTX_RING_CTL (0x0a + 1)
-#define CTX_INDIRECT_CTX_OFFSET_MASK REG_GENMASK(15, 6)
-#define CTX_INDIRECT_CTX_OFFSET_DEFAULT REG_FIELD_PREP(CTX_INDIRECT_CTX_OFFSET_MASK, 0xd)
-
#endif
diff --git a/drivers/gpu/drm/xe/regs/xe_pmt.h b/drivers/gpu/drm/xe/regs/xe_pmt.h
index 2995d72c3f78..264e9baf949c 100644
--- a/drivers/gpu/drm/xe/regs/xe_pmt.h
+++ b/drivers/gpu/drm/xe/regs/xe_pmt.h
@@ -21,4 +21,14 @@
#define SG_REMAP_INDEX1 XE_REG(SOC_BASE + 0x08)
#define SG_REMAP_BITS REG_GENMASK(31, 24)
+#define BMG_MODS_RESIDENCY_OFFSET (0x4D0)
+#define BMG_G2_RESIDENCY_OFFSET (0x530)
+#define BMG_G6_RESIDENCY_OFFSET (0x538)
+#define BMG_G8_RESIDENCY_OFFSET (0x540)
+#define BMG_G10_RESIDENCY_OFFSET (0x548)
+
+#define BMG_PCIE_LINK_L0_RESIDENCY_OFFSET (0x570)
+#define BMG_PCIE_LINK_L1_RESIDENCY_OFFSET (0x578)
+#define BMG_PCIE_LINK_L1_2_RESIDENCY_OFFSET (0x580)
+
#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c
index bb469096d072..2294cf89f3e1 100644
--- a/drivers/gpu/drm/xe/tests/xe_bo.c
+++ b/drivers/gpu/drm/xe/tests/xe_bo.c
@@ -23,7 +23,7 @@
static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo,
bool clear, u64 get_val, u64 assign_val,
- struct kunit *test)
+ struct kunit *test, struct drm_exec *exec)
{
struct dma_fence *fence;
struct ttm_tt *ttm;
@@ -35,7 +35,7 @@ static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo,
u32 offset;
/* Move bo to VRAM if not already there. */
- ret = xe_bo_validate(bo, NULL, false);
+ ret = xe_bo_validate(bo, NULL, false, exec);
if (ret) {
KUNIT_FAIL(test, "Failed to validate bo.\n");
return ret;
@@ -60,7 +60,7 @@ static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo,
}
/* Evict to system. CCS data should be copied. */
- ret = xe_bo_evict(bo);
+ ret = xe_bo_evict(bo, exec);
if (ret) {
KUNIT_FAIL(test, "Failed to evict bo.\n");
return ret;
@@ -132,14 +132,15 @@ static void ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile,
/* TODO: Sanity check */
unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
+ struct drm_exec *exec = XE_VALIDATION_OPT_OUT;
if (IS_DGFX(xe))
kunit_info(test, "Testing vram id %u\n", tile->id);
else
kunit_info(test, "Testing system memory\n");
- bo = xe_bo_create_user(xe, NULL, NULL, SZ_1M, DRM_XE_GEM_CPU_CACHING_WC,
- bo_flags);
+ bo = xe_bo_create_user(xe, NULL, SZ_1M, DRM_XE_GEM_CPU_CACHING_WC,
+ bo_flags, exec);
if (IS_ERR(bo)) {
KUNIT_FAIL(test, "Failed to create bo.\n");
return;
@@ -149,18 +150,18 @@ static void ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile,
kunit_info(test, "Verifying that CCS data is cleared on creation.\n");
ret = ccs_test_migrate(tile, bo, false, 0ULL, 0xdeadbeefdeadbeefULL,
- test);
+ test, exec);
if (ret)
goto out_unlock;
kunit_info(test, "Verifying that CCS data survives migration.\n");
ret = ccs_test_migrate(tile, bo, false, 0xdeadbeefdeadbeefULL,
- 0xdeadbeefdeadbeefULL, test);
+ 0xdeadbeefdeadbeefULL, test, exec);
if (ret)
goto out_unlock;
kunit_info(test, "Verifying that CCS data can be properly cleared.\n");
- ret = ccs_test_migrate(tile, bo, true, 0ULL, 0ULL, test);
+ ret = ccs_test_migrate(tile, bo, true, 0ULL, 0ULL, test, exec);
out_unlock:
xe_bo_unlock(bo);
@@ -210,6 +211,7 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
struct xe_bo *bo, *external;
unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
struct xe_vm *vm = xe_migrate_get_vm(xe_device_get_root_tile(xe)->migrate);
+ struct drm_exec *exec = XE_VALIDATION_OPT_OUT;
struct xe_gt *__gt;
int err, i, id;
@@ -218,25 +220,25 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
for (i = 0; i < 2; ++i) {
xe_vm_lock(vm, false);
- bo = xe_bo_create_user(xe, NULL, vm, 0x10000,
+ bo = xe_bo_create_user(xe, vm, 0x10000,
DRM_XE_GEM_CPU_CACHING_WC,
- bo_flags);
+ bo_flags, exec);
xe_vm_unlock(vm);
if (IS_ERR(bo)) {
KUNIT_FAIL(test, "bo create err=%pe\n", bo);
break;
}
- external = xe_bo_create_user(xe, NULL, NULL, 0x10000,
+ external = xe_bo_create_user(xe, NULL, 0x10000,
DRM_XE_GEM_CPU_CACHING_WC,
- bo_flags);
+ bo_flags, NULL);
if (IS_ERR(external)) {
KUNIT_FAIL(test, "external bo create err=%pe\n", external);
goto cleanup_bo;
}
xe_bo_lock(external, false);
- err = xe_bo_pin_external(external);
+ err = xe_bo_pin_external(external, false, exec);
xe_bo_unlock(external);
if (err) {
KUNIT_FAIL(test, "external bo pin err=%pe\n",
@@ -294,7 +296,7 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
if (i) {
down_read(&vm->lock);
xe_vm_lock(vm, false);
- err = xe_bo_validate(bo, bo->vm, false);
+ err = xe_bo_validate(bo, bo->vm, false, exec);
xe_vm_unlock(vm);
up_read(&vm->lock);
if (err) {
@@ -303,7 +305,7 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
goto cleanup_all;
}
xe_bo_lock(external, false);
- err = xe_bo_validate(external, NULL, false);
+ err = xe_bo_validate(external, NULL, false, exec);
xe_bo_unlock(external);
if (err) {
KUNIT_FAIL(test, "external bo valid err=%pe\n",
@@ -495,9 +497,9 @@ static int shrink_test_run_device(struct xe_device *xe)
INIT_LIST_HEAD(&link->link);
/* We can create bos using WC caching here. But it is slower. */
- bo = xe_bo_create_user(xe, NULL, NULL, XE_BO_SHRINK_SIZE,
+ bo = xe_bo_create_user(xe, NULL, XE_BO_SHRINK_SIZE,
DRM_XE_GEM_CPU_CACHING_WB,
- XE_BO_FLAG_SYSTEM);
+ XE_BO_FLAG_SYSTEM, NULL);
if (IS_ERR(bo)) {
if (bo != ERR_PTR(-ENOMEM) && bo != ERR_PTR(-ENOSPC) &&
bo != ERR_PTR(-EINTR) && bo != ERR_PTR(-ERESTARTSYS))
diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
index c53f67ce4b0a..a7e548a2bdfb 100644
--- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
@@ -27,7 +27,8 @@ static bool is_dynamic(struct dma_buf_test_params *params)
}
static void check_residency(struct kunit *test, struct xe_bo *exported,
- struct xe_bo *imported, struct dma_buf *dmabuf)
+ struct xe_bo *imported, struct dma_buf *dmabuf,
+ struct drm_exec *exec)
{
struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv);
u32 mem_type;
@@ -57,16 +58,12 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
return;
/*
- * Evict exporter. Note that the gem object dma_buf member isn't
- * set from xe_gem_prime_export(), and it's needed for the move_notify()
- * functionality, so hack that up here. Evicting the exported bo will
+ * Evict exporter. Evicting the exported bo will
* evict also the imported bo through the move_notify() functionality if
* importer is on a different device. If they're on the same device,
* the exporter and the importer should be the same bo.
*/
- swap(exported->ttm.base.dma_buf, dmabuf);
- ret = xe_bo_evict(exported);
- swap(exported->ttm.base.dma_buf, dmabuf);
+ ret = xe_bo_evict(exported, exec);
if (ret) {
if (ret != -EINTR && ret != -ERESTARTSYS)
KUNIT_FAIL(test, "Evicting exporter failed with err=%d.\n",
@@ -81,7 +78,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
}
/* Re-validate the importer. This should move also exporter in. */
- ret = xe_bo_validate(imported, NULL, false);
+ ret = xe_bo_validate(imported, NULL, false, exec);
if (ret) {
if (ret != -EINTR && ret != -ERESTARTSYS)
KUNIT_FAIL(test, "Validating importer failed with err=%d.\n",
@@ -89,15 +86,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
return;
}
- /*
- * If on different devices, the exporter is kept in system if
- * possible, saving a migration step as the transfer is just
- * likely as fast from system memory.
- */
- if (params->mem_mask & XE_BO_FLAG_SYSTEM)
- KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, XE_PL_TT));
- else
- KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type));
+ KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type));
if (params->force_different_devices)
KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(imported, XE_PL_TT));
@@ -125,8 +114,8 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
size = SZ_64K;
kunit_info(test, "running %s\n", __func__);
- bo = xe_bo_create_user(xe, NULL, NULL, size, DRM_XE_GEM_CPU_CACHING_WC,
- params->mem_mask);
+ bo = xe_bo_create_user(xe, NULL, size, DRM_XE_GEM_CPU_CACHING_WC,
+ params->mem_mask, NULL);
if (IS_ERR(bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
PTR_ERR(bo));
@@ -139,6 +128,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
PTR_ERR(dmabuf));
goto out;
}
+ bo->ttm.base.dma_buf = dmabuf;
import = xe_gem_prime_import(&xe->drm, dmabuf);
if (!IS_ERR(import)) {
@@ -153,11 +143,12 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
KUNIT_FAIL(test,
"xe_gem_prime_import() succeeded when it shouldn't have\n");
} else {
+ struct drm_exec *exec = XE_VALIDATION_OPT_OUT;
int err;
/* Is everything where we expect it to be? */
xe_bo_lock(import_bo, false);
- err = xe_bo_validate(import_bo, NULL, false);
+ err = xe_bo_validate(import_bo, NULL, false, exec);
/* Pinning in VRAM is not allowed. */
if (!is_dynamic(params) &&
@@ -170,7 +161,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
err == -ERESTARTSYS);
if (!err)
- check_residency(test, bo, import_bo, dmabuf);
+ check_residency(test, bo, import_bo, dmabuf, exec);
xe_bo_unlock(import_bo);
}
drm_gem_object_put(import);
@@ -186,6 +177,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
KUNIT_FAIL(test, "dynamic p2p attachment failed with err=%ld\n",
PTR_ERR(import));
}
+ bo->ttm.base.dma_buf = NULL;
dma_buf_put(dmabuf);
out:
drm_gem_object_put(&bo->ttm.base);
@@ -206,7 +198,7 @@ static const struct dma_buf_attach_ops nop2p_attach_ops = {
static const struct dma_buf_test_params test_params[] = {
{.mem_mask = XE_BO_FLAG_VRAM0,
.attach_ops = &xe_dma_buf_attach_ops},
- {.mem_mask = XE_BO_FLAG_VRAM0,
+ {.mem_mask = XE_BO_FLAG_VRAM0 | XE_BO_FLAG_NEEDS_CPU_ACCESS,
.attach_ops = &xe_dma_buf_attach_ops,
.force_different_devices = true},
@@ -238,7 +230,8 @@ static const struct dma_buf_test_params test_params[] = {
{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
.attach_ops = &xe_dma_buf_attach_ops},
- {.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
+ {.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0 |
+ XE_BO_FLAG_NEEDS_CPU_ACCESS,
.attach_ops = &xe_dma_buf_attach_ops,
.force_different_devices = true},
diff --git a/drivers/gpu/drm/xe/tests/xe_guc_g2g_test.c b/drivers/gpu/drm/xe/tests/xe_guc_g2g_test.c
new file mode 100644
index 000000000000..3b213fcae916
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_guc_g2g_test.c
@@ -0,0 +1,776 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/delay.h>
+
+#include <kunit/test.h>
+#include <kunit/visibility.h>
+
+#include "tests/xe_kunit_helpers.h"
+#include "tests/xe_pci_test.h"
+#include "tests/xe_test.h"
+
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_pm.h"
+
+/*
+ * There are different ways to allocate the G2G buffers. The plan for this test
+ * is to make sure that all the possible options work. The particular option
+ * chosen by the driver may vary from one platform to another, it may also change
+ * with time. So to ensure consistency of testing, the relevant driver code is
+ * replicated here to guarantee it won't change without the test being updated
+ * to keep testing the other options.
+ *
+ * In order to test the actual code being used by the driver, there is also the
+ * 'default' scheme. That will use the official driver routines to test whatever
+ * method the driver is using on the current platform at the current time.
+ */
+enum {
+ /* Driver defined allocation scheme */
+ G2G_CTB_TYPE_DEFAULT,
+ /* Single buffer in host memory */
+ G2G_CTB_TYPE_HOST,
+ /* Single buffer in a specific tile, loops across all tiles */
+ G2G_CTB_TYPE_TILE,
+};
+
+/*
+ * Payload is opaque to GuC. So KMD can define any structure or size it wants.
+ */
+struct g2g_test_payload {
+ u32 tx_dev;
+ u32 tx_tile;
+ u32 rx_dev;
+ u32 rx_tile;
+ u32 seqno;
+};
+
+static void g2g_test_send(struct kunit *test, struct xe_guc *guc,
+ u32 far_tile, u32 far_dev,
+ struct g2g_test_payload *payload)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
+ u32 *action, total;
+ size_t payload_len;
+ int ret;
+
+ static_assert(IS_ALIGNED(sizeof(*payload), sizeof(u32)));
+ payload_len = sizeof(*payload) / sizeof(u32);
+
+ total = 4 + payload_len;
+ action = kunit_kmalloc_array(test, total, sizeof(*action), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, action);
+
+ action[0] = XE_GUC_ACTION_TEST_G2G_SEND;
+ action[1] = far_tile;
+ action[2] = far_dev;
+ action[3] = payload_len;
+ memcpy(action + 4, payload, payload_len * sizeof(u32));
+
+ atomic_inc(&xe->g2g_test_count);
+
+ /*
+ * The expected response notification should be specified here. The problem
+ * is that the response will come from a different GuC. By the end, it should
+ * all add up, as long as an equal number of messages is sent from and to
+ * each GuC. However, transient negative reservation-space errors and the
+ * like can occur in the middle. Rather than add intrusive changes to the CT
+ * layer, it is simpler to not count the responses at all. The system should
+ * be idle when running the selftest, and the selftest's total notification
+ * size is well within the G2H allocation size, so there should be no need to
+ * block for space - which is all the tracking code is really for.
+ */
+ ret = xe_guc_ct_send(&guc->ct, action, total, 0, 0);
+ kunit_kfree(test, action);
+ KUNIT_ASSERT_EQ_MSG(test, 0, ret, "G2G send failed: %d [%d:%d -> %d:%d]\n", ret,
+ gt_to_tile(gt)->id, G2G_DEV(gt), far_tile, far_dev);
+}
+
+/*
+ * NB: Can't use KUNIT_ASSERT and friends in here as this is called asynchronously
+ * from the G2H notification handler. Need that to actually complete rather than
+ * thread-abort in order to keep the rest of the driver alive!
+ */
+int xe_guc_g2g_test_notification(struct xe_guc *guc, u32 *msg, u32 len)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *rx_gt = guc_to_gt(guc), *test_gt, *tx_gt = NULL;
+ u32 tx_tile, tx_dev, rx_tile, rx_dev, idx, got_len;
+ struct g2g_test_payload *payload;
+ size_t payload_len;
+ int ret = 0, i;
+
+ payload_len = sizeof(*payload) / sizeof(u32);
+
+ if (unlikely(len != (G2H_LEN_DW_G2G_NOTIFY_MIN + payload_len))) {
+ xe_gt_err(rx_gt, "G2G test notification invalid length %u", len);
+ ret = -EPROTO;
+ goto done;
+ }
+
+ tx_tile = msg[0];
+ tx_dev = msg[1];
+ got_len = msg[2];
+ payload = (struct g2g_test_payload *)(msg + 3);
+
+ rx_tile = gt_to_tile(rx_gt)->id;
+ rx_dev = G2G_DEV(rx_gt);
+
+ if (got_len != payload_len) {
+ xe_gt_err(rx_gt, "G2G: Invalid payload length: %u vs %zu\n", got_len, payload_len);
+ ret = -EPROTO;
+ goto done;
+ }
+
+ if (payload->tx_dev != tx_dev || payload->tx_tile != tx_tile ||
+ payload->rx_dev != rx_dev || payload->rx_tile != rx_tile) {
+ xe_gt_err(rx_gt, "G2G: Invalid payload: %d:%d -> %d:%d vs %d:%d -> %d:%d! [%d]\n",
+ payload->tx_tile, payload->tx_dev, payload->rx_tile, payload->rx_dev,
+ tx_tile, tx_dev, rx_tile, rx_dev, payload->seqno);
+ ret = -EPROTO;
+ goto done;
+ }
+
+ if (!xe->g2g_test_array) {
+ xe_gt_err(rx_gt, "G2G: Missing test array!\n");
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ for_each_gt(test_gt, xe, i) {
+ if (gt_to_tile(test_gt)->id != tx_tile)
+ continue;
+
+ if (G2G_DEV(test_gt) != tx_dev)
+ continue;
+
+ if (tx_gt) {
+ xe_gt_err(rx_gt, "G2G: Got duplicate TX GTs: %d vs %d for %d:%d!\n",
+ tx_gt->info.id, test_gt->info.id, tx_tile, tx_dev);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ tx_gt = test_gt;
+ }
+ if (!tx_gt) {
+ xe_gt_err(rx_gt, "G2G: Failed to find a TX GT for %d:%d!\n", tx_tile, tx_dev);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ idx = (tx_gt->info.id * xe->info.gt_count) + rx_gt->info.id;
+
+ if (xe->g2g_test_array[idx] != payload->seqno - 1) {
+ xe_gt_err(rx_gt, "G2G: Seqno mismatch %d vs %d for %d:%d -> %d:%d!\n",
+ xe->g2g_test_array[idx], payload->seqno - 1,
+ tx_tile, tx_dev, rx_tile, rx_dev);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ xe->g2g_test_array[idx] = payload->seqno;
+
+done:
+ atomic_dec(&xe->g2g_test_count);
+ return ret;
+}
+
+/*
+ * Send the given seqno from all GuCs to all other GuCs in tile/GT order
+ */
+static void g2g_test_in_order(struct kunit *test, struct xe_device *xe, u32 seqno)
+{
+ struct xe_gt *near_gt, *far_gt;
+ int i, j;
+
+ for_each_gt(near_gt, xe, i) {
+ u32 near_tile = gt_to_tile(near_gt)->id;
+ u32 near_dev = G2G_DEV(near_gt);
+
+ for_each_gt(far_gt, xe, j) {
+ u32 far_tile = gt_to_tile(far_gt)->id;
+ u32 far_dev = G2G_DEV(far_gt);
+ struct g2g_test_payload payload;
+
+ if (far_gt->info.id == near_gt->info.id)
+ continue;
+
+ payload.tx_dev = near_dev;
+ payload.tx_tile = near_tile;
+ payload.rx_dev = far_dev;
+ payload.rx_tile = far_tile;
+ payload.seqno = seqno;
+ g2g_test_send(test, &near_gt->uc.guc, far_tile, far_dev, &payload);
+ }
+ }
+}
+
+#define WAIT_TIME_MS 100
+#define WAIT_COUNT (1000 / WAIT_TIME_MS)
+
+static void g2g_wait_for_complete(void *_xe)
+{
+ struct xe_device *xe = (struct xe_device *)_xe;
+ struct kunit *test = kunit_get_current_test();
+ int wait = 0;
+
+ /* Wait for all G2H messages to be received */
+ while (atomic_read(&xe->g2g_test_count)) {
+ if (++wait > WAIT_COUNT)
+ break;
+
+ msleep(WAIT_TIME_MS);
+ }
+
+ KUNIT_ASSERT_EQ_MSG(test, 0, atomic_read(&xe->g2g_test_count),
+ "Timed out waiting for notifications\n");
+ kunit_info(test, "Got all notifications back\n");
+}
+
+#undef WAIT_TIME_MS
+#undef WAIT_COUNT
+
+static void g2g_clean_array(void *_xe)
+{
+ struct xe_device *xe = (struct xe_device *)_xe;
+
+ xe->g2g_test_array = NULL;
+}
+
+#define NUM_LOOPS 16
+
+static void g2g_run_test(struct kunit *test, struct xe_device *xe)
+{
+ u32 seqno, max_array;
+ int ret, i, j;
+
+ max_array = xe->info.gt_count * xe->info.gt_count;
+ xe->g2g_test_array = kunit_kcalloc(test, max_array, sizeof(u32), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe->g2g_test_array);
+
+ ret = kunit_add_action_or_reset(test, g2g_clean_array, xe);
+ KUNIT_ASSERT_EQ_MSG(test, 0, ret, "Failed to register clean up action\n");
+
+ /*
+ * Send incrementing seqnos from all GuCs to all other GuCs in tile/GT order.
+ * Tile/GT order doesn't really mean anything to the hardware but it is going
+ * to be a fixed sequence every time.
+ *
+ * Verify that each one comes back having taken the correct route.
+ */
+ ret = kunit_add_action(test, g2g_wait_for_complete, xe);
+ KUNIT_ASSERT_EQ_MSG(test, 0, ret, "Failed to register clean up action\n");
+ for (seqno = 1; seqno < NUM_LOOPS; seqno++)
+ g2g_test_in_order(test, xe, seqno);
+ seqno--;
+
+ kunit_release_action(test, &g2g_wait_for_complete, xe);
+
+ /* Check for the final seqno in each slot */
+ for (i = 0; i < xe->info.gt_count; i++) {
+ for (j = 0; j < xe->info.gt_count; j++) {
+ u32 idx = (j * xe->info.gt_count) + i;
+
+ if (i == j)
+ KUNIT_ASSERT_EQ_MSG(test, 0, xe->g2g_test_array[idx],
+ "identity seqno modified: %d for %dx%d!\n",
+ xe->g2g_test_array[idx], i, j);
+ else
+ KUNIT_ASSERT_EQ_MSG(test, seqno, xe->g2g_test_array[idx],
+ "invalid seqno: %d vs %d for %dx%d!\n",
+ xe->g2g_test_array[idx], seqno, i, j);
+ }
+ }
+
+ kunit_kfree(test, xe->g2g_test_array);
+ kunit_release_action(test, &g2g_clean_array, xe);
+
+ kunit_info(test, "Test passed\n");
+}
+
+#undef NUM_LOOPS
+
+static void g2g_ct_stop(struct xe_guc *guc)
+{
+ struct xe_gt *remote_gt, *gt = guc_to_gt(guc);
+ struct xe_device *xe = gt_to_xe(gt);
+ int i, t;
+
+ for_each_gt(remote_gt, xe, i) {
+ u32 tile, dev;
+
+ if (remote_gt->info.id == gt->info.id)
+ continue;
+
+ tile = gt_to_tile(remote_gt)->id;
+ dev = G2G_DEV(remote_gt);
+
+ for (t = 0; t < XE_G2G_TYPE_LIMIT; t++)
+ guc_g2g_deregister(guc, tile, dev, t);
+ }
+}
+
+/* Size of a single allocation that contains all G2G CTBs across all GTs */
+static u32 g2g_ctb_size(struct kunit *test, struct xe_device *xe)
+{
+ unsigned int count = xe->info.gt_count;
+ u32 num_channels = (count * (count - 1)) / 2;
+
+ kunit_info(test, "Size: (%d * %d / 2) * %d * 0x%08X + 0x%08X => 0x%08X [%d]\n",
+ count, count - 1, XE_G2G_TYPE_LIMIT, G2G_BUFFER_SIZE, G2G_DESC_AREA_SIZE,
+ num_channels * XE_G2G_TYPE_LIMIT * G2G_BUFFER_SIZE + G2G_DESC_AREA_SIZE,
+ num_channels * XE_G2G_TYPE_LIMIT);
+
+ return num_channels * XE_G2G_TYPE_LIMIT * G2G_BUFFER_SIZE + G2G_DESC_AREA_SIZE;
+}
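
As a sanity check of the arithmetic: the formula allocates one bi-directional channel per unordered pair of GTs, per CTB type. A standalone sketch with illustrative placeholder values (the real G2G_BUFFER_SIZE, G2G_DESC_AREA_SIZE and XE_G2G_TYPE_LIMIT come from the GuC CT code and are assumptions here):

    #include <assert.h>

    /* Illustrative placeholders; the real values live in the GuC CT code. */
    #define XE_G2G_TYPE_LIMIT   2       /* assumed: two CTB types per channel */
    #define G2G_BUFFER_SIZE     0x1000
    #define G2G_DESC_AREA_SIZE  0x1000

    static unsigned int g2g_ctb_size_check(unsigned int gt_count)
    {
        /* One channel per unordered pair of GTs: n * (n - 1) / 2 */
        unsigned int num_channels = (gt_count * (gt_count - 1)) / 2;

        return num_channels * XE_G2G_TYPE_LIMIT * G2G_BUFFER_SIZE + G2G_DESC_AREA_SIZE;
    }

    int main(void)
    {
        /* 2 GTs -> 1 pair -> 2 CTBs; 4 GTs -> 6 pairs -> 12 CTBs. */
        assert(g2g_ctb_size_check(2) == 1 * 2 * 0x1000 + 0x1000);
        assert(g2g_ctb_size_check(4) == 6 * 2 * 0x1000 + 0x1000);
        return 0;
    }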
+
+/*
+ * Use the driver's regular CTB allocation scheme.
+ */
+static void g2g_alloc_default(struct kunit *test, struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ int i;
+
+ kunit_info(test, "Default [tiles = %d, GTs = %d]\n",
+ xe->info.tile_count, xe->info.gt_count);
+
+ for_each_gt(gt, xe, i) {
+ struct xe_guc *guc = &gt->uc.guc;
+ int ret;
+
+ ret = guc_g2g_alloc(guc);
+ KUNIT_ASSERT_EQ_MSG(test, 0, ret, "G2G alloc failed: %pe", ERR_PTR(ret));
+ }
+}
+
+static void g2g_distribute(struct kunit *test, struct xe_device *xe, struct xe_bo *bo)
+{
+ struct xe_gt *root_gt, *gt;
+ int i;
+
+ root_gt = xe_device_get_gt(xe, 0);
+ root_gt->uc.guc.g2g.bo = bo;
+ root_gt->uc.guc.g2g.owned = true;
+ kunit_info(test, "[%d.%d] Assigned 0x%p\n", gt_to_tile(root_gt)->id, root_gt->info.id, bo);
+
+ for_each_gt(gt, xe, i) {
+ if (gt->info.id != 0) {
+ gt->uc.guc.g2g.owned = false;
+ gt->uc.guc.g2g.bo = xe_bo_get(bo);
+ kunit_info(test, "[%d.%d] Pinned 0x%p\n",
+ gt_to_tile(gt)->id, gt->info.id, gt->uc.guc.g2g.bo);
+ }
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gt->uc.guc.g2g.bo);
+ }
+}
+
+/*
+ * Allocate a single blob on the host and split between all G2G CTBs.
+ */
+static void g2g_alloc_host(struct kunit *test, struct xe_device *xe)
+{
+ struct xe_bo *bo;
+ u32 g2g_size;
+
+ kunit_info(test, "Host [tiles = %d, GTs = %d]\n", xe->info.tile_count, xe->info.gt_count);
+
+ g2g_size = g2g_ctb_size(test, xe);
+ bo = xe_managed_bo_create_pin_map(xe, xe_device_get_root_tile(xe), g2g_size,
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_ALL |
+ XE_BO_FLAG_GGTT_INVALIDATE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bo);
+ kunit_info(test, "[HST] G2G buffer create: 0x%p\n", bo);
+
+ xe_map_memset(xe, &bo->vmap, 0, 0, g2g_size);
+
+ g2g_distribute(test, xe, bo);
+}
+
+/*
+ * Allocate a single blob on the given tile and split between all G2G CTBs.
+ */
+static void g2g_alloc_tile(struct kunit *test, struct xe_device *xe, struct xe_tile *tile)
+{
+ struct xe_bo *bo;
+ u32 g2g_size;
+
+ KUNIT_ASSERT_TRUE(test, IS_DGFX(xe));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, tile);
+
+ kunit_info(test, "Tile %d [tiles = %d, GTs = %d]\n",
+ tile->id, xe->info.tile_count, xe->info.gt_count);
+
+ g2g_size = g2g_ctb_size(test, xe);
+ bo = xe_managed_bo_create_pin_map(xe, tile, g2g_size,
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_ALL |
+ XE_BO_FLAG_GGTT_INVALIDATE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bo);
+ kunit_info(test, "[%d.*] G2G buffer create: 0x%p\n", tile->id, bo);
+
+ xe_map_memset(xe, &bo->vmap, 0, 0, g2g_size);
+
+ g2g_distribute(test, xe, bo);
+}
+
+static void g2g_free(struct kunit *test, struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ struct xe_bo *bo;
+ int i;
+
+ for_each_gt(gt, xe, i) {
+ bo = gt->uc.guc.g2g.bo;
+ if (!bo)
+ continue;
+
+ if (gt->uc.guc.g2g.owned) {
+ xe_managed_bo_unpin_map_no_vm(bo);
+ kunit_info(test, "[%d.%d] Unmapped 0x%p\n",
+ gt_to_tile(gt)->id, gt->info.id, bo);
+ } else {
+ xe_bo_put(bo);
+ kunit_info(test, "[%d.%d] Unpinned 0x%p\n",
+ gt_to_tile(gt)->id, gt->info.id, bo);
+ }
+
+ gt->uc.guc.g2g.bo = NULL;
+ }
+}
+
+static void g2g_stop(struct kunit *test, struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ int i;
+
+ for_each_gt(gt, xe, i) {
+ struct xe_guc *guc = &gt->uc.guc;
+
+ if (!guc->g2g.bo)
+ continue;
+
+ g2g_ct_stop(guc);
+ }
+
+ g2g_free(test, xe);
+}
+
+/*
+ * Generate a unique id for each bi-directional CTB for each pair of
+ * near and far tiles/devices. The id can then be used as an index into
+ * a single allocation that is sub-divided into multiple CTBs.
+ *
+ * For example, with two devices per tile and two tiles, the table should
+ * look like:
+ * Far <tile>.<dev>
+ * 0.0 0.1 1.0 1.1
+ * N 0.0 --/-- 00/01 02/03 04/05
+ * e 0.1 01/00 --/-- 06/07 08/09
+ * a 1.0 03/02 07/06 --/-- 10/11
+ * r 1.1 05/04 09/08 11/10 --/--
+ *
+ * Each entry is the Rx/Tx channel id.
+ *
+ * So GuC #3 (tile 1, dev 1) talking to GuC #2 (tile 1, dev 0) would
+ * be reading from channel #11 and writing to channel #10. Whereas,
+ * GuC #2 talking to GuC #3 would be read on #10 and write to #11.
+ */
+static int g2g_slot_flat(u32 near_tile, u32 near_dev, u32 far_tile, u32 far_dev,
+ u32 type, u32 max_inst, bool have_dev)
+{
+ u32 near = near_tile, far = far_tile;
+ u32 idx = 0, x, y, direction;
+ int i;
+
+ if (have_dev) {
+ near = (near << 1) | near_dev;
+ far = (far << 1) | far_dev;
+ }
+
+ /* No need to send to oneself */
+ if (far == near)
+ return -1;
+
+ if (far > near) {
+ /* Top right table half */
+ x = far;
+ y = near;
+
+ /* T/R is 'forwards' direction */
+ direction = type;
+ } else {
+ /* Bottom left table half */
+ x = near;
+ y = far;
+
+ /* B/L is 'backwards' direction */
+ direction = (1 - type);
+ }
+
+ /* Count the rows prior to the target */
+ for (i = y; i > 0; i--)
+ idx += max_inst - i;
+
+ /* Count this row up to the target */
+ idx += (x - 1 - y);
+
+ /* Slots are in Rx/Tx pairs */
+ idx *= 2;
+
+ /* Pick Rx/Tx direction */
+ idx += direction;
+
+ return idx;
+}
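
A minimal user-space check of the numbering, replicating the function body above and asserting a few cells from the 2-tile/2-device example table (illustrative only; max_inst = 4 matches that table):

    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t u32;

    /* Local copy of the slot computation, for checking the table above. */
    static int slot_flat(u32 near_tile, u32 near_dev, u32 far_tile, u32 far_dev,
                         u32 type, u32 max_inst, int have_dev)
    {
        u32 near = near_tile, far = far_tile;
        u32 idx = 0, x, y, direction;
        int i;

        if (have_dev) {
            near = (near << 1) | near_dev;
            far = (far << 1) | far_dev;
        }

        if (far == near)
            return -1;

        if (far > near) {
            x = far;
            y = near;
            direction = type;       /* top-right half: 'forwards' */
        } else {
            x = near;
            y = far;
            direction = 1 - type;   /* bottom-left half: 'backwards' */
        }

        for (i = y; i > 0; i--)
            idx += max_inst - i;    /* rows prior to the target */
        idx += (x - 1 - y);         /* this row up to the target */

        return idx * 2 + direction; /* Rx/Tx pair plus direction */
    }

    int main(void)
    {
        /* Two tiles with two GTs each: max_inst = 4, have_dev = true. */
        assert(slot_flat(0, 0, 0, 1, 0, 4, 1) == 0);  /* 0.0 -> 0.1: 00/01 */
        assert(slot_flat(0, 1, 1, 0, 0, 4, 1) == 6);  /* 0.1 -> 1.0: 06/07 */
        assert(slot_flat(1, 1, 1, 0, 0, 4, 1) == 11); /* 1.1 -> 1.0 reads #11 */
        assert(slot_flat(1, 1, 1, 0, 1, 4, 1) == 10); /* 1.1 -> 1.0 writes #10 */
        return 0;
    }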
+
+static int g2g_register_flat(struct xe_guc *guc, u32 far_tile, u32 far_dev, u32 type, bool have_dev)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 near_tile = gt_to_tile(gt)->id;
+ u32 near_dev = G2G_DEV(gt);
+ u32 max = xe->info.gt_count;
+ int idx;
+ u32 base, desc, buf;
+
+ if (!guc->g2g.bo)
+ return -ENODEV;
+
+ idx = g2g_slot_flat(near_tile, near_dev, far_tile, far_dev, type, max, have_dev);
+ xe_assert(xe, idx >= 0);
+
+ base = guc_bo_ggtt_addr(guc, guc->g2g.bo);
+ desc = base + idx * G2G_DESC_SIZE;
+ buf = base + idx * G2G_BUFFER_SIZE + G2G_DESC_AREA_SIZE;
+
+ xe_assert(xe, (desc - base + G2G_DESC_SIZE) <= G2G_DESC_AREA_SIZE);
+ xe_assert(xe, (buf - base + G2G_BUFFER_SIZE) <= xe_bo_size(guc->g2g.bo));
+
+ return guc_action_register_g2g_buffer(guc, type, far_tile, far_dev,
+ desc, buf, G2G_BUFFER_SIZE);
+}
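
The address split above amounts to a descriptor area at the start of the BO followed by the per-slot buffers. A small sketch with hypothetical sizes (the real values come from the GuC CT code) showing the two regions never overlap for a 2-tile/2-GT layout (12 slots):

    #include <assert.h>
    #include <stdint.h>

    /* Hypothetical sizes; the real values come from the GuC CT code. */
    #define G2G_DESC_SIZE       64
    #define G2G_DESC_AREA_SIZE  4096
    #define G2G_BUFFER_SIZE     4096

    int main(void)
    {
        uint64_t base = 0x100000; /* hypothetical GGTT address of the BO */
        int idx;

        /* 2 tiles x 2 GTs -> 6 pairs -> 12 Rx/Tx slots */
        for (idx = 0; idx < 12; idx++) {
            uint64_t desc = base + idx * G2G_DESC_SIZE;
            uint64_t buf = base + idx * G2G_BUFFER_SIZE + G2G_DESC_AREA_SIZE;

            /* Descriptors stay inside the descriptor area... */
            assert(desc + G2G_DESC_SIZE <= base + G2G_DESC_AREA_SIZE);
            /* ...and buffers start at or after its end. */
            assert(buf >= base + G2G_DESC_AREA_SIZE);
        }
        return 0;
    }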
+
+static void g2g_start(struct kunit *test, struct xe_guc *guc)
+{
+ struct xe_gt *remote_gt, *gt = guc_to_gt(guc);
+ struct xe_device *xe = gt_to_xe(gt);
+ unsigned int i;
+ int t, ret;
+ bool have_dev;
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, guc->g2g.bo);
+
+ /* GuC interface will need extending if more GT device types are ever created. */
+ KUNIT_ASSERT_TRUE(test,
+ (gt->info.type == XE_GT_TYPE_MAIN) ||
+ (gt->info.type == XE_GT_TYPE_MEDIA));
+
+ /* Channel numbering depends on whether there are multiple GTs per tile */
+ have_dev = xe->info.gt_count > xe->info.tile_count;
+
+ for_each_gt(remote_gt, xe, i) {
+ u32 tile, dev;
+
+ if (remote_gt->info.id == gt->info.id)
+ continue;
+
+ tile = gt_to_tile(remote_gt)->id;
+ dev = G2G_DEV(remote_gt);
+
+ for (t = 0; t < XE_G2G_TYPE_LIMIT; t++) {
+ ret = g2g_register_flat(guc, tile, dev, t, have_dev);
+ KUNIT_ASSERT_EQ_MSG(test, 0, ret, "G2G register failed: %pe", ERR_PTR(ret));
+ }
+ }
+}
+
+static void g2g_reinit(struct kunit *test, struct xe_device *xe, int ctb_type, struct xe_tile *tile)
+{
+ struct xe_gt *gt;
+ int i, found = 0;
+
+ g2g_stop(test, xe);
+
+ for_each_gt(gt, xe, i) {
+ struct xe_guc *guc = &gt->uc.guc;
+
+ KUNIT_ASSERT_NULL(test, guc->g2g.bo);
+ }
+
+ switch (ctb_type) {
+ case G2G_CTB_TYPE_DEFAULT:
+ g2g_alloc_default(test, xe);
+ break;
+
+ case G2G_CTB_TYPE_HOST:
+ g2g_alloc_host(test, xe);
+ break;
+
+ case G2G_CTB_TYPE_TILE:
+ g2g_alloc_tile(test, xe, tile);
+ break;
+
+ default:
+ KUNIT_ASSERT_TRUE(test, false);
+ }
+
+ for_each_gt(gt, xe, i) {
+ struct xe_guc *guc = &gt->uc.guc;
+
+ if (!guc->g2g.bo)
+ continue;
+
+ if (ctb_type == G2G_CTB_TYPE_DEFAULT)
+ guc_g2g_start(guc);
+ else
+ g2g_start(test, guc);
+ found++;
+ }
+
+ KUNIT_ASSERT_GT_MSG(test, found, 1, "insufficient G2G channels running: %d", found);
+
+ kunit_info(test, "Testing across %d GTs\n", found);
+}
+
+static void g2g_recreate_ctb(void *_xe)
+{
+ struct xe_device *xe = (struct xe_device *)_xe;
+ struct kunit *test = kunit_get_current_test();
+
+ g2g_stop(test, xe);
+
+ if (xe_guc_g2g_wanted(xe))
+ g2g_reinit(test, xe, G2G_CTB_TYPE_DEFAULT, NULL);
+}
+
+static void g2g_pm_runtime_put(void *_xe)
+{
+ struct xe_device *xe = (struct xe_device *)_xe;
+
+ xe_pm_runtime_put(xe);
+}
+
+static void g2g_pm_runtime_get(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ int ret;
+
+ xe_pm_runtime_get(xe);
+ ret = kunit_add_action_or_reset(test, g2g_pm_runtime_put, xe);
+ KUNIT_ASSERT_EQ_MSG(test, 0, ret, "Failed to register runtime PM action\n");
+}
+
+static void g2g_check_skip(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ struct xe_gt *gt;
+ int i;
+
+ if (IS_SRIOV_VF(xe))
+ kunit_skip(test, "not supported from a VF");
+
+ if (xe->info.gt_count <= 1)
+ kunit_skip(test, "not enough GTs");
+
+ for_each_gt(gt, xe, i) {
+ struct xe_guc *guc = &gt->uc.guc;
+
+ if (guc->fw.build_type == CSS_UKERNEL_INFO_BUILDTYPE_PROD)
+ kunit_skip(test,
+ "G2G test interface not available in production firmware builds\n");
+ }
+}
+
+/*
+ * Simple test that does not try to recreate the CTBs.
+ * It requires a platform that already enables G2G comms,
+ * but it has no risk of leaving the system in a broken state
+ * afterwards.
+ */
+static void xe_live_guc_g2g_kunit_default(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+
+ if (!xe_guc_g2g_wanted(xe))
+ kunit_skip(test, "G2G not enabled");
+
+ g2g_check_skip(test);
+
+ g2g_pm_runtime_get(test);
+
+ kunit_info(test, "Testing default CTBs\n");
+ g2g_run_test(test, xe);
+
+ kunit_release_action(test, &g2g_pm_runtime_put, xe);
+}
+
+/*
+ * More complex test that re-creates the CTBs in various locations to
+ * test access to each location from each GuC. Can be run even on
+ * systems that do not enable G2G by default. On the other hand,
+ * because it recreates the CTBs, if something goes wrong it could
+ * leave the system with broken G2G comms.
+ */
+static void xe_live_guc_g2g_kunit_allmem(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ int ret;
+
+ g2g_check_skip(test);
+
+ g2g_pm_runtime_get(test);
+
+ /* Make sure to leave the system as we found it */
+ ret = kunit_add_action_or_reset(test, g2g_recreate_ctb, xe);
+ KUNIT_ASSERT_EQ_MSG(test, 0, ret, "Failed to register CTB re-creation action\n");
+
+ kunit_info(test, "Testing CTB type 'default'...\n");
+ g2g_reinit(test, xe, G2G_CTB_TYPE_DEFAULT, NULL);
+ g2g_run_test(test, xe);
+
+ kunit_info(test, "Testing CTB type 'host'...\n");
+ g2g_reinit(test, xe, G2G_CTB_TYPE_HOST, NULL);
+ g2g_run_test(test, xe);
+
+ if (IS_DGFX(xe)) {
+ struct xe_tile *tile;
+ int id;
+
+ for_each_tile(tile, xe, id) {
+ kunit_info(test, "Testing CTB type 'tile: #%d'...\n", id);
+
+ g2g_reinit(test, xe, G2G_CTB_TYPE_TILE, tile);
+ g2g_run_test(test, xe);
+ }
+ } else {
+ kunit_info(test, "Skipping local memory on integrated platform\n");
+ }
+
+ kunit_release_action(test, g2g_recreate_ctb, xe);
+ kunit_release_action(test, g2g_pm_runtime_put, xe);
+}
+
+static struct kunit_case xe_guc_g2g_tests[] = {
+ KUNIT_CASE_PARAM(xe_live_guc_g2g_kunit_default, xe_pci_live_device_gen_param),
+ KUNIT_CASE_PARAM(xe_live_guc_g2g_kunit_allmem, xe_pci_live_device_gen_param),
+ {}
+};
+
+VISIBLE_IF_KUNIT
+struct kunit_suite xe_guc_g2g_test_suite = {
+ .name = "xe_guc_g2g",
+ .test_cases = xe_guc_g2g_tests,
+ .init = xe_kunit_helper_xe_device_live_test_init,
+};
+EXPORT_SYMBOL_IF_KUNIT(xe_guc_g2g_test_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
index 81277c77016d..c55e46f1ae92 100644
--- a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
+++ b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
@@ -10,12 +10,14 @@ extern struct kunit_suite xe_bo_shrink_test_suite;
extern struct kunit_suite xe_dma_buf_test_suite;
extern struct kunit_suite xe_migrate_test_suite;
extern struct kunit_suite xe_mocs_test_suite;
+extern struct kunit_suite xe_guc_g2g_test_suite;
kunit_test_suite(xe_bo_test_suite);
kunit_test_suite(xe_bo_shrink_test_suite);
kunit_test_suite(xe_dma_buf_test_suite);
kunit_test_suite(xe_migrate_test_suite);
kunit_test_suite(xe_mocs_test_suite);
+kunit_test_suite(xe_guc_g2g_test_suite);
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
index edd1e701aa1c..5904d658d1f2 100644
--- a/drivers/gpu/drm/xe/tests/xe_migrate.c
+++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
@@ -70,7 +70,7 @@ static int run_sanity_job(struct xe_migrate *m, struct xe_device *xe,
} } while (0)
static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
- struct kunit *test, u32 region)
+ struct kunit *test, u32 region, struct drm_exec *exec)
{
struct xe_device *xe = tile_to_xe(m->tile);
u64 retval, expected = 0;
@@ -84,14 +84,15 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
ttm_bo_type_kernel,
region |
XE_BO_FLAG_NEEDS_CPU_ACCESS |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_PINNED,
+ exec);
if (IS_ERR(remote)) {
KUNIT_FAIL(test, "Failed to allocate remote bo for %s: %pe\n",
str, remote);
return;
}
- err = xe_bo_validate(remote, NULL, false);
+ err = xe_bo_validate(remote, NULL, false, exec);
if (err) {
KUNIT_FAIL(test, "Failed to validate system bo for %s: %i\n",
str, err);
@@ -161,13 +162,13 @@ out_unlock:
}
static void test_copy_sysmem(struct xe_migrate *m, struct xe_bo *bo,
- struct kunit *test)
+ struct drm_exec *exec, struct kunit *test)
{
- test_copy(m, bo, test, XE_BO_FLAG_SYSTEM);
+ test_copy(m, bo, test, XE_BO_FLAG_SYSTEM, exec);
}
static void test_copy_vram(struct xe_migrate *m, struct xe_bo *bo,
- struct kunit *test)
+ struct drm_exec *exec, struct kunit *test)
{
u32 region;
@@ -178,10 +179,11 @@ static void test_copy_vram(struct xe_migrate *m, struct xe_bo *bo,
region = XE_BO_FLAG_VRAM1;
else
region = XE_BO_FLAG_VRAM0;
- test_copy(m, bo, test, region);
+ test_copy(m, bo, test, region, exec);
}
-static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
+static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test,
+ struct drm_exec *exec)
{
struct xe_tile *tile = m->tile;
struct xe_device *xe = tile_to_xe(tile);
@@ -202,7 +204,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile));
+ XE_BO_FLAG_VRAM_IF_DGFX(tile),
+ exec);
if (IS_ERR(big)) {
KUNIT_FAIL(test, "Failed to allocate bo: %li\n", PTR_ERR(big));
goto vunmap;
@@ -210,7 +213,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
pt = xe_bo_create_pin_map(xe, tile, m->q->vm, XE_PAGE_SIZE,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile));
+ XE_BO_FLAG_VRAM_IF_DGFX(tile),
+ exec);
if (IS_ERR(pt)) {
KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
PTR_ERR(pt));
@@ -220,7 +224,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
tiny = xe_bo_create_pin_map(xe, tile, m->q->vm,
2 * SZ_4K,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile));
+ XE_BO_FLAG_VRAM_IF_DGFX(tile),
+ exec);
if (IS_ERR(tiny)) {
KUNIT_FAIL(test, "Failed to allocate tiny fake pt: %li\n",
PTR_ERR(tiny));
@@ -290,10 +295,10 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
check(retval, expected, "Command clear small last value", test);
kunit_info(test, "Copying small buffer object to system\n");
- test_copy_sysmem(m, tiny, test);
+ test_copy_sysmem(m, tiny, exec, test);
if (xe->info.tile_count > 1) {
kunit_info(test, "Copying small buffer object to other vram\n");
- test_copy_vram(m, tiny, test);
+ test_copy_vram(m, tiny, exec, test);
}
/* Clear a big bo */
@@ -312,10 +317,10 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
check(retval, expected, "Command clear big last value", test);
kunit_info(test, "Copying big buffer object to system\n");
- test_copy_sysmem(m, big, test);
+ test_copy_sysmem(m, big, exec, test);
if (xe->info.tile_count > 1) {
kunit_info(test, "Copying big buffer object to other vram\n");
- test_copy_vram(m, big, test);
+ test_copy_vram(m, big, exec, test);
}
out:
@@ -343,10 +348,11 @@ static int migrate_test_run_device(struct xe_device *xe)
for_each_tile(tile, xe, id) {
struct xe_migrate *m = tile->migrate;
+ struct drm_exec *exec = XE_VALIDATION_OPT_OUT;
kunit_info(test, "Testing tile id %d.\n", id);
xe_vm_lock(m->q->vm, false);
- xe_migrate_sanity_test(m, test);
+ xe_migrate_sanity_test(m, test, exec);
xe_vm_unlock(m->q->vm);
}
@@ -490,7 +496,7 @@ err_sync:
static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
struct xe_bo *sys_bo, struct xe_bo *vram_bo, struct xe_bo *ccs_bo,
- struct kunit *test)
+ struct drm_exec *exec, struct kunit *test)
{
struct dma_fence *fence;
u64 expected, retval;
@@ -509,7 +515,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
dma_fence_put(fence);
kunit_info(test, "Evict vram buffer object\n");
- ret = xe_bo_evict(vram_bo);
+ ret = xe_bo_evict(vram_bo, exec);
if (ret) {
KUNIT_FAIL(test, "Failed to evict bo.\n");
return;
@@ -538,7 +544,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
dma_fence_put(fence);
kunit_info(test, "Restore vram buffer object\n");
- ret = xe_bo_validate(vram_bo, NULL, false);
+ ret = xe_bo_validate(vram_bo, NULL, false, exec);
if (ret) {
KUNIT_FAIL(test, "Failed to validate vram bo for: %li\n", ret);
return;
@@ -636,13 +642,14 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
{
struct xe_bo *sys_bo, *vram_bo = NULL, *ccs_bo = NULL;
unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
+ struct drm_exec *exec;
long ret;
- sys_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
+ sys_bo = xe_bo_create_user(xe, NULL, SZ_4M,
DRM_XE_GEM_CPU_CACHING_WC,
XE_BO_FLAG_SYSTEM |
XE_BO_FLAG_NEEDS_CPU_ACCESS |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_PINNED, NULL);
if (IS_ERR(sys_bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
@@ -650,8 +657,9 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
return;
}
+ exec = XE_VALIDATION_OPT_OUT;
xe_bo_lock(sys_bo, false);
- ret = xe_bo_validate(sys_bo, NULL, false);
+ ret = xe_bo_validate(sys_bo, NULL, false, exec);
if (ret) {
KUNIT_FAIL(test, "Failed to validate system bo for: %li\n", ret);
goto free_sysbo;
@@ -664,10 +672,10 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
}
xe_bo_unlock(sys_bo);
- ccs_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
+ ccs_bo = xe_bo_create_user(xe, NULL, SZ_4M,
DRM_XE_GEM_CPU_CACHING_WC,
bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_PINNED, NULL);
if (IS_ERR(ccs_bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
@@ -676,7 +684,7 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
}
xe_bo_lock(ccs_bo, false);
- ret = xe_bo_validate(ccs_bo, NULL, false);
+ ret = xe_bo_validate(ccs_bo, NULL, false, exec);
if (ret) {
KUNIT_FAIL(test, "Failed to validate system bo for: %li\n", ret);
goto free_ccsbo;
@@ -689,10 +697,10 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
}
xe_bo_unlock(ccs_bo);
- vram_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
+ vram_bo = xe_bo_create_user(xe, NULL, SZ_4M,
DRM_XE_GEM_CPU_CACHING_WC,
bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_PINNED, NULL);
if (IS_ERR(vram_bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
PTR_ERR(vram_bo));
@@ -700,7 +708,7 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
}
xe_bo_lock(vram_bo, false);
- ret = xe_bo_validate(vram_bo, NULL, false);
+ ret = xe_bo_validate(vram_bo, NULL, false, exec);
if (ret) {
KUNIT_FAIL(test, "Failed to validate vram bo for: %li\n", ret);
goto free_vrambo;
@@ -713,7 +721,7 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
}
test_clear(xe, tile, sys_bo, vram_bo, test);
- test_migrate(xe, tile, sys_bo, vram_bo, ccs_bo, test);
+ test_migrate(xe, tile, sys_bo, vram_bo, ccs_bo, exec, test);
xe_bo_unlock(vram_bo);
xe_bo_lock(vram_bo, false);
diff --git a/drivers/gpu/drm/xe/tests/xe_pci.c b/drivers/gpu/drm/xe/tests/xe_pci.c
index 9c715e59f030..69e2840c7ef0 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci.c
+++ b/drivers/gpu/drm/xe/tests/xe_pci.c
@@ -12,12 +12,219 @@
#include <kunit/test-bug.h>
#include <kunit/visibility.h>
+#define PLATFORM_CASE(platform__, graphics_step__) \
+ { \
+ .platform = XE_ ## platform__, \
+ .subplatform = XE_SUBPLATFORM_NONE, \
+ .step = { .graphics = STEP_ ## graphics_step__ } \
+ }
+
+#define SUBPLATFORM_CASE(platform__, subplatform__, graphics_step__) \
+ { \
+ .platform = XE_ ## platform__, \
+ .subplatform = XE_SUBPLATFORM_ ## platform__ ## _ ## subplatform__, \
+ .step = { .graphics = STEP_ ## graphics_step__ } \
+ }
+
+#define GMDID_CASE(platform__, graphics_verx100__, graphics_step__, \
+ media_verx100__, media_step__) \
+ { \
+ .platform = XE_ ## platform__, \
+ .subplatform = XE_SUBPLATFORM_NONE, \
+ .graphics_verx100 = graphics_verx100__, \
+ .media_verx100 = media_verx100__, \
+ .step = { .graphics = STEP_ ## graphics_step__, \
+ .media = STEP_ ## media_step__ } \
+ }
+
+static const struct xe_pci_fake_data cases[] = {
+ PLATFORM_CASE(TIGERLAKE, B0),
+ PLATFORM_CASE(DG1, A0),
+ PLATFORM_CASE(DG1, B0),
+ PLATFORM_CASE(ALDERLAKE_S, A0),
+ PLATFORM_CASE(ALDERLAKE_S, B0),
+ PLATFORM_CASE(ALDERLAKE_S, C0),
+ PLATFORM_CASE(ALDERLAKE_S, D0),
+ PLATFORM_CASE(ALDERLAKE_P, A0),
+ PLATFORM_CASE(ALDERLAKE_P, B0),
+ PLATFORM_CASE(ALDERLAKE_P, C0),
+ SUBPLATFORM_CASE(ALDERLAKE_S, RPLS, D0),
+ SUBPLATFORM_CASE(ALDERLAKE_P, RPLU, E0),
+ SUBPLATFORM_CASE(DG2, G10, C0),
+ SUBPLATFORM_CASE(DG2, G11, B1),
+ SUBPLATFORM_CASE(DG2, G12, A1),
+ GMDID_CASE(METEORLAKE, 1270, A0, 1300, A0),
+ GMDID_CASE(METEORLAKE, 1271, A0, 1300, A0),
+ GMDID_CASE(METEORLAKE, 1274, A0, 1300, A0),
+ GMDID_CASE(LUNARLAKE, 2004, A0, 2000, A0),
+ GMDID_CASE(LUNARLAKE, 2004, B0, 2000, A0),
+ GMDID_CASE(BATTLEMAGE, 2001, A0, 1301, A1),
+ GMDID_CASE(PANTHERLAKE, 3000, A0, 3000, A0),
+};
+
+KUNIT_ARRAY_PARAM(platform, cases, xe_pci_fake_data_desc);
+
+/**
+ * xe_pci_fake_data_gen_params - Generate struct xe_pci_fake_data parameters
+ * @test: the &struct kunit where this parameter generator is used
+ * @prev: the pointer to the previous parameter to iterate from or NULL
+ * @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE
+ *
+ * This function prepares struct xe_pci_fake_data parameter.
+ *
+ * To be used only as a parameter generator function in &KUNIT_CASE_PARAM.
+ *
+ * Return: pointer to the next parameter or NULL if no more parameters
+ */
+const void *xe_pci_fake_data_gen_params(struct kunit *test, const void *prev, char *desc)
+{
+ return platform_gen_params(test, prev, desc);
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_pci_fake_data_gen_params);
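
As the kernel-doc notes, the generator is meant to be plugged into KUNIT_CASE_PARAM(); a minimal sketch (the test name is hypothetical):

    static void my_fake_device_test(struct kunit *test)
    {
        /* test->param_value points at a const struct xe_pci_fake_data here. */
        const struct xe_pci_fake_data *param = test->param_value;

        kunit_info(test, "platform %d\n", param->platform);
    }

    static struct kunit_case my_test_cases[] = {
        KUNIT_CASE_PARAM(my_fake_device_test, xe_pci_fake_data_gen_params),
        {}
    };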
+
+static const struct xe_device_desc *lookup_desc(enum xe_platform p)
+{
+ const struct xe_device_desc *desc;
+ const struct pci_device_id *ids;
+
+ for (ids = pciidlist; ids->driver_data; ids++) {
+ desc = (const void *)ids->driver_data;
+ if (desc->platform == p)
+ return desc;
+ }
+ return NULL;
+}
+
+static const struct xe_subplatform_desc *lookup_sub_desc(enum xe_platform p, enum xe_subplatform s)
+{
+ const struct xe_device_desc *desc = lookup_desc(p);
+ const struct xe_subplatform_desc *spd;
+
+ if (desc && desc->subplatforms)
+ for (spd = desc->subplatforms; spd->subplatform; spd++)
+ if (spd->subplatform == s)
+ return spd;
+ return NULL;
+}
+
+static const char *lookup_platform_name(enum xe_platform p)
+{
+ const struct xe_device_desc *desc = lookup_desc(p);
+
+ return desc ? desc->platform_name : "INVALID";
+}
+
+static const char *__lookup_subplatform_name(enum xe_platform p, enum xe_subplatform s)
+{
+ const struct xe_subplatform_desc *desc = lookup_sub_desc(p, s);
+
+ return desc ? desc->name : "INVALID";
+}
+
+static const char *lookup_subplatform_name(enum xe_platform p, enum xe_subplatform s)
+{
+ return s == XE_SUBPLATFORM_NONE ? "" : __lookup_subplatform_name(p, s);
+}
+
+static const char *subplatform_prefix(enum xe_subplatform s)
+{
+ return s == XE_SUBPLATFORM_NONE ? "" : " ";
+}
+
+static const char *step_prefix(enum xe_step step)
+{
+ return step == STEP_NONE ? "" : " ";
+}
+
+static const char *step_name(enum xe_step step)
+{
+ return step == STEP_NONE ? "" : xe_step_name(step);
+}
+
+static const char *sriov_prefix(enum xe_sriov_mode mode)
+{
+ return mode <= XE_SRIOV_MODE_NONE ? "" : " ";
+}
+
+static const char *sriov_name(enum xe_sriov_mode mode)
+{
+ return mode <= XE_SRIOV_MODE_NONE ? "" : xe_sriov_mode_to_string(mode);
+}
+
+static const char *lookup_graphics_name(unsigned int verx100)
+{
+ const struct xe_ip *ip = find_graphics_ip(verx100);
+
+ return ip ? ip->name : "";
+}
+
+static const char *lookup_media_name(unsigned int verx100)
+{
+ const struct xe_ip *ip = find_media_ip(verx100);
+
+ return ip ? ip->name : "";
+}
+
+/**
+ * xe_pci_fake_data_desc - Describe struct xe_pci_fake_data parameter
+ * @param: the &struct xe_pci_fake_data parameter to describe
+ * @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE
+ *
+ * This function prepares description of the struct xe_pci_fake_data parameter.
+ *
+ * It is tailored for use in parameterized KUnit tests where parameter generator
+ * is based on the struct xe_pci_fake_data arrays.
+ */
+void xe_pci_fake_data_desc(const struct xe_pci_fake_data *param, char *desc)
+{
+ if (param->graphics_verx100 || param->media_verx100)
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s%s%s %u.%02u(%s)%s%s %u.%02u(%s)%s%s%s%s",
+ lookup_platform_name(param->platform),
+ subplatform_prefix(param->subplatform),
+ lookup_subplatform_name(param->platform, param->subplatform),
+ param->graphics_verx100 / 100, param->graphics_verx100 % 100,
+ lookup_graphics_name(param->graphics_verx100),
+ step_prefix(param->step.graphics), step_name(param->step.graphics),
+ param->media_verx100 / 100, param->media_verx100 % 100,
+ lookup_media_name(param->media_verx100),
+ step_prefix(param->step.media), step_name(param->step.media),
+ sriov_prefix(param->sriov_mode), sriov_name(param->sriov_mode));
+ else
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s%s%s%s%s%s%s",
+ lookup_platform_name(param->platform),
+ subplatform_prefix(param->subplatform),
+ lookup_subplatform_name(param->platform, param->subplatform),
+ step_prefix(param->step.graphics), step_name(param->step.graphics),
+ sriov_prefix(param->sriov_mode), sriov_name(param->sriov_mode));
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_pci_fake_data_desc);
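
For illustration, the GMDID branch above would describe the GMDID_CASE(METEORLAKE, 1270, A0, 1300, A0) entry along these lines; the IP names are assumptions pulled from the driver's IP tables, not this patch, and the prefix helpers are elided in this simplified user-space mirror of the first snprintf():

    #include <stdio.h>

    int main(void)
    {
        char desc[128];
        unsigned int gfx = 1270, media = 1300;

        /* Simplified mirror of the GMDID branch of xe_pci_fake_data_desc(). */
        snprintf(desc, sizeof(desc), "%s %u.%02u(%s) %s %u.%02u(%s) %s",
                 "METEORLAKE",
                 gfx / 100, gfx % 100, "Xe_LPG",        /* assumed graphics IP name */
                 "A0",
                 media / 100, media % 100, "Xe_LPM+",   /* assumed media IP name */
                 "A0");
        puts(desc);     /* METEORLAKE 12.70(Xe_LPG) A0 13.00(Xe_LPM+) A0 */
        return 0;
    }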
+
static void xe_ip_kunit_desc(const struct xe_ip *param, char *desc)
{
snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%u.%02u %s",
param->verx100 / 100, param->verx100 % 100, param->name);
}
+/*
+ * Pre-GMDID Graphics and Media IP definitions.
+ *
+ * Mimic the way GMDID IPs are declared so the same
+ * param generator can be used for both.
+ */
+static const struct xe_ip pre_gmdid_graphics_ips[] = {
+ { 1200, "Xe_LP", &graphics_xelp },
+ { 1210, "Xe_LP+", &graphics_xelp },
+ { 1255, "Xe_HPG", &graphics_xehpg },
+ { 1260, "Xe_HPC", &graphics_xehpc },
+};
+
+static const struct xe_ip pre_gmdid_media_ips[] = {
+ { 1200, "Xe_M", &media_xem },
+ { 1255, "Xe_HPM", &media_xem },
+};
+
+KUNIT_ARRAY_PARAM(pre_gmdid_graphics_ip, pre_gmdid_graphics_ips, xe_ip_kunit_desc);
+KUNIT_ARRAY_PARAM(pre_gmdid_media_ip, pre_gmdid_media_ips, xe_ip_kunit_desc);
+
KUNIT_ARRAY_PARAM(graphics_ip, graphics_ips, xe_ip_kunit_desc);
KUNIT_ARRAY_PARAM(media_ip, media_ips, xe_ip_kunit_desc);
@@ -44,9 +251,16 @@ KUNIT_ARRAY_PARAM(pci_id, pciidlist, xe_pci_id_kunit_desc);
*
* Return: pointer to the next parameter or NULL if no more parameters
*/
-const void *xe_pci_graphics_ip_gen_param(const void *prev, char *desc)
+const void *xe_pci_graphics_ip_gen_param(struct kunit *test, const void *prev, char *desc)
{
- return graphics_ip_gen_params(prev, desc);
+ const void *next = pre_gmdid_graphics_ip_gen_params(test, prev, desc);
+
+ if (next)
+ return next;
+ if (is_insidevar(prev, pre_gmdid_graphics_ips))
+ prev = NULL;
+
+ return graphics_ip_gen_params(test, prev, desc);
}
EXPORT_SYMBOL_IF_KUNIT(xe_pci_graphics_ip_gen_param);
@@ -61,9 +275,16 @@ EXPORT_SYMBOL_IF_KUNIT(xe_pci_graphics_ip_gen_param);
*
* Return: pointer to the next parameter or NULL if no more parameters
*/
-const void *xe_pci_media_ip_gen_param(const void *prev, char *desc)
+const void *xe_pci_media_ip_gen_param(struct kunit *test, const void *prev, char *desc)
{
- return media_ip_gen_params(prev, desc);
+ const void *next = pre_gmdid_media_ip_gen_params(test, prev, desc);
+
+ if (next)
+ return next;
+ if (is_insidevar(prev, pre_gmdid_media_ips))
+ prev = NULL;
+
+ return media_ip_gen_params(test, prev, desc);
}
EXPORT_SYMBOL_IF_KUNIT(xe_pci_media_ip_gen_param);
@@ -78,9 +299,9 @@ EXPORT_SYMBOL_IF_KUNIT(xe_pci_media_ip_gen_param);
*
* Return: pointer to the next parameter or NULL if no more parameters
*/
-const void *xe_pci_id_gen_param(const void *prev, char *desc)
+const void *xe_pci_id_gen_param(struct kunit *test, const void *prev, char *desc)
{
- const struct pci_device_id *pci = pci_id_gen_params(prev, desc);
+ const struct pci_device_id *pci = pci_id_gen_params(test, prev, desc);
return pci->driver_data ? pci : NULL;
}
@@ -94,13 +315,18 @@ static void fake_read_gmdid(struct xe_device *xe, enum xe_gmdid_type type,
if (type == GMDID_MEDIA) {
*ver = data->media_verx100;
- *revid = xe_step_to_gmdid(data->media_step);
+ *revid = xe_step_to_gmdid(data->step.media);
} else {
*ver = data->graphics_verx100;
- *revid = xe_step_to_gmdid(data->graphics_step);
+ *revid = xe_step_to_gmdid(data->step.graphics);
}
}
+static void fake_xe_info_probe_tile_count(struct xe_device *xe)
+{
+ /* Nothing to do, just use the statically defined value. */
+}
+
int xe_pci_fake_device_init(struct xe_device *xe)
{
struct kunit *test = kunit_get_current_test();
@@ -138,6 +364,8 @@ done:
data->sriov_mode : XE_SRIOV_MODE_NONE;
kunit_activate_static_stub(test, read_gmdid, fake_read_gmdid);
+ kunit_activate_static_stub(test, xe_info_probe_tile_count,
+ fake_xe_info_probe_tile_count);
xe_info_init_early(xe, desc, subplatform_desc);
xe_info_init(xe, desc);
@@ -159,7 +387,7 @@ EXPORT_SYMBOL_IF_KUNIT(xe_pci_fake_device_init);
* Return: pointer to the next &struct xe_device ready to be used as a parameter
* or NULL if there are no more Xe devices on the system.
*/
-const void *xe_pci_live_device_gen_param(const void *prev, char *desc)
+const void *xe_pci_live_device_gen_param(struct kunit *test, const void *prev, char *desc)
{
const struct xe_device *xe = prev;
struct device *dev = xe ? xe->drm.dev : NULL;
diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.h b/drivers/gpu/drm/xe/tests/xe_pci_test.h
index ce4d2b86b778..30505d1cbefc 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci_test.h
+++ b/drivers/gpu/drm/xe/tests/xe_pci_test.h
@@ -7,9 +7,11 @@
#define _XE_PCI_TEST_H_
#include <linux/types.h>
+#include <kunit/test.h>
#include "xe_platform_types.h"
#include "xe_sriov_types.h"
+#include "xe_step_types.h"
struct xe_device;
@@ -17,17 +19,18 @@ struct xe_pci_fake_data {
enum xe_sriov_mode sriov_mode;
enum xe_platform platform;
enum xe_subplatform subplatform;
+ struct xe_step_info step;
u32 graphics_verx100;
u32 media_verx100;
- u32 graphics_step;
- u32 media_step;
};
int xe_pci_fake_device_init(struct xe_device *xe);
+const void *xe_pci_fake_data_gen_params(struct kunit *test, const void *prev, char *desc);
+void xe_pci_fake_data_desc(const struct xe_pci_fake_data *param, char *desc);
-const void *xe_pci_graphics_ip_gen_param(const void *prev, char *desc);
-const void *xe_pci_media_ip_gen_param(const void *prev, char *desc);
-const void *xe_pci_id_gen_param(const void *prev, char *desc);
-const void *xe_pci_live_device_gen_param(const void *prev, char *desc);
+const void *xe_pci_graphics_ip_gen_param(struct kunit *test, const void *prev, char *desc);
+const void *xe_pci_media_ip_gen_param(struct kunit *test, const void *prev, char *desc);
+const void *xe_pci_id_gen_param(struct kunit *test, const void *prev, char *desc);
+const void *xe_pci_live_device_gen_param(struct kunit *test, const void *prev, char *desc);
#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_wa_test.c b/drivers/gpu/drm/xe/tests/xe_wa_test.c
index c96d1fe34151..49d191043dfa 100644
--- a/drivers/gpu/drm/xe/tests/xe_wa_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_wa_test.c
@@ -15,86 +15,10 @@
#include "xe_tuning.h"
#include "xe_wa.h"
-struct platform_test_case {
- const char *name;
- enum xe_platform platform;
- enum xe_subplatform subplatform;
- u32 graphics_verx100;
- u32 media_verx100;
- struct xe_step_info step;
-};
-
-#define PLATFORM_CASE(platform__, graphics_step__) \
- { \
- .name = #platform__ " (" #graphics_step__ ")", \
- .platform = XE_ ## platform__, \
- .subplatform = XE_SUBPLATFORM_NONE, \
- .step = { .graphics = STEP_ ## graphics_step__ } \
- }
-
-
-#define SUBPLATFORM_CASE(platform__, subplatform__, graphics_step__) \
- { \
- .name = #platform__ "_" #subplatform__ " (" #graphics_step__ ")", \
- .platform = XE_ ## platform__, \
- .subplatform = XE_SUBPLATFORM_ ## platform__ ## _ ## subplatform__, \
- .step = { .graphics = STEP_ ## graphics_step__ } \
- }
-
-#define GMDID_CASE(platform__, graphics_verx100__, graphics_step__, \
- media_verx100__, media_step__) \
- { \
- .name = #platform__ " (g:" #graphics_step__ ", m:" #media_step__ ")",\
- .platform = XE_ ## platform__, \
- .subplatform = XE_SUBPLATFORM_NONE, \
- .graphics_verx100 = graphics_verx100__, \
- .media_verx100 = media_verx100__, \
- .step = { .graphics = STEP_ ## graphics_step__, \
- .media = STEP_ ## media_step__ } \
- }
-
-static const struct platform_test_case cases[] = {
- PLATFORM_CASE(TIGERLAKE, B0),
- PLATFORM_CASE(DG1, A0),
- PLATFORM_CASE(DG1, B0),
- PLATFORM_CASE(ALDERLAKE_S, A0),
- PLATFORM_CASE(ALDERLAKE_S, B0),
- PLATFORM_CASE(ALDERLAKE_S, C0),
- PLATFORM_CASE(ALDERLAKE_S, D0),
- PLATFORM_CASE(ALDERLAKE_P, A0),
- PLATFORM_CASE(ALDERLAKE_P, B0),
- PLATFORM_CASE(ALDERLAKE_P, C0),
- SUBPLATFORM_CASE(ALDERLAKE_S, RPLS, D0),
- SUBPLATFORM_CASE(ALDERLAKE_P, RPLU, E0),
- SUBPLATFORM_CASE(DG2, G10, C0),
- SUBPLATFORM_CASE(DG2, G11, B1),
- SUBPLATFORM_CASE(DG2, G12, A1),
- GMDID_CASE(METEORLAKE, 1270, A0, 1300, A0),
- GMDID_CASE(METEORLAKE, 1271, A0, 1300, A0),
- GMDID_CASE(METEORLAKE, 1274, A0, 1300, A0),
- GMDID_CASE(LUNARLAKE, 2004, A0, 2000, A0),
- GMDID_CASE(LUNARLAKE, 2004, B0, 2000, A0),
- GMDID_CASE(BATTLEMAGE, 2001, A0, 1301, A1),
-};
-
-static void platform_desc(const struct platform_test_case *t, char *desc)
-{
- strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
-}
-
-KUNIT_ARRAY_PARAM(platform, cases, platform_desc);
-
static int xe_wa_test_init(struct kunit *test)
{
- const struct platform_test_case *param = test->param_value;
- struct xe_pci_fake_data data = {
- .platform = param->platform,
- .subplatform = param->subplatform,
- .graphics_verx100 = param->graphics_verx100,
- .media_verx100 = param->media_verx100,
- .graphics_step = param->step.graphics,
- .media_step = param->step.media,
- };
+ const struct xe_pci_fake_data *param = test->param_value;
+ struct xe_pci_fake_data data = *param;
struct xe_device *xe;
struct device *dev;
int ret;
@@ -119,13 +43,6 @@ static int xe_wa_test_init(struct kunit *test)
return 0;
}
-static void xe_wa_test_exit(struct kunit *test)
-{
- struct xe_device *xe = test->priv;
-
- drm_kunit_helper_free_device(test, xe->drm.dev);
-}
-
static void xe_wa_gt(struct kunit *test)
{
struct xe_device *xe = test->priv;
@@ -143,14 +60,13 @@ static void xe_wa_gt(struct kunit *test)
}
static struct kunit_case xe_wa_tests[] = {
- KUNIT_CASE_PARAM(xe_wa_gt, platform_gen_params),
+ KUNIT_CASE_PARAM(xe_wa_gt, xe_pci_fake_data_gen_params),
{}
};
static struct kunit_suite xe_rtp_test_suite = {
.name = "xe_wa",
.init = xe_wa_test_init,
- .exit = xe_wa_test_exit,
.test_cases = xe_wa_tests,
};
diff --git a/drivers/gpu/drm/xe/xe_assert.h b/drivers/gpu/drm/xe/xe_assert.h
index 68fe70ce2be3..a818eaa05b7d 100644
--- a/drivers/gpu/drm/xe/xe_assert.h
+++ b/drivers/gpu/drm/xe/xe_assert.h
@@ -12,6 +12,7 @@
#include "xe_gt_types.h"
#include "xe_step.h"
+#include "xe_vram.h"
/**
* DOC: Xe Asserts
@@ -145,7 +146,8 @@
const struct xe_tile *__tile = (tile); \
char __buf[10] __maybe_unused; \
xe_assert_msg(tile_to_xe(__tile), condition, "tile: %u VRAM %s\n" msg, \
- __tile->id, ({ string_get_size(__tile->mem.vram.actual_physical_size, 1, \
+ __tile->id, ({ string_get_size( \
+ xe_vram_region_actual_physical_size(__tile->mem.vram), 1, \
STRING_UNITS_2, __buf, sizeof(__buf)); __buf; }), ## arg); \
})
diff --git a/drivers/gpu/drm/xe/xe_bb.c b/drivers/gpu/drm/xe/xe_bb.c
index 5ce0e26822f2..6d20229c11de 100644
--- a/drivers/gpu/drm/xe/xe_bb.c
+++ b/drivers/gpu/drm/xe/xe_bb.c
@@ -60,6 +60,41 @@ err:
return ERR_PTR(err);
}
+struct xe_bb *xe_bb_ccs_new(struct xe_gt *gt, u32 dwords,
+ enum xe_sriov_vf_ccs_rw_ctxs ctx_id)
+{
+ struct xe_bb *bb = kmalloc(sizeof(*bb), GFP_KERNEL);
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_sa_manager *bb_pool;
+ int err;
+
+ if (!bb)
+ return ERR_PTR(-ENOMEM);
+	/*
+	 * We need to allocate space for the requested number of dwords and
+	 * one additional MI_BATCH_BUFFER_END dword. Since the whole SA is
+	 * submitted to HW, we need to make sure that the last instruction is
+	 * not overwritten when the last chunk of the SA is allocated for the
+	 * BB. This extra DW acts as a guard.
+	 */
+
+ bb_pool = xe->sriov.vf.ccs.contexts[ctx_id].mem.ccs_bb_pool;
+ bb->bo = xe_sa_bo_new(bb_pool, 4 * (dwords + 1));
+
+ if (IS_ERR(bb->bo)) {
+ err = PTR_ERR(bb->bo);
+ goto err;
+ }
+
+ bb->cs = xe_sa_bo_cpu_addr(bb->bo);
+ bb->len = 0;
+
+ return bb;
+err:
+ kfree(bb);
+ return ERR_PTR(err);
+}
+
static struct xe_sched_job *
__xe_bb_create_job(struct xe_exec_queue *q, struct xe_bb *bb, u64 *addr)
{
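
For illustration, the extra dword reserved by xe_bb_ccs_new() leaves room for the batch terminator even when the caller fills every requested dword. A hypothetical finish helper (not part of the patch) would then do:

/* Illustration only: terminate a CCS BB built from the pool above. */
static void example_finish_ccs_bb(struct xe_bb *bb)
{
	/* The +1 dword reserved at allocation guarantees this slot exists. */
	bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
}
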
diff --git a/drivers/gpu/drm/xe/xe_bb.h b/drivers/gpu/drm/xe/xe_bb.h
index b5cc65506696..2a8adc9a6dee 100644
--- a/drivers/gpu/drm/xe/xe_bb.h
+++ b/drivers/gpu/drm/xe/xe_bb.h
@@ -13,8 +13,11 @@ struct dma_fence;
struct xe_gt;
struct xe_exec_queue;
struct xe_sched_job;
+enum xe_sriov_vf_ccs_rw_ctxs;
struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 dwords, bool usm);
+struct xe_bb *xe_bb_ccs_new(struct xe_gt *gt, u32 dwords,
+ enum xe_sriov_vf_ccs_rw_ctxs ctx_id);
struct xe_sched_job *xe_bb_create_job(struct xe_exec_queue *q,
struct xe_bb *bb);
struct xe_sched_job *xe_bb_create_migration_job(struct xe_exec_queue *q,
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 18f27da47a36..4410e28dee54 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -33,9 +33,11 @@
#include "xe_pxp.h"
#include "xe_res_cursor.h"
#include "xe_shrinker.h"
+#include "xe_sriov_vf_ccs.h"
#include "xe_trace_bo.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_vm.h"
+#include "xe_vram_types.h"
const char *const xe_mem_type_to_name[TTM_NUM_MEM_TYPES] = {
[XE_PL_SYSTEM] = "system",
@@ -186,6 +188,8 @@ static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
bo->placements[*c] = (struct ttm_place) {
.mem_type = XE_PL_TT,
+ .flags = (bo_flags & XE_BO_FLAG_VRAM_MASK) ?
+ TTM_PL_FLAG_FALLBACK : 0,
};
*c += 1;
}
@@ -198,6 +202,8 @@ static bool force_contiguous(u32 bo_flags)
else if (bo_flags & XE_BO_FLAG_PINNED &&
!(bo_flags & XE_BO_FLAG_PINNED_LATE_RESTORE))
return true; /* needs vmap */
+ else if (bo_flags & XE_BO_FLAG_CPU_ADDR_MIRROR)
+ return true;
/*
* For eviction / restore on suspend / resume objects pinned in VRAM
@@ -812,14 +818,14 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
}
if (ttm_bo->type == ttm_bo_type_sg) {
- ret = xe_bo_move_notify(bo, ctx);
+ if (new_mem->mem_type == XE_PL_SYSTEM)
+ ret = xe_bo_move_notify(bo, ctx);
if (!ret)
ret = xe_bo_move_dmabuf(ttm_bo, new_mem);
return ret;
}
- tt_has_data = ttm && (ttm_tt_is_populated(ttm) ||
- (ttm->page_flags & TTM_TT_FLAG_SWAPPED));
+ tt_has_data = ttm && (ttm_tt_is_populated(ttm) || ttm_tt_is_swapped(ttm));
move_lacks_source = !old_mem || (handle_system_ccs ? (!bo->ccs_cleared) :
(!mem_type_is_vram(old_mem_type) && !tt_has_data));
@@ -964,6 +970,20 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
dma_fence_put(fence);
xe_pm_runtime_put(xe);
+ /*
+ * CCS meta data is migrated from TT -> SMEM. So, let us detach the
+ * BBs from BO as it is no longer needed.
+ */
+ if (IS_VF_CCS_READY(xe) && old_mem_type == XE_PL_TT &&
+ new_mem->mem_type == XE_PL_SYSTEM)
+ xe_sriov_vf_ccs_detach_bo(bo);
+
+ if (IS_VF_CCS_READY(xe) &&
+ ((move_lacks_source && new_mem->mem_type == XE_PL_TT) ||
+ (old_mem_type == XE_PL_SYSTEM && new_mem->mem_type == XE_PL_TT)) &&
+ handle_system_ccs)
+ ret = xe_sriov_vf_ccs_attach_bo(bo);
+
out:
if ((!ttm_bo->resource || ttm_bo->resource->mem_type == XE_PL_SYSTEM) &&
ttm_bo->ttm) {
@@ -974,6 +994,9 @@ out:
if (timeout < 0)
ret = timeout;
+ if (IS_VF_CCS_READY(xe))
+ xe_sriov_vf_ccs_detach_bo(bo);
+
xe_tt_unmap_sg(xe, ttm_bo->ttm);
}
@@ -1118,42 +1141,47 @@ out_unref:
int xe_bo_notifier_prepare_pinned(struct xe_bo *bo)
{
struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
struct xe_bo *backup;
int ret = 0;
- xe_bo_lock(bo, false);
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.exclusive = true}, ret) {
+ ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
+ drm_exec_retry_on_contention(&exec);
+ xe_assert(xe, !ret);
+ xe_assert(xe, !bo->backup_obj);
- xe_assert(xe, !bo->backup_obj);
+ /*
+ * Since this is called from the PM notifier we might have raced with
+ * someone unpinning this after we dropped the pinned list lock and
+ * grabbing the above bo lock.
+ */
+ if (!xe_bo_is_pinned(bo))
+ break;
- /*
- * Since this is called from the PM notifier we might have raced with
- * someone unpinning this after we dropped the pinned list lock and
- * grabbing the above bo lock.
- */
- if (!xe_bo_is_pinned(bo))
- goto out_unlock_bo;
+ if (!xe_bo_is_vram(bo))
+ break;
- if (!xe_bo_is_vram(bo))
- goto out_unlock_bo;
+ if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
+ break;
- if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
- goto out_unlock_bo;
+ backup = xe_bo_init_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, xe_bo_size(bo),
+ DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED, &exec);
+ if (IS_ERR(backup)) {
+ drm_exec_retry_on_contention(&exec);
+ ret = PTR_ERR(backup);
+ xe_validation_retry_on_oom(&ctx, &ret);
+ break;
+ }
- backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, xe_bo_size(bo),
- DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
- XE_BO_FLAG_PINNED);
- if (IS_ERR(backup)) {
- ret = PTR_ERR(backup);
- goto out_unlock_bo;
+ backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
+ ttm_bo_pin(&backup->ttm);
+ bo->backup_obj = backup;
}
- backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
- ttm_bo_pin(&backup->ttm);
- bo->backup_obj = backup;
-
-out_unlock_bo:
- xe_bo_unlock(bo);
return ret;
}
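
The xe_validation_guard() conversion above is the pattern repeated throughout this patch. A skeleton of its use, with an illustrative body (the macro re-runs the block on drm_exec contention, and xe_validation_retry_on_oom() arranges a retry under memory pressure):

	struct xe_validation_ctx ctx;
	struct drm_exec exec;
	int ret = 0;

	xe_validation_guard(&ctx, &xe->val, &exec,
			    (struct xe_val_flags) {.interruptible = true}, ret) {
		ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			break;
		/* ... operate on the locked object; on allocation failure: */
		xe_validation_retry_on_oom(&ctx, &ret);
	}
	return ret;
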
@@ -1179,57 +1207,12 @@ int xe_bo_notifier_unprepare_pinned(struct xe_bo *bo)
return 0;
}
-/**
- * xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
- * @bo: The buffer object to move.
- *
- * On successful completion, the object memory will be moved to system memory.
- *
- * This is needed to for special handling of pinned VRAM object during
- * suspend-resume.
- *
- * Return: 0 on success. Negative error code on failure.
- */
-int xe_bo_evict_pinned(struct xe_bo *bo)
+static int xe_bo_evict_pinned_copy(struct xe_bo *bo, struct xe_bo *backup)
{
- struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
- struct xe_bo *backup = bo->backup_obj;
- bool backup_created = false;
+ struct xe_device *xe = xe_bo_device(bo);
bool unmap = false;
int ret = 0;
- xe_bo_lock(bo, false);
-
- if (WARN_ON(!bo->ttm.resource)) {
- ret = -EINVAL;
- goto out_unlock_bo;
- }
-
- if (WARN_ON(!xe_bo_is_pinned(bo))) {
- ret = -EINVAL;
- goto out_unlock_bo;
- }
-
- if (!xe_bo_is_vram(bo))
- goto out_unlock_bo;
-
- if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
- goto out_unlock_bo;
-
- if (!backup) {
- backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv,
- NULL, xe_bo_size(bo),
- DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
- XE_BO_FLAG_PINNED);
- if (IS_ERR(backup)) {
- ret = PTR_ERR(backup);
- goto out_unlock_bo;
- }
- backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
- backup_created = true;
- }
-
if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) {
struct xe_migrate *migrate;
struct dma_fence *fence;
@@ -1239,14 +1222,11 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
else
migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type);
+ xe_assert(xe, bo->ttm.base.resv == backup->ttm.base.resv);
ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
if (ret)
goto out_backup;
- ret = dma_resv_reserve_fences(backup->ttm.base.resv, 1);
- if (ret)
- goto out_backup;
-
fence = xe_migrate_copy(migrate, bo, backup, bo->ttm.resource,
backup->ttm.resource, false);
if (IS_ERR(fence)) {
@@ -1256,8 +1236,6 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
dma_resv_add_fence(bo->ttm.base.resv, fence,
DMA_RESV_USAGE_KERNEL);
- dma_resv_add_fence(backup->ttm.base.resv, fence,
- DMA_RESV_USAGE_KERNEL);
dma_fence_put(fence);
} else {
ret = xe_bo_vmap(backup);
@@ -1267,7 +1245,7 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
if (iosys_map_is_null(&bo->vmap)) {
ret = xe_bo_vmap(bo);
if (ret)
- goto out_backup;
+ goto out_vunmap;
unmap = true;
}
@@ -1277,15 +1255,78 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
if (!bo->backup_obj)
bo->backup_obj = backup;
-
-out_backup:
+out_vunmap:
xe_bo_vunmap(backup);
- if (ret && backup_created)
- xe_bo_put(backup);
-out_unlock_bo:
+out_backup:
if (unmap)
xe_bo_vunmap(bo);
- xe_bo_unlock(bo);
+
+ return ret;
+}
+
+/**
+ * xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
+ * @bo: The buffer object to move.
+ *
+ * On successful completion, the object memory will be moved to system memory.
+ *
+ * This is needed to for special handling of pinned VRAM object during
+ * suspend-resume.
+ *
+ * Return: 0 on success. Negative error code on failure.
+ */
+int xe_bo_evict_pinned(struct xe_bo *bo)
+{
+ struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
+ struct xe_bo *backup = bo->backup_obj;
+ bool backup_created = false;
+ int ret = 0;
+
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.exclusive = true}, ret) {
+ ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
+ drm_exec_retry_on_contention(&exec);
+ xe_assert(xe, !ret);
+
+ if (WARN_ON(!bo->ttm.resource)) {
+ ret = -EINVAL;
+ break;
+ }
+
+ if (WARN_ON(!xe_bo_is_pinned(bo))) {
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!xe_bo_is_vram(bo))
+ break;
+
+ if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
+ break;
+
+ if (!backup) {
+ backup = xe_bo_init_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL,
+ xe_bo_size(bo),
+ DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED, &exec);
+ if (IS_ERR(backup)) {
+ drm_exec_retry_on_contention(&exec);
+ ret = PTR_ERR(backup);
+ xe_validation_retry_on_oom(&ctx, &ret);
+ break;
+ }
+ backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
+ backup_created = true;
+ }
+
+ ret = xe_bo_evict_pinned_copy(bo, backup);
+ }
+
+ if (ret && backup_created)
+ xe_bo_put(backup);
+
return ret;
}
@@ -1335,10 +1376,6 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
if (ret)
goto out_unlock_bo;
- ret = dma_resv_reserve_fences(backup->ttm.base.resv, 1);
- if (ret)
- goto out_unlock_bo;
-
fence = xe_migrate_copy(migrate, backup, bo,
backup->ttm.resource, bo->ttm.resource,
false);
@@ -1349,8 +1386,6 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
dma_resv_add_fence(bo->ttm.base.resv, fence,
DMA_RESV_USAGE_KERNEL);
- dma_resv_add_fence(backup->ttm.base.resv, fence,
- DMA_RESV_USAGE_KERNEL);
dma_fence_put(fence);
} else {
ret = xe_bo_vmap(backup);
@@ -1501,9 +1536,14 @@ static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo)
{
+ struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
+
if (!xe_bo_is_xe_bo(ttm_bo))
return;
+ if (IS_VF_CCS_READY(ttm_to_xe_device(ttm_bo->bdev)))
+ xe_sriov_vf_ccs_detach_bo(bo);
+
/*
* Object is idle and about to be destroyed. Release the
* dma-buf attachment.
@@ -1685,50 +1725,258 @@ static void xe_gem_object_close(struct drm_gem_object *obj,
}
}
-static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
+static bool should_migrate_to_smem(struct xe_bo *bo)
+{
+ /*
+ * NOTE: The following atomic checks are platform-specific. For example,
+ * if a device supports CXL atomics, these may not be necessary or
+ * may behave differently.
+ */
+
+ return bo->attr.atomic_access == DRM_XE_ATOMIC_GLOBAL ||
+ bo->attr.atomic_access == DRM_XE_ATOMIC_CPU;
+}
+
+static int xe_bo_wait_usage_kernel(struct xe_bo *bo, struct ttm_operation_ctx *ctx)
+{
+ long lerr;
+
+ if (ctx->no_wait_gpu)
+ return dma_resv_test_signaled(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL) ?
+ 0 : -EBUSY;
+
+ lerr = dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
+ ctx->interruptible, MAX_SCHEDULE_TIMEOUT);
+ if (lerr < 0)
+ return lerr;
+ if (lerr == 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+/* Populate the bo if swapped out, or migrate if the access mode requires that. */
+static int xe_bo_fault_migrate(struct xe_bo *bo, struct ttm_operation_ctx *ctx,
+ struct drm_exec *exec)
+{
+ struct ttm_buffer_object *tbo = &bo->ttm;
+ int err = 0;
+
+ if (ttm_manager_type(tbo->bdev, tbo->resource->mem_type)->use_tt) {
+ err = xe_bo_wait_usage_kernel(bo, ctx);
+ if (!err)
+ err = ttm_bo_populate(&bo->ttm, ctx);
+ } else if (should_migrate_to_smem(bo)) {
+ xe_assert(xe_bo_device(bo), bo->flags & XE_BO_FLAG_SYSTEM);
+ err = xe_bo_migrate(bo, XE_PL_TT, ctx, exec);
+ }
+
+ return err;
+}
+
+/* Call into TTM to populate PTEs, and register bo for PTE removal on runtime suspend. */
+static vm_fault_t __xe_bo_cpu_fault(struct vm_fault *vmf, struct xe_device *xe, struct xe_bo *bo)
+{
+ vm_fault_t ret;
+
+ trace_xe_bo_cpu_fault(bo);
+
+ ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+ TTM_BO_VM_NUM_PREFAULT);
+	/*
+	 * By the time TTM is actually called to insert PTEs, no blocking
+	 * conditions should remain, so TTM must not drop locks and return
+	 * VM_FAULT_RETRY here.
+	 */
+ xe_assert(xe, ret != VM_FAULT_RETRY);
+
+ if (ret == VM_FAULT_NOPAGE &&
+ mem_type_is_vram(bo->ttm.resource->mem_type)) {
+ mutex_lock(&xe->mem_access.vram_userfault.lock);
+ if (list_empty(&bo->vram_userfault_link))
+ list_add(&bo->vram_userfault_link,
+ &xe->mem_access.vram_userfault.list);
+ mutex_unlock(&xe->mem_access.vram_userfault.lock);
+ }
+
+ return ret;
+}
+
+static vm_fault_t xe_err_to_fault_t(int err)
+{
+ switch (err) {
+ case 0:
+ case -EINTR:
+ case -ERESTARTSYS:
+ case -EAGAIN:
+ return VM_FAULT_NOPAGE;
+ case -ENOMEM:
+ case -ENOSPC:
+ return VM_FAULT_OOM;
+ default:
+ break;
+ }
+ return VM_FAULT_SIGBUS;
+}
+
+static bool xe_ttm_bo_is_imported(struct ttm_buffer_object *tbo)
+{
+ dma_resv_assert_held(tbo->base.resv);
+
+ return tbo->ttm &&
+ (tbo->ttm->page_flags & (TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE)) ==
+ TTM_TT_FLAG_EXTERNAL;
+}
+
+static vm_fault_t xe_bo_cpu_fault_fastpath(struct vm_fault *vmf, struct xe_device *xe,
+ struct xe_bo *bo, bool needs_rpm)
+{
+ struct ttm_buffer_object *tbo = &bo->ttm;
+ vm_fault_t ret = VM_FAULT_RETRY;
+ struct xe_validation_ctx ctx;
+ struct ttm_operation_ctx tctx = {
+ .interruptible = true,
+ .no_wait_gpu = true,
+ .gfp_retry_mayfail = true,
+
+ };
+ int err;
+
+ if (needs_rpm && !xe_pm_runtime_get_if_active(xe))
+ return VM_FAULT_RETRY;
+
+ err = xe_validation_ctx_init(&ctx, &xe->val, NULL,
+ (struct xe_val_flags) {
+ .interruptible = true,
+ .no_block = true
+ });
+ if (err)
+ goto out_pm;
+
+ if (!dma_resv_trylock(tbo->base.resv))
+ goto out_validation;
+
+ if (xe_ttm_bo_is_imported(tbo)) {
+ ret = VM_FAULT_SIGBUS;
+ drm_dbg(&xe->drm, "CPU trying to access an imported buffer object.\n");
+ goto out_unlock;
+ }
+
+ err = xe_bo_fault_migrate(bo, &tctx, NULL);
+ if (err) {
+ /* Return VM_FAULT_RETRY on these errors. */
+ if (err != -ENOMEM && err != -ENOSPC && err != -EBUSY)
+ ret = xe_err_to_fault_t(err);
+ goto out_unlock;
+ }
+
+ if (dma_resv_test_signaled(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL))
+ ret = __xe_bo_cpu_fault(vmf, xe, bo);
+
+out_unlock:
+ dma_resv_unlock(tbo->base.resv);
+out_validation:
+ xe_validation_ctx_fini(&ctx);
+out_pm:
+ if (needs_rpm)
+ xe_pm_runtime_put(xe);
+
+ return ret;
+}
+
+static vm_fault_t xe_bo_cpu_fault(struct vm_fault *vmf)
{
struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
struct drm_device *ddev = tbo->base.dev;
struct xe_device *xe = to_xe_device(ddev);
struct xe_bo *bo = ttm_to_xe_bo(tbo);
bool needs_rpm = bo->flags & XE_BO_FLAG_VRAM_MASK;
+ bool retry_after_wait = false;
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
vm_fault_t ret;
+ int err = 0;
int idx;
- if (needs_rpm)
- xe_pm_runtime_get(xe);
+ if (!drm_dev_enter(&xe->drm, &idx))
+ return ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
- ret = ttm_bo_vm_reserve(tbo, vmf);
- if (ret)
+ ret = xe_bo_cpu_fault_fastpath(vmf, xe, bo, needs_rpm);
+ if (ret != VM_FAULT_RETRY)
goto out;
- if (drm_dev_enter(ddev, &idx)) {
- trace_xe_bo_cpu_fault(bo);
-
- ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
- TTM_BO_VM_NUM_PREFAULT);
- drm_dev_exit(idx);
+ if (fault_flag_allow_retry_first(vmf->flags)) {
+ if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
+ goto out;
+ retry_after_wait = true;
+ xe_bo_get(bo);
+ mmap_read_unlock(vmf->vma->vm_mm);
} else {
- ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
+ ret = VM_FAULT_NOPAGE;
}
- if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
- goto out;
/*
- * ttm_bo_vm_reserve() already has dma_resv_lock.
+ * The fastpath failed and we were not required to return and retry immediately.
+ * We're now running in one of two modes:
+ *
+ * 1) retry_after_wait == true: The mmap_read_lock() has been dropped, so here
+ * we only resolve blocking waits; the fault itself can't be resolved without
+ * the lock. The aim is for the fastpath to succeed on the retried fault,
+ * although it may still fail since we drop the bo lock.
+ *
+ * 2) retry_after_wait == false: The fastpath failed, typically even after
+ * a retry. Do whatever's necessary to resolve the fault.
+ *
+ * This construct is recommended to avoid excessive waits under the mmap_lock.
*/
- if (ret == VM_FAULT_NOPAGE && mem_type_is_vram(tbo->resource->mem_type)) {
- mutex_lock(&xe->mem_access.vram_userfault.lock);
- if (list_empty(&bo->vram_userfault_link))
- list_add(&bo->vram_userfault_link, &xe->mem_access.vram_userfault.list);
- mutex_unlock(&xe->mem_access.vram_userfault.lock);
+
+ if (needs_rpm)
+ xe_pm_runtime_get(xe);
+
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = true},
+ err) {
+ struct ttm_operation_ctx tctx = {
+ .interruptible = true,
+ .no_wait_gpu = false,
+ .gfp_retry_mayfail = retry_after_wait,
+ };
+
+ err = drm_exec_lock_obj(&exec, &tbo->base);
+ drm_exec_retry_on_contention(&exec);
+ if (err)
+ break;
+
+ if (xe_ttm_bo_is_imported(tbo)) {
+ err = -EFAULT;
+ drm_dbg(&xe->drm, "CPU trying to access an imported buffer object.\n");
+ break;
+ }
+
+ err = xe_bo_fault_migrate(bo, &tctx, &exec);
+ if (err) {
+ drm_exec_retry_on_contention(&exec);
+ xe_validation_retry_on_oom(&ctx, &err);
+ break;
+ }
+
+ err = xe_bo_wait_usage_kernel(bo, &tctx);
+ if (err)
+ break;
+
+ if (!retry_after_wait)
+ ret = __xe_bo_cpu_fault(vmf, xe, bo);
}
+ /* if retry_after_wait == true, we *must* return VM_FAULT_RETRY. */
+ if (err && !retry_after_wait)
+ ret = xe_err_to_fault_t(err);
- dma_resv_unlock(tbo->base.resv);
-out:
if (needs_rpm)
xe_pm_runtime_put(xe);
+ if (retry_after_wait)
+ xe_bo_put(bo);
+out:
+ drm_dev_exit(idx);
+
return ret;
}
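
The slow path above follows the generic fault-retry protocol: with FAULT_FLAG_ALLOW_RETRY set, a handler may drop mmap_lock, perform its blocking waits, and return VM_FAULT_RETRY so that the core re-issues the fault. A condensed sketch of that protocol (the example_* helpers are hypothetical):

static vm_fault_t example_fault_slowpath(struct vm_fault *vmf)
{
	if (fault_flag_allow_retry_first(vmf->flags) &&
	    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
		/* Keep the object alive across the dropped mmap_lock. */
		example_object_get(vmf->vma->vm_private_data);	/* hypothetical */
		mmap_read_unlock(vmf->vma->vm_mm);
		example_wait_for_idle();			/* hypothetical */
		example_object_put(vmf->vma->vm_private_data);	/* hypothetical */
		return VM_FAULT_RETRY;
	}
	/* Retry not allowed: resolve the fault while holding mmap_lock. */
	return VM_FAULT_NOPAGE;
}
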
@@ -1772,7 +2020,7 @@ int xe_bo_read(struct xe_bo *bo, u64 offset, void *dst, int size)
}
static const struct vm_operations_struct xe_gem_vm_ops = {
- .fault = xe_gem_fault,
+ .fault = xe_bo_cpu_fault,
.open = ttm_bo_vm_open,
.close = ttm_bo_vm_close,
.access = xe_bo_vm_access,
@@ -1820,11 +2068,32 @@ void xe_bo_free(struct xe_bo *bo)
kfree(bo);
}
-struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
- struct xe_tile *tile, struct dma_resv *resv,
- struct ttm_lru_bulk_move *bulk, size_t size,
- u16 cpu_caching, enum ttm_bo_type type,
- u32 flags)
+/**
+ * xe_bo_init_locked() - Initialize or create an xe_bo.
+ * @xe: The xe device.
+ * @bo: An already allocated buffer object or NULL
+ * if the function should allocate a new one.
+ * @tile: The tile to select for migration of this bo, and the tile used for
+ * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
+ * @resv: Pointer to a locked shared reservation object to use for this bo,
+ * or NULL for the xe_bo to use its own.
+ * @bulk: The bulk move to use for LRU bumping, or NULL for external bos.
+ * @size: The storage size to use for the bo.
+ * @cpu_caching: The cpu caching used for system memory backing store.
+ * @type: The TTM buffer object type.
+ * @flags: XE_BO_FLAG_ flags.
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
+ *
+ * Initialize or create an xe buffer object. On failure, any allocated buffer
+ * object passed in @bo will have been unreferenced.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ */
+struct xe_bo *xe_bo_init_locked(struct xe_device *xe, struct xe_bo *bo,
+ struct xe_tile *tile, struct dma_resv *resv,
+ struct ttm_lru_bulk_move *bulk, size_t size,
+ u16 cpu_caching, enum ttm_bo_type type,
+ u32 flags, struct drm_exec *exec)
{
struct ttm_operation_ctx ctx = {
.interruptible = true,
@@ -1893,6 +2162,7 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
ctx.resv = resv;
}
+ xe_validation_assert_exec(xe, exec, &bo->ttm.base);
if (!(flags & XE_BO_FLAG_FIXED_PLACEMENT)) {
err = __xe_bo_placement_for_flags(xe, bo, bo->flags);
if (WARN_ON(err)) {
@@ -1994,7 +2264,7 @@ __xe_bo_create_locked(struct xe_device *xe,
struct xe_tile *tile, struct xe_vm *vm,
size_t size, u64 start, u64 end,
u16 cpu_caching, enum ttm_bo_type type, u32 flags,
- u64 alignment)
+ u64 alignment, struct drm_exec *exec)
{
struct xe_bo *bo = NULL;
int err;
@@ -2015,11 +2285,11 @@ __xe_bo_create_locked(struct xe_device *xe,
}
}
- bo = ___xe_bo_create_locked(xe, bo, tile, vm ? xe_vm_resv(vm) : NULL,
- vm && !xe_vm_in_fault_mode(vm) &&
- flags & XE_BO_FLAG_USER ?
- &vm->lru_bulk_move : NULL, size,
- cpu_caching, type, flags);
+ bo = xe_bo_init_locked(xe, bo, tile, vm ? xe_vm_resv(vm) : NULL,
+ vm && !xe_vm_in_fault_mode(vm) &&
+ flags & XE_BO_FLAG_USER ?
+ &vm->lru_bulk_move : NULL, size,
+ cpu_caching, type, flags, exec);
if (IS_ERR(bo))
return bo;
@@ -2053,9 +2323,10 @@ __xe_bo_create_locked(struct xe_device *xe,
if (flags & XE_BO_FLAG_FIXED_PLACEMENT) {
err = xe_ggtt_insert_bo_at(t->mem.ggtt, bo,
- start + xe_bo_size(bo), U64_MAX);
+ start + xe_bo_size(bo), U64_MAX,
+ exec);
} else {
- err = xe_ggtt_insert_bo(t->mem.ggtt, bo);
+ err = xe_ggtt_insert_bo(t->mem.ggtt, bo, exec);
}
if (err)
goto err_unlock_put_bo;
@@ -2072,82 +2343,166 @@ err_unlock_put_bo:
return ERR_PTR(err);
}
-struct xe_bo *
-xe_bo_create_locked_range(struct xe_device *xe,
- struct xe_tile *tile, struct xe_vm *vm,
- size_t size, u64 start, u64 end,
- enum ttm_bo_type type, u32 flags, u64 alignment)
-{
- return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type,
- flags, alignment);
-}
-
+/**
+ * xe_bo_create_locked() - Create a BO
+ * @xe: The xe device.
+ * @tile: The tile to select for migration of this bo, and the tile used for
+ * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
+ * @vm: The local vm or NULL for external objects.
+ * @size: The storage size to use for the bo.
+ * @type: The TTM buffer object type.
+ * @flags: XE_BO_FLAG_ flags.
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
+ *
+ * Create a locked xe BO with no range- nor alignment restrictions.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ */
struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm, size_t size,
- enum ttm_bo_type type, u32 flags)
+ enum ttm_bo_type type, u32 flags,
+ struct drm_exec *exec)
{
return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type,
- flags, 0);
+ flags, 0, exec);
}
-struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
- struct xe_vm *vm, size_t size,
- u16 cpu_caching,
- u32 flags)
+static struct xe_bo *xe_bo_create_novm(struct xe_device *xe, struct xe_tile *tile,
+ size_t size, u16 cpu_caching,
+ enum ttm_bo_type type, u32 flags,
+ u64 alignment, bool intr)
{
- struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL,
- cpu_caching, ttm_bo_type_device,
- flags | XE_BO_FLAG_USER, 0);
- if (!IS_ERR(bo))
- xe_bo_unlock_vm_held(bo);
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
+ struct xe_bo *bo;
+ int ret = 0;
- return bo;
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = intr},
+ ret) {
+ bo = __xe_bo_create_locked(xe, tile, NULL, size, 0, ~0ULL,
+ cpu_caching, type, flags, alignment, &exec);
+ drm_exec_retry_on_contention(&exec);
+ if (IS_ERR(bo)) {
+ ret = PTR_ERR(bo);
+ xe_validation_retry_on_oom(&ctx, &ret);
+ } else {
+ xe_bo_unlock(bo);
+ }
+ }
+
+ return ret ? ERR_PTR(ret) : bo;
}
-struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_tile *tile,
- struct xe_vm *vm, size_t size,
- enum ttm_bo_type type, u32 flags)
+/**
+ * xe_bo_create_user() - Create a user BO
+ * @xe: The xe device.
+ * @vm: The local vm or NULL for external objects.
+ * @size: The storage size to use for the bo.
+ * @cpu_caching: The caching mode to be used for system backing store.
+ * @flags: XE_BO_FLAG_ flags.
+ * @exec: The drm_exec transaction to use for exhaustive eviction, or NULL
+ * if such a transaction should be initiated by the call.
+ *
+ * Create a bo on behalf of user-space.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ */
+struct xe_bo *xe_bo_create_user(struct xe_device *xe,
+ struct xe_vm *vm, size_t size,
+ u16 cpu_caching,
+ u32 flags, struct drm_exec *exec)
{
- struct xe_bo *bo = xe_bo_create_locked(xe, tile, vm, size, type, flags);
+ struct xe_bo *bo;
- if (!IS_ERR(bo))
- xe_bo_unlock_vm_held(bo);
+ flags |= XE_BO_FLAG_USER;
+
+ if (vm || exec) {
+ xe_assert(xe, exec);
+ bo = __xe_bo_create_locked(xe, NULL, vm, size, 0, ~0ULL,
+ cpu_caching, ttm_bo_type_device,
+ flags, 0, exec);
+ if (!IS_ERR(bo))
+ xe_bo_unlock_vm_held(bo);
+ } else {
+ bo = xe_bo_create_novm(xe, NULL, size, cpu_caching,
+ ttm_bo_type_device, flags, 0, true);
+ }
return bo;
}
-struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile,
- struct xe_vm *vm,
- size_t size, u64 offset,
- enum ttm_bo_type type, u32 flags)
+/**
+ * xe_bo_create_pin_range_novm() - Create and pin a BO with range options.
+ * @xe: The xe device.
+ * @tile: The tile to select for migration of this bo, and the tile used for
+ * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
+ * @size: The storage size to use for the bo.
+ * @start: Start of fixed VRAM range or 0.
+ * @end: End of fixed VRAM range or ~0ULL.
+ * @type: The TTM buffer object type.
+ * @flags: XE_BO_FLAG_ flags.
+ *
+ * Create and pin an Xe BO with range options. If @start and @end indicate
+ * a fixed VRAM range, this must be a ttm_bo_type_kernel bo with VRAM placement
+ * only.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ */
+struct xe_bo *xe_bo_create_pin_range_novm(struct xe_device *xe, struct xe_tile *tile,
+ size_t size, u64 start, u64 end,
+ enum ttm_bo_type type, u32 flags)
{
- return xe_bo_create_pin_map_at_aligned(xe, tile, vm, size, offset,
- type, flags, 0);
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
+ struct xe_bo *bo;
+ int err = 0;
+
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
+ bo = __xe_bo_create_locked(xe, tile, NULL, size, start, end,
+ 0, type, flags, 0, &exec);
+ if (IS_ERR(bo)) {
+ drm_exec_retry_on_contention(&exec);
+ err = PTR_ERR(bo);
+ xe_validation_retry_on_oom(&ctx, &err);
+ break;
+ }
+
+ err = xe_bo_pin(bo, &exec);
+ xe_bo_unlock(bo);
+ if (err) {
+ xe_bo_put(bo);
+ drm_exec_retry_on_contention(&exec);
+ xe_validation_retry_on_oom(&ctx, &err);
+ break;
+ }
+ }
+
+ return err ? ERR_PTR(err) : bo;
}
-struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
- struct xe_tile *tile,
- struct xe_vm *vm,
- size_t size, u64 offset,
- enum ttm_bo_type type, u32 flags,
- u64 alignment)
+static struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
+ struct xe_tile *tile,
+ struct xe_vm *vm,
+ size_t size, u64 offset,
+ enum ttm_bo_type type, u32 flags,
+ u64 alignment, struct drm_exec *exec)
{
struct xe_bo *bo;
int err;
u64 start = offset == ~0ull ? 0 : offset;
- u64 end = offset == ~0ull ? offset : start + size;
+ u64 end = offset == ~0ull ? ~0ull : start + size;
if (flags & XE_BO_FLAG_STOLEN &&
xe_ttm_stolen_cpu_access_needs_ggtt(xe))
flags |= XE_BO_FLAG_GGTT;
- bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type,
- flags | XE_BO_FLAG_NEEDS_CPU_ACCESS | XE_BO_FLAG_PINNED,
- alignment);
+ bo = __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type,
+ flags | XE_BO_FLAG_NEEDS_CPU_ACCESS | XE_BO_FLAG_PINNED,
+ alignment, exec);
if (IS_ERR(bo))
return bo;
- err = xe_bo_pin(bo);
+ err = xe_bo_pin(bo, exec);
if (err)
goto err_put;
@@ -2167,11 +2522,100 @@ err_put:
return ERR_PTR(err);
}
+/**
+ * xe_bo_create_pin_map_at_novm() - Create pinned and mapped bo at optional VRAM offset
+ * @xe: The xe device.
+ * @tile: The tile to select for migration of this bo, and the tile used for
+ * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
+ * @size: The storage size to use for the bo.
+ * @offset: Optional VRAM offset or %~0ull for don't care.
+ * @type: The TTM buffer object type.
+ * @flags: XE_BO_FLAG_ flags.
+ * @alignment: GGTT alignment.
+ * @intr: Whether to execute any waits for backing store interruptibly.
+ *
+ * Create a pinned and optionally mapped bo with VRAM offset and GGTT alignment
+ * options. The bo will be external and not associated with a VM.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ * In particular, the function may return ERR_PTR(%-EINTR) if @intr was set
+ * to true on entry.
+ */
+struct xe_bo *
+xe_bo_create_pin_map_at_novm(struct xe_device *xe, struct xe_tile *tile,
+ size_t size, u64 offset, enum ttm_bo_type type, u32 flags,
+ u64 alignment, bool intr)
+{
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
+ struct xe_bo *bo;
+ int ret = 0;
+
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = intr},
+ ret) {
+ bo = xe_bo_create_pin_map_at_aligned(xe, tile, NULL, size, offset,
+ type, flags, alignment, &exec);
+ if (IS_ERR(bo)) {
+ drm_exec_retry_on_contention(&exec);
+ ret = PTR_ERR(bo);
+ xe_validation_retry_on_oom(&ctx, &ret);
+ }
+ }
+
+ return ret ? ERR_PTR(ret) : bo;
+}
+
+/**
+ * xe_bo_create_pin_map() - Create pinned and mapped bo
+ * @xe: The xe device.
+ * @tile: The tile to select for migration of this bo, and the tile used for
+ * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
+ * @vm: The vm to associate the buffer object with. The vm's resv must be locked
+ * with the transaction represented by @exec.
+ * @size: The storage size to use for the bo.
+ * @type: The TTM buffer object type.
+ * @flags: XE_BO_FLAG_ flags.
+ * @exec: The drm_exec transaction to use for exhaustive eviction, and
+ * previously used for locking @vm's resv.
+ *
+ * Create a pinned and mapped bo. If @vm is non-NULL, the bo is associated
+ * with that VM; otherwise it is external.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ * In particular, the function may return ERR_PTR(%-EINTR) if @exec was
+ * configured for interruptible locking.
+ */
struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm, size_t size,
- enum ttm_bo_type type, u32 flags)
+ enum ttm_bo_type type, u32 flags,
+ struct drm_exec *exec)
+{
+ return xe_bo_create_pin_map_at_aligned(xe, tile, vm, size, ~0ull, type, flags,
+ 0, exec);
+}
+
+/**
+ * xe_bo_create_pin_map_novm() - Create pinned and mapped bo
+ * @xe: The xe device.
+ * @tile: The tile to select for migration of this bo, and the tile used for
+ * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
+ * @size: The storage size to use for the bo.
+ * @type: The TTM buffer object type.
+ * @flags: XE_BO_FLAG_ flags.
+ * @intr: Whether to execute any waits for backing store interruptibly.
+ *
+ * Create a pinned and mapped bo. The bo will be external and not associated
+ * with a VM.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ * In particular, the function may return ERR_PTR(%-EINTR) if @intr was set
+ * to true on entry.
+ */
+struct xe_bo *xe_bo_create_pin_map_novm(struct xe_device *xe, struct xe_tile *tile,
+ size_t size, enum ttm_bo_type type, u32 flags,
+ bool intr)
{
- return xe_bo_create_pin_map_at(xe, tile, vm, size, ~0ull, type, flags);
+ return xe_bo_create_pin_map_at_novm(xe, tile, size, ~0ull, type, flags, 0, intr);
}
static void __xe_bo_unpin_map_no_vm(void *arg)
@@ -2186,8 +2630,7 @@ struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile
int ret;
KUNIT_STATIC_STUB_REDIRECT(xe_managed_bo_create_pin_map, xe, tile, size, flags);
-
- bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel, flags);
+ bo = xe_bo_create_pin_map_novm(xe, tile, size, ttm_bo_type_kernel, flags, true);
if (IS_ERR(bo))
return bo;
@@ -2198,6 +2641,11 @@ struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile
return bo;
}
+void xe_managed_bo_unpin_map_no_vm(struct xe_bo *bo)
+{
+ devm_release_action(xe_bo_device(bo)->drm.dev, __xe_bo_unpin_map_no_vm, bo);
+}
+
struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
const void *data, size_t size, u32 flags)
{
@@ -2269,6 +2717,8 @@ uint64_t vram_region_gpu_offset(struct ttm_resource *res)
/**
* xe_bo_pin_external - pin an external BO
* @bo: buffer object to be pinned
+ * @in_place: Pin in current placement, don't attempt to migrate.
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
*
* Pin an external (not tied to a VM, can be exported via dma-buf / prime FD)
* BO. Unique call compared to xe_bo_pin as this function has it own set of
@@ -2276,7 +2726,7 @@ uint64_t vram_region_gpu_offset(struct ttm_resource *res)
*
* Returns 0 for success, negative error code otherwise.
*/
-int xe_bo_pin_external(struct xe_bo *bo)
+int xe_bo_pin_external(struct xe_bo *bo, bool in_place, struct drm_exec *exec)
{
struct xe_device *xe = xe_bo_device(bo);
int err;
@@ -2285,9 +2735,11 @@ int xe_bo_pin_external(struct xe_bo *bo)
xe_assert(xe, xe_bo_is_user(bo));
if (!xe_bo_is_pinned(bo)) {
- err = xe_bo_validate(bo, NULL, false);
- if (err)
- return err;
+ if (!in_place) {
+ err = xe_bo_validate(bo, NULL, false, exec);
+ if (err)
+ return err;
+ }
spin_lock(&xe->pinned.lock);
list_add_tail(&bo->pinned_link, &xe->pinned.late.external);
@@ -2307,7 +2759,17 @@ int xe_bo_pin_external(struct xe_bo *bo)
return 0;
}
-int xe_bo_pin(struct xe_bo *bo)
+/**
+ * xe_bo_pin() - Pin a kernel bo after potentially migrating it
+ * @bo: The kernel bo to pin.
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
+ *
+ * Attempts to migrate a bo to @bo->placement. If that succeeds,
+ * pins the bo.
+ *
+ * Return: %0 on success, negative error code on migration failure.
+ */
+int xe_bo_pin(struct xe_bo *bo, struct drm_exec *exec)
{
struct ttm_place *place = &bo->placements[0];
struct xe_device *xe = xe_bo_device(bo);
@@ -2329,7 +2791,7 @@ int xe_bo_pin(struct xe_bo *bo)
/* We only expect at most 1 pin */
xe_assert(xe, !xe_bo_is_pinned(bo));
- err = xe_bo_validate(bo, NULL, false);
+ err = xe_bo_validate(bo, NULL, false, exec);
if (err)
return err;
@@ -2422,6 +2884,7 @@ void xe_bo_unpin(struct xe_bo *bo)
* NULL. Used together with @allow_res_evict.
* @allow_res_evict: Whether it's allowed to evict bos sharing @vm's
* reservation object.
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
*
* Make sure the bo is in allowed placement, migrating it if necessary. If
* needed, other bos will be evicted. If bos selected for eviction shares
@@ -2431,16 +2894,19 @@ void xe_bo_unpin(struct xe_bo *bo)
* Return: 0 on success, negative error code on failure. May return
* -EINTR or -ERESTARTSYS if internal waits are interrupted by a signal.
*/
-int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
+int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict,
+ struct drm_exec *exec)
{
struct ttm_operation_ctx ctx = {
.interruptible = true,
.no_wait_gpu = false,
.gfp_retry_mayfail = true,
};
- struct pin_cookie cookie;
int ret;
+ if (xe_bo_is_pinned(bo))
+ return 0;
+
if (vm) {
lockdep_assert_held(&vm->lock);
xe_vm_assert_held(vm);
@@ -2449,10 +2915,11 @@ int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
ctx.resv = xe_vm_resv(vm);
}
- cookie = xe_vm_set_validating(vm, allow_res_evict);
+ xe_vm_set_validating(vm, allow_res_evict);
trace_xe_bo_validate(bo);
+ xe_validation_assert_exec(xe_bo_device(bo), exec, &bo->ttm.base);
ret = ttm_bo_validate(&bo->ttm, &bo->placement, &ctx);
- xe_vm_clear_validating(vm, allow_res_evict, cookie);
+ xe_vm_clear_validating(vm, allow_res_evict);
return ret;
}
@@ -2646,8 +3113,9 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
struct xe_device *xe = to_xe_device(dev);
struct xe_file *xef = to_xe_file(file);
struct drm_xe_gem_create *args = data;
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
struct xe_vm *vm = NULL;
- ktime_t end = 0;
struct xe_bo *bo;
unsigned int bo_flags;
u32 handle;
@@ -2721,25 +3189,26 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
}
-retry:
- if (vm) {
- err = xe_vm_lock(vm, true);
- if (err)
- goto out_vm;
+ err = 0;
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = true},
+ err) {
+ if (vm) {
+ err = xe_vm_drm_exec_lock(vm, &exec);
+ drm_exec_retry_on_contention(&exec);
+ if (err)
+ break;
+ }
+ bo = xe_bo_create_user(xe, vm, args->size, args->cpu_caching,
+ bo_flags, &exec);
+ drm_exec_retry_on_contention(&exec);
+ if (IS_ERR(bo)) {
+ err = PTR_ERR(bo);
+ xe_validation_retry_on_oom(&ctx, &err);
+ break;
+ }
}
-
- bo = xe_bo_create_user(xe, NULL, vm, args->size, args->cpu_caching,
- bo_flags);
-
- if (vm)
- xe_vm_unlock(vm);
-
- if (IS_ERR(bo)) {
- err = PTR_ERR(bo);
- if (xe_vm_validate_should_retry(NULL, err, &end))
- goto retry;
+ if (err)
goto out_vm;
- }
if (args->extensions) {
err = gem_create_user_extensions(xe, bo, args->extensions, 0);
@@ -2888,6 +3357,9 @@ static void xe_place_from_ttm_type(u32 mem_type, struct ttm_place *place)
* xe_bo_migrate - Migrate an object to the desired region id
* @bo: The buffer object to migrate.
* @mem_type: The TTM region type to migrate to.
+ * @tctx: A pointer to a struct ttm_operation_ctx or NULL if
+ * a default interruptible ctx is to be used.
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
*
* Attempt to migrate the buffer object to the desired memory region. The
* buffer object may not be pinned, and must be locked.
@@ -2899,7 +3371,8 @@ static void xe_place_from_ttm_type(u32 mem_type, struct ttm_place *place)
* Return: 0 on success. Negative error code on failure. In particular may
* return -EINTR or -ERESTARTSYS if signal pending.
*/
-int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
+int xe_bo_migrate(struct xe_bo *bo, u32 mem_type, struct ttm_operation_ctx *tctx,
+ struct drm_exec *exec)
{
struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
struct ttm_operation_ctx ctx = {
@@ -2911,6 +3384,7 @@ int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
struct ttm_place requested;
xe_bo_assert_held(bo);
+ tctx = tctx ? tctx : &ctx;
if (bo->ttm.resource->mem_type == mem_type)
return 0;
@@ -2937,19 +3411,22 @@ int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
add_vram(xe, bo, &requested, bo->flags, mem_type, &c);
}
- return ttm_bo_validate(&bo->ttm, &placement, &ctx);
+ if (!tctx->no_wait_gpu)
+ xe_validation_assert_exec(xe_bo_device(bo), exec, &bo->ttm.base);
+ return ttm_bo_validate(&bo->ttm, &placement, tctx);
}
/**
* xe_bo_evict - Evict an object to evict placement
* @bo: The buffer object to migrate.
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
*
* On successful completion, the object memory will be moved to evict
* placement. This function blocks until the object has been fully moved.
*
* Return: 0 on success. Negative error code on failure.
*/
-int xe_bo_evict(struct xe_bo *bo)
+int xe_bo_evict(struct xe_bo *bo, struct drm_exec *exec)
{
struct ttm_operation_ctx ctx = {
.interruptible = false,
@@ -3109,11 +3586,11 @@ int xe_bo_dumb_create(struct drm_file *file_priv,
args->size = ALIGN(mul_u32_u32(args->pitch, args->height),
page_size);
- bo = xe_bo_create_user(xe, NULL, NULL, args->size,
+ bo = xe_bo_create_user(xe, NULL, args->size,
DRM_XE_GEM_CPU_CACHING_WC,
XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
XE_BO_FLAG_SCANOUT |
- XE_BO_FLAG_NEEDS_CPU_ACCESS);
+ XE_BO_FLAG_NEEDS_CPU_ACCESS, NULL);
if (IS_ERR(bo))
return PTR_ERR(bo);
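
A short usage sketch contrasting the two creation flavours this patch settles on (the wrapper is illustrative; function names match the diff): callers already inside a drm_exec validation transaction pass it through, while standalone callers pass a NULL exec, or use a _novm variant, and let the helper open its own transaction. Note the patch asserts that a non-NULL @vm implies a non-NULL @exec:

static struct xe_bo *example_create(struct xe_device *xe, struct xe_vm *vm,
				    struct drm_exec *exec, size_t size,
				    u32 flags)
{
	if (exec)	/* already inside a validation transaction */
		return xe_bo_create_user(xe, vm, size,
					 DRM_XE_GEM_CPU_CACHING_WB, flags, exec);

	/* Standalone: the helper opens its own interruptible transaction. */
	return xe_bo_create_user(xe, NULL, size,
				 DRM_XE_GEM_CPU_CACHING_WB, flags, NULL);
}
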
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index 02e8cde4c6b2..a77af42b5f9e 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -10,8 +10,10 @@
#include "xe_bo_types.h"
#include "xe_macros.h"
+#include "xe_validation.h"
#include "xe_vm_types.h"
#include "xe_vm.h"
+#include "xe_vram_types.h"
#define XE_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */
@@ -23,8 +25,9 @@
#define XE_BO_FLAG_VRAM_MASK (XE_BO_FLAG_VRAM0 | XE_BO_FLAG_VRAM1)
/* -- */
#define XE_BO_FLAG_STOLEN BIT(4)
+#define XE_BO_FLAG_VRAM(vram) (XE_BO_FLAG_VRAM0 << ((vram)->id))
#define XE_BO_FLAG_VRAM_IF_DGFX(tile) (IS_DGFX(tile_to_xe(tile)) ? \
- XE_BO_FLAG_VRAM0 << (tile)->id : \
+ XE_BO_FLAG_VRAM((tile)->mem.vram) : \
XE_BO_FLAG_SYSTEM)
#define XE_BO_FLAG_GGTT BIT(5)
#define XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE BIT(6)
@@ -86,40 +89,34 @@ struct sg_table;
struct xe_bo *xe_bo_alloc(void);
void xe_bo_free(struct xe_bo *bo);
-struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
- struct xe_tile *tile, struct dma_resv *resv,
- struct ttm_lru_bulk_move *bulk, size_t size,
- u16 cpu_caching, enum ttm_bo_type type,
- u32 flags);
-struct xe_bo *
-xe_bo_create_locked_range(struct xe_device *xe,
- struct xe_tile *tile, struct xe_vm *vm,
- size_t size, u64 start, u64 end,
- enum ttm_bo_type type, u32 flags, u64 alignment);
+struct xe_bo *xe_bo_init_locked(struct xe_device *xe, struct xe_bo *bo,
+ struct xe_tile *tile, struct dma_resv *resv,
+ struct ttm_lru_bulk_move *bulk, size_t size,
+ u16 cpu_caching, enum ttm_bo_type type,
+ u32 flags, struct drm_exec *exec);
struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm, size_t size,
- enum ttm_bo_type type, u32 flags);
-struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_tile *tile,
- struct xe_vm *vm, size_t size,
- enum ttm_bo_type type, u32 flags);
-struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
- struct xe_vm *vm, size_t size,
- u16 cpu_caching,
- u32 flags);
+ enum ttm_bo_type type, u32 flags,
+ struct drm_exec *exec);
+struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_vm *vm, size_t size,
+ u16 cpu_caching, u32 flags, struct drm_exec *exec);
struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm, size_t size,
- enum ttm_bo_type type, u32 flags);
-struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile,
- struct xe_vm *vm, size_t size, u64 offset,
- enum ttm_bo_type type, u32 flags);
-struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
- struct xe_tile *tile,
- struct xe_vm *vm,
- size_t size, u64 offset,
- enum ttm_bo_type type, u32 flags,
- u64 alignment);
+ enum ttm_bo_type type, u32 flags,
+ struct drm_exec *exec);
+struct xe_bo *xe_bo_create_pin_map_novm(struct xe_device *xe, struct xe_tile *tile,
+ size_t size, enum ttm_bo_type type, u32 flags,
+ bool intr);
+struct xe_bo *xe_bo_create_pin_range_novm(struct xe_device *xe, struct xe_tile *tile,
+ size_t size, u64 start, u64 end,
+ enum ttm_bo_type type, u32 flags);
+struct xe_bo *
+xe_bo_create_pin_map_at_novm(struct xe_device *xe, struct xe_tile *tile,
+ size_t size, u64 offset, enum ttm_bo_type type,
+ u32 flags, u64 alignment, bool intr);
struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
size_t size, u32 flags);
+void xe_managed_bo_unpin_map_no_vm(struct xe_bo *bo);
struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
const void *data, size_t size, u32 flags);
int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src);
@@ -198,11 +195,12 @@ static inline void xe_bo_unlock_vm_held(struct xe_bo *bo)
}
}
-int xe_bo_pin_external(struct xe_bo *bo);
-int xe_bo_pin(struct xe_bo *bo);
+int xe_bo_pin_external(struct xe_bo *bo, bool in_place, struct drm_exec *exec);
+int xe_bo_pin(struct xe_bo *bo, struct drm_exec *exec);
void xe_bo_unpin_external(struct xe_bo *bo);
void xe_bo_unpin(struct xe_bo *bo);
-int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict);
+int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict,
+ struct drm_exec *exec);
static inline bool xe_bo_is_pinned(struct xe_bo *bo)
{
@@ -283,8 +281,9 @@ uint64_t vram_region_gpu_offset(struct ttm_resource *res);
bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type);
-int xe_bo_migrate(struct xe_bo *bo, u32 mem_type);
-int xe_bo_evict(struct xe_bo *bo);
+int xe_bo_migrate(struct xe_bo *bo, u32 mem_type, struct ttm_operation_ctx *ctc,
+ struct drm_exec *exec);
+int xe_bo_evict(struct xe_bo *bo, struct drm_exec *exec);
int xe_bo_evict_pinned(struct xe_bo *bo);
int xe_bo_notifier_prepare_pinned(struct xe_bo *bo);
@@ -313,6 +312,21 @@ static inline size_t xe_bo_ccs_pages_start(struct xe_bo *bo)
return PAGE_ALIGN(xe_bo_size(bo));
}
+/**
+ * xe_bo_has_valid_ccs_bb - Check if the CCS BBs were set up for the BO.
+ * @bo: the &xe_bo to check
+ *
+ * The CCS BBs should only be set up by a VF driver, but it is safe to call
+ * this function from a non-VF driver as well.
+ *
+ * Return: true iff the CCS BBs are set up, false otherwise.
+ */
+static inline bool xe_bo_has_valid_ccs_bb(struct xe_bo *bo)
+{
+ return bo->bb_ccs[XE_SRIOV_VF_CCS_READ_CTX] &&
+ bo->bb_ccs[XE_SRIOV_VF_CCS_WRITE_CTX];
+}
+
static inline bool xe_bo_has_pages(struct xe_bo *bo)
{
if ((bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) ||
diff --git a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c
index 7484ce55a303..d5dbc51e8612 100644
--- a/drivers/gpu/drm/xe/xe_bo_evict.c
+++ b/drivers/gpu/drm/xe/xe_bo_evict.c
@@ -158,8 +158,8 @@ int xe_bo_evict_all(struct xe_device *xe)
if (ret)
return ret;
- ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
- &xe->pinned.late.evicted, xe_bo_evict_pinned);
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.external,
+ &xe->pinned.late.external, xe_bo_evict_pinned);
if (!ret)
ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index ff560d82496f..d4fe3c8dca5b 100644
--- a/drivers/gpu/drm/xe/xe_bo_types.h
+++ b/drivers/gpu/drm/xe/xe_bo_types.h
@@ -9,6 +9,7 @@
#include <linux/iosys-map.h>
#include <drm/drm_gpusvm.h>
+#include <drm/drm_pagemap.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
@@ -24,7 +25,9 @@ struct xe_vm;
/* TODO: To be selected with VM_MADVISE */
#define XE_BO_PRIORITY_NORMAL 1
-/** @xe_bo: XE buffer object */
+/**
+ * struct xe_bo - Xe buffer object
+ */
struct xe_bo {
/** @ttm: TTM base buffer object */
struct ttm_buffer_object ttm;
@@ -46,7 +49,7 @@ struct xe_bo {
struct xe_ggtt_node *ggtt_node[XE_MAX_TILES_PER_DEVICE];
/** @vmap: iosys map of this buffer */
struct iosys_map vmap;
- /** @ttm_kmap: TTM bo kmap object for internal use only. Keep off. */
+ /** @kmap: TTM bo kmap object for internal use only. Keep off. */
struct ttm_bo_kmap_obj kmap;
/** @pinned_link: link to present / evicted list of pinned BO */
struct list_head pinned_link;
@@ -60,6 +63,14 @@ struct xe_bo {
*/
struct list_head client_link;
#endif
+ /** @attr: User controlled attributes for bo */
+ struct {
+ /**
+ * @atomic_access: type of atomic access bo needs
+ * protected by bo dma-resv lock
+ */
+ u32 atomic_access;
+ } attr;
/**
* @pxp_key_instance: PXP key instance this BO was created against. A
* 0 in this variable indicates that the BO does not use PXP encryption.
@@ -73,9 +84,12 @@ struct xe_bo {
/** @created: Whether the bo has passed initial creation */
bool created;
- /** @ccs_cleared */
+ /** @ccs_cleared: true means that CCS region of BO is already cleared */
bool ccs_cleared;
+ /** @bb_ccs: BB instructions of CCS read/write. Valid only for VF */
+ struct xe_bb *bb_ccs[XE_SRIOV_VF_CCS_CTX_COUNT];
+
/**
* @cpu_caching: CPU caching mode. Currently only used for userspace
* objects. Exceptions are system memory on DGFX, which is always
@@ -87,9 +101,10 @@ struct xe_bo {
struct drm_pagemap_devmem devmem_allocation;
/** @vram_userfault_link: Link into @mem_access.vram_userfault.list */
- struct list_head vram_userfault_link;
+ struct list_head vram_userfault_link;
- /** @min_align: minimum alignment needed for this BO if different
+ /**
+ * @min_align: minimum alignment needed for this BO if different
* from default
*/
u64 min_align;
diff --git a/drivers/gpu/drm/xe/xe_configfs.c b/drivers/gpu/drm/xe/xe_configfs.c
index e9b46a2d0019..139663423185 100644
--- a/drivers/gpu/drm/xe/xe_configfs.c
+++ b/drivers/gpu/drm/xe/xe_configfs.c
@@ -4,42 +4,67 @@
*/
#include <linux/bitops.h>
+#include <linux/ctype.h>
#include <linux/configfs.h>
+#include <linux/cleanup.h>
#include <linux/find.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/string.h>
+#include "instructions/xe_mi_commands.h"
#include "xe_configfs.h"
-#include "xe_module.h"
-
#include "xe_hw_engine_types.h"
+#include "xe_module.h"
+#include "xe_pci_types.h"
/**
* DOC: Xe Configfs
*
* Overview
- * =========
+ * ========
*
* Configfs is a filesystem-based manager of kernel objects. XE KMD registers a
- * configfs subsystem called ``'xe'`` that creates a directory in the mounted configfs directory
- * The user can create devices under this directory and configure them as necessary
- * See Documentation/filesystems/configfs.rst for more information about how configfs works.
+ * configfs subsystem called ``xe`` that creates a directory in the mounted
+ * configfs directory. The user can create devices under this directory and
+ * configure them as necessary. See Documentation/filesystems/configfs.rst for
+ * more information about how configfs works.
*
* Create devices
- * ===============
+ * ==============
+ *
+ * To create a device, the ``xe`` module must already be loaded, but some
+ * attributes can only be set before binding the device. This can be
+ * accomplished by blocking the driver autoprobe::
*
- * In order to create a device, the user has to create a directory inside ``'xe'``::
+ * # echo 0 > /sys/bus/pci/drivers_autoprobe
+ * # modprobe xe
*
- * mkdir /sys/kernel/config/xe/0000:03:00.0/
+ * In order to create a device, the user has to create a directory inside ``xe``::
+ *
+ * # mkdir /sys/kernel/config/xe/0000:03:00.0/
*
* Every device created is populated by the driver with entries that can be
* used to configure it::
*
* /sys/kernel/config/xe/
- * .. 0000:03:00.0/
- * ... survivability_mode
+ * ├── 0000:00:02.0
+ * │   └── ...
+ * ├── 0000:00:02.1
+ * │   └── ...
+ * :
+ * └── 0000:03:00.0
+ * ├── survivability_mode
+ * ├── engines_allowed
+ * └── enable_psmi
+ *
+ * After configuring the attributes as per next section, the device can be
+ * probed with::
+ *
+ * # echo 0000:03:00.0 > /sys/bus/pci/drivers/xe/bind
+ * # # or
+ * # echo 0000:03:00.0 > /sys/bus/pci/drivers_probe
*
* Configure Attributes
* ====================
@@ -51,7 +76,8 @@
* effect when probing the device. Example to enable it::
*
* # echo 1 > /sys/kernel/config/xe/0000:03:00.0/survivability_mode
- * # echo 0000:03:00.0 > /sys/bus/pci/drivers/xe/bind (Enters survivability mode if supported)
+ *
+ * This attribute can only be set before binding to the device.
*
* Allowed engines:
* ----------------
@@ -77,27 +103,118 @@
* available for migrations, but it's disabled. This is intended for debugging
* purposes only.
*
+ * This attribute can only be set before binding to the device.
+ *
+ * PSMI
+ * ----
+ *
+ * Enable extra debugging capabilities to trace engine execution. Only useful
+ * during early platform enabling and requires additional hardware connected.
+ * Once it's enabled, additional WAs are added and runtime configuration is
+ * done via debugfs. Example to enable it::
+ *
+ * # echo 1 > /sys/kernel/config/xe/0000:03:00.0/enable_psmi
+ *
+ * This attribute can only be set before binding to the device.
+ *
+ * Context restore BB
+ * ------------------
+ *
+ * Allows executing a batch buffer during context switches. When the
+ * GPU is restoring the context, it executes additional commands. It's useful
+ * for testing additional workarounds and validating certain HW behaviors: it's
+ * not intended for normal execution and will taint the kernel with TAINT_TEST
+ * when used.
+ *
+ * The syntax allows passing raw instructions to be executed by the engine
+ * in a batch buffer or set specific registers.
+ *
+ * #. Generic instruction::
+ *
+ * <engine-class> cmd <instr> [[dword0] [dword1] [...]]
+ *
+ * #. Simple register setting::
+ *
+ * <engine-class> reg <address> <value>
+ *
+ * Commands are saved per engine class: all instances of that class will execute
+ * those commands during context switch. The instruction, dword arguments,
+ * addresses and values are in hex format like in the examples below.
+ *
+ * #. Execute an LRI command to write 0xDEADBEEF to register 0x4f10 after the
+ * normal context restore::
+ *
+ * # echo 'rcs cmd 11000001 4F100 DEADBEEF' \
+ * > /sys/kernel/config/xe/0000:03:00.0/ctx_restore_post_bb
+ *
+ * #. Execute an LRI command to write 0xDEADBEEF to register 0x4f10 at the
+ * beginning of the context restore::
+ *
+ * # echo 'rcs cmd 11000001 4F100 DEADBEEF' \
+ * > /sys/kernel/config/xe/0000:03:00.0/ctx_restore_mid_bb
+ *
+ * #. Load certain values in a couple of registers (this can be used as a
+ * simpler alternative to the ``cmd`` action)::
+ *
+ * # cat > /sys/kernel/config/xe/0000:03:00.0/ctx_restore_post_bb <<EOF
+ * rcs reg 4F100 DEADBEEF
+ * rcs reg 4F104 FFFFFFFF
+ * EOF
+ *
+ * .. note::
+ *
+ * When using multiple lines, make sure to use a command that is
+ * implemented with a single write syscall, like HEREDOC.
+ *
+ * Currently this is implemented only for post and mid context restore and
+ * these attributes can only be set before binding to the device.
+ *
* Remove devices
* ==============
*
* The created device directories can be removed using ``rmdir``::
*
- * rmdir /sys/kernel/config/xe/0000:03:00.0/
+ * # rmdir /sys/kernel/config/xe/0000:03:00.0/
*/
-struct xe_config_device {
+/* Similar to struct xe_bb, but not tied to HW (yet) */
+struct wa_bb {
+ u32 *cs;
+ u32 len; /* in dwords */
+};
+
+struct xe_config_group_device {
struct config_group group;
- bool survivability_mode;
- u64 engines_allowed;
+ struct xe_config_device {
+ u64 engines_allowed;
+ struct wa_bb ctx_restore_post_bb[XE_ENGINE_CLASS_MAX];
+ struct wa_bb ctx_restore_mid_bb[XE_ENGINE_CLASS_MAX];
+ bool survivability_mode;
+ bool enable_psmi;
+ } config;
/* protects attributes */
struct mutex lock;
+ /* matching descriptor */
+ const struct xe_device_desc *desc;
+};
+
+static const struct xe_config_device device_defaults = {
+ .engines_allowed = U64_MAX,
+ .survivability_mode = false,
+ .enable_psmi = false,
};
+static void set_device_defaults(struct xe_config_device *config)
+{
+ *config = device_defaults;
+}
+
struct engine_info {
const char *cls;
u64 mask;
+ enum xe_engine_class engine_class;
};
/* Some helpful macros to aid on the sizing of buffer allocation when parsing */
@@ -105,17 +222,48 @@ struct engine_info {
#define MAX_ENGINE_INSTANCE_CHARS 2
static const struct engine_info engine_info[] = {
- { .cls = "rcs", .mask = XE_HW_ENGINE_RCS_MASK },
- { .cls = "bcs", .mask = XE_HW_ENGINE_BCS_MASK },
- { .cls = "vcs", .mask = XE_HW_ENGINE_VCS_MASK },
- { .cls = "vecs", .mask = XE_HW_ENGINE_VECS_MASK },
- { .cls = "ccs", .mask = XE_HW_ENGINE_CCS_MASK },
- { .cls = "gsccs", .mask = XE_HW_ENGINE_GSCCS_MASK },
+ { .cls = "rcs", .mask = XE_HW_ENGINE_RCS_MASK, .engine_class = XE_ENGINE_CLASS_RENDER },
+ { .cls = "bcs", .mask = XE_HW_ENGINE_BCS_MASK, .engine_class = XE_ENGINE_CLASS_COPY },
+ { .cls = "vcs", .mask = XE_HW_ENGINE_VCS_MASK, .engine_class = XE_ENGINE_CLASS_VIDEO_DECODE },
+ { .cls = "vecs", .mask = XE_HW_ENGINE_VECS_MASK, .engine_class = XE_ENGINE_CLASS_VIDEO_ENHANCE },
+ { .cls = "ccs", .mask = XE_HW_ENGINE_CCS_MASK, .engine_class = XE_ENGINE_CLASS_COMPUTE },
+ { .cls = "gsccs", .mask = XE_HW_ENGINE_GSCCS_MASK, .engine_class = XE_ENGINE_CLASS_OTHER },
};
+static struct xe_config_group_device *to_xe_config_group_device(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct xe_config_group_device, group);
+}
+
static struct xe_config_device *to_xe_config_device(struct config_item *item)
{
- return container_of(to_config_group(item), struct xe_config_device, group);
+ return &to_xe_config_group_device(item)->config;
+}
+
+static bool is_bound(struct xe_config_group_device *dev)
+{
+ unsigned int domain, bus, slot, function;
+ struct pci_dev *pdev;
+ const char *name;
+ bool ret;
+
+ lockdep_assert_held(&dev->lock);
+
+ name = dev->group.cg_item.ci_name;
+ if (sscanf(name, "%x:%x:%x.%x", &domain, &bus, &slot, &function) != 4)
+ return false;
+
+ pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, function));
+ if (!pdev)
+ return false;
+
+	ret = pci_get_drvdata(pdev);
+	if (ret)
+		pci_dbg(pdev, "Already bound to driver\n");
+
+	pci_dev_put(pdev);
+
+ return ret;
}
static ssize_t survivability_mode_show(struct config_item *item, char *page)
@@ -127,7 +275,7 @@ static ssize_t survivability_mode_show(struct config_item *item, char *page)
static ssize_t survivability_mode_store(struct config_item *item, const char *page, size_t len)
{
- struct xe_config_device *dev = to_xe_config_device(item);
+ struct xe_config_group_device *dev = to_xe_config_group_device(item);
bool survivability_mode;
int ret;
@@ -135,9 +283,11 @@ static ssize_t survivability_mode_store(struct config_item *item, const char *pa
if (ret)
return ret;
- mutex_lock(&dev->lock);
- dev->survivability_mode = survivability_mode;
- mutex_unlock(&dev->lock);
+ guard(mutex)(&dev->lock);
+ if (is_bound(dev))
+ return -EBUSY;
+
+ dev->config.survivability_mode = survivability_mode;
return len;
}
@@ -166,7 +316,18 @@ static ssize_t engines_allowed_show(struct config_item *item, char *page)
return p - page;
}
-static bool lookup_engine_mask(const char *pattern, u64 *mask)
+/*
+ * Look up the engine_info entry matching @pattern. If @mask is not NULL,
+ * reduce the mask according to the instance in @pattern.
+ *
+ * Examples of inputs:
+ * - lookup_engine_info("rcs0", &mask): return "rcs" entry from @engine_info and
+ * mask == BIT_ULL(XE_HW_ENGINE_RCS0)
+ * - lookup_engine_info("rcs*", &mask): return "rcs" entry from @engine_info and
+ * mask == XE_HW_ENGINE_RCS_MASK
+ * - lookup_engine_info("rcs", NULL): return "rcs" entry from @engine_info
+ */
+static const struct engine_info *lookup_engine_info(const char *pattern, u64 *mask)
{
for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
u8 instance;
@@ -176,70 +337,359 @@ static bool lookup_engine_mask(const char *pattern, u64 *mask)
continue;
pattern += strlen(engine_info[i].cls);
+ if (!mask)
+ return *pattern ? NULL : &engine_info[i];
if (!strcmp(pattern, "*")) {
*mask = engine_info[i].mask;
- return true;
+ return &engine_info[i];
}
if (kstrtou8(pattern, 10, &instance))
- return false;
+ return NULL;
bit = __ffs64(engine_info[i].mask) + instance;
if (bit >= fls64(engine_info[i].mask))
- return false;
+ return NULL;
*mask = BIT_ULL(bit);
- return true;
+ return &engine_info[i];
}
- return false;
+ return NULL;
+}
+
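+/*
+ * Parse one engine token from @s, stopping at the first character in
+ * @end_chars. On success return the number of characters consumed and fill
+ * @mask and @pinfo (when non-NULL) as per lookup_engine_info().
+ */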
+static int parse_engine(const char *s, const char *end_chars, u64 *mask,
+ const struct engine_info **pinfo)
+{
+ char buf[MAX_ENGINE_CLASS_CHARS + MAX_ENGINE_INSTANCE_CHARS + 1];
+ const struct engine_info *info;
+ size_t len;
+
+ len = strcspn(s, end_chars);
+ if (len >= sizeof(buf))
+ return -EINVAL;
+
+ memcpy(buf, s, len);
+ buf[len] = '\0';
+
+ info = lookup_engine_info(buf, mask);
+ if (!info)
+ return -ENOENT;
+
+ if (pinfo)
+ *pinfo = info;
+
+ return len;
}
static ssize_t engines_allowed_store(struct config_item *item, const char *page,
size_t len)
{
- struct xe_config_device *dev = to_xe_config_device(item);
- size_t patternlen, p;
+ struct xe_config_group_device *dev = to_xe_config_group_device(item);
+ ssize_t patternlen, p;
u64 mask, val = 0;
for (p = 0; p < len; p += patternlen + 1) {
- char buf[MAX_ENGINE_CLASS_CHARS + MAX_ENGINE_INSTANCE_CHARS + 1];
-
- patternlen = strcspn(page + p, ",\n");
- if (patternlen >= sizeof(buf))
+ patternlen = parse_engine(page + p, ",\n", &mask, NULL);
+ if (patternlen < 0)
return -EINVAL;
- memcpy(buf, page + p, patternlen);
- buf[patternlen] = '\0';
+ val |= mask;
+ }
+
+ guard(mutex)(&dev->lock);
+ if (is_bound(dev))
+ return -EBUSY;
+
+ dev->config.engines_allowed = val;
+
+ return len;
+}
+
+static ssize_t enable_psmi_show(struct config_item *item, char *page)
+{
+ struct xe_config_device *dev = to_xe_config_device(item);
+
+ return sprintf(page, "%d\n", dev->enable_psmi);
+}
+
+static ssize_t enable_psmi_store(struct config_item *item, const char *page, size_t len)
+{
+ struct xe_config_group_device *dev = to_xe_config_group_device(item);
+ bool val;
+ int ret;
+
+ ret = kstrtobool(page, &val);
+ if (ret)
+ return ret;
+
+ guard(mutex)(&dev->lock);
+ if (is_bound(dev))
+ return -EBUSY;
+
+ dev->config.enable_psmi = val;
+
+ return len;
+}
+
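+/*
+ * Advance the wa_bb_show() output cursor @p by @len bytes. With @dereference
+ * set, the write is bounds-checked against the remaining @max_size and
+ * @append, if non-NULL, is copied at the cursor (otherwise the bytes were
+ * already emitted by snprintf()); with it clear, only the would-be length is
+ * accumulated so the required buffer size can be measured.
+ */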
+static bool wa_bb_read_advance(bool dereference, char **p,
+ const char *append, size_t len,
+ size_t *max_size)
+{
+ if (dereference) {
+ if (len >= *max_size)
+ return false;
+ *max_size -= len;
+ if (append)
+ memcpy(*p, append, len);
+ }
+
+ *p += len;
+
+ return true;
+}
+
+static ssize_t wa_bb_show(struct xe_config_group_device *dev,
+ struct wa_bb wa_bb[static XE_ENGINE_CLASS_MAX],
+ char *data, size_t sz)
+{
+ char *p = data;
+
+ guard(mutex)(&dev->lock);
+
+ for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
+ enum xe_engine_class ec = engine_info[i].engine_class;
+ size_t len;
+
+ if (!wa_bb[ec].len)
+ continue;
+
+ len = snprintf(p, sz, "%s:", engine_info[i].cls);
+ if (!wa_bb_read_advance(data, &p, NULL, len, &sz))
+ return -ENOBUFS;
+
+ for (size_t j = 0; j < wa_bb[ec].len; j++) {
+ len = snprintf(p, sz, " %08x", wa_bb[ec].cs[j]);
+ if (!wa_bb_read_advance(data, &p, NULL, len, &sz))
+ return -ENOBUFS;
+ }
+
+ if (!wa_bb_read_advance(data, &p, "\n", 1, &sz))
+ return -ENOBUFS;
+ }
+
+ if (!wa_bb_read_advance(data, &p, "", 1, &sz))
+ return -ENOBUFS;
+
+	/* Reserve one more byte to match the check for the trailing '\0' */
+ if (!data)
+ p++;
+
+ return p - data;
+}
+
+static ssize_t ctx_restore_mid_bb_show(struct config_item *item, char *page)
+{
+ struct xe_config_group_device *dev = to_xe_config_group_device(item);
+
+ return wa_bb_show(dev, dev->config.ctx_restore_mid_bb, page, SZ_4K);
+}
+
+static ssize_t ctx_restore_post_bb_show(struct config_item *item, char *page)
+{
+ struct xe_config_group_device *dev = to_xe_config_group_device(item);
+
+ return wa_bb_show(dev, dev->config.ctx_restore_post_bb, page, SZ_4K);
+}
+
+static void wa_bb_append(struct wa_bb *wa_bb, u32 val)
+{
+ if (wa_bb->cs)
+ wa_bb->cs[wa_bb->len] = val;
+
+ wa_bb->len++;
+}
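+
+/*
+ * wa_bb_append() supports a two-pass pattern: with ->cs still NULL it only
+ * counts, so the same parse can first size and then fill the buffer. A
+ * minimal sketch (illustrative only, not driver code):
+ *
+ *	struct wa_bb bb = { };
+ *
+ *	wa_bb_append(&bb, 0xdeadbeef);		// counting pass: bb.len == 1
+ *	bb.cs = kcalloc(bb.len, sizeof(u32), GFP_KERNEL);
+ *	bb.len = 0;
+ *	wa_bb_append(&bb, 0xdeadbeef);		// fill pass: stores the dword
+ */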
+
+static ssize_t parse_hex(const char *line, u32 *pval)
+{
+ char numstr[12];
+ const char *p;
+ ssize_t numlen;
- if (!lookup_engine_mask(buf, &mask))
+ p = line + strspn(line, " \t");
+ if (!*p || *p == '\n')
+ return 0;
+
+ numlen = strcspn(p, " \t\n");
+ if (!numlen || numlen >= sizeof(numstr) - 1)
+ return -EINVAL;
+
+ memcpy(numstr, p, numlen);
+ numstr[numlen] = '\0';
+ p += numlen;
+
+ if (kstrtou32(numstr, 16, pval))
+ return -EINVAL;
+
+ return p - line;
+}
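+
+/*
+ * parse_hex() consumes one whitespace-separated hex token per call and
+ * returns how many bytes of @line it advanced past, so callers can walk a
+ * line token by token. A minimal sketch (illustrative only):
+ *
+ *	const char *p = "4F100 DEADBEEF";
+ *	ssize_t n;
+ *	u32 val;
+ *
+ *	while ((n = parse_hex(p, &val)) > 0)
+ *		p += n;		// val: 0x4f100, then 0xdeadbeef
+ */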
+
+/*
+ * Parse lines with the format
+ *
+ * <engine-class> cmd <u32> <u32...>
+ * <engine-class> reg <u32_addr> <u32_val>
+ *
+ * and optionally save them in @wa_bb if @wa_bb[i].cs is non-NULL.
+ *
+ * Return the number of dwords parsed.
+ */
+static ssize_t parse_wa_bb_lines(const char *lines,
+ struct wa_bb wa_bb[static XE_ENGINE_CLASS_MAX])
+{
+ ssize_t dwords = 0, ret;
+ const char *p;
+
+ for (p = lines; *p; p++) {
+ const struct engine_info *info = NULL;
+ u32 val, val2;
+
+ /* Also allow empty lines */
+ p += strspn(p, " \t\n");
+ if (!*p)
+ break;
+
+ ret = parse_engine(p, " \t\n", NULL, &info);
+ if (ret < 0)
+ return ret;
+
+ p += ret;
+ p += strspn(p, " \t");
+
+ if (str_has_prefix(p, "cmd")) {
+ for (p += strlen("cmd"); *p;) {
+ ret = parse_hex(p, &val);
+ if (ret < 0)
+ return -EINVAL;
+ if (!ret)
+ break;
+
+ p += ret;
+ dwords++;
+ wa_bb_append(&wa_bb[info->engine_class], val);
+ }
+ } else if (str_has_prefix(p, "reg")) {
+ p += strlen("reg");
+ ret = parse_hex(p, &val);
+ if (ret <= 0)
+ return -EINVAL;
+
+ p += ret;
+ ret = parse_hex(p, &val2);
+ if (ret <= 0)
+ return -EINVAL;
+
+ p += ret;
+ dwords += 3;
+ wa_bb_append(&wa_bb[info->engine_class],
+ MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1));
+ wa_bb_append(&wa_bb[info->engine_class], val);
+ wa_bb_append(&wa_bb[info->engine_class], val2);
+ } else {
return -EINVAL;
+ }
+ }
- val |= mask;
+ return dwords;
+}
+
+static ssize_t wa_bb_store(struct wa_bb wa_bb[static XE_ENGINE_CLASS_MAX],
+ struct xe_config_group_device *dev,
+ const char *page, size_t len)
+{
+ /* tmp_wa_bb must match wa_bb's size */
+ struct wa_bb tmp_wa_bb[XE_ENGINE_CLASS_MAX] = { };
+ ssize_t count, class;
+ u32 *tmp;
+
+	/* 1. Count dwords - tmp_wa_bb[i].cs is NULL for all classes */
+ count = parse_wa_bb_lines(page, tmp_wa_bb);
+ if (count < 0)
+ return count;
+
+ guard(mutex)(&dev->lock);
+
+ if (is_bound(dev))
+ return -EBUSY;
+
+ /*
+ * 2. Allocate a u32 array and set the pointers to the right positions
+ * according to the length of each class' wa_bb
+ */
+ tmp = krealloc(wa_bb[0].cs, count * sizeof(u32), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ if (!count) {
+ memset(wa_bb, 0, sizeof(tmp_wa_bb));
+ return len;
+ }
+
+ for (class = 0, count = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
+ tmp_wa_bb[class].cs = tmp + count;
+ count += tmp_wa_bb[class].len;
+ tmp_wa_bb[class].len = 0;
}
- mutex_lock(&dev->lock);
- dev->engines_allowed = val;
- mutex_unlock(&dev->lock);
+ /* 3. Parse wa_bb lines again, this time saving the values */
+ count = parse_wa_bb_lines(page, tmp_wa_bb);
+ if (count < 0)
+ return count;
+
+ memcpy(wa_bb, tmp_wa_bb, sizeof(tmp_wa_bb));
return len;
}
-CONFIGFS_ATTR(, survivability_mode);
+static ssize_t ctx_restore_mid_bb_store(struct config_item *item,
+ const char *data, size_t sz)
+{
+ struct xe_config_group_device *dev = to_xe_config_group_device(item);
+
+ return wa_bb_store(dev->config.ctx_restore_mid_bb, dev, data, sz);
+}
+
+static ssize_t ctx_restore_post_bb_store(struct config_item *item,
+ const char *data, size_t sz)
+{
+ struct xe_config_group_device *dev = to_xe_config_group_device(item);
+
+ return wa_bb_store(dev->config.ctx_restore_post_bb, dev, data, sz);
+}
+
+CONFIGFS_ATTR(, ctx_restore_mid_bb);
+CONFIGFS_ATTR(, ctx_restore_post_bb);
+CONFIGFS_ATTR(, enable_psmi);
CONFIGFS_ATTR(, engines_allowed);
+CONFIGFS_ATTR(, survivability_mode);
static struct configfs_attribute *xe_config_device_attrs[] = {
- &attr_survivability_mode,
+ &attr_ctx_restore_mid_bb,
+ &attr_ctx_restore_post_bb,
+ &attr_enable_psmi,
&attr_engines_allowed,
+ &attr_survivability_mode,
NULL,
};
static void xe_config_device_release(struct config_item *item)
{
- struct xe_config_device *dev = to_xe_config_device(item);
+ struct xe_config_group_device *dev = to_xe_config_group_device(item);
mutex_destroy(&dev->lock);
+
+ kfree(dev->config.ctx_restore_post_bb[0].cs);
kfree(dev);
}
@@ -247,35 +697,106 @@ static struct configfs_item_operations xe_config_device_ops = {
.release = xe_config_device_release,
};
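+
+/*
+ * Hide attributes that don't apply to the device being configured: e.g.
+ * survivability_mode is only exposed for dGPUs from Battlemage onwards.
+ */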
+static bool xe_config_device_is_visible(struct config_item *item,
+ struct configfs_attribute *attr, int n)
+{
+ struct xe_config_group_device *dev = to_xe_config_group_device(item);
+
+ if (attr == &attr_survivability_mode) {
+ if (!dev->desc->is_dgfx || dev->desc->platform < XE_BATTLEMAGE)
+ return false;
+ }
+
+ return true;
+}
+
+static struct configfs_group_operations xe_config_device_group_ops = {
+ .is_visible = xe_config_device_is_visible,
+};
+
static const struct config_item_type xe_config_device_type = {
.ct_item_ops = &xe_config_device_ops,
+ .ct_group_ops = &xe_config_device_group_ops,
.ct_attrs = xe_config_device_attrs,
.ct_owner = THIS_MODULE,
};
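+
+/*
+ * Find the xe_device_desc the xe PCI driver would bind with for @pdev by
+ * matching against the driver's PCI ID table; NULL if xe doesn't know about
+ * this device.
+ */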
+static const struct xe_device_desc *xe_match_desc(struct pci_dev *pdev)
+{
+ struct device_driver *driver = driver_find("xe", &pci_bus_type);
+ struct pci_driver *drv = to_pci_driver(driver);
+ const struct pci_device_id *ids = drv ? drv->id_table : NULL;
+ const struct pci_device_id *found = pci_match_id(ids, pdev);
+
+ return found ? (const void *)found->driver_data : NULL;
+}
+
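+/*
+ * Drop the reference held on @virtfn and return a referenced pointer to its
+ * physical function instead, so VFs can be configured through the PF's
+ * descriptor.
+ */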
+static struct pci_dev *get_physfn_instead(struct pci_dev *virtfn)
+{
+ struct pci_dev *physfn = pci_physfn(virtfn);
+
+ pci_dev_get(physfn);
+ pci_dev_put(virtfn);
+ return physfn;
+}
+
static struct config_group *xe_config_make_device_group(struct config_group *group,
const char *name)
{
unsigned int domain, bus, slot, function;
- struct xe_config_device *dev;
+ struct xe_config_group_device *dev;
+ const struct xe_device_desc *match;
struct pci_dev *pdev;
+ char canonical[16];
+ int vfnumber = 0;
int ret;
- ret = sscanf(name, "%04x:%02x:%02x.%x", &domain, &bus, &slot, &function);
+ ret = sscanf(name, "%x:%x:%x.%x", &domain, &bus, &slot, &function);
if (ret != 4)
return ERR_PTR(-EINVAL);
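+
+	/*
+	 * Round-trip through the canonical "%04x:%02x:%02x.%d" form so only
+	 * fully qualified names are accepted: e.g. "0000:03:00.0" passes,
+	 * while "0:3:0.0" fails the comparison below.
+	 */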
+ ret = scnprintf(canonical, sizeof(canonical), "%04x:%02x:%02x.%d", domain, bus,
+ PCI_SLOT(PCI_DEVFN(slot, function)),
+ PCI_FUNC(PCI_DEVFN(slot, function)));
+ if (ret != 12 || strcmp(name, canonical))
+ return ERR_PTR(-EINVAL);
+
pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, function));
+ if (!pdev && function)
+ pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, 0));
+ if (!pdev && slot)
+ pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(0, 0));
if (!pdev)
return ERR_PTR(-ENODEV);
+
+ if (PCI_DEVFN(slot, function) != pdev->devfn) {
+ pdev = get_physfn_instead(pdev);
+ vfnumber = PCI_DEVFN(slot, function) - pdev->devfn;
+ if (!dev_is_pf(&pdev->dev) || vfnumber > pci_sriov_get_totalvfs(pdev)) {
+ pci_dev_put(pdev);
+ return ERR_PTR(-ENODEV);
+ }
+ }
+
+ match = xe_match_desc(pdev);
+ if (match && vfnumber && !match->has_sriov) {
+ pci_info(pdev, "xe driver does not support VFs on this device\n");
+ match = NULL;
+ } else if (!match) {
+ pci_info(pdev, "xe driver does not support configuration of this device\n");
+ }
+
pci_dev_put(pdev);
+ if (!match)
+ return ERR_PTR(-ENOENT);
+
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return ERR_PTR(-ENOMEM);
- /* Default values */
- dev->engines_allowed = U64_MAX;
+ dev->desc = match;
+ set_device_defaults(&dev->config);
config_group_init_type_name(&dev->group, name, &xe_config_device_type);
@@ -284,12 +805,12 @@ static struct config_group *xe_config_make_device_group(struct config_group *gro
return &dev->group;
}
-static struct configfs_group_operations xe_config_device_group_ops = {
+static struct configfs_group_operations xe_config_group_ops = {
.make_group = xe_config_make_device_group,
};
static const struct config_item_type xe_configfs_type = {
- .ct_group_ops = &xe_config_device_group_ops,
+ .ct_group_ops = &xe_config_group_ops,
.ct_owner = THIS_MODULE,
};
@@ -302,110 +823,188 @@ static struct configfs_subsystem xe_configfs = {
},
};
-static struct xe_config_device *configfs_find_group(struct pci_dev *pdev)
+static struct xe_config_group_device *find_xe_config_group_device(struct pci_dev *pdev)
{
struct config_item *item;
- char name[64];
-
- snprintf(name, sizeof(name), "%04x:%02x:%02x.%x", pci_domain_nr(pdev->bus),
- pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
mutex_lock(&xe_configfs.su_mutex);
- item = config_group_find_item(&xe_configfs.su_group, name);
+ item = config_group_find_item(&xe_configfs.su_group, pci_name(pdev));
mutex_unlock(&xe_configfs.su_mutex);
if (!item)
return NULL;
- return to_xe_config_device(item);
+ return to_xe_config_group_device(item);
+}
+
+static void dump_custom_dev_config(struct pci_dev *pdev,
+ struct xe_config_group_device *dev)
+{
+#define PRI_CUSTOM_ATTR(fmt_, attr_) do { \
+ if (dev->config.attr_ != device_defaults.attr_) \
+ pci_info(pdev, "configfs: " __stringify(attr_) " = " fmt_ "\n", \
+ dev->config.attr_); \
+ } while (0)
+
+ PRI_CUSTOM_ATTR("%llx", engines_allowed);
+ PRI_CUSTOM_ATTR("%d", enable_psmi);
+ PRI_CUSTOM_ATTR("%d", survivability_mode);
+
+#undef PRI_CUSTOM_ATTR
+}
+
+/**
+ * xe_configfs_check_device() - Test if device was configured by configfs
+ * @pdev: the &pci_dev device to test
+ *
+ * Try to find the configfs group that belongs to the specified pci device
+ * and print a diagnostic message if its configuration differs from the
+ * default values.
+ */
+void xe_configfs_check_device(struct pci_dev *pdev)
+{
+ struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
+
+ if (!dev)
+ return;
+
+ /* memcmp here is safe as both are zero-initialized */
+ if (memcmp(&dev->config, &device_defaults, sizeof(dev->config))) {
+ pci_info(pdev, "Found custom settings in configfs\n");
+ dump_custom_dev_config(pdev, dev);
+ }
+
+ config_group_put(&dev->group);
}
/**
* xe_configfs_get_survivability_mode - get configfs survivability mode attribute
* @pdev: pci device
*
- * find the configfs group that belongs to the pci device and return
- * the survivability mode attribute
- *
- * Return: survivability mode if config group is found, false otherwise
+ * Return: survivability_mode attribute in configfs
*/
bool xe_configfs_get_survivability_mode(struct pci_dev *pdev)
{
- struct xe_config_device *dev = configfs_find_group(pdev);
+ struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
bool mode;
if (!dev)
- return false;
+ return device_defaults.survivability_mode;
- mode = dev->survivability_mode;
- config_item_put(&dev->group.cg_item);
+ mode = dev->config.survivability_mode;
+ config_group_put(&dev->group);
return mode;
}
/**
- * xe_configfs_clear_survivability_mode - clear configfs survivability mode attribute
+ * xe_configfs_get_engines_allowed - get engine allowed mask from configfs
+ * @pdev: pci device
+ *
+ * Return: engine mask with allowed engines set in configfs
+ */
+u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev)
+{
+ struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
+ u64 engines_allowed;
+
+ if (!dev)
+ return device_defaults.engines_allowed;
+
+ engines_allowed = dev->config.engines_allowed;
+ config_group_put(&dev->group);
+
+ return engines_allowed;
+}
+
+/**
+ * xe_configfs_get_psmi_enabled - get configfs enable_psmi setting
* @pdev: pci device
*
- * find the configfs group that belongs to the pci device and clear survivability
- * mode attribute
+ * Return: enable_psmi setting in configfs
*/
-void xe_configfs_clear_survivability_mode(struct pci_dev *pdev)
+bool xe_configfs_get_psmi_enabled(struct pci_dev *pdev)
{
- struct xe_config_device *dev = configfs_find_group(pdev);
+ struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
+ bool ret;
if (!dev)
- return;
+ return false;
- mutex_lock(&dev->lock);
- dev->survivability_mode = 0;
- mutex_unlock(&dev->lock);
+ ret = dev->config.enable_psmi;
+ config_group_put(&dev->group);
- config_item_put(&dev->group.cg_item);
+ return ret;
}
/**
- * xe_configfs_get_engines_allowed - get engine allowed mask from configfs
+ * xe_configfs_get_ctx_restore_mid_bb - get configfs ctx_restore_mid_bb setting
* @pdev: pci device
+ * @class: hw engine class
+ * @cs: pointer to the bb to use - only valid during probe
*
- * Find the configfs group that belongs to the pci device and return
- * the mask of engines allowed to be used.
+ * Return: Number of dwords used in the ctx_restore_mid_bb setting in configfs
+ */
+u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev,
+ enum xe_engine_class class,
+ const u32 **cs)
+{
+ struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
+ u32 len;
+
+ if (!dev)
+ return 0;
+
+ if (cs)
+ *cs = dev->config.ctx_restore_mid_bb[class].cs;
+
+ len = dev->config.ctx_restore_mid_bb[class].len;
+ config_group_put(&dev->group);
+
+ return len;
+}
+
+/**
+ * xe_configfs_get_ctx_restore_post_bb - get configfs ctx_restore_post_bb setting
+ * @pdev: pci device
+ * @class: hw engine class
+ * @cs: pointer to the bb to use - only valid during probe
*
- * Return: engine mask with allowed engines
+ * Return: Number of dwords used in the ctx_restore_post_bb setting in configfs
*/
-u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev)
+u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev,
+ enum xe_engine_class class,
+ const u32 **cs)
{
- struct xe_config_device *dev = configfs_find_group(pdev);
- u64 engines_allowed;
+ struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
+ u32 len;
if (!dev)
- return U64_MAX;
+ return 0;
- engines_allowed = dev->engines_allowed;
- config_item_put(&dev->group.cg_item);
+	if (cs)
+		*cs = dev->config.ctx_restore_post_bb[class].cs;
+
+ len = dev->config.ctx_restore_post_bb[class].len;
+ config_group_put(&dev->group);
- return engines_allowed;
+ return len;
}
int __init xe_configfs_init(void)
{
- struct config_group *root = &xe_configfs.su_group;
int ret;
- config_group_init(root);
+ config_group_init(&xe_configfs.su_group);
mutex_init(&xe_configfs.su_mutex);
ret = configfs_register_subsystem(&xe_configfs);
if (ret) {
- pr_err("Error %d while registering %s subsystem\n",
- ret, root->cg_item.ci_namebuf);
+ mutex_destroy(&xe_configfs.su_mutex);
return ret;
}
return 0;
}
-void __exit xe_configfs_exit(void)
+void xe_configfs_exit(void)
{
configfs_unregister_subsystem(&xe_configfs);
+ mutex_destroy(&xe_configfs.su_mutex);
}
-
diff --git a/drivers/gpu/drm/xe/xe_configfs.h b/drivers/gpu/drm/xe/xe_configfs.h
index fb8764008089..c61e0e47ed94 100644
--- a/drivers/gpu/drm/xe/xe_configfs.h
+++ b/drivers/gpu/drm/xe/xe_configfs.h
@@ -8,20 +8,32 @@
#include <linux/limits.h>
#include <linux/types.h>
+#include "xe_hw_engine_types.h"
+
struct pci_dev;
#if IS_ENABLED(CONFIG_CONFIGFS_FS)
int xe_configfs_init(void);
void xe_configfs_exit(void);
+void xe_configfs_check_device(struct pci_dev *pdev);
bool xe_configfs_get_survivability_mode(struct pci_dev *pdev);
-void xe_configfs_clear_survivability_mode(struct pci_dev *pdev);
u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev);
+bool xe_configfs_get_psmi_enabled(struct pci_dev *pdev);
+u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev, enum xe_engine_class,
+ const u32 **cs);
+u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev, enum xe_engine_class,
+ const u32 **cs);
#else
static inline int xe_configfs_init(void) { return 0; }
static inline void xe_configfs_exit(void) { }
+static inline void xe_configfs_check_device(struct pci_dev *pdev) { }
static inline bool xe_configfs_get_survivability_mode(struct pci_dev *pdev) { return false; }
-static inline void xe_configfs_clear_survivability_mode(struct pci_dev *pdev) { }
static inline u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev) { return U64_MAX; }
+static inline bool xe_configfs_get_psmi_enabled(struct pci_dev *pdev) { return false; }
+static inline u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev, enum xe_engine_class,
+ const u32 **cs) { return 0; }
+static inline u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev, enum xe_engine_class,
+ const u32 **cs) { return 0; }
#endif
#endif
diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c
index 26e9d146ccbf..cd977dbd1ef6 100644
--- a/drivers/gpu/drm/xe/xe_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_debugfs.c
@@ -11,18 +11,24 @@
#include <drm/drm_debugfs.h>
+#include "regs/xe_pmt.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt_debugfs.h"
#include "xe_gt_printk.h"
#include "xe_guc_ads.h"
+#include "xe_mmio.h"
#include "xe_pm.h"
+#include "xe_psmi.h"
#include "xe_pxp_debugfs.h"
#include "xe_sriov.h"
#include "xe_sriov_pf.h"
+#include "xe_sriov_vf.h"
#include "xe_step.h"
+#include "xe_tile_debugfs.h"
+#include "xe_vsec.h"
 #include "xe_wa.h"
#ifdef CONFIG_DRM_XE_DEBUG
#include "xe_bo_evict.h"
@@ -31,6 +37,24 @@
#endif
DECLARE_FAULT_ATTR(gt_reset_failure);
+DECLARE_FAULT_ATTR(inject_csc_hw_error);
+
+static void read_residency_counter(struct xe_device *xe, struct xe_mmio *mmio,
+ u32 offset, const char *name, struct drm_printer *p)
+{
+ u64 residency = 0;
+ int ret;
+
+ ret = xe_pmt_telem_read(to_pci_dev(xe->drm.dev),
+ xe_mmio_read32(mmio, PUNIT_TELEMETRY_GUID),
+ &residency, offset, sizeof(residency));
+ if (ret != sizeof(residency)) {
+		drm_warn(&xe->drm, "Failed to read %s counter, ret %d\n", name, ret);
+ return;
+ }
+
+ drm_printf(p, "%s : %llu\n", name, residency);
+}
static struct xe_device *node_to_xe(struct drm_info_node *node)
{
@@ -102,12 +126,72 @@ static int workaround_info(struct seq_file *m, void *data)
return 0;
}
+static int dgfx_pkg_residencies_show(struct seq_file *m, void *data)
+{
+ struct xe_device *xe;
+ struct xe_mmio *mmio;
+ struct drm_printer p;
+
+ xe = node_to_xe(m->private);
+ p = drm_seq_file_printer(m);
+ xe_pm_runtime_get(xe);
+ mmio = xe_root_tile_mmio(xe);
+
+	static const struct {
+ u32 offset;
+ const char *name;
+ } residencies[] = {
+ {BMG_G2_RESIDENCY_OFFSET, "Package G2"},
+ {BMG_G6_RESIDENCY_OFFSET, "Package G6"},
+ {BMG_G8_RESIDENCY_OFFSET, "Package G8"},
+ {BMG_G10_RESIDENCY_OFFSET, "Package G10"},
+ {BMG_MODS_RESIDENCY_OFFSET, "Package ModS"}
+ };
+
+ for (int i = 0; i < ARRAY_SIZE(residencies); i++)
+ read_residency_counter(xe, mmio, residencies[i].offset, residencies[i].name, &p);
+
+ xe_pm_runtime_put(xe);
+ return 0;
+}
+
+static int dgfx_pcie_link_residencies_show(struct seq_file *m, void *data)
+{
+ struct xe_device *xe;
+ struct xe_mmio *mmio;
+ struct drm_printer p;
+
+ xe = node_to_xe(m->private);
+ p = drm_seq_file_printer(m);
+ xe_pm_runtime_get(xe);
+ mmio = xe_root_tile_mmio(xe);
+
+ static const struct {
+ u32 offset;
+ const char *name;
+ } residencies[] = {
+ {BMG_PCIE_LINK_L0_RESIDENCY_OFFSET, "PCIE LINK L0 RESIDENCY"},
+ {BMG_PCIE_LINK_L1_RESIDENCY_OFFSET, "PCIE LINK L1 RESIDENCY"},
+ {BMG_PCIE_LINK_L1_2_RESIDENCY_OFFSET, "PCIE LINK L1.2 RESIDENCY"}
+ };
+
+ for (int i = 0; i < ARRAY_SIZE(residencies); i++)
+ read_residency_counter(xe, mmio, residencies[i].offset, residencies[i].name, &p);
+
+ xe_pm_runtime_put(xe);
+ return 0;
+}
+
static const struct drm_info_list debugfs_list[] = {
{"info", info, 0},
{ .name = "sriov_info", .show = sriov_info, },
{ .name = "workarounds", .show = workaround_info, },
};
+static const struct drm_info_list debugfs_residencies[] = {
+ { .name = "dgfx_pkg_residencies", .show = dgfx_pkg_residencies_show, },
+ { .name = "dgfx_pcie_link_residencies", .show = dgfx_pcie_link_residencies_show, },
+};
+
static int forcewake_open(struct inode *inode, struct file *file)
{
struct xe_device *xe = inode->i_private;
@@ -247,20 +331,68 @@ static const struct file_operations atomic_svm_timeslice_ms_fops = {
.write = atomic_svm_timeslice_ms_set,
};
+static ssize_t disable_late_binding_show(struct file *f, char __user *ubuf,
+ size_t size, loff_t *pos)
+{
+ struct xe_device *xe = file_inode(f)->i_private;
+ struct xe_late_bind *late_bind = &xe->late_bind;
+ char buf[32];
+ int len;
+
+ len = scnprintf(buf, sizeof(buf), "%d\n", late_bind->disable);
+
+ return simple_read_from_buffer(ubuf, size, pos, buf, len);
+}
+
+static ssize_t disable_late_binding_set(struct file *f, const char __user *ubuf,
+ size_t size, loff_t *pos)
+{
+ struct xe_device *xe = file_inode(f)->i_private;
+ struct xe_late_bind *late_bind = &xe->late_bind;
+ u32 uval;
+ ssize_t ret;
+
+ ret = kstrtouint_from_user(ubuf, size, sizeof(uval), &uval);
+ if (ret)
+ return ret;
+
+ if (uval > 1)
+ return -EINVAL;
+
+ late_bind->disable = !!uval;
+ return size;
+}
+
+static const struct file_operations disable_late_binding_fops = {
+ .owner = THIS_MODULE,
+ .read = disable_late_binding_show,
+ .write = disable_late_binding_set,
+};
+
void xe_debugfs_register(struct xe_device *xe)
{
struct ttm_device *bdev = &xe->ttm;
struct drm_minor *minor = xe->drm.primary;
struct dentry *root = minor->debugfs_root;
struct ttm_resource_manager *man;
+ struct xe_tile *tile;
struct xe_gt *gt;
u32 mem_type;
+ u8 tile_id;
u8 id;
drm_debugfs_create_files(debugfs_list,
ARRAY_SIZE(debugfs_list),
root, minor);
+ if (xe->info.platform == XE_BATTLEMAGE && !IS_SRIOV_VF(xe)) {
+ drm_debugfs_create_files(debugfs_residencies,
+ ARRAY_SIZE(debugfs_residencies),
+ root, minor);
+ fault_create_debugfs_attr("inject_csc_hw_error", root,
+ &inject_csc_hw_error);
+ }
+
debugfs_create_file("forcewake_all", 0400, root, xe,
&forcewake_all_fops);
@@ -270,6 +402,9 @@ void xe_debugfs_register(struct xe_device *xe)
debugfs_create_file("atomic_svm_timeslice_ms", 0600, root, xe,
&atomic_svm_timeslice_ms_fops);
+ debugfs_create_file("disable_late_binding", 0600, root, xe,
+ &disable_late_binding_fops);
+
for (mem_type = XE_PL_VRAM0; mem_type <= XE_PL_VRAM1; ++mem_type) {
man = ttm_manager_type(bdev, mem_type);
@@ -288,13 +423,20 @@ void xe_debugfs_register(struct xe_device *xe)
if (man)
ttm_resource_manager_create_debugfs(man, root, "stolen_mm");
+ for_each_tile(tile, xe, tile_id)
+ xe_tile_debugfs_register(tile);
+
for_each_gt(gt, xe, id)
xe_gt_debugfs_register(gt);
xe_pxp_debugfs_register(xe->pxp);
+ xe_psmi_debugfs_register(xe);
+
fault_create_debugfs_attr("fail_gt_reset", root, &gt_reset_failure);
if (IS_SRIOV_PF(xe))
xe_sriov_pf_debugfs_register(xe, root);
+ else if (IS_SRIOV_VF(xe))
+ xe_sriov_vf_debugfs_register(xe, root);
}
diff --git a/drivers/gpu/drm/xe/xe_dep_job_types.h b/drivers/gpu/drm/xe/xe_dep_job_types.h
new file mode 100644
index 000000000000..c6a484f24c8c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_dep_job_types.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_DEP_JOB_TYPES_H_
+#define _XE_DEP_JOB_TYPES_H_
+
+#include <drm/gpu_scheduler.h>
+
+struct xe_dep_job;
+
+/** struct xe_dep_job_ops - Generic Xe dependency job operations */
+struct xe_dep_job_ops {
+ /** @run_job: Run generic Xe dependency job */
+ struct dma_fence *(*run_job)(struct xe_dep_job *job);
+ /** @free_job: Free generic Xe dependency job */
+ void (*free_job)(struct xe_dep_job *job);
+};
+
+/** struct xe_dep_job - Generic dependency Xe job */
+struct xe_dep_job {
+ /** @drm: base DRM scheduler job */
+ struct drm_sched_job drm;
+ /** @ops: dependency job operations */
+ const struct xe_dep_job_ops *ops;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_dep_scheduler.c b/drivers/gpu/drm/xe/xe_dep_scheduler.c
new file mode 100644
index 000000000000..9bd3bfd2e526
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_dep_scheduler.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/slab.h>
+
+#include <drm/gpu_scheduler.h>
+
+#include "xe_dep_job_types.h"
+#include "xe_dep_scheduler.h"
+#include "xe_device_types.h"
+
+/**
+ * DOC: Xe Dependency Scheduler
+ *
+ * The Xe dependency scheduler is a simple wrapper built around the DRM
+ * scheduler to execute jobs once their dependencies are resolved (i.e., all
+ * input fences specified as dependencies are signaled). The jobs that are
+ * executed contain virtual functions to run (execute) and free the job,
+ * allowing a single dependency scheduler to handle jobs performing different
+ * operations.
+ *
+ * Example use cases include deferred resource freeing, TLB invalidations after
+ * bind jobs, etc.
+ */
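+
+/*
+ * Minimal sketch of a user (assumed names, not part of this file): embed
+ * struct xe_dep_job, provide the two ops, and the job runs once all of its
+ * added dependencies have signaled:
+ *
+ *	static struct dma_fence *noop_run_job(struct xe_dep_job *job)
+ *	{
+ *		return dma_fence_get_stub();	// deferred work goes here
+ *	}
+ *
+ *	static void noop_free_job(struct xe_dep_job *job)
+ *	{
+ *		kfree(job);
+ *	}
+ *
+ *	static const struct xe_dep_job_ops noop_job_ops = {
+ *		.run_job = noop_run_job,
+ *		.free_job = noop_free_job,
+ *	};
+ */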
+
+/** struct xe_dep_scheduler - Generic Xe dependency scheduler */
+struct xe_dep_scheduler {
+ /** @sched: DRM GPU scheduler */
+ struct drm_gpu_scheduler sched;
+ /** @entity: DRM scheduler entity */
+ struct drm_sched_entity entity;
+ /** @rcu: For safe freeing of exported dma fences */
+ struct rcu_head rcu;
+};
+
+static struct dma_fence *xe_dep_scheduler_run_job(struct drm_sched_job *drm_job)
+{
+ struct xe_dep_job *dep_job =
+ container_of(drm_job, typeof(*dep_job), drm);
+
+ return dep_job->ops->run_job(dep_job);
+}
+
+static void xe_dep_scheduler_free_job(struct drm_sched_job *drm_job)
+{
+ struct xe_dep_job *dep_job =
+ container_of(drm_job, typeof(*dep_job), drm);
+
+ dep_job->ops->free_job(dep_job);
+}
+
+static const struct drm_sched_backend_ops sched_ops = {
+ .run_job = xe_dep_scheduler_run_job,
+ .free_job = xe_dep_scheduler_free_job,
+};
+
+/**
+ * xe_dep_scheduler_create() - Generic Xe dependency scheduler create
+ * @xe: Xe device
+ * @submit_wq: Submit workqueue struct (can be NULL)
+ * @name: Name of dependency scheduler
+ * @job_limit: Max dependency jobs that can be scheduled
+ *
+ * Create a generic Xe dependency scheduler and initialize internal DRM
+ * scheduler objects.
+ *
+ * Return: Generic Xe dependency scheduler object on success, ERR_PTR failure
+ */
+struct xe_dep_scheduler *
+xe_dep_scheduler_create(struct xe_device *xe,
+ struct workqueue_struct *submit_wq,
+ const char *name, u32 job_limit)
+{
+ struct xe_dep_scheduler *dep_scheduler;
+ struct drm_gpu_scheduler *sched;
+ const struct drm_sched_init_args args = {
+ .ops = &sched_ops,
+ .submit_wq = submit_wq,
+ .num_rqs = 1,
+ .credit_limit = job_limit,
+ .timeout = MAX_SCHEDULE_TIMEOUT,
+ .name = name,
+ .dev = xe->drm.dev,
+ };
+ int err;
+
+ dep_scheduler = kzalloc(sizeof(*dep_scheduler), GFP_KERNEL);
+ if (!dep_scheduler)
+ return ERR_PTR(-ENOMEM);
+
+ err = drm_sched_init(&dep_scheduler->sched, &args);
+ if (err)
+ goto err_free;
+
+ sched = &dep_scheduler->sched;
+ err = drm_sched_entity_init(&dep_scheduler->entity, 0, &sched, 1, NULL);
+ if (err)
+ goto err_sched;
+
+ init_rcu_head(&dep_scheduler->rcu);
+
+ return dep_scheduler;
+
+err_sched:
+ drm_sched_fini(&dep_scheduler->sched);
+err_free:
+ kfree(dep_scheduler);
+
+ return ERR_PTR(err);
+}
+
+/**
+ * xe_dep_scheduler_fini() - Generic Xe dependency scheduler finalize
+ * @dep_scheduler: Generic Xe dependency scheduler object
+ *
+ * Finalize internal DRM scheduler objects and free generic Xe dependency
+ * scheduler object
+ */
+void xe_dep_scheduler_fini(struct xe_dep_scheduler *dep_scheduler)
+{
+ drm_sched_entity_fini(&dep_scheduler->entity);
+ drm_sched_fini(&dep_scheduler->sched);
+ /*
+	 * RCU free due to the sched being exported via DRM scheduler fences
+	 * (timeline name).
+ */
+ kfree_rcu(dep_scheduler, rcu);
+}
+
+/**
+ * xe_dep_scheduler_entity() - Retrieve a generic Xe dependency scheduler
+ * DRM scheduler entity
+ * @dep_scheduler: Generic Xe dependency scheduler object
+ *
+ * Return: The generic Xe dependency scheduler's DRM scheduler entity
+ */
+struct drm_sched_entity *
+xe_dep_scheduler_entity(struct xe_dep_scheduler *dep_scheduler)
+{
+ return &dep_scheduler->entity;
+}
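+
+/*
+ * Typical lifecycle (illustrative; the name and the job allocation are
+ * assumed):
+ *
+ *	struct xe_dep_scheduler *ds;
+ *
+ *	ds = xe_dep_scheduler_create(xe, NULL, "dep-sched", 64);
+ *	if (IS_ERR(ds))
+ *		return PTR_ERR(ds);
+ *
+ *	// allocate a struct xe_dep_job 'job' with its ops set, then:
+ *	drm_sched_job_init(&job->drm, xe_dep_scheduler_entity(ds), 1, NULL);
+ *	// add dependencies, drm_sched_job_arm() + drm_sched_entity_push_job()
+ *
+ *	xe_dep_scheduler_fini(ds);
+ */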
diff --git a/drivers/gpu/drm/xe/xe_dep_scheduler.h b/drivers/gpu/drm/xe/xe_dep_scheduler.h
new file mode 100644
index 000000000000..853961eec64b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_dep_scheduler.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/types.h>
+
+struct drm_sched_entity;
+struct workqueue_struct;
+struct xe_dep_scheduler;
+struct xe_device;
+
+struct xe_dep_scheduler *
+xe_dep_scheduler_create(struct xe_device *xe,
+ struct workqueue_struct *submit_wq,
+ const char *name, u32 job_limit);
+
+void xe_dep_scheduler_fini(struct xe_dep_scheduler *dep_scheduler);
+
+struct drm_sched_entity *
+xe_dep_scheduler_entity(struct xe_dep_scheduler *dep_scheduler);
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 5bd2f7d7b4ea..2883b39c9b37 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -45,6 +45,7 @@
#include "xe_hwmon.h"
#include "xe_i2c.h"
#include "xe_irq.h"
+#include "xe_late_bind_fw.h"
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_nvm.h"
@@ -54,6 +55,7 @@
#include "xe_pcode.h"
#include "xe_pm.h"
#include "xe_pmu.h"
+#include "xe_psmi.h"
#include "xe_pxp.h"
#include "xe_query.h"
#include "xe_shrinker.h"
@@ -63,7 +65,9 @@
#include "xe_ttm_stolen_mgr.h"
#include "xe_ttm_sys_mgr.h"
#include "xe_vm.h"
+#include "xe_vm_madvise.h"
#include "xe_vram.h"
+#include "xe_vram_types.h"
#include "xe_vsec.h"
#include "xe_wait_user_fence.h"
#include "xe_wa.h"
@@ -200,6 +204,9 @@ static const struct drm_ioctl_desc xe_ioctls[] = {
DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(XE_OBSERVATION, xe_observation_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(XE_MADVISE, xe_vm_madvise_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(XE_VM_QUERY_MEM_RANGE_ATTRS, xe_vm_query_vmas_attrs_ioctl,
+ DRM_RENDER_ALLOW),
};
static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
@@ -451,6 +458,8 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
if (err)
goto err;
+ xe_validation_device_init(&xe->val);
+
init_waitqueue_head(&xe->ufence_wq);
init_rwsem(&xe->usm.lock);
@@ -524,7 +533,7 @@ static bool xe_driver_flr_disabled(struct xe_device *xe)
* re-init and saving/restoring (or re-populating) the wiped memory. Since we
* perform the FLR as the very last action before releasing access to the HW
* during the driver release flow, we don't attempt recovery at all, because
- * if/when a new instance of i915 is bound to the device it will do a full
+ * if/when a new instance of Xe is bound to the device it will do a full
* re-init anyway.
*/
static void __xe_driver_flr(struct xe_device *xe)
@@ -676,16 +685,31 @@ static int wait_for_lmem_ready(struct xe_device *xe)
}
ALLOW_ERROR_INJECTION(wait_for_lmem_ready, ERRNO); /* See xe_pci_probe() */
-static void sriov_update_device_info(struct xe_device *xe)
+static void vf_update_device_info(struct xe_device *xe)
{
+ xe_assert(xe, IS_SRIOV_VF(xe));
/* disable features that are not available/applicable to VFs */
- if (IS_SRIOV_VF(xe)) {
- xe->info.probe_display = 0;
- xe->info.has_heci_cscfi = 0;
- xe->info.has_heci_gscfi = 0;
- xe->info.skip_guc_pc = 1;
- xe->info.skip_pcode = 1;
- }
+ xe->info.probe_display = 0;
+ xe->info.has_heci_cscfi = 0;
+ xe->info.has_heci_gscfi = 0;
+ xe->info.has_late_bind = 0;
+ xe->info.skip_guc_pc = 1;
+ xe->info.skip_pcode = 1;
+}
+
+static int xe_device_vram_alloc(struct xe_device *xe)
+{
+ struct xe_vram_region *vram;
+
+ if (!IS_DGFX(xe))
+ return 0;
+
+ vram = drmm_kzalloc(&xe->drm, sizeof(*vram), GFP_KERNEL);
+ if (!vram)
+ return -ENOMEM;
+
+ xe->mem.vram = vram;
+ return 0;
}
/**
@@ -711,7 +735,8 @@ int xe_device_probe_early(struct xe_device *xe)
xe_sriov_probe_early(xe);
- sriov_update_device_info(xe);
+ if (IS_SRIOV_VF(xe))
+ vf_update_device_info(xe);
err = xe_pcode_probe_early(xe);
if (err || xe_survivability_mode_is_requested(xe)) {
@@ -722,7 +747,7 @@ int xe_device_probe_early(struct xe_device *xe)
* possible, but still return the previous error for error
* propagation
*/
- err = xe_survivability_mode_enable(xe);
+ err = xe_survivability_mode_boot_enable(xe);
if (err)
return err;
@@ -735,6 +760,10 @@ int xe_device_probe_early(struct xe_device *xe)
xe->wedged.mode = xe_modparam.wedged_mode;
+ err = xe_device_vram_alloc(xe);
+ if (err)
+ return err;
+
return 0;
}
ALLOW_ERROR_INJECTION(xe_device_probe_early, ERRNO); /* See xe_pci_probe() */
@@ -802,10 +831,6 @@ int xe_device_probe(struct xe_device *xe)
return err;
}
- err = xe_devcoredump_init(xe);
- if (err)
- return err;
-
/*
	 * From here on, if a step fails, make sure a Driver-FLR is triggered
*/
@@ -867,15 +892,23 @@ int xe_device_probe(struct xe_device *xe)
}
if (xe->tiles->media_gt &&
- XE_WA(xe->tiles->media_gt, 15015404425_disable))
+ XE_GT_WA(xe->tiles->media_gt, 15015404425_disable))
XE_DEVICE_WA_DISABLE(xe, 15015404425);
+ err = xe_devcoredump_init(xe);
+ if (err)
+ return err;
+
xe_nvm_init(xe);
err = xe_heci_gsc_init(xe);
if (err)
return err;
+ err = xe_late_bind_init(&xe->late_bind);
+ if (err)
+ return err;
+
err = xe_oa_init(xe);
if (err)
return err;
@@ -888,6 +921,10 @@ int xe_device_probe(struct xe_device *xe)
if (err)
return err;
+ err = xe_psmi_init(xe);
+ if (err)
+ return err;
+
err = drm_dev_register(&xe->drm, 0);
if (err)
return err;
@@ -921,6 +958,10 @@ int xe_device_probe(struct xe_device *xe)
xe_vsec_init(xe);
+ err = xe_sriov_init_late(xe);
+ if (err)
+ goto err_unregister_display;
+
return devm_add_action_or_reset(xe->drm.dev, xe_device_sanitize, xe);
err_unregister_display:
@@ -1019,7 +1060,7 @@ void xe_device_l2_flush(struct xe_device *xe)
gt = xe_root_mmio_gt(xe);
- if (!XE_WA(gt, 16023588340))
+ if (!XE_GT_WA(gt, 16023588340))
return;
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
@@ -1063,7 +1104,7 @@ void xe_device_td_flush(struct xe_device *xe)
return;
root_gt = xe_root_mmio_gt(xe);
- if (XE_WA(root_gt, 16023588340)) {
+ if (XE_GT_WA(root_gt, 16023588340)) {
/* A transient flush is not sufficient: flush the L2 */
xe_device_l2_flush(xe);
} else {
@@ -1134,11 +1175,63 @@ static void xe_device_wedged_fini(struct drm_device *drm, void *arg)
}
/**
+ * DOC: Xe Device Wedging
+ *
+ * The Xe driver uses the drm device wedged uevent as documented in
+ * Documentation/gpu/drm-uapi.rst. When the device is in a wedged state, every
+ * IOCTL will be blocked and the GT cannot be used. Certain critical errors,
+ * like GT reset failure or firmware failures, can cause the device to be
+ * wedged. The default recovery method for a wedged state is rebind/bus-reset.
+ *
+ * Another recovery method is vendor-specific. Below are the cases that send
+ * ``WEDGED=vendor-specific`` recovery method in drm device wedged uevent.
+ *
+ * Case: Firmware Flash
+ * --------------------
+ *
+ * Identification Hint
+ * +++++++++++++++++++
+ *
+ * ``WEDGED=vendor-specific`` drm device wedged uevent with
+ * :ref:`Runtime Survivability mode <xe-survivability-mode>` is used to notify
+ * admin/userspace consumer about the need for a firmware flash.
+ *
+ * Recovery Procedure
+ * ++++++++++++++++++
+ *
+ * Once ``WEDGED=vendor-specific`` drm device wedged uevent is received, follow
+ * the below steps
+ *
+ * - Check the Runtime Survivability mode sysfs attribute.
+ *   If it is enabled, a firmware flash is required to recover the device.
+ *
+ * /sys/bus/pci/devices/<device>/survivability_mode
+ *
+ * - The admin/userspace consumer can use firmware flashing tools like fwupd to
+ *   flash the firmware and restore the device to normal operation.
+ */
+
+/**
+ * xe_device_set_wedged_method - Set wedged recovery method
+ * @xe: xe device instance
+ * @method: recovery method to set
+ *
+ * Set wedged recovery method to be sent in drm wedged uevent.
+ */
+void xe_device_set_wedged_method(struct xe_device *xe, unsigned long method)
+{
+ xe->wedged.method = method;
+}
+
+/**
* xe_device_declare_wedged - Declare device wedged
* @xe: xe device instance
*
- * This is a final state that can only be cleared with a module
- * re-probe (unbind + bind).
+ * This is a final state that can only be cleared with the recovery method
+ * specified in the drm wedged uevent. The method can be set using
+ * xe_device_set_wedged_method() before declaring the device as wedged. If no
+ * method is set, rebind and bus-reset will be sent by default.
+ *
* In this state every IOCTL will be blocked so the GT cannot be used.
* In general it will be called upon any critical error such as gt reset
* failure or guc loading failure. Userspace will be notified of this state
@@ -1172,13 +1265,18 @@ void xe_device_declare_wedged(struct xe_device *xe)
"IOCTLs and executions are blocked. Only a rebind may clear the failure\n"
"Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/xe/kernel/issues/new\n",
dev_name(xe->drm.dev));
-
- /* Notify userspace of wedged device */
- drm_dev_wedged_event(&xe->drm,
- DRM_WEDGE_RECOVERY_REBIND | DRM_WEDGE_RECOVERY_BUS_RESET,
- NULL);
}
for_each_gt(gt, xe, id)
xe_gt_declare_wedged(gt);
+
+ if (xe_device_wedged(xe)) {
+ /* If no wedge recovery method is set, use default */
+ if (!xe->wedged.method)
+ xe_device_set_wedged_method(xe, DRM_WEDGE_RECOVERY_REBIND |
+ DRM_WEDGE_RECOVERY_BUS_RESET);
+
+ /* Notify userspace of wedged device */
+ drm_dev_wedged_event(&xe->drm, xe->wedged.method, NULL);
+ }
}
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index bc802e066a7d..32cc6323b7f6 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -187,6 +187,7 @@ static inline bool xe_device_wedged(struct xe_device *xe)
return atomic_read(&xe->wedged.flag);
}
+void xe_device_set_wedged_method(struct xe_device *xe, unsigned long method);
void xe_device_declare_wedged(struct xe_device *xe);
struct xe_file *xe_file_get(struct xe_file *xef);
diff --git a/drivers/gpu/drm/xe/xe_device_sysfs.c b/drivers/gpu/drm/xe/xe_device_sysfs.c
index bd9015761aa0..c5151c86a98a 100644
--- a/drivers/gpu/drm/xe/xe_device_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_device_sysfs.c
@@ -71,12 +71,21 @@ vram_d3cold_threshold_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RW(vram_d3cold_threshold);
+static struct attribute *vram_attrs[] = {
+ &dev_attr_vram_d3cold_threshold.attr,
+ NULL
+};
+
+static const struct attribute_group vram_attr_group = {
+ .attrs = vram_attrs,
+};
+
static ssize_t
lb_fan_control_version_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
struct xe_tile *root = xe_device_get_root_tile(xe);
- u32 cap, ver_low = FAN_TABLE, ver_high = FAN_TABLE;
+ u32 cap = 0, ver_low = FAN_TABLE, ver_high = FAN_TABLE;
u16 major = 0, minor = 0, hotfix = 0, build = 0;
int ret;
@@ -115,7 +124,7 @@ lb_voltage_regulator_version_show(struct device *dev, struct device_attribute *a
{
struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
struct xe_tile *root = xe_device_get_root_tile(xe);
- u32 cap, ver_low = VR_CONFIG, ver_high = VR_CONFIG;
+ u32 cap = 0, ver_low = VR_CONFIG, ver_high = VR_CONFIG;
u16 major = 0, minor = 0, hotfix = 0, build = 0;
int ret;
@@ -149,62 +158,44 @@ out:
}
static DEVICE_ATTR_ADMIN_RO(lb_voltage_regulator_version);
-static int late_bind_create_files(struct device *dev)
-{
- struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
- struct xe_tile *root = xe_device_get_root_tile(xe);
- u32 cap;
- int ret;
-
- xe_pm_runtime_get(xe);
-
- ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
- &cap, NULL);
- if (ret) {
- if (ret == -ENXIO) {
- drm_dbg(&xe->drm, "Late binding not supported by firmware\n");
- ret = 0;
- }
- goto out;
- }
-
- if (REG_FIELD_GET(V1_FAN_SUPPORTED, cap)) {
- ret = sysfs_create_file(&dev->kobj, &dev_attr_lb_fan_control_version.attr);
- if (ret)
- goto out;
- }
-
- if (REG_FIELD_GET(VR_PARAMS_SUPPORTED, cap))
- ret = sysfs_create_file(&dev->kobj, &dev_attr_lb_voltage_regulator_version.attr);
-out:
- xe_pm_runtime_put(xe);
-
- return ret;
-}
+static struct attribute *late_bind_attrs[] = {
+ &dev_attr_lb_fan_control_version.attr,
+ &dev_attr_lb_voltage_regulator_version.attr,
+ NULL
+};
-static void late_bind_remove_files(struct device *dev)
+static umode_t late_bind_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
struct xe_tile *root = xe_device_get_root_tile(xe);
- u32 cap;
+ u32 cap = 0;
int ret;
xe_pm_runtime_get(xe);
ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
&cap, NULL);
+ xe_pm_runtime_put(xe);
if (ret)
- goto out;
+ return 0;
- if (REG_FIELD_GET(V1_FAN_SUPPORTED, cap))
- sysfs_remove_file(&dev->kobj, &dev_attr_lb_fan_control_version.attr);
+ if (attr == &dev_attr_lb_fan_control_version.attr &&
+ REG_FIELD_GET(V1_FAN_SUPPORTED, cap))
+ return attr->mode;
+ if (attr == &dev_attr_lb_voltage_regulator_version.attr &&
+ REG_FIELD_GET(VR_PARAMS_SUPPORTED, cap))
+ return attr->mode;
- if (REG_FIELD_GET(VR_PARAMS_SUPPORTED, cap))
- sysfs_remove_file(&dev->kobj, &dev_attr_lb_voltage_regulator_version.attr);
-out:
- xe_pm_runtime_put(xe);
+ return 0;
}
+static const struct attribute_group late_bind_attr_group = {
+ .attrs = late_bind_attrs,
+ .is_visible = late_bind_attr_is_visible,
+};
+
/**
* DOC: PCIe Gen5 Limitations
*
@@ -278,24 +269,15 @@ auto_link_downgrade_status_show(struct device *dev, struct device_attribute *att
}
static DEVICE_ATTR_ADMIN_RO(auto_link_downgrade_status);
-static const struct attribute *auto_link_downgrade_attrs[] = {
+static struct attribute *auto_link_downgrade_attrs[] = {
&dev_attr_auto_link_downgrade_capable.attr,
&dev_attr_auto_link_downgrade_status.attr,
NULL
};
-static void xe_device_sysfs_fini(void *arg)
-{
- struct xe_device *xe = arg;
-
- if (xe->d3cold.capable)
- sysfs_remove_file(&xe->drm.dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
-
- if (xe->info.platform == XE_BATTLEMAGE) {
- sysfs_remove_files(&xe->drm.dev->kobj, auto_link_downgrade_attrs);
- late_bind_remove_files(xe->drm.dev);
- }
-}
+static const struct attribute_group auto_link_downgrade_attr_group = {
+ .attrs = auto_link_downgrade_attrs,
+};
int xe_device_sysfs_init(struct xe_device *xe)
{
@@ -303,20 +285,20 @@ int xe_device_sysfs_init(struct xe_device *xe)
int ret;
if (xe->d3cold.capable) {
- ret = sysfs_create_file(&dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
+ ret = devm_device_add_group(dev, &vram_attr_group);
if (ret)
return ret;
}
- if (xe->info.platform == XE_BATTLEMAGE) {
- ret = sysfs_create_files(&dev->kobj, auto_link_downgrade_attrs);
+ if (xe->info.platform == XE_BATTLEMAGE && !IS_SRIOV_VF(xe)) {
+ ret = devm_device_add_group(dev, &auto_link_downgrade_attr_group);
if (ret)
return ret;
- ret = late_bind_create_files(dev);
+ ret = devm_device_add_group(dev, &late_bind_attr_group);
if (ret)
return ret;
}
- return devm_add_action_or_reset(dev, xe_device_sysfs_fini, xe);
+ return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index d4d2c6854790..74d7af830b85 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -10,11 +10,11 @@
#include <drm/drm_device.h>
#include <drm/drm_file.h>
-#include <drm/drm_pagemap.h>
#include <drm/ttm/ttm_device.h>
#include "xe_devcoredump_types.h"
#include "xe_heci_gsc.h"
+#include "xe_late_bind_fw_types.h"
#include "xe_lmtt_types.h"
#include "xe_memirq_types.h"
#include "xe_oa_types.h"
@@ -24,9 +24,10 @@
#include "xe_sriov_pf_types.h"
#include "xe_sriov_types.h"
#include "xe_sriov_vf_types.h"
+#include "xe_sriov_vf_ccs_types.h"
#include "xe_step_types.h"
#include "xe_survivability_mode_types.h"
-#include "xe_ttm_vram_mgr_types.h"
+#include "xe_validation.h"
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
#define TEST_VM_OPS_ERROR
@@ -39,6 +40,7 @@ struct xe_ggtt;
struct xe_i2c;
struct xe_pat_ops;
struct xe_pxp;
+struct xe_vram_region;
#define XE_BO_INVALID_OFFSET LONG_MAX
@@ -72,61 +74,6 @@ struct xe_pxp;
struct xe_tile * : (tile__)->xe)
/**
- * struct xe_vram_region - memory region structure
- * This is used to describe a memory region in xe
- * device, such as HBM memory or CXL extension memory.
- */
-struct xe_vram_region {
- /** @io_start: IO start address of this VRAM instance */
- resource_size_t io_start;
- /**
- * @io_size: IO size of this VRAM instance
- *
- * This represents how much of this VRAM we can access
- * via the CPU through the VRAM BAR. This can be smaller
- * than @usable_size, in which case only part of VRAM is CPU
- * accessible (typically the first 256M). This
- * configuration is known as small-bar.
- */
- resource_size_t io_size;
- /** @dpa_base: This memory regions's DPA (device physical address) base */
- resource_size_t dpa_base;
- /**
- * @usable_size: usable size of VRAM
- *
- * Usable size of VRAM excluding reserved portions
- * (e.g stolen mem)
- */
- resource_size_t usable_size;
- /**
- * @actual_physical_size: Actual VRAM size
- *
- * Actual VRAM size including reserved portions
- * (e.g stolen mem)
- */
- resource_size_t actual_physical_size;
- /** @mapping: pointer to VRAM mappable space */
- void __iomem *mapping;
- /** @ttm: VRAM TTM manager */
- struct xe_ttm_vram_mgr ttm;
-#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
- /** @pagemap: Used to remap device memory as ZONE_DEVICE */
- struct dev_pagemap pagemap;
- /**
- * @dpagemap: The struct drm_pagemap of the ZONE_DEVICE memory
- * pages of this tile.
- */
- struct drm_pagemap dpagemap;
- /**
- * @hpa_base: base host physical address
- *
- * This is generated when remap device memory as ZONE_DEVICE
- */
- resource_size_t hpa_base;
-#endif
-};
-
-/**
* struct xe_mmio - register mmio structure
*
* Represents an MMIO region that the CPU may use to access registers. A
@@ -216,7 +163,7 @@ struct xe_tile {
* Although VRAM is associated with a specific tile, it can
* still be accessed by all tiles' GTs.
*/
- struct xe_vram_region vram;
+ struct xe_vram_region *vram;
/** @mem.ggtt: Global graphics translation table */
struct xe_ggtt *ggtt;
@@ -244,6 +191,9 @@ struct xe_tile {
/** @memirq: Memory Based Interrupts. */
struct xe_memirq memirq;
+ /** @csc_hw_error_work: worker to report CSC HW errors */
+ struct work_struct csc_hw_error_work;
+
/** @pcode: tile's PCODE */
struct {
/** @pcode.lock: protecting tile's PCODE mailbox data */
@@ -255,6 +205,9 @@ struct xe_tile {
/** @sysfs: sysfs' kobj used by xe_tile_sysfs */
struct kobject *sysfs;
+
+ /** @debugfs: debugfs directory associated with this tile */
+ struct dentry *debugfs;
};
/**
@@ -328,6 +281,8 @@ struct xe_device {
u8 has_heci_cscfi:1;
/** @info.has_heci_gscfi: device has heci gscfi */
u8 has_heci_gscfi:1;
+ /** @info.has_late_bind: Device has firmware late binding support */
+ u8 has_late_bind:1;
/** @info.has_llc: Device has a shared CPU+GPU last level cache */
u8 has_llc:1;
/** @info.has_mbx_power_limits: Device has support to manage power limits using
@@ -336,8 +291,8 @@ struct xe_device {
u8 has_mbx_power_limits:1;
/** @info.has_pxp: Device has PXP support */
u8 has_pxp:1;
- /** @info.has_range_tlb_invalidation: Has range based TLB invalidations */
- u8 has_range_tlb_invalidation:1;
+ /** @info.has_range_tlb_inval: Has range based TLB invalidations */
+ u8 has_range_tlb_inval:1;
/** @info.has_sriov: Supports SR-IOV */
u8 has_sriov:1;
/** @info.has_usm: Device has unified shared memory support */
@@ -412,7 +367,7 @@ struct xe_device {
/** @mem: memory info for device */
struct {
/** @mem.vram: VRAM info for device */
- struct xe_vram_region vram;
+ struct xe_vram_region *vram;
/** @mem.sys_mgr: system TTM manager */
struct ttm_resource_manager sys_mgr;
/** @mem.sys_mgr: system memory shrinker. */
@@ -476,7 +431,7 @@ struct xe_device {
/** @ordered_wq: used to serialize compute mode resume */
struct workqueue_struct *ordered_wq;
- /** @unordered_wq: used to serialize unordered work, mostly display */
+ /** @unordered_wq: used to serialize unordered work */
struct workqueue_struct *unordered_wq;
/** @destroy_wq: used to serialize user destroy work, like queue */
@@ -553,6 +508,12 @@ struct xe_device {
/** @pm_notifier: Our PM notifier to perform actions in response to various PM events. */
struct notifier_block pm_notifier;
+ /** @pm_block: Completion to block validating tasks on suspend / hibernate prepare */
+ struct completion pm_block;
+ /** @rebind_resume_list: List of wq items to kick on resume. */
+ struct list_head rebind_resume_list;
+ /** @rebind_resume_lock: Lock to protect the rebind_resume_list */
+ struct mutex rebind_resume_lock;
/** @pmt: Support the PMT driver callback interface */
struct {
@@ -575,6 +536,9 @@ struct xe_device {
/** @nvm: discrete graphics non-volatile memory */
struct intel_dg_nvm_dev *nvm;
+ /** @late_bind: xe mei late bind interface */
+ struct xe_late_bind late_bind;
+
/** @oa: oa observation subsystem */
struct xe_oa oa;
@@ -590,6 +554,8 @@ struct xe_device {
atomic_t flag;
/** @wedged.mode: Mode controlled by kernel parameter and debugfs */
int mode;
+ /** @wedged.method: Recovery method to be sent in the drm device wedged uevent */
+ unsigned long method;
} wedged;
/** @bo_device: Struct to control async free of BOs */
@@ -624,6 +590,23 @@ struct xe_device {
*/
atomic64_t global_total_pages;
#endif
+ /** @val: The domain for exhaustive eviction, which is currently per device. */
+ struct xe_validation_device val;
+
+ /** @psmi: GPU debugging via additional validation HW */
+ struct {
+ /** @psmi.capture_obj: PSMI buffer for VRAM */
+ struct xe_bo *capture_obj[XE_MAX_TILES_PER_DEVICE + 1];
+ /** @psmi.region_mask: Mask of valid memory regions */
+ u8 region_mask;
+ } psmi;
+
+#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
+ /** @g2g_test_array: for testing G2G communications */
+ u32 *g2g_test_array;
+ /** @g2g_test_count: for testing G2G communications */
+ atomic_t g2g_test_count;
+#endif
/* private: */
@@ -658,7 +641,6 @@ struct xe_device {
struct {
unsigned int hpll_freq;
unsigned int czclk_freq;
- unsigned int fsb_freq, mem_freq, is_ddr3;
};
#endif
};
diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
index 346f857f3837..a7d67725c3ee 100644
--- a/drivers/gpu/drm/xe/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/xe_dma_buf.c
@@ -51,6 +51,7 @@ static int xe_dma_buf_pin(struct dma_buf_attachment *attach)
struct drm_gem_object *obj = attach->dmabuf->priv;
struct xe_bo *bo = gem_to_xe_bo(obj);
struct xe_device *xe = xe_bo_device(bo);
+ struct drm_exec *exec = XE_VALIDATION_UNSUPPORTED;
int ret;
/*
@@ -63,7 +64,7 @@ static int xe_dma_buf_pin(struct dma_buf_attachment *attach)
return -EINVAL;
}
- ret = xe_bo_migrate(bo, XE_PL_TT);
+ ret = xe_bo_migrate(bo, XE_PL_TT, NULL, exec);
if (ret) {
if (ret != -EINTR && ret != -ERESTARTSYS)
drm_dbg(&xe->drm,
@@ -72,7 +73,7 @@ static int xe_dma_buf_pin(struct dma_buf_attachment *attach)
return ret;
}
- ret = xe_bo_pin_external(bo);
+ ret = xe_bo_pin_external(bo, true, exec);
xe_assert(xe, !ret);
return 0;
@@ -92,6 +93,7 @@ static struct sg_table *xe_dma_buf_map(struct dma_buf_attachment *attach,
struct dma_buf *dma_buf = attach->dmabuf;
struct drm_gem_object *obj = dma_buf->priv;
struct xe_bo *bo = gem_to_xe_bo(obj);
+ struct drm_exec *exec = XE_VALIDATION_UNSUPPORTED;
struct sg_table *sgt;
int r = 0;
@@ -100,9 +102,9 @@ static struct sg_table *xe_dma_buf_map(struct dma_buf_attachment *attach,
if (!xe_bo_is_pinned(bo)) {
if (!attach->peer2peer)
- r = xe_bo_migrate(bo, XE_PL_TT);
+ r = xe_bo_migrate(bo, XE_PL_TT, NULL, exec);
else
- r = xe_bo_validate(bo, NULL, false);
+ r = xe_bo_validate(bo, NULL, false, exec);
if (r)
return ERR_PTR(r);
}
@@ -161,15 +163,26 @@ static int xe_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
struct xe_bo *bo = gem_to_xe_bo(obj);
bool reads = (direction == DMA_BIDIRECTIONAL ||
direction == DMA_FROM_DEVICE);
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
+ int ret = 0;
if (!reads)
return 0;
/* Can we do interruptible lock here? */
- xe_bo_lock(bo, false);
- (void)xe_bo_migrate(bo, XE_PL_TT);
- xe_bo_unlock(bo);
+ xe_validation_guard(&ctx, &xe_bo_device(bo)->val, &exec, (struct xe_val_flags) {}, ret) {
+ ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
+ drm_exec_retry_on_contention(&exec);
+ if (ret)
+ break;
+
+ ret = xe_bo_migrate(bo, XE_PL_TT, NULL, &exec);
+ drm_exec_retry_on_contention(&exec);
+ xe_validation_retry_on_oom(&ctx, &ret);
+ }
+ /* If we failed, cpu-access takes place in current placement. */
return 0;
}
@@ -191,10 +204,22 @@ struct dma_buf *xe_gem_prime_export(struct drm_gem_object *obj, int flags)
{
struct xe_bo *bo = gem_to_xe_bo(obj);
struct dma_buf *buf;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = true,
+ /* We opt to avoid OOM on system page allocations */
+ .gfp_retry_mayfail = true,
+ .allow_res_evict = false,
+ };
+ int ret;
if (bo->vm)
return ERR_PTR(-EPERM);
+ ret = ttm_bo_setup_export(&bo->ttm, &ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
buf = drm_gem_prime_export(obj, flags);
if (!IS_ERR(buf))
buf->ops = &xe_dmabuf_ops;
@@ -208,32 +233,45 @@ xe_dma_buf_init_obj(struct drm_device *dev, struct xe_bo *storage,
{
struct dma_resv *resv = dma_buf->resv;
struct xe_device *xe = to_xe_device(dev);
+ struct xe_validation_ctx ctx;
+ struct drm_gem_object *dummy_obj;
+ struct drm_exec exec;
struct xe_bo *bo;
- int ret;
-
- dma_resv_lock(resv, NULL);
- bo = ___xe_bo_create_locked(xe, storage, NULL, resv, NULL, dma_buf->size,
- 0, /* Will require 1way or 2way for vm_bind */
- ttm_bo_type_sg, XE_BO_FLAG_SYSTEM);
- if (IS_ERR(bo)) {
- ret = PTR_ERR(bo);
- goto error;
+ int ret = 0;
+
+ dummy_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
+ if (!dummy_obj)
+ return ERR_PTR(-ENOMEM);
+
+ dummy_obj->resv = resv;
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, ret) {
+ ret = drm_exec_lock_obj(&exec, dummy_obj);
+ drm_exec_retry_on_contention(&exec);
+ if (ret)
+ break;
+
+ bo = xe_bo_init_locked(xe, storage, NULL, resv, NULL, dma_buf->size,
+ 0, /* Will require 1way or 2way for vm_bind */
+ ttm_bo_type_sg, XE_BO_FLAG_SYSTEM, &exec);
+ drm_exec_retry_on_contention(&exec);
+ if (IS_ERR(bo)) {
+ ret = PTR_ERR(bo);
+ xe_validation_retry_on_oom(&ctx, &ret);
+ break;
+ }
}
- dma_resv_unlock(resv);
-
- return &bo->ttm.base;
+ drm_gem_object_put(dummy_obj);
-error:
- dma_resv_unlock(resv);
- return ERR_PTR(ret);
+ return ret ? ERR_PTR(ret) : &bo->ttm.base;
}
static void xe_dma_buf_move_notify(struct dma_buf_attachment *attach)
{
struct drm_gem_object *obj = attach->importer_priv;
struct xe_bo *bo = gem_to_xe_bo(obj);
+ struct drm_exec *exec = XE_VALIDATION_UNSUPPORTED;
- XE_WARN_ON(xe_bo_evict(bo));
+ XE_WARN_ON(xe_bo_evict(bo, exec));
}
static const struct dma_buf_attach_ops xe_dma_buf_attach_ops = {
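
A note on the pattern: the begin_cpu_access() and init_obj() conversions above share the transaction shape introduced by this series — take a validation guard, lock objects via drm_exec, and let the retry helpers restart the body on contention or OOM. A minimal sketch of that shape, reusing only calls visible in the hunks above (not a drop-in implementation):

/*
 * Minimal sketch of the validation-guard shape used above; everything
 * here mirrors calls visible in this series rather than a full
 * implementation.
 */
static int example_migrate_to_tt(struct xe_bo *bo)
{
	struct xe_validation_ctx ctx;
	struct drm_exec exec;
	int ret = 0;

	xe_validation_guard(&ctx, &xe_bo_device(bo)->val, &exec,
			    (struct xe_val_flags) {}, ret) {
		/* Lock the BO; on contention the guard restarts the body. */
		ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			break;

		/* Validate/migrate inside the same transaction... */
		ret = xe_bo_migrate(bo, XE_PL_TT, NULL, &exec);
		drm_exec_retry_on_contention(&exec);
		/* ...and retry the whole transaction on memory pressure. */
		xe_validation_retry_on_oom(&ctx, &ret);
	}

	return ret;
}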
diff --git a/drivers/gpu/drm/xe/xe_eu_stall.c b/drivers/gpu/drm/xe/xe_eu_stall.c
index af7916315ac6..f5cfdf29fde3 100644
--- a/drivers/gpu/drm/xe/xe_eu_stall.c
+++ b/drivers/gpu/drm/xe/xe_eu_stall.c
@@ -617,9 +617,8 @@ static int xe_eu_stall_data_buf_alloc(struct xe_eu_stall_data_stream *stream,
size = stream->per_xecore_buf_size * last_xecore;
- bo = xe_bo_create_pin_map_at_aligned(tile->xe, tile, NULL,
- size, ~0ull, ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT, SZ_64);
+ bo = xe_bo_create_pin_map_at_novm(tile->xe, tile, size, ~0ull, ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT, SZ_64, false);
if (IS_ERR(bo)) {
kfree(stream->xecore_buf);
return PTR_ERR(bo);
@@ -649,7 +648,7 @@ static int xe_eu_stall_stream_enable(struct xe_eu_stall_data_stream *stream)
return -ETIMEDOUT;
}
- if (XE_WA(gt, 22016596838))
+ if (XE_GT_WA(gt, 22016596838))
xe_gt_mcr_multicast_write(gt, ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DISABLE_DOP_GATING));
@@ -805,7 +804,7 @@ static int xe_eu_stall_disable_locked(struct xe_eu_stall_data_stream *stream)
cancel_delayed_work_sync(&stream->buf_poll_work);
- if (XE_WA(gt, 22016596838))
+ if (XE_GT_WA(gt, 22016596838))
xe_gt_mcr_multicast_write(gt, ROW_CHICKEN2,
_MASKED_BIT_DISABLE(DISABLE_DOP_GATING));
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index 44364c042ad7..7715e74bb945 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -19,6 +19,7 @@
#include "xe_ring_ops_types.h"
#include "xe_sched_job.h"
#include "xe_sync.h"
+#include "xe_svm.h"
#include "xe_vm.h"
/**
@@ -97,9 +98,13 @@
static int xe_exec_fn(struct drm_gpuvm_exec *vm_exec)
{
struct xe_vm *vm = container_of(vm_exec->vm, struct xe_vm, gpuvm);
+ int ret;
/* The fence slot added here is intended for the exec sched job. */
- return xe_vm_validate_rebind(vm, &vm_exec->exec, 1);
+ xe_vm_set_validation_exec(vm, &vm_exec->exec);
+ ret = xe_vm_validate_rebind(vm, &vm_exec->exec, 1);
+ xe_vm_set_validation_exec(vm, NULL);
+ return ret;
}
int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
@@ -115,10 +120,10 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
struct drm_gpuvm_exec vm_exec = {.extra.fn = xe_exec_fn};
struct drm_exec *exec = &vm_exec.exec;
u32 i, num_syncs, num_ufence = 0;
+ struct xe_validation_ctx ctx;
struct xe_sched_job *job;
struct xe_vm *vm;
bool write_locked, skip_retry = false;
- ktime_t end = 0;
int err = 0;
struct xe_hw_engine_group *group;
enum xe_hw_engine_group_execution_mode mode, previous_mode;
@@ -237,17 +242,21 @@ retry:
goto err_unlock_list;
}
- vm_exec.vm = &vm->gpuvm;
- vm_exec.flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
- if (xe_vm_in_lr_mode(vm)) {
- drm_exec_init(exec, vm_exec.flags, 0);
- } else {
- err = drm_gpuvm_exec_lock(&vm_exec);
- if (err) {
- if (xe_vm_validate_should_retry(exec, err, &end))
- err = -EAGAIN;
+ /*
+ * It's OK to block interruptibly here with the vm lock held, since
+ * when a task is frozen during suspend / hibernate, the call will
+ * return -ERESTARTSYS and the IOCTL will be rerun.
+ */
+ err = wait_for_completion_interruptible(&xe->pm_block);
+ if (err)
+ goto err_unlock_list;
+
+ if (!xe_vm_in_lr_mode(vm)) {
+ vm_exec.vm = &vm->gpuvm;
+ vm_exec.flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
+ err = xe_validation_exec_lock(&ctx, &vm_exec, &xe->val);
+ if (err)
goto err_unlock_list;
- }
}
if (xe_vm_is_closed_or_banned(q->vm)) {
@@ -294,7 +303,7 @@ retry:
if (err)
goto err_put_job;
- err = down_read_interruptible(&vm->userptr.notifier_lock);
+ err = xe_svm_notifier_lock_interruptible(vm);
if (err)
goto err_put_job;
@@ -336,12 +345,13 @@ retry:
err_repin:
if (!xe_vm_in_lr_mode(vm))
- up_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_unlock(vm);
err_put_job:
if (err)
xe_sched_job_put(job);
err_exec:
- drm_exec_fini(exec);
+ if (!xe_vm_in_lr_mode(vm))
+ xe_validation_ctx_fini(&ctx);
err_unlock_list:
up_read(&vm->lock);
if (err == -EAGAIN && !skip_retry)
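
For context, the pm_block wait above is one half of a handshake; the suspend/resume side does not appear in this hunk, so the helpers below are assumptions about where the completion is armed and released, illustrating only the intended pairing:

/*
 * Hypothetical illustration of the pm_block handshake; only the
 * interruptible wait appears in this series, the PM-side helpers are
 * assumptions.
 */
static void example_pm_suspend_prepare(struct xe_device *xe)
{
	reinit_completion(&xe->pm_block);	/* assumed: arm the gate */
}

static void example_pm_resume(struct xe_device *xe)
{
	complete_all(&xe->pm_block);		/* assumed: release all waiters */
}

static int example_validating_task(struct xe_device *xe)
{
	/*
	 * As in xe_exec_ioctl(): a task frozen for suspend gets
	 * -ERESTARTSYS here and the IOCTL is transparently rerun.
	 */
	return wait_for_completion_interruptible(&xe->pm_block);
}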
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 8991b4aed440..37b2b93b73d6 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -12,6 +12,7 @@
#include <drm/drm_file.h>
#include <uapi/drm/xe_drm.h>
+#include "xe_dep_scheduler.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_hw_engine_class_sysfs.h"
@@ -39,6 +40,12 @@ static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue
static void __xe_exec_queue_free(struct xe_exec_queue *q)
{
+ int i;
+
+ for (i = 0; i < XE_EXEC_QUEUE_TLB_INVAL_COUNT; ++i)
+ if (q->tlb_inval[i].dep_scheduler)
+ xe_dep_scheduler_fini(q->tlb_inval[i].dep_scheduler);
+
if (xe_exec_queue_uses_pxp(q))
xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);
if (q->vm)
@@ -50,6 +57,39 @@ static void __xe_exec_queue_free(struct xe_exec_queue *q)
kfree(q);
}
+static int alloc_dep_schedulers(struct xe_device *xe, struct xe_exec_queue *q)
+{
+ struct xe_tile *tile = gt_to_tile(q->gt);
+ int i;
+
+ for (i = 0; i < XE_EXEC_QUEUE_TLB_INVAL_COUNT; ++i) {
+ struct xe_dep_scheduler *dep_scheduler;
+ struct xe_gt *gt;
+ struct workqueue_struct *wq;
+
+ if (i == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT)
+ gt = tile->primary_gt;
+ else
+ gt = tile->media_gt;
+
+ if (!gt)
+ continue;
+
+ wq = gt->tlb_inval.job_wq;
+
+#define MAX_TLB_INVAL_JOBS 16 /* Picking a reasonable value */
+ dep_scheduler = xe_dep_scheduler_create(xe, wq, q->name,
+ MAX_TLB_INVAL_JOBS);
+ if (IS_ERR(dep_scheduler))
+ return PTR_ERR(dep_scheduler);
+
+ q->tlb_inval[i].dep_scheduler = dep_scheduler;
+ }
+#undef MAX_TLB_INVAL_JOBS
+
+ return 0;
+}
+
static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
struct xe_vm *vm,
u32 logical_mask,
@@ -94,6 +134,14 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
else
q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;
+ if (q->flags & (EXEC_QUEUE_FLAG_MIGRATE | EXEC_QUEUE_FLAG_VM)) {
+ err = alloc_dep_schedulers(xe, q);
+ if (err) {
+ __xe_exec_queue_free(q);
+ return ERR_PTR(err);
+ }
+ }
+
if (vm)
q->vm = xe_vm_get(vm);
@@ -151,6 +199,16 @@ err_lrc:
return err;
}
+static void __xe_exec_queue_fini(struct xe_exec_queue *q)
+{
+ int i;
+
+ q->ops->fini(q);
+
+ for (i = 0; i < q->width; ++i)
+ xe_lrc_put(q->lrc[i]);
+}
+
struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
u32 logical_mask, u16 width,
struct xe_hw_engine *hwe, u32 flags,
@@ -181,11 +239,13 @@ struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *v
if (xe_exec_queue_uses_pxp(q)) {
err = xe_pxp_exec_queue_add(xe->pxp, q);
if (err)
- goto err_post_alloc;
+ goto err_post_init;
}
return q;
+err_post_init:
+ __xe_exec_queue_fini(q);
err_post_alloc:
__xe_exec_queue_free(q);
return ERR_PTR(err);
@@ -283,13 +343,11 @@ void xe_exec_queue_destroy(struct kref *ref)
xe_exec_queue_put(eq);
}
- q->ops->fini(q);
+ q->ops->destroy(q);
}
void xe_exec_queue_fini(struct xe_exec_queue *q)
{
- int i;
-
/*
* Before releasing our ref to lrc and xef, accumulate our run ticks
* and wakeup any waiters.
@@ -298,9 +356,7 @@ void xe_exec_queue_fini(struct xe_exec_queue *q)
if (q->xef && atomic_dec_and_test(&q->xef->exec_queue.pending_removal))
wake_up_var(&q->xef->exec_queue.pending_removal);
- for (i = 0; i < q->width; ++i)
- xe_lrc_put(q->lrc[i]);
-
+ __xe_exec_queue_fini(q);
__xe_exec_queue_free(q);
}
@@ -742,6 +798,21 @@ int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
}
/**
+ * xe_exec_queue_lrc() - Get the LRC from exec queue.
+ * @q: The exec_queue.
+ *
+ * Retrieves the primary LRC for the exec queue. Note that this function
+ * returns only the first LRC instance, even when multiple parallel LRCs
+ * are configured.
+ *
+ * Return: Pointer to the first LRC of the exec queue.
+ */
+struct xe_lrc *xe_exec_queue_lrc(struct xe_exec_queue *q)
+{
+ return q->lrc[0];
+}
+
+/**
* xe_exec_queue_is_lr() - Whether an exec_queue is long-running
* @q: The exec_queue
*
@@ -1028,3 +1099,51 @@ int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q, struct xe_vm *vm)
return err;
}
+
+/**
+ * xe_exec_queue_contexts_hwsp_rebase - Re-compute GGTT references
+ * within all LRCs of a queue.
+ * @q: the &xe_exec_queue struct instance containing target LRCs
+ * @scratch: scratch buffer to be used as temporary storage
+ *
+ * Returns: zero on success, negative error code on failure
+ */
+int xe_exec_queue_contexts_hwsp_rebase(struct xe_exec_queue *q, void *scratch)
+{
+ int i;
+ int err = 0;
+
+ for (i = 0; i < q->width; ++i) {
+ xe_lrc_update_memirq_regs_with_address(q->lrc[i], q->hwe, scratch);
+ xe_lrc_update_hwctx_regs_with_address(q->lrc[i]);
+ err = xe_lrc_setup_wa_bb_with_scratch(q->lrc[i], q->hwe, scratch);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * xe_exec_queue_jobs_ring_restore - Re-emit ring commands of requests pending on the given queue.
+ * @q: the &xe_exec_queue struct instance
+ */
+void xe_exec_queue_jobs_ring_restore(struct xe_exec_queue *q)
+{
+ struct xe_gpu_scheduler *sched = &q->guc->sched;
+ struct xe_sched_job *job;
+
+ /*
+ * This routine is used within VF migration recovery. This means
+ * using the lock here introduces a restriction: we cannot wait
+ * for any GFX HW response while the lock is taken.
+ */
+ spin_lock(&sched->base.job_list_lock);
+ list_for_each_entry(job, &sched->base.pending_list, drm.list) {
+ if (xe_sched_job_is_error(job))
+ continue;
+
+ q->ring_ops->emit_job(job);
+ }
+ spin_unlock(&sched->base.job_list_lock);
+}
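
Taken together, the two helpers above are intended to be combined during VF migration recovery; the wrapper below is a hypothetical illustration of that ordering (rebase GGTT references first, then re-emit pending ring commands):

/*
 * Hypothetical wrapper showing the intended ordering of the two helpers
 * above; the caller, locking and scratch buffer management are
 * illustrative only.
 */
static int example_recover_queue(struct xe_exec_queue *q, void *scratch)
{
	int err;

	/* Re-point GGTT references inside every LRC of the queue... */
	err = xe_exec_queue_contexts_hwsp_rebase(q, scratch);
	if (err)
		return err;

	/* ...then re-emit ring commands of jobs already on the pending list. */
	xe_exec_queue_jobs_ring_restore(q);

	return 0;
}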
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.h b/drivers/gpu/drm/xe/xe_exec_queue.h
index 17bc50a7f05a..15ec852e7f7e 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue.h
@@ -90,4 +90,9 @@ int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q,
struct xe_vm *vm);
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q);
+int xe_exec_queue_contexts_hwsp_rebase(struct xe_exec_queue *q, void *scratch);
+
+void xe_exec_queue_jobs_ring_restore(struct xe_exec_queue *q);
+
+struct xe_lrc *xe_exec_queue_lrc(struct xe_exec_queue *q);
#endif
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index cc1cffb5c87f..27b76cf9da89 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -87,6 +87,8 @@ struct xe_exec_queue {
#define EXEC_QUEUE_FLAG_HIGH_PRIORITY BIT(4)
/* flag to indicate low latency hint to guc */
#define EXEC_QUEUE_FLAG_LOW_LATENCY BIT(5)
+/* for migration (kernel copy, clear, bind) jobs */
+#define EXEC_QUEUE_FLAG_MIGRATE BIT(6)
/**
* @flags: flags for this exec queue, should statically setup aside from ban
@@ -132,6 +134,19 @@ struct xe_exec_queue {
struct list_head link;
} lr;
+#define XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT 0
+#define XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT 1
+#define XE_EXEC_QUEUE_TLB_INVAL_COUNT (XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT + 1)
+
+ /** @tlb_inval: TLB invalidations exec queue state */
+ struct {
+ /**
+ * @tlb_inval.dep_scheduler: The TLB invalidation
+ * dependency scheduler
+ */
+ struct xe_dep_scheduler *dep_scheduler;
+ } tlb_inval[XE_EXEC_QUEUE_TLB_INVAL_COUNT];
+
/** @pxp: PXP info tracking */
struct {
/** @pxp.type: PXP session type used by this queue */
@@ -166,8 +181,14 @@ struct xe_exec_queue_ops {
int (*init)(struct xe_exec_queue *q);
/** @kill: Kill inflight submissions for backend */
void (*kill)(struct xe_exec_queue *q);
- /** @fini: Fini exec queue for submission backend */
+ /** @fini: Undoes the init() for submission backend */
void (*fini)(struct xe_exec_queue *q);
+ /**
+ * @destroy: Destroy exec queue for submission backend. The backend
+ * function must call xe_exec_queue_fini() (which will in turn call the
+ * fini() backend function) to ensure the queue is properly cleaned up.
+ */
+ void (*destroy)(struct xe_exec_queue *q);
/** @set_priority: Set priority for exec queue */
int (*set_priority)(struct xe_exec_queue *q,
enum xe_exec_queue_priority priority);
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index 788f56b066b6..f83d421ac9d3 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -385,10 +385,20 @@ err_free:
return err;
}
-static void execlist_exec_queue_fini_async(struct work_struct *w)
+static void execlist_exec_queue_fini(struct xe_exec_queue *q)
+{
+ struct xe_execlist_exec_queue *exl = q->execlist;
+
+ drm_sched_entity_fini(&exl->entity);
+ drm_sched_fini(&exl->sched);
+
+ kfree(exl);
+}
+
+static void execlist_exec_queue_destroy_async(struct work_struct *w)
{
struct xe_execlist_exec_queue *ee =
- container_of(w, struct xe_execlist_exec_queue, fini_async);
+ container_of(w, struct xe_execlist_exec_queue, destroy_async);
struct xe_exec_queue *q = ee->q;
struct xe_execlist_exec_queue *exl = q->execlist;
struct xe_device *xe = gt_to_xe(q->gt);
@@ -401,10 +411,6 @@ static void execlist_exec_queue_fini_async(struct work_struct *w)
list_del(&exl->active_link);
spin_unlock_irqrestore(&exl->port->lock, flags);
- drm_sched_entity_fini(&exl->entity);
- drm_sched_fini(&exl->sched);
- kfree(exl);
-
xe_exec_queue_fini(q);
}
@@ -413,10 +419,10 @@ static void execlist_exec_queue_kill(struct xe_exec_queue *q)
/* NIY */
}
-static void execlist_exec_queue_fini(struct xe_exec_queue *q)
+static void execlist_exec_queue_destroy(struct xe_exec_queue *q)
{
- INIT_WORK(&q->execlist->fini_async, execlist_exec_queue_fini_async);
- queue_work(system_unbound_wq, &q->execlist->fini_async);
+ INIT_WORK(&q->execlist->destroy_async, execlist_exec_queue_destroy_async);
+ queue_work(system_unbound_wq, &q->execlist->destroy_async);
}
static int execlist_exec_queue_set_priority(struct xe_exec_queue *q,
@@ -467,6 +473,7 @@ static const struct xe_exec_queue_ops execlist_exec_queue_ops = {
.init = execlist_exec_queue_init,
.kill = execlist_exec_queue_kill,
.fini = execlist_exec_queue_fini,
+ .destroy = execlist_exec_queue_destroy,
.set_priority = execlist_exec_queue_set_priority,
.set_timeslice = execlist_exec_queue_set_timeslice,
.set_preempt_timeout = execlist_exec_queue_set_preempt_timeout,
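
The execlist conversion above is a direct instance of the destroy() contract documented in xe_exec_queue_types.h: destroy() merely queues work, and the worker performs the sleeping teardown before xe_exec_queue_fini() runs the backend fini(). A condensed sketch of the pattern (names follow the code above, the body is abridged):

/*
 * Condensed sketch of the destroy-via-work pattern: destroy() only queues
 * work, and the worker does the sleeping teardown, ending with
 * xe_exec_queue_fini() (which invokes the backend's fini()).
 */
static void example_destroy_async(struct work_struct *w)
{
	struct xe_execlist_exec_queue *exl =
		container_of(w, struct xe_execlist_exec_queue, destroy_async);

	/* ... unlink from the port under its lock, as above ... */
	xe_exec_queue_fini(exl->q);	/* may sleep; safe in a worker */
}

static void example_destroy(struct xe_exec_queue *q)
{
	INIT_WORK(&q->execlist->destroy_async, example_destroy_async);
	queue_work(system_unbound_wq, &q->execlist->destroy_async);
}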
diff --git a/drivers/gpu/drm/xe/xe_execlist_types.h b/drivers/gpu/drm/xe/xe_execlist_types.h
index 415140936f11..92c4ba52db0c 100644
--- a/drivers/gpu/drm/xe/xe_execlist_types.h
+++ b/drivers/gpu/drm/xe/xe_execlist_types.h
@@ -42,7 +42,7 @@ struct xe_execlist_exec_queue {
bool has_run;
- struct work_struct fini_async;
+ struct work_struct destroy_async;
enum xe_exec_queue_priority active_priority;
struct list_head active_link;
diff --git a/drivers/gpu/drm/xe/xe_gen_wa_oob.c b/drivers/gpu/drm/xe/xe_gen_wa_oob.c
index 6581cb0f0e59..247e41c1c48d 100644
--- a/drivers/gpu/drm/xe/xe_gen_wa_oob.c
+++ b/drivers/gpu/drm/xe/xe_gen_wa_oob.c
@@ -123,11 +123,19 @@ static int parse(FILE *input, FILE *csource, FILE *cheader, char *prefix)
return 0;
}
+/* Avoid GNU vs POSIX basename() discrepancy, just use our own */
+static const char *xbasename(const char *s)
+{
+ const char *p = strrchr(s, '/');
+
+ return p ? p + 1 : s;
+}
+
static int fn_to_prefix(const char *fn, char *prefix, size_t size)
{
size_t len;
- fn = basename(fn);
+ fn = xbasename(fn);
len = strlen(fn);
if (len > size - 1)
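
The discrepancy xbasename() sidesteps: glibc exposes a GNU basename() (via <string.h> with _GNU_SOURCE) that never modifies its argument, while the POSIX basename() from <libgen.h> may write into the buffer and strips trailing slashes — and which one a translation unit gets depends on its includes. A standalone userspace demonstration of the safe replacement (file name and build line are illustrative):

/*
 * Standalone demo of the basename() portability trap that xbasename()
 * avoids; build with: cc -Wall demo.c -o demo (file name illustrative).
 */
#include <libgen.h>	/* POSIX basename(): may modify its argument */
#include <stdio.h>
#include <string.h>

static const char *xbasename(const char *s)
{
	const char *p = strrchr(s, '/');

	return p ? p + 1 : s;
}

int main(void)
{
	char path[] = "/some/dir/file.c";

	/* Never touches the buffer, needs no allocation, no libc variance. */
	printf("xbasename: %s\n", xbasename(path));	/* file.c */

	/* POSIX basename() is allowed to modify 'path' in place. */
	printf("basename:  %s\n", basename(path));	/* file.c */

	return 0;
}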
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index 29d4d3f51da1..7fdd0a97a628 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -23,13 +23,14 @@
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
-#include "xe_gt_tlb_invalidation.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pm.h"
#include "xe_res_cursor.h"
#include "xe_sriov.h"
+#include "xe_tile_printk.h"
#include "xe_tile_sriov_vf.h"
+#include "xe_tlb_inval.h"
#include "xe_wa.h"
#include "xe_wopcm.h"
@@ -106,10 +107,10 @@ static unsigned int probe_gsm_size(struct pci_dev *pdev)
static void ggtt_update_access_counter(struct xe_ggtt *ggtt)
{
struct xe_tile *tile = ggtt->tile;
- struct xe_gt *affected_gt = XE_WA(tile->primary_gt, 22019338487) ?
+ struct xe_gt *affected_gt = XE_GT_WA(tile->primary_gt, 22019338487) ?
tile->primary_gt : tile->media_gt;
struct xe_mmio *mmio = &affected_gt->mmio;
- u32 max_gtt_writes = XE_WA(ggtt->tile->primary_gt, 22019338487) ? 1100 : 63;
+ u32 max_gtt_writes = XE_GT_WA(ggtt->tile->primary_gt, 22019338487) ? 1100 : 63;
/*
* Wa_22019338487: GMD_ID is a RO register, a dummy write forces gunit
* to wait for completion of prior GTT writes before letting this through.
@@ -269,7 +270,7 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
gsm_size = probe_gsm_size(pdev);
if (gsm_size == 0) {
- drm_err(&xe->drm, "Hardware reported no preallocated GSM\n");
+ xe_tile_err(ggtt->tile, "Hardware reported no preallocated GSM\n");
return -ENOMEM;
}
@@ -284,8 +285,8 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
if (GRAPHICS_VERx100(xe) >= 1270)
ggtt->pt_ops = (ggtt->tile->media_gt &&
- XE_WA(ggtt->tile->media_gt, 22019338487)) ||
- XE_WA(ggtt->tile->primary_gt, 22019338487) ?
+ XE_GT_WA(ggtt->tile->media_gt, 22019338487)) ||
+ XE_GT_WA(ggtt->tile->primary_gt, 22019338487) ?
&xelpg_pt_wa_ops : &xelpg_pt_ops;
else
ggtt->pt_ops = &xelp_pt_ops;
@@ -438,9 +439,8 @@ static void ggtt_invalidate_gt_tlb(struct xe_gt *gt)
if (!gt)
return;
- err = xe_gt_tlb_invalidation_ggtt(gt);
- if (err)
- drm_warn(&gt_to_xe(gt)->drm, "xe_gt_tlb_invalidation_ggtt error=%d", err);
+ err = xe_tlb_inval_ggtt(&gt->tlb_inval);
+ xe_gt_WARN(gt, err, "Failed to invalidate GGTT (%pe)", ERR_PTR(err));
}
static void xe_ggtt_invalidate(struct xe_ggtt *ggtt)
@@ -467,8 +467,8 @@ static void xe_ggtt_dump_node(struct xe_ggtt *ggtt,
if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
string_get_size(node->size, 1, STRING_UNITS_2, buf, sizeof(buf));
- xe_gt_dbg(ggtt->tile->primary_gt, "GGTT %#llx-%#llx (%s) %s\n",
- node->start, node->start + node->size, buf, description);
+ xe_tile_dbg(ggtt->tile, "GGTT %#llx-%#llx (%s) %s\n",
+ node->start, node->start + node->size, buf, description);
}
}
@@ -500,9 +500,8 @@ int xe_ggtt_node_insert_balloon_locked(struct xe_ggtt_node *node, u64 start, u64
err = drm_mm_reserve_node(&ggtt->mm, &node->base);
- if (xe_gt_WARN(ggtt->tile->primary_gt, err,
- "Failed to balloon GGTT %#llx-%#llx (%pe)\n",
- node->base.start, node->base.start + node->base.size, ERR_PTR(err)))
+ if (xe_tile_WARN(ggtt->tile, err, "Failed to balloon GGTT %#llx-%#llx (%pe)\n",
+ node->base.start, node->base.start + node->base.size, ERR_PTR(err)))
return err;
xe_ggtt_dump_node(ggtt, &node->base, "balloon");
@@ -732,7 +731,7 @@ void xe_ggtt_map_bo_unlocked(struct xe_ggtt *ggtt, struct xe_bo *bo)
}
static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
- u64 start, u64 end)
+ u64 start, u64 end, struct drm_exec *exec)
{
u64 alignment = bo->min_align > 0 ? bo->min_align : XE_PAGE_SIZE;
u8 tile_id = ggtt->tile->id;
@@ -747,7 +746,7 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
return 0;
}
- err = xe_bo_validate(bo, NULL, false);
+ err = xe_bo_validate(bo, NULL, false, exec);
if (err)
return err;
@@ -789,25 +788,28 @@ out:
* @bo: the &xe_bo to be inserted
* @start: address where it will be inserted
* @end: end of the range where it will be inserted
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
*
* Return: 0 on success or a negative error code on failure.
*/
int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
- u64 start, u64 end)
+ u64 start, u64 end, struct drm_exec *exec)
{
- return __xe_ggtt_insert_bo_at(ggtt, bo, start, end);
+ return __xe_ggtt_insert_bo_at(ggtt, bo, start, end, exec);
}
/**
* xe_ggtt_insert_bo - Insert BO into GGTT
* @ggtt: the &xe_ggtt where bo will be inserted
* @bo: the &xe_bo to be inserted
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
*
* Return: 0 on success or a negative error code on failure.
*/
-int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
+int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo,
+ struct drm_exec *exec)
{
- return __xe_ggtt_insert_bo_at(ggtt, bo, 0, U64_MAX);
+ return __xe_ggtt_insert_bo_at(ggtt, bo, 0, U64_MAX, exec);
}
/**
diff --git a/drivers/gpu/drm/xe/xe_ggtt.h b/drivers/gpu/drm/xe/xe_ggtt.h
index fbe1e397d05d..75fc7a1efea7 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.h
+++ b/drivers/gpu/drm/xe/xe_ggtt.h
@@ -10,6 +10,7 @@
struct drm_printer;
struct xe_tile;
+struct drm_exec;
struct xe_ggtt *xe_ggtt_alloc(struct xe_tile *tile);
int xe_ggtt_init_early(struct xe_ggtt *ggtt);
@@ -31,9 +32,9 @@ bool xe_ggtt_node_allocated(const struct xe_ggtt_node *node);
void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_ggtt_node *node,
struct xe_bo *bo, u16 pat_index);
void xe_ggtt_map_bo_unlocked(struct xe_ggtt *ggtt, struct xe_bo *bo);
-int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
+int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo, struct drm_exec *exec);
int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
- u64 start, u64 end);
+ u64 start, u64 end, struct drm_exec *exec);
void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
u64 xe_ggtt_largest_hole(struct xe_ggtt *ggtt, u64 alignment, u64 *spare);
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.c b/drivers/gpu/drm/xe/xe_gpu_scheduler.c
index 869b43a4151d..455ccaf17314 100644
--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.c
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.c
@@ -101,6 +101,19 @@ void xe_sched_submission_stop(struct xe_gpu_scheduler *sched)
cancel_work_sync(&sched->work_process_msg);
}
+/**
+ * xe_sched_submission_stop_async - Stop further runs of submission tasks on a scheduler.
+ * @sched: the &xe_gpu_scheduler struct instance
+ *
+ * This call disables further runs of the scheduling work queue. It does not wait
+ * for any in-progress runs to finish, only makes sure no further runs happen
+ * afterwards.
+ */
+void xe_sched_submission_stop_async(struct xe_gpu_scheduler *sched)
+{
+ drm_sched_wqueue_stop(&sched->base);
+}
+
void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched)
{
drm_sched_resume_timeout(&sched->base, sched->base.timeout);
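
A plausible use of the new helper, sketched with a hypothetical caller: pause submission on many schedulers quickly with the async variant, then perform the synchronous stop where ordering requires it. Combining the two calls this way is an assumption of the sketch, not something this series does verbatim:

/*
 * Hypothetical caller: quiesce many schedulers without blocking, then do
 * the synchronous stop where needed.
 */
static void example_pause_all(struct xe_gpu_scheduler **scheds, int n)
{
	int i;

	/* First pass: forbid further submission runs, don't wait. */
	for (i = 0; i < n; i++)
		xe_sched_submission_stop_async(scheds[i]);

	/* Second pass: full stop, waits for in-flight work to finish. */
	for (i = 0; i < n; i++)
		xe_sched_submission_stop(scheds[i]);
}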
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.h b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
index 308061f0cf37..e548b2aed95a 100644
--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.h
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
@@ -21,6 +21,7 @@ void xe_sched_fini(struct xe_gpu_scheduler *sched);
void xe_sched_submission_start(struct xe_gpu_scheduler *sched);
void xe_sched_submission_stop(struct xe_gpu_scheduler *sched);
+void xe_sched_submission_stop_async(struct xe_gpu_scheduler *sched);
void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched);
diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c
index 1d84bf2f2cef..83d61bf8ec62 100644
--- a/drivers/gpu/drm/xe/xe_gsc.c
+++ b/drivers/gpu/drm/xe/xe_gsc.c
@@ -136,10 +136,10 @@ static int query_compatibility_version(struct xe_gsc *gsc)
u64 ggtt_offset;
int err;
- bo = xe_bo_create_pin_map(xe, tile, NULL, GSC_VER_PKT_SZ * 2,
- ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM |
- XE_BO_FLAG_GGTT);
+ bo = xe_bo_create_pin_map_novm(xe, tile, GSC_VER_PKT_SZ * 2,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT, false);
if (IS_ERR(bo)) {
xe_gt_err(gt, "failed to allocate bo for GSC version query\n");
return PTR_ERR(bo);
@@ -266,7 +266,7 @@ static int gsc_upload_and_init(struct xe_gsc *gsc)
unsigned int fw_ref;
int ret;
- if (XE_WA(tile->primary_gt, 14018094691)) {
+ if (XE_GT_WA(tile->primary_gt, 14018094691)) {
fw_ref = xe_force_wake_get(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL);
/*
@@ -281,7 +281,7 @@ static int gsc_upload_and_init(struct xe_gsc *gsc)
ret = gsc_upload(gsc);
- if (XE_WA(tile->primary_gt, 14018094691))
+ if (XE_GT_WA(tile->primary_gt, 14018094691))
xe_force_wake_put(gt_to_fw(tile->primary_gt), fw_ref);
if (ret)
@@ -593,7 +593,7 @@ void xe_gsc_wa_14015076503(struct xe_gt *gt, bool prep)
u32 gs1_clr = prep ? 0 : HECI_H_GS1_ER_PREP;
/* WA only applies if the GSC is loaded */
- if (!XE_WA(gt, 14015076503) || !gsc_fw_is_loaded(gt))
+ if (!XE_GT_WA(gt, 14015076503) || !gsc_fw_is_loaded(gt))
return;
xe_mmio_rmw32(&gt->mmio, HECI_H_GS1(MTL_GSC_HECI2_BASE), gs1_clr, gs1_set);
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index c8eda36546d3..3e0ad7e5b5df 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -37,10 +37,10 @@
#include "xe_gt_sriov_pf.h"
#include "xe_gt_sriov_vf.h"
#include "xe_gt_sysfs.h"
-#include "xe_gt_tlb_invalidation.h"
#include "xe_gt_topology.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_guc_pc.h"
+#include "xe_guc_submit.h"
#include "xe_hw_fence.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_irq.h"
@@ -57,6 +57,7 @@
#include "xe_sa.h"
#include "xe_sched_job.h"
#include "xe_sriov.h"
+#include "xe_tlb_inval.h"
#include "xe_tuning.h"
#include "xe_uc.h"
#include "xe_uc_fw.h"
@@ -97,7 +98,7 @@ void xe_gt_sanitize(struct xe_gt *gt)
* FIXME: if xe_uc_sanitize is called here, on TGL driver will not
* reload
*/
- gt->uc.guc.submission_state.enabled = false;
+ xe_guc_submit_disable(&gt->uc.guc);
}
static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
@@ -105,7 +106,7 @@ static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
unsigned int fw_ref;
u32 reg;
- if (!XE_WA(gt, 16023588340))
+ if (!XE_GT_WA(gt, 16023588340))
return;
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
@@ -127,7 +128,7 @@ static void xe_gt_disable_host_l2_vram(struct xe_gt *gt)
unsigned int fw_ref;
u32 reg;
- if (!XE_WA(gt, 16023588340))
+ if (!XE_GT_WA(gt, 16023588340))
return;
if (xe_gt_is_media_type(gt))
@@ -399,7 +400,7 @@ int xe_gt_init_early(struct xe_gt *gt)
xe_reg_sr_init(&gt->reg_sr, "GT", gt_to_xe(gt));
- err = xe_wa_init(gt);
+ err = xe_wa_gt_init(gt);
if (err)
return err;
@@ -407,12 +408,12 @@ int xe_gt_init_early(struct xe_gt *gt)
if (err)
return err;
- xe_wa_process_oob(gt);
+ xe_wa_process_gt_oob(gt);
xe_force_wake_init_gt(gt, gt_to_fw(gt));
spin_lock_init(&gt->global_invl_lock);
- err = xe_gt_tlb_invalidation_init_early(gt);
+ err = xe_gt_tlb_inval_init_early(gt);
if (err)
return err;
@@ -564,11 +565,9 @@ static int gt_init_with_all_forcewake(struct xe_gt *gt)
if (xe_gt_is_main_type(gt)) {
struct xe_tile *tile = gt_to_tile(gt);
- tile->migrate = xe_migrate_init(tile);
- if (IS_ERR(tile->migrate)) {
- err = PTR_ERR(tile->migrate);
+ err = xe_migrate_init(tile->migrate);
+ if (err)
goto err_force_wake;
- }
}
err = xe_uc_load_hw(&gt->uc);
@@ -804,6 +803,11 @@ static int do_gt_restart(struct xe_gt *gt)
return 0;
}
+static int gt_wait_reset_unblock(struct xe_gt *gt)
+{
+ return xe_guc_wait_reset_unblock(&gt->uc.guc);
+}
+
static int gt_reset(struct xe_gt *gt)
{
unsigned int fw_ref;
@@ -818,6 +822,10 @@ static int gt_reset(struct xe_gt *gt)
xe_gt_info(gt, "reset started\n");
+ err = gt_wait_reset_unblock(gt);
+ if (!err)
+ xe_gt_warn(gt, "reset block failed to get lifted");
+
xe_pm_runtime_get(gt_to_xe(gt));
if (xe_fault_inject_gt_reset()) {
@@ -842,7 +850,7 @@ static int gt_reset(struct xe_gt *gt)
xe_uc_stop(&gt->uc);
- xe_gt_tlb_invalidation_reset(gt);
+ xe_tlb_inval_reset(&gt->tlb_inval);
err = do_gt_reset(gt);
if (err)
@@ -958,7 +966,7 @@ int xe_gt_sanitize_freq(struct xe_gt *gt)
if ((!xe_uc_fw_is_available(&gt->uc.gsc.fw) ||
xe_uc_fw_is_loaded(&gt->uc.gsc.fw) ||
xe_uc_fw_is_in_error_state(&gt->uc.gsc.fw)) &&
- XE_WA(gt, 22019338487))
+ XE_GT_WA(gt, 22019338487))
ret = xe_guc_pc_restore_stashed_freq(&gt->uc.guc.pc);
return ret;
@@ -1056,5 +1064,5 @@ void xe_gt_declare_wedged(struct xe_gt *gt)
xe_gt_assert(gt, gt_to_xe(gt)->wedged.mode);
xe_uc_declare_wedged(&gt->uc);
- xe_gt_tlb_invalidation_reset(gt);
+ xe_tlb_inval_reset(&gt->tlb_inval);
}
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
index 848618acdca8..f253e2df4907 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
@@ -29,7 +29,9 @@
#include "xe_pm.h"
#include "xe_reg_sr.h"
#include "xe_reg_whitelist.h"
+#include "xe_sa.h"
#include "xe_sriov.h"
+#include "xe_sriov_vf_ccs.h"
#include "xe_tuning.h"
#include "xe_uc_debugfs.h"
#include "xe_wa.h"
@@ -122,18 +124,6 @@ static int powergate_info(struct xe_gt *gt, struct drm_printer *p)
return ret;
}
-static int sa_info(struct xe_gt *gt, struct drm_printer *p)
-{
- struct xe_tile *tile = gt_to_tile(gt);
-
- xe_pm_runtime_get(gt_to_xe(gt));
- drm_suballoc_dump_debug_info(&tile->mem.kernel_bb_pool->base, p,
- tile->mem.kernel_bb_pool->gpu_addr);
- xe_pm_runtime_put(gt_to_xe(gt));
-
- return 0;
-}
-
static int topology(struct xe_gt *gt, struct drm_printer *p)
{
xe_pm_runtime_get(gt_to_xe(gt));
@@ -288,7 +278,6 @@ static int hwconfig(struct xe_gt *gt, struct drm_printer *p)
* - without access to the PF specific data
*/
static const struct drm_info_list vf_safe_debugfs_list[] = {
- {"sa_info", .show = xe_gt_debugfs_simple_show, .data = sa_info},
{"topology", .show = xe_gt_debugfs_simple_show, .data = topology},
{"ggtt", .show = xe_gt_debugfs_simple_show, .data = ggtt},
{"register-save-restore", .show = xe_gt_debugfs_simple_show, .data = register_save_restore},
@@ -299,7 +288,6 @@ static const struct drm_info_list vf_safe_debugfs_list[] = {
{"default_lrc_bcs", .show = xe_gt_debugfs_simple_show, .data = bcs_default_lrc},
{"default_lrc_vcs", .show = xe_gt_debugfs_simple_show, .data = vcs_default_lrc},
{"default_lrc_vecs", .show = xe_gt_debugfs_simple_show, .data = vecs_default_lrc},
- {"stats", .show = xe_gt_debugfs_simple_show, .data = xe_gt_stats_print_info},
{"hwconfig", .show = xe_gt_debugfs_simple_show, .data = hwconfig},
};
@@ -328,6 +316,24 @@ static ssize_t write_to_gt_call(const char __user *userbuf, size_t count, loff_t
return count;
}
+static ssize_t stats_write(struct file *file, const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct xe_gt *gt = s->private;
+
+ return write_to_gt_call(userbuf, count, ppos, xe_gt_stats_clear, gt);
+}
+
+static int stats_show(struct seq_file *s, void *unused)
+{
+ struct drm_printer p = drm_seq_file_printer(s);
+ struct xe_gt *gt = s->private;
+
+ return xe_gt_stats_print_info(gt, &p);
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(stats);
+
static void force_reset(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
@@ -388,13 +394,18 @@ void xe_gt_debugfs_register(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
struct drm_minor *minor = gt_to_xe(gt)->drm.primary;
+ struct dentry *parent = gt->tile->debugfs;
struct dentry *root;
+ char symlink[16];
char name[8];
xe_gt_assert(gt, minor->debugfs_root);
+ if (IS_ERR(parent))
+ return;
+
snprintf(name, sizeof(name), "gt%d", gt->info.id);
- root = debugfs_create_dir(name, minor->debugfs_root);
+ root = debugfs_create_dir(name, parent);
if (IS_ERR(root)) {
drm_warn(&xe->drm, "Create GT directory failed");
return;
@@ -408,6 +419,7 @@ void xe_gt_debugfs_register(struct xe_gt *gt)
root->d_inode->i_private = gt;
/* VF safe */
+ debugfs_create_file("stats", 0600, root, gt, &stats_fops);
debugfs_create_file("force_reset", 0600, root, gt, &force_reset_fops);
debugfs_create_file("force_reset_sync", 0600, root, gt, &force_reset_sync_fops);
@@ -426,4 +438,11 @@ void xe_gt_debugfs_register(struct xe_gt *gt)
xe_gt_sriov_pf_debugfs_register(gt, root);
else if (IS_SRIOV_VF(xe))
xe_gt_sriov_vf_debugfs_register(gt, root);
+
+ /*
+ * Backwards compatibility only: create a link for legacy clients
+ * that may expect a gt/ directory at the root level, not the tile level.
+ */
+ snprintf(symlink, sizeof(symlink), "tile%u/%s", gt->tile->id, name);
+ debugfs_create_symlink(name, minor->debugfs_root, symlink);
}
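
The stats file above relies on DEFINE_SHOW_STORE_ATTRIBUTE(), which generates <name>_fops from a <name>_show()/<name>_write() pair. A self-contained sketch of the same pattern, with hypothetical names:

/*
 * Self-contained sketch of the DEFINE_SHOW_STORE_ATTRIBUTE() pattern used
 * for the "stats" file above; all names here are hypothetical.
 */
#include <linux/atomic.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static atomic_t demo_counter = ATOMIC_INIT(0);

static int demo_show(struct seq_file *s, void *unused)
{
	seq_printf(s, "count: %d\n", atomic_read(&demo_counter));
	return 0;
}

static ssize_t demo_write(struct file *file, const char __user *userbuf,
			  size_t count, loff_t *ppos)
{
	atomic_set(&demo_counter, 0);	/* any write clears the counter */
	return count;
}
DEFINE_SHOW_STORE_ATTRIBUTE(demo);	/* generates demo_fops */

/* Registration: debugfs_create_file("demo", 0600, parent, NULL, &demo_fops); */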
diff --git a/drivers/gpu/drm/xe/xe_gt_freq.c b/drivers/gpu/drm/xe/xe_gt_freq.c
index 60d9354e7dbf..4ff1b6b58d6b 100644
--- a/drivers/gpu/drm/xe/xe_gt_freq.c
+++ b/drivers/gpu/drm/xe/xe_gt_freq.c
@@ -227,6 +227,33 @@ static ssize_t max_freq_store(struct kobject *kobj,
}
static struct kobj_attribute attr_max_freq = __ATTR_RW(max_freq);
+static ssize_t power_profile_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ struct device *dev = kobj_to_dev(kobj);
+
+ xe_guc_pc_get_power_profile(dev_to_pc(dev), buff);
+
+ return strlen(buff);
+}
+
+static ssize_t power_profile_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct xe_guc_pc *pc = dev_to_pc(dev);
+ int err;
+
+ xe_pm_runtime_get(dev_to_xe(dev));
+ err = xe_guc_pc_set_power_profile(pc, buff);
+ xe_pm_runtime_put(dev_to_xe(dev));
+
+ return err ?: count;
+}
+static struct kobj_attribute attr_power_profile = __ATTR_RW(power_profile);
+
static const struct attribute *freq_attrs[] = {
&attr_act_freq.attr,
&attr_cur_freq.attr,
@@ -236,6 +263,7 @@ static const struct attribute *freq_attrs[] = {
&attr_rpn_freq.attr,
&attr_min_freq.attr,
&attr_max_freq.attr,
+ &attr_power_profile.attr,
NULL
};
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c
index ffb210216aa9..f8950a52d0a4 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.c
+++ b/drivers/gpu/drm/xe/xe_gt_idle.c
@@ -322,15 +322,11 @@ static void gt_idle_fini(void *arg)
{
struct kobject *kobj = arg;
struct xe_gt *gt = kobj_to_gt(kobj->parent);
- unsigned int fw_ref;
xe_gt_idle_disable_pg(gt);
- if (gt_to_xe(gt)->info.skip_guc_pc) {
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (gt_to_xe(gt)->info.skip_guc_pc)
xe_gt_idle_disable_c6(gt);
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
- }
sysfs_remove_files(kobj, gt_idle_attrs);
kobject_put(kobj);
@@ -390,14 +386,23 @@ void xe_gt_idle_enable_c6(struct xe_gt *gt)
RC_CTL_HW_ENABLE | RC_CTL_TO_MODE | RC_CTL_RC6_ENABLE);
}
-void xe_gt_idle_disable_c6(struct xe_gt *gt)
+int xe_gt_idle_disable_c6(struct xe_gt *gt)
{
+ unsigned int fw_ref;
+
xe_device_assert_mem_access(gt_to_xe(gt));
- xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
if (IS_SRIOV_VF(gt_to_xe(gt)))
- return;
+ return 0;
+
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return -ETIMEDOUT;
xe_mmio_write32(&gt->mmio, RC_CONTROL, 0);
xe_mmio_write32(&gt->mmio, RC_STATE, 0);
+
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+
+ return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.h b/drivers/gpu/drm/xe/xe_gt_idle.h
index 591a01e181bc..9c34a155e102 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.h
+++ b/drivers/gpu/drm/xe/xe_gt_idle.h
@@ -13,7 +13,7 @@ struct xe_gt;
int xe_gt_idle_init(struct xe_gt_idle *gtidle);
void xe_gt_idle_enable_c6(struct xe_gt *gt);
-void xe_gt_idle_disable_c6(struct xe_gt *gt);
+int xe_gt_idle_disable_c6(struct xe_gt *gt);
void xe_gt_idle_enable_pg(struct xe_gt *gt);
void xe_gt_idle_disable_pg(struct xe_gt *gt);
int xe_gt_idle_pg_print(struct xe_gt *gt, struct drm_printer *p);
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
index 64a2f0d6aaf9..8fb1cae91724 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.c
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
@@ -46,8 +46,6 @@
* MCR registers are not available on Virtual Function (VF).
*/
-#define STEER_SEMAPHORE XE_REG(0xFD0)
-
static inline struct xe_reg to_xe_reg(struct xe_reg_mcr reg_mcr)
{
return reg_mcr.__reg;
@@ -364,7 +362,7 @@ fallback:
* @group: pointer to storage for steering group ID
* @instance: pointer to storage for steering instance ID
*/
-void xe_gt_mcr_get_dss_steering(struct xe_gt *gt, unsigned int dss, u16 *group, u16 *instance)
+void xe_gt_mcr_get_dss_steering(const struct xe_gt *gt, unsigned int dss, u16 *group, u16 *instance)
{
xe_gt_assert(gt, dss < XE_MAX_DSS_FUSE_BITS);
@@ -533,7 +531,7 @@ void xe_gt_mcr_set_implicit_defaults(struct xe_gt *gt)
u32 steer_val = REG_FIELD_PREP(MCR_SLICE_MASK, 0) |
REG_FIELD_PREP(MCR_SUBSLICE_MASK, 2);
- xe_mmio_write32(&gt->mmio, MCFG_MCR_SELECTOR, steer_val);
+ xe_mmio_write32(&gt->mmio, STEER_SEMAPHORE, steer_val);
xe_mmio_write32(&gt->mmio, SF_MCR_SELECTOR, steer_val);
/*
* For GAM registers, all reads should be directed to instance 1
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.h b/drivers/gpu/drm/xe/xe_gt_mcr.h
index bc06520befab..283a1c9770e2 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.h
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.h
@@ -31,7 +31,8 @@ bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
u8 *group, u8 *instance);
void xe_gt_mcr_steering_dump(struct xe_gt *gt, struct drm_printer *p);
-void xe_gt_mcr_get_dss_steering(struct xe_gt *gt, unsigned int dss, u16 *group, u16 *instance);
+void xe_gt_mcr_get_dss_steering(const struct xe_gt *gt,
+ unsigned int dss, u16 *group, u16 *instance);
u32 xe_gt_mcr_steering_info_to_dss_id(struct xe_gt *gt, u16 group, u16 instance);
/*
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 5a75d56d8558..a054d6010ae0 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -16,13 +16,13 @@
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_gt_stats.h"
-#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_migrate.h"
#include "xe_svm.h"
#include "xe_trace_bo.h"
#include "xe_vm.h"
+#include "xe_vram_types.h"
struct pagefault {
u64 page_addr;
@@ -74,7 +74,7 @@ static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
}
static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
- bool atomic, unsigned int id)
+ bool need_vram_move, struct xe_vram_region *vram)
{
struct xe_bo *bo = xe_vma_bo(vma);
struct xe_vm *vm = xe_vma_vm(vma);
@@ -84,24 +84,11 @@ static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
if (err)
return err;
- if (atomic && IS_DGFX(vm->xe)) {
- if (xe_vma_is_userptr(vma)) {
- err = -EACCES;
- return err;
- }
-
- /* Migrate to VRAM, move should invalidate the VMA first */
- err = xe_bo_migrate(bo, XE_PL_VRAM0 + id);
- if (err)
- return err;
- } else if (bo) {
- /* Create backing store if needed */
- err = xe_bo_validate(bo, vm, true);
- if (err)
- return err;
- }
+ if (!bo)
+ return 0;
- return 0;
+ return need_vram_move ? xe_bo_migrate(bo, vram->placement, NULL, exec) :
+ xe_bo_validate(bo, vm, true, exec);
}
static int handle_vma_pagefault(struct xe_gt *gt, struct xe_vma *vma,
@@ -109,13 +96,17 @@ static int handle_vma_pagefault(struct xe_gt *gt, struct xe_vma *vma,
{
struct xe_vm *vm = xe_vma_vm(vma);
struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_validation_ctx ctx;
struct drm_exec exec;
struct dma_fence *fence;
- ktime_t end = 0;
- int err;
+ int err, needs_vram;
lockdep_assert_held_write(&vm->lock);
+ needs_vram = xe_vma_need_vram_for_atomic(vm->xe, vma, atomic);
+ if (needs_vram < 0 || (needs_vram && xe_vma_is_userptr(vma)))
+ return needs_vram < 0 ? needs_vram : -EACCES;
+
xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_COUNT, 1);
xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_KB, xe_vma_size(vma) / 1024);
@@ -136,22 +127,22 @@ retry_userptr:
}
/* Lock VM and BOs dma-resv */
- drm_exec_init(&exec, 0, 0);
+ xe_validation_ctx_init(&ctx, &vm->xe->val, &exec, (struct xe_val_flags) {});
drm_exec_until_all_locked(&exec) {
- err = xe_pf_begin(&exec, vma, atomic, tile->id);
+ err = xe_pf_begin(&exec, vma, needs_vram == 1, tile->mem.vram);
drm_exec_retry_on_contention(&exec);
- if (xe_vm_validate_should_retry(&exec, err, &end))
- err = -EAGAIN;
+ xe_validation_retry_on_oom(&ctx, &err);
if (err)
goto unlock_dma_resv;
/* Bind VMA only to the GT that has faulted */
trace_xe_vma_pf_bind(vma);
+ xe_vm_set_validation_exec(vm, &exec);
fence = xe_vma_rebind(vm, vma, BIT(tile->id));
+ xe_vm_set_validation_exec(vm, NULL);
if (IS_ERR(fence)) {
err = PTR_ERR(fence);
- if (xe_vm_validate_should_retry(&exec, err, &end))
- err = -EAGAIN;
+ xe_validation_retry_on_oom(&ctx, &err);
goto unlock_dma_resv;
}
}
@@ -160,7 +151,7 @@ retry_userptr:
dma_fence_put(fence);
unlock_dma_resv:
- drm_exec_fini(&exec);
+ xe_validation_ctx_fini(&ctx);
if (err == -EAGAIN)
goto retry_userptr;
@@ -542,6 +533,7 @@ static int handle_acc(struct xe_gt *gt, struct acc *acc)
{
struct xe_device *xe = gt_to_xe(gt);
struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_validation_ctx ctx;
struct drm_exec exec;
struct xe_vm *vm;
struct xe_vma *vma;
@@ -571,15 +563,14 @@ static int handle_acc(struct xe_gt *gt, struct acc *acc)
goto unlock_vm;
/* Lock VM and BOs dma-resv */
- drm_exec_init(&exec, 0, 0);
+ xe_validation_ctx_init(&ctx, &vm->xe->val, &exec, (struct xe_val_flags) {});
drm_exec_until_all_locked(&exec) {
- ret = xe_pf_begin(&exec, vma, true, tile->id);
+ ret = xe_pf_begin(&exec, vma, IS_DGFX(vm->xe), tile->mem.vram);
drm_exec_retry_on_contention(&exec);
- if (ret)
- break;
+ xe_validation_retry_on_oom(&ctx, &ret);
}
- drm_exec_fini(&exec);
+ xe_validation_ctx_fini(&ctx);
unlock_vm:
up_read(&vm->lock);
xe_vm_put(vm);
diff --git a/drivers/gpu/drm/xe/xe_gt_printk.h b/drivers/gpu/drm/xe/xe_gt_printk.h
index 11da0228cea7..1313d32862db 100644
--- a/drivers/gpu/drm/xe/xe_gt_printk.h
+++ b/drivers/gpu/drm/xe/xe_gt_printk.h
@@ -6,18 +6,22 @@
#ifndef _XE_GT_PRINTK_H_
#define _XE_GT_PRINTK_H_
-#include <drm/drm_print.h>
-
#include "xe_gt_types.h"
+#include "xe_tile_printk.h"
+
+#define __XE_GT_PRINTK_FMT(_gt, _fmt, _args...) "GT%u: " _fmt, (_gt)->info.id, ##_args
#define xe_gt_printk(_gt, _level, _fmt, ...) \
- drm_##_level(&gt_to_xe(_gt)->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+ xe_tile_printk((_gt)->tile, _level, __XE_GT_PRINTK_FMT((_gt), _fmt, ##__VA_ARGS__))
+
+#define xe_gt_err(_gt, _fmt, ...) \
+ xe_gt_printk((_gt), err, _fmt, ##__VA_ARGS__)
#define xe_gt_err_once(_gt, _fmt, ...) \
xe_gt_printk((_gt), err_once, _fmt, ##__VA_ARGS__)
-#define xe_gt_err(_gt, _fmt, ...) \
- xe_gt_printk((_gt), err, _fmt, ##__VA_ARGS__)
+#define xe_gt_err_ratelimited(_gt, _fmt, ...) \
+ xe_gt_printk((_gt), err_ratelimited, _fmt, ##__VA_ARGS__)
#define xe_gt_warn(_gt, _fmt, ...) \
xe_gt_printk((_gt), warn, _fmt, ##__VA_ARGS__)
@@ -31,20 +35,20 @@
#define xe_gt_dbg(_gt, _fmt, ...) \
xe_gt_printk((_gt), dbg, _fmt, ##__VA_ARGS__)
-#define xe_gt_err_ratelimited(_gt, _fmt, ...) \
- xe_gt_printk((_gt), err_ratelimited, _fmt, ##__VA_ARGS__)
+#define xe_gt_WARN_type(_gt, _type, _condition, _fmt, ...) \
+ xe_tile_WARN##_type((_gt)->tile, _condition, _fmt, ## __VA_ARGS__)
#define xe_gt_WARN(_gt, _condition, _fmt, ...) \
- drm_WARN(&gt_to_xe(_gt)->drm, _condition, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+ xe_gt_WARN_type((_gt),, _condition, __XE_GT_PRINTK_FMT((_gt), _fmt, ##__VA_ARGS__))
#define xe_gt_WARN_ONCE(_gt, _condition, _fmt, ...) \
- drm_WARN_ONCE(&gt_to_xe(_gt)->drm, _condition, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+ xe_gt_WARN_type((_gt), _ONCE, _condition, __XE_GT_PRINTK_FMT((_gt), _fmt, ##__VA_ARGS__))
#define xe_gt_WARN_ON(_gt, _condition) \
- xe_gt_WARN((_gt), _condition, "%s(%s)", "gt_WARN_ON", __stringify(_condition))
+ xe_gt_WARN((_gt), _condition, "%s(%s)", "WARN_ON", __stringify(_condition))
#define xe_gt_WARN_ON_ONCE(_gt, _condition) \
- xe_gt_WARN_ONCE((_gt), _condition, "%s(%s)", "gt_WARN_ON_ONCE", __stringify(_condition))
+ xe_gt_WARN_ONCE((_gt), _condition, "%s(%s)", "WARN_ON_ONCE", __stringify(_condition))
static inline void __xe_gt_printfn_err(struct drm_printer *p, struct va_format *vaf)
{
@@ -67,12 +71,12 @@ static inline void __xe_gt_printfn_dbg(struct drm_printer *p, struct va_format *
/*
* The original xe_gt_dbg() callsite annotations are useless here,
- * redirect to the tweaked drm_dbg_printer() instead.
+ * redirect to the tweaked xe_tile_dbg_printer() instead.
*/
- dbg = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, NULL);
+ dbg = xe_tile_dbg_printer((gt)->tile);
dbg.origin = p->origin;
- drm_printf(&dbg, "GT%u: %pV", gt->info.id, vaf);
+ drm_printf(&dbg, __XE_GT_PRINTK_FMT(gt, "%pV", vaf));
}
/**
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
index 35489fa81825..c4dda87b47cc 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
@@ -16,6 +16,7 @@
#include "xe_gt_sriov_pf_migration.h"
#include "xe_gt_sriov_pf_service.h"
#include "xe_gt_sriov_printk.h"
+#include "xe_guc_submit.h"
#include "xe_mmio.h"
#include "xe_pm.h"
@@ -47,9 +48,21 @@ static int pf_alloc_metadata(struct xe_gt *gt)
static void pf_init_workers(struct xe_gt *gt)
{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
INIT_WORK(&gt->sriov.pf.workers.restart, pf_worker_restart_func);
}
+static void pf_fini_workers(struct xe_gt *gt)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+
+ if (disable_work_sync(&gt->sriov.pf.workers.restart)) {
+ xe_gt_sriov_dbg_verbose(gt, "pending restart disabled!\n");
+ /* release an rpm reference taken on the worker's behalf */
+ xe_pm_runtime_put(gt_to_xe(gt));
+ }
+}
+
/**
* xe_gt_sriov_pf_init_early - Prepare SR-IOV PF data structures on PF.
* @gt: the &xe_gt to initialize
@@ -79,6 +92,21 @@ int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
return 0;
}
+static void pf_fini_action(void *arg)
+{
+ struct xe_gt *gt = arg;
+
+ pf_fini_workers(gt);
+}
+
+static int pf_init_late(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ xe_gt_assert(gt, IS_SRIOV_PF(xe));
+ return devm_add_action_or_reset(xe->drm.dev, pf_fini_action, gt);
+}
+
/**
* xe_gt_sriov_pf_init - Prepare SR-IOV PF data structures on PF.
* @gt: the &xe_gt to initialize
@@ -95,7 +123,15 @@ int xe_gt_sriov_pf_init(struct xe_gt *gt)
if (err)
return err;
- return xe_gt_sriov_pf_migration_init(gt);
+ err = xe_gt_sriov_pf_migration_init(gt);
+ if (err)
+ return err;
+
+ err = pf_init_late(gt);
+ if (err)
+ return err;
+
+ return 0;
}
static bool pf_needs_enable_ggtt_guest_update(struct xe_device *xe)
@@ -176,8 +212,11 @@ static void pf_cancel_restart(struct xe_gt *gt)
{
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
- if (cancel_work_sync(&gt->sriov.pf.workers.restart))
+ if (cancel_work_sync(&gt->sriov.pf.workers.restart)) {
xe_gt_sriov_dbg_verbose(gt, "pending restart canceled!\n");
+ /* release an rpm reference taken on the worker's behalf */
+ xe_pm_runtime_put(gt_to_xe(gt));
+ }
}
/**
@@ -195,9 +234,12 @@ static void pf_restart(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
- xe_pm_runtime_get(xe);
+ xe_gt_assert(gt, !xe_pm_runtime_suspended(xe));
+
xe_gt_sriov_pf_config_restart(gt);
xe_gt_sriov_pf_control_restart(gt);
+
+ /* release an rpm reference taken on our behalf */
xe_pm_runtime_put(xe);
xe_gt_sriov_dbg(gt, "restart completed\n");
@@ -216,8 +258,13 @@ static void pf_queue_restart(struct xe_gt *gt)
xe_gt_assert(gt, IS_SRIOV_PF(xe));
- if (!queue_work(xe->sriov.wq, &gt->sriov.pf.workers.restart))
+ /* take an rpm reference on behalf of the worker */
+ xe_pm_runtime_get_noresume(xe);
+
+ if (!queue_work(xe->sriov.wq, &gt->sriov.pf.workers.restart)) {
xe_gt_sriov_dbg(gt, "restart already in queue!\n");
+ xe_pm_runtime_put(xe);
+ }
}
/**
@@ -230,3 +277,27 @@ void xe_gt_sriov_pf_restart(struct xe_gt *gt)
{
pf_queue_restart(gt);
}
+
+static void pf_flush_restart(struct xe_gt *gt)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ flush_work(&gt->sriov.pf.workers.restart);
+}
+
+/**
+ * xe_gt_sriov_pf_wait_ready() - Wait until per-GT PF SR-IOV support is ready.
+ * @gt: the &xe_gt
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_wait_ready(struct xe_gt *gt)
+{
+ /* don't wait if there is another ongoing reset */
+ if (xe_guc_read_stopped(&gt->uc.guc))
+ return -EBUSY;
+
+ pf_flush_restart(gt);
+ return 0;
+}
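
The rpm accounting threaded through pf_queue_restart(), pf_restart() and the cancel/disable paths above follows a hand-off discipline: the queuing side takes a no-resume reference on the worker's behalf and exactly one of three paths releases it. A schematic with hypothetical names (example_get_xe() is assumed):

/*
 * Schematic of the rpm hand-off discipline applied to the restart worker
 * above; names are hypothetical. Exactly one path releases the reference
 * taken at queue time.
 */
static struct work_struct example_work;

static void example_worker_fn(struct work_struct *w)
{
	struct xe_device *xe = example_get_xe(w);	/* hypothetical lookup */

	/* ... restart processing; rpm is held, so no runtime suspend ... */
	xe_pm_runtime_put(xe);			/* path 1: work ran */
}

static void example_queue_restart(struct xe_device *xe)
{
	xe_pm_runtime_get_noresume(xe);		/* taken on the worker's behalf */
	if (!queue_work(xe->sriov.wq, &example_work))
		xe_pm_runtime_put(xe);		/* path 2: already queued */
}

static void example_cancel_restart(struct xe_device *xe)
{
	if (cancel_work_sync(&example_work))
		xe_pm_runtime_put(xe);		/* path 3: work never ran */
}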
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
index e2b2ff8132dc..e7fde3f9937a 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
@@ -11,6 +11,7 @@ struct xe_gt;
#ifdef CONFIG_PCI_IOV
int xe_gt_sriov_pf_init_early(struct xe_gt *gt);
int xe_gt_sriov_pf_init(struct xe_gt *gt);
+int xe_gt_sriov_pf_wait_ready(struct xe_gt *gt);
void xe_gt_sriov_pf_init_hw(struct xe_gt *gt);
void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid);
void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 494909f74eb2..6344b5205c08 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -33,6 +33,7 @@
#include "xe_migrate.h"
#include "xe_sriov.h"
#include "xe_ttm_vram_mgr.h"
+#include "xe_vram_types.h"
#include "xe_wopcm.h"
#define make_u64_from_u32(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))
@@ -1433,7 +1434,8 @@ fail:
return err;
}
-static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
+/* Return: %true if there was an LMEM provisioned, %false otherwise */
+static bool pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt)));
xe_gt_assert(gt, xe_gt_is_main_type(gt));
@@ -1442,7 +1444,9 @@ static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_confi
if (config->lmem_obj) {
xe_bo_unpin_map_no_vm(config->lmem_obj);
config->lmem_obj = NULL;
+ return true;
}
+ return false;
}
static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
@@ -1474,23 +1478,16 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
return 0;
xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M);
- bo = xe_bo_create_locked(xe, tile, NULL,
- ALIGN(size, PAGE_SIZE),
- ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_NEEDS_2M |
- XE_BO_FLAG_PINNED |
- XE_BO_FLAG_PINNED_LATE_RESTORE);
+ bo = xe_bo_create_pin_range_novm(xe, tile,
+ ALIGN(size, PAGE_SIZE), 0, ~0ull,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_NEEDS_2M |
+ XE_BO_FLAG_PINNED |
+ XE_BO_FLAG_PINNED_LATE_RESTORE);
if (IS_ERR(bo))
return PTR_ERR(bo);
- err = xe_bo_pin(bo);
- xe_bo_unlock(bo);
- if (unlikely(err)) {
- xe_bo_put(bo);
- return err;
- }
-
config->lmem_obj = bo;
if (xe_device_has_lmtt(xe)) {
@@ -1604,7 +1601,7 @@ static u64 pf_query_free_lmem(struct xe_gt *gt)
{
struct xe_tile *tile = gt->tile;
- return xe_ttm_vram_get_avail(&tile->mem.vram.ttm.manager);
+ return xe_ttm_vram_get_avail(&tile->mem.vram->ttm.manager);
}
static u64 pf_query_max_lmem(struct xe_gt *gt)
@@ -1632,7 +1629,6 @@ static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
u64 fair;
fair = div_u64(available, num_vfs);
- fair = rounddown_pow_of_two(fair); /* XXX: ttm_vram_mgr & drm_buddy limitation */
fair = ALIGN_DOWN(fair, alignment);
#ifdef MAX_FAIR_LMEM
fair = min_t(u64, MAX_FAIR_LMEM, fair);
@@ -2020,12 +2016,13 @@ static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
{
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
struct xe_device *xe = gt_to_xe(gt);
+ bool released;
if (xe_gt_is_main_type(gt)) {
pf_release_vf_config_ggtt(gt, config);
if (IS_DGFX(xe)) {
- pf_release_vf_config_lmem(gt, config);
- if (xe_device_has_lmtt(xe))
+ released = pf_release_vf_config_lmem(gt, config);
+ if (released && xe_device_has_lmtt(xe))
pf_update_vf_lmtt(xe, vfid);
}
}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
index bf679b21f485..3ed245e04d0c 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
@@ -22,6 +22,7 @@
#include "xe_gt_sriov_pf_policy.h"
#include "xe_gt_sriov_pf_service.h"
#include "xe_pm.h"
+#include "xe_sriov_pf.h"
/*
* /sys/kernel/debug/dri/0/
@@ -205,7 +206,8 @@ static int CONFIG##_set(void *data, u64 val) \
return -EOVERFLOW; \
\
xe_pm_runtime_get(xe); \
- err = xe_gt_sriov_pf_config_set_##CONFIG(gt, vfid, val); \
+ err = xe_sriov_pf_wait_ready(xe) ?: \
+ xe_gt_sriov_pf_config_set_##CONFIG(gt, vfid, val); \
xe_pm_runtime_put(xe); \
\
return err; \
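The `?:` in the hunk above is the GNU "Elvis" extension widely used in the kernel: `a ?: b` evaluates to `a` if `a` is non-zero and to `b` otherwise, evaluating `a` only once. Here it chains the readiness wait with the actual setter, so any non-zero error from the wait short-circuits the config call. A hedged sketch of the idiom with hypothetical helpers:

/* Sketch of the GNU "?:" error-chaining idiom (hypothetical helpers). */
static int wait_ready(void) { return 0; /* 0 = ready, -errno otherwise */ }
static int apply_config(int v) { return v < 0 ? -22 : 0; }

static int set_config(int v)
{
	/* Equivalent to: err = wait_ready(); if (!err) err = apply_config(v); */
	return wait_ready() ?: apply_config(v);
}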
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
index c712111aa30d..44cc612b0a75 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
@@ -55,12 +55,12 @@ static int pf_send_guc_save_vf_state(struct xe_gt *gt, unsigned int vfid,
xe_gt_assert(gt, size % sizeof(u32) == 0);
xe_gt_assert(gt, size == ndwords * sizeof(u32));
- bo = xe_bo_create_pin_map(xe, tile, NULL,
- ALIGN(size, PAGE_SIZE),
- ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ bo = xe_bo_create_pin_map_novm(xe, tile,
+ ALIGN(size, PAGE_SIZE),
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE, false);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -91,12 +91,12 @@ static int pf_send_guc_restore_vf_state(struct xe_gt *gt, unsigned int vfid,
xe_gt_assert(gt, size % sizeof(u32) == 0);
xe_gt_assert(gt, size == ndwords * sizeof(u32));
- bo = xe_bo_create_pin_map(xe, tile, NULL,
- ALIGN(size, PAGE_SIZE),
- ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ bo = xe_bo_create_pin_map_novm(xe, tile,
+ ALIGN(size, PAGE_SIZE),
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE, false);
if (IS_ERR(bo))
return PTR_ERR(bo);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index b282838d59e6..0461d5513487 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -25,6 +25,7 @@
#include "xe_guc.h"
#include "xe_guc_hxg_helpers.h"
#include "xe_guc_relay.h"
+#include "xe_lrc.h"
#include "xe_mmio.h"
#include "xe_sriov.h"
#include "xe_sriov_vf.h"
@@ -751,6 +752,19 @@ failed:
}
/**
+ * xe_gt_sriov_vf_default_lrcs_hwsp_rebase - Update GGTT references in HWSP of default LRCs.
+ * @gt: the &xe_gt struct instance
+ */
+void xe_gt_sriov_vf_default_lrcs_hwsp_rebase(struct xe_gt *gt)
+{
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+
+ for_each_hw_engine(hwe, gt, id)
+ xe_default_lrc_update_memirq_regs_with_address(hwe);
+}
+
+/**
* xe_gt_sriov_vf_migrated_event_handler - Start a VF migration recovery,
* or just mark that a GuC is ready for it.
* @gt: the &xe_gt struct instance linked to target GuC
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
index e0357f341a2d..0af1dc769fe0 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
@@ -21,6 +21,7 @@ void xe_gt_sriov_vf_guc_versions(struct xe_gt *gt,
int xe_gt_sriov_vf_query_config(struct xe_gt *gt);
int xe_gt_sriov_vf_connect(struct xe_gt *gt);
int xe_gt_sriov_vf_query_runtime(struct xe_gt *gt);
+void xe_gt_sriov_vf_default_lrcs_hwsp_rebase(struct xe_gt *gt);
int xe_gt_sriov_vf_notify_resfix_done(struct xe_gt *gt);
void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt);
diff --git a/drivers/gpu/drm/xe/xe_gt_stats.c b/drivers/gpu/drm/xe/xe_gt_stats.c
index 30f942671c2b..5f74706bab81 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats.c
+++ b/drivers/gpu/drm/xe/xe_gt_stats.c
@@ -26,11 +26,46 @@ void xe_gt_stats_incr(struct xe_gt *gt, const enum xe_gt_stats_id id, int incr)
atomic64_add(incr, &gt->stats.counters[id]);
}
+#define DEF_STAT_STR(ID, name) [XE_GT_STATS_ID_##ID] = name
+
static const char *const stat_description[__XE_GT_STATS_NUM_IDS] = {
- "svm_pagefault_count",
- "tlb_inval_count",
- "vma_pagefault_count",
- "vma_pagefault_kb",
+ DEF_STAT_STR(SVM_PAGEFAULT_COUNT, "svm_pagefault_count"),
+ DEF_STAT_STR(TLB_INVAL, "tlb_inval_count"),
+ DEF_STAT_STR(SVM_TLB_INVAL_COUNT, "svm_tlb_inval_count"),
+ DEF_STAT_STR(SVM_TLB_INVAL_US, "svm_tlb_inval_us"),
+ DEF_STAT_STR(VMA_PAGEFAULT_COUNT, "vma_pagefault_count"),
+ DEF_STAT_STR(VMA_PAGEFAULT_KB, "vma_pagefault_kb"),
+ DEF_STAT_STR(SVM_4K_PAGEFAULT_COUNT, "svm_4K_pagefault_count"),
+ DEF_STAT_STR(SVM_64K_PAGEFAULT_COUNT, "svm_64K_pagefault_count"),
+ DEF_STAT_STR(SVM_2M_PAGEFAULT_COUNT, "svm_2M_pagefault_count"),
+ DEF_STAT_STR(SVM_4K_VALID_PAGEFAULT_COUNT, "svm_4K_valid_pagefault_count"),
+ DEF_STAT_STR(SVM_64K_VALID_PAGEFAULT_COUNT, "svm_64K_valid_pagefault_count"),
+ DEF_STAT_STR(SVM_2M_VALID_PAGEFAULT_COUNT, "svm_2M_valid_pagefault_count"),
+ DEF_STAT_STR(SVM_4K_PAGEFAULT_US, "svm_4K_pagefault_us"),
+ DEF_STAT_STR(SVM_64K_PAGEFAULT_US, "svm_64K_pagefault_us"),
+ DEF_STAT_STR(SVM_2M_PAGEFAULT_US, "svm_2M_pagefault_us"),
+ DEF_STAT_STR(SVM_4K_MIGRATE_COUNT, "svm_4K_migrate_count"),
+ DEF_STAT_STR(SVM_64K_MIGRATE_COUNT, "svm_64K_migrate_count"),
+ DEF_STAT_STR(SVM_2M_MIGRATE_COUNT, "svm_2M_migrate_count"),
+ DEF_STAT_STR(SVM_4K_MIGRATE_US, "svm_4K_migrate_us"),
+ DEF_STAT_STR(SVM_64K_MIGRATE_US, "svm_64K_migrate_us"),
+ DEF_STAT_STR(SVM_2M_MIGRATE_US, "svm_2M_migrate_us"),
+ DEF_STAT_STR(SVM_DEVICE_COPY_US, "svm_device_copy_us"),
+ DEF_STAT_STR(SVM_4K_DEVICE_COPY_US, "svm_4K_device_copy_us"),
+ DEF_STAT_STR(SVM_64K_DEVICE_COPY_US, "svm_64K_device_copy_us"),
+ DEF_STAT_STR(SVM_2M_DEVICE_COPY_US, "svm_2M_device_copy_us"),
+ DEF_STAT_STR(SVM_CPU_COPY_US, "svm_cpu_copy_us"),
+ DEF_STAT_STR(SVM_4K_CPU_COPY_US, "svm_4K_cpu_copy_us"),
+ DEF_STAT_STR(SVM_64K_CPU_COPY_US, "svm_64K_cpu_copy_us"),
+ DEF_STAT_STR(SVM_2M_CPU_COPY_US, "svm_2M_cpu_copy_us"),
+ DEF_STAT_STR(SVM_DEVICE_COPY_KB, "svm_device_copy_kb"),
+ DEF_STAT_STR(SVM_CPU_COPY_KB, "svm_cpu_copy_kb"),
+ DEF_STAT_STR(SVM_4K_GET_PAGES_US, "svm_4K_get_pages_us"),
+ DEF_STAT_STR(SVM_64K_GET_PAGES_US, "svm_64K_get_pages_us"),
+ DEF_STAT_STR(SVM_2M_GET_PAGES_US, "svm_2M_get_pages_us"),
+ DEF_STAT_STR(SVM_4K_BIND_US, "svm_4K_bind_us"),
+ DEF_STAT_STR(SVM_64K_BIND_US, "svm_64K_bind_us"),
+ DEF_STAT_STR(SVM_2M_BIND_US, "svm_2M_bind_us"),
};
/**
@@ -50,3 +85,17 @@ int xe_gt_stats_print_info(struct xe_gt *gt, struct drm_printer *p)
return 0;
}
+
+/**
+ * xe_gt_stats_clear - Clear the GT stats
+ * @gt: GT structure
+ *
+ * This clears (zeros) all the available GT stats.
+ */
+void xe_gt_stats_clear(struct xe_gt *gt)
+{
+ int id;
+
+ for (id = 0; id < ARRAY_SIZE(gt->stats.counters); ++id)
+ atomic64_set(&gt->stats.counters[id], 0);
+}
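The DEF_STAT_STR() change above switches the description table from positional to designated array initializers, so each string is tied to its enum value and reordering or extending the enum can no longer silently misalign the names. A minimal sketch of the technique with a hypothetical enum and table:

/* Sketch: designated initializers keyed by enum, as in DEF_STAT_STR(). */
enum stat_id { STAT_FOO, STAT_BAR, __NUM_STATS };

#define DEF_STAT(ID, name) [STAT_##ID] = name

static const char *const stat_name[__NUM_STATS] = {
	DEF_STAT(FOO, "foo_count"),
	DEF_STAT(BAR, "bar_count"),	/* order here no longer matters */
};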
diff --git a/drivers/gpu/drm/xe/xe_gt_stats.h b/drivers/gpu/drm/xe/xe_gt_stats.h
index 38325ef53617..e8aea32bc971 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats.h
+++ b/drivers/gpu/drm/xe/xe_gt_stats.h
@@ -13,6 +13,7 @@ struct drm_printer;
#ifdef CONFIG_DEBUG_FS
int xe_gt_stats_print_info(struct xe_gt *gt, struct drm_printer *p);
+void xe_gt_stats_clear(struct xe_gt *gt);
void xe_gt_stats_incr(struct xe_gt *gt, const enum xe_gt_stats_id id, int incr);
#else
static inline void
diff --git a/drivers/gpu/drm/xe/xe_gt_stats_types.h b/drivers/gpu/drm/xe/xe_gt_stats_types.h
index be3244d7133c..d8348a8de2e1 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_stats_types.h
@@ -9,8 +9,41 @@
enum xe_gt_stats_id {
XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT,
XE_GT_STATS_ID_TLB_INVAL,
+ XE_GT_STATS_ID_SVM_TLB_INVAL_COUNT,
+ XE_GT_STATS_ID_SVM_TLB_INVAL_US,
XE_GT_STATS_ID_VMA_PAGEFAULT_COUNT,
XE_GT_STATS_ID_VMA_PAGEFAULT_KB,
+ XE_GT_STATS_ID_SVM_4K_PAGEFAULT_COUNT,
+ XE_GT_STATS_ID_SVM_64K_PAGEFAULT_COUNT,
+ XE_GT_STATS_ID_SVM_2M_PAGEFAULT_COUNT,
+ XE_GT_STATS_ID_SVM_4K_VALID_PAGEFAULT_COUNT,
+ XE_GT_STATS_ID_SVM_64K_VALID_PAGEFAULT_COUNT,
+ XE_GT_STATS_ID_SVM_2M_VALID_PAGEFAULT_COUNT,
+ XE_GT_STATS_ID_SVM_4K_PAGEFAULT_US,
+ XE_GT_STATS_ID_SVM_64K_PAGEFAULT_US,
+ XE_GT_STATS_ID_SVM_2M_PAGEFAULT_US,
+ XE_GT_STATS_ID_SVM_4K_MIGRATE_COUNT,
+ XE_GT_STATS_ID_SVM_64K_MIGRATE_COUNT,
+ XE_GT_STATS_ID_SVM_2M_MIGRATE_COUNT,
+ XE_GT_STATS_ID_SVM_4K_MIGRATE_US,
+ XE_GT_STATS_ID_SVM_64K_MIGRATE_US,
+ XE_GT_STATS_ID_SVM_2M_MIGRATE_US,
+ XE_GT_STATS_ID_SVM_DEVICE_COPY_US,
+ XE_GT_STATS_ID_SVM_4K_DEVICE_COPY_US,
+ XE_GT_STATS_ID_SVM_64K_DEVICE_COPY_US,
+ XE_GT_STATS_ID_SVM_2M_DEVICE_COPY_US,
+ XE_GT_STATS_ID_SVM_CPU_COPY_US,
+ XE_GT_STATS_ID_SVM_4K_CPU_COPY_US,
+ XE_GT_STATS_ID_SVM_64K_CPU_COPY_US,
+ XE_GT_STATS_ID_SVM_2M_CPU_COPY_US,
+ XE_GT_STATS_ID_SVM_DEVICE_COPY_KB,
+ XE_GT_STATS_ID_SVM_CPU_COPY_KB,
+ XE_GT_STATS_ID_SVM_4K_GET_PAGES_US,
+ XE_GT_STATS_ID_SVM_64K_GET_PAGES_US,
+ XE_GT_STATS_ID_SVM_2M_GET_PAGES_US,
+ XE_GT_STATS_ID_SVM_4K_BIND_US,
+ XE_GT_STATS_ID_SVM_64K_BIND_US,
+ XE_GT_STATS_ID_SVM_2M_BIND_US,
/* must be the last entry */
__XE_GT_STATS_NUM_IDS,
};
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
deleted file mode 100644
index 086c12ee3d9d..000000000000
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ /dev/null
@@ -1,596 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#include "xe_gt_tlb_invalidation.h"
-
-#include "abi/guc_actions_abi.h"
-#include "xe_device.h"
-#include "xe_force_wake.h"
-#include "xe_gt.h"
-#include "xe_gt_printk.h"
-#include "xe_guc.h"
-#include "xe_guc_ct.h"
-#include "xe_gt_stats.h"
-#include "xe_mmio.h"
-#include "xe_pm.h"
-#include "xe_sriov.h"
-#include "xe_trace.h"
-#include "regs/xe_guc_regs.h"
-
-#define FENCE_STACK_BIT DMA_FENCE_FLAG_USER_BITS
-
-/*
- * TLB inval depends on pending commands in the CT queue and then the real
- * invalidation time. Double up the time to process full CT queue
- * just to be on the safe side.
- */
-static long tlb_timeout_jiffies(struct xe_gt *gt)
-{
- /* this reflects what HW/GuC needs to process TLB inv request */
- const long hw_tlb_timeout = HZ / 4;
-
- /* this estimates actual delay caused by the CTB transport */
- long delay = xe_guc_ct_queue_proc_time_jiffies(&gt->uc.guc.ct);
-
- return hw_tlb_timeout + 2 * delay;
-}
-
-static void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence)
-{
- if (WARN_ON_ONCE(!fence->gt))
- return;
-
- xe_pm_runtime_put(gt_to_xe(fence->gt));
- fence->gt = NULL; /* fini() should be called once */
-}
-
-static void
-__invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
-{
- bool stack = test_bit(FENCE_STACK_BIT, &fence->base.flags);
-
- trace_xe_gt_tlb_invalidation_fence_signal(xe, fence);
- xe_gt_tlb_invalidation_fence_fini(fence);
- dma_fence_signal(&fence->base);
- if (!stack)
- dma_fence_put(&fence->base);
-}
-
-static void
-invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
-{
- list_del(&fence->link);
- __invalidation_fence_signal(xe, fence);
-}
-
-void xe_gt_tlb_invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence)
-{
- if (WARN_ON_ONCE(!fence->gt))
- return;
-
- __invalidation_fence_signal(gt_to_xe(fence->gt), fence);
-}
-
-static void xe_gt_tlb_fence_timeout(struct work_struct *work)
-{
- struct xe_gt *gt = container_of(work, struct xe_gt,
- tlb_invalidation.fence_tdr.work);
- struct xe_device *xe = gt_to_xe(gt);
- struct xe_gt_tlb_invalidation_fence *fence, *next;
-
- LNL_FLUSH_WORK(&gt->uc.guc.ct.g2h_worker);
-
- spin_lock_irq(&gt->tlb_invalidation.pending_lock);
- list_for_each_entry_safe(fence, next,
- &gt->tlb_invalidation.pending_fences, link) {
- s64 since_inval_ms = ktime_ms_delta(ktime_get(),
- fence->invalidation_time);
-
- if (msecs_to_jiffies(since_inval_ms) < tlb_timeout_jiffies(gt))
- break;
-
- trace_xe_gt_tlb_invalidation_fence_timeout(xe, fence);
- xe_gt_err(gt, "TLB invalidation fence timeout, seqno=%d recv=%d",
- fence->seqno, gt->tlb_invalidation.seqno_recv);
-
- fence->base.error = -ETIME;
- invalidation_fence_signal(xe, fence);
- }
- if (!list_empty(&gt->tlb_invalidation.pending_fences))
- queue_delayed_work(system_wq,
- &gt->tlb_invalidation.fence_tdr,
- tlb_timeout_jiffies(gt));
- spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
-}
-
-/**
- * xe_gt_tlb_invalidation_init_early - Initialize GT TLB invalidation state
- * @gt: GT structure
- *
- * Initialize GT TLB invalidation state, purely software initialization, should
- * be called once during driver load.
- *
- * Return: 0 on success, negative error code on error.
- */
-int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt)
-{
- gt->tlb_invalidation.seqno = 1;
- INIT_LIST_HEAD(&gt->tlb_invalidation.pending_fences);
- spin_lock_init(&gt->tlb_invalidation.pending_lock);
- spin_lock_init(&gt->tlb_invalidation.lock);
- INIT_DELAYED_WORK(&gt->tlb_invalidation.fence_tdr,
- xe_gt_tlb_fence_timeout);
-
- return 0;
-}
-
-/**
- * xe_gt_tlb_invalidation_reset - Initialize GT TLB invalidation reset
- * @gt: GT structure
- *
- * Signal any pending invalidation fences, should be called during a GT reset
- */
-void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
-{
- struct xe_gt_tlb_invalidation_fence *fence, *next;
- int pending_seqno;
-
- /*
- * we can get here before the CTs are even initialized if we're wedging
- * very early, in which case there are not going to be any pending
- * fences so we can bail immediately.
- */
- if (!xe_guc_ct_initialized(&gt->uc.guc.ct))
- return;
-
- /*
- * CT channel is already disabled at this point. No new TLB requests can
- * appear.
- */
-
- mutex_lock(&gt->uc.guc.ct.lock);
- spin_lock_irq(&gt->tlb_invalidation.pending_lock);
- cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);
- /*
- * We might have various kworkers waiting for TLB flushes to complete
- * which are not tracked with an explicit TLB fence, however at this
- * stage that will never happen since the CT is already disabled, so
- * make sure we signal them here under the assumption that we have
- * completed a full GT reset.
- */
- if (gt->tlb_invalidation.seqno == 1)
- pending_seqno = TLB_INVALIDATION_SEQNO_MAX - 1;
- else
- pending_seqno = gt->tlb_invalidation.seqno - 1;
- WRITE_ONCE(gt->tlb_invalidation.seqno_recv, pending_seqno);
-
- list_for_each_entry_safe(fence, next,
- &gt->tlb_invalidation.pending_fences, link)
- invalidation_fence_signal(gt_to_xe(gt), fence);
- spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
- mutex_unlock(&gt->uc.guc.ct.lock);
-}
-
-static bool tlb_invalidation_seqno_past(struct xe_gt *gt, int seqno)
-{
- int seqno_recv = READ_ONCE(gt->tlb_invalidation.seqno_recv);
-
- if (seqno - seqno_recv < -(TLB_INVALIDATION_SEQNO_MAX / 2))
- return false;
-
- if (seqno - seqno_recv > (TLB_INVALIDATION_SEQNO_MAX / 2))
- return true;
-
- return seqno_recv >= seqno;
-}
-
-static int send_tlb_invalidation(struct xe_guc *guc,
- struct xe_gt_tlb_invalidation_fence *fence,
- u32 *action, int len)
-{
- struct xe_gt *gt = guc_to_gt(guc);
- struct xe_device *xe = gt_to_xe(gt);
- int seqno;
- int ret;
-
- xe_gt_assert(gt, fence);
-
- /*
- * XXX: The seqno algorithm relies on TLB invalidation being processed
- * in order which they currently are, if that changes the algorithm will
- * need to be updated.
- */
-
- mutex_lock(&guc->ct.lock);
- seqno = gt->tlb_invalidation.seqno;
- fence->seqno = seqno;
- trace_xe_gt_tlb_invalidation_fence_send(xe, fence);
- action[1] = seqno;
- ret = xe_guc_ct_send_locked(&guc->ct, action, len,
- G2H_LEN_DW_TLB_INVALIDATE, 1);
- if (!ret) {
- spin_lock_irq(&gt->tlb_invalidation.pending_lock);
- /*
- * We haven't actually published the TLB fence as per
- * pending_fences, but in theory our seqno could have already
- * been written as we acquired the pending_lock. In such a case
- * we can just go ahead and signal the fence here.
- */
- if (tlb_invalidation_seqno_past(gt, seqno)) {
- __invalidation_fence_signal(xe, fence);
- } else {
- fence->invalidation_time = ktime_get();
- list_add_tail(&fence->link,
- &gt->tlb_invalidation.pending_fences);
-
- if (list_is_singular(&gt->tlb_invalidation.pending_fences))
- queue_delayed_work(system_wq,
- &gt->tlb_invalidation.fence_tdr,
- tlb_timeout_jiffies(gt));
- }
- spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
- } else {
- __invalidation_fence_signal(xe, fence);
- }
- if (!ret) {
- gt->tlb_invalidation.seqno = (gt->tlb_invalidation.seqno + 1) %
- TLB_INVALIDATION_SEQNO_MAX;
- if (!gt->tlb_invalidation.seqno)
- gt->tlb_invalidation.seqno = 1;
- }
- mutex_unlock(&guc->ct.lock);
- xe_gt_stats_incr(gt, XE_GT_STATS_ID_TLB_INVAL, 1);
-
- return ret;
-}
-
-#define MAKE_INVAL_OP(type) ((type << XE_GUC_TLB_INVAL_TYPE_SHIFT) | \
- XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT | \
- XE_GUC_TLB_INVAL_FLUSH_CACHE)
-
-/**
- * xe_gt_tlb_invalidation_guc - Issue a TLB invalidation on this GT for the GuC
- * @gt: GT structure
- * @fence: invalidation fence which will be signal on TLB invalidation
- * completion
- *
- * Issue a TLB invalidation for the GuC. Completion of TLB is asynchronous and
- * caller can use the invalidation fence to wait for completion.
- *
- * Return: 0 on success, negative error code on error
- */
-static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt,
- struct xe_gt_tlb_invalidation_fence *fence)
-{
- u32 action[] = {
- XE_GUC_ACTION_TLB_INVALIDATION,
- 0, /* seqno, replaced in send_tlb_invalidation */
- MAKE_INVAL_OP(XE_GUC_TLB_INVAL_GUC),
- };
- int ret;
-
- ret = send_tlb_invalidation(&gt->uc.guc, fence, action,
- ARRAY_SIZE(action));
- /*
- * -ECANCELED indicates the CT is stopped for a GT reset. TLB caches
- * should be nuked on a GT reset so this error can be ignored.
- */
- if (ret == -ECANCELED)
- return 0;
-
- return ret;
-}
-
-/**
- * xe_gt_tlb_invalidation_ggtt - Issue a TLB invalidation on this GT for the GGTT
- * @gt: GT structure
- *
- * Issue a TLB invalidation for the GGTT. Completion of TLB invalidation is
- * synchronous.
- *
- * Return: 0 on success, negative error code on error
- */
-int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
-{
- struct xe_device *xe = gt_to_xe(gt);
- unsigned int fw_ref;
-
- if (xe_guc_ct_enabled(&gt->uc.guc.ct) &&
- gt->uc.guc.submission_state.enabled) {
- struct xe_gt_tlb_invalidation_fence fence;
- int ret;
-
- xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
- ret = xe_gt_tlb_invalidation_guc(gt, &fence);
- if (ret)
- return ret;
-
- xe_gt_tlb_invalidation_fence_wait(&fence);
- } else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
- struct xe_mmio *mmio = &gt->mmio;
-
- if (IS_SRIOV_VF(xe))
- return 0;
-
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20) {
- xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC1,
- PVC_GUC_TLB_INV_DESC1_INVALIDATE);
- xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC0,
- PVC_GUC_TLB_INV_DESC0_VALID);
- } else {
- xe_mmio_write32(mmio, GUC_TLB_INV_CR,
- GUC_TLB_INV_CR_INVALIDATE);
- }
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
- }
-
- return 0;
-}
-
-static int send_tlb_invalidation_all(struct xe_gt *gt,
- struct xe_gt_tlb_invalidation_fence *fence)
-{
- u32 action[] = {
- XE_GUC_ACTION_TLB_INVALIDATION_ALL,
- 0, /* seqno, replaced in send_tlb_invalidation */
- MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL),
- };
-
- return send_tlb_invalidation(&gt->uc.guc, fence, action, ARRAY_SIZE(action));
-}
-
-/**
- * xe_gt_tlb_invalidation_all - Invalidate all TLBs across PF and all VFs.
- * @gt: the &xe_gt structure
- * @fence: the &xe_gt_tlb_invalidation_fence to be signaled on completion
- *
- * Send a request to invalidate all TLBs across PF and all VFs.
- *
- * Return: 0 on success, negative error code on error
- */
-int xe_gt_tlb_invalidation_all(struct xe_gt *gt, struct xe_gt_tlb_invalidation_fence *fence)
-{
- int err;
-
- xe_gt_assert(gt, gt == fence->gt);
-
- err = send_tlb_invalidation_all(gt, fence);
- if (err)
- xe_gt_err(gt, "TLB invalidation request failed (%pe)", ERR_PTR(err));
-
- return err;
-}
-
-/*
- * Ensure that roundup_pow_of_two(length) doesn't overflow.
- * Note that roundup_pow_of_two() operates on unsigned long,
- * not on u64.
- */
-#define MAX_RANGE_TLB_INVALIDATION_LENGTH (rounddown_pow_of_two(ULONG_MAX))
-
-/**
- * xe_gt_tlb_invalidation_range - Issue a TLB invalidation on this GT for an
- * address range
- *
- * @gt: GT structure
- * @fence: invalidation fence which will be signal on TLB invalidation
- * completion
- * @start: start address
- * @end: end address
- * @asid: address space id
- *
- * Issue a range based TLB invalidation if supported, if not fallback to a full
- * TLB invalidation. Completion of TLB is asynchronous and caller can use
- * the invalidation fence to wait for completion.
- *
- * Return: Negative error code on error, 0 on success
- */
-int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
- struct xe_gt_tlb_invalidation_fence *fence,
- u64 start, u64 end, u32 asid)
-{
- struct xe_device *xe = gt_to_xe(gt);
-#define MAX_TLB_INVALIDATION_LEN 7
- u32 action[MAX_TLB_INVALIDATION_LEN];
- u64 length = end - start;
- int len = 0;
-
- xe_gt_assert(gt, fence);
-
- /* Execlists not supported */
- if (gt_to_xe(gt)->info.force_execlist) {
- __invalidation_fence_signal(xe, fence);
- return 0;
- }
-
- action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
- action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
- if (!xe->info.has_range_tlb_invalidation ||
- length > MAX_RANGE_TLB_INVALIDATION_LENGTH) {
- action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
- } else {
- u64 orig_start = start;
- u64 align;
-
- if (length < SZ_4K)
- length = SZ_4K;
-
- /*
- * We need to invalidate a higher granularity if start address
- * is not aligned to length. When start is not aligned with
- * length we need to find the length large enough to create an
- * address mask covering the required range.
- */
- align = roundup_pow_of_two(length);
- start = ALIGN_DOWN(start, align);
- end = ALIGN(end, align);
- length = align;
- while (start + length < end) {
- length <<= 1;
- start = ALIGN_DOWN(orig_start, length);
- }
-
- /*
- * Minimum invalidation size for a 2MB page that the hardware
- * expects is 16MB
- */
- if (length >= SZ_2M) {
- length = max_t(u64, SZ_16M, length);
- start = ALIGN_DOWN(orig_start, length);
- }
-
- xe_gt_assert(gt, length >= SZ_4K);
- xe_gt_assert(gt, is_power_of_2(length));
- xe_gt_assert(gt, !(length & GENMASK(ilog2(SZ_16M) - 1,
- ilog2(SZ_2M) + 1)));
- xe_gt_assert(gt, IS_ALIGNED(start, length));
-
- action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_PAGE_SELECTIVE);
- action[len++] = asid;
- action[len++] = lower_32_bits(start);
- action[len++] = upper_32_bits(start);
- action[len++] = ilog2(length) - ilog2(SZ_4K);
- }
-
- xe_gt_assert(gt, len <= MAX_TLB_INVALIDATION_LEN);
-
- return send_tlb_invalidation(&gt->uc.guc, fence, action, len);
-}
-
-/**
- * xe_gt_tlb_invalidation_vm - Issue a TLB invalidation on this GT for a VM
- * @gt: graphics tile
- * @vm: VM to invalidate
- *
- * Invalidate entire VM's address space
- */
-void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm)
-{
- struct xe_gt_tlb_invalidation_fence fence;
- u64 range = 1ull << vm->xe->info.va_bits;
- int ret;
-
- xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
-
- ret = xe_gt_tlb_invalidation_range(gt, &fence, 0, range, vm->usm.asid);
- if (ret < 0)
- return;
-
- xe_gt_tlb_invalidation_fence_wait(&fence);
-}
-
-/**
- * xe_guc_tlb_invalidation_done_handler - TLB invalidation done handler
- * @guc: guc
- * @msg: message indicating TLB invalidation done
- * @len: length of message
- *
- * Parse seqno of TLB invalidation, wake any waiters for seqno, and signal any
- * invalidation fences for seqno. Algorithm for this depends on seqno being
- * received in-order and asserts this assumption.
- *
- * Return: 0 on success, -EPROTO for malformed messages.
- */
-int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
-{
- struct xe_gt *gt = guc_to_gt(guc);
- struct xe_device *xe = gt_to_xe(gt);
- struct xe_gt_tlb_invalidation_fence *fence, *next;
- unsigned long flags;
-
- if (unlikely(len != 1))
- return -EPROTO;
-
- /*
- * This can also be run both directly from the IRQ handler and also in
- * process_g2h_msg(). Only one may process any individual CT message,
- * however the order they are processed here could result in skipping a
- * seqno. To handle that we just process all the seqnos from the last
- * seqno_recv up to and including the one in msg[0]. The delta should be
- * very small so there shouldn't be much of pending_fences we actually
- * need to iterate over here.
- *
- * From GuC POV we expect the seqnos to always appear in-order, so if we
- * see something later in the timeline we can be sure that anything
- * appearing earlier has already signalled, just that we have yet to
- * officially process the CT message like if racing against
- * process_g2h_msg().
- */
- spin_lock_irqsave(&gt->tlb_invalidation.pending_lock, flags);
- if (tlb_invalidation_seqno_past(gt, msg[0])) {
- spin_unlock_irqrestore(&gt->tlb_invalidation.pending_lock, flags);
- return 0;
- }
-
- WRITE_ONCE(gt->tlb_invalidation.seqno_recv, msg[0]);
-
- list_for_each_entry_safe(fence, next,
- &gt->tlb_invalidation.pending_fences, link) {
- trace_xe_gt_tlb_invalidation_fence_recv(xe, fence);
-
- if (!tlb_invalidation_seqno_past(gt, fence->seqno))
- break;
-
- invalidation_fence_signal(xe, fence);
- }
-
- if (!list_empty(&gt->tlb_invalidation.pending_fences))
- mod_delayed_work(system_wq,
- &gt->tlb_invalidation.fence_tdr,
- tlb_timeout_jiffies(gt));
- else
- cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);
-
- spin_unlock_irqrestore(&gt->tlb_invalidation.pending_lock, flags);
-
- return 0;
-}
-
-static const char *
-invalidation_fence_get_driver_name(struct dma_fence *dma_fence)
-{
- return "xe";
-}
-
-static const char *
-invalidation_fence_get_timeline_name(struct dma_fence *dma_fence)
-{
- return "invalidation_fence";
-}
-
-static const struct dma_fence_ops invalidation_fence_ops = {
- .get_driver_name = invalidation_fence_get_driver_name,
- .get_timeline_name = invalidation_fence_get_timeline_name,
-};
-
-/**
- * xe_gt_tlb_invalidation_fence_init - Initialize TLB invalidation fence
- * @gt: GT
- * @fence: TLB invalidation fence to initialize
- * @stack: fence is stack variable
- *
- * Initialize TLB invalidation fence for use. xe_gt_tlb_invalidation_fence_fini
- * will be automatically called when fence is signalled (all fences must signal),
- * even on error.
- */
-void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
- struct xe_gt_tlb_invalidation_fence *fence,
- bool stack)
-{
- xe_pm_runtime_get_noresume(gt_to_xe(gt));
-
- spin_lock_irq(&gt->tlb_invalidation.lock);
- dma_fence_init(&fence->base, &invalidation_fence_ops,
- &gt->tlb_invalidation.lock,
- dma_fence_context_alloc(1), 1);
- spin_unlock_irq(&gt->tlb_invalidation.lock);
- INIT_LIST_HEAD(&fence->link);
- if (stack)
- set_bit(FENCE_STACK_BIT, &fence->base.flags);
- else
- dma_fence_get(&fence->base);
- fence->gt = gt;
-}
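The removed tlb_invalidation_seqno_past() above is a classic modular sequence-number comparison: since seqnos wrap at TLB_INVALIDATION_SEQNO_MAX, a raw `>=` would misbehave near the wrap point, so differences larger than half the range are interpreted as wrap-around. A self-contained sketch of that comparison, with the 0x100000 range taken from the removed code:

/* Sketch: wrap-safe "has this seqno already been seen?" check. */
#define SEQNO_MAX 0x100000	/* from TLB_INVALIDATION_SEQNO_MAX above */

static int seqno_past(int seqno_recv, int seqno)
{
	if (seqno - seqno_recv < -(SEQNO_MAX / 2))
		return 0;	/* recv is far ahead only via wrap: not past */
	if (seqno - seqno_recv > (SEQNO_MAX / 2))
		return 1;	/* seqno is behind across the wrap: past */
	return seqno_recv >= seqno;
}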
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
deleted file mode 100644
index f7f0f2eaf4b5..000000000000
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef _XE_GT_TLB_INVALIDATION_H_
-#define _XE_GT_TLB_INVALIDATION_H_
-
-#include <linux/types.h>
-
-#include "xe_gt_tlb_invalidation_types.h"
-
-struct xe_gt;
-struct xe_guc;
-struct xe_vm;
-struct xe_vma;
-
-int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt);
-
-void xe_gt_tlb_invalidation_reset(struct xe_gt *gt);
-int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt);
-void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm);
-int xe_gt_tlb_invalidation_all(struct xe_gt *gt, struct xe_gt_tlb_invalidation_fence *fence);
-int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
- struct xe_gt_tlb_invalidation_fence *fence,
- u64 start, u64 end, u32 asid);
-int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
-
-void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
- struct xe_gt_tlb_invalidation_fence *fence,
- bool stack);
-void xe_gt_tlb_invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence);
-
-static inline void
-xe_gt_tlb_invalidation_fence_wait(struct xe_gt_tlb_invalidation_fence *fence)
-{
- dma_fence_wait(&fence->base, false);
-}
-
-#endif /* _XE_GT_TLB_INVALIDATION_ */
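Picking up the range-invalidation math from the removed xe_gt_tlb_invalidation_range() above: the hardware takes a single power-of-two window, so the code grows the window until one aligned power-of-two span covers [start, end). A hedged standalone sketch of that computation, simplified from the removed function (the SZ_4K/SZ_16M clamps are omitted):

/* Sketch: grow a power-of-two window [start, start+length) to cover
 * [orig_start, end), as the removed xe_gt_tlb_invalidation_range() did.
 * E.g. orig_start = 0x1000, end = 0x9000: align = 0x8000, start = 0x0,
 * end rounds up to 0x10000, so the loop doubles length to 0x10000 and
 * the final window is [0x0, 0x10000).
 */
#include <linux/align.h>
#include <linux/log2.h>
#include <linux/types.h>

static u64 inval_window(u64 orig_start, u64 end, u64 *out_start)
{
	u64 length = roundup_pow_of_two(end - orig_start);
	u64 start = ALIGN_DOWN(orig_start, length);

	end = ALIGN(end, length);
	while (start + length < end) {		/* window still too small */
		length <<= 1;
		start = ALIGN_DOWN(orig_start, length);
	}
	*out_start = start;
	return length;
}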
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h
deleted file mode 100644
index de6e825e0851..000000000000
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef _XE_GT_TLB_INVALIDATION_TYPES_H_
-#define _XE_GT_TLB_INVALIDATION_TYPES_H_
-
-#include <linux/dma-fence.h>
-
-struct xe_gt;
-
-/**
- * struct xe_gt_tlb_invalidation_fence - XE GT TLB invalidation fence
- *
- * Optionally passed to xe_gt_tlb_invalidation and will be signaled upon TLB
- * invalidation completion.
- */
-struct xe_gt_tlb_invalidation_fence {
- /** @base: dma fence base */
- struct dma_fence base;
- /** @gt: GT which fence belong to */
- struct xe_gt *gt;
- /** @link: link into list of pending tlb fences */
- struct list_head link;
- /** @seqno: seqno of TLB invalidation to signal fence one */
- int seqno;
- /** @invalidation_time: time of TLB invalidation */
- ktime_t invalidation_time;
-};
-
-#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c
index 8c63e3263643..4e61c5e39bcb 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.c
+++ b/drivers/gpu/drm/xe/xe_gt_topology.c
@@ -12,6 +12,7 @@
#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_gt.h"
+#include "xe_gt_mcr.h"
#include "xe_gt_printk.h"
#include "xe_mmio.h"
#include "xe_wa.h"
@@ -122,6 +123,21 @@ gen_l3_mask_from_pattern(struct xe_device *xe, xe_l3_bank_mask_t dst,
}
}
+bool xe_gt_topology_report_l3(struct xe_gt *gt)
+{
+ /*
+ * No known userspace needs/uses the L3 bank mask reported by
+ * the media GT, and the hardware itself is known to report bogus
+ * values on several platforms. Only report L3 bank mask as part
+ * of the media GT's topology on pre-Xe3 platforms since that's
+ * already part of our ABI.
+ */
+ if (xe_gt_is_media_type(gt) && MEDIA_VER(gt_to_xe(gt)) >= 30)
+ return false;
+
+ return true;
+}
+
static void
load_l3_bank_mask(struct xe_gt *gt, xe_l3_bank_mask_t l3_bank_mask)
{
@@ -129,16 +145,7 @@ load_l3_bank_mask(struct xe_gt *gt, xe_l3_bank_mask_t l3_bank_mask)
struct xe_mmio *mmio = &gt->mmio;
u32 fuse3 = xe_mmio_read32(mmio, MIRROR_FUSE3);
- /*
- * PTL platforms with media version 30.00 do not provide proper values
- * for the media GT's L3 bank registers. Skip the readout since we
- * don't have any way to obtain real values.
- *
- * This may get re-described as an official workaround in the future,
- * but there's no tracking number assigned yet so we use a custom
- * OOB workaround descriptor.
- */
- if (XE_WA(gt, no_media_l3))
+ if (!xe_gt_topology_report_l3(gt))
return;
if (GRAPHICS_VER(xe) >= 30) {
@@ -275,8 +282,9 @@ xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p)
drm_printf(p, "EU type: %s\n",
eu_type_to_str(gt->fuse_topo.eu_type));
- drm_printf(p, "L3 bank mask: %*pb\n", XE_MAX_L3_BANK_MASK_BITS,
- gt->fuse_topo.l3_bank_mask);
+ if (xe_gt_topology_report_l3(gt))
+ drm_printf(p, "L3 bank mask: %*pb\n", XE_MAX_L3_BANK_MASK_BITS,
+ gt->fuse_topo.l3_bank_mask);
}
/*
@@ -328,3 +336,19 @@ bool xe_gt_has_compute_dss(struct xe_gt *gt, unsigned int dss)
{
return test_bit(dss, gt->fuse_topo.c_dss_mask);
}
+
+bool xe_gt_has_discontiguous_dss_groups(const struct xe_gt *gt)
+{
+ unsigned int xecore;
+ int last_group = -1;
+ u16 group, instance;
+
+ for_each_dss_steering(xecore, gt, group, instance) {
+ if (last_group != group) {
+ if (group - last_group > 1)
+ return true;
+ last_group = group;
+ }
+ }
+ return false;
+}
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.h b/drivers/gpu/drm/xe/xe_gt_topology.h
index c8140704ad4c..5e62f5949b7b 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.h
+++ b/drivers/gpu/drm/xe/xe_gt_topology.h
@@ -47,4 +47,8 @@ xe_gt_topology_has_dss_in_quadrant(struct xe_gt *gt, int quad);
bool xe_gt_has_geometry_dss(struct xe_gt *gt, unsigned int dss);
bool xe_gt_has_compute_dss(struct xe_gt *gt, unsigned int dss);
+bool xe_gt_has_discontiguous_dss_groups(const struct xe_gt *gt);
+
+bool xe_gt_topology_report_l3(struct xe_gt *gt);
+
#endif /* _XE_GT_TOPOLOGY_H_ */
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index 96344c604726..66158105aca5 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -17,6 +17,7 @@
#include "xe_oa_types.h"
#include "xe_reg_sr_types.h"
#include "xe_sa_types.h"
+#include "xe_tlb_inval_types.h"
#include "xe_uc_types.h"
struct xe_exec_queue_ops;
@@ -185,34 +186,8 @@ struct xe_gt {
struct work_struct worker;
} reset;
- /** @tlb_invalidation: TLB invalidation state */
- struct {
- /** @tlb_invalidation.seqno: TLB invalidation seqno, protected by CT lock */
-#define TLB_INVALIDATION_SEQNO_MAX 0x100000
- int seqno;
- /**
- * @tlb_invalidation.seqno_recv: last received TLB invalidation seqno,
- * protected by CT lock
- */
- int seqno_recv;
- /**
- * @tlb_invalidation.pending_fences: list of pending fences waiting TLB
- * invaliations, protected by CT lock
- */
- struct list_head pending_fences;
- /**
- * @tlb_invalidation.pending_lock: protects @tlb_invalidation.pending_fences
- * and updating @tlb_invalidation.seqno_recv.
- */
- spinlock_t pending_lock;
- /**
- * @tlb_invalidation.fence_tdr: schedules a delayed call to
- * xe_gt_tlb_fence_timeout after the timeut interval is over.
- */
- struct delayed_work fence_tdr;
- /** @tlb_invalidation.lock: protects TLB invalidation fences */
- spinlock_t lock;
- } tlb_invalidation;
+ /** @tlb_inval: TLB invalidation state */
+ struct xe_tlb_inval tlb_inval;
/**
* @ccs_mode: Number of compute engines enabled.
@@ -411,7 +386,7 @@ struct xe_gt {
unsigned long *oob;
/**
* @wa_active.oob_initialized: mark oob as initialized to help
- * detecting misuse of XE_WA() - it can only be called on
+ * detecting misuse of XE_GT_WA() - it can only be called on
* initialization after OOB WAs have being processed
*/
bool oob_initialized;
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index b1d1d6da3758..00789844ea4d 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -16,6 +16,7 @@
#include "regs/xe_guc_regs.h"
#include "regs/xe_irq_regs.h"
#include "xe_bo.h"
+#include "xe_configfs.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
@@ -73,19 +74,22 @@ static u32 guc_ctl_debug_flags(struct xe_guc *guc)
if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
flags |= GUC_LOG_DISABLED;
else
- flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
- GUC_LOG_VERBOSITY_SHIFT;
+ flags |= FIELD_PREP(GUC_LOG_VERBOSITY, GUC_LOG_LEVEL_TO_VERBOSITY(level));
return flags;
}
static u32 guc_ctl_feature_flags(struct xe_guc *guc)
{
+ struct xe_device *xe = guc_to_xe(guc);
u32 flags = GUC_CTL_ENABLE_LITE_RESTORE;
- if (!guc_to_xe(guc)->info.skip_guc_pc)
+ if (!xe->info.skip_guc_pc)
flags |= GUC_CTL_ENABLE_SLPC;
+ if (xe_configfs_get_psmi_enabled(to_pci_dev(xe->drm.dev)))
+ flags |= GUC_CTL_ENABLE_PSMI_LOGGING;
+
return flags;
}
@@ -117,22 +121,14 @@ static u32 guc_ctl_log_params_flags(struct xe_guc *guc)
BUILD_BUG_ON(!CAPTURE_BUFFER_SIZE);
BUILD_BUG_ON(!IS_ALIGNED(CAPTURE_BUFFER_SIZE, CAPTURE_UNIT));
- BUILD_BUG_ON((CRASH_BUFFER_SIZE / LOG_UNIT - 1) >
- (GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
- BUILD_BUG_ON((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) >
- (GUC_LOG_DEBUG_MASK >> GUC_LOG_DEBUG_SHIFT));
- BUILD_BUG_ON((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) >
- (GUC_LOG_CAPTURE_MASK >> GUC_LOG_CAPTURE_SHIFT));
-
flags = GUC_LOG_VALID |
GUC_LOG_NOTIFY_ON_HALF_FULL |
CAPTURE_FLAG |
LOG_FLAG |
- ((CRASH_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
- ((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_DEBUG_SHIFT) |
- ((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) <<
- GUC_LOG_CAPTURE_SHIFT) |
- (offset << GUC_LOG_BUF_ADDR_SHIFT);
+ FIELD_PREP(GUC_LOG_CRASH, CRASH_BUFFER_SIZE / LOG_UNIT - 1) |
+ FIELD_PREP(GUC_LOG_DEBUG, DEBUG_BUFFER_SIZE / LOG_UNIT - 1) |
+ FIELD_PREP(GUC_LOG_CAPTURE, CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) |
+ FIELD_PREP(GUC_LOG_BUF_ADDR, offset);
#undef LOG_UNIT
#undef LOG_FLAG
@@ -145,7 +141,7 @@ static u32 guc_ctl_log_params_flags(struct xe_guc *guc)
static u32 guc_ctl_ads_flags(struct xe_guc *guc)
{
u32 ads = guc_bo_ggtt_addr(guc, guc->ads.bo) >> PAGE_SHIFT;
- u32 flags = ads << GUC_ADS_ADDR_SHIFT;
+ u32 flags = FIELD_PREP(GUC_ADS_ADDR, ads);
return flags;
}
@@ -157,7 +153,7 @@ static bool needs_wa_dual_queue(struct xe_gt *gt)
* on RCS and CCSes with different address spaces, which on DG2 is
* required as a WA for an HW bug.
*/
- if (XE_WA(gt, 22011391025))
+ if (XE_GT_WA(gt, 22011391025))
return true;
/*
@@ -184,10 +180,10 @@ static u32 guc_ctl_wa_flags(struct xe_guc *guc)
struct xe_gt *gt = guc_to_gt(guc);
u32 flags = 0;
- if (XE_WA(gt, 22012773006))
+ if (XE_GT_WA(gt, 22012773006))
flags |= GUC_WA_POLLCS;
- if (XE_WA(gt, 14014475959))
+ if (XE_GT_WA(gt, 14014475959))
flags |= GUC_WA_HOLD_CCS_SWITCHOUT;
if (needs_wa_dual_queue(gt))
@@ -201,19 +197,22 @@ static u32 guc_ctl_wa_flags(struct xe_guc *guc)
if (GRAPHICS_VERx100(xe) < 1270)
flags |= GUC_WA_PRE_PARSER;
- if (XE_WA(gt, 22012727170) || XE_WA(gt, 22012727685))
+ if (XE_GT_WA(gt, 22012727170) || XE_GT_WA(gt, 22012727685))
flags |= GUC_WA_CONTEXT_ISOLATION;
- if (XE_WA(gt, 18020744125) &&
+ if (XE_GT_WA(gt, 18020744125) &&
!xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_RENDER))
flags |= GUC_WA_RCS_REGS_IN_CCS_REGS_LIST;
- if (XE_WA(gt, 1509372804))
+ if (XE_GT_WA(gt, 1509372804))
flags |= GUC_WA_RENDER_RST_RC6_EXIT;
- if (XE_WA(gt, 14018913170))
+ if (XE_GT_WA(gt, 14018913170))
flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6;
+ if (XE_GT_WA(gt, 16023683509))
+ flags |= GUC_WA_SAVE_RESTORE_MCFG_REG_AT_MC6;
+
return flags;
}
@@ -701,10 +700,6 @@ static int xe_guc_realloc_post_hwconfig(struct xe_guc *guc)
if (ret)
return ret;
- ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->ct.bo);
- if (ret)
- return ret;
-
return 0;
}
@@ -839,6 +834,10 @@ int xe_guc_init_post_hwconfig(struct xe_guc *guc)
if (ret)
return ret;
+ ret = xe_guc_ct_init_post_hwconfig(&guc->ct);
+ if (ret)
+ return ret;
+
guc_init_params_post_hwconfig(guc);
ret = xe_guc_submit_init(guc, ~0);
@@ -880,9 +879,7 @@ int xe_guc_post_load_init(struct xe_guc *guc)
return ret;
}
- guc->submission_state.enabled = true;
-
- return 0;
+ return xe_guc_submit_enable(guc);
}
int xe_guc_reset(struct xe_guc *guc)
@@ -992,11 +989,14 @@ static int guc_load_done(u32 status)
case XE_GUC_LOAD_STATUS_GUC_PREPROD_BUILD_MISMATCH:
case XE_GUC_LOAD_STATUS_ERROR_DEVID_INVALID_GUCTYPE:
case XE_GUC_LOAD_STATUS_HWCONFIG_ERROR:
+ case XE_GUC_LOAD_STATUS_BOOTROM_VERSION_MISMATCH:
case XE_GUC_LOAD_STATUS_DPC_ERROR:
case XE_GUC_LOAD_STATUS_EXCEPTION:
case XE_GUC_LOAD_STATUS_INIT_DATA_INVALID:
case XE_GUC_LOAD_STATUS_MPU_DATA_INVALID:
case XE_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID:
+ case XE_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR:
+ case XE_GUC_LOAD_STATUS_INVALID_FTR_FLAG:
return -1;
}
@@ -1055,7 +1055,7 @@ static s32 guc_pc_get_cur_freq(struct xe_guc_pc *guc_pc)
#endif
#define GUC_LOAD_TIME_WARN_MS 200
-static void guc_wait_ucode(struct xe_guc *guc)
+static int guc_wait_ucode(struct xe_guc *guc)
{
struct xe_gt *gt = guc_to_gt(guc);
struct xe_mmio *mmio = &gt->mmio;
@@ -1136,21 +1136,33 @@ static void guc_wait_ucode(struct xe_guc *guc)
}
switch (ukernel) {
+ case XE_GUC_LOAD_STATUS_HWCONFIG_START:
+ xe_gt_err(gt, "still extracting hwconfig table.\n");
+ break;
+
case XE_GUC_LOAD_STATUS_EXCEPTION:
xe_gt_err(gt, "firmware exception. EIP: %#x\n",
xe_mmio_read32(mmio, SOFT_SCRATCH(13)));
break;
+ case XE_GUC_LOAD_STATUS_INIT_DATA_INVALID:
+ xe_gt_err(gt, "illegal init/ADS data\n");
+ break;
+
case XE_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID:
xe_gt_err(gt, "illegal register in save/restore workaround list\n");
break;
- case XE_GUC_LOAD_STATUS_HWCONFIG_START:
- xe_gt_err(gt, "still extracting hwconfig table.\n");
+ case XE_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR:
+ xe_gt_err(gt, "illegal workaround KLV data\n");
+ break;
+
+ case XE_GUC_LOAD_STATUS_INVALID_FTR_FLAG:
+ xe_gt_err(gt, "illegal feature flag specified\n");
break;
}
- xe_device_declare_wedged(gt_to_xe(gt));
+ return -EPROTO;
} else if (delta_ms > GUC_LOAD_TIME_WARN_MS) {
xe_gt_warn(gt, "excessive init time: %lldms! [status = 0x%08X, timeouts = %d]\n",
delta_ms, status, count);
@@ -1162,7 +1174,10 @@ static void guc_wait_ucode(struct xe_guc *guc)
delta_ms, xe_guc_pc_get_act_freq(guc_pc), guc_pc_get_cur_freq(guc_pc),
before_freq, status, count);
}
+
+ return 0;
}
+ALLOW_ERROR_INJECTION(guc_wait_ucode, ERRNO);
static int __xe_guc_upload(struct xe_guc *guc)
{
@@ -1194,14 +1209,16 @@ static int __xe_guc_upload(struct xe_guc *guc)
goto out;
/* Wait for authentication */
- guc_wait_ucode(guc);
+ ret = guc_wait_ucode(guc);
+ if (ret)
+ goto out;
xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_RUNNING);
return 0;
out:
xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOAD_FAIL);
- return 0 /* FIXME: ret, don't want to stop load currently */;
+ return ret;
}
static int vf_guc_min_load_for_hwconfig(struct xe_guc *guc)
@@ -1579,7 +1596,7 @@ void xe_guc_sanitize(struct xe_guc *guc)
{
xe_uc_fw_sanitize(&guc->fw);
xe_guc_ct_disable(&guc->ct);
- guc->submission_state.enabled = false;
+ xe_guc_submit_disable(guc);
}
int xe_guc_reset_prepare(struct xe_guc *guc)
@@ -1672,3 +1689,7 @@ void xe_guc_declare_wedged(struct xe_guc *guc)
xe_guc_ct_stop(&guc->ct);
xe_guc_submit_wedge(guc);
}
+
+#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
+#include "tests/xe_guc_g2g_test.c"
+#endif
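Several hunks above (and the xe_guc_fwif.h changes below) replace hand-rolled shift/mask macros with REG_GENMASK()/FIELD_PREP(), which derive the shift from the mask and let the compiler flag values that overflow the field. A minimal sketch using the generic <linux/bitfield.h> form; the field names here are illustrative, not the driver's:

/* Sketch: bitfield packing with GENMASK()/FIELD_PREP(). */
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define LOG_VERBOSITY	GENMASK(1, 0)	/* bits 1:0 */
#define LOG_BUF_ADDR	GENMASK(31, 12)	/* bits 31:12 */

static u32 pack_log_ctl(u32 verbosity, u32 offset)
{
	/* FIELD_PREP() shifts each value into place and masks it. */
	return FIELD_PREP(LOG_VERBOSITY, verbosity) |
	       FIELD_PREP(LOG_BUF_ADDR, offset);
}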
diff --git a/drivers/gpu/drm/xe/xe_guc.h b/drivers/gpu/drm/xe/xe_guc.h
index 22cf019a11bf..1cca05967e62 100644
--- a/drivers/gpu/drm/xe/xe_guc.h
+++ b/drivers/gpu/drm/xe/xe_guc.h
@@ -53,6 +53,10 @@ void xe_guc_stop(struct xe_guc *guc);
int xe_guc_start(struct xe_guc *guc);
void xe_guc_declare_wedged(struct xe_guc *guc);
+#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
+int xe_guc_g2g_test_notification(struct xe_guc *guc, u32 *payload, u32 len);
+#endif
+
static inline u16 xe_engine_class_to_guc_class(enum xe_engine_class class)
{
switch (class) {
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index 131cfc56be00..58e0b0294a5b 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -247,7 +247,7 @@ static size_t calculate_regset_size(struct xe_gt *gt)
count += ADS_REGSET_EXTRA_MAX * XE_NUM_HW_ENGINES;
- if (XE_WA(gt, 1607983814))
+ if (XE_GT_WA(gt, 1607983814))
count += LNCFCMOCS_REG_COUNT;
return count * sizeof(struct guc_mmio_reg);
@@ -284,52 +284,26 @@ static size_t calculate_golden_lrc_size(struct xe_guc_ads *ads)
return total_size;
}
-static void guc_waklv_enable_one_word(struct xe_guc_ads *ads,
- enum xe_guc_klv_ids klv_id,
- u32 value,
- u32 *offset, u32 *remain)
+static void guc_waklv_enable(struct xe_guc_ads *ads,
+ u32 data[], u32 data_len_dw,
+ u32 *offset, u32 *remain,
+ enum xe_guc_klv_ids klv_id)
{
- u32 size;
- u32 klv_entry[] = {
- /* 16:16 key/length */
- FIELD_PREP(GUC_KLV_0_KEY, klv_id) |
- FIELD_PREP(GUC_KLV_0_LEN, 1),
- value,
- /* 1 dword data */
- };
-
- size = sizeof(klv_entry);
+ size_t size = sizeof(u32) * (1 + data_len_dw);
if (*remain < size) {
drm_warn(&ads_to_xe(ads)->drm,
- "w/a klv buffer too small to add klv id %d\n", klv_id);
- } else {
- xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), *offset,
- klv_entry, size);
- *offset += size;
- *remain -= size;
+ "w/a klv buffer too small to add klv id 0x%04X\n", klv_id);
+ return;
}
-}
-static void guc_waklv_enable_simple(struct xe_guc_ads *ads,
- enum xe_guc_klv_ids klv_id, u32 *offset, u32 *remain)
-{
- u32 klv_entry[] = {
- /* 16:16 key/length */
- FIELD_PREP(GUC_KLV_0_KEY, klv_id) |
- FIELD_PREP(GUC_KLV_0_LEN, 0),
- /* 0 dwords data */
- };
- u32 size;
+ /* 16:16 key/length */
+ xe_map_wr(ads_to_xe(ads), ads_to_map(ads), *offset, u32,
+ FIELD_PREP(GUC_KLV_0_KEY, klv_id) | FIELD_PREP(GUC_KLV_0_LEN, data_len_dw));
+ /* data_len_dw dwords of data */
+ xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads),
+ *offset + sizeof(u32), data, data_len_dw * sizeof(u32));
- size = sizeof(klv_entry);
-
- if (xe_gt_WARN(ads_to_gt(ads), *remain < size,
- "w/a klv buffer too small to add klv id %d\n", klv_id))
- return;
-
- xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), *offset,
- klv_entry, size);
*offset += size;
*remain -= size;
}
@@ -343,44 +317,51 @@ static void guc_waklv_init(struct xe_guc_ads *ads)
offset = guc_ads_waklv_offset(ads);
remain = guc_ads_waklv_size(ads);
- if (XE_WA(gt, 14019882105) || XE_WA(gt, 16021333562))
- guc_waklv_enable_simple(ads,
- GUC_WORKAROUND_KLV_BLOCK_INTERRUPTS_WHEN_MGSR_BLOCKED,
- &offset, &remain);
- if (XE_WA(gt, 18024947630))
- guc_waklv_enable_simple(ads,
- GUC_WORKAROUND_KLV_ID_GAM_PFQ_SHADOW_TAIL_POLLING,
- &offset, &remain);
- if (XE_WA(gt, 16022287689))
- guc_waklv_enable_simple(ads,
- GUC_WORKAROUND_KLV_ID_DISABLE_MTP_DURING_ASYNC_COMPUTE,
- &offset, &remain);
-
- if (XE_WA(gt, 14022866841))
- guc_waklv_enable_simple(ads,
- GUC_WA_KLV_WAKE_POWER_DOMAINS_FOR_OUTBOUND_MMIO,
- &offset, &remain);
+ if (XE_GT_WA(gt, 14019882105) || XE_GT_WA(gt, 16021333562))
+ guc_waklv_enable(ads, NULL, 0, &offset, &remain,
+ GUC_WORKAROUND_KLV_BLOCK_INTERRUPTS_WHEN_MGSR_BLOCKED);
+ if (XE_GT_WA(gt, 18024947630))
+ guc_waklv_enable(ads, NULL, 0, &offset, &remain,
+ GUC_WORKAROUND_KLV_ID_GAM_PFQ_SHADOW_TAIL_POLLING);
+ if (XE_GT_WA(gt, 16022287689))
+ guc_waklv_enable(ads, NULL, 0, &offset, &remain,
+ GUC_WORKAROUND_KLV_ID_DISABLE_MTP_DURING_ASYNC_COMPUTE);
+
+ if (XE_GT_WA(gt, 14022866841))
+ guc_waklv_enable(ads, NULL, 0, &offset, &remain,
+ GUC_WA_KLV_WAKE_POWER_DOMAINS_FOR_OUTBOUND_MMIO);
/*
* On RC6 exit, GuC will write register 0xB04 with the default value provided. As of now,
* the default value for this register is determined to be 0xC40. This could change in the
* future, so GuC depends on KMD to send it the correct value.
*/
- if (XE_WA(gt, 13011645652))
- guc_waklv_enable_one_word(ads,
- GUC_WA_KLV_NP_RD_WRITE_TO_CLEAR_RCSM_AT_CGP_LATE_RESTORE,
- 0xC40,
- &offset, &remain);
-
- if (XE_WA(gt, 14022293748) || XE_WA(gt, 22019794406))
- guc_waklv_enable_simple(ads,
- GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET,
- &offset, &remain);
-
- if (GUC_FIRMWARE_VER(&gt->uc.guc) >= MAKE_GUC_VER(70, 44, 0) && XE_WA(gt, 16026508708))
- guc_waklv_enable_simple(ads,
- GUC_WA_KLV_RESET_BB_STACK_PTR_ON_VF_SWITCH,
- &offset, &remain);
+ if (XE_GT_WA(gt, 13011645652)) {
+ u32 data = 0xC40;
+
+ guc_waklv_enable(ads, &data, 1, &offset, &remain,
+ GUC_WA_KLV_NP_RD_WRITE_TO_CLEAR_RCSM_AT_CGP_LATE_RESTORE);
+ }
+
+ if (XE_GT_WA(gt, 14022293748) || XE_GT_WA(gt, 22019794406))
+ guc_waklv_enable(ads, NULL, 0, &offset, &remain,
+ GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET);
+
+ if (GUC_FIRMWARE_VER(&gt->uc.guc) >= MAKE_GUC_VER(70, 44, 0) && XE_GT_WA(gt, 16026508708))
+ guc_waklv_enable(ads, NULL, 0, &offset, &remain,
+ GUC_WA_KLV_RESET_BB_STACK_PTR_ON_VF_SWITCH);
+ if (GUC_FIRMWARE_VER(&gt->uc.guc) >= MAKE_GUC_VER(70, 47, 0) && XE_GT_WA(gt, 16026007364)) {
+ u32 data[] = {
+ 0x0,
+ 0xF,
+ };
+ guc_waklv_enable(ads, data, ARRAY_SIZE(data), &offset, &remain,
+ GUC_WA_KLV_RESTORE_UNSAVED_MEDIA_CONTROL_REG);
+ }
+
+ if (XE_GT_WA(gt, 14020001231))
+ guc_waklv_enable(ads, NULL, 0, &offset, &remain,
+ GUC_WORKAROUND_KLV_DISABLE_PSMI_INTERRUPTS_AT_C6_ENTRY_RESTORE_AT_EXIT);
size = guc_ads_waklv_size(ads) - remain;
if (!size)
@@ -784,7 +765,7 @@ static unsigned int guc_mmio_regset_write(struct xe_guc_ads *ads,
guc_mmio_regset_write_one(ads, regset_map, e->reg, count++);
}
- if (XE_WA(hwe->gt, 1607983814) && hwe->class == XE_ENGINE_CLASS_RENDER) {
+ if (XE_GT_WA(hwe->gt, 1607983814) && hwe->class == XE_ENGINE_CLASS_RENDER) {
for (i = 0; i < LNCFCMOCS_REG_COUNT; i++) {
guc_mmio_regset_write_one(ads, regset_map,
XELP_LNCFCMOCS(i), count++);
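For reference, the consolidated guc_waklv_enable() above emits a GuC KLV record: one header dword carrying a 16-bit key and a 16-bit length (in dwords), followed by that many dwords of payload. A hedged sketch of the layout, assuming the 31:16/15:0 split of the GUC_KLV_0_KEY/GUC_KLV_0_LEN masks:

/* Sketch: emit a key/length/value (KLV) record into a dword buffer. */
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/string.h>
#include <linux/types.h>

#define KLV_KEY	GENMASK(31, 16)	/* assumed layout, cf. GUC_KLV_0_KEY */
#define KLV_LEN	GENMASK(15, 0)	/* length in dwords, cf. GUC_KLV_0_LEN */

static u32 *klv_emit(u32 *buf, u16 key, const u32 *data, u16 len_dw)
{
	*buf++ = FIELD_PREP(KLV_KEY, key) | FIELD_PREP(KLV_LEN, len_dw);
	memcpy(buf, data, len_dw * sizeof(u32));
	return buf + len_dw;	/* next free slot */
}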
diff --git a/drivers/gpu/drm/xe/xe_guc_buf.c b/drivers/gpu/drm/xe/xe_guc_buf.c
index 14a07dca48e7..502ca3a4ee60 100644
--- a/drivers/gpu/drm/xe/xe_guc_buf.c
+++ b/drivers/gpu/drm/xe/xe_guc_buf.c
@@ -164,7 +164,7 @@ u64 xe_guc_cache_gpu_addr_from_ptr(struct xe_guc_buf_cache *cache, const void *p
if (offset < 0 || offset + size > cache->sam->base.size)
return 0;
- return cache->sam->gpu_addr + offset;
+ return xe_sa_manager_gpu_addr(cache->sam) + offset;
}
#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
diff --git a/drivers/gpu/drm/xe/xe_guc_capture.c b/drivers/gpu/drm/xe/xe_guc_capture.c
index 859a3ba91be5..243dad3e2418 100644
--- a/drivers/gpu/drm/xe/xe_guc_capture.c
+++ b/drivers/gpu/drm/xe/xe_guc_capture.c
@@ -1817,6 +1817,12 @@ void xe_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot, struct drm
str_yes_no(snapshot->kernel_reserved));
for (type = GUC_STATE_CAPTURE_TYPE_GLOBAL; type < GUC_STATE_CAPTURE_TYPE_MAX; type++) {
+ /*
+	 * FIXME: During devcoredump printing we should avoid accessing
+	 * driver pointers for the gt or engine; printing should rely only
+	 * on the captured snapshot. The gt pointer is still accessed here
+	 * and should be fixed.
+ */
list = xe_guc_capture_get_reg_desc_list(gt, GUC_CAPTURE_LIST_INDEX_PF, type,
capture_class, false);
snapshot_print_by_list_order(snapshot, p, type, list);
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 3f4e6a46ff16..18f6327bf552 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -26,11 +26,11 @@
#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_pf_monitor.h"
#include "xe_gt_sriov_printk.h"
-#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
#include "xe_guc_log.h"
#include "xe_guc_relay.h"
#include "xe_guc_submit.h"
+#include "xe_guc_tlb_inval.h"
#include "xe_map.h"
#include "xe_pm.h"
#include "xe_trace_guc.h"
@@ -39,6 +39,8 @@ static void receive_g2h(struct xe_guc_ct *ct);
static void g2h_worker_func(struct work_struct *w);
static void safe_mode_worker_func(struct work_struct *w);
static void ct_exit_safe_mode(struct xe_guc_ct *ct);
+static void guc_ct_change_state(struct xe_guc_ct *ct,
+ enum xe_guc_ct_state state);
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
enum {
@@ -252,6 +254,13 @@ int xe_guc_ct_init_noalloc(struct xe_guc_ct *ct)
}
ALLOW_ERROR_INJECTION(xe_guc_ct_init_noalloc, ERRNO); /* See xe_pci_probe() */
+static void guc_action_disable_ct(void *arg)
+{
+ struct xe_guc_ct *ct = arg;
+
+ guc_ct_change_state(ct, XE_GUC_CT_STATE_DISABLED);
+}
+
int xe_guc_ct_init(struct xe_guc_ct *ct)
{
struct xe_device *xe = ct_to_xe(ct);
@@ -268,10 +277,39 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
return PTR_ERR(bo);
ct->bo = bo;
- return 0;
+
+ return devm_add_action_or_reset(xe->drm.dev, guc_action_disable_ct, ct);
}
ALLOW_ERROR_INJECTION(xe_guc_ct_init, ERRNO); /* See xe_pci_probe() */
+/**
+ * xe_guc_ct_init_post_hwconfig - Reinitialize the GuC CTB in VRAM
+ * @ct: the &xe_guc_ct
+ *
+ * Allocate a new BO in VRAM and free the previous BO that was allocated
+ * in system memory (SMEM). Applicable only for DGFX products.
+ *
+ * Return: 0 on success, or a negative errno on failure.
+ */
+int xe_guc_ct_init_post_hwconfig(struct xe_guc_ct *ct)
+{
+ struct xe_device *xe = ct_to_xe(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
+ struct xe_tile *tile = gt_to_tile(gt);
+ int ret;
+
+ xe_assert(xe, !xe_guc_ct_enabled(ct));
+
+ if (IS_DGFX(xe)) {
+ ret = xe_managed_bo_reinit_in_vram(xe, tile, &ct->bo);
+ if (ret)
+ return ret;
+ }
+
+ devm_remove_action(xe->drm.dev, guc_action_disable_ct, ct);
+ return devm_add_action_or_reset(xe->drm.dev, guc_action_disable_ct, ct);
+}
+
#define desc_read(xe_, guc_ctb__, field_) \
xe_map_rd_field(xe_, &guc_ctb__->desc, 0, \
struct guc_ct_buffer_desc, field_)
@@ -1040,11 +1078,15 @@ static bool retry_failure(struct xe_guc_ct *ct, int ret)
return true;
}
+#define GUC_SEND_RETRY_LIMIT 50
+#define GUC_SEND_RETRY_MSLEEP 5
+
static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
u32 *response_buffer, bool no_fail)
{
struct xe_gt *gt = ct_to_gt(ct);
struct g2h_fence g2h_fence;
+ unsigned int retries = 0;
int ret = 0;
/*
@@ -1109,6 +1151,12 @@ retry_same_fence:
xe_gt_dbg(gt, "H2G action %#x retrying: reason %#x\n",
action[0], g2h_fence.reason);
mutex_unlock(&ct->lock);
+ if (++retries > GUC_SEND_RETRY_LIMIT) {
+ xe_gt_err(gt, "H2G action %#x reached retry limit=%u, aborting\n",
+ action[0], GUC_SEND_RETRY_LIMIT);
+ return -ELOOP;
+ }
+ msleep(GUC_SEND_RETRY_MSLEEP * retries);
goto retry;
}
if (g2h_fence.fail) {
@@ -1416,8 +1464,7 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
ret = xe_guc_pagefault_handler(guc, payload, adj_len);
break;
case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
- ret = xe_guc_tlb_invalidation_done_handler(guc, payload,
- adj_len);
+ ret = xe_guc_tlb_inval_done_handler(guc, payload, adj_len);
break;
case XE_GUC_ACTION_ACCESS_COUNTER_NOTIFY:
ret = xe_guc_access_counter_notify_handler(guc, payload,
@@ -1439,6 +1486,11 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
case XE_GUC_ACTION_NOTIFY_EXCEPTION:
ret = guc_crash_process_msg(ct, action);
break;
+#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
+ case XE_GUC_ACTION_TEST_G2G_RECV:
+ ret = xe_guc_g2g_test_notification(guc, payload, adj_len);
+ break;
+#endif
default:
xe_gt_err(gt, "unexpected G2H action 0x%04x\n", action);
}
@@ -1618,8 +1670,7 @@ static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
break;
case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
__g2h_release_space(ct, len);
- ret = xe_guc_tlb_invalidation_done_handler(guc, payload,
- adj_len);
+ ret = xe_guc_tlb_inval_done_handler(guc, payload, adj_len);
break;
default:
xe_gt_warn(gt, "NOT_POSSIBLE");
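The retry path added to guc_ct_send_recv() above caps H2G retries at GUC_SEND_RETRY_LIMIT and sleeps GUC_SEND_RETRY_MSLEEP * retries between attempts, i.e. a linear backoff. A standalone sketch of the control flow; the send callback and its -EAGAIN retry signal are assumptions, the real code retries on a GuC-supplied reason:

/* Sketch: bounded retry with linear backoff, as in guc_ct_send_recv(). */
#include <linux/delay.h>
#include <linux/errno.h>

#define RETRY_LIMIT	50
#define RETRY_MSLEEP	5

static int send_with_retry(int (*try_send)(void))
{
	unsigned int retries = 0;
	int ret;

	while ((ret = try_send()) == -EAGAIN) {	/* assumed retry signal */
		if (++retries > RETRY_LIMIT)
			return -ELOOP;		/* give up, as above */
		msleep(RETRY_MSLEEP * retries);	/* linear backoff */
	}
	return ret;
}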
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h
index 18d4225e6502..cf41210ab30a 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct.h
@@ -13,6 +13,7 @@ struct xe_device;
int xe_guc_ct_init_noalloc(struct xe_guc_ct *ct);
int xe_guc_ct_init(struct xe_guc_ct *ct);
+int xe_guc_ct_init_post_hwconfig(struct xe_guc_ct *ct);
int xe_guc_ct_enable(struct xe_guc_ct *ct);
void xe_guc_ct_disable(struct xe_guc_ct *ct);
void xe_guc_ct_stop(struct xe_guc_ct *ct);
diff --git a/drivers/gpu/drm/xe/xe_guc_engine_activity.c b/drivers/gpu/drm/xe/xe_guc_engine_activity.c
index 92e1f9f41b8c..2b99c1ebdd58 100644
--- a/drivers/gpu/drm/xe/xe_guc_engine_activity.c
+++ b/drivers/gpu/drm/xe/xe_guc_engine_activity.c
@@ -94,16 +94,17 @@ static int allocate_engine_activity_buffers(struct xe_guc *guc,
struct xe_tile *tile = gt_to_tile(gt);
struct xe_bo *bo, *metadata_bo;
- metadata_bo = xe_bo_create_pin_map(gt_to_xe(gt), tile, NULL, PAGE_ALIGN(metadata_size),
- ttm_bo_type_kernel, XE_BO_FLAG_SYSTEM |
- XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_INVALIDATE);
+ metadata_bo = xe_bo_create_pin_map_novm(gt_to_xe(gt), tile, PAGE_ALIGN(metadata_size),
+ ttm_bo_type_kernel, XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_INVALIDATE,
+ false);
if (IS_ERR(metadata_bo))
return PTR_ERR(metadata_bo);
- bo = xe_bo_create_pin_map(gt_to_xe(gt), tile, NULL, PAGE_ALIGN(size),
- ttm_bo_type_kernel, XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_INVALIDATE);
+ bo = xe_bo_create_pin_map_novm(gt_to_xe(gt), tile, PAGE_ALIGN(size),
+ ttm_bo_type_kernel, XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_INVALIDATE, false);
if (IS_ERR(bo)) {
xe_bo_unpin_map_no_vm(metadata_bo);
diff --git a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
index a3f421e2adc0..c30c0e3ccbbb 100644
--- a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
@@ -35,8 +35,8 @@ struct xe_guc_exec_queue {
struct xe_sched_msg static_msgs[MAX_STATIC_MSG_TYPE];
/** @lr_tdr: long running TDR worker */
struct work_struct lr_tdr;
- /** @fini_async: do final fini async from this worker */
- struct work_struct fini_async;
+ /** @destroy_async: do final destroy async from this worker */
+ struct work_struct destroy_async;
/** @resume_time: time of last resume */
u64 resume_time;
/** @state: GuC specific state for this xe_exec_queue */
diff --git a/drivers/gpu/drm/xe/xe_guc_fwif.h b/drivers/gpu/drm/xe/xe_guc_fwif.h
index 6f57578b07cb..50c4c2406132 100644
--- a/drivers/gpu/drm/xe/xe_guc_fwif.h
+++ b/drivers/gpu/drm/xe/xe_guc_fwif.h
@@ -15,6 +15,7 @@
#define G2H_LEN_DW_SCHED_CONTEXT_MODE_SET 4
#define G2H_LEN_DW_DEREGISTER_CONTEXT 3
#define G2H_LEN_DW_TLB_INVALIDATE 3
+#define G2H_LEN_DW_G2G_NOTIFY_MIN 3
#define GUC_ID_MAX 65535
#define GUC_ID_UNKNOWN 0xffffffff
@@ -45,6 +46,11 @@
#define GUC_MAX_ENGINE_CLASSES 16
#define GUC_MAX_INSTANCES_PER_CLASS 32
+#define GUC_CONTEXT_NORMAL 0
+#define GUC_CONTEXT_COMPRESSION_SAVE 1
+#define GUC_CONTEXT_COMPRESSION_RESTORE 2
+#define GUC_CONTEXT_COUNT (GUC_CONTEXT_COMPRESSION_RESTORE + 1)
+
/* Helper for context registration H2G */
struct guc_ctxt_registration_info {
u32 flags;
@@ -60,6 +66,7 @@ struct guc_ctxt_registration_info {
u32 hwlrca_hi;
};
#define CONTEXT_REGISTRATION_FLAG_KMD BIT(0)
+#define CONTEXT_REGISTRATION_FLAG_TYPE GENMASK(2, 1)
/* 32-bit KLV structure as used by policy updates and others */
struct guc_klv_generic_dw_t {
@@ -84,13 +91,10 @@ struct guc_update_exec_queue_policy {
#define GUC_LOG_NOTIFY_ON_HALF_FULL BIT(1)
#define GUC_LOG_CAPTURE_ALLOC_UNITS BIT(2)
#define GUC_LOG_LOG_ALLOC_UNITS BIT(3)
-#define GUC_LOG_CRASH_SHIFT 4
-#define GUC_LOG_CRASH_MASK (0x3 << GUC_LOG_CRASH_SHIFT)
-#define GUC_LOG_DEBUG_SHIFT 6
-#define GUC_LOG_DEBUG_MASK (0xF << GUC_LOG_DEBUG_SHIFT)
-#define GUC_LOG_CAPTURE_SHIFT 10
-#define GUC_LOG_CAPTURE_MASK (0x3 << GUC_LOG_CAPTURE_SHIFT)
-#define GUC_LOG_BUF_ADDR_SHIFT 12
+#define GUC_LOG_CRASH REG_GENMASK(5, 4)
+#define GUC_LOG_DEBUG REG_GENMASK(9, 6)
+#define GUC_LOG_CAPTURE REG_GENMASK(11, 10)
+#define GUC_LOG_BUF_ADDR REG_GENMASK(31, 12)
#define GUC_CTL_WA 1
#define GUC_WA_GAM_CREDITS BIT(10)
@@ -103,28 +107,23 @@ struct guc_update_exec_queue_policy {
#define GUC_WA_RENDER_RST_RC6_EXIT BIT(19)
#define GUC_WA_RCS_REGS_IN_CCS_REGS_LIST BIT(21)
#define GUC_WA_ENABLE_TSC_CHECK_ON_RC6 BIT(22)
+#define GUC_WA_SAVE_RESTORE_MCFG_REG_AT_MC6 BIT(25)
#define GUC_CTL_FEATURE 2
#define GUC_CTL_ENABLE_SLPC BIT(2)
#define GUC_CTL_ENABLE_LITE_RESTORE BIT(4)
+#define GUC_CTL_ENABLE_PSMI_LOGGING BIT(7)
#define GUC_CTL_DISABLE_SCHEDULER BIT(14)
#define GUC_CTL_DEBUG 3
-#define GUC_LOG_VERBOSITY_SHIFT 0
-#define GUC_LOG_VERBOSITY_LOW (0 << GUC_LOG_VERBOSITY_SHIFT)
-#define GUC_LOG_VERBOSITY_MED (1 << GUC_LOG_VERBOSITY_SHIFT)
-#define GUC_LOG_VERBOSITY_HIGH (2 << GUC_LOG_VERBOSITY_SHIFT)
-#define GUC_LOG_VERBOSITY_ULTRA (3 << GUC_LOG_VERBOSITY_SHIFT)
-#define GUC_LOG_VERBOSITY_MIN 0
+#define GUC_LOG_VERBOSITY REG_GENMASK(1, 0)
#define GUC_LOG_VERBOSITY_MAX 3
-#define GUC_LOG_VERBOSITY_MASK 0x0000000f
-#define GUC_LOG_DESTINATION_MASK (3 << 4)
-#define GUC_LOG_DISABLED (1 << 6)
-#define GUC_PROFILE_ENABLED (1 << 7)
+#define GUC_LOG_DESTINATION REG_GENMASK(5, 4)
+#define GUC_LOG_DISABLED BIT(6)
+#define GUC_PROFILE_ENABLED BIT(7)
#define GUC_CTL_ADS 4
-#define GUC_ADS_ADDR_SHIFT 1
-#define GUC_ADS_ADDR_MASK (0xFFFFF << GUC_ADS_ADDR_SHIFT)
+#define GUC_ADS_ADDR REG_GENMASK(21, 1)
#define GUC_CTL_DEVID 5
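The hunk above converts hand-rolled SHIFT/MASK pairs into REG_GENMASK() fields, which are packed and unpacked with REG_FIELD_PREP()/REG_FIELD_GET() instead of manual shifts. A minimal standalone sketch of the same pattern, with local stand-ins for the kernel helpers so it compiles on its own (the GUC_LOG_* values mirror the defines above; the BUF_ADDR payload is an arbitrary example):

#include <stdint.h>
#include <stdio.h>

/* Standalone stand-ins for the kernel's GENMASK()/FIELD_PREP() helpers. */
#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))

/* Mirrors of the defines above. */
#define GUC_LOG_CRASH		GENMASK(5, 4)
#define GUC_LOG_DEBUG		GENMASK(9, 6)
#define GUC_LOG_BUF_ADDR	GENMASK(31, 12)

int main(void)
{
	/* Old style would be: (2 << GUC_LOG_CRASH_SHIFT) | (5 << GUC_LOG_DEBUG_SHIFT) */
	uint32_t params = FIELD_PREP(GUC_LOG_CRASH, 2) |
			  FIELD_PREP(GUC_LOG_DEBUG, 5) |
			  FIELD_PREP(GUC_LOG_BUF_ADDR, 0x12345);

	printf("params=0x%08x crash=%u debug=%u addr=0x%x\n", params,
	       FIELD_GET(GUC_LOG_CRASH, params),
	       FIELD_GET(GUC_LOG_DEBUG, params),
	       FIELD_GET(GUC_LOG_BUF_ADDR, params));
	return 0;
}

The win is that each field position now lives in exactly one place; widening GENMASK(5, 4) to GENMASK(6, 4) needs no matching edit to a separate *_SHIFT define.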
diff --git a/drivers/gpu/drm/xe/xe_guc_log.h b/drivers/gpu/drm/xe/xe_guc_log.h
index f1e2b0be90a9..98a47ac42b08 100644
--- a/drivers/gpu/drm/xe/xe_guc_log.h
+++ b/drivers/gpu/drm/xe/xe_guc_log.h
@@ -17,7 +17,7 @@ struct xe_device;
#define DEBUG_BUFFER_SIZE SZ_8M
#define CAPTURE_BUFFER_SIZE SZ_2M
#else
-#define CRASH_BUFFER_SIZE SZ_8K
+#define CRASH_BUFFER_SIZE SZ_16K
#define DEBUG_BUFFER_SIZE SZ_64K
#define CAPTURE_BUFFER_SIZE SZ_1M
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index 68b192fe3b32..53fdf59524c4 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -79,6 +79,11 @@
* Xe driver enables SLPC with all of its defaults features and frequency
* selection, which varies per platform.
*
+ * Power profiles add another level of control to SLPC. When the power-saving
+ * profile is chosen, SLPC uses conservative thresholds to ramp frequency,
+ * thus saving power. The base profile is the default and ensures balanced
+ * performance for any workload.
+ *
* Render-C States:
* ================
*
@@ -722,7 +727,7 @@ static int xe_guc_pc_set_max_freq_locked(struct xe_guc_pc *pc, u32 freq)
*/
int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
- if (XE_WA(pc_to_gt(pc), 22019338487)) {
+ if (XE_GT_WA(pc_to_gt(pc), 22019338487)) {
if (wait_for_flush_complete(pc) != 0)
return -EAGAIN;
}
@@ -835,7 +840,7 @@ static u32 pc_max_freq_cap(struct xe_guc_pc *pc)
{
struct xe_gt *gt = pc_to_gt(pc);
- if (XE_WA(gt, 22019338487)) {
+ if (XE_GT_WA(gt, 22019338487)) {
if (xe_gt_is_media_type(gt))
return min(LNL_MERT_FREQ_CAP, pc->rp0_freq);
else
@@ -899,7 +904,7 @@ static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
if (pc_get_min_freq(pc) > pc->rp0_freq)
ret = pc_set_min_freq(pc, pc->rp0_freq);
- if (XE_WA(tile->primary_gt, 14022085890))
+ if (XE_GT_WA(tile->primary_gt, 14022085890))
ret = pc_set_min_freq(pc, max(BMG_MIN_FREQ, pc_get_min_freq(pc)));
out:
@@ -931,7 +936,7 @@ static bool needs_flush_freq_limit(struct xe_guc_pc *pc)
{
struct xe_gt *gt = pc_to_gt(pc);
- return XE_WA(gt, 22019338487) &&
+ return XE_GT_WA(gt, 22019338487) &&
pc->rp0_freq > BMG_MERT_FLUSH_FREQ_CAP;
}
@@ -1017,7 +1022,7 @@ static int pc_set_mert_freq_cap(struct xe_guc_pc *pc)
{
int ret;
- if (!XE_WA(pc_to_gt(pc), 22019338487))
+ if (!XE_GT_WA(pc_to_gt(pc), 22019338487))
return 0;
guard(mutex)(&pc->freq_lock);
@@ -1076,7 +1081,6 @@ int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
{
struct xe_device *xe = pc_to_xe(pc);
struct xe_gt *gt = pc_to_gt(pc);
- unsigned int fw_ref;
int ret = 0;
if (xe->info.skip_guc_pc)
@@ -1086,17 +1090,7 @@ int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
if (ret)
return ret;
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
- return -ETIMEDOUT;
- }
-
- xe_gt_idle_disable_c6(gt);
-
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
-
- return 0;
+ return xe_gt_idle_disable_c6(gt);
}
/**
@@ -1182,6 +1176,61 @@ static int pc_action_set_strategy(struct xe_guc_pc *pc, u32 val)
return ret;
}
+static const char *power_profile_to_string(struct xe_guc_pc *pc)
+{
+ switch (pc->power_profile) {
+ case SLPC_POWER_PROFILE_BASE:
+ return "base";
+ case SLPC_POWER_PROFILE_POWER_SAVING:
+ return "power_saving";
+ default:
+ return "invalid";
+ }
+}
+
+void xe_guc_pc_get_power_profile(struct xe_guc_pc *pc, char *profile)
+{
+ switch (pc->power_profile) {
+ case SLPC_POWER_PROFILE_BASE:
+ sprintf(profile, "[%s] %s\n", "base", "power_saving");
+ break;
+ case SLPC_POWER_PROFILE_POWER_SAVING:
+ sprintf(profile, "%s [%s]\n", "base", "power_saving");
+ break;
+ default:
+ sprintf(profile, "invalid");
+ }
+}
+
+int xe_guc_pc_set_power_profile(struct xe_guc_pc *pc, const char *buf)
+{
+ int ret = 0;
+ u32 val;
+
+ if (strncmp("base", buf, strlen("base")) == 0)
+ val = SLPC_POWER_PROFILE_BASE;
+ else if (strncmp("power_saving", buf, strlen("power_saving")) == 0)
+ val = SLPC_POWER_PROFILE_POWER_SAVING;
+ else
+ return -EINVAL;
+
+ guard(mutex)(&pc->freq_lock);
+ xe_pm_runtime_get_noresume(pc_to_xe(pc));
+
+ ret = pc_action_set_param(pc,
+ SLPC_PARAM_POWER_PROFILE,
+ val);
+ if (ret)
+ xe_gt_err_once(pc_to_gt(pc), "Failed to set power profile to %d: %pe\n",
+ val, ERR_PTR(ret));
+ else
+ pc->power_profile = val;
+
+ xe_pm_runtime_put(pc_to_xe(pc));
+
+ return ret;
+}
+
/**
* xe_guc_pc_start - Start GuC's Power Conservation component
* @pc: Xe_GuC_PC instance
@@ -1260,6 +1309,11 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
/* Enable SLPC Optimized Strategy for compute */
ret = pc_action_set_strategy(pc, SLPC_OPTIMIZED_STRATEGY_COMPUTE);
+ /* Set cached value of power_profile */
+ ret = xe_guc_pc_set_power_profile(pc, power_profile_to_string(pc));
+ if (unlikely(ret))
+ xe_gt_err(gt, "Failed to set SLPC power profile: %pe\n", ERR_PTR(ret));
+
out:
xe_force_wake_put(gt_to_fw(gt), fw_ref);
return ret;
@@ -1338,6 +1392,8 @@ int xe_guc_pc_init(struct xe_guc_pc *pc)
pc->bo = bo;
+ pc->power_profile = SLPC_POWER_PROFILE_BASE;
+
return devm_add_action_or_reset(xe->drm.dev, xe_guc_pc_fini_hw, pc);
}
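xe_guc_pc_get_power_profile() above formats the available profiles with the active one bracketed, and xe_guc_pc_set_power_profile() parses a name by prefix match before sending SLPC_PARAM_POWER_PROFILE. A hedged userspace sketch of the same round trip (the sysfs wiring that calls these helpers is outside this diff; names here are local to the sketch):

#include <stdio.h>
#include <string.h>

/* Mirrors the SLPC profile values used above. */
enum { PROFILE_BASE = 0, PROFILE_POWER_SAVING = 1 };

/* Format like xe_guc_pc_get_power_profile(): current choice in brackets. */
static void show_profile(int cur, char *buf, size_t len)
{
	if (cur == PROFILE_BASE)
		snprintf(buf, len, "[base] power_saving\n");
	else
		snprintf(buf, len, "base [power_saving]\n");
}

/* Parse like xe_guc_pc_set_power_profile(): prefix match on the name. */
static int parse_profile(const char *buf, int *out)
{
	if (!strncmp(buf, "base", strlen("base")))
		*out = PROFILE_BASE;
	else if (!strncmp(buf, "power_saving", strlen("power_saving")))
		*out = PROFILE_POWER_SAVING;
	else
		return -1;
	return 0;
}

int main(void)
{
	char buf[64];
	int cur = PROFILE_BASE;

	if (!parse_profile("power_saving\n", &cur)) {
		show_profile(cur, buf, sizeof(buf));
		fputs(buf, stdout);	/* prints: base [power_saving] */
	}
	return 0;
}

Note that, as in the driver's strncmp() use, a prefix match also accepts longer strings that merely begin with a valid profile name.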
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.h b/drivers/gpu/drm/xe/xe_guc_pc.h
index 52ecdd5ddbff..0e31396f103c 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.h
+++ b/drivers/gpu/drm/xe/xe_guc_pc.h
@@ -31,6 +31,8 @@ int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq);
int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq);
int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq);
int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq);
+int xe_guc_pc_set_power_profile(struct xe_guc_pc *pc, const char *buf);
+void xe_guc_pc_get_power_profile(struct xe_guc_pc *pc, char *profile);
enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc);
u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc);
diff --git a/drivers/gpu/drm/xe/xe_guc_pc_types.h b/drivers/gpu/drm/xe/xe_guc_pc_types.h
index c02053948a57..5e4ea53fbee6 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_pc_types.h
@@ -37,6 +37,8 @@ struct xe_guc_pc {
struct mutex freq_lock;
/** @freq_ready: Only handle freq changes, if they are really ready */
bool freq_ready;
+ /** @power_profile: Base or power_saving profile */
+ u32 power_profile;
};
#endif /* _XE_GUC_PC_TYPES_H_ */
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index cafb47711e9b..53024eb5670b 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -32,6 +32,7 @@
#include "xe_guc_ct.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_guc_id_mgr.h"
+#include "xe_guc_klv_helpers.h"
#include "xe_guc_submit_types.h"
#include "xe_hw_engine.h"
#include "xe_hw_fence.h"
@@ -316,6 +317,71 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids)
return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc);
}
+/*
+ * Given that we want to guarantee enough RCS throughput to avoid missing
+ * frames, we set the yield policy to 20% of each 80ms interval.
+ */
+#define RC_YIELD_DURATION 80 /* in ms */
+#define RC_YIELD_RATIO 20 /* in percent */
+static u32 *emit_render_compute_yield_klv(u32 *emit)
+{
+ *emit++ = PREP_GUC_KLV_TAG(SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD);
+ *emit++ = RC_YIELD_DURATION;
+ *emit++ = RC_YIELD_RATIO;
+
+ return emit;
+}
+
+#define SCHEDULING_POLICY_MAX_DWORDS 16
+static int guc_init_global_schedule_policy(struct xe_guc *guc)
+{
+ u32 data[SCHEDULING_POLICY_MAX_DWORDS];
+ u32 *emit = data;
+ u32 count = 0;
+ int ret;
+
+ if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER(1, 1, 0))
+ return 0;
+
+ *emit++ = XE_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV;
+
+ if (CCS_MASK(guc_to_gt(guc)))
+ emit = emit_render_compute_yield_klv(emit);
+
+ count = emit - data;
+ if (count > 1) {
+ xe_assert(guc_to_xe(guc), count <= SCHEDULING_POLICY_MAX_DWORDS);
+
+ ret = xe_guc_ct_send_block(&guc->ct, data, count);
+ if (ret < 0) {
+ xe_gt_err(guc_to_gt(guc),
+ "failed to enable GuC scheduling policies: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
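Each KLV contributed by emit_render_compute_yield_klv() is one header dword (key plus length in value dwords) followed by the values, so the yield policy adds three dwords after the action dword, and the count > 1 test above skips the H2G send when no KLV was appended. A standalone sketch of that buffer layout (the 16:16 key/length split follows the generic KLV dword; the key value itself is a made-up placeholder, the driver's PREP_GUC_KLV_TAG() supplies the real one):

#include <stdint.h>
#include <stdio.h>

/* Generic GuC KLV header: key in bits 31:16, length in dwords in 15:0
 * (hedged: this mirrors the generic KLV dword layout, which the driver
 * hides behind PREP_GUC_KLV_TAG()). */
#define KLV_HDR(key, len)	((uint32_t)(key) << 16 | ((len) & 0xffff))

#define YIELD_KEY		0x1001	/* assumption: illustrative key only */
#define RC_YIELD_DURATION	80	/* ms, as above */
#define RC_YIELD_RATIO		20	/* percent, as above */

int main(void)
{
	uint32_t msg[8];
	unsigned int n = 0;

	msg[n++] = 0xdead;			/* H2G action placeholder */
	msg[n++] = KLV_HDR(YIELD_KEY, 2);	/* 2 value dwords follow */
	msg[n++] = RC_YIELD_DURATION;
	msg[n++] = RC_YIELD_RATIO;

	/* Only send if at least one KLV was appended (count > 1 above). */
	if (n > 1)
		printf("sending %u dwords, hdr=0x%08x\n", n, msg[1]);
	return 0;
}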
+int xe_guc_submit_enable(struct xe_guc *guc)
+{
+ int ret;
+
+ ret = guc_init_global_schedule_policy(guc);
+ if (ret)
+ return ret;
+
+ guc->submission_state.enabled = true;
+
+ return 0;
+}
+
+void xe_guc_submit_disable(struct xe_guc *guc)
+{
+ guc->submission_state.enabled = false;
+}
+
static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa_count)
{
int i;
@@ -542,7 +608,7 @@ static void __register_exec_queue(struct xe_guc *guc,
xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
}
-static void register_exec_queue(struct xe_exec_queue *q)
+static void register_exec_queue(struct xe_exec_queue *q, int ctx_type)
{
struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_device *xe = guc_to_xe(guc);
@@ -550,6 +616,7 @@ static void register_exec_queue(struct xe_exec_queue *q)
struct guc_ctxt_registration_info info;
xe_gt_assert(guc_to_gt(guc), !exec_queue_registered(q));
+ xe_gt_assert(guc_to_gt(guc), ctx_type < GUC_CONTEXT_COUNT);
memset(&info, 0, sizeof(info));
info.context_idx = q->guc->id;
@@ -557,7 +624,8 @@ static void register_exec_queue(struct xe_exec_queue *q)
info.engine_submit_mask = q->logical_mask;
info.hwlrca_lo = lower_32_bits(xe_lrc_descriptor(lrc));
info.hwlrca_hi = upper_32_bits(xe_lrc_descriptor(lrc));
- info.flags = CONTEXT_REGISTRATION_FLAG_KMD;
+ info.flags = CONTEXT_REGISTRATION_FLAG_KMD |
+ FIELD_PREP(CONTEXT_REGISTRATION_FLAG_TYPE, ctx_type);
if (xe_exec_queue_is_parallel(q)) {
u64 ggtt_addr = xe_lrc_parallel_ggtt_addr(lrc);
@@ -667,12 +735,18 @@ static void wq_item_append(struct xe_exec_queue *q)
if (wq_wait_for_space(q, wqi_size))
return;
+ xe_gt_assert(guc_to_gt(guc), i == XE_GUC_CONTEXT_WQ_HEADER_DATA_0_TYPE_LEN);
wqi[i++] = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) |
FIELD_PREP(WQ_LEN_MASK, len_dw);
+ xe_gt_assert(guc_to_gt(guc), i == XE_GUC_CONTEXT_WQ_EL_INFO_DATA_1_CTX_DESC_LOW);
wqi[i++] = xe_lrc_descriptor(q->lrc[0]);
+ xe_gt_assert(guc_to_gt(guc), i ==
+ XE_GUC_CONTEXT_WQ_EL_INFO_DATA_2_GUCCTX_RINGTAIL_FREEZEPOCS);
wqi[i++] = FIELD_PREP(WQ_GUC_ID_MASK, q->guc->id) |
FIELD_PREP(WQ_RING_TAIL_MASK, q->lrc[0]->ring.tail / sizeof(u64));
+ xe_gt_assert(guc_to_gt(guc), i == XE_GUC_CONTEXT_WQ_EL_INFO_DATA_3_WI_FENCE_ID);
wqi[i++] = 0;
+ xe_gt_assert(guc_to_gt(guc), i == XE_GUC_CONTEXT_WQ_EL_CHILD_LIST_DATA_4_RINGTAIL);
for (j = 1; j < q->width; ++j) {
struct xe_lrc *lrc = q->lrc[j];
@@ -693,6 +767,50 @@ static void wq_item_append(struct xe_exec_queue *q)
parallel_write(xe, map, wq_desc.tail, q->guc->wqi_tail);
}
+static int wq_items_rebase(struct xe_exec_queue *q)
+{
+ struct xe_guc *guc = exec_queue_to_guc(q);
+ struct xe_device *xe = guc_to_xe(guc);
+ struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
+ int i = q->guc->wqi_head;
+
+ /* the ring starts after a header struct */
+ iosys_map_incr(&map, offsetof(struct guc_submit_parallel_scratch, wq[0]));
+
+ while ((i % WQ_SIZE) != (q->guc->wqi_tail % WQ_SIZE)) {
+ u32 len_dw, type, val;
+
+ if (drm_WARN_ON_ONCE(&xe->drm, i < 0 || i > 2 * WQ_SIZE))
+ break;
+
+ val = xe_map_rd_ring_u32(xe, &map, i / sizeof(u32) +
+ XE_GUC_CONTEXT_WQ_HEADER_DATA_0_TYPE_LEN,
+ WQ_SIZE / sizeof(u32));
+ len_dw = FIELD_GET(WQ_LEN_MASK, val);
+ type = FIELD_GET(WQ_TYPE_MASK, val);
+
+ if (drm_WARN_ON_ONCE(&xe->drm, len_dw >= WQ_SIZE / sizeof(u32)))
+ break;
+
+ if (type == WQ_TYPE_MULTI_LRC) {
+ val = xe_lrc_descriptor(q->lrc[0]);
+ xe_map_wr_ring_u32(xe, &map, i / sizeof(u32) +
+ XE_GUC_CONTEXT_WQ_EL_INFO_DATA_1_CTX_DESC_LOW,
+ WQ_SIZE / sizeof(u32), val);
+ } else if (drm_WARN_ON_ONCE(&xe->drm, type != WQ_TYPE_NOOP)) {
+ break;
+ }
+
+ i += (len_dw + 1) * sizeof(u32);
+ }
+
+ if ((i % WQ_SIZE) != (q->guc->wqi_tail % WQ_SIZE)) {
+ xe_gt_err(q->gt, "Exec queue fixups incomplete - wqi parse failed\n");
+ return -EBADMSG;
+ }
+ return 0;
+}
+
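wq_items_rebase() above can walk the work queue precisely because every item begins with a header dword whose length field says how many payload dwords follow; MULTI_LRC items then get their context descriptor rewritten in place. A standalone sketch of that header-driven walk (the field positions here are illustrative stand-ins for the driver's WQ_TYPE_MASK/WQ_LEN_MASK):

#include <stdint.h>
#include <stdio.h>

#define FIELD(mask, v)	(((v) << __builtin_ctz(mask)) & (mask))
#define UNFIELD(mask, v) (((v) & (mask)) >> __builtin_ctz(mask))

/* Illustrative field positions; the driver's defines are authoritative. */
#define WQ_TYPE		0xff000000u
#define WQ_LEN		0x00000fffu
#define TYPE_MULTI_LRC	1

int main(void)
{
	uint32_t wq[16] = {0};
	unsigned int i = 0, tail;

	/* Append one MULTI_LRC item: header + 3 payload dwords. */
	wq[i++] = FIELD(WQ_TYPE, TYPE_MULTI_LRC) | FIELD(WQ_LEN, 3);
	wq[i++] = 0x1000;	/* ctx descriptor low */
	wq[i++] = 0x0042;	/* guc id / ring tail */
	wq[i++] = 0;		/* fence id */
	tail = i;

	/* Walk items header-by-header, as wq_items_rebase() does. */
	for (i = 0; i != tail; ) {
		uint32_t hdr = wq[i];
		uint32_t len = UNFIELD(WQ_LEN, hdr);

		printf("item @%u type=%u len=%u\n", i,
		       UNFIELD(WQ_TYPE, hdr), len);
		i += len + 1;	/* header dword + len payload dwords */
	}
	return 0;
}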
#define RESUME_PENDING ~0x0ull
static void submit_exec_queue(struct xe_exec_queue *q)
{
@@ -761,7 +879,7 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job)
if (!exec_queue_killed_or_banned_or_wedged(q) && !xe_sched_job_is_error(job)) {
if (!exec_queue_registered(q))
- register_exec_queue(q);
+ register_exec_queue(q, GUC_CONTEXT_NORMAL);
if (!lr) /* LR jobs are emitted in the exec IOCTL */
q->ring_ops->emit_job(job);
submit_exec_queue(q);
@@ -777,6 +895,30 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job)
return fence;
}
+/**
+ * xe_guc_jobs_ring_rebase - Re-emit ring commands of requests pending
+ * on all queues under a GuC.
+ * @guc: the &xe_guc struct instance
+ */
+void xe_guc_jobs_ring_rebase(struct xe_guc *guc)
+{
+ struct xe_exec_queue *q;
+ unsigned long index;
+
+ /*
+ * This routine is used within VF migration recovery. This means
+ * using the lock here introduces a restriction: we cannot wait
+ * for any GFX HW response while the lock is taken.
+ */
+ mutex_lock(&guc->submission_state.lock);
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+ if (exec_queue_killed_or_banned_or_wedged(q))
+ continue;
+ xe_exec_queue_jobs_ring_restore(q);
+ }
+ mutex_unlock(&guc->submission_state.lock);
+}
+
static void guc_exec_queue_free_job(struct drm_sched_job *drm_job)
{
struct xe_sched_job *job = to_xe_sched_job(drm_job);
@@ -1277,48 +1419,57 @@ rearm:
return DRM_GPU_SCHED_STAT_NO_HANG;
}
-static void __guc_exec_queue_fini_async(struct work_struct *w)
+static void guc_exec_queue_fini(struct xe_exec_queue *q)
+{
+ struct xe_guc_exec_queue *ge = q->guc;
+ struct xe_guc *guc = exec_queue_to_guc(q);
+
+ release_guc_id(guc, q);
+ xe_sched_entity_fini(&ge->entity);
+ xe_sched_fini(&ge->sched);
+
+ /*
+	 * RCU free due to the sched being exported via DRM scheduler fences
+ * (timeline name).
+ */
+ kfree_rcu(ge, rcu);
+}
+
+static void __guc_exec_queue_destroy_async(struct work_struct *w)
{
struct xe_guc_exec_queue *ge =
- container_of(w, struct xe_guc_exec_queue, fini_async);
+ container_of(w, struct xe_guc_exec_queue, destroy_async);
struct xe_exec_queue *q = ge->q;
struct xe_guc *guc = exec_queue_to_guc(q);
xe_pm_runtime_get(guc_to_xe(guc));
trace_xe_exec_queue_destroy(q);
- release_guc_id(guc, q);
if (xe_exec_queue_is_lr(q))
cancel_work_sync(&ge->lr_tdr);
/* Confirm no work left behind accessing device structures */
cancel_delayed_work_sync(&ge->sched.base.work_tdr);
- xe_sched_entity_fini(&ge->entity);
- xe_sched_fini(&ge->sched);
- /*
- * RCU free due sched being exported via DRM scheduler fences
- * (timeline name).
- */
- kfree_rcu(ge, rcu);
xe_exec_queue_fini(q);
+
xe_pm_runtime_put(guc_to_xe(guc));
}
-static void guc_exec_queue_fini_async(struct xe_exec_queue *q)
+static void guc_exec_queue_destroy_async(struct xe_exec_queue *q)
{
struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_device *xe = guc_to_xe(guc);
- INIT_WORK(&q->guc->fini_async, __guc_exec_queue_fini_async);
+ INIT_WORK(&q->guc->destroy_async, __guc_exec_queue_destroy_async);
/* We must block on kernel engines so slabs are empty on driver unload */
if (q->flags & EXEC_QUEUE_FLAG_PERMANENT || exec_queue_wedged(q))
- __guc_exec_queue_fini_async(&q->guc->fini_async);
+ __guc_exec_queue_destroy_async(&q->guc->destroy_async);
else
- queue_work(xe->destroy_wq, &q->guc->fini_async);
+ queue_work(xe->destroy_wq, &q->guc->destroy_async);
}
-static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
+static void __guc_exec_queue_destroy(struct xe_guc *guc, struct xe_exec_queue *q)
{
/*
* Might be done from within the GPU scheduler, need to do async as we
@@ -1327,7 +1478,7 @@ static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
	 * this and we don't really care when everything is fini'd, just that it
* is.
*/
- guc_exec_queue_fini_async(q);
+ guc_exec_queue_destroy_async(q);
}
static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
@@ -1341,7 +1492,7 @@ static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
if (exec_queue_registered(q))
disable_scheduling_deregister(guc, q);
else
- __guc_exec_queue_fini(guc, q);
+ __guc_exec_queue_destroy(guc, q);
}
static bool guc_exec_queue_allowed_to_change_state(struct xe_exec_queue *q)
@@ -1574,14 +1725,14 @@ static bool guc_exec_queue_try_add_msg(struct xe_exec_queue *q,
#define STATIC_MSG_CLEANUP 0
#define STATIC_MSG_SUSPEND 1
#define STATIC_MSG_RESUME 2
-static void guc_exec_queue_fini(struct xe_exec_queue *q)
+static void guc_exec_queue_destroy(struct xe_exec_queue *q)
{
struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_CLEANUP;
if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && !exec_queue_wedged(q))
guc_exec_queue_add_msg(q, msg, CLEANUP);
else
- __guc_exec_queue_fini(exec_queue_to_guc(q), q);
+ __guc_exec_queue_destroy(exec_queue_to_guc(q), q);
}
static int guc_exec_queue_set_priority(struct xe_exec_queue *q,
@@ -1711,6 +1862,7 @@ static const struct xe_exec_queue_ops guc_exec_queue_ops = {
.init = guc_exec_queue_init,
.kill = guc_exec_queue_kill,
.fini = guc_exec_queue_fini,
+ .destroy = guc_exec_queue_destroy,
.set_priority = guc_exec_queue_set_priority,
.set_timeslice = guc_exec_queue_set_timeslice,
.set_preempt_timeout = guc_exec_queue_set_preempt_timeout,
@@ -1732,7 +1884,7 @@ static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q))
xe_exec_queue_put(q);
else if (exec_queue_destroyed(q))
- __guc_exec_queue_fini(guc, q);
+ __guc_exec_queue_destroy(guc, q);
}
if (q->guc->suspend_pending) {
set_exec_queue_suspended(q);
@@ -1773,6 +1925,43 @@ static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
}
}
+/**
+ * xe_guc_submit_reset_block - Disallow reset calls on the given GuC.
+ * @guc: the &xe_guc struct instance
+ */
+int xe_guc_submit_reset_block(struct xe_guc *guc)
+{
+ return atomic_fetch_or(1, &guc->submission_state.reset_blocked);
+}
+
+/**
+ * xe_guc_submit_reset_unblock - Re-allow reset calls on the given GuC.
+ * @guc: the &xe_guc struct instance
+ */
+void xe_guc_submit_reset_unblock(struct xe_guc *guc)
+{
+ atomic_set_release(&guc->submission_state.reset_blocked, 0);
+ wake_up_all(&guc->ct.wq);
+}
+
+static int guc_submit_reset_is_blocked(struct xe_guc *guc)
+{
+ return atomic_read_acquire(&guc->submission_state.reset_blocked);
+}
+
+/* Maximum time of blocking reset */
+#define RESET_BLOCK_PERIOD_MAX (HZ * 5)
+
+/**
+ * xe_guc_wait_reset_unblock - Wait until reset blocking flag is lifted, or timeout.
+ * @guc: the &xe_guc struct instance
+ */
+int xe_guc_wait_reset_unblock(struct xe_guc *guc)
+{
+ return wait_event_timeout(guc->ct.wq,
+ !guc_submit_reset_is_blocked(guc), RESET_BLOCK_PERIOD_MAX);
+}
+
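The reset-block flag pairs atomic_fetch_or() on the blocking side with atomic_set_release()/atomic_read_acquire() on the unblock/wait side, so state written while the block was held is guaranteed visible to a waiter that sees the flag cleared. A compact C11 analogue of that pairing (the driver sleeps on guc->ct.wq; the busy-wait here is only to keep the sketch self-contained):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int reset_blocked;
static int protected_data;	/* stands in for state touched under block */

static bool reset_block(void)
{
	/* Returns the old value, like atomic_fetch_or() above. */
	return atomic_fetch_or(&reset_blocked, 1);
}

static void reset_unblock(void)
{
	/* Release pairs with the acquire read in wait_reset_unblock(). */
	atomic_store_explicit(&reset_blocked, 0, memory_order_release);
}

static void wait_reset_unblock(void)
{
	while (atomic_load_explicit(&reset_blocked, memory_order_acquire))
		;	/* the driver sleeps on a waitqueue instead */
	/* protected_data writes made before unblock are visible here */
	printf("unblocked, data=%d\n", protected_data);
}

int main(void)
{
	reset_block();
	protected_data = 42;	/* e.g. migration recovery fixups */
	reset_unblock();
	wait_reset_unblock();
	return 0;
}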
int xe_guc_submit_reset_prepare(struct xe_guc *guc)
{
int ret;
@@ -1826,6 +2015,19 @@ void xe_guc_submit_stop(struct xe_guc *guc)
}
+/**
+ * xe_guc_submit_pause - Stop further runs of submission tasks on the given GuC.
+ * @guc: the &xe_guc struct instance whose scheduler is to be disabled
+ */
+void xe_guc_submit_pause(struct xe_guc *guc)
+{
+ struct xe_exec_queue *q;
+ unsigned long index;
+
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
+ xe_sched_submission_stop_async(&q->guc->sched);
+}
+
static void guc_exec_queue_start(struct xe_exec_queue *q)
{
struct xe_gpu_scheduler *sched = &q->guc->sched;
@@ -1866,6 +2068,28 @@ int xe_guc_submit_start(struct xe_guc *guc)
return 0;
}
+static void guc_exec_queue_unpause(struct xe_exec_queue *q)
+{
+ struct xe_gpu_scheduler *sched = &q->guc->sched;
+
+ xe_sched_submission_start(sched);
+}
+
+/**
+ * xe_guc_submit_unpause - Allow further runs of submission tasks on the given GuC.
+ * @guc: the &xe_guc struct instance whose scheduler is to be enabled
+ */
+void xe_guc_submit_unpause(struct xe_guc *guc)
+{
+ struct xe_exec_queue *q;
+ unsigned long index;
+
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
+ guc_exec_queue_unpause(q);
+
+ wake_up_all(&guc->ct.wq);
+}
+
static struct xe_exec_queue *
g2h_exec_queue_lookup(struct xe_guc *guc, u32 guc_id)
{
@@ -1879,7 +2103,7 @@ g2h_exec_queue_lookup(struct xe_guc *guc, u32 guc_id)
q = xa_load(&guc->submission_state.exec_queue_lookup, guc_id);
if (unlikely(!q)) {
- xe_gt_err(gt, "Not engine present for guc_id %u\n", guc_id);
+ xe_gt_err(gt, "No exec queue found for guc_id %u\n", guc_id);
return NULL;
}
@@ -1989,7 +2213,7 @@ static void handle_deregister_done(struct xe_guc *guc, struct xe_exec_queue *q)
if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q))
xe_exec_queue_put(q);
else
- __guc_exec_queue_fini(guc, q);
+ __guc_exec_queue_destroy(guc, q);
}
int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
@@ -2378,6 +2602,34 @@ static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p)
}
/**
+ * xe_guc_register_vf_exec_queue - Register exec queue for a given context type.
+ * @q: Execution queue
+ * @ctx_type: Type of the context
+ *
+ * This function registers the execution queue with the GuC. Special context
+ * types like GUC_CONTEXT_COMPRESSION_SAVE and GUC_CONTEXT_COMPRESSION_RESTORE
+ * are only applicable to iGPU, and only in the VF. The execution queue is
+ * submitted to the GuC after registration.
+ *
+ * Return: none.
+ */
+void xe_guc_register_vf_exec_queue(struct xe_exec_queue *q, int ctx_type)
+{
+ struct xe_guc *guc = exec_queue_to_guc(q);
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
+
+ xe_gt_assert(gt, IS_SRIOV_VF(xe));
+ xe_gt_assert(gt, !IS_DGFX(xe));
+ xe_gt_assert(gt, ctx_type == GUC_CONTEXT_COMPRESSION_SAVE ||
+ ctx_type == GUC_CONTEXT_COMPRESSION_RESTORE);
+ xe_gt_assert(gt, GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 23, 0));
+
+ register_exec_queue(q, ctx_type);
+ enable_scheduling(q);
+}
+
+/**
* xe_guc_submit_print - GuC Submit Print.
* @guc: GuC.
* @p: drm_printer where it will be printed out.
@@ -2397,3 +2649,32 @@ void xe_guc_submit_print(struct xe_guc *guc, struct drm_printer *p)
guc_exec_queue_print(q, p);
mutex_unlock(&guc->submission_state.lock);
}
+
+/**
+ * xe_guc_contexts_hwsp_rebase - Re-compute GGTT references within all
+ * exec queues registered to the given GuC.
+ * @guc: the &xe_guc struct instance
+ * @scratch: scratch buffer to be used as temporary storage
+ *
+ * Returns: zero on success, negative error code on failure.
+ */
+int xe_guc_contexts_hwsp_rebase(struct xe_guc *guc, void *scratch)
+{
+ struct xe_exec_queue *q;
+ unsigned long index;
+ int err = 0;
+
+ mutex_lock(&guc->submission_state.lock);
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+ err = xe_exec_queue_contexts_hwsp_rebase(q, scratch);
+ if (err)
+ break;
+ if (xe_exec_queue_is_parallel(q))
+ err = wq_items_rebase(q);
+ if (err)
+ break;
+ }
+ mutex_unlock(&guc->submission_state.lock);
+
+ return err;
+}
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.h b/drivers/gpu/drm/xe/xe_guc_submit.h
index 9b71a986c6ca..78c3f07e31a0 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.h
+++ b/drivers/gpu/drm/xe/xe_guc_submit.h
@@ -13,11 +13,18 @@ struct xe_exec_queue;
struct xe_guc;
int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids);
+int xe_guc_submit_enable(struct xe_guc *guc);
+void xe_guc_submit_disable(struct xe_guc *guc);
int xe_guc_submit_reset_prepare(struct xe_guc *guc);
void xe_guc_submit_reset_wait(struct xe_guc *guc);
void xe_guc_submit_stop(struct xe_guc *guc);
int xe_guc_submit_start(struct xe_guc *guc);
+void xe_guc_submit_pause(struct xe_guc *guc);
+void xe_guc_submit_unpause(struct xe_guc *guc);
+int xe_guc_submit_reset_block(struct xe_guc *guc);
+void xe_guc_submit_reset_unblock(struct xe_guc *guc);
+int xe_guc_wait_reset_unblock(struct xe_guc *guc);
void xe_guc_submit_wedge(struct xe_guc *guc);
int xe_guc_read_stopped(struct xe_guc *guc);
@@ -29,6 +36,8 @@ int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len);
int xe_guc_error_capture_handler(struct xe_guc *guc, u32 *msg, u32 len);
+void xe_guc_jobs_ring_rebase(struct xe_guc *guc);
+
struct xe_guc_submit_exec_queue_snapshot *
xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q);
void
@@ -39,5 +48,8 @@ xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snaps
void
xe_guc_exec_queue_snapshot_free(struct xe_guc_submit_exec_queue_snapshot *snapshot);
void xe_guc_submit_print(struct xe_guc *guc, struct drm_printer *p);
+void xe_guc_register_vf_exec_queue(struct xe_exec_queue *q, int ctx_type);
+
+int xe_guc_contexts_hwsp_rebase(struct xe_guc *guc, void *scratch);
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_tlb_inval.c b/drivers/gpu/drm/xe/xe_guc_tlb_inval.c
new file mode 100644
index 000000000000..6bf2103602f8
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_tlb_inval.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include "abi/guc_actions_abi.h"
+
+#include "xe_device.h"
+#include "xe_gt_stats.h"
+#include "xe_gt_types.h"
+#include "xe_guc.h"
+#include "xe_guc_ct.h"
+#include "xe_guc_tlb_inval.h"
+#include "xe_force_wake.h"
+#include "xe_mmio.h"
+#include "xe_tlb_inval.h"
+
+#include "regs/xe_guc_regs.h"
+
+/*
+ * XXX: The seqno algorithm relies on TLB invalidations being processed in
+ * order, which they currently are by the GuC; if that changes, the algorithm
+ * will need to be updated.
+ */
+
+static int send_tlb_inval(struct xe_guc *guc, const u32 *action, int len)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+
+ xe_gt_assert(gt, action[1]); /* Seqno */
+
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_TLB_INVAL, 1);
+ return xe_guc_ct_send(&guc->ct, action, len,
+ G2H_LEN_DW_TLB_INVALIDATE, 1);
+}
+
+#define MAKE_INVAL_OP(type) ((type << XE_GUC_TLB_INVAL_TYPE_SHIFT) | \
+ XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT | \
+ XE_GUC_TLB_INVAL_FLUSH_CACHE)
+
+static int send_tlb_inval_all(struct xe_tlb_inval *tlb_inval, u32 seqno)
+{
+ struct xe_guc *guc = tlb_inval->private;
+ u32 action[] = {
+ XE_GUC_ACTION_TLB_INVALIDATION_ALL,
+ seqno,
+ MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL),
+ };
+
+ return send_tlb_inval(guc, action, ARRAY_SIZE(action));
+}
+
+static int send_tlb_inval_ggtt(struct xe_tlb_inval *tlb_inval, u32 seqno)
+{
+ struct xe_guc *guc = tlb_inval->private;
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_device *xe = guc_to_xe(guc);
+
+ /*
+	 * A -ECANCELED return from this function is squashed by the caller,
+	 * which then signals any waiters.
+ */
+
+ if (xe_guc_ct_enabled(&guc->ct) && guc->submission_state.enabled) {
+ u32 action[] = {
+ XE_GUC_ACTION_TLB_INVALIDATION,
+ seqno,
+ MAKE_INVAL_OP(XE_GUC_TLB_INVAL_GUC),
+ };
+
+ return send_tlb_inval(guc, action, ARRAY_SIZE(action));
+ } else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
+ struct xe_mmio *mmio = &gt->mmio;
+ unsigned int fw_ref;
+
+ if (IS_SRIOV_VF(xe))
+ return -ECANCELED;
+
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20) {
+ xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC1,
+ PVC_GUC_TLB_INV_DESC1_INVALIDATE);
+ xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC0,
+ PVC_GUC_TLB_INV_DESC0_VALID);
+ } else {
+ xe_mmio_write32(mmio, GUC_TLB_INV_CR,
+ GUC_TLB_INV_CR_INVALIDATE);
+ }
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ }
+
+ return -ECANCELED;
+}
+
+/*
+ * Ensure that roundup_pow_of_two(length) doesn't overflow.
+ * Note that roundup_pow_of_two() operates on unsigned long,
+ * not on u64.
+ */
+#define MAX_RANGE_TLB_INVALIDATION_LENGTH (rounddown_pow_of_two(ULONG_MAX))
+
+static int send_tlb_inval_ppgtt(struct xe_tlb_inval *tlb_inval, u32 seqno,
+ u64 start, u64 end, u32 asid)
+{
+#define MAX_TLB_INVALIDATION_LEN 7
+ struct xe_guc *guc = tlb_inval->private;
+ struct xe_gt *gt = guc_to_gt(guc);
+ u32 action[MAX_TLB_INVALIDATION_LEN];
+ u64 length = end - start;
+ int len = 0;
+
+ if (guc_to_xe(guc)->info.force_execlist)
+ return -ECANCELED;
+
+ action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
+ action[len++] = seqno;
+ if (!gt_to_xe(gt)->info.has_range_tlb_inval ||
+ length > MAX_RANGE_TLB_INVALIDATION_LENGTH) {
+ action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
+ } else {
+ u64 orig_start = start;
+ u64 align;
+
+ if (length < SZ_4K)
+ length = SZ_4K;
+
+ /*
+		 * If the start address is not aligned to the length, we must
+		 * invalidate at a higher granularity: find a length large
+		 * enough that the resulting address mask covers the required
+		 * range.
+ */
+ align = roundup_pow_of_two(length);
+ start = ALIGN_DOWN(start, align);
+ end = ALIGN(end, align);
+ length = align;
+ while (start + length < end) {
+ length <<= 1;
+ start = ALIGN_DOWN(orig_start, length);
+ }
+
+ /*
+		 * The minimum invalidation size the hardware expects for a
+		 * 2MB page is 16MB
+ */
+ if (length >= SZ_2M) {
+ length = max_t(u64, SZ_16M, length);
+ start = ALIGN_DOWN(orig_start, length);
+ }
+
+ xe_gt_assert(gt, length >= SZ_4K);
+ xe_gt_assert(gt, is_power_of_2(length));
+ xe_gt_assert(gt, !(length & GENMASK(ilog2(SZ_16M) - 1,
+ ilog2(SZ_2M) + 1)));
+ xe_gt_assert(gt, IS_ALIGNED(start, length));
+
+ action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_PAGE_SELECTIVE);
+ action[len++] = asid;
+ action[len++] = lower_32_bits(start);
+ action[len++] = upper_32_bits(start);
+ action[len++] = ilog2(length) - ilog2(SZ_4K);
+ }
+
+ xe_gt_assert(gt, len <= MAX_TLB_INVALIDATION_LEN);
+
+ return send_tlb_inval(guc, action, len);
+}
+
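The selective-invalidation math above grows a power-of-two length until a single aligned window covers [start, end), then encodes it as ilog2(length) - ilog2(SZ_4K). A standalone version of the same computation with a worked case, an 8K range straddling a 16K boundary (helper names are local to the sketch):

#include <stdint.h>
#include <stdio.h>

#define SZ_4K	0x1000ull
#define SZ_2M	0x200000ull
#define SZ_16M	0x1000000ull

#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))

static uint64_t roundup_pow2(uint64_t v)
{
	uint64_t r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

int main(void)
{
	/* Worked case: 8K range straddling a 16K boundary. */
	uint64_t start = 0x3000, end = 0x5000, orig_start = start;
	uint64_t length = end - start, align;

	if (length < SZ_4K)
		length = SZ_4K;

	align = roundup_pow2(length);
	start = ALIGN_DOWN(start, align);
	end = ALIGN_UP(end, align);
	length = align;
	while (start + length < end) {	/* grow until one window covers it */
		length <<= 1;
		start = ALIGN_DOWN(orig_start, length);
	}

	if (length >= SZ_2M) {		/* HW minimum for 2M pages */
		length = length > SZ_16M ? length : SZ_16M;
		start = ALIGN_DOWN(orig_start, length);
	}

	/* 0x3000..0x5000 -> start=0x0, length=0x8000, encoding 3 */
	printf("start=0x%llx len=0x%llx enc=%u\n",
	       (unsigned long long)start, (unsigned long long)length,
	       __builtin_ctzll(length) - 12);
	return 0;
}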
+static bool tlb_inval_initialized(struct xe_tlb_inval *tlb_inval)
+{
+ struct xe_guc *guc = tlb_inval->private;
+
+ return xe_guc_ct_initialized(&guc->ct);
+}
+
+static void tlb_inval_flush(struct xe_tlb_inval *tlb_inval)
+{
+ struct xe_guc *guc = tlb_inval->private;
+
+ LNL_FLUSH_WORK(&guc->ct.g2h_worker);
+}
+
+static long tlb_inval_timeout_delay(struct xe_tlb_inval *tlb_inval)
+{
+ struct xe_guc *guc = tlb_inval->private;
+
+	/* this reflects the time HW/GuC needs to process a TLB inv request */
+ const long hw_tlb_timeout = HZ / 4;
+
+ /* this estimates actual delay caused by the CTB transport */
+ long delay = xe_guc_ct_queue_proc_time_jiffies(&guc->ct);
+
+ return hw_tlb_timeout + 2 * delay;
+}
+
+static const struct xe_tlb_inval_ops guc_tlb_inval_ops = {
+ .all = send_tlb_inval_all,
+ .ggtt = send_tlb_inval_ggtt,
+ .ppgtt = send_tlb_inval_ppgtt,
+ .initialized = tlb_inval_initialized,
+ .flush = tlb_inval_flush,
+ .timeout_delay = tlb_inval_timeout_delay,
+};
+
+/**
+ * xe_guc_tlb_inval_init_early() - Init GuC TLB invalidation early
+ * @guc: GuC object
+ * @tlb_inval: TLB invalidation client
+ *
+ * Initialize GuC TLB invalidation by setting a back pointer in the TLB
+ * invalidation client to the GuC and installing the GuC backend ops.
+ */
+void xe_guc_tlb_inval_init_early(struct xe_guc *guc,
+ struct xe_tlb_inval *tlb_inval)
+{
+ tlb_inval->private = guc;
+ tlb_inval->ops = &guc_tlb_inval_ops;
+}
+
+/**
+ * xe_guc_tlb_inval_done_handler() - TLB invalidation done handler
+ * @guc: guc
+ * @msg: message indicating TLB invalidation done
+ * @len: length of message
+ *
+ * Parse seqno of TLB invalidation, wake any waiters for seqno, and signal any
+ * invalidation fences for seqno. Algorithm for this depends on seqno being
+ * received in-order and asserts this assumption.
+ *
+ * Return: 0 on success, -EPROTO for malformed messages.
+ */
+int xe_guc_tlb_inval_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+
+ if (unlikely(len != 1))
+ return -EPROTO;
+
+ xe_tlb_inval_done_handler(&gt->tlb_inval, msg[0]);
+
+ return 0;
+}
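Since the GuC completes invalidations in submission order, the done handler only needs the reported seqno to retire every outstanding fence up to and including it. A minimal sketch of that in-order retirement invariant (xe_tlb_inval_done_handler() is the real implementation; the wrap-safe signed comparison mirrors common seqno handling and is an assumption here):

#include <stdint.h>
#include <stdio.h>

/* Pending invalidation seqnos, oldest first (the in-order guarantee). */
static uint32_t pending[8] = {1, 2, 3};
static unsigned int head, tail = 3;

/* Signal every fence whose seqno is <= the one just completed. */
static void done_handler(uint32_t seqno)
{
	while (head != tail && (int32_t)(seqno - pending[head]) >= 0) {
		printf("signal fence seqno=%u\n", pending[head]);
		head++;
	}
}

int main(void)
{
	done_handler(2);	/* signals 1 and 2 */
	done_handler(3);	/* signals 3 */
	return 0;
}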
diff --git a/drivers/gpu/drm/xe/xe_guc_tlb_inval.h b/drivers/gpu/drm/xe/xe_guc_tlb_inval.h
new file mode 100644
index 000000000000..07d668b02e3d
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_tlb_inval.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_GUC_TLB_INVAL_H_
+#define _XE_GUC_TLB_INVAL_H_
+
+#include <linux/types.h>
+
+struct xe_guc;
+struct xe_tlb_inval;
+
+void xe_guc_tlb_inval_init_early(struct xe_guc *guc,
+ struct xe_tlb_inval *tlb_inval);
+
+int xe_guc_tlb_inval_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h
index 1fde7614fcc5..c7b9642b41ba 100644
--- a/drivers/gpu/drm/xe/xe_guc_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_types.h
@@ -85,6 +85,12 @@ struct xe_guc {
struct xarray exec_queue_lookup;
/** @submission_state.stopped: submissions are stopped */
atomic_t stopped;
+ /**
+ * @submission_state.reset_blocked: reset attempts are blocked;
+	 * blocking resets in order to delay them may be required while
+	 * running an operation that is sensitive to resets.
+ */
+ atomic_t reset_blocked;
/** @submission_state.lock: protects submission state */
struct mutex lock;
/** @submission_state.enabled: submission is enabled */
diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.c b/drivers/gpu/drm/xe/xe_heci_gsc.c
index 6d7b62724126..a415ca488791 100644
--- a/drivers/gpu/drm/xe/xe_heci_gsc.c
+++ b/drivers/gpu/drm/xe/xe_heci_gsc.c
@@ -197,7 +197,7 @@ int xe_heci_gsc_init(struct xe_device *xe)
if (ret)
return ret;
- if (!def->use_polling && !xe_survivability_mode_is_enabled(xe)) {
+ if (!def->use_polling && !xe_survivability_mode_is_boot_enabled(xe)) {
ret = heci_gsc_irq_setup(xe);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/xe/xe_hmm.c b/drivers/gpu/drm/xe/xe_hmm.c
deleted file mode 100644
index 57b71956ddf4..000000000000
--- a/drivers/gpu/drm/xe/xe_hmm.c
+++ /dev/null
@@ -1,325 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2024 Intel Corporation
- */
-
-#include <linux/scatterlist.h>
-#include <linux/mmu_notifier.h>
-#include <linux/dma-mapping.h>
-#include <linux/memremap.h>
-#include <linux/swap.h>
-#include <linux/hmm.h>
-#include <linux/mm.h>
-#include "xe_hmm.h"
-#include "xe_vm.h"
-#include "xe_bo.h"
-
-static u64 xe_npages_in_range(unsigned long start, unsigned long end)
-{
- return (end - start) >> PAGE_SHIFT;
-}
-
-static int xe_alloc_sg(struct xe_device *xe, struct sg_table *st,
- struct hmm_range *range, struct rw_semaphore *notifier_sem)
-{
- unsigned long i, npages, hmm_pfn;
- unsigned long num_chunks = 0;
- int ret;
-
- /* HMM docs says this is needed. */
- ret = down_read_interruptible(notifier_sem);
- if (ret)
- return ret;
-
- if (mmu_interval_read_retry(range->notifier, range->notifier_seq)) {
- up_read(notifier_sem);
- return -EAGAIN;
- }
-
- npages = xe_npages_in_range(range->start, range->end);
- for (i = 0; i < npages;) {
- unsigned long len;
-
- hmm_pfn = range->hmm_pfns[i];
- xe_assert(xe, hmm_pfn & HMM_PFN_VALID);
-
- len = 1UL << hmm_pfn_to_map_order(hmm_pfn);
-
- /* If order > 0 the page may extend beyond range->start */
- len -= (hmm_pfn & ~HMM_PFN_FLAGS) & (len - 1);
- i += len;
- num_chunks++;
- }
- up_read(notifier_sem);
-
- return sg_alloc_table(st, num_chunks, GFP_KERNEL);
-}
-
-/**
- * xe_build_sg() - build a scatter gather table for all the physical pages/pfn
- * in a hmm_range. dma-map pages if necessary. dma-address is save in sg table
- * and will be used to program GPU page table later.
- * @xe: the xe device who will access the dma-address in sg table
- * @range: the hmm range that we build the sg table from. range->hmm_pfns[]
- * has the pfn numbers of pages that back up this hmm address range.
- * @st: pointer to the sg table.
- * @notifier_sem: The xe notifier lock.
- * @write: whether we write to this range. This decides dma map direction
- * for system pages. If write we map it bi-diretional; otherwise
- * DMA_TO_DEVICE
- *
- * All the contiguous pfns will be collapsed into one entry in
- * the scatter gather table. This is for the purpose of efficiently
- * programming GPU page table.
- *
- * The dma_address in the sg table will later be used by GPU to
- * access memory. So if the memory is system memory, we need to
- * do a dma-mapping so it can be accessed by GPU/DMA.
- *
- * FIXME: This function currently only support pages in system
- * memory. If the memory is GPU local memory (of the GPU who
- * is going to access memory), we need gpu dpa (device physical
- * address), and there is no need of dma-mapping. This is TBD.
- *
- * FIXME: dma-mapping for peer gpu device to access remote gpu's
- * memory. Add this when you support p2p
- *
- * This function allocates the storage of the sg table. It is
- * caller's responsibility to free it calling sg_free_table.
- *
- * Returns 0 if successful; -ENOMEM if fails to allocate memory
- */
-static int xe_build_sg(struct xe_device *xe, struct hmm_range *range,
- struct sg_table *st,
- struct rw_semaphore *notifier_sem,
- bool write)
-{
- unsigned long npages = xe_npages_in_range(range->start, range->end);
- struct device *dev = xe->drm.dev;
- struct scatterlist *sgl;
- struct page *page;
- unsigned long i, j;
-
- lockdep_assert_held(notifier_sem);
-
- i = 0;
- for_each_sg(st->sgl, sgl, st->nents, j) {
- unsigned long hmm_pfn, size;
-
- hmm_pfn = range->hmm_pfns[i];
- page = hmm_pfn_to_page(hmm_pfn);
- xe_assert(xe, !is_device_private_page(page));
-
- size = 1UL << hmm_pfn_to_map_order(hmm_pfn);
- size -= page_to_pfn(page) & (size - 1);
- i += size;
-
- if (unlikely(j == st->nents - 1)) {
- xe_assert(xe, i >= npages);
- if (i > npages)
- size -= (i - npages);
-
- sg_mark_end(sgl);
- } else {
- xe_assert(xe, i < npages);
- }
-
- sg_set_page(sgl, page, size << PAGE_SHIFT, 0);
- }
-
- return dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
-}
-
-static void xe_hmm_userptr_set_mapped(struct xe_userptr_vma *uvma)
-{
- struct xe_userptr *userptr = &uvma->userptr;
- struct xe_vm *vm = xe_vma_vm(&uvma->vma);
-
- lockdep_assert_held_write(&vm->lock);
- lockdep_assert_held(&vm->userptr.notifier_lock);
-
- mutex_lock(&userptr->unmap_mutex);
- xe_assert(vm->xe, !userptr->mapped);
- userptr->mapped = true;
- mutex_unlock(&userptr->unmap_mutex);
-}
-
-void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma)
-{
- struct xe_userptr *userptr = &uvma->userptr;
- struct xe_vma *vma = &uvma->vma;
- bool write = !xe_vma_read_only(vma);
- struct xe_vm *vm = xe_vma_vm(vma);
- struct xe_device *xe = vm->xe;
-
- if (!lockdep_is_held_type(&vm->userptr.notifier_lock, 0) &&
- !lockdep_is_held_type(&vm->lock, 0) &&
- !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
- /* Don't unmap in exec critical section. */
- xe_vm_assert_held(vm);
- /* Don't unmap while mapping the sg. */
- lockdep_assert_held(&vm->lock);
- }
-
- mutex_lock(&userptr->unmap_mutex);
- if (userptr->sg && userptr->mapped)
- dma_unmap_sgtable(xe->drm.dev, userptr->sg,
- write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);
- userptr->mapped = false;
- mutex_unlock(&userptr->unmap_mutex);
-}
-
-/**
- * xe_hmm_userptr_free_sg() - Free the scatter gather table of userptr
- * @uvma: the userptr vma which hold the scatter gather table
- *
- * With function xe_userptr_populate_range, we allocate storage of
- * the userptr sg table. This is a helper function to free this
- * sg table, and dma unmap the address in the table.
- */
-void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma)
-{
- struct xe_userptr *userptr = &uvma->userptr;
-
- xe_assert(xe_vma_vm(&uvma->vma)->xe, userptr->sg);
- xe_hmm_userptr_unmap(uvma);
- sg_free_table(userptr->sg);
- userptr->sg = NULL;
-}
-
-/**
- * xe_hmm_userptr_populate_range() - Populate physical pages of a virtual
- * address range
- *
- * @uvma: userptr vma which has information of the range to populate.
- * @is_mm_mmap_locked: True if mmap_read_lock is already acquired by caller.
- *
- * This function populate the physical pages of a virtual
- * address range. The populated physical pages is saved in
- * userptr's sg table. It is similar to get_user_pages but call
- * hmm_range_fault.
- *
- * This function also read mmu notifier sequence # (
- * mmu_interval_read_begin), for the purpose of later
- * comparison (through mmu_interval_read_retry).
- *
- * This must be called with mmap read or write lock held.
- *
- * This function allocates the storage of the userptr sg table.
- * It is caller's responsibility to free it calling sg_free_table.
- *
- * returns: 0 for success; negative error no on failure
- */
-int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
- bool is_mm_mmap_locked)
-{
- unsigned long timeout =
- jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
- unsigned long *pfns;
- struct xe_userptr *userptr;
- struct xe_vma *vma = &uvma->vma;
- u64 userptr_start = xe_vma_userptr(vma);
- u64 userptr_end = userptr_start + xe_vma_size(vma);
- struct xe_vm *vm = xe_vma_vm(vma);
- struct hmm_range hmm_range = {
- .pfn_flags_mask = 0, /* ignore pfns */
- .default_flags = HMM_PFN_REQ_FAULT,
- .start = userptr_start,
- .end = userptr_end,
- .notifier = &uvma->userptr.notifier,
- .dev_private_owner = vm->xe,
- };
- bool write = !xe_vma_read_only(vma);
- unsigned long notifier_seq;
- u64 npages;
- int ret;
-
- userptr = &uvma->userptr;
-
- if (is_mm_mmap_locked)
- mmap_assert_locked(userptr->notifier.mm);
-
- if (vma->gpuva.flags & XE_VMA_DESTROYED)
- return 0;
-
- notifier_seq = mmu_interval_read_begin(&userptr->notifier);
- if (notifier_seq == userptr->notifier_seq)
- return 0;
-
- if (userptr->sg)
- xe_hmm_userptr_free_sg(uvma);
-
- npages = xe_npages_in_range(userptr_start, userptr_end);
- pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
- if (unlikely(!pfns))
- return -ENOMEM;
-
- if (write)
- hmm_range.default_flags |= HMM_PFN_REQ_WRITE;
-
- if (!mmget_not_zero(userptr->notifier.mm)) {
- ret = -EFAULT;
- goto free_pfns;
- }
-
- hmm_range.hmm_pfns = pfns;
-
- while (true) {
- hmm_range.notifier_seq = mmu_interval_read_begin(&userptr->notifier);
-
- if (!is_mm_mmap_locked)
- mmap_read_lock(userptr->notifier.mm);
-
- ret = hmm_range_fault(&hmm_range);
-
- if (!is_mm_mmap_locked)
- mmap_read_unlock(userptr->notifier.mm);
-
- if (ret == -EBUSY) {
- if (time_after(jiffies, timeout))
- break;
-
- continue;
- }
- break;
- }
-
- mmput(userptr->notifier.mm);
-
- if (ret)
- goto free_pfns;
-
- ret = xe_alloc_sg(vm->xe, &userptr->sgt, &hmm_range, &vm->userptr.notifier_lock);
- if (ret)
- goto free_pfns;
-
- ret = down_read_interruptible(&vm->userptr.notifier_lock);
- if (ret)
- goto free_st;
-
- if (mmu_interval_read_retry(hmm_range.notifier, hmm_range.notifier_seq)) {
- ret = -EAGAIN;
- goto out_unlock;
- }
-
- ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt,
- &vm->userptr.notifier_lock, write);
- if (ret)
- goto out_unlock;
-
- userptr->sg = &userptr->sgt;
- xe_hmm_userptr_set_mapped(uvma);
- userptr->notifier_seq = hmm_range.notifier_seq;
- up_read(&vm->userptr.notifier_lock);
- kvfree(pfns);
- return 0;
-
-out_unlock:
- up_read(&vm->userptr.notifier_lock);
-free_st:
- sg_free_table(&userptr->sgt);
-free_pfns:
- kvfree(pfns);
- return ret;
-}
diff --git a/drivers/gpu/drm/xe/xe_hmm.h b/drivers/gpu/drm/xe/xe_hmm.h
deleted file mode 100644
index 0ea98d8e7bbc..000000000000
--- a/drivers/gpu/drm/xe/xe_hmm.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: MIT
- *
- * Copyright © 2024 Intel Corporation
- */
-
-#ifndef _XE_HMM_H_
-#define _XE_HMM_H_
-
-#include <linux/types.h>
-
-struct xe_userptr_vma;
-
-int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma, bool is_mm_mmap_locked);
-
-void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma);
-
-void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma);
-#endif
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index 796ba8c34a16..1cf623b4a5bc 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -576,7 +576,7 @@ static void adjust_idledly(struct xe_hw_engine *hwe)
u32 maxcnt_units_ns = 640;
bool inhibit_switch = 0;
- if (!IS_SRIOV_VF(gt_to_xe(hwe->gt)) && XE_WA(gt, 16023105232)) {
+ if (!IS_SRIOV_VF(gt_to_xe(hwe->gt)) && XE_GT_WA(gt, 16023105232)) {
idledly = xe_mmio_read32(&gt->mmio, RING_IDLEDLY(hwe->mmio_base));
maxcnt = xe_mmio_read32(&gt->mmio, RING_PWRCTX_MAXCNT(hwe->mmio_base));
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group.c b/drivers/gpu/drm/xe/xe_hw_engine_group.c
index c926f840c87b..fa4db5f23342 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_group.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine_group.c
@@ -103,8 +103,8 @@ int xe_hw_engine_setup_groups(struct xe_gt *gt)
break;
case XE_ENGINE_CLASS_OTHER:
break;
- default:
- drm_warn(&xe->drm, "NOT POSSIBLE");
+ case XE_ENGINE_CLASS_MAX:
+ xe_gt_assert(gt, false);
}
}
@@ -213,17 +213,13 @@ static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group
err = q->ops->suspend_wait(q);
if (err)
- goto err_suspend;
+ return err;
}
if (need_resume)
xe_hw_engine_group_resume_faulting_lr_jobs(group);
return 0;
-
-err_suspend:
- up_write(&group->mode_sem);
- return err;
}
/**
diff --git a/drivers/gpu/drm/xe/xe_hw_error.c b/drivers/gpu/drm/xe/xe_hw_error.c
new file mode 100644
index 000000000000..8c65291f36fc
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_hw_error.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/fault-inject.h>
+
+#include "regs/xe_gsc_regs.h"
+#include "regs/xe_hw_error_regs.h"
+#include "regs/xe_irq_regs.h"
+
+#include "xe_device.h"
+#include "xe_hw_error.h"
+#include "xe_mmio.h"
+#include "xe_survivability_mode.h"
+
+#define HEC_UNCORR_FW_ERR_BITS 4
+extern struct fault_attr inject_csc_hw_error;
+
+/* Error categories reported by hardware */
+enum hardware_error {
+ HARDWARE_ERROR_CORRECTABLE = 0,
+ HARDWARE_ERROR_NONFATAL = 1,
+ HARDWARE_ERROR_FATAL = 2,
+ HARDWARE_ERROR_MAX,
+};
+
+static const char * const hec_uncorrected_fw_errors[] = {
+ "Fatal",
+ "CSE Disabled",
+ "FD Corruption",
+ "Data Corruption"
+};
+
+static const char *hw_error_to_str(const enum hardware_error hw_err)
+{
+ switch (hw_err) {
+ case HARDWARE_ERROR_CORRECTABLE:
+ return "CORRECTABLE";
+ case HARDWARE_ERROR_NONFATAL:
+ return "NONFATAL";
+ case HARDWARE_ERROR_FATAL:
+ return "FATAL";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static bool fault_inject_csc_hw_error(void)
+{
+ return IS_ENABLED(CONFIG_DEBUG_FS) && should_fail(&inject_csc_hw_error, 1);
+}
+
+static void csc_hw_error_work(struct work_struct *work)
+{
+ struct xe_tile *tile = container_of(work, typeof(*tile), csc_hw_error_work);
+ struct xe_device *xe = tile_to_xe(tile);
+ int ret;
+
+ ret = xe_survivability_mode_runtime_enable(xe);
+ if (ret)
+ drm_err(&xe->drm, "Failed to enable runtime survivability mode\n");
+}
+
+static void csc_hw_error_handler(struct xe_tile *tile, const enum hardware_error hw_err)
+{
+ const char *hw_err_str = hw_error_to_str(hw_err);
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_mmio *mmio = &tile->mmio;
+ u32 base, err_bit, err_src;
+ unsigned long fw_err;
+
+ if (xe->info.platform != XE_BATTLEMAGE)
+ return;
+
+ base = BMG_GSC_HECI1_BASE;
+ lockdep_assert_held(&xe->irq.lock);
+ err_src = xe_mmio_read32(mmio, HEC_UNCORR_ERR_STATUS(base));
+ if (!err_src) {
+ drm_err_ratelimited(&xe->drm, HW_ERR "Tile%d reported HEC_ERR_STATUS_%s blank\n",
+ tile->id, hw_err_str);
+ return;
+ }
+
+ if (err_src & UNCORR_FW_REPORTED_ERR) {
+ fw_err = xe_mmio_read32(mmio, HEC_UNCORR_FW_ERR_DW0(base));
+ for_each_set_bit(err_bit, &fw_err, HEC_UNCORR_FW_ERR_BITS) {
+ drm_err_ratelimited(&xe->drm, HW_ERR
+ "%s: HEC Uncorrected FW %s error reported, bit[%d] is set\n",
+ hw_err_str, hec_uncorrected_fw_errors[err_bit],
+ err_bit);
+
+ schedule_work(&tile->csc_hw_error_work);
+ }
+ }
+
+ xe_mmio_write32(mmio, HEC_UNCORR_ERR_STATUS(base), err_src);
+}
+
+static void hw_error_source_handler(struct xe_tile *tile, const enum hardware_error hw_err)
+{
+ const char *hw_err_str = hw_error_to_str(hw_err);
+ struct xe_device *xe = tile_to_xe(tile);
+ unsigned long flags;
+ u32 err_src;
+
+ if (xe->info.platform != XE_BATTLEMAGE)
+ return;
+
+ spin_lock_irqsave(&xe->irq.lock, flags);
+ err_src = xe_mmio_read32(&tile->mmio, DEV_ERR_STAT_REG(hw_err));
+ if (!err_src) {
+ drm_err_ratelimited(&xe->drm, HW_ERR "Tile%d reported DEV_ERR_STAT_%s blank!\n",
+ tile->id, hw_err_str);
+ goto unlock;
+ }
+
+ if (err_src & XE_CSC_ERROR)
+ csc_hw_error_handler(tile, hw_err);
+
+ xe_mmio_write32(&tile->mmio, DEV_ERR_STAT_REG(hw_err), err_src);
+
+unlock:
+ spin_unlock_irqrestore(&xe->irq.lock, flags);
+}
+
+/**
+ * xe_hw_error_irq_handler - irq handling for hw errors
+ * @tile: tile instance
+ * @master_ctl: value read from master interrupt register
+ *
+ * Xe platforms add three error bits to the master interrupt register to support error handling.
+ * These three bits are used to convey the class of error FATAL, NONFATAL, or CORRECTABLE.
+ * To process the interrupt, determine the source of error by reading the Device Error Source
+ * Register that corresponds to the class of error being serviced.
+ */
+void xe_hw_error_irq_handler(struct xe_tile *tile, const u32 master_ctl)
+{
+ enum hardware_error hw_err;
+
+ if (fault_inject_csc_hw_error())
+ schedule_work(&tile->csc_hw_error_work);
+
+ for (hw_err = 0; hw_err < HARDWARE_ERROR_MAX; hw_err++)
+ if (master_ctl & ERROR_IRQ(hw_err))
+ hw_error_source_handler(tile, hw_err);
+}
+
+/*
+ * Process hardware errors during boot
+ */
+static void process_hw_errors(struct xe_device *xe)
+{
+ struct xe_tile *tile;
+ u32 master_ctl;
+ u8 id;
+
+ for_each_tile(tile, xe, id) {
+ master_ctl = xe_mmio_read32(&tile->mmio, GFX_MSTR_IRQ);
+ xe_hw_error_irq_handler(tile, master_ctl);
+ xe_mmio_write32(&tile->mmio, GFX_MSTR_IRQ, master_ctl);
+ }
+}
+
+/**
+ * xe_hw_error_init - Initialize hw errors
+ * @xe: xe device instance
+ *
+ * Initialize and check for errors that occurred during boot
+ * prior to driver load
+ */
+void xe_hw_error_init(struct xe_device *xe)
+{
+ struct xe_tile *tile = xe_device_get_root_tile(xe);
+
+ if (!IS_DGFX(xe) || IS_SRIOV_VF(xe))
+ return;
+
+ INIT_WORK(&tile->csc_hw_error_work, csc_hw_error_work);
+
+ process_hw_errors(xe);
+}
diff --git a/drivers/gpu/drm/xe/xe_hw_error.h b/drivers/gpu/drm/xe/xe_hw_error.h
new file mode 100644
index 000000000000..d86e28c5180c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_hw_error.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+#ifndef XE_HW_ERROR_H_
+#define XE_HW_ERROR_H_
+
+#include <linux/types.h>
+
+struct xe_tile;
+struct xe_device;
+
+void xe_hw_error_irq_handler(struct xe_tile *tile, const u32 master_ctl);
+void xe_hw_error_init(struct xe_device *xe);
+#endif
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index f08fc4377d25..b6790589e623 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -179,7 +179,7 @@ static int xe_hwmon_pcode_rmw_power_limit(const struct xe_hwmon *hwmon, u32 attr
u32 clr, u32 set)
{
struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);
- u32 val0, val1;
+ u32 val0 = 0, val1 = 0;
int ret = 0;
ret = xe_pcode_read(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
@@ -286,7 +286,7 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
*/
static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *value)
{
- u64 reg_val = 0, min, max;
+ u32 reg_val = 0;
struct xe_device *xe = hwmon->xe;
struct xe_reg rapl_limit, pkg_power_sku;
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
@@ -294,7 +294,7 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channe
mutex_lock(&hwmon->hwmon_lock);
if (hwmon->xe->info.has_mbx_power_limits) {
- xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, (u32 *)&reg_val);
+ xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, &reg_val);
} else {
rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
pkg_power_sku = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
@@ -304,19 +304,21 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channe
/* Check if PL limits are disabled. */
if (!(reg_val & PWR_LIM_EN)) {
*value = PL_DISABLE;
- drm_info(&hwmon->xe->drm, "%s disabled for channel %d, val 0x%016llx\n",
+ drm_info(&hwmon->xe->drm, "%s disabled for channel %d, val 0x%08x\n",
PWR_ATTR_TO_STR(attr), channel, reg_val);
goto unlock;
}
reg_val = REG_FIELD_GET(PWR_LIM_VAL, reg_val);
- *value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
+ *value = mul_u32_u32(reg_val, SF_POWER) >> hwmon->scl_shift_power;
/* For platforms with mailbox power limit support clamping would be done by pcode. */
if (!hwmon->xe->info.has_mbx_power_limits) {
- reg_val = xe_mmio_read64_2x32(mmio, pkg_power_sku);
- min = REG_FIELD_GET(PKG_MIN_PWR, reg_val);
- max = REG_FIELD_GET(PKG_MAX_PWR, reg_val);
+ u64 pkg_pwr, min, max;
+
+ pkg_pwr = xe_mmio_read64_2x32(mmio, pkg_power_sku);
+ min = REG_FIELD_GET(PKG_MIN_PWR, pkg_pwr);
+ max = REG_FIELD_GET(PKG_MAX_PWR, pkg_pwr);
min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
max = mul_u64_u32_shr(max, SF_POWER, hwmon->scl_shift_power);
if (min && max)
@@ -332,6 +334,7 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, u32 attr, int channe
int ret = 0;
u32 reg_val, max;
struct xe_reg rapl_limit;
+ u64 max_supp_power_limit = 0;
mutex_lock(&hwmon->hwmon_lock);
@@ -356,6 +359,20 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, u32 attr, int channe
goto unlock;
}
+ /*
+	 * If the sysfs value exceeds the maximum power limit supported by pcode, clamp it
+	 * to the supported maximum (U12.3 format).
+	 * This avoids truncation during the reg_val calculation below and ensures a valid
+	 * power limit is sent to pcode, which clamps it to the card-supported value.
+ */
+ max_supp_power_limit = ((PWR_LIM_VAL) >> hwmon->scl_shift_power) * SF_POWER;
+ if (value > max_supp_power_limit) {
+ value = max_supp_power_limit;
+ drm_info(&hwmon->xe->drm,
+ "Power limit clamped as selected %s exceeds channel %d limit\n",
+ PWR_ATTR_TO_STR(attr), channel);
+ }
+
/* Computation in 64-bits to avoid overflow. Round to nearest. */
reg_val = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER);
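The clamp above caps the requested limit at the largest value the register field can encode before the fixed-point conversion, so DIV_ROUND_CLOSEST_ULL() can never silently truncate. A standalone sketch of the round trip (PWR_LIM_VAL as a 15-bit U12.3 field and the 1/8 W scale shift are assumptions chosen to illustrate; SF_POWER is the driver's microwatt scale):

#include <stdint.h>
#include <stdio.h>

#define SF_POWER	1000000ull	/* uW per W, as in the driver */
#define PWR_LIM_VAL	0x7fffull	/* assumption: 15-bit U12.3 field */
#define SCL_SHIFT	3		/* assumption: 1/8 W units */

int main(void)
{
	uint64_t value = 5000000000ull;	/* requested: 5000 W in uW */
	uint64_t max = (PWR_LIM_VAL >> SCL_SHIFT) * SF_POWER;	/* 4095 W */
	uint64_t reg_val;

	if (value > max)
		value = max;	/* clamp before fixed-point conversion */

	/* reg_val = round((value << shift) / SF_POWER), fits in the field */
	reg_val = ((value << SCL_SHIFT) + SF_POWER / 2) / SF_POWER;

	printf("max=%llu uW reg_val=0x%llx\n",
	       (unsigned long long)max, (unsigned long long)reg_val);
	return 0;
}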
@@ -478,8 +495,8 @@ xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *at
{
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
- u32 x, y, x_w = 2; /* 2 bits */
- u64 r, tau4, out;
+ u32 reg_val, x, y, x_w = 2; /* 2 bits */
+ u64 tau4, out;
int channel = (to_sensor_dev_attr(attr)->index % 2) ? CHANNEL_PKG : CHANNEL_CARD;
u32 power_attr = (to_sensor_dev_attr(attr)->index > 1) ? PL2_HWMON_ATTR : PL1_HWMON_ATTR;
@@ -490,23 +507,24 @@ xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *at
mutex_lock(&hwmon->hwmon_lock);
if (hwmon->xe->info.has_mbx_power_limits) {
- ret = xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, (u32 *)&r);
+ ret = xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, &reg_val);
if (ret) {
drm_err(&hwmon->xe->drm,
- "power interval read fail, ch %d, attr %d, r 0%llx, ret %d\n",
- channel, power_attr, r, ret);
- r = 0;
+ "power interval read fail, ch %d, attr %d, val 0x%08x, ret %d\n",
+ channel, power_attr, reg_val, ret);
+ reg_val = 0;
}
} else {
- r = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel));
+ reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT,
+ channel));
}
mutex_unlock(&hwmon->hwmon_lock);
xe_pm_runtime_put(hwmon->xe);
- x = REG_FIELD_GET(PWR_LIM_TIME_X, r);
- y = REG_FIELD_GET(PWR_LIM_TIME_Y, r);
+ x = REG_FIELD_GET(PWR_LIM_TIME_X, reg_val);
+ y = REG_FIELD_GET(PWR_LIM_TIME_Y, reg_val);
/*
* tau = (1 + (x / 4)) * power(2,y), x = bits(23:22), y = bits(21:17)
@@ -719,7 +737,7 @@ static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, int channel,
long *value, u32 scale_factor)
{
int ret;
- u32 uval;
+ u32 uval = 0;
mutex_lock(&hwmon->hwmon_lock);
@@ -739,9 +757,23 @@ static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, int channel,
{
int ret;
u32 uval;
+ u64 max_crit_power_curr = 0;
mutex_lock(&hwmon->hwmon_lock);
+ /*
+ * If the sysfs value exceeds the maximum supported by the pcode mailbox command
+ * POWER_SETUP_SUBCOMMAND_WRITE_I1, clamp it to the command's maximum (U10.6 format).
+ * This avoids truncation during the uval calculation below and ensures a valid power
+ * limit is sent to pcode, which then clamps it to the card-supported value.
+ */
+ max_crit_power_curr = (POWER_SETUP_I1_DATA_MASK >> POWER_SETUP_I1_SHIFT) * scale_factor;
+ if (value > max_crit_power_curr) {
+ value = max_crit_power_curr;
+ drm_info(&hwmon->xe->drm,
+ "Power limit clamped as selected exceeds channel %d limit\n",
+ channel);
+ }
uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT, scale_factor);
ret = xe_hwmon_pcode_write_i1(hwmon, uval);
@@ -889,7 +921,7 @@ xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int channel, long val)
static umode_t
xe_hwmon_curr_is_visible(const struct xe_hwmon *hwmon, u32 attr, int channel)
{
- u32 uval;
+ u32 uval = 0;
/* hwmon sysfs attribute of current available only for package */
if (channel != CHANNEL_PKG)
@@ -991,7 +1023,7 @@ xe_hwmon_energy_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
static umode_t
xe_hwmon_fan_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
- u32 uval;
+ u32 uval = 0;
if (!hwmon->xe->info.has_fan_control)
return 0;
@@ -1265,13 +1297,6 @@ xe_hwmon_get_preregistration_info(struct xe_hwmon *hwmon)
xe_hwmon_fan_input_read(hwmon, channel, &fan_speed);
}
-static void xe_hwmon_mutex_destroy(void *arg)
-{
- struct xe_hwmon *hwmon = arg;
-
- mutex_destroy(&hwmon->hwmon_lock);
-}
-
int xe_hwmon_register(struct xe_device *xe)
{
struct device *dev = xe->drm.dev;
@@ -1290,8 +1315,7 @@ int xe_hwmon_register(struct xe_device *xe)
if (!hwmon)
return -ENOMEM;
- mutex_init(&hwmon->hwmon_lock);
- ret = devm_add_action_or_reset(dev, xe_hwmon_mutex_destroy, hwmon);
+ ret = devm_mutex_init(dev, &hwmon->hwmon_lock);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/xe/xe_i2c.c b/drivers/gpu/drm/xe/xe_i2c.c
index bc7dc2099470..48dfcb41fa08 100644
--- a/drivers/gpu/drm/xe/xe_i2c.c
+++ b/drivers/gpu/drm/xe/xe_i2c.c
@@ -147,6 +147,20 @@ static void xe_i2c_unregister_adapter(struct xe_i2c *i2c)
}
/**
+ * xe_i2c_present - Check if the I2C controller is present and functional
+ * @xe: xe device instance
+ *
+ * Check whether the I2C controller is present and functioning with a valid
+ * endpoint cookie.
+ *
+ * Return: %true if present, %false otherwise.
+ */
+bool xe_i2c_present(struct xe_device *xe)
+{
+ return xe->i2c && xe->i2c->ep.cookie == XE_I2C_EP_COOKIE_DEVICE;
+}
+
+/**
* xe_i2c_irq_handler: Handler for I2C interrupts
* @xe: xe device instance
* @master_ctl: interrupt register
@@ -230,7 +244,7 @@ void xe_i2c_pm_suspend(struct xe_device *xe)
{
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
- if (!xe->i2c || xe->i2c->ep.cookie != XE_I2C_EP_COOKIE_DEVICE)
+ if (!xe_i2c_present(xe))
return;
xe_mmio_rmw32(mmio, I2C_CONFIG_PMCSR, PCI_PM_CTRL_STATE_MASK, (__force u32)PCI_D3hot);
@@ -241,11 +255,11 @@ void xe_i2c_pm_resume(struct xe_device *xe, bool d3cold)
{
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
- if (!xe->i2c || xe->i2c->ep.cookie != XE_I2C_EP_COOKIE_DEVICE)
+ if (!xe_i2c_present(xe))
return;
if (d3cold)
- xe_mmio_rmw32(mmio, I2C_CONFIG_CMD, 0, PCI_COMMAND_MEMORY);
+ xe_mmio_rmw32(mmio, I2C_CONFIG_CMD, 0, PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
xe_mmio_rmw32(mmio, I2C_CONFIG_PMCSR, PCI_PM_CTRL_STATE_MASK, (__force u32)PCI_D0);
drm_dbg(&xe->drm, "pmcsr: 0x%08x\n", xe_mmio_read32(mmio, I2C_CONFIG_PMCSR));
diff --git a/drivers/gpu/drm/xe/xe_i2c.h b/drivers/gpu/drm/xe/xe_i2c.h
index b767ed8ce52b..ecd5f10358e2 100644
--- a/drivers/gpu/drm/xe/xe_i2c.h
+++ b/drivers/gpu/drm/xe/xe_i2c.h
@@ -49,11 +49,13 @@ struct xe_i2c {
#if IS_ENABLED(CONFIG_I2C)
int xe_i2c_probe(struct xe_device *xe);
+bool xe_i2c_present(struct xe_device *xe);
void xe_i2c_irq_handler(struct xe_device *xe, u32 master_ctl);
void xe_i2c_pm_suspend(struct xe_device *xe);
void xe_i2c_pm_resume(struct xe_device *xe, bool d3cold);
#else
static inline int xe_i2c_probe(struct xe_device *xe) { return 0; }
+static inline bool xe_i2c_present(struct xe_device *xe) { return false; }
static inline void xe_i2c_irq_handler(struct xe_device *xe, u32 master_ctl) { }
static inline void xe_i2c_pm_suspend(struct xe_device *xe) { }
static inline void xe_i2c_pm_resume(struct xe_device *xe, bool d3cold) { }
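The #else stubs above use the usual static-inline pattern so callers never need their own #ifdef; a minimal standalone sketch (HAVE_I2C and the dummy struct are stand-ins for the real Kconfig symbol and device struct):

#include <stdbool.h>
#include <stdio.h>

struct xe_device { int dummy; };	/* placeholder for the real struct */

#ifdef HAVE_I2C				/* stand-in for IS_ENABLED(CONFIG_I2C) */
bool xe_i2c_present(struct xe_device *xe);
#else
static inline bool xe_i2c_present(struct xe_device *xe)
{
	(void)xe;
	return false;			/* folds to dead code in the caller */
}
#endif

int main(void)
{
	struct xe_device xe = { 0 };

	/* No #ifdef needed here: the stub makes the branch unreachable */
	if (xe_i2c_present(&xe))
		printf("quiesce the I2C controller\n");
	else
		printf("no I2C controller, nothing to do\n");
	return 0;
}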
diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c
index 5df5b8c2a3e4..870edaf69388 100644
--- a/drivers/gpu/drm/xe/xe_irq.c
+++ b/drivers/gpu/drm/xe/xe_irq.c
@@ -18,6 +18,7 @@
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_hw_engine.h"
+#include "xe_hw_error.h"
#include "xe_i2c.h"
#include "xe_memirq.h"
#include "xe_mmio.h"
@@ -468,6 +469,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
xe_mmio_write32(mmio, GFX_MSTR_IRQ, master_ctl);
gt_irq_handler(tile, master_ctl, intr_dw, identity);
+ xe_hw_error_irq_handler(tile, master_ctl);
/*
* Display interrupts (including display backlight operations
@@ -756,6 +758,8 @@ int xe_irq_install(struct xe_device *xe)
int nvec = 1;
int err;
+ xe_hw_error_init(xe);
+
xe_irq_reset(xe);
if (xe_device_has_msix(xe)) {
diff --git a/drivers/gpu/drm/xe/xe_late_bind_fw.c b/drivers/gpu/drm/xe/xe_late_bind_fw.c
new file mode 100644
index 000000000000..768442ca7da6
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_late_bind_fw.c
@@ -0,0 +1,464 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/component.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+
+#include <drm/drm_managed.h>
+#include <drm/intel/i915_component.h>
+#include <drm/intel/intel_lb_mei_interface.h>
+#include <drm/drm_print.h>
+
+#include "xe_device.h"
+#include "xe_late_bind_fw.h"
+#include "xe_pcode.h"
+#include "xe_pcode_api.h"
+#include "xe_pm.h"
+
+/*
+ * The component should load quite quickly in most cases, but it could take
+ * a bit. Using a very big timeout just to cover the worst case scenario
+ */
+#define LB_INIT_TIMEOUT_MS 20000
+
+/*
+ * Retry interval set to 6 seconds, in steps of 200 ms, to allow time for
+ * other OS components to release the MEI CL handle
+ */
+#define LB_FW_LOAD_RETRY_MAXCOUNT 30
+#define LB_FW_LOAD_RETRY_PAUSE_MS 200
+
+static const u32 fw_id_to_type[] = {
+ [XE_LB_FW_FAN_CONTROL] = INTEL_LB_TYPE_FAN_CONTROL,
+ };
+
+static const char * const fw_id_to_name[] = {
+ [XE_LB_FW_FAN_CONTROL] = "fan_control",
+ };
+
+static struct xe_device *
+late_bind_to_xe(struct xe_late_bind *late_bind)
+{
+ return container_of(late_bind, struct xe_device, late_bind);
+}
+
+static struct xe_device *
+late_bind_fw_to_xe(struct xe_late_bind_fw *lb_fw)
+{
+ return container_of(lb_fw, struct xe_device, late_bind.late_bind_fw[lb_fw->id]);
+}
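Both helpers above are container_of() walks from the embedded member back to the enclosing xe_device; a small userspace re-implementation shows why that works (struct layout simplified):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct late_bind { int disable; };
struct device { int id; struct late_bind late_bind; };

int main(void)
{
	struct device dev = { .id = 7 };
	struct late_bind *lb = &dev.late_bind;

	/* Subtracting the member offset recovers the enclosing struct */
	struct device *back = container_of(lb, struct device, late_bind);

	printf("id=%d\n", back->id);	/* prints 7 */
	return 0;
}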
+
+/* Refer to the "Late Bind based Firmware Layout" documentation entry for details */
+static int parse_cpd_header(struct xe_late_bind_fw *lb_fw,
+ const void *data, size_t size, const char *manifest_entry)
+{
+ struct xe_device *xe = late_bind_fw_to_xe(lb_fw);
+ const struct gsc_cpd_header_v2 *header = data;
+ const struct gsc_manifest_header *manifest;
+ const struct gsc_cpd_entry *entry;
+ size_t min_size = sizeof(*header);
+ u32 offset = 0;
+ int i;
+
+ /* manifest_entry is mandatory */
+ xe_assert(xe, manifest_entry);
+
+ if (size < min_size || header->header_marker != GSC_CPD_HEADER_MARKER)
+ return -ENOENT;
+
+ if (header->header_length < sizeof(struct gsc_cpd_header_v2)) {
+ drm_err(&xe->drm, "%s late binding fw: Invalid CPD header length %u!\n",
+ fw_id_to_name[lb_fw->id], header->header_length);
+ return -EINVAL;
+ }
+
+ min_size = header->header_length + sizeof(struct gsc_cpd_entry) * header->num_of_entries;
+ if (size < min_size) {
+ drm_err(&xe->drm, "%s late binding fw: too small! %zu < %zu\n",
+ fw_id_to_name[lb_fw->id], size, min_size);
+ return -ENODATA;
+ }
+
+ /* Look for the manifest first */
+ entry = (void *)header + header->header_length;
+ for (i = 0; i < header->num_of_entries; i++, entry++)
+ if (strcmp(entry->name, manifest_entry) == 0)
+ offset = entry->offset & GSC_CPD_ENTRY_OFFSET_MASK;
+
+ if (!offset) {
+ drm_err(&xe->drm, "%s late binding fw: Failed to find manifest_entry\n",
+ fw_id_to_name[lb_fw->id]);
+ return -ENODATA;
+ }
+
+ min_size = offset + sizeof(struct gsc_manifest_header);
+ if (size < min_size) {
+ drm_err(&xe->drm, "%s late binding fw: too small! %zu < %zu\n",
+ fw_id_to_name[lb_fw->id], size, min_size);
+ return -ENODATA;
+ }
+
+ manifest = data + offset;
+
+ lb_fw->version = manifest->fw_version;
+
+ return 0;
+}
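The parser above never dereferences a field before proving the buffer covers it; a condensed sketch of that check-size-before-deref pattern with an illustrative layout:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

struct hdr {
	uint32_t marker;
	uint32_t header_length;
	uint32_t num_of_entries;
};

static int parse(const void *data, size_t size, uint32_t want_marker)
{
	const struct hdr *h = data;
	size_t min_size = sizeof(*h);

	/* Never read a field before proving the buffer covers it */
	if (size < min_size || h->marker != want_marker)
		return -ENOENT;
	if (h->header_length < sizeof(*h))
		return -EINVAL;

	/* Re-derive min_size before touching the entry table it covers;
	 * a hardened parser would also guard the multiplication */
	min_size = h->header_length + (size_t)h->num_of_entries * 16;
	if (size < min_size)
		return -ENODATA;
	return 0;
}

int main(void)
{
	struct hdr h = { .marker = 0x44504324, .header_length = sizeof(h) };

	return parse(&h, sizeof(h), 0x44504324);	/* returns 0 */
}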
+
+/* Refer to the "Late Bind based Firmware Layout" documentation entry for details */
+static int parse_lb_layout(struct xe_late_bind_fw *lb_fw,
+ const void *data, size_t size, const char *fpt_entry)
+{
+ struct xe_device *xe = late_bind_fw_to_xe(lb_fw);
+ const struct csc_fpt_header *header = data;
+ const struct csc_fpt_entry *entry;
+ size_t min_size = sizeof(*header);
+ u32 offset = 0;
+ int i;
+
+ /* fpt_entry is mandatory */
+ xe_assert(xe, fpt_entry);
+
+ if (size < min_size || header->header_marker != CSC_FPT_HEADER_MARKER)
+ return -ENOENT;
+
+ if (header->header_length < sizeof(struct csc_fpt_header)) {
+ drm_err(&xe->drm, "%s late binding fw: Invalid FPT header length %u!\n",
+ fw_id_to_name[lb_fw->id], header->header_length);
+ return -EINVAL;
+ }
+
+ min_size = header->header_length + sizeof(struct csc_fpt_entry) * header->num_of_entries;
+ if (size < min_size) {
+ drm_err(&xe->drm, "%s late binding fw: too small! %zu < %zu\n",
+ fw_id_to_name[lb_fw->id], size, min_size);
+ return -ENODATA;
+ }
+
+ /* Look for the cpd header first */
+ entry = (void *)header + header->header_length;
+ for (i = 0; i < header->num_of_entries; i++, entry++)
+ if (strcmp(entry->name, fpt_entry) == 0)
+ offset = entry->offset;
+
+ if (!offset) {
+ drm_err(&xe->drm, "%s late binding fw: Failed to find fpt_entry\n",
+ fw_id_to_name[lb_fw->id]);
+ return -ENODATA;
+ }
+
+ min_size = offset + sizeof(struct gsc_cpd_header_v2);
+ if (size < min_size) {
+ drm_err(&xe->drm, "%s late binding fw: too small! %zu < %zu\n",
+ fw_id_to_name[lb_fw->id], size, min_size);
+ return -ENODATA;
+ }
+
+ return parse_cpd_header(lb_fw, data + offset, size - offset, "LTES.man");
+}
+
+static const char *xe_late_bind_parse_status(uint32_t status)
+{
+ switch (status) {
+ case INTEL_LB_STATUS_SUCCESS:
+ return "success";
+ case INTEL_LB_STATUS_4ID_MISMATCH:
+ return "4Id Mismatch";
+ case INTEL_LB_STATUS_ARB_FAILURE:
+ return "ARB Failure";
+ case INTEL_LB_STATUS_GENERAL_ERROR:
+ return "General Error";
+ case INTEL_LB_STATUS_INVALID_PARAMS:
+ return "Invalid Params";
+ case INTEL_LB_STATUS_INVALID_SIGNATURE:
+ return "Invalid Signature";
+ case INTEL_LB_STATUS_INVALID_PAYLOAD:
+ return "Invalid Payload";
+ case INTEL_LB_STATUS_TIMEOUT:
+ return "Timeout";
+ default:
+ return "Unknown error";
+ }
+}
+
+static int xe_late_bind_fw_num_fans(struct xe_late_bind *late_bind, u32 *num_fans)
+{
+ struct xe_device *xe = late_bind_to_xe(late_bind);
+ struct xe_tile *root_tile = xe_device_get_root_tile(xe);
+
+ return xe_pcode_read(root_tile,
+ PCODE_MBOX(FAN_SPEED_CONTROL, FSC_READ_NUM_FANS, 0), num_fans, NULL);
+}
+
+void xe_late_bind_wait_for_worker_completion(struct xe_late_bind *late_bind)
+{
+ struct xe_device *xe = late_bind_to_xe(late_bind);
+ struct xe_late_bind_fw *lbfw;
+ int fw_id;
+
+ for (fw_id = 0; fw_id < XE_LB_FW_MAX_ID; fw_id++) {
+ lbfw = &late_bind->late_bind_fw[fw_id];
+ if (lbfw->payload && late_bind->wq) {
+ drm_dbg(&xe->drm, "Flush work: load %s firmware\n",
+ fw_id_to_name[lbfw->id]);
+ flush_work(&lbfw->work);
+ }
+ }
+}
+
+static void xe_late_bind_work(struct work_struct *work)
+{
+ struct xe_late_bind_fw *lbfw = container_of(work, struct xe_late_bind_fw, work);
+ struct xe_late_bind *late_bind = container_of(lbfw, struct xe_late_bind,
+ late_bind_fw[lbfw->id]);
+ struct xe_device *xe = late_bind_to_xe(late_bind);
+ int retry = LB_FW_LOAD_RETRY_MAXCOUNT;
+ int ret;
+ int slept;
+
+ xe_device_assert_mem_access(xe);
+
+ /* we can queue this before the component is bound */
+ for (slept = 0; slept < LB_INIT_TIMEOUT_MS; slept += 100) {
+ if (late_bind->component.ops)
+ break;
+ msleep(100);
+ }
+
+ if (!late_bind->component.ops) {
+ drm_err(&xe->drm, "Late bind component not bound\n");
+ /* Do not re-attempt fw load */
+ drmm_kfree(&xe->drm, (void *)lbfw->payload);
+ lbfw->payload = NULL;
+ goto out;
+ }
+
+ drm_dbg(&xe->drm, "Load %s firmware\n", fw_id_to_name[lbfw->id]);
+
+ do {
+ ret = late_bind->component.ops->push_payload(late_bind->component.mei_dev,
+ lbfw->type,
+ lbfw->flags,
+ lbfw->payload,
+ lbfw->payload_size);
+ if (!ret)
+ break;
+ msleep(LB_FW_LOAD_RETRY_PAUSE_MS);
+ } while (--retry && ret == -EBUSY);
+
+ if (!ret) {
+ drm_dbg(&xe->drm, "Load %s firmware successful\n",
+ fw_id_to_name[lbfw->id]);
+ goto out;
+ }
+
+ if (ret > 0)
+ drm_err(&xe->drm, "Load %s firmware failed with err %d, %s\n",
+ fw_id_to_name[lbfw->id], ret, xe_late_bind_parse_status(ret));
+ else
+ drm_err(&xe->drm, "Load %s firmware failed with err %d",
+ fw_id_to_name[lbfw->id], ret);
+ /* Do not re-attempt fw load */
+ drmm_kfree(&xe->drm, (void *)lbfw->payload);
+ lbfw->payload = NULL;
+
+out:
+ xe_pm_runtime_put(xe);
+}
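The push loop above retries only on -EBUSY, pausing between attempts until the retry budget runs out; the same shape as a standalone sketch, with push_payload() as a hypothetical stand-in for the component op:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>		/* usleep() stands in for msleep() */

#define RETRY_MAX	30
#define RETRY_PAUSE_US	(200 * 1000)

/* Hypothetical stand-in for component.ops->push_payload() */
static int push_payload(int attempt)
{
	return attempt < 3 ? -EBUSY : 0;	/* busy three times, then succeed */
}

int main(void)
{
	int retry = RETRY_MAX, attempt = 0, ret;

	do {
		ret = push_payload(attempt++);
		if (!ret)
			break;
		usleep(RETRY_PAUSE_US);
	} while (--retry && ret == -EBUSY);	/* only -EBUSY is retried */

	printf("ret=%d after %d attempt(s)\n", ret, attempt);
	return 0;
}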
+
+int xe_late_bind_fw_load(struct xe_late_bind *late_bind)
+{
+ struct xe_device *xe = late_bind_to_xe(late_bind);
+ struct xe_late_bind_fw *lbfw;
+ int fw_id;
+
+ if (!late_bind->component_added)
+ return -ENODEV;
+
+ if (late_bind->disable)
+ return 0;
+
+ for (fw_id = 0; fw_id < XE_LB_FW_MAX_ID; fw_id++) {
+ lbfw = &late_bind->late_bind_fw[fw_id];
+ if (lbfw->payload) {
+ xe_pm_runtime_get_noresume(xe);
+ queue_work(late_bind->wq, &lbfw->work);
+ }
+ }
+ return 0;
+}
+
+static int __xe_late_bind_fw_init(struct xe_late_bind *late_bind, u32 fw_id)
+{
+ struct xe_device *xe = late_bind_to_xe(late_bind);
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ struct xe_late_bind_fw *lb_fw;
+ const struct firmware *fw;
+ u32 num_fans;
+ int ret;
+
+ if (fw_id >= XE_LB_FW_MAX_ID)
+ return -EINVAL;
+
+ lb_fw = &late_bind->late_bind_fw[fw_id];
+
+ lb_fw->id = fw_id;
+ lb_fw->type = fw_id_to_type[lb_fw->id];
+ lb_fw->flags &= ~INTEL_LB_FLAG_IS_PERSISTENT;
+
+ if (lb_fw->type == INTEL_LB_TYPE_FAN_CONTROL) {
+ ret = xe_late_bind_fw_num_fans(late_bind, &num_fans);
+ if (ret) {
+ drm_dbg(&xe->drm, "Failed to read number of fans: %d\n", ret);
+ return 0; /* Not a fatal error, continue without fan control */
+ }
+ drm_dbg(&xe->drm, "Number of Fans: %d\n", num_fans);
+ if (!num_fans)
+ return 0;
+ }
+
+ snprintf(lb_fw->blob_path, sizeof(lb_fw->blob_path), "xe/%s_8086_%04x_%04x_%04x.bin",
+ fw_id_to_name[lb_fw->id], pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device);
+
+ drm_dbg(&xe->drm, "Request late binding firmware %s\n", lb_fw->blob_path);
+ ret = firmware_request_nowarn(&fw, lb_fw->blob_path, xe->drm.dev);
+ if (ret) {
+ drm_dbg(&xe->drm, "%s late binding fw not available for current device",
+ fw_id_to_name[lb_fw->id]);
+ return 0;
+ }
+
+ if (fw->size > XE_LB_MAX_PAYLOAD_SIZE) {
+ drm_err(&xe->drm, "Firmware %s size %zu is larger than max pay load size %u\n",
+ lb_fw->blob_path, fw->size, XE_LB_MAX_PAYLOAD_SIZE);
+ release_firmware(fw);
+ return -ENODATA;
+ }
+
+ ret = parse_lb_layout(lb_fw, fw->data, fw->size, "LTES");
+ if (ret)
+ return ret;
+
+ lb_fw->payload_size = fw->size;
+ lb_fw->payload = drmm_kzalloc(&xe->drm, lb_fw->payload_size, GFP_KERNEL);
+ if (!lb_fw->payload) {
+ release_firmware(fw);
+ return -ENOMEM;
+ }
+
+ drm_info(&xe->drm, "Using %s firmware from %s version %u.%u.%u.%u\n",
+ fw_id_to_name[lb_fw->id], lb_fw->blob_path,
+ lb_fw->version.major, lb_fw->version.minor,
+ lb_fw->version.hotfix, lb_fw->version.build);
+
+ memcpy((void *)lb_fw->payload, fw->data, lb_fw->payload_size);
+ release_firmware(fw);
+ INIT_WORK(&lb_fw->work, xe_late_bind_work);
+
+ return 0;
+}
+
+static int xe_late_bind_fw_init(struct xe_late_bind *late_bind)
+{
+ int ret;
+ int fw_id;
+
+ late_bind->wq = alloc_ordered_workqueue("late-bind-ordered-wq", 0);
+ if (!late_bind->wq)
+ return -ENOMEM;
+
+ for (fw_id = 0; fw_id < XE_LB_FW_MAX_ID; fw_id++) {
+ ret = __xe_late_bind_fw_init(late_bind, fw_id);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xe_late_bind_component_bind(struct device *xe_kdev,
+ struct device *mei_kdev, void *data)
+{
+ struct xe_device *xe = kdev_to_xe_device(xe_kdev);
+ struct xe_late_bind *late_bind = &xe->late_bind;
+
+ late_bind->component.ops = data;
+ late_bind->component.mei_dev = mei_kdev;
+
+ return 0;
+}
+
+static void xe_late_bind_component_unbind(struct device *xe_kdev,
+ struct device *mei_kdev, void *data)
+{
+ struct xe_device *xe = kdev_to_xe_device(xe_kdev);
+ struct xe_late_bind *late_bind = &xe->late_bind;
+
+ xe_late_bind_wait_for_worker_completion(late_bind);
+
+ late_bind->component.ops = NULL;
+}
+
+static const struct component_ops xe_late_bind_component_ops = {
+ .bind = xe_late_bind_component_bind,
+ .unbind = xe_late_bind_component_unbind,
+};
+
+static void xe_late_bind_remove(void *arg)
+{
+ struct xe_late_bind *late_bind = arg;
+ struct xe_device *xe = late_bind_to_xe(late_bind);
+
+ xe_late_bind_wait_for_worker_completion(late_bind);
+
+ late_bind->component_added = false;
+
+ component_del(xe->drm.dev, &xe_late_bind_component_ops);
+ if (late_bind->wq) {
+ destroy_workqueue(late_bind->wq);
+ late_bind->wq = NULL;
+ }
+}
+
+/**
+ * xe_late_bind_init() - add xe mei late binding component
+ * @late_bind: pointer to late bind structure.
+ *
+ * Return: 0 if the initialization was successful, a negative errno otherwise.
+ */
+int xe_late_bind_init(struct xe_late_bind *late_bind)
+{
+ struct xe_device *xe = late_bind_to_xe(late_bind);
+ int err;
+
+ if (!xe->info.has_late_bind)
+ return 0;
+
+ if (!IS_ENABLED(CONFIG_INTEL_MEI_LB) || !IS_ENABLED(CONFIG_INTEL_MEI_GSC)) {
+ drm_info(&xe->drm, "Can't init xe mei late bind missing mei component\n");
+ return 0;
+ }
+
+ err = component_add_typed(xe->drm.dev, &xe_late_bind_component_ops,
+ INTEL_COMPONENT_LB);
+ if (err < 0) {
+ drm_err(&xe->drm, "Failed to add mei late bind component (%pe)\n", ERR_PTR(err));
+ return err;
+ }
+
+ late_bind->component_added = true;
+
+ err = devm_add_action_or_reset(xe->drm.dev, xe_late_bind_remove, late_bind);
+ if (err)
+ return err;
+
+ err = xe_late_bind_fw_init(late_bind);
+ if (err)
+ return err;
+
+ return xe_late_bind_fw_load(late_bind);
+}
diff --git a/drivers/gpu/drm/xe/xe_late_bind_fw.h b/drivers/gpu/drm/xe/xe_late_bind_fw.h
new file mode 100644
index 000000000000..07e437390539
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_late_bind_fw.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_LATE_BIND_FW_H_
+#define _XE_LATE_BIND_FW_H_
+
+#include <linux/types.h>
+
+struct xe_late_bind;
+
+int xe_late_bind_init(struct xe_late_bind *late_bind);
+int xe_late_bind_fw_load(struct xe_late_bind *late_bind);
+void xe_late_bind_wait_for_worker_completion(struct xe_late_bind *late_bind);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_late_bind_fw_types.h b/drivers/gpu/drm/xe/xe_late_bind_fw_types.h
new file mode 100644
index 000000000000..0f5da89ce98b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_late_bind_fw_types.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_LATE_BIND_TYPES_H_
+#define _XE_LATE_BIND_TYPES_H_
+
+#include <linux/iosys-map.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include "xe_uc_fw_abi.h"
+
+#define XE_LB_MAX_PAYLOAD_SIZE SZ_4K
+
+/**
+ * enum xe_late_bind_fw_id - late binding firmware index
+ */
+enum xe_late_bind_fw_id {
+ XE_LB_FW_FAN_CONTROL = 0,
+ XE_LB_FW_MAX_ID
+};
+
+/**
+ * struct xe_late_bind_fw - late binding firmware blob and its upload state
+ */
+struct xe_late_bind_fw {
+ /** @id: firmware index */
+ u32 id;
+ /** @blob_path: firmware binary path */
+ char blob_path[PATH_MAX];
+ /** @type: firmware type */
+ u32 type;
+ /** @flags: firmware flags */
+ u32 flags;
+ /** @payload: to store the late binding blob */
+ const u8 *payload;
+ /** @payload_size: late binding blob payload size */
+ size_t payload_size;
+ /** @work: worker to upload the late binding blob */
+ struct work_struct work;
+ /** @version: late binding blob manifest version */
+ struct gsc_version version;
+};
+
+/**
+ * struct xe_late_bind_component - Late Binding services component
+ * @mei_dev: device that provide Late Binding service.
+ * @ops: Ops implemented by Late Binding driver, used by Xe driver.
+ *
+ * Communication between Xe and MEI drivers for Late Binding services
+ */
+struct xe_late_bind_component {
+ struct device *mei_dev;
+ const struct intel_lb_component_ops *ops;
+};
+
+/**
+ * struct xe_late_bind - late binding support data
+ */
+struct xe_late_bind {
+ /** @component: struct for communication with mei component */
+ struct xe_late_bind_component component;
+ /** @late_bind_fw: late binding firmware array */
+ struct xe_late_bind_fw late_bind_fw[XE_LB_FW_MAX_ID];
+ /** @wq: workqueue to submit request to download late bind blob */
+ struct workqueue_struct *wq;
+ /** @component_added: whether the component has been added */
+ bool component_added;
+ /** @disable: to block late binding reload during pm resume flow */
+ bool disable;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_lmtt.c b/drivers/gpu/drm/xe/xe_lmtt.c
index a2000307d5bf..62fc5a1a332d 100644
--- a/drivers/gpu/drm/xe/xe_lmtt.c
+++ b/drivers/gpu/drm/xe/xe_lmtt.c
@@ -11,7 +11,7 @@
#include "xe_assert.h"
#include "xe_bo.h"
-#include "xe_gt_tlb_invalidation.h"
+#include "xe_tlb_inval.h"
#include "xe_lmtt.h"
#include "xe_map.h"
#include "xe_mmio.h"
@@ -67,12 +67,12 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level
goto out;
}
- bo = xe_bo_create_pin_map(lmtt_to_xe(lmtt), lmtt_to_tile(lmtt), NULL,
- PAGE_ALIGN(lmtt->ops->lmtt_pte_size(level) *
- lmtt->ops->lmtt_pte_num(level)),
- ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) |
- XE_BO_FLAG_NEEDS_64K);
+ bo = xe_bo_create_pin_map_novm(lmtt_to_xe(lmtt), lmtt_to_tile(lmtt),
+ PAGE_ALIGN(lmtt->ops->lmtt_pte_size(level) *
+ lmtt->ops->lmtt_pte_num(level)),
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) |
+ XE_BO_FLAG_NEEDS_64K, false);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto out_free_pt;
@@ -195,14 +195,17 @@ static void lmtt_setup_dir_ptr(struct xe_lmtt *lmtt)
struct xe_tile *tile = lmtt_to_tile(lmtt);
struct xe_device *xe = tile_to_xe(tile);
dma_addr_t offset = xe_bo_main_addr(lmtt->pd->bo, XE_PAGE_SIZE);
+ struct xe_gt *gt;
+ u8 id;
lmtt_debug(lmtt, "DIR offset %pad\n", &offset);
lmtt_assert(lmtt, xe_bo_is_vram(lmtt->pd->bo));
lmtt_assert(lmtt, IS_ALIGNED(offset, SZ_64K));
- xe_mmio_write32(&tile->mmio,
- GRAPHICS_VER(xe) >= 20 ? XE2_LMEM_CFG : LMEM_CFG,
- LMEM_EN | REG_FIELD_PREP(LMTT_DIR_PTR, offset / SZ_64K));
+ for_each_gt_on_tile(gt, tile, id)
+ xe_mmio_write32(&gt->mmio,
+ GRAPHICS_VER(xe) >= 20 ? XE2_LMEM_CFG : LMEM_CFG,
+ LMEM_EN | REG_FIELD_PREP(LMTT_DIR_PTR, offset / SZ_64K));
}
/**
@@ -225,8 +228,8 @@ void xe_lmtt_init_hw(struct xe_lmtt *lmtt)
static int lmtt_invalidate_hw(struct xe_lmtt *lmtt)
{
- struct xe_gt_tlb_invalidation_fence fences[XE_MAX_GT_PER_TILE];
- struct xe_gt_tlb_invalidation_fence *fence = fences;
+ struct xe_tlb_inval_fence fences[XE_MAX_GT_PER_TILE];
+ struct xe_tlb_inval_fence *fence = fences;
struct xe_tile *tile = lmtt_to_tile(lmtt);
struct xe_gt *gt;
int result = 0;
@@ -234,8 +237,8 @@ static int lmtt_invalidate_hw(struct xe_lmtt *lmtt)
u8 id;
for_each_gt_on_tile(gt, tile, id) {
- xe_gt_tlb_invalidation_fence_init(gt, fence, true);
- err = xe_gt_tlb_invalidation_all(gt, fence);
+ xe_tlb_inval_fence_init(&gt->tlb_inval, fence, true);
+ err = xe_tlb_inval_all(&gt->tlb_inval, fence);
result = result ?: err;
fence++;
}
@@ -249,7 +252,7 @@ static int lmtt_invalidate_hw(struct xe_lmtt *lmtt)
*/
fence = fences;
for_each_gt_on_tile(gt, tile, id)
- xe_gt_tlb_invalidation_fence_wait(fence++);
+ xe_tlb_inval_fence_wait(fence++);
return result;
}
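The invalidation above is deliberately two-phase: issue on every GT first, then wait on every fence, recording the first error but never skipping a wait so no fence leaks. A reduced sketch of that shape:

#include <stdio.h>

#define MAX_GT_PER_TILE 2

struct fence { int signaled; };

static int inval_issue(struct fence *f)
{
	f->signaled = 1;	/* pretend the HW acks immediately */
	return 0;
}

static void fence_wait(struct fence *f)
{
	while (!f->signaled)
		;		/* a real fence would sleep here */
}

int main(void)
{
	struct fence fences[MAX_GT_PER_TILE] = { 0 };
	int result = 0, id, err;

	/* Phase 1: kick off invalidations everywhere without waiting */
	for (id = 0; id < MAX_GT_PER_TILE; id++) {
		err = inval_issue(&fences[id]);
		result = result ?: err;	/* keep first error (GNU ?:, as in the driver) */
	}

	/* Phase 2: wait on every fence, even after an error, so none leak */
	for (id = 0; id < MAX_GT_PER_TILE; id++)
		fence_wait(&fences[id]);

	printf("result=%d\n", result);
	return 0;
}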
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index 6d38411bdeba..47e9df775072 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -8,6 +8,7 @@
#include <generated/xe_wa_oob.h>
#include <linux/ascii85.h>
+#include <linux/panic.h>
#include "instructions/xe_mi_commands.h"
#include "instructions/xe_gfxpipe_commands.h"
@@ -16,6 +17,7 @@
#include "regs/xe_lrc_layout.h"
#include "xe_bb.h"
#include "xe_bo.h"
+#include "xe_configfs.h"
#include "xe_device.h"
#include "xe_drm_client.h"
#include "xe_exec_queue_types.h"
@@ -41,7 +43,6 @@
#define LRC_PPHWSP_SIZE SZ_4K
#define LRC_INDIRECT_CTX_BO_SIZE SZ_4K
#define LRC_INDIRECT_RING_STATE_SIZE SZ_4K
-#define LRC_WA_BB_SIZE SZ_4K
/*
* Layout of the LRC and associated data allocated as
@@ -76,6 +77,17 @@ lrc_to_xe(struct xe_lrc *lrc)
static bool
gt_engine_needs_indirect_ctx(struct xe_gt *gt, enum xe_engine_class class)
{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ if (XE_GT_WA(gt, 16010904313) &&
+ (class == XE_ENGINE_CLASS_RENDER ||
+ class == XE_ENGINE_CLASS_COMPUTE))
+ return true;
+
+ if (xe_configfs_get_ctx_restore_mid_bb(to_pci_dev(xe->drm.dev),
+ class, NULL))
+ return true;
+
return false;
}
@@ -692,7 +704,13 @@ u32 xe_lrc_regs_offset(struct xe_lrc *lrc)
return xe_lrc_pphwsp_offset(lrc) + LRC_PPHWSP_SIZE;
}
-static size_t lrc_reg_size(struct xe_device *xe)
+/**
+ * xe_lrc_reg_size() - Get size of the LRC registers area within queues
+ * @xe: the &xe_device struct instance
+ *
+ * Returns: Size of the LRC registers area for current platform
+ */
+size_t xe_lrc_reg_size(struct xe_device *xe)
{
if (GRAPHICS_VERx100(xe) >= 1250)
return 96 * sizeof(u32);
@@ -702,7 +720,7 @@ static size_t lrc_reg_size(struct xe_device *xe)
size_t xe_lrc_skip_size(struct xe_device *xe)
{
- return LRC_PPHWSP_SIZE + lrc_reg_size(xe);
+ return LRC_PPHWSP_SIZE + xe_lrc_reg_size(xe);
}
static inline u32 __xe_lrc_seqno_offset(struct xe_lrc *lrc)
@@ -943,6 +961,47 @@ static void *empty_lrc_data(struct xe_hw_engine *hwe)
return data;
}
+/**
+ * xe_default_lrc_update_memirq_regs_with_address - Re-compute GGTT references in default LRC
+ * of given engine.
+ * @hwe: the &xe_hw_engine struct instance
+ */
+void xe_default_lrc_update_memirq_regs_with_address(struct xe_hw_engine *hwe)
+{
+ struct xe_gt *gt = hwe->gt;
+ u32 *regs;
+
+ if (!gt->default_lrc[hwe->class])
+ return;
+
+ regs = gt->default_lrc[hwe->class] + LRC_PPHWSP_SIZE;
+ set_memory_based_intr(regs, hwe);
+}
+
+/**
+ * xe_lrc_update_memirq_regs_with_address - Re-compute GGTT references in mem interrupt data
+ * for given LRC.
+ * @lrc: the &xe_lrc struct instance
+ * @hwe: the &xe_hw_engine struct instance
+ * @regs: scratch buffer to be used as temporary storage
+ */
+void xe_lrc_update_memirq_regs_with_address(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
+ u32 *regs)
+{
+ struct xe_gt *gt = hwe->gt;
+ struct iosys_map map;
+ size_t regs_len;
+
+ if (!xe_device_uses_memirq(gt_to_xe(gt)))
+ return;
+
+ map = __xe_lrc_regs_map(lrc);
+ regs_len = xe_lrc_reg_size(gt_to_xe(gt));
+ xe_map_memcpy_from(gt_to_xe(gt), regs, &map, 0, regs_len);
+ set_memory_based_intr(regs, hwe);
+ xe_map_memcpy_to(gt_to_xe(gt), &map, 0, regs, regs_len);
+}
+
static void xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm)
{
u64 desc = xe_vm_pdp4_descriptor(vm, gt_to_tile(lrc->gt));
@@ -1014,6 +1073,121 @@ static ssize_t setup_utilization_wa(struct xe_lrc *lrc,
return cmd - batch;
}
+static ssize_t setup_timestamp_wa(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
+ u32 *batch, size_t max_len)
+{
+ const u32 ts_addr = __xe_lrc_ctx_timestamp_ggtt_addr(lrc);
+ u32 *cmd = batch;
+
+ if (!XE_GT_WA(lrc->gt, 16010904313) ||
+ !(hwe->class == XE_ENGINE_CLASS_RENDER ||
+ hwe->class == XE_ENGINE_CLASS_COMPUTE ||
+ hwe->class == XE_ENGINE_CLASS_COPY ||
+ hwe->class == XE_ENGINE_CLASS_VIDEO_DECODE ||
+ hwe->class == XE_ENGINE_CLASS_VIDEO_ENHANCE))
+ return 0;
+
+ if (xe_gt_WARN_ON(lrc->gt, max_len < 12))
+ return -ENOSPC;
+
+ *cmd++ = MI_LOAD_REGISTER_MEM | MI_LRM_USE_GGTT | MI_LRI_LRM_CS_MMIO |
+ MI_LRM_ASYNC;
+ *cmd++ = RING_CTX_TIMESTAMP(0).addr;
+ *cmd++ = ts_addr;
+ *cmd++ = 0;
+
+ *cmd++ = MI_LOAD_REGISTER_MEM | MI_LRM_USE_GGTT | MI_LRI_LRM_CS_MMIO |
+ MI_LRM_ASYNC;
+ *cmd++ = RING_CTX_TIMESTAMP(0).addr;
+ *cmd++ = ts_addr;
+ *cmd++ = 0;
+
+ *cmd++ = MI_LOAD_REGISTER_MEM | MI_LRM_USE_GGTT | MI_LRI_LRM_CS_MMIO;
+ *cmd++ = RING_CTX_TIMESTAMP(0).addr;
+ *cmd++ = ts_addr;
+ *cmd++ = 0;
+
+ return cmd - batch;
+}
+
+static ssize_t setup_configfs_post_ctx_restore_bb(struct xe_lrc *lrc,
+ struct xe_hw_engine *hwe,
+ u32 *batch, size_t max_len)
+{
+ struct xe_device *xe = gt_to_xe(lrc->gt);
+ const u32 *user_batch;
+ u32 *cmd = batch;
+ u32 count;
+
+ count = xe_configfs_get_ctx_restore_post_bb(to_pci_dev(xe->drm.dev),
+ hwe->class, &user_batch);
+ if (!count)
+ return 0;
+
+ if (count > max_len)
+ return -ENOSPC;
+
+ /*
+ * This should be used only for tests and validation. Taint the kernel
+ * as anything could be submitted directly in context switches
+ */
+ add_taint(TAINT_TEST, LOCKDEP_STILL_OK);
+
+ memcpy(cmd, user_batch, count * sizeof(u32));
+ cmd += count;
+
+ return cmd - batch;
+}
+
+static ssize_t setup_configfs_mid_ctx_restore_bb(struct xe_lrc *lrc,
+ struct xe_hw_engine *hwe,
+ u32 *batch, size_t max_len)
+{
+ struct xe_device *xe = gt_to_xe(lrc->gt);
+ const u32 *user_batch;
+ u32 *cmd = batch;
+ u32 count;
+
+ count = xe_configfs_get_ctx_restore_mid_bb(to_pci_dev(xe->drm.dev),
+ hwe->class, &user_batch);
+ if (!count)
+ return 0;
+
+ if (count > max_len)
+ return -ENOSPC;
+
+ /*
+ * This should be used only for tests and validation. Taint the kernel
+ * as anything could be submitted directly in context switches
+ */
+ add_taint(TAINT_TEST, LOCKDEP_STILL_OK);
+
+ memcpy(cmd, user_batch, count * sizeof(u32));
+ cmd += count;
+
+ return cmd - batch;
+}
+
+static ssize_t setup_invalidate_state_cache_wa(struct xe_lrc *lrc,
+ struct xe_hw_engine *hwe,
+ u32 *batch, size_t max_len)
+{
+ u32 *cmd = batch;
+
+ if (!XE_GT_WA(lrc->gt, 18022495364) ||
+ hwe->class != XE_ENGINE_CLASS_RENDER)
+ return 0;
+
+ if (xe_gt_WARN_ON(lrc->gt, max_len < 3))
+ return -ENOSPC;
+
+ *cmd++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1);
+ *cmd++ = CS_DEBUG_MODE1(0).addr;
+ *cmd++ = _MASKED_BIT_ENABLE(INSTRUCTION_STATE_CACHE_INVALIDATE);
+
+ return cmd - batch;
+}
+
struct bo_setup {
ssize_t (*setup)(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
u32 *batch, size_t max_size);
@@ -1040,13 +1214,11 @@ static int setup_bo(struct bo_setup_state *state)
ssize_t remain;
if (state->lrc->bo->vmap.is_iomem) {
- state->buffer = kmalloc(state->max_size, GFP_KERNEL);
if (!state->buffer)
return -ENOMEM;
state->ptr = state->buffer;
} else {
state->ptr = state->lrc->bo->vmap.vaddr + state->offset;
- state->buffer = NULL;
}
remain = state->max_size / sizeof(u32);
@@ -1071,7 +1243,6 @@ static int setup_bo(struct bo_setup_state *state)
return 0;
fail:
- kfree(state->buffer);
return -ENOSPC;
}
@@ -1083,18 +1254,28 @@ static void finish_bo(struct bo_setup_state *state)
xe_map_memcpy_to(gt_to_xe(state->lrc->gt), &state->lrc->bo->vmap,
state->offset, state->buffer,
state->written * sizeof(u32));
- kfree(state->buffer);
}
-static int setup_wa_bb(struct xe_lrc *lrc, struct xe_hw_engine *hwe)
+/**
+ * xe_lrc_setup_wa_bb_with_scratch - Execute all wa bb setup callbacks.
+ * @lrc: the &xe_lrc struct instance
+ * @hwe: the &xe_hw_engine struct instance
+ * @scratch: preallocated scratch buffer for temporary storage
+ * Return: 0 on success, negative error code on failure
+ */
+int xe_lrc_setup_wa_bb_with_scratch(struct xe_lrc *lrc, struct xe_hw_engine *hwe, u32 *scratch)
{
static const struct bo_setup funcs[] = {
+ { .setup = setup_timestamp_wa },
+ { .setup = setup_invalidate_state_cache_wa },
{ .setup = setup_utilization_wa },
+ { .setup = setup_configfs_post_ctx_restore_bb },
};
struct bo_setup_state state = {
.lrc = lrc,
.hwe = hwe,
.max_size = LRC_WA_BB_SIZE,
+ .buffer = scratch,
.reserve_dw = 1,
.offset = __xe_lrc_wa_bb_offset(lrc),
.funcs = funcs,
@@ -1117,15 +1298,36 @@ static int setup_wa_bb(struct xe_lrc *lrc, struct xe_hw_engine *hwe)
return 0;
}
+static int setup_wa_bb(struct xe_lrc *lrc, struct xe_hw_engine *hwe)
+{
+ u32 *buf = NULL;
+ int ret;
+
+ if (lrc->bo->vmap.is_iomem)
+ buf = kmalloc(LRC_WA_BB_SIZE, GFP_KERNEL);
+
+ ret = xe_lrc_setup_wa_bb_with_scratch(lrc, hwe, buf);
+
+ kfree(buf);
+
+ return ret;
+}
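The wrapper above allocates scratch only when the BO sits in IO memory, where the batch cannot be assembled in place; a reduced sketch of that bounce-buffer pattern with illustrative names and sizes:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define WA_BB_SIZE 4096		/* illustrative, mirrors LRC_WA_BB_SIZE */

/* Stands in for xe_map_memcpy_to(): the one-shot copy into IO memory */
static void io_copy_out(char *iomem, const char *src, size_t len)
{
	memcpy(iomem, src, len);
}

static int setup_wa_bb(char *backing, int is_iomem)
{
	char *scratch = NULL, *ptr;

	if (is_iomem) {
		scratch = malloc(WA_BB_SIZE);	/* bounce buffer */
		if (!scratch)
			return -1;
		ptr = scratch;
	} else {
		ptr = backing;			/* build directly in place */
	}

	memset(ptr, 0, WA_BB_SIZE);		/* "emit" the batch here */

	if (scratch)
		io_copy_out(backing, scratch, WA_BB_SIZE);
	free(scratch);
	return 0;
}

int main(void)
{
	char backing[WA_BB_SIZE];

	printf("ret=%d\n", setup_wa_bb(backing, 1));
	return 0;
}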
+
static int
setup_indirect_ctx(struct xe_lrc *lrc, struct xe_hw_engine *hwe)
{
- static struct bo_setup rcs_funcs[] = {
+ static const struct bo_setup rcs_funcs[] = {
+ { .setup = setup_timestamp_wa },
+ { .setup = setup_configfs_mid_ctx_restore_bb },
+ };
+ static const struct bo_setup xcs_funcs[] = {
+ { .setup = setup_configfs_mid_ctx_restore_bb },
};
struct bo_setup_state state = {
.lrc = lrc,
.hwe = hwe,
.max_size = (63 * 64) /* max 63 cachelines */,
+ .buffer = NULL,
.offset = __xe_lrc_indirect_ctx_offset(lrc),
};
int ret;
@@ -1137,14 +1339,22 @@ setup_indirect_ctx(struct xe_lrc *lrc, struct xe_hw_engine *hwe)
hwe->class == XE_ENGINE_CLASS_COMPUTE) {
state.funcs = rcs_funcs;
state.num_funcs = ARRAY_SIZE(rcs_funcs);
+ } else {
+ state.funcs = xcs_funcs;
+ state.num_funcs = ARRAY_SIZE(xcs_funcs);
}
if (xe_gt_WARN_ON(lrc->gt, !state.funcs))
return 0;
+ if (lrc->bo->vmap.is_iomem)
+ state.buffer = kmalloc(state.max_size, GFP_KERNEL);
+
ret = setup_bo(&state);
- if (ret)
+ if (ret) {
+ kfree(state.buffer);
return ret;
+ }
/*
* Align to 64B cacheline so there's no garbage at the end for CS to
@@ -1156,15 +1366,17 @@ setup_indirect_ctx(struct xe_lrc *lrc, struct xe_hw_engine *hwe)
}
finish_bo(&state);
+ kfree(state.buffer);
+ /*
+ * Enable INDIRECT_CTX leaving INDIRECT_CTX_OFFSET at its default: it
+ * varies per engine class, but the default is good enough
+ */
xe_lrc_write_ctx_reg(lrc,
CTX_CS_INDIRECT_CTX,
(xe_bo_ggtt_addr(lrc->bo) + state.offset) |
/* Size in CLs. */
(state.written * sizeof(u32) / 64));
- xe_lrc_write_ctx_reg(lrc,
- CTX_CS_INDIRECT_CTX_OFFSET,
- CTX_INDIRECT_CTX_OFFSET_DEFAULT);
return 0;
}
@@ -1203,9 +1415,10 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
if (vm && vm->xef) /* userspace */
bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE;
- lrc->bo = xe_bo_create_pin_map(xe, tile, NULL, bo_size,
- ttm_bo_type_kernel,
- bo_flags);
+ lrc->bo = xe_bo_create_pin_map_novm(xe, tile,
+ bo_size,
+ ttm_bo_type_kernel,
+ bo_flags, false);
if (IS_ERR(lrc->bo))
return PTR_ERR(lrc->bo);
@@ -1374,6 +1587,23 @@ void xe_lrc_destroy(struct kref *ref)
kfree(lrc);
}
+/**
+ * xe_lrc_update_hwctx_regs_with_address - Re-compute GGTT references within given LRC.
+ * @lrc: the &xe_lrc struct instance
+ */
+void xe_lrc_update_hwctx_regs_with_address(struct xe_lrc *lrc)
+{
+ if (xe_lrc_has_indirect_ring_state(lrc)) {
+ xe_lrc_write_ctx_reg(lrc, CTX_INDIRECT_RING_STATE,
+ __xe_lrc_indirect_ring_ggtt_addr(lrc));
+
+ xe_lrc_write_indirect_ctx_reg(lrc, INDIRECT_CTX_RING_START,
+ __xe_lrc_ring_ggtt_addr(lrc));
+ } else {
+ xe_lrc_write_ctx_reg(lrc, CTX_RING_START, __xe_lrc_ring_ggtt_addr(lrc));
+ }
+}
+
void xe_lrc_set_ring_tail(struct xe_lrc *lrc, u32 tail)
{
if (xe_lrc_has_indirect_ring_state(lrc))
@@ -1939,7 +2169,7 @@ u32 *xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, u32 *cs)
* continue to emit all of the SVG state since it's best not to leak
* any of the state between contexts, even if that leakage is harmless.
*/
- if (XE_WA(gt, 14019789679) && q->hwe->class == XE_ENGINE_CLASS_RENDER) {
+ if (XE_GT_WA(gt, 14019789679) && q->hwe->class == XE_ENGINE_CLASS_RENDER) {
state_table = xe_hpg_svg_state;
state_table_size = ARRAY_SIZE(xe_hpg_svg_state);
}
diff --git a/drivers/gpu/drm/xe/xe_lrc.h b/drivers/gpu/drm/xe/xe_lrc.h
index b6c8053c581b..188565465779 100644
--- a/drivers/gpu/drm/xe/xe_lrc.h
+++ b/drivers/gpu/drm/xe/xe_lrc.h
@@ -42,6 +42,8 @@ struct xe_lrc_snapshot {
#define LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR (0x34 * 4)
#define LRC_PPHWSP_PXP_INVAL_SCRATCH_ADDR (0x40 * 4)
+#define LRC_WA_BB_SIZE SZ_4K
+
#define XE_LRC_CREATE_RUNALONE 0x1
#define XE_LRC_CREATE_PXP 0x2
struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm,
@@ -88,6 +90,10 @@ bool xe_lrc_ring_is_idle(struct xe_lrc *lrc);
u32 xe_lrc_indirect_ring_ggtt_addr(struct xe_lrc *lrc);
u32 xe_lrc_ggtt_addr(struct xe_lrc *lrc);
u32 *xe_lrc_regs(struct xe_lrc *lrc);
+void xe_lrc_update_hwctx_regs_with_address(struct xe_lrc *lrc);
+void xe_default_lrc_update_memirq_regs_with_address(struct xe_hw_engine *hwe);
+void xe_lrc_update_memirq_regs_with_address(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
+ u32 *regs);
u32 xe_lrc_read_ctx_reg(struct xe_lrc *lrc, int reg_nr);
void xe_lrc_write_ctx_reg(struct xe_lrc *lrc, int reg_nr, u32 val);
@@ -106,6 +112,7 @@ s32 xe_lrc_start_seqno(struct xe_lrc *lrc);
u32 xe_lrc_parallel_ggtt_addr(struct xe_lrc *lrc);
struct iosys_map xe_lrc_parallel_map(struct xe_lrc *lrc);
+size_t xe_lrc_reg_size(struct xe_device *xe);
size_t xe_lrc_skip_size(struct xe_device *xe);
void xe_lrc_dump_default(struct drm_printer *p,
@@ -124,6 +131,8 @@ u32 xe_lrc_ctx_timestamp_udw_ggtt_addr(struct xe_lrc *lrc);
u64 xe_lrc_ctx_timestamp(struct xe_lrc *lrc);
u32 xe_lrc_ctx_job_timestamp_ggtt_addr(struct xe_lrc *lrc);
u32 xe_lrc_ctx_job_timestamp(struct xe_lrc *lrc);
+int xe_lrc_setup_wa_bb_with_scratch(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
+ u32 *scratch);
/**
* xe_lrc_update_timestamp - readout LRC timestamp and update cached value
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index ba1cff2e4cda..1d667fa36cf3 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -9,6 +9,7 @@
#include <linux/sizes.h>
#include <drm/drm_managed.h>
+#include <drm/drm_pagemap.h>
#include <drm/ttm/ttm_tt.h>
#include <uapi/drm/xe_drm.h>
@@ -30,10 +31,13 @@
#include "xe_mocs.h"
#include "xe_pt.h"
#include "xe_res_cursor.h"
+#include "xe_sa.h"
#include "xe_sched_job.h"
#include "xe_sync.h"
#include "xe_trace_bo.h"
+#include "xe_validation.h"
#include "xe_vm.h"
+#include "xe_vram.h"
/**
* struct xe_migrate - migrate context.
@@ -84,19 +88,6 @@ struct xe_migrate {
*/
#define MAX_PTE_PER_SDI 0x1FEU
-/**
- * xe_tile_migrate_exec_queue() - Get this tile's migrate exec queue.
- * @tile: The tile.
- *
- * Returns the default migrate exec queue of this tile.
- *
- * Return: The default migrate exec queue
- */
-struct xe_exec_queue *xe_tile_migrate_exec_queue(struct xe_tile *tile)
-{
- return tile->migrate->q;
-}
-
static void xe_migrate_fini(void *arg)
{
struct xe_migrate *m = arg;
@@ -130,38 +121,39 @@ static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr, bool is_comp_pte)
u64 identity_offset = IDENTITY_OFFSET;
if (GRAPHICS_VER(xe) >= 20 && is_comp_pte)
- identity_offset += DIV_ROUND_UP_ULL(xe->mem.vram.actual_physical_size, SZ_1G);
+ identity_offset += DIV_ROUND_UP_ULL(xe_vram_region_actual_physical_size
+ (xe->mem.vram), SZ_1G);
- addr -= xe->mem.vram.dpa_base;
+ addr -= xe_vram_region_dpa_base(xe->mem.vram);
return addr + (identity_offset << xe_pt_shift(2));
}
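A worked example of the identity-map arithmetic, assuming IDENTITY_OFFSET is 256 and a level-2 PTE covers 1 GiB (xe_pt_shift(2) == 30); the device physical base is made up:

#include <stdint.h>
#include <stdio.h>

#define IDENTITY_OFFSET	256ull	/* GiB slot of the identity window (assumed) */
#define PT_SHIFT_L2	30	/* a level-2 entry covers 1 GiB (assumed) */

int main(void)
{
	uint64_t dpa_base = 0x100000000ull;		/* made-up device physical base */
	uint64_t addr = dpa_base + (3ull << 30) + 0x1234;	/* somewhere in VRAM */

	/* Rebase to zero, then lift into the 256 GiB identity window */
	uint64_t ofs = (addr - dpa_base) + (IDENTITY_OFFSET << PT_SHIFT_L2);

	/* 256 GiB + 3 GiB + 0x1234 = 0x40c0001234 */
	printf("vram ofs = 0x%llx\n", (unsigned long long)ofs);
	return 0;
}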
static void xe_migrate_program_identity(struct xe_device *xe, struct xe_vm *vm, struct xe_bo *bo,
u64 map_ofs, u64 vram_offset, u16 pat_index, u64 pt_2m_ofs)
{
+ struct xe_vram_region *vram = xe->mem.vram;
+ resource_size_t dpa_base = xe_vram_region_dpa_base(vram);
u64 pos, ofs, flags;
u64 entry;
/* XXX: Unclear if this should be usable_size? */
- u64 vram_limit = xe->mem.vram.actual_physical_size +
- xe->mem.vram.dpa_base;
+ u64 vram_limit = xe_vram_region_actual_physical_size(vram) + dpa_base;
u32 level = 2;
ofs = map_ofs + XE_PAGE_SIZE * level + vram_offset * 8;
flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
true, 0);
- xe_assert(xe, IS_ALIGNED(xe->mem.vram.usable_size, SZ_2M));
+ xe_assert(xe, IS_ALIGNED(xe_vram_region_usable_size(vram), SZ_2M));
/*
* Use 1GB pages when possible, last chunk always use 2M
* pages as mixing reserved memory (stolen, WOCPM) with a single
* mapping is not allowed on certain platforms.
*/
- for (pos = xe->mem.vram.dpa_base; pos < vram_limit;
+ for (pos = dpa_base; pos < vram_limit;
pos += SZ_1G, ofs += 8) {
if (pos + SZ_1G >= vram_limit) {
- entry = vm->pt_ops->pde_encode_bo(bo, pt_2m_ofs,
- pat_index);
+ entry = vm->pt_ops->pde_encode_bo(bo, pt_2m_ofs);
xe_map_wr(xe, &bo->vmap, ofs, u64, entry);
flags = vm->pt_ops->pte_encode_addr(xe, 0,
@@ -182,7 +174,7 @@ static void xe_migrate_program_identity(struct xe_device *xe, struct xe_vm *vm,
}
static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
- struct xe_vm *vm)
+ struct xe_vm *vm, struct drm_exec *exec)
{
struct xe_device *xe = tile_to_xe(tile);
u16 pat_index = xe->pat.idx[XE_CACHE_WB];
@@ -209,13 +201,13 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
num_entries * XE_PAGE_SIZE,
ttm_bo_type_kernel,
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_PAGETABLE);
+ XE_BO_FLAG_PAGETABLE, exec);
if (IS_ERR(bo))
return PTR_ERR(bo);
/* PT30 & PT31 reserved for 2M identity map */
pt29_ofs = xe_bo_size(bo) - 3 * XE_PAGE_SIZE;
- entry = vm->pt_ops->pde_encode_bo(bo, pt29_ofs, pat_index);
+ entry = vm->pt_ops->pde_encode_bo(bo, pt29_ofs);
xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);
map_ofs = (num_entries - num_setup) * XE_PAGE_SIZE;
@@ -283,15 +275,14 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
flags = XE_PDE_64K;
entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (u64)(level - 1) *
- XE_PAGE_SIZE, pat_index);
+ XE_PAGE_SIZE);
xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,
entry | flags);
}
/* Write PDE's that point to our BO. */
- for (i = 0; i < map_ofs / PAGE_SIZE; i++) {
- entry = vm->pt_ops->pde_encode_bo(bo, (u64)i * XE_PAGE_SIZE,
- pat_index);
+ for (i = 0; i < map_ofs / XE_PAGE_SIZE; i++) {
+ entry = vm->pt_ops->pde_encode_bo(bo, (u64)i * XE_PAGE_SIZE);
xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +
(i + 1) * 8, u64, entry);
@@ -307,11 +298,11 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
/* Identity map the entire vram at 256GiB offset */
if (IS_DGFX(xe)) {
u64 pt30_ofs = xe_bo_size(bo) - 2 * XE_PAGE_SIZE;
+ resource_size_t actual_phy_size = xe_vram_region_actual_physical_size(xe->mem.vram);
xe_migrate_program_identity(xe, vm, bo, map_ofs, IDENTITY_OFFSET,
pat_index, pt30_ofs);
- xe_assert(xe, xe->mem.vram.actual_physical_size <=
- (MAX_NUM_PTE - IDENTITY_OFFSET) * SZ_1G);
+ xe_assert(xe, actual_phy_size <= (MAX_NUM_PTE - IDENTITY_OFFSET) * SZ_1G);
/*
* Identity map the entire vram for compressed pat_index for xe2+
@@ -320,11 +311,11 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
if (GRAPHICS_VER(xe) >= 20 && xe_device_has_flat_ccs(xe)) {
u16 comp_pat_index = xe->pat.idx[XE_CACHE_NONE_COMPRESSION];
u64 vram_offset = IDENTITY_OFFSET +
- DIV_ROUND_UP_ULL(xe->mem.vram.actual_physical_size, SZ_1G);
+ DIV_ROUND_UP_ULL(actual_phy_size, SZ_1G);
u64 pt31_ofs = xe_bo_size(bo) - XE_PAGE_SIZE;
- xe_assert(xe, xe->mem.vram.actual_physical_size <= (MAX_NUM_PTE -
- IDENTITY_OFFSET - IDENTITY_OFFSET / 2) * SZ_1G);
+ xe_assert(xe, actual_phy_size <= (MAX_NUM_PTE - IDENTITY_OFFSET -
+ IDENTITY_OFFSET / 2) * SZ_1G);
xe_migrate_program_identity(xe, vm, bo, map_ofs, vram_offset,
comp_pat_index, pt31_ofs);
}
@@ -387,38 +378,63 @@ static bool xe_migrate_needs_ccs_emit(struct xe_device *xe)
}
/**
- * xe_migrate_init() - Initialize a migrate context
- * @tile: Back-pointer to the tile we're initializing for.
+ * xe_migrate_alloc - Allocate a migrate struct for a given &xe_tile
+ * @tile: &xe_tile
*
- * Return: Pointer to a migrate context on success. Error pointer on error.
+ * Allocates a &xe_migrate for a given tile.
+ *
+ * Return: &xe_migrate on success, or NULL when out of memory.
*/
-struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
+struct xe_migrate *xe_migrate_alloc(struct xe_tile *tile)
+{
+ struct xe_migrate *m = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*m), GFP_KERNEL);
+
+ if (m)
+ m->tile = tile;
+ return m;
+}
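The alloc/init split lets the migrate struct exist early in probe while the heavier setup runs once its dependencies are up; a minimal standalone sketch of that two-phase shape (all names illustrative, not the driver's call sites):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct migrate { int ready; };

/* Phase 1: plain allocation, safe very early in probe */
static struct migrate *migrate_alloc(void)
{
	return calloc(1, sizeof(struct migrate));
}

/* Phase 2: heavier init, run once dependencies are up */
static int migrate_init(struct migrate *m)
{
	if (!m)
		return -EINVAL;
	m->ready = 1;
	return 0;
}

int main(void)
{
	struct migrate *m = migrate_alloc();
	int err = migrate_init(m);

	printf("err=%d ready=%d\n", err, m ? m->ready : 0);
	free(m);
	return 0;
}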
+
+static int xe_migrate_lock_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, struct xe_vm *vm)
{
struct xe_device *xe = tile_to_xe(tile);
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
+ int err = 0;
+
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
+ err = xe_vm_drm_exec_lock(vm, &exec);
+ drm_exec_retry_on_contention(&exec);
+ err = xe_migrate_prepare_vm(tile, m, vm, &exec);
+ drm_exec_retry_on_contention(&exec);
+ xe_validation_retry_on_oom(&ctx, &err);
+ }
+
+ return err;
+}
+
+/**
+ * xe_migrate_init() - Initialize a migrate context
+ * @m: The migration context
+ *
+ * Return: 0 if successful, negative error code on failure
+ */
+int xe_migrate_init(struct xe_migrate *m)
+{
+ struct xe_tile *tile = m->tile;
struct xe_gt *primary_gt = tile->primary_gt;
- struct xe_migrate *m;
+ struct xe_device *xe = tile_to_xe(tile);
struct xe_vm *vm;
int err;
- m = devm_kzalloc(xe->drm.dev, sizeof(*m), GFP_KERNEL);
- if (!m)
- return ERR_PTR(-ENOMEM);
-
- m->tile = tile;
-
/* Special layout, prepared below.. */
vm = xe_vm_create(xe, XE_VM_FLAG_MIGRATION |
- XE_VM_FLAG_SET_TILE_ID(tile));
+ XE_VM_FLAG_SET_TILE_ID(tile), NULL);
if (IS_ERR(vm))
- return ERR_CAST(vm);
+ return PTR_ERR(vm);
- xe_vm_lock(vm, false);
- err = xe_migrate_prepare_vm(tile, m, vm);
- xe_vm_unlock(vm);
- if (err) {
- xe_vm_close_and_put(vm);
- return ERR_PTR(err);
- }
+ err = xe_migrate_lock_prepare_vm(tile, m, vm);
+ if (err)
+ return err;
if (xe->info.has_usm) {
struct xe_hw_engine *hwe = xe_gt_hw_engine(primary_gt,
@@ -427,8 +443,10 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
false);
u32 logical_mask = xe_migrate_usm_logical_mask(primary_gt);
- if (!hwe || !logical_mask)
- return ERR_PTR(-EINVAL);
+ if (!hwe || !logical_mask) {
+ err = -EINVAL;
+ goto err_out;
+ }
/*
* XXX: Currently only reserving 1 (likely slow) BCS instance on
@@ -437,16 +455,18 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
EXEC_QUEUE_FLAG_KERNEL |
EXEC_QUEUE_FLAG_PERMANENT |
- EXEC_QUEUE_FLAG_HIGH_PRIORITY, 0);
+ EXEC_QUEUE_FLAG_HIGH_PRIORITY |
+ EXEC_QUEUE_FLAG_MIGRATE, 0);
} else {
m->q = xe_exec_queue_create_class(xe, primary_gt, vm,
XE_ENGINE_CLASS_COPY,
EXEC_QUEUE_FLAG_KERNEL |
- EXEC_QUEUE_FLAG_PERMANENT, 0);
+ EXEC_QUEUE_FLAG_PERMANENT |
+ EXEC_QUEUE_FLAG_MIGRATE, 0);
}
if (IS_ERR(m->q)) {
- xe_vm_close_and_put(vm);
- return ERR_CAST(m->q);
+ err = PTR_ERR(m->q);
+ goto err_out;
}
mutex_init(&m->job_mutex);
@@ -456,7 +476,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
err = devm_add_action_or_reset(xe->drm.dev, xe_migrate_fini, m);
if (err)
- return ERR_PTR(err);
+ return err;
if (IS_DGFX(xe)) {
if (xe_migrate_needs_ccs_emit(xe))
@@ -471,7 +491,12 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
(unsigned long long)m->min_chunk_size);
}
- return m;
+ return err;
+
+err_out:
+ xe_vm_close_and_put(vm);
+ return err;
}
static u64 max_mem_transfer_per_pass(struct xe_device *xe)
@@ -834,11 +859,15 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
batch_size += pte_update_size(m, pte_flags, src, &src_it, &src_L0,
&src_L0_ofs, &src_L0_pt, 0, 0,
avail_pts);
-
- pte_flags = dst_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
- batch_size += pte_update_size(m, pte_flags, dst, &dst_it, &src_L0,
- &dst_L0_ofs, &dst_L0_pt, 0,
- avail_pts, avail_pts);
+ if (copy_only_ccs) {
+ dst_L0_ofs = src_L0_ofs;
+ } else {
+ pte_flags = dst_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
+ batch_size += pte_update_size(m, pte_flags, dst,
+ &dst_it, &src_L0,
+ &dst_L0_ofs, &dst_L0_pt,
+ 0, avail_pts, avail_pts);
+ }
if (copy_system_ccs) {
xe_assert(xe, type_device);
@@ -868,7 +897,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
if (dst_is_vram && xe_migrate_allow_identity(src_L0, &dst_it))
xe_res_next(&dst_it, src_L0);
- else
+ else if (!copy_only_ccs)
emit_pte(m, bb, dst_L0_pt, dst_is_vram, copy_system_ccs,
&dst_it, src_L0, dst);
@@ -896,11 +925,11 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
goto err;
}
- xe_sched_job_add_migrate_flush(job, flush_flags);
+ xe_sched_job_add_migrate_flush(job, flush_flags | MI_INVALIDATE_TLB);
if (!fence) {
err = xe_sched_job_add_deps(job, src_bo->ttm.base.resv,
DMA_RESV_USAGE_BOOKKEEP);
- if (!err && src_bo != dst_bo)
+ if (!err && src_bo->ttm.base.resv != dst_bo->ttm.base.resv)
err = xe_sched_job_add_deps(job, dst_bo->ttm.base.resv,
DMA_RESV_USAGE_BOOKKEEP);
if (err)
@@ -940,6 +969,167 @@ err_sync:
return fence;
}
+/**
+ * xe_migrate_lrc() - Get the LRC from migrate context.
+ * @migrate: Migrate context.
+ *
+ * Return: Pointer to the LRC of the migrate context's exec queue.
+ */
+struct xe_lrc *xe_migrate_lrc(struct xe_migrate *migrate)
+{
+ return migrate->q->lrc[0];
+}
+
+static int emit_flush_invalidate(struct xe_exec_queue *q, u32 *dw, int i,
+ u32 flags)
+{
+ struct xe_lrc *lrc = xe_exec_queue_lrc(q);
+
+ dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW |
+ MI_FLUSH_IMM_DW | flags;
+ dw[i++] = lower_32_bits(xe_lrc_start_seqno_ggtt_addr(lrc)) |
+ MI_FLUSH_DW_USE_GTT;
+ dw[i++] = upper_32_bits(xe_lrc_start_seqno_ggtt_addr(lrc));
+ dw[i++] = MI_NOOP;
+ dw[i++] = MI_NOOP;
+
+ return i;
+}
+
+/**
+ * xe_migrate_ccs_rw_copy() - Copy content of TTM resources.
+ * @tile: Tile whose migration context is to be used.
+ * @q: Execution queue to be used along with the migration context.
+ * @src_bo: The source buffer object.
+ * @read_write: Whether to create BB commands for CCS read or write.
+ *
+ * Creates batch buffer instructions to copy CCS metadata from CCS pool to
+ * memory and vice versa.
+ *
+ * This function should only be called for IGPU.
+ *
+ * Return: 0 if successful, negative error code on failure.
+ */
+int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q,
+ struct xe_bo *src_bo,
+ enum xe_sriov_vf_ccs_rw_ctxs read_write)
+{
+ bool src_is_pltt = read_write == XE_SRIOV_VF_CCS_READ_CTX;
+ bool dst_is_pltt = read_write == XE_SRIOV_VF_CCS_WRITE_CTX;
+ struct ttm_resource *src = src_bo->ttm.resource;
+ struct xe_migrate *m = tile->migrate;
+ struct xe_gt *gt = tile->primary_gt;
+ u32 batch_size, batch_size_allocated;
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_res_cursor src_it, ccs_it;
+ u64 size = xe_bo_size(src_bo);
+ struct xe_bb *bb = NULL;
+ u64 src_L0, src_L0_ofs;
+ u32 src_L0_pt;
+ int err;
+
+ xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);
+
+ xe_res_first_sg(xe_bo_sg(src_bo), xe_bo_ccs_pages_start(src_bo),
+ PAGE_ALIGN(xe_device_ccs_bytes(xe, size)),
+ &ccs_it);
+
+ /* Calculate Batch buffer size */
+ batch_size = 0;
+ while (size) {
+ u64 ccs_ofs, ccs_size;
+ u32 ccs_pt;
+ u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
+
+ batch_size += 10; /* Flush + ggtt addr + 2 NOP */
+
+ src_L0 = min_t(u64, max_mem_transfer_per_pass(xe), size);
+
+ batch_size += pte_update_size(m, false, src, &src_it, &src_L0,
+ &src_L0_ofs, &src_L0_pt, 0, 0,
+ avail_pts);
+
+ ccs_size = xe_device_ccs_bytes(xe, src_L0);
+ batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size, &ccs_ofs,
+ &ccs_pt, 0, avail_pts, avail_pts);
+ xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
+
+ /* Add copy commands size here */
+ batch_size += EMIT_COPY_CCS_DW;
+
+ size -= src_L0;
+ }
+
+ bb = xe_bb_ccs_new(gt, batch_size, read_write);
+ if (IS_ERR(bb)) {
+ drm_err(&xe->drm, "BB allocation failed.\n");
+ err = PTR_ERR(bb);
+ goto err_ret;
+ }
+
+ batch_size_allocated = batch_size;
+ size = xe_bo_size(src_bo);
+ batch_size = 0;
+
+ /*
+ * Emit PTE and copy commands here.
+ * The CCS copy command can only support limited size. If the size to be
+ * copied is more than the limit, divide copy into chunks. So, calculate
+ * sizes here again before copy command is emitted.
+ */
+ while (size) {
+ u32 flush_flags = 0;
+ u64 ccs_ofs, ccs_size;
+ u32 ccs_pt;
+ u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
+
+ batch_size += 10; /* Flush + ggtt addr + 2 NOP */
+
+ src_L0 = xe_migrate_res_sizes(m, &src_it);
+
+ batch_size += pte_update_size(m, false, src, &src_it, &src_L0,
+ &src_L0_ofs, &src_L0_pt, 0, 0,
+ avail_pts);
+
+ ccs_size = xe_device_ccs_bytes(xe, src_L0);
+ batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size, &ccs_ofs,
+ &ccs_pt, 0, avail_pts, avail_pts);
+ xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
+ batch_size += EMIT_COPY_CCS_DW;
+
+ emit_pte(m, bb, src_L0_pt, false, true, &src_it, src_L0, src);
+
+ emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src);
+
+ bb->len = emit_flush_invalidate(q, bb->cs, bb->len, flush_flags);
+ flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs, src_is_pltt,
+ src_L0_ofs, dst_is_pltt,
+ src_L0, ccs_ofs, true);
+ bb->len = emit_flush_invalidate(q, bb->cs, bb->len, flush_flags);
+
+ size -= src_L0;
+ }
+
+ xe_assert(xe, (batch_size_allocated == bb->len));
+ src_bo->bb_ccs[read_write] = bb;
+
+ return 0;
+
+err_ret:
+ return err;
+}
+
+/**
+ * xe_migrate_exec_queue() - Get the execution queue from migrate context.
+ * @migrate: Migrate context.
+ *
+ * Return: Pointer to the migrate context's execution queue.
+ */
+struct xe_exec_queue *xe_migrate_exec_queue(struct xe_migrate *migrate)
+{
+ return migrate->q;
+}
+
static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
u32 size, u32 pitch)
{
@@ -1119,11 +1309,13 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
size -= clear_L0;
/* Preemption is enabled again by the ring ops. */
- if (clear_vram && xe_migrate_allow_identity(clear_L0, &src_it))
+ if (clear_vram && xe_migrate_allow_identity(clear_L0, &src_it)) {
xe_res_next(&src_it, clear_L0);
- else
- emit_pte(m, bb, clear_L0_pt, clear_vram, clear_only_system_ccs,
- &src_it, clear_L0, dst);
+ } else {
+ emit_pte(m, bb, clear_L0_pt, clear_vram,
+ clear_only_system_ccs, &src_it, clear_L0, dst);
+ flush_flags |= MI_INVALIDATE_TLB;
+ }
bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
update_idx = bb->len;
@@ -1134,7 +1326,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
if (xe_migrate_needs_ccs_emit(xe)) {
emit_copy_ccs(gt, bb, clear_L0_ofs, true,
m->cleared_mem_ofs, false, clear_L0);
- flush_flags = MI_FLUSH_DW_CCS;
+ flush_flags |= MI_FLUSH_DW_CCS;
}
job = xe_bb_create_migration_job(m->q, bb,
@@ -1469,6 +1661,8 @@ next_cmd:
goto err_sa;
}
+ xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
+
if (ops->pre_commit) {
pt_update->job = job;
err = ops->pre_commit(pt_update);
@@ -1571,7 +1765,8 @@ static u32 pte_update_cmd_size(u64 size)
static void build_pt_update_batch_sram(struct xe_migrate *m,
struct xe_bb *bb, u32 pt_offset,
- dma_addr_t *sram_addr, u32 size)
+ struct drm_pagemap_addr *sram_addr,
+ u32 size)
{
u16 pat_index = tile_to_xe(m->tile)->pat.idx[XE_CACHE_WB];
u32 ptes;
@@ -1589,14 +1784,18 @@ static void build_pt_update_batch_sram(struct xe_migrate *m,
ptes -= chunk;
while (chunk--) {
- u64 addr = sram_addr[i++] & PAGE_MASK;
+ u64 addr = sram_addr[i].addr & PAGE_MASK;
+ xe_tile_assert(m->tile, sram_addr[i].proto ==
+ DRM_INTERCONNECT_SYSTEM);
xe_tile_assert(m->tile, addr);
addr = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe,
addr, pat_index,
0, false, 0);
bb->cs[bb->len++] = lower_32_bits(addr);
bb->cs[bb->len++] = upper_32_bits(addr);
+
+ i++;
}
}
}
@@ -1612,7 +1811,8 @@ enum xe_migrate_copy_dir {
static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
unsigned long len,
unsigned long sram_offset,
- dma_addr_t *sram_addr, u64 vram_addr,
+ struct drm_pagemap_addr *sram_addr,
+ u64 vram_addr,
const enum xe_migrate_copy_dir dir)
{
struct xe_gt *gt = m->tile->primary_gt;
@@ -1628,6 +1828,7 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
unsigned int pitch = len >= PAGE_SIZE && !(len & ~PAGE_MASK) ?
PAGE_SIZE : 4;
int err;
+ unsigned long i, j;
if (drm_WARN_ON(&xe->drm, (len & XE_CACHELINE_MASK) ||
(sram_offset | vram_addr) & XE_CACHELINE_MASK))
@@ -1644,6 +1845,24 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
return ERR_PTR(err);
}
+ /*
+ * If the order of a struct drm_pagemap_addr entry is greater than 0,
+ * the entry is populated by the GPU pagemap, but the subsequent entries
+ * within the range of that order are left unpopulated.
+ * build_pt_update_batch_sram() expects a fully populated array of
+ * struct drm_pagemap_addr, so fill in the gaps here: e.g. an order-2
+ * entry at index i populates i + 1..i + 3 with consecutive PAGE_SIZE
+ * increments of its address.
+ */
+ for (i = 0; i < npages;) {
+ unsigned int order = sram_addr[i].order;
+
+ for (j = 1; j < NR_PAGES(order) && i + j < npages; j++)
+ if (!sram_addr[i + j].addr)
+ sram_addr[i + j].addr = sram_addr[i].addr + j * PAGE_SIZE;
+
+ i += NR_PAGES(order);
+ }
+
build_pt_update_batch_sram(m, bb, pt_slot * XE_PAGE_SIZE,
sram_addr, len + sram_offset);
@@ -1669,7 +1888,7 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
goto err;
}
- xe_sched_job_add_migrate_flush(job, 0);
+ xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
mutex_lock(&m->job_mutex);
xe_sched_job_arm(job);
@@ -1694,7 +1913,7 @@ err:
* xe_migrate_to_vram() - Migrate to VRAM
* @m: The migration context.
* @npages: Number of pages to migrate.
- * @src_addr: Array of dma addresses (source of migrate)
+ * @src_addr: Array of DMA information (source of migrate)
* @dst_addr: Device physical address of VRAM (destination of migrate)
*
* Copy from an array of DMA addresses to a VRAM device physical address
@@ -1704,7 +1923,7 @@ err:
*/
struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
unsigned long npages,
- dma_addr_t *src_addr,
+ struct drm_pagemap_addr *src_addr,
u64 dst_addr)
{
return xe_migrate_vram(m, npages * PAGE_SIZE, 0, src_addr, dst_addr,
@@ -1716,7 +1935,7 @@ struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
* @m: The migration context.
* @npages: Number of pages to migrate.
* @src_addr: Device physical address of VRAM (source of migrate)
- * @dst_addr: Array of dma addresses (destination of migrate)
+ * @dst_addr: Array of DMA information (destination of migrate)
*
* Copy from a VRAM device physical address to an array of DMA addresses
*
@@ -1726,61 +1945,65 @@ struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
unsigned long npages,
u64 src_addr,
- dma_addr_t *dst_addr)
+ struct drm_pagemap_addr *dst_addr)
{
return xe_migrate_vram(m, npages * PAGE_SIZE, 0, dst_addr, src_addr,
XE_MIGRATE_COPY_TO_SRAM);
}
-static void xe_migrate_dma_unmap(struct xe_device *xe, dma_addr_t *dma_addr,
+static void xe_migrate_dma_unmap(struct xe_device *xe,
+ struct drm_pagemap_addr *pagemap_addr,
int len, int write)
{
unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
for (i = 0; i < npages; ++i) {
- if (!dma_addr[i])
+ if (!pagemap_addr[i].addr)
break;
- dma_unmap_page(xe->drm.dev, dma_addr[i], PAGE_SIZE,
+ dma_unmap_page(xe->drm.dev, pagemap_addr[i].addr, PAGE_SIZE,
write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
}
- kfree(dma_addr);
+ kfree(pagemap_addr);
}
-static dma_addr_t *xe_migrate_dma_map(struct xe_device *xe,
- void *buf, int len, int write)
+static struct drm_pagemap_addr *xe_migrate_dma_map(struct xe_device *xe,
+ void *buf, int len,
+ int write)
{
- dma_addr_t *dma_addr;
+ struct drm_pagemap_addr *pagemap_addr;
unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
- dma_addr = kcalloc(npages, sizeof(*dma_addr), GFP_KERNEL);
- if (!dma_addr)
+ pagemap_addr = kcalloc(npages, sizeof(*pagemap_addr), GFP_KERNEL);
+ if (!pagemap_addr)
return ERR_PTR(-ENOMEM);
for (i = 0; i < npages; ++i) {
dma_addr_t addr;
struct page *page;
+ enum dma_data_direction dir = write ? DMA_TO_DEVICE :
+ DMA_FROM_DEVICE;
if (is_vmalloc_addr(buf))
page = vmalloc_to_page(buf);
else
page = virt_to_page(buf);
- addr = dma_map_page(xe->drm.dev,
- page, 0, PAGE_SIZE,
- write ? DMA_TO_DEVICE :
- DMA_FROM_DEVICE);
+ addr = dma_map_page(xe->drm.dev, page, 0, PAGE_SIZE, dir);
if (dma_mapping_error(xe->drm.dev, addr))
goto err_fault;
- dma_addr[i] = addr;
+ pagemap_addr[i] =
+ drm_pagemap_addr_encode(addr,
+ DRM_INTERCONNECT_SYSTEM,
+ 0, dir);
buf += PAGE_SIZE;
}
- return dma_addr;
+ return pagemap_addr;
err_fault:
- xe_migrate_dma_unmap(xe, dma_addr, len, write);
+ xe_migrate_dma_unmap(xe, pagemap_addr, len, write);
return ERR_PTR(-EFAULT);
}
@@ -1809,7 +2032,7 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
struct xe_device *xe = tile_to_xe(tile);
struct xe_res_cursor cursor;
struct dma_fence *fence = NULL;
- dma_addr_t *dma_addr;
+ struct drm_pagemap_addr *pagemap_addr;
unsigned long page_offset = (unsigned long)buf & ~PAGE_MASK;
int bytes_left = len, current_page = 0;
void *orig_buf = buf;
@@ -1820,15 +2043,19 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
if (!IS_ALIGNED(len, XE_CACHELINE_BYTES) ||
!IS_ALIGNED((unsigned long)buf + offset, XE_CACHELINE_BYTES)) {
int buf_offset = 0;
+ void *bounce;
+ int err;
+
+ BUILD_BUG_ON(!is_power_of_2(XE_CACHELINE_BYTES));
+ bounce = kmalloc(XE_CACHELINE_BYTES, GFP_KERNEL);
+ if (!bounce)
+ return -ENOMEM;
/*
* Less than ideal for large unaligned accesses, but this should be
* fairly rare; can fix up if this becomes common.
*/
do {
- u8 bounce[XE_CACHELINE_BYTES];
- void *ptr = (void *)bounce;
- int err;
int copy_bytes = min_t(int, bytes_left,
XE_CACHELINE_BYTES -
(offset & XE_CACHELINE_MASK));
@@ -1837,22 +2064,22 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
err = xe_migrate_access_memory(m, bo,
offset &
~XE_CACHELINE_MASK,
- (void *)ptr,
- sizeof(bounce), 0);
+ bounce,
+ XE_CACHELINE_BYTES, 0);
if (err)
- return err;
+ break;
if (write) {
- memcpy(ptr + ptr_offset, buf + buf_offset, copy_bytes);
+ memcpy(bounce + ptr_offset, buf + buf_offset, copy_bytes);
err = xe_migrate_access_memory(m, bo,
offset & ~XE_CACHELINE_MASK,
- (void *)ptr,
- sizeof(bounce), write);
+ bounce,
+ XE_CACHELINE_BYTES, write);
if (err)
- return err;
+ break;
} else {
- memcpy(buf + buf_offset, ptr + ptr_offset,
+ memcpy(buf + buf_offset, bounce + ptr_offset,
copy_bytes);
}
@@ -1861,12 +2088,13 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
offset += copy_bytes;
} while (bytes_left);
- return 0;
+ kfree(bounce);
+ return err;
}
- dma_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write);
- if (IS_ERR(dma_addr))
- return PTR_ERR(dma_addr);
+ pagemap_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write);
+ if (IS_ERR(pagemap_addr))
+ return PTR_ERR(pagemap_addr);
xe_res_first(bo->ttm.resource, offset, xe_bo_size(bo) - offset, &cursor);
@@ -1882,21 +2110,28 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
else
current_bytes = min_t(int, bytes_left, cursor.size);
- if (fence)
- dma_fence_put(fence);
+ if (current_bytes & ~PAGE_MASK) {
+ int pitch = 4;
+
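+ /*
+ * With a 4-byte pitch, the blit height is current_bytes / pitch
+ * rows; cap it so the row count fits the signed 16-bit limit.
+ */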
+ current_bytes = min_t(int, current_bytes, S16_MAX * pitch);
+ }
__fence = xe_migrate_vram(m, current_bytes,
(unsigned long)buf & ~PAGE_MASK,
- dma_addr + current_page,
+ &pagemap_addr[current_page],
vram_addr, write ?
XE_MIGRATE_COPY_TO_VRAM :
XE_MIGRATE_COPY_TO_SRAM);
if (IS_ERR(__fence)) {
- if (fence)
+ if (fence) {
dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+ }
fence = __fence;
goto out_err;
}
+
+ dma_fence_put(fence);
fence = __fence;
buf += current_bytes;
@@ -1911,10 +2146,46 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
dma_fence_put(fence);
out_err:
- xe_migrate_dma_unmap(xe, dma_addr, len + page_offset, write);
+ xe_migrate_dma_unmap(xe, pagemap_addr, len + page_offset, write);
return IS_ERR(fence) ? PTR_ERR(fence) : 0;
}
+/**
+ * xe_migrate_job_lock() - Lock migrate job lock
+ * @m: The migration context.
+ * @q: Queue associated with the operation which requires a lock
+ *
+ * Lock the migrate job lock if the queue is a migration queue; otherwise
+ * assert that the VM's dma-resv is held (user queues have their own locking).
+ */
+void xe_migrate_job_lock(struct xe_migrate *m, struct xe_exec_queue *q)
+{
+ bool is_migrate = q == m->q;
+
+ if (is_migrate)
+ mutex_lock(&m->job_mutex);
+ else
+ xe_vm_assert_held(q->vm); /* User queues' VMs should be locked */
+}
+
+/**
+ * xe_migrate_job_unlock() - Unlock migrate job lock
+ * @m: The migration context.
+ * @q: Queue associated with the operation which requires a lock
+ *
+ * Unlock the migrate job lock if the queue is a migration queue; otherwise
+ * assert that the VM's dma-resv is held (user queues have their own locking).
+ */
+void xe_migrate_job_unlock(struct xe_migrate *m, struct xe_exec_queue *q)
+{
+ bool is_migrate = q == m->q;
+
+ if (is_migrate)
+ mutex_unlock(&m->job_mutex);
+ else
+ xe_vm_assert_held(q->vm); /* User queues' VMs should be locked */
+}
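+
+/*
+ * Example usage (minimal sketch; mirrors the submission pattern used
+ * elsewhere in xe_migrate.c, with surrounding variable names assumed):
+ *
+ * xe_migrate_job_lock(m, q);
+ * xe_sched_job_arm(job);
+ * fence = dma_fence_get(&job->drm.s_fence->finished);
+ * xe_sched_job_push(job);
+ * xe_migrate_job_unlock(m, q);
+ */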
+
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_migrate.c"
#endif
diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h
index fb9839c1bae0..4fad324b6253 100644
--- a/drivers/gpu/drm/xe/xe_migrate.h
+++ b/drivers/gpu/drm/xe/xe_migrate.h
@@ -9,11 +9,13 @@
#include <linux/types.h>
struct dma_fence;
+struct drm_pagemap_addr;
struct iosys_map;
struct ttm_resource;
struct xe_bo;
struct xe_gt;
+struct xe_tlb_inval_job;
struct xe_exec_queue;
struct xe_migrate;
struct xe_migrate_pt_update;
@@ -24,6 +26,8 @@ struct xe_vm;
struct xe_vm_pgtable_update;
struct xe_vma;
+enum xe_sriov_vf_ccs_rw_ctxs;
+
/**
* struct xe_migrate_pt_update_ops - Callbacks for the
* xe_migrate_update_pgtables() function.
@@ -89,21 +93,30 @@ struct xe_migrate_pt_update {
struct xe_vma_ops *vops;
/** @job: The job if a GPU page-table update. NULL otherwise */
struct xe_sched_job *job;
+ /**
+ * @ijob: The TLB invalidation job for primary GT. NULL otherwise
+ */
+ struct xe_tlb_inval_job *ijob;
+ /**
+ * @mjob: The TLB invalidation job for media GT. NULL otherwise
+ */
+ struct xe_tlb_inval_job *mjob;
/** @tile_id: Tile ID of the update */
u8 tile_id;
};
-struct xe_migrate *xe_migrate_init(struct xe_tile *tile);
+struct xe_migrate *xe_migrate_alloc(struct xe_tile *tile);
+int xe_migrate_init(struct xe_migrate *m);
struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
unsigned long npages,
- dma_addr_t *src_addr,
+ struct drm_pagemap_addr *src_addr,
u64 dst_addr);
struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
unsigned long npages,
u64 src_addr,
- dma_addr_t *dst_addr);
+ struct drm_pagemap_addr *dst_addr);
struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
struct xe_bo *src_bo,
@@ -112,6 +125,12 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
struct ttm_resource *dst,
bool copy_only_ccs);
+int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q,
+ struct xe_bo *src_bo,
+ enum xe_sriov_vf_ccs_rw_ctxs read_write);
+
+struct xe_lrc *xe_migrate_lrc(struct xe_migrate *migrate);
+struct xe_exec_queue *xe_migrate_exec_queue(struct xe_migrate *migrate);
int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
unsigned long offset, void *buf, int len,
int write);
@@ -133,5 +152,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
void xe_migrate_wait(struct xe_migrate *m);
-struct xe_exec_queue *xe_tile_migrate_exec_queue(struct xe_tile *tile);
+void xe_migrate_job_lock(struct xe_migrate *m, struct xe_exec_queue *q);
+void xe_migrate_job_unlock(struct xe_migrate *m, struct xe_exec_queue *q);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index e4db8d58ea2d..ef6f3ea573a2 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -58,7 +58,6 @@ static void tiles_fini(void *arg)
static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
{
struct xe_tile *tile;
- struct xe_gt *gt;
u8 id;
/*
@@ -68,38 +67,6 @@ static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
if (xe->info.tile_count == 1)
return;
- /* Possibly override number of tile based on configuration register */
- if (!xe->info.skip_mtcfg) {
- struct xe_mmio *mmio = xe_root_tile_mmio(xe);
- u8 tile_count, gt_count;
- u32 mtcfg;
-
- /*
- * Although the per-tile mmio regs are not yet initialized, this
- * is fine as it's going to the root tile's mmio, that's
- * guaranteed to be initialized earlier in xe_mmio_probe_early()
- */
- mtcfg = xe_mmio_read32(mmio, XEHP_MTCFG_ADDR);
- tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1;
-
- if (tile_count < xe->info.tile_count) {
- drm_info(&xe->drm, "tile_count: %d, reduced_tile_count %d\n",
- xe->info.tile_count, tile_count);
- xe->info.tile_count = tile_count;
-
- /*
- * We've already setup gt_count according to the full
- * tile count. Re-calculate it to only include the GTs
- * that belong to the remaining tile(s).
- */
- gt_count = 0;
- for_each_gt(gt, xe, id)
- if (gt->info.id < tile_count * xe->info.max_gt_per_tile)
- gt_count++;
- xe->info.gt_count = gt_count;
- }
- }
-
for_each_remote_tile(tile, xe, id)
xe_mmio_init(&tile->mmio, tile, xe->mmio.regs + id * tile_mmio_size, SZ_4M);
}
diff --git a/drivers/gpu/drm/xe/xe_mmio_gem.c b/drivers/gpu/drm/xe/xe_mmio_gem.c
new file mode 100644
index 000000000000..9a97c4387e4f
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_mmio_gem.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include "xe_mmio_gem.h"
+
+#include <drm/drm_drv.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_managed.h>
+
+#include "xe_device_types.h"
+
+/**
+ * DOC: Exposing MMIO regions to userspace
+ *
+ * In certain cases, the driver may allow userspace to mmap a portion of the hardware registers.
+ *
+ * This can be done as follows:
+ * 1. Call xe_mmio_gem_create() to create a GEM object with an mmap-able fake offset.
+ * 2. Use xe_mmio_gem_mmap_offset() on the created GEM object to retrieve the fake offset.
+ * 3. Provide the fake offset to userspace.
+ * 4. Userspace can call mmap with the fake offset. The length provided to mmap
+ * must match the size of the GEM object.
+ * 5. When the region is no longer needed, call xe_mmio_gem_destroy() to release the GEM object.
+ *
+ * NOTE: The exposed MMIO region must be page-aligned with regard to its BAR offset and size.
+ *
+ * WARNING: Exposing MMIO regions to userspace can have security and stability implications.
+ * Make sure not to expose any sensitive registers.
+ */
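+
+/*
+ * Example usage (minimal sketch of the flow described above; assumes the
+ * caller already has a page-aligned phys_addr/size pair and a
+ * struct drm_file *file, and elides error handling):
+ *
+ * struct xe_mmio_gem *gem;
+ * u64 mmap_offset;
+ *
+ * gem = xe_mmio_gem_create(xe, file, phys_addr, size);
+ * if (IS_ERR(gem))
+ * return PTR_ERR(gem);
+ * mmap_offset = xe_mmio_gem_mmap_offset(gem);
+ * ... pass mmap_offset to userspace, which mmaps size bytes at it ...
+ * xe_mmio_gem_destroy(gem);
+ */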
+
+static void xe_mmio_gem_free(struct drm_gem_object *);
+static int xe_mmio_gem_mmap(struct drm_gem_object *, struct vm_area_struct *);
+static vm_fault_t xe_mmio_gem_vm_fault(struct vm_fault *);
+
+struct xe_mmio_gem {
+ struct drm_gem_object base;
+ phys_addr_t phys_addr;
+};
+
+static const struct vm_operations_struct vm_ops = {
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+ .fault = xe_mmio_gem_vm_fault,
+};
+
+static const struct drm_gem_object_funcs xe_mmio_gem_funcs = {
+ .free = xe_mmio_gem_free,
+ .mmap = xe_mmio_gem_mmap,
+ .vm_ops = &vm_ops,
+};
+
+static inline struct xe_mmio_gem *to_xe_mmio_gem(struct drm_gem_object *obj)
+{
+ return container_of(obj, struct xe_mmio_gem, base);
+}
+
+/**
+ * xe_mmio_gem_create - Expose an MMIO region to userspace
+ * @xe: The xe device
+ * @file: DRM file descriptor
+ * @phys_addr: Start of the exposed MMIO region
+ * @size: The size of the exposed MMIO region
+ *
+ * This function creates a GEM object that exposes an MMIO region with an mmap-able
+ * fake offset.
+ *
+ * See: "Exposing MMIO regions to userspace"
+ */
+struct xe_mmio_gem *xe_mmio_gem_create(struct xe_device *xe, struct drm_file *file,
+ phys_addr_t phys_addr, size_t size)
+{
+ struct xe_mmio_gem *obj;
+ struct drm_gem_object *base;
+ int err;
+
+ if ((phys_addr % PAGE_SIZE != 0) || (size % PAGE_SIZE != 0))
+ return ERR_PTR(-EINVAL);
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ base = &obj->base;
+ base->funcs = &xe_mmio_gem_funcs;
+ obj->phys_addr = phys_addr;
+
+ drm_gem_private_object_init(&xe->drm, base, size);
+
+ err = drm_gem_create_mmap_offset(base);
+ if (err)
+ goto free_gem;
+
+ err = drm_vma_node_allow(&base->vma_node, file);
+ if (err)
+ goto free_gem;
+
+ return obj;
+
+free_gem:
+ xe_mmio_gem_free(base);
+ return ERR_PTR(err);
+}
+
+/**
+ * xe_mmio_gem_mmap_offset - Return the mmap-able fake offset
+ * @gem: the GEM object created with xe_mmio_gem_create()
+ *
+ * This function returns the mmap-able fake offset allocated during
+ * xe_mmio_gem_create().
+ *
+ * See: "Exposing MMIO regions to userspace"
+ */
+u64 xe_mmio_gem_mmap_offset(struct xe_mmio_gem *gem)
+{
+ return drm_vma_node_offset_addr(&gem->base.vma_node);
+}
+
+static void xe_mmio_gem_free(struct drm_gem_object *base)
+{
+ struct xe_mmio_gem *obj = to_xe_mmio_gem(base);
+
+ drm_gem_object_release(base);
+ kfree(obj);
+}
+
+/**
+ * xe_mmio_gem_destroy - Destroy the GEM object that exposes an MMIO region
+ * @gem: the GEM object to destroy
+ *
+ * This function releases resources associated with the GEM object created by
+ * xe_mmio_gem_create().
+ *
+ * See: "Exposing MMIO regions to userspace"
+ */
+void xe_mmio_gem_destroy(struct xe_mmio_gem *gem)
+{
+ xe_mmio_gem_free(&gem->base);
+}
+
+static int xe_mmio_gem_mmap(struct drm_gem_object *base, struct vm_area_struct *vma)
+{
+ if (vma->vm_end - vma->vm_start != base->size)
+ return -EINVAL;
+
+ if ((vma->vm_flags & VM_SHARED) == 0)
+ return -EINVAL;
+
+ /* Set vm_pgoff (used as a fake buffer offset by DRM) to 0 */
+ vma->vm_pgoff = 0;
+ vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+ vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+ VM_DONTCOPY | VM_NORESERVE);
+
+ /* Defer actual mapping to the fault handler. */
+ return 0;
+}
+
+static void xe_mmio_gem_release_dummy_page(struct drm_device *dev, void *res)
+{
+ __free_page((struct page *)res);
+}
+
+static vm_fault_t xe_mmio_gem_vm_fault_dummy_page(struct vm_area_struct *vma)
+{
+ struct drm_gem_object *base = vma->vm_private_data;
+ struct drm_device *dev = base->dev;
+ vm_fault_t ret = VM_FAULT_NOPAGE;
+ struct page *page;
+ unsigned long pfn;
+ unsigned long i;
+
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ return VM_FAULT_OOM;
+
+ if (drmm_add_action_or_reset(dev, xe_mmio_gem_release_dummy_page, page))
+ return VM_FAULT_OOM;
+
+ pfn = page_to_pfn(page);
+
+ /* Map the entire VMA to the same dummy page */
+ for (i = 0; i < base->size; i += PAGE_SIZE) {
+ unsigned long addr = vma->vm_start + i;
+
+ ret = vmf_insert_pfn(vma, addr, pfn);
+ if (ret & VM_FAULT_ERROR)
+ break;
+ }
+
+ return ret;
+}
+
+static vm_fault_t xe_mmio_gem_vm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct drm_gem_object *base = vma->vm_private_data;
+ struct xe_mmio_gem *obj = to_xe_mmio_gem(base);
+ struct drm_device *dev = base->dev;
+ vm_fault_t ret = VM_FAULT_NOPAGE;
+ unsigned long i;
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx)) {
+ /*
+ * Provide a dummy page to avoid SIGBUS for events such as hot-unplug.
+ * This gives userspace the option to recover instead of crashing.
+ * It is assumed that userspace will receive the notification via some
+ * other channel (e.g. a DRM uevent).
+ */
+ return xe_mmio_gem_vm_fault_dummy_page(vma);
+ }
+
+ for (i = 0; i < base->size; i += PAGE_SIZE) {
+ unsigned long addr = vma->vm_start + i;
+ unsigned long phys_addr = obj->phys_addr + i;
+
+ ret = vmf_insert_pfn(vma, addr, PHYS_PFN(phys_addr));
+ if (ret & VM_FAULT_ERROR)
+ break;
+ }
+
+ drm_dev_exit(idx);
+ return ret;
+}
diff --git a/drivers/gpu/drm/xe/xe_mmio_gem.h b/drivers/gpu/drm/xe/xe_mmio_gem.h
new file mode 100644
index 000000000000..4b76d5586ebb
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_mmio_gem.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_MMIO_GEM_H_
+#define _XE_MMIO_GEM_H_
+
+#include <linux/types.h>
+
+struct drm_file;
+struct xe_device;
+struct xe_mmio_gem;
+
+struct xe_mmio_gem *xe_mmio_gem_create(struct xe_device *xe, struct drm_file *file,
+ phys_addr_t phys_addr, size_t size);
+u64 xe_mmio_gem_mmap_offset(struct xe_mmio_gem *gem);
+void xe_mmio_gem_destroy(struct xe_mmio_gem *gem);
+
+#endif /* _XE_MMIO_GEM_H_ */
diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c
index 107ffe87808c..d08338fc3bc1 100644
--- a/drivers/gpu/drm/xe/xe_module.c
+++ b/drivers/gpu/drm/xe/xe_module.c
@@ -27,6 +27,8 @@
#define DEFAULT_PROBE_DISPLAY true
#define DEFAULT_VRAM_BAR_SIZE 0
#define DEFAULT_FORCE_PROBE CONFIG_DRM_XE_FORCE_PROBE
+#define DEFAULT_MAX_VFS ~0
+#define DEFAULT_MAX_VFS_STR "unlimited"
#define DEFAULT_WEDGED_MODE 1
#define DEFAULT_SVM_NOTIFIER_SIZE 512
@@ -34,6 +36,9 @@ struct xe_modparam xe_modparam = {
.probe_display = DEFAULT_PROBE_DISPLAY,
.guc_log_level = DEFAULT_GUC_LOG_LEVEL,
.force_probe = DEFAULT_FORCE_PROBE,
+#ifdef CONFIG_PCI_IOV
+ .max_vfs = DEFAULT_MAX_VFS,
+#endif
.wedged_mode = DEFAULT_WEDGED_MODE,
.svm_notifier_size = DEFAULT_SVM_NOTIFIER_SIZE,
/* the rest are 0 by default */
@@ -79,7 +84,8 @@ MODULE_PARM_DESC(force_probe,
module_param_named(max_vfs, xe_modparam.max_vfs, uint, 0400);
MODULE_PARM_DESC(max_vfs,
"Limit number of Virtual Functions (VFs) that could be managed. "
- "(0 = no VFs [default]; N = allow up to N VFs)");
+ "(0=no VFs; N=allow up to N VFs "
+ "[default=" DEFAULT_MAX_VFS_STR "])");
#endif
module_param_named_unsafe(wedged_mode, xe_modparam.wedged_mode, int, 0600);
@@ -129,24 +135,17 @@ static const struct init_funcs init_funcs[] = {
},
};
-static int __init xe_call_init_func(unsigned int i)
+static int __init xe_call_init_func(const struct init_funcs *func)
{
- if (WARN_ON(i >= ARRAY_SIZE(init_funcs)))
- return 0;
- if (!init_funcs[i].init)
- return 0;
-
- return init_funcs[i].init();
+ if (func->init)
+ return func->init();
+ return 0;
}
-static void xe_call_exit_func(unsigned int i)
+static void xe_call_exit_func(const struct init_funcs *func)
{
- if (WARN_ON(i >= ARRAY_SIZE(init_funcs)))
- return;
- if (!init_funcs[i].exit)
- return;
-
- init_funcs[i].exit();
+ if (func->exit)
+ func->exit();
}
static int __init xe_init(void)
@@ -154,10 +153,12 @@ static int __init xe_init(void)
int err, i;
for (i = 0; i < ARRAY_SIZE(init_funcs); i++) {
- err = xe_call_init_func(i);
+ err = xe_call_init_func(init_funcs + i);
if (err) {
+ pr_info("%s: module_init aborted at %ps %pe\n",
+ DRIVER_NAME, init_funcs[i].init, ERR_PTR(err));
while (i--)
- xe_call_exit_func(i);
+ xe_call_exit_func(init_funcs + i);
return err;
}
}
@@ -170,7 +171,7 @@ static void __exit xe_exit(void)
int i;
for (i = ARRAY_SIZE(init_funcs) - 1; i >= 0; i--)
- xe_call_exit_func(i);
+ xe_call_exit_func(init_funcs + i);
}
module_init(xe_init);
diff --git a/drivers/gpu/drm/xe/xe_nvm.c b/drivers/gpu/drm/xe/xe_nvm.c
index 61b0a1531a53..33f4ac82fc80 100644
--- a/drivers/gpu/drm/xe/xe_nvm.c
+++ b/drivers/gpu/drm/xe/xe_nvm.c
@@ -35,21 +35,25 @@ static const struct intel_dg_nvm_region regions[INTEL_DG_NVM_REGIONS] = {
static void xe_nvm_release_dev(struct device *dev)
{
+ struct auxiliary_device *aux = container_of(dev, struct auxiliary_device, dev);
+ struct intel_dg_nvm_dev *nvm = container_of(aux, struct intel_dg_nvm_dev, aux_dev);
+
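+ /* The release callback runs once the last reference to the aux device is dropped */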
+ kfree(nvm);
}
static bool xe_nvm_non_posted_erase(struct xe_device *xe)
{
- struct xe_gt *gt = xe_root_mmio_gt(xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
if (xe->info.platform != XE_BATTLEMAGE)
return false;
- return !(xe_mmio_read32(&gt->mmio, XE_REG(GEN12_CNTL_PROTECTED_NVM_REG)) &
+ return !(xe_mmio_read32(mmio, XE_REG(GEN12_CNTL_PROTECTED_NVM_REG)) &
NVM_NON_POSTED_ERASE_CHICKEN_BIT);
}
static bool xe_nvm_writable_override(struct xe_device *xe)
{
- struct xe_gt *gt = xe_root_mmio_gt(xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
bool writable_override;
resource_size_t base;
@@ -72,7 +76,7 @@ static bool xe_nvm_writable_override(struct xe_device *xe)
}
writable_override =
- !(xe_mmio_read32(&gt->mmio, HECI_FWSTS2(base)) &
+ !(xe_mmio_read32(mmio, HECI_FWSTS2(base)) &
HECI_FW_STATUS_2_NVM_ACCESS_MODE);
if (writable_override)
drm_info(&xe->drm, "NVM access overridden by jumper\n");
@@ -162,6 +166,5 @@ void xe_nvm_fini(struct xe_device *xe)
auxiliary_device_delete(&nvm->aux_dev);
auxiliary_device_uninit(&nvm->aux_dev);
- kfree(nvm);
xe->nvm = NULL;
}
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index 5729e7d3e335..a4894eb0d7f3 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -822,7 +822,7 @@ static void xe_oa_disable_metric_set(struct xe_oa_stream *stream)
u32 sqcnt1;
/* Enable thread stall DOP gating and EU DOP gating. */
- if (XE_WA(stream->gt, 1508761755)) {
+ if (XE_GT_WA(stream->gt, 1508761755)) {
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN,
_MASKED_BIT_DISABLE(STALL_DOP_GATING_DISABLE));
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN2,
@@ -883,9 +883,9 @@ static int xe_oa_alloc_oa_buffer(struct xe_oa_stream *stream, size_t size)
{
struct xe_bo *bo;
- bo = xe_bo_create_pin_map(stream->oa->xe, stream->gt->tile, NULL,
- size, ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT);
+ bo = xe_bo_create_pin_map_novm(stream->oa->xe, stream->gt->tile,
+ size, ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT, false);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -1079,7 +1079,7 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
* EU NOA signals behave incorrectly if EU clock gating is enabled.
* Disable thread stall DOP gating and EU DOP gating.
*/
- if (XE_WA(stream->gt, 1508761755)) {
+ if (XE_GT_WA(stream->gt, 1508761755)) {
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN,
_MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN2,
@@ -1754,7 +1754,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
* GuC reset of engines causes OA to lose configuration
* state. Prevent this by overriding GUCRC mode.
*/
- if (XE_WA(stream->gt, 1509372804)) {
+ if (XE_GT_WA(stream->gt, 1509372804)) {
ret = xe_guc_pc_override_gucrc_mode(&gt->uc.guc.pc,
SLPC_GUCRC_MODE_GUCRC_NO_RC6);
if (ret)
@@ -1886,7 +1886,7 @@ u32 xe_oa_timestamp_frequency(struct xe_gt *gt)
{
u32 reg, shift;
- if (XE_WA(gt, 18013179988) || XE_WA(gt, 14015568240)) {
+ if (XE_GT_WA(gt, 18013179988) || XE_GT_WA(gt, 14015568240)) {
xe_pm_runtime_get(gt_to_xe(gt));
reg = xe_mmio_read32(&gt->mmio, RPM_CONFIG0);
xe_pm_runtime_put(gt_to_xe(gt));
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index 3c40ef426f0c..be91343829dd 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -17,6 +17,8 @@
#include "display/xe_display.h"
#include "regs/xe_gt_regs.h"
+#include "regs/xe_regs.h"
+#include "xe_configfs.h"
#include "xe_device.h"
#include "xe_drv.h"
#include "xe_gt.h"
@@ -55,7 +57,7 @@ static const struct xe_graphics_desc graphics_xelp = {
};
#define XE_HP_FEATURES \
- .has_range_tlb_invalidation = true, \
+ .has_range_tlb_inval = true, \
.va_bits = 48, \
.vm_max_level = 3
@@ -103,7 +105,7 @@ static const struct xe_graphics_desc graphics_xelpg = {
.has_asid = 1, \
.has_atomic_enable_pte_bit = 1, \
.has_flat_ccs = 1, \
- .has_range_tlb_invalidation = 1, \
+ .has_range_tlb_inval = 1, \
.has_usm = 1, \
.has_64bit_timestamp = 1, \
.va_bits = 48, \
@@ -169,6 +171,7 @@ static const struct xe_device_desc tgl_desc = {
.dma_mask_size = 39,
.has_display = true,
.has_llc = true,
+ .has_sriov = true,
.max_gt_per_tile = 1,
.require_force_probe = true,
};
@@ -193,6 +196,7 @@ static const struct xe_device_desc adl_s_desc = {
.dma_mask_size = 39,
.has_display = true,
.has_llc = true,
+ .has_sriov = true,
.max_gt_per_tile = 1,
.require_force_probe = true,
.subplatforms = (const struct xe_subplatform_desc[]) {
@@ -210,6 +214,7 @@ static const struct xe_device_desc adl_p_desc = {
.dma_mask_size = 39,
.has_display = true,
.has_llc = true,
+ .has_sriov = true,
.max_gt_per_tile = 1,
.require_force_probe = true,
.subplatforms = (const struct xe_subplatform_desc[]) {
@@ -225,6 +230,7 @@ static const struct xe_device_desc adl_n_desc = {
.dma_mask_size = 39,
.has_display = true,
.has_llc = true,
+ .has_sriov = true,
.max_gt_per_tile = 1,
.require_force_probe = true,
};
@@ -270,6 +276,7 @@ static const struct xe_device_desc ats_m_desc = {
DG2_FEATURES,
.has_display = false,
+ .has_sriov = true,
};
static const struct xe_device_desc dg2_desc = {
@@ -327,6 +334,7 @@ static const struct xe_device_desc bmg_desc = {
.has_mbx_power_limits = true,
.has_gsc_nvm = 1,
.has_heci_cscfi = 1,
+ .has_late_bind = true,
.has_sriov = true,
.max_gt_per_tile = 2,
.needs_scratch = true,
@@ -503,6 +511,26 @@ static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver,
*revid = REG_FIELD_GET(GMD_ID_REVID, val);
}
+static const struct xe_ip *find_graphics_ip(unsigned int verx100)
+{
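+ /* Allow KUnit tests to substitute this lookup with a stub at runtime */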
+ KUNIT_STATIC_STUB_REDIRECT(find_graphics_ip, verx100);
+
+ for (int i = 0; i < ARRAY_SIZE(graphics_ips); i++)
+ if (graphics_ips[i].verx100 == verx100)
+ return &graphics_ips[i];
+ return NULL;
+}
+
+static const struct xe_ip *find_media_ip(unsigned int verx100)
+{
+ KUNIT_STATIC_STUB_REDIRECT(find_media_ip, verx100);
+
+ for (int i = 0; i < ARRAY_SIZE(media_ips); i++)
+ if (media_ips[i].verx100 == verx100)
+ return &media_ips[i];
+ return NULL;
+}
+
/*
* Read IP version from hardware and select graphics/media IP descriptors
* based on the result.
@@ -520,14 +548,7 @@ static void handle_gmdid(struct xe_device *xe,
read_gmdid(xe, GMDID_GRAPHICS, &ver, graphics_revid);
- for (int i = 0; i < ARRAY_SIZE(graphics_ips); i++) {
- if (ver == graphics_ips[i].verx100) {
- *graphics_ip = &graphics_ips[i];
-
- break;
- }
- }
-
+ *graphics_ip = find_graphics_ip(ver);
if (!*graphics_ip) {
drm_err(&xe->drm, "Hardware reports unknown graphics version %u.%02u\n",
ver / 100, ver % 100);
@@ -538,14 +559,7 @@ static void handle_gmdid(struct xe_device *xe,
if (ver == 0)
return;
- for (int i = 0; i < ARRAY_SIZE(media_ips); i++) {
- if (ver == media_ips[i].verx100) {
- *media_ip = &media_ips[i];
-
- break;
- }
- }
-
+ *media_ip = find_media_ip(ver);
if (!*media_ip) {
drm_err(&xe->drm, "Hardware reports unknown media version %u.%02u\n",
ver / 100, ver % 100);
@@ -574,6 +588,7 @@ static int xe_info_init_early(struct xe_device *xe,
xe->info.has_gsc_nvm = desc->has_gsc_nvm;
xe->info.has_heci_gscfi = desc->has_heci_gscfi;
xe->info.has_heci_cscfi = desc->has_heci_cscfi;
+ xe->info.has_late_bind = desc->has_late_bind;
xe->info.has_llc = desc->has_llc;
xe->info.has_pxp = desc->has_pxp;
xe->info.has_sriov = desc->has_sriov;
@@ -599,6 +614,44 @@ static int xe_info_init_early(struct xe_device *xe,
}
/*
+ * Possibly override the number of tiles based on the configuration register.
+ */
+static void xe_info_probe_tile_count(struct xe_device *xe)
+{
+ struct xe_mmio *mmio;
+ u8 tile_count;
+ u32 mtcfg;
+
+ KUNIT_STATIC_STUB_REDIRECT(xe_info_probe_tile_count, xe);
+
+ /*
+ * Probe for tile count only for platforms that support multiple
+ * tiles.
+ */
+ if (xe->info.tile_count == 1)
+ return;
+
+ if (xe->info.skip_mtcfg)
+ return;
+
+ mmio = xe_root_tile_mmio(xe);
+
+ /*
+ * Although the per-tile mmio regs are not yet initialized, this
+ * is fine as it's going to the root tile's mmio, which is
+ * guaranteed to be initialized earlier in xe_mmio_probe_early().
+ */
+ mtcfg = xe_mmio_read32(mmio, XEHP_MTCFG_ADDR);
+ tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1;
+
+ if (tile_count < xe->info.tile_count) {
+ drm_info(&xe->drm, "tile_count: %d, reduced_tile_count %d\n",
+ xe->info.tile_count, tile_count);
+ xe->info.tile_count = tile_count;
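+ /* gt_count is derived later, in xe_info_init(), from the remaining tiles' GTs */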
+ }
+}
+
+/*
* Initialize device info content that does require knowledge about
* graphics / media IP version.
* Make sure that GT / tile structures allocated by the driver match the data
@@ -668,10 +721,12 @@ static int xe_info_init(struct xe_device *xe,
/* Runtime detection may change this later */
xe->info.has_flat_ccs = graphics_desc->has_flat_ccs;
- xe->info.has_range_tlb_invalidation = graphics_desc->has_range_tlb_invalidation;
+ xe->info.has_range_tlb_inval = graphics_desc->has_range_tlb_inval;
xe->info.has_usm = graphics_desc->has_usm;
xe->info.has_64bit_timestamp = graphics_desc->has_64bit_timestamp;
+ xe_info_probe_tile_count(xe);
+
for_each_remote_tile(tile, xe, id) {
int err;
@@ -687,12 +742,17 @@ static int xe_info_init(struct xe_device *xe,
* All of these together determine the overall GT count.
*/
for_each_tile(tile, xe, id) {
+ int err;
+
gt = tile->primary_gt;
gt->info.type = XE_GT_TYPE_MAIN;
gt->info.id = tile->id * xe->info.max_gt_per_tile;
gt->info.has_indirect_ring_state = graphics_desc->has_indirect_ring_state;
gt->info.engine_mask = graphics_desc->hw_engine_mask;
- xe->info.gt_count++;
+
+ err = xe_tile_alloc_vram(tile);
+ if (err)
+ return err;
if (MEDIA_VER(xe) < 13 && media_desc)
gt->info.engine_mask |= media_desc->hw_engine_mask;
@@ -713,9 +773,15 @@ static int xe_info_init(struct xe_device *xe,
gt->info.id = tile->id * xe->info.max_gt_per_tile + 1;
gt->info.has_indirect_ring_state = media_desc->has_indirect_ring_state;
gt->info.engine_mask = media_desc->hw_engine_mask;
- xe->info.gt_count++;
}
+ /*
+ * Now that we have tiles and GTs defined, let's loop over valid GTs
+ * in order to define gt_count.
+ */
+ for_each_gt(gt, xe, id)
+ xe->info.gt_count++;
+
return 0;
}
@@ -726,7 +792,7 @@ static void xe_pci_remove(struct pci_dev *pdev)
if (IS_SRIOV_PF(xe))
xe_pci_sriov_configure(pdev, 0);
- if (xe_survivability_mode_is_enabled(xe))
+ if (xe_survivability_mode_is_boot_enabled(xe))
return;
xe_device_remove(xe);
@@ -759,6 +825,8 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct xe_device *xe;
int err;
+ xe_configfs_check_device(pdev);
+
if (desc->require_force_probe && !id_forced(pdev->device)) {
dev_info(&pdev->dev,
"Your graphics device %04x is not officially supported\n"
@@ -806,7 +874,7 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* flashed through mei. Return success, if survivability mode
* is enabled due to pcode failure or configfs being set
*/
- if (xe_survivability_mode_is_enabled(xe))
+ if (xe_survivability_mode_is_boot_enabled(xe))
return 0;
if (err)
@@ -900,7 +968,7 @@ static int xe_pci_suspend(struct device *dev)
struct xe_device *xe = pdev_to_xe_device(pdev);
int err;
- if (xe_survivability_mode_is_enabled(xe))
+ if (xe_survivability_mode_is_boot_enabled(xe))
return -EBUSY;
err = xe_pm_suspend(xe);
diff --git a/drivers/gpu/drm/xe/xe_pci_sriov.c b/drivers/gpu/drm/xe/xe_pci_sriov.c
index 8813efdcafbb..af05db07162e 100644
--- a/drivers/gpu/drm/xe/xe_pci_sriov.c
+++ b/drivers/gpu/drm/xe/xe_pci_sriov.c
@@ -3,6 +3,10 @@
* Copyright © 2023-2024 Intel Corporation
*/
+#include <linux/bitops.h>
+#include <linux/pci.h>
+
+#include "regs/xe_bars.h"
#include "xe_assert.h"
#include "xe_device.h"
#include "xe_gt_sriov_pf_config.h"
@@ -12,6 +16,7 @@
#include "xe_pci_sriov.h"
#include "xe_pm.h"
#include "xe_sriov.h"
+#include "xe_sriov_pf.h"
#include "xe_sriov_pf_helpers.h"
#include "xe_sriov_printk.h"
@@ -127,6 +132,18 @@ static void pf_engine_activity_stats(struct xe_device *xe, unsigned int num_vfs,
}
}
+static int resize_vf_vram_bar(struct xe_device *xe, int num_vfs)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ u32 sizes;
+
+ sizes = pci_iov_vf_bar_get_sizes(pdev, VF_LMEM_BAR, num_vfs);
+ if (!sizes)
+ return 0;
+
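+ /* sizes is a bitmask of supported VF BAR sizes; pick the largest via __fls() */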
+ return pci_iov_vf_bar_set_size(pdev, VF_LMEM_BAR, __fls(sizes));
+}
+
static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
@@ -138,6 +155,10 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
xe_assert(xe, num_vfs <= total_vfs);
xe_sriov_dbg(xe, "enabling %u VF%s\n", num_vfs, str_plural(num_vfs));
+ err = xe_sriov_pf_wait_ready(xe);
+ if (err)
+ goto out;
+
/*
* We must hold additional reference to the runtime PM to keep PF in D0
* during VFs lifetime, as our VFs do not implement the PM capability.
@@ -153,6 +174,12 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
if (err < 0)
goto failed;
+ if (IS_DGFX(xe)) {
+ err = resize_vf_vram_bar(xe, num_vfs);
+ if (err)
+ xe_sriov_info(xe, "Failed to set VF LMEM BAR size: %d\n", err);
+ }
+
err = pci_enable_sriov(pdev, num_vfs);
if (err < 0)
goto failed;
@@ -169,7 +196,7 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
failed:
pf_unprovision_vfs(xe, num_vfs);
xe_pm_runtime_put(xe);
-
+out:
xe_sriov_notice(xe, "Failed to enable %u VF%s (%pe)\n",
num_vfs, str_plural(num_vfs), ERR_PTR(err));
return err;
diff --git a/drivers/gpu/drm/xe/xe_pci_types.h b/drivers/gpu/drm/xe/xe_pci_types.h
index 4de6f69ed975..9b9766a3baa3 100644
--- a/drivers/gpu/drm/xe/xe_pci_types.h
+++ b/drivers/gpu/drm/xe/xe_pci_types.h
@@ -39,6 +39,7 @@ struct xe_device_desc {
u8 has_gsc_nvm:1;
u8 has_heci_gscfi:1;
u8 has_heci_cscfi:1;
+ u8 has_late_bind:1;
u8 has_llc:1;
u8 has_mbx_power_limits:1;
u8 has_pxp:1;
@@ -60,7 +61,7 @@ struct xe_graphics_desc {
u8 has_atomic_enable_pte_bit:1;
u8 has_flat_ccs:1;
u8 has_indirect_ring_state:1;
- u8 has_range_tlb_invalidation:1;
+ u8 has_range_tlb_inval:1;
u8 has_usm:1;
u8 has_64bit_timestamp:1;
};
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index e279b47ba03b..2c5a44377994 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -18,12 +18,15 @@
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
-#include "xe_guc.h"
+#include "xe_gt_idle.h"
#include "xe_i2c.h"
#include "xe_irq.h"
+#include "xe_late_bind_fw.h"
#include "xe_pcode.h"
#include "xe_pxp.h"
+#include "xe_sriov_vf_ccs.h"
#include "xe_trace.h"
+#include "xe_vm.h"
#include "xe_wa.h"
/**
@@ -127,6 +130,8 @@ int xe_pm_suspend(struct xe_device *xe)
if (err)
goto err;
+ xe_late_bind_wait_for_worker_completion(&xe->late_bind);
+
for_each_gt(gt, xe, id)
xe_gt_suspend_prepare(gt);
@@ -176,6 +181,9 @@ int xe_pm_resume(struct xe_device *xe)
drm_dbg(&xe->drm, "Resuming device\n");
trace_xe_pm_resume(xe, __builtin_return_address(0));
+ for_each_gt(gt, xe, id)
+ xe_gt_idle_disable_c6(gt);
+
for_each_tile(tile, xe, id)
xe_wa_apply_tile_workarounds(tile);
@@ -193,7 +201,7 @@ int xe_pm_resume(struct xe_device *xe)
if (err)
goto err;
- xe_i2c_pm_resume(xe, xe->d3cold.allowed);
+ xe_i2c_pm_resume(xe, true);
xe_irq_resume(xe);
@@ -208,6 +216,11 @@ int xe_pm_resume(struct xe_device *xe)
xe_pxp_pm_resume(xe->pxp);
+ if (IS_VF_CCS_READY(xe))
+ xe_sriov_vf_ccs_register_context(xe);
+
+ xe_late_bind_fw_load(&xe->late_bind);
+
drm_dbg(&xe->drm, "Device resumed\n");
return 0;
err:
@@ -243,6 +256,10 @@ static void xe_pm_runtime_init(struct xe_device *xe)
{
struct device *dev = xe->drm.dev;
+ /* Our current VFs do not support RPM, so disable it */
+ if (IS_SRIOV_VF(xe))
+ return;
+
/*
* Disable the system suspend direct complete optimization.
* We need to ensure that the regular device suspend/resume functions
@@ -290,6 +307,19 @@ static u32 vram_threshold_value(struct xe_device *xe)
return DEFAULT_VRAM_THRESHOLD;
}
+static void xe_pm_wake_rebind_workers(struct xe_device *xe)
+{
+ struct xe_vm *vm, *next;
+
+ mutex_lock(&xe->rebind_resume_lock);
+ list_for_each_entry_safe(vm, next, &xe->rebind_resume_list,
+ preempt.pm_activate_link) {
+ list_del_init(&vm->preempt.pm_activate_link);
+ xe_vm_resume_rebind_worker(vm);
+ }
+ mutex_unlock(&xe->rebind_resume_lock);
+}
+
static int xe_pm_notifier_callback(struct notifier_block *nb,
unsigned long action, void *data)
{
@@ -299,30 +329,30 @@ static int xe_pm_notifier_callback(struct notifier_block *nb,
switch (action) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
+ reinit_completion(&xe->pm_block);
xe_pm_runtime_get(xe);
err = xe_bo_evict_all_user(xe);
- if (err) {
+ if (err)
drm_dbg(&xe->drm, "Notifier evict user failed (%d)\n", err);
- xe_pm_runtime_put(xe);
- break;
- }
err = xe_bo_notifier_prepare_all_pinned(xe);
- if (err) {
+ if (err)
drm_dbg(&xe->drm, "Notifier prepare pin failed (%d)\n", err);
- xe_pm_runtime_put(xe);
- }
+ /*
+ * Keep the runtime pm reference until post hibernation / post suspend to
+ * avoid a runtime suspend interfering with evicted objects or backup
+ * allocations.
+ */
break;
case PM_POST_HIBERNATION:
case PM_POST_SUSPEND:
+ complete_all(&xe->pm_block);
+ xe_pm_wake_rebind_workers(xe);
xe_bo_notifier_unprepare_all_pinned(xe);
xe_pm_runtime_put(xe);
break;
}
- if (err)
- return NOTIFY_BAD;
-
return NOTIFY_DONE;
}
@@ -344,6 +374,14 @@ int xe_pm_init(struct xe_device *xe)
if (err)
return err;
+ err = drmm_mutex_init(&xe->drm, &xe->rebind_resume_lock);
+ if (err)
+ goto err_unregister;
+
+ init_completion(&xe->pm_block);
+ complete_all(&xe->pm_block);
+ INIT_LIST_HEAD(&xe->rebind_resume_list);
+
/* For now suspend/resume is only allowed with GuC */
if (!xe_device_uc_enabled(xe))
return 0;
@@ -367,6 +405,10 @@ static void xe_pm_runtime_fini(struct xe_device *xe)
{
struct device *dev = xe->drm.dev;
+ /* Our current VFs do not support RPM, so disable it */
+ if (IS_SRIOV_VF(xe))
+ return;
+
pm_runtime_get_sync(dev);
pm_runtime_forbid(dev);
}
@@ -525,6 +567,9 @@ int xe_pm_runtime_resume(struct xe_device *xe)
xe_rpm_lockmap_acquire(xe);
+ for_each_gt(gt, xe, id)
+ xe_gt_idle_disable_c6(gt);
+
if (xe->d3cold.allowed) {
err = xe_pcode_ready(xe, true);
if (err)
@@ -558,6 +603,12 @@ int xe_pm_runtime_resume(struct xe_device *xe)
xe_pxp_pm_resume(xe->pxp);
+ if (IS_VF_CCS_READY(xe))
+ xe_sriov_vf_ccs_register_context(xe);
+
+ if (xe->d3cold.allowed)
+ xe_late_bind_fw_load(&xe->late_bind);
+
out:
xe_rpm_lockmap_release(xe);
xe_pm_write_callback_task(xe, NULL);
diff --git a/drivers/gpu/drm/xe/xe_printk.h b/drivers/gpu/drm/xe/xe_printk.h
new file mode 100644
index 000000000000..c5be2385aa95
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_printk.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_PRINTK_H_
+#define _XE_PRINTK_H_
+
+#include <drm/drm_print.h>
+
+#include "xe_device_types.h"
+
+#define __XE_PRINTK_FMT(_xe, _fmt, _args...) _fmt, ##_args
+
+#define xe_printk(_xe, _level, _fmt, ...) \
+ drm_##_level(&(_xe)->drm, __XE_PRINTK_FMT((_xe), _fmt, ## __VA_ARGS__))
+
+#define xe_err(_xe, _fmt, ...) \
+ xe_printk((_xe), err, _fmt, ##__VA_ARGS__)
+
+#define xe_err_once(_xe, _fmt, ...) \
+ xe_printk((_xe), err_once, _fmt, ##__VA_ARGS__)
+
+#define xe_err_ratelimited(_xe, _fmt, ...) \
+ xe_printk((_xe), err_ratelimited, _fmt, ##__VA_ARGS__)
+
+#define xe_warn(_xe, _fmt, ...) \
+ xe_printk((_xe), warn, _fmt, ##__VA_ARGS__)
+
+#define xe_notice(_xe, _fmt, ...) \
+ xe_printk((_xe), notice, _fmt, ##__VA_ARGS__)
+
+#define xe_info(_xe, _fmt, ...) \
+ xe_printk((_xe), info, _fmt, ##__VA_ARGS__)
+
+#define xe_dbg(_xe, _fmt, ...) \
+ xe_printk((_xe), dbg, _fmt, ##__VA_ARGS__)
+
+#define xe_WARN_type(_xe, _type, _condition, _fmt, ...) \
+ drm_WARN##_type(&(_xe)->drm, _condition, _fmt, ## __VA_ARGS__)
+
+#define xe_WARN(_xe, _condition, _fmt, ...) \
+ xe_WARN_type((_xe),, _condition, __XE_PRINTK_FMT((_xe), _fmt, ## __VA_ARGS__))
+
+#define xe_WARN_ONCE(_xe, _condition, _fmt, ...) \
+ xe_WARN_type((_xe), _ONCE, _condition, __XE_PRINTK_FMT((_xe), _fmt, ## __VA_ARGS__))
+
+#define xe_WARN_ON(_xe, _condition) \
+ xe_WARN((_xe), _condition, "%s(%s)", "WARN_ON", __stringify(_condition))
+
+#define xe_WARN_ON_ONCE(_xe, _condition) \
+ xe_WARN_ONCE((_xe), _condition, "%s(%s)", "WARN_ON_ONCE", __stringify(_condition))
+
+static inline void __xe_printfn_err(struct drm_printer *p, struct va_format *vaf)
+{
+ struct xe_device *xe = p->arg;
+
+ xe_err(xe, "%pV", vaf);
+}
+
+static inline void __xe_printfn_info(struct drm_printer *p, struct va_format *vaf)
+{
+ struct xe_device *xe = p->arg;
+
+ xe_info(xe, "%pV", vaf);
+}
+
+static inline void __xe_printfn_dbg(struct drm_printer *p, struct va_format *vaf)
+{
+ struct xe_device *xe = p->arg;
+ struct drm_printer ddp;
+
+ /*
+ * The original xe_dbg() callsite annotations are useless here,
+ * redirect to the tweaked drm_dbg_printer() instead.
+ */
+ ddp = drm_dbg_printer(&xe->drm, DRM_UT_DRIVER, NULL);
+ ddp.origin = p->origin;
+
+ drm_printf(&ddp, __XE_PRINTK_FMT(xe, "%pV", vaf));
+}
+
+/**
+ * xe_err_printer - Construct a &drm_printer that outputs to xe_err()
+ * @xe: the &xe_device pointer to use in xe_err()
+ *
+ * Return: The &drm_printer object.
+ */
+static inline struct drm_printer xe_err_printer(struct xe_device *xe)
+{
+ struct drm_printer p = {
+ .printfn = __xe_printfn_err,
+ .arg = xe,
+ };
+ return p;
+}
+
+/**
+ * xe_info_printer - Construct a &drm_printer that outputs to xe_info()
+ * @xe: the &xe_device pointer to use in xe_info()
+ *
+ * Return: The &drm_printer object.
+ */
+static inline struct drm_printer xe_info_printer(struct xe_device *xe)
+{
+ struct drm_printer p = {
+ .printfn = __xe_printfn_info,
+ .arg = xe,
+ };
+ return p;
+}
+
+/**
+ * xe_dbg_printer - Construct a &drm_printer that outputs like xe_dbg()
+ * @xe: the &xe_device pointer to use in xe_dbg()
+ *
+ * Return: The &drm_printer object.
+ */
+static inline struct drm_printer xe_dbg_printer(struct xe_device *xe)
+{
+ struct drm_printer p = {
+ .printfn = __xe_printfn_dbg,
+ .arg = xe,
+ .origin = (const void *)_THIS_IP_,
+ };
+ return p;
+}
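+
+/*
+ * Example usage (minimal sketch): the printers above plug into the
+ * standard drm_printf() machinery, e.g.:
+ *
+ * struct drm_printer p = xe_err_printer(xe);
+ *
+ * drm_printf(&p, "something failed: %d\n", err);
+ */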
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_psmi.c b/drivers/gpu/drm/xe/xe_psmi.c
new file mode 100644
index 000000000000..45d142191d60
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_psmi.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_configfs.h"
+#include "xe_psmi.h"
+
+/*
+ * PSMI capture support
+ *
+ * PSMI capture requires a physically contiguous buffer. The PSMI tool owns
+ * all necessary configuration (MMIO register writes are done from
+ * user-space); however, the KMD needs to provide the PSMI tool with the
+ * physical address of the base of the PSMI buffer when it resides in VRAM.
+ *
+ * VRAM backed PSMI buffer:
+ * The buffer is allocated as a GEM object with the XE_BO_FLAG_PINNED flag,
+ * which creates a contiguous allocation. The physical address is returned
+ * from psmi_debugfs_capture_addr_show(). The PSMI tool can mmap the buffer
+ * via the PCI BAR through sysfs.
+ *
+ * SYSTEM memory backed PSMI buffer:
+ * The interface here does not support allocating from the SYSTEM memory
+ * region. The PSMI tool needs to allocate memory itself using hugetlbfs. To
+ * get the physical address, user-space can query /proc/[pid]/pagemap. As an
+ * alternative, CMA debugfs could also be used to allocate reserved CMA memory.
+ */
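+
+/*
+ * Example debugfs flow (illustrative; file names as registered in
+ * xe_psmi_debugfs_register() below):
+ *
+ * echo 0x2 > psmi_capture_region_mask (select VRAM region(s))
+ * echo $SIZE > psmi_capture_size (allocate capture buffers)
+ * cat psmi_capture_addr (read per-region base address)
+ */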
+
+static bool psmi_enabled(struct xe_device *xe)
+{
+ return xe_configfs_get_psmi_enabled(to_pci_dev(xe->drm.dev));
+}
+
+static void psmi_free_object(struct xe_bo *bo)
+{
+ xe_bo_lock(bo, NULL);
+ xe_bo_unpin(bo);
+ xe_bo_unlock(bo);
+ xe_bo_put(bo);
+}
+
+/*
+ * Free PSMI capture buffer objects.
+ */
+static void psmi_cleanup(struct xe_device *xe)
+{
+ unsigned long id, region_mask = xe->psmi.region_mask;
+ struct xe_bo *bo;
+
+ for_each_set_bit(id, &region_mask,
+ ARRAY_SIZE(xe->psmi.capture_obj)) {
+ /* smem should never be set */
+ xe_assert(xe, id);
+
+ bo = xe->psmi.capture_obj[id];
+ if (bo) {
+ psmi_free_object(bo);
+ xe->psmi.capture_obj[id] = NULL;
+ }
+ }
+}
+
+static struct xe_bo *psmi_alloc_object(struct xe_device *xe,
+ unsigned int id, size_t bo_size)
+{
+ struct xe_tile *tile;
+
+ if (!id || !bo_size)
+ return NULL;
+
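+ /* Region id 0 is SMEM (rejected above); VRAM region N is backed by tile N - 1 */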
+ tile = &xe->tiles[id - 1];
+
+ /* VRAM: Allocate GEM object for the capture buffer */
+ return xe_bo_create_pin_range_novm(xe, tile, bo_size, 0, ~0ull,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_PINNED |
+ XE_BO_FLAG_PINNED_LATE_RESTORE |
+ XE_BO_FLAG_NEEDS_CPU_ACCESS);
+}
+
+/*
+ * Allocate PSMI capture buffer objects (via debugfs set function), based on
+ * which regions the user has selected in region_mask. @size: size in bytes
+ * (should be power of 2)
+ *
+ * Always release/free the current buffer objects before attempting to allocate
+ * new ones. Size == 0 will free all current buffers.
+ *
+ * Note, we don't write any registers as the capture tool is already configuring
+ * all PSMI registers itself via mmio space.
+ */
+static int psmi_resize_object(struct xe_device *xe, size_t size)
+{
+ unsigned long id, region_mask = xe->psmi.region_mask;
+ struct xe_bo *bo = NULL;
+ int err = 0;
+
+ /* if resizing, free currently allocated buffers first */
+ psmi_cleanup(xe);
+
+ /* size may be 0, in which case we are now done */
+ if (!size)
+ return 0;
+
+ for_each_set_bit(id, &region_mask,
+ ARRAY_SIZE(xe->psmi.capture_obj)) {
+ /* smem should never be set */
+ xe_assert(xe, id);
+
+ bo = psmi_alloc_object(xe, id, size);
+ if (IS_ERR(bo)) {
+ err = PTR_ERR(bo);
+ break;
+ }
+ xe->psmi.capture_obj[id] = bo;
+
+ drm_info(&xe->drm,
+ "PSMI capture size requested: %zu bytes, allocated: %lu:%zu\n",
+ size, id, bo ? xe_bo_size(bo) : 0);
+ }
+
+ /* on error, reverse what was allocated */
+ if (err)
+ psmi_cleanup(xe);
+
+ return err;
+}
+
+/*
+ * Returns an address for the capture tool to use to find start of capture
+ * buffer. Capture tool requires the capability to have a buffer allocated per
+ * each tile (VRAM region), thus we return an address for each region.
+ */
+static int psmi_debugfs_capture_addr_show(struct seq_file *m, void *data)
+{
+ struct xe_device *xe = m->private;
+ unsigned long id, region_mask;
+ struct xe_bo *bo;
+ u64 val;
+
+ region_mask = xe->psmi.region_mask;
+ for_each_set_bit(id, &region_mask,
+ ARRAY_SIZE(xe->psmi.capture_obj)) {
+ /* smem should never be set */
+ xe_assert(xe, id);
+
+ /* VRAM region */
+ bo = xe->psmi.capture_obj[id];
+ if (!bo)
+ continue;
+
+ /* pinned, so don't need bo_lock */
+ val = __xe_bo_addr(bo, 0, PAGE_SIZE);
+ seq_printf(m, "%lu: 0x%llx\n", id, val);
+ }
+
+ return 0;
+}
+
+/*
+ * Return capture buffer size, using the size from first allocated object that
+ * is found. This works because all objects must be of the same size.
+ */
+static int psmi_debugfs_capture_size_get(void *data, u64 *val)
+{
+ unsigned long id, region_mask;
+ struct xe_device *xe = data;
+ struct xe_bo *bo;
+
+ region_mask = xe->psmi.region_mask;
+ for_each_set_bit(id, &region_mask,
+ ARRAY_SIZE(xe->psmi.capture_obj)) {
+ /* smem should never be set */
+ xe_assert(xe, id);
+
+ bo = xe->psmi.capture_obj[id];
+ if (bo) {
+ *val = xe_bo_size(bo);
+ return 0;
+ }
+ }
+
+ /* no capture objects are allocated */
+ *val = 0;
+
+ return 0;
+}
+
+/*
+ * Set size of PSMI capture buffer. This triggers the allocation of capture
+ * buffer in each memory region as specified with prior write to
+ * psmi_capture_region_mask.
+ */
+static int psmi_debugfs_capture_size_set(void *data, u64 val)
+{
+ struct xe_device *xe = data;
+
+ /* user must have specified at least one region */
+ if (!xe->psmi.region_mask)
+ return -EINVAL;
+
+ return psmi_resize_object(xe, val);
+}
+
+static int psmi_debugfs_capture_region_mask_get(void *data, u64 *val)
+{
+ struct xe_device *xe = data;
+
+ *val = xe->psmi.region_mask;
+
+ return 0;
+}
+
+/*
+ * Select VRAM regions for multi-tile devices, only allowed when buffer is not
+ * currently allocated.
+ */
+static int psmi_debugfs_capture_region_mask_set(void *data, u64 region_mask)
+{
+ struct xe_device *xe = data;
+ u64 size = 0;
+
+ /* SMEM is not supported (see comments at top of file) */
+ if (region_mask & 0x1)
+ return -EOPNOTSUPP;
+
+ /* input bitmask should contain only valid TTM regions */
+ if (!region_mask || region_mask & ~xe->info.mem_region_mask)
+ return -EINVAL;
+
+ /* only allow setting mask if buffer is not yet allocated */
+ psmi_debugfs_capture_size_get(xe, &size);
+ if (size)
+ return -EBUSY;
+
+ xe->psmi.region_mask = region_mask;
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(psmi_debugfs_capture_addr);
+
+DEFINE_DEBUGFS_ATTRIBUTE(psmi_debugfs_capture_region_mask_fops,
+ psmi_debugfs_capture_region_mask_get,
+ psmi_debugfs_capture_region_mask_set,
+ "0x%llx\n");
+
+DEFINE_DEBUGFS_ATTRIBUTE(psmi_debugfs_capture_size_fops,
+ psmi_debugfs_capture_size_get,
+ psmi_debugfs_capture_size_set,
+ "%lld\n");
+
+void xe_psmi_debugfs_register(struct xe_device *xe)
+{
+ struct drm_minor *minor;
+
+ if (!psmi_enabled(xe))
+ return;
+
+ minor = xe->drm.primary;
+ if (!minor->debugfs_root)
+ return;
+
+ debugfs_create_file("psmi_capture_addr",
+ 0400, minor->debugfs_root, xe,
+ &psmi_debugfs_capture_addr_fops);
+
+ debugfs_create_file("psmi_capture_region_mask",
+ 0600, minor->debugfs_root, xe,
+ &psmi_debugfs_capture_region_mask_fops);
+
+ debugfs_create_file("psmi_capture_size",
+ 0600, minor->debugfs_root, xe,
+ &psmi_debugfs_capture_size_fops);
+}
+
+static void psmi_fini(void *arg)
+{
+ psmi_cleanup(arg);
+}
+
+int xe_psmi_init(struct xe_device *xe)
+{
+ if (!psmi_enabled(xe))
+ return 0;
+
+ return devm_add_action(xe->drm.dev, psmi_fini, xe);
+}
diff --git a/drivers/gpu/drm/xe/xe_psmi.h b/drivers/gpu/drm/xe/xe_psmi.h
new file mode 100644
index 000000000000..b1dfba80d893
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_psmi.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_PSMI_H_
+#define _XE_PSMI_H_
+
+struct xe_device;
+
+int xe_psmi_init(struct xe_device *xe);
+void xe_psmi_debugfs_register(struct xe_device *xe);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index c8e63bd23300..a1c88f9a6c76 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -13,16 +13,17 @@
#include "xe_drm_client.h"
#include "xe_exec_queue.h"
#include "xe_gt.h"
-#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_pt_types.h"
#include "xe_pt_walk.h"
#include "xe_res_cursor.h"
#include "xe_sched_job.h"
-#include "xe_sync.h"
#include "xe_svm.h"
+#include "xe_sync.h"
+#include "xe_tlb_inval_job.h"
#include "xe_trace.h"
#include "xe_ttm_stolen_mgr.h"
+#include "xe_userptr.h"
#include "xe_vm.h"
struct xe_pt_dir {
@@ -69,7 +70,7 @@ static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
if (level > MAX_HUGEPTE_LEVEL)
return vm->pt_ops->pde_encode_bo(vm->scratch_pt[id][level - 1]->bo,
- 0, pat_index);
+ 0);
return vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, IS_DGFX(xe), 0) |
XE_PTE_NULL;
@@ -88,6 +89,7 @@ static void xe_pt_free(struct xe_pt *pt)
* @vm: The vm to create for.
* @tile: The tile to create for.
* @level: The page-table level.
+ * @exec: The drm_exec object used to lock the vm.
*
* Allocate and initialize a single struct xe_pt metadata structure. Also
* create the corresponding page-table bo, but don't initialize it. If the
@@ -99,7 +101,7 @@ static void xe_pt_free(struct xe_pt *pt)
* error.
*/
struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
- unsigned int level)
+ unsigned int level, struct drm_exec *exec)
{
struct xe_pt *pt;
struct xe_bo *bo;
@@ -123,9 +125,11 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE;
pt->level = level;
+
+ drm_WARN_ON(&vm->xe->drm, IS_ERR_OR_NULL(exec));
bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K,
ttm_bo_type_kernel,
- bo_flags);
+ bo_flags, exec);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto err_kfree;
@@ -518,7 +522,7 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
{
struct xe_pt_stage_bind_walk *xe_walk =
container_of(walk, typeof(*xe_walk), base);
- u16 pat_index = xe_walk->vma->pat_index;
+ u16 pat_index = xe_walk->vma->attr.pat_index;
struct xe_pt *xe_parent = container_of(parent, typeof(*xe_parent), base);
struct xe_vm *vm = xe_walk->vm;
struct xe_pt *xe_child;
@@ -589,7 +593,8 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
if (covers || !*child) {
u64 flags = 0;
- xe_child = xe_pt_create(xe_walk->vm, xe_walk->tile, level - 1);
+ xe_child = xe_pt_create(xe_walk->vm, xe_walk->tile, level - 1,
+ xe_vm_validation_exec(vm));
if (IS_ERR(xe_child))
return PTR_ERR(xe_child);
@@ -616,7 +621,7 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
xe_child->is_compact = true;
}
- pte = vm->pt_ops->pde_encode_bo(xe_child->bo, 0, pat_index) | flags;
+ pte = vm->pt_ops->pde_encode_bo(xe_child->bo, 0) | flags;
ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, xe_child,
pte);
}
@@ -640,28 +645,31 @@ static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = {
* - In all other cases device atomics will be disabled with AE=0 until an application
* request differently using a ioctl like madvise.
*/
-static bool xe_atomic_for_vram(struct xe_vm *vm)
+static bool xe_atomic_for_vram(struct xe_vm *vm, struct xe_vma *vma)
{
+ if (vma->attr.atomic_access == DRM_XE_ATOMIC_CPU)
+ return false;
+
return true;
}
-static bool xe_atomic_for_system(struct xe_vm *vm, struct xe_bo *bo)
+static bool xe_atomic_for_system(struct xe_vm *vm, struct xe_vma *vma)
{
struct xe_device *xe = vm->xe;
+ struct xe_bo *bo = xe_vma_bo(vma);
- if (!xe->info.has_device_atomics_on_smem)
+ if (!xe->info.has_device_atomics_on_smem ||
+ vma->attr.atomic_access == DRM_XE_ATOMIC_CPU)
return false;
+ if (vma->attr.atomic_access == DRM_XE_ATOMIC_DEVICE)
+ return true;
+
/*
* If a SMEM+LMEM allocation is backed by SMEM, a device
	 * atomic will cause a GPU page fault and the allocation then
	 * gets migrated to LMEM, so bind such allocations with
* device atomics enabled.
- *
- * TODO: Revisit this. Perhaps add something like a
- * fault_on_atomics_in_system UAPI flag.
- * Note that this also prohibits GPU atomics in LR mode for
- * userptr and system memory on DGFX.
*/
return (!IS_DGFX(xe) || (!xe_vm_in_lr_mode(vm) ||
(bo && xe_bo_has_single_placement(bo))));
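For readers tracking the policy change above: both helpers now key off vma->attr.atomic_access, which userspace sets via madvise. A condensed, hedged restatement of the decision table they implement — the helper name and the single vram/system split are illustrative only; the DRM_XE_ATOMIC_* values are from the uAPI:

static bool sketch_wants_ae(struct xe_vm *vm, struct xe_vma *vma, bool vram)
{
	struct xe_device *xe = vm->xe;
	struct xe_bo *bo = xe_vma_bo(vma);

	if (vma->attr.atomic_access == DRM_XE_ATOMIC_CPU)
		return false;	/* CPU-only atomics: never set AE */
	if (vram)
		return true;	/* VRAM placements always get AE */
	if (!xe->info.has_device_atomics_on_smem)
		return false;	/* device atomics on SMEM unsupported */
	if (vma->attr.atomic_access == DRM_XE_ATOMIC_DEVICE)
		return true;	/* explicit request via madvise */
	/* default: rely on the fault-and-migrate heuristic */
	return !IS_DGFX(xe) || !xe_vm_in_lr_mode(vm) ||
	       (bo && xe_bo_has_single_placement(bo));
}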
@@ -725,7 +733,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
return -EAGAIN;
}
if (xe_svm_range_has_dma_mapping(range)) {
- xe_res_first_dma(range->base.dma_addr, 0,
+ xe_res_first_dma(range->base.pages.dma_addr, 0,
range->base.itree.last + 1 - range->base.itree.start,
&curs);
xe_svm_range_debug(range, "BIND PREPARE - MIXED");
@@ -744,8 +752,8 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
goto walk_pt;
if (vma->gpuva.flags & XE_VMA_ATOMIC_PTE_BIT) {
- xe_walk.default_vram_pte = xe_atomic_for_vram(vm) ? XE_USM_PPGTT_PTE_AE : 0;
- xe_walk.default_system_pte = xe_atomic_for_system(vm, bo) ?
+ xe_walk.default_vram_pte = xe_atomic_for_vram(vm, vma) ? XE_USM_PPGTT_PTE_AE : 0;
+ xe_walk.default_system_pte = xe_atomic_for_system(vm, vma) ?
XE_USM_PPGTT_PTE_AE : 0;
}
@@ -756,8 +764,8 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
if (!xe_vma_is_null(vma) && !range) {
if (xe_vma_is_userptr(vma))
- xe_res_first_sg(to_userptr_vma(vma)->userptr.sg, 0,
- xe_vma_size(vma), &curs);
+ xe_res_first_dma(to_userptr_vma(vma)->userptr.pages.dma_addr, 0,
+ xe_vma_size(vma), &curs);
else if (xe_bo_is_vram(bo) || xe_bo_is_stolen(bo))
xe_res_first(bo->ttm.resource, xe_vma_bo_offset(vma),
xe_vma_size(vma), &curs);
@@ -910,7 +918,7 @@ bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma)
if (xe_vma_bo(vma))
xe_bo_assert_held(xe_vma_bo(vma));
else if (xe_vma_is_userptr(vma))
- lockdep_assert_held(&xe_vma_vm(vma)->userptr.notifier_lock);
+ lockdep_assert_held(&xe_vma_vm(vma)->svm.gpusvm.notifier_lock);
if (!(pt_mask & BIT(tile->id)))
return false;
@@ -950,7 +958,19 @@ bool xe_pt_zap_ptes_range(struct xe_tile *tile, struct xe_vm *vm,
struct xe_pt *pt = vm->pt_root[tile->id];
u8 pt_mask = (range->tile_present & ~range->tile_invalidated);
- xe_svm_assert_in_notifier(vm);
+ /*
+ * Locking rules:
+ *
+ * - notifier_lock (write): full protection against page table changes
+ * and MMU notifier invalidations.
+ *
+ * - notifier_lock (read) + vm_lock (write): combined protection against
+	 *   invalidations and concurrent page table modifications (e.g., madvise).
+	 */
+ lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 0) ||
+ (lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) &&
+ lockdep_is_held_type(&vm->lock, 0)));
if (!(pt_mask & BIT(tile->id)))
return false;
@@ -1033,7 +1053,7 @@ static void xe_pt_commit_locks_assert(struct xe_vma *vma)
xe_pt_commit_prepare_locks_assert(vma);
if (xe_vma_is_userptr(vma))
- lockdep_assert_held_read(&vm->userptr.notifier_lock);
+ xe_svm_assert_held_read(vm);
}
static void xe_pt_commit(struct xe_vma *vma,
@@ -1261,6 +1281,8 @@ static int op_add_deps(struct xe_vm *vm, struct xe_vma_op *op,
}
static int xe_pt_vm_dependencies(struct xe_sched_job *job,
+ struct xe_tlb_inval_job *ijob,
+ struct xe_tlb_inval_job *mjob,
struct xe_vm *vm,
struct xe_vma_ops *vops,
struct xe_vm_pgtable_update_ops *pt_update_ops,
@@ -1328,6 +1350,20 @@ static int xe_pt_vm_dependencies(struct xe_sched_job *job,
for (i = 0; job && !err && i < vops->num_syncs; i++)
err = xe_sync_entry_add_deps(&vops->syncs[i], job);
+ if (job) {
+ if (ijob) {
+ err = xe_tlb_inval_job_alloc_dep(ijob);
+ if (err)
+ return err;
+ }
+
+ if (mjob) {
+ err = xe_tlb_inval_job_alloc_dep(mjob);
+ if (err)
+ return err;
+ }
+ }
+
return err;
}
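A note on ordering: the xe_tlb_inval_job_alloc_dep() calls above reserve dependency slots for the invalidation jobs at dependency-gathering time, presumably so allocation failures surface here, where an error can still be returned cleanly, rather than later in xe_pt_update_ops_run(), where the jobs are actually pushed with xe_tlb_inval_job_push().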
@@ -1339,10 +1375,12 @@ static int xe_pt_pre_commit(struct xe_migrate_pt_update *pt_update)
struct xe_vm_pgtable_update_ops *pt_update_ops =
&vops->pt_update_ops[pt_update->tile_id];
- return xe_pt_vm_dependencies(pt_update->job, vm, pt_update->vops,
+ return xe_pt_vm_dependencies(pt_update->job, pt_update->ijob,
+ pt_update->mjob, vm, pt_update->vops,
pt_update_ops, rftree);
}
+#if IS_ENABLED(CONFIG_DRM_GPUSVM)
#ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT
static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
@@ -1373,7 +1411,7 @@ static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
struct xe_userptr_vma *uvma;
unsigned long notifier_seq;
- lockdep_assert_held_read(&vm->userptr.notifier_lock);
+ xe_svm_assert_held_read(vm);
if (!xe_vma_is_userptr(vma))
return 0;
@@ -1382,7 +1420,7 @@ static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
if (xe_pt_userptr_inject_eagain(uvma))
xe_vma_userptr_force_invalidate(uvma);
- notifier_seq = uvma->userptr.notifier_seq;
+ notifier_seq = uvma->userptr.pages.notifier_seq;
if (!mmu_interval_read_retry(&uvma->userptr.notifier,
notifier_seq))
@@ -1398,12 +1436,12 @@ static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
return 0;
}
-static int op_check_userptr(struct xe_vm *vm, struct xe_vma_op *op,
- struct xe_vm_pgtable_update_ops *pt_update)
+static int op_check_svm_userptr(struct xe_vm *vm, struct xe_vma_op *op,
+ struct xe_vm_pgtable_update_ops *pt_update)
{
int err = 0;
- lockdep_assert_held_read(&vm->userptr.notifier_lock);
+ xe_svm_assert_held_read(vm);
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
@@ -1421,64 +1459,10 @@ static int op_check_userptr(struct xe_vm *vm, struct xe_vma_op *op,
case DRM_GPUVA_OP_UNMAP:
break;
case DRM_GPUVA_OP_PREFETCH:
- err = vma_check_userptr(vm, gpuva_to_vma(op->base.prefetch.va),
- pt_update);
- break;
- default:
- drm_warn(&vm->xe->drm, "NOT POSSIBLE");
- }
-
- return err;
-}
-
-static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
-{
- struct xe_vm *vm = pt_update->vops->vm;
- struct xe_vma_ops *vops = pt_update->vops;
- struct xe_vm_pgtable_update_ops *pt_update_ops =
- &vops->pt_update_ops[pt_update->tile_id];
- struct xe_vma_op *op;
- int err;
-
- err = xe_pt_pre_commit(pt_update);
- if (err)
- return err;
-
- down_read(&vm->userptr.notifier_lock);
-
- list_for_each_entry(op, &vops->list, link) {
- err = op_check_userptr(vm, op, pt_update_ops);
- if (err) {
- up_read(&vm->userptr.notifier_lock);
- break;
- }
- }
-
- return err;
-}
-
-#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
-static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
-{
- struct xe_vm *vm = pt_update->vops->vm;
- struct xe_vma_ops *vops = pt_update->vops;
- struct xe_vma_op *op;
- unsigned long i;
- int err;
-
- err = xe_pt_pre_commit(pt_update);
- if (err)
- return err;
-
- xe_svm_notifier_lock(vm);
-
- list_for_each_entry(op, &vops->list, link) {
- struct xe_svm_range *range = NULL;
-
- if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
- continue;
+ if (xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.prefetch.va))) {
+ struct xe_svm_range *range = op->map_range.range;
+ unsigned long i;
- if (op->base.op == DRM_GPUVA_OP_PREFETCH) {
xe_assert(vm->xe,
xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.prefetch.va)));
xa_for_each(&op->prefetch_range.range, i, range) {
@@ -1486,97 +1470,62 @@ static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
if (!xe_svm_range_pages_valid(range)) {
xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
- xe_svm_notifier_unlock(vm);
return -ENODATA;
}
}
} else {
+ err = vma_check_userptr(vm, gpuva_to_vma(op->base.prefetch.va), pt_update);
+ }
+ break;
+#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
+ case DRM_GPUVA_OP_DRIVER:
+ if (op->subop == XE_VMA_SUBOP_MAP_RANGE) {
+ struct xe_svm_range *range = op->map_range.range;
+
xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));
- xe_assert(vm->xe, op->subop == XE_VMA_SUBOP_MAP_RANGE);
- range = op->map_range.range;
xe_svm_range_debug(range, "PRE-COMMIT");
if (!xe_svm_range_pages_valid(range)) {
xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
- xe_svm_notifier_unlock(vm);
return -EAGAIN;
}
}
- }
-
- return 0;
-}
+ break;
#endif
-
-struct invalidation_fence {
- struct xe_gt_tlb_invalidation_fence base;
- struct xe_gt *gt;
- struct dma_fence *fence;
- struct dma_fence_cb cb;
- struct work_struct work;
- u64 start;
- u64 end;
- u32 asid;
-};
-
-static void invalidation_fence_cb(struct dma_fence *fence,
- struct dma_fence_cb *cb)
-{
- struct invalidation_fence *ifence =
- container_of(cb, struct invalidation_fence, cb);
- struct xe_device *xe = gt_to_xe(ifence->gt);
-
- trace_xe_gt_tlb_invalidation_fence_cb(xe, &ifence->base);
- if (!ifence->fence->error) {
- queue_work(system_wq, &ifence->work);
- } else {
- ifence->base.base.error = ifence->fence->error;
- xe_gt_tlb_invalidation_fence_signal(&ifence->base);
+ default:
+ drm_warn(&vm->xe->drm, "NOT POSSIBLE");
}
- dma_fence_put(ifence->fence);
-}
-static void invalidation_fence_work_func(struct work_struct *w)
-{
- struct invalidation_fence *ifence =
- container_of(w, struct invalidation_fence, work);
- struct xe_device *xe = gt_to_xe(ifence->gt);
-
- trace_xe_gt_tlb_invalidation_fence_work_func(xe, &ifence->base);
- xe_gt_tlb_invalidation_range(ifence->gt, &ifence->base, ifence->start,
- ifence->end, ifence->asid);
+ return err;
}
-static void invalidation_fence_init(struct xe_gt *gt,
- struct invalidation_fence *ifence,
- struct dma_fence *fence,
- u64 start, u64 end, u32 asid)
+static int xe_pt_svm_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
{
- int ret;
-
- trace_xe_gt_tlb_invalidation_fence_create(gt_to_xe(gt), &ifence->base);
+ struct xe_vm *vm = pt_update->vops->vm;
+ struct xe_vma_ops *vops = pt_update->vops;
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &vops->pt_update_ops[pt_update->tile_id];
+ struct xe_vma_op *op;
+ int err;
- xe_gt_tlb_invalidation_fence_init(gt, &ifence->base, false);
+ err = xe_pt_pre_commit(pt_update);
+ if (err)
+ return err;
- ifence->fence = fence;
- ifence->gt = gt;
- ifence->start = start;
- ifence->end = end;
- ifence->asid = asid;
+ xe_svm_notifier_lock(vm);
- INIT_WORK(&ifence->work, invalidation_fence_work_func);
- ret = dma_fence_add_callback(fence, &ifence->cb, invalidation_fence_cb);
- if (ret == -ENOENT) {
- dma_fence_put(ifence->fence); /* Usually dropped in CB */
- invalidation_fence_work_func(&ifence->work);
- } else if (ret) {
- dma_fence_put(&ifence->base.base); /* Caller ref */
- dma_fence_put(&ifence->base.base); /* Creation ref */
+ list_for_each_entry(op, &vops->list, link) {
+ err = op_check_svm_userptr(vm, op, pt_update_ops);
+ if (err) {
+ xe_svm_notifier_unlock(vm);
+ break;
+ }
}
- xe_gt_assert(gt, !ret || ret == -ENOENT);
+ return err;
}
+#endif
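To summarize the consolidation above: the former xe_pt_userptr_pre_commit() and xe_pt_svm_pre_commit() are folded into a single xe_pt_svm_userptr_pre_commit() that takes the GPUSVM notifier lock once and dispatches per operation through op_check_svm_userptr(), so userptr validation (retried via -EAGAIN) and SVM range validation (retried via -ENODATA/-EAGAIN) now share one pre-commit path guarded by CONFIG_DRM_GPUSVM.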
struct xe_pt_stage_unbind_walk {
/** @base: The pagewalk base-class. */
@@ -1879,7 +1828,7 @@ static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
xe_vma_start(vma),
xe_vma_end(vma));
++pt_update_ops->current_op;
- pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
+ pt_update_ops->needs_svm_lock |= xe_vma_is_userptr(vma);
/*
* If rebind, we have to invalidate TLB on !LR vms to invalidate
@@ -1987,7 +1936,7 @@ static int unbind_op_prepare(struct xe_tile *tile,
xe_pt_update_ops_rfence_interval(pt_update_ops, xe_vma_start(vma),
xe_vma_end(vma));
++pt_update_ops->current_op;
- pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
+ pt_update_ops->needs_svm_lock |= xe_vma_is_userptr(vma);
pt_update_ops->needs_invalidation = true;
xe_pt_commit_prepare_unbind(vma, pt_op->entries, pt_op->num_entries);
@@ -2235,7 +2184,7 @@ static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
vma->tile_invalidated & ~BIT(tile->id));
vma->tile_staged &= ~BIT(tile->id);
if (xe_vma_is_userptr(vma)) {
- lockdep_assert_held_read(&vm->userptr.notifier_lock);
+ xe_svm_assert_held_read(vm);
to_userptr_vma(vma)->userptr.initial_bind = true;
}
@@ -2271,7 +2220,7 @@ static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
if (!vma->tile_present) {
list_del_init(&vma->combined_links.rebind);
if (xe_vma_is_userptr(vma)) {
- lockdep_assert_held_read(&vm->userptr.notifier_lock);
+ xe_svm_assert_held_read(vm);
spin_lock(&vm->userptr.invalidated_lock);
list_del_init(&to_userptr_vma(vma)->userptr.invalidate_link);
@@ -2374,22 +2323,25 @@ static const struct xe_migrate_pt_update_ops migrate_ops = {
.pre_commit = xe_pt_pre_commit,
};
-static const struct xe_migrate_pt_update_ops userptr_migrate_ops = {
+#if IS_ENABLED(CONFIG_DRM_GPUSVM)
+static const struct xe_migrate_pt_update_ops svm_userptr_migrate_ops = {
.populate = xe_vm_populate_pgtable,
.clear = xe_migrate_clear_pgtable_callback,
- .pre_commit = xe_pt_userptr_pre_commit,
-};
-
-#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
-static const struct xe_migrate_pt_update_ops svm_migrate_ops = {
- .populate = xe_vm_populate_pgtable,
- .clear = xe_migrate_clear_pgtable_callback,
- .pre_commit = xe_pt_svm_pre_commit,
+ .pre_commit = xe_pt_svm_userptr_pre_commit,
};
#else
-static const struct xe_migrate_pt_update_ops svm_migrate_ops;
+static const struct xe_migrate_pt_update_ops svm_userptr_migrate_ops;
#endif
+static struct xe_dep_scheduler *to_dep_scheduler(struct xe_exec_queue *q,
+ struct xe_gt *gt)
+{
+ if (xe_gt_is_media_type(gt))
+ return q->tlb_inval[XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT].dep_scheduler;
+
+ return q->tlb_inval[XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT].dep_scheduler;
+}
+
/**
* xe_pt_update_ops_run() - Run PT update operations
* @tile: Tile of PT update operations
@@ -2407,8 +2359,8 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
struct xe_vm *vm = vops->vm;
struct xe_vm_pgtable_update_ops *pt_update_ops =
&vops->pt_update_ops[tile->id];
- struct dma_fence *fence;
- struct invalidation_fence *ifence = NULL, *mfence = NULL;
+ struct dma_fence *fence, *ifence, *mfence;
+ struct xe_tlb_inval_job *ijob = NULL, *mjob = NULL;
struct dma_fence **fences = NULL;
struct dma_fence_array *cf = NULL;
struct xe_range_fence *rfence;
@@ -2416,9 +2368,7 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
int err = 0, i;
struct xe_migrate_pt_update update = {
.ops = pt_update_ops->needs_svm_lock ?
- &svm_migrate_ops :
- pt_update_ops->needs_userptr_lock ?
- &userptr_migrate_ops :
+ &svm_userptr_migrate_ops :
&migrate_ops,
.vops = vops,
.tile_id = tile->id,
@@ -2440,26 +2390,45 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
#endif
if (pt_update_ops->needs_invalidation) {
- ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
- if (!ifence) {
- err = -ENOMEM;
+ struct xe_exec_queue *q = pt_update_ops->q;
+ struct xe_dep_scheduler *dep_scheduler =
+ to_dep_scheduler(q, tile->primary_gt);
+
+ ijob = xe_tlb_inval_job_create(q, &tile->primary_gt->tlb_inval,
+ dep_scheduler,
+ pt_update_ops->start,
+ pt_update_ops->last,
+ vm->usm.asid);
+ if (IS_ERR(ijob)) {
+ err = PTR_ERR(ijob);
goto kill_vm_tile1;
}
+ update.ijob = ijob;
+
if (tile->media_gt) {
- mfence = kzalloc(sizeof(*ifence), GFP_KERNEL);
- if (!mfence) {
- err = -ENOMEM;
- goto free_ifence;
+ dep_scheduler = to_dep_scheduler(q, tile->media_gt);
+
+ mjob = xe_tlb_inval_job_create(q,
+ &tile->media_gt->tlb_inval,
+ dep_scheduler,
+ pt_update_ops->start,
+ pt_update_ops->last,
+ vm->usm.asid);
+ if (IS_ERR(mjob)) {
+ err = PTR_ERR(mjob);
+ goto free_ijob;
}
+ update.mjob = mjob;
+
fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
if (!fences) {
err = -ENOMEM;
- goto free_ifence;
+ goto free_ijob;
}
cf = dma_fence_array_alloc(2);
if (!cf) {
err = -ENOMEM;
- goto free_ifence;
+ goto free_ijob;
}
}
}
@@ -2467,7 +2436,7 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);
if (!rfence) {
err = -ENOMEM;
- goto free_ifence;
+ goto free_ijob;
}
fence = xe_migrate_update_pgtables(tile->migrate, &update);
@@ -2491,30 +2460,31 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
pt_update_ops->last, fence))
dma_fence_wait(fence, false);
- /* tlb invalidation must be done before signaling rebind */
- if (ifence) {
- if (mfence)
- dma_fence_get(fence);
- invalidation_fence_init(tile->primary_gt, ifence, fence,
- pt_update_ops->start,
- pt_update_ops->last, vm->usm.asid);
- if (mfence) {
- invalidation_fence_init(tile->media_gt, mfence, fence,
- pt_update_ops->start,
- pt_update_ops->last, vm->usm.asid);
- fences[0] = &ifence->base.base;
- fences[1] = &mfence->base.base;
+ /* tlb invalidation must be done before signaling unbind/rebind */
+ if (ijob) {
+ struct dma_fence *__fence;
+
+ ifence = xe_tlb_inval_job_push(ijob, tile->migrate, fence);
+ __fence = ifence;
+
+ if (mjob) {
+ fences[0] = ifence;
+ mfence = xe_tlb_inval_job_push(mjob, tile->migrate,
+ fence);
+ fences[1] = mfence;
+
dma_fence_array_init(cf, 2, fences,
vm->composite_fence_ctx,
vm->composite_fence_seqno++,
false);
- fence = &cf->base;
- } else {
- fence = &ifence->base.base;
+ __fence = &cf->base;
}
+
+ dma_fence_put(fence);
+ fence = __fence;
}
- if (!mfence) {
+ if (!mjob) {
dma_resv_add_fence(xe_vm_resv(vm), fence,
pt_update_ops->wait_vm_bookkeep ?
DMA_RESV_USAGE_KERNEL :
@@ -2523,35 +2493,36 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
list_for_each_entry(op, &vops->list, link)
op_commit(vops->vm, tile, pt_update_ops, op, fence, NULL);
} else {
- dma_resv_add_fence(xe_vm_resv(vm), &ifence->base.base,
+ dma_resv_add_fence(xe_vm_resv(vm), ifence,
pt_update_ops->wait_vm_bookkeep ?
DMA_RESV_USAGE_KERNEL :
DMA_RESV_USAGE_BOOKKEEP);
- dma_resv_add_fence(xe_vm_resv(vm), &mfence->base.base,
+ dma_resv_add_fence(xe_vm_resv(vm), mfence,
pt_update_ops->wait_vm_bookkeep ?
DMA_RESV_USAGE_KERNEL :
DMA_RESV_USAGE_BOOKKEEP);
list_for_each_entry(op, &vops->list, link)
- op_commit(vops->vm, tile, pt_update_ops, op,
- &ifence->base.base, &mfence->base.base);
+ op_commit(vops->vm, tile, pt_update_ops, op, ifence,
+ mfence);
}
if (pt_update_ops->needs_svm_lock)
xe_svm_notifier_unlock(vm);
- if (pt_update_ops->needs_userptr_lock)
- up_read(&vm->userptr.notifier_lock);
+
+ xe_tlb_inval_job_put(mjob);
+ xe_tlb_inval_job_put(ijob);
return fence;
free_rfence:
kfree(rfence);
-free_ifence:
+free_ijob:
kfree(cf);
kfree(fences);
- kfree(mfence);
- kfree(ifence);
+ xe_tlb_inval_job_put(mjob);
+ xe_tlb_inval_job_put(ijob);
kill_vm_tile1:
if (err != -EAGAIN && err != -ENODATA && tile->id)
xe_vm_kill(vops->vm, false);
diff --git a/drivers/gpu/drm/xe/xe_pt.h b/drivers/gpu/drm/xe/xe_pt.h
index 5ecf003d513c..4daeebaab5a1 100644
--- a/drivers/gpu/drm/xe/xe_pt.h
+++ b/drivers/gpu/drm/xe/xe_pt.h
@@ -10,6 +10,7 @@
#include "xe_pt_types.h"
struct dma_fence;
+struct drm_exec;
struct xe_bo;
struct xe_device;
struct xe_exec_queue;
@@ -29,7 +30,7 @@ struct xe_vma_ops;
unsigned int xe_pt_shift(unsigned int level);
struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
- unsigned int level);
+ unsigned int level, struct drm_exec *exec);
void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
struct xe_pt *pt);
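xe_pt_create() now validates its backing BO under the caller's drm_exec transaction, so any new caller must hold one. A minimal hedged sketch of the calling pattern — the drm_exec loop is the standard idiom, and xe_vm_obj() is assumed to return the VM's GEM object as elsewhere in the driver:

static struct xe_pt *pt_create_locked(struct xe_vm *vm, struct xe_tile *tile,
				      unsigned int level)
{
	struct xe_pt *pt = ERR_PTR(-EINTR);
	struct drm_exec exec;
	int err;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&exec) {
		err = drm_exec_lock_obj(&exec, xe_vm_obj(vm));
		drm_exec_retry_on_contention(&exec);
		if (err) {
			pt = ERR_PTR(err);
			break;
		}
		pt = xe_pt_create(vm, tile, level, &exec);
	}
	drm_exec_fini(&exec);
	return pt;
}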
diff --git a/drivers/gpu/drm/xe/xe_pt_types.h b/drivers/gpu/drm/xe/xe_pt_types.h
index 69eab6f37cfe..881f01e14db8 100644
--- a/drivers/gpu/drm/xe/xe_pt_types.h
+++ b/drivers/gpu/drm/xe/xe_pt_types.h
@@ -45,8 +45,7 @@ struct xe_pt_ops {
u64 (*pte_encode_addr)(struct xe_device *xe, u64 addr,
u16 pat_index,
u32 pt_level, bool devmem, u64 flags);
- u64 (*pde_encode_bo)(struct xe_bo *bo, u64 bo_offset,
- u16 pat_index);
+ u64 (*pde_encode_bo)(struct xe_bo *bo, u64 bo_offset);
};
struct xe_pt_entry {
@@ -106,8 +105,6 @@ struct xe_vm_pgtable_update_ops {
u32 current_op;
/** @needs_svm_lock: Needs SVM lock */
bool needs_svm_lock;
- /** @needs_userptr_lock: Needs userptr lock */
- bool needs_userptr_lock;
/** @needs_invalidation: Needs invalidation */
bool needs_invalidation;
/**
diff --git a/drivers/gpu/drm/xe/xe_pxp.c b/drivers/gpu/drm/xe/xe_pxp.c
index 3d62008c99f1..bdbdbbf6a678 100644
--- a/drivers/gpu/drm/xe/xe_pxp.c
+++ b/drivers/gpu/drm/xe/xe_pxp.c
@@ -688,6 +688,7 @@ start:
return ret;
}
+ALLOW_ERROR_INJECTION(xe_pxp_exec_queue_add, ERRNO);
static void __pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q, bool lock)
{
diff --git a/drivers/gpu/drm/xe/xe_pxp_submit.c b/drivers/gpu/drm/xe/xe_pxp_submit.c
index d92ec0f515b0..e60526e30030 100644
--- a/drivers/gpu/drm/xe/xe_pxp_submit.c
+++ b/drivers/gpu/drm/xe/xe_pxp_submit.c
@@ -54,8 +54,9 @@ static int allocate_vcs_execution_resources(struct xe_pxp *pxp)
* Each termination is 16 DWORDS, so 4K is enough to contain a
	 * termination for each session.
*/
- bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K, ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED | XE_BO_FLAG_GGTT);
+ bo = xe_bo_create_pin_map_novm(xe, tile, SZ_4K, ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED | XE_BO_FLAG_GGTT,
+ false);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto out_queue;
@@ -87,7 +88,9 @@ static int allocate_gsc_client_resources(struct xe_gt *gt,
{
struct xe_tile *tile = gt_to_tile(gt);
struct xe_device *xe = tile_to_xe(tile);
+ struct xe_validation_ctx ctx;
struct xe_hw_engine *hwe;
+ struct drm_exec exec;
struct xe_vm *vm;
struct xe_bo *bo;
struct xe_exec_queue *q;
@@ -101,20 +104,31 @@ static int allocate_gsc_client_resources(struct xe_gt *gt,
xe_assert(xe, hwe);
/* PXP instructions must be issued from PPGTT */
- vm = xe_vm_create(xe, XE_VM_FLAG_GSC);
+ vm = xe_vm_create(xe, XE_VM_FLAG_GSC, NULL);
if (IS_ERR(vm))
return PTR_ERR(vm);
/* We allocate a single object for the batch and the in/out memory */
- xe_vm_lock(vm, false);
- bo = xe_bo_create_pin_map(xe, tile, vm, PXP_BB_SIZE + inout_size * 2,
- ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED | XE_BO_FLAG_NEEDS_UC);
- xe_vm_unlock(vm);
- if (IS_ERR(bo)) {
- err = PTR_ERR(bo);
- goto vm_out;
+
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags){}, err) {
+ err = xe_vm_drm_exec_lock(vm, &exec);
+ drm_exec_retry_on_contention(&exec);
+ if (err)
+ break;
+
+ bo = xe_bo_create_pin_map(xe, tile, vm, PXP_BB_SIZE + inout_size * 2,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED |
+ XE_BO_FLAG_NEEDS_UC, &exec);
+ drm_exec_retry_on_contention(&exec);
+ if (IS_ERR(bo)) {
+ err = PTR_ERR(bo);
+ xe_validation_retry_on_oom(&ctx, &err);
+ break;
+ }
}
+ if (err)
+ goto vm_out;
fence = xe_vm_bind_kernel_bo(vm, bo, NULL, 0, XE_CACHE_WB);
if (IS_ERR(fence)) {
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index d517ec9ddcbf..2e9ff33ed2fe 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -21,12 +21,14 @@
#include "xe_force_wake.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
+#include "xe_gt_topology.h"
#include "xe_guc_hwconfig.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_oa.h"
#include "xe_pxp.h"
#include "xe_ttm_vram_mgr.h"
+#include "xe_vram_types.h"
#include "xe_wa.h"
static const u16 xe_to_user_engine_class[] = {
@@ -274,8 +276,7 @@ static int query_mem_regions(struct xe_device *xe,
mem_regions->mem_regions[0].instance = 0;
mem_regions->mem_regions[0].min_page_size = PAGE_SIZE;
mem_regions->mem_regions[0].total_size = man->size << PAGE_SHIFT;
- if (perfmon_capable())
- mem_regions->mem_regions[0].used = ttm_resource_manager_usage(man);
+ mem_regions->mem_regions[0].used = ttm_resource_manager_usage(man);
mem_regions->num_mem_regions = 1;
for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
@@ -291,13 +292,11 @@ static int query_mem_regions(struct xe_device *xe,
mem_regions->mem_regions[mem_regions->num_mem_regions].total_size =
man->size;
- if (perfmon_capable()) {
- xe_ttm_vram_get_used(man,
- &mem_regions->mem_regions
- [mem_regions->num_mem_regions].used,
- &mem_regions->mem_regions
- [mem_regions->num_mem_regions].cpu_visible_used);
- }
+ xe_ttm_vram_get_used(man,
+ &mem_regions->mem_regions
+ [mem_regions->num_mem_regions].used,
+ &mem_regions->mem_regions
+ [mem_regions->num_mem_regions].cpu_visible_used);
mem_regions->mem_regions[mem_regions->num_mem_regions].cpu_visible_size =
xe_ttm_vram_get_cpu_visible_size(man);
@@ -337,7 +336,7 @@ static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
config->num_params = num_params;
config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] =
xe->info.devid | (xe->info.revid << 16);
- if (xe_device_get_root_tile(xe)->mem.vram.usable_size)
+ if (xe->mem.vram)
config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM;
if (xe->info.has_usm && IS_ENABLED(CONFIG_DRM_XE_GPUSVM))
@@ -410,7 +409,7 @@ static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query
gt_list->gt_list[iter].near_mem_regions = 0x1;
else
gt_list->gt_list[iter].near_mem_regions =
- BIT(gt_to_tile(gt)->id) << 1;
+ BIT(gt_to_tile(gt)->mem.vram->id) << 1;
gt_list->gt_list[iter].far_mem_regions = xe->info.mem_region_mask ^
gt_list->gt_list[iter].near_mem_regions;
@@ -476,7 +475,7 @@ static size_t calc_topo_query_size(struct xe_device *xe)
sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss);
/* L3bank mask may not be available for some GTs */
- if (!XE_WA(gt, no_media_l3))
+ if (xe_gt_topology_report_l3(gt))
query_size += sizeof(struct drm_xe_query_topology_mask) +
sizeof_field(struct xe_gt, fuse_topo.l3_bank_mask);
}
@@ -539,7 +538,7 @@ static int query_gt_topology(struct xe_device *xe,
* mask, then it's better to omit L3 from the query rather than
* reporting bogus or zeroed information to userspace.
*/
- if (!XE_WA(gt, no_media_l3)) {
+ if (xe_gt_topology_report_l3(gt)) {
topo.type = DRM_XE_TOPO_L3_BANK;
err = copy_mask(&query_ptr, &topo, gt->fuse_topo.l3_bank_mask,
sizeof(gt->fuse_topo.l3_bank_mask));
@@ -748,10 +747,8 @@ static int query_eu_stall(struct xe_device *xe,
u32 num_rates;
int ret;
- if (!xe_eu_stall_supported_on_platform(xe)) {
- drm_dbg(&xe->drm, "EU stall monitoring is not supported on this platform\n");
+ if (!xe_eu_stall_supported_on_platform(xe))
return -ENODEV;
- }
array_size = xe_eu_stall_get_sampling_rates(&num_rates, &rates);
size = sizeof(struct drm_xe_query_eu_stall) + array_size;
diff --git a/drivers/gpu/drm/xe/xe_res_cursor.h b/drivers/gpu/drm/xe/xe_res_cursor.h
index d1a403cfb628..4e00008b7081 100644
--- a/drivers/gpu/drm/xe/xe_res_cursor.h
+++ b/drivers/gpu/drm/xe/xe_res_cursor.h
@@ -55,8 +55,8 @@ struct xe_res_cursor {
u32 mem_type;
/** @sgl: Scatterlist for cursor */
struct scatterlist *sgl;
- /** @dma_addr: Current element in a struct drm_pagemap_device_addr array */
- const struct drm_pagemap_device_addr *dma_addr;
+ /** @dma_addr: Current element in a struct drm_pagemap_addr array */
+ const struct drm_pagemap_addr *dma_addr;
/** @mm: Buddy allocator for VRAM cursor */
struct drm_buddy *mm;
/**
@@ -170,7 +170,7 @@ static inline void __xe_res_sg_next(struct xe_res_cursor *cur)
*/
static inline void __xe_res_dma_next(struct xe_res_cursor *cur)
{
- const struct drm_pagemap_device_addr *addr = cur->dma_addr;
+ const struct drm_pagemap_addr *addr = cur->dma_addr;
u64 start = cur->start;
while (start >= cur->dma_seg_size) {
@@ -222,14 +222,14 @@ static inline void xe_res_first_sg(const struct sg_table *sg,
/**
* xe_res_first_dma - initialize a xe_res_cursor with dma_addr array
*
- * @dma_addr: struct drm_pagemap_device_addr array to walk
+ * @dma_addr: struct drm_pagemap_addr array to walk
* @start: Start of the range
* @size: Size of the range
* @cur: cursor object to initialize
*
* Start walking over the range of allocations between @start and @size.
*/
-static inline void xe_res_first_dma(const struct drm_pagemap_device_addr *dma_addr,
+static inline void xe_res_first_dma(const struct drm_pagemap_addr *dma_addr,
u64 start, u64 size,
struct xe_res_cursor *cur)
{
diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
index 7b50c7c1ee21..d71837773d6c 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops.c
+++ b/drivers/gpu/drm/xe/xe_ring_ops.c
@@ -110,10 +110,10 @@ static int emit_bb_start(u64 batch_addr, u32 ppgtt_flag, u32 *dw, int i)
return i;
}
-static int emit_flush_invalidate(u32 addr, u32 val, u32 *dw, int i)
+static int emit_flush_invalidate(u32 addr, u32 val, u32 flush_flags, u32 *dw, int i)
{
- dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW |
- MI_FLUSH_IMM_DW;
+ dw[i++] = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW |
+ MI_FLUSH_IMM_DW | (flush_flags & MI_INVALIDATE_TLB) ?: 0;
dw[i++] = addr | MI_FLUSH_DW_USE_GTT;
dw[i++] = 0;
@@ -179,7 +179,7 @@ static int emit_render_cache_flush(struct xe_sched_job *job, u32 *dw, int i)
bool lacks_render = !(gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK);
u32 flags;
- if (XE_WA(gt, 14016712196))
+ if (XE_GT_WA(gt, 14016712196))
i = emit_pipe_control(dw, i, 0, PIPE_CONTROL_DEPTH_CACHE_FLUSH,
LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR, 0);
@@ -190,7 +190,7 @@ static int emit_render_cache_flush(struct xe_sched_job *job, u32 *dw, int i)
PIPE_CONTROL_DC_FLUSH_ENABLE |
PIPE_CONTROL_FLUSH_ENABLE);
- if (XE_WA(gt, 1409600907))
+ if (XE_GT_WA(gt, 1409600907))
flags |= PIPE_CONTROL_DEPTH_STALL;
if (lacks_render)
@@ -206,7 +206,7 @@ static int emit_pipe_control_to_ring_end(struct xe_hw_engine *hwe, u32 *dw, int
if (hwe->class != XE_ENGINE_CLASS_RENDER)
return i;
- if (XE_WA(hwe->gt, 16020292621))
+ if (XE_GT_WA(hwe->gt, 16020292621))
i = emit_pipe_control(dw, i, 0, PIPE_CONTROL_LRI_POST_SYNC,
RING_NOPID(hwe->mmio_base).addr, 0);
@@ -410,16 +410,14 @@ static void emit_migration_job_gen12(struct xe_sched_job *job,
i = emit_bb_start(job->ptrs[0].batch_addr, BIT(8), dw, i);
dw[i++] = preparser_disable(true);
- i = emit_flush_invalidate(saddr, seqno, dw, i);
+ i = emit_flush_invalidate(saddr, seqno, job->migrate_flush_flags, dw, i);
dw[i++] = preparser_disable(false);
i = emit_bb_start(job->ptrs[1].batch_addr, BIT(8), dw, i);
- dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | job->migrate_flush_flags |
- MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_IMM_DW;
- dw[i++] = xe_lrc_seqno_ggtt_addr(lrc) | MI_FLUSH_DW_USE_GTT;
- dw[i++] = 0;
- dw[i++] = seqno; /* value */
+ i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno,
+ job->migrate_flush_flags,
+ dw, i);
i = emit_user_interrupt(dw, i);
diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c
index 95571b87aa73..b5f430d59f80 100644
--- a/drivers/gpu/drm/xe/xe_rtp.c
+++ b/drivers/gpu/drm/xe/xe_rtp.c
@@ -9,6 +9,7 @@
#include <uapi/drm/xe_drm.h>
+#include "xe_configfs.h"
#include "xe_gt.h"
#include "xe_gt_topology.h"
#include "xe_macros.h"
@@ -363,3 +364,15 @@ bool xe_rtp_match_not_sriov_vf(const struct xe_gt *gt,
{
return !IS_SRIOV_VF(gt_to_xe(gt));
}
+
+bool xe_rtp_match_psmi_enabled(const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe)
+{
+ return xe_configfs_get_psmi_enabled(to_pci_dev(gt_to_xe(gt)->drm.dev));
+}
+
+bool xe_rtp_match_gt_has_discontiguous_dss_groups(const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe)
+{
+ return xe_gt_has_discontiguous_dss_groups(gt);
+}
diff --git a/drivers/gpu/drm/xe/xe_rtp.h b/drivers/gpu/drm/xe/xe_rtp.h
index 5ed6c14b9ae3..ac12ddf6cde6 100644
--- a/drivers/gpu/drm/xe/xe_rtp.h
+++ b/drivers/gpu/drm/xe/xe_rtp.h
@@ -477,4 +477,10 @@ bool xe_rtp_match_first_render_or_compute(const struct xe_gt *gt,
bool xe_rtp_match_not_sriov_vf(const struct xe_gt *gt,
const struct xe_hw_engine *hwe);
+bool xe_rtp_match_psmi_enabled(const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe);
+
+bool xe_rtp_match_gt_has_discontiguous_dss_groups(const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe);
+
#endif
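Match functions like the two declared above are consumed by RTP rule tables through the FUNC() rule. An illustrative entry follows — EXAMPLE_REG and EXAMPLE_BIT are placeholders, not a real workaround; only the macro shapes follow the existing xe_rtp.h conventions:

static const struct xe_rtp_entry_sr example_entries[] = {
	{ XE_RTP_NAME("EXAMPLE-psmi-capture"),
	  XE_RTP_RULES(FUNC(xe_rtp_match_psmi_enabled)),
	  XE_RTP_ACTIONS(SET(EXAMPLE_REG, EXAMPLE_BIT))
	},
	{}
};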
diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c
index 1d43e183ca21..fedd017d6dd3 100644
--- a/drivers/gpu/drm/xe/xe_sa.c
+++ b/drivers/gpu/drm/xe/xe_sa.c
@@ -69,7 +69,6 @@ struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u3
}
sa_manager->bo = bo;
sa_manager->is_iomem = bo->vmap.is_iomem;
- sa_manager->gpu_addr = xe_bo_ggtt_addr(bo);
if (bo->vmap.is_iomem) {
sa_manager->cpu_ptr = kvzalloc(managed_size, GFP_KERNEL);
diff --git a/drivers/gpu/drm/xe/xe_sa.h b/drivers/gpu/drm/xe/xe_sa.h
index 1170ee5a81a8..99dbf0eea540 100644
--- a/drivers/gpu/drm/xe/xe_sa.h
+++ b/drivers/gpu/drm/xe/xe_sa.h
@@ -7,6 +7,8 @@
#include <linux/sizes.h>
#include <linux/types.h>
+
+#include "xe_bo.h"
#include "xe_sa_types.h"
struct dma_fence;
@@ -43,9 +45,20 @@ to_xe_sa_manager(struct drm_suballoc_manager *mng)
return container_of(mng, struct xe_sa_manager, base);
}
+/**
+ * xe_sa_manager_gpu_addr - Retrieve the GPU address of the suballocator's
+ * backing storage BO.
+ * @sa_manager: the &xe_sa_manager struct instance
+ * Return: GGTT address of the backing storage BO.
+ */
+static inline u64 xe_sa_manager_gpu_addr(struct xe_sa_manager *sa_manager)
+{
+ return xe_bo_ggtt_addr(sa_manager->bo);
+}
+
static inline u64 xe_sa_bo_gpu_addr(struct drm_suballoc *sa)
{
- return to_xe_sa_manager(sa->manager)->gpu_addr +
+ return xe_sa_manager_gpu_addr(to_xe_sa_manager(sa->manager)) +
drm_suballoc_soffset(sa);
}
diff --git a/drivers/gpu/drm/xe/xe_sa_types.h b/drivers/gpu/drm/xe/xe_sa_types.h
index 2b070ff1292e..cb7238799dcb 100644
--- a/drivers/gpu/drm/xe/xe_sa_types.h
+++ b/drivers/gpu/drm/xe/xe_sa_types.h
@@ -12,7 +12,6 @@ struct xe_bo;
struct xe_sa_manager {
struct drm_suballoc_manager base;
struct xe_bo *bo;
- u64 gpu_addr;
void *cpu_ptr;
bool is_iomem;
};
diff --git a/drivers/gpu/drm/xe/xe_shrinker.c b/drivers/gpu/drm/xe/xe_shrinker.c
index 1c3c04d52f55..90244fe59b59 100644
--- a/drivers/gpu/drm/xe/xe_shrinker.c
+++ b/drivers/gpu/drm/xe/xe_shrinker.c
@@ -54,10 +54,10 @@ xe_shrinker_mod_pages(struct xe_shrinker *shrinker, long shrinkable, long purgea
write_unlock(&shrinker->lock);
}
-static s64 xe_shrinker_walk(struct xe_device *xe,
- struct ttm_operation_ctx *ctx,
- const struct xe_bo_shrink_flags flags,
- unsigned long to_scan, unsigned long *scanned)
+static s64 __xe_shrinker_walk(struct xe_device *xe,
+ struct ttm_operation_ctx *ctx,
+ const struct xe_bo_shrink_flags flags,
+ unsigned long to_scan, unsigned long *scanned)
{
unsigned int mem_type;
s64 freed = 0, lret;
@@ -93,6 +93,48 @@ static s64 xe_shrinker_walk(struct xe_device *xe,
return freed;
}
+/*
+ * Try shrinking idle objects without writeback first, then if not sufficient,
+ * try also non-idle objects and finally if that's not sufficient either,
+ * add writeback. This avoids stalls and explicit writebacks with light or
+ * moderate memory pressure.
+ */
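+/*
+ * Concretely, the wrapper below escalates in three passes: pass 1 forces
+ * no_wait_gpu and clears writeback (idle BOs only, no I/O); pass 2 runs
+ * only if the caller allows GPU waits, writeback still off; pass 3 runs
+ * only if the caller requested writeback. Each later pass runs only while
+ * *scanned < to_scan, and a negative return from any pass aborts the
+ * escalation.
+ */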
+static s64 xe_shrinker_walk(struct xe_device *xe,
+ struct ttm_operation_ctx *ctx,
+ const struct xe_bo_shrink_flags flags,
+ unsigned long to_scan, unsigned long *scanned)
+{
+ bool no_wait_gpu = true;
+ struct xe_bo_shrink_flags save_flags = flags;
+ s64 lret, freed;
+
+ swap(no_wait_gpu, ctx->no_wait_gpu);
+ save_flags.writeback = false;
+ lret = __xe_shrinker_walk(xe, ctx, save_flags, to_scan, scanned);
+ swap(no_wait_gpu, ctx->no_wait_gpu);
+ if (lret < 0 || *scanned >= to_scan)
+ return lret;
+
+ freed = lret;
+ if (!ctx->no_wait_gpu) {
+ lret = __xe_shrinker_walk(xe, ctx, save_flags, to_scan, scanned);
+ if (lret < 0)
+ return lret;
+ freed += lret;
+ if (*scanned >= to_scan)
+ return freed;
+ }
+
+ if (flags.writeback) {
+ lret = __xe_shrinker_walk(xe, ctx, flags, to_scan, scanned);
+ if (lret < 0)
+ return lret;
+ freed += lret;
+ }
+
+ return freed;
+}
+
static unsigned long
xe_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
{
@@ -199,6 +241,7 @@ static unsigned long xe_shrinker_scan(struct shrinker *shrink, struct shrink_con
runtime_pm = xe_shrinker_runtime_pm_get(shrinker, true, 0, can_backup);
shrink_flags.purge = false;
+
lret = xe_shrinker_walk(shrinker->xe, &ctx, shrink_flags,
nr_to_scan, &nr_scanned);
if (lret >= 0)
diff --git a/drivers/gpu/drm/xe/xe_sriov.c b/drivers/gpu/drm/xe/xe_sriov.c
index a0eab44c0e76..7d2d6de2aabf 100644
--- a/drivers/gpu/drm/xe/xe_sriov.c
+++ b/drivers/gpu/drm/xe/xe_sriov.c
@@ -15,6 +15,7 @@
#include "xe_sriov.h"
#include "xe_sriov_pf.h"
#include "xe_sriov_vf.h"
+#include "xe_sriov_vf_ccs.h"
/**
* xe_sriov_mode_to_string - Convert enum value to string.
@@ -157,3 +158,17 @@ const char *xe_sriov_function_name(unsigned int n, char *buf, size_t size)
strscpy(buf, "PF", size);
return buf;
}
+
+/**
+ * xe_sriov_init_late() - SR-IOV late initialization.
+ * @xe: the &xe_device to initialize
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_init_late(struct xe_device *xe)
+{
+ if (IS_SRIOV_VF(xe))
+ return xe_sriov_vf_init_late(xe);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/xe/xe_sriov.h b/drivers/gpu/drm/xe/xe_sriov.h
index 688fbabf08f1..6db45df55615 100644
--- a/drivers/gpu/drm/xe/xe_sriov.h
+++ b/drivers/gpu/drm/xe/xe_sriov.h
@@ -18,6 +18,7 @@ const char *xe_sriov_function_name(unsigned int n, char *buf, size_t len);
void xe_sriov_probe_early(struct xe_device *xe);
void xe_sriov_print_info(struct xe_device *xe, struct drm_printer *p);
int xe_sriov_init(struct xe_device *xe);
+int xe_sriov_init_late(struct xe_device *xe);
static inline enum xe_sriov_mode xe_device_sriov_mode(const struct xe_device *xe)
{
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf.c b/drivers/gpu/drm/xe/xe_sriov_pf.c
index afbdd894bd6e..27ddf3cc80e9 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf.c
+++ b/drivers/gpu/drm/xe/xe_sriov_pf.c
@@ -9,6 +9,7 @@
#include "xe_assert.h"
#include "xe_device.h"
+#include "xe_gt_sriov_pf.h"
#include "xe_module.h"
#include "xe_sriov.h"
#include "xe_sriov_pf.h"
@@ -103,6 +104,32 @@ int xe_sriov_pf_init_early(struct xe_device *xe)
}
/**
+ * xe_sriov_pf_wait_ready() - Wait until PF is ready to operate.
+ * @xe: the &xe_device to test
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_wait_ready(struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int err;
+
+ if (xe_device_wedged(xe))
+ return -ECANCELED;
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_sriov_pf_wait_ready(gt);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
* xe_sriov_pf_print_vfs_summary - Print SR-IOV PF information.
* @xe: the &xe_device to print info from
* @p: the &drm_printer
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf.h b/drivers/gpu/drm/xe/xe_sriov_pf.h
index c392c3fcf085..e3b34f8f5e04 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf.h
+++ b/drivers/gpu/drm/xe/xe_sriov_pf.h
@@ -15,6 +15,7 @@ struct xe_device;
#ifdef CONFIG_PCI_IOV
bool xe_sriov_pf_readiness(struct xe_device *xe);
int xe_sriov_pf_init_early(struct xe_device *xe);
+int xe_sriov_pf_wait_ready(struct xe_device *xe);
void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root);
void xe_sriov_pf_print_vfs_summary(struct xe_device *xe, struct drm_printer *p);
#else
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf.c b/drivers/gpu/drm/xe/xe_sriov_vf.c
index 26e243c28994..cdd9f8e78b2a 100644
--- a/drivers/gpu/drm/xe/xe_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_sriov_vf.c
@@ -3,6 +3,7 @@
* Copyright © 2023-2024 Intel Corporation
*/
+#include <drm/drm_debugfs.h>
#include <drm/drm_managed.h>
#include "xe_assert.h"
@@ -10,11 +11,16 @@
#include "xe_gt.h"
#include "xe_gt_sriov_printk.h"
#include "xe_gt_sriov_vf.h"
+#include "xe_guc.h"
#include "xe_guc_ct.h"
+#include "xe_guc_submit.h"
+#include "xe_irq.h"
+#include "xe_lrc.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_sriov_printk.h"
#include "xe_sriov_vf.h"
+#include "xe_sriov_vf_ccs.h"
#include "xe_tile_sriov_vf.h"
/**
@@ -124,16 +130,66 @@
* | | |
*/
-static bool vf_migration_supported(struct xe_device *xe)
+/**
+ * xe_sriov_vf_migration_supported - Report whether SR-IOV VF migration is
+ * supported or not.
+ * @xe: the &xe_device to check
+ *
+ * Returns: true if VF migration is supported, false otherwise.
+ */
+bool xe_sriov_vf_migration_supported(struct xe_device *xe)
+{
+ xe_assert(xe, IS_SRIOV_VF(xe));
+ return xe->sriov.vf.migration.enabled;
+}
+
+static void vf_disable_migration(struct xe_device *xe, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list va_args;
+
+ xe_assert(xe, IS_SRIOV_VF(xe));
+
+ va_start(va_args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &va_args;
+ xe_sriov_notice(xe, "migration disabled: %pV\n", &vaf);
+ va_end(va_args);
+
+ xe->sriov.vf.migration.enabled = false;
+}
+
+static void migration_worker_func(struct work_struct *w);
+
+static void vf_migration_init_early(struct xe_device *xe)
{
/*
* TODO: Add conditions to allow specific platforms, when they're
* supported at production quality.
*/
- return IS_ENABLED(CONFIG_DRM_XE_DEBUG);
-}
+ if (!IS_ENABLED(CONFIG_DRM_XE_DEBUG))
+ return vf_disable_migration(xe,
+ "experimental feature not available on production builds");
+
+ if (GRAPHICS_VER(xe) < 20)
+ return vf_disable_migration(xe, "requires gfx version >= 20, but only %u found",
+ GRAPHICS_VER(xe));
+
+ if (!IS_DGFX(xe)) {
+ struct xe_uc_fw_version guc_version;
+
+ xe_gt_sriov_vf_guc_versions(xe_device_get_gt(xe, 0), NULL, &guc_version);
+ if (MAKE_GUC_VER_STRUCT(guc_version) < MAKE_GUC_VER(1, 23, 0))
+ return vf_disable_migration(xe,
+ "CCS migration requires GuC ABI >= 1.23 but only %u.%u found",
+ guc_version.major, guc_version.minor);
+ }
-static void migration_worker_func(struct work_struct *w);
+ INIT_WORK(&xe->sriov.vf.migration.worker, migration_worker_func);
+
+ xe->sriov.vf.migration.enabled = true;
+ xe_sriov_dbg(xe, "migration support enabled\n");
+}
/**
* xe_sriov_vf_init_early - Initialize SR-IOV VF specific data.
@@ -141,10 +197,57 @@ static void migration_worker_func(struct work_struct *w);
*/
void xe_sriov_vf_init_early(struct xe_device *xe)
{
- INIT_WORK(&xe->sriov.vf.migration.worker, migration_worker_func);
+ vf_migration_init_early(xe);
+}
+
+/**
+ * vf_post_migration_shutdown - Stop the driver activities after VF migration.
+ * @xe: the &xe_device struct instance
+ *
+ * After this VM is migrated and assigned to a new VF, it is running on new
+ * hardware, and therefore many hardware-dependent states and related structures
+ * require fixups. Without those fixups the hardware cannot do any work, and
+ * all GPU pipelines are stalled.
+ * Stop some of the kernel activities to make the fixup process faster.
+ */
+static void vf_post_migration_shutdown(struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int ret = 0;
+
+ for_each_gt(gt, xe, id) {
+ xe_guc_submit_pause(&gt->uc.guc);
+ ret |= xe_guc_submit_reset_block(&gt->uc.guc);
+ }
+
+ if (ret)
+ drm_info(&xe->drm, "migration recovery encountered ongoing reset\n");
+}
+
+/**
+ * vf_post_migration_kickstart - Re-start the driver activities under new hardware.
+ * @xe: the &xe_device struct instance
+ *
+ * After we have finished with all post-migration fixups, restart the driver
+ * activities to continue feeding the GPU with workloads.
+ */
+static void vf_post_migration_kickstart(struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ unsigned int id;
- if (!vf_migration_supported(xe))
- xe_sriov_info(xe, "migration not supported by this module version\n");
+ /*
+ * Make sure interrupts on the new HW are properly set. The GuC IRQ
+	 * must be working at this point, since the recovery has already
+	 * started, but the rest was not enabled using the procedure from
+	 * the spec.
+ */
+ xe_irq_resume(xe);
+
+ for_each_gt(gt, xe, id) {
+ xe_guc_submit_reset_unblock(&gt->uc.guc);
+ xe_guc_submit_unpause(&gt->uc.guc);
+ }
}
static bool gt_vf_post_migration_needed(struct xe_gt *gt)
@@ -192,6 +295,11 @@ static int vf_get_next_migrated_gt_id(struct xe_device *xe)
return -1;
}
+static size_t post_migration_scratch_size(struct xe_device *xe)
+{
+ return max(xe_lrc_reg_size(xe), LRC_WA_BB_SIZE);
+}
+
/**
* Perform post-migration fixups on a single GT.
*
@@ -208,19 +316,31 @@ static int vf_get_next_migrated_gt_id(struct xe_device *xe)
static int gt_vf_post_migration_fixups(struct xe_gt *gt)
{
s64 shift;
+ void *buf;
int err;
+ buf = kmalloc(post_migration_scratch_size(gt_to_xe(gt)), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
err = xe_gt_sriov_vf_query_config(gt);
if (err)
- return err;
+ goto out;
shift = xe_gt_sriov_vf_ggtt_shift(gt);
if (shift) {
xe_tile_sriov_vf_fixup_ggtt_nodes(gt_to_tile(gt), shift);
- /* FIXME: add the recovery steps */
+ xe_gt_sriov_vf_default_lrcs_hwsp_rebase(gt);
+ err = xe_guc_contexts_hwsp_rebase(&gt->uc.guc, buf);
+ if (err)
+ goto out;
+ xe_guc_jobs_ring_rebase(&gt->uc.guc);
xe_guc_ct_fixup_messages_with_ggtt(&gt->uc.guc.ct, shift);
}
- return 0;
+
+out:
+ kfree(buf);
+ return err;
}
static void vf_post_migration_recovery(struct xe_device *xe)
@@ -230,9 +350,10 @@ static void vf_post_migration_recovery(struct xe_device *xe)
drm_dbg(&xe->drm, "migration recovery in progress\n");
xe_pm_runtime_get(xe);
+ vf_post_migration_shutdown(xe);
- if (!vf_migration_supported(xe)) {
- xe_sriov_err(xe, "migration not supported by this module version\n");
+ if (!xe_sriov_vf_migration_supported(xe)) {
+ xe_sriov_err(xe, "migration is not supported\n");
err = -ENOTRECOVERABLE;
goto fail;
}
@@ -247,6 +368,7 @@ static void vf_post_migration_recovery(struct xe_device *xe)
set_bit(id, &fixed_gts);
}
+ vf_post_migration_kickstart(xe);
err = vf_post_migration_notify_resfix_done(xe, fixed_gts);
if (err)
goto fail;
@@ -306,3 +428,48 @@ void xe_sriov_vf_start_migration_recovery(struct xe_device *xe)
drm_info(&xe->drm, "VF migration recovery %s\n", started ?
"scheduled" : "already in progress");
}
+
+/**
+ * xe_sriov_vf_init_late() - SR-IOV VF late initialization.
+ * @xe: the &xe_device to initialize
+ *
+ * This function initializes code for CCS migration.
+ * This function initializes the support code for CCS migration.
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vf_init_late(struct xe_device *xe)
+{
+ int err = 0;
+
+ if (xe_sriov_vf_migration_supported(xe))
+ err = xe_sriov_vf_ccs_init(xe);
+
+ return err;
+}
+
+static int sa_info_vf_ccs(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct xe_device *xe = to_xe_device(node->minor->dev);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ xe_sriov_vf_ccs_print(xe, &p);
+ return 0;
+}
+
+static const struct drm_info_list debugfs_list[] = {
+ { .name = "sa_info_vf_ccs", .show = sa_info_vf_ccs },
+};
+
+/**
+ * xe_sriov_vf_debugfs_register - Register VF debugfs attributes.
+ * @xe: the &xe_device
+ * @root: the root &dentry
+ *
+ * Prepare debugfs attributes exposed by the VF.
+ */
+void xe_sriov_vf_debugfs_register(struct xe_device *xe, struct dentry *root)
+{
+ drm_debugfs_create_files(debugfs_list, ARRAY_SIZE(debugfs_list),
+ root, xe->drm.primary);
+}
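How the new entry points are expected to chain together at probe time — the xe_device_probe() call site is an assumption here, since it is not part of this hunk:

	xe_device_probe(xe)
	    -> xe_sriov_init_late(xe)             /* returns 0 on PF and native */
	        -> xe_sriov_vf_init_late(xe)      /* VF only */
	            -> xe_sriov_vf_ccs_init(xe)   /* only if migration is supported */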
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf.h b/drivers/gpu/drm/xe/xe_sriov_vf.h
index 7b8622cff2b7..9e752105ec2a 100644
--- a/drivers/gpu/drm/xe/xe_sriov_vf.h
+++ b/drivers/gpu/drm/xe/xe_sriov_vf.h
@@ -6,9 +6,15 @@
#ifndef _XE_SRIOV_VF_H_
#define _XE_SRIOV_VF_H_
+#include <linux/types.h>
+
+struct dentry;
struct xe_device;
void xe_sriov_vf_init_early(struct xe_device *xe);
+int xe_sriov_vf_init_late(struct xe_device *xe);
void xe_sriov_vf_start_migration_recovery(struct xe_device *xe);
+bool xe_sriov_vf_migration_supported(struct xe_device *xe);
+void xe_sriov_vf_debugfs_register(struct xe_device *xe, struct dentry *root);
#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
new file mode 100644
index 000000000000..8dec616c37c9
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
@@ -0,0 +1,410 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include "instructions/xe_mi_commands.h"
+#include "instructions/xe_gpu_commands.h"
+#include "xe_bb.h"
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_exec_queue.h"
+#include "xe_exec_queue_types.h"
+#include "xe_guc_submit.h"
+#include "xe_lrc.h"
+#include "xe_migrate.h"
+#include "xe_pm.h"
+#include "xe_sa.h"
+#include "xe_sriov_printk.h"
+#include "xe_sriov_vf.h"
+#include "xe_sriov_vf_ccs.h"
+#include "xe_sriov_vf_ccs_types.h"
+
+/**
+ * DOC: VF save/restore of compression Meta Data
+ *
+ * VF KMD registers two special contexts/LRCAs.
+ *
+ * Save Context/LRCA: contains the necessary commands + page tables to trigger
+ * a save of the metadata / compression control surface (aka CCS) into regular
+ * system memory in the VM.
+ *
+ * Restore Context/LRCA: contains the necessary commands + page tables to
+ * trigger a restore of the metadata / compression control surface (aka CCS)
+ * from regular system memory in the VM back to the corresponding CCS pool.
+ *
+ * The diagram below explains the steps needed for VF save/restore of
+ * compression metadata::
+ *
+ * CCS Save CCS Restore VF KMD Guc BCS
+ * LRCA LRCA
+ * | | | | |
+ * | | | | |
+ * | Create Save LRCA | | |
+ * [ ]<----------------------------- [ ] | |
+ * | | | | |
+ * | | | | |
+ * | | | Register save LRCA | |
+ * | | | with Guc | |
+ * | | [ ]--------------------------->[ ] |
+ * | | | | |
+ * | | Create restore LRCA | | |
+ * | [ ]<------------------[ ] | |
+ * | | | | |
+ * | | | Register restore LRCA | |
+ * | | | with Guc | |
+ * | | [ ]--------------------------->[ ] |
+ * | | | | |
+ * | | | | |
+ * | | [ ]------------------------- | |
+ * | | [ ] Allocate main memory. | | |
+ * | | [ ] Allocate CCS memory. | | |
+ * | | [ ] Update Main memory & | | |
+ * [ ]<------------------------------[ ] CCS pages PPGTT + BB | | |
+ * | [ ]<------------------[ ] cmds to save & restore.| | |
+ * | | [ ]<------------------------ | |
+ * | | | | |
+ * | | | | |
+ * | | | | |
+ * : : : : :
+ * ---------------------------- VF Paused -------------------------------------
+ * | | | | |
+ * | | | | |
+ * | | | |Schedule |
+ * | | | |CCS Save |
+ * | | | | LRCA |
+ * | | | [ ]------>[ ]
+ * | | | | |
+ * | | | | |
+ * | | | |CCS save |
+ * | | | |completed|
+ * | | | [ ]<------[ ]
+ * | | | | |
+ * : : : : :
+ * ---------------------------- VM Migrated -----------------------------------
+ * | | | | |
+ * | | | | |
+ * : : : : :
+ * ---------------------------- VF Resumed ------------------------------------
+ * | | | | |
+ * | | | | |
+ * | | [ ]-------------- | |
+ * | | [ ] Fix up GGTT | | |
+ * | | [ ]<------------- | |
+ * | | | | |
+ * | | | | |
+ * | | | Notify VF_RESFIX_DONE | |
+ * | | [ ]--------------------------->[ ] |
+ * | | | | |
+ * | | | |Schedule |
+ * | | | |CCS |
+ * | | | |Restore |
+ * | | | |LRCA |
+ * | | | [ ]------>[ ]
+ * | | | | |
+ * | | | | |
+ * | | | |CCS |
+ * | | | |restore |
+ * | | | |completed|
+ * | | | [ ]<------[ ]
+ * | | | | |
+ * | | | | |
+ * | | | VF_RESFIX_DONE complete | |
+ * | | | notification | |
+ * | | [ ]<---------------------------[ ] |
+ * | | | | |
+ * | | | | |
+ * : : : : :
+ * ------------------------- Continue VM restore ------------------------------
+ */
+
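+/*
+ * Naming note: the "read" context is the one GuC schedules to save CCS
+ * contents out to system memory, while the "write" context restores them;
+ * register_save_restore_context() below makes the mapping to
+ * GUC_CONTEXT_COMPRESSION_SAVE/RESTORE explicit.
+ */
+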
+static u64 get_ccs_bb_pool_size(struct xe_device *xe)
+{
+ u64 sys_mem_size, ccs_mem_size, ptes, bb_pool_size;
+ struct sysinfo si;
+
+ si_meminfo(&si);
+ sys_mem_size = si.totalram * si.mem_unit;
+ ccs_mem_size = div64_u64(sys_mem_size, NUM_BYTES_PER_CCS_BYTE(xe));
+ ptes = DIV_ROUND_UP_ULL(sys_mem_size + ccs_mem_size, XE_PAGE_SIZE);
+
+	/*
+	 * We need the BB size below to hold the PTE mappings and some DWs for
+	 * the copy command. In reality we need space for many copy commands,
+	 * so allocate double the calculated size, which is enough to hold the
+	 * GPU instructions for the whole region.
+ */
+ bb_pool_size = ptes * sizeof(u32);
+
+ return round_up(bb_pool_size * 2, SZ_1M);
+}
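+
+/*
+ * Worked example (hedged, assuming a 1:256 main-to-CCS ratio from
+ * NUM_BYTES_PER_CCS_BYTE() and a VM with 16 GiB of system RAM):
+ *
+ *   ccs_mem_size = 16 GiB / 256               = 64 MiB
+ *   ptes         = (16 GiB + 64 MiB) / 4 KiB  = 4,210,688
+ *   bb_pool_size = ptes * sizeof(u32)         = ~16.1 MiB
+ *   return value = round_up(2 * bb_pool_size) = 33 MiB
+ */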
+
+static int alloc_bb_pool(struct xe_tile *tile, struct xe_sriov_vf_ccs_ctx *ctx)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_sa_manager *sa_manager;
+ u64 bb_pool_size;
+ int offset, err;
+
+ bb_pool_size = get_ccs_bb_pool_size(xe);
+ xe_sriov_info(xe, "Allocating %s CCS BB pool size = %lldMB\n",
+ ctx->ctx_id ? "Restore" : "Save", bb_pool_size / SZ_1M);
+
+ sa_manager = xe_sa_bo_manager_init(tile, bb_pool_size, SZ_16);
+
+ if (IS_ERR(sa_manager)) {
+ xe_sriov_err(xe, "Suballocator init failed with error: %pe\n",
+ sa_manager);
+ err = PTR_ERR(sa_manager);
+ return err;
+ }
+
+ offset = 0;
+ xe_map_memset(xe, &sa_manager->bo->vmap, offset, MI_NOOP,
+ bb_pool_size);
+
+ offset = bb_pool_size - sizeof(u32);
+ xe_map_wr(xe, &sa_manager->bo->vmap, offset, u32, MI_BATCH_BUFFER_END);
+
+ ctx->mem.ccs_bb_pool = sa_manager;
+
+ return 0;
+}
+
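+/*
+ * The ring programming below points the context's ring at the BB pool with
+ * an unconditional MI_BATCH_BUFFER_START, so whatever save/restore commands
+ * are currently suballocated in the pool execute whenever GuC schedules the
+ * context; the MI_BATCH_BUFFER_END written at the pool's tail by
+ * alloc_bb_pool() guarantees execution terminates.
+ */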
+static void ccs_rw_update_ring(struct xe_sriov_vf_ccs_ctx *ctx)
+{
+ u64 addr = xe_sa_manager_gpu_addr(ctx->mem.ccs_bb_pool);
+ struct xe_lrc *lrc = xe_exec_queue_lrc(ctx->mig_q);
+ u32 dw[10], i = 0;
+
+ dw[i++] = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ dw[i++] = MI_BATCH_BUFFER_START | XE_INSTR_NUM_DW(3);
+ dw[i++] = lower_32_bits(addr);
+ dw[i++] = upper_32_bits(addr);
+ dw[i++] = MI_NOOP;
+ dw[i++] = MI_NOOP;
+
+ xe_lrc_write_ring(lrc, dw, i * sizeof(u32));
+ xe_lrc_set_ring_tail(lrc, lrc->ring.tail);
+}
+
+static int register_save_restore_context(struct xe_sriov_vf_ccs_ctx *ctx)
+{
+ int ctx_type;
+
+ switch (ctx->ctx_id) {
+ case XE_SRIOV_VF_CCS_READ_CTX:
+ ctx_type = GUC_CONTEXT_COMPRESSION_SAVE;
+ break;
+ case XE_SRIOV_VF_CCS_WRITE_CTX:
+ ctx_type = GUC_CONTEXT_COMPRESSION_RESTORE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ xe_guc_register_vf_exec_queue(ctx->mig_q, ctx_type);
+ return 0;
+}
+
+/**
+ * xe_sriov_vf_ccs_register_context - Register read/write contexts with GuC.
+ * @xe: the &xe_device to register contexts on.
+ *
+ * This function registers the read and write contexts with GuC. Re-registration
+ * is needed whenever resuming from runtime suspend.
+ *
+ * Return: 0 on success. Negative error code on failure.
+ */
+int xe_sriov_vf_ccs_register_context(struct xe_device *xe)
+{
+ enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
+ struct xe_sriov_vf_ccs_ctx *ctx;
+ int err;
+
+ xe_assert(xe, IS_VF_CCS_READY(xe));
+
+ for_each_ccs_rw_ctx(ctx_id) {
+ ctx = &xe->sriov.vf.ccs.contexts[ctx_id];
+ err = register_save_restore_context(ctx);
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+static void xe_sriov_vf_ccs_fini(void *arg)
+{
+ struct xe_sriov_vf_ccs_ctx *ctx = arg;
+ struct xe_lrc *lrc = xe_exec_queue_lrc(ctx->mig_q);
+
+ /*
+	 * Make TAIL = HEAD in the ring so that no issues are seen if GuC
+	 * submits this context to HW on VF pause after unbinding the device.
+ */
+ xe_lrc_set_ring_tail(lrc, xe_lrc_ring_head(lrc));
+ xe_exec_queue_put(ctx->mig_q);
+}
+
+/**
+ * xe_sriov_vf_ccs_init - Setup LRCA for save & restore.
+ * xe_sriov_vf_ccs_init - Set up the LRCAs for save & restore.
+ * @xe: the &xe_device to initialize
+ *
+ * This function shall be called only by the VF. It initializes the
+ * LRCAs and suballocators needed for CCS save & restore.
+ * Return: 0 on success. Negative error code on failure.
+ */
+int xe_sriov_vf_ccs_init(struct xe_device *xe)
+{
+ struct xe_tile *tile = xe_device_get_root_tile(xe);
+ enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
+ struct xe_sriov_vf_ccs_ctx *ctx;
+ struct xe_exec_queue *q;
+ u32 flags;
+ int err;
+
+ xe_assert(xe, IS_SRIOV_VF(xe));
+ xe_assert(xe, xe_sriov_vf_migration_supported(xe));
+
+ if (IS_DGFX(xe) || !xe_device_has_flat_ccs(xe))
+ return 0;
+
+ for_each_ccs_rw_ctx(ctx_id) {
+ ctx = &xe->sriov.vf.ccs.contexts[ctx_id];
+ ctx->ctx_id = ctx_id;
+
+ flags = EXEC_QUEUE_FLAG_KERNEL |
+ EXEC_QUEUE_FLAG_PERMANENT |
+ EXEC_QUEUE_FLAG_MIGRATE;
+ q = xe_exec_queue_create_bind(xe, tile, flags, 0);
+ if (IS_ERR(q)) {
+ err = PTR_ERR(q);
+ goto err_ret;
+ }
+ ctx->mig_q = q;
+
+ err = alloc_bb_pool(tile, ctx);
+ if (err)
+ goto err_free_queue;
+
+ ccs_rw_update_ring(ctx);
+
+ err = register_save_restore_context(ctx);
+ if (err)
+ goto err_free_queue;
+
+ err = devm_add_action_or_reset(xe->drm.dev,
+ xe_sriov_vf_ccs_fini,
+ ctx);
+ if (err)
+ goto err_ret;
+ }
+
+ xe->sriov.vf.ccs.initialized = 1;
+
+ return 0;
+
+err_free_queue:
+ xe_exec_queue_put(q);
+
+err_ret:
+ return err;
+}
+
+/**
+ * xe_sriov_vf_ccs_attach_bo - Insert CCS read write commands in the BO.
+ * xe_sriov_vf_ccs_attach_bo - Insert CCS read/write commands in the BO.
+ * @bo: the &buffer object to which batch buffer commands will be added.
+ *
+ * This function shall be called only by the VF. It inserts the PTE and copy
+ * command instructions into the BO by calling the xe_migrate_ccs_rw_copy()
+ * function.
+ * Returns: 0 if successful, negative error code on failure.
+ */
+int xe_sriov_vf_ccs_attach_bo(struct xe_bo *bo)
+{
+ struct xe_device *xe = xe_bo_device(bo);
+ enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
+ struct xe_sriov_vf_ccs_ctx *ctx;
+ struct xe_tile *tile;
+ struct xe_bb *bb;
+ int err = 0;
+
+ xe_assert(xe, IS_VF_CCS_READY(xe));
+
+ tile = xe_device_get_root_tile(xe);
+
+ for_each_ccs_rw_ctx(ctx_id) {
+ bb = bo->bb_ccs[ctx_id];
+ /* bb should be NULL here. Assert if not NULL */
+ xe_assert(xe, !bb);
+
+ ctx = &xe->sriov.vf.ccs.contexts[ctx_id];
+ err = xe_migrate_ccs_rw_copy(tile, ctx->mig_q, bo, ctx_id);
+ }
+ return err;
+}
+
+/**
+ * xe_sriov_vf_ccs_detach_bo - Remove CCS read write commands from the BO.
+ * xe_sriov_vf_ccs_detach_bo - Remove CCS read/write commands from the BO.
+ * @bo: the &buffer object from which batch buffer commands will be removed.
+ *
+ * This function shall be called only by the VF. It removes the PTE and copy
+ * command instructions from the BO, updating the BB with MI_NOOP before
+ * freeing.
+ * Returns: 0 if successful.
+ */
+int xe_sriov_vf_ccs_detach_bo(struct xe_bo *bo)
+{
+ struct xe_device *xe = xe_bo_device(bo);
+ enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
+ struct xe_bb *bb;
+
+ xe_assert(xe, IS_VF_CCS_READY(xe));
+
+ if (!xe_bo_has_valid_ccs_bb(bo))
+ return 0;
+
+ for_each_ccs_rw_ctx(ctx_id) {
+ bb = bo->bb_ccs[ctx_id];
+ if (!bb)
+ continue;
+
+ memset(bb->cs, MI_NOOP, bb->len * sizeof(u32));
+ xe_bb_free(bb, NULL);
+ bo->bb_ccs[ctx_id] = NULL;
+ }
+ return 0;
+}
+
+/**
+ * xe_sriov_vf_ccs_print - Print VF CCS details.
+ * @xe: the &xe_device
+ * @p: the &drm_printer
+ *
+ * This function is for VF use only.
+ */
+void xe_sriov_vf_ccs_print(struct xe_device *xe, struct drm_printer *p)
+{
+ struct xe_sa_manager *bb_pool;
+ enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
+
+ if (!IS_VF_CCS_READY(xe))
+ return;
+
+ xe_pm_runtime_get(xe);
+
+ for_each_ccs_rw_ctx(ctx_id) {
+ bb_pool = xe->sriov.vf.ccs.contexts[ctx_id].mem.ccs_bb_pool;
+ if (!bb_pool)
+ break;
+
+ drm_printf(p, "ccs %s bb suballoc info\n", ctx_id ? "write" : "read");
+ drm_printf(p, "-------------------------\n");
+ drm_suballoc_dump_debug_info(&bb_pool->base, p, xe_sa_manager_gpu_addr(bb_pool));
+ drm_puts(p, "\n");
+ }
+
+ xe_pm_runtime_put(xe);
+}
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.h b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.h
new file mode 100644
index 000000000000..0745c0ff0228
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_VF_CCS_H_
+#define _XE_SRIOV_VF_CCS_H_
+
+#include "xe_device_types.h"
+#include "xe_sriov.h"
+#include "xe_sriov_vf_ccs_types.h"
+
+struct drm_printer;
+struct xe_device;
+struct xe_bo;
+
+int xe_sriov_vf_ccs_init(struct xe_device *xe);
+int xe_sriov_vf_ccs_attach_bo(struct xe_bo *bo);
+int xe_sriov_vf_ccs_detach_bo(struct xe_bo *bo);
+int xe_sriov_vf_ccs_register_context(struct xe_device *xe);
+void xe_sriov_vf_ccs_print(struct xe_device *xe, struct drm_printer *p);
+
+static inline bool xe_sriov_vf_ccs_ready(struct xe_device *xe)
+{
+ xe_assert(xe, IS_SRIOV_VF(xe));
+ return xe->sriov.vf.ccs.initialized;
+}
+
+#define IS_VF_CCS_READY(xe) ({ \
+ struct xe_device *xe__ = (xe); \
+ IS_SRIOV_VF(xe__) && xe_sriov_vf_ccs_ready(xe__); \
+ })
+
+#endif
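The IS_VF_CCS_READY() macro above uses a GNU statement expression so its argument is evaluated exactly once. A minimal usage sketch with a hypothetical caller, not part of this series:

	static void vf_ccs_example(struct xe_device *xe)
	{
		/* Safe even if the argument had side effects: the statement
		 * expression binds it to xe__ a single time.
		 */
		if (!IS_VF_CCS_READY(xe))
			return;

		/* CCS contexts are registered and ready for use here */
	}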
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_ccs_types.h b/drivers/gpu/drm/xe/xe_sriov_vf_ccs_types.h
new file mode 100644
index 000000000000..22c499943d2a
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_vf_ccs_types.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_VF_CCS_TYPES_H_
+#define _XE_SRIOV_VF_CCS_TYPES_H_
+
+#include <linux/types.h>
+
+#define for_each_ccs_rw_ctx(id__) \
+ for ((id__) = 0; (id__) < XE_SRIOV_VF_CCS_CTX_COUNT; (id__)++)
+
+enum xe_sriov_vf_ccs_rw_ctxs {
+ XE_SRIOV_VF_CCS_READ_CTX,
+ XE_SRIOV_VF_CCS_WRITE_CTX,
+ XE_SRIOV_VF_CCS_CTX_COUNT
+};
+
+struct xe_migrate;
+struct xe_sa_manager;
+
+/**
+ * struct xe_sriov_vf_ccs_ctx - VF CCS migration context data.
+ */
+struct xe_sriov_vf_ccs_ctx {
+ /** @ctx_id: ID of this context */
+ enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
+
+ /** @mig_q: exec queue used for migration */
+ struct xe_exec_queue *mig_q;
+
+ /** @mem: memory data */
+ struct {
+ /** @mem.ccs_bb_pool: Pool from which batch buffers are allocated. */
+ struct xe_sa_manager *ccs_bb_pool;
+ } mem;
+};
+
+/**
+ * struct xe_sriov_vf_ccs - The VF CCS migration support data.
+ */
+struct xe_sriov_vf_ccs {
+ /** @contexts: CCS read and write contexts for VF. */
+ struct xe_sriov_vf_ccs_ctx contexts[XE_SRIOV_VF_CCS_CTX_COUNT];
+
+ /** @initialized: set once VF CCS initialization is complete */
+ bool initialized;
+};
+
+#endif
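for_each_ccs_rw_ctx() simply walks the two-entry context enum, visiting the read context and then the write context. An illustrative iteration over the per-context state (sketch only, helper name is hypothetical):

	static void vf_ccs_for_each_example(struct xe_device *xe)
	{
		enum xe_sriov_vf_ccs_rw_ctxs ctx_id;

		/* Visits XE_SRIOV_VF_CCS_READ_CTX, then XE_SRIOV_VF_CCS_WRITE_CTX */
		for_each_ccs_rw_ctx(ctx_id) {
			struct xe_sriov_vf_ccs_ctx *ctx =
				&xe->sriov.vf.ccs.contexts[ctx_id];

			(void)ctx;	/* per-context work would go here */
		}
	}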
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_sriov_vf_types.h
index 8300416a6226..426cc5841958 100644
--- a/drivers/gpu/drm/xe/xe_sriov_vf_types.h
+++ b/drivers/gpu/drm/xe/xe_sriov_vf_types.h
@@ -9,6 +9,8 @@
#include <linux/types.h>
#include <linux/workqueue_types.h>
+#include "xe_sriov_vf_ccs_types.h"
+
/**
* struct xe_sriov_vf_relay_version - PF ABI version details.
*/
@@ -35,7 +37,15 @@ struct xe_device_vf {
struct work_struct worker;
/** @migration.gt_flags: Per-GT request flags for VF migration recovery */
unsigned long gt_flags;
+ /**
+ * @migration.enabled: flag indicating whether migration support
+ * was enabled, or left disabled due to missing prerequisites
+ */
+ bool enabled;
} migration;
+
+ /** @ccs: VF CCS state data */
+ struct xe_sriov_vf_ccs ccs;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_survivability_mode.c b/drivers/gpu/drm/xe/xe_survivability_mode.c
index 41705f5d52e3..1662bfddd4bc 100644
--- a/drivers/gpu/drm/xe/xe_survivability_mode.c
+++ b/drivers/gpu/drm/xe/xe_survivability_mode.c
@@ -22,15 +22,18 @@
#define MAX_SCRATCH_MMIO 8
/**
- * DOC: Xe Boot Survivability
+ * DOC: Survivability Mode
*
- * Boot Survivability is a software based workflow for recovering a system in a failed boot state
+ * Survivability Mode is a software-based workflow for recovering a system in a failed boot state.
* Here system recoverability is concerned with recovering the firmware responsible for boot.
*
- * This is implemented by loading the driver with bare minimum (no drm card) to allow the firmware
- * to be flashed through mei and collect telemetry. The driver's probe flow is modified
- * such that it enters survivability mode when pcode initialization is incomplete and boot status
- * denotes a failure.
+ * Boot Survivability
+ * ===================
+ *
+ * Boot Survivability is implemented by loading the driver with the bare minimum (no drm card) to
+ * allow the firmware to be flashed through the mei driver and telemetry to be collected. The
+ * driver's probe flow is modified such that it enters survivability mode when pcode initialization
+ * is incomplete and the boot status denotes a failure.
*
* Survivability mode can also be entered manually using the survivability mode attribute available
* through configfs which is beneficial in several usecases. It can be used to address scenarios
@@ -41,12 +44,14 @@
*
* # echo 1 > /sys/kernel/config/xe/0000:03:00.0/survivability_mode
*
+ * It is the responsibility of the user to clear the mode once the firmware flash is complete.
+ *
* Refer :ref:`xe_configfs` for more details on how to use configfs
*
* Survivability mode is indicated by the below admin-only readable sysfs which provides additional
* debug information::
*
- * /sys/bus/pci/devices/<device>/surivability_mode
+ * /sys/bus/pci/devices/<device>/survivability_mode
*
* Capability Information:
* Provides boot status
@@ -56,6 +61,22 @@
* Provides history of previous failures
* Auxiliary Information
* Certain failures may have information in addition to postcode information
+ *
+ * Runtime Survivability
+ * =====================
+ *
+ * Certain runtime firmware errors can cause the device to enter a wedged state
+ * (:ref:`xe-device-wedging`) requiring a firmware flash to restore normal operation.
+ * Runtime Survivability Mode signals that a firmware flash is necessary to recover the device and
+ * is indicated by the presence of the survivability mode sysfs::
+ *
+ * /sys/bus/pci/devices/<device>/survivability_mode
+ *
+ * The survivability mode sysfs reports which type of survivability mode is active.
+ *
+ * When such errors occur, userspace is notified with the drm device wedged uevent and runtime
+ * survivability mode. The user can then initiate a firmware flash using userspace tools like
+ * fwupd to restore the device to normal operation.
*/
static u32 aux_history_offset(u32 reg_value)
@@ -121,6 +142,14 @@ static void log_survivability_info(struct pci_dev *pdev)
}
}
+static int check_boot_failure(struct xe_device *xe)
+{
+ struct xe_survivability *survivability = &xe->survivability;
+
+ return survivability->boot_status == NON_CRITICAL_FAILURE ||
+ survivability->boot_status == CRITICAL_FAILURE;
+}
+
static ssize_t survivability_mode_show(struct device *dev,
struct device_attribute *attr, char *buff)
{
@@ -130,6 +159,12 @@ static ssize_t survivability_mode_show(struct device *dev,
struct xe_survivability_info *info = survivability->info;
int index = 0, count = 0;
+ count += sysfs_emit_at(buff, count, "Survivability mode type: %s\n",
+ survivability->type ? "Runtime" : "Boot");
+
+ if (!check_boot_failure(xe))
+ return count;
+
for (index = 0; index < MAX_SCRATCH_MMIO; index++) {
if (info[index].reg)
count += sysfs_emit_at(buff, count, "%s: 0x%x - 0x%x\n", info[index].name,
@@ -147,16 +182,14 @@ static void xe_survivability_mode_fini(void *arg)
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
struct device *dev = &pdev->dev;
- xe_configfs_clear_survivability_mode(pdev);
sysfs_remove_file(&dev->kobj, &dev_attr_survivability_mode.attr);
}
-static int enable_survivability_mode(struct pci_dev *pdev)
+static int create_survivability_sysfs(struct pci_dev *pdev)
{
struct device *dev = &pdev->dev;
struct xe_device *xe = pdev_to_xe_device(pdev);
- struct xe_survivability *survivability = &xe->survivability;
- int ret = 0;
+ int ret;
/* create survivability mode sysfs */
ret = sysfs_create_file(&dev->kobj, &dev_attr_survivability_mode.attr);
@@ -170,6 +203,20 @@ static int enable_survivability_mode(struct pci_dev *pdev)
if (ret)
return ret;
+ return 0;
+}
+
+static int enable_boot_survivability_mode(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct xe_device *xe = pdev_to_xe_device(pdev);
+ struct xe_survivability *survivability = &xe->survivability;
+ int ret = 0;
+
+ ret = create_survivability_sysfs(pdev);
+ if (ret)
+ return ret;
+
/* Make sure xe_heci_gsc_init() knows about survivability mode */
survivability->mode = true;
@@ -192,15 +239,36 @@ err:
return ret;
}
+static int init_survivability_mode(struct xe_device *xe)
+{
+ struct xe_survivability *survivability = &xe->survivability;
+ struct xe_survivability_info *info;
+
+ survivability->size = MAX_SCRATCH_MMIO;
+
+ info = devm_kcalloc(xe->drm.dev, survivability->size, sizeof(*info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ survivability->info = info;
+
+ populate_survivability_info(xe);
+
+ return 0;
+}
+
/**
- * xe_survivability_mode_is_enabled - check if survivability mode is enabled
+ * xe_survivability_mode_is_boot_enabled - check if boot survivability mode is enabled
* @xe: xe device instance
*
- * Returns true if in survivability mode, false otherwise
+ * Returns true if in boot survivability mode, false otherwise
*/
-bool xe_survivability_mode_is_enabled(struct xe_device *xe)
+bool xe_survivability_mode_is_boot_enabled(struct xe_device *xe)
{
- return xe->survivability.mode;
+ struct xe_survivability *survivability = &xe->survivability;
+
+ return survivability->mode && survivability->type == XE_SURVIVABILITY_TYPE_BOOT;
}
/**
@@ -221,19 +289,10 @@ bool xe_survivability_mode_is_requested(struct xe_device *xe)
u32 data;
bool survivability_mode;
- if (!IS_DGFX(xe) || IS_SRIOV_VF(xe))
+ if (!IS_DGFX(xe) || IS_SRIOV_VF(xe) || xe->info.platform < XE_BATTLEMAGE)
return false;
survivability_mode = xe_configfs_get_survivability_mode(pdev);
-
- if (xe->info.platform < XE_BATTLEMAGE) {
- if (survivability_mode) {
- dev_err(&pdev->dev, "Survivability Mode is not supported on this card\n");
- xe_configfs_clear_survivability_mode(pdev);
- }
- return false;
- }
-
/* Enable survivability mode if set via configfs */
if (survivability_mode)
return true;
@@ -241,44 +300,78 @@ bool xe_survivability_mode_is_requested(struct xe_device *xe)
data = xe_mmio_read32(mmio, PCODE_SCRATCH(0));
survivability->boot_status = REG_FIELD_GET(BOOT_STATUS, data);
- return survivability->boot_status == NON_CRITICAL_FAILURE ||
- survivability->boot_status == CRITICAL_FAILURE;
+ return check_boot_failure(xe);
}
/**
- * xe_survivability_mode_enable - Initialize and enable the survivability mode
+ * xe_survivability_mode_runtime_enable - Initialize and enable runtime survivability mode
* @xe: xe device instance
*
- * Initialize survivability information and enable survivability mode
+ * Initialize survivability information and enable runtime survivability mode.
+ * Runtime survivability mode is enabled when certain errors cause the device to be
+ * in a non-recoverable state. The device is declared wedged with the appropriate
+ * recovery method, and the survivability mode sysfs is exposed to userspace.
*
- * Return: 0 if survivability mode is enabled or not requested; negative error
- * code otherwise.
+ * Return: 0 if runtime survivability mode is enabled, negative error code otherwise.
*/
-int xe_survivability_mode_enable(struct xe_device *xe)
+int xe_survivability_mode_runtime_enable(struct xe_device *xe)
{
struct xe_survivability *survivability = &xe->survivability;
- struct xe_survivability_info *info;
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ int ret;
- if (!xe_survivability_mode_is_requested(xe))
- return 0;
+ if (!IS_DGFX(xe) || IS_SRIOV_VF(xe) || xe->info.platform < XE_BATTLEMAGE) {
+ dev_err(&pdev->dev, "Runtime Survivability Mode not supported\n");
+ return -EINVAL;
+ }
- survivability->size = MAX_SCRATCH_MMIO;
+ ret = init_survivability_mode(xe);
+ if (ret)
+ return ret;
- info = devm_kcalloc(xe->drm.dev, survivability->size, sizeof(*info),
- GFP_KERNEL);
- if (!info)
- return -ENOMEM;
+ ret = create_survivability_sysfs(pdev);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to create survivability mode sysfs\n");
- survivability->info = info;
+ survivability->type = XE_SURVIVABILITY_TYPE_RUNTIME;
+ dev_err(&pdev->dev, "Runtime Survivability mode enabled\n");
- populate_survivability_info(xe);
+ xe_device_set_wedged_method(xe, DRM_WEDGE_RECOVERY_VENDOR);
+ xe_device_declare_wedged(xe);
+ dev_err(&pdev->dev, "Firmware flash required, Please refer to the userspace documentation for more details!\n");
+
+ return 0;
+}
- /* Only log debug information and exit if it is a critical failure */
+/**
+ * xe_survivability_mode_boot_enable - Initialize and enable boot survivability mode
+ * @xe: xe device instance
+ *
+ * Initialize survivability information and enable boot survivability mode
+ *
+ * Return: 0 if boot survivability mode is enabled or not requested, negative error
+ * code otherwise.
+ */
+int xe_survivability_mode_boot_enable(struct xe_device *xe)
+{
+ struct xe_survivability *survivability = &xe->survivability;
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ int ret;
+
+ if (!xe_survivability_mode_is_requested(xe))
+ return 0;
+
+ ret = init_survivability_mode(xe);
+ if (ret)
+ return ret;
+
+ /* Log breadcrumbs but do not enter survivability mode for critical boot errors */
if (survivability->boot_status == CRITICAL_FAILURE) {
log_survivability_info(pdev);
return -ENXIO;
}
- return enable_survivability_mode(pdev);
+ survivability->type = XE_SURVIVABILITY_TYPE_BOOT;
+
+ return enable_boot_survivability_mode(pdev);
}
diff --git a/drivers/gpu/drm/xe/xe_survivability_mode.h b/drivers/gpu/drm/xe/xe_survivability_mode.h
index 02231c2bf008..1cc94226aa82 100644
--- a/drivers/gpu/drm/xe/xe_survivability_mode.h
+++ b/drivers/gpu/drm/xe/xe_survivability_mode.h
@@ -10,8 +10,9 @@
struct xe_device;
-int xe_survivability_mode_enable(struct xe_device *xe);
-bool xe_survivability_mode_is_enabled(struct xe_device *xe);
+int xe_survivability_mode_boot_enable(struct xe_device *xe);
+int xe_survivability_mode_runtime_enable(struct xe_device *xe);
+bool xe_survivability_mode_is_boot_enabled(struct xe_device *xe);
bool xe_survivability_mode_is_requested(struct xe_device *xe);
#endif /* _XE_SURVIVABILITY_MODE_H_ */
diff --git a/drivers/gpu/drm/xe/xe_survivability_mode_types.h b/drivers/gpu/drm/xe/xe_survivability_mode_types.h
index 19d433e253df..cd65a5d167c9 100644
--- a/drivers/gpu/drm/xe/xe_survivability_mode_types.h
+++ b/drivers/gpu/drm/xe/xe_survivability_mode_types.h
@@ -9,6 +9,11 @@
#include <linux/limits.h>
#include <linux/types.h>
+enum xe_survivability_type {
+ XE_SURVIVABILITY_TYPE_BOOT,
+ XE_SURVIVABILITY_TYPE_RUNTIME,
+};
+
struct xe_survivability_info {
char name[NAME_MAX];
u32 reg;
@@ -30,6 +35,9 @@ struct xe_survivability {
/** @mode: boolean to indicate survivability mode */
bool mode;
+
+ /** @type: survivability type */
+ enum xe_survivability_type type;
};
#endif /* _XE_SURVIVABILITY_MODE_TYPES_H_ */
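The survivability_mode attribute described above can be polled from userspace. A minimal sketch of such a check (the PCI address is a placeholder taken from the configfs example; not part of this patch):

	#include <stdio.h>

	int main(void)
	{
		char line[256];
		FILE *f = fopen("/sys/bus/pci/devices/0000:03:00.0/survivability_mode", "r");

		if (!f)
			return 0;	/* attribute absent: not in survivability mode */

		/* The first line reports the type ("Boot" or "Runtime"); boot
		 * failures additionally dump the scratch register breadcrumbs.
		 */
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);

		fclose(f);
		return 0;
	}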
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index a7ff5975873f..7e2db71ff34e 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -6,8 +6,8 @@
#include <drm/drm_drv.h>
#include "xe_bo.h"
+#include "xe_exec_queue_types.h"
#include "xe_gt_stats.h"
-#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_module.h"
#include "xe_pm.h"
@@ -17,6 +17,7 @@
#include "xe_ttm_vram_mgr.h"
#include "xe_vm.h"
#include "xe_vm_types.h"
+#include "xe_vram_types.h"
static bool xe_svm_range_in_vram(struct xe_svm_range *range)
{
@@ -25,9 +26,9 @@ static bool xe_svm_range_in_vram(struct xe_svm_range *range)
* memory.
*/
- struct drm_gpusvm_range_flags flags = {
+ struct drm_gpusvm_pages_flags flags = {
/* Pairs with WRITE_ONCE in drm_gpusvm.c */
- .__flags = READ_ONCE(range->base.flags.__flags),
+ .__flags = READ_ONCE(range->base.pages.flags.__flags),
};
return flags.has_devmem_pages;
@@ -49,15 +50,15 @@ static struct xe_vm *range_to_vm(struct drm_gpusvm_range *r)
return gpusvm_to_vm(r->gpusvm);
}
-#define range_debug(r__, operaton__) \
+#define range_debug(r__, operation__) \
vm_dbg(&range_to_vm(&(r__)->base)->xe->drm, \
"%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
"start=0x%014lx, end=0x%014lx, size=%lu", \
- (operaton__), range_to_vm(&(r__)->base)->usm.asid, \
+ (operation__), range_to_vm(&(r__)->base)->usm.asid, \
(r__)->base.gpusvm, \
xe_svm_range_in_vram((r__)) ? 1 : 0, \
xe_svm_range_has_vram_binding((r__)) ? 1 : 0, \
- (r__)->base.notifier_seq, \
+ (r__)->base.pages.notifier_seq, \
xe_svm_range_start((r__)), xe_svm_range_end((r__)), \
xe_svm_range_size((r__)))
@@ -66,11 +67,6 @@ void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
range_debug(range, operation);
}
-static void *xe_svm_devm_owner(struct xe_device *xe)
-{
- return xe;
-}
-
static struct drm_gpusvm_range *
xe_svm_range_alloc(struct drm_gpusvm *gpusvm)
{
@@ -112,6 +108,11 @@ xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
&vm->svm.garbage_collector.work);
}
+static void xe_svm_tlb_inval_count_stats_incr(struct xe_gt *gt)
+{
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_TLB_INVAL_COUNT, 1);
+}
+
static u8
xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
const struct mmu_notifier_range *mmu_range,
@@ -128,7 +129,7 @@ xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
range_debug(range, "NOTIFIER");
/* Skip if already unmapped or if no binding exist */
- if (range->base.flags.unmapped || !range->tile_present)
+ if (range->base.pages.flags.unmapped || !range->tile_present)
return 0;
range_debug(range, "NOTIFIER - EXECUTE");
@@ -144,13 +145,19 @@ xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
*/
for_each_tile(tile, xe, id)
if (xe_pt_zap_ptes_range(tile, vm, range)) {
- tile_mask |= BIT(id);
/*
* WRITE_ONCE pairs with READ_ONCE in
* xe_vm_has_valid_gpu_mapping()
*/
WRITE_ONCE(range->tile_invalidated,
range->tile_invalidated | BIT(id));
+
+ if (!(tile_mask & BIT(id))) {
+ xe_svm_tlb_inval_count_stats_incr(tile->primary_gt);
+ if (tile->media_gt)
+ xe_svm_tlb_inval_count_stats_incr(tile->media_gt);
+ tile_mask |= BIT(id);
+ }
}
return tile_mask;
@@ -170,6 +177,24 @@ xe_svm_range_notifier_event_end(struct xe_vm *vm, struct drm_gpusvm_range *r,
mmu_range);
}
+static s64 xe_svm_stats_ktime_us_delta(ktime_t start)
+{
+ return IS_ENABLED(CONFIG_DEBUG_FS) ?
+ ktime_us_delta(ktime_get(), start) : 0;
+}
+
+static void xe_svm_tlb_inval_us_stats_incr(struct xe_gt *gt, ktime_t start)
+{
+ s64 us_delta = xe_svm_stats_ktime_us_delta(start);
+
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_TLB_INVAL_US, us_delta);
+}
+
+static ktime_t xe_svm_stats_ktime_get(void)
+{
+ return IS_ENABLED(CONFIG_DEBUG_FS) ? ktime_get() : 0;
+}
+
static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
struct drm_gpusvm_notifier *notifier,
const struct mmu_notifier_range *mmu_range)
@@ -177,8 +202,10 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
struct xe_vm *vm = gpusvm_to_vm(gpusvm);
struct xe_device *xe = vm->xe;
struct drm_gpusvm_range *r, *first;
+ struct xe_tile *tile;
+ ktime_t start = xe_svm_stats_ktime_get();
u64 adj_start = mmu_range->start, adj_end = mmu_range->end;
- u8 tile_mask = 0;
+ u8 tile_mask = 0, id;
long err;
xe_svm_assert_in_notifier(vm);
@@ -224,13 +251,20 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
xe_device_wmb(xe);
- err = xe_vm_range_tilemask_tlb_invalidation(vm, adj_start, adj_end, tile_mask);
+ err = xe_vm_range_tilemask_tlb_inval(vm, adj_start, adj_end, tile_mask);
WARN_ON_ONCE(err);
range_notifier_event_end:
r = first;
drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
xe_svm_range_notifier_event_end(vm, r, mmu_range);
+ for_each_tile(tile, xe, id) {
+ if (tile_mask & BIT(id)) {
+ xe_svm_tlb_inval_us_stats_incr(tile->primary_gt, start);
+ if (tile->media_gt)
+ xe_svm_tlb_inval_us_stats_incr(tile->media_gt, start);
+ }
+ }
}
static int __xe_svm_garbage_collector(struct xe_vm *vm,
@@ -252,24 +286,73 @@ static int __xe_svm_garbage_collector(struct xe_vm *vm,
return 0;
}
+static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 range_start, u64 range_end)
+{
+ struct xe_vma *vma;
+ struct xe_vma_mem_attr default_attr = {
+ .preferred_loc = {
+ .devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE,
+ .migration_policy = DRM_XE_MIGRATE_ALL_PAGES,
+ },
+ .atomic_access = DRM_XE_ATOMIC_UNDEFINED,
+ };
+ int err = 0;
+
+ vma = xe_vm_find_vma_by_addr(vm, range_start);
+ if (!vma)
+ return -EINVAL;
+
+ if (xe_vma_has_default_mem_attrs(vma))
+ return 0;
+
+ vm_dbg(&vm->xe->drm, "Existing VMA start=0x%016llx, vma_end=0x%016llx",
+ xe_vma_start(vma), xe_vma_end(vma));
+
+ if (xe_vma_start(vma) == range_start && xe_vma_end(vma) == range_end) {
+ default_attr.pat_index = vma->attr.default_pat_index;
+ default_attr.default_pat_index = vma->attr.default_pat_index;
+ vma->attr = default_attr;
+ } else {
+ vm_dbg(&vm->xe->drm, "Split VMA start=0x%016llx, vma_end=0x%016llx",
+ range_start, range_end);
+ err = xe_vm_alloc_cpu_addr_mirror_vma(vm, range_start, range_end - range_start);
+ if (err) {
+ drm_warn(&vm->xe->drm, "VMA SPLIT failed: %pe\n", ERR_PTR(err));
+ xe_vm_kill(vm, true);
+ return err;
+ }
+ }
+
+ /*
+ * When called from xe_svm_handle_pagefault the original VMA might have
+ * changed; signal this so the caller looks up the VMA again.
+ */
+ return -EAGAIN;
+}
+
static int xe_svm_garbage_collector(struct xe_vm *vm)
{
struct xe_svm_range *range;
- int err;
+ u64 range_start;
+ u64 range_end;
+ int err, ret = 0;
lockdep_assert_held_write(&vm->lock);
if (xe_vm_is_closed_or_banned(vm))
return -ENOENT;
- spin_lock(&vm->svm.garbage_collector.lock);
for (;;) {
+ spin_lock(&vm->svm.garbage_collector.lock);
range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list,
typeof(*range),
garbage_collector_link);
if (!range)
break;
+ range_start = xe_svm_range_start(range);
+ range_end = xe_svm_range_end(range);
+
list_del(&range->garbage_collector_link);
spin_unlock(&vm->svm.garbage_collector.lock);
@@ -282,11 +365,17 @@ static int xe_svm_garbage_collector(struct xe_vm *vm)
return err;
}
- spin_lock(&vm->svm.garbage_collector.lock);
+ err = xe_svm_range_set_default_attr(vm, range_start, range_end);
+ if (err) {
+ if (err == -EAGAIN)
+ ret = -EAGAIN;
+ else
+ return err;
+ }
}
spin_unlock(&vm->svm.garbage_collector.lock);
- return 0;
+ return ret;
}
static void xe_svm_garbage_collector_work_func(struct work_struct *w)
@@ -306,21 +395,15 @@ static struct xe_vram_region *page_to_vr(struct page *page)
return container_of(page_pgmap(page), struct xe_vram_region, pagemap);
}
-static struct xe_tile *vr_to_tile(struct xe_vram_region *vr)
-{
- return container_of(vr, struct xe_tile, mem.vram);
-}
-
static u64 xe_vram_region_page_to_dpa(struct xe_vram_region *vr,
struct page *page)
{
u64 dpa;
- struct xe_tile *tile = vr_to_tile(vr);
u64 pfn = page_to_pfn(page);
u64 offset;
- xe_tile_assert(tile, is_device_private_page(page));
- xe_tile_assert(tile, (pfn << PAGE_SHIFT) >= vr->hpa_base);
+ xe_assert(vr->xe, is_device_private_page(page));
+ xe_assert(vr->xe, (pfn << PAGE_SHIFT) >= vr->hpa_base);
offset = (pfn << PAGE_SHIFT) - vr->hpa_base;
dpa = vr->dpa_base + offset;
@@ -333,17 +416,74 @@ enum xe_svm_copy_dir {
XE_SVM_COPY_TO_SRAM,
};
-static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
+static void xe_svm_copy_kb_stats_incr(struct xe_gt *gt,
+ const enum xe_svm_copy_dir dir,
+ int kb)
+{
+ if (dir == XE_SVM_COPY_TO_VRAM)
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_DEVICE_COPY_KB, kb);
+ else
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_CPU_COPY_KB, kb);
+}
+
+static void xe_svm_copy_us_stats_incr(struct xe_gt *gt,
+ const enum xe_svm_copy_dir dir,
+ unsigned long npages,
+ ktime_t start)
+{
+ s64 us_delta = xe_svm_stats_ktime_us_delta(start);
+
+ if (dir == XE_SVM_COPY_TO_VRAM) {
+ switch (npages) {
+ case 1:
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_DEVICE_COPY_US,
+ us_delta);
+ break;
+ case 16:
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_DEVICE_COPY_US,
+ us_delta);
+ break;
+ case 512:
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_DEVICE_COPY_US,
+ us_delta);
+ break;
+ }
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_DEVICE_COPY_US,
+ us_delta);
+ } else {
+ switch (npages) {
+ case 1:
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_CPU_COPY_US,
+ us_delta);
+ break;
+ case 16:
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_CPU_COPY_US,
+ us_delta);
+ break;
+ case 512:
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_CPU_COPY_US,
+ us_delta);
+ break;
+ }
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_CPU_COPY_US,
+ us_delta);
+ }
+}
+
+static int xe_svm_copy(struct page **pages,
+ struct drm_pagemap_addr *pagemap_addr,
unsigned long npages, const enum xe_svm_copy_dir dir)
{
struct xe_vram_region *vr = NULL;
- struct xe_tile *tile;
+ struct xe_gt *gt = NULL;
+ struct xe_device *xe;
struct dma_fence *fence = NULL;
unsigned long i;
#define XE_VRAM_ADDR_INVALID ~0x0ull
u64 vram_addr = XE_VRAM_ADDR_INVALID;
int err = 0, pos = 0;
bool sram = dir == XE_SVM_COPY_TO_SRAM;
+ ktime_t start = xe_svm_stats_ktime_get();
/*
* This flow is complex: it locates physically contiguous device pages,
@@ -365,12 +505,13 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
last = (i + 1) == npages;
/* No CPU page and no device pages queue'd to copy */
- if (!dma_addr[i] && vram_addr == XE_VRAM_ADDR_INVALID)
+ if (!pagemap_addr[i].addr && vram_addr == XE_VRAM_ADDR_INVALID)
continue;
if (!vr && spage) {
vr = page_to_vr(spage);
- tile = vr_to_tile(vr);
+ gt = xe_migrate_exec_queue(vr->migrate)->gt;
+ xe = vr->xe;
}
XE_WARN_ON(spage && page_to_vr(spage) != vr);
@@ -379,7 +520,7 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
* first device page, check if physical contiguous on subsequent
* device pages.
*/
- if (dma_addr[i] && spage) {
+ if (pagemap_addr[i].addr && spage) {
__vram_addr = xe_vram_region_page_to_dpa(vr, spage);
if (vram_addr == XE_VRAM_ADDR_INVALID) {
vram_addr = __vram_addr;
@@ -387,6 +528,14 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
}
match = vram_addr + PAGE_SIZE * (i - pos) == __vram_addr;
+ /* Expected with contiguous memory */
+ xe_assert(vr->xe, match);
+
+ if (pagemap_addr[i].order) {
+ i += NR_PAGES(pagemap_addr[i].order) - 1;
+ chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
+ last = (i + 1) == npages;
+ }
}
/*
@@ -401,21 +550,26 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
int incr = (match && last) ? 1 : 0;
if (vram_addr != XE_VRAM_ADDR_INVALID) {
+ xe_svm_copy_kb_stats_incr(gt, dir,
+ (i - pos + incr) *
+ (PAGE_SIZE / SZ_1K));
if (sram) {
- vm_dbg(&tile->xe->drm,
+ vm_dbg(&xe->drm,
"COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
- vram_addr, (u64)dma_addr[pos], i - pos + incr);
- __fence = xe_migrate_from_vram(tile->migrate,
+ vram_addr,
+ (u64)pagemap_addr[pos].addr, i - pos + incr);
+ __fence = xe_migrate_from_vram(vr->migrate,
i - pos + incr,
vram_addr,
- dma_addr + pos);
+ &pagemap_addr[pos]);
} else {
- vm_dbg(&tile->xe->drm,
+ vm_dbg(&xe->drm,
"COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
- (u64)dma_addr[pos], vram_addr, i - pos + incr);
- __fence = xe_migrate_to_vram(tile->migrate,
+ (u64)pagemap_addr[pos].addr, vram_addr,
+ i - pos + incr);
+ __fence = xe_migrate_to_vram(vr->migrate,
i - pos + incr,
- dma_addr + pos,
+ &pagemap_addr[pos],
vram_addr);
}
if (IS_ERR(__fence)) {
@@ -428,7 +582,7 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
}
/* Setup physical address of next device page */
- if (dma_addr[i] && spage) {
+ if (pagemap_addr[i].addr && spage) {
vram_addr = __vram_addr;
pos = i;
} else {
@@ -437,19 +591,21 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
/* Extra mismatched device page, copy it */
if (!match && last && vram_addr != XE_VRAM_ADDR_INVALID) {
+ xe_svm_copy_kb_stats_incr(gt, dir,
+ (PAGE_SIZE / SZ_1K));
if (sram) {
- vm_dbg(&tile->xe->drm,
+ vm_dbg(&xe->drm,
"COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
- vram_addr, (u64)dma_addr[pos], 1);
- __fence = xe_migrate_from_vram(tile->migrate, 1,
+ vram_addr, (u64)pagemap_addr[pos].addr, 1);
+ __fence = xe_migrate_from_vram(vr->migrate, 1,
vram_addr,
- dma_addr + pos);
+ &pagemap_addr[pos]);
} else {
- vm_dbg(&tile->xe->drm,
+ vm_dbg(&xe->drm,
"COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
- (u64)dma_addr[pos], vram_addr, 1);
- __fence = xe_migrate_to_vram(tile->migrate, 1,
- dma_addr + pos,
+ (u64)pagemap_addr[pos].addr, vram_addr, 1);
+ __fence = xe_migrate_to_vram(vr->migrate, 1,
+ &pagemap_addr[pos],
vram_addr);
}
if (IS_ERR(__fence)) {
@@ -470,21 +626,31 @@ err_out:
dma_fence_put(fence);
}
+ /*
+ * XXX: We can't derive the GT here (or anywhere in this function), but
+ * compute always uses the primary GT, so accumulate stats on the likely
+ * GT of the fault.
+ */
+ if (gt)
+ xe_svm_copy_us_stats_incr(gt, dir, npages, start);
+
return err;
#undef XE_MIGRATE_CHUNK_SIZE
#undef XE_VRAM_ADDR_INVALID
}
-static int xe_svm_copy_to_devmem(struct page **pages, dma_addr_t *dma_addr,
+static int xe_svm_copy_to_devmem(struct page **pages,
+ struct drm_pagemap_addr *pagemap_addr,
unsigned long npages)
{
- return xe_svm_copy(pages, dma_addr, npages, XE_SVM_COPY_TO_VRAM);
+ return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_VRAM);
}
-static int xe_svm_copy_to_ram(struct page **pages, dma_addr_t *dma_addr,
+static int xe_svm_copy_to_ram(struct page **pages,
+ struct drm_pagemap_addr *pagemap_addr,
unsigned long npages)
{
- return xe_svm_copy(pages, dma_addr, npages, XE_SVM_COPY_TO_SRAM);
+ return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_SRAM);
}
static struct xe_bo *to_xe_bo(struct drm_pagemap_devmem *devmem_allocation)
@@ -506,9 +672,9 @@ static u64 block_offset_to_pfn(struct xe_vram_region *vr, u64 offset)
return PHYS_PFN(offset + vr->hpa_base);
}
-static struct drm_buddy *tile_to_buddy(struct xe_tile *tile)
+static struct drm_buddy *vram_to_buddy(struct xe_vram_region *vram)
{
- return &tile->mem.vram.ttm.mm;
+ return &vram->ttm.mm;
}
static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocation,
@@ -522,8 +688,7 @@ static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocati
list_for_each_entry(block, blocks, link) {
struct xe_vram_region *vr = block->private;
- struct xe_tile *tile = vr_to_tile(vr);
- struct drm_buddy *buddy = tile_to_buddy(tile);
+ struct drm_buddy *buddy = vram_to_buddy(vr);
u64 block_pfn = block_offset_to_pfn(vr, drm_buddy_block_offset(block));
int i;
@@ -567,22 +732,25 @@ int xe_svm_init(struct xe_vm *vm)
{
int err;
- spin_lock_init(&vm->svm.garbage_collector.lock);
- INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list);
- INIT_WORK(&vm->svm.garbage_collector.work,
- xe_svm_garbage_collector_work_func);
-
- err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
- current->mm, xe_svm_devm_owner(vm->xe), 0,
- vm->size, xe_modparam.svm_notifier_size * SZ_1M,
- &gpusvm_ops, fault_chunk_sizes,
- ARRAY_SIZE(fault_chunk_sizes));
- if (err)
- return err;
-
- drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock);
+ if (vm->flags & XE_VM_FLAG_FAULT_MODE) {
+ spin_lock_init(&vm->svm.garbage_collector.lock);
+ INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list);
+ INIT_WORK(&vm->svm.garbage_collector.work,
+ xe_svm_garbage_collector_work_func);
+
+ err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
+ current->mm, 0, vm->size,
+ xe_modparam.svm_notifier_size * SZ_1M,
+ &gpusvm_ops, fault_chunk_sizes,
+ ARRAY_SIZE(fault_chunk_sizes));
+ drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock);
+ } else {
+ err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM (simple)",
+ &vm->xe->drm, NULL, 0, 0, 0, NULL,
+ NULL, 0);
+ }
- return 0;
+ return err;
}
/**
@@ -653,7 +821,7 @@ bool xe_svm_range_validate(struct xe_vm *vm,
xe_svm_notifier_lock(vm);
ret = (range->tile_present & ~range->tile_invalidated & tile_mask) == tile_mask &&
- (devmem_preferred == range->base.flags.has_devmem_pages);
+ (devmem_preferred == range->base.pages.flags.has_devmem_pages);
xe_svm_notifier_unlock(vm);
@@ -683,66 +851,57 @@ u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 start, u64 end, struct xe_vma *v
}
#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
-static struct xe_vram_region *tile_to_vr(struct xe_tile *tile)
-{
- return &tile->mem.vram;
-}
-
static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
unsigned long start, unsigned long end,
struct mm_struct *mm,
unsigned long timeslice_ms)
{
- struct xe_tile *tile = container_of(dpagemap, typeof(*tile), mem.vram.dpagemap);
- struct xe_device *xe = tile_to_xe(tile);
+ struct xe_vram_region *vr = container_of(dpagemap, typeof(*vr), dpagemap);
+ struct xe_device *xe = vr->xe;
struct device *dev = xe->drm.dev;
- struct xe_vram_region *vr = tile_to_vr(tile);
struct drm_buddy_block *block;
+ struct xe_validation_ctx vctx;
struct list_head *blocks;
+ struct drm_exec exec;
struct xe_bo *bo;
- ktime_t time_end = 0;
- int err, idx;
+ int err = 0, idx;
if (!drm_dev_enter(&xe->drm, &idx))
return -ENODEV;
xe_pm_runtime_get(xe);
- retry:
- bo = xe_bo_create_locked(tile_to_xe(tile), NULL, NULL, end - start,
- ttm_bo_type_device,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_CPU_ADDR_MIRROR);
- if (IS_ERR(bo)) {
- err = PTR_ERR(bo);
- if (xe_vm_validate_should_retry(NULL, err, &time_end))
- goto retry;
- goto out_pm_put;
- }
-
- drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
- &dpagemap_devmem_ops,
- &tile->mem.vram.dpagemap,
- end - start);
+ xe_validation_guard(&vctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
+ bo = xe_bo_create_locked(xe, NULL, NULL, end - start,
+ ttm_bo_type_device,
+ (IS_DGFX(xe) ? XE_BO_FLAG_VRAM(vr) : XE_BO_FLAG_SYSTEM) |
+ XE_BO_FLAG_CPU_ADDR_MIRROR, &exec);
+ drm_exec_retry_on_contention(&exec);
+ if (IS_ERR(bo)) {
+ err = PTR_ERR(bo);
+ xe_validation_retry_on_oom(&vctx, &err);
+ break;
+ }
- blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
- list_for_each_entry(block, blocks, link)
- block->private = vr;
+ drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
+ &dpagemap_devmem_ops, dpagemap, end - start);
- xe_bo_get(bo);
+ blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
+ list_for_each_entry(block, blocks, link)
+ block->private = vr;
- /* Ensure the device has a pm ref while there are device pages active. */
- xe_pm_runtime_get_noresume(xe);
- err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm,
- start, end, timeslice_ms,
- xe_svm_devm_owner(xe));
- if (err)
- xe_svm_devmem_release(&bo->devmem_allocation);
+ xe_bo_get(bo);
- xe_bo_unlock(bo);
- xe_bo_put(bo);
-
-out_pm_put:
+ /* Ensure the device has a pm ref while there are device pages active. */
+ xe_pm_runtime_get_noresume(xe);
+ err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm,
+ start, end, timeslice_ms,
+ xe_svm_devm_owner(xe));
+ if (err)
+ xe_svm_devmem_release(&bo->devmem_allocation);
+ xe_bo_unlock(bo);
+ xe_bo_put(bo);
+ }
xe_pm_runtime_put(xe);
drm_dev_exit(idx);
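The xe_validation_guard()/drm_exec_retry_on_contention() pairing above builds on the generic drm_exec retry loop. A background sketch of that idiom, where obj is an illustrative &drm_gem_object rather than anything from this patch:

	#include <drm/drm_exec.h>

	static int lock_one_obj(struct drm_gem_object *obj)
	{
		struct drm_exec exec;
		int err = 0;

		drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
		drm_exec_until_all_locked(&exec) {
			err = drm_exec_lock_obj(&exec, obj);
			/* Rolls back and restarts the loop body on contention */
			drm_exec_retry_on_contention(&exec);
			if (err)
				break;
		}
		drm_exec_fini(&exec);

		return err;
	}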
@@ -772,17 +931,17 @@ bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vm
struct xe_vm *vm = range_to_vm(&range->base);
u64 range_size = xe_svm_range_size(range);
- if (!range->base.flags.migrate_devmem || !preferred_region_is_vram)
+ if (!range->base.pages.flags.migrate_devmem || !preferred_region_is_vram)
return false;
xe_assert(vm->xe, IS_DGFX(vm->xe));
- if (preferred_region_is_vram && xe_svm_range_in_vram(range)) {
+ if (xe_svm_range_in_vram(range)) {
drm_info(&vm->xe->drm, "Range is already in VRAM\n");
return false;
}
- if (preferred_region_is_vram && range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
+ if (range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
return false;
}
@@ -790,40 +949,78 @@ bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vm
return true;
}
-/**
- * xe_svm_handle_pagefault() - SVM handle page fault
- * @vm: The VM.
- * @vma: The CPU address mirror VMA.
- * @gt: The gt upon the fault occurred.
- * @fault_addr: The GPU fault address.
- * @atomic: The fault atomic access bit.
- *
- * Create GPU bindings for a SVM page fault. Optionally migrate to device
- * memory.
- *
- * Return: 0 on success, negative error code on error.
- */
-int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_gt *gt, u64 fault_addr,
- bool atomic)
+#define DECL_SVM_RANGE_COUNT_STATS(elem, stat) \
+static void xe_svm_range_##elem##_count_stats_incr(struct xe_gt *gt, \
+ struct xe_svm_range *range) \
+{ \
+ switch (xe_svm_range_size(range)) { \
+ case SZ_4K: \
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_##stat##_COUNT, 1); \
+ break; \
+ case SZ_64K: \
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_##stat##_COUNT, 1); \
+ break; \
+ case SZ_2M: \
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_##stat##_COUNT, 1); \
+ break; \
+ } \
+} \
+
+DECL_SVM_RANGE_COUNT_STATS(fault, PAGEFAULT)
+DECL_SVM_RANGE_COUNT_STATS(valid_fault, VALID_PAGEFAULT)
+DECL_SVM_RANGE_COUNT_STATS(migrate, MIGRATE)
+
+#define DECL_SVM_RANGE_US_STATS(elem, stat) \
+static void xe_svm_range_##elem##_us_stats_incr(struct xe_gt *gt, \
+ struct xe_svm_range *range, \
+ ktime_t start) \
+{ \
+ s64 us_delta = xe_svm_stats_ktime_us_delta(start); \
+\
+ switch (xe_svm_range_size(range)) { \
+ case SZ_4K: \
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_##stat##_US, \
+ us_delta); \
+ break; \
+ case SZ_64K: \
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_##stat##_US, \
+ us_delta); \
+ break; \
+ case SZ_2M: \
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_##stat##_US, \
+ us_delta); \
+ break; \
+ } \
+} \
+
+DECL_SVM_RANGE_US_STATS(migrate, MIGRATE)
+DECL_SVM_RANGE_US_STATS(get_pages, GET_PAGES)
+DECL_SVM_RANGE_US_STATS(bind, BIND)
+DECL_SVM_RANGE_US_STATS(fault, PAGEFAULT)
+
+static int __xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_gt *gt, u64 fault_addr,
+ bool need_vram)
{
+ int devmem_possible = IS_DGFX(vm->xe) &&
+ IS_ENABLED(CONFIG_DRM_XE_PAGEMAP);
struct drm_gpusvm_ctx ctx = {
.read_only = xe_vma_read_only(vma),
- .devmem_possible = IS_DGFX(vm->xe) &&
- IS_ENABLED(CONFIG_DRM_XE_PAGEMAP),
- .check_pages_threshold = IS_DGFX(vm->xe) &&
- IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) ? SZ_64K : 0,
- .devmem_only = atomic && IS_DGFX(vm->xe) &&
- IS_ENABLED(CONFIG_DRM_XE_PAGEMAP),
- .timeslice_ms = atomic && IS_DGFX(vm->xe) &&
- IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) ?
+ .devmem_possible = devmem_possible,
+ .check_pages_threshold = devmem_possible ? SZ_64K : 0,
+ .devmem_only = need_vram && devmem_possible,
+ .timeslice_ms = need_vram && devmem_possible ?
vm->xe->atomic_svm_timeslice_ms : 0,
+ .device_private_page_owner = xe_svm_devm_owner(vm->xe),
};
+ struct xe_validation_ctx vctx;
+ struct drm_exec exec;
struct xe_svm_range *range;
struct dma_fence *fence;
+ struct drm_pagemap *dpagemap;
struct xe_tile *tile = gt_to_tile(gt);
int migrate_try_count = ctx.devmem_only ? 3 : 1;
- ktime_t end = 0;
+ ktime_t start = xe_svm_stats_ktime_get(), bind_start, get_pages_start;
int err;
lockdep_assert_held_write(&vm->lock);
@@ -842,17 +1039,34 @@ retry:
if (IS_ERR(range))
return PTR_ERR(range);
- if (ctx.devmem_only && !range->base.flags.migrate_devmem)
- return -EACCES;
+ xe_svm_range_fault_count_stats_incr(gt, range);
- if (xe_svm_range_is_valid(range, tile, ctx.devmem_only))
- return 0;
+ if (ctx.devmem_only && !range->base.pages.flags.migrate_devmem) {
+ err = -EACCES;
+ goto out;
+ }
+
+ if (xe_svm_range_is_valid(range, tile, ctx.devmem_only)) {
+ xe_svm_range_valid_fault_count_stats_incr(gt, range);
+ range_debug(range, "PAGE FAULT - VALID");
+ goto out;
+ }
range_debug(range, "PAGE FAULT");
+ dpagemap = xe_vma_resolve_pagemap(vma, tile);
if (--migrate_try_count >= 0 &&
- xe_svm_range_needs_migrate_to_vram(range, vma, IS_DGFX(vm->xe))) {
+ xe_svm_range_needs_migrate_to_vram(range, vma, !!dpagemap || ctx.devmem_only)) {
+ ktime_t migrate_start = xe_svm_stats_ktime_get();
+
+ /* TODO: For multi-device, dpagemap will be used to find the
+ * remote tile and remote device. xe_svm_alloc_vram will need
+ * to be modified to use dpagemap for future multi-device
+ * support.
+ */
+ xe_svm_range_migrate_count_stats_incr(gt, range);
err = xe_svm_alloc_vram(tile, range, &ctx);
+ xe_svm_range_migrate_us_stats_incr(gt, range, migrate_start);
ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
if (err) {
if (migrate_try_count || !ctx.devmem_only) {
@@ -869,6 +1083,8 @@ retry:
}
}
+ get_pages_start = xe_svm_stats_ktime_get();
+
range_debug(range, "GET PAGES");
err = xe_svm_range_get_pages(vm, range, &ctx);
/* Corner where CPU mappings have changed */
@@ -888,37 +1104,89 @@ retry:
}
if (err) {
range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT");
- goto err_out;
+ goto out;
}
+ xe_svm_range_get_pages_us_stats_incr(gt, range, get_pages_start);
range_debug(range, "PAGE FAULT - BIND");
-retry_bind:
- xe_vm_lock(vm, false);
- fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
- if (IS_ERR(fence)) {
- xe_vm_unlock(vm);
- err = PTR_ERR(fence);
- if (err == -EAGAIN) {
- ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
- range_debug(range, "PAGE FAULT - RETRY BIND");
- goto retry;
+ bind_start = xe_svm_stats_ktime_get();
+ xe_validation_guard(&vctx, &vm->xe->val, &exec, (struct xe_val_flags) {}, err) {
+ err = xe_vm_drm_exec_lock(vm, &exec);
+ drm_exec_retry_on_contention(&exec);
+
+ xe_vm_set_validation_exec(vm, &exec);
+ fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
+ xe_vm_set_validation_exec(vm, NULL);
+ if (IS_ERR(fence)) {
+ drm_exec_retry_on_contention(&exec);
+ err = PTR_ERR(fence);
+ xe_validation_retry_on_oom(&vctx, &err);
+ xe_svm_range_bind_us_stats_incr(gt, range, bind_start);
+ break;
}
- if (xe_vm_validate_should_retry(NULL, err, &end))
- goto retry_bind;
- goto err_out;
}
- xe_vm_unlock(vm);
+ if (err)
+ goto err_out;
dma_fence_wait(fence, false);
dma_fence_put(fence);
+ xe_svm_range_bind_us_stats_incr(gt, range, bind_start);
+
+out:
+ xe_svm_range_fault_us_stats_incr(gt, range, start);
+ return err;
err_out:
+ if (err == -EAGAIN) {
+ ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
+ range_debug(range, "PAGE FAULT - RETRY BIND");
+ goto retry;
+ }
return err;
}
/**
+ * xe_svm_handle_pagefault() - SVM handle page fault
+ * @vm: The VM.
+ * @vma: The CPU address mirror VMA.
+ * @gt: The gt upon the fault occurred.
+ * @fault_addr: The GPU fault address.
+ * @atomic: The fault atomic access bit.
+ *
+ * Create GPU bindings for a SVM page fault. Optionally migrate to device
+ * memory.
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_gt *gt, u64 fault_addr,
+ bool atomic)
+{
+ int need_vram, ret;
+retry:
+ need_vram = xe_vma_need_vram_for_atomic(vm->xe, vma, atomic);
+ if (need_vram < 0)
+ return need_vram;
+
+ ret = __xe_svm_handle_pagefault(vm, vma, gt, fault_addr,
+ need_vram ? true : false);
+ if (ret == -EAGAIN) {
+ /*
+ * Retry on -EAGAIN and re-look up the VMA, as the original VMA
+ * may have been split by xe_svm_range_set_default_attr.
+ */
+ vma = xe_vm_find_vma_by_addr(vm, fault_addr);
+ if (!vma)
+ return -EINVAL;
+
+ goto retry;
+ }
+ return ret;
+}
+
+/**
* xe_svm_has_mapping() - SVM has mappings
* @vm: The VM.
* @start: Start address.
@@ -934,6 +1202,41 @@ bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
}
/**
+ * xe_svm_unmap_address_range - Unmap SVM mappings and ranges
+ * @vm: The VM
+ * @start: start address
+ * @end: end address
+ *
+ * This function unmaps SVM ranges if the start or end address falls inside them.
+ */
+void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end)
+{
+ struct drm_gpusvm_notifier *notifier, *next;
+
+ lockdep_assert_held_write(&vm->lock);
+
+ drm_gpusvm_for_each_notifier_safe(notifier, next, &vm->svm.gpusvm, start, end) {
+ struct drm_gpusvm_range *range, *__next;
+
+ drm_gpusvm_for_each_range_safe(range, __next, notifier, start, end) {
+ if (start > drm_gpusvm_range_start(range) ||
+ end < drm_gpusvm_range_end(range)) {
+ if (IS_DGFX(vm->xe) && xe_svm_range_in_vram(to_xe_range(range)))
+ drm_gpusvm_range_evict(&vm->svm.gpusvm, range);
+ drm_gpusvm_range_get(range);
+ __xe_svm_garbage_collector(vm, to_xe_range(range));
+ if (!list_empty(&to_xe_range(range)->garbage_collector_link)) {
+ spin_lock(&vm->svm.garbage_collector.lock);
+ list_del(&to_xe_range(range)->garbage_collector_link);
+ spin_unlock(&vm->svm.garbage_collector.lock);
+ }
+ drm_gpusvm_range_put(range);
+ }
+ }
+ }
+}
+
+/**
* xe_svm_bo_evict() - SVM evict BO to system memory
* @bo: BO to evict
*
@@ -967,7 +1270,7 @@ struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, max(addr, xe_vma_start(vma)),
xe_vma_start(vma), xe_vma_end(vma), ctx);
if (IS_ERR(r))
- return ERR_PTR(PTR_ERR(r));
+ return ERR_CAST(r);
return to_xe_range(r);
}
@@ -997,8 +1300,94 @@ int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
return err;
}
+/**
+ * xe_svm_ranges_zap_ptes_in_range - clear ptes of svm ranges in input range
+ * @vm: Pointer to the xe_vm structure
+ * @start: Start of the input range
+ * @end: End of the input range
+ *
+ * This function removes the page table entries (PTEs) associated
+ * with the SVM ranges within the given start and end addresses.
+ *
+ * Return: tile_mask indicating which tiles need a TLB invalidation.
+ */
+u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
+{
+ struct drm_gpusvm_notifier *notifier;
+ struct xe_svm_range *range;
+ u64 adj_start, adj_end;
+ struct xe_tile *tile;
+ u8 tile_mask = 0;
+ u8 id;
+
+ lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) &&
+ lockdep_is_held_type(&vm->lock, 0));
+
+ drm_gpusvm_for_each_notifier(notifier, &vm->svm.gpusvm, start, end) {
+ struct drm_gpusvm_range *r = NULL;
+
+ adj_start = max(start, drm_gpusvm_notifier_start(notifier));
+ adj_end = min(end, drm_gpusvm_notifier_end(notifier));
+ drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end) {
+ range = to_xe_range(r);
+ for_each_tile(tile, vm->xe, id) {
+ if (xe_pt_zap_ptes_range(tile, vm, range)) {
+ tile_mask |= BIT(id);
+ /*
+ * WRITE_ONCE pairs with READ_ONCE in
+ * xe_vm_has_valid_gpu_mapping().
+ * Must not fail after setting
+ * tile_invalidated and before
+ * TLB invalidation.
+ */
+ WRITE_ONCE(range->tile_invalidated,
+ range->tile_invalidated | BIT(id));
+ }
+ }
+ }
+ }
+
+ return tile_mask;
+}
+
#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
+static struct drm_pagemap *tile_local_pagemap(struct xe_tile *tile)
+{
+ return &tile->mem.vram->dpagemap;
+}
+
+/**
+ * xe_vma_resolve_pagemap - Resolve the appropriate DRM pagemap for a VMA
+ * @vma: Pointer to the xe_vma structure containing memory attributes
+ * @tile: Pointer to the xe_tile structure used as fallback for VRAM mapping
+ *
+ * This function determines the correct DRM pagemap to use for a given VMA.
+ * It first checks the devmem_fd in the VMA's preferred location. If the
+ * devmem_fd is DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM, it returns NULL,
+ * indicating that no pagemap is available and system memory should be used
+ * as the preferred location. If the devmem_fd is
+ * DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE, it returns the VRAM pagemap
+ * associated with the tile (on dGFX).
+ *
+ * Future support for multi-device configurations may use drm_pagemap_from_fd()
+ * to resolve pagemaps from arbitrary file descriptors.
+ *
+ * Return: A pointer to the resolved drm_pagemap, or NULL if none is applicable.
+ */
+struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
+{
+ s32 fd = (s32)vma->attr.preferred_loc.devmem_fd;
+
+ if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM)
+ return NULL;
+
+ if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE)
+ return IS_DGFX(tile_to_xe(tile)) ? tile_local_pagemap(tile) : NULL;
+
+ /* TODO: Support multi-device with drm_pagemap_from_fd(fd) */
+ return NULL;
+}
+
/**
* xe_svm_alloc_vram()- Allocate device memory pages for range,
* migrating existing data.
@@ -1013,17 +1402,17 @@ int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
{
struct drm_pagemap *dpagemap;
- xe_assert(tile_to_xe(tile), range->base.flags.migrate_devmem);
+ xe_assert(tile_to_xe(tile), range->base.pages.flags.migrate_devmem);
range_debug(range, "ALLOCATE VRAM");
- dpagemap = xe_tile_local_pagemap(tile);
+ dpagemap = tile_local_pagemap(tile);
return drm_pagemap_populate_mm(dpagemap, xe_svm_range_start(range),
xe_svm_range_end(range),
range->base.gpusvm->mm,
ctx->timeslice_ms);
}
-static struct drm_pagemap_device_addr
+static struct drm_pagemap_addr
xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
struct device *dev,
struct page *page,
@@ -1042,7 +1431,7 @@ xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
prot = 0;
}
- return drm_pagemap_device_addr_encode(addr, prot, order, dir);
+ return drm_pagemap_addr_encode(addr, prot, order, dir);
}
static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
@@ -1111,6 +1500,11 @@ int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
return 0;
}
+
+struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
+{
+ return NULL;
+}
#endif
/**
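The copy and fault statistics above bucket ranges by npages, where 1, 16 and 512 4 KiB pages correspond to the 4K, 64K and 2M granules. A small illustrative helper, not part of the patch, makes the mapping explicit:

	static inline const char *svm_granule_name(unsigned long npages)
	{
		switch (npages) {
		case 1:		/* 1 x 4 KiB */
			return "4K";
		case 16:	/* 16 x 4 KiB */
			return "64K";
		case 512:	/* 512 x 4 KiB */
			return "2M";
		default:
			return "other";
		}
	}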
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index da9a69ea0bb1..0955d2ac8d74 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -6,6 +6,20 @@
#ifndef _XE_SVM_H_
#define _XE_SVM_H_
+struct xe_device;
+
+/**
+ * xe_svm_devm_owner() - Return the owner of device private memory
+ * @xe: The xe device.
+ *
+ * Return: The owner of this device's device private memory to use in
+ * hmm_range_fault()-
+ */
+static inline void *xe_svm_devm_owner(struct xe_device *xe)
+{
+ return xe;
+}
+
#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
#include <drm/drm_pagemap.h>
@@ -90,6 +104,12 @@ bool xe_svm_range_validate(struct xe_vm *vm,
u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma);
+void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end);
+
+u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end);
+
+struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile);
+
/**
* xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
* @range: SVM range
@@ -99,7 +119,7 @@ u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *v
static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
{
lockdep_assert_held(&range->base.gpusvm->notifier_lock);
- return range->base.flags.has_dma_mapping;
+ return range->base.pages.flags.has_dma_mapping;
}
/**
@@ -149,21 +169,13 @@ static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
return drm_gpusvm_range_size(&range->base);
}
-#define xe_svm_assert_in_notifier(vm__) \
- lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)
-
-#define xe_svm_notifier_lock(vm__) \
- drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)
-
-#define xe_svm_notifier_unlock(vm__) \
- drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
-
void xe_svm_flush(struct xe_vm *vm);
#else
#include <linux/interval_tree.h>
+#include "xe_vm.h"
-struct drm_pagemap_device_addr;
+struct drm_pagemap_addr;
struct drm_gpusvm_ctx;
struct drm_gpusvm_range;
struct xe_bo;
@@ -178,7 +190,9 @@ struct xe_vram_region;
struct xe_svm_range {
struct {
struct interval_tree_node itree;
- const struct drm_pagemap_device_addr *dma_addr;
+ struct {
+ const struct drm_pagemap_addr *dma_addr;
+ } pages;
} base;
u32 tile_present;
u32 tile_invalidated;
@@ -198,12 +212,21 @@ int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
static inline
int xe_svm_init(struct xe_vm *vm)
{
+#if IS_ENABLED(CONFIG_DRM_GPUSVM)
+ return drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM (simple)", &vm->xe->drm,
+ NULL, 0, 0, 0, NULL, NULL, 0);
+#else
return 0;
+#endif
}
static inline
void xe_svm_fini(struct xe_vm *vm)
{
+#if IS_ENABLED(CONFIG_DRM_GPUSVM)
+ xe_assert(vm->xe, xe_vm_is_closed(vm));
+ drm_gpusvm_fini(&vm->svm.gpusvm);
+#endif
}
static inline
@@ -303,19 +326,64 @@ u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vm
return ULONG_MAX;
}
-#define xe_svm_assert_in_notifier(...) do {} while (0)
+static inline
+void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end)
+{
+}
+
+static inline
+u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
+{
+ return 0;
+}
+
+static inline
+struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
+{
+ return NULL;
+}
+
+static inline void xe_svm_flush(struct xe_vm *vm)
+{
+}
#define xe_svm_range_has_dma_mapping(...) false
+#endif /* CONFIG_DRM_XE_GPUSVM */
+
+#if IS_ENABLED(CONFIG_DRM_GPUSVM) /* Need to support userptr without XE_GPUSVM */
+#define xe_svm_assert_in_notifier(vm__) \
+ lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)
+
+#define xe_svm_assert_held_read(vm__) \
+ lockdep_assert_held_read(&(vm__)->svm.gpusvm.notifier_lock)
+
+#define xe_svm_notifier_lock(vm__) \
+ drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)
+
+#define xe_svm_notifier_lock_interruptible(vm__) \
+ down_read_interruptible(&(vm__)->svm.gpusvm.notifier_lock)
+
+#define xe_svm_notifier_unlock(vm__) \
+ drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
+
+#else
+#define xe_svm_assert_in_notifier(...) do {} while (0)
+
+static inline void xe_svm_assert_held_read(struct xe_vm *vm)
+{
+}
static inline void xe_svm_notifier_lock(struct xe_vm *vm)
{
}
-static inline void xe_svm_notifier_unlock(struct xe_vm *vm)
+static inline int xe_svm_notifier_lock_interruptible(struct xe_vm *vm)
{
+ return 0;
}
-static inline void xe_svm_flush(struct xe_vm *vm)
+static inline void xe_svm_notifier_unlock(struct xe_vm *vm)
{
}
-#endif
+#endif /* CONFIG_DRM_GPUSVM */
+
#endif
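xe_svm.h follows the usual kernel pattern of pairing each real API with a static-inline no-op stub so call sites compile unchanged when the config option is off. A generic sketch of the idiom, with made-up names (CONFIG_FEATURE, feature_handle_fault) standing in for the xe symbols:

	struct feature_ctx;

	#if IS_ENABLED(CONFIG_FEATURE)
	int feature_handle_fault(struct feature_ctx *ctx);
	#else
	static inline int feature_handle_fault(struct feature_ctx *ctx)
	{
		return 0;	/* success-as-no-op keeps call sites branch-free */
	}
	#endif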
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
index f87276df18f2..82872a51f098 100644
--- a/drivers/gpu/drm/xe/xe_sync.c
+++ b/drivers/gpu/drm/xe/xe_sync.c
@@ -77,6 +77,7 @@ static void user_fence_worker(struct work_struct *w)
{
struct xe_user_fence *ufence = container_of(w, struct xe_user_fence, worker);
+ WRITE_ONCE(ufence->signalled, 1);
if (mmget_not_zero(ufence->mm)) {
kthread_use_mm(ufence->mm);
if (copy_to_user(ufence->addr, &ufence->value, sizeof(ufence->value)))
@@ -91,7 +92,6 @@ static void user_fence_worker(struct work_struct *w)
* Wake up waiters only after updating the ufence state, allowing the UMD
* to safely reuse the same ufence without encountering -EBUSY errors.
*/
- WRITE_ONCE(ufence->signalled, 1);
wake_up_all(&ufence->xe->ufence_wq);
user_fence_put(ufence);
}
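Moving the WRITE_ONCE() ahead of the user copy publishes the signalled state before any waiter can be woken; the reader side pairs with it via READ_ONCE(). A condensed sketch of the pattern under those assumptions (function names are illustrative):

	static void signal_side(struct xe_user_fence *ufence, wait_queue_head_t *wq)
	{
		/* Publish state first so a woken waiter always observes it */
		WRITE_ONCE(ufence->signalled, 1);
		wake_up_all(wq);
	}

	static int wait_side(struct xe_user_fence *ufence, wait_queue_head_t *wq)
	{
		/* Pairs with the WRITE_ONCE() in signal_side() */
		return wait_event_interruptible(*wq, READ_ONCE(ufence->signalled));
	}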
diff --git a/drivers/gpu/drm/xe/xe_tile.c b/drivers/gpu/drm/xe/xe_tile.c
index 86e9811e60ba..d49ba3401963 100644
--- a/drivers/gpu/drm/xe/xe_tile.c
+++ b/drivers/gpu/drm/xe/xe_tile.c
@@ -7,6 +7,7 @@
#include <drm/drm_managed.h>
+#include "xe_bo.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
@@ -19,6 +20,8 @@
#include "xe_tile_sysfs.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_wa.h"
+#include "xe_vram.h"
+#include "xe_vram_types.h"
/**
* DOC: Multi-tile Design
@@ -92,6 +95,35 @@ static int xe_tile_alloc(struct xe_tile *tile)
if (!tile->mem.ggtt)
return -ENOMEM;
+ tile->migrate = xe_migrate_alloc(tile);
+ if (!tile->migrate)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * xe_tile_alloc_vram - Perform per-tile VRAM structs allocation
+ * @tile: Tile to perform allocations for
+ *
+ * Allocates VRAM per-tile data structures using DRM-managed allocations.
+ * Does not touch the hardware.
+ *
+ * Returns -ENOMEM if allocations fail, otherwise 0.
+ */
+int xe_tile_alloc_vram(struct xe_tile *tile)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_vram_region *vram;
+
+ if (!IS_DGFX(xe))
+ return 0;
+
+ vram = xe_vram_region_alloc(xe, tile->id, XE_PL_VRAM0 + tile->id);
+ if (!vram)
+ return -ENOMEM;
+ tile->mem.vram = vram;
+
return 0;
}
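
The split between xe_tile_alloc(), xe_tile_alloc_vram() and xe_tile_init_noalloc() implies a probe-time ordering along these lines (a sketch only; the actual call sites are outside this hunk):

    err = xe_tile_alloc_vram(tile);     /* software-only: xe_vram_region structs */
    if (err)
        return err;
    /* ... VRAM probed and region sizes filled in ... */
    err = xe_tile_init_noalloc(tile);   /* TTM VRAM manager, sysfs, workarounds */
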
@@ -127,21 +159,6 @@ int xe_tile_init_early(struct xe_tile *tile, struct xe_device *xe, u8 id)
}
ALLOW_ERROR_INJECTION(xe_tile_init_early, ERRNO); /* See xe_pci_probe() */
-static int tile_ttm_mgr_init(struct xe_tile *tile)
-{
- struct xe_device *xe = tile_to_xe(tile);
- int err;
-
- if (tile->mem.vram.usable_size) {
- err = xe_ttm_vram_mgr_init(tile, &tile->mem.vram.ttm);
- if (err)
- return err;
- xe->info.mem_region_mask |= BIT(tile->id) << 1;
- }
-
- return 0;
-}
-
/**
* xe_tile_init_noalloc - Init tile up to the point where allocations can happen.
* @tile: The tile to initialize.
@@ -159,16 +176,19 @@ static int tile_ttm_mgr_init(struct xe_tile *tile)
int xe_tile_init_noalloc(struct xe_tile *tile)
{
struct xe_device *xe = tile_to_xe(tile);
- int err;
-
- err = tile_ttm_mgr_init(tile);
- if (err)
- return err;
xe_wa_apply_tile_workarounds(tile);
if (xe->info.has_usm && IS_DGFX(xe))
- xe_devm_add(tile, &tile->mem.vram);
+ xe_devm_add(tile, tile->mem.vram);
+
+ if (IS_DGFX(xe) && !ttm_resource_manager_used(&tile->mem.vram->ttm.manager)) {
+ int err = xe_ttm_vram_mgr_init(xe, tile->mem.vram);
+
+ if (err)
+ return err;
+ xe->info.mem_region_mask |= BIT(tile->mem.vram->id) << 1;
+ }
return xe_tile_sysfs_init(tile);
}
diff --git a/drivers/gpu/drm/xe/xe_tile.h b/drivers/gpu/drm/xe/xe_tile.h
index cc33e8733983..dceb6297aa01 100644
--- a/drivers/gpu/drm/xe/xe_tile.h
+++ b/drivers/gpu/drm/xe/xe_tile.h
@@ -14,19 +14,9 @@ int xe_tile_init_early(struct xe_tile *tile, struct xe_device *xe, u8 id);
int xe_tile_init_noalloc(struct xe_tile *tile);
int xe_tile_init(struct xe_tile *tile);
-void xe_tile_migrate_wait(struct xe_tile *tile);
+int xe_tile_alloc_vram(struct xe_tile *tile);
-#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
-static inline struct drm_pagemap *xe_tile_local_pagemap(struct xe_tile *tile)
-{
- return &tile->mem.vram.dpagemap;
-}
-#else
-static inline struct drm_pagemap *xe_tile_local_pagemap(struct xe_tile *tile)
-{
- return NULL;
-}
-#endif
+void xe_tile_migrate_wait(struct xe_tile *tile);
static inline bool xe_tile_is_root(struct xe_tile *tile)
{
diff --git a/drivers/gpu/drm/xe/xe_tile_debugfs.c b/drivers/gpu/drm/xe/xe_tile_debugfs.c
new file mode 100644
index 000000000000..5523874cba7b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tile_debugfs.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+#include <drm/drm_debugfs.h>
+
+#include "xe_pm.h"
+#include "xe_sa.h"
+#include "xe_tile_debugfs.h"
+
+static struct xe_tile *node_to_tile(struct drm_info_node *node)
+{
+ return node->dent->d_parent->d_inode->i_private;
+}
+
+/**
+ * tile_debugfs_simple_show - A show callback for struct drm_info_list
+ * @m: the &seq_file
+ * @data: data used by the drm debugfs helpers
+ *
+ * This callback can be used in struct drm_info_list to describe debugfs
+ * files that are &xe_tile specific.
+ *
+ * It is assumed that those debugfs files will be created in a directory entry
+ * whose struct dentry d_inode->i_private points to the &xe_tile.
+ *
+ * /sys/kernel/debug/dri/0/
+ * ├── tile0/ # tile = dentry->d_inode->i_private
+ * │   ├── id # tile = dentry->d_parent->d_inode->i_private
+ *
+ * This function assumes that &m->private will be set to the &struct
+ * drm_info_node corresponding to the instance of the info on a given &struct
+ * drm_minor (see struct drm_info_list.show for details).
+ *
+ * This function also assumes that struct drm_info_list.data will point to the
+ * function that will actually print the file content::
+ *
+ * int (*print)(struct xe_tile *, struct drm_printer *)
+ *
+ * Example::
+ *
+ * int tile_id(struct xe_tile *tile, struct drm_printer *p)
+ * {
+ * drm_printf(p, "%u\n", tile->id);
+ * return 0;
+ * }
+ *
+ * static const struct drm_info_list info[] = {
+ * { .name = "id", .show = tile_debugfs_simple_show, .data = tile_id },
+ * };
+ *
+ * dir = debugfs_create_dir("tile0", parent);
+ * dir->d_inode->i_private = tile;
+ * drm_debugfs_create_files(info, ARRAY_SIZE(info), dir, minor);
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int tile_debugfs_simple_show(struct seq_file *m, void *data)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_info_node *node = m->private;
+ struct xe_tile *tile = node_to_tile(node);
+ int (*print)(struct xe_tile *, struct drm_printer *) = node->info_ent->data;
+
+ return print(tile, &p);
+}
+
+/**
+ * tile_debugfs_show_with_rpm - A show callback for struct drm_info_list
+ * @m: the &seq_file
+ * @data: data used by the drm debugfs helpers
+ *
+ * Similar to tile_debugfs_simple_show() but implicitly takes a RPM ref.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int tile_debugfs_show_with_rpm(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct xe_tile *tile = node_to_tile(node);
+ struct xe_device *xe = tile_to_xe(tile);
+ int ret;
+
+ xe_pm_runtime_get(xe);
+ ret = tile_debugfs_simple_show(m, data);
+ xe_pm_runtime_put(xe);
+
+ return ret;
+}
+
+static int sa_info(struct xe_tile *tile, struct drm_printer *p)
+{
+ drm_suballoc_dump_debug_info(&tile->mem.kernel_bb_pool->base, p,
+ xe_sa_manager_gpu_addr(tile->mem.kernel_bb_pool));
+
+ return 0;
+}
+
+/* only for debugfs files which can be safely used on the VF */
+static const struct drm_info_list vf_safe_debugfs_list[] = {
+ { "sa_info", .show = tile_debugfs_show_with_rpm, .data = sa_info },
+};
+
+/**
+ * xe_tile_debugfs_register - Register tile's debugfs attributes
+ * @tile: the &xe_tile to register
+ *
+ * Create debugfs sub-directory with a name that includes a tile ID and
+ * then creates set of debugfs files (attributes) specific to this tile.
+ */
+void xe_tile_debugfs_register(struct xe_tile *tile)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ struct drm_minor *minor = xe->drm.primary;
+ struct dentry *root = minor->debugfs_root;
+ char name[8];
+
+ snprintf(name, sizeof(name), "tile%u", tile->id);
+ tile->debugfs = debugfs_create_dir(name, root);
+ if (IS_ERR(tile->debugfs))
+ return;
+
+ /*
+ * Store the xe_tile pointer as private data of the tile/ directory
+ * node so other tile specific attributes under that directory may
+ * refer to it by looking at its parent node private data.
+ */
+ tile->debugfs->d_inode->i_private = tile;
+
+ drm_debugfs_create_files(vf_safe_debugfs_list,
+ ARRAY_SIZE(vf_safe_debugfs_list),
+ tile->debugfs, minor);
+}
diff --git a/drivers/gpu/drm/xe/xe_tile_debugfs.h b/drivers/gpu/drm/xe/xe_tile_debugfs.h
new file mode 100644
index 000000000000..0e5f724de37f
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tile_debugfs.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_TILE_DEBUGFS_H_
+#define _XE_TILE_DEBUGFS_H_
+
+struct xe_tile;
+
+void xe_tile_debugfs_register(struct xe_tile *tile);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_tile_printk.h b/drivers/gpu/drm/xe/xe_tile_printk.h
new file mode 100644
index 000000000000..63640a42685d
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tile_printk.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_TILE_PRINTK_H_
+#define _XE_TILE_PRINTK_H_
+
+#include "xe_printk.h"
+
+#define __XE_TILE_PRINTK_FMT(_tile, _fmt, _args...) "Tile%u: " _fmt, (_tile)->id, ##_args
+
+#define xe_tile_printk(_tile, _level, _fmt, ...) \
+ xe_printk((_tile)->xe, _level, __XE_TILE_PRINTK_FMT((_tile), _fmt, ##__VA_ARGS__))
+
+#define xe_tile_err(_tile, _fmt, ...) \
+ xe_tile_printk((_tile), err, _fmt, ##__VA_ARGS__)
+
+#define xe_tile_err_once(_tile, _fmt, ...) \
+ xe_tile_printk((_tile), err_once, _fmt, ##__VA_ARGS__)
+
+#define xe_tile_err_ratelimited(_tile, _fmt, ...) \
+ xe_tile_printk((_tile), err_ratelimited, _fmt, ##__VA_ARGS__)
+
+#define xe_tile_warn(_tile, _fmt, ...) \
+ xe_tile_printk((_tile), warn, _fmt, ##__VA_ARGS__)
+
+#define xe_tile_notice(_tile, _fmt, ...) \
+ xe_tile_printk((_tile), notice, _fmt, ##__VA_ARGS__)
+
+#define xe_tile_info(_tile, _fmt, ...) \
+ xe_tile_printk((_tile), info, _fmt, ##__VA_ARGS__)
+
+#define xe_tile_dbg(_tile, _fmt, ...) \
+ xe_tile_printk((_tile), dbg, _fmt, ##__VA_ARGS__)
+
+#define xe_tile_WARN_type(_tile, _type, _condition, _fmt, ...) \
+ xe_WARN##_type((_tile)->xe, _condition, _fmt, ## __VA_ARGS__)
+
+#define xe_tile_WARN(_tile, _condition, _fmt, ...) \
+ xe_tile_WARN_type((_tile),, _condition, __XE_TILE_PRINTK_FMT((_tile), _fmt, ##__VA_ARGS__))
+
+#define xe_tile_WARN_ONCE(_tile, _condition, _fmt, ...) \
+ xe_tile_WARN_type((_tile), _ONCE, _condition, __XE_TILE_PRINTK_FMT((_tile), _fmt, ##__VA_ARGS__))
+
+#define xe_tile_WARN_ON(_tile, _condition) \
+ xe_tile_WARN((_tile), _condition, "%s(%s)", "WARN_ON", __stringify(_condition))
+
+#define xe_tile_WARN_ON_ONCE(_tile, _condition) \
+ xe_tile_WARN_ONCE((_tile), _condition, "%s(%s)", "WARN_ON_ONCE", __stringify(_condition))
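
For illustration, a call such as the following, assuming a tile with id == 1:

    xe_tile_WARN_ON(tile, len > SZ_4K);
    /* expands through xe_tile_WARN() and emits "Tile1: WARN_ON(len > SZ_4K)" */
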
+
+static inline void __xe_tile_printfn_err(struct drm_printer *p, struct va_format *vaf)
+{
+ struct xe_tile *tile = p->arg;
+
+ xe_tile_err(tile, "%pV", vaf);
+}
+
+static inline void __xe_tile_printfn_info(struct drm_printer *p, struct va_format *vaf)
+{
+ struct xe_tile *tile = p->arg;
+
+ xe_tile_info(tile, "%pV", vaf);
+}
+
+static inline void __xe_tile_printfn_dbg(struct drm_printer *p, struct va_format *vaf)
+{
+ struct xe_tile *tile = p->arg;
+ struct drm_printer dbg;
+
+ /*
+ * The original xe_tile_dbg() callsite annotations are useless here,
+ * redirect to the tweaked xe_dbg_printer() instead.
+ */
+ dbg = xe_dbg_printer(tile->xe);
+ dbg.origin = p->origin;
+
+ drm_printf(&dbg, __XE_TILE_PRINTK_FMT(tile, "%pV", vaf));
+}
+
+/**
+ * xe_tile_err_printer - Construct a &drm_printer that outputs to xe_tile_err()
+ * @tile: the &xe_tile pointer to use in xe_tile_err()
+ *
+ * Return: The &drm_printer object.
+ */
+static inline struct drm_printer xe_tile_err_printer(struct xe_tile *tile)
+{
+ struct drm_printer p = {
+ .printfn = __xe_tile_printfn_err,
+ .arg = tile,
+ };
+ return p;
+}
+
+/**
+ * xe_tile_info_printer - Construct a &drm_printer that outputs to xe_tile_info()
+ * @tile: the &xe_tile pointer to use in xe_tile_info()
+ *
+ * Return: The &drm_printer object.
+ */
+static inline struct drm_printer xe_tile_info_printer(struct xe_tile *tile)
+{
+ struct drm_printer p = {
+ .printfn = __xe_tile_printfn_info,
+ .arg = tile,
+ };
+ return p;
+}
+
+/**
+ * xe_tile_dbg_printer - Construct a &drm_printer that outputs like xe_tile_dbg()
+ * @tile: the &xe_tile pointer to use in xe_tile_dbg()
+ *
+ * Return: The &drm_printer object.
+ */
+static inline struct drm_printer xe_tile_dbg_printer(struct xe_tile *tile)
+{
+ struct drm_printer p = {
+ .printfn = __xe_tile_printfn_dbg,
+ .arg = tile,
+ .origin = (const void *)_THIS_IP_,
+ };
+ return p;
+}
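
A minimal usage sketch for the printer constructors above, assuming a valid tile pointer:

    struct drm_printer p = xe_tile_info_printer(tile);

    drm_printf(&p, "GGTT ready\n"); /* logged as "Tile0: GGTT ready" on tile 0 */
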
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_tile_sysfs.c b/drivers/gpu/drm/xe/xe_tile_sysfs.c
index b804234a6551..9e1236a9ec67 100644
--- a/drivers/gpu/drm/xe/xe_tile_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_tile_sysfs.c
@@ -44,16 +44,18 @@ int xe_tile_sysfs_init(struct xe_tile *tile)
kt->tile = tile;
err = kobject_add(&kt->base, &dev->kobj, "tile%d", tile->id);
- if (err) {
- kobject_put(&kt->base);
- return err;
- }
+ if (err)
+ goto err_object;
tile->sysfs = &kt->base;
err = xe_vram_freq_sysfs_init(tile);
if (err)
- return err;
+ goto err_object;
return devm_add_action_or_reset(xe->drm.dev, tile_sysfs_fini, tile);
+
+err_object:
+ kobject_put(&kt->base);
+ return err;
}
diff --git a/drivers/gpu/drm/xe/xe_tlb_inval.c b/drivers/gpu/drm/xe/xe_tlb_inval.c
new file mode 100644
index 000000000000..918a59e686ea
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tlb_inval.c
@@ -0,0 +1,433 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "abi/guc_actions_abi.h"
+#include "xe_device.h"
+#include "xe_force_wake.h"
+#include "xe_gt.h"
+#include "xe_gt_printk.h"
+#include "xe_gt_stats.h"
+#include "xe_guc.h"
+#include "xe_guc_ct.h"
+#include "xe_guc_tlb_inval.h"
+#include "xe_mmio.h"
+#include "xe_pm.h"
+#include "xe_tlb_inval.h"
+#include "xe_trace.h"
+
+/**
+ * DOC: Xe TLB invalidation
+ *
+ * Xe TLB invalidation is implemented in two layers. The first is the frontend
+ * API, which provides an interface for TLB invalidations to the driver code.
+ * The frontend handles seqno assignment, synchronization (fences), and the
+ * timeout mechanism. The frontend is implemented via an embedded structure
+ * xe_tlb_inval that includes a set of ops hooking into the backend. The backend
+ * interacts with the hardware (or firmware) to perform the actual invalidation.
+ */
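
A minimal sketch of frontend usage, mirroring xe_tlb_inval_vm() below (tlb_inval, start, end and asid are assumed to be in scope):

    struct xe_tlb_inval_fence fence;

    xe_tlb_inval_fence_init(tlb_inval, &fence, true); /* stack fence */
    xe_tlb_inval_range(tlb_inval, &fence, start, end, asid);
    xe_tlb_inval_fence_wait(&fence); /* blocks until the seqno is observed */
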
+
+#define FENCE_STACK_BIT DMA_FENCE_FLAG_USER_BITS
+
+static void xe_tlb_inval_fence_fini(struct xe_tlb_inval_fence *fence)
+{
+ if (WARN_ON_ONCE(!fence->tlb_inval))
+ return;
+
+ xe_pm_runtime_put(fence->tlb_inval->xe);
+ fence->tlb_inval = NULL; /* fini() should be called once */
+}
+
+static void
+xe_tlb_inval_fence_signal(struct xe_tlb_inval_fence *fence)
+{
+ bool stack = test_bit(FENCE_STACK_BIT, &fence->base.flags);
+
+ lockdep_assert_held(&fence->tlb_inval->pending_lock);
+
+ list_del(&fence->link);
+ trace_xe_tlb_inval_fence_signal(fence->tlb_inval->xe, fence);
+ xe_tlb_inval_fence_fini(fence);
+ dma_fence_signal(&fence->base);
+ if (!stack)
+ dma_fence_put(&fence->base);
+}
+
+static void
+xe_tlb_inval_fence_signal_unlocked(struct xe_tlb_inval_fence *fence)
+{
+ struct xe_tlb_inval *tlb_inval = fence->tlb_inval;
+
+ spin_lock_irq(&tlb_inval->pending_lock);
+ xe_tlb_inval_fence_signal(fence);
+ spin_unlock_irq(&tlb_inval->pending_lock);
+}
+
+static void xe_tlb_inval_fence_timeout(struct work_struct *work)
+{
+ struct xe_tlb_inval *tlb_inval = container_of(work, struct xe_tlb_inval,
+ fence_tdr.work);
+ struct xe_device *xe = tlb_inval->xe;
+ struct xe_tlb_inval_fence *fence, *next;
+ long timeout_delay = tlb_inval->ops->timeout_delay(tlb_inval);
+
+ tlb_inval->ops->flush(tlb_inval);
+
+ spin_lock_irq(&tlb_inval->pending_lock);
+ list_for_each_entry_safe(fence, next,
+ &tlb_inval->pending_fences, link) {
+ s64 since_inval_ms = ktime_ms_delta(ktime_get(),
+ fence->inval_time);
+
+ if (msecs_to_jiffies(since_inval_ms) < timeout_delay)
+ break;
+
+ trace_xe_tlb_inval_fence_timeout(xe, fence);
+ drm_err(&xe->drm,
+ "TLB invalidation fence timeout, seqno=%d recv=%d",
+ fence->seqno, tlb_inval->seqno_recv);
+
+ fence->base.error = -ETIME;
+ xe_tlb_inval_fence_signal(fence);
+ }
+ if (!list_empty(&tlb_inval->pending_fences))
+ queue_delayed_work(system_wq, &tlb_inval->fence_tdr,
+ timeout_delay);
+ spin_unlock_irq(&tlb_inval->pending_lock);
+}
+
+/**
+ * tlb_inval_fini - Clean up TLB invalidation state
+ * @drm: the &drm_device
+ * @arg: pointer to the &struct xe_tlb_inval
+ *
+ * Cancel pending fence workers and clean up any additional
+ * TLB invalidation state.
+ */
+static void tlb_inval_fini(struct drm_device *drm, void *arg)
+{
+ struct xe_tlb_inval *tlb_inval = arg;
+
+ xe_tlb_inval_reset(tlb_inval);
+}
+
+/**
+ * xe_gt_tlb_inval_init_early - Initialize TLB invalidation state
+ * @gt: GT structure
+ *
+ * Initialize TLB invalidation state, purely software initialization, should
+ * be called once during driver load.
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_gt_tlb_inval_init_early(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_tlb_inval *tlb_inval = &gt->tlb_inval;
+ int err;
+
+ tlb_inval->xe = xe;
+ tlb_inval->seqno = 1;
+ INIT_LIST_HEAD(&tlb_inval->pending_fences);
+ spin_lock_init(&tlb_inval->pending_lock);
+ spin_lock_init(&tlb_inval->lock);
+ INIT_DELAYED_WORK(&tlb_inval->fence_tdr, xe_tlb_inval_fence_timeout);
+
+ err = drmm_mutex_init(&xe->drm, &tlb_inval->seqno_lock);
+ if (err)
+ return err;
+
+ tlb_inval->job_wq = drmm_alloc_ordered_workqueue(&xe->drm,
+ "gt-tlb-inval-job-wq",
+ WQ_MEM_RECLAIM);
+ if (IS_ERR(tlb_inval->job_wq))
+ return PTR_ERR(tlb_inval->job_wq);
+
+ /* XXX: Blindly setting up backend to GuC */
+ xe_guc_tlb_inval_init_early(&gt->uc.guc, tlb_inval);
+
+ return drmm_add_action_or_reset(&xe->drm, tlb_inval_fini, tlb_inval);
+}
+
+/**
+ * xe_tlb_inval_reset() - TLB invalidation reset
+ * @tlb_inval: TLB invalidation client
+ *
+ * Signal any pending invalidation fences, should be called during a GT reset
+ */
+void xe_tlb_inval_reset(struct xe_tlb_inval *tlb_inval)
+{
+ struct xe_tlb_inval_fence *fence, *next;
+ int pending_seqno;
+
+ /*
+ * We can get here before the backends are even initialized if we're
+ * wedging very early, in which case there are not going to be any
+ * pending fences, so we can bail immediately.
+ */
+ if (!tlb_inval->ops->initialized(tlb_inval))
+ return;
+
+ /*
+ * Backend is already disabled at this point. No new TLB requests can
+ * appear.
+ */
+
+ mutex_lock(&tlb_inval->seqno_lock);
+ spin_lock_irq(&tlb_inval->pending_lock);
+ cancel_delayed_work(&tlb_inval->fence_tdr);
+ /*
+ * We might have various kworkers waiting for TLB flushes to complete
+ * which are not tracked with an explicit TLB fence, however at this
+ * stage that will never happen since the backend is already disabled,
+ * so make sure we signal them here under the assumption that we have
+ * completed a full GT reset.
+ */
+ if (tlb_inval->seqno == 1)
+ pending_seqno = TLB_INVALIDATION_SEQNO_MAX - 1;
+ else
+ pending_seqno = tlb_inval->seqno - 1;
+ WRITE_ONCE(tlb_inval->seqno_recv, pending_seqno);
+
+ list_for_each_entry_safe(fence, next,
+ &tlb_inval->pending_fences, link)
+ xe_tlb_inval_fence_signal(fence);
+ spin_unlock_irq(&tlb_inval->pending_lock);
+ mutex_unlock(&tlb_inval->seqno_lock);
+}
+
+static bool xe_tlb_inval_seqno_past(struct xe_tlb_inval *tlb_inval, int seqno)
+{
+ int seqno_recv = READ_ONCE(tlb_inval->seqno_recv);
+
+ lockdep_assert_held(&tlb_inval->pending_lock);
+
+ if (seqno - seqno_recv < -(TLB_INVALIDATION_SEQNO_MAX / 2))
+ return false;
+
+ if (seqno - seqno_recv > (TLB_INVALIDATION_SEQNO_MAX / 2))
+ return true;
+
+ return seqno_recv >= seqno;
+}
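
The comparison above is wrap-safe. A few worked values, using TLB_INVALIDATION_SEQNO_MAX = 0x100000 (so MAX/2 = 0x80000):

    seqno_recv = 0xFFFFE, seqno = 3       -> 3 - 0xFFFFE < -0x80000 -> false (pending, post-wrap)
    seqno_recv = 3,       seqno = 0xFFFFE -> 0xFFFFE - 3 >  0x80000 -> true  (already signalled)
    seqno_recv = 42,      seqno = 40      -> 42 >= 40               -> true  (already signalled)
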
+
+static void xe_tlb_inval_fence_prep(struct xe_tlb_inval_fence *fence)
+{
+ struct xe_tlb_inval *tlb_inval = fence->tlb_inval;
+
+ fence->seqno = tlb_inval->seqno;
+ trace_xe_tlb_inval_fence_send(tlb_inval->xe, fence);
+
+ spin_lock_irq(&tlb_inval->pending_lock);
+ fence->inval_time = ktime_get();
+ list_add_tail(&fence->link, &tlb_inval->pending_fences);
+
+ if (list_is_singular(&tlb_inval->pending_fences))
+ queue_delayed_work(system_wq, &tlb_inval->fence_tdr,
+ tlb_inval->ops->timeout_delay(tlb_inval));
+ spin_unlock_irq(&tlb_inval->pending_lock);
+
+ tlb_inval->seqno = (tlb_inval->seqno + 1) %
+ TLB_INVALIDATION_SEQNO_MAX;
+ if (!tlb_inval->seqno)
+ tlb_inval->seqno = 1;
+}
+
+#define xe_tlb_inval_issue(__tlb_inval, __fence, op, args...) \
+({ \
+ int __ret; \
+ \
+ xe_assert((__tlb_inval)->xe, (__tlb_inval)->ops); \
+ xe_assert((__tlb_inval)->xe, (__fence)); \
+ \
+ mutex_lock(&(__tlb_inval)->seqno_lock); \
+ xe_tlb_inval_fence_prep((__fence)); \
+ __ret = op((__tlb_inval), (__fence)->seqno, ##args); \
+ if (__ret < 0) \
+ xe_tlb_inval_fence_signal_unlocked((__fence)); \
+ mutex_unlock(&(__tlb_inval)->seqno_lock); \
+ \
+ __ret == -ECANCELED ? 0 : __ret; \
+})
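
Note the final statement maps -ECANCELED to 0: the backends return -ECANCELED when mid-reset, presumably because a GT reset flushes the TLBs anyway, and the fence has already been signalled via the error path above. A sketch of what a caller sees:

    int err = xe_tlb_inval_all(tlb_inval, fence);
    /* err == 0 covers both "issued" and "cancelled mid-reset"; in both
     * cases the fence will signal. Any other negative err is a real
     * failure. */
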
+
+/**
+ * xe_tlb_inval_all() - Issue a TLB invalidation for all TLBs
+ * @tlb_inval: TLB invalidation client
+ * @fence: invalidation fence which will be signaled on TLB invalidation
+ * completion
+ *
+ * Issue a TLB invalidation for all TLBs. Completion of the TLB invalidation is
+ * asynchronous and the caller can use the invalidation fence to wait for
+ * completion.
+ *
+ * Return: 0 on success, negative error code on error
+ */
+int xe_tlb_inval_all(struct xe_tlb_inval *tlb_inval,
+ struct xe_tlb_inval_fence *fence)
+{
+ return xe_tlb_inval_issue(tlb_inval, fence, tlb_inval->ops->all);
+}
+
+/**
+ * xe_tlb_inval_ggtt() - Issue a TLB invalidation for the GGTT
+ * @tlb_inval: TLB invalidation client
+ *
+ * Issue a TLB invalidation for the GGTT and wait for the invalidation to
+ * complete before returning.
+ *
+ * Return: 0 on success, negative error code on error
+ */
+int xe_tlb_inval_ggtt(struct xe_tlb_inval *tlb_inval)
+{
+ struct xe_tlb_inval_fence fence, *fence_ptr = &fence;
+ int ret;
+
+ xe_tlb_inval_fence_init(tlb_inval, fence_ptr, true);
+ ret = xe_tlb_inval_issue(tlb_inval, fence_ptr, tlb_inval->ops->ggtt);
+ xe_tlb_inval_fence_wait(fence_ptr);
+
+ return ret;
+}
+
+/**
+ * xe_tlb_inval_range() - Issue a TLB invalidation for an address range
+ * @tlb_inval: TLB invalidation client
+ * @fence: invalidation fence which will be signaled on TLB invalidation
+ * completion
+ * @start: start address
+ * @end: end address
+ * @asid: address space id
+ *
+ * Issue a range-based TLB invalidation if supported; if not, fall back to a
+ * full TLB invalidation. Completion of the TLB invalidation is asynchronous
+ * and the caller can use the invalidation fence to wait for completion.
+ *
+ * Return: Negative error code on error, 0 on success
+ */
+int xe_tlb_inval_range(struct xe_tlb_inval *tlb_inval,
+ struct xe_tlb_inval_fence *fence, u64 start, u64 end,
+ u32 asid)
+{
+ return xe_tlb_inval_issue(tlb_inval, fence, tlb_inval->ops->ppgtt,
+ start, end, asid);
+}
+
+/**
+ * xe_tlb_inval_vm() - Issue a TLB invalidation for a VM
+ * @tlb_inval: TLB invalidation client
+ * @vm: VM to invalidate
+ *
+ * Invalidate the entire VM's address space
+ */
+void xe_tlb_inval_vm(struct xe_tlb_inval *tlb_inval, struct xe_vm *vm)
+{
+ struct xe_tlb_inval_fence fence;
+ u64 range = 1ull << vm->xe->info.va_bits;
+
+ xe_tlb_inval_fence_init(tlb_inval, &fence, true);
+ xe_tlb_inval_range(tlb_inval, &fence, 0, range, vm->usm.asid);
+ xe_tlb_inval_fence_wait(&fence);
+}
+
+/**
+ * xe_tlb_inval_done_handler() - TLB invalidation done handler
+ * @tlb_inval: TLB invalidation client
+ * @seqno: seqno of invalidation that is done
+ *
+ * Update recv seqno, signal any TLB invalidation fences, and restart TDR
+ */
+void xe_tlb_inval_done_handler(struct xe_tlb_inval *tlb_inval, int seqno)
+{
+ struct xe_device *xe = tlb_inval->xe;
+ struct xe_tlb_inval_fence *fence, *next;
+ unsigned long flags;
+
+ /*
+ * This can be run both directly from the IRQ handler and from
+ * process_g2h_msg(). Only one of them may process any individual CT
+ * message, however the order in which they are processed here could
+ * result in skipping a seqno. To handle that we just process all the
+ * seqnos from the last seqno_recv up to and including the one in
+ * msg[0]. The delta should be very small, so there shouldn't be many
+ * pending_fences we actually need to iterate over here.
+ *
+ * From the GuC's POV we expect the seqnos to always appear in order,
+ * so if we see something later in the timeline we can be sure that
+ * anything appearing earlier has already signalled; we just have yet
+ * to officially process the CT message, as when racing against
+ * process_g2h_msg().
+ */
+ spin_lock_irqsave(&tlb_inval->pending_lock, flags);
+ if (xe_tlb_inval_seqno_past(tlb_inval, seqno)) {
+ spin_unlock_irqrestore(&tlb_inval->pending_lock, flags);
+ return;
+ }
+
+ WRITE_ONCE(tlb_inval->seqno_recv, seqno);
+
+ list_for_each_entry_safe(fence, next,
+ &tlb_inval->pending_fences, link) {
+ trace_xe_tlb_inval_fence_recv(xe, fence);
+
+ if (!xe_tlb_inval_seqno_past(tlb_inval, fence->seqno))
+ break;
+
+ xe_tlb_inval_fence_signal(fence);
+ }
+
+ if (!list_empty(&tlb_inval->pending_fences))
+ mod_delayed_work(system_wq,
+ &tlb_inval->fence_tdr,
+ tlb_inval->ops->timeout_delay(tlb_inval));
+ else
+ cancel_delayed_work(&tlb_inval->fence_tdr);
+
+ spin_unlock_irqrestore(&tlb_inval->pending_lock, flags);
+}
+
+static const char *
+xe_inval_fence_get_driver_name(struct dma_fence *dma_fence)
+{
+ return "xe";
+}
+
+static const char *
+xe_inval_fence_get_timeline_name(struct dma_fence *dma_fence)
+{
+ return "tlb_inval_fence";
+}
+
+static const struct dma_fence_ops inval_fence_ops = {
+ .get_driver_name = xe_inval_fence_get_driver_name,
+ .get_timeline_name = xe_inval_fence_get_timeline_name,
+};
+
+/**
+ * xe_tlb_inval_fence_init() - Initialize TLB invalidation fence
+ * @tlb_inval: TLB invalidation client
+ * @fence: TLB invalidation fence to initialize
+ * @stack: fence is stack variable
+ *
+ * Initialize a TLB invalidation fence for use. xe_tlb_inval_fence_fini()
+ * will be called automatically when the fence is signalled (all fences must
+ * signal), even on error.
+ */
+void xe_tlb_inval_fence_init(struct xe_tlb_inval *tlb_inval,
+ struct xe_tlb_inval_fence *fence,
+ bool stack)
+{
+ xe_pm_runtime_get_noresume(tlb_inval->xe);
+
+ spin_lock_irq(&tlb_inval->lock);
+ dma_fence_init(&fence->base, &inval_fence_ops, &tlb_inval->lock,
+ dma_fence_context_alloc(1), 1);
+ spin_unlock_irq(&tlb_inval->lock);
+ INIT_LIST_HEAD(&fence->link);
+ if (stack)
+ set_bit(FENCE_STACK_BIT, &fence->base.flags);
+ else
+ dma_fence_get(&fence->base);
+ fence->tlb_inval = tlb_inval;
+}
diff --git a/drivers/gpu/drm/xe/xe_tlb_inval.h b/drivers/gpu/drm/xe/xe_tlb_inval.h
new file mode 100644
index 000000000000..554634dfd4e2
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tlb_inval.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_TLB_INVAL_H_
+#define _XE_TLB_INVAL_H_
+
+#include <linux/types.h>
+
+#include "xe_tlb_inval_types.h"
+
+struct xe_gt;
+struct xe_guc;
+struct xe_vm;
+
+int xe_gt_tlb_inval_init_early(struct xe_gt *gt);
+
+void xe_tlb_inval_reset(struct xe_tlb_inval *tlb_inval);
+int xe_tlb_inval_all(struct xe_tlb_inval *tlb_inval,
+ struct xe_tlb_inval_fence *fence);
+int xe_tlb_inval_ggtt(struct xe_tlb_inval *tlb_inval);
+void xe_tlb_inval_vm(struct xe_tlb_inval *tlb_inval, struct xe_vm *vm);
+int xe_tlb_inval_range(struct xe_tlb_inval *tlb_inval,
+ struct xe_tlb_inval_fence *fence,
+ u64 start, u64 end, u32 asid);
+
+void xe_tlb_inval_fence_init(struct xe_tlb_inval *tlb_inval,
+ struct xe_tlb_inval_fence *fence,
+ bool stack);
+
+/**
+ * xe_tlb_inval_fence_wait() - TLB invalidation fence wait
+ * @fence: TLB invalidation fence to wait on
+ *
+ * Wait on a TLB invalidation fence until it signals, non-interruptible
+ */
+static inline void
+xe_tlb_inval_fence_wait(struct xe_tlb_inval_fence *fence)
+{
+ dma_fence_wait(&fence->base, false);
+}
+
+void xe_tlb_inval_done_handler(struct xe_tlb_inval *tlb_inval, int seqno);
+
+#endif /* _XE_TLB_INVAL_H_ */
diff --git a/drivers/gpu/drm/xe/xe_tlb_inval_job.c b/drivers/gpu/drm/xe/xe_tlb_inval_job.c
new file mode 100644
index 000000000000..492def04a559
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tlb_inval_job.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include "xe_assert.h"
+#include "xe_dep_job_types.h"
+#include "xe_dep_scheduler.h"
+#include "xe_exec_queue.h"
+#include "xe_gt_types.h"
+#include "xe_tlb_inval.h"
+#include "xe_tlb_inval_job.h"
+#include "xe_migrate.h"
+#include "xe_pm.h"
+
+/** struct xe_tlb_inval_job - TLB invalidation job */
+struct xe_tlb_inval_job {
+ /** @dep: base generic dependency Xe job */
+ struct xe_dep_job dep;
+ /** @tlb_inval: TLB invalidation client */
+ struct xe_tlb_inval *tlb_inval;
+ /** @q: exec queue issuing the invalidate */
+ struct xe_exec_queue *q;
+ /** @refcount: ref count of this job */
+ struct kref refcount;
+ /**
+ * @fence: dma fence to indicate completion. One-way relationship: the job
+ * can safely reference the fence, but the fence cannot safely reference
+ * the job.
+ */
+ struct dma_fence *fence;
+ /** @start: Start address to invalidate */
+ u64 start;
+ /** @end: End address to invalidate */
+ u64 end;
+ /** @asid: Address space ID to invalidate */
+ u32 asid;
+ /** @fence_armed: Fence has been armed */
+ bool fence_armed;
+};
+
+static struct dma_fence *xe_tlb_inval_job_run(struct xe_dep_job *dep_job)
+{
+ struct xe_tlb_inval_job *job =
+ container_of(dep_job, typeof(*job), dep);
+ struct xe_tlb_inval_fence *ifence =
+ container_of(job->fence, typeof(*ifence), base);
+
+ xe_tlb_inval_range(job->tlb_inval, ifence, job->start,
+ job->end, job->asid);
+
+ return job->fence;
+}
+
+static void xe_tlb_inval_job_free(struct xe_dep_job *dep_job)
+{
+ struct xe_tlb_inval_job *job =
+ container_of(dep_job, typeof(*job), dep);
+
+ /* Pairs with get in xe_tlb_inval_job_push */
+ xe_tlb_inval_job_put(job);
+}
+
+static const struct xe_dep_job_ops dep_job_ops = {
+ .run_job = xe_tlb_inval_job_run,
+ .free_job = xe_tlb_inval_job_free,
+};
+
+/**
+ * xe_tlb_inval_job_create() - TLB invalidation job create
+ * @q: exec queue issuing the invalidate
+ * @tlb_inval: TLB invalidation client
+ * @dep_scheduler: Dependency scheduler for job
+ * @start: Start address to invalidate
+ * @end: End address to invalidate
+ * @asid: Address space ID to invalidate
+ *
+ * Create a TLB invalidation job and initialize internal fields. The caller is
+ * responsible for releasing the creation reference.
+ *
+ * Return: TLB invalidation job object on success, ERR_PTR failure
+ */
+struct xe_tlb_inval_job *
+xe_tlb_inval_job_create(struct xe_exec_queue *q, struct xe_tlb_inval *tlb_inval,
+ struct xe_dep_scheduler *dep_scheduler, u64 start,
+ u64 end, u32 asid)
+{
+ struct xe_tlb_inval_job *job;
+ struct drm_sched_entity *entity =
+ xe_dep_scheduler_entity(dep_scheduler);
+ struct xe_tlb_inval_fence *ifence;
+ int err;
+
+ job = kmalloc(sizeof(*job), GFP_KERNEL);
+ if (!job)
+ return ERR_PTR(-ENOMEM);
+
+ job->q = q;
+ job->tlb_inval = tlb_inval;
+ job->start = start;
+ job->end = end;
+ job->asid = asid;
+ job->fence_armed = false;
+ job->dep.ops = &dep_job_ops;
+ kref_init(&job->refcount);
+ xe_exec_queue_get(q); /* Pairs with put in xe_tlb_inval_job_destroy */
+
+ ifence = kmalloc(sizeof(*ifence), GFP_KERNEL);
+ if (!ifence) {
+ err = -ENOMEM;
+ goto err_job;
+ }
+ job->fence = &ifence->base;
+
+ err = drm_sched_job_init(&job->dep.drm, entity, 1, NULL,
+ q->xef ? q->xef->drm->client_id : 0);
+ if (err)
+ goto err_fence;
+
+ /* Pairs with put in xe_tlb_inval_job_destroy */
+ xe_pm_runtime_get_noresume(gt_to_xe(q->gt));
+
+ return job;
+
+err_fence:
+ kfree(ifence);
+err_job:
+ xe_exec_queue_put(q);
+ kfree(job);
+
+ return ERR_PTR(err);
+}
+
+static void xe_tlb_inval_job_destroy(struct kref *ref)
+{
+ struct xe_tlb_inval_job *job = container_of(ref, typeof(*job),
+ refcount);
+ struct xe_tlb_inval_fence *ifence =
+ container_of(job->fence, typeof(*ifence), base);
+ struct xe_exec_queue *q = job->q;
+ struct xe_device *xe = gt_to_xe(q->gt);
+
+ if (!job->fence_armed)
+ kfree(ifence);
+ else
+ /* Ref from xe_tlb_inval_fence_init */
+ dma_fence_put(job->fence);
+
+ drm_sched_job_cleanup(&job->dep.drm);
+ kfree(job);
+ xe_exec_queue_put(q); /* Pairs with get from xe_tlb_inval_job_create */
+ xe_pm_runtime_put(xe); /* Pairs with get from xe_tlb_inval_job_create */
+}
+
+/**
+ * xe_tlb_inval_job_alloc_dep() - TLB invalidation job alloc dependency
+ * @job: TLB invalidation job to alloc dependency for
+ *
+ * Allocate storage for a dependency in the TLB invalidation fence. This
+ * function should be called at most once per job and must be paired with
+ * xe_tlb_inval_job_push being called with a real fence.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int xe_tlb_inval_job_alloc_dep(struct xe_tlb_inval_job *job)
+{
+ xe_assert(gt_to_xe(job->q->gt), !xa_load(&job->dep.drm.dependencies, 0));
+ might_alloc(GFP_KERNEL);
+
+ return drm_sched_job_add_dependency(&job->dep.drm,
+ dma_fence_get_stub());
+}
+
+/**
+ * xe_tlb_inval_job_push() - TLB invalidation job push
+ * @job: TLB invalidation job to push
+ * @m: The migration object being used
+ * @fence: Dependency for TLB invalidation job
+ *
+ * Pushes a TLB invalidation job for execution, using @fence as a dependency.
+ * Storage for @fence must be preallocated with xe_tlb_inval_job_alloc_dep
+ * prior to this call if @fence is not signaled. Takes a reference to the job's
+ * finished fence, which the caller is responsible for releasing, and returns
+ * it to the caller. This function is safe to call in the path of reclaim.
+ *
+ * Return: Job's finished fence on success, cannot fail
+ */
+struct dma_fence *xe_tlb_inval_job_push(struct xe_tlb_inval_job *job,
+ struct xe_migrate *m,
+ struct dma_fence *fence)
+{
+ struct xe_tlb_inval_fence *ifence =
+ container_of(job->fence, typeof(*ifence), base);
+
+ if (!dma_fence_is_signaled(fence)) {
+ void *ptr;
+
+ /*
+ * Can be in path of reclaim, hence the preallocation of fence
+ * storage in xe_tlb_inval_job_alloc_dep. Verify caller did
+ * this correctly.
+ */
+ xe_assert(gt_to_xe(job->q->gt),
+ xa_load(&job->dep.drm.dependencies, 0) ==
+ dma_fence_get_stub());
+
+ dma_fence_get(fence); /* ref released once dependency processed by scheduler */
+ ptr = xa_store(&job->dep.drm.dependencies, 0, fence,
+ GFP_ATOMIC);
+ xe_assert(gt_to_xe(job->q->gt), !xa_is_err(ptr));
+ }
+
+ xe_tlb_inval_job_get(job); /* Pairs with put in free_job */
+ job->fence_armed = true;
+
+ /*
+ * We need the migration lock to protect the job's seqno and the SPSC
+ * queue. It is only taken on the migration queue; user queues are
+ * protected by the VM's dma-resv lock.
+ */
+ xe_migrate_job_lock(m, job->q);
+
+ /* Creation ref pairs with put in xe_tlb_inval_job_destroy */
+ xe_tlb_inval_fence_init(job->tlb_inval, ifence, false);
+ dma_fence_get(job->fence); /* Pairs with put in DRM scheduler */
+
+ drm_sched_job_arm(&job->dep.drm);
+ /*
+ * caller ref, get must be done before job push as it could immediately
+ * signal and free.
+ */
+ dma_fence_get(&job->dep.drm.s_fence->finished);
+ drm_sched_entity_push_job(&job->dep.drm);
+
+ xe_migrate_job_unlock(m, job->q);
+
+ /*
+ * Not using job->fence, as it has its own dma-fence context, which does
+ * not allow TLB invalidation fences on the same (queue, GT) tuple to
+ * be squashed in dma-resv/DRM scheduler. Instead, we use the DRM scheduler
+ * context and job's finished fence, which enables squashing.
+ */
+ return &job->dep.drm.s_fence->finished;
+}
+
+/**
+ * xe_tlb_inval_job_get() - Get a reference to TLB invalidation job
+ * @job: TLB invalidation job object
+ *
+ * Increment the TLB invalidation job's reference count
+ */
+void xe_tlb_inval_job_get(struct xe_tlb_inval_job *job)
+{
+ kref_get(&job->refcount);
+}
+
+/**
+ * xe_tlb_inval_job_put() - Put a reference to TLB invalidation job
+ * @job: TLB invalidation job object
+ *
+ * Decrement the TLB invalidation job's reference count, call
+ * xe_tlb_inval_job_destroy when reference count == 0. Skips decrement if
+ * input @job is NULL or IS_ERR.
+ */
+void xe_tlb_inval_job_put(struct xe_tlb_inval_job *job)
+{
+ if (!IS_ERR_OR_NULL(job))
+ kref_put(&job->refcount, xe_tlb_inval_job_destroy);
+}
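
A hypothetical end-to-end sketch of the job API above (q, tlb_inval, dep_scheduler, m, dep_fence and the address range are assumed to be in scope):

    struct xe_tlb_inval_job *job;
    struct dma_fence *fence;
    int err;

    job = xe_tlb_inval_job_create(q, tlb_inval, dep_scheduler,
                                  start, end, asid);
    if (IS_ERR(job))
        return PTR_ERR(job);

    err = xe_tlb_inval_job_alloc_dep(job); /* before entering reclaim paths */
    if (err) {
        xe_tlb_inval_job_put(job);
        return err;
    }

    fence = xe_tlb_inval_job_push(job, m, dep_fence);
    /* ... install fence in a dma-resv or wait on it ... */
    dma_fence_put(fence);

    xe_tlb_inval_job_put(job); /* drop the creation reference */
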
diff --git a/drivers/gpu/drm/xe/xe_tlb_inval_job.h b/drivers/gpu/drm/xe/xe_tlb_inval_job.h
new file mode 100644
index 000000000000..e63edcb26b50
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tlb_inval_job.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_TLB_INVAL_JOB_H_
+#define _XE_TLB_INVAL_JOB_H_
+
+#include <linux/types.h>
+
+struct dma_fence;
+struct xe_dep_scheduler;
+struct xe_exec_queue;
+struct xe_tlb_inval;
+struct xe_tlb_inval_job;
+struct xe_migrate;
+
+struct xe_tlb_inval_job *
+xe_tlb_inval_job_create(struct xe_exec_queue *q, struct xe_tlb_inval *tlb_inval,
+ struct xe_dep_scheduler *dep_scheduler,
+ u64 start, u64 end, u32 asid);
+
+int xe_tlb_inval_job_alloc_dep(struct xe_tlb_inval_job *job);
+
+struct dma_fence *xe_tlb_inval_job_push(struct xe_tlb_inval_job *job,
+ struct xe_migrate *m,
+ struct dma_fence *fence);
+
+void xe_tlb_inval_job_get(struct xe_tlb_inval_job *job);
+
+void xe_tlb_inval_job_put(struct xe_tlb_inval_job *job);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_tlb_inval_types.h b/drivers/gpu/drm/xe/xe_tlb_inval_types.h
new file mode 100644
index 000000000000..8f8b060e9005
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tlb_inval_types.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_TLB_INVAL_TYPES_H_
+#define _XE_TLB_INVAL_TYPES_H_
+
+#include <linux/workqueue.h>
+#include <linux/dma-fence.h>
+
+struct xe_tlb_inval;
+
+/** struct xe_tlb_inval_ops - TLB invalidation ops (backend) */
+struct xe_tlb_inval_ops {
+ /**
+ * @all: Invalidate all TLBs
+ * @tlb_inval: TLB invalidation client
+ * @seqno: Seqno of TLB invalidation
+ *
+ * Return 0 on success, -ECANCELED if backend is mid-reset, error on
+ * failure
+ */
+ int (*all)(struct xe_tlb_inval *tlb_inval, u32 seqno);
+
+ /**
+ * @ggtt: Invalidate global translation TLBs
+ * @tlb_inval: TLB invalidation client
+ * @seqno: Seqno of TLB invalidation
+ *
+ * Return 0 on success, -ECANCELED if backend is mid-reset, error on
+ * failure
+ */
+ int (*ggtt)(struct xe_tlb_inval *tlb_inval, u32 seqno);
+
+ /**
+ * @ppgtt: Invalidate per-process translation TLBs
+ * @tlb_inval: TLB invalidation client
+ * @seqno: Seqno of TLB invalidation
+ * @start: Start address
+ * @end: End address
+ * @asid: Address space ID
+ *
+ * Return 0 on success, -ECANCELED if backend is mid-reset, error on
+ * failure
+ */
+ int (*ppgtt)(struct xe_tlb_inval *tlb_inval, u32 seqno, u64 start,
+ u64 end, u32 asid);
+
+ /**
+ * @initialized: Backend is initialized
+ * @tlb_inval: TLB invalidation client
+ *
+ * Return: True if the backend is initialized, False otherwise
+ */
+ bool (*initialized)(struct xe_tlb_inval *tlb_inval);
+
+ /**
+ * @flush: Flush pending TLB invalidations
+ * @tlb_inval: TLB invalidation client
+ */
+ void (*flush)(struct xe_tlb_inval *tlb_inval);
+
+ /**
+ * @timeout_delay: Timeout delay for TLB invalidation
+ * @tlb_inval: TLB invalidation client
+ *
+ * Return: Timeout delay for TLB invalidation in jiffies
+ */
+ long (*timeout_delay)(struct xe_tlb_inval *tlb_inval);
+};
+
+/** struct xe_tlb_inval - TLB invalidation client (frontend) */
+struct xe_tlb_inval {
+ /** @private: Backend private pointer */
+ void *private;
+ /** @xe: Pointer to Xe device */
+ struct xe_device *xe;
+ /** @ops: TLB invalidation ops */
+ const struct xe_tlb_inval_ops *ops;
+ /** @seqno: TLB invalidation seqno, protected by @seqno_lock */
+#define TLB_INVALIDATION_SEQNO_MAX 0x100000
+ int seqno;
+ /** @seqno_lock: protects @seqno */
+ struct mutex seqno_lock;
+ /**
+ * @seqno_recv: last received TLB invalidation seqno, protected by
+ * @pending_lock
+ */
+ int seqno_recv;
+ /**
+ * @pending_fences: list of pending fences waiting on TLB invalidations,
+ * protected by @pending_lock
+ */
+ struct list_head pending_fences;
+ /**
+ * @pending_lock: protects @pending_fences and updating @seqno_recv.
+ */
+ spinlock_t pending_lock;
+ /**
+ * @fence_tdr: schedules a delayed call to xe_tlb_inval_fence_timeout() after
+ * the timeout interval is over.
+ */
+ struct delayed_work fence_tdr;
+ /** @job_wq: schedules TLB invalidation jobs */
+ struct workqueue_struct *job_wq;
+ /** @lock: protects TLB invalidation fences */
+ spinlock_t lock;
+};
+
+/**
+ * struct xe_tlb_inval_fence - TLB invalidation fence
+ *
+ * Optionally passed to xe_tlb_inval* functions and will be signaled upon TLB
+ * invalidation completion.
+ */
+struct xe_tlb_inval_fence {
+ /** @base: dma fence base */
+ struct dma_fence base;
+ /** @tlb_inval: TLB invalidation client which the fence belongs to */
+ struct xe_tlb_inval *tlb_inval;
+ /** @link: link into list of pending tlb fences */
+ struct list_head link;
+ /** @seqno: seqno of the TLB invalidation that signals this fence */
+ int seqno;
+ /** @inval_time: time of TLB invalidation */
+ ktime_t inval_time;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
index b4a3577df70c..314f42fcbcbd 100644
--- a/drivers/gpu/drm/xe/xe_trace.h
+++ b/drivers/gpu/drm/xe/xe_trace.h
@@ -14,10 +14,10 @@
#include "xe_exec_queue_types.h"
#include "xe_gpu_scheduler_types.h"
-#include "xe_gt_tlb_invalidation_types.h"
#include "xe_gt_types.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_sched_job.h"
+#include "xe_tlb_inval_types.h"
#include "xe_vm.h"
#define __dev_name_xe(xe) dev_name((xe)->drm.dev)
@@ -25,13 +25,13 @@
#define __dev_name_gt(gt) __dev_name_xe(gt_to_xe((gt)))
#define __dev_name_eq(q) __dev_name_gt((q)->gt)
-DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence,
- TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
+DECLARE_EVENT_CLASS(xe_tlb_inval_fence,
+ TP_PROTO(struct xe_device *xe, struct xe_tlb_inval_fence *fence),
TP_ARGS(xe, fence),
TP_STRUCT__entry(
__string(dev, __dev_name_xe(xe))
- __field(struct xe_gt_tlb_invalidation_fence *, fence)
+ __field(struct xe_tlb_inval_fence *, fence)
__field(int, seqno)
),
@@ -45,39 +45,23 @@ DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence,
__get_str(dev), __entry->fence, __entry->seqno)
);
-DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_create,
- TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
+DEFINE_EVENT(xe_tlb_inval_fence, xe_tlb_inval_fence_send,
+ TP_PROTO(struct xe_device *xe, struct xe_tlb_inval_fence *fence),
TP_ARGS(xe, fence)
);
-DEFINE_EVENT(xe_gt_tlb_invalidation_fence,
- xe_gt_tlb_invalidation_fence_work_func,
- TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
+DEFINE_EVENT(xe_tlb_inval_fence, xe_tlb_inval_fence_recv,
+ TP_PROTO(struct xe_device *xe, struct xe_tlb_inval_fence *fence),
TP_ARGS(xe, fence)
);
-DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_cb,
- TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
+DEFINE_EVENT(xe_tlb_inval_fence, xe_tlb_inval_fence_signal,
+ TP_PROTO(struct xe_device *xe, struct xe_tlb_inval_fence *fence),
TP_ARGS(xe, fence)
);
-DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_send,
- TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
- TP_ARGS(xe, fence)
-);
-
-DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_recv,
- TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
- TP_ARGS(xe, fence)
-);
-
-DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_signal,
- TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
- TP_ARGS(xe, fence)
-);
-
-DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_timeout,
- TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
+DEFINE_EVENT(xe_tlb_inval_fence, xe_tlb_inval_fence_timeout,
+ TP_PROTO(struct xe_device *xe, struct xe_tlb_inval_fence *fence),
TP_ARGS(xe, fence)
);
diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
index d9c9d2547aad..dc588255674d 100644
--- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
@@ -25,6 +25,7 @@
#include "xe_ttm_stolen_mgr.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_wa.h"
+#include "xe_vram.h"
struct xe_ttm_stolen_mgr {
struct xe_ttm_vram_mgr base;
@@ -82,15 +83,16 @@ static u32 get_wopcm_size(struct xe_device *xe)
static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
{
- struct xe_tile *tile = xe_device_get_root_tile(xe);
+ struct xe_vram_region *tile_vram = xe_device_get_root_tile(xe)->mem.vram;
+ resource_size_t tile_io_start = xe_vram_region_io_start(tile_vram);
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
u64 stolen_size, wopcm_size;
u64 tile_offset;
u64 tile_size;
- tile_offset = tile->mem.vram.io_start - xe->mem.vram.io_start;
- tile_size = tile->mem.vram.actual_physical_size;
+ tile_offset = tile_io_start - xe_vram_region_io_start(xe->mem.vram);
+ tile_size = xe_vram_region_actual_physical_size(tile_vram);
/* Use DSM base address instead for stolen memory */
mgr->stolen_base = (xe_mmio_read64_2x32(mmio, DSMBASE) & BDSM_MASK) - tile_offset;
@@ -107,7 +109,7 @@ static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
/* Verify usage fits in the actual resource available */
if (mgr->stolen_base + stolen_size <= pci_resource_len(pdev, LMEM_BAR))
- mgr->io_base = tile->mem.vram.io_start + mgr->stolen_base;
+ mgr->io_base = tile_io_start + mgr->stolen_base;
/*
* There may be few KB of platform dependent reserved memory at the end
@@ -164,7 +166,7 @@ static u32 detect_bar2_integrated(struct xe_device *xe, struct xe_ttm_stolen_mgr
stolen_size -= wopcm_size;
- if (media_gt && XE_WA(media_gt, 14019821291)) {
+ if (media_gt && XE_GT_WA(media_gt, 14019821291)) {
u64 gscpsmi_base = xe_mmio_read64_2x32(&media_gt->mmio, GSCPSMI_BASE)
& ~GENMASK_ULL(5, 0);
diff --git a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
index 9e375a40aee9..9175b4a2214b 100644
--- a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
@@ -15,6 +15,7 @@
#include "xe_gt.h"
#include "xe_res_cursor.h"
#include "xe_ttm_vram_mgr.h"
+#include "xe_vram_types.h"
static inline struct drm_buddy_block *
xe_ttm_vram_mgr_first_block(struct list_head *list)
@@ -337,13 +338,20 @@ int __xe_ttm_vram_mgr_init(struct xe_device *xe, struct xe_ttm_vram_mgr *mgr,
return drmm_add_action_or_reset(&xe->drm, ttm_vram_mgr_fini, mgr);
}
-int xe_ttm_vram_mgr_init(struct xe_tile *tile, struct xe_ttm_vram_mgr *mgr)
+/**
+ * xe_ttm_vram_mgr_init - initialize the TTM manager for a VRAM region
+ * @xe: pointer to Xe device
+ * @vram: pointer to xe_vram_region that contains the memory region attributes
+ *
+ * Initialize the Xe TTM VRAM manager for the given @vram region.
+ *
+ * Returns 0 for success, negative error code otherwise.
+ */
+int xe_ttm_vram_mgr_init(struct xe_device *xe, struct xe_vram_region *vram)
{
- struct xe_device *xe = tile_to_xe(tile);
- struct xe_vram_region *vram = &tile->mem.vram;
-
- return __xe_ttm_vram_mgr_init(xe, mgr, XE_PL_VRAM0 + tile->id,
- vram->usable_size, vram->io_size,
+ return __xe_ttm_vram_mgr_init(xe, &vram->ttm, vram->placement,
+ xe_vram_region_usable_size(vram),
+ xe_vram_region_io_size(vram),
PAGE_SIZE);
}
@@ -392,7 +400,7 @@ int xe_ttm_vram_mgr_alloc_sgt(struct xe_device *xe,
*/
xe_res_first(res, offset, length, &cursor);
for_each_sgtable_sg((*sgt), sg, i) {
- phys_addr_t phys = cursor.start + tile->mem.vram.io_start;
+ phys_addr_t phys = cursor.start + xe_vram_region_io_start(tile->mem.vram);
size_t size = min_t(u64, cursor.size, SZ_2G);
dma_addr_t addr;
diff --git a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.h b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.h
index cc76050e376d..87b7fae5edba 100644
--- a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.h
+++ b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.h
@@ -11,11 +11,12 @@
enum dma_data_direction;
struct xe_device;
struct xe_tile;
+struct xe_vram_region;
int __xe_ttm_vram_mgr_init(struct xe_device *xe, struct xe_ttm_vram_mgr *mgr,
u32 mem_type, u64 size, u64 io_size,
u64 default_page_size);
-int xe_ttm_vram_mgr_init(struct xe_tile *tile, struct xe_ttm_vram_mgr *mgr);
+int xe_ttm_vram_mgr_init(struct xe_device *xe, struct xe_vram_region *vram);
int xe_ttm_vram_mgr_alloc_sgt(struct xe_device *xe,
struct ttm_resource *res,
u64 offset, u64 length,
diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c
index 828b45b24c23..a524170a04d0 100644
--- a/drivers/gpu/drm/xe/xe_tuning.c
+++ b/drivers/gpu/drm/xe/xe_tuning.c
@@ -99,7 +99,7 @@ static const struct xe_rtp_entry_sr engine_tunings[] = {
XE_RTP_ACTIONS(SET(SAMPLER_MODE, INDIRECT_STATE_BASE_ADDR_OVERRIDE))
},
{ XE_RTP_NAME("Tuning: Disable NULL query for Anyhit Shader"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, XE_RTP_END_VERSION_UNDEFINED),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2000, XE_RTP_END_VERSION_UNDEFINED),
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(RT_CTRL, DIS_NULL_QUERY))
},
diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c
index 9bbdde604923..622b76078567 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw.c
+++ b/drivers/gpu/drm/xe/xe_uc_fw.c
@@ -115,8 +115,8 @@ struct fw_blobs_by_type {
#define XE_GT_TYPE_ANY XE_GT_TYPE_UNINITIALIZED
#define XE_GUC_FIRMWARE_DEFS(fw_def, mmp_ver, major_ver) \
- fw_def(PANTHERLAKE, GT_TYPE_ANY, major_ver(xe, guc, ptl, 70, 47, 0)) \
- fw_def(BATTLEMAGE, GT_TYPE_ANY, major_ver(xe, guc, bmg, 70, 45, 2)) \
+ fw_def(PANTHERLAKE, GT_TYPE_ANY, major_ver(xe, guc, ptl, 70, 49, 4)) \
+ fw_def(BATTLEMAGE, GT_TYPE_ANY, major_ver(xe, guc, bmg, 70, 49, 4)) \
fw_def(LUNARLAKE, GT_TYPE_ANY, major_ver(xe, guc, lnl, 70, 45, 2)) \
fw_def(METEORLAKE, GT_TYPE_ANY, major_ver(i915, guc, mtl, 70, 44, 1)) \
fw_def(DG2, GT_TYPE_ANY, major_ver(i915, guc, dg2, 70, 45, 2)) \
@@ -328,7 +328,7 @@ static void uc_fw_fini(struct drm_device *drm, void *arg)
xe_uc_fw_change_status(uc_fw, XE_UC_FIRMWARE_SELECTED);
}
-static int guc_read_css_info(struct xe_uc_fw *uc_fw, struct uc_css_header *css)
+static int guc_read_css_info(struct xe_uc_fw *uc_fw, struct uc_css_guc_info *guc_info)
{
struct xe_gt *gt = uc_fw_to_gt(uc_fw);
struct xe_uc_fw_version *release = &uc_fw->versions.found[XE_UC_FW_VER_RELEASE];
@@ -343,11 +343,12 @@ static int guc_read_css_info(struct xe_uc_fw *uc_fw, struct uc_css_header *css)
return -EINVAL;
}
- compatibility->major = FIELD_GET(CSS_SW_VERSION_UC_MAJOR, css->submission_version);
- compatibility->minor = FIELD_GET(CSS_SW_VERSION_UC_MINOR, css->submission_version);
- compatibility->patch = FIELD_GET(CSS_SW_VERSION_UC_PATCH, css->submission_version);
+ compatibility->major = FIELD_GET(CSS_SW_VERSION_UC_MAJOR, guc_info->submission_version);
+ compatibility->minor = FIELD_GET(CSS_SW_VERSION_UC_MINOR, guc_info->submission_version);
+ compatibility->patch = FIELD_GET(CSS_SW_VERSION_UC_PATCH, guc_info->submission_version);
- uc_fw->private_data_size = css->private_data_size;
+ uc_fw->build_type = FIELD_GET(CSS_UKERNEL_INFO_BUILDTYPE, guc_info->ukernel_info);
+ uc_fw->private_data_size = guc_info->private_data_size;
return 0;
}
@@ -416,8 +417,8 @@ static int parse_css_header(struct xe_uc_fw *uc_fw, const void *fw_data, size_t
css = (struct uc_css_header *)fw_data;
/* Check integrity of size values inside CSS header */
- size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw -
- css->exponent_size_dw) * sizeof(u32);
+ size = (css->header_size_dw - css->rsa_info.key_size_dw - css->rsa_info.modulus_size_dw -
+ css->rsa_info.exponent_size_dw) * sizeof(u32);
if (unlikely(size != sizeof(struct uc_css_header))) {
drm_warn(&xe->drm,
"%s firmware %s: unexpected header size: %zu != %zu\n",
@@ -430,7 +431,7 @@ static int parse_css_header(struct xe_uc_fw *uc_fw, const void *fw_data, size_t
uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
/* now RSA */
- uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
+ uc_fw->rsa_size = css->rsa_info.key_size_dw * sizeof(u32);
/* At least, it should have header, uCode and RSA. Size of all three. */
size = sizeof(struct uc_css_header) + uc_fw->ucode_size +
@@ -443,12 +444,12 @@ static int parse_css_header(struct xe_uc_fw *uc_fw, const void *fw_data, size_t
}
/* Get version numbers from the CSS header */
- release->major = FIELD_GET(CSS_SW_VERSION_UC_MAJOR, css->sw_version);
- release->minor = FIELD_GET(CSS_SW_VERSION_UC_MINOR, css->sw_version);
- release->patch = FIELD_GET(CSS_SW_VERSION_UC_PATCH, css->sw_version);
+ release->major = FIELD_GET(CSS_SW_VERSION_UC_MAJOR, css->guc_info.sw_version);
+ release->minor = FIELD_GET(CSS_SW_VERSION_UC_MINOR, css->guc_info.sw_version);
+ release->patch = FIELD_GET(CSS_SW_VERSION_UC_PATCH, css->guc_info.sw_version);
if (uc_fw->type == XE_UC_FW_TYPE_GUC)
- return guc_read_css_info(uc_fw, css);
+ return guc_read_css_info(uc_fw, &css->guc_info);
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_uc_fw_abi.h b/drivers/gpu/drm/xe/xe_uc_fw_abi.h
index 87ade41209d0..3c9a63d13032 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw_abi.h
+++ b/drivers/gpu/drm/xe/xe_uc_fw_abi.h
@@ -44,6 +44,39 @@
* in fw. So driver will load a truncated firmware in this case.
*/
+struct uc_css_rsa_info {
+ u32 key_size_dw;
+ u32 modulus_size_dw;
+ u32 exponent_size_dw;
+} __packed;
+
+struct uc_css_guc_info {
+ u32 time;
+#define CSS_TIME_HOUR (0xFF << 0)
+#define CSS_TIME_MIN (0xFF << 8)
+#define CSS_TIME_SEC (0xFFFF << 16)
+ u32 reserved0[5];
+ u32 sw_version;
+#define CSS_SW_VERSION_UC_MAJOR (0xFF << 16)
+#define CSS_SW_VERSION_UC_MINOR (0xFF << 8)
+#define CSS_SW_VERSION_UC_PATCH (0xFF << 0)
+ u32 submission_version;
+ u32 reserved1[11];
+ u32 header_info;
+#define CSS_HEADER_INFO_SVN (0xFF)
+#define CSS_HEADER_INFO_COPY_VALID (0x1 << 31)
+ u32 private_data_size;
+ u32 ukernel_info;
+#define CSS_UKERNEL_INFO_DEVICEID (0xFFFF << 16)
+#define CSS_UKERNEL_INFO_PRODKEY (0xFF << 8)
+#define CSS_UKERNEL_INFO_BUILDTYPE (0x3 << 2)
+#define CSS_UKERNEL_INFO_BUILDTYPE_PROD 0
+#define CSS_UKERNEL_INFO_BUILDTYPE_PREPROD 1
+#define CSS_UKERNEL_INFO_BUILDTYPE_DEBUG 2
+#define CSS_UKERNEL_INFO_ENCSTATUS (0x1 << 1)
+#define CSS_UKERNEL_INFO_COPY_VALID (0x1 << 0)
+} __packed;
+
struct uc_css_header {
u32 module_type;
/*
@@ -52,36 +85,21 @@ struct uc_css_header {
*/
u32 header_size_dw;
u32 header_version;
- u32 module_id;
+ u32 reserved0;
u32 module_vendor;
u32 date;
-#define CSS_DATE_DAY (0xFF << 0)
-#define CSS_DATE_MONTH (0xFF << 8)
-#define CSS_DATE_YEAR (0xFFFF << 16)
+#define CSS_DATE_DAY (0xFF << 0)
+#define CSS_DATE_MONTH (0xFF << 8)
+#define CSS_DATE_YEAR (0xFFFF << 16)
u32 size_dw; /* uCode plus header_size_dw */
- u32 key_size_dw;
- u32 modulus_size_dw;
- u32 exponent_size_dw;
- u32 time;
-#define CSS_TIME_HOUR (0xFF << 0)
-#define CSS_DATE_MIN (0xFF << 8)
-#define CSS_DATE_SEC (0xFFFF << 16)
- char username[8];
- char buildnumber[12];
- u32 sw_version;
-#define CSS_SW_VERSION_UC_MAJOR (0xFF << 16)
-#define CSS_SW_VERSION_UC_MINOR (0xFF << 8)
-#define CSS_SW_VERSION_UC_PATCH (0xFF << 0)
union {
- u32 submission_version; /* only applies to GuC */
- u32 reserved2;
+ u32 reserved1[3];
+ struct uc_css_rsa_info rsa_info;
};
- u32 reserved0[12];
union {
- u32 private_data_size; /* only applies to GuC */
- u32 reserved1;
+ u32 reserved2[22];
+ struct uc_css_guc_info guc_info;
};
- u32 header_info;
} __packed;
static_assert(sizeof(struct uc_css_header) == 128);
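
The static_assert still holds after the restructuring, since the layout adds up to 32 dwords:

    module_type .. size_dw                    7 dwords
    union { reserved1[3], rsa_info }          3 dwords
    union { reserved2[22], guc_info }        22 dwords
    total                                    32 dwords = 128 bytes

(uc_css_guc_info: 1 + 5 + 1 + 1 + 11 + 1 + 1 + 1 = 22 dwords; uc_css_rsa_info: 3 dwords.)
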
@@ -318,4 +336,70 @@ struct gsc_manifest_header {
u32 exponent_size; /* in dwords */
} __packed;
+/**
+ * DOC: Late binding Firmware Layout
+ *
+ * The late binding binary starts with the FPT header, which contains the
+ * locations of the various partitions of the binary. Here we're interested in
+ * finding the manifest version. To get to it, we need to locate the CPD
+ * header; one of the entries in the CPD header points to the manifest header,
+ * which contains the version.
+ *
+ * +================================================+
+ * | FPT Header |
+ * +================================================+
+ * | FPT entries[] |
+ * | entry1 |
+ * | ... |
+ * | entryX |
+ * | "LTES" |
+ * | ... |
+ * | offset >-----------------------------|------o
+ * +================================================+ |
+ * |
+ * +================================================+ |
+ * | CPD Header |<-----o
+ * +================================================+
+ * | CPD entries[] |
+ * | entry1 |
+ * | ... |
+ * | entryX |
+ * | "LTES.man" |
+ * | ... |
+ * | offset >----------------------------|------o
+ * +================================================+ |
+ * |
+ * +================================================+ |
+ * | Manifest Header |<-----o
+ * | ... |
+ * | FW version |
+ * | ... |
+ * +================================================+
+ */
+
+/* FPT Headers */
+struct csc_fpt_header {
+ u32 header_marker;
+#define CSC_FPT_HEADER_MARKER 0x54504624
+ u32 num_of_entries;
+ u8 header_version;
+ u8 entry_version;
+ u8 header_length; /* in bytes */
+ u8 flags;
+ u16 ticks_to_add;
+ u16 tokens_to_add;
+ u32 uma_size;
+ u32 crc32;
+ struct gsc_version fitc_version;
+} __packed;
+
+struct csc_fpt_entry {
+ u8 name[4]; /* partition name */
+ u32 reserved1;
+ u32 offset; /* offset from beginning of CSE region */
+ u32 length; /* partition length in bytes */
+ u32 reserved2[3];
+ u32 partition_flags;
+} __packed;
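
A hypothetical sketch of the first step of the walk described in the DOC above, locating the "LTES" partition in the FPT (bounds checks elided; it assumes the entry array immediately follows header_length bytes of header):

    static u32 csc_fpt_find_ltes_offset(const void *data)
    {
        const struct csc_fpt_header *fpt = data;
        const struct csc_fpt_entry *e =
            (const void *)((const u8 *)data + fpt->header_length);
        u32 i;

        if (fpt->header_marker != CSC_FPT_HEADER_MARKER)
            return 0;

        for (i = 0; i < fpt->num_of_entries; i++, e++)
            if (!memcmp(e->name, "LTES", 4))
                return e->offset; /* the CPD header lives here */

        return 0; /* not found */
    }
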
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_uc_fw_types.h b/drivers/gpu/drm/xe/xe_uc_fw_types.h
index 914026015019..77a1dcf8b4ed 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw_types.h
+++ b/drivers/gpu/drm/xe/xe_uc_fw_types.h
@@ -147,6 +147,9 @@ struct xe_uc_fw {
/** @private_data_size: size of private data found in uC css header */
u32 private_data_size;
+
+ /** @build_type: Firmware build type (see CSS_UKERNEL_INFO_BUILDTYPE for definitions) */
+ u32 build_type;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_userptr.c b/drivers/gpu/drm/xe/xe_userptr.c
new file mode 100644
index 000000000000..f16e92cd8090
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_userptr.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include "xe_userptr.h"
+
+#include <linux/mm.h>
+
+#include "xe_trace_bo.h"
+
+/**
+ * xe_vma_userptr_check_repin() - Advisory check for repin needed
+ * @uvma: The userptr vma
+ *
+ * Check if the userptr vma has been invalidated since last successful
+ * repin. The check is advisory only and the function can be called
+ * without the vm->svm.gpusvm.notifier_lock held. There is no guarantee that the
+ * vma userptr will remain valid after a lockless check, so typically
+ * the call needs to be followed by a proper check under the notifier_lock.
+ *
+ * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
+ */
+int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
+{
+ return mmu_interval_check_retry(&uvma->userptr.notifier,
+ uvma->userptr.pages.notifier_seq) ?
+ -EAGAIN : 0;
+}
+
+/**
+ * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
+ * that need repinning.
+ * @vm: The VM.
+ *
+ * This function checks for whether the VM has userptrs that need repinning,
+ * and provides a release-type barrier on the svm.gpusvm.notifier_lock after
+ * checking.
+ *
+ * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
+ */
+int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
+{
+ lockdep_assert_held_read(&vm->svm.gpusvm.notifier_lock);
+
+ return (list_empty(&vm->userptr.repin_list) &&
+ list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
+}
+
+int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
+{
+ struct xe_vma *vma = &uvma->vma;
+ struct xe_vm *vm = xe_vma_vm(vma);
+ struct xe_device *xe = vm->xe;
+ struct drm_gpusvm_ctx ctx = {
+ .read_only = xe_vma_read_only(vma),
+ .device_private_page_owner = NULL,
+ };
+
+ lockdep_assert_held(&vm->lock);
+ xe_assert(xe, xe_vma_is_userptr(vma));
+
+ if (vma->gpuva.flags & XE_VMA_DESTROYED)
+ return 0;
+
+ return drm_gpusvm_get_pages(&vm->svm.gpusvm, &uvma->userptr.pages,
+ uvma->userptr.notifier.mm,
+ &uvma->userptr.notifier,
+ xe_vma_userptr(vma),
+ xe_vma_userptr(vma) + xe_vma_size(vma),
+ &ctx);
+}
+
+static void __vma_userptr_invalidate(struct xe_vm *vm, struct xe_userptr_vma *uvma)
+{
+ struct xe_userptr *userptr = &uvma->userptr;
+ struct xe_vma *vma = &uvma->vma;
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+ struct drm_gpusvm_ctx ctx = {
+ .in_notifier = true,
+ .read_only = xe_vma_read_only(vma),
+ };
+ long err;
+
+ /*
+ * Tell exec and rebind worker they need to repin and rebind this
+ * userptr.
+ */
+ if (!xe_vm_in_fault_mode(vm) &&
+ !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
+ spin_lock(&vm->userptr.invalidated_lock);
+ list_move_tail(&userptr->invalidate_link,
+ &vm->userptr.invalidated);
+ spin_unlock(&vm->userptr.invalidated_lock);
+ }
+
+ /*
+ * Preempt fences turn into schedule disables, pipeline these.
+ * Note that even in fault mode, we need to wait for binds and
+ * unbinds to complete, and those are attached as BOOKKEEP fences
+ * to the vm.
+ */
+ dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP);
+ dma_resv_for_each_fence_unlocked(&cursor, fence)
+ dma_fence_enable_sw_signaling(fence);
+ dma_resv_iter_end(&cursor);
+
+ err = dma_resv_wait_timeout(xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+ XE_WARN_ON(err <= 0);
+
+ if (xe_vm_in_fault_mode(vm) && userptr->initial_bind) {
+ err = xe_vm_invalidate_vma(vma);
+ XE_WARN_ON(err);
+ }
+
+ drm_gpusvm_unmap_pages(&vm->svm.gpusvm, &uvma->userptr.pages,
+ xe_vma_size(vma) >> PAGE_SHIFT, &ctx);
+}
+
+static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct xe_userptr_vma *uvma = container_of(mni, typeof(*uvma), userptr.notifier);
+ struct xe_vma *vma = &uvma->vma;
+ struct xe_vm *vm = xe_vma_vm(vma);
+
+ xe_assert(vm->xe, xe_vma_is_userptr(vma));
+ trace_xe_vma_userptr_invalidate(vma);
+
+ if (!mmu_notifier_range_blockable(range))
+ return false;
+
+ vm_dbg(&xe_vma_vm(vma)->xe->drm,
+ "NOTIFIER: addr=0x%016llx, range=0x%016llx",
+ xe_vma_start(vma), xe_vma_size(vma));
+
+ down_write(&vm->svm.gpusvm.notifier_lock);
+ mmu_interval_set_seq(mni, cur_seq);
+
+ __vma_userptr_invalidate(vm, uvma);
+ up_write(&vm->svm.gpusvm.notifier_lock);
+ trace_xe_vma_userptr_invalidate_complete(vma);
+
+ return true;
+}
+
+static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
+ .invalidate = vma_userptr_invalidate,
+};
+
+#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
+/**
+ * xe_vma_userptr_force_invalidate() - force invalidate a userptr
+ * @uvma: The userptr vma to invalidate
+ *
+ * Perform a forced userptr invalidation for testing purposes.
+ */
+void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
+{
+ struct xe_vm *vm = xe_vma_vm(&uvma->vma);
+
+ /* Protect against concurrent userptr pinning */
+ lockdep_assert_held(&vm->lock);
+ /* Protect against concurrent notifiers */
+ lockdep_assert_held(&vm->svm.gpusvm.notifier_lock);
+ /*
+ * Protect against concurrent instances of this function and
+ * the critical exec sections
+ */
+ xe_vm_assert_held(vm);
+
+ if (!mmu_interval_read_retry(&uvma->userptr.notifier,
+ uvma->userptr.pages.notifier_seq))
+ uvma->userptr.pages.notifier_seq -= 2;
+ __vma_userptr_invalidate(vm, uvma);
+}
+#endif
+
+int xe_vm_userptr_pin(struct xe_vm *vm)
+{
+ struct xe_userptr_vma *uvma, *next;
+ int err = 0;
+
+ xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
+ lockdep_assert_held_write(&vm->lock);
+
+ /* Collect invalidated userptrs */
+ spin_lock(&vm->userptr.invalidated_lock);
+ xe_assert(vm->xe, list_empty(&vm->userptr.repin_list));
+ list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
+ userptr.invalidate_link) {
+ list_del_init(&uvma->userptr.invalidate_link);
+ list_add_tail(&uvma->userptr.repin_link,
+ &vm->userptr.repin_list);
+ }
+ spin_unlock(&vm->userptr.invalidated_lock);
+
+ /* Pin and move to bind list */
+ list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
+ userptr.repin_link) {
+ err = xe_vma_userptr_pin_pages(uvma);
+ if (err == -EFAULT) {
+ list_del_init(&uvma->userptr.repin_link);
+ /*
+ * We might have already done the pin once, but then
+ * had to retry before the re-bind happened, due to
+ * some other condition in the caller, but in the
+ * meantime the userptr got dinged by the notifier such
+ * that we need to revalidate here, but this time we hit
+ * the EFAULT. In such a case make sure we remove
+ * ourselves from the rebind list to avoid going down in
+ * flames.
+ */
+ if (!list_empty(&uvma->vma.combined_links.rebind))
+ list_del_init(&uvma->vma.combined_links.rebind);
+
+ /* Wait for pending binds */
+ xe_vm_lock(vm, false);
+ dma_resv_wait_timeout(xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+
+ down_read(&vm->svm.gpusvm.notifier_lock);
+ err = xe_vm_invalidate_vma(&uvma->vma);
+ up_read(&vm->svm.gpusvm.notifier_lock);
+ xe_vm_unlock(vm);
+ if (err)
+ break;
+ } else {
+ if (err)
+ break;
+
+ list_del_init(&uvma->userptr.repin_link);
+ list_move_tail(&uvma->vma.combined_links.rebind,
+ &vm->rebind_list);
+ }
+ }
+
+ if (err) {
+ down_write(&vm->svm.gpusvm.notifier_lock);
+ spin_lock(&vm->userptr.invalidated_lock);
+ list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
+ userptr.repin_link) {
+ list_del_init(&uvma->userptr.repin_link);
+ list_move_tail(&uvma->userptr.invalidate_link,
+ &vm->userptr.invalidated);
+ }
+ spin_unlock(&vm->userptr.invalidated_lock);
+ up_write(&vm->svm.gpusvm.notifier_lock);
+ }
+ return err;
+}
+
+/**
+ * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
+ * that need repinning.
+ * @vm: The VM.
+ *
+ * This function does an advisory check for whether the VM has userptrs that
+ * need repinning.
+ *
+ * Return: 0 if there are no indications of userptrs needing repinning,
+ * -EAGAIN if there are.
+ */
+int xe_vm_userptr_check_repin(struct xe_vm *vm)
+{
+ return (list_empty_careful(&vm->userptr.repin_list) &&
+ list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
+}
+
+int xe_userptr_setup(struct xe_userptr_vma *uvma, unsigned long start,
+ unsigned long range)
+{
+ struct xe_userptr *userptr = &uvma->userptr;
+ int err;
+
+ INIT_LIST_HEAD(&userptr->invalidate_link);
+ INIT_LIST_HEAD(&userptr->repin_link);
+
+ err = mmu_interval_notifier_insert(&userptr->notifier, current->mm,
+ start, range,
+ &vma_userptr_notifier_ops);
+ if (err)
+ return err;
+
+ userptr->pages.notifier_seq = LONG_MAX;
+
+ return 0;
+}
+
+void xe_userptr_remove(struct xe_userptr_vma *uvma)
+{
+ struct xe_vm *vm = xe_vma_vm(&uvma->vma);
+ struct xe_userptr *userptr = &uvma->userptr;
+
+ drm_gpusvm_free_pages(&vm->svm.gpusvm, &uvma->userptr.pages,
+ xe_vma_size(&uvma->vma) >> PAGE_SHIFT);
+
+ /*
+ * Since userptr pages are not pinned, we can't remove
+ * the notifier until we're sure the GPU is not accessing
+ * them anymore
+ */
+ mmu_interval_notifier_remove(&userptr->notifier);
+}
+
+void xe_userptr_destroy(struct xe_userptr_vma *uvma)
+{
+ struct xe_vm *vm = xe_vma_vm(&uvma->vma);
+
+ spin_lock(&vm->userptr.invalidated_lock);
+ xe_assert(vm->xe, list_empty(&uvma->userptr.repin_link));
+ list_del(&uvma->userptr.invalidate_link);
+ spin_unlock(&vm->userptr.invalidated_lock);
+}
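
The advisory and authoritative checks above are meant to be combined: a
lockless xe_vma_userptr_check_repin() to skip work early, then a pin, then a
re-check under the notifier lock. A condensed sketch, assuming the caller
already holds vm->lock as the pinning path requires; not a function in this
series:

	/* Illustrative check-pin-recheck pattern only. */
	static int repin_if_needed_sketch(struct xe_vm *vm,
					  struct xe_userptr_vma *uvma)
	{
		int err;

		if (!xe_vma_userptr_check_repin(uvma))	/* lockless, advisory */
			return 0;

		err = xe_vma_userptr_pin_pages(uvma);	/* needs vm->lock */
		if (err)
			return err;

		down_read(&vm->svm.gpusvm.notifier_lock);
		err = __xe_vm_userptr_needs_repin(vm);	/* authoritative */
		up_read(&vm->svm.gpusvm.notifier_lock);

		return err;	/* -EAGAIN: an invalidation raced us, retry */
	}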
diff --git a/drivers/gpu/drm/xe/xe_userptr.h b/drivers/gpu/drm/xe/xe_userptr.h
new file mode 100644
index 000000000000..ef801234991e
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_userptr.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_USERPTR_H_
+#define _XE_USERPTR_H_
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/scatterlist.h>
+#include <linux/spinlock.h>
+
+#include <drm/drm_gpusvm.h>
+
+struct xe_vm;
+struct xe_vma;
+struct xe_userptr_vma;
+
+/** struct xe_userptr_vm - User pointer VM level state */
+struct xe_userptr_vm {
+ /**
+ * @userptr.repin_list: list of VMAs which are user pointers,
+ * and need repinning. Protected by @lock.
+ */
+ struct list_head repin_list;
+ /**
+ * @userptr.invalidated_lock: Protects the
+ * @userptr.invalidated list.
+ */
+ spinlock_t invalidated_lock;
+ /**
+ * @userptr.invalidated: List of invalidated userptrs, not yet
+ * picked up for revalidation. Protected from access with the
+ * @invalidated_lock. Removing items from the list
+ * additionally requires @lock in write mode, and adding
+ * items to the list requires either the @svm.gpusvm.notifier_lock in
+ * write mode, OR @lock in write mode.
+ */
+ struct list_head invalidated;
+};
+
+/** struct xe_userptr - User pointer */
+struct xe_userptr {
+ /** @invalidate_link: Link for the vm::userptr.invalidated list */
+ struct list_head invalidate_link;
+ /** @repin_link: link into VM repin list if userptr. */
+ struct list_head repin_link;
+ /**
+ * @pages: gpusvm pages for this user pointer.
+ */
+ struct drm_gpusvm_pages pages;
+ /**
+ * @notifier: MMU notifier for user pointer (invalidation call back)
+ */
+ struct mmu_interval_notifier notifier;
+
+ /**
+ * @initial_bind: user pointer has been bound at least once.
+ * write: vm->svm.gpusvm.notifier_lock in read mode and vm->resv held.
+ * read: vm->svm.gpusvm.notifier_lock in write mode or vm->resv held.
+ */
+ bool initial_bind;
+#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
+ u32 divisor;
+#endif
+};
+
+#if IS_ENABLED(CONFIG_DRM_GPUSVM)
+void xe_userptr_remove(struct xe_userptr_vma *uvma);
+int xe_userptr_setup(struct xe_userptr_vma *uvma, unsigned long start,
+ unsigned long range);
+void xe_userptr_destroy(struct xe_userptr_vma *uvma);
+
+int xe_vm_userptr_pin(struct xe_vm *vm);
+int __xe_vm_userptr_needs_repin(struct xe_vm *vm);
+int xe_vm_userptr_check_repin(struct xe_vm *vm);
+int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma);
+int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma);
+#else
+static inline void xe_userptr_remove(struct xe_userptr_vma *uvma) {}
+
+static inline int xe_userptr_setup(struct xe_userptr_vma *uvma,
+ unsigned long start, unsigned long range)
+{
+ return -ENODEV;
+}
+
+static inline void xe_userptr_destroy(struct xe_userptr_vma *uvma) {}
+
+static inline int xe_vm_userptr_pin(struct xe_vm *vm) { return 0; }
+static inline int __xe_vm_userptr_needs_repin(struct xe_vm *vm) { return 0; }
+static inline int xe_vm_userptr_check_repin(struct xe_vm *vm) { return 0; }
+static inline int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma) { return -ENODEV; }
+static inline int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma) { return -ENODEV; }
+#endif
+
+#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
+void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma);
+#else
+static inline void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
+{
+}
+#endif
+#endif
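
Read together, the declarations above imply a simple lifecycle for a userptr
VMA. A sketch under stated assumptions: userptr_vma_alloc() and
userptr_vma_free() are hypothetical stand-ins for the driver's real VMA
management, and the documented locks are assumed held where required:

	/* Illustrative lifecycle only; error paths are simplified. */
	static int userptr_lifecycle_sketch(struct xe_vm *vm, unsigned long start,
					    unsigned long range)
	{
		struct xe_userptr_vma *uvma = userptr_vma_alloc(vm, start, range);
		int err;

		if (!uvma)
			return -ENOMEM;

		err = xe_userptr_setup(uvma, start, range); /* insert notifier */
		if (err)
			goto free;

		err = xe_vma_userptr_pin_pages(uvma);	/* grab current pages */

		/* ... bind and use on the GPU, eventually unbind ... */

		xe_userptr_remove(uvma);	/* free pages, remove notifier */
	free:
		userptr_vma_free(uvma);
		return err;
	}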
diff --git a/drivers/gpu/drm/xe/xe_validation.c b/drivers/gpu/drm/xe/xe_validation.c
new file mode 100644
index 000000000000..826cd09966ef
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_validation.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+#include "xe_bo.h"
+#include <drm/drm_exec.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gpuvm.h>
+
+#include "xe_assert.h"
+#include "xe_validation.h"
+
+#ifdef CONFIG_DRM_XE_DEBUG
+/**
+ * xe_validation_assert_exec() - Assert that the drm_exec pointer is suitable
+ * for validation.
+ * @xe: Pointer to the xe device.
+ * @exec: The drm_exec pointer to check.
+ * @obj: Pointer to the object subject to validation.
+ *
+ * NULL exec pointers are not allowed.
+ * For XE_VALIDATION_UNIMPLEMENTED, no checking.
+ * For XE_VLIDATION_OPT_OUT, check that the caller is a kunit test
+ * For XE_VALIDATION_UNSUPPORTED, check that the object subject to
+ * validation is a dma-buf, for which support for ww locking is
+ * not in place in the dma-buf layer.
+ */
+void xe_validation_assert_exec(const struct xe_device *xe,
+ const struct drm_exec *exec,
+ const struct drm_gem_object *obj)
+{
+ xe_assert(xe, exec);
+ if (IS_ERR(exec)) {
+ switch (PTR_ERR(exec)) {
+ case __XE_VAL_UNIMPLEMENTED:
+ break;
+ case __XE_VAL_UNSUPPORTED:
+ xe_assert(xe, !!obj->dma_buf);
+ break;
+#if IS_ENABLED(CONFIG_KUNIT)
+ case __XE_VAL_OPT_OUT:
+ xe_assert(xe, current->kunit_test);
+ break;
+#endif
+ default:
+ xe_assert(xe, false);
+ }
+ }
+}
+#endif
+
+static int xe_validation_lock(struct xe_validation_ctx *ctx)
+{
+ struct xe_validation_device *val = ctx->val;
+ int ret = 0;
+
+ if (ctx->val_flags.interruptible) {
+ if (ctx->request_exclusive)
+ ret = down_write_killable(&val->lock);
+ else
+ ret = down_read_interruptible(&val->lock);
+ } else {
+ if (ctx->request_exclusive)
+ down_write(&val->lock);
+ else
+ down_read(&val->lock);
+ }
+
+ if (!ret) {
+ ctx->lock_held = true;
+ ctx->lock_held_exclusive = ctx->request_exclusive;
+ }
+
+ return ret;
+}
+
+static int xe_validation_trylock(struct xe_validation_ctx *ctx)
+{
+ struct xe_validation_device *val = ctx->val;
+ bool locked;
+
+ if (ctx->request_exclusive)
+ locked = down_write_trylock(&val->lock);
+ else
+ locked = down_read_trylock(&val->lock);
+
+ if (locked) {
+ ctx->lock_held = true;
+ ctx->lock_held_exclusive = ctx->request_exclusive;
+ }
+
+ return locked ? 0 : -EWOULDBLOCK;
+}
+
+static void xe_validation_unlock(struct xe_validation_ctx *ctx)
+{
+ if (!ctx->lock_held)
+ return;
+
+ if (ctx->lock_held_exclusive)
+ up_write(&ctx->val->lock);
+ else
+ up_read(&ctx->val->lock);
+
+ ctx->lock_held = false;
+}
+
+/**
+ * xe_validation_ctx_init() - Initialize an xe_validation_ctx
+ * @ctx: The xe_validation_ctx to initialize.
+ * @val: The xe_validation_device representing the validation domain.
+ * @exec: The struct drm_exec to use for the transaction. May be NULL.
+ * @flags: The flags to use for initialization.
+ *
+ * Initialize and lock an xe_validation transaction using the validation domain
+ * represented by @val. Also initialize the drm_exec object forwarding parts of
+ * @flags to the drm_exec initialization. The @flags.exclusive flag should
+ * typically be set to false to avoid locking out other validators from the
+ * domain until an OOM is hit. For testing or final-attempt purposes it can,
+ * however, be set to true.
+ *
+ * Return: %0 on success, %-EINTR if interruptible initial locking failed with a
+ * signal pending. If @flags.no_block is set to true, a failed trylock
+ * returns %-EWOULDBLOCK.
+ */
+int xe_validation_ctx_init(struct xe_validation_ctx *ctx, struct xe_validation_device *val,
+ struct drm_exec *exec, const struct xe_val_flags flags)
+{
+ int ret;
+
+ ctx->exec = exec;
+ ctx->val = val;
+ ctx->lock_held = false;
+ ctx->lock_held_exclusive = false;
+ ctx->request_exclusive = flags.exclusive;
+ ctx->val_flags = flags;
+ ctx->exec_flags = 0;
+ ctx->nr = 0;
+
+ if (flags.no_block)
+ ret = xe_validation_trylock(ctx);
+ else
+ ret = xe_validation_lock(ctx);
+ if (ret)
+ return ret;
+
+ if (exec) {
+ if (flags.interruptible)
+ ctx->exec_flags |= DRM_EXEC_INTERRUPTIBLE_WAIT;
+ if (flags.exec_ignore_duplicates)
+ ctx->exec_flags |= DRM_EXEC_IGNORE_DUPLICATES;
+ drm_exec_init(exec, ctx->exec_flags, ctx->nr);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
+/*
+ * This abuses both drm_exec and ww_mutex internals and should be
+ * replaced by checking for -EDEADLK when we can make TTM
+ * stop converting -EDEADLK to -ENOMEM.
+ * An alternative is to not have exhaustive eviction with
+ * CONFIG_DEBUG_WW_MUTEX_SLOWPATH until that happens.
+ */
+static bool xe_validation_contention_injected(struct drm_exec *exec)
+{
+ return !!exec->ticket.contending_lock;
+}
+
+#else
+
+static bool xe_validation_contention_injected(struct drm_exec *exec)
+{
+ return false;
+}
+
+#endif
+
+static bool __xe_validation_should_retry(struct xe_validation_ctx *ctx, int ret)
+{
+ if (ret == -ENOMEM &&
+ ((ctx->request_exclusive &&
+ xe_validation_contention_injected(ctx->exec)) ||
+ !ctx->request_exclusive)) {
+ ctx->request_exclusive = true;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * xe_validation_exec_lock() - Perform drm_gpuvm_exec_lock within a validation
+ * transaction.
+ * @ctx: An uninitialized xe_validation_ctx.
+ * @vm_exec: An initialized struct vm_exec.
+ * @val: The validation domain.
+ *
+ * The drm_gpuvm_exec_lock() function internally initializes its drm_exec
+ * transaction and therefore doesn't lend itself very well to using
+ * xe_validation_ctx_init(). Provide a helper that takes an uninitialized
+ * xe_validation_ctx and calls drm_gpuvm_exec_lock() with OOM retry.
+ *
+ * Return: %0 on success, negative error code on failure.
+ */
+int xe_validation_exec_lock(struct xe_validation_ctx *ctx,
+ struct drm_gpuvm_exec *vm_exec,
+ struct xe_validation_device *val)
+{
+ int ret;
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->exec = &vm_exec->exec;
+ ctx->exec_flags = vm_exec->flags;
+ ctx->val = val;
+ if (ctx->exec_flags & DRM_EXEC_INTERRUPTIBLE_WAIT)
+ ctx->val_flags.interruptible = 1;
+ if (ctx->exec_flags & DRM_EXEC_IGNORE_DUPLICATES)
+ ctx->val_flags.exec_ignore_duplicates = 1;
+retry:
+ ret = xe_validation_lock(ctx);
+ if (ret)
+ return ret;
+
+ ret = drm_gpuvm_exec_lock(vm_exec);
+ if (ret) {
+ xe_validation_unlock(ctx);
+ if (__xe_validation_should_retry(ctx, ret))
+ goto retry;
+ }
+
+ return ret;
+}
+
+/**
+ * xe_validation_ctx_fini() - Finalize a validation transaction
+ * @ctx: The Validation transaction to finalize.
+ *
+ * Finalize a validation transaction and its related drm_exec transaction.
+ */
+void xe_validation_ctx_fini(struct xe_validation_ctx *ctx)
+{
+ if (ctx->exec)
+ drm_exec_fini(ctx->exec);
+ xe_validation_unlock(ctx);
+}
+
+/**
+ * xe_validation_should_retry() - Determine if a validation transaction should retry
+ * @ctx: The validation transaction.
+ * @ret: Pointer to a return value variable.
+ *
+ * Determines whether a validation transaction should retry based on the
+ * internal transaction state and the return value pointed to by @ret.
+ * If a validation should be retried, the transaction is prepared for that,
+ * and the validation lock might be re-locked in exclusive mode, and *@ret
+ * is set to %0. If the re-locking fails, typically due to interruptible
+ * locking with a signal pending, *@ret is instead set to -EINTR and the
+ * function returns %false.
+ *
+ * Return: %true if validation should be retried, %false otherwise.
+ */
+bool xe_validation_should_retry(struct xe_validation_ctx *ctx, int *ret)
+{
+ if (__xe_validation_should_retry(ctx, *ret)) {
+ drm_exec_fini(ctx->exec);
+ *ret = 0;
+ if (ctx->request_exclusive != ctx->lock_held_exclusive) {
+ xe_validation_unlock(ctx);
+ *ret = xe_validation_lock(ctx);
+ }
+ drm_exec_init(ctx->exec, ctx->exec_flags, ctx->nr);
+ return !*ret;
+ }
+
+ return false;
+}
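
The helpers above combine into one canonical transaction pattern, which later
patches in this series use in, e.g., preempt_rebind_work_func() and
xe_vm_create(). A condensed sketch; do_locking_and_validation() is a
hypothetical stand-in for the caller's real locking and validation work:

	/* Condensed calling-pattern sketch; not a real driver function. */
	static int validation_transaction_sketch(struct xe_device *xe)
	{
		struct xe_validation_ctx ctx;
		struct drm_exec exec;
		int err;

		err = xe_validation_ctx_init(&ctx, &xe->val, &exec,
					     (struct xe_val_flags)
					     {.interruptible = true});
		if (err)
			return err;

		drm_exec_until_all_locked(&exec) {
			err = do_locking_and_validation(&exec);
			/* Back off and re-lock on ww contention. */
			drm_exec_retry_on_contention(&exec);
			/* On -ENOMEM, restart once in exclusive mode. */
			xe_validation_retry_on_oom(&ctx, &err);
		}

		xe_validation_ctx_fini(&ctx);
		return err;
	}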
diff --git a/drivers/gpu/drm/xe/xe_validation.h b/drivers/gpu/drm/xe/xe_validation.h
new file mode 100644
index 000000000000..fec331d791e7
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_validation.h
@@ -0,0 +1,192 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+#ifndef _XE_VALIDATION_H_
+#define _XE_VALIDATION_H_
+
+#include <linux/dma-resv.h>
+#include <linux/types.h>
+#include <linux/rwsem.h>
+
+struct drm_exec;
+struct drm_gem_object;
+struct drm_gpuvm_exec;
+struct xe_device;
+
+#ifdef CONFIG_PROVE_LOCKING
+/**
+ * xe_validation_lockdep() - Assert that a drm_exec locking transaction can
+ * be initialized at this point.
+ */
+static inline void xe_validation_lockdep(void)
+{
+ struct ww_acquire_ctx ticket;
+
+ ww_acquire_init(&ticket, &reservation_ww_class);
+ ww_acquire_fini(&ticket);
+}
+#else
+static inline void xe_validation_lockdep(void)
+{
+}
+#endif
+
+/*
+ * Various values of the drm_exec pointer where we've not (yet)
+ * implemented full ww locking.
+ *
+ * XE_VALIDATION_UNIMPLEMENTED means implementation is pending.
+ * A lockdep check is made to ensure that a drm_exec locking
+ * transaction can actually take place where the macro is
+ * used. If this asserts, the exec pointer needs to be assigned
+ * higher up in the callchain and passed down.
+ *
+ * XE_VALIDATION_UNSUPPORTED is for dma-buf code only where
+ * the dma-buf layer doesn't support WW locking.
+ *
+ * XE_VALIDATION_OPT_OUT is for simplification of kunit tests where
+ * exhaustive eviction isn't necessary.
+ */
+#define __XE_VAL_UNIMPLEMENTED -EINVAL
+#define XE_VALIDATION_UNIMPLEMENTED (xe_validation_lockdep(), \
+ (struct drm_exec *)ERR_PTR(__XE_VAL_UNIMPLEMENTED))
+
+#define __XE_VAL_UNSUPPORTED -EOPNOTSUPP
+#define XE_VALIDATION_UNSUPPORTED ((struct drm_exec *)ERR_PTR(__XE_VAL_UNSUPPORTED))
+
+#define __XE_VAL_OPT_OUT -ENOMEM
+#define XE_VALIDATION_OPT_OUT (xe_validation_lockdep(), \
+ (struct drm_exec *)ERR_PTR(__XE_VAL_OPT_OUT))
+#ifdef CONFIG_DRM_XE_DEBUG
+void xe_validation_assert_exec(const struct xe_device *xe, const struct drm_exec *exec,
+ const struct drm_gem_object *obj);
+#else
+#define xe_validation_assert_exec(_xe, _exec, _obj) \
+ do { \
+ (void)_xe; (void)_exec; (void)_obj; \
+ } while (0)
+#endif
+
+/**
+ * struct xe_validation_device - The domain for exhaustive eviction
+ * @lock: The lock used to exclude other processes from allocating graphics memory
+ *
+ * The struct xe_validation_device represents the domain for which we want to use
+ * exhaustive eviction. The @lock is typically grabbed in read mode for
+ * allocations, but when graphics memory allocation fails, it is retried with
+ * the lock held in write mode.
+ */
+struct xe_validation_device {
+ struct rw_semaphore lock;
+};
+
+/**
+ * struct xe_val_flags - Flags for xe_validation_ctx_init().
+ * @exclusive: Start the validation transaction by locking out all other validators.
+ * @no_block: Don't block on initialization.
+ * @interruptible: Block interruptibly if blocking. Implies initializing the drm_exec
+ * context with the DRM_EXEC_INTERRUPTIBLE_WAIT flag.
+ * @exec_ignore_duplicates: Initialize the drm_exec context with the
+ * DRM_EXEC_IGNORE_DUPLICATES flag.
+ */
+struct xe_val_flags {
+ u32 exclusive :1;
+ u32 no_block :1;
+ u32 interruptible :1;
+ u32 exec_ignore_duplicates :1;
+};
+
+/**
+ * struct xe_validation_ctx - A struct drm_exec subclass with support for
+ * exhaustive eviction
+ * @exec: The drm_exec object base class. Note that we use a pointer instead of
+ * embedding to avoid diamond inheritance.
+ * @val: The exhaustive eviction domain.
+ * @val_flags: Copy of the struct xe_val_flags passed to xe_validation_ctx_init.
+ * @lock_held: Whether the domain lock is currently held.
+ * @lock_held_exclusive: Whether the domain lock is held in exclusive mode.
+ * @request_exclusive: Whether to lock exclusively (write mode) the next time
+ * the domain lock is locked.
+ * @exec_flags: The drm_exec flags used for drm_exec (re-)initialization.
+ * @nr: The drm_exec nr parameter used for drm_exec (re-)initialization.
+ */
+struct xe_validation_ctx {
+ struct drm_exec *exec;
+ struct xe_validation_device *val;
+ struct xe_val_flags val_flags;
+ bool lock_held;
+ bool lock_held_exclusive;
+ bool request_exclusive;
+ u32 exec_flags;
+ unsigned int nr;
+};
+
+int xe_validation_ctx_init(struct xe_validation_ctx *ctx, struct xe_validation_device *val,
+ struct drm_exec *exec, const struct xe_val_flags flags);
+
+int xe_validation_exec_lock(struct xe_validation_ctx *ctx, struct drm_gpuvm_exec *vm_exec,
+ struct xe_validation_device *val);
+
+void xe_validation_ctx_fini(struct xe_validation_ctx *ctx);
+
+bool xe_validation_should_retry(struct xe_validation_ctx *ctx, int *ret);
+
+/**
+ * xe_validation_retry_on_oom() - Retry on oom in an xe_validation transaction
+ * @_ctx: Pointer to the xe_validation_ctx
+ * @_ret: The current error value possibly holding -ENOMEM
+ *
+ * Use this in a way similar to drm_exec_retry_on_contention().
+ * If @_ret contains -ENOMEM the transaction is restarted once in a way that
+ * blocks other transactions and allows exhaustive eviction. If the transaction
+ * was already restarted once, just return the -ENOMEM. May also set
+ * @_ret to -EINTR if not retrying and waits are interruptible.
+ * May only be used within a drm_exec_until_all_locked() loop.
+ */
+#define xe_validation_retry_on_oom(_ctx, _ret) \
+ do { \
+ if (xe_validation_should_retry(_ctx, _ret)) \
+ goto *__drm_exec_retry_ptr; \
+ } while (0)
+
+/**
+ * xe_validation_device_init - Initialize a struct xe_validation_device
+ * @val: The xe_validation_device to init.
+ */
+static inline void
+xe_validation_device_init(struct xe_validation_device *val)
+{
+ init_rwsem(&val->lock);
+}
+
+/*
+ * Make guard() and scoped_guard() work with xe_validation_ctx
+ * so that we can exit transactions without caring about the
+ * cleanup.
+ */
+DEFINE_CLASS(xe_validation, struct xe_validation_ctx *,
+ if (_T) xe_validation_ctx_fini(_T);,
+ ({_ret = xe_validation_ctx_init(_ctx, _val, _exec, _flags);
+ _ret ? NULL : _ctx; }),
+ struct xe_validation_ctx *_ctx, struct xe_validation_device *_val,
+ struct drm_exec *_exec, const struct xe_val_flags _flags, int _ret);
+static inline void *class_xe_validation_lock_ptr(class_xe_validation_t *_T)
+{return *_T; }
+#define class_xe_validation_is_conditional true
+
+/**
+ * xe_validation_guard() - An auto-cleanup xe_validation_ctx transaction
+ * @_ctx: The xe_validation_ctx.
+ * @_val: The xe_validation_device.
+ * @_exec: The struct drm_exec object
+ * @_flags: Flags for the xe_validation_ctx initialization.
+ * @_ret: Return in / out parameter. May be set by this macro. Typically 0 when called.
+ *
+ * This macro will initiate a drm_exec transaction with additional support for
+ * exhaustive eviction.
+ */
+#define xe_validation_guard(_ctx, _val, _exec, _flags, _ret) \
+ scoped_guard(xe_validation, _ctx, _val, _exec, _flags, _ret) \
+ drm_exec_until_all_locked(_exec)
+
+#endif
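
For comparison with the open-coded pattern, the guard form wraps the same
retry machinery with automatic cleanup, as xe_vma_destroy_unlocked() does
later in this series. A sketch; lock_and_work() is a hypothetical stand-in
for the transaction body:

	/* Scoped-guard sketch; cleanup runs when the scope is left. */
	static int validation_guard_sketch(struct xe_device *xe)
	{
		struct xe_validation_ctx ctx;
		struct drm_exec exec;
		int err = 0;

		xe_validation_guard(&ctx, &xe->val, &exec,
				    (struct xe_val_flags) {}, err) {
			err = lock_and_work(&exec);
			drm_exec_retry_on_contention(&exec);
			xe_validation_retry_on_oom(&ctx, &err);
		}
		return err;
	}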
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 2035604121e6..027e6ce648c5 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -28,7 +28,6 @@
#include "xe_drm_client.h"
#include "xe_exec_queue.h"
#include "xe_gt_pagefault.h"
-#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_pat.h"
#include "xe_pm.h"
@@ -38,9 +37,10 @@
#include "xe_res_cursor.h"
#include "xe_svm.h"
#include "xe_sync.h"
+#include "xe_tile.h"
+#include "xe_tlb_inval.h"
#include "xe_trace_bo.h"
#include "xe_wa.h"
-#include "xe_hmm.h"
static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
{
@@ -48,34 +48,17 @@ static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
}
/**
- * xe_vma_userptr_check_repin() - Advisory check for repin needed
- * @uvma: The userptr vma
+ * xe_vm_drm_exec_lock() - Lock the vm's resv with a drm_exec transaction
+ * @vm: The vm whose resv is to be locked.
+ * @exec: The drm_exec transaction.
*
- * Check if the userptr vma has been invalidated since last successful
- * repin. The check is advisory only and can the function can be called
- * without the vm->userptr.notifier_lock held. There is no guarantee that the
- * vma userptr will remain valid after a lockless check, so typically
- * the call needs to be followed by a proper check under the notifier_lock.
+ * Helper to lock the vm's resv as part of a drm_exec transaction.
*
- * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
+ * Return: %0 on success. See drm_exec_lock_obj() for error codes.
*/
-int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
+int xe_vm_drm_exec_lock(struct xe_vm *vm, struct drm_exec *exec)
{
- return mmu_interval_check_retry(&uvma->userptr.notifier,
- uvma->userptr.notifier_seq) ?
- -EAGAIN : 0;
-}
-
-int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
-{
- struct xe_vma *vma = &uvma->vma;
- struct xe_vm *vm = xe_vma_vm(vma);
- struct xe_device *xe = vm->xe;
-
- lockdep_assert_held(&vm->lock);
- xe_assert(xe, xe_vma_is_userptr(vma));
-
- return xe_hmm_userptr_populate_range(uvma, false);
+ return drm_exec_lock_obj(exec, xe_vm_obj(vm));
}
static bool preempt_fences_waiting(struct xe_vm *vm)
@@ -227,6 +210,7 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
.num_fences = 1,
};
struct drm_exec *exec = &vm_exec.exec;
+ struct xe_validation_ctx ctx;
struct dma_fence *pfence;
int err;
bool wait;
@@ -234,14 +218,14 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
down_write(&vm->lock);
- err = drm_gpuvm_exec_lock(&vm_exec);
+ err = xe_validation_exec_lock(&ctx, &vm_exec, &vm->xe->val);
if (err)
goto out_up_write;
pfence = xe_preempt_fence_create(q, q->lr.context,
++q->lr.seqno);
- if (!pfence) {
- err = -ENOMEM;
+ if (IS_ERR(pfence)) {
+ err = PTR_ERR(pfence);
goto out_fini;
}
@@ -249,7 +233,7 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
++vm->preempt.num_exec_queues;
q->lr.pfence = pfence;
- down_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_lock(vm);
drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, pfence,
DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
@@ -263,10 +247,10 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
if (wait)
dma_fence_enable_sw_signaling(pfence);
- up_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_unlock(vm);
out_fini:
- drm_exec_fini(exec);
+ xe_validation_ctx_fini(&ctx);
out_up_write:
up_write(&vm->lock);
@@ -299,25 +283,6 @@ void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
up_write(&vm->lock);
}
-/**
- * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
- * that need repinning.
- * @vm: The VM.
- *
- * This function checks for whether the VM has userptrs that need repinning,
- * and provides a release-type barrier on the userptr.notifier_lock after
- * checking.
- *
- * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
- */
-int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
-{
- lockdep_assert_held_read(&vm->userptr.notifier_lock);
-
- return (list_empty(&vm->userptr.repin_list) &&
- list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
-}
-
#define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
/**
@@ -349,39 +314,6 @@ void xe_vm_kill(struct xe_vm *vm, bool unlocked)
/* TODO: Inform user the VM is banned */
}
-/**
- * xe_vm_validate_should_retry() - Whether to retry after a validate error.
- * @exec: The drm_exec object used for locking before validation.
- * @err: The error returned from ttm_bo_validate().
- * @end: A ktime_t cookie that should be set to 0 before first use and
- * that should be reused on subsequent calls.
- *
- * With multiple active VMs, under memory pressure, it is possible that
- * ttm_bo_validate() run into -EDEADLK and in such case returns -ENOMEM.
- * Until ttm properly handles locking in such scenarios, best thing the
- * driver can do is retry with a timeout. Check if that is necessary, and
- * if so unlock the drm_exec's objects while keeping the ticket to prepare
- * for a rerun.
- *
- * Return: true if a retry after drm_exec_init() is recommended;
- * false otherwise.
- */
-bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
-{
- ktime_t cur;
-
- if (err != -ENOMEM)
- return false;
-
- cur = ktime_get();
- *end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
- if (!ktime_before(cur, *end))
- return false;
-
- msleep(20);
- return true;
-}
-
static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
{
struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
@@ -393,7 +325,10 @@ static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
list_move_tail(&gpuva_to_vma(gpuva)->combined_links.rebind,
&vm->rebind_list);
- ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false);
+ if (!try_wait_for_completion(&vm->xe->pm_block))
+ return -EAGAIN;
+
+ ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false, exec);
if (ret)
return ret;
@@ -479,13 +414,40 @@ static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
return xe_vm_validate_rebind(vm, exec, vm->preempt.num_exec_queues);
}
+static bool vm_suspend_rebind_worker(struct xe_vm *vm)
+{
+ struct xe_device *xe = vm->xe;
+ bool ret = false;
+
+ mutex_lock(&xe->rebind_resume_lock);
+ if (!try_wait_for_completion(&vm->xe->pm_block)) {
+ ret = true;
+ list_move_tail(&vm->preempt.pm_activate_link, &xe->rebind_resume_list);
+ }
+ mutex_unlock(&xe->rebind_resume_lock);
+
+ return ret;
+}
+
+/**
+ * xe_vm_resume_rebind_worker() - Resume the rebind worker.
+ * @vm: The vm whose preempt worker to resume.
+ *
+ * Resume a preempt worker that was previously suspended by
+ * vm_suspend_rebind_worker().
+ */
+void xe_vm_resume_rebind_worker(struct xe_vm *vm)
+{
+ queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
+}
+
static void preempt_rebind_work_func(struct work_struct *w)
{
struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
+ struct xe_validation_ctx ctx;
struct drm_exec exec;
unsigned int fence_count = 0;
LIST_HEAD(preempt_fences);
- ktime_t end = 0;
int err = 0;
long wait;
int __maybe_unused tries = 0;
@@ -502,24 +464,30 @@ static void preempt_rebind_work_func(struct work_struct *w)
}
retry:
+ if (!try_wait_for_completion(&vm->xe->pm_block) && vm_suspend_rebind_worker(vm)) {
+ up_write(&vm->lock);
+ return;
+ }
+
if (xe_vm_userptr_check_repin(vm)) {
err = xe_vm_userptr_pin(vm);
if (err)
goto out_unlock_outer;
}
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ err = xe_validation_ctx_init(&ctx, &vm->xe->val, &exec,
+ (struct xe_val_flags) {.interruptible = true});
+ if (err)
+ goto out_unlock_outer;
drm_exec_until_all_locked(&exec) {
bool done = false;
err = xe_preempt_work_begin(&exec, vm, &done);
drm_exec_retry_on_contention(&exec);
+ xe_validation_retry_on_oom(&ctx, &err);
if (err || done) {
- drm_exec_fini(&exec);
- if (err && xe_vm_validate_should_retry(&exec, err, &end))
- err = -EAGAIN;
-
+ xe_validation_ctx_fini(&ctx);
goto out_unlock_outer;
}
}
@@ -528,7 +496,9 @@ retry:
if (err)
goto out_unlock;
+ xe_vm_set_validation_exec(vm, &exec);
err = xe_vm_rebind(vm, true);
+ xe_vm_set_validation_exec(vm, NULL);
if (err)
goto out_unlock;
@@ -546,9 +516,9 @@ retry:
(!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
__xe_vm_userptr_needs_repin(__vm))
- down_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_lock(vm);
if (retry_required(tries, vm)) {
- up_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_unlock(vm);
err = -EAGAIN;
goto out_unlock;
}
@@ -562,10 +532,10 @@ retry:
/* Point of no return. */
arm_preempt_fences(vm, &preempt_fences);
resume_and_reinstall_preempt_fences(vm, &exec);
- up_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_unlock(vm);
out_unlock:
- drm_exec_fini(&exec);
+ xe_validation_ctx_fini(&ctx);
out_unlock_outer:
if (err == -EAGAIN) {
trace_xe_vm_rebind_worker_retry(vm);
@@ -583,203 +553,6 @@ out_unlock_outer:
trace_xe_vm_rebind_worker_exit(vm);
}
-static void __vma_userptr_invalidate(struct xe_vm *vm, struct xe_userptr_vma *uvma)
-{
- struct xe_userptr *userptr = &uvma->userptr;
- struct xe_vma *vma = &uvma->vma;
- struct dma_resv_iter cursor;
- struct dma_fence *fence;
- long err;
-
- /*
- * Tell exec and rebind worker they need to repin and rebind this
- * userptr.
- */
- if (!xe_vm_in_fault_mode(vm) &&
- !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
- spin_lock(&vm->userptr.invalidated_lock);
- list_move_tail(&userptr->invalidate_link,
- &vm->userptr.invalidated);
- spin_unlock(&vm->userptr.invalidated_lock);
- }
-
- /*
- * Preempt fences turn into schedule disables, pipeline these.
- * Note that even in fault mode, we need to wait for binds and
- * unbinds to complete, and those are attached as BOOKMARK fences
- * to the vm.
- */
- dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP);
- dma_resv_for_each_fence_unlocked(&cursor, fence)
- dma_fence_enable_sw_signaling(fence);
- dma_resv_iter_end(&cursor);
-
- err = dma_resv_wait_timeout(xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP,
- false, MAX_SCHEDULE_TIMEOUT);
- XE_WARN_ON(err <= 0);
-
- if (xe_vm_in_fault_mode(vm) && userptr->initial_bind) {
- err = xe_vm_invalidate_vma(vma);
- XE_WARN_ON(err);
- }
-
- xe_hmm_userptr_unmap(uvma);
-}
-
-static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
- const struct mmu_notifier_range *range,
- unsigned long cur_seq)
-{
- struct xe_userptr_vma *uvma = container_of(mni, typeof(*uvma), userptr.notifier);
- struct xe_vma *vma = &uvma->vma;
- struct xe_vm *vm = xe_vma_vm(vma);
-
- xe_assert(vm->xe, xe_vma_is_userptr(vma));
- trace_xe_vma_userptr_invalidate(vma);
-
- if (!mmu_notifier_range_blockable(range))
- return false;
-
- vm_dbg(&xe_vma_vm(vma)->xe->drm,
- "NOTIFIER: addr=0x%016llx, range=0x%016llx",
- xe_vma_start(vma), xe_vma_size(vma));
-
- down_write(&vm->userptr.notifier_lock);
- mmu_interval_set_seq(mni, cur_seq);
-
- __vma_userptr_invalidate(vm, uvma);
- up_write(&vm->userptr.notifier_lock);
- trace_xe_vma_userptr_invalidate_complete(vma);
-
- return true;
-}
-
-static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
- .invalidate = vma_userptr_invalidate,
-};
-
-#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
-/**
- * xe_vma_userptr_force_invalidate() - force invalidate a userptr
- * @uvma: The userptr vma to invalidate
- *
- * Perform a forced userptr invalidation for testing purposes.
- */
-void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
-{
- struct xe_vm *vm = xe_vma_vm(&uvma->vma);
-
- /* Protect against concurrent userptr pinning */
- lockdep_assert_held(&vm->lock);
- /* Protect against concurrent notifiers */
- lockdep_assert_held(&vm->userptr.notifier_lock);
- /*
- * Protect against concurrent instances of this function and
- * the critical exec sections
- */
- xe_vm_assert_held(vm);
-
- if (!mmu_interval_read_retry(&uvma->userptr.notifier,
- uvma->userptr.notifier_seq))
- uvma->userptr.notifier_seq -= 2;
- __vma_userptr_invalidate(vm, uvma);
-}
-#endif
-
-int xe_vm_userptr_pin(struct xe_vm *vm)
-{
- struct xe_userptr_vma *uvma, *next;
- int err = 0;
-
- xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
- lockdep_assert_held_write(&vm->lock);
-
- /* Collect invalidated userptrs */
- spin_lock(&vm->userptr.invalidated_lock);
- xe_assert(vm->xe, list_empty(&vm->userptr.repin_list));
- list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
- userptr.invalidate_link) {
- list_del_init(&uvma->userptr.invalidate_link);
- list_add_tail(&uvma->userptr.repin_link,
- &vm->userptr.repin_list);
- }
- spin_unlock(&vm->userptr.invalidated_lock);
-
- /* Pin and move to bind list */
- list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
- userptr.repin_link) {
- err = xe_vma_userptr_pin_pages(uvma);
- if (err == -EFAULT) {
- list_del_init(&uvma->userptr.repin_link);
- /*
- * We might have already done the pin once already, but
- * then had to retry before the re-bind happened, due
- * some other condition in the caller, but in the
- * meantime the userptr got dinged by the notifier such
- * that we need to revalidate here, but this time we hit
- * the EFAULT. In such a case make sure we remove
- * ourselves from the rebind list to avoid going down in
- * flames.
- */
- if (!list_empty(&uvma->vma.combined_links.rebind))
- list_del_init(&uvma->vma.combined_links.rebind);
-
- /* Wait for pending binds */
- xe_vm_lock(vm, false);
- dma_resv_wait_timeout(xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP,
- false, MAX_SCHEDULE_TIMEOUT);
-
- down_read(&vm->userptr.notifier_lock);
- err = xe_vm_invalidate_vma(&uvma->vma);
- up_read(&vm->userptr.notifier_lock);
- xe_vm_unlock(vm);
- if (err)
- break;
- } else {
- if (err)
- break;
-
- list_del_init(&uvma->userptr.repin_link);
- list_move_tail(&uvma->vma.combined_links.rebind,
- &vm->rebind_list);
- }
- }
-
- if (err) {
- down_write(&vm->userptr.notifier_lock);
- spin_lock(&vm->userptr.invalidated_lock);
- list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
- userptr.repin_link) {
- list_del_init(&uvma->userptr.repin_link);
- list_move_tail(&uvma->userptr.invalidate_link,
- &vm->userptr.invalidated);
- }
- spin_unlock(&vm->userptr.invalidated_lock);
- up_write(&vm->userptr.notifier_lock);
- }
- return err;
-}
-
-/**
- * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
- * that need repinning.
- * @vm: The VM.
- *
- * This function does an advisory check for whether the VM has userptrs that
- * need repinning.
- *
- * Return: 0 if there are no indications of userptrs needing repinning,
- * -EAGAIN if there are.
- */
-int xe_vm_userptr_check_repin(struct xe_vm *vm)
-{
- return (list_empty_careful(&vm->userptr.repin_list) &&
- list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
-}
-
static int xe_vma_ops_alloc(struct xe_vma_ops *vops, bool array_of_binds)
{
int i;
@@ -953,7 +726,7 @@ struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_ma
for_each_tile(tile, vm->xe, id) {
vops.pt_update_ops[id].wait_vm_bookkeep = true;
vops.pt_update_ops[tile->id].q =
- xe_tile_migrate_exec_queue(tile);
+ xe_migrate_exec_queue(tile->migrate);
}
err = xe_vm_ops_add_rebind(&vops, vma, tile_mask);
@@ -1043,7 +816,7 @@ struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
for_each_tile(tile, vm->xe, id) {
vops.pt_update_ops[id].wait_vm_bookkeep = true;
vops.pt_update_ops[tile->id].q =
- xe_tile_migrate_exec_queue(tile);
+ xe_migrate_exec_queue(tile->migrate);
}
err = xe_vm_ops_add_range_rebind(&vops, vma, range, tile_mask);
@@ -1126,7 +899,7 @@ struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
for_each_tile(tile, vm->xe, id) {
vops.pt_update_ops[id].wait_vm_bookkeep = true;
vops.pt_update_ops[tile->id].q =
- xe_tile_migrate_exec_queue(tile);
+ xe_migrate_exec_queue(tile->migrate);
}
err = xe_vm_ops_add_range_unbind(&vops, range);
@@ -1168,7 +941,8 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
struct xe_bo *bo,
u64 bo_offset_or_userptr,
u64 start, u64 end,
- u16 pat_index, unsigned int flags)
+ struct xe_vma_mem_attr *attr,
+ unsigned int flags)
{
struct xe_vma *vma;
struct xe_tile *tile;
@@ -1223,7 +997,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
if (vm->xe->info.has_atomic_enable_pte_bit)
vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
- vma->pat_index = pat_index;
+ vma->attr = *attr;
if (bo) {
struct drm_gpuvm_bo *vm_bo;
@@ -1243,25 +1017,17 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
drm_gpuvm_bo_put(vm_bo);
} else /* userptr or null */ {
if (!is_null && !is_cpu_addr_mirror) {
- struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
+ struct xe_userptr_vma *uvma = to_userptr_vma(vma);
u64 size = end - start + 1;
int err;
- INIT_LIST_HEAD(&userptr->invalidate_link);
- INIT_LIST_HEAD(&userptr->repin_link);
vma->gpuva.gem.offset = bo_offset_or_userptr;
- mutex_init(&userptr->unmap_mutex);
- err = mmu_interval_notifier_insert(&userptr->notifier,
- current->mm,
- xe_vma_userptr(vma), size,
- &vma_userptr_notifier_ops);
+ err = xe_userptr_setup(uvma, xe_vma_userptr(vma), size);
if (err) {
xe_vma_free(vma);
return ERR_PTR(err);
}
-
- userptr->notifier_seq = LONG_MAX;
}
xe_vm_get(vm);
@@ -1281,18 +1047,8 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
if (xe_vma_is_userptr(vma)) {
struct xe_userptr_vma *uvma = to_userptr_vma(vma);
- struct xe_userptr *userptr = &uvma->userptr;
- if (userptr->sg)
- xe_hmm_userptr_free_sg(uvma);
-
- /*
- * Since userptr pages are not pinned, we can't remove
- * the notifier until we're sure the GPU is not accessing
- * them anymore
- */
- mmu_interval_notifier_remove(&userptr->notifier);
- mutex_destroy(&userptr->unmap_mutex);
+ xe_userptr_remove(uvma);
xe_vm_put(vm);
} else if (xe_vma_is_null(vma) || xe_vma_is_cpu_addr_mirror(vma)) {
xe_vm_put(vm);
@@ -1329,11 +1085,7 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
if (xe_vma_is_userptr(vma)) {
xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
-
- spin_lock(&vm->userptr.invalidated_lock);
- xe_assert(vm->xe, list_empty(&to_userptr_vma(vma)->userptr.repin_link));
- list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
- spin_unlock(&vm->userptr.invalidated_lock);
+ xe_userptr_destroy(to_userptr_vma(vma));
} else if (!xe_vma_is_null(vma) && !xe_vma_is_cpu_addr_mirror(vma)) {
xe_bo_assert_held(xe_vma_bo(vma));
@@ -1381,20 +1133,19 @@ int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma)
static void xe_vma_destroy_unlocked(struct xe_vma *vma)
{
+ struct xe_device *xe = xe_vma_vm(vma)->xe;
+ struct xe_validation_ctx ctx;
struct drm_exec exec;
- int err;
+ int err = 0;
- drm_exec_init(&exec, 0, 0);
- drm_exec_until_all_locked(&exec) {
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
err = xe_vm_lock_vma(&exec, vma);
drm_exec_retry_on_contention(&exec);
if (XE_WARN_ON(err))
break;
+ xe_vma_destroy(vma, NULL);
}
-
- xe_vma_destroy(vma, NULL);
-
- drm_exec_fini(&exec);
+ xe_assert(xe, !err);
}
struct xe_vma *
@@ -1512,14 +1263,39 @@ static u64 pte_encode_ps(u32 pt_level)
return 0;
}
-static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
- const u16 pat_index)
+static u16 pde_pat_index(struct xe_bo *bo)
+{
+ struct xe_device *xe = xe_bo_device(bo);
+ u16 pat_index;
+
+ /*
+ * We only have two bits to encode the PAT index in non-leaf nodes, but
+ * these only point to other paging structures so we only need a minimal
+ * selection of options. The user PAT index is only for encoding leaf
+ * nodes, where we have use of more bits to do the encoding. The
+ * non-leaf nodes are instead under driver control so the chosen index
+ * here should be distict from the user PAT index. Also the
+ * corresponding coherency of the PAT index should be tied to the
+ * allocation type of the page table (or at least we should pick
+ * something which is always safe).
+ */
+ if (!xe_bo_is_vram(bo) && bo->ttm.ttm->caching == ttm_cached)
+ pat_index = xe->pat.idx[XE_CACHE_WB];
+ else
+ pat_index = xe->pat.idx[XE_CACHE_NONE];
+
+ xe_assert(xe, pat_index <= 3);
+
+ return pat_index;
+}
+
+static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset)
{
u64 pde;
pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
- pde |= pde_encode_pat_index(pat_index);
+ pde |= pde_encode_pat_index(pde_pat_index(bo));
return pde;
}
@@ -1594,6 +1370,7 @@ static void vm_destroy_work_func(struct work_struct *w);
* @xe: xe device.
* @tile: tile to set up for.
* @vm: vm to set up for.
+ * @exec: The struct drm_exec object used to lock the vm resv.
*
* Sets up a pagetable tree with one page-table per level and a single
* leaf PTE. All pagetable entries point to the single page-table or,
@@ -1603,16 +1380,19 @@ static void vm_destroy_work_func(struct work_struct *w);
* Return: 0 on success, negative error code on error.
*/
static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
- struct xe_vm *vm)
+ struct xe_vm *vm, struct drm_exec *exec)
{
u8 id = tile->id;
int i;
for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) {
- vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i);
- if (IS_ERR(vm->scratch_pt[id][i]))
- return PTR_ERR(vm->scratch_pt[id][i]);
+ vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i, exec);
+ if (IS_ERR(vm->scratch_pt[id][i])) {
+ int err = PTR_ERR(vm->scratch_pt[id][i]);
+ vm->scratch_pt[id][i] = NULL;
+ return err;
+ }
xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]);
}
@@ -1640,9 +1420,26 @@ static void xe_vm_free_scratch(struct xe_vm *vm)
}
}
-struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
+static void xe_vm_pt_destroy(struct xe_vm *vm)
+{
+ struct xe_tile *tile;
+ u8 id;
+
+ xe_vm_assert_held(vm);
+
+ for_each_tile(tile, vm->xe, id) {
+ if (vm->pt_root[id]) {
+ xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
+ vm->pt_root[id] = NULL;
+ }
+ }
+}
+
+struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef)
{
struct drm_gem_object *vm_resv_obj;
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
struct xe_vm *vm;
int err, number_tiles = 0;
struct xe_tile *tile;
@@ -1661,9 +1458,10 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
vm->xe = xe;
vm->size = 1ull << xe->info.va_bits;
-
vm->flags = flags;
+ if (xef)
+ vm->xef = xe_file_get(xef);
/**
* GSC VMs are kernel-owned, only used for PXP ops and can sometimes be
* manipulated under the PXP mutex. However, the PXP mutex can be taken
@@ -1685,7 +1483,6 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
INIT_LIST_HEAD(&vm->userptr.repin_list);
INIT_LIST_HEAD(&vm->userptr.invalidated);
- init_rwsem(&vm->userptr.notifier_lock);
spin_lock_init(&vm->userptr.invalidated_lock);
ttm_lru_bulk_move_init(&vm->lru_bulk_move);
@@ -1709,13 +1506,12 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
if (flags & XE_VM_FLAG_LR_MODE) {
INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
xe_pm_runtime_get_noresume(xe);
+ INIT_LIST_HEAD(&vm->preempt.pm_activate_link);
}
- if (flags & XE_VM_FLAG_FAULT_MODE) {
- err = xe_svm_init(vm);
- if (err)
- goto err_no_resv;
- }
+ err = xe_svm_init(vm);
+ if (err)
+ goto err_no_resv;
vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
if (!vm_resv_obj) {
@@ -1728,49 +1524,68 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
drm_gem_object_put(vm_resv_obj);
- err = xe_vm_lock(vm, true);
- if (err)
- goto err_close;
-
- if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
- vm->flags |= XE_VM_FLAG_64K;
-
- for_each_tile(tile, xe, id) {
- if (flags & XE_VM_FLAG_MIGRATION &&
- tile->id != XE_VM_FLAG_TILE_ID(flags))
- continue;
+ err = 0;
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = true},
+ err) {
+ err = xe_vm_drm_exec_lock(vm, &exec);
+ drm_exec_retry_on_contention(&exec);
- vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
- if (IS_ERR(vm->pt_root[id])) {
- err = PTR_ERR(vm->pt_root[id]);
- vm->pt_root[id] = NULL;
- goto err_unlock_close;
- }
- }
+ if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
+ vm->flags |= XE_VM_FLAG_64K;
- if (xe_vm_has_scratch(vm)) {
for_each_tile(tile, xe, id) {
- if (!vm->pt_root[id])
+ if (flags & XE_VM_FLAG_MIGRATION &&
+ tile->id != XE_VM_FLAG_TILE_ID(flags))
continue;
- err = xe_vm_create_scratch(xe, tile, vm);
+ vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level,
+ &exec);
+ if (IS_ERR(vm->pt_root[id])) {
+ err = PTR_ERR(vm->pt_root[id]);
+ vm->pt_root[id] = NULL;
+ xe_vm_pt_destroy(vm);
+ drm_exec_retry_on_contention(&exec);
+ xe_validation_retry_on_oom(&ctx, &err);
+ break;
+ }
+ }
+ if (err)
+ break;
+
+ if (xe_vm_has_scratch(vm)) {
+ for_each_tile(tile, xe, id) {
+ if (!vm->pt_root[id])
+ continue;
+
+ err = xe_vm_create_scratch(xe, tile, vm, &exec);
+ if (err) {
+ xe_vm_free_scratch(vm);
+ xe_vm_pt_destroy(vm);
+ drm_exec_retry_on_contention(&exec);
+ xe_validation_retry_on_oom(&ctx, &err);
+ break;
+ }
+ }
if (err)
- goto err_unlock_close;
+ break;
+ vm->batch_invalidate_tlb = true;
}
- vm->batch_invalidate_tlb = true;
- }
- if (vm->flags & XE_VM_FLAG_LR_MODE)
- vm->batch_invalidate_tlb = false;
+ if (vm->flags & XE_VM_FLAG_LR_MODE) {
+ INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
+ vm->batch_invalidate_tlb = false;
+ }
- /* Fill pt_root after allocating scratch tables */
- for_each_tile(tile, xe, id) {
- if (!vm->pt_root[id])
- continue;
+ /* Fill pt_root after allocating scratch tables */
+ for_each_tile(tile, xe, id) {
+ if (!vm->pt_root[id])
+ continue;
- xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
+ xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
+ }
}
- xe_vm_unlock(vm);
+ if (err)
+ goto err_close;
/* Kernel migration VM shouldn't have a circular loop.. */
if (!(flags & XE_VM_FLAG_MIGRATION)) {
@@ -1794,12 +1609,24 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
if (number_tiles > 1)
vm->composite_fence_ctx = dma_fence_context_alloc(1);
+ if (xef && xe->info.has_asid) {
+ u32 asid;
+
+ down_write(&xe->usm.lock);
+ err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
+ XA_LIMIT(1, XE_MAX_ASID - 1),
+ &xe->usm.next_asid, GFP_KERNEL);
+ up_write(&xe->usm.lock);
+ if (err < 0)
+ goto err_close;
+
+ vm->usm.asid = asid;
+ }
+
trace_xe_vm_create(vm);
return vm;
-err_unlock_close:
- xe_vm_unlock(vm);
err_close:
xe_vm_close_and_put(vm);
return ERR_PTR(err);
@@ -1814,6 +1641,8 @@ err_no_resv:
for_each_tile(tile, xe, id)
xe_range_fence_tree_fini(&vm->rftree[id]);
ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move);
+ if (vm->xef)
+ xe_file_put(vm->xef);
kfree(vm);
if (flags & XE_VM_FLAG_LR_MODE)
xe_pm_runtime_put(xe);
@@ -1850,7 +1679,7 @@ static void xe_vm_close(struct xe_vm *vm)
xe_pt_clear(xe, vm->pt_root[id]);
for_each_gt(gt, xe, id)
- xe_gt_tlb_invalidation_vm(gt, vm);
+ xe_tlb_inval_vm(&gt->tlb_inval, vm);
}
}
@@ -1874,8 +1703,12 @@ void xe_vm_close_and_put(struct xe_vm *vm)
xe_assert(xe, !vm->preempt.num_exec_queues);
xe_vm_close(vm);
- if (xe_vm_in_preempt_fence_mode(vm))
+ if (xe_vm_in_preempt_fence_mode(vm)) {
+ mutex_lock(&xe->rebind_resume_lock);
+ list_del_init(&vm->preempt.pm_activate_link);
+ mutex_unlock(&xe->rebind_resume_lock);
flush_work(&vm->preempt.rebind_work);
+ }
if (xe_vm_in_fault_mode(vm))
xe_svm_close(vm);
@@ -1900,9 +1733,9 @@ void xe_vm_close_and_put(struct xe_vm *vm)
vma = gpuva_to_vma(gpuva);
if (xe_vma_has_no_bo(vma)) {
- down_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_lock(vm);
vma->gpuva.flags |= XE_VMA_DESTROYED;
- up_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_unlock(vm);
}
xe_vm_remove_vma(vm, vma);
@@ -1926,13 +1759,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)
* destroy the pagetables immediately.
*/
xe_vm_free_scratch(vm);
-
- for_each_tile(tile, xe, id) {
- if (vm->pt_root[id]) {
- xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
- vm->pt_root[id] = NULL;
- }
- }
+ xe_vm_pt_destroy(vm);
xe_vm_unlock(vm);
/*
@@ -1946,8 +1773,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)
xe_vma_destroy_unlocked(vma);
}
- if (xe_vm_in_fault_mode(vm))
- xe_svm_fini(vm);
+ xe_svm_fini(vm);
up_write(&vm->lock);
@@ -2024,8 +1850,7 @@ struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
{
- return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0,
- tile_to_xe(tile)->pat.idx[XE_CACHE_WB]);
+ return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0);
}
static struct xe_exec_queue *
@@ -2059,16 +1884,15 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
struct xe_device *xe = to_xe_device(dev);
struct xe_file *xef = to_xe_file(file);
struct drm_xe_vm_create *args = data;
- struct xe_tile *tile;
struct xe_vm *vm;
- u32 id, asid;
+ u32 id;
int err;
u32 flags = 0;
if (XE_IOCTL_DBG(xe, args->extensions))
return -EINVAL;
- if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
+ if (XE_GT_WA(xe_root_mmio_gt(xe), 14016763929))
args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
@@ -2097,29 +1921,10 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
flags |= XE_VM_FLAG_FAULT_MODE;
- vm = xe_vm_create(xe, flags);
+ vm = xe_vm_create(xe, flags, xef);
if (IS_ERR(vm))
return PTR_ERR(vm);
- if (xe->info.has_asid) {
- down_write(&xe->usm.lock);
- err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
- XA_LIMIT(1, XE_MAX_ASID - 1),
- &xe->usm.next_asid, GFP_KERNEL);
- up_write(&xe->usm.lock);
- if (err < 0)
- goto err_close_and_put;
-
- vm->usm.asid = asid;
- }
-
- vm->xef = xe_file_get(xef);
-
- /* Record BO memory for VM pagetable created against client */
- for_each_tile(tile, xe, id)
- if (vm->pt_root[id])
- xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo);
-
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
/* Warning: Security issue - never enable by default */
args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
@@ -2169,6 +1974,110 @@ int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
return err;
}
+static int xe_vm_query_vmas(struct xe_vm *vm, u64 start, u64 end)
+{
+ struct drm_gpuva *gpuva;
+ u32 num_vmas = 0;
+
+ lockdep_assert_held(&vm->lock);
+ drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end)
+ num_vmas++;
+
+ return num_vmas;
+}
+
+static int get_mem_attrs(struct xe_vm *vm, u32 *num_vmas, u64 start,
+ u64 end, struct drm_xe_mem_range_attr *attrs)
+{
+ struct drm_gpuva *gpuva;
+ int i = 0;
+
+ lockdep_assert_held(&vm->lock);
+
+ drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end) {
+ struct xe_vma *vma = gpuva_to_vma(gpuva);
+
+ if (i == *num_vmas)
+ return -ENOSPC;
+
+ attrs[i].start = xe_vma_start(vma);
+ attrs[i].end = xe_vma_end(vma);
+ attrs[i].atomic.val = vma->attr.atomic_access;
+ attrs[i].pat_index.val = vma->attr.pat_index;
+ attrs[i].preferred_mem_loc.devmem_fd = vma->attr.preferred_loc.devmem_fd;
+ attrs[i].preferred_mem_loc.migration_policy =
+ vma->attr.preferred_loc.migration_policy;
+
+ i++;
+ }
+
+ *num_vmas = i;
+ return 0;
+}
+
+int xe_vm_query_vmas_attrs_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct xe_device *xe = to_xe_device(dev);
+ struct xe_file *xef = to_xe_file(file);
+ struct drm_xe_mem_range_attr *mem_attrs;
+ struct drm_xe_vm_query_mem_range_attr *args = data;
+ u64 __user *attrs_user = u64_to_user_ptr(args->vector_of_mem_attr);
+ struct xe_vm *vm;
+ int err = 0;
+
+ if (XE_IOCTL_DBG(xe,
+ ((args->num_mem_ranges == 0 &&
+ (attrs_user || args->sizeof_mem_range_attr != 0)) ||
+ (args->num_mem_ranges > 0 &&
+ (!attrs_user ||
+ args->sizeof_mem_range_attr !=
+ sizeof(struct drm_xe_mem_range_attr))))))
+ return -EINVAL;
+
+ vm = xe_vm_lookup(xef, args->vm_id);
+ if (XE_IOCTL_DBG(xe, !vm))
+ return -EINVAL;
+
+ err = down_read_interruptible(&vm->lock);
+ if (err)
+ goto put_vm;
+
+ attrs_user = u64_to_user_ptr(args->vector_of_mem_attr);
+
+ if (args->num_mem_ranges == 0 && !attrs_user) {
+ args->num_mem_ranges = xe_vm_query_vmas(vm, args->start, args->start + args->range);
+ args->sizeof_mem_range_attr = sizeof(struct drm_xe_mem_range_attr);
+ goto unlock_vm;
+ }
+
+ mem_attrs = kvmalloc_array(args->num_mem_ranges, args->sizeof_mem_range_attr,
+ GFP_KERNEL | __GFP_ACCOUNT |
+ __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ if (!mem_attrs) {
+ err = args->num_mem_ranges > 1 ? -ENOBUFS : -ENOMEM;
+ goto unlock_vm;
+ }
+
+ memset(mem_attrs, 0, args->num_mem_ranges * args->sizeof_mem_range_attr);
+ err = get_mem_attrs(vm, &args->num_mem_ranges, args->start,
+ args->start + args->range, mem_attrs);
+ if (err)
+ goto free_mem_attrs;
+
+ err = copy_to_user(attrs_user, mem_attrs,
+ args->sizeof_mem_range_attr * args->num_mem_ranges);
+ if (err)
+ err = -EFAULT;
+
+free_mem_attrs:
+ kvfree(mem_attrs);
+unlock_vm:
+ up_read(&vm->lock);
+put_vm:
+ xe_vm_put(vm);
+ return err;
+}
+
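/*
 * Illustrative userspace sketch (not part of this patch): the query ioctl is
 * designed to be called twice — first with num_mem_ranges == 0 and a NULL
 * vector to learn the count and per-entry size, then again with a buffer of
 * that size. The DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS wrapper name and the
 * fd/vm_id/start/range values are assumed from the matching uapi additions;
 * standard libc headers (sys/ioctl.h, stdlib.h, stdint.h) are implied.
 */
struct drm_xe_vm_query_mem_range_attr q = {
	.vm_id = vm_id,
	.start = start,
	.range = range,
	/* num_mem_ranges, sizeof_mem_range_attr, vector_of_mem_attr all 0 */
};

/* First pass: kernel fills num_mem_ranges and sizeof_mem_range_attr. */
if (ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS, &q))
	return -errno;

struct drm_xe_mem_range_attr *attrs =
	calloc(q.num_mem_ranges, q.sizeof_mem_range_attr);
if (!attrs)
	return -ENOMEM;
q.vector_of_mem_attr = (uintptr_t)attrs;

/* Second pass: kernel copies one attr struct per VMA in [start, start+range). */
if (ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS, &q))
	return -errno;
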
static bool vma_matches(struct xe_vma *vma, u64 page_addr)
{
if (page_addr > xe_vma_end(vma) - 1 ||
@@ -2207,9 +2116,9 @@ static const u32 region_to_mem_type[] = {
static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
bool post_commit)
{
- down_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_lock(vm);
vma->gpuva.flags |= XE_VMA_DESTROYED;
- up_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_unlock(vm);
if (post_commit)
xe_vm_remove_vma(vm, vma);
}
@@ -2316,10 +2225,17 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
switch (operation) {
case DRM_XE_VM_BIND_OP_MAP:
- case DRM_XE_VM_BIND_OP_MAP_USERPTR:
- ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
- obj, bo_offset_or_userptr);
+ case DRM_XE_VM_BIND_OP_MAP_USERPTR: {
+ struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = addr,
+ .map.va.range = range,
+ .map.gem.obj = obj,
+ .map.gem.offset = bo_offset_or_userptr,
+ };
+
+ ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, &map_req);
break;
+ }
case DRM_XE_VM_BIND_OP_UNMAP:
ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
break;
@@ -2367,9 +2283,10 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
__xe_vm_needs_clear_scratch_pages(vm, flags);
} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
+ struct xe_tile *tile;
struct xe_svm_range *svm_range;
struct drm_gpusvm_ctx ctx = {};
- struct xe_tile *tile;
+ struct drm_pagemap *dpagemap;
u8 id, tile_mask = 0;
u32 i;
@@ -2386,8 +2303,24 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
tile_mask |= 0x1 << id;
xa_init_flags(&op->prefetch_range.range, XA_FLAGS_ALLOC);
- op->prefetch_range.region = prefetch_region;
op->prefetch_range.ranges_count = 0;
+ tile = NULL;
+
+ if (prefetch_region == DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC) {
+ dpagemap = xe_vma_resolve_pagemap(vma,
+ xe_device_get_root_tile(vm->xe));
+			/*
+			 * TODO: Once multi-GPU support is enabled, we will
+			 * need a way to derive the tile from the dpagemap.
+			 */
+ if (dpagemap)
+ tile = xe_device_get_root_tile(vm->xe);
+ } else if (prefetch_region) {
+ tile = &vm->xe->tiles[region_to_mem_type[prefetch_region] -
+ XE_PL_VRAM0];
+ }
+
+ op->prefetch_range.tile = tile;
alloc_next_range:
svm_range = xe_svm_range_find_or_insert(vm, addr, vma, &ctx);
@@ -2406,7 +2339,7 @@ alloc_next_range:
goto unwind_prefetch_ops;
}
- if (xe_svm_range_validate(vm, svm_range, tile_mask, !!prefetch_region)) {
+ if (xe_svm_range_validate(vm, svm_range, tile_mask, !!tile)) {
xe_svm_range_debug(svm_range, "PREFETCH - RANGE IS VALID");
goto check_next_range;
}
@@ -2443,9 +2376,10 @@ unwind_prefetch_ops:
ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_create, ERRNO);
static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
- u16 pat_index, unsigned int flags)
+ struct xe_vma_mem_attr *attr, unsigned int flags)
{
struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
+ struct xe_validation_ctx ctx;
struct drm_exec exec;
struct xe_vma *vma;
int err = 0;
@@ -2453,9 +2387,9 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
lockdep_assert_held_write(&vm->lock);
if (bo) {
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
- drm_exec_until_all_locked(&exec) {
- err = 0;
+ err = 0;
+ xe_validation_guard(&ctx, &vm->xe->val, &exec,
+ (struct xe_val_flags) {.interruptible = true}, err) {
if (!bo->vm) {
err = drm_exec_lock_obj(&exec, xe_vm_obj(vm));
drm_exec_retry_on_contention(&exec);
@@ -2464,27 +2398,35 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
err = drm_exec_lock_obj(&exec, &bo->ttm.base);
drm_exec_retry_on_contention(&exec);
}
- if (err) {
- drm_exec_fini(&exec);
+ if (err)
return ERR_PTR(err);
+
+ vma = xe_vma_create(vm, bo, op->gem.offset,
+ op->va.addr, op->va.addr +
+ op->va.range - 1, attr, flags);
+ if (IS_ERR(vma))
+ return vma;
+
+ if (!bo->vm) {
+ err = add_preempt_fences(vm, bo);
+ if (err) {
+ prep_vma_destroy(vm, vma, false);
+ xe_vma_destroy(vma, NULL);
+ }
}
}
+ if (err)
+ return ERR_PTR(err);
+ } else {
+ vma = xe_vma_create(vm, NULL, op->gem.offset,
+ op->va.addr, op->va.addr +
+ op->va.range - 1, attr, flags);
+ if (IS_ERR(vma))
+ return vma;
+
+ if (xe_vma_is_userptr(vma))
+ err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
}
- vma = xe_vma_create(vm, bo, op->gem.offset,
- op->va.addr, op->va.addr +
- op->va.range - 1, pat_index, flags);
- if (IS_ERR(vma))
- goto err_unlock;
-
- if (xe_vma_is_userptr(vma))
- err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
- else if (!xe_vma_has_no_bo(vma) && !bo->vm)
- err = add_preempt_fences(vm, bo);
-
-err_unlock:
- if (bo)
- drm_exec_fini(&exec);
-
if (err) {
prep_vma_destroy(vm, vma, false);
xe_vma_destroy_unlocked(vma);
@@ -2589,6 +2531,29 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
return err;
}
+/**
+ * xe_vma_has_default_mem_attrs - Check if a VMA has default memory attributes
+ * @vma: Pointer to the xe_vma structure to check
+ *
+ * This function determines whether the given VMA (Virtual Memory Area)
+ * has its memory attributes set to their default values. Specifically,
+ * it checks the following conditions:
+ *
+ * - `atomic_access` is `DRM_XE_ATOMIC_UNDEFINED`
+ * - `pat_index` is equal to `default_pat_index`
+ * - `preferred_loc.devmem_fd` is `DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE`
+ * - `preferred_loc.migration_policy` is `DRM_XE_MIGRATE_ALL_PAGES`
+ *
+ * Return: true if all attributes are at their default values, false otherwise.
+ */
+bool xe_vma_has_default_mem_attrs(struct xe_vma *vma)
+{
+ return (vma->attr.atomic_access == DRM_XE_ATOMIC_UNDEFINED &&
+ vma->attr.pat_index == vma->attr.default_pat_index &&
+ vma->attr.preferred_loc.devmem_fd == DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE &&
+ vma->attr.preferred_loc.migration_policy == DRM_XE_MIGRATE_ALL_PAGES);
+}
+
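/*
 * Hedged usage sketch (caller is illustrative): code that is about to throw
 * away and re-create a VMA can use this helper to warn when madvise-modified
 * attributes would be silently dropped, as the non-madvise path in
 * xe_vm_alloc_vma() below does via XE_WARN_ON().
 */
if (!xe_vma_has_default_mem_attrs(vma))
	drm_warn(&vm->xe->drm, "re-creating VMA with non-default mem attrs\n");
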
static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
struct xe_vma_ops *vops)
{
@@ -2615,6 +2580,16 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
{
+ struct xe_vma_mem_attr default_attr = {
+ .preferred_loc = {
+ .devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE,
+ .migration_policy = DRM_XE_MIGRATE_ALL_PAGES,
+ },
+ .atomic_access = DRM_XE_ATOMIC_UNDEFINED,
+ .default_pat_index = op->map.pat_index,
+ .pat_index = op->map.pat_index,
+ };
+
flags |= op->map.read_only ?
VMA_CREATE_FLAG_READ_ONLY : 0;
flags |= op->map.is_null ?
@@ -2624,7 +2599,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
flags |= op->map.is_cpu_addr_mirror ?
VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
- vma = new_vma(vm, &op->base.map, op->map.pat_index,
+ vma = new_vma(vm, &op->base.map, &default_attr,
flags);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -2652,8 +2627,12 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
end = op->base.remap.next->va.addr;
if (xe_vma_is_cpu_addr_mirror(old) &&
- xe_svm_has_mapping(vm, start, end))
- return -EBUSY;
+ xe_svm_has_mapping(vm, start, end)) {
+ if (vops->flags & XE_VMA_OPS_FLAG_MADVISE)
+ xe_svm_unmap_address_range(vm, start, end);
+ else
+ return -EBUSY;
+ }
op->remap.start = xe_vma_start(old);
op->remap.range = xe_vma_size(old);
@@ -2672,7 +2651,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
if (op->base.remap.prev) {
vma = new_vma(vm, op->base.remap.prev,
- old->pat_index, flags);
+ &old->attr, flags);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -2702,7 +2681,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
if (op->base.remap.next) {
vma = new_vma(vm, op->base.remap.next,
- old->pat_index, flags);
+ &old->attr, flags);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -2791,9 +2770,9 @@ static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
if (vma) {
- down_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_lock(vm);
vma->gpuva.flags &= ~XE_VMA_DESTROYED;
- up_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_unlock(vm);
if (post_commit)
xe_vm_insert_vma(vm, vma);
}
@@ -2812,9 +2791,9 @@ static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
xe_vma_destroy_unlocked(op->remap.next);
}
if (vma) {
- down_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_lock(vm);
vma->gpuva.flags &= ~XE_VMA_DESTROYED;
- up_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_unlock(vm);
if (post_commit)
xe_vm_insert_vma(vm, vma);
}
@@ -2864,7 +2843,7 @@ static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma,
err = drm_exec_lock_obj(exec, &bo->ttm.base);
if (!err && validate)
err = xe_bo_validate(bo, vm,
- !xe_vm_in_preempt_fence_mode(vm));
+ !xe_vm_in_preempt_fence_mode(vm), exec);
}
return err;
@@ -2889,30 +2868,27 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
{
bool devmem_possible = IS_DGFX(vm->xe) && IS_ENABLED(CONFIG_DRM_XE_PAGEMAP);
struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
+ struct xe_tile *tile = op->prefetch_range.tile;
int err = 0;
struct xe_svm_range *svm_range;
struct drm_gpusvm_ctx ctx = {};
- struct xe_tile *tile;
unsigned long i;
- u32 region;
if (!xe_vma_is_cpu_addr_mirror(vma))
return 0;
- region = op->prefetch_range.region;
-
ctx.read_only = xe_vma_read_only(vma);
ctx.devmem_possible = devmem_possible;
ctx.check_pages_threshold = devmem_possible ? SZ_64K : 0;
+ ctx.device_private_page_owner = xe_svm_devm_owner(vm->xe);
/* TODO: Threading the migration */
xa_for_each(&op->prefetch_range.range, i, svm_range) {
- if (!region)
+ if (!tile)
xe_svm_range_migrate_to_smem(vm, svm_range);
- if (xe_svm_range_needs_migrate_to_vram(svm_range, vma, region)) {
- tile = &vm->xe->tiles[region_to_mem_type[region] - XE_PL_VRAM0];
+ if (xe_svm_range_needs_migrate_to_vram(svm_range, vma, !!tile)) {
err = xe_svm_alloc_vram(tile, svm_range, &ctx);
if (err) {
drm_dbg(&vm->xe->drm, "VRAM allocation failed, retry from userspace, asid=%u, gpusvm=%p, errno=%pe\n",
@@ -2975,19 +2951,20 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
u32 region;
- if (xe_vma_is_cpu_addr_mirror(vma))
- region = op->prefetch_range.region;
- else
+ if (!xe_vma_is_cpu_addr_mirror(vma)) {
region = op->prefetch.region;
-
- xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type));
+ xe_assert(vm->xe, region == DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC ||
+ region <= ARRAY_SIZE(region_to_mem_type));
+ }
err = vma_lock_and_validate(exec,
gpuva_to_vma(op->base.prefetch.va),
false);
if (!err && !xe_vma_has_no_bo(vma))
err = xe_bo_migrate(xe_vma_bo(vma),
- region_to_mem_type[region]);
+ region_to_mem_type[region],
+ NULL,
+ exec);
break;
}
default:
@@ -3250,35 +3227,37 @@ static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops,
static struct dma_fence *vm_bind_ioctl_ops_execute(struct xe_vm *vm,
struct xe_vma_ops *vops)
{
+ struct xe_validation_ctx ctx;
struct drm_exec exec;
struct dma_fence *fence;
- int err;
+ int err = 0;
lockdep_assert_held_write(&vm->lock);
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
- DRM_EXEC_IGNORE_DUPLICATES, 0);
- drm_exec_until_all_locked(&exec) {
+ xe_validation_guard(&ctx, &vm->xe->val, &exec,
+ ((struct xe_val_flags) {
+ .interruptible = true,
+ .exec_ignore_duplicates = true,
+ }), err) {
err = vm_bind_ioctl_ops_lock_and_prep(&exec, vm, vops);
drm_exec_retry_on_contention(&exec);
- if (err) {
- fence = ERR_PTR(err);
- goto unlock;
- }
+ xe_validation_retry_on_oom(&ctx, &err);
+ if (err)
+ return ERR_PTR(err);
+ xe_vm_set_validation_exec(vm, &exec);
fence = ops_execute(vm, vops);
+ xe_vm_set_validation_exec(vm, NULL);
if (IS_ERR(fence)) {
if (PTR_ERR(fence) == -ENODATA)
vm_bind_ioctl_ops_fini(vm, vops, NULL);
- goto unlock;
+ return fence;
}
vm_bind_ioctl_ops_fini(vm, vops, fence);
}
-unlock:
- drm_exec_fini(&exec);
- return fence;
+ return err ? ERR_PTR(err) : fence;
}
ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_execute, ERRNO);
@@ -3394,12 +3373,14 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
+ XE_IOCTL_DBG(xe, op == DRM_XE_VM_BIND_OP_MAP_USERPTR &&
+ !IS_ENABLED(CONFIG_DRM_GPUSVM)) ||
XE_IOCTL_DBG(xe, obj &&
op == DRM_XE_VM_BIND_OP_PREFETCH) ||
XE_IOCTL_DBG(xe, prefetch_region &&
op != DRM_XE_VM_BIND_OP_PREFETCH) ||
- XE_IOCTL_DBG(xe, !(BIT(prefetch_region) &
- xe->info.mem_region_mask)) ||
+ XE_IOCTL_DBG(xe, (prefetch_region != DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC &&
+ !(BIT(prefetch_region) & xe->info.mem_region_mask))) ||
XE_IOCTL_DBG(xe, obj &&
op == DRM_XE_VM_BIND_OP_UNMAP)) {
err = -EINVAL;
@@ -3421,6 +3402,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
free_bind_ops:
if (args->num_binds > 1)
kvfree(*bind_ops);
+ *bind_ops = NULL;
return err;
}
@@ -3527,7 +3509,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
struct xe_exec_queue *q = NULL;
u32 num_syncs, num_ufence = 0;
struct xe_sync_entry *syncs = NULL;
- struct drm_xe_vm_bind_op *bind_ops;
+ struct drm_xe_vm_bind_op *bind_ops = NULL;
struct xe_vma_ops vops;
struct dma_fence *fence;
int err;
@@ -3545,7 +3527,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
q = xe_exec_queue_lookup(xef, args->exec_queue_id);
if (XE_IOCTL_DBG(xe, !q)) {
err = -ENOENT;
- goto put_vm;
+ goto free_bind_ops;
}
if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
@@ -3591,7 +3573,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
__GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (!ops) {
err = -ENOMEM;
- goto release_vm_lock;
+ goto free_bos;
}
}
@@ -3725,17 +3707,20 @@ free_syncs:
put_obj:
for (i = 0; i < args->num_binds; ++i)
xe_bo_put(bos[i]);
+
+ kvfree(ops);
+free_bos:
+ kvfree(bos);
release_vm_lock:
up_write(&vm->lock);
put_exec_queue:
if (q)
xe_exec_queue_put(q);
-put_vm:
- xe_vm_put(vm);
- kvfree(bos);
- kvfree(ops);
+free_bind_ops:
if (args->num_binds > 1)
kvfree(bind_ops);
+put_vm:
+ xe_vm_put(vm);
return err;
}
@@ -3825,10 +3810,14 @@ release_vm_lock:
*/
int xe_vm_lock(struct xe_vm *vm, bool intr)
{
+ int ret;
+
if (intr)
- return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
+ ret = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
+ else
+ ret = dma_resv_lock(xe_vm_resv(vm), NULL);
- return dma_resv_lock(xe_vm_resv(vm), NULL);
+ return ret;
}
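/*
 * Minimal usage sketch (assumed caller, not part of this patch): with
 * intr == true the lock attempt may be interrupted and the error must be
 * propagated.
 */
int err = xe_vm_lock(vm, true);	/* interruptible; may return -EINTR */
if (err)
	return err;
/* ... touch state protected by the vm's resv ... */
xe_vm_unlock(vm);
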
/**
@@ -3843,7 +3832,7 @@ void xe_vm_unlock(struct xe_vm *vm)
}
/**
- * xe_vm_range_tilemask_tlb_invalidation - Issue a TLB invalidation on this tilemask for an
+ * xe_vm_range_tilemask_tlb_inval - Issue a TLB invalidation on this tilemask for an
* address range
* @vm: The VM
* @start: start address
@@ -3854,10 +3843,11 @@ void xe_vm_unlock(struct xe_vm *vm)
*
* Returns 0 for success, negative error code otherwise.
*/
-int xe_vm_range_tilemask_tlb_invalidation(struct xe_vm *vm, u64 start,
- u64 end, u8 tile_mask)
+int xe_vm_range_tilemask_tlb_inval(struct xe_vm *vm, u64 start,
+ u64 end, u8 tile_mask)
{
- struct xe_gt_tlb_invalidation_fence fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
+ struct xe_tlb_inval_fence
+ fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
struct xe_tile *tile;
u32 fence_id = 0;
u8 id;
@@ -3867,39 +3857,36 @@ int xe_vm_range_tilemask_tlb_invalidation(struct xe_vm *vm, u64 start,
return 0;
for_each_tile(tile, vm->xe, id) {
- if (tile_mask & BIT(id)) {
- xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
- &fence[fence_id], true);
-
- err = xe_gt_tlb_invalidation_range(tile->primary_gt,
- &fence[fence_id],
- start,
- end,
- vm->usm.asid);
- if (err)
- goto wait;
- ++fence_id;
+ if (!(tile_mask & BIT(id)))
+ continue;
- if (!tile->media_gt)
- continue;
+ xe_tlb_inval_fence_init(&tile->primary_gt->tlb_inval,
+ &fence[fence_id], true);
- xe_gt_tlb_invalidation_fence_init(tile->media_gt,
- &fence[fence_id], true);
+ err = xe_tlb_inval_range(&tile->primary_gt->tlb_inval,
+ &fence[fence_id], start, end,
+ vm->usm.asid);
+ if (err)
+ goto wait;
+ ++fence_id;
- err = xe_gt_tlb_invalidation_range(tile->media_gt,
- &fence[fence_id],
- start,
- end,
- vm->usm.asid);
- if (err)
- goto wait;
- ++fence_id;
- }
+ if (!tile->media_gt)
+ continue;
+
+ xe_tlb_inval_fence_init(&tile->media_gt->tlb_inval,
+ &fence[fence_id], true);
+
+ err = xe_tlb_inval_range(&tile->media_gt->tlb_inval,
+ &fence[fence_id], start, end,
+ vm->usm.asid);
+ if (err)
+ goto wait;
+ ++fence_id;
}
wait:
for (id = 0; id < fence_id; ++id)
- xe_gt_tlb_invalidation_fence_wait(&fence[id]);
+ xe_tlb_inval_fence_wait(&fence[id]);
return err;
}
@@ -3937,13 +3924,13 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
*/
if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
if (xe_vma_is_userptr(vma)) {
- lockdep_assert(lockdep_is_held_type(&vm->userptr.notifier_lock, 0) ||
- (lockdep_is_held_type(&vm->userptr.notifier_lock, 1) &&
+ lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 0) ||
+ (lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) &&
lockdep_is_held(&xe_vm_resv(vm)->lock.base)));
WARN_ON_ONCE(!mmu_interval_check_retry
(&to_userptr_vma(vma)->userptr.notifier,
- to_userptr_vma(vma)->userptr.notifier_seq));
+ to_userptr_vma(vma)->userptr.pages.notifier_seq));
WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(vm),
DMA_RESV_USAGE_BOOKKEEP));
@@ -3958,8 +3945,8 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
xe_device_wmb(xe);
- ret = xe_vm_range_tilemask_tlb_invalidation(xe_vma_vm(vma), xe_vma_start(vma),
- xe_vma_end(vma), tile_mask);
+ ret = xe_vm_range_tilemask_tlb_inval(xe_vma_vm(vma), xe_vma_start(vma),
+ xe_vma_end(vma), tile_mask);
/* WRITE_ONCE pairs with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
WRITE_ONCE(vma->tile_invalidated, vma->tile_mask);
@@ -4161,3 +4148,223 @@ void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
}
kvfree(snap);
}
+
+/**
+ * xe_vma_need_vram_for_atomic - Check if VMA needs VRAM migration for atomic operations
+ * @xe: Pointer to the XE device structure
+ * @vma: Pointer to the virtual memory area (VMA) structure
+ * @is_atomic: True if this is an atomic access taken in the pagefault path
+ *
+ * This function determines whether the given VMA needs to be migrated to
+ * VRAM in order to perform the atomic GPU operation.
+ *
+ * Return:
+ * 1 - Migration to VRAM is required
+ * 0 - Migration is not required
+ * -EACCES - Invalid access for the atomic memory attribute
+ */
+int xe_vma_need_vram_for_atomic(struct xe_device *xe, struct xe_vma *vma, bool is_atomic)
+{
+ u32 atomic_access = xe_vma_bo(vma) ? xe_vma_bo(vma)->attr.atomic_access :
+ vma->attr.atomic_access;
+
+ if (!IS_DGFX(xe) || !is_atomic)
+ return false;
+
+ /*
+ * NOTE: The checks implemented here are platform-specific. For
+ * instance, on a device supporting CXL atomics, these would ideally
+ * work universally without additional handling.
+ */
+ switch (atomic_access) {
+ case DRM_XE_ATOMIC_DEVICE:
+ return !xe->info.has_device_atomics_on_smem;
+
+ case DRM_XE_ATOMIC_CPU:
+ return -EACCES;
+
+ case DRM_XE_ATOMIC_UNDEFINED:
+ case DRM_XE_ATOMIC_GLOBAL:
+ default:
+ return 1;
+ }
+}
+
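/*
 * Hedged caller sketch for a pagefault-style path; migrate_to_vram() is a
 * hypothetical helper, only the tri-state return handling is dictated by
 * the function above.
 */
int need = xe_vma_need_vram_for_atomic(xe, vma, access_is_atomic);

if (need < 0)
	return need;	/* -EACCES: range allows CPU atomics only */
if (need == 1)
	err = migrate_to_vram(vma);	/* hypothetical migration helper */
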
+static int xe_vm_alloc_vma(struct xe_vm *vm,
+ struct drm_gpuvm_map_req *map_req,
+ bool is_madvise)
+{
+ struct xe_vma_ops vops;
+ struct drm_gpuva_ops *ops = NULL;
+ struct drm_gpuva_op *__op;
+ bool is_cpu_addr_mirror = false;
+ bool remap_op = false;
+ struct xe_vma_mem_attr tmp_attr;
+ u16 default_pat;
+ int err;
+
+ lockdep_assert_held_write(&vm->lock);
+
+ if (is_madvise)
+ ops = drm_gpuvm_madvise_ops_create(&vm->gpuvm, map_req);
+ else
+ ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, map_req);
+
+ if (IS_ERR(ops))
+ return PTR_ERR(ops);
+
+ if (list_empty(&ops->list)) {
+ err = 0;
+ goto free_ops;
+ }
+
+ drm_gpuva_for_each_op(__op, ops) {
+ struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
+ struct xe_vma *vma = NULL;
+
+ if (!is_madvise) {
+ if (__op->op == DRM_GPUVA_OP_UNMAP) {
+ vma = gpuva_to_vma(op->base.unmap.va);
+ XE_WARN_ON(!xe_vma_has_default_mem_attrs(vma));
+ default_pat = vma->attr.default_pat_index;
+ }
+
+ if (__op->op == DRM_GPUVA_OP_REMAP) {
+ vma = gpuva_to_vma(op->base.remap.unmap->va);
+ default_pat = vma->attr.default_pat_index;
+ }
+
+ if (__op->op == DRM_GPUVA_OP_MAP) {
+ op->map.is_cpu_addr_mirror = true;
+ op->map.pat_index = default_pat;
+ }
+ } else {
+ if (__op->op == DRM_GPUVA_OP_REMAP) {
+ vma = gpuva_to_vma(op->base.remap.unmap->va);
+ xe_assert(vm->xe, !remap_op);
+ xe_assert(vm->xe, xe_vma_has_no_bo(vma));
+ remap_op = true;
+
+ if (xe_vma_is_cpu_addr_mirror(vma))
+ is_cpu_addr_mirror = true;
+ else
+ is_cpu_addr_mirror = false;
+ }
+
+ if (__op->op == DRM_GPUVA_OP_MAP) {
+ xe_assert(vm->xe, remap_op);
+ remap_op = false;
+ /*
+				 * For madvise ops DRM_GPUVA_OP_MAP always
+				 * follows DRM_GPUVA_OP_REMAP, so assign
+				 * op->map.is_cpu_addr_mirror true if the
+				 * REMAP was for a cpu_addr_mirror VMA.
+ */
+ op->map.is_cpu_addr_mirror = is_cpu_addr_mirror;
+ }
+ }
+ print_op(vm->xe, __op);
+ }
+
+ xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
+
+ if (is_madvise)
+ vops.flags |= XE_VMA_OPS_FLAG_MADVISE;
+
+ err = vm_bind_ioctl_ops_parse(vm, ops, &vops);
+ if (err)
+ goto unwind_ops;
+
+ xe_vm_lock(vm, false);
+
+ drm_gpuva_for_each_op(__op, ops) {
+ struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
+ struct xe_vma *vma;
+
+ if (__op->op == DRM_GPUVA_OP_UNMAP) {
+ vma = gpuva_to_vma(op->base.unmap.va);
+ /* There should be no unmap for madvise */
+ if (is_madvise)
+ XE_WARN_ON("UNEXPECTED UNMAP");
+
+ xe_vma_destroy(vma, NULL);
+ } else if (__op->op == DRM_GPUVA_OP_REMAP) {
+ vma = gpuva_to_vma(op->base.remap.unmap->va);
+			/* For madvise ops, store the attributes of the VMA
+			 * unmapped by REMAP so they can be assigned to the
+			 * newly created MAP VMA.
+			 */
+ if (is_madvise)
+ tmp_attr = vma->attr;
+
+ xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va), NULL);
+ } else if (__op->op == DRM_GPUVA_OP_MAP) {
+ vma = op->map.vma;
+			/* For a madvise call, MAP is always preceded by
+			 * REMAP, so tmp_attr always holds sane values that
+			 * are safe to copy to the new VMA.
+			 */
+ if (is_madvise)
+ vma->attr = tmp_attr;
+ }
+ }
+
+ xe_vm_unlock(vm);
+ drm_gpuva_ops_free(&vm->gpuvm, ops);
+ return 0;
+
+unwind_ops:
+ vm_bind_ioctl_ops_unwind(vm, &ops, 1);
+free_ops:
+ drm_gpuva_ops_free(&vm->gpuvm, ops);
+ return err;
+}
+
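/*
 * Worked example (illustrative addresses): madvising [0x20000, 0x30000) in
 * the middle of an existing VMA [0x10000, 0x40000) produces a REMAP
 * (prev: [0x10000, 0x20000), next: [0x30000, 0x40000)) followed by a MAP of
 * [0x20000, 0x30000). The old VMA's attributes are captured in tmp_attr at
 * REMAP time and copied onto the freshly created MAP VMA, so only the
 * madvised range later picks up the new attribute while prev/next keep the
 * original ones.
 */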
+/**
+ * xe_vm_alloc_madvise_vma - Allocate VMA's with madvise ops
+ * @vm: Pointer to the xe_vm structure
+ * @start: Starting input address
+ * @range: Size of the input range
+ *
+ * This function splits existing VMAs to create new VMAs covering the
+ * user-provided input range.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int xe_vm_alloc_madvise_vma(struct xe_vm *vm, uint64_t start, uint64_t range)
+{
+ struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = start,
+ .map.va.range = range,
+ };
+
+ lockdep_assert_held_write(&vm->lock);
+
+ vm_dbg(&vm->xe->drm, "MADVISE_OPS_CREATE: addr=0x%016llx, size=0x%016llx", start, range);
+
+ return xe_vm_alloc_vma(vm, &map_req, true);
+}
+
+/**
+ * xe_vm_alloc_cpu_addr_mirror_vma - Allocate CPU addr mirror vma
+ * @vm: Pointer to the xe_vm structure
+ * @start: Starting input address
+ * @range: Size of the input range
+ *
+ * This function splits/merges existing VMAs to create new VMAs covering the
+ * user-provided input range.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int xe_vm_alloc_cpu_addr_mirror_vma(struct xe_vm *vm, uint64_t start, uint64_t range)
+{
+ struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = start,
+ .map.va.range = range,
+ };
+
+ lockdep_assert_held_write(&vm->lock);
+
+ vm_dbg(&vm->xe->drm, "CPU_ADDR_MIRROR_VMA_OPS_CREATE: addr=0x%016llx, size=0x%016llx",
+ start, range);
+
+ return xe_vm_alloc_vma(vm, &map_req, false);
+}
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 3475a118f666..ef8a5019574e 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -26,7 +26,7 @@ struct xe_sync_entry;
struct xe_svm_range;
struct drm_exec;
-struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags);
+struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef);
struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id);
int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node);
@@ -66,6 +66,8 @@ static inline bool xe_vm_is_closed_or_banned(struct xe_vm *vm)
struct xe_vma *
xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range);
+bool xe_vma_has_default_mem_attrs(struct xe_vma *vma);
+
/**
* xe_vm_has_scratch() - Whether the vm is configured for scratch PTEs
* @vm: The vm
@@ -171,6 +173,12 @@ static inline bool xe_vma_is_userptr(struct xe_vma *vma)
struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr);
+int xe_vma_need_vram_for_atomic(struct xe_device *xe, struct xe_vma *vma, bool is_atomic);
+
+int xe_vm_alloc_madvise_vma(struct xe_vm *vm, uint64_t addr, uint64_t size);
+
+int xe_vm_alloc_cpu_addr_mirror_vma(struct xe_vm *vm, uint64_t addr, uint64_t size);
+
/**
* to_userptr_vma() - Return a pointer to an embedding userptr vma
* @vma: Pointer to the embedded struct xe_vma
@@ -191,7 +199,7 @@ int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int xe_vm_bind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-
+int xe_vm_query_vmas_attrs_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
void xe_vm_close_and_put(struct xe_vm *vm);
static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
@@ -212,12 +220,6 @@ static inline bool xe_vm_in_preempt_fence_mode(struct xe_vm *vm)
int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
-int xe_vm_userptr_pin(struct xe_vm *vm);
-
-int __xe_vm_userptr_needs_repin(struct xe_vm *vm);
-
-int xe_vm_userptr_check_repin(struct xe_vm *vm);
-
int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma,
u8 tile_mask);
@@ -228,8 +230,8 @@ struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
struct xe_svm_range *range);
-int xe_vm_range_tilemask_tlb_invalidation(struct xe_vm *vm, u64 start,
- u64 end, u8 tile_mask);
+int xe_vm_range_tilemask_tlb_inval(struct xe_vm *vm, u64 start,
+ u64 end, u8 tile_mask);
int xe_vm_invalidate_vma(struct xe_vma *vma);
@@ -258,12 +260,6 @@ static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
}
}
-int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma);
-
-int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma);
-
-bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);
-
int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma);
int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
@@ -273,6 +269,8 @@ struct dma_fence *xe_vm_bind_kernel_bo(struct xe_vm *vm, struct xe_bo *bo,
struct xe_exec_queue *q, u64 addr,
enum xe_cache_level cache_lvl);
+void xe_vm_resume_rebind_worker(struct xe_vm *vm);
+
/**
* xe_vm_resv() - Return's the vm's reservation object
* @vm: The vm
@@ -292,6 +290,8 @@ void xe_vm_kill(struct xe_vm *vm, bool unlocked);
*/
#define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm))
+int xe_vm_drm_exec_lock(struct xe_vm *vm, struct drm_exec *exec);
+
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
#define vm_dbg drm_dbg
#else
@@ -315,22 +315,14 @@ void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);
* Register this task as currently making bos resident for the vm. Intended
* to avoid eviction by the same task of shared bos bound to the vm.
* Call with the vm's resv lock held.
- *
- * Return: A pin cookie that should be used for xe_vm_clear_validating().
*/
-static inline struct pin_cookie xe_vm_set_validating(struct xe_vm *vm,
- bool allow_res_evict)
+static inline void xe_vm_set_validating(struct xe_vm *vm, bool allow_res_evict)
{
- struct pin_cookie cookie = {};
-
if (vm && !allow_res_evict) {
xe_vm_assert_held(vm);
- cookie = lockdep_pin_lock(&xe_vm_resv(vm)->lock.base);
/* Pairs with READ_ONCE in xe_vm_is_validating() */
- WRITE_ONCE(vm->validating, current);
+ WRITE_ONCE(vm->validation.validating, current);
}
-
- return cookie;
}
/**
@@ -338,19 +330,16 @@ static inline struct pin_cookie xe_vm_set_validating(struct xe_vm *vm,
* @vm: Pointer to the vm or NULL
* @allow_res_evict: Eviction from @vm was allowed. Must be set to the same
 * value as for xe_vm_set_validating().
- * @cookie: Cookie obtained from xe_vm_set_validating().
*
 * Unregister this task as currently making bos resident for the vm. Intended
* to avoid eviction by the same task of shared bos bound to the vm.
* Call with the vm's resv lock held.
*/
-static inline void xe_vm_clear_validating(struct xe_vm *vm, bool allow_res_evict,
- struct pin_cookie cookie)
+static inline void xe_vm_clear_validating(struct xe_vm *vm, bool allow_res_evict)
{
if (vm && !allow_res_evict) {
- lockdep_unpin_lock(&xe_vm_resv(vm)->lock.base, cookie);
/* Pairs with READ_ONCE in xe_vm_is_validating() */
- WRITE_ONCE(vm->validating, NULL);
+ WRITE_ONCE(vm->validation.validating, NULL);
}
}
@@ -368,7 +357,7 @@ static inline void xe_vm_clear_validating(struct xe_vm *vm, bool allow_res_evict
static inline bool xe_vm_is_validating(struct xe_vm *vm)
{
/* Pairs with WRITE_ONCE in xe_vm_is_validating() */
- if (READ_ONCE(vm->validating) == current) {
+ if (READ_ONCE(vm->validation.validating) == current) {
xe_vm_assert_held(vm);
return true;
}
@@ -376,6 +365,34 @@ static inline bool xe_vm_is_validating(struct xe_vm *vm)
}
/**
+ * xe_vm_set_validation_exec() - Accessor to set the drm_exec object
+ * @vm: The vm we want to register a drm_exec object with.
+ * @exec: The exec object we want to register.
+ *
+ * Set the drm_exec object used to lock the vm's resv.
+ */
+static inline void xe_vm_set_validation_exec(struct xe_vm *vm, struct drm_exec *exec)
+{
+ xe_vm_assert_held(vm);
+ xe_assert(vm->xe, !!exec ^ !!vm->validation._exec);
+ vm->validation._exec = exec;
+}
+
+/**
+ * xe_vm_validation_exec() - Accessor to read the drm_exec object
+ * @vm: The vm to read the drm_exec object from.
+ *
+ * Return: The drm_exec object used to lock the vm's resv. The value
+ * is a valid pointer, %NULL, or one of the special values defined in
+ * xe_validation.h.
+ */
+static inline struct drm_exec *xe_vm_validation_exec(struct xe_vm *vm)
+{
+ xe_vm_assert_held(vm);
+ return vm->validation._exec;
+}
+
+/**
* xe_vm_has_valid_gpu_mapping() - Advisory helper to check if VMA or SVM range has
* a valid GPU mapping
* @tile: The tile which the GPU mapping belongs to
@@ -394,11 +411,4 @@ static inline bool xe_vm_is_validating(struct xe_vm *vm)
#define xe_vm_has_valid_gpu_mapping(tile, tile_present, tile_invalidated) \
((READ_ONCE(tile_present) & ~READ_ONCE(tile_invalidated)) & BIT((tile)->id))
-#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
-void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma);
-#else
-static inline void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
-{
-}
-#endif
#endif
diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
new file mode 100644
index 000000000000..cad3cf627c3f
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include "xe_vm_madvise.h"
+
+#include <linux/nospec.h>
+#include <drm/xe_drm.h>
+
+#include "xe_bo.h"
+#include "xe_pat.h"
+#include "xe_pt.h"
+#include "xe_svm.h"
+
+struct xe_vmas_in_madvise_range {
+ u64 addr;
+ u64 range;
+ struct xe_vma **vmas;
+ int num_vmas;
+ bool has_bo_vmas;
+ bool has_svm_userptr_vmas;
+};
+
+static int get_vmas(struct xe_vm *vm, struct xe_vmas_in_madvise_range *madvise_range)
+{
+ u64 addr = madvise_range->addr;
+ u64 range = madvise_range->range;
+
+ struct xe_vma **__vmas;
+ struct drm_gpuva *gpuva;
+ int max_vmas = 8;
+
+ lockdep_assert_held(&vm->lock);
+
+ madvise_range->num_vmas = 0;
+ madvise_range->vmas = kmalloc_array(max_vmas, sizeof(*madvise_range->vmas), GFP_KERNEL);
+ if (!madvise_range->vmas)
+ return -ENOMEM;
+
+	vm_dbg(&vm->xe->drm, "VMAs in range: start=0x%016llx, end=0x%016llx", addr, addr + range);
+
+ drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, addr, addr + range) {
+ struct xe_vma *vma = gpuva_to_vma(gpuva);
+
+ if (xe_vma_bo(vma))
+ madvise_range->has_bo_vmas = true;
+ else if (xe_vma_is_cpu_addr_mirror(vma) || xe_vma_is_userptr(vma))
+ madvise_range->has_svm_userptr_vmas = true;
+
+ if (madvise_range->num_vmas == max_vmas) {
+ max_vmas <<= 1;
+ __vmas = krealloc(madvise_range->vmas,
+ max_vmas * sizeof(*madvise_range->vmas),
+ GFP_KERNEL);
+ if (!__vmas) {
+ kfree(madvise_range->vmas);
+ return -ENOMEM;
+ }
+ madvise_range->vmas = __vmas;
+ }
+
+ madvise_range->vmas[madvise_range->num_vmas] = vma;
+ (madvise_range->num_vmas)++;
+ }
+
+ if (!madvise_range->num_vmas)
+ kfree(madvise_range->vmas);
+
+	vm_dbg(&vm->xe->drm, "madvise_range->num_vmas = %d\n", madvise_range->num_vmas);
+
+ return 0;
+}
+
+static void madvise_preferred_mem_loc(struct xe_device *xe, struct xe_vm *vm,
+ struct xe_vma **vmas, int num_vmas,
+ struct drm_xe_madvise *op)
+{
+ int i;
+
+ xe_assert(vm->xe, op->type == DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC);
+
+ for (i = 0; i < num_vmas; i++) {
+ /*TODO: Extend attributes to bo based vmas */
+ if ((vmas[i]->attr.preferred_loc.devmem_fd == op->preferred_mem_loc.devmem_fd &&
+ vmas[i]->attr.preferred_loc.migration_policy ==
+ op->preferred_mem_loc.migration_policy) ||
+ !xe_vma_is_cpu_addr_mirror(vmas[i])) {
+ vmas[i]->skip_invalidation = true;
+ } else {
+ vmas[i]->skip_invalidation = false;
+ vmas[i]->attr.preferred_loc.devmem_fd = op->preferred_mem_loc.devmem_fd;
+			/* Until multi-device support is added, migration_policy
+			 * is unused and can be ignored.
+ */
+ vmas[i]->attr.preferred_loc.migration_policy =
+ op->preferred_mem_loc.migration_policy;
+ }
+ }
+}
+
+static void madvise_atomic(struct xe_device *xe, struct xe_vm *vm,
+ struct xe_vma **vmas, int num_vmas,
+ struct drm_xe_madvise *op)
+{
+ struct xe_bo *bo;
+ int i;
+
+ xe_assert(vm->xe, op->type == DRM_XE_MEM_RANGE_ATTR_ATOMIC);
+ xe_assert(vm->xe, op->atomic.val <= DRM_XE_ATOMIC_CPU);
+
+ for (i = 0; i < num_vmas; i++) {
+ if (xe_vma_is_userptr(vmas[i]) &&
+ !(op->atomic.val == DRM_XE_ATOMIC_DEVICE &&
+ xe->info.has_device_atomics_on_smem)) {
+ vmas[i]->skip_invalidation = true;
+ continue;
+ }
+
+ if (vmas[i]->attr.atomic_access == op->atomic.val) {
+ vmas[i]->skip_invalidation = true;
+ } else {
+ vmas[i]->skip_invalidation = false;
+ vmas[i]->attr.atomic_access = op->atomic.val;
+ }
+
+ bo = xe_vma_bo(vmas[i]);
+ if (!bo || bo->attr.atomic_access == op->atomic.val)
+ continue;
+
+ vmas[i]->skip_invalidation = false;
+ xe_bo_assert_held(bo);
+ bo->attr.atomic_access = op->atomic.val;
+
+ /* Invalidate cpu page table, so bo can migrate to smem in next access */
+ if (xe_bo_is_vram(bo) &&
+ (bo->attr.atomic_access == DRM_XE_ATOMIC_CPU ||
+ bo->attr.atomic_access == DRM_XE_ATOMIC_GLOBAL))
+ ttm_bo_unmap_virtual(&bo->ttm);
+ }
+}
+
+static void madvise_pat_index(struct xe_device *xe, struct xe_vm *vm,
+ struct xe_vma **vmas, int num_vmas,
+ struct drm_xe_madvise *op)
+{
+ int i;
+
+ xe_assert(vm->xe, op->type == DRM_XE_MEM_RANGE_ATTR_PAT);
+
+ for (i = 0; i < num_vmas; i++) {
+ if (vmas[i]->attr.pat_index == op->pat_index.val) {
+ vmas[i]->skip_invalidation = true;
+ } else {
+ vmas[i]->skip_invalidation = false;
+ vmas[i]->attr.pat_index = op->pat_index.val;
+ }
+ }
+}
+
+typedef void (*madvise_func)(struct xe_device *xe, struct xe_vm *vm,
+ struct xe_vma **vmas, int num_vmas,
+ struct drm_xe_madvise *op);
+
+static const madvise_func madvise_funcs[] = {
+ [DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC] = madvise_preferred_mem_loc,
+ [DRM_XE_MEM_RANGE_ATTR_ATOMIC] = madvise_atomic,
+ [DRM_XE_MEM_RANGE_ATTR_PAT] = madvise_pat_index,
+};
+
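/*
 * Dispatch sketch: args->type is user-controlled, so the ioctl below clamps
 * the index with array_index_nospec() before indexing the table (this
 * mirrors the actual call in xe_vm_madvise_ioctl()):
 */
attr_type = array_index_nospec(args->type, ARRAY_SIZE(madvise_funcs));
madvise_funcs[attr_type](xe, vm, madvise_range.vmas, madvise_range.num_vmas, args);
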
+static u8 xe_zap_ptes_in_madvise_range(struct xe_vm *vm, u64 start, u64 end)
+{
+ struct drm_gpuva *gpuva;
+ struct xe_tile *tile;
+ u8 id, tile_mask = 0;
+
+ lockdep_assert_held_write(&vm->lock);
+
+ /* Wait for pending binds */
+ if (dma_resv_wait_timeout(xe_vm_resv(vm), DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT) <= 0)
+ XE_WARN_ON(1);
+
+ drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end) {
+ struct xe_vma *vma = gpuva_to_vma(gpuva);
+
+ if (vma->skip_invalidation || xe_vma_is_null(vma))
+ continue;
+
+ if (xe_vma_is_cpu_addr_mirror(vma)) {
+ tile_mask |= xe_svm_ranges_zap_ptes_in_range(vm,
+ xe_vma_start(vma),
+ xe_vma_end(vma));
+ } else {
+ for_each_tile(tile, vm->xe, id) {
+ if (xe_pt_zap_ptes(tile, vma)) {
+ tile_mask |= BIT(id);
+
+ /*
+ * WRITE_ONCE pairs with READ_ONCE
+ * in xe_vm_has_valid_gpu_mapping()
+ */
+ WRITE_ONCE(vma->tile_invalidated,
+ vma->tile_invalidated | BIT(id));
+ }
+ }
+ }
+ }
+
+ return tile_mask;
+}
+
+static int xe_vm_invalidate_madvise_range(struct xe_vm *vm, u64 start, u64 end)
+{
+ u8 tile_mask = xe_zap_ptes_in_madvise_range(vm, start, end);
+
+ if (!tile_mask)
+ return 0;
+
+ xe_device_wmb(vm->xe);
+
+ return xe_vm_range_tilemask_tlb_inval(vm, start, end, tile_mask);
+}
+
+static bool madvise_args_are_sane(struct xe_device *xe, const struct drm_xe_madvise *args)
+{
+ if (XE_IOCTL_DBG(xe, !args))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, !IS_ALIGNED(args->start, SZ_4K)))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, !IS_ALIGNED(args->range, SZ_4K)))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, args->range < SZ_4K))
+ return false;
+
+ switch (args->type) {
+ case DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC:
+ {
+ s32 fd = (s32)args->preferred_mem_loc.devmem_fd;
+
+ if (XE_IOCTL_DBG(xe, fd < DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, args->preferred_mem_loc.migration_policy >
+ DRM_XE_MIGRATE_ONLY_SYSTEM_PAGES))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, args->preferred_mem_loc.pad))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, args->preferred_mem_loc.reserved))
+ return false;
+ break;
+ }
+ case DRM_XE_MEM_RANGE_ATTR_ATOMIC:
+ if (XE_IOCTL_DBG(xe, args->atomic.val > DRM_XE_ATOMIC_CPU))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, args->atomic.pad))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, args->atomic.reserved))
+ return false;
+
+ break;
+ case DRM_XE_MEM_RANGE_ATTR_PAT:
+ {
+ u16 coh_mode = xe_pat_index_get_coh_mode(xe, args->pat_index.val);
+
+ if (XE_IOCTL_DBG(xe, !coh_mode))
+ return false;
+
+ if (XE_WARN_ON(coh_mode > XE_COH_AT_LEAST_1WAY))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, args->pat_index.pad))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, args->pat_index.reserved))
+ return false;
+ break;
+ }
+ default:
+ if (XE_IOCTL_DBG(xe, 1))
+ return false;
+ }
+
+ if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
+ return false;
+
+ return true;
+}
+
+static bool check_bo_args_are_sane(struct xe_vm *vm, struct xe_vma **vmas,
+ int num_vmas, u32 atomic_val)
+{
+ struct xe_device *xe = vm->xe;
+ struct xe_bo *bo;
+ int i;
+
+ for (i = 0; i < num_vmas; i++) {
+ bo = xe_vma_bo(vmas[i]);
+ if (!bo)
+ continue;
+ /*
+ * NOTE: The following atomic checks are platform-specific. For example,
+ * if a device supports CXL atomics, these may not be necessary or
+ * may behave differently.
+ */
+ if (XE_IOCTL_DBG(xe, atomic_val == DRM_XE_ATOMIC_CPU &&
+ !(bo->flags & XE_BO_FLAG_SYSTEM)))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, atomic_val == DRM_XE_ATOMIC_DEVICE &&
+ !(bo->flags & XE_BO_FLAG_VRAM0) &&
+ !(bo->flags & XE_BO_FLAG_VRAM1) &&
+ !(bo->flags & XE_BO_FLAG_SYSTEM &&
+ xe->info.has_device_atomics_on_smem)))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, atomic_val == DRM_XE_ATOMIC_GLOBAL &&
+ (!(bo->flags & XE_BO_FLAG_SYSTEM) ||
+ (!(bo->flags & XE_BO_FLAG_VRAM0) &&
+ !(bo->flags & XE_BO_FLAG_VRAM1)))))
+ return false;
+ }
+ return true;
+}
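/*
 * Editorial summary (derived from the checks above, platform-specific as the
 * in-code NOTE says): DRM_XE_ATOMIC_CPU requires a system-memory-capable BO;
 * DRM_XE_ATOMIC_DEVICE requires a VRAM-capable BO, or a system-memory BO when
 * the device has device atomics on smem; DRM_XE_ATOMIC_GLOBAL requires a BO
 * placeable in both system memory and VRAM.
 */
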
+/**
+ * xe_vm_madvise_ioctl - Handle the madvise ioctl for a VM
+ * @dev: DRM device pointer
+ * @data: Pointer to ioctl data (drm_xe_madvise*)
+ * @file: DRM file pointer
+ *
+ * Handles the madvise ioctl to provide memory advice for VMAs within the
+ * input range.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_vm_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct xe_device *xe = to_xe_device(dev);
+ struct xe_file *xef = to_xe_file(file);
+ struct drm_xe_madvise *args = data;
+ struct xe_vmas_in_madvise_range madvise_range = {.addr = args->start,
+ .range = args->range, };
+ struct xe_vm *vm;
+ struct drm_exec exec;
+ int err, attr_type;
+
+ vm = xe_vm_lookup(xef, args->vm_id);
+ if (XE_IOCTL_DBG(xe, !vm))
+ return -EINVAL;
+
+ if (!madvise_args_are_sane(vm->xe, args)) {
+ err = -EINVAL;
+ goto put_vm;
+ }
+
+ xe_svm_flush(vm);
+
+ err = down_write_killable(&vm->lock);
+ if (err)
+ goto put_vm;
+
+ if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
+ err = -ENOENT;
+ goto unlock_vm;
+ }
+
+ err = xe_vm_alloc_madvise_vma(vm, args->start, args->range);
+ if (err)
+ goto unlock_vm;
+
+ err = get_vmas(vm, &madvise_range);
+ if (err || !madvise_range.num_vmas)
+ goto unlock_vm;
+
+ if (madvise_range.has_bo_vmas) {
+ if (args->type == DRM_XE_MEM_RANGE_ATTR_ATOMIC) {
+ if (!check_bo_args_are_sane(vm, madvise_range.vmas,
+ madvise_range.num_vmas,
+ args->atomic.val)) {
+ err = -EINVAL;
+ goto unlock_vm;
+ }
+ }
+
+ drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES | DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ drm_exec_until_all_locked(&exec) {
+ for (int i = 0; i < madvise_range.num_vmas; i++) {
+ struct xe_bo *bo = xe_vma_bo(madvise_range.vmas[i]);
+
+ if (!bo)
+ continue;
+ err = drm_exec_lock_obj(&exec, &bo->ttm.base);
+ drm_exec_retry_on_contention(&exec);
+ if (err)
+ goto err_fini;
+ }
+ }
+ }
+
+ if (madvise_range.has_svm_userptr_vmas) {
+ err = xe_svm_notifier_lock_interruptible(vm);
+ if (err)
+ goto err_fini;
+ }
+
+ attr_type = array_index_nospec(args->type, ARRAY_SIZE(madvise_funcs));
+ madvise_funcs[attr_type](xe, vm, madvise_range.vmas, madvise_range.num_vmas, args);
+
+ err = xe_vm_invalidate_madvise_range(vm, args->start, args->start + args->range);
+
+ if (madvise_range.has_svm_userptr_vmas)
+ xe_svm_notifier_unlock(vm);
+
+err_fini:
+ if (madvise_range.has_bo_vmas)
+ drm_exec_fini(&exec);
+ kfree(madvise_range.vmas);
+ madvise_range.vmas = NULL;
+unlock_vm:
+ up_write(&vm->lock);
+put_vm:
+ xe_vm_put(vm);
+ return err;
+}
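/*
 * Illustrative userspace sketch (not part of this patch): advising a 64K
 * range to prefer device-scope atomics. The DRM_IOCTL_XE_MADVISE wrapper
 * name is assumed from the matching uapi additions; start and range must be
 * 4K-aligned and range >= 4K per madvise_args_are_sane().
 */
struct drm_xe_madvise madv = {
	.vm_id = vm_id,
	.start = addr,		/* 4K aligned */
	.range = 0x10000,	/* 4K aligned, >= 4K */
	.type = DRM_XE_MEM_RANGE_ATTR_ATOMIC,
	.atomic = { .val = DRM_XE_ATOMIC_DEVICE },
};

if (ioctl(fd, DRM_IOCTL_XE_MADVISE, &madv))
	return -errno;
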
diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.h b/drivers/gpu/drm/xe/xe_vm_madvise.h
new file mode 100644
index 000000000000..b0e1fc445f23
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_vm_madvise.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_VM_MADVISE_H_
+#define _XE_VM_MADVISE_H_
+
+struct drm_device;
+struct drm_file;
+
+int xe_vm_madvise_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 8a07feef503b..da39940501d8 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -17,6 +17,7 @@
#include "xe_device_types.h"
#include "xe_pt_types.h"
#include "xe_range_fence.h"
+#include "xe_userptr.h"
struct xe_bo;
struct xe_svm_range;
@@ -46,35 +47,42 @@ struct xe_vm_pgtable_update_op;
#define XE_VMA_DUMPABLE (DRM_GPUVA_USERBITS << 8)
#define XE_VMA_SYSTEM_ALLOCATOR (DRM_GPUVA_USERBITS << 9)
-/** struct xe_userptr - User pointer */
-struct xe_userptr {
- /** @invalidate_link: Link for the vm::userptr.invalidated list */
- struct list_head invalidate_link;
- /** @userptr: link into VM repin list if userptr. */
- struct list_head repin_link;
+/**
+ * struct xe_vma_mem_attr - memory attributes associated with vma
+ */
+struct xe_vma_mem_attr {
+	/** @preferred_loc: preferred memory location */
+ struct {
+ /** @preferred_loc.migration_policy: Pages migration policy */
+ u32 migration_policy;
+
+ /**
+		 * @preferred_loc.devmem_fd: fd used to determine the pagemap
+		 * requested by the user. DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM
+		 * and DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE mean system memory
+		 * and the closest device memory, respectively.
+ */
+ u32 devmem_fd;
+ } preferred_loc;
+
/**
- * @notifier: MMU notifier for user pointer (invalidation call back)
+ * @atomic_access: The atomic access type for the vma
+	 * See %DRM_XE_ATOMIC_UNDEFINED, %DRM_XE_ATOMIC_DEVICE,
+	 * %DRM_XE_ATOMIC_GLOBAL, and %DRM_XE_ATOMIC_CPU for possible
+ * values. These are defined in uapi/drm/xe_drm.h.
*/
- struct mmu_interval_notifier notifier;
- /** @sgt: storage for a scatter gather table */
- struct sg_table sgt;
- /** @sg: allocated scatter gather table */
- struct sg_table *sg;
- /** @notifier_seq: notifier sequence number */
- unsigned long notifier_seq;
- /** @unmap_mutex: Mutex protecting dma-unmapping */
- struct mutex unmap_mutex;
+ u32 atomic_access;
+
/**
- * @initial_bind: user pointer has been bound at least once.
- * write: vm->userptr.notifier_lock in read mode and vm->resv held.
- * read: vm->userptr.notifier_lock in write mode or vm->resv held.
+	 * @default_pat_index: The pat index set for the VMA by the user at first bind.
*/
- bool initial_bind;
- /** @mapped: Whether the @sgt sg-table is dma-mapped. Protected by @unmap_mutex. */
- bool mapped;
-#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
- u32 divisor;
-#endif
+ u16 default_pat_index;
+
+ /**
+ * @pat_index: The pat index to use when encoding the PTEs for this vma.
+	 * Same as default_pat_index unless overwritten by madvise.
+ */
+ u16 pat_index;
};
struct xe_vma {
@@ -102,10 +110,10 @@ struct xe_vma {
/**
* @tile_invalidated: Tile mask of binding are invalidated for this VMA.
- * protected by BO's resv and for userptrs, vm->userptr.notifier_lock in
- * write mode for writing or vm->userptr.notifier_lock in read mode and
+ * protected by BO's resv and for userptrs, vm->svm.gpusvm.notifier_lock in
+ * write mode for writing or vm->svm.gpusvm.notifier_lock in read mode and
* the vm->resv. For stable reading, BO's resv or userptr
- * vm->userptr.notifier_lock in read mode is required. Can be
+ * vm->svm.gpusvm.notifier_lock in read mode is required. Can be
* opportunistically read with READ_ONCE outside of locks.
*/
u8 tile_invalidated;
@@ -116,7 +124,7 @@ struct xe_vma {
/**
* @tile_present: Tile mask of binding are present for this VMA.
* protected by vm->lock, vm->resv and for userptrs,
- * vm->userptr.notifier_lock for writing. Needs either for reading,
+ * vm->svm.gpusvm.notifier_lock for writing. Needs either for reading,
* but if reading is done under the vm->lock only, it needs to be held
* in write mode.
*/
@@ -126,15 +134,22 @@ struct xe_vma {
u8 tile_staged;
/**
- * @pat_index: The pat index to use when encoding the PTEs for this vma.
+ * @skip_invalidation: Used in madvise to avoid invalidation
+	 * if the memory attributes don't change
*/
- u16 pat_index;
+ bool skip_invalidation;
/**
* @ufence: The user fence that was provided with MAP.
* Needs to be signalled before UNMAP can be processed.
*/
struct xe_user_fence *ufence;
+
+ /**
+ * @attr: The attributes of vma which determines the migration policy
+ * and encoding of the PTEs for this vma.
+ */
+ struct xe_vma_mem_attr attr;
};
/**
@@ -244,33 +259,7 @@ struct xe_vm {
const struct xe_pt_ops *pt_ops;
/** @userptr: user pointer state */
- struct {
- /**
- * @userptr.repin_list: list of VMAs which are user pointers,
- * and needs repinning. Protected by @lock.
- */
- struct list_head repin_list;
- /**
- * @notifier_lock: protects notifier in write mode and
- * submission in read mode.
- */
- struct rw_semaphore notifier_lock;
- /**
- * @userptr.invalidated_lock: Protects the
- * @userptr.invalidated list.
- */
- spinlock_t invalidated_lock;
- /**
- * @userptr.invalidated: List of invalidated userptrs, not yet
- * picked
- * up for revalidation. Protected from access with the
- * @invalidated_lock. Removing items from the list
- * additionally requires @lock in write mode, and adding
- * items to the list requires either the @userptr.notifier_lock in
- * write mode, OR @lock in write mode.
- */
- struct list_head invalidated;
- } userptr;
+ struct xe_userptr_vm userptr;
/** @preempt: preempt state */
struct {
@@ -293,6 +282,11 @@ struct xe_vm {
* BOs
*/
struct work_struct rebind_work;
+ /**
+ * @preempt.pm_activate_link: Link to list of rebind workers to be
+ * kicked on resume.
+ */
+ struct list_head pm_activate_link;
} preempt;
/** @um: unified memory state */
@@ -313,18 +307,34 @@ struct xe_vm {
} error_capture;
/**
+ * @validation: Validation data only valid with the vm resv held.
+	 * Note: This is really task state of the task holding the vm resv,
+	 * and moving forward we should come up with a better way of passing
+	 * this down the call chain.
+ */
+ struct {
+ /**
+		 * @validation.validating: The task that is currently making
+		 * bos resident for this vm.
+ * Protected by the VM's resv for writing. Opportunistic reading can be done
+ * using READ_ONCE. Note: This is a workaround for the
+ * TTM eviction_valuable() callback not being passed a struct
+ * ttm_operation_context(). Future work might want to address this.
+ */
+ struct task_struct *validating;
+ /**
+		 * @validation._exec: The drm_exec context used when locking the vm resv.
+ * Protected by the vm's resv.
+ */
+ struct drm_exec *_exec;
+ } validation;
+
+ /**
* @tlb_flush_seqno: Required TLB flush seqno for the next exec.
* protected by the vm resv.
*/
u64 tlb_flush_seqno;
- /**
- * @validating: The task that is currently making bos resident for this vm.
- * Protected by the VM's resv for writing. Opportunistic reading can be done
- * using READ_ONCE. Note: This is a workaround for the
- * TTM eviction_valuable() callback not being passed a struct
- * ttm_operation_context(). Future work might want to address this.
- */
- struct task_struct *validating;
/** @batch_invalidate_tlb: Always invalidate TLB before batch start */
bool batch_invalidate_tlb;
/** @xef: XE file handle for tracking this VM's drm client */
@@ -395,8 +405,11 @@ struct xe_vma_op_prefetch_range {
struct xarray range;
/** @ranges_count: number of svm ranges to map */
u32 ranges_count;
- /** @region: memory region to prefetch to */
- u32 region;
+ /**
+ * @tile: Pointer to the tile structure containing memory to prefetch.
+	 * NULL if the requested prefetch region is smem.
+ */
+ struct xe_tile *tile;
};
/** enum xe_vma_op_flags - flags for VMA operation */
@@ -462,6 +475,7 @@ struct xe_vma_ops {
struct xe_vm_pgtable_update_ops pt_update_ops[XE_MAX_TILES_PER_DEVICE];
	/** @flags: flags signifying properties of these xe_vma_ops */
#define XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH BIT(0)
+#define XE_VMA_OPS_FLAG_MADVISE BIT(1)
u32 flags;
#ifdef TEST_VM_OPS_ERROR
/** @inject_error: inject error to test error handling */
diff --git a/drivers/gpu/drm/xe/xe_vram.c b/drivers/gpu/drm/xe/xe_vram.c
index e421a74fb87c..b44ebf50fedb 100644
--- a/drivers/gpu/drm/xe/xe_vram.c
+++ b/drivers/gpu/drm/xe/xe_vram.c
@@ -3,6 +3,7 @@
* Copyright © 2021-2024 Intel Corporation
*/
+#include <kunit/visibility.h>
#include <linux/pci.h>
#include <drm/drm_managed.h>
@@ -19,7 +20,9 @@
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_sriov.h"
+#include "xe_ttm_vram_mgr.h"
#include "xe_vram.h"
+#include "xe_vram_types.h"
#define BAR_SIZE_SHIFT 20
@@ -136,7 +139,7 @@ static bool resource_is_valid(struct pci_dev *pdev, int bar)
return true;
}
-static int determine_lmem_bar_size(struct xe_device *xe)
+static int determine_lmem_bar_size(struct xe_device *xe, struct xe_vram_region *lmem_bar)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
@@ -147,16 +150,16 @@ static int determine_lmem_bar_size(struct xe_device *xe)
resize_vram_bar(xe);
- xe->mem.vram.io_start = pci_resource_start(pdev, LMEM_BAR);
- xe->mem.vram.io_size = pci_resource_len(pdev, LMEM_BAR);
- if (!xe->mem.vram.io_size)
+ lmem_bar->io_start = pci_resource_start(pdev, LMEM_BAR);
+ lmem_bar->io_size = pci_resource_len(pdev, LMEM_BAR);
+ if (!lmem_bar->io_size)
return -EIO;
/* XXX: Need to change when xe link code is ready */
- xe->mem.vram.dpa_base = 0;
+ lmem_bar->dpa_base = 0;
/* set up a map to the total memory area. */
- xe->mem.vram.mapping = ioremap_wc(xe->mem.vram.io_start, xe->mem.vram.io_size);
+ lmem_bar->mapping = devm_ioremap_wc(&pdev->dev, lmem_bar->io_start, lmem_bar->io_size);
return 0;
}
@@ -278,13 +281,71 @@ static void vram_fini(void *arg)
struct xe_tile *tile;
int id;
- if (xe->mem.vram.mapping)
- iounmap(xe->mem.vram.mapping);
-
- xe->mem.vram.mapping = NULL;
+ xe->mem.vram->mapping = NULL;
for_each_tile(tile, xe, id)
- tile->mem.vram.mapping = NULL;
+ tile->mem.vram->mapping = NULL;
+}
+
+struct xe_vram_region *xe_vram_region_alloc(struct xe_device *xe, u8 id, u32 placement)
+{
+ struct xe_vram_region *vram;
+ struct drm_device *drm = &xe->drm;
+
+ xe_assert(xe, id < xe->info.tile_count);
+
+ vram = drmm_kzalloc(drm, sizeof(*vram), GFP_KERNEL);
+ if (!vram)
+ return NULL;
+
+ vram->xe = xe;
+ vram->id = id;
+ vram->placement = placement;
+#if defined(CONFIG_DRM_XE_PAGEMAP)
+ vram->migrate = xe->tiles[id].migrate;
+#endif
+ return vram;
+}
+
+static void print_vram_region_info(struct xe_device *xe, struct xe_vram_region *vram)
+{
+ struct drm_device *drm = &xe->drm;
+
+ if (vram->io_size < vram->usable_size)
+ drm_info(drm, "Small BAR device\n");
+
+ drm_info(drm,
+ "VRAM[%u]: Actual physical size %pa, usable size exclude stolen %pa, CPU accessible size %pa\n",
+ vram->id, &vram->actual_physical_size, &vram->usable_size, &vram->io_size);
+ drm_info(drm, "VRAM[%u]: DPA range: [%pa-%llx], io range: [%pa-%llx]\n",
+ vram->id, &vram->dpa_base, vram->dpa_base + (u64)vram->actual_physical_size,
+ &vram->io_start, vram->io_start + (u64)vram->io_size);
+}
+
+static int vram_region_init(struct xe_device *xe, struct xe_vram_region *vram,
+ struct xe_vram_region *lmem_bar, u64 offset, u64 usable_size,
+ u64 region_size, resource_size_t remain_io_size)
+{
+ /* Check if VRAM region is already initialized */
+ if (vram->mapping)
+ return 0;
+
+ vram->actual_physical_size = region_size;
+ vram->io_start = lmem_bar->io_start + offset;
+ vram->io_size = min_t(u64, usable_size, remain_io_size);
+
+ if (!vram->io_size) {
+ drm_err(&xe->drm, "Tile without any CPU visible VRAM. Aborting.\n");
+ return -ENODEV;
+ }
+
+ vram->dpa_base = lmem_bar->dpa_base + offset;
+ vram->mapping = lmem_bar->mapping + offset;
+ vram->usable_size = usable_size;
+
+ print_vram_region_info(xe, vram);
+
+ return 0;
}
/**
@@ -298,78 +359,108 @@ static void vram_fini(void *arg)
int xe_vram_probe(struct xe_device *xe)
{
struct xe_tile *tile;
- resource_size_t io_size;
+ struct xe_vram_region lmem_bar;
+ resource_size_t remain_io_size;
u64 available_size = 0;
u64 total_size = 0;
- u64 tile_offset;
- u64 tile_size;
- u64 vram_size;
int err;
u8 id;
if (!IS_DGFX(xe))
return 0;
- /* Get the size of the root tile's vram for later accessibility comparison */
- tile = xe_device_get_root_tile(xe);
- err = tile_vram_size(tile, &vram_size, &tile_size, &tile_offset);
+ err = determine_lmem_bar_size(xe, &lmem_bar);
if (err)
return err;
+ drm_info(&xe->drm, "VISIBLE VRAM: %pa, %pa\n", &lmem_bar.io_start, &lmem_bar.io_size);
- err = determine_lmem_bar_size(xe);
- if (err)
- return err;
-
- drm_info(&xe->drm, "VISIBLE VRAM: %pa, %pa\n", &xe->mem.vram.io_start,
- &xe->mem.vram.io_size);
-
- io_size = xe->mem.vram.io_size;
+ remain_io_size = lmem_bar.io_size;
- /* tile specific ranges */
for_each_tile(tile, xe, id) {
- err = tile_vram_size(tile, &vram_size, &tile_size, &tile_offset);
+ u64 region_size;
+ u64 usable_size;
+ u64 tile_offset;
+
+ err = tile_vram_size(tile, &usable_size, &region_size, &tile_offset);
if (err)
return err;
- tile->mem.vram.actual_physical_size = tile_size;
- tile->mem.vram.io_start = xe->mem.vram.io_start + tile_offset;
- tile->mem.vram.io_size = min_t(u64, vram_size, io_size);
+ total_size += region_size;
+ available_size += usable_size;
+
+ err = vram_region_init(xe, tile->mem.vram, &lmem_bar, tile_offset, usable_size,
+ region_size, remain_io_size);
+ if (err)
+ return err;
- if (!tile->mem.vram.io_size) {
- drm_err(&xe->drm, "Tile without any CPU visible VRAM. Aborting.\n");
- return -ENODEV;
+ if (total_size > lmem_bar.io_size) {
+ drm_info(&xe->drm, "VRAM: %pa is larger than resource %pa\n",
+ &total_size, &lmem_bar.io_size);
}
- tile->mem.vram.dpa_base = xe->mem.vram.dpa_base + tile_offset;
- tile->mem.vram.usable_size = vram_size;
- tile->mem.vram.mapping = xe->mem.vram.mapping + tile_offset;
+ remain_io_size -= min_t(u64, tile->mem.vram->actual_physical_size, remain_io_size);
+ }
- if (tile->mem.vram.io_size < tile->mem.vram.usable_size)
- drm_info(&xe->drm, "Small BAR device\n");
- drm_info(&xe->drm, "VRAM[%u, %u]: Actual physical size %pa, usable size exclude stolen %pa, CPU accessible size %pa\n", id,
- tile->id, &tile->mem.vram.actual_physical_size, &tile->mem.vram.usable_size, &tile->mem.vram.io_size);
- drm_info(&xe->drm, "VRAM[%u, %u]: DPA range: [%pa-%llx], io range: [%pa-%llx]\n", id, tile->id,
- &tile->mem.vram.dpa_base, tile->mem.vram.dpa_base + (u64)tile->mem.vram.actual_physical_size,
- &tile->mem.vram.io_start, tile->mem.vram.io_start + (u64)tile->mem.vram.io_size);
+ err = vram_region_init(xe, xe->mem.vram, &lmem_bar, 0, available_size, total_size,
+ lmem_bar.io_size);
+ if (err)
+ return err;
- /* calculate total size using tile size to get the correct HW sizing */
- total_size += tile_size;
- available_size += vram_size;
+ return devm_add_action_or_reset(xe->drm.dev, vram_fini, xe);
+}
- if (total_size > xe->mem.vram.io_size) {
- drm_info(&xe->drm, "VRAM: %pa is larger than resource %pa\n",
- &total_size, &xe->mem.vram.io_size);
- }
+/**
+ * xe_vram_region_io_start - Get the IO start of a VRAM region
+ * @vram: the VRAM region
+ *
+ * Return: the IO start of the VRAM region, or 0 if not valid
+ */
+resource_size_t xe_vram_region_io_start(const struct xe_vram_region *vram)
+{
+ return vram ? vram->io_start : 0;
+}
- io_size -= min_t(u64, tile_size, io_size);
- }
+/**
+ * xe_vram_region_io_size - Get the IO size of a VRAM region
+ * @vram: the VRAM region
+ *
+ * Return: the IO size of the VRAM region, or 0 if not valid
+ */
+resource_size_t xe_vram_region_io_size(const struct xe_vram_region *vram)
+{
+ return vram ? vram->io_size : 0;
+}
- xe->mem.vram.actual_physical_size = total_size;
+/**
+ * xe_vram_region_dpa_base - Get the DPA base of a VRAM region
+ * @vram: the VRAM region
+ *
+ * Return: the DPA base of the VRAM region, or 0 if not valid
+ */
+resource_size_t xe_vram_region_dpa_base(const struct xe_vram_region *vram)
+{
+ return vram ? vram->dpa_base : 0;
+}
- drm_info(&xe->drm, "Total VRAM: %pa, %pa\n", &xe->mem.vram.io_start,
- &xe->mem.vram.actual_physical_size);
- drm_info(&xe->drm, "Available VRAM: %pa, %pa\n", &xe->mem.vram.io_start,
- &available_size);
+/**
+ * xe_vram_region_usable_size - Get the usable size of a VRAM region
+ * @vram: the VRAM region
+ *
+ * Return: the usable size of the VRAM region, or 0 if not valid
+ */
+resource_size_t xe_vram_region_usable_size(const struct xe_vram_region *vram)
+{
+ return vram ? vram->usable_size : 0;
+}
- return devm_add_action_or_reset(xe->drm.dev, vram_fini, xe);
+/**
+ * xe_vram_region_actual_physical_size - Get the actual physical size of a VRAM region
+ * @vram: the VRAM region
+ *
+ * Return: the actual physical size of the VRAM region, or 0 if not valid
+ */
+resource_size_t xe_vram_region_actual_physical_size(const struct xe_vram_region *vram)
+{
+ return vram ? vram->actual_physical_size : 0;
}
+EXPORT_SYMBOL_IF_KUNIT(xe_vram_region_actual_physical_size);
diff --git a/drivers/gpu/drm/xe/xe_vram.h b/drivers/gpu/drm/xe/xe_vram.h
index e31cc04ec0db..72860f714fc6 100644
--- a/drivers/gpu/drm/xe/xe_vram.h
+++ b/drivers/gpu/drm/xe/xe_vram.h
@@ -6,8 +6,19 @@
#ifndef _XE_VRAM_H_
#define _XE_VRAM_H_
+#include <linux/types.h>
+
struct xe_device;
+struct xe_vram_region;
int xe_vram_probe(struct xe_device *xe);
+struct xe_vram_region *xe_vram_region_alloc(struct xe_device *xe, u8 id, u32 placement);
+
+resource_size_t xe_vram_region_io_start(const struct xe_vram_region *vram);
+resource_size_t xe_vram_region_io_size(const struct xe_vram_region *vram);
+resource_size_t xe_vram_region_dpa_base(const struct xe_vram_region *vram);
+resource_size_t xe_vram_region_usable_size(const struct xe_vram_region *vram);
+resource_size_t xe_vram_region_actual_physical_size(const struct xe_vram_region *vram);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_vram_freq.c b/drivers/gpu/drm/xe/xe_vram_freq.c
index b26e26d73dae..17bc84da4cdc 100644
--- a/drivers/gpu/drm/xe/xe_vram_freq.c
+++ b/drivers/gpu/drm/xe/xe_vram_freq.c
@@ -34,7 +34,7 @@ static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct xe_tile *tile = dev_to_tile(dev);
- u32 val, mbox;
+ u32 val = 0, mbox;
int err;
mbox = REG_FIELD_PREP(PCODE_MB_COMMAND, PCODE_FREQUENCY_CONFIG)
@@ -56,7 +56,7 @@ static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct xe_tile *tile = dev_to_tile(dev);
- u32 val, mbox;
+ u32 val = 0, mbox;
int err;
mbox = REG_FIELD_PREP(PCODE_MB_COMMAND, PCODE_FREQUENCY_CONFIG)
diff --git a/drivers/gpu/drm/xe/xe_vram_types.h b/drivers/gpu/drm/xe/xe_vram_types.h
new file mode 100644
index 000000000000..83772dcbf1af
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_vram_types.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_VRAM_TYPES_H_
+#define _XE_VRAM_TYPES_H_
+
+#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
+#include <drm/drm_pagemap.h>
+#endif
+
+#include "xe_ttm_vram_mgr_types.h"
+
+struct xe_device;
+struct xe_migrate;
+
+/**
+ * struct xe_vram_region - memory region structure
+ * This is used to describe a memory region in an xe
+ * device, such as HBM memory or CXL extension memory.
+ */
+struct xe_vram_region {
+ /** @xe: Back pointer to xe device */
+ struct xe_device *xe;
+ /**
+ * @id: VRAM region instance id
+ *
+	 * The value should be unique for each VRAM region.
+ */
+ u8 id;
+ /** @io_start: IO start address of this VRAM instance */
+ resource_size_t io_start;
+ /**
+ * @io_size: IO size of this VRAM instance
+ *
+ * This represents how much of this VRAM we can access
+ * via the CPU through the VRAM BAR. This can be smaller
+ * than @usable_size, in which case only part of VRAM is CPU
+ * accessible (typically the first 256M). This
+ * configuration is known as small-bar.
+ */
+ resource_size_t io_size;
+	/** @dpa_base: This memory region's DPA (device physical address) base */
+ resource_size_t dpa_base;
+ /**
+ * @usable_size: usable size of VRAM
+ *
+ * Usable size of VRAM excluding reserved portions
+	 * (e.g. stolen mem)
+ */
+ resource_size_t usable_size;
+ /**
+ * @actual_physical_size: Actual VRAM size
+ *
+ * Actual VRAM size including reserved portions
+	 * (e.g. stolen mem)
+ */
+ resource_size_t actual_physical_size;
+ /** @mapping: pointer to VRAM mappable space */
+ void __iomem *mapping;
+ /** @ttm: VRAM TTM manager */
+ struct xe_ttm_vram_mgr ttm;
+ /** @placement: TTM placement dedicated for this region */
+ u32 placement;
+#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
+ /** @migrate: Back pointer to migrate */
+ struct xe_migrate *migrate;
+ /** @pagemap: Used to remap device memory as ZONE_DEVICE */
+ struct dev_pagemap pagemap;
+ /**
+ * @dpagemap: The struct drm_pagemap of the ZONE_DEVICE memory
+ * pages of this tile.
+ */
+ struct drm_pagemap dpagemap;
+ /**
+ * @hpa_base: base host physical address
+ *
+ * This is generated when remap device memory as ZONE_DEVICE
+ */
+ resource_size_t hpa_base;
+#endif
+};
+
+#endif
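As a hedged illustration of the small-bar case documented for @io_size above (numbers invented): a region with usable_size of roughly 16 GiB behind a 256 MiB BAR has io_size = 256 MiB < usable_size, so only the first 256 MiB is CPU-accessible through @mapping; that io_size < usable_size comparison is exactly the check print_vram_region_info() in xe_vram.c uses to report "Small BAR device".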
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index 22a98600fd8f..cd03891654a1 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -39,7 +39,8 @@
* Register Immediate commands) once when initializing the device and saved in
* the default context. That default context is then used on every context
* creation to have a "primed golden context", i.e. a context image that
- * already contains the changes needed to all the registers.
+ * already contains the changes needed to all the registers. See
+ * drivers/gpu/drm/xe/xe_lrc.c for default context handling.
*
* - Engine workarounds: the list of these WAs is applied whenever the specific
* engine is reset. It's also possible that a set of engine classes share a
@@ -48,10 +49,10 @@
 * them need to keep the workaround programming: the approach taken in the
* driver is to tie those workarounds to the first compute/render engine that
* is registered. When executing with GuC submission, engine resets are
- * outside of kernel driver control, hence the list of registers involved in
+ * outside of kernel driver control, hence the list of registers involved is
 * written once, on engine initialization, and then passed to GuC, which
* saves/restores their values before/after the reset takes place. See
- * ``drivers/gpu/drm/xe/xe_guc_ads.c`` for reference.
+ * drivers/gpu/drm/xe/xe_guc_ads.c for reference.
*
* - GT workarounds: the list of these WAs is applied whenever these registers
* revert to their default values: on GPU reset, suspend/resume [1]_, etc.
@@ -66,21 +67,39 @@
* hardware on every HW context restore. These buffers are created and
 * programmed in the default context so the hardware always goes through those
* programming sequences when switching contexts. The support for workaround
- * batchbuffers is enabled these hardware mechanisms:
+ * batchbuffers is enabled via these hardware mechanisms:
*
- * #. INDIRECT_CTX: A batchbuffer and an offset are provided in the default
- * context, pointing the hardware to jump to that location when that offset
- * is reached in the context restore. Workaround batchbuffer in the driver
- * currently uses this mechanism for all platforms.
+ * #. INDIRECT_CTX (also known as **mid context restore bb**): A batchbuffer
+ * and an offset are provided in the default context, pointing the hardware
+ * to jump to that location when that offset is reached in the context
+ * restore. When a context is being restored, this is executed after the
+ * ring context, in the middle (or beginning) of the engine context image.
*
- * #. BB_PER_CTX_PTR: A batchbuffer is provided in the default context,
- * pointing the hardware to a buffer to continue executing after the
- * engine registers are restored in a context restore sequence. This is
- * currently not used in the driver.
+ * #. BB_PER_CTX_PTR (also known as **post context restore bb**): A
+ * batchbuffer is provided in the default context, pointing the hardware to
+ * a buffer to continue executing after the engine registers are restored
+ * in a context restore sequence.
+ *
+ * Below is the timeline for a context restore sequence:
+ *
+ * .. code::
+ *
+ * INDIRECT_CTX_OFFSET
+ * |----------->|
+ * .------------.------------.-------------.------------.--------------.-----------.
+ * |Ring | Engine | Mid-context | Engine | Post-context | Ring |
+ * |Restore | Restore (1)| BB Restore | Restore (2)| BB Restore | Execution |
+ * `------------'------------'-------------'------------'--------------'-----------'
*
* - Other/OOB: There are WAs that, due to their nature, cannot be applied from
* a central place. Those are peppered around the rest of the code, as needed.
- * Workarounds related to the display IP are the main example.
+ * There's a central place to control which workarounds are enabled:
+ * drivers/gpu/drm/xe/xe_wa_oob.rules for GT workarounds and
+ * drivers/gpu/drm/xe/xe_device_wa_oob.rules for device/SoC workarounds.
+ * These files only record which workarounds are enabled: during early device
+ * initialization those rules are evaluated and recorded by the driver. Then
+ * later the driver checks with ``XE_GT_WA()`` and ``XE_DEVICE_WA()`` to
+ * implement them.
*
* .. [1] Technically, some registers are powercontext saved & restored, so they
* survive a suspend/resume. In practice, writing them again is not too
@@ -538,6 +557,11 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_RULES(GRAPHICS_VERSION(2004), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE))
},
+ { XE_RTP_NAME("13012615864"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, RES_CHK_SPR_DIS))
+ },
/* Xe2_HPG */
@@ -602,6 +626,18 @@ static const struct xe_rtp_entry_sr engine_was[] = {
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, STK_ID_RESTRICT))
},
+ { XE_RTP_NAME("13012615864"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, RES_CHK_SPR_DIS))
+ },
+ { XE_RTP_NAME("18041344222"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute),
+ FUNC(xe_rtp_match_not_sriov_vf),
+ FUNC(xe_rtp_match_gt_has_discontiguous_dss_groups)),
+ XE_RTP_ACTIONS(SET(TDL_CHICKEN, EUSTALL_PERF_SAMPLING_DISABLE))
+ },
/* Xe2_LPM */
@@ -647,7 +683,8 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_ACTIONS(SET(TDL_CHICKEN, QID_WAIT_FOR_THREAD_NOT_RUN_DISABLE))
},
{ XE_RTP_NAME("13012615864"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001), OR,
+ GRAPHICS_VERSION(3003),
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, RES_CHK_SPR_DIS))
},
@@ -661,6 +698,13 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_RULES(GRAPHICS_VERSION(3003), FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE))
},
+ { XE_RTP_NAME("18041344222"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001),
+ FUNC(xe_rtp_match_first_render_or_compute),
+ FUNC(xe_rtp_match_not_sriov_vf),
+ FUNC(xe_rtp_match_gt_has_discontiguous_dss_groups)),
+ XE_RTP_ACTIONS(SET(TDL_CHICKEN, EUSTALL_PERF_SAMPLING_DISABLE))
+ },
};
static const struct xe_rtp_entry_sr lrc_was[] = {
@@ -868,6 +912,10 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
DIS_PARTIAL_AUTOSTRIP |
DIS_AUTOSTRIP))
},
+ { XE_RTP_NAME("22021007897"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3003), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN4, SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE))
+ },
};
static __maybe_unused const struct xe_rtp_entry oob_was[] = {
@@ -905,13 +953,13 @@ void xe_wa_process_device_oob(struct xe_device *xe)
}
/**
- * xe_wa_process_oob - process OOB workaround table
+ * xe_wa_process_gt_oob - process GT OOB workaround table
* @gt: GT instance to process workarounds for
*
* Process OOB workaround table for this platform, marking in @gt the
* workarounds that are active.
*/
-void xe_wa_process_oob(struct xe_gt *gt)
+void xe_wa_process_gt_oob(struct xe_gt *gt)
{
struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);
@@ -995,12 +1043,12 @@ int xe_wa_device_init(struct xe_device *xe)
}
/**
- * xe_wa_init - initialize gt with workaround bookkeeping
+ * xe_wa_gt_init - initialize gt with workaround bookkeeping
* @gt: GT instance to initialize
*
* Returns 0 for success, negative error code otherwise.
*/
-int xe_wa_init(struct xe_gt *gt)
+int xe_wa_gt_init(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
size_t n_oob, n_lrc, n_engine, n_gt, total;
@@ -1026,7 +1074,7 @@ int xe_wa_init(struct xe_gt *gt)
return 0;
}
-ALLOW_ERROR_INJECTION(xe_wa_init, ERRNO); /* See xe_pci_probe() */
+ALLOW_ERROR_INJECTION(xe_wa_gt_init, ERRNO); /* See xe_pci_probe() */
void xe_wa_device_dump(struct xe_device *xe, struct drm_printer *p)
{
@@ -1079,6 +1127,6 @@ void xe_wa_apply_tile_workarounds(struct xe_tile *tile)
if (IS_SRIOV_VF(tile->xe))
return;
- if (XE_WA(tile->primary_gt, 22010954014))
+ if (XE_GT_WA(tile->primary_gt, 22010954014))
xe_mmio_rmw32(mmio, XEHP_CLOCK_GATE_DIS, 0, SGSI_SIDECLK_DIS);
}
diff --git a/drivers/gpu/drm/xe/xe_wa.h b/drivers/gpu/drm/xe/xe_wa.h
index f3880c65cb8d..6a869b2de643 100644
--- a/drivers/gpu/drm/xe/xe_wa.h
+++ b/drivers/gpu/drm/xe/xe_wa.h
@@ -14,9 +14,9 @@ struct xe_hw_engine;
struct xe_tile;
int xe_wa_device_init(struct xe_device *xe);
-int xe_wa_init(struct xe_gt *gt);
+int xe_wa_gt_init(struct xe_gt *gt);
void xe_wa_process_device_oob(struct xe_device *xe);
-void xe_wa_process_oob(struct xe_gt *gt);
+void xe_wa_process_gt_oob(struct xe_gt *gt);
void xe_wa_process_gt(struct xe_gt *gt);
void xe_wa_process_engine(struct xe_hw_engine *hwe);
void xe_wa_process_lrc(struct xe_hw_engine *hwe);
@@ -25,11 +25,11 @@ void xe_wa_device_dump(struct xe_device *xe, struct drm_printer *p);
void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p);
/**
- * XE_WA - Out-of-band workarounds, to be queried and called as needed.
+ * XE_GT_WA - Out-of-band GT workarounds, to be queried and called as needed.
* @gt__: gt instance
* @id__: XE_OOB_<id__>, as generated by build system in generated/xe_wa_oob.h
*/
-#define XE_WA(gt__, id__) ({ \
+#define XE_GT_WA(gt__, id__) ({ \
xe_gt_assert(gt__, (gt__)->wa_active.oob_initialized); \
test_bit(XE_WA_OOB_ ## id__, (gt__)->wa_active.oob); \
})
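As a concrete instance of the renamed query (taken from the xe_wa.c hunk above): XE_GT_WA(tile->primary_gt, 22010954014) first asserts that wa_active.oob_initialized is set, i.e. that xe_wa_process_gt_oob() has already evaluated the rules file, and then tests the bit that evaluation recorded for workaround 22010954014.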
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index e990f20eccfe..f3a6d5d239ce 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -1,4 +1,6 @@
1607983814 GRAPHICS_VERSION_RANGE(1200, 1210)
+16010904313 GRAPHICS_VERSION_RANGE(1200, 1210)
+18022495364 GRAPHICS_VERSION_RANGE(1200, 1210)
22012773006 GRAPHICS_VERSION_RANGE(1200, 1250)
14014475959 GRAPHICS_VERSION_RANGE(1270, 1271), GRAPHICS_STEP(A0, B0)
PLATFORM(DG2)
@@ -30,7 +32,8 @@
16022287689 GRAPHICS_VERSION(2001)
GRAPHICS_VERSION(2004)
13011645652 GRAPHICS_VERSION(2004)
- GRAPHICS_VERSION(3001)
+ GRAPHICS_VERSION_RANGE(3000, 3001)
+ GRAPHICS_VERSION(3003)
14022293748 GRAPHICS_VERSION_RANGE(2001, 2002)
GRAPHICS_VERSION(2004)
GRAPHICS_VERSION_RANGE(3000, 3001)
@@ -46,7 +49,6 @@
16023588340 GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_not_sriov_vf)
14019789679 GRAPHICS_VERSION(1255)
GRAPHICS_VERSION_RANGE(1270, 2004)
-no_media_l3 MEDIA_VERSION(3000)
14022866841 GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0)
MEDIA_VERSION(3000), MEDIA_STEP(A0, B0)
16021333562 GRAPHICS_VERSION_RANGE(1200, 1274)
@@ -66,9 +68,16 @@ no_media_l3 MEDIA_VERSION(3000)
MEDIA_VERSION_RANGE(1300, 3000)
MEDIA_VERSION(3002)
GRAPHICS_VERSION(3003)
+14020001231 GRAPHICS_VERSION_RANGE(2001, 2004), FUNC(xe_rtp_match_psmi_enabled)
+ MEDIA_VERSION(2000), FUNC(xe_rtp_match_psmi_enabled)
+ MEDIA_VERSION(3000), FUNC(xe_rtp_match_psmi_enabled)
+ MEDIA_VERSION(3002), FUNC(xe_rtp_match_psmi_enabled)
+16023683509 MEDIA_VERSION(2000), FUNC(xe_rtp_match_psmi_enabled)
+ MEDIA_VERSION(3000), MEDIA_STEP(A0, B0), FUNC(xe_rtp_match_psmi_enabled)
# SoC workaround - currently applies to all platforms with the following
# primary GT GMDID
14022085890 GRAPHICS_VERSION(2001)
15015404425_disable PLATFORM(PANTHERLAKE), MEDIA_STEP(B0, FOREVER)
+16026007364 MEDIA_VERSION(3000)
diff --git a/drivers/gpu/nova-core/Kconfig b/drivers/gpu/nova-core/Kconfig
index 8726d80d6ba4..20d3e6d0d796 100644
--- a/drivers/gpu/nova-core/Kconfig
+++ b/drivers/gpu/nova-core/Kconfig
@@ -1,5 +1,6 @@
config NOVA_CORE
tristate "Nova Core GPU driver"
+ depends on 64BIT
depends on PCI
depends on RUST
depends on RUST_FW_LOADER_ABSTRACTIONS
diff --git a/drivers/gpu/nova-core/driver.rs b/drivers/gpu/nova-core/driver.rs
index 274989ea1fb4..edc72052e27a 100644
--- a/drivers/gpu/nova-core/driver.rs
+++ b/drivers/gpu/nova-core/driver.rs
@@ -1,6 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-use kernel::{auxiliary, bindings, c_str, device::Core, pci, prelude::*, sizes::SZ_16M, sync::Arc};
+use kernel::{
+ auxiliary, c_str,
+ device::Core,
+ pci,
+ pci::{Class, ClassMask, Vendor},
+ prelude::*,
+ sizes::SZ_16M,
+ sync::Arc,
+};
use crate::gpu::Gpu;
@@ -18,10 +26,25 @@ kernel::pci_device_table!(
PCI_TABLE,
MODULE_PCI_TABLE,
<NovaCore as pci::Driver>::IdInfo,
- [(
- pci::DeviceId::from_id(bindings::PCI_VENDOR_ID_NVIDIA, bindings::PCI_ANY_ID as u32),
- ()
- )]
+ [
+ // Modern NVIDIA GPUs will show up as either VGA or 3D controllers.
+ (
+ pci::DeviceId::from_class_and_vendor(
+ Class::DISPLAY_VGA,
+ ClassMask::ClassSubclass,
+ Vendor::NVIDIA
+ ),
+ ()
+ ),
+ (
+ pci::DeviceId::from_class_and_vendor(
+ Class::DISPLAY_3D,
+ ClassMask::ClassSubclass,
+ Vendor::NVIDIA
+ ),
+ ()
+ ),
+ ]
);
impl pci::Driver for NovaCore {
@@ -34,14 +57,19 @@ impl pci::Driver for NovaCore {
pdev.enable_device_mem()?;
pdev.set_master();
- let bar = Arc::pin_init(
+ let devres_bar = Arc::pin_init(
pdev.iomap_region_sized::<BAR0_SIZE>(0, c_str!("nova-core/bar0")),
GFP_KERNEL,
)?;
+ // Used to provide a `&Bar0` to `Gpu::new` without tying it to the lifetime of
+ // `devres_bar`.
+ let bar_clone = Arc::clone(&devres_bar);
+ let bar = bar_clone.access(pdev.as_ref())?;
+
let this = KBox::pin_init(
try_pin_init!(Self {
- gpu <- Gpu::new(pdev, bar)?,
+ gpu <- Gpu::new(pdev, devres_bar, bar),
_reg: auxiliary::Registration::new(
pdev.as_ref(),
c_str!("nova-drm"),
@@ -54,4 +82,8 @@ impl pci::Driver for NovaCore {
Ok(this)
}
+
+ fn unbind(pdev: &pci::Device<Core>, this: Pin<&Self>) {
+ this.gpu.unbind(pdev.as_ref());
+ }
}
diff --git a/drivers/gpu/nova-core/falcon.rs b/drivers/gpu/nova-core/falcon.rs
index 50437c67c14a..37e6298195e4 100644
--- a/drivers/gpu/nova-core/falcon.rs
+++ b/drivers/gpu/nova-core/falcon.rs
@@ -4,16 +4,17 @@
use core::ops::Deref;
use hal::FalconHal;
-use kernel::bindings;
use kernel::device;
+use kernel::dma::DmaAddress;
use kernel::prelude::*;
+use kernel::sync::aref::ARef;
use kernel::time::Delta;
-use kernel::types::ARef;
use crate::dma::DmaObject;
use crate::driver::Bar0;
use crate::gpu::Chipset;
use crate::regs;
+use crate::regs::macros::RegisterBase;
use crate::util;
pub(crate) mod gsp;
@@ -274,14 +275,25 @@ impl From<bool> for FalconFbifMemType {
}
}
-/// Trait defining the parameters of a given Falcon instance.
-pub(crate) trait FalconEngine: Sync {
- /// Base I/O address for the falcon, relative from which its registers are accessed.
- const BASE: usize;
+/// Type used to represent the `PFALCON` registers address base for a given falcon engine.
+pub(crate) struct PFalconBase(());
+
+/// Type used to represent the `PFALCON2` registers address base for a given falcon engine.
+pub(crate) struct PFalcon2Base(());
+
+/// Trait defining the parameters of a given Falcon engine.
+///
+/// Each engine provides one base for `PFALCON` and `PFALCON2` registers. The `ID` constant is used
+/// to identify a given Falcon instance with register I/O methods.
+pub(crate) trait FalconEngine:
+ Send + Sync + RegisterBase<PFalconBase> + RegisterBase<PFalcon2Base> + Sized
+{
+ /// Singleton of the engine, used to identify it with register I/O methods.
+ const ID: Self;
}
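As a hedged sketch of what these bounds require from an engine type (hypothetical `MyEngine` with invented base addresses; the real `Gsp` and `Sec2` implementations appear later in this series):

    // Hypothetical engine type, for illustration only.
    pub(crate) struct MyEngine(());

    // `PFALCON` register block base for this engine (invented address).
    impl RegisterBase<PFalconBase> for MyEngine {
        const BASE: usize = 0x00aa0000;
    }

    // `PFALCON2` register block base for this engine (invented address).
    impl RegisterBase<PFalcon2Base> for MyEngine {
        const BASE: usize = 0x00aa1000;
    }

    // A unit-like type is trivially `Send + Sync + Sized`, so only `ID` remains.
    impl FalconEngine for MyEngine {
        const ID: Self = MyEngine(());
    }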
/// Represents a portion of the firmware to be loaded into a particular memory (e.g. IMEM or DMEM).
-#[derive(Debug)]
+#[derive(Debug, Clone)]
pub(crate) struct FalconLoadTarget {
/// Offset from the start of the source object to copy from.
pub(crate) src_start: u32,
@@ -292,7 +304,7 @@ pub(crate) struct FalconLoadTarget {
}
/// Parameters for the falcon boot ROM.
-#[derive(Debug)]
+#[derive(Debug, Clone)]
pub(crate) struct FalconBromParams {
     /// Offset in `DMEM` of the firmware's signature.
pub(crate) pkc_data_offset: u32,
@@ -343,13 +355,13 @@ impl<E: FalconEngine + 'static> Falcon<E> {
bar: &Bar0,
need_riscv: bool,
) -> Result<Self> {
- let hwcfg1 = regs::NV_PFALCON_FALCON_HWCFG1::read(bar, E::BASE);
+ let hwcfg1 = regs::NV_PFALCON_FALCON_HWCFG1::read(bar, &E::ID);
// Check that the revision and security model contain valid values.
let _ = hwcfg1.core_rev()?;
let _ = hwcfg1.security_model()?;
if need_riscv {
- let hwcfg2 = regs::NV_PFALCON_FALCON_HWCFG2::read(bar, E::BASE);
+ let hwcfg2 = regs::NV_PFALCON_FALCON_HWCFG2::read(bar, &E::ID);
if !hwcfg2.riscv() {
dev_err!(
dev,
@@ -369,7 +381,7 @@ impl<E: FalconEngine + 'static> Falcon<E> {
fn reset_wait_mem_scrubbing(&self, bar: &Bar0) -> Result {
// TIMEOUT: memory scrubbing should complete in less than 20ms.
util::wait_on(Delta::from_millis(20), || {
- if regs::NV_PFALCON_FALCON_HWCFG2::read(bar, E::BASE).mem_scrubbing_done() {
+ if regs::NV_PFALCON_FALCON_HWCFG2::read(bar, &E::ID).mem_scrubbing_done() {
Some(())
} else {
None
@@ -379,12 +391,12 @@ impl<E: FalconEngine + 'static> Falcon<E> {
/// Reset the falcon engine.
fn reset_eng(&self, bar: &Bar0) -> Result {
- let _ = regs::NV_PFALCON_FALCON_HWCFG2::read(bar, E::BASE);
+ let _ = regs::NV_PFALCON_FALCON_HWCFG2::read(bar, &E::ID);
// According to OpenRM's `kflcnPreResetWait_GA102` documentation, HW sometimes does not set
// RESET_READY so a non-failing timeout is used.
let _ = util::wait_on(Delta::from_micros(150), || {
- let r = regs::NV_PFALCON_FALCON_HWCFG2::read(bar, E::BASE);
+ let r = regs::NV_PFALCON_FALCON_HWCFG2::read(bar, &E::ID);
if r.reset_ready() {
Some(())
} else {
@@ -392,13 +404,13 @@ impl<E: FalconEngine + 'static> Falcon<E> {
}
});
- regs::NV_PFALCON_FALCON_ENGINE::alter(bar, E::BASE, |v| v.set_reset(true));
+ regs::NV_PFALCON_FALCON_ENGINE::alter(bar, &E::ID, |v| v.set_reset(true));
// TODO[DLAY]: replace with udelay() or equivalent once available.
// TIMEOUT: falcon engine should not take more than 10us to reset.
let _: Result = util::wait_on(Delta::from_micros(10), || None);
- regs::NV_PFALCON_FALCON_ENGINE::alter(bar, E::BASE, |v| v.set_reset(false));
+ regs::NV_PFALCON_FALCON_ENGINE::alter(bar, &E::ID, |v| v.set_reset(false));
self.reset_wait_mem_scrubbing(bar)?;
@@ -413,7 +425,7 @@ impl<E: FalconEngine + 'static> Falcon<E> {
regs::NV_PFALCON_FALCON_RM::default()
.set_value(regs::NV_PMC_BOOT_0::read(bar).into())
- .write(bar, E::BASE);
+ .write(bar, &E::ID);
Ok(())
}
@@ -443,7 +455,7 @@ impl<E: FalconEngine + 'static> Falcon<E> {
fw.dma_handle_with_offset(load_offsets.src_start as usize)?,
),
};
- if dma_start % bindings::dma_addr_t::from(DMA_LEN) > 0 {
+ if dma_start % DmaAddress::from(DMA_LEN) > 0 {
dev_err!(
self.dev,
"DMA transfer start addresses must be a multiple of {}",
@@ -451,44 +463,57 @@ impl<E: FalconEngine + 'static> Falcon<E> {
);
return Err(EINVAL);
}
- if load_offsets.len % DMA_LEN > 0 {
- dev_err!(
- self.dev,
- "DMA transfer length must be a multiple of {}",
- DMA_LEN
- );
- return Err(EINVAL);
- }
+
+ // DMA transfers can only be done in units of 256 bytes. Compute how many such transfers we
+ // need to perform.
+ let num_transfers = load_offsets.len.div_ceil(DMA_LEN);
+
+ // Check that the area we are about to transfer is within the bounds of the DMA object.
+ // Upper limit of transfer is `(num_transfers * DMA_LEN) + load_offsets.src_start`.
+ match num_transfers
+ .checked_mul(DMA_LEN)
+ .and_then(|size| size.checked_add(load_offsets.src_start))
+ {
+ None => {
+ dev_err!(self.dev, "DMA transfer length overflow");
+ return Err(EOVERFLOW);
+ }
+ Some(upper_bound) if upper_bound as usize > fw.size() => {
+ dev_err!(self.dev, "DMA transfer goes beyond range of DMA object");
+ return Err(EINVAL);
+ }
+ Some(_) => (),
+ };
// Set up the base source DMA address.
regs::NV_PFALCON_FALCON_DMATRFBASE::default()
.set_base((dma_start >> 8) as u32)
- .write(bar, E::BASE);
+ .write(bar, &E::ID);
regs::NV_PFALCON_FALCON_DMATRFBASE1::default()
.set_base((dma_start >> 40) as u16)
- .write(bar, E::BASE);
+ .write(bar, &E::ID);
let cmd = regs::NV_PFALCON_FALCON_DMATRFCMD::default()
.set_size(DmaTrfCmdSize::Size256B)
.set_imem(target_mem == FalconMem::Imem)
.set_sec(if sec { 1 } else { 0 });
- for pos in (0..load_offsets.len).step_by(DMA_LEN as usize) {
+ for pos in (0..num_transfers).map(|i| i * DMA_LEN) {
// Perform a transfer of size `DMA_LEN`.
regs::NV_PFALCON_FALCON_DMATRFMOFFS::default()
.set_offs(load_offsets.dst_start + pos)
- .write(bar, E::BASE);
+ .write(bar, &E::ID);
regs::NV_PFALCON_FALCON_DMATRFFBOFFS::default()
.set_offs(src_start + pos)
- .write(bar, E::BASE);
- cmd.write(bar, E::BASE);
+ .write(bar, &E::ID);
+ cmd.write(bar, &E::ID);
// Wait for the transfer to complete.
// TIMEOUT: arbitrarily large value, no DMA transfer to the falcon's small memories
// should ever take that long.
util::wait_on(Delta::from_secs(2), || {
- let r = regs::NV_PFALCON_FALCON_DMATRFCMD::read(bar, E::BASE);
+ let r = regs::NV_PFALCON_FALCON_DMATRFCMD::read(bar, &E::ID);
if r.idle() {
Some(())
} else {
@@ -502,9 +527,9 @@ impl<E: FalconEngine + 'static> Falcon<E> {
/// Perform a DMA load into `IMEM` and `DMEM` of `fw`, and prepare the falcon to run it.
pub(crate) fn dma_load<F: FalconFirmware<Target = E>>(&self, bar: &Bar0, fw: &F) -> Result {
- regs::NV_PFALCON_FBIF_CTL::alter(bar, E::BASE, |v| v.set_allow_phys_no_ctx(true));
- regs::NV_PFALCON_FALCON_DMACTL::default().write(bar, E::BASE);
- regs::NV_PFALCON_FBIF_TRANSCFG::alter(bar, E::BASE, |v| {
+ regs::NV_PFALCON_FBIF_CTL::alter(bar, &E::ID, |v| v.set_allow_phys_no_ctx(true));
+ regs::NV_PFALCON_FALCON_DMACTL::default().write(bar, &E::ID);
+ regs::NV_PFALCON_FBIF_TRANSCFG::alter(bar, &E::ID, 0, |v| {
v.set_target(FalconFbifTarget::CoherentSysmem)
.set_mem_type(FalconFbifMemType::Physical)
});
@@ -517,7 +542,7 @@ impl<E: FalconEngine + 'static> Falcon<E> {
// Set `BootVec` to start of non-secure code.
regs::NV_PFALCON_FALCON_BOOTVEC::default()
.set_value(fw.boot_addr())
- .write(bar, E::BASE);
+ .write(bar, &E::ID);
Ok(())
}
@@ -538,27 +563,27 @@ impl<E: FalconEngine + 'static> Falcon<E> {
if let Some(mbox0) = mbox0 {
regs::NV_PFALCON_FALCON_MAILBOX0::default()
.set_value(mbox0)
- .write(bar, E::BASE);
+ .write(bar, &E::ID);
}
if let Some(mbox1) = mbox1 {
regs::NV_PFALCON_FALCON_MAILBOX1::default()
.set_value(mbox1)
- .write(bar, E::BASE);
+ .write(bar, &E::ID);
}
- match regs::NV_PFALCON_FALCON_CPUCTL::read(bar, E::BASE).alias_en() {
+ match regs::NV_PFALCON_FALCON_CPUCTL::read(bar, &E::ID).alias_en() {
true => regs::NV_PFALCON_FALCON_CPUCTL_ALIAS::default()
.set_startcpu(true)
- .write(bar, E::BASE),
+ .write(bar, &E::ID),
false => regs::NV_PFALCON_FALCON_CPUCTL::default()
.set_startcpu(true)
- .write(bar, E::BASE),
+ .write(bar, &E::ID),
}
// TIMEOUT: arbitrarily large value, firmwares should complete in less than 2 seconds.
util::wait_on(Delta::from_secs(2), || {
- let r = regs::NV_PFALCON_FALCON_CPUCTL::read(bar, E::BASE);
+ let r = regs::NV_PFALCON_FALCON_CPUCTL::read(bar, &E::ID);
if r.halted() {
Some(())
} else {
@@ -567,8 +592,8 @@ impl<E: FalconEngine + 'static> Falcon<E> {
})?;
let (mbox0, mbox1) = (
- regs::NV_PFALCON_FALCON_MAILBOX0::read(bar, E::BASE).value(),
- regs::NV_PFALCON_FALCON_MAILBOX1::read(bar, E::BASE).value(),
+ regs::NV_PFALCON_FALCON_MAILBOX0::read(bar, &E::ID).value(),
+ regs::NV_PFALCON_FALCON_MAILBOX1::read(bar, &E::ID).value(),
);
Ok((mbox0, mbox1))
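To make the new DMA bounds check above concrete, a hedged example with invented numbers: for load_offsets.len = 1000 and DMA_LEN = 256, num_transfers = div_ceil(1000, 256) = 4, so the checked upper bound is 4 * 256 + src_start = 1024 + src_start, which must not exceed fw.size(); the transfer loop then issues writes at pos = 0, 256, 512 and 768 rather than rejecting the unaligned length outright as the removed code did.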
diff --git a/drivers/gpu/nova-core/falcon/gsp.rs b/drivers/gpu/nova-core/falcon/gsp.rs
index d622e9a64470..f17599cb49fa 100644
--- a/drivers/gpu/nova-core/falcon/gsp.rs
+++ b/drivers/gpu/nova-core/falcon/gsp.rs
@@ -2,23 +2,31 @@
use crate::{
driver::Bar0,
- falcon::{Falcon, FalconEngine},
- regs,
+ falcon::{Falcon, FalconEngine, PFalcon2Base, PFalconBase},
+ regs::{self, macros::RegisterBase},
};
/// Type specifying the `Gsp` falcon engine. Cannot be instantiated.
pub(crate) struct Gsp(());
-impl FalconEngine for Gsp {
+impl RegisterBase<PFalconBase> for Gsp {
const BASE: usize = 0x00110000;
}
+impl RegisterBase<PFalcon2Base> for Gsp {
+ const BASE: usize = 0x00111000;
+}
+
+impl FalconEngine for Gsp {
+ const ID: Self = Gsp(());
+}
+
impl Falcon<Gsp> {
/// Clears the SWGEN0 bit in the Falcon's IRQ status clear register to
/// allow GSP to signal CPU for processing new messages in message queue.
pub(crate) fn clear_swgen0_intr(&self, bar: &Bar0) {
regs::NV_PFALCON_FALCON_IRQSCLR::default()
.set_swgen0(true)
- .write(bar, Gsp::BASE);
+ .write(bar, &Gsp::ID);
}
}
diff --git a/drivers/gpu/nova-core/falcon/hal.rs b/drivers/gpu/nova-core/falcon/hal.rs
index b233bc365882..bba288455617 100644
--- a/drivers/gpu/nova-core/falcon/hal.rs
+++ b/drivers/gpu/nova-core/falcon/hal.rs
@@ -13,7 +13,7 @@ mod ga102;
/// Implements chipset-specific low-level operations. The trait is generic against [`FalconEngine`]
/// so its `BASE` parameter can be used in order to avoid runtime bound checks when accessing
/// registers.
-pub(crate) trait FalconHal<E: FalconEngine>: Sync {
+pub(crate) trait FalconHal<E: FalconEngine>: Send + Sync {
     /// Activates the Falcon core if the engine is a riscv/falcon dual engine.
fn select_core(&self, _falcon: &Falcon<E>, _bar: &Bar0) -> Result {
Ok(())
diff --git a/drivers/gpu/nova-core/falcon/hal/ga102.rs b/drivers/gpu/nova-core/falcon/hal/ga102.rs
index 52c33d3f22a8..0b1cbe7853b3 100644
--- a/drivers/gpu/nova-core/falcon/hal/ga102.rs
+++ b/drivers/gpu/nova-core/falcon/hal/ga102.rs
@@ -16,15 +16,15 @@ use crate::util;
use super::FalconHal;
fn select_core_ga102<E: FalconEngine>(bar: &Bar0) -> Result {
- let bcr_ctrl = regs::NV_PRISCV_RISCV_BCR_CTRL::read(bar, E::BASE);
+ let bcr_ctrl = regs::NV_PRISCV_RISCV_BCR_CTRL::read(bar, &E::ID);
if bcr_ctrl.core_select() != PeregrineCoreSelect::Falcon {
regs::NV_PRISCV_RISCV_BCR_CTRL::default()
.set_core_select(PeregrineCoreSelect::Falcon)
- .write(bar, E::BASE);
+ .write(bar, &E::ID);
// TIMEOUT: falcon core should take less than 10ms to report being enabled.
util::wait_on(Delta::from_millis(10), || {
- let r = regs::NV_PRISCV_RISCV_BCR_CTRL::read(bar, E::BASE);
+ let r = regs::NV_PRISCV_RISCV_BCR_CTRL::read(bar, &E::ID);
if r.valid() {
Some(())
} else {
@@ -42,50 +42,47 @@ fn signature_reg_fuse_version_ga102(
engine_id_mask: u16,
ucode_id: u8,
) -> Result<u32> {
- // TODO[REGA]: The ucode fuse versions are contained in the
- // FUSE_OPT_FPF_<ENGINE>_UCODE<X>_VERSION registers, which are an array. Our register
- // definition macros do not allow us to manage them properly, so we need to hardcode their
- // addresses for now. Clean this up once we support register arrays.
+ const NV_FUSE_OPT_FPF_SIZE: u8 = regs::NV_FUSE_OPT_FPF_SIZE as u8;
// Each engine has 16 ucode version registers numbered from 1 to 16.
- if ucode_id == 0 || ucode_id > 16 {
- dev_err!(dev, "invalid ucode id {:#x}", ucode_id);
- return Err(EINVAL);
- }
+ let ucode_idx = match ucode_id {
+ 1..=NV_FUSE_OPT_FPF_SIZE => (ucode_id - 1) as usize,
+ _ => {
+ dev_err!(dev, "invalid ucode id {:#x}", ucode_id);
+ return Err(EINVAL);
+ }
+ };
- // Base address of the FUSE registers array corresponding to the engine.
- let reg_fuse_base = if engine_id_mask & 0x0001 != 0 {
- regs::NV_FUSE_OPT_FPF_SEC2_UCODE1_VERSION::OFFSET
+    // `ucode_idx` is guaranteed to be in the range [0..15], making the `read` calls provably
+    // valid at build time.
+ let reg_fuse_version = if engine_id_mask & 0x0001 != 0 {
+ regs::NV_FUSE_OPT_FPF_SEC2_UCODE1_VERSION::read(bar, ucode_idx).data()
} else if engine_id_mask & 0x0004 != 0 {
- regs::NV_FUSE_OPT_FPF_NVDEC_UCODE1_VERSION::OFFSET
+ regs::NV_FUSE_OPT_FPF_NVDEC_UCODE1_VERSION::read(bar, ucode_idx).data()
} else if engine_id_mask & 0x0400 != 0 {
- regs::NV_FUSE_OPT_FPF_GSP_UCODE1_VERSION::OFFSET
+ regs::NV_FUSE_OPT_FPF_GSP_UCODE1_VERSION::read(bar, ucode_idx).data()
} else {
dev_err!(dev, "unexpected engine_id_mask {:#x}", engine_id_mask);
return Err(EINVAL);
};
- // Read `reg_fuse_base[ucode_id - 1]`.
- let reg_fuse_version =
- bar.read32(reg_fuse_base + ((ucode_id - 1) as usize * core::mem::size_of::<u32>()));
-
// TODO[NUMM]: replace with `last_set_bit` once it lands.
- Ok(u32::BITS - reg_fuse_version.leading_zeros())
+ Ok(u16::BITS - reg_fuse_version.leading_zeros())
}
fn program_brom_ga102<E: FalconEngine>(bar: &Bar0, params: &FalconBromParams) -> Result {
regs::NV_PFALCON2_FALCON_BROM_PARAADDR::default()
.set_value(params.pkc_data_offset)
- .write(bar, E::BASE);
+ .write(bar, &E::ID, 0);
regs::NV_PFALCON2_FALCON_BROM_ENGIDMASK::default()
.set_value(u32::from(params.engine_id_mask))
- .write(bar, E::BASE);
+ .write(bar, &E::ID);
regs::NV_PFALCON2_FALCON_BROM_CURR_UCODE_ID::default()
.set_ucode_id(params.ucode_id)
- .write(bar, E::BASE);
+ .write(bar, &E::ID);
regs::NV_PFALCON2_FALCON_MOD_SEL::default()
.set_algo(FalconModSelAlgo::Rsa3k)
- .write(bar, E::BASE);
+ .write(bar, &E::ID);
Ok(())
}
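A hedged worked example of the fuse lookup above (register value invented): ucode_id = 3 maps to ucode_idx = 2, and if the selected UCODE1_VERSION register reads 0b110 as a 16-bit value, its 13 leading zeros yield u16::BITS - 13 = 3, i.e. one plus the index of the highest set bit.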
diff --git a/drivers/gpu/nova-core/falcon/sec2.rs b/drivers/gpu/nova-core/falcon/sec2.rs
index 5147d9e2a7fe..815786c8480d 100644
--- a/drivers/gpu/nova-core/falcon/sec2.rs
+++ b/drivers/gpu/nova-core/falcon/sec2.rs
@@ -1,10 +1,19 @@
// SPDX-License-Identifier: GPL-2.0
-use crate::falcon::FalconEngine;
+use crate::falcon::{FalconEngine, PFalcon2Base, PFalconBase};
+use crate::regs::macros::RegisterBase;
/// Type specifying the `Sec2` falcon engine. Cannot be instantiated.
pub(crate) struct Sec2(());
-impl FalconEngine for Sec2 {
+impl RegisterBase<PFalconBase> for Sec2 {
const BASE: usize = 0x00840000;
}
+
+impl RegisterBase<PFalcon2Base> for Sec2 {
+ const BASE: usize = 0x00841000;
+}
+
+impl FalconEngine for Sec2 {
+ const ID: Self = Sec2(());
+}
diff --git a/drivers/gpu/nova-core/fb.rs b/drivers/gpu/nova-core/fb.rs
index 4a702525fff4..27d9edab8347 100644
--- a/drivers/gpu/nova-core/fb.rs
+++ b/drivers/gpu/nova-core/fb.rs
@@ -3,8 +3,9 @@
use core::ops::Range;
use kernel::prelude::*;
+use kernel::ptr::{Alignable, Alignment};
use kernel::sizes::*;
-use kernel::types::ARef;
+use kernel::sync::aref::ARef;
use kernel::{dev_warn, device};
use crate::dma::DmaObject;
@@ -130,10 +131,9 @@ impl FbLayout {
};
let frts = {
- const FRTS_DOWN_ALIGN: u64 = SZ_128K as u64;
+ const FRTS_DOWN_ALIGN: Alignment = Alignment::new::<SZ_128K>();
const FRTS_SIZE: u64 = SZ_1M as u64;
- // TODO[NUMM]: replace with `align_down` once it lands.
- let frts_base = (vga_workspace.start & !(FRTS_DOWN_ALIGN - 1)) - FRTS_SIZE;
+ let frts_base = vga_workspace.start.align_down(FRTS_DOWN_ALIGN) - FRTS_SIZE;
frts_base..frts_base + FRTS_SIZE
};
diff --git a/drivers/gpu/nova-core/firmware.rs b/drivers/gpu/nova-core/firmware.rs
index 2931912ddba0..4179a74a2342 100644
--- a/drivers/gpu/nova-core/firmware.rs
+++ b/drivers/gpu/nova-core/firmware.rs
@@ -4,48 +4,36 @@
//! to be loaded into a given execution unit.
use core::marker::PhantomData;
+use core::mem::size_of;
use kernel::device;
use kernel::firmware;
use kernel::prelude::*;
use kernel::str::CString;
+use kernel::transmute::FromBytes;
use crate::dma::DmaObject;
use crate::falcon::FalconFirmware;
use crate::gpu;
-use crate::gpu::Chipset;
+pub(crate) mod booter;
pub(crate) mod fwsec;
-
-pub(crate) const FIRMWARE_VERSION: &str = "535.113.01";
-
-/// Structure encapsulating the firmware blobs required for the GPU to operate.
-#[expect(dead_code)]
-pub(crate) struct Firmware {
- booter_load: firmware::Firmware,
- booter_unload: firmware::Firmware,
- bootloader: firmware::Firmware,
- gsp: firmware::Firmware,
-}
-
-impl Firmware {
- pub(crate) fn new(dev: &device::Device, chipset: Chipset, ver: &str) -> Result<Firmware> {
- let mut chip_name = CString::try_from_fmt(fmt!("{chipset}"))?;
- chip_name.make_ascii_lowercase();
- let chip_name = &*chip_name;
-
- let request = |name_| {
- CString::try_from_fmt(fmt!("nvidia/{chip_name}/gsp/{name_}-{ver}.bin"))
- .and_then(|path| firmware::Firmware::request(&path, dev))
- };
-
- Ok(Firmware {
- booter_load: request("booter_load")?,
- booter_unload: request("booter_unload")?,
- bootloader: request("bootloader")?,
- gsp: request("gsp")?,
- })
- }
+pub(crate) mod gsp;
+pub(crate) mod riscv;
+
+pub(crate) const FIRMWARE_VERSION: &str = "570.144";
+
+/// Requests the GPU firmware `name` suitable for `chipset`, with version `ver`.
+fn request_firmware(
+ dev: &device::Device,
+ chipset: gpu::Chipset,
+ name: &str,
+ ver: &str,
+) -> Result<firmware::Firmware> {
+ let chip_name = chipset.name();
+
+ CString::try_from_fmt(fmt!("nvidia/{chip_name}/gsp/{name}-{ver}.bin"))
+ .and_then(|path| firmware::Firmware::request(&path, dev))
}
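For example (assuming chipset.name() yields the lowercase chip name, as the removed make_ascii_lowercase() call did), requesting name "booter_load" with ver "570.144" on a GA102 resolves to the path nvidia/ga102/gsp/booter_load-570.144.bin.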
/// Structure used to describe some firmwares, notably FWSEC-FRTS.
@@ -150,6 +138,65 @@ impl<F: FalconFirmware> FirmwareDmaObject<F, Unsigned> {
}
}
+/// Header common to most firmware files.
+#[repr(C)]
+#[derive(Debug, Clone)]
+struct BinHdr {
+ /// Magic number, must be `0x10de`.
+ bin_magic: u32,
+ /// Version of the header.
+ bin_ver: u32,
+ /// Size in bytes of the binary (to be ignored).
+ bin_size: u32,
+ /// Offset of the start of the application-specific header.
+ header_offset: u32,
+ /// Offset of the start of the data payload.
+ data_offset: u32,
+ /// Size in bytes of the data payload.
+ data_size: u32,
+}
+
+// SAFETY: all bit patterns are valid for this type, and it doesn't use interior mutability.
+unsafe impl FromBytes for BinHdr {}
+
+/// A firmware blob starting with a [`BinHdr`].
+struct BinFirmware<'a> {
+ hdr: BinHdr,
+ fw: &'a [u8],
+}
+
+impl<'a> BinFirmware<'a> {
+ /// Interpret `fw` as a firmware image starting with a [`BinHdr`], and returns the
+ /// corresponding [`BinFirmware`] that can be used to extract its payload.
+ fn new(fw: &'a firmware::Firmware) -> Result<Self> {
+ const BIN_MAGIC: u32 = 0x10de;
+ let fw = fw.data();
+
+ fw.get(0..size_of::<BinHdr>())
+ // Extract header.
+ .and_then(BinHdr::from_bytes_copy)
+ // Validate header.
+ .and_then(|hdr| {
+ if hdr.bin_magic == BIN_MAGIC {
+ Some(hdr)
+ } else {
+ None
+ }
+ })
+ .map(|hdr| Self { hdr, fw })
+ .ok_or(EINVAL)
+ }
+
+ /// Returns the data payload of the firmware, or `None` if the data range is out of bounds of
+ /// the firmware image.
+ fn data(&self) -> Option<&[u8]> {
+ let fw_start = self.hdr.data_offset as usize;
+ let fw_size = self.hdr.data_size as usize;
+
+ self.fw.get(fw_start..fw_start + fw_size)
+ }
+}
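A hedged usage sketch of the parsing flow above (function name invented; assumes the kernel prelude is in scope as elsewhere in this file):

    fn payload_len(fw: &kernel::firmware::Firmware) -> Result<usize> {
        // Validates the magic value and copies the header out of the image.
        let bin_fw = BinFirmware::new(fw)?;
        // `data()` returns `None` if the payload range is out of bounds.
        bin_fw.data().map(|d| d.len()).ok_or(EINVAL)
    }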
+
pub(crate) struct ModInfoBuilder<const N: usize>(firmware::ModInfoBuilder<N>);
impl<const N: usize> ModInfoBuilder<N> {
@@ -180,8 +227,8 @@ impl<const N: usize> ModInfoBuilder<N> {
let mut this = Self(firmware::ModInfoBuilder::new(module_name));
let mut i = 0;
- while i < gpu::Chipset::NAMES.len() {
- this = this.make_entry_chipset(gpu::Chipset::NAMES[i]);
+ while i < gpu::Chipset::ALL.len() {
+ this = this.make_entry_chipset(gpu::Chipset::ALL[i].name());
i += 1;
}
diff --git a/drivers/gpu/nova-core/firmware/booter.rs b/drivers/gpu/nova-core/firmware/booter.rs
new file mode 100644
index 000000000000..b4ff1b17e4a0
--- /dev/null
+++ b/drivers/gpu/nova-core/firmware/booter.rs
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Support for loading and patching the `Booter` firmware. `Booter` is a Heavy Secured firmware
+//! running on [`Sec2`], which is used on Turing/Ampere to load the GSP firmware into the GSP falcon
+//! (and optionally unload it through a separate firmware image).
+
+use core::marker::PhantomData;
+use core::mem::size_of;
+use core::ops::Deref;
+
+use kernel::device;
+use kernel::prelude::*;
+use kernel::transmute::FromBytes;
+
+use crate::dma::DmaObject;
+use crate::driver::Bar0;
+use crate::falcon::sec2::Sec2;
+use crate::falcon::{Falcon, FalconBromParams, FalconFirmware, FalconLoadParams, FalconLoadTarget};
+use crate::firmware::{BinFirmware, FirmwareDmaObject, FirmwareSignature, Signed, Unsigned};
+use crate::gpu::Chipset;
+
+/// Local convenience function to return a copy of `S` by reinterpreting the bytes starting at
+/// `offset` in `slice`.
+fn frombytes_at<S: FromBytes + Sized>(slice: &[u8], offset: usize) -> Result<S> {
+ slice
+ .get(offset..offset + size_of::<S>())
+ .and_then(S::from_bytes_copy)
+ .ok_or(EINVAL)
+}
+
+/// Heavy-Secured firmware header.
+///
+/// Such firmwares have an application-specific payload that needs to be patched with a given
+/// signature.
+#[repr(C)]
+#[derive(Debug, Clone)]
+struct HsHeaderV2 {
+ /// Offset to the start of the signatures.
+ sig_prod_offset: u32,
+ /// Size in bytes of the signatures.
+ sig_prod_size: u32,
+ /// Offset to a `u32` containing the location at which to patch the signature in the microcode
+ /// image.
+ patch_loc_offset: u32,
+ /// Offset to a `u32` containing the index of the signature to patch.
+ patch_sig_offset: u32,
+ /// Start offset to the signature metadata.
+ meta_data_offset: u32,
+ /// Size in bytes of the signature metadata.
+ meta_data_size: u32,
+ /// Offset to a `u32` containing the number of signatures in the signatures section.
+ num_sig_offset: u32,
+ /// Offset of the application-specific header.
+ header_offset: u32,
+ /// Size in bytes of the application-specific header.
+ header_size: u32,
+}
+
+// SAFETY: all bit patterns are valid for this type, and it doesn't use interior mutability.
+unsafe impl FromBytes for HsHeaderV2 {}
+
+/// Heavy-Secured Firmware image container.
+///
+/// This provides convenient access to the fields of [`HsHeaderV2`] that are actually indices to
+/// read from in the firmware data.
+struct HsFirmwareV2<'a> {
+ hdr: HsHeaderV2,
+ fw: &'a [u8],
+}
+
+impl<'a> HsFirmwareV2<'a> {
+ /// Interprets the header of `bin_fw` as a [`HsHeaderV2`] and returns an instance of
+ /// `HsFirmwareV2` for further parsing.
+ ///
+ /// Fails if the header pointed at by `bin_fw` is not within the bounds of the firmware image.
+ fn new(bin_fw: &BinFirmware<'a>) -> Result<Self> {
+ frombytes_at::<HsHeaderV2>(bin_fw.fw, bin_fw.hdr.header_offset as usize)
+ .map(|hdr| Self { hdr, fw: bin_fw.fw })
+ }
+
+ /// Returns the location at which the signatures should be patched in the microcode image.
+ ///
+ /// Fails if the offset of the patch location is outside the bounds of the firmware
+ /// image.
+ fn patch_location(&self) -> Result<u32> {
+ frombytes_at::<u32>(self.fw, self.hdr.patch_loc_offset as usize)
+ }
+
+ /// Returns an iterator to the signatures of the firmware. The iterator can be empty if the
+ /// firmware is unsigned.
+ ///
+    /// Fails if the signatures pointed to are outside the bounds of the firmware image.
+ fn signatures_iter(&'a self) -> Result<impl Iterator<Item = BooterSignature<'a>>> {
+ let num_sig = frombytes_at::<u32>(self.fw, self.hdr.num_sig_offset as usize)?;
+ let iter = match self.hdr.sig_prod_size.checked_div(num_sig) {
+ // If there are no signatures, return an iterator that will yield zero elements.
+ None => (&[] as &[u8]).chunks_exact(1),
+ Some(sig_size) => {
+ let patch_sig = frombytes_at::<u32>(self.fw, self.hdr.patch_sig_offset as usize)?;
+ let signatures_start = (self.hdr.sig_prod_offset + patch_sig) as usize;
+
+ self.fw
+ // Get signatures range.
+ .get(signatures_start..signatures_start + self.hdr.sig_prod_size as usize)
+ .ok_or(EINVAL)?
+ .chunks_exact(sig_size as usize)
+ }
+ };
+
+ // Map the byte slices into signatures.
+ Ok(iter.map(BooterSignature))
+ }
+}
+
+/// Signature parameters, as defined in the firmware.
+#[repr(C)]
+struct HsSignatureParams {
+ /// Fuse version to use.
+ fuse_ver: u32,
+ /// Mask of engine IDs this firmware applies to.
+ engine_id_mask: u32,
+ /// ID of the microcode.
+ ucode_id: u32,
+}
+
+// SAFETY: all bit patterns are valid for this type, and it doesn't use interior mutability.
+unsafe impl FromBytes for HsSignatureParams {}
+
+impl HsSignatureParams {
+ /// Returns the signature parameters contained in `hs_fw`.
+ ///
+ /// Fails if the meta data parameter of `hs_fw` is outside the bounds of the firmware image, or
+ /// if its size doesn't match that of [`HsSignatureParams`].
+ fn new(hs_fw: &HsFirmwareV2<'_>) -> Result<Self> {
+ let start = hs_fw.hdr.meta_data_offset as usize;
+ let end = start
+ .checked_add(hs_fw.hdr.meta_data_size as usize)
+ .ok_or(EINVAL)?;
+
+ hs_fw
+ .fw
+ .get(start..end)
+ .and_then(Self::from_bytes_copy)
+ .ok_or(EINVAL)
+ }
+}
+
+/// Header for code and data load offsets.
+#[repr(C)]
+#[derive(Debug, Clone)]
+struct HsLoadHeaderV2 {
+    /// Offset at which the code starts.
+    os_code_offset: u32,
+    /// Total size of the code, for all apps.
+    os_code_size: u32,
+    /// Offset at which the data starts.
+    os_data_offset: u32,
+    /// Size of the data.
+    os_data_size: u32,
+    /// Number of apps following this header. Each app is described by a [`HsLoadHeaderV2App`].
+    num_apps: u32,
+}
+
+// SAFETY: all bit patterns are valid for this type, and it doesn't use interior mutability.
+unsafe impl FromBytes for HsLoadHeaderV2 {}
+
+impl HsLoadHeaderV2 {
+ /// Returns the load header contained in `hs_fw`.
+ ///
+ /// Fails if the header pointed at by `hs_fw` is not within the bounds of the firmware image.
+ fn new(hs_fw: &HsFirmwareV2<'_>) -> Result<Self> {
+ frombytes_at::<Self>(hs_fw.fw, hs_fw.hdr.header_offset as usize)
+ }
+}
+
+/// Header for app code loader.
+#[repr(C)]
+#[derive(Debug, Clone)]
+struct HsLoadHeaderV2App {
+ /// Offset at which to load the app code.
+ offset: u32,
+ /// Length in bytes of the app code.
+ len: u32,
+}
+
+// SAFETY: all bit patterns are valid for this type, and it doesn't use interior mutability.
+unsafe impl FromBytes for HsLoadHeaderV2App {}
+
+impl HsLoadHeaderV2App {
+ /// Returns the [`HsLoadHeaderV2App`] for app `idx` of `hs_fw`.
+ ///
+ /// Fails if `idx` is larger than the number of apps declared in `hs_fw`, or if the header is
+ /// not within the bounds of the firmware image.
+ fn new(hs_fw: &HsFirmwareV2<'_>, idx: u32) -> Result<Self> {
+ let load_hdr = HsLoadHeaderV2::new(hs_fw)?;
+ if idx >= load_hdr.num_apps {
+ Err(EINVAL)
+ } else {
+ frombytes_at::<Self>(
+ hs_fw.fw,
+ (hs_fw.hdr.header_offset as usize)
+ // Skip the load header...
+ .checked_add(size_of::<HsLoadHeaderV2>())
+ // ... and jump to app header `idx`.
+ .and_then(|offset| {
+ offset.checked_add((idx as usize).checked_mul(size_of::<Self>())?)
+ })
+ .ok_or(EINVAL)?,
+ )
+ }
+ }
+}
+
+/// Signature for Booter firmware. Signature sizes are encoded into the header and not known at
+/// compile time, so we just wrap byte slices on which we can implement [`FirmwareSignature`].
+struct BooterSignature<'a>(&'a [u8]);
+
+impl<'a> AsRef<[u8]> for BooterSignature<'a> {
+ fn as_ref(&self) -> &[u8] {
+ self.0
+ }
+}
+
+impl<'a> FirmwareSignature<BooterFirmware> for BooterSignature<'a> {}
+
+/// The `Booter` loader firmware, responsible for loading the GSP.
+pub(crate) struct BooterFirmware {
+ // Load parameters for `IMEM` falcon memory.
+ imem_load_target: FalconLoadTarget,
+ // Load parameters for `DMEM` falcon memory.
+ dmem_load_target: FalconLoadTarget,
+ // BROM falcon parameters.
+ brom_params: FalconBromParams,
+ // Device-mapped firmware image.
+ ucode: FirmwareDmaObject<Self, Signed>,
+}
+
+impl FirmwareDmaObject<BooterFirmware, Unsigned> {
+ fn new_booter(dev: &device::Device<device::Bound>, data: &[u8]) -> Result<Self> {
+ DmaObject::from_data(dev, data).map(|ucode| Self(ucode, PhantomData))
+ }
+}
+
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub(crate) enum BooterKind {
+ Loader,
+ #[expect(unused)]
+ Unloader,
+}
+
+impl BooterFirmware {
+ /// Parses the Booter firmware contained in `fw`, and patches the correct signature so it is
+ /// ready to be loaded and run on `falcon`.
+ pub(crate) fn new(
+ dev: &device::Device<device::Bound>,
+ kind: BooterKind,
+ chipset: Chipset,
+ ver: &str,
+ falcon: &Falcon<<Self as FalconFirmware>::Target>,
+ bar: &Bar0,
+ ) -> Result<Self> {
+ let fw_name = match kind {
+ BooterKind::Loader => "booter_load",
+ BooterKind::Unloader => "booter_unload",
+ };
+ let fw = super::request_firmware(dev, chipset, fw_name, ver)?;
+ let bin_fw = BinFirmware::new(&fw)?;
+
+ // The binary firmware embeds a Heavy-Secured firmware.
+ let hs_fw = HsFirmwareV2::new(&bin_fw)?;
+
+ // The Heavy-Secured firmware embeds a firmware load descriptor.
+ let load_hdr = HsLoadHeaderV2::new(&hs_fw)?;
+
+ // Offset in `ucode` where to patch the signature.
+ let patch_loc = hs_fw.patch_location()?;
+
+ let sig_params = HsSignatureParams::new(&hs_fw)?;
+ let brom_params = FalconBromParams {
+ // `load_hdr.os_data_offset` is an absolute index, but `pkc_data_offset` is from the
+ // signature patch location.
+ pkc_data_offset: patch_loc
+ .checked_sub(load_hdr.os_data_offset)
+ .ok_or(EINVAL)?,
+ engine_id_mask: u16::try_from(sig_params.engine_id_mask).map_err(|_| EINVAL)?,
+ ucode_id: u8::try_from(sig_params.ucode_id).map_err(|_| EINVAL)?,
+ };
+ let app0 = HsLoadHeaderV2App::new(&hs_fw, 0)?;
+
+ // Object containing the firmware microcode to be signature-patched.
+ let ucode = bin_fw
+ .data()
+ .ok_or(EINVAL)
+ .and_then(|data| FirmwareDmaObject::<Self, _>::new_booter(dev, data))?;
+
+ let ucode_signed = {
+ let mut signatures = hs_fw.signatures_iter()?.peekable();
+
+ if signatures.peek().is_none() {
+ // If there are no signatures, then the firmware is unsigned.
+ ucode.no_patch_signature()
+ } else {
+ // Obtain the version from the fuse register, and extract the corresponding
+ // signature.
+ let reg_fuse_version = falcon.signature_reg_fuse_version(
+ bar,
+ brom_params.engine_id_mask,
+ brom_params.ucode_id,
+ )?;
+
+ // `0` means the last signature should be used.
+ const FUSE_VERSION_USE_LAST_SIG: u32 = 0;
+ let signature = match reg_fuse_version {
+ FUSE_VERSION_USE_LAST_SIG => signatures.last(),
+                    // Otherwise, the hardware fuse version is subtracted from the firmware's
+                    // fuse version to obtain the signature index.
+ reg_fuse_version => {
+ let Some(idx) = sig_params.fuse_ver.checked_sub(reg_fuse_version) else {
+ dev_err!(dev, "invalid fuse version for Booter firmware\n");
+ return Err(EINVAL);
+ };
+ signatures.nth(idx as usize)
+ }
+ }
+ .ok_or(EINVAL)?;
+
+ ucode.patch_signature(&signature, patch_loc as usize)?
+ }
+ };
+
+ Ok(Self {
+ imem_load_target: FalconLoadTarget {
+ src_start: app0.offset,
+ dst_start: 0,
+ len: app0.len,
+ },
+ dmem_load_target: FalconLoadTarget {
+ src_start: load_hdr.os_data_offset,
+ dst_start: 0,
+ len: load_hdr.os_data_size,
+ },
+ brom_params,
+ ucode: ucode_signed,
+ })
+ }
+}
+
+impl FalconLoadParams for BooterFirmware {
+ fn imem_load_params(&self) -> FalconLoadTarget {
+ self.imem_load_target.clone()
+ }
+
+ fn dmem_load_params(&self) -> FalconLoadTarget {
+ self.dmem_load_target.clone()
+ }
+
+ fn brom_params(&self) -> FalconBromParams {
+ self.brom_params.clone()
+ }
+
+ fn boot_addr(&self) -> u32 {
+ self.imem_load_target.src_start
+ }
+}
+
+impl Deref for BooterFirmware {
+ type Target = DmaObject;
+
+ fn deref(&self) -> &Self::Target {
+ &self.ucode.0
+ }
+}
+
+impl FalconFirmware for BooterFirmware {
+ type Target = Sec2;
+}
diff --git a/drivers/gpu/nova-core/firmware/fwsec.rs b/drivers/gpu/nova-core/firmware/fwsec.rs
index 0dff3cfa90af..8edbb5c0572c 100644
--- a/drivers/gpu/nova-core/firmware/fwsec.rs
+++ b/drivers/gpu/nova-core/firmware/fwsec.rs
@@ -202,9 +202,6 @@ pub(crate) struct FwsecFirmware {
ucode: FirmwareDmaObject<Self, Signed>,
}
-// We need to load full DMEM pages.
-const DMEM_LOAD_SIZE_ALIGN: u32 = 256;
-
impl FalconLoadParams for FwsecFirmware {
fn imem_load_params(&self) -> FalconLoadTarget {
FalconLoadTarget {
@@ -218,11 +215,7 @@ impl FalconLoadParams for FwsecFirmware {
FalconLoadTarget {
src_start: self.desc.imem_load_size,
dst_start: self.desc.dmem_phys_base,
- // TODO[NUMM]: replace with `align_up` once it lands.
- len: self
- .desc
- .dmem_load_size
- .next_multiple_of(DMEM_LOAD_SIZE_ALIGN),
+ len: self.desc.dmem_load_size,
}
}
@@ -253,8 +246,8 @@ impl FalconFirmware for FwsecFirmware {
impl FirmwareDmaObject<FwsecFirmware, Unsigned> {
fn new_fwsec(dev: &Device<device::Bound>, bios: &Vbios, cmd: FwsecCommand) -> Result<Self> {
- let desc = bios.fwsec_image().header(dev)?;
- let ucode = bios.fwsec_image().ucode(dev, desc)?;
+ let desc = bios.fwsec_image().header()?;
+ let ucode = bios.fwsec_image().ucode(desc)?;
let mut dma_object = DmaObject::from_data(dev, ucode)?;
let hdr_offset = (desc.imem_load_size + desc.interface_offset) as usize;
@@ -343,7 +336,7 @@ impl FwsecFirmware {
let ucode_dma = FirmwareDmaObject::<Self, _>::new_fwsec(dev, bios, cmd)?;
// Patch signature if needed.
- let desc = bios.fwsec_image().header(dev)?;
+ let desc = bios.fwsec_image().header()?;
let ucode_signed = if desc.signature_count != 0 {
let sig_base_img = (desc.imem_load_size + desc.pkc_data_offset) as usize;
let desc_sig_versions = u32::from(desc.signature_versions);
@@ -382,7 +375,7 @@ impl FwsecFirmware {
dev_dbg!(dev, "patching signature with index {}\n", signature_idx);
let signature = bios
.fwsec_image()
- .sigs(dev, desc)
+ .sigs(desc)
.and_then(|sigs| sigs.get(signature_idx).ok_or(EINVAL))?;
ucode_dma.patch_signature(signature, sig_base_img)?
diff --git a/drivers/gpu/nova-core/firmware/gsp.rs b/drivers/gpu/nova-core/firmware/gsp.rs
new file mode 100644
index 000000000000..9b70095434c6
--- /dev/null
+++ b/drivers/gpu/nova-core/firmware/gsp.rs
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use core::mem::size_of_val;
+
+use kernel::device;
+use kernel::dma::{DataDirection, DmaAddress};
+use kernel::kvec;
+use kernel::prelude::*;
+use kernel::scatterlist::{Owned, SGTable};
+
+use crate::dma::DmaObject;
+use crate::firmware::riscv::RiscvFirmware;
+use crate::gpu::{Architecture, Chipset};
+use crate::gsp::GSP_PAGE_SIZE;
+
+/// Ad-hoc and temporary module to extract sections from ELF images.
+///
+/// Some firmware images are currently packaged as ELF files, where section names are used as keys
+/// to locate specific bits of related data. Future firmware versions are scheduled to move away from
+/// that scheme before nova-core becomes stable, which means this module will eventually be
+/// removed.
+mod elf {
+ use core::mem::size_of;
+
+ use kernel::bindings;
+ use kernel::str::CStr;
+ use kernel::transmute::FromBytes;
+
+ /// Newtype to provide a [`FromBytes`] implementation.
+ #[repr(transparent)]
+ struct Elf64Hdr(bindings::elf64_hdr);
+ // SAFETY: all bit patterns are valid for this type, and it doesn't use interior mutability.
+ unsafe impl FromBytes for Elf64Hdr {}
+
+ #[repr(transparent)]
+ struct Elf64SHdr(bindings::elf64_shdr);
+ // SAFETY: all bit patterns are valid for this type, and it doesn't use interior mutability.
+ unsafe impl FromBytes for Elf64SHdr {}
+
+ /// Tries to extract the section named `name` from the ELF64 image `elf` and returns its contents.
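+ ///
+ /// For example, `elf64_section(fw, ".fwimage")` returns the contents of the `.fwimage`
+ /// section, if present.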
+ pub(super) fn elf64_section<'a, 'b>(elf: &'a [u8], name: &'b str) -> Option<&'a [u8]> {
+ let hdr = &elf
+ .get(0..size_of::<bindings::elf64_hdr>())
+ .and_then(Elf64Hdr::from_bytes)?
+ .0;
+
+ // Get all the section headers.
+ let mut shdr = {
+ let shdr_num = usize::from(hdr.e_shnum);
+ let shdr_start = usize::try_from(hdr.e_shoff).ok()?;
+ let shdr_end = shdr_num
+ .checked_mul(size_of::<Elf64SHdr>())
+ .and_then(|v| v.checked_add(shdr_start))?;
+
+ elf.get(shdr_start..shdr_end)
+ .map(|slice| slice.chunks_exact(size_of::<Elf64SHdr>()))?
+ };
+
+ // Get the strings table.
+ let strhdr = shdr
+ .clone()
+ .nth(usize::from(hdr.e_shstrndx))
+ .and_then(Elf64SHdr::from_bytes)?;
+
+ // Find the section which name matches `name` and return it.
+ shdr.find(|&sh| {
+ let Some(hdr) = Elf64SHdr::from_bytes(sh) else {
+ return false;
+ };
+
+ let Some(name_idx) = strhdr
+ .0
+ .sh_offset
+ .checked_add(u64::from(hdr.0.sh_name))
+ .and_then(|idx| usize::try_from(idx).ok())
+ else {
+ return false;
+ };
+
+ // Get the start of the name.
+ elf.get(name_idx..)
+ // Stop at the first `0`.
+ .and_then(|nstr| nstr.get(0..=nstr.iter().position(|b| *b == 0)?))
+ // Convert into CStr. This should never fail because of the line above.
+ .and_then(|nstr| CStr::from_bytes_with_nul(nstr).ok())
+ // Convert into str.
+ .and_then(|c_str| c_str.to_str().ok())
+ // Check that the name matches.
+ .map(|str| str == name)
+ .unwrap_or(false)
+ })
+ // Return the slice containing the section.
+ .and_then(|sh| {
+ let hdr = Elf64SHdr::from_bytes(sh)?;
+ let start = usize::try_from(hdr.0.sh_offset).ok()?;
+ let end = usize::try_from(hdr.0.sh_size)
+ .ok()
+ .and_then(|sh_size| start.checked_add(sh_size))?;
+
+ elf.get(start..end)
+ })
+ }
+}
+
+/// GSP firmware with 3-level radix page tables for the GSP bootloader.
+///
+/// The bootloader expects firmware to be mapped starting at address 0 in GSP's virtual address
+/// space:
+///
+/// ```text
+/// Level 0: 1 page, 1 entry -> points to first level 1 page
+/// Level 1: Multiple pages/entries -> each entry points to a level 2 page
+/// Level 2: Multiple pages/entries -> each entry points to a firmware page
+/// ```
+///
+/// Each page is 4KB, each entry is 8 bytes (64-bit DMA address).
+/// Also known as "Radix3" firmware.
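+///
+/// As a worked example (hypothetical size): a 32 MiB firmware spans 8192 4 KiB pages, so the
+/// level 2 table needs 8192 entries (16 pages at 512 8-byte entries per page), the level 1
+/// table needs 16 entries (a single page), and the level 0 page holds the address of that
+/// single level 1 page.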
+#[pin_data]
+pub(crate) struct GspFirmware {
+ /// The GSP firmware inside a [`VVec`], device-mapped via a SG table.
+ #[pin]
+ fw: SGTable<Owned<VVec<u8>>>,
+ /// Level 2 page table whose entries contain DMA addresses of firmware pages.
+ #[pin]
+ level2: SGTable<Owned<VVec<u8>>>,
+ /// Level 1 page table whose entries contain DMA addresses of level 2 pages.
+ #[pin]
+ level1: SGTable<Owned<VVec<u8>>>,
+ /// Level 0 page table (single 4KB page) with one entry: DMA address of first level 1 page.
+ level0: DmaObject,
+ /// Size in bytes of the firmware contained in [`Self::fw`].
+ size: usize,
+ /// Device-mapped GSP signatures matching the GPU's [`Chipset`].
+ signatures: DmaObject,
+ /// GSP bootloader, verifies the GSP firmware before loading and running it.
+ bootloader: RiscvFirmware,
+}
+
+impl GspFirmware {
+ /// Loads the GSP firmware binaries, maps them into `dev`'s address space, and creates the page
+ /// tables expected by the GSP bootloader to load it.
+ pub(crate) fn new<'a, 'b>(
+ dev: &'a device::Device<device::Bound>,
+ chipset: Chipset,
+ ver: &'b str,
+ ) -> Result<impl PinInit<Self, Error> + 'a> {
+ let fw = super::request_firmware(dev, chipset, "gsp", ver)?;
+
+ let fw_section = elf::elf64_section(fw.data(), ".fwimage").ok_or(EINVAL)?;
+
+ let sigs_section = match chipset.arch() {
+ Architecture::Ampere => ".fwsignature_ga10x",
+ _ => return Err(ENOTSUPP),
+ };
+ let signatures = elf::elf64_section(fw.data(), sigs_section)
+ .ok_or(EINVAL)
+ .and_then(|data| DmaObject::from_data(dev, data))?;
+
+ let size = fw_section.len();
+
+ // Move the firmware into a vmalloc'd vector and map it into the device address
+ // space.
+ let fw_vvec = VVec::with_capacity(fw_section.len(), GFP_KERNEL)
+ .and_then(|mut v| {
+ v.extend_from_slice(fw_section, GFP_KERNEL)?;
+ Ok(v)
+ })
+ .map_err(|_| ENOMEM)?;
+
+ let bl = super::request_firmware(dev, chipset, "bootloader", ver)?;
+ let bootloader = RiscvFirmware::new(dev, &bl)?;
+
+ Ok(try_pin_init!(Self {
+ fw <- SGTable::new(dev, fw_vvec, DataDirection::ToDevice, GFP_KERNEL),
+ level2 <- {
+ // Allocate the level 2 page table, map the firmware onto it, and map it into the
+ // device address space.
+ VVec::<u8>::with_capacity(
+ fw.iter().count() * core::mem::size_of::<u64>(),
+ GFP_KERNEL,
+ )
+ .map_err(|_| ENOMEM)
+ .and_then(|level2| map_into_lvl(&fw, level2))
+ .map(|level2| SGTable::new(dev, level2, DataDirection::ToDevice, GFP_KERNEL))?
+ },
+ level1 <- {
+ // Allocate the level 1 page table, map the level 2 page table onto it, and map it
+ // into the device address space.
+ VVec::<u8>::with_capacity(
+ level2.iter().count() * core::mem::size_of::<u64>(),
+ GFP_KERNEL,
+ )
+ .map_err(|_| ENOMEM)
+ .and_then(|level1| map_into_lvl(&level2, level1))
+ .map(|level1| SGTable::new(dev, level1, DataDirection::ToDevice, GFP_KERNEL))?
+ },
+ level0: {
+ // Allocate the level 0 page table as a device-visible DMA object, and map the
+ // level 1 page table onto it.
+
+ // Level 0 page table data.
+ let mut level0_data = kvec![0u8; GSP_PAGE_SIZE]?;
+
+ // Fill level 1 page entry.
+ #[allow(clippy::useless_conversion)]
+ let level1_entry = u64::from(level1.iter().next().unwrap().dma_address());
+ let dst = &mut level0_data[..size_of_val(&level1_entry)];
+ dst.copy_from_slice(&level1_entry.to_le_bytes());
+
+ // Turn the level0 page table into a [`DmaObject`].
+ DmaObject::from_data(dev, &level0_data)?
+ },
+ size,
+ signatures,
+ bootloader,
+ }))
+ }
+
+ #[expect(unused)]
+ /// Returns the DMA handle of the radix3 level 0 page table.
+ pub(crate) fn radix3_dma_handle(&self) -> DmaAddress {
+ self.level0.dma_handle()
+ }
+}
+
+/// Build a page table from a scatter-gather list.
+///
+/// Takes each DMA-mapped region from `sg_table` and writes page table entries
+/// for all 4KB pages within that region. For example, a 16KB SG entry becomes
+/// 4 consecutive page table entries.
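+///
+/// As an illustration (hypothetical addresses), an SG entry with DMA address `0x1000_0000` and
+/// length 16 KiB yields the four consecutive little-endian entries `0x1000_0000`, `0x1000_1000`,
+/// `0x1000_2000` and `0x1000_3000`.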
+fn map_into_lvl(sg_table: &SGTable<Owned<VVec<u8>>>, mut dst: VVec<u8>) -> Result<VVec<u8>> {
+ for sg_entry in sg_table.iter() {
+ // Number of pages we need to map.
+ let num_pages = (sg_entry.dma_len() as usize).div_ceil(GSP_PAGE_SIZE);
+
+ for i in 0..num_pages {
+ let entry = sg_entry.dma_address() + (i as u64 * GSP_PAGE_SIZE as u64);
+ dst.extend_from_slice(&entry.to_le_bytes(), GFP_KERNEL)?;
+ }
+ }
+
+ Ok(dst)
+}
diff --git a/drivers/gpu/nova-core/firmware/riscv.rs b/drivers/gpu/nova-core/firmware/riscv.rs
new file mode 100644
index 000000000000..afb08f5bc4ba
--- /dev/null
+++ b/drivers/gpu/nova-core/firmware/riscv.rs
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Support for firmware binaries designed to run on a RISC-V core. Such firmware files have a
+//! dedicated header.
+
+use core::mem::size_of;
+
+use kernel::device;
+use kernel::firmware::Firmware;
+use kernel::prelude::*;
+use kernel::transmute::FromBytes;
+
+use crate::dma::DmaObject;
+use crate::firmware::BinFirmware;
+
+/// Descriptor for microcode running on a RISC-V core.
+#[repr(C)]
+#[derive(Debug)]
+struct RmRiscvUCodeDesc {
+ version: u32,
+ bootloader_offset: u32,
+ bootloader_size: u32,
+ bootloader_param_offset: u32,
+ bootloader_param_size: u32,
+ riscv_elf_offset: u32,
+ riscv_elf_size: u32,
+ app_version: u32,
+ manifest_offset: u32,
+ manifest_size: u32,
+ monitor_data_offset: u32,
+ monitor_data_size: u32,
+ monitor_code_offset: u32,
+ monitor_code_size: u32,
+}
+
+// SAFETY: all bit patterns are valid for this type, and it doesn't use interior mutability.
+unsafe impl FromBytes for RmRiscvUCodeDesc {}
+
+impl RmRiscvUCodeDesc {
+ /// Interprets the header of `bin_fw` as a [`RmRiscvUCodeDesc`] and returns it.
+ ///
+ /// Fails if the header pointed at by `bin_fw` is not within the bounds of the firmware image.
+ fn new(bin_fw: &BinFirmware<'_>) -> Result<Self> {
+ let offset = bin_fw.hdr.header_offset as usize;
+
+ bin_fw
+ .fw
+ .get(offset..offset + size_of::<Self>())
+ .and_then(Self::from_bytes_copy)
+ .ok_or(EINVAL)
+ }
+}
+
+/// A parsed firmware for a RISC-V core, ready to be loaded and run.
+#[expect(unused)]
+pub(crate) struct RiscvFirmware {
+ /// Offset at which the code starts in the firmware image.
+ code_offset: u32,
+ /// Offset at which the data starts in the firmware image.
+ data_offset: u32,
+ /// Offset at which the manifest starts in the firmware image.
+ manifest_offset: u32,
+ /// Application version.
+ app_version: u32,
+ /// Device-mapped firmware image.
+ ucode: DmaObject,
+}
+
+impl RiscvFirmware {
+ /// Parses the RISC-V firmware image contained in `fw`.
+ pub(crate) fn new(dev: &device::Device<device::Bound>, fw: &Firmware) -> Result<Self> {
+ let bin_fw = BinFirmware::new(fw)?;
+
+ let riscv_desc = RmRiscvUCodeDesc::new(&bin_fw)?;
+
+ let ucode = {
+ let start = bin_fw.hdr.data_offset as usize;
+ let len = bin_fw.hdr.data_size as usize;
+
+ DmaObject::from_data(dev, fw.data().get(start..start + len).ok_or(EINVAL)?)?
+ };
+
+ Ok(Self {
+ ucode,
+ code_offset: riscv_desc.monitor_code_offset,
+ data_offset: riscv_desc.monitor_data_offset,
+ manifest_offset: riscv_desc.manifest_offset,
+ app_version: riscv_desc.app_version,
+ })
+ }
+}
diff --git a/drivers/gpu/nova-core/gpu.rs b/drivers/gpu/nova-core/gpu.rs
index b5c9786619a9..af20e2daea24 100644
--- a/drivers/gpu/nova-core/gpu.rs
+++ b/drivers/gpu/nova-core/gpu.rs
@@ -1,18 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
-use kernel::{device, devres::Devres, error::code::*, pci, prelude::*, sync::Arc};
+use kernel::{device, devres::Devres, error::code::*, fmt, pci, prelude::*, sync::Arc};
use crate::driver::Bar0;
-use crate::falcon::{gsp::Gsp, sec2::Sec2, Falcon};
-use crate::fb::FbLayout;
+use crate::falcon::{gsp::Gsp as GspFalcon, sec2::Sec2 as Sec2Falcon, Falcon};
use crate::fb::SysmemFlush;
-use crate::firmware::fwsec::{FwsecCommand, FwsecFirmware};
-use crate::firmware::{Firmware, FIRMWARE_VERSION};
use crate::gfw;
+use crate::gsp::Gsp;
use crate::regs;
-use crate::util;
-use crate::vbios::Vbios;
-use core::fmt;
macro_rules! define_chipset {
({ $($variant:ident = $value:expr),* $(,)* }) =>
@@ -28,13 +23,23 @@ macro_rules! define_chipset {
$( Chipset::$variant, )*
];
- pub(crate) const NAMES: [&'static str; Self::ALL.len()] = [
- $( util::const_bytes_to_str(
- util::to_lowercase_bytes::<{ stringify!($variant).len() }>(
- stringify!($variant)
- ).as_slice()
- ), )*
- ];
+ ::kernel::macros::paste!(
+ /// Returns the name of this chipset, in lowercase.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let chipset = Chipset::GA102;
+ /// assert_eq!(chipset.name(), "ga102");
+ /// ```
+ pub(crate) const fn name(&self) -> &'static str {
+ match *self {
+ $(
+ Chipset::$variant => stringify!([<$variant:lower>]),
+ )*
+ }
+ }
+ );
}
// TODO[FPRI]: replace with something like derive(FromPrimitive)
@@ -163,150 +168,74 @@ impl Spec {
}
/// Structure holding the resources required to operate the GPU.
-#[pin_data(PinnedDrop)]
+#[pin_data]
pub(crate) struct Gpu {
spec: Spec,
/// MMIO mapping of PCI BAR 0
bar: Arc<Devres<Bar0>>,
- fw: Firmware,
/// System memory page required for flushing all pending GPU-side memory writes done through
/// PCIE into system memory, via sysmembar (A GPU-initiated HW memory-barrier operation).
sysmem_flush: SysmemFlush,
-}
-
-#[pinned_drop]
-impl PinnedDrop for Gpu {
- fn drop(self: Pin<&mut Self>) {
- // Unregister the sysmem flush page before we release it.
- self.bar
- .try_access_with(|b| self.sysmem_flush.unregister(b));
- }
+ /// GSP falcon instance, used for GSP boot up and cleanup.
+ gsp_falcon: Falcon<GspFalcon>,
+ /// SEC2 falcon instance, used for GSP boot up and cleanup.
+ sec2_falcon: Falcon<Sec2Falcon>,
+ /// GSP runtime data. Temporarily an empty placeholder.
+ #[pin]
+ gsp: Gsp,
}
impl Gpu {
- /// Helper function to load and run the FWSEC-FRTS firmware and confirm that it has properly
- /// created the WPR2 region.
- ///
- /// TODO: this needs to be moved into a larger type responsible for booting the whole GSP
- /// (`GspBooter`?).
- fn run_fwsec_frts(
- dev: &device::Device<device::Bound>,
- falcon: &Falcon<Gsp>,
- bar: &Bar0,
- bios: &Vbios,
- fb_layout: &FbLayout,
- ) -> Result<()> {
- // Check that the WPR2 region does not already exists - if it does, we cannot run
- // FWSEC-FRTS until the GPU is reset.
- if regs::NV_PFB_PRI_MMU_WPR2_ADDR_HI::read(bar).higher_bound() != 0 {
- dev_err!(
- dev,
- "WPR2 region already exists - GPU needs to be reset to proceed\n"
- );
- return Err(EBUSY);
- }
+ pub(crate) fn new<'a>(
+ pdev: &'a pci::Device<device::Bound>,
+ devres_bar: Arc<Devres<Bar0>>,
+ bar: &'a Bar0,
+ ) -> impl PinInit<Self, Error> + 'a {
+ try_pin_init!(Self {
+ spec: Spec::new(bar).inspect(|spec| {
+ dev_info!(
+ pdev.as_ref(),
+ "NVIDIA (Chipset: {}, Architecture: {:?}, Revision: {})\n",
+ spec.chipset,
+ spec.chipset.arch(),
+ spec.revision
+ );
+ })?,
- let fwsec_frts = FwsecFirmware::new(
- dev,
- falcon,
- bar,
- bios,
- FwsecCommand::Frts {
- frts_addr: fb_layout.frts.start,
- frts_size: fb_layout.frts.end - fb_layout.frts.start,
+ // We must wait for GFW_BOOT completion before doing any significant setup on the GPU.
+ _: {
+ gfw::wait_gfw_boot_completion(bar)
+ .inspect_err(|_| dev_err!(pdev.as_ref(), "GFW boot did not complete"))?;
},
- )?;
- // Run FWSEC-FRTS to create the WPR2 region.
- fwsec_frts.run(dev, falcon, bar)?;
+ sysmem_flush: SysmemFlush::register(pdev.as_ref(), bar, spec.chipset)?,
- // SCRATCH_E contains the error code for FWSEC-FRTS.
- let frts_status = regs::NV_PBUS_SW_SCRATCH_0E::read(bar).frts_err_code();
- if frts_status != 0 {
- dev_err!(
- dev,
- "FWSEC-FRTS returned with error code {:#x}",
- frts_status
- );
+ gsp_falcon: Falcon::new(
+ pdev.as_ref(),
+ spec.chipset,
+ bar,
+ spec.chipset > Chipset::GA100,
+ )
+ .inspect(|falcon| falcon.clear_swgen0_intr(bar))?,
- return Err(EIO);
- }
+ sec2_falcon: Falcon::new(pdev.as_ref(), spec.chipset, bar, true)?,
- // Check that the WPR2 region has been created as we requested.
- let (wpr2_lo, wpr2_hi) = (
- regs::NV_PFB_PRI_MMU_WPR2_ADDR_LO::read(bar).lower_bound(),
- regs::NV_PFB_PRI_MMU_WPR2_ADDR_HI::read(bar).higher_bound(),
- );
+ gsp <- Gsp::new(),
- match (wpr2_lo, wpr2_hi) {
- (_, 0) => {
- dev_err!(dev, "WPR2 region not created after running FWSEC-FRTS\n");
+ _: { gsp.boot(pdev, bar, spec.chipset, gsp_falcon, sec2_falcon)? },
- Err(EIO)
- }
- (wpr2_lo, _) if wpr2_lo != fb_layout.frts.start => {
- dev_err!(
- dev,
- "WPR2 region created at unexpected address {:#x}; expected {:#x}\n",
- wpr2_lo,
- fb_layout.frts.start,
- );
-
- Err(EIO)
- }
- (wpr2_lo, wpr2_hi) => {
- dev_dbg!(dev, "WPR2: {:#x}-{:#x}\n", wpr2_lo, wpr2_hi);
- dev_dbg!(dev, "GPU instance built\n");
-
- Ok(())
- }
- }
+ bar: devres_bar,
+ })
}
- pub(crate) fn new(
- pdev: &pci::Device<device::Bound>,
- devres_bar: Arc<Devres<Bar0>>,
- ) -> Result<impl PinInit<Self>> {
- let bar = devres_bar.access(pdev.as_ref())?;
- let spec = Spec::new(bar)?;
- let fw = Firmware::new(pdev.as_ref(), spec.chipset, FIRMWARE_VERSION)?;
-
- dev_info!(
- pdev.as_ref(),
- "NVIDIA (Chipset: {}, Architecture: {:?}, Revision: {})\n",
- spec.chipset,
- spec.chipset.arch(),
- spec.revision
- );
-
- // We must wait for GFW_BOOT completion before doing any significant setup on the GPU.
- gfw::wait_gfw_boot_completion(bar)
- .inspect_err(|_| dev_err!(pdev.as_ref(), "GFW boot did not complete"))?;
-
- let sysmem_flush = SysmemFlush::register(pdev.as_ref(), bar, spec.chipset)?;
-
- let gsp_falcon = Falcon::<Gsp>::new(
- pdev.as_ref(),
- spec.chipset,
- bar,
- spec.chipset > Chipset::GA100,
- )?;
- gsp_falcon.clear_swgen0_intr(bar);
-
- let _sec2_falcon = Falcon::<Sec2>::new(pdev.as_ref(), spec.chipset, bar, true)?;
-
- let fb_layout = FbLayout::new(spec.chipset, bar)?;
- dev_dbg!(pdev.as_ref(), "{:#x?}\n", fb_layout);
-
- let bios = Vbios::new(pdev, bar)?;
-
- Self::run_fwsec_frts(pdev.as_ref(), &gsp_falcon, bar, &bios, &fb_layout)?;
-
- Ok(pin_init!(Self {
- spec,
- bar: devres_bar,
- fw,
- sysmem_flush,
- }))
+ /// Called when the corresponding [`Device`](device::Device) is unbound.
+ ///
+ /// Note: This method must only be called from `Driver::unbind`.
+ pub(crate) fn unbind(&self, dev: &device::Device<device::Core>) {
+ kernel::warn_on!(self
+ .bar
+ .access(dev)
+ .inspect(|bar| self.sysmem_flush.unregister(bar))
+ .is_err());
}
}
diff --git a/drivers/gpu/nova-core/gsp.rs b/drivers/gpu/nova-core/gsp.rs
new file mode 100644
index 000000000000..64e472e7a9d3
--- /dev/null
+++ b/drivers/gpu/nova-core/gsp.rs
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+
+mod boot;
+
+use kernel::prelude::*;
+
+mod fw;
+
+pub(crate) const GSP_PAGE_SHIFT: usize = 12;
+pub(crate) const GSP_PAGE_SIZE: usize = 1 << GSP_PAGE_SHIFT;
+
+/// GSP runtime data.
+///
+/// This is an empty pinned placeholder for now.
+#[pin_data]
+pub(crate) struct Gsp {}
+
+impl Gsp {
+ pub(crate) fn new() -> impl PinInit<Self> {
+ pin_init!(Self {})
+ }
+}
diff --git a/drivers/gpu/nova-core/gsp/boot.rs b/drivers/gpu/nova-core/gsp/boot.rs
new file mode 100644
index 000000000000..2800f3aee37d
--- /dev/null
+++ b/drivers/gpu/nova-core/gsp/boot.rs
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use kernel::device;
+use kernel::pci;
+use kernel::prelude::*;
+
+use crate::driver::Bar0;
+use crate::falcon::{gsp::Gsp, sec2::Sec2, Falcon};
+use crate::fb::FbLayout;
+use crate::firmware::{
+ booter::{BooterFirmware, BooterKind},
+ fwsec::{FwsecCommand, FwsecFirmware},
+ gsp::GspFirmware,
+ FIRMWARE_VERSION,
+};
+use crate::gpu::Chipset;
+use crate::regs;
+use crate::vbios::Vbios;
+
+impl super::Gsp {
+ /// Helper function to load and run the FWSEC-FRTS firmware and confirm that it has properly
+ /// created the WPR2 region.
+ fn run_fwsec_frts(
+ dev: &device::Device<device::Bound>,
+ falcon: &Falcon<Gsp>,
+ bar: &Bar0,
+ bios: &Vbios,
+ fb_layout: &FbLayout,
+ ) -> Result<()> {
+ // Check that the WPR2 region does not already exist - if it does, we cannot run
+ // FWSEC-FRTS until the GPU is reset.
+ if regs::NV_PFB_PRI_MMU_WPR2_ADDR_HI::read(bar).higher_bound() != 0 {
+ dev_err!(
+ dev,
+ "WPR2 region already exists - GPU needs to be reset to proceed\n"
+ );
+ return Err(EBUSY);
+ }
+
+ let fwsec_frts = FwsecFirmware::new(
+ dev,
+ falcon,
+ bar,
+ bios,
+ FwsecCommand::Frts {
+ frts_addr: fb_layout.frts.start,
+ frts_size: fb_layout.frts.end - fb_layout.frts.start,
+ },
+ )?;
+
+ // Run FWSEC-FRTS to create the WPR2 region.
+ fwsec_frts.run(dev, falcon, bar)?;
+
+ // SCRATCH_E contains the error code for FWSEC-FRTS.
+ let frts_status = regs::NV_PBUS_SW_SCRATCH_0E_FRTS_ERR::read(bar).frts_err_code();
+ if frts_status != 0 {
+ dev_err!(
+ dev,
+ "FWSEC-FRTS returned with error code {:#x}",
+ frts_status
+ );
+
+ return Err(EIO);
+ }
+
+ // Check that the WPR2 region has been created as we requested.
+ let (wpr2_lo, wpr2_hi) = (
+ regs::NV_PFB_PRI_MMU_WPR2_ADDR_LO::read(bar).lower_bound(),
+ regs::NV_PFB_PRI_MMU_WPR2_ADDR_HI::read(bar).higher_bound(),
+ );
+
+ match (wpr2_lo, wpr2_hi) {
+ (_, 0) => {
+ dev_err!(dev, "WPR2 region not created after running FWSEC-FRTS\n");
+
+ Err(EIO)
+ }
+ (wpr2_lo, _) if wpr2_lo != fb_layout.frts.start => {
+ dev_err!(
+ dev,
+ "WPR2 region created at unexpected address {:#x}; expected {:#x}\n",
+ wpr2_lo,
+ fb_layout.frts.start,
+ );
+
+ Err(EIO)
+ }
+ (wpr2_lo, wpr2_hi) => {
+ dev_dbg!(dev, "WPR2: {:#x}-{:#x}\n", wpr2_lo, wpr2_hi);
+ dev_dbg!(dev, "GPU instance built\n");
+
+ Ok(())
+ }
+ }
+ }
+
+ /// Attempt to boot the GSP.
+ ///
+ /// This is a GPU-dependent and complex procedure that involves loading firmware files from
+ /// user-space, patching them with signatures, and building intricate firmware-specific data
+ /// structures that the GSP will use at runtime.
+ ///
+ /// Upon successful return, the GSP is up and running.
+ pub(crate) fn boot(
+ self: Pin<&mut Self>,
+ pdev: &pci::Device<device::Bound>,
+ bar: &Bar0,
+ chipset: Chipset,
+ gsp_falcon: &Falcon<Gsp>,
+ sec2_falcon: &Falcon<Sec2>,
+ ) -> Result {
+ let dev = pdev.as_ref();
+
+ let bios = Vbios::new(dev, bar)?;
+
+ let _gsp_fw = KBox::pin_init(
+ GspFirmware::new(dev, chipset, FIRMWARE_VERSION)?,
+ GFP_KERNEL,
+ )?;
+
+ let fb_layout = FbLayout::new(chipset, bar)?;
+ dev_dbg!(dev, "{:#x?}\n", fb_layout);
+
+ Self::run_fwsec_frts(dev, gsp_falcon, bar, &bios, &fb_layout)?;
+
+ let _booter_loader = BooterFirmware::new(
+ dev,
+ BooterKind::Loader,
+ chipset,
+ FIRMWARE_VERSION,
+ sec2_falcon,
+ bar,
+ )?;
+
+ Ok(())
+ }
+}
diff --git a/drivers/gpu/nova-core/gsp/fw.rs b/drivers/gpu/nova-core/gsp/fw.rs
new file mode 100644
index 000000000000..34226dd00982
--- /dev/null
+++ b/drivers/gpu/nova-core/gsp/fw.rs
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
+
+mod r570_144;
+
+// Alias to avoid repeating the version number with every use.
+#[expect(unused)]
+use r570_144 as bindings;
diff --git a/drivers/gpu/nova-core/gsp/fw/r570_144.rs b/drivers/gpu/nova-core/gsp/fw/r570_144.rs
new file mode 100644
index 000000000000..35cb0370a7c9
--- /dev/null
+++ b/drivers/gpu/nova-core/gsp/fw/r570_144.rs
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Firmware bindings.
+//!
+//! Imports the generated bindings by `bindgen`.
+//!
+//! This module must not be used directly. Please abstract or re-export the needed symbols in the
+//! parent module instead.
+
+#![cfg_attr(test, allow(deref_nullptr))]
+#![cfg_attr(test, allow(unaligned_references))]
+#![cfg_attr(test, allow(unsafe_op_in_unsafe_fn))]
+#![allow(
+ dead_code,
+ unused_imports,
+ clippy::all,
+ clippy::undocumented_unsafe_blocks,
+ clippy::ptr_as_ptr,
+ clippy::ref_as_ptr,
+ missing_docs,
+ non_camel_case_types,
+ non_upper_case_globals,
+ non_snake_case,
+ improper_ctypes,
+ unreachable_pub,
+ unsafe_op_in_unsafe_fn
+)]
+use kernel::ffi;
+include!("r570_144/bindings.rs");
diff --git a/drivers/gpu/nova-core/gsp/fw/r570_144/bindings.rs b/drivers/gpu/nova-core/gsp/fw/r570_144/bindings.rs
new file mode 100644
index 000000000000..cec594032515
--- /dev/null
+++ b/drivers/gpu/nova-core/gsp/fw/r570_144/bindings.rs
@@ -0,0 +1 @@
+// SPDX-License-Identifier: GPL-2.0
diff --git a/drivers/gpu/nova-core/nova_core.rs b/drivers/gpu/nova-core/nova_core.rs
index cb2bbb30cba1..fffcaee2249f 100644
--- a/drivers/gpu/nova-core/nova_core.rs
+++ b/drivers/gpu/nova-core/nova_core.rs
@@ -9,6 +9,7 @@ mod fb;
mod firmware;
mod gfw;
mod gpu;
+mod gsp;
mod regs;
mod util;
mod vbios;
diff --git a/drivers/gpu/nova-core/regs.rs b/drivers/gpu/nova-core/regs.rs
index d49fddf6a3c6..206dab2e1335 100644
--- a/drivers/gpu/nova-core/regs.rs
+++ b/drivers/gpu/nova-core/regs.rs
@@ -5,11 +5,11 @@
#![allow(non_camel_case_types)]
#[macro_use]
-mod macros;
+pub(crate) mod macros;
use crate::falcon::{
DmaTrfCmdSize, FalconCoreRev, FalconCoreRevSubversion, FalconFbifMemType, FalconFbifTarget,
- FalconModSelAlgo, FalconSecurityModel, PeregrineCoreSelect,
+ FalconModSelAlgo, FalconSecurityModel, PFalcon2Base, PFalconBase, PeregrineCoreSelect,
};
use crate::gpu::{Architecture, Chipset};
use kernel::prelude::*;
@@ -28,7 +28,7 @@ impl NV_PMC_BOOT_0 {
/// Combines `architecture_0` and `architecture_1` to obtain the architecture of the chip.
pub(crate) fn architecture(self) -> Result<Architecture> {
Architecture::try_from(
- self.architecture_0() | (self.architecture_1() << Self::ARCHITECTURE_0.len()),
+ self.architecture_0() | (self.architecture_1() << Self::ARCHITECTURE_0_RANGE.len()),
)
}
@@ -36,7 +36,8 @@ impl NV_PMC_BOOT_0 {
pub(crate) fn chipset(self) -> Result<Chipset> {
self.architecture()
.map(|arch| {
- ((arch as u32) << Self::IMPLEMENTATION.len()) | u32::from(self.implementation())
+ ((arch as u32) << Self::IMPLEMENTATION_RANGE.len())
+ | u32::from(self.implementation())
})
.and_then(Chipset::try_from)
}
@@ -44,8 +45,10 @@ impl NV_PMC_BOOT_0 {
// PBUS
-// TODO[REGA]: this is an array of registers.
-register!(NV_PBUS_SW_SCRATCH_0E@0x00001438 {
+register!(NV_PBUS_SW_SCRATCH @ 0x00001400[64] {});
+
+register!(NV_PBUS_SW_SCRATCH_0E_FRTS_ERR => NV_PBUS_SW_SCRATCH[0xe],
+ "scratch register 0xe used as FRTS firmware error code" {
31:16 frts_err_code as u16;
});
@@ -123,13 +126,12 @@ register!(NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_PRIV_LEVEL_MASK @ 0x00118128,
0:0 read_protection_level0 as bool, "Set after FWSEC lowers its protection level";
});
-// TODO[REGA]: This is an array of registers.
-register!(NV_PGC6_AON_SECURE_SCRATCH_GROUP_05 @ 0x00118234 {
- 31:0 value as u32;
-});
+// OpenRM defines this as a register array, but doesn't specify its size and only uses its first
+// element. Be conservative until we know the actual size or need to use more registers.
+register!(NV_PGC6_AON_SECURE_SCRATCH_GROUP_05 @ 0x00118234[1] {});
register!(
- NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT => NV_PGC6_AON_SECURE_SCRATCH_GROUP_05,
+ NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT => NV_PGC6_AON_SECURE_SCRATCH_GROUP_05[0],
"Scratch group 05 register 0 used as GFW boot progress indicator" {
7:0 progress as u8, "Progress of GFW boot (0xff means completed)";
}
@@ -180,38 +182,40 @@ impl NV_PDISP_VGA_WORKSPACE_BASE {
// FUSE
-register!(NV_FUSE_OPT_FPF_NVDEC_UCODE1_VERSION @ 0x00824100 {
+pub(crate) const NV_FUSE_OPT_FPF_SIZE: usize = 16;
+
+register!(NV_FUSE_OPT_FPF_NVDEC_UCODE1_VERSION @ 0x00824100[NV_FUSE_OPT_FPF_SIZE] {
15:0 data as u16;
});
-register!(NV_FUSE_OPT_FPF_SEC2_UCODE1_VERSION @ 0x00824140 {
+register!(NV_FUSE_OPT_FPF_SEC2_UCODE1_VERSION @ 0x00824140[NV_FUSE_OPT_FPF_SIZE] {
15:0 data as u16;
});
-register!(NV_FUSE_OPT_FPF_GSP_UCODE1_VERSION @ 0x008241c0 {
+register!(NV_FUSE_OPT_FPF_GSP_UCODE1_VERSION @ 0x008241c0[NV_FUSE_OPT_FPF_SIZE] {
15:0 data as u16;
});
// PFALCON
-register!(NV_PFALCON_FALCON_IRQSCLR @ +0x00000004 {
+register!(NV_PFALCON_FALCON_IRQSCLR @ PFalconBase[0x00000004] {
4:4 halt as bool;
6:6 swgen0 as bool;
});
-register!(NV_PFALCON_FALCON_MAILBOX0 @ +0x00000040 {
+register!(NV_PFALCON_FALCON_MAILBOX0 @ PFalconBase[0x00000040] {
31:0 value as u32;
});
-register!(NV_PFALCON_FALCON_MAILBOX1 @ +0x00000044 {
+register!(NV_PFALCON_FALCON_MAILBOX1 @ PFalconBase[0x00000044] {
31:0 value as u32;
});
-register!(NV_PFALCON_FALCON_RM @ +0x00000084 {
+register!(NV_PFALCON_FALCON_RM @ PFalconBase[0x00000084] {
31:0 value as u32;
});
-register!(NV_PFALCON_FALCON_HWCFG2 @ +0x000000f4 {
+register!(NV_PFALCON_FALCON_HWCFG2 @ PFalconBase[0x000000f4] {
10:10 riscv as bool;
12:12 mem_scrubbing as bool, "Set to 0 after memory scrubbing is completed";
31:31 reset_ready as bool, "Signal indicating that reset is completed (GA102+)";
@@ -224,17 +228,17 @@ impl NV_PFALCON_FALCON_HWCFG2 {
}
}
-register!(NV_PFALCON_FALCON_CPUCTL @ +0x00000100 {
+register!(NV_PFALCON_FALCON_CPUCTL @ PFalconBase[0x00000100] {
1:1 startcpu as bool;
4:4 halted as bool;
6:6 alias_en as bool;
});
-register!(NV_PFALCON_FALCON_BOOTVEC @ +0x00000104 {
+register!(NV_PFALCON_FALCON_BOOTVEC @ PFalconBase[0x00000104] {
31:0 value as u32;
});
-register!(NV_PFALCON_FALCON_DMACTL @ +0x0000010c {
+register!(NV_PFALCON_FALCON_DMACTL @ PFalconBase[0x0000010c] {
0:0 require_ctx as bool;
1:1 dmem_scrubbing as bool;
2:2 imem_scrubbing as bool;
@@ -242,15 +246,15 @@ register!(NV_PFALCON_FALCON_DMACTL @ +0x0000010c {
7:7 secure_stat as bool;
});
-register!(NV_PFALCON_FALCON_DMATRFBASE @ +0x00000110 {
+register!(NV_PFALCON_FALCON_DMATRFBASE @ PFalconBase[0x00000110] {
31:0 base as u32;
});
-register!(NV_PFALCON_FALCON_DMATRFMOFFS @ +0x00000114 {
+register!(NV_PFALCON_FALCON_DMATRFMOFFS @ PFalconBase[0x00000114] {
23:0 offs as u32;
});
-register!(NV_PFALCON_FALCON_DMATRFCMD @ +0x00000118 {
+register!(NV_PFALCON_FALCON_DMATRFCMD @ PFalconBase[0x00000118] {
0:0 full as bool;
1:1 idle as bool;
3:2 sec as u8;
@@ -261,60 +265,62 @@ register!(NV_PFALCON_FALCON_DMATRFCMD @ +0x00000118 {
16:16 set_dmtag as u8;
});
-register!(NV_PFALCON_FALCON_DMATRFFBOFFS @ +0x0000011c {
+register!(NV_PFALCON_FALCON_DMATRFFBOFFS @ PFalconBase[0x0000011c] {
31:0 offs as u32;
});
-register!(NV_PFALCON_FALCON_DMATRFBASE1 @ +0x00000128 {
+register!(NV_PFALCON_FALCON_DMATRFBASE1 @ PFalconBase[0x00000128] {
8:0 base as u16;
});
-register!(NV_PFALCON_FALCON_HWCFG1 @ +0x0000012c {
+register!(NV_PFALCON_FALCON_HWCFG1 @ PFalconBase[0x0000012c] {
3:0 core_rev as u8 ?=> FalconCoreRev, "Core revision";
5:4 security_model as u8 ?=> FalconSecurityModel, "Security model";
7:6 core_rev_subversion as u8 ?=> FalconCoreRevSubversion, "Core revision subversion";
});
-register!(NV_PFALCON_FALCON_CPUCTL_ALIAS @ +0x00000130 {
+register!(NV_PFALCON_FALCON_CPUCTL_ALIAS @ PFalconBase[0x00000130] {
1:1 startcpu as bool;
});
// Actually known as `NV_PSEC_FALCON_ENGINE` and `NV_PGSP_FALCON_ENGINE` depending on the falcon
// instance.
-register!(NV_PFALCON_FALCON_ENGINE @ +0x000003c0 {
+register!(NV_PFALCON_FALCON_ENGINE @ PFalconBase[0x000003c0] {
0:0 reset as bool;
});
-// TODO[REGA]: this is an array of registers.
-register!(NV_PFALCON_FBIF_TRANSCFG @ +0x00000600 {
+register!(NV_PFALCON_FBIF_TRANSCFG @ PFalconBase[0x00000600[8]] {
1:0 target as u8 ?=> FalconFbifTarget;
2:2 mem_type as bool => FalconFbifMemType;
});
-register!(NV_PFALCON_FBIF_CTL @ +0x00000624 {
+register!(NV_PFALCON_FBIF_CTL @ PFalconBase[0x00000624] {
7:7 allow_phys_no_ctx as bool;
});
-register!(NV_PFALCON2_FALCON_MOD_SEL @ +0x00001180 {
+/* PFALCON2 */
+
+register!(NV_PFALCON2_FALCON_MOD_SEL @ PFalcon2Base[0x00000180] {
7:0 algo as u8 ?=> FalconModSelAlgo;
});
-register!(NV_PFALCON2_FALCON_BROM_CURR_UCODE_ID @ +0x00001198 {
+register!(NV_PFALCON2_FALCON_BROM_CURR_UCODE_ID @ PFalcon2Base[0x00000198] {
7:0 ucode_id as u8;
});
-register!(NV_PFALCON2_FALCON_BROM_ENGIDMASK @ +0x0000119c {
+register!(NV_PFALCON2_FALCON_BROM_ENGIDMASK @ PFalcon2Base[0x0000019c] {
31:0 value as u32;
});
-// TODO[REGA]: this is an array of registers.
-register!(NV_PFALCON2_FALCON_BROM_PARAADDR @ +0x00001210 {
+// OpenRM defines this as a register array, but doesn't specify its size and only uses its first
+// element. Be conservative until we know the actual size or need to use more registers.
+register!(NV_PFALCON2_FALCON_BROM_PARAADDR @ PFalcon2Base[0x00000210[1]] {
31:0 value as u32;
});
// PRISCV
-register!(NV_PRISCV_RISCV_BCR_CTRL @ +0x00001668 {
+register!(NV_PRISCV_RISCV_BCR_CTRL @ PFalconBase[0x00001668] {
0:0 valid as bool;
4:4 core_select as bool => PeregrineCoreSelect;
8:8 br_fetch as bool;
diff --git a/drivers/gpu/nova-core/regs/macros.rs b/drivers/gpu/nova-core/regs/macros.rs
index a3e6de1779d4..8058e1696df9 100644
--- a/drivers/gpu/nova-core/regs/macros.rs
+++ b/drivers/gpu/nova-core/regs/macros.rs
@@ -1,17 +1,27 @@
// SPDX-License-Identifier: GPL-2.0
-//! Macro to define register layout and accessors.
+//! `register!` macro to define register layout and accessors.
//!
//! A single register typically includes several fields, which are accessed through a combination
//! of bit-shift and mask operations that introduce a class of potential mistakes, notably because
//! not all possible field values are necessarily valid.
//!
-//! The macro in this module allow to define, using an intruitive and readable syntax, a dedicated
-//! type for each register with its own field accessors that can return an error is a field's value
-//! is invalid.
+//! The `register!` macro in this module provides an intuitive and readable syntax for defining a
+//! dedicated type for each register. Each such type comes with its own field accessors that can
+//! return an error if a field's value is invalid.
-/// Defines a dedicated type for a register with an absolute offset, alongside with getter and
-/// setter methods for its fields and methods to read and write it from an `Io` region.
+/// Trait providing a base address to be added to the offset of a relative register to obtain
+/// its absolute offset.
+///
+/// The `T` generic argument is used to distinguish which base to use, in case a type provides
+/// several bases. It is given to the `register!` macro to restrict the use of the register to
+/// implementors of this particular variant.
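+///
+/// A complete usage example is given in the "Relative registers" section of the [`register!`]
+/// macro documentation below.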
+pub(crate) trait RegisterBase<T> {
+ const BASE: usize;
+}
+
+/// Defines a dedicated type for a register with an absolute offset, including getter and setter
+/// methods for its fields and methods to read and write it from an `Io` region.
///
/// Example:
///
@@ -24,7 +34,7 @@
/// ```
///
/// This defines a `BOOT_0` type which can be read or written from offset `0x100` of an `Io`
-/// region. It is composed of 3 fields, for instance `minor_revision` is made of the 4 less
+/// region. It is composed of 3 fields, for instance `minor_revision` is made of the 4 least
/// significant bits of the register. Each field can be accessed and modified using accessor
/// methods:
///
@@ -33,130 +43,344 @@
/// let boot0 = BOOT_0::read(&bar);
/// pr_info!("chip revision: {}.{}", boot0.major_revision(), boot0.minor_revision());
///
-/// // `Chipset::try_from` will be called with the value of the field and returns an error if the
-/// // value is invalid.
+/// // `Chipset::try_from` is called with the value of the `chipset` field and returns an
+/// // error if it is invalid.
/// let chipset = boot0.chipset()?;
///
/// // Update some fields and write the value back.
/// boot0.set_major_revision(3).set_minor_revision(10).write(&bar);
///
-/// // Or just read and update the register in a single step:
+/// // Or, just read and update the register in a single step:
/// BOOT_0::alter(&bar, |r| r.set_major_revision(3).set_minor_revision(10));
/// ```
///
-/// Fields can be defined as follows:
+/// Fields are defined as follows:
///
-/// - `as <type>` simply returns the field value casted as the requested integer type, typically
-/// `u32`, `u16`, `u8` or `bool`. Note that `bool` fields must have a range of 1 bit.
+/// - `as <type>` simply returns the field value cast to `<type>`, typically `u32`, `u16`, `u8` or
+/// `bool`. Note that `bool` fields must have a range of 1 bit.
/// - `as <type> => <into_type>` calls `<into_type>`'s `From::<<type>>` implementation and returns
/// the result.
/// - `as <type> ?=> <try_into_type>` calls `<try_into_type>`'s `TryFrom::<<type>>` implementation
-/// and returns the result. This is useful on fields for which not all values are value.
+/// and returns the result. This is useful with fields for which not all values are valid.
///
/// The documentation strings are optional. If present, they will be added to the type's
/// definition, or the field getter and setter methods they are attached to.
///
-/// Putting a `+` before the address of the register makes it relative to a base: the `read` and
-/// `write` methods take a `base` argument that is added to the specified address before access,
-/// and `try_read` and `try_write` methods are also created, allowing access with offsets unknown
-/// at compile-time:
+/// It is also possible to create an alias register by using the `=> ALIAS` syntax. This is useful
+/// for cases where a register's interpretation depends on the context:
///
/// ```no_run
-/// register!(CPU_CTL @ +0x0000010, "CPU core control" {
-/// 0:0 start as bool, "Start the CPU core";
+/// register!(SCRATCH @ 0x00000200, "Scratch register" {
+/// 31:0 value as u32, "Raw value";
/// });
///
-/// // Flip the `start` switch for the CPU core which base address is at `CPU_BASE`.
-/// let cpuctl = CPU_CTL::read(&bar, CPU_BASE);
-/// pr_info!("CPU CTL: {:#x}", cpuctl);
-/// cpuctl.set_start(true).write(&bar, CPU_BASE);
+/// register!(SCRATCH_BOOT_STATUS => SCRATCH, "Boot status of the firmware" {
+/// 0:0 completed as bool, "Whether the firmware has completed booting";
+/// });
/// ```
///
-/// It is also possible to create a alias register by using the `=> ALIAS` syntax. This is useful
-/// for cases where a register's interpretation depends on the context:
+/// In this example, `SCRATCH_BOOT_STATUS` uses the same I/O address as `SCRATCH`, while also
+/// providing its own `completed` field.
+///
+/// ## Relative registers
+///
+/// A register can be defined as being accessible from a fixed offset of a provided base. For
+/// instance, imagine the following I/O space:
+///
+/// ```text
+/// +-----------------------------+
+/// | ... |
+/// | |
+/// 0x100--->+------------CPU0-------------+
+/// | |
+/// 0x110--->+-----------------------------+
+/// | CPU_CTL |
+/// +-----------------------------+
+/// | ... |
+/// | |
+/// | |
+/// 0x200--->+------------CPU1-------------+
+/// | |
+/// 0x210--->+-----------------------------+
+/// | CPU_CTL |
+/// +-----------------------------+
+/// | ... |
+/// +-----------------------------+
+/// ```
+///
+/// `CPU0` and `CPU1` both have a `CPU_CTL` register that starts at offset `0x10` of their I/O
+/// space segment. Since both instances of `CPU_CTL` share the same layout, we don't want to define
+/// them twice and would prefer a way to select which one to use from a single definition.
+///
+/// This can be done using the `Base[Offset]` syntax when specifying the register's address.
+///
+/// `Base` is an arbitrary type (typically a ZST) to be used as a generic parameter of the
+/// [`RegisterBase`] trait to provide the base as a constant, i.e. each type providing a base for
+/// this register needs to implement `RegisterBase<Base>`. Here is the above example translated
+/// into code:
///
/// ```no_run
-/// register!(SCRATCH_0 @ 0x0000100, "Scratch register 0" {
-/// 31:0 value as u32, "Raw value";
+/// // Type used to identify the base.
+/// pub(crate) struct CpuCtlBase;
///
-/// register!(SCRATCH_0_BOOT_STATUS => SCRATCH_0, "Boot status of the firmware" {
-/// 0:0 completed as bool, "Whether the firmware has completed booting";
+/// // ZST describing `CPU0`.
+/// struct Cpu0;
+/// impl RegisterBase<CpuCtlBase> for Cpu0 {
+/// const BASE: usize = 0x100;
+/// }
+/// // Singleton of `CPU0` used to identify it.
+/// const CPU0: Cpu0 = Cpu0;
+///
+/// // ZST describing `CPU1`.
+/// struct Cpu1;
+/// impl RegisterBase<CpuCtlBase> for Cpu1 {
+/// const BASE: usize = 0x200;
+/// }
+/// // Singleton of `CPU1` used to identify it.
+/// const CPU1: Cpu1 = Cpu1;
+///
+/// // This makes `CPU_CTL` accessible from all implementors of `RegisterBase<CpuCtlBase>`.
+/// register!(CPU_CTL @ CpuCtlBase[0x10], "CPU core control" {
+/// 0:0 start as bool, "Start the CPU core";
+/// });
+///
+/// // The `read`, `write` and `alter` methods of relative registers take an extra `base` argument
+/// // that is used to resolve its final address by adding its `BASE` to the offset of the
+/// // register.
+///
+/// // Start `CPU0`.
+/// CPU_CTL::alter(bar, &CPU0, |r| r.set_start(true));
+///
+/// // Start `CPU1`.
+/// CPU_CTL::alter(bar, &CPU1, |r| r.set_start(true));
+///
+/// // Aliases can also be defined for relative registers.
+/// register!(CPU_CTL_ALIAS => CpuCtlBase[CPU_CTL], "Alias to CPU core control" {
+/// 1:1 alias_start as bool, "Start the aliased CPU core";
+/// });
+///
+/// // Start the aliased `CPU0`.
+/// CPU_CTL_ALIAS::alter(bar, &CPU0, |r| r.set_alias_start(true));
/// ```
///
-/// In this example, `SCRATCH_0_BOOT_STATUS` uses the same I/O address as `SCRATCH_0`, while also
-/// providing its own `completed` method.
+/// ## Arrays of registers
+///
+/// Some I/O areas contain consecutive values that can be interpreted in the same way. These areas
+/// can be defined as an array of identical registers, allowing them to be accessed by index with
+/// compile-time or runtime bound checking. Simply define their address as `Address[Size]`; their
+/// `read`, `write` and `alter` methods then take an extra `idx` parameter:
+///
+/// ```no_run
+/// # fn no_run() -> Result<(), Error> {
+/// # fn get_scratch_idx() -> usize {
+/// # 0x15
+/// # }
+/// // Array of 64 consecutive registers with the same layout starting at offset `0x80`.
+/// register!(SCRATCH @ 0x00000080[64], "Scratch registers" {
+/// 31:0 value as u32;
+/// });
+///
+/// // Read scratch register 0, i.e. I/O address `0x80`.
+/// let scratch_0 = SCRATCH::read(bar, 0).value();
+/// // Read scratch register 15, i.e. I/O address `0x80 + (15 * 4)`.
+/// let scratch_15 = SCRATCH::read(bar, 15).value();
+///
+/// // This is out of bounds and won't build.
+/// // let scratch_128 = SCRATCH::read(bar, 128).value();
+///
+/// // Runtime-obtained array index.
+/// let scratch_idx = get_scratch_idx();
+/// // Access on a runtime index returns an error if it is out-of-bounds.
+/// let some_scratch = SCRATCH::try_read(bar, scratch_idx)?.value();
+///
+/// // Alias to a particular register in an array.
+/// // Here `SCRATCH[8]` is used to convey the firmware exit code.
+/// register!(FIRMWARE_STATUS => SCRATCH[8], "Firmware exit status code" {
+/// 7:0 status as u8;
+/// });
+///
+/// let status = FIRMWARE_STATUS::read(bar).status();
+///
+/// // Non-contiguous register arrays can be defined by adding a stride parameter.
+/// // Here, the 16 registers of the array are spaced 8 bytes apart, meaning that the
+/// // registers of the two declarations below are interleaved.
+/// register!(SCRATCH_INTERLEAVED_0 @ 0x000000c0[16 ; 8], "Scratch registers bank 0" {
+/// 31:0 value as u32;
+/// });
+/// register!(SCRATCH_INTERLEAVED_1 @ 0x000000c4[16 ; 8], "Scratch registers bank 1" {
+/// 31:0 value as u32;
+/// });
+/// # Ok(())
+/// # }
+/// ```
+///
+/// ## Relative arrays of registers
+///
+/// Combining the two features described in the sections above, arrays of registers accessible from
+/// a base can also be defined:
+///
+/// ```no_run
+/// # fn no_run() -> Result<(), Error> {
+/// # fn get_scratch_idx() -> usize {
+/// # 0x15
+/// # }
+/// // Type used as parameter of `RegisterBase` to specify the base.
+/// pub(crate) struct CpuCtlBase;
+///
+/// // ZST describing `CPU0`.
+/// struct Cpu0;
+/// impl RegisterBase<CpuCtlBase> for Cpu0 {
+/// const BASE: usize = 0x100;
+/// }
+/// // Singleton of `CPU0` used to identify it.
+/// const CPU0: Cpu0 = Cpu0;
+///
+/// // ZST describing `CPU1`.
+/// struct Cpu1;
+/// impl RegisterBase<CpuCtlBase> for Cpu1 {
+/// const BASE: usize = 0x200;
+/// }
+/// // Singleton of `CPU1` used to identify it.
+/// const CPU1: Cpu1 = Cpu1;
+///
+/// // 64 per-CPU scratch registers, arranged as a contiguous array.
+/// register!(CPU_SCRATCH @ CpuCtlBase[0x00000080[64]], "Per-CPU scratch registers" {
+/// 31:0 value as u32;
+/// });
+///
+/// let cpu0_scratch_0 = CPU_SCRATCH::read(bar, &Cpu0, 0).value();
+/// let cpu1_scratch_15 = CPU_SCRATCH::read(bar, &Cpu1, 15).value();
+///
+/// // This won't build.
+/// // let cpu0_scratch_128 = CPU_SCRATCH::read(bar, &Cpu0, 128).value();
+///
+/// // Runtime-obtained array index.
+/// let scratch_idx = get_scratch_idx();
+/// // Access on a runtime value returns an error if it is out-of-bounds.
+/// let cpu0_some_scratch = CPU_SCRATCH::try_read(bar, &Cpu0, scratch_idx)?.value();
+///
+/// // `SCRATCH[8]` is used to convey the firmware exit code.
+/// register!(CPU_FIRMWARE_STATUS => CpuCtlBase[CPU_SCRATCH[8]],
+/// "Per-CPU firmware exit status code" {
+/// 7:0 status as u8;
+/// });
+///
+/// let cpu0_status = CPU_FIRMWARE_STATUS::read(bar, &Cpu0).status();
+///
+/// // Non-contiguous register arrays can be defined by adding a stride parameter.
+/// // Here, the 16 registers of the array are spaced 8 bytes apart, meaning that the
+/// // registers of the two declarations below are interleaved.
+/// register!(CPU_SCRATCH_INTERLEAVED_0 @ CpuCtlBase[0x00000d00[16 ; 8]],
+/// "Scratch registers bank 0" {
+/// 31:0 value as u32;
+/// });
+/// register!(CPU_SCRATCH_INTERLEAVED_1 @ CpuCtlBase[0x00000d04[16 ; 8]],
+/// "Scratch registers bank 1" {
+/// 31:0 value as u32;
+/// });
+/// # Ok(())
+/// # }
+/// ```
macro_rules! register {
// Creates a register at a fixed offset of the MMIO space.
+ ($name:ident @ $offset:literal $(, $comment:literal)? { $($fields:tt)* } ) => {
+ register!(@core $name $(, $comment)? { $($fields)* } );
+ register!(@io_fixed $name @ $offset);
+ };
+
+ // Creates an alias register of fixed offset register `alias` with its own fields.
+ ($name:ident => $alias:ident $(, $comment:literal)? { $($fields:tt)* } ) => {
+ register!(@core $name $(, $comment)? { $($fields)* } );
+ register!(@io_fixed $name @ $alias::OFFSET);
+ };
+
+ // Creates a register at a relative offset from a base address provider.
+ ($name:ident @ $base:ty [ $offset:literal ] $(, $comment:literal)? { $($fields:tt)* } ) => {
+ register!(@core $name $(, $comment)? { $($fields)* } );
+ register!(@io_relative $name @ $base [ $offset ]);
+ };
+
+ // Creates an alias register of relative offset register `alias` with its own fields.
+ ($name:ident => $base:ty [ $alias:ident ] $(, $comment:literal)? { $($fields:tt)* }) => {
+ register!(@core $name $(, $comment)? { $($fields)* } );
+ register!(@io_relative $name @ $base [ $alias::OFFSET ]);
+ };
+
+ // Creates an array of registers at a fixed offset of the MMIO space.
(
- $name:ident @ $offset:literal $(, $comment:literal)? {
+ $name:ident @ $offset:literal [ $size:expr ; $stride:expr ] $(, $comment:literal)? {
$($fields:tt)*
}
) => {
- register!(@common $name @ $offset $(, $comment)?);
- register!(@field_accessors $name { $($fields)* });
- register!(@io $name @ $offset);
+ static_assert!(::core::mem::size_of::<u32>() <= $stride);
+ register!(@core $name $(, $comment)? { $($fields)* } );
+ register!(@io_array $name @ $offset [ $size ; $stride ]);
};
- // Creates a alias register of fixed offset register `alias` with its own fields.
+ // Shortcut for contiguous array of registers (stride == size of element).
(
- $name:ident => $alias:ident $(, $comment:literal)? {
+ $name:ident @ $offset:literal [ $size:expr ] $(, $comment:literal)? {
$($fields:tt)*
}
) => {
- register!(@common $name @ $alias::OFFSET $(, $comment)?);
- register!(@field_accessors $name { $($fields)* });
- register!(@io $name @ $alias::OFFSET);
+ register!($name @ $offset [ $size ; ::core::mem::size_of::<u32>() ] $(, $comment)? {
+ $($fields)*
+ } );
+ };
+
+ // Creates an array of registers at a relative offset from a base address provider.
+ (
+ $name:ident @ $base:ty [ $offset:literal [ $size:expr ; $stride:expr ] ]
+ $(, $comment:literal)? { $($fields:tt)* }
+ ) => {
+ static_assert!(::core::mem::size_of::<u32>() <= $stride);
+ register!(@core $name $(, $comment)? { $($fields)* } );
+ register!(@io_relative_array $name @ $base [ $offset [ $size ; $stride ] ]);
};
- // Creates a register at a relative offset from a base address.
+ // Shortcut for contiguous array of relative registers (stride == size of element).
(
- $name:ident @ + $offset:literal $(, $comment:literal)? {
+ $name:ident @ $base:ty [ $offset:literal [ $size:expr ] ] $(, $comment:literal)? {
$($fields:tt)*
}
) => {
- register!(@common $name @ $offset $(, $comment)?);
- register!(@field_accessors $name { $($fields)* });
- register!(@io$name @ + $offset);
+ register!($name @ $base [ $offset [ $size ; ::core::mem::size_of::<u32>() ] ]
+ $(, $comment)? { $($fields)* } );
};
- // Creates a alias register of relative offset register `alias` with its own fields.
+ // Creates an alias, with its own fields, of the register at index `idx` of the relative
+ // register array `alias`.
(
- $name:ident => + $alias:ident $(, $comment:literal)? {
+ $name:ident => $base:ty [ $alias:ident [ $idx:expr ] ] $(, $comment:literal)? {
$($fields:tt)*
}
) => {
- register!(@common $name @ $alias::OFFSET $(, $comment)?);
- register!(@field_accessors $name { $($fields)* });
- register!(@io $name @ + $alias::OFFSET);
+ static_assert!($idx < $alias::SIZE);
+ register!(@core $name $(, $comment)? { $($fields)* } );
+ register!(@io_relative $name @ $base [ $alias::OFFSET + $idx * $alias::STRIDE ] );
+ };
+
+ // Creates an alias, with its own fields, of the register at index `idx` of the array of registers `alias`.
+ // This rule belongs to the (non-relative) register arrays set, but needs to be put last
+ // to avoid it being interpreted in place of the relative register array alias rule.
+ ($name:ident => $alias:ident [ $idx:expr ] $(, $comment:literal)? { $($fields:tt)* }) => {
+ static_assert!($idx < $alias::SIZE);
+ register!(@core $name $(, $comment)? { $($fields)* } );
+ register!(@io_fixed $name @ $alias::OFFSET + $idx * $alias::STRIDE );
};
// All rules below are helpers.
- // Defines the wrapper `$name` type, as well as its relevant implementations (`Debug`, `BitOr`,
- // and conversion to regular `u32`).
- (@common $name:ident @ $offset:expr $(, $comment:literal)?) => {
+ // Defines the wrapper `$name` type, as well as its relevant implementations (`Debug`,
+ // `Default`, `BitOr`, and conversion to the value type) and field accessor methods.
+ (@core $name:ident $(, $comment:literal)? { $($fields:tt)* }) => {
$(
#[doc=$comment]
)?
#[repr(transparent)]
- #[derive(Clone, Copy, Default)]
+ #[derive(Clone, Copy)]
pub(crate) struct $name(u32);
- #[allow(dead_code)]
- impl $name {
- pub(crate) const OFFSET: usize = $offset;
- }
-
- // TODO[REGA]: display the raw hex value, then the value of all the fields. This requires
- // matching the fields, which will complexify the syntax considerably...
- impl ::core::fmt::Debug for $name {
- fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
- f.debug_tuple(stringify!($name))
- .field(&format_args!("0x{0:x}", &self.0))
- .finish()
- }
- }
-
impl ::core::ops::BitOr for $name {
type Output = Self;
@@ -170,6 +394,34 @@ macro_rules! register {
reg.0
}
}
+
+ register!(@fields_dispatcher $name { $($fields)* });
+ };
+
+ // Captures the fields and passes them to all the implementers that require field information.
+ //
+ // Used to simplify the matching rules for implementers, so they don't need to match the entire
+ // complex fields rule even though they only make use of part of it.
+ (@fields_dispatcher $name:ident {
+ $($hi:tt:$lo:tt $field:ident as $type:tt
+ $(?=> $try_into_type:ty)?
+ $(=> $into_type:ty)?
+ $(, $comment:literal)?
+ ;
+ )*
+ }
+ ) => {
+ register!(@field_accessors $name {
+ $(
+ $hi:$lo $field as $type
+ $(?=> $try_into_type)?
+ $(=> $into_type)?
+ $(, $comment)?
+ ;
+ )*
+ });
+ register!(@debug $name { $($field;)* });
+ register!(@default $name { $($field;)* });
};
// Defines all the field getter/methods methods for `$name`.
@@ -228,7 +480,7 @@ macro_rules! register {
$(, $comment:literal)?;
) => {
register!(
- @leaf_accessor $name $hi:$lo $field as bool
+ @leaf_accessor $name $hi:$lo $field
{ |f| <$into_type>::from(if f != 0 { true } else { false }) }
$into_type => $into_type $(, $comment)?;
);
@@ -246,7 +498,7 @@ macro_rules! register {
@field_accessor $name:ident $hi:tt:$lo:tt $field:ident as $type:tt ?=> $try_into_type:ty
$(, $comment:literal)?;
) => {
- register!(@leaf_accessor $name $hi:$lo $field as $type
+ register!(@leaf_accessor $name $hi:$lo $field
{ |f| <$try_into_type>::try_from(f as $type) } $try_into_type =>
::core::result::Result<
$try_into_type,
@@ -260,11 +512,11 @@ macro_rules! register {
@field_accessor $name:ident $hi:tt:$lo:tt $field:ident as $type:tt => $into_type:ty
$(, $comment:literal)?;
) => {
- register!(@leaf_accessor $name $hi:$lo $field as $type
+ register!(@leaf_accessor $name $hi:$lo $field
{ |f| <$into_type>::from(f as $type) } $into_type => $into_type $(, $comment)?;);
};
- // Shortcut for fields defined as non-`bool` without the `=>` or `?=>` syntax.
+ // Shortcut for non-boolean fields defined without the `=>` or `?=>` syntax.
(
@field_accessor $name:ident $hi:tt:$lo:tt $field:ident as $type:tt
$(, $comment:literal)?;
@@ -274,11 +526,11 @@ macro_rules! register {
// Generates the accessor methods for a single field.
(
- @leaf_accessor $name:ident $hi:tt:$lo:tt $field:ident as $type:ty
+ @leaf_accessor $name:ident $hi:tt:$lo:tt $field:ident
{ $process:expr } $to_type:ty => $res_type:ty $(, $comment:literal)?;
) => {
::kernel::macros::paste!(
- const [<$field:upper>]: ::core::ops::RangeInclusive<u8> = $lo..=$hi;
+ const [<$field:upper _RANGE>]: ::core::ops::RangeInclusive<u8> = $lo..=$hi;
const [<$field:upper _MASK>]: u32 = ((((1 << $hi) - 1) << 1) + 1) - ((1 << $lo) - 1);
const [<$field:upper _SHIFT>]: u32 = Self::[<$field:upper _MASK>].trailing_zeros();
);
@@ -287,7 +539,7 @@ macro_rules! register {
#[doc="Returns the value of this field:"]
#[doc=$comment]
)?
- #[inline]
+ #[inline(always)]
pub(crate) fn $field(self) -> $res_type {
::kernel::macros::paste!(
const MASK: u32 = $name::[<$field:upper _MASK>];
@@ -303,7 +555,7 @@ macro_rules! register {
#[doc="Sets the value of this field:"]
#[doc=$comment]
)?
- #[inline]
+ #[inline(always)]
pub(crate) fn [<set_ $field>](mut self, value: $to_type) -> Self {
const MASK: u32 = $name::[<$field:upper _MASK>];
const SHIFT: u32 = $name::[<$field:upper _SHIFT>];
@@ -315,25 +567,64 @@ macro_rules! register {
);
};
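The `_MASK` expression above is written so it never shifts by `$hi + 1`, which would overflow `u32` for a field whose high bit is 31. A standalone, runnable sketch of the same arithmetic for a hypothetical `7:4` field:

    const HI: u32 = 7;
    const LO: u32 = 4;
    // Set bits HI..=0 without ever shifting by HI + 1, then clear the bits below LO.
    const MASK: u32 = ((((1 << HI) - 1) << 1) + 1) - ((1 << LO) - 1); // 0xf0
    const SHIFT: u32 = MASK.trailing_zeros(); // 4

    fn main() {
        let raw = 0x0000_00a5_u32;
        assert_eq!(MASK, 0xf0);
        assert_eq!((raw & MASK) >> SHIFT, 0xa); // field extraction
    }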
- // Creates the IO accessors for a fixed offset register.
- (@io $name:ident @ $offset:expr) => {
+ // Generates the `Debug` implementation for `$name`.
+ (@debug $name:ident { $($field:ident;)* }) => {
+ impl ::kernel::fmt::Debug for $name {
+ fn fmt(&self, f: &mut ::kernel::fmt::Formatter<'_>) -> ::kernel::fmt::Result {
+ f.debug_struct(stringify!($name))
+ .field("<raw>", &::kernel::prelude::fmt!("{:#x}", &self.0))
+ $(
+ .field(stringify!($field), &self.$field())
+ )*
+ .finish()
+ }
+ }
+ };
+
+ // Generates the `Default` implementation for `$name`.
+ (@default $name:ident { $($field:ident;)* }) => {
+ /// Returns a value for the register where all fields are set to their default value.
+ impl ::core::default::Default for $name {
+ fn default() -> Self {
+ #[allow(unused_mut)]
+ let mut value = Self(Default::default());
+
+ ::kernel::macros::paste!(
+ $(
+ value.[<set_ $field>](Default::default());
+ )*
+ );
+
+ value
+ }
+ }
+ };
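+ Together these two arms mean a register value prints with its raw word plus every decoded field, and `Default` is built by calling each generated setter rather than by zeroing the raw value, so a field converted with `=>` contributes the default of its target type. A hypothetical session, reusing the `ScratchReg` sketch above:
+
+     let reg = ScratchReg::default().set_major(1);
+     // Debug output would resemble:
+     // ScratchReg { <raw>: 0x10, minor: 0, major: 1 }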
+
+ // Generates the IO accessors for a fixed offset register.
+ (@io_fixed $name:ident @ $offset:expr) => {
#[allow(dead_code)]
impl $name {
- #[inline]
+ pub(crate) const OFFSET: usize = $offset;
+
+ /// Read the register from its address in `io`.
+ #[inline(always)]
pub(crate) fn read<const SIZE: usize, T>(io: &T) -> Self where
T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
{
Self(io.read32($offset))
}
- #[inline]
+ /// Write the value contained in `self` to the register address in `io`.
+ #[inline(always)]
pub(crate) fn write<const SIZE: usize, T>(self, io: &T) where
T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
{
io.write32(self.0, $offset)
}
- #[inline]
+ /// Read the register from its address in `io` and run `f` on its value to obtain a new
+ /// value to write back.
+ #[inline(always)]
pub(crate) fn alter<const SIZE: usize, T, F>(
io: &T,
f: F,
@@ -347,76 +638,322 @@ macro_rules! register {
}
};
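Hypothetical use of the fixed-offset accessors, assuming `bar` dereferences to `kernel::io::Io` as the trait bounds require:

    let reg = ScratchReg::read(&bar);             // io.read32(OFFSET)
    reg.set_minor(2).write(&bar);                 // io.write32(raw, OFFSET)
    ScratchReg::alter(&bar, |r| r.set_major(3));  // read, apply f, write back

Note that `alter` is a plain read-modify-write chain; nothing makes it atomic with respect to other register users.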
- // Create the IO accessors for a relative offset register.
- (@io $name:ident @ + $offset:literal) => {
+ // Generates the IO accessors for a relative offset register.
+ (@io_relative $name:ident @ $base:ty [ $offset:expr ]) => {
+ #[allow(dead_code)]
+ impl $name {
+ pub(crate) const OFFSET: usize = $offset;
+
+ /// Read the register from `io`, using the base address provided by `base` and adding
+ /// the register's offset to it.
+ #[inline(always)]
+ pub(crate) fn read<const SIZE: usize, T, B>(
+ io: &T,
+ #[allow(unused_variables)]
+ base: &B,
+ ) -> Self where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ B: crate::regs::macros::RegisterBase<$base>,
+ {
+ const OFFSET: usize = $name::OFFSET;
+
+ let value = io.read32(
+ <B as crate::regs::macros::RegisterBase<$base>>::BASE + OFFSET
+ );
+
+ Self(value)
+ }
+
+ /// Write the value contained in `self` to `io`, using the base address provided by
+ /// `base` and adding the register's offset to it.
+ #[inline(always)]
+ pub(crate) fn write<const SIZE: usize, T, B>(
+ self,
+ io: &T,
+ #[allow(unused_variables)]
+ base: &B,
+ ) where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ B: crate::regs::macros::RegisterBase<$base>,
+ {
+ const OFFSET: usize = $name::OFFSET;
+
+ io.write32(
+ self.0,
+ <B as crate::regs::macros::RegisterBase<$base>>::BASE + OFFSET
+ );
+ }
+
+ /// Read the register from `io`, using the base address provided by `base` and adding
+ /// the register's offset to it, then run `f` on its value to obtain a new value to
+ /// write back.
+ #[inline(always)]
+ pub(crate) fn alter<const SIZE: usize, T, B, F>(
+ io: &T,
+ base: &B,
+ f: F,
+ ) where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ B: crate::regs::macros::RegisterBase<$base>,
+ F: ::core::ops::FnOnce(Self) -> Self,
+ {
+ let reg = f(Self::read(io, base));
+ reg.write(io, base);
+ }
+ }
+ };
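+ The relative accessors move the base address into the type system: the caller passes a `base` object whose type implements `RegisterBase<$base>`, and the associated constant `BASE` is added to the register's own `OFFSET`. A sketch of a provider (all names invented for illustration):
+
+     struct FalconBase;                    // marker type named in the register definition
+     struct GspFalcon;
+     impl RegisterBase<FalconBase> for GspFalcon {
+         const BASE: usize = 0x0011_0000;  // assumed engine base
+     }
+     // MyFalconReg::read(&bar, &gsp) then accesses BASE + MyFalconReg::OFFSET.
+
+ Passing a base object of the wrong engine type fails to compile instead of silently touching the wrong MMIO range.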
+
+ // Generates the IO accessors for an array of registers.
+ (@io_array $name:ident @ $offset:literal [ $size:expr ; $stride:expr ]) => {
#[allow(dead_code)]
impl $name {
- #[inline]
+ pub(crate) const OFFSET: usize = $offset;
+ pub(crate) const SIZE: usize = $size;
+ pub(crate) const STRIDE: usize = $stride;
+
+ /// Read the array register at index `idx` from its address in `io`.
+ #[inline(always)]
pub(crate) fn read<const SIZE: usize, T>(
io: &T,
- base: usize,
+ idx: usize,
) -> Self where
T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
{
- Self(io.read32(base + $offset))
+ build_assert!(idx < Self::SIZE);
+
+ let offset = Self::OFFSET + (idx * Self::STRIDE);
+ let value = io.read32(offset);
+
+ Self(value)
}
- #[inline]
+ /// Write the value contained in `self` to the array register with index `idx` in `io`.
+ #[inline(always)]
pub(crate) fn write<const SIZE: usize, T>(
self,
io: &T,
- base: usize,
+ idx: usize
) where
T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
{
- io.write32(self.0, base + $offset)
+ build_assert!(idx < Self::SIZE);
+
+ let offset = Self::OFFSET + (idx * Self::STRIDE);
+
+ io.write32(self.0, offset);
}
- #[inline]
+ /// Read the array register at index `idx` in `io` and run `f` on its value to obtain a
+ /// new value to write back.
+ #[inline(always)]
pub(crate) fn alter<const SIZE: usize, T, F>(
io: &T,
- base: usize,
+ idx: usize,
f: F,
) where
T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
F: ::core::ops::FnOnce(Self) -> Self,
{
- let reg = f(Self::read(io, base));
- reg.write(io, base);
+ let reg = f(Self::read(io, idx));
+ reg.write(io, idx);
}
- #[inline]
+ /// Read the array register at index `idx` from its address in `io`.
+ ///
+ /// The validity of `idx` is checked at run-time, and `EINVAL` is returned if the
+ /// access was out-of-bounds.
+ #[inline(always)]
pub(crate) fn try_read<const SIZE: usize, T>(
io: &T,
- base: usize,
+ idx: usize,
) -> ::kernel::error::Result<Self> where
T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
{
- io.try_read32(base + $offset).map(Self)
+ if idx < Self::SIZE {
+ Ok(Self::read(io, idx))
+ } else {
+ Err(EINVAL)
+ }
}
- #[inline]
+ /// Write the value contained in `self` to the array register with index `idx` in `io`.
+ ///
+ /// The validity of `idx` is checked at run-time, and `EINVAL` is returned if the
+ /// access was out-of-bounds.
+ #[inline(always)]
pub(crate) fn try_write<const SIZE: usize, T>(
self,
io: &T,
- base: usize,
- ) -> ::kernel::error::Result<()> where
+ idx: usize,
+ ) -> ::kernel::error::Result where
T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
{
- io.try_write32(self.0, base + $offset)
+ if idx < Self::SIZE {
+ Ok(self.write(io, idx))
+ } else {
+ Err(EINVAL)
+ }
}
- #[inline]
+ /// Read the array register at index `idx` in `io` and run `f` on its value to obtain a
+ /// new value to write back.
+ ///
+ /// The validity of `idx` is checked at run-time, and `EINVAL` is returned if the
+ /// access was out-of-bounds.
+ #[inline(always)]
pub(crate) fn try_alter<const SIZE: usize, T, F>(
io: &T,
- base: usize,
+ idx: usize,
+ f: F,
+ ) -> ::kernel::error::Result where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ F: ::core::ops::FnOnce(Self) -> Self,
+ {
+ if idx < Self::SIZE {
+ Ok(Self::alter(io, idx, f))
+ } else {
+ Err(EINVAL)
+ }
+ }
+ }
+ };
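+ The array accessors come in two flavours with different failure modes: `read`/`write`/`alter` demand an index that is provably in bounds at build time (`build_assert!` turns an unprovable bound into a build failure), while the `try_*` variants check at run time and return `EINVAL`. Hypothetical use:
+
+     let first = ArrayReg::read(&bar, 0);       // constant index, proven at build time
+     for i in 0..n {
+         let r = ArrayReg::try_read(&bar, i)?;  // Err(EINVAL) once i >= SIZE
+         // ...
+     }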
+
+ // Generates the IO accessors for an array of relative registers.
+ (
+ @io_relative_array $name:ident @ $base:ty
+ [ $offset:literal [ $size:expr ; $stride:expr ] ]
+ ) => {
+ #[allow(dead_code)]
+ impl $name {
+ pub(crate) const OFFSET: usize = $offset;
+ pub(crate) const SIZE: usize = $size;
+ pub(crate) const STRIDE: usize = $stride;
+
+ /// Read the array register at index `idx` from `io`, using the base address provided
+ /// by `base` and adding the register's offset to it.
+ #[inline(always)]
+ pub(crate) fn read<const SIZE: usize, T, B>(
+ io: &T,
+ #[allow(unused_variables)]
+ base: &B,
+ idx: usize,
+ ) -> Self where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ B: crate::regs::macros::RegisterBase<$base>,
+ {
+ build_assert!(idx < Self::SIZE);
+
+ let offset = <B as crate::regs::macros::RegisterBase<$base>>::BASE +
+ Self::OFFSET + (idx * Self::STRIDE);
+ let value = io.read32(offset);
+
+ Self(value)
+ }
+
+ /// Write the value contained in `self` to `io`, using the base address provided by
+ /// `base` and adding the offset of array register `idx` to it.
+ #[inline(always)]
+ pub(crate) fn write<const SIZE: usize, T, B>(
+ self,
+ io: &T,
+ #[allow(unused_variables)]
+ base: &B,
+ idx: usize
+ ) where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ B: crate::regs::macros::RegisterBase<$base>,
+ {
+ build_assert!(idx < Self::SIZE);
+
+ let offset = <B as crate::regs::macros::RegisterBase<$base>>::BASE +
+ Self::OFFSET + (idx * Self::STRIDE);
+
+ io.write32(self.0, offset);
+ }
+
+ /// Read the array register at index `idx` from `io`, using the base address provided
+ /// by `base` and adding the register's offset to it, then run `f` on its value to
+ /// obtain a new value to write back.
+ #[inline(always)]
+ pub(crate) fn alter<const SIZE: usize, T, B, F>(
+ io: &T,
+ base: &B,
+ idx: usize,
+ f: F,
+ ) where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ B: crate::regs::macros::RegisterBase<$base>,
+ F: ::core::ops::FnOnce(Self) -> Self,
+ {
+ let reg = f(Self::read(io, base, idx));
+ reg.write(io, base, idx);
+ }
+
+ /// Read the array register at index `idx` from `io`, using the base address provided
+ /// by `base` and adding the register's offset to it.
+ ///
+ /// The validity of `idx` is checked at run-time, and `EINVAL` is returned if the
+ /// access was out-of-bounds.
+ #[inline(always)]
+ pub(crate) fn try_read<const SIZE: usize, T, B>(
+ io: &T,
+ base: &B,
+ idx: usize,
+ ) -> ::kernel::error::Result<Self> where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ B: crate::regs::macros::RegisterBase<$base>,
+ {
+ if idx < Self::SIZE {
+ Ok(Self::read(io, base, idx))
+ } else {
+ Err(EINVAL)
+ }
+ }
+
+ /// Write the value contained in `self` to `io`, using the base address provided by
+ /// `base` and adding the offset of array register `idx` to it.
+ ///
+ /// The validity of `idx` is checked at run-time, and `EINVAL` is returned if the
+ /// access was out-of-bounds.
+ #[inline(always)]
+ pub(crate) fn try_write<const SIZE: usize, T, B>(
+ self,
+ io: &T,
+ base: &B,
+ idx: usize,
+ ) -> ::kernel::error::Result where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ B: crate::regs::macros::RegisterBase<$base>,
+ {
+ if idx < Self::SIZE {
+ Ok(self.write(io, base, idx))
+ } else {
+ Err(EINVAL)
+ }
+ }
+
+ /// Read the array register at index `idx` from `io`, using the base address provided
+ /// by `base` and adding the register's offset to it, then run `f` on its value to
+ /// obtain a new value to write back.
+ ///
+ /// The validity of `idx` is checked at run-time, and `EINVAL` is returned if the
+ /// access was out-of-bounds.
+ #[inline(always)]
+ pub(crate) fn try_alter<const SIZE: usize, T, B, F>(
+ io: &T,
+ base: &B,
+ idx: usize,
f: F,
- ) -> ::kernel::error::Result<()> where
+ ) -> ::kernel::error::Result where
T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ B: crate::regs::macros::RegisterBase<$base>,
F: ::core::ops::FnOnce(Self) -> Self,
{
- let reg = f(Self::try_read(io, base)?);
- reg.try_write(io, base)
+ if idx < Self::SIZE {
+ Ok(Self::alter(io, base, idx, f))
+ } else {
+ Err(EINVAL)
+ }
}
}
};
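This last arm simply composes the two previous schemes, so a hypothetical call site supplies both a base provider and an index:

    let v = RelArrayReg::read(&bar, &gsp, 3);   // BASE + OFFSET + 3 * STRIDE
    RelArrayReg::try_alter(&bar, &gsp, idx, |r| r.set_enable(true))?;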
diff --git a/drivers/gpu/nova-core/util.rs b/drivers/gpu/nova-core/util.rs
index 76cedf3710d7..bf35f00cb732 100644
--- a/drivers/gpu/nova-core/util.rs
+++ b/drivers/gpu/nova-core/util.rs
@@ -3,26 +3,6 @@
use kernel::prelude::*;
use kernel::time::{Delta, Instant, Monotonic};
-pub(crate) const fn to_lowercase_bytes<const N: usize>(s: &str) -> [u8; N] {
- let src = s.as_bytes();
- let mut dst = [0; N];
- let mut i = 0;
-
- while i < src.len() && i < N {
- dst[i] = (src[i] as char).to_ascii_lowercase() as u8;
- i += 1;
- }
-
- dst
-}
-
-pub(crate) const fn const_bytes_to_str(bytes: &[u8]) -> &str {
- match core::str::from_utf8(bytes) {
- Ok(string) => string,
- Err(_) => kernel::build_error!("Bytes are not valid UTF-8."),
- }
-}
-
/// Wait until `cond` is true or `timeout` elapsed.
///
/// When `cond` evaluates to `Some`, its return value is returned.
diff --git a/drivers/gpu/nova-core/vbios.rs b/drivers/gpu/nova-core/vbios.rs
index 5b5d9f38cbb3..71fbe71b84db 100644
--- a/drivers/gpu/nova-core/vbios.rs
+++ b/drivers/gpu/nova-core/vbios.rs
@@ -8,8 +8,9 @@ use crate::firmware::FalconUCodeDescV3;
use core::convert::TryFrom;
use kernel::device;
use kernel::error::Result;
-use kernel::pci;
use kernel::prelude::*;
+use kernel::ptr::{Alignable, Alignment};
+use kernel::types::ARef;
/// The offset of the VBIOS ROM in the BAR0 space.
const ROM_OFFSET: usize = 0x300000;
@@ -31,7 +32,7 @@ const FALCON_UCODE_ENTRY_APPID_FWSEC_PROD: u8 = 0x85;
/// Vbios Reader for constructing the VBIOS data.
struct VbiosIterator<'a> {
- pdev: &'a pci::Device,
+ dev: &'a device::Device,
bar0: &'a Bar0,
/// VBIOS data vector: As BIOS images are scanned, they are added to this vector for reference
/// or copying into other data structures. It is the entire scanned contents of the VBIOS which
@@ -46,9 +47,9 @@ struct VbiosIterator<'a> {
}
impl<'a> VbiosIterator<'a> {
- fn new(pdev: &'a pci::Device, bar0: &'a Bar0) -> Result<Self> {
+ fn new(dev: &'a device::Device, bar0: &'a Bar0) -> Result<Self> {
Ok(Self {
- pdev,
+ dev,
bar0,
data: KVec::new(),
current_offset: 0,
@@ -64,7 +65,7 @@ impl<'a> VbiosIterator<'a> {
// Ensure length is a multiple of 4 for 32-bit reads
if len % core::mem::size_of::<u32>() != 0 {
dev_err!(
- self.pdev.as_ref(),
+ self.dev,
"VBIOS read length {} is not a multiple of 4\n",
len
);
@@ -89,7 +90,7 @@ impl<'a> VbiosIterator<'a> {
/// Read bytes at a specific offset, filling any gap.
fn read_more_at_offset(&mut self, offset: usize, len: usize) -> Result {
if offset > BIOS_MAX_SCAN_LEN {
- dev_err!(self.pdev.as_ref(), "Error: exceeded BIOS scan limit.\n");
+ dev_err!(self.dev, "Error: exceeded BIOS scan limit.\n");
return Err(EINVAL);
}
@@ -115,7 +116,7 @@ impl<'a> VbiosIterator<'a> {
if offset + len > data_len {
self.read_more_at_offset(offset, len).inspect_err(|e| {
dev_err!(
- self.pdev.as_ref(),
+ self.dev,
"Failed to read more at offset {:#x}: {:?}\n",
offset,
e
@@ -123,9 +124,9 @@ impl<'a> VbiosIterator<'a> {
})?;
}
- BiosImage::new(self.pdev, &self.data[offset..offset + len]).inspect_err(|err| {
+ BiosImage::new(self.dev, &self.data[offset..offset + len]).inspect_err(|err| {
dev_err!(
- self.pdev.as_ref(),
+ self.dev,
"Failed to {} at offset {:#x}: {:?}\n",
context,
offset,
@@ -146,10 +147,7 @@ impl<'a> Iterator for VbiosIterator<'a> {
}
if self.current_offset > BIOS_MAX_SCAN_LEN {
- dev_err!(
- self.pdev.as_ref(),
- "Error: exceeded BIOS scan limit, stopping scan\n"
- );
+ dev_err!(self.dev, "Error: exceeded BIOS scan limit, stopping scan\n");
return None;
}
@@ -177,8 +175,7 @@ impl<'a> Iterator for VbiosIterator<'a> {
// Advance to next image (aligned to 512 bytes).
self.current_offset += image_size;
- // TODO[NUMM]: replace with `align_up` once it lands.
- self.current_offset = self.current_offset.next_multiple_of(512);
+ self.current_offset = self.current_offset.align_up(Alignment::new::<512>())?;
Some(Ok(full_image))
}
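The new line keeps the old rounding behaviour while routing it through the typed alignment API: `Alignment::new::<512>()` encodes the power of two in the type, and `align_up` evidently returns an `Option`, letting the `Option`-returning `next()` bail out via `?` if the addition would overflow. A standalone sketch of the arithmetic being preserved (plain `usize` math, not the kernel API):

    fn main() {
        let offset = 0x2345_usize;
        let aligned = offset.next_multiple_of(512); // what the removed line computed
        assert_eq!(aligned, 0x2400);
    }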
@@ -192,18 +189,18 @@ impl Vbios {
/// Probe for VBIOS extraction.
///
/// Once the VBIOS object is built, `bar0` is not read for [`Vbios`] purposes anymore.
- pub(crate) fn new(pdev: &pci::Device, bar0: &Bar0) -> Result<Vbios> {
+ pub(crate) fn new(dev: &device::Device, bar0: &Bar0) -> Result<Vbios> {
// Images to extract from iteration
let mut pci_at_image: Option<PciAtBiosImage> = None;
let mut first_fwsec_image: Option<FwSecBiosBuilder> = None;
let mut second_fwsec_image: Option<FwSecBiosBuilder> = None;
// Parse all VBIOS images in the ROM
- for image_result in VbiosIterator::new(pdev, bar0)? {
+ for image_result in VbiosIterator::new(dev, bar0)? {
let full_image = image_result?;
dev_dbg!(
- pdev.as_ref(),
+ dev,
"Found BIOS image: size: {:#x}, type: {}, last: {}\n",
full_image.image_size_bytes(),
full_image.image_type_str(),
@@ -234,14 +231,14 @@ impl Vbios {
(second_fwsec_image, first_fwsec_image, pci_at_image)
{
second
- .setup_falcon_data(pdev, &pci_at, &first)
- .inspect_err(|e| dev_err!(pdev.as_ref(), "Falcon data setup failed: {:?}\n", e))?;
+ .setup_falcon_data(&pci_at, &first)
+ .inspect_err(|e| dev_err!(dev, "Falcon data setup failed: {:?}\n", e))?;
Ok(Vbios {
- fwsec_image: second.build(pdev)?,
+ fwsec_image: second.build()?,
})
} else {
dev_err!(
- pdev.as_ref(),
+ dev,
"Missing required images for falcon data setup, skipping\n"
);
Err(EINVAL)
@@ -284,9 +281,9 @@ struct PcirStruct {
}
impl PcirStruct {
- fn new(pdev: &pci::Device, data: &[u8]) -> Result<Self> {
+ fn new(dev: &device::Device, data: &[u8]) -> Result<Self> {
if data.len() < core::mem::size_of::<PcirStruct>() {
- dev_err!(pdev.as_ref(), "Not enough data for PcirStruct\n");
+ dev_err!(dev, "Not enough data for PcirStruct\n");
return Err(EINVAL);
}
@@ -295,11 +292,7 @@ impl PcirStruct {
// Signature should be "PCIR" (0x52494350) or "NPDS" (0x5344504e).
if &signature != b"PCIR" && &signature != b"NPDS" {
- dev_err!(
- pdev.as_ref(),
- "Invalid signature for PcirStruct: {:?}\n",
- signature
- );
+ dev_err!(dev, "Invalid signature for PcirStruct: {:?}\n", signature);
return Err(EINVAL);
}
@@ -308,7 +301,7 @@ impl PcirStruct {
let image_len = u16::from_le_bytes([data[16], data[17]]);
if image_len == 0 {
- dev_err!(pdev.as_ref(), "Invalid image length: 0\n");
+ dev_err!(dev, "Invalid image length: 0\n");
return Err(EINVAL);
}
@@ -345,7 +338,7 @@ impl PcirStruct {
/// its header) is in the [`PciAtBiosImage`] and the falcon data it is pointing to is in the
/// [`FwSecBiosImage`].
#[derive(Debug, Clone, Copy)]
-#[expect(dead_code)]
+#[repr(C)]
struct BitHeader {
/// 0h: BIT Header Identifier (BMP=0x7FFF/BIT=0xB8FF)
id: u16,
@@ -365,7 +358,7 @@ struct BitHeader {
impl BitHeader {
fn new(data: &[u8]) -> Result<Self> {
- if data.len() < 12 {
+ if data.len() < core::mem::size_of::<Self>() {
return Err(EINVAL);
}
@@ -467,7 +460,7 @@ struct PciRomHeader {
}
impl PciRomHeader {
- fn new(pdev: &pci::Device, data: &[u8]) -> Result<Self> {
+ fn new(dev: &device::Device, data: &[u8]) -> Result<Self> {
if data.len() < 26 {
// Need at least 26 bytes to read pciDataStrucPtr and sizeOfBlock.
return Err(EINVAL);
@@ -479,7 +472,7 @@ impl PciRomHeader {
match signature {
0xAA55 | 0xBB77 | 0x4E56 => {}
_ => {
- dev_err!(pdev.as_ref(), "ROM signature unknown {:#x}\n", signature);
+ dev_err!(dev, "ROM signature unknown {:#x}\n", signature);
return Err(EINVAL);
}
}
@@ -538,9 +531,9 @@ struct NpdeStruct {
}
impl NpdeStruct {
- fn new(pdev: &pci::Device, data: &[u8]) -> Option<Self> {
+ fn new(dev: &device::Device, data: &[u8]) -> Option<Self> {
if data.len() < core::mem::size_of::<Self>() {
- dev_dbg!(pdev.as_ref(), "Not enough data for NpdeStruct\n");
+ dev_dbg!(dev, "Not enough data for NpdeStruct\n");
return None;
}
@@ -549,17 +542,13 @@ impl NpdeStruct {
// Signature should be "NPDE" (0x4544504E).
if &signature != b"NPDE" {
- dev_dbg!(
- pdev.as_ref(),
- "Invalid signature for NpdeStruct: {:?}\n",
- signature
- );
+ dev_dbg!(dev, "Invalid signature for NpdeStruct: {:?}\n", signature);
return None;
}
let subimage_len = u16::from_le_bytes([data[8], data[9]]);
if subimage_len == 0 {
- dev_dbg!(pdev.as_ref(), "Invalid subimage length: 0\n");
+ dev_dbg!(dev, "Invalid subimage length: 0\n");
return None;
}
@@ -584,7 +573,7 @@ impl NpdeStruct {
/// Try to find NPDE in the data, the NPDE is right after the PCIR.
fn find_in_data(
- pdev: &pci::Device,
+ dev: &device::Device,
data: &[u8],
rom_header: &PciRomHeader,
pcir: &PcirStruct,
@@ -596,12 +585,12 @@ impl NpdeStruct {
// Check if we have enough data
if npde_start + core::mem::size_of::<Self>() > data.len() {
- dev_dbg!(pdev.as_ref(), "Not enough data for NPDE\n");
+ dev_dbg!(dev, "Not enough data for NPDE\n");
return None;
}
// Try to create NPDE from the data
- NpdeStruct::new(pdev, &data[npde_start..])
+ NpdeStruct::new(dev, &data[npde_start..])
}
}
@@ -669,10 +658,10 @@ impl BiosImage {
/// Create a [`BiosImageBase`] from a byte slice and convert it to a [`BiosImage`] which
/// triggers the constructor of the specific BiosImage enum variant.
- fn new(pdev: &pci::Device, data: &[u8]) -> Result<Self> {
- let base = BiosImageBase::new(pdev, data)?;
+ fn new(dev: &device::Device, data: &[u8]) -> Result<Self> {
+ let base = BiosImageBase::new(dev, data)?;
let image = base.into_image().inspect_err(|e| {
- dev_err!(pdev.as_ref(), "Failed to create BiosImage: {:?}\n", e);
+ dev_err!(dev, "Failed to create BiosImage: {:?}\n", e);
})?;
Ok(image)
@@ -754,9 +743,10 @@ impl TryFrom<BiosImageBase> for BiosImage {
///
/// Each BiosImage type has a BiosImageBase type along with other image-specific fields. Note that
/// Rust favors composition of types over inheritance.
-#[derive(Debug)]
#[expect(dead_code)]
struct BiosImageBase {
+ /// Used for logging.
+ dev: ARef<device::Device>,
/// PCI ROM Expansion Header
rom_header: PciRomHeader,
/// PCI Data Structure
@@ -773,16 +763,16 @@ impl BiosImageBase {
}
/// Creates a new BiosImageBase from raw byte data.
- fn new(pdev: &pci::Device, data: &[u8]) -> Result<Self> {
+ fn new(dev: &device::Device, data: &[u8]) -> Result<Self> {
// Ensure we have enough data for the ROM header.
if data.len() < 26 {
- dev_err!(pdev.as_ref(), "Not enough data for ROM header\n");
+ dev_err!(dev, "Not enough data for ROM header\n");
return Err(EINVAL);
}
// Parse the ROM header.
- let rom_header = PciRomHeader::new(pdev, &data[0..26])
- .inspect_err(|e| dev_err!(pdev.as_ref(), "Failed to create PciRomHeader: {:?}\n", e))?;
+ let rom_header = PciRomHeader::new(dev, &data[0..26])
+ .inspect_err(|e| dev_err!(dev, "Failed to create PciRomHeader: {:?}\n", e))?;
// Get the PCI Data Structure using the pointer from the ROM header.
let pcir_offset = rom_header.pci_data_struct_offset as usize;
@@ -791,28 +781,29 @@ impl BiosImageBase {
.ok_or(EINVAL)
.inspect_err(|_| {
dev_err!(
- pdev.as_ref(),
+ dev,
"PCIR offset {:#x} out of bounds (data length: {})\n",
pcir_offset,
data.len()
);
dev_err!(
- pdev.as_ref(),
+ dev,
"Consider reading more data for construction of BiosImage\n"
);
})?;
- let pcir = PcirStruct::new(pdev, pcir_data)
- .inspect_err(|e| dev_err!(pdev.as_ref(), "Failed to create PcirStruct: {:?}\n", e))?;
+ let pcir = PcirStruct::new(dev, pcir_data)
+ .inspect_err(|e| dev_err!(dev, "Failed to create PcirStruct: {:?}\n", e))?;
// Look for NPDE structure if this is not an NBSI image (type != 0x70).
- let npde = NpdeStruct::find_in_data(pdev, data, &rom_header, &pcir);
+ let npde = NpdeStruct::find_in_data(dev, data, &rom_header, &pcir);
// Create a copy of the data.
let mut data_copy = KVec::new();
data_copy.extend_from_slice(data, GFP_KERNEL)?;
Ok(BiosImageBase {
+ dev: dev.into(),
rom_header,
pcir,
npde,
@@ -848,7 +839,7 @@ impl PciAtBiosImage {
///
/// This is just a 4 byte structure that contains a pointer to the Falcon data in the FWSEC
/// image.
- fn falcon_data_ptr(&self, pdev: &pci::Device) -> Result<u32> {
+ fn falcon_data_ptr(&self) -> Result<u32> {
let token = self.get_bit_token(BIT_TOKEN_ID_FALCON_DATA)?;
// Make sure we don't go out of bounds
@@ -859,14 +850,14 @@ impl PciAtBiosImage {
// read the 4 bytes at the offset specified in the token
let offset = token.data_offset as usize;
let bytes: [u8; 4] = self.base.data[offset..offset + 4].try_into().map_err(|_| {
- dev_err!(pdev.as_ref(), "Failed to convert data slice to array");
+ dev_err!(self.base.dev, "Failed to convert data slice to array");
EINVAL
})?;
let data_ptr = u32::from_le_bytes(bytes);
if (data_ptr as usize) < self.base.data.len() {
- dev_err!(pdev.as_ref(), "Falcon data pointer out of bounds\n");
+ dev_err!(self.base.dev, "Falcon data pointer out of bounds\n");
return Err(EINVAL);
}
@@ -892,7 +883,7 @@ impl TryFrom<BiosImageBase> for PciAtBiosImage {
/// The [`PmuLookupTableEntry`] structure is a single entry in the [`PmuLookupTable`].
///
/// See the [`PmuLookupTable`] description for more information.
-#[expect(dead_code)]
+#[repr(C, packed)]
struct PmuLookupTableEntry {
application_id: u8,
target_id: u8,
@@ -901,7 +892,7 @@ struct PmuLookupTableEntry {
impl PmuLookupTableEntry {
fn new(data: &[u8]) -> Result<Self> {
- if data.len() < 6 {
+ if data.len() < core::mem::size_of::<Self>() {
return Err(EINVAL);
}
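Because the struct is now `#[repr(C, packed)]`, no padding is inserted and `size_of::<Self>()` equals the sum of the field sizes, matching the old literal `6` exactly. A runnable sketch under the assumption that the one field not shown in this hunk is 32 bits wide (which is what the 6-byte total implies):

    #[repr(C, packed)]
    struct PmuLookupTableEntry {
        application_id: u8,
        target_id: u8,
        data: u32, // hypothetical: inferred from the 6-byte total
    }

    fn main() {
        assert_eq!(core::mem::size_of::<PmuLookupTableEntry>(), 6); // 1 + 1 + 4
    }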
@@ -928,7 +919,7 @@ struct PmuLookupTable {
}
impl PmuLookupTable {
- fn new(pdev: &pci::Device, data: &[u8]) -> Result<Self> {
+ fn new(dev: &device::Device, data: &[u8]) -> Result<Self> {
if data.len() < 4 {
return Err(EINVAL);
}
@@ -940,10 +931,7 @@ impl PmuLookupTable {
let required_bytes = header_len + (entry_count * entry_len);
if data.len() < required_bytes {
- dev_err!(
- pdev.as_ref(),
- "PmuLookupTable data length less than required\n"
- );
+ dev_err!(dev, "PmuLookupTable data length less than required\n");
return Err(EINVAL);
}
@@ -956,11 +944,7 @@ impl PmuLookupTable {
// Debug logging of entries (dumps the table data to dmesg)
for i in (header_len..required_bytes).step_by(entry_len) {
- dev_dbg!(
- pdev.as_ref(),
- "PMU entry: {:02x?}\n",
- &data[i..][..entry_len]
- );
+ dev_dbg!(dev, "PMU entry: {:02x?}\n", &data[i..][..entry_len]);
}
Ok(PmuLookupTable {
@@ -997,11 +981,10 @@ impl PmuLookupTable {
impl FwSecBiosBuilder {
fn setup_falcon_data(
&mut self,
- pdev: &pci::Device,
pci_at_image: &PciAtBiosImage,
first_fwsec: &FwSecBiosBuilder,
) -> Result {
- let mut offset = pci_at_image.falcon_data_ptr(pdev)? as usize;
+ let mut offset = pci_at_image.falcon_data_ptr()? as usize;
let mut pmu_in_first_fwsec = false;
// The falcon data pointer assumes that the PciAt and FWSEC images
@@ -1024,10 +1007,15 @@ impl FwSecBiosBuilder {
self.falcon_data_offset = Some(offset);
if pmu_in_first_fwsec {
- self.pmu_lookup_table =
- Some(PmuLookupTable::new(pdev, &first_fwsec.base.data[offset..])?);
+ self.pmu_lookup_table = Some(PmuLookupTable::new(
+ &self.base.dev,
+ &first_fwsec.base.data[offset..],
+ )?);
} else {
- self.pmu_lookup_table = Some(PmuLookupTable::new(pdev, &self.base.data[offset..])?);
+ self.pmu_lookup_table = Some(PmuLookupTable::new(
+ &self.base.dev,
+ &self.base.data[offset..],
+ )?);
}
match self
@@ -1040,7 +1028,7 @@ impl FwSecBiosBuilder {
let mut ucode_offset = entry.data as usize;
ucode_offset -= pci_at_image.base.data.len();
if ucode_offset < first_fwsec.base.data.len() {
- dev_err!(pdev.as_ref(), "Falcon Ucode offset not in second Fwsec.\n");
+ dev_err!(self.base.dev, "Falcon Ucode offset not in second Fwsec.\n");
return Err(EINVAL);
}
ucode_offset -= first_fwsec.base.data.len();
@@ -1048,7 +1036,7 @@ impl FwSecBiosBuilder {
}
Err(e) => {
dev_err!(
- pdev.as_ref(),
+ self.base.dev,
"PmuLookupTableEntry not found, error: {:?}\n",
e
);
@@ -1059,7 +1047,7 @@ impl FwSecBiosBuilder {
}
/// Build the final FwSecBiosImage from this builder
- fn build(self, pdev: &pci::Device) -> Result<FwSecBiosImage> {
+ fn build(self) -> Result<FwSecBiosImage> {
let ret = FwSecBiosImage {
base: self.base,
falcon_ucode_offset: self.falcon_ucode_offset.ok_or(EINVAL)?,
@@ -1067,8 +1055,8 @@ impl FwSecBiosBuilder {
if cfg!(debug_assertions) {
// Print the desc header for debugging
- let desc = ret.header(pdev.as_ref())?;
- dev_dbg!(pdev.as_ref(), "PmuLookupTableEntry desc: {:#?}\n", desc);
+ let desc = ret.header()?;
+ dev_dbg!(ret.base.dev, "PmuLookupTableEntry desc: {:#?}\n", desc);
}
Ok(ret)
@@ -1077,13 +1065,16 @@ impl FwSecBiosBuilder {
impl FwSecBiosImage {
/// Get the FwSec header ([`FalconUCodeDescV3`]).
- pub(crate) fn header(&self, dev: &device::Device) -> Result<&FalconUCodeDescV3> {
+ pub(crate) fn header(&self) -> Result<&FalconUCodeDescV3> {
// Get the falcon ucode offset that was found in setup_falcon_data.
let falcon_ucode_offset = self.falcon_ucode_offset;
// Make sure the offset is within the data bounds.
if falcon_ucode_offset + core::mem::size_of::<FalconUCodeDescV3>() > self.base.data.len() {
- dev_err!(dev, "fwsec-frts header not contained within BIOS bounds\n");
+ dev_err!(
+ self.base.dev,
+ "fwsec-frts header not contained within BIOS bounds\n"
+ );
return Err(ERANGE);
}
@@ -1095,7 +1086,7 @@ impl FwSecBiosImage {
let ver = (hdr & 0xff00) >> 8;
if ver != 3 {
- dev_err!(dev, "invalid fwsec firmware version: {:?}\n", ver);
+ dev_err!(self.base.dev, "invalid fwsec firmware version: {:?}\n", ver);
return Err(EINVAL);
}
@@ -1115,7 +1106,7 @@ impl FwSecBiosImage {
}
/// Get the ucode data as a byte slice
- pub(crate) fn ucode(&self, dev: &device::Device, desc: &FalconUCodeDescV3) -> Result<&[u8]> {
+ pub(crate) fn ucode(&self, desc: &FalconUCodeDescV3) -> Result<&[u8]> {
let falcon_ucode_offset = self.falcon_ucode_offset;
// The ucode data follows the descriptor.
@@ -1127,15 +1118,16 @@ impl FwSecBiosImage {
.data
.get(ucode_data_offset..ucode_data_offset + size)
.ok_or(ERANGE)
- .inspect_err(|_| dev_err!(dev, "fwsec ucode data not contained within BIOS bounds\n"))
+ .inspect_err(|_| {
+ dev_err!(
+ self.base.dev,
+ "fwsec ucode data not contained within BIOS bounds\n"
+ )
+ })
}
/// Get the signatures as a byte slice
- pub(crate) fn sigs(
- &self,
- dev: &device::Device,
- desc: &FalconUCodeDescV3,
- ) -> Result<&[Bcrt30Rsa3kSignature]> {
+ pub(crate) fn sigs(&self, desc: &FalconUCodeDescV3) -> Result<&[Bcrt30Rsa3kSignature]> {
// The signatures data follows the descriptor.
let sigs_data_offset = self.falcon_ucode_offset + core::mem::size_of::<FalconUCodeDescV3>();
let sigs_size =
@@ -1144,7 +1136,7 @@ impl FwSecBiosImage {
// Make sure the data is within bounds.
if sigs_data_offset + sigs_size > self.base.data.len() {
dev_err!(
- dev,
+ self.base.dev,
"fwsec signatures data not contained within BIOS bounds\n"
);
return Err(ERANGE);
diff --git a/drivers/greybus/svc.c b/drivers/greybus/svc.c
index 4256467fcd35..35ea7147dca6 100644
--- a/drivers/greybus/svc.c
+++ b/drivers/greybus/svc.c
@@ -10,6 +10,7 @@
#include <linux/kstrtox.h>
#include <linux/workqueue.h>
#include <linux/greybus.h>
+#include <linux/string_choices.h>
#define SVC_INTF_EJECT_TIMEOUT 9000
#define SVC_INTF_ACTIVATE_TIMEOUT 6000
@@ -73,7 +74,7 @@ static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr,
struct gb_svc *svc = to_gb_svc(dev);
return sprintf(buf, "%s\n",
- gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled");
+ str_enabled_disabled(gb_svc_watchdog_enabled(svc)));
}
static ssize_t watchdog_store(struct device *dev,
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index a57901203aeb..5341aa79f387 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -92,6 +92,17 @@ config HID_GENERIC
If unsure, say Y.
+config HID_HAPTIC
+ tristate "Haptic touchpad support"
+ default n
+ help
+ Support for touchpads with force sensors and haptic actuators instead of a
+ traditional button.
+ It adds extra parsing and a force-feedback (FF) device to the hid-multitouch driver.
+ It can be used with the Elan 2703 haptic touchpad.
+
+ If unsure, say N.
+
menu "Special HID drivers"
config HID_A4TECH
@@ -597,8 +608,6 @@ config HID_LED
config HID_LENOVO
tristate "Lenovo / Thinkpad devices"
- depends on ACPI
- select ACPI_PLATFORM_PROFILE
select NEW_LEDS
select LEDS_CLASS
help
@@ -1162,7 +1171,7 @@ config GREENASIA_FF
config HID_HYPERV_MOUSE
tristate "Microsoft Hyper-V mouse driver"
- depends on HYPERV
+ depends on HYPERV_VMBUS
help
Select this option to enable the Hyper-V mouse driver.
@@ -1243,7 +1252,7 @@ config HID_U2FZERO
U2F Zero supports custom commands for blinking the LED
and getting data from the internal hardware RNG.
- The internal hardware can be used to feed the enthropy pool.
+ The internal hardware can be used to feed the entropy pool.
U2F Zero only supports blinking its LED, so this driver doesn't
allow setting the brightness to anything but 1, which will
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 10ae5dedbd84..361a7daedeb8 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -4,6 +4,7 @@
#
hid-y := hid-core.o hid-input.o hid-quirks.o
hid-$(CONFIG_DEBUG_FS) += hid-debug.o
+hid-$(CONFIG_HID_HAPTIC) += hid-haptic.o
obj-$(CONFIG_HID_BPF) += bpf/
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
index 0f2cbae39b2b..7017bfa59093 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
@@ -39,8 +39,12 @@ int amd_sfh_get_report(struct hid_device *hid, int report_id, int report_type)
struct amdtp_hid_data *hid_data = hid->driver_data;
struct amdtp_cl_data *cli_data = hid_data->cli_data;
struct request_list *req_list = &cli_data->req_list;
+ struct amd_input_data *in_data = cli_data->in_data;
+ struct amd_mp2_dev *mp2;
int i;
+ mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
+ guard(mutex)(&mp2->lock);
for (i = 0; i < cli_data->num_hid_devices; i++) {
if (cli_data->hid_sensor_hubs[i] == hid) {
struct request_list *new = kzalloc(sizeof(*new), GFP_KERNEL);
@@ -75,6 +79,8 @@ void amd_sfh_work(struct work_struct *work)
u8 report_id, node_type;
u8 report_size = 0;
+ mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
+ guard(mutex)(&mp2->lock);
req_node = list_last_entry(&req_list->list, struct request_list, list);
list_del(&req_node->list);
current_index = req_node->current_index;
@@ -83,7 +89,6 @@ void amd_sfh_work(struct work_struct *work)
node_type = req_node->report_type;
kfree(req_node);
- mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
mp2_ops = mp2->mp2_ops;
if (node_type == HID_FEATURE_REPORT) {
report_size = mp2_ops->get_feat_rep(sensor_index, report_id,
@@ -107,6 +112,8 @@ void amd_sfh_work(struct work_struct *work)
cli_data->cur_hid_dev = current_index;
cli_data->sensor_requested_cnt[current_index] = 0;
amdtp_hid_wakeup(cli_data->hid_sensor_hubs[current_index]);
+ if (!list_empty(&req_list->list))
+ schedule_delayed_work(&cli_data->work, 0);
}
void amd_sfh_work_buffer(struct work_struct *work)
@@ -117,9 +124,10 @@ void amd_sfh_work_buffer(struct work_struct *work)
u8 report_size;
int i;
+ mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
+ guard(mutex)(&mp2->lock);
for (i = 0; i < cli_data->num_hid_devices; i++) {
if (cli_data->sensor_sts[i] == SENSOR_ENABLED) {
- mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
report_size = mp2->mp2_ops->get_in_rep(i, cli_data->sensor_idx[i],
cli_data->report_id[i], in_data);
hid_input_report(cli_data->hid_sensor_hubs[i], HID_INPUT_REPORT,
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_common.h b/drivers/hid/amd-sfh-hid/amd_sfh_common.h
index f44a3bb2fbd4..78f830c133e5 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_common.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_common.h
@@ -10,6 +10,7 @@
#ifndef AMD_SFH_COMMON_H
#define AMD_SFH_COMMON_H
+#include <linux/mutex.h>
#include <linux/pci.h>
#include "amd_sfh_hid.h"
@@ -59,6 +60,8 @@ struct amd_mp2_dev {
u32 mp2_acs;
struct sfh_dev_status dev_en;
struct work_struct work;
+ /* protects mp2 data */
+ struct mutex lock;
u8 init_done;
u8 rver;
};
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index 2983af969579..1d9f955573aa 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -466,6 +466,10 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
if (!privdata->cl_data)
return -ENOMEM;
+ rc = devm_mutex_init(&pdev->dev, &privdata->lock);
+ if (rc)
+ return rc;
+
privdata->sfh1_1_ops = (const struct amd_sfh1_1_ops *)id->driver_data;
if (privdata->sfh1_1_ops) {
if (boot_cpu_data.x86 >= 0x1A)
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 4b45e31f0bab..a444d41e53b6 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -974,7 +974,10 @@ static int asus_input_mapping(struct hid_device *hdev,
case 0xc4: asus_map_key_clear(KEY_KBDILLUMUP); break;
case 0xc5: asus_map_key_clear(KEY_KBDILLUMDOWN); break;
case 0xc7: asus_map_key_clear(KEY_KBDILLUMTOGGLE); break;
+ case 0x4e: asus_map_key_clear(KEY_FN_ESC); break;
+ case 0x7e: asus_map_key_clear(KEY_EMOJI_PICKER); break;
+ case 0x8b: asus_map_key_clear(KEY_PROG1); break; /* ProArt Creator Hub key */
case 0x6b: asus_map_key_clear(KEY_F21); break; /* ASUS touchpad toggle */
case 0x38: asus_map_key_clear(KEY_PROG1); break; /* ROG key */
case 0xba: asus_map_key_clear(KEY_PROG2); break; /* Fn+C ASUS Splendid */
@@ -1213,7 +1216,13 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
return ret;
}
- if (!drvdata->input) {
+ /*
+ * Check that input registration succeeded. Checking that
+ * HID_CLAIMED_INPUT is set prevents a UAF when all input devices
+ * were freed during registration due to no usages being mapped,
+ * leaving drvdata->input pointing to freed memory.
+ */
+ if (!drvdata->input || !(hdev->claimed & HID_CLAIMED_INPUT)) {
hid_err(hdev, "Asus input not registered\n");
ret = -ENOMEM;
goto err_stop_hw;
@@ -1379,9 +1388,6 @@ static const struct hid_device_id asus_devices[] = {
USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2),
QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
- USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD3),
- QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
- { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_ROG_Z13_LIGHTBAR),
QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
@@ -1411,6 +1417,9 @@ static const struct hid_device_id asus_devices[] = {
* part, while letting hid-multitouch.c handle the touchpad.
*/
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_Z13_FOLIO),
+ QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T101HA_KEYBOARD) },
{ }
};
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 5419a6c10907..a5b3a8ca2fcb 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -944,6 +944,15 @@ static int hid_scan_report(struct hid_device *hid)
hid->group = HID_GROUP_GENERIC;
/*
+ * In case we are re-scanning after a BPF has been loaded,
+ * we need to use the bpf report descriptor, not the original one.
+ */
+ if (hid->bpf_rdesc && hid->bpf_rsize) {
+ start = hid->bpf_rdesc;
+ end = start + hid->bpf_rsize;
+ }
+
+ /*
* The parsing is simpler than the one in hid_open_report() as we should
* be robust against hid errors. Those errors will be raised by
* hid_open_report() anyway.
@@ -2708,12 +2717,32 @@ static bool hid_check_device_match(struct hid_device *hdev,
return !hid_ignore_special_drivers && !(hdev->quirks & HID_QUIRK_IGNORE_SPECIAL_DRIVER);
}
+static void hid_set_group(struct hid_device *hdev)
+{
+ int ret;
+
+ if (hid_ignore_special_drivers) {
+ hdev->group = HID_GROUP_GENERIC;
+ } else if (!hdev->group &&
+ !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)) {
+ ret = hid_scan_report(hdev);
+ if (ret)
+ hid_warn(hdev, "bad device descriptor (%d)\n", ret);
+ }
+}
+
static int __hid_device_probe(struct hid_device *hdev, struct hid_driver *hdrv)
{
const struct hid_device_id *id;
int ret;
if (!hdev->bpf_rsize) {
+ /* we keep a reference to the currently scanned report descriptor */
+ const __u8 *original_rdesc = hdev->bpf_rdesc;
+
+ if (!original_rdesc)
+ original_rdesc = hdev->dev_rdesc;
+
/* in case a bpf program gets detached, we need to free the old one */
hid_free_bpf_rdesc(hdev);
@@ -2723,6 +2752,12 @@ static int __hid_device_probe(struct hid_device *hdev, struct hid_driver *hdrv)
/* call_hid_bpf_rdesc_fixup will always return a valid pointer */
hdev->bpf_rdesc = call_hid_bpf_rdesc_fixup(hdev, hdev->dev_rdesc,
&hdev->bpf_rsize);
+
+ /* the report descriptor changed, we need to re-scan it */
+ if (original_rdesc != hdev->bpf_rdesc) {
+ hdev->group = 0;
+ hid_set_group(hdev);
+ }
}
if (!hid_check_device_match(hdev, hdrv, &id))
@@ -2903,14 +2938,7 @@ int hid_add_device(struct hid_device *hdev)
/*
* Scan generic devices for group information
*/
- if (hid_ignore_special_drivers) {
- hdev->group = HID_GROUP_GENERIC;
- } else if (!hdev->group &&
- !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)) {
- ret = hid_scan_report(hdev);
- if (ret)
- hid_warn(hdev, "bad device descriptor (%d)\n", ret);
- }
+ hid_set_group(hdev);
hdev->id = atomic_inc_return(&id);
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 234fa82eab07..5a95ea3bec98 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -229,10 +229,12 @@ static int cp2112_gpio_set_unlocked(struct cp2112_device *dev,
ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf,
CP2112_GPIO_SET_LENGTH, HID_FEATURE_REPORT,
HID_REQ_SET_REPORT);
- if (ret < 0)
+ if (ret != CP2112_GPIO_SET_LENGTH) {
hid_err(hdev, "error setting GPIO values: %d\n", ret);
+ return ret < 0 ? ret : -EIO;
+ }
- return ret;
+ return 0;
}
static int cp2112_gpio_set(struct gpio_chip *chip, unsigned int offset,
@@ -309,9 +311,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
* Set gpio value when output direction is already set,
* as specified in AN495, Rev. 0.2, cpt. 4.4
*/
- cp2112_gpio_set_unlocked(dev, offset, value);
-
- return 0;
+ return cp2112_gpio_set_unlocked(dev, offset, value);
}
static int cp2112_hid_get(struct hid_device *hdev, unsigned char report_number,
@@ -1288,7 +1288,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
dev->gc.label = "cp2112_gpio";
dev->gc.direction_input = cp2112_gpio_direction_input;
dev->gc.direction_output = cp2112_gpio_direction_output;
- dev->gc.set_rv = cp2112_gpio_set;
+ dev->gc.set = cp2112_gpio_set;
dev->gc.get = cp2112_gpio_get;
dev->gc.base = -1;
dev->gc.ngpio = CP2112_GPIO_MAX_GPIO;
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c
index 0ad7d25d9864..69771fd35006 100644
--- a/drivers/hid/hid-elecom.c
+++ b/drivers/hid/hid-elecom.c
@@ -101,6 +101,7 @@ static const __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
*/
mouse_button_fixup(hdev, rdesc, *rsize, 12, 30, 14, 20, 8);
break;
+ case USB_DEVICE_ID_ELECOM_M_DT2DRBK:
case USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C:
/*
* Report descriptor format:
@@ -123,6 +124,7 @@ static const struct hid_device_id elecom_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT4DRBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1URBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1DRBK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT2DRBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1URBK_010C) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1URBK_019B) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D) },
diff --git a/drivers/hid/hid-haptic.c b/drivers/hid/hid-haptic.c
new file mode 100644
index 000000000000..aa090684c1f2
--- /dev/null
+++ b/drivers/hid/hid-haptic.c
@@ -0,0 +1,580 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * HID Haptic support for Linux
+ *
+ * Copyright (c) 2021 Angela Czubak <acz@semihalf.com>
+ */
+
+#include <linux/input/mt.h>
+#include <linux/module.h>
+
+#include "hid-haptic.h"
+
+void hid_haptic_feature_mapping(struct hid_device *hdev,
+ struct hid_haptic_device *haptic,
+ struct hid_field *field, struct hid_usage *usage)
+{
+ u16 usage_hid;
+
+ if (usage->hid == HID_HP_AUTOTRIGGER) {
+ if (usage->usage_index >= field->report_count) {
+ dev_err(&hdev->dev,
+ "HID_HP_AUTOTRIGGER out of range\n");
+ return;
+ }
+
+ hid_device_io_start(hdev);
+ hid_hw_request(hdev, field->report, HID_REQ_GET_REPORT);
+ hid_hw_wait(hdev);
+ hid_device_io_stop(hdev);
+ haptic->default_auto_trigger =
+ field->value[usage->usage_index];
+ haptic->auto_trigger_report = field->report;
+ } else if ((usage->hid & HID_USAGE_PAGE) == HID_UP_ORDINAL) {
+ usage_hid = usage->hid & HID_USAGE;
+ switch (field->logical) {
+ case HID_HP_WAVEFORMLIST:
+ if (usage_hid > haptic->max_waveform_id)
+ haptic->max_waveform_id = usage_hid;
+ break;
+ case HID_HP_DURATIONLIST:
+ if (usage_hid > haptic->max_duration_id)
+ haptic->max_duration_id = usage_hid;
+ break;
+ default:
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(hid_haptic_feature_mapping);
+
+bool hid_haptic_check_pressure_unit(struct hid_haptic_device *haptic,
+ struct hid_input *hi, struct hid_field *field)
+{
+ if (field->unit == HID_UNIT_GRAM || field->unit == HID_UNIT_NEWTON) {
+ haptic->force_logical_minimum = field->logical_minimum;
+ haptic->force_physical_minimum = field->physical_minimum;
+ haptic->force_resolution = input_abs_get_res(hi->input,
+ ABS_MT_PRESSURE);
+ return true;
+ }
+ return false;
+}
+EXPORT_SYMBOL_GPL(hid_haptic_check_pressure_unit);
+
+int hid_haptic_input_mapping(struct hid_device *hdev,
+ struct hid_haptic_device *haptic,
+ struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ if (usage->hid == HID_HP_MANUALTRIGGER) {
+ haptic->manual_trigger_report = field->report;
+ /* we don't really want to map these fields */
+ return -1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hid_haptic_input_mapping);
+
+int hid_haptic_input_configured(struct hid_device *hdev,
+ struct hid_haptic_device *haptic,
+ struct hid_input *hi)
+{
+ if (hi->application == HID_DG_TOUCHPAD) {
+ if (haptic->auto_trigger_report &&
+ haptic->manual_trigger_report) {
+ __set_bit(INPUT_PROP_HAPTIC_TOUCHPAD, hi->input->propbit);
+ return 1;
+ }
+ return 0;
+ }
+ return -1;
+}
+EXPORT_SYMBOL_GPL(hid_haptic_input_configured);
+
+static void parse_auto_trigger_field(struct hid_haptic_device *haptic,
+ struct hid_field *field)
+{
+ int count = field->report_count;
+ int n;
+ u16 usage_hid;
+
+ for (n = 0; n < count; n++) {
+ switch (field->usage[n].hid & HID_USAGE_PAGE) {
+ case HID_UP_ORDINAL:
+ usage_hid = field->usage[n].hid & HID_USAGE;
+ switch (field->logical) {
+ case HID_HP_WAVEFORMLIST:
+ haptic->hid_usage_map[usage_hid] = field->value[n];
+ if (field->value[n] ==
+ (HID_HP_WAVEFORMPRESS & HID_USAGE)) {
+ haptic->press_ordinal = usage_hid;
+ } else if (field->value[n] ==
+ (HID_HP_WAVEFORMRELEASE & HID_USAGE)) {
+ haptic->release_ordinal = usage_hid;
+ }
+ break;
+ case HID_HP_DURATIONLIST:
+ haptic->duration_map[usage_hid] =
+ field->value[n];
+ break;
+ default:
+ break;
+ }
+ break;
+ case HID_UP_HAPTIC:
+ switch (field->usage[n].hid) {
+ case HID_HP_WAVEFORMVENDORID:
+ haptic->vendor_id = field->value[n];
+ break;
+ case HID_HP_WAVEFORMVENDORPAGE:
+ haptic->vendor_page = field->value[n];
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ /* Should not really happen */
+ break;
+ }
+ }
+}
+
+static void fill_effect_buf(struct hid_haptic_device *haptic,
+ struct ff_haptic_effect *effect,
+ struct hid_haptic_effect *haptic_effect,
+ int waveform_ordinal)
+{
+ struct hid_report *rep = haptic->manual_trigger_report;
+ struct hid_usage *usage;
+ struct hid_field *field;
+ s32 value;
+ int i, j;
+ u8 *buf = haptic_effect->report_buf;
+
+ mutex_lock(&haptic->manual_trigger_mutex);
+ for (i = 0; i < rep->maxfield; i++) {
+ field = rep->field[i];
+ /* Ignore if report count is out of bounds. */
+ if (field->report_count < 1)
+ continue;
+
+ for (j = 0; j < field->maxusage; j++) {
+ usage = &field->usage[j];
+
+ switch (usage->hid) {
+ case HID_HP_INTENSITY:
+ if (effect->intensity > 100) {
+ value = field->logical_maximum;
+ } else {
+ value = field->logical_minimum +
+ effect->intensity *
+ (field->logical_maximum -
+ field->logical_minimum) / 100;
+ }
+ break;
+ case HID_HP_REPEATCOUNT:
+ value = effect->repeat_count;
+ break;
+ case HID_HP_RETRIGGERPERIOD:
+ value = effect->retrigger_period;
+ break;
+ case HID_HP_MANUALTRIGGER:
+ value = waveform_ordinal;
+ break;
+ default:
+ continue; /* skip usages we do not set; `value` would be stale */
+ }
+
+ field->value[j] = value;
+ }
+ }
+
+ hid_output_report(rep, buf);
+ mutex_unlock(&haptic->manual_trigger_mutex);
+}
+
+static void switch_mode(struct hid_device *hdev, struct hid_haptic_device *haptic,
+ int mode)
+{
+ struct hid_report *rep = haptic->auto_trigger_report;
+ struct hid_field *field;
+ s32 value;
+ int i, j;
+
+ if (mode == HID_HAPTIC_MODE_HOST)
+ value = HID_HAPTIC_ORDINAL_WAVEFORMSTOP;
+ else
+ value = haptic->default_auto_trigger;
+
+ mutex_lock(&haptic->auto_trigger_mutex);
+ for (i = 0; i < rep->maxfield; i++) {
+ field = rep->field[i];
+ /* Ignore if report count is out of bounds. */
+ if (field->report_count < 1)
+ continue;
+
+ for (j = 0; j < field->maxusage; j++) {
+ if (field->usage[j].hid == HID_HP_AUTOTRIGGER)
+ field->value[j] = value;
+ }
+ }
+
+ /* send the report */
+ hid_hw_request(hdev, rep, HID_REQ_SET_REPORT);
+ mutex_unlock(&haptic->auto_trigger_mutex);
+ haptic->mode = mode;
+}
+
+static int hid_haptic_upload_effect(struct input_dev *dev, struct ff_effect *effect,
+ struct ff_effect *old)
+{
+ struct hid_device *hdev = input_get_drvdata(dev);
+ struct ff_device *ff = dev->ff;
+ struct hid_haptic_device *haptic = ff->private;
+ int i, ordinal = 0;
+ bool switch_modes = false;
+
+ /* If vendor range, check vendor id and page */
+ if (effect->u.haptic.hid_usage >= (HID_HP_VENDORWAVEFORMMIN & HID_USAGE) &&
+ effect->u.haptic.hid_usage <= (HID_HP_VENDORWAVEFORMMAX & HID_USAGE) &&
+ (effect->u.haptic.vendor_id != haptic->vendor_id ||
+ effect->u.haptic.vendor_waveform_page != haptic->vendor_page))
+ return -EINVAL;
+
+ /* Check hid_usage */
+ for (i = 1; i <= haptic->max_waveform_id; i++) {
+ if (haptic->hid_usage_map[i] == effect->u.haptic.hid_usage) {
+ ordinal = i;
+ break;
+ }
+ }
+ if (ordinal < 1)
+ return -EINVAL;
+
+ /* Fill the buffer for the effect id */
+ fill_effect_buf(haptic, &effect->u.haptic, &haptic->effect[effect->id],
+ ordinal);
+
+ if (effect->u.haptic.hid_usage == (HID_HP_WAVEFORMPRESS & HID_USAGE) ||
+ effect->u.haptic.hid_usage == (HID_HP_WAVEFORMRELEASE & HID_USAGE))
+ switch_modes = true;
+
+ /* If the device is in autonomous mode and the uploaded effect signals
+ * that userspace wants control of the device, change modes.
+ */
+ if (switch_modes && haptic->mode == HID_HAPTIC_MODE_DEVICE)
+ switch_mode(hdev, haptic, HID_HAPTIC_MODE_HOST);
+
+ return 0;
+}
+
+static int play_effect(struct hid_device *hdev, struct hid_haptic_device *haptic,
+ struct hid_haptic_effect *effect)
+{
+ int ret;
+
+ ret = hid_hw_output_report(hdev, effect->report_buf,
+ haptic->manual_trigger_report_len);
+ if (ret < 0) {
+ ret = hid_hw_raw_request(hdev,
+ haptic->manual_trigger_report->id,
+ effect->report_buf,
+ haptic->manual_trigger_report_len,
+ HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
+ }
+
+ return ret;
+}
+
+static void haptic_work_handler(struct work_struct *work)
+{
+ struct hid_haptic_effect *effect = container_of(work,
+ struct hid_haptic_effect,
+ work);
+ struct input_dev *dev = effect->input_dev;
+ struct hid_device *hdev = input_get_drvdata(dev);
+ struct hid_haptic_device *haptic = dev->ff->private;
+
+ mutex_lock(&haptic->manual_trigger_mutex);
+ if (effect != &haptic->stop_effect)
+ play_effect(hdev, haptic, &haptic->stop_effect);
+
+ play_effect(hdev, haptic, effect);
+ mutex_unlock(&haptic->manual_trigger_mutex);
+}
+
+static int hid_haptic_playback(struct input_dev *dev, int effect_id, int value)
+{
+ struct hid_haptic_device *haptic = dev->ff->private;
+
+ if (value)
+ queue_work(haptic->wq, &haptic->effect[effect_id].work);
+ else
+ queue_work(haptic->wq, &haptic->stop_effect.work);
+
+ return 0;
+}
+
+static void effect_set_default(struct ff_effect *effect)
+{
+ effect->type = FF_HAPTIC;
+ effect->id = -1;
+ effect->u.haptic.hid_usage = HID_HP_WAVEFORMNONE & HID_USAGE;
+ effect->u.haptic.intensity = 100;
+ effect->u.haptic.retrigger_period = 0;
+ effect->u.haptic.repeat_count = 0;
+}
+
+static int hid_haptic_erase(struct input_dev *dev, int effect_id)
+{
+ struct hid_haptic_device *haptic = dev->ff->private;
+ struct hid_device *hdev = input_get_drvdata(dev);
+ struct ff_effect effect;
+ int ordinal;
+
+ effect_set_default(&effect);
+
+ if (effect.u.haptic.hid_usage == (HID_HP_WAVEFORMRELEASE & HID_USAGE)) {
+ ordinal = haptic->release_ordinal;
+ if (!ordinal) {
+ ordinal = HID_HAPTIC_ORDINAL_WAVEFORMNONE;
+ if (haptic->mode == HID_HAPTIC_MODE_HOST)
+ switch_mode(hdev, haptic, HID_HAPTIC_MODE_DEVICE);
+ } else
+ effect.u.haptic.hid_usage = HID_HP_WAVEFORMRELEASE & HID_USAGE;
+
+ fill_effect_buf(haptic, &effect.u.haptic, &haptic->effect[effect_id],
+ ordinal);
+ } else if (effect.u.haptic.hid_usage == (HID_HP_WAVEFORMPRESS & HID_USAGE)) {
+ ordinal = haptic->press_ordinal;
+ if (!ordinal) {
+ ordinal = HID_HAPTIC_ORDINAL_WAVEFORMNONE;
+ if (haptic->mode == HID_HAPTIC_MODE_HOST)
+ switch_mode(hdev, haptic, HID_HAPTIC_MODE_DEVICE);
+ } else
+ effect.u.haptic.hid_usage = HID_HP_WAVEFORMPRESS & HID_USAGE;
+
+ fill_effect_buf(haptic, &effect.u.haptic, &haptic->effect[effect_id],
+ ordinal);
+ }
+
+ return 0;
+}
+
+static void hid_haptic_destroy(struct ff_device *ff)
+{
+ struct hid_haptic_device *haptic = ff->private;
+ struct hid_device *hdev = haptic->hdev;
+ int r;
+
+ if (hdev)
+ put_device(&hdev->dev);
+
+ kfree(haptic->stop_effect.report_buf);
+ haptic->stop_effect.report_buf = NULL;
+
+ if (haptic->effect) {
+ for (r = 0; r < ff->max_effects; r++)
+ kfree(haptic->effect[r].report_buf);
+ kfree(haptic->effect);
+ }
+ haptic->effect = NULL;
+
+ destroy_workqueue(haptic->wq);
+ haptic->wq = NULL;
+
+ kfree(haptic->duration_map);
+ haptic->duration_map = NULL;
+
+ kfree(haptic->hid_usage_map);
+ haptic->hid_usage_map = NULL;
+
+ module_put(THIS_MODULE);
+}
+
+int hid_haptic_init(struct hid_device *hdev,
+ struct hid_haptic_device **haptic_ptr)
+{
+ struct hid_haptic_device *haptic = *haptic_ptr;
+ struct input_dev *dev = NULL;
+ struct hid_input *hidinput;
+ struct ff_device *ff;
+ int ret = 0, r;
+ struct ff_haptic_effect stop_effect = {
+ .hid_usage = HID_HP_WAVEFORMSTOP & HID_USAGE,
+ };
+ const char *prefix = "hid-haptic";
+ char *name;
+ int (*flush)(struct input_dev *dev, struct file *file);
+ int (*event)(struct input_dev *dev, unsigned int type, unsigned int code, int value);
+
+ haptic->hdev = hdev;
+ haptic->max_waveform_id = max(2u, haptic->max_waveform_id);
+ haptic->max_duration_id = max(2u, haptic->max_duration_id);
+
+ haptic->hid_usage_map = kcalloc(haptic->max_waveform_id + 1,
+ sizeof(u16), GFP_KERNEL);
+ if (!haptic->hid_usage_map) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ haptic->duration_map = kcalloc(haptic->max_duration_id + 1,
+ sizeof(u32), GFP_KERNEL);
+ if (!haptic->duration_map) {
+ ret = -ENOMEM;
+ goto usage_map;
+ }
+
+ if (haptic->max_waveform_id != haptic->max_duration_id)
+ dev_warn(&hdev->dev,
+ "Haptic duration and waveform lists have different max id (%u and %u).\n",
+ haptic->max_duration_id, haptic->max_waveform_id);
+
+ haptic->hid_usage_map[HID_HAPTIC_ORDINAL_WAVEFORMNONE] =
+ HID_HP_WAVEFORMNONE & HID_USAGE;
+ haptic->hid_usage_map[HID_HAPTIC_ORDINAL_WAVEFORMSTOP] =
+ HID_HP_WAVEFORMSTOP & HID_USAGE;
+
+ mutex_init(&haptic->auto_trigger_mutex);
+ for (r = 0; r < haptic->auto_trigger_report->maxfield; r++)
+ parse_auto_trigger_field(haptic, haptic->auto_trigger_report->field[r]);
+
+ list_for_each_entry(hidinput, &hdev->inputs, list) {
+ if (hidinput->application == HID_DG_TOUCHPAD) {
+ dev = hidinput->input;
+ break;
+ }
+ }
+
+ if (!dev) {
+ dev_err(&hdev->dev, "Failed to find the input device\n");
+ ret = -ENODEV;
+ goto duration_map;
+ }
+
+ haptic->input_dev = dev;
+ haptic->manual_trigger_report_len =
+ hid_report_len(haptic->manual_trigger_report);
+ mutex_init(&haptic->manual_trigger_mutex);
+ name = kmalloc(strlen(prefix) + strlen(hdev->name) + 2, GFP_KERNEL);
+ if (name) {
+ sprintf(name, "%s %s", prefix, hdev->name);
+ haptic->wq = create_singlethread_workqueue(name);
+ kfree(name);
+ }
+ if (!haptic->wq) {
+ ret = -ENOMEM;
+ goto duration_map;
+ }
+ haptic->effect = kcalloc(FF_MAX_EFFECTS,
+ sizeof(struct hid_haptic_effect), GFP_KERNEL);
+ if (!haptic->effect) {
+ ret = -ENOMEM;
+ goto output_queue;
+ }
+ for (r = 0; r < FF_MAX_EFFECTS; r++) {
+ haptic->effect[r].report_buf =
+ hid_alloc_report_buf(haptic->manual_trigger_report,
+ GFP_KERNEL);
+ if (!haptic->effect[r].report_buf) {
+ dev_err(&hdev->dev,
+ "Failed to allocate a buffer for an effect.\n");
+ ret = -ENOMEM;
+ goto buffer_free;
+ }
+ haptic->effect[r].input_dev = dev;
+ INIT_WORK(&haptic->effect[r].work, haptic_work_handler);
+ }
+ haptic->stop_effect.report_buf =
+ hid_alloc_report_buf(haptic->manual_trigger_report,
+ GFP_KERNEL);
+ if (!haptic->stop_effect.report_buf) {
+ dev_err(&hdev->dev,
+ "Failed to allocate a buffer for stop effect.\n");
+ ret = -ENOMEM;
+ goto buffer_free;
+ }
+ haptic->stop_effect.input_dev = dev;
+ INIT_WORK(&haptic->stop_effect.work, haptic_work_handler);
+ fill_effect_buf(haptic, &stop_effect, &haptic->stop_effect,
+ HID_HAPTIC_ORDINAL_WAVEFORMSTOP);
+
+ input_set_capability(dev, EV_FF, FF_HAPTIC);
+
+ flush = dev->flush;
+ event = dev->event;
+ ret = input_ff_create(dev, FF_MAX_EFFECTS);
+ if (ret) {
+ dev_err(&hdev->dev, "Failed to create ff device.\n");
+ goto stop_buffer_free;
+ }
+
+ ff = dev->ff;
+ ff->private = haptic;
+ ff->upload = hid_haptic_upload_effect;
+ ff->playback = hid_haptic_playback;
+ ff->erase = hid_haptic_erase;
+ ff->destroy = hid_haptic_destroy;
+ if (!try_module_get(THIS_MODULE)) {
+ dev_err(&hdev->dev, "Failed to increase module count.\n");
+ ret = -EBUSY;
+ goto input_free;
+ }
+ if (!get_device(&hdev->dev)) {
+ dev_err(&hdev->dev, "Failed to get hdev device.\n");
+ ret = -ENODEV;
+ module_put(THIS_MODULE);
+ goto input_free;
+ }
+ return 0;
+
+input_free:
+ input_ff_destroy(dev);
+ /*
+ * Do not let a double free happen: input_ff_destroy() will call
+ * hid_haptic_destroy().
+ */
+ *haptic_ptr = NULL;
+ /* Restore dev flush and event */
+ dev->flush = flush;
+ dev->event = event;
+ return ret;
+stop_buffer_free:
+ kfree(haptic->stop_effect.report_buf);
+ haptic->stop_effect.report_buf = NULL;
+buffer_free:
+ while (--r >= 0)
+ kfree(haptic->effect[r].report_buf);
+ kfree(haptic->effect);
+ haptic->effect = NULL;
+output_queue:
+ destroy_workqueue(haptic->wq);
+ haptic->wq = NULL;
+duration_map:
+ kfree(haptic->duration_map);
+ haptic->duration_map = NULL;
+usage_map:
+ kfree(haptic->hid_usage_map);
+ haptic->hid_usage_map = NULL;
+exit:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hid_haptic_init);
+
+void hid_haptic_pressure_reset(struct hid_haptic_device *haptic)
+{
+ haptic->pressure_sum = 0;
+}
+EXPORT_SYMBOL_GPL(hid_haptic_pressure_reset);
+
+void hid_haptic_pressure_increase(struct hid_haptic_device *haptic,
+ __s32 pressure)
+{
+ haptic->pressure_sum += pressure;
+}
+EXPORT_SYMBOL_GPL(hid_haptic_pressure_increase);
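
hid_haptic_init() above uses the usual kernel goto-unwind ladder: each failure
jumps to the label that frees exactly what was allocated so far, in reverse
order, NULLing the pointers so a later destroy cannot double free. A minimal
sketch of the idiom (hypothetical names, not part of the patch):

    struct example {
            void *a;
            void *b;
    };

    static int example_init(struct example *ex)
    {
            int ret;

            ex->a = kzalloc(16, GFP_KERNEL);
            if (!ex->a)
                    return -ENOMEM;

            ex->b = kzalloc(16, GFP_KERNEL);
            if (!ex->b) {
                    ret = -ENOMEM;
                    goto free_a;
            }
            return 0;

    free_a:
            kfree(ex->a);
            ex->a = NULL;    /* make a later cleanup pass a no-op */
            return ret;
    }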
diff --git a/drivers/hid/hid-haptic.h b/drivers/hid/hid-haptic.h
new file mode 100644
index 000000000000..c6539ac04c1d
--- /dev/null
+++ b/drivers/hid/hid-haptic.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * HID Haptic support for Linux
+ *
+ * Copyright (c) 2021 Angela Czubak <acz@semihalf.com>
+ */
+
+#include <linux/hid.h>
+
+#define HID_HAPTIC_ORDINAL_WAVEFORMNONE 1
+#define HID_HAPTIC_ORDINAL_WAVEFORMSTOP 2
+
+#define HID_HAPTIC_MODE_DEVICE 0
+#define HID_HAPTIC_MODE_HOST 1
+
+struct hid_haptic_effect {
+ u8 *report_buf;
+ struct input_dev *input_dev;
+ struct work_struct work;
+ struct list_head control;
+ struct mutex control_mutex;
+};
+
+struct hid_haptic_effect_node {
+ struct list_head node;
+ struct file *file;
+};
+
+struct hid_haptic_device {
+ struct input_dev *input_dev;
+ struct hid_device *hdev;
+ struct hid_report *auto_trigger_report;
+ struct mutex auto_trigger_mutex;
+ struct workqueue_struct *wq;
+ struct hid_report *manual_trigger_report;
+ struct mutex manual_trigger_mutex;
+ size_t manual_trigger_report_len;
+ int pressed_state;
+ s32 pressure_sum;
+ s32 force_logical_minimum;
+ s32 force_physical_minimum;
+ s32 force_resolution;
+ u32 mode;
+ u32 default_auto_trigger;
+ u32 vendor_page;
+ u32 vendor_id;
+ u32 max_waveform_id;
+ u32 max_duration_id;
+ u16 *hid_usage_map;
+ u32 *duration_map;
+ u16 press_ordinal;
+ u16 release_ordinal;
+ struct hid_haptic_effect *effect;
+ struct hid_haptic_effect stop_effect;
+};
+
+#if IS_ENABLED(CONFIG_HID_HAPTIC)
+void hid_haptic_feature_mapping(struct hid_device *hdev,
+ struct hid_haptic_device *haptic,
+ struct hid_field *field, struct hid_usage
+ *usage);
+bool hid_haptic_check_pressure_unit(struct hid_haptic_device *haptic,
+ struct hid_input *hi, struct hid_field *field);
+int hid_haptic_input_mapping(struct hid_device *hdev,
+ struct hid_haptic_device *haptic,
+ struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max);
+int hid_haptic_input_configured(struct hid_device *hdev,
+ struct hid_haptic_device *haptic,
+ struct hid_input *hi);
+int hid_haptic_init(struct hid_device *hdev, struct hid_haptic_device **haptic_ptr);
+void hid_haptic_handle_press_release(struct hid_haptic_device *haptic);
+void hid_haptic_pressure_reset(struct hid_haptic_device *haptic);
+void hid_haptic_pressure_increase(struct hid_haptic_device *haptic,
+ __s32 pressure);
+#else
+static inline
+void hid_haptic_feature_mapping(struct hid_device *hdev,
+ struct hid_haptic_device *haptic,
+ struct hid_field *field, struct hid_usage
+ *usage)
+{}
+static inline
+bool hid_haptic_check_pressure_unit(struct hid_haptic_device *haptic,
+ struct hid_input *hi, struct hid_field *field)
+{
+ return false;
+}
+static inline
+int hid_haptic_input_mapping(struct hid_device *hdev,
+ struct hid_haptic_device *haptic,
+ struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ return 0;
+}
+static inline
+int hid_haptic_input_configured(struct hid_device *hdev,
+ struct hid_haptic_device *haptic,
+ struct hid_input *hi)
+{
+ return 0;
+}
+static inline
+void hid_haptic_reset(struct hid_device *hdev, struct hid_haptic_device *haptic)
+{}
+static inline
+int hid_haptic_init(struct hid_device *hdev, struct hid_haptic_device **haptic_ptr)
+{
+ return 0;
+}
+static inline
+void hid_haptic_handle_press_release(struct hid_haptic_device *haptic) {}
+static inline
+bool hid_haptic_handle_input(struct hid_haptic_device *haptic)
+{
+ return false;
+}
+static inline
+void hid_haptic_pressure_reset(struct hid_haptic_device *haptic) {}
+static inline
+void hid_haptic_pressure_increase(struct hid_haptic_device *haptic,
+ __s32 pressure)
+{}
+#endif
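
The IS_ENABLED()/static-inline split above is the standard shape for optional
kernel features: callers such as hid-multitouch compile unchanged whether or
not CONFIG_HID_HAPTIC is set. The same shape, sketched for a hypothetical
CONFIG_FOO:

    struct foo_dev;

    #if IS_ENABLED(CONFIG_FOO)
    int foo_init(struct foo_dev *fdev);
    #else
    static inline int foo_init(struct foo_dev *fdev)
    {
            return 0;    /* report success so callers need no #ifdef */
    }
    #endif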
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 5a1096283855..5721b8414bbd 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -223,7 +223,7 @@
#define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3 0x1822
#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD 0x1866
#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2 0x19b6
-#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD3 0x1a30
+#define USB_DEVICE_ID_ASUSTEK_ROG_Z13_FOLIO 0x1a30
#define USB_DEVICE_ID_ASUSTEK_ROG_Z13_LIGHTBAR 0x18c6
#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY 0x1abe
#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY_X 0x1b4c
@@ -451,6 +451,7 @@
#define USB_DEVICE_ID_ELECOM_M_XT4DRBK 0x00fd
#define USB_DEVICE_ID_ELECOM_M_DT1URBK 0x00fe
#define USB_DEVICE_ID_ELECOM_M_DT1DRBK 0x00ff
+#define USB_DEVICE_ID_ELECOM_M_DT2DRBK 0x018d
#define USB_DEVICE_ID_ELECOM_M_HT1URBK_010C 0x010c
#define USB_DEVICE_ID_ELECOM_M_HT1URBK_019B 0x019b
#define USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D 0x010d
@@ -834,6 +835,8 @@
#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019 0x6019
#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E 0x602e
#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6093 0x6093
+#define USB_DEVICE_ID_LENOVO_LEGION_GO_DUAL_DINPUT 0x6184
+#define USB_DEVICE_ID_LENOVO_LEGION_GO2_DUAL_DINPUT 0x61ed
#define USB_VENDOR_ID_LETSKETCH 0x6161
#define USB_DEVICE_ID_WP9620N 0x4d15
@@ -907,6 +910,7 @@
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2 0xc534
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1 0xc539
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1 0xc53f
+#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_2 0xc543
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY 0xc53a
#define USB_DEVICE_ID_LOGITECH_BOLT_RECEIVER 0xc548
#define USB_DEVICE_ID_SPACETRAVELLER 0xc623
@@ -1292,6 +1296,8 @@
#define USB_VENDOR_ID_STEELSERIES 0x1038
#define USB_DEVICE_ID_STEELSERIES_SRWS1 0x1410
+#define USB_DEVICE_ID_STEELSERIES_ARCTIS_1 0x12b6
+#define USB_DEVICE_ID_STEELSERIES_ARCTIS_9 0x12c2
#define USB_VENDOR_ID_SUN 0x0430
#define USB_DEVICE_ID_RARITAN_KVM_DONGLE 0xcdab
diff --git a/drivers/hid/hid-input-test.c b/drivers/hid/hid-input-test.c
index 77c2d45ac62a..6f5c71660d82 100644
--- a/drivers/hid/hid-input-test.c
+++ b/drivers/hid/hid-input-test.c
@@ -7,7 +7,7 @@
#include <kunit/test.h>
-static void hid_test_input_set_battery_charge_status(struct kunit *test)
+static void hid_test_input_update_battery_charge_status(struct kunit *test)
{
struct hid_device *dev;
bool handled;
@@ -15,15 +15,15 @@ static void hid_test_input_set_battery_charge_status(struct kunit *test)
dev = kunit_kzalloc(test, sizeof(*dev), GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
- handled = hidinput_set_battery_charge_status(dev, HID_DG_HEIGHT, 0);
+ handled = hidinput_update_battery_charge_status(dev, HID_DG_HEIGHT, 0);
KUNIT_EXPECT_FALSE(test, handled);
KUNIT_EXPECT_EQ(test, dev->battery_charge_status, POWER_SUPPLY_STATUS_UNKNOWN);
- handled = hidinput_set_battery_charge_status(dev, HID_BAT_CHARGING, 0);
+ handled = hidinput_update_battery_charge_status(dev, HID_BAT_CHARGING, 0);
KUNIT_EXPECT_TRUE(test, handled);
KUNIT_EXPECT_EQ(test, dev->battery_charge_status, POWER_SUPPLY_STATUS_DISCHARGING);
- handled = hidinput_set_battery_charge_status(dev, HID_BAT_CHARGING, 1);
+ handled = hidinput_update_battery_charge_status(dev, HID_BAT_CHARGING, 1);
KUNIT_EXPECT_TRUE(test, handled);
KUNIT_EXPECT_EQ(test, dev->battery_charge_status, POWER_SUPPLY_STATUS_CHARGING);
}
@@ -63,7 +63,7 @@ static void hid_test_input_get_battery_property(struct kunit *test)
}
static struct kunit_case hid_input_tests[] = {
- KUNIT_CASE(hid_test_input_set_battery_charge_status),
+ KUNIT_CASE(hid_test_input_update_battery_charge_status),
KUNIT_CASE(hid_test_input_get_battery_property),
{ }
};
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index ff1784b5c2a4..5d7532d79d21 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -303,6 +303,19 @@ __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code)
}
break;
+ case ABS_PRESSURE:
+ case ABS_MT_PRESSURE:
+ if (field->unit == HID_UNIT_NEWTON) {
+ /* Convert to grams, 1 newton is 101.97 grams */
+ prev = physical_extents;
+ physical_extents *= 10197;
+ if (physical_extents < prev)
+ return 0;
+ unit_exponent -= 2;
+ } else if (field->unit != HID_UNIT_GRAM) {
+ return 0;
+ }
+ break;
default:
return 0;
}
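
The Newton branch above keeps the math in integers: scaling by 10197 while
lowering the unit exponent by two multiplies the physical range by 101.97,
the gram-force equivalent of one newton. A worked sketch of the arithmetic:

    /*
     * A touchpad reporting a 0..2 N range with unit_exponent == 0 becomes
     *
     *     physical_extents = 2 * 10197 = 20394;   unit_exponent = -2;
     *
     * i.e. 20394 * 10^-2 = 203.94 g, matching 1 N ~= 101.97 gram-force.
     * The "physical_extents < prev" test rejects a multiplication that
     * wrapped around the unsigned range, reporting no resolution instead.
     */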
@@ -595,13 +608,33 @@ static void hidinput_cleanup_battery(struct hid_device *dev)
dev->battery = NULL;
}
-static void hidinput_update_battery(struct hid_device *dev, int value)
+static bool hidinput_update_battery_charge_status(struct hid_device *dev,
+ unsigned int usage, int value)
+{
+ switch (usage) {
+ case HID_BAT_CHARGING:
+ dev->battery_charge_status = value ?
+ POWER_SUPPLY_STATUS_CHARGING :
+ POWER_SUPPLY_STATUS_DISCHARGING;
+ return true;
+ }
+
+ return false;
+}
+
+static void hidinput_update_battery(struct hid_device *dev, unsigned int usage,
+ int value)
{
int capacity;
if (!dev->battery)
return;
+ if (hidinput_update_battery_charge_status(dev, usage, value)) {
+ power_supply_changed(dev->battery);
+ return;
+ }
+
if (value == 0 || value < dev->battery_min || value > dev->battery_max)
return;
@@ -617,20 +650,6 @@ static void hidinput_update_battery(struct hid_device *dev, int value)
power_supply_changed(dev->battery);
}
}
-
-static bool hidinput_set_battery_charge_status(struct hid_device *dev,
- unsigned int usage, int value)
-{
- switch (usage) {
- case HID_BAT_CHARGING:
- dev->battery_charge_status = value ?
- POWER_SUPPLY_STATUS_CHARGING :
- POWER_SUPPLY_STATUS_DISCHARGING;
- return true;
- }
-
- return false;
-}
#else /* !CONFIG_HID_BATTERY_STRENGTH */
static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
struct hid_field *field, bool is_percentage)
@@ -642,14 +661,9 @@ static void hidinput_cleanup_battery(struct hid_device *dev)
{
}
-static void hidinput_update_battery(struct hid_device *dev, int value)
-{
-}
-
-static bool hidinput_set_battery_charge_status(struct hid_device *dev,
- unsigned int usage, int value)
+static void hidinput_update_battery(struct hid_device *dev, unsigned int usage,
+ int value)
{
- return false;
}
#endif /* CONFIG_HID_BATTERY_STRENGTH */
@@ -682,9 +696,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
if (field->report_count < 1)
goto ignore;
- /* only LED usages are supported in output fields */
+ /* only LED and HAPTIC usages are supported in output fields */
if (field->report_type == HID_OUTPUT_REPORT &&
- (usage->hid & HID_USAGE_PAGE) != HID_UP_LED) {
+ (usage->hid & HID_USAGE_PAGE) != HID_UP_LED &&
+ (usage->hid & HID_USAGE_PAGE) != HID_UP_HAPTIC) {
goto ignore;
}
@@ -1515,11 +1530,7 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
return;
if (usage->type == EV_PWR) {
- bool handled = hidinput_set_battery_charge_status(hid, usage->hid, value);
-
- if (!handled)
- hidinput_update_battery(hid, value);
-
+ hidinput_update_battery(hid, usage->hid, value);
return;
}
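
With the refactor above, every EV_PWR event funnels through a single entry
point, and charge-status usages short-circuit before the value is treated as
a capacity. The resulting flow, sketched:

    /*
     * hidinput_hid_event()                        (EV_PWR usages only)
     *   -> hidinput_update_battery(hid, usage->hid, value)
     *        -> hidinput_update_battery_charge_status()
     *             HID_BAT_CHARGING: set battery_charge_status,
     *                               power_supply_changed(), done
     *             otherwise:        clamp value to [battery_min,
     *                               battery_max], scale to a capacity,
     *                               notify only if it changed
     */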
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index b3121fa7a72d..654879814f97 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -32,8 +32,6 @@
#include <linux/leds.h>
#include <linux/workqueue.h>
-#include <linux/platform_profile.h>
-
#include "hid-ids.h"
/* Userspace expects F20 for mic-mute; KEY_MICMUTE does not work */
@@ -734,7 +732,7 @@ static int lenovo_raw_event_TP_X12_tab(struct hid_device *hdev, u32 raw_data)
report_key_event(input, KEY_RFKILL);
return 1;
}
- platform_profile_cycle();
+ report_key_event(input, KEY_PERFORMANCE);
return 1;
case TP_X12_RAW_HOTKEY_FN_F10:
/* TAB1 has PICKUP Phone and TAB2 use Snipping tool*/
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 34fa71ceec2b..cce54dd9884a 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -1983,6 +1983,10 @@ static const struct hid_device_id logi_dj_receivers[] = {
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1),
.driver_data = recvr_type_gaming_hidpp},
+ { /* Logitech lightspeed receiver (0xc543) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_2),
+ .driver_data = recvr_type_gaming_hidpp},
{ /* Logitech 27 MHz HID++ 1.0 receiver (0xc513) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER),
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 10a3bc5f931b..aaef405a717e 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -4596,6 +4596,8 @@ static const struct hid_device_id hidpp_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC094) },
{ /* Logitech G Pro X Superlight 2 Gaming Mouse over USB */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC09b) },
+ { /* Logitech G PRO 2 LIGHTSPEED Wireless Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xc09a) },
{ /* G935 Gaming Headset */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0x0a87),
diff --git a/drivers/hid/hid-mcp2200.c b/drivers/hid/hid-mcp2200.c
index e6ea0a2140eb..dafdd5b4a079 100644
--- a/drivers/hid/hid-mcp2200.c
+++ b/drivers/hid/hid-mcp2200.c
@@ -279,8 +279,8 @@ static const struct gpio_chip template_chip = {
.get_direction = mcp_get_direction,
.direction_input = mcp_direction_input,
.direction_output = mcp_direction_output,
- .set_rv = mcp_set,
- .set_multiple_rv = mcp_set_multiple,
+ .set = mcp_set,
+ .set_multiple = mcp_set_multiple,
.get = mcp_get,
.get_multiple = mcp_get_multiple,
.base = -1,
diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c
index fcfe9370a887..33603b019f97 100644
--- a/drivers/hid/hid-mcp2221.c
+++ b/drivers/hid/hid-mcp2221.c
@@ -906,6 +906,10 @@ static int mcp2221_raw_event(struct hid_device *hdev,
}
if (data[2] == MCP2221_I2C_READ_COMPL ||
data[2] == MCP2221_I2C_READ_PARTIAL) {
+ if (!mcp->rxbuf || mcp->rxbuf_idx < 0 || data[3] > 60) {
+ mcp->status = -EINVAL;
+ break;
+ }
buf = mcp->rxbuf;
memcpy(&buf[mcp->rxbuf_idx], &data[4], data[3]);
mcp->rxbuf_idx = mcp->rxbuf_idx + data[3];
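
The new guard rejects read completions when no read was armed and caps the
per-chunk length. The 60-byte limit is presumably the 64-byte MCP2221 HID
report minus the 4-byte response header (an assumption inferred from the
memcpy() offset, not stated in the patch):

    /*
     * Invariant being enforced:  data[3] <= 64 - 4,  so that
     *
     *     memcpy(&buf[mcp->rxbuf_idx], &data[4], data[3])
     *
     * can never read past the end of the report or advance rxbuf_idx
     * beyond what the armed transfer allocated.
     */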
@@ -1298,7 +1302,7 @@ static int mcp2221_probe(struct hid_device *hdev,
mcp->gc->direction_input = mcp_gpio_direction_input;
mcp->gc->direction_output = mcp_gpio_direction_output;
mcp->gc->get_direction = mcp_gpio_get_direction;
- mcp->gc->set_rv = mcp_gpio_set;
+ mcp->gc->set = mcp_gpio_set;
mcp->gc->get = mcp_gpio_get;
mcp->gc->ngpio = MCP_NGPIO;
mcp->gc->base = -1;
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 294516a8f541..2879e65cf303 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -49,6 +49,8 @@ MODULE_LICENSE("GPL");
#include "hid-ids.h"
+#include "hid-haptic.h"
+
/* quirks to control the device */
#define MT_QUIRK_NOT_SEEN_MEANS_UP BIT(0)
#define MT_QUIRK_SLOT_IS_CONTACTID BIT(1)
@@ -168,11 +170,13 @@ struct mt_report_data {
struct mt_device {
struct mt_class mtclass; /* our mt device class */
struct timer_list release_timer; /* to release sticky fingers */
+ struct hid_haptic_device *haptic; /* haptic related configuration */
struct hid_device *hdev; /* hid_device we're attached to */
unsigned long mt_io_flags; /* mt flags (MT_IO_FLAGS_*) */
__u8 inputmode_value; /* InputMode HID feature value */
__u8 maxcontacts;
bool is_buttonpad; /* is this device a button pad? */
+ bool is_haptic_touchpad; /* is this device a haptic touchpad? */
bool serial_maybe; /* need to check for serial protocol */
struct list_head applications;
@@ -533,6 +537,8 @@ static void mt_feature_mapping(struct hid_device *hdev,
mt_get_feature(hdev, field->report);
break;
}
+
+ hid_haptic_feature_mapping(hdev, td->haptic, field, usage);
}
static void set_abs(struct input_dev *input, unsigned int code,
@@ -888,6 +894,9 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
case HID_DG_TIPPRESSURE:
set_abs(hi->input, ABS_MT_PRESSURE, field,
cls->sn_pressure);
+ td->is_haptic_touchpad =
+ hid_haptic_check_pressure_unit(td->haptic,
+ hi, field);
MT_STORE_FIELD(p);
return 1;
case HID_DG_SCANTIME:
@@ -1008,6 +1017,8 @@ static void mt_sync_frame(struct mt_device *td, struct mt_application *app,
app->num_received = 0;
app->left_button_state = 0;
+ if (td->is_haptic_touchpad)
+ hid_haptic_pressure_reset(td->haptic);
if (test_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags))
set_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags);
@@ -1165,6 +1176,9 @@ static int mt_process_slot(struct mt_device *td, struct input_dev *input,
minor = minor >> 1;
}
+ if (td->is_haptic_touchpad)
+ hid_haptic_pressure_increase(td->haptic, *slot->p);
+
x = hdev->quirks & HID_QUIRK_X_INVERT ?
input_abs_get_max(input, ABS_MT_POSITION_X) - *slot->x :
*slot->x;
@@ -1366,6 +1380,9 @@ static int mt_touch_input_configured(struct hid_device *hdev,
if (cls->is_indirect)
app->mt_flags |= INPUT_MT_POINTER;
+ if (td->is_haptic_touchpad)
+ app->mt_flags |= INPUT_MT_TOTAL_FORCE;
+
if (app->quirks & MT_QUIRK_NOT_SEEN_MEANS_UP)
app->mt_flags |= INPUT_MT_DROP_UNUSED;
@@ -1401,6 +1418,7 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct mt_device *td = hid_get_drvdata(hdev);
struct mt_application *application;
struct mt_report_data *rdata;
+ int ret;
rdata = mt_find_report_data(td, field->report);
if (!rdata) {
@@ -1463,6 +1481,11 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
if (field->physical == HID_DG_STYLUS)
hi->application = HID_DG_STYLUS;
+ ret = hid_haptic_input_mapping(hdev, td->haptic, hi, field, usage, bit,
+ max);
+ if (ret != 0)
+ return ret;
+
/* let hid-core decide for the others */
return 0;
}
@@ -1503,6 +1526,14 @@ static const __u8 *mt_report_fixup(struct hid_device *hdev, __u8 *rdesc,
if (hdev->vendor == I2C_VENDOR_ID_GOODIX &&
(hdev->product == I2C_DEVICE_ID_GOODIX_01E8 ||
hdev->product == I2C_DEVICE_ID_GOODIX_01E9)) {
+ if (*size < 608) {
+ dev_info(
+ &hdev->dev,
+ "GT7868Q fixup: report descriptor is only %u bytes, skipping\n",
+ *size);
+ return rdesc;
+ }
+
if (rdesc[607] == 0x15) {
rdesc[607] = 0x25;
dev_info(
@@ -1677,6 +1708,14 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
struct hid_report *report;
int ret;
+ if (td->is_haptic_touchpad && (td->mtclass.name == MT_CLS_WIN_8 ||
+ td->mtclass.name == MT_CLS_WIN_8_FORCE_MULTI_INPUT)) {
+ if (hid_haptic_input_configured(hdev, td->haptic, hi) == 0)
+ td->is_haptic_touchpad = false;
+ } else {
+ td->is_haptic_touchpad = false;
+ }
+
list_for_each_entry(report, &hi->reports, hidinput_list) {
rdata = mt_find_report_data(td, report);
if (!rdata) {
@@ -1819,6 +1858,11 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
dev_err(&hdev->dev, "cannot allocate multitouch data\n");
return -ENOMEM;
}
+ td->haptic = devm_kzalloc(&hdev->dev, sizeof(*(td->haptic)), GFP_KERNEL);
+ if (!td->haptic)
+ return -ENOMEM;
+
+ td->haptic->hdev = hdev;
td->hdev = hdev;
td->mtclass = *mtclass;
td->inputmode_value = MT_INPUTMODE_TOUCHSCREEN;
@@ -1887,6 +1931,17 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
mt_set_modes(hdev, HID_LATENCY_NORMAL, TOUCHPAD_REPORT_ALL);
+ if (td->is_haptic_touchpad) {
+ if (hid_haptic_init(hdev, &td->haptic)) {
+ dev_warn(&hdev->dev, "Cannot allocate haptic for %s\n",
+ hdev->name);
+ td->is_haptic_touchpad = false;
+ devm_kfree(&hdev->dev, td->haptic);
+ }
+ } else {
+ devm_kfree(&hdev->dev, td->haptic);
+ }
+
return 0;
}
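
mt_probe() above allocates td->haptic with devm_kzalloc() up front and hands
it back with devm_kfree() once the device turns out not to be a haptic
touchpad, so the success path needs no cleanup at all. The pattern in
isolation (hypothetical names):

    struct foo_state { int dummy; };

    static int foo_probe(struct device *dev)
    {
            struct foo_state *st;

            st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
            if (!st)
                    return -ENOMEM;

            if (!foo_feature_present(dev)) {    /* hypothetical check */
                    devm_kfree(dev, st);        /* nothing will use it */
                    st = NULL;
            }
            return 0;
    }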
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index 2738ce947434..0f76e241e0af 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -144,6 +144,9 @@ static void ntrig_report_version(struct hid_device *hdev)
struct usb_device *usb_dev = hid_to_usb_dev(hdev);
unsigned char *data = kmalloc(8, GFP_KERNEL);
+ if (!hid_is_usb(hdev)) {
+ kfree(data);
+ return;
+ }
+
if (!data)
goto err_free;
diff --git a/drivers/hid/hid-playstation.c b/drivers/hid/hid-playstation.c
index 1468fb11e39d..63f6eb9030d1 100644
--- a/drivers/hid/hid-playstation.c
+++ b/drivers/hid/hid-playstation.c
@@ -5,7 +5,9 @@
* Copyright (c) 2020-2022 Sony Interactive Entertainment
*/
+#include <linux/bitfield.h>
#include <linux/bits.h>
+#include <linux/cleanup.h>
#include <linux/crc32.h>
#include <linux/device.h>
#include <linux/hid.h>
@@ -36,19 +38,19 @@ enum PS_TYPE {
struct ps_device {
struct list_head list;
struct hid_device *hdev;
- spinlock_t lock;
+ spinlock_t lock; /* Sync between event handler and workqueue */
- uint32_t player_id;
+ u32 player_id;
struct power_supply_desc battery_desc;
struct power_supply *battery;
- uint8_t battery_capacity;
+ u8 battery_capacity;
int battery_status;
const char *input_dev_name; /* Name of primary input device. */
- uint8_t mac_address[6]; /* Note: stored in little endian order. */
- uint32_t hw_version;
- uint32_t fw_version;
+ u8 mac_address[6]; /* Note: stored in little endian order. */
+ u32 hw_version;
+ u32 fw_version;
int (*parse_report)(struct ps_device *dev, struct hid_report *report, u8 *data, int size);
void (*remove)(struct ps_device *dev);
@@ -110,41 +112,62 @@ struct ps_led_info {
#define DS_BUTTONS2_TOUCHPAD BIT(1)
#define DS_BUTTONS2_MIC_MUTE BIT(2)
-/* Status field of DualSense input report. */
-#define DS_STATUS_BATTERY_CAPACITY GENMASK(3, 0)
-#define DS_STATUS_CHARGING GENMASK(7, 4)
-#define DS_STATUS_CHARGING_SHIFT 4
+/* Status fields of DualSense input report. */
+#define DS_STATUS0_BATTERY_CAPACITY GENMASK(3, 0)
+#define DS_STATUS0_CHARGING GENMASK(7, 4)
+#define DS_STATUS1_HP_DETECT BIT(0)
+#define DS_STATUS1_MIC_DETECT BIT(1)
+#define DS_STATUS1_JACK_DETECT (DS_STATUS1_HP_DETECT | DS_STATUS1_MIC_DETECT)
+#define DS_STATUS1_MIC_MUTE BIT(2)
/* Feature version from DualSense Firmware Info report. */
-#define DS_FEATURE_VERSION(major, minor) ((major & 0xff) << 8 | (minor & 0xff))
-
+#define DS_FEATURE_VERSION_MINOR GENMASK(7, 0)
+#define DS_FEATURE_VERSION_MAJOR GENMASK(15, 8)
+#define DS_FEATURE_VERSION(major, minor) (FIELD_PREP(DS_FEATURE_VERSION_MAJOR, major) | \
+ FIELD_PREP(DS_FEATURE_VERSION_MINOR, minor))
/*
* Status of a DualSense touch point contact.
* Contact IDs with the highest bit set are 'inactive'
* and any associated data is then invalid.
*/
-#define DS_TOUCH_POINT_INACTIVE BIT(7)
+#define DS_TOUCH_POINT_INACTIVE BIT(7)
+#define DS_TOUCH_POINT_X_LO GENMASK(7, 0)
+#define DS_TOUCH_POINT_X_HI GENMASK(11, 8)
+#define DS_TOUCH_POINT_X(hi, lo) (FIELD_PREP(DS_TOUCH_POINT_X_HI, hi) | \
+ FIELD_PREP(DS_TOUCH_POINT_X_LO, lo))
+#define DS_TOUCH_POINT_Y_LO GENMASK(3, 0)
+#define DS_TOUCH_POINT_Y_HI GENMASK(11, 4)
+#define DS_TOUCH_POINT_Y(hi, lo) (FIELD_PREP(DS_TOUCH_POINT_Y_HI, hi) | \
+ FIELD_PREP(DS_TOUCH_POINT_Y_LO, lo))
/* Magic value required in tag field of Bluetooth output report. */
-#define DS_OUTPUT_TAG 0x10
+#define DS_OUTPUT_TAG 0x10
+#define DS_OUTPUT_SEQ_TAG GENMASK(3, 0)
+#define DS_OUTPUT_SEQ_NO GENMASK(7, 4)
/* Flags for DualSense output report. */
-#define DS_OUTPUT_VALID_FLAG0_COMPATIBLE_VIBRATION BIT(0)
-#define DS_OUTPUT_VALID_FLAG0_HAPTICS_SELECT BIT(1)
-#define DS_OUTPUT_VALID_FLAG1_MIC_MUTE_LED_CONTROL_ENABLE BIT(0)
-#define DS_OUTPUT_VALID_FLAG1_POWER_SAVE_CONTROL_ENABLE BIT(1)
-#define DS_OUTPUT_VALID_FLAG1_LIGHTBAR_CONTROL_ENABLE BIT(2)
-#define DS_OUTPUT_VALID_FLAG1_RELEASE_LEDS BIT(3)
-#define DS_OUTPUT_VALID_FLAG1_PLAYER_INDICATOR_CONTROL_ENABLE BIT(4)
-#define DS_OUTPUT_VALID_FLAG2_LIGHTBAR_SETUP_CONTROL_ENABLE BIT(1)
-#define DS_OUTPUT_VALID_FLAG2_COMPATIBLE_VIBRATION2 BIT(2)
-#define DS_OUTPUT_POWER_SAVE_CONTROL_MIC_MUTE BIT(4)
-#define DS_OUTPUT_LIGHTBAR_SETUP_LIGHT_OUT BIT(1)
+#define DS_OUTPUT_VALID_FLAG0_COMPATIBLE_VIBRATION BIT(0)
+#define DS_OUTPUT_VALID_FLAG0_HAPTICS_SELECT BIT(1)
+#define DS_OUTPUT_VALID_FLAG0_SPEAKER_VOLUME_ENABLE BIT(5)
+#define DS_OUTPUT_VALID_FLAG0_MIC_VOLUME_ENABLE BIT(6)
+#define DS_OUTPUT_VALID_FLAG0_AUDIO_CONTROL_ENABLE BIT(7)
+#define DS_OUTPUT_VALID_FLAG1_MIC_MUTE_LED_CONTROL_ENABLE BIT(0)
+#define DS_OUTPUT_VALID_FLAG1_POWER_SAVE_CONTROL_ENABLE BIT(1)
+#define DS_OUTPUT_VALID_FLAG1_LIGHTBAR_CONTROL_ENABLE BIT(2)
+#define DS_OUTPUT_VALID_FLAG1_RELEASE_LEDS BIT(3)
+#define DS_OUTPUT_VALID_FLAG1_PLAYER_INDICATOR_CONTROL_ENABLE BIT(4)
+#define DS_OUTPUT_VALID_FLAG1_AUDIO_CONTROL2_ENABLE BIT(7)
+#define DS_OUTPUT_VALID_FLAG2_LIGHTBAR_SETUP_CONTROL_ENABLE BIT(1)
+#define DS_OUTPUT_VALID_FLAG2_COMPATIBLE_VIBRATION2 BIT(2)
+#define DS_OUTPUT_AUDIO_FLAGS_OUTPUT_PATH_SEL GENMASK(5, 4)
+#define DS_OUTPUT_AUDIO_FLAGS2_SP_PREAMP_GAIN GENMASK(2, 0)
+#define DS_OUTPUT_POWER_SAVE_CONTROL_MIC_MUTE BIT(4)
+#define DS_OUTPUT_LIGHTBAR_SETUP_LIGHT_OUT BIT(1)
/* DualSense hardware limits */
#define DS_ACC_RES_PER_G 8192
-#define DS_ACC_RANGE (4*DS_ACC_RES_PER_G)
+#define DS_ACC_RANGE (4 * DS_ACC_RES_PER_G)
#define DS_GYRO_RES_PER_DEG_S 1024
-#define DS_GYRO_RANGE (2048*DS_GYRO_RES_PER_DEG_S)
+#define DS_GYRO_RANGE (2048 * DS_GYRO_RES_PER_DEG_S)
#define DS_TOUCHPAD_WIDTH 1920
#define DS_TOUCHPAD_HEIGHT 1080
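
The GENMASK()-based masks above are meant to be consumed with
FIELD_GET()/FIELD_PREP() so the byte packing stays self-describing. A sketch
of the intended decode, assuming <linux/bitfield.h> and the report structs
defined further down (the 0x1 == charging value is an assumption):

    static void example_decode(const struct dualsense_input_report *r)
    {
            const struct dualsense_touch_point *p = &r->points[0];
            u8 capacity = FIELD_GET(DS_STATUS0_BATTERY_CAPACITY, r->status[0]);
            bool charging = FIELD_GET(DS_STATUS0_CHARGING, r->status[0]) == 0x1;
            u16 x = DS_TOUCH_POINT_X(p->x_hi, p->x_lo);    /* 12-bit X */
            u16 y = DS_TOUCH_POINT_Y(p->y_hi, p->y_lo);    /* 12-bit Y */

            /* ... feed capacity/charging into power_supply, x/y into MT ... */
    }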
@@ -153,9 +176,10 @@ struct dualsense {
struct input_dev *gamepad;
struct input_dev *sensors;
struct input_dev *touchpad;
+ struct input_dev *jack;
/* Update version is used as a feature/capability version. */
- uint16_t update_version;
+ u16 update_version;
/* Calibration data for accelerometer and gyroscope. */
struct ps_calibration_data accel_calib_data[3];
@@ -163,21 +187,26 @@ struct dualsense {
/* Timestamp for sensor data */
bool sensor_timestamp_initialized;
- uint32_t prev_sensor_timestamp;
- uint32_t sensor_timestamp_us;
+ u32 prev_sensor_timestamp;
+ u32 sensor_timestamp_us;
/* Compatible rumble state */
bool use_vibration_v2;
bool update_rumble;
- uint8_t motor_left;
- uint8_t motor_right;
+ u8 motor_left;
+ u8 motor_right;
/* RGB lightbar */
struct led_classdev_mc lightbar;
bool update_lightbar;
- uint8_t lightbar_red;
- uint8_t lightbar_green;
- uint8_t lightbar_blue;
+ u8 lightbar_red;
+ u8 lightbar_green;
+ u8 lightbar_blue;
+
+ /* Audio Jack plugged state */
+ u8 plugged_state;
+ u8 prev_plugged_state;
+ bool prev_plugged_state_valid;
/* Microphone */
bool update_mic_mute;
@@ -186,90 +215,94 @@ struct dualsense {
/* Player leds */
bool update_player_leds;
- uint8_t player_leds_state;
+ u8 player_leds_state;
struct led_classdev player_leds[5];
struct work_struct output_worker;
bool output_worker_initialized;
void *output_report_dmabuf;
- uint8_t output_seq; /* Sequence number for output report. */
+ u8 output_seq; /* Sequence number for output report. */
};
struct dualsense_touch_point {
- uint8_t contact;
- uint8_t x_lo;
- uint8_t x_hi:4, y_lo:4;
- uint8_t y_hi;
+ u8 contact;
+ u8 x_lo;
+ u8 x_hi:4, y_lo:4;
+ u8 y_hi;
} __packed;
static_assert(sizeof(struct dualsense_touch_point) == 4);
/* Main DualSense input report excluding any BT/USB specific headers. */
struct dualsense_input_report {
- uint8_t x, y;
- uint8_t rx, ry;
- uint8_t z, rz;
- uint8_t seq_number;
- uint8_t buttons[4];
- uint8_t reserved[4];
+ u8 x, y;
+ u8 rx, ry;
+ u8 z, rz;
+ u8 seq_number;
+ u8 buttons[4];
+ u8 reserved[4];
/* Motion sensors */
__le16 gyro[3]; /* x, y, z */
__le16 accel[3]; /* x, y, z */
__le32 sensor_timestamp;
- uint8_t reserved2;
+ u8 reserved2;
/* Touchpad */
struct dualsense_touch_point points[2];
- uint8_t reserved3[12];
- uint8_t status;
- uint8_t reserved4[10];
+ u8 reserved3[12];
+ u8 status[3];
+ u8 reserved4[8];
} __packed;
/* The shared input report size equals the USB report size minus 1 byte for the ReportID. */
static_assert(sizeof(struct dualsense_input_report) == DS_INPUT_REPORT_USB_SIZE - 1);
/* Common data between DualSense BT/USB main output report. */
struct dualsense_output_report_common {
- uint8_t valid_flag0;
- uint8_t valid_flag1;
+ u8 valid_flag0;
+ u8 valid_flag1;
/* For DualShock 4 compatibility mode. */
- uint8_t motor_right;
- uint8_t motor_left;
+ u8 motor_right;
+ u8 motor_left;
/* Audio controls */
- uint8_t reserved[4];
- uint8_t mute_button_led;
+ u8 headphone_volume; /* 0x0 - 0x7f */
+ u8 speaker_volume; /* 0x0 - 0xff */
+ u8 mic_volume; /* 0x0 - 0x40 */
+ u8 audio_control;
+ u8 mute_button_led;
- uint8_t power_save_control;
- uint8_t reserved2[28];
+ u8 power_save_control;
+ u8 reserved2[27];
+ u8 audio_control2;
/* LEDs and lightbar */
- uint8_t valid_flag2;
- uint8_t reserved3[2];
- uint8_t lightbar_setup;
- uint8_t led_brightness;
- uint8_t player_leds;
- uint8_t lightbar_red;
- uint8_t lightbar_green;
- uint8_t lightbar_blue;
+ u8 valid_flag2;
+ u8 reserved3[2];
+ u8 lightbar_setup;
+ u8 led_brightness;
+ u8 player_leds;
+ u8 lightbar_red;
+ u8 lightbar_green;
+ u8 lightbar_blue;
} __packed;
static_assert(sizeof(struct dualsense_output_report_common) == 47);
struct dualsense_output_report_bt {
- uint8_t report_id; /* 0x31 */
- uint8_t seq_tag;
- uint8_t tag;
+ u8 report_id; /* 0x31 */
+ u8 seq_tag;
+ u8 tag;
struct dualsense_output_report_common common;
- uint8_t reserved[24];
+ u8 reserved[24];
__le32 crc32;
} __packed;
static_assert(sizeof(struct dualsense_output_report_bt) == DS_OUTPUT_REPORT_BT_SIZE);
struct dualsense_output_report_usb {
- uint8_t report_id; /* 0x02 */
+ u8 report_id; /* 0x02 */
struct dualsense_output_report_common common;
- uint8_t reserved[15];
+ u8 reserved[15];
} __packed;
static_assert(sizeof(struct dualsense_output_report_usb) == DS_OUTPUT_REPORT_USB_SIZE);
@@ -279,8 +312,8 @@ static_assert(sizeof(struct dualsense_output_report_usb) == DS_OUTPUT_REPORT_USB
* This structure hides the differences between the two to simplify sending output reports.
*/
struct dualsense_output_report {
- uint8_t *data; /* Start of data */
- uint8_t len; /* Size of output report */
+ u8 *data; /* Start of data */
+ u8 len; /* Size of output report */
/* Points to the Bluetooth data payload for a Bluetooth report, else NULL. */
struct dualsense_output_report_bt *bt;
@@ -315,7 +348,9 @@ struct dualsense_output_report {
* Contact IDs with the highest bit set are 'inactive'
* and any associated data is then invalid.
*/
-#define DS4_TOUCH_POINT_INACTIVE BIT(7)
+#define DS4_TOUCH_POINT_INACTIVE BIT(7)
+#define DS4_TOUCH_POINT_X(hi, lo) DS_TOUCH_POINT_X(hi, lo)
+#define DS4_TOUCH_POINT_Y(hi, lo) DS_TOUCH_POINT_Y(hi, lo)
/* Status field of DualShock4 input report. */
#define DS4_STATUS0_BATTERY_CAPACITY GENMASK(3, 0)
@@ -323,7 +358,7 @@ struct dualsense_output_report {
/* Battery status within batery_status field. */
#define DS4_BATTERY_STATUS_FULL 11
/* Status1 bit2 contains dongle connection state:
- * 0 = connectd
+ * 0 = connected
* 1 = disconnected
*/
#define DS4_STATUS1_DONGLE_STATE BIT(2)
@@ -349,9 +384,9 @@ struct dualsense_output_report {
/* DualShock4 hardware limits */
#define DS4_ACC_RES_PER_G 8192
-#define DS4_ACC_RANGE (4*DS_ACC_RES_PER_G)
+#define DS4_ACC_RANGE (4 * DS_ACC_RES_PER_G)
#define DS4_GYRO_RES_PER_DEG_S 1024
-#define DS4_GYRO_RANGE (2048*DS_GYRO_RES_PER_DEG_S)
+#define DS4_GYRO_RANGE (2048 * DS_GYRO_RES_PER_DEG_S)
#define DS4_LIGHTBAR_MAX_BLINK 255 /* 255 centiseconds */
#define DS4_TOUCHPAD_WIDTH 1920
#define DS4_TOUCHPAD_HEIGHT 942
@@ -380,26 +415,26 @@ struct dualshock4 {
/* Timestamp for sensor data */
bool sensor_timestamp_initialized;
- uint32_t prev_sensor_timestamp;
- uint32_t sensor_timestamp_us;
+ u32 prev_sensor_timestamp;
+ u32 sensor_timestamp_us;
/* Bluetooth poll interval */
bool update_bt_poll_interval;
- uint8_t bt_poll_interval;
+ u8 bt_poll_interval;
bool update_rumble;
- uint8_t motor_left;
- uint8_t motor_right;
+ u8 motor_left;
+ u8 motor_right;
/* Lightbar leds */
bool update_lightbar;
bool update_lightbar_blink;
bool lightbar_enabled; /* For use by global LED control. */
- uint8_t lightbar_red;
- uint8_t lightbar_green;
- uint8_t lightbar_blue;
- uint8_t lightbar_blink_on; /* In increments of 10ms. */
- uint8_t lightbar_blink_off; /* In increments of 10ms. */
+ u8 lightbar_red;
+ u8 lightbar_green;
+ u8 lightbar_blue;
+ u8 lightbar_blink_on; /* In increments of 10ms. */
+ u8 lightbar_blink_off; /* In increments of 10ms. */
struct led_classdev lightbar_leds[4];
struct work_struct output_worker;
@@ -408,88 +443,88 @@ struct dualshock4 {
};
struct dualshock4_touch_point {
- uint8_t contact;
- uint8_t x_lo;
- uint8_t x_hi:4, y_lo:4;
- uint8_t y_hi;
+ u8 contact;
+ u8 x_lo;
+ u8 x_hi:4, y_lo:4;
+ u8 y_hi;
} __packed;
static_assert(sizeof(struct dualshock4_touch_point) == 4);
struct dualshock4_touch_report {
- uint8_t timestamp;
+ u8 timestamp;
struct dualshock4_touch_point points[2];
} __packed;
static_assert(sizeof(struct dualshock4_touch_report) == 9);
/* Main DualShock4 input report excluding any BT/USB specific headers. */
struct dualshock4_input_report_common {
- uint8_t x, y;
- uint8_t rx, ry;
- uint8_t buttons[3];
- uint8_t z, rz;
+ u8 x, y;
+ u8 rx, ry;
+ u8 buttons[3];
+ u8 z, rz;
/* Motion sensors */
__le16 sensor_timestamp;
- uint8_t sensor_temperature;
+ u8 sensor_temperature;
__le16 gyro[3]; /* x, y, z */
__le16 accel[3]; /* x, y, z */
- uint8_t reserved2[5];
+ u8 reserved2[5];
- uint8_t status[2];
- uint8_t reserved3;
+ u8 status[2];
+ u8 reserved3;
} __packed;
static_assert(sizeof(struct dualshock4_input_report_common) == 32);
struct dualshock4_input_report_usb {
- uint8_t report_id; /* 0x01 */
+ u8 report_id; /* 0x01 */
struct dualshock4_input_report_common common;
- uint8_t num_touch_reports;
+ u8 num_touch_reports;
struct dualshock4_touch_report touch_reports[3];
- uint8_t reserved[3];
+ u8 reserved[3];
} __packed;
static_assert(sizeof(struct dualshock4_input_report_usb) == DS4_INPUT_REPORT_USB_SIZE);
struct dualshock4_input_report_bt {
- uint8_t report_id; /* 0x11 */
- uint8_t reserved[2];
+ u8 report_id; /* 0x11 */
+ u8 reserved[2];
struct dualshock4_input_report_common common;
- uint8_t num_touch_reports;
+ u8 num_touch_reports;
struct dualshock4_touch_report touch_reports[4]; /* BT has 4 compared to 3 for USB */
- uint8_t reserved2[2];
+ u8 reserved2[2];
__le32 crc32;
} __packed;
static_assert(sizeof(struct dualshock4_input_report_bt) == DS4_INPUT_REPORT_BT_SIZE);
/* Common data between Bluetooth and USB DualShock4 output reports. */
struct dualshock4_output_report_common {
- uint8_t valid_flag0;
- uint8_t valid_flag1;
+ u8 valid_flag0;
+ u8 valid_flag1;
- uint8_t reserved;
+ u8 reserved;
- uint8_t motor_right;
- uint8_t motor_left;
+ u8 motor_right;
+ u8 motor_left;
- uint8_t lightbar_red;
- uint8_t lightbar_green;
- uint8_t lightbar_blue;
- uint8_t lightbar_blink_on;
- uint8_t lightbar_blink_off;
+ u8 lightbar_red;
+ u8 lightbar_green;
+ u8 lightbar_blue;
+ u8 lightbar_blink_on;
+ u8 lightbar_blink_off;
} __packed;
struct dualshock4_output_report_usb {
- uint8_t report_id; /* 0x5 */
+ u8 report_id; /* 0x5 */
struct dualshock4_output_report_common common;
- uint8_t reserved[21];
+ u8 reserved[21];
} __packed;
static_assert(sizeof(struct dualshock4_output_report_usb) == DS4_OUTPUT_REPORT_USB_SIZE);
struct dualshock4_output_report_bt {
- uint8_t report_id; /* 0x11 */
- uint8_t hw_control;
- uint8_t audio_control;
+ u8 report_id; /* 0x11 */
+ u8 hw_control;
+ u8 audio_control;
struct dualshock4_output_report_common common;
- uint8_t reserved[61];
+ u8 reserved[61];
__le32 crc32;
} __packed;
static_assert(sizeof(struct dualshock4_output_report_bt) == DS4_OUTPUT_REPORT_BT_SIZE);
@@ -500,8 +535,8 @@ static_assert(sizeof(struct dualshock4_output_report_bt) == DS4_OUTPUT_REPORT_BT
* This structure hides the differences between the two to simplify sending output reports.
*/
struct dualshock4_output_report {
- uint8_t *data; /* Start of data */
- uint8_t len; /* Size of output report */
+ u8 *data; /* Start of data */
+ u8 len; /* Size of output report */
/* Points to the Bluetooth data payload for a Bluetooth report, else NULL. */
struct dualshock4_output_report_bt *bt;
@@ -540,7 +575,7 @@ static const struct {int x; int y; } ps_gamepad_hat_mapping[] = {
static int dualshock4_get_calibration_data(struct dualshock4 *ds4);
static inline void dualsense_schedule_work(struct dualsense *ds);
static inline void dualshock4_schedule_work(struct dualshock4 *ds4);
-static void dualsense_set_lightbar(struct dualsense *ds, uint8_t red, uint8_t green, uint8_t blue);
+static void dualsense_set_lightbar(struct dualsense *ds, u8 red, u8 green, u8 blue);
static void dualshock4_set_default_lightbar_colors(struct dualshock4 *ds4);
/*
@@ -552,26 +587,25 @@ static int ps_devices_list_add(struct ps_device *dev)
{
struct ps_device *entry;
- mutex_lock(&ps_devices_lock);
+ guard(mutex)(&ps_devices_lock);
+
list_for_each_entry(entry, &ps_devices_list, list) {
if (!memcmp(entry->mac_address, dev->mac_address, sizeof(dev->mac_address))) {
hid_err(dev->hdev, "Duplicate device found for MAC address %pMR.\n",
- dev->mac_address);
- mutex_unlock(&ps_devices_lock);
+ dev->mac_address);
return -EEXIST;
}
}
list_add_tail(&dev->list, &ps_devices_list);
- mutex_unlock(&ps_devices_lock);
return 0;
}
static int ps_devices_list_remove(struct ps_device *dev)
{
- mutex_lock(&ps_devices_lock);
+ guard(mutex)(&ps_devices_lock);
+
list_del(&dev->list);
- mutex_unlock(&ps_devices_lock);
return 0;
}
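
The list helpers above switch from manual mutex_lock()/mutex_unlock() pairs
to guard()/scoped_guard() from <linux/cleanup.h>, which drop the lock
automatically on every return path. In isolation (hypothetical names):

    static DEFINE_MUTEX(example_lock);

    struct example_entry {
            struct list_head node;
            int key;
    };

    static int example_lookup(struct list_head *head, int key)
    {
            struct example_entry *e;

            guard(mutex)(&example_lock);    /* auto-unlock at scope exit */

            list_for_each_entry(e, head, node)
                    if (e->key == key)
                            return 0;       /* guard drops the mutex here too */
            return -ENOENT;
    }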
@@ -593,7 +627,8 @@ static void ps_device_release_player_id(struct ps_device *dev)
dev->player_id = U32_MAX;
}
-static struct input_dev *ps_allocate_input_dev(struct hid_device *hdev, const char *name_suffix)
+static struct input_dev *ps_allocate_input_dev(struct hid_device *hdev,
+ const char *name_suffix)
{
struct input_dev *input_dev;
@@ -608,8 +643,8 @@ static struct input_dev *ps_allocate_input_dev(struct hid_device *hdev, const ch
input_dev->uniq = hdev->uniq;
if (name_suffix) {
- input_dev->name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s %s", hdev->name,
- name_suffix);
+ input_dev->name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s %s",
+ hdev->name, name_suffix);
if (!input_dev->name)
return ERR_PTR(-ENOMEM);
} else {
@@ -629,19 +664,18 @@ static enum power_supply_property ps_power_supply_props[] = {
};
static int ps_battery_get_property(struct power_supply *psy,
- enum power_supply_property psp,
- union power_supply_propval *val)
+ enum power_supply_property psp,
+ union power_supply_propval *val)
{
struct ps_device *dev = power_supply_get_drvdata(psy);
- uint8_t battery_capacity;
+ u8 battery_capacity;
int battery_status;
- unsigned long flags;
int ret = 0;
- spin_lock_irqsave(&dev->lock, flags);
- battery_capacity = dev->battery_capacity;
- battery_status = dev->battery_status;
- spin_unlock_irqrestore(&dev->lock, flags);
+ scoped_guard(spinlock_irqsave, &dev->lock) {
+ battery_capacity = dev->battery_capacity;
+ battery_status = dev->battery_status;
+ }
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
@@ -675,7 +709,7 @@ static int ps_device_register_battery(struct ps_device *dev)
dev->battery_desc.num_properties = ARRAY_SIZE(ps_power_supply_props);
dev->battery_desc.get_property = ps_battery_get_property;
dev->battery_desc.name = devm_kasprintf(&dev->hdev->dev, GFP_KERNEL,
- "ps-controller-battery-%pMR", dev->mac_address);
+ "ps-controller-battery-%pMR", dev->mac_address);
if (!dev->battery_desc.name)
return -ENOMEM;
@@ -697,9 +731,9 @@ static int ps_device_register_battery(struct ps_device *dev)
}
/* Compute crc32 of HID data and compare against expected CRC. */
-static bool ps_check_crc32(uint8_t seed, uint8_t *data, size_t len, uint32_t report_crc)
+static bool ps_check_crc32(u8 seed, u8 *data, size_t len, u32 report_crc)
{
- uint32_t crc;
+ u32 crc;
crc = crc32_le(0xFFFFFFFF, &seed, 1);
crc = ~crc32_le(crc, data, len);
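
ps_check_crc32() folds a direction-specific seed byte into the CRC before the
payload and compares against the little-endian trailer. Verifying a received
Bluetooth report then reduces to (sketch, reusing the helper above):

    static bool example_verify(u8 seed, u8 *buf, size_t len)
    {
            /* last 4 bytes of the report carry the CRC */
            u32 report_crc = get_unaligned_le32(&buf[len - 4]);

            return ps_check_crc32(seed, buf, len - 4, report_crc);
    }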
@@ -707,8 +741,9 @@ static bool ps_check_crc32(uint8_t seed, uint8_t *data, size_t len, uint32_t rep
return crc == report_crc;
}
-static struct input_dev *ps_gamepad_create(struct hid_device *hdev,
- int (*play_effect)(struct input_dev *, void *, struct ff_effect *))
+static struct input_dev *
+ps_gamepad_create(struct hid_device *hdev,
+ int (*play_effect)(struct input_dev *, void *, struct ff_effect *))
{
struct input_dev *gamepad;
unsigned int i;
@@ -745,8 +780,8 @@ static struct input_dev *ps_gamepad_create(struct hid_device *hdev,
return gamepad;
}
-static int ps_get_report(struct hid_device *hdev, uint8_t report_id, uint8_t *buf, size_t size,
- bool check_crc)
+static int ps_get_report(struct hid_device *hdev, u8 report_id, u8 *buf,
+ size_t size, bool check_crc)
{
int ret;
@@ -769,8 +804,8 @@ static int ps_get_report(struct hid_device *hdev, uint8_t report_id, uint8_t *bu
if (hdev->bus == BUS_BLUETOOTH && check_crc) {
/* Last 4 bytes contains crc32. */
- uint8_t crc_offset = size - 4;
- uint32_t report_crc = get_unaligned_le32(&buf[crc_offset]);
+ u8 crc_offset = size - 4;
+ u32 report_crc = get_unaligned_le32(&buf[crc_offset]);
if (!ps_check_crc32(PS_FEATURE_CRC32_SEED, buf, crc_offset, report_crc)) {
hid_err(hdev, "CRC check failed for reportID=%d\n", report_id);
@@ -782,17 +817,20 @@ static int ps_get_report(struct hid_device *hdev, uint8_t report_id, uint8_t *bu
}
static int ps_led_register(struct ps_device *ps_dev, struct led_classdev *led,
- const struct ps_led_info *led_info)
+ const struct ps_led_info *led_info)
{
int ret;
if (led_info->name) {
- led->name = devm_kasprintf(&ps_dev->hdev->dev, GFP_KERNEL,
- "%s:%s:%s", ps_dev->input_dev_name, led_info->color, led_info->name);
+ led->name = devm_kasprintf(&ps_dev->hdev->dev, GFP_KERNEL, "%s:%s:%s",
+ ps_dev->input_dev_name, led_info->color,
+ led_info->name);
} else {
- /* Backwards compatible mode for hid-sony, but not compliant with LED class spec. */
- led->name = devm_kasprintf(&ps_dev->hdev->dev, GFP_KERNEL,
- "%s:%s", ps_dev->input_dev_name, led_info->color);
+ /* Backwards compatible mode for hid-sony, but not compliant
+ * with LED class spec.
+ */
+ led->name = devm_kasprintf(&ps_dev->hdev->dev, GFP_KERNEL, "%s:%s",
+ ps_dev->input_dev_name, led_info->color);
}
if (!led->name)
@@ -816,7 +854,7 @@ static int ps_led_register(struct ps_device *ps_dev, struct led_classdev *led,
/* Register a DualSense/DualShock4 RGB lightbar represented by a multicolor LED. */
static int ps_lightbar_register(struct ps_device *ps_dev, struct led_classdev_mc *lightbar_mc_dev,
- int (*brightness_set)(struct led_classdev *, enum led_brightness))
+ int (*brightness_set)(struct led_classdev *, enum led_brightness))
{
struct hid_device *hdev = ps_dev->hdev;
struct mc_subled *mc_led_info;
@@ -837,7 +875,7 @@ static int ps_lightbar_register(struct ps_device *ps_dev, struct led_classdev_mc
led_cdev = &lightbar_mc_dev->led_cdev;
led_cdev->name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s:rgb:indicator",
- ps_dev->input_dev_name);
+ ps_dev->input_dev_name);
if (!led_cdev->name)
return -ENOMEM;
led_cdev->brightness = 255;
@@ -853,8 +891,8 @@ static int ps_lightbar_register(struct ps_device *ps_dev, struct led_classdev_mc
return 0;
}
-static struct input_dev *ps_sensors_create(struct hid_device *hdev, int accel_range, int accel_res,
- int gyro_range, int gyro_res)
+static struct input_dev *ps_sensors_create(struct hid_device *hdev, int accel_range,
+ int accel_res, int gyro_range, int gyro_res)
{
struct input_dev *sensors;
int ret;
@@ -890,8 +928,8 @@ static struct input_dev *ps_sensors_create(struct hid_device *hdev, int accel_ra
return sensors;
}
-static struct input_dev *ps_touchpad_create(struct hid_device *hdev, int width, int height,
- unsigned int num_contacts)
+static struct input_dev *ps_touchpad_create(struct hid_device *hdev, int width,
+ int height, unsigned int num_contacts)
{
struct input_dev *touchpad;
int ret;
@@ -918,9 +956,27 @@ static struct input_dev *ps_touchpad_create(struct hid_device *hdev, int width,
return touchpad;
}
+static struct input_dev *ps_headset_jack_create(struct hid_device *hdev)
+{
+ struct input_dev *jack;
+ int ret;
+
+ jack = ps_allocate_input_dev(hdev, "Headset Jack");
+ if (IS_ERR(jack))
+ return ERR_CAST(jack);
+
+ input_set_capability(jack, EV_SW, SW_HEADPHONE_INSERT);
+ input_set_capability(jack, EV_SW, SW_MICROPHONE_INSERT);
+
+ ret = input_register_device(jack);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return jack;
+}
+
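
The jack node above exposes plug state as plain EV_SW switches, so userspace
sees ordinary headset-insert events. The output worker later feeds it,
roughly:

    /*
     * input_report_switch(ds->jack, SW_HEADPHONE_INSERT, hp_plugged);
     * input_report_switch(ds->jack, SW_MICROPHONE_INSERT, mic_plugged);
     * input_sync(ds->jack);
     */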
static ssize_t firmware_version_show(struct device *dev,
- struct device_attribute
- *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct hid_device *hdev = to_hid_device(dev);
struct ps_device *ps_dev = hid_get_drvdata(hdev);
@@ -931,8 +987,7 @@ static ssize_t firmware_version_show(struct device *dev,
static DEVICE_ATTR_RO(firmware_version);
static ssize_t hardware_version_show(struct device *dev,
- struct device_attribute
- *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct hid_device *hdev = to_hid_device(dev);
struct ps_device *ps_dev = hid_get_drvdata(hdev);
@@ -963,14 +1018,14 @@ static int dualsense_get_calibration_data(struct dualsense *ds)
int range_2g;
int ret = 0;
int i;
- uint8_t *buf;
+ u8 *buf;
buf = kzalloc(DS_FEATURE_REPORT_CALIBRATION_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = ps_get_report(ds->base.hdev, DS_FEATURE_REPORT_CALIBRATION, buf,
- DS_FEATURE_REPORT_CALIBRATION_SIZE, true);
+ DS_FEATURE_REPORT_CALIBRATION_SIZE, true);
if (ret) {
hid_err(ds->base.hdev, "Failed to retrieve DualSense calibration info: %d\n", ret);
goto err_free;
@@ -1001,19 +1056,19 @@ static int dualsense_get_calibration_data(struct dualsense *ds)
speed_2x = (gyro_speed_plus + gyro_speed_minus);
ds->gyro_calib_data[0].abs_code = ABS_RX;
ds->gyro_calib_data[0].bias = 0;
- ds->gyro_calib_data[0].sens_numer = speed_2x*DS_GYRO_RES_PER_DEG_S;
+ ds->gyro_calib_data[0].sens_numer = speed_2x * DS_GYRO_RES_PER_DEG_S;
ds->gyro_calib_data[0].sens_denom = abs(gyro_pitch_plus - gyro_pitch_bias) +
abs(gyro_pitch_minus - gyro_pitch_bias);
ds->gyro_calib_data[1].abs_code = ABS_RY;
ds->gyro_calib_data[1].bias = 0;
- ds->gyro_calib_data[1].sens_numer = speed_2x*DS_GYRO_RES_PER_DEG_S;
+ ds->gyro_calib_data[1].sens_numer = speed_2x * DS_GYRO_RES_PER_DEG_S;
ds->gyro_calib_data[1].sens_denom = abs(gyro_yaw_plus - gyro_yaw_bias) +
abs(gyro_yaw_minus - gyro_yaw_bias);
ds->gyro_calib_data[2].abs_code = ABS_RZ;
ds->gyro_calib_data[2].bias = 0;
- ds->gyro_calib_data[2].sens_numer = speed_2x*DS_GYRO_RES_PER_DEG_S;
+ ds->gyro_calib_data[2].sens_numer = speed_2x * DS_GYRO_RES_PER_DEG_S;
ds->gyro_calib_data[2].sens_denom = abs(gyro_roll_plus - gyro_roll_bias) +
abs(gyro_roll_minus - gyro_roll_bias);
@@ -1024,8 +1079,9 @@ static int dualsense_get_calibration_data(struct dualsense *ds)
*/
for (i = 0; i < ARRAY_SIZE(ds->gyro_calib_data); i++) {
if (ds->gyro_calib_data[i].sens_denom == 0) {
- hid_warn(hdev, "Invalid gyro calibration data for axis (%d), disabling calibration.",
- ds->gyro_calib_data[i].abs_code);
+ hid_warn(hdev,
+ "Invalid gyro calibration data for axis (%d), disabling calibration.",
+ ds->gyro_calib_data[i].abs_code);
ds->gyro_calib_data[i].bias = 0;
ds->gyro_calib_data[i].sens_numer = DS_GYRO_RANGE;
ds->gyro_calib_data[i].sens_denom = S16_MAX;
@@ -1039,19 +1095,19 @@ static int dualsense_get_calibration_data(struct dualsense *ds)
range_2g = acc_x_plus - acc_x_minus;
ds->accel_calib_data[0].abs_code = ABS_X;
ds->accel_calib_data[0].bias = acc_x_plus - range_2g / 2;
- ds->accel_calib_data[0].sens_numer = 2*DS_ACC_RES_PER_G;
+ ds->accel_calib_data[0].sens_numer = 2 * DS_ACC_RES_PER_G;
ds->accel_calib_data[0].sens_denom = range_2g;
range_2g = acc_y_plus - acc_y_minus;
ds->accel_calib_data[1].abs_code = ABS_Y;
ds->accel_calib_data[1].bias = acc_y_plus - range_2g / 2;
- ds->accel_calib_data[1].sens_numer = 2*DS_ACC_RES_PER_G;
+ ds->accel_calib_data[1].sens_numer = 2 * DS_ACC_RES_PER_G;
ds->accel_calib_data[1].sens_denom = range_2g;
range_2g = acc_z_plus - acc_z_minus;
ds->accel_calib_data[2].abs_code = ABS_Z;
ds->accel_calib_data[2].bias = acc_z_plus - range_2g / 2;
- ds->accel_calib_data[2].sens_numer = 2*DS_ACC_RES_PER_G;
+ ds->accel_calib_data[2].sens_numer = 2 * DS_ACC_RES_PER_G;
ds->accel_calib_data[2].sens_denom = range_2g;
/*
@@ -1061,8 +1117,9 @@ static int dualsense_get_calibration_data(struct dualsense *ds)
*/
for (i = 0; i < ARRAY_SIZE(ds->accel_calib_data); i++) {
if (ds->accel_calib_data[i].sens_denom == 0) {
- hid_warn(hdev, "Invalid accelerometer calibration data for axis (%d), disabling calibration.",
- ds->accel_calib_data[i].abs_code);
+ hid_warn(hdev,
+ "Invalid accelerometer calibration data for axis (%d), disabling calibration.",
+ ds->accel_calib_data[i].abs_code);
ds->accel_calib_data[i].bias = 0;
ds->accel_calib_data[i].sens_numer = DS_ACC_RANGE;
ds->accel_calib_data[i].sens_denom = S16_MAX;
@@ -1074,10 +1131,9 @@ err_free:
return ret;
}
-
static int dualsense_get_firmware_info(struct dualsense *ds)
{
- uint8_t *buf;
+ u8 *buf;
int ret;
buf = kzalloc(DS_FEATURE_REPORT_FIRMWARE_INFO_SIZE, GFP_KERNEL);
@@ -1085,7 +1141,7 @@ static int dualsense_get_firmware_info(struct dualsense *ds)
return -ENOMEM;
ret = ps_get_report(ds->base.hdev, DS_FEATURE_REPORT_FIRMWARE_INFO, buf,
- DS_FEATURE_REPORT_FIRMWARE_INFO_SIZE, true);
+ DS_FEATURE_REPORT_FIRMWARE_INFO_SIZE, true);
if (ret) {
hid_err(ds->base.hdev, "Failed to retrieve DualSense firmware info: %d\n", ret);
goto err_free;
@@ -1110,7 +1166,7 @@ err_free:
static int dualsense_get_mac_address(struct dualsense *ds)
{
- uint8_t *buf;
+ u8 *buf;
int ret = 0;
buf = kzalloc(DS_FEATURE_REPORT_PAIRING_INFO_SIZE, GFP_KERNEL);
@@ -1118,7 +1174,7 @@ static int dualsense_get_mac_address(struct dualsense *ds)
return -ENOMEM;
ret = ps_get_report(ds->base.hdev, DS_FEATURE_REPORT_PAIRING_INFO, buf,
- DS_FEATURE_REPORT_PAIRING_INFO_SIZE, true);
+ DS_FEATURE_REPORT_PAIRING_INFO_SIZE, true);
if (ret) {
hid_err(ds->base.hdev, "Failed to retrieve DualSense pairing info: %d\n", ret);
goto err_free;
@@ -1132,11 +1188,11 @@ err_free:
}
static int dualsense_lightbar_set_brightness(struct led_classdev *cdev,
- enum led_brightness brightness)
+ enum led_brightness brightness)
{
struct led_classdev_mc *mc_cdev = lcdev_to_mccdev(cdev);
struct dualsense *ds = container_of(mc_cdev, struct dualsense, lightbar);
- uint8_t red, green, blue;
+ u8 red, green, blue;
led_mc_calc_color_components(mc_cdev, brightness);
red = mc_cdev->subled_info[0].brightness;
@@ -1159,27 +1215,25 @@ static int dualsense_player_led_set_brightness(struct led_classdev *led, enum le
{
struct hid_device *hdev = to_hid_device(led->dev->parent);
struct dualsense *ds = hid_get_drvdata(hdev);
- unsigned long flags;
unsigned int led_index;
- spin_lock_irqsave(&ds->base.lock, flags);
-
- led_index = led - ds->player_leds;
- if (value == LED_OFF)
- ds->player_leds_state &= ~BIT(led_index);
- else
- ds->player_leds_state |= BIT(led_index);
+ scoped_guard(spinlock_irqsave, &ds->base.lock) {
+ led_index = led - ds->player_leds;
+ if (value == LED_OFF)
+ ds->player_leds_state &= ~BIT(led_index);
+ else
+ ds->player_leds_state |= BIT(led_index);
- ds->update_player_leds = true;
- spin_unlock_irqrestore(&ds->base.lock, flags);
+ ds->update_player_leds = true;
+ }
dualsense_schedule_work(ds);
return 0;
}
-static void dualsense_init_output_report(struct dualsense *ds, struct dualsense_output_report *rp,
- void *buf)
+static void dualsense_init_output_report(struct dualsense *ds,
+ struct dualsense_output_report *rp, void *buf)
{
struct hid_device *hdev = ds->base.hdev;
@@ -1194,7 +1248,8 @@ static void dualsense_init_output_report(struct dualsense *ds, struct dualsense_
* Highest 4-bit is a sequence number, which needs to be increased
* every report. Lowest 4-bit is tag and can be zero for now.
*/
- bt->seq_tag = (ds->output_seq << 4) | 0x0;
+ bt->seq_tag = FIELD_PREP(DS_OUTPUT_SEQ_NO, ds->output_seq) |
+ FIELD_PREP(DS_OUTPUT_SEQ_TAG, 0x0);
if (++ds->output_seq == 16)
ds->output_seq = 0;
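
seq_tag packs a 4-bit rolling sequence number into the high nibble and a tag
into the low nibble; FIELD_PREP() now makes that split explicit. For example:

    /*
     * With output_seq == 5:
     *   FIELD_PREP(DS_OUTPUT_SEQ_NO, 5)   == 0x50
     *   FIELD_PREP(DS_OUTPUT_SEQ_TAG, 0)  == 0x00
     * so seq_tag == 0x50, and the counter wraps back to 0 after 15.
     */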
@@ -1219,12 +1274,10 @@ static void dualsense_init_output_report(struct dualsense *ds, struct dualsense_
static inline void dualsense_schedule_work(struct dualsense *ds)
{
- unsigned long flags;
-
- spin_lock_irqsave(&ds->base.lock, flags);
- if (ds->output_worker_initialized)
- schedule_work(&ds->output_worker);
- spin_unlock_irqrestore(&ds->base.lock, flags);
+ /* Using scoped_guard() instead of guard() to make sparse happy */
+ scoped_guard(spinlock_irqsave, &ds->base.lock)
+ if (ds->output_worker_initialized)
+ schedule_work(&ds->output_worker);
}
/*
@@ -1232,14 +1285,14 @@ static inline void dualsense_schedule_work(struct dualsense *ds)
* for Bluetooth reports.
*/
static void dualsense_send_output_report(struct dualsense *ds,
- struct dualsense_output_report *report)
+ struct dualsense_output_report *report)
{
struct hid_device *hdev = ds->base.hdev;
/* Bluetooth packets need to be signed with a CRC in the last 4 bytes. */
if (report->bt) {
- uint32_t crc;
- uint8_t seed = PS_OUTPUT_CRC32_SEED;
+ u32 crc;
+ u8 seed = PS_OUTPUT_CRC32_SEED;
crc = crc32_le(0xFFFFFFFF, &seed, 1);
crc = ~crc32_le(crc, report->data, report->len - 4);
@@ -1255,74 +1308,125 @@ static void dualsense_output_worker(struct work_struct *work)
struct dualsense *ds = container_of(work, struct dualsense, output_worker);
struct dualsense_output_report report;
struct dualsense_output_report_common *common;
- unsigned long flags;
dualsense_init_output_report(ds, &report, ds->output_report_dmabuf);
common = report.common;
- spin_lock_irqsave(&ds->base.lock, flags);
+ scoped_guard(spinlock_irqsave, &ds->base.lock) {
+ if (ds->update_rumble) {
+ /* Select classic rumble style haptics and enable it. */
+ common->valid_flag0 |= DS_OUTPUT_VALID_FLAG0_HAPTICS_SELECT;
+ if (ds->use_vibration_v2)
+ common->valid_flag2 |= DS_OUTPUT_VALID_FLAG2_COMPATIBLE_VIBRATION2;
+ else
+ common->valid_flag0 |= DS_OUTPUT_VALID_FLAG0_COMPATIBLE_VIBRATION;
+ common->motor_left = ds->motor_left;
+ common->motor_right = ds->motor_right;
+ ds->update_rumble = false;
+ }
- if (ds->update_rumble) {
- /* Select classic rumble style haptics and enable it. */
- common->valid_flag0 |= DS_OUTPUT_VALID_FLAG0_HAPTICS_SELECT;
- if (ds->use_vibration_v2)
- common->valid_flag2 |= DS_OUTPUT_VALID_FLAG2_COMPATIBLE_VIBRATION2;
- else
- common->valid_flag0 |= DS_OUTPUT_VALID_FLAG0_COMPATIBLE_VIBRATION;
- common->motor_left = ds->motor_left;
- common->motor_right = ds->motor_right;
- ds->update_rumble = false;
- }
+ if (ds->update_lightbar) {
+ common->valid_flag1 |= DS_OUTPUT_VALID_FLAG1_LIGHTBAR_CONTROL_ENABLE;
+ common->lightbar_red = ds->lightbar_red;
+ common->lightbar_green = ds->lightbar_green;
+ common->lightbar_blue = ds->lightbar_blue;
- if (ds->update_lightbar) {
- common->valid_flag1 |= DS_OUTPUT_VALID_FLAG1_LIGHTBAR_CONTROL_ENABLE;
- common->lightbar_red = ds->lightbar_red;
- common->lightbar_green = ds->lightbar_green;
- common->lightbar_blue = ds->lightbar_blue;
+ ds->update_lightbar = false;
+ }
- ds->update_lightbar = false;
- }
+ if (ds->update_player_leds) {
+ common->valid_flag1 |=
+ DS_OUTPUT_VALID_FLAG1_PLAYER_INDICATOR_CONTROL_ENABLE;
+ common->player_leds = ds->player_leds_state;
- if (ds->update_player_leds) {
- common->valid_flag1 |= DS_OUTPUT_VALID_FLAG1_PLAYER_INDICATOR_CONTROL_ENABLE;
- common->player_leds = ds->player_leds_state;
+ ds->update_player_leds = false;
+ }
- ds->update_player_leds = false;
- }
+ if (ds->plugged_state != ds->prev_plugged_state) {
+ u8 val = ds->plugged_state & DS_STATUS1_HP_DETECT;
+
+ if (val != (ds->prev_plugged_state & DS_STATUS1_HP_DETECT)) {
+ common->valid_flag0 = DS_OUTPUT_VALID_FLAG0_AUDIO_CONTROL_ENABLE;
+ /*
+ * _--------> Output path setup in audio_flag0
+ * / _------> Headphone (HP) Left channel sink
+ * | / _----> Headphone (HP) Right channel sink
+ * | | / _--> Internal Speaker (SP) sink
+ * | | | /
+ * | | | | L/R - Left/Right channel source
+ * 0 L-R X X - Unrouted (muted) channel source
+ * 1 L-L X
+ * 2 L-L R
+ * 3 X-X R
+ */
+ if (val) {
+ /* Mute SP and route L+R channels to HP */
+ common->audio_control = 0;
+ } else {
+ /* Mute HP and route R channel to SP */
+ common->audio_control =
+ FIELD_PREP(DS_OUTPUT_AUDIO_FLAGS_OUTPUT_PATH_SEL,
+ 0x3);
+ /*
+ * Set SP hardware volume to 100%.
+ * Note the accepted range seems to be [0x3d..0x64]
+ */
+ common->valid_flag0 |=
+ DS_OUTPUT_VALID_FLAG0_SPEAKER_VOLUME_ENABLE;
+ common->speaker_volume = 0x64;
+ /* Set SP preamp gain to +6dB */
+ common->valid_flag1 =
+ DS_OUTPUT_VALID_FLAG1_AUDIO_CONTROL2_ENABLE;
+ common->audio_control2 =
+ FIELD_PREP(DS_OUTPUT_AUDIO_FLAGS2_SP_PREAMP_GAIN,
+ 0x2);
+ }
- if (ds->update_mic_mute) {
- common->valid_flag1 |= DS_OUTPUT_VALID_FLAG1_MIC_MUTE_LED_CONTROL_ENABLE;
- common->mute_button_led = ds->mic_muted;
+ input_report_switch(ds->jack, SW_HEADPHONE_INSERT, val);
+ }
- if (ds->mic_muted) {
- /* Disable microphone */
- common->valid_flag1 |= DS_OUTPUT_VALID_FLAG1_POWER_SAVE_CONTROL_ENABLE;
- common->power_save_control |= DS_OUTPUT_POWER_SAVE_CONTROL_MIC_MUTE;
- } else {
- /* Enable microphone */
- common->valid_flag1 |= DS_OUTPUT_VALID_FLAG1_POWER_SAVE_CONTROL_ENABLE;
- common->power_save_control &= ~DS_OUTPUT_POWER_SAVE_CONTROL_MIC_MUTE;
+ val = ds->plugged_state & DS_STATUS1_MIC_DETECT;
+ if (val != (ds->prev_plugged_state & DS_STATUS1_MIC_DETECT))
+ input_report_switch(ds->jack, SW_MICROPHONE_INSERT, val);
+
+ input_sync(ds->jack);
+ ds->prev_plugged_state = ds->plugged_state;
}
- ds->update_mic_mute = false;
- }
+ if (ds->update_mic_mute) {
+ common->valid_flag1 |= DS_OUTPUT_VALID_FLAG1_MIC_MUTE_LED_CONTROL_ENABLE;
+ common->mute_button_led = ds->mic_muted;
+
+ if (ds->mic_muted) {
+ /* Disable microphone */
+ common->valid_flag1 |=
+ DS_OUTPUT_VALID_FLAG1_POWER_SAVE_CONTROL_ENABLE;
+ common->power_save_control |= DS_OUTPUT_POWER_SAVE_CONTROL_MIC_MUTE;
+ } else {
+ /* Enable microphone */
+ common->valid_flag1 |=
+ DS_OUTPUT_VALID_FLAG1_POWER_SAVE_CONTROL_ENABLE;
+ common->power_save_control &=
+ ~DS_OUTPUT_POWER_SAVE_CONTROL_MIC_MUTE;
+ }
- spin_unlock_irqrestore(&ds->base.lock, flags);
+ ds->update_mic_mute = false;
+ }
+ }
dualsense_send_output_report(ds, &report);
}
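A note on the pattern used throughout this series: scoped_guard() from <linux/cleanup.h> takes the lock on entry to the attached block and drops it on every exit path, which is why the manual spin_lock_irqsave()/spin_unlock_irqrestore() pairs and their `unsigned long flags` locals disappear. A minimal sketch:

	scoped_guard(spinlock_irqsave, &ds->base.lock) {
		ds->update_rumble = true;	/* state consumed by the output worker */
	}	/* lock released here, including on early return */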
static int dualsense_parse_report(struct ps_device *ps_dev, struct hid_report *report,
- u8 *data, int size)
+ u8 *data, int size)
{
struct hid_device *hdev = ps_dev->hdev;
struct dualsense *ds = container_of(ps_dev, struct dualsense, base);
struct dualsense_input_report *ds_report;
- uint8_t battery_data, battery_capacity, charging_status, value;
+ u8 battery_data, battery_capacity, charging_status, value;
int battery_status;
- uint32_t sensor_timestamp;
+ u32 sensor_timestamp;
bool btn_mic_state;
- unsigned long flags;
int i;
/*
@@ -1331,12 +1435,12 @@ static int dualsense_parse_report(struct ps_device *ps_dev, struct hid_report *r
* the full report using reportID 49.
*/
if (hdev->bus == BUS_USB && report->id == DS_INPUT_REPORT_USB &&
- size == DS_INPUT_REPORT_USB_SIZE) {
+ size == DS_INPUT_REPORT_USB_SIZE) {
ds_report = (struct dualsense_input_report *)&data[1];
} else if (hdev->bus == BUS_BLUETOOTH && report->id == DS_INPUT_REPORT_BT &&
- size == DS_INPUT_REPORT_BT_SIZE) {
+ size == DS_INPUT_REPORT_BT_SIZE) {
/* Last 4 bytes of input report contain crc32 */
- uint32_t report_crc = get_unaligned_le32(&data[size - 4]);
+ u32 report_crc = get_unaligned_le32(&data[size - 4]);
if (!ps_check_crc32(PS_INPUT_CRC32_SEED, data, size - 4, report_crc)) {
hid_err(hdev, "DualSense input CRC's check failed\n");
@@ -1384,16 +1488,42 @@ static int dualsense_parse_report(struct ps_device *ps_dev, struct hid_report *r
*/
btn_mic_state = !!(ds_report->buttons[2] & DS_BUTTONS2_MIC_MUTE);
if (btn_mic_state && !ds->last_btn_mic_state) {
- spin_lock_irqsave(&ps_dev->lock, flags);
- ds->update_mic_mute = true;
- ds->mic_muted = !ds->mic_muted; /* toggle */
- spin_unlock_irqrestore(&ps_dev->lock, flags);
+ scoped_guard(spinlock_irqsave, &ps_dev->lock) {
+ ds->update_mic_mute = true;
+ ds->mic_muted = !ds->mic_muted; /* toggle */
+ }
/* Schedule updating of microphone state at hardware level. */
dualsense_schedule_work(ds);
}
ds->last_btn_mic_state = btn_mic_state;
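The mute handling above is deliberately edge-triggered: the toggle only fires on the 0->1 transition of the button bit, so a button held across several input reports does not re-toggle. The shape of the pattern, with illustrative names (pressed, last_pressed, toggle_mute()):

	/* Sketch: act on the rising edge only. */
	if (pressed && !last_pressed)
		toggle_mute();
	last_pressed = pressed;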
+ /*
+ * Parse HP/MIC plugged state data for USB use case, since Bluetooth
+ * audio is currently not supported.
+ */
+ if (hdev->bus == BUS_USB) {
+ value = ds_report->status[1] & DS_STATUS1_JACK_DETECT;
+
+ if (!ds->prev_plugged_state_valid) {
+ /* Initial handling of the plugged state report */
+ scoped_guard(spinlock_irqsave, &ps_dev->lock) {
+ ds->plugged_state = (~value) & DS_STATUS1_JACK_DETECT;
+ ds->prev_plugged_state_valid = true;
+ }
+ }
+
+ if (value != ds->plugged_state) {
+ scoped_guard(spinlock_irqsave, &ps_dev->lock) {
+ ds->prev_plugged_state = ds->plugged_state;
+ ds->plugged_state = value;
+ }
+
+ /* Schedule audio routing towards active endpoint. */
+ dualsense_schedule_work(ds);
+ }
+ }
+
/* Parse and calibrate gyroscope data. */
for (i = 0; i < ARRAY_SIZE(ds_report->gyro); i++) {
int raw_data = (short)le16_to_cpu(ds_report->gyro[i]);
@@ -1419,7 +1549,7 @@ static int dualsense_parse_report(struct ps_device *ps_dev, struct hid_report *r
ds->sensor_timestamp_us = DIV_ROUND_CLOSEST(sensor_timestamp, 3);
ds->sensor_timestamp_initialized = true;
} else {
- uint32_t delta;
+ u32 delta;
if (ds->prev_sensor_timestamp > sensor_timestamp)
delta = (U32_MAX - ds->prev_sensor_timestamp + sensor_timestamp + 1);
@@ -1439,19 +1569,18 @@ static int dualsense_parse_report(struct ps_device *ps_dev, struct hid_report *r
input_mt_report_slot_state(ds->touchpad, MT_TOOL_FINGER, active);
if (active) {
- int x = (point->x_hi << 8) | point->x_lo;
- int y = (point->y_hi << 4) | point->y_lo;
-
- input_report_abs(ds->touchpad, ABS_MT_POSITION_X, x);
- input_report_abs(ds->touchpad, ABS_MT_POSITION_Y, y);
+ input_report_abs(ds->touchpad, ABS_MT_POSITION_X,
+ DS_TOUCH_POINT_X(point->x_hi, point->x_lo));
+ input_report_abs(ds->touchpad, ABS_MT_POSITION_Y,
+ DS_TOUCH_POINT_Y(point->y_hi, point->y_lo));
}
}
input_mt_sync_frame(ds->touchpad);
input_report_key(ds->touchpad, BTN_LEFT, ds_report->buttons[2] & DS_BUTTONS2_TOUCHPAD);
input_sync(ds->touchpad);
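DS_TOUCH_POINT_X()/DS_TOUCH_POINT_Y() are introduced earlier in the patch; judging from the open-coded lines they replace, they presumably reduce to the same 12-bit coordinate reassembly:

/* Assumed expansion, matching the removed open-coded version. */
#define DS_TOUCH_POINT_X(hi, lo)	(((hi) << 8) | (lo))
#define DS_TOUCH_POINT_Y(hi, lo)	(((hi) << 4) | (lo))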
- battery_data = ds_report->status & DS_STATUS_BATTERY_CAPACITY;
- charging_status = (ds_report->status & DS_STATUS_CHARGING) >> DS_STATUS_CHARGING_SHIFT;
+ battery_data = FIELD_GET(DS_STATUS0_BATTERY_CAPACITY, ds_report->status[0]);
+ charging_status = FIELD_GET(DS_STATUS0_CHARGING, ds_report->status[0]);
switch (charging_status) {
case 0x0:
@@ -1481,10 +1610,10 @@ static int dualsense_parse_report(struct ps_device *ps_dev, struct hid_report *r
battery_status = POWER_SUPPLY_STATUS_UNKNOWN;
}
- spin_lock_irqsave(&ps_dev->lock, flags);
- ps_dev->battery_capacity = battery_capacity;
- ps_dev->battery_status = battery_status;
- spin_unlock_irqrestore(&ps_dev->lock, flags);
+ scoped_guard(spinlock_irqsave, &ps_dev->lock) {
+ ps_dev->battery_capacity = battery_capacity;
+ ps_dev->battery_status = battery_status;
+ }
return 0;
}
@@ -1493,16 +1622,15 @@ static int dualsense_play_effect(struct input_dev *dev, void *data, struct ff_ef
{
struct hid_device *hdev = input_get_drvdata(dev);
struct dualsense *ds = hid_get_drvdata(hdev);
- unsigned long flags;
if (effect->type != FF_RUMBLE)
return 0;
- spin_lock_irqsave(&ds->base.lock, flags);
- ds->update_rumble = true;
- ds->motor_left = effect->u.rumble.strong_magnitude / 256;
- ds->motor_right = effect->u.rumble.weak_magnitude / 256;
- spin_unlock_irqrestore(&ds->base.lock, flags);
+ scoped_guard(spinlock_irqsave, &ds->base.lock) {
+ ds->update_rumble = true;
+ ds->motor_left = effect->u.rumble.strong_magnitude / 256;
+ ds->motor_right = effect->u.rumble.weak_magnitude / 256;
+ }
dualsense_schedule_work(ds);
return 0;
@@ -1511,11 +1639,9 @@ static int dualsense_play_effect(struct input_dev *dev, void *data, struct ff_ef
static void dualsense_remove(struct ps_device *ps_dev)
{
struct dualsense *ds = container_of(ps_dev, struct dualsense, base);
- unsigned long flags;
- spin_lock_irqsave(&ds->base.lock, flags);
- ds->output_worker_initialized = false;
- spin_unlock_irqrestore(&ds->base.lock, flags);
+ scoped_guard(spinlock_irqsave, &ds->base.lock)
+ ds->output_worker_initialized = false;
cancel_work_sync(&ds->output_worker);
}
@@ -1523,9 +1649,9 @@ static void dualsense_remove(struct ps_device *ps_dev)
static int dualsense_reset_leds(struct dualsense *ds)
{
struct dualsense_output_report report;
- uint8_t *buf;
+ struct dualsense_output_report_bt *buf;
- buf = kzalloc(sizeof(struct dualsense_output_report_bt), GFP_KERNEL);
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -1545,16 +1671,14 @@ static int dualsense_reset_leds(struct dualsense *ds)
return 0;
}
-static void dualsense_set_lightbar(struct dualsense *ds, uint8_t red, uint8_t green, uint8_t blue)
+static void dualsense_set_lightbar(struct dualsense *ds, u8 red, u8 green, u8 blue)
{
- unsigned long flags;
-
- spin_lock_irqsave(&ds->base.lock, flags);
- ds->update_lightbar = true;
- ds->lightbar_red = red;
- ds->lightbar_green = green;
- ds->lightbar_blue = blue;
- spin_unlock_irqrestore(&ds->base.lock, flags);
+ scoped_guard(spinlock_irqsave, &ds->base.lock) {
+ ds->update_lightbar = true;
+ ds->lightbar_red = red;
+ ds->lightbar_green = green;
+ ds->lightbar_blue = blue;
+ }
dualsense_schedule_work(ds);
}
@@ -1575,7 +1699,7 @@ static void dualsense_set_player_leds(struct dualsense *ds)
BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0)
};
- uint8_t player_id = ds->base.player_id % ARRAY_SIZE(player_ids);
+ u8 player_id = ds->base.player_id % ARRAY_SIZE(player_ids);
ds->update_player_leds = true;
ds->player_leds_state = player_ids[player_id];
@@ -1586,7 +1710,7 @@ static struct ps_device *dualsense_create(struct hid_device *hdev)
{
struct dualsense *ds;
struct ps_device *ps_dev;
- uint8_t max_output_report_size;
+ u8 max_output_report_size;
int i, ret;
static const struct ps_led_info player_leds_info[] = {
@@ -1675,7 +1799,7 @@ static struct ps_device *dualsense_create(struct hid_device *hdev)
ps_dev->input_dev_name = dev_name(&ds->gamepad->dev);
ds->sensors = ps_sensors_create(hdev, DS_ACC_RANGE, DS_ACC_RES_PER_G,
- DS_GYRO_RANGE, DS_GYRO_RES_PER_DEG_S);
+ DS_GYRO_RANGE, DS_GYRO_RES_PER_DEG_S);
if (IS_ERR(ds->sensors)) {
ret = PTR_ERR(ds->sensors);
goto err;
@@ -1687,6 +1811,15 @@ static struct ps_device *dualsense_create(struct hid_device *hdev)
goto err;
}
+ /* Bluetooth audio is currently not supported. */
+ if (hdev->bus == BUS_USB) {
+ ds->jack = ps_headset_jack_create(hdev);
+ if (IS_ERR(ds->jack)) {
+ ret = PTR_ERR(ds->jack);
+ goto err;
+ }
+ }
+
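ps_headset_jack_create() is added elsewhere in the series; the device it returns is assumed to be an input device advertising the two switch events reported from the output worker above. A sketch of the capabilities it would need:

	/* Sketch (assumption about the helper's internals): */
	input_set_capability(jack, EV_SW, SW_HEADPHONE_INSERT);
	input_set_capability(jack, EV_SW, SW_MICROPHONE_INSERT);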
ret = ps_device_register_battery(ps_dev);
if (ret)
goto err;
@@ -1729,7 +1862,7 @@ static struct ps_device *dualsense_create(struct hid_device *hdev)
* can change behavior.
*/
hid_info(hdev, "Registered DualSense controller hw_version=0x%08x fw_version=0x%08x\n",
- ds->base.hw_version, ds->base.fw_version);
+ ds->base.hw_version, ds->base.fw_version);
return &ds->base;
@@ -1741,7 +1874,6 @@ err:
static void dualshock4_dongle_calibration_work(struct work_struct *work)
{
struct dualshock4 *ds4 = container_of(work, struct dualshock4, dongle_hotplug_worker);
- unsigned long flags;
enum dualshock4_dongle_state dongle_state;
int ret;
@@ -1753,16 +1885,16 @@ static void dualshock4_dongle_calibration_work(struct work_struct *work)
* DS4 hotplug is detected from sony_raw_event as any issues
* are likely resolved then (the dongle is quite stupid).
*/
- hid_err(ds4->base.hdev, "DualShock 4 USB dongle: calibration failed, disabling device\n");
+ hid_err(ds4->base.hdev,
+ "DualShock 4 USB dongle: calibration failed, disabling device\n");
dongle_state = DONGLE_DISABLED;
} else {
hid_info(ds4->base.hdev, "DualShock 4 USB dongle: calibration completed\n");
dongle_state = DONGLE_CONNECTED;
}
- spin_lock_irqsave(&ds4->base.lock, flags);
- ds4->dongle_state = dongle_state;
- spin_unlock_irqrestore(&ds4->base.lock, flags);
+ scoped_guard(spinlock_irqsave, &ds4->base.lock)
+ ds4->dongle_state = dongle_state;
}
static int dualshock4_get_calibration_data(struct dualshock4 *ds4)
@@ -1779,7 +1911,7 @@ static int dualshock4_get_calibration_data(struct dualshock4 *ds4)
int range_2g;
int ret = 0;
int i;
- uint8_t *buf;
+ u8 *buf;
if (ds4->base.hdev->bus == BUS_USB) {
int retries;
@@ -1798,14 +1930,17 @@ static int dualshock4_get_calibration_data(struct dualshock4 *ds4)
*/
for (retries = 0; retries < 3; retries++) {
ret = ps_get_report(hdev, DS4_FEATURE_REPORT_CALIBRATION, buf,
- DS4_FEATURE_REPORT_CALIBRATION_SIZE, true);
+ DS4_FEATURE_REPORT_CALIBRATION_SIZE, true);
if (ret) {
if (retries < 2) {
- hid_warn(hdev, "Retrying DualShock 4 get calibration report (0x02) request\n");
+ hid_warn(hdev,
+ "Retrying DualShock 4 get calibration report (0x02) request\n");
continue;
}
- hid_warn(hdev, "Failed to retrieve DualShock4 calibration info: %d\n", ret);
+ hid_warn(hdev,
+ "Failed to retrieve DualShock4 calibration info: %d\n",
+ ret);
ret = -EILSEQ;
goto transfer_failed;
} else {
@@ -1820,7 +1955,7 @@ static int dualshock4_get_calibration_data(struct dualshock4 *ds4)
}
ret = ps_get_report(hdev, DS4_FEATURE_REPORT_CALIBRATION_BT, buf,
- DS4_FEATURE_REPORT_CALIBRATION_BT_SIZE, true);
+ DS4_FEATURE_REPORT_CALIBRATION_BT_SIZE, true);
if (ret) {
hid_warn(hdev, "Failed to retrieve DualShock4 calibration info: %d\n", ret);
@@ -1867,19 +2002,19 @@ static int dualshock4_get_calibration_data(struct dualshock4 *ds4)
speed_2x = (gyro_speed_plus + gyro_speed_minus);
ds4->gyro_calib_data[0].abs_code = ABS_RX;
ds4->gyro_calib_data[0].bias = 0;
- ds4->gyro_calib_data[0].sens_numer = speed_2x*DS4_GYRO_RES_PER_DEG_S;
+ ds4->gyro_calib_data[0].sens_numer = speed_2x * DS4_GYRO_RES_PER_DEG_S;
ds4->gyro_calib_data[0].sens_denom = abs(gyro_pitch_plus - gyro_pitch_bias) +
abs(gyro_pitch_minus - gyro_pitch_bias);
ds4->gyro_calib_data[1].abs_code = ABS_RY;
ds4->gyro_calib_data[1].bias = 0;
- ds4->gyro_calib_data[1].sens_numer = speed_2x*DS4_GYRO_RES_PER_DEG_S;
+ ds4->gyro_calib_data[1].sens_numer = speed_2x * DS4_GYRO_RES_PER_DEG_S;
ds4->gyro_calib_data[1].sens_denom = abs(gyro_yaw_plus - gyro_yaw_bias) +
abs(gyro_yaw_minus - gyro_yaw_bias);
ds4->gyro_calib_data[2].abs_code = ABS_RZ;
ds4->gyro_calib_data[2].bias = 0;
- ds4->gyro_calib_data[2].sens_numer = speed_2x*DS4_GYRO_RES_PER_DEG_S;
+ ds4->gyro_calib_data[2].sens_numer = speed_2x * DS4_GYRO_RES_PER_DEG_S;
ds4->gyro_calib_data[2].sens_denom = abs(gyro_roll_plus - gyro_roll_bias) +
abs(gyro_roll_minus - gyro_roll_bias);
@@ -1890,19 +2025,19 @@ static int dualshock4_get_calibration_data(struct dualshock4 *ds4)
range_2g = acc_x_plus - acc_x_minus;
ds4->accel_calib_data[0].abs_code = ABS_X;
ds4->accel_calib_data[0].bias = acc_x_plus - range_2g / 2;
- ds4->accel_calib_data[0].sens_numer = 2*DS4_ACC_RES_PER_G;
+ ds4->accel_calib_data[0].sens_numer = 2 * DS4_ACC_RES_PER_G;
ds4->accel_calib_data[0].sens_denom = range_2g;
range_2g = acc_y_plus - acc_y_minus;
ds4->accel_calib_data[1].abs_code = ABS_Y;
ds4->accel_calib_data[1].bias = acc_y_plus - range_2g / 2;
- ds4->accel_calib_data[1].sens_numer = 2*DS4_ACC_RES_PER_G;
+ ds4->accel_calib_data[1].sens_numer = 2 * DS4_ACC_RES_PER_G;
ds4->accel_calib_data[1].sens_denom = range_2g;
range_2g = acc_z_plus - acc_z_minus;
ds4->accel_calib_data[2].abs_code = ABS_Z;
ds4->accel_calib_data[2].bias = acc_z_plus - range_2g / 2;
- ds4->accel_calib_data[2].sens_numer = 2*DS4_ACC_RES_PER_G;
+ ds4->accel_calib_data[2].sens_numer = 2 * DS4_ACC_RES_PER_G;
ds4->accel_calib_data[2].sens_denom = range_2g;
transfer_failed:
@@ -1914,8 +2049,9 @@ transfer_failed:
for (i = 0; i < ARRAY_SIZE(ds4->gyro_calib_data); i++) {
if (ds4->gyro_calib_data[i].sens_denom == 0) {
ds4->gyro_calib_data[i].abs_code = ABS_RX + i;
- hid_warn(hdev, "Invalid gyro calibration data for axis (%d), disabling calibration.",
- ds4->gyro_calib_data[i].abs_code);
+ hid_warn(hdev,
+ "Invalid gyro calibration data for axis (%d), disabling calibration.",
+ ds4->gyro_calib_data[i].abs_code);
ds4->gyro_calib_data[i].bias = 0;
ds4->gyro_calib_data[i].sens_numer = DS4_GYRO_RANGE;
ds4->gyro_calib_data[i].sens_denom = S16_MAX;
@@ -1930,8 +2066,9 @@ transfer_failed:
for (i = 0; i < ARRAY_SIZE(ds4->accel_calib_data); i++) {
if (ds4->accel_calib_data[i].sens_denom == 0) {
ds4->accel_calib_data[i].abs_code = ABS_X + i;
- hid_warn(hdev, "Invalid accelerometer calibration data for axis (%d), disabling calibration.",
- ds4->accel_calib_data[i].abs_code);
+ hid_warn(hdev,
+ "Invalid accelerometer calibration data for axis (%d), disabling calibration.",
+ ds4->accel_calib_data[i].abs_code);
ds4->accel_calib_data[i].bias = 0;
ds4->accel_calib_data[i].sens_numer = DS4_ACC_RANGE;
ds4->accel_calib_data[i].sens_denom = S16_MAX;
@@ -1943,7 +2080,7 @@ transfer_failed:
static int dualshock4_get_firmware_info(struct dualshock4 *ds4)
{
- uint8_t *buf;
+ u8 *buf;
int ret;
buf = kzalloc(DS4_FEATURE_REPORT_FIRMWARE_INFO_SIZE, GFP_KERNEL);
@@ -1954,7 +2091,7 @@ static int dualshock4_get_firmware_info(struct dualshock4 *ds4)
* lacks CRC support, so must be disabled in ps_get_report.
*/
ret = ps_get_report(ds4->base.hdev, DS4_FEATURE_REPORT_FIRMWARE_INFO, buf,
- DS4_FEATURE_REPORT_FIRMWARE_INFO_SIZE, false);
+ DS4_FEATURE_REPORT_FIRMWARE_INFO_SIZE, false);
if (ret) {
hid_err(ds4->base.hdev, "Failed to retrieve DualShock4 firmware info: %d\n", ret);
goto err_free;
@@ -1971,7 +2108,7 @@ err_free:
static int dualshock4_get_mac_address(struct dualshock4 *ds4)
{
struct hid_device *hdev = ds4->base.hdev;
- uint8_t *buf;
+ u8 *buf;
int ret = 0;
if (hdev->bus == BUS_USB) {
@@ -1980,7 +2117,7 @@ static int dualshock4_get_mac_address(struct dualshock4 *ds4)
return -ENOMEM;
ret = ps_get_report(hdev, DS4_FEATURE_REPORT_PAIRING_INFO, buf,
- DS4_FEATURE_REPORT_PAIRING_INFO_SIZE, false);
+ DS4_FEATURE_REPORT_PAIRING_INFO_SIZE, false);
if (ret) {
hid_err(hdev, "Failed to retrieve DualShock4 pairing info: %d\n", ret);
goto err_free;
@@ -1993,9 +2130,9 @@ static int dualshock4_get_mac_address(struct dualshock4 *ds4)
return -EINVAL;
ret = sscanf(hdev->uniq, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
- &ds4->base.mac_address[5], &ds4->base.mac_address[4],
- &ds4->base.mac_address[3], &ds4->base.mac_address[2],
- &ds4->base.mac_address[1], &ds4->base.mac_address[0]);
+ &ds4->base.mac_address[5], &ds4->base.mac_address[4],
+ &ds4->base.mac_address[3], &ds4->base.mac_address[2],
+ &ds4->base.mac_address[1], &ds4->base.mac_address[0]);
if (ret != sizeof(ds4->base.mac_address))
return -EINVAL;
@@ -2030,28 +2167,27 @@ static enum led_brightness dualshock4_led_get_brightness(struct led_classdev *le
}
static int dualshock4_led_set_blink(struct led_classdev *led, unsigned long *delay_on,
- unsigned long *delay_off)
+ unsigned long *delay_off)
{
struct hid_device *hdev = to_hid_device(led->dev->parent);
struct dualshock4 *ds4 = hid_get_drvdata(hdev);
- unsigned long flags;
- spin_lock_irqsave(&ds4->base.lock, flags);
+ scoped_guard(spinlock_irqsave, &ds4->base.lock) {
+ if (!*delay_on && !*delay_off) {
+ /* Default to 1 Hz (50 centiseconds on, 50 centiseconds off). */
+ ds4->lightbar_blink_on = 50;
+ ds4->lightbar_blink_off = 50;
+ } else {
+ /* Blink delays in centiseconds. */
+ ds4->lightbar_blink_on = min_t(unsigned long, *delay_on / 10,
+ DS4_LIGHTBAR_MAX_BLINK);
+ ds4->lightbar_blink_off = min_t(unsigned long, *delay_off / 10,
+ DS4_LIGHTBAR_MAX_BLINK);
+ }
- if (!*delay_on && !*delay_off) {
- /* Default to 1 Hz (50 centiseconds on, 50 centiseconds off). */
- ds4->lightbar_blink_on = 50;
- ds4->lightbar_blink_off = 50;
- } else {
- /* Blink delays in centiseconds. */
- ds4->lightbar_blink_on = min_t(unsigned long, *delay_on/10, DS4_LIGHTBAR_MAX_BLINK);
- ds4->lightbar_blink_off = min_t(unsigned long, *delay_off/10, DS4_LIGHTBAR_MAX_BLINK);
+ ds4->update_lightbar_blink = true;
}
- ds4->update_lightbar_blink = true;
-
- spin_unlock_irqrestore(&ds4->base.lock, flags);
-
dualshock4_schedule_work(ds4);
/* Report scaled values back to LED subsystem */
@@ -2065,36 +2201,33 @@ static int dualshock4_led_set_brightness(struct led_classdev *led, enum led_brig
{
struct hid_device *hdev = to_hid_device(led->dev->parent);
struct dualshock4 *ds4 = hid_get_drvdata(hdev);
- unsigned long flags;
unsigned int led_index;
- spin_lock_irqsave(&ds4->base.lock, flags);
-
- led_index = led - ds4->lightbar_leds;
- switch (led_index) {
- case 0:
- ds4->lightbar_red = value;
- break;
- case 1:
- ds4->lightbar_green = value;
- break;
- case 2:
- ds4->lightbar_blue = value;
- break;
- case 3:
- ds4->lightbar_enabled = !!value;
-
- /* brightness = 0 also cancels blinking in Linux. */
- if (!ds4->lightbar_enabled) {
- ds4->lightbar_blink_off = 0;
- ds4->lightbar_blink_on = 0;
- ds4->update_lightbar_blink = true;
+ scoped_guard(spinlock_irqsave, &ds4->base.lock) {
+ led_index = led - ds4->lightbar_leds;
+ switch (led_index) {
+ case 0:
+ ds4->lightbar_red = value;
+ break;
+ case 1:
+ ds4->lightbar_green = value;
+ break;
+ case 2:
+ ds4->lightbar_blue = value;
+ break;
+ case 3:
+ ds4->lightbar_enabled = !!value;
+
+ /* brightness = 0 also cancels blinking in Linux. */
+ if (!ds4->lightbar_enabled) {
+ ds4->lightbar_blink_off = 0;
+ ds4->lightbar_blink_on = 0;
+ ds4->update_lightbar_blink = true;
+ }
}
- }
- ds4->update_lightbar = true;
-
- spin_unlock_irqrestore(&ds4->base.lock, flags);
+ ds4->update_lightbar = true;
+ }
dualshock4_schedule_work(ds4);
@@ -2102,7 +2235,7 @@ static int dualshock4_led_set_brightness(struct led_classdev *led, enum led_brig
}
static void dualshock4_init_output_report(struct dualshock4 *ds4,
- struct dualshock4_output_report *rp, void *buf)
+ struct dualshock4_output_report *rp, void *buf)
{
struct hid_device *hdev = ds4->base.hdev;
@@ -2136,66 +2269,63 @@ static void dualshock4_output_worker(struct work_struct *work)
struct dualshock4 *ds4 = container_of(work, struct dualshock4, output_worker);
struct dualshock4_output_report report;
struct dualshock4_output_report_common *common;
- unsigned long flags;
dualshock4_init_output_report(ds4, &report, ds4->output_report_dmabuf);
common = report.common;
- spin_lock_irqsave(&ds4->base.lock, flags);
-
- /*
- * Some 3rd party gamepads expect updates to rumble and lightbar
- * together, and setting one may cancel the other.
- *
- * Let's maximise compatibility by always sending rumble and lightbar
- * updates together, even when only one has been scheduled, resulting
- * in:
- *
- * ds4->valid_flag0 >= 0x03
- *
- * Hopefully this will maximise compatibility with third-party pads.
- *
- * Any further update bits, such as 0x04 for lightbar blinking, will
- * be or'd on top of this like before.
- */
- if (ds4->update_rumble || ds4->update_lightbar) {
- ds4->update_rumble = true; /* 0x01 */
- ds4->update_lightbar = true; /* 0x02 */
- }
+ scoped_guard(spinlock_irqsave, &ds4->base.lock) {
+ /*
+ * Some 3rd party gamepads expect updates to rumble and lightbar
+ * together, and setting one may cancel the other.
+ *
+ * Let's maximise compatibility by always sending rumble and lightbar
+ * updates together, even when only one has been scheduled, resulting
+ * in:
+ *
+ * ds4->valid_flag0 >= 0x03
+ *
+ * Hopefully this will maximise compatibility with third-party pads.
+ *
+ * Any further update bits, such as 0x04 for lightbar blinking, will
+ * be or'd on top of this like before.
+ */
+ if (ds4->update_rumble || ds4->update_lightbar) {
+ ds4->update_rumble = true; /* 0x01 */
+ ds4->update_lightbar = true; /* 0x02 */
+ }
- if (ds4->update_rumble) {
- /* Select classic rumble style haptics and enable it. */
- common->valid_flag0 |= DS4_OUTPUT_VALID_FLAG0_MOTOR;
- common->motor_left = ds4->motor_left;
- common->motor_right = ds4->motor_right;
- ds4->update_rumble = false;
- }
+ if (ds4->update_rumble) {
+ /* Select classic rumble style haptics and enable it. */
+ common->valid_flag0 |= DS4_OUTPUT_VALID_FLAG0_MOTOR;
+ common->motor_left = ds4->motor_left;
+ common->motor_right = ds4->motor_right;
+ ds4->update_rumble = false;
+ }
- if (ds4->update_lightbar) {
- common->valid_flag0 |= DS4_OUTPUT_VALID_FLAG0_LED;
- /* Comptabile behavior with hid-sony, which used a dummy global LED to
- * allow enabling/disabling the lightbar. The global LED maps to
- * lightbar_enabled.
- */
- common->lightbar_red = ds4->lightbar_enabled ? ds4->lightbar_red : 0;
- common->lightbar_green = ds4->lightbar_enabled ? ds4->lightbar_green : 0;
- common->lightbar_blue = ds4->lightbar_enabled ? ds4->lightbar_blue : 0;
- ds4->update_lightbar = false;
- }
+ if (ds4->update_lightbar) {
+ common->valid_flag0 |= DS4_OUTPUT_VALID_FLAG0_LED;
+ /* Compatible behavior with hid-sony, which used a dummy global LED to
+ * allow enabling/disabling the lightbar. The global LED maps to
+ * lightbar_enabled.
+ */
+ common->lightbar_red = ds4->lightbar_enabled ? ds4->lightbar_red : 0;
+ common->lightbar_green = ds4->lightbar_enabled ? ds4->lightbar_green : 0;
+ common->lightbar_blue = ds4->lightbar_enabled ? ds4->lightbar_blue : 0;
+ ds4->update_lightbar = false;
+ }
- if (ds4->update_lightbar_blink) {
- common->valid_flag0 |= DS4_OUTPUT_VALID_FLAG0_LED_BLINK;
- common->lightbar_blink_on = ds4->lightbar_blink_on;
- common->lightbar_blink_off = ds4->lightbar_blink_off;
- ds4->update_lightbar_blink = false;
+ if (ds4->update_lightbar_blink) {
+ common->valid_flag0 |= DS4_OUTPUT_VALID_FLAG0_LED_BLINK;
+ common->lightbar_blink_on = ds4->lightbar_blink_on;
+ common->lightbar_blink_off = ds4->lightbar_blink_off;
+ ds4->update_lightbar_blink = false;
+ }
}
- spin_unlock_irqrestore(&ds4->base.lock, flags);
-
/* Bluetooth packets need additional flags as well as a CRC in the last 4 bytes. */
if (report.bt) {
- uint32_t crc;
- uint8_t seed = PS_OUTPUT_CRC32_SEED;
+ u32 crc;
+ u8 seed = PS_OUTPUT_CRC32_SEED;
/* Hardware control flags need to be set to let the device know
* there is HID data as well as CRC.
@@ -2217,16 +2347,15 @@ static void dualshock4_output_worker(struct work_struct *work)
}
static int dualshock4_parse_report(struct ps_device *ps_dev, struct hid_report *report,
- u8 *data, int size)
+ u8 *data, int size)
{
struct hid_device *hdev = ps_dev->hdev;
struct dualshock4 *ds4 = container_of(ps_dev, struct dualshock4, base);
struct dualshock4_input_report_common *ds4_report;
struct dualshock4_touch_report *touch_reports;
- uint8_t battery_capacity, num_touch_reports, value;
+ u8 battery_capacity, num_touch_reports, value;
int battery_status, i, j;
- uint16_t sensor_timestamp;
- unsigned long flags;
+ u16 sensor_timestamp;
bool is_minimal = false;
/*
@@ -2235,16 +2364,17 @@ static int dualshock4_parse_report(struct ps_device *ps_dev, struct hid_report *
* the full report using reportID 17.
*/
if (hdev->bus == BUS_USB && report->id == DS4_INPUT_REPORT_USB &&
- size == DS4_INPUT_REPORT_USB_SIZE) {
- struct dualshock4_input_report_usb *usb = (struct dualshock4_input_report_usb *)data;
+ size == DS4_INPUT_REPORT_USB_SIZE) {
+ struct dualshock4_input_report_usb *usb =
+ (struct dualshock4_input_report_usb *)data;
ds4_report = &usb->common;
num_touch_reports = usb->num_touch_reports;
touch_reports = usb->touch_reports;
} else if (hdev->bus == BUS_BLUETOOTH && report->id == DS4_INPUT_REPORT_BT &&
- size == DS4_INPUT_REPORT_BT_SIZE) {
+ size == DS4_INPUT_REPORT_BT_SIZE) {
struct dualshock4_input_report_bt *bt = (struct dualshock4_input_report_bt *)data;
- uint32_t report_crc = get_unaligned_le32(&bt->crc32);
+ u32 report_crc = get_unaligned_le32(&bt->crc32);
/* Last 4 bytes of input report contain the CRC. */
if (!ps_check_crc32(PS_INPUT_CRC32_SEED, data, size - 4, report_crc)) {
@@ -2325,16 +2455,16 @@ static int dualshock4_parse_report(struct ps_device *ps_dev, struct hid_report *
/* Convert timestamp (in 5.33us unit) to timestamp_us */
sensor_timestamp = le16_to_cpu(ds4_report->sensor_timestamp);
if (!ds4->sensor_timestamp_initialized) {
- ds4->sensor_timestamp_us = DIV_ROUND_CLOSEST(sensor_timestamp*16, 3);
+ ds4->sensor_timestamp_us = DIV_ROUND_CLOSEST(sensor_timestamp * 16, 3);
ds4->sensor_timestamp_initialized = true;
} else {
- uint16_t delta;
+ u16 delta;
if (ds4->prev_sensor_timestamp > sensor_timestamp)
delta = (U16_MAX - ds4->prev_sensor_timestamp + sensor_timestamp + 1);
else
delta = sensor_timestamp - ds4->prev_sensor_timestamp;
- ds4->sensor_timestamp_us += DIV_ROUND_CLOSEST(delta*16, 3);
+ ds4->sensor_timestamp_us += DIV_ROUND_CLOSEST(delta * 16, 3);
}
ds4->prev_sensor_timestamp = sensor_timestamp;
input_event(ds4->sensors, EV_MSC, MSC_TIMESTAMP, ds4->sensor_timestamp_us);
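One DS4 sensor timestamp tick is 16/3 us (5.33 us), hence the DIV_ROUND_CLOSEST(delta * 16, 3) scaling, with the explicit U16_MAX branch handling counter wraparound. As a sketch (ds4_ticks_to_us() is an illustrative name):

	/* Sketch: convert 5.33us ticks to microseconds with rounding. */
	static inline u32 ds4_ticks_to_us(u16 ticks)
	{
		return DIV_ROUND_CLOSEST((u32)ticks * 16, 3);
	}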
@@ -2351,11 +2481,10 @@ static int dualshock4_parse_report(struct ps_device *ps_dev, struct hid_report *
input_mt_report_slot_state(ds4->touchpad, MT_TOOL_FINGER, active);
if (active) {
- int x = (point->x_hi << 8) | point->x_lo;
- int y = (point->y_hi << 4) | point->y_lo;
-
- input_report_abs(ds4->touchpad, ABS_MT_POSITION_X, x);
- input_report_abs(ds4->touchpad, ABS_MT_POSITION_Y, y);
+ input_report_abs(ds4->touchpad, ABS_MT_POSITION_X,
+ DS4_TOUCH_POINT_X(point->x_hi, point->x_lo));
+ input_report_abs(ds4->touchpad, ABS_MT_POSITION_Y,
+ DS4_TOUCH_POINT_Y(point->y_hi, point->y_lo));
}
}
input_mt_sync_frame(ds4->touchpad);
@@ -2374,7 +2503,7 @@ static int dualshock4_parse_report(struct ps_device *ps_dev, struct hid_report *
* - 15: charge error
*/
if (ds4_report->status[0] & DS4_STATUS0_CABLE_STATE) {
- uint8_t battery_data = ds4_report->status[0] & DS4_STATUS0_BATTERY_CAPACITY;
+ u8 battery_data = ds4_report->status[0] & DS4_STATUS0_BATTERY_CAPACITY;
if (battery_data < 10) {
/* Take the mid-point for each battery capacity value,
@@ -2395,7 +2524,7 @@ static int dualshock4_parse_report(struct ps_device *ps_dev, struct hid_report *
battery_status = POWER_SUPPLY_STATUS_UNKNOWN;
}
} else {
- uint8_t battery_data = ds4_report->status[0] & DS4_STATUS0_BATTERY_CAPACITY;
+ u8 battery_data = ds4_report->status[0] & DS4_STATUS0_BATTERY_CAPACITY;
if (battery_data < 10)
battery_capacity = battery_data * 10 + 5;
@@ -2405,16 +2534,16 @@ static int dualshock4_parse_report(struct ps_device *ps_dev, struct hid_report *
battery_status = POWER_SUPPLY_STATUS_DISCHARGING;
}
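The capacity math above maps the 4-bit battery level to mid-point percentages; as a sketch of the discharging branch (ds4_level_to_percent() is an illustrative name, not part of the patch):

	/* Levels 0..9 become 5%..95% mid-points; higher values presumably
	 * clamp to 100% in the else branch truncated by the hunk. */
	static inline u8 ds4_level_to_percent(u8 level)
	{
		return level < 10 ? level * 10 + 5 : 100;
	}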
- spin_lock_irqsave(&ps_dev->lock, flags);
- ps_dev->battery_capacity = battery_capacity;
- ps_dev->battery_status = battery_status;
- spin_unlock_irqrestore(&ps_dev->lock, flags);
+ scoped_guard(spinlock_irqsave, &ps_dev->lock) {
+ ps_dev->battery_capacity = battery_capacity;
+ ps_dev->battery_status = battery_status;
+ }
return 0;
}
static int dualshock4_dongle_parse_report(struct ps_device *ps_dev, struct hid_report *report,
- u8 *data, int size)
+ u8 *data, int size)
{
struct dualshock4 *ds4 = container_of(ps_dev, struct dualshock4, base);
bool connected = false;
@@ -2425,8 +2554,8 @@ static int dualshock4_dongle_parse_report(struct ps_device *ps_dev, struct hid_r
* parsing code.
*/
if (data[0] == DS4_INPUT_REPORT_USB && size == DS4_INPUT_REPORT_USB_SIZE) {
- struct dualshock4_input_report_common *ds4_report = (struct dualshock4_input_report_common *)&data[1];
- unsigned long flags;
+ struct dualshock4_input_report_common *ds4_report =
+ (struct dualshock4_input_report_common *)&data[1];
connected = ds4_report->status[1] & DS4_STATUS1_DONGLE_STATE ? false : true;
@@ -2435,9 +2564,8 @@ static int dualshock4_dongle_parse_report(struct ps_device *ps_dev, struct hid_r
dualshock4_set_default_lightbar_colors(ds4);
- spin_lock_irqsave(&ps_dev->lock, flags);
- ds4->dongle_state = DONGLE_CALIBRATING;
- spin_unlock_irqrestore(&ps_dev->lock, flags);
+ scoped_guard(spinlock_irqsave, &ps_dev->lock)
+ ds4->dongle_state = DONGLE_CALIBRATING;
schedule_work(&ds4->dongle_hotplug_worker);
@@ -2449,9 +2577,8 @@ static int dualshock4_dongle_parse_report(struct ps_device *ps_dev, struct hid_r
ds4->dongle_state == DONGLE_DISABLED) && !connected) {
hid_info(ps_dev->hdev, "DualShock 4 USB dongle: controller disconnected\n");
- spin_lock_irqsave(&ps_dev->lock, flags);
- ds4->dongle_state = DONGLE_DISCONNECTED;
- spin_unlock_irqrestore(&ps_dev->lock, flags);
+ scoped_guard(spinlock_irqsave, &ps_dev->lock)
+ ds4->dongle_state = DONGLE_DISCONNECTED;
/* Return 0, so hidraw can get the report. */
return 0;
@@ -2473,16 +2600,15 @@ static int dualshock4_play_effect(struct input_dev *dev, void *data, struct ff_e
{
struct hid_device *hdev = input_get_drvdata(dev);
struct dualshock4 *ds4 = hid_get_drvdata(hdev);
- unsigned long flags;
if (effect->type != FF_RUMBLE)
return 0;
- spin_lock_irqsave(&ds4->base.lock, flags);
- ds4->update_rumble = true;
- ds4->motor_left = effect->u.rumble.strong_magnitude / 256;
- ds4->motor_right = effect->u.rumble.weak_magnitude / 256;
- spin_unlock_irqrestore(&ds4->base.lock, flags);
+ scoped_guard(spinlock_irqsave, &ds4->base.lock) {
+ ds4->update_rumble = true;
+ ds4->motor_left = effect->u.rumble.strong_magnitude / 256;
+ ds4->motor_right = effect->u.rumble.weak_magnitude / 256;
+ }
dualshock4_schedule_work(ds4);
return 0;
@@ -2491,11 +2617,9 @@ static int dualshock4_play_effect(struct input_dev *dev, void *data, struct ff_e
static void dualshock4_remove(struct ps_device *ps_dev)
{
struct dualshock4 *ds4 = container_of(ps_dev, struct dualshock4, base);
- unsigned long flags;
- spin_lock_irqsave(&ds4->base.lock, flags);
- ds4->output_worker_initialized = false;
- spin_unlock_irqrestore(&ds4->base.lock, flags);
+ scoped_guard(spinlock_irqsave, &ds4->base.lock)
+ ds4->output_worker_initialized = false;
cancel_work_sync(&ds4->output_worker);
@@ -2505,15 +2629,13 @@ static void dualshock4_remove(struct ps_device *ps_dev)
static inline void dualshock4_schedule_work(struct dualshock4 *ds4)
{
- unsigned long flags;
-
- spin_lock_irqsave(&ds4->base.lock, flags);
- if (ds4->output_worker_initialized)
- schedule_work(&ds4->output_worker);
- spin_unlock_irqrestore(&ds4->base.lock, flags);
+ /* Using scoped_guard() instead of guard() to make sparse happy */
+ scoped_guard(spinlock_irqsave, &ds4->base.lock)
+ if (ds4->output_worker_initialized)
+ schedule_work(&ds4->output_worker);
}
-static void dualshock4_set_bt_poll_interval(struct dualshock4 *ds4, uint8_t interval)
+static void dualshock4_set_bt_poll_interval(struct dualshock4 *ds4, u8 interval)
{
ds4->bt_poll_interval = interval;
ds4->update_bt_poll_interval = true;
@@ -2533,7 +2655,7 @@ static void dualshock4_set_default_lightbar_colors(struct dualshock4 *ds4)
{ 0x20, 0x00, 0x20 } /* Pink */
};
- uint8_t player_id = ds4->base.player_id % ARRAY_SIZE(player_colors);
+ u8 player_id = ds4->base.player_id % ARRAY_SIZE(player_colors);
ds4->lightbar_enabled = true;
ds4->lightbar_red = player_colors[player_id][0];
@@ -2548,7 +2670,7 @@ static struct ps_device *dualshock4_create(struct hid_device *hdev)
{
struct dualshock4 *ds4;
struct ps_device *ps_dev;
- uint8_t max_output_report_size;
+ u8 max_output_report_size;
int i, ret;
/* The DualShock4 has an RGB lightbar, which the original hid-sony driver
@@ -2561,11 +2683,14 @@ static struct ps_device *dualshock4_create(struct hid_device *hdev)
* existing applications (e.g. Android). Nothing matches against MAC address.
*/
static const struct ps_led_info lightbar_leds_info[] = {
- { NULL, "red", 255, dualshock4_led_get_brightness, dualshock4_led_set_brightness },
- { NULL, "green", 255, dualshock4_led_get_brightness, dualshock4_led_set_brightness },
- { NULL, "blue", 255, dualshock4_led_get_brightness, dualshock4_led_set_brightness },
- { NULL, "global", 1, dualshock4_led_get_brightness, dualshock4_led_set_brightness,
- dualshock4_led_set_blink },
+ { NULL, "red", 255, dualshock4_led_get_brightness,
+ dualshock4_led_set_brightness },
+ { NULL, "green", 255, dualshock4_led_get_brightness,
+ dualshock4_led_set_brightness },
+ { NULL, "blue", 255, dualshock4_led_get_brightness,
+ dualshock4_led_set_brightness },
+ { NULL, "global", 1, dualshock4_led_get_brightness,
+ dualshock4_led_set_brightness, dualshock4_led_set_blink },
};
ds4 = devm_kzalloc(&hdev->dev, sizeof(*ds4), GFP_KERNEL);
@@ -2635,7 +2760,7 @@ static struct ps_device *dualshock4_create(struct hid_device *hdev)
ps_dev->input_dev_name = dev_name(&ds4->gamepad->dev);
ds4->sensors = ps_sensors_create(hdev, DS4_ACC_RANGE, DS4_ACC_RES_PER_G,
- DS4_GYRO_RANGE, DS4_GYRO_RES_PER_DEG_S);
+ DS4_GYRO_RANGE, DS4_GYRO_RES_PER_DEG_S);
if (IS_ERR(ds4->sensors)) {
ret = PTR_ERR(ds4->sensors);
goto err;
@@ -2674,7 +2799,7 @@ static struct ps_device *dualshock4_create(struct hid_device *hdev)
* can change behavior.
*/
hid_info(hdev, "Registered DualShock4 controller hw_version=0x%08x fw_version=0x%08x\n",
- ds4->base.hw_version, ds4->base.fw_version);
+ ds4->base.hw_version, ds4->base.fw_version);
return &ds4->base;
err:
@@ -2683,7 +2808,7 @@ err:
}
static int ps_raw_event(struct hid_device *hdev, struct hid_report *report,
- u8 *data, int size)
+ u8 *data, int size)
{
struct ps_device *dev = hid_get_drvdata(hdev);
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index ff11f1ad344d..ffd034566e2e 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -124,6 +124,8 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_T609A), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_ODDOR_HANDBRAKE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_LEGION_GO_DUAL_DINPUT), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_LEGION_GO2_DUAL_DINPUT), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019), HID_QUIRK_ALWAYS_POLL },
@@ -411,6 +413,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT4DRBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1URBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1DRBK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT2DRBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1URBK_010C) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1URBK_019B) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D) },
@@ -692,6 +695,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
#endif
#if IS_ENABLED(CONFIG_HID_STEELSERIES)
{ HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_ARCTIS_1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_ARCTIS_9) },
#endif
#if IS_ENABLED(CONFIG_HID_SUNPLUS)
{ HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
diff --git a/drivers/hid/hid-steelseries.c b/drivers/hid/hid-steelseries.c
index d4bd7848b8c6..f98435631aa1 100644
--- a/drivers/hid/hid-steelseries.c
+++ b/drivers/hid/hid-steelseries.c
@@ -249,11 +249,11 @@ static int steelseries_srws1_probe(struct hid_device *hdev,
{
int ret, i;
struct led_classdev *led;
+ struct steelseries_srws1_data *drv_data;
size_t name_sz;
char *name;
- struct steelseries_srws1_data *drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL);
-
+ drv_data = devm_kzalloc(&hdev->dev, sizeof(*drv_data), GFP_KERNEL);
if (drv_data == NULL) {
hid_err(hdev, "can't alloc SRW-S1 memory\n");
return -ENOMEM;
@@ -264,18 +264,18 @@ static int steelseries_srws1_probe(struct hid_device *hdev,
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "parse failed\n");
- goto err_free;
+ goto err;
}
if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 0, 0, 16)) {
ret = -ENODEV;
- goto err_free;
+ goto err;
}
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
hid_err(hdev, "hw start failed\n");
- goto err_free;
+ goto err;
}
/* register led subsystem */
@@ -288,10 +288,10 @@ static int steelseries_srws1_probe(struct hid_device *hdev,
name_sz = strlen(hdev->uniq) + 16;
/* 'ALL', for setting all LEDs simultaneously */
- led = kzalloc(sizeof(struct led_classdev)+name_sz, GFP_KERNEL);
+ led = devm_kzalloc(&hdev->dev, sizeof(struct led_classdev)+name_sz, GFP_KERNEL);
if (!led) {
hid_err(hdev, "can't allocate memory for LED ALL\n");
- goto err_led;
+ goto out;
}
name = (void *)(&led[1]);
@@ -303,16 +303,18 @@ static int steelseries_srws1_probe(struct hid_device *hdev,
led->brightness_set = steelseries_srws1_led_all_set_brightness;
drv_data->led[SRWS1_NUMBER_LEDS] = led;
- ret = led_classdev_register(&hdev->dev, led);
- if (ret)
- goto err_led;
+ ret = devm_led_classdev_register(&hdev->dev, led);
+ if (ret) {
+ hid_err(hdev, "failed to register LED %d. Aborting.\n", SRWS1_NUMBER_LEDS);
+ goto out; /* let the driver continue without LEDs */
+ }
/* Each individual LED */
for (i = 0; i < SRWS1_NUMBER_LEDS; i++) {
- led = kzalloc(sizeof(struct led_classdev)+name_sz, GFP_KERNEL);
+ led = devm_kzalloc(&hdev->dev, sizeof(struct led_classdev)+name_sz, GFP_KERNEL);
if (!led) {
hid_err(hdev, "can't allocate memory for LED %d\n", i);
- goto err_led;
+ break;
}
name = (void *)(&led[1]);
@@ -324,53 +326,18 @@ static int steelseries_srws1_probe(struct hid_device *hdev,
led->brightness_set = steelseries_srws1_led_set_brightness;
drv_data->led[i] = led;
- ret = led_classdev_register(&hdev->dev, led);
+ ret = devm_led_classdev_register(&hdev->dev, led);
if (ret) {
hid_err(hdev, "failed to register LED %d. Aborting.\n", i);
-err_led:
- /* Deregister all LEDs (if any) */
- for (i = 0; i < SRWS1_NUMBER_LEDS + 1; i++) {
- led = drv_data->led[i];
- drv_data->led[i] = NULL;
- if (!led)
- continue;
- led_classdev_unregister(led);
- kfree(led);
- }
- goto out; /* but let the driver continue without LEDs */
+ break; /* but let the driver continue without LEDs */
}
}
out:
return 0;
-err_free:
- kfree(drv_data);
+err:
return ret;
}
-
-static void steelseries_srws1_remove(struct hid_device *hdev)
-{
- int i;
- struct led_classdev *led;
-
- struct steelseries_srws1_data *drv_data = hid_get_drvdata(hdev);
-
- if (drv_data) {
- /* Deregister LEDs (if any) */
- for (i = 0; i < SRWS1_NUMBER_LEDS + 1; i++) {
- led = drv_data->led[i];
- drv_data->led[i] = NULL;
- if (!led)
- continue;
- led_classdev_unregister(led);
- kfree(led);
- }
-
- }
-
- hid_hw_stop(hdev);
- kfree(drv_data);
-}
#endif
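The devm conversion is what allows steelseries_srws1_remove() above to be deleted outright: devres-managed allocations and LED class devices are released by the driver core when the device unbinds. The lifetime contract, as used in the probe path above:

	/* Sketch: devres ties each resource to &hdev->dev's lifetime. */
	drv_data = devm_kzalloc(&hdev->dev, sizeof(*drv_data), GFP_KERNEL);	/* freed on unbind */
	ret = devm_led_classdev_register(&hdev->dev, led);			/* unregistered on unbind */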
#define STEELSERIES_HEADSET_BATTERY_TIMEOUT_MS 3000
@@ -405,13 +372,12 @@ static int steelseries_headset_request_battery(struct hid_device *hdev,
static void steelseries_headset_fetch_battery(struct hid_device *hdev)
{
- struct steelseries_device *sd = hid_get_drvdata(hdev);
int ret = 0;
- if (sd->quirks & STEELSERIES_ARCTIS_1)
+ if (hdev->product == USB_DEVICE_ID_STEELSERIES_ARCTIS_1)
ret = steelseries_headset_request_battery(hdev,
arctis_1_battery_request, sizeof(arctis_1_battery_request));
- else if (sd->quirks & STEELSERIES_ARCTIS_9)
+ else if (hdev->product == USB_DEVICE_ID_STEELSERIES_ARCTIS_9)
ret = steelseries_headset_request_battery(hdev,
arctis_9_battery_request, sizeof(arctis_9_battery_request));
@@ -567,14 +533,7 @@ static int steelseries_probe(struct hid_device *hdev, const struct hid_device_id
struct steelseries_device *sd;
int ret;
- sd = devm_kzalloc(&hdev->dev, sizeof(*sd), GFP_KERNEL);
- if (!sd)
- return -ENOMEM;
- hid_set_drvdata(hdev, sd);
- sd->hdev = hdev;
- sd->quirks = id->driver_data;
-
- if (sd->quirks & STEELSERIES_SRWS1) {
+ if (hdev->product == USB_DEVICE_ID_STEELSERIES_SRWS1) {
#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \
(IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES))
return steelseries_srws1_probe(hdev, id);
@@ -583,6 +542,13 @@ static int steelseries_probe(struct hid_device *hdev, const struct hid_device_id
#endif
}
+ sd = devm_kzalloc(&hdev->dev, sizeof(*sd), GFP_KERNEL);
+ if (!sd)
+ return -ENOMEM;
+ hid_set_drvdata(hdev, sd);
+ sd->hdev = hdev;
+ sd->quirks = id->driver_data;
+
ret = hid_parse(hdev);
if (ret)
return ret;
@@ -610,17 +576,19 @@ static int steelseries_probe(struct hid_device *hdev, const struct hid_device_id
static void steelseries_remove(struct hid_device *hdev)
{
- struct steelseries_device *sd = hid_get_drvdata(hdev);
+ struct steelseries_device *sd;
unsigned long flags;
- if (sd->quirks & STEELSERIES_SRWS1) {
+ if (hdev->product == USB_DEVICE_ID_STEELSERIES_SRWS1) {
#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \
(IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES))
- steelseries_srws1_remove(hdev);
+ hid_hw_stop(hdev);
#endif
return;
}
+ sd = hid_get_drvdata(hdev);
+
spin_lock_irqsave(&sd->lock, flags);
sd->removed = true;
spin_unlock_irqrestore(&sd->lock, flags);
@@ -667,10 +635,10 @@ static int steelseries_headset_raw_event(struct hid_device *hdev,
unsigned long flags;
/* Not a headset */
- if (sd->quirks & STEELSERIES_SRWS1)
+ if (hdev->product == USB_DEVICE_ID_STEELSERIES_SRWS1)
return 0;
- if (sd->quirks & STEELSERIES_ARCTIS_1) {
+ if (hdev->product == USB_DEVICE_ID_STEELSERIES_ARCTIS_1) {
hid_dbg(sd->hdev,
"Parsing raw event for Arctis 1 headset (%*ph)\n", size, read_buf);
if (size < ARCTIS_1_BATTERY_RESPONSE_LEN ||
@@ -688,7 +656,7 @@ static int steelseries_headset_raw_event(struct hid_device *hdev,
}
}
- if (sd->quirks & STEELSERIES_ARCTIS_9) {
+ if (hdev->product == USB_DEVICE_ID_STEELSERIES_ARCTIS_9) {
hid_dbg(sd->hdev,
"Parsing raw event for Arctis 9 headset (%*ph)\n", size, read_buf);
if (size < ARCTIS_9_BATTERY_RESPONSE_LEN) {
@@ -757,11 +725,11 @@ static const struct hid_device_id steelseries_devices[] = {
.driver_data = STEELSERIES_SRWS1 },
{ /* SteelSeries Arctis 1 Wireless for XBox */
- HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, 0x12b6),
- .driver_data = STEELSERIES_ARCTIS_1 },
+ HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_ARCTIS_1),
+ .driver_data = STEELSERIES_ARCTIS_1 },
{ /* SteelSeries Arctis 9 Wireless for XBox */
- HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, 0x12c2),
+ HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_ARCTIS_9),
.driver_data = STEELSERIES_ARCTIS_9 },
{ }
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index 4a17f7332c3f..ffa14a4621ef 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -20,6 +20,7 @@
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/unaligned.h>
+#include <linux/string_choices.h>
/**
* uclogic_params_pen_inrange_to_str() - Convert a pen in-range reporting type
@@ -59,7 +60,7 @@ static void uclogic_params_pen_hid_dbg(const struct hid_device *hdev,
size_t i;
hid_dbg(hdev, "\t.usage_invalid = %s\n",
- (pen->usage_invalid ? "true" : "false"));
+ str_true_false(pen->usage_invalid));
hid_dbg(hdev, "\t.desc_ptr = %p\n", pen->desc_ptr);
hid_dbg(hdev, "\t.desc_size = %u\n", pen->desc_size);
hid_dbg(hdev, "\t.id = %u\n", pen->id);
@@ -74,9 +75,9 @@ static void uclogic_params_pen_hid_dbg(const struct hid_device *hdev,
hid_dbg(hdev, "\t.inrange = %s\n",
uclogic_params_pen_inrange_to_str(pen->inrange));
hid_dbg(hdev, "\t.fragmented_hires = %s\n",
- (pen->fragmented_hires ? "true" : "false"));
+ str_true_false(pen->fragmented_hires));
hid_dbg(hdev, "\t.tilt_y_flipped = %s\n",
- (pen->tilt_y_flipped ? "true" : "false"));
+ str_true_false(pen->tilt_y_flipped));
}
/**
@@ -119,8 +120,7 @@ void uclogic_params_hid_dbg(const struct hid_device *hdev,
{
size_t i;
- hid_dbg(hdev, ".invalid = %s\n",
- params->invalid ? "true" : "false");
+ hid_dbg(hdev, ".invalid = %s\n", str_true_false(params->invalid));
hid_dbg(hdev, ".desc_ptr = %p\n", params->desc_ptr);
hid_dbg(hdev, ".desc_size = %u\n", params->desc_size);
hid_dbg(hdev, ".pen = {\n");
diff --git a/drivers/hid/hid-universal-pidff.c b/drivers/hid/hid-universal-pidff.c
index 554a6559aeb7..549dac555d40 100644
--- a/drivers/hid/hid-universal-pidff.c
+++ b/drivers/hid/hid-universal-pidff.c
@@ -8,12 +8,12 @@
* Copyright (c) 2024, 2025 Tomasz Pakuła
*/
+#include "hid-ids.h"
+#include "usbhid/hid-pidff.h"
#include <linux/device.h>
#include <linux/hid.h>
-#include <linux/module.h>
#include <linux/input-event-codes.h>
-#include "hid-ids.h"
-#include "usbhid/hid-pidff.h"
+#include <linux/module.h>
#define JOY_RANGE (BTN_DEAD - BTN_JOYSTICK + 1)
@@ -21,8 +21,10 @@
* Map buttons manually to extend the default joystick button limit
*/
static int universal_pidff_input_mapping(struct hid_device *hdev,
- struct hid_input *hi, struct hid_field *field, struct hid_usage *usage,
- unsigned long **bit, int *max)
+ struct hid_input *hi,
+ struct hid_field *field,
+ struct hid_usage *usage,
+ unsigned long **bit, int *max)
{
if ((usage->hid & HID_USAGE_PAGE) != HID_UP_BUTTON)
return 0;
@@ -126,65 +128,64 @@ static int universal_pidff_input_configured(struct hid_device *hdev,
if (!test_bit(axis, input->absbit))
continue;
- input_set_abs_params(input, axis,
- input->absinfo[axis].minimum,
- input->absinfo[axis].maximum,
- axis == ABS_X ? 0 : 8, 0);
+ input_set_abs_params(input, axis, input->absinfo[axis].minimum,
+ input->absinfo[axis].maximum,
+ axis == ABS_X ? 0 : 8, 0);
}
/* Remove fuzz and deadzone from the second joystick axis */
if (hdev->vendor == USB_VENDOR_ID_FFBEAST &&
hdev->product == USB_DEVICE_ID_FFBEAST_JOYSTICK)
input_set_abs_params(input, ABS_Y,
- input->absinfo[ABS_Y].minimum,
- input->absinfo[ABS_Y].maximum, 0, 0);
+ input->absinfo[ABS_Y].minimum,
+ input->absinfo[ABS_Y].maximum, 0, 0);
return 0;
}
static const struct hid_device_id universal_pidff_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R3),
- .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ .driver_data = HID_PIDFF_QUIRK_FIX_CONDITIONAL_DIRECTION },
{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R3_2),
- .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ .driver_data = HID_PIDFF_QUIRK_FIX_CONDITIONAL_DIRECTION },
{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R5),
- .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ .driver_data = HID_PIDFF_QUIRK_FIX_CONDITIONAL_DIRECTION },
{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R5_2),
- .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ .driver_data = HID_PIDFF_QUIRK_FIX_CONDITIONAL_DIRECTION },
{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R9),
- .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ .driver_data = HID_PIDFF_QUIRK_FIX_CONDITIONAL_DIRECTION },
{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R9_2),
- .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ .driver_data = HID_PIDFF_QUIRK_FIX_CONDITIONAL_DIRECTION },
{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R12),
- .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ .driver_data = HID_PIDFF_QUIRK_FIX_CONDITIONAL_DIRECTION },
{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R12_2),
- .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ .driver_data = HID_PIDFF_QUIRK_FIX_CONDITIONAL_DIRECTION },
{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R16_R21),
- .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ .driver_data = HID_PIDFF_QUIRK_FIX_CONDITIONAL_DIRECTION },
{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R16_R21_2),
- .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ .driver_data = HID_PIDFF_QUIRK_FIX_CONDITIONAL_DIRECTION },
{ HID_USB_DEVICE(USB_VENDOR_ID_CAMMUS, USB_DEVICE_ID_CAMMUS_C5) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CAMMUS, USB_DEVICE_ID_CAMMUS_C12) },
{ HID_USB_DEVICE(USB_VENDOR_ID_VRS, USB_DEVICE_ID_VRS_DFP),
- .driver_data = HID_PIDFF_QUIRK_PERMISSIVE_CONTROL },
+ .driver_data = HID_PIDFF_QUIRK_PERMISSIVE_CONTROL },
{ HID_USB_DEVICE(USB_VENDOR_ID_FFBEAST, USB_DEVICE_ID_FFBEAST_JOYSTICK), },
{ HID_USB_DEVICE(USB_VENDOR_ID_FFBEAST, USB_DEVICE_ID_FFBEAST_RUDDER), },
{ HID_USB_DEVICE(USB_VENDOR_ID_FFBEAST, USB_DEVICE_ID_FFBEAST_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_ID_PXN_V10),
- .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
+ .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
{ HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_ID_PXN_V12),
- .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
+ .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
{ HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_ID_PXN_V12_LITE),
- .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
+ .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
{ HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_ID_PXN_V12_LITE_2),
- .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
+ .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
{ HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_ID_LITE_STAR_GT987),
- .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
+ .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASETEK, USB_DEVICE_ID_ASETEK_INVICTA) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASETEK, USB_DEVICE_ID_ASETEK_FORTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASETEK, USB_DEVICE_ID_ASETEK_LA_PRIMA) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASETEK, USB_DEVICE_ID_ASETEK_TONY_KANAAN) },
- { }
+ {}
};
MODULE_DEVICE_TABLE(hid, universal_pidff_devices);
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index c887f48756f4..bbd6f23bce78 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -394,27 +394,15 @@ static int hidraw_revoke(struct hidraw_list *list)
return 0;
}
-static long hidraw_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
+static long hidraw_fixed_size_ioctl(struct file *file, struct hidraw *dev, unsigned int cmd,
+ void __user *arg)
{
- struct inode *inode = file_inode(file);
- unsigned int minor = iminor(inode);
- long ret = 0;
- struct hidraw *dev;
- struct hidraw_list *list = file->private_data;
- void __user *user_arg = (void __user*) arg;
-
- down_read(&minors_rwsem);
- dev = hidraw_table[minor];
- if (!dev || !dev->exist || hidraw_is_revoked(list)) {
- ret = -ENODEV;
- goto out;
- }
+ struct hid_device *hid = dev->hid;
switch (cmd) {
case HIDIOCGRDESCSIZE:
- if (put_user(dev->hid->rsize, (int __user *)arg))
- ret = -EFAULT;
+ if (put_user(hid->rsize, (int __user *)arg))
+ return -EFAULT;
break;
case HIDIOCGRDESC:
@@ -422,113 +410,145 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
__u32 len;
if (get_user(len, (int __user *)arg))
- ret = -EFAULT;
- else if (len > HID_MAX_DESCRIPTOR_SIZE - 1)
- ret = -EINVAL;
- else if (copy_to_user(user_arg + offsetof(
- struct hidraw_report_descriptor,
- value[0]),
- dev->hid->rdesc,
- min(dev->hid->rsize, len)))
- ret = -EFAULT;
+ return -EFAULT;
+
+ if (len > HID_MAX_DESCRIPTOR_SIZE - 1)
+ return -EINVAL;
+
+ if (copy_to_user(arg + offsetof(
+ struct hidraw_report_descriptor,
+ value[0]),
+ hid->rdesc,
+ min(hid->rsize, len)))
+ return -EFAULT;
+
break;
}
case HIDIOCGRAWINFO:
{
struct hidraw_devinfo dinfo;
- dinfo.bustype = dev->hid->bus;
- dinfo.vendor = dev->hid->vendor;
- dinfo.product = dev->hid->product;
- if (copy_to_user(user_arg, &dinfo, sizeof(dinfo)))
- ret = -EFAULT;
+ dinfo.bustype = hid->bus;
+ dinfo.vendor = hid->vendor;
+ dinfo.product = hid->product;
+ if (copy_to_user(arg, &dinfo, sizeof(dinfo)))
+ return -EFAULT;
break;
}
case HIDIOCREVOKE:
{
- if (user_arg)
- ret = -EINVAL;
- else
- ret = hidraw_revoke(list);
- break;
+ struct hidraw_list *list = file->private_data;
+
+ if (arg)
+ return -EINVAL;
+
+ return hidraw_revoke(list);
}
default:
- {
- struct hid_device *hid = dev->hid;
- if (_IOC_TYPE(cmd) != 'H') {
- ret = -EINVAL;
- break;
- }
+ /*
+ * None of the above ioctls can return -EAGAIN, so
+ * use it as a marker that we need to check variable
+ * length ioctls.
+ */
+ return -EAGAIN;
+ }
- if (_IOC_NR(cmd) == _IOC_NR(HIDIOCSFEATURE(0))) {
- int len = _IOC_SIZE(cmd);
- ret = hidraw_send_report(file, user_arg, len, HID_FEATURE_REPORT);
- break;
- }
- if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGFEATURE(0))) {
- int len = _IOC_SIZE(cmd);
- ret = hidraw_get_report(file, user_arg, len, HID_FEATURE_REPORT);
- break;
- }
+ return 0;
+}
- if (_IOC_NR(cmd) == _IOC_NR(HIDIOCSINPUT(0))) {
- int len = _IOC_SIZE(cmd);
- ret = hidraw_send_report(file, user_arg, len, HID_INPUT_REPORT);
- break;
- }
- if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGINPUT(0))) {
- int len = _IOC_SIZE(cmd);
- ret = hidraw_get_report(file, user_arg, len, HID_INPUT_REPORT);
- break;
- }
+static long hidraw_rw_variable_size_ioctl(struct file *file, struct hidraw *dev, unsigned int cmd,
+ void __user *user_arg)
+{
+ int len = _IOC_SIZE(cmd);
+
+ switch (cmd & ~IOCSIZE_MASK) {
+ case HIDIOCSFEATURE(0):
+ return hidraw_send_report(file, user_arg, len, HID_FEATURE_REPORT);
+ case HIDIOCGFEATURE(0):
+ return hidraw_get_report(file, user_arg, len, HID_FEATURE_REPORT);
+ case HIDIOCSINPUT(0):
+ return hidraw_send_report(file, user_arg, len, HID_INPUT_REPORT);
+ case HIDIOCGINPUT(0):
+ return hidraw_get_report(file, user_arg, len, HID_INPUT_REPORT);
+ case HIDIOCSOUTPUT(0):
+ return hidraw_send_report(file, user_arg, len, HID_OUTPUT_REPORT);
+ case HIDIOCGOUTPUT(0):
+ return hidraw_get_report(file, user_arg, len, HID_OUTPUT_REPORT);
+ }
- if (_IOC_NR(cmd) == _IOC_NR(HIDIOCSOUTPUT(0))) {
- int len = _IOC_SIZE(cmd);
- ret = hidraw_send_report(file, user_arg, len, HID_OUTPUT_REPORT);
- break;
- }
- if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGOUTPUT(0))) {
- int len = _IOC_SIZE(cmd);
- ret = hidraw_get_report(file, user_arg, len, HID_OUTPUT_REPORT);
- break;
- }
+ return -EINVAL;
+}
- /* Begin Read-only ioctls. */
- if (_IOC_DIR(cmd) != _IOC_READ) {
- ret = -EINVAL;
- break;
- }
+static long hidraw_ro_variable_size_ioctl(struct file *file, struct hidraw *dev, unsigned int cmd,
+ void __user *user_arg)
+{
+ struct hid_device *hid = dev->hid;
+ int len = _IOC_SIZE(cmd);
+ int field_len;
+
+ switch (cmd & ~IOCSIZE_MASK) {
+ case HIDIOCGRAWNAME(0):
+ field_len = strlen(hid->name) + 1;
+ if (len > field_len)
+ len = field_len;
+ return copy_to_user(user_arg, hid->name, len) ? -EFAULT : len;
+ case HIDIOCGRAWPHYS(0):
+ field_len = strlen(hid->phys) + 1;
+ if (len > field_len)
+ len = field_len;
+ return copy_to_user(user_arg, hid->phys, len) ? -EFAULT : len;
+ case HIDIOCGRAWUNIQ(0):
+ field_len = strlen(hid->uniq) + 1;
+ if (len > field_len)
+ len = field_len;
+ return copy_to_user(user_arg, hid->uniq, len) ? -EFAULT : len;
+ }
- if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWNAME(0))) {
- int len = strlen(hid->name) + 1;
- if (len > _IOC_SIZE(cmd))
- len = _IOC_SIZE(cmd);
- ret = copy_to_user(user_arg, hid->name, len) ?
- -EFAULT : len;
- break;
- }
+ return -EINVAL;
+}
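From userspace, these variable-size read ioctls encode the caller's buffer length into the ioctl number itself, and the return value is the number of bytes copied (including the terminating NUL when it fits). A minimal usage sketch, assuming an already-open hidraw file descriptor fd:

    #include <sys/ioctl.h>
    #include <linux/hidraw.h>

    char name[64];
    /* sizeof(name) becomes _IOC_SIZE(cmd), i.e. the len seen above */
    int n = ioctl(fd, HIDIOCGRAWNAME(sizeof(name)), name);
    /* a 12-character device name yields n == 13; an undersized buffer
     * gets a truncated copy that is not NUL-terminated */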
- if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWPHYS(0))) {
- int len = strlen(hid->phys) + 1;
- if (len > _IOC_SIZE(cmd))
- len = _IOC_SIZE(cmd);
- ret = copy_to_user(user_arg, hid->phys, len) ?
- -EFAULT : len;
- break;
- }
+static long hidraw_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct inode *inode = file_inode(file);
+ unsigned int minor = iminor(inode);
+ struct hidraw *dev;
+ struct hidraw_list *list = file->private_data;
+ void __user *user_arg = (void __user *)arg;
+ int ret;
- if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWUNIQ(0))) {
- int len = strlen(hid->uniq) + 1;
- if (len > _IOC_SIZE(cmd))
- len = _IOC_SIZE(cmd);
- ret = copy_to_user(user_arg, hid->uniq, len) ?
- -EFAULT : len;
- break;
- }
- }
+ down_read(&minors_rwsem);
+ dev = hidraw_table[minor];
+ if (!dev || !dev->exist || hidraw_is_revoked(list)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (_IOC_TYPE(cmd) != 'H') {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (_IOC_NR(cmd) > HIDIOCTL_LAST || _IOC_NR(cmd) == 0) {
ret = -ENOTTY;
+ goto out;
}
+
+ ret = hidraw_fixed_size_ioctl(file, dev, cmd, user_arg);
+ if (ret != -EAGAIN)
+ goto out;
+
+ switch (_IOC_DIR(cmd)) {
+ case (_IOC_READ | _IOC_WRITE):
+ ret = hidraw_rw_variable_size_ioctl(file, dev, cmd, user_arg);
+ break;
+ case _IOC_READ:
+ ret = hidraw_ro_variable_size_ioctl(file, dev, cmd, user_arg);
+ break;
+ default:
+ /* Any other IOC_DIR is wrong */
+ ret = -EINVAL;
+ }
+
out:
up_read(&minors_rwsem);
return ret;
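The up-front validation relies on the standard _IOC encoding: every hidraw command packs a type ('H'), a command number, a direction, and an embedded size into one integer, and the dispatchers above route on those pieces. Decomposed for HIDIOCGFEATURE(256), as defined in include/uapi/linux/hidraw.h:

    unsigned int cmd = HIDIOCGFEATURE(256);

    _IOC_TYPE(cmd);   /* 'H'                      */
    _IOC_NR(cmd);     /* 0x07                     */
    _IOC_DIR(cmd);    /* _IOC_READ | _IOC_WRITE   */
    _IOC_SIZE(cmd);   /* 256, the report length   */

Masking off the size with ~IOCSIZE_MASK is what lets the switch statements match HIDIOCGFEATURE(0) regardless of the length the caller encoded.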
diff --git a/drivers/hid/i2c-hid/i2c-hid-acpi.c b/drivers/hid/i2c-hid/i2c-hid-acpi.c
index 1b49243adb16..abd700a101f4 100644
--- a/drivers/hid/i2c-hid/i2c-hid-acpi.c
+++ b/drivers/hid/i2c-hid/i2c-hid-acpi.c
@@ -76,6 +76,13 @@ static int i2c_hid_acpi_get_descriptor(struct i2c_hid_acpi *ihid_acpi)
return hid_descriptor_address;
}
+static void i2c_hid_acpi_restore_sequence(struct i2chid_ops *ops)
+{
+ struct i2c_hid_acpi *ihid_acpi = container_of(ops, struct i2c_hid_acpi, ops);
+
+ i2c_hid_acpi_get_descriptor(ihid_acpi);
+}
+
static void i2c_hid_acpi_shutdown_tail(struct i2chid_ops *ops)
{
struct i2c_hid_acpi *ihid_acpi = container_of(ops, struct i2c_hid_acpi, ops);
@@ -96,6 +103,7 @@ static int i2c_hid_acpi_probe(struct i2c_client *client)
ihid_acpi->adev = ACPI_COMPANION(dev);
ihid_acpi->ops.shutdown_tail = i2c_hid_acpi_shutdown_tail;
+ ihid_acpi->ops.restore_sequence = i2c_hid_acpi_restore_sequence;
ret = i2c_hid_acpi_get_descriptor(ihid_acpi);
if (ret < 0)
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index d3912e3f2f13..63f46a2e5788 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -112,9 +112,9 @@ struct i2c_hid {
struct i2chid_ops *ops;
struct drm_panel_follower panel_follower;
- struct work_struct panel_follower_prepare_work;
+ struct work_struct panel_follower_work;
bool is_panel_follower;
- bool prepare_work_finished;
+ bool panel_follower_work_finished;
};
static const struct i2c_hid_quirks {
@@ -961,6 +961,14 @@ static void i2c_hid_core_shutdown_tail(struct i2c_hid *ihid)
ihid->ops->shutdown_tail(ihid->ops);
}
+static void i2c_hid_core_restore_sequence(struct i2c_hid *ihid)
+{
+ if (!ihid->ops->restore_sequence)
+ return;
+
+ ihid->ops->restore_sequence(ihid->ops);
+}
+
static int i2c_hid_core_suspend(struct i2c_hid *ihid, bool force_poweroff)
{
struct i2c_client *client = ihid->client;
@@ -1110,10 +1118,10 @@ err_power_down:
return ret;
}
-static void ihid_core_panel_prepare_work(struct work_struct *work)
+static void ihid_core_panel_follower_work(struct work_struct *work)
{
struct i2c_hid *ihid = container_of(work, struct i2c_hid,
- panel_follower_prepare_work);
+ panel_follower_work);
struct hid_device *hid = ihid->hid;
int ret;
@@ -1130,7 +1138,7 @@ static void ihid_core_panel_prepare_work(struct work_struct *work)
if (ret)
dev_warn(&ihid->client->dev, "Power on failed: %d\n", ret);
else
- WRITE_ONCE(ihid->prepare_work_finished, true);
+ WRITE_ONCE(ihid->panel_follower_work_finished, true);
/*
* The work APIs provide a number of memory ordering guarantees
@@ -1139,12 +1147,12 @@ static void ihid_core_panel_prepare_work(struct work_struct *work)
* guarantee that a write that happened in the work is visible after
* cancel_work_sync(). We'll add a write memory barrier here to match
* with i2c_hid_core_panel_unpreparing() to ensure that our write to
- * prepare_work_finished is visible there.
+ * panel_follower_work_finished is visible there.
*/
smp_wmb();
}
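Only the names change here; the ordering protocol is untouched. Reduced to its skeleton, the writer/reader pairing looks like this:

    /* work function (writer) */
    WRITE_ONCE(ihid->panel_follower_work_finished, true);
    smp_wmb();

    /* suspend path (reader) */
    cancel_work_sync(&ihid->panel_follower_work);
    smp_rmb();                      /* pairs with the smp_wmb() above */
    if (!READ_ONCE(ihid->panel_follower_work_finished))
            return 0;               /* power-up never finished */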
-static int i2c_hid_core_panel_prepared(struct drm_panel_follower *follower)
+static int i2c_hid_core_panel_follower_resume(struct drm_panel_follower *follower)
{
struct i2c_hid *ihid = container_of(follower, struct i2c_hid, panel_follower);
@@ -1152,29 +1160,36 @@ static int i2c_hid_core_panel_prepared(struct drm_panel_follower *follower)
* Powering on a touchscreen can be a slow process. Queue the work to
* the system workqueue so we don't block the panel's power up.
*/
- WRITE_ONCE(ihid->prepare_work_finished, false);
- schedule_work(&ihid->panel_follower_prepare_work);
+ WRITE_ONCE(ihid->panel_follower_work_finished, false);
+ schedule_work(&ihid->panel_follower_work);
return 0;
}
-static int i2c_hid_core_panel_unpreparing(struct drm_panel_follower *follower)
+static int i2c_hid_core_panel_follower_suspend(struct drm_panel_follower *follower)
{
struct i2c_hid *ihid = container_of(follower, struct i2c_hid, panel_follower);
- cancel_work_sync(&ihid->panel_follower_prepare_work);
+ cancel_work_sync(&ihid->panel_follower_work);
- /* Match with ihid_core_panel_prepare_work() */
+ /* Match with ihid_core_panel_follower_work() */
smp_rmb();
- if (!READ_ONCE(ihid->prepare_work_finished))
+ if (!READ_ONCE(ihid->panel_follower_work_finished))
return 0;
return i2c_hid_core_suspend(ihid, true);
}
-static const struct drm_panel_follower_funcs i2c_hid_core_panel_follower_funcs = {
- .panel_prepared = i2c_hid_core_panel_prepared,
- .panel_unpreparing = i2c_hid_core_panel_unpreparing,
+static const struct drm_panel_follower_funcs
+ i2c_hid_core_panel_follower_prepare_funcs = {
+ .panel_prepared = i2c_hid_core_panel_follower_resume,
+ .panel_unpreparing = i2c_hid_core_panel_follower_suspend,
+};
+
+static const struct drm_panel_follower_funcs
+ i2c_hid_core_panel_follower_enable_funcs = {
+ .panel_enabled = i2c_hid_core_panel_follower_resume,
+ .panel_disabling = i2c_hid_core_panel_follower_suspend,
};
static int i2c_hid_core_register_panel_follower(struct i2c_hid *ihid)
@@ -1182,7 +1197,10 @@ static int i2c_hid_core_register_panel_follower(struct i2c_hid *ihid)
struct device *dev = &ihid->client->dev;
int ret;
- ihid->panel_follower.funcs = &i2c_hid_core_panel_follower_funcs;
+ if (ihid->hid->initial_quirks & HID_QUIRK_POWER_ON_AFTER_BACKLIGHT)
+ ihid->panel_follower.funcs = &i2c_hid_core_panel_follower_enable_funcs;
+ else
+ ihid->panel_follower.funcs = &i2c_hid_core_panel_follower_prepare_funcs;
/*
* If we're not in control of our own power up/power down then we can't
@@ -1237,7 +1255,7 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
init_waitqueue_head(&ihid->wait);
mutex_init(&ihid->cmd_lock);
mutex_init(&ihid->reset_lock);
- INIT_WORK(&ihid->panel_follower_prepare_work, ihid_core_panel_prepare_work);
+ INIT_WORK(&ihid->panel_follower_work, ihid_core_panel_follower_work);
/* we need to allocate the command buffer without knowing the maximum
* size of the reports. Let's use HID_MIN_BUFFER_SIZE, then we do the
@@ -1360,8 +1378,26 @@ static int i2c_hid_core_pm_resume(struct device *dev)
return i2c_hid_core_resume(ihid);
}
+static int i2c_hid_core_pm_restore(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+
+ if (ihid->is_panel_follower)
+ return 0;
+
+ i2c_hid_core_restore_sequence(ihid);
+
+ return i2c_hid_core_resume(ihid);
+}
+
const struct dev_pm_ops i2c_hid_core_pm = {
- SYSTEM_SLEEP_PM_OPS(i2c_hid_core_pm_suspend, i2c_hid_core_pm_resume)
+ .suspend = pm_sleep_ptr(i2c_hid_core_pm_suspend),
+ .resume = pm_sleep_ptr(i2c_hid_core_pm_resume),
+ .freeze = pm_sleep_ptr(i2c_hid_core_pm_suspend),
+ .thaw = pm_sleep_ptr(i2c_hid_core_pm_resume),
+ .poweroff = pm_sleep_ptr(i2c_hid_core_pm_suspend),
+ .restore = pm_sleep_ptr(i2c_hid_core_pm_restore),
};
EXPORT_SYMBOL_GPL(i2c_hid_core_pm);
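Spelling the ops out by hand instead of using SYSTEM_SLEEP_PM_OPS() lets hibernation's restore step diverge from a plain resume. For reference, the standard meaning of each phase (generic Linux PM semantics, not specific to this driver):

    /*
     * suspend/resume: ordinary system sleep.
     * freeze/thaw:    around writing the hibernation image; the normal
     *                 suspend/resume pair suffices.
     * poweroff:       final power-down after the image is written.
     * restore:        boot after reading the image back: the device may
     *                 have been through a full power cycle, hence the
     *                 extra restore_sequence to re-read the descriptor.
     */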
diff --git a/drivers/hid/i2c-hid/i2c-hid-of-elan.c b/drivers/hid/i2c-hid/i2c-hid-of-elan.c
index 3fcff6daa0d3..0215f217f6d8 100644
--- a/drivers/hid/i2c-hid/i2c-hid-of-elan.c
+++ b/drivers/hid/i2c-hid/i2c-hid-of-elan.c
@@ -8,6 +8,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
+#include <linux/hid.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -23,6 +24,7 @@ struct elan_i2c_hid_chip_data {
unsigned int post_power_delay_ms;
u16 hid_descriptor_address;
const char *main_supply_name;
+ bool power_after_backlight;
};
struct i2c_hid_of_elan {
@@ -97,6 +99,7 @@ static int i2c_hid_of_elan_probe(struct i2c_client *client)
{
struct i2c_hid_of_elan *ihid_elan;
int ret;
+ u32 quirks = 0;
ihid_elan = devm_kzalloc(&client->dev, sizeof(*ihid_elan), GFP_KERNEL);
if (!ihid_elan)
@@ -131,8 +134,12 @@ static int i2c_hid_of_elan_probe(struct i2c_client *client)
}
}
+ if (ihid_elan->chip_data->power_after_backlight)
+ quirks = HID_QUIRK_POWER_ON_AFTER_BACKLIGHT;
+
ret = i2c_hid_core_probe(client, &ihid_elan->ops,
- ihid_elan->chip_data->hid_descriptor_address, 0);
+ ihid_elan->chip_data->hid_descriptor_address,
+ quirks);
if (ret)
goto err_deassert_reset;
@@ -150,6 +157,7 @@ static const struct elan_i2c_hid_chip_data elan_ekth6915_chip_data = {
.post_gpio_reset_on_delay_ms = 300,
.hid_descriptor_address = 0x0001,
.main_supply_name = "vcc33",
+ .power_after_backlight = true,
};
static const struct elan_i2c_hid_chip_data elan_ekth6a12nay_chip_data = {
@@ -157,6 +165,7 @@ static const struct elan_i2c_hid_chip_data elan_ekth6a12nay_chip_data = {
.post_gpio_reset_on_delay_ms = 300,
.hid_descriptor_address = 0x0001,
.main_supply_name = "vcc33",
+ .power_after_backlight = true,
};
static const struct elan_i2c_hid_chip_data ilitek_ili9882t_chip_data = {
diff --git a/drivers/hid/i2c-hid/i2c-hid.h b/drivers/hid/i2c-hid/i2c-hid.h
index 2c7b66d5caa0..1724a435c783 100644
--- a/drivers/hid/i2c-hid/i2c-hid.h
+++ b/drivers/hid/i2c-hid/i2c-hid.h
@@ -27,11 +27,13 @@ static inline u32 i2c_hid_get_dmi_quirks(const u16 vendor, const u16 product)
* @power_up: do sequencing to power up the device.
* @power_down: do sequencing to power down the device.
* @shutdown_tail: called at the end of shutdown.
+ * @restore_sequence: hibernation restore sequence.
*/
struct i2chid_ops {
int (*power_up)(struct i2chid_ops *ops);
void (*power_down)(struct i2chid_ops *ops);
void (*shutdown_tail)(struct i2chid_ops *ops);
+ void (*restore_sequence)(struct i2chid_ops *ops);
};
int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index 4c861119e97a..3ddaa2cd39d5 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -498,6 +498,7 @@ static int ish_fw_reset_handler(struct ishtp_device *dev)
{
uint32_t reset_id;
unsigned long flags;
+ int ret;
/* Read reset ID */
reset_id = ish_reg_read(dev, IPC_REG_ISH2HOST_MSG) & 0xFFFF;
@@ -510,12 +511,11 @@ static int ish_fw_reset_handler(struct ishtp_device *dev)
/* ISHTP notification in IPC_RESET */
ishtp_reset_handler(dev);
- if (!ish_is_input_ready(dev))
- timed_wait_for_timeout(dev, WAIT_FOR_INPUT_RDY,
- TIME_SLICE_FOR_INPUT_RDY_MS, TIMEOUT_FOR_INPUT_RDY_MS);
-
+ ret = timed_wait_for_timeout(dev, WAIT_FOR_INPUT_RDY,
+ TIME_SLICE_FOR_INPUT_RDY_MS,
+ TIMEOUT_FOR_INPUT_RDY_MS);
/* ISH FW is dead */
- if (!ish_is_input_ready(dev))
+ if (ret)
return -EPIPE;
/* Send clock sync at once after reset */
@@ -531,9 +531,10 @@ static int ish_fw_reset_handler(struct ishtp_device *dev)
sizeof(uint32_t));
/* Wait for ISH FW's ILUP and ISHTP_READY */
- timed_wait_for_timeout(dev, WAIT_FOR_FW_RDY,
- TIME_SLICE_FOR_FW_RDY_MS, TIMEOUT_FOR_FW_RDY_MS);
- if (!ishtp_fw_is_ready(dev)) {
+ ret = timed_wait_for_timeout(dev, WAIT_FOR_FW_RDY,
+ TIME_SLICE_FOR_FW_RDY_MS,
+ TIMEOUT_FOR_FW_RDY_MS);
+ if (ret) {
/* ISH FW is dead */
uint32_t ish_status;
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index c57483224db6..9d150ce234f2 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -264,9 +264,6 @@ static void ish_shutdown(struct pci_dev *pdev)
static struct device __maybe_unused *ish_resume_device;
-/* 50ms to get resume response */
-#define WAIT_FOR_RESUME_ACK_MS 50
-
/**
* ish_resume_handler() - Work function to complete resume
* @work: work struct
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
index 6550ad5bfbb5..d8c3c54a8c0f 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
@@ -759,6 +759,9 @@ static void hid_ishtp_cl_resume_handler(struct work_struct *work)
if (ishtp_wait_resume(ishtp_get_ishtp_device(hid_ishtp_cl))) {
client_data->suspended = false;
wake_up_interruptible(&client_data->ishtp_resume_wait);
+ } else {
+ hid_ishtp_trace(client_data, "hid client: wait for resume timed out");
+ dev_err(cl_data_to_dev(client_data), "wait for resume timed out");
}
}
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index 5ac7d70a7c84..93a0432e7058 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -852,9 +852,6 @@ EXPORT_SYMBOL(ishtp_device);
*/
bool ishtp_wait_resume(struct ishtp_device *dev)
{
- /* 50ms to get resume response */
- #define WAIT_FOR_RESUME_ACK_MS 50
-
/* Waiting to get resume response */
if (dev->resume_flag)
wait_event_interruptible_timeout(dev->resume_wait,
diff --git a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
index ec9f6e87aaf2..23db97ecf21c 100644
--- a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
+++ b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
@@ -47,6 +47,9 @@
#define MAX_DMA_DELAY 20
+/* 300ms to get resume response */
+#define WAIT_FOR_RESUME_ACK_MS 300
+
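Both wait sites now share this single definition instead of a local 50 ms copy. A sketch of how the wait consumes it (the full wait_event condition is trimmed in the hunk above, so the !dev->resume_flag condition here is an assumption for illustration):

    if (dev->resume_flag)
            wait_event_interruptible_timeout(dev->resume_wait,
                                             !dev->resume_flag,
                                             msecs_to_jiffies(WAIT_FOR_RESUME_ACK_MS));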
/* ISHTP device states */
enum ishtp_dev_state {
ISHTP_DEV_INITIALIZING = 0,
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
index e944a6ccb776..8433a991e7f4 100644
--- a/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
@@ -23,6 +23,7 @@
static struct quicki2c_ddata ptl_ddata = {
.max_detect_size = MAX_RX_DETECT_SIZE_PTL,
+ .max_interrupt_delay = MAX_RX_INTERRUPT_DELAY,
};
/* THC QuickI2C ACPI method to get device properties */
@@ -200,6 +201,21 @@ static int quicki2c_get_acpi_resources(struct quicki2c_device *qcdev)
return -EOPNOTSUPP;
}
+ if (qcdev->ddata) {
+ qcdev->i2c_max_frame_size_enable = i2c_config.FSEN;
+ qcdev->i2c_int_delay_enable = i2c_config.INDE;
+
+ if (i2c_config.FSVL <= qcdev->ddata->max_detect_size)
+ qcdev->i2c_max_frame_size = i2c_config.FSVL;
+ else
+ qcdev->i2c_max_frame_size = qcdev->ddata->max_detect_size;
+
+ if (i2c_config.INDV <= qcdev->ddata->max_interrupt_delay)
+ qcdev->i2c_int_delay = i2c_config.INDV;
+ else
+ qcdev->i2c_int_delay = qcdev->ddata->max_interrupt_delay;
+ }
+
return 0;
}
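The FSVL/INDV capping above can be read as two min() operations against the per-chip ceilings. An equivalent compact form, as a sketch (quicki2c_clamp_acpi_config is a hypothetical helper name, and min_t(u32, ...) assumes the u64 ACPI values fit in 32 bits):

    static void quicki2c_clamp_acpi_config(struct quicki2c_device *qcdev,
                                           const struct quicki2c_subip_acpi_config *cfg)
    {
            /* never trust ACPI beyond what the hardware can detect */
            qcdev->i2c_max_frame_size = min_t(u32, cfg->FSVL,
                                              qcdev->ddata->max_detect_size);
            qcdev->i2c_int_delay = min_t(u32, cfg->INDV,
                                         qcdev->ddata->max_interrupt_delay);
    }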
@@ -419,6 +435,7 @@ static struct quicki2c_device *quicki2c_dev_init(struct pci_dev *pdev, void __io
*/
static void quicki2c_dev_deinit(struct quicki2c_device *qcdev)
{
+ thc_interrupt_quiesce(qcdev->thc_hw, true);
thc_interrupt_enable(qcdev->thc_hw, false);
thc_ltr_unconfig(qcdev->thc_hw);
thc_wot_unconfig(qcdev->thc_hw);
@@ -440,17 +457,24 @@ static void quicki2c_dma_adv_enable(struct quicki2c_device *qcdev)
* max input length <= THC detect capability, enable the feature with device
* max input length.
*/
- if (qcdev->ddata->max_detect_size >=
- le16_to_cpu(qcdev->dev_desc.max_input_len)) {
- thc_i2c_set_rx_max_size(qcdev->thc_hw,
- le16_to_cpu(qcdev->dev_desc.max_input_len));
+ if (qcdev->i2c_max_frame_size_enable) {
+ if (qcdev->i2c_max_frame_size >=
+ le16_to_cpu(qcdev->dev_desc.max_input_len)) {
+ thc_i2c_set_rx_max_size(qcdev->thc_hw,
+ le16_to_cpu(qcdev->dev_desc.max_input_len));
+ } else {
+ dev_warn(qcdev->dev,
+ "Max frame size is smaller than hid max input length!");
+ thc_i2c_set_rx_max_size(qcdev->thc_hw,
+ le16_to_cpu(qcdev->i2c_max_frame_size));
+ }
thc_i2c_rx_max_size_enable(qcdev->thc_hw, true);
}
/* If platform supports interrupt delay feature, enable it with given delay */
- if (qcdev->ddata->interrupt_delay) {
+ if (qcdev->i2c_int_delay_enable) {
thc_i2c_set_rx_int_delay(qcdev->thc_hw,
- qcdev->ddata->interrupt_delay);
+ qcdev->i2c_int_delay * 10);
thc_i2c_rx_int_delay_enable(qcdev->thc_hw, true);
}
}
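The multiplication by 10 converts INDV's unit (10 us steps, per the ACPI config layout documented in quicki2c-dev.h below) into the microsecond value handed to the THC layer. Worked at the ceiling:

    /* INDV in 10 us units -> microseconds */
    u32 delay_us = qcdev->i2c_int_delay * 10;
    /* MAX_RX_INTERRUPT_DELAY (256) -> 2560 us, the 2.56 ms quoted below */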
@@ -463,10 +487,10 @@ static void quicki2c_dma_adv_enable(struct quicki2c_device *qcdev)
*/
static void quicki2c_dma_adv_disable(struct quicki2c_device *qcdev)
{
- if (qcdev->ddata->max_detect_size)
+ if (qcdev->i2c_max_frame_size_enable)
thc_i2c_rx_max_size_enable(qcdev->thc_hw, false);
- if (qcdev->ddata->interrupt_delay)
+ if (qcdev->i2c_int_delay_enable)
thc_i2c_rx_int_delay_enable(qcdev->thc_hw, false);
}
@@ -996,6 +1020,8 @@ static const struct pci_device_id quicki2c_pci_tbl[] = {
{ PCI_DEVICE_DATA(INTEL, THC_PTL_H_DEVICE_ID_I2C_PORT2, &ptl_ddata) },
{ PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_I2C_PORT1, &ptl_ddata) },
{ PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_I2C_PORT2, &ptl_ddata) },
+ { PCI_DEVICE_DATA(INTEL, THC_WCL_DEVICE_ID_I2C_PORT1, &ptl_ddata) },
+ { PCI_DEVICE_DATA(INTEL, THC_WCL_DEVICE_ID_I2C_PORT2, &ptl_ddata) },
{ }
};
MODULE_DEVICE_TABLE(pci, quicki2c_pci_tbl);
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h
index 93d6fa982d60..2cb5471a8133 100644
--- a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h
@@ -13,6 +13,8 @@
#define PCI_DEVICE_ID_INTEL_THC_PTL_H_DEVICE_ID_I2C_PORT2 0xE34A
#define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_I2C_PORT1 0xE448
#define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_I2C_PORT2 0xE44A
+#define PCI_DEVICE_ID_INTEL_THC_WCL_DEVICE_ID_I2C_PORT1 0x4D48
+#define PCI_DEVICE_ID_INTEL_THC_WCL_DEVICE_ID_I2C_PORT2 0x4D4A
/* Packet size value, the unit is 16 bytes */
#define MAX_PACKET_SIZE_VALUE_LNL 256
@@ -38,6 +40,8 @@
/* PTL Max packet size detection capability is 255 Bytes */
#define MAX_RX_DETECT_SIZE_PTL 255
+/* Max interrupt delay capability is 2.56ms */
+#define MAX_RX_INTERRUPT_DELAY 256
/* Default interrupt delay is 1ms, suitable for most devices */
#define DEFAULT_INTERRUPT_DELAY_US (1 * USEC_PER_MSEC)
@@ -77,6 +81,7 @@ struct quicki2c_subip_acpi_parameter {
u16 device_address;
u64 connection_speed;
u8 addressing_mode;
+ u8 reserved;
} __packed;
/**
@@ -100,6 +105,10 @@ struct quicki2c_subip_acpi_parameter {
* @HMTD: High Speed Mode Plus (3.4Mbits/sec) Serial Data Line Transmit HOLD Period
* @HMRD: High Speed Mode Plus (3.4Mbits/sec) Serial Data Line Receive HOLD Period
* @HMSL: Maximum length (in ic_clk_cycles) of suppressed spikes in High Speed Mode
+ * @FSEN: Maximum Frame Size Feature Enable Control
+ * @FSVL: Maximum Frame Size Value (unit in Bytes)
+ * @INDE: Interrupt Delay Feature Enable Control
+ * @INDV: Interrupt Delay Value (unit in 10 us)
*
* These properties are obtained from the QUICKI2C_ACPI_METHOD_NAME_ISUB method
* and used for I2C timing configuration.
@@ -126,16 +135,22 @@ struct quicki2c_subip_acpi_config {
u64 HMTD;
u64 HMRD;
u64 HMSL;
+
+ u64 FSEN;
+ u64 FSVL;
+ u64 INDE;
+ u64 INDV;
+ u8 reserved;
};
/**
* struct quicki2c_ddata - Driver specific data for quicki2c device
* @max_detect_size: Identify max packet size detect for rx
- * @interrupt_delay: Identify interrupt detect delay for rx
+ * @max_interrupt_delay: Identify max interrupt detect delay for rx
*/
struct quicki2c_ddata {
u32 max_detect_size;
- u32 interrupt_delay;
+ u32 max_interrupt_delay;
};
struct device;
@@ -168,6 +183,10 @@ struct acpi_device;
* @report_len: The length of input/output report packet
* @reset_ack_wq: Workqueue for waiting reset response from device
* @reset_ack: Indicate reset response received or not
+ * @i2c_max_frame_size_enable: Indicate max frame size feature enabled or not
+ * @i2c_max_frame_size: Max RX frame size (unit in Bytes)
+ * @i2c_int_delay_enable: Indicate interrupt delay feature enabled or not
+ * @i2c_int_delay: Interrupt detection delay value (unit in 10 us)
*/
struct quicki2c_device {
struct device *dev;
@@ -198,6 +217,11 @@ struct quicki2c_device {
wait_queue_head_t reset_ack_wq;
bool reset_ack;
+
+ u32 i2c_max_frame_size_enable;
+ u32 i2c_max_frame_size;
+ u32 i2c_int_delay_enable;
+ u32 i2c_int_delay;
};
#endif /* _QUICKI2C_DEV_H_ */
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c b/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
index 5e5f179dd113..84314989dc53 100644
--- a/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
@@ -976,6 +976,8 @@ static const struct pci_device_id quickspi_pci_tbl[] = {
{PCI_DEVICE_DATA(INTEL, THC_PTL_H_DEVICE_ID_SPI_PORT2, &ptl), },
{PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_SPI_PORT1, &ptl), },
{PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_SPI_PORT2, &ptl), },
+ {PCI_DEVICE_DATA(INTEL, THC_WCL_DEVICE_ID_SPI_PORT1, &ptl), },
+ {PCI_DEVICE_DATA(INTEL, THC_WCL_DEVICE_ID_SPI_PORT2, &ptl), },
{}
};
MODULE_DEVICE_TABLE(pci, quickspi_pci_tbl);
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h
index 6fdf674b21c5..f3532d866749 100644
--- a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h
@@ -19,6 +19,8 @@
#define PCI_DEVICE_ID_INTEL_THC_PTL_H_DEVICE_ID_SPI_PORT2 0xE34B
#define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_SPI_PORT1 0xE449
#define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_SPI_PORT2 0xE44B
+#define PCI_DEVICE_ID_INTEL_THC_WCL_DEVICE_ID_SPI_PORT1 0x4D49
+#define PCI_DEVICE_ID_INTEL_THC_WCL_DEVICE_ID_SPI_PORT2 0x4D4B
/* HIDSPI special ACPI parameters DSM methods */
#define ACPI_QUICKSPI_REVISION_NUM 2
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c
index 6f2263869b20..636a68306501 100644
--- a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c
@@ -4,6 +4,7 @@
#include <linux/bitfield.h>
#include <linux/math.h>
#include <linux/regmap.h>
+#include <linux/string_choices.h>
#include "intel-thc-dev.h"
#include "intel-thc-hw.h"
@@ -664,7 +665,7 @@ int thc_interrupt_quiesce(const struct thc_device *dev, bool int_quiesce)
if (ret) {
dev_err_once(dev->dev,
"Timeout while waiting THC idle, target quiesce state = %s\n",
- int_quiesce ? "true" : "false");
+ str_true_false(int_quiesce));
return ret;
}
@@ -1540,7 +1541,7 @@ int thc_i2c_subip_regs_save(struct thc_device *dev)
for (int i = 0; i < ARRAY_SIZE(i2c_subip_regs); i++) {
ret = thc_i2c_subip_pio_read(dev, i2c_subip_regs[i],
- &read_size, (u32 *)&dev->i2c_subip_regs + i);
+ &read_size, &dev->i2c_subip_regs[i]);
if (ret < 0)
return ret;
}
@@ -1563,7 +1564,7 @@ int thc_i2c_subip_regs_restore(struct thc_device *dev)
for (int i = 0; i < ARRAY_SIZE(i2c_subip_regs); i++) {
ret = thc_i2c_subip_pio_write(dev, i2c_subip_regs[i],
- write_size, (u32 *)&dev->i2c_subip_regs + i);
+ write_size, &dev->i2c_subip_regs[i]);
if (ret < 0)
return ret;
}
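Both hunks replace cast-the-array-address arithmetic with plain element indexing. The two spellings only agree while the cast type matches the element type; &arr[i] states the intent and survives a type change. A standalone illustration with generic names:

    u32 regs[8];
    int i = 3;

    u32 *p1 = (u32 *)&regs + i; /* steps in sizeof(u32) only because of the cast */
    u32 *p2 = &regs[i];         /* always the i-th element, no cast needed */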
diff --git a/drivers/hid/usbhid/hid-pidff.c b/drivers/hid/usbhid/hid-pidff.c
index 614a20b62023..edd61ef50e16 100644
--- a/drivers/hid/usbhid/hid-pidff.c
+++ b/drivers/hid/usbhid/hid-pidff.c
@@ -9,12 +9,11 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "hid-pidff.h"
+#include <linux/hid.h>
#include <linux/input.h>
+#include <linux/minmax.h>
#include <linux/slab.h>
#include <linux/usb.h>
-#include <linux/hid.h>
-#include <linux/minmax.h>
-
#define PID_EFFECTS_MAX 64
#define PID_INFINITE U16_MAX
@@ -33,7 +32,7 @@
#define PID_DEVICE_CONTROL 6
#define PID_CREATE_NEW_EFFECT 7
-#define PID_REQUIRED_REPORTS 7
+#define PID_REQUIRED_REPORTS 8
#define PID_SET_ENVELOPE 8
#define PID_SET_CONDITION 9
@@ -51,6 +50,7 @@ static const u8 pidff_reports[] = {
/* PID special fields */
#define PID_EFFECT_TYPE 0x25
+#define PID_AXES_ENABLE 0x55
#define PID_DIRECTION 0x57
#define PID_EFFECT_OPERATION_ARRAY 0x78
#define PID_BLOCK_LOAD_STATUS 0x8b
@@ -141,37 +141,74 @@ static const u8 pidff_effect_types[] = {
#define PID_BLOCK_LOAD_SUCCESS 0
#define PID_BLOCK_LOAD_FULL 1
#define PID_BLOCK_LOAD_ERROR 2
-static const u8 pidff_block_load_status[] = { 0x8c, 0x8d, 0x8e};
+static const u8 pidff_block_load_status[] = { 0x8c, 0x8d, 0x8e };
#define PID_EFFECT_START 0
#define PID_EFFECT_STOP 1
static const u8 pidff_effect_operation_status[] = { 0x79, 0x7b };
-/* Polar direction 90 degrees (East) */
-#define PIDFF_FIXED_WHEEL_DIRECTION 0x4000
+#define PID_DIRECTION_NORTH 0x0000
+#define PID_DIRECTION_EAST 0x4000
+#define PID_DIRECTION_SOUTH 0x8000
+#define PID_DIRECTION_WEST 0xc000
+
+#define PIDFF_FIXED_WHEEL_DIRECTION PID_DIRECTION_EAST
+
+/* AXES_ENABLE and DIRECTION axes */
+enum pid_axes {
+ PID_AXIS_X,
+ PID_AXIS_Y,
+ PID_AXIS_Z,
+ PID_AXIS_RX,
+ PID_AXIS_RY,
+ PID_AXIS_RZ,
+ PID_AXIS_SLIDER,
+ PID_AXIS_DIAL,
+ PID_AXIS_WHEEL,
+ PID_AXES_COUNT,
+};
+static const u8 pidff_direction_axis[] = {
+ HID_USAGE & HID_GD_X,
+ HID_USAGE & HID_GD_Y,
+ HID_USAGE & HID_GD_Z,
+ HID_USAGE & HID_GD_RX,
+ HID_USAGE & HID_GD_RY,
+ HID_USAGE & HID_GD_RZ,
+ HID_USAGE & HID_GD_SLIDER,
+ HID_USAGE & HID_GD_DIAL,
+ HID_USAGE & HID_GD_WHEEL,
+};
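Each HID_GD_* constant carries the usage page in its upper 16 bits and the usage ID in the lower 16, so masking with HID_USAGE keeps just the ID; pidff_find_special_keys() later re-attaches a page via usage_page | usagetable[i]. For the X axis:

    /* from include/linux/hid.h */
    #define HID_USAGE  0x0000ffff
    #define HID_GD_X   0x00010030  /* Generic Desktop page, usage X */

    /* table entry: HID_USAGE & HID_GD_X    == 0x0030            */
    /* lookup key:  HID_UP_GENDESK | 0x0030 == 0x00010030 again  */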
struct pidff_usage {
struct hid_field *field;
s32 *value;
};
+struct pidff_effect {
+ int pid_id;
+ int is_infinite;
+ unsigned int loop_count;
+};
+
struct pidff_device {
struct hid_device *hid;
- struct hid_report *reports[sizeof(pidff_reports)];
+ struct hid_report *reports[ARRAY_SIZE(pidff_reports)];
- struct pidff_usage set_effect[sizeof(pidff_set_effect)];
- struct pidff_usage set_envelope[sizeof(pidff_set_envelope)];
- struct pidff_usage set_condition[sizeof(pidff_set_condition)];
- struct pidff_usage set_periodic[sizeof(pidff_set_periodic)];
- struct pidff_usage set_constant[sizeof(pidff_set_constant)];
- struct pidff_usage set_ramp[sizeof(pidff_set_ramp)];
+ struct pidff_usage set_effect[ARRAY_SIZE(pidff_set_effect)];
+ struct pidff_usage set_envelope[ARRAY_SIZE(pidff_set_envelope)];
+ struct pidff_usage set_condition[ARRAY_SIZE(pidff_set_condition)];
+ struct pidff_usage set_periodic[ARRAY_SIZE(pidff_set_periodic)];
+ struct pidff_usage set_constant[ARRAY_SIZE(pidff_set_constant)];
+ struct pidff_usage set_ramp[ARRAY_SIZE(pidff_set_ramp)];
- struct pidff_usage device_gain[sizeof(pidff_device_gain)];
- struct pidff_usage block_load[sizeof(pidff_block_load)];
- struct pidff_usage pool[sizeof(pidff_pool)];
- struct pidff_usage effect_operation[sizeof(pidff_effect_operation)];
- struct pidff_usage block_free[sizeof(pidff_block_free)];
+ struct pidff_usage device_gain[ARRAY_SIZE(pidff_device_gain)];
+ struct pidff_usage block_load[ARRAY_SIZE(pidff_block_load)];
+ struct pidff_usage pool[ARRAY_SIZE(pidff_pool)];
+ struct pidff_usage effect_operation[ARRAY_SIZE(pidff_effect_operation)];
+ struct pidff_usage block_free[ARRAY_SIZE(pidff_block_free)];
+
+ struct pidff_effect effect[PID_EFFECTS_MAX];
/*
* Special field is a field that is not composed of
@@ -184,6 +221,7 @@ struct pidff_device {
/* Special fields in set_effect */
struct hid_field *set_effect_type;
struct hid_field *effect_direction;
+ struct hid_field *axes_enable;
/* Special field in device_control */
struct hid_field *device_control;
@@ -194,17 +232,86 @@ struct pidff_device {
/* Special field in effect_operation */
struct hid_field *effect_operation_status;
- int control_id[sizeof(pidff_device_control)];
- int type_id[sizeof(pidff_effect_types)];
- int status_id[sizeof(pidff_block_load_status)];
- int operation_id[sizeof(pidff_effect_operation_status)];
-
- int pid_id[PID_EFFECTS_MAX];
+ int control_id[ARRAY_SIZE(pidff_device_control)];
+ int type_id[ARRAY_SIZE(pidff_effect_types)];
+ int status_id[ARRAY_SIZE(pidff_block_load_status)];
+ int operation_id[ARRAY_SIZE(pidff_effect_operation_status)];
+ int direction_axis_id[ARRAY_SIZE(pidff_direction_axis)];
u32 quirks;
u8 effect_count;
+ u8 axis_count;
};
+static int pidff_is_effect_conditional(struct ff_effect *effect)
+{
+ return effect->type == FF_SPRING ||
+ effect->type == FF_DAMPER ||
+ effect->type == FF_INERTIA ||
+ effect->type == FF_FRICTION;
+}
+
+static int pidff_is_duration_infinite(u16 duration)
+{
+ return duration == FF_INFINITE || duration == PID_INFINITE;
+}
+
+/*
+ * Get PID effect index from FF effect type.
+ * Return -EINVAL if invalid.
+ */
+static int pidff_effect_ff_to_pid(struct ff_effect *effect)
+{
+ switch (effect->type) {
+ case FF_CONSTANT:
+ return PID_CONSTANT;
+ case FF_RAMP:
+ return PID_RAMP;
+ case FF_SPRING:
+ return PID_SPRING;
+ case FF_DAMPER:
+ return PID_DAMPER;
+ case FF_INERTIA:
+ return PID_INERTIA;
+ case FF_FRICTION:
+ return PID_FRICTION;
+ case FF_PERIODIC:
+ switch (effect->u.periodic.waveform) {
+ case FF_SQUARE:
+ return PID_SQUARE;
+ case FF_TRIANGLE:
+ return PID_TRIANGLE;
+ case FF_SINE:
+ return PID_SINE;
+ case FF_SAW_UP:
+ return PID_SAW_UP;
+ case FF_SAW_DOWN:
+ return PID_SAW_DOWN;
+ }
+ }
+ pr_err("invalid effect type\n");
+ return -EINVAL;
+}
+
+/*
+ * Get effect id in the device descriptor.
+ * Return 0 if invalid.
+ */
+static int pidff_get_effect_type_id(struct pidff_device *pidff,
+ struct ff_effect *effect)
+{
+ int id = pidff_effect_ff_to_pid(effect);
+
+ if (id < 0)
+ return 0;
+
+ if (effect->type == FF_PERIODIC &&
+ pidff->quirks & HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY)
+ id = PID_SINE;
+
+ return pidff->type_id[id];
+}
+
/*
* Clamp value for a given field
*/
@@ -219,7 +326,7 @@ static s32 pidff_clamp(s32 i, struct hid_field *field)
static int pidff_rescale(int i, int max, struct hid_field *field)
{
return i * (field->logical_maximum - field->logical_minimum) / max +
- field->logical_minimum;
+ field->logical_minimum;
}
/*
@@ -265,28 +372,24 @@ static void pidff_set_signed(struct pidff_usage *usage, s16 value)
else {
if (value < 0)
usage->value[0] =
- pidff_rescale(-value, -S16_MIN, usage->field);
+ pidff_rescale(-value, -S16_MIN, usage->field);
else
usage->value[0] =
- pidff_rescale(value, S16_MAX, usage->field);
+ pidff_rescale(value, S16_MAX, usage->field);
}
pr_debug("calculated from %d to %d\n", value, usage->value[0]);
}
static void pidff_set_time(struct pidff_usage *usage, u16 time)
{
- usage->value[0] = pidff_clamp(
- pidff_rescale_time(time, usage->field), usage->field);
+ usage->value[0] = pidff_clamp(pidff_rescale_time(time, usage->field),
+ usage->field);
}
static void pidff_set_duration(struct pidff_usage *usage, u16 duration)
{
- /* Infinite value conversion from Linux API -> PID */
- if (duration == FF_INFINITE)
- duration = PID_INFINITE;
-
/* PID defines INFINITE as the max possible value for duration field */
- if (duration == PID_INFINITE) {
+ if (pidff_is_duration_infinite(duration)) {
usage->value[0] = (1U << usage->field->report_size) - 1;
return;
}
@@ -294,6 +397,43 @@ static void pidff_set_duration(struct pidff_usage *usage, u16 duration)
pidff_set_time(usage, duration);
}
+static void pidff_set_effect_direction(struct pidff_device *pidff,
+ struct ff_effect *effect)
+{
+ u16 direction = effect->direction;
+ int direction_enable = 1;
+
+ /* Use fixed direction if needed */
+ if (pidff->quirks & HID_PIDFF_QUIRK_FIX_CONDITIONAL_DIRECTION &&
+ pidff_is_effect_conditional(effect))
+ direction = PIDFF_FIXED_WHEEL_DIRECTION;
+
+ pidff->set_effect[PID_DIRECTION_ENABLE].value[0] = direction_enable;
+ pidff->effect_direction->value[0] =
+ pidff_rescale(direction, U16_MAX, pidff->effect_direction);
+
+ if (direction_enable)
+ return;
+
+ /*
+ * For use with the improved FFB API: we want to read the selected
+ * axes and their directions from the effect struct and only enable
+ * those. For now, enable all axes.
+ */
+ for (int i = 0; i < PID_AXES_COUNT; i++) {
+ /* HID index starts with 1 */
+ int index = pidff->direction_axis_id[i] - 1;
+
+ if (index < 0)
+ continue;
+
+ pidff->axes_enable->value[index] = 1;
+ pidff->effect_direction->value[index] = pidff_rescale(
+ direction, U16_MAX, pidff->effect_direction);
+ }
+}
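effect->direction is a u16 polar angle (0x4000 being East, per the PID_DIRECTION_* values above), and pidff_rescale() maps it onto whatever logical range the device's direction field declares. Worked for a hypothetical field with logical range 0..255:

    /* pidff_rescale(0x4000, U16_MAX, field): */
    int scaled = 0x4000 * (255 - 0) / 0xffff + 0;   /* == 63, i.e. East */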
+
/*
* Send envelope report to the device
*/
@@ -313,16 +453,12 @@ static void pidff_set_envelope_report(struct pidff_device *pidff,
pidff->set_envelope[PID_FADE_LEVEL].field);
pidff_set_time(&pidff->set_envelope[PID_ATTACK_TIME],
- envelope->attack_length);
+ envelope->attack_length);
pidff_set_time(&pidff->set_envelope[PID_FADE_TIME],
- envelope->fade_length);
-
- hid_dbg(pidff->hid, "attack %u => %d\n",
- envelope->attack_level,
- pidff->set_envelope[PID_ATTACK_LEVEL].value[0]);
+ envelope->fade_length);
hid_hw_request(pidff->hid, pidff->reports[PID_SET_ENVELOPE],
- HID_REQ_SET_REPORT);
+ HID_REQ_SET_REPORT);
}
/*
@@ -331,7 +467,7 @@ static void pidff_set_envelope_report(struct pidff_device *pidff,
static int pidff_needs_set_envelope(struct ff_envelope *envelope,
struct ff_envelope *old)
{
- bool needs_new_envelope;
+ int needs_new_envelope;
needs_new_envelope = envelope->attack_level != 0 ||
envelope->fade_level != 0 ||
@@ -339,8 +475,7 @@ static int pidff_needs_set_envelope(struct ff_envelope *envelope,
envelope->fade_length != 0;
if (!needs_new_envelope)
- return false;
-
+ return 0;
if (!old)
return needs_new_envelope;
@@ -353,8 +488,8 @@ static int pidff_needs_set_envelope(struct ff_envelope *envelope,
/*
* Send constant force report to the device
*/
-static void pidff_set_constant_force_report(struct pidff_device *pidff,
- struct ff_effect *effect)
+static void pidff_set_constant_report(struct pidff_device *pidff,
+ struct ff_effect *effect)
{
pidff->set_constant[PID_EFFECT_BLOCK_INDEX].value[0] =
pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0];
@@ -362,7 +497,7 @@ static void pidff_set_constant_force_report(struct pidff_device *pidff,
effect->u.constant.level);
hid_hw_request(pidff->hid, pidff->reports[PID_SET_CONSTANT],
- HID_REQ_SET_REPORT);
+ HID_REQ_SET_REPORT);
}
/*
@@ -386,28 +521,23 @@ static void pidff_set_effect_report(struct pidff_device *pidff,
pidff->create_new_effect_type->value[0];
pidff_set_duration(&pidff->set_effect[PID_DURATION],
- effect->replay.length);
+ effect->replay.length);
pidff->set_effect[PID_TRIGGER_BUTTON].value[0] = effect->trigger.button;
pidff_set_time(&pidff->set_effect[PID_TRIGGER_REPEAT_INT],
- effect->trigger.interval);
+ effect->trigger.interval);
pidff->set_effect[PID_GAIN].value[0] =
pidff->set_effect[PID_GAIN].field->logical_maximum;
- pidff->set_effect[PID_DIRECTION_ENABLE].value[0] = 1;
- /* Use fixed direction if needed */
- pidff->effect_direction->value[0] = pidff_rescale(
- pidff->quirks & HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION ?
- PIDFF_FIXED_WHEEL_DIRECTION : effect->direction,
- U16_MAX, pidff->effect_direction);
+ pidff_set_effect_direction(pidff, effect);
/* Omit setting delay field if it's missing */
if (!(pidff->quirks & HID_PIDFF_QUIRK_MISSING_DELAY))
pidff_set_time(&pidff->set_effect[PID_START_DELAY],
- effect->replay.delay);
+ effect->replay.delay);
hid_hw_request(pidff->hid, pidff->reports[PID_SET_EFFECT],
- HID_REQ_SET_REPORT);
+ HID_REQ_SET_REPORT);
}
/*
@@ -437,10 +567,10 @@ static void pidff_set_periodic_report(struct pidff_device *pidff,
effect->u.periodic.offset);
pidff_set(&pidff->set_periodic[PID_PHASE], effect->u.periodic.phase);
pidff_set_time(&pidff->set_periodic[PID_PERIOD],
- effect->u.periodic.period);
+ effect->u.periodic.period);
hid_hw_request(pidff->hid, pidff->reports[PID_SET_PERIODIC],
- HID_REQ_SET_REPORT);
+ HID_REQ_SET_REPORT);
}
/*
@@ -487,7 +617,7 @@ static void pidff_set_condition_report(struct pidff_device *pidff,
pidff_set(&pidff->set_condition[PID_DEAD_BAND],
effect->u.condition[i].deadband);
hid_hw_request(pidff->hid, pidff->reports[PID_SET_CONDITION],
- HID_REQ_SET_REPORT);
+ HID_REQ_SET_REPORT);
}
}
@@ -518,8 +648,8 @@ static int pidff_needs_set_condition(struct ff_effect *effect,
/*
* Send ramp force report to the device
*/
-static void pidff_set_ramp_force_report(struct pidff_device *pidff,
- struct ff_effect *effect)
+static void pidff_set_ramp_report(struct pidff_device *pidff,
+ struct ff_effect *effect)
{
pidff->set_ramp[PID_EFFECT_BLOCK_INDEX].value[0] =
pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0];
@@ -528,7 +658,7 @@ static void pidff_set_ramp_force_report(struct pidff_device *pidff,
pidff_set_signed(&pidff->set_ramp[PID_RAMP_END],
effect->u.ramp.end_level);
hid_hw_request(pidff->hid, pidff->reports[PID_SET_RAMP],
- HID_REQ_SET_REPORT);
+ HID_REQ_SET_REPORT);
}
/*
@@ -550,7 +680,7 @@ static void pidff_set_gain_report(struct pidff_device *pidff, u16 gain)
pidff_set(&pidff->device_gain[PID_DEVICE_GAIN_FIELD], gain);
hid_hw_request(pidff->hid, pidff->reports[PID_DEVICE_GAIN],
- HID_REQ_SET_REPORT);
+ HID_REQ_SET_REPORT);
}
/*
@@ -558,8 +688,7 @@ static void pidff_set_gain_report(struct pidff_device *pidff, u16 gain)
*/
static void pidff_set_device_control(struct pidff_device *pidff, int field)
{
- int i, index;
- int field_index = pidff->control_id[field];
+ const int field_index = pidff->control_id[field];
if (field_index < 1)
return;
@@ -569,8 +698,9 @@ static void pidff_set_device_control(struct pidff_device *pidff, int field)
hid_dbg(pidff->hid, "DEVICE_CONTROL is a bitmask\n");
/* Clear current bitmask */
- for (i = 0; i < sizeof(pidff_device_control); i++) {
- index = pidff->control_id[i];
+ for (int i = 0; i < ARRAY_SIZE(pidff_device_control); i++) {
+ int index = pidff->control_id[i];
+
if (index < 1)
continue;
@@ -585,16 +715,8 @@ static void pidff_set_device_control(struct pidff_device *pidff, int field)
hid_hw_request(pidff->hid, pidff->reports[PID_DEVICE_CONTROL], HID_REQ_SET_REPORT);
hid_hw_wait(pidff->hid);
-}
-
-/*
- * Modify actuators state
- */
-static void pidff_set_actuators(struct pidff_device *pidff, bool enable)
-{
- hid_dbg(pidff->hid, "%s actuators\n", enable ? "Enable" : "Disable");
- pidff_set_device_control(pidff,
- enable ? PID_ENABLE_ACTUATORS : PID_DISABLE_ACTUATORS);
+ hid_dbg(pidff->hid, "Device control command 0x%02x sent",
+ pidff_device_control[field]);
}
/*
@@ -608,7 +730,7 @@ static void pidff_reset(struct pidff_device *pidff)
pidff->effect_count = 0;
pidff_set_device_control(pidff, PID_STOP_ALL_EFFECTS);
- pidff_set_actuators(pidff, 1);
+ pidff_set_device_control(pidff, PID_ENABLE_ACTUATORS);
}
/*
@@ -644,32 +766,25 @@ static void pidff_fetch_pool(struct pidff_device *pidff)
*/
static int pidff_request_effect_upload(struct pidff_device *pidff, int efnum)
{
- int j;
-
- if (!pidff->effect_count)
- pidff_reset(pidff);
-
pidff->create_new_effect_type->value[0] = efnum;
hid_hw_request(pidff->hid, pidff->reports[PID_CREATE_NEW_EFFECT],
- HID_REQ_SET_REPORT);
+ HID_REQ_SET_REPORT);
hid_dbg(pidff->hid, "create_new_effect sent, type: %d\n", efnum);
pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0] = 0;
pidff->block_load_status->value[0] = 0;
hid_hw_wait(pidff->hid);
- for (j = 0; j < 60; j++) {
+ for (int i = 0; i < 60; i++) {
hid_dbg(pidff->hid, "pid_block_load requested\n");
hid_hw_request(pidff->hid, pidff->reports[PID_BLOCK_LOAD],
- HID_REQ_GET_REPORT);
+ HID_REQ_GET_REPORT);
hid_hw_wait(pidff->hid);
if (pidff->block_load_status->value[0] ==
pidff->status_id[PID_BLOCK_LOAD_SUCCESS]) {
hid_dbg(pidff->hid, "device reported free memory: %d bytes\n",
pidff->block_load[PID_RAM_POOL_AVAILABLE].value ?
pidff->block_load[PID_RAM_POOL_AVAILABLE].value[0] : -1);
-
- pidff->effect_count++;
return 0;
}
if (pidff->block_load_status->value[0] ==
@@ -689,6 +804,12 @@ static int pidff_request_effect_upload(struct pidff_device *pidff, int efnum)
return -EIO;
}
+static int pidff_needs_playback(struct pidff_device *pidff, int effect_id, int n)
+{
+ return pidff->effect[effect_id].is_infinite ||
+ pidff->effect[effect_id].loop_count != n;
+}
+
/*
* Play the effect with PID id n times
*/
@@ -696,6 +817,9 @@ static void pidff_playback_pid(struct pidff_device *pidff, int pid_id, int n)
{
pidff->effect_operation[PID_EFFECT_BLOCK_INDEX].value[0] = pid_id;
+ hid_dbg(pidff->hid, "%s PID effect %d", n == 0 ? "stopping" : "playing",
+ pid_id);
+
if (n == 0) {
pidff->effect_operation_status->value[0] =
pidff->operation_id[PID_EFFECT_STOP];
@@ -707,7 +831,7 @@ static void pidff_playback_pid(struct pidff_device *pidff, int pid_id, int n)
}
hid_hw_request(pidff->hid, pidff->reports[PID_EFFECT_OPERATION],
- HID_REQ_SET_REPORT);
+ HID_REQ_SET_REPORT);
}
/*
@@ -717,7 +841,14 @@ static int pidff_playback(struct input_dev *dev, int effect_id, int value)
{
struct pidff_device *pidff = dev->ff->private;
- pidff_playback_pid(pidff, pidff->pid_id[effect_id], value);
+ if (!pidff_needs_playback(pidff, effect_id, value))
+ return 0;
+
+ hid_dbg(pidff->hid, "requesting %s on FF effect %d",
+ value == 0 ? "stop" : "playback", effect_id);
+
+ pidff->effect[effect_id].loop_count = value;
+ pidff_playback_pid(pidff, pidff->effect[effect_id].pid_id, value);
return 0;
}
@@ -729,10 +860,7 @@ static void pidff_erase_pid(struct pidff_device *pidff, int pid_id)
{
pidff->block_free[PID_EFFECT_BLOCK_INDEX].value[0] = pid_id;
hid_hw_request(pidff->hid, pidff->reports[PID_BLOCK_FREE],
- HID_REQ_SET_REPORT);
-
- if (pidff->effect_count > 0)
- pidff->effect_count--;
+ HID_REQ_SET_REPORT);
}
/*
@@ -741,10 +869,9 @@ static void pidff_erase_pid(struct pidff_device *pidff, int pid_id)
static int pidff_erase_effect(struct input_dev *dev, int effect_id)
{
struct pidff_device *pidff = dev->ff->private;
- int pid_id = pidff->pid_id[effect_id];
+ int pid_id = pidff->effect[effect_id].pid_id;
- hid_dbg(pidff->hid, "starting to erase %d/%d\n",
- effect_id, pidff->pid_id[effect_id]);
+ hid_dbg(pidff->hid, "starting to erase %d/%d\n", effect_id, pid_id);
/*
* Wait for the queue to clear. We do not want
@@ -754,139 +881,83 @@ static int pidff_erase_effect(struct input_dev *dev, int effect_id)
pidff_playback_pid(pidff, pid_id, 0);
pidff_erase_pid(pidff, pid_id);
+ if (pidff->effect_count > 0)
+ pidff->effect_count--;
+
+ hid_dbg(pidff->hid, "current effect count: %d", pidff->effect_count);
return 0;
}
+#define PIDFF_SET_REPORT_IF_NEEDED(type, effect, old) \
+ ({ if (!old || pidff_needs_set_## type(effect, old)) \
+ pidff_set_ ##type## _report(pidff, effect); })
+
+#define PIDFF_SET_ENVELOPE_IF_NEEDED(type, effect, old) \
+ ({ if (pidff_needs_set_envelope(&effect->u.type.envelope, \
+ old ? &old->u.type.envelope : NULL)) \
+ pidff_set_envelope_report(pidff, &effect->u.type.envelope); })
+
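These two macros collapse the per-type boilerplate the old switch below repeated for every effect. For FF_CONSTANT the pair expands to roughly:

    /* PIDFF_SET_REPORT_IF_NEEDED(constant, new, old): */
    if (!old || pidff_needs_set_constant(new, old))
            pidff_set_constant_report(pidff, new);

    /* PIDFF_SET_ENVELOPE_IF_NEEDED(constant, new, old): */
    if (pidff_needs_set_envelope(&new->u.constant.envelope,
                                 old ? &old->u.constant.envelope : NULL))
            pidff_set_envelope_report(pidff, &new->u.constant.envelope);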
/*
* Effect upload handler
*/
-static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect,
+static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *new,
struct ff_effect *old)
{
struct pidff_device *pidff = dev->ff->private;
- int type_id;
- int error;
+ const int type_id = pidff_get_effect_type_id(pidff, new);
- pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0] = 0;
- if (old) {
- pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0] =
- pidff->pid_id[effect->id];
+ if (!type_id) {
+ hid_err(pidff->hid, "effect type not supported\n");
+ return -EINVAL;
}
- switch (effect->type) {
+ if (!pidff->effect_count)
+ pidff_reset(pidff);
+
+ if (!old) {
+ int error = pidff_request_effect_upload(pidff, type_id);
+
+ if (error)
+ return error;
+
+ pidff->effect_count++;
+ hid_dbg(pidff->hid, "current effect count: %d", pidff->effect_count);
+ pidff->effect[new->id].loop_count = 0;
+ pidff->effect[new->id].pid_id =
+ pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0];
+ }
+
+ pidff->effect[new->id].is_infinite =
+ pidff_is_duration_infinite(new->replay.length);
+
+ pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0] =
+ pidff->effect[new->id].pid_id;
+
+ PIDFF_SET_REPORT_IF_NEEDED(effect, new, old);
+ switch (new->type) {
case FF_CONSTANT:
- if (!old) {
- error = pidff_request_effect_upload(pidff,
- pidff->type_id[PID_CONSTANT]);
- if (error)
- return error;
- }
- if (!old || pidff_needs_set_effect(effect, old))
- pidff_set_effect_report(pidff, effect);
- if (!old || pidff_needs_set_constant(effect, old))
- pidff_set_constant_force_report(pidff, effect);
- if (pidff_needs_set_envelope(&effect->u.constant.envelope,
- old ? &old->u.constant.envelope : NULL))
- pidff_set_envelope_report(pidff, &effect->u.constant.envelope);
+ PIDFF_SET_REPORT_IF_NEEDED(constant, new, old);
+ PIDFF_SET_ENVELOPE_IF_NEEDED(constant, new, old);
break;
case FF_PERIODIC:
- if (!old) {
- switch (effect->u.periodic.waveform) {
- case FF_SQUARE:
- type_id = PID_SQUARE;
- break;
- case FF_TRIANGLE:
- type_id = PID_TRIANGLE;
- break;
- case FF_SINE:
- type_id = PID_SINE;
- break;
- case FF_SAW_UP:
- type_id = PID_SAW_UP;
- break;
- case FF_SAW_DOWN:
- type_id = PID_SAW_DOWN;
- break;
- default:
- hid_err(pidff->hid, "invalid waveform\n");
- return -EINVAL;
- }
-
- if (pidff->quirks & HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY)
- type_id = PID_SINE;
-
- error = pidff_request_effect_upload(pidff,
- pidff->type_id[type_id]);
- if (error)
- return error;
- }
- if (!old || pidff_needs_set_effect(effect, old))
- pidff_set_effect_report(pidff, effect);
- if (!old || pidff_needs_set_periodic(effect, old))
- pidff_set_periodic_report(pidff, effect);
- if (pidff_needs_set_envelope(&effect->u.periodic.envelope,
- old ? &old->u.periodic.envelope : NULL))
- pidff_set_envelope_report(pidff, &effect->u.periodic.envelope);
+ PIDFF_SET_REPORT_IF_NEEDED(periodic, new, old);
+ PIDFF_SET_ENVELOPE_IF_NEEDED(periodic, new, old);
break;
case FF_RAMP:
- if (!old) {
- error = pidff_request_effect_upload(pidff,
- pidff->type_id[PID_RAMP]);
- if (error)
- return error;
- }
- if (!old || pidff_needs_set_effect(effect, old))
- pidff_set_effect_report(pidff, effect);
- if (!old || pidff_needs_set_ramp(effect, old))
- pidff_set_ramp_force_report(pidff, effect);
- if (pidff_needs_set_envelope(&effect->u.ramp.envelope,
- old ? &old->u.ramp.envelope : NULL))
- pidff_set_envelope_report(pidff, &effect->u.ramp.envelope);
+ PIDFF_SET_REPORT_IF_NEEDED(ramp, new, old);
+ PIDFF_SET_ENVELOPE_IF_NEEDED(ramp, new, old);
break;
case FF_SPRING:
case FF_DAMPER:
case FF_INERTIA:
case FF_FRICTION:
- if (!old) {
- switch (effect->type) {
- case FF_SPRING:
- type_id = PID_SPRING;
- break;
- case FF_DAMPER:
- type_id = PID_DAMPER;
- break;
- case FF_INERTIA:
- type_id = PID_INERTIA;
- break;
- case FF_FRICTION:
- type_id = PID_FRICTION;
- break;
- }
- error = pidff_request_effect_upload(pidff,
- pidff->type_id[type_id]);
- if (error)
- return error;
- }
- if (!old || pidff_needs_set_effect(effect, old))
- pidff_set_effect_report(pidff, effect);
- if (!old || pidff_needs_set_condition(effect, old))
- pidff_set_condition_report(pidff, effect);
+ PIDFF_SET_REPORT_IF_NEEDED(condition, new, old);
break;
-
- default:
- hid_err(pidff->hid, "invalid type\n");
- return -EINVAL;
}
-
- if (!old)
- pidff->pid_id[effect->id] =
- pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0];
-
hid_dbg(pidff->hid, "uploaded\n");
-
return 0;
}
@@ -924,7 +995,7 @@ static void pidff_autocenter(struct pidff_device *pidff, u16 magnitude)
pidff->set_effect[PID_START_DELAY].value[0] = 0;
hid_hw_request(pidff->hid, pidff->reports[PID_SET_EFFECT],
- HID_REQ_SET_REPORT);
+ HID_REQ_SET_REPORT);
}
/*
@@ -936,56 +1007,85 @@ static void pidff_set_autocenter(struct input_dev *dev, u16 magnitude)
}
/*
+ * Find specific usage in a given hid_field
+ */
+static int pidff_find_usage(struct hid_field *fld, unsigned int usage_code)
+{
+ for (int i = 0; i < fld->maxusage; i++) {
+ if (fld->usage[i].hid == usage_code)
+ return i;
+ }
+ return -1;
+}
+
+/*
+ * Find hid_field with a specific usage. Return the usage index as well
+ */
+static int pidff_find_field_with_usage(int *usage_index,
+ struct hid_report *report,
+ unsigned int usage_code)
+{
+ for (int i = 0; i < report->maxfield; i++) {
+ struct hid_field *fld = report->field[i];
+
+ if (fld->maxusage != fld->report_count) {
+ pr_debug("maxusage and report_count do not match, skipping\n");
+ continue;
+ }
+
+ int index = pidff_find_usage(fld, usage_code);
+
+ if (index >= 0) {
+ *usage_index = index;
+ return i;
+ }
+ }
+ return -1;
+}
+
+/*
* Find fields from a report and fill a pidff_usage
*/
static int pidff_find_fields(struct pidff_usage *usage, const u8 *table,
- struct hid_report *report, int count, int strict)
+ struct hid_report *report, int count, int strict,
+ u32 *quirks)
{
+ const u8 block_offset = pidff_set_condition[PID_PARAM_BLOCK_OFFSET];
+ const u8 delay = pidff_set_effect[PID_START_DELAY];
+
if (!report) {
pr_debug("%s, null report\n", __func__);
return -1;
}
- int i, j, k, found;
- int return_value = 0;
+ for (int i = 0; i < count; i++) {
+ int index;
+ int found = pidff_find_field_with_usage(&index, report,
+ HID_UP_PID | table[i]);
- for (k = 0; k < count; k++) {
- found = 0;
- for (i = 0; i < report->maxfield; i++) {
- if (report->field[i]->maxusage !=
- report->field[i]->report_count) {
- pr_debug("maxusage and report_count do not match, skipping\n");
- continue;
- }
- for (j = 0; j < report->field[i]->maxusage; j++) {
- if (report->field[i]->usage[j].hid ==
- (HID_UP_PID | table[k])) {
- pr_debug("found %d at %d->%d\n",
- k, i, j);
- usage[k].field = report->field[i];
- usage[k].value =
- &report->field[i]->value[j];
- found = 1;
- break;
- }
- }
- if (found)
- break;
+ if (found >= 0) {
+ pr_debug("found %d at %d->%d\n", i, found, index);
+ usage[i].field = report->field[found];
+ usage[i].value = &report->field[found]->value[index];
+ continue;
}
- if (!found && table[k] == pidff_set_effect[PID_START_DELAY]) {
+
+ if (table[i] == delay) {
pr_debug("Delay field not found, but that's OK\n");
pr_debug("Setting MISSING_DELAY quirk\n");
- return_value |= HID_PIDFF_QUIRK_MISSING_DELAY;
- } else if (!found && table[k] == pidff_set_condition[PID_PARAM_BLOCK_OFFSET]) {
+ *quirks |= HID_PIDFF_QUIRK_MISSING_DELAY;
+
+ } else if (table[i] == block_offset) {
pr_debug("PBO field not found, but that's OK\n");
pr_debug("Setting MISSING_PBO quirk\n");
- return_value |= HID_PIDFF_QUIRK_MISSING_PBO;
- } else if (!found && strict) {
- pr_debug("failed to locate %d\n", k);
+ *quirks |= HID_PIDFF_QUIRK_MISSING_PBO;
+
+ } else if (strict) {
+ pr_debug("failed to locate %d\n", i);
return -1;
}
}
- return return_value;
+ return 0;
}
/*
@@ -995,7 +1095,7 @@ static int pidff_check_usage(int usage)
{
int i;
- for (i = 0; i < sizeof(pidff_reports); i++)
+ for (i = 0; i < ARRAY_SIZE(pidff_reports); i++)
if (usage == (HID_UP_PID | pidff_reports[i]))
return i;
@@ -1050,9 +1150,7 @@ static void pidff_find_reports(struct hid_device *hid, int report_type,
*/
static int pidff_reports_ok(struct pidff_device *pidff)
{
- int i;
-
- for (i = 0; i <= PID_REQUIRED_REPORTS; i++) {
+ for (int i = 0; i < PID_REQUIRED_REPORTS; i++) {
if (!pidff->reports[i]) {
hid_dbg(pidff->hid, "%d missing\n", i);
return 0;
@@ -1073,9 +1171,7 @@ static struct hid_field *pidff_find_special_field(struct hid_report *report,
return NULL;
}
- int i;
-
- for (i = 0; i < report->maxfield; i++) {
+ for (int i = 0; i < report->maxfield; i++) {
if (report->field[i]->logical == (HID_UP_PID | usage) &&
report->field[i]->report_count > 0) {
if (!enforce_min ||
@@ -1093,27 +1189,29 @@ static struct hid_field *pidff_find_special_field(struct hid_report *report,
* Fill a pidff->*_id struct table
*/
static int pidff_find_special_keys(int *keys, struct hid_field *fld,
- const u8 *usagetable, int count)
+ const u8 *usagetable, int count,
+ unsigned int usage_page)
{
-
- int i, j;
int found = 0;
- for (i = 0; i < count; i++) {
- for (j = 0; j < fld->maxusage; j++) {
- if (fld->usage[j].hid == (HID_UP_PID | usagetable[i])) {
- keys[i] = j + 1;
- found++;
- break;
- }
- }
+ if (!fld)
+ return 0;
+
+ for (int i = 0; i < count; i++) {
+ keys[i] = pidff_find_usage(fld, usage_page | usagetable[i]) + 1;
+ if (keys[i])
+ found++;
}
return found;
}
#define PIDFF_FIND_SPECIAL_KEYS(keys, field, name) \
pidff_find_special_keys(pidff->keys, pidff->field, pidff_ ## name, \
- sizeof(pidff_ ## name))
+ ARRAY_SIZE(pidff_ ## name), HID_UP_PID)
+
+#define PIDFF_FIND_GENERAL_DESKTOP(keys, field, name) \
+ pidff_find_special_keys(pidff->keys, pidff->field, pidff_ ## name, \
+ ARRAY_SIZE(pidff_ ## name), HID_UP_GENDESK)
/*
* Find and check the special fields
@@ -1128,13 +1226,24 @@ static int pidff_find_special_fields(struct pidff_device *pidff)
pidff->set_effect_type =
pidff_find_special_field(pidff->reports[PID_SET_EFFECT],
PID_EFFECT_TYPE, 1);
+ pidff->axes_enable =
+ pidff_find_special_field(pidff->reports[PID_SET_EFFECT],
+ PID_AXES_ENABLE, 0);
pidff->effect_direction =
pidff_find_special_field(pidff->reports[PID_SET_EFFECT],
PID_DIRECTION, 0);
pidff->device_control =
pidff_find_special_field(pidff->reports[PID_DEVICE_CONTROL],
- PID_DEVICE_CONTROL_ARRAY,
- !(pidff->quirks & HID_PIDFF_QUIRK_PERMISSIVE_CONTROL));
+ PID_DEVICE_CONTROL_ARRAY, 1);
+
+ /* Detect and set permissive control quirk */
+ if (!pidff->device_control) {
+ pr_debug("Setting PERMISSIVE_CONTROL quirk\n");
+ pidff->quirks |= HID_PIDFF_QUIRK_PERMISSIVE_CONTROL;
+ pidff->device_control = pidff_find_special_field(
+ pidff->reports[PID_DEVICE_CONTROL],
+ PID_DEVICE_CONTROL_ARRAY, 0);
+ }
pidff->block_load_status =
pidff_find_special_field(pidff->reports[PID_BLOCK_LOAD],
@@ -1180,7 +1289,7 @@ static int pidff_find_special_fields(struct pidff_device *pidff)
if (PIDFF_FIND_SPECIAL_KEYS(status_id, block_load_status,
block_load_status) !=
- sizeof(pidff_block_load_status)) {
+ ARRAY_SIZE(pidff_block_load_status)) {
hid_err(pidff->hid,
"block load status identifiers not found\n");
return -1;
@@ -1188,11 +1297,37 @@ static int pidff_find_special_fields(struct pidff_device *pidff)
if (PIDFF_FIND_SPECIAL_KEYS(operation_id, effect_operation_status,
effect_operation_status) !=
- sizeof(pidff_effect_operation_status)) {
+ ARRAY_SIZE(pidff_effect_operation_status)) {
hid_err(pidff->hid, "effect operation identifiers not found\n");
return -1;
}
+ if (!pidff->axes_enable) {
+ hid_info(pidff->hid, "axes enable field not found!\n");
+ return 0;
+ }
+
+ hid_dbg(pidff->hid, "axes enable report count: %u\n",
+ pidff->axes_enable->report_count);
+
+ uint found = PIDFF_FIND_GENERAL_DESKTOP(direction_axis_id, axes_enable,
+ direction_axis);
+
+ pidff->axis_count = found;
+ hid_dbg(pidff->hid, "found direction axes: %u", found);
+
+ for (int i = 0; i < ARRAY_SIZE(pidff_direction_axis); i++) {
+ if (!pidff->direction_axis_id[i])
+ continue;
+
+ hid_dbg(pidff->hid, "axis %d, usage: 0x%04x, index: %d", i + 1,
+ pidff_direction_axis[i], pidff->direction_axis_id[i]);
+ }
+
+ if (pidff->axes_enable && found != pidff->axes_enable->report_count)
+ hid_warn(pidff->hid, "axes_enable: %u != direction axes: %u",
+ pidff->axes_enable->report_count, found);
+
return 0;
}
@@ -1204,7 +1339,7 @@ static int pidff_find_effects(struct pidff_device *pidff,
{
int i;
- for (i = 0; i < sizeof(pidff_effect_types); i++) {
+ for (i = 0; i < ARRAY_SIZE(pidff_effect_types); i++) {
int pidff_type = pidff->type_id[i];
if (pidff->set_effect_type->usage[pidff_type].hid !=
@@ -1254,26 +1389,17 @@ static int pidff_find_effects(struct pidff_device *pidff,
#define PIDFF_FIND_FIELDS(name, report, strict) \
pidff_find_fields(pidff->name, pidff_ ## name, \
pidff->reports[report], \
- sizeof(pidff_ ## name), strict)
+ ARRAY_SIZE(pidff_ ## name), strict, &pidff->quirks)
/*
* Fill and check the pidff_usages
*/
static int pidff_init_fields(struct pidff_device *pidff, struct input_dev *dev)
{
- int status = 0;
-
- /* Save info about the device not having the DELAY ffb field. */
- status = PIDFF_FIND_FIELDS(set_effect, PID_SET_EFFECT, 1);
- if (status == -1) {
+ if (PIDFF_FIND_FIELDS(set_effect, PID_SET_EFFECT, 1)) {
hid_err(pidff->hid, "unknown set_effect report layout\n");
return -ENODEV;
}
- pidff->quirks |= status;
-
- if (status & HID_PIDFF_QUIRK_MISSING_DELAY)
- hid_dbg(pidff->hid, "Adding MISSING_DELAY quirk\n");
-
PIDFF_FIND_FIELDS(block_load, PID_BLOCK_LOAD, 0);
if (!pidff->block_load[PID_EFFECT_BLOCK_INDEX].value) {
@@ -1307,39 +1433,25 @@ static int pidff_init_fields(struct pidff_device *pidff, struct input_dev *dev)
"has periodic effect but no envelope\n");
}
- if (test_bit(FF_CONSTANT, dev->ffbit) &&
- PIDFF_FIND_FIELDS(set_constant, PID_SET_CONSTANT, 1)) {
+ if (PIDFF_FIND_FIELDS(set_constant, PID_SET_CONSTANT, 1) &&
+ test_and_clear_bit(FF_CONSTANT, dev->ffbit))
hid_warn(pidff->hid, "unknown constant effect layout\n");
- clear_bit(FF_CONSTANT, dev->ffbit);
- }
- if (test_bit(FF_RAMP, dev->ffbit) &&
- PIDFF_FIND_FIELDS(set_ramp, PID_SET_RAMP, 1)) {
+ if (PIDFF_FIND_FIELDS(set_ramp, PID_SET_RAMP, 1) &&
+ test_and_clear_bit(FF_RAMP, dev->ffbit))
hid_warn(pidff->hid, "unknown ramp effect layout\n");
- clear_bit(FF_RAMP, dev->ffbit);
- }
-
- if (test_bit(FF_SPRING, dev->ffbit) ||
- test_bit(FF_DAMPER, dev->ffbit) ||
- test_bit(FF_FRICTION, dev->ffbit) ||
- test_bit(FF_INERTIA, dev->ffbit)) {
- status = PIDFF_FIND_FIELDS(set_condition, PID_SET_CONDITION, 1);
- if (status < 0) {
+ if (PIDFF_FIND_FIELDS(set_condition, PID_SET_CONDITION, 1)) {
+ if (test_and_clear_bit(FF_SPRING, dev->ffbit) ||
+ test_and_clear_bit(FF_DAMPER, dev->ffbit) ||
+ test_and_clear_bit(FF_FRICTION, dev->ffbit) ||
+ test_and_clear_bit(FF_INERTIA, dev->ffbit))
hid_warn(pidff->hid, "unknown condition effect layout\n");
- clear_bit(FF_SPRING, dev->ffbit);
- clear_bit(FF_DAMPER, dev->ffbit);
- clear_bit(FF_FRICTION, dev->ffbit);
- clear_bit(FF_INERTIA, dev->ffbit);
- }
- pidff->quirks |= status;
}
- if (test_bit(FF_PERIODIC, dev->ffbit) &&
- PIDFF_FIND_FIELDS(set_periodic, PID_SET_PERIODIC, 1)) {
+ if (PIDFF_FIND_FIELDS(set_periodic, PID_SET_PERIODIC, 1) &&
+ test_and_clear_bit(FF_PERIODIC, dev->ffbit))
hid_warn(pidff->hid, "unknown periodic effect layout\n");
- clear_bit(FF_PERIODIC, dev->ffbit);
- }
PIDFF_FIND_FIELDS(pool, PID_POOL, 0);
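The rewritten capability checks above rely on test_and_clear_bit() combining the read and the clear: it returns the bit's previous value and leaves it cleared, so "clear the bit and warn only if it was actually set" becomes a single expression. A non-atomic userspace sketch of just that return-value contract (the kernel helper is atomic):

#include <stdbool.h>
#include <stdio.h>

/* Simplified, non-atomic stand-in for the kernel's test_and_clear_bit(). */
static bool test_and_clear_bit(int nr, unsigned long *addr)
{
        bool old = *addr & (1UL << nr);

        *addr &= ~(1UL << nr);
        return old;
}

int main(void)
{
        unsigned long ffbit = 1UL << 3;         /* pretend FF_SPRING is bit 3 */

        if (test_and_clear_bit(3, &ffbit))
                printf("was set, now cleared\n");       /* printed once */
        if (test_and_clear_bit(3, &ffbit))
                printf("never printed\n");              /* bit already clear */
        return 0;
}

Note that in an || chain of these calls, evaluation short-circuits at the first set bit, so only the bits up to and including that one are cleared.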
@@ -1392,8 +1504,8 @@ static int pidff_check_autocenter(struct pidff_device *pidff,
int hid_pidff_init_with_quirks(struct hid_device *hid, u32 initial_quirks)
{
struct pidff_device *pidff;
- struct hid_input *hidinput = list_entry(hid->inputs.next,
- struct hid_input, list);
+ struct hid_input *hidinput =
+ list_entry(hid->inputs.next, struct hid_input, list);
struct input_dev *dev = hidinput->input;
struct ff_device *ff;
int max_effects;
@@ -1473,14 +1585,14 @@ int hid_pidff_init_with_quirks(struct hid_device *hid, u32 initial_quirks)
ff->set_autocenter = pidff_set_autocenter;
ff->playback = pidff_playback;
- hid_info(dev, "Force feedback for USB HID PID devices by Anssi Hannula <anssi.hannula@gmail.com>\n");
- hid_dbg(dev, "Active quirks mask: 0x%x\n", pidff->quirks);
+ hid_info(dev, "Force feedback for USB HID PID devices by Anssi Hannula\n");
+ hid_dbg(dev, "Active quirks mask: 0x%08x\n", pidff->quirks);
hid_device_io_stop(hid);
return 0;
- fail:
+fail:
hid_device_io_stop(hid);
kfree(pidff);
diff --git a/drivers/hid/usbhid/hid-pidff.h b/drivers/hid/usbhid/hid-pidff.h
index a53a8b436baa..f321f675e131 100644
--- a/drivers/hid/usbhid/hid-pidff.h
+++ b/drivers/hid/usbhid/hid-pidff.h
@@ -16,7 +16,7 @@
#define HID_PIDFF_QUIRK_PERMISSIVE_CONTROL BIT(2)
/* Use fixed 0x4000 direction during SET_EFFECT report upload */
-#define HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION BIT(3)
+#define HID_PIDFF_QUIRK_FIX_CONDITIONAL_DIRECTION BIT(3)
/* Force all periodic effects to be uploaded as SINE */
#define HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY BIT(4)
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 955b39d22524..9b2c710f8da1 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -684,6 +684,7 @@ static bool wacom_is_art_pen(int tool_id)
case 0x885: /* Intuos3 Marker Pen */
case 0x804: /* Intuos4/5 13HD/24HD Marker Pen */
case 0x10804: /* Intuos4/5 13HD/24HD Art Pen */
+ case 0x204: /* Art Pen 2 */
is_art_pen = true;
break;
}
diff --git a/drivers/hsi/controllers/omap_ssi_port.c b/drivers/hsi/controllers/omap_ssi_port.c
index aeb92b803a17..50dde968febe 100644
--- a/drivers/hsi/controllers/omap_ssi_port.c
+++ b/drivers/hsi/controllers/omap_ssi_port.c
@@ -362,7 +362,6 @@ static int ssi_async_break(struct hsi_msg *msg)
spin_unlock_bh(&omap_port->lock);
}
out:
- pm_runtime_mark_last_busy(omap_port->pdev);
pm_runtime_put_autosuspend(omap_port->pdev);
return err;
@@ -401,7 +400,6 @@ static int ssi_async(struct hsi_msg *msg)
msg->status = HSI_STATUS_ERROR;
}
spin_unlock_bh(&omap_port->lock);
- pm_runtime_mark_last_busy(omap_port->pdev);
pm_runtime_put_autosuspend(omap_port->pdev);
dev_dbg(&port->device, "msg status %d ttype %d ch %d\n",
msg->status, msg->ttype, msg->channel);
@@ -504,7 +502,6 @@ static int ssi_setup(struct hsi_client *cl)
omap_port->ssr.mode = cl->rx_cfg.mode;
out:
spin_unlock_bh(&omap_port->lock);
- pm_runtime_mark_last_busy(omap_port->pdev);
pm_runtime_put_autosuspend(omap_port->pdev);
return err;
@@ -570,7 +567,6 @@ static int ssi_flush(struct hsi_client *cl)
pinctrl_pm_select_default_state(omap_port->pdev);
spin_unlock_bh(&omap_port->lock);
- pm_runtime_mark_last_busy(omap_port->pdev);
pm_runtime_put_autosuspend(omap_port->pdev);
return 0;
@@ -625,7 +621,6 @@ static int ssi_stop_tx(struct hsi_client *cl)
writel(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
spin_unlock_bh(&omap_port->wk_lock);
- pm_runtime_mark_last_busy(omap_port->pdev);
pm_runtime_put_autosuspend(omap_port->pdev); /* Release clocks */
@@ -653,7 +648,6 @@ static void ssi_transfer(struct omap_ssi_port *omap_port,
}
}
spin_unlock_bh(&omap_port->lock);
- pm_runtime_mark_last_busy(omap_port->pdev);
pm_runtime_put_autosuspend(omap_port->pdev);
}
@@ -683,7 +677,6 @@ static void ssi_cleanup_queues(struct hsi_client *cl)
txbufstate |= (1 << i);
status |= SSI_DATAACCEPT(i);
/* Release the clocks writes, also GDD ones */
- pm_runtime_mark_last_busy(omap_port->pdev);
pm_runtime_put_autosuspend(omap_port->pdev);
}
ssi_flush_queue(&omap_port->txqueue[i], cl);
@@ -739,7 +732,6 @@ static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl)
* ssi_cleanup_queues
*/
if (msg->ttype == HSI_MSG_READ) {
- pm_runtime_mark_last_busy(omap_port->pdev);
pm_runtime_put_autosuspend(omap_port->pdev);
}
omap_ssi->gdd_trn[i].msg = NULL;
@@ -936,7 +928,6 @@ static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue)
reg = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
if (msg->ttype == HSI_MSG_WRITE) {
/* Release clocks for write transfer */
- pm_runtime_mark_last_busy(omap_port->pdev);
pm_runtime_put_autosuspend(omap_port->pdev);
}
reg &= ~val;
@@ -981,7 +972,6 @@ static irqreturn_t ssi_pio_thread(int irq, void *ssi_port)
/* TODO: sleep if we retry? */
} while (status_reg);
- pm_runtime_mark_last_busy(omap_port->pdev);
pm_runtime_put_autosuspend(omap_port->pdev);
return IRQ_HANDLED;
@@ -1018,7 +1008,6 @@ static irqreturn_t ssi_wake_thread(int irq __maybe_unused, void *ssi_port)
}
hsi_event(port, HSI_EVENT_STOP_RX);
if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags)) {
- pm_runtime_mark_last_busy(omap_port->pdev);
pm_runtime_put_autosuspend(omap_port->pdev);
}
}
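Every hunk in this file deletes a pm_runtime_mark_last_busy() that immediately precedes pm_runtime_put_autosuspend(). This matches the tree-wide cleanup in which pm_runtime_put_autosuspend() itself began refreshing the last-busy timestamp, making the explicit call redundant boilerplate. The pattern, side by side (a sketch, not taken verbatim from this driver):

/* Before: refresh the autosuspend timestamp by hand, then put. */
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);

/* After: the put updates the timestamp itself before arming the
 * autosuspend timer, so one call is enough.
 */
pm_runtime_put_autosuspend(dev);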
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index 57623ca7f350..0b8c391a0342 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -3,13 +3,14 @@
menu "Microsoft Hyper-V guest support"
config HYPERV
- tristate "Microsoft Hyper-V client drivers"
+ bool "Microsoft Hyper-V core hypervisor support"
depends on (X86 && X86_LOCAL_APIC && HYPERVISOR_GUEST) \
|| (ARM64 && !CPU_BIG_ENDIAN)
select PARAVIRT
select X86_HV_CALLBACK_VECTOR if X86
select OF_EARLY_FLATTREE if OF
select SYSFB if EFI && !HYPERV_VTL_MODE
+ select IRQ_MSI_LIB if X86
help
Select this option to run Linux as a Hyper-V client operating
system.
@@ -44,18 +45,25 @@ config HYPERV_TIMER
config HYPERV_UTILS
tristate "Microsoft Hyper-V Utilities driver"
- depends on HYPERV && CONNECTOR && NLS
+ depends on HYPERV_VMBUS && CONNECTOR && NLS
depends on PTP_1588_CLOCK_OPTIONAL
help
Select this option to enable the Hyper-V Utilities.
config HYPERV_BALLOON
tristate "Microsoft Hyper-V Balloon driver"
- depends on HYPERV
+ depends on HYPERV_VMBUS
select PAGE_REPORTING
help
Select this option to enable Hyper-V Balloon driver.
+config HYPERV_VMBUS
+ tristate "Microsoft Hyper-V VMBus driver"
+ depends on HYPERV
+ default HYPERV
+ help
+ Select this option to enable the Hyper-V VMBus driver.
+
config MSHV_ROOT
tristate "Microsoft Hyper-V root partition support"
depends on HYPERV && (X86_64 || ARM64)
@@ -66,6 +74,7 @@ config MSHV_ROOT
# no particular order, making it impossible to reassemble larger pages
depends on PAGE_SIZE_4KB
select EVENTFD
+ select VIRT_XFER_TO_GUEST_WORK
default n
help
Select this option to enable support for booting and running as root
diff --git a/drivers/hv/Makefile b/drivers/hv/Makefile
index 976189c725dc..1a1677bf4dac 100644
--- a/drivers/hv/Makefile
+++ b/drivers/hv/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_HYPERV) += hv_vmbus.o
+obj-$(CONFIG_HYPERV_VMBUS) += hv_vmbus.o
obj-$(CONFIG_HYPERV_UTILS) += hv_utils.o
obj-$(CONFIG_HYPERV_BALLOON) += hv_balloon.o
obj-$(CONFIG_MSHV_ROOT) += mshv_root.o
@@ -16,5 +16,5 @@ mshv_root-y := mshv_root_main.o mshv_synic.o mshv_eventfd.o mshv_irq.o \
mshv_root_hv_call.o mshv_portid_table.o
# Code that must be built-in
-obj-$(subst m,y,$(CONFIG_HYPERV)) += hv_common.o
+obj-$(CONFIG_HYPERV) += hv_common.o
obj-$(subst m,y,$(CONFIG_MSHV_ROOT)) += hv_proc.o mshv_common.o
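The Makefile change works because CONFIG_HYPERV is now a bool: obj-$(CONFIG_HYPERV) can only ever expand to obj-y, so the old $(subst m,y,...) trick that forced built-in compilation is unnecessary, while the possibly-modular VMBus objects hang off the new CONFIG_HYPERV_VMBUS tristate (whose "default HYPERV" keeps existing configs building the same objects). C code shared between the two layers can tell the configurations apart with the standard kconfig helpers; a hedged sketch, with illustrative helper names not taken from this series:

#include <linux/kconfig.h>

/* IS_ENABLED() is true for =y and =m alike; IS_MODULE() only for =m.
 * The split lets always-built-in core code (CONFIG_HYPERV, bool)
 * coexist with a VMBus driver that may be a module.
 */
static inline bool hv_vmbus_configured(void)
{
        return IS_ENABLED(CONFIG_HYPERV_VMBUS);
}

static inline bool hv_vmbus_is_module(void)
{
        return IS_MODULE(CONFIG_HYPERV_VMBUS);
}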
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 7c7c66e0dc3f..162d6aeece7b 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -925,7 +925,7 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
/* Send a closing message */
- msg = &channel->close_msg.msg;
+ msg = &channel->close_msg;
msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
msg->child_relid = channel->offermsg.child_relid;
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
index 49898d10faff..e109a620c83f 100644
--- a/drivers/hv/hv_common.c
+++ b/drivers/hv/hv_common.c
@@ -257,7 +257,7 @@ static void hv_kmsg_dump_register(void)
static inline bool hv_output_page_exists(void)
{
- return hv_root_partition() || IS_ENABLED(CONFIG_HYPERV_VTL_MODE);
+ return hv_parent_partition() || IS_ENABLED(CONFIG_HYPERV_VTL_MODE);
}
void __init hv_get_partition_id(void)
@@ -377,7 +377,7 @@ int __init hv_common_init(void)
BUG_ON(!hyperv_pcpu_output_arg);
}
- if (hv_root_partition()) {
+ if (hv_parent_partition()) {
hv_synic_eventring_tail = alloc_percpu(u8 *);
BUG_ON(!hv_synic_eventring_tail);
}
@@ -531,7 +531,7 @@ int hv_common_cpu_init(unsigned int cpu)
if (msr_vp_index > hv_max_vp_index)
hv_max_vp_index = msr_vp_index;
- if (hv_root_partition()) {
+ if (hv_parent_partition()) {
synic_eventring_tail = (u8 **)this_cpu_ptr(hv_synic_eventring_tail);
*synic_eventring_tail = kcalloc(HV_SYNIC_SINT_COUNT,
sizeof(u8), flags);
@@ -558,7 +558,7 @@ int hv_common_cpu_die(unsigned int cpu)
* originally allocated memory is reused in hv_common_cpu_init().
*/
- if (hv_root_partition()) {
+ if (hv_parent_partition()) {
synic_eventring_tail = this_cpu_ptr(hv_synic_eventring_tail);
kfree(*synic_eventring_tail);
*synic_eventring_tail = NULL;
@@ -729,13 +729,17 @@ void hv_identify_partition_type(void)
* the root partition setting if also a Confidential VM.
*/
if ((ms_hyperv.priv_high & HV_CREATE_PARTITIONS) &&
- (ms_hyperv.priv_high & HV_CPU_MANAGEMENT) &&
!(ms_hyperv.priv_high & HV_ISOLATION)) {
- pr_info("Hyper-V: running as root partition\n");
- if (IS_ENABLED(CONFIG_MSHV_ROOT))
- hv_curr_partition_type = HV_PARTITION_TYPE_ROOT;
- else
+
+ if (!IS_ENABLED(CONFIG_MSHV_ROOT)) {
pr_crit("Hyper-V: CONFIG_MSHV_ROOT not enabled!\n");
+ } else if (ms_hyperv.priv_high & HV_CPU_MANAGEMENT) {
+ pr_info("Hyper-V: running as root partition\n");
+ hv_curr_partition_type = HV_PARTITION_TYPE_ROOT;
+ } else {
+ pr_info("Hyper-V: running as L1VH partition\n");
+ hv_curr_partition_type = HV_PARTITION_TYPE_L1VH;
+ }
}
}
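With HV_CPU_MANAGEMENT no longer gating the whole block, a partition that can create child partitions but does not manage physical CPUs is now classified as L1VH rather than being ignored. The call sites above switch from hv_root_partition() to hv_parent_partition(), which presumably covers both types; a sketch consistent with the hv_l1vh_partition() predicate used later in this series (the exact definition lives outside this excerpt):

/* Sketch: a "parent" partition is anything that can host child
 * partitions -- the classic root partition or the new L1VH type.
 */
static inline bool hv_parent_partition(void)
{
        return hv_root_partition() || hv_l1vh_partition();
}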
diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c
index 832885198643..b3de35ff6334 100644
--- a/drivers/hv/hv_utils_transport.c
+++ b/drivers/hv/hv_utils_transport.c
@@ -129,8 +129,7 @@ static int hvt_op_open(struct inode *inode, struct file *file)
* device gets released.
*/
hvt->mode = HVUTIL_TRANSPORT_CHARDEV;
- }
- else if (hvt->mode == HVUTIL_TRANSPORT_NETLINK) {
+ } else if (hvt->mode == HVUTIL_TRANSPORT_NETLINK) {
/*
* We're switching from netlink communication to using char
* device. Issue the reset first.
@@ -195,7 +194,7 @@ static void hvt_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
}
spin_unlock(&hvt_list_lock);
if (!hvt_found) {
- pr_warn("hvt_cn_callback: spurious message received!\n");
+ pr_warn("%s: spurious message received!\n", __func__);
return;
}
@@ -210,7 +209,7 @@ static void hvt_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
if (hvt->mode == HVUTIL_TRANSPORT_NETLINK)
hvt_found->on_msg(msg->data, msg->len);
else
- pr_warn("hvt_cn_callback: unexpected netlink message!\n");
+ pr_warn("%s: unexpected netlink message!\n", __func__);
mutex_unlock(&hvt->lock);
}
@@ -260,8 +259,9 @@ int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len,
hvt->outmsg_len = len;
hvt->on_read = on_read_cb;
wake_up_interruptible(&hvt->outmsg_q);
- } else
+ } else {
ret = -ENOMEM;
+ }
out_unlock:
mutex_unlock(&hvt->lock);
return ret;
diff --git a/drivers/hv/mshv.h b/drivers/hv/mshv.h
index 0340a67acd0a..d4813df92b9c 100644
--- a/drivers/hv/mshv.h
+++ b/drivers/hv/mshv.h
@@ -25,6 +25,4 @@ int hv_call_set_vp_registers(u32 vp_index, u64 partition_id, u16 count,
int hv_call_get_partition_property(u64 partition_id, u64 property_code,
u64 *property_value);
-int mshv_do_pre_guest_mode_work(ulong th_flags);
-
#endif /* _MSHV_H */
diff --git a/drivers/hv/mshv_common.c b/drivers/hv/mshv_common.c
index 6f227a8a5af7..aa2be51979fd 100644
--- a/drivers/hv/mshv_common.c
+++ b/drivers/hv/mshv_common.c
@@ -138,25 +138,3 @@ int hv_call_get_partition_property(u64 partition_id,
return 0;
}
EXPORT_SYMBOL_GPL(hv_call_get_partition_property);
-
-/*
- * Handle any pre-processing before going into the guest mode on this cpu, most
- * notably call schedule(). Must be invoked with both preemption and
- * interrupts enabled.
- *
- * Returns: 0 on success, -errno on error.
- */
-int mshv_do_pre_guest_mode_work(ulong th_flags)
-{
- if (th_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
- return -EINTR;
-
- if (th_flags & _TIF_NEED_RESCHED)
- schedule();
-
- if (th_flags & _TIF_NOTIFY_RESUME)
- resume_user_mode_work(NULL);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(mshv_do_pre_guest_mode_work);
diff --git a/drivers/hv/mshv_root_main.c b/drivers/hv/mshv_root_main.c
index 72df774e410a..e3b2bd417c46 100644
--- a/drivers/hv/mshv_root_main.c
+++ b/drivers/hv/mshv_root_main.c
@@ -8,6 +8,7 @@
* Authors: Microsoft Linux virtualization team
*/
+#include <linux/entry-virt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/fs.h>
@@ -37,12 +38,6 @@ MODULE_AUTHOR("Microsoft");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V root partition VMM interface /dev/mshv");
-/* TODO move this to mshyperv.h when needed outside driver */
-static inline bool hv_parent_partition(void)
-{
- return hv_root_partition();
-}
-
/* TODO move this to another file when debugfs code is added */
enum hv_stats_vp_counters { /* HV_THREAD_COUNTER */
#if defined(CONFIG_X86)
@@ -487,28 +482,6 @@ mshv_vp_wait_for_hv_kick(struct mshv_vp *vp)
return 0;
}
-static int mshv_pre_guest_mode_work(struct mshv_vp *vp)
-{
- const ulong work_flags = _TIF_NOTIFY_SIGNAL | _TIF_SIGPENDING |
- _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME;
- ulong th_flags;
-
- th_flags = read_thread_flags();
- while (th_flags & work_flags) {
- int ret;
-
- /* nb: following will call schedule */
- ret = mshv_do_pre_guest_mode_work(th_flags);
-
- if (ret)
- return ret;
-
- th_flags = read_thread_flags();
- }
-
- return 0;
-}
-
/* Must be called with interrupts enabled */
static long mshv_run_vp_with_root_scheduler(struct mshv_vp *vp)
{
@@ -529,9 +502,11 @@ static long mshv_run_vp_with_root_scheduler(struct mshv_vp *vp)
u32 flags = 0;
struct hv_output_dispatch_vp output;
- ret = mshv_pre_guest_mode_work(vp);
- if (ret)
- break;
+ if (__xfer_to_guest_mode_work_pending()) {
+ ret = xfer_to_guest_mode_handle_work();
+ if (ret)
+ break;
+ }
if (vp->run.flags.intercept_suspend)
flags |= HV_DISPATCH_VP_FLAG_CLEAR_INTERCEPT_SUSPEND;
@@ -2074,9 +2049,13 @@ static int __init hv_retrieve_scheduler_type(enum hv_scheduler_type *out)
/* Retrieve and stash the supported scheduler type */
static int __init mshv_retrieve_scheduler_type(struct device *dev)
{
- int ret;
+ int ret = 0;
+
+ if (hv_l1vh_partition())
+ hv_scheduler_type = HV_SCHEDULER_TYPE_CORE_SMT;
+ else
+ ret = hv_retrieve_scheduler_type(&hv_scheduler_type);
- ret = hv_retrieve_scheduler_type(&hv_scheduler_type);
if (ret)
return ret;
@@ -2203,9 +2182,6 @@ static int __init mshv_root_partition_init(struct device *dev)
{
int err;
- if (mshv_retrieve_scheduler_type(dev))
- return -ENODEV;
-
err = root_scheduler_init(dev);
if (err)
return err;
@@ -2227,7 +2203,7 @@ static int __init mshv_parent_partition_init(void)
struct device *dev;
union hv_hypervisor_version_info version_info;
- if (!hv_root_partition() || is_kdump_kernel())
+ if (!hv_parent_partition() || is_kdump_kernel())
return -ENODEV;
if (hv_get_hypervisor_version(&version_info))
@@ -2264,7 +2240,12 @@ static int __init mshv_parent_partition_init(void)
mshv_cpuhp_online = ret;
- ret = mshv_root_partition_init(dev);
+ ret = mshv_retrieve_scheduler_type(dev);
+ if (ret)
+ goto remove_cpu_state;
+
+ if (hv_root_partition())
+ ret = mshv_root_partition_init(dev);
if (ret)
goto remove_cpu_state;
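The driver-private pre-guest-mode work loop is gone in favor of the generic virt-entry helpers from <linux/entry-virt.h>: __xfer_to_guest_mode_work_pending() is a cheap TIF-flag check, and xfer_to_guest_mode_handle_work() runs scheduling, signal, and resume work, returning nonzero when the vCPU loop must bail back to userspace. A hedged sketch of the resulting run-loop shape (enter_guest_once() is a hypothetical stand-in for the dispatch hypercall):

        for (;;) {
                int ret;

                if (__xfer_to_guest_mode_work_pending()) {
                        ret = xfer_to_guest_mode_handle_work();
                        if (ret)
                                return ret;     /* e.g. -EINTR on a pending signal */
                }

                ret = enter_guest_once(vp);     /* hypothetical dispatch helper */
                if (ret)
                        return ret;
        }

This mirrors what KVM already does, so scheduler and signal semantics stay consistent across hypervisor drivers.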
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 2ed5a1e89d69..69591dc7bad2 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -322,7 +322,7 @@ static ssize_t out_read_index_show(struct device *dev,
&outbound);
if (ret < 0)
return ret;
- return sysfs_emit(buf, "%d\n", outbound.current_read_index);
+ return sysfs_emit(buf, "%u\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);
@@ -341,7 +341,7 @@ static ssize_t out_write_index_show(struct device *dev,
&outbound);
if (ret < 0)
return ret;
- return sysfs_emit(buf, "%d\n", outbound.current_write_index);
+ return sysfs_emit(buf, "%u\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);
@@ -1742,7 +1742,7 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel,
u32 target_cpu;
ssize_t ret;
- if (sscanf(buf, "%uu", &target_cpu) != 1)
+ if (sscanf(buf, "%u", &target_cpu) != 1)
return -EIO;
cpus_read_lock();
@@ -1947,7 +1947,7 @@ static const struct kobj_type vmbus_chan_ktype = {
* is running.
* For example, HV_NIC device is used either by uio_hv_generic or hv_netvsc at any given point of
* time, and "ring" sysfs is needed only when uio_hv_generic is bound to that device. To avoid
- * exposing the ring buffer by default, this function is reponsible to enable visibility of
+ * exposing the ring buffer by default, this function is responsible to enable visibility of
* ring for userspace to use.
* Note: Race conditions can happen with userspace and it is not encouraged to create new
* use-cases for this. This was added to maintain backward compatibility, while solving
@@ -2110,7 +2110,7 @@ int vmbus_device_register(struct hv_device *child_device_obj)
ret = vmbus_add_channel_kobj(child_device_obj,
child_device_obj->channel);
if (ret) {
- pr_err("Unable to register primary channeln");
+ pr_err("Unable to register primary channel\n");
goto err_kset_unregister;
}
hv_debug_add_dev_dir(child_device_obj);
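The "%uu" fix in target_cpu_store() is more subtle than the stray-newline one: in a scanf format the second 'u' is a literal that must match input after the number, but a failed literal match does not undo conversions that already happened, so sscanf() returned 1 either way and the != 1 check still passed. The typo was therefore latent rather than user-visible; a small userspace demonstration:

#include <stdio.h>

int main(void)
{
        unsigned int cpu;

        printf("%d\n", sscanf("5", "%uu", &cpu));  /* 1: literal 'u' fails after the conversion */
        printf("%d\n", sscanf("5u", "%uu", &cpu)); /* 1: literal 'u' consumed */
        printf("%d\n", sscanf("5", "%u", &cpu));   /* 1 */
        return 0;
}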
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 9d28fcf7cd2a..2760feb9f83b 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -769,6 +769,16 @@ config SENSORS_GL520SM
This driver can also be built as a module. If so, the module
will be called gl520sm.
+config SENSORS_GPD
+ tristate "GPD handhelds"
+ depends on X86 && DMI && HAS_IOPORT
+ help
+ If you say yes here you get support for fan speed readings and
+ fan control on GPD handheld devices.
+
+ This driver can also be built as a module. If so, the module
+ will be called gpd-fan.
+
config SENSORS_G760A
tristate "GMT G760A"
depends on I2C
@@ -1698,6 +1708,16 @@ config SENSORS_NCT6683
This driver can also be built as a module. If so, the module
will be called nct6683.
+config SENSORS_NCT6694
+ tristate "Nuvoton NCT6694 Hardware Monitor support"
+ depends on MFD_NCT6694
+ help
+ Say Y here to support Nuvoton NCT6694 hardware monitoring
+ functionality.
+
+ This driver can also be built as a module. If so, the module
+ will be called nct6694-hwmon.
+
config SENSORS_NCT6775_CORE
tristate
select REGMAP
@@ -1895,6 +1915,16 @@ config SENSORS_RASPBERRYPI_HWMON
This driver can also be built as a module. If so, the module
will be called raspberrypi-hwmon.
+config SENSORS_SA67MCU
+ tristate "Kontron sa67mcu hardware monitoring driver"
+ depends on MFD_SL28CPLD || COMPILE_TEST
+ help
+ If you say yes here you get support for the voltage and temperature
+ monitor of the sa67 board management controller.
+
+ This driver can also be built as a module. If so, the module
+ will be called sa67mcu-hwmon.
+
config SENSORS_SL28CPLD
tristate "Kontron sl28cpld hardware monitoring driver"
depends on MFD_SL28CPLD || COMPILE_TEST
@@ -1930,8 +1960,8 @@ config SENSORS_SHT21
tristate "Sensiron humidity and temperature sensors. SHT21 and compat."
depends on I2C
help
- If you say yes here you get support for the Sensiron SHT21, SHT25
- humidity and temperature sensors.
+ If you say yes here you get support for the Sensirion SHT20, SHT21,
+ SHT25 humidity and temperature sensors.
This driver can also be built as a module. If so, the module
will be called sht21.
@@ -2252,13 +2282,14 @@ config SENSORS_INA2XX
will be called ina2xx.
config SENSORS_INA238
- tristate "Texas Instruments INA238"
+ tristate "Texas Instruments INA238 and compatibles"
depends on I2C
select REGMAP_I2C
help
- If you say yes here you get support for the INA238 power monitor
- chip. This driver supports voltage, current, power and temperature
- measurements as well as alarm configuration.
+ If you say yes here you get support for INA228, INA237, INA238,
+ INA700, INA780, and SQ52206 power monitor chips. This driver supports
+ voltage, current, power, energy, and temperature measurements as well
+ as alarm configuration.
This driver can also be built as a module. If so, the module
will be called ina238.
@@ -2673,9 +2704,10 @@ config SENSORS_ASUS_EC
depends on ACPI_EC
help
If you say yes here you get support for the ACPI embedded controller
- hardware monitoring interface found in ASUS motherboards. The driver
- currently supports B550/X570 boards, although other ASUS boards might
- provide this monitoring interface as well.
+ hardware monitoring interface found in some ASUS motherboards. It
+ exposes sensors such as water flow and temperature, optional fans,
+ and additional temperature sensors (T_Sensor, chipset temperatures).
This driver can also be built as a module. If so, the module
will be called asus_ec_sensors.
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index cd8bc4752b4d..73b2abdcc6dd 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -88,6 +88,7 @@ obj-$(CONFIG_SENSORS_GIGABYTE_WATERFORCE) += gigabyte_waterforce.o
obj-$(CONFIG_SENSORS_GL518SM) += gl518sm.o
obj-$(CONFIG_SENSORS_GL520SM) += gl520sm.o
obj-$(CONFIG_SENSORS_GSC) += gsc-hwmon.o
+obj-$(CONFIG_SENSORS_GPD) += gpd-fan.o
obj-$(CONFIG_SENSORS_GPIO_FAN) += gpio-fan.o
obj-$(CONFIG_SENSORS_GXP_FAN_CTRL) += gxp-fan-ctrl.o
obj-$(CONFIG_SENSORS_HIH6130) += hih6130.o
@@ -174,6 +175,7 @@ obj-$(CONFIG_SENSORS_MLXREG_FAN) += mlxreg-fan.o
obj-$(CONFIG_SENSORS_MENF21BMC_HWMON) += menf21bmc_hwmon.o
obj-$(CONFIG_SENSORS_MR75203) += mr75203.o
obj-$(CONFIG_SENSORS_NCT6683) += nct6683.o
+obj-$(CONFIG_SENSORS_NCT6694) += nct6694-hwmon.o
obj-$(CONFIG_SENSORS_NCT6775_CORE) += nct6775-core.o
nct6775-objs := nct6775-platform.o
obj-$(CONFIG_SENSORS_NCT6775) += nct6775.o
@@ -196,6 +198,7 @@ obj-$(CONFIG_SENSORS_PT5161L) += pt5161l.o
obj-$(CONFIG_SENSORS_PWM_FAN) += pwm-fan.o
obj-$(CONFIG_SENSORS_QNAP_MCU_HWMON) += qnap-mcu-hwmon.o
obj-$(CONFIG_SENSORS_RASPBERRYPI_HWMON) += raspberrypi-hwmon.o
+obj-$(CONFIG_SENSORS_SA67MCU) += sa67mcu-hwmon.o
obj-$(CONFIG_SENSORS_SBTSI) += sbtsi_temp.o
obj-$(CONFIG_SENSORS_SBRMI) += sbrmi.o
obj-$(CONFIG_SENSORS_SCH56XX_COMMON)+= sch56xx-common.o
diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
index 4ac554731e98..34a8f6b834c9 100644
--- a/drivers/hwmon/asus-ec-sensors.c
+++ b/drivers/hwmon/asus-ec-sensors.c
@@ -49,15 +49,19 @@ static char *mutex_path_override;
*/
#define ASUS_EC_MAX_BANK 3
-#define ACPI_LOCK_DELAY_MS 500
+#define ACPI_LOCK_DELAY_MS 800
/* ACPI mutex for locking access to the EC for the firmware */
#define ASUS_HW_ACCESS_MUTEX_ASMX "\\AMW0.ASMX"
#define ASUS_HW_ACCESS_MUTEX_RMTW_ASMX "\\RMTW.ASMX"
+#define ASUS_HW_ACCESS_MUTEX_SB_PC00_LPCB_SIO1_MUT0 "\\_SB.PC00.LPCB.SIO1.MUT0"
+
#define ASUS_HW_ACCESS_MUTEX_SB_PCI0_SBRG_SIO1_MUT0 "\\_SB_.PCI0.SBRG.SIO1.MUT0"
+#define ASUS_HW_ACCESS_MUTEX_SB_PCI0_LPCB_SIO1_MUT0 "\\_SB_.PCI0.LPCB.SIO1.MUT0"
+
#define MAX_IDENTICAL_BOARD_VARIATIONS 3
/* Moniker for the ACPI global lock (':' is not allowed in ASL identifiers) */
@@ -115,10 +119,18 @@ enum ec_sensors {
ec_sensor_fan_cpu_opt,
/* VRM heat sink fan [RPM] */
ec_sensor_fan_vrm_hs,
+ /* VRM east heat sink fan [RPM] */
+ ec_sensor_fan_vrme_hs,
+ /* VRM west heat sink fan [RPM] */
+ ec_sensor_fan_vrmw_hs,
/* Chipset fan [RPM] */
ec_sensor_fan_chipset,
/* Water flow sensor reading [RPM] */
ec_sensor_fan_water_flow,
+ /* USB4 fan [RPM] */
+ ec_sensor_fan_usb4,
+ /* M.2 fan [RPM] */
+ ec_sensor_fan_m2,
/* CPU current [A] */
ec_sensor_curr_cpu,
/* "Water_In" temperature sensor reading [℃] */
@@ -148,8 +160,12 @@ enum ec_sensors {
#define SENSOR_IN_CPU_CORE BIT(ec_sensor_in_cpu_core)
#define SENSOR_FAN_CPU_OPT BIT(ec_sensor_fan_cpu_opt)
#define SENSOR_FAN_VRM_HS BIT(ec_sensor_fan_vrm_hs)
+#define SENSOR_FAN_VRME_HS BIT(ec_sensor_fan_vrme_hs)
+#define SENSOR_FAN_VRMW_HS BIT(ec_sensor_fan_vrmw_hs)
#define SENSOR_FAN_CHIPSET BIT(ec_sensor_fan_chipset)
#define SENSOR_FAN_WATER_FLOW BIT(ec_sensor_fan_water_flow)
+#define SENSOR_FAN_USB4 BIT(ec_sensor_fan_usb4)
+#define SENSOR_FAN_M2 BIT(ec_sensor_fan_m2)
#define SENSOR_CURR_CPU BIT(ec_sensor_curr_cpu)
#define SENSOR_TEMP_WATER_IN BIT(ec_sensor_temp_water_in)
#define SENSOR_TEMP_WATER_OUT BIT(ec_sensor_temp_water_out)
@@ -166,9 +182,12 @@ enum board_family {
family_amd_500_series,
family_amd_600_series,
family_amd_800_series,
+ family_amd_wrx_90,
+ family_intel_200_series,
family_intel_300_series,
family_intel_400_series,
- family_intel_600_series
+ family_intel_600_series,
+ family_intel_700_series
};
/*
@@ -275,6 +294,33 @@ static const struct ec_sensor_info sensors_family_amd_800[] = {
EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xb0),
};
+static const struct ec_sensor_info sensors_family_amd_wrx_90[] = {
+ [ec_sensor_temp_cpu_package] =
+ EC_SENSOR("CPU Package", hwmon_temp, 1, 0x00, 0x31),
+ [ec_sensor_fan_cpu_opt] =
+ EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xb0),
+ [ec_sensor_fan_vrmw_hs] =
+ EC_SENSOR("VRMW HS", hwmon_fan, 2, 0x00, 0xb4),
+ [ec_sensor_fan_usb4] = EC_SENSOR("USB4", hwmon_fan, 2, 0x00, 0xb6),
+ [ec_sensor_fan_vrme_hs] =
+ EC_SENSOR("VRME HS", hwmon_fan, 2, 0x00, 0xbc),
+ [ec_sensor_fan_m2] = EC_SENSOR("M.2", hwmon_fan, 2, 0x00, 0xbe),
+ [ec_sensor_temp_t_sensor] =
+ EC_SENSOR("T_Sensor", hwmon_temp, 1, 0x01, 0x04),
+};
+
+static const struct ec_sensor_info sensors_family_intel_200[] = {
+ [ec_sensor_temp_chipset] =
+ EC_SENSOR("Chipset", hwmon_temp, 1, 0x00, 0x3a),
+ [ec_sensor_temp_cpu] = EC_SENSOR("CPU", hwmon_temp, 1, 0x00, 0x3b),
+ [ec_sensor_temp_mb] =
+ EC_SENSOR("Motherboard", hwmon_temp, 1, 0x00, 0x3c),
+ [ec_sensor_temp_t_sensor] =
+ EC_SENSOR("T_Sensor", hwmon_temp, 1, 0x00, 0x3d),
+ [ec_sensor_fan_cpu_opt] =
+ EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xbc),
+};
+
static const struct ec_sensor_info sensors_family_intel_300[] = {
[ec_sensor_temp_chipset] =
EC_SENSOR("Chipset", hwmon_temp, 1, 0x00, 0x3a),
@@ -323,6 +369,16 @@ static const struct ec_sensor_info sensors_family_intel_600[] = {
EC_SENSOR("Water_Block_In", hwmon_temp, 1, 0x01, 0x02),
};
+static const struct ec_sensor_info sensors_family_intel_700[] = {
+ [ec_sensor_temp_t_sensor] =
+ EC_SENSOR("T_Sensor", hwmon_temp, 1, 0x01, 0x09),
+ [ec_sensor_temp_t_sensor_2] =
+ EC_SENSOR("T_Sensor 2", hwmon_temp, 1, 0x01, 0x05),
+ [ec_sensor_temp_vrm] = EC_SENSOR("VRM", hwmon_temp, 1, 0x00, 0x33),
+ [ec_sensor_fan_cpu_opt] =
+ EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xb0),
+};
+
/* Shortcuts for common combinations */
#define SENSOR_SET_TEMP_CHIPSET_CPU_MB \
(SENSOR_TEMP_CHIPSET | SENSOR_TEMP_CPU | SENSOR_TEMP_MB)
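Each EC_SENSOR(label, type, size, bank, address) entry in the arrays above pins a sensor to an EC register location: one byte for temperatures, two for fan tachometers, within a numbered register bank. A sketch of the descriptor those initializers plausibly fill in (field names here are assumptions; the real struct is defined earlier in this file):

struct ec_sensor_info_sketch {
        const char *label;              /* "CPU_Opt", "T_Sensor", ... */
        enum hwmon_sensor_types type;   /* hwmon_temp or hwmon_fan */
        u8 size;                        /* bytes to read: 1 (temp), 2 (fan) */
        u8 bank;                        /* EC register bank */
        u8 index;                       /* register offset within the bank */
};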
@@ -343,6 +399,52 @@ struct ec_board_info {
enum board_family family;
};
+static const struct ec_board_info board_info_crosshair_viii_dark_hero = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_crosshair_viii_hero = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET |
+ SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU |
+ SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_crosshair_viii_impact = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+ SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
+ SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_crosshair_x670e_gene = {
+ .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_MB | SENSOR_TEMP_VRM,
+ .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
+ .family = family_amd_600_series,
+};
+
+static const struct ec_board_info board_info_crosshair_x670e_hero = {
+ .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
+ SENSOR_TEMP_MB | SENSOR_TEMP_VRM |
+ SENSOR_SET_TEMP_WATER,
+ .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
+ .family = family_amd_600_series,
+};
+
static const struct ec_board_info board_info_maximus_vi_hero = {
.sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
SENSOR_TEMP_T_SENSOR |
@@ -352,6 +454,22 @@ static const struct ec_board_info board_info_maximus_vi_hero = {
.family = family_intel_300_series,
};
+static const struct ec_board_info board_info_maximus_xi_hero = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_intel_300_series,
+};
+
+static const struct ec_board_info board_info_maximus_z690_formula = {
+ .sensors = SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+ SENSOR_SET_TEMP_WATER | SENSOR_FAN_WATER_FLOW,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_RMTW_ASMX,
+ .family = family_intel_600_series,
+};
+
static const struct ec_board_info board_info_prime_x470_pro = {
.sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
@@ -376,28 +494,11 @@ static const struct ec_board_info board_info_prime_x670e_pro_wifi = {
.family = family_amd_600_series,
};
-static const struct ec_board_info board_info_pro_art_x570_creator_wifi = {
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
- SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CPU_OPT |
- SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
-};
-
-static const struct ec_board_info board_info_pro_art_x670E_creator_wifi = {
- .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
- SENSOR_TEMP_MB | SENSOR_TEMP_VRM |
- SENSOR_TEMP_T_SENSOR,
- .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
- .family = family_amd_600_series,
-};
-
-static const struct ec_board_info board_info_pro_art_x870E_creator_wifi = {
- .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
- SENSOR_TEMP_MB | SENSOR_TEMP_VRM |
+static const struct ec_board_info board_info_prime_z270_a = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CPU_OPT,
- .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
- .family = family_amd_800_series,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_SB_PCI0_LPCB_SIO1_MUT0,
+ .family = family_intel_200_series,
};
static const struct ec_board_info board_info_pro_art_b550_creator = {
@@ -408,72 +509,43 @@ static const struct ec_board_info board_info_pro_art_b550_creator = {
.family = family_amd_500_series,
};
-static const struct ec_board_info board_info_pro_ws_x570_ace = {
+static const struct ec_board_info board_info_pro_art_x570_creator_wifi = {
.sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
- SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET |
+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CPU_OPT |
SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
.mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
.family = family_amd_500_series,
};
-static const struct ec_board_info board_info_crosshair_x670e_hero = {
+static const struct ec_board_info board_info_pro_art_x670E_creator_wifi = {
.sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
SENSOR_TEMP_MB | SENSOR_TEMP_VRM |
- SENSOR_SET_TEMP_WATER,
+ SENSOR_TEMP_T_SENSOR,
.mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
.family = family_amd_600_series,
};
-static const struct ec_board_info board_info_crosshair_x670e_gene = {
+static const struct ec_board_info board_info_pro_art_x870E_creator_wifi = {
.sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
- SENSOR_TEMP_T_SENSOR |
- SENSOR_TEMP_MB | SENSOR_TEMP_VRM,
- .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
- .family = family_amd_600_series,
-};
-
-static const struct ec_board_info board_info_crosshair_viii_dark_hero = {
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR |
- SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
- SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW |
- SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
-};
-
-static const struct ec_board_info board_info_crosshair_viii_hero = {
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR |
- SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
- SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET |
- SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU |
- SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
-};
-
-static const struct ec_board_info board_info_maximus_xi_hero = {
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR |
- SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
- SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_intel_300_series,
+ SENSOR_TEMP_MB | SENSOR_TEMP_VRM |
+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CPU_OPT,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_SB_PCI0_SBRG_SIO1_MUT0,
+ .family = family_amd_800_series,
};
-static const struct ec_board_info board_info_maximus_z690_formula = {
- .sensors = SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
- SENSOR_SET_TEMP_WATER | SENSOR_FAN_WATER_FLOW,
+static const struct ec_board_info board_info_pro_ws_wrx90e_sage_se = {
+ /* Board also has a nct6798 with 7 more fans and temperatures */
+ .sensors = SENSOR_TEMP_CPU_PACKAGE | SENSOR_TEMP_T_SENSOR |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_USB4 | SENSOR_FAN_M2 |
+ SENSOR_FAN_VRME_HS | SENSOR_FAN_VRMW_HS,
.mutex_path = ASUS_HW_ACCESS_MUTEX_RMTW_ASMX,
- .family = family_intel_600_series,
+ .family = family_amd_wrx_90,
};
-static const struct ec_board_info board_info_crosshair_viii_impact = {
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
- SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
- SENSOR_IN_CPU_CORE,
+static const struct ec_board_info board_info_pro_ws_x570_ace = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
.mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
.family = family_amd_500_series,
};
@@ -495,6 +567,20 @@ static const struct ec_board_info board_info_strix_b550_i_gaming = {
.family = family_amd_500_series,
};
+static const struct ec_board_info board_info_strix_b650e_i_gaming = {
+ .sensors = SENSOR_TEMP_VRM | SENSOR_TEMP_T_SENSOR |
+ SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_IN_CPU_CORE,
+ .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
+ .family = family_amd_600_series,
+};
+
+static const struct ec_board_info board_info_strix_b850_i_gaming_wifi = {
+ .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
+ SENSOR_TEMP_MB | SENSOR_TEMP_VRM,
+ .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
+ .family = family_amd_800_series,
+};
+
static const struct ec_board_info board_info_strix_x570_e_gaming = {
.sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
SENSOR_TEMP_T_SENSOR |
@@ -528,6 +614,35 @@ static const struct ec_board_info board_info_strix_x570_i_gaming = {
.family = family_amd_500_series,
};
+static const struct ec_board_info board_info_strix_x670e_e_gaming_wifi = {
+ .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
+ SENSOR_TEMP_MB | SENSOR_TEMP_VRM,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_SB_PCI0_SBRG_SIO1_MUT0,
+ .family = family_amd_600_series,
+};
+
+static const struct ec_board_info board_info_strix_x670e_i_gaming_wifi = {
+ .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
+ SENSOR_TEMP_MB | SENSOR_TEMP_VRM,
+ .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
+ .family = family_amd_600_series,
+};
+
+static const struct ec_board_info board_info_strix_x870_i_gaming_wifi = {
+ .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
+ SENSOR_TEMP_MB | SENSOR_TEMP_VRM,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_SB_PCI0_SBRG_SIO1_MUT0,
+ .family = family_amd_800_series,
+};
+
+static const struct ec_board_info board_info_strix_x870e_e_gaming_wifi = {
+ .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
+ SENSOR_TEMP_MB | SENSOR_TEMP_VRM |
+ SENSOR_FAN_CPU_OPT,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_SB_PCI0_SBRG_SIO1_MUT0,
+ .family = family_amd_800_series,
+};
+
static const struct ec_board_info board_info_strix_z390_f_gaming = {
.sensors = SENSOR_TEMP_CHIPSET | SENSOR_TEMP_VRM |
SENSOR_TEMP_T_SENSOR |
@@ -554,6 +669,35 @@ static const struct ec_board_info board_info_strix_z690_a_gaming_wifi_d4 = {
.family = family_intel_600_series,
};
+static const struct ec_board_info board_info_strix_z690_e_gaming_wifi = {
+ .sensors = SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_RMTW_ASMX,
+ .family = family_intel_600_series,
+};
+
+static const struct ec_board_info board_info_strix_z790_e_gaming_wifi_ii = {
+ .sensors = SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+ SENSOR_FAN_CPU_OPT,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_SB_PC00_LPCB_SIO1_MUT0,
+ .family = family_intel_700_series,
+};
+
+static const struct ec_board_info board_info_strix_z790_i_gaming_wifi = {
+ .sensors = SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_T_SENSOR_2 |
+ SENSOR_TEMP_VRM,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_SB_PC00_LPCB_SIO1_MUT0,
+ .family = family_intel_700_series,
+};
+
+static const struct ec_board_info board_info_tuf_gaming_x670e_plus = {
+ .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
+ SENSOR_TEMP_MB | SENSOR_TEMP_VRM |
+ SENSOR_TEMP_WATER_IN | SENSOR_TEMP_WATER_OUT |
+ SENSOR_FAN_CPU_OPT,
+ .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
+ .family = family_amd_600_series,
+};
+
static const struct ec_board_info board_info_zenith_ii_extreme = {
.sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_T_SENSOR |
SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
@@ -566,15 +710,6 @@ static const struct ec_board_info board_info_zenith_ii_extreme = {
.family = family_amd_500_series,
};
-static const struct ec_board_info board_info_tuf_gaming_x670e_plus = {
- .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
- SENSOR_TEMP_MB | SENSOR_TEMP_VRM |
- SENSOR_TEMP_WATER_IN | SENSOR_TEMP_WATER_OUT |
- SENSOR_FAN_CPU_OPT,
- .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
- .family = family_amd_600_series,
-};
-
#define DMI_EXACT_MATCH_ASUS_BOARD_NAME(name, board_info) \
{ \
.matches = { \
@@ -594,14 +729,18 @@ static const struct dmi_system_id dmi_table[] = {
&board_info_prime_x570_pro),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X670E-PRO WIFI",
&board_info_prime_x670e_pro_wifi),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME Z270-A",
+ &board_info_prime_z270_a),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ProArt B550-CREATOR",
+ &board_info_pro_art_b550_creator),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ProArt X570-CREATOR WIFI",
&board_info_pro_art_x570_creator_wifi),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ProArt X670E-CREATOR WIFI",
&board_info_pro_art_x670E_creator_wifi),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ProArt X870E-CREATOR WIFI",
&board_info_pro_art_x870E_creator_wifi),
- DMI_EXACT_MATCH_ASUS_BOARD_NAME("ProArt B550-CREATOR",
- &board_info_pro_art_b550_creator),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("Pro WS WRX90E-SAGE SE",
+ &board_info_pro_ws_wrx90e_sage_se),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("Pro WS X570-ACE",
&board_info_pro_ws_x570_ace),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII DARK HERO",
@@ -612,22 +751,26 @@ static const struct dmi_system_id dmi_table[] = {
&board_info_crosshair_viii_hero),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII HERO (WI-FI)",
&board_info_crosshair_viii_hero),
- DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR X670E HERO",
- &board_info_crosshair_x670e_hero),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII IMPACT",
+ &board_info_crosshair_viii_impact),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR X670E GENE",
&board_info_crosshair_x670e_gene),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR X670E HERO",
+ &board_info_crosshair_x670e_hero),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG MAXIMUS XI HERO",
&board_info_maximus_xi_hero),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG MAXIMUS XI HERO (WI-FI)",
&board_info_maximus_xi_hero),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG MAXIMUS Z690 FORMULA",
&board_info_maximus_z690_formula),
- DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII IMPACT",
- &board_info_crosshair_viii_impact),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B550-E GAMING",
&board_info_strix_b550_e_gaming),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B550-I GAMING",
&board_info_strix_b550_i_gaming),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B650E-I GAMING WIFI",
+ &board_info_strix_b650e_i_gaming),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B850-I GAMING WIFI",
+ &board_info_strix_b850_i_gaming_wifi),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-E GAMING",
&board_info_strix_x570_e_gaming),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-E GAMING WIFI II",
@@ -636,18 +779,34 @@ static const struct dmi_system_id dmi_table[] = {
&board_info_strix_x570_f_gaming),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-I GAMING",
&board_info_strix_x570_i_gaming),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X670E-E GAMING WIFI",
+ &board_info_strix_x670e_e_gaming_wifi),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X670E-I GAMING WIFI",
+ &board_info_strix_x670e_i_gaming_wifi),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X870-I GAMING WIFI",
+ &board_info_strix_x870_i_gaming_wifi),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X870E-E GAMING WIFI",
+ &board_info_strix_x870e_e_gaming_wifi),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX Z390-F GAMING",
&board_info_strix_z390_f_gaming),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX Z490-F GAMING",
&board_info_strix_z490_f_gaming),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX Z690-A GAMING WIFI D4",
&board_info_strix_z690_a_gaming_wifi_d4),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX Z690-E GAMING WIFI",
+ &board_info_strix_z690_e_gaming_wifi),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX Z790-E GAMING WIFI II",
+ &board_info_strix_z790_e_gaming_wifi_ii),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX Z790-I GAMING WIFI",
+ &board_info_strix_z790_i_gaming_wifi),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG ZENITH II EXTREME",
&board_info_zenith_ii_extreme),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG ZENITH II EXTREME ALPHA",
&board_info_zenith_ii_extreme),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("TUF GAMING X670E-PLUS",
&board_info_tuf_gaming_x670e_plus),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("TUF GAMING X670E-PLUS WIFI",
+ &board_info_tuf_gaming_x670e_plus),
{},
};
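Support is keyed entirely off the DMI baseboard name, which is why two SKUs with identical sensor layouts (such as a board and its WI-FI variant above) can point at the same ec_board_info. A hedged sketch of the probe-time lookup such a table enables, using the stock dmi_first_match() helper:

static const struct ec_board_info *asus_ec_lookup_board(void)
{
        /* dmi_first_match() returns the first table entry whose
         * DMI_EXACT_MATCH conditions all hold, or NULL.
         */
        const struct dmi_system_id *dmi = dmi_first_match(dmi_table);

        return dmi ? dmi->driver_data : NULL;
}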
@@ -1115,6 +1274,12 @@ static int asus_ec_probe(struct platform_device *pdev)
case family_amd_800_series:
ec_data->sensors_info = sensors_family_amd_800;
break;
+ case family_amd_wrx_90:
+ ec_data->sensors_info = sensors_family_amd_wrx_90;
+ break;
+ case family_intel_200_series:
+ ec_data->sensors_info = sensors_family_intel_200;
+ break;
case family_intel_300_series:
ec_data->sensors_info = sensors_family_intel_300;
break;
@@ -1124,6 +1289,9 @@ static int asus_ec_probe(struct platform_device *pdev)
case family_intel_600_series:
ec_data->sensors_info = sensors_family_intel_600;
break;
+ case family_intel_700_series:
+ ec_data->sensors_info = sensors_family_intel_700;
+ break;
default:
dev_err(dev, "Unknown board family: %d",
ec_data->board_info->family);
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 1b9203b20d70..ad79db5a183e 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -122,29 +122,29 @@ static const struct tjmax tjmax_table[] = {
};
struct tjmax_model {
- u8 model;
- u8 mask;
+ u32 vfm;
+ u8 stepping_mask;
int tjmax;
};
#define ANY 0xff
static const struct tjmax_model tjmax_model_table[] = {
- { 0x1c, 10, 100000 }, /* D4xx, K4xx, N4xx, D5xx, K5xx, N5xx */
- { 0x1c, ANY, 90000 }, /* Z5xx, N2xx, possibly others
- * Note: Also matches 230 and 330,
- * which are covered by tjmax_table
- */
- { 0x26, ANY, 90000 }, /* Atom Tunnel Creek (Exx), Lincroft (Z6xx)
- * Note: TjMax for E6xxT is 110C, but CPU type
- * is undetectable by software
- */
- { 0x27, ANY, 90000 }, /* Atom Medfield (Z2460) */
- { 0x35, ANY, 90000 }, /* Atom Clover Trail/Cloverview (Z27x0) */
- { 0x36, ANY, 100000 }, /* Atom Cedar Trail/Cedarview (N2xxx, D2xxx)
- * Also matches S12x0 (stepping 9), covered by
- * PCI table
- */
+ { INTEL_ATOM_BONNELL, 10, 100000 }, /* D4xx, K4xx, N4xx, D5xx, K5xx, N5xx */
+ { INTEL_ATOM_BONNELL, ANY, 90000 }, /* Z5xx, N2xx, possibly others
+ * Note: Also matches 230 and 330,
+ * which are covered by tjmax_table
+ */
+ { INTEL_ATOM_BONNELL_MID, ANY, 90000 }, /* Atom Tunnel Creek (Exx), Lincroft (Z6xx)
+ * Note: TjMax for E6xxT is 110C, but CPU type
+ * is undetectable by software
+ */
+ { INTEL_ATOM_SALTWELL_MID, ANY, 90000 }, /* Atom Medfield (Z2460) */
+ { INTEL_ATOM_SALTWELL_TABLET, ANY, 90000 }, /* Atom Clover Trail/Cloverview (Z27x0) */
+ { INTEL_ATOM_SALTWELL, ANY, 100000 }, /* Atom Cedar Trail/Cedarview (N2xxx, D2xxx)
+ * Also matches S12x0 (stepping 9), covered by
+ * PCI table
+ */
};
static bool is_pkg_temp_data(struct temp_data *tdata)
@@ -180,6 +180,11 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
}
pci_dev_put(host_bridge);
+ /*
+ * This literally looks for "CPU XXX" in the model string; it is
+ * a pure string search, with no cross-check against the model
+ * number.
+ */
for (i = 0; i < ARRAY_SIZE(tjmax_table); i++) {
if (strstr(c->x86_model_id, tjmax_table[i].id))
return tjmax_table[i].tjmax;
@@ -187,17 +192,18 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
for (i = 0; i < ARRAY_SIZE(tjmax_model_table); i++) {
const struct tjmax_model *tm = &tjmax_model_table[i];
- if (c->x86_model == tm->model &&
- (tm->mask == ANY || c->x86_stepping == tm->mask))
+ if (c->x86_vfm == tm->vfm &&
+ (tm->stepping_mask == ANY ||
+ tm->stepping_mask == c->x86_stepping))
return tm->tjmax;
}
/* Early chips have no MSR for TjMax */
- if (c->x86_model == 0xf && c->x86_stepping < 4)
+ if (c->x86_vfm == INTEL_CORE2_MEROM && c->x86_stepping < 4)
usemsr_ee = 0;
- if (c->x86_model > 0xe && usemsr_ee) {
+ if (c->x86_vfm > INTEL_CORE_YONAH && usemsr_ee) {
u8 platform_id;
/*
@@ -211,7 +217,8 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
"Unable to access MSR 0x17, assuming desktop"
" CPU\n");
usemsr_ee = 0;
- } else if (c->x86_model < 0x17 && !(eax & 0x10000000)) {
+ } else if (c->x86_vfm < INTEL_CORE2_PENRYN &&
+ !(eax & 0x10000000)) {
/*
* Trust bit 28 up to Penryn, I could not find any
* documentation on that; if you happen to know
@@ -226,7 +233,7 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
* Mobile Penryn CPU seems to be platform ID 7 or 5
* (guesswork)
*/
- if (c->x86_model == 0x17 &&
+ if (c->x86_vfm == INTEL_CORE2_PENRYN &&
(platform_id == 5 || platform_id == 7)) {
/*
* If MSR EE bit is set, set it to 90 degrees C,
@@ -258,18 +265,6 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
return tjmax;
}
-static bool cpu_has_tjmax(struct cpuinfo_x86 *c)
-{
- u8 model = c->x86_model;
-
- return model > 0xe &&
- model != 0x1c &&
- model != 0x26 &&
- model != 0x27 &&
- model != 0x35 &&
- model != 0x36;
-}
-
static int get_tjmax(struct temp_data *tdata, struct device *dev)
{
struct cpuinfo_x86 *c = &cpu_data(tdata->cpu);
@@ -287,8 +282,7 @@ static int get_tjmax(struct temp_data *tdata, struct device *dev)
*/
err = rdmsr_safe_on_cpu(tdata->cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
if (err) {
- if (cpu_has_tjmax(c))
- dev_warn(dev, "Unable to read TjMax from CPU %u\n", tdata->cpu);
+ dev_warn_once(dev, "Unable to read TjMax from CPU %u\n", tdata->cpu);
} else {
val = (eax >> 16) & 0xff;
if (val)
@@ -460,7 +454,7 @@ static int chk_ucode_version(unsigned int cpu)
* Readings might stop update when processor visited too deep sleep,
* fixed for stepping D0 (6EC).
*/
- if (c->x86_model == 0xe && c->x86_stepping < 0xc && c->microcode < 0x39) {
+ if (c->x86_vfm == INTEL_CORE_YONAH && c->x86_stepping < 0xc && c->microcode < 0x39) {
pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n");
return -ENODEV;
}
@@ -580,7 +574,7 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
* MSR_IA32_TEMPERATURE_TARGET register. Atoms don't have the register
* at all.
*/
- if (c->x86_model > 0xe && c->x86_model != 0x1c)
+ if (c->x86_vfm > INTEL_CORE_YONAH && c->x86_vfm != INTEL_ATOM_BONNELL)
if (get_ttarget(tdata, &pdev->dev) >= 0)
tdata->attr_size++;
@@ -793,7 +787,9 @@ static int __init coretemp_init(void)
/*
* CPUID.06H.EAX[0] indicates whether the CPU has thermal
* sensors. We check this bit only, all the early CPUs
- * without thermal sensors will be filtered out.
+ * without thermal sensors will be filtered out. This
+ * includes all the Family 5 and Family 15 (Pentium 4)
+ * models, since they never set the CPUID bit.
*/
if (!x86_match_cpu(coretemp_ids))
return -ENODEV;
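The coretemp conversion replaces raw family-6 model numbers (0x0e, 0x0f, 0x17, 0x1c, ...) with x86_vfm, a packed vendor/family/model value, and named INTEL_* constants. A sketch of the packing behind those comparisons (shift widths are illustrative, not copied from the kernel header):

/* Vendor and family occupy the high bits, the 8-bit model the low
 * bits, so CPUIDs of one vendor/family order by model number.
 */
#define VFM_SKETCH(vendor, family, model) \
        (((vendor) << 16) | ((family) << 8) | (model))

#define YONAH_SKETCH    VFM_SKETCH(0, 6, 0x0e)  /* cf. INTEL_CORE_YONAH */
#define MEROM_SKETCH    VFM_SKETCH(0, 6, 0x0f)  /* cf. INTEL_CORE2_MEROM */
#define PENRYN_SKETCH   VFM_SKETCH(0, 6, 0x17)  /* cf. INTEL_CORE2_PENRYN */

A check like c->x86_vfm < INTEL_CORE2_PENRYN therefore keeps the old "model < 0x17" meaning while also pinning vendor and family, which the raw model compare never did.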
diff --git a/drivers/hwmon/cros_ec_hwmon.c b/drivers/hwmon/cros_ec_hwmon.c
index 9991c3fa020a..48331703f2f5 100644
--- a/drivers/hwmon/cros_ec_hwmon.c
+++ b/drivers/hwmon/cros_ec_hwmon.c
@@ -7,20 +7,34 @@
#include <linux/device.h>
#include <linux/hwmon.h>
+#include <linux/math.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/thermal.h>
#include <linux/types.h>
#include <linux/units.h>
#define DRV_NAME "cros-ec-hwmon"
+#define CROS_EC_HWMON_PWM_GET_FAN_DUTY_CMD_VERSION 0
+#define CROS_EC_HWMON_PWM_SET_FAN_DUTY_CMD_VERSION 1
+#define CROS_EC_HWMON_THERMAL_AUTO_FAN_CTRL_CMD_VERSION 2
+
struct cros_ec_hwmon_priv {
struct cros_ec_device *cros_ec;
const char *temp_sensor_names[EC_TEMP_SENSOR_ENTRIES + EC_TEMP_SENSOR_B_ENTRIES];
u8 usable_fans;
+ bool fan_control_supported;
+ u8 manual_fans; /* bitmask of fans set to manual control */
+ u8 manual_fan_pwm[EC_FAN_SPEED_ENTRIES];
+};
+
+struct cros_ec_hwmon_cooling_priv {
+ struct cros_ec_hwmon_priv *hwmon_priv;
+ u8 index;
};
static int cros_ec_hwmon_read_fan_speed(struct cros_ec_device *cros_ec, u8 index, u16 *speed)
@@ -36,6 +50,42 @@ static int cros_ec_hwmon_read_fan_speed(struct cros_ec_device *cros_ec, u8 index
return 0;
}
+static int cros_ec_hwmon_read_pwm_value(struct cros_ec_device *cros_ec, u8 index, u8 *pwm_value)
+{
+ struct ec_params_pwm_get_fan_duty req = {
+ .fan_idx = index,
+ };
+ struct ec_response_pwm_get_fan_duty resp;
+ int ret;
+
+ ret = cros_ec_cmd(cros_ec, CROS_EC_HWMON_PWM_GET_FAN_DUTY_CMD_VERSION,
+ EC_CMD_PWM_GET_FAN_DUTY, &req, sizeof(req), &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ *pwm_value = (u8)DIV_ROUND_CLOSEST(le32_to_cpu(resp.percent) * 255, 100);
+ return 0;
+}
+
+static int cros_ec_hwmon_read_pwm_enable(struct cros_ec_device *cros_ec, u8 index,
+ u8 *control_method)
+{
+ struct ec_params_auto_fan_ctrl_v2 req = {
+ .cmd = EC_AUTO_FAN_CONTROL_CMD_GET,
+ .fan_idx = index,
+ };
+ struct ec_response_auto_fan_control resp;
+ int ret;
+
+ ret = cros_ec_cmd(cros_ec, CROS_EC_HWMON_THERMAL_AUTO_FAN_CTRL_CMD_VERSION,
+ EC_CMD_THERMAL_AUTO_FAN_CTRL, &req, sizeof(req), &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ *control_method = resp.is_auto ? 2 : 1;
+ return 0;
+}
+
static int cros_ec_hwmon_read_temp(struct cros_ec_device *cros_ec, u8 index, u8 *temp)
{
unsigned int offset;
@@ -75,6 +125,8 @@ static int cros_ec_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
{
struct cros_ec_hwmon_priv *priv = dev_get_drvdata(dev);
int ret = -EOPNOTSUPP;
+ u8 control_method;
+ u8 pwm_value;
u16 speed;
u8 temp;
@@ -92,6 +144,17 @@ static int cros_ec_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
if (ret == 0)
*val = cros_ec_hwmon_is_error_fan(speed);
}
+ } else if (type == hwmon_pwm) {
+ if (attr == hwmon_pwm_enable) {
+ ret = cros_ec_hwmon_read_pwm_enable(priv->cros_ec, channel,
+ &control_method);
+ if (ret == 0)
+ *val = control_method;
+ } else if (attr == hwmon_pwm_input) {
+ ret = cros_ec_hwmon_read_pwm_value(priv->cros_ec, channel, &pwm_value);
+ if (ret == 0)
+ *val = pwm_value;
+ }
} else if (type == hwmon_temp) {
if (attr == hwmon_temp_input) {
ret = cros_ec_hwmon_read_temp(priv->cros_ec, channel, &temp);
@@ -124,6 +187,74 @@ static int cros_ec_hwmon_read_string(struct device *dev, enum hwmon_sensor_types
return -EOPNOTSUPP;
}
+static int cros_ec_hwmon_set_fan_pwm_val(struct cros_ec_device *cros_ec, u8 index, u8 val)
+{
+ struct ec_params_pwm_set_fan_duty_v1 req = {
+ .fan_idx = index,
+ .percent = DIV_ROUND_CLOSEST((uint32_t)val * 100, 255),
+ };
+ int ret;
+
+ ret = cros_ec_cmd(cros_ec, CROS_EC_HWMON_PWM_SET_FAN_DUTY_CMD_VERSION,
+ EC_CMD_PWM_SET_FAN_DUTY, &req, sizeof(req), NULL, 0);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static int cros_ec_hwmon_write_pwm_input(struct cros_ec_device *cros_ec, u8 index, u8 val)
+{
+ u8 control_method;
+ int ret;
+
+ ret = cros_ec_hwmon_read_pwm_enable(cros_ec, index, &control_method);
+ if (ret)
+ return ret;
+ if (control_method != 1)
+ return -EOPNOTSUPP;
+
+ return cros_ec_hwmon_set_fan_pwm_val(cros_ec, index, val);
+}
+
+static int cros_ec_hwmon_write_pwm_enable(struct cros_ec_device *cros_ec, u8 index, u8 val)
+{
+ struct ec_params_auto_fan_ctrl_v2 req = {
+ .fan_idx = index,
+ .cmd = EC_AUTO_FAN_CONTROL_CMD_SET,
+ };
+ int ret;
+
+ /* No CrOS EC supports disabling fan speed control (pwm_enable == 0) */
+ if (val == 0)
+ return -EOPNOTSUPP;
+
+ req.set_auto = val != 1;
+ ret = cros_ec_cmd(cros_ec, CROS_EC_HWMON_THERMAL_AUTO_FAN_CTRL_CMD_VERSION,
+ EC_CMD_THERMAL_AUTO_FAN_CTRL, &req, sizeof(req), NULL, 0);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static int cros_ec_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long val)
+{
+ struct cros_ec_hwmon_priv *priv = dev_get_drvdata(dev);
+
+ if (type == hwmon_pwm) {
+ switch (attr) {
+ case hwmon_pwm_input:
+ return cros_ec_hwmon_write_pwm_input(priv->cros_ec, channel, val);
+ case hwmon_pwm_enable:
+ return cros_ec_hwmon_write_pwm_enable(priv->cros_ec, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return -EOPNOTSUPP;
+}
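
Taken together, the read and write paths above give the usual hwmon contract: pwm1_enable must be 1 (manual) before pwm1 accepts a value. A minimal userspace sketch of that sequence; the hwmon index and paths are illustrative and vary per system:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int write_attr(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t ret;

	if (fd < 0)
		return -1;
	ret = write(fd, val, strlen(val));	/* sysfs expects a single write */
	close(fd);
	return ret < 0 ? -1 : 0;
}

int main(void)
{
	/* Switch fan 0 to manual control, then request ~50% duty */
	if (write_attr("/sys/class/hwmon/hwmon0/pwm1_enable", "1"))
		return 1;
	return write_attr("/sys/class/hwmon/hwmon0/pwm1", "128") ? 1 : 0;
}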
+
static umode_t cros_ec_hwmon_is_visible(const void *data, enum hwmon_sensor_types type,
u32 attr, int channel)
{
@@ -132,6 +263,9 @@ static umode_t cros_ec_hwmon_is_visible(const void *data, enum hwmon_sensor_type
if (type == hwmon_fan) {
if (priv->usable_fans & BIT(channel))
return 0444;
+ } else if (type == hwmon_pwm) {
+ if (priv->fan_control_supported && (priv->usable_fans & BIT(channel)))
+ return 0644;
} else if (type == hwmon_temp) {
if (priv->temp_sensor_names[channel])
return 0444;
@@ -147,6 +281,11 @@ static const struct hwmon_channel_info * const cros_ec_hwmon_info[] = {
HWMON_F_INPUT | HWMON_F_FAULT,
HWMON_F_INPUT | HWMON_F_FAULT,
HWMON_F_INPUT | HWMON_F_FAULT),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE),
HWMON_CHANNEL_INFO(temp,
HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL,
@@ -175,9 +314,46 @@ static const struct hwmon_channel_info * const cros_ec_hwmon_info[] = {
NULL
};
+static int cros_ec_hwmon_cooling_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *val)
+{
+ *val = 255;
+ return 0;
+}
+
+static int cros_ec_hwmon_cooling_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *val)
+{
+ const struct cros_ec_hwmon_cooling_priv *priv = cdev->devdata;
+ u8 read_val;
+ int ret;
+
+ ret = cros_ec_hwmon_read_pwm_value(priv->hwmon_priv->cros_ec, priv->index, &read_val);
+ if (ret)
+ return ret;
+
+ *val = read_val;
+ return 0;
+}
+
+static int cros_ec_hwmon_cooling_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long val)
+{
+ const struct cros_ec_hwmon_cooling_priv *priv = cdev->devdata;
+
+ return cros_ec_hwmon_write_pwm_input(priv->hwmon_priv->cros_ec, priv->index, val);
+}
+
+static const struct thermal_cooling_device_ops cros_ec_thermal_cooling_ops = {
+ .get_max_state = cros_ec_hwmon_cooling_get_max_state,
+ .get_cur_state = cros_ec_hwmon_cooling_get_cur_state,
+ .set_cur_state = cros_ec_hwmon_cooling_set_cur_state,
+};
+
static const struct hwmon_ops cros_ec_hwmon_ops = {
.read = cros_ec_hwmon_read,
.read_string = cros_ec_hwmon_read_string,
+ .write = cros_ec_hwmon_write,
.is_visible = cros_ec_hwmon_is_visible,
};
@@ -233,6 +409,65 @@ static void cros_ec_hwmon_probe_fans(struct cros_ec_hwmon_priv *priv)
}
}
+static inline bool is_cros_ec_cmd_available(struct cros_ec_device *cros_ec,
+ u16 cmd, u8 version)
+{
+ int ret;
+
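+ /* cros_ec_get_cmd_versions() returns a bitmask of supported command versions */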
+ ret = cros_ec_get_cmd_versions(cros_ec, cmd);
+ return ret >= 0 && (ret & EC_VER_MASK(version));
+}
+
+static bool cros_ec_hwmon_probe_fan_control_supported(struct cros_ec_device *cros_ec)
+{
+ return is_cros_ec_cmd_available(cros_ec, EC_CMD_PWM_GET_FAN_DUTY,
+ CROS_EC_HWMON_PWM_GET_FAN_DUTY_CMD_VERSION) &&
+ is_cros_ec_cmd_available(cros_ec, EC_CMD_PWM_SET_FAN_DUTY,
+ CROS_EC_HWMON_PWM_SET_FAN_DUTY_CMD_VERSION) &&
+ is_cros_ec_cmd_available(cros_ec, EC_CMD_THERMAL_AUTO_FAN_CTRL,
+ CROS_EC_HWMON_THERMAL_AUTO_FAN_CTRL_CMD_VERSION);
+}
+
+static void cros_ec_hwmon_register_fan_cooling_devices(struct device *dev,
+ struct cros_ec_hwmon_priv *priv)
+{
+ struct cros_ec_hwmon_cooling_priv *cpriv;
+ struct thermal_cooling_device *cdev;
+ const char *type;
+ size_t i;
+
+ if (!IS_ENABLED(CONFIG_THERMAL))
+ return;
+
+ if (!priv->fan_control_supported)
+ return;
+
+ for (i = 0; i < EC_FAN_SPEED_ENTRIES; i++) {
+ if (!(priv->usable_fans & BIT(i)))
+ continue;
+
+ cpriv = devm_kzalloc(dev, sizeof(*cpriv), GFP_KERNEL);
+ if (!cpriv)
+ continue;
+
+ type = devm_kasprintf(dev, GFP_KERNEL, "%s-fan%zu", dev_name(dev), i);
+ if (!type) {
+ dev_warn(dev, "no memory to compose cooling device type for fan %zu\n", i);
+ continue;
+ }
+
+ cpriv->hwmon_priv = priv;
+ cpriv->index = i;
+ cdev = devm_thermal_of_cooling_device_register(dev, NULL, type, cpriv,
+ &cros_ec_thermal_cooling_ops);
+ if (IS_ERR(cdev)) {
+ dev_warn(dev, "failed to register fan %zu as a cooling device: %pe\n", i,
+ cdev);
+ continue;
+ }
+ }
+}
+
static int cros_ec_hwmon_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -259,13 +494,89 @@ static int cros_ec_hwmon_probe(struct platform_device *pdev)
cros_ec_hwmon_probe_temp_sensors(dev, priv, thermal_version);
cros_ec_hwmon_probe_fans(priv);
+ priv->fan_control_supported = cros_ec_hwmon_probe_fan_control_supported(priv->cros_ec);
+ cros_ec_hwmon_register_fan_cooling_devices(dev, priv);
hwmon_dev = devm_hwmon_device_register_with_info(dev, "cros_ec", priv,
&cros_ec_hwmon_chip_info, NULL);
+ platform_set_drvdata(pdev, priv);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
+static int cros_ec_hwmon_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct cros_ec_hwmon_priv *priv = platform_get_drvdata(pdev);
+ u8 control_method;
+ size_t i;
+ int ret;
+
+ if (!priv->fan_control_supported)
+ return 0;
+
+ /* The EC reverts fan control to auto across suspend; store the current settings first. */
+ for (i = 0; i < EC_FAN_SPEED_ENTRIES; i++) {
+ if (!(priv->usable_fans & BIT(i)))
+ continue;
+
+ ret = cros_ec_hwmon_read_pwm_enable(priv->cros_ec, i, &control_method);
+ if (ret) {
+ dev_warn(&pdev->dev, "failed to get mode setting for fan %zu: %pe\n", i,
+ ERR_PTR(ret));
+ continue;
+ }
+
+ if (control_method != 1) {
+ priv->manual_fans &= ~BIT(i);
+ continue;
+ }
+
+ priv->manual_fans |= BIT(i);
+
+ ret = cros_ec_hwmon_read_pwm_value(priv->cros_ec, i, &priv->manual_fan_pwm[i]);
+ /*
+ * If reading the value failed, invalidate the stored mode by marking
+ * the fan as auto-controlled; the EC switches that fan to auto mode
+ * after suspend anyway.
+ */
+ if (ret) {
+ dev_warn(&pdev->dev, "failed to get PWM setting for fan %zu: %pe\n", i,
+ ERR_PTR(ret));
+ priv->manual_fans &= ~BIT(i);
+ continue;
+ }
+ }
+
+ return 0;
+}
+
+static int cros_ec_hwmon_resume(struct platform_device *pdev)
+{
+ const struct cros_ec_hwmon_priv *priv = platform_get_drvdata(pdev);
+ size_t i;
+ int ret;
+
+ if (!priv->fan_control_supported)
+ return 0;
+
+ /* The EC reverts fan control to auto across suspend; restore the pre-suspend settings. */
+ for (i = 0; i < EC_FAN_SPEED_ENTRIES; i++) {
+ if (!(priv->manual_fans & BIT(i)))
+ continue;
+
+ /*
+ * Writing a fan PWM value to the EC also switches that fan to manual
+ * mode, so no separate call to set the mode is needed.
+ */
+ ret = cros_ec_hwmon_set_fan_pwm_val(priv->cros_ec, i, priv->manual_fan_pwm[i]);
+ if (ret)
+ dev_warn(&pdev->dev, "failed to restore settings for fan %zu: %pe\n", i,
+ ERR_PTR(ret));
+ }
+
+ return 0;
+}
+
static const struct platform_device_id cros_ec_hwmon_id[] = {
{ DRV_NAME, 0 },
{}
@@ -274,6 +585,8 @@ static const struct platform_device_id cros_ec_hwmon_id[] = {
static struct platform_driver cros_ec_hwmon_driver = {
.driver.name = DRV_NAME,
.probe = cros_ec_hwmon_probe,
+ .suspend = pm_ptr(cros_ec_hwmon_suspend),
+ .resume = pm_ptr(cros_ec_hwmon_resume),
.id_table = cros_ec_hwmon_id,
};
module_platform_driver(cros_ec_hwmon_driver);
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 1e2c8e284001..cbe1a74a3dee 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -24,6 +24,7 @@
#include <linux/init.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
+#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
@@ -446,7 +447,6 @@ static int i8k_set_fan(const struct dell_smm_data *data, u8 fan, int speed)
if (disallow_fan_support)
return -EINVAL;
- speed = (speed < 0) ? 0 : ((speed > data->i8k_fan_max) ? data->i8k_fan_max : speed);
regs.ebx = fan | (speed << 8);
return dell_smm_call(data->ops, &regs);
@@ -637,6 +637,8 @@ static long i8k_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
if (copy_from_user(&speed, argp + 1, sizeof(int)))
return -EFAULT;
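+ /* i8k_set_fan() no longer clamps, so bound the user-supplied speed here */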
+ speed = clamp_val(speed, 0, data->i8k_fan_max);
+
mutex_lock(&data->i8k_mutex);
err = i8k_set_fan(data, val, speed);
if (err < 0)
@@ -762,6 +764,13 @@ static int dell_smm_get_cur_state(struct thermal_cooling_device *dev, unsigned l
if (ret < 0)
return ret;
+ /*
+ * A fan state bigger than i8k_fan_max might indicate that
+ * the fan is currently in automatic mode.
+ */
+ if (ret > cdata->data->i8k_fan_max)
+ return -ENODATA;
+
*state = ret;
return 0;
@@ -849,7 +858,14 @@ static umode_t dell_smm_is_visible(const void *drvdata, enum hwmon_sensor_types
break;
case hwmon_pwm_enable:
- if (auto_fan)
+ if (auto_fan) {
+ /*
+ * The setting affects all fans, so only create a
+ * single attribute.
+ */
+ if (channel != 1)
+ return 0;
+
/*
 * There is no command to retrieve the current status
* from BIOS, and userspace/firmware itself can change
@@ -857,6 +873,10 @@ static umode_t dell_smm_is_visible(const void *drvdata, enum hwmon_sensor_types
* Thus we can only provide write-only access for now.
*/
return 0200;
+ }
+
+ if (data->fan[channel] && data->i8k_fan_max < I8K_FAN_AUTO)
+ return 0644;
break;
default:
@@ -926,15 +946,29 @@ static int dell_smm_read(struct device *dev, enum hwmon_sensor_types type, u32 a
}
break;
case hwmon_pwm:
+ ret = i8k_get_fan_status(data, channel);
+ if (ret < 0)
+ return ret;
+
switch (attr) {
case hwmon_pwm_input:
- ret = i8k_get_fan_status(data, channel);
- if (ret < 0)
- return ret;
+ /*
+ * A fan state bigger than i8k_fan_max might indicate that
+ * the fan is currently in automatic mode.
+ */
+ if (ret > data->i8k_fan_max)
+ return -ENODATA;
*val = clamp_val(ret * data->i8k_pwm_mult, 0, 255);
return 0;
+ case hwmon_pwm_enable:
+ if (ret == I8K_FAN_AUTO)
+ *val = 2;
+ else
+ *val = 1;
+
+ return 0;
default:
break;
}
@@ -1020,16 +1054,32 @@ static int dell_smm_write(struct device *dev, enum hwmon_sensor_types type, u32
return 0;
case hwmon_pwm_enable:
- if (!val)
- return -EINVAL;
-
- if (val == 1)
+ switch (val) {
+ case 1:
enable = false;
- else
+ break;
+ case 2:
enable = true;
+ break;
+ default:
+ return -EINVAL;
+ }
mutex_lock(&data->i8k_mutex);
- err = i8k_enable_fan_auto_mode(data, enable);
+ if (auto_fan) {
+ err = i8k_enable_fan_auto_mode(data, enable);
+ } else {
+ /*
+ * When putting the fan into manual control mode we have to ensure
+ * that the device does not overheat until the userspace fan control
+ * software takes over. Because of this we set the fan speed to
+ * i8k_fan_max when disabling automatic fan control.
+ */
+ if (enable)
+ err = i8k_set_fan(data, channel, I8K_FAN_AUTO);
+ else
+ err = i8k_set_fan(data, channel, data->i8k_fan_max);
+ }
mutex_unlock(&data->i8k_mutex);
if (err < 0)
@@ -1080,9 +1130,9 @@ static const struct hwmon_channel_info * const dell_smm_info[] = {
),
HWMON_CHANNEL_INFO(pwm,
HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT,
- HWMON_PWM_INPUT,
- HWMON_PWM_INPUT
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE
),
NULL
};
@@ -1281,6 +1331,13 @@ static const struct dmi_system_id i8k_dmi_table[] __initconst = {
},
},
{
+ .ident = "Dell OptiPlex 7040",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "OptiPlex 7040"),
+ },
+ },
+ {
.ident = "Dell Precision",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -1331,7 +1388,6 @@ struct i8k_config_data {
enum i8k_configs {
DELL_LATITUDE_D520,
- DELL_PRECISION_490,
DELL_STUDIO,
DELL_XPS,
};
@@ -1341,10 +1397,6 @@ static const struct i8k_config_data i8k_config_data[] __initconst = {
.fan_mult = 1,
.fan_max = I8K_FAN_TURBO,
},
- [DELL_PRECISION_490] = {
- .fan_mult = 1,
- .fan_max = I8K_FAN_TURBO,
- },
[DELL_STUDIO] = {
.fan_mult = 1,
.fan_max = I8K_FAN_HIGH,
@@ -1365,15 +1417,6 @@ static const struct dmi_system_id i8k_config_dmi_table[] __initconst = {
.driver_data = (void *)&i8k_config_data[DELL_LATITUDE_D520],
},
{
- .ident = "Dell Precision 490",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME,
- "Precision WorkStation 490"),
- },
- .driver_data = (void *)&i8k_config_data[DELL_PRECISION_490],
- },
- {
.ident = "Dell Studio",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
diff --git a/drivers/hwmon/gpd-fan.c b/drivers/hwmon/gpd-fan.c
new file mode 100644
index 000000000000..644dc3ca9df7
--- /dev/null
+++ b/drivers/hwmon/gpd-fan.c
@@ -0,0 +1,715 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Platform driver for GPD devices that expose fan control via hwmon sysfs.
+ *
+ * Fan control is exposed through a pwm interface in the range [0, 255].
+ * Each model uses a different range in the EC, so the written value is
+ * scaled to fit that range.
+ *
+ * Based on this repo:
+ * https://github.com/Cryolitia/gpd-fan-driver
+ *
+ * Copyright (c) 2024 Cryolitia PukNgae
+ */
+
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/hwmon.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define DRIVER_NAME "gpdfan"
+#define GPD_PWM_CTR_OFFSET 0x1841
+
+static char *gpd_fan_board = "";
+module_param(gpd_fan_board, charp, 0444);
+
+// EC read/write lock, protecting each sequence of EC operations
+static DEFINE_MUTEX(gpd_fan_sequence_lock);
+
+enum gpd_board {
+ win_mini,
+ win4_6800u,
+ win_max_2,
+ duo,
+};
+
+enum FAN_PWM_ENABLE {
+ DISABLE = 0,
+ MANUAL = 1,
+ AUTOMATIC = 2,
+};
+
+static struct {
+ enum FAN_PWM_ENABLE pwm_enable;
+ u8 pwm_value;
+
+ const struct gpd_fan_drvdata *drvdata;
+} gpd_driver_priv;
+
+struct gpd_fan_drvdata {
+ const char *board_name; // Board name for module param comparison
+ const enum gpd_board board;
+
+ const u8 addr_port;
+ const u8 data_port;
+ const u16 manual_control_enable;
+ const u16 rpm_read;
+ const u16 pwm_write;
+ const u16 pwm_max;
+};
+
+static struct gpd_fan_drvdata gpd_win_mini_drvdata = {
+ .board_name = "win_mini",
+ .board = win_mini,
+
+ .addr_port = 0x4E,
+ .data_port = 0x4F,
+ .manual_control_enable = 0x047A,
+ .rpm_read = 0x0478,
+ .pwm_write = 0x047A,
+ .pwm_max = 244,
+};
+
+static struct gpd_fan_drvdata gpd_duo_drvdata = {
+ .board_name = "duo",
+ .board = duo,
+
+ .addr_port = 0x4E,
+ .data_port = 0x4F,
+ .manual_control_enable = 0x047A,
+ .rpm_read = 0x0478,
+ .pwm_write = 0x047A,
+ .pwm_max = 244,
+};
+
+static struct gpd_fan_drvdata gpd_win4_drvdata = {
+ .board_name = "win4",
+ .board = win4_6800u,
+
+ .addr_port = 0x2E,
+ .data_port = 0x2F,
+ .manual_control_enable = 0xC311,
+ .rpm_read = 0xC880,
+ .pwm_write = 0xC311,
+ .pwm_max = 127,
+};
+
+static struct gpd_fan_drvdata gpd_wm2_drvdata = {
+ .board_name = "wm2",
+ .board = win_max_2,
+
+ .addr_port = 0x4E,
+ .data_port = 0x4F,
+ .manual_control_enable = 0x0275,
+ .rpm_read = 0x0218,
+ .pwm_write = 0x1809,
+ .pwm_max = 184,
+};
+
+static const struct dmi_system_id dmi_table[] = {
+ {
+ // GPD Win Mini
+ // GPD Win Mini with AMD Ryzen 8840U
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1617-01")
+ },
+ .driver_data = &gpd_win_mini_drvdata,
+ },
+ {
+ // GPD Win Mini
+ // GPD Win Mini with AMD Ryzen HX370
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1617-02")
+ },
+ .driver_data = &gpd_win_mini_drvdata,
+ },
+ {
+ // GPD Win Mini
+ // GPD Win Mini with AMD Ryzen HX370
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1617-02-L")
+ },
+ .driver_data = &gpd_win_mini_drvdata,
+ },
+ {
+ // GPD Win 4 with AMD Ryzen 6800U
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1618-04"),
+ DMI_MATCH(DMI_BOARD_VERSION, "Default string"),
+ },
+ .driver_data = &gpd_win4_drvdata,
+ },
+ {
+ // GPD Win 4 with Ryzen 7840U
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1618-04"),
+ DMI_MATCH(DMI_BOARD_VERSION, "Ver. 1.0"),
+ },
+ // Since 7840U, win4 uses the same drvdata as wm2
+ .driver_data = &gpd_wm2_drvdata,
+ },
+ {
+ // GPD Win 4 with Ryzen 7840U (another)
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1618-04"),
+ DMI_MATCH(DMI_BOARD_VERSION, "Ver.1.0"),
+ },
+ .driver_data = &gpd_wm2_drvdata,
+ },
+ {
+ // GPD Win Max 2 with Ryzen 6800U
+ // GPD Win Max 2 2023 with Ryzen 7840U
+ // GPD Win Max 2 2024 with Ryzen 8840U
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1619-04"),
+ },
+ .driver_data = &gpd_wm2_drvdata,
+ },
+ {
+ // GPD Win Max 2 with AMD Ryzen HX370
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1619-05"),
+ },
+ .driver_data = &gpd_wm2_drvdata,
+ },
+ {
+ // GPD Duo
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1622-01"),
+ },
+ .driver_data = &gpd_duo_drvdata,
+ },
+ {
+ // GPD Duo (another)
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1622-01-L"),
+ },
+ .driver_data = &gpd_duo_drvdata,
+ },
+ {
+ // GPD Pocket 4
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1628-04"),
+ },
+ .driver_data = &gpd_win_mini_drvdata,
+ },
+ {
+ // GPD Pocket 4 (another)
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1628-04-L"),
+ },
+ .driver_data = &gpd_win_mini_drvdata,
+ },
+ {}
+};
+
+static const struct gpd_fan_drvdata *gpd_module_drvdata[] = {
+ &gpd_win_mini_drvdata, &gpd_win4_drvdata, &gpd_wm2_drvdata, NULL
+};
+
+// Helper functions to handle EC read/write
+static void gpd_ecram_read(u16 offset, u8 *val)
+{
+ u16 addr_port = gpd_driver_priv.drvdata->addr_port;
+ u16 data_port = gpd_driver_priv.drvdata->data_port;
+
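+ /*
+ * Indexed EC RAM access: logical register 0x11 latches the high byte
+ * of the 16-bit offset, 0x10 the low byte, and 0x12 is the data
+ * window for the transfer itself.
+ */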
+ outb(0x2E, addr_port);
+ outb(0x11, data_port);
+ outb(0x2F, addr_port);
+ outb((u8)((offset >> 8) & 0xFF), data_port);
+
+ outb(0x2E, addr_port);
+ outb(0x10, data_port);
+ outb(0x2F, addr_port);
+ outb((u8)(offset & 0xFF), data_port);
+
+ outb(0x2E, addr_port);
+ outb(0x12, data_port);
+ outb(0x2F, addr_port);
+ *val = inb(data_port);
+}
+
+static void gpd_ecram_write(u16 offset, u8 value)
+{
+ u16 addr_port = gpd_driver_priv.drvdata->addr_port;
+ u16 data_port = gpd_driver_priv.drvdata->data_port;
+
+ outb(0x2E, addr_port);
+ outb(0x11, data_port);
+ outb(0x2F, addr_port);
+ outb((u8)((offset >> 8) & 0xFF), data_port);
+
+ outb(0x2E, addr_port);
+ outb(0x10, data_port);
+ outb(0x2F, addr_port);
+ outb((u8)(offset & 0xFF), data_port);
+
+ outb(0x2E, addr_port);
+ outb(0x12, data_port);
+ outb(0x2F, addr_port);
+ outb(value, data_port);
+}
+
+static int gpd_generic_read_rpm(void)
+{
+ const struct gpd_fan_drvdata *const drvdata = gpd_driver_priv.drvdata;
+ u8 high, low;
+
+ gpd_ecram_read(drvdata->rpm_read, &high);
+ gpd_ecram_read(drvdata->rpm_read + 1, &low);
+
+ return (u16)high << 8 | low;
+}
+
+static void gpd_win4_init_ec(void)
+{
+ u8 chip_id, chip_ver;
+
+ gpd_ecram_read(0x2000, &chip_id);
+
+ if (chip_id == 0x55) {
+ gpd_ecram_read(0x1060, &chip_ver);
+ gpd_ecram_write(0x1060, chip_ver | 0x80);
+ }
+}
+
+static int gpd_win4_read_rpm(void)
+{
+ int ret;
+
+ ret = gpd_generic_read_rpm();
+
+ /* Re-initialize the EC when the reported speed is 0 */
+ if (ret == 0)
+ gpd_win4_init_ec();
+
+ return ret;
+}
+
+static int gpd_wm2_read_rpm(void)
+{
+ for (u16 pwm_ctr_offset = GPD_PWM_CTR_OFFSET;
+ pwm_ctr_offset <= GPD_PWM_CTR_OFFSET + 2; pwm_ctr_offset++) {
+ u8 pwm_ctr;
+
+ gpd_ecram_read(pwm_ctr_offset, &pwm_ctr);
+
+ if (pwm_ctr != 0xB8)
+ gpd_ecram_write(pwm_ctr_offset, 0xB8);
+ }
+
+ return gpd_generic_read_rpm();
+}
+
+// Read value for fan1_input
+static int gpd_read_rpm(void)
+{
+ switch (gpd_driver_priv.drvdata->board) {
+ case win_mini:
+ case duo:
+ return gpd_generic_read_rpm();
+ case win4_6800u:
+ return gpd_win4_read_rpm();
+ case win_max_2:
+ return gpd_wm2_read_rpm();
+ }
+
+ return 0;
+}
+
+static int gpd_wm2_read_pwm(void)
+{
+ const struct gpd_fan_drvdata *const drvdata = gpd_driver_priv.drvdata;
+ u8 var;
+
+ gpd_ecram_read(drvdata->pwm_write, &var);
+
+ // Match gpd_generic_write_pwm(u8) below
+ return DIV_ROUND_CLOSEST((var - 1) * 255, (drvdata->pwm_max - 1));
+}
+
+// Read value for pwm1
+static int gpd_read_pwm(void)
+{
+ switch (gpd_driver_priv.drvdata->board) {
+ case win_mini:
+ case duo:
+ case win4_6800u:
+ switch (gpd_driver_priv.pwm_enable) {
+ case DISABLE:
+ return 255;
+ case MANUAL:
+ return gpd_driver_priv.pwm_value;
+ case AUTOMATIC:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case win_max_2:
+ return gpd_wm2_read_pwm();
+ }
+ return 0;
+}
+
+// The EC accepts PWM values in the range 1..pwm_max; map the 0..255 input onto it.
+static inline u8 gpd_cast_pwm_range(u8 val)
+{
+ const struct gpd_fan_drvdata *const drvdata = gpd_driver_priv.drvdata;
+
+ return DIV_ROUND_CLOSEST(val * (drvdata->pwm_max - 1), 255) + 1;
+}
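
A quick numeric check of this mapping, as a standalone sketch using the win_mini pwm_max of 244 from the drvdata above:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))

static unsigned int cast_pwm_range(unsigned int val, unsigned int pwm_max)
{
	return DIV_ROUND_CLOSEST(val * (pwm_max - 1), 255) + 1;
}

int main(void)
{
	static const unsigned int vals[] = { 0, 128, 255 };

	/* With pwm_max = 244: 0 -> 1, 128 -> 123, 255 -> 244 */
	for (int i = 0; i < 3; i++)
		printf("%u -> %u\n", vals[i], cast_pwm_range(vals[i], 244));
	return 0;
}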
+
+static void gpd_generic_write_pwm(u8 val)
+{
+ const struct gpd_fan_drvdata *const drvdata = gpd_driver_priv.drvdata;
+ u8 pwm_reg;
+
+ pwm_reg = gpd_cast_pwm_range(val);
+ gpd_ecram_write(drvdata->pwm_write, pwm_reg);
+}
+
+static void gpd_duo_write_pwm(u8 val)
+{
+ const struct gpd_fan_drvdata *const drvdata = gpd_driver_priv.drvdata;
+ u8 pwm_reg;
+
+ pwm_reg = gpd_cast_pwm_range(val);
+ gpd_ecram_write(drvdata->pwm_write, pwm_reg);
+ gpd_ecram_write(drvdata->pwm_write + 1, pwm_reg);
+}
+
+// Write value for pwm1
+static int gpd_write_pwm(u8 val)
+{
+ if (gpd_driver_priv.pwm_enable != MANUAL)
+ return -EPERM;
+
+ switch (gpd_driver_priv.drvdata->board) {
+ case duo:
+ gpd_duo_write_pwm(val);
+ break;
+ case win_mini:
+ case win4_6800u:
+ case win_max_2:
+ gpd_generic_write_pwm(val);
+ break;
+ }
+
+ return 0;
+}
+
+static void gpd_win_mini_set_pwm_enable(enum FAN_PWM_ENABLE pwm_enable)
+{
+ switch (pwm_enable) {
+ case DISABLE:
+ gpd_generic_write_pwm(255);
+ break;
+ case MANUAL:
+ gpd_generic_write_pwm(gpd_driver_priv.pwm_value);
+ break;
+ case AUTOMATIC:
+ gpd_ecram_write(gpd_driver_priv.drvdata->pwm_write, 0);
+ break;
+ }
+}
+
+static void gpd_duo_set_pwm_enable(enum FAN_PWM_ENABLE pwm_enable)
+{
+ switch (pwm_enable) {
+ case DISABLE:
+ gpd_duo_write_pwm(255);
+ break;
+ case MANUAL:
+ gpd_duo_write_pwm(gpd_driver_priv.pwm_value);
+ break;
+ case AUTOMATIC:
+ gpd_ecram_write(gpd_driver_priv.drvdata->pwm_write, 0);
+ break;
+ }
+}
+
+static void gpd_wm2_set_pwm_enable(enum FAN_PWM_ENABLE enable)
+{
+ const struct gpd_fan_drvdata *const drvdata = gpd_driver_priv.drvdata;
+
+ switch (enable) {
+ case DISABLE:
+ gpd_generic_write_pwm(255);
+ gpd_ecram_write(drvdata->manual_control_enable, 1);
+ break;
+ case MANUAL:
+ gpd_generic_write_pwm(gpd_driver_priv.pwm_value);
+ gpd_ecram_write(drvdata->manual_control_enable, 1);
+ break;
+ case AUTOMATIC:
+ gpd_ecram_write(drvdata->manual_control_enable, 0);
+ break;
+ }
+}
+
+// Write value for pwm1_enable
+static void gpd_set_pwm_enable(enum FAN_PWM_ENABLE enable)
+{
+ /*
+ * When switching to manual mode, start from the maximum pwm_value
+ * so the device stays safe until userspace lowers it.
+ */
+ if (enable == MANUAL)
+ gpd_driver_priv.pwm_value = 255;
+
+ switch (gpd_driver_priv.drvdata->board) {
+ case win_mini:
+ case win4_6800u:
+ gpd_win_mini_set_pwm_enable(enable);
+ break;
+ case duo:
+ gpd_duo_set_pwm_enable(enable);
+ break;
+ case win_max_2:
+ gpd_wm2_set_pwm_enable(enable);
+ break;
+ }
+}
+
+static umode_t gpd_fan_hwmon_is_visible(__always_unused const void *drvdata,
+ enum hwmon_sensor_types type, u32 attr,
+ __always_unused int channel)
+{
+ if (type == hwmon_fan && attr == hwmon_fan_input) {
+ return 0444;
+ } else if (type == hwmon_pwm) {
+ switch (attr) {
+ case hwmon_pwm_enable:
+ case hwmon_pwm_input:
+ return 0644;
+ default:
+ return 0;
+ }
+ }
+ return 0;
+}
+
+static int gpd_fan_hwmon_read(__always_unused struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ __always_unused int channel, long *val)
+{
+ int ret;
+
+ ret = mutex_lock_interruptible(&gpd_fan_sequence_lock);
+ if (ret)
+ return ret;
+
+ if (type == hwmon_fan) {
+ if (attr == hwmon_fan_input) {
+ ret = gpd_read_rpm();
+
+ if (ret < 0)
+ goto OUT;
+
+ *val = ret;
+ ret = 0;
+ goto OUT;
+ }
+ } else if (type == hwmon_pwm) {
+ switch (attr) {
+ case hwmon_pwm_enable:
+ *val = gpd_driver_priv.pwm_enable;
+ ret = 0;
+ goto OUT;
+ case hwmon_pwm_input:
+ ret = gpd_read_pwm();
+
+ if (ret < 0)
+ goto OUT;
+
+ *val = ret;
+ ret = 0;
+ goto OUT;
+ }
+ }
+
+ ret = -EOPNOTSUPP;
+
+OUT:
+ mutex_unlock(&gpd_fan_sequence_lock);
+ return ret;
+}
+
+static int gpd_fan_hwmon_write(__always_unused struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ __always_unused int channel, long val)
+{
+ int ret;
+
+ ret = mutex_lock_interruptible(&gpd_fan_sequence_lock);
+ if (ret)
+ return ret;
+
+ if (type == hwmon_pwm) {
+ switch (attr) {
+ case hwmon_pwm_enable:
+ if (!in_range(val, 0, 3)) {
+ ret = -EINVAL;
+ goto OUT;
+ }
+
+ gpd_driver_priv.pwm_enable = val;
+
+ gpd_set_pwm_enable(gpd_driver_priv.pwm_enable);
+ ret = 0;
+ goto OUT;
+ case hwmon_pwm_input:
+ if (!in_range(val, 0, 256)) {
+ ret = -ERANGE;
+ goto OUT;
+ }
+
+ gpd_driver_priv.pwm_value = val;
+
+ ret = gpd_write_pwm(val);
+ goto OUT;
+ }
+ }
+
+ ret = -EOPNOTSUPP;
+
+OUT:
+ mutex_unlock(&gpd_fan_sequence_lock);
+ return ret;
+}
+
+static const struct hwmon_ops gpd_fan_ops = {
+ .is_visible = gpd_fan_hwmon_is_visible,
+ .read = gpd_fan_hwmon_read,
+ .write = gpd_fan_hwmon_write,
+};
+
+static const struct hwmon_channel_info *gpd_fan_hwmon_channel_info[] = {
+ HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT),
+ HWMON_CHANNEL_INFO(pwm, HWMON_PWM_INPUT | HWMON_PWM_ENABLE),
+ NULL
+};
+
+static struct hwmon_chip_info gpd_fan_chip_info = {
+ .ops = &gpd_fan_ops,
+ .info = gpd_fan_hwmon_channel_info
+};
+
+static int gpd_fan_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct resource *region;
+ const struct resource *res;
+ const struct device *hwdev;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res)
+ return dev_err_probe(dev, -ENODEV,
+ "Failed to get platform resource\n");
+
+ region = devm_request_region(dev, res->start,
+ resource_size(res), DRIVER_NAME);
+ if (!region)
+ return dev_err_probe(dev, -EBUSY,
+ "Failed to request region\n");
+
+ hwdev = devm_hwmon_device_register_with_info(dev,
+ DRIVER_NAME,
+ NULL,
+ &gpd_fan_chip_info,
+ NULL);
+ if (IS_ERR(hwdev))
+ return dev_err_probe(dev, PTR_ERR(hwdev),
+ "Failed to register hwmon device\n");
+
+ return 0;
+}
+
+static void gpd_fan_remove(__always_unused struct platform_device *pdev)
+{
+ gpd_driver_priv.pwm_enable = AUTOMATIC;
+ gpd_set_pwm_enable(AUTOMATIC);
+}
+
+static struct platform_driver gpd_fan_driver = {
+ .probe = gpd_fan_probe,
+ .remove = gpd_fan_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+};
+
+static struct platform_device *gpd_fan_platform_device;
+
+static int __init gpd_fan_init(void)
+{
+ const struct gpd_fan_drvdata *match = NULL;
+
+ for (const struct gpd_fan_drvdata **p = gpd_module_drvdata; *p; p++) {
+ if (strcmp(gpd_fan_board, (*p)->board_name) == 0) {
+ match = *p;
+ break;
+ }
+ }
+
+ if (!match) {
+ const struct dmi_system_id *dmi_match =
+ dmi_first_match(dmi_table);
+ if (dmi_match)
+ match = dmi_match->driver_data;
+ }
+
+ if (!match)
+ return -ENODEV;
+
+ gpd_driver_priv.pwm_enable = AUTOMATIC;
+ gpd_driver_priv.pwm_value = 255;
+ gpd_driver_priv.drvdata = match;
+
+ struct resource gpd_fan_resources[] = {
+ {
+ .start = match->addr_port,
+ .end = match->data_port,
+ .flags = IORESOURCE_IO,
+ },
+ };
+
+ gpd_fan_platform_device = platform_create_bundle(&gpd_fan_driver,
+ gpd_fan_probe,
+ gpd_fan_resources,
+ 1, NULL, 0);
+
+ if (IS_ERR(gpd_fan_platform_device)) {
+ pr_warn("Failed to create platform device\n");
+ return PTR_ERR(gpd_fan_platform_device);
+ }
+
+ return 0;
+}
+
+static void __exit gpd_fan_exit(void)
+{
+ platform_device_unregister(gpd_fan_platform_device);
+ platform_driver_unregister(&gpd_fan_driver);
+}
+
+MODULE_DEVICE_TABLE(dmi, dmi_table);
+
+module_init(gpd_fan_init);
+module_exit(gpd_fan_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cryolitia PukNgae <cryolitia@uniontech.com>");
+MODULE_DESCRIPTION("GPD Devices fan control driver");
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 1688c210888a..0b4bdcd33c7b 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -19,6 +19,7 @@
#include <linux/kstrtox.h>
#include <linux/list.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/property.h>
#include <linux/slab.h>
@@ -36,6 +37,7 @@ struct hwmon_device {
const char *label;
struct device dev;
const struct hwmon_chip_info *chip;
+ struct mutex lock;
struct list_head tzdata;
struct attribute_group group;
const struct attribute_group **groups;
@@ -165,6 +167,8 @@ static int hwmon_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
int ret;
long t;
+ guard(mutex)(&hwdev->lock);
+
ret = hwdev->chip->ops->read(tdata->dev, hwmon_temp, hwmon_temp_input,
tdata->index, &t);
if (ret < 0)
@@ -193,6 +197,8 @@ static int hwmon_thermal_set_trips(struct thermal_zone_device *tz, int low, int
if (!info[i])
return 0;
+ guard(mutex)(&hwdev->lock);
+
if (info[i]->config[tdata->index] & HWMON_T_MIN) {
err = chip->ops->write(tdata->dev, hwmon_temp,
hwmon_temp_min, tdata->index, low);
@@ -330,8 +336,6 @@ static int hwmon_attr_base(enum hwmon_sensor_types type)
* attached to an i2c client device.
*/
-static DEFINE_MUTEX(hwmon_pec_mutex);
-
static int hwmon_match_device(struct device *dev, const void *data)
{
return dev->class == &hwmon_class;
@@ -362,17 +366,16 @@ static ssize_t pec_store(struct device *dev, struct device_attribute *devattr,
if (!hdev)
return -ENODEV;
- mutex_lock(&hwmon_pec_mutex);
-
/*
* If there is no write function, we assume that chip specific
* handling is not required.
*/
hwdev = to_hwmon_device(hdev);
+ guard(mutex)(&hwdev->lock);
if (hwdev->chip->ops->write) {
err = hwdev->chip->ops->write(hdev, hwmon_chip, hwmon_chip_pec, 0, val);
if (err && err != -EOPNOTSUPP)
- goto unlock;
+ goto put;
}
if (!val)
@@ -381,8 +384,7 @@ static ssize_t pec_store(struct device *dev, struct device_attribute *devattr,
client->flags |= I2C_CLIENT_PEC;
err = count;
-unlock:
- mutex_unlock(&hwmon_pec_mutex);
+put:
put_device(hdev);
return err;
@@ -426,18 +428,25 @@ static ssize_t hwmon_attr_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct hwmon_device_attribute *hattr = to_hwmon_attr(devattr);
+ struct hwmon_device *hwdev = to_hwmon_device(dev);
+ s64 val64;
long val;
int ret;
+ guard(mutex)(&hwdev->lock);
+
ret = hattr->ops->read(dev, hattr->type, hattr->attr, hattr->index,
- &val);
+ (hattr->type == hwmon_energy64) ? (long *)&val64 : &val);
if (ret < 0)
return ret;
+ if (hattr->type != hwmon_energy64)
+ val64 = val;
+
trace_hwmon_attr_show(hattr->index + hwmon_attr_base(hattr->type),
- hattr->name, val);
+ hattr->name, val64);
- return sprintf(buf, "%ld\n", val);
+ return sprintf(buf, "%lld\n", val64);
}
static ssize_t hwmon_attr_show_string(struct device *dev,
@@ -445,10 +454,13 @@ static ssize_t hwmon_attr_show_string(struct device *dev,
char *buf)
{
struct hwmon_device_attribute *hattr = to_hwmon_attr(devattr);
+ struct hwmon_device *hwdev = to_hwmon_device(dev);
enum hwmon_sensor_types type = hattr->type;
const char *s;
int ret;
+ guard(mutex)(&hwdev->lock);
+
ret = hattr->ops->read_string(dev, hattr->type, hattr->attr,
hattr->index, &s);
if (ret < 0)
@@ -465,6 +477,7 @@ static ssize_t hwmon_attr_store(struct device *dev,
const char *buf, size_t count)
{
struct hwmon_device_attribute *hattr = to_hwmon_attr(devattr);
+ struct hwmon_device *hwdev = to_hwmon_device(dev);
long val;
int ret;
@@ -472,13 +485,15 @@ static ssize_t hwmon_attr_store(struct device *dev,
if (ret < 0)
return ret;
+ guard(mutex)(&hwdev->lock);
+
ret = hattr->ops->write(dev, hattr->type, hattr->attr, hattr->index,
val);
if (ret < 0)
return ret;
trace_hwmon_attr_store(hattr->index + hwmon_attr_base(hattr->type),
- hattr->name, val);
+ hattr->name, (s64)val);
return count;
}
@@ -734,6 +749,7 @@ static const char * const *__templates[] = {
[hwmon_curr] = hwmon_curr_attr_templates,
[hwmon_power] = hwmon_power_attr_templates,
[hwmon_energy] = hwmon_energy_attr_templates,
+ [hwmon_energy64] = hwmon_energy_attr_templates,
[hwmon_humidity] = hwmon_humidity_attr_templates,
[hwmon_fan] = hwmon_fan_attr_templates,
[hwmon_pwm] = hwmon_pwm_attr_templates,
@@ -747,6 +763,7 @@ static const int __templates_size[] = {
[hwmon_curr] = ARRAY_SIZE(hwmon_curr_attr_templates),
[hwmon_power] = ARRAY_SIZE(hwmon_power_attr_templates),
[hwmon_energy] = ARRAY_SIZE(hwmon_energy_attr_templates),
+ [hwmon_energy64] = ARRAY_SIZE(hwmon_energy_attr_templates),
[hwmon_humidity] = ARRAY_SIZE(hwmon_humidity_attr_templates),
[hwmon_fan] = ARRAY_SIZE(hwmon_fan_attr_templates),
[hwmon_pwm] = ARRAY_SIZE(hwmon_pwm_attr_templates),
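
The hwmon_energy64 handling in hwmon_attr_show() above passes 64-bit storage through the existing long * callback parameter. A minimal userspace illustration of that pointer trick (not kernel code; names invented for the sketch):

#include <stdio.h>
#include <stdint.h>

/* Mimic the core's trick: pass s64 storage through a long * parameter */
static int read_energy(long *val)
{
	*(int64_t *)val = 123456789012345LL;	/* energy in uJ */
	return 0;
}

int main(void)
{
	int64_t energy;
	int ret = read_energy((long *)&energy);

	if (!ret)
		printf("%lld uJ\n", (long long)energy);
	return ret;
}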
@@ -785,6 +802,22 @@ int hwmon_notify_event(struct device *dev, enum hwmon_sensor_types type,
}
EXPORT_SYMBOL_GPL(hwmon_notify_event);
+void hwmon_lock(struct device *dev)
+{
+ struct hwmon_device *hwdev = to_hwmon_device(dev);
+
+ mutex_lock(&hwdev->lock);
+}
+EXPORT_SYMBOL_GPL(hwmon_lock);
+
+void hwmon_unlock(struct device *dev)
+{
+ struct hwmon_device *hwdev = to_hwmon_device(dev);
+
+ mutex_unlock(&hwdev->lock);
+}
+EXPORT_SYMBOL_GPL(hwmon_unlock);
+
static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info)
{
int i, n;
@@ -945,6 +978,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
tdev = tdev->parent;
hdev->of_node = tdev ? tdev->of_node : NULL;
hwdev->chip = chip;
+ mutex_init(&hwdev->lock);
dev_set_drvdata(hdev, drvdata);
dev_set_name(hdev, HWMON_ID_FORMAT, id);
err = device_register(hdev);
diff --git a/drivers/hwmon/ina238.c b/drivers/hwmon/ina238.c
index 5a394eeff676..356d19b7675c 100644
--- a/drivers/hwmon/ina238.c
+++ b/drivers/hwmon/ina238.c
@@ -16,8 +16,6 @@
#include <linux/of.h>
#include <linux/regmap.h>
-#include <linux/platform_data/ina2xx.h>
-
/* INA238 register definitions */
#define INA238_CONFIG 0x0
#define INA238_ADC_CONFIG 0x1
@@ -53,7 +51,7 @@
#define INA238_REGISTERS 0x20
-#define INA238_RSHUNT_DEFAULT 10000 /* uOhm */
+#define INA238_RSHUNT_DEFAULT 2500 /* uOhm */
/* Default configuration of device on reset. */
#define INA238_CONFIG_DEFAULT 0
@@ -62,6 +60,7 @@
#define INA238_ADC_CONFIG_DEFAULT 0xfb6a
/* Configure alerts to be based on averaged value (SLOWALERT) */
#define INA238_DIAG_ALERT_DEFAULT 0x2000
+#define INA238_DIAG_ALERT_APOL BIT(12)
/*
* This driver uses a fixed calibration value in order to scale current/power
* based on a fixed shunt resistor value. This allows for conversion within the
@@ -69,46 +68,32 @@
* relative to the shunt resistor value within the driver. This is similar to
* how the ina2xx driver handles current/power scaling.
*
- * The end result of this is that increasing shunt values (from a fixed 20 mOhm
- * shunt) increase the effective current/power accuracy whilst limiting the
- * range and decreasing shunt values decrease the effective accuracy but
- * increase the range.
- *
- * The value of the Current register is calculated given the following:
- * Current (A) = (shunt voltage register * 5) * calibration / 81920
- *
- * The maximum shunt voltage is 163.835 mV (0x7fff, ADC_RANGE = 0, gain = 4).
- * With the maximum current value of 0x7fff and a fixed shunt value results in
- * a calibration value of 16384 (0x4000).
- *
- * 0x7fff = (0x7fff * 5) * calibration / 81920
- * calibration = 0x4000
- *
- * Equivalent calibration is applied for the Power register (maximum value for
- * bus voltage is 102396.875 mV, 0x7fff), where the maximum power that can
- * occur is ~16776192 uW (register value 0x147a8):
- *
- * This scaling means the resulting values for Current and Power registers need
- * to be scaled by the difference between the fixed shunt resistor and the
- * actual shunt resistor:
+ * To achieve the best possible dynamic range, the value of the shunt voltage
+ * register should match the value of the current register. With that, the
+ * maximum shunt voltage register value of 0x7fff = 32,767 (163,835 uV at
+ * 5 uV/LSB) matches the maximum current register value,
+ * and no accuracy is lost. Experiments with a real chip show that this is
+ * achieved by setting the SHUNT_CAL register to a value of 0x1000 = 4,096.
+ * Per datasheet,
+ * SHUNT_CAL = 819.2 x 10^6 x CURRENT_LSB x Rshunt
+ * = 819,200,000 x CURRENT_LSB x Rshunt
+ * With SHUNT_CAL set to 4,096, we get
+ * CURRENT_LSB = 4,096 / (819,200,000 x Rshunt)
+ * Assuming an Rshunt value of 5 mOhm, we get
+ * CURRENT_LSB = 4,096 / (819,200,000 x 0.005) = 1mA
+ * and thus a dynamic range of 1mA ... 32,767mA, which is sufficient for most
+ * applications. The actual dynamic range is of course determined by the actual
+ * shunt resistor value.
*
- * shunt = 0x4000 / (819.2 * 10^6) / 0.001 = 20000 uOhms (with 1mA/lsb)
- *
- * Current (mA) = register value * 20000 / rshunt / 4 * gain
- * Power (mW) = 0.2 * register value * 20000 / rshunt / 4 * gain
- * (Specific for SQ52206)
- * Power (mW) = 0.24 * register value * 20000 / rshunt / 4 * gain
- * Energy (uJ) = 16 * 0.24 * register value * 20000 / rshunt / 4 * gain * 1000
+ * Power and energy values are scaled accordingly.
*/
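
As a concrete cross-check of the arithmetic above (a standalone sketch; the gain-of-4 default is an assumption here, matching how the probe code later derives current_lsb from the fixed 5 mOhm reference):

#include <stdio.h>

#define DIV_U64_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))

int main(void)
{
	/*
	 * SHUNT_CAL = 4096 => CURRENT_LSB = 4096 / (819.2e6 * R[Ohm]) A
	 *                                 = 5,000,000 / R[uOhm] uA (gain 4)
	 */
	unsigned long long rshunt_uohm = 5000;	/* 5 mOhm */
	unsigned long long lsb_ua = DIV_U64_ROUND_CLOSEST(5000000ULL, rshunt_uohm);

	printf("CURRENT_LSB = %llu uA\n", lsb_ua);	/* 1000 uA = 1 mA */
	return 0;
}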
-#define INA238_CALIBRATION_VALUE 16384
-#define INA238_FIXED_SHUNT 20000
+#define INA238_CALIBRATION_VALUE 4096
+#define INA238_FIXED_SHUNT 5000
+
+#define INA238_SHUNT_VOLTAGE_LSB 5000 /* 5 uV/lsb, in nV */
+#define INA238_BUS_VOLTAGE_LSB 3125000 /* 3.125 mV/lsb, in nV */
+#define SQ52206_BUS_VOLTAGE_LSB 3750000 /* 3.75 mV/lsb, in nV */
-#define INA238_SHUNT_VOLTAGE_LSB 5 /* 5 uV/lsb */
-#define INA238_BUS_VOLTAGE_LSB 3125 /* 3.125 mV/lsb */
-#define INA238_DIE_TEMP_LSB 1250000 /* 125.0000 mC/lsb */
-#define SQ52206_BUS_VOLTAGE_LSB 3750 /* 3.75 mV/lsb */
-#define SQ52206_DIE_TEMP_LSB 78125 /* 7.8125 mC/lsb */
-#define INA228_DIE_TEMP_LSB 78125 /* 7.8125 mC/lsb */
+#define NUNIT_PER_MUNIT 1000000 /* n[AV] -> m[AV] */
static const struct regmap_config ina238_regmap_config = {
.max_register = INA238_REGISTERS,
@@ -116,17 +101,17 @@ static const struct regmap_config ina238_regmap_config = {
.val_bits = 16,
};
-enum ina238_ids { ina238, ina237, sq52206, ina228 };
+enum ina238_ids { ina228, ina237, ina238, ina700, ina780, sq52206 };
struct ina238_config {
bool has_20bit_voltage_current; /* vshunt, vbus and current are 20-bit fields */
bool has_power_highest; /* chip detection power peak */
bool has_energy; /* chip detection energy */
- u8 temp_shift; /* fixed parameters for temp calculate */
- u32 power_calculate_factor; /* fixed parameters for power calculate */
+ u8 temp_resolution; /* temperature register resolution in bit */
u16 config_default; /* Power-on default state */
- int bus_voltage_lsb; /* use for temperature calculate, uV/lsb */
- int temp_lsb; /* use for temperature calculate */
+ u32 power_calculate_factor; /* fixed parameter for power calculation, from datasheet */
+ u32 bus_voltage_lsb; /* bus voltage LSB, in nV */
+ int current_lsb; /* current LSB, in uA */
};
struct ina238_data {
@@ -136,48 +121,68 @@ struct ina238_data {
struct regmap *regmap;
u32 rshunt;
int gain;
+ u32 voltage_lsb[2]; /* shunt, bus voltage LSB, in nV */
+ int current_lsb; /* current LSB, in uA */
+ int power_lsb; /* power LSB, in uW */
+ int energy_lsb; /* energy LSB, in uJ */
};
static const struct ina238_config ina238_config[] = {
- [ina238] = {
+ [ina228] = {
+ .has_20bit_voltage_current = true,
+ .has_energy = true,
+ .has_power_highest = false,
+ .power_calculate_factor = 20,
+ .config_default = INA238_CONFIG_DEFAULT,
+ .bus_voltage_lsb = INA238_BUS_VOLTAGE_LSB,
+ .temp_resolution = 16,
+ },
+ [ina237] = {
.has_20bit_voltage_current = false,
.has_energy = false,
.has_power_highest = false,
- .temp_shift = 4,
.power_calculate_factor = 20,
.config_default = INA238_CONFIG_DEFAULT,
.bus_voltage_lsb = INA238_BUS_VOLTAGE_LSB,
- .temp_lsb = INA238_DIE_TEMP_LSB,
+ .temp_resolution = 12,
},
- [ina237] = {
+ [ina238] = {
.has_20bit_voltage_current = false,
.has_energy = false,
.has_power_highest = false,
- .temp_shift = 4,
.power_calculate_factor = 20,
.config_default = INA238_CONFIG_DEFAULT,
.bus_voltage_lsb = INA238_BUS_VOLTAGE_LSB,
- .temp_lsb = INA238_DIE_TEMP_LSB,
+ .temp_resolution = 12,
},
- [sq52206] = {
+ [ina700] = {
.has_20bit_voltage_current = false,
.has_energy = true,
- .has_power_highest = true,
- .temp_shift = 0,
- .power_calculate_factor = 24,
- .config_default = SQ52206_CONFIG_DEFAULT,
- .bus_voltage_lsb = SQ52206_BUS_VOLTAGE_LSB,
- .temp_lsb = SQ52206_DIE_TEMP_LSB,
+ .has_power_highest = false,
+ .power_calculate_factor = 20,
+ .config_default = INA238_CONFIG_DEFAULT,
+ .bus_voltage_lsb = INA238_BUS_VOLTAGE_LSB,
+ .temp_resolution = 12,
+ .current_lsb = 480,
},
- [ina228] = {
- .has_20bit_voltage_current = true,
+ [ina780] = {
+ .has_20bit_voltage_current = false,
.has_energy = true,
.has_power_highest = false,
- .temp_shift = 0,
.power_calculate_factor = 20,
.config_default = INA238_CONFIG_DEFAULT,
.bus_voltage_lsb = INA238_BUS_VOLTAGE_LSB,
- .temp_lsb = INA228_DIE_TEMP_LSB,
+ .temp_resolution = 12,
+ .current_lsb = 2400,
+ },
+ [sq52206] = {
+ .has_20bit_voltage_current = false,
+ .has_energy = true,
+ .has_power_highest = true,
+ .power_calculate_factor = 24,
+ .config_default = SQ52206_CONFIG_DEFAULT,
+ .bus_voltage_lsb = SQ52206_BUS_VOLTAGE_LSB,
+ .temp_resolution = 16,
},
};
@@ -232,45 +237,28 @@ static int ina238_read_field_s20(const struct i2c_client *client, u8 reg, s32 *v
return 0;
}
-static int ina228_read_shunt_voltage(struct device *dev, u32 attr, int channel,
- long *val)
+static int ina228_read_voltage(struct ina238_data *data, int channel, long *val)
{
- struct ina238_data *data = dev_get_drvdata(dev);
- int regval;
- int err;
+ int reg = channel ? INA238_BUS_VOLTAGE : INA238_CURRENT;
+ u32 lsb = data->voltage_lsb[channel];
+ u32 factor = NUNIT_PER_MUNIT;
+ int err, regval;
- err = ina238_read_field_s20(data->client, INA238_SHUNT_VOLTAGE, &regval);
- if (err)
- return err;
-
- /*
- * gain of 1 -> LSB / 4
- * This field has 16 bit on ina238. ina228 adds another 4 bits of
- * precision. ina238 conversion factors can still be applied when
- * dividing by 16.
- */
- *val = (regval * INA238_SHUNT_VOLTAGE_LSB) * data->gain / (1000 * 4) / 16;
- return 0;
-}
-
-static int ina228_read_bus_voltage(struct device *dev, u32 attr, int channel,
- long *val)
-{
- struct ina238_data *data = dev_get_drvdata(dev);
- int regval;
- int err;
-
- err = ina238_read_field_s20(data->client, INA238_BUS_VOLTAGE, &regval);
- if (err)
- return err;
+ if (data->config->has_20bit_voltage_current) {
+ err = ina238_read_field_s20(data->client, reg, &regval);
+ if (err)
+ return err;
+ /* Adjust accuracy: LSB in units of 500 pV */
+ lsb /= 8;
+ factor *= 2;
+ } else {
+ err = regmap_read(data->regmap, reg, &regval);
+ if (err)
+ return err;
+ regval = (s16)regval;
+ }
- /*
- * gain of 1 -> LSB / 4
- * This field has 16 bit on ina238. ina228 adds another 4 bits of
- * precision. ina238 conversion factors can still be applied when
- * dividing by 16.
- */
- *val = (regval * data->config->bus_voltage_lsb) / 1000 / 16;
+ *val = DIV_S64_ROUND_CLOSEST((s64)regval * lsb, factor);
return 0;
}
@@ -278,18 +266,16 @@ static int ina238_read_in(struct device *dev, u32 attr, int channel,
long *val)
{
struct ina238_data *data = dev_get_drvdata(dev);
- int reg, mask;
+ int reg, mask = 0;
int regval;
int err;
+ if (attr == hwmon_in_input)
+ return ina228_read_voltage(data, channel, val);
+
switch (channel) {
case 0:
switch (attr) {
- case hwmon_in_input:
- if (data->config->has_20bit_voltage_current)
- return ina228_read_shunt_voltage(dev, attr, channel, val);
- reg = INA238_SHUNT_VOLTAGE;
- break;
case hwmon_in_max:
reg = INA238_SHUNT_OVER_VOLTAGE;
break;
@@ -310,11 +296,6 @@ static int ina238_read_in(struct device *dev, u32 attr, int channel,
break;
case 1:
switch (attr) {
- case hwmon_in_input:
- if (data->config->has_20bit_voltage_current)
- return ina228_read_bus_voltage(dev, attr, channel, val);
- reg = INA238_BUS_VOLTAGE;
- break;
case hwmon_in_max:
reg = INA238_BUS_OVER_VOLTAGE;
break;
@@ -341,112 +322,126 @@ static int ina238_read_in(struct device *dev, u32 attr, int channel,
if (err < 0)
return err;
- switch (attr) {
- case hwmon_in_input:
- case hwmon_in_max:
- case hwmon_in_min:
- /* signed register, value in mV */
- regval = (s16)regval;
- if (channel == 0)
- /* gain of 1 -> LSB / 4 */
- *val = (regval * INA238_SHUNT_VOLTAGE_LSB) *
- data->gain / (1000 * 4);
- else
- *val = (regval * data->config->bus_voltage_lsb) / 1000;
- break;
- case hwmon_in_max_alarm:
- case hwmon_in_min_alarm:
+ if (mask)
*val = !!(regval & mask);
- break;
- }
+ else
+ *val = DIV_S64_ROUND_CLOSEST((s64)(s16)regval * data->voltage_lsb[channel],
+ NUNIT_PER_MUNIT);
return 0;
}
-static int ina238_write_in(struct device *dev, u32 attr, int channel,
- long val)
+static int ina238_write_in(struct device *dev, u32 attr, int channel, long val)
{
struct ina238_data *data = dev_get_drvdata(dev);
+ static const int low_limits[2] = {-164, 0};
+ static const int high_limits[2] = {164, 150000};
+ static const u8 low_regs[2] = {INA238_SHUNT_UNDER_VOLTAGE, INA238_BUS_UNDER_VOLTAGE};
+ static const u8 high_regs[2] = {INA238_SHUNT_OVER_VOLTAGE, INA238_BUS_OVER_VOLTAGE};
int regval;
- if (attr != hwmon_in_max && attr != hwmon_in_min)
- return -EOPNOTSUPP;
-
- /* convert decimal to register value */
- switch (channel) {
- case 0:
- /* signed value, clamp to max range +/-163 mV */
- regval = clamp_val(val, -163, 163);
- regval = (regval * 1000 * 4) /
- (INA238_SHUNT_VOLTAGE_LSB * data->gain);
- regval = clamp_val(regval, S16_MIN, S16_MAX);
+ /* Initial clamp to avoid overflows */
+ val = clamp_val(val, low_limits[channel], high_limits[channel]);
+ val = DIV_S64_ROUND_CLOSEST((s64)val * NUNIT_PER_MUNIT, data->voltage_lsb[channel]);
+ /* Final clamp to register limits */
+ regval = clamp_val(val, S16_MIN, S16_MAX) & 0xffff;
- switch (attr) {
- case hwmon_in_max:
- return regmap_write(data->regmap,
- INA238_SHUNT_OVER_VOLTAGE, regval);
- case hwmon_in_min:
- return regmap_write(data->regmap,
- INA238_SHUNT_UNDER_VOLTAGE, regval);
- default:
- return -EOPNOTSUPP;
- }
- case 1:
- /* signed value, positive values only. Clamp to max 102.396 V */
- regval = clamp_val(val, 0, 102396);
- regval = (regval * 1000) / data->config->bus_voltage_lsb;
- regval = clamp_val(regval, 0, S16_MAX);
-
- switch (attr) {
- case hwmon_in_max:
- return regmap_write(data->regmap,
- INA238_BUS_OVER_VOLTAGE, regval);
- case hwmon_in_min:
- return regmap_write(data->regmap,
- INA238_BUS_UNDER_VOLTAGE, regval);
- default:
- return -EOPNOTSUPP;
- }
+ switch (attr) {
+ case hwmon_in_min:
+ return regmap_write(data->regmap, low_regs[channel], regval);
+ case hwmon_in_max:
+ return regmap_write(data->regmap, high_regs[channel], regval);
default:
return -EOPNOTSUPP;
}
}
-static int ina238_read_current(struct device *dev, u32 attr, long *val)
+static int __ina238_read_curr(struct ina238_data *data, long *val)
+{
+ u32 lsb = data->current_lsb;
+ int err, regval;
+
+ if (data->config->has_20bit_voltage_current) {
+ err = ina238_read_field_s20(data->client, INA238_CURRENT, &regval);
+ if (err)
+ return err;
+ lsb /= 16; /* Adjust accuracy */
+ } else {
+ err = regmap_read(data->regmap, INA238_CURRENT, &regval);
+ if (err)
+ return err;
+ regval = (s16)regval;
+ }
+
+ *val = DIV_S64_ROUND_CLOSEST((s64)regval * lsb, 1000);
+ return 0;
+}
+
+static int ina238_read_curr(struct device *dev, u32 attr, long *val)
{
struct ina238_data *data = dev_get_drvdata(dev);
+ int reg, mask = 0;
int regval;
int err;
- switch (attr) {
- case hwmon_curr_input:
- if (data->config->has_20bit_voltage_current) {
- err = ina238_read_field_s20(data->client, INA238_CURRENT, &regval);
- if (err)
- return err;
- } else {
- err = regmap_read(data->regmap, INA238_CURRENT, &regval);
- if (err < 0)
- return err;
- /* sign-extend */
- regval = (s16)regval;
- }
-
- /* Signed register, fixed 1mA current lsb. result in mA */
- *val = div_s64((s64)regval * INA238_FIXED_SHUNT * data->gain,
- data->rshunt * 4);
+ if (attr == hwmon_curr_input)
+ return __ina238_read_curr(data, val);
- /* Account for 4 bit offset */
- if (data->config->has_20bit_voltage_current)
- *val /= 16;
+ switch (attr) {
+ case hwmon_curr_min:
+ reg = INA238_SHUNT_UNDER_VOLTAGE;
+ break;
+ case hwmon_curr_min_alarm:
+ reg = INA238_DIAG_ALERT;
+ mask = INA238_DIAG_ALERT_SHNTUL;
+ break;
+ case hwmon_curr_max:
+ reg = INA238_SHUNT_OVER_VOLTAGE;
+ break;
+ case hwmon_curr_max_alarm:
+ reg = INA238_DIAG_ALERT;
+ mask = INA238_DIAG_ALERT_SHNTOL;
break;
default:
return -EOPNOTSUPP;
}
+ err = regmap_read(data->regmap, reg, &regval);
+ if (err < 0)
+ return err;
+
+ if (mask)
+ *val = !!(regval & mask);
+ else
+ *val = DIV_S64_ROUND_CLOSEST((s64)(s16)regval * data->current_lsb, 1000);
+
return 0;
}
+static int ina238_write_curr(struct device *dev, u32 attr, long val)
+{
+ struct ina238_data *data = dev_get_drvdata(dev);
+ int regval;
+
+ /* Set baseline range to avoid over/underflows */
+ val = clamp_val(val, -1000000, 1000000);
+ /* Scale */
+ val = DIV_ROUND_CLOSEST(val * 1000, data->current_lsb);
+ /* Clamp to register size */
+ regval = clamp_val(val, S16_MIN, S16_MAX) & 0xffff;
+
+ switch (attr) {
+ case hwmon_curr_min:
+ return regmap_write(data->regmap, INA238_SHUNT_UNDER_VOLTAGE,
+ regval);
+ case hwmon_curr_max:
+ return regmap_write(data->regmap, INA238_SHUNT_OVER_VOLTAGE,
+ regval);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int ina238_read_power(struct device *dev, u32 attr, long *val)
{
struct ina238_data *data = dev_get_drvdata(dev);
@@ -460,9 +455,7 @@ static int ina238_read_power(struct device *dev, u32 attr, long *val)
if (err)
return err;
- /* Fixed 1mA lsb, scaled by 1000000 to have result in uW */
- power = div_u64(regval * 1000ULL * INA238_FIXED_SHUNT * data->gain *
- data->config->power_calculate_factor, 4 * 100 * data->rshunt);
+ power = (long long)regval * data->power_lsb;
/* Clamp value to maximum value of long */
*val = clamp_val(power, 0, LONG_MAX);
break;
@@ -471,9 +464,7 @@ static int ina238_read_power(struct device *dev, u32 attr, long *val)
if (err)
return err;
- /* Fixed 1mA lsb, scaled by 1000000 to have result in uW */
- power = div_u64(regval * 1000ULL * INA238_FIXED_SHUNT * data->gain *
- data->config->power_calculate_factor, 4 * 100 * data->rshunt);
+ power = (long long)regval * data->power_lsb;
/* Clamp value to maximum value of long */
*val = clamp_val(power, 0, LONG_MAX);
break;
@@ -486,8 +477,7 @@ static int ina238_read_power(struct device *dev, u32 attr, long *val)
* Truncated 24-bit compare register, lower 8-bits are
* truncated. Same conversion to/from uW as POWER register.
*/
- power = div_u64((regval << 8) * 1000ULL * INA238_FIXED_SHUNT * data->gain *
- data->config->power_calculate_factor, 4 * 100 * data->rshunt);
+ power = ((long long)regval << 8) * data->power_lsb;
/* Clamp value to maximum value of long */
*val = clamp_val(power, 0, LONG_MAX);
break;
@@ -505,25 +495,26 @@ static int ina238_read_power(struct device *dev, u32 attr, long *val)
return 0;
}
-static int ina238_write_power(struct device *dev, u32 attr, long val)
+static int ina238_write_power_max(struct device *dev, long val)
{
struct ina238_data *data = dev_get_drvdata(dev);
- long regval;
-
- if (attr != hwmon_power_max)
- return -EOPNOTSUPP;
/*
 * Unsigned positive values. Compared against the 24-bit power register,
* lower 8-bits are truncated. Same conversion to/from uW as POWER
* register.
+ * The first clamp_val() is to establish a baseline to avoid overflows.
*/
- regval = clamp_val(val, 0, LONG_MAX);
- regval = div_u64(val * 4 * 100 * data->rshunt, data->config->power_calculate_factor *
- 1000ULL * INA238_FIXED_SHUNT * data->gain);
- regval = clamp_val(regval >> 8, 0, U16_MAX);
+ val = clamp_val(val, 0, LONG_MAX / 2);
+ val = DIV_ROUND_CLOSEST(val, data->power_lsb);
+ val = clamp_val(val >> 8, 0, U16_MAX);
+
+ return regmap_write(data->regmap, INA238_POWER_LIMIT, val);
+}
- return regmap_write(data->regmap, INA238_POWER_LIMIT, regval);
+static int ina238_temp_from_reg(s16 regval, u8 resolution)
+{
+ return ((regval >> (16 - resolution)) * 1000) >> (resolution - 9);
}
static int ina238_read_temp(struct device *dev, u32 attr, long *val)
@@ -537,17 +528,14 @@ static int ina238_read_temp(struct device *dev, u32 attr, long *val)
err = regmap_read(data->regmap, INA238_DIE_TEMP, &regval);
if (err)
return err;
- /* Signed, result in mC */
- *val = div_s64(((s64)((s16)regval) >> data->config->temp_shift) *
- (s64)data->config->temp_lsb, 10000);
+ *val = ina238_temp_from_reg(regval, data->config->temp_resolution);
break;
case hwmon_temp_max:
err = regmap_read(data->regmap, INA238_TEMP_LIMIT, &regval);
if (err)
return err;
/* Signed, result in mC */
- *val = div_s64(((s64)((s16)regval) >> data->config->temp_shift) *
- (s64)data->config->temp_lsb, 10000);
+ *val = ina238_temp_from_reg(regval, data->config->temp_resolution);
break;
case hwmon_temp_max_alarm:
err = regmap_read(data->regmap, INA238_DIAG_ALERT, &regval);
@@ -563,39 +551,37 @@ static int ina238_read_temp(struct device *dev, u32 attr, long *val)
return 0;
}
-static int ina238_write_temp(struct device *dev, u32 attr, long val)
+static u16 ina238_temp_to_reg(long val, u8 resolution)
{
- struct ina238_data *data = dev_get_drvdata(dev);
- int regval;
+ int fraction = 1000 - DIV_ROUND_CLOSEST(1000, BIT(resolution - 9));
- if (attr != hwmon_temp_max)
- return -EOPNOTSUPP;
+ val = clamp_val(val, -255000 - fraction, 255000 + fraction);
+
+ return (DIV_ROUND_CLOSEST(val << (resolution - 9), 1000) << (16 - resolution)) & 0xffff;
+}
- /* Signed */
- regval = clamp_val(val, -40000, 125000);
- regval = div_s64(val * 10000, data->config->temp_lsb) << data->config->temp_shift;
- regval = clamp_val(regval, S16_MIN, S16_MAX) & (0xffff << data->config->temp_shift);
+static int ina238_write_temp_max(struct device *dev, long val)
+{
+ struct ina238_data *data = dev_get_drvdata(dev);
+ int regval;
+ regval = ina238_temp_to_reg(val, data->config->temp_resolution);
return regmap_write(data->regmap, INA238_TEMP_LIMIT, regval);
}
-static ssize_t energy1_input_show(struct device *dev,
- struct device_attribute *da, char *buf)
+static int ina238_read_energy(struct device *dev, s64 *energy)
{
struct ina238_data *data = dev_get_drvdata(dev);
- int ret;
u64 regval;
- u64 energy;
+ int ret;
ret = ina238_read_reg40(data->client, SQ52206_ENERGY, &regval);
if (ret)
return ret;
/* result in uJ */
- energy = div_u64(regval * INA238_FIXED_SHUNT * data->gain * 16 * 10 *
- data->config->power_calculate_factor, 4 * data->rshunt);
-
- return sysfs_emit(buf, "%llu\n", energy);
+ *energy = regval * data->energy_lsb;
+ return 0;
}
static int ina238_read(struct device *dev, enum hwmon_sensor_types type,
@@ -605,9 +591,11 @@ static int ina238_read(struct device *dev, enum hwmon_sensor_types type,
case hwmon_in:
return ina238_read_in(dev, attr, channel, val);
case hwmon_curr:
- return ina238_read_current(dev, attr, val);
+ return ina238_read_curr(dev, attr, val);
case hwmon_power:
return ina238_read_power(dev, attr, val);
+ case hwmon_energy64:
+ return ina238_read_energy(dev, (s64 *)val);
case hwmon_temp:
return ina238_read_temp(dev, attr, val);
default:
@@ -628,11 +616,14 @@ static int ina238_write(struct device *dev, enum hwmon_sensor_types type,
case hwmon_in:
err = ina238_write_in(dev, attr, channel, val);
break;
+ case hwmon_curr:
+ err = ina238_write_curr(dev, attr, val);
+ break;
case hwmon_power:
- err = ina238_write_power(dev, attr, val);
+ err = ina238_write_power_max(dev, val);
break;
case hwmon_temp:
- err = ina238_write_temp(dev, attr, val);
+ err = ina238_write_temp_max(dev, val);
break;
default:
err = -EOPNOTSUPP;
@@ -649,6 +640,7 @@ static umode_t ina238_is_visible(const void *drvdata,
{
const struct ina238_data *data = drvdata;
bool has_power_highest = data->config->has_power_highest;
+ bool has_energy = data->config->has_energy;
switch (type) {
case hwmon_in:
@@ -666,7 +658,12 @@ static umode_t ina238_is_visible(const void *drvdata,
case hwmon_curr:
switch (attr) {
case hwmon_curr_input:
+ case hwmon_curr_max_alarm:
+ case hwmon_curr_min_alarm:
return 0444;
+ case hwmon_curr_max:
+ case hwmon_curr_min:
+ return 0644;
default:
return 0;
}
@@ -684,6 +681,11 @@ static umode_t ina238_is_visible(const void *drvdata,
default:
return 0;
}
+ case hwmon_energy64:
+ /* hwmon_energy_input */
+ if (has_energy)
+ return 0444;
+ return 0;
case hwmon_temp:
switch (attr) {
case hwmon_temp_input:
@@ -711,11 +713,14 @@ static const struct hwmon_channel_info * const ina238_info[] = {
INA238_HWMON_IN_CONFIG),
HWMON_CHANNEL_INFO(curr,
/* 0: current through shunt */
- HWMON_C_INPUT),
+ HWMON_C_INPUT | HWMON_C_MIN | HWMON_C_MIN_ALARM |
+ HWMON_C_MAX | HWMON_C_MAX_ALARM),
HWMON_CHANNEL_INFO(power,
/* 0: power */
HWMON_P_INPUT | HWMON_P_MAX |
HWMON_P_MAX_ALARM | HWMON_P_INPUT_HIGHEST),
+ HWMON_CHANNEL_INFO(energy64,
+ HWMON_E_INPUT),
HWMON_CHANNEL_INFO(temp,
/* 0: die temperature */
HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_ALARM),
@@ -733,18 +738,8 @@ static const struct hwmon_chip_info ina238_chip_info = {
.info = ina238_info,
};
-/* energy attributes are 5 bytes wide so we need u64 */
-static DEVICE_ATTR_RO(energy1_input);
-
-static struct attribute *ina238_attrs[] = {
- &dev_attr_energy1_input.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(ina238);
-
static int ina238_probe(struct i2c_client *client)
{
- struct ina2xx_platform_data *pdata = dev_get_platdata(&client->dev);
struct device *dev = &client->dev;
struct device *hwmon_dev;
struct ina238_data *data;
@@ -770,33 +765,48 @@ static int ina238_probe(struct i2c_client *client)
return PTR_ERR(data->regmap);
}
- /* load shunt value */
- data->rshunt = INA238_RSHUNT_DEFAULT;
- if (device_property_read_u32(dev, "shunt-resistor", &data->rshunt) < 0 && pdata)
- data->rshunt = pdata->shunt_uohms;
- if (data->rshunt == 0) {
- dev_err(dev, "invalid shunt resister value %u\n", data->rshunt);
- return -EINVAL;
- }
-
- /* load shunt gain value */
- if (device_property_read_u32(dev, "ti,shunt-gain", &data->gain) < 0)
- data->gain = 4; /* Default of ADCRANGE = 0 */
- if (data->gain != 1 && data->gain != 2 && data->gain != 4) {
- dev_err(dev, "invalid shunt gain value %u\n", data->gain);
- return -EINVAL;
- }
-
/* Setup CONFIG register */
config = data->config->config_default;
- if (chip == sq52206) {
- if (data->gain == 1)
- config |= SQ52206_CONFIG_ADCRANGE_HIGH; /* ADCRANGE = 10/11 is /1 */
- else if (data->gain == 2)
- config |= SQ52206_CONFIG_ADCRANGE_LOW; /* ADCRANGE = 01 is /2 */
- } else if (data->gain == 1) {
- config |= INA238_CONFIG_ADCRANGE; /* ADCRANGE = 1 is /1 */
+ if (data->config->current_lsb) {
+ data->voltage_lsb[0] = INA238_SHUNT_VOLTAGE_LSB;
+ data->current_lsb = data->config->current_lsb;
+ } else {
+ /* load shunt value */
+ if (device_property_read_u32(dev, "shunt-resistor", &data->rshunt) < 0)
+ data->rshunt = INA238_RSHUNT_DEFAULT;
+ if (data->rshunt == 0) {
+ dev_err(dev, "invalid shunt resister value %u\n", data->rshunt);
+ return -EINVAL;
+ }
+
+ /* load shunt gain value */
+ if (device_property_read_u32(dev, "ti,shunt-gain", &data->gain) < 0)
+ data->gain = 4; /* Default of ADCRANGE = 0 */
+ if (data->gain != 1 && data->gain != 2 && data->gain != 4) {
+ dev_err(dev, "invalid shunt gain value %u\n", data->gain);
+ return -EINVAL;
+ }
+
+ /* Setup SHUNT_CALIBRATION register with fixed value */
+ ret = regmap_write(data->regmap, INA238_SHUNT_CALIBRATION,
+ INA238_CALIBRATION_VALUE);
+ if (ret < 0) {
+ dev_err(dev, "error configuring the device: %d\n", ret);
+ return -ENODEV;
+ }
+ if (chip == sq52206) {
+ if (data->gain == 1) /* ADCRANGE = 10/11 is /1 */
+ config |= SQ52206_CONFIG_ADCRANGE_HIGH;
+ else if (data->gain == 2) /* ADCRANGE = 01 is /2 */
+ config |= SQ52206_CONFIG_ADCRANGE_LOW;
+ } else if (data->gain == 1) { /* ADCRANGE = 1 is /1 */
+ config |= INA238_CONFIG_ADCRANGE;
+ }
+ data->voltage_lsb[0] = INA238_SHUNT_VOLTAGE_LSB * data->gain / 4;
+ data->current_lsb = DIV_U64_ROUND_CLOSEST(250ULL * INA238_FIXED_SHUNT * data->gain,
+ data->rshunt);
}
+
ret = regmap_write(data->regmap, INA238_CONFIG, config);
if (ret < 0) {
dev_err(dev, "error configuring the device: %d\n", ret);
@@ -811,31 +821,33 @@ static int ina238_probe(struct i2c_client *client)
return -ENODEV;
}
- /* Setup SHUNT_CALIBRATION register with fixed value */
- ret = regmap_write(data->regmap, INA238_SHUNT_CALIBRATION,
- INA238_CALIBRATION_VALUE);
- if (ret < 0) {
- dev_err(dev, "error configuring the device: %d\n", ret);
- return -ENODEV;
- }
-
/* Setup alert/alarm configuration */
- ret = regmap_write(data->regmap, INA238_DIAG_ALERT,
- INA238_DIAG_ALERT_DEFAULT);
+ config = INA238_DIAG_ALERT_DEFAULT;
+ if (device_property_read_bool(dev, "ti,alert-polarity-active-high"))
+ config |= INA238_DIAG_ALERT_APOL;
+
+ ret = regmap_write(data->regmap, INA238_DIAG_ALERT, config);
if (ret < 0) {
dev_err(dev, "error configuring the device: %d\n", ret);
return -ENODEV;
}
+ data->voltage_lsb[1] = data->config->bus_voltage_lsb;
+
+ data->power_lsb = DIV_ROUND_CLOSEST(data->current_lsb *
+ data->config->power_calculate_factor,
+ 100);
+
+ data->energy_lsb = data->power_lsb * 16;
+
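
To make the LSB chain concrete, here is a standalone sketch of the three derivations above. The fixed-shunt reference value and the power factor below are assumptions for illustration, not driver constants:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t fixed_shunt = 20000;  /* assumed uOhm reference value */
	uint32_t gain = 4;             /* from "ti,shunt-gain" */
	uint32_t rshunt = 20000;       /* uOhm, from "shunt-resistor" */
	uint32_t power_factor = 20;    /* assumed chip-specific factor */

	/* DIV_U64_ROUND_CLOSEST(250 * fixed_shunt * gain, rshunt) */
	uint64_t current_lsb = (250ULL * fixed_shunt * gain + rshunt / 2) / rshunt;
	/* DIV_ROUND_CLOSEST(current_lsb * power_factor, 100) */
	uint64_t power_lsb = (current_lsb * power_factor + 50) / 100;
	uint64_t energy_lsb = power_lsb * 16;

	printf("current_lsb=%llu power_lsb=%llu energy_lsb=%llu\n",
	       (unsigned long long)current_lsb,
	       (unsigned long long)power_lsb,
	       (unsigned long long)energy_lsb);
	return 0;
}
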
hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, data,
- &ina238_chip_info,
- data->config->has_energy ?
- ina238_groups : NULL);
+ &ina238_chip_info, NULL);
if (IS_ERR(hwmon_dev))
return PTR_ERR(hwmon_dev);
- dev_info(dev, "power monitor %s (Rshunt = %u uOhm, gain = %u)\n",
- client->name, data->rshunt, data->gain);
+ if (data->rshunt)
+ dev_info(dev, "power monitor %s (Rshunt = %u uOhm, gain = %u)\n",
+ client->name, data->rshunt, data->gain);
return 0;
}
@@ -844,6 +856,8 @@ static const struct i2c_device_id ina238_id[] = {
{ "ina228", ina228 },
{ "ina237", ina237 },
{ "ina238", ina238 },
+ { "ina700", ina700 },
+ { "ina780", ina780 },
{ "sq52206", sq52206 },
{ }
};
@@ -863,6 +877,14 @@ static const struct of_device_id __maybe_unused ina238_of_match[] = {
.data = (void *)ina238
},
{
+ .compatible = "ti,ina700",
+ .data = (void *)ina700
+ },
+ {
+ .compatible = "ti,ina780",
+ .data = (void *)ina780
+ },
+ {
.compatible = "silergy,sq52206",
.data = (void *)sq52206
},
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index babf2413d666..b98d5ec72c4f 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -84,6 +84,13 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
*/
#define AMD_I3255_STR "3255"
+/*
+ * PCI Device IDs for AMD's Family 1Ah-based SOCs.
+ * Defined locally since these IDs are not shared.
+ */
+#define PCI_DEVICE_ID_AMD_1AH_M50H_DF_F3 0x12cb
+#define PCI_DEVICE_ID_AMD_1AH_M90H_DF_F3 0x127b
+
struct k10temp_data {
struct pci_dev *pdev;
void (*read_htcreg)(struct pci_dev *pdev, u32 *regval);
@@ -556,7 +563,10 @@ static const struct pci_device_id k10temp_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M50H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M60H_DF_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M70H_DF_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M90H_DF_F3) },
{ PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
{}
};
diff --git a/drivers/hwmon/lenovo-ec-sensors.c b/drivers/hwmon/lenovo-ec-sensors.c
index 143fb79713f7..8681bbf6665b 100644
--- a/drivers/hwmon/lenovo-ec-sensors.c
+++ b/drivers/hwmon/lenovo-ec-sensors.c
@@ -66,7 +66,7 @@ enum systems {
LENOVO_P8,
};
-static int px_temp_map[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
+static int px_temp_map[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 31, 32};
static const char * const lenovo_px_ec_temp_label[] = {
"CPU1",
@@ -84,9 +84,29 @@ static const char * const lenovo_px_ec_temp_label[] = {
"PCI_Z3",
"PCI_Z4",
"AMB",
+ "PSU1",
+ "PSU2",
};
-static int gen_temp_map[] = {0, 2, 3, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
+static int p8_temp_map[] = {0, 1, 2, 8, 9, 13, 14, 15, 16, 17, 19, 20, 33};
+
+static const char * const lenovo_p8_ec_temp_label[] = {
+ "CPU1",
+ "CPU_DIMM_BANK1",
+ "CPU_DIMM_BANK2",
+ "M2_Z2R",
+ "M2_Z3R",
+ "DIMM_RIGHT",
+ "DIMM_LEFT",
+ "PCI_Z1",
+ "PCI_Z2",
+ "PCI_Z3",
+ "AMB",
+ "REAR_VR",
+ "PSU",
+};
+
+static int gen_temp_map[] = {0, 2, 3, 6, 7, 8, 9, 10, 11, 12, 13, 14, 31};
static const char * const lenovo_gen_ec_temp_label[] = {
"CPU1",
@@ -101,6 +121,7 @@ static const char * const lenovo_gen_ec_temp_label[] = {
"PCI_Z3",
"PCI_Z4",
"AMB",
+ "PSU",
};
static int px_fan_map[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
@@ -293,6 +314,8 @@ static const struct hwmon_channel_info *lenovo_ec_hwmon_info_px[] = {
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL),
HWMON_CHANNEL_INFO(fan,
HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
@@ -327,6 +350,7 @@ static const struct hwmon_channel_info *lenovo_ec_hwmon_info_p8[] = {
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL),
HWMON_CHANNEL_INFO(fan,
HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
@@ -359,6 +383,7 @@ static const struct hwmon_channel_info *lenovo_ec_hwmon_info_p7[] = {
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL),
HWMON_CHANNEL_INFO(fan,
HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
@@ -388,6 +413,7 @@ static const struct hwmon_channel_info *lenovo_ec_hwmon_info_p5[] = {
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL),
HWMON_CHANNEL_INFO(fan,
HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
@@ -545,9 +571,9 @@ static int lenovo_ec_probe(struct platform_device *pdev)
break;
case 3:
ec_data->fan_labels = p8_ec_fan_label;
- ec_data->temp_labels = lenovo_gen_ec_temp_label;
+ ec_data->temp_labels = lenovo_p8_ec_temp_label;
ec_data->fan_map = p8_fan_map;
- ec_data->temp_map = gen_temp_map;
+ ec_data->temp_map = p8_temp_map;
lenovo_ec_chip_info.info = lenovo_ec_hwmon_info_p8;
break;
default:
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 9b4875e2fd8d..3c23b6e8e1bf 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -39,6 +39,7 @@ enum lm75_type { /* keep sorted in alphabetical order */
max6626,
max31725,
mcp980x,
+ p3t1750,
p3t1755,
pct2075,
stds75,
@@ -222,6 +223,13 @@ static const struct lm75_params device_params[] = {
.default_resolution = 9,
.default_sample_time = MSEC_PER_SEC / 18,
},
+ [p3t1750] = {
+ .clr_mask = 1 << 1 | 1 << 7, /* disable SMBAlert and one-shot */
+ .default_resolution = 12,
+ .default_sample_time = 55,
+ .num_sample_times = 4,
+ .sample_times = (unsigned int []){ 28, 55, 110, 220 },
+ },
[p3t1755] = {
.clr_mask = 1 << 1 | 1 << 7, /* disable SMBAlert and one-shot */
.default_resolution = 12,
@@ -805,6 +813,7 @@ static const struct i2c_device_id lm75_i2c_ids[] = {
{ "max31725", max31725, },
{ "max31726", max31725, },
{ "mcp980x", mcp980x, },
+ { "p3t1750", p3t1750, },
{ "p3t1755", p3t1755, },
{ "pct2075", pct2075, },
{ "stds75", stds75, },
@@ -917,6 +926,10 @@ static const struct of_device_id __maybe_unused lm75_of_match[] = {
.data = (void *)mcp980x
},
{
+ .compatible = "nxp,p3t1750",
+ .data = (void *)p3t1750
+ },
+ {
.compatible = "nxp,p3t1755",
.data = (void *)p3t1755
},
diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c
index a07e2eb93c71..1fcd320d6161 100644
--- a/drivers/hwmon/ltc2992.c
+++ b/drivers/hwmon/ltc2992.c
@@ -339,8 +339,8 @@ static int ltc2992_config_gpio(struct ltc2992_state *st)
st->gc.ngpio = ARRAY_SIZE(st->gpio_names);
st->gc.get = ltc2992_gpio_get;
st->gc.get_multiple = ltc2992_gpio_get_multiple;
- st->gc.set_rv = ltc2992_gpio_set;
- st->gc.set_multiple_rv = ltc2992_gpio_set_multiple;
+ st->gc.set = ltc2992_gpio_set;
+ st->gc.set_multiple = ltc2992_gpio_set_multiple;
ret = devm_gpiochip_add_data(&st->client->dev, &st->gc, st);
if (ret)
diff --git a/drivers/hwmon/ltc4282.c b/drivers/hwmon/ltc4282.c
index dbb30abcd343..1d664a2d7b3c 100644
--- a/drivers/hwmon/ltc4282.c
+++ b/drivers/hwmon/ltc4282.c
@@ -1693,8 +1693,7 @@ static int ltc4282_probe(struct i2c_client *i2c)
st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
if (!st)
- return dev_err_probe(dev, -ENOMEM,
- "Failed to allocate memory\n");
+ return -ENOMEM;
st->map = devm_regmap_init_i2c(i2c, &ltc4282_regmap_config);
if (IS_ERR(st->map))
diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
index a5f89aab3fb4..137a90dd2075 100644
--- a/drivers/hwmon/mlxreg-fan.c
+++ b/drivers/hwmon/mlxreg-fan.c
@@ -63,12 +63,14 @@ struct mlxreg_fan;
* @reg: register offset;
* @mask: fault mask;
* @prsnt: present register offset;
+ * @shift: tacho presence bit shift;
*/
struct mlxreg_fan_tacho {
bool connected;
u32 reg;
u32 mask;
u32 prsnt;
+ u32 shift;
};
/*
@@ -113,8 +115,8 @@ struct mlxreg_fan {
int divider;
};
-static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
- unsigned long state);
+static int _mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state, bool thermal);
static int
mlxreg_fan_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
@@ -143,8 +145,10 @@ mlxreg_fan_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
/*
* Map channel to presence bit - drawer can be equipped with
* one or few FANs, while presence is indicated per drawer.
+ * Shift channel value if necessary to align with register value.
*/
- if (BIT(channel / fan->tachos_per_drwr) & regval) {
+ if (BIT(rol32(channel, tacho->shift) / fan->tachos_per_drwr) &
+ regval) {
/* FAN is not connected - return zero for FAN speed. */
*val = 0;
return 0;
@@ -224,8 +228,9 @@ mlxreg_fan_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
* last thermal state.
*/
if (pwm->last_hwmon_state >= pwm->last_thermal_state)
- return mlxreg_fan_set_cur_state(pwm->cdev,
- pwm->last_hwmon_state);
+ return _mlxreg_fan_set_cur_state(pwm->cdev,
+ pwm->last_hwmon_state,
+ false);
return 0;
}
return regmap_write(fan->regmap, pwm->reg, val);
@@ -357,9 +362,8 @@ static int mlxreg_fan_get_cur_state(struct thermal_cooling_device *cdev,
return 0;
}
-static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
- unsigned long state)
-
+static int _mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state, bool thermal)
{
struct mlxreg_fan_pwm *pwm = cdev->devdata;
struct mlxreg_fan *fan = pwm->fan;
@@ -369,7 +373,8 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
return -EINVAL;
/* Save thermal state. */
- pwm->last_thermal_state = state;
+ if (thermal)
+ pwm->last_thermal_state = state;
state = max_t(unsigned long, state, pwm->last_hwmon_state);
err = regmap_write(fan->regmap, pwm->reg,
@@ -381,6 +386,13 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
return 0;
}
+static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+
+{
+ return _mlxreg_fan_set_cur_state(cdev, state, true);
+}
+
static const struct thermal_cooling_device_ops mlxreg_fan_cooling_ops = {
.get_max_state = mlxreg_fan_get_max_state,
.get_cur_state = mlxreg_fan_get_cur_state,
@@ -400,7 +412,7 @@ static int mlxreg_fan_connect_verify(struct mlxreg_fan *fan,
return err;
}
- return !!(regval & data->bit);
+ return data->slot ? (data->slot <= regval ? 1 : 0) : !!(regval & data->bit);
}
static int mlxreg_pwm_connect_verify(struct mlxreg_fan *fan,
@@ -537,7 +549,15 @@ static int mlxreg_fan_config(struct mlxreg_fan *fan,
return err;
}
- drwr_avail = hweight32(regval);
+ /*
+ * The number of drawers can be specified in registers by a counter on newer
+ * systems, or by a bitmask on older systems. When the data is provided by a
+ * counter, this is indicated through the 'version' field.
+ */
+ if (pdata->version)
+ drwr_avail = regval;
+ else
+ drwr_avail = hweight32(regval);
if (!tacho_avail || !drwr_avail || tacho_avail < drwr_avail) {
dev_err(fan->dev, "Configuration is invalid: drawers num %d tachos num %d\n",
drwr_avail, tacho_avail);
@@ -561,15 +581,14 @@ static int mlxreg_fan_cooling_config(struct device *dev, struct mlxreg_fan *fan)
if (!pwm->connected)
continue;
pwm->fan = fan;
+ /* Set minimal PWM speed. */
+ pwm->last_hwmon_state = MLXREG_FAN_PWM_DUTY2STATE(MLXREG_FAN_MIN_DUTY);
pwm->cdev = devm_thermal_of_cooling_device_register(dev, NULL, mlxreg_fan_name[i],
pwm, &mlxreg_fan_cooling_ops);
if (IS_ERR(pwm->cdev)) {
dev_err(dev, "Failed to register cooling device\n");
return PTR_ERR(pwm->cdev);
}
-
- /* Set minimal PWM speed. */
- pwm->last_hwmon_state = MLXREG_FAN_PWM_DUTY2STATE(MLXREG_FAN_MIN_DUTY);
}
return 0;
diff --git a/drivers/hwmon/nct6694-hwmon.c b/drivers/hwmon/nct6694-hwmon.c
new file mode 100644
index 000000000000..6dcf22ca5018
--- /dev/null
+++ b/drivers/hwmon/nct6694-hwmon.c
@@ -0,0 +1,949 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Nuvoton NCT6694 HWMON driver based on USB interface.
+ *
+ * Copyright (C) 2025 Nuvoton Technology Corp.
+ */
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/hwmon.h>
+#include <linux/kernel.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/nct6694.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/*
+ * USB command module type for the NCT6694 report channel.
+ * This defines the module type used for communication with the NCT6694
+ * report channel over the USB interface.
+ */
+#define NCT6694_RPT_MOD 0xFF
+
+/* Report channel */
+/*
+ * The report channel is used to report the status of the hardware monitor
+ * devices, such as voltage, temperature, fan speed, and PWM.
+ */
+#define NCT6694_VIN_IDX(x) (0x00 + (x))
+#define NCT6694_TIN_IDX(x) \
+ ({ typeof(x) (_x) = (x); \
+ ((_x) < 10) ? (0x10 + ((_x) * 2)) : \
+ (0x30 + (((_x) - 10) * 2)); })
+#define NCT6694_FIN_IDX(x) (0x50 + ((x) * 2))
+#define NCT6694_PWM_IDX(x) (0x70 + (x))
+#define NCT6694_VIN_STS(x) (0x68 + (x))
+#define NCT6694_TIN_STS(x) (0x6A + (x))
+#define NCT6694_FIN_STS(x) (0x6E + (x))
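
The TIN report offsets are split across two banks, which is why NCT6694_TIN_IDX() above is not a simple linear macro. A quick sketch of the mapping:

#include <stdio.h>

/* Channels 0-9 occupy two-byte slots from 0x10; 10+ continue from 0x30. */
static int tin_idx(int x)
{
	return (x < 10) ? (0x10 + x * 2) : (0x30 + (x - 10) * 2);
}

int main(void)
{
	/* prints TIN3 -> 0x16, TIN12 -> 0x34 */
	printf("TIN3 -> 0x%02x, TIN12 -> 0x%02x\n", tin_idx(3), tin_idx(12));
	return 0;
}
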
+
+/*
+ * USB command module type for NCT6694 HWMON controller.
+ * This defines the module type used for communication with the NCT6694
+ * HWMON controller over the USB interface.
+ */
+#define NCT6694_HWMON_MOD 0x00
+
+/* Command 00h - Hardware Monitor Control */
+#define NCT6694_HWMON_CONTROL 0x00
+#define NCT6694_HWMON_CONTROL_SEL 0x00
+
+/* Command 02h - Alarm Control */
+#define NCT6694_HWMON_ALARM 0x02
+#define NCT6694_HWMON_ALARM_SEL 0x00
+
+/*
+ * USB command module type for NCT6694 PWM controller.
+ * This defines the module type used for communication with the NCT6694
+ * PWM controller over the USB interface.
+ */
+#define NCT6694_PWM_MOD 0x01
+
+/* PWM Command - Manual Control */
+#define NCT6694_PWM_CONTROL 0x01
+#define NCT6694_PWM_CONTROL_SEL 0x00
+
+#define NCT6694_FREQ_FROM_REG(reg) ((reg) * 25000 / 255)
+#define NCT6694_FREQ_TO_REG(val) \
+ (DIV_ROUND_CLOSEST(clamp_val((val), 100, 25000) * 255, 25000))
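
The two macros above form a clamped round-trip between hertz and the 8-bit frequency register. A small sketch with DIV_ROUND_CLOSEST expanded by hand:

#include <stdio.h>

static int freq_to_reg(int val)
{
	if (val < 100)
		val = 100;
	if (val > 25000)
		val = 25000;
	return (val * 255 + 12500) / 25000; /* round to closest */
}

static int freq_from_reg(int reg)
{
	return reg * 25000 / 255;
}

int main(void)
{
	/* 10 kHz encodes to 102 and decodes back to 10000 Hz */
	printf("%d -> %d -> %d\n", 10000, freq_to_reg(10000),
	       freq_from_reg(freq_to_reg(10000)));
	return 0;
}
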
+
+#define NCT6694_LSB_REG_MASK GENMASK(7, 5)
+#define NCT6694_TIN_HYST_MASK GENMASK(7, 5)
+
+enum nct6694_hwmon_temp_mode {
+ NCT6694_HWMON_TWOTIME_IRQ = 0,
+ NCT6694_HWMON_ONETIME_IRQ,
+ NCT6694_HWMON_REALTIME_IRQ,
+ NCT6694_HWMON_COMPARE_IRQ,
+};
+
+struct __packed nct6694_hwmon_control {
+ u8 vin_en[2];
+ u8 tin_en[2];
+ u8 fin_en[2];
+ u8 pwm_en[2];
+ u8 reserved1[40];
+ u8 pwm_freq[10];
+ u8 reserved2[6];
+};
+
+struct __packed nct6694_hwmon_alarm {
+ u8 smi_ctrl;
+ u8 reserved1[15];
+ struct {
+ u8 hl;
+ u8 ll;
+ } vin_limit[16];
+ struct {
+ u8 hyst;
+ s8 hl;
+ } tin_cfg[32];
+ __be16 fin_ll[10];
+ u8 reserved2[4];
+};
+
+struct __packed nct6694_pwm_control {
+ u8 mal_en[2];
+ u8 mal_val[10];
+ u8 reserved[12];
+};
+
+union __packed nct6694_hwmon_rpt {
+ u8 vin;
+ struct {
+ u8 msb;
+ u8 lsb;
+ } tin;
+ __be16 fin;
+ u8 pwm;
+ u8 status;
+};
+
+union __packed nct6694_hwmon_msg {
+ struct nct6694_hwmon_alarm hwmon_alarm;
+ struct nct6694_pwm_control pwm_ctrl;
+};
+
+struct nct6694_hwmon_data {
+ struct nct6694 *nct6694;
+ struct mutex lock;
+ struct nct6694_hwmon_control hwmon_en;
+ union nct6694_hwmon_rpt *rpt;
+ union nct6694_hwmon_msg *msg;
+};
+
+static inline long in_from_reg(u8 reg)
+{
+ return reg * 16;
+}
+
+static inline u8 in_to_reg(long val)
+{
+ return DIV_ROUND_CLOSEST(val, 16);
+}
+
+static inline long temp_from_reg(s8 reg)
+{
+ return reg * 1000;
+}
+
+static inline s8 temp_to_reg(long val)
+{
+ return DIV_ROUND_CLOSEST(val, 1000);
+}
+
+#define NCT6694_HWMON_IN_CONFIG (HWMON_I_INPUT | HWMON_I_ENABLE | \
+ HWMON_I_MAX | HWMON_I_MIN | \
+ HWMON_I_ALARM)
+#define NCT6694_HWMON_TEMP_CONFIG (HWMON_T_INPUT | HWMON_T_ENABLE | \
+ HWMON_T_MAX | HWMON_T_MAX_HYST | \
+ HWMON_T_MAX_ALARM)
+#define NCT6694_HWMON_FAN_CONFIG (HWMON_F_INPUT | HWMON_F_ENABLE | \
+ HWMON_F_MIN | HWMON_F_MIN_ALARM)
+#define NCT6694_HWMON_PWM_CONFIG (HWMON_PWM_INPUT | HWMON_PWM_ENABLE | \
+ HWMON_PWM_FREQ)
+static const struct hwmon_channel_info *nct6694_info[] = {
+ HWMON_CHANNEL_INFO(in,
+ NCT6694_HWMON_IN_CONFIG, /* VIN0 */
+ NCT6694_HWMON_IN_CONFIG, /* VIN1 */
+ NCT6694_HWMON_IN_CONFIG, /* VIN2 */
+ NCT6694_HWMON_IN_CONFIG, /* VIN3 */
+ NCT6694_HWMON_IN_CONFIG, /* VIN5 */
+ NCT6694_HWMON_IN_CONFIG, /* VIN6 */
+ NCT6694_HWMON_IN_CONFIG, /* VIN7 */
+ NCT6694_HWMON_IN_CONFIG, /* VIN14 */
+ NCT6694_HWMON_IN_CONFIG, /* VIN15 */
+ NCT6694_HWMON_IN_CONFIG, /* VIN16 */
+ NCT6694_HWMON_IN_CONFIG, /* VBAT */
+ NCT6694_HWMON_IN_CONFIG, /* VSB */
+ NCT6694_HWMON_IN_CONFIG, /* AVSB */
+ NCT6694_HWMON_IN_CONFIG, /* VCC */
+ NCT6694_HWMON_IN_CONFIG, /* VHIF */
+ NCT6694_HWMON_IN_CONFIG), /* VTT */
+
+ HWMON_CHANNEL_INFO(temp,
+ NCT6694_HWMON_TEMP_CONFIG, /* THR1 */
+ NCT6694_HWMON_TEMP_CONFIG, /* THR2 */
+ NCT6694_HWMON_TEMP_CONFIG, /* THR14 */
+ NCT6694_HWMON_TEMP_CONFIG, /* THR15 */
+ NCT6694_HWMON_TEMP_CONFIG, /* THR16 */
+ NCT6694_HWMON_TEMP_CONFIG, /* TDP0 */
+ NCT6694_HWMON_TEMP_CONFIG, /* TDP1 */
+ NCT6694_HWMON_TEMP_CONFIG, /* TDP2 */
+ NCT6694_HWMON_TEMP_CONFIG, /* TDP3 */
+ NCT6694_HWMON_TEMP_CONFIG, /* TDP4 */
+ NCT6694_HWMON_TEMP_CONFIG, /* DTIN0 */
+ NCT6694_HWMON_TEMP_CONFIG, /* DTIN1 */
+ NCT6694_HWMON_TEMP_CONFIG, /* DTIN2 */
+ NCT6694_HWMON_TEMP_CONFIG, /* DTIN3 */
+ NCT6694_HWMON_TEMP_CONFIG, /* DTIN4 */
+ NCT6694_HWMON_TEMP_CONFIG, /* DTIN5 */
+ NCT6694_HWMON_TEMP_CONFIG, /* DTIN6 */
+ NCT6694_HWMON_TEMP_CONFIG, /* DTIN7 */
+ NCT6694_HWMON_TEMP_CONFIG, /* DTIN8 */
+ NCT6694_HWMON_TEMP_CONFIG, /* DTIN9 */
+ NCT6694_HWMON_TEMP_CONFIG, /* DTIN10 */
+ NCT6694_HWMON_TEMP_CONFIG, /* DTIN11 */
+ NCT6694_HWMON_TEMP_CONFIG, /* DTIN12 */
+ NCT6694_HWMON_TEMP_CONFIG, /* DTIN13 */
+ NCT6694_HWMON_TEMP_CONFIG, /* DTIN14 */
+ NCT6694_HWMON_TEMP_CONFIG), /* DTIN15 */
+
+ HWMON_CHANNEL_INFO(fan,
+ NCT6694_HWMON_FAN_CONFIG, /* FIN0 */
+ NCT6694_HWMON_FAN_CONFIG, /* FIN1 */
+ NCT6694_HWMON_FAN_CONFIG, /* FIN2 */
+ NCT6694_HWMON_FAN_CONFIG, /* FIN3 */
+ NCT6694_HWMON_FAN_CONFIG, /* FIN4 */
+ NCT6694_HWMON_FAN_CONFIG, /* FIN5 */
+ NCT6694_HWMON_FAN_CONFIG, /* FIN6 */
+ NCT6694_HWMON_FAN_CONFIG, /* FIN7 */
+ NCT6694_HWMON_FAN_CONFIG, /* FIN8 */
+ NCT6694_HWMON_FAN_CONFIG), /* FIN9 */
+
+ HWMON_CHANNEL_INFO(pwm,
+ NCT6694_HWMON_PWM_CONFIG, /* PWM0 */
+ NCT6694_HWMON_PWM_CONFIG, /* PWM1 */
+ NCT6694_HWMON_PWM_CONFIG, /* PWM2 */
+ NCT6694_HWMON_PWM_CONFIG, /* PWM3 */
+ NCT6694_HWMON_PWM_CONFIG, /* PWM4 */
+ NCT6694_HWMON_PWM_CONFIG, /* PWM5 */
+ NCT6694_HWMON_PWM_CONFIG, /* PWM6 */
+ NCT6694_HWMON_PWM_CONFIG, /* PWM7 */
+ NCT6694_HWMON_PWM_CONFIG, /* PWM8 */
+ NCT6694_HWMON_PWM_CONFIG), /* PWM9 */
+ NULL
+};
+
+static int nct6694_in_read(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ struct nct6694_hwmon_data *data = dev_get_drvdata(dev);
+ struct nct6694_cmd_header cmd_hd;
+ unsigned char vin_en;
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ switch (attr) {
+ case hwmon_in_enable:
+ vin_en = data->hwmon_en.vin_en[(channel / 8)];
+ *val = !!(vin_en & BIT(channel % 8));
+
+ return 0;
+ case hwmon_in_input:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_RPT_MOD,
+ .offset = cpu_to_le16(NCT6694_VIN_IDX(channel)),
+ .len = cpu_to_le16(sizeof(data->rpt->vin))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->rpt->vin);
+ if (ret)
+ return ret;
+
+ *val = in_from_reg(data->rpt->vin);
+
+ return 0;
+ case hwmon_in_max:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_ALARM,
+ .sel = NCT6694_HWMON_ALARM_SEL,
+ .len = cpu_to_le16(sizeof(data->msg->hwmon_alarm))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+ if (ret)
+ return ret;
+
+ *val = in_from_reg(data->msg->hwmon_alarm.vin_limit[channel].hl);
+
+ return 0;
+ case hwmon_in_min:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_ALARM,
+ .sel = NCT6694_HWMON_ALARM_SEL,
+ .len = cpu_to_le16(sizeof(data->msg->hwmon_alarm))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+ if (ret)
+ return ret;
+
+ *val = in_from_reg(data->msg->hwmon_alarm.vin_limit[channel].ll);
+
+ return 0;
+ case hwmon_in_alarm:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_RPT_MOD,
+ .offset = cpu_to_le16(NCT6694_VIN_STS(channel / 8)),
+ .len = cpu_to_le16(sizeof(data->rpt->status))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->rpt->status);
+ if (ret)
+ return ret;
+
+ *val = !!(data->rpt->status & BIT(channel % 8));
+
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int nct6694_temp_read(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ struct nct6694_hwmon_data *data = dev_get_drvdata(dev);
+ struct nct6694_cmd_header cmd_hd;
+ unsigned char temp_en, temp_hyst;
+ signed char temp_max;
+ int ret, temp_raw;
+
+ guard(mutex)(&data->lock);
+
+ switch (attr) {
+ case hwmon_temp_enable:
+ temp_en = data->hwmon_en.tin_en[channel / 8];
+ *val = !!(temp_en & BIT(channel % 8));
+
+ return 0;
+ case hwmon_temp_input:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_RPT_MOD,
+ .offset = cpu_to_le16(NCT6694_TIN_IDX(channel)),
+ .len = cpu_to_le16(sizeof(data->rpt->tin))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->rpt->tin);
+ if (ret)
+ return ret;
+
+ temp_raw = data->rpt->tin.msb << 3;
+ temp_raw |= FIELD_GET(NCT6694_LSB_REG_MASK, data->rpt->tin.lsb);
+
+ /* Real temperature (millidegrees Celsius) = temp_raw * 1000 * 0.125 */
+ *val = sign_extend32(temp_raw, 10) * 125;
+
+ return 0;
+ case hwmon_temp_max:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_ALARM,
+ .sel = NCT6694_HWMON_ALARM_SEL,
+ .len = cpu_to_le16(sizeof(data->msg->hwmon_alarm))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+ if (ret)
+ return ret;
+
+ *val = temp_from_reg(data->msg->hwmon_alarm.tin_cfg[channel].hl);
+
+ return 0;
+ case hwmon_temp_max_hyst:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_ALARM,
+ .sel = NCT6694_HWMON_ALARM_SEL,
+ .len = cpu_to_le16(sizeof(data->msg->hwmon_alarm))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+ if (ret)
+ return ret;
+
+ temp_max = data->msg->hwmon_alarm.tin_cfg[channel].hl;
+ temp_hyst = FIELD_GET(NCT6694_TIN_HYST_MASK,
+ data->msg->hwmon_alarm.tin_cfg[channel].hyst);
+ *val = temp_from_reg(temp_max - temp_hyst);
+
+ return 0;
+ case hwmon_temp_max_alarm:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_RPT_MOD,
+ .offset = cpu_to_le16(NCT6694_TIN_STS(channel / 8)),
+ .len = cpu_to_le16(sizeof(data->rpt->status))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->rpt->status);
+ if (ret)
+ return ret;
+
+ *val = !!(data->rpt->status & BIT(channel % 8));
+
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
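
The hwmon_temp_input path above assembles an 11-bit two's-complement value from the MSB and the top three bits of the LSB. A standalone sketch of that decode:

#include <stdio.h>
#include <stdint.h>

static long temp_decode(uint8_t msb, uint8_t lsb)
{
	int raw = (msb << 3) | (lsb >> 5); /* 11-bit raw value */

	if (raw & 0x400)                   /* sign bit of the 11-bit value */
		raw -= 0x800;
	return (long)raw * 125;            /* 0.125 degC per step, in m°C */
}

int main(void)
{
	/* msb=0xf8, lsb=0xe0 -> raw 1991 -> -57 -> -7125 m°C */
	printf("%ld\n", temp_decode(0xf8, 0xe0));
	return 0;
}
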
+
+static int nct6694_fan_read(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ struct nct6694_hwmon_data *data = dev_get_drvdata(dev);
+ struct nct6694_cmd_header cmd_hd;
+ unsigned char fanin_en;
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ switch (attr) {
+ case hwmon_fan_enable:
+ fanin_en = data->hwmon_en.fin_en[channel / 8];
+ *val = !!(fanin_en & BIT(channel % 8));
+
+ return 0;
+ case hwmon_fan_input:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_RPT_MOD,
+ .offset = cpu_to_le16(NCT6694_FIN_IDX(channel)),
+ .len = cpu_to_le16(sizeof(data->rpt->fin))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->rpt->fin);
+ if (ret)
+ return ret;
+
+ *val = be16_to_cpu(data->rpt->fin);
+
+ return 0;
+ case hwmon_fan_min:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_ALARM,
+ .sel = NCT6694_HWMON_ALARM_SEL,
+ .len = cpu_to_le16(sizeof(data->msg->hwmon_alarm))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+ if (ret)
+ return ret;
+
+ *val = be16_to_cpu(data->msg->hwmon_alarm.fin_ll[channel]);
+
+ return 0;
+ case hwmon_fan_min_alarm:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_RPT_MOD,
+ .offset = cpu_to_le16(NCT6694_FIN_STS(channel / 8)),
+ .len = cpu_to_le16(sizeof(data->rpt->status))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->rpt->status);
+ if (ret)
+ return ret;
+
+ *val = !!(data->rpt->status & BIT(channel % 8));
+
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int nct6694_pwm_read(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ struct nct6694_hwmon_data *data = dev_get_drvdata(dev);
+ struct nct6694_cmd_header cmd_hd;
+ unsigned char pwm_en;
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ switch (attr) {
+ case hwmon_pwm_enable:
+ pwm_en = data->hwmon_en.pwm_en[channel / 8];
+ *val = !!(pwm_en & BIT(channel % 8));
+
+ return 0;
+ case hwmon_pwm_input:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_RPT_MOD,
+ .offset = cpu_to_le16(NCT6694_PWM_IDX(channel)),
+ .len = cpu_to_le16(sizeof(data->rpt->pwm))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->rpt->pwm);
+ if (ret)
+ return ret;
+
+ *val = data->rpt->pwm;
+
+ return 0;
+ case hwmon_pwm_freq:
+ *val = NCT6694_FREQ_FROM_REG(data->hwmon_en.pwm_freq[channel]);
+
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int nct6694_in_write(struct device *dev, u32 attr, int channel,
+ long val)
+{
+ struct nct6694_hwmon_data *data = dev_get_drvdata(dev);
+ struct nct6694_cmd_header cmd_hd;
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ switch (attr) {
+ case hwmon_in_enable:
+ if (val == 0)
+ data->hwmon_en.vin_en[channel / 8] &= ~BIT(channel % 8);
+ else if (val == 1)
+ data->hwmon_en.vin_en[channel / 8] |= BIT(channel % 8);
+ else
+ return -EINVAL;
+
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_CONTROL,
+ .sel = NCT6694_HWMON_CONTROL_SEL,
+ .len = cpu_to_le16(sizeof(data->hwmon_en))
+ };
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd,
+ &data->hwmon_en);
+ case hwmon_in_max:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_ALARM,
+ .sel = NCT6694_HWMON_ALARM_SEL,
+ .len = cpu_to_le16(sizeof(data->msg->hwmon_alarm))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+ if (ret)
+ return ret;
+
+ val = clamp_val(val, 0, 2032);
+ data->msg->hwmon_alarm.vin_limit[channel].hl = in_to_reg(val);
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+ case hwmon_in_min:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_ALARM,
+ .sel = NCT6694_HWMON_ALARM_SEL,
+ .len = cpu_to_le16(sizeof(data->msg->hwmon_alarm))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+ if (ret)
+ return ret;
+
+ val = clamp_val(val, 0, 2032);
+ data->msg->hwmon_alarm.vin_limit[channel].ll = in_to_reg(val);
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int nct6694_temp_write(struct device *dev, u32 attr, int channel,
+ long val)
+{
+ struct nct6694_hwmon_data *data = dev_get_drvdata(dev);
+ struct nct6694_cmd_header cmd_hd;
+ unsigned char temp_hyst;
+ signed char temp_max;
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ switch (attr) {
+ case hwmon_temp_enable:
+ if (val == 0)
+ data->hwmon_en.tin_en[channel / 8] &= ~BIT(channel % 8);
+ else if (val == 1)
+ data->hwmon_en.tin_en[channel / 8] |= BIT(channel % 8);
+ else
+ return -EINVAL;
+
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_CONTROL,
+ .sel = NCT6694_HWMON_CONTROL_SEL,
+ .len = cpu_to_le16(sizeof(data->hwmon_en))
+ };
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd,
+ &data->hwmon_en);
+ case hwmon_temp_max:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_ALARM,
+ .sel = NCT6694_HWMON_ALARM_SEL,
+ .len = cpu_to_le16(sizeof(data->msg->hwmon_alarm))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+ if (ret)
+ return ret;
+
+ val = clamp_val(val, -127000, 127000);
+ data->msg->hwmon_alarm.tin_cfg[channel].hl = temp_to_reg(val);
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+ case hwmon_temp_max_hyst:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_ALARM,
+ .sel = NCT6694_HWMON_ALARM_SEL,
+ .len = cpu_to_le16(sizeof(data->msg->hwmon_alarm))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+
+ val = clamp_val(val, -127000, 127000);
+ temp_max = data->msg->hwmon_alarm.tin_cfg[channel].hl;
+ temp_hyst = temp_max - temp_to_reg(val);
+ temp_hyst = clamp_val(temp_hyst, 0, 7);
+ data->msg->hwmon_alarm.tin_cfg[channel].hyst =
+ (data->msg->hwmon_alarm.tin_cfg[channel].hyst & ~NCT6694_TIN_HYST_MASK) |
+ FIELD_PREP(NCT6694_TIN_HYST_MASK, temp_hyst);
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
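
The hwmon_temp_max_hyst path above stores the hysteresis as the difference between the max limit and the requested value, clamped to 0-7 degrees and packed into bits 7:5 of the hyst byte. A sketch with hypothetical values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int temp_max = 85;     /* limit register, in degC */
	long hyst_mc = 80000;  /* requested temp1_max_hyst, in m°C */
	int delta = temp_max - (int)((hyst_mc + 500) / 1000); /* 5 */
	uint8_t hyst_byte = 0x12; /* hypothetical current register value */

	if (delta < 0)
		delta = 0;
	if (delta > 7)
		delta = 7;
	hyst_byte = (hyst_byte & ~0xe0) | (uint8_t)(delta << 5);
	printf("hyst byte = 0x%02x\n", hyst_byte); /* 0xb2 */
	return 0;
}
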
+
+static int nct6694_fan_write(struct device *dev, u32 attr, int channel,
+ long val)
+{
+ struct nct6694_hwmon_data *data = dev_get_drvdata(dev);
+ struct nct6694_cmd_header cmd_hd;
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ switch (attr) {
+ case hwmon_fan_enable:
+ if (val == 0)
+ data->hwmon_en.fin_en[channel / 8] &= ~BIT(channel % 8);
+ else if (val == 1)
+ data->hwmon_en.fin_en[channel / 8] |= BIT(channel % 8);
+ else
+ return -EINVAL;
+
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_CONTROL,
+ .sel = NCT6694_HWMON_CONTROL_SEL,
+ .len = cpu_to_le16(sizeof(data->hwmon_en))
+ };
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd,
+ &data->hwmon_en);
+ case hwmon_fan_min:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_ALARM,
+ .sel = NCT6694_HWMON_ALARM_SEL,
+ .len = cpu_to_le16(sizeof(data->msg->hwmon_alarm))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+ if (ret)
+ return ret;
+
+ val = clamp_val(val, 1, 65535);
+ data->msg->hwmon_alarm.fin_ll[channel] = cpu_to_be16(val);
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int nct6694_pwm_write(struct device *dev, u32 attr, int channel,
+ long val)
+{
+ struct nct6694_hwmon_data *data = dev_get_drvdata(dev);
+ struct nct6694_cmd_header cmd_hd;
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ switch (attr) {
+ case hwmon_pwm_enable:
+ if (val == 0)
+ data->hwmon_en.pwm_en[channel / 8] &= ~BIT(channel % 8);
+ else if (val == 1)
+ data->hwmon_en.pwm_en[channel / 8] |= BIT(channel % 8);
+ else
+ return -EINVAL;
+
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_CONTROL,
+ .sel = NCT6694_HWMON_CONTROL_SEL,
+ .len = cpu_to_le16(sizeof(data->hwmon_en))
+ };
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd,
+ &data->hwmon_en);
+ case hwmon_pwm_input:
+ if (val < 0 || val > 255)
+ return -EINVAL;
+
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_PWM_MOD,
+ .cmd = NCT6694_PWM_CONTROL,
+ .sel = NCT6694_PWM_CONTROL_SEL,
+ .len = cpu_to_le16(sizeof(data->msg->pwm_ctrl))
+ };
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->msg->pwm_ctrl);
+ if (ret)
+ return ret;
+
+ data->msg->pwm_ctrl.mal_val[channel] = val;
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd,
+ &data->msg->pwm_ctrl);
+ case hwmon_pwm_freq:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_CONTROL,
+ .sel = NCT6694_HWMON_CONTROL_SEL,
+ .len = cpu_to_le16(sizeof(data->hwmon_en))
+ };
+
+ data->hwmon_en.pwm_freq[channel] = NCT6694_FREQ_TO_REG(val);
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd,
+ &data->hwmon_en);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int nct6694_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (type) {
+ case hwmon_in:
+ /* in mV */
+ return nct6694_in_read(dev, attr, channel, val);
+ case hwmon_temp:
+ /* in mC */
+ return nct6694_temp_read(dev, attr, channel, val);
+ case hwmon_fan:
+ /* in RPM */
+ return nct6694_fan_read(dev, attr, channel, val);
+ case hwmon_pwm:
+ /* in value 0~255 */
+ return nct6694_pwm_read(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int nct6694_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ switch (type) {
+ case hwmon_in:
+ return nct6694_in_write(dev, attr, channel, val);
+ case hwmon_temp:
+ return nct6694_temp_write(dev, attr, channel, val);
+ case hwmon_fan:
+ return nct6694_fan_write(dev, attr, channel, val);
+ case hwmon_pwm:
+ return nct6694_pwm_write(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t nct6694_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_enable:
+ case hwmon_in_max:
+ case hwmon_in_min:
+ return 0644;
+ case hwmon_in_alarm:
+ case hwmon_in_input:
+ return 0444;
+ default:
+ return 0;
+ }
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_enable:
+ case hwmon_temp_max:
+ case hwmon_temp_max_hyst:
+ return 0644;
+ case hwmon_temp_input:
+ case hwmon_temp_max_alarm:
+ return 0444;
+ default:
+ return 0;
+ }
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_enable:
+ case hwmon_fan_min:
+ return 0644;
+ case hwmon_fan_input:
+ case hwmon_fan_min_alarm:
+ return 0444;
+ default:
+ return 0;
+ }
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_enable:
+ case hwmon_pwm_freq:
+ case hwmon_pwm_input:
+ return 0644;
+ default:
+ return 0;
+ }
+ default:
+ return 0;
+ }
+}
+
+static const struct hwmon_ops nct6694_hwmon_ops = {
+ .is_visible = nct6694_is_visible,
+ .read = nct6694_read,
+ .write = nct6694_write,
+};
+
+static const struct hwmon_chip_info nct6694_chip_info = {
+ .ops = &nct6694_hwmon_ops,
+ .info = nct6694_info,
+};
+
+static int nct6694_hwmon_init(struct nct6694_hwmon_data *data)
+{
+ struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_CONTROL,
+ .sel = NCT6694_HWMON_CONTROL_SEL,
+ .len = cpu_to_le16(sizeof(data->hwmon_en))
+ };
+ int ret;
+
+ /*
+ * Record each Hardware Monitor Channel enable status
+ * and PWM frequency register
+ */
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->hwmon_en);
+ if (ret)
+ return ret;
+
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_ALARM,
+ .sel = NCT6694_HWMON_ALARM_SEL,
+ .len = cpu_to_le16(sizeof(data->msg->hwmon_alarm))
+ };
+
+ /* Select hwmon device alarm mode */
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+ if (ret)
+ return ret;
+
+ data->msg->hwmon_alarm.smi_ctrl = NCT6694_HWMON_REALTIME_IRQ;
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+}
+
+static int nct6694_hwmon_probe(struct platform_device *pdev)
+{
+ struct nct6694_hwmon_data *data;
+ struct nct6694 *nct6694 = dev_get_drvdata(pdev->dev.parent);
+ struct device *hwmon_dev;
+ int ret;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->rpt = devm_kzalloc(&pdev->dev, sizeof(union nct6694_hwmon_rpt),
+ GFP_KERNEL);
+ if (!data->rpt)
+ return -ENOMEM;
+
+ data->msg = devm_kzalloc(&pdev->dev, sizeof(union nct6694_hwmon_msg),
+ GFP_KERNEL);
+ if (!data->msg)
+ return -ENOMEM;
+
+ data->nct6694 = nct6694;
+ ret = devm_mutex_init(&pdev->dev, &data->lock);
+ if (ret)
+ return ret;
+
+ ret = nct6694_hwmon_init(data);
+ if (ret)
+ return ret;
+
+ /* Register hwmon device to HWMON framework */
+ hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev,
+ "nct6694", data,
+ &nct6694_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static struct platform_driver nct6694_hwmon_driver = {
+ .driver = {
+ .name = "nct6694-hwmon",
+ },
+ .probe = nct6694_hwmon_probe,
+};
+
+module_platform_driver(nct6694_hwmon_driver);
+
+MODULE_DESCRIPTION("USB-HWMON driver for NCT6694");
+MODULE_AUTHOR("Ming Yu <tmyu0@nuvoton.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:nct6694-hwmon");
diff --git a/drivers/hwmon/nct6775-platform.c b/drivers/hwmon/nct6775-platform.c
index 0a040364b512..407945d2cd6a 100644
--- a/drivers/hwmon/nct6775-platform.c
+++ b/drivers/hwmon/nct6775-platform.c
@@ -167,7 +167,8 @@ static inline int nct6775_asuswmi_write(u8 bank, u8 reg, u8 val)
static inline int nct6775_asuswmi_read(u8 bank, u8 reg, u8 *val)
{
- u32 ret, tmp = 0;
+ u32 tmp = 0;
+ int ret;
ret = nct6775_asuswmi_evaluate_method(ASUSWMI_METHODID_RHWM, bank,
reg, 0, &tmp);
diff --git a/drivers/hwmon/nzxt-smart2.c b/drivers/hwmon/nzxt-smart2.c
index c2d1173f42fe..58ef9fa0184b 100644
--- a/drivers/hwmon/nzxt-smart2.c
+++ b/drivers/hwmon/nzxt-smart2.c
@@ -721,11 +721,6 @@ static int __maybe_unused nzxt_smart2_hid_reset_resume(struct hid_device *hdev)
return init_device(drvdata, drvdata->update_interval);
}
-static void mutex_fini(void *lock)
-{
- mutex_destroy(lock);
-}
-
static int nzxt_smart2_hid_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
@@ -741,8 +736,7 @@ static int nzxt_smart2_hid_probe(struct hid_device *hdev,
init_waitqueue_head(&drvdata->wq);
- mutex_init(&drvdata->mutex);
- ret = devm_add_action_or_reset(&hdev->dev, mutex_fini, &drvdata->mutex);
+ ret = devm_mutex_init(&hdev->dev, &drvdata->mutex);
if (ret)
return ret;
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 55e492452ce8..da04ff6df28b 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -52,7 +52,8 @@ config SENSORS_ADM1275
help
If you say yes here you get hardware monitoring support for Analog
Devices ADM1075, ADM1272, ADM1273, ADM1275, ADM1276, ADM1278, ADM1281,
- ADM1293, and ADM1294 Hot-Swap Controller and Digital Power Monitors.
+ ADM1293, ADM1294 and SQ24905C Hot-Swap Controller and
+ Digital Power Monitors.
This driver can also be built as a module. If so, the module will
be called adm1275.
@@ -373,6 +374,15 @@ config SENSORS_MP2856
This driver can also be built as a module. If so, the module will
be called mp2856.
+config SENSORS_MP2869
+ tristate "MPS MP2869"
+ help
+ If you say yes here you get hardware monitoring support for MPS
+ MP2869 Dual Loop Digital Multi-Phase Controller.
+
+ This driver can also be built as a module. If so, the module will
+ be called mp2869.
+
config SENSORS_MP2888
tristate "MPS MP2888"
help
@@ -391,6 +401,15 @@ config SENSORS_MP2891
This driver can also be built as a module. If so, the module will
be called mp2891.
+config SENSORS_MP29502
+ tristate "MPS MP29502"
+ help
+ If you say yes here you get hardware monitoring support for MPS
+ MP29502 Dual Loop Digital Multi-Phase Controller.
+
+ This driver can also be built as a module. If so, the module will
+ be called mp29502.
+
config SENSORS_MP2975
tristate "MPS MP2975"
help
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index 29cd8a3317d2..4c5ff3f32c5e 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -37,8 +37,10 @@ obj-$(CONFIG_SENSORS_MAX31785) += max31785.o
obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
obj-$(CONFIG_SENSORS_MP2856) += mp2856.o
+obj-$(CONFIG_SENSORS_MP2869) += mp2869.o
obj-$(CONFIG_SENSORS_MP2888) += mp2888.o
obj-$(CONFIG_SENSORS_MP2891) += mp2891.o
+obj-$(CONFIG_SENSORS_MP29502) += mp29502.o
obj-$(CONFIG_SENSORS_MP2975) += mp2975.o
obj-$(CONFIG_SENSORS_MP2993) += mp2993.o
obj-$(CONFIG_SENSORS_MP5023) += mp5023.o
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index 7d175baa5de2..bc2a6a07dc3e 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -18,7 +18,8 @@
#include <linux/log2.h>
#include "pmbus.h"
-enum chips { adm1075, adm1272, adm1273, adm1275, adm1276, adm1278, adm1281, adm1293, adm1294 };
+enum chips { adm1075, adm1272, adm1273, adm1275, adm1276, adm1278, adm1281,
+ adm1293, adm1294, sq24905c };
#define ADM1275_MFR_STATUS_IOUT_WARN2 BIT(0)
#define ADM1293_MFR_STATUS_VAUX_UV_WARN BIT(5)
@@ -486,6 +487,7 @@ static const struct i2c_device_id adm1275_id[] = {
{ "adm1281", adm1281 },
{ "adm1293", adm1293 },
{ "adm1294", adm1294 },
+ { "mc09c", sq24905c },
{ }
};
MODULE_DEVICE_TABLE(i2c, adm1275_id);
@@ -532,7 +534,8 @@ static int adm1275_probe(struct i2c_client *client)
dev_err(&client->dev, "Failed to read Manufacturer ID\n");
return ret;
}
- if (ret != 3 || strncmp(block_buffer, "ADI", 3)) {
+ if ((ret != 3 || strncmp(block_buffer, "ADI", 3)) &&
+ (ret != 2 || strncmp(block_buffer, "SY", 2))) {
dev_err(&client->dev, "Unsupported Manufacturer ID\n");
return -ENODEV;
}
@@ -558,7 +561,8 @@ static int adm1275_probe(struct i2c_client *client)
if (mid->driver_data == adm1272 || mid->driver_data == adm1273 ||
mid->driver_data == adm1278 || mid->driver_data == adm1281 ||
- mid->driver_data == adm1293 || mid->driver_data == adm1294)
+ mid->driver_data == adm1293 || mid->driver_data == adm1294 ||
+ mid->driver_data == sq24905c)
config_read_fn = i2c_smbus_read_word_data;
else
config_read_fn = i2c_smbus_read_byte_data;
@@ -708,6 +712,7 @@ static int adm1275_probe(struct i2c_client *client)
break;
case adm1278:
case adm1281:
+ case sq24905c:
data->have_vout = true;
data->have_pin_max = true;
data->have_temp_max = true;
diff --git a/drivers/hwmon/pmbus/isl68137.c b/drivers/hwmon/pmbus/isl68137.c
index c52c55d2e7f4..52cf62e45a86 100644
--- a/drivers/hwmon/pmbus/isl68137.c
+++ b/drivers/hwmon/pmbus/isl68137.c
@@ -61,6 +61,8 @@ enum chips {
raa228004,
raa228006,
raa228228,
+ raa228244,
+ raa228246,
raa229001,
raa229004,
raa229621,
@@ -464,6 +466,8 @@ static const struct i2c_device_id raa_dmpvr_id[] = {
{"raa228004", raa_dmpvr2_hv},
{"raa228006", raa_dmpvr2_hv},
{"raa228228", raa_dmpvr2_2rail_nontc},
+ {"raa228244", raa_dmpvr2_2rail_nontc},
+ {"raa228246", raa_dmpvr2_2rail_nontc},
{"raa229001", raa_dmpvr2_2rail},
{"raa229004", raa_dmpvr2_2rail},
{"raa229621", raa_dmpvr2_2rail},
@@ -512,6 +516,8 @@ static const struct of_device_id isl68137_of_match[] = {
{ .compatible = "renesas,raa228004", .data = (void *)raa_dmpvr2_hv },
{ .compatible = "renesas,raa228006", .data = (void *)raa_dmpvr2_hv },
{ .compatible = "renesas,raa228228", .data = (void *)raa_dmpvr2_2rail_nontc },
+ { .compatible = "renesas,raa228244", .data = (void *)raa_dmpvr2_2rail_nontc },
+ { .compatible = "renesas,raa228246", .data = (void *)raa_dmpvr2_2rail_nontc },
{ .compatible = "renesas,raa229001", .data = (void *)raa_dmpvr2_2rail },
{ .compatible = "renesas,raa229004", .data = (void *)raa_dmpvr2_2rail },
{ .compatible = "renesas,raa229621", .data = (void *)raa_dmpvr2_2rail },
diff --git a/drivers/hwmon/pmbus/mp2869.c b/drivers/hwmon/pmbus/mp2869.c
new file mode 100644
index 000000000000..cc69a1e91dfe
--- /dev/null
+++ b/drivers/hwmon/pmbus/mp2869.c
@@ -0,0 +1,659 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Hardware monitoring driver for MPS Multi-phase Digital VR Controllers (MP2869)
+ */
+
+#include <linux/bitfield.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include "pmbus.h"
+
+/*
+ * Vendor specific registers: the registers MFR_SVI3_IOUT_PRT(0x67),
+ * READ_PIN_EST(0x94) and READ_IIN_EST(0x95) redefine standard
+ * PMBus registers. The MFR_VOUT_LOOP_CTRL(0x29) is used to identify
+ * the vout scale and the MFR_SVI3_IOUT_PRT(0x67) is used to identify
+ * the iout scale. The READ_PIN_EST(0x94) is used to read input power
+ * per rail. The MP2869 does not have the standard READ_IIN register(0x89);
+ * the iin telemetry can be obtained through the vendor redefined
+ * register READ_IIN_EST(0x95).
+ */
+#define MFR_SVI3_IOUT_PRT 0x67
+#define MFR_READ_PIN_EST 0x94
+#define MFR_READ_IIN_EST 0x95
+#define MFR_TSNS_FLT_SET 0xBB
+
+#define MP2869_VIN_OV_FAULT_GAIN 4
+#define MP2869_READ_VOUT_DIV 1024
+#define MP2869_READ_IOUT_DIV 32
+#define MP2869_OVUV_LIMIT_SCALE 10
+#define MP2869_OVUV_DELTA_SCALE 50
+#define MP2869_TEMP_LIMIT_OFFSET 40
+#define MP2869_IOUT_LIMIT_UNIT 8
+#define MP2869_POUT_OP_GAIN 2
+
+#define MP2869_PAGE_NUM 2
+
+#define MP2869_RAIL1_FUNC (PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | \
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_POUT | \
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_PIN | \
+ PMBUS_HAVE_IIN | \
+ PMBUS_HAVE_STATUS_VOUT | \
+ PMBUS_HAVE_STATUS_IOUT | \
+ PMBUS_HAVE_STATUS_TEMP | \
+ PMBUS_HAVE_STATUS_INPUT)
+
+#define MP2869_RAIL2_FUNC (PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT | \
+ PMBUS_HAVE_POUT | PMBUS_HAVE_TEMP | \
+ PMBUS_HAVE_PIN | PMBUS_HAVE_IIN | \
+ PMBUS_HAVE_STATUS_VOUT | \
+ PMBUS_HAVE_STATUS_IOUT | \
+ PMBUS_HAVE_STATUS_TEMP | \
+ PMBUS_HAVE_STATUS_INPUT)
+
+struct mp2869_data {
+ struct pmbus_driver_info info;
+ bool mfr_thwn_flt_en;
+ int vout_scale[MP2869_PAGE_NUM];
+ int iout_scale[MP2869_PAGE_NUM];
+};
+
+static const int mp2869_vout_scale[8] = {6400, 5120, 2560, 2048, 1024,
+ 4, 2, 1};
+static const int mp2869_iout_scale[8] = {32, 1, 2, 4, 8, 16, 32, 64};
+
+#define to_mp2869_data(x) container_of(x, struct mp2869_data, info)
+
+static u16 mp2869_reg2data_linear11(u16 word)
+{
+ s16 exponent;
+ s32 mantissa;
+ s64 val;
+
+ exponent = ((s16)word) >> 11;
+ mantissa = ((s16)((word & 0x7ff) << 5)) >> 5;
+ val = mantissa;
+
+ if (exponent >= 0)
+ val <<= exponent;
+ else
+ val >>= -exponent;
+
+ return val;
+}
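
A standalone sketch of the linear11 decode above, with a worked value (0xe808 carries exponent -3 and mantissa 8):

#include <stdio.h>
#include <stdint.h>

static long linear11(uint16_t word)
{
	int exponent = (int16_t)word >> 11; /* arithmetic shift, top 5 bits */
	int mantissa = ((int16_t)((word & 0x7ff) << 5)) >> 5; /* sign-extend */

	return exponent >= 0 ? (long)mantissa << exponent
			     : (long)mantissa >> -exponent;
}

int main(void)
{
	/* 0xe808: exponent -3, mantissa 8 -> 8 / 8 = 1 */
	printf("%ld\n", linear11(0xe808));
	return 0;
}
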
+
+static int
+mp2869_identify_thwn_flt(struct i2c_client *client, struct pmbus_driver_info *info,
+ int page)
+{
+ struct mp2869_data *data = to_mp2869_data(info);
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, MFR_TSNS_FLT_SET);
+ if (ret < 0)
+ return ret;
+
+ data->mfr_thwn_flt_en = FIELD_GET(GENMASK(13, 13), ret);
+
+ return 0;
+}
+
+static int
+mp2869_identify_vout_scale(struct i2c_client *client, struct pmbus_driver_info *info,
+ int page)
+{
+ struct mp2869_data *data = to_mp2869_data(info);
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, PMBUS_VOUT_SCALE_LOOP);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The output voltage is equal to the READ_VOUT(0x8B) register value multiplied
+ * by vout_scale.
+ * Obtain vout scale from the register PMBUS_VOUT_SCALE_LOOP, bits 12-10
+ * PMBUS_VOUT_SCALE_LOOP[12:10]:
+ * 000b - 6.25mV/LSB, 001b - 5mV/LSB, 010b - 2.5mV/LSB, 011b - 2mV/LSB
+ * 100b - 1mV/Lsb, 101b - (1/256)mV/LSB, 110b - (1/512)mV/LSB,
+ * 111b - (1/1024)mV/LSB
+ */
+ data->vout_scale[page] = mp2869_vout_scale[FIELD_GET(GENMASK(12, 10), ret)];
+
+ return 0;
+}
+
+static int
+mp2869_identify_iout_scale(struct i2c_client *client, struct pmbus_driver_info *info,
+ int page)
+{
+ struct mp2869_data *data = to_mp2869_data(info);
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, MFR_SVI3_IOUT_PRT);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The output current is equal to the READ_IOUT(0x8C) register value
+ * multiplied by iout_scale.
+ * Obtain iout_scale from the register MFR_SVI3_IOUT_PRT[2:0].
+ * The value is selected as below:
+ * 000b - 1A/LSB, 001b - (1/32)A/LSB, 010b - (1/16)A/LSB,
+ * 011b - (1/8)A/LSB, 100b - (1/4)A/LSB, 101b - (1/2)A/LSB
+ * 110b - 1A/LSB, 111b - 2A/LSB
+ */
+ data->iout_scale[page] = mp2869_iout_scale[FIELD_GET(GENMASK(2, 0), ret)];
+
+ return 0;
+}
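
Putting the scale selection together: bits 2:0 of MFR_SVI3_IOUT_PRT index the table above, and READ_IOUT values are later divided by MP2869_READ_IOUT_DIV. A sketch with hypothetical register values:

#include <stdio.h>

static const int iout_scale[8] = {32, 1, 2, 4, 8, 16, 32, 64};

int main(void)
{
	int prt = 0x0003;                  /* hypothetical MFR_SVI3_IOUT_PRT */
	int scale = iout_scale[prt & 0x7]; /* 011b -> 4, i.e. (1/8)A/LSB */
	int raw = 256;                     /* hypothetical READ_IOUT[10:0] */

	printf("iout = %d A\n", raw * scale / 32); /* 256 / 8 = 32 A */
	return 0;
}
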
+
+static int mp2869_read_byte_data(struct i2c_client *client, int page, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct mp2869_data *data = to_mp2869_data(info);
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VOUT_MODE:
+ /*
+ * The calculation of vout in this driver is based on direct format.
+ * As a result, the format of vout is enforced to direct.
+ */
+ ret = PB_VOUT_MODE_DIRECT;
+ break;
+ case PMBUS_STATUS_BYTE:
+ /*
+ * If the tsns digital fault is enabled, the TEMPERATURE flag
+ * of PMBUS_STATUS_BYTE should come from STATUS_MFR_SPECIFIC
+ * register bit1.
+ */
+ if (!data->mfr_thwn_flt_en)
+ return -ENODATA;
+
+ ret = pmbus_read_byte_data(client, page, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = (ret & ~GENMASK(2, 2)) |
+ FIELD_PREP(GENMASK(2, 2),
+ FIELD_GET(GENMASK(1, 1),
+ pmbus_read_byte_data(client, page,
+ PMBUS_STATUS_MFR_SPECIFIC)));
+ break;
+ case PMBUS_STATUS_TEMPERATURE:
+ /*
+ * If the tsns digital fault is enabled, the OT Fault and OT Warning
+ * flag of PMBUS_STATUS_TEMPERATURE should come from STATUS_MFR_SPECIFIC
+ * register bit1.
+ */
+ if (!data->mfr_thwn_flt_en)
+ return -ENODATA;
+
+ ret = pmbus_read_byte_data(client, page, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = (ret & ~GENMASK(7, 6)) |
+ FIELD_PREP(GENMASK(6, 6),
+ FIELD_GET(GENMASK(1, 1),
+ pmbus_read_byte_data(client, page,
+ PMBUS_STATUS_MFR_SPECIFIC))) |
+ FIELD_PREP(GENMASK(7, 7),
+ FIELD_GET(GENMASK(1, 1),
+ pmbus_read_byte_data(client, page,
+ PMBUS_STATUS_MFR_SPECIFIC)));
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+
+ return ret;
+}
+
+static int mp2869_read_word_data(struct i2c_client *client, int page, int phase,
+ int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct mp2869_data *data = to_mp2869_data(info);
+ int ret;
+
+ switch (reg) {
+ case PMBUS_STATUS_WORD:
+ /*
+ * If the tsns digital fault is enabled, the OT Fault flag
+ * of PMBUS_STATUS_WORD should come from STATUS_MFR_SPECIFIC
+ * register bit1.
+ */
+ if (!data->mfr_thwn_flt_en)
+ return -ENODATA;
+
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = (ret & ~GENMASK(2, 2)) |
+ FIELD_PREP(GENMASK(2, 2),
+ FIELD_GET(GENMASK(1, 1),
+ pmbus_read_byte_data(client, page,
+ PMBUS_STATUS_MFR_SPECIFIC)));
+ break;
+ case PMBUS_READ_VIN:
+ /*
+ * The MP2869 PMBUS_READ_VIN[10:0] holds the vin value with a scale of
+ * 31.25mV/LSB. Since the vin scale is also set to 31.25mV/LSB (using the
+ * r/m/b scale) in the MP2869 pmbus_driver_info struct, bits 0-10 of the
+ * word data can be returned to the pmbus core directly.
+ */
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = FIELD_GET(GENMASK(10, 0), ret);
+ break;
+ case PMBUS_READ_IIN:
+ /*
+ * The MP2869 redefines the standard 0x95 register as iin telemetry
+ * per rail.
+ */
+ ret = pmbus_read_word_data(client, page, phase, MFR_READ_IIN_EST);
+ if (ret < 0)
+ return ret;
+
+ break;
+ case PMBUS_READ_PIN:
+ /*
+ * The MP2869 redefines the standard 0x94 register as pin telemetry
+ * per rail. The MP2869 MFR_READ_PIN_EST register is in linear11 format,
+ * but the pin scale is set to 1W/LSB (using the r/m/b scale). As a
+ * result, the pin read from the MP2869 should be converted to W before
+ * the result is returned to the pmbus core.
+ */
+ ret = pmbus_read_word_data(client, page, phase, MFR_READ_PIN_EST);
+ if (ret < 0)
+ return ret;
+
+ ret = mp2869_reg2data_linear11(ret);
+ break;
+ case PMBUS_READ_VOUT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST((ret & GENMASK(11, 0)) * data->vout_scale[page],
+ MP2869_READ_VOUT_DIV);
+ break;
+ case PMBUS_READ_IOUT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST((ret & GENMASK(10, 0)) * data->iout_scale[page],
+ MP2869_READ_IOUT_DIV);
+ break;
+ case PMBUS_READ_POUT:
+ /*
+ * The MP2869 PMBUS_READ_POUT register is in linear11 format, but the
+ * pout scale is set to 1W/LSB (using the r/m/b scale). As a result, the
+ * pout read from the MP2869 should be converted to W before the result
+ * is returned to the pmbus core.
+ */
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = mp2869_reg2data_linear11(ret);
+ break;
+ case PMBUS_READ_TEMPERATURE_1:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = FIELD_GET(GENMASK(10, 0), ret);
+ break;
+ case PMBUS_VOUT_OV_FAULT_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ if (FIELD_GET(GENMASK(12, 9), ret))
+ ret = FIELD_GET(GENMASK(8, 0), ret) * MP2869_OVUV_LIMIT_SCALE +
+ (FIELD_GET(GENMASK(12, 9), ret) + 1) * MP2869_OVUV_DELTA_SCALE;
+ else
+ ret = FIELD_GET(GENMASK(8, 0), ret) * MP2869_OVUV_LIMIT_SCALE;
+ break;
+ case PMBUS_VOUT_UV_FAULT_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ if (FIELD_GET(GENMASK(12, 9), ret))
+ ret = FIELD_GET(GENMASK(8, 0), ret) * MP2869_OVUV_LIMIT_SCALE -
+ (FIELD_GET(GENMASK(12, 9), ret) + 1) * MP2869_OVUV_DELTA_SCALE;
+ else
+ ret = FIELD_GET(GENMASK(8, 0), ret) * MP2869_OVUV_LIMIT_SCALE;
+ break;
+ case PMBUS_OT_FAULT_LIMIT:
+ case PMBUS_OT_WARN_LIMIT:
+ /*
+ * The scale of the MP2869 PMBUS_OT_FAULT_LIMIT and PMBUS_OT_WARN_LIMIT
+ * is 1°C/LSB, and both have a 40°C offset.
+ */
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = (ret & GENMASK(7, 0)) - MP2869_TEMP_LIMIT_OFFSET;
+ break;
+ case PMBUS_VIN_OV_FAULT_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = (ret & GENMASK(7, 0)) * MP2869_VIN_OV_FAULT_GAIN;
+ break;
+ case PMBUS_VIN_UV_WARN_LIMIT:
+ case PMBUS_VIN_UV_FAULT_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = FIELD_GET(GENMASK(9, 0), ret);
+ break;
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST((ret & GENMASK(7, 0)) * data->iout_scale[page] *
+ MP2869_IOUT_LIMIT_UINT, MP2869_READ_IOUT_DIV);
+ break;
+ case PMBUS_POUT_OP_WARN_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = (ret & GENMASK(7, 0)) * MP2869_POUT_OP_GAIN;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int mp2869_write_word_data(struct i2c_client *client, int page, int reg,
+ u16 word)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct mp2869_data *data = to_mp2869_data(info);
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VOUT_UV_FAULT_LIMIT:
+ /*
+ * The MP2869 PMBUS_VOUT_UV_FAULT_LIMIT[8:0] is the limit value,
+ * and bit9-bit15 should not be changed.
+ */
+ ret = pmbus_read_word_data(client, page, 0xff, reg);
+ if (ret < 0)
+ return ret;
+
+ if (FIELD_GET(GENMASK(12, 9), ret))
+ ret = pmbus_write_word_data(client, page, reg,
+ (ret & ~GENMASK(8, 0)) |
+ FIELD_PREP(GENMASK(8, 0),
+ DIV_ROUND_CLOSEST(word +
+ (FIELD_GET(GENMASK(12, 9),
+ ret) + 1) *
+ MP2869_OVUV_DELTA_SCALE,
+ MP2869_OVUV_LIMIT_SCALE)));
+ else
+ ret = pmbus_write_word_data(client, page, reg,
+ (ret & ~GENMASK(8, 0)) |
+ FIELD_PREP(GENMASK(8, 0),
+ DIV_ROUND_CLOSEST(word,
+ MP2869_OVUV_LIMIT_SCALE)));
+ break;
+ case PMBUS_VOUT_OV_FAULT_LIMIT:
+ /*
+ * The MP2869 PMBUS_VOUT_OV_FAULT_LIMIT[8:0] is the limit value,
+ * and bit9-bit15 should not be changed.
+ */
+ ret = pmbus_read_word_data(client, page, 0xff, reg);
+ if (ret < 0)
+ return ret;
+
+ if (FIELD_GET(GENMASK(12, 9), ret))
+ ret = pmbus_write_word_data(client, page, reg,
+ (ret & ~GENMASK(8, 0)) |
+ FIELD_PREP(GENMASK(8, 0),
+ DIV_ROUND_CLOSEST(word -
+ (FIELD_GET(GENMASK(12, 9),
+ ret) + 1) *
+ MP2869_OVUV_DELTA_SCALE,
+ MP2869_OVUV_LIMIT_SCALE)));
+ else
+ ret = pmbus_write_word_data(client, page, reg,
+ (ret & ~GENMASK(8, 0)) |
+ FIELD_PREP(GENMASK(8, 0),
+ DIV_ROUND_CLOSEST(word,
+ MP2869_OVUV_LIMIT_SCALE)));
+ break;
+ case PMBUS_OT_FAULT_LIMIT:
+ case PMBUS_OT_WARN_LIMIT:
+ /*
+ * If the tsns digital fault is enabled, PMBUS_OT_FAULT_LIMIT and
+ * PMBUS_OT_WARN_LIMIT cannot be written.
+ */
+ if (data->mfr_thwn_flt_en)
+ return -EINVAL;
+
+ /*
+ * The MP2869 PMBUS_OT_FAULT_LIMIT and PMBUS_OT_WARN_LIMIT have a 40°C
+ * offset. Bit0-bit7 is the limit value, and bit8-bit15 must not be
+ * changed.
+ */
+ ret = pmbus_read_word_data(client, page, 0xff, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = pmbus_write_word_data(client, page, reg,
+ (ret & ~GENMASK(7, 0)) |
+ FIELD_PREP(GENMASK(7, 0),
+ word + MP2869_TEMP_LIMIT_OFFSET));
+ break;
+ case PMBUS_VIN_OV_FAULT_LIMIT:
+ /*
+ * The MP2869 PMBUS_VIN_OV_FAULT_LIMIT[7:0] is the limit value, and bit8-bit15
+ * must not be changed. The scale of PMBUS_VIN_OV_FAULT_LIMIT is 125mV/LSB,
+ * but the vin scale is set to 31.25mV/LSB (using the r/m/b scale), so the
+ * word data must be divided by MP2869_VIN_OV_FAULT_GAIN (4).
+ */
+ ret = pmbus_read_word_data(client, page, 0xff, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = pmbus_write_word_data(client, page, reg,
+ (ret & ~GENMASK(7, 0)) |
+ FIELD_PREP(GENMASK(7, 0),
+ DIV_ROUND_CLOSEST(word,
+ MP2869_VIN_OV_FAULT_GAIN)));
+ break;
+ case PMBUS_VIN_UV_WARN_LIMIT:
+ case PMBUS_VIN_UV_FAULT_LIMIT:
+ /*
+ * The PMBUS_VIN_UV_LIMIT[9:0] is the limit value, and bit10-bit15 must
+ * not be changed. The scale of PMBUS_VIN_UV_LIMIT is 31.25mV/LSB, and
+ * the vin scale is also set to 31.25mV/LSB (using the r/m/b scale), so
+ * the word data can be written directly.
+ */
+ ret = pmbus_read_word_data(client, page, 0xff, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = pmbus_write_word_data(client, page, reg,
+ (ret & ~GENMASK(9, 0)) |
+ FIELD_PREP(GENMASK(9, 0),
+ word));
+ break;
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ case PMBUS_IOUT_OC_WARN_LIMIT:
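+ /*
+ * Inverse of the IOUT read scaling:
+ * reg = word * MP2869_READ_IOUT_DIV / (MP2869_IOUT_LIMIT_UINT * iout_scale).
+ */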
+ ret = pmbus_write_word_data(client, page, reg,
+ DIV_ROUND_CLOSEST(word * MP2869_READ_IOUT_DIV,
+ MP2869_IOUT_LIMIT_UINT *
+ data->iout_scale[page]));
+ break;
+ case PMBUS_POUT_OP_WARN_LIMIT:
+ /*
+ * The POUT_OP_WARN_LIMIT[11:0] is the limit value, and bit12-bit15 must
+ * not be changed. The scale of POUT_OP_WARN_LIMIT is 2W/LSB.
+ */
+ ret = pmbus_read_word_data(client, page, 0xff, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = pmbus_write_word_data(client, page, reg,
+ (ret & ~GENMASK(11, 0)) |
+ FIELD_PREP(GENMASK(11, 0),
+ DIV_ROUND_CLOSEST(word,
+ MP2869_POUT_OP_GAIN)));
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int mp2869_identify(struct i2c_client *client, struct pmbus_driver_info *info)
+{
+ int ret;
+
+ /* Identify whether the tsns digital fault is enabled. */
+ ret = mp2869_identify_thwn_flt(client, info, 1);
+ if (ret < 0)
+ return ret;
+
+ /* Identify vout scale for rail1. */
+ ret = mp2869_identify_vout_scale(client, info, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Identify vout scale for rail2. */
+ ret = mp2869_identify_vout_scale(client, info, 1);
+ if (ret < 0)
+ return ret;
+
+ /* Identify iout scale for rail 1. */
+ ret = mp2869_identify_iout_scale(client, info, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Identify iout scale for rail 2. */
+ return mp2869_identify_iout_scale(client, info, 1);
+}
+
+static const struct pmbus_driver_info mp2869_info = {
+ .pages = MP2869_PAGE_NUM,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_CURRENT_IN] = linear,
+ .format[PSC_CURRENT_OUT] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .format[PSC_POWER] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+
+ .m[PSC_VOLTAGE_IN] = 32,
+ .R[PSC_VOLTAGE_IN] = 0,
+ .b[PSC_VOLTAGE_IN] = 0,
+
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .R[PSC_VOLTAGE_OUT] = 3,
+ .b[PSC_VOLTAGE_OUT] = 0,
+
+ .m[PSC_CURRENT_OUT] = 1,
+ .R[PSC_CURRENT_OUT] = 0,
+ .b[PSC_CURRENT_OUT] = 0,
+
+ .m[PSC_TEMPERATURE] = 1,
+ .R[PSC_TEMPERATURE] = 0,
+ .b[PSC_TEMPERATURE] = 0,
+
+ .m[PSC_POWER] = 1,
+ .R[PSC_POWER] = 0,
+ .b[PSC_POWER] = 0,
+
+ .func[0] = MP2869_RAIL1_FUNC,
+ .func[1] = MP2869_RAIL2_FUNC,
+ .read_word_data = mp2869_read_word_data,
+ .write_word_data = mp2869_write_word_data,
+ .read_byte_data = mp2869_read_byte_data,
+ .identify = mp2869_identify,
+};
+
+static int mp2869_probe(struct i2c_client *client)
+{
+ struct pmbus_driver_info *info;
+ struct mp2869_data *data;
+
+ data = devm_kzalloc(&client->dev, sizeof(struct mp2869_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ memcpy(&data->info, &mp2869_info, sizeof(*info));
+ info = &data->info;
+
+ return pmbus_do_probe(client, info);
+}
+
+static const struct i2c_device_id mp2869_id[] = {
+ {"mp2869", 0},
+ {"mp29608", 1},
+ {"mp29612", 2},
+ {"mp29816", 3},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, mp2869_id);
+
+static const struct of_device_id __maybe_unused mp2869_of_match[] = {
+ {.compatible = "mps,mp2869", .data = (void *)0},
+ {.compatible = "mps,mp29608", .data = (void *)1},
+ {.compatible = "mps,mp29612", .data = (void *)2},
+ {.compatible = "mps,mp29816", .data = (void *)3},
+ {}
+};
+MODULE_DEVICE_TABLE(of, mp2869_of_match);
+
+static struct i2c_driver mp2869_driver = {
+ .driver = {
+ .name = "mp2869",
+ .of_match_table = mp2869_of_match,
+ },
+ .probe = mp2869_probe,
+ .id_table = mp2869_id,
+};
+
+module_i2c_driver(mp2869_driver);
+
+MODULE_AUTHOR("Wensheng Wang <wenswang@yeah.net>");
+MODULE_DESCRIPTION("PMBus driver for MPS MP2869");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/mp29502.c b/drivers/hwmon/pmbus/mp29502.c
new file mode 100644
index 000000000000..7241373f1557
--- /dev/null
+++ b/drivers/hwmon/pmbus/mp29502.c
@@ -0,0 +1,670 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Hardware monitoring driver for MPS Multi-phase Digital VR Controllers (MP29502)
+ */
+
+#include <linux/bitfield.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include "pmbus.h"
+
+#define MFR_VOUT_SCALE_LOOP 0x29
+#define MFR_SVI3_IOUT_PRT 0x67
+#define MFR_READ_PIN_EST 0x94
+#define MFR_READ_IIN_EST 0x95
+#define MFR_VOUT_PROT1 0x3D
+#define MFR_VOUT_PROT2 0x51
+#define MFR_SLOPE_CNT_SET 0xA8
+#define MFR_TSNS_FLT_SET 0xBB
+
+#define MP29502_VIN_OV_GAIN 4
+#define MP29502_TEMP_LIMIT_OFFSET 40
+#define MP29502_READ_VOUT_DIV 1024
+#define MP29502_READ_IOUT_DIV 32
+#define MP29502_IOUT_LIMIT_UINT 8
+#define MP29502_OVUV_LIMIT_SCALE 10
+#define MP28502_VOUT_OV_GAIN 512
+#define MP28502_VOUT_OV_SCALE 40
+#define MP29502_VOUT_UV_OFFSET 36
+#define MP29502_PIN_GAIN 2
+#define MP29502_IIN_DIV 2
+
+#define MP29502_PAGE_NUM 1
+
+#define MP29502_RAIL_FUNC (PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | \
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_POUT | \
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_PIN | \
+ PMBUS_HAVE_IIN | \
+ PMBUS_HAVE_STATUS_VOUT | \
+ PMBUS_HAVE_STATUS_IOUT | \
+ PMBUS_HAVE_STATUS_TEMP | \
+ PMBUS_HAVE_STATUS_INPUT)
+
+struct mp29502_data {
+ struct pmbus_driver_info info;
+ int vout_scale;
+ int vout_bottom_div;
+ int vout_top_div;
+ int ovp_div;
+ int iout_scale;
+};
+
+#define to_mp29502_data(x) container_of(x, struct mp29502_data, info)
+
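+/*
+ * Decode a PMBus linear11 word: bits [15:11] are a signed 5-bit exponent,
+ * bits [10:0] a signed 11-bit mantissa, and value = mantissa * 2^exponent.
+ * Worked example: 0xD204 -> exponent = -6, mantissa = 516, so the result is
+ * 516 >> 6 = 8.
+ */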
+static u16 mp29502_reg2data_linear11(u16 word)
+{
+ s16 exponent;
+ s32 mantissa;
+ s64 val;
+
+ exponent = ((s16)word) >> 11;
+ mantissa = ((s16)((word & 0x7ff) << 5)) >> 5;
+ val = mantissa;
+
+ if (exponent >= 0)
+ val <<= exponent;
+ else
+ val >>= -exponent;
+
+ return val;
+}
+
+static int
+mp29502_identify_vout_scale(struct i2c_client *client, struct pmbus_driver_info *info,
+ int page)
+{
+ struct mp29502_data *data = to_mp29502_data(info);
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, MFR_VOUT_SCALE_LOOP);
+ if (ret < 0)
+ return ret;
+
+ switch (FIELD_GET(GENMASK(12, 10), ret)) {
+ case 0:
+ data->vout_scale = 6400;
+ break;
+ case 1:
+ data->vout_scale = 5120;
+ break;
+ case 2:
+ data->vout_scale = 2560;
+ break;
+ case 3:
+ data->vout_scale = 2048;
+ break;
+ case 4:
+ data->vout_scale = 1024;
+ break;
+ case 5:
+ data->vout_scale = 4;
+ break;
+ case 6:
+ data->vout_scale = 2;
+ break;
+ case 7:
+ data->vout_scale = 1;
+ break;
+ default:
+ data->vout_scale = 1;
+ break;
+ }
+
+ return 0;
+}
+
+static int
+mp29502_identify_vout_divider(struct i2c_client *client, struct pmbus_driver_info *info,
+ int page)
+{
+ struct mp29502_data *data = to_mp29502_data(info);
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, MFR_VOUT_PROT1);
+ if (ret < 0)
+ return ret;
+
+ data->vout_bottom_div = FIELD_GET(GENMASK(11, 0), ret);
+
+ ret = i2c_smbus_read_word_data(client, MFR_VOUT_PROT2);
+ if (ret < 0)
+ return ret;
+
+ data->vout_top_div = FIELD_GET(GENMASK(14, 0), ret);
+
+ return 0;
+}
+
+static int
+mp29502_identify_ovp_divider(struct i2c_client *client, struct pmbus_driver_info *info,
+ int page)
+{
+ struct mp29502_data *data = to_mp29502_data(info);
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, MFR_SLOPE_CNT_SET);
+ if (ret < 0)
+ return ret;
+
+ data->ovp_div = FIELD_GET(GENMASK(9, 0), ret);
+
+ return 0;
+}
+
+static int
+mp29502_identify_iout_scale(struct i2c_client *client, struct pmbus_driver_info *info,
+ int page)
+{
+ struct mp29502_data *data = to_mp29502_data(info);
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, MFR_SVI3_IOUT_PRT);
+ if (ret < 0)
+ return ret;
+
+ switch (ret & GENMASK(2, 0)) {
+ case 0:
+ case 6:
+ data->iout_scale = 32;
+ break;
+ case 1:
+ data->iout_scale = 1;
+ break;
+ case 2:
+ data->iout_scale = 2;
+ break;
+ case 3:
+ data->iout_scale = 4;
+ break;
+ case 4:
+ data->iout_scale = 8;
+ break;
+ case 5:
+ data->iout_scale = 16;
+ break;
+ default:
+ data->iout_scale = 64;
+ break;
+ }
+
+ return 0;
+}
+
+static int mp29502_read_vout_ov_limit(struct i2c_client *client, struct mp29502_data *data)
+{
+ int ret;
+ int ov_value;
+
+ /*
+ * The vout ov fault limit value comes from the page1 MFR_TSNS_FLT_SET
+ * register, while the other telemetry and limit values come from page0
+ * registers, so the page must be set back to 0 after reading the vout
+ * ov limit.
+ */
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 1);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, MFR_TSNS_FLT_SET);
+ if (ret < 0)
+ return ret;
+
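+ /* ov limit = TSNS_FLT_SET[12:7] * MP28502_VOUT_OV_GAIN * MP28502_VOUT_OV_SCALE / ovp_div */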
+ ov_value = DIV_ROUND_CLOSEST(FIELD_GET(GENMASK(12, 7), ret) *
+ MP28502_VOUT_OV_GAIN * MP28502_VOUT_OV_SCALE,
+ data->ovp_div);
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0);
+ if (ret < 0)
+ return ret;
+
+ return ov_value;
+}
+
+static int mp29502_write_vout_ov_limit(struct i2c_client *client, u16 word,
+ struct mp29502_data *data)
+{
+ int ret;
+
+ /*
+ * The vout ov fault limit value lives in the page1 MFR_TSNS_FLT_SET
+ * register, while the other telemetry and limit values live in page0
+ * registers, so the page must be set back to 0 after writing the vout
+ * ov limit.
+ */
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 1);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, MFR_TSNS_FLT_SET);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_word_data(client, MFR_TSNS_FLT_SET,
+ (ret & ~GENMASK(12, 7)) |
+ FIELD_PREP(GENMASK(12, 7),
+ DIV_ROUND_CLOSEST(word * data->ovp_div,
+ MP28502_VOUT_OV_GAIN * MP28502_VOUT_OV_SCALE)));
+
+ return i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0);
+}
+
+static int mp29502_read_byte_data(struct i2c_client *client, int page, int reg)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0);
+ if (ret < 0)
+ return ret;
+
+ switch (reg) {
+ case PMBUS_VOUT_MODE:
+ ret = PB_VOUT_MODE_DIRECT;
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+
+ return ret;
+}
+
+static int mp29502_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct mp29502_data *data = to_mp29502_data(info);
+ int ret;
+
+ switch (reg) {
+ case PMBUS_STATUS_WORD:
+ ret = -ENODATA;
+ break;
+ case PMBUS_READ_VIN:
+ /*
+ * The MP29502 PMBUS_READ_VIN[10:0] is the vin value with a scale of
+ * 125mV/LSB. Since the vin scale in the MP29502 pmbus_driver_info
+ * struct is also set to 125mV/LSB (using the r/m/b scale), bit0-bit10
+ * of the word data can be returned to the pmbus core directly.
+ */
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = FIELD_GET(GENMASK(10, 0), ret);
+ break;
+ case PMBUS_READ_VOUT:
+ /*
+ * The MP29502 PMBUS_READ_VOUT[11:0] is the vout value, which is
+ * calculated from the vout scale and the vout divider.
+ */
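+ /*
+ * vout = raw[11:0] * vout_scale * (bottom_div + 4 * top_div) /
+ * (MP29502_READ_VOUT_DIV * bottom_div)
+ */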
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST((ret & GENMASK(11, 0)) *
+ data->vout_scale *
+ (data->vout_bottom_div +
+ 4 * data->vout_top_div),
+ MP29502_READ_VOUT_DIV *
+ data->vout_bottom_div);
+ break;
+ case PMBUS_READ_IIN:
+ /*
+ * The MP29502 MFR_READ_IIN_EST register is in linear11 format with a
+ * non-constant exponent, but the iin scale is set to 1A/LSB (using the
+ * r/m/b scale), so the value read from the MP29502 must be converted
+ * to A before returning it to the pmbus core.
+ */
+ ret = pmbus_read_word_data(client, page, phase, MFR_READ_IIN_EST);
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST(mp29502_reg2data_linear11(ret),
+ MP29502_IIN_DIV);
+ break;
+ case PMBUS_READ_PIN:
+ /*
+ * The MP29502 MFR_READ_PIN_EST register is in linear11 format with a
+ * non-constant exponent, but the pin scale is set to 1W/LSB (using the
+ * r/m/b scale), so the value read from the MP29502 must be converted
+ * to W before returning it to the pmbus core.
+ */
+ ret = pmbus_read_word_data(client, page, phase, MFR_READ_PIN_EST);
+ if (ret < 0)
+ return ret;
+
+ ret = mp29502_reg2data_linear11(ret) * MP29502_PIN_GAIN;
+ break;
+ case PMBUS_READ_POUT:
+ /*
+ * The MP29502 PMBUS_READ_POUT register is in linear11 format with a
+ * non-constant exponent, but the pout scale is set to 1W/LSB (using
+ * the r/m/b scale), so the value read from the MP29502 must be
+ * converted to W before returning it to the pmbus core. The pout is
+ * also corrected by the vout divider.
+ */
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST(mp29502_reg2data_linear11(ret) *
+ (data->vout_bottom_div +
+ 4 * data->vout_top_div),
+ data->vout_bottom_div);
+ break;
+ case PMBUS_READ_IOUT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST((ret & GENMASK(10, 0)) * data->iout_scale,
+ MP29502_READ_IOUT_DIV);
+ break;
+ case PMBUS_READ_TEMPERATURE_1:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = FIELD_GET(GENMASK(10, 0), ret);
+ break;
+ case PMBUS_VIN_OV_FAULT_LIMIT:
+ /*
+ * The MP29502 PMBUS_VIN_OV_FAULT_LIMIT scale is 500mV/LSB, but
+ * the vin scale is set to 125mV/LSB (using the r/m/b scale),
+ * so the word data must be multiplied by MP29502_VIN_OV_GAIN (4).
+ */
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = FIELD_GET(GENMASK(7, 0), ret) * MP29502_VIN_OV_GAIN;
+ break;
+ case PMBUS_VIN_UV_WARN_LIMIT:
+ case PMBUS_VIN_UV_FAULT_LIMIT:
+ /*
+ * The MP29502 PMBUS_VIN_UV_WARN_LIMIT and PMBUS_VIN_UV_FAULT_LIMIT
+ * scale is 125mV/LSB, and the vin scale is also set to 125mV/LSB
+ * (using the r/m/b scale), so bit0-bit9 of the word data can be
+ * returned to the pmbus core directly.
+ */
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = FIELD_GET(GENMASK(9, 0), ret);
+ break;
+ case PMBUS_VOUT_OV_FAULT_LIMIT:
+ /*
+ * The MP29502 vout ov fault limit value comes from
+ * page1 MFR_TSNS_FLT_SET[12:7].
+ */
+ ret = mp29502_read_vout_ov_limit(client, data);
+ if (ret < 0)
+ return ret;
+
+ break;
+ case PMBUS_VOUT_UV_FAULT_LIMIT:
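+ /*
+ * uv limit = (raw[8:0] * MP29502_OVUV_LIMIT_SCALE -
+ * MP29502_VOUT_UV_OFFSET), corrected by the external vout divider
+ * ratio (bottom_div + 4 * top_div) / bottom_div.
+ */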
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST((FIELD_GET(GENMASK(8, 0), ret) *
+ MP29502_OVUV_LIMIT_SCALE -
+ MP29502_VOUT_UV_OFFSET) *
+ (data->vout_bottom_div +
+ 4 * data->vout_top_div),
+ data->vout_bottom_div);
+ break;
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST((ret & GENMASK(7, 0)) *
+ data->iout_scale *
+ MP29502_IOUT_LIMIT_UINT,
+ MP29502_READ_IOUT_DIV);
+ break;
+ case PMBUS_OT_FAULT_LIMIT:
+ case PMBUS_OT_WARN_LIMIT:
+ /*
+ * The scale of the MP29502 PMBUS_OT_FAULT_LIMIT and
+ * PMBUS_OT_WARN_LIMIT is 1°C/LSB, and both have a 40°C offset.
+ */
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = (ret & GENMASK(7, 0)) - MP29502_TEMP_LIMIT_OFFSET;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int mp29502_write_word_data(struct i2c_client *client, int page, int reg,
+ u16 word)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct mp29502_data *data = to_mp29502_data(info);
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0);
+ if (ret < 0)
+ return ret;
+
+ switch (reg) {
+ case PMBUS_VIN_OV_FAULT_LIMIT:
+ /*
+ * The PMBUS_VIN_OV_FAULT_LIMIT[7:0] is the limit value,
+ * and bit8-bit15 must not be changed. The scale of
+ * PMBUS_VIN_OV_FAULT_LIMIT is 500mV/LSB, but the vin
+ * scale is set to 125mV/LSB (using the r/m/b scale), so
+ * the word data must be divided by MP29502_VIN_OV_GAIN (4).
+ */
+ ret = pmbus_read_word_data(client, page, 0xff, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = pmbus_write_word_data(client, page, reg,
+ (ret & ~GENMASK(7, 0)) |
+ FIELD_PREP(GENMASK(7, 0),
+ DIV_ROUND_CLOSEST(word,
+ MP29502_VIN_OV_GAIN)));
+ break;
+ case PMBUS_VIN_UV_WARN_LIMIT:
+ case PMBUS_VIN_UV_FAULT_LIMIT:
+ /*
+ * The PMBUS_VIN_UV_WARN_LIMIT[9:0] and PMBUS_VIN_UV_FAULT_LIMIT[9:0]
+ * are the limit value, and bit10-bit15 should not be changed.
+ */
+ ret = pmbus_read_word_data(client, page, 0xff, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = pmbus_write_word_data(client, page, reg,
+ (ret & ~GENMASK(9, 0)) |
+ FIELD_PREP(GENMASK(9, 0),
+ word));
+ break;
+ case PMBUS_VOUT_OV_FAULT_LIMIT:
+ ret = mp29502_write_vout_ov_limit(client, word, data);
+ if (ret < 0)
+ return ret;
+
+ break;
+ case PMBUS_VOUT_UV_FAULT_LIMIT:
+ ret = pmbus_read_word_data(client, page, 0xff, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = pmbus_write_word_data(client, page, reg,
+ (ret & ~GENMASK(8, 0)) |
+ FIELD_PREP(GENMASK(8, 0),
+ DIV_ROUND_CLOSEST(word *
+ data->vout_bottom_div +
+ MP29502_VOUT_UV_OFFSET *
+ (data->vout_bottom_div +
+ 4 * data->vout_top_div),
+ MP29502_OVUV_LIMIT_SCALE *
+ (data->vout_bottom_div +
+ 4 * data->vout_top_div))));
+ break;
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+ ret = pmbus_write_word_data(client, page, reg,
+ DIV_ROUND_CLOSEST(word *
+ MP29502_READ_IOUT_DIV,
+ MP29502_IOUT_LIMIT_UINT *
+ data->iout_scale));
+ break;
+ case PMBUS_OT_FAULT_LIMIT:
+ case PMBUS_OT_WARN_LIMIT:
+ /*
+ * The PMBUS_OT_FAULT_LIMIT[7:0] and PMBUS_OT_WARN_LIMIT[7:0]
+ * are the limit value, and bit8-bit15 should not be changed.
+ */
+ ret = pmbus_read_word_data(client, page, 0xff, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = pmbus_write_word_data(client, page, reg,
+ (ret & ~GENMASK(7, 0)) |
+ FIELD_PREP(GENMASK(7, 0),
+ word + MP29502_TEMP_LIMIT_OFFSET));
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int mp29502_identify(struct i2c_client *client, struct pmbus_driver_info *info)
+{
+ int ret;
+
+ /* Identify vout scale */
+ ret = mp29502_identify_vout_scale(client, info, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Identify vout divider. */
+ ret = mp29502_identify_vout_divider(client, info, 1);
+ if (ret < 0)
+ return ret;
+
+ /* Identify ovp divider. */
+ ret = mp29502_identify_ovp_divider(client, info, 1);
+ if (ret < 0)
+ return ret;
+
+ /* Identify iout scale */
+ return mp29502_identify_iout_scale(client, info, 0);
+}
+
+static const struct pmbus_driver_info mp29502_info = {
+ .pages = MP29502_PAGE_NUM,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .format[PSC_CURRENT_IN] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_POWER] = direct,
+
+ .m[PSC_VOLTAGE_IN] = 8,
+ .R[PSC_VOLTAGE_IN] = 0,
+ .b[PSC_VOLTAGE_IN] = 0,
+
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .R[PSC_VOLTAGE_OUT] = 3,
+ .b[PSC_VOLTAGE_OUT] = 0,
+
+ .m[PSC_TEMPERATURE] = 1,
+ .R[PSC_TEMPERATURE] = 0,
+ .b[PSC_TEMPERATURE] = 0,
+
+ .m[PSC_CURRENT_IN] = 1,
+ .R[PSC_CURRENT_IN] = 0,
+ .b[PSC_CURRENT_IN] = 0,
+
+ .m[PSC_CURRENT_OUT] = 1,
+ .R[PSC_CURRENT_OUT] = 0,
+ .b[PSC_CURRENT_OUT] = 0,
+
+ .m[PSC_POWER] = 1,
+ .R[PSC_POWER] = 0,
+ .b[PSC_POWER] = 0,
+
+ .func[0] = MP29502_RAIL_FUNC,
+ .read_word_data = mp29502_read_word_data,
+ .read_byte_data = mp29502_read_byte_data,
+ .write_word_data = mp29502_write_word_data,
+ .identify = mp29502_identify,
+};
+
+static int mp29502_probe(struct i2c_client *client)
+{
+ struct pmbus_driver_info *info;
+ struct mp29502_data *data;
+
+ data = devm_kzalloc(&client->dev, sizeof(struct mp29502_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ memcpy(&data->info, &mp29502_info, sizeof(*info));
+ info = &data->info;
+
+ return pmbus_do_probe(client, info);
+}
+
+static const struct i2c_device_id mp29502_id[] = {
+ {"mp29502", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, mp29502_id);
+
+static const struct of_device_id __maybe_unused mp29502_of_match[] = {
+ {.compatible = "mps,mp29502"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, mp29502_of_match);
+
+static struct i2c_driver mp29502_driver = {
+ .driver = {
+ .name = "mp29502",
+ .of_match_table = mp29502_of_match,
+ },
+ .probe = mp29502_probe,
+ .id_table = mp29502_id,
+};
+
+module_i2c_driver(mp29502_driver);
+
+MODULE_AUTHOR("Wensheng Wang <wenswang@yeah.net");
+MODULE_DESCRIPTION("PMBus driver for MPS MP29502");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/mp5990.c b/drivers/hwmon/pmbus/mp5990.c
index 4ce381a39480..9a4ee79712cf 100644
--- a/drivers/hwmon/pmbus/mp5990.c
+++ b/drivers/hwmon/pmbus/mp5990.c
@@ -8,6 +8,8 @@
#include <linux/of_device.h>
#include "pmbus.h"
+enum chips { mp5990, mp5998 };
+
#define MP5990_EFUSE_CFG (0xC4)
#define MP5990_VOUT_FORMAT BIT(9)
@@ -110,10 +112,53 @@ static struct pmbus_driver_info mp5990_info = {
.read_word_data = mp5990_read_word_data,
};
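+/*
+ * MP5998 coefficients for the PMBus direct format, where
+ * real = (reg * 10^-R - b) / m; e.g. m = 64, b = 0, R = 0 for the voltages
+ * means one LSB is 1/64 V (15.625 mV). Values presumably follow the
+ * datasheet.
+ */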
+static struct pmbus_driver_info mp5998_info = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_CURRENT_IN] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
+ .format[PSC_POWER] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_VOLTAGE_IN] = 64,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = 0,
+ .m[PSC_VOLTAGE_OUT] = 64,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 0,
+ .m[PSC_CURRENT_IN] = 16,
+ .b[PSC_CURRENT_IN] = 0,
+ .R[PSC_CURRENT_IN] = 0,
+ .m[PSC_CURRENT_OUT] = 16,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = 0,
+ .m[PSC_POWER] = 2,
+ .b[PSC_POWER] = 0,
+ .R[PSC_POWER] = 0,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 0,
+ .func[0] =
+ PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
+ PMBUS_HAVE_IIN | PMBUS_HAVE_PIN | PMBUS_HAVE_POUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP,
+ .read_byte_data = mp5990_read_byte_data,
+ .read_word_data = mp5990_read_word_data,
+};
+
+static const struct i2c_device_id mp5990_id[] = {
+ {"mp5990", mp5990},
+ {"mp5998", mp5998},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, mp5990_id);
+
static int mp5990_probe(struct i2c_client *client)
{
struct pmbus_driver_info *info;
struct mp5990_data *data;
+ enum chips chip;
int ret;
data = devm_kzalloc(&client->dev, sizeof(struct mp5990_data),
@@ -121,7 +166,15 @@ static int mp5990_probe(struct i2c_client *client)
if (!data)
return -ENOMEM;
- memcpy(&data->info, &mp5990_info, sizeof(*info));
+ if (client->dev.of_node)
+ chip = (uintptr_t)of_device_get_match_data(&client->dev);
+ else
+ chip = i2c_match_id(mp5990_id, client)->driver_data;
+
+ if (chip == mp5990)
+ memcpy(&data->info, &mp5990_info, sizeof(*info));
+ else
+ memcpy(&data->info, &mp5998_info, sizeof(*info));
info = &data->info;
/* Read Vout Config */
@@ -140,6 +193,9 @@ static int mp5990_probe(struct i2c_client *client)
data->info.format[PSC_VOLTAGE_OUT] = linear;
data->info.format[PSC_CURRENT_OUT] = linear;
data->info.format[PSC_POWER] = linear;
+ if (chip == mp5998)
+ data->info.format[PSC_CURRENT_IN] = linear;
+
ret = i2c_smbus_read_word_data(client, PMBUS_READ_VOUT);
if (ret < 0) {
dev_err(&client->dev, "Can't get vout exponent.");
@@ -153,16 +209,11 @@ static int mp5990_probe(struct i2c_client *client)
}
static const struct of_device_id mp5990_of_match[] = {
- { .compatible = "mps,mp5990" },
+ { .compatible = "mps,mp5990", .data = (void *)mp5990 },
+ { .compatible = "mps,mp5998", .data = (void *)mp5998 },
{}
};
-static const struct i2c_device_id mp5990_id[] = {
- {"mp5990"},
- { }
-};
-MODULE_DEVICE_TABLE(i2c, mp5990_id);
-
static struct i2c_driver mp5990_driver = {
.driver = {
.name = "mp5990",
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index 52d4000902d5..55e7af3a5f98 100644
--- a/drivers/hwmon/pmbus/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -364,7 +364,7 @@ static void ucd9000_probe_gpio(struct i2c_client *client,
data->gpio.direction_input = ucd9000_gpio_direction_input;
data->gpio.direction_output = ucd9000_gpio_direction_output;
data->gpio.get = ucd9000_gpio_get;
- data->gpio.set_rv = ucd9000_gpio_set;
+ data->gpio.set = ucd9000_gpio_set;
data->gpio.can_sleep = true;
data->gpio.base = -1;
data->gpio.parent = &client->dev;
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index d0fe53451bdf..37269db2de84 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -64,6 +64,7 @@ struct pwm_fan_ctx {
u64 pwm_duty_cycle_from_stopped;
u32 pwm_usec_from_stopped;
+ u8 pwm_shutdown;
};
/* This handler assumes self resetting edge triggered interrupt. */
@@ -484,9 +485,14 @@ static void pwm_fan_cleanup(void *__ctx)
struct pwm_fan_ctx *ctx = __ctx;
timer_delete_sync(&ctx->rpm_timer);
- /* Switch off everything */
- ctx->enable_mode = pwm_disable_reg_disable;
- pwm_fan_power_off(ctx, true);
+ if (ctx->pwm_shutdown) {
+ ctx->enable_mode = pwm_enable_reg_enable;
+ __set_pwm(ctx, ctx->pwm_shutdown);
+ } else {
+ /* Switch off everything */
+ ctx->enable_mode = pwm_disable_reg_disable;
+ pwm_fan_power_off(ctx, true);
+ }
}
static int pwm_fan_probe(struct platform_device *pdev)
@@ -498,6 +504,7 @@ static int pwm_fan_probe(struct platform_device *pdev)
int ret;
const struct hwmon_channel_info **channels;
u32 initial_pwm, pwm_min_from_stopped = 0;
+ u32 pwm_shutdown_percent = 0;
u32 *fan_channel_config;
int channel_count = 1; /* We always have a PWM channel. */
int i;
@@ -648,6 +655,11 @@ static int pwm_fan_probe(struct platform_device *pdev)
channels[1] = &ctx->fan_channel;
}
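+ /*
+ * Optional shutdown duty cycle: map 0-100 percent onto the 0-255 PWM
+ * range so pwm_fan_cleanup() can park the fan at that duty cycle.
+ */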
+ ret = device_property_read_u32(dev, "fan-shutdown-percent",
+ &pwm_shutdown_percent);
+ if (!ret && pwm_shutdown_percent)
+ ctx->pwm_shutdown = (clamp(pwm_shutdown_percent, 0U, 100U) * 255) / 100;
+
ret = device_property_read_u32(dev, "fan-stop-to-start-percent",
&pwm_min_from_stopped);
if (!ret && pwm_min_from_stopped) {
diff --git a/drivers/hwmon/sa67mcu-hwmon.c b/drivers/hwmon/sa67mcu-hwmon.c
new file mode 100644
index 000000000000..22f703b7b256
--- /dev/null
+++ b/drivers/hwmon/sa67mcu-hwmon.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * sa67mcu hardware monitoring driver
+ *
+ * Copyright 2025 Kontron Europe GmbH
+ */
+
+#include <linux/bitfield.h>
+#include <linux/hwmon.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+#define SA67MCU_VOLTAGE(n) (0x00 + ((n) * 2))
+#define SA67MCU_TEMP(n) (0x04 + ((n) * 2))
+
+struct sa67mcu_hwmon {
+ struct regmap *regmap;
+ u32 offset;
+};
+
+static int sa67mcu_hwmon_read(struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel, long *input)
+{
+ struct sa67mcu_hwmon *hwmon = dev_get_drvdata(dev);
+ unsigned int offset;
+ u8 reg[2];
+ int ret;
+
+ switch (type) {
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ offset = hwmon->offset + SA67MCU_VOLTAGE(channel);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ offset = hwmon->offset + SA67MCU_TEMP(channel);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* Reading the low byte will capture the value */
+ ret = regmap_bulk_read(hwmon->regmap, offset, reg, ARRAY_SIZE(reg));
+ if (ret)
+ return ret;
+
+ *input = reg[1] << 8 | reg[0];
+
+ /* Temperatures are s16 and in 0.1degC steps. */
+ if (type == hwmon_temp)
+ *input = sign_extend32(*input, 15) * 100;
+
+ return 0;
+}
+
+static const struct hwmon_channel_info * const sa67mcu_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
+ NULL
+};
+
+static const char *const sa67mcu_hwmon_in_labels[] = {
+ "VDDIN",
+ "VDD_RTC",
+};
+
+static int sa67mcu_hwmon_read_string(struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **str)
+{
+ switch (type) {
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_label:
+ *str = sa67mcu_hwmon_in_labels[channel];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct hwmon_ops sa67mcu_hwmon_ops = {
+ .visible = 0444,
+ .read = sa67mcu_hwmon_read,
+ .read_string = sa67mcu_hwmon_read_string,
+};
+
+static const struct hwmon_chip_info sa67mcu_hwmon_chip_info = {
+ .ops = &sa67mcu_hwmon_ops,
+ .info = sa67mcu_hwmon_info,
+};
+
+static int sa67mcu_hwmon_probe(struct platform_device *pdev)
+{
+ struct sa67mcu_hwmon *hwmon;
+ struct device *hwmon_dev;
+ int ret;
+
+ if (!pdev->dev.parent)
+ return -ENODEV;
+
+ hwmon = devm_kzalloc(&pdev->dev, sizeof(*hwmon), GFP_KERNEL);
+ if (!hwmon)
+ return -ENOMEM;
+
+ hwmon->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!hwmon->regmap)
+ return -ENODEV;
+
+ ret = device_property_read_u32(&pdev->dev, "reg", &hwmon->offset);
+ if (ret)
+ return -EINVAL;
+
+ hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev,
+ "sa67mcu_hwmon", hwmon,
+ &sa67mcu_hwmon_chip_info,
+ NULL);
+ if (IS_ERR(hwmon_dev))
+ dev_err(&pdev->dev, "failed to register as hwmon device\n");
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct of_device_id sa67mcu_hwmon_of_match[] = {
+ { .compatible = "kontron,sa67mcu-hwmon", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sa67mcu_hwmon_of_match);
+
+static struct platform_driver sa67mcu_hwmon_driver = {
+ .probe = sa67mcu_hwmon_probe,
+ .driver = {
+ .name = "sa67mcu-hwmon",
+ .of_match_table = sa67mcu_hwmon_of_match,
+ },
+};
+module_platform_driver(sa67mcu_hwmon_driver);
+
+MODULE_DESCRIPTION("sa67mcu Hardware Monitoring Driver");
+MODULE_AUTHOR("Michael Walle <mwalle@kernel.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/sbtsi_temp.c b/drivers/hwmon/sbtsi_temp.c
index 3c839f56c460..a6c439e376ff 100644
--- a/drivers/hwmon/sbtsi_temp.c
+++ b/drivers/hwmon/sbtsi_temp.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
+#include <linux/bitfield.h>
/*
* SB-TSI registers only support SMBus byte data access. "_INT" registers are
@@ -29,8 +30,22 @@
#define SBTSI_REG_TEMP_HIGH_DEC 0x13 /* RW */
#define SBTSI_REG_TEMP_LOW_DEC 0x14 /* RW */
+/*
+ * Bit selecting the temperature measurement range.
+ * bit == 0: Use default temperature range (0C to 255.875C).
+ * bit == 1: Use extended temperature range (-49C to +206.875C).
+ */
+#define SBTSI_CONFIG_EXT_RANGE_SHIFT 2
+/*
+ * ReadOrder bit specifies the reading order of integer and decimal part of
+ * CPU temperature for atomic reads. If bit == 0, reading integer part triggers
+ * latching of the decimal part, so integer part should be read first.
+ * If bit == 1, read order should be reversed.
+ */
#define SBTSI_CONFIG_READ_ORDER_SHIFT 5
+#define SBTSI_TEMP_EXT_RANGE_ADJ 49000
+
#define SBTSI_TEMP_MIN 0
#define SBTSI_TEMP_MAX 255875
@@ -38,6 +53,8 @@
struct sbtsi_data {
struct i2c_client *client;
struct mutex lock;
+ bool ext_range_mode;
+ bool read_order;
};
/*
@@ -74,23 +91,11 @@ static int sbtsi_read(struct device *dev, enum hwmon_sensor_types type,
{
struct sbtsi_data *data = dev_get_drvdata(dev);
s32 temp_int, temp_dec;
- int err;
switch (attr) {
case hwmon_temp_input:
- /*
- * ReadOrder bit specifies the reading order of integer and
- * decimal part of CPU temp for atomic reads. If bit == 0,
- * reading integer part triggers latching of the decimal part,
- * so integer part should be read first. If bit == 1, read
- * order should be reversed.
- */
- err = i2c_smbus_read_byte_data(data->client, SBTSI_REG_CONFIG);
- if (err < 0)
- return err;
-
mutex_lock(&data->lock);
- if (err & BIT(SBTSI_CONFIG_READ_ORDER_SHIFT)) {
+ if (data->read_order) {
temp_dec = i2c_smbus_read_byte_data(data->client, SBTSI_REG_TEMP_DEC);
temp_int = i2c_smbus_read_byte_data(data->client, SBTSI_REG_TEMP_INT);
} else {
@@ -122,6 +127,8 @@ static int sbtsi_read(struct device *dev, enum hwmon_sensor_types type,
return temp_dec;
*val = sbtsi_reg_to_mc(temp_int, temp_dec);
+ if (data->ext_range_mode)
+ *val -= SBTSI_TEMP_EXT_RANGE_ADJ;
return 0;
}
@@ -146,6 +153,8 @@ static int sbtsi_write(struct device *dev, enum hwmon_sensor_types type,
return -EINVAL;
}
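+ /* Extended-range register values are offset by 49C (SBTSI_TEMP_EXT_RANGE_ADJ). */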
+ if (data->ext_range_mode)
+ val += SBTSI_TEMP_EXT_RANGE_ADJ;
val = clamp_val(val, SBTSI_TEMP_MIN, SBTSI_TEMP_MAX);
sbtsi_mc_to_reg(val, &temp_int, &temp_dec);
@@ -203,6 +212,7 @@ static int sbtsi_probe(struct i2c_client *client)
struct device *dev = &client->dev;
struct device *hwmon_dev;
struct sbtsi_data *data;
+ int err;
data = devm_kzalloc(dev, sizeof(struct sbtsi_data), GFP_KERNEL);
if (!data)
@@ -211,8 +221,14 @@ static int sbtsi_probe(struct i2c_client *client)
data->client = client;
mutex_init(&data->lock);
- hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, data, &sbtsi_chip_info,
- NULL);
+ err = i2c_smbus_read_byte_data(data->client, SBTSI_REG_CONFIG);
+ if (err < 0)
+ return err;
+ data->ext_range_mode = FIELD_GET(BIT(SBTSI_CONFIG_EXT_RANGE_SHIFT), err);
+ data->read_order = FIELD_GET(BIT(SBTSI_CONFIG_READ_ORDER_SHIFT), err);
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, data,
+ &sbtsi_chip_info, NULL);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
diff --git a/drivers/hwmon/sch56xx-common.c b/drivers/hwmon/sch56xx-common.c
index 71941b1bb573..98e075e54e9d 100644
--- a/drivers/hwmon/sch56xx-common.c
+++ b/drivers/hwmon/sch56xx-common.c
@@ -544,10 +544,8 @@ void sch56xx_watchdog_register(struct device *parent, u16 addr, u32 revision,
watchdog_set_drvdata(&data->wddev, data);
err = devm_watchdog_register_device(parent, &data->wddev);
- if (err) {
- pr_err("Registering watchdog chardev: %d\n", err);
+ if (err)
devm_kfree(parent, data);
- }
}
EXPORT_SYMBOL(sch56xx_watchdog_register);
diff --git a/drivers/hwmon/sht21.c b/drivers/hwmon/sht21.c
index 97327313529b..627d35070a42 100644
--- a/drivers/hwmon/sht21.c
+++ b/drivers/hwmon/sht21.c
@@ -275,13 +275,26 @@ static int sht21_probe(struct i2c_client *client)
/* Device ID table */
static const struct i2c_device_id sht21_id[] = {
+ { "sht20" },
{ "sht21" },
+ { "sht25" },
{ }
};
MODULE_DEVICE_TABLE(i2c, sht21_id);
+static const struct of_device_id sht21_of_match[] = {
+ { .compatible = "sensirion,sht20" },
+ { .compatible = "sensirion,sht21" },
+ { .compatible = "sensirion,sht25" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sht21_of_match);
+
static struct i2c_driver sht21_driver = {
- .driver.name = "sht21",
+ .driver = {
+ .name = "sht21",
+ .of_match_table = sht21_of_match,
+ },
.probe = sht21_probe,
.id_table = sht21_id,
};
diff --git a/drivers/hwmon/sy7636a-hwmon.c b/drivers/hwmon/sy7636a-hwmon.c
index ed110884786b..a12fc0ce70e7 100644
--- a/drivers/hwmon/sy7636a-hwmon.c
+++ b/drivers/hwmon/sy7636a-hwmon.c
@@ -104,3 +104,4 @@ module_platform_driver(sy7636a_sensor_driver);
MODULE_DESCRIPTION("SY7636A sensor driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:sy7636a-temperature");
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index a02daa496c9c..376e0eac8cc1 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -53,6 +53,7 @@
#define CONVERSION_TIME_MS 35 /* in milli-seconds */
struct tmp102 {
+ const char *label;
struct regmap *regmap;
u16 config_orig;
unsigned long ready_time;
@@ -70,6 +71,16 @@ static inline u16 tmp102_mC_to_reg(int val)
return (val * 128) / 1000;
}
+static int tmp102_read_string(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ struct tmp102 *tmp102 = dev_get_drvdata(dev);
+
+ *str = tmp102->label;
+
+ return 0;
+}
+
static int tmp102_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *temp)
{
@@ -128,12 +139,18 @@ static int tmp102_write(struct device *dev, enum hwmon_sensor_types type,
static umode_t tmp102_is_visible(const void *data, enum hwmon_sensor_types type,
u32 attr, int channel)
{
+ const struct tmp102 *tmp102 = data;
+
if (type != hwmon_temp)
return 0;
switch (attr) {
case hwmon_temp_input:
return 0444;
+ case hwmon_temp_label:
+ if (tmp102->label)
+ return 0444;
+ return 0;
case hwmon_temp_max_hyst:
case hwmon_temp_max:
return 0644;
@@ -146,12 +163,13 @@ static const struct hwmon_channel_info * const tmp102_info[] = {
HWMON_CHANNEL_INFO(chip,
HWMON_C_REGISTER_TZ),
HWMON_CHANNEL_INFO(temp,
- HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST),
+ HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | HWMON_T_MAX_HYST),
NULL
};
static const struct hwmon_ops tmp102_hwmon_ops = {
.is_visible = tmp102_is_visible,
+ .read_string = tmp102_read_string,
.read = tmp102_read,
.write = tmp102_write,
};
@@ -213,6 +231,8 @@ static int tmp102_probe(struct i2c_client *client)
if (!tmp102)
return -ENOMEM;
+ of_property_read_string(dev->of_node, "label", &tmp102->label);
+
i2c_set_clientdata(client, tmp102);
tmp102->regmap = devm_regmap_init_i2c(client, &tmp102_regmap_config);
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index f064e3d172b3..6a4239ebb582 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -268,4 +268,16 @@ config CORESIGHT_KUNIT_TESTS
Enable Coresight unit tests. Only useful for development and not
intended for production.
+config CORESIGHT_TNOC
+ tristate "Coresight Trace Network On Chip driver"
+ help
+ This driver provides support for the Trace Network On Chip (TNOC)
+ component. TNOC is an interconnect used to collect traces from various
+ subsystems and transport them to a CoreSight trace sink. It sits in the
+ different tiles of a SoC, aggregates the trace local to each tile, and
+ transports it to another tile or eventually to the CoreSight trace sink.
+
+ To compile this driver as a module, choose M here: the module will be
+ called coresight-tnoc.
+
endif
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 4e7cc3c5bf99..ab16d06783a5 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_CORESIGHT_SINK_TPIU) += coresight-tpiu.o
obj-$(CONFIG_CORESIGHT_SINK_ETBV10) += coresight-etb10.o
obj-$(CONFIG_CORESIGHT_LINKS_AND_SINKS) += coresight-funnel.o \
coresight-replicator.o
+obj-$(CONFIG_CORESIGHT_TNOC) += coresight-tnoc.o
obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o
coresight-etm3x-y := coresight-etm3x-core.o coresight-etm-cp14.o \
coresight-etm3x-sysfs.o
diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c
index 5058432233da..a3ccb7034ae1 100644
--- a/drivers/hwtracing/coresight/coresight-catu.c
+++ b/drivers/hwtracing/coresight/coresight-catu.c
@@ -515,11 +515,21 @@ static int __catu_probe(struct device *dev, struct resource *res)
{
int ret = 0;
u32 dma_mask;
- struct catu_drvdata *drvdata = dev_get_drvdata(dev);
+ struct catu_drvdata *drvdata;
struct coresight_desc catu_desc;
struct coresight_platform_data *pdata = NULL;
void __iomem *base;
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, drvdata);
+
+ ret = coresight_get_enable_clocks(dev, &drvdata->pclk, &drvdata->atclk);
+ if (ret)
+ return ret;
+
catu_desc.name = coresight_alloc_device_name(&catu_devs, dev);
if (!catu_desc.name)
return -ENOMEM;
@@ -576,14 +586,8 @@ out:
static int catu_probe(struct amba_device *adev, const struct amba_id *id)
{
- struct catu_drvdata *drvdata;
int ret;
- drvdata = devm_kzalloc(&adev->dev, sizeof(*drvdata), GFP_KERNEL);
- if (!drvdata)
- return -ENOMEM;
-
- amba_set_drvdata(adev, drvdata);
ret = __catu_probe(&adev->dev, &adev->res);
if (!ret)
pm_runtime_put(&adev->dev);
@@ -623,29 +627,16 @@ static struct amba_driver catu_driver = {
static int catu_platform_probe(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- struct catu_drvdata *drvdata;
int ret = 0;
- drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
- if (!drvdata)
- return -ENOMEM;
-
- drvdata->pclk = coresight_get_enable_apb_pclk(&pdev->dev);
- if (IS_ERR(drvdata->pclk))
- return -ENODEV;
-
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- dev_set_drvdata(&pdev->dev, drvdata);
ret = __catu_probe(&pdev->dev, res);
pm_runtime_put(&pdev->dev);
- if (ret) {
+ if (ret)
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR_OR_NULL(drvdata->pclk))
- clk_put(drvdata->pclk);
- }
return ret;
}
@@ -659,8 +650,6 @@ static void catu_platform_remove(struct platform_device *pdev)
__catu_remove(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR_OR_NULL(drvdata->pclk))
- clk_put(drvdata->pclk);
}
#ifdef CONFIG_PM
@@ -668,18 +657,26 @@ static int catu_runtime_suspend(struct device *dev)
{
struct catu_drvdata *drvdata = dev_get_drvdata(dev);
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_disable_unprepare(drvdata->pclk);
+ clk_disable_unprepare(drvdata->atclk);
+ clk_disable_unprepare(drvdata->pclk);
+
return 0;
}
static int catu_runtime_resume(struct device *dev)
{
struct catu_drvdata *drvdata = dev_get_drvdata(dev);
+ int ret;
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_prepare_enable(drvdata->pclk);
- return 0;
+ ret = clk_prepare_enable(drvdata->pclk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(drvdata->atclk);
+ if (ret)
+ clk_disable_unprepare(drvdata->pclk);
+
+ return ret;
}
#endif
diff --git a/drivers/hwtracing/coresight/coresight-catu.h b/drivers/hwtracing/coresight/coresight-catu.h
index 755776cd19c5..6e6b7aac206d 100644
--- a/drivers/hwtracing/coresight/coresight-catu.h
+++ b/drivers/hwtracing/coresight/coresight-catu.h
@@ -62,6 +62,7 @@
struct catu_drvdata {
struct clk *pclk;
+ struct clk *atclk;
void __iomem *base;
struct coresight_device *csdev;
int irq;
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index fa758cc21827..3267192f0c1c 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -3,6 +3,8 @@
* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*/
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
#include <linux/build_bug.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -1374,8 +1376,9 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
goto out_unlock;
}
- if (csdev->type == CORESIGHT_DEV_TYPE_SINK ||
- csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) {
+ if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
+ csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) &&
+ sink_ops(csdev)->alloc_buffer) {
ret = etm_perf_add_symlink_sink(csdev);
if (ret) {
@@ -1698,6 +1701,53 @@ int coresight_etm_get_trace_id(struct coresight_device *csdev, enum cs_mode mode
}
EXPORT_SYMBOL_GPL(coresight_etm_get_trace_id);
+/*
+ * Attempt to find and enable programming clock (pclk) and trace clock (atclk)
+ * for the given device.
+ *
+ * For ACPI devices, clocks are controlled by firmware, so bail out early in
+ * this case. Also, skip enabling pclk if the clock is managed by the AMBA
+ * bus driver instead.
+ *
+ * atclk is an optional clock; it is enabled only if it exists.
+ * Otherwise, a NULL pointer is returned to the caller.
+ *
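+ * A typical probe-path usage (a sketch mirroring the etm4x caller below):
+ *
+ *	ret = coresight_get_enable_clocks(dev, &drvdata->pclk, &drvdata->atclk);
+ *	if (ret)
+ *		return ret;
+ *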
+ * Returns: '0' on Success; Error code otherwise.
+ */
+int coresight_get_enable_clocks(struct device *dev, struct clk **pclk,
+ struct clk **atclk)
+{
+ WARN_ON(!pclk);
+
+ if (has_acpi_companion(dev))
+ return 0;
+
+ if (!dev_is_amba(dev)) {
+ /*
+ * "apb_pclk" is the default clock name for an Arm Primecell
+ * peripheral, while "apb" is used only by the CTCU driver.
+ *
+ * For easier maintenance, CoreSight drivers should use
+ * "apb_pclk" as the programming clock name.
+ */
+ *pclk = devm_clk_get_optional_enabled(dev, "apb_pclk");
+ if (!*pclk)
+ *pclk = devm_clk_get_optional_enabled(dev, "apb");
+ if (IS_ERR(*pclk))
+ return PTR_ERR(*pclk);
+ }
+
+ /* Skip atclk initialization when the caller passes a NULL pointer. */
+ if (atclk) {
+ *atclk = devm_clk_get_optional_enabled(dev, "atclk");
+ if (IS_ERR(*atclk))
+ return PTR_ERR(*atclk);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(coresight_get_enable_clocks);
+
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>");
MODULE_AUTHOR("Mathieu Poirier <mathieu.poirier@linaro.org>");
diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
index a871d997330b..5f21366406aa 100644
--- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
+++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
@@ -562,10 +562,20 @@ static void debug_func_exit(void)
static int __debug_probe(struct device *dev, struct resource *res)
{
- struct debug_drvdata *drvdata = dev_get_drvdata(dev);
+ struct debug_drvdata *drvdata;
void __iomem *base;
int ret;
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, drvdata);
+
+ ret = coresight_get_enable_clocks(dev, &drvdata->pclk, NULL);
+ if (ret)
+ return ret;
+
drvdata->cpu = coresight_get_cpu(dev);
if (drvdata->cpu < 0)
return drvdata->cpu;
@@ -625,13 +635,6 @@ err:
static int debug_probe(struct amba_device *adev, const struct amba_id *id)
{
- struct debug_drvdata *drvdata;
-
- drvdata = devm_kzalloc(&adev->dev, sizeof(*drvdata), GFP_KERNEL);
- if (!drvdata)
- return -ENOMEM;
-
- amba_set_drvdata(adev, drvdata);
return __debug_probe(&adev->dev, &adev->res);
}
@@ -690,18 +693,8 @@ static struct amba_driver debug_driver = {
static int debug_platform_probe(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- struct debug_drvdata *drvdata;
int ret = 0;
- drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
- if (!drvdata)
- return -ENOMEM;
-
- drvdata->pclk = coresight_get_enable_apb_pclk(&pdev->dev);
- if (IS_ERR(drvdata->pclk))
- return -ENODEV;
-
- dev_set_drvdata(&pdev->dev, drvdata);
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
@@ -710,8 +703,6 @@ static int debug_platform_probe(struct platform_device *pdev)
if (ret) {
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR_OR_NULL(drvdata->pclk))
- clk_put(drvdata->pclk);
}
return ret;
}
@@ -725,8 +716,6 @@ static void debug_platform_remove(struct platform_device *pdev)
__debug_remove(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR_OR_NULL(drvdata->pclk))
- clk_put(drvdata->pclk);
}
#ifdef CONFIG_ACPI
@@ -742,8 +731,8 @@ static int debug_runtime_suspend(struct device *dev)
{
struct debug_drvdata *drvdata = dev_get_drvdata(dev);
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_disable_unprepare(drvdata->pclk);
+ clk_disable_unprepare(drvdata->pclk);
+
return 0;
}
@@ -751,9 +740,7 @@ static int debug_runtime_resume(struct device *dev)
{
struct debug_drvdata *drvdata = dev_get_drvdata(dev);
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_prepare_enable(drvdata->pclk);
- return 0;
+ return clk_prepare_enable(drvdata->pclk);
}
#endif
diff --git a/drivers/hwtracing/coresight/coresight-ctcu-core.c b/drivers/hwtracing/coresight/coresight-ctcu-core.c
index c6bafc96db96..c586495e9a08 100644
--- a/drivers/hwtracing/coresight/coresight-ctcu-core.c
+++ b/drivers/hwtracing/coresight/coresight-ctcu-core.c
@@ -188,7 +188,7 @@ static int ctcu_probe(struct platform_device *pdev)
const struct ctcu_config *cfgs;
struct ctcu_drvdata *drvdata;
void __iomem *base;
- int i;
+ int i, ret;
desc.name = coresight_alloc_device_name(&ctcu_devs, dev);
if (!desc.name)
@@ -207,9 +207,9 @@ static int ctcu_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- drvdata->apb_clk = coresight_get_enable_apb_pclk(dev);
- if (IS_ERR(drvdata->apb_clk))
- return -ENODEV;
+ ret = coresight_get_enable_clocks(dev, &drvdata->apb_clk, NULL);
+ if (ret)
+ return ret;
cfgs = of_device_get_match_data(dev);
if (cfgs) {
@@ -233,12 +233,8 @@ static int ctcu_probe(struct platform_device *pdev)
desc.access = CSDEV_ACCESS_IOMEM(base);
drvdata->csdev = coresight_register(&desc);
- if (IS_ERR(drvdata->csdev)) {
- if (!IS_ERR_OR_NULL(drvdata->apb_clk))
- clk_put(drvdata->apb_clk);
-
+ if (IS_ERR(drvdata->csdev))
return PTR_ERR(drvdata->csdev);
- }
return 0;
}
@@ -275,8 +271,6 @@ static void ctcu_platform_remove(struct platform_device *pdev)
ctcu_remove(pdev);
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR_OR_NULL(drvdata->apb_clk))
- clk_put(drvdata->apb_clk);
}
#ifdef CONFIG_PM
@@ -284,8 +278,7 @@ static int ctcu_runtime_suspend(struct device *dev)
{
struct ctcu_drvdata *drvdata = dev_get_drvdata(dev);
- if (drvdata && !IS_ERR_OR_NULL(drvdata->apb_clk))
- clk_disable_unprepare(drvdata->apb_clk);
+ clk_disable_unprepare(drvdata->apb_clk);
return 0;
}
@@ -294,10 +287,7 @@ static int ctcu_runtime_resume(struct device *dev)
{
struct ctcu_drvdata *drvdata = dev_get_drvdata(dev);
- if (drvdata && !IS_ERR_OR_NULL(drvdata->apb_clk))
- clk_prepare_enable(drvdata->apb_clk);
-
- return 0;
+ return clk_prepare_enable(drvdata->apb_clk);
}
#endif
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index d5efb085b30d..35db1b6093d1 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -730,12 +730,10 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
if (!drvdata)
return -ENOMEM;
- drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
- if (!IS_ERR(drvdata->atclk)) {
- ret = clk_prepare_enable(drvdata->atclk);
- if (ret)
- return ret;
- }
+ drvdata->atclk = devm_clk_get_optional_enabled(dev, "atclk");
+ if (IS_ERR(drvdata->atclk))
+ return PTR_ERR(drvdata->atclk);
+
dev_set_drvdata(dev, drvdata);
/* validity for the resource is already checked by the AMBA core */
@@ -811,8 +809,7 @@ static int etb_runtime_suspend(struct device *dev)
{
struct etb_drvdata *drvdata = dev_get_drvdata(dev);
- if (drvdata && !IS_ERR(drvdata->atclk))
- clk_disable_unprepare(drvdata->atclk);
+ clk_disable_unprepare(drvdata->atclk);
return 0;
}
@@ -821,10 +818,7 @@ static int etb_runtime_resume(struct device *dev)
{
struct etb_drvdata *drvdata = dev_get_drvdata(dev);
- if (drvdata && !IS_ERR(drvdata->atclk))
- clk_prepare_enable(drvdata->atclk);
-
- return 0;
+ return clk_prepare_enable(drvdata->atclk);
}
#endif
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index f1551c08ecb2..f677c08233ba 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -851,7 +851,7 @@ static ssize_t etm_perf_sink_name_show(struct device *dev,
struct dev_ext_attribute *ea;
ea = container_of(dattr, struct dev_ext_attribute, attr);
- return scnprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)(ea->var));
+ return scnprintf(buf, PAGE_SIZE, "0x%px\n", ea->var);
}
static struct dev_ext_attribute *
@@ -943,7 +943,7 @@ static ssize_t etm_perf_cscfg_event_show(struct device *dev,
struct dev_ext_attribute *ea;
ea = container_of(dattr, struct dev_ext_attribute, attr);
- return scnprintf(buf, PAGE_SIZE, "configid=0x%lx\n", (unsigned long)(ea->var));
+ return scnprintf(buf, PAGE_SIZE, "configid=0x%px\n", ea->var);
}
int etm_perf_add_symlink_cscfg(struct device *dev, struct cscfg_config_desc *config_desc)
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-core.c b/drivers/hwtracing/coresight/coresight-etm3x-core.c
index 1c6204e14422..45630a1cd32f 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-core.c
@@ -832,12 +832,9 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
spin_lock_init(&drvdata->spinlock);
- drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
- if (!IS_ERR(drvdata->atclk)) {
- ret = clk_prepare_enable(drvdata->atclk);
- if (ret)
- return ret;
- }
+ drvdata->atclk = devm_clk_get_optional_enabled(dev, "atclk");
+ if (IS_ERR(drvdata->atclk))
+ return PTR_ERR(drvdata->atclk);
drvdata->cpu = coresight_get_cpu(dev);
if (drvdata->cpu < 0)
@@ -928,8 +925,7 @@ static int etm_runtime_suspend(struct device *dev)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev);
- if (drvdata && !IS_ERR(drvdata->atclk))
- clk_disable_unprepare(drvdata->atclk);
+ clk_disable_unprepare(drvdata->atclk);
return 0;
}
@@ -938,10 +934,7 @@ static int etm_runtime_resume(struct device *dev)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev);
- if (drvdata && !IS_ERR(drvdata->atclk))
- clk_prepare_enable(drvdata->atclk);
-
- return 0;
+ return clk_prepare_enable(drvdata->atclk);
}
#endif
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index 42e5d37403ad..020f070bf17d 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -4,6 +4,7 @@
*/
#include <linux/acpi.h>
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
@@ -528,7 +529,8 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
etm4x_relaxed_write32(csa, config->seq_rst, TRCSEQRSTEVR);
etm4x_relaxed_write32(csa, config->seq_state, TRCSEQSTR);
}
- etm4x_relaxed_write32(csa, config->ext_inp, TRCEXTINSELR);
+ if (drvdata->numextinsel)
+ etm4x_relaxed_write32(csa, config->ext_inp, TRCEXTINSELR);
for (i = 0; i < drvdata->nr_cntr; i++) {
etm4x_relaxed_write32(csa, config->cntrldvr[i], TRCCNTRLDVRn(i));
etm4x_relaxed_write32(csa, config->cntr_ctrl[i], TRCCNTCTLRn(i));
@@ -1423,6 +1425,7 @@ static void etm4_init_arch_data(void *info)
etmidr5 = etm4x_relaxed_read32(csa, TRCIDR5);
/* NUMEXTIN, bits[8:0] number of external inputs implemented */
drvdata->nr_ext_inp = FIELD_GET(TRCIDR5_NUMEXTIN_MASK, etmidr5);
+ drvdata->numextinsel = FIELD_GET(TRCIDR5_NUMEXTINSEL_MASK, etmidr5);
/* TRACEIDSIZE, bits[21:16] indicates the trace ID width */
drvdata->trcid_size = FIELD_GET(TRCIDR5_TRACEIDSIZE_MASK, etmidr5);
/* ATBTRIG, bit[22] implementation can support ATB triggers? */
@@ -1852,7 +1855,9 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
state->trcseqrstevr = etm4x_read32(csa, TRCSEQRSTEVR);
state->trcseqstr = etm4x_read32(csa, TRCSEQSTR);
}
- state->trcextinselr = etm4x_read32(csa, TRCEXTINSELR);
+
+ if (drvdata->numextinsel)
+ state->trcextinselr = etm4x_read32(csa, TRCEXTINSELR);
for (i = 0; i < drvdata->nr_cntr; i++) {
state->trccntrldvr[i] = etm4x_read32(csa, TRCCNTRLDVRn(i));
@@ -1984,7 +1989,8 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
etm4x_relaxed_write32(csa, state->trcseqrstevr, TRCSEQRSTEVR);
etm4x_relaxed_write32(csa, state->trcseqstr, TRCSEQSTR);
}
- etm4x_relaxed_write32(csa, state->trcextinselr, TRCEXTINSELR);
+ if (drvdata->numextinsel)
+ etm4x_relaxed_write32(csa, state->trcextinselr, TRCEXTINSELR);
for (i = 0; i < drvdata->nr_cntr; i++) {
etm4x_relaxed_write32(csa, state->trccntrldvr[i], TRCCNTRLDVRn(i));
@@ -2211,10 +2217,15 @@ static int etm4_probe(struct device *dev)
struct csdev_access access = { 0 };
struct etm4_init_arg init_arg = { 0 };
struct etm4_init_arg *delayed;
+ int ret;
if (WARN_ON(!drvdata))
return -ENOMEM;
+ ret = coresight_get_enable_clocks(dev, &drvdata->pclk, &drvdata->atclk);
+ if (ret)
+ return ret;
+
if (pm_save_enable == PARAM_PM_SAVE_FIRMWARE)
pm_save_enable = coresight_loses_context_with_cpu(dev) ?
PARAM_PM_SAVE_SELF_HOSTED : PARAM_PM_SAVE_NEVER;
@@ -2297,16 +2308,10 @@ static int etm4_probe_platform_dev(struct platform_device *pdev)
if (!drvdata)
return -ENOMEM;
- drvdata->pclk = coresight_get_enable_apb_pclk(&pdev->dev);
- if (IS_ERR(drvdata->pclk))
- return -ENODEV;
-
if (res) {
drvdata->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(drvdata->base)) {
- clk_put(drvdata->pclk);
+ if (IS_ERR(drvdata->base))
return PTR_ERR(drvdata->base);
- }
}
dev_set_drvdata(&pdev->dev, drvdata);
@@ -2413,9 +2418,6 @@ static void etm4_remove_platform_dev(struct platform_device *pdev)
if (drvdata)
etm4_remove_dev(drvdata);
pm_runtime_disable(&pdev->dev);
-
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_put(drvdata->pclk);
}
static const struct amba_id etm4_ids[] = {
@@ -2463,8 +2465,8 @@ static int etm4_runtime_suspend(struct device *dev)
{
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
- if (drvdata->pclk && !IS_ERR(drvdata->pclk))
- clk_disable_unprepare(drvdata->pclk);
+ clk_disable_unprepare(drvdata->atclk);
+ clk_disable_unprepare(drvdata->pclk);
return 0;
}
@@ -2472,11 +2474,17 @@ static int etm4_runtime_suspend(struct device *dev)
static int etm4_runtime_resume(struct device *dev)
{
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
+ int ret;
- if (drvdata->pclk && !IS_ERR(drvdata->pclk))
- clk_prepare_enable(drvdata->pclk);
+ ret = clk_prepare_enable(drvdata->pclk);
+ if (ret)
+ return ret;
- return 0;
+ ret = clk_prepare_enable(drvdata->atclk);
+ if (ret)
+ clk_disable_unprepare(drvdata->pclk);
+
+ return ret;
}
#endif
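
etm4_probe() and the probes below all switch to a single coresight_get_enable_clocks() call. The helper's definition lies outside these hunks; the sketch below is only an assumed shape consistent with its call sites (both clocks optional, acquired already enabled, released by devres), with the names "apb_pclk" and "atclk" taken from the clocks these drivers requested before:

static int coresight_get_enable_clocks(struct device *dev,
				       struct clk **pclk, struct clk **atclk)
{
	/* Sketch only: the real helper is added elsewhere in the series. */
	*pclk = devm_clk_get_optional_enabled(dev, "apb_pclk");
	if (IS_ERR(*pclk))
		return PTR_ERR(*pclk);

	*atclk = devm_clk_get_optional_enabled(dev, "atclk");
	if (IS_ERR(*atclk))
		return PTR_ERR(*atclk);

	return 0;
}

Note the matching order in the PM callbacks above: resume enables pclk before atclk and rolls pclk back if atclk fails, while suspend tears the two down in reverse.
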
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index ab251865b893..e9eeea6240d5 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -4,6 +4,7 @@
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*/
+#include <linux/bitfield.h>
#include <linux/coresight.h>
#include <linux/pid_namespace.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index ac649515054d..13ec9ecef46f 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -162,6 +162,7 @@
#define TRCIDR4_NUMVMIDC_MASK GENMASK(31, 28)
#define TRCIDR5_NUMEXTIN_MASK GENMASK(8, 0)
+#define TRCIDR5_NUMEXTINSEL_MASK GENMASK(11, 9)
#define TRCIDR5_TRACEIDSIZE_MASK GENMASK(21, 16)
#define TRCIDR5_ATBTRIG BIT(22)
#define TRCIDR5_LPOVERRIDE BIT(23)
@@ -919,7 +920,8 @@ struct etmv4_save_state {
/**
* struct etm4_drvdata - specifics associated to an ETM component
- * @pclk APB clock if present, otherwise NULL
+ * @pclk: APB clock if present, otherwise NULL
+ * @atclk: Optional clock for the core parts of the ETMv4.
* @base: Memory mapped base address for this component.
* @csdev: Component vitals needed by the framework.
* @spinlock: Only one at a time pls.
@@ -988,6 +990,7 @@ struct etmv4_save_state {
*/
struct etmv4_drvdata {
struct clk *pclk;
+ struct clk *atclk;
void __iomem *base;
struct coresight_device *csdev;
raw_spinlock_t spinlock;
@@ -999,6 +1002,7 @@ struct etmv4_drvdata {
u8 nr_cntr;
u8 nr_ext_inp;
u8 numcidc;
+ u8 numextinsel;
u8 numvmidc;
u8 nrseqstate;
u8 nr_event;
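
The new TRCIDR5_NUMEXTINSEL_MASK follows the usual <linux/bitfield.h> idiom: GENMASK() names the bit span once and FIELD_GET() derives the shift from it. A self-contained illustration (demo_numextinsel is a made-up name):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define TRCIDR5_NUMEXTINSEL_MASK	GENMASK(11, 9)

static u8 demo_numextinsel(u32 etmidr5)
{
	/* For etmidr5 == 0x00000e00, bits [11:9] are 0b111, so this is 7. */
	return FIELD_GET(TRCIDR5_NUMEXTINSEL_MASK, etmidr5);
}
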
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index b1922dbe9292..3b248e54471a 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -213,11 +213,11 @@ ATTRIBUTE_GROUPS(coresight_funnel);
static int funnel_probe(struct device *dev, struct resource *res)
{
- int ret;
void __iomem *base;
struct coresight_platform_data *pdata = NULL;
struct funnel_drvdata *drvdata;
struct coresight_desc desc = { 0 };
+ int ret;
if (is_of_node(dev_fwnode(dev)) &&
of_device_is_compatible(dev->of_node, "arm,coresight-funnel"))
@@ -231,16 +231,9 @@ static int funnel_probe(struct device *dev, struct resource *res)
if (!drvdata)
return -ENOMEM;
- drvdata->atclk = devm_clk_get(dev, "atclk"); /* optional */
- if (!IS_ERR(drvdata->atclk)) {
- ret = clk_prepare_enable(drvdata->atclk);
- if (ret)
- return ret;
- }
-
- drvdata->pclk = coresight_get_enable_apb_pclk(dev);
- if (IS_ERR(drvdata->pclk))
- return -ENODEV;
+ ret = coresight_get_enable_clocks(dev, &drvdata->pclk, &drvdata->atclk);
+ if (ret)
+ return ret;
/*
* Map the device base for dynamic-funnel, which has been
@@ -248,10 +241,8 @@ static int funnel_probe(struct device *dev, struct resource *res)
*/
if (res) {
base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base)) {
- ret = PTR_ERR(base);
- goto out_disable_clk;
- }
+ if (IS_ERR(base))
+ return PTR_ERR(base);
drvdata->base = base;
desc.groups = coresight_funnel_groups;
desc.access = CSDEV_ACCESS_IOMEM(base);
@@ -261,10 +252,9 @@ static int funnel_probe(struct device *dev, struct resource *res)
dev_set_drvdata(dev, drvdata);
pdata = coresight_get_platform_data(dev);
- if (IS_ERR(pdata)) {
- ret = PTR_ERR(pdata);
- goto out_disable_clk;
- }
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+
dev->platform_data = pdata;
raw_spin_lock_init(&drvdata->spinlock);
@@ -274,19 +264,10 @@ static int funnel_probe(struct device *dev, struct resource *res)
desc.pdata = pdata;
desc.dev = dev;
drvdata->csdev = coresight_register(&desc);
- if (IS_ERR(drvdata->csdev)) {
- ret = PTR_ERR(drvdata->csdev);
- goto out_disable_clk;
- }
+ if (IS_ERR(drvdata->csdev))
+ return PTR_ERR(drvdata->csdev);
- ret = 0;
-
-out_disable_clk:
- if (ret && !IS_ERR_OR_NULL(drvdata->atclk))
- clk_disable_unprepare(drvdata->atclk);
- if (ret && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_disable_unprepare(drvdata->pclk);
- return ret;
+ return 0;
}
static int funnel_remove(struct device *dev)
@@ -303,11 +284,8 @@ static int funnel_runtime_suspend(struct device *dev)
{
struct funnel_drvdata *drvdata = dev_get_drvdata(dev);
- if (drvdata && !IS_ERR(drvdata->atclk))
- clk_disable_unprepare(drvdata->atclk);
-
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_disable_unprepare(drvdata->pclk);
+ clk_disable_unprepare(drvdata->atclk);
+ clk_disable_unprepare(drvdata->pclk);
return 0;
}
@@ -315,13 +293,17 @@ static int funnel_runtime_suspend(struct device *dev)
static int funnel_runtime_resume(struct device *dev)
{
struct funnel_drvdata *drvdata = dev_get_drvdata(dev);
+ int ret;
- if (drvdata && !IS_ERR(drvdata->atclk))
- clk_prepare_enable(drvdata->atclk);
+ ret = clk_prepare_enable(drvdata->pclk);
+ if (ret)
+ return ret;
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_prepare_enable(drvdata->pclk);
- return 0;
+ ret = clk_prepare_enable(drvdata->atclk);
+ if (ret)
+ clk_disable_unprepare(drvdata->pclk);
+
+ return ret;
}
#endif
@@ -355,8 +337,6 @@ static void funnel_platform_remove(struct platform_device *pdev)
funnel_remove(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR_OR_NULL(drvdata->pclk))
- clk_put(drvdata->pclk);
}
static const struct of_device_id funnel_match[] = {
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index 06efd2b01a0f..e6472658235d 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -219,11 +219,11 @@ static const struct attribute_group *replicator_groups[] = {
static int replicator_probe(struct device *dev, struct resource *res)
{
- int ret = 0;
struct coresight_platform_data *pdata = NULL;
struct replicator_drvdata *drvdata;
struct coresight_desc desc = { 0 };
void __iomem *base;
+ int ret;
if (is_of_node(dev_fwnode(dev)) &&
of_device_is_compatible(dev->of_node, "arm,coresight-replicator"))
@@ -238,16 +238,9 @@ static int replicator_probe(struct device *dev, struct resource *res)
if (!drvdata)
return -ENOMEM;
- drvdata->atclk = devm_clk_get(dev, "atclk"); /* optional */
- if (!IS_ERR(drvdata->atclk)) {
- ret = clk_prepare_enable(drvdata->atclk);
- if (ret)
- return ret;
- }
-
- drvdata->pclk = coresight_get_enable_apb_pclk(dev);
- if (IS_ERR(drvdata->pclk))
- return -ENODEV;
+ ret = coresight_get_enable_clocks(dev, &drvdata->pclk, &drvdata->atclk);
+ if (ret)
+ return ret;
/*
* Map the device base for dynamic-replicator, which has been
@@ -255,10 +248,8 @@ static int replicator_probe(struct device *dev, struct resource *res)
*/
if (res) {
base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base)) {
- ret = PTR_ERR(base);
- goto out_disable_clk;
- }
+ if (IS_ERR(base))
+ return PTR_ERR(base);
drvdata->base = base;
desc.groups = replicator_groups;
desc.access = CSDEV_ACCESS_IOMEM(base);
@@ -272,10 +263,8 @@ static int replicator_probe(struct device *dev, struct resource *res)
dev_set_drvdata(dev, drvdata);
pdata = coresight_get_platform_data(dev);
- if (IS_ERR(pdata)) {
- ret = PTR_ERR(pdata);
- goto out_disable_clk;
- }
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
dev->platform_data = pdata;
raw_spin_lock_init(&drvdata->spinlock);
@@ -286,19 +275,11 @@ static int replicator_probe(struct device *dev, struct resource *res)
desc.dev = dev;
drvdata->csdev = coresight_register(&desc);
- if (IS_ERR(drvdata->csdev)) {
- ret = PTR_ERR(drvdata->csdev);
- goto out_disable_clk;
- }
+ if (IS_ERR(drvdata->csdev))
+ return PTR_ERR(drvdata->csdev);
replicator_reset(drvdata);
-
-out_disable_clk:
- if (ret && !IS_ERR_OR_NULL(drvdata->atclk))
- clk_disable_unprepare(drvdata->atclk);
- if (ret && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_disable_unprepare(drvdata->pclk);
- return ret;
+ return 0;
}
static int replicator_remove(struct device *dev)
@@ -335,8 +316,6 @@ static void replicator_platform_remove(struct platform_device *pdev)
replicator_remove(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR_OR_NULL(drvdata->pclk))
- clk_put(drvdata->pclk);
}
#ifdef CONFIG_PM
@@ -344,24 +323,26 @@ static int replicator_runtime_suspend(struct device *dev)
{
struct replicator_drvdata *drvdata = dev_get_drvdata(dev);
- if (drvdata && !IS_ERR(drvdata->atclk))
- clk_disable_unprepare(drvdata->atclk);
+ clk_disable_unprepare(drvdata->atclk);
+ clk_disable_unprepare(drvdata->pclk);
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_disable_unprepare(drvdata->pclk);
return 0;
}
static int replicator_runtime_resume(struct device *dev)
{
struct replicator_drvdata *drvdata = dev_get_drvdata(dev);
+ int ret;
- if (drvdata && !IS_ERR(drvdata->atclk))
- clk_prepare_enable(drvdata->atclk);
+ ret = clk_prepare_enable(drvdata->pclk);
+ if (ret)
+ return ret;
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_prepare_enable(drvdata->pclk);
- return 0;
+ ret = clk_prepare_enable(drvdata->atclk);
+ if (ret)
+ clk_disable_unprepare(drvdata->pclk);
+
+ return ret;
}
#endif
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index e45c6c7204b4..e68529bf89c9 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -342,7 +342,7 @@ static int stm_generic_link(struct stm_data *stm_data,
{
struct stm_drvdata *drvdata = container_of(stm_data,
struct stm_drvdata, stm);
- if (!drvdata || !drvdata->csdev)
+ if (!drvdata->csdev)
return -EINVAL;
return coresight_enable_sysfs(drvdata->csdev);
@@ -353,7 +353,7 @@ static void stm_generic_unlink(struct stm_data *stm_data,
{
struct stm_drvdata *drvdata = container_of(stm_data,
struct stm_drvdata, stm);
- if (!drvdata || !drvdata->csdev)
+ if (!drvdata->csdev)
return;
coresight_disable_sysfs(drvdata->csdev);
@@ -384,7 +384,7 @@ static long stm_generic_set_options(struct stm_data *stm_data,
{
struct stm_drvdata *drvdata = container_of(stm_data,
struct stm_drvdata, stm);
- if (!(drvdata && coresight_get_mode(drvdata->csdev)))
+ if (!coresight_get_mode(drvdata->csdev))
return -EINVAL;
if (channel >= drvdata->numsp)
@@ -419,7 +419,7 @@ static ssize_t notrace stm_generic_packet(struct stm_data *stm_data,
struct stm_drvdata, stm);
unsigned int stm_flags;
- if (!(drvdata && coresight_get_mode(drvdata->csdev)))
+ if (!coresight_get_mode(drvdata->csdev))
return -EACCES;
if (channel >= drvdata->numsp)
@@ -842,16 +842,10 @@ static int __stm_probe(struct device *dev, struct resource *res)
if (!drvdata)
return -ENOMEM;
- drvdata->atclk = devm_clk_get(dev, "atclk"); /* optional */
- if (!IS_ERR(drvdata->atclk)) {
- ret = clk_prepare_enable(drvdata->atclk);
- if (ret)
- return ret;
- }
+ ret = coresight_get_enable_clocks(dev, &drvdata->pclk, &drvdata->atclk);
+ if (ret)
+ return ret;
- drvdata->pclk = coresight_get_enable_apb_pclk(dev);
- if (IS_ERR(drvdata->pclk))
- return -ENODEV;
dev_set_drvdata(dev, drvdata);
base = devm_ioremap_resource(dev, res);
@@ -963,24 +957,26 @@ static int stm_runtime_suspend(struct device *dev)
{
struct stm_drvdata *drvdata = dev_get_drvdata(dev);
- if (drvdata && !IS_ERR(drvdata->atclk))
- clk_disable_unprepare(drvdata->atclk);
+ clk_disable_unprepare(drvdata->atclk);
+ clk_disable_unprepare(drvdata->pclk);
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_disable_unprepare(drvdata->pclk);
return 0;
}
static int stm_runtime_resume(struct device *dev)
{
struct stm_drvdata *drvdata = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(drvdata->pclk);
+ if (ret)
+ return ret;
- if (drvdata && !IS_ERR(drvdata->atclk))
- clk_prepare_enable(drvdata->atclk);
+ ret = clk_prepare_enable(drvdata->atclk);
+ if (ret)
+ clk_disable_unprepare(drvdata->pclk);
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_prepare_enable(drvdata->pclk);
- return 0;
+ return ret;
}
#endif
@@ -1033,8 +1029,6 @@ static void stm_platform_remove(struct platform_device *pdev)
__stm_remove(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR_OR_NULL(drvdata->pclk))
- clk_put(drvdata->pclk);
}
#ifdef CONFIG_ACPI
diff --git a/drivers/hwtracing/coresight/coresight-syscfg.c b/drivers/hwtracing/coresight/coresight-syscfg.c
index 83dad24e0116..6836b05986e8 100644
--- a/drivers/hwtracing/coresight/coresight-syscfg.c
+++ b/drivers/hwtracing/coresight/coresight-syscfg.c
@@ -395,7 +395,7 @@ static void cscfg_remove_owned_csdev_configs(struct coresight_device *csdev, voi
if (list_empty(&csdev->config_csdev_list))
return;
- guard(raw_spinlock_irqsave)(&csdev->cscfg_csdev_lock);
+ guard(raw_spinlock_irqsave)(&csdev->cscfg_csdev_lock);
list_for_each_entry_safe(config_csdev, tmp, &csdev->config_csdev_list, node) {
if (config_csdev->config_desc->load_owner == load_owner)
diff --git a/drivers/hwtracing/coresight/coresight-sysfs.c b/drivers/hwtracing/coresight/coresight-sysfs.c
index feadaf065b53..5e52324aa9ac 100644
--- a/drivers/hwtracing/coresight/coresight-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-sysfs.c
@@ -7,6 +7,7 @@
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/kernel.h>
+#include <linux/property.h>
#include "coresight-priv.h"
#include "coresight-trace-id.h"
@@ -371,17 +372,81 @@ static ssize_t enable_source_store(struct device *dev,
}
static DEVICE_ATTR_RW(enable_source);
+static ssize_t label_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const char *str;
+ int ret;
+
+ ret = fwnode_property_read_string(dev_fwnode(dev), "label", &str);
+ if (ret == 0)
+ return sysfs_emit(buf, "%s\n", str);
+ else
+ return ret;
+}
+static DEVICE_ATTR_RO(label);
+
+static umode_t label_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+
+ if (attr == &dev_attr_label.attr) {
+ if (fwnode_property_present(dev_fwnode(dev), "label"))
+ return attr->mode;
+ else
+ return 0;
+ }
+
+ return attr->mode;
+}
+
static struct attribute *coresight_sink_attrs[] = {
&dev_attr_enable_sink.attr,
+ &dev_attr_label.attr,
NULL,
};
-ATTRIBUTE_GROUPS(coresight_sink);
+
+static struct attribute_group coresight_sink_group = {
+ .attrs = coresight_sink_attrs,
+ .is_visible = label_is_visible,
+};
+__ATTRIBUTE_GROUPS(coresight_sink);
static struct attribute *coresight_source_attrs[] = {
&dev_attr_enable_source.attr,
+ &dev_attr_label.attr,
NULL,
};
-ATTRIBUTE_GROUPS(coresight_source);
+
+static struct attribute_group coresight_source_group = {
+ .attrs = coresight_source_attrs,
+ .is_visible = label_is_visible,
+};
+__ATTRIBUTE_GROUPS(coresight_source);
+
+static struct attribute *coresight_link_attrs[] = {
+ &dev_attr_label.attr,
+ NULL,
+};
+
+static struct attribute_group coresight_link_group = {
+ .attrs = coresight_link_attrs,
+ .is_visible = label_is_visible,
+};
+__ATTRIBUTE_GROUPS(coresight_link);
+
+static struct attribute *coresight_helper_attrs[] = {
+ &dev_attr_label.attr,
+ NULL,
+};
+
+static struct attribute_group coresight_helper_group = {
+ .attrs = coresight_helper_attrs,
+ .is_visible = label_is_visible,
+};
+__ATTRIBUTE_GROUPS(coresight_helper);
const struct device_type coresight_dev_type[] = {
[CORESIGHT_DEV_TYPE_SINK] = {
@@ -390,6 +455,7 @@ const struct device_type coresight_dev_type[] = {
},
[CORESIGHT_DEV_TYPE_LINK] = {
.name = "link",
+ .groups = coresight_link_groups,
},
[CORESIGHT_DEV_TYPE_LINKSINK] = {
.name = "linksink",
@@ -401,6 +467,7 @@ const struct device_type coresight_dev_type[] = {
},
[CORESIGHT_DEV_TYPE_HELPER] = {
.name = "helper",
+ .groups = coresight_helper_groups,
}
};
/* Ensure the enum matches the names and groups */
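
The switch from ATTRIBUTE_GROUPS() to hand-rolled groups plus __ATTRIBUTE_GROUPS() is needed because the former generates the struct attribute_group itself and leaves no place to hook .is_visible. Simplified from <linux/sysfs.h>:

#define __ATTRIBUTE_GROUPS(_name)				\
static const struct attribute_group *_name##_groups[] = {	\
	&_name##_group,						\
	NULL,							\
}

#define ATTRIBUTE_GROUPS(_name)					\
static const struct attribute_group _name##_group = {		\
	.attrs = _name##_attrs,					\
};								\
__ATTRIBUTE_GROUPS(_name)

Returning 0 from the is_visible callback hides the file entirely; returning attr->mode exposes it with its declared permissions.
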
diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c
index 88afb16bb6be..36599c431be6 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-core.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-core.c
@@ -24,6 +24,7 @@
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>
#include <linux/platform_device.h>
@@ -634,25 +635,14 @@ static int of_tmc_get_reserved_resource_by_name(struct device *dev,
const char *name,
struct resource *res)
{
- int index, rc = -ENODEV;
- struct device_node *node;
+ int rc = -ENODEV;
- if (!is_of_node(dev->fwnode))
- return -ENODEV;
-
- index = of_property_match_string(dev->of_node, "memory-region-names",
- name);
- if (index < 0)
- return rc;
-
- node = of_parse_phandle(dev->of_node, "memory-region", index);
- if (!node)
+ rc = of_reserved_mem_region_to_resource_byname(dev->of_node, name, res);
+ if (rc < 0)
return rc;
- if (!of_address_to_resource(node, 0, res) &&
- res->start != 0 && resource_size(res) != 0)
- rc = 0;
- of_node_put(node);
+ if (res->start == 0 || resource_size(res) == 0)
+ rc = -ENODEV;
return rc;
}
@@ -785,10 +775,20 @@ static int __tmc_probe(struct device *dev, struct resource *res)
u32 devid;
void __iomem *base;
struct coresight_platform_data *pdata = NULL;
- struct tmc_drvdata *drvdata = dev_get_drvdata(dev);
+ struct tmc_drvdata *drvdata;
struct coresight_desc desc = { 0 };
struct coresight_dev_list *dev_list = NULL;
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, drvdata);
+
+ ret = coresight_get_enable_clocks(dev, &drvdata->pclk, &drvdata->atclk);
+ if (ret)
+ return ret;
+
ret = -ENOMEM;
/* Validity for the resource is already checked by the AMBA core */
@@ -894,14 +894,8 @@ out:
static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
{
- struct tmc_drvdata *drvdata;
int ret;
- drvdata = devm_kzalloc(&adev->dev, sizeof(*drvdata), GFP_KERNEL);
- if (!drvdata)
- return -ENOMEM;
-
- amba_set_drvdata(adev, drvdata);
ret = __tmc_probe(&adev->dev, &adev->res);
if (!ret)
pm_runtime_put(&adev->dev);
@@ -978,18 +972,8 @@ static struct amba_driver tmc_driver = {
static int tmc_platform_probe(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- struct tmc_drvdata *drvdata;
int ret = 0;
- drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
- if (!drvdata)
- return -ENOMEM;
-
- drvdata->pclk = coresight_get_enable_apb_pclk(&pdev->dev);
- if (IS_ERR(drvdata->pclk))
- return -ENODEV;
-
- dev_set_drvdata(&pdev->dev, drvdata);
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
@@ -1011,8 +995,6 @@ static void tmc_platform_remove(struct platform_device *pdev)
__tmc_remove(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR_OR_NULL(drvdata->pclk))
- clk_put(drvdata->pclk);
}
#ifdef CONFIG_PM
@@ -1020,18 +1002,26 @@ static int tmc_runtime_suspend(struct device *dev)
{
struct tmc_drvdata *drvdata = dev_get_drvdata(dev);
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_disable_unprepare(drvdata->pclk);
+ clk_disable_unprepare(drvdata->atclk);
+ clk_disable_unprepare(drvdata->pclk);
+
return 0;
}
static int tmc_runtime_resume(struct device *dev)
{
struct tmc_drvdata *drvdata = dev_get_drvdata(dev);
+ int ret;
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_prepare_enable(drvdata->pclk);
- return 0;
+ ret = clk_prepare_enable(drvdata->pclk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(drvdata->atclk);
+ if (ret)
+ clk_disable_unprepare(drvdata->pclk);
+
+ return ret;
}
#endif
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 6541a27a018e..cbb4ba439158 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -210,6 +210,7 @@ struct tmc_resrv_buf {
/**
 * struct tmc_drvdata - specifics associated to a TMC component
+ * @atclk: optional clock for the core parts of the TMC.
* @pclk: APB clock if present, otherwise NULL
* @base: memory mapped base address for this component.
* @csdev: component vitals needed by the framework.
@@ -244,6 +245,7 @@ struct tmc_resrv_buf {
* Used by ETR/ETF.
*/
struct tmc_drvdata {
+ struct clk *atclk;
struct clk *pclk;
void __iomem *base;
struct coresight_device *csdev;
diff --git a/drivers/hwtracing/coresight/coresight-tnoc.c b/drivers/hwtracing/coresight/coresight-tnoc.c
new file mode 100644
index 000000000000..ff9a0a9cfe96
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-tnoc.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/amba/bus.h>
+#include <linux/coresight.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "coresight-priv.h"
+#include "coresight-trace-id.h"
+
+#define TRACE_NOC_CTRL 0x008
+#define TRACE_NOC_XLD 0x010
+#define TRACE_NOC_FREQVAL 0x018
+#define TRACE_NOC_SYNCR 0x020
+
+/* Enable generation of output ATB traffic. */
+#define TRACE_NOC_CTRL_PORTEN BIT(0)
+/* Sets the type of issued ATB FLAG packets. */
+#define TRACE_NOC_CTRL_FLAGTYPE BIT(7)
+/* Sets the type of issued ATB FREQ packets. */
+#define TRACE_NOC_CTRL_FREQTYPE BIT(8)
+
+#define TRACE_NOC_SYNC_INTERVAL 0xFFFF
+
+/*
+ * struct trace_noc_drvdata - specifics associated to a trace noc component
+ * @base: memory mapped base address for this component.
+ * @dev: device node for trace_noc_drvdata.
+ * @csdev: component vitals needed by the framework.
+ * @spinlock: serialize enable/disable operation.
+ * @atid: id for the trace packet.
+ */
+struct trace_noc_drvdata {
+ void __iomem *base;
+ struct device *dev;
+ struct coresight_device *csdev;
+ spinlock_t spinlock;
+ u32 atid;
+};
+
+DEFINE_CORESIGHT_DEVLIST(trace_noc_devs, "traceNoc");
+
+static void trace_noc_enable_hw(struct trace_noc_drvdata *drvdata)
+{
+ u32 val;
+
+ /* Set ATID */
+ writel_relaxed(drvdata->atid, drvdata->base + TRACE_NOC_XLD);
+
+ /* Set the data word count between 'SYNC' packets */
+ writel_relaxed(TRACE_NOC_SYNC_INTERVAL, drvdata->base + TRACE_NOC_SYNCR);
+
+ /*
+  * Set the Control register:
+  * - Set the FLAG packets to 'FLAG' packets
+  * - Set the FREQ packets to 'FREQ_TS' packets
+  * - Enable generation of output ATB traffic
+  */
+ val = readl_relaxed(drvdata->base + TRACE_NOC_CTRL);
+
+ val &= ~TRACE_NOC_CTRL_FLAGTYPE;
+ val |= TRACE_NOC_CTRL_FREQTYPE;
+ val |= TRACE_NOC_CTRL_PORTEN;
+
+ writel(val, drvdata->base + TRACE_NOC_CTRL);
+}
+
+static int trace_noc_enable(struct coresight_device *csdev, struct coresight_connection *inport,
+ struct coresight_connection *outport)
+{
+ struct trace_noc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ scoped_guard(spinlock, &drvdata->spinlock) {
+ if (csdev->refcnt == 0)
+ trace_noc_enable_hw(drvdata);
+
+ csdev->refcnt++;
+ }
+
+ dev_dbg(drvdata->dev, "Trace NOC is enabled\n");
+ return 0;
+}
+
+static void trace_noc_disable(struct coresight_device *csdev, struct coresight_connection *inport,
+ struct coresight_connection *outport)
+{
+ struct trace_noc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ scoped_guard(spinlock, &drvdata->spinlock) {
+ if (--csdev->refcnt == 0)
+ writel(0x0, drvdata->base + TRACE_NOC_CTRL);
+ }
+ dev_dbg(drvdata->dev, "Trace NOC is disabled\n");
+}
+
+static int trace_noc_id(struct coresight_device *csdev, __maybe_unused enum cs_mode mode,
+ __maybe_unused struct coresight_device *sink)
+{
+ struct trace_noc_drvdata *drvdata;
+
+ drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ return drvdata->atid;
+}
+
+static const struct coresight_ops_link trace_noc_link_ops = {
+ .enable = trace_noc_enable,
+ .disable = trace_noc_disable,
+};
+
+static const struct coresight_ops trace_noc_cs_ops = {
+ .trace_id = trace_noc_id,
+ .link_ops = &trace_noc_link_ops,
+};
+
+static int trace_noc_init_default_data(struct trace_noc_drvdata *drvdata)
+{
+ int atid;
+
+ atid = coresight_trace_id_get_system_id();
+ if (atid < 0)
+ return atid;
+
+ drvdata->atid = atid;
+
+ return 0;
+}
+
+static ssize_t traceid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct trace_noc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->atid;
+ return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(traceid);
+
+static struct attribute *coresight_tnoc_attrs[] = {
+ &dev_attr_traceid.attr,
+ NULL,
+};
+
+static const struct attribute_group coresight_tnoc_group = {
+ .attrs = coresight_tnoc_attrs,
+};
+
+static const struct attribute_group *coresight_tnoc_groups[] = {
+ &coresight_tnoc_group,
+ NULL,
+};
+
+static int trace_noc_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ struct device *dev = &adev->dev;
+ struct coresight_platform_data *pdata;
+ struct trace_noc_drvdata *drvdata;
+ struct coresight_desc desc = { 0 };
+ int ret;
+
+ desc.name = coresight_alloc_device_name(&trace_noc_devs, dev);
+ if (!desc.name)
+ return -ENOMEM;
+
+ pdata = coresight_get_platform_data(dev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ adev->dev.platform_data = pdata;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->dev = &adev->dev;
+ dev_set_drvdata(dev, drvdata);
+
+ drvdata->base = devm_ioremap_resource(dev, &adev->res);
+ if (IS_ERR(drvdata->base))
+ return PTR_ERR(drvdata->base);
+
+ spin_lock_init(&drvdata->spinlock);
+
+ ret = trace_noc_init_default_data(drvdata);
+ if (ret)
+ return ret;
+
+ desc.ops = &trace_noc_cs_ops;
+ desc.type = CORESIGHT_DEV_TYPE_LINK;
+ desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_MERG;
+ desc.pdata = adev->dev.platform_data;
+ desc.dev = &adev->dev;
+ desc.access = CSDEV_ACCESS_IOMEM(drvdata->base);
+ desc.groups = coresight_tnoc_groups;
+ drvdata->csdev = coresight_register(&desc);
+ if (IS_ERR(drvdata->csdev)) {
+ coresight_trace_id_put_system_id(drvdata->atid);
+ return PTR_ERR(drvdata->csdev);
+ }
+ pm_runtime_put(&adev->dev);
+
+ return 0;
+}
+
+static void trace_noc_remove(struct amba_device *adev)
+{
+ struct trace_noc_drvdata *drvdata = dev_get_drvdata(&adev->dev);
+
+ coresight_unregister(drvdata->csdev);
+ coresight_trace_id_put_system_id(drvdata->atid);
+}
+
+static struct amba_id trace_noc_ids[] = {
+ {
+ .id = 0x000f0c00,
+ .mask = 0x00ffff00,
+ },
+ {
+ .id = 0x001f0c00,
+ .mask = 0x00ffff00,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(amba, trace_noc_ids);
+
+static struct amba_driver trace_noc_driver = {
+ .drv = {
+ .name = "coresight-trace-noc",
+ .suppress_bind_attrs = true,
+ },
+ .probe = trace_noc_probe,
+ .remove = trace_noc_remove,
+ .id_table = trace_noc_ids,
+};
+
+module_amba_driver(trace_noc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Trace NOC driver");
diff --git a/drivers/hwtracing/coresight/coresight-tpda.c b/drivers/hwtracing/coresight/coresight-tpda.c
index 0633f04beb24..333b3cb23685 100644
--- a/drivers/hwtracing/coresight/coresight-tpda.c
+++ b/drivers/hwtracing/coresight/coresight-tpda.c
@@ -71,6 +71,8 @@ static int tpdm_read_element_size(struct tpda_drvdata *drvdata,
if (tpdm_data->dsb) {
rc = fwnode_property_read_u32(dev_fwnode(csdev->dev.parent),
"qcom,dsb-element-bits", &drvdata->dsb_esize);
+ if (rc)
+ goto out;
}
if (tpdm_data->cmb) {
@@ -78,6 +80,7 @@ static int tpdm_read_element_size(struct tpda_drvdata *drvdata,
"qcom,cmb-element-bits", &drvdata->cmb_esize);
}
+out:
if (rc)
dev_warn_once(&csdev->dev,
"Failed to read TPDM Element size: %d\n", rc);
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 3e0159288428..9463afdbda8a 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -128,11 +128,11 @@ static const struct coresight_ops tpiu_cs_ops = {
static int __tpiu_probe(struct device *dev, struct resource *res)
{
- int ret;
void __iomem *base;
struct coresight_platform_data *pdata = NULL;
struct tpiu_drvdata *drvdata;
struct coresight_desc desc = { 0 };
+ int ret;
desc.name = coresight_alloc_device_name(&tpiu_devs, dev);
if (!desc.name)
@@ -144,16 +144,10 @@ static int __tpiu_probe(struct device *dev, struct resource *res)
spin_lock_init(&drvdata->spinlock);
- drvdata->atclk = devm_clk_get(dev, "atclk"); /* optional */
- if (!IS_ERR(drvdata->atclk)) {
- ret = clk_prepare_enable(drvdata->atclk);
- if (ret)
- return ret;
- }
+ ret = coresight_get_enable_clocks(dev, &drvdata->pclk, &drvdata->atclk);
+ if (ret)
+ return ret;
- drvdata->pclk = coresight_get_enable_apb_pclk(dev);
- if (IS_ERR(drvdata->pclk))
- return -ENODEV;
dev_set_drvdata(dev, drvdata);
/* Validity for the resource is already checked by the AMBA core */
@@ -212,24 +206,26 @@ static int tpiu_runtime_suspend(struct device *dev)
{
struct tpiu_drvdata *drvdata = dev_get_drvdata(dev);
- if (drvdata && !IS_ERR(drvdata->atclk))
- clk_disable_unprepare(drvdata->atclk);
+ clk_disable_unprepare(drvdata->atclk);
+ clk_disable_unprepare(drvdata->pclk);
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_disable_unprepare(drvdata->pclk);
return 0;
}
static int tpiu_runtime_resume(struct device *dev)
{
struct tpiu_drvdata *drvdata = dev_get_drvdata(dev);
+ int ret;
- if (drvdata && !IS_ERR(drvdata->atclk))
- clk_prepare_enable(drvdata->atclk);
+ ret = clk_prepare_enable(drvdata->pclk);
+ if (ret)
+ return ret;
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_prepare_enable(drvdata->pclk);
- return 0;
+ ret = clk_prepare_enable(drvdata->atclk);
+ if (ret)
+ clk_disable_unprepare(drvdata->pclk);
+
+ return ret;
}
#endif
@@ -293,8 +289,6 @@ static void tpiu_platform_remove(struct platform_device *pdev)
__tpiu_remove(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR_OR_NULL(drvdata->pclk))
- clk_put(drvdata->pclk);
}
#ifdef CONFIG_ACPI
diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c
index 8267dd1a2130..43643d2c5bdd 100644
--- a/drivers/hwtracing/coresight/coresight-trbe.c
+++ b/drivers/hwtracing/coresight/coresight-trbe.c
@@ -23,7 +23,8 @@
#include "coresight-self-hosted-trace.h"
#include "coresight-trbe.h"
-#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT))
+#define PERF_IDX2OFF(idx, buf) \
+ ((idx) % ((unsigned long)(buf)->nr_pages << PAGE_SHIFT))
/*
* A padding packet that will help the user space tools
@@ -257,6 +258,7 @@ static void trbe_drain_and_disable_local(struct trbe_cpudata *cpudata)
static void trbe_reset_local(struct trbe_cpudata *cpudata)
{
write_sysreg_s(0, SYS_TRBLIMITR_EL1);
+ isb();
trbe_drain_buffer();
write_sysreg_s(0, SYS_TRBPTR_EL1);
write_sysreg_s(0, SYS_TRBBASER_EL1);
@@ -747,12 +749,12 @@ static void *arm_trbe_alloc_buffer(struct coresight_device *csdev,
buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, trbe_alloc_node(event));
if (!buf)
- return ERR_PTR(-ENOMEM);
+ return NULL;
pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL);
if (!pglist) {
kfree(buf);
- return ERR_PTR(-ENOMEM);
+ return NULL;
}
for (i = 0; i < nr_pages; i++)
@@ -762,7 +764,7 @@ static void *arm_trbe_alloc_buffer(struct coresight_device *csdev,
if (!buf->trbe_base) {
kfree(pglist);
kfree(buf);
- return ERR_PTR(-ENOMEM);
+ return NULL;
}
buf->trbe_limit = buf->trbe_base + nr_pages * PAGE_SIZE;
buf->trbe_write = buf->trbe_base;
@@ -1279,7 +1281,7 @@ static void arm_trbe_register_coresight_cpu(struct trbe_drvdata *drvdata, int cp
* into the device for that purpose.
*/
desc.pdata = devm_kzalloc(dev, sizeof(*desc.pdata), GFP_KERNEL);
- if (IS_ERR(desc.pdata))
+ if (!desc.pdata)
goto cpu_clear;
desc.type = CORESIGHT_DEV_TYPE_SINK;
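
The PERF_IDX2OFF() cast matters because nr_pages is a 32-bit quantity, so nr_pages << PAGE_SHIFT is evaluated in 32 bits and wraps for buffers of 4 GiB and up before the modulo is taken. A userspace illustration of the wrap on an LP64 target, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned int nr_pages = 0x100000;	/* 2^20 pages = 4 GiB */

	unsigned long bad = nr_pages << PAGE_SHIFT;	/* shift done in 32 bits: wraps to 0 */
	unsigned long good = (unsigned long)nr_pages << PAGE_SHIFT;

	printf("bad=%#lx good=%#lx\n", bad, good);	/* 0 vs 0x100000000 */
	return 0;
}
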
diff --git a/drivers/hwtracing/coresight/ultrasoc-smb.h b/drivers/hwtracing/coresight/ultrasoc-smb.h
index c4c111275627..323f0ccb6878 100644
--- a/drivers/hwtracing/coresight/ultrasoc-smb.h
+++ b/drivers/hwtracing/coresight/ultrasoc-smb.h
@@ -7,6 +7,7 @@
#ifndef _ULTRASOC_SMB_H
#define _ULTRASOC_SMB_H
+#include <linux/bitfield.h>
#include <linux/miscdevice.h>
#include <linux/spinlock.h>
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index 74b66aec33d4..ee86df4cff4b 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -30,7 +30,7 @@ static int i2c_debug;
#define pca_clock(adap) adap->i2c_clock
#define pca_set_con(adap, val) pca_outw(adap, I2C_PCA_CON, val)
#define pca_get_con(adap) pca_inw(adap, I2C_PCA_CON)
-#define pca_wait(adap) adap->wait_for_completion(adap->data)
+#define pca_wait(adap) adap->wait_for_completion_cb(adap->data)
static void pca_reset(struct i2c_algo_pca_data *adap)
{
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 070d014fdc5d..fd81e49638aa 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -165,6 +165,7 @@ config I2C_I801
Birch Stream (SOC)
Arrow Lake (SOC)
Panther Lake (SOC)
+ Wildcat Lake (SOC)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
@@ -414,7 +415,7 @@ config I2C_ASPEED
config I2C_AT91
tristate "Atmel AT91 I2C Two-Wire interface (TWI)"
- depends on ARCH_AT91 || COMPILE_TEST
+ depends on ARCH_MICROCHIP || COMPILE_TEST
help
This supports the use of the I2C interface on Atmel AT91
processors.
@@ -1357,6 +1358,27 @@ config I2C_LJCA
This driver can also be built as a module. If so, the module
will be called i2c-ljca.
+config I2C_NCT6694
+ tristate "Nuvoton NCT6694 I2C adapter support"
+ depends on MFD_NCT6694
+ help
+ If you say yes to this option, support will be included for Nuvoton
+ NCT6694, a USB to I2C interface.
+
+ This driver can also be built as a module. If so, the module will
+ be called i2c-nct6694.
+
+config I2C_USBIO
+ tristate "Intel USBIO I2C Adapter support"
+ depends on USB_USBIO
+ default USB_USBIO
+ help
+ Select this option to enable the I2C driver for the INTEL
+ USBIO driver stack.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c_usbio.
+
config I2C_CP2615
tristate "Silicon Labs CP2615 USB sound card and I2C adapter"
depends on USB
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 04db855fdfd6..fb985769f5ff 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -135,6 +135,8 @@ obj-$(CONFIG_I2C_GXP) += i2c-gxp.o
obj-$(CONFIG_I2C_DIOLAN_U2C) += i2c-diolan-u2c.o
obj-$(CONFIG_I2C_DLN2) += i2c-dln2.o
obj-$(CONFIG_I2C_LJCA) += i2c-ljca.o
+obj-$(CONFIG_I2C_NCT6694) += i2c-nct6694.o
+obj-$(CONFIG_I2C_USBIO) += i2c-usbio.o
obj-$(CONFIG_I2C_CP2615) += i2c-cp2615.o
obj-$(CONFIG_I2C_PARPORT) += i2c-parport.o
obj-$(CONFIG_I2C_PCI1XXXX) += i2c-mchp-pci1xxxx.o
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index cbd88ffa5610..c7a72c28786c 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -1068,11 +1068,10 @@ int i2c_dw_probe_master(struct dw_i2c_dev *dev)
if (!(dev->flags & ACCESS_POLLING)) {
ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr,
irq_flags, dev_name(dev->dev), dev);
- if (ret) {
- dev_err(dev->dev, "failure requesting irq %i: %d\n",
- dev->irq, ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev->dev, ret,
+ "failure requesting irq %i: %d\n",
+ dev->irq, ret);
}
ret = i2c_dw_init_recovery_info(dev);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index a35e4c64a1d4..34d881572351 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -238,7 +238,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
dev->rst = devm_reset_control_get_optional_exclusive(device, NULL);
if (IS_ERR(dev->rst))
- return PTR_ERR(dev->rst);
+ return dev_err_probe(device, PTR_ERR(dev->rst), "failed to acquire reset\n");
reset_control_deassert(dev->rst);
@@ -247,21 +247,23 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
goto exit_reset;
ret = i2c_dw_probe_lock_support(dev);
- if (ret)
+ if (ret) {
+ ret = dev_err_probe(device, ret, "failed to probe lock support\n");
goto exit_reset;
+ }
i2c_dw_configure(dev);
/* Optional interface clock */
dev->pclk = devm_clk_get_optional(device, "pclk");
if (IS_ERR(dev->pclk)) {
- ret = PTR_ERR(dev->pclk);
+ ret = dev_err_probe(device, PTR_ERR(dev->pclk), "failed to acquire pclk\n");
goto exit_reset;
}
dev->clk = devm_clk_get_optional(device, NULL);
if (IS_ERR(dev->clk)) {
- ret = PTR_ERR(dev->clk);
+ ret = dev_err_probe(device, PTR_ERR(dev->clk), "failed to acquire clock\n");
goto exit_reset;
}
@@ -314,6 +316,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
exit_probe:
dw_i2c_plat_pm_cleanup(dev);
+ i2c_dw_prepare_clk(dev, false);
exit_reset:
reset_control_assert(dev->rst);
return ret;
@@ -331,9 +334,11 @@ static void dw_i2c_plat_remove(struct platform_device *pdev)
i2c_dw_disable(dev);
pm_runtime_dont_use_autosuspend(device);
- pm_runtime_put_sync(device);
+ pm_runtime_put_noidle(device);
dw_i2c_plat_pm_cleanup(dev);
+ i2c_dw_prepare_clk(dev, false);
+
i2c_dw_remove_lock_support(dev);
reset_control_assert(dev->rst);
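
The dev_err_probe() conversions preserve behavior while adding diagnostics: the helper returns the error passed in, prints the message with the error code appended, and stays silent for -EPROBE_DEFER, recording the reason in /sys/kernel/debug/devices_deferred instead. The shape of the conversion:

/* Before: the probe fails silently, with no hint of the cause. */
if (IS_ERR(dev->rst))
	return PTR_ERR(dev->rst);

/* After: one statement logs (unless deferring) and returns the code. */
if (IS_ERR(dev->rst))
	return dev_err_probe(device, PTR_ERR(dev->rst),
			     "failed to acquire reset\n");
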
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index b936a240db0a..6eb16b7d75a6 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -266,11 +266,10 @@ int i2c_dw_probe_slave(struct dw_i2c_dev *dev)
ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr_slave,
IRQF_SHARED, dev_name(dev->dev), dev);
- if (ret) {
- dev_err(dev->dev, "failure requesting IRQ %i: %d\n",
- dev->irq, ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev->dev, ret,
+ "failure requesting IRQ %i: %d\n",
+ dev->irq, ret);
ret = i2c_add_numbered_adapter(adap);
if (ret)
diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
index 370f32974763..5358f5ddf924 100644
--- a/drivers/i2c/busses/i2c-hix5hd2.c
+++ b/drivers/i2c/busses/i2c-hix5hd2.c
@@ -339,7 +339,7 @@ static int hix5hd2_i2c_xfer_msg(struct hix5hd2_i2c_priv *priv,
ret = priv->state;
/*
- * If this is the last message to be transfered (stop == 1)
+ * If this is the last message to be transferred (stop == 1)
* Then check if the bus can be brought back to idle.
*/
if (priv->state == HIX5I2C_STAT_RW_SUCCESS && stop)
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index a7f89946dad4..cba992fa6557 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -83,6 +83,7 @@
* Arrow Lake-H (SOC) 0x7722 32 hard yes yes yes
* Panther Lake-H (SOC) 0xe322 32 hard yes yes yes
* Panther Lake-P (SOC) 0xe422 32 hard yes yes yes
+ * Wildcat Lake-U (SOC) 0x4d22 32 hard yes yes yes
*
* Features supported by this driver:
* Software PEC no
@@ -236,6 +237,7 @@
#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30
#define PCI_DEVICE_ID_INTEL_TIGERLAKE_H_SMBUS 0x43a3
#define PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS 0x4b23
+#define PCI_DEVICE_ID_INTEL_WILDCAT_LAKE_U_SMBUS 0x4d22
#define PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS 0x4da3
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS 0x51a3
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS 0x54a3
@@ -1052,10 +1054,11 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE_DATA(INTEL, METEOR_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ PCI_DEVICE_DATA(INTEL, METEOR_LAKE_SOC_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ PCI_DEVICE_DATA(INTEL, METEOR_LAKE_PCH_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
- { PCI_DEVICE_DATA(INTEL, BIRCH_STREAM_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, BIRCH_STREAM_SMBUS, FEATURES_ICH5) },
{ PCI_DEVICE_DATA(INTEL, ARROW_LAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, WILDCAT_LAKE_U_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-k1.c b/drivers/i2c/busses/i2c-k1.c
index b68a21fff0b5..6b918770e612 100644
--- a/drivers/i2c/busses/i2c-k1.c
+++ b/drivers/i2c/busses/i2c-k1.c
@@ -3,6 +3,7 @@
* Copyright (C) 2024-2025 Troy Mitchell <troymitchell988@gmail.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/i2c.h>
#include <linux/iopoll.h>
@@ -14,6 +15,7 @@
#define SPACEMIT_ICR 0x0 /* Control register */
#define SPACEMIT_ISR 0x4 /* Status register */
#define SPACEMIT_IDBR 0xc /* Data buffer register */
+#define SPACEMIT_IRCR 0x18 /* Reset cycle counter */
#define SPACEMIT_IBMR 0x1c /* Bus monitor register */
/* SPACEMIT_ICR register fields */
@@ -25,7 +27,8 @@
#define SPACEMIT_CR_MODE_FAST BIT(8) /* bus mode (master operation) */
/* Bit 9 is reserved */
#define SPACEMIT_CR_UR BIT(10) /* unit reset */
-/* Bits 11-12 are reserved */
+#define SPACEMIT_CR_RSTREQ BIT(11) /* i2c bus reset request */
+/* Bit 12 is reserved */
#define SPACEMIT_CR_SCLE BIT(13) /* master clock enable */
#define SPACEMIT_CR_IUE BIT(14) /* unit enable */
/* Bits 15-17 are reserved */
@@ -76,6 +79,10 @@
SPACEMIT_SR_GCAD | SPACEMIT_SR_IRF | SPACEMIT_SR_ITE | \
SPACEMIT_SR_ALD)
+#define SPACEMIT_RCR_SDA_GLITCH_NOFIX BIT(7) /* bypass the SDA glitch fix */
+/* the cycles of SCL during bus reset */
+#define SPACEMIT_RCR_FIELD_RST_CYC GENMASK(3, 0)
+
/* SPACEMIT_IBMR register fields */
#define SPACEMIT_BMR_SDA BIT(0) /* SDA line level */
#define SPACEMIT_BMR_SCL BIT(1) /* SCL line level */
@@ -88,6 +95,8 @@
#define SPACEMIT_SR_ERR (SPACEMIT_SR_BED | SPACEMIT_SR_RXOV | SPACEMIT_SR_ALD)
+#define SPACEMIT_BUS_RESET_CLK_CNT_MAX 9
+
enum spacemit_i2c_state {
SPACEMIT_STATE_IDLE,
SPACEMIT_STATE_START,
@@ -160,6 +169,7 @@ static int spacemit_i2c_handle_err(struct spacemit_i2c_dev *i2c)
static void spacemit_i2c_conditionally_reset_bus(struct spacemit_i2c_dev *i2c)
{
u32 status;
+ u8 clk_cnt;
/* if bus is locked, reset unit. 0: locked */
status = readl(i2c->base + SPACEMIT_IBMR);
@@ -169,9 +179,21 @@ static void spacemit_i2c_conditionally_reset_bus(struct spacemit_i2c_dev *i2c)
spacemit_i2c_reset(i2c);
usleep_range(10, 20);
- /* check scl status again */
+ for (clk_cnt = 0; clk_cnt < SPACEMIT_BUS_RESET_CLK_CNT_MAX; clk_cnt++) {
+ status = readl(i2c->base + SPACEMIT_IBMR);
+ if (status & SPACEMIT_BMR_SDA)
+ return;
+
+ /* SDA is still held low: request one more SCL cycle to release it */
+ writel(FIELD_PREP(SPACEMIT_RCR_FIELD_RST_CYC, 1),
+ i2c->base + SPACEMIT_IRCR);
+ writel(SPACEMIT_CR_RSTREQ, i2c->base + SPACEMIT_ICR);
+ usleep_range(20, 30);
+ }
+
+ /* check sda again here */
status = readl(i2c->base + SPACEMIT_IBMR);
- if (!(status & SPACEMIT_BMR_SCL))
+ if (!(status & SPACEMIT_BMR_SDA))
dev_warn_ratelimited(i2c->dev, "unit reset failed\n");
}
@@ -237,6 +259,14 @@ static void spacemit_i2c_init(struct spacemit_i2c_dev *i2c)
val |= SPACEMIT_CR_MSDE | SPACEMIT_CR_MSDIE;
writel(val, i2c->base + SPACEMIT_ICR);
+
+ /*
+ * The glitch fix in the K1 I2C controller introduces a delay
+ * on restart signals, so we disable the fix here.
+ */
+ val = readl(i2c->base + SPACEMIT_IRCR);
+ val |= SPACEMIT_RCR_SDA_GLITCH_NOFIX;
+ writel(val, i2c->base + SPACEMIT_IRCR);
}
static inline void
@@ -267,19 +297,6 @@ static void spacemit_i2c_start(struct spacemit_i2c_dev *i2c)
writel(val, i2c->base + SPACEMIT_ICR);
}
-static void spacemit_i2c_stop(struct spacemit_i2c_dev *i2c)
-{
- u32 val;
-
- val = readl(i2c->base + SPACEMIT_ICR);
- val |= SPACEMIT_CR_STOP | SPACEMIT_CR_ALDIE | SPACEMIT_CR_TB;
-
- if (i2c->read)
- val |= SPACEMIT_CR_ACKNAK;
-
- writel(val, i2c->base + SPACEMIT_ICR);
-}
-
static int spacemit_i2c_xfer_msg(struct spacemit_i2c_dev *i2c)
{
unsigned long time_left;
@@ -412,7 +429,6 @@ static irqreturn_t spacemit_i2c_irq_handler(int irq, void *devid)
val = readl(i2c->base + SPACEMIT_ICR);
val &= ~(SPACEMIT_CR_TB | SPACEMIT_CR_ACKNAK | SPACEMIT_CR_STOP | SPACEMIT_CR_START);
- writel(val, i2c->base + SPACEMIT_ICR);
switch (i2c->state) {
case SPACEMIT_STATE_START:
@@ -429,14 +445,16 @@ static irqreturn_t spacemit_i2c_irq_handler(int irq, void *devid)
}
if (i2c->state != SPACEMIT_STATE_IDLE) {
+ val |= SPACEMIT_CR_TB | SPACEMIT_CR_ALDIE;
+
if (spacemit_i2c_is_last_msg(i2c)) {
/* trigger next byte with stop */
- spacemit_i2c_stop(i2c);
- } else {
- /* trigger next byte */
- val |= SPACEMIT_CR_ALDIE | SPACEMIT_CR_TB;
- writel(val, i2c->base + SPACEMIT_ICR);
+ val |= SPACEMIT_CR_STOP;
+
+ if (i2c->read)
+ val |= SPACEMIT_CR_ACKNAK;
}
+ writel(val, i2c->base + SPACEMIT_ICR);
}
err_out:
@@ -476,12 +494,13 @@ static int spacemit_i2c_xfer(struct i2c_adapter *adapt, struct i2c_msg *msgs, in
spacemit_i2c_enable(i2c);
ret = spacemit_i2c_wait_bus_idle(i2c);
- if (!ret)
+ if (!ret) {
ret = spacemit_i2c_xfer_msg(i2c);
- else if (ret < 0)
- dev_dbg(i2c->dev, "i2c transfer error: %d\n", ret);
- else
+ if (ret < 0)
+ dev_dbg(i2c->dev, "i2c transfer error: %d\n", ret);
+ } else {
spacemit_i2c_check_bus_release(i2c);
+ }
spacemit_i2c_disable(i2c);
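
The new reset loop encodes the classic I2C recovery rule: a slave stuck mid-transfer can hold SDA low for at most eight data bits plus an ACK, so at most nine SCL pulses are needed before it lets go. Here each iteration asks the controller for one hardware-generated cycle via SPACEMIT_CR_RSTREQ and re-samples SDA. The same rule written against the generic recovery hooks, as a sketch only (the K1 driver uses its dedicated hardware instead):

#include <linux/delay.h>
#include <linux/i2c.h>

static int demo_recover_bus(struct i2c_adapter *adap)
{
	struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
	int i;

	for (i = 0; i < 9; i++) {
		if (bri->get_sda(adap))
			return 0;	/* slave released SDA */
		bri->set_scl(adap, 0);
		udelay(5);
		bri->set_scl(adap, 1);
		udelay(5);
	}
	return -EBUSY;
}
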
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index ab456c3717db..aefdbee1f03c 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -868,7 +868,7 @@ static int mtk_i2c_calculate_speed(struct mtk_i2c *i2c, unsigned int clk_src,
return 0;
}
-static int mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk)
+static void mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk)
{
unsigned int clk_src;
unsigned int step_cnt;
@@ -938,9 +938,6 @@ static int mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk)
break;
}
-
-
- return 0;
}
static void i2c_dump_register(struct mtk_i2c *i2c)
@@ -1243,6 +1240,7 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
{
int ret;
int left_num = num;
+ bool write_then_read_en = false;
struct mtk_i2c *i2c = i2c_get_adapdata(adap);
ret = clk_bulk_enable(I2C_MT65XX_CLK_MAX, i2c->clocks);
@@ -1256,6 +1254,7 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
if (!(msgs[0].flags & I2C_M_RD) && (msgs[1].flags & I2C_M_RD) &&
msgs[0].addr == msgs[1].addr) {
i2c->auto_restart = 0;
+ write_then_read_en = true;
}
}
@@ -1280,12 +1279,10 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
else
i2c->op = I2C_MASTER_WR;
- if (!i2c->auto_restart) {
- if (num > 1) {
- /* combined two messages into one transaction */
- i2c->op = I2C_MASTER_WRRD;
- left_num--;
- }
+ if (write_then_read_en) {
+ /* combined two messages into one transaction */
+ i2c->op = I2C_MASTER_WRRD;
+ left_num--;
}
/* always use DMA mode. */
@@ -1293,7 +1290,10 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
if (ret < 0)
goto err_exit;
- msgs++;
+ if (i2c->op == I2C_MASTER_WRRD)
+ msgs += 2;
+ else
+ msgs++;
}
/* the return value is number of executed messages */
ret = num;
@@ -1457,11 +1457,7 @@ static int mtk_i2c_probe(struct platform_device *pdev)
strscpy(i2c->adap.name, I2C_DRV_NAME, sizeof(i2c->adap.name));
- ret = mtk_i2c_set_speed(i2c, clk_get_rate(i2c->clocks[speed_clk].clk));
- if (ret) {
- dev_err(&pdev->dev, "Failed to set the speed.\n");
- return -EINVAL;
- }
+ mtk_i2c_set_speed(i2c, clk_get_rate(i2c->clocks[speed_clk].clk));
if (i2c->dev_comp->max_dma_support > 32) {
ret = dma_set_mask(&pdev->dev,
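
The WRRD fast path now fires only for the exact shape it can merge, and it consumes both messages at once (msgs += 2) instead of advancing by one and re-processing the message it just sent. That shape is the standard register-read transaction, e.g. from a hypothetical client:

u8 reg = 0x10, val;
struct i2c_msg msgs[] = {
	{ .addr = client->addr, .flags = 0,        .len = 1, .buf = &reg },
	{ .addr = client->addr, .flags = I2C_M_RD, .len = 1, .buf = &val },
};

/* Returns 2 (messages executed) on success; the controller emits a
 * repeated START between the write and the read. */
int ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
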
diff --git a/drivers/i2c/busses/i2c-nct6694.c b/drivers/i2c/busses/i2c-nct6694.c
new file mode 100644
index 000000000000..1413ab6f9462
--- /dev/null
+++ b/drivers/i2c/busses/i2c-nct6694.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Nuvoton NCT6694 I2C adapter driver based on USB interface.
+ *
+ * Copyright (C) 2025 Nuvoton Technology Corp.
+ */
+
+#include <linux/i2c.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/mfd/nct6694.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+/*
+ * USB command module type for NCT6694 I2C controller.
+ * This defines the module type used for communication with the NCT6694
+ * I2C controller over the USB interface.
+ */
+#define NCT6694_I2C_MOD 0x03
+
+/* Command 00h - I2C Deliver */
+#define NCT6694_I2C_DELIVER 0x00
+#define NCT6694_I2C_DELIVER_SEL 0x00
+
+#define NCT6694_I2C_MAX_XFER_SIZE 64
+#define NCT6694_I2C_MAX_DEVS 6
+
+static unsigned char br_reg[NCT6694_I2C_MAX_DEVS] = {[0 ... (NCT6694_I2C_MAX_DEVS - 1)] = 0xFF};
+
+module_param_array(br_reg, byte, NULL, 0644);
+MODULE_PARM_DESC(br_reg,
+ "I2C Baudrate register per adapter: (0=25K, 1=50K, 2=100K, 3=200K, 4=400K, 5=800K, 6=1M), default=2");
+
+enum nct6694_i2c_baudrate {
+ NCT6694_I2C_BR_25K = 0,
+ NCT6694_I2C_BR_50K,
+ NCT6694_I2C_BR_100K,
+ NCT6694_I2C_BR_200K,
+ NCT6694_I2C_BR_400K,
+ NCT6694_I2C_BR_800K,
+ NCT6694_I2C_BR_1M
+};
+
+struct __packed nct6694_i2c_deliver {
+ u8 port;
+ u8 br;
+ u8 addr;
+ u8 w_cnt;
+ u8 r_cnt;
+ u8 rsv[11];
+ u8 write_data[NCT6694_I2C_MAX_XFER_SIZE];
+ u8 read_data[NCT6694_I2C_MAX_XFER_SIZE];
+};
+
+struct nct6694_i2c_data {
+ struct device *dev;
+ struct nct6694 *nct6694;
+ struct i2c_adapter adapter;
+ struct nct6694_i2c_deliver deliver;
+ unsigned char port;
+ unsigned char br;
+};
+
+static int nct6694_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+ struct nct6694_i2c_data *data = adap->algo_data;
+ struct nct6694_i2c_deliver *deliver = &data->deliver;
+ static const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_I2C_MOD,
+ .cmd = NCT6694_I2C_DELIVER,
+ .sel = NCT6694_I2C_DELIVER_SEL,
+ .len = cpu_to_le16(sizeof(*deliver))
+ };
+ int ret, i;
+
+ for (i = 0; i < num; i++) {
+ struct i2c_msg *msg_temp = &msgs[i];
+
+ memset(deliver, 0, sizeof(*deliver));
+
+ deliver->port = data->port;
+ deliver->br = data->br;
+ deliver->addr = i2c_8bit_addr_from_msg(msg_temp);
+ if (msg_temp->flags & I2C_M_RD) {
+ deliver->r_cnt = msg_temp->len;
+ ret = nct6694_write_msg(data->nct6694, &cmd_hd, deliver);
+ if (ret < 0)
+ return ret;
+
+ memcpy(msg_temp->buf, deliver->read_data, msg_temp->len);
+ } else {
+ deliver->w_cnt = msg_temp->len;
+ memcpy(deliver->write_data, msg_temp->buf, msg_temp->len);
+ ret = nct6694_write_msg(data->nct6694, &cmd_hd, deliver);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return num;
+}
+
+static u32 nct6694_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_adapter_quirks nct6694_i2c_quirks = {
+ .max_read_len = NCT6694_I2C_MAX_XFER_SIZE,
+ .max_write_len = NCT6694_I2C_MAX_XFER_SIZE,
+};
+
+static const struct i2c_algorithm nct6694_i2c_algo = {
+ .xfer = nct6694_i2c_xfer,
+ .functionality = nct6694_i2c_func,
+};
+
+static int nct6694_i2c_set_baudrate(struct nct6694_i2c_data *data)
+{
+ if (data->port >= NCT6694_I2C_MAX_DEVS) {
+ dev_err(data->dev, "Invalid I2C port index %d\n", data->port);
+ return -EINVAL;
+ }
+
+ if (br_reg[data->port] > NCT6694_I2C_BR_1M) {
+ dev_warn(data->dev, "Invalid baudrate %d for I2C%d, using 100K\n",
+ br_reg[data->port], data->port);
+ br_reg[data->port] = NCT6694_I2C_BR_100K;
+ }
+
+ data->br = br_reg[data->port];
+
+ return 0;
+}
+
+static void nct6694_i2c_ida_free(void *d)
+{
+ struct nct6694_i2c_data *data = d;
+ struct nct6694 *nct6694 = data->nct6694;
+
+ ida_free(&nct6694->i2c_ida, data->port);
+}
+
+static int nct6694_i2c_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct nct6694 *nct6694 = dev_get_drvdata(dev->parent);
+ struct nct6694_i2c_data *data;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = dev;
+ data->nct6694 = nct6694;
+
+ ret = ida_alloc(&nct6694->i2c_ida, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+ data->port = ret;
+
+ ret = devm_add_action_or_reset(dev, nct6694_i2c_ida_free, data);
+ if (ret)
+ return ret;
+
+ ret = nct6694_i2c_set_baudrate(data);
+ if (ret)
+ return ret;
+
+ sprintf(data->adapter.name, "NCT6694 I2C Adapter %d", data->port);
+ data->adapter.owner = THIS_MODULE;
+ data->adapter.algo = &nct6694_i2c_algo;
+ data->adapter.quirks = &nct6694_i2c_quirks;
+ data->adapter.dev.parent = dev;
+ data->adapter.algo_data = data;
+
+ platform_set_drvdata(pdev, data);
+
+ return devm_i2c_add_adapter(dev, &data->adapter);
+}
+
+static struct platform_driver nct6694_i2c_driver = {
+ .driver = {
+ .name = "nct6694-i2c",
+ },
+ .probe = nct6694_i2c_probe,
+};
+
+module_platform_driver(nct6694_i2c_driver);
+
+MODULE_DESCRIPTION("USB-I2C adapter driver for NCT6694");
+MODULE_AUTHOR("Ming Yu <tmyu0@nuvoton.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:nct6694-i2c");
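For context, a minimal client-side sketch of how transfers reach this adapter: each struct i2c_msg is turned into one NCT6694 "deliver" USB packet by nct6694_i2c_xfer(), so every message length must fit NCT6694_I2C_MAX_XFER_SIZE (enforced by the adapter quirks). This is an illustration only; the 0x50 address and register layout are invented.

#include <linux/i2c.h>

/* Hypothetical consumer: write a register index, then read one byte back. */
static int example_read_reg(struct i2c_adapter *adap, u8 reg, u8 *val)
{
	struct i2c_msg msgs[2] = {
		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = val  },
	};
	int ret;

	/* Each message becomes one deliver packet over USB. */
	ret = i2c_transfer(adap, msgs, 2);

	return ret == 2 ? 0 : (ret < 0 ? ret : -EIO);
}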
diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c
index 85e8cf58e8bf..0cbf2f509527 100644
--- a/drivers/i2c/busses/i2c-pca-isa.c
+++ b/drivers/i2c/busses/i2c-pca-isa.c
@@ -95,7 +95,7 @@ static struct i2c_algo_pca_data pca_isa_data = {
/* .data intentionally left NULL, not needed with ISA */
.write_byte = pca_isa_writebyte,
.read_byte = pca_isa_readbyte,
- .wait_for_completion = pca_isa_waitforcompletion,
+ .wait_for_completion_cb = pca_isa_waitforcompletion,
.reset_chip = pca_isa_resetchip,
};
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index 87da8241b927..c0f35ebbe37d 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -180,7 +180,7 @@ static int i2c_pca_pf_probe(struct platform_device *pdev)
}
i2c->algo_data.data = i2c;
- i2c->algo_data.wait_for_completion = i2c_pca_pf_waitforcompletion;
+ i2c->algo_data.wait_for_completion_cb = i2c_pca_pf_waitforcompletion;
if (i2c->gpio)
i2c->algo_data.reset_chip = i2c_pca_pf_resetchip;
else
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index ff2289b52c84..95a577764d5c 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -870,7 +870,13 @@ static int geni_i2c_probe(struct platform_device *pdev)
goto err_clk;
}
proto = geni_se_read_proto(&gi2c->se);
- if (proto != GENI_SE_I2C) {
+ if (proto == GENI_SE_INVALID_PROTO) {
+ ret = geni_load_se_firmware(&gi2c->se, GENI_SE_I2C);
+ if (ret) {
+ dev_err_probe(dev, ret, "i2c firmware load failed ret: %d\n", ret);
+ goto err_resources;
+ }
+ } else if (proto != GENI_SE_I2C) {
ret = dev_err_probe(dev, -ENXIO, "Invalid proto %d\n", proto);
goto err_resources;
}
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index 9c164a4b9bb9..b0ee9ac45a97 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -386,7 +386,7 @@ static int riic_init_hw(struct riic_dev *riic)
*/
total_ticks = DIV_ROUND_UP(rate, t->bus_freq_hz ?: 1);
- for (cks = 0; cks < 7; cks++) {
+ for (cks = 0; cks <= 7; cks++) {
/*
* 60% low time must be less than BRL + 2 + 1
* BRL max register value is 0x1F.
diff --git a/drivers/i2c/busses/i2c-rtl9300.c b/drivers/i2c/busses/i2c-rtl9300.c
index e064e8a4a1f0..4723e48cfe18 100644
--- a/drivers/i2c/busses/i2c-rtl9300.c
+++ b/drivers/i2c/busses/i2c-rtl9300.c
@@ -8,6 +8,7 @@
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/unaligned.h>
enum rtl9300_bus_freq {
RTL9300_I2C_STD_FREQ,
@@ -20,100 +21,143 @@ struct rtl9300_i2c_chan {
struct i2c_adapter adap;
struct rtl9300_i2c *i2c;
enum rtl9300_bus_freq bus_freq;
- u8 sda_pin;
+ u8 sda_num;
+};
+
+enum rtl9300_i2c_reg_scope {
+ REG_SCOPE_GLOBAL,
+ REG_SCOPE_MASTER,
+};
+
+struct rtl9300_i2c_reg_field {
+ struct reg_field field;
+ enum rtl9300_i2c_reg_scope scope;
+};
+
+enum rtl9300_i2c_reg_fields {
+ F_DATA_WIDTH = 0,
+ F_DEV_ADDR,
+ F_I2C_FAIL,
+ F_I2C_TRIG,
+ F_MEM_ADDR,
+ F_MEM_ADDR_WIDTH,
+ F_RD_MODE,
+ F_RWOP,
+ F_SCL_FREQ,
+ F_SCL_SEL,
+ F_SDA_OUT_SEL,
+ F_SDA_SEL,
+
+ /* keep last */
+ F_NUM_FIELDS
+};
+
+struct rtl9300_i2c_drv_data {
+ struct rtl9300_i2c_reg_field field_desc[F_NUM_FIELDS];
+ int (*select_scl)(struct rtl9300_i2c *i2c, u8 scl);
+ u32 data_reg;
+ u8 max_nchan;
};
#define RTL9300_I2C_MUX_NCHAN 8
+#define RTL9310_I2C_MUX_NCHAN 12
struct rtl9300_i2c {
struct regmap *regmap;
struct device *dev;
- struct rtl9300_i2c_chan chans[RTL9300_I2C_MUX_NCHAN];
+ struct rtl9300_i2c_chan chans[RTL9310_I2C_MUX_NCHAN];
+ struct regmap_field *fields[F_NUM_FIELDS];
u32 reg_base;
- u8 sda_pin;
+ u32 data_reg;
+ u8 scl_num;
+ u8 sda_num;
struct mutex lock;
};
+DEFINE_GUARD(rtl9300_i2c, struct rtl9300_i2c *, mutex_lock(&_T->lock), mutex_unlock(&_T->lock))
+
+enum rtl9300_i2c_xfer_type {
+ RTL9300_I2C_XFER_BYTE,
+ RTL9300_I2C_XFER_WORD,
+ RTL9300_I2C_XFER_BLOCK,
+};
+
+struct rtl9300_i2c_xfer {
+ enum rtl9300_i2c_xfer_type type;
+ u16 dev_addr;
+ u8 reg_addr;
+ u8 reg_addr_len;
+ u8 *data;
+ u8 data_len;
+ bool write;
+};
+
#define RTL9300_I2C_MST_CTRL1 0x0
-#define RTL9300_I2C_MST_CTRL1_MEM_ADDR_OFS 8
-#define RTL9300_I2C_MST_CTRL1_MEM_ADDR_MASK GENMASK(31, 8)
-#define RTL9300_I2C_MST_CTRL1_SDA_OUT_SEL_OFS 4
-#define RTL9300_I2C_MST_CTRL1_SDA_OUT_SEL_MASK GENMASK(6, 4)
-#define RTL9300_I2C_MST_CTRL1_GPIO_SCL_SEL BIT(3)
-#define RTL9300_I2C_MST_CTRL1_RWOP BIT(2)
-#define RTL9300_I2C_MST_CTRL1_I2C_FAIL BIT(1)
-#define RTL9300_I2C_MST_CTRL1_I2C_TRIG BIT(0)
#define RTL9300_I2C_MST_CTRL2 0x4
-#define RTL9300_I2C_MST_CTRL2_RD_MODE BIT(15)
-#define RTL9300_I2C_MST_CTRL2_DEV_ADDR_OFS 8
-#define RTL9300_I2C_MST_CTRL2_DEV_ADDR_MASK GENMASK(14, 8)
-#define RTL9300_I2C_MST_CTRL2_DATA_WIDTH_OFS 4
-#define RTL9300_I2C_MST_CTRL2_DATA_WIDTH_MASK GENMASK(7, 4)
-#define RTL9300_I2C_MST_CTRL2_MEM_ADDR_WIDTH_OFS 2
-#define RTL9300_I2C_MST_CTRL2_MEM_ADDR_WIDTH_MASK GENMASK(3, 2)
-#define RTL9300_I2C_MST_CTRL2_SCL_FREQ_OFS 0
-#define RTL9300_I2C_MST_CTRL2_SCL_FREQ_MASK GENMASK(1, 0)
#define RTL9300_I2C_MST_DATA_WORD0 0x8
#define RTL9300_I2C_MST_DATA_WORD1 0xc
#define RTL9300_I2C_MST_DATA_WORD2 0x10
#define RTL9300_I2C_MST_DATA_WORD3 0x14
-
#define RTL9300_I2C_MST_GLB_CTRL 0x384
+#define RTL9310_I2C_MST_IF_CTRL 0x1004
+#define RTL9310_I2C_MST_IF_SEL 0x1008
+#define RTL9310_I2C_MST_CTRL 0x0
+#define RTL9310_I2C_MST_MEMADDR_CTRL 0x4
+#define RTL9310_I2C_MST_DATA_CTRL 0x8
+
static int rtl9300_i2c_reg_addr_set(struct rtl9300_i2c *i2c, u32 reg, u16 len)
{
- u32 val, mask;
int ret;
- val = len << RTL9300_I2C_MST_CTRL2_MEM_ADDR_WIDTH_OFS;
- mask = RTL9300_I2C_MST_CTRL2_MEM_ADDR_WIDTH_MASK;
-
- ret = regmap_update_bits(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL2, mask, val);
+ ret = regmap_field_write(i2c->fields[F_MEM_ADDR_WIDTH], len);
if (ret)
return ret;
- val = reg << RTL9300_I2C_MST_CTRL1_MEM_ADDR_OFS;
- mask = RTL9300_I2C_MST_CTRL1_MEM_ADDR_MASK;
-
- return regmap_update_bits(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL1, mask, val);
+ return regmap_field_write(i2c->fields[F_MEM_ADDR], reg);
}
-static int rtl9300_i2c_config_io(struct rtl9300_i2c *i2c, u8 sda_pin)
+static int rtl9300_i2c_select_scl(struct rtl9300_i2c *i2c, u8 scl)
{
- int ret;
- u32 val, mask;
-
- ret = regmap_update_bits(i2c->regmap, RTL9300_I2C_MST_GLB_CTRL, BIT(sda_pin), BIT(sda_pin));
- if (ret)
- return ret;
-
- val = (sda_pin << RTL9300_I2C_MST_CTRL1_SDA_OUT_SEL_OFS) |
- RTL9300_I2C_MST_CTRL1_GPIO_SCL_SEL;
- mask = RTL9300_I2C_MST_CTRL1_SDA_OUT_SEL_MASK | RTL9300_I2C_MST_CTRL1_GPIO_SCL_SEL;
+ return regmap_field_write(i2c->fields[F_SCL_SEL], 1);
+}
- return regmap_update_bits(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL1, mask, val);
+static int rtl9310_i2c_select_scl(struct rtl9300_i2c *i2c, u8 scl)
+{
+ return regmap_field_update_bits(i2c->fields[F_SCL_SEL], BIT(scl), BIT(scl));
}
-static int rtl9300_i2c_config_xfer(struct rtl9300_i2c *i2c, struct rtl9300_i2c_chan *chan,
- u16 addr, u16 len)
+static int rtl9300_i2c_config_chan(struct rtl9300_i2c *i2c, struct rtl9300_i2c_chan *chan)
{
- u32 val, mask;
+ struct rtl9300_i2c_drv_data *drv_data;
+ int ret;
- val = chan->bus_freq << RTL9300_I2C_MST_CTRL2_SCL_FREQ_OFS;
- mask = RTL9300_I2C_MST_CTRL2_SCL_FREQ_MASK;
+ if (i2c->sda_num == chan->sda_num)
+ return 0;
- val |= addr << RTL9300_I2C_MST_CTRL2_DEV_ADDR_OFS;
- mask |= RTL9300_I2C_MST_CTRL2_DEV_ADDR_MASK;
+ ret = regmap_field_write(i2c->fields[F_SCL_FREQ], chan->bus_freq);
+ if (ret)
+ return ret;
- val |= ((len - 1) & 0xf) << RTL9300_I2C_MST_CTRL2_DATA_WIDTH_OFS;
- mask |= RTL9300_I2C_MST_CTRL2_DATA_WIDTH_MASK;
+ drv_data = (struct rtl9300_i2c_drv_data *)device_get_match_data(i2c->dev);
+ ret = drv_data->select_scl(i2c, i2c->scl_num);
+ if (ret)
+ return ret;
+
+ ret = regmap_field_update_bits(i2c->fields[F_SDA_SEL], BIT(chan->sda_num),
+ BIT(chan->sda_num));
+ if (ret)
+ return ret;
- mask |= RTL9300_I2C_MST_CTRL2_RD_MODE;
+ ret = regmap_field_write(i2c->fields[F_SDA_OUT_SEL], chan->sda_num);
+ if (ret)
+ return ret;
- return regmap_update_bits(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL2, mask, val);
+ i2c->sda_num = chan->sda_num;
+ return 0;
}
-static int rtl9300_i2c_read(struct rtl9300_i2c *i2c, u8 *buf, int len)
+static int rtl9300_i2c_read(struct rtl9300_i2c *i2c, u8 *buf, u8 len)
{
u32 vals[4] = {};
int i, ret;
@@ -121,8 +165,7 @@ static int rtl9300_i2c_read(struct rtl9300_i2c *i2c, u8 *buf, int len)
if (len > 16)
return -EIO;
- ret = regmap_bulk_read(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_DATA_WORD0,
- vals, ARRAY_SIZE(vals));
+ ret = regmap_bulk_read(i2c->regmap, i2c->data_reg, vals, ARRAY_SIZE(vals));
if (ret)
return ret;
@@ -134,7 +177,7 @@ static int rtl9300_i2c_read(struct rtl9300_i2c *i2c, u8 *buf, int len)
return 0;
}
-static int rtl9300_i2c_write(struct rtl9300_i2c *i2c, u8 *buf, int len)
+static int rtl9300_i2c_write(struct rtl9300_i2c *i2c, u8 *buf, u8 len)
{
u32 vals[4] = {};
int i;
@@ -143,62 +186,100 @@ static int rtl9300_i2c_write(struct rtl9300_i2c *i2c, u8 *buf, int len)
return -EIO;
for (i = 0; i < len; i++) {
- if (i % 4 == 0)
- vals[i/4] = 0;
- vals[i/4] <<= 8;
- vals[i/4] |= buf[i];
+ unsigned int shift = (i % 4) * 8;
+ unsigned int reg = i / 4;
+
+ vals[reg] |= buf[i] << shift;
}
- return regmap_bulk_write(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_DATA_WORD0,
- vals, ARRAY_SIZE(vals));
+ return regmap_bulk_write(i2c->regmap, i2c->data_reg, vals, ARRAY_SIZE(vals));
}
static int rtl9300_i2c_writel(struct rtl9300_i2c *i2c, u32 data)
{
- return regmap_write(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_DATA_WORD0, data);
+ return regmap_write(i2c->regmap, i2c->data_reg, data);
}
-static int rtl9300_i2c_execute_xfer(struct rtl9300_i2c *i2c, char read_write,
- int size, union i2c_smbus_data *data, int len)
+static int rtl9300_i2c_prepare_xfer(struct rtl9300_i2c *i2c, struct rtl9300_i2c_xfer *xfer)
{
- u32 val, mask;
int ret;
- val = read_write == I2C_SMBUS_WRITE ? RTL9300_I2C_MST_CTRL1_RWOP : 0;
- mask = RTL9300_I2C_MST_CTRL1_RWOP;
+ if (xfer->data_len < 1 || xfer->data_len > 16)
+ return -EINVAL;
+
+ ret = regmap_field_write(i2c->fields[F_DEV_ADDR], xfer->dev_addr);
+ if (ret)
+ return ret;
- val |= RTL9300_I2C_MST_CTRL1_I2C_TRIG;
- mask |= RTL9300_I2C_MST_CTRL1_I2C_TRIG;
+ ret = rtl9300_i2c_reg_addr_set(i2c, xfer->reg_addr, xfer->reg_addr_len);
+ if (ret)
+ return ret;
- ret = regmap_update_bits(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL1, mask, val);
+ ret = regmap_field_write(i2c->fields[F_RWOP], xfer->write);
if (ret)
return ret;
- ret = regmap_read_poll_timeout(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL1,
- val, !(val & RTL9300_I2C_MST_CTRL1_I2C_TRIG), 100, 2000);
+ ret = regmap_field_write(i2c->fields[F_DATA_WIDTH], (xfer->data_len - 1) & 0xf);
if (ret)
return ret;
- if (val & RTL9300_I2C_MST_CTRL1_I2C_FAIL)
+ if (xfer->write) {
+ switch (xfer->type) {
+ case RTL9300_I2C_XFER_BYTE:
+ ret = rtl9300_i2c_writel(i2c, *xfer->data);
+ break;
+ case RTL9300_I2C_XFER_WORD:
+ ret = rtl9300_i2c_writel(i2c, get_unaligned((const u16 *)xfer->data));
+ break;
+ default:
+ ret = rtl9300_i2c_write(i2c, xfer->data, xfer->data_len);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int rtl9300_i2c_do_xfer(struct rtl9300_i2c *i2c, struct rtl9300_i2c_xfer *xfer)
+{
+ u32 val;
+ int ret;
+
+ ret = regmap_field_write(i2c->fields[F_I2C_TRIG], 1);
+ if (ret)
+ return ret;
+
+ ret = regmap_field_read_poll_timeout(i2c->fields[F_I2C_TRIG], val, !val, 100, 100000);
+ if (ret)
+ return ret;
+
+ ret = regmap_field_read(i2c->fields[F_I2C_FAIL], &val);
+ if (ret)
+ return ret;
+ if (val)
return -EIO;
- if (read_write == I2C_SMBUS_READ) {
- if (size == I2C_SMBUS_BYTE || size == I2C_SMBUS_BYTE_DATA) {
- ret = regmap_read(i2c->regmap,
- i2c->reg_base + RTL9300_I2C_MST_DATA_WORD0, &val);
+ if (!xfer->write) {
+ switch (xfer->type) {
+ case RTL9300_I2C_XFER_BYTE:
+ ret = regmap_read(i2c->regmap, i2c->data_reg, &val);
if (ret)
return ret;
- data->byte = val & 0xff;
- } else if (size == I2C_SMBUS_WORD_DATA) {
- ret = regmap_read(i2c->regmap,
- i2c->reg_base + RTL9300_I2C_MST_DATA_WORD0, &val);
+
+ *xfer->data = val & 0xff;
+ break;
+ case RTL9300_I2C_XFER_WORD:
+ ret = regmap_read(i2c->regmap, i2c->data_reg, &val);
if (ret)
return ret;
- data->word = val & 0xffff;
- } else {
- ret = rtl9300_i2c_read(i2c, &data->block[0], len);
+
+ put_unaligned(val & 0xffff, (u16 *)xfer->data);
+ break;
+ default:
+ ret = rtl9300_i2c_read(i2c, xfer->data, xfer->data_len);
if (ret)
return ret;
+ break;
}
}
@@ -211,106 +292,68 @@ static int rtl9300_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr, unsigned s
{
struct rtl9300_i2c_chan *chan = i2c_get_adapdata(adap);
struct rtl9300_i2c *i2c = chan->i2c;
- int len = 0, ret;
+ struct rtl9300_i2c_xfer xfer = {0};
+ int ret;
- mutex_lock(&i2c->lock);
- if (chan->sda_pin != i2c->sda_pin) {
- ret = rtl9300_i2c_config_io(i2c, chan->sda_pin);
- if (ret)
- goto out_unlock;
- i2c->sda_pin = chan->sda_pin;
- }
+ if (addr > 0x7f)
+ return -EINVAL;
- switch (size) {
- case I2C_SMBUS_QUICK:
- ret = rtl9300_i2c_config_xfer(i2c, chan, addr, 0);
- if (ret)
- goto out_unlock;
- ret = rtl9300_i2c_reg_addr_set(i2c, 0, 0);
- if (ret)
- goto out_unlock;
- break;
+ guard(rtl9300_i2c)(i2c);
+
+ ret = rtl9300_i2c_config_chan(i2c, chan);
+ if (ret)
+ return ret;
+ xfer.dev_addr = addr & 0x7f;
+ xfer.write = (read_write == I2C_SMBUS_WRITE);
+ xfer.reg_addr = command;
+ xfer.reg_addr_len = 1;
+
+ switch (size) {
case I2C_SMBUS_BYTE:
- if (read_write == I2C_SMBUS_WRITE) {
- ret = rtl9300_i2c_config_xfer(i2c, chan, addr, 0);
- if (ret)
- goto out_unlock;
- ret = rtl9300_i2c_reg_addr_set(i2c, command, 1);
- if (ret)
- goto out_unlock;
- } else {
- ret = rtl9300_i2c_config_xfer(i2c, chan, addr, 1);
- if (ret)
- goto out_unlock;
- ret = rtl9300_i2c_reg_addr_set(i2c, 0, 0);
- if (ret)
- goto out_unlock;
- }
+ xfer.data = (read_write == I2C_SMBUS_READ) ? &data->byte : &command;
+ xfer.data_len = 1;
+ xfer.reg_addr = 0;
+ xfer.reg_addr_len = 0;
+ xfer.type = RTL9300_I2C_XFER_BYTE;
break;
-
case I2C_SMBUS_BYTE_DATA:
- ret = rtl9300_i2c_reg_addr_set(i2c, command, 1);
- if (ret)
- goto out_unlock;
- ret = rtl9300_i2c_config_xfer(i2c, chan, addr, 1);
- if (ret)
- goto out_unlock;
- if (read_write == I2C_SMBUS_WRITE) {
- ret = rtl9300_i2c_writel(i2c, data->byte);
- if (ret)
- goto out_unlock;
- }
+ xfer.data = &data->byte;
+ xfer.data_len = 1;
+ xfer.type = RTL9300_I2C_XFER_BYTE;
break;
-
case I2C_SMBUS_WORD_DATA:
- ret = rtl9300_i2c_reg_addr_set(i2c, command, 1);
- if (ret)
- goto out_unlock;
- ret = rtl9300_i2c_config_xfer(i2c, chan, addr, 2);
- if (ret)
- goto out_unlock;
- if (read_write == I2C_SMBUS_WRITE) {
- ret = rtl9300_i2c_writel(i2c, data->word);
- if (ret)
- goto out_unlock;
- }
+ xfer.data = (u8 *)&data->word;
+ xfer.data_len = 2;
+ xfer.type = RTL9300_I2C_XFER_WORD;
break;
-
case I2C_SMBUS_BLOCK_DATA:
- ret = rtl9300_i2c_reg_addr_set(i2c, command, 1);
- if (ret)
- goto out_unlock;
- ret = rtl9300_i2c_config_xfer(i2c, chan, addr, data->block[0]);
- if (ret)
- goto out_unlock;
- if (read_write == I2C_SMBUS_WRITE) {
- ret = rtl9300_i2c_write(i2c, &data->block[1], data->block[0]);
- if (ret)
- goto out_unlock;
- }
- len = data->block[0];
+ xfer.data = &data->block[0];
+ xfer.data_len = data->block[0] + 1;
+ xfer.type = RTL9300_I2C_XFER_BLOCK;
+ break;
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ xfer.data = &data->block[1];
+ xfer.data_len = data->block[0];
+ xfer.type = RTL9300_I2C_XFER_BLOCK;
break;
-
default:
dev_err(&adap->dev, "Unsupported transaction %d\n", size);
- ret = -EOPNOTSUPP;
- goto out_unlock;
+ return -EOPNOTSUPP;
}
- ret = rtl9300_i2c_execute_xfer(i2c, read_write, size, data, len);
-
-out_unlock:
- mutex_unlock(&i2c->lock);
+ ret = rtl9300_i2c_prepare_xfer(i2c, &xfer);
+ if (ret)
+ return ret;
- return ret;
+ return rtl9300_i2c_do_xfer(i2c, &xfer);
}
static u32 rtl9300_i2c_func(struct i2c_adapter *a)
{
- return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
- I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
- I2C_FUNC_SMBUS_BLOCK_DATA;
+ return I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA |
+ I2C_FUNC_SMBUS_I2C_BLOCK;
}
static const struct i2c_algorithm rtl9300_i2c_algo = {
@@ -319,7 +362,7 @@ static const struct i2c_algorithm rtl9300_i2c_algo = {
};
static struct i2c_adapter_quirks rtl9300_i2c_quirks = {
- .flags = I2C_AQ_NO_CLK_STRETCH,
+ .flags = I2C_AQ_NO_CLK_STRETCH | I2C_AQ_NO_ZERO_LEN,
.max_read_len = 16,
.max_write_len = 16,
};
@@ -328,9 +371,11 @@ static int rtl9300_i2c_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rtl9300_i2c *i2c;
- u32 clock_freq, sda_pin;
- int ret, i = 0;
struct fwnode_handle *child;
+ struct rtl9300_i2c_drv_data *drv_data;
+ struct reg_field fields[F_NUM_FIELDS];
+ u32 clock_freq, scl_num, sda_num;
+ int ret, i = 0;
i2c = devm_kzalloc(dev, sizeof(*i2c), GFP_KERNEL);
if (!i2c)
@@ -347,16 +392,34 @@ static int rtl9300_i2c_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = device_property_read_u32(dev, "realtek,scl", &scl_num);
+ if (ret || scl_num != 1)
+ scl_num = 0;
+ i2c->scl_num = (u8)scl_num;
+
platform_set_drvdata(pdev, i2c);
- if (device_get_child_node_count(dev) >= RTL9300_I2C_MUX_NCHAN)
+ drv_data = (struct rtl9300_i2c_drv_data *)device_get_match_data(i2c->dev);
+ if (device_get_child_node_count(dev) > drv_data->max_nchan)
return dev_err_probe(dev, -EINVAL, "Too many channels\n");
+ i2c->data_reg = i2c->reg_base + drv_data->data_reg;
+ for (i = 0; i < F_NUM_FIELDS; i++) {
+ fields[i] = drv_data->field_desc[i].field;
+ if (drv_data->field_desc[i].scope == REG_SCOPE_MASTER)
+ fields[i].reg += i2c->reg_base;
+ }
+ ret = devm_regmap_field_bulk_alloc(dev, i2c->regmap, i2c->fields,
+ fields, F_NUM_FIELDS);
+ if (ret)
+ return ret;
+
+ i = 0;
device_for_each_child_node(dev, child) {
struct rtl9300_i2c_chan *chan = &i2c->chans[i];
struct i2c_adapter *adap = &chan->adap;
- ret = fwnode_property_read_u32(child, "reg", &sda_pin);
+ ret = fwnode_property_read_u32(child, "reg", &sda_num);
if (ret)
return ret;
@@ -368,17 +431,16 @@ static int rtl9300_i2c_probe(struct platform_device *pdev)
case I2C_MAX_STANDARD_MODE_FREQ:
chan->bus_freq = RTL9300_I2C_STD_FREQ;
break;
-
case I2C_MAX_FAST_MODE_FREQ:
chan->bus_freq = RTL9300_I2C_FAST_FREQ;
break;
default:
dev_warn(i2c->dev, "SDA%d clock-frequency %d not supported using default\n",
- sda_pin, clock_freq);
+ sda_num, clock_freq);
break;
}
- chan->sda_pin = sda_pin;
+ chan->sda_num = sda_num;
chan->i2c = i2c;
adap = &i2c->chans[i].adap;
adap->owner = THIS_MODULE;
@@ -388,23 +450,77 @@ static int rtl9300_i2c_probe(struct platform_device *pdev)
adap->dev.parent = dev;
i2c_set_adapdata(adap, chan);
adap->dev.of_node = to_of_node(child);
- snprintf(adap->name, sizeof(adap->name), "%s SDA%d\n", dev_name(dev), sda_pin);
+ snprintf(adap->name, sizeof(adap->name), "%s SDA%d\n", dev_name(dev), sda_num);
i++;
ret = devm_i2c_add_adapter(dev, adap);
if (ret)
return ret;
}
- i2c->sda_pin = 0xff;
+ i2c->sda_num = 0xff;
+
+ /* only use standard read format */
+ ret = regmap_field_write(i2c->fields[F_RD_MODE], 0);
+ if (ret)
+ return ret;
return 0;
}
+#define GLB_REG_FIELD(reg, lsb, msb) \
+ { .field = REG_FIELD(reg, lsb, msb), .scope = REG_SCOPE_GLOBAL }
+#define MST_REG_FIELD(reg, lsb, msb) \
+ { .field = REG_FIELD(reg, lsb, msb), .scope = REG_SCOPE_MASTER }
+
+static const struct rtl9300_i2c_drv_data rtl9300_i2c_drv_data = {
+ .field_desc = {
+ [F_MEM_ADDR] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL1, 8, 31),
+ [F_SDA_OUT_SEL] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL1, 4, 6),
+ [F_SCL_SEL] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL1, 3, 3),
+ [F_RWOP] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL1, 2, 2),
+ [F_I2C_FAIL] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL1, 1, 1),
+ [F_I2C_TRIG] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL1, 0, 0),
+ [F_RD_MODE] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL2, 15, 15),
+ [F_DEV_ADDR] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL2, 8, 14),
+ [F_DATA_WIDTH] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL2, 4, 7),
+ [F_MEM_ADDR_WIDTH] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL2, 2, 3),
+ [F_SCL_FREQ] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL2, 0, 1),
+ [F_SDA_SEL] = GLB_REG_FIELD(RTL9300_I2C_MST_GLB_CTRL, 0, 7),
+ },
+ .select_scl = rtl9300_i2c_select_scl,
+ .data_reg = RTL9300_I2C_MST_DATA_WORD0,
+ .max_nchan = RTL9300_I2C_MUX_NCHAN,
+};
+
+static const struct rtl9300_i2c_drv_data rtl9310_i2c_drv_data = {
+ .field_desc = {
+ [F_SCL_SEL] = GLB_REG_FIELD(RTL9310_I2C_MST_IF_SEL, 12, 13),
+ [F_SDA_SEL] = GLB_REG_FIELD(RTL9310_I2C_MST_IF_SEL, 0, 11),
+ [F_SCL_FREQ] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 30, 31),
+ [F_DEV_ADDR] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 11, 17),
+ [F_SDA_OUT_SEL] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 18, 21),
+ [F_MEM_ADDR_WIDTH] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 9, 10),
+ [F_DATA_WIDTH] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 5, 8),
+ [F_RD_MODE] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 4, 4),
+ [F_RWOP] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 2, 2),
+ [F_I2C_FAIL] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 1, 1),
+ [F_I2C_TRIG] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 0, 0),
+ [F_MEM_ADDR] = MST_REG_FIELD(RTL9310_I2C_MST_MEMADDR_CTRL, 0, 23),
+ },
+ .select_scl = rtl9310_i2c_select_scl,
+ .data_reg = RTL9310_I2C_MST_DATA_CTRL,
+ .max_nchan = RTL9310_I2C_MUX_NCHAN,
+};
+
static const struct of_device_id i2c_rtl9300_dt_ids[] = {
- { .compatible = "realtek,rtl9301-i2c" },
- { .compatible = "realtek,rtl9302b-i2c" },
- { .compatible = "realtek,rtl9302c-i2c" },
- { .compatible = "realtek,rtl9303-i2c" },
+ { .compatible = "realtek,rtl9301-i2c", .data = (void *) &rtl9300_i2c_drv_data },
+ { .compatible = "realtek,rtl9302b-i2c", .data = (void *) &rtl9300_i2c_drv_data },
+ { .compatible = "realtek,rtl9302c-i2c", .data = (void *) &rtl9300_i2c_drv_data },
+ { .compatible = "realtek,rtl9303-i2c", .data = (void *) &rtl9300_i2c_drv_data },
+ { .compatible = "realtek,rtl9310-i2c", .data = (void *) &rtl9310_i2c_drv_data },
+ { .compatible = "realtek,rtl9311-i2c", .data = (void *) &rtl9310_i2c_drv_data },
+ { .compatible = "realtek,rtl9312-i2c", .data = (void *) &rtl9310_i2c_drv_data },
+ { .compatible = "realtek,rtl9313-i2c", .data = (void *) &rtl9310_i2c_drv_data },
{}
};
MODULE_DEVICE_TABLE(of, i2c_rtl9300_dt_ids);
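For readers unfamiliar with the regmap_field API this conversion relies on, here is a standalone sketch (the register offset and bit range are illustrative, not taken from the driver). A reg_field describes reg/lsb/msb once; regmap_field_write() then replaces the open-coded mask-and-shift regmap_update_bits() calls.

#include <linux/regmap.h>

/* Bits 14:8 of register 0x04, described once. */
static const struct reg_field demo_dev_addr = REG_FIELD(0x04, 8, 14);

static int demo_set_dev_addr(struct device *dev, struct regmap *map, u8 addr)
{
	struct regmap_field *f = devm_regmap_field_alloc(dev, map, demo_dev_addr);

	if (IS_ERR(f))
		return PTR_ERR(f);

	/* Equivalent to regmap_update_bits(map, 0x04, GENMASK(14, 8), addr << 8) */
	return regmap_field_write(f, addr);
}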
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index f4fa4703acbd..8138f5ef40f0 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -138,7 +138,6 @@ static void i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
#ifdef CONFIG_OF
static const struct of_device_id s3c24xx_i2c_match[] = {
- { .compatible = "samsung,s3c2410-i2c", .data = (void *)0 },
{ .compatible = "samsung,s3c2440-i2c", .data = (void *)QUIRK_S3C2440 },
{ .compatible = "samsung,s3c2440-hdmiphy-i2c",
.data = (void *)(QUIRK_S3C2440 | QUIRK_HDMIPHY | QUIRK_NO_GPIO) },
diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
index 56b2e5c5fb49..26ec34b19ad5 100644
--- a/drivers/i2c/busses/i2c-sprd.c
+++ b/drivers/i2c/busses/i2c-sprd.c
@@ -425,7 +425,7 @@ static irqreturn_t sprd_i2c_isr(int irq, void *dev_id)
* If we did not get one ACK from target when writing data, then we
* should finish this transmission since we got some errors.
*
- * When writing data, if i2c_tran == 0 which means we have writen
+ * When writing data, if i2c_tran == 0 which means we have written
* done all data, then we can finish this transmission.
*
* When reading data, if conut < rx fifo full threshold, which
diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c
index bf28f8e3ee6b..97d70e667227 100644
--- a/drivers/i2c/busses/i2c-st.c
+++ b/drivers/i2c/busses/i2c-st.c
@@ -152,7 +152,7 @@ struct st_i2c_timings {
/**
* struct st_i2c_client - client specific data
* @addr: 8-bit target addr, including r/w bit
- * @count: number of bytes to be transfered
+ * @count: number of bytes to be transferred
* @xfered: number of bytes already transferred
* @buf: data buffer
* @result: result of the transfer
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 4eb31b913c1a..e533460bccc3 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -1649,7 +1649,33 @@ static const struct tegra_i2c_hw_feature tegra194_i2c_hw = {
.has_interface_timing_reg = true,
};
+static const struct tegra_i2c_hw_feature tegra256_i2c_hw = {
+ .has_continue_xfer_support = true,
+ .has_per_pkt_xfer_complete_irq = true,
+ .clk_divisor_hs_mode = 7,
+ .clk_divisor_std_mode = 0x7a,
+ .clk_divisor_fast_mode = 0x40,
+ .clk_divisor_fast_plus_mode = 0x19,
+ .has_config_load_reg = true,
+ .has_multi_master_mode = true,
+ .has_slcg_override_reg = true,
+ .has_mst_fifo = true,
+ .has_mst_reset = true,
+ .quirks = &tegra194_i2c_quirks,
+ .supports_bus_clear = true,
+ .has_apb_dma = false,
+ .tlow_std_mode = 0x8,
+ .thigh_std_mode = 0x7,
+ .tlow_fast_fastplus_mode = 0x3,
+ .thigh_fast_fastplus_mode = 0x3,
+ .setup_hold_time_std_mode = 0x08080808,
+ .setup_hold_time_fast_fast_plus_mode = 0x02020202,
+ .setup_hold_time_hs_mode = 0x090909,
+ .has_interface_timing_reg = true,
+};
+
static const struct of_device_id tegra_i2c_of_match[] = {
+ { .compatible = "nvidia,tegra256-i2c", .data = &tegra256_i2c_hw, },
{ .compatible = "nvidia,tegra194-i2c", .data = &tegra194_i2c_hw, },
{ .compatible = "nvidia,tegra186-i2c", .data = &tegra186_i2c_hw, },
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
diff --git a/drivers/i2c/busses/i2c-usbio.c b/drivers/i2c/busses/i2c-usbio.c
new file mode 100644
index 000000000000..d42f9ab6e9a5
--- /dev/null
+++ b/drivers/i2c/busses/i2c-usbio.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2025 Intel Corporation.
+ * Copyright (c) 2025 Red Hat, Inc.
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/dev_printk.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/types.h>
+#include <linux/usb/usbio.h>
+
+#define I2C_RW_OVERHEAD (sizeof(struct usbio_bulk_packet) + sizeof(struct usbio_i2c_rw))
+
+struct usbio_i2c {
+ struct i2c_adapter adap;
+ struct auxiliary_device *adev;
+ struct usbio_i2c_rw *rwbuf;
+ unsigned long quirks;
+ u32 speed;
+ u16 txbuf_len;
+ u16 rxbuf_len;
+};
+
+static const struct acpi_device_id usbio_i2c_acpi_hids[] = {
+ { "INTC1008" }, /* MTL */
+ { "INTC10B3" }, /* ARL */
+ { "INTC10B6" }, /* LNL */
+ { "INTC10E3" }, /* PTL */
+ { }
+};
+
+static const u32 usbio_i2c_speeds[] = {
+ I2C_MAX_STANDARD_MODE_FREQ,
+ I2C_MAX_FAST_MODE_FREQ,
+ I2C_MAX_FAST_MODE_PLUS_FREQ,
+ I2C_MAX_HIGH_SPEED_MODE_FREQ
+};
+
+static void usbio_i2c_uninit(struct i2c_adapter *adap, struct i2c_msg *msg)
+{
+ struct usbio_i2c *i2c = i2c_get_adapdata(adap);
+ struct usbio_i2c_uninit ubuf;
+
+ ubuf.busid = i2c->adev->id;
+ ubuf.config = cpu_to_le16(msg->addr);
+
+ usbio_bulk_msg(i2c->adev, USBIO_PKTTYPE_I2C, USBIO_I2CCMD_UNINIT, true,
+ &ubuf, sizeof(ubuf), NULL, 0);
+}
+
+static int usbio_i2c_init(struct i2c_adapter *adap, struct i2c_msg *msg)
+{
+ struct usbio_i2c *i2c = i2c_get_adapdata(adap);
+ struct usbio_i2c_init ibuf;
+ void *reply_buf;
+ u16 reply_len;
+ int ret;
+
+ ibuf.busid = i2c->adev->id;
+ ibuf.config = cpu_to_le16(msg->addr);
+ ibuf.speed = cpu_to_le32(i2c->speed);
+
+ if (i2c->quirks & USBIO_QUIRK_I2C_NO_INIT_ACK) {
+ reply_buf = NULL;
+ reply_len = 0;
+ } else {
+ reply_buf = &ibuf;
+ reply_len = sizeof(ibuf);
+ }
+
+ ret = usbio_bulk_msg(i2c->adev, USBIO_PKTTYPE_I2C, USBIO_I2CCMD_INIT, true,
+ &ibuf, sizeof(ibuf), reply_buf, reply_len);
+ if (ret != sizeof(ibuf))
+ return (ret < 0) ? ret : -EIO;
+
+ return 0;
+}
+
+static int usbio_i2c_read(struct i2c_adapter *adap, struct i2c_msg *msg)
+{
+ struct usbio_i2c *i2c = i2c_get_adapdata(adap);
+ u16 rxchunk = i2c->rxbuf_len - I2C_RW_OVERHEAD;
+ struct usbio_i2c_rw *rbuf = i2c->rwbuf;
+ int ret;
+
+ rbuf->busid = i2c->adev->id;
+ rbuf->config = cpu_to_le16(msg->addr);
+ rbuf->size = cpu_to_le16(msg->len);
+
+ if (msg->len > rxchunk) {
+ /* Need to split the input buffer */
+ u16 len = 0;
+
+ do {
+ if (msg->len - len < rxchunk)
+ rxchunk = msg->len - len;
+
+ ret = usbio_bulk_msg(i2c->adev, USBIO_PKTTYPE_I2C,
+ USBIO_I2CCMD_READ, true,
+ rbuf, len == 0 ? sizeof(*rbuf) : 0,
+ rbuf, sizeof(*rbuf) + rxchunk);
+ if (ret < 0)
+ return ret;
+
+ memcpy(&msg->buf[len], rbuf->data, rxchunk);
+ len += rxchunk;
+ } while (msg->len > len);
+
+ return 0;
+ }
+
+ ret = usbio_bulk_msg(i2c->adev, USBIO_PKTTYPE_I2C, USBIO_I2CCMD_READ, true,
+ rbuf, sizeof(*rbuf), rbuf, sizeof(*rbuf) + msg->len);
+ if (ret != sizeof(*rbuf) + msg->len)
+ return (ret < 0) ? ret : -EIO;
+
+ memcpy(msg->buf, rbuf->data, msg->len);
+
+ return 0;
+}
+
+static int usbio_i2c_write(struct i2c_adapter *adap, struct i2c_msg *msg)
+{
+ struct usbio_i2c *i2c = i2c_get_adapdata(adap);
+ u16 txchunk = i2c->txbuf_len - I2C_RW_OVERHEAD;
+ struct usbio_i2c_rw *wbuf = i2c->rwbuf;
+ int ret;
+
+ if (msg->len > txchunk) {
+ /* Need to split the output buffer */
+ u16 len = 0;
+
+ do {
+ wbuf->busid = i2c->adev->id;
+ wbuf->config = cpu_to_le16(msg->addr);
+
+ if (i2c->quirks & USBIO_QUIRK_I2C_USE_CHUNK_LEN)
+ wbuf->size = cpu_to_le16(txchunk);
+ else
+ wbuf->size = cpu_to_le16(msg->len);
+
+ memcpy(wbuf->data, &msg->buf[len], txchunk);
+ len += txchunk;
+
+ ret = usbio_bulk_msg(i2c->adev, USBIO_PKTTYPE_I2C,
+ USBIO_I2CCMD_WRITE, msg->len == len,
+ wbuf, sizeof(*wbuf) + txchunk,
+ wbuf, sizeof(*wbuf));
+ if (ret < 0)
+ return ret;
+
+ if (msg->len - len < txchunk)
+ txchunk = msg->len - len;
+ } while (msg->len > len);
+
+ return 0;
+ }
+
+ wbuf->busid = i2c->adev->id;
+ wbuf->config = cpu_to_le16(msg->addr);
+ wbuf->size = cpu_to_le16(msg->len);
+ memcpy(wbuf->data, msg->buf, msg->len);
+
+ ret = usbio_bulk_msg(i2c->adev, USBIO_PKTTYPE_I2C, USBIO_I2CCMD_WRITE, true,
+ wbuf, sizeof(*wbuf) + msg->len, wbuf, sizeof(*wbuf));
+ if (ret != sizeof(*wbuf) || le16_to_cpu(wbuf->size) != msg->len)
+ return (ret < 0) ? ret : -EIO;
+
+ return 0;
+}
+
+static int usbio_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+ struct usbio_i2c *i2c = i2c_get_adapdata(adap);
+ int ret;
+
+ usbio_acquire(i2c->adev);
+
+ ret = usbio_i2c_init(adap, msgs);
+ if (ret)
+ goto out_release;
+
+ for (int i = 0; i < num; ret = ++i) {
+ if (msgs[i].flags & I2C_M_RD)
+ ret = usbio_i2c_read(adap, &msgs[i]);
+ else
+ ret = usbio_i2c_write(adap, &msgs[i]);
+
+ if (ret)
+ break;
+ }
+
+ usbio_i2c_uninit(adap, msgs);
+
+out_release:
+ usbio_release(i2c->adev);
+
+ return ret;
+}
+
+static u32 usbio_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_adapter_quirks usbio_i2c_quirks = {
+ .flags = I2C_AQ_NO_ZERO_LEN | I2C_AQ_NO_REP_START,
+ .max_read_len = SZ_4K,
+ .max_write_len = SZ_4K,
+};
+
+static const struct i2c_adapter_quirks usbio_i2c_quirks_max_rw_len52 = {
+ .flags = I2C_AQ_NO_ZERO_LEN | I2C_AQ_NO_REP_START,
+ .max_read_len = 52,
+ .max_write_len = 52,
+};
+
+static const struct i2c_algorithm usbio_i2c_algo = {
+ .master_xfer = usbio_i2c_xfer,
+ .functionality = usbio_i2c_func,
+};
+
+static int usbio_i2c_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *adev_id)
+{
+ struct usbio_i2c_bus_desc *i2c_desc;
+ struct device *dev = &adev->dev;
+ struct usbio_i2c *i2c;
+ u32 max_speed;
+ int ret;
+
+ i2c_desc = dev_get_platdata(dev);
+ if (!i2c_desc)
+ return -EINVAL;
+
+ i2c = devm_kzalloc(dev, sizeof(*i2c), GFP_KERNEL);
+ if (!i2c)
+ return -ENOMEM;
+
+ i2c->adev = adev;
+
+ usbio_acpi_bind(i2c->adev, usbio_i2c_acpi_hids);
+ usbio_get_txrxbuf_len(i2c->adev, &i2c->txbuf_len, &i2c->rxbuf_len);
+
+ i2c->rwbuf = devm_kzalloc(dev, max(i2c->txbuf_len, i2c->rxbuf_len), GFP_KERNEL);
+ if (!i2c->rwbuf)
+ return -ENOMEM;
+
+ i2c->quirks = usbio_get_quirks(i2c->adev);
+
+ max_speed = usbio_i2c_speeds[i2c_desc->caps & USBIO_I2C_BUS_MODE_CAP_MASK];
+ if (max_speed < I2C_MAX_FAST_MODE_FREQ &&
+ (i2c->quirks & USBIO_QUIRK_I2C_ALLOW_400KHZ))
+ max_speed = I2C_MAX_FAST_MODE_FREQ;
+
+ i2c->speed = i2c_acpi_find_bus_speed(dev);
+ if (!i2c->speed)
+ i2c->speed = I2C_MAX_STANDARD_MODE_FREQ;
+ else if (i2c->speed > max_speed) {
+ dev_warn(dev, "Invalid speed %u adjusting to bus max %u\n",
+ i2c->speed, max_speed);
+ i2c->speed = max_speed;
+ }
+
+ i2c->adap.owner = THIS_MODULE;
+ i2c->adap.class = I2C_CLASS_HWMON;
+ i2c->adap.dev.parent = dev;
+ i2c->adap.algo = &usbio_i2c_algo;
+
+ if (i2c->quirks & USBIO_QUIRK_I2C_MAX_RW_LEN_52)
+ i2c->adap.quirks = &usbio_i2c_quirks_max_rw_len52;
+ else
+ i2c->adap.quirks = &usbio_i2c_quirks;
+
+ snprintf(i2c->adap.name, sizeof(i2c->adap.name), "%s.%d",
+ USBIO_I2C_CLIENT, i2c->adev->id);
+
+ device_set_node(&i2c->adap.dev, dev_fwnode(&adev->dev));
+
+ auxiliary_set_drvdata(adev, i2c);
+ i2c_set_adapdata(&i2c->adap, i2c);
+
+ ret = i2c_add_adapter(&i2c->adap);
+ if (ret)
+ return ret;
+
+ if (has_acpi_companion(&i2c->adap.dev))
+ acpi_dev_clear_dependencies(ACPI_COMPANION(&i2c->adap.dev));
+
+ return 0;
+}
+
+static void usbio_i2c_remove(struct auxiliary_device *adev)
+{
+ struct usbio_i2c *i2c = auxiliary_get_drvdata(adev);
+
+ i2c_del_adapter(&i2c->adap);
+}
+
+static const struct auxiliary_device_id usbio_i2c_id_table[] = {
+ { "usbio.usbio-i2c" },
+ { }
+};
+MODULE_DEVICE_TABLE(auxiliary, usbio_i2c_id_table);
+
+static struct auxiliary_driver usbio_i2c_driver = {
+ .name = USBIO_I2C_CLIENT,
+ .probe = usbio_i2c_probe,
+ .remove = usbio_i2c_remove,
+ .id_table = usbio_i2c_id_table
+};
+module_auxiliary_driver(usbio_i2c_driver);
+
+MODULE_DESCRIPTION("Intel USBIO I2C driver");
+MODULE_AUTHOR("Israel Cepeda <israel.a.cepeda.lopez@intel.com>");
+MODULE_AUTHOR("Hans de Goede <hansg@kernel.org>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("USBIO");
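A rough sketch of the chunking arithmetic used by usbio_i2c_read() above: the usable payload per USB round trip is the rx buffer minus the packet header, and a long read simply repeats until msg->len is consumed. The helper below is illustrative only, mirroring the split loop.

#include <linux/kernel.h>

/* How many USBIO_I2CCMD_READ round trips a read of msg_len bytes needs. */
static u16 demo_read_chunks(u16 msg_len, u16 rxbuf_len)
{
	u16 rxchunk = rxbuf_len - I2C_RW_OVERHEAD;

	return DIV_ROUND_UP(msg_len, rxchunk);	/* 1 for the single-shot path */
}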
diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c
index 1bd602852e35..f596efcc291c 100644
--- a/drivers/i2c/busses/i2c-viperboard.c
+++ b/drivers/i2c/busses/i2c-viperboard.c
@@ -204,7 +204,7 @@ static int vprbrd_i2c_read(struct vprbrd *vb, struct i2c_msg *msg)
/* copy the received data */
memcpy(msg->buf + start, rmsg, len1);
- /* second read transfer if neccessary */
+ /* second read transfer if necessary */
if (len2 > 0) {
ret = vprbrd_i2c_receive(vb->usb_dev, rmsg, len2);
if (ret < 0)
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index ecca8c006b02..ae7e9c8b65a6 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -573,7 +573,8 @@ static int i2c_device_probe(struct device *dev)
goto err_clear_wakeup_irq;
do_power_on = !i2c_acpi_waive_d0_probe(dev);
- status = dev_pm_domain_attach(&client->dev, do_power_on ? PD_FLAG_ATTACH_POWER_ON : 0);
+ status = dev_pm_domain_attach(&client->dev, PD_FLAG_DETACH_POWER_OFF |
+ (do_power_on ? PD_FLAG_ATTACH_POWER_ON : 0));
if (status)
goto err_clear_wakeup_irq;
@@ -581,7 +582,7 @@ static int i2c_device_probe(struct device *dev)
GFP_KERNEL);
if (!client->devres_group_id) {
status = -ENOMEM;
- goto err_detach_pm_domain;
+ goto err_clear_wakeup_irq;
}
client->debugfs = debugfs_create_dir(dev_name(&client->dev),
@@ -608,8 +609,6 @@ static int i2c_device_probe(struct device *dev)
err_release_driver_resources:
debugfs_remove_recursive(client->debugfs);
devres_release_group(&client->dev, client->devres_group_id);
-err_detach_pm_domain:
- dev_pm_domain_detach(&client->dev, do_power_on);
err_clear_wakeup_irq:
dev_pm_clear_wake_irq(&client->dev);
device_init_wakeup(&client->dev, false);
@@ -636,8 +635,6 @@ static void i2c_device_remove(struct device *dev)
devres_release_group(&client->dev, client->devres_group_id);
- dev_pm_domain_detach(&client->dev, true);
-
dev_pm_clear_wake_irq(&client->dev);
device_init_wakeup(&client->dev, false);
diff --git a/drivers/i2c/i2c-core-slave.c b/drivers/i2c/i2c-core-slave.c
index 7ee6b992b835..02ca55c2246b 100644
--- a/drivers/i2c/i2c-core-slave.c
+++ b/drivers/i2c/i2c-core-slave.c
@@ -112,10 +112,9 @@ bool i2c_detect_slave_mode(struct device *dev)
struct fwnode_handle *fwnode = dev_fwnode(dev);
if (is_of_node(fwnode)) {
- struct fwnode_handle *child __free(fwnode_handle) = NULL;
u32 reg;
- fwnode_for_each_child_node(fwnode, child) {
+ fwnode_for_each_child_node_scoped(fwnode, child) {
fwnode_property_read_u32(child, "reg", &reg);
if (reg & I2C_OWN_SLAVE_ADDRESS)
return true;
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 4d8690981a55..d59644e50f14 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -241,12 +241,9 @@ struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent,
muxc->parent = parent;
muxc->dev = dev;
- if (flags & I2C_MUX_LOCKED)
- muxc->mux_locked = true;
- if (flags & I2C_MUX_ARBITRATOR)
- muxc->arbitrator = true;
- if (flags & I2C_MUX_GATE)
- muxc->gate = true;
+ muxc->mux_locked = !!(flags & I2C_MUX_LOCKED);
+ muxc->arbitrator = !!(flags & I2C_MUX_ARBITRATOR);
+ muxc->gate = !!(flags & I2C_MUX_GATE);
muxc->select = select;
muxc->deselect = deselect;
muxc->max_adapters = max_adapters;
diff --git a/drivers/i2c/muxes/i2c-mux-ltc4306.c b/drivers/i2c/muxes/i2c-mux-ltc4306.c
index c688af270a11..50fbc0d06e62 100644
--- a/drivers/i2c/muxes/i2c-mux-ltc4306.c
+++ b/drivers/i2c/muxes/i2c-mux-ltc4306.c
@@ -164,7 +164,7 @@ static int ltc4306_gpio_init(struct ltc4306 *data)
data->gpiochip.direction_input = ltc4306_gpio_direction_input;
data->gpiochip.direction_output = ltc4306_gpio_direction_output;
data->gpiochip.get = ltc4306_gpio_get;
- data->gpiochip.set_rv = ltc4306_gpio_set;
+ data->gpiochip.set = ltc4306_gpio_set;
data->gpiochip.set_config = ltc4306_gpio_set_config;
data->gpiochip.owner = THIS_MODULE;
diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
index 8663c8a7c269..3d8002caf703 100644
--- a/drivers/i2c/muxes/i2c-mux-pca9541.c
+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -63,10 +63,6 @@
#define mybus(x) (!((x) & MYBUS) || ((x) & MYBUS) == MYBUS)
#define busoff(x) (!((x) & BUSON) || ((x) & BUSON) == BUSON)
-/* arbitration timeouts, in jiffies */
-#define ARB_TIMEOUT (HZ / 8) /* 125 ms until forcing bus ownership */
-#define ARB2_TIMEOUT (HZ / 4) /* 250 ms until acquisition failure */
-
/* arbitration retry delays, in us */
#define SELECT_DELAY_SHORT 50
#define SELECT_DELAY_LONG 1000
@@ -229,6 +225,9 @@ static int pca9541_arbitrate(struct i2c_client *client)
*/
data->select_timeout = SELECT_DELAY_LONG;
if (time_is_before_eq_jiffies(data->arb_timeout)) {
+ dev_warn(&client->dev,
+ "Arbitration timeout on I2C bus, forcing bus ownership\n");
+
/* Time is up, take the bus and reset it. */
pca9541_reg_write(client,
PCA9541_CONTROL,
@@ -251,10 +250,10 @@ static int pca9541_select_chan(struct i2c_mux_core *muxc, u32 chan)
struct pca9541 *data = i2c_mux_priv(muxc);
struct i2c_client *client = data->client;
int ret;
- unsigned long timeout = jiffies + ARB2_TIMEOUT;
+ unsigned long timeout = jiffies + (2 * client->adapter->timeout);
/* give up after this time */
- data->arb_timeout = jiffies + ARB_TIMEOUT;
+ data->arb_timeout = jiffies + client->adapter->timeout;
/* force bus ownership after this time */
do {
@@ -267,6 +266,7 @@ static int pca9541_select_chan(struct i2c_mux_core *muxc, u32 chan)
else
msleep(data->select_timeout / 1000);
} while (time_is_after_eq_jiffies(timeout));
+ dev_warn(&client->dev, "Failed to acquire I2C bus, timed out\n");
return -ETIMEDOUT;
}
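To make the new deadlines concrete: the I2C core sets adap->timeout to HZ (one second) when a driver leaves it at zero, so with the default the replaced 125 ms / 250 ms constants become roughly 1 s / 2 s. A sketch of the arithmetic, not driver code:

#include <linux/i2c.h>
#include <linux/jiffies.h>

static void demo_deadlines(struct i2c_client *client,
			   unsigned long *arb, unsigned long *fail)
{
	/* Force bus ownership after this time; was HZ / 8 (125 ms). */
	*arb  = jiffies + client->adapter->timeout;
	/* Give up on acquisition after this time; was HZ / 4 (250 ms). */
	*fail = jiffies + 2 * client->adapter->timeout;
}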
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index b9f370c9f018..75c8d08fa24e 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -118,7 +118,6 @@ struct pca954x {
raw_spinlock_t lock;
struct regulator *supply;
- struct gpio_desc *reset_gpio;
struct reset_control *reset_cont;
};
@@ -316,6 +315,25 @@ static u8 pca954x_regval(struct pca954x *data, u8 chan)
return 1 << chan;
}
+static void pca954x_reset_assert(struct pca954x *data)
+{
+ if (data->reset_cont)
+ reset_control_assert(data->reset_cont);
+}
+
+static void pca954x_reset_deassert(struct pca954x *data)
+{
+ if (data->reset_cont)
+ reset_control_deassert(data->reset_cont);
+}
+
+static void pca954x_reset_mux(struct pca954x *data)
+{
+ pca954x_reset_assert(data);
+ udelay(1);
+ pca954x_reset_deassert(data);
+}
+
static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan)
{
struct pca954x *data = i2c_mux_priv(muxc);
@@ -329,6 +347,8 @@ static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan)
ret = pca954x_reg_write(muxc->parent, client, regval);
data->last_chan = ret < 0 ? 0 : regval;
}
+ if (ret == -ETIMEDOUT && data->reset_cont)
+ pca954x_reset_mux(data);
return ret;
}
@@ -338,6 +358,7 @@ static int pca954x_deselect_mux(struct i2c_mux_core *muxc, u32 chan)
struct pca954x *data = i2c_mux_priv(muxc);
struct i2c_client *client = data->client;
s32 idle_state;
+ int ret = 0;
idle_state = READ_ONCE(data->idle_state);
if (idle_state >= 0)
@@ -347,8 +368,10 @@ static int pca954x_deselect_mux(struct i2c_mux_core *muxc, u32 chan)
if (idle_state == MUX_IDLE_DISCONNECT) {
/* Deselect active channel */
data->last_chan = 0;
- return pca954x_reg_write(muxc->parent, client,
- data->last_chan);
+ ret = pca954x_reg_write(muxc->parent, client,
+ data->last_chan);
+ if (ret == -ETIMEDOUT && data->reset_cont)
+ pca954x_reset_mux(data);
}
/* otherwise leave as-is */
@@ -527,29 +550,10 @@ static int pca954x_get_reset(struct device *dev, struct pca954x *data)
if (IS_ERR(data->reset_cont))
return dev_err_probe(dev, PTR_ERR(data->reset_cont),
"Failed to get reset\n");
- else if (data->reset_cont)
- return 0;
-
- /*
- * fallback to legacy reset-gpios
- */
- data->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(data->reset_gpio)) {
- return dev_err_probe(dev, PTR_ERR(data->reset_gpio),
- "Failed to get reset gpio");
- }
return 0;
}
-static void pca954x_reset_deassert(struct pca954x *data)
-{
- if (data->reset_cont)
- reset_control_deassert(data->reset_cont);
- else
- gpiod_set_value_cansleep(data->reset_gpio, 0);
-}
-
/*
* I2C init/probing/exit functions
*/
@@ -589,7 +593,7 @@ static int pca954x_probe(struct i2c_client *client)
if (ret)
goto fail_cleanup;
- if (data->reset_cont || data->reset_gpio) {
+ if (data->reset_cont) {
udelay(1);
pca954x_reset_deassert(data);
/* Give the chip some time to recover. */
diff --git a/drivers/i3c/internals.h b/drivers/i3c/internals.h
index 0d857cc68cc5..79ceaa5f5afd 100644
--- a/drivers/i3c/internals.h
+++ b/drivers/i3c/internals.h
@@ -38,7 +38,11 @@ static inline void i3c_writel_fifo(void __iomem *addr, const void *buf,
u32 tmp = 0;
memcpy(&tmp, buf + (nbytes & ~3), nbytes & 3);
- writel(tmp, addr);
+ /*
+ * writesl() instead of writel() to keep the FIFO
+ * byte order on big-endian targets
+ */
+ writesl(addr, &tmp, 1);
}
}
@@ -55,7 +59,11 @@ static inline void i3c_readl_fifo(const void __iomem *addr, void *buf,
if (nbytes & 3) {
u32 tmp;
- tmp = readl(addr);
+ /*
+ * readsl() instead of readl() to keep the FIFO
+ * byte order on big-endian targets
+ */
+ readsl(addr, &tmp, 1);
memcpy(buf + (nbytes & ~3), &tmp, nbytes & 3);
}
}
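To make the byte-order argument concrete, here is a standalone sketch of the tail-byte path (the fifo pointer and tail count are hypothetical): the trailing bytes are packed into a u32 in memory order, so the store must not apply an endianness conversion.

#include <linux/io.h>
#include <linux/string.h>

static void demo_fifo_tail(void __iomem *fifo, const u8 *buf, unsigned int tail)
{
	u32 tmp = 0;

	memcpy(&tmp, buf, tail);	/* tail < 4 bytes, kept in memory order */
	writesl(fifo, &tmp, 1);		/* raw store: no endian conversion */
	/* writel(tmp, fifo) would cpu_to_le32()-swap the bytes on big-endian */
}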
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 2ef898a8fd80..d946db75df70 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -8,6 +8,7 @@
#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/kernel.h>
@@ -1728,6 +1729,79 @@ int i3c_master_do_daa(struct i3c_master_controller *master)
EXPORT_SYMBOL_GPL(i3c_master_do_daa);
/**
+ * i3c_master_dma_map_single() - Map buffer for single DMA transfer
+ * @dev: device object of a device doing DMA
+ * @buf: destination/source buffer for DMA
+ * @len: length of transfer
+ * @force_bounce: if true, force use of a bounce buffer; if false, the
+ * function checks whether a bounce buffer is required
+ * @dir: DMA direction
+ *
+ * Map buffer for a DMA transfer and allocate a bounce buffer if required.
+ *
+ * Return: I3C DMA transfer descriptor or NULL in case of error.
+ */
+struct i3c_dma *i3c_master_dma_map_single(struct device *dev, void *buf,
+ size_t len, bool force_bounce, enum dma_data_direction dir)
+{
+ struct i3c_dma *dma_xfer __free(kfree) = NULL;
+ void *bounce __free(kfree) = NULL;
+ void *dma_buf = buf;
+
+ dma_xfer = kzalloc(sizeof(*dma_xfer), GFP_KERNEL);
+ if (!dma_xfer)
+ return NULL;
+
+ dma_xfer->dev = dev;
+ dma_xfer->buf = buf;
+ dma_xfer->dir = dir;
+ dma_xfer->len = len;
+ dma_xfer->map_len = len;
+
+ if (is_vmalloc_addr(buf))
+ force_bounce = true;
+
+ if (force_bounce) {
+ dma_xfer->map_len = ALIGN(len, cache_line_size());
+ if (dir == DMA_FROM_DEVICE)
+ bounce = kzalloc(dma_xfer->map_len, GFP_KERNEL);
+ else
+ bounce = kmemdup(buf, dma_xfer->map_len, GFP_KERNEL);
+ if (!bounce)
+ return NULL;
+ dma_buf = bounce;
+ }
+
+ dma_xfer->addr = dma_map_single(dev, dma_buf, dma_xfer->map_len, dir);
+ if (dma_mapping_error(dev, dma_xfer->addr))
+ return NULL;
+
+ dma_xfer->bounce_buf = no_free_ptr(bounce);
+ return no_free_ptr(dma_xfer);
+}
+EXPORT_SYMBOL_GPL(i3c_master_dma_map_single);
+
+/**
+ * i3c_master_dma_unmap_single() - Unmap buffer after DMA
+ * @dma_xfer: DMA transfer and mapping descriptor
+ *
+ * Unmap buffer and cleanup DMA transfer descriptor.
+ */
+void i3c_master_dma_unmap_single(struct i3c_dma *dma_xfer)
+{
+ dma_unmap_single(dma_xfer->dev, dma_xfer->addr,
+ dma_xfer->map_len, dma_xfer->dir);
+ if (dma_xfer->bounce_buf) {
+ if (dma_xfer->dir == DMA_FROM_DEVICE)
+ memcpy(dma_xfer->buf, dma_xfer->bounce_buf,
+ dma_xfer->len);
+ kfree(dma_xfer->bounce_buf);
+ }
+ kfree(dma_xfer);
+}
+EXPORT_SYMBOL_GPL(i3c_master_dma_unmap_single);
+
+/**
* i3c_master_set_info() - set master device information
* @master: master used to send frames on the bus
* @info: I3C device information
@@ -2490,9 +2564,7 @@ static int i3c_master_i2c_adapter_init(struct i3c_master_controller *master)
adap->owner = master->dev.parent->driver->owner;
adap->algo = &i3c_master_i2c_algo;
strscpy(adap->name, dev_name(master->dev.parent), sizeof(adap->name));
-
- /* FIXME: Should we allow i3c masters to override these values? */
- adap->timeout = 1000;
+ adap->timeout = HZ;
adap->retries = 3;
id = of_alias_get_id(master->dev.of_node, "i2c");
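A hedged usage sketch for the two new DMA helpers, assuming they are declared in linux/i3c/master.h; the controller-programming step is elided and dma_dev/buf/len are caller-supplied. Note how unmap both copies any bounce data back and frees the descriptor, so the caller only pairs the two calls.

#include <linux/dma-mapping.h>
#include <linux/i3c/master.h>

static int demo_rx_via_dma(struct device *dma_dev, void *buf, size_t len)
{
	struct i3c_dma *dma;

	dma = i3c_master_dma_map_single(dma_dev, buf, len, false, DMA_FROM_DEVICE);
	if (!dma)
		return -ENOMEM;

	/* ... hand dma->addr to the controller and wait for completion ... */

	/* Copies bounce data back into buf (if a bounce buffer was used). */
	i3c_master_dma_unmap_single(dma);

	return 0;
}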
diff --git a/drivers/i3c/master/Kconfig b/drivers/i3c/master/Kconfig
index 13df2944f2ec..82cf330778d5 100644
--- a/drivers/i3c/master/Kconfig
+++ b/drivers/i3c/master/Kconfig
@@ -1,4 +1,15 @@
# SPDX-License-Identifier: GPL-2.0-only
+config ADI_I3C_MASTER
+ tristate "Analog Devices I3C master driver"
+ depends on HAS_IOMEM
+ help
+ Support for Analog Devices I3C Controller IP, an AXI-interfaced IP
+ core that supports I3C and I2C devices, multiple speed-grades and I3C
+ IBIs.
+
+ This driver can also be built as a module. If so, the module will be
+ called adi-i3c-master.
+
config CDNS_I3C_MASTER
tristate "Cadence I3C master driver"
depends on HAS_IOMEM
diff --git a/drivers/i3c/master/Makefile b/drivers/i3c/master/Makefile
index aac74f3e3851..816a227b6f7a 100644
--- a/drivers/i3c/master/Makefile
+++ b/drivers/i3c/master/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_ADI_I3C_MASTER) += adi-i3c-master.o
obj-$(CONFIG_CDNS_I3C_MASTER) += i3c-master-cdns.o
obj-$(CONFIG_DW_I3C_MASTER) += dw-i3c-master.o
obj-$(CONFIG_AST2600_I3C_MASTER) += ast2600-i3c-master.o
diff --git a/drivers/i3c/master/adi-i3c-master.c b/drivers/i3c/master/adi-i3c-master.c
new file mode 100644
index 000000000000..82ac0b3d057a
--- /dev/null
+++ b/drivers/i3c/master/adi-i3c-master.c
@@ -0,0 +1,1019 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * I3C Controller driver
+ * Copyright 2025 Analog Devices Inc.
+ * Author: Jorge Marques <jorge.marques@analog.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/adi-axi-common.h>
+#include <linux/i3c/master.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "../internals.h"
+
+#define ADI_MAX_DEVS 16
+#define ADI_HAS_MDB_FROM_BCR(x) (FIELD_GET(BIT(2), (x)))
+
+#define REG_ENABLE 0x040
+
+#define REG_PID_L 0x054
+#define REG_PID_H 0x058
+#define REG_DCR_BCR_DA 0x05c
+#define REG_DCR_BCR_DA_GET_DA(x) FIELD_GET(GENMASK(22, 16), (x))
+#define REG_DCR_BCR_DA_GET_BCR(x) FIELD_GET(GENMASK(15, 8), (x))
+#define REG_DCR_BCR_DA_GET_DCR(x) FIELD_GET(GENMASK(7, 0), (x))
+
+#define REG_IRQ_MASK 0x080
+#define REG_IRQ_PENDING 0x084
+#define REG_IRQ_PENDING_DAA BIT(7)
+#define REG_IRQ_PENDING_IBI BIT(6)
+#define REG_IRQ_PENDING_CMDR BIT(5)
+
+#define REG_CMD_FIFO 0x0d4
+#define REG_CMD_FIFO_0_IS_CCC BIT(22)
+#define REG_CMD_FIFO_0_BCAST BIT(21)
+#define REG_CMD_FIFO_0_SR BIT(20)
+#define REG_CMD_FIFO_0_LEN(l) FIELD_PREP(GENMASK(19, 8), (l))
+#define REG_CMD_FIFO_0_DEV_ADDR(a) FIELD_PREP(GENMASK(7, 1), (a))
+#define REG_CMD_FIFO_0_RNW BIT(0)
+#define REG_CMD_FIFO_1_CCC(id) FIELD_PREP(GENMASK(7, 0), (id))
+
+#define REG_CMD_FIFO_ROOM 0x0c0
+#define REG_CMDR_FIFO 0x0d8
+#define REG_CMDR_FIFO_UDA_ERROR 8
+#define REG_CMDR_FIFO_NACK_RESP 6
+#define REG_CMDR_FIFO_CE2_ERROR 4
+#define REG_CMDR_FIFO_CE0_ERROR 1
+#define REG_CMDR_FIFO_NO_ERROR 0
+#define REG_CMDR_FIFO_ERROR(x) FIELD_GET(GENMASK(23, 20), (x))
+#define REG_CMDR_FIFO_XFER_BYTES(x) FIELD_GET(GENMASK(19, 8), (x))
+
+#define REG_SDO_FIFO 0x0dc
+#define REG_SDO_FIFO_ROOM 0x0c8
+#define REG_SDI_FIFO 0x0e0
+#define REG_IBI_FIFO 0x0e4
+#define REG_FIFO_STATUS 0x0e8
+#define REG_FIFO_STATUS_CMDR_EMPTY BIT(0)
+#define REG_FIFO_STATUS_IBI_EMPTY BIT(1)
+
+#define REG_OPS 0x100
+#define REG_OPS_PP_SG_MASK GENMASK(6, 5)
+#define REG_OPS_SET_SG(x) FIELD_PREP(REG_OPS_PP_SG_MASK, (x))
+
+#define REG_IBI_CONFIG 0x140
+#define REG_IBI_CONFIG_ENABLE BIT(0)
+#define REG_IBI_CONFIG_LISTEN BIT(1)
+
+#define REG_DEV_CHAR 0x180
+#define REG_DEV_CHAR_IS_I2C BIT(0)
+#define REG_DEV_CHAR_IS_ATTACHED BIT(1)
+#define REG_DEV_CHAR_BCR_IBI(x) FIELD_PREP(GENMASK(3, 2), (x))
+#define REG_DEV_CHAR_WEN BIT(8)
+#define REG_DEV_CHAR_ADDR(x) FIELD_PREP(GENMASK(15, 9), (x))
+
+enum speed_grade {PP_SG_UNSET, PP_SG_1MHZ, PP_SG_3MHZ, PP_SG_6MHZ, PP_SG_12MHZ};
+
+struct adi_i3c_cmd {
+ u32 cmd0;
+ u32 cmd1;
+ u32 tx_len;
+ const void *tx_buf;
+ u32 rx_len;
+ void *rx_buf;
+ u32 error;
+};
+
+struct adi_i3c_xfer {
+ struct list_head node;
+ struct completion comp;
+ int ret;
+ unsigned int ncmds;
+ unsigned int ncmds_comp;
+ struct adi_i3c_cmd cmds[] __counted_by(ncmds);
+};
+
+struct adi_i3c_master {
+ struct i3c_master_controller base;
+ u32 free_rr_slots;
+ struct {
+ unsigned int num_slots;
+ struct i3c_dev_desc **slots;
+ spinlock_t lock; /* Protect IBI slot access */
+ } ibi;
+ struct {
+ struct list_head list;
+ struct adi_i3c_xfer *cur;
+ spinlock_t lock; /* Protect transfer */
+ } xferqueue;
+ void __iomem *regs;
+ struct clk *clk;
+ unsigned long i3c_scl_lim;
+ struct {
+ u8 addrs[ADI_MAX_DEVS];
+ u8 index;
+ } daa;
+};
+
+static inline struct adi_i3c_master *to_adi_i3c_master(struct i3c_master_controller *master)
+{
+ return container_of(master, struct adi_i3c_master, base);
+}
+
+static void adi_i3c_master_wr_to_tx_fifo(struct adi_i3c_master *master,
+ const u8 *buf, unsigned int nbytes)
+{
+ unsigned int n, m;
+
+ n = readl(master->regs + REG_SDO_FIFO_ROOM);
+ m = min(n, nbytes);
+ i3c_writel_fifo(master->regs + REG_SDO_FIFO, buf, m);
+}
+
+static void adi_i3c_master_rd_from_rx_fifo(struct adi_i3c_master *master,
+ u8 *buf, unsigned int nbytes)
+{
+ i3c_readl_fifo(master->regs + REG_SDI_FIFO, buf, nbytes);
+}
+
+static bool adi_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
+ const struct i3c_ccc_cmd *cmd)
+{
+ if (cmd->ndests > 1)
+ return false;
+
+ switch (cmd->id) {
+ case I3C_CCC_ENEC(true):
+ case I3C_CCC_ENEC(false):
+ case I3C_CCC_DISEC(true):
+ case I3C_CCC_DISEC(false):
+ case I3C_CCC_RSTDAA(true):
+ case I3C_CCC_RSTDAA(false):
+ case I3C_CCC_ENTDAA:
+ case I3C_CCC_SETDASA:
+ case I3C_CCC_SETNEWDA:
+ case I3C_CCC_GETMWL:
+ case I3C_CCC_GETMRL:
+ case I3C_CCC_GETPID:
+ case I3C_CCC_GETBCR:
+ case I3C_CCC_GETDCR:
+ case I3C_CCC_GETSTATUS:
+ case I3C_CCC_GETHDRCAP:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static int adi_i3c_master_disable(struct adi_i3c_master *master)
+{
+ writel(0, master->regs + REG_IBI_CONFIG);
+
+ return 0;
+}
+
+static struct adi_i3c_xfer *adi_i3c_master_alloc_xfer(struct adi_i3c_master *master,
+ unsigned int ncmds)
+{
+ struct adi_i3c_xfer *xfer;
+
+ xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
+ if (!xfer)
+ return NULL;
+
+ INIT_LIST_HEAD(&xfer->node);
+ xfer->ncmds = ncmds;
+ xfer->ret = -ETIMEDOUT;
+
+ return xfer;
+}
+
+static void adi_i3c_master_start_xfer_locked(struct adi_i3c_master *master)
+{
+ struct adi_i3c_xfer *xfer = master->xferqueue.cur;
+ unsigned int i, n, m;
+
+ if (!xfer)
+ return;
+
+ for (i = 0; i < xfer->ncmds; i++) {
+ struct adi_i3c_cmd *cmd = &xfer->cmds[i];
+
+ if (!(cmd->cmd0 & REG_CMD_FIFO_0_RNW))
+ adi_i3c_master_wr_to_tx_fifo(master, cmd->tx_buf, cmd->tx_len);
+ }
+
+ n = readl(master->regs + REG_CMD_FIFO_ROOM);
+ for (i = 0; i < xfer->ncmds; i++) {
+ struct adi_i3c_cmd *cmd = &xfer->cmds[i];
+
+ m = cmd->cmd0 & REG_CMD_FIFO_0_IS_CCC ? 2 : 1;
+ if (m > n)
+ break;
+ writel(cmd->cmd0, master->regs + REG_CMD_FIFO);
+ if (cmd->cmd0 & REG_CMD_FIFO_0_IS_CCC)
+ writel(cmd->cmd1, master->regs + REG_CMD_FIFO);
+ n -= m;
+ }
+}
+
+static void adi_i3c_master_end_xfer_locked(struct adi_i3c_master *master,
+ u32 pending)
+{
+ struct adi_i3c_xfer *xfer = master->xferqueue.cur;
+ int i, ret = 0;
+
+ if (!xfer)
+ return;
+
+ while (!(readl(master->regs + REG_FIFO_STATUS) & REG_FIFO_STATUS_CMDR_EMPTY)) {
+ struct adi_i3c_cmd *cmd;
+ u32 cmdr, rx_len;
+
+ cmdr = readl(master->regs + REG_CMDR_FIFO);
+
+ cmd = &xfer->cmds[xfer->ncmds_comp++];
+ if (cmd->cmd0 & REG_CMD_FIFO_0_RNW) {
+ rx_len = min_t(u32, REG_CMDR_FIFO_XFER_BYTES(cmdr), cmd->rx_len);
+ adi_i3c_master_rd_from_rx_fifo(master, cmd->rx_buf, rx_len);
+ }
+ cmd->error = REG_CMDR_FIFO_ERROR(cmdr);
+ }
+
+ for (i = 0; i < xfer->ncmds_comp; i++) {
+ switch (xfer->cmds[i].error) {
+ case REG_CMDR_FIFO_NO_ERROR:
+ break;
+
+ case REG_CMDR_FIFO_CE0_ERROR:
+ case REG_CMDR_FIFO_CE2_ERROR:
+ case REG_CMDR_FIFO_NACK_RESP:
+ case REG_CMDR_FIFO_UDA_ERROR:
+ ret = -EIO;
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ xfer->ret = ret;
+
+ if (xfer->ncmds_comp != xfer->ncmds)
+ return;
+
+ complete(&xfer->comp);
+
+ xfer = list_first_entry_or_null(&master->xferqueue.list,
+ struct adi_i3c_xfer, node);
+ if (xfer)
+ list_del_init(&xfer->node);
+
+ master->xferqueue.cur = xfer;
+ adi_i3c_master_start_xfer_locked(master);
+}
+
+static void adi_i3c_master_queue_xfer(struct adi_i3c_master *master,
+ struct adi_i3c_xfer *xfer)
+{
+ init_completion(&xfer->comp);
+ guard(spinlock_irqsave)(&master->xferqueue.lock);
+ if (master->xferqueue.cur) {
+ list_add_tail(&xfer->node, &master->xferqueue.list);
+ } else {
+ master->xferqueue.cur = xfer;
+ adi_i3c_master_start_xfer_locked(master);
+ }
+}
+
+static void adi_i3c_master_unqueue_xfer(struct adi_i3c_master *master,
+ struct adi_i3c_xfer *xfer)
+{
+ guard(spinlock_irqsave)(&master->xferqueue.lock);
+ if (master->xferqueue.cur == xfer)
+ master->xferqueue.cur = NULL;
+ else
+ list_del_init(&xfer->node);
+
+ writel(0x01, master->regs + REG_ENABLE);
+ writel(0x00, master->regs + REG_ENABLE);
+ writel(REG_IRQ_PENDING_CMDR, master->regs + REG_IRQ_MASK);
+}
+
+static enum i3c_error_code adi_i3c_cmd_get_err(struct adi_i3c_cmd *cmd)
+{
+ switch (cmd->error) {
+ case REG_CMDR_FIFO_CE0_ERROR:
+ return I3C_ERROR_M0;
+
+ case REG_CMDR_FIFO_CE2_ERROR:
+ case REG_CMDR_FIFO_NACK_RESP:
+ return I3C_ERROR_M2;
+
+ default:
+ break;
+ }
+
+ return I3C_ERROR_UNKNOWN;
+}
+
+static int adi_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
+ struct i3c_ccc_cmd *cmd)
+{
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_xfer *xfer __free(kfree) = NULL;
+ struct adi_i3c_cmd *ccmd;
+
+ xfer = adi_i3c_master_alloc_xfer(master, 1);
+ if (!xfer)
+ return -ENOMEM;
+
+ ccmd = xfer->cmds;
+ ccmd->cmd1 = REG_CMD_FIFO_1_CCC(cmd->id);
+ ccmd->cmd0 = REG_CMD_FIFO_0_IS_CCC |
+ REG_CMD_FIFO_0_LEN(cmd->dests[0].payload.len);
+
+ if (cmd->id & I3C_CCC_DIRECT)
+ ccmd->cmd0 |= REG_CMD_FIFO_0_DEV_ADDR(cmd->dests[0].addr);
+
+ if (cmd->rnw) {
+ ccmd->cmd0 |= REG_CMD_FIFO_0_RNW;
+ ccmd->rx_buf = cmd->dests[0].payload.data;
+ ccmd->rx_len = cmd->dests[0].payload.len;
+ } else {
+ ccmd->tx_buf = cmd->dests[0].payload.data;
+ ccmd->tx_len = cmd->dests[0].payload.len;
+ }
+
+ adi_i3c_master_queue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ adi_i3c_master_unqueue_xfer(master, xfer);
+
+ cmd->err = adi_i3c_cmd_get_err(&xfer->cmds[0]);
+
+ return 0;
+}
+
+static int adi_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_xfer *xfer __free(kfree) = NULL;
+ int i, ret;
+
+ if (!nxfers)
+ return 0;
+
+ xfer = adi_i3c_master_alloc_xfer(master, nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
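+ /* First message opens with a broadcast header; all but the last end in a repeated START */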
+ for (i = 0; i < nxfers; i++) {
+ struct adi_i3c_cmd *ccmd = &xfer->cmds[i];
+
+ ccmd->cmd0 = REG_CMD_FIFO_0_DEV_ADDR(dev->info.dyn_addr);
+
+ if (xfers[i].rnw) {
+ ccmd->cmd0 |= REG_CMD_FIFO_0_RNW;
+ ccmd->rx_buf = xfers[i].data.in;
+ ccmd->rx_len = xfers[i].len;
+ } else {
+ ccmd->tx_buf = xfers[i].data.out;
+ ccmd->tx_len = xfers[i].len;
+ }
+
+ ccmd->cmd0 |= REG_CMD_FIFO_0_LEN(xfers[i].len);
+
+ if (i < nxfers - 1)
+ ccmd->cmd0 |= REG_CMD_FIFO_0_SR;
+
+ if (!i)
+ ccmd->cmd0 |= REG_CMD_FIFO_0_BCAST;
+ }
+
+ adi_i3c_master_queue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp,
+ msecs_to_jiffies(1000)))
+ adi_i3c_master_unqueue_xfer(master, xfer);
+
+ ret = xfer->ret;
+
+ for (i = 0; i < nxfers; i++)
+ xfers[i].err = adi_i3c_cmd_get_err(&xfer->cmds[i]);
+
+ return ret;
+}
+
+struct adi_i3c_i2c_dev_data {
+ struct i3c_generic_ibi_pool *ibi_pool;
+ u16 id;
+ s16 ibi;
+};
+
+static int adi_i3c_master_get_rr_slot(struct adi_i3c_master *master,
+ u8 dyn_addr)
+{
+ if (!master->free_rr_slots)
+ return -ENOSPC;
+
+ return ffs(master->free_rr_slots) - 1;
+}
+
+static int adi_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev, u8 dyn_addr)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ u8 addr;
+
+ addr = dev->info.dyn_addr ? dev->info.dyn_addr : dev->info.static_addr;
+
+ writel(REG_DEV_CHAR_ADDR(dyn_addr), master->regs + REG_DEV_CHAR);
+ writel((readl(master->regs + REG_DEV_CHAR) &
+ ~REG_DEV_CHAR_IS_ATTACHED) | REG_DEV_CHAR_WEN,
+ master->regs + REG_DEV_CHAR);
+
+ writel(REG_DEV_CHAR_ADDR(addr), master->regs + REG_DEV_CHAR);
+ writel(readl(master->regs + REG_DEV_CHAR) |
+ REG_DEV_CHAR_IS_ATTACHED | REG_DEV_CHAR_WEN,
+ master->regs + REG_DEV_CHAR);
+
+ return 0;
+}
+
+static int adi_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_i2c_dev_data *data;
+ int slot;
+ u8 addr;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ slot = adi_i3c_master_get_rr_slot(master, dev->info.dyn_addr);
+ if (slot < 0) {
+ kfree(data);
+ return slot;
+ }
+
+ data->id = slot;
+ i3c_dev_set_master_data(dev, data);
+ master->free_rr_slots &= ~BIT(slot);
+
+ addr = dev->info.dyn_addr ? dev->info.dyn_addr : dev->info.static_addr;
+
+ writel(REG_DEV_CHAR_ADDR(addr), master->regs + REG_DEV_CHAR);
+ writel(readl(master->regs + REG_DEV_CHAR) |
+ REG_DEV_CHAR_IS_ATTACHED | REG_DEV_CHAR_WEN,
+ master->regs + REG_DEV_CHAR);
+
+ return 0;
+}
+
+static void adi_i3c_master_sync_dev_char(struct i3c_master_controller *m)
+{
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct i3c_dev_desc *i3cdev;
+ u32 bcr_ibi;
+ u8 addr;
+
+ i3c_bus_for_each_i3cdev(&m->bus, i3cdev) {
+ addr = i3cdev->info.dyn_addr ?
+ i3cdev->info.dyn_addr : i3cdev->info.static_addr;
+ writel(REG_DEV_CHAR_ADDR(addr), master->regs + REG_DEV_CHAR);
+ bcr_ibi = FIELD_GET(I3C_BCR_IBI_PAYLOAD | I3C_BCR_IBI_REQ_CAP,
+ i3cdev->info.bcr);
+ writel(readl(master->regs + REG_DEV_CHAR) |
+ REG_DEV_CHAR_BCR_IBI(bcr_ibi) | REG_DEV_CHAR_WEN,
+ master->regs + REG_DEV_CHAR);
+ }
+}
+
+static void adi_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ u8 addr;
+
+ addr = dev->info.dyn_addr ? dev->info.dyn_addr : dev->info.static_addr;
+
+ writel(REG_DEV_CHAR_ADDR(addr), master->regs + REG_DEV_CHAR);
+ writel((readl(master->regs + REG_DEV_CHAR) &
+ ~REG_DEV_CHAR_IS_ATTACHED) | REG_DEV_CHAR_WEN,
+ master->regs + REG_DEV_CHAR);
+
+ i3c_dev_set_master_data(dev, NULL);
+ master->free_rr_slots |= BIT(data->id);
+ kfree(data);
+}
+
+static int adi_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_i2c_dev_data *data;
+ int slot;
+
+ slot = adi_i3c_master_get_rr_slot(master, 0);
+ if (slot < 0)
+ return slot;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->id = slot;
+ master->free_rr_slots &= ~BIT(slot);
+ i2c_dev_set_master_data(dev, data);
+
+ writel(REG_DEV_CHAR_ADDR(dev->addr) |
+ REG_DEV_CHAR_IS_I2C | REG_DEV_CHAR_IS_ATTACHED | REG_DEV_CHAR_WEN,
+ master->regs + REG_DEV_CHAR);
+
+ return 0;
+}
+
+static void adi_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
+
+ writel(REG_DEV_CHAR_ADDR(dev->addr) |
+ REG_DEV_CHAR_IS_I2C | REG_DEV_CHAR_WEN,
+ master->regs + REG_DEV_CHAR);
+
+ i2c_dev_set_master_data(dev, NULL);
+ master->free_rr_slots |= BIT(data->id);
+ kfree(data);
+}
+
+static void adi_i3c_master_bus_cleanup(struct i3c_master_controller *m)
+{
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+
+ adi_i3c_master_disable(master);
+}
+
+static void adi_i3c_master_upd_i3c_scl_lim(struct adi_i3c_master *master)
+{
+ struct i3c_master_controller *m = &master->base;
+ struct i3c_bus *bus = i3c_master_get_bus(m);
+ u8 i3c_scl_lim = 0;
+ struct i3c_dev_desc *dev;
+ u8 pp_sg;
+
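+ /* Pick the most restrictive push-pull rate advertised across all devices */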
+ i3c_bus_for_each_i3cdev(bus, dev) {
+ u8 max_fscl;
+
+ max_fscl = max(I3C_CCC_MAX_SDR_FSCL(dev->info.max_read_ds),
+ I3C_CCC_MAX_SDR_FSCL(dev->info.max_write_ds));
+
+ switch (max_fscl) {
+ case I3C_SDR1_FSCL_8MHZ:
+ max_fscl = PP_SG_6MHZ;
+ break;
+ case I3C_SDR2_FSCL_6MHZ:
+ max_fscl = PP_SG_3MHZ;
+ break;
+ case I3C_SDR3_FSCL_4MHZ:
+ max_fscl = PP_SG_3MHZ;
+ break;
+ case I3C_SDR4_FSCL_2MHZ:
+ max_fscl = PP_SG_1MHZ;
+ break;
+ case I3C_SDR0_FSCL_MAX:
+ default:
+ max_fscl = PP_SG_12MHZ;
+ break;
+ }
+
+ if (max_fscl &&
+ (i3c_scl_lim > max_fscl || !i3c_scl_lim))
+ i3c_scl_lim = max_fscl;
+ }
+
+ if (!i3c_scl_lim)
+ return;
+
+ master->i3c_scl_lim = i3c_scl_lim - 1;
+
+ pp_sg = readl(master->regs + REG_OPS) & ~REG_OPS_PP_SG_MASK;
+ pp_sg |= REG_OPS_SET_SG(master->i3c_scl_lim);
+
+ writel(pp_sg, master->regs + REG_OPS);
+}
+
+static void adi_i3c_master_get_features(struct adi_i3c_master *master,
+ unsigned int slot,
+ struct i3c_device_info *info)
+{
+ u32 buf;
+
+ /* Dynamic address and PID are for identification only */
+ memset(info, 0, sizeof(*info));
+ buf = readl(master->regs + REG_DCR_BCR_DA);
+ info->dyn_addr = REG_DCR_BCR_DA_GET_DA(buf);
+ info->dcr = REG_DCR_BCR_DA_GET_DCR(buf);
+ info->bcr = REG_DCR_BCR_DA_GET_BCR(buf);
+ info->pid = readl(master->regs + REG_PID_L);
+ info->pid |= (u64)readl(master->regs + REG_PID_H) << 32;
+}
+
+static int adi_i3c_master_do_daa(struct i3c_master_controller *m)
+{
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ int ret, addr = 0;
+ u32 irq_mask;
+
+ for (u8 i = 0; i < ADI_MAX_DEVS; i++) {
+ addr = i3c_master_get_free_addr(m, addr);
+ if (addr < 0)
+ return addr;
+ master->daa.addrs[i] = addr;
+ }
+
+ irq_mask = readl(master->regs + REG_IRQ_MASK);
+ writel(irq_mask | REG_IRQ_PENDING_DAA,
+ master->regs + REG_IRQ_MASK);
+
+ master->daa.index = 0;
+ ret = i3c_master_entdaa_locked(&master->base);
+
+ writel(irq_mask, master->regs + REG_IRQ_MASK);
+
+ /* DAA always finishes with CE2_ERROR or NACK_RESP */
+ if (ret && ret != I3C_ERROR_M2)
+ return ret;
+
+ /* Add the I3C devices discovered during DAA */
+ for (u8 i = 0; i < master->daa.index; i++)
+ i3c_master_add_i3c_dev_locked(m, master->daa.addrs[i]);
+ /* Sync the retrieved device info with the IP */
+ adi_i3c_master_sync_dev_char(m);
+
+ i3c_master_defslvs_locked(&master->base);
+
+ adi_i3c_master_upd_i3c_scl_lim(master);
+
+ return 0;
+}
+
+static int adi_i3c_master_bus_init(struct i3c_master_controller *m)
+{
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct i3c_device_info info = { };
+ int ret;
+
+ ret = i3c_master_get_free_addr(m, 0);
+ if (ret < 0)
+ return ret;
+
+ adi_i3c_master_get_features(master, 0, &info);
+ ret = i3c_master_set_info(&master->base, &info);
+ if (ret)
+ return ret;
+
+ writel(REG_IBI_CONFIG_LISTEN,
+ master->regs + REG_IBI_CONFIG);
+
+ return 0;
+}
+
+static void adi_i3c_master_handle_ibi(struct adi_i3c_master *master,
+ u32 raw)
+{
+ struct adi_i3c_i2c_dev_data *data;
+ struct i3c_ibi_slot *slot;
+ struct i3c_dev_desc *dev;
+ u8 da, id, mdb, len;
+ u8 *buf;
+
+ da = FIELD_GET(GENMASK(23, 17), raw);
+ mdb = FIELD_GET(GENMASK(15, 8), raw);
+ for (id = 0; id < master->ibi.num_slots; id++) {
+ if (master->ibi.slots[id] &&
+ master->ibi.slots[id]->info.dyn_addr == da)
+ break;
+ }
+
+ if (id == master->ibi.num_slots)
+ return;
+
+ dev = master->ibi.slots[id];
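+ /* Payload is a single MDB byte when the BCR advertises an IBI payload */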
+ len = ADI_HAS_MDB_FROM_BCR(dev->info.bcr);
+ data = i3c_dev_get_master_data(dev);
+
+ guard(spinlock)(&master->ibi.lock);
+ slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
+ if (!slot)
+ return;
+
+ slot->len = len;
+ buf = slot->data;
+ buf[0] = mdb;
+ i3c_master_queue_ibi(dev, slot);
+}
+
+static void adi_i3c_master_demux_ibis(struct adi_i3c_master *master)
+{
+ while (!(readl(master->regs + REG_FIFO_STATUS) & REG_FIFO_STATUS_IBI_EMPTY)) {
+ u32 raw = readl(master->regs + REG_IBI_FIFO);
+
+ adi_i3c_master_handle_ibi(master, raw);
+ }
+}
+
+static void adi_i3c_master_handle_da_req(struct adi_i3c_master *master)
+{
+ u8 payload0[8];
+ u32 addr;
+
+ adi_i3c_master_rd_from_rx_fifo(master, payload0, 6);
+ addr = master->daa.addrs[master->daa.index++];
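+ /* Set the trailing bit so the 8-bit word carries odd parity */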
+ addr = (addr << 1) | (parity8(addr) ? 0 : 1);
+
+ writel(addr, master->regs + REG_SDO_FIFO);
+}
+
+static irqreturn_t adi_i3c_master_irq(int irq, void *data)
+{
+ struct adi_i3c_master *master = data;
+ u32 pending;
+
+ pending = readl(master->regs + REG_IRQ_PENDING);
+ writel(pending, master->regs + REG_IRQ_PENDING);
+ if (pending & REG_IRQ_PENDING_CMDR) {
+ scoped_guard(spinlock_irqsave, &master->xferqueue.lock) {
+ adi_i3c_master_end_xfer_locked(master, pending);
+ }
+ }
+ if (pending & REG_IRQ_PENDING_IBI)
+ adi_i3c_master_demux_ibis(master);
+ if (pending & REG_IRQ_PENDING_DAA)
+ adi_i3c_master_handle_da_req(master);
+
+ return IRQ_HANDLED;
+}
+
+static int adi_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
+ struct i2c_msg *xfers,
+ int nxfers)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_xfer *xfer __free(kfree) = NULL;
+ int i;
+
+ if (!nxfers)
+ return 0;
+ for (i = 0; i < nxfers; i++) {
+ if (xfers[i].flags & I2C_M_TEN)
+ return -EOPNOTSUPP;
+ }
+ xfer = adi_i3c_master_alloc_xfer(master, nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
+ for (i = 0; i < nxfers; i++) {
+ struct adi_i3c_cmd *ccmd = &xfer->cmds[i];
+
+ ccmd->cmd0 = REG_CMD_FIFO_0_DEV_ADDR(xfers[i].addr);
+
+ if (xfers[i].flags & I2C_M_RD) {
+ ccmd->cmd0 |= REG_CMD_FIFO_0_RNW;
+ ccmd->rx_buf = xfers[i].buf;
+ ccmd->rx_len = xfers[i].len;
+ } else {
+ ccmd->tx_buf = xfers[i].buf;
+ ccmd->tx_len = xfers[i].len;
+ }
+
+ ccmd->cmd0 |= REG_CMD_FIFO_0_LEN(xfers[i].len);
+ }
+
+ adi_i3c_master_queue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp,
+ m->i2c.timeout))
+ adi_i3c_master_unqueue_xfer(master, xfer);
+
+ return xfer->ret;
+}
+
+static int adi_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct i3c_dev_desc *i3cdev;
+ u32 enabled = 0;
+ int ret;
+
+ ret = i3c_master_disec_locked(m, dev->info.dyn_addr,
+ I3C_CCC_EVENT_SIR);
+
+ i3c_bus_for_each_i3cdev(&m->bus, i3cdev) {
+ if (dev != i3cdev && i3cdev->ibi)
+ enabled |= i3cdev->ibi->enabled;
+ }
+ if (!enabled) {
+ writel(REG_IBI_CONFIG_LISTEN,
+ master->regs + REG_IBI_CONFIG);
+ writel(readl(master->regs + REG_IRQ_MASK) & ~REG_IRQ_PENDING_IBI,
+ master->regs + REG_IRQ_MASK);
+ }
+
+ return ret;
+}
+
+static int adi_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+
+ writel(REG_IBI_CONFIG_LISTEN | REG_IBI_CONFIG_ENABLE,
+ master->regs + REG_IBI_CONFIG);
+
+ writel(readl(master->regs + REG_IRQ_MASK) | REG_IRQ_PENDING_IBI,
+ master->regs + REG_IRQ_MASK);
+
+ return i3c_master_enec_locked(m, dev->info.dyn_addr,
+ I3C_CCC_EVENT_SIR);
+}
+
+static int adi_i3c_master_request_ibi(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_i2c_dev_data *data;
+ unsigned int i;
+
+ data = i3c_dev_get_master_data(dev);
+ data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
+ if (IS_ERR(data->ibi_pool))
+ return PTR_ERR(data->ibi_pool);
+
+ scoped_guard(spinlock_irqsave, &master->ibi.lock) {
+ for (i = 0; i < master->ibi.num_slots; i++) {
+ if (!master->ibi.slots[i]) {
+ data->ibi = i;
+ master->ibi.slots[i] = dev;
+ break;
+ }
+ }
+ }
+
+ if (i < master->ibi.num_slots)
+ return 0;
+
+ i3c_generic_ibi_free_pool(data->ibi_pool);
+ data->ibi_pool = NULL;
+
+ return -ENOSPC;
+}
+
+static void adi_i3c_master_free_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+
+ scoped_guard(spinlock_irqsave, &master->ibi.lock) {
+ master->ibi.slots[data->ibi] = NULL;
+ }
+
+ i3c_generic_ibi_free_pool(data->ibi_pool);
+}
+
+static void adi_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot)
+{
+ struct adi_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+
+ i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
+}
+
+static const struct i3c_master_controller_ops adi_i3c_master_ops = {
+ .bus_init = adi_i3c_master_bus_init,
+ .bus_cleanup = adi_i3c_master_bus_cleanup,
+ .attach_i3c_dev = adi_i3c_master_attach_i3c_dev,
+ .reattach_i3c_dev = adi_i3c_master_reattach_i3c_dev,
+ .detach_i3c_dev = adi_i3c_master_detach_i3c_dev,
+ .attach_i2c_dev = adi_i3c_master_attach_i2c_dev,
+ .detach_i2c_dev = adi_i3c_master_detach_i2c_dev,
+ .do_daa = adi_i3c_master_do_daa,
+ .supports_ccc_cmd = adi_i3c_master_supports_ccc_cmd,
+ .send_ccc_cmd = adi_i3c_master_send_ccc_cmd,
+ .priv_xfers = adi_i3c_master_priv_xfers,
+ .i2c_xfers = adi_i3c_master_i2c_xfers,
+ .request_ibi = adi_i3c_master_request_ibi,
+ .enable_ibi = adi_i3c_master_enable_ibi,
+ .disable_ibi = adi_i3c_master_disable_ibi,
+ .free_ibi = adi_i3c_master_free_ibi,
+ .recycle_ibi_slot = adi_i3c_master_recycle_ibi_slot,
+};
+
+static const struct of_device_id adi_i3c_master_of_match[] = {
+ { .compatible = "adi,i3c-master-v1" },
+ {}
+};
+
+static int adi_i3c_master_probe(struct platform_device *pdev)
+{
+ struct adi_i3c_master *master;
+ struct clk_bulk_data *clk;
+ unsigned int version;
+ int ret, irq;
+
+ master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return -ENOMEM;
+
+ master->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(master->regs))
+ return PTR_ERR(master->regs);
+
+ ret = devm_clk_bulk_get_all_enabled(&pdev->dev, &clk);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to get clocks\n");
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ version = readl(master->regs + ADI_AXI_REG_VERSION);
+ if (ADI_AXI_PCORE_VER_MAJOR(version) != 1)
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "Unsupported peripheral version %u.%u.%u\n",
+ ADI_AXI_PCORE_VER_MAJOR(version),
+ ADI_AXI_PCORE_VER_MINOR(version),
+ ADI_AXI_PCORE_VER_PATCH(version));
+
+ writel(0x00, master->regs + REG_ENABLE);
+ writel(0x00, master->regs + REG_IRQ_MASK);
+
+ ret = devm_request_irq(&pdev->dev, irq, adi_i3c_master_irq, 0,
+ dev_name(&pdev->dev), master);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, master);
+
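+ /* Bits 1..ADI_MAX_DEVS mark free retaining-register slots; slot 0 stays with the controller */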
+ master->free_rr_slots = GENMASK(ADI_MAX_DEVS, 1);
+
+ writel(REG_IRQ_PENDING_CMDR, master->regs + REG_IRQ_MASK);
+
+ spin_lock_init(&master->ibi.lock);
+ master->ibi.num_slots = 15;
+ master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
+ sizeof(*master->ibi.slots),
+ GFP_KERNEL);
+ if (!master->ibi.slots)
+ return -ENOMEM;
+
+ spin_lock_init(&master->xferqueue.lock);
+ INIT_LIST_HEAD(&master->xferqueue.list);
+
+ return i3c_master_register(&master->base, &pdev->dev,
+ &adi_i3c_master_ops, false);
+}
+
+static void adi_i3c_master_remove(struct platform_device *pdev)
+{
+ struct adi_i3c_master *master = platform_get_drvdata(pdev);
+
+ writel(0xff, master->regs + REG_IRQ_PENDING);
+ writel(0x00, master->regs + REG_IRQ_MASK);
+ writel(0x01, master->regs + REG_ENABLE);
+
+ i3c_master_unregister(&master->base);
+}
+
+static struct platform_driver adi_i3c_master = {
+ .probe = adi_i3c_master_probe,
+ .remove = adi_i3c_master_remove,
+ .driver = {
+ .name = "adi-i3c-master",
+ .of_match_table = adi_i3c_master_of_match,
+ },
+};
+module_platform_driver(adi_i3c_master);
+
+MODULE_AUTHOR("Jorge Marques <jorge.marques@analog.com>");
+MODULE_DESCRIPTION("Analog Devices I3C master driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index 974122b2d20e..9ceedf09c3b6 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -1737,6 +1737,28 @@ static const struct dev_pm_ops dw_i3c_pm_ops = {
SET_RUNTIME_PM_OPS(dw_i3c_master_runtime_suspend, dw_i3c_master_runtime_resume, NULL)
};
+static void dw_i3c_shutdown(struct platform_device *pdev)
+{
+ struct dw_i3c_master *master = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev,
+ "<%s> cannot resume i3c bus master, err: %d\n",
+ __func__, ret);
+ return;
+ }
+
+ cancel_work_sync(&master->hj_work);
+
+ /* Disable interrupts */
+ writel((u32)~INTR_ALL, master->regs + INTR_STATUS_EN);
+ writel((u32)~INTR_ALL, master->regs + INTR_SIGNAL_EN);
+
+ pm_runtime_put_autosuspend(master->dev);
+}
+
static const struct of_device_id dw_i3c_master_of_match[] = {
{ .compatible = "snps,dw-i3c-master-1.00a", },
{},
@@ -1752,6 +1774,7 @@ MODULE_DEVICE_TABLE(acpi, amd_i3c_device_match);
static struct platform_driver dw_i3c_driver = {
.probe = dw_i3c_probe,
.remove = dw_i3c_remove,
+ .shutdown = dw_i3c_shutdown,
.driver = {
.name = "dw-i3c-master",
.of_match_table = dw_i3c_master_of_match,
diff --git a/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
index dd636094b07f..eb8a3ae2990d 100644
--- a/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
+++ b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
@@ -317,7 +317,9 @@ static int hci_cmd_v1_daa(struct i3c_hci *hci)
break;
next_addr = ret;
- DBG("next_addr = 0x%02x, DAA using DAT %d", next_addr, dat_idx);
+ dev_dbg(&hci->master.dev,
+ "next_addr = 0x%02x, DAA using DAT %d",
+ next_addr, dat_idx);
mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, dat_idx, next_addr);
mipi_i3c_hci_dct_index_reset(hci);
@@ -349,8 +351,9 @@ static int hci_cmd_v1_daa(struct i3c_hci *hci)
}
i3c_hci_dct_get_val(hci, 0, &pid, &dcr, &bcr);
- DBG("assigned address %#x to device PID=0x%llx DCR=%#x BCR=%#x",
- next_addr, pid, dcr, bcr);
+ dev_dbg(&hci->master.dev,
+ "assigned address %#x to device PID=0x%llx DCR=%#x BCR=%#x",
+ next_addr, pid, dcr, bcr);
mipi_i3c_hci_dat_v1.free_entry(hci, dat_idx);
dat_idx = -1;
diff --git a/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c b/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c
index 4493b2b067cb..efb4326a25b7 100644
--- a/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c
+++ b/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c
@@ -261,7 +261,7 @@ static int hci_cmd_v2_daa(struct i3c_hci *hci)
if (ret < 0)
break;
next_addr = ret;
- DBG("next_addr = 0x%02x", next_addr);
+ dev_dbg(&hci->master.dev, "next_addr = 0x%02x", next_addr);
xfer[0].cmd_tid = hci_get_tid();
xfer[0].cmd_desc[0] =
CMD_0_ATTR_A |
@@ -293,8 +293,9 @@ static int hci_cmd_v2_daa(struct i3c_hci *hci)
pid = (pid << 32) | device_id[0];
bcr = FIELD_GET(W1_MASK(55, 48), device_id[1]);
dcr = FIELD_GET(W1_MASK(63, 56), device_id[1]);
- DBG("assigned address %#x to device PID=0x%llx DCR=%#x BCR=%#x",
- next_addr, pid, dcr, bcr);
+ dev_dbg(&hci->master.dev,
+ "assigned address %#x to device PID=0x%llx DCR=%#x BCR=%#x",
+ next_addr, pid, dcr, bcr);
/*
* TODO: Extend the subsystem layer to allow for registering
* new device and provide BCR/DCR/PID at the same time.
diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c
index 60f1175f1f37..47e42cb4dbe7 100644
--- a/drivers/i3c/master/mipi-i3c-hci/core.c
+++ b/drivers/i3c/master/mipi-i3c-hci/core.c
@@ -121,8 +121,6 @@ static int i3c_hci_bus_init(struct i3c_master_controller *m)
struct i3c_device_info info;
int ret;
- DBG("");
-
if (hci->cmd == &mipi_i3c_hci_cmd_v1) {
ret = mipi_i3c_hci_dat_v1.init(hci);
if (ret)
@@ -149,7 +147,7 @@ static int i3c_hci_bus_init(struct i3c_master_controller *m)
amd_set_resp_buf_thld(hci);
reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
- DBG("HC_CONTROL = %#x", reg_read(HC_CONTROL));
+ dev_dbg(&hci->master.dev, "HC_CONTROL = %#x", reg_read(HC_CONTROL));
return 0;
}
@@ -159,8 +157,6 @@ static void i3c_hci_bus_cleanup(struct i3c_master_controller *m)
struct i3c_hci *hci = to_i3c_hci(m);
struct platform_device *pdev = to_platform_device(m->dev.parent);
- DBG("");
-
reg_clear(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
synchronize_irq(platform_get_irq(pdev, 0));
hci->io->cleanup(hci);
@@ -196,8 +192,8 @@ static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m,
DECLARE_COMPLETION_ONSTACK(done);
int i, last, ret = 0;
- DBG("cmd=%#x rnw=%d ndests=%d data[0].len=%d",
- ccc->id, ccc->rnw, ccc->ndests, ccc->dests[0].payload.len);
+ dev_dbg(&hci->master.dev, "cmd=%#x rnw=%d ndests=%d data[0].len=%d",
+ ccc->id, ccc->rnw, ccc->ndests, ccc->dests[0].payload.len);
xfer = hci_alloc_xfer(nxfers);
if (!xfer)
@@ -255,8 +251,8 @@ static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m,
}
if (ccc->rnw)
- DBG("got: %*ph",
- ccc->dests[0].payload.len, ccc->dests[0].payload.data);
+ dev_dbg(&hci->master.dev, "got: %*ph",
+ ccc->dests[0].payload.len, ccc->dests[0].payload.data);
out:
hci_free_xfer(xfer, nxfers);
@@ -267,39 +263,9 @@ static int i3c_hci_daa(struct i3c_master_controller *m)
{
struct i3c_hci *hci = to_i3c_hci(m);
- DBG("");
-
return hci->cmd->perform_daa(hci);
}
-static int i3c_hci_alloc_safe_xfer_buf(struct i3c_hci *hci,
- struct hci_xfer *xfer)
-{
- if (hci->io != &mipi_i3c_hci_dma ||
- xfer->data == NULL || !is_vmalloc_addr(xfer->data))
- return 0;
-
- if (xfer->rnw)
- xfer->bounce_buf = kzalloc(xfer->data_len, GFP_KERNEL);
- else
- xfer->bounce_buf = kmemdup(xfer->data,
- xfer->data_len, GFP_KERNEL);
-
- return xfer->bounce_buf == NULL ? -ENOMEM : 0;
-}
-
-static void i3c_hci_free_safe_xfer_buf(struct i3c_hci *hci,
- struct hci_xfer *xfer)
-{
- if (hci->io != &mipi_i3c_hci_dma || xfer->bounce_buf == NULL)
- return;
-
- if (xfer->rnw)
- memcpy(xfer->data, xfer->bounce_buf, xfer->data_len);
-
- kfree(xfer->bounce_buf);
-}
-
static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
struct i3c_priv_xfer *i3c_xfers,
int nxfers)
@@ -311,7 +277,7 @@ static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
unsigned int size_limit;
int i, last, ret = 0;
- DBG("nxfers = %d", nxfers);
+ dev_dbg(&hci->master.dev, "nxfers = %d", nxfers);
xfer = hci_alloc_xfer(nxfers);
if (!xfer)
@@ -333,9 +299,6 @@ static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
}
hci->cmd->prep_i3c_xfer(hci, dev, &xfer[i]);
xfer[i].cmd_desc[0] |= CMD_0_ROC;
- ret = i3c_hci_alloc_safe_xfer_buf(hci, &xfer[i]);
- if (ret)
- goto out;
}
last = i - 1;
xfer[last].cmd_desc[0] |= CMD_0_TOC;
@@ -359,9 +322,6 @@ static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
}
out:
- for (i = 0; i < nxfers; i++)
- i3c_hci_free_safe_xfer_buf(hci, &xfer[i]);
-
hci_free_xfer(xfer, nxfers);
return ret;
}
@@ -375,14 +335,14 @@ static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
DECLARE_COMPLETION_ONSTACK(done);
int i, last, ret = 0;
- DBG("nxfers = %d", nxfers);
+ dev_dbg(&hci->master.dev, "nxfers = %d", nxfers);
xfer = hci_alloc_xfer(nxfers);
if (!xfer)
return -ENOMEM;
for (i = 0; i < nxfers; i++) {
- xfer[i].data = i2c_get_dma_safe_msg_buf(&i2c_xfers[i], 1);
+ xfer[i].data = i2c_xfers[i].buf;
xfer[i].data_len = i2c_xfers[i].len;
xfer[i].rnw = i2c_xfers[i].flags & I2C_M_RD;
hci->cmd->prep_i2c_xfer(hci, dev, &xfer[i]);
@@ -408,10 +368,6 @@ static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
}
out:
- for (i = 0; i < nxfers; i++)
- i2c_put_dma_safe_msg_buf(xfer[i].data, &i2c_xfers[i],
- ret ? false : true);
-
hci_free_xfer(xfer, nxfers);
return ret;
}
@@ -423,8 +379,6 @@ static int i3c_hci_attach_i3c_dev(struct i3c_dev_desc *dev)
struct i3c_hci_dev_data *dev_data;
int ret;
- DBG("");
-
dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
if (!dev_data)
return -ENOMEM;
@@ -448,8 +402,6 @@ static int i3c_hci_reattach_i3c_dev(struct i3c_dev_desc *dev, u8 old_dyn_addr)
struct i3c_hci *hci = to_i3c_hci(m);
struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
- DBG("");
-
if (hci->cmd == &mipi_i3c_hci_cmd_v1)
mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, dev_data->dat_idx,
dev->info.dyn_addr);
@@ -462,8 +414,6 @@ static void i3c_hci_detach_i3c_dev(struct i3c_dev_desc *dev)
struct i3c_hci *hci = to_i3c_hci(m);
struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
- DBG("");
-
i3c_dev_set_master_data(dev, NULL);
if (hci->cmd == &mipi_i3c_hci_cmd_v1)
mipi_i3c_hci_dat_v1.free_entry(hci, dev_data->dat_idx);
@@ -477,8 +427,6 @@ static int i3c_hci_attach_i2c_dev(struct i2c_dev_desc *dev)
struct i3c_hci_dev_data *dev_data;
int ret;
- DBG("");
-
if (hci->cmd != &mipi_i3c_hci_cmd_v1)
return 0;
dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
@@ -502,8 +450,6 @@ static void i3c_hci_detach_i2c_dev(struct i2c_dev_desc *dev)
struct i3c_hci *hci = to_i3c_hci(m);
struct i3c_hci_dev_data *dev_data = i2c_dev_get_master_data(dev);
- DBG("");
-
if (dev_data) {
i2c_dev_set_master_data(dev, NULL);
if (hci->cmd == &mipi_i3c_hci_cmd_v1)
@@ -591,7 +537,7 @@ static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id)
val = reg_read(INTR_STATUS);
reg_write(INTR_STATUS, val);
- DBG("INTR_STATUS = %#x", val);
+ dev_dbg(&hci->master.dev, "INTR_STATUS %#x", val);
if (val)
result = IRQ_HANDLED;
@@ -641,7 +587,7 @@ static int i3c_hci_init(struct i3c_hci *hci)
}
hci->caps = reg_read(HC_CAPABILITIES);
- DBG("caps = %#x", hci->caps);
+ dev_dbg(&hci->master.dev, "caps = %#x", hci->caps);
size_in_dwords = hci->version_major < 1 ||
(hci->version_major == 1 && hci->version_minor < 1);
diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c
index 491dfe70b660..c401a9425cdc 100644
--- a/drivers/i3c/master/mipi-i3c-hci/dma.c
+++ b/drivers/i3c/master/mipi-i3c-hci/dma.c
@@ -14,6 +14,7 @@
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/io.h>
+#include <linux/pci.h>
#include "hci.h"
#include "cmd.h"
@@ -76,7 +77,6 @@
#define INTR_TRANSFER_COMPLETION BIT(11)
#define INTR_RING_OP BIT(10)
#define INTR_TRANSFER_ERR BIT(9)
-#define INTR_WARN_INS_STOP_MODE BIT(7)
#define INTR_IBI_RING_FULL BIT(6)
#define INTR_TRANSFER_ABORT BIT(5)
@@ -138,6 +138,7 @@ struct hci_rh_data {
};
struct hci_rings_data {
+ struct device *sysdev;
unsigned int total;
struct hci_rh_data headers[] __counted_by(total);
};
@@ -165,20 +166,20 @@ static void hci_dma_cleanup(struct i3c_hci *hci)
rh_reg_write(IBI_SETUP, 0);
if (rh->xfer)
- dma_free_coherent(&hci->master.dev,
+ dma_free_coherent(rings->sysdev,
rh->xfer_struct_sz * rh->xfer_entries,
rh->xfer, rh->xfer_dma);
if (rh->resp)
- dma_free_coherent(&hci->master.dev,
+ dma_free_coherent(rings->sysdev,
rh->resp_struct_sz * rh->xfer_entries,
rh->resp, rh->resp_dma);
kfree(rh->src_xfers);
if (rh->ibi_status)
- dma_free_coherent(&hci->master.dev,
+ dma_free_coherent(rings->sysdev,
rh->ibi_status_sz * rh->ibi_status_entries,
rh->ibi_status, rh->ibi_status_dma);
if (rh->ibi_data_dma)
- dma_unmap_single(&hci->master.dev, rh->ibi_data_dma,
+ dma_unmap_single(rings->sysdev, rh->ibi_data_dma,
rh->ibi_chunk_sz * rh->ibi_chunks_total,
DMA_FROM_DEVICE);
kfree(rh->ibi_data);
@@ -194,11 +195,23 @@ static int hci_dma_init(struct i3c_hci *hci)
{
struct hci_rings_data *rings;
struct hci_rh_data *rh;
+ struct device *sysdev;
u32 regval;
unsigned int i, nr_rings, xfers_sz, resps_sz;
unsigned int ibi_status_ring_sz, ibi_data_ring_sz;
int ret;
+ /*
+ * Point at the physical device that actually performs DMA and, when an
+ * IOMMU is enabled, has the IOMMU set up for it; use that device with
+ * the DMA API. Here it is either the parent of the "mipi-i3c-hci"
+ * platform device (OF/ACPI enumeration) or its grandparent (PCI
+ * enumeration).
+ */
+ sysdev = hci->master.dev.parent;
+ if (sysdev->parent && dev_is_pci(sysdev->parent))
+ sysdev = sysdev->parent;
+
regval = rhs_reg_read(CONTROL);
nr_rings = FIELD_GET(MAX_HEADER_COUNT_CAP, regval);
dev_info(&hci->master.dev, "%d DMA rings available\n", nr_rings);
@@ -213,6 +226,7 @@ static int hci_dma_init(struct i3c_hci *hci)
return -ENOMEM;
hci->io_data = rings;
rings->total = nr_rings;
+ rings->sysdev = sysdev;
regval = FIELD_PREP(MAX_HEADER_COUNT, rings->total);
rhs_reg_write(CONTROL, regval);
@@ -234,14 +248,15 @@ static int hci_dma_init(struct i3c_hci *hci)
regval = rh_reg_read(CR_SETUP);
rh->xfer_struct_sz = FIELD_GET(CR_XFER_STRUCT_SIZE, regval);
rh->resp_struct_sz = FIELD_GET(CR_RESP_STRUCT_SIZE, regval);
- DBG("xfer_struct_sz = %d, resp_struct_sz = %d",
- rh->xfer_struct_sz, rh->resp_struct_sz);
+ dev_dbg(&hci->master.dev,
+ "xfer_struct_sz = %d, resp_struct_sz = %d",
+ rh->xfer_struct_sz, rh->resp_struct_sz);
xfers_sz = rh->xfer_struct_sz * rh->xfer_entries;
resps_sz = rh->resp_struct_sz * rh->xfer_entries;
- rh->xfer = dma_alloc_coherent(&hci->master.dev, xfers_sz,
+ rh->xfer = dma_alloc_coherent(rings->sysdev, xfers_sz,
&rh->xfer_dma, GFP_KERNEL);
- rh->resp = dma_alloc_coherent(&hci->master.dev, resps_sz,
+ rh->resp = dma_alloc_coherent(rings->sysdev, resps_sz,
&rh->resp_dma, GFP_KERNEL);
rh->src_xfers =
kmalloc_array(rh->xfer_entries, sizeof(*rh->src_xfers),
@@ -263,7 +278,6 @@ static int hci_dma_init(struct i3c_hci *hci)
INTR_TRANSFER_COMPLETION |
INTR_RING_OP |
INTR_TRANSFER_ERR |
- INTR_WARN_INS_STOP_MODE |
INTR_IBI_RING_FULL |
INTR_TRANSFER_ABORT);
@@ -295,16 +309,16 @@ static int hci_dma_init(struct i3c_hci *hci)
ibi_data_ring_sz = rh->ibi_chunk_sz * rh->ibi_chunks_total;
rh->ibi_status =
- dma_alloc_coherent(&hci->master.dev, ibi_status_ring_sz,
+ dma_alloc_coherent(rings->sysdev, ibi_status_ring_sz,
&rh->ibi_status_dma, GFP_KERNEL);
rh->ibi_data = kmalloc(ibi_data_ring_sz, GFP_KERNEL);
ret = -ENOMEM;
if (!rh->ibi_status || !rh->ibi_data)
goto err_out;
rh->ibi_data_dma =
- dma_map_single(&hci->master.dev, rh->ibi_data,
+ dma_map_single(rings->sysdev, rh->ibi_data,
ibi_data_ring_sz, DMA_FROM_DEVICE);
- if (dma_mapping_error(&hci->master.dev, rh->ibi_data_dma)) {
+ if (dma_mapping_error(rings->sysdev, rh->ibi_data_dma)) {
rh->ibi_data_dma = 0;
ret = -ENOMEM;
goto err_out;
@@ -349,9 +363,7 @@ static void hci_dma_unmap_xfer(struct i3c_hci *hci,
xfer = xfer_list + i;
if (!xfer->data)
continue;
- dma_unmap_single(&hci->master.dev,
- xfer->data_dma, xfer->data_len,
- xfer->rnw ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ i3c_master_dma_unmap_single(xfer->dma);
}
}
@@ -362,7 +374,6 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
struct hci_rh_data *rh;
unsigned int i, ring, enqueue_ptr;
u32 op1_val, op2_val;
- void *buf;
/* For now we only use ring 0 */
ring = 0;
@@ -373,6 +384,9 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
for (i = 0; i < n; i++) {
struct hci_xfer *xfer = xfer_list + i;
u32 *ring_data = rh->xfer + rh->xfer_struct_sz * enqueue_ptr;
+ enum dma_data_direction dir = xfer->rnw ? DMA_FROM_DEVICE :
+ DMA_TO_DEVICE;
+ bool need_bounce;
/* store cmd descriptor */
*ring_data++ = xfer->cmd_desc[0];
@@ -391,21 +405,20 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
/* 2nd and 3rd words of Data Buffer Descriptor Structure */
if (xfer->data) {
- buf = xfer->bounce_buf ? xfer->bounce_buf : xfer->data;
- xfer->data_dma =
- dma_map_single(&hci->master.dev,
- buf,
- xfer->data_len,
- xfer->rnw ?
- DMA_FROM_DEVICE :
- DMA_TO_DEVICE);
- if (dma_mapping_error(&hci->master.dev,
- xfer->data_dma)) {
+ need_bounce = device_iommu_mapped(rings->sysdev) &&
+ xfer->rnw &&
+ xfer->data_len != ALIGN(xfer->data_len, 4);
+ xfer->dma = i3c_master_dma_map_single(rings->sysdev,
+ xfer->data,
+ xfer->data_len,
+ need_bounce,
+ dir);
+ if (!xfer->dma) {
hci_dma_unmap_xfer(hci, xfer_list, i);
return -ENOMEM;
}
- *ring_data++ = lower_32_bits(xfer->data_dma);
- *ring_data++ = upper_32_bits(xfer->data_dma);
+ *ring_data++ = lower_32_bits(xfer->dma->addr);
+ *ring_data++ = upper_32_bits(xfer->dma->addr);
} else {
*ring_data++ = 0;
*ring_data++ = 0;
@@ -511,11 +524,11 @@ static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh)
ring_resp = rh->resp + rh->resp_struct_sz * done_ptr;
resp = *ring_resp;
tid = RESP_TID(resp);
- DBG("resp = 0x%08x", resp);
+ dev_dbg(&hci->master.dev, "resp = 0x%08x", resp);
xfer = rh->src_xfers[done_ptr];
if (!xfer) {
- DBG("orphaned ring entry");
+ dev_dbg(&hci->master.dev, "orphaned ring entry");
} else {
hci_dma_unmap_xfer(hci, xfer, 1);
xfer->ring_entry = -1;
@@ -586,6 +599,7 @@ static void hci_dma_recycle_ibi_slot(struct i3c_hci *hci,
static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
{
+ struct hci_rings_data *rings = hci->io_data;
struct i3c_dev_desc *dev;
struct i3c_hci_dev_data *dev_data;
struct hci_dma_dev_ibi_data *dev_ibi;
@@ -617,7 +631,7 @@ static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
ring_ibi_status = rh->ibi_status + rh->ibi_status_sz * ptr;
ibi_status = *ring_ibi_status;
- DBG("status = %#x", ibi_status);
+ dev_dbg(&hci->master.dev, "status = %#x", ibi_status);
if (ibi_status_error) {
/* we no longer care */
@@ -645,7 +659,9 @@ static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
if (last_ptr == -1) {
/* this IBI sequence is not yet complete */
- DBG("no LAST_STATUS available (e=%d d=%d)", enq_ptr, deq_ptr);
+ dev_dbg(&hci->master.dev,
+ "no LAST_STATUS available (e=%d d=%d)",
+ enq_ptr, deq_ptr);
return;
}
deq_ptr = last_ptr + 1;
@@ -696,7 +712,7 @@ static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
* rh->ibi_chunk_sz;
if (first_part > ibi_size)
first_part = ibi_size;
- dma_sync_single_for_cpu(&hci->master.dev, ring_ibi_data_dma,
+ dma_sync_single_for_cpu(rings->sysdev, ring_ibi_data_dma,
first_part, DMA_FROM_DEVICE);
memcpy(slot->data, ring_ibi_data, first_part);
@@ -705,7 +721,7 @@ static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
/* we wrap back to the start and copy remaining data */
ring_ibi_data = rh->ibi_data;
ring_ibi_data_dma = rh->ibi_data_dma;
- dma_sync_single_for_cpu(&hci->master.dev, ring_ibi_data_dma,
+ dma_sync_single_for_cpu(rings->sysdev, ring_ibi_data_dma,
ibi_size - first_part, DMA_FROM_DEVICE);
memcpy(slot->data + first_part, ring_ibi_data,
ibi_size - first_part);
@@ -745,7 +761,8 @@ static bool hci_dma_irq_handler(struct i3c_hci *hci)
rh = &rings->headers[i];
status = rh_reg_read(INTR_STATUS);
- DBG("rh%d status: %#x", i, status);
+ dev_dbg(&hci->master.dev, "Ring %d: RH_INTR_STATUS %#x",
+ i, status);
if (!status)
continue;
rh_reg_write(INTR_STATUS, status);
@@ -761,7 +778,7 @@ static bool hci_dma_irq_handler(struct i3c_hci *hci)
u32 ring_status;
dev_notice_ratelimited(&hci->master.dev,
- "ring %d: Transfer Aborted\n", i);
+ "Ring %d: Transfer Aborted\n", i);
mipi_i3c_hci_resume(hci);
ring_status = rh_reg_read(RING_STATUS);
if (!(ring_status & RING_STATUS_RUNNING) &&
@@ -779,12 +796,9 @@ static bool hci_dma_irq_handler(struct i3c_hci *hci)
RING_CTRL_RUN_STOP);
}
}
- if (status & INTR_WARN_INS_STOP_MODE)
- dev_warn_ratelimited(&hci->master.dev,
- "ring %d: Inserted Stop on Mode Change\n", i);
if (status & INTR_IBI_RING_FULL)
dev_err_ratelimited(&hci->master.dev,
- "ring %d: IBI Ring Full Condition\n", i);
+ "Ring %d: IBI Ring Full Condition\n", i);
handled = true;
}
diff --git a/drivers/i3c/master/mipi-i3c-hci/ext_caps.c b/drivers/i3c/master/mipi-i3c-hci/ext_caps.c
index 2e9b23efdc45..7714f00ea9cc 100644
--- a/drivers/i3c/master/mipi-i3c-hci/ext_caps.c
+++ b/drivers/i3c/master/mipi-i3c-hci/ext_caps.c
@@ -35,7 +35,7 @@ static int hci_extcap_hardware_id(struct i3c_hci *hci, void __iomem *base)
switch (hci->vendor_mipi_id) {
case MIPI_VENDOR_NXP:
hci->quirks |= HCI_QUIRK_RAW_CCC;
- DBG("raw CCC quirks set");
+ dev_dbg(&hci->master.dev, "raw CCC quirks set");
break;
}
@@ -77,7 +77,8 @@ static int hci_extcap_xfer_modes(struct i3c_hci *hci, void __iomem *base)
for (index = 0; index < entries; index++) {
u32 mode_entry = readl(base);
- DBG("mode %d: 0x%08x", index, mode_entry);
+ dev_dbg(&hci->master.dev, "mode %d: 0x%08x",
+ index, mode_entry);
/* TODO: will be needed when I3C core does more than SDR */
base += 4;
}
@@ -97,7 +98,8 @@ static int hci_extcap_xfer_rates(struct i3c_hci *hci, void __iomem *base)
dev_info(&hci->master.dev, "available data rates:\n");
for (index = 0; index < entries; index++) {
rate_entry = readl(base);
- DBG("entry %d: 0x%08x", index, rate_entry);
+ dev_dbg(&hci->master.dev, "entry %d: 0x%08x",
+ index, rate_entry);
rate = FIELD_GET(XFERRATE_ACTUAL_RATE_KHZ, rate_entry);
rate_id = FIELD_GET(XFERRATE_RATE_ID, rate_entry);
mode_id = FIELD_GET(XFERRATE_MODE_ID, rate_entry);
@@ -268,7 +270,8 @@ int i3c_hci_parse_ext_caps(struct i3c_hci *hci)
cap_header = readl(curr_cap);
cap_id = FIELD_GET(CAP_HEADER_ID, cap_header);
cap_length = FIELD_GET(CAP_HEADER_LENGTH, cap_header);
- DBG("id=0x%02x length=%d", cap_id, cap_length);
+ dev_dbg(&hci->master.dev, "id=0x%02x length=%d",
+ cap_id, cap_length);
if (!cap_length)
break;
if (curr_cap + cap_length * 4 >= end) {
diff --git a/drivers/i3c/master/mipi-i3c-hci/hci.h b/drivers/i3c/master/mipi-i3c-hci/hci.h
index 69ea1d10414b..249ccb13c909 100644
--- a/drivers/i3c/master/mipi-i3c-hci/hci.h
+++ b/drivers/i3c/master/mipi-i3c-hci/hci.h
@@ -12,9 +12,6 @@
#include <linux/io.h>
-/* Handy logging macro to save on line length */
-#define DBG(x, ...) pr_devel("%s: " x "\n", __func__, ##__VA_ARGS__)
-
/* 32-bit word aware bit and mask macros */
#define W0_MASK(h, l) GENMASK((h) - 0, (l) - 0)
#define W1_MASK(h, l) GENMASK((h) - 32, (l) - 32)
@@ -94,8 +91,7 @@ struct hci_xfer {
};
struct {
/* DMA specific */
- dma_addr_t data_dma;
- void *bounce_buf;
+ struct i3c_dma *dma;
int ring_number;
int ring_entry;
};
diff --git a/drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c b/drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c
index c6c3a3ec11ea..08e6cbdf89ce 100644
--- a/drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c
+++ b/drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c
@@ -124,6 +124,9 @@ static void mipi_i3c_hci_pci_remove(struct pci_dev *pci)
}
static const struct pci_device_id mipi_i3c_hci_pci_devices[] = {
+ /* Wildcat Lake-U */
+ { PCI_VDEVICE(INTEL, 0x4d7c), (kernel_ulong_t)&intel_info},
+ { PCI_VDEVICE(INTEL, 0x4d6f), (kernel_ulong_t)&intel_info},
/* Panther Lake-H */
{ PCI_VDEVICE(INTEL, 0xe37c), (kernel_ulong_t)&intel_info},
{ PCI_VDEVICE(INTEL, 0xe36f), (kernel_ulong_t)&intel_info},
diff --git a/drivers/i3c/master/mipi-i3c-hci/pio.c b/drivers/i3c/master/mipi-i3c-hci/pio.c
index 2fc71e696911..710faa46a00f 100644
--- a/drivers/i3c/master/mipi-i3c-hci/pio.c
+++ b/drivers/i3c/master/mipi-i3c-hci/pio.c
@@ -213,8 +213,8 @@ static void hci_pio_cleanup(struct i3c_hci *hci)
pio_reg_write(INTR_SIGNAL_ENABLE, 0x0);
if (pio) {
- DBG("status = %#x/%#x",
- pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+ dev_dbg(&hci->master.dev, "status = %#x/%#x",
+ pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
BUG_ON(pio->curr_xfer);
BUG_ON(pio->curr_rx);
BUG_ON(pio->curr_tx);
@@ -226,13 +226,17 @@ static void hci_pio_cleanup(struct i3c_hci *hci)
static void hci_pio_write_cmd(struct i3c_hci *hci, struct hci_xfer *xfer)
{
- DBG("cmd_desc[%d] = 0x%08x", 0, xfer->cmd_desc[0]);
- DBG("cmd_desc[%d] = 0x%08x", 1, xfer->cmd_desc[1]);
+ dev_dbg(&hci->master.dev, "cmd_desc[%d] = 0x%08x",
+ 0, xfer->cmd_desc[0]);
+ dev_dbg(&hci->master.dev, "cmd_desc[%d] = 0x%08x",
+ 1, xfer->cmd_desc[1]);
pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[0]);
pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[1]);
if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
- DBG("cmd_desc[%d] = 0x%08x", 2, xfer->cmd_desc[2]);
- DBG("cmd_desc[%d] = 0x%08x", 3, xfer->cmd_desc[3]);
+ dev_dbg(&hci->master.dev, "cmd_desc[%d] = 0x%08x",
+ 2, xfer->cmd_desc[2]);
+ dev_dbg(&hci->master.dev, "cmd_desc[%d] = 0x%08x",
+ 3, xfer->cmd_desc[3]);
pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[2]);
pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[3]);
}
@@ -254,7 +258,8 @@ static bool hci_pio_do_rx(struct i3c_hci *hci, struct hci_pio_data *pio)
nr_words = min(xfer->data_left / 4, pio->rx_thresh_size);
/* extract data from FIFO */
xfer->data_left -= nr_words * 4;
- DBG("now %d left %d", nr_words * 4, xfer->data_left);
+ dev_dbg(&hci->master.dev, "now %d left %d",
+ nr_words * 4, xfer->data_left);
while (nr_words--)
*p++ = pio_reg_read(XFER_DATA_PORT);
}
@@ -269,7 +274,7 @@ static void hci_pio_do_trailing_rx(struct i3c_hci *hci,
struct hci_xfer *xfer = pio->curr_rx;
u32 *p;
- DBG("%d remaining", count);
+ dev_dbg(&hci->master.dev, "%d remaining", count);
p = xfer->data;
p += (xfer->data_len - xfer->data_left) / 4;
@@ -278,7 +283,8 @@ static void hci_pio_do_trailing_rx(struct i3c_hci *hci,
unsigned int nr_words = count / 4;
/* extract data from FIFO */
xfer->data_left -= nr_words * 4;
- DBG("now %d left %d", nr_words * 4, xfer->data_left);
+ dev_dbg(&hci->master.dev, "now %d left %d",
+ nr_words * 4, xfer->data_left);
while (nr_words--)
*p++ = pio_reg_read(XFER_DATA_PORT);
}
@@ -321,7 +327,8 @@ static bool hci_pio_do_tx(struct i3c_hci *hci, struct hci_pio_data *pio)
nr_words = min(xfer->data_left / 4, pio->tx_thresh_size);
/* push data into the FIFO */
xfer->data_left -= nr_words * 4;
- DBG("now %d left %d", nr_words * 4, xfer->data_left);
+ dev_dbg(&hci->master.dev, "now %d left %d",
+ nr_words * 4, xfer->data_left);
while (nr_words--)
pio_reg_write(XFER_DATA_PORT, *p++);
}
@@ -336,7 +343,7 @@ static bool hci_pio_do_tx(struct i3c_hci *hci, struct hci_pio_data *pio)
*/
if (!(pio_reg_read(INTR_STATUS) & STAT_TX_THLD))
return false;
- DBG("trailing %d", xfer->data_left);
+ dev_dbg(&hci->master.dev, "trailing %d", xfer->data_left);
pio_reg_write(XFER_DATA_PORT, *p);
xfer->data_left = 0;
}
@@ -481,7 +488,7 @@ static bool hci_pio_process_resp(struct i3c_hci *hci, struct hci_pio_data *pio)
u32 resp = pio_reg_read(RESPONSE_QUEUE_PORT);
unsigned int tid = RESP_TID(resp);
- DBG("resp = 0x%08x", resp);
+ dev_dbg(&hci->master.dev, "resp = 0x%08x", resp);
if (tid != xfer->cmd_tid) {
dev_err(&hci->master.dev,
"response tid=%d when expecting %d\n",
@@ -522,14 +529,15 @@ static bool hci_pio_process_resp(struct i3c_hci *hci, struct hci_pio_data *pio)
* still exists.
*/
if (pio->curr_rx == xfer) {
- DBG("short RX ?");
+ dev_dbg(&hci->master.dev, "short RX ?");
pio->curr_rx = pio->curr_rx->next_data;
} else if (pio->curr_tx == xfer) {
- DBG("short TX ?");
+ dev_dbg(&hci->master.dev, "short TX ?");
pio->curr_tx = pio->curr_tx->next_data;
} else if (xfer->data_left) {
- DBG("PIO xfer count = %d after response",
- xfer->data_left);
+ dev_dbg(&hci->master.dev,
+ "PIO xfer count = %d after response",
+ xfer->data_left);
}
pio->curr_resp = xfer->next_resp;
@@ -591,7 +599,7 @@ static int hci_pio_queue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
struct hci_xfer *prev_queue_tail;
int i;
- DBG("n = %d", n);
+ dev_dbg(&hci->master.dev, "n = %d", n);
/* link xfer instances together and initialize data count */
for (i = 0; i < n; i++) {
@@ -611,8 +619,9 @@ static int hci_pio_queue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
if (!hci_pio_process_cmd(hci, pio))
pio->enabled_irqs |= STAT_CMD_QUEUE_READY;
pio_reg_write(INTR_SIGNAL_ENABLE, pio->enabled_irqs);
- DBG("status = %#x/%#x",
- pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+ dev_dbg(&hci->master.dev, "status = %#x/%#x",
+ pio_reg_read(INTR_STATUS),
+ pio_reg_read(INTR_SIGNAL_ENABLE));
}
spin_unlock_irq(&pio->lock);
return 0;
@@ -686,10 +695,10 @@ static bool hci_pio_dequeue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int
int ret;
spin_lock_irq(&pio->lock);
- DBG("n=%d status=%#x/%#x", n,
- pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
- DBG("main_status = %#x/%#x",
- readl(hci->base_regs + 0x20), readl(hci->base_regs + 0x28));
+ dev_dbg(&hci->master.dev, "n=%d status=%#x/%#x", n,
+ pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+ dev_dbg(&hci->master.dev, "main_status = %#x/%#x",
+ readl(hci->base_regs + 0x20), readl(hci->base_regs + 0x28));
ret = hci_pio_dequeue_xfer_common(hci, pio, xfer, n);
spin_unlock_irq(&pio->lock);
@@ -733,8 +742,8 @@ static void hci_pio_err(struct i3c_hci *hci, struct hci_pio_data *pio,
mipi_i3c_hci_pio_reset(hci);
mipi_i3c_hci_resume(hci);
- DBG("status=%#x/%#x",
- pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+ dev_dbg(&hci->master.dev, "status=%#x/%#x",
+ pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
}
static void hci_pio_set_ibi_thresh(struct i3c_hci *hci,
@@ -749,7 +758,7 @@ static void hci_pio_set_ibi_thresh(struct i3c_hci *hci,
if (regval != pio->reg_queue_thresh) {
pio_reg_write(QUEUE_THLD_CTRL, regval);
pio->reg_queue_thresh = regval;
- DBG("%d", thresh_val);
+ dev_dbg(&hci->master.dev, "%d", thresh_val);
}
}
@@ -773,7 +782,8 @@ static bool hci_pio_get_ibi_segment(struct i3c_hci *hci,
/* extract the data from the IBI port */
nr_words = thresh_val;
ibi->seg_cnt -= nr_words * 4;
- DBG("now %d left %d", nr_words * 4, ibi->seg_cnt);
+ dev_dbg(&hci->master.dev, "now %d left %d",
+ nr_words * 4, ibi->seg_cnt);
while (nr_words--)
*p++ = pio_reg_read(IBI_PORT);
}
@@ -791,7 +801,7 @@ static bool hci_pio_get_ibi_segment(struct i3c_hci *hci,
hci_pio_set_ibi_thresh(hci, pio, 1);
if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD))
return false;
- DBG("trailing %d", ibi->seg_cnt);
+ dev_dbg(&hci->master.dev, "trailing %d", ibi->seg_cnt);
data = pio_reg_read(IBI_PORT);
data = (__force u32) cpu_to_le32(data);
while (ibi->seg_cnt--) {
@@ -820,7 +830,7 @@ static bool hci_pio_prep_new_ibi(struct i3c_hci *hci, struct hci_pio_data *pio)
*/
ibi_status = pio_reg_read(IBI_PORT);
- DBG("status = %#x", ibi_status);
+ dev_dbg(&hci->master.dev, "status = %#x", ibi_status);
ibi->addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status);
if (ibi_status & IBI_ERROR) {
dev_err(&hci->master.dev, "IBI error from %#x\n", ibi->addr);
@@ -986,7 +996,8 @@ static bool hci_pio_irq_handler(struct i3c_hci *hci)
spin_lock(&pio->lock);
status = pio_reg_read(INTR_STATUS);
- DBG("(in) status: %#x/%#x", status, pio->enabled_irqs);
+ dev_dbg(&hci->master.dev, "PIO_INTR_STATUS %#x/%#x",
+ status, pio->enabled_irqs);
status &= pio->enabled_irqs | STAT_LATENCY_WARNINGS;
if (!status) {
spin_unlock(&pio->lock);
@@ -1023,8 +1034,8 @@ static bool hci_pio_irq_handler(struct i3c_hci *hci)
pio->enabled_irqs &= ~STAT_CMD_QUEUE_READY;
pio_reg_write(INTR_SIGNAL_ENABLE, pio->enabled_irqs);
- DBG("(out) status: %#x/%#x",
- pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+ dev_dbg(&hci->master.dev, "PIO_INTR_STATUS %#x/%#x",
+ pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
spin_unlock(&pio->lock);
return true;
}
diff --git a/drivers/i3c/master/renesas-i3c.c b/drivers/i3c/master/renesas-i3c.c
index 174d3dc5d276..275f7b924288 100644
--- a/drivers/i3c/master/renesas-i3c.c
+++ b/drivers/i3c/master/renesas-i3c.c
@@ -679,7 +679,7 @@ static int renesas_i3c_daa(struct i3c_master_controller *m)
i3c_master_add_i3c_dev_locked(m, i3c->addrs[pos]);
}
- return ret < 0 ? ret : 0;
+ return 0;
}
static bool renesas_i3c_supports_ccc_cmd(struct i3c_master_controller *m,
diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
index 701ae165b25b..9641e66a4e5f 100644
--- a/drivers/i3c/master/svc-i3c-master.c
+++ b/drivers/i3c/master/svc-i3c-master.c
@@ -417,6 +417,7 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000);
if (ret) {
dev_err(master->dev, "Timeout when polling for COMPLETE\n");
+ i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
return ret;
}
@@ -517,9 +518,24 @@ static void svc_i3c_master_ibi_isr(struct svc_i3c_master *master)
*/
writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
- /* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
- writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
- SVC_I3C_MCTRL_IBIRESP_AUTO,
+ /*
+ * Write a REQUEST_START_ADDR request to emit the broadcast address for
+ * arbitration, instead of using AUTO_IBI.
+ *
+ * Using an AutoIBI request may leave the controller stuck in the AutoIBI
+ * state when there is a glitch on the SDA line (high->low->high):
+ * 1. SDA high->low, raising an interrupt that runs the IBI isr.
+ * 2. SDA low->high.
+ * 3. The IBI isr writes an AutoIBI request.
+ * 4. The controller does not start the AutoIBI process because SDA is not low.
+ * 5. IBIWON polling times out.
+ * 6. The controller remains in the AutoIBI state and does not accept an
+ *    EmitStop request.
+ */
+ writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
+ SVC_I3C_MCTRL_TYPE_I3C |
+ SVC_I3C_MCTRL_IBIRESP_MANUAL |
+ SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE) |
+ SVC_I3C_MCTRL_ADDR(I3C_BROADCAST_ADDR),
master->regs + SVC_I3C_MCTRL);
/* Wait for IBIWON, should take approximately 100us */
@@ -539,10 +555,15 @@ static void svc_i3c_master_ibi_isr(struct svc_i3c_master *master)
switch (ibitype) {
case SVC_I3C_MSTATUS_IBITYPE_IBI:
dev = svc_i3c_master_dev_from_addr(master, ibiaddr);
- if (!dev || !is_events_enabled(master, SVC_I3C_EVENT_IBI))
+ if (!dev || !is_events_enabled(master, SVC_I3C_EVENT_IBI)) {
svc_i3c_master_nack_ibi(master);
- else
+ } else {
+ if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD)
+ svc_i3c_master_ack_ibi(master, true);
+ else
+ svc_i3c_master_ack_ibi(master, false);
svc_i3c_master_handle_ibi(master, dev);
+ }
break;
case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 73747d20df85..9ba83954c255 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -259,7 +259,7 @@ static struct cpuidle_state nehalem_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 3,
.target_residency = 6,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -267,7 +267,7 @@ static struct cpuidle_state nehalem_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -275,7 +275,7 @@ static struct cpuidle_state nehalem_cstates[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 20,
.target_residency = 80,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -283,7 +283,7 @@ static struct cpuidle_state nehalem_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 200,
.target_residency = 800,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -296,7 +296,7 @@ static struct cpuidle_state snb_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -304,7 +304,7 @@ static struct cpuidle_state snb_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -312,7 +312,7 @@ static struct cpuidle_state snb_cstates[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 80,
.target_residency = 211,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -320,7 +320,7 @@ static struct cpuidle_state snb_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 104,
.target_residency = 345,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7",
@@ -328,7 +328,7 @@ static struct cpuidle_state snb_cstates[] __initdata = {
.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 109,
.target_residency = 345,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -341,7 +341,7 @@ static struct cpuidle_state byt_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6N",
@@ -349,7 +349,7 @@ static struct cpuidle_state byt_cstates[] __initdata = {
.flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 300,
.target_residency = 275,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6S",
@@ -357,7 +357,7 @@ static struct cpuidle_state byt_cstates[] __initdata = {
.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 500,
.target_residency = 560,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7",
@@ -365,7 +365,7 @@ static struct cpuidle_state byt_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 1200,
.target_residency = 4000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7S",
@@ -373,7 +373,7 @@ static struct cpuidle_state byt_cstates[] __initdata = {
.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 10000,
.target_residency = 20000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -386,7 +386,7 @@ static struct cpuidle_state cht_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6N",
@@ -394,7 +394,7 @@ static struct cpuidle_state cht_cstates[] __initdata = {
.flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 80,
.target_residency = 275,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6S",
@@ -402,7 +402,7 @@ static struct cpuidle_state cht_cstates[] __initdata = {
.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 200,
.target_residency = 560,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7",
@@ -410,7 +410,7 @@ static struct cpuidle_state cht_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 1200,
.target_residency = 4000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7S",
@@ -418,7 +418,7 @@ static struct cpuidle_state cht_cstates[] __initdata = {
.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 10000,
.target_residency = 20000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -431,7 +431,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -439,7 +439,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -447,7 +447,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 59,
.target_residency = 156,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -455,7 +455,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 80,
.target_residency = 300,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7",
@@ -463,7 +463,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = {
.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 87,
.target_residency = 300,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -476,7 +476,7 @@ static struct cpuidle_state ivt_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -484,7 +484,7 @@ static struct cpuidle_state ivt_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 80,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -492,7 +492,7 @@ static struct cpuidle_state ivt_cstates[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 59,
.target_residency = 156,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -500,7 +500,7 @@ static struct cpuidle_state ivt_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 82,
.target_residency = 300,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -513,7 +513,7 @@ static struct cpuidle_state ivt_cstates_4s[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -521,7 +521,7 @@ static struct cpuidle_state ivt_cstates_4s[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 250,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -529,7 +529,7 @@ static struct cpuidle_state ivt_cstates_4s[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 59,
.target_residency = 300,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -537,7 +537,7 @@ static struct cpuidle_state ivt_cstates_4s[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 84,
.target_residency = 400,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -550,7 +550,7 @@ static struct cpuidle_state ivt_cstates_8s[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -558,7 +558,7 @@ static struct cpuidle_state ivt_cstates_8s[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 500,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -566,7 +566,7 @@ static struct cpuidle_state ivt_cstates_8s[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 59,
.target_residency = 600,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -574,7 +574,7 @@ static struct cpuidle_state ivt_cstates_8s[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 88,
.target_residency = 700,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -587,7 +587,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -595,7 +595,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -603,7 +603,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 33,
.target_residency = 100,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -611,7 +611,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 133,
.target_residency = 400,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7s",
@@ -619,7 +619,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 166,
.target_residency = 500,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
@@ -627,7 +627,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 300,
.target_residency = 900,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C9",
@@ -635,7 +635,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 600,
.target_residency = 1800,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
@@ -643,7 +643,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 2600,
.target_residency = 7700,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -655,7 +655,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -663,7 +663,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -671,7 +671,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 40,
.target_residency = 100,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -679,7 +679,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 133,
.target_residency = 400,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7s",
@@ -687,7 +687,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = {
.flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 166,
.target_residency = 500,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
@@ -695,7 +695,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = {
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 300,
.target_residency = 900,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C9",
@@ -703,7 +703,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = {
.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 600,
.target_residency = 1800,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
@@ -711,7 +711,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 2600,
.target_residency = 7700,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -724,7 +724,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -732,7 +732,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -740,7 +740,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 70,
.target_residency = 100,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -748,7 +748,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 85,
.target_residency = 200,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7s",
@@ -756,7 +756,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
.flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 124,
.target_residency = 800,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
@@ -764,7 +764,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 200,
.target_residency = 800,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C9",
@@ -772,7 +772,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 480,
.target_residency = 5000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
@@ -780,7 +780,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 890,
.target_residency = 5000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -793,7 +793,7 @@ static struct cpuidle_state skx_cstates[] __initdata = {
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_IRQ_ENABLE,
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -801,7 +801,7 @@ static struct cpuidle_state skx_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -809,7 +809,7 @@ static struct cpuidle_state skx_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 133,
.target_residency = 600,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -822,7 +822,7 @@ static struct cpuidle_state icx_cstates[] __initdata = {
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_IRQ_ENABLE,
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -830,7 +830,7 @@ static struct cpuidle_state icx_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 4,
.target_residency = 4,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -838,7 +838,7 @@ static struct cpuidle_state icx_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 170,
.target_residency = 600,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -861,7 +861,7 @@ static struct cpuidle_state adl_cstates[] __initdata = {
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE,
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -869,7 +869,7 @@ static struct cpuidle_state adl_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 2,
.target_residency = 4,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -877,7 +877,7 @@ static struct cpuidle_state adl_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 220,
.target_residency = 600,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
@@ -885,7 +885,7 @@ static struct cpuidle_state adl_cstates[] __initdata = {
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 280,
.target_residency = 800,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
@@ -893,7 +893,7 @@ static struct cpuidle_state adl_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 680,
.target_residency = 2000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -906,7 +906,7 @@ static struct cpuidle_state adl_l_cstates[] __initdata = {
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE,
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -914,7 +914,7 @@ static struct cpuidle_state adl_l_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 2,
.target_residency = 4,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -922,7 +922,7 @@ static struct cpuidle_state adl_l_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 170,
.target_residency = 500,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
@@ -930,7 +930,7 @@ static struct cpuidle_state adl_l_cstates[] __initdata = {
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 200,
.target_residency = 600,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
@@ -938,7 +938,7 @@ static struct cpuidle_state adl_l_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 230,
.target_residency = 700,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -951,7 +951,7 @@ static struct cpuidle_state mtl_l_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -959,7 +959,7 @@ static struct cpuidle_state mtl_l_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 140,
.target_residency = 420,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
@@ -967,7 +967,7 @@ static struct cpuidle_state mtl_l_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 310,
.target_residency = 930,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -980,7 +980,7 @@ static struct cpuidle_state gmt_cstates[] __initdata = {
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE,
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -988,7 +988,7 @@ static struct cpuidle_state gmt_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 2,
.target_residency = 4,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -996,7 +996,7 @@ static struct cpuidle_state gmt_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 195,
.target_residency = 585,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
@@ -1004,7 +1004,7 @@ static struct cpuidle_state gmt_cstates[] __initdata = {
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 260,
.target_residency = 1040,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
@@ -1012,7 +1012,7 @@ static struct cpuidle_state gmt_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 660,
.target_residency = 1980,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1025,7 +1025,7 @@ static struct cpuidle_state spr_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -1033,7 +1033,7 @@ static struct cpuidle_state spr_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 2,
.target_residency = 4,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1042,7 +1042,7 @@ static struct cpuidle_state spr_cstates[] __initdata = {
CPUIDLE_FLAG_INIT_XSTATE,
.exit_latency = 290,
.target_residency = 800,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1055,7 +1055,7 @@ static struct cpuidle_state gnr_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -1063,7 +1063,7 @@ static struct cpuidle_state gnr_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 4,
.target_residency = 4,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1073,7 +1073,7 @@ static struct cpuidle_state gnr_cstates[] __initdata = {
CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 170,
.target_residency = 650,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6P",
@@ -1083,7 +1083,7 @@ static struct cpuidle_state gnr_cstates[] __initdata = {
CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 210,
.target_residency = 1000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1096,7 +1096,7 @@ static struct cpuidle_state gnrd_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -1104,7 +1104,7 @@ static struct cpuidle_state gnrd_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 4,
.target_residency = 4,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1114,7 +1114,7 @@ static struct cpuidle_state gnrd_cstates[] __initdata = {
CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 220,
.target_residency = 650,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6P",
@@ -1124,7 +1124,7 @@ static struct cpuidle_state gnrd_cstates[] __initdata = {
CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 240,
.target_residency = 750,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1137,7 +1137,7 @@ static struct cpuidle_state atom_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C2",
@@ -1145,7 +1145,7 @@ static struct cpuidle_state atom_cstates[] __initdata = {
.flags = MWAIT2flg(0x10),
.exit_latency = 20,
.target_residency = 80,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C4",
@@ -1153,7 +1153,7 @@ static struct cpuidle_state atom_cstates[] __initdata = {
.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 100,
.target_residency = 400,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1161,7 +1161,7 @@ static struct cpuidle_state atom_cstates[] __initdata = {
.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 140,
.target_residency = 560,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1173,7 +1173,7 @@ static struct cpuidle_state tangier_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 4,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C4",
@@ -1181,7 +1181,7 @@ static struct cpuidle_state tangier_cstates[] __initdata = {
.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 100,
.target_residency = 400,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1189,7 +1189,7 @@ static struct cpuidle_state tangier_cstates[] __initdata = {
.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 140,
.target_residency = 560,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7",
@@ -1197,7 +1197,7 @@ static struct cpuidle_state tangier_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 1200,
.target_residency = 4000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C9",
@@ -1205,7 +1205,7 @@ static struct cpuidle_state tangier_cstates[] __initdata = {
.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 10000,
.target_residency = 20000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1217,7 +1217,7 @@ static struct cpuidle_state avn_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1225,7 +1225,7 @@ static struct cpuidle_state avn_cstates[] __initdata = {
.flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 15,
.target_residency = 45,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1237,7 +1237,7 @@ static struct cpuidle_state knl_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle },
{
.name = "C6",
@@ -1245,7 +1245,7 @@ static struct cpuidle_state knl_cstates[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 120,
.target_residency = 500,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle },
{
.enter = NULL }
@@ -1258,7 +1258,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -1266,7 +1266,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1274,7 +1274,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 133,
.target_residency = 133,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7s",
@@ -1282,7 +1282,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = {
.flags = MWAIT2flg(0x31) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 155,
.target_residency = 155,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
@@ -1290,7 +1290,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = {
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 1000,
.target_residency = 1000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C9",
@@ -1298,7 +1298,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = {
.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 2000,
.target_residency = 2000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
@@ -1306,7 +1306,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 10000,
.target_residency = 10000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1319,7 +1319,7 @@ static struct cpuidle_state dnv_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -1327,7 +1327,7 @@ static struct cpuidle_state dnv_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1335,7 +1335,7 @@ static struct cpuidle_state dnv_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 50,
.target_residency = 500,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1352,7 +1352,7 @@ static struct cpuidle_state snr_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -1360,7 +1360,7 @@ static struct cpuidle_state snr_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 15,
.target_residency = 25,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1368,7 +1368,7 @@ static struct cpuidle_state snr_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 130,
.target_residency = 500,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1381,7 +1381,7 @@ static struct cpuidle_state grr_cstates[] __initdata = {
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -1389,7 +1389,7 @@ static struct cpuidle_state grr_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 2,
.target_residency = 10,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6S",
@@ -1397,7 +1397,7 @@ static struct cpuidle_state grr_cstates[] __initdata = {
.flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 140,
.target_residency = 500,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1410,7 +1410,7 @@ static struct cpuidle_state srf_cstates[] __initdata = {
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -1418,7 +1418,7 @@ static struct cpuidle_state srf_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 2,
.target_residency = 10,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6S",
@@ -1427,7 +1427,7 @@ static struct cpuidle_state srf_cstates[] __initdata = {
CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 270,
.target_residency = 700,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6SP",
@@ -1436,7 +1436,7 @@ static struct cpuidle_state srf_cstates[] __initdata = {
CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 310,
.target_residency = 900,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1679,7 +1679,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
};
static const struct x86_cpu_id intel_mwait_ids[] __initconst = {
- X86_MATCH_VENDOR_FAM_FEATURE(INTEL, 6, X86_FEATURE_MWAIT, NULL),
+ X86_MATCH_VENDOR_FAM_FEATURE(INTEL, X86_FAMILY_ANY, X86_FEATURE_MWAIT, NULL),
{}
};
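
Two quick notes on the intel_idle hunks above. Dropping the "&" is purely cosmetic: a function designator used as an initializer decays to a function pointer, so ".enter = intel_idle" and ".enter = &intel_idle" compile to the same thing. The intel_mwait_ids change is the only behavioral one, widening the MWAIT fallback match from family 6 to any Intel family. A minimal userspace sketch of the pointer-decay equivalence (enter_stub is a stand-in, not the kernel symbol):

#include <assert.h>

static int enter_stub(void) { return 0; }

int main(void)
{
	int (*a)(void) = enter_stub;   /* implicit function-to-pointer decay */
	int (*b)(void) = &enter_stub;  /* explicit address-of, same value */

	assert(a == b);
	return 0;
}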
diff --git a/drivers/iio/accel/adxl345_core.c b/drivers/iio/accel/adxl345_core.c
index b7dfd0007aa0..78e3f799ecc1 100644
--- a/drivers/iio/accel/adxl345_core.c
+++ b/drivers/iio/accel/adxl345_core.c
@@ -36,10 +36,29 @@
#define ADXL345_REG_TAP_AXIS_MSK GENMASK(2, 0)
#define ADXL345_REG_TAP_SUPPRESS_MSK BIT(3)
#define ADXL345_REG_TAP_SUPPRESS BIT(3)
+#define ADXL345_POWER_CTL_INACT_MSK (ADXL345_POWER_CTL_AUTO_SLEEP | ADXL345_POWER_CTL_LINK)
#define ADXL345_TAP_Z_EN BIT(0)
#define ADXL345_TAP_Y_EN BIT(1)
#define ADXL345_TAP_X_EN BIT(2)
+
+#define ADXL345_INACT_Z_EN BIT(0)
+#define ADXL345_INACT_Y_EN BIT(1)
+#define ADXL345_INACT_X_EN BIT(2)
+#define ADXL345_REG_INACT_ACDC BIT(3)
+#define ADXL345_ACT_INACT_NO_AXIS_EN 0x00
+#define ADXL345_INACT_XYZ_EN (ADXL345_INACT_Z_EN | ADXL345_INACT_Y_EN | ADXL345_INACT_X_EN)
+
+#define ADXL345_ACT_Z_EN BIT(4)
+#define ADXL345_ACT_Y_EN BIT(5)
+#define ADXL345_ACT_X_EN BIT(6)
+#define ADXL345_REG_ACT_ACDC BIT(7)
+#define ADXL345_ACT_XYZ_EN (ADXL345_ACT_Z_EN | ADXL345_ACT_Y_EN | ADXL345_ACT_X_EN)
+
+#define ADXL345_COUPLING_DC 0
+#define ADXL345_COUPLING_AC 1
+#define ADXL345_REG_NO_ACDC 0x00
/* single/double tap */
enum adxl345_tap_type {
@@ -64,6 +83,39 @@ static const unsigned int adxl345_tap_time_reg[] = {
[ADXL345_TAP_TIME_DUR] = ADXL345_REG_DUR,
};
+/* activity/inactivity */
+enum adxl345_activity_type {
+ ADXL345_ACTIVITY,
+ ADXL345_INACTIVITY,
+ ADXL345_ACTIVITY_AC,
+ ADXL345_INACTIVITY_AC,
+ ADXL345_INACTIVITY_FF,
+};
+
+static const unsigned int adxl345_act_int_reg[] = {
+ [ADXL345_ACTIVITY] = ADXL345_INT_ACTIVITY,
+ [ADXL345_INACTIVITY] = ADXL345_INT_INACTIVITY,
+ [ADXL345_ACTIVITY_AC] = ADXL345_INT_ACTIVITY,
+ [ADXL345_INACTIVITY_AC] = ADXL345_INT_INACTIVITY,
+ [ADXL345_INACTIVITY_FF] = ADXL345_INT_FREE_FALL,
+};
+
+static const unsigned int adxl345_act_thresh_reg[] = {
+ [ADXL345_ACTIVITY] = ADXL345_REG_THRESH_ACT,
+ [ADXL345_INACTIVITY] = ADXL345_REG_THRESH_INACT,
+ [ADXL345_ACTIVITY_AC] = ADXL345_REG_THRESH_ACT,
+ [ADXL345_INACTIVITY_AC] = ADXL345_REG_THRESH_INACT,
+ [ADXL345_INACTIVITY_FF] = ADXL345_REG_THRESH_FF,
+};
+
+static const unsigned int adxl345_act_acdc_msk[] = {
+ [ADXL345_ACTIVITY] = ADXL345_REG_ACT_ACDC,
+ [ADXL345_INACTIVITY] = ADXL345_REG_INACT_ACDC,
+ [ADXL345_ACTIVITY_AC] = ADXL345_REG_ACT_ACDC,
+ [ADXL345_INACTIVITY_AC] = ADXL345_REG_INACT_ACDC,
+ [ADXL345_INACTIVITY_FF] = ADXL345_REG_NO_ACDC,
+};
+
enum adxl345_odr {
ADXL345_ODR_0P10HZ = 0,
ADXL345_ODR_0P20HZ,
@@ -129,6 +181,14 @@ static const int adxl345_fullres_range_tbl[][2] = {
[ADXL345_16G_RANGE] = { 0, 38312 },
};
+/* scaling */
+static const int adxl345_range_factor_tbl[] = {
+ [ADXL345_2G_RANGE] = 1,
+ [ADXL345_4G_RANGE] = 2,
+ [ADXL345_8G_RANGE] = 4,
+ [ADXL345_16G_RANGE] = 8,
+};
+
struct adxl345_state {
const struct adxl345_chip_info *info;
struct regmap *regmap;
@@ -136,6 +196,9 @@ struct adxl345_state {
u8 watermark;
u8 fifo_mode;
+ u8 inact_threshold;
+ u32 inact_time_ms;
+
u32 tap_duration_us;
u32 tap_latent_us;
u32 tap_window_us;
@@ -145,6 +208,22 @@ struct adxl345_state {
static const struct iio_event_spec adxl345_events[] = {
{
+ /* activity */
+ .type = IIO_EV_TYPE_MAG,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_shared_by_type =
+ BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_VALUE),
+ },
+ {
+ /* activity, ac bit set */
+ .type = IIO_EV_TYPE_MAG_ADAPTIVE,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_shared_by_type =
+ BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_VALUE),
+ },
+ {
/* single tap */
.type = IIO_EV_TYPE_GESTURE,
.dir = IIO_EV_DIR_SINGLETAP,
@@ -188,10 +267,39 @@ enum adxl345_chans {
chan_x, chan_y, chan_z,
};
+static const struct iio_event_spec adxl345_fake_chan_events[] = {
+ {
+ /* inactivity */
+ .type = IIO_EV_TYPE_MAG,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ .mask_shared_by_type =
+ BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_PERIOD),
+ },
+ {
+ /* inactivity, AC bit set */
+ .type = IIO_EV_TYPE_MAG_ADAPTIVE,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ .mask_shared_by_type =
+ BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_PERIOD),
+ },
+};
+
static const struct iio_chan_spec adxl345_channels[] = {
ADXL345_CHANNEL(0, chan_x, X),
ADXL345_CHANNEL(1, chan_y, Y),
ADXL345_CHANNEL(2, chan_z, Z),
+ {
+ .type = IIO_ACCEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_X_AND_Y_AND_Z,
+ .scan_index = -1, /* Fake channel */
+ .event_spec = adxl345_fake_chan_events,
+ .num_event_specs = ARRAY_SIZE(adxl345_fake_chan_events),
+ },
};
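+/*
+ * The X&Y&Z entry above is a "fake" channel in the IIO sense: its
+ * scan_index of -1 keeps it out of buffered capture, so it exists only
+ * to carry the shared inactivity event specs.
+ */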
static const unsigned long adxl345_scan_masks[] = {
@@ -237,6 +345,394 @@ static int adxl345_set_measure_en(struct adxl345_state *st, bool en)
ADXL345_POWER_CTL_MEASURE, en);
}
+/* activity / inactivity */
+
+static int adxl345_set_inact_threshold(struct adxl345_state *st,
+ unsigned int threshold)
+{
+ int ret;
+
+ st->inact_threshold = min(U8_MAX, threshold);
+
+ ret = regmap_write(st->regmap,
+ adxl345_act_thresh_reg[ADXL345_INACTIVITY],
+ st->inact_threshold);
+ if (ret)
+ return ret;
+
+ return regmap_write(st->regmap,
+ adxl345_act_thresh_reg[ADXL345_INACTIVITY_FF],
+ st->inact_threshold);
+}
+
+static int adxl345_set_default_time(struct adxl345_state *st)
+{
+ int max_boundary = U8_MAX;
+ int min_boundary = 10;
+ enum adxl345_odr odr;
+ unsigned int regval;
+ unsigned int val;
+ int ret;
+
+ /* Generate a default inactivity time based on the ODR */
+ ret = regmap_read(st->regmap, ADXL345_REG_BW_RATE, &regval);
+ if (ret)
+ return ret;
+
+ odr = FIELD_GET(ADXL345_BW_RATE_MSK, regval);
+ val = clamp(max_boundary - adxl345_odr_tbl[odr][0],
+ min_boundary, max_boundary);
+ st->inact_time_ms = MILLI * val;
+
+ /* Inactivity time in s */
+ return regmap_write(st->regmap, ADXL345_REG_TIME_INACT, val);
+}
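+/*
+ * For example, assuming adxl345_odr_tbl[odr][0] holds the integer Hz
+ * part of the rate: at a 100 Hz ODR this computes
+ * clamp(255 - 100, 10, 255) = 155 s; at 400 Hz and above it clamps to
+ * the 10 s minimum.
+ */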
+
+static int adxl345_set_inactivity_time(struct adxl345_state *st, u32 val_int)
+{
+ st->inact_time_ms = MILLI * val_int;
+
+ return regmap_write(st->regmap, ADXL345_REG_TIME_INACT, val_int);
+}
+
+static int adxl345_set_freefall_time(struct adxl345_state *st, u32 val_fract)
+{
+ /*
+ * The datasheet maximum is 255 * 5000 us = 1.275 s.
+ *
+ * Recommended values are between 100 ms and 350 ms (0x14 to 0x46).
+ */
+ st->inact_time_ms = DIV_ROUND_UP(val_fract, MILLI);
+
+ return regmap_write(st->regmap, ADXL345_REG_TIME_FF,
+ DIV_ROUND_CLOSEST(val_fract, 5000));
+}
+
+/**
+ * adxl345_set_inact_time - Configure inactivity time explicitly or by ODR.
+ * @st: The sensor state instance.
+ * @val_int: The inactivity time, integer part.
+ * @val_fract: The inactivity time, fractional part when val_int is 0.
+ *
+ * Inactivity time can be configured between 1 and 255 seconds. If a user sets
+ * the time to 0 (both val_int and val_fract zero), a default inactivity time
+ * is calculated automatically, since 0 is invalid and undefined by the sensor.
+ *
+ * In such cases, power consumption should be considered: the inactivity period
+ * should be shorter at higher sampling frequencies and longer at lower ones.
+ * Specifically, for frequencies above 255 Hz, the default is set to 10 seconds;
+ * for frequencies below 10 Hz, it defaults to 255 seconds.
+ *
+ * The calculation method subtracts the integer part of the configured sample
+ * frequency from 255 to estimate the inactivity time in seconds. Sub-Hertz
+ * values are ignored in this approximation. Since the recommended output data
+ * rates (ODRs) for features like activity/inactivity detection, sleep modes,
+ * and free fall range between 12.5 Hz and 400 Hz, frequencies outside this
+ * range will either use the defined boundary defaults or require explicit
+ * configuration via val_int and val_fract.
+ *
+ * Return: 0 or error value.
+ */
+static int adxl345_set_inact_time(struct adxl345_state *st, u32 val_int,
+ u32 val_fract)
+{
+ if (val_int > 0) {
+ /* Time >= 1s, inactivity */
+ return adxl345_set_inactivity_time(st, val_int);
+ } else if (val_int == 0) {
+ if (val_fract > 0) {
+ /* Time < 1s, free-fall */
+ return adxl345_set_freefall_time(st, val_fract);
+ } else if (val_fract == 0) {
+ /* Time == 0.0s */
+ return adxl345_set_default_time(st);
+ }
+ }
+
+ /* Do not support negative or wrong input. */
+ return -EINVAL;
+}
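+/*
+ * Usage sketch: adxl345_set_inact_time(st, 5, 0) programs a 5 s
+ * inactivity period, adxl345_set_inact_time(st, 0, 250000) takes the
+ * free-fall path with a 250 ms window (the fractional part being
+ * microseconds, as in an IIO integer/fractional value pair), and
+ * adxl345_set_inact_time(st, 0, 0) falls back to the ODR-based default.
+ */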
+
+/**
+ * adxl345_is_act_inact_ac() - Verify if AC or DC coupling is currently enabled.
+ *
+ * @st: The device data.
+ * @type: The activity or inactivity type.
+ *
+ * Given a type of activity / inactivity combined with either AC coupling set or
+ * default to DC, this function verifies if the combination is currently
+ * configured, hence enabled or not.
+ *
+ * Return: true if the configured coupling matches the provided type, false if
+ * it does not, or a negative error value on failure.
+ */
+static int adxl345_is_act_inact_ac(struct adxl345_state *st,
+ enum adxl345_activity_type type)
+{
+ unsigned int regval;
+ bool coupling;
+ int ret;
+
+ if (type == ADXL345_INACTIVITY_FF)
+ return true;
+
+ ret = regmap_read(st->regmap, ADXL345_REG_ACT_INACT_CTRL, &regval);
+ if (ret)
+ return ret;
+
+ coupling = adxl345_act_acdc_msk[type] & regval;
+
+ switch (type) {
+ case ADXL345_ACTIVITY:
+ case ADXL345_INACTIVITY:
+ return coupling == ADXL345_COUPLING_DC;
+ case ADXL345_ACTIVITY_AC:
+ case ADXL345_INACTIVITY_AC:
+ return coupling == ADXL345_COUPLING_AC;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * adxl345_set_act_inact_ac() - Configure AC coupling or DC coupling.
+ *
+ * @st: The device data.
+ * @type: Provide a type of activity or inactivity.
+ * @cmd_en: enable or disable AC coupling.
+ *
+ * Enables AC coupling or DC coupling depending on the provided type argument.
+ * Note: activity and inactivity can each be either AC- or DC-coupled, not
+ * both at the same time.
+ *
+ * Return: 0 if successful, else error value.
+ */
+static int adxl345_set_act_inact_ac(struct adxl345_state *st,
+ enum adxl345_activity_type type,
+ bool cmd_en)
+{
+ unsigned int act_inact_ac;
+
+ if (type == ADXL345_ACTIVITY_AC || type == ADXL345_INACTIVITY_AC)
+ act_inact_ac = ADXL345_COUPLING_AC && cmd_en;
+ else
+ act_inact_ac = ADXL345_COUPLING_DC && cmd_en;
+
+ /*
+ * A setting of false selects dc-coupled operation, and a setting of
+ * true enables ac-coupled operation. In dc-coupled operation, the
+ * current acceleration magnitude is compared directly with
+ * ADXL345_REG_THRESH_ACT and ADXL345_REG_THRESH_INACT to determine
+ * whether activity or inactivity is detected.
+ *
+ * In ac-coupled operation for activity detection, the acceleration
+ * value at the start of activity detection is taken as a reference
+ * value. New samples of acceleration are then compared to this
+ * reference value, and if the magnitude of the difference exceeds the
+ * ADXL345_REG_THRESH_ACT value, the device triggers an activity
+ * interrupt.
+ *
+ * Similarly, in ac-coupled operation for inactivity detection, a
+ * reference value is used for comparison and is updated whenever the
+ * device exceeds the inactivity threshold. After the reference value
+ * is selected, the device compares the magnitude of the difference
+ * between the reference value and the current acceleration with
+ * ADXL345_REG_THRESH_INACT. If the difference is less than the value in
+ * ADXL345_REG_THRESH_INACT for the time in ADXL345_REG_TIME_INACT, the
+ * device is considered inactive and the inactivity interrupt is
+ * triggered. [quoted from p. 24, ADXL345 datasheet Rev. G]
+ *
+ * In conclusion, the first acceleration sample that hits the threshold
+ * in a particular direction is always taken as the acceleration
+ * reference value for that direction. Since activity and inactivity
+ * are per x/y/z axis in hardware, so are AC and DC coupling. Note that
+ * this driver always enables or disables all three x/y/z axes for
+ * detection via act_axis_ctrl and inact_axis_ctrl, respectively.
+ * Whereas in DC coupling samples are compared directly against the
+ * thresholds, in AC coupling the difference from the first
+ * acceleration reference value is compared against the threshold, so
+ * AC coupling allows for somewhat more dynamic compensation depending
+ * on the initial sample.
+ */
+ return regmap_assign_bits(st->regmap, ADXL345_REG_ACT_INACT_CTRL,
+ adxl345_act_acdc_msk[type], act_inact_ac);
+}
+
+static int adxl345_is_act_inact_en(struct adxl345_state *st,
+ enum adxl345_activity_type type)
+{
+ unsigned int axis_ctrl;
+ unsigned int regval;
+ bool int_en, en;
+ int ret;
+
+ ret = regmap_read(st->regmap, ADXL345_REG_ACT_INACT_CTRL, &axis_ctrl);
+ if (ret)
+ return ret;
+
+ /* Check if axis for activity are enabled */
+ switch (type) {
+ case ADXL345_ACTIVITY:
+ case ADXL345_ACTIVITY_AC:
+ en = FIELD_GET(ADXL345_ACT_XYZ_EN, axis_ctrl);
+ if (!en)
+ return false;
+ break;
+ case ADXL345_INACTIVITY:
+ case ADXL345_INACTIVITY_AC:
+ en = FIELD_GET(ADXL345_INACT_XYZ_EN, axis_ctrl);
+ if (!en)
+ return false;
+ break;
+ case ADXL345_INACTIVITY_FF:
+ en = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Check if specific interrupt is enabled */
+ ret = regmap_read(st->regmap, ADXL345_REG_INT_ENABLE, &regval);
+ if (ret)
+ return ret;
+
+ int_en = adxl345_act_int_reg[type] & regval;
+ if (!int_en)
+ return false;
+
+ /* Check if configured coupling matches provided type */
+ return adxl345_is_act_inact_ac(st, type);
+}
+
+static int adxl345_set_act_inact_linkbit(struct adxl345_state *st,
+ enum adxl345_activity_type type,
+ bool en)
+{
+ int act_ac_en, inact_ac_en;
+ int act_en, inact_en;
+
+ act_en = adxl345_is_act_inact_en(st, ADXL345_ACTIVITY);
+ if (act_en < 0)
+ return act_en;
+
+ act_ac_en = adxl345_is_act_inact_en(st, ADXL345_ACTIVITY_AC);
+ if (act_ac_en < 0)
+ return act_ac_en;
+
+ if (type == ADXL345_INACTIVITY_FF) {
+ inact_en = false;
+ } else {
+ inact_en = adxl345_is_act_inact_en(st, ADXL345_INACTIVITY);
+ if (inact_en < 0)
+ return inact_en;
+
+ inact_ac_en = adxl345_is_act_inact_en(st, ADXL345_INACTIVITY_AC);
+ if (inact_ac_en < 0)
+ return inact_ac_en;
+
+ inact_en = inact_en || inact_ac_en;
+ }
+
+ act_en = act_en || act_ac_en;
+
+ return regmap_assign_bits(st->regmap, ADXL345_REG_POWER_CTL,
+ ADXL345_POWER_CTL_INACT_MSK,
+ en && act_en && inact_en);
+}
+
+static int adxl345_set_act_inact_en(struct adxl345_state *st,
+ enum adxl345_activity_type type,
+ bool cmd_en)
+{
+ unsigned int axis_ctrl;
+ unsigned int threshold;
+ unsigned int period;
+ int ret;
+
+ if (cmd_en) {
+ /* When turning on, check if threshold is valid */
+ if (type == ADXL345_ACTIVITY || type == ADXL345_ACTIVITY_AC) {
+ ret = regmap_read(st->regmap,
+ adxl345_act_thresh_reg[type],
+ &threshold);
+ if (ret)
+ return ret;
+ } else {
+ threshold = st->inact_threshold;
+ }
+
+ if (!threshold) /* Just ignore the command if threshold is 0 */
+ return 0;
+
+ /* When turning on inactivity, check if inact time is valid */
+ if (type == ADXL345_INACTIVITY || type == ADXL345_INACTIVITY_AC) {
+ ret = regmap_read(st->regmap,
+ ADXL345_REG_TIME_INACT,
+ &period);
+ if (ret)
+ return ret;
+
+ if (!period)
+ return 0;
+ }
+ } else {
+ /*
+ * When turning off an activity, ensure that the correct
+ * coupling event is specified. This step helps prevent misuse -
+ * for example, if an AC-coupled activity is active and the
+ * current call attempts to turn off a DC-coupled activity, this
+ * inconsistency should be detected here.
+ */
+ if (adxl345_is_act_inact_ac(st, type) <= 0)
+ return 0;
+ }
+
+ /* Start modifying configuration registers */
+ ret = adxl345_set_measure_en(st, false);
+ if (ret)
+ return ret;
+
+ /* Enable axis according to the command */
+ switch (type) {
+ case ADXL345_ACTIVITY:
+ case ADXL345_ACTIVITY_AC:
+ axis_ctrl = ADXL345_ACT_XYZ_EN;
+ break;
+ case ADXL345_INACTIVITY:
+ case ADXL345_INACTIVITY_AC:
+ axis_ctrl = ADXL345_INACT_XYZ_EN;
+ break;
+ case ADXL345_INACTIVITY_FF:
+ axis_ctrl = ADXL345_ACT_INACT_NO_AXIS_EN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_assign_bits(st->regmap, ADXL345_REG_ACT_INACT_CTRL,
+ axis_ctrl, cmd_en);
+ if (ret)
+ return ret;
+
+ /* Update AC/DC-coupling according to the command */
+ ret = adxl345_set_act_inact_ac(st, type, cmd_en);
+ if (ret)
+ return ret;
+
+ /* Enable the interrupt line, according to the command */
+ ret = regmap_assign_bits(st->regmap, ADXL345_REG_INT_ENABLE,
+ adxl345_act_int_reg[type], cmd_en);
+ if (ret)
+ return ret;
+
+ /* Set link-bit and auto-sleep only when ACT and INACT are enabled */
+ ret = adxl345_set_act_inact_linkbit(st, type, cmd_en);
+ if (ret)
+ return ret;
+
+ return adxl345_set_measure_en(st, true);
+}
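+/*
+ * Note that, as in the other configuration paths of this driver, the
+ * register updates above are bracketed by adxl345_set_measure_en(), so
+ * the part sits in standby while the ACT/INACT control bits change.
+ */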
+
/* tap */
static int _adxl345_set_tap_int(struct adxl345_state *st,
@@ -368,9 +864,8 @@ static int adxl345_set_doubletap_en(struct adxl345_state *st, bool en)
* Generally suppress detection of spikes during the latency period as
* double taps here, this is fully optional for double tap detection
*/
- ret = regmap_update_bits(st->regmap, ADXL345_REG_TAP_AXIS,
- ADXL345_REG_TAP_SUPPRESS_MSK,
- en ? ADXL345_REG_TAP_SUPPRESS : 0x00);
+ ret = regmap_assign_bits(st->regmap, ADXL345_REG_TAP_AXIS,
+ ADXL345_REG_TAP_SUPPRESS, en);
if (ret)
return ret;
@@ -466,9 +961,16 @@ static int adxl345_find_odr(struct adxl345_state *st, int val,
static int adxl345_set_odr(struct adxl345_state *st, enum adxl345_odr odr)
{
- return regmap_update_bits(st->regmap, ADXL345_REG_BW_RATE,
+ int ret;
+
+ ret = regmap_update_bits(st->regmap, ADXL345_REG_BW_RATE,
ADXL345_BW_RATE_MSK,
FIELD_PREP(ADXL345_BW_RATE_MSK, odr));
+ if (ret)
+ return ret;
+
+ /* update inactivity time by ODR */
+ return adxl345_set_inact_time(st, 0, 0);
}
static int adxl345_find_range(struct adxl345_state *st, int val, int val2,
@@ -489,9 +991,43 @@ static int adxl345_find_range(struct adxl345_state *st, int val, int val2,
static int adxl345_set_range(struct adxl345_state *st, enum adxl345_range range)
{
- return regmap_update_bits(st->regmap, ADXL345_REG_DATA_FORMAT,
+ unsigned int act_threshold, inact_threshold;
+ unsigned int range_old;
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(st->regmap, ADXL345_REG_DATA_FORMAT, &regval);
+ if (ret)
+ return ret;
+ range_old = FIELD_GET(ADXL345_DATA_FORMAT_RANGE, regval);
+
+ ret = regmap_read(st->regmap,
+ adxl345_act_thresh_reg[ADXL345_ACTIVITY],
+ &act_threshold);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(st->regmap, ADXL345_REG_DATA_FORMAT,
ADXL345_DATA_FORMAT_RANGE,
FIELD_PREP(ADXL345_DATA_FORMAT_RANGE, range));
+ if (ret)
+ return ret;
+
+ act_threshold = act_threshold * adxl345_range_factor_tbl[range_old]
+ / adxl345_range_factor_tbl[range];
+ act_threshold = min(U8_MAX, max(1, act_threshold));
+
+ inact_threshold = st->inact_threshold;
+ inact_threshold = inact_threshold * adxl345_range_factor_tbl[range_old]
+ / adxl345_range_factor_tbl[range];
+ inact_threshold = min(U8_MAX, max(1, inact_threshold));
+
+ ret = regmap_write(st->regmap, adxl345_act_thresh_reg[ADXL345_ACTIVITY],
+ act_threshold);
+ if (ret)
+ return ret;
+
+ return adxl345_set_inact_threshold(st, inact_threshold);
}
static int adxl345_read_avail(struct iio_dev *indio_dev,
@@ -624,6 +1160,37 @@ static int adxl345_write_raw(struct iio_dev *indio_dev,
return adxl345_set_measure_en(st, true);
}
+static int adxl345_read_mag_config(struct adxl345_state *st,
+ enum iio_event_direction dir,
+ enum adxl345_activity_type type_act,
+ enum adxl345_activity_type type_inact)
+{
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ return !!adxl345_is_act_inact_en(st, type_act);
+ case IIO_EV_DIR_FALLING:
+ return !!adxl345_is_act_inact_en(st, type_inact);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adxl345_write_mag_config(struct adxl345_state *st,
+ enum iio_event_direction dir,
+ enum adxl345_activity_type type_act,
+ enum adxl345_activity_type type_inact,
+ bool state)
+{
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ return adxl345_set_act_inact_en(st, type_act, state);
+ case IIO_EV_DIR_FALLING:
+ return adxl345_set_act_inact_en(st, type_inact, state);
+ default:
+ return -EINVAL;
+ }
+}
+
static int adxl345_read_event_config(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
@@ -634,6 +1201,14 @@ static int adxl345_read_event_config(struct iio_dev *indio_dev,
int ret;
switch (type) {
+ case IIO_EV_TYPE_MAG:
+ return adxl345_read_mag_config(st, dir,
+ ADXL345_ACTIVITY,
+ ADXL345_INACTIVITY);
+ case IIO_EV_TYPE_MAG_ADAPTIVE:
+ return adxl345_read_mag_config(st, dir,
+ ADXL345_ACTIVITY_AC,
+ ADXL345_INACTIVITY_AC);
case IIO_EV_TYPE_GESTURE:
switch (dir) {
case IIO_EV_DIR_SINGLETAP:
@@ -665,6 +1240,16 @@ static int adxl345_write_event_config(struct iio_dev *indio_dev,
struct adxl345_state *st = iio_priv(indio_dev);
switch (type) {
+ case IIO_EV_TYPE_MAG:
+ return adxl345_write_mag_config(st, dir,
+ ADXL345_ACTIVITY,
+ ADXL345_INACTIVITY,
+ state);
+ case IIO_EV_TYPE_MAG_ADAPTIVE:
+ return adxl345_write_mag_config(st, dir,
+ ADXL345_ACTIVITY_AC,
+ ADXL345_INACTIVITY_AC,
+ state);
case IIO_EV_TYPE_GESTURE:
switch (dir) {
case IIO_EV_DIR_SINGLETAP:
@@ -679,6 +1264,72 @@ static int adxl345_write_event_config(struct iio_dev *indio_dev,
}
}
+static int adxl345_read_mag_value(struct adxl345_state *st,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ enum adxl345_activity_type type_act,
+ enum adxl345_activity_type type_inact,
+ int *val, int *val2)
+{
+ unsigned int threshold;
+ int ret;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ ret = regmap_read(st->regmap,
+ adxl345_act_thresh_reg[type_act],
+ &threshold);
+ if (ret)
+ return ret;
+ *val = 62500 * threshold;
+ *val2 = MICRO;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_EV_DIR_FALLING:
+ *val = 62500 * st->inact_threshold;
+ *val2 = MICRO;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ case IIO_EV_INFO_PERIOD:
+ *val = st->inact_time_ms;
+ *val2 = MILLI;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adxl345_write_mag_value(struct adxl345_state *st,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ enum adxl345_activity_type type_act,
+ enum adxl345_activity_type type_inact,
+ int val, int val2)
+{
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ /* Scaling factor 62.5mg/LSB, i.e. ~16g corresponds to 0xff */
+ val = DIV_ROUND_CLOSEST(val * MICRO + val2, 62500);
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ return regmap_write(st->regmap,
+ adxl345_act_thresh_reg[type_act],
+ val);
+ case IIO_EV_DIR_FALLING:
+ return adxl345_set_inact_threshold(st, val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_EV_INFO_PERIOD:
+ return adxl345_set_inact_time(st, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
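+/*
+ * Conversion sketch for the 62.5 mg/LSB scaling: writing a threshold of
+ * 0.5 (val = 0, val2 = 500000) yields
+ * DIV_ROUND_CLOSEST(0 * MICRO + 500000, 62500) = 8 LSB, and reading it
+ * back reports 8 * 62500 / MICRO = 0.5 again.
+ */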
+
static int adxl345_read_event_value(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
@@ -691,6 +1342,16 @@ static int adxl345_read_event_value(struct iio_dev *indio_dev,
int ret;
switch (type) {
+ case IIO_EV_TYPE_MAG:
+ return adxl345_read_mag_value(st, dir, info,
+ ADXL345_ACTIVITY,
+ ADXL345_INACTIVITY,
+ val, val2);
+ case IIO_EV_TYPE_MAG_ADAPTIVE:
+ return adxl345_read_mag_value(st, dir, info,
+ ADXL345_ACTIVITY_AC,
+ ADXL345_INACTIVITY_AC,
+ val, val2);
case IIO_EV_TYPE_GESTURE:
switch (info) {
case IIO_EV_INFO_VALUE:
@@ -741,6 +1402,22 @@ static int adxl345_write_event_value(struct iio_dev *indio_dev,
return ret;
switch (type) {
+ case IIO_EV_TYPE_MAG:
+ ret = adxl345_write_mag_value(st, dir, info,
+ ADXL345_ACTIVITY,
+ ADXL345_INACTIVITY,
+ val, val2);
+ if (ret)
+ return ret;
+ break;
+ case IIO_EV_TYPE_MAG_ADAPTIVE:
+ ret = adxl345_write_mag_value(st, dir, info,
+ ADXL345_ACTIVITY_AC,
+ ADXL345_INACTIVITY_AC,
+ val, val2);
+ if (ret)
+ return ret;
+ break;
case IIO_EV_TYPE_GESTURE:
switch (info) {
case IIO_EV_INFO_VALUE:
@@ -980,10 +1657,12 @@ static int adxl345_fifo_push(struct iio_dev *indio_dev,
}
static int adxl345_push_event(struct iio_dev *indio_dev, int int_stat,
+ enum iio_modifier act_dir,
enum iio_modifier tap_dir)
{
s64 ts = iio_get_time_ns(indio_dev);
struct adxl345_state *st = iio_priv(indio_dev);
+ unsigned int regval;
int samples;
int ret = -ENOENT;
@@ -1007,6 +1686,68 @@ static int adxl345_push_event(struct iio_dev *indio_dev, int int_stat,
return ret;
}
+ if (FIELD_GET(ADXL345_INT_ACTIVITY, int_stat)) {
+ ret = regmap_read(st->regmap, ADXL345_REG_ACT_INACT_CTRL, &regval);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(ADXL345_REG_ACT_ACDC, regval)) {
+ /* AC coupled */
+ ret = iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, act_dir,
+ IIO_EV_TYPE_MAG_ADAPTIVE,
+ IIO_EV_DIR_RISING),
+ ts);
+
+ } else {
+ /* DC coupled, relying on THRESH */
+ ret = iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, act_dir,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_DIR_RISING),
+ ts);
+ }
+ if (ret)
+ return ret;
+ }
+
+ if (FIELD_GET(ADXL345_INT_INACTIVITY, int_stat)) {
+ ret = regmap_read(st->regmap, ADXL345_REG_ACT_INACT_CTRL, &regval);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(ADXL345_REG_INACT_ACDC, regval)) {
+ /* AC coupled */
+ ret = iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL, 0,
+ IIO_MOD_X_AND_Y_AND_Z,
+ IIO_EV_TYPE_MAG_ADAPTIVE,
+ IIO_EV_DIR_FALLING),
+ ts);
+ } else {
+ /* DC coupled, relying on THRESH */
+ ret = iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL, 0,
+ IIO_MOD_X_AND_Y_AND_Z,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_DIR_FALLING),
+ ts);
+ }
+ if (ret)
+ return ret;
+ }
+
+ if (FIELD_GET(ADXL345_INT_FREE_FALL, int_stat)) {
+ ret = iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL, 0,
+ IIO_MOD_X_AND_Y_AND_Z,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_DIR_FALLING),
+ ts);
+ if (ret)
+ return ret;
+ }
+
if (FIELD_GET(ADXL345_INT_WATERMARK, int_stat)) {
samples = adxl345_get_samples(st);
if (samples < 0)
@@ -1034,6 +1775,7 @@ static irqreturn_t adxl345_irq_handler(int irq, void *p)
struct adxl345_state *st = iio_priv(indio_dev);
unsigned int regval;
enum iio_modifier tap_dir = IIO_NO_MOD;
+ enum iio_modifier act_dir = IIO_NO_MOD;
u32 axis_ctrl;
int int_stat;
int ret;
@@ -1042,7 +1784,8 @@ static irqreturn_t adxl345_irq_handler(int irq, void *p)
if (ret)
return IRQ_NONE;
- if (FIELD_GET(ADXL345_REG_TAP_AXIS_MSK, axis_ctrl)) {
+ if (FIELD_GET(ADXL345_REG_TAP_AXIS_MSK, axis_ctrl) ||
+ FIELD_GET(ADXL345_ACT_XYZ_EN, axis_ctrl)) {
ret = regmap_read(st->regmap, ADXL345_REG_ACT_TAP_STATUS, &regval);
if (ret)
return IRQ_NONE;
@@ -1053,12 +1796,19 @@ static irqreturn_t adxl345_irq_handler(int irq, void *p)
tap_dir = IIO_MOD_Y;
else if (FIELD_GET(ADXL345_TAP_X_EN, regval))
tap_dir = IIO_MOD_X;
+
+ if (FIELD_GET(ADXL345_ACT_Z_EN, regval))
+ act_dir = IIO_MOD_Z;
+ else if (FIELD_GET(ADXL345_ACT_Y_EN, regval))
+ act_dir = IIO_MOD_Y;
+ else if (FIELD_GET(ADXL345_ACT_X_EN, regval))
+ act_dir = IIO_MOD_X;
}
if (regmap_read(st->regmap, ADXL345_REG_INT_SOURCE, &int_stat))
return IRQ_NONE;
- if (adxl345_push_event(indio_dev, int_stat, tap_dir))
+ if (adxl345_push_event(indio_dev, int_stat, act_dir, tap_dir))
goto err;
if (FIELD_GET(ADXL345_INT_OVERRUN, int_stat))
@@ -1226,6 +1976,24 @@ int adxl345_core_probe(struct device *dev, struct regmap *regmap,
if (ret)
return ret;
+ /*
+ * Initialize the device with sensible default values to streamline
+ * sensor operation. These defaults are partly derived from the
+ * previous input driver for the ADXL345 and partly based on the
+ * recommendations in the datasheet.
+ */
+ ret = regmap_write(st->regmap, ADXL345_REG_ACT_INACT_CTRL, 0);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->regmap, ADXL345_REG_THRESH_ACT, 6);
+ if (ret)
+ return ret;
+
+ ret = adxl345_set_inact_threshold(st, 4);
+ if (ret)
+ return ret;
+
ret = regmap_write(st->regmap, ADXL345_REG_THRESH_TAP, tap_threshold);
if (ret)
return ret;
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 4fccbcb76e04..8925f5279e62 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -139,11 +139,6 @@ struct bma180_data {
int scale;
int bw;
bool pmode;
- /* Ensure timestamp is naturally aligned */
- struct {
- s16 chan[4];
- aligned_s64 timestamp;
- } scan;
};
enum bma180_chan {
@@ -870,6 +865,10 @@ static irqreturn_t bma180_trigger_handler(int irq, void *p)
struct bma180_data *data = iio_priv(indio_dev);
s64 time_ns = iio_get_time_ns(indio_dev);
int bit, ret, i = 0;
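+ /*
+ * Zero-initialize the on-stack scan buffer so that padding and
+ * unused channel slots are not leaked to userspace.
+ */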
+ struct {
+ s16 chan[4];
+ aligned_s64 timestamp;
+ } scan = { };
mutex_lock(&data->mutex);
@@ -879,12 +878,12 @@ static irqreturn_t bma180_trigger_handler(int irq, void *p)
mutex_unlock(&data->mutex);
goto err;
}
- data->scan.chan[i++] = ret;
+ scan.chan[i++] = ret;
}
mutex_unlock(&data->mutex);
- iio_push_to_buffers_with_ts(indio_dev, &data->scan, sizeof(data->scan), time_ns);
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan), time_ns);
err:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/accel/bma220_spi.c b/drivers/iio/accel/bma220_spi.c
index 38f7498431ee..01592eebf05b 100644
--- a/drivers/iio/accel/bma220_spi.c
+++ b/drivers/iio/accel/bma220_spi.c
@@ -255,10 +255,8 @@ static int bma220_probe(struct spi_device *spi)
struct bma220_data *data;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*data));
- if (!indio_dev) {
- dev_err(&spi->dev, "iio allocation failed!\n");
+ if (!indio_dev)
return -ENOMEM;
- }
data = iio_priv(indio_dev);
data->spi_device = spi;
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index be5fbb0c5d29..3c5d1560b163 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -332,13 +332,10 @@ static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on)
struct device *dev = regmap_get_device(data->regmap);
int ret;
- if (on) {
+ if (on)
ret = pm_runtime_resume_and_get(dev);
- } else {
- pm_runtime_mark_last_busy(dev);
+ else
ret = pm_runtime_put_autosuspend(dev);
- }
-
if (ret < 0) {
dev_err(dev,
"Failed: %s for %d\n", __func__, on);
diff --git a/drivers/iio/accel/bmi088-accel-core.c b/drivers/iio/accel/bmi088-accel-core.c
index dea126f993c1..c7da90af0d2d 100644
--- a/drivers/iio/accel/bmi088-accel-core.c
+++ b/drivers/iio/accel/bmi088-accel-core.c
@@ -375,7 +375,6 @@ static int bmi088_accel_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
out_read_raw_pm_put:
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -419,7 +418,6 @@ static int bmi088_accel_write_raw(struct iio_dev *indio_dev,
return ret;
ret = bmi088_accel_set_scale(data, val, val2);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
case IIO_CHAN_INFO_SAMP_FREQ:
@@ -428,7 +426,6 @@ static int bmi088_accel_write_raw(struct iio_dev *indio_dev,
return ret;
ret = bmi088_accel_set_sample_freq(data, val);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
default:
diff --git a/drivers/iio/accel/dmard06.c b/drivers/iio/accel/dmard06.c
index fb14894c66f9..33f225d73e7b 100644
--- a/drivers/iio/accel/dmard06.c
+++ b/drivers/iio/accel/dmard06.c
@@ -137,10 +137,8 @@ static int dmard06_probe(struct i2c_client *client)
}
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*dmard06));
- if (!indio_dev) {
- dev_err(&client->dev, "Failed to allocate iio device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
dmard06 = iio_priv(indio_dev);
dmard06->client = client;
diff --git a/drivers/iio/accel/dmard09.c b/drivers/iio/accel/dmard09.c
index 4ec70ca6910d..d9290e3b9c46 100644
--- a/drivers/iio/accel/dmard09.c
+++ b/drivers/iio/accel/dmard09.c
@@ -95,10 +95,8 @@ static int dmard09_probe(struct i2c_client *client)
struct dmard09_data *data;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
- if (!indio_dev) {
- dev_err(&client->dev, "iio allocation failed\n");
+ if (!indio_dev)
return -ENOMEM;
- }
data = iio_priv(indio_dev);
data->client = client;
diff --git a/drivers/iio/accel/dmard10.c b/drivers/iio/accel/dmard10.c
index 71cd1928baa6..575e8510e1bd 100644
--- a/drivers/iio/accel/dmard10.c
+++ b/drivers/iio/accel/dmard10.c
@@ -191,10 +191,8 @@ static int dmard10_probe(struct i2c_client *client)
return (ret < 0) ? ret : -ENODEV;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
- if (!indio_dev) {
- dev_err(&client->dev, "iio allocation failed!\n");
+ if (!indio_dev)
return -ENOMEM;
- }
data = iio_priv(indio_dev);
data->client = client;
diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c
index b10a30960e1e..8763e91c63d2 100644
--- a/drivers/iio/accel/fxls8962af-core.c
+++ b/drivers/iio/accel/fxls8962af-core.c
@@ -222,7 +222,6 @@ static int fxls8962af_power_off(struct fxls8962af_data *data)
struct device *dev = regmap_get_device(data->regmap);
int ret;
- pm_runtime_mark_last_busy(dev);
ret = pm_runtime_put_autosuspend(dev);
if (ret)
dev_err(dev, "failed to power off\n");
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index 6aefe8221296..2823ddde4bf2 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -636,10 +636,8 @@ static int kxcjk1013_set_power_state(struct kxcjk1013_data *data, bool on)
if (on)
ret = pm_runtime_resume_and_get(&data->client->dev);
- else {
- pm_runtime_mark_last_busy(&data->client->dev);
+ else
ret = pm_runtime_put_autosuspend(&data->client->dev);
- }
if (ret < 0) {
dev_err(&data->client->dev,
"Failed: %s for %d\n", __func__, on);
diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
index cfc31265cdd0..4717d80fc24a 100644
--- a/drivers/iio/accel/kxsd9.c
+++ b/drivers/iio/accel/kxsd9.c
@@ -151,7 +151,6 @@ static int kxsd9_write_raw(struct iio_dev *indio_dev,
ret = kxsd9_write_scale(indio_dev, val2);
}
- pm_runtime_mark_last_busy(st->dev);
pm_runtime_put_autosuspend(st->dev);
return ret;
@@ -199,7 +198,6 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev,
}
error_ret:
- pm_runtime_mark_last_busy(st->dev);
pm_runtime_put_autosuspend(st->dev);
return ret;
@@ -250,7 +248,6 @@ static int kxsd9_buffer_postdisable(struct iio_dev *indio_dev)
{
struct kxsd9_state *st = iio_priv(indio_dev);
- pm_runtime_mark_last_busy(st->dev);
pm_runtime_put_autosuspend(st->dev);
return 0;
diff --git a/drivers/iio/accel/mc3230.c b/drivers/iio/accel/mc3230.c
index e2853090fa6e..3e494f9ddc56 100644
--- a/drivers/iio/accel/mc3230.c
+++ b/drivers/iio/accel/mc3230.c
@@ -169,10 +169,8 @@ static int mc3230_probe(struct i2c_client *client)
}
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
- if (!indio_dev) {
- dev_err(&client->dev, "iio allocation failed!\n");
+ if (!indio_dev)
return -ENOMEM;
- }
data = iio_priv(indio_dev);
data->chip_info = chip_info;
diff --git a/drivers/iio/accel/mma7660.c b/drivers/iio/accel/mma7660.c
index d0a16f227903..be3213600cf4 100644
--- a/drivers/iio/accel/mma7660.c
+++ b/drivers/iio/accel/mma7660.c
@@ -192,10 +192,8 @@ static int mma7660_probe(struct i2c_client *client)
struct mma7660_data *data;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
- if (!indio_dev) {
- dev_err(&client->dev, "iio allocation failed!\n");
+ if (!indio_dev)
return -ENOMEM;
- }
data = iio_priv(indio_dev);
data->client = client;
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index aba444a980d9..15172ba2972c 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -224,13 +224,10 @@ static int mma8452_set_runtime_pm_state(struct i2c_client *client, bool on)
#ifdef CONFIG_PM
int ret;
- if (on) {
+ if (on)
ret = pm_runtime_resume_and_get(&client->dev);
- } else {
- pm_runtime_mark_last_busy(&client->dev);
+ else
ret = pm_runtime_put_autosuspend(&client->dev);
- }
-
if (ret < 0) {
dev_err(&client->dev,
"failed to change power state to %d\n", on);
diff --git a/drivers/iio/accel/mma9551_core.c b/drivers/iio/accel/mma9551_core.c
index 3e7d9b79ed0e..2ccb1fb19b96 100644
--- a/drivers/iio/accel/mma9551_core.c
+++ b/drivers/iio/accel/mma9551_core.c
@@ -671,11 +671,8 @@ int mma9551_set_power_state(struct i2c_client *client, bool on)
if (on)
ret = pm_runtime_resume_and_get(&client->dev);
- else {
- pm_runtime_mark_last_busy(&client->dev);
+ else
ret = pm_runtime_put_autosuspend(&client->dev);
- }
-
if (ret < 0) {
dev_err(&client->dev,
"failed to change power state to %d\n", on);
diff --git a/drivers/iio/accel/msa311.c b/drivers/iio/accel/msa311.c
index 3e10225410e8..5eace0de3750 100644
--- a/drivers/iio/accel/msa311.c
+++ b/drivers/iio/accel/msa311.c
@@ -607,7 +607,6 @@ static int msa311_read_raw_data(struct iio_dev *indio_dev,
err = msa311_get_axis(msa311, chan, &axis);
mutex_unlock(&msa311->lock);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
iio_device_release_direct(indio_dev);
@@ -741,7 +740,6 @@ static int msa311_write_scale(struct iio_dev *indio_dev, int val, int val2)
break;
}
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
if (err)
@@ -781,7 +779,6 @@ static int msa311_write_samp_freq(struct iio_dev *indio_dev, int val, int val2)
break;
}
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
iio_device_release_direct(indio_dev);
@@ -832,7 +829,6 @@ static int msa311_debugfs_reg_access(struct iio_dev *indio_dev,
mutex_unlock(&msa311->lock);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
if (err)
@@ -855,7 +851,6 @@ static int msa311_buffer_postdisable(struct iio_dev *indio_dev)
struct msa311_priv *msa311 = iio_priv(indio_dev);
struct device *dev = msa311->dev;
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
@@ -990,7 +985,7 @@ static int msa311_check_partid(struct msa311_priv *msa311)
msa311->chip_name = devm_kasprintf(dev, GFP_KERNEL,
"msa311-%02x", partid);
if (!msa311->chip_name)
- return dev_err_probe(dev, -ENOMEM, "can't alloc chip name\n");
+ return -ENOMEM;
return 0;
}
@@ -1069,8 +1064,7 @@ static int msa311_setup_interrupts(struct msa311_priv *msa311)
trig = devm_iio_trigger_alloc(dev, "%s-new-data", msa311->chip_name);
if (!trig)
- return dev_err_probe(dev, -ENOMEM,
- "can't allocate newdata trigger\n");
+ return -ENOMEM;
msa311->new_data_trig = trig;
msa311->new_data_trig->ops = &msa311_new_data_trig_ops;
@@ -1153,8 +1147,7 @@ static int msa311_probe(struct i2c_client *i2c)
indio_dev = devm_iio_device_alloc(dev, sizeof(*msa311));
if (!indio_dev)
- return dev_err_probe(dev, -ENOMEM,
- "IIO device allocation failed\n");
+ return -ENOMEM;
msa311 = iio_priv(indio_dev);
msa311->dev = dev;
@@ -1195,7 +1188,7 @@ static int msa311_probe(struct i2c_client *i2c)
*/
err = devm_add_action_or_reset(dev, msa311_powerdown, msa311);
if (err)
- return dev_err_probe(dev, err, "can't add powerdown action\n");
+ return err;
err = pm_runtime_set_active(dev);
if (err)
@@ -1231,7 +1224,6 @@ static int msa311_probe(struct i2c_client *i2c)
if (err)
return err;
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
err = devm_iio_device_register(dev, indio_dev);
diff --git a/drivers/iio/accel/sca3300.c b/drivers/iio/accel/sca3300.c
index bda370c0f660..8380b237831c 100644
--- a/drivers/iio/accel/sca3300.c
+++ b/drivers/iio/accel/sca3300.c
@@ -477,7 +477,7 @@ static irqreturn_t sca3300_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct sca3300_data *data = iio_priv(indio_dev);
int bit, ret, val, i = 0;
- IIO_DECLARE_BUFFER_WITH_TS(s16, channels, SCA3300_SCAN_MAX);
+ IIO_DECLARE_BUFFER_WITH_TS(s16, channels, SCA3300_SCAN_MAX) = { };
iio_for_each_active_channel(indio_dev, bit) {
ret = sca3300_read_reg(data, indio_dev->channels[bit].address, &val);
diff --git a/drivers/iio/accel/stk8312.c b/drivers/iio/accel/stk8312.c
index 89569ce221d7..f31c6ab3392d 100644
--- a/drivers/iio/accel/stk8312.c
+++ b/drivers/iio/accel/stk8312.c
@@ -504,10 +504,8 @@ static int stk8312_probe(struct i2c_client *client)
struct stk8312_data *data;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
- if (!indio_dev) {
- dev_err(&client->dev, "iio allocation failed!\n");
+ if (!indio_dev)
return -ENOMEM;
- }
data = iio_priv(indio_dev);
data->client = client;
diff --git a/drivers/iio/accel/stk8ba50.c b/drivers/iio/accel/stk8ba50.c
index c1d7e7dcb09b..384f1fbcbcb3 100644
--- a/drivers/iio/accel/stk8ba50.c
+++ b/drivers/iio/accel/stk8ba50.c
@@ -385,10 +385,8 @@ static int stk8ba50_probe(struct i2c_client *client)
struct stk8ba50_data *data;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
- if (!indio_dev) {
- dev_err(&client->dev, "iio allocation failed!\n");
+ if (!indio_dev)
return -ENOMEM;
- }
data = iio_priv(indio_dev);
data->client = client;
diff --git a/drivers/iio/adc/88pm886-gpadc.c b/drivers/iio/adc/88pm886-gpadc.c
new file mode 100644
index 000000000000..cffe35136685
--- /dev/null
+++ b/drivers/iio/adc/88pm886-gpadc.c
@@ -0,0 +1,393 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2025, Duje Mihanović <duje@dujemihanovic.xyz>
+ */
+
+#include <linux/bits.h>
+#include <linux/bug.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/math.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#include <asm/byteorder.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/types.h>
+
+#include <linux/mfd/88pm886.h>
+
+struct pm886_gpadc {
+ struct regmap *map;
+};
+
+enum pm886_gpadc_channel {
+ VSC_CHAN,
+ VCHG_PWR_CHAN,
+ VCF_OUT_CHAN,
+ VBAT_CHAN,
+ VBAT_SLP_CHAN,
+ VBUS_CHAN,
+
+ GPADC0_CHAN,
+ GPADC1_CHAN,
+ GPADC2_CHAN,
+ GPADC3_CHAN,
+
+ GND_DET1_CHAN,
+ GND_DET2_CHAN,
+ MIC_DET_CHAN,
+
+ TINT_CHAN,
+};
+
+static const int pm886_gpadc_regs[] = {
+ [VSC_CHAN] = PM886_REG_GPADC_VSC,
+ [VCHG_PWR_CHAN] = PM886_REG_GPADC_VCHG_PWR,
+ [VCF_OUT_CHAN] = PM886_REG_GPADC_VCF_OUT,
+ [VBAT_CHAN] = PM886_REG_GPADC_VBAT,
+ [VBAT_SLP_CHAN] = PM886_REG_GPADC_VBAT_SLP,
+ [VBUS_CHAN] = PM886_REG_GPADC_VBUS,
+
+ [GPADC0_CHAN] = PM886_REG_GPADC_GPADC0,
+ [GPADC1_CHAN] = PM886_REG_GPADC_GPADC1,
+ [GPADC2_CHAN] = PM886_REG_GPADC_GPADC2,
+ [GPADC3_CHAN] = PM886_REG_GPADC_GPADC3,
+
+ [GND_DET1_CHAN] = PM886_REG_GPADC_GND_DET1,
+ [GND_DET2_CHAN] = PM886_REG_GPADC_GND_DET2,
+ [MIC_DET_CHAN] = PM886_REG_GPADC_MIC_DET,
+
+ [TINT_CHAN] = PM886_REG_GPADC_TINT,
+};
+
+#define ADC_CHANNEL_VOLTAGE(index, lsb, name) \
+{ \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = index, \
+ .address = lsb, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .datasheet_name = name, \
+}
+
+#define ADC_CHANNEL_RESISTANCE(index, lsb, name) \
+{ \
+ .type = IIO_RESISTANCE, \
+ .indexed = 1, \
+ .channel = index, \
+ .address = lsb, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), \
+ .datasheet_name = name, \
+}
+
+#define ADC_CHANNEL_TEMPERATURE(index, lsb, name) \
+{ \
+ .type = IIO_TEMP, \
+ .indexed = 1, \
+ .channel = index, \
+ .address = lsb, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .datasheet_name = name, \
+}
+
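+/*
+ * The "lsb" argument of the channel macros is the step size of the raw
+ * 12-bit reading: microvolts per LSB for the voltage and resistance
+ * channels, and millikelvin per LSB for the internal temperature
+ * channel (converted to millicelsius via the offset).
+ */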
+static const struct iio_chan_spec pm886_gpadc_channels[] = {
+ ADC_CHANNEL_VOLTAGE(VSC_CHAN, 1367, "vsc"),
+ ADC_CHANNEL_VOLTAGE(VCHG_PWR_CHAN, 1709, "vchg_pwr"),
+ ADC_CHANNEL_VOLTAGE(VCF_OUT_CHAN, 1367, "vcf_out"),
+ ADC_CHANNEL_VOLTAGE(VBAT_CHAN, 1367, "vbat"),
+ ADC_CHANNEL_VOLTAGE(VBAT_SLP_CHAN, 1367, "vbat_slp"),
+ ADC_CHANNEL_VOLTAGE(VBUS_CHAN, 1709, "vbus"),
+
+ ADC_CHANNEL_RESISTANCE(GPADC0_CHAN, 342, "gpadc0"),
+ ADC_CHANNEL_RESISTANCE(GPADC1_CHAN, 342, "gpadc1"),
+ ADC_CHANNEL_RESISTANCE(GPADC2_CHAN, 342, "gpadc2"),
+ ADC_CHANNEL_RESISTANCE(GPADC3_CHAN, 342, "gpadc3"),
+
+ ADC_CHANNEL_VOLTAGE(GND_DET1_CHAN, 342, "gnddet1"),
+ ADC_CHANNEL_VOLTAGE(GND_DET2_CHAN, 342, "gnddet2"),
+ ADC_CHANNEL_VOLTAGE(MIC_DET_CHAN, 1367, "mic_det"),
+
+ ADC_CHANNEL_TEMPERATURE(TINT_CHAN, 104, "tint"),
+};
+
+static const struct regmap_config pm886_gpadc_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = PM886_GPADC_MAX_REGISTER,
+};
+
+static int gpadc_get_raw(struct iio_dev *iio, enum pm886_gpadc_channel chan)
+{
+ struct pm886_gpadc *gpadc = iio_priv(iio);
+ __be16 buf;
+ int ret;
+
+ ret = regmap_bulk_read(gpadc->map, pm886_gpadc_regs[chan], &buf, sizeof(buf));
+ if (ret)
+ return ret;
+
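+ /* The 12-bit result is left-justified in the big-endian register pair. */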
+ return be16_to_cpu(buf) >> 4;
+}
+
+static int
+gpadc_set_bias(struct pm886_gpadc *gpadc, enum pm886_gpadc_channel chan, bool on)
+{
+ unsigned int gpadc_num = chan - GPADC0_CHAN;
+ unsigned int bits = BIT(gpadc_num + 4) | BIT(gpadc_num);
+
+ return regmap_assign_bits(gpadc->map, PM886_REG_GPADC_CONFIG(0x14), bits, on);
+}
+
+static int
+gpadc_find_bias_current(struct iio_dev *iio, struct iio_chan_spec const *chan,
+ unsigned int *raw_uV, unsigned int *raw_uA)
+{
+ struct pm886_gpadc *gpadc = iio_priv(iio);
+ unsigned int gpadc_num = chan->channel - GPADC0_CHAN;
+ unsigned int reg = PM886_REG_GPADC_CONFIG(0xb + gpadc_num);
+ unsigned long lsb = chan->address;
+ int ret;
+
+ for (unsigned int i = 0; i < PM886_GPADC_BIAS_LEVELS; i++) {
+ ret = regmap_update_bits(gpadc->map, reg, GENMASK(3, 0), i);
+ if (ret)
+ return ret;
+
+ /* Wait for the new bias level to apply. */
+ fsleep(5 * USEC_PER_MSEC);
+
+ *raw_uA = PM886_GPADC_INDEX_TO_BIAS_uA(i);
+ *raw_uV = gpadc_get_raw(iio, chan->channel) * lsb;
+
+ /*
+ * Vendor kernel errors out above 1.25 V, but testing shows
+ * that the resistance of the battery detection channel (GPADC2
+ * on coreprimevelte) reaches about 1.4 MΩ when the battery is
+ * removed, which can't be measured with such a low upper
+ * limit. Therefore, to be able to detect the battery without
+ * ugly externs as used in the vendor fuel gauge driver,
+ * increase this limit a bit.
+ */
+ if (WARN_ON(*raw_uV > 1500 * (MICRO / MILLI)))
+ return -EIO;
+
+ /*
+ * Vendor kernel errors out under 300 mV, but for the same
+ * reason as above (except the channel hovers around 3.5 kΩ
+ * with battery present), reduce this limit.
+ */
+ if (*raw_uV < 200 * (MICRO / MILLI)) {
+ dev_dbg(&iio->dev, "bad bias for chan %d: %d uA @ %d uV\n",
+ chan->channel, *raw_uA, *raw_uV);
+ continue;
+ }
+
+ dev_dbg(&iio->dev, "good bias for chan %d: %d uA @ %d uV\n",
+ chan->channel, *raw_uA, *raw_uV);
+ return 0;
+ }
+
+ dev_err(&iio->dev, "failed to find good bias for chan %d\n", chan->channel);
+ return -EINVAL;
+}
+
+static int
+gpadc_get_resistance_ohm(struct iio_dev *iio, struct iio_chan_spec const *chan)
+{
+ struct pm886_gpadc *gpadc = iio_priv(iio);
+ unsigned int raw_uV, raw_uA;
+ int ret;
+
+ ret = gpadc_set_bias(gpadc, chan->channel, true);
+ if (ret)
+ goto out;
+
+ ret = gpadc_find_bias_current(iio, chan, &raw_uV, &raw_uA);
+ if (ret)
+ goto out;
+
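+ /* R = V / I: microvolts divided by microamps yields ohms directly. */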
+ ret = DIV_ROUND_CLOSEST(raw_uV, raw_uA);
+out:
+ gpadc_set_bias(gpadc, chan->channel, false);
+ return ret;
+}
+
+static int
+__pm886_gpadc_read_raw(struct iio_dev *iio, struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ unsigned long lsb = chan->address;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ *val = gpadc_get_raw(iio, chan->channel);
+ if (*val < 0)
+ return *val;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = lsb;
+
+ if (chan->type == IIO_VOLTAGE) {
+ *val2 = MILLI;
+ return IIO_VAL_FRACTIONAL;
+ } else {
+ return IIO_VAL_INT;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ /*
+ * The raw reading is in kelvin at 104 mK/LSB; offsetting it by
+ * absolute zero (scaled by the LSB size) yields millicelsius.
+ */
+ *val = ABSOLUTE_ZERO_MILLICELSIUS;
+ *val2 = lsb;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_CHAN_INFO_PROCESSED:
+ *val = gpadc_get_resistance_ohm(iio, chan);
+ if (*val < 0)
+ return *val;
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int pm886_gpadc_read_raw(struct iio_dev *iio, struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct device *dev = iio->dev.parent;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ ret = __pm886_gpadc_read_raw(iio, chan, val, val2, mask);
+
+ pm_runtime_put_autosuspend(dev);
+ return ret;
+}
+
+static int pm886_gpadc_hw_enable(struct regmap *map)
+{
+ const u8 config[] = {
+ PM886_GPADC_CONFIG1_EN_ALL,
+ PM886_GPADC_CONFIG2_EN_ALL,
+ PM886_GPADC_GND_DET2_EN,
+ };
+ int ret;
+
+ /* Enable the ADC block. */
+ ret = regmap_set_bits(map, PM886_REG_GPADC_CONFIG(0x6), BIT(0));
+ if (ret)
+ return ret;
+
+ /* Enable all channels. */
+ return regmap_bulk_write(map, PM886_REG_GPADC_CONFIG(0x1), config, ARRAY_SIZE(config));
+}
+
+static int pm886_gpadc_hw_disable(struct regmap *map)
+{
+ return regmap_clear_bits(map, PM886_REG_GPADC_CONFIG(0x6), BIT(0));
+}
+
+static const struct iio_info pm886_gpadc_iio_info = {
+ .read_raw = pm886_gpadc_read_raw,
+};
+
+static int pm886_gpadc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pm886_chip *chip = dev_get_drvdata(dev->parent);
+ struct i2c_client *client = chip->client;
+ struct pm886_gpadc *gpadc;
+ struct i2c_client *page;
+ struct iio_dev *iio;
+ int ret;
+
+ iio = devm_iio_device_alloc(dev, sizeof(*gpadc));
+ if (!iio)
+ return -ENOMEM;
+
+ gpadc = iio_priv(iio);
+ dev_set_drvdata(dev, iio);
+
+ page = devm_i2c_new_dummy_device(dev, client->adapter,
+ client->addr + PM886_PAGE_OFFSET_GPADC);
+ if (IS_ERR(page))
+ return dev_err_probe(dev, PTR_ERR(page), "Failed to initialize GPADC page\n");
+
+ gpadc->map = devm_regmap_init_i2c(page, &pm886_gpadc_regmap_config);
+ if (IS_ERR(gpadc->map))
+ return dev_err_probe(dev, PTR_ERR(gpadc->map),
+ "Failed to initialize GPADC regmap\n");
+
+ iio->name = "88pm886-gpadc";
+ iio->modes = INDIO_DIRECT_MODE;
+ iio->info = &pm886_gpadc_iio_info;
+ iio->channels = pm886_gpadc_channels;
+ iio->num_channels = ARRAY_SIZE(pm886_gpadc_channels);
+ device_set_node(&iio->dev, dev_fwnode(dev->parent));
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable runtime PM\n");
+
+ pm_runtime_set_autosuspend_delay(dev, 50);
+ pm_runtime_use_autosuspend(dev);
+ ret = devm_iio_device_register(dev, iio);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register ADC\n");
+
+ return 0;
+}
+
+static int pm886_gpadc_runtime_resume(struct device *dev)
+{
+ struct iio_dev *iio = dev_get_drvdata(dev);
+ struct pm886_gpadc *gpadc = iio_priv(iio);
+
+ return pm886_gpadc_hw_enable(gpadc->map);
+}
+
+static int pm886_gpadc_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *iio = dev_get_drvdata(dev);
+ struct pm886_gpadc *gpadc = iio_priv(iio);
+
+ return pm886_gpadc_hw_disable(gpadc->map);
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(pm886_gpadc_pm_ops,
+ pm886_gpadc_runtime_suspend,
+ pm886_gpadc_runtime_resume, NULL);
+
+static const struct platform_device_id pm886_gpadc_id[] = {
+ { "88pm886-gpadc" },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, pm886_gpadc_id);
+
+static struct platform_driver pm886_gpadc_driver = {
+ .driver = {
+ .name = "88pm886-gpadc",
+ .pm = pm_ptr(&pm886_gpadc_pm_ops),
+ },
+ .probe = pm886_gpadc_probe,
+ .id_table = pm886_gpadc_id,
+};
+module_platform_driver(pm886_gpadc_driver);
+
+MODULE_AUTHOR("Duje Mihanović <duje@dujemihanovic.xyz>");
+MODULE_DESCRIPTION("Marvell 88PM886 GPADC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 6de2abad0197..58a14e6833f6 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -9,6 +9,19 @@ menu "Analog to digital converters"
config IIO_ADC_HELPER
tristate
+config 88PM886_GPADC
+ tristate "Marvell 88PM886 GPADC driver"
+ depends on MFD_88PM886_PMIC
+ default MFD_88PM886_PMIC
+ help
+ Say Y here to enable support for the GPADC (General Purpose ADC)
+ found on the Marvell 88PM886 PMIC. The GPADC measures various
+ voltages and temperatures, including (but not limited to) the
+ system, battery and USB VBUS voltages.
+
+ To compile this driver as a module, choose M here: the module will be
+ called 88pm886-gpadc.
+
config AB8500_GPADC
bool "ST-Ericsson AB8500 GPADC driver"
depends on AB8500_CORE && REGULATOR_AB8500
@@ -389,6 +402,7 @@ config AD7779
depends on SPI
select CRC8
select IIO_BUFFER
+ select IIO_BACKEND
help
Say yes here to build support for Analog Devices AD777X family
(AD7770, AD7771, AD7779) analog to digital converter (ADC).
@@ -507,6 +521,25 @@ config AD9467
To compile this driver as a module, choose M here: the module will be
called ad9467.
+config ADE9000
+ tristate "Analog Devices ADE9000 Multiphase Energy, and Power Quality Monitoring IC Driver"
+ depends on SPI
+ select REGMAP_SPI
+ select IIO_BUFFER
+ select IIO_KFIFO_BUF
+ help
+ Say yes here to build support for the Analog Devices ADE9000,
+ a highly accurate, multiphase energy and power quality monitoring
+ integrated circuit.
+
+ The device features high-precision analog-to-digital converters
+ and digital signal processing to compute RMS values, power factor
+ and frequency, and to perform harmonic analysis. It supports SPI
+ communication and provides buffered data output through the IIO
+ framework.
+
+ To compile this driver as a module, choose M here: the module will
+ be called ade9000.
+
config ADI_AXI_ADC
tristate "Analog Devices Generic AXI ADC IP core driver"
depends on MICROBLAZE || NIOS2 || ARCH_ZYNQ || ARCH_ZYNQMP || ARCH_INTEL_SOCFPGA || COMPILE_TEST
@@ -766,6 +799,17 @@ config INGENIC_ADC
This driver can also be built as a module. If so, the module will be
called ingenic_adc.
+config INTEL_DC_TI_ADC
+ tristate "Intel Bay Trail / Cherry Trail Dollar Cove TI ADC driver"
+ depends on INTEL_SOC_PMIC_CHTDC_TI
+ help
+ Say yes here to have support for the Dollar Cove TI PMIC ADC device.
+ Depending on platform configuration, this general-purpose ADC can
+ be used to measure signals such as the battery voltage or thermal
+ resistors.
+
+ To compile this driver as a module, choose M here: the module will be
+ called intel_dc_ti_adc.
+
config INTEL_MRFLD_ADC
tristate "Intel Merrifield Basin Cove ADC driver"
depends on INTEL_SOC_PMIC_MRFLD
@@ -1298,9 +1342,19 @@ config RN5T618_ADC
This driver can also be built as a module. If so, the module
will be called rn5t618-adc.
+config ROHM_BD79112
+ tristate "Rohm BD79112 ADC driver"
+ depends on SPI && GPIOLIB
+ select REGMAP_SPI
+ select IIO_ADC_HELPER
+ help
+ Say yes here to build support for the ROHM BD79112 ADC. The
+ ROHM BD79112 is a 12-bit, 32-channel SAR ADC. The analog input
+ pins can also be used as GPIOs.
+
config ROHM_BD79124
tristate "Rohm BD79124 ADC driver"
- depends on I2C
+ depends on I2C && GPIOLIB
select REGMAP_I2C
select IIO_ADC_HELPER
help
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 1c6ca5fd4b6d..d008f78dc010 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -6,6 +6,7 @@
obj-$(CONFIG_IIO_ADC_HELPER) += industrialio-adc.o
# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_88PM886_GPADC) += 88pm886-gpadc.o
obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o
obj-$(CONFIG_AD_SIGMA_DELTA) += ad_sigma_delta.o
obj-$(CONFIG_AD4000) += ad4000.o
@@ -46,6 +47,7 @@ obj-$(CONFIG_AD7944) += ad7944.o
obj-$(CONFIG_AD7949) += ad7949.o
obj-$(CONFIG_AD799X) += ad799x.o
obj-$(CONFIG_AD9467) += ad9467.o
+obj-$(CONFIG_ADE9000) += ade9000.o
obj-$(CONFIG_ADI_AXI_ADC) += adi-axi-adc.o
obj-$(CONFIG_ASPEED_ADC) += aspeed_adc.o
obj-$(CONFIG_AT91_ADC) += at91_adc.o
@@ -70,6 +72,7 @@ obj-$(CONFIG_IMX8QXP_ADC) += imx8qxp-adc.o
obj-$(CONFIG_IMX93_ADC) += imx93_adc.o
obj-$(CONFIG_INA2XX_ADC) += ina2xx-adc.o
obj-$(CONFIG_INGENIC_ADC) += ingenic-adc.o
+obj-$(CONFIG_INTEL_DC_TI_ADC) += intel_dc_ti_adc.o
obj-$(CONFIG_INTEL_MRFLD_ADC) += intel_mrfld_adc.o
obj-$(CONFIG_LP8788_ADC) += lp8788_adc.o
obj-$(CONFIG_LPC18XX_ADC) += lpc18xx_adc.o
@@ -116,6 +119,7 @@ obj-$(CONFIG_QCOM_VADC_COMMON) += qcom-vadc-common.o
obj-$(CONFIG_RCAR_GYRO_ADC) += rcar-gyroadc.o
obj-$(CONFIG_RICHTEK_RTQ6056) += rtq6056.o
obj-$(CONFIG_RN5T618_ADC) += rn5t618-adc.o
+obj-$(CONFIG_ROHM_BD79112) += rohm-bd79112.o
obj-$(CONFIG_ROHM_BD79124) += rohm-bd79124.o
obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o
obj-$(CONFIG_RZG2L_ADC) += rzg2l_adc.o
diff --git a/drivers/iio/adc/ab8500-gpadc.c b/drivers/iio/adc/ab8500-gpadc.c
index f3b057f92310..8eaa1dd6a89b 100644
--- a/drivers/iio/adc/ab8500-gpadc.c
+++ b/drivers/iio/adc/ab8500-gpadc.c
@@ -607,7 +607,6 @@ static int ab8500_gpadc_read(struct ab8500_gpadc *gpadc,
}
/* This eventually drops the regulator */
- pm_runtime_mark_last_busy(gpadc->dev);
pm_runtime_put_autosuspend(gpadc->dev);
return (high_data << 8) | low_data;
diff --git a/drivers/iio/adc/ad4130.c b/drivers/iio/adc/ad4130.c
index 6cf790ff3eb5..5567ae5dee88 100644
--- a/drivers/iio/adc/ad4130.c
+++ b/drivers/iio/adc/ad4130.c
@@ -2035,8 +2035,7 @@ static int ad4130_probe(struct spi_device *spi)
ret = devm_add_action_or_reset(dev, ad4130_disable_regulators, st);
if (ret)
- return dev_err_probe(dev, ret,
- "Failed to add regulators disable action\n");
+ return ret;
ret = ad4130_soft_reset(st);
if (ret)
@@ -2064,7 +2063,7 @@ static int ad4130_probe(struct spi_device *spi)
st->gc.can_sleep = true;
st->gc.init_valid_mask = ad4130_gpio_init_valid_mask;
st->gc.get_direction = ad4130_gpio_get_direction;
- st->gc.set_rv = ad4130_gpio_set;
+ st->gc.set = ad4130_gpio_set;
ret = devm_gpiochip_add_data(dev, &st->gc, st);
if (ret)
diff --git a/drivers/iio/adc/ad4170-4.c b/drivers/iio/adc/ad4170-4.c
index 6cd84d6fb08b..efaed92191f1 100644
--- a/drivers/iio/adc/ad4170-4.c
+++ b/drivers/iio/adc/ad4170-4.c
@@ -1807,7 +1807,7 @@ static int ad4170_gpio_init(struct iio_dev *indio_dev)
st->gpiochip.direction_input = ad4170_gpio_direction_input;
st->gpiochip.direction_output = ad4170_gpio_direction_output;
st->gpiochip.get = ad4170_gpio_get;
- st->gpiochip.set_rv = ad4170_gpio_set;
+ st->gpiochip.set = ad4170_gpio_set;
st->gpiochip.owner = THIS_MODULE;
return devm_gpiochip_add_data(&st->spi->dev, &st->gpiochip, indio_dev);
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index 9808df2e9242..910b40393f77 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -3,21 +3,27 @@
* AD7124 SPI ADC driver
*
* Copyright 2018 Analog Devices Inc.
+ * Copyright 2025 BayLibre, SAS
*/
#include <linux/bitfield.h>
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
+#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
+#include <linux/sprintf.h>
+#include <linux/units.h>
#include <linux/iio/iio.h>
#include <linux/iio/adc/ad_sigma_delta.h>
@@ -44,6 +50,11 @@
#define AD7124_STATUS_POR_FLAG BIT(4)
/* AD7124_ADC_CONTROL */
+#define AD7124_ADC_CONTROL_CLK_SEL GENMASK(1, 0)
+#define AD7124_ADC_CONTROL_CLK_SEL_INT 0
+#define AD7124_ADC_CONTROL_CLK_SEL_INT_OUT 1
+#define AD7124_ADC_CONTROL_CLK_SEL_EXT 2
+#define AD7124_ADC_CONTROL_CLK_SEL_EXT_DIV4 3
#define AD7124_ADC_CONTROL_MODE GENMASK(5, 2)
#define AD7124_ADC_CONTROL_MODE_CONTINUOUS 0
#define AD7124_ADC_CONTROL_MODE_SINGLE 1
@@ -84,14 +95,26 @@
#define AD7124_CONFIG_PGA GENMASK(2, 0)
/* AD7124_FILTER_X */
-#define AD7124_FILTER_FS GENMASK(10, 0)
#define AD7124_FILTER_FILTER GENMASK(23, 21)
#define AD7124_FILTER_FILTER_SINC4 0
#define AD7124_FILTER_FILTER_SINC3 2
+#define AD7124_FILTER_FILTER_SINC4_SINC1 4
+#define AD7124_FILTER_FILTER_SINC3_SINC1 5
+#define AD7124_FILTER_FILTER_SINC3_PF 7
+#define AD7124_FILTER_REJ60 BIT(20)
+#define AD7124_FILTER_POST_FILTER GENMASK(19, 17)
+#define AD7124_FILTER_POST_FILTER_47dB 2
+#define AD7124_FILTER_POST_FILTER_62dB 3
+#define AD7124_FILTER_POST_FILTER_86dB 5
+#define AD7124_FILTER_POST_FILTER_92dB 6
+#define AD7124_FILTER_SINGLE_CYCLE BIT(16)
+#define AD7124_FILTER_FS GENMASK(10, 0)
#define AD7124_MAX_CONFIGS 8
#define AD7124_MAX_CHANNELS 16
+#define AD7124_INT_CLK_HZ 614400
+
/* AD7124 input sources */
enum ad7124_ref_sel {
@@ -120,9 +143,9 @@ static const unsigned int ad7124_reg_size[] = {
};
static const int ad7124_master_clk_freq_hz[3] = {
- [AD7124_LOW_POWER] = 76800,
- [AD7124_MID_POWER] = 153600,
- [AD7124_FULL_POWER] = 614400,
+ [AD7124_LOW_POWER] = AD7124_INT_CLK_HZ / 8,
+ [AD7124_MID_POWER] = AD7124_INT_CLK_HZ / 4,
+ [AD7124_FULL_POWER] = AD7124_INT_CLK_HZ,
};
static const char * const ad7124_ref_names[] = {
@@ -138,9 +161,24 @@ struct ad7124_chip_info {
unsigned int num_inputs;
};
+enum ad7124_filter_type {
+ AD7124_FILTER_TYPE_SINC3,
+ AD7124_FILTER_TYPE_SINC3_PF1,
+ AD7124_FILTER_TYPE_SINC3_PF2,
+ AD7124_FILTER_TYPE_SINC3_PF3,
+ AD7124_FILTER_TYPE_SINC3_PF4,
+ AD7124_FILTER_TYPE_SINC3_REJ60,
+ AD7124_FILTER_TYPE_SINC3_SINC1,
+ AD7124_FILTER_TYPE_SINC4,
+ AD7124_FILTER_TYPE_SINC4_REJ60,
+ AD7124_FILTER_TYPE_SINC4_SINC1,
+};
+
struct ad7124_channel_config {
bool live;
unsigned int cfg_slot;
+ unsigned int requested_odr;
+ unsigned int requested_odr_micro;
/*
* Following fields are used to compare for equality. If you
* make adaptations in it, you most likely also have to adapt
@@ -153,9 +191,8 @@ struct ad7124_channel_config {
bool buf_negative;
unsigned int vref_mv;
unsigned int pga_bits;
- unsigned int odr;
unsigned int odr_sel_bits;
- unsigned int filter_type;
+ enum ad7124_filter_type filter_type;
unsigned int calibration_offset;
unsigned int calibration_gain;
);
@@ -174,7 +211,7 @@ struct ad7124_state {
struct ad_sigma_delta sd;
struct ad7124_channel *channels;
struct regulator *vref[4];
- struct clk *mclk;
+ u32 clk_hz;
unsigned int adc_control;
unsigned int num_channels;
struct mutex cfgs_lock; /* lock for configs access */
@@ -250,44 +287,117 @@ static int ad7124_set_mode(struct ad_sigma_delta *sd,
return ad_sd_write_reg(&st->sd, AD7124_ADC_CONTROL, 2, st->adc_control);
}
-static void ad7124_set_channel_odr(struct ad7124_state *st, unsigned int channel, unsigned int odr)
+static u32 ad7124_get_fclk_hz(struct ad7124_state *st)
+{
+ enum ad7124_power_mode power_mode;
+ u32 fclk_hz;
+
+ power_mode = FIELD_GET(AD7124_ADC_CONTROL_POWER_MODE, st->adc_control);
+ fclk_hz = st->clk_hz;
+
+ switch (power_mode) {
+ case AD7124_LOW_POWER:
+ fclk_hz /= 8;
+ break;
+ case AD7124_MID_POWER:
+ fclk_hz /= 4;
+ break;
+ default:
+ break;
+ }
+
+ return fclk_hz;
+}
+
+static u32 ad7124_get_fs_factor(struct ad7124_state *st, unsigned int channel)
{
- unsigned int fclk, odr_sel_bits;
+ enum ad7124_power_mode power_mode =
+ FIELD_GET(AD7124_ADC_CONTROL_POWER_MODE, st->adc_control);
+ u32 avg = power_mode == AD7124_LOW_POWER ? 8 : 16;
- fclk = clk_get_rate(st->mclk);
/*
- * FS[10:0] = fCLK / (fADC x 32) where:
+ * These are the "zero-latency" factors from the data sheet. For the
+ * sinc1 filters, these aren't documented, but are derived by taking the
+ * single-channel formula from the sinc1 section of the data sheet and
+ * multiplying that by the sinc3/4 factor from the corresponding zero-
+ * latency sections.
+ */
+ switch (st->channels[channel].cfg.filter_type) {
+ case AD7124_FILTER_TYPE_SINC4:
+ case AD7124_FILTER_TYPE_SINC4_REJ60:
+ return 4 * 32;
+ case AD7124_FILTER_TYPE_SINC4_SINC1:
+ return 4 * avg * 32;
+ case AD7124_FILTER_TYPE_SINC3_SINC1:
+ return 3 * avg * 32;
+ default:
+ return 3 * 32;
+ }
+}
+
+static u32 ad7124_get_fadc_divisor(struct ad7124_state *st, unsigned int channel)
+{
+ u32 factor = ad7124_get_fs_factor(st, channel);
+
+ /*
+ * The output data rate (f_ADC) is f_CLK / divisor. We are returning
+ * the divisor.
+ */
+ return st->channels[channel].cfg.odr_sel_bits * factor;
+}
+
+static void ad7124_set_channel_odr(struct ad7124_state *st, unsigned int channel)
+{
+ struct ad7124_channel_config *cfg = &st->channels[channel].cfg;
+ unsigned int fclk, factor, divisor, odr_sel_bits;
+
+ fclk = ad7124_get_fclk_hz(st);
+ factor = ad7124_get_fs_factor(st, channel);
+
+ /*
+ * FS[10:0] = fCLK / (fADC x 32 x N) where:
* fADC is the output data rate
* fCLK is the master clock frequency
+ * N is the number of conversions per sample (depends on filter type)
* FS[10:0] are the bits in the filter register
* FS[10:0] can have a value from 1 to 2047
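+ * e.g. at full power (fCLK = 614400 Hz), a requested fADC of 50 Hz
+ * with the sinc4 filter (N = 4) gives FS = 614400 / (50 x 32 x 4) = 96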
*/
- odr_sel_bits = DIV_ROUND_CLOSEST(fclk, odr * 32);
- if (odr_sel_bits < 1)
- odr_sel_bits = 1;
- else if (odr_sel_bits > 2047)
- odr_sel_bits = 2047;
+ divisor = cfg->requested_odr * factor +
+ cfg->requested_odr_micro * factor / MICRO;
+ odr_sel_bits = clamp(DIV_ROUND_CLOSEST(fclk, divisor), 1, 2047);
if (odr_sel_bits != st->channels[channel].cfg.odr_sel_bits)
st->channels[channel].cfg.live = false;
- /* fADC = fCLK / (FS[10:0] x 32) */
- st->channels[channel].cfg.odr = DIV_ROUND_CLOSEST(fclk, odr_sel_bits * 32);
st->channels[channel].cfg.odr_sel_bits = odr_sel_bits;
}
-static int ad7124_get_3db_filter_freq(struct ad7124_state *st,
- unsigned int channel)
+static int ad7124_get_3db_filter_factor(struct ad7124_state *st,
+ unsigned int channel)
{
- unsigned int fadc;
+ struct ad7124_channel_config *cfg = &st->channels[channel].cfg;
- fadc = st->channels[channel].cfg.odr;
-
- switch (st->channels[channel].cfg.filter_type) {
- case AD7124_FILTER_FILTER_SINC3:
- return DIV_ROUND_CLOSEST(fadc * 272, 1000);
- case AD7124_FILTER_FILTER_SINC4:
- return DIV_ROUND_CLOSEST(fadc * 230, 1000);
+ /*
+ * The 3 dB point is the output data rate (f_ADC) times some factor.
+ * This function returns that factor times 1000.
+ */
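+ /* e.g. sinc4: 3 dB frequency ~= 0.230 * f_ADC, i.e. 11.5 Hz at 50 sps. */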
+ switch (cfg->filter_type) {
+ case AD7124_FILTER_TYPE_SINC3:
+ case AD7124_FILTER_TYPE_SINC3_REJ60:
+ case AD7124_FILTER_TYPE_SINC3_SINC1:
+ return 272;
+ case AD7124_FILTER_TYPE_SINC4:
+ case AD7124_FILTER_TYPE_SINC4_REJ60:
+ case AD7124_FILTER_TYPE_SINC4_SINC1:
+ return 230;
+ case AD7124_FILTER_TYPE_SINC3_PF1:
+ return 633;
+ case AD7124_FILTER_TYPE_SINC3_PF2:
+ return 605;
+ case AD7124_FILTER_TYPE_SINC3_PF3:
+ return 669;
+ case AD7124_FILTER_TYPE_SINC3_PF4:
+ return 759;
default:
return -EINVAL;
}
@@ -311,9 +421,8 @@ static struct ad7124_channel_config *ad7124_find_similar_live_cfg(struct ad7124_
bool buf_negative;
unsigned int vref_mv;
unsigned int pga_bits;
- unsigned int odr;
unsigned int odr_sel_bits;
- unsigned int filter_type;
+ enum ad7124_filter_type filter_type;
unsigned int calibration_offset;
unsigned int calibration_gain;
}));
@@ -328,7 +437,6 @@ static struct ad7124_channel_config *ad7124_find_similar_live_cfg(struct ad7124_
cfg->buf_negative == cfg_aux->buf_negative &&
cfg->vref_mv == cfg_aux->vref_mv &&
cfg->pga_bits == cfg_aux->pga_bits &&
- cfg->odr == cfg_aux->odr &&
cfg->odr_sel_bits == cfg_aux->odr_sel_bits &&
cfg->filter_type == cfg_aux->filter_type &&
cfg->calibration_offset == cfg_aux->calibration_offset &&
@@ -381,8 +489,9 @@ static int ad7124_init_config_vref(struct ad7124_state *st, struct ad7124_channe
static int ad7124_write_config(struct ad7124_state *st, struct ad7124_channel_config *cfg,
unsigned int cfg_slot)
{
- unsigned int tmp;
- unsigned int val;
+ unsigned int val, filter;
+ unsigned int rej60 = 0;
+ unsigned int post = 0;
int ret;
cfg->cfg_slot = cfg_slot;
@@ -405,11 +514,60 @@ static int ad7124_write_config(struct ad7124_state *st, struct ad7124_channel_co
if (ret < 0)
return ret;
- tmp = FIELD_PREP(AD7124_FILTER_FILTER, cfg->filter_type) |
- FIELD_PREP(AD7124_FILTER_FS, cfg->odr_sel_bits);
- return ad7124_spi_write_mask(st, AD7124_FILTER(cfg->cfg_slot),
- AD7124_FILTER_FILTER | AD7124_FILTER_FS,
- tmp, 3);
+ switch (cfg->filter_type) {
+ case AD7124_FILTER_TYPE_SINC3:
+ filter = AD7124_FILTER_FILTER_SINC3;
+ break;
+ case AD7124_FILTER_TYPE_SINC3_PF1:
+ filter = AD7124_FILTER_FILTER_SINC3_PF;
+ post = AD7124_FILTER_POST_FILTER_47dB;
+ break;
+ case AD7124_FILTER_TYPE_SINC3_PF2:
+ filter = AD7124_FILTER_FILTER_SINC3_PF;
+ post = AD7124_FILTER_POST_FILTER_62dB;
+ break;
+ case AD7124_FILTER_TYPE_SINC3_PF3:
+ filter = AD7124_FILTER_FILTER_SINC3_PF;
+ post = AD7124_FILTER_POST_FILTER_86dB;
+ break;
+ case AD7124_FILTER_TYPE_SINC3_PF4:
+ filter = AD7124_FILTER_FILTER_SINC3_PF;
+ post = AD7124_FILTER_POST_FILTER_92dB;
+ break;
+ case AD7124_FILTER_TYPE_SINC3_REJ60:
+ filter = AD7124_FILTER_FILTER_SINC3;
+ rej60 = 1;
+ break;
+ case AD7124_FILTER_TYPE_SINC3_SINC1:
+ filter = AD7124_FILTER_FILTER_SINC3_SINC1;
+ break;
+ case AD7124_FILTER_TYPE_SINC4:
+ filter = AD7124_FILTER_FILTER_SINC4;
+ break;
+ case AD7124_FILTER_TYPE_SINC4_REJ60:
+ filter = AD7124_FILTER_FILTER_SINC4;
+ rej60 = 1;
+ break;
+ case AD7124_FILTER_TYPE_SINC4_SINC1:
+ filter = AD7124_FILTER_FILTER_SINC4_SINC1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * NB: AD7124_FILTER_SINGLE_CYCLE is always set so that we get the same
+ * sampling frequency even when only one channel is enabled in a
+ * buffered read. If it were not set, the N in ad7124_set_channel_odr()
+ * would be 1 and we would get a faster sampling frequency than what
+ * was requested.
+ */
+ return ad_sd_write_reg(&st->sd, AD7124_FILTER(cfg->cfg_slot), 3,
+ FIELD_PREP(AD7124_FILTER_FILTER, filter) |
+ FIELD_PREP(AD7124_FILTER_REJ60, rej60) |
+ FIELD_PREP(AD7124_FILTER_POST_FILTER, post) |
+ AD7124_FILTER_SINGLE_CYCLE |
+ FIELD_PREP(AD7124_FILTER_FS, cfg->odr_sel_bits));
}
static struct ad7124_channel_config *ad7124_pop_config(struct ad7124_state *st)
@@ -576,6 +734,33 @@ static const struct ad_sigma_delta_info ad7124_sigma_delta_info = {
.num_resetclks = 64,
};
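+/*
+ * Scales for PGA gains of 1 to 256, assuming the internal 2.5 V
+ * reference: 2500 mV / (gain * 2^23), listed as IIO_VAL_INT_PLUS_NANO.
+ */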
+static const int ad7124_voltage_scales[][2] = {
+ { 0, 1164 },
+ { 0, 2328 },
+ { 0, 4656 },
+ { 0, 9313 },
+ { 0, 18626 },
+ { 0, 37252 },
+ { 0, 74505 },
+ { 0, 149011 },
+ { 0, 298023 },
+};
+
+static int ad7124_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length, long info)
+{
+ switch (info) {
+ case IIO_CHAN_INFO_SCALE:
+ *vals = (const int *)ad7124_voltage_scales;
+ *type = IIO_VAL_INT_PLUS_NANO;
+ *length = ARRAY_SIZE(ad7124_voltage_scales) * 2;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
static int ad7124_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long info)
@@ -644,18 +829,59 @@ static int ad7124_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
- case IIO_CHAN_INFO_SAMP_FREQ:
- mutex_lock(&st->cfgs_lock);
- *val = st->channels[chan->address].cfg.odr;
- mutex_unlock(&st->cfgs_lock);
+ case IIO_CHAN_INFO_SAMP_FREQ: {
+ struct ad7124_channel_config *cfg = &st->channels[chan->address].cfg;
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
- mutex_lock(&st->cfgs_lock);
- *val = ad7124_get_3db_filter_freq(st, chan->scan_index);
- mutex_unlock(&st->cfgs_lock);
+ guard(mutex)(&st->cfgs_lock);
- return IIO_VAL_INT;
+ switch (cfg->filter_type) {
+ case AD7124_FILTER_TYPE_SINC3:
+ case AD7124_FILTER_TYPE_SINC3_REJ60:
+ case AD7124_FILTER_TYPE_SINC3_SINC1:
+ case AD7124_FILTER_TYPE_SINC4:
+ case AD7124_FILTER_TYPE_SINC4_REJ60:
+ case AD7124_FILTER_TYPE_SINC4_SINC1:
+ *val = ad7124_get_fclk_hz(st);
+ *val2 = ad7124_get_fadc_divisor(st, chan->address);
+ return IIO_VAL_FRACTIONAL;
+ /*
+ * Post filters force the chip to a fixed rate. These are the
+ * single-channel rates from the data sheet divided by 3 for
+ * the multi-channel case (data sheet doesn't explicitly state
+ * this but confirmed through testing).
+ */
+ case AD7124_FILTER_TYPE_SINC3_PF1:
+ *val = 300;
+ *val2 = 33;
+ return IIO_VAL_FRACTIONAL;
+ case AD7124_FILTER_TYPE_SINC3_PF2:
+ *val = 25;
+ *val2 = 3;
+ return IIO_VAL_FRACTIONAL;
+ case AD7124_FILTER_TYPE_SINC3_PF3:
+ *val = 20;
+ *val2 = 3;
+ return IIO_VAL_FRACTIONAL;
+ case AD7124_FILTER_TYPE_SINC3_PF4:
+ *val = 50;
+ *val2 = 9;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ }
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY: {
+ guard(mutex)(&st->cfgs_lock);
+
+ ret = ad7124_get_3db_filter_factor(st, chan->address);
+ if (ret < 0)
+ return ret;
+
+ /* 3dB point is the f_CLK rate times a fractional value */
+ *val = ret * ad7124_get_fclk_hz(st);
+ *val2 = MILLI * ad7124_get_fadc_divisor(st, chan->address);
+ return IIO_VAL_FRACTIONAL;
+ }
default:
return -EINVAL;
}
@@ -666,25 +892,24 @@ static int ad7124_write_raw(struct iio_dev *indio_dev,
int val, int val2, long info)
{
struct ad7124_state *st = iio_priv(indio_dev);
+ struct ad7124_channel_config *cfg = &st->channels[chan->address].cfg;
unsigned int res, gain, full_scale, vref;
- int ret = 0;
- mutex_lock(&st->cfgs_lock);
+ guard(mutex)(&st->cfgs_lock);
switch (info) {
case IIO_CHAN_INFO_SAMP_FREQ:
- if (val2 != 0 || val == 0) {
- ret = -EINVAL;
- break;
- }
+ if (val2 < 0 || val < 0 || (val2 == 0 && val == 0))
+ return -EINVAL;
- ad7124_set_channel_odr(st, chan->address, val);
- break;
+ cfg->requested_odr = val;
+ cfg->requested_odr_micro = val2;
+ ad7124_set_channel_odr(st, chan->address);
+
+ return 0;
case IIO_CHAN_INFO_SCALE:
- if (val != 0) {
- ret = -EINVAL;
- break;
- }
+ if (val != 0)
+ return -EINVAL;
if (st->channels[chan->address].cfg.bipolar)
full_scale = 1 << (chan->scan_type.realbits - 1);
@@ -700,13 +925,10 @@ static int ad7124_write_raw(struct iio_dev *indio_dev,
st->channels[chan->address].cfg.live = false;
st->channels[chan->address].cfg.pga_bits = res;
- break;
+ return 0;
default:
- ret = -EINVAL;
+ return -EINVAL;
}
-
- mutex_unlock(&st->cfgs_lock);
- return ret;
}
static int ad7124_reg_access(struct iio_dev *indio_dev,
@@ -730,18 +952,6 @@ static int ad7124_reg_access(struct iio_dev *indio_dev,
return ret;
}
-static IIO_CONST_ATTR(in_voltage_scale_available,
- "0.000001164 0.000002328 0.000004656 0.000009313 0.000018626 0.000037252 0.000074505 0.000149011 0.000298023");
-
-static struct attribute *ad7124_attributes[] = {
- &iio_const_attr_in_voltage_scale_available.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group ad7124_attrs_group = {
- .attrs = ad7124_attributes,
-};
-
static int ad7124_update_scan_mode(struct iio_dev *indio_dev,
const unsigned long *scan_mask)
{
@@ -750,7 +960,8 @@ static int ad7124_update_scan_mode(struct iio_dev *indio_dev,
int ret;
int i;
- mutex_lock(&st->cfgs_lock);
+ guard(mutex)(&st->cfgs_lock);
+
for (i = 0; i < st->num_channels; i++) {
bit_set = test_bit(i, scan_mask);
if (bit_set)
@@ -758,25 +969,20 @@ static int ad7124_update_scan_mode(struct iio_dev *indio_dev,
else
ret = ad7124_spi_write_mask(st, AD7124_CHANNEL(i), AD7124_CHANNEL_ENABLE,
0, 2);
- if (ret < 0) {
- mutex_unlock(&st->cfgs_lock);
-
+ if (ret < 0)
return ret;
- }
}
- mutex_unlock(&st->cfgs_lock);
-
return 0;
}
static const struct iio_info ad7124_info = {
+ .read_avail = ad7124_read_avail,
.read_raw = ad7124_read_raw,
.write_raw = ad7124_write_raw,
.debugfs_reg_access = &ad7124_reg_access,
.validate_trigger = ad_sd_validate_trigger,
.update_scan_mode = ad7124_update_scan_mode,
- .attrs = &ad7124_attrs_group,
};
/* Only called during probe, so dev_err_probe() can be used */
@@ -849,7 +1055,7 @@ enum {
static int ad7124_syscalib_locked(struct ad7124_state *st, const struct iio_chan_spec *chan)
{
struct device *dev = &st->sd.spi->dev;
- struct ad7124_channel *ch = &st->channels[chan->channel];
+ struct ad7124_channel *ch = &st->channels[chan->address];
int ret;
if (ch->syscalib_mode == AD7124_SYSCALIB_ZERO_SCALE) {
@@ -865,8 +1071,8 @@ static int ad7124_syscalib_locked(struct ad7124_state *st, const struct iio_chan
if (ret < 0)
return ret;
- dev_dbg(dev, "offset for channel %d after zero-scale calibration: 0x%x\n",
- chan->channel, ch->cfg.calibration_offset);
+ dev_dbg(dev, "offset for channel %lu after zero-scale calibration: 0x%x\n",
+ chan->address, ch->cfg.calibration_offset);
} else {
ch->cfg.calibration_gain = st->gain_default;
@@ -880,8 +1086,8 @@ static int ad7124_syscalib_locked(struct ad7124_state *st, const struct iio_chan
if (ret < 0)
return ret;
- dev_dbg(dev, "gain for channel %d after full-scale calibration: 0x%x\n",
- chan->channel, ch->cfg.calibration_gain);
+ dev_dbg(dev, "gain for channel %lu after full-scale calibration: 0x%x\n",
+ chan->address, ch->cfg.calibration_gain);
}
return 0;
@@ -924,7 +1130,7 @@ static int ad7124_set_syscalib_mode(struct iio_dev *indio_dev,
{
struct ad7124_state *st = iio_priv(indio_dev);
- st->channels[chan->channel].syscalib_mode = mode;
+ st->channels[chan->address].syscalib_mode = mode;
return 0;
}
@@ -934,7 +1140,7 @@ static int ad7124_get_syscalib_mode(struct iio_dev *indio_dev,
{
struct ad7124_state *st = iio_priv(indio_dev);
- return st->channels[chan->channel].syscalib_mode;
+ return st->channels[chan->address].syscalib_mode;
}
static const struct iio_enum ad7124_syscalib_mode_enum = {
@@ -944,6 +1150,52 @@ static const struct iio_enum ad7124_syscalib_mode_enum = {
.get = ad7124_get_syscalib_mode
};
+static const char * const ad7124_filter_types[] = {
+ [AD7124_FILTER_TYPE_SINC3] = "sinc3",
+ [AD7124_FILTER_TYPE_SINC3_PF1] = "sinc3+pf1",
+ [AD7124_FILTER_TYPE_SINC3_PF2] = "sinc3+pf2",
+ [AD7124_FILTER_TYPE_SINC3_PF3] = "sinc3+pf3",
+ [AD7124_FILTER_TYPE_SINC3_PF4] = "sinc3+pf4",
+ [AD7124_FILTER_TYPE_SINC3_REJ60] = "sinc3+rej60",
+ [AD7124_FILTER_TYPE_SINC3_SINC1] = "sinc3+sinc1",
+ [AD7124_FILTER_TYPE_SINC4] = "sinc4",
+ [AD7124_FILTER_TYPE_SINC4_REJ60] = "sinc4+rej60",
+ [AD7124_FILTER_TYPE_SINC4_SINC1] = "sinc4+sinc1",
+};
+
+static int ad7124_set_filter_type_attr(struct iio_dev *dev,
+ const struct iio_chan_spec *chan,
+ unsigned int value)
+{
+ struct ad7124_state *st = iio_priv(dev);
+ struct ad7124_channel_config *cfg = &st->channels[chan->address].cfg;
+
+ guard(mutex)(&st->cfgs_lock);
+
+ cfg->live = false;
+ cfg->filter_type = value;
+ ad7124_set_channel_odr(st, chan->address);
+
+ return 0;
+}
+
+static int ad7124_get_filter_type_attr(struct iio_dev *dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ad7124_state *st = iio_priv(dev);
+
+ guard(mutex)(&st->cfgs_lock);
+
+ return st->channels[chan->address].cfg.filter_type;
+}
+
+static const struct iio_enum ad7124_filter_type_enum = {
+ .items = ad7124_filter_types,
+ .num_items = ARRAY_SIZE(ad7124_filter_types),
+ .set = ad7124_set_filter_type_attr,
+ .get = ad7124_get_filter_type_attr,
+};
+
static const struct iio_chan_spec_ext_info ad7124_calibsys_ext_info[] = {
{
.name = "sys_calibration",
@@ -954,6 +1206,9 @@ static const struct iio_chan_spec_ext_info ad7124_calibsys_ext_info[] = {
&ad7124_syscalib_mode_enum),
IIO_ENUM_AVAILABLE("sys_calibration_mode", IIO_SHARED_BY_TYPE,
&ad7124_syscalib_mode_enum),
+ IIO_ENUM("filter_type", IIO_SEPARATE, &ad7124_filter_type_enum),
+ IIO_ENUM_AVAILABLE("filter_type", IIO_SHARED_BY_TYPE,
+ &ad7124_filter_type_enum),
{ }
};
@@ -966,6 +1221,7 @@ static const struct iio_chan_spec ad7124_channel_template = {
BIT(IIO_CHAN_INFO_OFFSET) |
BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE),
.scan_type = {
.sign = 'u',
.realbits = 24,
@@ -1111,24 +1367,122 @@ static int ad7124_parse_channel_config(struct iio_dev *indio_dev,
static int ad7124_setup(struct ad7124_state *st)
{
struct device *dev = &st->sd.spi->dev;
- unsigned int fclk, power_mode;
+ unsigned int power_mode, clk_sel;
+ struct clk *mclk;
int i, ret;
- fclk = clk_get_rate(st->mclk);
- if (!fclk)
- return dev_err_probe(dev, -EINVAL, "Failed to get mclk rate\n");
+ /*
+ * Always use full power mode for max performance. If needed, the driver
+ * could be adapted to use a dynamic power mode based on the requested
+ * output data rate.
+ */
+ power_mode = AD7124_ADC_CONTROL_POWER_MODE_FULL;
+
+ /*
+ * This "mclk" business is needed for backwards compatibility with old
+ * devicetrees that specified a fake clock named "mclk" to select the
+ * power mode.
+ */
+ mclk = devm_clk_get_optional_enabled(dev, "mclk");
+ if (IS_ERR(mclk))
+ return dev_err_probe(dev, PTR_ERR(mclk), "Failed to get mclk\n");
+
+ if (mclk) {
+ unsigned long mclk_hz;
- /* The power mode changes the master clock frequency */
- power_mode = ad7124_find_closest_match(ad7124_master_clk_freq_hz,
- ARRAY_SIZE(ad7124_master_clk_freq_hz),
- fclk);
- if (fclk != ad7124_master_clk_freq_hz[power_mode]) {
- ret = clk_set_rate(st->mclk, fclk);
+ mclk_hz = clk_get_rate(mclk);
+ if (!mclk_hz)
+ return dev_err_probe(dev, -EINVAL,
+ "Failed to get mclk rate\n");
+
+ /*
+ * This logic is a bit backwards, which is why it is only here
+ * for backwards compatibility. The driver should be able to set
+ * the power mode as it sees fit and the f_clk/mclk rate should
+ * be dynamic accordingly. But here, we are selecting a fixed
+ * power mode based on the given "mclk" rate.
+ */
+ power_mode = ad7124_find_closest_match(ad7124_master_clk_freq_hz,
+ ARRAY_SIZE(ad7124_master_clk_freq_hz), mclk_hz);
+
+ if (mclk_hz != ad7124_master_clk_freq_hz[power_mode]) {
+ ret = clk_set_rate(mclk, mclk_hz);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to set mclk rate\n");
+ }
+
+ clk_sel = AD7124_ADC_CONTROL_CLK_SEL_INT;
+ st->clk_hz = AD7124_INT_CLK_HZ;
+ } else if (!device_property_present(dev, "clocks") &&
+ device_property_present(dev, "#clock-cells")) {
+#ifdef CONFIG_COMMON_CLK
+ struct clk_hw *clk_hw;
+
+ const char *name __free(kfree) = kasprintf(GFP_KERNEL, "%pfwP-clk",
+ dev_fwnode(dev));
+ if (!name)
+ return -ENOMEM;
+
+ clk_hw = devm_clk_hw_register_fixed_rate(dev, name, NULL, 0,
+ AD7124_INT_CLK_HZ);
+ if (IS_ERR(clk_hw))
+ return dev_err_probe(dev, PTR_ERR(clk_hw),
+ "Failed to register clock provider\n");
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+ clk_hw);
if (ret)
- return dev_err_probe(dev, ret, "Failed to set mclk rate\n");
+ return dev_err_probe(dev, ret,
+ "Failed to add clock provider\n");
+#endif
+
+ /*
+ * Treat the clock as always on. This way we don't have to deal
+ * with someone trying to enable/disable the clock while we are
+ * reading samples.
+ */
+ clk_sel = AD7124_ADC_CONTROL_CLK_SEL_INT_OUT;
+ st->clk_hz = AD7124_INT_CLK_HZ;
+ } else {
+ struct clk *clk;
+
+ clk = devm_clk_get_optional_enabled(dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk),
+ "Failed to get external clock\n");
+
+ if (clk) {
+ unsigned long clk_hz;
+
+ clk_hz = clk_get_rate(clk);
+ if (!clk_hz)
+ return dev_err_probe(dev, -EINVAL,
+ "Failed to get external clock rate\n");
+
+ /*
+ * The external clock may be 4x the nominal clock rate,
+ * in which case the ADC needs to be configured to
+ * divide it by 4. Using MEGA is a bit arbitrary, but
+ * the expected clock rates are either 614.4 kHz or
+ * 2.4576 MHz, so this should work.
+ */
+ if (clk_hz > (1 * HZ_PER_MHZ)) {
+ clk_sel = AD7124_ADC_CONTROL_CLK_SEL_EXT_DIV4;
+ st->clk_hz = clk_hz / 4;
+ } else {
+ clk_sel = AD7124_ADC_CONTROL_CLK_SEL_EXT;
+ st->clk_hz = clk_hz;
+ }
+ } else {
+ clk_sel = AD7124_ADC_CONTROL_CLK_SEL_INT;
+ st->clk_hz = AD7124_INT_CLK_HZ;
+ }
}
- /* Set the power mode */
+ st->adc_control &= ~AD7124_ADC_CONTROL_CLK_SEL;
+ st->adc_control |= FIELD_PREP(AD7124_ADC_CONTROL_CLK_SEL, clk_sel);
+
st->adc_control &= ~AD7124_ADC_CONTROL_POWER_MODE;
st->adc_control |= FIELD_PREP(AD7124_ADC_CONTROL_POWER_MODE, power_mode);
@@ -1138,17 +1492,22 @@ static int ad7124_setup(struct ad7124_state *st)
mutex_init(&st->cfgs_lock);
INIT_KFIFO(st->live_cfgs_fifo);
for (i = 0; i < st->num_channels; i++) {
+ struct ad7124_channel_config *cfg = &st->channels[i].cfg;
- ret = ad7124_init_config_vref(st, &st->channels[i].cfg);
+ ret = ad7124_init_config_vref(st, cfg);
if (ret < 0)
return ret;
+ /* Default filter type on the ADC after reset. */
+ cfg->filter_type = AD7124_FILTER_TYPE_SINC4;
+
/*
* 9.38 SPS is the minimum output data rate supported
* regardless of the selected power mode. Round it up to 10 and
* set all channels to this default value.
*/
- ad7124_set_channel_odr(st, i, 10);
+ cfg->requested_odr = 10;
+ ad7124_set_channel_odr(st, i);
}
ad7124_disable_all(&st->sd);
@@ -1300,13 +1659,9 @@ static int ad7124_probe(struct spi_device *spi)
ret = devm_add_action_or_reset(&spi->dev, ad7124_reg_disable,
st->vref[i]);
if (ret)
- return dev_err_probe(dev, ret, "Failed to register disable handler for regulator #%d\n", i);
+ return ret;
}
- st->mclk = devm_clk_get_enabled(&spi->dev, "mclk");
- if (IS_ERR(st->mclk))
- return dev_err_probe(dev, PTR_ERR(st->mclk), "Failed to get mclk\n");
-
ret = ad7124_soft_reset(st);
if (ret < 0)
return ret;
diff --git a/drivers/iio/adc/ad7173.c b/drivers/iio/adc/ad7173.c
index 4413207be28f..d36612352b44 100644
--- a/drivers/iio/adc/ad7173.c
+++ b/drivers/iio/adc/ad7173.c
@@ -8,6 +8,7 @@
* AD7175-8/AD7176-2/AD7177-2
*
* Copyright (C) 2015, 2024 Analog Devices, Inc.
+ * Copyright (C) 2025 BayLibre, SAS
*/
#include <linux/array_size.h>
@@ -149,7 +150,12 @@
(pin2) < st->info->num_voltage_in && \
(pin2) >= st->info->num_voltage_in_div)
-#define AD7173_FILTER_ODR0_MASK GENMASK(5, 0)
+#define AD7173_FILTER_SINC3_MAP BIT(15)
+#define AD7173_FILTER_SINC3_MAP_DIV GENMASK(14, 0)
+#define AD7173_FILTER_ENHFILTEN BIT(11)
+#define AD7173_FILTER_ENHFILT_MASK GENMASK(10, 8)
+#define AD7173_FILTER_ORDER BIT(6)
+#define AD7173_FILTER_ODR_MASK GENMASK(5, 0)
#define AD7173_MAX_CONFIGS 8
#define AD4111_OW_DET_THRSH_MV 300
@@ -190,6 +196,15 @@ struct ad7173_device_info {
u8 num_gpios;
};
+enum ad7173_filter_type {
+ AD7173_FILTER_SINC3,
+ AD7173_FILTER_SINC5_SINC1,
+ AD7173_FILTER_SINC5_SINC1_PF1,
+ AD7173_FILTER_SINC5_SINC1_PF2,
+ AD7173_FILTER_SINC5_SINC1_PF3,
+ AD7173_FILTER_SINC5_SINC1_PF4,
+};
+
struct ad7173_channel_config {
/* Openwire detection threshold */
unsigned int openwire_thrsh_raw;
@@ -200,13 +215,15 @@ struct ad7173_channel_config {
/*
* Following fields are used to compare equality. If you
* make adaptations in it, you most likely also have to adapt
- * ad7173_find_live_config(), too.
+ * ad7173_is_setup_equal(), too.
*/
struct_group(config_props,
bool bipolar;
bool input_buf;
- u8 odr;
+ u16 sinc3_odr_div;
+ u8 sinc5_odr_index;
u8 ref_sel;
+ enum ad7173_filter_type filter_type;
);
};
@@ -266,6 +283,24 @@ static const unsigned int ad7175_sinc5_data_rates[] = {
5000, /* 20 */
};
+/**
+ * ad7173_sinc3_odr_div_from_odr() - Convert ODR to divider value
+ * @odr_millihz: ODR (sampling_frequency) in milliHz
+ * Returns: Divider value to program for the SINC3 filter.
+ */
+static u16 ad7173_sinc3_odr_div_from_odr(u32 odr_millihz)
+{
+ /*
+ * Divider is f_MOD (1 MHz) / 32 / ODR. ODR freq is in milliHz, so
+ * we need to convert f_MOD to the same units. When SING_CYC=1 or
+ * multiple channels are enabled (currently always the case), there
+ * is an additional factor of 3.
+ */
+ u32 div = DIV_ROUND_CLOSEST(MEGA * MILLI, odr_millihz * 32 * 3);
+ /* Avoid divide by 0 and limit to register field size. */
+ return clamp(div, 1U, AD7173_FILTER_SINC3_MAP_DIV);
+}
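
For instance (assumed request, following the formula above), asking for 50 Hz:

    u16 div = ad7173_sinc3_odr_div_from_odr(50000);  /* 50 Hz in milliHz */
    /* div == DIV_ROUND_CLOSEST(1000000 * 1000, 50000 * 32 * 3) == 208 */
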
+
static unsigned int ad4111_current_channel_config[] = {
/* Ain sel: pos neg */
0x1E8, /* 15:IIN0+ 8:IIN0− */
@@ -369,7 +404,48 @@ static const struct iio_enum ad7173_syscalib_mode_enum = {
.get = ad7173_get_syscalib_mode
};
-static const struct iio_chan_spec_ext_info ad7173_calibsys_ext_info[] = {
+static const char * const ad7173_filter_types_str[] = {
+ [AD7173_FILTER_SINC3] = "sinc3",
+ [AD7173_FILTER_SINC5_SINC1] = "sinc5+sinc1",
+ [AD7173_FILTER_SINC5_SINC1_PF1] = "sinc5+sinc1+pf1",
+ [AD7173_FILTER_SINC5_SINC1_PF2] = "sinc5+sinc1+pf2",
+ [AD7173_FILTER_SINC5_SINC1_PF3] = "sinc5+sinc1+pf3",
+ [AD7173_FILTER_SINC5_SINC1_PF4] = "sinc5+sinc1+pf4",
+};
+
+static int ad7173_set_filter_type(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int val)
+{
+ struct ad7173_state *st = iio_priv(indio_dev);
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ st->channels[chan->address].cfg.filter_type = val;
+ st->channels[chan->address].cfg.live = false;
+
+ iio_device_release_direct(indio_dev);
+
+ return 0;
+}
+
+static int ad7173_get_filter_type(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ad7173_state *st = iio_priv(indio_dev);
+
+ return st->channels[chan->address].cfg.filter_type;
+}
+
+static const struct iio_enum ad7173_filter_type_enum = {
+ .items = ad7173_filter_types_str,
+ .num_items = ARRAY_SIZE(ad7173_filter_types_str),
+ .set = ad7173_set_filter_type,
+ .get = ad7173_get_filter_type,
+};
+
+static const struct iio_chan_spec_ext_info ad7173_chan_spec_ext_info[] = {
{
.name = "sys_calibration",
.write = ad7173_write_syscalib,
@@ -379,6 +455,16 @@ static const struct iio_chan_spec_ext_info ad7173_calibsys_ext_info[] = {
&ad7173_syscalib_mode_enum),
IIO_ENUM_AVAILABLE("sys_calibration_mode", IIO_SHARED_BY_TYPE,
&ad7173_syscalib_mode_enum),
+ IIO_ENUM("filter_type", IIO_SEPARATE, &ad7173_filter_type_enum),
+ IIO_ENUM_AVAILABLE("filter_type", IIO_SHARED_BY_TYPE,
+ &ad7173_filter_type_enum),
+ { }
+};
+
+static const struct iio_chan_spec_ext_info ad7173_temp_chan_spec_ext_info[] = {
+ IIO_ENUM("filter_type", IIO_SEPARATE, &ad7173_filter_type_enum),
+ IIO_ENUM_AVAILABLE("filter_type", IIO_SHARED_BY_TYPE,
+ &ad7173_filter_type_enum),
{ }
};
@@ -561,12 +647,19 @@ static void ad7173_reset_usage_cnts(struct ad7173_state *st)
st->config_usage_counter = 0;
}
-static struct ad7173_channel_config *
-ad7173_find_live_config(struct ad7173_state *st, struct ad7173_channel_config *cfg)
+/**
+ * ad7173_is_setup_equal - Compare two channel setups
+ * @cfg1: First channel configuration
+ * @cfg2: Second channel configuration
+ *
+ * Compares all configuration options that affect the registers connected to
+ * SETUP_SEL, namely CONFIGx, FILTERx, GAINx and OFFSETx.
+ *
+ * Returns: true if the setups are identical, false otherwise
+ */
+static bool ad7173_is_setup_equal(const struct ad7173_channel_config *cfg1,
+ const struct ad7173_channel_config *cfg2)
{
- struct ad7173_channel_config *cfg_aux;
- int i;
-
/*
* This is just to make sure that the comparison is adapted after
* struct ad7173_channel_config was changed.
@@ -575,18 +668,30 @@ ad7173_find_live_config(struct ad7173_state *st, struct ad7173_channel_config *c
sizeof(struct {
bool bipolar;
bool input_buf;
- u8 odr;
+ u16 sinc3_odr_div;
+ u8 sinc5_odr_index;
u8 ref_sel;
+ enum ad7173_filter_type filter_type;
}));
+ return cfg1->bipolar == cfg2->bipolar &&
+ cfg1->input_buf == cfg2->input_buf &&
+ cfg1->sinc3_odr_div == cfg2->sinc3_odr_div &&
+ cfg1->sinc5_odr_index == cfg2->sinc5_odr_index &&
+ cfg1->ref_sel == cfg2->ref_sel &&
+ cfg1->filter_type == cfg2->filter_type;
+}
+
+static struct ad7173_channel_config *
+ad7173_find_live_config(struct ad7173_state *st, struct ad7173_channel_config *cfg)
+{
+ struct ad7173_channel_config *cfg_aux;
+ int i;
+
for (i = 0; i < st->num_channels; i++) {
cfg_aux = &st->channels[i].cfg;
- if (cfg_aux->live &&
- cfg->bipolar == cfg_aux->bipolar &&
- cfg->input_buf == cfg_aux->input_buf &&
- cfg->odr == cfg_aux->odr &&
- cfg->ref_sel == cfg_aux->ref_sel)
+ if (cfg_aux->live && ad7173_is_setup_equal(cfg, cfg_aux))
return cfg_aux;
}
return NULL;
@@ -615,6 +720,7 @@ static int ad7173_load_config(struct ad7173_state *st,
{
unsigned int config;
int free_cfg_slot, ret;
+ u8 post_filter_enable, post_filter_select;
free_cfg_slot = ida_alloc_range(&st->cfg_slots_status, 0,
st->info->num_configs - 1, GFP_KERNEL);
@@ -634,8 +740,49 @@ static int ad7173_load_config(struct ad7173_state *st,
if (ret)
return ret;
+ /*
+ * When the SINC3_MAP flag is enabled, the rest of the register has a
+ * different meaning. We use this option to allow the widest possible
+ * range of sampling frequencies with the SINC3 filter.
+ */
+ if (cfg->filter_type == AD7173_FILTER_SINC3)
+ return ad_sd_write_reg(&st->sd, AD7173_REG_FILTER(free_cfg_slot), 2,
+ FIELD_PREP(AD7173_FILTER_SINC3_MAP, 1) |
+ FIELD_PREP(AD7173_FILTER_SINC3_MAP_DIV,
+ cfg->sinc3_odr_div));
+
+ switch (cfg->filter_type) {
+ case AD7173_FILTER_SINC5_SINC1_PF1:
+ post_filter_enable = 1;
+ post_filter_select = 2;
+ break;
+ case AD7173_FILTER_SINC5_SINC1_PF2:
+ post_filter_enable = 1;
+ post_filter_select = 3;
+ break;
+ case AD7173_FILTER_SINC5_SINC1_PF3:
+ post_filter_enable = 1;
+ post_filter_select = 5;
+ break;
+ case AD7173_FILTER_SINC5_SINC1_PF4:
+ post_filter_enable = 1;
+ post_filter_select = 6;
+ break;
+ default:
+ post_filter_enable = 0;
+ post_filter_select = 0;
+ break;
+ }
+
return ad_sd_write_reg(&st->sd, AD7173_REG_FILTER(free_cfg_slot), 2,
- AD7173_FILTER_ODR0_MASK & cfg->odr);
+ FIELD_PREP(AD7173_FILTER_SINC3_MAP, 0) |
+ FIELD_PREP(AD7173_FILTER_ENHFILTEN,
+ post_filter_enable) |
+ FIELD_PREP(AD7173_FILTER_ENHFILT_MASK,
+ post_filter_select) |
+ FIELD_PREP(AD7173_FILTER_ORDER, 0) |
+ FIELD_PREP(AD7173_FILTER_ODR_MASK,
+ cfg->sinc5_odr_index));
}
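
To make the bit packing above concrete: a sinc5+sinc1+pf3 setup (enable = 1, select = 5) with an assumed ODR index of 10 would compose the FILTERx value as:

    /* ENHFILTEN: BIT(11) = 0x0800, ENHFILT: 5 << 8 = 0x0500, ODR: 0x000A */
    /* FILTERx = 0x0800 | 0x0500 | 0x000A = 0x0D0A                        */
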
static int ad7173_config_channel(struct ad7173_state *st, int addr)
@@ -746,6 +893,7 @@ static const struct ad_sigma_delta_info ad7173_sigma_delta_info_4_slots = {
.set_mode = ad7173_set_mode,
.has_registers = true,
.has_named_irqs = true,
+ .supports_spi_offload = true,
.addr_shift = 0,
.read_mask = BIT(6),
.status_ch_mask = GENMASK(3, 0),
@@ -762,6 +910,7 @@ static const struct ad_sigma_delta_info ad7173_sigma_delta_info_8_slots = {
.set_mode = ad7173_set_mode,
.has_registers = true,
.has_named_irqs = true,
+ .supports_spi_offload = true,
.addr_shift = 0,
.read_mask = BIT(6),
.status_ch_mask = GENMASK(3, 0),
@@ -778,6 +927,7 @@ static const struct ad_sigma_delta_info ad7173_sigma_delta_info_16_slots = {
.set_mode = ad7173_set_mode,
.has_registers = true,
.has_named_irqs = true,
+ .supports_spi_offload = true,
.addr_shift = 0,
.read_mask = BIT(6),
.status_ch_mask = GENMASK(3, 0),
@@ -1165,7 +1315,14 @@ static int ad7173_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
case IIO_CHAN_INFO_SAMP_FREQ:
- reg = st->channels[chan->address].cfg.odr;
+ if (st->channels[chan->address].cfg.filter_type == AD7173_FILTER_SINC3) {
+ /* Inverse operation of ad7173_sinc3_odr_div_from_odr() */
+ *val = MEGA;
+ *val2 = 3 * 32 * st->channels[chan->address].cfg.sinc3_odr_div;
+ return IIO_VAL_FRACTIONAL;
+ }
+
+ reg = st->channels[chan->address].cfg.sinc5_odr_index;
*val = st->info->sinc5_data_rates[reg] / MILLI;
*val2 = (st->info->sinc5_data_rates[reg] % MILLI) * (MICRO / MILLI);
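
Continuing the example divider (div = 208, assumed), the reported sinc3 rate is 1000000 / (3 * 32 * 208) = 1000000 / 19968 ≈ 50.08 Hz, so the rounding applied when computing the divider shifts the reported value slightly from the requested 50 Hz.
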
@@ -1203,6 +1360,10 @@ static int ad7173_write_raw(struct iio_dev *indio_dev,
*
* This will cause the reading of CH1 to be actually done once every
* 200.16ms, an effective rate of 4.99sps.
+ *
+ * Both the sinc5 and sinc3 rates are set here so that if the filter
+ * type is changed, the requested rate will still be set (aside from
+ * rounding differences).
*/
case IIO_CHAN_INFO_SAMP_FREQ:
freq = val * MILLI + val2 / MILLI;
@@ -1211,7 +1372,8 @@ static int ad7173_write_raw(struct iio_dev *indio_dev,
break;
cfg = &st->channels[chan->address].cfg;
- cfg->odr = i;
+ cfg->sinc5_odr_index = i;
+ cfg->sinc3_odr_div = ad7173_sinc3_odr_div_from_odr(freq);
cfg->live = false;
break;
@@ -1228,17 +1390,88 @@ static int ad7173_update_scan_mode(struct iio_dev *indio_dev,
const unsigned long *scan_mask)
{
struct ad7173_state *st = iio_priv(indio_dev);
- int i, ret;
+ u16 sinc3_count = 0;
+ u16 sinc3_div = 0;
+ int i, j, k, ret;
for (i = 0; i < indio_dev->num_channels; i++) {
- if (test_bit(i, scan_mask))
+ const struct ad7173_channel_config *cfg = &st->channels[i].cfg;
+
+ if (test_bit(i, scan_mask)) {
+ if (cfg->filter_type == AD7173_FILTER_SINC3) {
+ sinc3_count++;
+
+ if (sinc3_div == 0) {
+ sinc3_div = cfg->sinc3_odr_div;
+ } else if (sinc3_div != cfg->sinc3_odr_div) {
+ dev_err(&st->sd.spi->dev,
+ "All enabled channels must have the same sampling_frequency for sinc3 filter_type\n");
+ return -EINVAL;
+ }
+ }
+
ret = ad7173_set_channel(&st->sd, i);
- else
+ } else {
ret = ad_sd_write_reg(&st->sd, AD7173_REG_CH(i), 2, 0);
+ }
if (ret < 0)
return ret;
}
+ if (sinc3_count && sinc3_count < bitmap_weight(scan_mask, indio_dev->num_channels)) {
+ dev_err(&st->sd.spi->dev,
+ "All enabled channels must have sinc3 filter_type\n");
+ return -EINVAL;
+ }
+
+ /*
+ * On some chips, there are more channels than setups, so if there were
+ * more unique setups requested than the number of available slots,
+ * ad7173_set_channel() will have written over some of the slots. We
+ * can detect this by making sure each assigned cfg_slot matches the
+ * requested configuration. If it doesn't, we know that the slot was
+ * overwritten by a different channel.
+ */
+ for_each_set_bit(i, scan_mask, indio_dev->num_channels) {
+ const struct ad7173_channel_config *cfg1, *cfg2;
+
+ cfg1 = &st->channels[i].cfg;
+
+ for_each_set_bit(j, scan_mask, indio_dev->num_channels) {
+ cfg2 = &st->channels[j].cfg;
+
+ /*
+ * Only compare configs that are assigned to the same
+ * SETUP_SEL slot and don't compare channel to itself.
+ */
+ if (i == j || cfg1->cfg_slot != cfg2->cfg_slot)
+ continue;
+
+ /*
+ * If we find two different configs trying to use the
+ * same SETUP_SEL slot, then we know that we have too
+ * many unique configurations requested for
+ * the available slots and at least one was overwritten.
+ */
+ if (!ad7173_is_setup_equal(cfg1, cfg2)) {
+ /*
+ * At this point, there isn't a way to tell
+ * which setups are actually programmed in the
+ * ADC anymore. We could read them back to
+ * check, but it is simpler to just clear all
+ * of the live flags so that everything gets
+ * reprogrammed on the next attempt to read a sample.
+ */
+ for (k = 0; k < st->num_channels; k++)
+ st->channels[k].cfg.live = false;
+
+ dev_err(&st->sd.spi->dev,
+ "Too many unique channel configurations requested for scan\n");
+ return -EINVAL;
+ }
+ }
+ }
+
return 0;
}
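
A concrete failure case for the check above (assumed: a chip with 4 setup slots and 5 enabled channels, each requesting a unique setup): writing the fifth channel forces ad7173_set_channel() to overwrite one of the four slots, so two channels end up with the same cfg_slot but different requested setups. The pairwise ad7173_is_setup_equal() comparison then fails, every live flag is cleared, and -EINVAL is returned.
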
@@ -1333,7 +1566,7 @@ static const struct iio_chan_spec ad7173_channel_template = {
.storagebits = 32,
.endianness = IIO_BE,
},
- .ext_info = ad7173_calibsys_ext_info,
+ .ext_info = ad7173_chan_spec_ext_info,
};
static const struct iio_chan_spec ad7173_temp_iio_channel_template = {
@@ -1349,6 +1582,7 @@ static const struct iio_chan_spec ad7173_temp_iio_channel_template = {
.storagebits = 32,
.endianness = IIO_BE,
},
+ .ext_info = ad7173_temp_chan_spec_ext_info,
};
static void ad7173_disable_regulators(void *data)
@@ -1589,12 +1823,21 @@ static int ad7173_fw_parse_channel_config(struct iio_dev *indio_dev)
chan_st_priv->cfg.bipolar = false;
chan_st_priv->cfg.input_buf = st->info->has_input_buf;
chan_st_priv->cfg.ref_sel = AD7173_SETUP_REF_SEL_INT_REF;
- chan_st_priv->cfg.odr = st->info->odr_start_value;
+ chan_st_priv->cfg.sinc3_odr_div = ad7173_sinc3_odr_div_from_odr(
+ st->info->sinc5_data_rates[st->info->odr_start_value]
+ );
+ chan_st_priv->cfg.sinc5_odr_index = st->info->odr_start_value;
+ chan_st_priv->cfg.filter_type = AD7173_FILTER_SINC5_SINC1;
chan_st_priv->cfg.openwire_comp_chan = -1;
st->adc_mode |= AD7173_ADC_MODE_REF_EN;
if (st->info->data_reg_only_16bit)
chan_arr[chan_index].scan_type = ad4113_scan_type;
+ if (ad_sigma_delta_has_spi_offload(&st->sd)) {
+ chan_arr[chan_index].scan_type.storagebits = 32;
+ chan_arr[chan_index].scan_type.endianness = IIO_CPU;
+ }
+
chan_index++;
}
@@ -1656,7 +1899,11 @@ static int ad7173_fw_parse_channel_config(struct iio_dev *indio_dev)
chan->scan_index = chan_index;
chan->channel = ain[0];
chan_st_priv->cfg.input_buf = st->info->has_input_buf;
- chan_st_priv->cfg.odr = st->info->odr_start_value;
+ chan_st_priv->cfg.sinc3_odr_div = ad7173_sinc3_odr_div_from_odr(
+ st->info->sinc5_data_rates[st->info->odr_start_value]
+ );
+ chan_st_priv->cfg.sinc5_odr_index = st->info->odr_start_value;
+ chan_st_priv->cfg.filter_type = AD7173_FILTER_SINC5_SINC1;
chan_st_priv->cfg.openwire_comp_chan = -1;
chan_st_priv->cfg.bipolar = fwnode_property_read_bool(child, "bipolar");
@@ -1685,6 +1932,12 @@ static int ad7173_fw_parse_channel_config(struct iio_dev *indio_dev)
if (st->info->data_reg_only_16bit)
chan_arr[chan_index].scan_type = ad4113_scan_type;
+ /* Assume the SPI offload controller is the ad411x_ad717x HDL project. */
+ if (ad_sigma_delta_has_spi_offload(&st->sd)) {
+ chan_arr[chan_index].scan_type.storagebits = 32;
+ chan_arr[chan_index].scan_type.endianness = IIO_CPU;
+ }
+
chan_index++;
}
return 0;
@@ -1717,8 +1970,7 @@ static int ad7173_fw_parse_device_config(struct iio_dev *indio_dev)
ret = devm_add_action_or_reset(dev, ad7173_disable_regulators, st);
if (ret)
- return dev_err_probe(dev, ret,
- "Failed to add regulators disable action\n");
+ return ret;
ret = device_property_match_property_string(dev, "clock-names",
ad7173_clk_sel,
diff --git a/drivers/iio/adc/ad7380.c b/drivers/iio/adc/ad7380.c
index 6f7034b6c266..fa251dc1aae6 100644
--- a/drivers/iio/adc/ad7380.c
+++ b/drivers/iio/adc/ad7380.c
@@ -873,6 +873,7 @@ static const struct ad7380_chip_info adaq4381_4_chip_info = {
.has_hardware_gain = true,
.available_scan_masks = ad7380_4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
};
static const struct spi_offload_config ad7380_offload_config = {
diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c
index aea734aa06bd..1bec6657394c 100644
--- a/drivers/iio/adc/ad7476.c
+++ b/drivers/iio/adc/ad7476.c
@@ -6,6 +6,7 @@
* Copyright 2010 Analog Devices Inc.
*/
+#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -27,22 +28,24 @@
struct ad7476_state;
struct ad7476_chip_info {
- unsigned int int_vref_uv;
+ unsigned int int_vref_mv;
struct iio_chan_spec channel[2];
- /* channels used when convst gpio is defined */
- struct iio_chan_spec convst_channel[2];
void (*reset)(struct ad7476_state *);
+ void (*conversion_pre_op)(struct ad7476_state *st);
+ void (*conversion_post_op)(struct ad7476_state *st);
bool has_vref;
bool has_vdrive;
+ bool convstart_required;
};
struct ad7476_state {
struct spi_device *spi;
const struct ad7476_chip_info *chip_info;
- struct regulator *ref_reg;
struct gpio_desc *convst_gpio;
struct spi_transfer xfer;
struct spi_message msg;
+ struct iio_chan_spec channel[2];
+ int scale_mv;
/*
* DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
@@ -52,40 +55,29 @@ struct ad7476_state {
unsigned char data[ALIGN(2, sizeof(s64)) + sizeof(s64)] __aligned(IIO_DMA_MINALIGN);
};
-enum ad7476_supported_device_ids {
- ID_AD7091,
- ID_AD7091R,
- ID_AD7273,
- ID_AD7274,
- ID_AD7276,
- ID_AD7277,
- ID_AD7278,
- ID_AD7466,
- ID_AD7467,
- ID_AD7468,
- ID_AD7475,
- ID_AD7495,
- ID_AD7940,
- ID_ADC081S,
- ID_ADC101S,
- ID_ADC121S,
- ID_ADS7866,
- ID_ADS7867,
- ID_ADS7868,
- ID_LTC2314_14,
-};
-
static void ad7091_convst(struct ad7476_state *st)
{
if (!st->convst_gpio)
return;
- gpiod_set_value(st->convst_gpio, 0);
+ gpiod_set_value_cansleep(st->convst_gpio, 0);
udelay(1); /* CONVST pulse width: 10 ns min */
- gpiod_set_value(st->convst_gpio, 1);
+ gpiod_set_value_cansleep(st->convst_gpio, 1);
udelay(1); /* Conversion time: 650 ns max */
}
+static void bd79105_convst_disable(struct ad7476_state *st)
+{
+ gpiod_set_value_cansleep(st->convst_gpio, 0);
+}
+
+static void bd79105_convst_enable(struct ad7476_state *st)
+{
+ gpiod_set_value_cansleep(st->convst_gpio, 1);
+ /* Worst case, 2790 ns required for conversion */
+ ndelay(2790);
+}
+
static irqreturn_t ad7476_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
@@ -93,7 +85,8 @@ static irqreturn_t ad7476_trigger_handler(int irq, void *p)
struct ad7476_state *st = iio_priv(indio_dev);
int b_sent;
- ad7091_convst(st);
+ if (st->chip_info->conversion_pre_op)
+ st->chip_info->conversion_pre_op(st);
b_sent = spi_sync(st->spi, &st->msg);
if (b_sent < 0)
@@ -102,6 +95,8 @@ static irqreturn_t ad7476_trigger_handler(int irq, void *p)
iio_push_to_buffers_with_ts(indio_dev, st->data, sizeof(st->data),
iio_get_time_ns(indio_dev));
done:
+ if (st->chip_info->conversion_post_op)
+ st->chip_info->conversion_post_op(st);
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
@@ -117,12 +112,16 @@ static int ad7476_scan_direct(struct ad7476_state *st)
{
int ret;
- ad7091_convst(st);
+ if (st->chip_info->conversion_pre_op)
+ st->chip_info->conversion_pre_op(st);
ret = spi_sync(st->spi, &st->msg);
if (ret)
return ret;
+ if (st->chip_info->conversion_post_op)
+ st->chip_info->conversion_post_op(st);
+
return be16_to_cpup((__be16 *)st->data);
}
@@ -134,7 +133,6 @@ static int ad7476_read_raw(struct iio_dev *indio_dev,
{
int ret;
struct ad7476_state *st = iio_priv(indio_dev);
- int scale_uv;
switch (m) {
case IIO_CHAN_INFO_RAW:
@@ -145,18 +143,11 @@ static int ad7476_read_raw(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
- *val = (ret >> st->chip_info->channel[0].scan_type.shift) &
- GENMASK(st->chip_info->channel[0].scan_type.realbits - 1, 0);
+ *val = (ret >> chan->scan_type.shift) &
+ GENMASK(chan->scan_type.realbits - 1, 0);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- if (st->ref_reg) {
- scale_uv = regulator_get_voltage(st->ref_reg);
- if (scale_uv < 0)
- return scale_uv;
- } else {
- scale_uv = st->chip_info->int_vref_uv;
- }
- *val = scale_uv / 1000;
+ *val = st->scale_mv;
*val2 = chan->scan_type.realbits;
return IIO_VAL_FRACTIONAL_LOG2;
}
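
With IIO_VAL_FRACTIONAL_LOG2 the reported scale works out to scale_mv / 2^realbits; as an assumed example, an ad7091r running from its 2.5 V internal reference with a 12-bit channel reports 2500 / 4096 ≈ 0.6104 mV per LSB.
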
@@ -185,125 +176,147 @@ static int ad7476_read_raw(struct iio_dev *indio_dev,
#define AD7940_CHAN(bits) _AD7476_CHAN((bits), 15 - (bits), \
BIT(IIO_CHAN_INFO_RAW))
#define AD7091R_CHAN(bits) _AD7476_CHAN((bits), 16 - (bits), 0)
-#define AD7091R_CONVST_CHAN(bits) _AD7476_CHAN((bits), 16 - (bits), \
- BIT(IIO_CHAN_INFO_RAW))
#define ADS786X_CHAN(bits) _AD7476_CHAN((bits), 12 - (bits), \
BIT(IIO_CHAN_INFO_RAW))
-static const struct ad7476_chip_info ad7476_chip_info_tbl[] = {
- [ID_AD7091] = {
- .channel[0] = AD7091R_CHAN(12),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- .convst_channel[0] = AD7091R_CONVST_CHAN(12),
- .convst_channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- .reset = ad7091_reset,
- },
- [ID_AD7091R] = {
- .channel[0] = AD7091R_CHAN(12),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- .convst_channel[0] = AD7091R_CONVST_CHAN(12),
- .convst_channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- .int_vref_uv = 2500000,
- .has_vref = true,
- .reset = ad7091_reset,
- },
- [ID_AD7273] = {
- .channel[0] = AD7940_CHAN(10),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- .has_vref = true,
- },
- [ID_AD7274] = {
- .channel[0] = AD7940_CHAN(12),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- .has_vref = true,
- },
- [ID_AD7276] = {
- .channel[0] = AD7940_CHAN(12),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- },
- [ID_AD7277] = {
- .channel[0] = AD7940_CHAN(10),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- },
- [ID_AD7278] = {
- .channel[0] = AD7940_CHAN(8),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- },
- [ID_AD7466] = {
- .channel[0] = AD7476_CHAN(12),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- },
- [ID_AD7467] = {
- .channel[0] = AD7476_CHAN(10),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- },
- [ID_AD7468] = {
- .channel[0] = AD7476_CHAN(8),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- },
- [ID_AD7475] = {
- .channel[0] = AD7476_CHAN(12),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- .has_vref = true,
- .has_vdrive = true,
- },
- [ID_AD7495] = {
- .channel[0] = AD7476_CHAN(12),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- .int_vref_uv = 2500000,
- .has_vdrive = true,
- },
- [ID_AD7940] = {
- .channel[0] = AD7940_CHAN(14),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- },
- [ID_ADC081S] = {
- .channel[0] = ADC081S_CHAN(8),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- },
- [ID_ADC101S] = {
- .channel[0] = ADC081S_CHAN(10),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- },
- [ID_ADC121S] = {
- .channel[0] = ADC081S_CHAN(12),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- },
- [ID_ADS7866] = {
- .channel[0] = ADS786X_CHAN(12),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- },
- [ID_ADS7867] = {
- .channel[0] = ADS786X_CHAN(10),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- },
- [ID_ADS7868] = {
- .channel[0] = ADS786X_CHAN(8),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- },
- [ID_LTC2314_14] = {
- .channel[0] = AD7940_CHAN(14),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- .has_vref = true,
- },
+static const struct ad7476_chip_info ad7091_chip_info = {
+ .channel[0] = AD7091R_CHAN(12),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ .conversion_pre_op = ad7091_convst,
+ .reset = ad7091_reset,
};
-static const struct iio_info ad7476_info = {
- .read_raw = &ad7476_read_raw,
+static const struct ad7476_chip_info ad7091r_chip_info = {
+ .channel[0] = AD7091R_CHAN(12),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ .conversion_pre_op = ad7091_convst,
+ .int_vref_mv = 2500,
+ .has_vref = true,
+ .reset = ad7091_reset,
};
-static void ad7476_reg_disable(void *data)
-{
- struct regulator *reg = data;
+static const struct ad7476_chip_info ad7273_chip_info = {
+ .channel[0] = AD7940_CHAN(10),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ .has_vref = true,
+};
- regulator_disable(reg);
-}
+static const struct ad7476_chip_info ad7274_chip_info = {
+ .channel[0] = AD7940_CHAN(12),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ .has_vref = true,
+};
+
+static const struct ad7476_chip_info ad7276_chip_info = {
+ .channel[0] = AD7940_CHAN(12),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct ad7476_chip_info ad7277_chip_info = {
+ .channel[0] = AD7940_CHAN(10),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct ad7476_chip_info ad7278_chip_info = {
+ .channel[0] = AD7940_CHAN(8),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct ad7476_chip_info ad7466_chip_info = {
+ .channel[0] = AD7476_CHAN(12),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct ad7476_chip_info ad7467_chip_info = {
+ .channel[0] = AD7476_CHAN(10),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct ad7476_chip_info ad7468_chip_info = {
+ .channel[0] = AD7476_CHAN(8),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct ad7476_chip_info ad7475_chip_info = {
+ .channel[0] = AD7476_CHAN(12),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ .has_vref = true,
+ .has_vdrive = true,
+};
+
+static const struct ad7476_chip_info ad7495_chip_info = {
+ .channel[0] = AD7476_CHAN(12),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ .int_vref_mv = 2500,
+ .has_vdrive = true,
+};
+
+static const struct ad7476_chip_info ad7940_chip_info = {
+ .channel[0] = AD7940_CHAN(14),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct ad7476_chip_info adc081s_chip_info = {
+ .channel[0] = ADC081S_CHAN(8),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct ad7476_chip_info adc101s_chip_info = {
+ .channel[0] = ADC081S_CHAN(10),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct ad7476_chip_info adc121s_chip_info = {
+ .channel[0] = ADC081S_CHAN(12),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct ad7476_chip_info ads7866_chip_info = {
+ .channel[0] = ADS786X_CHAN(12),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct ad7476_chip_info ads7867_chip_info = {
+ .channel[0] = ADS786X_CHAN(10),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct ad7476_chip_info ads7868_chip_info = {
+ .channel[0] = ADS786X_CHAN(8),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct ad7476_chip_info ltc2314_14_chip_info = {
+ .channel[0] = AD7940_CHAN(14),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ .has_vref = true,
+};
+
+static const struct ad7476_chip_info bd79105_chip_info = {
+ .channel[0] = AD7091R_CHAN(16),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ /*
+ * The BD79105 starts ADC data conversion when the CONVSTART line is
+ * set HIGH. The CONVSTART must be kept HIGH until the data has been
+ * read from the ADC.
+ */
+ .conversion_pre_op = bd79105_convst_enable,
+ .conversion_post_op = bd79105_convst_disable,
+ /* The BD79105 won't do a conversion without CONVSTART */
+ .convstart_required = true,
+ .has_vref = true,
+ .has_vdrive = true,
+};
+
+static const struct iio_info ad7476_info = {
+ .read_raw = &ad7476_read_raw,
+};
static int ad7476_probe(struct spi_device *spi)
{
struct ad7476_state *st;
struct iio_dev *indio_dev;
- struct regulator *reg;
+ unsigned int i;
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
@@ -311,61 +324,37 @@ static int ad7476_probe(struct spi_device *spi)
return -ENOMEM;
st = iio_priv(indio_dev);
- st->chip_info =
- &ad7476_chip_info_tbl[spi_get_device_id(spi)->driver_data];
- reg = devm_regulator_get(&spi->dev, "vcc");
- if (IS_ERR(reg))
- return PTR_ERR(reg);
+ st->chip_info = spi_get_device_match_data(spi);
+ if (!st->chip_info)
+ return -ENODEV;
- ret = regulator_enable(reg);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(&spi->dev, ad7476_reg_disable, reg);
- if (ret)
- return ret;
-
- /* Either vcc or vref (below) as appropriate */
- if (!st->chip_info->int_vref_uv)
- st->ref_reg = reg;
+ /* Use VCC for reference voltage if vref / internal vref aren't used */
+ if (!st->chip_info->int_vref_mv && !st->chip_info->has_vref) {
+ ret = devm_regulator_get_enable_read_voltage(&spi->dev, "vcc");
+ if (ret < 0)
+ return ret;
+ st->scale_mv = ret / 1000;
+ } else {
+ ret = devm_regulator_get_enable(&spi->dev, "vcc");
+ if (ret < 0)
+ return ret;
+ }
if (st->chip_info->has_vref) {
-
- /* If a device has an internal reference vref is optional */
- if (st->chip_info->int_vref_uv) {
- reg = devm_regulator_get_optional(&spi->dev, "vref");
- if (IS_ERR(reg) && (PTR_ERR(reg) != -ENODEV))
- return PTR_ERR(reg);
- } else {
- reg = devm_regulator_get(&spi->dev, "vref");
- if (IS_ERR(reg))
- return PTR_ERR(reg);
- }
-
- if (!IS_ERR(reg)) {
- ret = regulator_enable(reg);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(&spi->dev,
- ad7476_reg_disable,
- reg);
- if (ret)
+ ret = devm_regulator_get_enable_read_voltage(&spi->dev, "vref");
+ if (ret < 0) {
+ /* Vref is optional if a device has an internal reference */
+ if (!st->chip_info->int_vref_mv || ret != -ENODEV)
return ret;
- st->ref_reg = reg;
} else {
- /*
- * Can only get here if device supports both internal
- * and external reference, but the regulator connected
- * to the external reference is not connected.
- * Set the reference regulator pointer to NULL to
- * indicate this.
- */
- st->ref_reg = NULL;
+ st->scale_mv = ret / 1000;
}
}
+ if (!st->scale_mv)
+ st->scale_mv = st->chip_info->int_vref_mv;
+
if (st->chip_info->has_vdrive) {
ret = devm_regulator_get_enable(&spi->dev, "vdrive");
if (ret)
@@ -378,20 +367,35 @@ static int ad7476_probe(struct spi_device *spi)
if (IS_ERR(st->convst_gpio))
return PTR_ERR(st->convst_gpio);
+ if (st->chip_info->convstart_required && !st->convst_gpio)
+ return dev_err_probe(&spi->dev, -EINVAL, "No convstart GPIO\n");
+
+ /*
+ * This will never happen unless someone changes the channel specs
+ * in this driver. And if someone does that without changing the loop
+ * below, then we'd better immediately produce a big fat error, before
+ * the change proceeds beyond that developer's desk.
+ */
+ static_assert(ARRAY_SIZE(st->channel) == ARRAY_SIZE(st->chip_info->channel));
+ for (i = 0; i < ARRAY_SIZE(st->channel); i++) {
+ st->channel[i] = st->chip_info->channel[i];
+ if (st->convst_gpio)
+ __set_bit(IIO_CHAN_INFO_RAW,
+ &st->channel[i].info_mask_separate);
+ }
+
st->spi = spi;
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = st->chip_info->channel;
- indio_dev->num_channels = 2;
+ indio_dev->channels = st->channel;
+ indio_dev->num_channels = ARRAY_SIZE(st->channel);
indio_dev->info = &ad7476_info;
- if (st->convst_gpio)
- indio_dev->channels = st->chip_info->convst_channel;
/* Setup default message */
st->xfer.rx_buf = &st->data;
- st->xfer.len = st->chip_info->channel[0].scan_type.storagebits / 8;
+ st->xfer.len = indio_dev->channels[0].scan_type.storagebits / 8;
spi_message_init(&st->msg);
spi_message_add_tail(&st->xfer, &st->msg);
@@ -408,41 +412,42 @@ static int ad7476_probe(struct spi_device *spi)
}
static const struct spi_device_id ad7476_id[] = {
- { "ad7091", ID_AD7091 },
- { "ad7091r", ID_AD7091R },
- { "ad7273", ID_AD7273 },
- { "ad7274", ID_AD7274 },
- { "ad7276", ID_AD7276},
- { "ad7277", ID_AD7277 },
- { "ad7278", ID_AD7278 },
- { "ad7466", ID_AD7466 },
- { "ad7467", ID_AD7467 },
- { "ad7468", ID_AD7468 },
- { "ad7475", ID_AD7475 },
- { "ad7476", ID_AD7466 },
- { "ad7476a", ID_AD7466 },
- { "ad7477", ID_AD7467 },
- { "ad7477a", ID_AD7467 },
- { "ad7478", ID_AD7468 },
- { "ad7478a", ID_AD7468 },
- { "ad7495", ID_AD7495 },
- { "ad7910", ID_AD7467 },
- { "ad7920", ID_AD7466 },
- { "ad7940", ID_AD7940 },
- { "adc081s", ID_ADC081S },
- { "adc101s", ID_ADC101S },
- { "adc121s", ID_ADC121S },
- { "ads7866", ID_ADS7866 },
- { "ads7867", ID_ADS7867 },
- { "ads7868", ID_ADS7868 },
+ { "ad7091", (kernel_ulong_t)&ad7091_chip_info },
+ { "ad7091r", (kernel_ulong_t)&ad7091r_chip_info },
+ { "ad7273", (kernel_ulong_t)&ad7273_chip_info },
+ { "ad7274", (kernel_ulong_t)&ad7274_chip_info },
+ { "ad7276", (kernel_ulong_t)&ad7276_chip_info },
+ { "ad7277", (kernel_ulong_t)&ad7277_chip_info },
+ { "ad7278", (kernel_ulong_t)&ad7278_chip_info },
+ { "ad7466", (kernel_ulong_t)&ad7466_chip_info },
+ { "ad7467", (kernel_ulong_t)&ad7467_chip_info },
+ { "ad7468", (kernel_ulong_t)&ad7468_chip_info },
+ { "ad7475", (kernel_ulong_t)&ad7475_chip_info },
+ { "ad7476", (kernel_ulong_t)&ad7466_chip_info },
+ { "ad7476a", (kernel_ulong_t)&ad7466_chip_info },
+ { "ad7477", (kernel_ulong_t)&ad7467_chip_info },
+ { "ad7477a", (kernel_ulong_t)&ad7467_chip_info },
+ { "ad7478", (kernel_ulong_t)&ad7468_chip_info },
+ { "ad7478a", (kernel_ulong_t)&ad7468_chip_info },
+ { "ad7495", (kernel_ulong_t)&ad7495_chip_info },
+ { "ad7910", (kernel_ulong_t)&ad7467_chip_info },
+ { "ad7920", (kernel_ulong_t)&ad7466_chip_info },
+ { "ad7940", (kernel_ulong_t)&ad7940_chip_info },
+ { "adc081s", (kernel_ulong_t)&adc081s_chip_info },
+ { "adc101s", (kernel_ulong_t)&adc101s_chip_info },
+ { "adc121s", (kernel_ulong_t)&adc121s_chip_info },
+ { "ads7866", (kernel_ulong_t)&ads7866_chip_info },
+ { "ads7867", (kernel_ulong_t)&ads7867_chip_info },
+ { "ads7868", (kernel_ulong_t)&ads7868_chip_info },
+ { "bd79105", (kernel_ulong_t)&bd79105_chip_info },
/*
* The ROHM BU79100G is identical to the TI's ADS7866 from the software
* point of view. The binding document mandates the ADS7866 to be
* marked as a fallback for the BU79100G, but we still need the SPI ID
* here to make the module loading work.
*/
- { "bu79100g", ID_ADS7866 },
- { "ltc2314-14", ID_LTC2314_14 },
+ { "bu79100g", (kernel_ulong_t)&ads7866_chip_info },
+ { "ltc2314-14", (kernel_ulong_t)&ltc2314_14_chip_info },
{ }
};
MODULE_DEVICE_TABLE(spi, ad7476_id);
diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c
index a2e061f0cb08..872c88d0c86c 100644
--- a/drivers/iio/adc/ad7768-1.c
+++ b/drivers/iio/adc/ad7768-1.c
@@ -217,7 +217,7 @@ struct ad7768_state {
struct spi_device *spi;
struct regmap *regmap;
struct regmap *regmap24;
- struct regulator *vref;
+ int vref_uv;
struct regulator_dev *vcm_rdev;
unsigned int vcm_output_sel;
struct clk *mclk;
@@ -673,7 +673,7 @@ static int ad7768_gpio_init(struct iio_dev *indio_dev)
.direction_input = ad7768_gpio_direction_input,
.direction_output = ad7768_gpio_direction_output,
.get = ad7768_gpio_get,
- .set_rv = ad7768_gpio_set,
+ .set = ad7768_gpio_set,
.owner = THIS_MODULE,
};
@@ -687,8 +687,6 @@ static int ad7768_set_freq(struct ad7768_state *st,
int ret;
freq = clamp(freq, 50, 1024000);
- if (freq == 0)
- return -EINVAL;
mclk_div = DIV_ROUND_CLOSEST(st->mclk_freq, freq * st->oversampling_ratio);
/* Find the closest match for the desired sampling frequency */
@@ -776,7 +774,7 @@ static int ad7768_read_raw(struct iio_dev *indio_dev,
{
struct ad7768_state *st = iio_priv(indio_dev);
const struct iio_scan_type *scan_type;
- int scale_uv, ret, temp;
+ int ret, temp;
scan_type = iio_get_current_scan_type(indio_dev, chan);
if (IS_ERR(scan_type))
@@ -797,11 +795,7 @@ static int ad7768_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- scale_uv = regulator_get_voltage(st->vref);
- if (scale_uv < 0)
- return scale_uv;
-
- *val = (scale_uv * 2) / 1000;
+ *val = (st->vref_uv * 2) / 1000;
*val2 = scan_type->realbits;
return IIO_VAL_FRACTIONAL_LOG2;
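
The factor of 2 presumably covers the bipolar input span; as an assumed example, a 4.096 V reference gives *val = (4096000 * 2) / 1000 = 8192 mV, and with a 24-bit scan type the resulting scale is 8192 / 2^24 ≈ 0.000488 mV (about 0.49 µV) per code.
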
@@ -1134,13 +1128,6 @@ static const struct iio_trigger_ops ad7768_trigger_ops = {
.validate_device = iio_trigger_validate_own_device,
};
-static void ad7768_regulator_disable(void *data)
-{
- struct ad7768_state *st = data;
-
- regulator_disable(st->vref);
-}
-
static int ad7768_set_channel_label(struct iio_dev *indio_dev,
int num_channels)
{
@@ -1372,19 +1359,11 @@ static int ad7768_probe(struct spi_device *spi)
return dev_err_probe(&spi->dev, PTR_ERR(st->regmap24),
"Failed to initialize regmap24");
- st->vref = devm_regulator_get(&spi->dev, "vref");
- if (IS_ERR(st->vref))
- return PTR_ERR(st->vref);
-
- ret = regulator_enable(st->vref);
- if (ret) {
- dev_err(&spi->dev, "Failed to enable specified vref supply\n");
- return ret;
- }
-
- ret = devm_add_action_or_reset(&spi->dev, ad7768_regulator_disable, st);
- if (ret)
- return ret;
+ ret = devm_regulator_get_enable_read_voltage(&spi->dev, "vref");
+ if (ret < 0)
+ return dev_err_probe(&spi->dev, ret,
+ "Failed to get VREF voltage\n");
+ st->vref_uv = ret;
st->mclk = devm_clk_get_enabled(&spi->dev, "mclk");
if (IS_ERR(st->mclk))
diff --git a/drivers/iio/adc/ad7779.c b/drivers/iio/adc/ad7779.c
index 845adc510239..aac5049c9a07 100644
--- a/drivers/iio/adc/ad7779.c
+++ b/drivers/iio/adc/ad7779.c
@@ -25,6 +25,7 @@
#include <linux/units.h>
#include <linux/iio/iio.h>
+#include <linux/iio/backend.h>
#include <linux/iio/buffer.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/trigger.h>
@@ -145,6 +146,7 @@ struct ad7779_state {
struct completion completion;
unsigned int sampling_freq;
enum ad7779_filter filter_enabled;
+ struct iio_backend *back;
/*
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache lines.
@@ -630,12 +632,38 @@ static int ad7779_reset(struct iio_dev *indio_dev, struct gpio_desc *reset_gpio)
return ret;
}
+static int ad7779_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct ad7779_state *st = iio_priv(indio_dev);
+ unsigned int c;
+ int ret;
+
+ for (c = 0; c < AD7779_NUM_CHANNELS; c++) {
+ if (test_bit(c, scan_mask))
+ ret = iio_backend_chan_enable(st->back, c);
+ else
+ ret = iio_backend_chan_disable(st->back, c);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct iio_info ad7779_info = {
.read_raw = ad7779_read_raw,
.write_raw = ad7779_write_raw,
.debugfs_reg_access = &ad7779_reg_access,
};
+static const struct iio_info ad7779_info_data = {
+ .read_raw = ad7779_read_raw,
+ .write_raw = ad7779_write_raw,
+ .debugfs_reg_access = &ad7779_reg_access,
+ .update_scan_mode = &ad7779_update_scan_mode,
+};
+
static const struct iio_enum ad7779_filter_enum = {
.items = ad7779_filter_type,
.num_items = ARRAY_SIZE(ad7779_filter_type),
@@ -752,6 +780,125 @@ static int ad7779_conf(struct ad7779_state *st, struct gpio_desc *start_gpio)
return 0;
}
+static int ad7779_set_data_lines(struct iio_dev *indio_dev, u32 num_lanes)
+{
+ struct ad7779_state *st = iio_priv(indio_dev);
+ int ret;
+
+ if (num_lanes != 1 && num_lanes != 2 && num_lanes != 4)
+ return -EINVAL;
+
+ ret = ad7779_set_sampling_frequency(st, num_lanes * AD7779_DEFAULT_SAMPLING_1LINE);
+ if (ret)
+ return ret;
+
+ ret = iio_backend_num_lanes_set(st->back, num_lanes);
+ if (ret)
+ return ret;
+
+ return ad7779_spi_write_mask(st, AD7779_REG_DOUT_FORMAT,
+ AD7779_DOUT_FORMAT_MSK,
+ FIELD_PREP(AD7779_DOUT_FORMAT_MSK, 2 - ilog2(num_lanes)));
+}
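
Spelled out, the DOUT_FORMAT expression above maps the lane count as 2 - ilog2(num_lanes):

    /* num_lanes == 1 -> field 2, num_lanes == 2 -> field 1, num_lanes == 4 -> field 0 */

and the default sampling frequency passed to ad7779_set_sampling_frequency() scales linearly with the lane count.
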
+
+static int ad7779_setup_channels(struct iio_dev *indio_dev, const struct ad7779_state *st)
+{
+ struct iio_chan_spec *channels;
+ struct device *dev = &st->spi->dev;
+
+ channels = devm_kmemdup_array(dev, st->chip_info->channels,
+ ARRAY_SIZE(ad7779_channels),
+ sizeof(*channels), GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ for (unsigned int i = 0; i < ARRAY_SIZE(ad7779_channels); i++)
+ channels[i].scan_type.endianness = IIO_CPU;
+
+ indio_dev->channels = channels;
+ indio_dev->num_channels = ARRAY_SIZE(ad7779_channels);
+
+ return 0;
+}
+
+static int ad7779_setup_without_backend(struct ad7779_state *st, struct iio_dev *indio_dev)
+{
+ int ret;
+ struct device *dev = &st->spi->dev;
+
+ indio_dev->info = &ad7779_info;
+ indio_dev->channels = st->chip_info->channels;
+ indio_dev->num_channels = ARRAY_SIZE(ad7779_channels);
+
+ st->trig = devm_iio_trigger_alloc(dev, "%s-dev%d", indio_dev->name,
+ iio_device_id(indio_dev));
+ if (!st->trig)
+ return -ENOMEM;
+
+ st->trig->ops = &ad7779_trigger_ops;
+
+ iio_trigger_set_drvdata(st->trig, st);
+
+ ret = devm_request_irq(dev, st->spi->irq, iio_trigger_generic_data_rdy_poll,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN, indio_dev->name,
+ st->trig);
+ if (ret)
+ return dev_err_probe(dev, ret, "request IRQ %d failed\n",
+ st->spi->irq);
+
+ ret = devm_iio_trigger_register(dev, st->trig);
+ if (ret)
+ return ret;
+
+ indio_dev->trig = iio_trigger_get(st->trig);
+
+ init_completion(&st->completion);
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ &iio_pollfunc_store_time,
+ &ad7779_trigger_handler,
+ &ad7779_buffer_setup_ops);
+ if (ret)
+ return ret;
+
+ return ad7779_spi_write_mask(st, AD7779_REG_DOUT_FORMAT,
+ AD7779_DCLK_CLK_DIV_MSK,
+ FIELD_PREP(AD7779_DCLK_CLK_DIV_MSK, 7));
+}
+
+static int ad7779_setup_backend(struct ad7779_state *st, struct iio_dev *indio_dev)
+{
+ struct device *dev = &st->spi->dev;
+ int ret;
+ u32 num_lanes;
+
+ indio_dev->info = &ad7779_info_data;
+
+ ret = ad7779_setup_channels(indio_dev, st);
+ if (ret)
+ return ret;
+
+ st->back = devm_iio_backend_get(dev, NULL);
+ if (IS_ERR(st->back))
+ return dev_err_probe(dev, PTR_ERR(st->back),
+ "failed to get iio backend");
+
+ ret = devm_iio_backend_request_buffer(dev, st->back, indio_dev);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_backend_enable(dev, st->back);
+ if (ret)
+ return ret;
+
+ num_lanes = 4;
+ ret = device_property_read_u32(dev, "adi,num-lanes", &num_lanes);
+ if (ret && ret != -EINVAL)
+ return ret;
+
+ return ad7779_set_data_lines(indio_dev, num_lanes);
+}
+
static int ad7779_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
@@ -760,9 +907,6 @@ static int ad7779_probe(struct spi_device *spi)
struct device *dev = &spi->dev;
int ret = -EINVAL;
- if (!spi->irq)
- return dev_err_probe(dev, ret, "DRDY irq not present\n");
-
indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
if (!indio_dev)
return -ENOMEM;
@@ -804,45 +948,12 @@ static int ad7779_probe(struct spi_device *spi)
return ret;
indio_dev->name = st->chip_info->name;
- indio_dev->info = &ad7779_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = st->chip_info->channels;
- indio_dev->num_channels = ARRAY_SIZE(ad7779_channels);
- st->trig = devm_iio_trigger_alloc(dev, "%s-dev%d", indio_dev->name,
- iio_device_id(indio_dev));
- if (!st->trig)
- return -ENOMEM;
-
- st->trig->ops = &ad7779_trigger_ops;
-
- iio_trigger_set_drvdata(st->trig, st);
-
- ret = devm_request_irq(dev, spi->irq, iio_trigger_generic_data_rdy_poll,
- IRQF_ONESHOT | IRQF_NO_AUTOEN, indio_dev->name,
- st->trig);
- if (ret)
- return dev_err_probe(dev, ret, "request IRQ %d failed\n",
- st->spi->irq);
-
- ret = devm_iio_trigger_register(dev, st->trig);
- if (ret)
- return ret;
-
- indio_dev->trig = iio_trigger_get(st->trig);
-
- init_completion(&st->completion);
-
- ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
- &iio_pollfunc_store_time,
- &ad7779_trigger_handler,
- &ad7779_buffer_setup_ops);
- if (ret)
- return ret;
-
- ret = ad7779_spi_write_mask(st, AD7779_REG_DOUT_FORMAT,
- AD7779_DCLK_CLK_DIV_MSK,
- FIELD_PREP(AD7779_DCLK_CLK_DIV_MSK, 7));
+ if (device_property_present(dev, "io-backends"))
+ ret = ad7779_setup_backend(st, indio_dev);
+ else
+ ret = ad7779_setup_without_backend(st, indio_dev);
if (ret)
return ret;
@@ -936,3 +1047,4 @@ module_spi_driver(ad7779_driver);
MODULE_AUTHOR("Ramona Alexandra Nechita <ramona.nechita@analog.com>");
MODULE_DESCRIPTION("Analog Devices AD7779 ADC");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IIO_BACKEND");
diff --git a/drivers/iio/adc/ad7949.c b/drivers/iio/adc/ad7949.c
index 202561cad401..b35d299a3977 100644
--- a/drivers/iio/adc/ad7949.c
+++ b/drivers/iio/adc/ad7949.c
@@ -316,10 +316,8 @@ static int ad7949_spi_probe(struct spi_device *spi)
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*ad7949_adc));
- if (!indio_dev) {
- dev_err(dev, "can not allocate iio device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
indio_dev->info = &ad7949_spi_info;
indio_dev->name = spi_get_device_id(spi)->name;
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index 9c02f9199139..108bb22162ef 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -114,11 +114,13 @@ struct ad799x_chip_config {
* @num_channels: number of channels
* @noirq_config: device configuration w/o IRQ
* @irq_config: device configuration w/IRQ
+ * @has_vref: device supports external reference voltage
*/
struct ad799x_chip_info {
int num_channels;
const struct ad799x_chip_config noirq_config;
const struct ad799x_chip_config irq_config;
+ bool has_vref;
};
struct ad799x_state {
@@ -604,6 +606,7 @@ static const struct iio_event_spec ad799x_events[] = {
static const struct ad799x_chip_info ad799x_chip_info_tbl[] = {
[ad7991] = {
.num_channels = 5,
+ .has_vref = true,
.noirq_config = {
.channel = {
AD799X_CHANNEL(0, 12),
@@ -617,6 +620,7 @@ static const struct ad799x_chip_info ad799x_chip_info_tbl[] = {
},
[ad7995] = {
.num_channels = 5,
+ .has_vref = true,
.noirq_config = {
.channel = {
AD799X_CHANNEL(0, 10),
@@ -630,6 +634,7 @@ static const struct ad799x_chip_info ad799x_chip_info_tbl[] = {
},
[ad7999] = {
.num_channels = 5,
+ .has_vref = true,
.noirq_config = {
.channel = {
AD799X_CHANNEL(0, 8),
@@ -687,6 +692,7 @@ static const struct ad799x_chip_info ad799x_chip_info_tbl[] = {
},
[ad7994] = {
.num_channels = 5,
+ .has_vref = true,
.noirq_config = {
.channel = {
AD799X_CHANNEL(0, 12),
@@ -809,32 +815,22 @@ static int ad799x_probe(struct i2c_client *client)
return ret;
/* check if an external reference is supplied */
- st->vref = devm_regulator_get_optional(&client->dev, "vref");
-
- if (IS_ERR(st->vref)) {
- if (PTR_ERR(st->vref) == -ENODEV) {
+ if (chip_info->has_vref) {
+ st->vref = devm_regulator_get_optional(&client->dev, "vref");
+ ret = PTR_ERR_OR_ZERO(st->vref);
+ if (ret) {
+ if (ret != -ENODEV)
+ goto error_disable_reg;
st->vref = NULL;
dev_info(&client->dev, "Using VCC reference voltage\n");
- } else {
- ret = PTR_ERR(st->vref);
- goto error_disable_reg;
}
- }
- if (st->vref) {
- /*
- * Use external reference voltage if supported by hardware.
- * This is optional if voltage / regulator present, use VCC otherwise.
- */
- if ((st->id == ad7991) || (st->id == ad7995) || (st->id == ad7999)) {
+ if (st->vref) {
dev_info(&client->dev, "Using external reference voltage\n");
extra_config |= AD7991_REF_SEL;
ret = regulator_enable(st->vref);
if (ret)
goto error_disable_reg;
- } else {
- st->vref = NULL;
- dev_warn(&client->dev, "Supplied reference not supported\n");
}
}
diff --git a/drivers/iio/adc/ade9000.c b/drivers/iio/adc/ade9000.c
new file mode 100644
index 000000000000..94e05e11abd9
--- /dev/null
+++ b/drivers/iio/adc/ade9000.c
@@ -0,0 +1,1799 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ADE9000 driver
+ *
+ * Copyright 2025 Analog Devices Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/iio/events.h>
+#include <linux/interrupt.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/unaligned.h>
+
+/* Address of ADE9000 registers */
+#define ADE9000_REG_AIGAIN 0x000
+#define ADE9000_REG_AVGAIN 0x00B
+#define ADE9000_REG_AIRMSOS 0x00C
+#define ADE9000_REG_AVRMSOS 0x00D
+#define ADE9000_REG_APGAIN 0x00E
+#define ADE9000_REG_AWATTOS 0x00F
+#define ADE9000_REG_AVAROS 0x010
+#define ADE9000_REG_AFVAROS 0x012
+#define ADE9000_REG_CONFIG0 0x060
+#define ADE9000_REG_DICOEFF 0x072
+#define ADE9000_REG_AI_PCF 0x20A
+#define ADE9000_REG_AV_PCF 0x20B
+#define ADE9000_REG_AIRMS 0x20C
+#define ADE9000_REG_AVRMS 0x20D
+#define ADE9000_REG_AWATT 0x210
+#define ADE9000_REG_AVAR 0x211
+#define ADE9000_REG_AVA 0x212
+#define ADE9000_REG_AFVAR 0x214
+#define ADE9000_REG_APF 0x216
+#define ADE9000_REG_BI_PCF 0x22A
+#define ADE9000_REG_BV_PCF 0x22B
+#define ADE9000_REG_BIRMS 0x22C
+#define ADE9000_REG_BVRMS 0x22D
+#define ADE9000_REG_CI_PCF 0x24A
+#define ADE9000_REG_CV_PCF 0x24B
+#define ADE9000_REG_CIRMS 0x24C
+#define ADE9000_REG_CVRMS 0x24D
+#define ADE9000_REG_AWATT_ACC 0x2E5
+#define ADE9000_REG_AWATTHR_LO 0x2E6
+#define ADE9000_REG_AVAHR_LO 0x2FA
+#define ADE9000_REG_AFVARHR_LO 0x30E
+#define ADE9000_REG_BWATTHR_LO 0x322
+#define ADE9000_REG_BVAHR_LO 0x336
+#define ADE9000_REG_BFVARHR_LO 0x34A
+#define ADE9000_REG_CWATTHR_LO 0x35E
+#define ADE9000_REG_CVAHR_LO 0x372
+#define ADE9000_REG_CFVARHR_LO 0x386
+#define ADE9000_REG_STATUS0 0x402
+#define ADE9000_REG_STATUS1 0x403
+#define ADE9000_REG_MASK0 0x405
+#define ADE9000_REG_MASK1 0x406
+#define ADE9000_REG_EVENT_MASK 0x407
+#define ADE9000_REG_VLEVEL 0x40F
+#define ADE9000_REG_DIP_LVL 0x410
+#define ADE9000_REG_DIPA 0x411
+#define ADE9000_REG_DIPB 0x412
+#define ADE9000_REG_DIPC 0x413
+#define ADE9000_REG_SWELL_LVL 0x414
+#define ADE9000_REG_SWELLA 0x415
+#define ADE9000_REG_SWELLB 0x416
+#define ADE9000_REG_SWELLC 0x417
+#define ADE9000_REG_APERIOD 0x418
+#define ADE9000_REG_BPERIOD 0x419
+#define ADE9000_REG_CPERIOD 0x41A
+#define ADE9000_REG_RUN 0x480
+#define ADE9000_REG_CONFIG1 0x481
+#define ADE9000_REG_ACCMODE 0x492
+#define ADE9000_REG_CONFIG3 0x493
+#define ADE9000_REG_ZXTOUT 0x498
+#define ADE9000_REG_ZX_LP_SEL 0x49A
+#define ADE9000_REG_WFB_CFG 0x4A0
+#define ADE9000_REG_WFB_PG_IRQEN 0x4A1
+#define ADE9000_REG_WFB_TRG_CFG 0x4A2
+#define ADE9000_REG_WFB_TRG_STAT 0x4A3
+#define ADE9000_REG_CONFIG2 0x4AF
+#define ADE9000_REG_EP_CFG 0x4B0
+#define ADE9000_REG_EGY_TIME 0x4B2
+#define ADE9000_REG_PGA_GAIN 0x4B9
+#define ADE9000_REG_VERSION 0x4FE
+#define ADE9000_REG_WF_BUFF 0x800
+#define ADE9000_REG_WF_HALF_BUFF 0xC00
+
+#define ADE9000_REG_ADDR_MASK GENMASK(15, 4)
+#define ADE9000_REG_READ_BIT_MASK BIT(3)
+
+#define ADE9000_WF_CAP_EN_MASK BIT(4)
+#define ADE9000_WF_CAP_SEL_MASK BIT(5)
+#define ADE9000_WF_MODE_MASK GENMASK(7, 6)
+#define ADE9000_WF_SRC_MASK GENMASK(9, 8)
+#define ADE9000_WF_IN_EN_MASK BIT(12)
+
+/* External reference selection bit in CONFIG1 */
+#define ADE9000_EXT_REF_MASK BIT(15)
+
+/*
+ * Configuration registers
+ */
+#define ADE9000_PGA_GAIN 0x0000
+
+/* Default configuration */
+
+#define ADE9000_CONFIG0 0x00000000
+
+/* CF3/ZX pin outputs Zero crossing, CF4 = DREADY */
+#define ADE9000_CONFIG1 0x000E
+
+/* Default High pass corner frequency of 1.25Hz */
+#define ADE9000_CONFIG2 0x0A00
+
+/* Peak and overcurrent detection disabled */
+#define ADE9000_CONFIG3 0x0000
+
+/*
+ * 50Hz operation, 3P4W Wye configuration, signed accumulation
+ * 3P4W Wye = 3-Phase 4-Wire star configuration (3 phases + neutral wire)
+ * Clear bit 8 i.e. ACCMODE=0x00xx for 50Hz operation
+ * ACCMODE=0x0x9x for 3Wire delta when phase B is used as reference
+ * 3Wire delta = 3-Phase 3-Wire triangle configuration (3 phases, no neutral)
+ */
+#define ADE9000_ACCMODE 0x0000
+#define ADE9000_ACCMODE_60HZ 0x0100
+
+/* Line period and zero crossing obtained from VA */
+#define ADE9000_ZX_LP_SEL 0x0000
+
+/* Interrupt mask values for initialization */
+#define ADE9000_MASK0_ALL_INT_DIS 0
+#define ADE9000_MASK1_ALL_INT_DIS 0x00000000
+
+/* Events disabled */
+#define ADE9000_EVENT_DISABLE 0x00000000
+
+/*
+ * Assuming Vnom=1/2 of full scale.
+ * Refer to Technical reference manual for detailed calculations.
+ */
+#define ADE9000_VLEVEL 0x0022EA28
+
+/* Set DICOEFF= 0xFFFFE000 when integrator is enabled */
+#define ADE9000_DICOEFF 0x00000000
+
+/* DSP ON */
+#define ADE9000_RUN_ON 0xFFFFFFFF
+
+/*
+ * Energy Accumulation Settings
+ * Enable energy accumulation, accumulate samples at 8ksps
+ * latch energy accumulation after EGYRDY
+ * If accumulation is changed to half line cycle mode, change EGY_TIME
+ */
+#define ADE9000_EP_CFG 0x0011
+
+/* Accumulate 8000 samples (EGY_TIME + 1) */
+#define ADE9000_EGY_TIME 7999
+
+/*
+ * Constant Definitions
+ * ADE9000 FDSP: 8000sps, ADE9000 FDSP: 4000sps
+ */
+#define ADE9000_FDSP 4000
+#define ADE9000_DEFAULT_CLK_FREQ_HZ 24576000
+#define ADE9000_WFB_CFG 0x03E9
+#define ADE9000_WFB_PAGE_SIZE 128
+#define ADE9000_WFB_NR_OF_PAGES 16
+#define ADE9000_WFB_MAX_CHANNELS 8
+#define ADE9000_WFB_BYTES_IN_SAMPLE 4
+#define ADE9000_WFB_SAMPLES_IN_PAGE \
+ (ADE9000_WFB_PAGE_SIZE / ADE9000_WFB_MAX_CHANNELS)
+#define ADE9000_WFB_MAX_SAMPLES_CHAN \
+ (ADE9000_WFB_SAMPLES_IN_PAGE * ADE9000_WFB_NR_OF_PAGES)
+#define ADE9000_WFB_FULL_BUFF_NR_SAMPLES \
+ (ADE9000_WFB_PAGE_SIZE * ADE9000_WFB_NR_OF_PAGES)
+#define ADE9000_WFB_FULL_BUFF_SIZE \
+ (ADE9000_WFB_FULL_BUFF_NR_SAMPLES * ADE9000_WFB_BYTES_IN_SAMPLE)
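
Plugging in the constants above: 128 / 8 = 16 samples per page for each channel, 16 * 16 = 256 samples per channel across the 16 pages, 128 * 16 = 2048 samples in the full buffer, and 2048 * 4 = 8192 bytes in total.
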
+
+#define ADE9000_SWRST_BIT BIT(0)
+
+/* Status and Mask register bits */
+#define ADE9000_ST0_WFB_TRIG_BIT BIT(16)
+#define ADE9000_ST0_PAGE_FULL_BIT BIT(17)
+#define ADE9000_ST0_EGYRDY BIT(0)
+
+#define ADE9000_ST1_ZXTOVA_BIT BIT(6)
+#define ADE9000_ST1_ZXTOVB_BIT BIT(7)
+#define ADE9000_ST1_ZXTOVC_BIT BIT(8)
+#define ADE9000_ST1_ZXVA_BIT BIT(9)
+#define ADE9000_ST1_ZXVB_BIT BIT(10)
+#define ADE9000_ST1_ZXVC_BIT BIT(11)
+#define ADE9000_ST1_ZXIA_BIT BIT(13)
+#define ADE9000_ST1_ZXIB_BIT BIT(14)
+#define ADE9000_ST1_ZXIC_BIT BIT(15)
+#define ADE9000_ST1_RSTDONE_BIT BIT(16)
+#define ADE9000_ST1_SEQERR_BIT BIT(18)
+#define ADE9000_ST1_SWELLA_BIT BIT(20)
+#define ADE9000_ST1_SWELLB_BIT BIT(21)
+#define ADE9000_ST1_SWELLC_BIT BIT(22)
+#define ADE9000_ST1_DIPA_BIT BIT(23)
+#define ADE9000_ST1_DIPB_BIT BIT(24)
+#define ADE9000_ST1_DIPC_BIT BIT(25)
+#define ADE9000_ST1_ERROR0_BIT BIT(28)
+#define ADE9000_ST1_ERROR1_BIT BIT(29)
+#define ADE9000_ST1_ERROR2_BIT BIT(30)
+#define ADE9000_ST1_ERROR3_BIT BIT(31)
+#define ADE9000_ST_ERROR \
+ (ADE9000_ST1_ERROR0_BIT | ADE9000_ST1_ERROR1_BIT | \
+ ADE9000_ST1_ERROR2_BIT | ADE9000_ST1_ERROR3_BIT)
+#define ADE9000_ST1_CROSSING_FIRST 6
+#define ADE9000_ST1_CROSSING_DEPTH 25
+
+#define ADE9000_WFB_TRG_DIP_BIT BIT(0)
+#define ADE9000_WFB_TRG_SWELL_BIT BIT(1)
+#define ADE9000_WFB_TRG_ZXIA_BIT BIT(3)
+#define ADE9000_WFB_TRG_ZXIB_BIT BIT(4)
+#define ADE9000_WFB_TRG_ZXIC_BIT BIT(5)
+#define ADE9000_WFB_TRG_ZXVA_BIT BIT(6)
+#define ADE9000_WFB_TRG_ZXVB_BIT BIT(7)
+#define ADE9000_WFB_TRG_ZXVC_BIT BIT(8)
+
+/* Stop when waveform buffer is full */
+#define ADE9000_WFB_FULL_MODE 0x0
+/* Continuous fill, stop only on enabled trigger events */
+#define ADE9000_WFB_EN_TRIG_MODE 0x1
+/* Continuous fill, center capture around enabled trigger events */
+#define ADE9000_WFB_C_EN_TRIG_MODE 0x2
+/* Continuous fill, used as streaming mode for continuous data output */
+#define ADE9000_WFB_STREAMING_MODE 0x3
+
+#define ADE9000_LAST_PAGE_BIT BIT(15)
+#define ADE9000_MIDDLE_PAGE_BIT BIT(7)
+
+/*
+ * Full scale Codes referred from Datasheet. Respective digital codes are
+ * produced when ADC inputs are at full scale.
+ */
+#define ADE9000_RMS_FULL_SCALE_CODES 52866837
+#define ADE9000_WATT_FULL_SCALE_CODES 20694066
+#define ADE9000_PCF_FULL_SCALE_CODES 74770000
+
+/* Phase and channel definitions */
+#define ADE9000_PHASE_A_NR 0
+#define ADE9000_PHASE_B_NR 1
+#define ADE9000_PHASE_C_NR 2
+
+#define ADE9000_SCAN_POS_IA BIT(0)
+#define ADE9000_SCAN_POS_VA BIT(1)
+#define ADE9000_SCAN_POS_IB BIT(2)
+#define ADE9000_SCAN_POS_VB BIT(3)
+#define ADE9000_SCAN_POS_IC BIT(4)
+#define ADE9000_SCAN_POS_VC BIT(5)
+
+/* Waveform buffer configuration values */
+enum ade9000_wfb_cfg {
+ ADE9000_WFB_CFG_ALL_CHAN = 0x0,
+ ADE9000_WFB_CFG_IA_VA = 0x1,
+ ADE9000_WFB_CFG_IB_VB = 0x2,
+ ADE9000_WFB_CFG_IC_VC = 0x3,
+ ADE9000_WFB_CFG_IA = 0x8,
+ ADE9000_WFB_CFG_VA = 0x9,
+ ADE9000_WFB_CFG_IB = 0xA,
+ ADE9000_WFB_CFG_VB = 0xB,
+ ADE9000_WFB_CFG_IC = 0xC,
+ ADE9000_WFB_CFG_VC = 0xD,
+};
+
+#define ADE9000_PHASE_B_POS_BIT BIT(5)
+#define ADE9000_PHASE_C_POS_BIT BIT(6)
+
+#define ADE9000_MAX_PHASE_NR 3
+#define AD9000_CHANNELS_PER_PHASE 10
+
+/*
+ * Calculate register address for multi-phase device.
+ * Phase A (chan 0): base address + 0x00
+ * Phase B (chan 1): base address + 0x20
+ * Phase C (chan 2): base address + 0x40
+ */
+#define ADE9000_ADDR_ADJUST(addr, chan) \
+ (((chan) == 0 ? 0 : (chan) == 1 ? 2 : 4) << 4 | (addr))
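
For example, applied to the RMS registers defined above:

    /* ADE9000_ADDR_ADJUST(ADE9000_REG_AIRMS, 1) == 0x22C == ADE9000_REG_BIRMS */
    /* ADE9000_ADDR_ADJUST(ADE9000_REG_AIRMS, 2) == 0x24C == ADE9000_REG_CIRMS */
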
+
+struct ade9000_state {
+ struct completion reset_completion;
+ struct mutex lock; /* Protects SPI transactions */
+ u8 wf_src;
+ u32 wfb_trg;
+ u8 wfb_nr_activ_chan;
+ u32 wfb_nr_samples;
+ struct spi_device *spi;
+ struct clk *clkin;
+ struct spi_transfer xfer[2];
+ struct spi_message spi_msg;
+ struct regmap *regmap;
+ union {
+ u8 byte[ADE9000_WFB_FULL_BUFF_SIZE];
+ __be32 word[ADE9000_WFB_FULL_BUFF_NR_SAMPLES];
+ } rx_buff __aligned(IIO_DMA_MINALIGN);
+ u8 tx_buff[2] __aligned(IIO_DMA_MINALIGN);
+ unsigned int bulk_read_buf[2];
+};
+
+struct ade9000_irq1_event {
+ u32 bit_mask;
+ enum iio_chan_type chan_type;
+ u32 channel;
+ enum iio_event_type event_type;
+ enum iio_event_direction event_dir;
+};
+
+static const struct ade9000_irq1_event ade9000_irq1_events[] = {
+ { ADE9000_ST1_ZXVA_BIT, IIO_VOLTAGE, ADE9000_PHASE_A_NR, IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER },
+ { ADE9000_ST1_ZXIA_BIT, IIO_CURRENT, ADE9000_PHASE_A_NR, IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER },
+ { ADE9000_ST1_ZXVB_BIT, IIO_VOLTAGE, ADE9000_PHASE_B_NR, IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER },
+ { ADE9000_ST1_ZXIB_BIT, IIO_CURRENT, ADE9000_PHASE_B_NR, IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER },
+ { ADE9000_ST1_ZXVC_BIT, IIO_VOLTAGE, ADE9000_PHASE_C_NR, IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER },
+ { ADE9000_ST1_ZXIC_BIT, IIO_CURRENT, ADE9000_PHASE_C_NR, IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER },
+ { ADE9000_ST1_SWELLA_BIT, IIO_ALTVOLTAGE, ADE9000_PHASE_A_NR, IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING },
+ { ADE9000_ST1_SWELLB_BIT, IIO_ALTVOLTAGE, ADE9000_PHASE_B_NR, IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING },
+ { ADE9000_ST1_SWELLC_BIT, IIO_ALTVOLTAGE, ADE9000_PHASE_C_NR, IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING },
+ { ADE9000_ST1_DIPA_BIT, IIO_ALTVOLTAGE, ADE9000_PHASE_A_NR, IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING },
+ { ADE9000_ST1_DIPB_BIT, IIO_ALTVOLTAGE, ADE9000_PHASE_B_NR, IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING },
+ { ADE9000_ST1_DIPC_BIT, IIO_ALTVOLTAGE, ADE9000_PHASE_C_NR, IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING },
+};
+
+/* Voltage events (zero crossing on instantaneous voltage) */
+static const struct iio_event_spec ade9000_voltage_events[] = {
+ {
+ /* Zero crossing detection - datasheet: ZXV interrupts */
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+/* Current events (zero crossing on instantaneous current) */
+static const struct iio_event_spec ade9000_current_events[] = {
+ {
+ /* Zero crossing detection - datasheet: ZXI interrupts */
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+/* RMS voltage events (swell/sag detection on RMS values) */
+static const struct iio_event_spec ade9000_rms_voltage_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING, /* RMS swell detection */
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE) | BIT(IIO_EV_INFO_VALUE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING, /* RMS sag/dip detection */
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE) | BIT(IIO_EV_INFO_VALUE),
+ },
+};
+
+static const char * const ade9000_filter_type_items[] = {
+ "sinc4", "sinc4+lp",
+};
+
+static const int ade9000_filter_type_values[] = {
+ 0, 2,
+};
+
+static int ade9000_filter_type_get(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ade9000_state *st = iio_priv(indio_dev);
+ u32 val;
+ int ret;
+ unsigned int i;
+
+ ret = regmap_read(st->regmap, ADE9000_REG_WFB_CFG, &val);
+ if (ret)
+ return ret;
+
+ val = FIELD_GET(ADE9000_WF_SRC_MASK, val);
+
+ for (i = 0; i < ARRAY_SIZE(ade9000_filter_type_values); i++) {
+ if (ade9000_filter_type_values[i] == val)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int ade9000_filter_type_set(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int index)
+{
+ struct ade9000_state *st = iio_priv(indio_dev);
+ int ret, val;
+
+ if (index >= ARRAY_SIZE(ade9000_filter_type_values))
+ return -EINVAL;
+
+ val = ade9000_filter_type_values[index];
+
+ /* Update the WFB_CFG register with the new filter type */
+ ret = regmap_update_bits(st->regmap, ADE9000_REG_WFB_CFG,
+ ADE9000_WF_SRC_MASK,
+ FIELD_PREP(ADE9000_WF_SRC_MASK, val));
+ if (ret)
+ return ret;
+
+ /* Update cached value */
+ st->wf_src = val;
+
+ return 0;
+}
+
+static const struct iio_enum ade9000_filter_type_enum = {
+ .items = ade9000_filter_type_items,
+ .num_items = ARRAY_SIZE(ade9000_filter_type_items),
+ .get = ade9000_filter_type_get,
+ .set = ade9000_filter_type_set,
+};
+
+static const struct iio_chan_spec_ext_info ade9000_ext_info[] = {
+ IIO_ENUM("filter_type", IIO_SHARED_BY_ALL, &ade9000_filter_type_enum),
+ IIO_ENUM_AVAILABLE("filter_type", IIO_SHARED_BY_ALL, &ade9000_filter_type_enum),
+ { }
+};
+
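+/*
+ * Buffered channels use interleaved scan indexes that match the
+ * ADE9000_SCAN_POS_* bit positions: current on even indexes
+ * (2 * phase), voltage on odd indexes (2 * phase + 1).
+ */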
+#define ADE9000_CURRENT_CHANNEL(num) { \
+ .type = IIO_CURRENT, \
+ .channel = num, \
+ .address = ADE9000_ADDR_ADJUST(ADE9000_REG_AI_PCF, num), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_CALIBSCALE), \
+ .event_spec = ade9000_current_events, \
+ .num_event_specs = ARRAY_SIZE(ade9000_current_events), \
+ .scan_index = num * 2, \
+ .indexed = 1, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 32, \
+ .storagebits = 32, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+#define ADE9000_VOLTAGE_CHANNEL(num) { \
+ .type = IIO_VOLTAGE, \
+ .channel = num, \
+ .address = ADE9000_ADDR_ADJUST(ADE9000_REG_AV_PCF, num), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_CALIBSCALE) | \
+ BIT(IIO_CHAN_INFO_FREQUENCY), \
+ .event_spec = ade9000_voltage_events, \
+ .num_event_specs = ARRAY_SIZE(ade9000_voltage_events), \
+ .scan_index = num * 2 + 1, /* interleave with current channels */ \
+ .indexed = 1, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 32, \
+ .storagebits = 32, \
+ .endianness = IIO_BE, \
+ }, \
+ .ext_info = ade9000_ext_info, \
+}
+
+#define ADE9000_ALTCURRENT_RMS_CHANNEL(num) { \
+ .type = IIO_ALTCURRENT, \
+ .channel = num, \
+ .address = ADE9000_ADDR_ADJUST(ADE9000_REG_AIRMS, num), \
+ .channel2 = IIO_MOD_RMS, \
+ .modified = 1, \
+ .indexed = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS), \
+ .scan_index = -1 \
+}
+
+#define ADE9000_ALTVOLTAGE_RMS_CHANNEL(num) { \
+ .type = IIO_ALTVOLTAGE, \
+ .channel = num, \
+ .address = ADE9000_ADDR_ADJUST(ADE9000_REG_AVRMS, num), \
+ .channel2 = IIO_MOD_RMS, \
+ .modified = 1, \
+ .indexed = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS), \
+ .event_spec = ade9000_rms_voltage_events, \
+ .num_event_specs = ARRAY_SIZE(ade9000_rms_voltage_events), \
+ .scan_index = -1 \
+}
+
+#define ADE9000_POWER_ACTIVE_CHANNEL(num) { \
+ .type = IIO_POWER, \
+ .channel = num, \
+ .address = ADE9000_ADDR_ADJUST(ADE9000_REG_AWATT, num), \
+ .channel2 = IIO_MOD_ACTIVE, \
+ .modified = 1, \
+ .indexed = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS) | \
+ BIT(IIO_CHAN_INFO_CALIBSCALE), \
+ .scan_index = -1 \
+}
+
+#define ADE9000_POWER_REACTIVE_CHANNEL(num) { \
+ .type = IIO_POWER, \
+ .channel = num, \
+ .address = ADE9000_ADDR_ADJUST(ADE9000_REG_AVAR, num), \
+ .channel2 = IIO_MOD_REACTIVE, \
+ .modified = 1, \
+ .indexed = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS), \
+ .scan_index = -1 \
+}
+
+#define ADE9000_POWER_APPARENT_CHANNEL(num) { \
+ .type = IIO_POWER, \
+ .channel = num, \
+ .address = ADE9000_ADDR_ADJUST(ADE9000_REG_AVA, num), \
+ .channel2 = IIO_MOD_APPARENT, \
+ .modified = 1, \
+ .indexed = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_index = -1 \
+}
+
+#define ADE9000_ENERGY_ACTIVE_CHANNEL(num, addr) { \
+ .type = IIO_ENERGY, \
+ .channel = num, \
+ .address = addr, \
+ .channel2 = IIO_MOD_ACTIVE, \
+ .modified = 1, \
+ .indexed = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .scan_index = -1 \
+}
+
+#define ADE9000_ENERGY_APPARENT_CHANNEL(num, addr) { \
+ .type = IIO_ENERGY, \
+ .channel = num, \
+ .address = addr, \
+ .channel2 = IIO_MOD_APPARENT, \
+ .modified = 1, \
+ .indexed = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .scan_index = -1 \
+}
+
+#define ADE9000_ENERGY_REACTIVE_CHANNEL(num, addr) { \
+ .type = IIO_ENERGY, \
+ .channel = num, \
+ .address = addr, \
+ .channel2 = IIO_MOD_REACTIVE, \
+ .modified = 1, \
+ .indexed = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .scan_index = -1 \
+}
+
+#define ADE9000_POWER_FACTOR_CHANNEL(num) { \
+ .type = IIO_POWER, \
+ .channel = num, \
+ .address = ADE9000_ADDR_ADJUST(ADE9000_REG_APF, num), \
+ .indexed = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_POWERFACTOR), \
+ .scan_index = -1 \
+}
+
+static const struct iio_chan_spec ade9000_channels[] = {
+ /* Phase A channels */
+ ADE9000_CURRENT_CHANNEL(ADE9000_PHASE_A_NR),
+ ADE9000_VOLTAGE_CHANNEL(ADE9000_PHASE_A_NR),
+ ADE9000_ALTCURRENT_RMS_CHANNEL(ADE9000_PHASE_A_NR),
+ ADE9000_ALTVOLTAGE_RMS_CHANNEL(ADE9000_PHASE_A_NR),
+ ADE9000_POWER_ACTIVE_CHANNEL(ADE9000_PHASE_A_NR),
+ ADE9000_POWER_REACTIVE_CHANNEL(ADE9000_PHASE_A_NR),
+ ADE9000_POWER_APPARENT_CHANNEL(ADE9000_PHASE_A_NR),
+ ADE9000_ENERGY_ACTIVE_CHANNEL(ADE9000_PHASE_A_NR, ADE9000_REG_AWATTHR_LO),
+ ADE9000_ENERGY_APPARENT_CHANNEL(ADE9000_PHASE_A_NR, ADE9000_REG_AVAHR_LO),
+ ADE9000_ENERGY_REACTIVE_CHANNEL(ADE9000_PHASE_A_NR, ADE9000_REG_AFVARHR_LO),
+ ADE9000_POWER_FACTOR_CHANNEL(ADE9000_PHASE_A_NR),
+ /* Phase B channels */
+ ADE9000_CURRENT_CHANNEL(ADE9000_PHASE_B_NR),
+ ADE9000_VOLTAGE_CHANNEL(ADE9000_PHASE_B_NR),
+ ADE9000_ALTCURRENT_RMS_CHANNEL(ADE9000_PHASE_B_NR),
+ ADE9000_ALTVOLTAGE_RMS_CHANNEL(ADE9000_PHASE_B_NR),
+ ADE9000_POWER_ACTIVE_CHANNEL(ADE9000_PHASE_B_NR),
+ ADE9000_POWER_REACTIVE_CHANNEL(ADE9000_PHASE_B_NR),
+ ADE9000_POWER_APPARENT_CHANNEL(ADE9000_PHASE_B_NR),
+ ADE9000_ENERGY_ACTIVE_CHANNEL(ADE9000_PHASE_B_NR, ADE9000_REG_BWATTHR_LO),
+ ADE9000_ENERGY_APPARENT_CHANNEL(ADE9000_PHASE_B_NR, ADE9000_REG_BVAHR_LO),
+ ADE9000_ENERGY_REACTIVE_CHANNEL(ADE9000_PHASE_B_NR, ADE9000_REG_BFVARHR_LO),
+ ADE9000_POWER_FACTOR_CHANNEL(ADE9000_PHASE_B_NR),
+ /* Phase C channels */
+ ADE9000_CURRENT_CHANNEL(ADE9000_PHASE_C_NR),
+ ADE9000_VOLTAGE_CHANNEL(ADE9000_PHASE_C_NR),
+ ADE9000_ALTCURRENT_RMS_CHANNEL(ADE9000_PHASE_C_NR),
+ ADE9000_ALTVOLTAGE_RMS_CHANNEL(ADE9000_PHASE_C_NR),
+ ADE9000_POWER_ACTIVE_CHANNEL(ADE9000_PHASE_C_NR),
+ ADE9000_POWER_REACTIVE_CHANNEL(ADE9000_PHASE_C_NR),
+ ADE9000_POWER_APPARENT_CHANNEL(ADE9000_PHASE_C_NR),
+ ADE9000_ENERGY_ACTIVE_CHANNEL(ADE9000_PHASE_C_NR, ADE9000_REG_CWATTHR_LO),
+ ADE9000_ENERGY_APPARENT_CHANNEL(ADE9000_PHASE_C_NR, ADE9000_REG_CVAHR_LO),
+ ADE9000_ENERGY_REACTIVE_CHANNEL(ADE9000_PHASE_C_NR, ADE9000_REG_CFVARHR_LO),
+ ADE9000_POWER_FACTOR_CHANNEL(ADE9000_PHASE_C_NR),
+};
+
+static const struct reg_sequence ade9000_initialization_sequence[] = {
+ { ADE9000_REG_PGA_GAIN, ADE9000_PGA_GAIN },
+ { ADE9000_REG_CONFIG0, ADE9000_CONFIG0 },
+ { ADE9000_REG_CONFIG1, ADE9000_CONFIG1 },
+ { ADE9000_REG_CONFIG2, ADE9000_CONFIG2 },
+ { ADE9000_REG_CONFIG3, ADE9000_CONFIG3 },
+ { ADE9000_REG_ACCMODE, ADE9000_ACCMODE },
+ { ADE9000_REG_ZX_LP_SEL, ADE9000_ZX_LP_SEL },
+ { ADE9000_REG_MASK0, ADE9000_MASK0_ALL_INT_DIS },
+ { ADE9000_REG_MASK1, ADE9000_MASK1_ALL_INT_DIS },
+ { ADE9000_REG_EVENT_MASK, ADE9000_EVENT_DISABLE },
+ { ADE9000_REG_WFB_CFG, ADE9000_WFB_CFG },
+ { ADE9000_REG_VLEVEL, ADE9000_VLEVEL },
+ { ADE9000_REG_DICOEFF, ADE9000_DICOEFF },
+ { ADE9000_REG_EGY_TIME, ADE9000_EGY_TIME },
+ { ADE9000_REG_EP_CFG, ADE9000_EP_CFG },
+ /* Clear all pending status bits by writing 1s */
+ { ADE9000_REG_STATUS0, GENMASK(31, 0) },
+ { ADE9000_REG_STATUS1, GENMASK(31, 0) },
+ { ADE9000_REG_RUN, ADE9000_RUN_ON }
+};
+
+static int ade9000_spi_write_reg(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct ade9000_state *st = context;
+ u8 tx_buf[6];
+ u16 addr;
+ int ret, len;
+
+ guard(mutex)(&st->lock);
+
+ addr = FIELD_PREP(ADE9000_REG_ADDR_MASK, reg);
+ put_unaligned_be16(addr, tx_buf);
+
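+ /*
+ * Registers between the RUN and VERSION addresses are 16 bits wide;
+ * all others are 32 bits.
+ */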
+ if (reg > ADE9000_REG_RUN && reg < ADE9000_REG_VERSION) {
+ put_unaligned_be16(val, &tx_buf[2]);
+ len = 4;
+ } else {
+ put_unaligned_be32(val, &tx_buf[2]);
+ len = 6;
+ }
+
+ ret = spi_write_then_read(st->spi, tx_buf, len, NULL, 0);
+ if (ret)
+ dev_err(&st->spi->dev, "problem when writing register 0x%x\n", reg);
+
+ return ret;
+}
+
+static int ade9000_spi_read_reg(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct ade9000_state *st = context;
+ u8 tx_buf[2];
+ u8 rx_buf[4];
+ u16 addr;
+ int ret, rx_len;
+
+ guard(mutex)(&st->lock);
+
+ addr = FIELD_PREP(ADE9000_REG_ADDR_MASK, reg) |
+ ADE9000_REG_READ_BIT_MASK;
+
+ put_unaligned_be16(addr, tx_buf);
+
+ /* Skip CRC bytes - only read actual data */
+ if (reg > ADE9000_REG_RUN && reg < ADE9000_REG_VERSION)
+ rx_len = 2;
+ else
+ rx_len = 4;
+
+ ret = spi_write_then_read(st->spi, tx_buf, 2, rx_buf, rx_len);
+ if (ret) {
+ dev_err(&st->spi->dev, "error reading register 0x%x\n", reg);
+ return ret;
+ }
+
+ if (reg > ADE9000_REG_RUN && reg < ADE9000_REG_VERSION)
+ *val = get_unaligned_be16(rx_buf);
+ else
+ *val = get_unaligned_be32(rx_buf);
+
+ return 0;
+}
+
+static bool ade9000_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ /* Interrupt/error status registers - volatile */
+ case ADE9000_REG_STATUS0:
+ case ADE9000_REG_STATUS1:
+ return true;
+ default:
+ /* All other registers are non-volatile */
+ return false;
+ }
+}
+
+static void ade9000_configure_scan(struct iio_dev *indio_dev, u32 wfb_addr)
+{
+ struct ade9000_state *st = iio_priv(indio_dev);
+ u16 addr;
+
+ addr = FIELD_PREP(ADE9000_REG_ADDR_MASK, wfb_addr) |
+ ADE9000_REG_READ_BIT_MASK;
+
+ put_unaligned_be16(addr, st->tx_buff);
+
+ st->xfer[0].tx_buf = &st->tx_buff[0];
+ st->xfer[0].len = 2;
+
+ st->xfer[1].rx_buf = st->rx_buff.byte;
+
+ /* Streaming mode: read half the buffer, 4 bytes per sample */
+ st->xfer[1].len = (st->wfb_nr_samples / 2) * 4;
+
+ spi_message_init_with_transfers(&st->spi_msg, st->xfer, ARRAY_SIZE(st->xfer));
+}
+
+static int ade9000_iio_push_streaming(struct iio_dev *indio_dev)
+{
+ struct ade9000_state *st = iio_priv(indio_dev);
+ struct device *dev = &st->spi->dev;
+ u32 current_page, i;
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = spi_sync(st->spi, &st->spi_msg);
+ if (ret) {
+ dev_err_ratelimited(dev, "SPI fail in trigger handler\n");
+ return ret;
+ }
+
+ /* In streaming mode, only half the buffer is filled per interrupt */
+ for (i = 0; i < st->wfb_nr_samples / 2; i += st->wfb_nr_activ_chan)
+ iio_push_to_buffers(indio_dev, &st->rx_buff.word[i]);
+
+ ret = regmap_read(st->regmap, ADE9000_REG_WFB_PG_IRQEN, &current_page);
+ if (ret) {
+ dev_err_ratelimited(dev, "IRQ0 WFB read fail\n");
+ return ret;
+ }
+
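+ /*
+ * Ping-pong between buffer halves: if the middle-page IRQ was armed,
+ * the first half was just read, so arm the last page and point the
+ * next SPI read at the second half; otherwise re-arm the middle page
+ * and read from the buffer start again.
+ */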
+ if (current_page & ADE9000_MIDDLE_PAGE_BIT) {
+ ret = regmap_write(st->regmap, ADE9000_REG_WFB_PG_IRQEN,
+ ADE9000_LAST_PAGE_BIT);
+ if (ret) {
+ dev_err_ratelimited(dev, "IRQ0 WFB write fail\n");
+ return ret;
+ }
+
+ ade9000_configure_scan(indio_dev,
+ ADE9000_REG_WF_HALF_BUFF);
+ } else {
+ ret = regmap_write(st->regmap, ADE9000_REG_WFB_PG_IRQEN,
+ ADE9000_MIDDLE_PAGE_BIT);
+ if (ret) {
+ dev_err_ratelimited(dev, "IRQ0 WFB write fail");
+ return IRQ_HANDLED;
+ }
+
+ ade9000_configure_scan(indio_dev, ADE9000_REG_WF_BUFF);
+ }
+
+ return 0;
+}
+
+static int ade9000_iio_push_buffer(struct iio_dev *indio_dev)
+{
+ struct ade9000_state *st = iio_priv(indio_dev);
+ int ret;
+ u32 i;
+
+ guard(mutex)(&st->lock);
+
+ ret = spi_sync(st->spi, &st->spi_msg);
+ if (ret) {
+ dev_err_ratelimited(&st->spi->dev,
+ "SPI fail in trigger handler\n");
+ return ret;
+ }
+
+ for (i = 0; i < st->wfb_nr_samples; i += st->wfb_nr_activ_chan)
+ iio_push_to_buffers(indio_dev, &st->rx_buff.word[i]);
+
+ return 0;
+}
+
+static irqreturn_t ade9000_irq0_thread(int irq, void *data)
+{
+ struct iio_dev *indio_dev = data;
+ struct ade9000_state *st = iio_priv(indio_dev);
+ struct device *dev = &st->spi->dev;
+ u32 handled_irq = 0;
+ u32 interrupts, status;
+ int ret;
+
+ ret = regmap_read(st->regmap, ADE9000_REG_STATUS0, &status);
+ if (ret) {
+ dev_err_ratelimited(dev, "IRQ0 read status fail\n");
+ return IRQ_HANDLED;
+ }
+
+ ret = regmap_read(st->regmap, ADE9000_REG_MASK0, &interrupts);
+ if (ret) {
+ dev_err_ratelimited(dev, "IRQ0 read mask fail\n");
+ return IRQ_HANDLED;
+ }
+
+ if ((status & ADE9000_ST0_PAGE_FULL_BIT) &&
+ (interrupts & ADE9000_ST0_PAGE_FULL_BIT)) {
+ /* Always use streaming mode */
+ ret = ade9000_iio_push_streaming(indio_dev);
+ if (ret) {
+ dev_err_ratelimited(dev, "IRQ0 IIO push fail\n");
+ return IRQ_HANDLED;
+ }
+
+ handled_irq |= ADE9000_ST0_PAGE_FULL_BIT;
+ }
+
+ if ((status & ADE9000_ST0_WFB_TRIG_BIT) &&
+ (interrupts & ADE9000_ST0_WFB_TRIG_BIT)) {
+ ret = regmap_update_bits(st->regmap, ADE9000_REG_WFB_CFG,
+ ADE9000_WF_CAP_EN_MASK, 0);
+ if (ret) {
+ dev_err_ratelimited(dev, "IRQ0 WFB fail\n");
+ return IRQ_HANDLED;
+ }
+
+ if (iio_buffer_enabled(indio_dev)) {
+ ret = ade9000_iio_push_buffer(indio_dev);
+ if (ret) {
+ dev_err_ratelimited(dev,
+ "IRQ0 IIO push fail @ WFB TRIG\n");
+ return IRQ_HANDLED;
+ }
+ }
+
+ handled_irq |= ADE9000_ST0_WFB_TRIG_BIT;
+ }
+
+ ret = regmap_write(st->regmap, ADE9000_REG_STATUS0, handled_irq);
+ if (ret)
+ dev_err_ratelimited(dev, "IRQ0 write status fail\n");
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ade9000_irq1_thread(int irq, void *data)
+{
+ struct iio_dev *indio_dev = data;
+ struct ade9000_state *st = iio_priv(indio_dev);
+ unsigned int bit = ADE9000_ST1_CROSSING_FIRST;
+ s64 timestamp = iio_get_time_ns(indio_dev);
+ u32 handled_irq = 0;
+ u32 interrupts, result, status, tmp;
+ DECLARE_BITMAP(interrupt_bits, ADE9000_ST1_CROSSING_DEPTH);
+ const struct ade9000_irq1_event *event;
+ int ret, i;
+
+ if (!completion_done(&st->reset_completion)) {
+ ret = regmap_read(st->regmap, ADE9000_REG_STATUS1, &result);
+ if (ret) {
+ dev_err_ratelimited(&st->spi->dev, "IRQ1 read status fail\n");
+ return IRQ_HANDLED;
+ }
+
+ if (result & ADE9000_ST1_RSTDONE_BIT) {
+ complete(&st->reset_completion);
+ /* Clear the reset done status bit */
+ ret = regmap_write(st->regmap, ADE9000_REG_STATUS1, ADE9000_ST1_RSTDONE_BIT);
+ if (ret)
+ dev_err_ratelimited(&st->spi->dev,
+ "IRQ1 clear reset status fail\n");
+ } else {
+ dev_err_ratelimited(&st->spi->dev,
+ "Error testing reset done\n");
+ }
+
+ return IRQ_HANDLED;
+ }
+
+ ret = regmap_read(st->regmap, ADE9000_REG_STATUS1, &status);
+ if (ret) {
+ dev_err_ratelimited(&st->spi->dev, "IRQ1 read status fail\n");
+ return IRQ_HANDLED;
+ }
+
+ ret = regmap_read(st->regmap, ADE9000_REG_MASK1, &interrupts);
+ if (ret) {
+ dev_err_ratelimited(&st->spi->dev, "IRQ1 read mask fail\n");
+ return IRQ_HANDLED;
+ }
+
+ bitmap_from_arr32(interrupt_bits, &interrupts, ADE9000_ST1_CROSSING_DEPTH);
+ for_each_set_bit_from(bit, interrupt_bits,
+ ADE9000_ST1_CROSSING_DEPTH) {
+ tmp = status & BIT(bit);
+ if (!tmp)
+ continue;
+
+ event = NULL;
+
+ /* Find corresponding event in lookup table */
+ for (i = 0; i < ARRAY_SIZE(ade9000_irq1_events); i++) {
+ if (ade9000_irq1_events[i].bit_mask == tmp) {
+ event = &ade9000_irq1_events[i];
+ break;
+ }
+ }
+
+ if (event) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(event->chan_type,
+ event->channel,
+ event->event_type,
+ event->event_dir),
+ timestamp);
+ }
+ handled_irq |= tmp;
+ }
+
+ ret = regmap_write(st->regmap, ADE9000_REG_STATUS1, handled_irq);
+ if (ret)
+ dev_err_ratelimited(&st->spi->dev, "IRQ1 write status fail\n");
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ade9000_dready_thread(int irq, void *data)
+{
+ struct iio_dev *indio_dev = data;
+
+ /* Handle data ready interrupt from C4/EVENT/DREADY pin */
+ if (!iio_device_claim_buffer_mode(indio_dev)) {
+ ade9000_iio_push_buffer(indio_dev);
+ iio_device_release_buffer_mode(indio_dev);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int ade9000_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask)
+{
+ struct ade9000_state *st = iio_priv(indio_dev);
+ unsigned int measured;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_FREQUENCY:
+ if (chan->type == IIO_VOLTAGE) {
+ int period_reg;
+ int period;
+
+ switch (chan->channel) {
+ case ADE9000_PHASE_A_NR:
+ period_reg = ADE9000_REG_APERIOD;
+ break;
+ case ADE9000_PHASE_B_NR:
+ period_reg = ADE9000_REG_BPERIOD;
+ break;
+ case ADE9000_PHASE_C_NR:
+ period_reg = ADE9000_REG_CPERIOD;
+ break;
+ default:
+ return -EINVAL;
+ }
+ ret = regmap_read(st->regmap, period_reg, &period);
+ if (ret)
+ return ret;
+ /*
+ * Frequency (Hz) = (4000 * 65536) / (PERIOD + 1), per the
+ * datasheet period measurement formula; 65536 = 2^16 period
+ * register fractional scaling.
+ * See the ADE9000 datasheet section on period measurement.
+ */
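+ /*
+ * Illustrative example: PERIOD = 5242879 gives
+ * 262144000 / 5242880 = 50 Hz.
+ */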
+ *val = 4000 * 65536;
+ *val2 = period + 1;
+ return IIO_VAL_FRACTIONAL;
+ }
+
+ return -EINVAL;
+ case IIO_CHAN_INFO_RAW:
+ if (chan->type == IIO_ENERGY) {
+ u16 lo_reg = chan->address;
+
+ ret = regmap_bulk_read(st->regmap, lo_reg,
+ st->bulk_read_buf, 2);
+ if (ret)
+ return ret;
+
+ *val = st->bulk_read_buf[0]; /* Lower 32 bits */
+ *val2 = st->bulk_read_buf[1]; /* Upper 32 bits */
+ return IIO_VAL_INT_64;
+ }
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = regmap_read(st->regmap, chan->address, &measured);
+ iio_device_release_direct(indio_dev);
+ if (ret)
+ return ret;
+
+ *val = measured;
+
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_POWERFACTOR:
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = regmap_read(st->regmap, chan->address, &measured);
+ iio_device_release_direct(indio_dev);
+ if (ret)
+ return ret;
+
+ *val = measured;
+
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
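+ /*
+ * Scale is reported as 1 / full-scale code, so a raw reading
+ * multiplied by the returned fraction is normalized to 1.0 at
+ * ADC full scale.
+ */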
+ switch (chan->type) {
+ case IIO_CURRENT:
+ case IIO_VOLTAGE:
+ case IIO_ALTVOLTAGE:
+ case IIO_ALTCURRENT:
+ switch (chan->address) {
+ case ADE9000_REG_AI_PCF:
+ case ADE9000_REG_AV_PCF:
+ case ADE9000_REG_BI_PCF:
+ case ADE9000_REG_BV_PCF:
+ case ADE9000_REG_CI_PCF:
+ case ADE9000_REG_CV_PCF:
+ *val = 1;
+ *val2 = ADE9000_PCF_FULL_SCALE_CODES;
+ return IIO_VAL_FRACTIONAL;
+ case ADE9000_REG_AIRMS:
+ case ADE9000_REG_AVRMS:
+ case ADE9000_REG_BIRMS:
+ case ADE9000_REG_BVRMS:
+ case ADE9000_REG_CIRMS:
+ case ADE9000_REG_CVRMS:
+ *val = 1;
+ *val2 = ADE9000_RMS_FULL_SCALE_CODES;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ case IIO_POWER:
+ *val = 1;
+ *val2 = ADE9000_WATT_FULL_SCALE_CODES;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ade9000_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct ade9000_state *st = iio_priv(indio_dev);
+ u32 tmp;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ switch (chan->type) {
+ case IIO_CURRENT:
+ return regmap_write(st->regmap,
+ ADE9000_ADDR_ADJUST(ADE9000_REG_AIRMSOS,
+ chan->channel), val);
+ case IIO_VOLTAGE:
+ case IIO_ALTVOLTAGE:
+ return regmap_write(st->regmap,
+ ADE9000_ADDR_ADJUST(ADE9000_REG_AVRMSOS,
+ chan->channel), val);
+ case IIO_POWER:
+ tmp = chan->address;
+ tmp &= ~ADE9000_PHASE_B_POS_BIT;
+ tmp &= ~ADE9000_PHASE_C_POS_BIT;
+
+ switch (tmp) {
+ case ADE9000_REG_AWATTOS:
+ return regmap_write(st->regmap,
+ ADE9000_ADDR_ADJUST(ADE9000_REG_AWATTOS,
+ chan->channel), val);
+ case ADE9000_REG_AVAR:
+ return regmap_write(st->regmap,
+ ADE9000_ADDR_ADJUST(ADE9000_REG_AVAROS,
+ chan->channel), val);
+ case ADE9000_REG_AFVAR:
+ return regmap_write(st->regmap,
+ ADE9000_ADDR_ADJUST(ADE9000_REG_AFVAROS,
+ chan->channel), val);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_CALIBSCALE:
+ /*
+ * Calibration gain registers for fine-tuning measurements.
+ * These are separate from PGA gain and applied in the digital domain.
+ */
+ switch (chan->type) {
+ case IIO_CURRENT:
+ return regmap_write(st->regmap,
+ ADE9000_ADDR_ADJUST(ADE9000_REG_AIGAIN,
+ chan->channel), val);
+ case IIO_VOLTAGE:
+ return regmap_write(st->regmap,
+ ADE9000_ADDR_ADJUST(ADE9000_REG_AVGAIN,
+ chan->channel), val);
+ case IIO_POWER:
+ return regmap_write(st->regmap,
+ ADE9000_ADDR_ADJUST(ADE9000_REG_APGAIN,
+ chan->channel), val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ /* Per-channel scales are read-only */
+ return -EINVAL;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ade9000_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg,
+ unsigned int tx_val,
+ unsigned int *rx_val)
+{
+ struct ade9000_state *st = iio_priv(indio_dev);
+
+ if (rx_val)
+ return regmap_read(st->regmap, reg, rx_val);
+
+ return regmap_write(st->regmap, reg, tx_val);
+}
+
+static int ade9000_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct ade9000_state *st = iio_priv(indio_dev);
+ u32 interrupts1;
+ int ret;
+
+ /* All events use MASK1 register */
+ ret = regmap_read(st->regmap, ADE9000_REG_MASK1, &interrupts1);
+ if (ret)
+ return ret;
+
+ switch (chan->channel) {
+ case ADE9000_PHASE_A_NR:
+ if (chan->type == IIO_VOLTAGE && dir == IIO_EV_DIR_EITHER)
+ return !!(interrupts1 & ADE9000_ST1_ZXVA_BIT);
+ else if (chan->type == IIO_CURRENT && dir == IIO_EV_DIR_EITHER)
+ return !!(interrupts1 & ADE9000_ST1_ZXIA_BIT);
+ else if (chan->type == IIO_ALTVOLTAGE && dir == IIO_EV_DIR_RISING)
+ return !!(interrupts1 & ADE9000_ST1_SWELLA_BIT);
+ else if (chan->type == IIO_ALTVOLTAGE && dir == IIO_EV_DIR_FALLING)
+ return !!(interrupts1 & ADE9000_ST1_DIPA_BIT);
+ dev_err_ratelimited(&indio_dev->dev,
+ "Invalid channel type %d or direction %d for phase A\n", chan->type, dir);
+ return -EINVAL;
+ case ADE9000_PHASE_B_NR:
+ if (chan->type == IIO_VOLTAGE && dir == IIO_EV_DIR_EITHER)
+ return !!(interrupts1 & ADE9000_ST1_ZXVB_BIT);
+ else if (chan->type == IIO_CURRENT && dir == IIO_EV_DIR_EITHER)
+ return !!(interrupts1 & ADE9000_ST1_ZXIB_BIT);
+ else if (chan->type == IIO_ALTVOLTAGE && dir == IIO_EV_DIR_RISING)
+ return !!(interrupts1 & ADE9000_ST1_SWELLB_BIT);
+ else if (chan->type == IIO_ALTVOLTAGE && dir == IIO_EV_DIR_FALLING)
+ return !!(interrupts1 & ADE9000_ST1_DIPB_BIT);
+ dev_err_ratelimited(&indio_dev->dev,
+ "Invalid channel type %d or direction %d for phase B\n", chan->type, dir);
+ return -EINVAL;
+ case ADE9000_PHASE_C_NR:
+ if (chan->type == IIO_VOLTAGE && dir == IIO_EV_DIR_EITHER)
+ return !!(interrupts1 & ADE9000_ST1_ZXVC_BIT);
+ else if (chan->type == IIO_CURRENT && dir == IIO_EV_DIR_EITHER)
+ return !!(interrupts1 & ADE9000_ST1_ZXIC_BIT);
+ else if (chan->type == IIO_ALTVOLTAGE && dir == IIO_EV_DIR_RISING)
+ return !!(interrupts1 & ADE9000_ST1_SWELLC_BIT);
+ else if (chan->type == IIO_ALTVOLTAGE && dir == IIO_EV_DIR_FALLING)
+ return !!(interrupts1 & ADE9000_ST1_DIPC_BIT);
+ dev_err_ratelimited(&indio_dev->dev,
+ "Invalid channel type %d or direction %d for phase C\n", chan->type, dir);
+ return -EINVAL;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ade9000_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ bool state)
+{
+ struct ade9000_state *st = iio_priv(indio_dev);
+ u32 bit_mask;
+ int ret;
+
+ /* Clear all pending events in STATUS1 register (write 1 to clear) */
+ ret = regmap_write(st->regmap, ADE9000_REG_STATUS1, GENMASK(31, 0));
+ if (ret)
+ return ret;
+
+ /* Determine which interrupt bit to enable/disable */
+ switch (chan->channel) {
+ case ADE9000_PHASE_A_NR:
+ if (chan->type == IIO_VOLTAGE && dir == IIO_EV_DIR_EITHER) {
+ bit_mask = ADE9000_ST1_ZXVA_BIT;
+ if (state)
+ st->wfb_trg |= ADE9000_WFB_TRG_ZXVA_BIT;
+ else
+ st->wfb_trg &= ~ADE9000_WFB_TRG_ZXVA_BIT;
+ } else if (chan->type == IIO_CURRENT && dir == IIO_EV_DIR_EITHER) {
+ bit_mask = ADE9000_ST1_ZXIA_BIT;
+ if (state)
+ st->wfb_trg |= ADE9000_WFB_TRG_ZXIA_BIT;
+ else
+ st->wfb_trg &= ~ADE9000_WFB_TRG_ZXIA_BIT;
+ } else if (chan->type == IIO_ALTVOLTAGE && dir == IIO_EV_DIR_RISING) {
+ bit_mask = ADE9000_ST1_SWELLA_BIT;
+ if (state)
+ st->wfb_trg |= ADE9000_WFB_TRG_SWELL_BIT;
+ else
+ st->wfb_trg &= ~ADE9000_WFB_TRG_SWELL_BIT;
+ } else if (chan->type == IIO_ALTVOLTAGE && dir == IIO_EV_DIR_FALLING) {
+ bit_mask = ADE9000_ST1_DIPA_BIT;
+ if (state)
+ st->wfb_trg |= ADE9000_WFB_TRG_DIP_BIT;
+ else
+ st->wfb_trg &= ~ADE9000_WFB_TRG_DIP_BIT;
+ } else {
+ dev_err_ratelimited(&indio_dev->dev, "Invalid channel type %d or direction %d for phase A\n",
+ chan->type, dir);
+ return -EINVAL;
+ }
+ break;
+ case ADE9000_PHASE_B_NR:
+ if (chan->type == IIO_VOLTAGE && dir == IIO_EV_DIR_EITHER) {
+ bit_mask = ADE9000_ST1_ZXVB_BIT;
+ if (state)
+ st->wfb_trg |= ADE9000_WFB_TRG_ZXVB_BIT;
+ else
+ st->wfb_trg &= ~ADE9000_WFB_TRG_ZXVB_BIT;
+ } else if (chan->type == IIO_CURRENT && dir == IIO_EV_DIR_EITHER) {
+ bit_mask = ADE9000_ST1_ZXIB_BIT;
+ if (state)
+ st->wfb_trg |= ADE9000_WFB_TRG_ZXIB_BIT;
+ else
+ st->wfb_trg &= ~ADE9000_WFB_TRG_ZXIB_BIT;
+ } else if (chan->type == IIO_ALTVOLTAGE && dir == IIO_EV_DIR_RISING) {
+ bit_mask = ADE9000_ST1_SWELLB_BIT;
+ if (state)
+ st->wfb_trg |= ADE9000_WFB_TRG_SWELL_BIT;
+ else
+ st->wfb_trg &= ~ADE9000_WFB_TRG_SWELL_BIT;
+ } else if (chan->type == IIO_ALTVOLTAGE && dir == IIO_EV_DIR_FALLING) {
+ bit_mask = ADE9000_ST1_DIPB_BIT;
+ if (state)
+ st->wfb_trg |= ADE9000_WFB_TRG_DIP_BIT;
+ else
+ st->wfb_trg &= ~ADE9000_WFB_TRG_DIP_BIT;
+ } else {
+ dev_err_ratelimited(&indio_dev->dev,
+ "Invalid channel type %d or direction %d for phase B\n",
+ chan->type, dir);
+ return -EINVAL;
+ }
+ break;
+ case ADE9000_PHASE_C_NR:
+ if (chan->type == IIO_VOLTAGE && dir == IIO_EV_DIR_EITHER) {
+ bit_mask = ADE9000_ST1_ZXVC_BIT;
+ if (state)
+ st->wfb_trg |= ADE9000_WFB_TRG_ZXVC_BIT;
+ else
+ st->wfb_trg &= ~ADE9000_WFB_TRG_ZXVC_BIT;
+ } else if (chan->type == IIO_CURRENT && dir == IIO_EV_DIR_EITHER) {
+ bit_mask = ADE9000_ST1_ZXIC_BIT;
+ if (state)
+ st->wfb_trg |= ADE9000_WFB_TRG_ZXIC_BIT;
+ else
+ st->wfb_trg &= ~ADE9000_WFB_TRG_ZXIC_BIT;
+ } else if (chan->type == IIO_ALTVOLTAGE && dir == IIO_EV_DIR_RISING) {
+ bit_mask = ADE9000_ST1_SWELLC_BIT;
+ if (state)
+ st->wfb_trg |= ADE9000_WFB_TRG_SWELL_BIT;
+ else
+ st->wfb_trg &= ~ADE9000_WFB_TRG_SWELL_BIT;
+ } else if (chan->type == IIO_ALTVOLTAGE && dir == IIO_EV_DIR_FALLING) {
+ bit_mask = ADE9000_ST1_DIPC_BIT;
+ if (state)
+ st->wfb_trg |= ADE9000_WFB_TRG_DIP_BIT;
+ else
+ st->wfb_trg &= ~ADE9000_WFB_TRG_DIP_BIT;
+ } else {
+ dev_err_ratelimited(&indio_dev->dev,
+ "Invalid channel type %d or direction %d for phase C\n",
+ chan->type, dir);
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Set the mask bit when enabling the event, clear it when disabling */
+ return regmap_assign_bits(st->regmap, ADE9000_REG_MASK1, bit_mask, state);
+}
+
+static int ade9000_write_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ struct ade9000_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ switch (dir) {
+ case IIO_EV_DIR_FALLING:
+ return regmap_write(st->regmap, ADE9000_REG_DIP_LVL, val);
+ case IIO_EV_DIR_RISING:
+ return regmap_write(st->regmap, ADE9000_REG_SWELL_LVL, val);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ade9000_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct ade9000_state *st = iio_priv(indio_dev);
+ unsigned int data;
+ int ret;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ switch (dir) {
+ case IIO_EV_DIR_FALLING:
+ ret = regmap_read(st->regmap, ADE9000_REG_DIP_LVL, &data);
+ if (ret)
+ return ret;
+ *val = data;
+ return IIO_VAL_INT;
+ case IIO_EV_DIR_RISING:
+ ret = regmap_read(st->regmap, ADE9000_REG_SWELL_LVL, &data);
+ if (ret)
+ return ret;
+ *val = data;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ade9000_waveform_buffer_config(struct iio_dev *indio_dev)
+{
+ struct ade9000_state *st = iio_priv(indio_dev);
+ u32 wfb_cfg_val;
+ u32 active_scans;
+
+ bitmap_to_arr32(&active_scans, indio_dev->active_scan_mask,
+ iio_get_masklength(indio_dev));
+
+ switch (active_scans) {
+ case ADE9000_SCAN_POS_IA | ADE9000_SCAN_POS_VA:
+ wfb_cfg_val = ADE9000_WFB_CFG_IA_VA;
+ st->wfb_nr_activ_chan = 2;
+ break;
+ case ADE9000_SCAN_POS_IB | ADE9000_SCAN_POS_VB:
+ wfb_cfg_val = ADE9000_WFB_CFG_IB_VB;
+ st->wfb_nr_activ_chan = 2;
+ break;
+ case ADE9000_SCAN_POS_IC | ADE9000_SCAN_POS_VC:
+ wfb_cfg_val = ADE9000_WFB_CFG_IC_VC;
+ st->wfb_nr_activ_chan = 2;
+ break;
+ case ADE9000_SCAN_POS_IA:
+ wfb_cfg_val = ADE9000_WFB_CFG_IA;
+ st->wfb_nr_activ_chan = 1;
+ break;
+ case ADE9000_SCAN_POS_VA:
+ wfb_cfg_val = ADE9000_WFB_CFG_VA;
+ st->wfb_nr_activ_chan = 1;
+ break;
+ case ADE9000_SCAN_POS_IB:
+ wfb_cfg_val = ADE9000_WFB_CFG_IB;
+ st->wfb_nr_activ_chan = 1;
+ break;
+ case ADE9000_SCAN_POS_VB:
+ wfb_cfg_val = ADE9000_WFB_CFG_VB;
+ st->wfb_nr_activ_chan = 1;
+ break;
+ case ADE9000_SCAN_POS_IC:
+ wfb_cfg_val = ADE9000_WFB_CFG_IC;
+ st->wfb_nr_activ_chan = 1;
+ break;
+ case ADE9000_SCAN_POS_VC:
+ wfb_cfg_val = ADE9000_WFB_CFG_VC;
+ st->wfb_nr_activ_chan = 1;
+ break;
+ case (ADE9000_SCAN_POS_IA | ADE9000_SCAN_POS_VA | ADE9000_SCAN_POS_IB |
+ ADE9000_SCAN_POS_VB | ADE9000_SCAN_POS_IC | ADE9000_SCAN_POS_VC):
+ wfb_cfg_val = ADE9000_WFB_CFG_ALL_CHAN;
+ st->wfb_nr_activ_chan = 6;
+ break;
+ default:
+ dev_err(&st->spi->dev, "Unsupported combination of scans\n");
+ return -EINVAL;
+ }
+
+ wfb_cfg_val |= FIELD_PREP(ADE9000_WF_SRC_MASK, st->wf_src);
+
+ return regmap_write(st->regmap, ADE9000_REG_WFB_CFG, wfb_cfg_val);
+}
+
+static int ade9000_waveform_buffer_interrupt_setup(struct ade9000_state *st)
+{
+ int ret;
+
+ ret = regmap_write(st->regmap, ADE9000_REG_WFB_TRG_CFG, 0x0);
+ if (ret)
+ return ret;
+
+ /* Always use streaming mode setup */
+ ret = regmap_write(st->regmap, ADE9000_REG_WFB_PG_IRQEN,
+ ADE9000_MIDDLE_PAGE_BIT);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->regmap, ADE9000_REG_STATUS0, GENMASK(31, 0));
+ if (ret)
+ return ret;
+
+ return regmap_set_bits(st->regmap, ADE9000_REG_MASK0,
+ ADE9000_ST0_PAGE_FULL_BIT);
+}
+
+static int ade9000_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct ade9000_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = ade9000_waveform_buffer_config(indio_dev);
+ if (ret)
+ return ret;
+
+ st->wfb_nr_samples = ADE9000_WFB_MAX_SAMPLES_CHAN * st->wfb_nr_activ_chan;
+
+ ade9000_configure_scan(indio_dev, ADE9000_REG_WF_BUFF);
+
+ ret = ade9000_waveform_buffer_interrupt_setup(st);
+ if (ret)
+ return ret;
+
+ ret = regmap_set_bits(st->regmap, ADE9000_REG_WFB_CFG,
+ ADE9000_WF_CAP_EN_MASK);
+ if (ret) {
+ dev_err(&st->spi->dev, "Post-enable waveform buffer enable fail\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ade9000_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ struct ade9000_state *st = iio_priv(indio_dev);
+ struct device *dev = &st->spi->dev;
+ u32 interrupts;
+ int ret;
+
+ ret = regmap_clear_bits(st->regmap, ADE9000_REG_WFB_CFG,
+ ADE9000_WF_CAP_EN_MASK);
+ if (ret) {
+ dev_err(dev, "Post-disable waveform buffer disable fail\n");
+ return ret;
+ }
+
+ ret = regmap_write(st->regmap, ADE9000_REG_WFB_TRG_CFG, 0x0);
+ if (ret)
+ return ret;
+
+ interrupts = ADE9000_ST0_WFB_TRIG_BIT | ADE9000_ST0_PAGE_FULL_BIT;
+
+ ret = regmap_clear_bits(st->regmap, ADE9000_REG_MASK0, interrupts);
+ if (ret) {
+ dev_err(dev, "Post-disable update maks0 fail\n");
+ return ret;
+ }
+
+ return regmap_write(st->regmap, ADE9000_REG_STATUS0, GENMASK(31, 0));
+}
+
+static const struct iio_buffer_setup_ops ade9000_buffer_ops = {
+ .preenable = &ade9000_buffer_preenable,
+ .postdisable = &ade9000_buffer_postdisable,
+};
+
+static int ade9000_reset(struct ade9000_state *st)
+{
+ struct device *dev = &st->spi->dev;
+ struct gpio_desc *gpio_reset;
+ int ret;
+
+ gpio_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpio_reset))
+ return PTR_ERR(gpio_reset);
+
+ /* Software reset via register if no GPIO available */
+ if (!gpio_reset) {
+ ret = regmap_set_bits(st->regmap, ADE9000_REG_CONFIG1,
+ ADE9000_SWRST_BIT);
+ if (ret)
+ return ret;
+ fsleep(90);
+ return 0;
+ }
+
+ /* Hardware reset via GPIO */
+ fsleep(10);
+ gpiod_set_value_cansleep(gpio_reset, 0);
+ fsleep(50000);
+
+ /* Only wait for completion if IRQ1 is available to signal reset done */
+ if (fwnode_irq_get_byname(dev_fwnode(dev), "irq1") >= 0) {
+ if (!wait_for_completion_timeout(&st->reset_completion,
+ msecs_to_jiffies(1000))) {
+ dev_err(dev, "Reset timeout after 1s\n");
+ return -ETIMEDOUT;
+ }
+ }
+ /* If no IRQ available, reset is already complete after the 50ms delay above */
+
+ return 0;
+}
+
+static int ade9000_setup(struct ade9000_state *st)
+{
+ struct device *dev = &st->spi->dev;
+ int ret;
+
+ ret = regmap_multi_reg_write(st->regmap, ade9000_initialization_sequence,
+ ARRAY_SIZE(ade9000_initialization_sequence));
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to write register sequence");
+
+ fsleep(2000);
+
+ return 0;
+}
+
+static const struct iio_info ade9000_info = {
+ .read_raw = ade9000_read_raw,
+ .write_raw = ade9000_write_raw,
+ .debugfs_reg_access = ade9000_reg_access,
+ .write_event_config = ade9000_write_event_config,
+ .read_event_config = ade9000_read_event_config,
+ .write_event_value = ade9000_write_event_value,
+ .read_event_value = ade9000_read_event_value,
+};
+
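+/*
+ * val_bits covers the widest (32-bit) registers; the custom reg_read and
+ * reg_write callbacks select 16- or 32-bit SPI transfers per register, so
+ * the regmap cache sees a uniform 32-bit value space.
+ */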
+static const struct regmap_config ade9000_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 32,
+ .max_register = 0x6bc,
+ .zero_flag_mask = true,
+ .cache_type = REGCACHE_RBTREE,
+ .reg_read = ade9000_spi_read_reg,
+ .reg_write = ade9000_spi_write_reg,
+ .volatile_reg = ade9000_is_volatile_reg,
+};
+
+static int ade9000_setup_clkout(struct device *dev, struct ade9000_state *st)
+{
+ struct clk_hw *clkout_hw;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_COMMON_CLK))
+ return 0;
+
+ /*
+ * Only provide clock output when using external CMOS clock.
+ * When using crystal, CLKOUT is connected to crystal and shouldn't
+ * be used as clock provider for other devices.
+ */
+ if (!device_property_present(dev, "#clock-cells") || !st->clkin)
+ return 0;
+
+ /* CLKOUT passes through CLKIN with divider of 1 */
+ clkout_hw = devm_clk_hw_register_divider(dev, "clkout", __clk_get_name(st->clkin),
+ CLK_SET_RATE_PARENT, NULL, 0, 1, 0, NULL);
+ if (IS_ERR(clkout_hw))
+ return dev_err_probe(dev, PTR_ERR(clkout_hw), "Failed to register clkout");
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, clkout_hw);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add clock provider");
+
+ return 0;
+}
+
+static int ade9000_request_irq(struct device *dev, const char *name,
+ irq_handler_t handler, void *dev_id)
+{
+ int irq, ret;
+
+ irq = fwnode_irq_get_byname(dev_fwnode(dev), name);
+ if (irq == -EINVAL)
+ return 0; /* interrupts are optional */
+ if (irq < 0)
+ return dev_err_probe(dev, irq, "Failed to get %s irq", name);
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, handler,
+ IRQF_ONESHOT, KBUILD_MODNAME, dev_id);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request %s irq", name);
+
+ return 0;
+}
+
+static int ade9000_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct iio_dev *indio_dev;
+ struct ade9000_state *st;
+ struct regmap *regmap;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+ regmap = devm_regmap_init(dev, NULL, st, &ade9000_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap), "Unable to allocate ADE9000 regmap");
+
+ st->regmap = regmap;
+ st->spi = spi;
+
+ init_completion(&st->reset_completion);
+
+ ret = ade9000_request_irq(dev, "irq0", ade9000_irq0_thread, indio_dev);
+ if (ret)
+ return ret;
+
+ ret = ade9000_request_irq(dev, "irq1", ade9000_irq1_thread, indio_dev);
+ if (ret)
+ return ret;
+
+ ret = ade9000_request_irq(dev, "dready", ade9000_dready_thread, indio_dev);
+ if (ret)
+ return ret;
+
+ ret = devm_mutex_init(dev, &st->lock);
+ if (ret)
+ return ret;
+
+ /* External CMOS clock input (optional - crystal can be used instead) */
+ st->clkin = devm_clk_get_optional_enabled(dev, NULL);
+ if (IS_ERR(st->clkin))
+ return dev_err_probe(dev, PTR_ERR(st->clkin), "Failed to get and enable clkin");
+
+ ret = ade9000_setup_clkout(dev, st);
+ if (ret)
+ return ret;
+
+ indio_dev->name = "ade9000";
+ indio_dev->info = &ade9000_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->setup_ops = &ade9000_buffer_ops;
+
+ ret = devm_regulator_get_enable(dev, "vdd");
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to get and enable vdd regulator\n");
+
+ indio_dev->channels = ade9000_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ade9000_channels);
+
+ ret = devm_iio_kfifo_buffer_setup(dev, indio_dev,
+ &ade9000_buffer_ops);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to setup IIO buffer");
+
+ ret = ade9000_reset(st);
+ if (ret)
+ return ret;
+
+ /* Configure reference selection if vref regulator is available */
+ ret = devm_regulator_get_enable_optional(dev, "vref");
+ if (ret >= 0) {
+ ret = regmap_set_bits(st->regmap, ADE9000_REG_CONFIG1,
+ ADE9000_EXT_REF_MASK);
+ if (ret)
+ return ret;
+ } else if (ret != -ENODEV) {
+ return dev_err_probe(dev, ret,
+ "Failed to get and enable vref regulator\n");
+ }
+
+ ret = ade9000_setup(st);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct spi_device_id ade9000_id[] = {
+ { "ade9000", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ade9000_id);
+
+static const struct of_device_id ade9000_of_match[] = {
+ { .compatible = "adi,ade9000" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ade9000_of_match);
+
+static struct spi_driver ade9000_driver = {
+ .driver = {
+ .name = "ade9000",
+ .of_match_table = ade9000_of_match,
+ },
+ .probe = ade9000_probe,
+ .id_table = ade9000_id,
+};
+module_spi_driver(ade9000_driver);
+
+MODULE_AUTHOR("Antoniu Miclaus <antoniu.miclaus@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADE9000");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
index eb42e29960e4..14fa4238c2b9 100644
--- a/drivers/iio/adc/adi-axi-adc.c
+++ b/drivers/iio/adc/adi-axi-adc.c
@@ -618,6 +618,7 @@ static const struct iio_backend_ops adi_axi_adc_ops = {
.chan_status = axi_adc_chan_status,
.interface_type_get = axi_adc_interface_type_get,
.oversampling_ratio_set = axi_adc_oversampling_ratio_set,
+ .num_lanes_set = axi_adc_num_lanes_set,
.debugfs_reg_access = iio_backend_debugfs_ptr(axi_adc_reg_access),
.debugfs_print_chan_status = iio_backend_debugfs_ptr(axi_adc_debugfs_print_chan_status),
};
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index c3450246730e..b4c36e6a7490 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -896,7 +896,6 @@ static int at91_adc_config_emr(struct at91_adc_state *st,
emr |= osr | AT91_SAMA5D2_TRACKX(trackx);
at91_adc_writel(st, EMR, emr);
- pm_runtime_mark_last_busy(st->dev);
pm_runtime_put_autosuspend(st->dev);
st->oversampling_ratio = oversampling_ratio;
@@ -971,7 +970,6 @@ static int at91_adc_configure_touch(struct at91_adc_state *st, bool state)
AT91_SAMA5D2_IER_PEN | AT91_SAMA5D2_IER_NOPEN);
at91_adc_writel(st, TSMR, 0);
- pm_runtime_mark_last_busy(st->dev);
pm_runtime_put_autosuspend(st->dev);
return 0;
}
@@ -1142,10 +1140,8 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
at91_adc_configure_trigger_registers(st, state);
- if (!state) {
- pm_runtime_mark_last_busy(st->dev);
+ if (!state)
pm_runtime_put_autosuspend(st->dev);
- }
return 0;
}
@@ -1336,7 +1332,6 @@ static int at91_adc_buffer_prepare(struct iio_dev *indio_dev)
at91_adc_writel(st, IER, AT91_SAMA5D2_IER_DRDY);
pm_runtime_put:
- pm_runtime_mark_last_busy(st->dev);
pm_runtime_put_autosuspend(st->dev);
return ret;
}
@@ -1394,7 +1389,6 @@ static int at91_adc_buffer_postdisable(struct iio_dev *indio_dev)
if (st->dma_st.dma_chan)
dmaengine_terminate_sync(st->dma_st.dma_chan);
- pm_runtime_mark_last_busy(st->dev);
pm_runtime_put_autosuspend(st->dev);
return 0;
@@ -1603,7 +1597,6 @@ static void at91_adc_setup_samp_freq(struct iio_dev *indio_dev, unsigned freq,
mr |= AT91_SAMA5D2_MR_TRACKTIM(tracktim);
at91_adc_writel(st, MR, mr);
- pm_runtime_mark_last_busy(st->dev);
pm_runtime_put_autosuspend(st->dev);
dev_dbg(&indio_dev->dev, "freq: %u, startup: %u, prescal: %u, tracktim=%u\n",
@@ -1809,7 +1802,6 @@ static int at91_adc_read_info_raw(struct iio_dev *indio_dev,
at91_adc_readl(st, LCDR);
pm_runtime_put:
- pm_runtime_mark_last_busy(st->dev);
pm_runtime_put_autosuspend(st->dev);
return ret;
}
@@ -1890,7 +1882,6 @@ static int at91_adc_read_temp(struct iio_dev *indio_dev,
restore_config:
/* Revert previous settings. */
at91_adc_temp_sensor_configure(st, false);
- pm_runtime_mark_last_busy(st->dev);
pm_runtime_put_autosuspend(st->dev);
if (ret < 0)
return ret;
@@ -2465,7 +2456,6 @@ static int at91_adc_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "version: %x\n",
readl_relaxed(st->base + st->soc_info.platform->layout->VERSION));
- pm_runtime_mark_last_busy(st->dev);
pm_runtime_put_autosuspend(st->dev);
return 0;
@@ -2567,7 +2557,6 @@ static int at91_adc_resume(struct device *dev)
at91_adc_configure_trigger_registers(st, true);
}
- pm_runtime_mark_last_busy(st->dev);
pm_runtime_put_autosuspend(st->dev);
return 0;
diff --git a/drivers/iio/adc/bcm_iproc_adc.c b/drivers/iio/adc/bcm_iproc_adc.c
index f258668b0dc7..6426c9e6ccc9 100644
--- a/drivers/iio/adc/bcm_iproc_adc.c
+++ b/drivers/iio/adc/bcm_iproc_adc.c
@@ -511,10 +511,8 @@ static int iproc_adc_probe(struct platform_device *pdev)
indio_dev = devm_iio_device_alloc(&pdev->dev,
sizeof(*adc_priv));
- if (!indio_dev) {
- dev_err(&pdev->dev, "failed to allocate iio device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
adc_priv = iio_priv(indio_dev);
platform_set_drvdata(pdev, indio_dev);
diff --git a/drivers/iio/adc/cpcap-adc.c b/drivers/iio/adc/cpcap-adc.c
index ba7cbd3b4822..d9ee2ea116a7 100644
--- a/drivers/iio/adc/cpcap-adc.c
+++ b/drivers/iio/adc/cpcap-adc.c
@@ -953,11 +953,9 @@ static int cpcap_adc_probe(struct platform_device *pdev)
int error;
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*ddata));
- if (!indio_dev) {
- dev_err(&pdev->dev, "failed to allocate iio device\n");
-
+ if (!indio_dev)
return -ENOMEM;
- }
+
ddata = iio_priv(indio_dev);
ddata->ato = device_get_match_data(&pdev->dev);
if (!ddata->ato)
diff --git a/drivers/iio/adc/da9150-gpadc.c b/drivers/iio/adc/da9150-gpadc.c
index b99291ce2a45..625e3a8e4d03 100644
--- a/drivers/iio/adc/da9150-gpadc.c
+++ b/drivers/iio/adc/da9150-gpadc.c
@@ -308,10 +308,9 @@ static int da9150_gpadc_probe(struct platform_device *pdev)
int irq, ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*gpadc));
- if (!indio_dev) {
- dev_err(&pdev->dev, "Failed to allocate IIO device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
+
gpadc = iio_priv(indio_dev);
gpadc->da9150 = da9150;
diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c
index 5aea7644780f..eb902a946efe 100644
--- a/drivers/iio/adc/dln2-adc.c
+++ b/drivers/iio/adc/dln2-adc.c
@@ -584,10 +584,8 @@ static int dln2_adc_probe(struct platform_device *pdev)
int i, ret, chans;
indio_dev = devm_iio_device_alloc(dev, sizeof(*dln2));
- if (!indio_dev) {
- dev_err(dev, "failed allocating iio device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
dln2 = iio_priv(indio_dev);
dln2->pdev = pdev;
@@ -628,10 +626,9 @@ static int dln2_adc_probe(struct platform_device *pdev)
dln2->trig = devm_iio_trigger_alloc(dev, "%s-dev%d",
indio_dev->name,
iio_device_id(indio_dev));
- if (!dln2->trig) {
- dev_err(dev, "failed to allocate trigger\n");
+ if (!dln2->trig)
return -ENOMEM;
- }
+
iio_trigger_set_drvdata(dln2->trig, dln2);
ret = devm_iio_trigger_register(dev, dln2->trig);
if (ret) {
diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
index 4614cf848535..1484adff00df 100644
--- a/drivers/iio/adc/exynos_adc.c
+++ b/drivers/iio/adc/exynos_adc.c
@@ -19,11 +19,9 @@
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/of.h>
-#include <linux/of_irq.h>
#include <linux/regulator/consumer.h>
#include <linux/of_platform.h>
#include <linux/err.h>
-#include <linux/input.h>
#include <linux/iio/iio.h>
#include <linux/iio/machine.h>
@@ -31,21 +29,14 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
-#include <linux/platform_data/touchscreen-s3c2410.h>
-
/* S3C/EXYNOS4412/5250 ADC_V1 registers definitions */
#define ADC_V1_CON(x) ((x) + 0x00)
-#define ADC_V1_TSC(x) ((x) + 0x04)
#define ADC_V1_DLY(x) ((x) + 0x08)
#define ADC_V1_DATX(x) ((x) + 0x0C)
#define ADC_V1_DATY(x) ((x) + 0x10)
#define ADC_V1_UPDN(x) ((x) + 0x14)
#define ADC_V1_INTCLR(x) ((x) + 0x18)
#define ADC_V1_MUX(x) ((x) + 0x1c)
-#define ADC_V1_CLRINTPNDNUP(x) ((x) + 0x20)
-
-/* S3C2410 ADC registers definitions */
-#define ADC_S3C2410_MUX(x) ((x) + 0x18)
/* Future ADC_V2 registers definitions */
#define ADC_V2_CON1(x) ((x) + 0x00)
@@ -61,13 +52,8 @@
#define ADC_V1_CON_PRSCLV(x) (((x) & 0xFF) << 6)
#define ADC_V1_CON_STANDBY (1u << 2)
-/* Bit definitions for S3C2410 ADC */
+/* Bit definitions for S3C2410 / S3C6410 ADC */
#define ADC_S3C2410_CON_SELMUX(x) (((x) & 7) << 3)
-#define ADC_S3C2410_DATX_MASK 0x3FF
-#define ADC_S3C2416_CON_RES_SEL (1u << 3)
-
-/* touch screen always uses channel 0 */
-#define ADC_S3C2410_MUX_TS 0
/* ADCTSC Register Bits */
#define ADC_S3C2443_TSC_UD_SEN (1u << 8)
@@ -75,8 +61,6 @@
#define ADC_S3C2410_TSC_YP_SEN (1u << 6)
#define ADC_S3C2410_TSC_XM_SEN (1u << 5)
#define ADC_S3C2410_TSC_XP_SEN (1u << 4)
-#define ADC_S3C2410_TSC_PULL_UP_DISABLE (1u << 3)
-#define ADC_S3C2410_TSC_AUTO_PST (1u << 2)
#define ADC_S3C2410_TSC_XY_PST(x) (((x) & 0x3) << 0)
#define ADC_TSC_WAIT4INT (ADC_S3C2410_TSC_YM_SEN | \
@@ -84,12 +68,6 @@
ADC_S3C2410_TSC_XP_SEN | \
ADC_S3C2410_TSC_XY_PST(3))
-#define ADC_TSC_AUTOPST (ADC_S3C2410_TSC_YM_SEN | \
- ADC_S3C2410_TSC_YP_SEN | \
- ADC_S3C2410_TSC_XP_SEN | \
- ADC_S3C2410_TSC_AUTO_PST | \
- ADC_S3C2410_TSC_XY_PST(0))
-
/* Bit definitions for ADC_V2 */
#define ADC_V2_CON1_SOFT_RESET (1u << 2)
@@ -121,14 +99,11 @@
struct exynos_adc {
struct exynos_adc_data *data;
struct device *dev;
- struct input_dev *input;
void __iomem *regs;
struct regmap *pmu_map;
struct clk *clk;
struct clk *sclk;
unsigned int irq;
- unsigned int tsirq;
- unsigned int delay;
struct regulator *vdd;
struct completion completion;
@@ -136,12 +111,6 @@ struct exynos_adc {
u32 value;
unsigned int version;
- bool ts_enabled;
-
- bool read_ts;
- u32 ts_x;
- u32 ts_y;
-
/*
* Lock to protect from potential concurrent access to the
* completion callback during a manual conversion. For this driver
@@ -241,7 +210,7 @@ static void exynos_adc_v1_init_hw(struct exynos_adc *info)
writel(con1, ADC_V1_CON(info->regs));
/* set touchscreen delay */
- writel(info->delay, ADC_V1_DLY(info->regs));
+ writel(10000, ADC_V1_DLY(info->regs));
}
static void exynos_adc_v1_exit_hw(struct exynos_adc *info)
@@ -307,53 +276,6 @@ static const struct exynos_adc_data exynos_adc_s5pv210_data = {
.start_conv = exynos_adc_v1_start_conv,
};
-static void exynos_adc_s3c2416_start_conv(struct exynos_adc *info,
- unsigned long addr)
-{
- u32 con1;
-
- /* Enable 12 bit ADC resolution */
- con1 = readl(ADC_V1_CON(info->regs));
- con1 |= ADC_S3C2416_CON_RES_SEL;
- writel(con1, ADC_V1_CON(info->regs));
-
- /* Select channel for S3C2416 */
- writel(addr, ADC_S3C2410_MUX(info->regs));
-
- con1 = readl(ADC_V1_CON(info->regs));
- writel(con1 | ADC_CON_EN_START, ADC_V1_CON(info->regs));
-}
-
-static struct exynos_adc_data const exynos_adc_s3c2416_data = {
- .num_channels = MAX_ADC_V1_CHANNELS,
- .mask = ADC_DATX_MASK, /* 12 bit ADC resolution */
-
- .init_hw = exynos_adc_v1_init_hw,
- .exit_hw = exynos_adc_v1_exit_hw,
- .start_conv = exynos_adc_s3c2416_start_conv,
-};
-
-static void exynos_adc_s3c2443_start_conv(struct exynos_adc *info,
- unsigned long addr)
-{
- u32 con1;
-
- /* Select channel for S3C2433 */
- writel(addr, ADC_S3C2410_MUX(info->regs));
-
- con1 = readl(ADC_V1_CON(info->regs));
- writel(con1 | ADC_CON_EN_START, ADC_V1_CON(info->regs));
-}
-
-static struct exynos_adc_data const exynos_adc_s3c2443_data = {
- .num_channels = MAX_ADC_V1_CHANNELS,
- .mask = ADC_S3C2410_DATX_MASK, /* 10 bit ADC resolution */
-
- .init_hw = exynos_adc_v1_init_hw,
- .exit_hw = exynos_adc_v1_exit_hw,
- .start_conv = exynos_adc_s3c2443_start_conv,
-};
-
static void exynos_adc_s3c64xx_start_conv(struct exynos_adc *info,
unsigned long addr)
{
@@ -365,15 +287,6 @@ static void exynos_adc_s3c64xx_start_conv(struct exynos_adc *info,
writel(con1 | ADC_CON_EN_START, ADC_V1_CON(info->regs));
}
-static struct exynos_adc_data const exynos_adc_s3c24xx_data = {
- .num_channels = MAX_ADC_V1_CHANNELS,
- .mask = ADC_S3C2410_DATX_MASK, /* 10 bit ADC resolution */
-
- .init_hw = exynos_adc_v1_init_hw,
- .exit_hw = exynos_adc_v1_exit_hw,
- .start_conv = exynos_adc_s3c64xx_start_conv,
-};
-
static struct exynos_adc_data const exynos_adc_s3c64xx_data = {
.num_channels = MAX_ADC_V1_CHANNELS,
.mask = ADC_DATX_MASK, /* 12 bit ADC resolution */
@@ -486,18 +399,6 @@ static const struct exynos_adc_data exynos7_adc_data = {
static const struct of_device_id exynos_adc_match[] = {
{
- .compatible = "samsung,s3c2410-adc",
- .data = &exynos_adc_s3c24xx_data,
- }, {
- .compatible = "samsung,s3c2416-adc",
- .data = &exynos_adc_s3c2416_data,
- }, {
- .compatible = "samsung,s3c2440-adc",
- .data = &exynos_adc_s3c24xx_data,
- }, {
- .compatible = "samsung,s3c2443-adc",
- .data = &exynos_adc_s3c2443_data,
- }, {
.compatible = "samsung,s3c6410-adc",
.data = &exynos_adc_s3c64xx_data,
}, {
@@ -580,55 +481,13 @@ static int exynos_read_raw(struct iio_dev *indio_dev,
return ret;
}
-static int exynos_read_s3c64xx_ts(struct iio_dev *indio_dev, int *x, int *y)
-{
- struct exynos_adc *info = iio_priv(indio_dev);
- unsigned long time_left;
- int ret;
-
- mutex_lock(&info->lock);
- info->read_ts = true;
-
- reinit_completion(&info->completion);
-
- writel(ADC_S3C2410_TSC_PULL_UP_DISABLE | ADC_TSC_AUTOPST,
- ADC_V1_TSC(info->regs));
-
- /* Select the ts channel to be used and Trigger conversion */
- info->data->start_conv(info, ADC_S3C2410_MUX_TS);
-
- time_left = wait_for_completion_timeout(&info->completion,
- EXYNOS_ADC_TIMEOUT);
- if (time_left == 0) {
- dev_warn(&indio_dev->dev, "Conversion timed out! Resetting\n");
- if (info->data->init_hw)
- info->data->init_hw(info);
- ret = -ETIMEDOUT;
- } else {
- *x = info->ts_x;
- *y = info->ts_y;
- ret = 0;
- }
-
- info->read_ts = false;
- mutex_unlock(&info->lock);
-
- return ret;
-}
-
static irqreturn_t exynos_adc_isr(int irq, void *dev_id)
{
struct exynos_adc *info = dev_id;
u32 mask = info->data->mask;
/* Read value */
- if (info->read_ts) {
- info->ts_x = readl(ADC_V1_DATX(info->regs));
- info->ts_y = readl(ADC_V1_DATY(info->regs));
- writel(ADC_TSC_WAIT4INT | ADC_S3C2443_TSC_UD_SEN, ADC_V1_TSC(info->regs));
- } else {
- info->value = readl(ADC_V1_DATX(info->regs)) & mask;
- }
+ info->value = readl(ADC_V1_DATX(info->regs)) & mask;
/* clear irq */
if (info->data->clear_irq)
@@ -639,46 +498,6 @@ static irqreturn_t exynos_adc_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/*
- * Here we (ab)use a threaded interrupt handler to stay running
- * for as long as the touchscreen remains pressed, we report
- * a new event with the latest data and then sleep until the
- * next timer tick. This mirrors the behavior of the old
- * driver, with much less code.
- */
-static irqreturn_t exynos_ts_isr(int irq, void *dev_id)
-{
- struct exynos_adc *info = dev_id;
- struct iio_dev *dev = dev_get_drvdata(info->dev);
- u32 x, y;
- bool pressed;
- int ret;
-
- while (READ_ONCE(info->ts_enabled)) {
- ret = exynos_read_s3c64xx_ts(dev, &x, &y);
- if (ret == -ETIMEDOUT)
- break;
-
- pressed = x & y & ADC_DATX_PRESSED;
- if (!pressed) {
- input_report_key(info->input, BTN_TOUCH, 0);
- input_sync(info->input);
- break;
- }
-
- input_report_abs(info->input, ABS_X, x & ADC_DATX_MASK);
- input_report_abs(info->input, ABS_Y, y & ADC_DATY_MASK);
- input_report_key(info->input, BTN_TOUCH, 1);
- input_sync(info->input);
-
- usleep_range(1000, 1100);
- }
-
- writel(0, ADC_V1_CLRINTPNDNUP(info->regs));
-
- return IRQ_HANDLED;
-}
-
static int exynos_adc_reg_access(struct iio_dev *indio_dev,
unsigned reg, unsigned writeval,
unsigned *readval)
@@ -730,78 +549,17 @@ static int exynos_adc_remove_devices(struct device *dev, void *c)
return 0;
}
-static int exynos_adc_ts_open(struct input_dev *dev)
-{
- struct exynos_adc *info = input_get_drvdata(dev);
-
- WRITE_ONCE(info->ts_enabled, true);
- enable_irq(info->tsirq);
-
- return 0;
-}
-
-static void exynos_adc_ts_close(struct input_dev *dev)
-{
- struct exynos_adc *info = input_get_drvdata(dev);
-
- WRITE_ONCE(info->ts_enabled, false);
- disable_irq(info->tsirq);
-}
-
-static int exynos_adc_ts_init(struct exynos_adc *info)
-{
- int ret;
-
- if (info->tsirq <= 0)
- return -ENODEV;
-
- info->input = input_allocate_device();
- if (!info->input)
- return -ENOMEM;
-
- info->input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
- info->input->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
-
- input_set_abs_params(info->input, ABS_X, 0, 0x3FF, 0, 0);
- input_set_abs_params(info->input, ABS_Y, 0, 0x3FF, 0, 0);
-
- info->input->name = "S3C24xx TouchScreen";
- info->input->id.bustype = BUS_HOST;
- info->input->open = exynos_adc_ts_open;
- info->input->close = exynos_adc_ts_close;
-
- input_set_drvdata(info->input, info);
-
- ret = input_register_device(info->input);
- if (ret) {
- input_free_device(info->input);
- return ret;
- }
-
- ret = request_threaded_irq(info->tsirq, NULL, exynos_ts_isr,
- IRQF_ONESHOT | IRQF_NO_AUTOEN,
- "touchscreen", info);
- if (ret)
- input_unregister_device(info->input);
-
- return ret;
-}
-
static int exynos_adc_probe(struct platform_device *pdev)
{
struct exynos_adc *info = NULL;
struct device_node *np = pdev->dev.of_node;
- struct s3c2410_ts_mach_info *pdata = dev_get_platdata(&pdev->dev);
struct iio_dev *indio_dev = NULL;
- bool has_ts = false;
int ret;
int irq;
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(struct exynos_adc));
- if (!indio_dev) {
- dev_err(&pdev->dev, "failed allocating iio device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
info = iio_priv(indio_dev);
@@ -826,27 +584,10 @@ static int exynos_adc_probe(struct platform_device *pdev)
}
}
- /* leave out any TS related code if unreachable */
- if (IS_REACHABLE(CONFIG_INPUT)) {
- has_ts = of_property_read_bool(pdev->dev.of_node,
- "has-touchscreen") || pdata;
- }
-
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
info->irq = irq;
-
- if (has_ts) {
- irq = platform_get_irq(pdev, 1);
- if (irq == -EPROBE_DEFER)
- return irq;
-
- info->tsirq = irq;
- } else {
- info->tsirq = -1;
- }
-
info->dev = &pdev->dev;
init_completion(&info->completion);
@@ -910,16 +651,6 @@ static int exynos_adc_probe(struct platform_device *pdev)
if (info->data->init_hw)
info->data->init_hw(info);
- if (pdata)
- info->delay = pdata->delay;
- else
- info->delay = 10000;
-
- if (has_ts)
- ret = exynos_adc_ts_init(info);
- if (ret)
- goto err_iio;
-
ret = of_platform_populate(np, exynos_adc_match, NULL, &indio_dev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "failed adding child nodes\n");
@@ -931,11 +662,6 @@ static int exynos_adc_probe(struct platform_device *pdev)
err_of_populate:
device_for_each_child(&indio_dev->dev, NULL,
exynos_adc_remove_devices);
- if (has_ts) {
- input_unregister_device(info->input);
- free_irq(info->tsirq, info);
- }
-err_iio:
iio_device_unregister(indio_dev);
err_irq:
free_irq(info->irq, info);
@@ -955,10 +681,6 @@ static void exynos_adc_remove(struct platform_device *pdev)
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct exynos_adc *info = iio_priv(indio_dev);
- if (IS_REACHABLE(CONFIG_INPUT) && info->input) {
- free_irq(info->tsirq, info);
- input_unregister_device(info->input);
- }
device_for_each_child(&indio_dev->dev, NULL,
exynos_adc_remove_devices);
iio_device_unregister(indio_dev);
diff --git a/drivers/iio/adc/hx711.c b/drivers/iio/adc/hx711.c
index 7235fa9e13d5..1db8b68a8f64 100644
--- a/drivers/iio/adc/hx711.c
+++ b/drivers/iio/adc/hx711.c
@@ -465,7 +465,7 @@ static int hx711_probe(struct platform_device *pdev)
indio_dev = devm_iio_device_alloc(dev, sizeof(struct hx711_data));
if (!indio_dev)
- return dev_err_probe(dev, -ENOMEM, "failed to allocate IIO device\n");
+ return -ENOMEM;
hx711_data = iio_priv(indio_dev);
hx711_data->dev = dev;
diff --git a/drivers/iio/adc/imx7d_adc.c b/drivers/iio/adc/imx7d_adc.c
index 09ce71f6e941..039c0387da23 100644
--- a/drivers/iio/adc/imx7d_adc.c
+++ b/drivers/iio/adc/imx7d_adc.c
@@ -482,10 +482,8 @@ static int imx7d_adc_probe(struct platform_device *pdev)
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*info));
- if (!indio_dev) {
- dev_err(&pdev->dev, "Failed allocating iio device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
info = iio_priv(indio_dev);
info->dev = dev;
diff --git a/drivers/iio/adc/imx8qxp-adc.c b/drivers/iio/adc/imx8qxp-adc.c
index be13a6ed7e00..6fc50394ad90 100644
--- a/drivers/iio/adc/imx8qxp-adc.c
+++ b/drivers/iio/adc/imx8qxp-adc.c
@@ -229,7 +229,6 @@ static int imx8qxp_adc_read_raw(struct iio_dev *indio_dev,
ret = wait_for_completion_interruptible_timeout(&adc->completion,
IMX8QXP_ADC_TIMEOUT);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_sync_autosuspend(dev);
if (ret == 0) {
@@ -295,7 +294,6 @@ static int imx8qxp_adc_reg_access(struct iio_dev *indio_dev, unsigned int reg,
*readval = readl(adc->regs + reg);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_sync_autosuspend(dev);
return 0;
@@ -315,10 +313,8 @@ static int imx8qxp_adc_probe(struct platform_device *pdev)
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*adc));
- if (!indio_dev) {
- dev_err(dev, "Failed allocating iio device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
adc = iio_priv(indio_dev);
adc->dev = dev;
diff --git a/drivers/iio/adc/imx93_adc.c b/drivers/iio/adc/imx93_adc.c
index 7feaafd2316f..787e80db5de3 100644
--- a/drivers/iio/adc/imx93_adc.c
+++ b/drivers/iio/adc/imx93_adc.c
@@ -32,12 +32,13 @@
#define IMX93_ADC_PCDR0 0x100
#define IMX93_ADC_PCDR1 0x104
#define IMX93_ADC_PCDR2 0x108
-#define IMX93_ADC_PCDR3 0x10c
+#define IMX93_ADC_PCDR3 0x10C
#define IMX93_ADC_PCDR4 0x110
#define IMX93_ADC_PCDR5 0x114
#define IMX93_ADC_PCDR6 0x118
-#define IMX93_ADC_PCDR7 0x11c
+#define IMX93_ADC_PCDR7 0x11C
#define IMX93_ADC_CALSTAT 0x39C
+#define IMX93_ADC_CALCFG0 0x3A0
/* ADC bit shift */
#define IMX93_ADC_MCR_MODE_MASK BIT(29)
@@ -58,6 +59,8 @@
#define IMX93_ADC_IMR_ECH_MASK BIT(0)
#define IMX93_ADC_PCDR_CDATA_MASK GENMASK(11, 0)
+#define IMX93_ADC_CALCFG0_LDFAIL_MASK BIT(4)
+
/* ADC status */
#define IMX93_ADC_MSR_ADCSTATUS_IDLE 0
#define IMX93_ADC_MSR_ADCSTATUS_POWER_DOWN 1
@@ -145,7 +148,7 @@ static void imx93_adc_config_ad_clk(struct imx93_adc *adc)
static int imx93_adc_calibration(struct imx93_adc *adc)
{
- u32 mcr, msr;
+ u32 mcr, msr, calcfg;
int ret;
/* make sure ADC in power down mode */
@@ -158,6 +161,11 @@ static int imx93_adc_calibration(struct imx93_adc *adc)
imx93_adc_power_up(adc);
+ /* Enable loading of the calibrated values even in the fail condition */
+ calcfg = readl(adc->regs + IMX93_ADC_CALCFG0);
+ calcfg |= IMX93_ADC_CALCFG0_LDFAIL_MASK;
+ writel(calcfg, adc->regs + IMX93_ADC_CALCFG0);
+
/*
* TODO: we use the default TSAMP/NRSMPL/AVGEN in MCR;
* the setting of these bits can be added if needed in the future.
@@ -180,9 +188,13 @@ static int imx93_adc_calibration(struct imx93_adc *adc)
/* check whether the calibration succeeded or not */
msr = readl(adc->regs + IMX93_ADC_MSR);
if (msr & IMX93_ADC_MSR_CALFAIL_MASK) {
+ /*
+ * Only give a warning here; this means the noise of the
+ * reference voltage does not meet the requirement:
+ * ADC reference voltage noise < 1.8 V * 1/2^ENOB
+ * and the ADC result is not that accurate.
+ */
dev_warn(adc->dev, "ADC calibration failed!\n");
- imx93_adc_power_down(adc);
- return -EAGAIN;
}
return 0;
@@ -248,7 +260,6 @@ static int imx93_adc_read_raw(struct iio_dev *indio_dev,
mutex_lock(&adc->lock);
ret = imx93_adc_read_channel_conversion(adc, chan->channel, val);
mutex_unlock(&adc->lock);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_sync_autosuspend(dev);
if (ret < 0)
return ret;
@@ -308,8 +319,7 @@ static int imx93_adc_probe(struct platform_device *pdev)
indio_dev = devm_iio_device_alloc(dev, sizeof(*adc));
if (!indio_dev)
- return dev_err_probe(dev, -ENOMEM,
- "Failed allocating iio device\n");
+ return -ENOMEM;
adc = iio_priv(indio_dev);
adc->dev = dev;
diff --git a/drivers/iio/adc/intel_dc_ti_adc.c b/drivers/iio/adc/intel_dc_ti_adc.c
new file mode 100644
index 000000000000..0fe34f1c338e
--- /dev/null
+++ b/drivers/iio/adc/intel_dc_ti_adc.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Dollar Cove TI PMIC GPADC Driver
+ *
+ * Copyright (C) 2014 Intel Corporation (Ramakrishna Pallala <ramakrishna.pallala@intel.com>)
+ * Copyright (C) 2024 - 2025 Hans de Goede <hansg@kernel.org>
+ */
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/cleanup.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/wait.h>
+
+#include <linux/iio/driver.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/machine.h>
+
+#define DC_TI_ADC_CNTL_REG 0x50
+#define DC_TI_ADC_START BIT(0)
+#define DC_TI_ADC_CH_SEL GENMASK(2, 1)
+#define DC_TI_ADC_EN BIT(5)
+#define DC_TI_ADC_EN_EXT_BPTH_BIAS BIT(6)
+
+#define DC_TI_VBAT_ZSE_GE_REG 0x53
+#define DC_TI_VBAT_GE GENMASK(3, 0)
+#define DC_TI_VBAT_ZSE GENMASK(7, 4)
+
+/* VBAT GE gain correction is in 0.0015 increments, ZSE is in 1.0 increments */
+#define DC_TI_VBAT_GE_STEP 15
+#define DC_TI_VBAT_GE_DIV 10000
+
+#define DC_TI_ADC_DATA_REG_CH(x) (0x54 + 2 * (x))
+
+enum dc_ti_adc_id {
+ DC_TI_ADC_VBAT,
+ DC_TI_ADC_PMICTEMP,
+ DC_TI_ADC_BATTEMP,
+ DC_TI_ADC_SYSTEMP0,
+};
+
+struct dc_ti_adc_info {
+ struct mutex lock; /* Protects against concurrent accesses to the ADC */
+ wait_queue_head_t wait;
+ struct device *dev;
+ struct regmap *regmap;
+ int vbat_zse;
+ int vbat_ge;
+ bool conversion_done;
+};
+
+static const struct iio_chan_spec dc_ti_adc_channels[] = {
+ {
+ .indexed = 1,
+ .type = IIO_VOLTAGE,
+ .channel = DC_TI_ADC_VBAT,
+ .address = DC_TI_ADC_DATA_REG_CH(0),
+ .datasheet_name = "CH0",
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_PROCESSED),
+ }, {
+ .indexed = 1,
+ .type = IIO_TEMP,
+ .channel = DC_TI_ADC_PMICTEMP,
+ .address = DC_TI_ADC_DATA_REG_CH(1),
+ .datasheet_name = "CH1",
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ }, {
+ .indexed = 1,
+ .type = IIO_TEMP,
+ .channel = DC_TI_ADC_BATTEMP,
+ .address = DC_TI_ADC_DATA_REG_CH(2),
+ .datasheet_name = "CH2",
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ }, {
+ .indexed = 1,
+ .type = IIO_TEMP,
+ .channel = DC_TI_ADC_SYSTEMP0,
+ .address = DC_TI_ADC_DATA_REG_CH(3),
+ .datasheet_name = "CH3",
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ }
+};
+
+static struct iio_map dc_ti_adc_default_maps[] = {
+ IIO_MAP("CH0", "chtdc_ti_battery", "VBAT"),
+ IIO_MAP("CH1", "chtdc_ti_battery", "PMICTEMP"),
+ IIO_MAP("CH2", "chtdc_ti_battery", "BATTEMP"),
+ IIO_MAP("CH3", "chtdc_ti_battery", "SYSTEMP0"),
+ { }
+};
+
+static irqreturn_t dc_ti_adc_isr(int irq, void *data)
+{
+ struct dc_ti_adc_info *info = data;
+
+ info->conversion_done = true;
+ wake_up(&info->wait);
+ return IRQ_HANDLED;
+}
+
+static int dc_ti_adc_scale(struct dc_ti_adc_info *info,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2)
+{
+ if (chan->channel != DC_TI_ADC_VBAT)
+ return -EINVAL;
+
+ /* Vbat ADC scale is 4.6875 mV / unit */
+ *val = 4;
+ *val2 = 687500;
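+ /* Reported via IIO_VAL_INT_PLUS_MICRO: 4 + 687500 / 1e6 = 4.6875 mV */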
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int dc_ti_adc_raw_to_processed(struct dc_ti_adc_info *info,
+ struct iio_chan_spec const *chan,
+ int raw, int *val, int *val2)
+{
+ if (chan->channel != DC_TI_ADC_VBAT)
+ return -EINVAL;
+
+ /* Apply calibration */
+ raw -= info->vbat_zse;
+ raw = raw * (DC_TI_VBAT_GE_DIV - info->vbat_ge * DC_TI_VBAT_GE_STEP) /
+ DC_TI_VBAT_GE_DIV;
+ /* Vbat ADC scale is 4.6875 mV / unit */
+ raw *= 46875;
+
+ /* raw is now in 1/10000 mV units; convert to mV plus micro-mV */
+ *val = raw / 10000;
+ *val2 = (raw % 10000) * 100;
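+ /*
+ * Worked example with hypothetical calibration values: for raw = 800,
+ * vbat_zse = 2, vbat_ge = 3: 798 * 9955 / 10000 = 794, then
+ * 794 * 46875 = 37218750, giving *val = 3721 and *val2 = 875000,
+ * i.e. roughly 3721.875 mV.
+ */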
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int dc_ti_adc_sample(struct dc_ti_adc_info *info,
+ struct iio_chan_spec const *chan, int *val)
+{
+ int ret, ch = chan->channel;
+ __be16 buf;
+
+ info->conversion_done = false;
+
+ /*
+ * As per TI (PMIC Vendor), the ADC enable and ADC start commands should
+ * not be sent together. Hence send the commands separately.
+ */
+ ret = regmap_set_bits(info->regmap, DC_TI_ADC_CNTL_REG, DC_TI_ADC_EN);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(info->regmap, DC_TI_ADC_CNTL_REG,
+ DC_TI_ADC_CH_SEL,
+ FIELD_PREP(DC_TI_ADC_CH_SEL, ch));
+ if (ret)
+ return ret;
+
+ /*
+ * As per the PMIC vendor, a minimum 50 µs delay is required between the
+ * ADC Enable and ADC START commands. This is also recommended by the
+ * Intel hardware team after timing analysis of the GPADC signals. Since
+ * the I2C write transaction to set the channel number already imparts a
+ * 25 µs delay, we only need to wait for another 25 µs before issuing
+ * ADC START.
+ */
+ fsleep(25);
+
+ ret = regmap_set_bits(info->regmap, DC_TI_ADC_CNTL_REG,
+ DC_TI_ADC_START);
+ if (ret)
+ return ret;
+
+ /* TI (PMIC Vendor) recommends 5 s timeout for conversion */
+ ret = wait_event_timeout(info->wait, info->conversion_done, 5 * HZ);
+ if (ret == 0) {
+ ret = -ETIMEDOUT;
+ goto disable_adc;
+ }
+
+ ret = regmap_bulk_read(info->regmap, chan->address, &buf, sizeof(buf));
+ if (ret)
+ goto disable_adc;
+
+ /* The ADC values are 10 bits wide */
+ *val = be16_to_cpu(buf) & GENMASK(9, 0);
+
+disable_adc:
+ regmap_clear_bits(info->regmap, DC_TI_ADC_CNTL_REG,
+ DC_TI_ADC_START | DC_TI_ADC_EN);
+ return ret;
+}
+
+static int dc_ti_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct dc_ti_adc_info *info = iio_priv(indio_dev);
+ int ret;
+
+ if (mask == IIO_CHAN_INFO_SCALE)
+ return dc_ti_adc_scale(info, chan, val, val2);
+
+ guard(mutex)(&info->lock);
+
+ /*
+ * If the BPTHERM channel has been selected, first enable the BPTHERM
+ * BIAS, which provides the VREF voltage reference used to convert the
+ * BPTHERM input voltage to a temperature.
+ */
+ if (chan->channel == DC_TI_ADC_BATTEMP) {
+ ret = regmap_set_bits(info->regmap, DC_TI_ADC_CNTL_REG,
+ DC_TI_ADC_EN_EXT_BPTH_BIAS);
+ if (ret)
+ return ret;
+ /*
+ * As per PMIC Vendor specifications, BPTHERM BIAS should be
+ * enabled 35 ms before ADC_EN command.
+ */
+ msleep(35);
+ }
+
+ ret = dc_ti_adc_sample(info, chan, val);
+
+ if (chan->channel == DC_TI_ADC_BATTEMP)
+ regmap_clear_bits(info->regmap, DC_TI_ADC_CNTL_REG,
+ DC_TI_ADC_EN_EXT_BPTH_BIAS);
+
+ if (ret)
+ return ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_PROCESSED:
+ return dc_ti_adc_raw_to_processed(info, chan, *val, val, val2);
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info dc_ti_adc_iio_info = {
+ .read_raw = dc_ti_adc_read_raw,
+};
+
+static int dc_ti_adc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct intel_soc_pmic *pmic = dev_get_drvdata(dev->parent);
+ struct dc_ti_adc_info *info;
+ struct iio_dev *indio_dev;
+ unsigned int val;
+ int irq, ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*info));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ info = iio_priv(indio_dev);
+
+ ret = devm_mutex_init(dev, &info->lock);
+ if (ret)
+ return ret;
+
+ init_waitqueue_head(&info->wait);
+
+ info->dev = dev;
+ info->regmap = pmic->regmap;
+
+ indio_dev->name = "dc_ti_adc";
+ indio_dev->channels = dc_ti_adc_channels;
+ indio_dev->num_channels = ARRAY_SIZE(dc_ti_adc_channels);
+ indio_dev->info = &dc_ti_adc_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = regmap_read(info->regmap, DC_TI_VBAT_ZSE_GE_REG, &val);
+ if (ret)
+ return ret;
+
+ info->vbat_zse = sign_extend32(FIELD_GET(DC_TI_VBAT_ZSE, val), 3);
+ info->vbat_ge = sign_extend32(FIELD_GET(DC_TI_VBAT_GE, val), 3);
+
+ dev_dbg(dev, "vbat-zse %d vbat-ge %d\n", info->vbat_zse, info->vbat_ge);
+
+ ret = devm_iio_map_array_register(dev, indio_dev, dc_ti_adc_default_maps);
+ if (ret)
+ return ret;
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, dc_ti_adc_isr,
+ IRQF_ONESHOT, indio_dev->name, info);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct platform_device_id dc_ti_adc_ids[] = {
+ { .name = "chtdc_ti_adc" },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, dc_ti_adc_ids);
+
+static struct platform_driver dc_ti_adc_driver = {
+ .driver = {
+ .name = "dc_ti_adc",
+ },
+ .probe = dc_ti_adc_probe,
+ .id_table = dc_ti_adc_ids,
+};
+module_platform_driver(dc_ti_adc_driver);
+
+MODULE_AUTHOR("Ramakrishna Pallala (Intel)");
+MODULE_AUTHOR("Hans de Goede <hansg@kernel.org>");
+MODULE_DESCRIPTION("Intel Dollar Cove (TI) GPADC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/mcp3564.c b/drivers/iio/adc/mcp3564.c
index a68f1cd6883e..cd679ff10a97 100644
--- a/drivers/iio/adc/mcp3564.c
+++ b/drivers/iio/adc/mcp3564.c
@@ -1019,7 +1019,7 @@ static int mcp3564_parse_fw_children(struct iio_dev *indio_dev)
channels = devm_kcalloc(dev, num_ch, sizeof(*channels), GFP_KERNEL);
if (!channels)
- return dev_err_probe(dev, -ENOMEM, "Can't allocate memory\n");
+ return -ENOMEM;
device_for_each_child_node_scoped(dev, child) {
node_name = fwnode_get_name(child);
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index 4ff88603e4fc..f7e7172ef4f6 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -1357,7 +1357,7 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
if (!indio_dev)
- return dev_err_probe(dev, -ENOMEM, "failed allocating iio device\n");
+ return -ENOMEM;
priv = iio_priv(indio_dev);
init_completion(&priv->done);
diff --git a/drivers/iio/adc/mt6577_auxadc.c b/drivers/iio/adc/mt6577_auxadc.c
index 3343b54e8e44..fe9e3ece3fda 100644
--- a/drivers/iio/adc/mt6577_auxadc.c
+++ b/drivers/iio/adc/mt6577_auxadc.c
@@ -297,8 +297,7 @@ static int mt6577_auxadc_probe(struct platform_device *pdev)
ret = devm_add_action_or_reset(&pdev->dev, mt6577_power_off, adc_dev);
if (ret)
- return dev_err_probe(&pdev->dev, ret,
- "Failed to add action to managed power off\n");
+ return ret;
ret = devm_iio_device_register(&pdev->dev, indio_dev);
if (ret < 0)
diff --git a/drivers/iio/adc/mxs-lradc-adc.c b/drivers/iio/adc/mxs-lradc-adc.c
index 92baf3f5f560..dda5182a5076 100644
--- a/drivers/iio/adc/mxs-lradc-adc.c
+++ b/drivers/iio/adc/mxs-lradc-adc.c
@@ -697,10 +697,8 @@ static int mxs_lradc_adc_probe(struct platform_device *pdev)
/* Allocate the IIO device. */
iio = devm_iio_device_alloc(dev, sizeof(*adc));
- if (!iio) {
- dev_err(dev, "Failed to allocate IIO device\n");
+ if (!iio)
return -ENOMEM;
- }
adc = iio_priv(iio);
adc->lradc = lradc;
diff --git a/drivers/iio/adc/pac1921.c b/drivers/iio/adc/pac1921.c
index 72aa4ca2e5a4..35433250b008 100644
--- a/drivers/iio/adc/pac1921.c
+++ b/drivers/iio/adc/pac1921.c
@@ -1279,8 +1279,7 @@ static int pac1921_probe(struct i2c_client *client)
ret = devm_add_action_or_reset(dev, pac1921_regulator_disable,
priv->vdd);
if (ret)
- return dev_err_probe(dev, ret,
- "Cannot add action for vdd regulator disposal\n");
+ return ret;
msleep(PAC1921_POWERUP_TIME_MS);
diff --git a/drivers/iio/adc/pac1934.c b/drivers/iio/adc/pac1934.c
index 09fe88eb3fb0..48df16509260 100644
--- a/drivers/iio/adc/pac1934.c
+++ b/drivers/iio/adc/pac1934.c
@@ -88,6 +88,7 @@
#define PAC1934_VPOWER_3_ADDR 0x19
#define PAC1934_VPOWER_4_ADDR 0x1A
#define PAC1934_REFRESH_V_REG_ADDR 0x1F
+#define PAC1934_SLOW_REG_ADDR 0x20
#define PAC1934_CTRL_STAT_REGS_ADDR 0x1C
#define PAC1934_PID_REG_ADDR 0xFD
#define PAC1934_MID_REG_ADDR 0xFE
@@ -1265,8 +1266,23 @@ static int pac1934_chip_configure(struct pac1934_chip_info *info)
/* no SLOW triggered REFRESH, clear POR */
regs[PAC1934_SLOW_REG_OFF] = 0;
- ret = i2c_smbus_write_block_data(client, PAC1934_CTRL_STAT_REGS_ADDR,
- ARRAY_SIZE(regs), (u8 *)regs);
+ /*
+ * Write the three bytes sequentially, as the device does not support
+ * block write.
+ */
+ ret = i2c_smbus_write_byte_data(client, PAC1934_CTRL_STAT_REGS_ADDR,
+ regs[PAC1934_CHANNEL_DIS_REG_OFF]);
+ if (ret)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(client,
+ PAC1934_CTRL_STAT_REGS_ADDR + PAC1934_NEG_PWR_REG_OFF,
+ regs[PAC1934_NEG_PWR_REG_OFF]);
+ if (ret)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(client, PAC1934_SLOW_REG_ADDR,
+ regs[PAC1934_SLOW_REG_OFF]);
if (ret)
return ret;
@@ -1455,13 +1471,6 @@ static int pac1934_prep_custom_attributes(struct pac1934_chip_info *info,
return 0;
}
-static void pac1934_mutex_destroy(void *data)
-{
- struct mutex *lock = data;
-
- mutex_destroy(lock);
-}
-
static const struct iio_info pac1934_info = {
.read_raw = pac1934_read_raw,
.write_raw = pac1934_write_raw,
@@ -1520,9 +1529,7 @@ static int pac1934_probe(struct i2c_client *client)
return dev_err_probe(dev, ret,
"parameter parsing returned an error\n");
- mutex_init(&info->lock);
- ret = devm_add_action_or_reset(dev, pac1934_mutex_destroy,
- &info->lock);
+ ret = devm_mutex_init(dev, &info->lock);
if (ret < 0)
return ret;
diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
index 7c01e33be04c..3f433064618e 100644
--- a/drivers/iio/adc/palmas_gpadc.c
+++ b/drivers/iio/adc/palmas_gpadc.c
@@ -885,10 +885,8 @@ static int palmas_gpadc_probe(struct platform_device *pdev)
return -EINVAL;
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc));
- if (!indio_dev) {
- dev_err(&pdev->dev, "iio_device_alloc failed\n");
+ if (!indio_dev)
return -ENOMEM;
- }
adc = iio_priv(indio_dev);
adc->dev = &pdev->dev;
diff --git a/drivers/iio/adc/rcar-gyroadc.c b/drivers/iio/adc/rcar-gyroadc.c
index cc326f21d398..3a17b3898bf6 100644
--- a/drivers/iio/adc/rcar-gyroadc.c
+++ b/drivers/iio/adc/rcar-gyroadc.c
@@ -163,12 +163,10 @@ static int rcar_gyroadc_set_power(struct rcar_gyroadc *priv, bool on)
{
struct device *dev = priv->dev;
- if (on) {
+ if (on)
return pm_runtime_resume_and_get(dev);
- } else {
- pm_runtime_mark_last_busy(dev);
- return pm_runtime_put_autosuspend(dev);
- }
+
+ return pm_runtime_put_autosuspend(dev);
}
static int rcar_gyroadc_read_raw(struct iio_dev *indio_dev,
diff --git a/drivers/iio/adc/rn5t618-adc.c b/drivers/iio/adc/rn5t618-adc.c
index d6f6b351f2af..f78fc795b69a 100644
--- a/drivers/iio/adc/rn5t618-adc.c
+++ b/drivers/iio/adc/rn5t618-adc.c
@@ -199,10 +199,8 @@ static int rn5t618_adc_probe(struct platform_device *pdev)
struct rn5t618 *rn5t618 = dev_get_drvdata(pdev->dev.parent);
iio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc));
- if (!iio_dev) {
- dev_err(&pdev->dev, "failed allocating iio device\n");
+ if (!iio_dev)
return -ENOMEM;
- }
adc = iio_priv(iio_dev);
adc->dev = &pdev->dev;
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index bd62daea0a3e..6721da0ed7bb 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -466,8 +466,7 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*info));
if (!indio_dev)
- return dev_err_probe(&pdev->dev, -ENOMEM,
- "failed allocating iio device\n");
+ return -ENOMEM;
info = iio_priv(indio_dev);
@@ -527,8 +526,7 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
ret = devm_add_action_or_reset(&pdev->dev,
rockchip_saradc_regulator_disable, info);
if (ret)
- return dev_err_probe(&pdev->dev, ret,
- "failed to register devm action\n");
+ return ret;
ret = regulator_get_voltage(info->vref);
if (ret < 0)
diff --git a/drivers/iio/adc/rohm-bd79112.c b/drivers/iio/adc/rohm-bd79112.c
new file mode 100644
index 000000000000..d15e06c8b94d
--- /dev/null
+++ b/drivers/iio/adc/rohm-bd79112.c
@@ -0,0 +1,556 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ROHM ADC driver for BD79112 signal monitoring hub.
+ * Copyright (C) 2025, ROHM Semiconductor.
+ *
+ * SPI communication derived from ad7923.c and ti-ads7950.c
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+#include <linux/iio/adc-helpers.h>
+#include <linux/iio/iio.h>
+
+#define BD79112_MAX_NUM_CHANNELS 32
+
+struct bd79112_data {
+ struct spi_device *spi;
+ struct regmap *map;
+ struct device *dev;
+ struct gpio_chip gc;
+ unsigned long gpio_valid_mask;
+ unsigned int vref_mv;
+ struct spi_transfer read_xfer[2];
+ struct spi_transfer write_xfer;
+ struct spi_message read_msg;
+ struct spi_message write_msg;
+ /* 16-bit TX, valid data in high byte */
+ u8 read_tx[2] __aligned(IIO_DMA_MINALIGN);
+ /* 8-bit address followed by 8-bit data */
+ u8 reg_write_tx[2];
+ /* 12-bit of ADC data or 8 bit of reg data */
+ __be16 read_rx;
+};
+
+/*
+ * The ADC data is read by issuing an SPI command matching the channel number.
+ * We treat this as a register address.
+ */
+#define BD79112_REG_AGIO0A 0x00
+#define BD79112_REG_AGIO15B 0x1f
+
+/*
+ * The ADC STATUS_FLAG appended to the ADC data will be set if the ADC result
+ * is read for a channel whose input pin is muxed to be a GPIO.
+ */
+#define BD79112_ADC_STATUS_FLAG BIT(14)
+
+/*
+ * The BD79112 requires the "R/W bit" to be set for SPI register reads (not
+ * ADC data reads) and the "IOSET bit" to be set for all register read/write
+ * operations (anything that is not an ADC data read).
+ */
+#define BD79112_BIT_RW BIT(4)
+#define BD79112_BIT_IO BIT(5)
+
+#define BD79112_REG_GPI_VALUE_B8_15 (BD79112_BIT_IO | 0x0)
+#define BD79112_REG_GPI_VALUE_B0_B7 (BD79112_BIT_IO | 0x1)
+#define BD79112_REG_GPI_VALUE_A8_15 (BD79112_BIT_IO | 0x2)
+#define BD79112_REG_GPI_VALUE_A0_A7 (BD79112_BIT_IO | 0x3)
+
+#define BD79112_REG_GPI_EN_B7_B15 (BD79112_BIT_IO | 0x4)
+#define BD79112_REG_GPI_EN_B0_B7 (BD79112_BIT_IO | 0x5)
+#define BD79112_REG_GPI_EN_A8_A15 (BD79112_BIT_IO | 0x6)
+#define BD79112_REG_GPI_EN_A0_A7 (BD79112_BIT_IO | 0x7)
+
+#define BD79112_REG_GPO_EN_B7_B15 (BD79112_BIT_IO | 0x8)
+#define BD79112_REG_GPO_EN_B0_B7 (BD79112_BIT_IO | 0x9)
+#define BD79112_REG_GPO_EN_A8_A15 (BD79112_BIT_IO | 0xa)
+#define BD79112_REG_GPO_EN_A0_A7 (BD79112_BIT_IO | 0xb)
+
+#define BD79112_NUM_GPIO_EN_REGS 8
+#define BD79112_FIRST_GPIO_EN_REG BD79112_REG_GPI_EN_B7_B15
+
+#define BD79112_REG_GPO_VALUE_B8_15 (BD79112_BIT_IO | 0xc)
+#define BD79112_REG_GPO_VALUE_B0_B7 (BD79112_BIT_IO | 0xd)
+#define BD79112_REG_GPO_VALUE_A8_15 (BD79112_BIT_IO | 0xe)
+#define BD79112_REG_GPO_VALUE_A0_A7 (BD79112_BIT_IO | 0xf)
+
+#define BD79112_REG_MAX BD79112_REG_GPO_VALUE_A0_A7
+
+/*
+ * A read transaction consists of two 16-bit sequences separated by CSB.
+ * For a register read, the 'IOSET' bit must be set. For an ADC read, IOSET is
+ * cleared and ADDR equals the channel number (0 ... 31).
+ *
+ * First 16-bit sequence, MOSI as below, MISO data ignored:
+ * - SCK: | 1 | 2 | 3 | 4 | 5 .. 8 | 9 .. 16 |
+ * - MOSI:| 0 | 0 | IOSET | RW (1) | ADDR | 8'b0 |
+ *
+ * CSB released and re-acquired between these sequences
+ *
+ * Second 16-bit sequence, MISO as below, MOSI data ignored:
+ * For a register read, the data is 8 bits:
+ * - SCK: | 1 .. 8 | 9 .. 16 |
+ * - MISO:| 8'b0 | 8-bit data |
+ *
+ * For an ADC read, the data is 12 bits:
+ * - SCK: | 1 | 2 | 3 4 | 5 .. 16 |
+ * - MISO:| 0 | STATUS_FLAG | 2'b0 | 12-bit data |
+ * The 'STATUS_FLAG' is set if the read input pin was configured as a GPIO.
+ */
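+/*
+ * Illustrative examples (values derived from the defines above): a read of
+ * register BD79112_REG_GPO_EN_A0_A7 (0x20 | 0xb = 0x2b) gets the RW bit set
+ * below, so the first TX byte is 0x3b; an ADC read of channel 5 sends 0x05
+ * with both IOSET and RW clear.
+ */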
+static int bd79112_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct bd79112_data *data = context;
+ int ret;
+
+ if (reg & BD79112_BIT_IO)
+ reg |= BD79112_BIT_RW;
+
+ data->read_tx[0] = reg;
+
+ ret = spi_sync(data->spi, &data->read_msg);
+ if (!ret)
+ *val = be16_to_cpu(data->read_rx);
+
+ return ret;
+}
+
+/*
+ * Write, single 16-bit sequence (broken down below):
+ *
+ * First 8 bits, MOSI as below, MISO data ignored:
+ * - SCK: | 1 | 2 | 3 | 4 | 5 .. 8 |
+ * - MOSI:| 0 | 0 |IOSET| RW(0) | ADDR |
+ *
+ * Last 8 SCK cycles (b8 ... b15), MOSI contains the register data, MISO
+ * data ignored.
+ * - SCK: | 9 .. 16 |
+ * - MOSI:| data |
+ */
+static int bd79112_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct bd79112_data *data = context;
+
+ data->reg_write_tx[0] = reg;
+ data->reg_write_tx[1] = val;
+
+ return spi_sync(data->spi, &data->write_msg);
+}
+
+static int _get_gpio_reg(unsigned int offset, unsigned int base)
+{
+ int regoffset = offset / 8;
+
+ if (offset > 31)
+ return -EINVAL;
+
+ return base - regoffset;
+}
+
+#define GET_GPIO_BIT(offset) BIT((offset) % 8)
+#define GET_GPO_EN_REG(offset) _get_gpio_reg((offset), BD79112_REG_GPO_EN_A0_A7)
+#define GET_GPI_EN_REG(offset) _get_gpio_reg((offset), BD79112_REG_GPI_EN_A0_A7)
+#define GET_GPO_VAL_REG(offset) _get_gpio_reg((offset), BD79112_REG_GPO_VALUE_A0_A7)
+#define GET_GPI_VAL_REG(offset) _get_gpio_reg((offset), BD79112_REG_GPI_VALUE_A0_A7)
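+
+/*
+ * Illustrative example: for offset 10 (pin AGIO10A), GET_GPO_EN_REG() is
+ * BD79112_REG_GPO_EN_A0_A7 - 10 / 8, i.e. BD79112_REG_GPO_EN_A8_A15, and
+ * GET_GPIO_BIT() is BIT(10 % 8) == BIT(2).
+ */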
+
+static const struct regmap_range bd71815_volatile_ro_ranges[] = {
+ {
+ /* Read ADC data */
+ .range_min = BD79112_REG_AGIO0A,
+ .range_max = BD79112_REG_AGIO15B,
+ }, {
+ /* GPI state */
+ .range_min = BD79112_REG_GPI_VALUE_B8_15,
+ .range_max = BD79112_REG_GPI_VALUE_A0_A7,
+ },
+};
+
+static const struct regmap_access_table bd79112_volatile_regs = {
+ .yes_ranges = &bd71815_volatile_ro_ranges[0],
+ .n_yes_ranges = ARRAY_SIZE(bd71815_volatile_ro_ranges),
+};
+
+static const struct regmap_access_table bd79112_ro_regs = {
+ .no_ranges = &bd71815_volatile_ro_ranges[0],
+ .n_no_ranges = ARRAY_SIZE(bd71815_volatile_ro_ranges),
+};
+
+static const struct regmap_config bd79112_regmap = {
+ .reg_read = bd79112_reg_read,
+ .reg_write = bd79112_reg_write,
+ .volatile_table = &bd79112_volatile_regs,
+ .wr_table = &bd79112_ro_regs,
+ .cache_type = REGCACHE_MAPLE,
+ .max_register = BD79112_REG_MAX,
+};
+
+static int bd79112_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long m)
+{
+ struct bd79112_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (m) {
+ case IIO_CHAN_INFO_RAW:
+ ret = regmap_read(data->map, chan->channel, val);
+ if (ret < 0)
+ return ret;
+
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ *val = data->vref_mv;
+ *val2 = 12;
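+ /* IIO_VAL_FRACTIONAL_LOG2: scale = vref_mv / 2^12, i.e. mV per 12-bit LSB */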
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info bd79112_info = {
+ .read_raw = bd79112_read_raw,
+};
+
+static const struct iio_chan_spec bd79112_chan_template = {
+ .type = IIO_VOLTAGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .indexed = 1,
+};
+
+static int bd79112_gpio_init_valid_mask(struct gpio_chip *gc,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ struct bd79112_data *data = gpiochip_get_data(gc);
+
+ *valid_mask = data->gpio_valid_mask;
+
+ return 0;
+}
+
+static int bd79112_gpio_dir_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct bd79112_data *data = gpiochip_get_data(gc);
+ unsigned int reg, bit, val;
+ int ret;
+
+ bit = GET_GPIO_BIT(offset);
+ reg = GET_GPO_EN_REG(offset);
+
+ ret = regmap_read(data->map, reg, &val);
+ if (ret)
+ return ret;
+
+ if (bit & val)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ reg = GET_GPI_EN_REG(offset);
+ ret = regmap_read(data->map, reg, &val);
+ if (ret)
+ return ret;
+
+ if (bit & val)
+ return GPIO_LINE_DIRECTION_IN;
+
+ /*
+ * Ouch. It seems the pin is an ADC input - this shouldn't happen, as
+ * changing the mux at runtime is not supported and non-GPIO pins should
+ * be invalidated by the valid_mask at probe. Maybe someone wrote a
+ * register bypassing the driver?
+ */
+ dev_err(data->dev, "Pin not a GPIO\n");
+
+ return -EINVAL;
+}
+
+static int bd79112_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct bd79112_data *data = gpiochip_get_data(gc);
+ unsigned int reg, bit, val;
+ int ret;
+
+ bit = GET_GPIO_BIT(offset);
+ reg = GET_GPI_VAL_REG(offset);
+
+ ret = regmap_read(data->map, reg, &val);
+ if (ret)
+ return ret;
+
+ return !!(val & bit);
+}
+
+static int bd79112_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
+{
+ struct bd79112_data *data = gpiochip_get_data(gc);
+ unsigned int reg, bit;
+
+ bit = GET_GPIO_BIT(offset);
+ reg = GET_GPO_VAL_REG(offset);
+
+ return regmap_assign_bits(data->map, reg, bit, value);
+}
+
+static int bd79112_gpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct bd79112_data *data = gpiochip_get_data(gc);
+ unsigned long i, bank_mask;
+
+ for_each_set_clump8(i, bank_mask, mask, gc->ngpio) {
+ unsigned long bank_bits;
+ unsigned int reg;
+ int ret;
+
+ bank_bits = bitmap_get_value8(bits, i);
+ reg = BD79112_REG_GPO_VALUE_A0_A7 - i / 8;
+ ret = regmap_update_bits(data->map, reg, bank_mask, bank_bits);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int bd79112_gpio_dir_set(struct bd79112_data *data, unsigned int offset,
+ int dir)
+{
+ unsigned int gpi_reg, gpo_reg, bit;
+ int ret;
+
+ bit = GET_GPIO_BIT(offset);
+ gpi_reg = GET_GPI_EN_REG(offset);
+ gpo_reg = GET_GPO_EN_REG(offset);
+
+ if (dir == GPIO_LINE_DIRECTION_OUT) {
+ ret = regmap_clear_bits(data->map, gpi_reg, bit);
+ if (ret)
+ return ret;
+
+ return regmap_set_bits(data->map, gpo_reg, bit);
+ }
+
+ ret = regmap_set_bits(data->map, gpi_reg, bit);
+ if (ret)
+ return ret;
+
+ return regmap_clear_bits(data->map, gpo_reg, bit);
+}
+
+static int bd79112_gpio_input(struct gpio_chip *gc, unsigned int offset)
+{
+ struct bd79112_data *data = gpiochip_get_data(gc);
+
+ return bd79112_gpio_dir_set(data, offset, GPIO_LINE_DIRECTION_IN);
+}
+
+static int bd79112_gpio_output(struct gpio_chip *gc, unsigned int offset,
+ int value)
+{
+ struct bd79112_data *data = gpiochip_get_data(gc);
+ int ret;
+
+ ret = bd79112_gpio_set(gc, offset, value);
+ if (ret)
+ return ret;
+
+ return bd79112_gpio_dir_set(data, offset, GPIO_LINE_DIRECTION_OUT);
+}
+
+static const struct gpio_chip bd79112_gpio_chip = {
+ .label = "bd79112-gpio",
+ .get_direction = bd79112_gpio_dir_get,
+ .direction_input = bd79112_gpio_input,
+ .direction_output = bd79112_gpio_output,
+ .get = bd79112_gpio_get,
+ .set = bd79112_gpio_set,
+ .set_multiple = bd79112_gpio_set_multiple,
+ .init_valid_mask = bd79112_gpio_init_valid_mask,
+ .can_sleep = true,
+ .ngpio = 32,
+ .base = -1,
+};
+
+static unsigned int bd79112_get_gpio_pins(const struct iio_chan_spec *cs, int num_channels)
+{
+ unsigned int i, gpio_channels;
+
+ /*
+ * Let's initialize the mux config to say that all 32 channels are
+ * GPIOs. Then we can just loop through the iio_chan_spec and clear the
+ * bits for the ADC channels that were found.
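+ * For example, if only channels 0 and 1 are described as ADC inputs,
+ * this returns GENMASK(31, 2), i.e. pins 2..31 may be used as GPIOs.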
+ */
+ gpio_channels = GENMASK(31, 0);
+ for (i = 0; i < num_channels; i++)
+ gpio_channels &= ~BIT(cs[i].channel);
+
+ return gpio_channels;
+}
+
+/* ADC channels as named in the data-sheet */
+static const char * const bd79112_chan_names[] = {
+ "AGIO0A", "AGIO1A", "AGIO2A", "AGIO3A", /* 0 - 3 */
+ "AGIO4A", "AGIO5A", "AGIO6A", "AGIO7A", /* 4 - 7 */
+ "AGIO8A", "AGIO9A", "AGIO10A", "AGIO11A", /* 8 - 11 */
+ "AGIO12A", "AGIO13A", "AGIO14A", "AGIO15A", /* 12 - 15 */
+ "AGIO0B", "AGIO1B", "AGIO2B", "AGIO3B", /* 16 - 19 */
+ "AGIO4B", "AGIO5B", "AGIO6B", "AGIO7B", /* 20 - 23 */
+ "AGIO8B", "AGIO9B", "AGIO10B", "AGIO11B", /* 24 - 27 */
+ "AGIO12B", "AGIO13B", "AGIO14B", "AGIO15B", /* 28 - 31 */
+};
+
+static int bd79112_probe(struct spi_device *spi)
+{
+ struct bd79112_data *data;
+ struct iio_dev *iio_dev;
+ struct iio_chan_spec *cs;
+ struct device *dev = &spi->dev;
+ unsigned long gpio_pins, pin;
+ unsigned int i;
+ int ret;
+
+ iio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!iio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(iio_dev);
+ data->spi = spi;
+ data->dev = dev;
+ data->map = devm_regmap_init(dev, NULL, data, &bd79112_regmap);
+ if (IS_ERR(data->map))
+ return dev_err_probe(dev, PTR_ERR(data->map),
+ "Failed to initialize Regmap\n");
+
+ ret = devm_regulator_get_enable_read_voltage(dev, "vdd");
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get the Vdd\n");
+
+ data->vref_mv = ret / 1000;
+
+ ret = devm_regulator_get_enable(dev, "iovdd");
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to enable I/O voltage\n");
+
+ data->read_xfer[0].tx_buf = &data->read_tx[0];
+ data->read_xfer[0].len = sizeof(data->read_tx);
+ data->read_xfer[0].cs_change = 1;
+ data->read_xfer[1].rx_buf = &data->read_rx;
+ data->read_xfer[1].len = sizeof(data->read_rx);
+ spi_message_init_with_transfers(&data->read_msg, data->read_xfer, 2);
+ ret = devm_spi_optimize_message(dev, spi, &data->read_msg);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "Failed to optimize SPI read message\n");
+
+ data->write_xfer.tx_buf = &data->reg_write_tx[0];
+ data->write_xfer.len = sizeof(data->reg_write_tx);
+ spi_message_init_with_transfers(&data->write_msg, &data->write_xfer, 1);
+ ret = devm_spi_optimize_message(dev, spi, &data->write_msg);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "Failed to optimize SPI write message\n");
+
+ ret = devm_iio_adc_device_alloc_chaninfo_se(dev, &bd79112_chan_template,
+ BD79112_MAX_NUM_CHANNELS - 1,
+ &cs);
+
+ /* Register all pins as GPIOs if there are no ADC channels */
+ if (ret == -ENOENT)
+ goto register_gpios;
+
+ if (ret < 0)
+ return ret;
+
+ iio_dev->num_channels = ret;
+ iio_dev->channels = cs;
+
+ for (i = 0; i < iio_dev->num_channels; i++)
+ cs[i].datasheet_name = bd79112_chan_names[cs[i].channel];
+
+ iio_dev->info = &bd79112_info;
+ iio_dev->name = "bd79112";
+ iio_dev->modes = INDIO_DIRECT_MODE;
+
+ /*
+ * Ensure all channels are ADCs. This allows us to register the IIO
+ * device early (before checking which pins are to be used for GPIO)
+ * without having to worry about some pins being initially used for
+ * GPIO.
+ */
+ for (i = 0; i < BD79112_NUM_GPIO_EN_REGS; i++) {
+ ret = regmap_write(data->map, BD79112_FIRST_GPIO_EN_REG + i, 0);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to initialize channels\n");
+ }
+
+ ret = devm_iio_device_register(data->dev, iio_dev);
+ if (ret)
+ return dev_err_probe(data->dev, ret, "Failed to register ADC\n");
+
+register_gpios:
+ gpio_pins = bd79112_get_gpio_pins(iio_dev->channels,
+ iio_dev->num_channels);
+
+ /* If all channels are reserved for ADC, then we're done. */
+ if (!gpio_pins)
+ return 0;
+
+ /* Default all the GPIO pins to GPI */
+ for_each_set_bit(pin, &gpio_pins, BD79112_MAX_NUM_CHANNELS) {
+ ret = bd79112_gpio_dir_set(data, pin, GPIO_LINE_DIRECTION_IN);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to mark pin as GPI\n");
+ }
+
+ data->gpio_valid_mask = gpio_pins;
+ data->gc = bd79112_gpio_chip;
+ data->gc.parent = dev;
+
+ return devm_gpiochip_add_data(dev, &data->gc, data);
+}
+
+static const struct of_device_id bd79112_of_match[] = {
+ { .compatible = "rohm,bd79112" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bd79112_of_match);
+
+static const struct spi_device_id bd79112_id[] = {
+ { "bd79112" },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, bd79112_id);
+
+static struct spi_driver bd79112_driver = {
+ .driver = {
+ .name = "bd79112",
+ .of_match_table = bd79112_of_match,
+ },
+ .probe = bd79112_probe,
+ .id_table = bd79112_id,
+};
+module_spi_driver(bd79112_driver);
+
+MODULE_AUTHOR("Matti Vaittinen <mazziesaccount@gmail.com>");
+MODULE_DESCRIPTION("Driver for ROHM BD79112 ADC/GPIO");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IIO_DRIVER");
diff --git a/drivers/iio/adc/rohm-bd79124.c b/drivers/iio/adc/rohm-bd79124.c
index bb7c93ae4055..06c55c8da93f 100644
--- a/drivers/iio/adc/rohm-bd79124.c
+++ b/drivers/iio/adc/rohm-bd79124.c
@@ -246,8 +246,8 @@ static int bd79124_init_valid_mask(struct gpio_chip *gc,
static const struct gpio_chip bd79124gpo_chip = {
.label = "bd79124-gpo",
.get_direction = bd79124gpo_direction_get,
- .set_rv = bd79124gpo_set,
- .set_multiple_rv = bd79124gpo_set_multiple,
+ .set = bd79124gpo_set,
+ .set_multiple = bd79124gpo_set_multiple,
.init_valid_mask = bd79124_init_valid_mask,
.can_sleep = true,
.ngpio = 8,
diff --git a/drivers/iio/adc/rzg2l_adc.c b/drivers/iio/adc/rzg2l_adc.c
index 9674d48074c9..1010e0511b3e 100644
--- a/drivers/iio/adc/rzg2l_adc.c
+++ b/drivers/iio/adc/rzg2l_adc.c
@@ -89,7 +89,6 @@ struct rzg2l_adc {
struct completion completion;
struct mutex lock;
u16 last_val[RZG2L_ADC_MAX_CHANNELS];
- bool was_rpm_active;
};
/**
@@ -249,7 +248,6 @@ static int rzg2l_adc_conversion(struct iio_dev *indio_dev, struct rzg2l_adc *adc
rzg2l_adc_start_stop(adc, false);
rpm_put:
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
}
@@ -411,7 +409,6 @@ static int rzg2l_adc_hw_init(struct device *dev, struct rzg2l_adc *adc)
rzg2l_adc_writel(adc, RZG2L_ADM(3), reg);
exit_hw_init:
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
}
@@ -428,6 +425,8 @@ static int rzg2l_adc_probe(struct platform_device *pdev)
if (!indio_dev)
return -ENOMEM;
+ platform_set_drvdata(pdev, indio_dev);
+
adc = iio_priv(indio_dev);
adc->hw_params = device_get_match_data(dev);
@@ -460,8 +459,6 @@ static int rzg2l_adc_probe(struct platform_device *pdev)
if (ret)
return ret;
- platform_set_drvdata(pdev, indio_dev);
-
ret = rzg2l_adc_hw_init(dev, adc);
if (ret)
return dev_err_probe(&pdev->dev, ret,
@@ -541,14 +538,9 @@ static int rzg2l_adc_suspend(struct device *dev)
};
int ret;
- if (pm_runtime_suspended(dev)) {
- adc->was_rpm_active = false;
- } else {
- ret = pm_runtime_force_suspend(dev);
- if (ret)
- return ret;
- adc->was_rpm_active = true;
- }
+ ret = pm_runtime_force_suspend(dev);
+ if (ret)
+ return ret;
ret = reset_control_bulk_assert(ARRAY_SIZE(resets), resets);
if (ret)
@@ -557,9 +549,7 @@ static int rzg2l_adc_suspend(struct device *dev)
return 0;
rpm_restore:
- if (adc->was_rpm_active)
- pm_runtime_force_resume(dev);
-
+ pm_runtime_force_resume(dev);
return ret;
}
@@ -577,11 +567,9 @@ static int rzg2l_adc_resume(struct device *dev)
if (ret)
return ret;
- if (adc->was_rpm_active) {
- ret = pm_runtime_force_resume(dev);
- if (ret)
- goto resets_restore;
- }
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ goto resets_restore;
ret = rzg2l_adc_hw_init(dev, adc);
if (ret)
@@ -590,10 +578,7 @@ static int rzg2l_adc_resume(struct device *dev)
return 0;
rpm_restore:
- if (adc->was_rpm_active) {
- pm_runtime_mark_last_busy(dev);
- pm_runtime_put_autosuspend(dev);
- }
+ pm_runtime_force_suspend(dev);
resets_restore:
reset_control_bulk_assert(ARRAY_SIZE(resets), resets);
return ret;
diff --git a/drivers/iio/adc/spear_adc.c b/drivers/iio/adc/spear_adc.c
index e3a865c79686..50b0a607baeb 100644
--- a/drivers/iio/adc/spear_adc.c
+++ b/drivers/iio/adc/spear_adc.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/completion.h>
@@ -29,9 +30,9 @@
/* Bit definitions for SPEAR_ADC_STATUS */
#define SPEAR_ADC_STATUS_START_CONVERSION BIT(0)
-#define SPEAR_ADC_STATUS_CHANNEL_NUM(x) ((x) << 1)
+#define SPEAR_ADC_STATUS_CHANNEL_NUM_MASK GENMASK(3, 1)
#define SPEAR_ADC_STATUS_ADC_ENABLE BIT(4)
-#define SPEAR_ADC_STATUS_AVG_SAMPLE(x) ((x) << 5)
+#define SPEAR_ADC_STATUS_AVG_SAMPLE_MASK GENMASK(8, 5)
#define SPEAR_ADC_STATUS_VREF_INTERNAL BIT(9)
#define SPEAR_ADC_DATA_MASK 0x03ff
@@ -157,8 +158,8 @@ static int spear_adc_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_RAW:
mutex_lock(&st->lock);
- status = SPEAR_ADC_STATUS_CHANNEL_NUM(chan->channel) |
- SPEAR_ADC_STATUS_AVG_SAMPLE(st->avg_samples) |
+ status = FIELD_PREP(SPEAR_ADC_STATUS_CHANNEL_NUM_MASK, chan->channel) |
+ FIELD_PREP(SPEAR_ADC_STATUS_AVG_SAMPLE_MASK, st->avg_samples) |
SPEAR_ADC_STATUS_START_CONVERSION |
SPEAR_ADC_STATUS_ADC_ENABLE;
if (st->vref_external == 0)
@@ -274,8 +275,7 @@ static int spear_adc_probe(struct platform_device *pdev)
indio_dev = devm_iio_device_alloc(dev, sizeof(struct spear_adc_state));
if (!indio_dev)
- return dev_err_probe(dev, -ENOMEM,
- "failed allocating iio device\n");
+ return -ENOMEM;
st = iio_priv(indio_dev);
st->dev = dev;
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index 3d800762c5fc..e39a4c0db25e 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -794,7 +794,6 @@ static int stm32_adc_probe(struct platform_device *pdev)
goto err_irq_remove;
}
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index b9f93116e114..2d7f88459c7c 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -1528,7 +1528,6 @@ static int stm32_adc_single_conv(struct iio_dev *indio_dev,
stm32_adc_conv_irq_disable(adc);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -1564,7 +1563,6 @@ static int stm32_adc_write_raw(struct iio_dev *indio_dev,
adc->cfg->set_ovs(indio_dev, idx);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
adc->ovs_idx = idx;
@@ -1759,7 +1757,6 @@ static int stm32_adc_update_scan_mode(struct iio_dev *indio_dev,
adc->num_conv = bitmap_weight(scan_mask, iio_get_masklength(indio_dev));
ret = stm32_adc_conf_scan_seq(indio_dev, scan_mask);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -1808,7 +1805,6 @@ static int stm32_adc_debugfs_reg_access(struct iio_dev *indio_dev,
else
*readval = stm32_adc_readl(adc, reg);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
@@ -1954,7 +1950,6 @@ static int stm32_adc_buffer_postenable(struct iio_dev *indio_dev)
err_clr_trig:
stm32_adc_set_trig(indio_dev, NULL);
err_pm_put:
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -1977,7 +1972,6 @@ static int stm32_adc_buffer_predisable(struct iio_dev *indio_dev)
if (stm32_adc_set_trig(indio_dev, NULL))
dev_err(&indio_dev->dev, "Can't clear trigger\n");
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
@@ -2614,7 +2608,6 @@ static int stm32_adc_probe(struct platform_device *pdev)
goto err_hw_stop;
}
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
if (IS_ENABLED(CONFIG_DEBUG_FS))
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index c2d21eecafe7..74b1b4dc6e81 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -1764,10 +1764,8 @@ static int stm32_dfsdm_adc_probe(struct platform_device *pdev)
dev_data = of_device_get_match_data(dev);
iio = devm_iio_device_alloc(dev, sizeof(*adc));
- if (!iio) {
- dev_err(dev, "%s: Failed to allocate IIO\n", __func__);
+ if (!iio)
return -ENOMEM;
- }
adc = iio_priv(iio);
adc->dfsdm = dev_get_drvdata(dev->parent);
diff --git a/drivers/iio/adc/stmpe-adc.c b/drivers/iio/adc/stmpe-adc.c
index b0add5a2eab5..8e26c47edc08 100644
--- a/drivers/iio/adc/stmpe-adc.c
+++ b/drivers/iio/adc/stmpe-adc.c
@@ -267,10 +267,8 @@ static int stmpe_adc_probe(struct platform_device *pdev)
return irq_adc;
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(struct stmpe_adc));
- if (!indio_dev) {
- dev_err(&pdev->dev, "failed allocating iio device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
info = iio_priv(indio_dev);
mutex_init(&info->lock);
diff --git a/drivers/iio/adc/sun4i-gpadc-iio.c b/drivers/iio/adc/sun4i-gpadc-iio.c
index 6b8d6bee1873..479115ea50bf 100644
--- a/drivers/iio/adc/sun4i-gpadc-iio.c
+++ b/drivers/iio/adc/sun4i-gpadc-iio.c
@@ -154,7 +154,6 @@ static const struct regmap_config sun4i_gpadc_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .fast_io = true,
};
static int sun4i_prepare_for_irq(struct iio_dev *indio_dev, int channel,
@@ -245,7 +244,6 @@ static int sun4i_gpadc_read(struct iio_dev *indio_dev, int channel, int *val,
*val = info->temp_data;
ret = 0;
- pm_runtime_mark_last_busy(indio_dev->dev.parent);
err:
pm_runtime_put_autosuspend(indio_dev->dev.parent);
@@ -272,7 +270,6 @@ static int sun4i_gpadc_temp_read(struct iio_dev *indio_dev, int *val)
regmap_read(info->regmap, SUN4I_GPADC_TEMP_DATA, val);
- pm_runtime_mark_last_busy(indio_dev->dev.parent);
pm_runtime_put_autosuspend(indio_dev->dev.parent);
return 0;
diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c
index 4f514db5c26e..8ef51c57912d 100644
--- a/drivers/iio/adc/ti-adc081c.c
+++ b/drivers/iio/adc/ti-adc081c.c
@@ -102,27 +102,23 @@ struct adcxx1c_model {
int bits;
};
-#define ADCxx1C_MODEL(_name, _bits) \
- { \
- .channels = _name ## _channels, \
- .bits = (_bits), \
- }
-
DEFINE_ADCxx1C_CHANNELS(adc081c, 8);
DEFINE_ADCxx1C_CHANNELS(adc101c, 10);
DEFINE_ADCxx1C_CHANNELS(adc121c, 12);
-/* Model ids are indexes in _models array */
-enum adcxx1c_model_id {
- ADC081C = 0,
- ADC101C = 1,
- ADC121C = 2,
+static const struct adcxx1c_model adc081c_model = {
+ .channels = adc081c_channels,
+ .bits = 8,
+};
+
+static const struct adcxx1c_model adc101c_model = {
+ .channels = adc101c_channels,
+ .bits = 10,
};
-static struct adcxx1c_model adcxx1c_models[] = {
- ADCxx1C_MODEL(adc081c, 8),
- ADCxx1C_MODEL(adc101c, 10),
- ADCxx1C_MODEL(adc121c, 12),
+static const struct adcxx1c_model adc121c_model = {
+ .channels = adc121c_channels,
+ .bits = 12,
};
static const struct iio_info adc081c_info = {
@@ -203,24 +199,24 @@ static int adc081c_probe(struct i2c_client *client)
}
static const struct i2c_device_id adc081c_id[] = {
- { "adc081c", (kernel_ulong_t)&adcxx1c_models[ADC081C] },
- { "adc101c", (kernel_ulong_t)&adcxx1c_models[ADC101C] },
- { "adc121c", (kernel_ulong_t)&adcxx1c_models[ADC121C] },
+ { "adc081c", (kernel_ulong_t)&adc081c_model },
+ { "adc101c", (kernel_ulong_t)&adc101c_model },
+ { "adc121c", (kernel_ulong_t)&adc121c_model },
{ }
};
MODULE_DEVICE_TABLE(i2c, adc081c_id);
static const struct acpi_device_id adc081c_acpi_match[] = {
/* Used on some AAEON boards */
- { "ADC081C", (kernel_ulong_t)&adcxx1c_models[ADC081C] },
+ { "ADC081C", (kernel_ulong_t)&adc081c_model },
{ }
};
MODULE_DEVICE_TABLE(acpi, adc081c_acpi_match);
static const struct of_device_id adc081c_of_match[] = {
- { .compatible = "ti,adc081c", .data = &adcxx1c_models[ADC081C] },
- { .compatible = "ti,adc101c", .data = &adcxx1c_models[ADC101C] },
- { .compatible = "ti,adc121c", .data = &adcxx1c_models[ADC121C] },
+ { .compatible = "ti,adc081c", .data = &adc081c_model },
+ { .compatible = "ti,adc101c", .data = &adc101c_model },
+ { .compatible = "ti,adc121c", .data = &adc121c_model },
{ }
};
MODULE_DEVICE_TABLE(of, adc081c_of_match);
diff --git a/drivers/iio/adc/ti-adc084s021.c b/drivers/iio/adc/ti-adc084s021.c
index 50a474f4d9f5..a100f770fa1c 100644
--- a/drivers/iio/adc/ti-adc084s021.c
+++ b/drivers/iio/adc/ti-adc084s021.c
@@ -200,10 +200,8 @@ static int adc084s021_probe(struct spi_device *spi)
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adc));
- if (!indio_dev) {
- dev_err(&spi->dev, "Failed to allocate IIO device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
adc = iio_priv(indio_dev);
adc->spi = spi;
diff --git a/drivers/iio/adc/ti-adc12138.c b/drivers/iio/adc/ti-adc12138.c
index 9dc465a10ffc..e5ec4b073daa 100644
--- a/drivers/iio/adc/ti-adc12138.c
+++ b/drivers/iio/adc/ti-adc12138.c
@@ -38,15 +38,13 @@ enum {
struct adc12138 {
struct spi_device *spi;
unsigned int id;
- /* conversion clock */
- struct clk *cclk;
/* positive analog voltage reference */
struct regulator *vref_p;
/* negative analog voltage reference */
struct regulator *vref_n;
struct mutex lock;
struct completion complete;
- /* The number of cclk periods for the S/H's acquisition time */
+ /* The number of conversion clock periods for the S/H's acquisition time */
unsigned int acquisition_time;
/*
* Maximum size needed: 16x 2 bytes ADC data + 8 bytes timestamp.
@@ -400,6 +398,7 @@ static int adc12138_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
struct adc12138 *adc;
+ struct clk *cclk;
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adc));
@@ -435,9 +434,14 @@ static int adc12138_probe(struct spi_device *spi)
if (ret)
adc->acquisition_time = 10;
- adc->cclk = devm_clk_get(&spi->dev, NULL);
- if (IS_ERR(adc->cclk))
- return PTR_ERR(adc->cclk);
+ ret = devm_request_irq(&spi->dev, spi->irq, adc12138_eoc_handler,
+ IRQF_TRIGGER_RISING, indio_dev->name, indio_dev);
+ if (ret)
+ return ret;
+
+ cclk = devm_clk_get_enabled(&spi->dev, NULL);
+ if (IS_ERR(cclk))
+ return PTR_ERR(cclk);
adc->vref_p = devm_regulator_get(&spi->dev, "vref-p");
if (IS_ERR(adc->vref_p))
@@ -454,18 +458,9 @@ static int adc12138_probe(struct spi_device *spi)
return ret;
}
- ret = devm_request_irq(&spi->dev, spi->irq, adc12138_eoc_handler,
- IRQF_TRIGGER_RISING, indio_dev->name, indio_dev);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(adc->cclk);
- if (ret)
- return ret;
-
ret = regulator_enable(adc->vref_p);
if (ret)
- goto err_clk_disable;
+ return ret;
if (!IS_ERR(adc->vref_n)) {
ret = regulator_enable(adc->vref_n);
@@ -496,8 +491,6 @@ err_vref_n_disable:
regulator_disable(adc->vref_n);
err_vref_p_disable:
regulator_disable(adc->vref_p);
-err_clk_disable:
- clk_disable_unprepare(adc->cclk);
return ret;
}
@@ -512,7 +505,6 @@ static void adc12138_remove(struct spi_device *spi)
if (!IS_ERR(adc->vref_n))
regulator_disable(adc->vref_n);
regulator_disable(adc->vref_p);
- clk_disable_unprepare(adc->cclk);
}
static const struct of_device_id adc12138_dt_ids[] = {
diff --git a/drivers/iio/adc/ti-adc128s052.c b/drivers/iio/adc/ti-adc128s052.c
index 1b46a8155803..4ae65793ad9b 100644
--- a/drivers/iio/adc/ti-adc128s052.c
+++ b/drivers/iio/adc/ti-adc128s052.c
@@ -99,51 +99,83 @@ static int adc128_read_raw(struct iio_dev *indio_dev,
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \
}
-static const struct iio_chan_spec adc128s052_channels[] = {
+static const struct iio_chan_spec simple_1chan_adc_channels[] = {
+ ADC128_VOLTAGE_CHANNEL(0),
+};
+
+static const struct iio_chan_spec simple_2chan_adc_channels[] = {
ADC128_VOLTAGE_CHANNEL(0),
ADC128_VOLTAGE_CHANNEL(1),
- ADC128_VOLTAGE_CHANNEL(2),
- ADC128_VOLTAGE_CHANNEL(3),
- ADC128_VOLTAGE_CHANNEL(4),
- ADC128_VOLTAGE_CHANNEL(5),
- ADC128_VOLTAGE_CHANNEL(6),
- ADC128_VOLTAGE_CHANNEL(7),
};
-static const struct iio_chan_spec adc122s021_channels[] = {
+static const struct iio_chan_spec simple_4chan_adc_channels[] = {
ADC128_VOLTAGE_CHANNEL(0),
ADC128_VOLTAGE_CHANNEL(1),
+ ADC128_VOLTAGE_CHANNEL(2),
+ ADC128_VOLTAGE_CHANNEL(3),
};
-static const struct iio_chan_spec adc124s021_channels[] = {
+static const struct iio_chan_spec simple_8chan_adc_channels[] = {
ADC128_VOLTAGE_CHANNEL(0),
ADC128_VOLTAGE_CHANNEL(1),
ADC128_VOLTAGE_CHANNEL(2),
ADC128_VOLTAGE_CHANNEL(3),
+ ADC128_VOLTAGE_CHANNEL(4),
+ ADC128_VOLTAGE_CHANNEL(5),
+ ADC128_VOLTAGE_CHANNEL(6),
+ ADC128_VOLTAGE_CHANNEL(7),
};
static const char * const bd79104_regulators[] = { "iovdd" };
-static const struct adc128_configuration adc128_config[] = {
- {
- .channels = adc128s052_channels,
- .num_channels = ARRAY_SIZE(adc128s052_channels),
- .refname = "vref",
- }, {
- .channels = adc122s021_channels,
- .num_channels = ARRAY_SIZE(adc122s021_channels),
- .refname = "vref",
- }, {
- .channels = adc124s021_channels,
- .num_channels = ARRAY_SIZE(adc124s021_channels),
- .refname = "vref",
- }, {
- .channels = adc128s052_channels,
- .num_channels = ARRAY_SIZE(adc128s052_channels),
- .refname = "vdd",
- .other_regulators = &bd79104_regulators,
- .num_other_regulators = 1,
- },
+static const struct adc128_configuration adc122s_config = {
+ .channels = simple_2chan_adc_channels,
+ .num_channels = ARRAY_SIZE(simple_2chan_adc_channels),
+ .refname = "vref",
+};
+
+static const struct adc128_configuration adc124s_config = {
+ .channels = simple_4chan_adc_channels,
+ .num_channels = ARRAY_SIZE(simple_4chan_adc_channels),
+ .refname = "vref",
+};
+
+static const struct adc128_configuration adc128s_config = {
+ .channels = simple_8chan_adc_channels,
+ .num_channels = ARRAY_SIZE(simple_8chan_adc_channels),
+ .refname = "vref",
+};
+
+static const struct adc128_configuration bd79100_config = {
+ .channels = simple_1chan_adc_channels,
+ .num_channels = ARRAY_SIZE(simple_1chan_adc_channels),
+ .refname = "vdd",
+ .other_regulators = &bd79104_regulators,
+ .num_other_regulators = 1,
+};
+
+static const struct adc128_configuration bd79101_config = {
+ .channels = simple_2chan_adc_channels,
+ .num_channels = ARRAY_SIZE(simple_2chan_adc_channels),
+ .refname = "vdd",
+ .other_regulators = &bd79104_regulators,
+ .num_other_regulators = 1,
+};
+
+static const struct adc128_configuration bd79102_config = {
+ .channels = simple_4chan_adc_channels,
+ .num_channels = ARRAY_SIZE(simple_4chan_adc_channels),
+ .refname = "vdd",
+ .other_regulators = &bd79104_regulators,
+ .num_other_regulators = 1,
+};
+
+static const struct adc128_configuration bd79104_config = {
+ .channels = simple_8chan_adc_channels,
+ .num_channels = ARRAY_SIZE(simple_8chan_adc_channels),
+ .refname = "vdd",
+ .other_regulators = &bd79104_regulators,
+ .num_other_regulators = 1,
};
static const struct iio_info adc128_info = {
@@ -199,33 +231,41 @@ static int adc128_probe(struct spi_device *spi)
}
static const struct of_device_id adc128_of_match[] = {
- { .compatible = "ti,adc128s052", .data = &adc128_config[0] },
- { .compatible = "ti,adc122s021", .data = &adc128_config[1] },
- { .compatible = "ti,adc122s051", .data = &adc128_config[1] },
- { .compatible = "ti,adc122s101", .data = &adc128_config[1] },
- { .compatible = "ti,adc124s021", .data = &adc128_config[2] },
- { .compatible = "ti,adc124s051", .data = &adc128_config[2] },
- { .compatible = "ti,adc124s101", .data = &adc128_config[2] },
- { .compatible = "rohm,bd79104", .data = &adc128_config[3] },
+ { .compatible = "ti,adc128s052", .data = &adc128s_config },
+ { .compatible = "ti,adc122s021", .data = &adc122s_config },
+ { .compatible = "ti,adc122s051", .data = &adc122s_config },
+ { .compatible = "ti,adc122s101", .data = &adc122s_config },
+ { .compatible = "ti,adc124s021", .data = &adc124s_config },
+ { .compatible = "ti,adc124s051", .data = &adc124s_config },
+ { .compatible = "ti,adc124s101", .data = &adc124s_config },
+ { .compatible = "rohm,bd79100", .data = &bd79100_config },
+ { .compatible = "rohm,bd79101", .data = &bd79101_config },
+ { .compatible = "rohm,bd79102", .data = &bd79102_config },
+ { .compatible = "rohm,bd79103", .data = &bd79104_config },
+ { .compatible = "rohm,bd79104", .data = &bd79104_config },
{ }
};
MODULE_DEVICE_TABLE(of, adc128_of_match);
static const struct spi_device_id adc128_id[] = {
- { "adc128s052", (kernel_ulong_t)&adc128_config[0] },
- { "adc122s021", (kernel_ulong_t)&adc128_config[1] },
- { "adc122s051", (kernel_ulong_t)&adc128_config[1] },
- { "adc122s101", (kernel_ulong_t)&adc128_config[1] },
- { "adc124s021", (kernel_ulong_t)&adc128_config[2] },
- { "adc124s051", (kernel_ulong_t)&adc128_config[2] },
- { "adc124s101", (kernel_ulong_t)&adc128_config[2] },
- { "bd79104", (kernel_ulong_t)&adc128_config[3] },
+ { "adc128s052", (kernel_ulong_t)&adc128s_config },
+ { "adc122s021", (kernel_ulong_t)&adc122s_config },
+ { "adc122s051", (kernel_ulong_t)&adc122s_config },
+ { "adc122s101", (kernel_ulong_t)&adc122s_config },
+ { "adc124s021", (kernel_ulong_t)&adc124s_config },
+ { "adc124s051", (kernel_ulong_t)&adc124s_config },
+ { "adc124s101", (kernel_ulong_t)&adc124s_config },
+ { "bd79100", (kernel_ulong_t)&bd79100_config },
+ { "bd79101", (kernel_ulong_t)&bd79101_config },
+ { "bd79102", (kernel_ulong_t)&bd79102_config },
+ { "bd79103", (kernel_ulong_t)&bd79104_config },
+ { "bd79104", (kernel_ulong_t)&bd79104_config },
{ }
};
MODULE_DEVICE_TABLE(spi, adc128_id);
static const struct acpi_device_id adc128_acpi_match[] = {
- { "AANT1280", (kernel_ulong_t)&adc128_config[2] },
+ { "AANT1280", (kernel_ulong_t)&adc124s_config },
{ }
};
MODULE_DEVICE_TABLE(acpi, adc128_acpi_match);
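
Splitting the shared config array into named per-chip structs means the
probe path no longer needs magic indices; the matched config can be
resolved with one generic helper. A minimal sketch, assuming the driver
uses spi_get_device_match_data() (which covers the OF, ACPI and SPI id
tables above alike):

    static int adc128_probe_sketch(struct spi_device *spi)
    {
    	const struct adc128_configuration *config;

    	config = spi_get_device_match_data(spi);
    	if (!config)
    		return -ENODEV;

    	/* ... allocate the iio_dev and apply config->channels ... */
    	return 0;
    }
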
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index 48549d617e5f..f2a93c63ca14 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -374,12 +374,10 @@ static int ads1015_set_power_state(struct ads1015_data *data, bool on)
int ret;
struct device *dev = regmap_get_device(data->regmap);
- if (on) {
+ if (on)
ret = pm_runtime_resume_and_get(dev);
- } else {
- pm_runtime_mark_last_busy(dev);
+ else
ret = pm_runtime_put_autosuspend(dev);
- }
return ret < 0 ? ret : 0;
}
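
The pm_runtime_mark_last_busy() removals in this and the following
drivers rely on the PM core folding the last-busy update into
pm_runtime_put_autosuspend() itself (an assumption about the target
tree). The pattern, side by side:

    /* old: refresh the autosuspend timer by hand before the put */
    pm_runtime_mark_last_busy(dev);
    pm_runtime_put_autosuspend(dev);

    /* new: the put helper updates the last-busy timestamp internally */
    pm_runtime_put_autosuspend(dev);
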
diff --git a/drivers/iio/adc/ti-ads1100.c b/drivers/iio/adc/ti-ads1100.c
index b0790e300b18..aa8946063c7d 100644
--- a/drivers/iio/adc/ti-ads1100.c
+++ b/drivers/iio/adc/ti-ads1100.c
@@ -105,7 +105,6 @@ static int ads1100_get_adc_result(struct ads1100_data *data, int chan, int *val)
ret = i2c_master_recv(data->client, (char *)&buffer, sizeof(buffer));
- pm_runtime_mark_last_busy(&data->client->dev);
pm_runtime_put_autosuspend(&data->client->dev);
if (ret < 0) {
diff --git a/drivers/iio/adc/ti-ads1119.c b/drivers/iio/adc/ti-ads1119.c
index d2f86e1ec656..c9cedc59cdcd 100644
--- a/drivers/iio/adc/ti-ads1119.c
+++ b/drivers/iio/adc/ti-ads1119.c
@@ -291,7 +291,6 @@ static int ads1119_single_conversion(struct ads1119_state *st,
*val = sign_extend32(sample, chan->scan_type.realbits - 1);
ret = IIO_VAL_INT;
pdown:
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
}
@@ -470,7 +469,6 @@ static int ads1119_triggered_buffer_postdisable(struct iio_dev *indio_dev)
if (ret)
return ret;
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
@@ -693,8 +691,7 @@ static int ads1119_probe(struct i2c_client *client)
indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
if (!indio_dev)
- return dev_err_probe(dev, -ENOMEM,
- "Failed to allocate IIO device\n");
+ return -ENOMEM;
st = iio_priv(indio_dev);
st->client = client;
@@ -750,8 +747,7 @@ static int ads1119_probe(struct i2c_client *client)
indio_dev->name,
iio_device_id(indio_dev));
if (!st->trig)
- return dev_err_probe(dev, -ENOMEM,
- "Failed to allocate IIO trigger\n");
+ return -ENOMEM;
st->trig->ops = &ads1119_trigger_ops;
iio_trigger_set_drvdata(st->trig, indio_dev);
@@ -778,8 +774,7 @@ static int ads1119_probe(struct i2c_client *client)
ret = devm_add_action_or_reset(dev, ads1119_powerdown, st);
if (ret)
- return dev_err_probe(dev, ret,
- "Failed to add powerdown action\n");
+ return ret;
return devm_iio_device_register(dev, indio_dev);
}
diff --git a/drivers/iio/adc/ti-ads131e08.c b/drivers/iio/adc/ti-ads131e08.c
index b18f30d3fdbe..742acc6d8cf9 100644
--- a/drivers/iio/adc/ti-ads131e08.c
+++ b/drivers/iio/adc/ti-ads131e08.c
@@ -807,10 +807,8 @@ static int ads131e08_probe(struct spi_device *spi)
}
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev) {
- dev_err(&spi->dev, "failed to allocate IIO device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
st = iio_priv(indio_dev);
st->info = info;
@@ -841,10 +839,8 @@ static int ads131e08_probe(struct spi_device *spi)
st->trig = devm_iio_trigger_alloc(&spi->dev, "%s-dev%d",
indio_dev->name, iio_device_id(indio_dev));
- if (!st->trig) {
- dev_err(&spi->dev, "failed to allocate IIO trigger\n");
+ if (!st->trig)
return -ENOMEM;
- }
st->trig->ops = &ads131e08_trigger_ops;
st->trig->dev.parent = &spi->dev;
diff --git a/drivers/iio/adc/ti-ads7924.c b/drivers/iio/adc/ti-ads7924.c
index b1f745f75dbe..bbcc4fc22b6e 100644
--- a/drivers/iio/adc/ti-ads7924.c
+++ b/drivers/iio/adc/ti-ads7924.c
@@ -355,8 +355,7 @@ static int ads7924_probe(struct i2c_client *client)
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
- return dev_err_probe(dev, -ENOMEM,
- "failed to allocate iio device\n");
+ return -ENOMEM;
data = iio_priv(indio_dev);
@@ -399,8 +398,7 @@ static int ads7924_probe(struct i2c_client *client)
ret = devm_add_action_or_reset(dev, ads7924_reg_disable, data->vref_reg);
if (ret)
- return dev_err_probe(dev, ret,
- "failed to add regulator disable action\n");
+ return ret;
ret = ads7924_reset(indio_dev);
if (ret < 0)
@@ -414,8 +412,7 @@ static int ads7924_probe(struct i2c_client *client)
ret = devm_add_action_or_reset(dev, ads7924_set_idle_mode, data);
if (ret)
- return dev_err_probe(dev, ret,
- "failed to add idle mode action\n");
+ return ret;
/* Use minimum signal acquire time. */
ret = regmap_update_bits(data->regmap, ADS7924_ACQCONFIG_REG,
diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c
index 0356ccf23fea..bbe1ce577789 100644
--- a/drivers/iio/adc/ti-ads7950.c
+++ b/drivers/iio/adc/ti-ads7950.c
@@ -648,7 +648,7 @@ static int ti_ads7950_probe(struct spi_device *spi)
st->chip.direction_input = ti_ads7950_direction_input;
st->chip.direction_output = ti_ads7950_direction_output;
st->chip.get = ti_ads7950_get;
- st->chip.set_rv = ti_ads7950_set;
+ st->chip.set = ti_ads7950_set;
ret = gpiochip_add_data(&st->chip, st);
if (ret) {
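
The .set_rv member was the transitional name for the int-returning
gpiochip setter; with the conversion finished, the GPIO core uses .set
for that variant, hence the rename here. A sketch of the expected
callback shape (hypothetical body, not from this patch):

    static int ti_ads7950_set(struct gpio_chip *chip, unsigned int offset,
    			      int value)
    {
    	/* returning int lets SPI bus errors propagate to the caller */
    	return 0;
    }
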
diff --git a/drivers/iio/adc/ti-tsc2046.c b/drivers/iio/adc/ti-tsc2046.c
index 74471f08662e..8eb717b11cff 100644
--- a/drivers/iio/adc/ti-tsc2046.c
+++ b/drivers/iio/adc/ti-tsc2046.c
@@ -535,8 +535,7 @@ static enum hrtimer_restart tsc2046_adc_timer(struct hrtimer *hrtimer)
if (priv->poll_cnt < TI_TSC2046_POLL_CNT) {
priv->poll_cnt++;
hrtimer_start(&priv->trig_timer,
- ns_to_ktime(priv->scan_interval_us *
- NSEC_PER_USEC),
+ us_to_ktime(priv->scan_interval_us),
HRTIMER_MODE_REL_SOFT);
if (priv->poll_cnt >= TI_TSC2046_MIN_POLL_CNT) {
@@ -605,8 +604,7 @@ static void tsc2046_adc_reenable_trigger(struct iio_trigger *trig)
* many samples. Reduce the sample rate for default (touchscreen) use
* case.
*/
- tim = ns_to_ktime((priv->scan_interval_us - priv->time_per_scan_us) *
- NSEC_PER_USEC);
+ tim = us_to_ktime(priv->scan_interval_us - priv->time_per_scan_us);
hrtimer_start(&priv->trig_timer, tim, HRTIMER_MODE_REL_SOFT);
}
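
us_to_ktime() is the direct microsecond conversion from <linux/ktime.h>;
it replaces the open-coded scaling without changing the result:

    /* equivalent, but the unit is stated once instead of computed */
    tim = ns_to_ktime(interval_us * NSEC_PER_USEC);
    tim = us_to_ktime(interval_us);
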
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index fe1509d3b1e7..99f274adc870 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -631,10 +631,9 @@ static int tiadc_probe(struct platform_device *pdev)
}
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc_dev));
- if (!indio_dev) {
- dev_err(&pdev->dev, "failed to allocate iio device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
+
adc_dev = iio_priv(indio_dev);
adc_dev->mfd_tscadc = ti_tscadc_dev_get(pdev);
diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c
index 0ea51ddeaa0a..fe3b31ec976e 100644
--- a/drivers/iio/adc/twl4030-madc.c
+++ b/drivers/iio/adc/twl4030-madc.c
@@ -758,10 +758,8 @@ static int twl4030_madc_probe(struct platform_device *pdev)
}
iio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*madc));
- if (!iio_dev) {
- dev_err(&pdev->dev, "failed allocating iio device\n");
+ if (!iio_dev)
return -ENOMEM;
- }
madc = iio_priv(iio_dev);
madc->dev = &pdev->dev;
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 1b3b1843a801..d7182ed0d2a7 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -832,7 +832,7 @@ static int vf610_adc_probe(struct platform_device *pdev)
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(struct vf610_adc));
if (!indio_dev)
- return dev_err_probe(&pdev->dev, -ENOMEM, "Failed allocating iio device\n");
+ return -ENOMEM;
info = iio_priv(indio_dev);
info->dev = &pdev->dev;
diff --git a/drivers/iio/adc/viperboard_adc.c b/drivers/iio/adc/viperboard_adc.c
index 1028b101cf56..9bb0b83c8f67 100644
--- a/drivers/iio/adc/viperboard_adc.c
+++ b/drivers/iio/adc/viperboard_adc.c
@@ -113,10 +113,8 @@ static int vprbrd_adc_probe(struct platform_device *pdev)
/* registering iio */
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc));
- if (!indio_dev) {
- dev_err(&pdev->dev, "failed allocating iio device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
adc = iio_priv(indio_dev);
adc->vb = vb;
diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c
index 76dd0343f5f7..124470c92529 100644
--- a/drivers/iio/adc/xilinx-ams.c
+++ b/drivers/iio/adc/xilinx-ams.c
@@ -118,7 +118,7 @@
#define AMS_ALARM_THRESHOLD_OFF_10 0x10
#define AMS_ALARM_THRESHOLD_OFF_20 0x20
-#define AMS_ALARM_THR_DIRECT_MASK BIT(1)
+#define AMS_ALARM_THR_DIRECT_MASK BIT(0)
#define AMS_ALARM_THR_MIN 0x0000
#define AMS_ALARM_THR_MAX (BIT(16) - 1)
@@ -389,6 +389,29 @@ static void ams_update_pl_alarm(struct ams *ams, unsigned long alarm_mask)
ams_pl_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK, cfg);
}
+static void ams_unmask(struct ams *ams)
+{
+ unsigned int status, unmask;
+
+ status = readl(ams->base + AMS_ISR_0);
+
+ /* Clear those bits which are not active anymore */
+ unmask = (ams->current_masked_alarm ^ status) & ams->current_masked_alarm;
+
+ /* Clear status of disabled alarm */
+ unmask |= ams->intr_mask;
+
+ ams->current_masked_alarm &= status;
+
+ /* Also clear those which are masked out anyway */
+ ams->current_masked_alarm &= ~ams->intr_mask;
+
+ /* Clear the interrupts before we unmask them */
+ writel(unmask, ams->base + AMS_ISR_0);
+
+ ams_update_intrmask(ams, ~AMS_ALARM_MASK, ~AMS_ALARM_MASK);
+}
+
static void ams_update_alarm(struct ams *ams, unsigned long alarm_mask)
{
unsigned long flags;
@@ -401,6 +424,7 @@ static void ams_update_alarm(struct ams *ams, unsigned long alarm_mask)
spin_lock_irqsave(&ams->intr_lock, flags);
ams_update_intrmask(ams, AMS_ISR0_ALARM_MASK, ~alarm_mask);
+ ams_unmask(ams);
spin_unlock_irqrestore(&ams->intr_lock, flags);
}
@@ -1035,28 +1059,9 @@ static void ams_handle_events(struct iio_dev *indio_dev, unsigned long events)
static void ams_unmask_worker(struct work_struct *work)
{
struct ams *ams = container_of(work, struct ams, ams_unmask_work.work);
- unsigned int status, unmask;
spin_lock_irq(&ams->intr_lock);
-
- status = readl(ams->base + AMS_ISR_0);
-
- /* Clear those bits which are not active anymore */
- unmask = (ams->current_masked_alarm ^ status) & ams->current_masked_alarm;
-
- /* Clear status of disabled alarm */
- unmask |= ams->intr_mask;
-
- ams->current_masked_alarm &= status;
-
- /* Also clear those which are masked out anyway */
- ams->current_masked_alarm &= ~ams->intr_mask;
-
- /* Clear the interrupts before we unmask them */
- writel(unmask, ams->base + AMS_ISR_0);
-
- ams_update_intrmask(ams, ~AMS_ALARM_MASK, ~AMS_ALARM_MASK);
-
+ ams_unmask(ams);
spin_unlock_irq(&ams->intr_lock);
/* If still pending some alarm re-trigger the timer */
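
Factoring the sequence into ams_unmask() lets ams_update_alarm() run it
immediately instead of waiting for the delayed work. Both call sites
hold intr_lock, which the helper depends on; a hedged sketch of how
that contract could be made explicit (not part of this patch):

    static void ams_unmask(struct ams *ams)
    {
    	/* both call sites hold intr_lock; make the contract checkable */
    	lockdep_assert_held(&ams->intr_lock);

    	/* ... unmask sequence as in the hunk above ... */
    }
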
diff --git a/drivers/iio/addac/ad74115.c b/drivers/iio/addac/ad74115.c
index 4d8b64048e4f..f8b04d86b01f 100644
--- a/drivers/iio/addac/ad74115.c
+++ b/drivers/iio/addac/ad74115.c
@@ -1577,7 +1577,7 @@ static int ad74115_setup_gpio_chip(struct ad74115_state *st)
.direction_input = ad74115_gpio_direction_input,
.direction_output = ad74115_gpio_direction_output,
.get = ad74115_gpio_get,
- .set_rv = ad74115_gpio_set,
+ .set = ad74115_gpio_set,
};
return devm_gpiochip_add_data(dev, &st->gc, st);
diff --git a/drivers/iio/addac/ad74413r.c b/drivers/iio/addac/ad74413r.c
index a0bb1dbcb7ad..a20b4d48c5f7 100644
--- a/drivers/iio/addac/ad74413r.c
+++ b/drivers/iio/addac/ad74413r.c
@@ -1425,8 +1425,8 @@ static int ad74413r_probe(struct spi_device *spi)
st->gpo_gpiochip.ngpio = st->num_gpo_gpios;
st->gpo_gpiochip.parent = st->dev;
st->gpo_gpiochip.can_sleep = true;
- st->gpo_gpiochip.set_rv = ad74413r_gpio_set;
- st->gpo_gpiochip.set_multiple_rv = ad74413r_gpio_set_multiple;
+ st->gpo_gpiochip.set = ad74413r_gpio_set;
+ st->gpo_gpiochip.set_multiple = ad74413r_gpio_set_multiple;
st->gpo_gpiochip.set_config = ad74413r_gpio_set_gpo_config;
st->gpo_gpiochip.get_direction =
ad74413r_gpio_get_gpo_direction;
diff --git a/drivers/iio/buffer/industrialio-buffer-cb.c b/drivers/iio/buffer/industrialio-buffer-cb.c
index 4befc9f55201..3e27385069ed 100644
--- a/drivers/iio/buffer/industrialio-buffer-cb.c
+++ b/drivers/iio/buffer/industrialio-buffer-cb.c
@@ -68,7 +68,6 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
cb_buff->private = private;
cb_buff->cb = cb;
cb_buff->buffer.access = &iio_cb_access;
- INIT_LIST_HEAD(&cb_buff->buffer.demux_list);
cb_buff->channels = iio_channel_get_all(dev);
if (IS_ERR(cb_buff->channels)) {
diff --git a/drivers/iio/chemical/atlas-sensor.c b/drivers/iio/chemical/atlas-sensor.c
index 1daaa36f87a9..8bbba85af699 100644
--- a/drivers/iio/chemical/atlas-sensor.c
+++ b/drivers/iio/chemical/atlas-sensor.c
@@ -425,7 +425,6 @@ static int atlas_buffer_predisable(struct iio_dev *indio_dev)
if (ret)
return ret;
- pm_runtime_mark_last_busy(&data->client->dev);
ret = pm_runtime_put_autosuspend(&data->client->dev);
if (ret)
return ret;
@@ -491,7 +490,6 @@ static int atlas_read_measurement(struct atlas_data *data, int reg, __be32 *val)
ret = regmap_bulk_read(data->regmap, reg, val, sizeof(*val));
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c
index 61d446fd456c..70f81c4a96ba 100644
--- a/drivers/iio/chemical/bme680_core.c
+++ b/drivers/iio/chemical/bme680_core.c
@@ -950,7 +950,6 @@ static int bme680_read_raw(struct iio_dev *indio_dev,
return ret;
ret = __bme680_read_raw(indio_dev, chan, val, val2, mask);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -1021,7 +1020,6 @@ static int bme680_write_raw(struct iio_dev *indio_dev,
return ret;
ret = __bme680_write_raw(indio_dev, chan, val, val2, mask);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -1140,7 +1138,6 @@ static int bme680_buffer_postdisable(struct iio_dev *indio_dev)
struct bme680_data *data = iio_priv(indio_dev);
struct device *dev = regmap_get_device(data->regmap);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
}
diff --git a/drivers/iio/chemical/ens160_core.c b/drivers/iio/chemical/ens160_core.c
index 6cec60074827..86bde4a91bf7 100644
--- a/drivers/iio/chemical/ens160_core.c
+++ b/drivers/iio/chemical/ens160_core.c
@@ -305,8 +305,7 @@ static int ens160_setup_trigger(struct iio_dev *indio_dev, int irq)
trig = devm_iio_trigger_alloc(dev, "%s-dev%d", indio_dev->name,
iio_device_id(indio_dev));
if (!trig)
- return dev_err_probe(dev, -ENOMEM,
- "failed to allocate trigger\n");
+ return -ENOMEM;
trig->ops = &ens160_trigger_ops;
iio_trigger_set_drvdata(trig, indio_dev);
diff --git a/drivers/iio/chemical/scd30_core.c b/drivers/iio/chemical/scd30_core.c
index 5df1926cd5d9..a665fcb78806 100644
--- a/drivers/iio/chemical/scd30_core.c
+++ b/drivers/iio/chemical/scd30_core.c
@@ -635,7 +635,7 @@ static int scd30_setup_trigger(struct iio_dev *indio_dev)
trig = devm_iio_trigger_alloc(dev, "%s-dev%d", indio_dev->name,
iio_device_id(indio_dev));
if (!trig)
- return dev_err_probe(dev, -ENOMEM, "failed to allocate trigger\n");
+ return -ENOMEM;
trig->ops = &scd30_trigger_ops;
iio_trigger_set_drvdata(trig, indio_dev);
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index 48193937275b..5540e2d28f4a 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -163,7 +163,6 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
ret = pm_runtime_resume_and_get(&st->pdev->dev);
} else {
atomic_dec(&st->user_requested_state);
- pm_runtime_mark_last_busy(&st->pdev->dev);
pm_runtime_use_autosuspend(&st->pdev->dev);
ret = pm_runtime_put_autosuspend(&st->pdev->dev);
}
diff --git a/drivers/iio/common/scmi_sensors/scmi_iio.c b/drivers/iio/common/scmi_sensors/scmi_iio.c
index da516c46e057..39c61c47022a 100644
--- a/drivers/iio/common/scmi_sensors/scmi_iio.c
+++ b/drivers/iio/common/scmi_sensors/scmi_iio.c
@@ -521,9 +521,9 @@ static int scmi_iio_set_sampling_freq_avail(struct iio_dev *iio_dev)
int i;
sensor->freq_avail =
- devm_kzalloc(&iio_dev->dev,
- sizeof(*sensor->freq_avail) *
- (sensor->sensor_info->intervals.count * 2),
+ devm_kcalloc(&iio_dev->dev,
+ array_size(sensor->sensor_info->intervals.count, 2),
+ sizeof(*sensor->freq_avail),
GFP_KERNEL);
if (!sensor->freq_avail)
return -ENOMEM;
@@ -597,8 +597,8 @@ scmi_alloc_iiodev(struct scmi_device *sdev,
iiodev->info = &scmi_iio_info;
iio_channels =
- devm_kzalloc(dev,
- sizeof(*iio_channels) * (iiodev->num_channels),
+ devm_kcalloc(dev, iiodev->num_channels,
+ sizeof(*iio_channels),
GFP_KERNEL);
if (!iio_channels)
return ERR_PTR(-ENOMEM);
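
devm_kcalloc() performs the element-count multiplication with overflow
checking, returning NULL on overflow, which the open-coded
sizeof() * count arithmetic passed to devm_kzalloc() silently lacked:

    /* before: a large count can wrap the size and under-allocate */
    buf = devm_kzalloc(dev, sizeof(*buf) * count * 2, GFP_KERNEL);

    /* after: array_size() saturates instead of wrapping */
    buf = devm_kcalloc(dev, array_size(count, 2), sizeof(*buf), GFP_KERNEL);
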
diff --git a/drivers/iio/dac/ad5360.c b/drivers/iio/dac/ad5360.c
index a57b0a093112..8271849b1c83 100644
--- a/drivers/iio/dac/ad5360.c
+++ b/drivers/iio/dac/ad5360.c
@@ -262,7 +262,7 @@ static int ad5360_update_ctrl(struct iio_dev *indio_dev, unsigned int set,
unsigned int clr)
{
struct ad5360_state *st = iio_priv(indio_dev);
- unsigned int ret;
+ int ret;
mutex_lock(&st->lock);
diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
index 0ddce7b218e3..8b813cee7625 100644
--- a/drivers/iio/dac/ad5380.c
+++ b/drivers/iio/dac/ad5380.c
@@ -371,10 +371,8 @@ static int ad5380_probe(struct device *dev, struct regmap *regmap,
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
- if (indio_dev == NULL) {
- dev_err(dev, "Failed to allocate iio device\n");
+ if (indio_dev == NULL)
return -ENOMEM;
- }
st = iio_priv(indio_dev);
diff --git a/drivers/iio/dac/ad5421.c b/drivers/iio/dac/ad5421.c
index 1462ee640b16..d9d7031c4432 100644
--- a/drivers/iio/dac/ad5421.c
+++ b/drivers/iio/dac/ad5421.c
@@ -186,7 +186,7 @@ static int ad5421_update_ctrl(struct iio_dev *indio_dev, unsigned int set,
unsigned int clr)
{
struct ad5421_state *st = iio_priv(indio_dev);
- unsigned int ret;
+ int ret;
mutex_lock(&st->lock);
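
The ret type fix in ad5360 and ad5421 matters because regmap calls
return negative errno values: with ret declared unsigned int, a check
of the form ret < 0 can never be true, so write failures were silently
ignored. Minimal illustration (placeholder regmap arguments):

    unsigned int ret = regmap_write(map, reg, val);	/* -EIO becomes 0xffffffea */

    if (ret < 0)	/* unsigned comparison: always false */
    	return ret;	/* error path is unreachable */
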
diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
index 5f2cd51723f6..4720733d66b2 100644
--- a/drivers/iio/dac/ad5592r-base.c
+++ b/drivers/iio/dac/ad5592r-base.c
@@ -129,7 +129,7 @@ static int ad5592r_gpio_init(struct ad5592r_state *st)
st->gpiochip.direction_input = ad5592r_gpio_direction_input;
st->gpiochip.direction_output = ad5592r_gpio_direction_output;
st->gpiochip.get = ad5592r_gpio_get;
- st->gpiochip.set_rv = ad5592r_gpio_set;
+ st->gpiochip.set = ad5592r_gpio_set;
st->gpiochip.request = ad5592r_gpio_request;
st->gpiochip.owner = THIS_MODULE;
st->gpiochip.names = ad5592r_gpio_names;
diff --git a/drivers/iio/dac/ad5764.c b/drivers/iio/dac/ad5764.c
index 26c049d5b73a..fbbd7105a80c 100644
--- a/drivers/iio/dac/ad5764.c
+++ b/drivers/iio/dac/ad5764.c
@@ -278,10 +278,8 @@ static int ad5764_probe(struct spi_device *spi)
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (indio_dev == NULL) {
- dev_err(&spi->dev, "Failed to allocate iio device\n");
+ if (indio_dev == NULL)
return -ENOMEM;
- }
st = iio_priv(indio_dev);
spi_set_drvdata(spi, indio_dev);
diff --git a/drivers/iio/dac/ad5791.c b/drivers/iio/dac/ad5791.c
index 41582f2b90fb..ae7297f08398 100644
--- a/drivers/iio/dac/ad5791.c
+++ b/drivers/iio/dac/ad5791.c
@@ -80,8 +80,6 @@ struct ad5791_chip_info {
/**
* struct ad5791_state - driver instance specific data
* @spi: spi_device
- * @reg_vdd: positive supply regulator
- * @reg_vss: negative supply regulator
* @gpio_reset: reset gpio
* @gpio_clear: clear gpio
* @gpio_ldac: load dac gpio
@@ -100,8 +98,6 @@ struct ad5791_chip_info {
*/
struct ad5791_state {
struct spi_device *spi;
- struct regulator *reg_vdd;
- struct regulator *reg_vss;
struct gpio_desc *gpio_reset;
struct gpio_desc *gpio_clear;
struct gpio_desc *gpio_ldac;
diff --git a/drivers/iio/dac/ds4424.c b/drivers/iio/dac/ds4424.c
index a26a99753418..a8198ba4f98a 100644
--- a/drivers/iio/dac/ds4424.c
+++ b/drivers/iio/dac/ds4424.c
@@ -221,10 +221,8 @@ static int ds4424_probe(struct i2c_client *client)
int ret;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
- if (!indio_dev) {
- dev_err(&client->dev, "iio dev alloc failed.\n");
+ if (!indio_dev)
return -ENOMEM;
- }
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
diff --git a/drivers/iio/dac/stm32-dac.c b/drivers/iio/dac/stm32-dac.c
index 344388338d9b..b860e18d52a1 100644
--- a/drivers/iio/dac/stm32-dac.c
+++ b/drivers/iio/dac/stm32-dac.c
@@ -82,9 +82,11 @@ static int stm32_dac_set_enable_state(struct iio_dev *indio_dev, int ch,
ret = regmap_update_bits(dac->common->regmap, STM32_DAC_CR, msk, en);
mutex_unlock(&dac->lock);
- if (ret < 0) {
+ if (ret) {
dev_err(&indio_dev->dev, "%s failed\n", str_enable_disable(en));
- goto err_put_pm;
+ if (enable)
+ pm_runtime_put_autosuspend(dev);
+ return ret;
}
/*
@@ -95,20 +97,10 @@ static int stm32_dac_set_enable_state(struct iio_dev *indio_dev, int ch,
if (en && dac->common->hfsel)
udelay(1);
- if (!enable) {
- pm_runtime_mark_last_busy(dev);
+ if (!enable)
pm_runtime_put_autosuspend(dev);
- }
return 0;
-
-err_put_pm:
- if (enable) {
- pm_runtime_mark_last_busy(dev);
- pm_runtime_put_autosuspend(dev);
- }
-
- return ret;
}
static int stm32_dac_get_value(struct stm32_dac *dac, int channel, int *val)
@@ -349,7 +341,6 @@ static int stm32_dac_probe(struct platform_device *pdev)
if (ret)
goto err_pm_put;
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
diff --git a/drivers/iio/dac/ti-dac7311.c b/drivers/iio/dac/ti-dac7311.c
index 3d2ce61f0db6..5c1c5213962f 100644
--- a/drivers/iio/dac/ti-dac7311.c
+++ b/drivers/iio/dac/ti-dac7311.c
@@ -242,10 +242,8 @@ static int ti_dac_probe(struct spi_device *spi)
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*ti_dac));
- if (!indio_dev) {
- dev_err(dev, "can not allocate iio device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
spi->mode = SPI_MODE_1;
spi->bits_per_word = 16;
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index 47f1c7e9efa9..ed1741165f55 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -149,6 +149,19 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq)
if (freq > ADF4350_MAX_OUT_FREQ || freq < st->min_out_freq)
return -EINVAL;
+ st->r4_rf_div_sel = 0;
+
+ /*
+ * TODO: The below computation ensures we get a power-of-2 shift
+ * (st->r4_rf_div_sel) so that freq becomes greater than or equal to
+ * ADF4350_MIN_VCO_FREQ. This might be simplified with fls()/fls_long()
+ * and friends.
+ */
+ while (freq < ADF4350_MIN_VCO_FREQ) {
+ freq <<= 1;
+ st->r4_rf_div_sel++;
+ }
+
if (freq > ADF4350_MAX_FREQ_45_PRESC) {
prescaler = ADF4350_REG1_PRESCALER;
mdiv = 75;
@@ -157,13 +170,6 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq)
mdiv = 23;
}
- st->r4_rf_div_sel = 0;
-
- while (freq < ADF4350_MIN_VCO_FREQ) {
- freq <<= 1;
- st->r4_rf_div_sel++;
- }
-
/*
* Allow a predefined reference division factor
* if not set, compute our own
@@ -673,8 +679,7 @@ static int adf4350_probe(struct spi_device *spi)
ret = devm_add_action_or_reset(&spi->dev, adf4350_power_down, indio_dev);
if (ret)
- return dev_err_probe(&spi->dev, ret,
- "Failed to add action to managed power down\n");
+ return ret;
return devm_iio_device_register(&spi->dev, indio_dev);
}
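
The TODO above can indeed be expressed with the bit-search helpers: the
smallest shift s with (freq << s) >= ADF4350_MIN_VCO_FREQ is
fls64(ratio - 1) for the rounded-up ratio. An untested sketch:

    u64 ratio = DIV_ROUND_UP_ULL(ADF4350_MIN_VCO_FREQ, freq);

    st->r4_rf_div_sel = fls64(ratio - 1);	/* 0 when freq is already high enough */
    freq <<= st->r4_rf_div_sel;
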
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index 781d3e96645f..38394b5f3275 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -309,10 +309,8 @@ static int bmg160_set_power_state(struct bmg160_data *data, bool on)
if (on)
ret = pm_runtime_get_sync(dev);
- else {
- pm_runtime_mark_last_busy(dev);
+ else
ret = pm_runtime_put_autosuspend(dev);
- }
if (ret < 0) {
dev_err(dev, "Failed: bmg160_set_power_state for %d\n", on);
diff --git a/drivers/iio/gyro/fxas21002c_core.c b/drivers/iio/gyro/fxas21002c_core.c
index 754c8a564ba4..a88670207cec 100644
--- a/drivers/iio/gyro/fxas21002c_core.c
+++ b/drivers/iio/gyro/fxas21002c_core.c
@@ -373,8 +373,6 @@ static int fxas21002c_pm_put(struct fxas21002c_data *data)
{
struct device *dev = regmap_get_device(data->regmap);
- pm_runtime_mark_last_busy(dev);
-
return pm_runtime_put_autosuspend(dev);
}
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
index 16553948c5c3..67ae7d1012bc 100644
--- a/drivers/iio/gyro/mpu3050-core.c
+++ b/drivers/iio/gyro/mpu3050-core.c
@@ -370,7 +370,6 @@ static int mpu3050_read_raw(struct iio_dev *indio_dev,
out_read_raw_unlock:
mutex_unlock(&mpu3050->lock);
- pm_runtime_mark_last_busy(mpu3050->dev);
pm_runtime_put_autosuspend(mpu3050->dev);
return ret;
@@ -662,7 +661,6 @@ static int mpu3050_buffer_postdisable(struct iio_dev *indio_dev)
{
struct mpu3050 *mpu3050 = iio_priv(indio_dev);
- pm_runtime_mark_last_busy(mpu3050->dev);
pm_runtime_put_autosuspend(mpu3050->dev);
return 0;
@@ -976,7 +974,6 @@ static int mpu3050_drdy_trigger_set_state(struct iio_trigger *trig,
if (ret)
dev_err(mpu3050->dev, "error resetting FIFO\n");
- pm_runtime_mark_last_busy(mpu3050->dev);
pm_runtime_put_autosuspend(mpu3050->dev);
mpu3050->hw_irq_trigger = false;
diff --git a/drivers/iio/gyro/mpu3050-i2c.c b/drivers/iio/gyro/mpu3050-i2c.c
index 8e284f47242c..092878f2c886 100644
--- a/drivers/iio/gyro/mpu3050-i2c.c
+++ b/drivers/iio/gyro/mpu3050-i2c.c
@@ -27,7 +27,6 @@ static int mpu3050_i2c_bypass_deselect(struct i2c_mux_core *mux, u32 chan_id)
{
struct mpu3050 *mpu3050 = i2c_mux_priv(mux);
- pm_runtime_mark_last_busy(mpu3050->dev);
pm_runtime_put_autosuspend(mpu3050->dev);
return 0;
}
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
index 30d3f984b032..0e5a512e3bb8 100644
--- a/drivers/iio/health/afe4403.c
+++ b/drivers/iio/health/afe4403.c
@@ -58,7 +58,6 @@ static const struct reg_field afe4403_reg_fields[] = {
/**
* struct afe4403_data - AFE4403 device instance data
- * @dev: Device structure
* @spi: SPI device handle
* @regmap: Register map of the device
* @fields: Register fields of the device
@@ -68,7 +67,6 @@ static const struct reg_field afe4403_reg_fields[] = {
* @buffer: Used to construct data layout to push into IIO buffer.
*/
struct afe4403_data {
- struct device *dev;
struct spi_device *spi;
struct regmap *regmap;
struct regmap_field *fields[F_MAX_FIELDS];
@@ -460,63 +458,63 @@ static DEFINE_SIMPLE_DEV_PM_OPS(afe4403_pm_ops, afe4403_suspend,
static int afe4403_probe(struct spi_device *spi)
{
+ struct device *dev = &spi->dev;
struct iio_dev *indio_dev;
struct afe4403_data *afe;
int i, ret;
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*afe));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*afe));
if (!indio_dev)
return -ENOMEM;
afe = iio_priv(indio_dev);
spi_set_drvdata(spi, indio_dev);
- afe->dev = &spi->dev;
afe->spi = spi;
afe->irq = spi->irq;
afe->regmap = devm_regmap_init_spi(spi, &afe4403_regmap_config);
if (IS_ERR(afe->regmap)) {
- dev_err(afe->dev, "Unable to allocate register map\n");
+ dev_err(dev, "Unable to allocate register map\n");
return PTR_ERR(afe->regmap);
}
for (i = 0; i < F_MAX_FIELDS; i++) {
- afe->fields[i] = devm_regmap_field_alloc(afe->dev, afe->regmap,
+ afe->fields[i] = devm_regmap_field_alloc(dev, afe->regmap,
afe4403_reg_fields[i]);
if (IS_ERR(afe->fields[i])) {
- dev_err(afe->dev, "Unable to allocate regmap fields\n");
+ dev_err(dev, "Unable to allocate regmap fields\n");
return PTR_ERR(afe->fields[i]);
}
}
- afe->regulator = devm_regulator_get(afe->dev, "tx_sup");
+ afe->regulator = devm_regulator_get(dev, "tx_sup");
if (IS_ERR(afe->regulator))
- return dev_err_probe(afe->dev, PTR_ERR(afe->regulator),
+ return dev_err_probe(dev, PTR_ERR(afe->regulator),
"Unable to get regulator\n");
ret = regulator_enable(afe->regulator);
if (ret) {
- dev_err(afe->dev, "Unable to enable regulator\n");
+ dev_err(dev, "Unable to enable regulator\n");
return ret;
}
- ret = devm_add_action_or_reset(afe->dev, afe4403_regulator_disable, afe->regulator);
+ ret = devm_add_action_or_reset(dev, afe4403_regulator_disable, afe->regulator);
if (ret) {
- dev_err(afe->dev, "Unable to add regulator disable action\n");
+ dev_err(dev, "Unable to add regulator disable action\n");
return ret;
}
ret = regmap_write(afe->regmap, AFE440X_CONTROL0,
AFE440X_CONTROL0_SW_RESET);
if (ret) {
- dev_err(afe->dev, "Unable to reset device\n");
+ dev_err(dev, "Unable to reset device\n");
return ret;
}
ret = regmap_multi_reg_write(afe->regmap, afe4403_reg_sequences,
ARRAY_SIZE(afe4403_reg_sequences));
if (ret) {
- dev_err(afe->dev, "Unable to set register defaults\n");
+ dev_err(dev, "Unable to set register defaults\n");
return ret;
}
@@ -527,45 +525,43 @@ static int afe4403_probe(struct spi_device *spi)
indio_dev->info = &afe4403_iio_info;
if (afe->irq > 0) {
- afe->trig = devm_iio_trigger_alloc(afe->dev,
+ afe->trig = devm_iio_trigger_alloc(dev,
"%s-dev%d",
indio_dev->name,
iio_device_id(indio_dev));
- if (!afe->trig) {
- dev_err(afe->dev, "Unable to allocate IIO trigger\n");
+ if (!afe->trig)
return -ENOMEM;
- }
iio_trigger_set_drvdata(afe->trig, indio_dev);
- ret = devm_iio_trigger_register(afe->dev, afe->trig);
+ ret = devm_iio_trigger_register(dev, afe->trig);
if (ret) {
- dev_err(afe->dev, "Unable to register IIO trigger\n");
+ dev_err(dev, "Unable to register IIO trigger\n");
return ret;
}
- ret = devm_request_threaded_irq(afe->dev, afe->irq,
+ ret = devm_request_threaded_irq(dev, afe->irq,
iio_trigger_generic_data_rdy_poll,
NULL, IRQF_ONESHOT,
AFE4403_DRIVER_NAME,
afe->trig);
if (ret) {
- dev_err(afe->dev, "Unable to request IRQ\n");
+ dev_err(dev, "Unable to request IRQ\n");
return ret;
}
}
- ret = devm_iio_triggered_buffer_setup(afe->dev, indio_dev,
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
&iio_pollfunc_store_time,
afe4403_trigger_handler, NULL);
if (ret) {
- dev_err(afe->dev, "Unable to setup buffer\n");
+ dev_err(dev, "Unable to setup buffer\n");
return ret;
}
- ret = devm_iio_device_register(afe->dev, indio_dev);
+ ret = devm_iio_device_register(dev, indio_dev);
if (ret) {
- dev_err(afe->dev, "Unable to register IIO device\n");
+ dev_err(dev, "Unable to register IIO device\n");
return ret;
}
diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
index b2727effecaa..768d794e574b 100644
--- a/drivers/iio/health/afe4404.c
+++ b/drivers/iio/health/afe4404.c
@@ -77,7 +77,6 @@ static const struct reg_field afe4404_reg_fields[] = {
/**
* struct afe4404_data - AFE4404 device instance data
- * @dev: Device structure
* @regmap: Register map of the device
* @fields: Register fields of the device
* @regulator: Pointer to the regulator for the IC
@@ -86,7 +85,6 @@ static const struct reg_field afe4404_reg_fields[] = {
* @buffer: Used to construct a scan to push to the iio buffer.
*/
struct afe4404_data {
- struct device *dev;
struct regmap *regmap;
struct regmap_field *fields[F_MAX_FIELDS];
struct regulator *regulator;
@@ -468,62 +466,62 @@ static DEFINE_SIMPLE_DEV_PM_OPS(afe4404_pm_ops, afe4404_suspend,
static int afe4404_probe(struct i2c_client *client)
{
+ struct device *dev = &client->dev;
struct iio_dev *indio_dev;
struct afe4404_data *afe;
int i, ret;
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*afe));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*afe));
if (!indio_dev)
return -ENOMEM;
afe = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
- afe->dev = &client->dev;
afe->irq = client->irq;
afe->regmap = devm_regmap_init_i2c(client, &afe4404_regmap_config);
if (IS_ERR(afe->regmap)) {
- dev_err(afe->dev, "Unable to allocate register map\n");
+ dev_err(dev, "Unable to allocate register map\n");
return PTR_ERR(afe->regmap);
}
for (i = 0; i < F_MAX_FIELDS; i++) {
- afe->fields[i] = devm_regmap_field_alloc(afe->dev, afe->regmap,
+ afe->fields[i] = devm_regmap_field_alloc(dev, afe->regmap,
afe4404_reg_fields[i]);
if (IS_ERR(afe->fields[i])) {
- dev_err(afe->dev, "Unable to allocate regmap fields\n");
+ dev_err(dev, "Unable to allocate regmap fields\n");
return PTR_ERR(afe->fields[i]);
}
}
- afe->regulator = devm_regulator_get(afe->dev, "tx_sup");
+ afe->regulator = devm_regulator_get(dev, "tx_sup");
if (IS_ERR(afe->regulator))
- return dev_err_probe(afe->dev, PTR_ERR(afe->regulator),
+ return dev_err_probe(dev, PTR_ERR(afe->regulator),
"Unable to get regulator\n");
ret = regulator_enable(afe->regulator);
if (ret) {
- dev_err(afe->dev, "Unable to enable regulator\n");
+ dev_err(dev, "Unable to enable regulator\n");
return ret;
}
- ret = devm_add_action_or_reset(afe->dev, afe4404_regulator_disable, afe->regulator);
+ ret = devm_add_action_or_reset(dev, afe4404_regulator_disable, afe->regulator);
if (ret) {
- dev_err(afe->dev, "Unable to enable regulator\n");
+ dev_err(dev, "Unable to enable regulator\n");
return ret;
}
ret = regmap_write(afe->regmap, AFE440X_CONTROL0,
AFE440X_CONTROL0_SW_RESET);
if (ret) {
- dev_err(afe->dev, "Unable to reset device\n");
+ dev_err(dev, "Unable to reset device\n");
return ret;
}
ret = regmap_multi_reg_write(afe->regmap, afe4404_reg_sequences,
ARRAY_SIZE(afe4404_reg_sequences));
if (ret) {
- dev_err(afe->dev, "Unable to set register defaults\n");
+ dev_err(dev, "Unable to set register defaults\n");
return ret;
}
@@ -534,45 +532,43 @@ static int afe4404_probe(struct i2c_client *client)
indio_dev->info = &afe4404_iio_info;
if (afe->irq > 0) {
- afe->trig = devm_iio_trigger_alloc(afe->dev,
+ afe->trig = devm_iio_trigger_alloc(dev,
"%s-dev%d",
indio_dev->name,
iio_device_id(indio_dev));
- if (!afe->trig) {
- dev_err(afe->dev, "Unable to allocate IIO trigger\n");
+ if (!afe->trig)
return -ENOMEM;
- }
iio_trigger_set_drvdata(afe->trig, indio_dev);
- ret = devm_iio_trigger_register(afe->dev, afe->trig);
+ ret = devm_iio_trigger_register(dev, afe->trig);
if (ret) {
- dev_err(afe->dev, "Unable to register IIO trigger\n");
+ dev_err(dev, "Unable to register IIO trigger\n");
return ret;
}
- ret = devm_request_threaded_irq(afe->dev, afe->irq,
+ ret = devm_request_threaded_irq(dev, afe->irq,
iio_trigger_generic_data_rdy_poll,
NULL, IRQF_ONESHOT,
AFE4404_DRIVER_NAME,
afe->trig);
if (ret) {
- dev_err(afe->dev, "Unable to request IRQ\n");
+ dev_err(dev, "Unable to request IRQ\n");
return ret;
}
}
- ret = devm_iio_triggered_buffer_setup(afe->dev, indio_dev,
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
&iio_pollfunc_store_time,
afe4404_trigger_handler, NULL);
if (ret) {
- dev_err(afe->dev, "Unable to setup buffer\n");
+ dev_err(dev, "Unable to setup buffer\n");
return ret;
}
- ret = devm_iio_device_register(afe->dev, indio_dev);
+ ret = devm_iio_device_register(dev, indio_dev);
if (ret) {
- dev_err(afe->dev, "Unable to register IIO device\n");
+ dev_err(dev, "Unable to register IIO device\n");
return ret;
}
diff --git a/drivers/iio/humidity/am2315.c b/drivers/iio/humidity/am2315.c
index f021c3e6d886..02ca23eb8991 100644
--- a/drivers/iio/humidity/am2315.c
+++ b/drivers/iio/humidity/am2315.c
@@ -224,10 +224,8 @@ static int am2315_probe(struct i2c_client *client)
struct am2315_data *data;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
- if (!indio_dev) {
- dev_err(&client->dev, "iio allocation failed!\n");
+ if (!indio_dev)
return -ENOMEM;
- }
data = iio_priv(indio_dev);
data->client = client;
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
index 73d2033954e7..980cb946bbf7 100644
--- a/drivers/iio/humidity/dht11.c
+++ b/drivers/iio/humidity/dht11.c
@@ -294,10 +294,8 @@ static int dht11_probe(struct platform_device *pdev)
struct iio_dev *iio;
iio = devm_iio_device_alloc(dev, sizeof(*dht11));
- if (!iio) {
- dev_err(dev, "Failed to allocate IIO device\n");
+ if (!iio)
return -ENOMEM;
- }
dht11 = iio_priv(iio);
dht11->dev = dev;
diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c
index 924395b7e3b4..ab39bea1e729 100644
--- a/drivers/iio/imu/adis16475.c
+++ b/drivers/iio/imu/adis16475.c
@@ -1930,7 +1930,6 @@ static int adis16475_config_irq_pin(struct adis16475 *st)
return 0;
}
-
static int adis16475_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
diff --git a/drivers/iio/imu/bmi270/bmi270_i2c.c b/drivers/iio/imu/bmi270/bmi270_i2c.c
index c77839b03a96..b909a421ad01 100644
--- a/drivers/iio/imu/bmi270/bmi270_i2c.c
+++ b/drivers/iio/imu/bmi270/bmi270_i2c.c
@@ -41,6 +41,8 @@ static const struct i2c_device_id bmi270_i2c_id[] = {
static const struct acpi_device_id bmi270_acpi_match[] = {
/* GPD Win Mini, Aya Neo AIR Pro, OXP Mini Pro, etc. */
{ "BMI0160", (kernel_ulong_t)&bmi260_chip_info },
+ /* GPD Win Max 2 2023 (since BIOS v0.40), etc. */
+ { "BMI0260", (kernel_ulong_t)&bmi260_chip_info },
{ }
};
diff --git a/drivers/iio/imu/bmi323/bmi323_core.c b/drivers/iio/imu/bmi323/bmi323_core.c
index fc54d464a3ae..6bcb9a436581 100644
--- a/drivers/iio/imu/bmi323/bmi323_core.c
+++ b/drivers/iio/imu/bmi323/bmi323_core.c
@@ -2112,8 +2112,7 @@ int bmi323_core_probe(struct device *dev)
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
- return dev_err_probe(dev, -ENOMEM,
- "Failed to allocate device\n");
+ return -ENOMEM;
ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(regulator_names),
regulator_names);
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600.h b/drivers/iio/imu/inv_icm42600/inv_icm42600.h
index 1430ab4f1dea..c8b48a5c5ed0 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600.h
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600.h
@@ -167,7 +167,6 @@ struct inv_icm42600_state {
enum inv_icm42600_chip chip;
const char *name;
struct regmap *map;
- struct regulator *vdd_supply;
struct regulator *vddio_supply;
int irq;
struct iio_mount_matrix orientation;
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
index 7a28051330b7..54760d8f92a2 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
@@ -315,7 +315,6 @@ static int inv_icm42600_accel_read_sensor(struct iio_dev *indio_dev,
ret = -EINVAL;
exit:
mutex_unlock(&st->lock);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
}
@@ -562,12 +561,10 @@ static int inv_icm42600_accel_write_scale(struct iio_dev *indio_dev,
conf.fs = idx / 2;
pm_runtime_get_sync(dev);
- mutex_lock(&st->lock);
- ret = inv_icm42600_set_accel_conf(st, &conf, NULL);
+ scoped_guard(mutex, &st->lock)
+ ret = inv_icm42600_set_accel_conf(st, &conf, NULL);
- mutex_unlock(&st->lock);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -675,7 +672,6 @@ static int inv_icm42600_accel_write_odr(struct iio_dev *indio_dev,
out_unlock:
mutex_unlock(&st->lock);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -727,7 +723,6 @@ static int inv_icm42600_accel_read_offset(struct inv_icm42600_state *st,
memcpy(data, st->buffer, sizeof(data));
mutex_unlock(&st->lock);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
if (ret)
return ret;
@@ -865,7 +860,6 @@ static int inv_icm42600_accel_write_offset(struct inv_icm42600_state *st,
out_unlock:
mutex_unlock(&st->lock);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
}
@@ -991,16 +985,11 @@ static int inv_icm42600_accel_hwfifo_set_watermark(struct iio_dev *indio_dev,
unsigned int val)
{
struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev);
- int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
st->fifo.watermark.accel = val;
- ret = inv_icm42600_buffer_update_watermark(st);
-
- mutex_unlock(&st->lock);
-
- return ret;
+ return inv_icm42600_buffer_update_watermark(st);
}
static int inv_icm42600_accel_hwfifo_flush(struct iio_dev *indio_dev,
@@ -1012,15 +1001,13 @@ static int inv_icm42600_accel_hwfifo_flush(struct iio_dev *indio_dev,
if (count == 0)
return 0;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
ret = inv_icm42600_buffer_hwfifo_flush(st, count);
- if (!ret)
- ret = st->fifo.nb.accel;
-
- mutex_unlock(&st->lock);
+ if (ret)
+ return ret;
- return ret;
+ return st->fifo.nb.accel;
}
static int inv_icm42600_accel_read_event_config(struct iio_dev *indio_dev,
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
index 7c4ed981db04..ada968be954d 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
@@ -5,6 +5,7 @@
#include <linux/kernel.h>
#include <linux/device.h>
+#include <linux/minmax.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -100,7 +101,7 @@ ssize_t inv_icm42600_fifo_decode_packet(const void *packet, const void **accel,
void inv_icm42600_buffer_update_fifo_period(struct inv_icm42600_state *st)
{
- u32 period_gyro, period_accel, period;
+ u32 period_gyro, period_accel;
if (st->fifo.en & INV_ICM42600_SENSOR_GYRO)
period_gyro = inv_icm42600_odr_to_period(st->conf.gyro.odr);
@@ -112,12 +113,7 @@ void inv_icm42600_buffer_update_fifo_period(struct inv_icm42600_state *st)
else
period_accel = U32_MAX;
- if (period_gyro <= period_accel)
- period = period_gyro;
- else
- period = period_accel;
-
- st->fifo.period = period;
+ st->fifo.period = min(period_gyro, period_accel);
}
int inv_icm42600_buffer_set_fifo_en(struct inv_icm42600_state *st,
@@ -204,7 +200,7 @@ int inv_icm42600_buffer_update_watermark(struct inv_icm42600_state *st)
{
size_t packet_size, wm_size;
unsigned int wm_gyro, wm_accel, watermark;
- u32 period_gyro, period_accel, period;
+ u32 period_gyro, period_accel;
u32 latency_gyro, latency_accel, latency;
bool restore;
__le16 raw_wm;
@@ -237,13 +233,8 @@ int inv_icm42600_buffer_update_watermark(struct inv_icm42600_state *st)
latency = latency_gyro - (latency_accel % latency_gyro);
else
latency = latency_accel - (latency_gyro % latency_accel);
- /* use the shortest period */
- if (period_gyro <= period_accel)
- period = period_gyro;
- else
- period = period_accel;
/* all this works because the periods are multiples of each other */
- watermark = latency / period;
+ watermark = latency / min(period_gyro, period_accel);
if (watermark < 1)
watermark = 1;
/* update effective watermark */
@@ -292,9 +283,8 @@ static int inv_icm42600_buffer_preenable(struct iio_dev *indio_dev)
pm_runtime_get_sync(dev);
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
inv_sensors_timestamp_reset(ts);
- mutex_unlock(&st->lock);
return 0;
}
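
guard(mutex) from <linux/cleanup.h> ties the unlock to scope exit, so
every early return drops the lock automatically and the out_unlock
style labels in the hunks below can go away. The shape of the pattern,
sketched with a hypothetical do_configure() helper:

    static int example(struct inv_icm42600_state *st)
    {
    	guard(mutex)(&st->lock);

    	if (st->fifo.on)
    		return 0;		/* mutex released here */

    	return do_configure(st);	/* and on this path too */
    }
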
@@ -308,43 +298,39 @@ static int inv_icm42600_buffer_postenable(struct iio_dev *indio_dev)
struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev);
int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
- /* exit if FIFO is already on */
if (st->fifo.on) {
- ret = 0;
- goto out_on;
+ st->fifo.on++;
+ return 0;
}
/* set FIFO threshold interrupt */
ret = regmap_set_bits(st->map, INV_ICM42600_REG_INT_SOURCE0,
INV_ICM42600_INT_SOURCE0_FIFO_THS_INT1_EN);
if (ret)
- goto out_unlock;
+ return ret;
/* flush FIFO data */
ret = regmap_write(st->map, INV_ICM42600_REG_SIGNAL_PATH_RESET,
INV_ICM42600_SIGNAL_PATH_RESET_FIFO_FLUSH);
if (ret)
- goto out_unlock;
+ return ret;
/* set FIFO in streaming mode */
ret = regmap_write(st->map, INV_ICM42600_REG_FIFO_CONFIG,
INV_ICM42600_FIFO_CONFIG_STREAM);
if (ret)
- goto out_unlock;
+ return ret;
/* workaround: first read of FIFO count after reset is always 0 */
ret = regmap_bulk_read(st->map, INV_ICM42600_REG_FIFO_COUNT, st->buffer, 2);
if (ret)
- goto out_unlock;
+ return ret;
-out_on:
- /* increase FIFO on counter */
st->fifo.on++;
-out_unlock:
- mutex_unlock(&st->lock);
- return ret;
+
+ return 0;
}
static int inv_icm42600_buffer_predisable(struct iio_dev *indio_dev)
@@ -352,38 +338,34 @@ static int inv_icm42600_buffer_predisable(struct iio_dev *indio_dev)
struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev);
int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
- /* exit if there are several sensors using the FIFO */
if (st->fifo.on > 1) {
- ret = 0;
- goto out_off;
+ st->fifo.on--;
+ return 0;
}
/* set FIFO in bypass mode */
ret = regmap_write(st->map, INV_ICM42600_REG_FIFO_CONFIG,
INV_ICM42600_FIFO_CONFIG_BYPASS);
if (ret)
- goto out_unlock;
+ return ret;
/* flush FIFO data */
ret = regmap_write(st->map, INV_ICM42600_REG_SIGNAL_PATH_RESET,
INV_ICM42600_SIGNAL_PATH_RESET_FIFO_FLUSH);
if (ret)
- goto out_unlock;
+ return ret;
/* disable FIFO threshold interrupt */
ret = regmap_clear_bits(st->map, INV_ICM42600_REG_INT_SOURCE0,
INV_ICM42600_INT_SOURCE0_FIFO_THS_INT1_EN);
if (ret)
- goto out_unlock;
+ return ret;
-out_off:
- /* decrease FIFO on counter */
st->fifo.on--;
-out_unlock:
- mutex_unlock(&st->lock);
- return ret;
+
+ return 0;
}
static int inv_icm42600_buffer_postdisable(struct iio_dev *indio_dev)
@@ -439,7 +421,6 @@ out_unlock:
if (sleep)
msleep(sleep);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
index a4d42e7e2180..76eb22488e5f 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
@@ -439,18 +439,13 @@ int inv_icm42600_debugfs_reg(struct iio_dev *indio_dev, unsigned int reg,
unsigned int writeval, unsigned int *readval)
{
struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev);
- int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
if (readval)
- ret = regmap_read(st->map, reg, readval);
- else
- ret = regmap_write(st->map, reg, writeval);
+ return regmap_read(st->map, reg, readval);
- mutex_unlock(&st->lock);
-
- return ret;
+ return regmap_write(st->map, reg, writeval);
}
static int inv_icm42600_set_conf(struct inv_icm42600_state *st,
@@ -697,34 +692,15 @@ static int inv_icm42600_enable_regulator_vddio(struct inv_icm42600_state *st)
return 0;
}
-static void inv_icm42600_disable_vdd_reg(void *_data)
-{
- struct inv_icm42600_state *st = _data;
- const struct device *dev = regmap_get_device(st->map);
- int ret;
-
- ret = regulator_disable(st->vdd_supply);
- if (ret)
- dev_err(dev, "failed to disable vdd error %d\n", ret);
-}
-
static void inv_icm42600_disable_vddio_reg(void *_data)
{
struct inv_icm42600_state *st = _data;
- const struct device *dev = regmap_get_device(st->map);
- int ret;
-
- ret = regulator_disable(st->vddio_supply);
- if (ret)
- dev_err(dev, "failed to disable vddio error %d\n", ret);
-}
+ struct device *dev = regmap_get_device(st->map);
-static void inv_icm42600_disable_pm(void *_data)
-{
- struct device *dev = _data;
+ if (pm_runtime_status_suspended(dev))
+ return;
- pm_runtime_put_sync(dev);
- pm_runtime_disable(dev);
+ regulator_disable(st->vddio_supply);
}
int inv_icm42600_core_probe(struct regmap *regmap, int chip,
@@ -773,23 +749,17 @@ int inv_icm42600_core_probe(struct regmap *regmap, int chip,
return ret;
}
- st->vdd_supply = devm_regulator_get(dev, "vdd");
- if (IS_ERR(st->vdd_supply))
- return PTR_ERR(st->vdd_supply);
+ ret = devm_regulator_get_enable(dev, "vdd");
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to get vdd regulator\n");
+
+ msleep(INV_ICM42600_POWER_UP_TIME_MS);
st->vddio_supply = devm_regulator_get(dev, "vddio");
if (IS_ERR(st->vddio_supply))
return PTR_ERR(st->vddio_supply);
- ret = regulator_enable(st->vdd_supply);
- if (ret)
- return ret;
- msleep(INV_ICM42600_POWER_UP_TIME_MS);
-
- ret = devm_add_action_or_reset(dev, inv_icm42600_disable_vdd_reg, st);
- if (ret)
- return ret;
-
ret = inv_icm42600_enable_regulator_vddio(st);
if (ret)
return ret;
@@ -824,16 +794,14 @@ int inv_icm42600_core_probe(struct regmap *regmap, int chip,
return ret;
/* setup runtime power management */
- ret = pm_runtime_set_active(dev);
+ ret = devm_pm_runtime_set_active_enabled(dev);
if (ret)
return ret;
- pm_runtime_get_noresume(dev);
- pm_runtime_enable(dev);
+
pm_runtime_set_autosuspend_delay(dev, INV_ICM42600_SUSPEND_DELAY_MS);
pm_runtime_use_autosuspend(dev);
- pm_runtime_put(dev);
- return devm_add_action_or_reset(dev, inv_icm42600_disable_pm, dev);
+ return ret;
}
EXPORT_SYMBOL_NS_GPL(inv_icm42600_core_probe, "IIO_ICM42600");
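
Two devm conversions carry the cleanup burden here: the vdd supply
switches to devm_regulator_get_enable(), which keeps the regulator
enabled until driver unbind with no explicit disable callback, and the
runtime PM setup collapses into devm_pm_runtime_set_active_enabled(),
which undoes the set_active/enable pair automatically. The probe tail
reduces to:

    ret = devm_regulator_get_enable(dev, "vdd");
    if (ret)
    	return dev_err_probe(dev, ret, "Failed to get vdd regulator\n");

    ret = devm_pm_runtime_set_active_enabled(dev);
    if (ret)
    	return ret;

    pm_runtime_set_autosuspend_delay(dev, INV_ICM42600_SUSPEND_DELAY_MS);
    pm_runtime_use_autosuspend(dev);
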
@@ -849,22 +817,20 @@ static int inv_icm42600_suspend(struct device *dev)
int accel_conf;
int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
st->suspended.gyro = st->conf.gyro.mode;
st->suspended.accel = st->conf.accel.mode;
st->suspended.temp = st->conf.temp_en;
- if (pm_runtime_suspended(dev)) {
- ret = 0;
- goto out_unlock;
- }
+ if (pm_runtime_suspended(dev))
+ return 0;
/* disable FIFO data streaming */
if (st->fifo.on) {
ret = regmap_write(st->map, INV_ICM42600_REG_FIFO_CONFIG,
INV_ICM42600_FIFO_CONFIG_BYPASS);
if (ret)
- goto out_unlock;
+ return ret;
}
/* keep chip on and wake-up capable if APEX and wakeup on */
@@ -880,7 +846,7 @@ static int inv_icm42600_suspend(struct device *dev)
if (st->apex.wom.enable) {
ret = inv_icm42600_disable_wom(st);
if (ret)
- goto out_unlock;
+ return ret;
}
accel_conf = INV_ICM42600_SENSOR_MODE_OFF;
}
@@ -888,15 +854,13 @@ static int inv_icm42600_suspend(struct device *dev)
ret = inv_icm42600_set_pwr_mgmt0(st, INV_ICM42600_SENSOR_MODE_OFF,
accel_conf, false, NULL);
if (ret)
- goto out_unlock;
+ return ret;
/* disable vddio regulator if chip is sleeping */
if (!wakeup)
regulator_disable(st->vddio_supply);
-out_unlock:
- mutex_unlock(&st->lock);
- return ret;
+ return 0;
}
/*
@@ -912,7 +876,10 @@ static int inv_icm42600_resume(struct device *dev)
bool wakeup;
int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
+
+ if (pm_runtime_suspended(dev))
+ return 0;
/* check wakeup capability */
accel_dev = &st->indio_accel->dev;
@@ -924,25 +891,21 @@ static int inv_icm42600_resume(struct device *dev)
} else {
ret = inv_icm42600_enable_regulator_vddio(st);
if (ret)
- goto out_unlock;
+ return ret;
}
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
-
/* restore sensors state */
ret = inv_icm42600_set_pwr_mgmt0(st, st->suspended.gyro,
st->suspended.accel,
st->suspended.temp, NULL);
if (ret)
- goto out_unlock;
+ return ret;
/* restore APEX features if disabled */
if (!wakeup && st->apex.wom.enable) {
ret = inv_icm42600_enable_wom(st);
if (ret)
- goto out_unlock;
+ return ret;
}
/* restore FIFO data streaming */
@@ -953,9 +916,7 @@ static int inv_icm42600_resume(struct device *dev)
INV_ICM42600_FIFO_CONFIG_STREAM);
}
-out_unlock:
- mutex_unlock(&st->lock);
- return ret;
+ return 0;
}
/* Runtime suspend will turn off sensors that are enabled by iio devices. */
@@ -964,34 +925,28 @@ static int inv_icm42600_runtime_suspend(struct device *dev)
struct inv_icm42600_state *st = dev_get_drvdata(dev);
int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
/* disable all sensors */
ret = inv_icm42600_set_pwr_mgmt0(st, INV_ICM42600_SENSOR_MODE_OFF,
INV_ICM42600_SENSOR_MODE_OFF, false,
NULL);
if (ret)
- goto error_unlock;
+ return ret;
regulator_disable(st->vddio_supply);
-error_unlock:
- mutex_unlock(&st->lock);
- return ret;
+ return 0;
}
/* Sensors are enabled by iio devices, no need to turn them back on here. */
static int inv_icm42600_runtime_resume(struct device *dev)
{
struct inv_icm42600_state *st = dev_get_drvdata(dev);
- int ret;
-
- mutex_lock(&st->lock);
- ret = inv_icm42600_enable_regulator_vddio(st);
+ guard(mutex)(&st->lock);
- mutex_unlock(&st->lock);
- return ret;
+ return inv_icm42600_enable_regulator_vddio(st);
}
EXPORT_NS_GPL_DEV_PM_OPS(inv_icm42600_pm_ops, IIO_ICM42600) = {
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
index 9ba6f13628e6..7ef0a25ec74f 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
@@ -184,7 +184,6 @@ static int inv_icm42600_gyro_read_sensor(struct inv_icm42600_state *st,
ret = -EINVAL;
exit:
mutex_unlock(&st->lock);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
}
@@ -278,12 +277,10 @@ static int inv_icm42600_gyro_write_scale(struct iio_dev *indio_dev,
conf.fs = idx / 2;
pm_runtime_get_sync(dev);
- mutex_lock(&st->lock);
- ret = inv_icm42600_set_gyro_conf(st, &conf, NULL);
+ scoped_guard(mutex, &st->lock)
+ ret = inv_icm42600_set_gyro_conf(st, &conf, NULL);
- mutex_unlock(&st->lock);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -378,7 +375,6 @@ static int inv_icm42600_gyro_write_odr(struct iio_dev *indio_dev,
out_unlock:
mutex_unlock(&st->lock);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -430,7 +426,6 @@ static int inv_icm42600_gyro_read_offset(struct inv_icm42600_state *st,
memcpy(data, st->buffer, sizeof(data));
mutex_unlock(&st->lock);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
if (ret)
return ret;
@@ -567,7 +562,6 @@ static int inv_icm42600_gyro_write_offset(struct inv_icm42600_state *st,
out_unlock:
mutex_unlock(&st->lock);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
}
@@ -693,16 +687,11 @@ static int inv_icm42600_gyro_hwfifo_set_watermark(struct iio_dev *indio_dev,
unsigned int val)
{
struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev);
- int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
st->fifo.watermark.gyro = val;
- ret = inv_icm42600_buffer_update_watermark(st);
-
- mutex_unlock(&st->lock);
-
- return ret;
+ return inv_icm42600_buffer_update_watermark(st);
}
static int inv_icm42600_gyro_hwfifo_flush(struct iio_dev *indio_dev,
@@ -714,15 +703,13 @@ static int inv_icm42600_gyro_hwfifo_flush(struct iio_dev *indio_dev,
if (count == 0)
return 0;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
ret = inv_icm42600_buffer_hwfifo_flush(st, count);
- if (!ret)
- ret = st->fifo.nb.gyro;
-
- mutex_unlock(&st->lock);
+ if (ret)
+ return ret;
- return ret;
+ return st->fifo.nb.gyro;
}
static const struct iio_info inv_icm42600_gyro_info = {
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c
index 8b15afca498c..30f6a9595eea 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c
@@ -32,12 +32,15 @@ static int inv_icm42600_temp_read(struct inv_icm42600_state *st, s16 *temp)
goto exit;
*temp = (s16)be16_to_cpup(raw);
+ /*
+ * Temperature data is invalid if both accel and gyro are off.
+ * Return -EBUSY in this case.
+ */
if (*temp == INV_ICM42600_DATA_INVALID)
- ret = -EINVAL;
+ ret = -EBUSY;
exit:
mutex_unlock(&st->lock);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 39eb516acc73..b2fa1f4957a5 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -735,7 +735,6 @@ static int inv_mpu6050_read_channel_data(struct iio_dev *indio_dev,
break;
}
- pm_runtime_mark_last_busy(pdev);
pm_runtime_put_autosuspend(pdev);
return ret;
@@ -938,7 +937,6 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
break;
}
- pm_runtime_mark_last_busy(pdev);
pm_runtime_put_autosuspend(pdev);
error_write_raw_unlock:
mutex_unlock(&st->lock);
@@ -1146,14 +1144,12 @@ static int inv_mpu6050_enable_wom(struct inv_mpu6050_state *st, bool en)
st->chip_config.wom_en = false;
}
- pm_runtime_mark_last_busy(pdev);
pm_runtime_put_autosuspend(pdev);
}
return result;
error_suspend:
- pm_runtime_mark_last_busy(pdev);
pm_runtime_put_autosuspend(pdev);
return result;
}
@@ -1249,7 +1245,6 @@ static int inv_mpu6050_write_event_value(struct iio_dev *indio_dev,
value = (u64)val * 1000000ULL + (u64)val2;
result = inv_mpu6050_set_wom_threshold(st, value, INV_MPU6050_FREQ_DIVIDER(st));
- pm_runtime_mark_last_busy(pdev);
pm_runtime_put_autosuspend(pdev);
return result;
@@ -1357,7 +1352,6 @@ inv_mpu6050_fifo_rate_store(struct device *dev, struct device_attribute *attr,
if (result)
goto fifo_rate_fail_power_off;
- pm_runtime_mark_last_busy(pdev);
fifo_rate_fail_power_off:
pm_runtime_put_autosuspend(pdev);
fifo_rate_fail_unlock:
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
index 5b1088cc3704..10a473342075 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
@@ -194,7 +194,6 @@ static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable)
result = inv_mpu6050_prepare_fifo(st, false);
if (result)
goto error_power_off;
- pm_runtime_mark_last_busy(pdev);
pm_runtime_put_autosuspend(pdev);
}
diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c
index 55c82891e08c..3cd91d8a89ee 100644
--- a/drivers/iio/imu/kmx61.c
+++ b/drivers/iio/imu/kmx61.c
@@ -747,12 +747,10 @@ static int kmx61_set_power_state(struct kmx61_data *data, bool on, u8 device)
data->mag_ps = on;
}
- if (on) {
+ if (on)
ret = pm_runtime_resume_and_get(&data->client->dev);
- } else {
- pm_runtime_mark_last_busy(&data->client->dev);
+ else
ret = pm_runtime_put_autosuspend(&data->client->dev);
- }
if (ret < 0) {
dev_err(&data->client->dev,
"Failed: kmx61_set_power_state for %d, ret %d\n",
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index c65ad49829e7..d8cb4b0218d5 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -2035,10 +2035,10 @@ st_lsm6dsx_sysfs_sampling_frequency_avail(struct device *dev,
odr_table = &sensor->hw->settings->odr_table[sensor->id];
for (i = 0; i < odr_table->odr_len; i++)
- len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%03d ",
- odr_table->odr_avl[i].milli_hz / 1000,
- odr_table->odr_avl[i].milli_hz % 1000);
- buf[len - 1] = '\n';
+ len += sysfs_emit_at(buf, len, "%d.%03d%c",
+ odr_table->odr_avl[i].milli_hz / 1000,
+ odr_table->odr_avl[i].milli_hz % 1000,
+ (i == odr_table->odr_len - 1) ? '\n' : ' ');
return len;
}
@@ -2054,9 +2054,9 @@ static ssize_t st_lsm6dsx_sysfs_scale_avail(struct device *dev,
fs_table = &hw->settings->fs_table[sensor->id];
for (i = 0; i < fs_table->fs_len; i++)
- len += scnprintf(buf + len, PAGE_SIZE - len, "0.%09u ",
- fs_table->fs_avl[i].gain);
- buf[len - 1] = '\n';
+ len += sysfs_emit_at(buf, len, "0.%09u%c",
+ fs_table->fs_avl[i].gain,
+ (i == fs_table->fs_len - 1) ? '\n' : ' ');
return len;
}
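
The sysfs_emit_at() conversion also changes how the trailing newline is
produced: instead of overwriting the last separator space after the loop
(buf[len - 1] = '\n'), the format string emits a space between entries and a
newline after the final one. A minimal sketch of the pattern (names are
illustrative; buf is the page-sized buffer a sysfs show callback receives):

#include <linux/sysfs.h>

static ssize_t example_avail_show(char *buf, const int *vals, int n)
{
	int i, len = 0;

	for (i = 0; i < n; i++)
		len += sysfs_emit_at(buf, len, "%d%c", vals[i],
				     (i == n - 1) ? '\n' : ' ');
	return len;
}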
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 159d6c5ca3ce..88c3d585a1bd 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -97,6 +97,7 @@ static const char * const iio_chan_type_name_spec[] = {
[IIO_COLORTEMP] = "colortemp",
[IIO_CHROMATICITY] = "chromaticity",
[IIO_ATTENTION] = "attention",
+ [IIO_ALTCURRENT] = "altcurrent",
};
static const char * const iio_modifier_names[] = {
@@ -152,6 +153,10 @@ static const char * const iio_modifier_names[] = {
[IIO_MOD_PITCH] = "pitch",
[IIO_MOD_YAW] = "yaw",
[IIO_MOD_ROLL] = "roll",
+ [IIO_MOD_RMS] = "rms",
+ [IIO_MOD_ACTIVE] = "active",
+ [IIO_MOD_REACTIVE] = "reactive",
+ [IIO_MOD_APPARENT] = "apparent",
};
/* relies on pairs of these shared then separate */
@@ -189,6 +194,7 @@ static const char * const iio_chan_info_postfix[] = {
[IIO_CHAN_INFO_ZEROPOINT] = "zeropoint",
[IIO_CHAN_INFO_TROUGH] = "trough_raw",
[IIO_CHAN_INFO_CONVDELAY] = "convdelay",
+ [IIO_CHAN_INFO_POWERFACTOR] = "powerfactor",
};
/**
* iio_device_id() - query the unique ID for the device
@@ -790,6 +796,7 @@ static ssize_t iio_format_list(char *buf, const int *vals, int type, int length,
switch (type) {
case IIO_VAL_INT:
+ case IIO_VAL_CHAR:
stride = 1;
break;
default:
@@ -1243,7 +1250,7 @@ static int iio_device_add_channel_label(struct iio_dev *indio_dev,
static int iio_device_add_info_mask_type(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
enum iio_shared_by shared_by,
- const long *infomask)
+ const unsigned long *infomask)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
int i, ret, attrcount = 0;
@@ -1273,7 +1280,7 @@ static int iio_device_add_info_mask_type(struct iio_dev *indio_dev,
static int iio_device_add_info_mask_type_avail(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
enum iio_shared_by shared_by,
- const long *infomask)
+ const unsigned long *infomask)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
int i, ret, attrcount = 0;
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index c174ebb7d5e6..1e5eb5a41271 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -11,6 +11,7 @@
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/slab.h>
+#include <linux/units.h>
#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
@@ -598,6 +599,42 @@ int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
}
EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);
+int iio_multiply_value(int *result, s64 multiplier,
+ unsigned int type, int val, int val2)
+{
+ s64 denominator;
+
+ switch (type) {
+ case IIO_VAL_INT:
+ *result = multiplier * val;
+ return IIO_VAL_INT;
+ case IIO_VAL_INT_PLUS_MICRO:
+ case IIO_VAL_INT_PLUS_NANO:
+ switch (type) {
+ case IIO_VAL_INT_PLUS_MICRO:
+ denominator = MICRO;
+ break;
+ case IIO_VAL_INT_PLUS_NANO:
+ denominator = NANO;
+ break;
+ }
+ *result = multiplier * abs(val);
+ *result += div_s64(multiplier * abs(val2), denominator);
+ if (val < 0 || val2 < 0)
+ *result *= -1;
+ return IIO_VAL_INT;
+ case IIO_VAL_FRACTIONAL:
+ *result = div_s64(multiplier * val, val2);
+ return IIO_VAL_INT;
+ case IIO_VAL_FRACTIONAL_LOG2:
+ *result = (multiplier * val) >> val2;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(iio_multiply_value, "IIO_UNIT_TEST");
+
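A worked example of what iio_multiply_value() computes (illustrative values):
for IIO_VAL_INT_PLUS_MICRO with val = 2, val2 = 500000 (i.e. 2.5) and
multiplier = 10, the result is 10 * 2 + (10 * 500000) / 1000000 = 25; for
IIO_VAL_FRACTIONAL with val = 1, val2 = 3 (i.e. 1/3) and multiplier = 300,
the result is (300 * 1) / 3 = 100. In every supported case the fixed-point
pair collapses to a plain integer, which is why the function returns
IIO_VAL_INT.
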
static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
int raw, int *processed,
unsigned int scale)
@@ -605,6 +642,7 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
int scale_type, scale_val, scale_val2;
int offset_type, offset_val, offset_val2;
s64 raw64 = raw;
+ int ret;
offset_type = iio_channel_read(chan, &offset_val, &offset_val2,
IIO_CHAN_INFO_OFFSET);
@@ -639,40 +677,14 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
* If no channel scaling is available apply consumer scale to
* raw value and return.
*/
- *processed = raw * scale;
+ *processed = raw64 * scale;
return 0;
}
- switch (scale_type) {
- case IIO_VAL_INT:
- *processed = raw64 * scale_val * scale;
- break;
- case IIO_VAL_INT_PLUS_MICRO:
- if (scale_val2 < 0)
- *processed = -raw64 * scale_val * scale;
- else
- *processed = raw64 * scale_val * scale;
- *processed += div_s64(raw64 * (s64)scale_val2 * scale,
- 1000000LL);
- break;
- case IIO_VAL_INT_PLUS_NANO:
- if (scale_val2 < 0)
- *processed = -raw64 * scale_val * scale;
- else
- *processed = raw64 * scale_val * scale;
- *processed += div_s64(raw64 * (s64)scale_val2 * scale,
- 1000000000LL);
- break;
- case IIO_VAL_FRACTIONAL:
- *processed = div_s64(raw64 * (s64)scale_val * scale,
- scale_val2);
- break;
- case IIO_VAL_FRACTIONAL_LOG2:
- *processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
- break;
- default:
- return -EINVAL;
- }
+ ret = iio_multiply_value(processed, raw64 * scale,
+ scale_type, scale_val, scale_val2);
+ if (ret < 0)
+ return ret;
return 0;
}
@@ -714,20 +726,19 @@ int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
unsigned int scale)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
- int ret;
+ int ret, pval, pval2;
guard(mutex)(&iio_dev_opaque->info_exist_lock);
if (!chan->indio_dev->info)
return -ENODEV;
if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
- ret = iio_channel_read(chan, val, NULL,
+ ret = iio_channel_read(chan, &pval, &pval2,
IIO_CHAN_INFO_PROCESSED);
if (ret < 0)
return ret;
- *val *= scale;
- return ret;
+ return iio_multiply_value(val, scale, ret, pval, pval2);
} else {
ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
if (ret < 0)
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 4a7d983c9cd4..ac1408d374c9 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -724,6 +724,19 @@ config VEML6040
To compile this driver as a module, choose M here: the
module will be called veml6040.
+config VEML6046X00
+ tristate "VEML6046X00 RGBIR color sensor"
+ select REGMAP_I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ depends on I2C
+ help
+ Say Y here if you want to build a driver for the Vishay VEML6046X00
+ high accuracy RGBIR color sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called veml6046x00.
+
config VEML6070
tristate "VEML6070 UV A light sensor"
depends on I2C
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index 8229ebe6edc4..c0048e0d5ca8 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -67,6 +67,7 @@ obj-$(CONFIG_VCNL4035) += vcnl4035.o
obj-$(CONFIG_VEML3235) += veml3235.o
obj-$(CONFIG_VEML6030) += veml6030.o
obj-$(CONFIG_VEML6040) += veml6040.o
+obj-$(CONFIG_VEML6046X00) += veml6046x00.o
obj-$(CONFIG_VEML6070) += veml6070.o
obj-$(CONFIG_VEML6075) += veml6075.o
obj-$(CONFIG_VL6180) += vl6180.o
diff --git a/drivers/iio/light/acpi-als.c b/drivers/iio/light/acpi-als.c
index 032e6cae8b80..d5d1a8b9c035 100644
--- a/drivers/iio/light/acpi-als.c
+++ b/drivers/iio/light/acpi-als.c
@@ -49,20 +49,10 @@ static const struct iio_chan_spec acpi_als_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(1),
};
-/*
- * The event buffer contains timestamp and all the data from
- * the ACPI0008 block. There are multiple, but so far we only
- * support _ALI (illuminance): One channel, padding and timestamp.
- */
-#define ACPI_ALS_EVT_BUFFER_SIZE \
- (sizeof(s32) + sizeof(s32) + sizeof(s64))
-
struct acpi_als {
struct acpi_device *device;
struct mutex lock;
struct iio_trigger *trig;
-
- s32 evt_buffer[ACPI_ALS_EVT_BUFFER_SIZE / sizeof(s32)] __aligned(8);
};
/*
@@ -152,7 +142,10 @@ static irqreturn_t acpi_als_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct acpi_als *als = iio_priv(indio_dev);
- s32 *buffer = als->evt_buffer;
+ struct {
+ s32 light;
+ aligned_s64 ts;
+ } scan = { };
s32 val;
int ret;
@@ -161,7 +154,7 @@ static irqreturn_t acpi_als_trigger_handler(int irq, void *p)
ret = acpi_als_read_value(als, ACPI_ALS_ILLUMINANCE, &val);
if (ret < 0)
goto out;
- *buffer = val;
+ scan.light = val;
/*
* When coming from own trigger via polls, set polling function
@@ -174,7 +167,7 @@ static irqreturn_t acpi_als_trigger_handler(int irq, void *p)
if (!pf->timestamp)
pf->timestamp = iio_get_time_ns(indio_dev);
- iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp);
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan), pf->timestamp);
out:
mutex_unlock(&als->lock);
iio_trigger_notify_done(indio_dev->trig);
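
The buffer-handler conversions across these light drivers share one pattern:
the scan buffer moves from driver state onto the handler's stack, declared
with an aligned_s64 timestamp and zero-initialized so struct padding cannot
leak stale data to userspace, and iio_push_to_buffers_with_ts() receives the
exact size for validation. A condensed sketch of the pattern:

struct {
	s32 light;
	aligned_s64 ts;		/* keeps the timestamp naturally aligned */
} scan = { };			/* zeroed, including padding */

scan.light = val;
iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan), pf->timestamp);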
diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c
index cf96e3dd8bc6..edb3d9dc8bed 100644
--- a/drivers/iio/light/adjd_s311.c
+++ b/drivers/iio/light/adjd_s311.c
@@ -54,10 +54,6 @@
struct adjd_s311_data {
struct i2c_client *client;
- struct {
- s16 chans[4];
- aligned_s64 ts;
- } scan;
};
enum adjd_s311_channel_idx {
@@ -120,6 +116,10 @@ static irqreturn_t adjd_s311_trigger_handler(int irq, void *p)
struct adjd_s311_data *data = iio_priv(indio_dev);
s64 time_ns = iio_get_time_ns(indio_dev);
int i, j = 0;
+ struct {
+ s16 chans[4];
+ aligned_s64 ts;
+ } scan = { };
int ret = adjd_s311_req_data(indio_dev);
if (ret < 0)
@@ -131,10 +131,10 @@ static irqreturn_t adjd_s311_trigger_handler(int irq, void *p)
if (ret < 0)
goto done;
- data->scan.chans[j++] = ret & ADJD_S311_DATA_MASK;
+ scan.chans[j++] = ret & ADJD_S311_DATA_MASK;
}
- iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, time_ns);
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan), time_ns);
done:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/light/al3000a.c b/drivers/iio/light/al3000a.c
index 6f301c067045..9871096cbab3 100644
--- a/drivers/iio/light/al3000a.c
+++ b/drivers/iio/light/al3000a.c
@@ -94,7 +94,7 @@ static int al3000a_init(struct al3000a_data *data)
ret = devm_add_action_or_reset(dev, al3000a_set_pwr_off, data);
if (ret)
- return dev_err_probe(dev, ret, "failed to add action\n");
+ return ret;
ret = regmap_write(data->regmap, AL3000A_REG_SYSTEM, AL3000A_CONFIG_RESET);
if (ret)
diff --git a/drivers/iio/light/apds9306.c b/drivers/iio/light/apds9306.c
index f676da245aa7..389125675caa 100644
--- a/drivers/iio/light/apds9306.c
+++ b/drivers/iio/light/apds9306.c
@@ -537,7 +537,6 @@ static int apds9306_read_data(struct apds9306_data *data, int *val, int reg)
*val = get_unaligned_le24(&buff);
- pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
return 0;
@@ -1121,7 +1120,6 @@ static int apds9306_write_event_config(struct iio_dev *indio_dev,
if (ret)
return ret;
- pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
return 0;
@@ -1309,7 +1307,7 @@ static int apds9306_probe(struct i2c_client *client)
ret = devm_add_action_or_reset(dev, apds9306_powerdown, data);
if (ret)
- return dev_err_probe(dev, ret, "failed to add action or reset\n");
+ return ret;
ret = devm_iio_device_register(dev, indio_dev);
if (ret)
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index b92d0fce5aec..79b202c59a0f 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -495,7 +495,6 @@ static int apds9960_set_power_state(struct apds9960_data *data, bool on)
usleep_range(data->als_adc_int_us,
APDS9960_MAX_INT_TIME_IN_US);
} else {
- pm_runtime_mark_last_busy(dev);
ret = pm_runtime_put_autosuspend(dev);
}
diff --git a/drivers/iio/light/as73211.c b/drivers/iio/light/as73211.c
index 68f60dc3c79d..32719f584c47 100644
--- a/drivers/iio/light/as73211.c
+++ b/drivers/iio/light/as73211.c
@@ -639,7 +639,7 @@ static irqreturn_t as73211_trigger_handler(int irq __always_unused, void *p)
struct {
__le16 chan[4];
aligned_s64 ts;
- } scan;
+ } scan = { };
int data_result, ret;
mutex_lock(&data->mutex);
diff --git a/drivers/iio/light/bh1745.c b/drivers/iio/light/bh1745.c
index 4e9bd8f831f7..10b00344bbed 100644
--- a/drivers/iio/light/bh1745.c
+++ b/drivers/iio/light/bh1745.c
@@ -755,8 +755,8 @@ static irqreturn_t bh1745_trigger_handler(int interrupt, void *p)
scan.chans[j++] = value;
}
- iio_push_to_buffers_with_timestamp(indio_dev, &scan,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
+ iio_get_time_ns(indio_dev));
err:
iio_trigger_notify_done(indio_dev->trig);
@@ -814,8 +814,7 @@ static int bh1745_init(struct bh1745_data *data)
ret = devm_add_action_or_reset(dev, bh1745_power_off, data);
if (ret)
- return dev_err_probe(dev, ret,
- "Failed to add action or reset\n");
+ return ret;
return 0;
}
diff --git a/drivers/iio/light/bh1780.c b/drivers/iio/light/bh1780.c
index c7c877d2fe67..5d3c6d5276ba 100644
--- a/drivers/iio/light/bh1780.c
+++ b/drivers/iio/light/bh1780.c
@@ -111,7 +111,6 @@ static int bh1780_read_raw(struct iio_dev *indio_dev,
value = bh1780_read_word(bh1780, BH1780_REG_DLOW);
if (value < 0)
return value;
- pm_runtime_mark_last_busy(&bh1780->client->dev);
pm_runtime_put_autosuspend(&bh1780->client->dev);
*val = value;
diff --git a/drivers/iio/light/gp2ap002.c b/drivers/iio/light/gp2ap002.c
index 42859e5b1089..a0d8a58f2704 100644
--- a/drivers/iio/light/gp2ap002.c
+++ b/drivers/iio/light/gp2ap002.c
@@ -271,7 +271,6 @@ static int gp2ap002_read_raw(struct iio_dev *indio_dev,
}
out:
- pm_runtime_mark_last_busy(gp2ap002->dev);
pm_runtime_put_autosuspend(gp2ap002->dev);
return ret;
@@ -353,7 +352,6 @@ static int gp2ap002_write_event_config(struct iio_dev *indio_dev,
pm_runtime_get_sync(gp2ap002->dev);
gp2ap002->enabled = true;
} else {
- pm_runtime_mark_last_busy(gp2ap002->dev);
pm_runtime_put_autosuspend(gp2ap002->dev);
gp2ap002->enabled = false;
}
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index 830e5ae7f34a..384572844162 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -262,8 +262,9 @@ static int als_proc_event(struct hid_sensor_hub_device *hsdev,
if (!als_state->timestamp)
als_state->timestamp = iio_get_time_ns(indio_dev);
- iio_push_to_buffers_with_timestamp(indio_dev, &als_state->scan,
- als_state->timestamp);
+ iio_push_to_buffers_with_ts(indio_dev, &als_state->scan,
+ sizeof(als_state->scan),
+ als_state->timestamp);
als_state->timestamp = 0;
}
diff --git a/drivers/iio/light/isl29028.c b/drivers/iio/light/isl29028.c
index 0e4284823d44..374bccad9119 100644
--- a/drivers/iio/light/isl29028.c
+++ b/drivers/iio/light/isl29028.c
@@ -336,16 +336,11 @@ static int isl29028_ir_get(struct isl29028_chip *chip, int *ir_data)
static int isl29028_set_pm_runtime_busy(struct isl29028_chip *chip, bool on)
{
struct device *dev = regmap_get_device(chip->regmap);
- int ret;
- if (on) {
- ret = pm_runtime_resume_and_get(dev);
- } else {
- pm_runtime_mark_last_busy(dev);
- ret = pm_runtime_put_autosuspend(dev);
- }
+ if (on)
+ return pm_runtime_resume_and_get(dev);
- return ret;
+ return pm_runtime_put_autosuspend(dev);
}
/* Channel IO */
diff --git a/drivers/iio/light/isl29125.c b/drivers/iio/light/isl29125.c
index 6bc23b164cc5..3acb8a4f1d12 100644
--- a/drivers/iio/light/isl29125.c
+++ b/drivers/iio/light/isl29125.c
@@ -51,11 +51,6 @@
struct isl29125_data {
struct i2c_client *client;
u8 conf1;
- /* Ensure timestamp is naturally aligned */
- struct {
- u16 chans[3];
- aligned_s64 timestamp;
- } scan;
};
#define ISL29125_CHANNEL(_color, _si) { \
@@ -179,6 +174,11 @@ static irqreturn_t isl29125_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct isl29125_data *data = iio_priv(indio_dev);
int i, j = 0;
+ /* Ensure timestamp is naturally aligned */
+ struct {
+ u16 chans[3];
+ aligned_s64 timestamp;
+ } scan = { };
iio_for_each_active_channel(indio_dev, i) {
int ret = i2c_smbus_read_word_data(data->client,
@@ -186,10 +186,10 @@ static irqreturn_t isl29125_trigger_handler(int irq, void *p)
if (ret < 0)
goto done;
- data->scan.chans[j++] = ret;
+ scan.chans[j++] = ret;
}
- iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
iio_get_time_ns(indio_dev));
done:
diff --git a/drivers/iio/light/ltr390.c b/drivers/iio/light/ltr390.c
index ee59bbb8aa09..a2b804e9089a 100644
--- a/drivers/iio/light/ltr390.c
+++ b/drivers/iio/light/ltr390.c
@@ -26,6 +26,7 @@
#include <linux/math.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/iio/iio.h>
@@ -38,12 +39,21 @@
#define LTR390_ALS_UVS_GAIN 0x05
#define LTR390_PART_ID 0x06
#define LTR390_MAIN_STATUS 0x07
+
#define LTR390_ALS_DATA 0x0D
+#define LTR390_ALS_DATA_BYTE(n) (LTR390_ALS_DATA + (n))
+
#define LTR390_UVS_DATA 0x10
+#define LTR390_UVS_DATA_BYTE(n) (LTR390_UVS_DATA + (n))
+
#define LTR390_INT_CFG 0x19
#define LTR390_INT_PST 0x1A
+
#define LTR390_THRESH_UP 0x21
+#define LTR390_THRESH_UP_BYTE(n) (LTR390_THRESH_UP + (n))
+
#define LTR390_THRESH_LOW 0x24
+#define LTR390_THRESH_LOW_BYTE(n) (LTR390_THRESH_LOW + (n))
#define LTR390_PART_NUMBER_ID 0xb
#define LTR390_ALS_UVS_GAIN_MASK GENMASK(2, 0)
@@ -96,6 +106,32 @@ struct ltr390_data {
enum ltr390_mode mode;
int gain;
int int_time_us;
+ bool irq_enabled;
+};
+
+static const struct regmap_range ltr390_readable_reg_ranges[] = {
+ regmap_reg_range(LTR390_MAIN_CTRL, LTR390_MAIN_CTRL),
+ regmap_reg_range(LTR390_ALS_UVS_MEAS_RATE, LTR390_MAIN_STATUS),
+ regmap_reg_range(LTR390_ALS_DATA_BYTE(0), LTR390_UVS_DATA_BYTE(2)),
+ regmap_reg_range(LTR390_INT_CFG, LTR390_INT_PST),
+ regmap_reg_range(LTR390_THRESH_UP_BYTE(0), LTR390_THRESH_LOW_BYTE(2)),
+};
+
+static const struct regmap_access_table ltr390_readable_reg_table = {
+ .yes_ranges = ltr390_readable_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ltr390_readable_reg_ranges),
+};
+
+static const struct regmap_range ltr390_writeable_reg_ranges[] = {
+ regmap_reg_range(LTR390_MAIN_CTRL, LTR390_MAIN_CTRL),
+ regmap_reg_range(LTR390_ALS_UVS_MEAS_RATE, LTR390_ALS_UVS_GAIN),
+ regmap_reg_range(LTR390_INT_CFG, LTR390_INT_PST),
+ regmap_reg_range(LTR390_THRESH_UP_BYTE(0), LTR390_THRESH_LOW_BYTE(2)),
+};
+
+static const struct regmap_access_table ltr390_writeable_reg_table = {
+ .yes_ranges = ltr390_writeable_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ltr390_writeable_reg_ranges),
};
static const struct regmap_config ltr390_regmap_config = {
@@ -103,6 +139,9 @@ static const struct regmap_config ltr390_regmap_config = {
.reg_bits = 8,
.reg_stride = 1,
.val_bits = 8,
+ .max_register = LTR390_THRESH_LOW_BYTE(2),
+ .rd_table = &ltr390_readable_reg_table,
+ .wr_table = &ltr390_writeable_reg_table,
};
/* Sampling frequency is in milli Hz and milli seconds */
@@ -178,9 +217,10 @@ static int ltr390_get_samp_freq_or_period(struct ltr390_data *data,
return ltr390_samp_freq_table[value][option];
}
-static int ltr390_read_raw(struct iio_dev *iio_device,
- struct iio_chan_spec const *chan, int *val,
- int *val2, long mask)
+
+static int ltr390_do_read_raw(struct iio_dev *iio_device,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
{
int ret;
struct ltr390_data *data = iio_priv(iio_device);
@@ -243,6 +283,27 @@ static int ltr390_read_raw(struct iio_dev *iio_device,
}
}
+static int ltr390_read_raw(struct iio_dev *iio_device,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret;
+ struct ltr390_data *data = iio_priv(iio_device);
+ struct device *dev = &data->client->dev;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "runtime PM failed to resume: %d\n", ret);
+ return ret;
+ }
+
+ ret = ltr390_do_read_raw(iio_device, chan, val, val2, mask);
+
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
/* integration time in us */
static const int ltr390_int_time_map_us[] = { 400000, 200000, 100000, 50000, 25000, 12500 };
static const int ltr390_gain_map[] = { 1, 3, 6, 9, 18 };
@@ -549,11 +610,11 @@ static int ltr390_read_event_config(struct iio_dev *indio_dev,
return FIELD_GET(LTR390_LS_INT_EN, status);
}
-static int ltr390_write_event_config(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- bool state)
+static int ltr390_do_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ bool state)
{
struct ltr390_data *data = iio_priv(indio_dev);
int ret;
@@ -561,7 +622,6 @@ static int ltr390_write_event_config(struct iio_dev *indio_dev,
if (!state)
return regmap_clear_bits(data->regmap, LTR390_INT_CFG, LTR390_LS_INT_EN);
- guard(mutex)(&data->lock);
ret = regmap_set_bits(data->regmap, LTR390_INT_CFG, LTR390_LS_INT_EN);
if (ret < 0)
return ret;
@@ -586,6 +646,51 @@ static int ltr390_write_event_config(struct iio_dev *indio_dev,
}
}
+static int ltr390_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ bool state)
+{
+ int ret;
+ struct ltr390_data *data = iio_priv(indio_dev);
+ struct device *dev = &data->client->dev;
+
+ guard(mutex)(&data->lock);
+
+ if (state && !data->irq_enabled) {
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "runtime PM failed to resume: %d\n", ret);
+ return ret;
+ }
+ data->irq_enabled = true;
+ }
+
+ ret = ltr390_do_event_config(indio_dev, chan, type, dir, state);
+
+ if (!state && data->irq_enabled) {
+ data->irq_enabled = false;
+ pm_runtime_put_autosuspend(dev);
+ }
+
+ return ret;
+}
+
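The wrapper keeps the runtime PM reference balanced across repeated writes:
irq_enabled records whether a reference is currently held, so enabling events
twice takes only one reference and disabling them releases exactly that one.
ltr390_powerdown() below drops the same reference if events are still enabled
at teardown time.
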
+static int ltr390_debugfs_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg, unsigned int writeval,
+ unsigned int *readval)
+{
+ struct ltr390_data *data = iio_priv(indio_dev);
+
+ guard(mutex)(&data->lock);
+
+ if (readval)
+ return regmap_read(data->regmap, reg, readval);
+
+ return regmap_write(data->regmap, reg, writeval);
+}
+
static const struct iio_info ltr390_info = {
.read_raw = ltr390_read_raw,
.write_raw = ltr390_write_raw,
@@ -594,6 +699,7 @@ static const struct iio_info ltr390_info = {
.read_event_config = ltr390_read_event_config,
.write_event_value = ltr390_write_event_value,
.write_event_config = ltr390_write_event_config,
+ .debugfs_reg_access = ltr390_debugfs_reg_access,
};
static irqreturn_t ltr390_interrupt_handler(int irq, void *private)
@@ -628,6 +734,43 @@ static irqreturn_t ltr390_interrupt_handler(int irq, void *private)
return IRQ_HANDLED;
}
+static void ltr390_powerdown(void *priv)
+{
+ struct ltr390_data *data = priv;
+ struct device *dev = &data->client->dev;
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ /* Ensure the device is powered off and interrupts are disabled */
+ if (data->irq_enabled) {
+ ret = regmap_clear_bits(data->regmap, LTR390_INT_CFG, LTR390_LS_INT_EN);
+ if (ret < 0)
+ dev_err(dev, "failed to disable interrupts\n");
+
+ data->irq_enabled = false;
+ pm_runtime_put_autosuspend(dev);
+ }
+
+ ret = regmap_clear_bits(data->regmap, LTR390_MAIN_CTRL, LTR390_SENSOR_ENABLE);
+ if (ret < 0)
+ dev_err(dev, "failed to disable sensor\n");
+}
+
+static int ltr390_pm_init(struct ltr390_data *data)
+{
+ int ret;
+ struct device *dev = &data->client->dev;
+
+ ret = devm_pm_runtime_set_active_enabled(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to enable runtime PM\n");
+
+ pm_runtime_set_autosuspend_delay(dev, 1000);
+ pm_runtime_use_autosuspend(dev);
+ return 0;
+}
+
static int ltr390_probe(struct i2c_client *client)
{
struct ltr390_data *data;
@@ -640,8 +783,9 @@ static int ltr390_probe(struct i2c_client *client)
if (!indio_dev)
return -ENOMEM;
- data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data = iio_priv(indio_dev);
data->regmap = devm_regmap_init_i2c(client, &ltr390_regmap_config);
if (IS_ERR(data->regmap))
return dev_err_probe(dev, PTR_ERR(data->regmap),
@@ -654,6 +798,8 @@ static int ltr390_probe(struct i2c_client *client)
data->gain = 3;
/* default mode for ltr390 is ALS mode */
data->mode = LTR390_SET_ALS_MODE;
+ /* default value of irq_enabled is false */
+ data->irq_enabled = false;
mutex_init(&data->lock);
@@ -681,6 +827,10 @@ static int ltr390_probe(struct i2c_client *client)
if (ret)
return dev_err_probe(dev, ret, "failed to enable the sensor\n");
+ ret = devm_add_action_or_reset(dev, ltr390_powerdown, data);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add action or reset\n");
+
if (client->irq) {
ret = devm_request_threaded_irq(dev, client->irq,
NULL, ltr390_interrupt_handler,
@@ -692,6 +842,10 @@ static int ltr390_probe(struct i2c_client *client)
"request irq (%d) failed\n", client->irq);
}
+ ret = ltr390_pm_init(data);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to initialize runtime PM\n");
+
return devm_iio_device_register(dev, indio_dev);
}
@@ -713,7 +867,26 @@ static int ltr390_resume(struct device *dev)
LTR390_SENSOR_ENABLE);
}
-static DEFINE_SIMPLE_DEV_PM_OPS(ltr390_pm_ops, ltr390_suspend, ltr390_resume);
+static int ltr390_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ltr390_data *data = iio_priv(indio_dev);
+
+ return regmap_clear_bits(data->regmap, LTR390_MAIN_CTRL, LTR390_SENSOR_ENABLE);
+}
+
+static int ltr390_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ltr390_data *data = iio_priv(indio_dev);
+
+ return regmap_set_bits(data->regmap, LTR390_MAIN_CTRL, LTR390_SENSOR_ENABLE);
+}
+
+static const struct dev_pm_ops ltr390_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(ltr390_suspend, ltr390_resume)
+ RUNTIME_PM_OPS(ltr390_runtime_suspend, ltr390_runtime_resume, NULL)
+};
static const struct i2c_device_id ltr390_id[] = {
{ "ltr390" },
@@ -731,7 +904,7 @@ static struct i2c_driver ltr390_driver = {
.driver = {
.name = "ltr390",
.of_match_table = ltr390_of_table,
- .pm = pm_sleep_ptr(&ltr390_pm_ops),
+ .pm = pm_ptr(&ltr390_pm_ops),
},
.probe = ltr390_probe,
.id_table = ltr390_id,
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index debf57a52d1c..022e0693983b 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -1315,8 +1315,8 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p)
scan.channels[j++] = psdata & LTR501_PS_DATA_MASK;
}
- iio_push_to_buffers_with_timestamp(indio_dev, &scan,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/light/ltrf216a.c b/drivers/iio/light/ltrf216a.c
index 61f57a82b872..5f27f754fe1c 100644
--- a/drivers/iio/light/ltrf216a.c
+++ b/drivers/iio/light/ltrf216a.c
@@ -208,7 +208,6 @@ static int ltrf216a_set_power_state(struct ltrf216a_data *data, bool on)
return ret;
}
} else {
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c
index e8b767680133..039d45af3a7f 100644
--- a/drivers/iio/light/max44000.c
+++ b/drivers/iio/light/max44000.c
@@ -75,11 +75,6 @@
struct max44000_data {
struct mutex lock;
struct regmap *regmap;
- /* Ensure naturally aligned timestamp */
- struct {
- u16 channels[2];
- aligned_s64 ts;
- } scan;
};
/* Default scale is set to the minimum of 0.03125 or 1 / (1 << 5) lux */
@@ -496,24 +491,29 @@ static irqreturn_t max44000_trigger_handler(int irq, void *p)
int index = 0;
unsigned int regval;
int ret;
+ struct {
+ u16 channels[2];
+ aligned_s64 ts;
+ } scan = { };
+
mutex_lock(&data->lock);
if (test_bit(MAX44000_SCAN_INDEX_ALS, indio_dev->active_scan_mask)) {
ret = max44000_read_alsval(data);
if (ret < 0)
goto out_unlock;
- data->scan.channels[index++] = ret;
+ scan.channels[index++] = ret;
}
if (test_bit(MAX44000_SCAN_INDEX_PRX, indio_dev->active_scan_mask)) {
ret = regmap_read(data->regmap, MAX44000_REG_PRX_DATA, &regval);
if (ret < 0)
goto out_unlock;
- data->scan.channels[index] = regval;
+ scan.channels[index] = regval;
}
mutex_unlock(&data->lock);
- iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
+ iio_get_time_ns(indio_dev));
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
diff --git a/drivers/iio/light/opt4001.c b/drivers/iio/light/opt4001.c
index ba4eb82d9bc2..95167273bb90 100644
--- a/drivers/iio/light/opt4001.c
+++ b/drivers/iio/light/opt4001.c
@@ -428,8 +428,7 @@ static int opt4001_probe(struct i2c_client *client)
opt4001_chip_off_action,
chip);
if (ret < 0)
- return dev_err_probe(&client->dev, ret,
- "Failed to setup power off action\n");
+ return ret;
return devm_iio_device_register(&client->dev, indio_dev);
}
diff --git a/drivers/iio/light/opt4060.c b/drivers/iio/light/opt4060.c
index 566f1bb8fe2a..981c704e7df5 100644
--- a/drivers/iio/light/opt4060.c
+++ b/drivers/iio/light/opt4060.c
@@ -1104,7 +1104,7 @@ static irqreturn_t opt4060_trigger_handler(int irq, void *p)
}
}
- iio_push_to_buffers_with_timestamp(idev, &raw, pf->timestamp);
+ iio_push_to_buffers_with_ts(idev, &raw, sizeof(raw), pf->timestamp);
err_read:
iio_trigger_notify_done(idev->trig);
return IRQ_HANDLED;
@@ -1212,7 +1212,7 @@ static int opt4060_setup_trigger(struct opt4060_chip *chip, struct iio_dev *idev
name = devm_kasprintf(chip->dev, GFP_KERNEL, "%s-opt4060",
dev_name(chip->dev));
if (!name)
- return dev_err_probe(chip->dev, -ENOMEM, "Failed to alloc chip name\n");
+ return -ENOMEM;
ret = devm_request_threaded_irq(chip->dev, chip->irq, NULL, opt4060_irq_thread,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
@@ -1299,8 +1299,7 @@ static int opt4060_probe(struct i2c_client *client)
ret = devm_add_action_or_reset(dev, opt4060_chip_off_action, chip);
if (ret < 0)
- return dev_err_probe(dev, ret,
- "Failed to setup power off action\n");
+ return ret;
ret = opt4060_setup_buffer(chip, indio_dev);
if (ret)
diff --git a/drivers/iio/light/pa12203001.c b/drivers/iio/light/pa12203001.c
index 8885852bef22..98a1f1624c75 100644
--- a/drivers/iio/light/pa12203001.c
+++ b/drivers/iio/light/pa12203001.c
@@ -185,15 +185,10 @@ static int pa12203001_set_power_state(struct pa12203001_data *data, bool on,
mutex_unlock(&data->lock);
}
- if (on) {
- ret = pm_runtime_resume_and_get(&data->client->dev);
+ if (on)
+ return pm_runtime_resume_and_get(&data->client->dev);
- } else {
- pm_runtime_mark_last_busy(&data->client->dev);
- ret = pm_runtime_put_autosuspend(&data->client->dev);
- }
-
- return ret;
+ return pm_runtime_put_autosuspend(&data->client->dev);
err:
mutex_unlock(&data->lock);
diff --git a/drivers/iio/light/rohm-bu27034.c b/drivers/iio/light/rohm-bu27034.c
index 7cec5e943373..28d111ac8c0a 100644
--- a/drivers/iio/light/rohm-bu27034.c
+++ b/drivers/iio/light/rohm-bu27034.c
@@ -1193,7 +1193,8 @@ static int bu27034_buffer_thread(void *arg)
*/
data->scan.mlux = (u32)mlux;
}
- iio_push_to_buffers_with_timestamp(idev, &data->scan, tstamp);
+ iio_push_to_buffers_with_ts(idev, &data->scan,
+ sizeof(data->scan), tstamp);
}
return 0;
diff --git a/drivers/iio/light/rpr0521.c b/drivers/iio/light/rpr0521.c
index c50183f07240..9341c1d58cbe 100644
--- a/drivers/iio/light/rpr0521.c
+++ b/drivers/iio/light/rpr0521.c
@@ -358,12 +358,10 @@ static int rpr0521_set_power_state(struct rpr0521_data *data, bool on,
* Note: If either measurement is re-enabled before _suspend(),
* both stay enabled until _suspend().
*/
- if (on) {
+ if (on)
ret = pm_runtime_resume_and_get(&data->client->dev);
- } else {
- pm_runtime_mark_last_busy(&data->client->dev);
+ else
ret = pm_runtime_put_autosuspend(&data->client->dev);
- }
if (ret < 0) {
dev_err(&data->client->dev,
"Failed: rpr0521_set_power_state for %d, ret %d\n",
@@ -457,8 +455,8 @@ static irqreturn_t rpr0521_trigger_consumer_handler(int irq, void *p)
data->scan.channels,
(3 * 2) + 1); /* 3 * 16-bit + (discarded) int clear reg. */
if (!err)
- iio_push_to_buffers_with_timestamp(indio_dev,
- &data->scan, pf->timestamp);
+ iio_push_to_buffers_with_ts(indio_dev, &data->scan,
+ sizeof(data->scan), pf->timestamp);
else
dev_err(&data->client->dev,
"Trigger consumer can't read from sensor.\n");
diff --git a/drivers/iio/light/si1145.c b/drivers/iio/light/si1145.c
index 4aa02afd853e..f8eb251eca8d 100644
--- a/drivers/iio/light/si1145.c
+++ b/drivers/iio/light/si1145.c
@@ -494,8 +494,9 @@ static irqreturn_t si1145_trigger_handler(int irq, void *private)
goto done;
}
- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, data->buffer,
+ sizeof(data->buffer),
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/light/st_uvis25.h b/drivers/iio/light/st_uvis25.h
index 1f93e3dc45c2..78bc56aad129 100644
--- a/drivers/iio/light/st_uvis25.h
+++ b/drivers/iio/light/st_uvis25.h
@@ -27,11 +27,6 @@ struct st_uvis25_hw {
struct iio_trigger *trig;
bool enabled;
int irq;
- /* Ensure timestamp is naturally aligned */
- struct {
- u8 chan;
- aligned_s64 ts;
- } scan;
};
extern const struct dev_pm_ops st_uvis25_pm_ops;
diff --git a/drivers/iio/light/st_uvis25_core.c b/drivers/iio/light/st_uvis25_core.c
index 124a8f9204a9..bcd729a9924e 100644
--- a/drivers/iio/light/st_uvis25_core.c
+++ b/drivers/iio/light/st_uvis25_core.c
@@ -234,15 +234,21 @@ static irqreturn_t st_uvis25_buffer_handler_thread(int irq, void *p)
struct st_uvis25_hw *hw = iio_priv(iio_dev);
unsigned int val;
int err;
+ /* Ensure timestamp is naturally aligned */
+ struct {
+ u8 chan;
+ aligned_s64 ts;
+ } scan = { };
+
err = regmap_read(hw->regmap, ST_UVIS25_REG_OUT_ADDR, &val);
if (err < 0)
goto out;
- hw->scan.chan = val;
+ scan.chan = val;
- iio_push_to_buffers_with_timestamp(iio_dev, &hw->scan,
- iio_get_time_ns(iio_dev));
+ iio_push_to_buffers_with_ts(iio_dev, &scan, sizeof(scan),
+ iio_get_time_ns(iio_dev));
out:
iio_trigger_notify_done(hw->trig);
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index 81dd2bfc22c0..a75a83594a7e 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -607,10 +607,8 @@ static int stk3310_probe(struct i2c_client *client)
struct stk3310_data *data;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
- if (!indio_dev) {
- dev_err(&client->dev, "iio allocation failed!\n");
+ if (!indio_dev)
return -ENOMEM;
- }
data = iio_priv(indio_dev);
data->client = client;
diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c
index 39268f855c77..5be461e6dbdb 100644
--- a/drivers/iio/light/tcs3414.c
+++ b/drivers/iio/light/tcs3414.c
@@ -53,11 +53,6 @@ struct tcs3414_data {
u8 control;
u8 gain;
u8 timing;
- /* Ensure timestamp is naturally aligned */
- struct {
- u16 chans[4];
- aligned_s64 timestamp;
- } scan;
};
#define TCS3414_CHANNEL(_color, _si, _addr) { \
@@ -204,6 +199,12 @@ static irqreturn_t tcs3414_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct tcs3414_data *data = iio_priv(indio_dev);
int i, j = 0;
+ /* Ensure timestamp is naturally aligned */
+ struct {
+ u16 chans[4];
+ aligned_s64 timestamp;
+ } scan = { };
+
iio_for_each_active_channel(indio_dev, i) {
int ret = i2c_smbus_read_word_data(data->client,
@@ -211,10 +212,10 @@ static irqreturn_t tcs3414_trigger_handler(int irq, void *p)
if (ret < 0)
goto done;
- data->scan.chans[j++] = ret;
+ scan.chans[j++] = ret;
}
- iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
iio_get_time_ns(indio_dev));
done:
diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c
index 0f8bf8503edd..12429a3261b3 100644
--- a/drivers/iio/light/tcs3472.c
+++ b/drivers/iio/light/tcs3472.c
@@ -64,11 +64,6 @@ struct tcs3472_data {
u8 control;
u8 atime;
u8 apers;
- /* Ensure timestamp is naturally aligned */
- struct {
- u16 chans[4];
- aligned_s64 timestamp;
- } scan;
};
static const struct iio_event_spec tcs3472_events[] = {
@@ -377,6 +372,11 @@ static irqreturn_t tcs3472_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct tcs3472_data *data = iio_priv(indio_dev);
int i, j = 0;
+ /* Ensure timestamp is naturally aligned */
+ struct {
+ u16 chans[4];
+ aligned_s64 timestamp;
+ } scan = { };
int ret = tcs3472_req_data(data);
if (ret < 0)
@@ -388,10 +388,10 @@ static irqreturn_t tcs3472_trigger_handler(int irq, void *p)
if (ret < 0)
goto done;
- data->scan.chans[j++] = ret;
+ scan.chans[j++] = ret;
}
- iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
iio_get_time_ns(indio_dev));
done:
diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c
index fc3b0c4226be..8801a491de77 100644
--- a/drivers/iio/light/tsl2583.c
+++ b/drivers/iio/light/tsl2583.c
@@ -641,16 +641,10 @@ static const struct iio_chan_spec tsl2583_channels[] = {
static int tsl2583_set_pm_runtime_busy(struct tsl2583_chip *chip, bool on)
{
- int ret;
+ if (on)
+ return pm_runtime_resume_and_get(&chip->client->dev);
- if (on) {
- ret = pm_runtime_resume_and_get(&chip->client->dev);
- } else {
- pm_runtime_mark_last_busy(&chip->client->dev);
- ret = pm_runtime_put_autosuspend(&chip->client->dev);
- }
-
- return ret;
+ return pm_runtime_put_autosuspend(&chip->client->dev);
}
static int tsl2583_read_raw(struct iio_dev *indio_dev,
diff --git a/drivers/iio/light/tsl2591.c b/drivers/iio/light/tsl2591.c
index 08476f193a44..c5557867ea43 100644
--- a/drivers/iio/light/tsl2591.c
+++ b/drivers/iio/light/tsl2591.c
@@ -772,7 +772,6 @@ static int tsl2591_read_raw(struct iio_dev *indio_dev,
err_unlock:
mutex_unlock(&chip->als_mutex);
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return ret;
@@ -995,7 +994,6 @@ static int tsl2591_write_event_config(struct iio_dev *indio_dev,
pm_runtime_get_sync(&client->dev);
} else if (!state && chip->events_enabled) {
chip->events_enabled = false;
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
}
diff --git a/drivers/iio/light/us5182d.c b/drivers/iio/light/us5182d.c
index 61a0957317a1..d2f5a44892a8 100644
--- a/drivers/iio/light/us5182d.c
+++ b/drivers/iio/light/us5182d.c
@@ -361,19 +361,13 @@ static int us5182d_shutdown_en(struct us5182d_data *data, u8 state)
static int us5182d_set_power_state(struct us5182d_data *data, bool on)
{
- int ret;
-
if (data->power_mode == US5182D_ONESHOT)
return 0;
- if (on) {
- ret = pm_runtime_resume_and_get(&data->client->dev);
- } else {
- pm_runtime_mark_last_busy(&data->client->dev);
- ret = pm_runtime_put_autosuspend(&data->client->dev);
- }
+ if (on)
+ return pm_runtime_resume_and_get(&data->client->dev);
- return ret;
+ return pm_runtime_put_autosuspend(&data->client->dev);
}
static int us5182d_read_value(struct us5182d_data *data,
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index 90e7d4421abf..4dbb2294a843 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -576,16 +576,11 @@ static bool vcnl4010_is_in_periodic_mode(struct vcnl4000_data *data)
static int vcnl4000_set_pm_runtime_state(struct vcnl4000_data *data, bool on)
{
struct device *dev = &data->client->dev;
- int ret;
- if (on) {
- ret = pm_runtime_resume_and_get(dev);
- } else {
- pm_runtime_mark_last_busy(dev);
- ret = pm_runtime_put_autosuspend(dev);
- }
+ if (on)
+ return pm_runtime_resume_and_get(dev);
- return ret;
+ return pm_runtime_put_autosuspend(dev);
}
static int vcnl4040_read_als_it(struct vcnl4000_data *data, int *val, int *val2)
@@ -1662,7 +1657,10 @@ static irqreturn_t vcnl4010_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct vcnl4000_data *data = iio_priv(indio_dev);
const unsigned long *active_scan_mask = indio_dev->active_scan_mask;
- u16 buffer[8] __aligned(8) = {0}; /* 1x16-bit + naturally aligned ts */
+ struct {
+ u16 chan;
+ aligned_s64 ts;
+ } scan = { };
bool data_read = false;
unsigned long isr;
int val = 0;
@@ -1682,7 +1680,7 @@ static irqreturn_t vcnl4010_trigger_handler(int irq, void *p)
if (ret < 0)
goto end;
- buffer[0] = val;
+ scan.chan = val;
data_read = true;
}
}
@@ -1695,8 +1693,8 @@ static irqreturn_t vcnl4010_trigger_handler(int irq, void *p)
if (!data_read)
goto end;
- iio_push_to_buffers_with_timestamp(indio_dev, buffer,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
+ iio_get_time_ns(indio_dev));
end:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/light/vcnl4035.c b/drivers/iio/light/vcnl4035.c
index 01bc99564f98..963747927425 100644
--- a/drivers/iio/light/vcnl4035.c
+++ b/drivers/iio/light/vcnl4035.c
@@ -141,17 +141,12 @@ static const struct iio_trigger_ops vcnl4035_trigger_ops = {
static int vcnl4035_set_pm_runtime_state(struct vcnl4035_data *data, bool on)
{
- int ret;
struct device *dev = &data->client->dev;
- if (on) {
- ret = pm_runtime_resume_and_get(dev);
- } else {
- pm_runtime_mark_last_busy(dev);
- ret = pm_runtime_put_autosuspend(dev);
- }
+ if (on)
+ return pm_runtime_resume_and_get(dev);
- return ret;
+ return pm_runtime_put_autosuspend(dev);
}
static int vcnl4035_read_info_raw(struct iio_dev *indio_dev,
diff --git a/drivers/iio/light/veml6030.c b/drivers/iio/light/veml6030.c
index 0945f146bedb..6bcacae3863c 100644
--- a/drivers/iio/light/veml6030.c
+++ b/drivers/iio/light/veml6030.c
@@ -903,7 +903,7 @@ static irqreturn_t veml6030_trigger_handler(int irq, void *p)
scan.chans[i++] = reg;
}
- iio_push_to_buffers_with_timestamp(iio, &scan, pf->timestamp);
+ iio_push_to_buffers_with_ts(iio, &scan, sizeof(scan), pf->timestamp);
done:
iio_trigger_notify_done(iio->trig);
diff --git a/drivers/iio/light/veml6040.c b/drivers/iio/light/veml6040.c
index 71a594b2ec85..f563f9f0ee67 100644
--- a/drivers/iio/light/veml6040.c
+++ b/drivers/iio/light/veml6040.c
@@ -219,8 +219,7 @@ static int veml6040_probe(struct i2c_client *client)
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
- return dev_err_probe(dev, -ENOMEM,
- "IIO device allocation failed\n");
+ return -ENOMEM;
regmap = devm_regmap_init_i2c(client, &veml6040_regmap_config);
if (IS_ERR(regmap))
diff --git a/drivers/iio/light/veml6046x00.c b/drivers/iio/light/veml6046x00.c
new file mode 100644
index 000000000000..e60f24d46e7b
--- /dev/null
+++ b/drivers/iio/light/veml6046x00.c
@@ -0,0 +1,1030 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * VEML6046X00 High Accuracy RGBIR Color Sensor
+ *
+ * Copyright (c) 2025 Andreas Klinger <ak@it-klinger.de>
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#include <asm/byteorder.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+/*
+ * Device registers.
+ * Registers that are accessed as bulk I/O are omitted.
+ */
+#define VEML6046X00_REG_CONF0 0x00
+#define VEML6046X00_REG_CONF1 0x01
+#define VEML6046X00_REG_THDH 0x04
+#define VEML6046X00_REG_THDL 0x06
+#define VEML6046X00_REG_R 0x10
+#define VEML6046X00_REG_G 0x12
+#define VEML6046X00_REG_B 0x14
+#define VEML6046X00_REG_IR 0x16
+#define VEML6046X00_REG_ID 0x18
+#define VEML6046X00_REG_INT 0x1A
+#define VEML6046X00_REG_INT_H 0x1B
+
+/* Bit masks for specific functionality */
+#define VEML6046X00_CONF0_ON_0 BIT(0)
+#define VEML6046X00_CONF0_INT BIT(1)
+#define VEML6046X00_CONF0_AF_TRIG BIT(2)
+#define VEML6046X00_CONF0_AF BIT(3)
+#define VEML6046X00_CONF0_IT GENMASK(6, 4)
+#define VEML6046X00_CONF1_CAL BIT(0)
+#define VEML6046X00_CONF1_PERS GENMASK(2, 1)
+#define VEML6046X00_CONF1_GAIN GENMASK(4, 3)
+#define VEML6046X00_CONF1_PD_D2 BIT(6)
+#define VEML6046X00_CONF1_ON_1 BIT(7)
+#define VEML6046X00_INT_TH_H BIT(1)
+#define VEML6046X00_INT_TH_L BIT(2)
+#define VEML6046X00_INT_DRDY BIT(3)
+#define VEML6046X00_INT_MASK \
+ (VEML6046X00_INT_TH_H | VEML6046X00_INT_TH_L | VEML6046X00_INT_DRDY)
+
+#define VEML6046X00_GAIN_1 0x0
+#define VEML6046X00_GAIN_2 0x1
+#define VEML6046X00_GAIN_0_66 0x2
+#define VEML6046X00_GAIN_0_5 0x3
+
+#define VEML6046X00_PD_2_2 0x0
+#define VEML6046X00_PD_1_2 BIT(6)
+
+/* Autosuspend delay */
+#define VEML6046X00_AUTOSUSPEND_MS (3 * MSEC_PER_SEC)
+
+enum veml6046x00_scan {
+ VEML6046X00_SCAN_R,
+ VEML6046X00_SCAN_G,
+ VEML6046X00_SCAN_B,
+ VEML6046X00_SCAN_IR,
+ VEML6046X00_SCAN_TIMESTAMP,
+};
+
+/**
+ * struct veml6046x00_rf - Regmap field of configuration registers.
+ * @int_en: Interrupt enable of green channel.
+ * @mode: Mode of operation.
+ * The driver always uses active force mode.
+ * @trig: Trigger to be set in active force mode to start a
+ * measurement.
+ * @it: Integration time.
+ * @pers: Persistence - number of threshold crossings needed to
+ * trigger an interrupt.
+ */
+struct veml6046x00_rf {
+ struct regmap_field *int_en;
+ struct regmap_field *mode;
+ struct regmap_field *trig;
+ struct regmap_field *it;
+ struct regmap_field *pers;
+};
+
+/**
+ * struct veml6046x00_data - Private data of driver.
+ * @regmap: Regmap definition of sensor.
+ * @trig: Industrial-IO trigger.
+ * @rf: Regmap field of configuration.
+ */
+struct veml6046x00_data {
+ struct regmap *regmap;
+ struct iio_trigger *trig;
+ struct veml6046x00_rf rf;
+};
+
+/**
+ * DOC: Valid integration times (IT)
+ *
+ * The array veml6046x00_it holds the valid integration times.
+ *
+ * The register value read from or written to the regmap field "it" is
+ * identical to the array index, so no separate translation table between
+ * valid integration times and register values is needed.
+ *
+ * The array is already in the form expected by the callback of the sysfs
+ * attribute integration_time_available (IIO_CHAN_INFO_INT_TIME), so no
+ * additional conversion is needed.
+ */
+static const int veml6046x00_it[][2] = {
+ { 0, 3125 },
+ { 0, 6250 },
+ { 0, 12500 },
+ { 0, 25000 },
+ { 0, 50000 },
+ { 0, 100000 },
+ { 0, 200000 },
+ { 0, 400000 },
+};
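+
A worked example of the index-equals-register convention (illustrative): a
register field value of 5 selects veml6046x00_it[5] = { 0, 100000 }, i.e. an
integration time of 0.100000 s (100 ms), reported to userspace as
IIO_VAL_INT_PLUS_MICRO without any further translation.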
+
+/**
+ * DOC: Handling of gain and photodiode size (PD)
+ *
+ * Gains here in the driver are not exactly the same as in the datasheet of the
+ * sensor. The gain in the driver is a combination of the gain of the sensor
+ * with the photodiode size (PD).
+ * The following combinations are possible:
+ * gain(driver) = gain(sensor) * PD
+ * 0.25 = x0.5 * 1/2
+ * 0.33 = x0.66 * 1/2
+ * 0.5 = x0.5 * 2/2
+ * 0.66 = x0.66 * 2/2
+ * 1 = x1 * 2/2
+ * 2 = x2 * 2/2
+ */
+
+/**
+ * struct veml6046x00_gain_pd - Translation of gain and photodiode size (PD).
+ * @gain_sen: Gain used in the sensor as described in the datasheet of the
+ * sensor
+ * @pd: Photodiode size in the sensor
+ *
+ * This is the translation table from the gain used in the driver (and also used
+ * by the userspace interface in sysfs) to the gain and PD used in the sensor
+ * hardware.
+ *
+ * There are six gain values visible to the user (0.25 .. 2) which map to
+ * four different gains in the sensor hardware (x0.5 .. x2) and two PD
+ * sizes (1/2 and 2/2). In theory there are eight combinations, but the
+ * gain values 0.5 and 1 can each be produced in two ways; for those the
+ * combination with the larger PD (2/2) is used, as more photodiode cells
+ * are expected to deliver a more precise result.
+ */
+struct veml6046x00_gain_pd {
+ unsigned int gain_sen;
+ unsigned int pd;
+};
+
+static const struct veml6046x00_gain_pd veml6046x00_gain_pd[] = {
+ { .gain_sen = VEML6046X00_GAIN_0_5, .pd = VEML6046X00_PD_1_2 },
+ { .gain_sen = VEML6046X00_GAIN_0_66, .pd = VEML6046X00_PD_1_2 },
+ { .gain_sen = VEML6046X00_GAIN_0_5, .pd = VEML6046X00_PD_2_2 },
+ { .gain_sen = VEML6046X00_GAIN_0_66, .pd = VEML6046X00_PD_2_2 },
+ { .gain_sen = VEML6046X00_GAIN_1, .pd = VEML6046X00_PD_2_2 },
+ { .gain_sen = VEML6046X00_GAIN_2, .pd = VEML6046X00_PD_2_2 },
+};
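+
A worked example of the gain/PD coupling (illustrative): the user-visible
gain 0.33 is realized as sensor gain x0.66 with photodiode size 1/2, since
0.66 * 0.5 = 0.33; that pair is exactly what index 1 of veml6046x00_gain_pd
encodes. The duplicated values 0.5 and 1 resolve to the 2/2 entries at
indices 2 and 4.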
+
+/**
+ * DOC: Factors for calculation of lux
+ *
+ * The array veml6046x00_it_gains holds the factors for calculating lux.
+ *
+ * For constant light, the measured raw values differ depending on the
+ * configured integration time (IT), gain and photodiode size (PD). As gain
+ * and PD are already coupled in the driver (see &struct veml6046x00_gain_pd)
+ * two dimensions remain: IT and gain(driver).
+ *
+ * The available factors for a given IT are grouped together in the form
+ * expected by the callback of scale_available (IIO_CHAN_INFO_SCALE).
+ *
+ * Factors for lux / raw count are taken directly from the datasheet.
+ */
+static const int veml6046x00_it_gains[][6][2] = {
+ /* integration time: 3.125 ms */
+ {
+ { 5, 376000 }, /* gain: x0.25 */
+ { 4, 72700 }, /* gain: x0.33 */
+ { 2, 688000 }, /* gain: x0.5 */
+ { 2, 36400 }, /* gain: x0.66 */
+ { 1, 344000 }, /* gain: x1 */
+ { 0, 672000 }, /* gain: x2 */
+ },
+ /* integration time: 6.25 ms */
+ {
+ { 2, 688000 }, /* gain: x0.25 */
+ { 2, 36350 }, /* gain: x0.33 */
+ { 1, 344000 }, /* gain: x0.5 */
+ { 1, 18200 }, /* gain: x0.66 */
+ { 0, 672000 }, /* gain: x1 */
+ { 0, 336000 }, /* gain: x2 */
+ },
+ /* integration time: 12.5 ms */
+ {
+ { 1, 344000 }, /* gain: x0.25 */
+ { 1, 18175 }, /* gain: x0.33 */
+ { 0, 672000 }, /* gain: x0.5 */
+ { 0, 509100 }, /* gain: x0.66 */
+ { 0, 336000 }, /* gain: x1 */
+ { 0, 168000 }, /* gain: x2 */
+ },
+ /* integration time: 25 ms */
+ {
+ { 0, 672000 }, /* gain: x0.25 */
+ { 0, 509087 }, /* gain: x0.33 */
+ { 0, 336000 }, /* gain: x0.5 */
+ { 0, 254550 }, /* gain: x0.66 */
+ { 0, 168000 }, /* gain: x1 */
+ { 0, 84000 }, /* gain: x2 */
+ },
+ /* integration time: 50 ms */
+ {
+ { 0, 336000 }, /* gain: x0.25 */
+ { 0, 254543 }, /* gain: x0.33 */
+ { 0, 168000 }, /* gain: x0.5 */
+ { 0, 127275 }, /* gain: x0.66 */
+ { 0, 84000 }, /* gain: x1 */
+ { 0, 42000 }, /* gain: x2 */
+ },
+ /* integration time: 100 ms */
+ {
+ { 0, 168000 }, /* gain: x0.25 */
+ { 0, 127271 }, /* gain: x0.33 */
+ { 0, 84000 }, /* gain: x0.5 */
+ { 0, 63637 }, /* gain: x0.66 */
+ { 0, 42000 }, /* gain: x1 */
+ { 0, 21000 }, /* gain: x2 */
+ },
+ /* integration time: 200 ms */
+ {
+ { 0, 84000 }, /* gain: x0.25 */
+ { 0, 63635 }, /* gain: x0.33 */
+ { 0, 42000 }, /* gain: x0.5 */
+ { 0, 31818 }, /* gain: x0.66 */
+ { 0, 21000 }, /* gain: x1 */
+ { 0, 10500 }, /* gain: x2 */
+ },
+ /* integration time: 400 ms */
+ {
+ { 0, 42000 }, /* gain: x0.25 */
+ { 0, 31817 }, /* gain: x0.33 */
+ { 0, 21000 }, /* gain: x0.5 */
+ { 0, 15909 }, /* gain: x0.66 */
+ { 0, 10500 }, /* gain: x1 */
+ { 0, 5250 }, /* gain: x2 */
+ },
+};
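+
A worked example of applying these factors (illustrative): at IT = 100 ms
and gain x1 the scale entry is { 0, 42000 }, i.e. 0.042 lux per count, so a
raw count of 1000 corresponds to 1000 * 0.042 = 42 lux. Halving the gain or
the integration time doubles the factor accordingly.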
+
+/*
+ * Two bits (RGB_ON_0 and RGB_ON_1) must be cleared to power on the device.
+ */
+static int veml6046x00_power_on(struct veml6046x00_data *data)
+{
+ int ret;
+ struct device *dev = regmap_get_device(data->regmap);
+
+ ret = regmap_clear_bits(data->regmap, VEML6046X00_REG_CONF0,
+ VEML6046X00_CONF0_ON_0);
+ if (ret) {
+ dev_err(dev, "Failed to clear bit for power on: %d\n", ret);
+ return ret;
+ }
+
+ return regmap_clear_bits(data->regmap, VEML6046X00_REG_CONF1,
+ VEML6046X00_CONF1_ON_1);
+}
+
+/*
+ * Two bits (RGB_ON_0 and RGB_ON_1) must be set to power off the device.
+ */
+static int veml6046x00_shutdown(struct veml6046x00_data *data)
+{
+ int ret;
+ struct device *dev = regmap_get_device(data->regmap);
+
+ ret = regmap_set_bits(data->regmap, VEML6046X00_REG_CONF0,
+ VEML6046X00_CONF0_ON_0);
+ if (ret) {
+ dev_err(dev, "Failed to set bit for shutdown %d\n", ret);
+ return ret;
+ }
+
+ return regmap_set_bits(data->regmap, VEML6046X00_REG_CONF1,
+ VEML6046X00_CONF1_ON_1);
+}
+
+static void veml6046x00_shutdown_action(void *data)
+{
+ veml6046x00_shutdown(data);
+}
+
+static const struct iio_chan_spec veml6046x00_channels[] = {
+ {
+ .type = IIO_INTENSITY,
+ .address = VEML6046X00_REG_R,
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_RED,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = VEML6046X00_SCAN_R,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_LE,
+ },
+ },
+ {
+ .type = IIO_INTENSITY,
+ .address = VEML6046X00_REG_G,
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_GREEN,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = VEML6046X00_SCAN_G,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_LE,
+ },
+ },
+ {
+ .type = IIO_INTENSITY,
+ .address = VEML6046X00_REG_B,
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_BLUE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = VEML6046X00_SCAN_B,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_LE,
+ },
+ },
+ {
+ .type = IIO_INTENSITY,
+ .address = VEML6046X00_REG_IR,
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_IR,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = VEML6046X00_SCAN_IR,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_LE,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(VEML6046X00_SCAN_TIMESTAMP),
+};
+
+static const struct regmap_config veml6046x00_regmap_config = {
+ .name = "veml6046x00_regm",
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = VEML6046X00_REG_INT_H,
+};
+
+static const struct reg_field veml6046x00_rf_int_en =
+ REG_FIELD(VEML6046X00_REG_CONF0, 1, 1);
+
+static const struct reg_field veml6046x00_rf_trig =
+ REG_FIELD(VEML6046X00_REG_CONF0, 2, 2);
+
+static const struct reg_field veml6046x00_rf_mode =
+ REG_FIELD(VEML6046X00_REG_CONF0, 3, 3);
+
+static const struct reg_field veml6046x00_rf_it =
+ REG_FIELD(VEML6046X00_REG_CONF0, 4, 6);
+
+static const struct reg_field veml6046x00_rf_pers =
+ REG_FIELD(VEML6046X00_REG_CONF1, 1, 2);
+
+static int veml6046x00_regfield_init(struct veml6046x00_data *data)
+{
+ struct regmap *regmap = data->regmap;
+ struct device *dev = regmap_get_device(data->regmap);
+ struct regmap_field *rm_field;
+ struct veml6046x00_rf *rf = &data->rf;
+
+ rm_field = devm_regmap_field_alloc(dev, regmap, veml6046x00_rf_int_en);
+ if (IS_ERR(rm_field))
+ return PTR_ERR(rm_field);
+ rf->int_en = rm_field;
+
+ rm_field = devm_regmap_field_alloc(dev, regmap, veml6046x00_rf_mode);
+ if (IS_ERR(rm_field))
+ return PTR_ERR(rm_field);
+ rf->mode = rm_field;
+
+ rm_field = devm_regmap_field_alloc(dev, regmap, veml6046x00_rf_trig);
+ if (IS_ERR(rm_field))
+ return PTR_ERR(rm_field);
+ rf->trig = rm_field;
+
+ rm_field = devm_regmap_field_alloc(dev, regmap, veml6046x00_rf_it);
+ if (IS_ERR(rm_field))
+ return PTR_ERR(rm_field);
+ rf->it = rm_field;
+
+ rm_field = devm_regmap_field_alloc(dev, regmap, veml6046x00_rf_pers);
+ if (IS_ERR(rm_field))
+ return PTR_ERR(rm_field);
+ rf->pers = rm_field;
+
+ return 0;
+}
+
+static int veml6046x00_get_it_index(struct veml6046x00_data *data)
+{
+ int ret;
+ unsigned int reg;
+
+ ret = regmap_field_read(data->rf.it, &reg);
+ if (ret)
+ return ret;
+
+ /* The register value is identical to the index into the array. */
+ if (reg >= ARRAY_SIZE(veml6046x00_it))
+ return -EINVAL;
+
+ return reg;
+}
+
+static int veml6046x00_get_it_usec(struct veml6046x00_data *data, unsigned int *it_usec)
+{
+ int ret;
+ unsigned int reg;
+
+ ret = regmap_field_read(data->rf.it, &reg);
+ if (ret)
+ return ret;
+
+ if (reg >= ARRAY_SIZE(veml6046x00_it))
+ return -EINVAL;
+
+ *it_usec = veml6046x00_it[reg][1];
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int veml6046x00_set_it(struct iio_dev *iio, int val, int val2)
+{
+ struct veml6046x00_data *data = iio_priv(iio);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(veml6046x00_it); i++) {
+ if ((veml6046x00_it[i][0] == val) &&
+ (veml6046x00_it[i][1] == val2))
+ return regmap_field_write(data->rf.it, i);
+ }
+
+ return -EINVAL;
+}
+
+static int veml6046x00_get_val_gain_idx(struct veml6046x00_data *data, int val,
+ int val2)
+{
+ unsigned int i;
+ int it_idx;
+
+ it_idx = veml6046x00_get_it_index(data);
+ if (it_idx < 0)
+ return it_idx;
+
+ for (i = 0; i < ARRAY_SIZE(veml6046x00_it_gains[it_idx]); i++) {
+ if ((veml6046x00_it_gains[it_idx][i][0] == val) &&
+ (veml6046x00_it_gains[it_idx][i][1] == val2))
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int veml6046x00_get_gain_idx(struct veml6046x00_data *data)
+{
+ int ret;
+ unsigned int i, reg, reg_gain, reg_pd;
+
+ ret = regmap_read(data->regmap, VEML6046X00_REG_CONF1, &reg);
+ if (ret)
+ return ret;
+
+ reg_gain = FIELD_GET(VEML6046X00_CONF1_GAIN, reg);
+ reg_pd = reg & VEML6046X00_CONF1_PD_D2;
+
+ for (i = 0; i < ARRAY_SIZE(veml6046x00_gain_pd); i++) {
+ if ((veml6046x00_gain_pd[i].gain_sen == reg_gain) &&
+ (veml6046x00_gain_pd[i].pd == reg_pd))
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int veml6046x00_set_scale(struct iio_dev *iio, int val, int val2)
+{
+ struct veml6046x00_data *data = iio_priv(iio);
+ unsigned int new_scale;
+ int gain_idx;
+
+ gain_idx = veml6046x00_get_val_gain_idx(data, val, val2);
+ if (gain_idx < 0)
+ return gain_idx;
+
+ new_scale = FIELD_PREP(VEML6046X00_CONF1_GAIN,
+ veml6046x00_gain_pd[gain_idx].gain_sen) |
+ veml6046x00_gain_pd[gain_idx].pd;
+
+ return regmap_update_bits(data->regmap, VEML6046X00_REG_CONF1,
+ VEML6046X00_CONF1_GAIN |
+ VEML6046X00_CONF1_PD_D2,
+ new_scale);
+}
+
+static int veml6046x00_get_scale(struct veml6046x00_data *data,
+ int *val, int *val2)
+{
+ int gain_idx, it_idx;
+
+ gain_idx = veml6046x00_get_gain_idx(data);
+ if (gain_idx < 0)
+ return gain_idx;
+
+ it_idx = veml6046x00_get_it_index(data);
+ if (it_idx < 0)
+ return it_idx;
+
+ *val = veml6046x00_it_gains[it_idx][gain_idx][0];
+ *val2 = veml6046x00_it_gains[it_idx][gain_idx][1];
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+/**
+ * veml6046x00_read_data_ready() - Read data ready bit
+ * @data: Private data.
+ *
+ * Helper function for reading the data ready bit from the interrupt register.
+ *
+ * Return:
+ * * %1 - Data is available (AF_DATA_READY is set)
+ * * %0 - No data available
+ * * %-EIO - Error during bulk read
+ */
+static int veml6046x00_read_data_ready(struct veml6046x00_data *data)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+ u8 reg[2];
+
+ /*
+ * Note from the vendor, but not explicitly in the datasheet: we
+ * should always read both registers together.
+ */
+ ret = regmap_bulk_read(data->regmap, VEML6046X00_REG_INT,
+ &reg, sizeof(reg));
+ if (ret) {
+ dev_err(dev, "Failed to read interrupt register %d\n", ret);
+ return -EIO;
+ }
+
+ if (reg[1] & VEML6046X00_INT_DRDY)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * veml6046x00_wait_data_available() - Wait until data is available
+ * @iio: IIO device.
+ * @usecs: Microseconds to wait for data.
+ *
+ * This function waits for a certain bit in the interrupt register which
+ * signals that data is available to be read.
+ *
+ * It tries twice, sleeping for usecs in between the attempts.
+ *
+ * Return:
+ * * %1 - Data is available (AF_DATA_READY is set)
+ * * %0 - Timeout, no data available within usecs
+ * * %-EIO - Error during bulk read
+ */
+static int veml6046x00_wait_data_available(struct iio_dev *iio, unsigned int usecs)
+{
+ struct veml6046x00_data *data = iio_priv(iio);
+ int ret;
+
+ ret = veml6046x00_read_data_ready(data);
+ if (ret)
+ return ret;
+
+ fsleep(usecs);
+ return veml6046x00_read_data_ready(data);
+}
+
+static int veml6046x00_single_read(struct iio_dev *iio,
+ enum iio_modifier modifier, int *val)
+{
+ struct veml6046x00_data *data = iio_priv(iio);
+ struct device *dev = regmap_get_device(data->regmap);
+ unsigned int addr, it_usec;
+ int ret;
+ __le16 reg;
+
+ switch (modifier) {
+ case IIO_MOD_LIGHT_RED:
+ addr = VEML6046X00_REG_R;
+ break;
+ case IIO_MOD_LIGHT_GREEN:
+ addr = VEML6046X00_REG_G;
+ break;
+ case IIO_MOD_LIGHT_BLUE:
+ addr = VEML6046X00_REG_B;
+ break;
+ case IIO_MOD_LIGHT_IR:
+ addr = VEML6046X00_REG_IR;
+ break;
+ default:
+ return -EINVAL;
+ }
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ ret = veml6046x00_get_it_usec(data, &it_usec);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get integration time ret: %d", ret);
+ goto out;
+ }
+
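+ /* Start a single measurement: set the mode, then the trigger field. */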
+ ret = regmap_field_write(data->rf.mode, 1);
+ if (ret) {
+ dev_err(dev, "Failed to write mode ret: %d", ret);
+ goto out;
+ }
+
+ ret = regmap_field_write(data->rf.trig, 1);
+ if (ret) {
+ dev_err(dev, "Failed to write trigger ret: %d", ret);
+ goto out;
+ }
+
+ /* Sleep the integration time + 12.5% to ensure completion. */
+ fsleep(it_usec + it_usec / 8);
+
+ ret = veml6046x00_wait_data_available(iio, it_usec * 4);
+ if (ret < 0)
+ goto out;
+ if (ret == 0) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ if (!iio_device_claim_direct(iio)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = regmap_bulk_read(data->regmap, addr, &reg, sizeof(reg));
+ iio_device_release_direct(iio);
+ if (ret)
+ goto out;
+
+ *val = le16_to_cpu(reg);
+
+ ret = IIO_VAL_INT;
+
+out:
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+static int veml6046x00_read_raw(struct iio_dev *iio,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct veml6046x00_data *data = iio_priv(iio);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (chan->type != IIO_INTENSITY)
+ return -EINVAL;
+ return veml6046x00_single_read(iio, chan->channel2, val);
+ case IIO_CHAN_INFO_INT_TIME:
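+ /* Integration time is reported as 0 s plus a microseconds part. */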
+ *val = 0;
+ return veml6046x00_get_it_usec(data, val2);
+ case IIO_CHAN_INFO_SCALE:
+ return veml6046x00_get_scale(data, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int veml6046x00_read_avail(struct iio_dev *iio,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ struct veml6046x00_data *data = iio_priv(iio);
+ int it_idx;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ *vals = (int *)&veml6046x00_it;
+ *length = 2 * ARRAY_SIZE(veml6046x00_it);
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_SCALE:
+ it_idx = veml6046x00_get_it_index(data);
+ if (it_idx < 0)
+ return it_idx;
+ *vals = (int *)&veml6046x00_it_gains[it_idx];
+ *length = 2 * ARRAY_SIZE(veml6046x00_it_gains[it_idx]);
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int veml6046x00_write_raw(struct iio_dev *iio,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ return veml6046x00_set_it(iio, val, val2);
+ case IIO_CHAN_INFO_SCALE:
+ return veml6046x00_set_scale(iio, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info veml6046x00_info_no_irq = {
+ .read_raw = veml6046x00_read_raw,
+ .read_avail = veml6046x00_read_avail,
+ .write_raw = veml6046x00_write_raw,
+};
+
+static int veml6046x00_buffer_preenable(struct iio_dev *iio)
+{
+ struct veml6046x00_data *data = iio_priv(iio);
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+
+ ret = regmap_field_write(data->rf.mode, 0);
+ if (ret) {
+ dev_err(dev, "Failed to set mode %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_field_write(data->rf.trig, 0);
+ if (ret) {
+ /*
+ * No unrolling of mode, as it is set appropriately with the next
+ * single read.
+ */
+ dev_err(dev, "Failed to set trigger %d\n", ret);
+ return ret;
+ }
+
+ return pm_runtime_resume_and_get(dev);
+}
+
+static int veml6046x00_buffer_postdisable(struct iio_dev *iio)
+{
+ struct veml6046x00_data *data = iio_priv(iio);
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+
+ ret = regmap_field_write(data->rf.mode, 1);
+ if (ret) {
+ dev_err(dev, "Failed to set mode %d\n", ret);
+ return ret;
+ }
+
+ pm_runtime_put_autosuspend(dev);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops veml6046x00_buffer_setup_ops = {
+ .preenable = veml6046x00_buffer_preenable,
+ .postdisable = veml6046x00_buffer_postdisable,
+};
+
+static irqreturn_t veml6046x00_trig_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *iio = pf->indio_dev;
+ struct veml6046x00_data *data = iio_priv(iio);
+ int ret;
+ struct {
+ __le16 chans[4];
+ aligned_s64 timestamp;
+ } scan;
+
+ ret = regmap_bulk_read(data->regmap, VEML6046X00_REG_R,
+ &scan.chans, sizeof(scan.chans));
+ if (ret)
+ goto done;
+
+ iio_push_to_buffers_with_ts(iio, &scan, sizeof(scan),
+ iio_get_time_ns(iio));
+
+done:
+ iio_trigger_notify_done(iio->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int veml6046x00_validate_part_id(struct veml6046x00_data *data)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ unsigned int part_id;
+ int ret;
+ __le16 reg;
+
+ ret = regmap_bulk_read(data->regmap, VEML6046X00_REG_ID,
+ &reg, sizeof(reg));
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read ID\n");
+
+ part_id = le16_to_cpu(reg);
+ if (part_id != 0x01)
+ dev_info(dev, "Unknown ID %#04x\n", part_id);
+
+ return 0;
+}
+
+static int veml6046x00_setup_device(struct iio_dev *iio)
+{
+ struct veml6046x00_data *data = iio_priv(iio);
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+ __le16 reg16;
+
+ reg16 = cpu_to_le16(VEML6046X00_CONF0_AF);
+ ret = regmap_bulk_write(data->regmap, VEML6046X00_REG_CONF0,
+ &reg16, sizeof(reg16));
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to set configuration\n");
+
+ reg16 = cpu_to_le16(0);
+ ret = regmap_bulk_write(data->regmap, VEML6046X00_REG_THDL,
+ &reg16, sizeof(reg16));
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to set low threshold\n");
+
+ reg16 = cpu_to_le16(U16_MAX);
+ ret = regmap_bulk_write(data->regmap, VEML6046X00_REG_THDH,
+ &reg16, sizeof(reg16));
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to set high threshold\n");
+
+ ret = regmap_bulk_read(data->regmap, VEML6046X00_REG_INT,
+ &reg16, sizeof(reg16));
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to clear interrupts\n");
+
+ return 0;
+}
+
+static int veml6046x00_probe(struct i2c_client *i2c)
+{
+ struct device *dev = &i2c->dev;
+ struct veml6046x00_data *data;
+ struct iio_dev *iio;
+ struct regmap *regmap;
+ int ret;
+
+ regmap = devm_regmap_init_i2c(i2c, &veml6046x00_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap), "Failed to set regmap\n");
+
+ iio = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!iio)
+ return -ENOMEM;
+
+ data = iio_priv(iio);
+ /* struct iio_dev is retrieved via dev_get_drvdata(). */
+ i2c_set_clientdata(i2c, iio);
+ data->regmap = regmap;
+
+ ret = veml6046x00_regfield_init(data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init regfield\n");
+
+ ret = devm_regulator_get_enable(dev, "vdd");
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable regulator\n");
+
+ /* Bring the device into a known state and switch it on. */
+ ret = veml6046x00_setup_device(iio);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, veml6046x00_shutdown_action, data);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to add shut down action\n");
+
+ ret = pm_runtime_set_active(dev);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to activate PM runtime\n");
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable PM runtime\n");
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_autosuspend_delay(dev, VEML6046X00_AUTOSUSPEND_MS);
+ pm_runtime_use_autosuspend(dev);
+
+ ret = veml6046x00_validate_part_id(data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to validate device ID\n");
+
+ iio->name = "veml6046x00";
+ iio->channels = veml6046x00_channels;
+ iio->num_channels = ARRAY_SIZE(veml6046x00_channels);
+ iio->modes = INDIO_DIRECT_MODE;
+
+ iio->info = &veml6046x00_info_no_irq;
+
+ ret = devm_iio_triggered_buffer_setup(dev, iio, NULL,
+ veml6046x00_trig_handler,
+ &veml6046x00_buffer_setup_ops);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register triggered buffer");
+
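+ /*
+ * Setup is complete: drop the reference taken with
+ * pm_runtime_get_noresume() and let the device autosuspend when idle.
+ */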
+ pm_runtime_put_autosuspend(dev);
+
+ ret = devm_iio_device_register(dev, iio);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register iio device");
+
+ return 0;
+}
+
+static int veml6046x00_runtime_suspend(struct device *dev)
+{
+ struct veml6046x00_data *data = iio_priv(dev_get_drvdata(dev));
+
+ return veml6046x00_shutdown(data);
+}
+
+static int veml6046x00_runtime_resume(struct device *dev)
+{
+ struct veml6046x00_data *data = iio_priv(dev_get_drvdata(dev));
+
+ return veml6046x00_power_on(data);
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(veml6046x00_pm_ops,
+ veml6046x00_runtime_suspend,
+ veml6046x00_runtime_resume, NULL);
+
+static const struct of_device_id veml6046x00_of_match[] = {
+ { .compatible = "vishay,veml6046x00" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, veml6046x00_of_match);
+
+static const struct i2c_device_id veml6046x00_id[] = {
+ { "veml6046x00" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, veml6046x00_id);
+
+static struct i2c_driver veml6046x00_driver = {
+ .driver = {
+ .name = "veml6046x00",
+ .of_match_table = veml6046x00_of_match,
+ .pm = pm_ptr(&veml6046x00_pm_ops),
+ },
+ .probe = veml6046x00_probe,
+ .id_table = veml6046x00_id,
+};
+module_i2c_driver(veml6046x00_driver);
+
+MODULE_AUTHOR("Andreas Klinger <ak@it-klinger.de>");
+MODULE_DESCRIPTION("VEML6046X00 RGBIR Color Sensor");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/vl6180.c b/drivers/iio/light/vl6180.c
index cc4f2e5404aa..c1314b144367 100644
--- a/drivers/iio/light/vl6180.c
+++ b/drivers/iio/light/vl6180.c
@@ -96,11 +96,6 @@ struct vl6180_data {
unsigned int als_it_ms;
unsigned int als_meas_rate;
unsigned int range_meas_rate;
-
- struct {
- u16 chan[2];
- aligned_s64 timestamp;
- } scan;
};
enum { VL6180_ALS, VL6180_RANGE, VL6180_PROX };
@@ -545,6 +540,11 @@ static irqreturn_t vl6180_trigger_handler(int irq, void *priv)
struct vl6180_data *data = iio_priv(indio_dev);
s64 time_ns = iio_get_time_ns(indio_dev);
int ret, bit, i = 0;
+ struct {
+ u16 chan[2];
+ aligned_s64 timestamp;
+ } scan = { };
+
iio_for_each_active_channel(indio_dev, bit) {
if (vl6180_chan_regs_table[bit].word)
@@ -560,10 +560,10 @@ static irqreturn_t vl6180_trigger_handler(int irq, void *priv)
return IRQ_HANDLED;
}
- data->scan.chan[i++] = ret;
+ scan.chan[i++] = ret;
}
- iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, time_ns);
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan), time_ns);
iio_trigger_notify_done(indio_dev->trig);
/* Clear the interrupt flag after data read */
@@ -722,7 +722,7 @@ static int vl6180_probe(struct i2c_client *client)
IRQF_ONESHOT,
indio_dev->name, indio_dev);
if (ret)
- return dev_err_probe(&client->dev, ret, "devm_request_irq error \n");
+ return dev_err_probe(&client->dev, ret, "devm_request_irq error\n");
init_completion(&data->completion);
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index 3debf1320ad1..81b812a29044 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -123,7 +123,7 @@ config HID_SENSOR_MAGNETOMETER_3D
select IIO_BUFFER
select HID_SENSOR_IIO_COMMON
select HID_SENSOR_IIO_TRIGGER
- tristate "HID Magenetometer 3D"
+ tristate "HID Magnetometer 3D"
help
Say yes here to build support for the HID SENSOR
Magnetometer 3D.
@@ -173,6 +173,19 @@ config IIO_ST_MAGN_SPI_3AXIS
To compile this driver as a module, choose M here. The module
will be called st_magn_spi.
+config INFINEON_TLV493D
+ tristate "Infineon TLV493D Low-Power 3D Magnetic Sensor"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say Y here to add support for the Infineon TLV493D-A1B6 Low-
+ Power 3D Magnetic Sensor.
+
+ This driver can also be compiled as a module.
+ To compile this driver as a module, choose M here: the module
+ will be called tlv493d.
+
config SENSORS_HMC5843
tristate
select IIO_BUFFER
diff --git a/drivers/iio/magnetometer/Makefile b/drivers/iio/magnetometer/Makefile
index 9297723a97d8..dfe970fcacb8 100644
--- a/drivers/iio/magnetometer/Makefile
+++ b/drivers/iio/magnetometer/Makefile
@@ -23,6 +23,8 @@ st_magn-$(CONFIG_IIO_BUFFER) += st_magn_buffer.o
obj-$(CONFIG_IIO_ST_MAGN_I2C_3AXIS) += st_magn_i2c.o
obj-$(CONFIG_IIO_ST_MAGN_SPI_3AXIS) += st_magn_spi.o
+obj-$(CONFIG_INFINEON_TLV493D) += tlv493d.o
+
obj-$(CONFIG_SENSORS_HMC5843) += hmc5843_core.o
obj-$(CONFIG_SENSORS_HMC5843_I2C) += hmc5843_i2c.o
obj-$(CONFIG_SENSORS_HMC5843_SPI) += hmc5843_spi.o
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index 947fe8a475f2..68ece700c7ce 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -583,7 +583,6 @@ static int ak8974_measure_channel(struct ak8974 *ak8974, unsigned long address,
*val = (s16)le16_to_cpu(hw_values[address]);
out_unlock:
mutex_unlock(&ak8974->lock);
- pm_runtime_mark_last_busy(&ak8974->i2c->dev);
pm_runtime_put_autosuspend(&ak8974->i2c->dev);
return ret;
@@ -678,7 +677,6 @@ static void ak8974_fill_buffer(struct iio_dev *indio_dev)
out_unlock:
mutex_unlock(&ak8974->lock);
- pm_runtime_mark_last_busy(&ak8974->i2c->dev);
pm_runtime_put_autosuspend(&ak8974->i2c->dev);
}
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index a1e92b2abffd..3fd0171e5d69 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -775,7 +775,6 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
mutex_unlock(&data->lock);
- pm_runtime_mark_last_busy(&data->client->dev);
pm_runtime_put_autosuspend(&data->client->dev);
/* Swap bytes and convert to valid range. */
diff --git a/drivers/iio/magnetometer/als31300.c b/drivers/iio/magnetometer/als31300.c
index f72af829715f..2a2677428ed5 100644
--- a/drivers/iio/magnetometer/als31300.c
+++ b/drivers/iio/magnetometer/als31300.c
@@ -140,7 +140,6 @@ static int als31300_get_measure(struct als31300_data *data,
*z = ALS31300_DATA_Z_GET(buf);
out:
- pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
return ret;
@@ -156,7 +155,6 @@ static int als31300_read_raw(struct iio_dev *indio_dev,
int ret;
switch (mask) {
- case IIO_CHAN_INFO_PROCESSED:
case IIO_CHAN_INFO_RAW:
ret = als31300_get_measure(data, &t, &x, &y, &z);
if (ret)
@@ -373,7 +371,7 @@ static int als31300_probe(struct i2c_client *i2c)
ret = devm_add_action_or_reset(dev, als31300_power_down, data);
if (ret)
- return dev_err_probe(dev, ret, "failed to add powerdown action\n");
+ return ret;
indio_dev->info = &als31300_info;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -401,7 +399,6 @@ static int als31300_probe(struct i2c_client *i2c)
pm_runtime_set_autosuspend_delay(dev, 200);
pm_runtime_use_autosuspend(dev);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
ret = devm_iio_device_register(dev, indio_dev);
diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
index 761daead5ada..6a73f6e2f1f0 100644
--- a/drivers/iio/magnetometer/bmc150_magn.c
+++ b/drivers/iio/magnetometer/bmc150_magn.c
@@ -257,22 +257,17 @@ static int bmc150_magn_set_power_mode(struct bmc150_magn_data *data,
static int bmc150_magn_set_power_state(struct bmc150_magn_data *data, bool on)
{
-#ifdef CONFIG_PM
- int ret;
+ int ret = 0;
- if (on) {
+ if (on)
ret = pm_runtime_resume_and_get(data->dev);
- } else {
- pm_runtime_mark_last_busy(data->dev);
- ret = pm_runtime_put_autosuspend(data->dev);
- }
-
+ else
+ pm_runtime_put_autosuspend(data->dev);
if (ret < 0) {
dev_err(data->dev,
"failed to change power state to %d\n", on);
return ret;
}
-#endif
return 0;
}
diff --git a/drivers/iio/magnetometer/tlv493d.c b/drivers/iio/magnetometer/tlv493d.c
new file mode 100644
index 000000000000..ec53fd40277b
--- /dev/null
+++ b/drivers/iio/magnetometer/tlv493d.c
@@ -0,0 +1,526 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for the Infineon TLV493D Low-Power 3D Magnetic Sensor
+ *
+ * Copyright (C) 2025 Dixit Parmar <dixitparmar19@gmail.com>
+ */
+
+#include <linux/array_size.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/i2c.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+/*
+ * TLV493D sensor I2C communication note:
+ *
+ * The sensor supports only direct byte-stream writes starting at register
+ * address 0x0, so any modification of a write register requires writing
+ * the whole block of write registers from address 0x0 onwards.
+ * The I2C write frame must not contain the register address; it carries
+ * only the raw byte stream for the write registers.
+ * I2C Frame: |S|SlaveAddr Wr|Ack|Byte[0]|Ack|Byte[1]|Ack|.....|Sp|
+ *
+ * Likewise, reads are performed starting from register address 0x0 for
+ * as many bytes as need to be read.
+ * The I2C read frame must not contain the register address either.
+ * I2C Frame: |S|SlaveAddr Rd|Ack|Byte[0]|Ack|Byte[1]|Ack|.....|Sp|
+ */
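+
+/*
+ * As a consequence, every configuration change in this driver rewrites
+ * the whole write-register shadow (wr_regs[]) with a single
+ * i2c_master_send() in tlv493d_write_all_regs().
+ */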
+
+#define TLV493D_RD_REG_BX 0x00
+#define TLV493D_RD_REG_BY 0x01
+#define TLV493D_RD_REG_BZ 0x02
+#define TLV493D_RD_REG_TEMP 0x03
+#define TLV493D_RD_REG_BX2 0x04
+#define TLV493D_RD_REG_BZ2 0x05
+#define TLV493D_RD_REG_TEMP2 0x06
+#define TLV493D_RD_REG_RES1 0x07
+#define TLV493D_RD_REG_RES2 0x08
+#define TLV493D_RD_REG_RES3 0x09
+#define TLV493D_RD_REG_MAX 0x0a
+
+#define TLV493D_WR_REG_MODE1 0x01
+#define TLV493D_WR_REG_MODE2 0x03
+#define TLV493D_WR_REG_MAX 0x04
+
+#define TLV493D_BX_MAG_X_AXIS_MSB GENMASK(7, 0)
+#define TLV493D_BX2_MAG_X_AXIS_LSB GENMASK(7, 4)
+#define TLV493D_BY_MAG_Y_AXIS_MSB GENMASK(7, 0)
+#define TLV493D_BX2_MAG_Y_AXIS_LSB GENMASK(3, 0)
+#define TLV493D_BZ_MAG_Z_AXIS_MSB GENMASK(7, 0)
+#define TLV493D_BZ2_MAG_Z_AXIS_LSB GENMASK(3, 0)
+#define TLV493D_TEMP_TEMP_MSB GENMASK(7, 4)
+#define TLV493D_TEMP2_TEMP_LSB GENMASK(7, 0)
+#define TLV493D_TEMP_CHANNEL GENMASK(1, 0)
+#define TLV493D_MODE1_MOD_LOWFAST GENMASK(1, 0)
+#define TLV493D_MODE2_LP_PERIOD BIT(6)
+#define TLV493D_RD_REG_RES1_WR_MASK GENMASK(4, 3)
+#define TLV493D_RD_REG_RES2_WR_MASK GENMASK(7, 0)
+#define TLV493D_RD_REG_RES3_WR_MASK GENMASK(4, 0)
+
+enum tlv493d_channels {
+ TLV493D_AXIS_X,
+ TLV493D_AXIS_Y,
+ TLV493D_AXIS_Z,
+ TLV493D_TEMPERATURE,
+};
+
+enum tlv493d_op_mode {
+ TLV493D_OP_MODE_POWERDOWN,
+ TLV493D_OP_MODE_FAST,
+ TLV493D_OP_MODE_LOWPOWER,
+ TLV493D_OP_MODE_ULTRA_LOWPOWER,
+ TLV493D_OP_MODE_MASTERCONTROLLED,
+};
+
+struct tlv493d_data {
+ struct i2c_client *client;
+ /* Protects against concurrent sensor access and register reads. */
+ struct mutex lock;
+ enum tlv493d_op_mode mode;
+ u8 wr_regs[TLV493D_WR_REG_MAX];
+};
+
+/*
+ * Each mode has its own measurement sampling time, which is used to
+ * derive the sleep and timeout when polling data from the sensor.
+ * Power-down mode: no measurement.
+ * Fast mode: freq: 3.3 kHz. Measurement time: 305 usec.
+ * Low-power mode: freq: 100 Hz. Measurement time: 10 msec.
+ * Ultra low-power mode: freq: 10 Hz. Measurement time: 100 msec.
+ * Master controlled mode: freq: 3.3 kHz. Measurement time: 305 usec.
+ */
+static const u32 tlv493d_sample_rate_us[] = {
+ [TLV493D_OP_MODE_POWERDOWN] = 0,
+ [TLV493D_OP_MODE_FAST] = 305,
+ [TLV493D_OP_MODE_LOWPOWER] = 10 * USEC_PER_MSEC,
+ [TLV493D_OP_MODE_ULTRA_LOWPOWER] = 100 * USEC_PER_MSEC,
+ [TLV493D_OP_MODE_MASTERCONTROLLED] = 305,
+};
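+
+/*
+ * tlv493d_get_measurements() uses these values as its poll interval and,
+ * times three, as its poll timeout: e.g. 10 ms and 30 ms in low-power mode.
+ */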
+
+static int tlv493d_write_all_regs(struct tlv493d_data *data)
+{
+ int ret;
+ struct device *dev = &data->client->dev;
+
+ ret = i2c_master_send(data->client, data->wr_regs, ARRAY_SIZE(data->wr_regs));
+ if (ret < 0) {
+ dev_err(dev, "i2c write registers failed, error: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tlv493d_set_operating_mode(struct tlv493d_data *data, enum tlv493d_op_mode mode)
+{
+ u8 *mode1_cfg = &data->wr_regs[TLV493D_WR_REG_MODE1];
+ u8 *mode2_cfg = &data->wr_regs[TLV493D_WR_REG_MODE2];
+
+ switch (mode) {
+ case TLV493D_OP_MODE_POWERDOWN:
+ FIELD_MODIFY(TLV493D_MODE1_MOD_LOWFAST, mode1_cfg, 0);
+ FIELD_MODIFY(TLV493D_MODE2_LP_PERIOD, mode2_cfg, 0);
+ break;
+
+ case TLV493D_OP_MODE_FAST:
+ FIELD_MODIFY(TLV493D_MODE1_MOD_LOWFAST, mode1_cfg, 1);
+ FIELD_MODIFY(TLV493D_MODE2_LP_PERIOD, mode2_cfg, 0);
+ break;
+
+ case TLV493D_OP_MODE_LOWPOWER:
+ FIELD_MODIFY(TLV493D_MODE1_MOD_LOWFAST, mode1_cfg, 2);
+ FIELD_MODIFY(TLV493D_MODE2_LP_PERIOD, mode2_cfg, 1);
+ break;
+
+ case TLV493D_OP_MODE_ULTRA_LOWPOWER:
+ FIELD_MODIFY(TLV493D_MODE1_MOD_LOWFAST, mode1_cfg, 2);
+ FIELD_MODIFY(TLV493D_MODE2_LP_PERIOD, mode2_cfg, 0);
+ break;
+
+ case TLV493D_OP_MODE_MASTERCONTROLLED:
+ FIELD_MODIFY(TLV493D_MODE1_MOD_LOWFAST, mode1_cfg, 3);
+ FIELD_MODIFY(TLV493D_MODE2_LP_PERIOD, mode2_cfg, 0);
+ break;
+ }
+
+ return tlv493d_write_all_regs(data);
+}
+
+static s16 tlv493d_get_channel_data(u8 *b, enum tlv493d_channels ch)
+{
+ u16 val;
+
+ switch (ch) {
+ case TLV493D_AXIS_X:
+ val = FIELD_GET(TLV493D_BX_MAG_X_AXIS_MSB, b[TLV493D_RD_REG_BX]) << 4 |
+ FIELD_GET(TLV493D_BX2_MAG_X_AXIS_LSB, b[TLV493D_RD_REG_BX2]);
+ break;
+ case TLV493D_AXIS_Y:
+ val = FIELD_GET(TLV493D_BY_MAG_Y_AXIS_MSB, b[TLV493D_RD_REG_BY]) << 4 |
+ FIELD_GET(TLV493D_BX2_MAG_Y_AXIS_LSB, b[TLV493D_RD_REG_BX2]);
+ break;
+ case TLV493D_AXIS_Z:
+ val = FIELD_GET(TLV493D_BZ_MAG_Z_AXIS_MSB, b[TLV493D_RD_REG_BZ]) << 4 |
+ FIELD_GET(TLV493D_BZ2_MAG_Z_AXIS_LSB, b[TLV493D_RD_REG_BZ2]);
+ break;
+ case TLV493D_TEMPERATURE:
+ val = FIELD_GET(TLV493D_TEMP_TEMP_MSB, b[TLV493D_RD_REG_TEMP]) << 8 |
+ FIELD_GET(TLV493D_TEMP2_TEMP_LSB, b[TLV493D_RD_REG_TEMP2]);
+ break;
+ }
+
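+ /* All fields are 12-bit two's complement, e.g. 0xfff decodes to -1. */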
+ return sign_extend32(val, 11);
+}
+
+static int tlv493d_get_measurements(struct tlv493d_data *data, s16 *x, s16 *y,
+ s16 *z, s16 *t)
+{
+ u8 buff[7] = {};
+ int err, ret;
+ struct device *dev = &data->client->dev;
+ u32 sleep_us = tlv493d_sample_rate_us[data->mode];
+
+ guard(mutex)(&data->lock);
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Poll until data is valid.
+ * For valid data, the TLV493D_TEMP_CHANNEL bits of TLV493D_RD_REG_TEMP
+ * must be 0. The sampling time depends on the sensor mode.
+ * Poll for up to 3x the sampling time.
+ */
+ ret = read_poll_timeout(i2c_master_recv, err,
+ err < 0 || !FIELD_GET(TLV493D_TEMP_CHANNEL, buff[TLV493D_RD_REG_TEMP]),
+ sleep_us, 3 * sleep_us, false, data->client, buff,
+ ARRAY_SIZE(buff));
+ if (ret) {
+ dev_err(dev, "i2c read poll timeout, error: %d\n", ret);
+ goto out_put_autosuspend;
+ }
+ if (err < 0) {
+ dev_err(dev, "i2c read data failed, error: %d\n", err);
+ ret = err;
+ goto out_put_autosuspend;
+ }
+
+ *x = tlv493d_get_channel_data(buff, TLV493D_AXIS_X);
+ *y = tlv493d_get_channel_data(buff, TLV493D_AXIS_Y);
+ *z = tlv493d_get_channel_data(buff, TLV493D_AXIS_Z);
+ *t = tlv493d_get_channel_data(buff, TLV493D_TEMPERATURE);
+
+out_put_autosuspend:
+ pm_runtime_put_autosuspend(dev);
+ return ret;
+}
+
+static int tlv493d_init(struct tlv493d_data *data)
+{
+ int ret;
+ u8 buff[TLV493D_RD_REG_MAX];
+ struct device *dev = &data->client->dev;
+
+ /*
+ * Sensor initialization requires the following steps:
+ * 1. Power-up sensor.
+ * 2. Read and store read-registers map (0x0-0x9).
+ * 3. Copy values from read reserved registers to write reserved fields
+ * (0x0-0x3).
+ * 4. Set operating mode.
+ * 5. Write to all registers.
+ */
+ ret = i2c_master_recv(data->client, buff, ARRAY_SIZE(buff));
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "i2c read failed\n");
+
+ /* Write register 0x0 is reserved and does not need to be updated. */
+ data->wr_regs[0] = 0;
+ data->wr_regs[1] = buff[TLV493D_RD_REG_RES1] & TLV493D_RD_REG_RES1_WR_MASK;
+ data->wr_regs[2] = buff[TLV493D_RD_REG_RES2] & TLV493D_RD_REG_RES2_WR_MASK;
+ data->wr_regs[3] = buff[TLV493D_RD_REG_RES3] & TLV493D_RD_REG_RES3_WR_MASK;
+
+ ret = tlv493d_set_operating_mode(data, data->mode);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to set operating mode\n");
+
+ return 0;
+}
+
+static int tlv493d_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int *val,
+ int *val2, long mask)
+{
+ struct tlv493d_data *data = iio_priv(indio_dev);
+ s16 x, y, z, t;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = tlv493d_get_measurements(data, &x, &y, &z, &t);
+ if (ret)
+ return ret;
+
+ switch (chan->address) {
+ case TLV493D_AXIS_X:
+ *val = x;
+ return IIO_VAL_INT;
+ case TLV493D_AXIS_Y:
+ *val = y;
+ return IIO_VAL_INT;
+ case TLV493D_AXIS_Z:
+ *val = z;
+ return IIO_VAL_INT;
+ case TLV493D_TEMPERATURE:
+ *val = t;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_MAGN:
+ /*
+ * Magnetic field scale: 0.0098 mT (i.e. 9.8 µT) per LSB.
+ * IIO uses Gauss: 0.0098 mT * 10 = 0.098 Gauss per LSB.
+ */
+ *val = 98;
+ *val2 = 1000;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_TEMP:
+ /*
+ * Temperature scale: 1.1 °C per LSB, expressed as 1100 m°C.
+ * Returned as an integer so that the IIO core applies:
+ * temp = (raw + offset) * scale
+ */
+ *val = 1100;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ switch (chan->type) {
+ case IIO_TEMP:
+ /*
+ * The temperature offset includes the sensor-specific raw offset
+ * plus compensation for the +25 °C bias in the formula:
+ * offset = -raw_offset + (25000 / 1100)
+ * = -340 + 22.72 = -317.28
+ */
+ *val = -31728;
+ *val2 = 100;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static irqreturn_t tlv493d_trigger_handler(int irq, void *ptr)
+{
+ int ret;
+ s16 x, y, z, t;
+ struct iio_poll_func *pf = ptr;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct tlv493d_data *data = iio_priv(indio_dev);
+ struct device *dev = &data->client->dev;
+ struct {
+ s16 channels[3];
+ s16 temperature;
+ aligned_s64 timestamp;
+ } scan;
+
+ ret = tlv493d_get_measurements(data, &x, &y, &z, &t);
+ if (ret) {
+ dev_err(dev, "failed to read sensor data\n");
+ goto out_trigger_notify;
+ }
+
+ scan.channels[0] = x;
+ scan.channels[1] = y;
+ scan.channels[2] = z;
+ scan.temperature = t;
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan), pf->timestamp);
+
+out_trigger_notify:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+#define TLV493D_AXIS_CHANNEL(axis, index) \
+ { \
+ .type = IIO_MAGN, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .address = index, \
+ .scan_index = index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ }, \
+ }
+
+static const struct iio_chan_spec tlv493d_channels[] = {
+ TLV493D_AXIS_CHANNEL(X, TLV493D_AXIS_X),
+ TLV493D_AXIS_CHANNEL(Y, TLV493D_AXIS_Y),
+ TLV493D_AXIS_CHANNEL(Z, TLV493D_AXIS_Z),
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .address = TLV493D_TEMPERATURE,
+ .scan_index = TLV493D_TEMPERATURE,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 12,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
+static const struct iio_info tlv493d_info = {
+ .read_raw = tlv493d_read_raw,
+};
+
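+/* One measurement delivers all channels, so they always scan together. */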
+static const unsigned long tlv493d_scan_masks[] = { GENMASK(3, 0), 0 };
+
+static int tlv493d_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct iio_dev *indio_dev;
+ struct tlv493d_data *data;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ i2c_set_clientdata(client, indio_dev);
+
+ ret = devm_mutex_init(dev, &data->lock);
+ if (ret)
+ return ret;
+
+ ret = devm_regulator_get_enable(dev, "vdd");
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to enable regulator\n");
+
+ /*
+ * Set the sensor's default operating mode to master-controlled mode,
+ * since it performs a measurement cycle only on request and stays in
+ * the power-down state until the next cycle is initiated.
+ */
+ data->mode = TLV493D_OP_MODE_MASTERCONTROLLED;
+ ret = tlv493d_init(data);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to initialize\n");
+
+ indio_dev->info = &tlv493d_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = client->name;
+ indio_dev->channels = tlv493d_channels;
+ indio_dev->num_channels = ARRAY_SIZE(tlv493d_channels);
+ indio_dev->available_scan_masks = tlv493d_scan_masks;
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ iio_pollfunc_store_time,
+ tlv493d_trigger_handler,
+ NULL);
+ if (ret)
+ return dev_err_probe(dev, ret, "iio triggered buffer setup failed\n");
+
+ ret = pm_runtime_set_active(dev);
+ if (ret)
+ return ret;
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_autosuspend_delay(dev, 500);
+ pm_runtime_use_autosuspend(dev);
+
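+ /* Drop the pm_runtime_get_noresume() reference and allow autosuspend. */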
+ pm_runtime_put_autosuspend(dev);
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "iio device register failed\n");
+
+ return 0;
+}
+
+static int tlv493d_runtime_suspend(struct device *dev)
+{
+ struct tlv493d_data *data = iio_priv(dev_get_drvdata(dev));
+
+ return tlv493d_set_operating_mode(data, TLV493D_OP_MODE_POWERDOWN);
+}
+
+static int tlv493d_runtime_resume(struct device *dev)
+{
+ struct tlv493d_data *data = iio_priv(dev_get_drvdata(dev));
+
+ return tlv493d_set_operating_mode(data, data->mode);
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(tlv493d_pm_ops, tlv493d_runtime_suspend,
+ tlv493d_runtime_resume, NULL);
+
+static const struct i2c_device_id tlv493d_id[] = {
+ { "tlv493d" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tlv493d_id);
+
+static const struct of_device_id tlv493d_of_match[] = {
+ { .compatible = "infineon,tlv493d-a1b6" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tlv493d_of_match);
+
+static struct i2c_driver tlv493d_driver = {
+ .driver = {
+ .name = "tlv493d",
+ .of_match_table = tlv493d_of_match,
+ .pm = pm_ptr(&tlv493d_pm_ops),
+ },
+ .probe = tlv493d_probe,
+ .id_table = tlv493d_id,
+};
+module_i2c_driver(tlv493d_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Infineon TLV493D Low-Power 3D Magnetic Sensor");
+MODULE_AUTHOR("Dixit Parmar <dixitparmar19@gmail.com>");
diff --git a/drivers/iio/magnetometer/tmag5273.c b/drivers/iio/magnetometer/tmag5273.c
index 2ca5c26f0091..2adc3c036ab4 100644
--- a/drivers/iio/magnetometer/tmag5273.c
+++ b/drivers/iio/magnetometer/tmag5273.c
@@ -287,7 +287,6 @@ static int tmag5273_read_raw(struct iio_dev *indio_dev,
int ret;
switch (mask) {
- case IIO_CHAN_INFO_PROCESSED:
case IIO_CHAN_INFO_RAW:
ret = pm_runtime_resume_and_get(data->dev);
if (ret < 0)
@@ -295,7 +294,6 @@ static int tmag5273_read_raw(struct iio_dev *indio_dev,
ret = tmag5273_get_measure(data, &t, &x, &y, &z, &angle, &magnitude);
- pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
if (ret)
@@ -642,7 +640,7 @@ static int tmag5273_probe(struct i2c_client *i2c)
*/
ret = devm_add_action_or_reset(dev, tmag5273_power_down, data);
if (ret)
- return dev_err_probe(dev, ret, "failed to add powerdown action\n");
+ return ret;
ret = pm_runtime_set_active(dev);
if (ret < 0)
@@ -668,7 +666,6 @@ static int tmag5273_probe(struct i2c_client *i2c)
indio_dev->channels = tmag5273_channels;
indio_dev->num_channels = ARRAY_SIZE(tmag5273_channels);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
ret = devm_iio_device_register(dev, indio_dev);
diff --git a/drivers/iio/magnetometer/yamaha-yas530.c b/drivers/iio/magnetometer/yamaha-yas530.c
index 340607111d9a..d49e37edcbed 100644
--- a/drivers/iio/magnetometer/yamaha-yas530.c
+++ b/drivers/iio/magnetometer/yamaha-yas530.c
@@ -623,7 +623,6 @@ static int yas5xx_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_RAW:
pm_runtime_get_sync(yas5xx->dev);
ret = ci->get_measure(yas5xx, &t, &x, &y, &z);
- pm_runtime_mark_last_busy(yas5xx->dev);
pm_runtime_put_autosuspend(yas5xx->dev);
if (ret)
return ret;
@@ -664,7 +663,6 @@ static void yas5xx_fill_buffer(struct iio_dev *indio_dev)
pm_runtime_get_sync(yas5xx->dev);
ret = ci->get_measure(yas5xx, &t, &x, &y, &z);
- pm_runtime_mark_last_busy(yas5xx->dev);
pm_runtime_put_autosuspend(yas5xx->dev);
if (ret) {
dev_err(yas5xx->dev, "error refilling buffer\n");
diff --git a/drivers/iio/potentiostat/lmp91000.c b/drivers/iio/potentiostat/lmp91000.c
index 030498d0b763..eccc2a34358f 100644
--- a/drivers/iio/potentiostat/lmp91000.c
+++ b/drivers/iio/potentiostat/lmp91000.c
@@ -321,10 +321,8 @@ static int lmp91000_probe(struct i2c_client *client)
data->trig = devm_iio_trigger_alloc(dev, "%s-mux%d",
indio_dev->name,
iio_device_id(indio_dev));
- if (!data->trig) {
- dev_err(dev, "cannot allocate iio trigger.\n");
+ if (!data->trig)
return -ENOMEM;
- }
init_completion(&data->completion);
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index 74505c9ec1a0..c04e8bb4c993 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -752,7 +752,6 @@ static int bmp280_read_raw(struct iio_dev *indio_dev,
pm_runtime_get_sync(data->dev);
ret = bmp280_read_raw_impl(indio_dev, chan, val, val2, mask);
- pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
return ret;
@@ -927,7 +926,6 @@ static int bmp280_write_raw(struct iio_dev *indio_dev,
pm_runtime_get_sync(data->dev);
ret = bmp280_write_raw_impl(indio_dev, chan, val, val2, mask);
- pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
return ret;
@@ -2255,7 +2253,6 @@ static int bmp580_nvmem_read(void *priv, unsigned int offset, void *val,
pm_runtime_get_sync(data->dev);
ret = bmp580_nvmem_read_impl(priv, offset, val, bytes);
- pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
return ret;
@@ -2330,7 +2327,6 @@ static int bmp580_nvmem_write(void *priv, unsigned int offset, void *val,
pm_runtime_get_sync(data->dev);
ret = bmp580_nvmem_write_impl(priv, offset, val, bytes);
- pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
return ret;
@@ -3120,7 +3116,6 @@ static int bmp280_buffer_postdisable(struct iio_dev *indio_dev)
{
struct bmp280_data *data = iio_priv(indio_dev);
- pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
return 0;
@@ -3213,11 +3208,11 @@ int bmp280_common_probe(struct device *dev,
/* Bring chip out of reset if there is an assigned GPIO line */
gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpiod))
+ return dev_err_probe(dev, PTR_ERR(gpiod), "failed to get reset GPIO\n");
+
/* Deassert the signal */
- if (gpiod) {
- dev_info(dev, "release reset\n");
- gpiod_set_value(gpiod, 0);
- }
+ gpiod_set_value_cansleep(gpiod, 0);
data->regmap = regmap;
diff --git a/drivers/iio/pressure/dlhl60d.c b/drivers/iio/pressure/dlhl60d.c
index 6a13cf2eaf50..8bad7162fec6 100644
--- a/drivers/iio/pressure/dlhl60d.c
+++ b/drivers/iio/pressure/dlhl60d.c
@@ -289,10 +289,8 @@ static int dlh_probe(struct i2c_client *client)
}
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
- if (!indio_dev) {
- dev_err(&client->dev, "failed to allocate iio device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
i2c_set_clientdata(client, indio_dev);
diff --git a/drivers/iio/pressure/icp10100.c b/drivers/iio/pressure/icp10100.c
index 1951c1cc84cf..3d83d0098a57 100644
--- a/drivers/iio/pressure/icp10100.c
+++ b/drivers/iio/pressure/icp10100.c
@@ -265,7 +265,6 @@ static int icp10100_get_measures(struct icp10100_state *st,
(be16_to_cpu(measures[1]) >> 8);
*temperature = be16_to_cpu(measures[2]);
- pm_runtime_mark_last_busy(&st->client->dev);
error_measure:
pm_runtime_put_autosuspend(&st->client->dev);
return ret;
diff --git a/drivers/iio/pressure/mpl115.c b/drivers/iio/pressure/mpl115.c
index 71beb28b7f2c..830a5065c008 100644
--- a/drivers/iio/pressure/mpl115.c
+++ b/drivers/iio/pressure/mpl115.c
@@ -108,7 +108,6 @@ static int mpl115_read_raw(struct iio_dev *indio_dev,
ret = mpl115_comp_pressure(data, val, val2);
if (ret < 0)
return ret;
- pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
return IIO_VAL_INT_PLUS_MICRO;
@@ -118,7 +117,6 @@ static int mpl115_read_raw(struct iio_dev *indio_dev,
ret = mpl115_read_temp(data);
if (ret < 0)
return ret;
- pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
*val = ret >> 6;
diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
index 6eef37c0952d..4923a558a26a 100644
--- a/drivers/iio/pressure/zpa2326.c
+++ b/drivers/iio/pressure/zpa2326.c
@@ -697,7 +697,6 @@ static void zpa2326_suspend(struct iio_dev *indio_dev)
zpa2326_sleep(indio_dev);
- pm_runtime_mark_last_busy(parent);
pm_runtime_put_autosuspend(parent);
}
@@ -708,7 +707,6 @@ static void zpa2326_init_runtime(struct device *parent)
pm_runtime_enable(parent);
pm_runtime_set_autosuspend_delay(parent, 1000);
pm_runtime_use_autosuspend(parent);
- pm_runtime_mark_last_busy(parent);
pm_runtime_put_autosuspend(parent);
}
diff --git a/drivers/iio/proximity/d3323aa.c b/drivers/iio/proximity/d3323aa.c
index d4c3dbea9bb0..30821f583454 100644
--- a/drivers/iio/proximity/d3323aa.c
+++ b/drivers/iio/proximity/d3323aa.c
@@ -722,8 +722,7 @@ static int d3323aa_probe(struct platform_device *pdev)
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
- return dev_err_probe(dev, -ENOMEM,
- "Could not allocate iio device\n");
+ return -ENOMEM;
data = iio_priv(indio_dev);
data->dev = dev;
diff --git a/drivers/iio/proximity/hx9023s.c b/drivers/iio/proximity/hx9023s.c
index 33781c314728..2918dfc0df54 100644
--- a/drivers/iio/proximity/hx9023s.c
+++ b/drivers/iio/proximity/hx9023s.c
@@ -1141,8 +1141,7 @@ static int hx9023s_probe(struct i2c_client *client)
indio_dev->name,
iio_device_id(indio_dev));
if (!data->trig)
- return dev_err_probe(dev, -ENOMEM,
- "iio trigger alloc failed\n");
+ return -ENOMEM;
data->trig->ops = &hx9023s_trigger_ops;
iio_trigger_set_drvdata(data->trig, indio_dev);
diff --git a/drivers/iio/proximity/irsd200.c b/drivers/iio/proximity/irsd200.c
index 253e4aef22fb..65af31d43453 100644
--- a/drivers/iio/proximity/irsd200.c
+++ b/drivers/iio/proximity/irsd200.c
@@ -862,8 +862,7 @@ static int irsd200_probe(struct i2c_client *client)
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
- return dev_err_probe(&client->dev, -ENOMEM,
- "Could not allocate iio device\n");
+ return -ENOMEM;
data = iio_priv(indio_dev);
data->dev = &client->dev;
@@ -916,8 +915,7 @@ static int irsd200_probe(struct i2c_client *client)
trigger = devm_iio_trigger_alloc(data->dev, "%s-dev%d", indio_dev->name,
iio_device_id(indio_dev));
if (!trigger)
- return dev_err_probe(data->dev, -ENOMEM,
- "Could not allocate iio trigger\n");
+ return -ENOMEM;
trigger->ops = &irsd200_trigger_ops;
iio_trigger_set_drvdata(trigger, data);
diff --git a/drivers/iio/proximity/isl29501.c b/drivers/iio/proximity/isl29501.c
index d1510fe24050..f69db6f2f380 100644
--- a/drivers/iio/proximity/isl29501.c
+++ b/drivers/iio/proximity/isl29501.c
@@ -938,12 +938,18 @@ static irqreturn_t isl29501_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct isl29501_private *isl29501 = iio_priv(indio_dev);
const unsigned long *active_mask = indio_dev->active_scan_mask;
- u32 buffer[4] __aligned(8) = {}; /* 1x16-bit + naturally aligned ts */
-
- if (test_bit(ISL29501_DISTANCE_SCAN_INDEX, active_mask))
- isl29501_register_read(isl29501, REG_DISTANCE, buffer);
+ u32 value;
+ struct {
+ u16 data;
+ aligned_s64 ts;
+ } scan = { };
+
+ if (test_bit(ISL29501_DISTANCE_SCAN_INDEX, active_mask)) {
+ isl29501_register_read(isl29501, REG_DISTANCE, &value);
+ scan.data = value;
+ }
- iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp);
+ iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp);
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
diff --git a/drivers/iio/proximity/mb1232.c b/drivers/iio/proximity/mb1232.c
index 01783486bc7d..34b49c54e68b 100644
--- a/drivers/iio/proximity/mb1232.c
+++ b/drivers/iio/proximity/mb1232.c
@@ -42,11 +42,6 @@ struct mb1232_data {
*/
struct completion ranging;
int irqnr;
- /* Ensure correct alignment of data to push to IIO buffer */
- struct {
- s16 distance;
- aligned_s64 ts;
- } scan;
};
static irqreturn_t mb1232_handle_irq(int irq, void *dev_id)
@@ -120,12 +115,16 @@ static irqreturn_t mb1232_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct mb1232_data *data = iio_priv(indio_dev);
+ struct {
+ s16 distance;
+ aligned_s64 ts;
+ } scan = { };
- data->scan.distance = mb1232_read_distance(data);
- if (data->scan.distance < 0)
+ scan.distance = mb1232_read_distance(data);
+ if (scan.distance < 0)
goto err;
- iio_push_to_buffers_with_ts(indio_dev, &data->scan, sizeof(data->scan),
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
pf->timestamp);
err:
diff --git a/drivers/iio/proximity/ping.c b/drivers/iio/proximity/ping.c
index c5b4e1378b7d..e3487094d7be 100644
--- a/drivers/iio/proximity/ping.c
+++ b/drivers/iio/proximity/ping.c
@@ -280,10 +280,8 @@ static int ping_probe(struct platform_device *pdev)
struct iio_dev *indio_dev;
indio_dev = devm_iio_device_alloc(dev, sizeof(struct ping_data));
- if (!indio_dev) {
- dev_err(dev, "failed to allocate IIO device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
data = iio_priv(indio_dev);
data->dev = dev;
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index 1deaf70e92ce..21336b8f122a 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -43,12 +43,6 @@ struct lidar_data {
int (*xfer)(struct lidar_data *data, u8 reg, u8 *val, int len);
int i2c_enabled;
-
- /* Ensure timestamp is naturally aligned */
- struct {
- u16 chan;
- aligned_s64 timestamp;
- } scan;
};
static const struct iio_chan_spec lidar_channels[] = {
@@ -191,7 +185,6 @@ static int lidar_get_measurement(struct lidar_data *data, u16 *reg)
}
ret = -EIO;
}
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return ret;
@@ -235,11 +228,14 @@ static irqreturn_t lidar_trigger_handler(int irq, void *private)
struct iio_dev *indio_dev = pf->indio_dev;
struct lidar_data *data = iio_priv(indio_dev);
int ret;
+ struct {
+ u16 chan;
+ aligned_s64 timestamp;
+ } scan = { };
- ret = lidar_get_measurement(data, &data->scan.chan);
+ ret = lidar_get_measurement(data, &scan.chan);
if (!ret) {
- iio_push_to_buffers_with_ts(indio_dev, &data->scan,
- sizeof(data->scan),
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
iio_get_time_ns(indio_dev));
} else if (ret != -EINVAL) {
dev_err(&data->client->dev, "cannot read LIDAR measurement");
diff --git a/drivers/iio/proximity/srf04.c b/drivers/iio/proximity/srf04.c
index b059bac1078b..e97f9a20ac7a 100644
--- a/drivers/iio/proximity/srf04.c
+++ b/drivers/iio/proximity/srf04.c
@@ -117,10 +117,8 @@ static int srf04_read(struct srf04_data *data)
udelay(data->cfg->trigger_pulse_us);
gpiod_set_value(data->gpiod_trig, 0);
- if (data->gpiod_power) {
- pm_runtime_mark_last_busy(data->dev);
+ if (data->gpiod_power)
pm_runtime_put_autosuspend(data->dev);
- }
/* it should not take more than 20 ms until echo is rising */
ret = wait_for_completion_killable_timeout(&data->rising, HZ/50);
@@ -253,10 +251,8 @@ static int srf04_probe(struct platform_device *pdev)
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(struct srf04_data));
- if (!indio_dev) {
- dev_err(dev, "failed to allocate IIO device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
data = iio_priv(indio_dev);
data->dev = dev;
diff --git a/drivers/iio/proximity/srf08.c b/drivers/iio/proximity/srf08.c
index 6e32fdfd161b..d7e4cc48cfbf 100644
--- a/drivers/iio/proximity/srf08.c
+++ b/drivers/iio/proximity/srf08.c
@@ -63,12 +63,6 @@ struct srf08_data {
int range_mm;
struct mutex lock;
- /* Ensure timestamp is naturally aligned */
- struct {
- s16 chan;
- aligned_s64 timestamp;
- } scan;
-
/* Sensor-Type */
enum srf08_sensor_type sensor_type;
@@ -182,16 +176,18 @@ static irqreturn_t srf08_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct srf08_data *data = iio_priv(indio_dev);
- s16 sensor_data;
+ struct {
+ s16 chan;
+ aligned_s64 timestamp;
+ } scan = { };
- sensor_data = srf08_read_ranging(data);
- if (sensor_data < 0)
+ scan.chan = srf08_read_ranging(data);
+ if (scan.chan < 0)
goto err;
mutex_lock(&data->lock);
- data->scan.chan = sensor_data;
- iio_push_to_buffers_with_ts(indio_dev, &data->scan, sizeof(data->scan),
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
pf->timestamp);
mutex_unlock(&data->lock);
diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
index 05844f17a15f..6c67bae7488c 100644
--- a/drivers/iio/proximity/sx9500.c
+++ b/drivers/iio/proximity/sx9500.c
@@ -88,7 +88,6 @@ struct sx9500_data {
bool prox_stat[SX9500_NUM_CHANNELS];
bool event_enabled[SX9500_NUM_CHANNELS];
bool trigger_enabled;
- u16 *buffer;
/* Remember enabled channels and sample rate during suspend. */
unsigned int suspend_ctrl0;
struct completion completion;
@@ -578,22 +577,6 @@ out_unlock:
return ret;
}
-static int sx9500_update_scan_mode(struct iio_dev *indio_dev,
- const unsigned long *scan_mask)
-{
- struct sx9500_data *data = iio_priv(indio_dev);
-
- mutex_lock(&data->mutex);
- kfree(data->buffer);
- data->buffer = kzalloc(indio_dev->scan_bytes, GFP_KERNEL);
- mutex_unlock(&data->mutex);
-
- if (data->buffer == NULL)
- return -ENOMEM;
-
- return 0;
-}
-
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
"2.500000 3.333333 5 6.666666 8.333333 11.111111 16.666666 33.333333");
@@ -612,7 +595,6 @@ static const struct iio_info sx9500_info = {
.write_raw = &sx9500_write_raw,
.read_event_config = &sx9500_read_event_config,
.write_event_config = &sx9500_write_event_config,
- .update_scan_mode = &sx9500_update_scan_mode,
};
static int sx9500_set_trigger_state(struct iio_trigger *trig,
@@ -649,6 +631,10 @@ static irqreturn_t sx9500_trigger_handler(int irq, void *private)
struct iio_dev *indio_dev = pf->indio_dev;
struct sx9500_data *data = iio_priv(indio_dev);
int val, bit, ret, i = 0;
+ struct {
+ u16 chan[SX9500_NUM_CHANNELS];
+ aligned_s64 timestamp;
+ } scan = { };
mutex_lock(&data->mutex);
@@ -658,10 +644,10 @@ static irqreturn_t sx9500_trigger_handler(int irq, void *private)
if (ret < 0)
goto out;
- data->buffer[i++] = val;
+ scan.chan[i++] = val;
}
- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ iio_push_to_buffers_with_timestamp(indio_dev, &scan,
iio_get_time_ns(indio_dev));
out:
@@ -984,7 +970,6 @@ static void sx9500_remove(struct i2c_client *client)
iio_triggered_buffer_cleanup(indio_dev);
if (client->irq > 0)
iio_trigger_unregister(data->trig);
- kfree(data->buffer);
}
static int sx9500_suspend(struct device *dev)
diff --git a/drivers/iio/proximity/vl53l0x-i2c.c b/drivers/iio/proximity/vl53l0x-i2c.c
index ef4aa7b2835e..ad3e46d47fa8 100644
--- a/drivers/iio/proximity/vl53l0x-i2c.c
+++ b/drivers/iio/proximity/vl53l0x-i2c.c
@@ -57,11 +57,6 @@ struct vl53l0x_data {
struct regulator *vdd_supply;
struct gpio_desc *reset_gpio;
struct iio_trigger *trig;
-
- struct {
- u16 chan;
- aligned_s64 timestamp;
- } scan;
};
static int vl53l0x_clear_irq(struct vl53l0x_data *data)
@@ -84,6 +79,10 @@ static irqreturn_t vl53l0x_trigger_handler(int irq, void *priv)
struct vl53l0x_data *data = iio_priv(indio_dev);
u8 buffer[12];
int ret;
+ struct {
+ u16 chan;
+ aligned_s64 timestamp;
+ } scan = { };
ret = i2c_smbus_read_i2c_block_data(data->client,
VL_REG_RESULT_RANGE_STATUS,
@@ -93,8 +92,8 @@ static irqreturn_t vl53l0x_trigger_handler(int irq, void *priv)
else if (ret != 12)
return -EREMOTEIO;
- data->scan.chan = get_unaligned_be16(&buffer[10]);
- iio_push_to_buffers_with_ts(indio_dev, &data->scan, sizeof(data->scan),
+ scan.chan = get_unaligned_be16(&buffer[10]);
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
iio_get_time_ns(indio_dev));
iio_trigger_notify_done(indio_dev->trig);
@@ -312,7 +311,6 @@ static int vl53l0x_probe(struct i2c_client *client)
{
struct vl53l0x_data *data;
struct iio_dev *indio_dev;
- int error;
int ret;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
@@ -345,15 +343,14 @@ static int vl53l0x_probe(struct i2c_client *client)
return dev_err_probe(&client->dev, PTR_ERR(data->reset_gpio),
"Cannot get reset GPIO\n");
- error = vl53l0x_power_on(data);
- if (error)
- return dev_err_probe(&client->dev, error,
+ ret = vl53l0x_power_on(data);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
"Failed to power on the chip\n");
- error = devm_add_action_or_reset(&client->dev, vl53l0x_power_off, data);
- if (error)
- return dev_err_probe(&client->dev, error,
- "Failed to install poweroff action\n");
+ ret = devm_add_action_or_reset(&client->dev, vl53l0x_power_off, data);
+ if (ret)
+ return ret;
indio_dev->name = "vl53l0x";
indio_dev->info = &vl53l0x_info;
diff --git a/drivers/iio/temperature/Kconfig b/drivers/iio/temperature/Kconfig
index 1244d8e17d50..9328b2250ace 100644
--- a/drivers/iio/temperature/Kconfig
+++ b/drivers/iio/temperature/Kconfig
@@ -173,11 +173,13 @@ config MAX31865
will be called max31865.
config MCP9600
- tristate "MCP9600 thermocouple EMF converter"
+ tristate "MCP9600 and similar thermocouple EMF converters"
depends on I2C
help
- If you say yes here you get support for MCP9600
- thermocouple EMF converter connected via I2C.
+ If you say yes here you get support for...
+ - MCP9600
+ - MCP9601
+ ...thermocouple EMF converters connected via I2C.
This driver can also be built as a module. If so, the module
will be called mcp9600.
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
index cae8e84821d7..205939680fd4 100644
--- a/drivers/iio/temperature/maxim_thermocouple.c
+++ b/drivers/iio/temperature/maxim_thermocouple.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/err.h>
#include <linux/spi/spi.h>
+#include <linux/types.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/trigger.h>
@@ -121,8 +122,15 @@ struct maxim_thermocouple_data {
struct spi_device *spi;
const struct maxim_thermocouple_chip *chip;
char tc_type;
-
- u8 buffer[16] __aligned(IIO_DMA_MINALIGN);
+ /* Buffer for reading up to 2 hardware channels. */
+ struct {
+ union {
+ __be16 raw16;
+ __be32 raw32;
+ __be16 raw[2];
+ };
+ aligned_s64 timestamp;
+ } buffer __aligned(IIO_DMA_MINALIGN);
};
static int maxim_thermocouple_read(struct maxim_thermocouple_data *data,
@@ -130,18 +138,16 @@ static int maxim_thermocouple_read(struct maxim_thermocouple_data *data,
{
unsigned int storage_bytes = data->chip->read_size;
unsigned int shift = chan->scan_type.shift + (chan->address * 8);
- __be16 buf16;
- __be32 buf32;
int ret;
switch (storage_bytes) {
case 2:
- ret = spi_read(data->spi, (void *)&buf16, storage_bytes);
- *val = be16_to_cpu(buf16);
+ ret = spi_read(data->spi, &data->buffer.raw16, storage_bytes);
+ *val = be16_to_cpu(data->buffer.raw16);
break;
case 4:
- ret = spi_read(data->spi, (void *)&buf32, storage_bytes);
- *val = be32_to_cpu(buf32);
+ ret = spi_read(data->spi, &data->buffer.raw32, storage_bytes);
+ *val = be32_to_cpu(data->buffer.raw32);
break;
default:
ret = -EINVAL;
@@ -166,9 +172,9 @@ static irqreturn_t maxim_thermocouple_trigger_handler(int irq, void *private)
struct maxim_thermocouple_data *data = iio_priv(indio_dev);
int ret;
- ret = spi_read(data->spi, data->buffer, data->chip->read_size);
+ ret = spi_read(data->spi, data->buffer.raw, data->chip->read_size);
if (!ret) {
- iio_push_to_buffers_with_ts(indio_dev, data->buffer,
+ iio_push_to_buffers_with_ts(indio_dev, &data->buffer,
sizeof(data->buffer),
iio_get_time_ns(indio_dev));
}
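
The maxim change above does two things: raw reads reuse a DMA-safe buffer instead of on-stack variables (stack memory must not be handed to SPI controllers that may DMA), and buffered captures gain an explicit, aligned timestamp slot. A hedged sketch of the read path, with foo_* names standing in for the driver's own:

#include <linux/spi/spi.h>
#include <linux/types.h>
#include <linux/iio/iio.h>	/* IIO_DMA_MINALIGN */

struct foo_data {
	struct spi_device *spi;
	struct {
		union {
			__be16 raw16;
			__be32 raw32;
		};
		aligned_s64 timestamp;
	} buffer __aligned(IIO_DMA_MINALIGN);
};

static int foo_read_raw16(struct foo_data *data, int *val)
{
	int ret;

	/* DMA-safe: the buffer is cacheline-isolated from other fields */
	ret = spi_read(data->spi, &data->buffer.raw16,
		       sizeof(data->buffer.raw16));
	if (ret)
		return ret;

	*val = be16_to_cpu(data->buffer.raw16);
	return 0;
}
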
diff --git a/drivers/iio/temperature/mcp9600.c b/drivers/iio/temperature/mcp9600.c
index 6e9108d5cf75..aa42c2b1a369 100644
--- a/drivers/iio/temperature/mcp9600.c
+++ b/drivers/iio/temperature/mcp9600.c
@@ -22,26 +22,31 @@
#include <linux/iio/events.h>
#include <linux/iio/iio.h>
+#include <dt-bindings/iio/temperature/thermocouple.h>
+
/* MCP9600 registers */
-#define MCP9600_HOT_JUNCTION 0x0
-#define MCP9600_COLD_JUNCTION 0x2
-#define MCP9600_STATUS 0x4
+#define MCP9600_HOT_JUNCTION 0x00
+#define MCP9600_COLD_JUNCTION 0x02
+#define MCP9600_STATUS 0x04
#define MCP9600_STATUS_ALERT(x) BIT(x)
-#define MCP9600_ALERT_CFG1 0x8
+#define MCP9600_SENSOR_CFG 0x05
+#define MCP9600_SENSOR_TYPE_MASK GENMASK(6, 4)
+#define MCP9600_ALERT_CFG1 0x08
#define MCP9600_ALERT_CFG(x) (MCP9600_ALERT_CFG1 + (x - 1))
#define MCP9600_ALERT_CFG_ENABLE BIT(0)
#define MCP9600_ALERT_CFG_ACTIVE_HIGH BIT(2)
#define MCP9600_ALERT_CFG_FALLING BIT(3)
#define MCP9600_ALERT_CFG_COLD_JUNCTION BIT(4)
-#define MCP9600_ALERT_HYSTERESIS1 0xc
+#define MCP9600_ALERT_HYSTERESIS1 0x0c
#define MCP9600_ALERT_HYSTERESIS(x) (MCP9600_ALERT_HYSTERESIS1 + (x - 1))
#define MCP9600_ALERT_LIMIT1 0x10
#define MCP9600_ALERT_LIMIT(x) (MCP9600_ALERT_LIMIT1 + (x - 1))
#define MCP9600_ALERT_LIMIT_MASK GENMASK(15, 2)
-#define MCP9600_DEVICE_ID 0x20
+#define MCP9600_DEVICE_ID 0x20
/* MCP9600 device id value */
-#define MCP9600_DEVICE_ID_MCP9600 0x40
+#define MCP9600_DEVICE_ID_MCP9600 0x40
+#define MCP9600_DEVICE_ID_MCP9601 0x41
#define MCP9600_ALERT_COUNT 4
@@ -65,6 +70,30 @@ static const char * const mcp9600_alert_name[MCP9600_ALERT_COUNT] = {
[MCP9600_ALERT4] = "alert4",
};
+/* Map between dt-bindings enum and the chip's type value */
+static const unsigned int mcp9600_type_map[] = {
+ [THERMOCOUPLE_TYPE_K] = 0,
+ [THERMOCOUPLE_TYPE_J] = 1,
+ [THERMOCOUPLE_TYPE_T] = 2,
+ [THERMOCOUPLE_TYPE_N] = 3,
+ [THERMOCOUPLE_TYPE_S] = 4,
+ [THERMOCOUPLE_TYPE_E] = 5,
+ [THERMOCOUPLE_TYPE_B] = 6,
+ [THERMOCOUPLE_TYPE_R] = 7,
+};
+
+/* Map thermocouple type to a char for iio info in sysfs */
+static const int mcp9600_tc_types[] = {
+ [THERMOCOUPLE_TYPE_K] = 'K',
+ [THERMOCOUPLE_TYPE_J] = 'J',
+ [THERMOCOUPLE_TYPE_T] = 'T',
+ [THERMOCOUPLE_TYPE_N] = 'N',
+ [THERMOCOUPLE_TYPE_S] = 'S',
+ [THERMOCOUPLE_TYPE_E] = 'E',
+ [THERMOCOUPLE_TYPE_B] = 'B',
+ [THERMOCOUPLE_TYPE_R] = 'R',
+};
+
static const struct iio_event_spec mcp9600_events[] = {
{
.type = IIO_EV_TYPE_THRESH,
@@ -82,12 +111,41 @@ static const struct iio_event_spec mcp9600_events[] = {
},
};
+struct mcp_chip_info {
+ u8 chip_id;
+ const char *chip_name;
+};
+
+struct mcp9600_data {
+ struct i2c_client *client;
+ u32 thermocouple_type;
+};
+
+static int mcp9600_config(struct mcp9600_data *data)
+{
+ struct i2c_client *client = data->client;
+ int ret;
+ u8 cfg;
+
+ cfg = FIELD_PREP(MCP9600_SENSOR_TYPE_MASK,
+ mcp9600_type_map[data->thermocouple_type]);
+
+ ret = i2c_smbus_write_byte_data(client, MCP9600_SENSOR_CFG, cfg);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to set sensor configuration\n");
+ return ret;
+ }
+
+ return 0;
+}
+
#define MCP9600_CHANNELS(hj_num_ev, hj_ev_spec_off, cj_num_ev, cj_ev_spec_off) \
{ \
{ \
.type = IIO_TEMP, \
.address = MCP9600_HOT_JUNCTION, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_THERMOCOUPLE_TYPE) | \
BIT(IIO_CHAN_INFO_SCALE), \
.event_spec = &mcp9600_events[hj_ev_spec_off], \
.num_event_specs = hj_num_ev, \
@@ -123,10 +181,6 @@ static const struct iio_chan_spec mcp9600_channels[][2] = {
MCP9600_CHANNELS(2, 0, 2, 0), /* Alerts: 1 2 3 4 */
};
-struct mcp9600_data {
- struct i2c_client *client;
-};
-
static int mcp9600_read(struct mcp9600_data *data,
struct iio_chan_spec const *chan, int *val)
{
@@ -159,6 +213,9 @@ static int mcp9600_read_raw(struct iio_dev *indio_dev,
*val = 62;
*val2 = 500000;
return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_THERMOCOUPLE_TYPE:
+ *val = mcp9600_tc_types[data->thermocouple_type];
+ return IIO_VAL_CHAR;
default:
return -EINVAL;
}
@@ -416,45 +473,93 @@ static int mcp9600_probe_alerts(struct iio_dev *indio_dev)
static int mcp9600_probe(struct i2c_client *client)
{
+ struct device *dev = &client->dev;
+ const struct mcp_chip_info *chip_info;
struct iio_dev *indio_dev;
struct mcp9600_data *data;
- int ret, ch_sel;
+ int ch_sel, dev_id, ret;
+
+ chip_info = i2c_get_match_data(client);
+ if (!chip_info)
+ return dev_err_probe(dev, -ENODEV,
+ "No chip-info found for device\n");
+
+ dev_id = i2c_smbus_read_byte_data(client, MCP9600_DEVICE_ID);
+ if (dev_id < 0)
+ return dev_err_probe(dev, dev_id, "Failed to read device ID\n");
+
+ switch (dev_id) {
+ case MCP9600_DEVICE_ID_MCP9600:
+ case MCP9600_DEVICE_ID_MCP9601:
+ if (dev_id != chip_info->chip_id)
+ dev_warn(dev,
+ "Expected id %02x, but device responded with %02x\n",
+ chip_info->chip_id, dev_id);
+ break;
- ret = i2c_smbus_read_byte_data(client, MCP9600_DEVICE_ID);
- if (ret < 0)
- return dev_err_probe(&client->dev, ret, "Failed to read device ID\n");
- if (ret != MCP9600_DEVICE_ID_MCP9600)
- dev_warn(&client->dev, "Expected ID %x, got %x\n",
- MCP9600_DEVICE_ID_MCP9600, ret);
+ default:
+ dev_warn(dev, "Unknown id %x, using %x\n", dev_id,
+ chip_info->chip_id);
+ }
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
data = iio_priv(indio_dev);
data->client = client;
+ /* Accept type from dt with default of Type-K. */
+ data->thermocouple_type = THERMOCOUPLE_TYPE_K;
+ ret = device_property_read_u32(dev, "thermocouple-type",
+ &data->thermocouple_type);
+ if (ret && ret != -EINVAL)
+ return dev_err_probe(dev, ret,
+ "Error reading thermocouple-type property\n");
+
+ if (data->thermocouple_type >= ARRAY_SIZE(mcp9600_type_map))
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid thermocouple-type property %u.\n",
+ data->thermocouple_type);
+
+ /* Set initial config. */
+ ret = mcp9600_config(data);
+ if (ret)
+ return ret;
+
ch_sel = mcp9600_probe_alerts(indio_dev);
if (ch_sel < 0)
return ch_sel;
indio_dev->info = &mcp9600_info;
- indio_dev->name = "mcp9600";
+ indio_dev->name = chip_info->chip_name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = mcp9600_channels[ch_sel];
indio_dev->num_channels = ARRAY_SIZE(mcp9600_channels[ch_sel]);
- return devm_iio_device_register(&client->dev, indio_dev);
+ return devm_iio_device_register(dev, indio_dev);
}
+static const struct mcp_chip_info mcp9600_chip_info = {
+ .chip_id = MCP9600_DEVICE_ID_MCP9600,
+ .chip_name = "mcp9600",
+};
+
+static const struct mcp_chip_info mcp9601_chip_info = {
+ .chip_id = MCP9600_DEVICE_ID_MCP9601,
+ .chip_name = "mcp9601",
+};
+
static const struct i2c_device_id mcp9600_id[] = {
- { "mcp9600" },
+ { "mcp9600", .driver_data = (kernel_ulong_t)&mcp9600_chip_info },
+ { "mcp9601", .driver_data = (kernel_ulong_t)&mcp9601_chip_info },
{ }
};
MODULE_DEVICE_TABLE(i2c, mcp9600_id);
static const struct of_device_id mcp9600_of_match[] = {
- { .compatible = "microchip,mcp9600" },
+ { .compatible = "microchip,mcp9600", .data = &mcp9600_chip_info },
+ { .compatible = "microchip,mcp9601", .data = &mcp9601_chip_info },
{ }
};
MODULE_DEVICE_TABLE(of, mcp9600_of_match);
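
The probe rework above hangs per-variant data off both match tables so a single i2c_get_match_data() call covers OF, ACPI, and plain I2C enumeration. A condensed sketch of the pattern (foo_* identifiers are placeholders, not part of the patch):

#include <linux/i2c.h>
#include <linux/mod_devicetable.h>

struct foo_chip_info {
	u8 chip_id;
	const char *chip_name;
};

static const struct foo_chip_info foo_a_info = {
	.chip_id = 0x40,
	.chip_name = "foo-a",
};

static int foo_probe(struct i2c_client *client)
{
	const struct foo_chip_info *info;

	/* OF/ACPI match data first, then i2c_device_id .driver_data */
	info = i2c_get_match_data(client);
	if (!info)
		return -ENODEV;

	dev_info(&client->dev, "probed %s\n", info->chip_name);
	return 0;
}

static const struct i2c_device_id foo_id[] = {
	{ "foo-a", .driver_data = (kernel_ulong_t)&foo_a_info },
	{ }
};

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo-a", .data = &foo_a_info },
	{ }
};
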
diff --git a/drivers/iio/temperature/mlx90614.c b/drivers/iio/temperature/mlx90614.c
index 740018d4b3df..8a44a00bfd5e 100644
--- a/drivers/iio/temperature/mlx90614.c
+++ b/drivers/iio/temperature/mlx90614.c
@@ -225,7 +225,6 @@ static void mlx90614_power_put(struct mlx90614_data *data)
if (!data->wakeup_gpio)
return;
- pm_runtime_mark_last_busy(&data->client->dev);
pm_runtime_put_autosuspend(&data->client->dev);
}
#else
diff --git a/drivers/iio/temperature/mlx90632.c b/drivers/iio/temperature/mlx90632.c
index ae4ea587e7f9..b44f7036c2cc 100644
--- a/drivers/iio/temperature/mlx90632.c
+++ b/drivers/iio/temperature/mlx90632.c
@@ -1043,7 +1043,6 @@ static int mlx90632_read_raw(struct iio_dev *indio_dev,
}
mlx90632_read_raw_pm:
- pm_runtime_mark_last_busy(&data->client->dev);
pm_runtime_put_autosuspend(&data->client->dev);
return ret;
}
@@ -1178,10 +1177,8 @@ static int mlx90632_probe(struct i2c_client *client)
int ret;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*mlx90632));
- if (!indio_dev) {
- dev_err(&client->dev, "Failed to allocate device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
regmap = devm_regmap_init_i2c(client, &mlx90632_regmap);
if (IS_ERR(regmap)) {
diff --git a/drivers/iio/temperature/mlx90635.c b/drivers/iio/temperature/mlx90635.c
index f7f88498ba0e..1c8948ca54df 100644
--- a/drivers/iio/temperature/mlx90635.c
+++ b/drivers/iio/temperature/mlx90635.c
@@ -749,7 +749,6 @@ static int mlx90635_read_raw(struct iio_dev *indio_dev,
}
mlx90635_read_raw_pm:
- pm_runtime_mark_last_busy(&data->client->dev);
pm_runtime_put_autosuspend(&data->client->dev);
return ret;
}
@@ -939,7 +938,7 @@ static int mlx90635_probe(struct i2c_client *client)
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*mlx90635));
if (!indio_dev)
- return dev_err_probe(&client->dev, -ENOMEM, "failed to allocate device\n");
+ return -ENOMEM;
regmap = devm_regmap_init_i2c(client, &mlx90635_regmap);
if (IS_ERR(regmap))
@@ -977,8 +976,7 @@ static int mlx90635_probe(struct i2c_client *client)
ret = devm_add_action_or_reset(&client->dev, mlx90635_disable_regulator,
mlx90635);
if (ret < 0)
- return dev_err_probe(&client->dev, ret,
- "failed to setup regulator cleanup action\n");
+ return ret;
ret = mlx90635_wakeup(mlx90635);
if (ret < 0)
@@ -986,8 +984,7 @@ static int mlx90635_probe(struct i2c_client *client)
ret = devm_add_action_or_reset(&client->dev, mlx90635_sleep, mlx90635);
if (ret < 0)
- return dev_err_probe(&client->dev, ret,
- "failed to setup low power cleanup\n");
+ return ret;
ret = regmap_read(mlx90635->regmap_ee, MLX90635_EE_VERSION, &dsp_version);
if (ret < 0)
diff --git a/drivers/iio/test/Kconfig b/drivers/iio/test/Kconfig
index 7a181cac3cc9..6e65e929791c 100644
--- a/drivers/iio/test/Kconfig
+++ b/drivers/iio/test/Kconfig
@@ -41,3 +41,15 @@ config IIO_FORMAT_KUNIT_TEST
to the KUnit documentation in Documentation/dev-tools/kunit/.
If unsure, say N.
+
+config IIO_MULTIPLY_KUNIT_TEST
+ tristate "Test IIO multiply functions" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Build unit tests for the IIO multiply functions.
+
+ For more information on KUnit and unit tests in general, please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
diff --git a/drivers/iio/test/Makefile b/drivers/iio/test/Makefile
index e9a4cf1ff57f..0c846bc21acd 100644
--- a/drivers/iio/test/Makefile
+++ b/drivers/iio/test/Makefile
@@ -7,4 +7,5 @@
obj-$(CONFIG_IIO_RESCALE_KUNIT_TEST) += iio-test-rescale.o
obj-$(CONFIG_IIO_FORMAT_KUNIT_TEST) += iio-test-format.o
obj-$(CONFIG_IIO_GTS_KUNIT_TEST) += iio-test-gts.o
+obj-$(CONFIG_IIO_MULTIPLY_KUNIT_TEST) += iio-test-multiply.o
CFLAGS_iio-test-format.o += $(DISABLE_STRUCTLEAK_PLUGIN)
diff --git a/drivers/iio/test/iio-test-multiply.c b/drivers/iio/test/iio-test-multiply.c
new file mode 100644
index 000000000000..432e279ffe5b
--- /dev/null
+++ b/drivers/iio/test/iio-test-multiply.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Unit tests for IIO multiply functions
+ *
+ * Copyright (c) 2025 Hans de Goede <hans@hansg.org>
+ * Based on iio-test-format.c which is:
+ * Copyright (c) 2020 Lars-Peter Clausen <lars@metafoo.de>
+ */
+
+#include <kunit/test.h>
+#include <linux/iio/consumer.h>
+#include <linux/math64.h>
+#include <linux/types.h>
+
+static void __iio_test_iio_multiply_value_integer(struct kunit *test, s64 multiplier)
+{
+ int ret, result, val;
+
+ val = 42;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_INT, val, 0);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, multiplier * val);
+
+ val = -23;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_INT, val, 0);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, multiplier * val);
+
+ val = 0;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_INT, val, 0);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, multiplier * val);
+}
+
+static void iio_test_iio_multiply_value_integer(struct kunit *test)
+{
+ __iio_test_iio_multiply_value_integer(test, 20);
+ __iio_test_iio_multiply_value_integer(test, -20);
+}
+
+static void __iio_test_iio_multiply_value_fixedpoint(struct kunit *test, s64 multiplier)
+{
+ int ret, result, val, val2;
+
+ /* positive >= 1 (1.5) */
+ val = 1;
+ val2 = 500000;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_INT_PLUS_MICRO, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, div_s64(multiplier * 15, 10));
+
+ val = 1;
+ val2 = 500000000;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_INT_PLUS_NANO, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, div_s64(multiplier * 15, 10));
+
+ /* positive < 1 (0.5) */
+ val = 0;
+ val2 = 500000;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_INT_PLUS_MICRO, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, div_s64(multiplier * 5, 10));
+
+ val = 0;
+ val2 = 500000000;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_INT_PLUS_NANO, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, div_s64(multiplier * 5, 10));
+
+ /* negative <= -1 (-1.5) */
+ val = -1;
+ val2 = 500000;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_INT_PLUS_MICRO, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, div_s64(multiplier * -15, 10));
+
+ val = -1;
+ val2 = 500000000;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_INT_PLUS_NANO, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, div_s64(multiplier * -15, 10));
+
+ /* negative > -1 (-0.5) */
+ val = 0;
+ val2 = -500000;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_INT_PLUS_MICRO, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, div_s64(multiplier * -5, 10));
+
+ val = 0;
+ val2 = -500000000;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_INT_PLUS_NANO, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, div_s64(multiplier * -5, 10));
+}
+
+static void iio_test_iio_multiply_value_fixedpoint(struct kunit *test)
+{
+ __iio_test_iio_multiply_value_fixedpoint(test, 20);
+ __iio_test_iio_multiply_value_fixedpoint(test, -20);
+}
+
+static void __iio_test_iio_multiply_value_fractional(struct kunit *test, s64 multiplier)
+{
+ int ret, result, val, val2;
+
+ /* positive < 1 (1/10) */
+ val = 1;
+ val2 = 10;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_FRACTIONAL, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, div_s64(multiplier * val, val2));
+
+ /* positive >= 1 (100/3) */
+ val = 100;
+ val2 = 3;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_FRACTIONAL, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, div_s64(multiplier * val, val2));
+
+ /* negative > -1 (-1/10) */
+ val = -1;
+ val2 = 10;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_FRACTIONAL, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, div_s64(multiplier * val, val2));
+
+ /* negative <= -1 (-200/3) */
+ val = -200;
+ val2 = 3;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_FRACTIONAL, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, div_s64(multiplier * val, val2));
+
+ /* Zero (0/-10) */
+ val = 0;
+ val2 = -10;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_FRACTIONAL, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, div_s64(multiplier * val, val2));
+}
+
+static void iio_test_iio_multiply_value_fractional(struct kunit *test)
+{
+ __iio_test_iio_multiply_value_fractional(test, 20);
+ __iio_test_iio_multiply_value_fractional(test, -20);
+}
+
+static void __iio_test_iio_multiply_value_fractional_log2(struct kunit *test, s64 multiplier)
+{
+ int ret, result, val, val2;
+
+ /* positive < 1 (123/1024) */
+ val = 123;
+ val2 = 10;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_FRACTIONAL_LOG2, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, (multiplier * val) >> val2);
+
+ /* positive >= 1 (1234567/1024) */
+ val = 1234567;
+ val2 = 10;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_FRACTIONAL_LOG2, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, (multiplier * val) >> val2);
+
+ /* negative > -1 (-123/1024) */
+ val = -123;
+ val2 = 10;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_FRACTIONAL_LOG2, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, (multiplier * val) >> val2);
+
+ /* negative <= -1 (-1234567/1024) */
+ val = -1234567;
+ val2 = 10;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_FRACTIONAL_LOG2, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, (multiplier * val) >> val2);
+
+ /* Zero (0/1024) */
+ val = 0;
+ val2 = 10;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_FRACTIONAL_LOG2, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, (multiplier * val) >> val2);
+}
+
+static void iio_test_iio_multiply_value_fractional_log2(struct kunit *test)
+{
+ __iio_test_iio_multiply_value_fractional_log2(test, 20);
+ __iio_test_iio_multiply_value_fractional_log2(test, -20);
+}
+
+static struct kunit_case iio_multiply_test_cases[] = {
+ KUNIT_CASE(iio_test_iio_multiply_value_integer),
+ KUNIT_CASE(iio_test_iio_multiply_value_fixedpoint),
+ KUNIT_CASE(iio_test_iio_multiply_value_fractional),
+ KUNIT_CASE(iio_test_iio_multiply_value_fractional_log2),
+ { }
+};
+
+static struct kunit_suite iio_multiply_test_suite = {
+ .name = "iio-multiply",
+ .test_cases = iio_multiply_test_cases,
+};
+kunit_test_suite(iio_multiply_test_suite);
+
+MODULE_AUTHOR("Hans de Goede <hans@hansg.org>");
+MODULE_DESCRIPTION("Test IIO multiply functions");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IIO_UNIT_TEST");
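
Taken together, the cases above pin down the contract being tested: iio_multiply_value() scales a (val, val2) pair of the given IIO_VAL_* type by an integer multiplier, truncates toward zero, and returns the encoding of the result (IIO_VAL_INT throughout these tests). A small caller sketch under that assumption:

#include <linux/errno.h>
#include <linux/iio/consumer.h>

static int foo_scaled(void)
{
	int result, ret;

	/* 20 * (100/3) truncates toward zero: result == 666 */
	ret = iio_multiply_value(&result, 20, IIO_VAL_FRACTIONAL, 100, 3);
	if (ret != IIO_VAL_INT)
		return -EINVAL;

	return result;
}
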
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 3a394cd772f6..f0323f1d6f01 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -85,6 +85,7 @@ source "drivers/infiniband/hw/efa/Kconfig"
source "drivers/infiniband/hw/erdma/Kconfig"
source "drivers/infiniband/hw/hfi1/Kconfig"
source "drivers/infiniband/hw/hns/Kconfig"
+source "drivers/infiniband/hw/ionic/Kconfig"
source "drivers/infiniband/hw/irdma/Kconfig"
source "drivers/infiniband/hw/mana/Kconfig"
source "drivers/infiniband/hw/mlx4/Kconfig"
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index be0743dac3ff..61596cda2b65 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -446,63 +446,41 @@ static int addr6_resolve(struct sockaddr *src_sock,
}
#endif
+static bool is_dst_local(const struct dst_entry *dst)
+{
+ if (dst->ops->family == AF_INET)
+ return !!(dst_rtable(dst)->rt_type & RTN_LOCAL);
+ else if (dst->ops->family == AF_INET6)
+ return !!(dst_rt6_info(dst)->rt6i_flags & RTF_LOCAL);
+ else
+ return false;
+}
+
static int addr_resolve_neigh(const struct dst_entry *dst,
const struct sockaddr *dst_in,
struct rdma_dev_addr *addr,
- unsigned int ndev_flags,
u32 seq)
{
- int ret = 0;
-
- if (ndev_flags & IFF_LOOPBACK) {
+ if (is_dst_local(dst)) {
+ /* When the destination is a local entry, source and destination
+ * are the same. Skip the neighbour lookup.
+ */
memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
- } else {
- if (!(ndev_flags & IFF_NOARP)) {
- /* If the device doesn't do ARP internally */
- ret = fetch_ha(dst, addr, dst_in, seq);
- }
+ return 0;
}
- return ret;
-}
-
-static int copy_src_l2_addr(struct rdma_dev_addr *dev_addr,
- const struct sockaddr *dst_in,
- const struct dst_entry *dst,
- const struct net_device *ndev)
-{
- int ret = 0;
-
- if (dst->dev->flags & IFF_LOOPBACK)
- ret = rdma_translate_ip(dst_in, dev_addr);
- else
- rdma_copy_src_l2_addr(dev_addr, dst->dev);
-
- /*
- * If there's a gateway and type of device not ARPHRD_INFINIBAND,
- * we're definitely in RoCE v2 (as RoCE v1 isn't routable) set the
- * network type accordingly.
- */
- if (has_gateway(dst, dst_in->sa_family) &&
- ndev->type != ARPHRD_INFINIBAND)
- dev_addr->network = dst_in->sa_family == AF_INET ?
- RDMA_NETWORK_IPV4 :
- RDMA_NETWORK_IPV6;
- else
- dev_addr->network = RDMA_NETWORK_IB;
- return ret;
+ return fetch_ha(dst, addr, dst_in, seq);
}
static int rdma_set_src_addr_rcu(struct rdma_dev_addr *dev_addr,
- unsigned int *ndev_flags,
const struct sockaddr *dst_in,
const struct dst_entry *dst)
{
struct net_device *ndev = READ_ONCE(dst->dev);
- *ndev_flags = ndev->flags;
/* A physical device must be the RDMA device to use */
- if (ndev->flags & IFF_LOOPBACK) {
+ if (is_dst_local(dst)) {
+ int ret;
/*
* RDMA (IB/RoCE, iWarp) doesn't run on lo interface or
* loopback IP address. So if route is resolved to loopback
@@ -512,9 +490,27 @@ static int rdma_set_src_addr_rcu(struct rdma_dev_addr *dev_addr,
ndev = rdma_find_ndev_for_src_ip_rcu(dev_net(ndev), dst_in);
if (IS_ERR(ndev))
return -ENODEV;
+ ret = rdma_translate_ip(dst_in, dev_addr);
+ if (ret)
+ return ret;
+ } else {
+ rdma_copy_src_l2_addr(dev_addr, dst->dev);
}
- return copy_src_l2_addr(dev_addr, dst_in, dst, ndev);
+ /*
+ * If there's a gateway and the device type is not ARPHRD_INFINIBAND,
+ * we're definitely in RoCE v2 (as RoCE v1 isn't routable), so set the
+ * network type accordingly.
+ */
+ if (has_gateway(dst, dst_in->sa_family) &&
+ ndev->type != ARPHRD_INFINIBAND)
+ dev_addr->network = dst_in->sa_family == AF_INET ?
+ RDMA_NETWORK_IPV4 :
+ RDMA_NETWORK_IPV6;
+ else
+ dev_addr->network = RDMA_NETWORK_IB;
+
+ return 0;
}
static int set_addr_netns_by_gid_rcu(struct rdma_dev_addr *addr)
@@ -551,7 +547,6 @@ static int addr_resolve(struct sockaddr *src_in,
u32 seq)
{
struct dst_entry *dst = NULL;
- unsigned int ndev_flags = 0;
struct rtable *rt = NULL;
int ret;
@@ -588,7 +583,7 @@ static int addr_resolve(struct sockaddr *src_in,
rcu_read_unlock();
goto done;
}
- ret = rdma_set_src_addr_rcu(addr, &ndev_flags, dst_in, dst);
+ ret = rdma_set_src_addr_rcu(addr, dst_in, dst);
rcu_read_unlock();
/*
@@ -596,7 +591,7 @@ static int addr_resolve(struct sockaddr *src_in,
* only if src addr translation didn't fail.
*/
if (!ret && resolve_neigh)
- ret = addr_resolve_neigh(dst, dst_in, addr, ndev_flags, seq);
+ ret = addr_resolve_neigh(dst, dst_in, addr, seq);
if (src_in->sa_family == AF_INET)
ip_rt_put(rt);
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index 3bb46696731e..25a060a28301 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -110,8 +110,7 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *
agent = port_priv->agent[qpn];
ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num);
if (IS_ERR(ah)) {
- dev_err(&device->dev, "ib_create_ah_from_wc error %ld\n",
- PTR_ERR(ah));
+ dev_err(&device->dev, "ib_create_ah_from_wc error %pe\n", ah);
return;
}
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 92678e438ff4..01bede8ba105 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1049,8 +1049,8 @@ static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id,
struct cm_id_private *cm_id_priv;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- pr_err("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
- cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
+ pr_err_ratelimited("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
+ cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
}
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 9b471548e7ae..5b2d3ae3f9fc 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2076,6 +2076,7 @@ static void _destroy_id(struct rdma_id_private *id_priv,
kfree(id_priv->id.route.path_rec);
kfree(id_priv->id.route.path_rec_inbound);
kfree(id_priv->id.route.path_rec_outbound);
+ kfree(id_priv->id.route.service_recs);
put_net(id_priv->id.route.addr.dev_addr.net);
kfree(id_priv);
@@ -3382,13 +3383,18 @@ err1:
int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms)
{
struct rdma_id_private *id_priv;
+ enum rdma_cm_state state;
int ret;
if (!timeout_ms)
return -EINVAL;
id_priv = container_of(id, struct rdma_id_private, id);
- if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
+ state = id_priv->state;
+ if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
+ RDMA_CM_ROUTE_QUERY) &&
+ !cma_comp_exch(id_priv, RDMA_CM_ADDRINFO_RESOLVED,
+ RDMA_CM_ROUTE_QUERY))
return -EINVAL;
cma_id_get(id_priv);
@@ -3409,7 +3415,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms)
return 0;
err:
- cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
+ cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, state);
cma_id_put(id_priv);
return ret;
}
@@ -5506,3 +5512,129 @@ static void __exit cma_cleanup(void)
module_init(cma_init);
module_exit(cma_cleanup);
+
+static void cma_query_ib_service_handler(int status,
+ struct sa_service_rec *recs,
+ unsigned int num_recs, void *context)
+{
+ struct cma_work *work = context;
+ struct rdma_id_private *id_priv = work->id;
+ struct sockaddr_ib *addr;
+
+ if (status)
+ goto fail;
+
+ if (!num_recs) {
+ status = -ENOENT;
+ goto fail;
+ }
+
+ if (id_priv->id.route.service_recs) {
+ status = -EALREADY;
+ goto fail;
+ }
+
+ id_priv->id.route.service_recs =
+ kmalloc_array(num_recs, sizeof(*recs), GFP_KERNEL);
+ if (!id_priv->id.route.service_recs) {
+ status = -ENOMEM;
+ goto fail;
+ }
+
+ id_priv->id.route.num_service_recs = num_recs;
+ memcpy(id_priv->id.route.service_recs, recs, sizeof(*recs) * num_recs);
+
+ addr = (struct sockaddr_ib *)&id_priv->id.route.addr.dst_addr;
+ addr->sib_family = AF_IB;
+ addr->sib_addr = *(struct ib_addr *)&recs->gid;
+ addr->sib_pkey = recs->pkey;
+ addr->sib_sid = recs->id;
+ rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr,
+ (union ib_gid *)&addr->sib_addr);
+ ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr,
+ ntohs(addr->sib_pkey));
+
+ queue_work(cma_wq, &work->work);
+ return;
+
+fail:
+ work->old_state = RDMA_CM_ADDRINFO_QUERY;
+ work->new_state = RDMA_CM_ADDR_BOUND;
+ work->event.event = RDMA_CM_EVENT_ADDRINFO_ERROR;
+ work->event.status = status;
+ pr_debug_ratelimited(
+ "RDMA CM: SERVICE_ERROR: failed to query service record. status %d\n",
+ status);
+ queue_work(cma_wq, &work->work);
+}
+
+static int cma_resolve_ib_service(struct rdma_id_private *id_priv,
+ struct rdma_ucm_ib_service *ibs)
+{
+ struct sa_service_rec sr = {};
+ ib_sa_comp_mask mask = 0;
+ struct cma_work *work;
+
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
+ if (!work)
+ return -ENOMEM;
+
+ cma_id_get(id_priv);
+
+ work->id = id_priv;
+ INIT_WORK(&work->work, cma_work_handler);
+ work->old_state = RDMA_CM_ADDRINFO_QUERY;
+ work->new_state = RDMA_CM_ADDRINFO_RESOLVED;
+ work->event.event = RDMA_CM_EVENT_ADDRINFO_RESOLVED;
+
+ if (ibs->flags & RDMA_USER_CM_IB_SERVICE_FLAG_ID) {
+ sr.id = cpu_to_be64(ibs->service_id);
+ mask |= IB_SA_SERVICE_REC_SERVICE_ID;
+ }
+ if (ibs->flags & RDMA_USER_CM_IB_SERVICE_FLAG_NAME) {
+ strscpy(sr.name, ibs->service_name, sizeof(sr.name));
+ mask |= IB_SA_SERVICE_REC_SERVICE_NAME;
+ }
+
+ id_priv->query_id = ib_sa_service_rec_get(&sa_client,
+ id_priv->id.device,
+ id_priv->id.port_num,
+ &sr, mask,
+ 2000, GFP_KERNEL,
+ cma_query_ib_service_handler,
+ work, &id_priv->query);
+
+ if (id_priv->query_id < 0) {
+ cma_id_put(id_priv);
+ kfree(work);
+ return id_priv->query_id;
+ }
+
+ return 0;
+}
+
+int rdma_resolve_ib_service(struct rdma_cm_id *id,
+ struct rdma_ucm_ib_service *ibs)
+{
+ struct rdma_id_private *id_priv;
+ int ret;
+
+ id_priv = container_of(id, struct rdma_id_private, id);
+ if (!id_priv->cma_dev ||
+ !cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDRINFO_QUERY))
+ return -EINVAL;
+
+ if (rdma_cap_ib_sa(id->device, id->port_num))
+ ret = cma_resolve_ib_service(id_priv, ibs);
+ else
+ ret = -EOPNOTSUPP;
+
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ cma_comp_exch(id_priv, RDMA_CM_ADDRINFO_QUERY, RDMA_CM_ADDR_BOUND);
+ return ret;
+}
+EXPORT_SYMBOL(rdma_resolve_ib_service);
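
A caller-side sketch of the new entry point, derived from the flag handling in cma_resolve_ib_service() above; the id must already be bound to a device, and completion is reported via RDMA_CM_EVENT_ADDRINFO_RESOLVED:

#include <linux/string.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_user_cm.h>

static int foo_resolve_service(struct rdma_cm_id *id)
{
	struct rdma_ucm_ib_service ibs = {};

	ibs.flags = RDMA_USER_CM_IB_SERVICE_FLAG_NAME;
	strscpy(ibs.service_name, "foo-service", sizeof(ibs.service_name));

	/* Fails with -EINVAL unless the id is in RDMA_CM_ADDR_BOUND */
	return rdma_resolve_ib_service(id, &ibs);
}
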
diff --git a/drivers/infiniband/core/cma_priv.h b/drivers/infiniband/core/cma_priv.h
index b7354c94cf1b..c604b601f4d9 100644
--- a/drivers/infiniband/core/cma_priv.h
+++ b/drivers/infiniband/core/cma_priv.h
@@ -47,7 +47,9 @@ enum rdma_cm_state {
RDMA_CM_ADDR_BOUND,
RDMA_CM_LISTEN,
RDMA_CM_DEVICE_REMOVAL,
- RDMA_CM_DESTROYING
+ RDMA_CM_DESTROYING,
+ RDMA_CM_ADDRINFO_QUERY,
+ RDMA_CM_ADDRINFO_RESOLVED
};
struct rdma_id_private {
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 3145cb34a1d2..b4f3c835844a 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -1543,7 +1543,7 @@ static void __ib_unregister_device(struct ib_device *ib_dev)
/*
* We have a registration lock so that all the calls to unregister are
- * fully fenced, once any unregister returns the device is truely
+ * fully fenced, once any unregister returns the device is truly
* unregistered even if multiple callers are unregistering it at the
* same time. This also interacts with the registration flow and
* provides sane semantics if register and unregister are racing.
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 53571e6b3162..c23e9c847314 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -107,6 +107,8 @@ struct ib_sa_device {
struct ib_sa_query {
void (*callback)(struct ib_sa_query *sa_query, int status,
struct ib_sa_mad *mad);
+ void (*rmpp_callback)(struct ib_sa_query *sa_query, int status,
+ struct ib_mad_recv_wc *mad);
void (*release)(struct ib_sa_query *);
struct ib_sa_client *client;
struct ib_sa_port *port;
@@ -150,6 +152,13 @@ struct ib_sa_mcmember_query {
struct ib_sa_query sa_query;
};
+struct ib_sa_service_query {
+ void (*callback)(int status, struct sa_service_rec *rec,
+ unsigned int num_services, void *context);
+ void *context;
+ struct ib_sa_query sa_query;
+};
+
static LIST_HEAD(ib_nl_request_list);
static DEFINE_SPINLOCK(ib_nl_request_lock);
static atomic_t ib_nl_sa_request_seq;
@@ -684,6 +693,58 @@ static const struct ib_field guidinfo_rec_table[] = {
.size_bits = 512 },
};
+#define SERVICE_REC_FIELD(field) \
+ .struct_offset_bytes = offsetof(struct sa_service_rec, field), \
+ .struct_size_bytes = sizeof_field(struct sa_service_rec, field), \
+ .field_name = "sa_service_rec:" #field
+
+static const struct ib_field service_rec_table[] = {
+ { SERVICE_REC_FIELD(id),
+ .offset_words = 0,
+ .offset_bits = 0,
+ .size_bits = 64 },
+ { SERVICE_REC_FIELD(gid),
+ .offset_words = 2,
+ .offset_bits = 0,
+ .size_bits = 128 },
+ { SERVICE_REC_FIELD(pkey),
+ .offset_words = 6,
+ .offset_bits = 0,
+ .size_bits = 16 },
+ { RESERVED,
+ .offset_words = 6,
+ .offset_bits = 16,
+ .size_bits = 16 },
+ { SERVICE_REC_FIELD(lease),
+ .offset_words = 7,
+ .offset_bits = 0,
+ .size_bits = 32 },
+ { SERVICE_REC_FIELD(key),
+ .offset_words = 8,
+ .offset_bits = 0,
+ .size_bits = 128 },
+ { SERVICE_REC_FIELD(name),
+ .offset_words = 12,
+ .offset_bits = 0,
+ .size_bits = 512 },
+ { SERVICE_REC_FIELD(data_8),
+ .offset_words = 28,
+ .offset_bits = 0,
+ .size_bits = 128 },
+ { SERVICE_REC_FIELD(data_16),
+ .offset_words = 32,
+ .offset_bits = 0,
+ .size_bits = 128 },
+ { SERVICE_REC_FIELD(data_32),
+ .offset_words = 36,
+ .offset_bits = 0,
+ .size_bits = 128 },
+ { SERVICE_REC_FIELD(data_64),
+ .offset_words = 40,
+ .offset_bits = 0,
+ .size_bits = 128 },
+};
+
#define RDMA_PRIMARY_PATH_MAX_REC_NUM 3
static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
@@ -1013,6 +1074,8 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;
+ spin_lock_irqsave(&ib_nl_request_lock, flags);
+
delta = timeout - sa_local_svc_timeout_ms;
if (delta < 0)
abs_delta = -delta;
@@ -1020,7 +1083,6 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
abs_delta = delta;
if (delta != 0) {
- spin_lock_irqsave(&ib_nl_request_lock, flags);
sa_local_svc_timeout_ms = timeout;
list_for_each_entry(query, &ib_nl_request_list, list) {
if (delta < 0 && abs_delta > query->timeout)
@@ -1038,9 +1100,10 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
if (delay)
mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
(unsigned long)delay);
- spin_unlock_irqrestore(&ib_nl_request_lock, flags);
}
+ spin_unlock_irqrestore(&ib_nl_request_lock, flags);
+
settimeout_out:
return 0;
}
@@ -1390,6 +1453,20 @@ void ib_sa_pack_path(struct sa_path_rec *rec, void *attribute)
}
EXPORT_SYMBOL(ib_sa_pack_path);
+void ib_sa_pack_service(struct sa_service_rec *rec, void *attribute)
+{
+ ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table), rec,
+ attribute);
+}
+EXPORT_SYMBOL(ib_sa_pack_service);
+
+void ib_sa_unpack_service(void *attribute, struct sa_service_rec *rec)
+{
+ ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table), attribute,
+ rec);
+}
+EXPORT_SYMBOL(ib_sa_unpack_service);
+
static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client,
struct ib_sa_device *sa_dev,
u32 port_num)
@@ -1479,6 +1556,68 @@ static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
}
}
+#define IB_SA_DATA_OFFS 56
+#define IB_SERVICE_REC_SZ 176
+
+static void ib_unpack_service_rmpp(struct sa_service_rec *rec,
+ struct ib_mad_recv_wc *mad_wc,
+ int num_services)
+{
+ unsigned int cp_sz, data_i, data_size, rec_i = 0, buf_i = 0;
+ struct ib_mad_recv_buf *mad_buf;
+ u8 buf[IB_SERVICE_REC_SZ];
+ u8 *data;
+
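+ /* sizeof() does not evaluate its operand, so it is fine that
+ * mad_buf is only assigned by the loop below; this just measures
+ * the fixed-size MAD data area.
+ */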
+ data_size = sizeof(((struct ib_sa_mad *) mad_buf->mad)->data);
+
+ list_for_each_entry(mad_buf, &mad_wc->rmpp_list, list) {
+ data = ((struct ib_sa_mad *) mad_buf->mad)->data;
+ data_i = 0;
+ while (data_i < data_size && rec_i < num_services) {
+ cp_sz = min(IB_SERVICE_REC_SZ - buf_i,
+ data_size - data_i);
+ memcpy(buf + buf_i, data + data_i, cp_sz);
+ data_i += cp_sz;
+ buf_i += cp_sz;
+ if (buf_i == IB_SERVICE_REC_SZ) {
+ ib_sa_unpack_service(buf, rec + rec_i);
+ buf_i = 0;
+ rec_i++;
+ }
+ }
+ }
+}
+
+static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query, int status,
+ struct ib_mad_recv_wc *mad_wc)
+{
+ struct ib_sa_service_query *query =
+ container_of(sa_query, struct ib_sa_service_query, sa_query);
+ struct sa_service_rec *rec;
+ int num_services;
+
+ if (!mad_wc || !mad_wc->recv_buf.mad) {
+ query->callback(status, NULL, 0, query->context);
+ return;
+ }
+
+ num_services = (mad_wc->mad_len - IB_SA_DATA_OFFS) / IB_SERVICE_REC_SZ;
+ if (!num_services) {
+ query->callback(-ENODATA, NULL, 0, query->context);
+ return;
+ }
+
+ rec = kmalloc_array(num_services, sizeof(*rec), GFP_KERNEL);
+ if (!rec) {
+ query->callback(-ENOMEM, NULL, 0, query->context);
+ return;
+ }
+
+ ib_unpack_service_rmpp(rec, mad_wc, num_services);
+ query->callback(status, rec, num_services, query->context);
+ kfree(rec);
+}
+
static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
struct ib_sa_path_query *query =
@@ -1488,6 +1627,14 @@ static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
kfree(query);
}
+static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
+{
+ struct ib_sa_service_query *query =
+ container_of(sa_query, struct ib_sa_service_query, sa_query);
+
+ kfree(query);
+}
+
/**
* ib_sa_path_rec_get - Start a Path get query
* @client:SA client
@@ -1618,6 +1765,101 @@ err1:
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
+/**
+ * ib_sa_service_rec_get - Start a Service get query
+ * @client: SA client
+ * @device: device to send query on
+ * @port_num: port number to send query on
+ * @rec: Service Record to send in query
+ * @comp_mask: component mask to send in query
+ * @timeout_ms: time to wait for response
+ * @gfp_mask: GFP mask to use for internal allocations
+ * @callback: function called when query completes, times out or is
+ * canceled
+ * @context: opaque user context passed to callback
+ * @sa_query: query context, used to cancel query
+ *
+ * Send a Service Record Get query to the SA to look up service records. The
+ * callback function will be called when the query completes (or
+ * fails); status is 0 for a successful response, -EINTR if the query
+ * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
+ * occurred sending the query. The resp parameter of the callback is
+ * only valid if status is 0.
+ *
+ * If the return value of ib_sa_service_rec_get() is negative, it is an
+ * error code. Otherwise it is a query ID that can be used to cancel
+ * the query.
+ */
+int ib_sa_service_rec_get(struct ib_sa_client *client,
+ struct ib_device *device, u32 port_num,
+ struct sa_service_rec *rec,
+ ib_sa_comp_mask comp_mask,
+ unsigned long timeout_ms, gfp_t gfp_mask,
+ void (*callback)(int status,
+ struct sa_service_rec *resp,
+ unsigned int num_services,
+ void *context),
+ void *context, struct ib_sa_query **sa_query)
+{
+ struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
+ struct ib_sa_service_query *query;
+ struct ib_mad_agent *agent;
+ struct ib_sa_port *port;
+ struct ib_sa_mad *mad;
+ int ret;
+
+ if (!sa_dev)
+ return -ENODEV;
+
+ port = &sa_dev->port[port_num - sa_dev->start_port];
+ agent = port->agent;
+
+ query = kzalloc(sizeof(*query), gfp_mask);
+ if (!query)
+ return -ENOMEM;
+
+ query->sa_query.port = port;
+
+ ret = alloc_mad(&query->sa_query, gfp_mask);
+ if (ret)
+ goto err1;
+
+ ib_sa_client_get(client);
+ query->sa_query.client = client;
+ query->callback = callback;
+ query->context = context;
+
+ mad = query->sa_query.mad_buf->mad;
+ init_mad(&query->sa_query, agent);
+
+ query->sa_query.rmpp_callback = callback ? ib_sa_service_rec_callback :
+ NULL;
+ query->sa_query.release = ib_sa_service_rec_release;
+ mad->mad_hdr.method = IB_MGMT_METHOD_GET_TABLE;
+ mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
+ mad->sa_hdr.comp_mask = comp_mask;
+
+ ib_sa_pack_service(rec, mad->data);
+
+ *sa_query = &query->sa_query;
+ query->sa_query.mad_buf->context[1] = rec;
+
+ ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
+ if (ret < 0)
+ goto err2;
+
+ return ret;
+
+err2:
+ *sa_query = NULL;
+ ib_sa_client_put(query->sa_query.client);
+ free_mad(&query->sa_query);
+err1:
+ kfree(query);
+ return ret;
+}
+EXPORT_SYMBOL(ib_sa_service_rec_get);
+
static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
int status, struct ib_sa_mad *mad)
{
@@ -1987,23 +2229,29 @@ static void send_handler(struct ib_mad_agent *agent,
{
struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
unsigned long flags;
+ int status = 0;
- if (query->callback)
+ if (query->callback || query->rmpp_callback) {
switch (mad_send_wc->status) {
case IB_WC_SUCCESS:
/* No callback -- already got recv */
break;
case IB_WC_RESP_TIMEOUT_ERR:
- query->callback(query, -ETIMEDOUT, NULL);
+ status = -ETIMEDOUT;
break;
case IB_WC_WR_FLUSH_ERR:
- query->callback(query, -EINTR, NULL);
+ status = -EINTR;
break;
default:
- query->callback(query, -EIO, NULL);
+ status = -EIO;
break;
}
+ if (status)
+ query->callback ? query->callback(query, status, NULL) :
+ query->rmpp_callback(query, status, NULL);
+ }
+
xa_lock_irqsave(&queries, flags);
__xa_erase(&queries, query->id);
xa_unlock_irqrestore(&queries, flags);
@@ -2019,17 +2267,25 @@ static void recv_handler(struct ib_mad_agent *mad_agent,
struct ib_mad_recv_wc *mad_recv_wc)
{
struct ib_sa_query *query;
+ struct ib_mad *mad;
+
if (!send_buf)
return;
query = send_buf->context[0];
- if (query->callback) {
+ mad = mad_recv_wc->recv_buf.mad;
+
+ if (query->rmpp_callback) {
+ if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
+ query->rmpp_callback(query, mad->mad_hdr.status ?
+ -EINVAL : 0, mad_recv_wc);
+ else
+ query->rmpp_callback(query, -EIO, NULL);
+ } else if (query->callback) {
if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
- query->callback(query,
- mad_recv_wc->recv_buf.mad->mad_hdr.status ?
- -EINVAL : 0,
- (struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
+ query->callback(query, mad->mad_hdr.status ?
+ -EINVAL : 0, (struct ib_sa_mad *)mad);
else
query->callback(query, -EIO, NULL);
}
@@ -2181,8 +2437,9 @@ static int ib_sa_add_one(struct ib_device *device)
sa_dev->port[i].agent =
ib_register_mad_agent(device, i + s, IB_QPT_GSI,
- NULL, 0, send_handler,
- recv_handler, sa_dev, 0);
+ NULL, IB_MGMT_RMPP_VERSION,
+ send_handler, recv_handler,
+ sa_dev, 0);
if (IS_ERR(sa_dev->port[i].agent)) {
ret = PTR_ERR(sa_dev->port[i].agent);
goto err;
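
A hedged sketch of a kernel-side caller for the API documented above, modeled on cma_resolve_ib_service(); records handed to the callback are only valid for the duration of the call and must be copied if kept:

#include <linux/gfp.h>
#include <linux/printk.h>
#include <linux/string.h>
#include <rdma/ib_sa.h>

static void foo_service_cb(int status, struct sa_service_rec *recs,
			   unsigned int num_recs, void *context)
{
	if (status) {
		pr_debug("service query failed: %d\n", status);
		return;
	}
	/* copy out of recs[0..num_recs - 1] here if needed later */
}

static int foo_query_by_name(struct ib_sa_client *client,
			     struct ib_device *device, u32 port_num,
			     struct ib_sa_query **query)
{
	struct sa_service_rec rec = {};

	strscpy(rec.name, "foo-service", sizeof(rec.name));
	return ib_sa_service_rec_get(client, device, port_num, &rec,
				     IB_SA_SERVICE_REC_SERVICE_NAME,
				     2000, GFP_KERNEL,
				     foo_service_cb, NULL, query);
}
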
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 6e700b974033..f86ece701db6 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -282,6 +282,10 @@ static struct ucma_event *ucma_create_uevent(struct ucma_context *ctx,
}
uevent->resp.event = event->event;
uevent->resp.status = event->status;
+
+ if (event->event == RDMA_CM_EVENT_ADDRINFO_RESOLVED)
+ goto out;
+
if (ctx->cm_id->qp_type == IB_QPT_UD)
ucma_copy_ud_event(ctx->cm_id->device, &uevent->resp.param.ud,
&event->param.ud);
@@ -289,6 +293,7 @@ static struct ucma_event *ucma_create_uevent(struct ucma_context *ctx,
ucma_copy_conn_event(&uevent->resp.param.conn,
&event->param.conn);
+out:
uevent->resp.ece.vendor_id = event->ece.vendor_id;
uevent->resp.ece.attr_mod = event->ece.attr_mod;
return uevent;
@@ -728,6 +733,28 @@ static ssize_t ucma_resolve_addr(struct ucma_file *file,
return ret;
}
+static ssize_t ucma_resolve_ib_service(struct ucma_file *file,
+ const char __user *inbuf, int in_len,
+ int out_len)
+{
+ struct rdma_ucm_resolve_ib_service cmd;
+ struct ucma_context *ctx;
+ int ret;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ ctx = ucma_get_ctx(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ mutex_lock(&ctx->mutex);
+ ret = rdma_resolve_ib_service(ctx->cm_id, &cmd.ibs);
+ mutex_unlock(&ctx->mutex);
+ ucma_put_ctx(ctx);
+ return ret;
+}
+
static ssize_t ucma_resolve_route(struct ucma_file *file,
const char __user *inbuf,
int in_len, int out_len)
@@ -994,6 +1021,43 @@ static ssize_t ucma_query_gid(struct ucma_context *ctx,
return ret;
}
+static ssize_t ucma_query_ib_service(struct ucma_context *ctx,
+ void __user *response, int out_len)
+{
+ struct rdma_ucm_query_ib_service_resp *resp;
+ int n, ret = 0;
+
+ if (out_len < sizeof(struct rdma_ucm_query_ib_service_resp))
+ return -ENOSPC;
+
+ if (!ctx->cm_id->route.service_recs)
+ return -ENODATA;
+
+ resp = kzalloc(out_len, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ resp->num_service_recs = ctx->cm_id->route.num_service_recs;
+
+ n = (out_len - sizeof(struct rdma_ucm_query_ib_service_resp)) /
+ sizeof(struct ib_user_service_rec);
+
+ if (!n)
+ goto out;
+
+ if (n > ctx->cm_id->route.num_service_recs)
+ n = ctx->cm_id->route.num_service_recs;
+
+ memcpy(resp->recs, ctx->cm_id->route.service_recs,
+ sizeof(*resp->recs) * n);
+ if (copy_to_user(response, resp, struct_size(resp, recs, n)))
+ ret = -EFAULT;
+
+out:
+ kfree(resp);
+ return ret;
+}
+
static ssize_t ucma_query(struct ucma_file *file,
const char __user *inbuf,
int in_len, int out_len)
@@ -1022,6 +1086,9 @@ static ssize_t ucma_query(struct ucma_file *file,
case RDMA_USER_CM_QUERY_GID:
ret = ucma_query_gid(ctx, response, out_len);
break;
+ case RDMA_USER_CM_QUERY_IB_SERVICE:
+ ret = ucma_query_ib_service(ctx, response, out_len);
+ break;
default:
ret = -ENOSYS;
break;
@@ -1678,6 +1745,55 @@ err_unlock:
return ret;
}
+static ssize_t ucma_write_cm_event(struct ucma_file *file,
+ const char __user *inbuf, int in_len,
+ int out_len)
+{
+ struct rdma_ucm_write_cm_event cmd;
+ struct rdma_cm_event event = {};
+ struct ucma_event *uevent;
+ struct ucma_context *ctx;
+ int ret = 0;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ if ((cmd.event != RDMA_CM_EVENT_USER) &&
+ (cmd.event != RDMA_CM_EVENT_INTERNAL))
+ return -EINVAL;
+
+ ctx = ucma_get_ctx(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ event.event = cmd.event;
+ event.status = cmd.status;
+ event.param.arg = cmd.param.arg;
+
+ uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
+ if (!uevent) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ uevent->ctx = ctx;
+ uevent->resp.uid = ctx->uid;
+ uevent->resp.id = ctx->id;
+ uevent->resp.event = event.event;
+ uevent->resp.status = event.status;
+ memcpy(uevent->resp.param.arg32, &event.param.arg,
+ sizeof(event.param.arg));
+
+ mutex_lock(&ctx->file->mut);
+ list_add_tail(&uevent->list, &ctx->file->event_list);
+ mutex_unlock(&ctx->file->mut);
+ wake_up_interruptible(&ctx->file->poll_wait);
+
+out:
+ ucma_put_ctx(ctx);
+ return ret;
+}
+
static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
const char __user *inbuf,
int in_len, int out_len) = {
@@ -1703,7 +1819,9 @@ static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
[RDMA_USER_CM_CMD_QUERY] = ucma_query,
[RDMA_USER_CM_CMD_BIND] = ucma_bind,
[RDMA_USER_CM_CMD_RESOLVE_ADDR] = ucma_resolve_addr,
- [RDMA_USER_CM_CMD_JOIN_MCAST] = ucma_join_multicast
+ [RDMA_USER_CM_CMD_JOIN_MCAST] = ucma_join_multicast,
+ [RDMA_USER_CM_CMD_RESOLVE_IB_SERVICE] = ucma_resolve_ib_service,
+ [RDMA_USER_CM_CMD_WRITE_CM_EVENT] = ucma_write_cm_event,
};
static ssize_t ucma_write(struct file *filp, const char __user *buf,
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index b1c44ec1a3f3..572a91a62a7b 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -115,7 +115,7 @@ static int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
out_free_map:
if (ib_uses_virt_dma(dev))
- kfree(map->pfn_list);
+ kvfree(map->pfn_list);
else
hmm_dma_map_free(dev->dma_device, map);
return ret;
@@ -287,7 +287,7 @@ static void ib_umem_odp_free(struct ib_umem_odp *umem_odp)
mutex_unlock(&umem_odp->umem_mutex);
mmu_interval_notifier_remove(&umem_odp->notifier);
if (ib_uses_virt_dma(dev))
- kfree(umem_odp->map.pfn_list);
+ kvfree(umem_odp->map.pfn_list);
else
hmm_dma_map_free(dev->dma_device, &umem_odp->map);
}
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index df61b2299ec0..b706dc0d0263 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -14,3 +14,4 @@ obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns/
obj-$(CONFIG_INFINIBAND_QEDR) += qedr/
obj-$(CONFIG_INFINIBAND_BNXT_RE) += bnxt_re/
obj-$(CONFIG_INFINIBAND_ERDMA) += erdma/
+obj-$(CONFIG_INFINIBAND_IONIC) += ionic/
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 6df5a2738c95..3485e495ac6a 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -172,9 +172,9 @@ struct bnxt_re_dev {
struct list_head list;
unsigned long flags;
#define BNXT_RE_FLAG_NETDEV_REGISTERED 0
+#define BNXT_RE_FLAG_STATS_CTX3_ALLOC 1
#define BNXT_RE_FLAG_HAVE_L2_REF 3
#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4
-#define BNXT_RE_FLAG_QOS_WORK_REG 5
#define BNXT_RE_FLAG_RESOURCES_ALLOCATED 7
#define BNXT_RE_FLAG_RESOURCES_INITIALIZED 8
#define BNXT_RE_FLAG_ERR_DEVICE_DETACHED 17
@@ -187,9 +187,6 @@ struct bnxt_re_dev {
int id;
- struct delayed_work worker;
- u8 cur_prio_map;
-
/* RCFW Channel */
struct bnxt_qplib_rcfw rcfw;
@@ -227,6 +224,13 @@ struct bnxt_re_dev {
struct workqueue_struct *dcb_wq;
struct dentry *cc_config;
struct bnxt_re_dbg_cc_config_params *cc_config_params;
+#define BNXT_VPD_FLD_LEN 32
+ char board_partno[BNXT_VPD_FLD_LEN];
+ /* RoCE mirror */
+ u16 mirror_vnic_id;
+ union ib_gid ugid;
+ u32 ugid_index;
+ u8 sniffer_flow_created : 1;
};
#define to_bnxt_re_dev(ptr, member) \
@@ -243,6 +247,10 @@ int bnxt_re_assign_pma_port_counters(struct bnxt_re_dev *rdev, struct ib_mad *ou
int bnxt_re_assign_pma_port_ext_counters(struct bnxt_re_dev *rdev,
struct ib_mad *out_mad);
+void bnxt_re_hwrm_free_vnic(struct bnxt_re_dev *rdev);
+int bnxt_re_hwrm_alloc_vnic(struct bnxt_re_dev *rdev);
+int bnxt_re_hwrm_cfg_vnic(struct bnxt_re_dev *rdev, u32 qp_id);
+
static inline struct device *rdev_to_dev(struct bnxt_re_dev *rdev)
{
if (rdev)
@@ -276,4 +284,7 @@ static inline int bnxt_re_read_context_allowed(struct bnxt_re_dev *rdev)
#define BNXT_RE_CONTEXT_TYPE_MRW_SIZE_P7 192
#define BNXT_RE_CONTEXT_TYPE_SRQ_SIZE_P7 192
+#define BNXT_RE_HWRM_CMD_TIMEOUT(rdev) \
+ ((rdev)->chip_ctx->hwrm_cmd_max_timeout * 1000)
+
#endif
diff --git a/drivers/infiniband/hw/bnxt_re/debugfs.c b/drivers/infiniband/hw/bnxt_re/debugfs.c
index e632f1661b92..be5e9b5ca2f0 100644
--- a/drivers/infiniband/hw/bnxt_re/debugfs.c
+++ b/drivers/infiniband/hw/bnxt_re/debugfs.c
@@ -8,6 +8,7 @@
#include <linux/debugfs.h>
#include <linux/pci.h>
+#include <linux/seq_file.h>
#include <rdma/ib_addr.h>
#include "bnxt_ulp.h"
@@ -314,6 +315,40 @@ static const struct file_operations bnxt_re_cc_config_ops = {
.write = bnxt_re_cc_config_set,
};
+static int info_show(struct seq_file *m, void *unused)
+{
+ struct bnxt_re_dev *rdev = m->private;
+ struct bnxt_re_res_cntrs *res_s = &rdev->stats.res;
+
+ seq_puts(m, "Info:\n");
+ seq_printf(m, "Device Name\t\t: %s\n", dev_name(&rdev->ibdev.dev));
+ seq_printf(m, "PD Watermark\t\t: %llu\n", res_s->pd_watermark);
+ seq_printf(m, "AH Watermark\t\t: %llu\n", res_s->ah_watermark);
+ seq_printf(m, "QP Watermark\t\t: %llu\n", res_s->qp_watermark);
+ seq_printf(m, "RC QP Watermark\t\t: %llu\n", res_s->rc_qp_watermark);
+ seq_printf(m, "UD QP Watermark\t\t: %llu\n", res_s->ud_qp_watermark);
+ seq_printf(m, "SRQ Watermark\t\t: %llu\n", res_s->srq_watermark);
+ seq_printf(m, "CQ Watermark\t\t: %llu\n", res_s->cq_watermark);
+ seq_printf(m, "MR Watermark\t\t: %llu\n", res_s->mr_watermark);
+ seq_printf(m, "MW Watermark\t\t: %llu\n", res_s->mw_watermark);
+ seq_printf(m, "CQ Resize Count\t\t: %d\n", atomic_read(&res_s->resize_count));
+ if (rdev->pacing.dbr_pacing) {
+ seq_printf(m, "DB Pacing Reschedule\t: %llu\n", rdev->stats.pacing.resched);
+ seq_printf(m, "DB Pacing Complete\t: %llu\n", rdev->stats.pacing.complete);
+ seq_printf(m, "DB Pacing Alerts\t: %llu\n", rdev->stats.pacing.alerts);
+ seq_printf(m, "DB FIFO Register\t: 0x%x\n",
+ readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off));
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(info);
+
+static void bnxt_re_debugfs_add_info(struct bnxt_re_dev *rdev)
+{
+ debugfs_create_file("info", 0400, rdev->dbg_root, rdev, &info_fops);
+}
+
void bnxt_re_debugfs_add_pdev(struct bnxt_re_dev *rdev)
{
struct pci_dev *pdev = rdev->en_dev->pdev;
@@ -325,6 +360,8 @@ void bnxt_re_debugfs_add_pdev(struct bnxt_re_dev *rdev)
rdev->qp_debugfs = debugfs_create_dir("QPs", rdev->dbg_root);
rdev->cc_config = debugfs_create_dir("cc_config", rdev->dbg_root);
+ bnxt_re_debugfs_add_info(rdev);
+
rdev->cc_config_params = kzalloc(sizeof(*cc_params), GFP_KERNEL);
for (i = 0; i < BNXT_RE_CC_PARAM_GEN0; i++) {
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
index 44bb082e0a60..651cf9d0e0c7 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -51,25 +51,6 @@
#include "hw_counters.h"
static const struct rdma_stat_desc bnxt_re_stat_descs[] = {
- [BNXT_RE_ACTIVE_PD].name = "active_pds",
- [BNXT_RE_ACTIVE_AH].name = "active_ahs",
- [BNXT_RE_ACTIVE_QP].name = "active_qps",
- [BNXT_RE_ACTIVE_RC_QP].name = "active_rc_qps",
- [BNXT_RE_ACTIVE_UD_QP].name = "active_ud_qps",
- [BNXT_RE_ACTIVE_SRQ].name = "active_srqs",
- [BNXT_RE_ACTIVE_CQ].name = "active_cqs",
- [BNXT_RE_ACTIVE_MR].name = "active_mrs",
- [BNXT_RE_ACTIVE_MW].name = "active_mws",
- [BNXT_RE_WATERMARK_PD].name = "watermark_pds",
- [BNXT_RE_WATERMARK_AH].name = "watermark_ahs",
- [BNXT_RE_WATERMARK_QP].name = "watermark_qps",
- [BNXT_RE_WATERMARK_RC_QP].name = "watermark_rc_qps",
- [BNXT_RE_WATERMARK_UD_QP].name = "watermark_ud_qps",
- [BNXT_RE_WATERMARK_SRQ].name = "watermark_srqs",
- [BNXT_RE_WATERMARK_CQ].name = "watermark_cqs",
- [BNXT_RE_WATERMARK_MR].name = "watermark_mrs",
- [BNXT_RE_WATERMARK_MW].name = "watermark_mws",
- [BNXT_RE_RESIZE_CQ_CNT].name = "resize_cq_cnt",
[BNXT_RE_RX_PKTS].name = "rx_pkts",
[BNXT_RE_RX_BYTES].name = "rx_bytes",
[BNXT_RE_TX_PKTS].name = "tx_pkts",
@@ -79,22 +60,22 @@ static const struct rdma_stat_desc bnxt_re_stat_descs[] = {
[BNXT_RE_TX_DISCARDS].name = "tx_roce_discards",
[BNXT_RE_RX_ERRORS].name = "rx_roce_errors",
[BNXT_RE_RX_DISCARDS].name = "rx_roce_discards",
- [BNXT_RE_TO_RETRANSMITS].name = "to_retransmits",
- [BNXT_RE_SEQ_ERR_NAKS_RCVD].name = "seq_err_naks_rcvd",
- [BNXT_RE_MAX_RETRY_EXCEEDED].name = "max_retry_exceeded",
- [BNXT_RE_RNR_NAKS_RCVD].name = "rnr_naks_rcvd",
- [BNXT_RE_MISSING_RESP].name = "missing_resp",
+ [BNXT_RE_TO_RETRANSMITS].name = "local_ack_timeout_err",
+ [BNXT_RE_SEQ_ERR_NAKS_RCVD].name = "packet_seq_err",
+ [BNXT_RE_MAX_RETRY_EXCEEDED].name = "max_retry_exceeded",
+ [BNXT_RE_RNR_NAKS_RCVD].name = "rnr_nak_retry_err",
+ [BNXT_RE_MISSING_RESP].name = "implied_nak_seq_err",
[BNXT_RE_UNRECOVERABLE_ERR].name = "unrecoverable_err",
[BNXT_RE_BAD_RESP_ERR].name = "bad_resp_err",
[BNXT_RE_LOCAL_QP_OP_ERR].name = "local_qp_op_err",
[BNXT_RE_LOCAL_PROTECTION_ERR].name = "local_protection_err",
[BNXT_RE_MEM_MGMT_OP_ERR].name = "mem_mgmt_op_err",
- [BNXT_RE_REMOTE_INVALID_REQ_ERR].name = "remote_invalid_req_err",
- [BNXT_RE_REMOTE_ACCESS_ERR].name = "remote_access_err",
+ [BNXT_RE_REMOTE_INVALID_REQ_ERR].name = "req_remote_invalid_request",
+ [BNXT_RE_REMOTE_ACCESS_ERR].name = "req_remote_access_errors",
[BNXT_RE_REMOTE_OP_ERR].name = "remote_op_err",
- [BNXT_RE_DUP_REQ].name = "dup_req",
+ [BNXT_RE_DUP_REQ].name = "duplicate_request",
[BNXT_RE_RES_EXCEED_MAX].name = "res_exceed_max",
- [BNXT_RE_RES_LENGTH_MISMATCH].name = "res_length_mismatch",
+ [BNXT_RE_RES_LENGTH_MISMATCH].name = "resp_local_length_error",
[BNXT_RE_RES_EXCEEDS_WQE].name = "res_exceeds_wqe",
[BNXT_RE_RES_OPCODE_ERR].name = "res_opcode_err",
[BNXT_RE_RES_RX_INVALID_RKEY].name = "res_rx_invalid_rkey",
@@ -118,7 +99,7 @@ static const struct rdma_stat_desc bnxt_re_stat_descs[] = {
[BNXT_RE_RES_SRQ_LOAD_ERR].name = "res_srq_load_err",
[BNXT_RE_RES_TX_PCI_ERR].name = "res_tx_pci_err",
[BNXT_RE_RES_RX_PCI_ERR].name = "res_rx_pci_err",
- [BNXT_RE_OUT_OF_SEQ_ERR].name = "oos_drop_count",
+ [BNXT_RE_OUT_OF_SEQ_ERR].name = "out_of_sequence",
[BNXT_RE_TX_ATOMIC_REQ].name = "tx_atomic_req",
[BNXT_RE_TX_READ_REQ].name = "tx_read_req",
[BNXT_RE_TX_READ_RES].name = "tx_read_resp",
@@ -126,23 +107,22 @@ static const struct rdma_stat_desc bnxt_re_stat_descs[] = {
[BNXT_RE_TX_SEND_REQ].name = "tx_send_req",
[BNXT_RE_TX_ROCE_PKTS].name = "tx_roce_only_pkts",
[BNXT_RE_TX_ROCE_BYTES].name = "tx_roce_only_bytes",
- [BNXT_RE_RX_ATOMIC_REQ].name = "rx_atomic_req",
- [BNXT_RE_RX_READ_REQ].name = "rx_read_req",
+ [BNXT_RE_RX_ATOMIC_REQ].name = "rx_atomic_requests",
+ [BNXT_RE_RX_READ_REQ].name = "rx_read_requests",
[BNXT_RE_RX_READ_RESP].name = "rx_read_resp",
- [BNXT_RE_RX_WRITE_REQ].name = "rx_write_req",
+ [BNXT_RE_RX_WRITE_REQ].name = "rx_write_requests",
[BNXT_RE_RX_SEND_REQ].name = "rx_send_req",
[BNXT_RE_RX_ROCE_PKTS].name = "rx_roce_only_pkts",
[BNXT_RE_RX_ROCE_BYTES].name = "rx_roce_only_bytes",
[BNXT_RE_RX_ROCE_GOOD_PKTS].name = "rx_roce_good_pkts",
[BNXT_RE_RX_ROCE_GOOD_BYTES].name = "rx_roce_good_bytes",
- [BNXT_RE_OOB].name = "rx_out_of_buffer",
- [BNXT_RE_TX_CNP].name = "tx_cnp_pkts",
- [BNXT_RE_RX_CNP].name = "rx_cnp_pkts",
- [BNXT_RE_RX_ECN].name = "rx_ecn_marked_pkts",
- [BNXT_RE_PACING_RESCHED].name = "pacing_reschedule",
- [BNXT_RE_PACING_CMPL].name = "pacing_complete",
- [BNXT_RE_PACING_ALERT].name = "pacing_alerts",
- [BNXT_RE_DB_FIFO_REG].name = "db_fifo_register",
+ [BNXT_RE_OOB].name = "out_of_buffer",
+ [BNXT_RE_TX_CNP].name = "np_cnp_pkts",
+ [BNXT_RE_RX_CNP].name = "rp_cnp_handled",
+ [BNXT_RE_RX_ECN].name = "np_ecn_marked_roce_packets",
+ [BNXT_RE_REQ_CQE_ERROR].name = "req_cqe_error",
+ [BNXT_RE_RESP_CQE_ERROR].name = "resp_cqe_error",
+ [BNXT_RE_RESP_REMOTE_ACCESS_ERRS].name = "resp_remote_access_errors",
};
static void bnxt_re_copy_ext_stats(struct bnxt_re_dev *rdev,
@@ -273,18 +253,20 @@ static void bnxt_re_copy_err_stats(struct bnxt_re_dev *rdev,
err_s->res_rx_pci_err;
stats->value[BNXT_RE_OUT_OF_SEQ_ERR] =
err_s->res_oos_drop_count;
-}
-
-static void bnxt_re_copy_db_pacing_stats(struct bnxt_re_dev *rdev,
- struct rdma_hw_stats *stats)
-{
- struct bnxt_re_db_pacing_stats *pacing_s = &rdev->stats.pacing;
-
- stats->value[BNXT_RE_PACING_RESCHED] = pacing_s->resched;
- stats->value[BNXT_RE_PACING_CMPL] = pacing_s->complete;
- stats->value[BNXT_RE_PACING_ALERT] = pacing_s->alerts;
- stats->value[BNXT_RE_DB_FIFO_REG] =
- readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off);
+ stats->value[BNXT_RE_REQ_CQE_ERROR] =
+ err_s->bad_resp_err +
+ err_s->local_qp_op_err +
+ err_s->local_protection_err +
+ err_s->mem_mgmt_op_err +
+ err_s->remote_invalid_req_err +
+ err_s->remote_access_err +
+ err_s->remote_op_err;
+ stats->value[BNXT_RE_RESP_CQE_ERROR] =
+ err_s->res_cmp_err +
+ err_s->res_cq_load_err;
+ stats->value[BNXT_RE_RESP_REMOTE_ACCESS_ERRS] =
+ err_s->res_rx_no_perm +
+ err_s->res_tx_no_perm;
}
int bnxt_re_assign_pma_port_ext_counters(struct bnxt_re_dev *rdev, struct ib_mad *out_mad)
@@ -382,7 +364,6 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
u32 port, int index)
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
- struct bnxt_re_res_cntrs *res_s = &rdev->stats.res;
struct bnxt_qplib_roce_stats *err_s = NULL;
struct ctx_hw_stats *hw_stats = NULL;
int rc = 0;
@@ -391,26 +372,6 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
if (!port || !stats)
return -EINVAL;
- stats->value[BNXT_RE_ACTIVE_QP] = atomic_read(&res_s->qp_count);
- stats->value[BNXT_RE_ACTIVE_RC_QP] = atomic_read(&res_s->rc_qp_count);
- stats->value[BNXT_RE_ACTIVE_UD_QP] = atomic_read(&res_s->ud_qp_count);
- stats->value[BNXT_RE_ACTIVE_SRQ] = atomic_read(&res_s->srq_count);
- stats->value[BNXT_RE_ACTIVE_CQ] = atomic_read(&res_s->cq_count);
- stats->value[BNXT_RE_ACTIVE_MR] = atomic_read(&res_s->mr_count);
- stats->value[BNXT_RE_ACTIVE_MW] = atomic_read(&res_s->mw_count);
- stats->value[BNXT_RE_ACTIVE_PD] = atomic_read(&res_s->pd_count);
- stats->value[BNXT_RE_ACTIVE_AH] = atomic_read(&res_s->ah_count);
- stats->value[BNXT_RE_WATERMARK_QP] = res_s->qp_watermark;
- stats->value[BNXT_RE_WATERMARK_RC_QP] = res_s->rc_qp_watermark;
- stats->value[BNXT_RE_WATERMARK_UD_QP] = res_s->ud_qp_watermark;
- stats->value[BNXT_RE_WATERMARK_SRQ] = res_s->srq_watermark;
- stats->value[BNXT_RE_WATERMARK_CQ] = res_s->cq_watermark;
- stats->value[BNXT_RE_WATERMARK_MR] = res_s->mr_watermark;
- stats->value[BNXT_RE_WATERMARK_MW] = res_s->mw_watermark;
- stats->value[BNXT_RE_WATERMARK_PD] = res_s->pd_watermark;
- stats->value[BNXT_RE_WATERMARK_AH] = res_s->ah_watermark;
- stats->value[BNXT_RE_RESIZE_CQ_CNT] = atomic_read(&res_s->resize_count);
-
if (hw_stats) {
stats->value[BNXT_RE_RECOVERABLE_ERRORS] =
le64_to_cpu(hw_stats->tx_bcast_pkts);
@@ -449,8 +410,6 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
goto done;
}
}
- if (rdev->pacing.dbr_pacing && bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
- bnxt_re_copy_db_pacing_stats(rdev, stats);
}
done:
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.h b/drivers/infiniband/hw/bnxt_re/hw_counters.h
index e541b6f8ca9f..09d371d442aa 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.h
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.h
@@ -41,25 +41,6 @@
#define __BNXT_RE_HW_STATS_H__
enum bnxt_re_hw_stats {
- BNXT_RE_ACTIVE_PD,
- BNXT_RE_ACTIVE_AH,
- BNXT_RE_ACTIVE_QP,
- BNXT_RE_ACTIVE_RC_QP,
- BNXT_RE_ACTIVE_UD_QP,
- BNXT_RE_ACTIVE_SRQ,
- BNXT_RE_ACTIVE_CQ,
- BNXT_RE_ACTIVE_MR,
- BNXT_RE_ACTIVE_MW,
- BNXT_RE_WATERMARK_PD,
- BNXT_RE_WATERMARK_AH,
- BNXT_RE_WATERMARK_QP,
- BNXT_RE_WATERMARK_RC_QP,
- BNXT_RE_WATERMARK_UD_QP,
- BNXT_RE_WATERMARK_SRQ,
- BNXT_RE_WATERMARK_CQ,
- BNXT_RE_WATERMARK_MR,
- BNXT_RE_WATERMARK_MW,
- BNXT_RE_RESIZE_CQ_CNT,
BNXT_RE_RX_PKTS,
BNXT_RE_RX_BYTES,
BNXT_RE_TX_PKTS,
@@ -129,10 +110,9 @@ enum bnxt_re_hw_stats {
BNXT_RE_TX_CNP,
BNXT_RE_RX_CNP,
BNXT_RE_RX_ECN,
- BNXT_RE_PACING_RESCHED,
- BNXT_RE_PACING_CMPL,
- BNXT_RE_PACING_ALERT,
- BNXT_RE_DB_FIFO_REG,
+ BNXT_RE_REQ_CQE_ERROR,
+ BNXT_RE_RESP_CQE_ERROR,
+ BNXT_RE_RESP_REMOTE_ACCESS_ERRS,
BNXT_RE_NUM_EXT_COUNTERS
};
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 37c2bc3bdba5..4dab5ca7362b 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -288,7 +288,9 @@ int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
}
port_attr->max_mtu = IB_MTU_4096;
port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
- port_attr->gid_tbl_len = dev_attr->max_sgid;
+	/* One GID is reserved for the RawEth QP; report one less. */
+ port_attr->gid_tbl_len = (rdev->rcfw.roce_mirror ? (dev_attr->max_sgid - 1) :
+ dev_attr->max_sgid);
port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
IB_PORT_DEVICE_MGMT_SUP |
IB_PORT_VENDOR_CLASS_SUP;
@@ -375,7 +377,7 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
if (!ctx)
return -EINVAL;
- if (sgid_tbl && sgid_tbl->active) {
+ if (sgid_tbl->active) {
if (ctx->idx >= sgid_tbl->max)
return -EINVAL;
gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
@@ -429,7 +431,7 @@ int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
rdev->qplib_res.netdev->dev_addr,
- vlan_id, true, &tbl_idx);
+ vlan_id, true, &tbl_idx, false, 0);
if (rc == -EALREADY) {
ctx_tbl = sgid_tbl->ctx;
ctx_tbl[tbl_idx]->refcnt++;
@@ -955,6 +957,20 @@ fail:
return rc;
}
+static void bnxt_re_del_unique_gid(struct bnxt_re_dev *rdev)
+{
+ int rc;
+
+ if (!rdev->rcfw.roce_mirror)
+ return;
+
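+	/* vlan_id 0xFFFF means untagged; update == true issues the DELETE_GID firmware command */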
+ rc = bnxt_qplib_del_sgid(&rdev->qplib_res.sgid_tbl,
+ (struct bnxt_qplib_gid *)&rdev->ugid,
+ 0xFFFF, true);
+ if (rc)
+ dev_err(rdev_to_dev(rdev), "Failed to delete unique GID, rc: %d\n", rc);
+}
+
/* Queue Pairs */
int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
{
@@ -994,6 +1010,9 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD)
atomic_dec(&rdev->stats.res.ud_qp_count);
+ if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE)
+ bnxt_re_del_unique_gid(rdev);
+
ib_umem_release(qp->rumem);
ib_umem_release(qp->sumem);
@@ -1018,6 +1037,8 @@ static u8 __from_ib_qp_type(enum ib_qp_type type)
return CMDQ_CREATE_QP_TYPE_RC;
case IB_QPT_UD:
return CMDQ_CREATE_QP_TYPE_UD;
+ case IB_QPT_RAW_PACKET:
+ return CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE;
default:
return IB_QPT_MAX;
}
@@ -1595,6 +1616,29 @@ static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
return rc;
}
+static int bnxt_re_add_unique_gid(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+ struct bnxt_qplib_res *res = &rdev->qplib_res;
+ int rc;
+
+ if (!rdev->rcfw.roce_mirror)
+ return 0;
+
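+	/* Build a driver-private link-local style GID: an fe80-based
+	 * prefix tagged with 0xabcd plus the netdev's EUI-48 interface id.
+	 */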
+ rdev->ugid.global.subnet_prefix = cpu_to_be64(0xfe8000000000abcdLL);
+ addrconf_ifid_eui48(&rdev->ugid.raw[8], rdev->netdev);
+
+ rc = bnxt_qplib_add_sgid(&res->sgid_tbl,
+ (struct bnxt_qplib_gid *)&rdev->ugid,
+ rdev->qplib_res.netdev->dev_addr,
+ 0xFFFF, true, &rdev->ugid_index, true,
+ hctx->stats3.fw_id);
+ if (rc)
+ dev_err(rdev_to_dev(rdev), "Failed to add unique GID. rc = %d\n", rc);
+
+ return rc;
+}
+
int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
struct ib_udata *udata)
{
@@ -1656,6 +1700,17 @@ int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
}
}
+	/* Support for RawEth QP is added to capture TCP packet dumps.
+	 * A unique SGID is used so that mirrored traffic does not skew
+	 * the per-function stats_ctx counters.
+	 */
+ if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE) {
+ rc = bnxt_re_add_unique_gid(rdev);
+ if (rc)
+ goto qp_destroy;
+ qp->qplib_qp.ugid_index = rdev->ugid_index;
+ }
+
qp->ib_qp.qp_num = qp->qplib_qp.id;
if (qp_init_attr->qp_type == IB_QPT_GSI)
rdev->gsi_ctx.gsi_qp = qp;
@@ -1921,7 +1976,6 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
ib_srq);
struct bnxt_re_dev *rdev = srq->rdev;
- int rc;
switch (srq_attr_mask) {
case IB_SRQ_MAX_WR:
@@ -1933,11 +1987,8 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
return -EINVAL;
srq->qplib_srq.threshold = srq_attr->srq_limit;
- rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
- if (rc) {
- ibdev_err(&rdev->ibdev, "Modify HW SRQ failed!");
- return rc;
- }
+ bnxt_qplib_srq_arm_db(&srq->qplib_srq.dbinfo, srq->qplib_srq.threshold);
+
/* On success, update the shadow */
srq->srq_limit = srq_attr->srq_limit;
/* No need to Build and send response back to udata */
@@ -2305,7 +2356,7 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
qp_attr->pkey_index = qplib_qp->pkey_index;
qp_attr->qkey = qplib_qp->qkey;
qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
- rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
+ rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->udp_sport,
qplib_qp->ah.host_sgid_index,
qplib_qp->ah.hop_limit,
qplib_qp->ah.traffic_class);
@@ -3252,9 +3303,9 @@ int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(cq->resize_umem)) {
rc = PTR_ERR(cq->resize_umem);
+ ibdev_err(&rdev->ibdev, "%s: ib_umem_get failed! rc = %pe\n",
+ __func__, cq->resize_umem);
cq->resize_umem = NULL;
- ibdev_err(&rdev->ibdev, "%s: ib_umem_get failed! rc = %d\n",
- __func__, rc);
goto fail;
}
cq->resize_cqe = entries;
@@ -4396,6 +4447,93 @@ void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
}
}
+static int bnxt_re_setup_vnic(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp)
+{
+ int rc;
+
+ rc = bnxt_re_hwrm_alloc_vnic(rdev);
+ if (rc)
+ return rc;
+
+ rc = bnxt_re_hwrm_cfg_vnic(rdev, qp->qplib_qp.id);
+ if (rc)
+ goto out_free_vnic;
+
+ return 0;
+out_free_vnic:
+ bnxt_re_hwrm_free_vnic(rdev);
+ return rc;
+}
+
+struct ib_flow *bnxt_re_create_flow(struct ib_qp *ib_qp,
+ struct ib_flow_attr *attr,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
+ struct bnxt_re_dev *rdev = qp->rdev;
+ struct bnxt_re_flow *flow;
+ int rc;
+
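+	/* Only sniffer flows are supported, and only when FW advertises RoCE mirroring */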
+ if (attr->type != IB_FLOW_ATTR_SNIFFER ||
+ !rdev->rcfw.roce_mirror)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mutex_lock(&rdev->qp_lock);
+ if (rdev->sniffer_flow_created) {
+ ibdev_err(&rdev->ibdev, "RoCE Mirroring is already Configured\n");
+ mutex_unlock(&rdev->qp_lock);
+ return ERR_PTR(-EBUSY);
+ }
+
+ flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+ if (!flow) {
+ mutex_unlock(&rdev->qp_lock);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ flow->rdev = rdev;
+
+ rc = bnxt_re_setup_vnic(rdev, qp);
+ if (rc)
+ goto out_free_flow;
+
+ rc = bnxt_qplib_create_flow(&rdev->qplib_res);
+ if (rc)
+ goto out_free_vnic;
+
+ rdev->sniffer_flow_created = 1;
+ mutex_unlock(&rdev->qp_lock);
+
+ return &flow->ib_flow;
+
+out_free_vnic:
+ bnxt_re_hwrm_free_vnic(rdev);
+out_free_flow:
+ mutex_unlock(&rdev->qp_lock);
+ kfree(flow);
+ return ERR_PTR(rc);
+}
+
+int bnxt_re_destroy_flow(struct ib_flow *flow_id)
+{
+ struct bnxt_re_flow *flow =
+ container_of(flow_id, struct bnxt_re_flow, ib_flow);
+ struct bnxt_re_dev *rdev = flow->rdev;
+ int rc;
+
+ mutex_lock(&rdev->qp_lock);
+ rc = bnxt_qplib_destroy_flow(&rdev->qplib_res);
+ if (rc)
+ ibdev_dbg(&rdev->ibdev, "failed to destroy_flow rc = %d\n", rc);
+ rdev->sniffer_flow_created = 0;
+
+ bnxt_re_hwrm_free_vnic(rdev);
+ mutex_unlock(&rdev->qp_lock);
+ kfree(flow);
+
+ return rc;
+}
+
static struct bnxt_re_cq *bnxt_re_search_for_cq(struct bnxt_re_dev *rdev, u32 cq_id)
{
struct bnxt_re_cq *cq = NULL, *tmp_cq;
@@ -4608,7 +4746,7 @@ static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *
return err;
err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_DPI,
- &dpi, sizeof(length));
+ &dpi, sizeof(dpi));
if (err)
return err;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index fe00ab691a51..76ba9ab04d5c 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -164,6 +164,11 @@ struct bnxt_re_user_mmap_entry {
u8 mmap_flag;
};
+struct bnxt_re_flow {
+ struct ib_flow ib_flow;
+ struct bnxt_re_dev *rdev;
+};
+
static inline u16 bnxt_re_get_swqe_size(int nsge)
{
return sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
@@ -267,6 +272,11 @@ struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
struct uverbs_attr_bundle *attrs);
int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata);
void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
+struct ib_flow *bnxt_re_create_flow(struct ib_qp *ib_qp,
+ struct ib_flow_attr *attr,
+ struct ib_udata *udata);
+int bnxt_re_destroy_flow(struct ib_flow *flow_id);
+
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 293b0a96c8e3..b13810572c2e 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -80,6 +80,7 @@ MODULE_LICENSE("Dual BSD/GPL");
static DEFINE_MUTEX(bnxt_re_mutex);
static int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev);
+static int bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev);
static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
u32 *offset);
@@ -188,6 +189,10 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
rdev->qplib_res.is_vf = BNXT_EN_VF(en_dev);
rdev->qplib_res.en_dev = en_dev;
+ rc = bnxt_re_query_hwrm_intf_version(rdev);
+ if (rc)
+ goto free_dev_attr;
+
bnxt_re_set_drv_mode(rdev);
bnxt_re_set_db_offset(rdev);
@@ -540,6 +545,72 @@ static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
fw_msg->timeout = timeout;
}
+void bnxt_re_hwrm_free_vnic(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct hwrm_vnic_free_input req = {};
+ struct bnxt_fw_msg fw_msg = {};
+ int rc;
+
+ bnxt_re_init_hwrm_hdr((void *)&req, HWRM_VNIC_FREE);
+
+ req.vnic_id = cpu_to_le32(rdev->mirror_vnic_id);
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), NULL,
+ 0, BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
+ rc = bnxt_send_msg(en_dev, &fw_msg);
+ if (rc)
+ ibdev_dbg(&rdev->ibdev,
+ "Failed to free vnic, rc = %d\n", rc);
+}
+
+int bnxt_re_hwrm_alloc_vnic(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct hwrm_vnic_alloc_output resp = {};
+ struct hwrm_vnic_alloc_input req = {};
+ struct bnxt_fw_msg fw_msg = {};
+ int rc;
+
+ bnxt_re_init_hwrm_hdr((void *)&req, HWRM_VNIC_ALLOC);
+
+ req.vnic_id = cpu_to_le16(rdev->mirror_vnic_id);
+ req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_VNIC_ID_VALID);
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
+ rc = bnxt_send_msg(en_dev, &fw_msg);
+ if (rc)
+ ibdev_dbg(&rdev->ibdev,
+ "Failed to alloc vnic, rc = %d\n", rc);
+
+ return rc;
+}
+
+int bnxt_re_hwrm_cfg_vnic(struct bnxt_re_dev *rdev, u32 qp_id)
+{
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct hwrm_vnic_cfg_input req = {};
+ struct bnxt_fw_msg fw_msg = {};
+ int rc;
+
+ bnxt_re_init_hwrm_hdr((void *)&req, HWRM_VNIC_CFG);
+
+ req.flags = cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE);
+ req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_RAW_QP_ID |
+ VNIC_CFG_REQ_ENABLES_MRU);
+ req.vnic_id = cpu_to_le16(rdev->mirror_vnic_id);
+ req.raw_qp_id = cpu_to_le32(qp_id);
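+	/* MRU must cover the netdev MTU plus Ethernet and VLAN headers */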
+ req.mru = cpu_to_le16(rdev->netdev->mtu + VLAN_ETH_HLEN);
+
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), NULL,
+ 0, BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
+ rc = bnxt_send_msg(en_dev, &fw_msg);
+ if (rc)
+ ibdev_dbg(&rdev->ibdev,
+ "Failed to cfg vnic, rc = %d\n", rc);
+
+ return rc;
+}
+
/* Query device config using common hwrm */
static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
u32 *offset)
@@ -553,11 +624,12 @@ static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_QCFG);
req.fid = cpu_to_le16(0xffff);
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
- sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
rc = bnxt_send_msg(en_dev, &fw_msg);
if (!rc) {
*db_len = PAGE_ALIGN(le16_to_cpu(resp.l2_doorbell_bar_size_kb) * 1024);
*offset = PAGE_ALIGN(le16_to_cpu(resp.legacy_l2_db_size_kb) * 1024);
+ rdev->mirror_vnic_id = le16_to_cpu(resp.mirror_vnic_id);
}
return rc;
}
@@ -577,7 +649,7 @@ int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev)
bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_QCAPS);
req.fid = cpu_to_le16(0xffff);
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
- sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
rc = bnxt_send_msg(en_dev, &fw_msg);
if (rc)
@@ -587,6 +659,8 @@ int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev)
flags_ext2 = le32_to_cpu(resp.flags_ext2);
cctx->modes.dbr_pacing = flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED ||
flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED;
+ cctx->modes.roce_mirror = !!(le32_to_cpu(resp.flags_ext3) &
+ FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED);
return 0;
}
@@ -603,7 +677,7 @@ static int bnxt_re_hwrm_dbr_pacing_qcfg(struct bnxt_re_dev *rdev)
cctx = rdev->chip_ctx;
bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_DBR_PACING_QCFG);
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
- sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
rc = bnxt_send_msg(en_dev, &fw_msg);
if (rc)
return rc;
@@ -842,20 +916,12 @@ static void bnxt_re_deinitialize_dbr_pacing(struct bnxt_re_dev *rdev)
static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
u16 fw_ring_id, int type)
{
- struct bnxt_en_dev *en_dev;
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
struct hwrm_ring_free_input req = {};
struct hwrm_ring_free_output resp;
struct bnxt_fw_msg fw_msg = {};
int rc = -EINVAL;
- if (!rdev)
- return rc;
-
- en_dev = rdev->en_dev;
-
- if (!en_dev)
- return rc;
-
if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
return 0;
@@ -863,7 +929,7 @@ static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
req.ring_type = type;
req.ring_id = cpu_to_le16(fw_ring_id);
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
- sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
rc = bnxt_send_msg(en_dev, &fw_msg);
if (rc)
ibdev_err(&rdev->ibdev, "Failed to free HW ring:%d :%#x",
@@ -881,9 +947,6 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev,
struct bnxt_fw_msg fw_msg = {};
int rc = -EINVAL;
- if (!en_dev)
- return rc;
-
bnxt_re_init_hwrm_hdr((void *)&req, HWRM_RING_ALLOC);
req.enables = 0;
req.page_tbl_addr = cpu_to_le64(ring_attr->dma_arr[0]);
@@ -899,7 +962,7 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev,
req.ring_type = ring_attr->type;
req.int_mode = ring_attr->mode;
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
- sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
rc = bnxt_send_msg(en_dev, &fw_msg);
if (!rc)
*fw_ring_id = le16_to_cpu(resp.ring_id);
@@ -916,16 +979,13 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
struct bnxt_fw_msg fw_msg = {};
int rc = -EINVAL;
- if (!en_dev)
- return rc;
-
if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
return 0;
bnxt_re_init_hwrm_hdr((void *)&req, HWRM_STAT_CTX_FREE);
req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
- sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
rc = bnxt_send_msg(en_dev, &fw_msg);
if (rc)
ibdev_err(&rdev->ibdev, "Failed to free HW stats context %#x",
@@ -935,8 +995,7 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
}
static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
- dma_addr_t dma_map,
- u32 *fw_stats_ctx_id)
+ struct bnxt_qplib_stats *stats)
{
struct bnxt_qplib_chip_ctx *chip_ctx = rdev->chip_ctx;
struct hwrm_stat_ctx_alloc_output resp = {};
@@ -945,21 +1004,18 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
struct bnxt_fw_msg fw_msg = {};
int rc = -EINVAL;
- *fw_stats_ctx_id = INVALID_STATS_CTX_ID;
-
- if (!en_dev)
- return rc;
+ stats->fw_id = INVALID_STATS_CTX_ID;
bnxt_re_init_hwrm_hdr((void *)&req, HWRM_STAT_CTX_ALLOC);
req.update_period_ms = cpu_to_le32(1000);
- req.stats_dma_addr = cpu_to_le64(dma_map);
+ req.stats_dma_addr = cpu_to_le64(stats->dma_map);
req.stats_dma_length = cpu_to_le16(chip_ctx->hw_stats_size);
req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
- sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
rc = bnxt_send_msg(en_dev, &fw_msg);
if (!rc)
- *fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id);
+ stats->fw_id = le32_to_cpu(resp.stat_ctx_id);
return rc;
}
@@ -975,7 +1031,7 @@ static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
struct bnxt_re_dev *rdev =
rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);
- return sysfs_emit(buf, "0x%x\n", rdev->en_dev->pdev->vendor);
+ return sysfs_emit(buf, "0x%x\n", rdev->en_dev->pdev->revision);
}
static DEVICE_ATTR_RO(hw_rev);
@@ -985,13 +1041,31 @@ static ssize_t hca_type_show(struct device *device,
struct bnxt_re_dev *rdev =
rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);
- return sysfs_emit(buf, "%s\n", rdev->ibdev.node_desc);
+ return sysfs_emit(buf, "0x%x\n", rdev->en_dev->pdev->device);
}
static DEVICE_ATTR_RO(hca_type);
+static ssize_t board_id_show(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct bnxt_re_dev *rdev = rdma_device_to_drv_device(device,
+ struct bnxt_re_dev, ibdev);
+ char buffer[BNXT_VPD_FLD_LEN] = {};
+
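+	/* VFs carry no VPD; report the PCI device id with a -VF suffix instead */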
+ if (!rdev->is_virtfn)
+ memcpy(buffer, rdev->board_partno, BNXT_VPD_FLD_LEN - 1);
+ else
+ scnprintf(buffer, BNXT_VPD_FLD_LEN, "0x%x-VF",
+ rdev->en_dev->pdev->device);
+
+ return sysfs_emit(buf, "%s\n", buffer);
+}
+static DEVICE_ATTR_RO(board_id);
+
static struct attribute *bnxt_re_attributes[] = {
&dev_attr_hw_rev.attr,
&dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
NULL
};
@@ -1207,6 +1281,8 @@ static int bnxt_re_fill_res_srq_entry(struct sk_buff *msg, struct ib_srq *ib_srq
goto err;
if (rdma_nl_put_driver_u32_hex(msg, "max_sge", srq->qplib_srq.max_sge))
goto err;
+ if (rdma_nl_put_driver_u32_hex(msg, "srq_limit", srq->qplib_srq.threshold))
+ goto err;
nla_nest_end(msg, table_attr);
return 0;
@@ -1297,6 +1373,8 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
.reg_user_mr_dmabuf = bnxt_re_reg_user_mr_dmabuf,
.req_notify_cq = bnxt_re_req_notify_cq,
.resize_cq = bnxt_re_resize_cq,
+ .create_flow = bnxt_re_create_flow,
+ .destroy_flow = bnxt_re_destroy_flow,
INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah),
INIT_RDMA_OBJ_SIZE(ib_cq, bnxt_re_cq, ib_cq),
INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd),
@@ -1323,8 +1401,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
/* ib device init */
ibdev->node_type = RDMA_NODE_IB_CA;
- strscpy(ibdev->node_desc, BNXT_RE_DESC " HCA",
- strlen(BNXT_RE_DESC) + 5);
+ strscpy(ibdev->node_desc, BNXT_RE_DESC " HCA");
ibdev->phys_port_cnt = 1;
addrconf_addr_eui48((u8 *)&ibdev->node_guid, rdev->netdev->dev_addr);
@@ -1850,81 +1927,6 @@ static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
mutex_unlock(&rdev->qp_lock);
}
-static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
-{
- struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
- struct bnxt_qplib_gid gid;
- u16 gid_idx, index;
- int rc = 0;
-
- if (!ib_device_try_get(&rdev->ibdev))
- return 0;
-
- for (index = 0; index < sgid_tbl->active; index++) {
- gid_idx = sgid_tbl->hw_id[index];
-
- if (!memcmp(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
- sizeof(bnxt_qplib_gid_zero)))
- continue;
- /* need to modify the VLAN enable setting of non VLAN GID only
- * as setting is done for VLAN GID while adding GID
- */
- if (sgid_tbl->vlan[index])
- continue;
-
- memcpy(&gid, &sgid_tbl->tbl[index], sizeof(gid));
-
- rc = bnxt_qplib_update_sgid(sgid_tbl, &gid, gid_idx,
- rdev->qplib_res.netdev->dev_addr);
- }
-
- ib_device_put(&rdev->ibdev);
- return rc;
-}
-
-static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev)
-{
- u32 prio_map = 0, tmp_map = 0;
- struct net_device *netdev;
- struct dcb_app app = {};
-
- netdev = rdev->netdev;
-
- app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
- app.protocol = ETH_P_IBOE;
- tmp_map = dcb_ieee_getapp_mask(netdev, &app);
- prio_map = tmp_map;
-
- app.selector = IEEE_8021QAZ_APP_SEL_DGRAM;
- app.protocol = ROCE_V2_UDP_DPORT;
- tmp_map = dcb_ieee_getapp_mask(netdev, &app);
- prio_map |= tmp_map;
-
- return prio_map;
-}
-
-static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
-{
- u8 prio_map = 0;
-
- /* Get priority for roce */
- prio_map = bnxt_re_get_priority_mask(rdev);
-
- if (prio_map == rdev->cur_prio_map)
- return 0;
- rdev->cur_prio_map = prio_map;
- /* Actual priorities are not programmed as they are already
- * done by L2 driver; just enable or disable priority vlan tagging
- */
- if ((prio_map == 0 && rdev->qplib_res.prio) ||
- (prio_map != 0 && !rdev->qplib_res.prio)) {
- rdev->qplib_res.prio = prio_map;
- bnxt_re_update_gid(rdev);
- }
-
- return 0;
-}
-
static void bnxt_re_net_unregister_async_event(struct bnxt_re_dev *rdev)
{
if (rdev->is_virtfn)
@@ -1945,7 +1947,31 @@ static void bnxt_re_net_register_async_event(struct bnxt_re_dev *rdev)
ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE);
}
-static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
+static void bnxt_re_read_vpd_info(struct bnxt_re_dev *rdev)
+{
+ struct pci_dev *pdev = rdev->en_dev->pdev;
+ unsigned int vpd_size, kw_len;
+ int pos, size;
+ u8 *vpd_data;
+
+ vpd_data = pci_vpd_alloc(pdev, &vpd_size);
+ if (IS_ERR(vpd_data)) {
+ pci_warn(pdev, "Unable to read VPD, err=%pe\n", vpd_data);
+ return;
+ }
+
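+	/* The board part number is stored under the read-only VPD keyword "PN" */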
+ pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
+ if (pos < 0)
+ goto free;
+
+ size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
+ memcpy(rdev->board_partno, &vpd_data[pos], size);
+free:
+ kfree(vpd_data);
+}
+
+static int bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
{
struct bnxt_en_dev *en_dev = rdev->en_dev;
struct hwrm_ver_get_output resp = {};
@@ -1964,7 +1990,7 @@ static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
if (rc) {
ibdev_err(&rdev->ibdev, "Failed to query HW version, rc = 0x%x",
rc);
- return;
+ return rc;
}
cctx = rdev->chip_ctx;
@@ -1978,6 +2004,8 @@ static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
if (!cctx->hwrm_cmd_max_timeout)
cctx->hwrm_cmd_max_timeout = RCFW_FW_STALL_MAX_TIMEOUT;
+
+ return 0;
}
static int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
@@ -2017,6 +2045,94 @@ static void bnxt_re_free_nqr_mem(struct bnxt_re_dev *rdev)
rdev->nqr = NULL;
}
+/* When DEL_GID fails, the driver does not free the GID context memory.
+ * To avoid leaking it, free the memory during unload.
+ */
+static void bnxt_re_free_gid_ctx(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
+ struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
+ int i;
+
+ if (!sgid_tbl->active)
+ return;
+
+ ctx_tbl = sgid_tbl->ctx;
+ for (i = 0; i < sgid_tbl->max; i++) {
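+		/* Slots never programmed in HW (hw_id 0xFFFF) have no context to free */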
+ if (sgid_tbl->hw_id[i] == 0xFFFF)
+ continue;
+
+ ctx = ctx_tbl[i];
+ kfree(ctx);
+ }
+}
+
+static int bnxt_re_get_stats_ctx(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+ struct bnxt_qplib_res *res = &rdev->qplib_res;
+ int rc;
+
+ rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &hctx->stats);
+ if (rc)
+ return rc;
+
+ rc = bnxt_re_net_stats_ctx_alloc(rdev, &hctx->stats);
+ if (rc)
+ goto free_stat_mem;
+
+ return 0;
+free_stat_mem:
+ bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats);
+
+ return rc;
+}
+
+static int bnxt_re_get_stats3_ctx(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+ struct bnxt_qplib_res *res = &rdev->qplib_res;
+ int rc;
+
+ if (!rdev->rcfw.roce_mirror)
+ return 0;
+
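+	/* stats3 is a dedicated stats context bound to the unique GID so
+	 * that mirrored traffic does not skew the per-function counters.
+	 */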
+ rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &hctx->stats3);
+ if (rc)
+ return rc;
+
+ rc = bnxt_re_net_stats_ctx_alloc(rdev, &hctx->stats3);
+ if (rc)
+ goto free_stat_mem;
+
+ return 0;
+free_stat_mem:
+ bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats3);
+
+ return rc;
+}
+
+static void bnxt_re_put_stats3_ctx(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+ struct bnxt_qplib_res *res = &rdev->qplib_res;
+
+ if (!rdev->rcfw.roce_mirror)
+ return;
+
+ bnxt_re_net_stats_ctx_free(rdev, hctx->stats3.fw_id);
+ bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats3);
+}
+
+static void bnxt_re_put_stats_ctx(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+ struct bnxt_qplib_res *res = &rdev->qplib_res;
+
+ bnxt_re_net_stats_ctx_free(rdev, hctx->stats.fw_id);
+ bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats);
+}
+
static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
{
u8 type;
@@ -2027,9 +2143,9 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
bnxt_re_net_unregister_async_event(rdev);
bnxt_re_uninit_dcb_wq(rdev);
- if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
- cancel_delayed_work_sync(&rdev->worker);
+ bnxt_re_put_stats3_ctx(rdev);
+ bnxt_re_free_gid_ctx(rdev);
if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
&rdev->flags))
bnxt_re_cleanup_res(rdev);
@@ -2041,8 +2157,8 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
if (rc)
ibdev_warn(&rdev->ibdev,
"Failed to deinitialize RCFW: %#x", rc);
- bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
- bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
+ bnxt_re_put_stats_ctx(rdev);
+ bnxt_qplib_free_hwctx(&rdev->qplib_res, &rdev->qplib_ctx);
bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
@@ -2062,16 +2178,6 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
}
}
-/* worker thread for polling periodic events. Now used for QoS programming*/
-static void bnxt_re_worker(struct work_struct *work)
-{
- struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
- worker.work);
-
- bnxt_re_setup_qos(rdev);
- schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
-}
-
static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
{
struct bnxt_re_ring_attr rattr = {};
@@ -2086,8 +2192,9 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
rc = bnxt_re_register_netdev(rdev);
if (rc) {
ibdev_err(&rdev->ibdev,
- "Failed to register with netedev: %#x\n", rc);
- return -EINVAL;
+ "Failed to register with Ethernet driver, rc %d\n",
+ rc);
+ return rc;
}
}
set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
@@ -2125,8 +2232,6 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
/* Check whether VF or PF */
bnxt_re_get_sriov_func_type(rdev);
- bnxt_re_query_hwrm_intf_version(rdev);
-
/* Establish RCFW Communication Channel to initialize the context
* memory for the function and all child VFs
*/
@@ -2176,18 +2281,20 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
if (rc)
goto disable_rcfw;
+ bnxt_qplib_query_version(&rdev->rcfw);
bnxt_re_set_resource_limits(rdev);
- rc = bnxt_qplib_alloc_ctx(&rdev->qplib_res, &rdev->qplib_ctx, 0,
- bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx));
- if (rc) {
- ibdev_err(&rdev->ibdev,
- "Failed to allocate QPLIB context: %#x\n", rc);
- goto disable_rcfw;
+ if (!rdev->is_virtfn &&
+ !bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
+ rc = bnxt_qplib_alloc_hwctx(&rdev->qplib_res, &rdev->qplib_ctx);
+ if (rc) {
+ ibdev_err(&rdev->ibdev,
+ "Failed to allocate hw context: %#x\n", rc);
+ goto disable_rcfw;
+ }
}
- rc = bnxt_re_net_stats_ctx_alloc(rdev,
- rdev->qplib_ctx.stats.dma_map,
- &rdev->qplib_ctx.stats.fw_id);
+
+ rc = bnxt_re_get_stats_ctx(rdev);
if (rc) {
ibdev_err(&rdev->ibdev,
"Failed to allocate stats context: %#x\n", rc);
@@ -2226,15 +2333,6 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
if (rc)
ibdev_warn(&rdev->ibdev, "Failed to query CC defaults\n");
- rc = bnxt_re_setup_qos(rdev);
- if (rc)
- ibdev_info(&rdev->ibdev,
- "RoCE priority not yet configured\n");
-
- INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
- set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
- schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
-
if (!(rdev->qplib_res.en_dev->flags & BNXT_EN_FLAG_ROCE_VF_RES_MGMT))
bnxt_re_vf_res_config(rdev);
}
@@ -2247,11 +2345,18 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
bnxt_re_init_dcb_wq(rdev);
bnxt_re_net_register_async_event(rdev);
+ if (!rdev->is_virtfn)
+ bnxt_re_read_vpd_info(rdev);
+
+ rc = bnxt_re_get_stats3_ctx(rdev);
+ if (rc)
+ goto fail;
+
return 0;
free_sctx:
bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
free_ctx:
- bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
+ bnxt_qplib_free_hwctx(&rdev->qplib_res, &rdev->qplib_ctx);
disable_rcfw:
bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
free_ring:
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index dfe3177123e5..ce90d3d834d4 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -705,9 +705,7 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
srq->dbinfo.db = srq->dpi->dbr;
srq->dbinfo.max_slot = 1;
srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
- if (srq->threshold)
- bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
- srq->arm_req = false;
+ bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
return 0;
fail:
@@ -717,24 +715,6 @@ fail:
return rc;
}
-int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
- struct bnxt_qplib_srq *srq)
-{
- struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
- u32 count;
-
- count = __bnxt_qplib_get_avail(srq_hwq);
- if (count > srq->threshold) {
- srq->arm_req = false;
- bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
- } else {
- /* Deferred arming */
- srq->arm_req = true;
- }
-
- return 0;
-}
-
int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
struct bnxt_qplib_srq *srq)
{
@@ -776,7 +756,6 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
struct rq_wqe *srqe;
struct sq_sge *hw_sge;
- u32 count = 0;
int i, next;
spin_lock(&srq_hwq->lock);
@@ -808,15 +787,8 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);
- spin_lock(&srq_hwq->lock);
- count = __bnxt_qplib_get_avail(srq_hwq);
- spin_unlock(&srq_hwq->lock);
/* Ring DB */
bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
- if (srq->arm_req == true && count > srq->threshold) {
- srq->arm_req = false;
- bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
- }
return 0;
}
@@ -1335,6 +1307,7 @@ static bool is_optimized_state_transition(struct bnxt_qplib_qp *qp)
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
+ struct bnxt_qplib_sgid_tbl *sgid_tbl = &res->sgid_tbl;
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct creq_modify_qp_resp resp = {};
struct bnxt_qplib_cmdqmsg msg = {};
@@ -1386,9 +1359,14 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
req.flow_label = cpu_to_le32(qp->ah.flow_label);
- if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
- req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
- [qp->ah.sgid_index]);
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX) {
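+		/* RawEth QPs are bound to the driver's unique GID; others use the AH's SGID index */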
+ if (qp->type == CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE)
+ req.sgid_index =
+ cpu_to_le16(sgid_tbl->hw_id[qp->ugid_index]);
+ else
+ req.sgid_index =
+ cpu_to_le16(sgid_tbl->hw_id[qp->ah.sgid_index]);
+ }
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
req.hop_limit = qp->ah.hop_limit;
@@ -1492,6 +1470,7 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp->access = sb->access;
qp->pkey_index = le16_to_cpu(sb->pkey);
qp->qkey = le32_to_cpu(sb->qkey);
+ qp->udp_sport = le16_to_cpu(sb->udp_src_port);
temp32[0] = le32_to_cpu(sb->dgid[0]);
temp32[1] = le32_to_cpu(sb->dgid[1]);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index ab125f1d949e..b990d0c0ce1a 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -299,6 +299,7 @@ struct bnxt_qplib_qp {
u8 smac[6];
u16 vlan_id;
u16 port_id;
+ u16 udp_sport;
u8 nw_type;
struct bnxt_qplib_ah ah;
@@ -344,6 +345,7 @@ struct bnxt_qplib_qp {
u32 msn_tbl_sz;
bool is_host_msn_tbl;
u8 tos_dscp;
+ u32 ugid_index;
};
#define BNXT_RE_MAX_MSG_SIZE 0x80000000
@@ -546,8 +548,6 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
srqn_handler_t srq_handler);
int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
struct bnxt_qplib_srq *srq);
-int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
- struct bnxt_qplib_srq *srq);
int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
struct bnxt_qplib_srq *srq);
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 804bc773b4ef..295a9610f3e6 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -186,7 +186,7 @@ static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
* wait for command completion. Maximum holding interval is 8 second.
*
* Returns:
- * -ETIMEOUT if command is not completed in specific time interval.
+ * -ETIMEDOUT if command is not completed in specific time interval.
* 0 if command is completed by firmware.
*/
static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
@@ -366,6 +366,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
wmb();
writel(cmdq_prod, cmdq->cmdq_mbox.prod);
writel(RCFW_CMDQ_TRIG_VAL, cmdq->cmdq_mbox.db);
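+	/* Hex-dump the request at debug level for FW command tracing */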
+ print_hex_dump_bytes("req: ", DUMP_PREFIX_OFFSET, msg->req, msg->req_sz);
spin_unlock_bh(&hwq->lock);
/* Return the CREQ response pointer */
return 0;
@@ -381,7 +382,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
* This function can not be called from non-sleepable context.
*
* Returns:
- * -ETIMEOUT if command is not completed in specific time interval.
+ * -ETIMEDOUT if command is not completed in specific time interval.
* 0 if command is completed by firmware.
*/
static int __poll_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
@@ -631,6 +632,7 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
int rc = 0;
pdev = rcfw->pdev;
+ print_hex_dump_bytes("event: ", DUMP_PREFIX_OFFSET, qp_event, sizeof(*qp_event));
switch (qp_event->event) {
case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
err_event = (struct creq_qp_error_notification *)qp_event;
@@ -903,6 +905,10 @@ skip_ctx_setup:
flags |= CMDQ_INITIALIZE_FW_FLAGS_OPTIMIZE_MODIFY_QP_SUPPORTED;
if (rcfw->res->en_dev->flags & BNXT_EN_FLAG_ROCE_VF_RES_MGMT)
flags |= CMDQ_INITIALIZE_FW_FLAGS_L2_VF_RESOURCE_MGMT;
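+	/* Advertise RoCE mirroring support to FW and cache the result */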
+ if (bnxt_qplib_roce_mirror_supported(rcfw->res->cctx)) {
+ flags |= CMDQ_INITIALIZE_FW_FLAGS_MIRROR_ON_ROCE_SUPPORTED;
+ rcfw->roce_mirror = true;
+ }
req.flags |= cpu_to_le16(flags);
req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index ff873c5f1b25..988c89b4232e 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -236,6 +236,7 @@ struct bnxt_qplib_rcfw {
atomic_t timeout_send;
/* cached from chip cctx for quick reference in slow path */
u16 max_timeout;
+ bool roce_mirror;
};
struct bnxt_qplib_cmdqmsg {
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 6cd05207ffed..875d7b52c06a 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -53,12 +53,6 @@
#include "qplib_sp.h"
#include "qplib_rcfw.h"
-static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
- struct bnxt_qplib_stats *stats);
-static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
- struct bnxt_qplib_chip_ctx *cctx,
- struct bnxt_qplib_stats *stats);
-
/* PBL */
static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl,
bool is_umem)
@@ -121,6 +115,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
pbl->pg_arr = vmalloc_array(pages, sizeof(void *));
if (!pbl->pg_arr)
return -ENOMEM;
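+	/* vmalloc_array() does not zero memory; clear the tables before use */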
+ memset(pbl->pg_arr, 0, pages * sizeof(void *));
pbl->pg_map_arr = vmalloc_array(pages, sizeof(dma_addr_t));
if (!pbl->pg_map_arr) {
@@ -128,6 +123,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
pbl->pg_arr = NULL;
return -ENOMEM;
}
+ memset(pbl->pg_map_arr, 0, pages * sizeof(dma_addr_t));
pbl->pg_count = 0;
pbl->pg_size = sginfo->pgsize;
@@ -350,8 +346,8 @@ fail:
}
/* Context Tables */
-void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
- struct bnxt_qplib_ctx *ctx)
+void bnxt_qplib_free_hwctx(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_ctx *ctx)
{
int i;
@@ -365,7 +361,6 @@ void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
/* restore original pde level before destroy */
ctx->tqm_ctx.pde.level = ctx->tqm_ctx.pde_level;
bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.pde);
- bnxt_qplib_free_stats_ctx(res->pdev, &ctx->stats);
}
static int bnxt_qplib_alloc_tqm_rings(struct bnxt_qplib_res *res,
@@ -464,7 +459,7 @@ fail:
}
/*
- * Routine: bnxt_qplib_alloc_ctx
+ * Routine: bnxt_qplib_alloc_hwctx
* Description:
* Context tables are memories which are used by the chip fw.
* The 6 tables defined are:
@@ -484,17 +479,13 @@ fail:
* Returns:
* 0 if success, else -ERRORS
*/
-int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
- struct bnxt_qplib_ctx *ctx,
- bool virt_fn, bool is_p5)
+int bnxt_qplib_alloc_hwctx(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_ctx *ctx)
{
struct bnxt_qplib_hwq_attr hwq_attr = {};
struct bnxt_qplib_sg_info sginfo = {};
int rc;
- if (virt_fn || is_p5)
- goto stats_alloc;
-
/* QPC Tables */
sginfo.pgsize = PAGE_SIZE;
sginfo.pgshft = PAGE_SHIFT;
@@ -540,16 +531,11 @@ int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
rc = bnxt_qplib_alloc_init_hwq(&ctx->tim_tbl, &hwq_attr);
if (rc)
goto fail;
-stats_alloc:
- /* Stats */
- rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &ctx->stats);
- if (rc)
- goto fail;
return 0;
fail:
- bnxt_qplib_free_ctx(res, ctx);
+ bnxt_qplib_free_hwctx(res, ctx);
return rc;
}
@@ -830,8 +816,8 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
}
/* Stats */
-static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
- struct bnxt_qplib_stats *stats)
+void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
+ struct bnxt_qplib_stats *stats)
{
if (stats->dma) {
dma_free_coherent(&pdev->dev, stats->size,
@@ -841,9 +827,9 @@ static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
stats->fw_id = -1;
}
-static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
- struct bnxt_qplib_chip_ctx *cctx,
- struct bnxt_qplib_stats *stats)
+int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
+ struct bnxt_qplib_chip_ctx *cctx,
+ struct bnxt_qplib_stats *stats)
{
memset(stats, 0, sizeof(*stats));
stats->fw_id = -1;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 6a13927674b4..2ea3b7f232a3 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -65,6 +65,7 @@ struct bnxt_qplib_drv_modes {
bool db_push;
bool dbr_pacing;
u32 toggle_bits;
+ u8 roce_mirror;
};
enum bnxt_re_toggle_modes {
@@ -303,6 +304,7 @@ struct bnxt_qplib_ctx {
struct bnxt_qplib_hwq tim_tbl;
struct bnxt_qplib_tqm_ctx tqm_ctx;
struct bnxt_qplib_stats stats;
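+	/* additional stats context used when RoCE mirroring is enabled */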
+ struct bnxt_qplib_stats stats3;
struct bnxt_qplib_vf_res vf_res;
};
@@ -432,15 +434,19 @@ void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res);
int bnxt_qplib_init_res(struct bnxt_qplib_res *res);
void bnxt_qplib_free_res(struct bnxt_qplib_res *res);
int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct net_device *netdev);
-void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
- struct bnxt_qplib_ctx *ctx);
-int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
- struct bnxt_qplib_ctx *ctx,
- bool virt_fn, bool is_p5);
+void bnxt_qplib_free_hwctx(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_ctx *ctx);
+int bnxt_qplib_alloc_hwctx(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_ctx *ctx);
int bnxt_qplib_map_db_bar(struct bnxt_qplib_res *res);
void bnxt_qplib_unmap_db_bar(struct bnxt_qplib_res *res);
int bnxt_qplib_determine_atomics(struct pci_dev *dev);
+int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
+ struct bnxt_qplib_chip_ctx *cctx,
+ struct bnxt_qplib_stats *stats);
+void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
+ struct bnxt_qplib_stats *stats);
static inline void bnxt_qplib_hwq_incr_prod(struct bnxt_qplib_db_info *dbinfo,
struct bnxt_qplib_hwq *hwq, u32 cnt)
@@ -582,6 +588,11 @@ static inline u8 bnxt_qplib_dbr_pacing_en(struct bnxt_qplib_chip_ctx *cctx)
return cctx->modes.dbr_pacing;
}
+static inline u8 bnxt_qplib_roce_mirror_supported(struct bnxt_qplib_chip_ctx *cctx)
+{
+ return cctx->modes.roce_mirror;
+}
+
static inline bool _is_alloc_mr_unified(u16 dev_cap_flags)
{
return dev_cap_flags & CREQ_QUERY_FUNC_RESP_SB_MR_REGISTER_ALLOC;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 68981399598d..9ef581ed785c 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -66,14 +66,15 @@ static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
return (pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
}
-static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
- char *fw_ver)
+void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw)
{
struct creq_query_version_resp resp = {};
struct bnxt_qplib_cmdqmsg msg = {};
struct cmdq_query_version req = {};
+ struct bnxt_qplib_dev_attr *attr;
int rc;
+ attr = rcfw->res->dattr;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_QUERY_VERSION,
sizeof(req));
@@ -82,10 +83,10 @@ static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
if (rc)
return;
- fw_ver[0] = resp.fw_maj;
- fw_ver[1] = resp.fw_minor;
- fw_ver[2] = resp.fw_bld;
- fw_ver[3] = resp.fw_rsvd;
+ attr->fw_ver[0] = resp.fw_maj;
+ attr->fw_ver[1] = resp.fw_minor;
+ attr->fw_ver[2] = resp.fw_bld;
+ attr->fw_ver[3] = resp.fw_rsvd;
}
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw)
@@ -179,8 +180,6 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw)
if (_is_max_srq_ext_supported(attr->dev_cap_flags2))
attr->max_srq += le16_to_cpu(sb->max_srq_ext);
- bnxt_qplib_query_version(rcfw, attr->fw_ver);
-
for (i = 0; i < MAX_TQM_ALLOC_REQ / 4; i++) {
temp = le32_to_cpu(sb->tqm_alloc_reqs[i]);
tqm_alloc = (u8 *)&temp;
@@ -309,7 +308,8 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, const u8 *smac,
- u16 vlan_id, bool update, u32 *index)
+ u16 vlan_id, bool update, u32 *index,
+ bool is_ugid, u32 stats_ctx_id)
{
struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
struct bnxt_qplib_res,
@@ -374,6 +374,9 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);
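+	/* Attach the stats context; the VALID flag tells FW to count this GID's traffic there */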
+ req.stats_ctx = cpu_to_le16(CMDQ_ADD_GID_STATS_CTX_STATS_CTX_VALID |
+ (u16)stats_ctx_id);
+
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
sizeof(resp), 0);
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
@@ -397,46 +400,6 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
return 0;
}
-int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
- struct bnxt_qplib_gid *gid, u16 gid_idx,
- const u8 *smac)
-{
- struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
- struct bnxt_qplib_res,
- sgid_tbl);
- struct bnxt_qplib_rcfw *rcfw = res->rcfw;
- struct creq_modify_gid_resp resp = {};
- struct bnxt_qplib_cmdqmsg msg = {};
- struct cmdq_modify_gid req = {};
- int rc;
-
- bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
- CMDQ_BASE_OPCODE_MODIFY_GID,
- sizeof(req));
-
- req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
- req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
- req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
- req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
- if (res->prio) {
- req.vlan |= cpu_to_le16
- (CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
- CMDQ_ADD_GID_VLAN_VLAN_EN);
- }
-
- /* MAC in network format */
- req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
- req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
- req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);
-
- req.gid_index = cpu_to_le16(gid_idx);
-
- bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
- sizeof(resp), 0);
- rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
- return rc;
-}
-
/* AH */
int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
bool block)
@@ -1143,3 +1106,40 @@ out:
dma_free_coherent(&rcfw->pdev->dev, sbuf.size, sbuf.sb, sbuf.dma_addr);
return rc;
}
+
+int bnxt_qplib_create_flow(struct bnxt_qplib_res *res)
+{
+ struct creq_roce_mirror_cfg_resp resp = {};
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct cmdq_roce_mirror_cfg req = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_ROCE_MIRROR_CFG,
+ sizeof(req));
+
+ req.mirror_flags = (u8)CMDQ_ROCE_MIRROR_CFG_MIRROR_ENABLE;
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ return bnxt_qplib_rcfw_send_message(rcfw, &msg);
+}
+
+int bnxt_qplib_destroy_flow(struct bnxt_qplib_res *res)
+{
+ struct creq_roce_mirror_cfg_resp resp = {};
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct cmdq_roce_mirror_cfg req = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_ROCE_MIRROR_CFG,
+ sizeof(req));
+
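+	/* req is zero-initialized, so MIRROR_ENABLE is already clear; the
+	 * mask below just documents the intent of disabling mirroring.
+	 */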
+ req.mirror_flags &= ~((u8)CMDQ_ROCE_MIRROR_CFG_MIRROR_ENABLE);
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+
+ return bnxt_qplib_rcfw_send_message(rcfw, &msg);
+}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 09faf4a1e849..147b5d9c0313 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -323,7 +323,8 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, u16 vlan_id, bool update);
int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, const u8 *mac, u16 vlan_id,
- bool update, u32 *index);
+ bool update, u32 *index,
+ bool is_ugid, u32 stats_ctx_id);
int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, u16 gid_idx,
const u8 *smac);
@@ -358,6 +359,9 @@ int bnxt_qplib_read_context(struct bnxt_qplib_rcfw *rcfw, u8 type, u32 xid,
u32 resp_size, void *resp_va);
int bnxt_qplib_query_cc_param(struct bnxt_qplib_res *res,
struct bnxt_qplib_cc_param *cc_param);
+void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw);
+int bnxt_qplib_create_flow(struct bnxt_qplib_res *res);
+int bnxt_qplib_destroy_flow(struct bnxt_qplib_res *res);
#define BNXT_VAR_MAX_WQE 4352
#define BNXT_VAR_MAX_SLOT_ALIGN 256
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index 024845f945ff..99ecd72e72e2 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -144,7 +144,8 @@ struct cmdq_base {
#define CMDQ_BASE_OPCODE_MODIFY_CQ 0x90UL
#define CMDQ_BASE_OPCODE_QUERY_QP_EXTEND 0x91UL
#define CMDQ_BASE_OPCODE_QUERY_ROCE_STATS_EXT 0x92UL
- #define CMDQ_BASE_OPCODE_LAST CMDQ_BASE_OPCODE_QUERY_ROCE_STATS_EXT
+ #define CMDQ_BASE_OPCODE_ROCE_MIRROR_CFG 0x99UL
+ #define CMDQ_BASE_OPCODE_LAST CMDQ_BASE_OPCODE_ROCE_MIRROR_CFG
u8 cmd_size;
__le16 flags;
__le16 cookie;
@@ -218,6 +219,7 @@ struct cmdq_initialize_fw {
#define CMDQ_INITIALIZE_FW_FLAGS_HW_REQUESTER_RETX_SUPPORTED 0x2UL
#define CMDQ_INITIALIZE_FW_FLAGS_OPTIMIZE_MODIFY_QP_SUPPORTED 0x8UL
#define CMDQ_INITIALIZE_FW_FLAGS_L2_VF_RESOURCE_MGMT 0x10UL
+ #define CMDQ_INITIALIZE_FW_FLAGS_MIRROR_ON_ROCE_SUPPORTED 0x80UL
__le16 cookie;
u8 resp_size;
u8 reserved8;
@@ -788,7 +790,8 @@ struct creq_query_qp_resp_sb {
#define CREQ_QUERY_QP_RESP_SB_ACCESS_REMOTE_ATOMIC 0x8UL
__le16 pkey;
__le32 qkey;
- __le32 reserved32;
+ __le16 udp_src_port;
+ __le16 reserved16;
__le32 dgid[4];
__le32 flow_label;
__le16 sgid_index;
@@ -2108,6 +2111,43 @@ struct creq_query_roce_stats_ext_resp_sb {
__le64 dup_req;
};
+/* cmdq_roce_mirror_cfg (size:192b/24B) */
+struct cmdq_roce_mirror_cfg {
+ u8 opcode;
+ #define CMDQ_ROCE_MIRROR_CFG_OPCODE_ROCE_MIRROR_CFG 0x99UL
+ #define CMDQ_ROCE_MIRROR_CFG_OPCODE_LAST \
+ CMDQ_ROCE_MIRROR_CFG_OPCODE_ROCE_MIRROR_CFG
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ u8 mirror_flags;
+ #define CMDQ_ROCE_MIRROR_CFG_MIRROR_ENABLE 0x1UL
+ u8 rsvd[7];
+};
+
+/* creq_roce_mirror_cfg_resp (size:128b/16B) */
+struct creq_roce_mirror_cfg_resp {
+ u8 type;
+ #define CREQ_ROCE_MIRROR_CFG_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_ROCE_MIRROR_CFG_RESP_TYPE_SFT 0
+ #define CREQ_ROCE_MIRROR_CFG_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_ROCE_MIRROR_CFG_RESP_TYPE_LAST \
+ CREQ_ROCE_MIRROR_CFG_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 reserved32;
+ u8 v;
+ #define CREQ_ROCE_MIRROR_CFG_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_ROCE_MIRROR_CFG_RESP_EVENT_ROCE_MIRROR_CFG 0x99UL
+ #define CREQ_ROCE_MIRROR_CFG_RESP_EVENT_LAST \
+ CREQ_ROCE_MIRROR_CFG_RESP_EVENT_ROCE_MIRROR_CFG
+ u8 reserved48[6];
+};
+
/* cmdq_query_func (size:128b/16B) */
struct cmdq_query_func {
u8 opcode;
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index b67747ae6a68..d892f55febe2 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -1228,9 +1228,8 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
if (!ctx->dev) {
ctx->dev = c4iw_alloc(&ctx->lldi);
if (IS_ERR(ctx->dev)) {
- pr_err("%s: initialization failed: %ld\n",
- pci_name(ctx->lldi.pdev),
- PTR_ERR(ctx->dev));
+ pr_err("%s: initialization failed: %pe\n",
+ pci_name(ctx->lldi.pdev), ctx->dev);
ctx->dev = NULL;
break;
}
diff --git a/drivers/infiniband/hw/efa/efa_com.c b/drivers/infiniband/hw/efa/efa_com.c
index bafd210dd43e..0e979ca10d24 100644
--- a/drivers/infiniband/hw/efa/efa_com.c
+++ b/drivers/infiniband/hw/efa/efa_com.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
- * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include "efa_com.h"
@@ -30,6 +30,7 @@ struct efa_comp_ctx {
struct efa_admin_acq_entry *user_cqe;
u32 comp_size;
enum efa_cmd_status status;
+ u16 cmd_id;
u8 cmd_opcode;
u8 occupied;
};
@@ -333,6 +334,7 @@ static struct efa_comp_ctx *__efa_com_submit_admin_cmd(struct efa_com_admin_queu
comp_ctx->comp_size = comp_size_in_bytes;
comp_ctx->user_cqe = comp;
comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
+ comp_ctx->cmd_id = cmd_id;
reinit_completion(&comp_ctx->wait_event);
@@ -557,17 +559,19 @@ static int efa_com_wait_and_process_admin_cq_interrupts(struct efa_comp_ctx *com
if (comp_ctx->status == EFA_CMD_COMPLETED)
ibdev_err_ratelimited(
aq->efa_dev,
- "The device sent a completion but the driver didn't receive any MSI-X interrupt for admin cmd %s(%d) status %d (ctx: 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
+ "The device sent a completion but the driver didn't receive any MSI-X interrupt for admin cmd %s(%d) status %d (id: %d, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
efa_com_cmd_str(comp_ctx->cmd_opcode),
comp_ctx->cmd_opcode, comp_ctx->status,
- comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);
+ comp_ctx->cmd_id, aq->sq.pc, aq->sq.cc,
+ aq->cq.cc);
else
ibdev_err_ratelimited(
aq->efa_dev,
- "The device didn't send any completion for admin cmd %s(%d) status %d (ctx 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
+ "The device didn't send any completion for admin cmd %s(%d) status %d (id: %d, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
efa_com_cmd_str(comp_ctx->cmd_opcode),
comp_ctx->cmd_opcode, comp_ctx->status,
- comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);
+ comp_ctx->cmd_id, aq->sq.pc, aq->sq.cc,
+ aq->cq.cc);
clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
err = -ETIME;
@@ -631,9 +635,9 @@ int efa_com_cmd_exec(struct efa_com_admin_queue *aq,
if (IS_ERR(comp_ctx)) {
ibdev_err_ratelimited(
aq->efa_dev,
- "Failed to submit command %s (opcode %u) err %ld\n",
+ "Failed to submit command %s (opcode %u) err %pe\n",
efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
- cmd->aq_common_descriptor.opcode, PTR_ERR(comp_ctx));
+ cmd->aq_common_descriptor.opcode, comp_ctx);
up(&aq->avail_cmds);
atomic64_inc(&aq->stats.cmd_err);
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 886923d5fe50..d9a12681f843 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -1788,7 +1788,8 @@ struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start,
access_flags);
if (IS_ERR(umem_dmabuf)) {
err = PTR_ERR(umem_dmabuf);
- ibdev_dbg(&dev->ibdev, "Failed to get dmabuf umem[%d]\n", err);
+ ibdev_dbg(&dev->ibdev, "Failed to get dmabuf umem[%pe]\n",
+ umem_dmabuf);
goto err_free;
}
@@ -1832,7 +1833,8 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
ibdev_dbg(&dev->ibdev,
- "Failed to pin and map user space memory[%d]\n", err);
+ "Failed to pin and map user space memory[%pe]\n",
+ mr->umem);
goto err_free;
}
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
index 94c211df09d8..109a3f3de911 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
@@ -149,7 +149,7 @@ static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr)
req.phy_addr[0] = mr->mem.mtt->buf_dma;
mtt_level = ERDMA_MR_MTT_1LEVEL;
} else {
- req.phy_addr[0] = sg_dma_address(mr->mem.mtt->sglist);
+ req.phy_addr[0] = mr->mem.mtt->dma_addrs[0];
mtt_level = mr->mem.mtt->level;
}
} else if (mr->type != ERDMA_MR_TYPE_DMA) {
@@ -626,18 +626,27 @@ err_free_mtt:
return ERR_PTR(-ENOMEM);
}
-static void erdma_destroy_mtt_buf_sg(struct erdma_dev *dev,
- struct erdma_mtt *mtt)
+static void erdma_unmap_page_list(struct erdma_dev *dev, dma_addr_t *pg_dma,
+ u32 npages)
{
- dma_unmap_sg(&dev->pdev->dev, mtt->sglist,
- DIV_ROUND_UP(mtt->size, PAGE_SIZE), DMA_TO_DEVICE);
- vfree(mtt->sglist);
+ u32 i;
+
+ for (i = 0; i < npages; i++)
+ dma_unmap_page(&dev->pdev->dev, pg_dma[i], PAGE_SIZE,
+ DMA_TO_DEVICE);
+}
+
+static void erdma_destroy_mtt_buf_dma_addrs(struct erdma_dev *dev,
+ struct erdma_mtt *mtt)
+{
+ erdma_unmap_page_list(dev, mtt->dma_addrs, mtt->npages);
+ vfree(mtt->dma_addrs);
}
static void erdma_destroy_scatter_mtt(struct erdma_dev *dev,
struct erdma_mtt *mtt)
{
- erdma_destroy_mtt_buf_sg(dev, mtt);
+ erdma_destroy_mtt_buf_dma_addrs(dev, mtt);
vfree(mtt->buf);
kfree(mtt);
}
@@ -645,50 +654,69 @@ static void erdma_destroy_scatter_mtt(struct erdma_dev *dev,
static void erdma_init_middle_mtt(struct erdma_mtt *mtt,
struct erdma_mtt *low_mtt)
{
- struct scatterlist *sg;
- u32 idx = 0, i;
+ dma_addr_t *pg_addr = mtt->buf;
+ u32 i;
- for_each_sg(low_mtt->sglist, sg, low_mtt->nsg, i)
- mtt->buf[idx++] = sg_dma_address(sg);
+ for (i = 0; i < low_mtt->npages; i++)
+ pg_addr[i] = low_mtt->dma_addrs[i];
}
-static int erdma_create_mtt_buf_sg(struct erdma_dev *dev, struct erdma_mtt *mtt)
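+/* Map each page of a vmalloc'ed buffer for DMA; returns the page count, or 0 on failure */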
+static u32 vmalloc_to_dma_addrs(struct erdma_dev *dev, dma_addr_t **dma_addrs,
+ void *buf, u64 len)
{
- struct scatterlist *sglist;
- void *buf = mtt->buf;
- u32 npages, i, nsg;
+ dma_addr_t *pg_dma;
struct page *pg;
+ u32 npages, i;
+ void *addr;
- /* Failed if buf is not page aligned */
- if ((uintptr_t)buf & ~PAGE_MASK)
- return -EINVAL;
-
- npages = DIV_ROUND_UP(mtt->size, PAGE_SIZE);
- sglist = vzalloc(npages * sizeof(*sglist));
- if (!sglist)
- return -ENOMEM;
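+ /* Count the pages spanned by [buf, buf + len) */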
+ npages = (PAGE_ALIGN((u64)buf + len) - PAGE_ALIGN_DOWN((u64)buf)) >>
+ PAGE_SHIFT;
+ pg_dma = vcalloc(npages, sizeof(*pg_dma));
+ if (!pg_dma)
+ return 0;
- sg_init_table(sglist, npages);
+ addr = buf;
for (i = 0; i < npages; i++) {
- pg = vmalloc_to_page(buf);
+ pg = vmalloc_to_page(addr);
if (!pg)
goto err;
- sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
- buf += PAGE_SIZE;
+
+ pg_dma[i] = dma_map_page(&dev->pdev->dev, pg, 0, PAGE_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&dev->pdev->dev, pg_dma[i]))
+ goto err;
+
+ addr += PAGE_SIZE;
}
- nsg = dma_map_sg(&dev->pdev->dev, sglist, npages, DMA_TO_DEVICE);
- if (!nsg)
- goto err;
+ *dma_addrs = pg_dma;
- mtt->sglist = sglist;
- mtt->nsg = nsg;
+ return npages;
+err:
+ erdma_unmap_page_list(dev, pg_dma, i);
+ vfree(pg_dma);
return 0;
-err:
- vfree(sglist);
+}
- return -ENOMEM;
+static int erdma_create_mtt_buf_dma_addrs(struct erdma_dev *dev,
+ struct erdma_mtt *mtt)
+{
+ dma_addr_t *addrs;
+ u32 npages;
+
+ /* Fail if buf is not page aligned */
+ if ((uintptr_t)mtt->buf & ~PAGE_MASK)
+ return -EINVAL;
+
+ npages = vmalloc_to_dma_addrs(dev, &addrs, mtt->buf, mtt->size);
+ if (!npages)
+ return -ENOMEM;
+
+ mtt->dma_addrs = addrs;
+ mtt->npages = npages;
+
+ return 0;
}
static struct erdma_mtt *erdma_create_scatter_mtt(struct erdma_dev *dev,
@@ -707,12 +735,12 @@ static struct erdma_mtt *erdma_create_scatter_mtt(struct erdma_dev *dev,
if (!mtt->buf)
goto err_free_mtt;
- ret = erdma_create_mtt_buf_sg(dev, mtt);
+ ret = erdma_create_mtt_buf_dma_addrs(dev, mtt);
if (ret)
goto err_free_mtt_buf;
- ibdev_dbg(&dev->ibdev, "create scatter mtt, size:%lu, nsg:%u\n",
- mtt->size, mtt->nsg);
+ ibdev_dbg(&dev->ibdev, "create scatter mtt, size:%lu, npages:%u\n",
+ mtt->size, mtt->npages);
return mtt;
@@ -746,8 +774,8 @@ static struct erdma_mtt *erdma_create_mtt(struct erdma_dev *dev, size_t size,
level = 1;
/* convergence the mtt table. */
- while (mtt->nsg != 1 && level <= 3) {
- tmp_mtt = erdma_create_scatter_mtt(dev, MTT_SIZE(mtt->nsg));
+ while (mtt->npages != 1 && level <= 3) {
+ tmp_mtt = erdma_create_scatter_mtt(dev, MTT_SIZE(mtt->npages));
if (IS_ERR(tmp_mtt)) {
ret = PTR_ERR(tmp_mtt);
goto err_free_mtt;
@@ -765,7 +793,7 @@ static struct erdma_mtt *erdma_create_mtt(struct erdma_dev *dev, size_t size,
mtt->level = level;
ibdev_dbg(&dev->ibdev, "top mtt: level:%d, dma_addr 0x%llx\n",
- mtt->level, mtt->sglist[0].dma_address);
+ mtt->level, mtt->dma_addrs[0]);
return mtt;
err_free_mtt:
@@ -994,6 +1022,8 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
old_entry = xa_store(&dev->qp_xa, 1, qp, GFP_KERNEL);
if (xa_is_err(old_entry))
ret = xa_err(old_entry);
+ else
+ qp->ibqp.qp_num = 1;
} else {
ret = xa_alloc_cyclic(&dev->qp_xa, &qp->ibqp.qp_num, qp,
XA_LIMIT(1, dev->attrs.max_qp - 1),
@@ -1031,7 +1061,9 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
if (ret)
goto err_out_cmd;
} else {
- init_kernel_qp(dev, qp, attrs);
+ ret = init_kernel_qp(dev, qp, attrs);
+ if (ret)
+ goto err_out_xa;
}
qp->attrs.max_send_sge = attrs->cap.max_send_sge;
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.h b/drivers/infiniband/hw/erdma/erdma_verbs.h
index ef411b81fbd7..7d8d3fe501d5 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.h
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.h
@@ -99,8 +99,8 @@ struct erdma_mtt {
union {
dma_addr_t buf_dma;
struct {
- struct scatterlist *sglist;
- u32 nsg;
+ dma_addr_t *dma_addrs;
+ u32 npages;
u32 level;
};
};
diff --git a/drivers/infiniband/hw/hfi1/device.c b/drivers/infiniband/hw/hfi1/device.c
index 4250d077b06f..a98a4175e53b 100644
--- a/drivers/infiniband/hw/hfi1/device.c
+++ b/drivers/infiniband/hw/hfi1/device.c
@@ -64,9 +64,9 @@ int hfi1_cdev_init(int minor, const char *name,
if (IS_ERR(device)) {
ret = PTR_ERR(device);
+ pr_err("Could not create device for minor %d, %s (err %pe)\n",
+ minor, name, device);
device = NULL;
- pr_err("Could not create device for minor %d, %s (err %d)\n",
- minor, name, -ret);
cdev_del(cdev);
}
done:
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 719b7c34e238..5cfa4f8fbf3d 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -990,7 +990,7 @@ ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
}
/* Clean up old mappings */
- for_each_cpu(cpu, cpu_online_mask) {
+ for_each_online_cpu(cpu) {
struct sdma_rht_node *rht_node;
/* Don't cleanup sdes that are set in the new mask */
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index b72625283fcf..9b1aece1b080 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -498,8 +498,8 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
ntids, sizeof(*req->tids));
if (IS_ERR(tmp)) {
ret = PTR_ERR(tmp);
- SDMA_DBG(req, "Failed to copy %d TIDs (%d)",
- ntids, ret);
+ SDMA_DBG(req, "Failed to copy %d TIDs (%pe)", ntids,
+ tmp);
goto free_req;
}
req->tids = tmp;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 64bca08f3f1a..f82bdd46a917 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -3043,7 +3043,7 @@ static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
if (!hr_dev->is_vf)
hns_roce_free_link_table(hr_dev);
- if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP09)
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
free_dip_entry(hr_dev);
}
@@ -5476,7 +5476,7 @@ out:
return ret;
}
-static int hns_roce_v2_query_sccc(struct hns_roce_dev *hr_dev, u32 qpn,
+static int hns_roce_v2_query_sccc(struct hns_roce_dev *hr_dev, u32 sccn,
void *buffer)
{
struct hns_roce_v2_scc_context *context;
@@ -5488,7 +5488,7 @@ static int hns_roce_v2_query_sccc(struct hns_roce_dev *hr_dev, u32 qpn,
return PTR_ERR(mailbox);
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_SCCC,
- qpn);
+ sccn);
if (ret)
goto out;
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 0f037e545520..31cb8699e198 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -594,8 +594,8 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
mtr->umem = ib_umem_get(ibdev, user_addr, total_size,
buf_attr->user_access);
if (IS_ERR(mtr->umem)) {
- ibdev_err(ibdev, "failed to get umem, ret = %ld.\n",
- PTR_ERR(mtr->umem));
+ ibdev_err(ibdev, "failed to get umem, ret = %pe.\n",
+ mtr->umem);
return -ENOMEM;
}
} else {
@@ -605,8 +605,8 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
!mtr_has_mtt(buf_attr) ?
HNS_ROCE_BUF_DIRECT : 0);
if (IS_ERR(mtr->kmem)) {
- ibdev_err(ibdev, "failed to alloc kmem, ret = %ld.\n",
- PTR_ERR(mtr->kmem));
+ ibdev_err(ibdev, "failed to alloc kmem, ret = %pe.\n",
+ mtr->kmem);
return PTR_ERR(mtr->kmem);
}
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c
index f637b73b946e..230187dda6a0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_restrack.c
+++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c
@@ -100,6 +100,7 @@ int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
struct hns_roce_v2_qp_context qpc;
struct hns_roce_v2_scc_context sccc;
} context = {};
+ u32 sccn = hr_qp->qpn;
int ret;
if (!hr_dev->hw->query_qpc)
@@ -116,7 +117,13 @@ int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
!hr_dev->hw->query_sccc)
goto out;
- ret = hr_dev->hw->query_sccc(hr_dev, hr_qp->qpn, &context.sccc);
+ if (hr_qp->cong_type == CONG_TYPE_DIP) {
+ if (!hr_qp->dip)
+ goto out;
+ sccn = hr_qp->dip->dip_idx;
+ }
+
+ ret = hr_dev->hw->query_sccc(hr_dev, sccn, &context.sccc);
if (ret)
ibdev_warn_ratelimited(&hr_dev->ib_dev,
"failed to query SCCC, ret = %d.\n",
diff --git a/drivers/infiniband/hw/ionic/Kconfig b/drivers/infiniband/hw/ionic/Kconfig
new file mode 100644
index 000000000000..de6f10e9b6e9
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2018-2025, Advanced Micro Devices, Inc.
+
+config INFINIBAND_IONIC
+ tristate "AMD Pensando DSC RDMA/RoCE Support"
+ depends on NETDEVICES && ETHERNET && PCI && INET && IONIC
+ help
+ This enables RDMA/RoCE support for the AMD Pensando family of
+ Distributed Services Cards (DSCs).
+
+ To learn more, visit our website at
+ <https://www.amd.com/en/products/accelerators/pensando.html>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ionic_rdma.
diff --git a/drivers/infiniband/hw/ionic/Makefile b/drivers/infiniband/hw/ionic/Makefile
new file mode 100644
index 000000000000..957973742820
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y := -I $(srctree)/drivers/net/ethernet/pensando/ionic
+
+obj-$(CONFIG_INFINIBAND_IONIC) += ionic_rdma.o
+
+ionic_rdma-y := \
+ ionic_ibdev.o ionic_lif_cfg.o ionic_queue.o ionic_pgtbl.o ionic_admin.o \
+ ionic_controlpath.o ionic_datapath.o ionic_hw_stats.o
diff --git a/drivers/infiniband/hw/ionic/ionic_admin.c b/drivers/infiniband/hw/ionic/ionic_admin.c
new file mode 100644
index 000000000000..2537aa55d12d
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_admin.c
@@ -0,0 +1,1229 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+
+#include "ionic_fw.h"
+#include "ionic_ibdev.h"
+
+#define IONIC_EQ_COUNT_MIN 4
+#define IONIC_AQ_COUNT_MIN 1
+
+/* sentinel for wr->status: neither a valid queue position nor a negative error */
+#define IONIC_ADMIN_POSTED 0x10000
+
+/* the cpu can be held with irqs disabled for up to COUNT * MS (for create/destroy_ah) */
+#define IONIC_ADMIN_BUSY_RETRY_COUNT 2000
+#define IONIC_ADMIN_BUSY_RETRY_MS 1
+
+/* admin queue will be considered failed if a command takes longer */
+#define IONIC_ADMIN_TIMEOUT (HZ * 2)
+#define IONIC_ADMIN_WARN (HZ / 8)
+
+/* poll the admin cq periodically to tolerate and report a missed event */
+#define IONIC_ADMIN_DELAY (HZ / 8)
+
+/* work queue for polling the event queue and admin cq */
+struct workqueue_struct *ionic_evt_workq;
+
+static void ionic_admin_timedout(struct ionic_aq *aq)
+{
+ struct ionic_ibdev *dev = aq->dev;
+ unsigned long irqflags;
+ u16 pos;
+
+ spin_lock_irqsave(&aq->lock, irqflags);
+ if (ionic_queue_empty(&aq->q))
+ goto out;
+
+ /* Reset ALL adminqs if any one of them times out */
+ if (atomic_read(&aq->admin_state) < IONIC_ADMIN_KILLED)
+ queue_work(ionic_evt_workq, &dev->reset_work);
+
+ ibdev_err(&dev->ibdev, "admin command timed out, aq %d after: %ums\n",
+ aq->aqid, (u32)jiffies_to_msecs(jiffies - aq->stamp));
+
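+ /* Dump the most recently posted wqe to aid debugging */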
+ pos = (aq->q.prod - 1) & aq->q.mask;
+ if (pos == aq->q.cons)
+ goto out;
+
+ ibdev_warn(&dev->ibdev, "admin pos %u (last posted)\n", pos);
+ print_hex_dump(KERN_WARNING, "cmd ", DUMP_PREFIX_OFFSET, 16, 1,
+ ionic_queue_at(&aq->q, pos),
+ BIT(aq->q.stride_log2), true);
+
+out:
+ spin_unlock_irqrestore(&aq->lock, irqflags);
+}
+
+static void ionic_admin_reset_dwork(struct ionic_ibdev *dev)
+{
+ if (atomic_read(&dev->admin_state) == IONIC_ADMIN_KILLED)
+ return;
+
+ queue_delayed_work(ionic_evt_workq, &dev->admin_dwork,
+ IONIC_ADMIN_DELAY);
+}
+
+static void ionic_admin_reset_wdog(struct ionic_aq *aq)
+{
+ if (atomic_read(&aq->admin_state) == IONIC_ADMIN_KILLED)
+ return;
+
+ aq->stamp = jiffies;
+ ionic_admin_reset_dwork(aq->dev);
+}
+
+static bool ionic_admin_next_cqe(struct ionic_ibdev *dev, struct ionic_cq *cq,
+ struct ionic_v1_cqe **cqe)
+{
+ struct ionic_v1_cqe *qcqe = ionic_queue_at_prod(&cq->q);
+
+ if (unlikely(cq->color != ionic_v1_cqe_color(qcqe)))
+ return false;
+
+ /* Prevent out-of-order reads of the CQE */
+ dma_rmb();
+ *cqe = qcqe;
+
+ return true;
+}
+
+static void ionic_admin_poll_locked(struct ionic_aq *aq)
+{
+ struct ionic_cq *cq = &aq->vcq->cq[0];
+ struct ionic_admin_wr *wr, *wr_next;
+ struct ionic_ibdev *dev = aq->dev;
+ u32 wr_strides, avlbl_strides;
+ struct ionic_v1_cqe *cqe;
+ u32 qtf, qid;
+ u16 old_prod;
+ u8 type;
+
+ lockdep_assert_held(&aq->lock);
+
+ if (atomic_read(&aq->admin_state) == IONIC_ADMIN_KILLED) {
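+ /* Adminq is killed: complete outstanding and pending wrs locally */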
+ list_for_each_entry_safe(wr, wr_next, &aq->wr_prod, aq_ent) {
+ INIT_LIST_HEAD(&wr->aq_ent);
+ aq->q_wr[wr->status].wr = NULL;
+ wr->status = atomic_read(&aq->admin_state);
+ complete_all(&wr->work);
+ }
+ INIT_LIST_HEAD(&aq->wr_prod);
+
+ list_for_each_entry_safe(wr, wr_next, &aq->wr_post, aq_ent) {
+ INIT_LIST_HEAD(&wr->aq_ent);
+ wr->status = atomic_read(&aq->admin_state);
+ complete_all(&wr->work);
+ }
+ INIT_LIST_HEAD(&aq->wr_post);
+
+ return;
+ }
+
+ old_prod = cq->q.prod;
+
+ while (ionic_admin_next_cqe(dev, cq, &cqe)) {
+ qtf = ionic_v1_cqe_qtf(cqe);
+ qid = ionic_v1_cqe_qtf_qid(qtf);
+ type = ionic_v1_cqe_qtf_type(qtf);
+
+ if (unlikely(type != IONIC_V1_CQE_TYPE_ADMIN)) {
+ ibdev_warn_ratelimited(&dev->ibdev,
+ "bad cqe type %u\n", type);
+ goto cq_next;
+ }
+
+ if (unlikely(qid != aq->aqid)) {
+ ibdev_warn_ratelimited(&dev->ibdev,
+ "bad cqe qid %u\n", qid);
+ goto cq_next;
+ }
+
+ if (unlikely(be16_to_cpu(cqe->admin.cmd_idx) != aq->q.cons)) {
+ ibdev_warn_ratelimited(&dev->ibdev,
+ "bad idx %u cons %u qid %u\n",
+ be16_to_cpu(cqe->admin.cmd_idx),
+ aq->q.cons, qid);
+ goto cq_next;
+ }
+
+ if (unlikely(ionic_queue_empty(&aq->q))) {
+ ibdev_warn_ratelimited(&dev->ibdev,
+ "bad cqe for empty adminq\n");
+ goto cq_next;
+ }
+
+ wr = aq->q_wr[aq->q.cons].wr;
+ if (wr) {
+ aq->q_wr[aq->q.cons].wr = NULL;
+ list_del_init(&wr->aq_ent);
+
+ wr->cqe = *cqe;
+ wr->status = atomic_read(&aq->admin_state);
+ complete_all(&wr->work);
+ }
+
+ ionic_queue_consume_entries(&aq->q,
+ aq->q_wr[aq->q.cons].wqe_strides);
+
+cq_next:
+ ionic_queue_produce(&cq->q);
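+ /* The expected phase color flips each time the cq index wraps */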
+ cq->color = ionic_color_wrap(cq->q.prod, cq->color);
+ }
+
+ if (old_prod != cq->q.prod) {
+ ionic_admin_reset_wdog(aq);
+ cq->q.cons = cq->q.prod;
+ ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.cq_qtype,
+ ionic_queue_dbell_val(&cq->q));
+ queue_work(ionic_evt_workq, &aq->work);
+ } else if (!aq->armed) {
+ aq->armed = true;
+ cq->arm_any_prod = ionic_queue_next(&cq->q, cq->arm_any_prod);
+ ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.cq_qtype,
+ cq->q.dbell | IONIC_CQ_RING_ARM |
+ cq->arm_any_prod);
+ queue_work(ionic_evt_workq, &aq->work);
+ }
+
+ if (atomic_read(&aq->admin_state) != IONIC_ADMIN_ACTIVE)
+ return;
+
+ old_prod = aq->q.prod;
+
+ if (ionic_queue_empty(&aq->q) && !list_empty(&aq->wr_post))
+ ionic_admin_reset_wdog(aq);
+
+ if (list_empty(&aq->wr_post))
+ return;
+
+ do {
+ u8 *src;
+ int i, src_len;
+ size_t stride_len;
+
+ wr = list_first_entry(&aq->wr_post, struct ionic_admin_wr,
+ aq_ent);
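+ /* Round header plus payload up to a whole number of wqe strides */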
+ wr_strides = (le16_to_cpu(wr->wqe.len) + ADMIN_WQE_HDR_LEN +
+ (ADMIN_WQE_STRIDE - 1)) >> aq->q.stride_log2;
+ avlbl_strides = ionic_queue_length_remaining(&aq->q);
+
+ if (wr_strides > avlbl_strides)
+ break;
+
+ list_move(&wr->aq_ent, &aq->wr_prod);
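+ /* While posted, wr->status tracks the wqe position in the queue */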
+ wr->status = aq->q.prod;
+ aq->q_wr[aq->q.prod].wr = wr;
+ aq->q_wr[aq->q.prod].wqe_strides = wr_strides;
+
+ src_len = le16_to_cpu(wr->wqe.len);
+ src = (u8 *)&wr->wqe.cmd;
+
+ /* First stride */
+ memcpy(ionic_queue_at_prod(&aq->q), &wr->wqe,
+ ADMIN_WQE_HDR_LEN);
+ stride_len = ADMIN_WQE_STRIDE - ADMIN_WQE_HDR_LEN;
+ if (stride_len > src_len)
+ stride_len = src_len;
+ memcpy(ionic_queue_at_prod(&aq->q) + ADMIN_WQE_HDR_LEN,
+ src, stride_len);
+ ibdev_dbg(&dev->ibdev, "post admin prod %u (%u strides)\n",
+ aq->q.prod, wr_strides);
+ print_hex_dump_debug("wqe ", DUMP_PREFIX_OFFSET, 16, 1,
+ ionic_queue_at_prod(&aq->q),
+ BIT(aq->q.stride_log2), true);
+ ionic_queue_produce(&aq->q);
+
+ /* Remaining strides */
+ for (i = stride_len; i < src_len; i += stride_len) {
+ stride_len = ADMIN_WQE_STRIDE;
+
+ if (i + stride_len > src_len)
+ stride_len = src_len - i;
+
+ memcpy(ionic_queue_at_prod(&aq->q), src + i,
+ stride_len);
+ print_hex_dump_debug("wqe ", DUMP_PREFIX_OFFSET, 16, 1,
+ ionic_queue_at_prod(&aq->q),
+ BIT(aq->q.stride_log2), true);
+ ionic_queue_produce(&aq->q);
+ }
+ } while (!list_empty(&aq->wr_post));
+
+ if (old_prod != aq->q.prod)
+ ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.aq_qtype,
+ ionic_queue_dbell_val(&aq->q));
+}
+
+static void ionic_admin_dwork(struct work_struct *ws)
+{
+ struct ionic_ibdev *dev =
+ container_of(ws, struct ionic_ibdev, admin_dwork.work);
+ struct ionic_aq *aq, *bad_aq = NULL;
+ bool do_reschedule = false;
+ unsigned long irqflags;
+ bool do_reset = false;
+ u16 pos;
+ int i;
+
+ for (i = 0; i < dev->lif_cfg.aq_count; i++) {
+ aq = dev->aq_vec[i];
+
+ spin_lock_irqsave(&aq->lock, irqflags);
+
+ if (ionic_queue_empty(&aq->q))
+ goto next_aq;
+
+ /* Reschedule if any queue has outstanding work */
+ do_reschedule = true;
+
+ if (time_is_after_eq_jiffies(aq->stamp + IONIC_ADMIN_WARN))
+ /* Warning threshold not met, nothing to do */
+ goto next_aq;
+
+ /* See if polling now makes some progress */
+ pos = aq->q.cons;
+ ionic_admin_poll_locked(aq);
+ if (pos != aq->q.cons) {
+ ibdev_dbg(&dev->ibdev,
+ "missed event for acq %d\n", aq->cqid);
+ goto next_aq;
+ }
+
+ if (time_is_after_eq_jiffies(aq->stamp +
+ IONIC_ADMIN_TIMEOUT)) {
+ /* Timeout threshold not met */
+ ibdev_dbg(&dev->ibdev, "no progress after %ums\n",
+ (u32)jiffies_to_msecs(jiffies - aq->stamp));
+ goto next_aq;
+ }
+
+ /* Queue timed out */
+ bad_aq = aq;
+ do_reset = true;
+next_aq:
+ spin_unlock_irqrestore(&aq->lock, irqflags);
+ }
+
+ if (do_reset)
+ /* Reset RDMA lif on a timeout */
+ ionic_admin_timedout(bad_aq);
+ else if (do_reschedule)
+ /* Try to poll again later */
+ ionic_admin_reset_dwork(dev);
+}
+
+static void ionic_admin_work(struct work_struct *ws)
+{
+ struct ionic_aq *aq = container_of(ws, struct ionic_aq, work);
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&aq->lock, irqflags);
+ ionic_admin_poll_locked(aq);
+ spin_unlock_irqrestore(&aq->lock, irqflags);
+}
+
+static void ionic_admin_post_aq(struct ionic_aq *aq, struct ionic_admin_wr *wr)
+{
+ unsigned long irqflags;
+ bool poll;
+
+ wr->status = IONIC_ADMIN_POSTED;
+ wr->aq = aq;
+
+ spin_lock_irqsave(&aq->lock, irqflags);
+ poll = list_empty(&aq->wr_post);
+ list_add(&wr->aq_ent, &aq->wr_post);
+ if (poll)
+ ionic_admin_poll_locked(aq);
+ spin_unlock_irqrestore(&aq->lock, irqflags);
+}
+
+void ionic_admin_post(struct ionic_ibdev *dev, struct ionic_admin_wr *wr)
+{
+ int aq_idx;
+
+ /* Use cpu id for the adminq selection */
+ aq_idx = raw_smp_processor_id() % dev->lif_cfg.aq_count;
+ ionic_admin_post_aq(dev->aq_vec[aq_idx], wr);
+}
+
+static void ionic_admin_cancel(struct ionic_admin_wr *wr)
+{
+ struct ionic_aq *aq = wr->aq;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&aq->lock, irqflags);
+
+ if (!list_empty(&wr->aq_ent)) {
+ list_del(&wr->aq_ent);
+ if (wr->status != IONIC_ADMIN_POSTED)
+ aq->q_wr[wr->status].wr = NULL;
+ }
+
+ spin_unlock_irqrestore(&aq->lock, irqflags);
+}
+
+static int ionic_admin_busy_wait(struct ionic_admin_wr *wr)
+{
+ struct ionic_aq *aq = wr->aq;
+ unsigned long irqflags;
+ int try_i;
+
+ for (try_i = 0; try_i < IONIC_ADMIN_BUSY_RETRY_COUNT; ++try_i) {
+ if (completion_done(&wr->work))
+ return 0;
+
+ mdelay(IONIC_ADMIN_BUSY_RETRY_MS);
+
+ spin_lock_irqsave(&aq->lock, irqflags);
+ ionic_admin_poll_locked(aq);
+ spin_unlock_irqrestore(&aq->lock, irqflags);
+ }
+
+ /*
+ * We timed out. Initiate an RDMA LIF reset and indicate
+ * the error to the caller.
+ */
+ ionic_admin_timedout(aq);
+ return -ETIMEDOUT;
+}
+
+int ionic_admin_wait(struct ionic_ibdev *dev, struct ionic_admin_wr *wr,
+ enum ionic_admin_flags flags)
+{
+ int rc, timo;
+
+ if (flags & IONIC_ADMIN_F_BUSYWAIT) {
+ /* Spin */
+ rc = ionic_admin_busy_wait(wr);
+ } else if (flags & IONIC_ADMIN_F_INTERRUPT) {
+ /*
+ * Interruptible sleep, 1s timeout
+ * This is used for commands which are safe for the caller
+ * to clean up without killing and resetting the adminq.
+ */
+ timo = wait_for_completion_interruptible_timeout(&wr->work,
+ HZ);
+ if (timo > 0)
+ rc = 0;
+ else if (timo == 0)
+ rc = -ETIMEDOUT;
+ else
+ rc = timo;
+ } else {
+ /*
+ * Uninterruptible sleep
+ * This is used for commands which are NOT safe for the
+ * caller to clean up. Cleanup must be handled by the
+ * adminq kill and reset process so that host memory is
+ * not corrupted by the device.
+ */
+ wait_for_completion(&wr->work);
+ rc = 0;
+ }
+
+ if (rc) {
+ ibdev_warn(&dev->ibdev, "wait status %d\n", rc);
+ ionic_admin_cancel(wr);
+ } else if (wr->status == IONIC_ADMIN_KILLED) {
+ ibdev_dbg(&dev->ibdev, "admin killed\n");
+
+ /* No error if admin already killed during teardown */
+ rc = (flags & IONIC_ADMIN_F_TEARDOWN) ? 0 : -ENODEV;
+ } else if (ionic_v1_cqe_error(&wr->cqe)) {
+ ibdev_warn(&dev->ibdev, "opcode %u error %u\n",
+ wr->wqe.op,
+ be32_to_cpu(wr->cqe.status_length));
+ rc = -EINVAL;
+ }
+ return rc;
+}
+
+static int ionic_rdma_devcmd(struct ionic_ibdev *dev,
+ struct ionic_admin_ctx *admin)
+{
+ int rc;
+
+ rc = ionic_adminq_post_wait(dev->lif_cfg.lif, admin);
+ if (rc)
+ return rc;
+
+ return ionic_error_to_errno(admin->comp.comp.status);
+}
+
+int ionic_rdma_reset_devcmd(struct ionic_ibdev *dev)
+{
+ struct ionic_admin_ctx admin = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(admin.work),
+ .cmd.rdma_reset = {
+ .opcode = IONIC_CMD_RDMA_RESET_LIF,
+ .lif_index = cpu_to_le16(dev->lif_cfg.lif_index),
+ },
+ };
+
+ return ionic_rdma_devcmd(dev, &admin);
+}
+
+static int ionic_rdma_queue_devcmd(struct ionic_ibdev *dev,
+ struct ionic_queue *q,
+ u32 qid, u32 cid, u16 opcode)
+{
+ struct ionic_admin_ctx admin = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(admin.work),
+ .cmd.rdma_queue = {
+ .opcode = opcode,
+ .lif_index = cpu_to_le16(dev->lif_cfg.lif_index),
+ .qid_ver = cpu_to_le32(qid),
+ .cid = cpu_to_le32(cid),
+ .dbid = cpu_to_le16(dev->lif_cfg.dbid),
+ .depth_log2 = q->depth_log2,
+ .stride_log2 = q->stride_log2,
+ .dma_addr = cpu_to_le64(q->dma),
+ },
+ };
+
+ return ionic_rdma_devcmd(dev, &admin);
+}
+
+static void ionic_rdma_admincq_comp(struct ib_cq *ibcq, void *cq_context)
+{
+ struct ionic_aq *aq = cq_context;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&aq->lock, irqflags);
+ aq->armed = false;
+ if (atomic_read(&aq->admin_state) < IONIC_ADMIN_KILLED)
+ queue_work(ionic_evt_workq, &aq->work);
+ spin_unlock_irqrestore(&aq->lock, irqflags);
+}
+
+static void ionic_rdma_admincq_event(struct ib_event *event, void *cq_context)
+{
+ struct ionic_aq *aq = cq_context;
+
+ ibdev_err(&aq->dev->ibdev, "admincq event %d\n", event->event);
+}
+
+static struct ionic_vcq *ionic_create_rdma_admincq(struct ionic_ibdev *dev,
+ int comp_vector)
+{
+ struct ib_cq_init_attr attr = {
+ .cqe = IONIC_AQ_DEPTH,
+ .comp_vector = comp_vector,
+ };
+ struct ionic_tbl_buf buf = {};
+ struct ionic_vcq *vcq;
+ struct ionic_cq *cq;
+ int rc;
+
+ vcq = kzalloc(sizeof(*vcq), GFP_KERNEL);
+ if (!vcq)
+ return ERR_PTR(-ENOMEM);
+
+ vcq->ibcq.device = &dev->ibdev;
+ vcq->ibcq.comp_handler = ionic_rdma_admincq_comp;
+ vcq->ibcq.event_handler = ionic_rdma_admincq_event;
+ atomic_set(&vcq->ibcq.usecnt, 0);
+
+ vcq->udma_mask = 1;
+ cq = &vcq->cq[0];
+
+ rc = ionic_create_cq_common(vcq, &buf, &attr, NULL, NULL,
+ NULL, NULL, 0);
+ if (rc)
+ goto err_init;
+
+ rc = ionic_rdma_queue_devcmd(dev, &cq->q, cq->cqid, cq->eqid,
+ IONIC_CMD_RDMA_CREATE_CQ);
+ if (rc)
+ goto err_cmd;
+
+ return vcq;
+
+err_cmd:
+ ionic_destroy_cq_common(dev, cq);
+err_init:
+ kfree(vcq);
+
+ return ERR_PTR(rc);
+}
+
+static struct ionic_aq *__ionic_create_rdma_adminq(struct ionic_ibdev *dev,
+ u32 aqid, u32 cqid)
+{
+ struct ionic_aq *aq;
+ int rc;
+
+ aq = kzalloc(sizeof(*aq), GFP_KERNEL);
+ if (!aq)
+ return ERR_PTR(-ENOMEM);
+
+ atomic_set(&aq->admin_state, IONIC_ADMIN_KILLED);
+ aq->dev = dev;
+ aq->aqid = aqid;
+ aq->cqid = cqid;
+ spin_lock_init(&aq->lock);
+
+ rc = ionic_queue_init(&aq->q, dev->lif_cfg.hwdev, IONIC_EQ_DEPTH,
+ ADMIN_WQE_STRIDE);
+ if (rc)
+ goto err_q;
+
+ ionic_queue_dbell_init(&aq->q, aq->aqid);
+
+ aq->q_wr = kcalloc((u32)aq->q.mask + 1, sizeof(*aq->q_wr), GFP_KERNEL);
+ if (!aq->q_wr) {
+ rc = -ENOMEM;
+ goto err_wr;
+ }
+
+ INIT_LIST_HEAD(&aq->wr_prod);
+ INIT_LIST_HEAD(&aq->wr_post);
+
+ INIT_WORK(&aq->work, ionic_admin_work);
+ aq->armed = false;
+
+ return aq;
+
+err_wr:
+ ionic_queue_destroy(&aq->q, dev->lif_cfg.hwdev);
+err_q:
+ kfree(aq);
+
+ return ERR_PTR(rc);
+}
+
+static void __ionic_destroy_rdma_adminq(struct ionic_ibdev *dev,
+ struct ionic_aq *aq)
+{
+ kfree(aq->q_wr);
+ ionic_queue_destroy(&aq->q, dev->lif_cfg.hwdev);
+ kfree(aq);
+}
+
+static struct ionic_aq *ionic_create_rdma_adminq(struct ionic_ibdev *dev,
+ u32 aqid, u32 cqid)
+{
+ struct ionic_aq *aq;
+ int rc;
+
+ aq = __ionic_create_rdma_adminq(dev, aqid, cqid);
+ if (IS_ERR(aq))
+ return aq;
+
+ rc = ionic_rdma_queue_devcmd(dev, &aq->q, aq->aqid, aq->cqid,
+ IONIC_CMD_RDMA_CREATE_ADMINQ);
+ if (rc)
+ goto err_cmd;
+
+ return aq;
+
+err_cmd:
+ __ionic_destroy_rdma_adminq(dev, aq);
+
+ return ERR_PTR(rc);
+}
+
+static void ionic_flush_qs(struct ionic_ibdev *dev)
+{
+ struct ionic_qp *qp, *qp_tmp;
+ struct ionic_cq *cq, *cq_tmp;
+ LIST_HEAD(flush_list);
+ unsigned long index;
+
+ WARN_ON(!irqs_disabled());
+
+ /* Flush qp send and recv */
+ xa_lock(&dev->qp_tbl);
+ xa_for_each(&dev->qp_tbl, index, qp) {
+ kref_get(&qp->qp_kref);
+ list_add_tail(&qp->ibkill_flush_ent, &flush_list);
+ }
+ xa_unlock(&dev->qp_tbl);
+
+ list_for_each_entry_safe(qp, qp_tmp, &flush_list, ibkill_flush_ent) {
+ ionic_flush_qp(dev, qp);
+ kref_put(&qp->qp_kref, ionic_qp_complete);
+ list_del(&qp->ibkill_flush_ent);
+ }
+
+ /* Notify completions */
+ xa_lock(&dev->cq_tbl);
+ xa_for_each(&dev->cq_tbl, index, cq) {
+ kref_get(&cq->cq_kref);
+ list_add_tail(&cq->ibkill_flush_ent, &flush_list);
+ }
+ xa_unlock(&dev->cq_tbl);
+
+ list_for_each_entry_safe(cq, cq_tmp, &flush_list, ibkill_flush_ent) {
+ ionic_notify_flush_cq(cq);
+ kref_put(&cq->cq_kref, ionic_cq_complete);
+ list_del(&cq->ibkill_flush_ent);
+ }
+}
+
+static void ionic_kill_ibdev(struct ionic_ibdev *dev, bool fatal_path)
+{
+ unsigned long irqflags;
+ bool do_flush = false;
+ int i;
+
+ /* Mark AQs for drain and flush the QPs while irq is disabled */
+ local_irq_save(irqflags);
+
+ /* Mark the admin queue, flushing at most once */
+ for (i = 0; i < dev->lif_cfg.aq_count; i++) {
+ struct ionic_aq *aq = dev->aq_vec[i];
+
+ spin_lock(&aq->lock);
+ if (atomic_read(&aq->admin_state) != IONIC_ADMIN_KILLED) {
+ atomic_set(&aq->admin_state, IONIC_ADMIN_KILLED);
+ /* Flush incomplete admin commands */
+ ionic_admin_poll_locked(aq);
+ do_flush = true;
+ }
+ spin_unlock(&aq->lock);
+ }
+
+ if (do_flush)
+ ionic_flush_qs(dev);
+
+ local_irq_restore(irqflags);
+
+ /* Post a fatal event if requested */
+ if (fatal_path) {
+ struct ib_event ev;
+
+ ev.device = &dev->ibdev;
+ ev.element.port_num = 1;
+ ev.event = IB_EVENT_DEVICE_FATAL;
+
+ ib_dispatch_event(&ev);
+ }
+
+ atomic_set(&dev->admin_state, IONIC_ADMIN_KILLED);
+}
+
+void ionic_kill_rdma_admin(struct ionic_ibdev *dev, bool fatal_path)
+{
+ enum ionic_admin_state old_state;
+ unsigned long irqflags = 0;
+ int i, rc;
+
+ if (!dev->aq_vec)
+ return;
+
+ /*
+ * Admin queues are transitioned from active to paused to killed state.
+ * When in paused state, no new commands are issued to the device,
+ * nor are any completed locally. After resetting the lif, it will be
+ * safe to resume the rdma admin queues in the killed state. Commands
+ * will not be issued to the device, but will complete locally with status
+ * IONIC_ADMIN_KILLED. Handling completion will ensure that creating or
+ * modifying resources fails, but destroying resources succeeds.
+ * If there was a failure resetting the lif using this strategy,
+ * then the state of the device is unknown.
+ */
+ old_state = atomic_cmpxchg(&dev->admin_state, IONIC_ADMIN_ACTIVE,
+ IONIC_ADMIN_PAUSED);
+ if (old_state != IONIC_ADMIN_ACTIVE)
+ return;
+
+ /* Pause all the AQs */
+ local_irq_save(irqflags);
+ for (i = 0; i < dev->lif_cfg.aq_count; i++) {
+ struct ionic_aq *aq = dev->aq_vec[i];
+
+ spin_lock(&aq->lock);
+ /* pause rdma admin queues to reset lif */
+ if (atomic_read(&aq->admin_state) == IONIC_ADMIN_ACTIVE)
+ atomic_set(&aq->admin_state, IONIC_ADMIN_PAUSED);
+ spin_unlock(&aq->lock);
+ }
+ local_irq_restore(irqflags);
+
+ rc = ionic_rdma_reset_devcmd(dev);
+ if (unlikely(rc)) {
+ ibdev_err(&dev->ibdev, "failed to reset rdma %d\n", rc);
+ ionic_request_rdma_reset(dev->lif_cfg.lif);
+ }
+
+ ionic_kill_ibdev(dev, fatal_path);
+}
+
+static void ionic_reset_work(struct work_struct *ws)
+{
+ struct ionic_ibdev *dev =
+ container_of(ws, struct ionic_ibdev, reset_work);
+
+ ionic_kill_rdma_admin(dev, true);
+}
+
+static bool ionic_next_eqe(struct ionic_eq *eq, struct ionic_v1_eqe *eqe)
+{
+ struct ionic_v1_eqe *qeqe;
+ bool color;
+
+ qeqe = ionic_queue_at_prod(&eq->q);
+ color = ionic_v1_eqe_color(qeqe);
+
+ /* cons is color for eq */
+ if (eq->q.cons != color)
+ return false;
+
+ /* Prevent out-of-order reads of the EQE */
+ dma_rmb();
+
+ ibdev_dbg(&eq->dev->ibdev, "poll eq prod %u\n", eq->q.prod);
+ print_hex_dump_debug("eqe ", DUMP_PREFIX_OFFSET, 16, 1,
+ qeqe, BIT(eq->q.stride_log2), true);
+ *eqe = *qeqe;
+
+ return true;
+}
+
+static void ionic_cq_event(struct ionic_ibdev *dev, u32 cqid, u8 code)
+{
+ unsigned long irqflags;
+ struct ib_event ibev;
+ struct ionic_cq *cq;
+
+ xa_lock_irqsave(&dev->cq_tbl, irqflags);
+ cq = xa_load(&dev->cq_tbl, cqid);
+ if (cq)
+ kref_get(&cq->cq_kref);
+ xa_unlock_irqrestore(&dev->cq_tbl, irqflags);
+
+ if (!cq) {
+ ibdev_dbg(&dev->ibdev,
+ "missing cqid %#x code %u\n", cqid, code);
+ return;
+ }
+
+ switch (code) {
+ case IONIC_V1_EQE_CQ_NOTIFY:
+ if (cq->vcq->ibcq.comp_handler)
+ cq->vcq->ibcq.comp_handler(&cq->vcq->ibcq,
+ cq->vcq->ibcq.cq_context);
+ break;
+
+ case IONIC_V1_EQE_CQ_ERR:
+ if (cq->vcq->ibcq.event_handler) {
+ ibev.event = IB_EVENT_CQ_ERR;
+ ibev.device = &dev->ibdev;
+ ibev.element.cq = &cq->vcq->ibcq;
+
+ cq->vcq->ibcq.event_handler(&ibev,
+ cq->vcq->ibcq.cq_context);
+ }
+ break;
+
+ default:
+ ibdev_dbg(&dev->ibdev,
+ "unrecognized cqid %#x code %u\n", cqid, code);
+ break;
+ }
+
+ kref_put(&cq->cq_kref, ionic_cq_complete);
+}
+
+static void ionic_qp_event(struct ionic_ibdev *dev, u32 qpid, u8 code)
+{
+ unsigned long irqflags;
+ struct ib_event ibev;
+ struct ionic_qp *qp;
+
+ xa_lock_irqsave(&dev->qp_tbl, irqflags);
+ qp = xa_load(&dev->qp_tbl, qpid);
+ if (qp)
+ kref_get(&qp->qp_kref);
+ xa_unlock_irqrestore(&dev->qp_tbl, irqflags);
+
+ if (!qp) {
+ ibdev_dbg(&dev->ibdev,
+ "missing qpid %#x code %u\n", qpid, code);
+ return;
+ }
+
+ ibev.device = &dev->ibdev;
+ ibev.element.qp = &qp->ibqp;
+
+ switch (code) {
+ case IONIC_V1_EQE_SQ_DRAIN:
+ ibev.event = IB_EVENT_SQ_DRAINED;
+ break;
+
+ case IONIC_V1_EQE_QP_COMM_EST:
+ ibev.event = IB_EVENT_COMM_EST;
+ break;
+
+ case IONIC_V1_EQE_QP_LAST_WQE:
+ ibev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ break;
+
+ case IONIC_V1_EQE_QP_ERR:
+ ibev.event = IB_EVENT_QP_FATAL;
+ break;
+
+ case IONIC_V1_EQE_QP_ERR_REQUEST:
+ ibev.event = IB_EVENT_QP_REQ_ERR;
+ break;
+
+ case IONIC_V1_EQE_QP_ERR_ACCESS:
+ ibev.event = IB_EVENT_QP_ACCESS_ERR;
+ break;
+
+ default:
+ ibdev_dbg(&dev->ibdev,
+ "unrecognized qpid %#x code %u\n", qpid, code);
+ goto out;
+ }
+
+ if (qp->ibqp.event_handler)
+ qp->ibqp.event_handler(&ibev, qp->ibqp.qp_context);
+
+out:
+ kref_put(&qp->qp_kref, ionic_qp_complete);
+}
+
+static u16 ionic_poll_eq(struct ionic_eq *eq, u16 budget)
+{
+ struct ionic_ibdev *dev = eq->dev;
+ struct ionic_v1_eqe eqe;
+ u16 npolled = 0;
+ u8 type, code;
+ u32 evt, qid;
+
+ while (npolled < budget) {
+ if (!ionic_next_eqe(eq, &eqe))
+ break;
+
+ ionic_queue_produce(&eq->q);
+
+ /* cons is color for eq */
+ eq->q.cons = ionic_color_wrap(eq->q.prod, eq->q.cons);
+
+ ++npolled;
+
+ evt = ionic_v1_eqe_evt(&eqe);
+ type = ionic_v1_eqe_evt_type(evt);
+ code = ionic_v1_eqe_evt_code(evt);
+ qid = ionic_v1_eqe_evt_qid(evt);
+
+ switch (type) {
+ case IONIC_V1_EQE_TYPE_CQ:
+ ionic_cq_event(dev, qid, code);
+ break;
+
+ case IONIC_V1_EQE_TYPE_QP:
+ ionic_qp_event(dev, qid, code);
+ break;
+
+ default:
+ ibdev_dbg(&dev->ibdev,
+ "unknown event %#x type %u\n", evt, type);
+ }
+ }
+
+ return npolled;
+}
+
+static void ionic_poll_eq_work(struct work_struct *work)
+{
+ struct ionic_eq *eq = container_of(work, struct ionic_eq, work);
+ u32 npolled;
+
+ if (unlikely(!eq->enable) || WARN_ON(eq->armed))
+ return;
+
+ npolled = ionic_poll_eq(eq, IONIC_EQ_WORK_BUDGET);
+ if (npolled == IONIC_EQ_WORK_BUDGET) {
+ ionic_intr_credits(eq->dev->lif_cfg.intr_ctrl, eq->intr,
+ npolled, 0);
+ queue_work(ionic_evt_workq, &eq->work);
+ } else {
+ xchg(&eq->armed, 1);
+ ionic_intr_credits(eq->dev->lif_cfg.intr_ctrl, eq->intr,
+ 0, IONIC_INTR_CRED_UNMASK);
+ }
+}
+
+static irqreturn_t ionic_poll_eq_isr(int irq, void *eqptr)
+{
+ struct ionic_eq *eq = eqptr;
+ int was_armed;
+ u32 npolled;
+
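+ /* Atomically disarm so each arming results in at most one poll */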
+ was_armed = xchg(&eq->armed, 0);
+
+ if (unlikely(!eq->enable) || !was_armed)
+ return IRQ_HANDLED;
+
+ npolled = ionic_poll_eq(eq, IONIC_EQ_ISR_BUDGET);
+ if (npolled == IONIC_EQ_ISR_BUDGET) {
+ ionic_intr_credits(eq->dev->lif_cfg.intr_ctrl, eq->intr,
+ npolled, 0);
+ queue_work(ionic_evt_workq, &eq->work);
+ } else {
+ xchg(&eq->armed, 1);
+ ionic_intr_credits(eq->dev->lif_cfg.intr_ctrl, eq->intr,
+ 0, IONIC_INTR_CRED_UNMASK);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct ionic_eq *ionic_create_eq(struct ionic_ibdev *dev, int eqid)
+{
+ struct ionic_intr_info intr_obj = { };
+ struct ionic_eq *eq;
+ int rc;
+
+ eq = kzalloc(sizeof(*eq), GFP_KERNEL);
+ if (!eq)
+ return ERR_PTR(-ENOMEM);
+
+ eq->dev = dev;
+
+ rc = ionic_queue_init(&eq->q, dev->lif_cfg.hwdev, IONIC_EQ_DEPTH,
+ sizeof(struct ionic_v1_eqe));
+ if (rc)
+ goto err_q;
+
+ eq->eqid = eqid;
+
+ eq->armed = true;
+ eq->enable = false;
+ INIT_WORK(&eq->work, ionic_poll_eq_work);
+
+ rc = ionic_intr_alloc(dev->lif_cfg.lif, &intr_obj);
+ if (rc < 0)
+ goto err_intr;
+
+ eq->irq = intr_obj.vector;
+ eq->intr = intr_obj.index;
+
+ ionic_queue_dbell_init(&eq->q, eq->eqid);
+
+ /* cons is color for eq */
+ eq->q.cons = true;
+
+ snprintf(eq->name, sizeof(eq->name), "%s-%d-%d-eq",
+ "ionr", dev->lif_cfg.lif_index, eq->eqid);
+
+ ionic_intr_mask(dev->lif_cfg.intr_ctrl, eq->intr, IONIC_INTR_MASK_SET);
+ ionic_intr_mask_assert(dev->lif_cfg.intr_ctrl, eq->intr, IONIC_INTR_MASK_SET);
+ ionic_intr_coal_init(dev->lif_cfg.intr_ctrl, eq->intr, 0);
+ ionic_intr_clean(dev->lif_cfg.intr_ctrl, eq->intr);
+
+ eq->enable = true;
+
+ rc = request_irq(eq->irq, ionic_poll_eq_isr, 0, eq->name, eq);
+ if (rc)
+ goto err_irq;
+
+ rc = ionic_rdma_queue_devcmd(dev, &eq->q, eq->eqid, eq->intr,
+ IONIC_CMD_RDMA_CREATE_EQ);
+ if (rc)
+ goto err_cmd;
+
+ ionic_intr_mask(dev->lif_cfg.intr_ctrl, eq->intr, IONIC_INTR_MASK_CLEAR);
+
+ return eq;
+
+err_cmd:
+ eq->enable = false;
+ free_irq(eq->irq, eq);
+ flush_work(&eq->work);
+err_irq:
+ ionic_intr_free(dev->lif_cfg.lif, eq->intr);
+err_intr:
+ ionic_queue_destroy(&eq->q, dev->lif_cfg.hwdev);
+err_q:
+ kfree(eq);
+
+ return ERR_PTR(rc);
+}
+
+static void ionic_destroy_eq(struct ionic_eq *eq)
+{
+ struct ionic_ibdev *dev = eq->dev;
+
+ eq->enable = false;
+ free_irq(eq->irq, eq);
+ flush_work(&eq->work);
+
+ ionic_intr_free(dev->lif_cfg.lif, eq->intr);
+ ionic_queue_destroy(&eq->q, dev->lif_cfg.hwdev);
+ kfree(eq);
+}
+
+int ionic_create_rdma_admin(struct ionic_ibdev *dev)
+{
+ int eq_i = 0, aq_i = 0, rc = 0;
+ struct ionic_vcq *vcq;
+ struct ionic_aq *aq;
+ struct ionic_eq *eq;
+
+ dev->eq_vec = NULL;
+ dev->aq_vec = NULL;
+
+ INIT_WORK(&dev->reset_work, ionic_reset_work);
+ INIT_DELAYED_WORK(&dev->admin_dwork, ionic_admin_dwork);
+ atomic_set(&dev->admin_state, IONIC_ADMIN_KILLED);
+
+ if (dev->lif_cfg.aq_count > IONIC_AQ_COUNT) {
+ ibdev_dbg(&dev->ibdev, "limiting adminq count to %d\n",
+ IONIC_AQ_COUNT);
+ dev->lif_cfg.aq_count = IONIC_AQ_COUNT;
+ }
+
+ if (dev->lif_cfg.eq_count > IONIC_EQ_COUNT) {
+ dev_dbg(&dev->ibdev.dev, "limiting eventq count to %d\n",
+ IONIC_EQ_COUNT);
+ dev->lif_cfg.eq_count = IONIC_EQ_COUNT;
+ }
+
+ /* need at least IONIC_EQ_COUNT_MIN eqs and one aq */
+ if (dev->lif_cfg.eq_count < IONIC_EQ_COUNT_MIN ||
+ dev->lif_cfg.aq_count < IONIC_AQ_COUNT_MIN) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ dev->eq_vec = kmalloc_array(dev->lif_cfg.eq_count, sizeof(*dev->eq_vec),
+ GFP_KERNEL);
+ if (!dev->eq_vec) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ for (eq_i = 0; eq_i < dev->lif_cfg.eq_count; ++eq_i) {
+ eq = ionic_create_eq(dev, eq_i + dev->lif_cfg.eq_base);
+ if (IS_ERR(eq)) {
+ rc = PTR_ERR(eq);
+
+ if (eq_i < IONIC_EQ_COUNT_MIN) {
+ ibdev_err(&dev->ibdev,
+ "fail create eq %pe\n", eq);
+ goto out;
+ }
+
+ /* ok, just fewer eqs than the device supports */
+ ibdev_dbg(&dev->ibdev, "eq count %d want %d rc %pe\n",
+ eq_i, dev->lif_cfg.eq_count, eq);
+
+ rc = 0;
+ break;
+ }
+
+ dev->eq_vec[eq_i] = eq;
+ }
+
+ dev->lif_cfg.eq_count = eq_i;
+
+ dev->aq_vec = kmalloc_array(dev->lif_cfg.aq_count, sizeof(*dev->aq_vec),
+ GFP_KERNEL);
+ if (!dev->aq_vec) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Create one CQ per AQ */
+ for (aq_i = 0; aq_i < dev->lif_cfg.aq_count; ++aq_i) {
+ vcq = ionic_create_rdma_admincq(dev, aq_i % eq_i);
+ if (IS_ERR(vcq)) {
+ rc = PTR_ERR(vcq);
+
+ if (!aq_i) {
+ ibdev_err(&dev->ibdev,
+ "failed to create acq %pe\n", vcq);
+ goto out;
+ }
+
+ /* ok, just fewer adminqs than the device supports */
+ ibdev_dbg(&dev->ibdev, "acq count %d want %d rc %pe\n",
+ aq_i, dev->lif_cfg.aq_count, vcq);
+ break;
+ }
+
+ aq = ionic_create_rdma_adminq(dev, aq_i + dev->lif_cfg.aq_base,
+ vcq->cq[0].cqid);
+ if (IS_ERR(aq)) {
+ /* Clean up the dangling CQ */
+ ionic_destroy_cq_common(dev, &vcq->cq[0]);
+ kfree(vcq);
+
+ rc = PTR_ERR(aq);
+
+ if (!aq_i) {
+ ibdev_err(&dev->ibdev,
+ "failed to create aq %pe\n", aq);
+ goto out;
+ }
+
+ /* ok, just fewer adminqs than the device supports */
+ ibdev_dbg(&dev->ibdev, "aq count %d want %d rc %pe\n",
+ aq_i, dev->lif_cfg.aq_count, aq);
+ break;
+ }
+
+ vcq->ibcq.cq_context = aq;
+ aq->vcq = vcq;
+
+ atomic_set(&aq->admin_state, IONIC_ADMIN_ACTIVE);
+ dev->aq_vec[aq_i] = aq;
+ }
+
+ atomic_set(&dev->admin_state, IONIC_ADMIN_ACTIVE);
+out:
+ dev->lif_cfg.eq_count = eq_i;
+ dev->lif_cfg.aq_count = aq_i;
+
+ return rc;
+}
+
+void ionic_destroy_rdma_admin(struct ionic_ibdev *dev)
+{
+ struct ionic_vcq *vcq;
+ struct ionic_aq *aq;
+ struct ionic_eq *eq;
+
+ /*
+ * Killing the admin queues before destroying them ensures all
+ * outstanding commands and completions are flushed. Setting
+ * admin_state = IONIC_ADMIN_KILLED stops further work from being queued.
+ */
+ cancel_delayed_work_sync(&dev->admin_dwork);
+ cancel_work_sync(&dev->reset_work);
+
+ if (dev->aq_vec) {
+ while (dev->lif_cfg.aq_count > 0) {
+ aq = dev->aq_vec[--dev->lif_cfg.aq_count];
+ vcq = aq->vcq;
+
+ cancel_work_sync(&aq->work);
+
+ __ionic_destroy_rdma_adminq(dev, aq);
+ if (vcq) {
+ ionic_destroy_cq_common(dev, &vcq->cq[0]);
+ kfree(vcq);
+ }
+ }
+
+ kfree(dev->aq_vec);
+ }
+
+ if (dev->eq_vec) {
+ while (dev->lif_cfg.eq_count > 0) {
+ eq = dev->eq_vec[--dev->lif_cfg.eq_count];
+ ionic_destroy_eq(eq);
+ }
+
+ kfree(dev->eq_vec);
+ }
+}
diff --git a/drivers/infiniband/hw/ionic/ionic_controlpath.c b/drivers/infiniband/hw/ionic/ionic_controlpath.c
new file mode 100644
index 000000000000..ea12d9b8e125
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_controlpath.c
@@ -0,0 +1,2679 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+#include <rdma/ib_user_verbs.h>
+#include <ionic_api.h>
+
+#include "ionic_fw.h"
+#include "ionic_ibdev.h"
+
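+/* Set ECT(0) (binary 10) or clear both ECN bits in the IP tos/traffic class */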
+#define ionic_set_ecn(tos) (((tos) | 2u) & ~1u)
+#define ionic_clear_ecn(tos) ((tos) & ~3u)
+
+static int ionic_validate_qdesc(struct ionic_qdesc *q)
+{
+ if (!q->addr || !q->size || !q->mask ||
+ !q->depth_log2 || !q->stride_log2)
+ return -EINVAL;
+
+ if (q->addr & (PAGE_SIZE - 1))
+ return -EINVAL;
+
+ if (q->mask != BIT(q->depth_log2) - 1)
+ return -EINVAL;
+
+ if (q->size < BIT_ULL(q->depth_log2 + q->stride_log2))
+ return -EINVAL;
+
+ return 0;
+}
+
+static u32 ionic_get_eqid(struct ionic_ibdev *dev, u32 comp_vector, u8 udma_idx)
+{
+ /* One EQ per vector per udma; the first EQs are reserved for async events.
+ * The remaining vectors can be requested for completions.
+ */
+ u32 comp_vec_count = dev->lif_cfg.eq_count / dev->lif_cfg.udma_count - 1;
+
+ return (comp_vector % comp_vec_count + 1) * dev->lif_cfg.udma_count + udma_idx;
+}
+
+static int ionic_get_cqid(struct ionic_ibdev *dev, u32 *cqid, u8 udma_idx)
+{
+ unsigned int size, base, bound;
+ int rc;
+
+ size = dev->lif_cfg.cq_count / dev->lif_cfg.udma_count;
+ base = size * udma_idx;
+ bound = base + size;
+
+ rc = ionic_resid_get_shared(&dev->inuse_cqid, base, bound);
+ if (rc >= 0) {
+ /* cq_base is zero or a multiple of two queue groups */
+ *cqid = dev->lif_cfg.cq_base +
+ ionic_bitid_to_qid(rc, dev->lif_cfg.udma_qgrp_shift,
+ dev->half_cqid_udma_shift);
+
+ rc = 0;
+ }
+
+ return rc;
+}
+
+static void ionic_put_cqid(struct ionic_ibdev *dev, u32 cqid)
+{
+ u32 bitid = ionic_qid_to_bitid(cqid - dev->lif_cfg.cq_base,
+ dev->lif_cfg.udma_qgrp_shift,
+ dev->half_cqid_udma_shift);
+
+ ionic_resid_put(&dev->inuse_cqid, bitid);
+}
+
+int ionic_create_cq_common(struct ionic_vcq *vcq,
+ struct ionic_tbl_buf *buf,
+ const struct ib_cq_init_attr *attr,
+ struct ionic_ctx *ctx,
+ struct ib_udata *udata,
+ struct ionic_qdesc *req_cq,
+ __u32 *resp_cqid,
+ int udma_idx)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(vcq->ibcq.device);
+ struct ionic_cq *cq = &vcq->cq[udma_idx];
+ void *entry;
+ int rc;
+
+ cq->vcq = vcq;
+
+ if (attr->cqe < 1 || attr->cqe + IONIC_CQ_GRACE > 0xffff) {
+ rc = -EINVAL;
+ goto err_args;
+ }
+
+ rc = ionic_get_cqid(dev, &cq->cqid, udma_idx);
+ if (rc)
+ goto err_args;
+
+ cq->eqid = ionic_get_eqid(dev, attr->comp_vector, udma_idx);
+
+ spin_lock_init(&cq->lock);
+ INIT_LIST_HEAD(&cq->poll_sq);
+ INIT_LIST_HEAD(&cq->flush_sq);
+ INIT_LIST_HEAD(&cq->flush_rq);
+
+ if (udata) {
+ rc = ionic_validate_qdesc(req_cq);
+ if (rc)
+ goto err_qdesc;
+
+ cq->umem = ib_umem_get(&dev->ibdev, req_cq->addr, req_cq->size,
+ IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(cq->umem)) {
+ rc = PTR_ERR(cq->umem);
+ goto err_qdesc;
+ }
+
+ cq->q.ptr = NULL;
+ cq->q.size = req_cq->size;
+ cq->q.mask = req_cq->mask;
+ cq->q.depth_log2 = req_cq->depth_log2;
+ cq->q.stride_log2 = req_cq->stride_log2;
+
+ *resp_cqid = cq->cqid;
+ } else {
+ rc = ionic_queue_init(&cq->q, dev->lif_cfg.hwdev,
+ attr->cqe + IONIC_CQ_GRACE,
+ sizeof(struct ionic_v1_cqe));
+ if (rc)
+ goto err_q_init;
+
+ ionic_queue_dbell_init(&cq->q, cq->cqid);
+ cq->color = true;
+ cq->credit = cq->q.mask;
+ }
+
+ rc = ionic_pgtbl_init(dev, buf, cq->umem, cq->q.dma, 1, PAGE_SIZE);
+ if (rc)
+ goto err_pgtbl_init;
+
+ init_completion(&cq->cq_rel_comp);
+ kref_init(&cq->cq_kref);
+
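+ /* A non-NULL return means the cqid was already in use or the store failed */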
+ entry = xa_store_irq(&dev->cq_tbl, cq->cqid, cq, GFP_KERNEL);
+ if (entry) {
+ if (!xa_is_err(entry))
+ rc = -EINVAL;
+ else
+ rc = xa_err(entry);
+
+ goto err_xa;
+ }
+
+ return 0;
+
+err_xa:
+ ionic_pgtbl_unbuf(dev, buf);
+err_pgtbl_init:
+ if (!udata)
+ ionic_queue_destroy(&cq->q, dev->lif_cfg.hwdev);
+err_q_init:
+ if (cq->umem)
+ ib_umem_release(cq->umem);
+err_qdesc:
+ ionic_put_cqid(dev, cq->cqid);
+err_args:
+ cq->vcq = NULL;
+
+ return rc;
+}
+
+void ionic_destroy_cq_common(struct ionic_ibdev *dev, struct ionic_cq *cq)
+{
+ if (!cq->vcq)
+ return;
+
+ xa_erase_irq(&dev->cq_tbl, cq->cqid);
+
+ kref_put(&cq->cq_kref, ionic_cq_complete);
+ wait_for_completion(&cq->cq_rel_comp);
+
+ if (cq->umem)
+ ib_umem_release(cq->umem);
+ else
+ ionic_queue_destroy(&cq->q, dev->lif_cfg.hwdev);
+
+ ionic_put_cqid(dev, cq->cqid);
+
+ cq->vcq = NULL;
+}
+
+static int ionic_validate_qdesc_zero(struct ionic_qdesc *q)
+{
+ if (q->addr || q->size || q->mask || q->depth_log2 || q->stride_log2)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ionic_get_pdid(struct ionic_ibdev *dev, u32 *pdid)
+{
+ int rc;
+
+ rc = ionic_resid_get(&dev->inuse_pdid);
+ if (rc < 0)
+ return rc;
+
+ *pdid = rc;
+ return 0;
+}
+
+static int ionic_get_ahid(struct ionic_ibdev *dev, u32 *ahid)
+{
+ int rc;
+
+ rc = ionic_resid_get(&dev->inuse_ahid);
+ if (rc < 0)
+ return rc;
+
+ *ahid = rc;
+ return 0;
+}
+
+static int ionic_get_mrid(struct ionic_ibdev *dev, u32 *mrid)
+{
+ int rc;
+
+ /* wrap to 1, skip reserved lkey */
+ rc = ionic_resid_get_shared(&dev->inuse_mrid, 1,
+ dev->inuse_mrid.inuse_size);
+ if (rc < 0)
+ return rc;
+
+ *mrid = ionic_mrid(rc, dev->next_mrkey++);
+ return 0;
+}
+
+static int ionic_get_gsi_qpid(struct ionic_ibdev *dev, u32 *qpid)
+{
+ int rc = 0;
+
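+ /* The GSI QP always uses the reserved qpid IB_QPT_GSI (1) */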
+ rc = ionic_resid_get_shared(&dev->inuse_qpid, IB_QPT_GSI, IB_QPT_GSI + 1);
+ if (rc < 0)
+ return rc;
+
+ *qpid = IB_QPT_GSI;
+ return 0;
+}
+
+static int ionic_get_qpid(struct ionic_ibdev *dev, u32 *qpid,
+ u8 *udma_idx, u8 udma_mask)
+{
+ unsigned int size, base, bound;
+ int udma_i, udma_x, udma_ix;
+ int rc = -EINVAL;
+
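+ /* Alternate the preferred udma group between calls to balance QPs */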
+ udma_x = dev->next_qpid_udma_idx;
+
+ dev->next_qpid_udma_idx ^= dev->lif_cfg.udma_count - 1;
+
+ for (udma_i = 0; udma_i < dev->lif_cfg.udma_count; ++udma_i) {
+ udma_ix = udma_i ^ udma_x;
+
+ if (!(udma_mask & BIT(udma_ix)))
+ continue;
+
+ size = dev->lif_cfg.qp_count / dev->lif_cfg.udma_count;
+ base = size * udma_ix;
+ bound = base + size;
+
+ /* skip reserved SMI and GSI qpids in group zero */
+ if (!base)
+ base = 2;
+
+ rc = ionic_resid_get_shared(&dev->inuse_qpid, base, bound);
+ if (rc >= 0) {
+ *qpid = ionic_bitid_to_qid(rc,
+ dev->lif_cfg.udma_qgrp_shift,
+ dev->half_qpid_udma_shift);
+ *udma_idx = udma_ix;
+
+ rc = 0;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+static int ionic_get_dbid(struct ionic_ibdev *dev, u32 *dbid, phys_addr_t *addr)
+{
+ int rc, dbpage_num;
+
+ /* wrap to 1, skip kernel reserved */
+ rc = ionic_resid_get_shared(&dev->inuse_dbid, 1,
+ dev->inuse_dbid.inuse_size);
+ if (rc < 0)
+ return rc;
+
+ dbpage_num = (dev->lif_cfg.lif_hw_index * dev->lif_cfg.dbid_count) + rc;
+ *addr = dev->lif_cfg.db_phys + ((phys_addr_t)dbpage_num << PAGE_SHIFT);
+
+ *dbid = rc;
+
+ return 0;
+}
+
+static void ionic_put_pdid(struct ionic_ibdev *dev, u32 pdid)
+{
+ ionic_resid_put(&dev->inuse_pdid, pdid);
+}
+
+static void ionic_put_ahid(struct ionic_ibdev *dev, u32 ahid)
+{
+ ionic_resid_put(&dev->inuse_ahid, ahid);
+}
+
+static void ionic_put_mrid(struct ionic_ibdev *dev, u32 mrid)
+{
+ ionic_resid_put(&dev->inuse_mrid, ionic_mrid_index(mrid));
+}
+
+static void ionic_put_qpid(struct ionic_ibdev *dev, u32 qpid)
+{
+ u32 bitid = ionic_qid_to_bitid(qpid,
+ dev->lif_cfg.udma_qgrp_shift,
+ dev->half_qpid_udma_shift);
+
+ ionic_resid_put(&dev->inuse_qpid, bitid);
+}
+
+static void ionic_put_dbid(struct ionic_ibdev *dev, u32 dbid)
+{
+ ionic_resid_put(&dev->inuse_dbid, dbid);
+}
+
+static struct rdma_user_mmap_entry*
+ionic_mmap_entry_insert(struct ionic_ctx *ctx, unsigned long size,
+ unsigned long pfn, u8 mmap_flags, u64 *offset)
+{
+ struct ionic_mmap_entry *entry;
+ int rc;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return NULL;
+
+ entry->size = size;
+ entry->pfn = pfn;
+ entry->mmap_flags = mmap_flags;
+
+ rc = rdma_user_mmap_entry_insert(&ctx->ibctx, &entry->rdma_entry,
+ entry->size);
+ if (rc) {
+ kfree(entry);
+ return NULL;
+ }
+
+ if (offset)
+ *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
+
+ return &entry->rdma_entry;
+}
+
+int ionic_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibctx->device);
+ struct ionic_ctx *ctx = to_ionic_ctx(ibctx);
+ struct ionic_ctx_resp resp = {};
+ struct ionic_ctx_req req;
+ phys_addr_t db_phys = 0;
+ int rc;
+
+ rc = ib_copy_from_udata(&req, udata, sizeof(req));
+ if (rc)
+ return rc;
+
+ /* try to allocate dbid for user ctx */
+ rc = ionic_get_dbid(dev, &ctx->dbid, &db_phys);
+ if (rc < 0)
+ return rc;
+
+ ibdev_dbg(&dev->ibdev, "user space dbid %u\n", ctx->dbid);
+
+ ctx->mmap_dbell = ionic_mmap_entry_insert(ctx, PAGE_SIZE,
+ PHYS_PFN(db_phys), 0, NULL);
+ if (!ctx->mmap_dbell) {
+ rc = -ENOMEM;
+ goto err_mmap_dbell;
+ }
+
+ resp.page_shift = PAGE_SHIFT;
+
+ resp.dbell_offset = db_phys & ~PAGE_MASK;
+
+ resp.version = dev->lif_cfg.rdma_version;
+ resp.qp_opcodes = dev->lif_cfg.qp_opcodes;
+ resp.admin_opcodes = dev->lif_cfg.admin_opcodes;
+
+ resp.sq_qtype = dev->lif_cfg.sq_qtype;
+ resp.rq_qtype = dev->lif_cfg.rq_qtype;
+ resp.cq_qtype = dev->lif_cfg.cq_qtype;
+ resp.admin_qtype = dev->lif_cfg.aq_qtype;
+ resp.max_stride = dev->lif_cfg.max_stride;
+ resp.max_spec = IONIC_SPEC_HIGH;
+
+ resp.udma_count = dev->lif_cfg.udma_count;
+ resp.expdb_mask = dev->lif_cfg.expdb_mask;
+
+ if (dev->lif_cfg.sq_expdb)
+ resp.expdb_qtypes |= IONIC_EXPDB_SQ;
+ if (dev->lif_cfg.rq_expdb)
+ resp.expdb_qtypes |= IONIC_EXPDB_RQ;
+
+ rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (rc)
+ goto err_resp;
+
+ return 0;
+
+err_resp:
+ rdma_user_mmap_entry_remove(ctx->mmap_dbell);
+err_mmap_dbell:
+ ionic_put_dbid(dev, ctx->dbid);
+
+ return rc;
+}
+
+void ionic_dealloc_ucontext(struct ib_ucontext *ibctx)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibctx->device);
+ struct ionic_ctx *ctx = to_ionic_ctx(ibctx);
+
+ rdma_user_mmap_entry_remove(ctx->mmap_dbell);
+ ionic_put_dbid(dev, ctx->dbid);
+}
+
+int ionic_mmap(struct ib_ucontext *ibctx, struct vm_area_struct *vma)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibctx->device);
+ struct ionic_ctx *ctx = to_ionic_ctx(ibctx);
+ struct rdma_user_mmap_entry *rdma_entry;
+ struct ionic_mmap_entry *ionic_entry;
+ int rc = 0;
+
+ rdma_entry = rdma_user_mmap_entry_get(&ctx->ibctx, vma);
+ if (!rdma_entry) {
+ ibdev_dbg(&dev->ibdev, "not found %#lx\n",
+ vma->vm_pgoff << PAGE_SHIFT);
+ return -EINVAL;
+ }
+
+ ionic_entry = container_of(rdma_entry, struct ionic_mmap_entry,
+ rdma_entry);
+
+ ibdev_dbg(&dev->ibdev, "writecombine? %d\n",
+ ionic_entry->mmap_flags & IONIC_MMAP_WC);
+ if (ionic_entry->mmap_flags & IONIC_MMAP_WC)
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ else
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ ibdev_dbg(&dev->ibdev, "remap st %#lx pf %#lx sz %#lx\n",
+ vma->vm_start, ionic_entry->pfn, ionic_entry->size);
+ rc = rdma_user_mmap_io(&ctx->ibctx, vma, ionic_entry->pfn,
+ ionic_entry->size, vma->vm_page_prot,
+ rdma_entry);
+ if (rc)
+ ibdev_dbg(&dev->ibdev, "remap failed %d\n", rc);
+
+ rdma_user_mmap_entry_put(rdma_entry);
+ return rc;
+}
+
+void ionic_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
+{
+ struct ionic_mmap_entry *ionic_entry;
+
+ ionic_entry = container_of(rdma_entry, struct ionic_mmap_entry,
+ rdma_entry);
+ kfree(ionic_entry);
+}
+
+int ionic_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device);
+ struct ionic_pd *pd = to_ionic_pd(ibpd);
+
+ return ionic_get_pdid(dev, &pd->pdid);
+}
+
+int ionic_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device);
+ struct ionic_pd *pd = to_ionic_pd(ibpd);
+
+ ionic_put_pdid(dev, pd->pdid);
+
+ return 0;
+}
+
+static int ionic_build_hdr(struct ionic_ibdev *dev,
+ struct ib_ud_header *hdr,
+ const struct rdma_ah_attr *attr,
+ u16 sport, bool want_ecn)
+{
+ const struct ib_global_route *grh;
+ enum rdma_network_type net;
+ u16 vlan;
+ int rc;
+
+ if (attr->ah_flags != IB_AH_GRH)
+ return -EINVAL;
+ if (attr->type != RDMA_AH_ATTR_TYPE_ROCE)
+ return -EINVAL;
+
+ grh = rdma_ah_read_grh(attr);
+
+ rc = rdma_read_gid_l2_fields(grh->sgid_attr, &vlan, &hdr->eth.smac_h[0]);
+ if (rc)
+ return rc;
+
+ net = rdma_gid_attr_network_type(grh->sgid_attr);
+
+ rc = ib_ud_header_init(0, /* no payload */
+ 0, /* no lrh */
+ 1, /* yes eth */
+ vlan != 0xffff,
+ 0, /* no grh */
+ net == RDMA_NETWORK_IPV4 ? 4 : 6,
+ 1, /* yes udp */
+ 0, /* no imm */
+ hdr);
+ if (rc)
+ return rc;
+
+ ether_addr_copy(hdr->eth.dmac_h, attr->roce.dmac);
+
+ if (net == RDMA_NETWORK_IPV4) {
+ hdr->eth.type = cpu_to_be16(ETH_P_IP);
+ hdr->ip4.frag_off = cpu_to_be16(0x4000); /* don't fragment */
+ hdr->ip4.ttl = grh->hop_limit;
+ hdr->ip4.tot_len = cpu_to_be16(0xffff);
+ hdr->ip4.saddr =
+ *(const __be32 *)(grh->sgid_attr->gid.raw + 12);
+ hdr->ip4.daddr = *(const __be32 *)(grh->dgid.raw + 12);
+
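+ /* ionic_set_ecn/ionic_clear_ecn adjust the ECN bits, i.e. the low
+ * two bits of the TOS / traffic class field.
+ */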
+ if (want_ecn)
+ hdr->ip4.tos = ionic_set_ecn(grh->traffic_class);
+ else
+ hdr->ip4.tos = ionic_clear_ecn(grh->traffic_class);
+ } else {
+ hdr->eth.type = cpu_to_be16(ETH_P_IPV6);
+ hdr->grh.flow_label = cpu_to_be32(grh->flow_label);
+ hdr->grh.hop_limit = grh->hop_limit;
+ hdr->grh.source_gid = grh->sgid_attr->gid;
+ hdr->grh.destination_gid = grh->dgid;
+
+ if (want_ecn)
+ hdr->grh.traffic_class =
+ ionic_set_ecn(grh->traffic_class);
+ else
+ hdr->grh.traffic_class =
+ ionic_clear_ecn(grh->traffic_class);
+ }
+
+ if (vlan != 0xffff) {
+ vlan |= rdma_ah_get_sl(attr) << VLAN_PRIO_SHIFT;
+ hdr->vlan.tag = cpu_to_be16(vlan);
+ hdr->vlan.type = hdr->eth.type;
+ hdr->eth.type = cpu_to_be16(ETH_P_8021Q);
+ }
+
+ hdr->udp.sport = cpu_to_be16(sport);
+ hdr->udp.dport = cpu_to_be16(ROCE_V2_UDP_DPORT);
+
+ return 0;
+}
+
+static void ionic_set_ah_attr(struct ionic_ibdev *dev,
+ struct rdma_ah_attr *ah_attr,
+ struct ib_ud_header *hdr,
+ int sgid_index)
+{
+ u32 flow_label;
+ u16 vlan = 0;
+ u8 tos, ttl;
+
+ if (hdr->vlan_present)
+ vlan = be16_to_cpu(hdr->vlan.tag);
+
+ if (hdr->ipv4_present) {
+ flow_label = 0;
+ ttl = hdr->ip4.ttl;
+ tos = hdr->ip4.tos;
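+ /* Rebuild the destination GID as an IPv4-mapped IPv6 address
+ * (::ffff:a.b.c.d) from the IPv4 header.
+ */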
+ *(__be16 *)(hdr->grh.destination_gid.raw + 10) = cpu_to_be16(0xffff);
+ *(__be32 *)(hdr->grh.destination_gid.raw + 12) = hdr->ip4.daddr;
+ } else {
+ flow_label = be32_to_cpu(hdr->grh.flow_label);
+ ttl = hdr->grh.hop_limit;
+ tos = hdr->grh.traffic_class;
+ }
+
+ memset(ah_attr, 0, sizeof(*ah_attr));
+ ah_attr->type = RDMA_AH_ATTR_TYPE_ROCE;
+ if (hdr->eth_present)
+ ether_addr_copy(ah_attr->roce.dmac, hdr->eth.dmac_h);
+ rdma_ah_set_sl(ah_attr, vlan >> VLAN_PRIO_SHIFT);
+ rdma_ah_set_port_num(ah_attr, 1);
+ rdma_ah_set_grh(ah_attr, NULL, flow_label, sgid_index, ttl, tos);
+ rdma_ah_set_dgid_raw(ah_attr, &hdr->grh.destination_gid);
+}
+
+static int ionic_create_ah_cmd(struct ionic_ibdev *dev,
+ struct ionic_ah *ah,
+ struct ionic_pd *pd,
+ struct rdma_ah_attr *attr,
+ u32 flags)
+{
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_CREATE_AH,
+ .len = cpu_to_le16(IONIC_ADMIN_CREATE_AH_IN_V1_LEN),
+ .cmd.create_ah = {
+ .pd_id = cpu_to_le32(pd->pdid),
+ .dbid_flags = cpu_to_le16(dev->lif_cfg.dbid),
+ .id_ver = cpu_to_le32(ah->ahid),
+ }
+ }
+ };
+ enum ionic_admin_flags admin_flags = 0;
+ dma_addr_t hdr_dma = 0;
+ void *hdr_buf;
+ gfp_t gfp = GFP_ATOMIC;
+ int rc, hdr_len = 0;
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_CREATE_AH)
+ return -EBADRQC;
+
+ if (flags & RDMA_CREATE_AH_SLEEPABLE)
+ gfp = GFP_KERNEL;
+ else
+ admin_flags |= IONIC_ADMIN_F_BUSYWAIT;
+
+ rc = ionic_build_hdr(dev, &ah->hdr, attr, IONIC_ROCE_UDP_SPORT, false);
+ if (rc)
+ return rc;
+
+ if (ah->hdr.eth.type == cpu_to_be16(ETH_P_8021Q)) {
+ if (ah->hdr.vlan.type == cpu_to_be16(ETH_P_IP))
+ wr.wqe.cmd.create_ah.csum_profile =
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP;
+ else
+ wr.wqe.cmd.create_ah.csum_profile =
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV6_UDP;
+ } else {
+ if (ah->hdr.eth.type == cpu_to_be16(ETH_P_IP))
+ wr.wqe.cmd.create_ah.csum_profile =
+ IONIC_TFP_CSUM_PROF_ETH_IPV4_UDP;
+ else
+ wr.wqe.cmd.create_ah.csum_profile =
+ IONIC_TFP_CSUM_PROF_ETH_IPV6_UDP;
+ }
+
+ ah->sgid_index = rdma_ah_read_grh(attr)->sgid_index;
+
+ hdr_buf = kmalloc(PAGE_SIZE, gfp);
+ if (!hdr_buf)
+ return -ENOMEM;
+
+ hdr_len = ib_ud_header_pack(&ah->hdr, hdr_buf);
+ hdr_len -= IB_BTH_BYTES;
+ hdr_len -= IB_DETH_BYTES;
+ ibdev_dbg(&dev->ibdev, "roce packet header template\n");
+ print_hex_dump_debug("hdr ", DUMP_PREFIX_OFFSET, 16, 1,
+ hdr_buf, hdr_len, true);
+
+ hdr_dma = dma_map_single(dev->lif_cfg.hwdev, hdr_buf, hdr_len,
+ DMA_TO_DEVICE);
+
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, hdr_dma);
+ if (rc)
+ goto err_dma;
+
+ wr.wqe.cmd.create_ah.dma_addr = cpu_to_le64(hdr_dma);
+ wr.wqe.cmd.create_ah.length = cpu_to_le32(hdr_len);
+
+ ionic_admin_post(dev, &wr);
+ rc = ionic_admin_wait(dev, &wr, admin_flags);
+
+ dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, hdr_len,
+ DMA_TO_DEVICE);
+err_dma:
+ kfree(hdr_buf);
+
+ return rc;
+}
+
+static int ionic_destroy_ah_cmd(struct ionic_ibdev *dev, u32 ahid, u32 flags)
+{
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_DESTROY_AH,
+ .len = cpu_to_le16(IONIC_ADMIN_DESTROY_AH_IN_V1_LEN),
+ .cmd.destroy_ah = {
+ .ah_id = cpu_to_le32(ahid),
+ },
+ }
+ };
+ enum ionic_admin_flags admin_flags = IONIC_ADMIN_F_TEARDOWN;
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_DESTROY_AH)
+ return -EBADRQC;
+
+ if (!(flags & RDMA_CREATE_AH_SLEEPABLE))
+ admin_flags |= IONIC_ADMIN_F_BUSYWAIT;
+
+ ionic_admin_post(dev, &wr);
+ ionic_admin_wait(dev, &wr, admin_flags);
+
+ /* No host-memory resource is associated with the AH, so it is OK
+ * to "succeed" and complete this destroy on the host even if the
+ * device command fails.
+ */
+ return 0;
+}
+
+int ionic_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibah->device);
+ struct rdma_ah_attr *attr = init_attr->ah_attr;
+ struct ionic_pd *pd = to_ionic_pd(ibah->pd);
+ struct ionic_ah *ah = to_ionic_ah(ibah);
+ struct ionic_ah_resp resp = {};
+ u32 flags = init_attr->flags;
+ int rc;
+
+ rc = ionic_get_ahid(dev, &ah->ahid);
+ if (rc)
+ return rc;
+
+ rc = ionic_create_ah_cmd(dev, ah, pd, attr, flags);
+ if (rc)
+ goto err_cmd;
+
+ if (udata) {
+ resp.ahid = ah->ahid;
+
+ rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (rc)
+ goto err_resp;
+ }
+
+ return 0;
+
+err_resp:
+ ionic_destroy_ah_cmd(dev, ah->ahid, flags);
+err_cmd:
+ ionic_put_ahid(dev, ah->ahid);
+ return rc;
+}
+
+int ionic_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibah->device);
+ struct ionic_ah *ah = to_ionic_ah(ibah);
+
+ ionic_set_ah_attr(dev, ah_attr, &ah->hdr, ah->sgid_index);
+
+ return 0;
+}
+
+int ionic_destroy_ah(struct ib_ah *ibah, u32 flags)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibah->device);
+ struct ionic_ah *ah = to_ionic_ah(ibah);
+ int rc;
+
+ rc = ionic_destroy_ah_cmd(dev, ah->ahid, flags);
+ if (rc)
+ return rc;
+
+ ionic_put_ahid(dev, ah->ahid);
+
+ return 0;
+}
+
+static int ionic_create_mr_cmd(struct ionic_ibdev *dev,
+ struct ionic_pd *pd,
+ struct ionic_mr *mr,
+ u64 addr,
+ u64 length)
+{
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_CREATE_MR,
+ .len = cpu_to_le16(IONIC_ADMIN_CREATE_MR_IN_V1_LEN),
+ .cmd.create_mr = {
+ .va = cpu_to_le64(addr),
+ .length = cpu_to_le64(length),
+ .pd_id = cpu_to_le32(pd->pdid),
+ .page_size_log2 = mr->buf.page_size_log2,
+ .tbl_index = cpu_to_le32(~0),
+ .map_count = cpu_to_le32(mr->buf.tbl_pages),
+ .dma_addr = ionic_pgtbl_dma(&mr->buf, addr),
+ .dbid_flags = cpu_to_le16(mr->flags),
+ .id_ver = cpu_to_le32(mr->mrid),
+ }
+ }
+ };
+ int rc;
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_CREATE_MR)
+ return -EBADRQC;
+
+ ionic_admin_post(dev, &wr);
+ rc = ionic_admin_wait(dev, &wr, 0);
+ if (!rc)
+ mr->created = true;
+
+ return rc;
+}
+
+static int ionic_destroy_mr_cmd(struct ionic_ibdev *dev, u32 mrid)
+{
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_DESTROY_MR,
+ .len = cpu_to_le16(IONIC_ADMIN_DESTROY_MR_IN_V1_LEN),
+ .cmd.destroy_mr = {
+ .mr_id = cpu_to_le32(mrid),
+ },
+ }
+ };
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_DESTROY_MR)
+ return -EBADRQC;
+
+ ionic_admin_post(dev, &wr);
+
+ return ionic_admin_wait(dev, &wr, IONIC_ADMIN_F_TEARDOWN);
+}
+
+struct ib_mr *ionic_get_dma_mr(struct ib_pd *ibpd, int access)
+{
+ struct ionic_pd *pd = to_ionic_pd(ibpd);
+ struct ionic_mr *mr;
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ mr->ibmr.lkey = IONIC_DMA_LKEY;
+ mr->ibmr.rkey = IONIC_DMA_RKEY;
+
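+ /* A DMA MR grants access with the reserved lkey; mark the pd
+ * privileged so its QPs are created with that capability.
+ */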
+ if (pd)
+ pd->flags |= IONIC_QPF_PRIVILEGED;
+
+ return &mr->ibmr;
+}
+
+struct ib_mr *ionic_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
+ u64 addr, int access, struct ib_dmah *dmah,
+ struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device);
+ struct ionic_pd *pd = to_ionic_pd(ibpd);
+ struct ionic_mr *mr;
+ unsigned long pg_sz;
+ int rc;
+
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ rc = ionic_get_mrid(dev, &mr->mrid);
+ if (rc)
+ goto err_mrid;
+
+ mr->ibmr.lkey = mr->mrid;
+ mr->ibmr.rkey = mr->mrid;
+ mr->ibmr.iova = addr;
+ mr->ibmr.length = length;
+
+ mr->flags = IONIC_MRF_USER_MR | to_ionic_mr_flags(access);
+
+ mr->umem = ib_umem_get(&dev->ibdev, start, length, access);
+ if (IS_ERR(mr->umem)) {
+ rc = PTR_ERR(mr->umem);
+ goto err_umem;
+ }
+
+ pg_sz = ib_umem_find_best_pgsz(mr->umem,
+ dev->lif_cfg.page_size_supported,
+ addr);
+ if (!pg_sz) {
+ rc = -EINVAL;
+ goto err_pgtbl;
+ }
+
+ rc = ionic_pgtbl_init(dev, &mr->buf, mr->umem, 0, 1, pg_sz);
+ if (rc)
+ goto err_pgtbl;
+
+ rc = ionic_create_mr_cmd(dev, pd, mr, addr, length);
+ if (rc)
+ goto err_cmd;
+
+ ionic_pgtbl_unbuf(dev, &mr->buf);
+
+ return &mr->ibmr;
+
+err_cmd:
+ ionic_pgtbl_unbuf(dev, &mr->buf);
+err_pgtbl:
+ ib_umem_release(mr->umem);
+err_umem:
+ ionic_put_mrid(dev, mr->mrid);
+err_mrid:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
+
+struct ib_mr *ionic_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 offset,
+ u64 length, u64 addr, int fd, int access,
+ struct ib_dmah *dmah,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device);
+ struct ionic_pd *pd = to_ionic_pd(ibpd);
+ struct ib_umem_dmabuf *umem_dmabuf;
+ struct ionic_mr *mr;
+ u64 pg_sz;
+ int rc;
+
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ rc = ionic_get_mrid(dev, &mr->mrid);
+ if (rc)
+ goto err_mrid;
+
+ mr->ibmr.lkey = mr->mrid;
+ mr->ibmr.rkey = mr->mrid;
+ mr->ibmr.iova = addr;
+ mr->ibmr.length = length;
+
+ mr->flags = IONIC_MRF_USER_MR | to_ionic_mr_flags(access);
+
+ umem_dmabuf = ib_umem_dmabuf_get_pinned(&dev->ibdev, offset, length,
+ fd, access);
+ if (IS_ERR(umem_dmabuf)) {
+ rc = PTR_ERR(umem_dmabuf);
+ goto err_umem;
+ }
+
+ mr->umem = &umem_dmabuf->umem;
+
+ pg_sz = ib_umem_find_best_pgsz(mr->umem,
+ dev->lif_cfg.page_size_supported,
+ addr);
+ if (!pg_sz) {
+ rc = -EINVAL;
+ goto err_pgtbl;
+ }
+
+ rc = ionic_pgtbl_init(dev, &mr->buf, mr->umem, 0, 1, pg_sz);
+ if (rc)
+ goto err_pgtbl;
+
+ rc = ionic_create_mr_cmd(dev, pd, mr, addr, length);
+ if (rc)
+ goto err_cmd;
+
+ ionic_pgtbl_unbuf(dev, &mr->buf);
+
+ return &mr->ibmr;
+
+err_cmd:
+ ionic_pgtbl_unbuf(dev, &mr->buf);
+err_pgtbl:
+ ib_umem_release(mr->umem);
+err_umem:
+ ionic_put_mrid(dev, mr->mrid);
+err_mrid:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
+
+int ionic_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibmr->device);
+ struct ionic_mr *mr = to_ionic_mr(ibmr);
+ int rc;
+
+ if (!mr->ibmr.lkey)
+ goto out;
+
+ if (mr->created) {
+ rc = ionic_destroy_mr_cmd(dev, mr->mrid);
+ if (rc)
+ return rc;
+ }
+
+ ionic_pgtbl_unbuf(dev, &mr->buf);
+
+ if (mr->umem)
+ ib_umem_release(mr->umem);
+
+ ionic_put_mrid(dev, mr->mrid);
+
+out:
+ kfree(mr);
+
+ return 0;
+}
+
+struct ib_mr *ionic_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type type,
+ u32 max_sg)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device);
+ struct ionic_pd *pd = to_ionic_pd(ibpd);
+ struct ionic_mr *mr;
+ int rc;
+
+ if (type != IB_MR_TYPE_MEM_REG)
+ return ERR_PTR(-EINVAL);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ rc = ionic_get_mrid(dev, &mr->mrid);
+ if (rc)
+ goto err_mrid;
+
+ mr->ibmr.lkey = mr->mrid;
+ mr->ibmr.rkey = mr->mrid;
+
+ mr->flags = IONIC_MRF_PHYS_MR;
+
+ rc = ionic_pgtbl_init(dev, &mr->buf, mr->umem, 0, max_sg, PAGE_SIZE);
+ if (rc)
+ goto err_pgtbl;
+
+ mr->buf.tbl_pages = 0;
+
+ rc = ionic_create_mr_cmd(dev, pd, mr, 0, 0);
+ if (rc)
+ goto err_cmd;
+
+ return &mr->ibmr;
+
+err_cmd:
+ ionic_pgtbl_unbuf(dev, &mr->buf);
+err_pgtbl:
+ ionic_put_mrid(dev, mr->mrid);
+err_mrid:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
+
+static int ionic_map_mr_page(struct ib_mr *ibmr, u64 dma)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibmr->device);
+ struct ionic_mr *mr = to_ionic_mr(ibmr);
+
+ ibdev_dbg(&dev->ibdev, "dma %p\n", (void *)dma);
+ return ionic_pgtbl_page(&mr->buf, dma);
+}
+
+int ionic_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibmr->device);
+ struct ionic_mr *mr = to_ionic_mr(ibmr);
+ int rc;
+
+ /* mr must be allocated using ib_alloc_mr() */
+ if (unlikely(!mr->buf.tbl_limit))
+ return -EINVAL;
+
+ mr->buf.tbl_pages = 0;
+
+ if (mr->buf.tbl_buf)
+ dma_sync_single_for_cpu(dev->lif_cfg.hwdev, mr->buf.tbl_dma,
+ mr->buf.tbl_size, DMA_TO_DEVICE);
+
+ ibdev_dbg(&dev->ibdev, "sg %p nent %d\n", sg, sg_nents);
+ rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, ionic_map_mr_page);
+
+ mr->buf.page_size_log2 = order_base_2(ibmr->page_size);
+
+ if (mr->buf.tbl_buf)
+ dma_sync_single_for_device(dev->lif_cfg.hwdev, mr->buf.tbl_dma,
+ mr->buf.tbl_size, DMA_TO_DEVICE);
+
+ return rc;
+}
+
+int ionic_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibmw->device);
+ struct ionic_pd *pd = to_ionic_pd(ibmw->pd);
+ struct ionic_mr *mr = to_ionic_mw(ibmw);
+ int rc;
+
+ rc = ionic_get_mrid(dev, &mr->mrid);
+ if (rc)
+ return rc;
+
+ mr->ibmw.rkey = mr->mrid;
+
+ if (mr->ibmw.type == IB_MW_TYPE_1)
+ mr->flags = IONIC_MRF_MW_1;
+ else
+ mr->flags = IONIC_MRF_MW_2;
+
+ rc = ionic_create_mr_cmd(dev, pd, mr, 0, 0);
+ if (rc)
+ goto err_cmd;
+
+ return 0;
+
+err_cmd:
+ ionic_put_mrid(dev, mr->mrid);
+ return rc;
+}
+
+int ionic_dealloc_mw(struct ib_mw *ibmw)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibmw->device);
+ struct ionic_mr *mr = to_ionic_mw(ibmw);
+ int rc;
+
+ rc = ionic_destroy_mr_cmd(dev, mr->mrid);
+ if (rc)
+ return rc;
+
+ ionic_put_mrid(dev, mr->mrid);
+
+ return 0;
+}
+
+static int ionic_create_cq_cmd(struct ionic_ibdev *dev,
+ struct ionic_ctx *ctx,
+ struct ionic_cq *cq,
+ struct ionic_tbl_buf *buf)
+{
+ const u16 dbid = ionic_ctx_dbid(dev, ctx);
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_CREATE_CQ,
+ .len = cpu_to_le16(IONIC_ADMIN_CREATE_CQ_IN_V1_LEN),
+ .cmd.create_cq = {
+ .eq_id = cpu_to_le32(cq->eqid),
+ .depth_log2 = cq->q.depth_log2,
+ .stride_log2 = cq->q.stride_log2,
+ .page_size_log2 = buf->page_size_log2,
+ .tbl_index = cpu_to_le32(~0),
+ .map_count = cpu_to_le32(buf->tbl_pages),
+ .dma_addr = ionic_pgtbl_dma(buf, 0),
+ .dbid_flags = cpu_to_le16(dbid),
+ .id_ver = cpu_to_le32(cq->cqid),
+ }
+ }
+ };
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_CREATE_CQ)
+ return -EBADRQC;
+
+ ionic_admin_post(dev, &wr);
+
+ return ionic_admin_wait(dev, &wr, 0);
+}
+
+static int ionic_destroy_cq_cmd(struct ionic_ibdev *dev, u32 cqid)
+{
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_DESTROY_CQ,
+ .len = cpu_to_le16(IONIC_ADMIN_DESTROY_CQ_IN_V1_LEN),
+ .cmd.destroy_cq = {
+ .cq_id = cpu_to_le32(cqid),
+ },
+ }
+ };
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_DESTROY_CQ)
+ return -EBADRQC;
+
+ ionic_admin_post(dev, &wr);
+
+ return ionic_admin_wait(dev, &wr, IONIC_ADMIN_F_TEARDOWN);
+}
+
+int ionic_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibcq->device);
+ struct ib_udata *udata = &attrs->driver_udata;
+ struct ionic_ctx *ctx =
+ rdma_udata_to_drv_context(udata, struct ionic_ctx, ibctx);
+ struct ionic_vcq *vcq = to_ionic_vcq(ibcq);
+ struct ionic_tbl_buf buf = {};
+ struct ionic_cq_resp resp = {};
+ struct ionic_cq_req req = {};
+ int udma_idx = 0, rc;
+
+ if (udata) {
+ rc = ib_copy_from_udata(&req, udata, sizeof(req));
+ if (rc)
+ return rc;
+ }
+
+ vcq->udma_mask = BIT(dev->lif_cfg.udma_count) - 1;
+
+ if (udata)
+ vcq->udma_mask &= req.udma_mask;
+
+ if (!vcq->udma_mask) {
+ rc = -EINVAL;
+ goto err_init;
+ }
+
+ for (; udma_idx < dev->lif_cfg.udma_count; ++udma_idx) {
+ if (!(vcq->udma_mask & BIT(udma_idx)))
+ continue;
+
+ rc = ionic_create_cq_common(vcq, &buf, attr, ctx, udata,
+ &req.cq[udma_idx],
+ &resp.cqid[udma_idx],
+ udma_idx);
+ if (rc)
+ goto err_init;
+
+ rc = ionic_create_cq_cmd(dev, ctx, &vcq->cq[udma_idx], &buf);
+ if (rc)
+ goto err_cmd;
+
+ ionic_pgtbl_unbuf(dev, &buf);
+ }
+
+ vcq->ibcq.cqe = attr->cqe;
+
+ if (udata) {
+ resp.udma_mask = vcq->udma_mask;
+
+ rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (rc)
+ goto err_resp;
+ }
+
+ return 0;
+
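+ /* Unwind in reverse. The labels sit inside the loop so that a
+ * failure partway through iteration udma_idx first tears down
+ * whatever that iteration had created, then the loop unwinds the
+ * lower udma indexes completely.
+ */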
+err_resp:
+ while (udma_idx) {
+ --udma_idx;
+ if (!(vcq->udma_mask & BIT(udma_idx)))
+ continue;
+ ionic_destroy_cq_cmd(dev, vcq->cq[udma_idx].cqid);
+err_cmd:
+ ionic_pgtbl_unbuf(dev, &buf);
+ ionic_destroy_cq_common(dev, &vcq->cq[udma_idx]);
+err_init:
+ ;
+ }
+
+ return rc;
+}
+
+int ionic_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibcq->device);
+ struct ionic_vcq *vcq = to_ionic_vcq(ibcq);
+ int udma_idx, rc_tmp, rc = 0;
+
+ for (udma_idx = dev->lif_cfg.udma_count; udma_idx; ) {
+ --udma_idx;
+
+ if (!(vcq->udma_mask & BIT(udma_idx)))
+ continue;
+
+ rc_tmp = ionic_destroy_cq_cmd(dev, vcq->cq[udma_idx].cqid);
+ if (rc_tmp) {
+ if (!rc)
+ rc = rc_tmp;
+
+ continue;
+ }
+
+ ionic_destroy_cq_common(dev, &vcq->cq[udma_idx]);
+ }
+
+ return rc;
+}
+
+static bool pd_remote_privileged(struct ib_pd *pd)
+{
+ return pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
+}
+
+static int ionic_create_qp_cmd(struct ionic_ibdev *dev,
+ struct ionic_pd *pd,
+ struct ionic_cq *send_cq,
+ struct ionic_cq *recv_cq,
+ struct ionic_qp *qp,
+ struct ionic_tbl_buf *sq_buf,
+ struct ionic_tbl_buf *rq_buf,
+ struct ib_qp_init_attr *attr)
+{
+ const u16 dbid = ionic_obj_dbid(dev, pd->ibpd.uobject);
+ const u32 flags = to_ionic_qp_flags(0, 0,
+ qp->sq_cmb & IONIC_CMB_ENABLE,
+ qp->rq_cmb & IONIC_CMB_ENABLE,
+ qp->sq_spec, qp->rq_spec,
+ pd->flags & IONIC_QPF_PRIVILEGED,
+ pd_remote_privileged(&pd->ibpd));
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_CREATE_QP,
+ .len = cpu_to_le16(IONIC_ADMIN_CREATE_QP_IN_V1_LEN),
+ .cmd.create_qp = {
+ .pd_id = cpu_to_le32(pd->pdid),
+ .priv_flags = cpu_to_be32(flags),
+ .type_state = to_ionic_qp_type(attr->qp_type),
+ .dbid_flags = cpu_to_le16(dbid),
+ .id_ver = cpu_to_le32(qp->qpid),
+ }
+ }
+ };
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_CREATE_QP)
+ return -EBADRQC;
+
+ if (qp->has_sq) {
+ wr.wqe.cmd.create_qp.sq_cq_id = cpu_to_le32(send_cq->cqid);
+ wr.wqe.cmd.create_qp.sq_depth_log2 = qp->sq.depth_log2;
+ wr.wqe.cmd.create_qp.sq_stride_log2 = qp->sq.stride_log2;
+ wr.wqe.cmd.create_qp.sq_page_size_log2 = sq_buf->page_size_log2;
+ wr.wqe.cmd.create_qp.sq_tbl_index_xrcd_id = cpu_to_le32(~0);
+ wr.wqe.cmd.create_qp.sq_map_count =
+ cpu_to_le32(sq_buf->tbl_pages);
+ wr.wqe.cmd.create_qp.sq_dma_addr = ionic_pgtbl_dma(sq_buf, 0);
+ }
+
+ if (qp->has_rq) {
+ wr.wqe.cmd.create_qp.rq_cq_id = cpu_to_le32(recv_cq->cqid);
+ wr.wqe.cmd.create_qp.rq_depth_log2 = qp->rq.depth_log2;
+ wr.wqe.cmd.create_qp.rq_stride_log2 = qp->rq.stride_log2;
+ wr.wqe.cmd.create_qp.rq_page_size_log2 = rq_buf->page_size_log2;
+ wr.wqe.cmd.create_qp.rq_tbl_index_srq_id = cpu_to_le32(~0);
+ wr.wqe.cmd.create_qp.rq_map_count =
+ cpu_to_le32(rq_buf->tbl_pages);
+ wr.wqe.cmd.create_qp.rq_dma_addr = ionic_pgtbl_dma(rq_buf, 0);
+ }
+
+ ionic_admin_post(dev, &wr);
+
+ return ionic_admin_wait(dev, &wr, 0);
+}
+
+static int ionic_modify_qp_cmd(struct ionic_ibdev *dev,
+ struct ionic_pd *pd,
+ struct ionic_qp *qp,
+ struct ib_qp_attr *attr,
+ int mask)
+{
+ const u32 flags = to_ionic_qp_flags(attr->qp_access_flags,
+ attr->en_sqd_async_notify,
+ qp->sq_cmb & IONIC_CMB_ENABLE,
+ qp->rq_cmb & IONIC_CMB_ENABLE,
+ qp->sq_spec, qp->rq_spec,
+ pd->flags & IONIC_QPF_PRIVILEGED,
+ pd_remote_privileged(qp->ibqp.pd));
+ const u8 state = to_ionic_qp_modify_state(attr->qp_state,
+ attr->cur_qp_state);
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_MODIFY_QP,
+ .len = cpu_to_le16(IONIC_ADMIN_MODIFY_QP_IN_V1_LEN),
+ .cmd.mod_qp = {
+ .attr_mask = cpu_to_be32(mask),
+ .access_flags = cpu_to_be16(flags),
+ .rq_psn = cpu_to_le32(attr->rq_psn),
+ .sq_psn = cpu_to_le32(attr->sq_psn),
+ .rate_limit_kbps =
+ cpu_to_le32(attr->rate_limit),
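+ /* enum ib_mtu + 7 gives log2 of the MTU in bytes,
+ * e.g. IB_MTU_1024 (3) -> 10
+ */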
+ .pmtu = (attr->path_mtu + 7),
+ .retry = (attr->retry_cnt |
+ (attr->rnr_retry << 4)),
+ .rnr_timer = attr->min_rnr_timer,
+ .retry_timeout = attr->timeout,
+ .type_state = state,
+ .id_ver = cpu_to_le32(qp->qpid),
+ }
+ }
+ };
+ const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
+ void *hdr_buf = NULL;
+ dma_addr_t hdr_dma = 0;
+ int rc, hdr_len = 0;
+ u16 sport;
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_MODIFY_QP)
+ return -EBADRQC;
+
+ if ((mask & IB_QP_MAX_DEST_RD_ATOMIC) && attr->max_dest_rd_atomic) {
+ /* Note: rounding up/down was already done when allocating
+ * resources on the device, in units of cache line size. We
+ * can't use the resource allocation order to determine the
+ * order of wqes here, because for queue lengths up to one
+ * cache line the two are not distinct.
+ *
+ * Therefore, the wqe order is computed again here.
+ *
+ * Account for the hole and round up to the next order.
+ */
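+ /* e.g. max_dest_rd_atomic = 4: 4 + 1 = 5, order_base_2(5) = 3, RSQ depth 8 */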
+ wr.wqe.cmd.mod_qp.rsq_depth =
+ order_base_2(attr->max_dest_rd_atomic + 1);
+ wr.wqe.cmd.mod_qp.rsq_index = cpu_to_le32(~0);
+ }
+
+ if ((mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
+ /* Account for hole and round down to the next order */
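+ /* e.g. max_rd_atomic = 4: 4 + 2 = 6, order_base_2(6) - 1 = 2, RRQ depth 4 */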
+ wr.wqe.cmd.mod_qp.rrq_depth =
+ order_base_2(attr->max_rd_atomic + 2) - 1;
+ wr.wqe.cmd.mod_qp.rrq_index = cpu_to_le32(~0);
+ }
+
+ if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
+ wr.wqe.cmd.mod_qp.qkey_dest_qpn =
+ cpu_to_le32(attr->dest_qp_num);
+ else
+ wr.wqe.cmd.mod_qp.qkey_dest_qpn = cpu_to_le32(attr->qkey);
+
+ if (mask & IB_QP_AV) {
+ if (!qp->hdr)
+ return -ENOMEM;
+
+ sport = rdma_get_udp_sport(grh->flow_label,
+ qp->qpid,
+ attr->dest_qp_num);
+
+ rc = ionic_build_hdr(dev, qp->hdr, &attr->ah_attr, sport, true);
+ if (rc)
+ return rc;
+
+ qp->sgid_index = grh->sgid_index;
+
+ hdr_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!hdr_buf)
+ return -ENOMEM;
+
+ hdr_len = ib_ud_header_pack(qp->hdr, hdr_buf);
+ hdr_len -= IB_BTH_BYTES;
+ hdr_len -= IB_DETH_BYTES;
+ ibdev_dbg(&dev->ibdev, "roce packet header template\n");
+ print_hex_dump_debug("hdr ", DUMP_PREFIX_OFFSET, 16, 1,
+ hdr_buf, hdr_len, true);
+
+ hdr_dma = dma_map_single(dev->lif_cfg.hwdev, hdr_buf, hdr_len,
+ DMA_TO_DEVICE);
+
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, hdr_dma);
+ if (rc)
+ goto err_dma;
+
+ if (qp->hdr->ipv4_present) {
+ wr.wqe.cmd.mod_qp.tfp_csum_profile =
+ qp->hdr->vlan_present ?
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP :
+ IONIC_TFP_CSUM_PROF_ETH_IPV4_UDP;
+ } else {
+ wr.wqe.cmd.mod_qp.tfp_csum_profile =
+ qp->hdr->vlan_present ?
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV6_UDP :
+ IONIC_TFP_CSUM_PROF_ETH_IPV6_UDP;
+ }
+
+ wr.wqe.cmd.mod_qp.ah_id_len =
+ cpu_to_le32(qp->ahid | (hdr_len << 24));
+ wr.wqe.cmd.mod_qp.dma_addr = cpu_to_le64(hdr_dma);
+
+ wr.wqe.cmd.mod_qp.en_pcp = attr->ah_attr.sl;
+ wr.wqe.cmd.mod_qp.ip_dscp = grh->traffic_class >> 2;
+ }
+
+ ionic_admin_post(dev, &wr);
+
+ rc = ionic_admin_wait(dev, &wr, 0);
+
+ if (mask & IB_QP_AV)
+ dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, hdr_len,
+ DMA_TO_DEVICE);
+err_dma:
+ if (mask & IB_QP_AV)
+ kfree(hdr_buf);
+
+ return rc;
+}
+
+static int ionic_query_qp_cmd(struct ionic_ibdev *dev,
+ struct ionic_qp *qp,
+ struct ib_qp_attr *attr,
+ int mask)
+{
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_QUERY_QP,
+ .len = cpu_to_le16(IONIC_ADMIN_QUERY_QP_IN_V1_LEN),
+ .cmd.query_qp = {
+ .id_ver = cpu_to_le32(qp->qpid),
+ },
+ }
+ };
+ struct ionic_v1_admin_query_qp_sq *query_sqbuf;
+ struct ionic_v1_admin_query_qp_rq *query_rqbuf;
+ dma_addr_t query_sqdma;
+ dma_addr_t query_rqdma;
+ dma_addr_t hdr_dma = 0;
+ void *hdr_buf = NULL;
+ int flags, rc;
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_QUERY_QP)
+ return -EBADRQC;
+
+ if (qp->has_sq) {
+ bool expdb = !!(qp->sq_cmb & IONIC_CMB_EXPDB);
+
+ attr->cap.max_send_sge =
+ ionic_v1_send_wqe_max_sge(qp->sq.stride_log2,
+ qp->sq_spec,
+ expdb);
+ attr->cap.max_inline_data =
+ ionic_v1_send_wqe_max_data(qp->sq.stride_log2, expdb);
+ }
+
+ if (qp->has_rq) {
+ attr->cap.max_recv_sge =
+ ionic_v1_recv_wqe_max_sge(qp->rq.stride_log2,
+ qp->rq_spec,
+ qp->rq_cmb & IONIC_CMB_EXPDB);
+ }
+
+ query_sqbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!query_sqbuf)
+ return -ENOMEM;
+
+ query_rqbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!query_rqbuf) {
+ rc = -ENOMEM;
+ goto err_rqbuf;
+ }
+
+ query_sqdma = dma_map_single(dev->lif_cfg.hwdev, query_sqbuf, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, query_sqdma);
+ if (rc)
+ goto err_sqdma;
+
+ query_rqdma = dma_map_single(dev->lif_cfg.hwdev, query_rqbuf, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, query_rqdma);
+ if (rc)
+ goto err_rqdma;
+
+ if (mask & IB_QP_AV) {
+ hdr_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!hdr_buf) {
+ rc = -ENOMEM;
+ goto err_hdrbuf;
+ }
+
+ hdr_dma = dma_map_single(dev->lif_cfg.hwdev, hdr_buf,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, hdr_dma);
+ if (rc)
+ goto err_hdrdma;
+ }
+
+ wr.wqe.cmd.query_qp.sq_dma_addr = cpu_to_le64(query_sqdma);
+ wr.wqe.cmd.query_qp.rq_dma_addr = cpu_to_le64(query_rqdma);
+ wr.wqe.cmd.query_qp.hdr_dma_addr = cpu_to_le64(hdr_dma);
+ wr.wqe.cmd.query_qp.ah_id = cpu_to_le32(qp->ahid);
+
+ ionic_admin_post(dev, &wr);
+
+ rc = ionic_admin_wait(dev, &wr, 0);
+
+ if (rc)
+ goto err_hdrdma;
+
+ flags = be16_to_cpu(query_sqbuf->access_perms_flags |
+ query_rqbuf->access_perms_flags);
+
+ print_hex_dump_debug("sqbuf ", DUMP_PREFIX_OFFSET, 16, 1,
+ query_sqbuf, sizeof(*query_sqbuf), true);
+ print_hex_dump_debug("rqbuf ", DUMP_PREFIX_OFFSET, 16, 1,
+ query_rqbuf, sizeof(*query_rqbuf), true);
+ ibdev_dbg(&dev->ibdev, "query qp %u state_pmtu %#x flags %#x",
+ qp->qpid, query_rqbuf->state_pmtu, flags);
+
+ attr->qp_state = from_ionic_qp_state(query_rqbuf->state_pmtu >> 4);
+ attr->cur_qp_state = attr->qp_state;
+ attr->path_mtu = (query_rqbuf->state_pmtu & 0xf) - 7;
+ attr->path_mig_state = IB_MIG_MIGRATED;
+ attr->qkey = be32_to_cpu(query_sqbuf->qkey_dest_qpn);
+ attr->rq_psn = be32_to_cpu(query_sqbuf->rq_psn);
+ attr->sq_psn = be32_to_cpu(query_rqbuf->sq_psn);
+ attr->dest_qp_num = attr->qkey;
+ attr->qp_access_flags = from_ionic_qp_flags(flags);
+ attr->pkey_index = 0;
+ attr->alt_pkey_index = 0;
+ attr->en_sqd_async_notify = !!(flags & IONIC_QPF_SQD_NOTIFY);
+ attr->sq_draining = !!(flags & IONIC_QPF_SQ_DRAINING);
+ attr->max_rd_atomic = BIT(query_rqbuf->rrq_depth) - 1;
+ attr->max_dest_rd_atomic = BIT(query_rqbuf->rsq_depth) - 1;
+ attr->min_rnr_timer = query_sqbuf->rnr_timer;
+ attr->port_num = 0;
+ attr->timeout = query_sqbuf->retry_timeout;
+ attr->retry_cnt = query_rqbuf->retry_rnrtry & 0xf;
+ attr->rnr_retry = query_rqbuf->retry_rnrtry >> 4;
+ attr->alt_port_num = 0;
+ attr->alt_timeout = 0;
+ attr->rate_limit = be32_to_cpu(query_sqbuf->rate_limit_kbps);
+
+ if (mask & IB_QP_AV)
+ ionic_set_ah_attr(dev, &attr->ah_attr,
+ qp->hdr, qp->sgid_index);
+
+err_hdrdma:
+ if (mask & IB_QP_AV) {
+ dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ kfree(hdr_buf);
+ }
+err_hdrbuf:
+ dma_unmap_single(dev->lif_cfg.hwdev, query_rqdma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+err_rqdma:
+ dma_unmap_single(dev->lif_cfg.hwdev, query_sqdma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+err_sqdma:
+ kfree(query_rqbuf);
+err_rqbuf:
+ kfree(query_sqbuf);
+
+ return rc;
+}
+
+static int ionic_destroy_qp_cmd(struct ionic_ibdev *dev, u32 qpid)
+{
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_DESTROY_QP,
+ .len = cpu_to_le16(IONIC_ADMIN_DESTROY_QP_IN_V1_LEN),
+ .cmd.destroy_qp = {
+ .qp_id = cpu_to_le32(qpid),
+ },
+ }
+ };
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_DESTROY_QP)
+ return -EBADRQC;
+
+ ionic_admin_post(dev, &wr);
+
+ return ionic_admin_wait(dev, &wr, IONIC_ADMIN_F_TEARDOWN);
+}
+
+static bool ionic_expdb_wqe_size_supported(struct ionic_ibdev *dev,
+ u32 wqe_size)
+{
+ switch (wqe_size) {
+ case 64: return dev->lif_cfg.expdb_mask & IONIC_EXPDB_64;
+ case 128: return dev->lif_cfg.expdb_mask & IONIC_EXPDB_128;
+ case 256: return dev->lif_cfg.expdb_mask & IONIC_EXPDB_256;
+ case 512: return dev->lif_cfg.expdb_mask & IONIC_EXPDB_512;
+ }
+
+ return false;
+}
+
+static void ionic_qp_sq_init_cmb(struct ionic_ibdev *dev,
+ struct ionic_qp *qp,
+ struct ib_udata *udata,
+ int max_data)
+{
+ u8 expdb_stride_log2 = 0;
+ bool expdb;
+ int rc;
+
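+ /* Each unsupported CMB attribute is silently dropped unless the
+ * user set IONIC_CMB_REQUIRE, in which case the queue falls back
+ * to host memory via not_in_cmb.
+ */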
+ if (!(qp->sq_cmb & IONIC_CMB_ENABLE))
+ goto not_in_cmb;
+
+ if (qp->sq_cmb & ~IONIC_CMB_SUPPORTED) {
+ if (qp->sq_cmb & IONIC_CMB_REQUIRE)
+ goto not_in_cmb;
+
+ qp->sq_cmb &= IONIC_CMB_SUPPORTED;
+ }
+
+ if ((qp->sq_cmb & IONIC_CMB_EXPDB) && !dev->lif_cfg.sq_expdb) {
+ if (qp->sq_cmb & IONIC_CMB_REQUIRE)
+ goto not_in_cmb;
+
+ qp->sq_cmb &= ~IONIC_CMB_EXPDB;
+ }
+
+ qp->sq_cmb_order = order_base_2(qp->sq.size / PAGE_SIZE);
+
+ if (qp->sq_cmb_order >= IONIC_SQCMB_ORDER)
+ goto not_in_cmb;
+
+ if (qp->sq_cmb & IONIC_CMB_EXPDB)
+ expdb_stride_log2 = qp->sq.stride_log2;
+
+ rc = ionic_get_cmb(dev->lif_cfg.lif, &qp->sq_cmb_pgid,
+ &qp->sq_cmb_addr, qp->sq_cmb_order,
+ expdb_stride_log2, &expdb);
+ if (rc)
+ goto not_in_cmb;
+
+ if ((qp->sq_cmb & IONIC_CMB_EXPDB) && !expdb) {
+ if (qp->sq_cmb & IONIC_CMB_REQUIRE)
+ goto err_map;
+
+ qp->sq_cmb &= ~IONIC_CMB_EXPDB;
+ }
+
+ return;
+
+err_map:
+ ionic_put_cmb(dev->lif_cfg.lif, qp->sq_cmb_pgid, qp->sq_cmb_order);
+not_in_cmb:
+ if (qp->sq_cmb & IONIC_CMB_REQUIRE)
+ ibdev_dbg(&dev->ibdev, "could not place sq in cmb as required\n");
+
+ qp->sq_cmb = 0;
+ qp->sq_cmb_order = IONIC_RES_INVALID;
+ qp->sq_cmb_pgid = 0;
+ qp->sq_cmb_addr = 0;
+}
+
+static void ionic_qp_sq_destroy_cmb(struct ionic_ibdev *dev,
+ struct ionic_ctx *ctx,
+ struct ionic_qp *qp)
+{
+ if (!(qp->sq_cmb & IONIC_CMB_ENABLE))
+ return;
+
+ if (ctx)
+ rdma_user_mmap_entry_remove(qp->mmap_sq_cmb);
+
+ ionic_put_cmb(dev->lif_cfg.lif, qp->sq_cmb_pgid, qp->sq_cmb_order);
+}
+
+static int ionic_qp_sq_init(struct ionic_ibdev *dev, struct ionic_ctx *ctx,
+ struct ionic_qp *qp, struct ionic_qdesc *sq,
+ struct ionic_tbl_buf *buf, int max_wr, int max_sge,
+ int max_data, int sq_spec, struct ib_udata *udata)
+{
+ u32 wqe_size;
+ int rc = 0;
+
+ qp->sq_msn_prod = 0;
+ qp->sq_msn_cons = 0;
+
+ if (!qp->has_sq) {
+ if (buf) {
+ buf->tbl_buf = NULL;
+ buf->tbl_limit = 0;
+ buf->tbl_pages = 0;
+ }
+ if (udata)
+ rc = ionic_validate_qdesc_zero(sq);
+
+ return rc;
+ }
+
+ rc = -EINVAL;
+
+ if (max_wr < 0 || max_wr > 0xffff)
+ return rc;
+
+ if (max_sge < 1)
+ return rc;
+
+ if (max_sge > min(ionic_v1_send_wqe_max_sge(dev->lif_cfg.max_stride, 0,
+ qp->sq_cmb &
+ IONIC_CMB_EXPDB),
+ IONIC_SPEC_HIGH))
+ return rc;
+
+ if (max_data < 0)
+ return rc;
+
+ if (max_data > ionic_v1_send_wqe_max_data(dev->lif_cfg.max_stride,
+ qp->sq_cmb & IONIC_CMB_EXPDB))
+ return rc;
+
+ if (udata) {
+ rc = ionic_validate_qdesc(sq);
+ if (rc)
+ return rc;
+
+ qp->sq_spec = sq_spec;
+
+ qp->sq.ptr = NULL;
+ qp->sq.size = sq->size;
+ qp->sq.mask = sq->mask;
+ qp->sq.depth_log2 = sq->depth_log2;
+ qp->sq.stride_log2 = sq->stride_log2;
+
+ qp->sq_meta = NULL;
+ qp->sq_msn_idx = NULL;
+
+ qp->sq_umem = ib_umem_get(&dev->ibdev, sq->addr, sq->size, 0);
+ if (IS_ERR(qp->sq_umem))
+ return PTR_ERR(qp->sq_umem);
+ } else {
+ qp->sq_umem = NULL;
+
+ qp->sq_spec = ionic_v1_use_spec_sge(max_sge, sq_spec);
+ if (sq_spec && !qp->sq_spec)
+ ibdev_dbg(&dev->ibdev,
+ "init sq: max_sge %u disables spec\n",
+ max_sge);
+
+ if (qp->sq_cmb & IONIC_CMB_EXPDB) {
+ wqe_size = ionic_v1_send_wqe_min_size(max_sge, max_data,
+ qp->sq_spec,
+ true);
+
+ if (!ionic_expdb_wqe_size_supported(dev, wqe_size))
+ qp->sq_cmb &= ~IONIC_CMB_EXPDB;
+ }
+
+ if (!(qp->sq_cmb & IONIC_CMB_EXPDB))
+ wqe_size = ionic_v1_send_wqe_min_size(max_sge, max_data,
+ qp->sq_spec,
+ false);
+
+ rc = ionic_queue_init(&qp->sq, dev->lif_cfg.hwdev,
+ max_wr, wqe_size);
+ if (rc)
+ return rc;
+
+ ionic_queue_dbell_init(&qp->sq, qp->qpid);
+
+ qp->sq_meta = kmalloc_array((u32)qp->sq.mask + 1,
+ sizeof(*qp->sq_meta),
+ GFP_KERNEL);
+ if (!qp->sq_meta) {
+ rc = -ENOMEM;
+ goto err_sq_meta;
+ }
+
+ qp->sq_msn_idx = kmalloc_array((u32)qp->sq.mask + 1,
+ sizeof(*qp->sq_msn_idx),
+ GFP_KERNEL);
+ if (!qp->sq_msn_idx) {
+ rc = -ENOMEM;
+ goto err_sq_msn;
+ }
+ }
+
+ ionic_qp_sq_init_cmb(dev, qp, udata, max_data);
+
+ if (qp->sq_cmb & IONIC_CMB_ENABLE)
+ rc = ionic_pgtbl_init(dev, buf, NULL,
+ (u64)qp->sq_cmb_pgid << PAGE_SHIFT,
+ 1, PAGE_SIZE);
+ else
+ rc = ionic_pgtbl_init(dev, buf,
+ qp->sq_umem, qp->sq.dma, 1, PAGE_SIZE);
+ if (rc)
+ goto err_sq_tbl;
+
+ return 0;
+
+err_sq_tbl:
+ ionic_qp_sq_destroy_cmb(dev, ctx, qp);
+ kfree(qp->sq_msn_idx);
+err_sq_msn:
+ kfree(qp->sq_meta);
+err_sq_meta:
+ if (qp->sq_umem)
+ ib_umem_release(qp->sq_umem);
+ else
+ ionic_queue_destroy(&qp->sq, dev->lif_cfg.hwdev);
+ return rc;
+}
+
+static void ionic_qp_sq_destroy(struct ionic_ibdev *dev,
+ struct ionic_ctx *ctx,
+ struct ionic_qp *qp)
+{
+ if (!qp->has_sq)
+ return;
+
+ ionic_qp_sq_destroy_cmb(dev, ctx, qp);
+
+ kfree(qp->sq_msn_idx);
+ kfree(qp->sq_meta);
+
+ if (qp->sq_umem)
+ ib_umem_release(qp->sq_umem);
+ else
+ ionic_queue_destroy(&qp->sq, dev->lif_cfg.hwdev);
+}
+
+static void ionic_qp_rq_init_cmb(struct ionic_ibdev *dev,
+ struct ionic_qp *qp,
+ struct ib_udata *udata)
+{
+ u8 expdb_stride_log2 = 0;
+ bool expdb;
+ int rc;
+
+ if (!(qp->rq_cmb & IONIC_CMB_ENABLE))
+ goto not_in_cmb;
+
+ if (qp->rq_cmb & ~IONIC_CMB_SUPPORTED) {
+ if (qp->rq_cmb & IONIC_CMB_REQUIRE)
+ goto not_in_cmb;
+
+ qp->rq_cmb &= IONIC_CMB_SUPPORTED;
+ }
+
+ if ((qp->rq_cmb & IONIC_CMB_EXPDB) && !dev->lif_cfg.rq_expdb) {
+ if (qp->rq_cmb & IONIC_CMB_REQUIRE)
+ goto not_in_cmb;
+
+ qp->rq_cmb &= ~IONIC_CMB_EXPDB;
+ }
+
+ qp->rq_cmb_order = order_base_2(qp->rq.size / PAGE_SIZE);
+
+ if (qp->rq_cmb_order >= IONIC_RQCMB_ORDER)
+ goto not_in_cmb;
+
+ if (qp->rq_cmb & IONIC_CMB_EXPDB)
+ expdb_stride_log2 = qp->rq.stride_log2;
+
+ rc = ionic_get_cmb(dev->lif_cfg.lif, &qp->rq_cmb_pgid,
+ &qp->rq_cmb_addr, qp->rq_cmb_order,
+ expdb_stride_log2, &expdb);
+ if (rc)
+ goto not_in_cmb;
+
+ if ((qp->rq_cmb & IONIC_CMB_EXPDB) && !expdb) {
+ if (qp->rq_cmb & IONIC_CMB_REQUIRE)
+ goto err_map;
+
+ qp->rq_cmb &= ~IONIC_CMB_EXPDB;
+ }
+
+ return;
+
+err_map:
+ ionic_put_cmb(dev->lif_cfg.lif, qp->rq_cmb_pgid, qp->rq_cmb_order);
+not_in_cmb:
+ if (qp->rq_cmb & IONIC_CMB_REQUIRE)
+ ibdev_dbg(&dev->ibdev, "could not place rq in cmb as required\n");
+
+ qp->rq_cmb = 0;
+ qp->rq_cmb_order = IONIC_RES_INVALID;
+ qp->rq_cmb_pgid = 0;
+ qp->rq_cmb_addr = 0;
+}
+
+static void ionic_qp_rq_destroy_cmb(struct ionic_ibdev *dev,
+ struct ionic_ctx *ctx,
+ struct ionic_qp *qp)
+{
+ if (!(qp->rq_cmb & IONIC_CMB_ENABLE))
+ return;
+
+ if (ctx)
+ rdma_user_mmap_entry_remove(qp->mmap_rq_cmb);
+
+ ionic_put_cmb(dev->lif_cfg.lif, qp->rq_cmb_pgid, qp->rq_cmb_order);
+}
+
+static int ionic_qp_rq_init(struct ionic_ibdev *dev, struct ionic_ctx *ctx,
+ struct ionic_qp *qp, struct ionic_qdesc *rq,
+ struct ionic_tbl_buf *buf, int max_wr, int max_sge,
+ int rq_spec, struct ib_udata *udata)
+{
+ int rc = 0, i;
+ u32 wqe_size;
+
+ if (!qp->has_rq) {
+ if (buf) {
+ buf->tbl_buf = NULL;
+ buf->tbl_limit = 0;
+ buf->tbl_pages = 0;
+ }
+ if (udata)
+ rc = ionic_validate_qdesc_zero(rq);
+
+ return rc;
+ }
+
+ rc = -EINVAL;
+
+ if (max_wr < 0 || max_wr > 0xffff)
+ return rc;
+
+ if (max_sge < 1)
+ return rc;
+
+ if (max_sge > min(ionic_v1_recv_wqe_max_sge(dev->lif_cfg.max_stride, 0, false),
+ IONIC_SPEC_HIGH))
+ return rc;
+
+ if (udata) {
+ rc = ionic_validate_qdesc(rq);
+ if (rc)
+ return rc;
+
+ qp->rq_spec = rq_spec;
+
+ qp->rq.ptr = NULL;
+ qp->rq.size = rq->size;
+ qp->rq.mask = rq->mask;
+ qp->rq.depth_log2 = rq->depth_log2;
+ qp->rq.stride_log2 = rq->stride_log2;
+
+ qp->rq_meta = NULL;
+
+ qp->rq_umem = ib_umem_get(&dev->ibdev, rq->addr, rq->size, 0);
+ if (IS_ERR(qp->rq_umem))
+ return PTR_ERR(qp->rq_umem);
+ } else {
+ qp->rq_umem = NULL;
+
+ qp->rq_spec = ionic_v1_use_spec_sge(max_sge, rq_spec);
+ if (rq_spec && !qp->rq_spec)
+ ibdev_dbg(&dev->ibdev,
+ "init rq: max_sge %u disables spec\n",
+ max_sge);
+
+ if (qp->rq_cmb & IONIC_CMB_EXPDB) {
+ wqe_size = ionic_v1_recv_wqe_min_size(max_sge,
+ qp->rq_spec,
+ true);
+
+ if (!ionic_expdb_wqe_size_supported(dev, wqe_size))
+ qp->rq_cmb &= ~IONIC_CMB_EXPDB;
+ }
+
+ if (!(qp->rq_cmb & IONIC_CMB_EXPDB))
+ wqe_size = ionic_v1_recv_wqe_min_size(max_sge,
+ qp->rq_spec,
+ false);
+
+ rc = ionic_queue_init(&qp->rq, dev->lif_cfg.hwdev,
+ max_wr, wqe_size);
+ if (rc)
+ return rc;
+
+ ionic_queue_dbell_init(&qp->rq, qp->qpid);
+
+ qp->rq_meta = kmalloc_array((u32)qp->rq.mask + 1,
+ sizeof(*qp->rq_meta),
+ GFP_KERNEL);
+ if (!qp->rq_meta) {
+ rc = -ENOMEM;
+ goto err_rq_meta;
+ }
+
+ for (i = 0; i < qp->rq.mask; ++i)
+ qp->rq_meta[i].next = &qp->rq_meta[i + 1];
+ qp->rq_meta[i].next = IONIC_META_LAST;
+ qp->rq_meta_head = &qp->rq_meta[0];
+ }
+
+ ionic_qp_rq_init_cmb(dev, qp, udata);
+
+ if (qp->rq_cmb & IONIC_CMB_ENABLE)
+ rc = ionic_pgtbl_init(dev, buf, NULL,
+ (u64)qp->rq_cmb_pgid << PAGE_SHIFT,
+ 1, PAGE_SIZE);
+ else
+ rc = ionic_pgtbl_init(dev, buf,
+ qp->rq_umem, qp->rq.dma, 1, PAGE_SIZE);
+ if (rc)
+ goto err_rq_tbl;
+
+ return 0;
+
+err_rq_tbl:
+ ionic_qp_rq_destroy_cmb(dev, ctx, qp);
+ kfree(qp->rq_meta);
+err_rq_meta:
+ if (qp->rq_umem)
+ ib_umem_release(qp->rq_umem);
+ else
+ ionic_queue_destroy(&qp->rq, dev->lif_cfg.hwdev);
+ return rc;
+}
+
+static void ionic_qp_rq_destroy(struct ionic_ibdev *dev,
+ struct ionic_ctx *ctx,
+ struct ionic_qp *qp)
+{
+ if (!qp->has_rq)
+ return;
+
+ ionic_qp_rq_destroy_cmb(dev, ctx, qp);
+
+ kfree(qp->rq_meta);
+
+ if (qp->rq_umem)
+ ib_umem_release(qp->rq_umem);
+ else
+ ionic_queue_destroy(&qp->rq, dev->lif_cfg.hwdev);
+}
+
+int ionic_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
+ struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
+ struct ionic_tbl_buf sq_buf = {}, rq_buf = {};
+ struct ionic_pd *pd = to_ionic_pd(ibqp->pd);
+ struct ionic_qp *qp = to_ionic_qp(ibqp);
+ struct ionic_ctx *ctx =
+ rdma_udata_to_drv_context(udata, struct ionic_ctx, ibctx);
+ struct ionic_qp_resp resp = {};
+ struct ionic_qp_req req = {};
+ struct ionic_cq *cq;
+ u8 udma_mask;
+ void *entry;
+ int rc;
+
+ if (udata) {
+ rc = ib_copy_from_udata(&req, udata, sizeof(req));
+ if (rc)
+ return rc;
+ } else {
+ req.sq_spec = IONIC_SPEC_HIGH;
+ req.rq_spec = IONIC_SPEC_HIGH;
+ }
+
+ if (attr->qp_type == IB_QPT_SMI || attr->qp_type > IB_QPT_UD)
+ return -EOPNOTSUPP;
+
+ qp->state = IB_QPS_RESET;
+
+ INIT_LIST_HEAD(&qp->cq_poll_sq);
+ INIT_LIST_HEAD(&qp->cq_flush_sq);
+ INIT_LIST_HEAD(&qp->cq_flush_rq);
+
+ spin_lock_init(&qp->sq_lock);
+ spin_lock_init(&qp->rq_lock);
+
+ qp->has_sq = 1;
+ qp->has_rq = 1;
+
+ if (attr->qp_type == IB_QPT_GSI) {
+ rc = ionic_get_gsi_qpid(dev, &qp->qpid);
+ } else {
+ udma_mask = BIT(dev->lif_cfg.udma_count) - 1;
+
+ if (qp->has_sq)
+ udma_mask &= to_ionic_vcq(attr->send_cq)->udma_mask;
+
+ if (qp->has_rq)
+ udma_mask &= to_ionic_vcq(attr->recv_cq)->udma_mask;
+
+ if (udata && req.udma_mask)
+ udma_mask &= req.udma_mask;
+
+ if (!udma_mask)
+ return -EINVAL;
+
+ rc = ionic_get_qpid(dev, &qp->qpid, &qp->udma_idx, udma_mask);
+ }
+ if (rc)
+ return rc;
+
+ qp->sig_all = attr->sq_sig_type == IB_SIGNAL_ALL_WR;
+ qp->has_ah = attr->qp_type == IB_QPT_RC;
+
+ if (qp->has_ah) {
+ qp->hdr = kzalloc(sizeof(*qp->hdr), GFP_KERNEL);
+ if (!qp->hdr) {
+ rc = -ENOMEM;
+ goto err_ah_alloc;
+ }
+
+ rc = ionic_get_ahid(dev, &qp->ahid);
+ if (rc)
+ goto err_ahid;
+ }
+
+ if (udata) {
+ if (req.rq_cmb & IONIC_CMB_ENABLE)
+ qp->rq_cmb = req.rq_cmb;
+
+ if (req.sq_cmb & IONIC_CMB_ENABLE)
+ qp->sq_cmb = req.sq_cmb;
+ }
+
+ rc = ionic_qp_sq_init(dev, ctx, qp, &req.sq, &sq_buf,
+ attr->cap.max_send_wr, attr->cap.max_send_sge,
+ attr->cap.max_inline_data, req.sq_spec, udata);
+ if (rc)
+ goto err_sq;
+
+ rc = ionic_qp_rq_init(dev, ctx, qp, &req.rq, &rq_buf,
+ attr->cap.max_recv_wr, attr->cap.max_recv_sge,
+ req.rq_spec, udata);
+ if (rc)
+ goto err_rq;
+
+ rc = ionic_create_qp_cmd(dev, pd,
+ to_ionic_vcq_cq(attr->send_cq, qp->udma_idx),
+ to_ionic_vcq_cq(attr->recv_cq, qp->udma_idx),
+ qp, &sq_buf, &rq_buf, attr);
+ if (rc)
+ goto err_cmd;
+
+ if (udata) {
+ resp.qpid = qp->qpid;
+ resp.udma_idx = qp->udma_idx;
+
+ if (qp->sq_cmb & IONIC_CMB_ENABLE) {
+ bool wc;
+
+ if ((qp->sq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC)) ==
+ (IONIC_CMB_WC | IONIC_CMB_UC)) {
+ ibdev_dbg(&dev->ibdev,
+ "Both sq_cmb flags IONIC_CMB_WC and IONIC_CMB_UC are set, using default driver mapping\n");
+ qp->sq_cmb &= ~(IONIC_CMB_WC | IONIC_CMB_UC);
+ }
+
+ wc = (qp->sq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC))
+ != IONIC_CMB_UC;
+
+ /* let userspace know the mapping */
+ if (wc)
+ qp->sq_cmb |= IONIC_CMB_WC;
+ else
+ qp->sq_cmb |= IONIC_CMB_UC;
+
+ qp->mmap_sq_cmb =
+ ionic_mmap_entry_insert(ctx,
+ qp->sq.size,
+ PHYS_PFN(qp->sq_cmb_addr),
+ wc ? IONIC_MMAP_WC : 0,
+ &resp.sq_cmb_offset);
+ if (!qp->mmap_sq_cmb) {
+ rc = -ENOMEM;
+ goto err_mmap_sq;
+ }
+
+ resp.sq_cmb = qp->sq_cmb;
+ }
+
+ if (qp->rq_cmb & IONIC_CMB_ENABLE) {
+ bool wc;
+
+ if ((qp->rq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC)) ==
+ (IONIC_CMB_WC | IONIC_CMB_UC)) {
+ ibdev_dbg(&dev->ibdev,
+ "Both rq_cmb flags IONIC_CMB_WC and IONIC_CMB_UC are set, using default driver mapping\n");
+ qp->rq_cmb &= ~(IONIC_CMB_WC | IONIC_CMB_UC);
+ }
+
+ if (qp->rq_cmb & IONIC_CMB_EXPDB)
+ wc = (qp->rq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC))
+ == IONIC_CMB_WC;
+ else
+ wc = (qp->rq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC))
+ != IONIC_CMB_UC;
+
+ /* let userspace know the mapping */
+ if (wc)
+ qp->rq_cmb |= IONIC_CMB_WC;
+ else
+ qp->rq_cmb |= IONIC_CMB_UC;
+
+ qp->mmap_rq_cmb =
+ ionic_mmap_entry_insert(ctx,
+ qp->rq.size,
+ PHYS_PFN(qp->rq_cmb_addr),
+ wc ? IONIC_MMAP_WC : 0,
+ &resp.rq_cmb_offset);
+ if (!qp->mmap_rq_cmb) {
+ rc = -ENOMEM;
+ goto err_mmap_rq;
+ }
+
+ resp.rq_cmb = qp->rq_cmb;
+ }
+
+ rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (rc)
+ goto err_resp;
+ }
+
+ ionic_pgtbl_unbuf(dev, &rq_buf);
+ ionic_pgtbl_unbuf(dev, &sq_buf);
+
+ qp->ibqp.qp_num = qp->qpid;
+
+ init_completion(&qp->qp_rel_comp);
+ kref_init(&qp->qp_kref);
+
+ entry = xa_store_irq(&dev->qp_tbl, qp->qpid, qp, GFP_KERNEL);
+ if (entry) {
+ if (!xa_is_err(entry))
+ rc = -EINVAL;
+ else
+ rc = xa_err(entry);
+
+ goto err_resp;
+ }
+
+ if (qp->has_sq) {
+ cq = to_ionic_vcq_cq(attr->send_cq, qp->udma_idx);
+
+ attr->cap.max_send_wr = qp->sq.mask;
+ attr->cap.max_send_sge =
+ ionic_v1_send_wqe_max_sge(qp->sq.stride_log2,
+ qp->sq_spec,
+ qp->sq_cmb & IONIC_CMB_EXPDB);
+ attr->cap.max_inline_data =
+ ionic_v1_send_wqe_max_data(qp->sq.stride_log2,
+ qp->sq_cmb &
+ IONIC_CMB_EXPDB);
+ qp->sq_cqid = cq->cqid;
+ }
+
+ if (qp->has_rq) {
+ cq = to_ionic_vcq_cq(attr->recv_cq, qp->udma_idx);
+
+ attr->cap.max_recv_wr = qp->rq.mask;
+ attr->cap.max_recv_sge =
+ ionic_v1_recv_wqe_max_sge(qp->rq.stride_log2,
+ qp->rq_spec,
+ qp->rq_cmb & IONIC_CMB_EXPDB);
+ qp->rq_cqid = cq->cqid;
+ }
+
+ return 0;
+
+err_resp:
+ if (udata && (qp->rq_cmb & IONIC_CMB_ENABLE))
+ rdma_user_mmap_entry_remove(qp->mmap_rq_cmb);
+err_mmap_rq:
+ if (udata && (qp->sq_cmb & IONIC_CMB_ENABLE))
+ rdma_user_mmap_entry_remove(qp->mmap_sq_cmb);
+err_mmap_sq:
+ ionic_destroy_qp_cmd(dev, qp->qpid);
+err_cmd:
+ ionic_pgtbl_unbuf(dev, &rq_buf);
+ ionic_qp_rq_destroy(dev, ctx, qp);
+err_rq:
+ ionic_pgtbl_unbuf(dev, &sq_buf);
+ ionic_qp_sq_destroy(dev, ctx, qp);
+err_sq:
+ if (qp->has_ah)
+ ionic_put_ahid(dev, qp->ahid);
+err_ahid:
+ kfree(qp->hdr);
+err_ah_alloc:
+ ionic_put_qpid(dev, qp->qpid);
+ return rc;
+}
+
+void ionic_notify_flush_cq(struct ionic_cq *cq)
+{
+ if (cq->flush && cq->vcq->ibcq.comp_handler)
+ cq->vcq->ibcq.comp_handler(&cq->vcq->ibcq,
+ cq->vcq->ibcq.cq_context);
+}
+
+static void ionic_notify_qp_cqs(struct ionic_ibdev *dev, struct ionic_qp *qp)
+{
+ if (qp->ibqp.send_cq)
+ ionic_notify_flush_cq(to_ionic_vcq_cq(qp->ibqp.send_cq,
+ qp->udma_idx));
+ if (qp->ibqp.recv_cq && qp->ibqp.recv_cq != qp->ibqp.send_cq)
+ ionic_notify_flush_cq(to_ionic_vcq_cq(qp->ibqp.recv_cq,
+ qp->udma_idx));
+}
+
+void ionic_flush_qp(struct ionic_ibdev *dev, struct ionic_qp *qp)
+{
+ unsigned long irqflags;
+ struct ionic_cq *cq;
+
+ if (qp->ibqp.send_cq) {
+ cq = to_ionic_vcq_cq(qp->ibqp.send_cq, qp->udma_idx);
+
+ /* Hold the CQ lock and QP sq_lock to set up flush */
+ spin_lock_irqsave(&cq->lock, irqflags);
+ spin_lock(&qp->sq_lock);
+ qp->sq_flush = true;
+ if (!ionic_queue_empty(&qp->sq)) {
+ cq->flush = true;
+ list_move_tail(&qp->cq_flush_sq, &cq->flush_sq);
+ }
+ spin_unlock(&qp->sq_lock);
+ spin_unlock_irqrestore(&cq->lock, irqflags);
+ }
+
+ if (qp->ibqp.recv_cq) {
+ cq = to_ionic_vcq_cq(qp->ibqp.recv_cq, qp->udma_idx);
+
+ /* Hold the CQ lock and QP rq_lock to set up flush */
+ spin_lock_irqsave(&cq->lock, irqflags);
+ spin_lock(&qp->rq_lock);
+ qp->rq_flush = true;
+ if (!ionic_queue_empty(&qp->rq)) {
+ cq->flush = true;
+ list_move_tail(&qp->cq_flush_rq, &cq->flush_rq);
+ }
+ spin_unlock(&qp->rq_lock);
+ spin_unlock_irqrestore(&cq->lock, irqflags);
+ }
+}
+
+static void ionic_clean_cq(struct ionic_cq *cq, u32 qpid)
+{
+ struct ionic_v1_cqe *qcqe;
+ int prod, qtf, qid, type;
+ bool color;
+
+ if (!cq->q.ptr)
+ return;
+
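+ /* Scan the unpolled CQEs, from prod while the color phase matches,
+ * and scrub any that belong to the departing QP.
+ */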
+ color = cq->color;
+ prod = cq->q.prod;
+ qcqe = ionic_queue_at(&cq->q, prod);
+
+ while (color == ionic_v1_cqe_color(qcqe)) {
+ qtf = ionic_v1_cqe_qtf(qcqe);
+ qid = ionic_v1_cqe_qtf_qid(qtf);
+ type = ionic_v1_cqe_qtf_type(qtf);
+
+ if (qid == qpid && type != IONIC_V1_CQE_TYPE_ADMIN)
+ ionic_v1_cqe_clean(qcqe);
+
+ prod = ionic_queue_next(&cq->q, prod);
+ qcqe = ionic_queue_at(&cq->q, prod);
+ color = ionic_color_wrap(prod, color);
+ }
+}
+
+static void ionic_reset_qp(struct ionic_ibdev *dev, struct ionic_qp *qp)
+{
+ unsigned long irqflags;
+ struct ionic_cq *cq;
+ int i;
+
+ local_irq_save(irqflags);
+
+ if (qp->ibqp.send_cq) {
+ cq = to_ionic_vcq_cq(qp->ibqp.send_cq, qp->udma_idx);
+ spin_lock(&cq->lock);
+ ionic_clean_cq(cq, qp->qpid);
+ spin_unlock(&cq->lock);
+ }
+
+ if (qp->ibqp.recv_cq) {
+ cq = to_ionic_vcq_cq(qp->ibqp.recv_cq, qp->udma_idx);
+ spin_lock(&cq->lock);
+ ionic_clean_cq(cq, qp->qpid);
+ spin_unlock(&cq->lock);
+ }
+
+ if (qp->has_sq) {
+ spin_lock(&qp->sq_lock);
+ qp->sq_flush = false;
+ qp->sq_flush_rcvd = false;
+ qp->sq_msn_prod = 0;
+ qp->sq_msn_cons = 0;
+ qp->sq.prod = 0;
+ qp->sq.cons = 0;
+ spin_unlock(&qp->sq_lock);
+ }
+
+ if (qp->has_rq) {
+ spin_lock(&qp->rq_lock);
+ qp->rq_flush = false;
+ qp->rq.prod = 0;
+ qp->rq.cons = 0;
+ if (qp->rq_meta) {
+ for (i = 0; i < qp->rq.mask; ++i)
+ qp->rq_meta[i].next = &qp->rq_meta[i + 1];
+ qp->rq_meta[i].next = IONIC_META_LAST;
+ qp->rq_meta_head = &qp->rq_meta[0];
+ }
+ spin_unlock(&qp->rq_lock);
+ }
+
+ local_irq_restore(irqflags);
+}
+
+static bool ionic_qp_cur_state_is_ok(enum ib_qp_state q_state,
+ enum ib_qp_state attr_state)
+{
+ if (q_state == attr_state)
+ return true;
+
+ if (attr_state == IB_QPS_ERR)
+ return true;
+
+ if (attr_state == IB_QPS_SQE)
+ return q_state == IB_QPS_RTS || q_state == IB_QPS_SQD;
+
+ return false;
+}
+
+static int ionic_check_modify_qp(struct ionic_qp *qp, struct ib_qp_attr *attr,
+ int mask)
+{
+ enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
+ attr->cur_qp_state : qp->state;
+ enum ib_qp_state next_state = (mask & IB_QP_STATE) ?
+ attr->qp_state : cur_state;
+
+ if ((mask & IB_QP_CUR_STATE) &&
+ !ionic_qp_cur_state_is_ok(qp->state, attr->cur_qp_state))
+ return -EINVAL;
+
+ if (!ib_modify_qp_is_ok(cur_state, next_state, qp->ibqp.qp_type, mask))
+ return -EINVAL;
+
+ /* unprivileged qp not allowed privileged qkey */
+ if ((mask & IB_QP_QKEY) && (attr->qkey & 0x80000000) &&
+ qp->ibqp.uobject)
+ return -EPERM;
+
+ return 0;
+}
+
+int ionic_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
+ struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
+ struct ionic_pd *pd = to_ionic_pd(ibqp->pd);
+ struct ionic_qp *qp = to_ionic_qp(ibqp);
+ int rc;
+
+ rc = ionic_check_modify_qp(qp, attr, mask);
+ if (rc)
+ return rc;
+
+ if (mask & IB_QP_CAP)
+ return -EINVAL;
+
+ rc = ionic_modify_qp_cmd(dev, pd, qp, attr, mask);
+ if (rc)
+ return rc;
+
+ if (mask & IB_QP_STATE) {
+ qp->state = attr->qp_state;
+
+ if (attr->qp_state == IB_QPS_ERR) {
+ ionic_flush_qp(dev, qp);
+ ionic_notify_qp_cqs(dev, qp);
+ } else if (attr->qp_state == IB_QPS_RESET) {
+ ionic_reset_qp(dev, qp);
+ }
+ }
+
+ return 0;
+}
+
+int ionic_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int mask, struct ib_qp_init_attr *init_attr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
+ struct ionic_qp *qp = to_ionic_qp(ibqp);
+ int rc;
+
+ memset(attr, 0, sizeof(*attr));
+ memset(init_attr, 0, sizeof(*init_attr));
+
+ rc = ionic_query_qp_cmd(dev, qp, attr, mask);
+ if (rc)
+ return rc;
+
+ if (qp->has_sq)
+ attr->cap.max_send_wr = qp->sq.mask;
+
+ if (qp->has_rq)
+ attr->cap.max_recv_wr = qp->rq.mask;
+
+ init_attr->event_handler = ibqp->event_handler;
+ init_attr->qp_context = ibqp->qp_context;
+ init_attr->send_cq = ibqp->send_cq;
+ init_attr->recv_cq = ibqp->recv_cq;
+ init_attr->srq = ibqp->srq;
+ init_attr->xrcd = ibqp->xrcd;
+ init_attr->cap = attr->cap;
+ init_attr->sq_sig_type = qp->sig_all ?
+ IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
+ init_attr->qp_type = ibqp->qp_type;
+ init_attr->create_flags = 0;
+ init_attr->port_num = 0;
+ init_attr->rwq_ind_tbl = ibqp->rwq_ind_tbl;
+ init_attr->source_qpn = 0;
+
+ return rc;
+}
+
+int ionic_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+{
+ struct ionic_ctx *ctx =
+ rdma_udata_to_drv_context(udata, struct ionic_ctx, ibctx);
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
+ struct ionic_qp *qp = to_ionic_qp(ibqp);
+ unsigned long irqflags;
+ struct ionic_cq *cq;
+ int rc;
+
+ rc = ionic_destroy_qp_cmd(dev, qp->qpid);
+ if (rc)
+ return rc;
+
+ xa_erase_irq(&dev->qp_tbl, qp->qpid);
+
+ kref_put(&qp->qp_kref, ionic_qp_complete);
+ wait_for_completion(&qp->qp_rel_comp);
+
+ if (qp->ibqp.send_cq) {
+ cq = to_ionic_vcq_cq(qp->ibqp.send_cq, qp->udma_idx);
+ spin_lock_irqsave(&cq->lock, irqflags);
+ ionic_clean_cq(cq, qp->qpid);
+ list_del(&qp->cq_poll_sq);
+ list_del(&qp->cq_flush_sq);
+ spin_unlock_irqrestore(&cq->lock, irqflags);
+ }
+
+ if (qp->ibqp.recv_cq) {
+ cq = to_ionic_vcq_cq(qp->ibqp.recv_cq, qp->udma_idx);
+ spin_lock_irqsave(&cq->lock, irqflags);
+ ionic_clean_cq(cq, qp->qpid);
+ list_del(&qp->cq_flush_rq);
+ spin_unlock_irqrestore(&cq->lock, irqflags);
+ }
+
+ ionic_qp_rq_destroy(dev, ctx, qp);
+ ionic_qp_sq_destroy(dev, ctx, qp);
+ if (qp->has_ah) {
+ ionic_put_ahid(dev, qp->ahid);
+ kfree(qp->hdr);
+ }
+ ionic_put_qpid(dev, qp->qpid);
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/ionic/ionic_datapath.c b/drivers/infiniband/hw/ionic/ionic_datapath.c
new file mode 100644
index 000000000000..aa2944887f23
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_datapath.c
@@ -0,0 +1,1399 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_user_verbs.h>
+
+#include "ionic_fw.h"
+#include "ionic_ibdev.h"
+
+#define IONIC_OP(version, opname) \
+ ((version) < 2 ? IONIC_V1_OP_##opname : IONIC_V2_OP_##opname)
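+/* e.g. IONIC_OP(1, SEND) expands to IONIC_V1_OP_SEND and
+ * IONIC_OP(2, SEND) to IONIC_V2_OP_SEND.
+ */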
+
+static bool ionic_next_cqe(struct ionic_ibdev *dev, struct ionic_cq *cq,
+ struct ionic_v1_cqe **cqe)
+{
+ struct ionic_v1_cqe *qcqe = ionic_queue_at_prod(&cq->q);
+
+ if (unlikely(cq->color != ionic_v1_cqe_color(qcqe)))
+ return false;
+
+ /* Prevent out-of-order reads of the CQE */
+ dma_rmb();
+
+ *cqe = qcqe;
+
+ return true;
+}
+
+static int ionic_flush_recv(struct ionic_qp *qp, struct ib_wc *wc)
+{
+ struct ionic_rq_meta *meta;
+ struct ionic_v1_wqe *wqe;
+
+ if (!qp->rq_flush)
+ return 0;
+
+ if (ionic_queue_empty(&qp->rq))
+ return 0;
+
+ wqe = ionic_queue_at_cons(&qp->rq);
+
+ /* wqe_id must be a valid queue index */
+ if (unlikely(wqe->base.wqe_id >> qp->rq.depth_log2)) {
+ ibdev_warn(qp->ibqp.device,
+ "flush qp %u recv index %llu invalid\n",
+ qp->qpid, (unsigned long long)wqe->base.wqe_id);
+ return -EIO;
+ }
+
+ /* wqe_id must indicate a request that is outstanding */
+ meta = &qp->rq_meta[wqe->base.wqe_id];
+ if (unlikely(meta->next != IONIC_META_POSTED)) {
+ ibdev_warn(qp->ibqp.device,
+ "flush qp %u recv index %llu not posted\n",
+ qp->qpid, (unsigned long long)wqe->base.wqe_id);
+ return -EIO;
+ }
+
+ ionic_queue_consume(&qp->rq);
+
+ memset(wc, 0, sizeof(*wc));
+
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->wr_id = meta->wrid;
+ wc->qp = &qp->ibqp;
+
+ meta->next = qp->rq_meta_head;
+ qp->rq_meta_head = meta;
+
+ return 1;
+}
+
+static int ionic_flush_recv_many(struct ionic_qp *qp,
+ struct ib_wc *wc, int nwc)
+{
+ int rc = 0, npolled = 0;
+
+ while (npolled < nwc) {
+ rc = ionic_flush_recv(qp, wc + npolled);
+ if (rc <= 0)
+ break;
+
+ npolled += rc;
+ }
+
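+ /* report partial progress if any; surface an error only when
+ * nothing was polled
+ */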
+ return npolled ?: rc;
+}
+
+static int ionic_flush_send(struct ionic_qp *qp, struct ib_wc *wc)
+{
+ struct ionic_sq_meta *meta;
+
+ if (!qp->sq_flush)
+ return 0;
+
+ if (ionic_queue_empty(&qp->sq))
+ return 0;
+
+ meta = &qp->sq_meta[qp->sq.cons];
+
+ ionic_queue_consume(&qp->sq);
+
+ memset(wc, 0, sizeof(*wc));
+
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->wr_id = meta->wrid;
+ wc->qp = &qp->ibqp;
+
+ return 1;
+}
+
+static int ionic_flush_send_many(struct ionic_qp *qp,
+ struct ib_wc *wc, int nwc)
+{
+ int rc = 0, npolled = 0;
+
+ while (npolled < nwc) {
+ rc = ionic_flush_send(qp, wc + npolled);
+ if (rc <= 0)
+ break;
+
+ npolled += rc;
+ }
+
+ return npolled ?: rc;
+}
+
+static int ionic_poll_recv(struct ionic_ibdev *dev, struct ionic_cq *cq,
+ struct ionic_qp *cqe_qp, struct ionic_v1_cqe *cqe,
+ struct ib_wc *wc)
+{
+ struct ionic_qp *qp = NULL;
+ struct ionic_rq_meta *meta;
+ u32 src_qpn, st_len;
+ u16 vlan_tag;
+ u8 op;
+
+ if (cqe_qp->rq_flush)
+ return 0;
+
+ qp = cqe_qp;
+
+ st_len = be32_to_cpu(cqe->status_length);
+
+ /* ignore wqe_id in case of flush error */
+ if (ionic_v1_cqe_error(cqe) && st_len == IONIC_STS_WQE_FLUSHED_ERR) {
+ cqe_qp->rq_flush = true;
+ cq->flush = true;
+ list_move_tail(&qp->cq_flush_rq, &cq->flush_rq);
+
+ /* posted recvs (if any) flushed by ionic_flush_recv */
+ return 0;
+ }
+
+ /* there had better be something in the recv queue to complete */
+ if (ionic_queue_empty(&qp->rq)) {
+ ibdev_warn(&dev->ibdev, "qp %u is empty\n", qp->qpid);
+ return -EIO;
+ }
+
+ /* wqe_id must be a valid queue index */
+ if (unlikely(cqe->recv.wqe_id >> qp->rq.depth_log2)) {
+ ibdev_warn(&dev->ibdev,
+ "qp %u recv index %llu invalid\n",
+ qp->qpid, (unsigned long long)cqe->recv.wqe_id);
+ return -EIO;
+ }
+
+ /* wqe_id must indicate a request that is outstanding */
+ meta = &qp->rq_meta[cqe->recv.wqe_id];
+ if (unlikely(meta->next != IONIC_META_POSTED)) {
+ ibdev_warn(&dev->ibdev,
+ "qp %u recv index %llu not posted\n",
+ qp->qpid, (unsigned long long)cqe->recv.wqe_id);
+ return -EIO;
+ }
+
+ meta->next = qp->rq_meta_head;
+ qp->rq_meta_head = meta;
+
+ memset(wc, 0, sizeof(*wc));
+
+ wc->wr_id = meta->wrid;
+
+ wc->qp = &cqe_qp->ibqp;
+
+ if (ionic_v1_cqe_error(cqe)) {
+ wc->vendor_err = st_len;
+ wc->status = ionic_to_ib_status(st_len);
+
+ cqe_qp->rq_flush = true;
+ cq->flush = true;
+ list_move_tail(&qp->cq_flush_rq, &cq->flush_rq);
+
+ ibdev_warn(&dev->ibdev,
+ "qp %d recv cqe with error\n", qp->qpid);
+ print_hex_dump(KERN_WARNING, "cqe ", DUMP_PREFIX_OFFSET, 16, 1,
+ cqe, BIT(cq->q.stride_log2), true);
+ goto out;
+ }
+
+ wc->vendor_err = 0;
+ wc->status = IB_WC_SUCCESS;
+
+ src_qpn = be32_to_cpu(cqe->recv.src_qpn_op);
+ op = src_qpn >> IONIC_V1_CQE_RECV_OP_SHIFT;
+
+ src_qpn &= IONIC_V1_CQE_RECV_QPN_MASK;
+ op &= IONIC_V1_CQE_RECV_OP_MASK;
+
+ wc->opcode = IB_WC_RECV;
+ switch (op) {
+ case IONIC_V1_CQE_RECV_OP_RDMA_IMM:
+ wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ wc->ex.imm_data = cqe->recv.imm_data_rkey; /* be32 in wc */
+ break;
+ case IONIC_V1_CQE_RECV_OP_SEND_IMM:
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ wc->ex.imm_data = cqe->recv.imm_data_rkey; /* be32 in wc */
+ break;
+ case IONIC_V1_CQE_RECV_OP_SEND_INV:
+ wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ wc->ex.invalidate_rkey = be32_to_cpu(cqe->recv.imm_data_rkey);
+ break;
+ }
+
+ wc->byte_len = st_len;
+ wc->src_qp = src_qpn;
+
+ if (qp->ibqp.qp_type == IB_QPT_UD ||
+ qp->ibqp.qp_type == IB_QPT_GSI) {
+ wc->wc_flags |= IB_WC_GRH | IB_WC_WITH_SMAC;
+ ether_addr_copy(wc->smac, cqe->recv.src_mac);
+
+ wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
+ if (ionic_v1_cqe_recv_is_ipv4(cqe))
+ wc->network_hdr_type = RDMA_NETWORK_IPV4;
+ else
+ wc->network_hdr_type = RDMA_NETWORK_IPV6;
+
+ if (ionic_v1_cqe_recv_is_vlan(cqe))
+ wc->wc_flags |= IB_WC_WITH_VLAN;
+
+ /* vlan_tag in the cqe is valid from the datapath even if no vlan */
+ vlan_tag = be16_to_cpu(cqe->recv.vlan_tag);
+ wc->vlan_id = vlan_tag & 0xfff; /* 802.1q VID */
+ wc->sl = vlan_tag >> VLAN_PRIO_SHIFT; /* 802.1q PCP */
+ }
+
+ wc->pkey_index = 0;
+ wc->port_num = 1;
+
+out:
+ ionic_queue_consume(&qp->rq);
+
+ return 1;
+}
+
+static bool ionic_peek_send(struct ionic_qp *qp)
+{
+ struct ionic_sq_meta *meta;
+
+ if (qp->sq_flush)
+ return false;
+
+ /* completed all send queue requests */
+ if (ionic_queue_empty(&qp->sq))
+ return false;
+
+ meta = &qp->sq_meta[qp->sq.cons];
+
+ /* waiting for remote completion */
+ if (meta->remote && meta->seq == qp->sq_msn_cons)
+ return false;
+
+ /* waiting for local completion */
+ if (!meta->remote && !meta->local_comp)
+ return false;
+
+ return true;
+}
+
+static int ionic_poll_send(struct ionic_ibdev *dev, struct ionic_cq *cq,
+ struct ionic_qp *qp, struct ib_wc *wc)
+{
+ struct ionic_sq_meta *meta;
+
+ if (qp->sq_flush)
+ return 0;
+
+ do {
+ /* completed all send queue requests */
+ if (ionic_queue_empty(&qp->sq))
+ goto out_empty;
+
+ meta = &qp->sq_meta[qp->sq.cons];
+
+ /* waiting for remote completion */
+ if (meta->remote && meta->seq == qp->sq_msn_cons)
+ goto out_empty;
+
+ /* waiting for local completion */
+ if (!meta->remote && !meta->local_comp)
+ goto out_empty;
+
+ ionic_queue_consume(&qp->sq);
+
+ /* produce wc only if signaled or error status */
+ } while (!meta->signal && meta->ibsts == IB_WC_SUCCESS);
+
+ memset(wc, 0, sizeof(*wc));
+
+ wc->status = meta->ibsts;
+ wc->wr_id = meta->wrid;
+ wc->qp = &qp->ibqp;
+
+ if (meta->ibsts == IB_WC_SUCCESS) {
+ wc->byte_len = meta->len;
+ wc->opcode = meta->ibop;
+ } else {
+ wc->vendor_err = meta->len;
+
+ qp->sq_flush = true;
+ cq->flush = true;
+ list_move_tail(&qp->cq_flush_sq, &cq->flush_sq);
+ }
+
+ return 1;
+
+out_empty:
+ if (qp->sq_flush_rcvd) {
+ qp->sq_flush = true;
+ cq->flush = true;
+ list_move_tail(&qp->cq_flush_sq, &cq->flush_sq);
+ }
+ return 0;
+}
+
+static int ionic_poll_send_many(struct ionic_ibdev *dev, struct ionic_cq *cq,
+ struct ionic_qp *qp, struct ib_wc *wc, int nwc)
+{
+ int rc = 0, npolled = 0;
+
+ while (npolled < nwc) {
+ rc = ionic_poll_send(dev, cq, qp, wc + npolled);
+ if (rc <= 0)
+ break;
+
+ npolled += rc;
+ }
+
+ return npolled ?: rc;
+}
+
+static int ionic_validate_cons(u16 prod, u16 cons,
+ u16 comp, u16 mask)
+{
+ if (((prod - cons) & mask) <= ((comp - cons) & mask))
+ return -EIO;
+
+ return 0;
+}
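The modular window check above is the core of completion validation: prod,
cons and comp are free-running indexes on a power-of-two ring, and comp is
accepted only while it lies inside the half-open window of outstanding
requests [cons, prod).  A standalone worked example (hypothetical
window_check restating the same test; not part of the driver):

	#include <assert.h>

	/* nonzero when comp falls outside [cons, prod), as above */
	static int window_check(unsigned int prod, unsigned int cons,
				unsigned int comp, unsigned int mask)
	{
		return ((prod - cons) & mask) <= ((comp - cons) & mask);
	}

	int main(void)
	{
		/* ring of 8 (mask 7): cons = 6, prod = 2 after wrapping */
		assert(!window_check(2, 6, 6, 7));	/* oldest outstanding */
		assert(!window_check(2, 6, 1, 7));	/* valid across the wrap */
		assert(window_check(2, 6, 2, 7));	/* == prod: not posted yet */
		assert(window_check(2, 6, 4, 7));	/* behind cons: stale */
		return 0;
	}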
+
+static int ionic_comp_msn(struct ionic_qp *qp, struct ionic_v1_cqe *cqe)
+{
+ struct ionic_sq_meta *meta;
+ u16 cqe_seq, cqe_idx;
+ int rc;
+
+ if (qp->sq_flush)
+ return 0;
+
+ cqe_seq = be32_to_cpu(cqe->send.msg_msn) & qp->sq.mask;
+
+ rc = ionic_validate_cons(qp->sq_msn_prod,
+ qp->sq_msn_cons,
+ cqe_seq - 1,
+ qp->sq.mask);
+ if (rc) {
+ ibdev_warn(qp->ibqp.device,
+ "qp %u bad msn %#x seq %u for prod %u cons %u\n",
+ qp->qpid, be32_to_cpu(cqe->send.msg_msn),
+ cqe_seq, qp->sq_msn_prod, qp->sq_msn_cons);
+ return rc;
+ }
+
+ qp->sq_msn_cons = cqe_seq;
+
+ if (ionic_v1_cqe_error(cqe)) {
+ cqe_idx = qp->sq_msn_idx[(cqe_seq - 1) & qp->sq.mask];
+
+ meta = &qp->sq_meta[cqe_idx];
+ meta->len = be32_to_cpu(cqe->status_length);
+ meta->ibsts = ionic_to_ib_status(meta->len);
+
+ ibdev_warn(qp->ibqp.device,
+ "qp %d msn cqe with error\n", qp->qpid);
+ print_hex_dump(KERN_WARNING, "cqe ", DUMP_PREFIX_OFFSET, 16, 1,
+ cqe, sizeof(*cqe), true);
+ }
+
+ return 0;
+}
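A single MSN cqe acknowledges a whole batch of remote work requests: the
masked msg_msn names the next expected sequence number, so everything older
completes at once and ionic_poll_send() stops at the new sq_msn_cons.  A toy
trace with assumed values (not driver code):

	#include <stdio.h>

	int main(void)
	{
		unsigned int mask = 0xff, sq_msn_cons = 5, msg_msn = 8;

		/* one cqe advances the consumer over the whole batch */
		while (sq_msn_cons != (msg_msn & mask)) {
			printf("complete send wqe with seq %u\n", sq_msn_cons);
			sq_msn_cons = (sq_msn_cons + 1) & mask;
		}
		return 0;	/* prints seq 5, 6, 7 */
	}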
+
+static int ionic_comp_npg(struct ionic_qp *qp, struct ionic_v1_cqe *cqe)
+{
+ struct ionic_sq_meta *meta;
+ u16 cqe_idx;
+ u32 st_len;
+
+ if (qp->sq_flush)
+ return 0;
+
+ st_len = be32_to_cpu(cqe->status_length);
+
+ if (ionic_v1_cqe_error(cqe) && st_len == IONIC_STS_WQE_FLUSHED_ERR) {
+ /*
+ * A flush cqe does not consume a wqe on the device, and may
+ * arrive when no such work request is posted.
+ *
+ * The driver should begin flushing after the last indicated
+ * normal or error completion. Here, only set a hint that the
+ * flush request was indicated. In poll_send, if nothing more
+ * can be polled normally, then begin flushing.
+ */
+ qp->sq_flush_rcvd = true;
+ return 0;
+ }
+
+ cqe_idx = cqe->send.npg_wqe_id & qp->sq.mask;
+ meta = &qp->sq_meta[cqe_idx];
+ meta->local_comp = true;
+
+ if (ionic_v1_cqe_error(cqe)) {
+ meta->len = st_len;
+ meta->ibsts = ionic_to_ib_status(st_len);
+ meta->remote = false;
+ ibdev_warn(qp->ibqp.device,
+ "qp %d npg cqe with error\n", qp->qpid);
+ print_hex_dump(KERN_WARNING, "cqe ", DUMP_PREFIX_OFFSET, 16, 1,
+ cqe, sizeof(*cqe), true);
+ }
+
+ return 0;
+}
+
+static void ionic_reserve_sync_cq(struct ionic_ibdev *dev, struct ionic_cq *cq)
+{
+ if (!ionic_queue_empty(&cq->q)) {
+ cq->credit += ionic_queue_length(&cq->q);
+ cq->q.cons = cq->q.prod;
+
+ ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.cq_qtype,
+ ionic_queue_dbell_val(&cq->q));
+ }
+}
+
+static void ionic_reserve_cq(struct ionic_ibdev *dev, struct ionic_cq *cq,
+ int spend)
+{
+ cq->credit -= spend;
+
+ if (cq->credit <= 0)
+ ionic_reserve_sync_cq(dev, cq);
+}
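The credit counter amortizes cq doorbell traffic: each posted wqe spends one
credit, and only when credit runs out does the driver acknowledge all cqes
consumed so far back to the device in a single ring.  A toy model of the
same bookkeeping (hypothetical type, no hardware access):

	struct toy_cq {
		int credit;		/* free cqe slots remaining */
		unsigned int cons;	/* acknowledged to the device */
		unsigned int prod;	/* consumed by the driver */
	};

	static void toy_reserve(struct toy_cq *cq, int spend)
	{
		cq->credit -= spend;
		if (cq->credit > 0)
			return;

		/* one doorbell covers everything consumed since last ring */
		cq->credit += cq->prod - cq->cons;
		cq->cons = cq->prod;
		/* ring_cq_doorbell(cq) would go here */
	}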
+
+static int ionic_poll_vcq_cq(struct ionic_ibdev *dev,
+ struct ionic_cq *cq,
+ int nwc, struct ib_wc *wc)
+{
+ struct ionic_qp *qp, *qp_next;
+ struct ionic_v1_cqe *cqe;
+ int rc = 0, npolled = 0;
+ unsigned long irqflags;
+ u32 qtf, qid;
+ bool peek = false;
+ u8 type;
+
+ if (nwc < 1)
+ return 0;
+
+ spin_lock_irqsave(&cq->lock, irqflags);
+
+ /* poll already indicated work completions for send queue */
+ list_for_each_entry_safe(qp, qp_next, &cq->poll_sq, cq_poll_sq) {
+ if (npolled == nwc)
+ goto out;
+
+ spin_lock(&qp->sq_lock);
+ rc = ionic_poll_send_many(dev, cq, qp, wc + npolled,
+ nwc - npolled);
+ spin_unlock(&qp->sq_lock);
+
+ if (rc > 0)
+ npolled += rc;
+
+ if (npolled < nwc)
+ list_del_init(&qp->cq_poll_sq);
+ }
+
+ /* poll for more work completions */
+ while (likely(ionic_next_cqe(dev, cq, &cqe))) {
+ if (npolled == nwc)
+ goto out;
+
+ qtf = ionic_v1_cqe_qtf(cqe);
+ qid = ionic_v1_cqe_qtf_qid(qtf);
+ type = ionic_v1_cqe_qtf_type(qtf);
+
+ /*
+ * It is safe to access the QP without an additional reference here:
+ * 1. We hold cq->lock throughout
+ * 2. ionic_destroy_qp() acquires the same cq->lock before cleanup
+ * 3. QP is removed from qp_tbl before any cleanup begins
+ * This ensures no concurrent access between polling and destruction.
+ */
+ qp = xa_load(&dev->qp_tbl, qid);
+ if (unlikely(!qp)) {
+ ibdev_dbg(&dev->ibdev, "missing qp for qid %u\n", qid);
+ goto cq_next;
+ }
+
+ switch (type) {
+ case IONIC_V1_CQE_TYPE_RECV:
+ spin_lock(&qp->rq_lock);
+ rc = ionic_poll_recv(dev, cq, qp, cqe, wc + npolled);
+ spin_unlock(&qp->rq_lock);
+
+ if (rc < 0)
+ goto out;
+
+ npolled += rc;
+
+ break;
+
+ case IONIC_V1_CQE_TYPE_SEND_MSN:
+ spin_lock(&qp->sq_lock);
+ rc = ionic_comp_msn(qp, cqe);
+ if (!rc) {
+ rc = ionic_poll_send_many(dev, cq, qp,
+ wc + npolled,
+ nwc - npolled);
+ peek = ionic_peek_send(qp);
+ }
+ spin_unlock(&qp->sq_lock);
+
+ if (rc < 0)
+ goto out;
+
+ npolled += rc;
+
+ if (peek)
+ list_move_tail(&qp->cq_poll_sq, &cq->poll_sq);
+ break;
+
+ case IONIC_V1_CQE_TYPE_SEND_NPG:
+ spin_lock(&qp->sq_lock);
+ rc = ionic_comp_npg(qp, cqe);
+ if (!rc) {
+ rc = ionic_poll_send_many(dev, cq, qp,
+ wc + npolled,
+ nwc - npolled);
+ peek = ionic_peek_send(qp);
+ }
+ spin_unlock(&qp->sq_lock);
+
+ if (rc < 0)
+ goto out;
+
+ npolled += rc;
+
+ if (peek)
+ list_move_tail(&qp->cq_poll_sq, &cq->poll_sq);
+ break;
+
+ default:
+ ibdev_warn(&dev->ibdev,
+ "unexpected cqe type %u\n", type);
+ rc = -EIO;
+ goto out;
+ }
+
+cq_next:
+ ionic_queue_produce(&cq->q);
+ cq->color = ionic_color_wrap(cq->q.prod, cq->color);
+ }
+
+ /* lastly, flush send and recv queues */
+ if (likely(!cq->flush))
+ goto out;
+
+ cq->flush = false;
+
+ list_for_each_entry_safe(qp, qp_next, &cq->flush_sq, cq_flush_sq) {
+ if (npolled == nwc)
+ goto out;
+
+ spin_lock(&qp->sq_lock);
+ rc = ionic_flush_send_many(qp, wc + npolled, nwc - npolled);
+ spin_unlock(&qp->sq_lock);
+
+ if (rc > 0)
+ npolled += rc;
+
+ if (npolled < nwc)
+ list_del_init(&qp->cq_flush_sq);
+ else
+ cq->flush = true;
+ }
+
+ list_for_each_entry_safe(qp, qp_next, &cq->flush_rq, cq_flush_rq) {
+ if (npolled == nwc)
+ goto out;
+
+ spin_lock(&qp->rq_lock);
+ rc = ionic_flush_recv_many(qp, wc + npolled, nwc - npolled);
+ spin_unlock(&qp->rq_lock);
+
+ if (rc > 0)
+ npolled += rc;
+
+ if (npolled < nwc)
+ list_del_init(&qp->cq_flush_rq);
+ else
+ cq->flush = true;
+ }
+
+out:
+ /* in case credit was depleted (more work posted than cq depth) */
+ if (cq->credit <= 0)
+ ionic_reserve_sync_cq(dev, cq);
+
+ spin_unlock_irqrestore(&cq->lock, irqflags);
+
+ return npolled ?: rc;
+}
+
+int ionic_poll_cq(struct ib_cq *ibcq, int nwc, struct ib_wc *wc)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibcq->device);
+ struct ionic_vcq *vcq = to_ionic_vcq(ibcq);
+ int rc_tmp, rc = 0, npolled = 0;
+ int cq_i, cq_x, cq_ix;
+
+ cq_x = vcq->poll_idx;
+ vcq->poll_idx ^= dev->lif_cfg.udma_count - 1;
+
+ for (cq_i = 0; npolled < nwc && cq_i < dev->lif_cfg.udma_count; ++cq_i) {
+ cq_ix = cq_i ^ cq_x;
+
+ if (!(vcq->udma_mask & BIT(cq_ix)))
+ continue;
+
+ rc_tmp = ionic_poll_vcq_cq(dev, &vcq->cq[cq_ix],
+ nwc - npolled,
+ wc + npolled);
+
+ if (rc_tmp >= 0)
+ npolled += rc_tmp;
+ else if (!rc)
+ rc = rc_tmp;
+ }
+
+ return npolled ?: rc;
+}
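The poll_idx arithmetic above rotates which per-udma cq is polled first on
each call, so a busy udma pair cannot starve the other.  Assuming
udma_count == 2, the xor flips the starting cq on every call:

	#include <stdio.h>

	int main(void)
	{
		int poll_idx = 0, udma_count = 2, call, cq_i;

		for (call = 0; call < 3; ++call) {
			int cq_x = poll_idx;

			poll_idx ^= udma_count - 1;	/* toggles 0 <-> 1 */
			for (cq_i = 0; cq_i < udma_count; ++cq_i)
				printf("call %d: cq[%d]\n", call, cq_i ^ cq_x);
		}
		return 0;	/* call 0: cq[0] cq[1]; call 1: cq[1] cq[0] */
	}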
+
+static int ionic_req_notify_vcq_cq(struct ionic_ibdev *dev, struct ionic_cq *cq,
+ enum ib_cq_notify_flags flags)
+{
+ u64 dbell_val = cq->q.dbell;
+
+ if (flags & IB_CQ_SOLICITED) {
+ cq->arm_sol_prod = ionic_queue_next(&cq->q, cq->arm_sol_prod);
+ dbell_val |= cq->arm_sol_prod | IONIC_CQ_RING_SOL;
+ } else {
+ cq->arm_any_prod = ionic_queue_next(&cq->q, cq->arm_any_prod);
+ dbell_val |= cq->arm_any_prod | IONIC_CQ_RING_ARM;
+ }
+
+ ionic_reserve_sync_cq(dev, cq);
+
+ ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.cq_qtype, dbell_val);
+
+ /*
+ * IB_CQ_REPORT_MISSED_EVENTS:
+ *
+ * The queue index in ring zero guarantees no missed events.
+ *
+ * Here, we check if the color bit in the next cqe is flipped. If it
+ * is flipped, then progress can be made by immediately polling the cq.
+ * Still, the cq will be armed, and an event will be generated. The cq
+ * may be empty when polled after the event, because the next poll
+ * after arming the cq can empty it.
+ */
+ return (flags & IB_CQ_REPORT_MISSED_EVENTS) &&
+ cq->color == ionic_v1_cqe_color(ionic_queue_at_prod(&cq->q));
+}
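That return value feeds the standard ULP drain-and-rearm loop: when arming
with IB_CQ_REPORT_MISSED_EVENTS reports a ready cqe, the consumer polls
again rather than waiting for an event.  A minimal sketch of the consumer
side (generic verbs usage, not part of this driver):

	static void drain_and_rearm(struct ib_cq *cq)
	{
		struct ib_wc wc;

		do {
			while (ib_poll_cq(cq, 1, &wc) > 0)
				; /* handle_wc(&wc) */
		} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
					  IB_CQ_REPORT_MISSED_EVENTS) > 0);
	}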
+
+int ionic_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibcq->device);
+ struct ionic_vcq *vcq = to_ionic_vcq(ibcq);
+ int rc = 0, cq_i;
+
+ for (cq_i = 0; cq_i < dev->lif_cfg.udma_count; ++cq_i) {
+ if (!(vcq->udma_mask & BIT(cq_i)))
+ continue;
+
+ if (ionic_req_notify_vcq_cq(dev, &vcq->cq[cq_i], flags))
+ rc = 1;
+ }
+
+ return rc;
+}
+
+static s64 ionic_prep_inline(void *data, u32 max_data,
+ const struct ib_sge *ib_sgl, int num_sge)
+{
+ static const s64 bit_31 = 1ull << 31;
+ s64 len = 0, sg_len;
+ int sg_i;
+
+ for (sg_i = 0; sg_i < num_sge; ++sg_i) {
+ sg_len = ib_sgl[sg_i].length;
+
+ /* sge length zero means 2GB */
+ if (unlikely(sg_len == 0))
+ sg_len = bit_31;
+
+ /* greater than max inline data is invalid */
+ if (unlikely(len + sg_len > max_data))
+ return -EINVAL;
+
+ memcpy(data + len, (void *)ib_sgl[sg_i].addr, sg_len);
+
+ len += sg_len;
+ }
+
+ return len;
+}
+
+static s64 ionic_prep_pld(struct ionic_v1_wqe *wqe,
+ union ionic_v1_pld *pld,
+ int spec, u32 max_sge,
+ const struct ib_sge *ib_sgl,
+ int num_sge)
+{
+ static const s64 bit_31 = 1ull << 31;
+ struct ionic_sge *sgl;
+ __be32 *spec32 = NULL;
+ __be16 *spec16 = NULL;
+ s64 len = 0, sg_len;
+ int sg_i = 0;
+
+ if (unlikely(num_sge < 0 || (u32)num_sge > max_sge))
+ return -EINVAL;
+
+ if (spec && num_sge > IONIC_V1_SPEC_FIRST_SGE) {
+ sg_i = IONIC_V1_SPEC_FIRST_SGE;
+
+ if (num_sge > 8) {
+ wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_SPEC16);
+ spec16 = pld->spec16;
+ } else {
+ wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_SPEC32);
+ spec32 = pld->spec32;
+ }
+ }
+
+ sgl = &pld->sgl[sg_i];
+
+ for (sg_i = 0; sg_i < num_sge; ++sg_i) {
+ sg_len = ib_sgl[sg_i].length;
+
+ /* sge length zero means 2GB */
+ if (unlikely(sg_len == 0))
+ sg_len = bit_31;
+
+ /* greater than 2GB data is invalid */
+ if (unlikely(len + sg_len > bit_31))
+ return -EINVAL;
+
+ sgl[sg_i].va = cpu_to_be64(ib_sgl[sg_i].addr);
+ sgl[sg_i].len = cpu_to_be32(sg_len);
+ sgl[sg_i].lkey = cpu_to_be32(ib_sgl[sg_i].lkey);
+
+ if (spec32) {
+ spec32[sg_i] = sgl[sg_i].len;
+ } else if (spec16) {
+ if (unlikely(sg_len > U16_MAX))
+ return -EINVAL;
+ spec16[sg_i] = cpu_to_be16(sg_len);
+ }
+
+ len += sg_len;
+ }
+
+ return len;
+}
+
+static void ionic_prep_base(struct ionic_qp *qp,
+ const struct ib_send_wr *wr,
+ struct ionic_sq_meta *meta,
+ struct ionic_v1_wqe *wqe)
+{
+ meta->wrid = wr->wr_id;
+ meta->ibsts = IB_WC_SUCCESS;
+ meta->signal = false;
+ meta->local_comp = false;
+
+ wqe->base.wqe_id = qp->sq.prod;
+
+ if (wr->send_flags & IB_SEND_FENCE)
+ wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_FENCE);
+
+ if (wr->send_flags & IB_SEND_SOLICITED)
+ wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_SOL);
+
+ if (qp->sig_all || wr->send_flags & IB_SEND_SIGNALED) {
+ wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_SIG);
+ meta->signal = true;
+ }
+
+ meta->seq = qp->sq_msn_prod;
+ meta->remote =
+ qp->ibqp.qp_type != IB_QPT_UD &&
+ qp->ibqp.qp_type != IB_QPT_GSI &&
+ !ionic_ibop_is_local(wr->opcode);
+
+ if (meta->remote) {
+ qp->sq_msn_idx[meta->seq] = qp->sq.prod;
+ qp->sq_msn_prod = ionic_queue_next(&qp->sq, qp->sq_msn_prod);
+ }
+
+ ionic_queue_produce(&qp->sq);
+}
+
+static int ionic_prep_common(struct ionic_qp *qp,
+ const struct ib_send_wr *wr,
+ struct ionic_sq_meta *meta,
+ struct ionic_v1_wqe *wqe)
+{
+ s64 signed_len;
+ u32 mval;
+
+ if (wr->send_flags & IB_SEND_INLINE) {
+ wqe->base.num_sge_key = 0;
+ wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_INL);
+ mval = ionic_v1_send_wqe_max_data(qp->sq.stride_log2, false);
+ signed_len = ionic_prep_inline(wqe->common.pld.data, mval,
+ wr->sg_list, wr->num_sge);
+ } else {
+ wqe->base.num_sge_key = wr->num_sge;
+ mval = ionic_v1_send_wqe_max_sge(qp->sq.stride_log2,
+ qp->sq_spec,
+ false);
+ signed_len = ionic_prep_pld(wqe, &wqe->common.pld,
+ qp->sq_spec, mval,
+ wr->sg_list, wr->num_sge);
+ }
+
+ if (unlikely(signed_len < 0))
+ return signed_len;
+
+ meta->len = signed_len;
+ wqe->common.length = cpu_to_be32(signed_len);
+
+ ionic_prep_base(qp, wr, meta, wqe);
+
+ return 0;
+}
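The inline branch copies the payload into the wqe itself, so the device
needs no lkey or DMA fetch at transmit time; the sgl branch posts sge
descriptors instead.  A posting sketch for the inline path, with assumed
buf/len and generic verbs types:

	struct ib_sge sge = {
		.addr	= (u64)(uintptr_t)buf,	/* assumed kernel buffer */
		.length	= len,			/* must fit max inline data */
	};
	struct ib_send_wr wr = {
		.opcode		= IB_WR_SEND,
		.send_flags	= IB_SEND_INLINE | IB_SEND_SIGNALED,
		.sg_list	= &sge,
		.num_sge	= 1,
	};
	/* ib_post_send(qp, &wr, &bad) then takes the inline path above */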
+
+static void ionic_prep_sq_wqe(struct ionic_qp *qp, void *wqe)
+{
+ memset(wqe, 0, 1u << qp->sq.stride_log2);
+}
+
+static void ionic_prep_rq_wqe(struct ionic_qp *qp, void *wqe)
+{
+ memset(wqe, 0, 1u << qp->rq.stride_log2);
+}
+
+static int ionic_prep_send(struct ionic_qp *qp,
+ const struct ib_send_wr *wr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
+ struct ionic_sq_meta *meta;
+ struct ionic_v1_wqe *wqe;
+
+ meta = &qp->sq_meta[qp->sq.prod];
+ wqe = ionic_queue_at_prod(&qp->sq);
+
+ ionic_prep_sq_wqe(qp, wqe);
+
+ meta->ibop = IB_WC_SEND;
+
+ switch (wr->opcode) {
+ case IB_WR_SEND:
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND);
+ break;
+ case IB_WR_SEND_WITH_IMM:
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND_IMM);
+ wqe->base.imm_data_key = wr->ex.imm_data;
+ break;
+ case IB_WR_SEND_WITH_INV:
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND_INV);
+ wqe->base.imm_data_key =
+ cpu_to_be32(wr->ex.invalidate_rkey);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ionic_prep_common(qp, wr, meta, wqe);
+}
+
+static int ionic_prep_send_ud(struct ionic_qp *qp,
+ const struct ib_ud_wr *wr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
+ struct ionic_sq_meta *meta;
+ struct ionic_v1_wqe *wqe;
+ struct ionic_ah *ah;
+
+ if (unlikely(!wr->ah))
+ return -EINVAL;
+
+ ah = to_ionic_ah(wr->ah);
+
+ meta = &qp->sq_meta[qp->sq.prod];
+ wqe = ionic_queue_at_prod(&qp->sq);
+
+ ionic_prep_sq_wqe(qp, wqe);
+
+ wqe->common.send.ah_id = cpu_to_be32(ah->ahid);
+ wqe->common.send.dest_qpn = cpu_to_be32(wr->remote_qpn);
+ wqe->common.send.dest_qkey = cpu_to_be32(wr->remote_qkey);
+
+ meta->ibop = IB_WC_SEND;
+
+ switch (wr->wr.opcode) {
+ case IB_WR_SEND:
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND);
+ break;
+ case IB_WR_SEND_WITH_IMM:
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND_IMM);
+ wqe->base.imm_data_key = wr->wr.ex.imm_data;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ionic_prep_common(qp, &wr->wr, meta, wqe);
+}
+
+static int ionic_prep_rdma(struct ionic_qp *qp,
+ const struct ib_rdma_wr *wr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
+ struct ionic_sq_meta *meta;
+ struct ionic_v1_wqe *wqe;
+
+ meta = &qp->sq_meta[qp->sq.prod];
+ wqe = ionic_queue_at_prod(&qp->sq);
+
+ ionic_prep_sq_wqe(qp, wqe);
+
+ meta->ibop = IB_WC_RDMA_WRITE;
+
+ switch (wr->wr.opcode) {
+ case IB_WR_RDMA_READ:
+ if (wr->wr.send_flags & (IB_SEND_SOLICITED | IB_SEND_INLINE))
+ return -EINVAL;
+ meta->ibop = IB_WC_RDMA_READ;
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, RDMA_READ);
+ break;
+ case IB_WR_RDMA_WRITE:
+ if (wr->wr.send_flags & IB_SEND_SOLICITED)
+ return -EINVAL;
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, RDMA_WRITE);
+ break;
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, RDMA_WRITE_IMM);
+ wqe->base.imm_data_key = wr->wr.ex.imm_data;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ wqe->common.rdma.remote_va_high = cpu_to_be32(wr->remote_addr >> 32);
+ wqe->common.rdma.remote_va_low = cpu_to_be32(wr->remote_addr);
+ wqe->common.rdma.remote_rkey = cpu_to_be32(wr->rkey);
+
+ return ionic_prep_common(qp, &wr->wr, meta, wqe);
+}
+
+static int ionic_prep_atomic(struct ionic_qp *qp,
+ const struct ib_atomic_wr *wr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
+ struct ionic_sq_meta *meta;
+ struct ionic_v1_wqe *wqe;
+
+ if (wr->wr.num_sge != 1 || wr->wr.sg_list[0].length != 8)
+ return -EINVAL;
+
+ if (wr->wr.send_flags & (IB_SEND_SOLICITED | IB_SEND_INLINE))
+ return -EINVAL;
+
+ meta = &qp->sq_meta[qp->sq.prod];
+ wqe = ionic_queue_at_prod(&qp->sq);
+
+ ionic_prep_sq_wqe(qp, wqe);
+
+ meta->ibop = IB_WC_RDMA_WRITE;
+
+ switch (wr->wr.opcode) {
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ meta->ibop = IB_WC_COMP_SWAP;
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, ATOMIC_CS);
+ wqe->atomic.swap_add_high = cpu_to_be32(wr->swap >> 32);
+ wqe->atomic.swap_add_low = cpu_to_be32(wr->swap);
+ wqe->atomic.compare_high = cpu_to_be32(wr->compare_add >> 32);
+ wqe->atomic.compare_low = cpu_to_be32(wr->compare_add);
+ break;
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ meta->ibop = IB_WC_FETCH_ADD;
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, ATOMIC_FA);
+ wqe->atomic.swap_add_high = cpu_to_be32(wr->compare_add >> 32);
+ wqe->atomic.swap_add_low = cpu_to_be32(wr->compare_add);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ wqe->atomic.remote_va_high = cpu_to_be32(wr->remote_addr >> 32);
+ wqe->atomic.remote_va_low = cpu_to_be32(wr->remote_addr);
+ wqe->atomic.remote_rkey = cpu_to_be32(wr->rkey);
+
+ wqe->base.num_sge_key = 1;
+ wqe->atomic.sge.va = cpu_to_be64(wr->wr.sg_list[0].addr);
+ wqe->atomic.sge.len = cpu_to_be32(8);
+ wqe->atomic.sge.lkey = cpu_to_be32(wr->wr.sg_list[0].lkey);
+
+ return ionic_prep_common(qp, &wr->wr, meta, wqe);
+}
+
+static int ionic_prep_inv(struct ionic_qp *qp,
+ const struct ib_send_wr *wr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
+ struct ionic_sq_meta *meta;
+ struct ionic_v1_wqe *wqe;
+
+ if (wr->send_flags & (IB_SEND_SOLICITED | IB_SEND_INLINE))
+ return -EINVAL;
+
+ meta = &qp->sq_meta[qp->sq.prod];
+ wqe = ionic_queue_at_prod(&qp->sq);
+
+ ionic_prep_sq_wqe(qp, wqe);
+
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, LOCAL_INV);
+ wqe->base.imm_data_key = cpu_to_be32(wr->ex.invalidate_rkey);
+
+ meta->len = 0;
+ meta->ibop = IB_WC_LOCAL_INV;
+
+ ionic_prep_base(qp, wr, meta, wqe);
+
+ return 0;
+}
+
+static int ionic_prep_reg(struct ionic_qp *qp,
+ const struct ib_reg_wr *wr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
+ struct ionic_mr *mr = to_ionic_mr(wr->mr);
+ struct ionic_sq_meta *meta;
+ struct ionic_v1_wqe *wqe;
+ __le64 dma_addr;
+ int flags;
+
+ if (wr->wr.send_flags & (IB_SEND_SOLICITED | IB_SEND_INLINE))
+ return -EINVAL;
+
+ /* must call ib_map_mr_sg before posting reg wr */
+ if (!mr->buf.tbl_pages)
+ return -EINVAL;
+
+ meta = &qp->sq_meta[qp->sq.prod];
+ wqe = ionic_queue_at_prod(&qp->sq);
+
+ ionic_prep_sq_wqe(qp, wqe);
+
+ flags = to_ionic_mr_flags(wr->access);
+
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, REG_MR);
+ wqe->base.num_sge_key = wr->key;
+ wqe->base.imm_data_key = cpu_to_be32(mr->ibmr.lkey);
+ wqe->reg_mr.va = cpu_to_be64(mr->ibmr.iova);
+ wqe->reg_mr.length = cpu_to_be64(mr->ibmr.length);
+ wqe->reg_mr.offset = ionic_pgtbl_off(&mr->buf, mr->ibmr.iova);
+ dma_addr = ionic_pgtbl_dma(&mr->buf, mr->ibmr.iova);
+ wqe->reg_mr.dma_addr = cpu_to_be64(le64_to_cpu(dma_addr));
+
+ wqe->reg_mr.map_count = cpu_to_be32(mr->buf.tbl_pages);
+ wqe->reg_mr.flags = cpu_to_be16(flags);
+ wqe->reg_mr.dir_size_log2 = 0;
+ wqe->reg_mr.page_size_log2 = order_base_2(mr->ibmr.page_size);
+
+ meta->len = 0;
+ meta->ibop = IB_WC_REG_MR;
+
+ ionic_prep_base(qp, &wr->wr, meta, wqe);
+
+ return 0;
+}
+
+static int ionic_prep_one_rc(struct ionic_qp *qp,
+ const struct ib_send_wr *wr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
+ int rc = 0;
+
+ switch (wr->opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ case IB_WR_SEND_WITH_INV:
+ rc = ionic_prep_send(qp, wr);
+ break;
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ rc = ionic_prep_rdma(qp, rdma_wr(wr));
+ break;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ rc = ionic_prep_atomic(qp, atomic_wr(wr));
+ break;
+ case IB_WR_LOCAL_INV:
+ rc = ionic_prep_inv(qp, wr);
+ break;
+ case IB_WR_REG_MR:
+ rc = ionic_prep_reg(qp, reg_wr(wr));
+ break;
+ default:
+ ibdev_dbg(&dev->ibdev, "invalid opcode %d\n", wr->opcode);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static int ionic_prep_one_ud(struct ionic_qp *qp,
+ const struct ib_send_wr *wr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
+ int rc = 0;
+
+ switch (wr->opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ rc = ionic_prep_send_ud(qp, ud_wr(wr));
+ break;
+ default:
+ ibdev_dbg(&dev->ibdev, "invalid opcode %d\n", wr->opcode);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static int ionic_prep_recv(struct ionic_qp *qp,
+ const struct ib_recv_wr *wr)
+{
+ struct ionic_rq_meta *meta;
+ struct ionic_v1_wqe *wqe;
+ s64 signed_len;
+ u32 mval;
+
+ wqe = ionic_queue_at_prod(&qp->rq);
+
+ /* if wqe is owned by device, caller can try posting again soon */
+ if (wqe->base.flags & cpu_to_be16(IONIC_V1_FLAG_FENCE))
+ return -EAGAIN;
+
+ meta = qp->rq_meta_head;
+ if (unlikely(meta == IONIC_META_LAST) ||
+ unlikely(meta == IONIC_META_POSTED))
+ return -EIO;
+
+ ionic_prep_rq_wqe(qp, wqe);
+
+ mval = ionic_v1_recv_wqe_max_sge(qp->rq.stride_log2, qp->rq_spec,
+ false);
+ signed_len = ionic_prep_pld(wqe, &wqe->recv.pld,
+ qp->rq_spec, mval,
+ wr->sg_list, wr->num_sge);
+ if (signed_len < 0)
+ return signed_len;
+
+ meta->wrid = wr->wr_id;
+
+ wqe->base.wqe_id = meta - qp->rq_meta;
+ wqe->base.num_sge_key = wr->num_sge;
+
+ /* total length for recv goes in base imm_data_key */
+ wqe->base.imm_data_key = cpu_to_be32(signed_len);
+
+ ionic_queue_produce(&qp->rq);
+
+ qp->rq_meta_head = meta->next;
+ meta->next = IONIC_META_POSTED;
+
+ return 0;
+}
+
+static int ionic_post_send_common(struct ionic_ibdev *dev,
+ struct ionic_vcq *vcq,
+ struct ionic_cq *cq,
+ struct ionic_qp *qp,
+ const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad)
+{
+ unsigned long irqflags;
+ bool notify = false;
+ int spend, rc = 0;
+
+ if (!bad)
+ return -EINVAL;
+
+ if (!qp->has_sq) {
+ *bad = wr;
+ return -EINVAL;
+ }
+
+ if (qp->state < IB_QPS_RTS) {
+ *bad = wr;
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&qp->sq_lock, irqflags);
+
+ while (wr) {
+ if (ionic_queue_full(&qp->sq)) {
+ ibdev_dbg(&dev->ibdev, "queue full");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (qp->ibqp.qp_type == IB_QPT_UD ||
+ qp->ibqp.qp_type == IB_QPT_GSI)
+ rc = ionic_prep_one_ud(qp, wr);
+ else
+ rc = ionic_prep_one_rc(qp, wr);
+ if (rc)
+ goto out;
+
+ wr = wr->next;
+ }
+
+out:
+ spin_unlock_irqrestore(&qp->sq_lock, irqflags);
+
+ spin_lock_irqsave(&cq->lock, irqflags);
+ spin_lock(&qp->sq_lock);
+
+ if (likely(qp->sq.prod != qp->sq_old_prod)) {
+ /* ring cq doorbell just in time */
+ spend = (qp->sq.prod - qp->sq_old_prod) & qp->sq.mask;
+ ionic_reserve_cq(dev, cq, spend);
+
+ qp->sq_old_prod = qp->sq.prod;
+
+ ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.sq_qtype,
+ ionic_queue_dbell_val(&qp->sq));
+ }
+
+ if (qp->sq_flush) {
+ notify = true;
+ cq->flush = true;
+ list_move_tail(&qp->cq_flush_sq, &cq->flush_sq);
+ }
+
+ spin_unlock(&qp->sq_lock);
+ spin_unlock_irqrestore(&cq->lock, irqflags);
+
+ if (notify && vcq->ibcq.comp_handler)
+ vcq->ibcq.comp_handler(&vcq->ibcq, vcq->ibcq.cq_context);
+
+ *bad = wr;
+ return rc;
+}
+
+static int ionic_post_recv_common(struct ionic_ibdev *dev,
+ struct ionic_vcq *vcq,
+ struct ionic_cq *cq,
+ struct ionic_qp *qp,
+ const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad)
+{
+ unsigned long irqflags;
+ bool notify = false;
+ int spend, rc = 0;
+
+ if (!bad)
+ return -EINVAL;
+
+ if (!qp->has_rq) {
+ *bad = wr;
+ return -EINVAL;
+ }
+
+ if (qp->state < IB_QPS_INIT) {
+ *bad = wr;
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&qp->rq_lock, irqflags);
+
+ while (wr) {
+ if (ionic_queue_full(&qp->rq)) {
+ ibdev_dbg(&dev->ibdev, "queue full");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = ionic_prep_recv(qp, wr);
+ if (rc)
+ goto out;
+
+ wr = wr->next;
+ }
+
+out:
+ if (!cq) {
+ spin_unlock_irqrestore(&qp->rq_lock, irqflags);
+ goto out_unlocked;
+ }
+ spin_unlock_irqrestore(&qp->rq_lock, irqflags);
+
+ spin_lock_irqsave(&cq->lock, irqflags);
+ spin_lock(&qp->rq_lock);
+
+ if (likely(qp->rq.prod != qp->rq_old_prod)) {
+ /* ring cq doorbell just in time */
+ spend = (qp->rq.prod - qp->rq_old_prod) & qp->rq.mask;
+ ionic_reserve_cq(dev, cq, spend);
+
+ qp->rq_old_prod = qp->rq.prod;
+
+ ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.rq_qtype,
+ ionic_queue_dbell_val(&qp->rq));
+ }
+
+ if (qp->rq_flush) {
+ notify = true;
+ cq->flush = true;
+ list_move_tail(&qp->cq_flush_rq, &cq->flush_rq);
+ }
+
+ spin_unlock(&qp->rq_lock);
+ spin_unlock_irqrestore(&cq->lock, irqflags);
+
+ if (notify && vcq->ibcq.comp_handler)
+ vcq->ibcq.comp_handler(&vcq->ibcq, vcq->ibcq.cq_context);
+
+out_unlocked:
+ *bad = wr;
+ return rc;
+}
+
+int ionic_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
+ struct ionic_vcq *vcq = to_ionic_vcq(ibqp->send_cq);
+ struct ionic_qp *qp = to_ionic_qp(ibqp);
+ struct ionic_cq *cq =
+ to_ionic_vcq_cq(ibqp->send_cq, qp->udma_idx);
+
+ return ionic_post_send_common(dev, vcq, cq, qp, wr, bad);
+}
+
+int ionic_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
+ struct ionic_vcq *vcq = to_ionic_vcq(ibqp->recv_cq);
+ struct ionic_qp *qp = to_ionic_qp(ibqp);
+ struct ionic_cq *cq =
+ to_ionic_vcq_cq(ibqp->recv_cq, qp->udma_idx);
+
+ return ionic_post_recv_common(dev, vcq, cq, qp, wr, bad);
+}
diff --git a/drivers/infiniband/hw/ionic/ionic_fw.h b/drivers/infiniband/hw/ionic/ionic_fw.h
new file mode 100644
index 000000000000..adfbb89d856c
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_fw.h
@@ -0,0 +1,1029 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#ifndef _IONIC_FW_H_
+#define _IONIC_FW_H_
+
+#include <linux/kernel.h>
+#include <rdma/ib_verbs.h>
+
+/* common for ib spec */
+
+#define IONIC_EXP_DBELL_SZ 8
+
+enum ionic_mrid_bits {
+ IONIC_MRID_INDEX_SHIFT = 8,
+};
+
+static inline u32 ionic_mrid(u32 index, u8 key)
+{
+ return (index << IONIC_MRID_INDEX_SHIFT) | key;
+}
+
+static inline u32 ionic_mrid_index(u32 lrkey)
+{
+ return lrkey >> IONIC_MRID_INDEX_SHIFT;
+}
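The lkey/rkey format packs a 24-bit table index above an 8-bit key byte, so
the two helpers round-trip, e.g. with assumed values:

	u32 lrkey = ionic_mrid(0x1234, 0xab);	/* == 0x001234ab */
	u32 index = ionic_mrid_index(lrkey);	/* == 0x1234 */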
+
+/* common to all versions */
+
+/* wqe scatter gather element */
+struct ionic_sge {
+ __be64 va;
+ __be32 len;
+ __be32 lkey;
+};
+
+/* admin queue mr type */
+enum ionic_mr_flags {
+ /* bits that determine mr access */
+ IONIC_MRF_LOCAL_WRITE = BIT(0),
+ IONIC_MRF_REMOTE_WRITE = BIT(1),
+ IONIC_MRF_REMOTE_READ = BIT(2),
+ IONIC_MRF_REMOTE_ATOMIC = BIT(3),
+ IONIC_MRF_MW_BIND = BIT(4),
+ IONIC_MRF_ZERO_BASED = BIT(5),
+ IONIC_MRF_ON_DEMAND = BIT(6),
+ IONIC_MRF_PB = BIT(7),
+ IONIC_MRF_ACCESS_MASK = BIT(12) - 1,
+
+ /* bits that determine mr type */
+ IONIC_MRF_UKEY_EN = BIT(13),
+ IONIC_MRF_IS_MW = BIT(14),
+ IONIC_MRF_INV_EN = BIT(15),
+
+ /* base flags combinations for mr types */
+ IONIC_MRF_USER_MR = 0,
+ IONIC_MRF_PHYS_MR = (IONIC_MRF_UKEY_EN |
+ IONIC_MRF_INV_EN),
+ IONIC_MRF_MW_1 = (IONIC_MRF_UKEY_EN |
+ IONIC_MRF_IS_MW),
+ IONIC_MRF_MW_2 = (IONIC_MRF_UKEY_EN |
+ IONIC_MRF_IS_MW |
+ IONIC_MRF_INV_EN),
+};
+
+static inline int to_ionic_mr_flags(int access)
+{
+ int flags = 0;
+
+ if (access & IB_ACCESS_LOCAL_WRITE)
+ flags |= IONIC_MRF_LOCAL_WRITE;
+
+ if (access & IB_ACCESS_REMOTE_READ)
+ flags |= IONIC_MRF_REMOTE_READ;
+
+ if (access & IB_ACCESS_REMOTE_WRITE)
+ flags |= IONIC_MRF_REMOTE_WRITE;
+
+ if (access & IB_ACCESS_REMOTE_ATOMIC)
+ flags |= IONIC_MRF_REMOTE_ATOMIC;
+
+ if (access & IB_ACCESS_MW_BIND)
+ flags |= IONIC_MRF_MW_BIND;
+
+ if (access & IB_ZERO_BASED)
+ flags |= IONIC_MRF_ZERO_BASED;
+
+ return flags;
+}
+
+enum ionic_qp_flags {
+ /* bits that determine qp access */
+ IONIC_QPF_REMOTE_WRITE = BIT(0),
+ IONIC_QPF_REMOTE_READ = BIT(1),
+ IONIC_QPF_REMOTE_ATOMIC = BIT(2),
+
+ /* bits that determine other qp behavior */
+ IONIC_QPF_SQ_PB = BIT(6),
+ IONIC_QPF_RQ_PB = BIT(7),
+ IONIC_QPF_SQ_SPEC = BIT(8),
+ IONIC_QPF_RQ_SPEC = BIT(9),
+ IONIC_QPF_REMOTE_PRIVILEGED = BIT(10),
+ IONIC_QPF_SQ_DRAINING = BIT(11),
+ IONIC_QPF_SQD_NOTIFY = BIT(12),
+ IONIC_QPF_SQ_CMB = BIT(13),
+ IONIC_QPF_RQ_CMB = BIT(14),
+ IONIC_QPF_PRIVILEGED = BIT(15),
+};
+
+static inline int from_ionic_qp_flags(int flags)
+{
+ int access_flags = 0;
+
+ if (flags & IONIC_QPF_REMOTE_WRITE)
+ access_flags |= IB_ACCESS_REMOTE_WRITE;
+
+ if (flags & IONIC_QPF_REMOTE_READ)
+ access_flags |= IB_ACCESS_REMOTE_READ;
+
+ if (flags & IONIC_QPF_REMOTE_ATOMIC)
+ access_flags |= IB_ACCESS_REMOTE_ATOMIC;
+
+ return access_flags;
+}
+
+static inline int to_ionic_qp_flags(int access, bool sqd_notify,
+ bool sq_is_cmb, bool rq_is_cmb,
+ bool sq_spec, bool rq_spec,
+ bool privileged, bool remote_privileged)
+{
+ int flags = 0;
+
+ if (access & IB_ACCESS_REMOTE_WRITE)
+ flags |= IONIC_QPF_REMOTE_WRITE;
+
+ if (access & IB_ACCESS_REMOTE_READ)
+ flags |= IONIC_QPF_REMOTE_READ;
+
+ if (access & IB_ACCESS_REMOTE_ATOMIC)
+ flags |= IONIC_QPF_REMOTE_ATOMIC;
+
+ if (sqd_notify)
+ flags |= IONIC_QPF_SQD_NOTIFY;
+
+ if (sq_is_cmb)
+ flags |= IONIC_QPF_SQ_CMB;
+
+ if (rq_is_cmb)
+ flags |= IONIC_QPF_RQ_CMB;
+
+ if (sq_spec)
+ flags |= IONIC_QPF_SQ_SPEC;
+
+ if (rq_spec)
+ flags |= IONIC_QPF_RQ_SPEC;
+
+ if (privileged)
+ flags |= IONIC_QPF_PRIVILEGED;
+
+ if (remote_privileged)
+ flags |= IONIC_QPF_REMOTE_PRIVILEGED;
+
+ return flags;
+}
+
+/* cqe non-admin status indicated in status_length field when err bit is set */
+enum ionic_status {
+ IONIC_STS_OK,
+ IONIC_STS_LOCAL_LEN_ERR,
+ IONIC_STS_LOCAL_QP_OPER_ERR,
+ IONIC_STS_LOCAL_PROT_ERR,
+ IONIC_STS_WQE_FLUSHED_ERR,
+ IONIC_STS_MEM_MGMT_OPER_ERR,
+ IONIC_STS_BAD_RESP_ERR,
+ IONIC_STS_LOCAL_ACC_ERR,
+ IONIC_STS_REMOTE_INV_REQ_ERR,
+ IONIC_STS_REMOTE_ACC_ERR,
+ IONIC_STS_REMOTE_OPER_ERR,
+ IONIC_STS_RETRY_EXCEEDED,
+ IONIC_STS_RNR_RETRY_EXCEEDED,
+ IONIC_STS_XRC_VIO_ERR,
+ IONIC_STS_LOCAL_SGL_INV_ERR,
+};
+
+static inline int ionic_to_ib_status(int sts)
+{
+ switch (sts) {
+ case IONIC_STS_OK:
+ return IB_WC_SUCCESS;
+ case IONIC_STS_LOCAL_LEN_ERR:
+ return IB_WC_LOC_LEN_ERR;
+ case IONIC_STS_LOCAL_QP_OPER_ERR:
+ case IONIC_STS_LOCAL_SGL_INV_ERR:
+ return IB_WC_LOC_QP_OP_ERR;
+ case IONIC_STS_LOCAL_PROT_ERR:
+ return IB_WC_LOC_PROT_ERR;
+ case IONIC_STS_WQE_FLUSHED_ERR:
+ return IB_WC_WR_FLUSH_ERR;
+ case IONIC_STS_MEM_MGMT_OPER_ERR:
+ return IB_WC_MW_BIND_ERR;
+ case IONIC_STS_BAD_RESP_ERR:
+ return IB_WC_BAD_RESP_ERR;
+ case IONIC_STS_LOCAL_ACC_ERR:
+ return IB_WC_LOC_ACCESS_ERR;
+ case IONIC_STS_REMOTE_INV_REQ_ERR:
+ return IB_WC_REM_INV_REQ_ERR;
+ case IONIC_STS_REMOTE_ACC_ERR:
+ return IB_WC_REM_ACCESS_ERR;
+ case IONIC_STS_REMOTE_OPER_ERR:
+ return IB_WC_REM_OP_ERR;
+ case IONIC_STS_RETRY_EXCEEDED:
+ return IB_WC_RETRY_EXC_ERR;
+ case IONIC_STS_RNR_RETRY_EXCEEDED:
+ return IB_WC_RNR_RETRY_EXC_ERR;
+ case IONIC_STS_XRC_VIO_ERR:
+ default:
+ return IB_WC_GENERAL_ERR;
+ }
+}
+
+/* admin queue qp type */
+enum ionic_qp_type {
+ IONIC_QPT_RC,
+ IONIC_QPT_UC,
+ IONIC_QPT_RD,
+ IONIC_QPT_UD,
+ IONIC_QPT_SRQ,
+ IONIC_QPT_XRC_INI,
+ IONIC_QPT_XRC_TGT,
+ IONIC_QPT_XRC_SRQ,
+};
+
+static inline int to_ionic_qp_type(enum ib_qp_type type)
+{
+ switch (type) {
+ case IB_QPT_GSI:
+ case IB_QPT_UD:
+ return IONIC_QPT_UD;
+ case IB_QPT_RC:
+ return IONIC_QPT_RC;
+ case IB_QPT_UC:
+ return IONIC_QPT_UC;
+ case IB_QPT_XRC_INI:
+ return IONIC_QPT_XRC_INI;
+ case IB_QPT_XRC_TGT:
+ return IONIC_QPT_XRC_TGT;
+ default:
+ return -EINVAL;
+ }
+}
+
+/* admin queue qp state */
+enum ionic_qp_state {
+ IONIC_QPS_RESET,
+ IONIC_QPS_INIT,
+ IONIC_QPS_RTR,
+ IONIC_QPS_RTS,
+ IONIC_QPS_SQD,
+ IONIC_QPS_SQE,
+ IONIC_QPS_ERR,
+};
+
+static inline int from_ionic_qp_state(enum ionic_qp_state state)
+{
+ switch (state) {
+ case IONIC_QPS_RESET:
+ return IB_QPS_RESET;
+ case IONIC_QPS_INIT:
+ return IB_QPS_INIT;
+ case IONIC_QPS_RTR:
+ return IB_QPS_RTR;
+ case IONIC_QPS_RTS:
+ return IB_QPS_RTS;
+ case IONIC_QPS_SQD:
+ return IB_QPS_SQD;
+ case IONIC_QPS_SQE:
+ return IB_QPS_SQE;
+ case IONIC_QPS_ERR:
+ return IB_QPS_ERR;
+ default:
+ return -EINVAL;
+ }
+}
+
+static inline int to_ionic_qp_state(enum ib_qp_state state)
+{
+ switch (state) {
+ case IB_QPS_RESET:
+ return IONIC_QPS_RESET;
+ case IB_QPS_INIT:
+ return IONIC_QPS_INIT;
+ case IB_QPS_RTR:
+ return IONIC_QPS_RTR;
+ case IB_QPS_RTS:
+ return IONIC_QPS_RTS;
+ case IB_QPS_SQD:
+ return IONIC_QPS_SQD;
+ case IB_QPS_SQE:
+ return IONIC_QPS_SQE;
+ case IB_QPS_ERR:
+ return IONIC_QPS_ERR;
+ default:
+ return 0;
+ }
+}
+
+static inline int to_ionic_qp_modify_state(enum ib_qp_state to_state,
+ enum ib_qp_state from_state)
+{
+ return to_ionic_qp_state(to_state) |
+ (to_ionic_qp_state(from_state) << 4);
+}
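The modify-state byte carries the destination state in the low nibble and
the current state in the next nibble, e.g. (values per the enum above):

	/* RTR -> RTS: IONIC_QPS_RTS | (IONIC_QPS_RTR << 4) == 0x23 */
	int type_state = to_ionic_qp_modify_state(IB_QPS_RTS, IB_QPS_RTR);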
+
+/* fw abi v1 */
+
+/* data payload part of v1 wqe */
+union ionic_v1_pld {
+ struct ionic_sge sgl[2];
+ __be32 spec32[8];
+ __be16 spec16[16];
+ __u8 data[32];
+};
+
+/* completion queue v1 cqe */
+struct ionic_v1_cqe {
+ union {
+ struct {
+ __be16 cmd_idx;
+ __u8 cmd_op;
+ __u8 rsvd[17];
+ __le16 old_sq_cindex;
+ __le16 old_rq_cq_cindex;
+ } admin;
+ struct {
+ __u64 wqe_id;
+ __be32 src_qpn_op;
+ __u8 src_mac[6];
+ __be16 vlan_tag;
+ __be32 imm_data_rkey;
+ } recv;
+ struct {
+ __u8 rsvd[4];
+ __be32 msg_msn;
+ __u8 rsvd2[8];
+ __u64 npg_wqe_id;
+ } send;
+ };
+ __be32 status_length;
+ __be32 qid_type_flags;
+};
+
+/* bits for cqe recv */
+enum ionic_v1_cqe_src_qpn_bits {
+ IONIC_V1_CQE_RECV_QPN_MASK = 0xffffff,
+ IONIC_V1_CQE_RECV_OP_SHIFT = 24,
+
+ /* MASK could be 0x3, but need 0x1f for makeshift values:
+ * OP_TYPE_RDMA_OPER_WITH_IMM, OP_TYPE_SEND_RCVD
+ */
+ IONIC_V1_CQE_RECV_OP_MASK = 0x1f,
+ IONIC_V1_CQE_RECV_OP_SEND = 0,
+ IONIC_V1_CQE_RECV_OP_SEND_INV = 1,
+ IONIC_V1_CQE_RECV_OP_SEND_IMM = 2,
+ IONIC_V1_CQE_RECV_OP_RDMA_IMM = 3,
+
+ IONIC_V1_CQE_RECV_IS_IPV4 = BIT(7 + IONIC_V1_CQE_RECV_OP_SHIFT),
+ IONIC_V1_CQE_RECV_IS_VLAN = BIT(6 + IONIC_V1_CQE_RECV_OP_SHIFT),
+};
+
+/* bits for cqe qid_type_flags */
+enum ionic_v1_cqe_qtf_bits {
+ IONIC_V1_CQE_COLOR = BIT(0),
+ IONIC_V1_CQE_ERROR = BIT(1),
+ IONIC_V1_CQE_TYPE_SHIFT = 5,
+ IONIC_V1_CQE_TYPE_MASK = 0x7,
+ IONIC_V1_CQE_QID_SHIFT = 8,
+
+ IONIC_V1_CQE_TYPE_ADMIN = 0,
+ IONIC_V1_CQE_TYPE_RECV = 1,
+ IONIC_V1_CQE_TYPE_SEND_MSN = 2,
+ IONIC_V1_CQE_TYPE_SEND_NPG = 3,
+};
+
+static inline bool ionic_v1_cqe_color(struct ionic_v1_cqe *cqe)
+{
+ return cqe->qid_type_flags & cpu_to_be32(IONIC_V1_CQE_COLOR);
+}
+
+static inline bool ionic_v1_cqe_error(struct ionic_v1_cqe *cqe)
+{
+ return cqe->qid_type_flags & cpu_to_be32(IONIC_V1_CQE_ERROR);
+}
+
+static inline bool ionic_v1_cqe_recv_is_ipv4(struct ionic_v1_cqe *cqe)
+{
+ return cqe->recv.src_qpn_op & cpu_to_be32(IONIC_V1_CQE_RECV_IS_IPV4);
+}
+
+static inline bool ionic_v1_cqe_recv_is_vlan(struct ionic_v1_cqe *cqe)
+{
+ return cqe->recv.src_qpn_op & cpu_to_be32(IONIC_V1_CQE_RECV_IS_VLAN);
+}
+
+static inline void ionic_v1_cqe_clean(struct ionic_v1_cqe *cqe)
+{
+ cqe->qid_type_flags |= cpu_to_be32(~0u << IONIC_V1_CQE_QID_SHIFT);
+}
+
+static inline u32 ionic_v1_cqe_qtf(struct ionic_v1_cqe *cqe)
+{
+ return be32_to_cpu(cqe->qid_type_flags);
+}
+
+static inline u8 ionic_v1_cqe_qtf_type(u32 qtf)
+{
+ return (qtf >> IONIC_V1_CQE_TYPE_SHIFT) & IONIC_V1_CQE_TYPE_MASK;
+}
+
+static inline u32 ionic_v1_cqe_qtf_qid(u32 qtf)
+{
+ return qtf >> IONIC_V1_CQE_QID_SHIFT;
+}
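Decoding a qid_type_flags word with these helpers, for an assumed cqe value
of 0x00002a43:

	u32 qtf  = ionic_v1_cqe_qtf(cqe);	/* 0x2a43 */
	u32 qid  = ionic_v1_cqe_qtf_qid(qtf);	/* 0x2a */
	u8  type = ionic_v1_cqe_qtf_type(qtf);	/* 2: SEND_MSN */
	/* low bits 0x3: COLOR and ERROR are both set */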
+
+/* v1 base wqe header */
+struct ionic_v1_base_hdr {
+ __u64 wqe_id;
+ __u8 op;
+ __u8 num_sge_key;
+ __be16 flags;
+ __be32 imm_data_key;
+};
+
+/* v1 receive wqe body */
+struct ionic_v1_recv_bdy {
+ __u8 rsvd[16];
+ union ionic_v1_pld pld;
+};
+
+/* v1 send/rdma wqe body (common, has sgl) */
+struct ionic_v1_common_bdy {
+ union {
+ struct {
+ __be32 ah_id;
+ __be32 dest_qpn;
+ __be32 dest_qkey;
+ } send;
+ struct {
+ __be32 remote_va_high;
+ __be32 remote_va_low;
+ __be32 remote_rkey;
+ } rdma;
+ };
+ __be32 length;
+ union ionic_v1_pld pld;
+};
+
+/* v1 atomic wqe body */
+struct ionic_v1_atomic_bdy {
+ __be32 remote_va_high;
+ __be32 remote_va_low;
+ __be32 remote_rkey;
+ __be32 swap_add_high;
+ __be32 swap_add_low;
+ __be32 compare_high;
+ __be32 compare_low;
+ __u8 rsvd[4];
+ struct ionic_sge sge;
+};
+
+/* v1 reg mr wqe body */
+struct ionic_v1_reg_mr_bdy {
+ __be64 va;
+ __be64 length;
+ __be64 offset;
+ __be64 dma_addr;
+ __be32 map_count;
+ __be16 flags;
+ __u8 dir_size_log2;
+ __u8 page_size_log2;
+ __u8 rsvd[8];
+};
+
+/* v1 bind mw wqe body */
+struct ionic_v1_bind_mw_bdy {
+ __be64 va;
+ __be64 length;
+ __be32 lkey;
+ __be16 flags;
+ __u8 rsvd[26];
+};
+
+/* v1 send/recv wqe */
+struct ionic_v1_wqe {
+ struct ionic_v1_base_hdr base;
+ union {
+ struct ionic_v1_recv_bdy recv;
+ struct ionic_v1_common_bdy common;
+ struct ionic_v1_atomic_bdy atomic;
+ struct ionic_v1_reg_mr_bdy reg_mr;
+ struct ionic_v1_bind_mw_bdy bind_mw;
+ };
+};
+
+/* queue pair v1 send opcodes */
+enum ionic_v1_op {
+ IONIC_V1_OP_SEND,
+ IONIC_V1_OP_SEND_INV,
+ IONIC_V1_OP_SEND_IMM,
+ IONIC_V1_OP_RDMA_READ,
+ IONIC_V1_OP_RDMA_WRITE,
+ IONIC_V1_OP_RDMA_WRITE_IMM,
+ IONIC_V1_OP_ATOMIC_CS,
+ IONIC_V1_OP_ATOMIC_FA,
+ IONIC_V1_OP_REG_MR,
+ IONIC_V1_OP_LOCAL_INV,
+ IONIC_V1_OP_BIND_MW,
+
+ /* flags */
+ IONIC_V1_FLAG_FENCE = BIT(0),
+ IONIC_V1_FLAG_SOL = BIT(1),
+ IONIC_V1_FLAG_INL = BIT(2),
+ IONIC_V1_FLAG_SIG = BIT(3),
+
+ /* top four flag bits select the sgl spec format */
+ IONIC_V1_FLAG_SPEC32 = (1u << 12),
+ IONIC_V1_FLAG_SPEC16 = (2u << 12),
+ IONIC_V1_SPEC_FIRST_SGE = 2,
+};
+
+/* queue pair v2 send opcodes */
+enum ionic_v2_op {
+ IONIC_V2_OPSL_OUT = 0x20,
+ IONIC_V2_OPSL_IMM = 0x40,
+ IONIC_V2_OPSL_INV = 0x80,
+
+ IONIC_V2_OP_SEND = 0x0 | IONIC_V2_OPSL_OUT,
+ IONIC_V2_OP_SEND_IMM = IONIC_V2_OP_SEND | IONIC_V2_OPSL_IMM,
+ IONIC_V2_OP_SEND_INV = IONIC_V2_OP_SEND | IONIC_V2_OPSL_INV,
+
+ IONIC_V2_OP_RDMA_WRITE = 0x1 | IONIC_V2_OPSL_OUT,
+ IONIC_V2_OP_RDMA_WRITE_IMM = IONIC_V2_OP_RDMA_WRITE | IONIC_V2_OPSL_IMM,
+
+ IONIC_V2_OP_RDMA_READ = 0x2,
+
+ IONIC_V2_OP_ATOMIC_CS = 0x4,
+ IONIC_V2_OP_ATOMIC_FA = 0x5,
+ IONIC_V2_OP_REG_MR = 0x6,
+ IONIC_V2_OP_LOCAL_INV = 0x7,
+ IONIC_V2_OP_BIND_MW = 0x8,
+};
+
+static inline size_t ionic_v1_send_wqe_min_size(int min_sge, int min_data,
+ int spec, bool expdb)
+{
+ size_t sz_wqe, sz_sgl, sz_data;
+
+ if (spec > IONIC_V1_SPEC_FIRST_SGE)
+ min_sge += IONIC_V1_SPEC_FIRST_SGE;
+
+ if (expdb) {
+ min_sge += 1;
+ min_data += IONIC_EXP_DBELL_SZ;
+ }
+
+ sz_wqe = sizeof(struct ionic_v1_wqe);
+ sz_sgl = offsetof(struct ionic_v1_wqe, common.pld.sgl[min_sge]);
+ sz_data = offsetof(struct ionic_v1_wqe, common.pld.data[min_data]);
+
+ if (sz_sgl > sz_wqe)
+ sz_wqe = sz_sgl;
+
+ if (sz_data > sz_wqe)
+ sz_wqe = sz_data;
+
+ return sz_wqe;
+}
+
+static inline int ionic_v1_send_wqe_max_sge(u8 stride_log2, int spec,
+ bool expdb)
+{
+ struct ionic_sge *sge = (void *)(1ull << stride_log2);
+ struct ionic_v1_wqe *wqe = (void *)0;
+ int num_sge = 0;
+
+ if (expdb)
+ sge -= 1;
+
+ if (spec > IONIC_V1_SPEC_FIRST_SGE)
+ num_sge = IONIC_V1_SPEC_FIRST_SGE;
+
+ num_sge = sge - &wqe->common.pld.sgl[num_sge];
+
+ if (spec && num_sge > spec)
+ num_sge = spec;
+
+ return num_sge;
+}
+
+static inline int ionic_v1_send_wqe_max_data(u8 stride_log2, bool expdb)
+{
+ struct ionic_v1_wqe *wqe = (void *)0;
+ __u8 *data = (void *)(1ull << stride_log2);
+
+ if (expdb)
+ data -= IONIC_EXP_DBELL_SZ;
+
+ return data - wqe->common.pld.data;
+}
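These capacity helpers do pointer arithmetic from a null wqe pointer, which
amounts to an offsetof() computation against the stride.  An equivalent
formulation of the data case (ignoring expdb; sketch only):

	static inline int example_send_wqe_max_data(u8 stride_log2)
	{
		return (1u << stride_log2) -
		       offsetof(struct ionic_v1_wqe, common.pld.data);
	}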
+
+static inline size_t ionic_v1_recv_wqe_min_size(int min_sge, int spec,
+ bool expdb)
+{
+ size_t sz_wqe, sz_sgl;
+
+ if (spec > IONIC_V1_SPEC_FIRST_SGE)
+ min_sge += IONIC_V1_SPEC_FIRST_SGE;
+
+ if (expdb)
+ min_sge += 1;
+
+ sz_wqe = sizeof(struct ionic_v1_wqe);
+ sz_sgl = offsetof(struct ionic_v1_wqe, recv.pld.sgl[min_sge]);
+
+ if (sz_sgl > sz_wqe)
+ sz_wqe = sz_sgl;
+
+ return sz_wqe;
+}
+
+static inline int ionic_v1_recv_wqe_max_sge(u8 stride_log2, int spec,
+ bool expdb)
+{
+ struct ionic_sge *sge = (void *)(1ull << stride_log2);
+ struct ionic_v1_wqe *wqe = (void *)0;
+ int num_sge = 0;
+
+ if (expdb)
+ sge -= 1;
+
+ if (spec > IONIC_V1_SPEC_FIRST_SGE)
+ num_sge = IONIC_V1_SPEC_FIRST_SGE;
+
+ num_sge = sge - &wqe->recv.pld.sgl[num_sge];
+
+ if (spec && num_sge > spec)
+ num_sge = spec;
+
+ return num_sge;
+}
+
+static inline int ionic_v1_use_spec_sge(int min_sge, int spec)
+{
+ if (!spec || min_sge > spec)
+ return 0;
+
+ if (min_sge <= IONIC_V1_SPEC_FIRST_SGE)
+ return IONIC_V1_SPEC_FIRST_SGE;
+
+ return spec;
+}
+
+struct ionic_admin_stats_hdr {
+ __le64 dma_addr;
+ __le32 length;
+ __le32 id_ver;
+ __u8 type_state;
+} __packed;
+
+#define IONIC_ADMIN_STATS_HDRS_IN_V1_LEN 17
+static_assert(sizeof(struct ionic_admin_stats_hdr) ==
+ IONIC_ADMIN_STATS_HDRS_IN_V1_LEN);
+
+struct ionic_admin_create_ah {
+ __le64 dma_addr;
+ __le32 length;
+ __le32 pd_id;
+ __le32 id_ver;
+ __le16 dbid_flags;
+ __u8 csum_profile;
+ __u8 crypto;
+} __packed;
+
+#define IONIC_ADMIN_CREATE_AH_IN_V1_LEN 24
+static_assert(sizeof(struct ionic_admin_create_ah) ==
+ IONIC_ADMIN_CREATE_AH_IN_V1_LEN);
+
+struct ionic_admin_destroy_ah {
+ __le32 ah_id;
+} __packed;
+
+#define IONIC_ADMIN_DESTROY_AH_IN_V1_LEN 4
+static_assert(sizeof(struct ionic_admin_destroy_ah) ==
+ IONIC_ADMIN_DESTROY_AH_IN_V1_LEN);
+
+struct ionic_admin_query_ah {
+ __le64 dma_addr;
+} __packed;
+
+#define IONIC_ADMIN_QUERY_AH_IN_V1_LEN 8
+static_assert(sizeof(struct ionic_admin_query_ah) ==
+ IONIC_ADMIN_QUERY_AH_IN_V1_LEN);
+
+struct ionic_admin_create_mr {
+ __le64 va;
+ __le64 length;
+ __le32 pd_id;
+ __le32 id_ver;
+ __le32 tbl_index;
+ __le32 map_count;
+ __le64 dma_addr;
+ __le16 dbid_flags;
+ __u8 pt_type;
+ __u8 dir_size_log2;
+ __u8 page_size_log2;
+} __packed;
+
+#define IONIC_ADMIN_CREATE_MR_IN_V1_LEN 45
+static_assert(sizeof(struct ionic_admin_create_mr) ==
+ IONIC_ADMIN_CREATE_MR_IN_V1_LEN);
+
+struct ionic_admin_destroy_mr {
+ __le32 mr_id;
+} __packed;
+
+#define IONIC_ADMIN_DESTROY_MR_IN_V1_LEN 4
+static_assert(sizeof(struct ionic_admin_destroy_mr) ==
+ IONIC_ADMIN_DESTROY_MR_IN_V1_LEN);
+
+struct ionic_admin_create_cq {
+ __le32 eq_id;
+ __u8 depth_log2;
+ __u8 stride_log2;
+ __u8 dir_size_log2_rsvd;
+ __u8 page_size_log2;
+ __le32 cq_flags;
+ __le32 id_ver;
+ __le32 tbl_index;
+ __le32 map_count;
+ __le64 dma_addr;
+ __le16 dbid_flags;
+} __packed;
+
+#define IONIC_ADMIN_CREATE_CQ_IN_V1_LEN 34
+static_assert(sizeof(struct ionic_admin_create_cq) ==
+ IONIC_ADMIN_CREATE_CQ_IN_V1_LEN);
+
+struct ionic_admin_destroy_cq {
+ __le32 cq_id;
+} __packed;
+
+#define IONIC_ADMIN_DESTROY_CQ_IN_V1_LEN 4
+static_assert(sizeof(struct ionic_admin_destroy_cq) ==
+ IONIC_ADMIN_DESTROY_CQ_IN_V1_LEN);
+
+struct ionic_admin_create_qp {
+ __le32 pd_id;
+ __be32 priv_flags;
+ __le32 sq_cq_id;
+ __u8 sq_depth_log2;
+ __u8 sq_stride_log2;
+ __u8 sq_dir_size_log2_rsvd;
+ __u8 sq_page_size_log2;
+ __le32 sq_tbl_index_xrcd_id;
+ __le32 sq_map_count;
+ __le64 sq_dma_addr;
+ __le32 rq_cq_id;
+ __u8 rq_depth_log2;
+ __u8 rq_stride_log2;
+ __u8 rq_dir_size_log2_rsvd;
+ __u8 rq_page_size_log2;
+ __le32 rq_tbl_index_srq_id;
+ __le32 rq_map_count;
+ __le64 rq_dma_addr;
+ __le32 id_ver;
+ __le16 dbid_flags;
+ __u8 type_state;
+ __u8 rsvd;
+} __packed;
+
+#define IONIC_ADMIN_CREATE_QP_IN_V1_LEN 64
+static_assert(sizeof(struct ionic_admin_create_qp) ==
+ IONIC_ADMIN_CREATE_QP_IN_V1_LEN);
+
+struct ionic_admin_destroy_qp {
+ __le32 qp_id;
+} __packed;
+
+#define IONIC_ADMIN_DESTROY_QP_IN_V1_LEN 4
+static_assert(sizeof(struct ionic_admin_destroy_qp) ==
+ IONIC_ADMIN_DESTROY_QP_IN_V1_LEN);
+
+struct ionic_admin_mod_qp {
+ __be32 attr_mask;
+ __u8 dcqcn_profile;
+ __u8 tfp_csum_profile;
+ __be16 access_flags;
+ __le32 rq_psn;
+ __le32 sq_psn;
+ __le32 qkey_dest_qpn;
+ __le32 rate_limit_kbps;
+ __u8 pmtu;
+ __u8 retry;
+ __u8 rnr_timer;
+ __u8 retry_timeout;
+ __u8 rsq_depth;
+ __u8 rrq_depth;
+ __le16 pkey_id;
+ __le32 ah_id_len;
+ __u8 en_pcp;
+ __u8 ip_dscp;
+ __u8 rsvd2;
+ __u8 type_state;
+ union {
+ struct {
+ __le16 rsvd1;
+ };
+ __le32 rrq_index;
+ };
+ __le32 rsq_index;
+ __le64 dma_addr;
+ __le32 id_ver;
+} __packed;
+
+#define IONIC_ADMIN_MODIFY_QP_IN_V1_LEN 60
+static_assert(sizeof(struct ionic_admin_mod_qp) ==
+ IONIC_ADMIN_MODIFY_QP_IN_V1_LEN);
+
+struct ionic_admin_query_qp {
+ __le64 hdr_dma_addr;
+ __le64 sq_dma_addr;
+ __le64 rq_dma_addr;
+ __le32 ah_id;
+ __le32 id_ver;
+ __le16 dbid_flags;
+} __packed;
+
+#define IONIC_ADMIN_QUERY_QP_IN_V1_LEN 34
+static_assert(sizeof(struct ionic_admin_query_qp) ==
+ IONIC_ADMIN_QUERY_QP_IN_V1_LEN);
+
+#define ADMIN_WQE_STRIDE 64
+#define ADMIN_WQE_HDR_LEN 4
+
+/* admin queue v1 wqe */
+struct ionic_v1_admin_wqe {
+ __u8 op;
+ __u8 rsvd;
+ __le16 len;
+
+ union {
+ struct ionic_admin_stats_hdr stats;
+ struct ionic_admin_create_ah create_ah;
+ struct ionic_admin_destroy_ah destroy_ah;
+ struct ionic_admin_query_ah query_ah;
+ struct ionic_admin_create_mr create_mr;
+ struct ionic_admin_destroy_mr destroy_mr;
+ struct ionic_admin_create_cq create_cq;
+ struct ionic_admin_destroy_cq destroy_cq;
+ struct ionic_admin_create_qp create_qp;
+ struct ionic_admin_destroy_qp destroy_qp;
+ struct ionic_admin_mod_qp mod_qp;
+ struct ionic_admin_query_qp query_qp;
+ } cmd;
+};
+
+/* side data for query qp */
+struct ionic_v1_admin_query_qp_sq {
+ __u8 rnr_timer;
+ __u8 retry_timeout;
+ __be16 access_perms_flags;
+ __be16 rsvd;
+ __be16 pkey_id;
+ __be32 qkey_dest_qpn;
+ __be32 rate_limit_kbps;
+ __be32 rq_psn;
+};
+
+struct ionic_v1_admin_query_qp_rq {
+ __u8 state_pmtu;
+ __u8 retry_rnrtry;
+ __u8 rrq_depth;
+ __u8 rsq_depth;
+ __be32 sq_psn;
+ __be16 access_perms_flags;
+ __be16 rsvd;
+};
+
+/* admin queue v1 opcodes */
+enum ionic_v1_admin_op {
+ IONIC_V1_ADMIN_NOOP,
+ IONIC_V1_ADMIN_CREATE_CQ,
+ IONIC_V1_ADMIN_CREATE_QP,
+ IONIC_V1_ADMIN_CREATE_MR,
+ IONIC_V1_ADMIN_STATS_HDRS,
+ IONIC_V1_ADMIN_STATS_VALS,
+ IONIC_V1_ADMIN_DESTROY_MR,
+ IONIC_V1_ADMIN_RSVD_7, /* RESIZE_CQ */
+ IONIC_V1_ADMIN_DESTROY_CQ,
+ IONIC_V1_ADMIN_MODIFY_QP,
+ IONIC_V1_ADMIN_QUERY_QP,
+ IONIC_V1_ADMIN_DESTROY_QP,
+ IONIC_V1_ADMIN_DEBUG,
+ IONIC_V1_ADMIN_CREATE_AH,
+ IONIC_V1_ADMIN_QUERY_AH,
+ IONIC_V1_ADMIN_MODIFY_DCQCN,
+ IONIC_V1_ADMIN_DESTROY_AH,
+ IONIC_V1_ADMIN_QP_STATS_HDRS,
+ IONIC_V1_ADMIN_QP_STATS_VALS,
+ IONIC_V1_ADMIN_OPCODES_MAX,
+};
+
+/* admin queue v1 cqe status */
+enum ionic_v1_admin_status {
+ IONIC_V1_ASTS_OK,
+ IONIC_V1_ASTS_BAD_CMD,
+ IONIC_V1_ASTS_BAD_INDEX,
+ IONIC_V1_ASTS_BAD_STATE,
+ IONIC_V1_ASTS_BAD_TYPE,
+ IONIC_V1_ASTS_BAD_ATTR,
+ IONIC_V1_ASTS_MSG_TOO_BIG,
+};
+
+/* event queue v1 eqe */
+struct ionic_v1_eqe {
+ __be32 evt;
+};
+
+/* bits for cqe queue_type_flags */
+enum ionic_v1_eqe_evt_bits {
+ IONIC_V1_EQE_COLOR = BIT(0),
+ IONIC_V1_EQE_TYPE_SHIFT = 1,
+ IONIC_V1_EQE_TYPE_MASK = 0x7,
+ IONIC_V1_EQE_CODE_SHIFT = 4,
+ IONIC_V1_EQE_CODE_MASK = 0xf,
+ IONIC_V1_EQE_QID_SHIFT = 8,
+
+ /* cq events */
+ IONIC_V1_EQE_TYPE_CQ = 0,
+ /* cq normal events */
+ IONIC_V1_EQE_CQ_NOTIFY = 0,
+ /* cq error events */
+ IONIC_V1_EQE_CQ_ERR = 8,
+
+ /* qp and srq events */
+ IONIC_V1_EQE_TYPE_QP = 1,
+ /* qp normal events */
+ IONIC_V1_EQE_SRQ_LEVEL = 0,
+ IONIC_V1_EQE_SQ_DRAIN = 1,
+ IONIC_V1_EQE_QP_COMM_EST = 2,
+ IONIC_V1_EQE_QP_LAST_WQE = 3,
+ /* qp error events */
+ IONIC_V1_EQE_QP_ERR = 8,
+ IONIC_V1_EQE_QP_ERR_REQUEST = 9,
+ IONIC_V1_EQE_QP_ERR_ACCESS = 10,
+};
+
+enum ionic_tfp_csum_profiles {
+ IONIC_TFP_CSUM_PROF_ETH_IPV4_UDP = 0,
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP = 1,
+ IONIC_TFP_CSUM_PROF_ETH_IPV6_UDP = 2,
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV6_UDP = 3,
+ IONIC_TFP_CSUM_PROF_IPV4_UDP_VXLAN_ETH_QTAG_IPV4_UDP = 4,
+ IONIC_TFP_CSUM_PROF_IPV4_UDP_VXLAN_ETH_QTAG_IPV6_UDP = 5,
+ IONIC_TFP_CSUM_PROF_QTAG_IPV4_UDP_VXLAN_ETH_QTAG_IPV4_UDP = 6,
+ IONIC_TFP_CSUM_PROF_QTAG_IPV4_UDP_VXLAN_ETH_QTAG_IPV6_UDP = 7,
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP_ESP_IPV4_UDP = 8,
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_ESP_UDP = 9,
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP_ESP_UDP = 10,
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV6_ESP_UDP = 11,
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP_CSUM = 12,
+};
+
+static inline bool ionic_v1_eqe_color(struct ionic_v1_eqe *eqe)
+{
+ return eqe->evt & cpu_to_be32(IONIC_V1_EQE_COLOR);
+}
+
+static inline u32 ionic_v1_eqe_evt(struct ionic_v1_eqe *eqe)
+{
+ return be32_to_cpu(eqe->evt);
+}
+
+static inline u8 ionic_v1_eqe_evt_type(u32 evt)
+{
+ return (evt >> IONIC_V1_EQE_TYPE_SHIFT) & IONIC_V1_EQE_TYPE_MASK;
+}
+
+static inline u8 ionic_v1_eqe_evt_code(u32 evt)
+{
+ return (evt >> IONIC_V1_EQE_CODE_SHIFT) & IONIC_V1_EQE_CODE_MASK;
+}
+
+static inline u32 ionic_v1_eqe_evt_qid(u32 evt)
+{
+ return evt >> IONIC_V1_EQE_QID_SHIFT;
+}
+
+enum ionic_v1_stat_bits {
+ IONIC_V1_STAT_TYPE_SHIFT = 28,
+ IONIC_V1_STAT_TYPE_NONE = 0,
+ IONIC_V1_STAT_TYPE_8 = 1,
+ IONIC_V1_STAT_TYPE_LE16 = 2,
+ IONIC_V1_STAT_TYPE_LE32 = 3,
+ IONIC_V1_STAT_TYPE_LE64 = 4,
+ IONIC_V1_STAT_TYPE_BE16 = 5,
+ IONIC_V1_STAT_TYPE_BE32 = 6,
+ IONIC_V1_STAT_TYPE_BE64 = 7,
+ IONIC_V1_STAT_OFF_MASK = BIT(IONIC_V1_STAT_TYPE_SHIFT) - 1,
+};
+
+struct ionic_v1_stat {
+ union {
+ __be32 be_type_off;
+ u32 type_off;
+ };
+ char name[28];
+};
+
+static inline int ionic_v1_stat_type(struct ionic_v1_stat *hdr)
+{
+ return hdr->type_off >> IONIC_V1_STAT_TYPE_SHIFT;
+}
+
+static inline unsigned int ionic_v1_stat_off(struct ionic_v1_stat *hdr)
+{
+ return hdr->type_off & IONIC_V1_STAT_OFF_MASK;
+}
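Each stat header packs a 4-bit type above a 28-bit byte offset into the
values buffer.  Decoding an assumed header with type_off == 0x40000010:

	int type = ionic_v1_stat_type(hdr);	/* 0x40000010 >> 28 == 4, LE64 */
	unsigned int off = ionic_v1_stat_off(hdr);	/* low 28 bits == 0x10 */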
+
+#endif /* _IONIC_FW_H_ */
diff --git a/drivers/infiniband/hw/ionic/ionic_hw_stats.c b/drivers/infiniband/hw/ionic/ionic_hw_stats.c
new file mode 100644
index 000000000000..244a80dde08f
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_hw_stats.c
@@ -0,0 +1,484 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/dma-mapping.h>
+
+#include "ionic_fw.h"
+#include "ionic_ibdev.h"
+
+static int ionic_v1_stat_normalize(struct ionic_v1_stat *hw_stats,
+ int hw_stats_count)
+{
+ int hw_stat_i;
+
+ for (hw_stat_i = 0; hw_stat_i < hw_stats_count; ++hw_stat_i) {
+ struct ionic_v1_stat *stat = &hw_stats[hw_stat_i];
+
+ stat->type_off = be32_to_cpu(stat->be_type_off);
+ stat->name[sizeof(stat->name) - 1] = 0;
+ if (ionic_v1_stat_type(stat) == IONIC_V1_STAT_TYPE_NONE)
+ break;
+ }
+
+ return hw_stat_i;
+}
+
+static void ionic_fill_stats_desc(struct rdma_stat_desc *hw_stats_hdrs,
+ struct ionic_v1_stat *hw_stats,
+ int hw_stats_count)
+{
+ int hw_stat_i;
+
+ for (hw_stat_i = 0; hw_stat_i < hw_stats_count; ++hw_stat_i) {
+ struct ionic_v1_stat *stat = &hw_stats[hw_stat_i];
+
+ hw_stats_hdrs[hw_stat_i].name = stat->name;
+ }
+}
+
+static u64 ionic_v1_stat_val(struct ionic_v1_stat *stat,
+ void *vals_buf, size_t vals_len)
+{
+ unsigned int off = ionic_v1_stat_off(stat);
+ int type = ionic_v1_stat_type(stat);
+
+#define __ionic_v1_stat_validate(__type) \
+ ((off + sizeof(__type) <= vals_len) && \
+ (IS_ALIGNED(off, sizeof(__type))))
+
+ switch (type) {
+ case IONIC_V1_STAT_TYPE_8:
+ if (__ionic_v1_stat_validate(u8))
+ return *(u8 *)(vals_buf + off);
+ break;
+ case IONIC_V1_STAT_TYPE_LE16:
+ if (__ionic_v1_stat_validate(__le16))
+ return le16_to_cpu(*(__le16 *)(vals_buf + off));
+ break;
+ case IONIC_V1_STAT_TYPE_LE32:
+ if (__ionic_v1_stat_validate(__le32))
+ return le32_to_cpu(*(__le32 *)(vals_buf + off));
+ break;
+ case IONIC_V1_STAT_TYPE_LE64:
+ if (__ionic_v1_stat_validate(__le64))
+ return le64_to_cpu(*(__le64 *)(vals_buf + off));
+ break;
+ case IONIC_V1_STAT_TYPE_BE16:
+ if (__ionic_v1_stat_validate(__be16))
+ return be16_to_cpu(*(__be16 *)(vals_buf + off));
+ break;
+ case IONIC_V1_STAT_TYPE_BE32:
+ if (__ionic_v1_stat_validate(__be32))
+ return be32_to_cpu(*(__be32 *)(vals_buf + off));
+ break;
+ case IONIC_V1_STAT_TYPE_BE64:
+ if (__ionic_v1_stat_validate(__be64))
+ return be64_to_cpu(*(__be64 *)(vals_buf + off));
+ break;
+ }
+
+ return ~0ull;
+#undef __ionic_v1_stat_validate
+}
+
+static int ionic_hw_stats_cmd(struct ionic_ibdev *dev,
+ dma_addr_t dma, size_t len, int qid, int op)
+{
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = op,
+ .len = cpu_to_le16(IONIC_ADMIN_STATS_HDRS_IN_V1_LEN),
+ .cmd.stats = {
+ .dma_addr = cpu_to_le64(dma),
+ .length = cpu_to_le32(len),
+ .id_ver = cpu_to_le32(qid),
+ },
+ }
+ };
+
+ if (dev->lif_cfg.admin_opcodes <= op)
+ return -EBADRQC;
+
+ ionic_admin_post(dev, &wr);
+
+ return ionic_admin_wait(dev, &wr, IONIC_ADMIN_F_INTERRUPT);
+}
+
+static int ionic_init_hw_stats(struct ionic_ibdev *dev)
+{
+ dma_addr_t hw_stats_dma;
+ int rc, hw_stats_count;
+
+ if (dev->hw_stats_hdrs)
+ return 0;
+
+ dev->hw_stats_count = 0;
+
+ /* buffer for current values from the device */
+ dev->hw_stats_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!dev->hw_stats_buf) {
+ rc = -ENOMEM;
+ goto err_buf;
+ }
+
+ /* buffer for names, sizes, offsets of values */
+ dev->hw_stats = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!dev->hw_stats) {
+ rc = -ENOMEM;
+ goto err_hw_stats;
+ }
+
+ /* request the names, sizes, offsets */
+ hw_stats_dma = dma_map_single(dev->lif_cfg.hwdev, dev->hw_stats,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, hw_stats_dma);
+ if (rc)
+ goto err_dma;
+
+ rc = ionic_hw_stats_cmd(dev, hw_stats_dma, PAGE_SIZE, 0,
+ IONIC_V1_ADMIN_STATS_HDRS);
+ if (rc)
+ goto err_cmd;
+
+ dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+
+ /* normalize and count the number of hw_stats */
+ hw_stats_count =
+ ionic_v1_stat_normalize(dev->hw_stats,
+ PAGE_SIZE / sizeof(*dev->hw_stats));
+ if (!hw_stats_count) {
+ rc = -ENODATA;
+ goto err_dma;
+ }
+
+ dev->hw_stats_count = hw_stats_count;
+
+ /* alloc and init array of names, for alloc_hw_stats */
+ dev->hw_stats_hdrs = kcalloc(hw_stats_count,
+ sizeof(*dev->hw_stats_hdrs),
+ GFP_KERNEL);
+ if (!dev->hw_stats_hdrs) {
+ rc = -ENOMEM;
+ goto err_dma;
+ }
+
+ ionic_fill_stats_desc(dev->hw_stats_hdrs, dev->hw_stats,
+ hw_stats_count);
+
+ return 0;
+
+err_cmd:
+ dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+err_dma:
+ kfree(dev->hw_stats);
+err_hw_stats:
+ kfree(dev->hw_stats_buf);
+err_buf:
+ dev->hw_stats_count = 0;
+ dev->hw_stats = NULL;
+ dev->hw_stats_buf = NULL;
+ dev->hw_stats_hdrs = NULL;
+ return rc;
+}
+
+static struct rdma_hw_stats *ionic_alloc_hw_stats(struct ib_device *ibdev,
+ u32 port)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
+
+ if (port != 1)
+ return NULL;
+
+ return rdma_alloc_hw_stats_struct(dev->hw_stats_hdrs,
+ dev->hw_stats_count,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
+static int ionic_get_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *hw_stats,
+ u32 port, int index)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
+ dma_addr_t hw_stats_dma;
+ int rc, hw_stat_i;
+
+ if (port != 1)
+ return -EINVAL;
+
+ hw_stats_dma = dma_map_single(dev->lif_cfg.hwdev, dev->hw_stats_buf,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, hw_stats_dma);
+ if (rc)
+ goto err_dma;
+
+ rc = ionic_hw_stats_cmd(dev, hw_stats_dma, PAGE_SIZE,
+ 0, IONIC_V1_ADMIN_STATS_VALS);
+ if (rc)
+ goto err_cmd;
+
+ dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+
+ for (hw_stat_i = 0; hw_stat_i < dev->hw_stats_count; ++hw_stat_i)
+ hw_stats->value[hw_stat_i] =
+ ionic_v1_stat_val(&dev->hw_stats[hw_stat_i],
+ dev->hw_stats_buf, PAGE_SIZE);
+
+ return hw_stat_i;
+
+err_cmd:
+ dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+err_dma:
+ return rc;
+}
+
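+/*
+ * Allocate a per-counter context: a page for raw per-QP values and
+ * an xarray slot keyed by the rdma_counter id, so that bind/unbind
+ * and stats updates can find it later.
+ */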
+static struct rdma_hw_stats *
+ionic_counter_alloc_stats(struct rdma_counter *counter)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(counter->device);
+ struct ionic_counter *cntr;
+ int err;
+
+ cntr = kzalloc(sizeof(*cntr), GFP_KERNEL);
+ if (!cntr)
+ return NULL;
+
+ /* buffer for current values from the device */
+ cntr->vals = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!cntr->vals)
+ goto err_vals;
+
+ err = xa_alloc(&dev->counter_stats->xa_counters, &counter->id,
+ cntr,
+ XA_LIMIT(0, IONIC_MAX_QPID),
+ GFP_KERNEL);
+ if (err)
+ goto err_xa;
+
+ INIT_LIST_HEAD(&cntr->qp_list);
+
+ return rdma_alloc_hw_stats_struct(dev->counter_stats->stats_hdrs,
+ dev->counter_stats->queue_stats_count,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+err_xa:
+ kfree(cntr->vals);
+err_vals:
+ kfree(cntr);
+
+ return NULL;
+}
+
+static int ionic_counter_dealloc(struct rdma_counter *counter)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(counter->device);
+ struct ionic_counter *cntr;
+
+ cntr = xa_erase(&dev->counter_stats->xa_counters, counter->id);
+ if (!cntr)
+ return -EINVAL;
+
+ kfree(cntr->vals);
+ kfree(cntr);
+
+ return 0;
+}
+
+static int ionic_counter_bind_qp(struct rdma_counter *counter,
+ struct ib_qp *ibqp,
+ u32 port)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(counter->device);
+ struct ionic_qp *qp = to_ionic_qp(ibqp);
+ struct ionic_counter *cntr;
+
+ cntr = xa_load(&dev->counter_stats->xa_counters, counter->id);
+ if (!cntr)
+ return -EINVAL;
+
+ list_add_tail(&qp->qp_list_counter, &cntr->qp_list);
+ ibqp->counter = counter;
+
+ return 0;
+}
+
+static int ionic_counter_unbind_qp(struct ib_qp *ibqp, u32 port)
+{
+ struct ionic_qp *qp = to_ionic_qp(ibqp);
+
+ if (ibqp->counter) {
+ list_del(&qp->qp_list_counter);
+ ibqp->counter = NULL;
+ }
+
+ return 0;
+}
+
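+/*
+ * Sum the hardware stats of every QP bound to this counter. Each
+ * QP's values are fetched with a separate admin command into
+ * cntr->vals and accumulated into the rdma_hw_stats values.
+ */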
+static int ionic_get_qp_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *hw_stats,
+ u32 counter_id)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
+ struct ionic_counter_stats *cs;
+ struct ionic_counter *cntr;
+ dma_addr_t hw_stats_dma;
+ struct ionic_qp *qp;
+ int rc, stat_i = 0;
+
+ cs = dev->counter_stats;
+ cntr = xa_load(&cs->xa_counters, counter_id);
+ if (!cntr)
+ return -EINVAL;
+
+ hw_stats_dma = dma_map_single(dev->lif_cfg.hwdev, cntr->vals,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, hw_stats_dma);
+ if (rc)
+ return rc;
+
+ memset(hw_stats->value, 0, sizeof(u64) * hw_stats->num_counters);
+
+ list_for_each_entry(qp, &cntr->qp_list, qp_list_counter) {
+ rc = ionic_hw_stats_cmd(dev, hw_stats_dma, PAGE_SIZE,
+ qp->qpid,
+ IONIC_V1_ADMIN_QP_STATS_VALS);
+ if (rc)
+ goto err_cmd;
+
+ for (stat_i = 0; stat_i < cs->queue_stats_count; ++stat_i)
+ hw_stats->value[stat_i] +=
+ ionic_v1_stat_val(&cs->hdr[stat_i],
+ cntr->vals,
+ PAGE_SIZE);
+ }
+
+ dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+ return stat_i;
+
+err_cmd:
+ dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+
+ return rc;
+}
+
+static int ionic_counter_update_stats(struct rdma_counter *counter)
+{
+ return ionic_get_qp_stats(counter->device, counter->stats, counter->id);
+}
+
+static int ionic_alloc_counters(struct ionic_ibdev *dev)
+{
+ struct ionic_counter_stats *cs = dev->counter_stats;
+ int rc, hw_stats_count;
+ dma_addr_t hdr_dma;
+
+ /* buffer for names, sizes, offsets of values */
+ cs->hdr = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!cs->hdr)
+ return -ENOMEM;
+
+ hdr_dma = dma_map_single(dev->lif_cfg.hwdev, cs->hdr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, hdr_dma);
+ if (rc)
+ goto err_dma;
+
+ rc = ionic_hw_stats_cmd(dev, hdr_dma, PAGE_SIZE, 0,
+ IONIC_V1_ADMIN_QP_STATS_HDRS);
+ if (rc)
+ goto err_cmd;
+
+ dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+
+ /* normalize and count the number of hw_stats */
+ hw_stats_count = ionic_v1_stat_normalize(cs->hdr,
+ PAGE_SIZE / sizeof(*cs->hdr));
+ if (!hw_stats_count) {
+ rc = -ENODATA;
+ goto err_dma;
+ }
+
+ cs->queue_stats_count = hw_stats_count;
+
+ /* alloc and init array of names */
+ cs->stats_hdrs = kcalloc(hw_stats_count, sizeof(*cs->stats_hdrs),
+ GFP_KERNEL);
+ if (!cs->stats_hdrs) {
+ rc = -ENOMEM;
+ goto err_dma;
+ }
+
+ ionic_fill_stats_desc(cs->stats_hdrs, cs->hdr, hw_stats_count);
+
+ return 0;
+
+err_cmd:
+ dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+err_dma:
+ kfree(cs->hdr);
+
+ return rc;
+}
+
+static const struct ib_device_ops ionic_hw_stats_ops = {
+ .driver_id = RDMA_DRIVER_IONIC,
+ .alloc_hw_port_stats = ionic_alloc_hw_stats,
+ .get_hw_stats = ionic_get_hw_stats,
+};
+
+static const struct ib_device_ops ionic_counter_stats_ops = {
+ .counter_alloc_stats = ionic_counter_alloc_stats,
+ .counter_dealloc = ionic_counter_dealloc,
+ .counter_bind_qp = ionic_counter_bind_qp,
+ .counter_unbind_qp = ionic_counter_unbind_qp,
+ .counter_update_stats = ionic_counter_update_stats,
+};
+
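+/*
+ * Register whichever stats families the device advertises: global
+ * per-port stats (IONIC_LIF_RDMA_STAT_GLOBAL) and/or per-QP counters
+ * (IONIC_LIF_RDMA_STAT_QP). A failure here only disables the
+ * corresponding ops; it does not fail device registration.
+ */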
+void ionic_stats_init(struct ionic_ibdev *dev)
+{
+ u16 stats_type = dev->lif_cfg.stats_type;
+ int rc;
+
+ if (stats_type & IONIC_LIF_RDMA_STAT_GLOBAL) {
+ rc = ionic_init_hw_stats(dev);
+ if (rc)
+ ibdev_dbg(&dev->ibdev, "Failed to init hw stats\n");
+ else
+ ib_set_device_ops(&dev->ibdev, &ionic_hw_stats_ops);
+ }
+
+ if (stats_type & IONIC_LIF_RDMA_STAT_QP) {
+ dev->counter_stats = kzalloc(sizeof(*dev->counter_stats),
+ GFP_KERNEL);
+ if (!dev->counter_stats)
+ return;
+
+ rc = ionic_alloc_counters(dev);
+ if (rc) {
+ ibdev_dbg(&dev->ibdev, "Failed to init counter stats\n");
+ kfree(dev->counter_stats);
+ dev->counter_stats = NULL;
+ return;
+ }
+
+ xa_init_flags(&dev->counter_stats->xa_counters, XA_FLAGS_ALLOC);
+
+ ib_set_device_ops(&dev->ibdev, &ionic_counter_stats_ops);
+ }
+}
+
+void ionic_stats_cleanup(struct ionic_ibdev *dev)
+{
+ if (dev->counter_stats) {
+ xa_destroy(&dev->counter_stats->xa_counters);
+ kfree(dev->counter_stats->hdr);
+ kfree(dev->counter_stats->stats_hdrs);
+ kfree(dev->counter_stats);
+ dev->counter_stats = NULL;
+ }
+
+ kfree(dev->hw_stats);
+ kfree(dev->hw_stats_buf);
+ kfree(dev->hw_stats_hdrs);
+}
diff --git a/drivers/infiniband/hw/ionic/ionic_ibdev.c b/drivers/infiniband/hw/ionic/ionic_ibdev.c
new file mode 100644
index 000000000000..164046d00e5d
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_ibdev.c
@@ -0,0 +1,440 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/pci.h>
+#include <linux/irq.h>
+#include <net/addrconf.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_mad.h>
+
+#include "ionic_ibdev.h"
+
+#define DRIVER_DESCRIPTION "AMD Pensando RoCE HCA driver"
+#define DEVICE_DESCRIPTION "AMD Pensando RoCE HCA"
+
+MODULE_AUTHOR("Allen Hubbe <allen.hubbe@amd.com>");
+MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("NET_IONIC");
+
+static int ionic_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *attr,
+ struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
+ struct net_device *ndev;
+
+ ndev = ib_device_get_netdev(ibdev, 1);
+ addrconf_ifid_eui48((u8 *)&attr->sys_image_guid, ndev);
+ dev_put(ndev);
+ attr->max_mr_size = dev->lif_cfg.npts_per_lif * PAGE_SIZE / 2;
+ attr->page_size_cap = dev->lif_cfg.page_size_supported;
+
+ attr->vendor_id = to_pci_dev(dev->lif_cfg.hwdev)->vendor;
+ attr->vendor_part_id = to_pci_dev(dev->lif_cfg.hwdev)->device;
+
+ attr->hw_ver = ionic_lif_asic_rev(dev->lif_cfg.lif);
+ attr->fw_ver = 0;
+ attr->max_qp = dev->lif_cfg.qp_count;
+ attr->max_qp_wr = IONIC_MAX_DEPTH;
+ attr->device_cap_flags =
+ IB_DEVICE_MEM_WINDOW |
+ IB_DEVICE_MEM_MGT_EXTENSIONS |
+ IB_DEVICE_MEM_WINDOW_TYPE_2B |
+ 0;
+ attr->max_send_sge =
+ min(ionic_v1_send_wqe_max_sge(dev->lif_cfg.max_stride, 0, false),
+ IONIC_SPEC_HIGH);
+ attr->max_recv_sge =
+ min(ionic_v1_recv_wqe_max_sge(dev->lif_cfg.max_stride, 0, false),
+ IONIC_SPEC_HIGH);
+ attr->max_sge_rd = attr->max_send_sge;
+ attr->max_cq = dev->lif_cfg.cq_count / dev->lif_cfg.udma_count;
+ attr->max_cqe = IONIC_MAX_CQ_DEPTH - IONIC_CQ_GRACE;
+ attr->max_mr = dev->lif_cfg.nmrs_per_lif;
+ attr->max_pd = IONIC_MAX_PD;
+ attr->max_qp_rd_atom = IONIC_MAX_RD_ATOM;
+ attr->max_ee_rd_atom = 0;
+ attr->max_res_rd_atom = IONIC_MAX_RD_ATOM;
+ attr->max_qp_init_rd_atom = IONIC_MAX_RD_ATOM;
+ attr->max_ee_init_rd_atom = 0;
+ attr->atomic_cap = IB_ATOMIC_GLOB;
+ attr->masked_atomic_cap = IB_ATOMIC_GLOB;
+ attr->max_mw = dev->lif_cfg.nmrs_per_lif;
+ attr->max_mcast_grp = 0;
+ attr->max_mcast_qp_attach = 0;
+ attr->max_ah = dev->lif_cfg.nahs_per_lif;
+ attr->max_fast_reg_page_list_len = dev->lif_cfg.npts_per_lif / 2;
+ attr->max_pkeys = IONIC_PKEY_TBL_LEN;
+
+ return 0;
+}
+
+static int ionic_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *attr)
+{
+ struct net_device *ndev;
+
+ if (port != 1)
+ return -EINVAL;
+
+ ndev = ib_device_get_netdev(ibdev, port);
+
+ if (netif_running(ndev) && netif_carrier_ok(ndev)) {
+ attr->state = IB_PORT_ACTIVE;
+ attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
+ } else if (netif_running(ndev)) {
+ attr->state = IB_PORT_DOWN;
+ attr->phys_state = IB_PORT_PHYS_STATE_POLLING;
+ } else {
+ attr->state = IB_PORT_DOWN;
+ attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
+ }
+
+ attr->max_mtu = iboe_get_mtu(ndev->max_mtu);
+ attr->active_mtu = min(attr->max_mtu, iboe_get_mtu(ndev->mtu));
+ attr->gid_tbl_len = IONIC_GID_TBL_LEN;
+ attr->ip_gids = true;
+ attr->port_cap_flags = 0;
+ attr->max_msg_sz = 0x80000000;
+ attr->pkey_tbl_len = IONIC_PKEY_TBL_LEN;
+ attr->max_vl_num = 1;
+ attr->subnet_prefix = 0xfe80000000000000ull;
+
+ dev_put(ndev);
+
+ return ib_get_eth_speed(ibdev, port,
+ &attr->active_speed,
+ &attr->active_width);
+}
+
+static enum rdma_link_layer ionic_get_link_layer(struct ib_device *ibdev,
+ u32 port)
+{
+ return IB_LINK_LAYER_ETHERNET;
+}
+
+static int ionic_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
+ u16 *pkey)
+{
+ if (port != 1)
+ return -EINVAL;
+
+ if (index != 0)
+ return -EINVAL;
+
+ *pkey = IB_DEFAULT_PKEY_FULL;
+
+ return 0;
+}
+
+static int ionic_modify_device(struct ib_device *ibdev, int mask,
+ struct ib_device_modify *attr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
+
+ if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
+ return -EOPNOTSUPP;
+
+ if (mask & IB_DEVICE_MODIFY_NODE_DESC)
+ memcpy(dev->ibdev.node_desc, attr->node_desc,
+ IB_DEVICE_NODE_DESC_MAX);
+
+ return 0;
+}
+
+static int ionic_get_port_immutable(struct ib_device *ibdev, u32 port,
+ struct ib_port_immutable *attr)
+{
+ if (port != 1)
+ return -EINVAL;
+
+ attr->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+
+ attr->pkey_tbl_len = IONIC_PKEY_TBL_LEN;
+ attr->gid_tbl_len = IONIC_GID_TBL_LEN;
+ attr->max_mad_size = IB_MGMT_MAD_SIZE;
+
+ return 0;
+}
+
+static void ionic_get_dev_fw_str(struct ib_device *ibdev, char *str)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
+
+ ionic_lif_fw_version(dev->lif_cfg.lif, str, IB_FW_VERSION_NAME_MAX);
+}
+
+static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct ionic_ibdev *dev =
+ rdma_device_to_drv_device(device, struct ionic_ibdev, ibdev);
+
+ return sysfs_emit(buf, "0x%x\n", ionic_lif_asic_rev(dev->lif_cfg.lif));
+}
+static DEVICE_ATTR_RO(hw_rev);
+
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct ionic_ibdev *dev =
+ rdma_device_to_drv_device(device, struct ionic_ibdev, ibdev);
+
+ return sysfs_emit(buf, "%s\n", dev->ibdev.node_desc);
+}
+static DEVICE_ATTR_RO(hca_type);
+
+static struct attribute *ionic_rdma_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ NULL
+};
+
+static const struct attribute_group ionic_rdma_attr_group = {
+ .attrs = ionic_rdma_attributes,
+};
+
+static void ionic_disassociate_ucontext(struct ib_ucontext *ibcontext)
+{
+	/*
+	 * Dummy implementation of disassociate_ucontext so that the core
+	 * does not wait for the user context to exit before cleaning up
+	 * hw resources.
+	 */
+}
+
+static const struct ib_device_ops ionic_dev_ops = {
+ .owner = THIS_MODULE,
+ .driver_id = RDMA_DRIVER_IONIC,
+ .uverbs_abi_ver = IONIC_ABI_VERSION,
+
+ .alloc_ucontext = ionic_alloc_ucontext,
+ .dealloc_ucontext = ionic_dealloc_ucontext,
+ .mmap = ionic_mmap,
+ .mmap_free = ionic_mmap_free,
+ .alloc_pd = ionic_alloc_pd,
+ .dealloc_pd = ionic_dealloc_pd,
+ .create_ah = ionic_create_ah,
+ .query_ah = ionic_query_ah,
+ .destroy_ah = ionic_destroy_ah,
+ .create_user_ah = ionic_create_ah,
+ .get_dma_mr = ionic_get_dma_mr,
+ .reg_user_mr = ionic_reg_user_mr,
+ .reg_user_mr_dmabuf = ionic_reg_user_mr_dmabuf,
+ .dereg_mr = ionic_dereg_mr,
+ .alloc_mr = ionic_alloc_mr,
+ .map_mr_sg = ionic_map_mr_sg,
+ .alloc_mw = ionic_alloc_mw,
+ .dealloc_mw = ionic_dealloc_mw,
+ .create_cq = ionic_create_cq,
+ .destroy_cq = ionic_destroy_cq,
+ .create_qp = ionic_create_qp,
+ .modify_qp = ionic_modify_qp,
+ .query_qp = ionic_query_qp,
+ .destroy_qp = ionic_destroy_qp,
+
+ .post_send = ionic_post_send,
+ .post_recv = ionic_post_recv,
+ .poll_cq = ionic_poll_cq,
+ .req_notify_cq = ionic_req_notify_cq,
+
+ .query_device = ionic_query_device,
+ .query_port = ionic_query_port,
+ .get_link_layer = ionic_get_link_layer,
+ .query_pkey = ionic_query_pkey,
+ .modify_device = ionic_modify_device,
+ .get_port_immutable = ionic_get_port_immutable,
+ .get_dev_fw_str = ionic_get_dev_fw_str,
+ .device_group = &ionic_rdma_attr_group,
+ .disassociate_ucontext = ionic_disassociate_ucontext,
+
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, ionic_ctx, ibctx),
+ INIT_RDMA_OBJ_SIZE(ib_pd, ionic_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_ah, ionic_ah, ibah),
+ INIT_RDMA_OBJ_SIZE(ib_cq, ionic_vcq, ibcq),
+ INIT_RDMA_OBJ_SIZE(ib_qp, ionic_qp, ibqp),
+ INIT_RDMA_OBJ_SIZE(ib_mw, ionic_mr, ibmw),
+};
+
+static void ionic_init_resids(struct ionic_ibdev *dev)
+{
+ ionic_resid_init(&dev->inuse_cqid, dev->lif_cfg.cq_count);
+ dev->half_cqid_udma_shift =
+ order_base_2(dev->lif_cfg.cq_count / dev->lif_cfg.udma_count);
+ ionic_resid_init(&dev->inuse_pdid, IONIC_MAX_PD);
+ ionic_resid_init(&dev->inuse_ahid, dev->lif_cfg.nahs_per_lif);
+ ionic_resid_init(&dev->inuse_mrid, dev->lif_cfg.nmrs_per_lif);
+ /* skip reserved lkey */
+ dev->next_mrkey = 1;
+ ionic_resid_init(&dev->inuse_qpid, dev->lif_cfg.qp_count);
+ /* skip reserved SMI and GSI qpids */
+ dev->half_qpid_udma_shift =
+ order_base_2(dev->lif_cfg.qp_count / dev->lif_cfg.udma_count);
+ ionic_resid_init(&dev->inuse_dbid, dev->lif_cfg.dbid_count);
+}
+
+static void ionic_destroy_resids(struct ionic_ibdev *dev)
+{
+ ionic_resid_destroy(&dev->inuse_cqid);
+ ionic_resid_destroy(&dev->inuse_pdid);
+ ionic_resid_destroy(&dev->inuse_ahid);
+ ionic_resid_destroy(&dev->inuse_mrid);
+ ionic_resid_destroy(&dev->inuse_qpid);
+ ionic_resid_destroy(&dev->inuse_dbid);
+}
+
+static void ionic_destroy_ibdev(struct ionic_ibdev *dev)
+{
+ ionic_kill_rdma_admin(dev, false);
+ ib_unregister_device(&dev->ibdev);
+ ionic_stats_cleanup(dev);
+ ionic_destroy_rdma_admin(dev);
+ ionic_destroy_resids(dev);
+ WARN_ON(!xa_empty(&dev->qp_tbl));
+ xa_destroy(&dev->qp_tbl);
+ WARN_ON(!xa_empty(&dev->cq_tbl));
+ xa_destroy(&dev->cq_tbl);
+ ib_dealloc_device(&dev->ibdev);
+}
+
+static struct ionic_ibdev *ionic_create_ibdev(struct ionic_aux_dev *ionic_adev)
+{
+ struct ib_device *ibdev;
+ struct ionic_ibdev *dev;
+ struct net_device *ndev;
+ int rc;
+
+ dev = ib_alloc_device(ionic_ibdev, ibdev);
+ if (!dev)
+ return ERR_PTR(-EINVAL);
+
+ ionic_fill_lif_cfg(ionic_adev->lif, &dev->lif_cfg);
+
+ xa_init_flags(&dev->qp_tbl, GFP_ATOMIC);
+ xa_init_flags(&dev->cq_tbl, GFP_ATOMIC);
+
+ ionic_init_resids(dev);
+
+ rc = ionic_rdma_reset_devcmd(dev);
+ if (rc)
+ goto err_reset;
+
+ rc = ionic_create_rdma_admin(dev);
+ if (rc)
+ goto err_admin;
+
+ ibdev = &dev->ibdev;
+ ibdev->dev.parent = dev->lif_cfg.hwdev;
+
+ strscpy(ibdev->name, "ionic_%d", IB_DEVICE_NAME_MAX);
+ strscpy(ibdev->node_desc, DEVICE_DESCRIPTION, IB_DEVICE_NODE_DESC_MAX);
+
+ ibdev->node_type = RDMA_NODE_IB_CA;
+ ibdev->phys_port_cnt = 1;
+
+	/* the first two EQs are reserved for async events */
+ ibdev->num_comp_vectors = dev->lif_cfg.eq_count - 2;
+
+ ndev = ionic_lif_netdev(ionic_adev->lif);
+ addrconf_ifid_eui48((u8 *)&ibdev->node_guid, ndev);
+ rc = ib_device_set_netdev(ibdev, ndev, 1);
+ /* ionic_lif_netdev() returns ndev with refcount held */
+ dev_put(ndev);
+ if (rc)
+ goto err_admin;
+
+ ib_set_device_ops(&dev->ibdev, &ionic_dev_ops);
+
+ ionic_stats_init(dev);
+
+ rc = ib_register_device(ibdev, "ionic_%d", ibdev->dev.parent);
+ if (rc)
+ goto err_register;
+
+ return dev;
+
+err_register:
+ ionic_stats_cleanup(dev);
+err_admin:
+ ionic_kill_rdma_admin(dev, false);
+ ionic_destroy_rdma_admin(dev);
+err_reset:
+ ionic_destroy_resids(dev);
+ xa_destroy(&dev->qp_tbl);
+ xa_destroy(&dev->cq_tbl);
+ ib_dealloc_device(&dev->ibdev);
+
+ return ERR_PTR(rc);
+}
+
+static int ionic_aux_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct ionic_aux_dev *ionic_adev;
+ struct ionic_ibdev *dev;
+
+ ionic_adev = container_of(adev, struct ionic_aux_dev, adev);
+ dev = ionic_create_ibdev(ionic_adev);
+ if (IS_ERR(dev))
+ return dev_err_probe(&adev->dev, PTR_ERR(dev),
+ "Failed to register ibdev\n");
+
+ auxiliary_set_drvdata(adev, dev);
+ ibdev_dbg(&dev->ibdev, "registered\n");
+
+ return 0;
+}
+
+static void ionic_aux_remove(struct auxiliary_device *adev)
+{
+ struct ionic_ibdev *dev = auxiliary_get_drvdata(adev);
+
+ dev_dbg(&adev->dev, "unregister ibdev\n");
+ ionic_destroy_ibdev(dev);
+ dev_dbg(&adev->dev, "unregistered\n");
+}
+
+static const struct auxiliary_device_id ionic_aux_id_table[] = {
+ { .name = "ionic.rdma", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, ionic_aux_id_table);
+
+static struct auxiliary_driver ionic_aux_r_driver = {
+ .name = "rdma",
+ .probe = ionic_aux_probe,
+ .remove = ionic_aux_remove,
+ .id_table = ionic_aux_id_table,
+};
+
+static int __init ionic_mod_init(void)
+{
+ int rc;
+
+ ionic_evt_workq = create_workqueue(KBUILD_MODNAME "-evt");
+ if (!ionic_evt_workq)
+ return -ENOMEM;
+
+ rc = auxiliary_driver_register(&ionic_aux_r_driver);
+ if (rc)
+ goto err_aux;
+
+ return 0;
+
+err_aux:
+ destroy_workqueue(ionic_evt_workq);
+
+ return rc;
+}
+
+static void __exit ionic_mod_exit(void)
+{
+ auxiliary_driver_unregister(&ionic_aux_r_driver);
+ destroy_workqueue(ionic_evt_workq);
+}
+
+module_init(ionic_mod_init);
+module_exit(ionic_mod_exit);
diff --git a/drivers/infiniband/hw/ionic/ionic_ibdev.h b/drivers/infiniband/hw/ionic/ionic_ibdev.h
new file mode 100644
index 000000000000..82fda1e3cdb6
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_ibdev.h
@@ -0,0 +1,517 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#ifndef _IONIC_IBDEV_H_
+#define _IONIC_IBDEV_H_
+
+#include <rdma/ib_umem.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_pack.h>
+#include <rdma/uverbs_ioctl.h>
+
+#include <rdma/ionic-abi.h>
+#include <ionic_api.h>
+#include <ionic_regs.h>
+
+#include "ionic_fw.h"
+#include "ionic_queue.h"
+#include "ionic_res.h"
+
+#include "ionic_lif_cfg.h"
+
+/* Config knobs */
+#define IONIC_EQ_DEPTH 511
+#define IONIC_EQ_COUNT 32
+#define IONIC_AQ_DEPTH 63
+#define IONIC_AQ_COUNT 4
+#define IONIC_EQ_ISR_BUDGET 10
+#define IONIC_EQ_WORK_BUDGET 1000
+#define IONIC_MAX_RD_ATOM 16
+#define IONIC_PKEY_TBL_LEN 1
+#define IONIC_GID_TBL_LEN 256
+
+#define IONIC_MAX_QPID 0xffffff
+#define IONIC_SPEC_HIGH 8
+#define IONIC_MAX_PD 1024
+#define IONIC_SQCMB_ORDER 5
+#define IONIC_RQCMB_ORDER 0
+
+#define IONIC_META_LAST ((void *)1ul)
+#define IONIC_META_POSTED ((void *)2ul)
+
+#define IONIC_CQ_GRACE 100
+
+#define IONIC_ROCE_UDP_SPORT 28272
+#define IONIC_DMA_LKEY 0
+#define IONIC_DMA_RKEY IONIC_DMA_LKEY
+
+#define IONIC_CMB_SUPPORTED \
+ (IONIC_CMB_ENABLE | IONIC_CMB_REQUIRE | IONIC_CMB_EXPDB | \
+ IONIC_CMB_WC | IONIC_CMB_UC)
+
+/* resource is not reserved on the device, indicated in tbl_order */
+#define IONIC_RES_INVALID -1
+
+struct ionic_aq;
+struct ionic_cq;
+struct ionic_eq;
+struct ionic_vcq;
+
+enum ionic_admin_state {
+ IONIC_ADMIN_ACTIVE, /* submitting admin commands to queue */
+ IONIC_ADMIN_PAUSED, /* not submitting, but may complete normally */
+ IONIC_ADMIN_KILLED, /* not submitting, locally completed */
+};
+
+enum ionic_admin_flags {
+ IONIC_ADMIN_F_BUSYWAIT = BIT(0), /* Don't sleep */
+ IONIC_ADMIN_F_TEARDOWN = BIT(1), /* In destroy path */
+ IONIC_ADMIN_F_INTERRUPT = BIT(2), /* Interruptible w/timeout */
+};
+
+enum ionic_mmap_flag {
+ IONIC_MMAP_WC = BIT(0),
+};
+
+struct ionic_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
+ unsigned long size;
+ unsigned long pfn;
+ u8 mmap_flags;
+};
+
+struct ionic_ibdev {
+ struct ib_device ibdev;
+
+ struct ionic_lif_cfg lif_cfg;
+
+ struct xarray qp_tbl;
+ struct xarray cq_tbl;
+
+ struct ionic_resid_bits inuse_dbid;
+ struct ionic_resid_bits inuse_pdid;
+ struct ionic_resid_bits inuse_ahid;
+ struct ionic_resid_bits inuse_mrid;
+ struct ionic_resid_bits inuse_qpid;
+ struct ionic_resid_bits inuse_cqid;
+
+ u8 half_cqid_udma_shift;
+ u8 half_qpid_udma_shift;
+ u8 next_qpid_udma_idx;
+ u8 next_mrkey;
+
+ struct work_struct reset_work;
+ bool reset_posted;
+ u32 reset_cnt;
+
+ struct delayed_work admin_dwork;
+ struct ionic_aq **aq_vec;
+ atomic_t admin_state;
+
+ struct ionic_eq **eq_vec;
+
+ struct ionic_v1_stat *hw_stats;
+ void *hw_stats_buf;
+ struct rdma_stat_desc *hw_stats_hdrs;
+ struct ionic_counter_stats *counter_stats;
+ int hw_stats_count;
+};
+
+struct ionic_eq {
+ struct ionic_ibdev *dev;
+
+ u32 eqid;
+ u32 intr;
+
+ struct ionic_queue q;
+
+ int armed;
+ bool enable;
+
+ struct work_struct work;
+
+ int irq;
+ char name[32];
+};
+
+struct ionic_admin_wr {
+ struct completion work;
+ struct list_head aq_ent;
+ struct ionic_v1_admin_wqe wqe;
+ struct ionic_v1_cqe cqe;
+ struct ionic_aq *aq;
+ int status;
+};
+
+struct ionic_admin_wr_q {
+ struct ionic_admin_wr *wr;
+ int wqe_strides;
+};
+
+struct ionic_aq {
+ struct ionic_ibdev *dev;
+ struct ionic_vcq *vcq;
+
+ struct work_struct work;
+
+ atomic_t admin_state;
+ unsigned long stamp;
+ bool armed;
+
+ u32 aqid;
+ u32 cqid;
+
+ spinlock_t lock; /* for posting */
+ struct ionic_queue q;
+ struct ionic_admin_wr_q *q_wr;
+ struct list_head wr_prod;
+ struct list_head wr_post;
+};
+
+struct ionic_ctx {
+ struct ib_ucontext ibctx;
+ u32 dbid;
+ struct rdma_user_mmap_entry *mmap_dbell;
+};
+
+struct ionic_tbl_buf {
+ u32 tbl_limit;
+ u32 tbl_pages;
+ size_t tbl_size;
+ __le64 *tbl_buf;
+ dma_addr_t tbl_dma;
+ u8 page_size_log2;
+};
+
+struct ionic_pd {
+ struct ib_pd ibpd;
+
+ u32 pdid;
+ u32 flags;
+};
+
+struct ionic_cq {
+ struct ionic_vcq *vcq;
+
+ u32 cqid;
+ u32 eqid;
+
+ spinlock_t lock; /* for polling */
+ struct list_head poll_sq;
+ bool flush;
+ struct list_head flush_sq;
+ struct list_head flush_rq;
+ struct list_head ibkill_flush_ent;
+
+ struct ionic_queue q;
+ bool color;
+ int credit;
+ u16 arm_any_prod;
+ u16 arm_sol_prod;
+
+ struct kref cq_kref;
+ struct completion cq_rel_comp;
+
+ /* infrequently accessed, keep at end */
+ struct ib_umem *umem;
+};
+
+struct ionic_vcq {
+ struct ib_cq ibcq;
+ struct ionic_cq cq[2];
+ u8 udma_mask;
+ u8 poll_idx;
+};
+
+struct ionic_sq_meta {
+ u64 wrid;
+ u32 len;
+ u16 seq;
+ u8 ibop;
+ u8 ibsts;
+ u8 remote:1;
+ u8 signal:1;
+ u8 local_comp:1;
+};
+
+struct ionic_rq_meta {
+ struct ionic_rq_meta *next;
+ u64 wrid;
+};
+
+struct ionic_qp {
+ struct ib_qp ibqp;
+ enum ib_qp_state state;
+
+ u32 qpid;
+ u32 ahid;
+ u32 sq_cqid;
+ u32 rq_cqid;
+ u8 udma_idx;
+ u8 has_ah:1;
+ u8 has_sq:1;
+ u8 has_rq:1;
+ u8 sig_all:1;
+
+ struct list_head qp_list_counter;
+
+ struct list_head cq_poll_sq;
+ struct list_head cq_flush_sq;
+ struct list_head cq_flush_rq;
+ struct list_head ibkill_flush_ent;
+
+ spinlock_t sq_lock; /* for posting and polling */
+ struct ionic_queue sq;
+ struct ionic_sq_meta *sq_meta;
+ u16 *sq_msn_idx;
+ int sq_spec;
+ u16 sq_old_prod;
+ u16 sq_msn_prod;
+ u16 sq_msn_cons;
+ u8 sq_cmb;
+ bool sq_flush;
+ bool sq_flush_rcvd;
+
+ spinlock_t rq_lock; /* for posting and polling */
+ struct ionic_queue rq;
+ struct ionic_rq_meta *rq_meta;
+ struct ionic_rq_meta *rq_meta_head;
+ int rq_spec;
+ u16 rq_old_prod;
+ u8 rq_cmb;
+ bool rq_flush;
+
+ struct kref qp_kref;
+ struct completion qp_rel_comp;
+
+ /* infrequently accessed, keep at end */
+ int sgid_index;
+ int sq_cmb_order;
+ u32 sq_cmb_pgid;
+ phys_addr_t sq_cmb_addr;
+ struct rdma_user_mmap_entry *mmap_sq_cmb;
+
+ struct ib_umem *sq_umem;
+
+ int rq_cmb_order;
+ u32 rq_cmb_pgid;
+ phys_addr_t rq_cmb_addr;
+ struct rdma_user_mmap_entry *mmap_rq_cmb;
+
+ struct ib_umem *rq_umem;
+
+ int dcqcn_profile;
+
+ struct ib_ud_header *hdr;
+};
+
+struct ionic_ah {
+ struct ib_ah ibah;
+ u32 ahid;
+ int sgid_index;
+ struct ib_ud_header hdr;
+};
+
+struct ionic_mr {
+ union {
+ struct ib_mr ibmr;
+ struct ib_mw ibmw;
+ };
+
+ u32 mrid;
+ int flags;
+
+ struct ib_umem *umem;
+ struct ionic_tbl_buf buf;
+ bool created;
+};
+
+struct ionic_counter_stats {
+ int queue_stats_count;
+ struct ionic_v1_stat *hdr;
+ struct rdma_stat_desc *stats_hdrs;
+ struct xarray xa_counters;
+};
+
+struct ionic_counter {
+ void *vals;
+ struct list_head qp_list;
+};
+
+static inline struct ionic_ibdev *to_ionic_ibdev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct ionic_ibdev, ibdev);
+}
+
+static inline struct ionic_ctx *to_ionic_ctx(struct ib_ucontext *ibctx)
+{
+ return container_of(ibctx, struct ionic_ctx, ibctx);
+}
+
+static inline struct ionic_ctx *to_ionic_ctx_uobj(struct ib_uobject *uobj)
+{
+ if (!uobj)
+ return NULL;
+
+ if (!uobj->context)
+ return NULL;
+
+ return to_ionic_ctx(uobj->context);
+}
+
+static inline struct ionic_pd *to_ionic_pd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct ionic_pd, ibpd);
+}
+
+static inline struct ionic_mr *to_ionic_mr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct ionic_mr, ibmr);
+}
+
+static inline struct ionic_mr *to_ionic_mw(struct ib_mw *ibmw)
+{
+ return container_of(ibmw, struct ionic_mr, ibmw);
+}
+
+static inline struct ionic_vcq *to_ionic_vcq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct ionic_vcq, ibcq);
+}
+
+static inline struct ionic_cq *to_ionic_vcq_cq(struct ib_cq *ibcq,
+ uint8_t udma_idx)
+{
+ return &to_ionic_vcq(ibcq)->cq[udma_idx];
+}
+
+static inline struct ionic_qp *to_ionic_qp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct ionic_qp, ibqp);
+}
+
+static inline struct ionic_ah *to_ionic_ah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct ionic_ah, ibah);
+}
+
+static inline u32 ionic_ctx_dbid(struct ionic_ibdev *dev,
+ struct ionic_ctx *ctx)
+{
+ if (!ctx)
+ return dev->lif_cfg.dbid;
+
+ return ctx->dbid;
+}
+
+static inline u32 ionic_obj_dbid(struct ionic_ibdev *dev,
+ struct ib_uobject *uobj)
+{
+ return ionic_ctx_dbid(dev, to_ionic_ctx_uobj(uobj));
+}
+
+static inline bool ionic_ibop_is_local(enum ib_wr_opcode op)
+{
+ return op == IB_WR_LOCAL_INV || op == IB_WR_REG_MR;
+}
+
+static inline void ionic_qp_complete(struct kref *kref)
+{
+ struct ionic_qp *qp = container_of(kref, struct ionic_qp, qp_kref);
+
+ complete(&qp->qp_rel_comp);
+}
+
+static inline void ionic_cq_complete(struct kref *kref)
+{
+ struct ionic_cq *cq = container_of(kref, struct ionic_cq, cq_kref);
+
+ complete(&cq->cq_rel_comp);
+}
+
+/* ionic_admin.c */
+extern struct workqueue_struct *ionic_evt_workq;
+void ionic_admin_post(struct ionic_ibdev *dev, struct ionic_admin_wr *wr);
+int ionic_admin_wait(struct ionic_ibdev *dev, struct ionic_admin_wr *wr,
+ enum ionic_admin_flags);
+
+int ionic_rdma_reset_devcmd(struct ionic_ibdev *dev);
+
+int ionic_create_rdma_admin(struct ionic_ibdev *dev);
+void ionic_destroy_rdma_admin(struct ionic_ibdev *dev);
+void ionic_kill_rdma_admin(struct ionic_ibdev *dev, bool fatal_path);
+
+/* ionic_controlpath.c */
+int ionic_create_cq_common(struct ionic_vcq *vcq,
+ struct ionic_tbl_buf *buf,
+ const struct ib_cq_init_attr *attr,
+ struct ionic_ctx *ctx,
+ struct ib_udata *udata,
+ struct ionic_qdesc *req_cq,
+ __u32 *resp_cqid,
+ int udma_idx);
+void ionic_destroy_cq_common(struct ionic_ibdev *dev, struct ionic_cq *cq);
+void ionic_flush_qp(struct ionic_ibdev *dev, struct ionic_qp *qp);
+void ionic_notify_flush_cq(struct ionic_cq *cq);
+
+int ionic_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *udata);
+void ionic_dealloc_ucontext(struct ib_ucontext *ibctx);
+int ionic_mmap(struct ib_ucontext *ibctx, struct vm_area_struct *vma);
+void ionic_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
+int ionic_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
+int ionic_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
+int ionic_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata);
+int ionic_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
+int ionic_destroy_ah(struct ib_ah *ibah, u32 flags);
+struct ib_mr *ionic_get_dma_mr(struct ib_pd *ibpd, int access);
+struct ib_mr *ionic_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
+ u64 addr, int access, struct ib_dmah *dmah,
+ struct ib_udata *udata);
+struct ib_mr *ionic_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 offset,
+ u64 length, u64 addr, int fd, int access,
+ struct ib_dmah *dmah,
+ struct uverbs_attr_bundle *attrs);
+int ionic_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
+struct ib_mr *ionic_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type type,
+ u32 max_sg);
+int ionic_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset);
+int ionic_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata);
+int ionic_dealloc_mw(struct ib_mw *ibmw);
+int ionic_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs);
+int ionic_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+int ionic_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
+ struct ib_udata *udata);
+int ionic_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
+ struct ib_udata *udata);
+int ionic_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
+ struct ib_qp_init_attr *init_attr);
+int ionic_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
+
+/* ionic_datapath.c */
+int ionic_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad);
+int ionic_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad);
+int ionic_poll_cq(struct ib_cq *ibcq, int nwc, struct ib_wc *wc);
+int ionic_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+
+/* ionic_hw_stats.c */
+void ionic_stats_init(struct ionic_ibdev *dev);
+void ionic_stats_cleanup(struct ionic_ibdev *dev);
+
+/* ionic_pgtbl.c */
+__le64 ionic_pgtbl_dma(struct ionic_tbl_buf *buf, u64 va);
+__be64 ionic_pgtbl_off(struct ionic_tbl_buf *buf, u64 va);
+int ionic_pgtbl_page(struct ionic_tbl_buf *buf, u64 dma);
+int ionic_pgtbl_init(struct ionic_ibdev *dev,
+ struct ionic_tbl_buf *buf,
+ struct ib_umem *umem,
+ dma_addr_t dma,
+ int limit,
+ u64 page_size);
+void ionic_pgtbl_unbuf(struct ionic_ibdev *dev, struct ionic_tbl_buf *buf);
+#endif /* _IONIC_IBDEV_H_ */
diff --git a/drivers/infiniband/hw/ionic/ionic_lif_cfg.c b/drivers/infiniband/hw/ionic/ionic_lif_cfg.c
new file mode 100644
index 000000000000..f3cd281c3a2f
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_lif_cfg.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/kernel.h>
+
+#include <ionic.h>
+#include <ionic_lif.h>
+
+#include "ionic_lif_cfg.h"
+
+#define IONIC_MIN_RDMA_VERSION 0
+#define IONIC_MAX_RDMA_VERSION 2
+
+static u8 ionic_get_expdb(struct ionic_lif *lif)
+{
+ u8 expdb_support = 0;
+
+ if (lif->ionic->idev.phy_cmb_expdb64_pages)
+ expdb_support |= IONIC_EXPDB_64B_WQE;
+ if (lif->ionic->idev.phy_cmb_expdb128_pages)
+ expdb_support |= IONIC_EXPDB_128B_WQE;
+ if (lif->ionic->idev.phy_cmb_expdb256_pages)
+ expdb_support |= IONIC_EXPDB_256B_WQE;
+ if (lif->ionic->idev.phy_cmb_expdb512_pages)
+ expdb_support |= IONIC_EXPDB_512B_WQE;
+
+ return expdb_support;
+}
+
+void ionic_fill_lif_cfg(struct ionic_lif *lif, struct ionic_lif_cfg *cfg)
+{
+ union ionic_lif_identity *ident = &lif->ionic->ident.lif;
+
+ cfg->lif = lif;
+ cfg->hwdev = &lif->ionic->pdev->dev;
+ cfg->lif_index = lif->index;
+ cfg->lif_hw_index = lif->hw_index;
+
+ cfg->dbid = lif->kern_pid;
+ cfg->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
+ cfg->dbpage = lif->kern_dbpage;
+ cfg->intr_ctrl = lif->ionic->idev.intr_ctrl;
+
+ cfg->db_phys = lif->ionic->bars[IONIC_PCI_BAR_DBELL].bus_addr;
+
+ if (IONIC_VERSION(ident->rdma.version, ident->rdma.minor_version) >=
+ IONIC_VERSION(2, 1))
+ cfg->page_size_supported =
+ le64_to_cpu(ident->rdma.page_size_cap);
+ else
+ cfg->page_size_supported = IONIC_PAGE_SIZE_SUPPORTED;
+
+ cfg->rdma_version = ident->rdma.version;
+ cfg->qp_opcodes = ident->rdma.qp_opcodes;
+ cfg->admin_opcodes = ident->rdma.admin_opcodes;
+
+ cfg->stats_type = le16_to_cpu(ident->rdma.stats_type);
+ cfg->npts_per_lif = le32_to_cpu(ident->rdma.npts_per_lif);
+ cfg->nmrs_per_lif = le32_to_cpu(ident->rdma.nmrs_per_lif);
+ cfg->nahs_per_lif = le32_to_cpu(ident->rdma.nahs_per_lif);
+
+ cfg->aq_base = le32_to_cpu(ident->rdma.aq_qtype.qid_base);
+ cfg->cq_base = le32_to_cpu(ident->rdma.cq_qtype.qid_base);
+ cfg->eq_base = le32_to_cpu(ident->rdma.eq_qtype.qid_base);
+
+ /*
+ * ionic_create_rdma_admin() may reduce aq_count or eq_count if
+ * it is unable to allocate all that were requested.
+ * aq_count is tunable; see ionic_aq_count
+ * eq_count is tunable; see ionic_eq_count
+ */
+ cfg->aq_count = le32_to_cpu(ident->rdma.aq_qtype.qid_count);
+ cfg->eq_count = le32_to_cpu(ident->rdma.eq_qtype.qid_count);
+ cfg->cq_count = le32_to_cpu(ident->rdma.cq_qtype.qid_count);
+ cfg->qp_count = le32_to_cpu(ident->rdma.sq_qtype.qid_count);
+ cfg->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
+
+ cfg->aq_qtype = ident->rdma.aq_qtype.qtype;
+ cfg->sq_qtype = ident->rdma.sq_qtype.qtype;
+ cfg->rq_qtype = ident->rdma.rq_qtype.qtype;
+ cfg->cq_qtype = ident->rdma.cq_qtype.qtype;
+ cfg->eq_qtype = ident->rdma.eq_qtype.qtype;
+ cfg->udma_qgrp_shift = ident->rdma.udma_shift;
+ cfg->udma_count = 2;
+
+ cfg->max_stride = ident->rdma.max_stride;
+ cfg->expdb_mask = ionic_get_expdb(lif);
+
+ cfg->sq_expdb =
+ !!(lif->qtype_info[IONIC_QTYPE_TXQ].features & IONIC_QIDENT_F_EXPDB);
+ cfg->rq_expdb =
+ !!(lif->qtype_info[IONIC_QTYPE_RXQ].features & IONIC_QIDENT_F_EXPDB);
+}
+
+struct net_device *ionic_lif_netdev(struct ionic_lif *lif)
+{
+ struct net_device *netdev = lif->netdev;
+
+ dev_hold(netdev);
+ return netdev;
+}
+
+void ionic_lif_fw_version(struct ionic_lif *lif, char *str, size_t len)
+{
+ strscpy(str, lif->ionic->idev.dev_info.fw_version, len);
+}
+
+u8 ionic_lif_asic_rev(struct ionic_lif *lif)
+{
+ return lif->ionic->idev.dev_info.asic_rev;
+}
diff --git a/drivers/infiniband/hw/ionic/ionic_lif_cfg.h b/drivers/infiniband/hw/ionic/ionic_lif_cfg.h
new file mode 100644
index 000000000000..20853429f623
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_lif_cfg.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#ifndef _IONIC_LIF_CFG_H_
+#define _IONIC_LIF_CFG_H_
+
+#define IONIC_VERSION(a, b) (((a) << 16) + ((b) << 8))
+#define IONIC_PAGE_SIZE_SUPPORTED	0x40201000 /* 4KB, 2MB, 1GB */
+
+#define IONIC_EXPDB_64B_WQE BIT(0)
+#define IONIC_EXPDB_128B_WQE BIT(1)
+#define IONIC_EXPDB_256B_WQE BIT(2)
+#define IONIC_EXPDB_512B_WQE BIT(3)
+
+struct ionic_lif_cfg {
+ struct device *hwdev;
+ struct ionic_lif *lif;
+
+ int lif_index;
+ int lif_hw_index;
+
+ u32 dbid;
+ int dbid_count;
+ u64 __iomem *dbpage;
+ struct ionic_intr __iomem *intr_ctrl;
+ phys_addr_t db_phys;
+
+ u64 page_size_supported;
+ u32 npts_per_lif;
+ u32 nmrs_per_lif;
+ u32 nahs_per_lif;
+
+ u32 aq_base;
+ u32 cq_base;
+ u32 eq_base;
+
+ int aq_count;
+ int eq_count;
+ int cq_count;
+ int qp_count;
+
+ u16 stats_type;
+ u8 aq_qtype;
+ u8 sq_qtype;
+ u8 rq_qtype;
+ u8 cq_qtype;
+ u8 eq_qtype;
+
+ u8 udma_count;
+ u8 udma_qgrp_shift;
+
+ u8 rdma_version;
+ u8 qp_opcodes;
+ u8 admin_opcodes;
+
+ u8 max_stride;
+ bool sq_expdb;
+ bool rq_expdb;
+ u8 expdb_mask;
+};
+
+void ionic_fill_lif_cfg(struct ionic_lif *lif, struct ionic_lif_cfg *cfg);
+struct net_device *ionic_lif_netdev(struct ionic_lif *lif);
+void ionic_lif_fw_version(struct ionic_lif *lif, char *str, size_t len);
+u8 ionic_lif_asic_rev(struct ionic_lif *lif);
+
+#endif /* _IONIC_LIF_CFG_H_ */
diff --git a/drivers/infiniband/hw/ionic/ionic_pgtbl.c b/drivers/infiniband/hw/ionic/ionic_pgtbl.c
new file mode 100644
index 000000000000..e74db73c9246
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_pgtbl.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/mman.h>
+#include <linux/dma-mapping.h>
+
+#include "ionic_fw.h"
+#include "ionic_ibdev.h"
+
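+/*
+ * Return the DMA address to program into the device for this page
+ * table: zero if there are no pages, the address of the page table
+ * itself when indirection is used (more than one page), or the
+ * single page's address offset by @va within the page.
+ */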
+__le64 ionic_pgtbl_dma(struct ionic_tbl_buf *buf, u64 va)
+{
+ u64 pg_mask = BIT_ULL(buf->page_size_log2) - 1;
+ u64 dma;
+
+ if (!buf->tbl_pages)
+ return cpu_to_le64(0);
+
+ if (buf->tbl_pages > 1)
+ return cpu_to_le64(buf->tbl_dma);
+
+ if (buf->tbl_buf)
+ dma = le64_to_cpu(buf->tbl_buf[0]);
+ else
+ dma = buf->tbl_dma;
+
+ return cpu_to_le64(dma + (va & pg_mask));
+}
+
+__be64 ionic_pgtbl_off(struct ionic_tbl_buf *buf, u64 va)
+{
+ if (buf->tbl_pages > 1) {
+ u64 pg_mask = BIT_ULL(buf->page_size_log2) - 1;
+
+ return cpu_to_be64(va & pg_mask);
+ }
+
+ return 0;
+}
+
+int ionic_pgtbl_page(struct ionic_tbl_buf *buf, u64 dma)
+{
+ if (unlikely(buf->tbl_pages == buf->tbl_limit))
+ return -ENOMEM;
+
+ if (buf->tbl_buf)
+ buf->tbl_buf[buf->tbl_pages] = cpu_to_le64(dma);
+ else
+ buf->tbl_dma = dma;
+
+ ++buf->tbl_pages;
+
+ return 0;
+}
+
+static int ionic_tbl_buf_alloc(struct ionic_ibdev *dev,
+ struct ionic_tbl_buf *buf)
+{
+ int rc;
+
+ buf->tbl_size = buf->tbl_limit * sizeof(*buf->tbl_buf);
+ buf->tbl_buf = kmalloc(buf->tbl_size, GFP_KERNEL);
+ if (!buf->tbl_buf)
+ return -ENOMEM;
+
+ buf->tbl_dma = dma_map_single(dev->lif_cfg.hwdev, buf->tbl_buf,
+ buf->tbl_size, DMA_TO_DEVICE);
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, buf->tbl_dma);
+ if (rc) {
+ kfree(buf->tbl_buf);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int ionic_pgtbl_umem(struct ionic_tbl_buf *buf, struct ib_umem *umem)
+{
+ struct ib_block_iter biter;
+ u64 page_dma;
+ int rc;
+
+ rdma_umem_for_each_dma_block(umem, &biter, BIT_ULL(buf->page_size_log2)) {
+ page_dma = rdma_block_iter_dma_address(&biter);
+ rc = ionic_pgtbl_page(buf, page_dma);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+void ionic_pgtbl_unbuf(struct ionic_ibdev *dev, struct ionic_tbl_buf *buf)
+{
+ if (buf->tbl_buf)
+ dma_unmap_single(dev->lif_cfg.hwdev, buf->tbl_dma,
+ buf->tbl_size, DMA_TO_DEVICE);
+
+ kfree(buf->tbl_buf);
+ memset(buf, 0, sizeof(*buf));
+}
+
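+/*
+ * Build the page table for a region described by either a umem or a
+ * single contiguous DMA address. A one-page region skips the table
+ * allocation entirely and stores the address in tbl_dma (direct
+ * translation); larger regions get a table of little-endian entries
+ * mapped DMA_TO_DEVICE.
+ */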
+int ionic_pgtbl_init(struct ionic_ibdev *dev,
+ struct ionic_tbl_buf *buf,
+ struct ib_umem *umem,
+ dma_addr_t dma,
+ int limit,
+ u64 page_size)
+{
+ int rc;
+
+ memset(buf, 0, sizeof(*buf));
+
+ if (umem) {
+ limit = ib_umem_num_dma_blocks(umem, page_size);
+ buf->page_size_log2 = order_base_2(page_size);
+ }
+
+ if (limit < 1)
+ return -EINVAL;
+
+ buf->tbl_limit = limit;
+
+ /* skip pgtbl if contiguous / direct translation */
+ if (limit > 1) {
+ rc = ionic_tbl_buf_alloc(dev, buf);
+ if (rc)
+ return rc;
+ }
+
+ if (umem)
+ rc = ionic_pgtbl_umem(buf, umem);
+ else
+ rc = ionic_pgtbl_page(buf, dma);
+
+ if (rc)
+ goto err_unbuf;
+
+ return 0;
+
+err_unbuf:
+ ionic_pgtbl_unbuf(dev, buf);
+ return rc;
+}
diff --git a/drivers/infiniband/hw/ionic/ionic_queue.c b/drivers/infiniband/hw/ionic/ionic_queue.c
new file mode 100644
index 000000000000..aa897ed2a412
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_queue.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/dma-mapping.h>
+
+#include "ionic_queue.h"
+
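+/*
+ * Queue sizing example: depth 511 with a 64-byte stride gives
+ * depth_log2 = order_base_2(512) = 9 and stride_log2 = 6, so the
+ * ring is 2^15 = 32 KiB with mask 511. Depth is rounded up to one
+ * less than a power of two, and small rings are padded out to at
+ * least one page.
+ */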
+int ionic_queue_init(struct ionic_queue *q, struct device *dma_dev,
+ int depth, size_t stride)
+{
+ if (depth < 0 || depth > 0xffff)
+ return -EINVAL;
+
+ if (stride == 0 || stride > 0x10000)
+ return -EINVAL;
+
+ if (depth == 0)
+ depth = 1;
+
+ q->depth_log2 = order_base_2(depth + 1);
+ q->stride_log2 = order_base_2(stride);
+
+ if (q->depth_log2 + q->stride_log2 < PAGE_SHIFT)
+ q->depth_log2 = PAGE_SHIFT - q->stride_log2;
+
+ if (q->depth_log2 > 16 || q->stride_log2 > 16)
+ return -EINVAL;
+
+ q->size = BIT_ULL(q->depth_log2 + q->stride_log2);
+ q->mask = BIT(q->depth_log2) - 1;
+
+ q->ptr = dma_alloc_coherent(dma_dev, q->size, &q->dma, GFP_KERNEL);
+ if (!q->ptr)
+ return -ENOMEM;
+
+ /* it will always be page aligned, but just to be sure... */
+ if (!PAGE_ALIGNED(q->ptr)) {
+ dma_free_coherent(dma_dev, q->size, q->ptr, q->dma);
+ return -ENOMEM;
+ }
+
+ q->prod = 0;
+ q->cons = 0;
+ q->dbell = 0;
+
+ return 0;
+}
+
+void ionic_queue_destroy(struct ionic_queue *q, struct device *dma_dev)
+{
+ dma_free_coherent(dma_dev, q->size, q->ptr, q->dma);
+}
diff --git a/drivers/infiniband/hw/ionic/ionic_queue.h b/drivers/infiniband/hw/ionic/ionic_queue.h
new file mode 100644
index 000000000000..d18020d4cad5
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_queue.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#ifndef _IONIC_QUEUE_H_
+#define _IONIC_QUEUE_H_
+
+#include <linux/io.h>
+#include <ionic_regs.h>
+
+#define IONIC_MAX_DEPTH 0xffff
+#define IONIC_MAX_CQ_DEPTH 0xffff
+#define IONIC_CQ_RING_ARM IONIC_DBELL_RING_1
+#define IONIC_CQ_RING_SOL IONIC_DBELL_RING_2
+
+/**
+ * struct ionic_queue - Ring buffer used between device and driver
+ * @size: Size of the buffer, in bytes
+ * @dma: Dma address of the buffer
+ * @ptr: Buffer virtual address
+ * @prod: Driver position in the queue
+ * @cons: Device position in the queue
+ * @mask: Capacity of the queue, subtracting the hole
+ * This value is equal to ((1 << depth_log2) - 1)
+ * @depth_log2: Log base two size depth of the queue
+ * @stride_log2: Log base two size of an element in the queue
+ * @dbell: Doorbell identifying bits
+ */
+struct ionic_queue {
+ size_t size;
+ dma_addr_t dma;
+ void *ptr;
+ u16 prod;
+ u16 cons;
+ u16 mask;
+ u8 depth_log2;
+ u8 stride_log2;
+ u64 dbell;
+};
+
+/**
+ * ionic_queue_init() - Initialize a DMA-coherent queue
+ * @q: Uninitialized queue structure
+ * @dma_dev: DMA device for mapping
+ * @depth: Depth of the queue
+ * @stride: Size of each element of the queue
+ *
+ * Return: status code
+ */
+int ionic_queue_init(struct ionic_queue *q, struct device *dma_dev,
+ int depth, size_t stride);
+
+/**
+ * ionic_queue_destroy() - Destroy a DMA-coherent queue
+ * @q: Queue structure
+ * @dma_dev: DMA device for mapping
+ */
+void ionic_queue_destroy(struct ionic_queue *q, struct device *dma_dev);
+
+/**
+ * ionic_queue_empty() - Test if queue is empty
+ * @q: Queue structure
+ *
+ * This is only valid for to-device queues.
+ *
+ * Return: is empty
+ */
+static inline bool ionic_queue_empty(struct ionic_queue *q)
+{
+ return q->prod == q->cons;
+}
+
+/**
+ * ionic_queue_length() - Get the current length of the queue
+ * @q: Queue structure
+ *
+ * This is only valid for to-device queues.
+ *
+ * Return: length
+ */
+static inline u16 ionic_queue_length(struct ionic_queue *q)
+{
+ return (q->prod - q->cons) & q->mask;
+}
+
+/**
+ * ionic_queue_length_remaining() - Get the remaining length of the queue
+ * @q: Queue structure
+ *
+ * This is only valid for to-device queues.
+ *
+ * Return: length remaining
+ */
+static inline u16 ionic_queue_length_remaining(struct ionic_queue *q)
+{
+ return q->mask - ionic_queue_length(q);
+}
+
+/**
+ * ionic_queue_full() - Test if queue is full
+ * @q: Queue structure
+ *
+ * This is only valid for to-device queues.
+ *
+ * Return: is full
+ */
+static inline bool ionic_queue_full(struct ionic_queue *q)
+{
+ return q->mask == ionic_queue_length(q);
+}
+
+/**
+ * ionic_color_wrap() - Flip the color if prod is wrapped
+ * @prod: Queue index just after advancing
+ * @color: Queue color just prior to advancing the index
+ *
+ * Return: color after advancing the index
+ */
+static inline bool ionic_color_wrap(u16 prod, bool color)
+{
+ /* logical xor color with (prod == 0) */
+ return color != (prod == 0);
+}
+
+/**
+ * ionic_queue_at() - Get the element at the given index
+ * @q: Queue structure
+ * @idx: Index in the queue
+ *
+ * The index must be within the bounds of the queue. It is not checked here.
+ *
+ * Return: pointer to element at index
+ */
+static inline void *ionic_queue_at(struct ionic_queue *q, u16 idx)
+{
+ return q->ptr + ((unsigned long)idx << q->stride_log2);
+}
+
+/**
+ * ionic_queue_at_prod() - Get the element at the producer index
+ * @q: Queue structure
+ *
+ * Return: pointer to element at producer index
+ */
+static inline void *ionic_queue_at_prod(struct ionic_queue *q)
+{
+ return ionic_queue_at(q, q->prod);
+}
+
+/**
+ * ionic_queue_at_cons() - Get the element at the consumer index
+ * @q: Queue structure
+ *
+ * Return: pointer to element at consumer index
+ */
+static inline void *ionic_queue_at_cons(struct ionic_queue *q)
+{
+ return ionic_queue_at(q, q->cons);
+}
+
+/**
+ * ionic_queue_next() - Compute the next index
+ * @q: Queue structure
+ * @idx: Index
+ *
+ * Return: next index after idx
+ */
+static inline u16 ionic_queue_next(struct ionic_queue *q, u16 idx)
+{
+ return (idx + 1) & q->mask;
+}
+
+/**
+ * ionic_queue_produce() - Increase the producer index
+ * @q: Queue structure
+ *
+ * Caller must ensure that the queue is not full. It is not checked here.
+ */
+static inline void ionic_queue_produce(struct ionic_queue *q)
+{
+ q->prod = ionic_queue_next(q, q->prod);
+}
+
+/**
+ * ionic_queue_consume() - Increase the consumer index
+ * @q: Queue structure
+ *
+ * Caller must ensure that the queue is not empty. It is not checked here.
+ *
+ * This is only valid for to-device queues.
+ */
+static inline void ionic_queue_consume(struct ionic_queue *q)
+{
+ q->cons = ionic_queue_next(q, q->cons);
+}
+
+/**
+ * ionic_queue_consume_entries() - Increase the consumer index by entries
+ * @q: Queue structure
+ * @entries: Number of entries to increment
+ *
+ * Caller must ensure that the queue is not empty. It is not checked here.
+ *
+ * This is only valid for to-device queues.
+ */
+static inline void ionic_queue_consume_entries(struct ionic_queue *q,
+ u16 entries)
+{
+ q->cons = (q->cons + entries) & q->mask;
+}
+
+/**
+ * ionic_queue_dbell_init() - Initialize doorbell bits for queue id
+ * @q: Queue structure
+ * @qid: Queue identifying number
+ */
+static inline void ionic_queue_dbell_init(struct ionic_queue *q, u32 qid)
+{
+ q->dbell = IONIC_DBELL_QID(qid);
+}
+
+/**
+ * ionic_queue_dbell_val() - Get current doorbell update value
+ * @q: Queue structure
+ *
+ * Return: current doorbell update value
+ */
+static inline u64 ionic_queue_dbell_val(struct ionic_queue *q)
+{
+ return q->dbell | q->prod;
+}
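+
+/*
+ * Illustrative use: after ionic_queue_produce() advances prod, the
+ * driver writes ionic_queue_dbell_val(q) (the queue id bits OR'd
+ * with the new producer index) to the doorbell register to notify
+ * the device.
+ */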
+
+#endif /* _IONIC_QUEUE_H_ */
diff --git a/drivers/infiniband/hw/ionic/ionic_res.h b/drivers/infiniband/hw/ionic/ionic_res.h
new file mode 100644
index 000000000000..46c8c584bd9a
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_res.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#ifndef _IONIC_RES_H_
+#define _IONIC_RES_H_
+
+#include <linux/kernel.h>
+#include <linux/idr.h>
+
+/**
+ * struct ionic_resid_bits - Number allocator based on IDA
+ *
+ * @inuse: IDA handle
+ * @inuse_size: Highest ID limit for IDA
+ */
+struct ionic_resid_bits {
+ struct ida inuse;
+ unsigned int inuse_size;
+};
+
+/**
+ * ionic_resid_init() - Initialize a resid allocator
+ * @resid: Uninitialized resid allocator
+ * @size: Capacity of the allocator
+ */
+static inline void ionic_resid_init(struct ionic_resid_bits *resid,
+ unsigned int size)
+{
+ resid->inuse_size = size;
+ ida_init(&resid->inuse);
+}
+
+/**
+ * ionic_resid_destroy() - Destroy a resid allocator
+ * @resid: Resid allocator
+ */
+static inline void ionic_resid_destroy(struct ionic_resid_bits *resid)
+{
+ ida_destroy(&resid->inuse);
+}
+
+/**
+ * ionic_resid_get_shared() - Allocate an available shared resource id
+ * @resid: Resid allocator
+ * @min: Smallest valid resource id
+ * @size: One after largest valid resource id
+ *
+ * Return: Resource id, or negative error number
+ */
+static inline int ionic_resid_get_shared(struct ionic_resid_bits *resid,
+ unsigned int min,
+ unsigned int size)
+{
+ return ida_alloc_range(&resid->inuse, min, size - 1, GFP_KERNEL);
+}
+
+/**
+ * ionic_resid_get() - Allocate an available resource id
+ * @resid: Resid allocator
+ *
+ * Return: Resource id, or negative error number
+ */
+static inline int ionic_resid_get(struct ionic_resid_bits *resid)
+{
+ return ionic_resid_get_shared(resid, 0, resid->inuse_size);
+}
+
+/**
+ * ionic_resid_put() - Free a resource id
+ * @resid: Resid allocator
+ * @id: Resource id
+ */
+static inline void ionic_resid_put(struct ionic_resid_bits *resid, int id)
+{
+ ida_free(&resid->inuse, id);
+}
+
+/**
+ * ionic_bitid_to_qid() - Transform a resource bit index into a queue id
+ * @bitid: Bit index
+ * @qgrp_shift: Log2 number of queues per queue group
+ * @half_qid_shift: Log2 of half the total number of queues
+ *
+ * Return: Queue id
+ *
+ * Udma-constrained queues (QPs and CQs) are associated with their udma by
+ * queue group. Even queue groups are associated with udma0, and odd queue
+ * groups with udma1.
+ *
+ * For allocating queue ids, we want to arrange the bits into two halves,
+ * with the even queue groups of udma0 in the lower half of the bitset,
+ * and the odd queue groups of udma1 in the upper half of the bitset.
+ * Then, one or two calls of find_next_zero_bit can examine all the bits
+ * for queues of an entire udma.
+ *
+ * For example, assuming eight queue groups with qgrp qids per group:
+ *
+ * bitid 0*qgrp..1*qgrp-1 : qid 0*qgrp..1*qgrp-1
+ * bitid 1*qgrp..2*qgrp-1 : qid 2*qgrp..3*qgrp-1
+ * bitid 2*qgrp..3*qgrp-1 : qid 4*qgrp..5*qgrp-1
+ * bitid 3*qgrp..4*qgrp-1 : qid 6*qgrp..7*qgrp-1
+ * bitid 4*qgrp..5*qgrp-1 : qid 1*qgrp..2*qgrp-1
+ * bitid 5*qgrp..6*qgrp-1 : qid 3*qgrp..4*qgrp-1
+ * bitid 6*qgrp..7*qgrp-1 : qid 5*qgrp..6*qgrp-1
+ * bitid 7*qgrp..8*qgrp-1 : qid 7*qgrp..8*qgrp-1
+ *
+ * There are three important ranges of bits in the qid. There is the udma
+ * bit "U" at qgrp_shift, which is the least significant bit of the group
+ * index, and determines which udma a queue is associated with.
+ * The bits of lesser significance we can call the idx bits "I", which are
+ * the index of the queue within the group. The bits of greater significance
+ * we can call the grp bits "G", which are other bits of the group index that
+ * do not determine the udma. Those bits are just rearranged in the bit index
+ * in the bitset. A bitid has the udma bit in the most significant place,
+ * then the grp bits, then the idx bits.
+ *
+ * bitid: 00000000000000 U GGG IIIIII
+ * qid: 00000000000000 GGG U IIIIII
+ *
+ * Transforming from bit index to qid, or from qid to bit index, can be
+ * accomplished by rearranging the bits by masking and shifting.
+ */
+static inline u32 ionic_bitid_to_qid(u32 bitid, u8 qgrp_shift,
+ u8 half_qid_shift)
+{
+ u32 udma_bit =
+ (bitid & BIT(half_qid_shift)) >> (half_qid_shift - qgrp_shift);
+ u32 grp_bits = (bitid & GENMASK(half_qid_shift - 1, qgrp_shift)) << 1;
+ u32 idx_bits = bitid & (BIT(qgrp_shift) - 1);
+
+ return grp_bits | udma_bit | idx_bits;
+}
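+
+/*
+ * Worked example: with qgrp_shift = 6 and half_qid_shift = 9,
+ * bitid 643 (0b10_1000_0011: U = 1, GGG = 0b010, idx = 3) maps to
+ * qid 323 (0b1_0100_0011: GGG = 0b010, U = 1, idx = 3), i.e. queue
+ * group 5 (= GGG:U), queue 3 within the group.
+ */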
+
+/**
+ * ionic_qid_to_bitid() - Transform a queue id into a resource bit index
+ * @qid: Queue id
+ * @qgrp_shift: Log2 number of queues per queue group
+ * @half_qid_shift: Log2 of half the total number of queues
+ *
+ * Return: Resource bit index
+ *
+ * This is the inverse of ionic_bitid_to_qid().
+ */
+static inline u32 ionic_qid_to_bitid(u32 qid, u8 qgrp_shift, u8 half_qid_shift)
+{
+ u32 udma_bit = (qid & BIT(qgrp_shift)) << (half_qid_shift - qgrp_shift);
+ u32 grp_bits = (qid & GENMASK(half_qid_shift, qgrp_shift + 1)) >> 1;
+ u32 idx_bits = qid & (BIT(qgrp_shift) - 1);
+
+ return udma_bit | grp_bits | idx_bits;
+}
+#endif /* _IONIC_RES_H_ */
diff --git a/drivers/infiniband/hw/irdma/Kconfig b/drivers/infiniband/hw/irdma/Kconfig
index 5f49a58590ed..0bd7e3fca1fb 100644
--- a/drivers/infiniband/hw/irdma/Kconfig
+++ b/drivers/infiniband/hw/irdma/Kconfig
@@ -4,10 +4,11 @@ config INFINIBAND_IRDMA
depends on INET
depends on IPV6 || !IPV6
depends on PCI
- depends on ICE && I40E
+ depends on IDPF && ICE && I40E
select GENERIC_ALLOCATOR
select AUXILIARY_BUS
select CRC32
help
- This is an Intel(R) Ethernet Protocol Driver for RDMA driver
- that support E810 (iWARP/RoCE) and X722 (iWARP) network devices.
+ This is an Intel(R) Ethernet Protocol Driver for RDMA that
+ supports IPU E2000 (RoCEv2), E810 (iWARP/RoCEv2) and X722 (iWARP)
+ network devices.
diff --git a/drivers/infiniband/hw/irdma/Makefile b/drivers/infiniband/hw/irdma/Makefile
index 48c3854235a0..03ceb9e5475f 100644
--- a/drivers/infiniband/hw/irdma/Makefile
+++ b/drivers/infiniband/hw/irdma/Makefile
@@ -13,7 +13,10 @@ irdma-objs := cm.o \
hw.o \
i40iw_hw.o \
i40iw_if.o \
+	  ig3rdma_if.o \
+ icrdma_if.o \
icrdma_hw.o \
+	  ig3rdma_hw.o \
main.o \
pble.o \
puda.o \
@@ -22,6 +25,7 @@ irdma-objs := cm.o \
uk.o \
utils.o \
verbs.o \
+ virtchnl.o \
ws.o \
CFLAGS_trace.o = -I$(src)
diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
index 99a7f1a6c0b5..4ef1c29032f7 100644
--- a/drivers/infiniband/hw/irdma/ctrl.c
+++ b/drivers/infiniband/hw/irdma/ctrl.c
@@ -74,6 +74,14 @@ static void irdma_set_qos_info(struct irdma_sc_vsi *vsi,
{
u8 i;
+ if (vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ vsi->qos[i].qs_handle = vsi->dev->qos[i].qs_handle;
+ vsi->qos[i].valid = true;
+ }
+
+ return;
+ }
vsi->qos_rel_bw = l2p->vsi_rel_bw;
vsi->qos_prio_type = l2p->vsi_prio_type;
vsi->dscp_mode = l2p->dscp_mode;
@@ -404,7 +412,8 @@ int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info)
pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
if ((info->virtual_map && info->sq_pa >= pble_obj_cnt) ||
- (info->virtual_map && info->rq_pa >= pble_obj_cnt))
+ (!info->qp_uk_init_info.srq_uk &&
+ info->virtual_map && info->rq_pa >= pble_obj_cnt))
return -EINVAL;
qp->llp_stream_handle = (void *)(-1);
@@ -439,6 +448,208 @@ int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info)
}
/**
+ * irdma_sc_srq_init - init sc_srq structure
+ * @srq: srq sc struct
+ * @info: parameters for srq init
+ */
+int irdma_sc_srq_init(struct irdma_sc_srq *srq,
+ struct irdma_srq_init_info *info)
+{
+ u32 srq_size_quanta;
+ int ret_code;
+
+ ret_code = irdma_uk_srq_init(&srq->srq_uk, &info->srq_uk_init_info);
+ if (ret_code)
+ return ret_code;
+
+ srq->dev = info->pd->dev;
+ srq->pd = info->pd;
+ srq->vsi = info->vsi;
+ srq->srq_pa = info->srq_pa;
+ srq->first_pm_pbl_idx = info->first_pm_pbl_idx;
+ srq->pasid = info->pasid;
+ srq->pasid_valid = info->pasid_valid;
+ srq->srq_limit = info->srq_limit;
+ srq->leaf_pbl_size = info->leaf_pbl_size;
+ srq->virtual_map = info->virtual_map;
+ srq->tph_en = info->tph_en;
+ srq->arm_limit_event = info->arm_limit_event;
+ srq->tph_val = info->tph_value;
+ srq->shadow_area_pa = info->shadow_area_pa;
+
+ /* Smallest SRQ size is 256B, i.e. 8 quanta */
+ srq_size_quanta = max((u32)IRDMA_SRQ_MIN_QUANTA,
+ srq->srq_uk.srq_size *
+ srq->srq_uk.wqe_size_multiplier);
+ srq->hw_srq_size = irdma_get_encoded_wqe_size(srq_size_quanta,
+ IRDMA_QUEUE_TYPE_SRQ);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_srq_create - send srq create CQP WQE
+ * @srq: srq sc struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_srq_create(struct irdma_sc_srq *srq, u64 scratch,
+ bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+
+ cqp = srq->pd->dev->cqp;
+ if (srq->srq_uk.srq_id < cqp->dev->hw_attrs.min_hw_srq_id ||
+ srq->srq_uk.srq_id >
+ (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_SRQ].max_cnt - 1))
+ return -EINVAL;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 0,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_SRQ_LIMIT, srq->srq_limit) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_RQSIZE, srq->hw_srq_size) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_RQ_WQE_SIZE, srq->srq_uk.wqe_size));
+ set_64bit_val(wqe, 8, (uintptr_t)srq);
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_PD_ID, srq->pd->pd_id));
+ set_64bit_val(wqe, 32,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR,
+ srq->srq_pa >>
+ IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR_S));
+ set_64bit_val(wqe, 40,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR,
+ srq->shadow_area_pa >>
+ IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR_S));
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_FIRST_PM_PBL_IDX,
+ srq->first_pm_pbl_idx));
+
+ hdr = srq->srq_uk.srq_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_SRQ) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_LEAF_PBL_SIZE, srq->leaf_pbl_size) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_VIRTMAP, srq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_ARM_LIMIT_EVENT,
+ srq->arm_limit_event) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: SRQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_srq_modify - send modify_srq CQP WQE
+ * @srq: srq sc struct
+ * @info: parameters for srq modification
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_srq_modify(struct irdma_sc_srq *srq,
+ struct irdma_modify_srq_info *info, u64 scratch,
+ bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+
+ cqp = srq->dev->cqp;
+ if (srq->srq_uk.srq_id < cqp->dev->hw_attrs.min_hw_srq_id ||
+ srq->srq_uk.srq_id >
+ (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_SRQ].max_cnt - 1))
+ return -EINVAL;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 0,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_SRQ_LIMIT, info->srq_limit) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_RQSIZE, srq->hw_srq_size) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_RQ_WQE_SIZE, srq->srq_uk.wqe_size));
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_SRQCTX, srq->srq_uk.srq_id));
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_PD_ID, srq->pd->pd_id));
+ set_64bit_val(wqe, 32,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR,
+ srq->srq_pa >>
+ IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR_S));
+ set_64bit_val(wqe, 40,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR,
+ srq->shadow_area_pa >>
+ IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR_S));
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_FIRST_PM_PBL_IDX,
+ srq->first_pm_pbl_idx));
+
+ hdr = srq->srq_uk.srq_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_SRQ) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_LEAF_PBL_SIZE, srq->leaf_pbl_size) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_VIRTMAP, srq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_ARM_LIMIT_EVENT,
+ info->arm_limit_event) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: SRQ_MODIFY WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_srq_destroy - send srq_destroy CQP WQE
+ * @srq: srq sc struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_srq_destroy(struct irdma_sc_srq *srq, u64 scratch,
+ bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+
+ cqp = srq->dev->cqp;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 8, (uintptr_t)srq);
+
+ hdr = srq->srq_uk.srq_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_SRQ) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: SRQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
* irdma_sc_qp_create - create qp
* @qp: sc qp
* @info: qp create info
@@ -629,13 +840,14 @@ static u8 irdma_sc_get_encoded_ird_size(u16 ird_size)
}
/**
- * irdma_sc_qp_setctx_roce - set qp's context
+ * irdma_sc_qp_setctx_roce_gen_2 - set qp's context
* @qp: sc qp
* @qp_ctx: context ptr
* @info: ctx info
*/
-void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
- struct irdma_qp_host_ctx_info *info)
+static void irdma_sc_qp_setctx_roce_gen_2(struct irdma_sc_qp *qp,
+ __le64 *qp_ctx,
+ struct irdma_qp_host_ctx_info *info)
{
struct irdma_roce_offload_info *roce_info;
struct irdma_udp_offload_info *udp;
@@ -753,6 +965,189 @@ void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
8, qp_ctx, IRDMA_QP_CTX_SIZE, false);
}
+/**
+ * irdma_sc_get_encoded_ird_size_gen_3 - get encoded IRD size for GEN 3
+ * @ird_size: IRD size
+ * The IRD from the connection is rounded up to a supported HW setting
+ * and then encoded into the ird_size field of qp_ctx. Consumers are
+ * expected to provide a valid IRD size based on hardware attributes.
+ * IRD size defaults to a value of 4 in case of invalid input.
+ */
+static u8 irdma_sc_get_encoded_ird_size_gen_3(u16 ird_size)
+{
+ switch (ird_size ?
+ roundup_pow_of_two(2 * ird_size) : 4) {
+ case 4096:
+ return IRDMA_IRD_HW_SIZE_4096_GEN3;
+ case 2048:
+ return IRDMA_IRD_HW_SIZE_2048_GEN3;
+ case 1024:
+ return IRDMA_IRD_HW_SIZE_1024_GEN3;
+ case 512:
+ return IRDMA_IRD_HW_SIZE_512_GEN3;
+ case 256:
+ return IRDMA_IRD_HW_SIZE_256_GEN3;
+ case 128:
+ return IRDMA_IRD_HW_SIZE_128_GEN3;
+ case 64:
+ return IRDMA_IRD_HW_SIZE_64_GEN3;
+ case 32:
+ return IRDMA_IRD_HW_SIZE_32_GEN3;
+ case 16:
+ return IRDMA_IRD_HW_SIZE_16_GEN3;
+ case 8:
+ return IRDMA_IRD_HW_SIZE_8_GEN3;
+ case 4:
+ default:
+ break;
+ }
+
+ return IRDMA_IRD_HW_SIZE_4_GEN3;
+}
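
As a worked example of the bucketing above: ird_size = 100 rounds to roundup_pow_of_two(200) = 256 and encodes to IRDMA_IRD_HW_SIZE_256_GEN3, while ird_size = 0 falls through to the default bucket of 4. A hedged userspace sketch of just the rounding step (the IRDMA_IRD_HW_SIZE_*_GEN3 values are driver-internal, so only the bucket value is computed here):

/* Hedged sketch of the GEN3 IRD bucketing: round 2*ird up to a power
 * of two, clamping zero input to the minimum bucket of 4.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t ird_bucket_gen_3(uint16_t ird_size)
{
	uint32_t v = ird_size ? 2u * ird_size : 4u;

	/* round up to the next power of two, like roundup_pow_of_two() */
	v--;
	v |= v >> 1;
	v |= v >> 2;
	v |= v >> 4;
	v |= v >> 8;
	v |= v >> 16;
	return v + 1;
}

int main(void)
{
	printf("%u %u %u\n",
	       ird_bucket_gen_3(0),	/* 4    */
	       ird_bucket_gen_3(100),	/* 256  */
	       ird_bucket_gen_3(2048));	/* 4096 */
	return 0;
}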
+
+/**
+ * irdma_sc_qp_setctx_roce_gen_3 - set qp's context
+ * @qp: sc qp
+ * @qp_ctx: context ptr
+ * @info: ctx info
+ */
+static void irdma_sc_qp_setctx_roce_gen_3(struct irdma_sc_qp *qp,
+ __le64 *qp_ctx,
+ struct irdma_qp_host_ctx_info *info)
+{
+ struct irdma_roce_offload_info *roce_info = info->roce_info;
+ struct irdma_udp_offload_info *udp = info->udp_info;
+ u64 qw0, qw3, qw7 = 0, qw8 = 0;
+ u8 push_mode_en;
+ u32 push_idx;
+
+ qp->user_pri = info->user_pri;
+ if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
+ push_mode_en = 0;
+ push_idx = 0;
+ } else {
+ push_mode_en = 1;
+ push_idx = qp->push_idx;
+ }
+
+ qw0 = FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) |
+ FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) |
+ FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) |
+ FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) |
+ FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) |
+ FIELD_PREP(IRDMAQPC_PPIDX, push_idx) |
+ FIELD_PREP(IRDMAQPC_PMENA, push_mode_en) |
+ FIELD_PREP(IRDMAQPC_DC_TCP_EN, roce_info->dctcp_en) |
+ FIELD_PREP(IRDMAQPC_ISQP1, roce_info->is_qp1) |
+ FIELD_PREP(IRDMAQPC_ROCE_TVER, roce_info->roce_tver) |
+ FIELD_PREP(IRDMAQPC_IPV4, udp->ipv4) |
+ FIELD_PREP(IRDMAQPC_USE_SRQ, !qp->qp_uk.srq_uk ? 0 : 1) |
+ FIELD_PREP(IRDMAQPC_INSERTVLANTAG, udp->insert_vlan_tag);
+ set_64bit_val(qp_ctx, 0, qw0);
+ set_64bit_val(qp_ctx, 8, qp->sq_pa);
+ set_64bit_val(qp_ctx, 16, qp->rq_pa);
+ qw3 = FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
+ FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size) |
+ FIELD_PREP(IRDMAQPC_TTL, udp->ttl) |
+ FIELD_PREP(IRDMAQPC_TOS, udp->tos) |
+ FIELD_PREP(IRDMAQPC_SRCPORTNUM, udp->src_port) |
+ FIELD_PREP(IRDMAQPC_DESTPORTNUM, udp->dst_port);
+ set_64bit_val(qp_ctx, 24, qw3);
+ set_64bit_val(qp_ctx, 32,
+ FIELD_PREP(IRDMAQPC_DESTIPADDR2, udp->dest_ip_addr[2]) |
+ FIELD_PREP(IRDMAQPC_DESTIPADDR3, udp->dest_ip_addr[3]));
+ set_64bit_val(qp_ctx, 40,
+ FIELD_PREP(IRDMAQPC_DESTIPADDR0, udp->dest_ip_addr[0]) |
+ FIELD_PREP(IRDMAQPC_DESTIPADDR1, udp->dest_ip_addr[1]));
+ set_64bit_val(qp_ctx, 48,
+ FIELD_PREP(IRDMAQPC_SNDMSS, udp->snd_mss) |
+ FIELD_PREP(IRDMAQPC_VLANTAG, udp->vlan_tag) |
+ FIELD_PREP(IRDMAQPC_ARPIDX, udp->arp_idx));
+ qw7 = FIELD_PREP(IRDMAQPC_PKEY, roce_info->p_key) |
+ FIELD_PREP(IRDMAQPC_ACKCREDITS, roce_info->ack_credits) |
+ FIELD_PREP(IRDMAQPC_FLOWLABEL, udp->flow_label);
+ set_64bit_val(qp_ctx, 56, qw7);
+ qw8 = FIELD_PREP(IRDMAQPC_QKEY, roce_info->qkey) |
+ FIELD_PREP(IRDMAQPC_DESTQP, roce_info->dest_qp);
+ set_64bit_val(qp_ctx, 64, qw8);
+ set_64bit_val(qp_ctx, 80,
+ FIELD_PREP(IRDMAQPC_PSNNXT, udp->psn_nxt) |
+ FIELD_PREP(IRDMAQPC_LSN, udp->lsn));
+ set_64bit_val(qp_ctx, 88,
+ FIELD_PREP(IRDMAQPC_EPSN, udp->epsn));
+ set_64bit_val(qp_ctx, 96,
+ FIELD_PREP(IRDMAQPC_PSNMAX, udp->psn_max) |
+ FIELD_PREP(IRDMAQPC_PSNUNA, udp->psn_una));
+ set_64bit_val(qp_ctx, 112,
+ FIELD_PREP(IRDMAQPC_CWNDROCE, udp->cwnd));
+ set_64bit_val(qp_ctx, 128,
+ FIELD_PREP(IRDMAQPC_MINRNR_TIMER, udp->min_rnr_timer) |
+ FIELD_PREP(IRDMAQPC_RNRNAK_THRESH, udp->rnr_nak_thresh) |
+ FIELD_PREP(IRDMAQPC_REXMIT_THRESH, udp->rexmit_thresh) |
+ FIELD_PREP(IRDMAQPC_RNRNAK_TMR, udp->rnr_nak_tmr) |
+ FIELD_PREP(IRDMAQPC_RTOMIN, roce_info->rtomin));
+ set_64bit_val(qp_ctx, 136,
+ FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
+ FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
+ set_64bit_val(qp_ctx, 152,
+ FIELD_PREP(IRDMAQPC_MACADDRESS,
+ ether_addr_to_u64(roce_info->mac_addr)) |
+ FIELD_PREP(IRDMAQPC_LOCALACKTIMEOUT,
+ roce_info->local_ack_timeout));
+ set_64bit_val(qp_ctx, 160,
+ FIELD_PREP(IRDMAQPC_ORDSIZE_GEN3, roce_info->ord_size) |
+ FIELD_PREP(IRDMAQPC_IRDSIZE_GEN3,
+ irdma_sc_get_encoded_ird_size_gen_3(roce_info->ird_size)) |
+ FIELD_PREP(IRDMAQPC_WRRDRSPOK, roce_info->wr_rdresp_en) |
+ FIELD_PREP(IRDMAQPC_RDOK, roce_info->rd_en) |
+ FIELD_PREP(IRDMAQPC_USESTATSINSTANCE,
+ info->stats_idx_valid) |
+ FIELD_PREP(IRDMAQPC_BINDEN, roce_info->bind_en) |
+ FIELD_PREP(IRDMAQPC_FASTREGEN, roce_info->fast_reg_en) |
+ FIELD_PREP(IRDMAQPC_DCQCNENABLE, roce_info->dcqcn_en) |
+ FIELD_PREP(IRDMAQPC_RCVNOICRC, roce_info->rcv_no_icrc) |
+ FIELD_PREP(IRDMAQPC_FW_CC_ENABLE,
+ roce_info->fw_cc_enable) |
+ FIELD_PREP(IRDMAQPC_UDPRIVCQENABLE,
+ roce_info->udprivcq_en) |
+ FIELD_PREP(IRDMAQPC_PRIVEN, roce_info->priv_mode_en) |
+ FIELD_PREP(IRDMAQPC_REMOTE_ATOMIC_EN,
+ info->remote_atomics_en) |
+ FIELD_PREP(IRDMAQPC_TIMELYENABLE, roce_info->timely_en));
+ set_64bit_val(qp_ctx, 168,
+ FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
+ set_64bit_val(qp_ctx, 176,
+ FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
+ FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
+ FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle));
+ set_64bit_val(qp_ctx, 184,
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, udp->local_ipaddr[3]) |
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, udp->local_ipaddr[2]));
+ set_64bit_val(qp_ctx, 192,
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, udp->local_ipaddr[1]) |
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, udp->local_ipaddr[0]));
+ set_64bit_val(qp_ctx, 200,
+ FIELD_PREP(IRDMAQPC_THIGH, roce_info->t_high) |
+ FIELD_PREP(IRDMAQPC_SRQ_ID,
+ !qp->qp_uk.srq_uk ?
+ 0 : qp->qp_uk.srq_uk->srq_id) |
+ FIELD_PREP(IRDMAQPC_TLOW, roce_info->t_low));
+ set_64bit_val(qp_ctx, 208, roce_info->pd_id |
+ FIELD_PREP(IRDMAQPC_STAT_INDEX_GEN3, info->stats_idx) |
+ FIELD_PREP(IRDMAQPC_PKT_LIMIT, qp->pkt_limit));
+
+ print_hex_dump_debug("WQE: QP_HOST ROCE CTX WQE", DUMP_PREFIX_OFFSET,
+ 16, 8, qp_ctx, IRDMA_QP_CTX_SIZE, false);
+}
+
+void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
+ struct irdma_qp_host_ctx_info *info)
+{
+ if (qp->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2)
+ irdma_sc_qp_setctx_roce_gen_2(qp, qp_ctx, info);
+ else
+ irdma_sc_qp_setctx_roce_gen_3(qp, qp_ctx, info);
+}
+
/* irdma_sc_alloc_local_mac_entry - allocate a mac entry
* @cqp: struct for cqp hw
* @scratch: u64 saved to be used during cqp completion
@@ -1080,7 +1475,8 @@ static int irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID) |
FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len));
set_64bit_val(wqe, 16,
- FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
+ FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_PDID_HI, info->pd_id >> 18));
set_64bit_val(wqe, 40,
FIELD_PREP(IRDMA_CQPSQ_STAG_HMCFNIDX, info->hmc_fcn_index));
@@ -1096,6 +1492,8 @@ static int irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, info->remote_access) |
FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_REMOTE_ATOMIC_EN,
+ info->remote_atomics_en) |
FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
dma_wmb(); /* make sure WQE is written before valid bit is set */
@@ -1165,6 +1563,7 @@ static int irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
set_64bit_val(wqe, 16,
FIELD_PREP(IRDMA_CQPSQ_STAG_KEY, info->stag_key) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_PDID_HI, info->pd_id >> 18) |
FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
if (!info->chunk_size) {
set_64bit_val(wqe, 32, info->reg_addr_pa);
@@ -1187,6 +1586,8 @@ static int irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
FIELD_PREP(IRDMA_CQPSQ_STAG_VABASEDTO, addr_type) |
FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_REMOTE_ATOMIC_EN,
+ info->remote_atomics_en) |
FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
dma_wmb(); /* make sure WQE is written before valid bit is set */
@@ -1223,7 +1624,8 @@ static int irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
set_64bit_val(wqe, 8,
FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
set_64bit_val(wqe, 16,
- FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
+ FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_PDID_HI, info->pd_id >> 18));
hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DEALLOC_STAG) |
FIELD_PREP(IRDMA_CQPSQ_STAG_MR, info->mr) |
@@ -1263,7 +1665,8 @@ static int irdma_sc_mw_alloc(struct irdma_sc_dev *dev,
set_64bit_val(wqe, 8,
FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
set_64bit_val(wqe, 16,
- FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->mw_stag_index));
+ FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->mw_stag_index) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_PDID_HI, info->pd_id >> 18));
hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOC_STAG) |
FIELD_PREP(IRDMA_CQPSQ_STAG_MWTYPE, info->mw_wide) |
@@ -1343,6 +1746,7 @@ int irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_REMOTE_ATOMICS_EN, info->remote_atomics_en) |
FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
dma_wmb(); /* make sure WQE is written before valid bit is set */
@@ -1873,7 +2277,7 @@ void irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
mutex_init(&vsi->qos[i].qos_mutex);
INIT_LIST_HEAD(&vsi->qos[i].qplist);
}
- if (vsi->register_qset) {
+ if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2) {
vsi->dev->ws_add = irdma_ws_add;
vsi->dev->ws_remove = irdma_ws_remove;
vsi->dev->ws_reset = irdma_ws_reset;
@@ -1888,7 +2292,7 @@ void irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
* irdma_get_stats_idx - Return stats index
* @vsi: pointer to the vsi
*/
-static u8 irdma_get_stats_idx(struct irdma_sc_vsi *vsi)
+static u16 irdma_get_stats_idx(struct irdma_sc_vsi *vsi)
{
struct irdma_stats_inst_info stats_info = {};
struct irdma_sc_dev *dev = vsi->dev;
@@ -1964,12 +2368,13 @@ int irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
(void *)((uintptr_t)stats_buff_mem->va +
IRDMA_GATHER_STATS_BUF_SIZE);
- irdma_hw_stats_start_timer(vsi);
+ if (vsi->dev->hw_attrs.uk_attrs.hw_rev < IRDMA_GEN_3)
+ irdma_hw_stats_start_timer(vsi);
/* when stat allocation is not required default to fcn_id. */
vsi->stats_idx = info->fcn_id;
if (info->alloc_stats_inst) {
- u8 stats_idx = irdma_get_stats_idx(vsi);
+ u16 stats_idx = irdma_get_stats_idx(vsi);
if (stats_idx != IRDMA_INVALID_STATS_IDX) {
vsi->stats_inst_alloc = true;
@@ -1993,7 +2398,7 @@ void irdma_vsi_stats_free(struct irdma_sc_vsi *vsi)
{
struct irdma_stats_inst_info stats_info = {};
struct irdma_sc_dev *dev = vsi->dev;
- u8 stats_idx = vsi->stats_idx;
+ u16 stats_idx = vsi->stats_idx;
if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
if (vsi->stats_inst_alloc) {
@@ -2009,7 +2414,9 @@ void irdma_vsi_stats_free(struct irdma_sc_vsi *vsi)
if (!vsi->pestat)
return;
- irdma_hw_stats_stop_timer(vsi);
+
+ if (dev->hw_attrs.uk_attrs.hw_rev < IRDMA_GEN_3)
+ irdma_hw_stats_stop_timer(vsi);
dma_free_coherent(vsi->pestat->hw->device,
vsi->pestat->gather_info.stats_buff_mem.size,
vsi->pestat->gather_info.stats_buff_mem.va,
@@ -2026,6 +2433,14 @@ u8 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type)
{
u8 encoded_size = 0;
+ if (queue_type == IRDMA_QUEUE_TYPE_SRQ) {
+ /* Smallest SRQ size is 256B (8 quanta), which gets
+ * encoded to 0.
+ */
+ encoded_size = ilog2(wqsize) - 3;
+
+ return encoded_size;
+ }
/* cqp sq's hw coded value starts from 1 for size of 4
* while it starts from 0 for qp' wq's.
*/
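
The SRQ branch above reduces to encoded_size = ilog2(wqsize) - 3, pairing with irdma_sc_srq_init(), which clamps the quanta count to IRDMA_SRQ_MIN_QUANTA before calling it: 8 quanta (256B) encode to 0, 16 to 1, and so on. A hedged standalone sketch of that encoding:

/* Hedged sketch: SRQ size encoding from irdma_get_encoded_wqe_size().
 * Assumes wqsize is a power of two >= 8 quanta.
 */
#include <assert.h>

static unsigned int srq_encoded_size(unsigned int wqsize)
{
	return __builtin_ctz(wqsize) - 3;	/* ilog2(wqsize) - 3 */
}

int main(void)
{
	assert(srq_encoded_size(8) == 0);	/* 256B minimum */
	assert(srq_encoded_size(16) == 1);
	assert(srq_encoded_size(1024) == 7);
	return 0;
}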
@@ -2259,6 +2674,12 @@ int irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE,
info->ae_src) : 0;
set_64bit_val(wqe, 8, temp);
+ if (cqp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ set_64bit_val(wqe, 40,
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_ERR_SQ_IDX, info->err_sq_idx));
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_ERR_RQ_IDX, info->err_rq_idx));
+ }
hdr = qp->qp_uk.qp_id |
FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_FLUSH_WQES) |
@@ -2267,6 +2688,9 @@ int irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHSQ, flush_sq) |
FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHRQ, flush_rq) |
FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ if (cqp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ hdr |= FIELD_PREP(IRDMA_CQPSQ_FWQE_ERR_SQ_IDX_VALID, info->err_sq_idx_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_ERR_RQ_IDX_VALID, info->err_rq_idx_valid);
dma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, 24, hdr);
@@ -2562,6 +2986,9 @@ static int irdma_sc_cq_create(struct irdma_sc_cq *cq, u64 scratch,
FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) |
FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, check_overflow) |
FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, cq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CQID_HIGH, cq->cq_uk.cq_id >> 22) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CEQID_HIGH,
+ (cq->ceq_id_valid ? cq->ceq_id : 0) >> 10) |
FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, cq->ceq_id_valid) |
FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
@@ -2706,6 +3133,41 @@ static int irdma_sc_cq_modify(struct irdma_sc_cq *cq,
}
/**
+ * irdma_sc_get_decoded_ird_size_gen_3 - get decoded IRD size for GEN 3
+ * @ird_enc: IRD encoding
+ * IRD size defaults to a value of 4 in case of invalid input.
+ */
+static u16 irdma_sc_get_decoded_ird_size_gen_3(u8 ird_enc)
+{
+ switch (ird_enc) {
+ case IRDMA_IRD_HW_SIZE_4096_GEN3:
+ return 4096;
+ case IRDMA_IRD_HW_SIZE_2048_GEN3:
+ return 2048;
+ case IRDMA_IRD_HW_SIZE_1024_GEN3:
+ return 1024;
+ case IRDMA_IRD_HW_SIZE_512_GEN3:
+ return 512;
+ case IRDMA_IRD_HW_SIZE_256_GEN3:
+ return 256;
+ case IRDMA_IRD_HW_SIZE_128_GEN3:
+ return 128;
+ case IRDMA_IRD_HW_SIZE_64_GEN3:
+ return 64;
+ case IRDMA_IRD_HW_SIZE_32_GEN3:
+ return 32;
+ case IRDMA_IRD_HW_SIZE_16_GEN3:
+ return 16;
+ case IRDMA_IRD_HW_SIZE_8_GEN3:
+ return 8;
+ case IRDMA_IRD_HW_SIZE_4_GEN3:
+ return 4;
+ default:
+ return 4;
+ }
+}
+
+/**
* irdma_check_cqp_progress - check cqp processing progress
* @timeout: timeout info struct
* @dev: sc device struct
@@ -2738,6 +3200,89 @@ static inline void irdma_get_cqp_reg_info(struct irdma_sc_cqp *cqp, u32 *val,
}
/**
+ * irdma_sc_cqp_def_cmpl_ae_handler - remove completed requests from pending list
+ * @dev: sc device struct
+ * @info: AE entry info
+ * @first: true if this is the first call to this handler for given AEQE
+ * @scratch: (out) scratch entry pointer
+ * @sw_def_info: (in/out) SW ticket value for this AE
+ *
+ * In case of an AE_DEF_CMPL event, this function should be called in a loop
+ * until it returns a zero scratch value.
+ * For each call, it looks for a matching CQP request on the pending list,
+ * removes it from the list and returns the associated scratch entry.
+ * If this is the first call to this function for a given AEQE, the
+ * sw_def_info value is not used to find matching requests. Instead, it is
+ * populated with the value from the first matching cqp_request on the list.
+ * For subsequent calls, ooo_op->sw_def_info needs to match the value passed
+ * by the caller.
+ *
+ * Return: via @scratch, the scratch entry of the cqp_request to be released,
+ * or 0 if no matching request is found.
+ */
+void irdma_sc_cqp_def_cmpl_ae_handler(struct irdma_sc_dev *dev,
+ struct irdma_aeqe_info *info,
+ bool first, u64 *scratch,
+ u32 *sw_def_info)
+{
+ struct irdma_ooo_cqp_op *ooo_op;
+ unsigned long flags;
+
+ *scratch = 0;
+
+ spin_lock_irqsave(&dev->cqp->ooo_list_lock, flags);
+ list_for_each_entry(ooo_op, &dev->cqp->ooo_pnd, list_entry) {
+ if (ooo_op->deferred &&
+ ((first && ooo_op->def_info == info->def_info) ||
+ (!first && ooo_op->sw_def_info == *sw_def_info))) {
+ *sw_def_info = ooo_op->sw_def_info;
+ *scratch = ooo_op->scratch;
+
+ list_move(&ooo_op->list_entry, &dev->cqp->ooo_avail);
+ atomic64_inc(&dev->cqp->completed_ops);
+
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->cqp->ooo_list_lock, flags);
+
+ if (first && !*scratch)
+ ibdev_dbg(to_ibdev(dev),
+ "AEQ: deferred completion with unknown ticket: def_info 0x%x\n",
+ info->def_info);
+}
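
Per the kernel-doc above, a caller drains one deferred-completion ticket by looping until scratch comes back zero. A hedged sketch of such a loop (irdma_drain_def_cmpl() and the release step are illustrative names, not part of the patch):

/* Hedged caller sketch for irdma_sc_cqp_def_cmpl_ae_handler(); the
 * helper name and the release step are hypothetical.
 */
static void irdma_drain_def_cmpl(struct irdma_sc_dev *dev,
				 struct irdma_aeqe_info *info)
{
	u32 sw_def_info;
	u64 scratch;
	bool first = true;

	do {
		irdma_sc_cqp_def_cmpl_ae_handler(dev, info, first,
						 &scratch, &sw_def_info);
		if (!scratch)
			break;	/* no more matching pending requests */

		/* release the cqp_request identified by scratch here */
		first = false;
	} while (1);
}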
+
+/**
+ * irdma_sc_cqp_cleanup_handler - remove requests from pending list
+ * @dev: sc device struct
+ *
+ * This function should be called in a loop from irdma_cleanup_pending_cqp_op.
+ * For each call, it takes the first CQP request on the pending list, removes
+ * it from the list and returns the associated scratch entry.
+ *
+ * Return: scratch entry of the cqp_request to be released, or 0 if the
+ * pending list is empty.
+ */
+u64 irdma_sc_cqp_cleanup_handler(struct irdma_sc_dev *dev)
+{
+ struct irdma_ooo_cqp_op *ooo_op;
+ u64 scratch = 0;
+
+ list_for_each_entry(ooo_op, &dev->cqp->ooo_pnd, list_entry) {
+ scratch = ooo_op->scratch;
+
+ list_del(&ooo_op->list_entry);
+ list_add(&ooo_op->list_entry, &dev->cqp->ooo_avail);
+ atomic64_inc(&dev->cqp->completed_ops);
+
+ break;
+ }
+
+ return scratch;
+}
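
irdma_cleanup_pending_cqp_op(), referenced in the kernel-doc above, would then drain the entire pending list on teardown; a hedged sketch (assumes live requests always carry a nonzero scratch value):

/* Hedged teardown sketch: drain all pending CQP ops; the release step
 * is hypothetical.
 */
static void irdma_cleanup_pending_cqp_op_sketch(struct irdma_sc_dev *dev)
{
	u64 scratch;

	while ((scratch = irdma_sc_cqp_cleanup_handler(dev)))
		;	/* release the cqp_request identified by scratch */
}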
+
+/**
* irdma_cqp_poll_registers - poll cqp registers
* @cqp: struct for cqp hw
* @tail: wqtail register value
@@ -2794,7 +3339,10 @@ static u64 irdma_sc_decode_fpm_commit(struct irdma_sc_dev *dev, __le64 *buf,
obj_info[rsrc_idx].cnt = (u32)FLD_RS_64(dev, temp, IRDMA_COMMIT_FPM_CQCNT);
break;
case IRDMA_HMC_IW_APBVT_ENTRY:
- obj_info[rsrc_idx].cnt = 1;
+ if (dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2)
+ obj_info[rsrc_idx].cnt = 1;
+ else
+ obj_info[rsrc_idx].cnt = 0;
break;
default:
obj_info[rsrc_idx].cnt = (u32)temp;
@@ -2829,7 +3377,8 @@ irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf,
IRDMA_HMC_IW_QP);
irdma_sc_decode_fpm_commit(dev, buf, 8, info,
IRDMA_HMC_IW_CQ);
- /* skiping RSRVD */
+ irdma_sc_decode_fpm_commit(dev, buf, 16, info,
+ IRDMA_HMC_IW_SRQ);
irdma_sc_decode_fpm_commit(dev, buf, 24, info,
IRDMA_HMC_IW_HTE);
irdma_sc_decode_fpm_commit(dev, buf, 32, info,
@@ -2864,15 +3413,17 @@ irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf,
IRDMA_HMC_IW_HDR);
irdma_sc_decode_fpm_commit(dev, buf, 152, info,
IRDMA_HMC_IW_MD);
- irdma_sc_decode_fpm_commit(dev, buf, 160, info,
- IRDMA_HMC_IW_OOISC);
- irdma_sc_decode_fpm_commit(dev, buf, 168, info,
- IRDMA_HMC_IW_OOISCFFL);
+ if (dev->cqp->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) {
+ irdma_sc_decode_fpm_commit(dev, buf, 160, info,
+ IRDMA_HMC_IW_OOISC);
+ irdma_sc_decode_fpm_commit(dev, buf, 168, info,
+ IRDMA_HMC_IW_OOISCFFL);
+ }
}
/* searching for the last object in HMC to find the size of the HMC area. */
for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++) {
- if (info[i].base > max_base) {
+ if (info[i].base > max_base && info[i].cnt) {
max_base = info[i].base;
last_hmc_obj = i;
}
@@ -2927,6 +3478,7 @@ static int irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
struct irdma_hmc_fpm_misc *hmc_fpm_misc)
{
struct irdma_hmc_obj_info *obj_info;
+ u8 ird_encoding;
u64 temp;
u32 size;
u16 max_pe_sds;
@@ -2935,7 +3487,19 @@ static int irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
get_64bit_val(buf, 0, &temp);
hmc_info->first_sd_index = (u16)FIELD_GET(IRDMA_QUERY_FPM_FIRST_PE_SD_INDEX, temp);
- max_pe_sds = (u16)FIELD_GET(IRDMA_QUERY_FPM_MAX_PE_SDS, temp);
+
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ max_pe_sds = (u16)FIELD_GET(IRDMA_QUERY_FPM_MAX_PE_SDS_GEN3, temp);
+ else
+ max_pe_sds = (u16)FIELD_GET(IRDMA_QUERY_FPM_MAX_PE_SDS, temp);
+
+ /* Reduce SD count for unprivileged functions by 1 to account for PBLE
+ * backing page rounding.
+ */
+ if (dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2 &&
+ (hmc_info->hmc_fn_id >= dev->hw_attrs.first_hw_vf_fpm_id ||
+ !dev->privileged))
+ max_pe_sds--;
hmc_fpm_misc->max_sds = max_pe_sds;
hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;
@@ -2949,11 +3513,17 @@ static int irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
size = (u32)(temp >> 32);
obj_info[IRDMA_HMC_IW_CQ].size = BIT_ULL(size);
+ irdma_sc_decode_fpm_query(buf, 24, obj_info, IRDMA_HMC_IW_SRQ);
irdma_sc_decode_fpm_query(buf, 32, obj_info, IRDMA_HMC_IW_HTE);
irdma_sc_decode_fpm_query(buf, 40, obj_info, IRDMA_HMC_IW_ARP);
- obj_info[IRDMA_HMC_IW_APBVT_ENTRY].size = 8192;
- obj_info[IRDMA_HMC_IW_APBVT_ENTRY].max_cnt = 1;
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ obj_info[IRDMA_HMC_IW_APBVT_ENTRY].size = 0;
+ obj_info[IRDMA_HMC_IW_APBVT_ENTRY].max_cnt = 0;
+ } else {
+ obj_info[IRDMA_HMC_IW_APBVT_ENTRY].size = 8192;
+ obj_info[IRDMA_HMC_IW_APBVT_ENTRY].max_cnt = 1;
+ }
irdma_sc_decode_fpm_query(buf, 48, obj_info, IRDMA_HMC_IW_MR);
irdma_sc_decode_fpm_query(buf, 56, obj_info, IRDMA_HMC_IW_XF);
@@ -2962,7 +3532,7 @@ static int irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
obj_info[IRDMA_HMC_IW_XFFL].max_cnt = (u32)temp;
obj_info[IRDMA_HMC_IW_XFFL].size = 4;
hmc_fpm_misc->xf_block_size = FIELD_GET(IRDMA_QUERY_FPM_XFBLOCKSIZE, temp);
- if (!hmc_fpm_misc->xf_block_size)
+ if (obj_info[IRDMA_HMC_IW_XF].max_cnt && !hmc_fpm_misc->xf_block_size)
return -EINVAL;
irdma_sc_decode_fpm_query(buf, 72, obj_info, IRDMA_HMC_IW_Q1);
@@ -2984,6 +3554,14 @@ static int irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
hmc_fpm_misc->max_ceqs = FIELD_GET(IRDMA_QUERY_FPM_MAX_CEQS, temp);
hmc_fpm_misc->ht_multiplier = FIELD_GET(IRDMA_QUERY_FPM_HTMULTIPLIER, temp);
hmc_fpm_misc->timer_bucket = FIELD_GET(IRDMA_QUERY_FPM_TIMERBUCKET, temp);
+ if (FIELD_GET(IRDMA_MANAGE_RSRC_VER2,
+ dev->feature_info[IRDMA_FTN_FLAGS])) {
+ ird_encoding = (u8)FIELD_GET(IRDMA_QUERY_FPM_MAX_IRD, temp);
+ hmc_fpm_misc->ird =
+ irdma_sc_get_decoded_ird_size_gen_3(ird_encoding) / 2;
+ dev->hw_attrs.max_hw_ird = hmc_fpm_misc->ird;
+ dev->hw_attrs.max_hw_ord = hmc_fpm_misc->ird;
+ }
if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
return 0;
irdma_sc_decode_fpm_query(buf, 96, obj_info, IRDMA_HMC_IW_FSIMC);
@@ -3000,15 +3578,25 @@ static int irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
irdma_sc_decode_fpm_query(buf, 144, obj_info, IRDMA_HMC_IW_HDR);
irdma_sc_decode_fpm_query(buf, 152, obj_info, IRDMA_HMC_IW_MD);
- irdma_sc_decode_fpm_query(buf, 160, obj_info, IRDMA_HMC_IW_OOISC);
-
- get_64bit_val(buf, 168, &temp);
- obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt = (u32)temp;
- obj_info[IRDMA_HMC_IW_OOISCFFL].size = 4;
- hmc_fpm_misc->ooiscf_block_size = FIELD_GET(IRDMA_QUERY_FPM_OOISCFBLOCKSIZE, temp);
- if (!hmc_fpm_misc->ooiscf_block_size &&
- obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt)
- return -EINVAL;
+
+ if (dev->cqp->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) {
+ irdma_sc_decode_fpm_query(buf, 160, obj_info, IRDMA_HMC_IW_OOISC);
+
+ get_64bit_val(buf, 168, &temp);
+ obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt = (u32)temp;
+ obj_info[IRDMA_HMC_IW_OOISCFFL].size = 4;
+ hmc_fpm_misc->ooiscf_block_size = FIELD_GET(IRDMA_QUERY_FPM_OOISCFBLOCKSIZE, temp);
+ if (!hmc_fpm_misc->ooiscf_block_size &&
+ obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt)
+ return -EINVAL;
+ }
+
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ get_64bit_val(buf, 176, &temp);
+ hmc_fpm_misc->loc_mem_pages = (u32)FIELD_GET(IRDMA_QUERY_FPM_LOC_MEM_PAGES, temp);
+ if (!hmc_fpm_misc->loc_mem_pages)
+ return -EINVAL;
+ }
return 0;
}
@@ -3088,6 +3676,8 @@ exit:
int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
struct irdma_cqp_init_info *info)
{
+ struct irdma_ooo_cqp_op *ooo_op;
+ u32 num_ooo_ops;
u8 hw_sq_size;
if (info->sq_size > IRDMA_CQP_SW_SQSIZE_2048 ||
@@ -3118,17 +3708,43 @@ int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
cqp->rocev2_rto_policy = info->rocev2_rto_policy;
cqp->protocol_used = info->protocol_used;
memcpy(&cqp->dcqcn_params, &info->dcqcn_params, sizeof(cqp->dcqcn_params));
+ if (cqp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ cqp->ooisc_blksize = info->ooisc_blksize;
+ cqp->rrsp_blksize = info->rrsp_blksize;
+ cqp->q1_blksize = info->q1_blksize;
+ cqp->xmit_blksize = info->xmit_blksize;
+ cqp->blksizes_valid = info->blksizes_valid;
+ cqp->ts_shift = info->ts_shift;
+ cqp->ts_override = info->ts_override;
+ cqp->en_fine_grained_timers = info->en_fine_grained_timers;
+ cqp->pe_en_vf_cnt = info->pe_en_vf_cnt;
+ cqp->ooo_op_array = info->ooo_op_array;
+ /* initialize the OOO lists */
+ INIT_LIST_HEAD(&cqp->ooo_avail);
+ INIT_LIST_HEAD(&cqp->ooo_pnd);
+ if (cqp->ooo_op_array) {
+ /* Populate avail list entries */
+ for (num_ooo_ops = 0, ooo_op = info->ooo_op_array;
+ num_ooo_ops < cqp->sq_size;
+ num_ooo_ops++, ooo_op++)
+ list_add(&ooo_op->list_entry, &cqp->ooo_avail);
+ }
+ }
info->dev->cqp = cqp;
IRDMA_RING_INIT(cqp->sq_ring, cqp->sq_size);
+ cqp->last_def_cmpl_ticket = 0;
+ cqp->sw_def_cmpl_ticket = 0;
cqp->requested_ops = 0;
atomic64_set(&cqp->completed_ops, 0);
/* for the cqp commands backlog. */
INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head);
writel(0, cqp->dev->hw_regs[IRDMA_CQPTAIL]);
- writel(0, cqp->dev->hw_regs[IRDMA_CQPDB]);
- writel(0, cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
+ if (cqp->dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2) {
+ writel(0, cqp->dev->hw_regs[IRDMA_CQPDB]);
+ writel(0, cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
+ }
ibdev_dbg(to_ibdev(cqp->dev),
"WQE: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%p] cqp[%p] polarity[x%04x]\n",
@@ -3160,6 +3776,7 @@ int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err)
return -ENOMEM;
spin_lock_init(&cqp->dev->cqp_lock);
+ spin_lock_init(&cqp->ooo_list_lock);
temp = FIELD_PREP(IRDMA_CQPHC_SQSIZE, cqp->hw_sq_size) |
FIELD_PREP(IRDMA_CQPHC_SVER, cqp->struct_ver) |
@@ -3171,12 +3788,29 @@ int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err)
FIELD_PREP(IRDMA_CQPHC_PROTOCOL_USED,
cqp->protocol_used);
}
+ if (hw_rev >= IRDMA_GEN_3)
+ temp |= FIELD_PREP(IRDMA_CQPHC_EN_FINE_GRAINED_TIMERS,
+ cqp->en_fine_grained_timers);
set_64bit_val(cqp->host_ctx, 0, temp);
set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);
temp = FIELD_PREP(IRDMA_CQPHC_ENABLED_VFS, cqp->ena_vf_count) |
FIELD_PREP(IRDMA_CQPHC_HMC_PROFILE, cqp->hmc_profile);
+
+ if (hw_rev >= IRDMA_GEN_3)
+ temp |= FIELD_PREP(IRDMA_CQPHC_OOISC_BLKSIZE,
+ cqp->ooisc_blksize) |
+ FIELD_PREP(IRDMA_CQPHC_RRSP_BLKSIZE,
+ cqp->rrsp_blksize) |
+ FIELD_PREP(IRDMA_CQPHC_Q1_BLKSIZE, cqp->q1_blksize) |
+ FIELD_PREP(IRDMA_CQPHC_XMIT_BLKSIZE,
+ cqp->xmit_blksize) |
+ FIELD_PREP(IRDMA_CQPHC_BLKSIZES_VALID,
+ cqp->blksizes_valid) |
+ FIELD_PREP(IRDMA_CQPHC_TIMESTAMP_OVERRIDE,
+ cqp->ts_override) |
+ FIELD_PREP(IRDMA_CQPHC_TS_SHIFT, cqp->ts_shift);
set_64bit_val(cqp->host_ctx, 16, temp);
set_64bit_val(cqp->host_ctx, 24, (uintptr_t)cqp);
temp = FIELD_PREP(IRDMA_CQPHC_HW_MAJVER, cqp->hw_maj_ver) |
@@ -3338,6 +3972,87 @@ void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq)
}
/**
+ * irdma_sc_process_def_cmpl - process deferred or pending completion
+ * @cqp: CQP sc struct
+ * @info: CQP CQE info
+ * @wqe_idx: CQP WQE descriptor index
+ * @def_info: deferred op ticket value or out-of-order completion id
+ * @def_cmpl: true for deferred completion, false for pending (RCA)
+ */
+static void irdma_sc_process_def_cmpl(struct irdma_sc_cqp *cqp,
+ struct irdma_ccq_cqe_info *info,
+ u32 wqe_idx, u32 def_info, bool def_cmpl)
+{
+ struct irdma_ooo_cqp_op *ooo_op;
+ unsigned long flags;
+
+ /* Deferred and out-of-order completions share the same list of pending
+ * completions. Since the list can also be accessed from the AE handler,
+ * it must be protected by a lock.
+ */
+ spin_lock_irqsave(&cqp->ooo_list_lock, flags);
+
+ /* For deferred completions, bump up the SW completion ticket value. */
+ if (def_cmpl) {
+ cqp->last_def_cmpl_ticket = def_info;
+ cqp->sw_def_cmpl_ticket++;
+ }
+ if (!list_empty(&cqp->ooo_avail)) {
+ ooo_op = (struct irdma_ooo_cqp_op *)
+ list_entry(cqp->ooo_avail.next,
+ struct irdma_ooo_cqp_op, list_entry);
+
+ list_del(&ooo_op->list_entry);
+ ooo_op->scratch = info->scratch;
+ ooo_op->def_info = def_info;
+ ooo_op->sw_def_info = cqp->sw_def_cmpl_ticket;
+ ooo_op->deferred = def_cmpl;
+ ooo_op->wqe_idx = wqe_idx;
+ /* Pending completions must be chronologically ordered,
+ * so add at the end of the list.
+ */
+ list_add_tail(&ooo_op->list_entry, &cqp->ooo_pnd);
+ }
+ spin_unlock_irqrestore(&cqp->ooo_list_lock, flags);
+
+ info->pending = true;
+}
+
+/**
+ * irdma_sc_process_ooo_cmpl - process out-of-order (final) completion
+ * @cqp: CQP sc struct
+ * @info: CQP CQE info
+ * @def_info: out-of-order completion id
+ */
+static void irdma_sc_process_ooo_cmpl(struct irdma_sc_cqp *cqp,
+ struct irdma_ccq_cqe_info *info,
+ u32 def_info)
+{
+ struct irdma_ooo_cqp_op *ooo_op_tmp;
+ struct irdma_ooo_cqp_op *ooo_op;
+ unsigned long flags;
+
+ info->scratch = 0;
+
+ spin_lock_irqsave(&cqp->ooo_list_lock, flags);
+ list_for_each_entry_safe(ooo_op, ooo_op_tmp, &cqp->ooo_pnd,
+ list_entry) {
+ if (!ooo_op->deferred && ooo_op->def_info == def_info) {
+ list_del(&ooo_op->list_entry);
+ info->scratch = ooo_op->scratch;
+ list_add(&ooo_op->list_entry, &cqp->ooo_avail);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&cqp->ooo_list_lock, flags);
+
+ if (!info->scratch)
+ ibdev_dbg(to_ibdev(cqp->dev),
+ "CQP: DEBUG_FW_OOO out-of-order completion with unknown def_info = 0x%x\n",
+ def_info);
+}
+
+/**
* irdma_sc_ccq_get_cqe_info - get ccq's cq entry
* @ccq: ccq sc struct
* @info: completion q entry to return
@@ -3345,6 +4060,10 @@ void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq)
int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
struct irdma_ccq_cqe_info *info)
{
+ u32 def_info;
+ bool def_cmpl = false;
+ bool pend_cmpl = false;
+ bool ooo_final_cmpl = false;
u64 qp_ctx, temp, temp1;
__le64 *cqe;
struct irdma_sc_cqp *cqp;
@@ -3352,6 +4071,7 @@ int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
u32 error;
u8 polarity;
int ret_code = 0;
+ unsigned long flags;
if (ccq->cq_uk.avoid_mem_cflct)
cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(&ccq->cq_uk);
@@ -3383,6 +4103,25 @@ int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
get_64bit_val(cqe, 16, &temp1);
info->op_ret_val = (u32)FIELD_GET(IRDMA_CCQ_OPRETVAL, temp1);
+ if (cqp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ def_cmpl = info->maj_err_code == IRDMA_CQPSQ_MAJ_NO_ERROR &&
+ info->min_err_code == IRDMA_CQPSQ_MIN_DEF_CMPL;
+ def_info = (u32)FIELD_GET(IRDMA_CCQ_DEFINFO, temp1);
+
+ pend_cmpl = info->maj_err_code == IRDMA_CQPSQ_MAJ_NO_ERROR &&
+ info->min_err_code == IRDMA_CQPSQ_MIN_OOO_CMPL;
+
+ ooo_final_cmpl = (bool)FIELD_GET(IRDMA_OOO_CMPL, temp);
+
+ if (def_cmpl || pend_cmpl || ooo_final_cmpl) {
+ if (ooo_final_cmpl)
+ irdma_sc_process_ooo_cmpl(cqp, info, def_info);
+ else
+ irdma_sc_process_def_cmpl(cqp, info, wqe_idx,
+ def_info, def_cmpl);
+ }
+ }
+
get_64bit_val(cqp->sq_base[wqe_idx].elem, 24, &temp1);
info->op_code = (u8)FIELD_GET(IRDMA_CQPSQ_OPCODE, temp1);
info->cqp = cqp;
@@ -3399,7 +4138,16 @@ int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
dma_wmb(); /* make sure shadow area is updated before moving tail */
- IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
+ spin_lock_irqsave(&cqp->dev->cqp_lock, flags);
+ if (!ooo_final_cmpl)
+ IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
+ spin_unlock_irqrestore(&cqp->dev->cqp_lock, flags);
+
+ /* Do not increment completed_ops counter on pending or deferred
+ * completions.
+ */
+ if (pend_cmpl || def_cmpl)
+ return ret_code;
atomic64_inc(&cqp->completed_ops);
return ret_code;
@@ -3647,7 +4395,7 @@ int irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL);
ceq->tph_en = info->tph_en;
ceq->tph_val = info->tph_val;
- ceq->vsi = info->vsi;
+ ceq->vsi_idx = info->vsi_idx;
ceq->polarity = 1;
IRDMA_RING_INIT(ceq->ceq_ring, ceq->elem_cnt);
ceq->dev->ceq[info->ceq_id] = ceq;
@@ -3680,13 +4428,16 @@ static int irdma_sc_ceq_create(struct irdma_sc_ceq *ceq, u64 scratch,
(ceq->virtual_map ? ceq->first_pm_pbl_idx : 0));
set_64bit_val(wqe, 56,
FIELD_PREP(IRDMA_CQPSQ_TPHVAL, ceq->tph_val) |
- FIELD_PREP(IRDMA_CQPSQ_VSIIDX, ceq->vsi->vsi_idx));
+ FIELD_PREP(IRDMA_CQPSQ_PASID, ceq->pasid) |
+ FIELD_PREP(IRDMA_CQPSQ_VSIIDX, ceq->vsi_idx));
hdr = FIELD_PREP(IRDMA_CQPSQ_CEQ_CEQID, ceq->ceq_id) |
+ FIELD_PREP(IRDMA_CQPSQ_CEQ_CEQID_HIGH, ceq->ceq_id >> 10) |
FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CEQ) |
FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) |
FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) |
FIELD_PREP(IRDMA_CQPSQ_CEQ_ITRNOEXPIRE, ceq->itr_no_expire) |
FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) |
+ FIELD_PREP(IRDMA_CQPSQ_PASID_VALID, ceq->pasid_valid) |
FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
dma_wmb(); /* make sure WQE is written before valid bit is set */
@@ -3741,7 +4492,7 @@ int irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch)
int ret_code;
struct irdma_sc_dev *dev = ceq->dev;
- dev->ccq->vsi = ceq->vsi;
+ dev->ccq->vsi_idx = ceq->vsi_idx;
if (ceq->reg_cq) {
ret_code = irdma_sc_add_cq_ctx(ceq, ceq->dev->ccq);
if (ret_code)
@@ -3774,11 +4525,14 @@ int irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq)
set_64bit_val(wqe, 16, ceq->elem_cnt);
set_64bit_val(wqe, 48, ceq->first_pm_pbl_idx);
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMA_CQPSQ_PASID, ceq->pasid));
hdr = ceq->ceq_id |
FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CEQ) |
FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) |
FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) |
FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) |
+ FIELD_PREP(IRDMA_CQPSQ_PASID_VALID, ceq->pasid_valid) |
FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
dma_wmb(); /* make sure WQE is written before valid bit is set */
@@ -3942,10 +4696,13 @@ static int irdma_sc_aeq_create(struct irdma_sc_aeq *aeq, u64 scratch,
(aeq->virtual_map ? 0 : aeq->aeq_elem_pa));
set_64bit_val(wqe, 48,
(aeq->virtual_map ? aeq->first_pm_pbl_idx : 0));
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMA_CQPSQ_PASID, aeq->pasid));
hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_AEQ) |
FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) |
FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_PASID_VALID, aeq->pasid_valid) |
FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
dma_wmb(); /* make sure WQE is written before valid bit is set */
@@ -3974,7 +4731,8 @@ static int irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq, u64 scratch,
u64 hdr;
dev = aeq->dev;
- writel(0, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
+ if (dev->privileged)
+ writel(0, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
cqp = dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
@@ -3982,9 +4740,12 @@ static int irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq, u64 scratch,
return -ENOMEM;
set_64bit_val(wqe, 16, aeq->elem_cnt);
set_64bit_val(wqe, 48, aeq->first_pm_pbl_idx);
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMA_CQPSQ_PASID, aeq->pasid));
hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_AEQ) |
FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) |
FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_PASID_VALID, aeq->pasid_valid) |
FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
dma_wmb(); /* make sure WQE is written before valid bit is set */
@@ -4025,18 +4786,39 @@ int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
print_hex_dump_debug("WQE: AEQ_ENTRY WQE", DUMP_PREFIX_OFFSET, 16, 8,
aeqe, 16, false);
- ae_src = (u8)FIELD_GET(IRDMA_AEQE_AESRC, temp);
- info->wqe_idx = (u16)FIELD_GET(IRDMA_AEQE_WQDESCIDX, temp);
- info->qp_cq_id = (u32)FIELD_GET(IRDMA_AEQE_QPCQID_LOW, temp) |
+ if (aeq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ ae_src = (u8)FIELD_GET(IRDMA_AEQE_AESRC_GEN_3, temp);
+ info->wqe_idx = (u16)FIELD_GET(IRDMA_AEQE_WQDESCIDX_GEN_3,
+ temp);
+ info->qp_cq_id = (u32)FIELD_GET(IRDMA_AEQE_QPCQID_GEN_3, temp);
+ info->ae_id = (u16)FIELD_GET(IRDMA_AEQE_AECODE_GEN_3, temp);
+ info->tcp_state = (u8)FIELD_GET(IRDMA_AEQE_TCPSTATE_GEN_3, compl_ctx);
+ info->iwarp_state = (u8)FIELD_GET(IRDMA_AEQE_IWSTATE_GEN_3, temp);
+ info->q2_data_written = (u8)FIELD_GET(IRDMA_AEQE_Q2DATA_GEN_3, compl_ctx);
+ info->aeqe_overflow = (bool)FIELD_GET(IRDMA_AEQE_OVERFLOW_GEN_3, temp);
+ info->compl_ctx = FIELD_GET(IRDMA_AEQE_CMPL_CTXT, compl_ctx);
+ compl_ctx = FIELD_GET(IRDMA_AEQE_CMPL_CTXT, compl_ctx) << IRDMA_AEQE_CMPL_CTXT_S;
+ } else {
+ ae_src = (u8)FIELD_GET(IRDMA_AEQE_AESRC, temp);
+ info->wqe_idx = (u16)FIELD_GET(IRDMA_AEQE_WQDESCIDX, temp);
+ info->qp_cq_id = (u32)FIELD_GET(IRDMA_AEQE_QPCQID_LOW, temp) |
((u32)FIELD_GET(IRDMA_AEQE_QPCQID_HI, temp) << 18);
- info->ae_id = (u16)FIELD_GET(IRDMA_AEQE_AECODE, temp);
- info->tcp_state = (u8)FIELD_GET(IRDMA_AEQE_TCPSTATE, temp);
- info->iwarp_state = (u8)FIELD_GET(IRDMA_AEQE_IWSTATE, temp);
- info->q2_data_written = (u8)FIELD_GET(IRDMA_AEQE_Q2DATA, temp);
- info->aeqe_overflow = (bool)FIELD_GET(IRDMA_AEQE_OVERFLOW, temp);
+ info->ae_id = (u16)FIELD_GET(IRDMA_AEQE_AECODE, temp);
+ info->tcp_state = (u8)FIELD_GET(IRDMA_AEQE_TCPSTATE, temp);
+ info->iwarp_state = (u8)FIELD_GET(IRDMA_AEQE_IWSTATE, temp);
+ info->q2_data_written = (u8)FIELD_GET(IRDMA_AEQE_Q2DATA, temp);
+ info->aeqe_overflow = (bool)FIELD_GET(IRDMA_AEQE_OVERFLOW,
+ temp);
+ }
info->ae_src = ae_src;
switch (info->ae_id) {
+ case IRDMA_AE_SRQ_LIMIT:
+ info->srq = true;
+ /* [63:6] from CMPL_CTXT, [5:0] from WQDESCIDX. */
+ info->compl_ctx = compl_ctx;
+ ae_src = IRDMA_AE_SOURCE_RSVD;
+ break;
case IRDMA_AE_PRIV_OPERATION_DENIED:
case IRDMA_AE_AMP_INVALIDATE_TYPE1_MW:
case IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW:
@@ -4069,6 +4851,10 @@ int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
case IRDMA_AE_LLP_TOO_MANY_RETRIES:
+ case IRDMA_AE_LLP_TOO_MANY_RNRS:
+ case IRDMA_AE_REMOTE_QP_CATASTROPHIC:
+ case IRDMA_AE_LOCAL_QP_CATASTROPHIC:
+ case IRDMA_AE_RCE_QP_CATASTROPHIC:
case IRDMA_AE_LLP_DOUBT_REACHABILITY:
case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
case IRDMA_AE_RESET_SENT:
@@ -4085,6 +4871,10 @@ int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
info->compl_ctx = compl_ctx << 1;
ae_src = IRDMA_AE_SOURCE_RSVD;
break;
+ case IRDMA_AE_CQP_DEFERRED_COMPLETE:
+ info->def_info = info->wqe_idx;
+ ae_src = IRDMA_AE_SOURCE_RSVD;
+ break;
case IRDMA_AE_ROCE_EMPTY_MCG:
case IRDMA_AE_ROCE_BAD_MC_IP_ADDR:
case IRDMA_AE_ROCE_BAD_MC_QPID:
@@ -4110,6 +4900,7 @@ int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
info->qp = true;
info->rq = true;
info->compl_ctx = compl_ctx;
+ info->err_rq_idx_valid = true;
break;
case IRDMA_AE_SOURCE_CQ:
case IRDMA_AE_SOURCE_CQ_0110:
@@ -4125,8 +4916,18 @@ int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
info->compl_ctx = compl_ctx;
break;
case IRDMA_AE_SOURCE_IN_RR_WR:
+ info->qp = true;
+ if (aeq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ info->err_rq_idx_valid = true;
+ info->compl_ctx = compl_ctx;
+ info->in_rdrsp_wr = true;
+ break;
case IRDMA_AE_SOURCE_IN_RR_WR_1011:
info->qp = true;
+ if (aeq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ info->sq = true;
+ info->err_rq_idx_valid = true;
+ }
info->compl_ctx = compl_ctx;
info->in_rdrsp_wr = true;
break;
@@ -4336,6 +5137,26 @@ int irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, u8 hmc_fn_id)
}
/**
+ * irdma_set_loc_mem() - set a local memory bit field
+ * @buf: ptr to the commit FPM buffer in which local memory gets enabled
+ */
+static void irdma_set_loc_mem(__le64 *buf)
+{
+ u64 loc_mem_en = BIT_ULL(ENABLE_LOC_MEM);
+ u32 offset;
+ u64 temp;
+
+ for (offset = 0; offset < IRDMA_COMMIT_FPM_BUF_SIZE;
+ offset += sizeof(__le64)) {
+ if (offset == IRDMA_PBLE_COMMIT_OFFSET)
+ continue;
+ get_64bit_val(buf, offset, &temp);
+ if (temp)
+ set_64bit_val(buf, offset, temp | loc_mem_en);
+ }
+}
+
+/**
* irdma_sc_cfg_iw_fpm() - commits hmc obj cnt values using cqp
* command and populates fpm base address in hmc_info
* @dev : ptr to irdma_dev struct
@@ -4356,7 +5177,7 @@ static int irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev, u8 hmc_fn_id)
set_64bit_val(buf, 0, (u64)obj_info[IRDMA_HMC_IW_QP].cnt);
set_64bit_val(buf, 8, (u64)obj_info[IRDMA_HMC_IW_CQ].cnt);
- set_64bit_val(buf, 16, (u64)0); /* RSRVD */
+ set_64bit_val(buf, 16, (u64)obj_info[IRDMA_HMC_IW_SRQ].cnt);
set_64bit_val(buf, 24, (u64)obj_info[IRDMA_HMC_IW_HTE].cnt);
set_64bit_val(buf, 32, (u64)obj_info[IRDMA_HMC_IW_ARP].cnt);
set_64bit_val(buf, 40, (u64)0); /* RSVD */
@@ -4383,7 +5204,9 @@ static int irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev, u8 hmc_fn_id)
(u64)obj_info[IRDMA_HMC_IW_OOISC].cnt);
set_64bit_val(buf, 168,
(u64)obj_info[IRDMA_HMC_IW_OOISCFFL].cnt);
-
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3 &&
+ dev->hmc_fpm_misc.loc_mem_pages)
+ irdma_set_loc_mem(buf);
commit_fpm_mem.pa = dev->fpm_commit_buf_pa;
commit_fpm_mem.va = dev->fpm_commit_buf;
@@ -4592,6 +5415,7 @@ static bool irdma_cqp_ring_full(struct irdma_sc_cqp *cqp)
static u32 irdma_est_sd(struct irdma_sc_dev *dev,
struct irdma_hmc_info *hmc_info)
{
+ struct irdma_hmc_obj_info *pble_info;
int i;
u64 size = 0;
u64 sd;
@@ -4600,12 +5424,22 @@ static u32 irdma_est_sd(struct irdma_sc_dev *dev,
if (i != IRDMA_HMC_IW_PBLE)
size += round_up(hmc_info->hmc_obj[i].cnt *
hmc_info->hmc_obj[i].size, 512);
- size += round_up(hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt *
- hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].size, 512);
+
+ pble_info = &hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE];
+ if (dev->privileged)
+ size += round_up(pble_info->cnt * pble_info->size, 512);
if (size & 0x1FFFFF)
sd = (size >> 21) + 1; /* add 1 for remainder */
else
sd = size >> 21;
+ if (!dev->privileged && !dev->hmc_fpm_misc.loc_mem_pages) {
+ /* 2MB alignment for VF PBLE HMC */
+ size = pble_info->cnt * pble_info->size;
+ if (size & 0x1FFFFF)
+ sd += (size >> 21) + 1; /* add 1 for remainder */
+ else
+ sd += size >> 21;
+ }
if (sd > 0xFFFFFFFF) {
ibdev_dbg(to_ibdev(dev), "HMC: sd overflow[%lld]\n", sd);
sd = 0xFFFFFFFF - 1;
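
The SD math above carves 2MB segment descriptors out of the rounded object footprint: size >> 21 full SDs, plus one for any remainder. For example, 5MB of objects needs 2 + 1 = 3 SDs. A hedged standalone check of the rounding:

/* Hedged check of the 2MB SD rounding in irdma_est_sd(). */
#include <assert.h>
#include <stdint.h>

static uint64_t sd_count(uint64_t size)
{
	return (size & 0x1FFFFF) ? (size >> 21) + 1 : size >> 21;
}

int main(void)
{
	assert(sd_count(0x400000) == 2);	/* 4MB exactly -> 2 SDs */
	assert(sd_count(0x500000) == 3);	/* 5MB -> 2 full SDs + remainder */
	return 0;
}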
@@ -4615,17 +5449,6 @@ static u32 irdma_est_sd(struct irdma_sc_dev *dev,
}
/**
- * irdma_sc_query_rdma_features_done - poll cqp for query features done
- * @cqp: struct for cqp hw
- */
-static int irdma_sc_query_rdma_features_done(struct irdma_sc_cqp *cqp)
-{
- return irdma_sc_poll_for_cqp_op_done(cqp,
- IRDMA_CQP_OP_QUERY_RDMA_FEATURES,
- NULL);
-}
-
-/**
* irdma_sc_query_rdma_features - query RDMA features and FW ver
* @cqp: struct for cqp hw
* @buf: buffer to hold query info
@@ -4634,7 +5457,9 @@ static int irdma_sc_query_rdma_features_done(struct irdma_sc_cqp *cqp)
static int irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp,
struct irdma_dma_mem *buf, u64 scratch)
{
+ u32 tail, val, error;
__le64 *wqe;
+ int status;
u64 temp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
@@ -4654,9 +5479,15 @@ static int irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp,
print_hex_dump_debug("WQE: QUERY RDMA FEATURES", DUMP_PREFIX_OFFSET,
16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
+
irdma_sc_cqp_post_sq(cqp);
+ status = irdma_cqp_poll_registers(cqp, tail,
+ cqp->dev->hw_attrs.max_done_count);
+ if (error || status)
+ status = -EINVAL;
- return 0;
+ return status;
}
/**
@@ -4678,8 +5509,6 @@ int irdma_get_rdma_features(struct irdma_sc_dev *dev)
return -ENOMEM;
ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
- if (!ret_code)
- ret_code = irdma_sc_query_rdma_features_done(dev->cqp);
if (ret_code)
goto exit;
@@ -4703,8 +5532,6 @@ int irdma_get_rdma_features(struct irdma_sc_dev *dev)
return -ENOMEM;
ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
- if (!ret_code)
- ret_code = irdma_sc_query_rdma_features_done(dev->cqp);
if (ret_code)
goto exit;
@@ -4731,6 +5558,10 @@ int irdma_get_rdma_features(struct irdma_sc_dev *dev)
}
dev->feature_info[feat_type] = temp;
}
+
+ if (dev->feature_info[IRDMA_FTN_FLAGS] & IRDMA_ATOMICS_ALLOWED_BIT)
+ dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_ATOMIC_OPS;
+
exit:
dma_free_coherent(dev->hw->device, feat_buf.size, feat_buf.va,
feat_buf.pa);
@@ -4786,22 +5617,354 @@ static void cfg_fpm_value_gen_2(struct irdma_sc_dev *dev,
}
/**
+ * irdma_get_rsrc_mem_config - configure resources for local or host memory
+ * @dev: sc device struct
+ * @is_mrte_loc_mem: if true, MRs are to be in local memory because all SDs
+ * are local memory pages
+ *
+ * Only the MR object can be configured as host or local memory if QPs are in
+ * local memory. If QPs are in local memory, then all resource objects will be
+ * in local memory except MRs, which can be either host or local memory. The
+ * only exception is PBLEs, which are always in host memory.
+ */
+static void irdma_get_rsrc_mem_config(struct irdma_sc_dev *dev, bool is_mrte_loc_mem)
+{
+ struct irdma_hmc_info *hmc_info = dev->hmc_info;
+ int i;
+
+ for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
+ hmc_info->hmc_obj[i].mem_loc = IRDMA_LOC_MEM;
+
+ if (dev->feature_info[IRDMA_OBJ_1] && !is_mrte_loc_mem) {
+ u8 mem_type;
+
+ mem_type = (u8)FIELD_GET(IRDMA_MR_MEM_LOC, dev->feature_info[IRDMA_OBJ_1]);
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].mem_loc =
+ (mem_type & IRDMA_OBJ_LOC_MEM_BIT) ?
+ IRDMA_LOC_MEM : IRDMA_HOST_MEM;
+ } else {
+ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].mem_loc = IRDMA_LOC_MEM;
+ }
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].mem_loc = IRDMA_HOST_MEM;
+
+ ibdev_dbg(to_ibdev(dev), "HMC: INFO: mrte_mem_loc = %d pble = %d\n",
+ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].mem_loc,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].mem_loc);
+}
+
+/**
+ * irdma_cfg_sd_mem - allocate sd memory
+ * @dev: sc device struct
+ * @hmc_info: ptr to irdma_hmc_obj_info struct
+ */
+static int irdma_cfg_sd_mem(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info)
+{
+ struct irdma_virt_mem virt_mem;
+ u32 mem_size;
+
+ mem_size = sizeof(struct irdma_hmc_sd_entry) * hmc_info->sd_table.sd_cnt;
+ virt_mem.size = mem_size;
+ virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL);
+ if (!virt_mem.va)
+ return -ENOMEM;
+ hmc_info->sd_table.sd_entry = virt_mem.va;
+
+ return 0;
+}
+
+/**
+ * irdma_get_objs_pages - get number of 2M pages needed
+ * @dev: sc device struct
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @mem_loc: pages for local or host memory
+ */
+static u32 irdma_get_objs_pages(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info,
+ enum irdma_hmc_obj_mem mem_loc)
+{
+ u64 size = 0;
+ int i;
+
+ for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++) {
+ if (hmc_info->hmc_obj[i].mem_loc == mem_loc) {
+ size += round_up(hmc_info->hmc_obj[i].cnt *
+ hmc_info->hmc_obj[i].size, 512);
+ }
+ }
+
+ return DIV_ROUND_UP(size, IRDMA_HMC_PAGE_SIZE);
+}
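
irdma_get_objs_pages() sums the 512-byte-rounded footprint of every object placed in the given location and converts it to whole HMC pages. Assuming IRDMA_HMC_PAGE_SIZE is the same 2MB granularity as the SD math earlier, 3MB of QPs plus 2MB of CQs costs 3 pages; a hedged sketch with illustrative object counts:

/* Hedged sketch of the page estimate in irdma_get_objs_pages();
 * assumes a 2MB IRDMA_HMC_PAGE_SIZE and made-up object counts/sizes.
 */
#include <assert.h>
#include <stdint.h>

#define HMC_PAGE_SIZE		(2ull << 20)
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define ROUND_UP_512(x)		(((x) + 511) & ~511ull)

int main(void)
{
	uint64_t size = 0;

	size += ROUND_UP_512(4096ull * 768);	/* QPs: 3MB */
	size += ROUND_UP_512(8192ull * 256);	/* CQs: 2MB */

	assert(DIV_ROUND_UP(size, HMC_PAGE_SIZE) == 3);	/* 5MB -> 3 pages */
	return 0;
}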
+
+/**
+ * irdma_set_host_hmc_rsrc_gen_3 - calculate host hmc resources for gen 3
+ * @dev: sc device struct
+ */
+static void irdma_set_host_hmc_rsrc_gen_3(struct irdma_sc_dev *dev)
+{
+ struct irdma_hmc_fpm_misc *hmc_fpm_misc;
+ struct irdma_hmc_info *hmc_info;
+ enum irdma_hmc_obj_mem mrte_loc;
+ u32 mrwanted, pblewanted;
+ u32 avail_sds, mr_sds;
+
+ hmc_info = dev->hmc_info;
+ hmc_fpm_misc = &dev->hmc_fpm_misc;
+ avail_sds = hmc_fpm_misc->max_sds;
+ mrte_loc = hmc_info->hmc_obj[IRDMA_HMC_IW_MR].mem_loc;
+ mrwanted = hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt;
+ pblewanted = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt;
+
+ if (mrte_loc == IRDMA_HOST_MEM && avail_sds > IRDMA_MIN_PBLE_PAGES) {
+ mr_sds = avail_sds - IRDMA_MIN_PBLE_PAGES;
+ mrwanted = min(mrwanted, mr_sds * MAX_MR_PER_SD);
+ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt = mrwanted;
+ avail_sds -= DIV_ROUND_UP(mrwanted, MAX_MR_PER_SD);
+ }
+
+ if (FIELD_GET(IRDMA_MANAGE_RSRC_VER2, dev->feature_info[IRDMA_FTN_FLAGS]) &&
+ pblewanted > avail_sds * MAX_PBLE_PER_SD)
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: Warn: Resource version 2: pble wanted = 0x%x available = 0x%x\n",
+ pblewanted, avail_sds * MAX_PBLE_PER_SD);
+
+ pblewanted = min(pblewanted, avail_sds * MAX_PBLE_PER_SD);
+ hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted;
+}
+
+/**
+ * irdma_verify_commit_fpm_gen_3 - verify query fpm values
+ * @dev: sc device struct
+ * @max_pages: max local memory available
+ * @qpwanted: number of qp's wanted
+ */
+static int irdma_verify_commit_fpm_gen_3(struct irdma_sc_dev *dev,
+ u32 max_pages,
+ u32 qpwanted)
+{
+ struct irdma_hmc_fpm_misc *hmc_fpm_misc;
+ u32 rrf_cnt, xf_cnt, timer_cnt, pages_needed;
+ struct irdma_hmc_info *hmc_info;
+ u32 rrffl_cnt = 0;
+ u32 xffl_cnt = 0;
+ u32 q1fl_cnt;
+
+ hmc_info = dev->hmc_info;
+ hmc_fpm_misc = &dev->hmc_fpm_misc;
+
+ rrf_cnt = roundup_pow_of_two(IRDMA_RRF_MULTIPLIER * qpwanted);
+
+ if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].max_cnt)
+ rrffl_cnt =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt /
+ hmc_fpm_misc->rrf_block_size;
+
+ xf_cnt = roundup_pow_of_two(IRDMA_XF_MULTIPLIER * qpwanted);
+
+ if (xf_cnt)
+ xffl_cnt = xf_cnt / hmc_fpm_misc->xf_block_size;
+
+ timer_cnt = (round_up(qpwanted, 512) / 512 + 1) *
+ hmc_fpm_misc->timer_bucket;
+
+ q1fl_cnt = hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
+
+ pages_needed = irdma_get_objs_pages(dev, hmc_info, IRDMA_LOC_MEM);
+ if (pages_needed > max_pages) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: FAIL: SW counts rrf_cnt = %u rrffl_cnt = %u timer_cnt = %u",
+ rrf_cnt, rrffl_cnt, timer_cnt);
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: FAIL: SW counts xf_cnt = %u xffl_cnt = %u q1fl_cnt = %u",
+ xf_cnt, xffl_cnt, q1fl_cnt);
+
+ return -EINVAL;
+ }
+
+ hmc_fpm_misc->max_sds -= pages_needed;
+ hmc_fpm_misc->loc_mem_pages -= pages_needed;
+
+ return 0;
+}
+
+/**
+ * irdma_set_loc_hmc_rsrc_gen_3 - calculate hmc resources for gen 3
+ * @dev: sc device struct
+ * @max_pages: max local memory available
+ * @qpwanted: number of qp's wanted
+ */
+static int irdma_set_loc_hmc_rsrc_gen_3(struct irdma_sc_dev *dev,
+ u32 max_pages,
+ u32 qpwanted)
+{
+ struct irdma_hmc_fpm_misc *hmc_fpm_misc;
+ u32 rrf_cnt, xf_cnt, timer_cnt, pages_needed;
+ struct irdma_hmc_info *hmc_info;
+ u32 ird, ord;
+
+ if (FIELD_GET(IRDMA_MANAGE_RSRC_VER2, dev->feature_info[IRDMA_FTN_FLAGS]))
+ return irdma_verify_commit_fpm_gen_3(dev, max_pages, qpwanted);
+
+ hmc_info = dev->hmc_info;
+ hmc_fpm_misc = &dev->hmc_fpm_misc;
+ ird = dev->hw_attrs.max_hw_ird;
+ ord = dev->hw_attrs.max_hw_ord;
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_HDR].cnt = qpwanted;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt = qpwanted;
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt =
+ min(hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt, qpwanted * 2);
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt =
+ min(qpwanted * 8, hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt);
+
+ rrf_cnt = roundup_pow_of_two(IRDMA_RRF_MULTIPLIER * qpwanted);
+ hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt =
+ min(hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].max_cnt, rrf_cnt);
+
+ if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].max_cnt)
+ hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].cnt =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt /
+ hmc_fpm_misc->rrf_block_size;
+
+ xf_cnt = roundup_pow_of_two(IRDMA_XF_MULTIPLIER * qpwanted);
+ hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt =
+ min(hmc_info->hmc_obj[IRDMA_HMC_IW_XF].max_cnt, xf_cnt);
+ hmc_info->hmc_obj[IRDMA_HMC_IW_XFFL].cnt =
+ xf_cnt / hmc_fpm_misc->xf_block_size;
+
+ timer_cnt = (round_up(qpwanted, 512) / 512 + 1) *
+ hmc_fpm_misc->timer_bucket;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_TIMER].cnt =
+ min(timer_cnt, hmc_info->hmc_obj[IRDMA_HMC_IW_TIMER].cnt);
+
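+ /* Size Q1 for the requested IRD; while the local-memory footprint
+ * does not fit in @max_pages, halve IRD/ORD down to IRDMA_MIN_IRD.
+ */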
+ do {
+ hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt = roundup_pow_of_two(ird * 2 * qpwanted);
+ hmc_info->hmc_obj[IRDMA_HMC_IW_Q1FL].cnt =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
+
+ pages_needed = irdma_get_objs_pages(dev, hmc_info, IRDMA_LOC_MEM);
+ if (pages_needed <= max_pages)
+ break;
+
+ ird /= 2;
+ ord /= 2;
+ } while (ird >= IRDMA_MIN_IRD);
+
+ if (ird < IRDMA_MIN_IRD) {
+ ibdev_dbg(to_ibdev(dev), "HMC: FAIL: IRD=%u Q1 CNT = %u\n",
+ ird, hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt);
+ return -EINVAL;
+ }
+
+ dev->hw_attrs.max_hw_ird = ird;
+ dev->hw_attrs.max_hw_ord = ord;
+ hmc_fpm_misc->max_sds -= pages_needed;
+
+ return 0;
+}
+
+/**
+ * cfg_fpm_value_gen_3 - configure fpm for gen 3
+ * @dev: sc device struct
+ * @hmc_info: ptr to irdma_hmc_info struct
+ * @hmc_fpm_misc: ptr to fpm data
+ */
+static int cfg_fpm_value_gen_3(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info,
+ struct irdma_hmc_fpm_misc *hmc_fpm_misc)
+{
+ enum irdma_hmc_obj_mem mrte_loc;
+ u32 mrwanted, qpwanted;
+ int i, ret_code = 0;
+ u32 loc_mem_pages;
+ bool is_mrte_loc_mem;
+
+ loc_mem_pages = hmc_fpm_misc->loc_mem_pages;
+ is_mrte_loc_mem = hmc_fpm_misc->loc_mem_pages == hmc_fpm_misc->max_sds;
+
+ irdma_get_rsrc_mem_config(dev, is_mrte_loc_mem);
+ mrte_loc = hmc_info->hmc_obj[IRDMA_HMC_IW_MR].mem_loc;
+
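+ /* Hold back the minimum PBLE backing pages from the local-memory
+ * budget before sizing the remaining objects.
+ */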
+ if (is_mrte_loc_mem)
+ loc_mem_pages -= IRDMA_MIN_PBLE_PAGES;
+
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: mrte_loc %d loc_mem %u fpm max sds %u host_obj %d\n",
+ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].mem_loc,
+ hmc_fpm_misc->loc_mem_pages, hmc_fpm_misc->max_sds,
+ is_mrte_loc_mem);
+
+ mrwanted = hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt;
+ qpwanted = hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_HDR].cnt = qpwanted;
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].max_cnt = 0;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].max_cnt = 0;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_HTE].max_cnt = 0;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt = 0;
+
+ if (!FIELD_GET(IRDMA_MANAGE_RSRC_VER2, dev->feature_info[IRDMA_FTN_FLAGS]))
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt =
+ min(hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt,
+ (u32)IRDMA_FSIAV_CNT_MAX);
+
+ for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
+ hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
+
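+ /* Fit the profile into local memory, halving the QP count (and, for
+ * local-memory MRs, the MR count) on each failed attempt.
+ */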
+ while (qpwanted >= IRDMA_MIN_QP_CNT) {
+ if (!irdma_set_loc_hmc_rsrc_gen_3(dev, loc_mem_pages, qpwanted))
+ break;
+
+ if (FIELD_GET(IRDMA_MANAGE_RSRC_VER2, dev->feature_info[IRDMA_FTN_FLAGS]))
+ return -EINVAL;
+
+ qpwanted /= 2;
+ if (mrte_loc == IRDMA_LOC_MEM) {
+ mrwanted = qpwanted * IRDMA_MIN_MR_PER_QP;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt =
+ min(hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt, mrwanted);
+ }
+ }
+
+ if (qpwanted < IRDMA_MIN_QP_CNT) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: ERROR: could not allocate fpm resources\n");
+ return -EINVAL;
+ }
+
+ irdma_set_host_hmc_rsrc_gen_3(dev);
+ ret_code = irdma_sc_cfg_iw_fpm(dev, dev->hmc_fn_id);
+ if (ret_code) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: cfg_iw_fpm returned error_code[x%08X]\n",
+ readl(dev->hw_regs[IRDMA_CQPERRCODES]));
+
+ return ret_code;
+ }
+
+ return irdma_cfg_sd_mem(dev, hmc_info);
+}
+
+/**
* irdma_cfg_fpm_val - configure HMC objects
* @dev: sc device struct
* @qp_count: desired qp count
*/
int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
{
- struct irdma_virt_mem virt_mem;
- u32 i, mem_size;
u32 qpwanted, mrwanted, pblewanted;
- u32 powerof2, hte;
+ u32 powerof2, hte, i;
u32 sd_needed;
u32 sd_diff;
u32 loop_count = 0;
struct irdma_hmc_info *hmc_info;
struct irdma_hmc_fpm_misc *hmc_fpm_misc;
int ret_code = 0;
+ u32 max_sds;
hmc_info = dev->hmc_info;
hmc_fpm_misc = &dev->hmc_fpm_misc;
@@ -4814,14 +5977,16 @@ int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
return ret_code;
}
+ max_sds = hmc_fpm_misc->max_sds;
+
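+ /* GEN_3 FPM sizing is handled entirely by cfg_fpm_value_gen_3(). */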
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ return cfg_fpm_value_gen_3(dev, hmc_info, hmc_fpm_misc);
+
for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
sd_needed = irdma_est_sd(dev, hmc_info);
- ibdev_dbg(to_ibdev(dev),
- "HMC: FW max resources sd_needed[%08d] first_sd_index[%04d]\n",
- sd_needed, hmc_info->first_sd_index);
- ibdev_dbg(to_ibdev(dev), "HMC: sd count %d where max sd is %d\n",
- hmc_info->sd_table.sd_cnt, hmc_fpm_misc->max_sds);
+ ibdev_dbg(to_ibdev(dev), "HMC: sd count %u where max sd is %u\n",
+ hmc_info->sd_table.sd_cnt, max_sds);
qpwanted = min(qp_count, hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt);
@@ -4835,21 +6000,21 @@ int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
pblewanted = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt;
ibdev_dbg(to_ibdev(dev),
- "HMC: req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d, mc=%d, av=%d\n",
- qp_count, hmc_fpm_misc->max_sds,
+ "HMC: req_qp=%d max_sd=%u, max_qp = %u, max_cq=%u, max_mr=%u, max_pble=%u, mc=%d, av=%u\n",
+ qp_count, max_sds,
hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt,
hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt,
hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt,
hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt,
hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt,
hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt);
+
hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt =
hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt;
hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt =
hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt;
hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt =
hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].max_cnt;
-
hmc_info->hmc_obj[IRDMA_HMC_IW_APBVT_ENTRY].cnt = 1;
while (irdma_q1_cnt(dev, hmc_info, qpwanted) > hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].max_cnt)
@@ -4860,7 +6025,7 @@ int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt = qpwanted;
hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt =
min(2 * qpwanted, hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt);
- hmc_info->hmc_obj[IRDMA_HMC_IW_RESERVED].cnt = 0; /* Reserved */
+ hmc_info->hmc_obj[IRDMA_HMC_IW_SRQ].cnt = 0; /* SRQ not supported before GEN_3 */
hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt = mrwanted;
hte = round_up(qpwanted + hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt, 512);
@@ -4898,11 +6063,12 @@ int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
if (!(loop_count % 2) && qpwanted > 128) {
qpwanted /= 2;
} else {
- mrwanted /= 2;
pblewanted /= 2;
+ mrwanted /= 2;
}
continue;
}
+
if (dev->cqp->hmc_profile != IRDMA_HMC_PROFILE_FAVOR_VF &&
pblewanted > (512 * FPM_MULTIPLIER * sd_diff)) {
pblewanted -= 256 * FPM_MULTIPLIER * sd_diff;
@@ -4928,14 +6094,13 @@ int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
if (sd_needed > hmc_fpm_misc->max_sds) {
ibdev_dbg(to_ibdev(dev),
- "HMC: cfg_fpm failed loop_cnt=%d, sd_needed=%d, max sd count %d\n",
+ "HMC: cfg_fpm failed loop_cnt=%u, sd_needed=%u, max sd count %u\n",
loop_count, sd_needed, hmc_info->sd_table.sd_cnt);
return -EINVAL;
}
- if (loop_count > 1 && sd_needed < hmc_fpm_misc->max_sds) {
- pblewanted += (hmc_fpm_misc->max_sds - sd_needed) * 256 *
- FPM_MULTIPLIER;
+ if (loop_count > 1 && sd_needed < max_sds) {
+ pblewanted += (max_sds - sd_needed) * 256 * FPM_MULTIPLIER;
hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted;
sd_needed = irdma_est_sd(dev, hmc_info);
}
@@ -4959,18 +6124,7 @@ int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
return ret_code;
}
- mem_size = sizeof(struct irdma_hmc_sd_entry) *
- (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1);
- virt_mem.size = mem_size;
- virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL);
- if (!virt_mem.va) {
- ibdev_dbg(to_ibdev(dev),
- "HMC: failed to allocate memory for sd_entry buffer\n");
- return -ENOMEM;
- }
- hmc_info->sd_table.sd_entry = virt_mem.va;
-
- return ret_code;
+ return irdma_cfg_sd_mem(dev, hmc_info);
}
/**
@@ -5242,6 +6396,22 @@ static int irdma_exec_cqp_cmd(struct irdma_sc_dev *dev,
&pcmdinfo->in.u.mc_modify.info,
pcmdinfo->in.u.mc_modify.scratch);
break;
+ case IRDMA_OP_SRQ_CREATE:
+ status = irdma_sc_srq_create(pcmdinfo->in.u.srq_create.srq,
+ pcmdinfo->in.u.srq_create.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_SRQ_MODIFY:
+ status = irdma_sc_srq_modify(pcmdinfo->in.u.srq_modify.srq,
+ &pcmdinfo->in.u.srq_modify.info,
+ pcmdinfo->in.u.srq_modify.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_SRQ_DESTROY:
+ status = irdma_sc_srq_destroy(pcmdinfo->in.u.srq_destroy.srq,
+ pcmdinfo->in.u.srq_destroy.scratch,
+ pcmdinfo->post_sq);
+ break;
default:
status = -EOPNOTSUPP;
break;
@@ -5314,14 +6484,26 @@ void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable)
*/
void sc_vsi_update_stats(struct irdma_sc_vsi *vsi)
{
- struct irdma_gather_stats *gather_stats;
- struct irdma_gather_stats *last_gather_stats;
+ struct irdma_dev_hw_stats *hw_stats = &vsi->pestat->hw_stats;
+ struct irdma_gather_stats *gather_stats =
+ vsi->pestat->gather_info.gather_stats_va;
+ struct irdma_gather_stats *last_gather_stats =
+ vsi->pestat->gather_info.last_gather_stats_va;
+ const struct irdma_hw_stat_map *map = vsi->dev->hw_stats_map;
+ u16 max_stat_idx = vsi->dev->hw_attrs.max_stat_idx;
+ u16 i;
+
+ if (vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ for (i = 0; i < max_stat_idx; i++) {
+ u16 idx = map[i].byteoff / sizeof(u64);
+
+ hw_stats->stats_val[i] = gather_stats->val[idx];
+ }
+ return;
+ }
- gather_stats = vsi->pestat->gather_info.gather_stats_va;
- last_gather_stats = vsi->pestat->gather_info.last_gather_stats_va;
- irdma_update_stats(&vsi->pestat->hw_stats, gather_stats,
- last_gather_stats, vsi->dev->hw_stats_map,
- vsi->dev->hw_attrs.max_stat_idx);
+ irdma_update_stats(hw_stats, gather_stats, last_gather_stats,
+ map, max_stat_idx);
}
/**
@@ -5356,6 +6538,9 @@ static inline void irdma_sc_init_hw(struct irdma_sc_dev *dev)
case IRDMA_GEN_2:
icrdma_init_hw(dev);
break;
+ case IRDMA_GEN_3:
+ ig3rdma_init_hw(dev);
+ break;
}
}
@@ -5381,10 +6566,15 @@ int irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev,
dev->fpm_commit_buf = info->fpm_commit_buf;
dev->hw = info->hw;
dev->hw->hw_addr = info->bar0;
+ dev->protocol_used = info->protocol_used;
/* Setup the hardware limits, hmc may limit further */
dev->hw_attrs.min_hw_qp_id = IRDMA_MIN_IW_QP_ID;
+ dev->hw_attrs.min_hw_srq_id = IRDMA_MIN_IW_SRQ_ID;
dev->hw_attrs.min_hw_aeq_size = IRDMA_MIN_AEQ_ENTRIES;
- dev->hw_attrs.max_hw_aeq_size = IRDMA_MAX_AEQ_ENTRIES;
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ dev->hw_attrs.max_hw_aeq_size = IRDMA_MAX_AEQ_ENTRIES_GEN_3;
+ else
+ dev->hw_attrs.max_hw_aeq_size = IRDMA_MAX_AEQ_ENTRIES;
dev->hw_attrs.min_hw_ceq_size = IRDMA_MIN_CEQ_ENTRIES;
dev->hw_attrs.max_hw_ceq_size = IRDMA_MAX_CEQ_ENTRIES;
dev->hw_attrs.uk_attrs.min_hw_cq_size = IRDMA_MIN_CQ_SIZE;
@@ -5409,21 +6599,39 @@ int irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev,
dev->hw_attrs.max_sleep_count = IRDMA_SLEEP_COUNT;
dev->hw_attrs.max_cqp_compl_wait_time_ms = CQP_COMPL_WAIT_TIME_MS;
- dev->hw_attrs.uk_attrs.hw_rev = ver;
+ if (!dev->privileged) {
+ ret_code = irdma_vchnl_req_get_hmc_fcn(dev);
+ if (ret_code) {
+ ibdev_dbg(to_ibdev(dev),
+ "DEV: Get HMC function ret = %d\n",
+ ret_code);
+
+ return ret_code;
+ }
+ }
+
irdma_sc_init_hw(dev);
- if (irdma_wait_pe_ready(dev))
- return -ETIMEDOUT;
+ if (dev->privileged) {
+ if (irdma_wait_pe_ready(dev))
+ return -ETIMEDOUT;
- val = readl(dev->hw_regs[IRDMA_GLPCI_LBARCTRL]);
- db_size = (u8)FIELD_GET(IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE, val);
- if (db_size != IRDMA_PE_DB_SIZE_4M && db_size != IRDMA_PE_DB_SIZE_8M) {
- ibdev_dbg(to_ibdev(dev),
- "DEV: RDMA PE doorbell is not enabled in CSR val 0x%x db_size=%d\n",
- val, db_size);
- return -ENODEV;
+ val = readl(dev->hw_regs[IRDMA_GLPCI_LBARCTRL]);
+ db_size = (u8)FIELD_GET(IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE, val);
+ if (db_size != IRDMA_PE_DB_SIZE_4M &&
+ db_size != IRDMA_PE_DB_SIZE_8M) {
+ ibdev_dbg(to_ibdev(dev),
+ "DEV: RDMA PE doorbell is not enabled in CSR val 0x%x db_size=%d\n",
+ val, db_size);
+ return -ENODEV;
+ }
+ } else {
+ ret_code = irdma_vchnl_req_get_reg_layout(dev);
+ if (ret_code)
+ ibdev_dbg(to_ibdev(dev),
+ "DEV: Get Register layout failed ret = %d\n",
+ ret_code);
}
- dev->db_addr = dev->hw->hw_addr + (uintptr_t)dev->hw_regs[IRDMA_DB_ADDR_OFFSET];
return ret_code;
}
diff --git a/drivers/infiniband/hw/irdma/defs.h b/drivers/infiniband/hw/irdma/defs.h
index 2cb4b96db721..983b22d7ae23 100644
--- a/drivers/infiniband/hw/irdma/defs.h
+++ b/drivers/infiniband/hw/irdma/defs.h
@@ -14,6 +14,18 @@
#define IRDMA_PE_DB_SIZE_4M 1
#define IRDMA_PE_DB_SIZE_8M 2
+#define IRDMA_IRD_HW_SIZE_4_GEN3 0
+#define IRDMA_IRD_HW_SIZE_8_GEN3 1
+#define IRDMA_IRD_HW_SIZE_16_GEN3 2
+#define IRDMA_IRD_HW_SIZE_32_GEN3 3
+#define IRDMA_IRD_HW_SIZE_64_GEN3 4
+#define IRDMA_IRD_HW_SIZE_128_GEN3 5
+#define IRDMA_IRD_HW_SIZE_256_GEN3 6
+#define IRDMA_IRD_HW_SIZE_512_GEN3 7
+#define IRDMA_IRD_HW_SIZE_1024_GEN3 8
+#define IRDMA_IRD_HW_SIZE_2048_GEN3 9
+#define IRDMA_IRD_HW_SIZE_4096_GEN3 10
+
#define IRDMA_IRD_HW_SIZE_4 0
#define IRDMA_IRD_HW_SIZE_16 1
#define IRDMA_IRD_HW_SIZE_64 2
@@ -114,6 +126,13 @@ enum irdma_protocol_used {
#define IRDMA_UPDATE_SD_BUFF_SIZE 128
#define IRDMA_FEATURE_BUF_SIZE (8 * IRDMA_MAX_FEATURES)
+#define ENABLE_LOC_MEM 63
+#define IRDMA_ATOMICS_ALLOWED_BIT 1
+#define MAX_PBLE_PER_SD 0x40000
+#define MAX_PBLE_SD_PER_FCN 0x400
+#define MAX_MR_PER_SD 0x8000
+#define MAX_MR_SD_PER_FCN 0x80
+#define IRDMA_PBLE_COMMIT_OFFSET 112
#define IRDMA_MAX_QUANTA_PER_WR 8
#define IRDMA_QP_SW_MAX_WQ_QUANTA 32768
@@ -121,6 +140,10 @@ enum irdma_protocol_used {
#define IRDMA_QP_SW_MAX_RQ_QUANTA 32768
#define IRDMA_MAX_QP_WRS(max_quanta_per_wr) \
((IRDMA_QP_SW_MAX_WQ_QUANTA - IRDMA_SQ_RSVD) / (max_quanta_per_wr))
+#define IRDMA_SRQ_MIN_QUANTA 8
+#define IRDMA_SRQ_MAX_QUANTA 262144
+#define IRDMA_MAX_SRQ_WRS \
+ ((IRDMA_SRQ_MAX_QUANTA - IRDMA_RQ_RSVD) / IRDMA_MAX_QUANTA_PER_WR)
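+/* Usable SRQ WRs: (max quanta - reserved RQ quanta) / max quanta per WR. */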
#define IRDMAQP_TERM_SEND_TERM_AND_FIN 0
#define IRDMAQP_TERM_SEND_TERM_ONLY 1
@@ -147,8 +170,13 @@ enum irdma_protocol_used {
#define IRDMA_SQ_RSVD 258
#define IRDMA_RQ_RSVD 1
-#define IRDMA_FEATURE_RTS_AE 1ULL
-#define IRDMA_FEATURE_CQ_RESIZE 2ULL
+#define IRDMA_FEATURE_RTS_AE BIT_ULL(0)
+#define IRDMA_FEATURE_CQ_RESIZE BIT_ULL(1)
+#define IRDMA_FEATURE_64_BYTE_CQE BIT_ULL(5)
+#define IRDMA_FEATURE_ATOMIC_OPS BIT_ULL(6)
+#define IRDMA_FEATURE_SRQ BIT_ULL(7)
+#define IRDMA_FEATURE_CQE_TIMESTAMPING BIT_ULL(8)
+
#define IRDMAQP_OP_RDMA_WRITE 0x00
#define IRDMAQP_OP_RDMA_READ 0x01
#define IRDMAQP_OP_RDMA_SEND 0x03
@@ -161,6 +189,8 @@ enum irdma_protocol_used {
#define IRDMAQP_OP_RDMA_READ_LOC_INV 0x0b
#define IRDMAQP_OP_NOP 0x0c
#define IRDMAQP_OP_RDMA_WRITE_SOL 0x0d
+#define IRDMAQP_OP_ATOMIC_FETCH_ADD 0x0f
+#define IRDMAQP_OP_ATOMIC_COMPARE_SWAP_ADD 0x11
#define IRDMAQP_OP_GEN_RTS_AE 0x30
enum irdma_cqp_op_type {
@@ -212,9 +242,12 @@ enum irdma_cqp_op_type {
IRDMA_OP_ADD_LOCAL_MAC_ENTRY = 46,
IRDMA_OP_DELETE_LOCAL_MAC_ENTRY = 47,
IRDMA_OP_CQ_MODIFY = 48,
+ IRDMA_OP_SRQ_CREATE = 49,
+ IRDMA_OP_SRQ_MODIFY = 50,
+ IRDMA_OP_SRQ_DESTROY = 51,
/* Must be last entry*/
- IRDMA_MAX_CQP_OPS = 49,
+ IRDMA_MAX_CQP_OPS = 52,
};
/* CQP SQ WQES */
@@ -224,6 +257,9 @@ enum irdma_cqp_op_type {
#define IRDMA_CQP_OP_CREATE_CQ 0x03
#define IRDMA_CQP_OP_MODIFY_CQ 0x04
#define IRDMA_CQP_OP_DESTROY_CQ 0x05
+#define IRDMA_CQP_OP_CREATE_SRQ 0x06
+#define IRDMA_CQP_OP_MODIFY_SRQ 0x07
+#define IRDMA_CQP_OP_DESTROY_SRQ 0x08
#define IRDMA_CQP_OP_ALLOC_STAG 0x09
#define IRDMA_CQP_OP_REG_MR 0x0a
#define IRDMA_CQP_OP_QUERY_STAG 0x0b
@@ -265,97 +301,6 @@ enum irdma_cqp_op_type {
#define IRDMA_CQP_OP_GATHER_STATS 0x2e
#define IRDMA_CQP_OP_UP_MAP 0x2f
-/* Async Events codes */
-#define IRDMA_AE_AMP_UNALLOCATED_STAG 0x0102
-#define IRDMA_AE_AMP_INVALID_STAG 0x0103
-#define IRDMA_AE_AMP_BAD_QP 0x0104
-#define IRDMA_AE_AMP_BAD_PD 0x0105
-#define IRDMA_AE_AMP_BAD_STAG_KEY 0x0106
-#define IRDMA_AE_AMP_BAD_STAG_INDEX 0x0107
-#define IRDMA_AE_AMP_BOUNDS_VIOLATION 0x0108
-#define IRDMA_AE_AMP_RIGHTS_VIOLATION 0x0109
-#define IRDMA_AE_AMP_TO_WRAP 0x010a
-#define IRDMA_AE_AMP_FASTREG_VALID_STAG 0x010c
-#define IRDMA_AE_AMP_FASTREG_MW_STAG 0x010d
-#define IRDMA_AE_AMP_FASTREG_INVALID_RIGHTS 0x010e
-#define IRDMA_AE_AMP_FASTREG_INVALID_LENGTH 0x0110
-#define IRDMA_AE_AMP_INVALIDATE_SHARED 0x0111
-#define IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS 0x0112
-#define IRDMA_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS 0x0113
-#define IRDMA_AE_AMP_MWBIND_VALID_STAG 0x0114
-#define IRDMA_AE_AMP_MWBIND_OF_MR_STAG 0x0115
-#define IRDMA_AE_AMP_MWBIND_TO_ZERO_BASED_STAG 0x0116
-#define IRDMA_AE_AMP_MWBIND_TO_MW_STAG 0x0117
-#define IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS 0x0118
-#define IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS 0x0119
-#define IRDMA_AE_AMP_MWBIND_TO_INVALID_PARENT 0x011a
-#define IRDMA_AE_AMP_MWBIND_BIND_DISABLED 0x011b
-#define IRDMA_AE_PRIV_OPERATION_DENIED 0x011c
-#define IRDMA_AE_AMP_INVALIDATE_TYPE1_MW 0x011d
-#define IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW 0x011e
-#define IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG 0x011f
-#define IRDMA_AE_AMP_MWBIND_WRONG_TYPE 0x0120
-#define IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH 0x0121
-#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG 0x0132
-#define IRDMA_AE_UDA_XMIT_BAD_PD 0x0133
-#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT 0x0134
-#define IRDMA_AE_UDA_L4LEN_INVALID 0x0135
-#define IRDMA_AE_BAD_CLOSE 0x0201
-#define IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE 0x0202
-#define IRDMA_AE_CQ_OPERATION_ERROR 0x0203
-#define IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO 0x0205
-#define IRDMA_AE_STAG_ZERO_INVALID 0x0206
-#define IRDMA_AE_IB_RREQ_AND_Q1_FULL 0x0207
-#define IRDMA_AE_IB_INVALID_REQUEST 0x0208
-#define IRDMA_AE_WQE_UNEXPECTED_OPCODE 0x020a
-#define IRDMA_AE_WQE_INVALID_PARAMETER 0x020b
-#define IRDMA_AE_WQE_INVALID_FRAG_DATA 0x020c
-#define IRDMA_AE_IB_REMOTE_ACCESS_ERROR 0x020d
-#define IRDMA_AE_IB_REMOTE_OP_ERROR 0x020e
-#define IRDMA_AE_WQE_LSMM_TOO_LONG 0x0220
-#define IRDMA_AE_INVALID_REQUEST 0x0223
-#define IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301
-#define IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303
-#define IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304
-#define IRDMA_AE_DDP_UBE_INVALID_MO 0x0305
-#define IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE 0x0306
-#define IRDMA_AE_DDP_UBE_INVALID_QN 0x0307
-#define IRDMA_AE_DDP_NO_L_BIT 0x0308
-#define IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION 0x0311
-#define IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE 0x0312
-#define IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST 0x0313
-#define IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP 0x0314
-#define IRDMA_AE_ROCE_RSP_LENGTH_ERROR 0x0316
-#define IRDMA_AE_ROCE_EMPTY_MCG 0x0380
-#define IRDMA_AE_ROCE_BAD_MC_IP_ADDR 0x0381
-#define IRDMA_AE_ROCE_BAD_MC_QPID 0x0382
-#define IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH 0x0383
-#define IRDMA_AE_INVALID_ARP_ENTRY 0x0401
-#define IRDMA_AE_INVALID_TCP_OPTION_RCVD 0x0402
-#define IRDMA_AE_STALE_ARP_ENTRY 0x0403
-#define IRDMA_AE_INVALID_AH_ENTRY 0x0406
-#define IRDMA_AE_LLP_CLOSE_COMPLETE 0x0501
-#define IRDMA_AE_LLP_CONNECTION_RESET 0x0502
-#define IRDMA_AE_LLP_FIN_RECEIVED 0x0503
-#define IRDMA_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH 0x0504
-#define IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR 0x0505
-#define IRDMA_AE_LLP_SEGMENT_TOO_SMALL 0x0507
-#define IRDMA_AE_LLP_SYN_RECEIVED 0x0508
-#define IRDMA_AE_LLP_TERMINATE_RECEIVED 0x0509
-#define IRDMA_AE_LLP_TOO_MANY_RETRIES 0x050a
-#define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b
-#define IRDMA_AE_LLP_DOUBT_REACHABILITY 0x050c
-#define IRDMA_AE_LLP_CONNECTION_ESTABLISHED 0x050e
-#define IRDMA_AE_LLP_TOO_MANY_RNRS 0x050f
-#define IRDMA_AE_RESOURCE_EXHAUSTION 0x0520
-#define IRDMA_AE_RESET_SENT 0x0601
-#define IRDMA_AE_TERMINATE_SENT 0x0602
-#define IRDMA_AE_RESET_NOT_SENT 0x0603
-#define IRDMA_AE_LCE_QP_CATASTROPHIC 0x0700
-#define IRDMA_AE_LCE_FUNCTION_CATASTROPHIC 0x0701
-#define IRDMA_AE_LCE_CQ_CATASTROPHIC 0x0702
-#define IRDMA_AE_QP_SUSPEND_COMPLETE 0x0900
-
#define FLD_LS_64(dev, val, field) \
(((u64)(val) << (dev)->hw_shifts[field ## _S]) & (dev)->hw_masks[field ## _M])
#define FLD_RS_64(dev, val, field) \
@@ -393,9 +338,13 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_STATS_USE_INST BIT_ULL(61)
#define IRDMA_CQPSQ_STATS_OP GENMASK_ULL(37, 32)
#define IRDMA_CQPSQ_STATS_INST_INDEX GENMASK_ULL(6, 0)
-#define IRDMA_CQPSQ_STATS_HMC_FCN_INDEX GENMASK_ULL(5, 0)
+#define IRDMA_CQPSQ_STATS_HMC_FCN_INDEX GENMASK_ULL(15, 0)
#define IRDMA_CQPSQ_WS_WQEVALID BIT_ULL(63)
-#define IRDMA_CQPSQ_WS_NODEOP GENMASK_ULL(53, 52)
+#define IRDMA_CQPSQ_WS_NODEOP GENMASK_ULL(55, 52)
+#define IRDMA_SD_MAX GENMASK_ULL(15, 0)
+#define IRDMA_MEM_MAX GENMASK_ULL(15, 0)
+#define IRDMA_QP_MEM_LOC GENMASK_ULL(47, 44)
+#define IRDMA_MR_MEM_LOC GENMASK_ULL(27, 24)
#define IRDMA_CQPSQ_WS_ENABLENODE BIT_ULL(62)
#define IRDMA_CQPSQ_WS_NODETYPE BIT_ULL(61)
@@ -404,16 +353,16 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_WS_VMVFTYPE GENMASK_ULL(55, 54)
#define IRDMA_CQPSQ_WS_VMVFNUM GENMASK_ULL(51, 42)
#define IRDMA_CQPSQ_WS_OP GENMASK_ULL(37, 32)
-#define IRDMA_CQPSQ_WS_PARENTID GENMASK_ULL(25, 16)
-#define IRDMA_CQPSQ_WS_NODEID GENMASK_ULL(9, 0)
-#define IRDMA_CQPSQ_WS_VSI GENMASK_ULL(57, 48)
+#define IRDMA_CQPSQ_WS_PARENTID GENMASK_ULL(29, 16)
+#define IRDMA_CQPSQ_WS_NODEID GENMASK_ULL(13, 0)
+#define IRDMA_CQPSQ_WS_VSI GENMASK_ULL(63, 48)
#define IRDMA_CQPSQ_WS_WEIGHT GENMASK_ULL(38, 32)
#define IRDMA_CQPSQ_UP_WQEVALID BIT_ULL(63)
#define IRDMA_CQPSQ_UP_USEVLAN BIT_ULL(62)
#define IRDMA_CQPSQ_UP_USEOVERRIDE BIT_ULL(61)
#define IRDMA_CQPSQ_UP_OP GENMASK_ULL(37, 32)
-#define IRDMA_CQPSQ_UP_HMCFCNIDX GENMASK_ULL(5, 0)
+#define IRDMA_CQPSQ_UP_HMCFCNIDX GENMASK_ULL(15, 0)
#define IRDMA_CQPSQ_UP_CNPOVERRIDE GENMASK_ULL(37, 32)
#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_WQEVALID BIT_ULL(63)
#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_BUF_LEN GENMASK_ULL(31, 0)
@@ -448,6 +397,16 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPHC_SVER GENMASK_ULL(31, 24)
#define IRDMA_CQPHC_SQBASE GENMASK_ULL(63, 9)
+#define IRDMA_CQPHC_TIMESTAMP_OVERRIDE BIT_ULL(5)
+#define IRDMA_CQPHC_TS_SHIFT GENMASK_ULL(12, 8)
+#define IRDMA_CQPHC_EN_FINE_GRAINED_TIMERS BIT_ULL(0)
+
+#define IRDMA_CQPHC_OOISC_BLKSIZE GENMASK_ULL(63, 60)
+#define IRDMA_CQPHC_RRSP_BLKSIZE GENMASK_ULL(59, 56)
+#define IRDMA_CQPHC_Q1_BLKSIZE GENMASK_ULL(55, 52)
+#define IRDMA_CQPHC_XMIT_BLKSIZE GENMASK_ULL(51, 48)
+#define IRDMA_CQPHC_BLKSIZES_VALID BIT_ULL(4)
+
#define IRDMA_CQPHC_QPCTX GENMASK_ULL(63, 0)
#define IRDMA_QP_DBSA_HW_SQ_TAIL GENMASK_ULL(14, 0)
#define IRDMA_CQ_DBSA_CQEIDX GENMASK_ULL(19, 0)
@@ -461,6 +420,8 @@ enum irdma_cqp_op_type {
#define IRDMA_CCQ_OPRETVAL GENMASK_ULL(31, 0)
+#define IRDMA_CCQ_DEFINFO GENMASK_ULL(63, 32)
+
#define IRDMA_CQ_MINERR GENMASK_ULL(15, 0)
#define IRDMA_CQ_MAJERR GENMASK_ULL(31, 16)
#define IRDMA_CQ_WQEIDX GENMASK_ULL(46, 32)
@@ -469,6 +430,7 @@ enum irdma_cqp_op_type {
#define IRDMA_CQ_ERROR BIT_ULL(55)
#define IRDMA_CQ_SQ BIT_ULL(62)
+#define IRDMA_CQ_SRQ BIT_ULL(52)
#define IRDMA_CQ_VALID BIT_ULL(63)
#define IRDMA_CQ_IMMVALID BIT_ULL(62)
#define IRDMA_CQ_UDSMACVALID BIT_ULL(61)
@@ -476,8 +438,6 @@ enum irdma_cqp_op_type {
#define IRDMA_CQ_UDSMAC GENMASK_ULL(47, 0)
#define IRDMA_CQ_UDVLAN GENMASK_ULL(63, 48)
-#define IRDMA_CQ_IMMDATA_S 0
-#define IRDMA_CQ_IMMDATA_M (0xffffffffffffffffULL << IRDMA_CQ_IMMVALID_S)
#define IRDMA_CQ_IMMDATALOW32 GENMASK_ULL(31, 0)
#define IRDMA_CQ_IMMDATAUP32 GENMASK_ULL(63, 32)
#define IRDMACQ_PAYLDLEN GENMASK_ULL(31, 0)
@@ -508,6 +468,17 @@ enum irdma_cqp_op_type {
#define IRDMA_AEQE_Q2DATA GENMASK_ULL(62, 61)
#define IRDMA_AEQE_VALID BIT_ULL(63)
+#define IRDMA_AEQE_Q2DATA_GEN_3 GENMASK_ULL(5, 4)
+#define IRDMA_AEQE_TCPSTATE_GEN_3 GENMASK_ULL(3, 0)
+#define IRDMA_AEQE_QPCQID_GEN_3 GENMASK_ULL(24, 0)
+#define IRDMA_AEQE_AECODE_GEN_3 GENMASK_ULL(61, 50)
+#define IRDMA_AEQE_OVERFLOW_GEN_3 BIT_ULL(62)
+#define IRDMA_AEQE_WQDESCIDX_GEN_3 GENMASK_ULL(49, 32)
+#define IRDMA_AEQE_IWSTATE_GEN_3 GENMASK_ULL(31, 29)
+#define IRDMA_AEQE_AESRC_GEN_3 GENMASK_ULL(28, 25)
+#define IRDMA_AEQE_CMPL_CTXT_S 6
+#define IRDMA_AEQE_CMPL_CTXT GENMASK_ULL(63, 6)
+
#define IRDMA_UDA_QPSQ_NEXT_HDR GENMASK_ULL(23, 16)
#define IRDMA_UDA_QPSQ_OPCODE GENMASK_ULL(37, 32)
#define IRDMA_UDA_QPSQ_L4LEN GENMASK_ULL(45, 42)
@@ -530,11 +501,14 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_WQEVALID BIT_ULL(63)
#define IRDMA_CQPSQ_TPHVAL GENMASK_ULL(7, 0)
-#define IRDMA_CQPSQ_VSIIDX GENMASK_ULL(17, 8)
+#define IRDMA_CQPSQ_VSIIDX GENMASK_ULL(23, 8)
#define IRDMA_CQPSQ_TPHEN BIT_ULL(60)
#define IRDMA_CQPSQ_PBUFADDR IRDMA_CQPHC_QPCTX
+#define IRDMA_CQPSQ_PASID GENMASK_ULL(51, 32)
+#define IRDMA_CQPSQ_PASID_VALID BIT_ULL(62)
+
/* Create/Modify/Destroy QP */
#define IRDMA_CQPSQ_QP_NEWMSS GENMASK_ULL(45, 32)
@@ -566,10 +540,30 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_QP_DBSHADOWADDR IRDMA_CQPHC_QPCTX
+#define IRDMA_CQPSQ_SRQ_RQSIZE GENMASK_ULL(3, 0)
+#define IRDMA_CQPSQ_SRQ_RQ_WQE_SIZE GENMASK_ULL(5, 4)
+#define IRDMA_CQPSQ_SRQ_SRQ_LIMIT GENMASK_ULL(43, 32)
+#define IRDMA_CQPSQ_SRQ_SRQCTX GENMASK_ULL(63, 6)
+#define IRDMA_CQPSQ_SRQ_PD_ID GENMASK_ULL(39, 16)
+#define IRDMA_CQPSQ_SRQ_SRQ_ID GENMASK_ULL(15, 0)
+#define IRDMA_CQPSQ_SRQ_OP GENMASK_ULL(37, 32)
+#define IRDMA_CQPSQ_SRQ_LEAF_PBL_SIZE GENMASK_ULL(45, 44)
+#define IRDMA_CQPSQ_SRQ_VIRTMAP BIT_ULL(47)
+#define IRDMA_CQPSQ_SRQ_TPH_EN BIT_ULL(60)
+#define IRDMA_CQPSQ_SRQ_ARM_LIMIT_EVENT BIT_ULL(61)
+#define IRDMA_CQPSQ_SRQ_FIRST_PM_PBL_IDX GENMASK_ULL(27, 0)
+#define IRDMA_CQPSQ_SRQ_TPH_VALUE GENMASK_ULL(7, 0)
+#define IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR_S 8
+#define IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR GENMASK_ULL(63, 8)
+#define IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR_S 6
+#define IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR GENMASK_ULL(63, 6)
+
#define IRDMA_CQPSQ_CQ_CQSIZE GENMASK_ULL(20, 0)
#define IRDMA_CQPSQ_CQ_CQCTX GENMASK_ULL(62, 0)
#define IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD GENMASK(17, 0)
+#define IRDMA_CQPSQ_CQ_CQID_HIGH GENMASK_ULL(52, 50)
+#define IRDMA_CQPSQ_CQ_CEQID_HIGH GENMASK_ULL(59, 54)
#define IRDMA_CQPSQ_CQ_OP GENMASK_ULL(37, 32)
#define IRDMA_CQPSQ_CQ_CQRESIZE BIT_ULL(43)
#define IRDMA_CQPSQ_CQ_LPBLSIZE GENMASK_ULL(45, 44)
@@ -590,6 +584,7 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_STAG_MR BIT_ULL(43)
#define IRDMA_CQPSQ_STAG_MWTYPE BIT_ULL(42)
#define IRDMA_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY BIT_ULL(58)
+#define IRDMA_CQPSQ_STAG_PDID_HI GENMASK_ULL(59, 54)
#define IRDMA_CQPSQ_STAG_LPBLSIZE IRDMA_CQPSQ_CQ_LPBLSIZE
#define IRDMA_CQPSQ_STAG_HPAGESIZE GENMASK_ULL(47, 46)
@@ -600,7 +595,8 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_STAG_USEPFRID BIT_ULL(61)
#define IRDMA_CQPSQ_STAG_PBA IRDMA_CQPHC_QPCTX
-#define IRDMA_CQPSQ_STAG_HMCFNIDX GENMASK_ULL(5, 0)
+#define IRDMA_CQPSQ_STAG_HMCFNIDX GENMASK_ULL(15, 0)
+#define IRDMA_CQPSQ_STAG_REMOTE_ATOMIC_EN BIT_ULL(61)
#define IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX GENMASK_ULL(27, 0)
#define IRDMA_CQPSQ_QUERYSTAG_IDX IRDMA_CQPSQ_STAG_IDX
@@ -628,11 +624,8 @@ enum irdma_cqp_op_type {
/* Manage Push Page - MPP */
#define IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1 0xffff
#define IRDMA_INVALID_PUSH_PAGE_INDEX 0xffffffff
-
-#define IRDMA_CQPSQ_MPP_QS_HANDLE GENMASK_ULL(9, 0)
-#define IRDMA_CQPSQ_MPP_PPIDX GENMASK_ULL(9, 0)
+#define IRDMA_CQPSQ_MPP_PPIDX GENMASK_ULL(31, 0)
#define IRDMA_CQPSQ_MPP_PPTYPE GENMASK_ULL(61, 60)
-
#define IRDMA_CQPSQ_MPP_FREE_PAGE BIT_ULL(62)
/* Upload Context - UCTX */
@@ -651,6 +644,8 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_CEQ_CEQSIZE GENMASK_ULL(21, 0)
#define IRDMA_CQPSQ_CEQ_CEQID GENMASK_ULL(9, 0)
+#define IRDMA_CQPSQ_CEQ_CEQID_HIGH GENMASK_ULL(15, 10)
+
#define IRDMA_CQPSQ_CEQ_LPBLSIZE IRDMA_CQPSQ_CQ_LPBLSIZE
#define IRDMA_CQPSQ_CEQ_VMAP BIT_ULL(47)
#define IRDMA_CQPSQ_CEQ_ITRNOEXPIRE BIT_ULL(46)
@@ -660,10 +655,10 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_AEQ_VMAP BIT_ULL(47)
#define IRDMA_CQPSQ_AEQ_FIRSTPMPBLIDX GENMASK_ULL(27, 0)
-#define IRDMA_COMMIT_FPM_QPCNT GENMASK_ULL(18, 0)
-
+#define IRDMA_COMMIT_FPM_QPCNT GENMASK_ULL(20, 0)
#define IRDMA_COMMIT_FPM_BASE_S 32
-#define IRDMA_CQPSQ_CFPM_HMCFNID GENMASK_ULL(5, 0)
+#define IRDMA_CQPSQ_CFPM_HMCFNID GENMASK_ULL(15, 0)
+
#define IRDMA_CQPSQ_FWQE_AECODE GENMASK_ULL(15, 0)
#define IRDMA_CQPSQ_FWQE_AESOURCE GENMASK_ULL(19, 16)
#define IRDMA_CQPSQ_FWQE_RQMNERR GENMASK_ULL(15, 0)
@@ -675,6 +670,10 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_FWQE_USERFLCODE BIT_ULL(60)
#define IRDMA_CQPSQ_FWQE_FLUSHSQ BIT_ULL(61)
#define IRDMA_CQPSQ_FWQE_FLUSHRQ BIT_ULL(62)
+#define IRDMA_CQPSQ_FWQE_ERR_SQ_IDX_VALID BIT_ULL(42)
+#define IRDMA_CQPSQ_FWQE_ERR_SQ_IDX GENMASK_ULL(49, 32)
+#define IRDMA_CQPSQ_FWQE_ERR_RQ_IDX_VALID BIT_ULL(43)
+#define IRDMA_CQPSQ_FWQE_ERR_RQ_IDX GENMASK_ULL(46, 32)
#define IRDMA_CQPSQ_MAPT_PORT GENMASK_ULL(15, 0)
#define IRDMA_CQPSQ_MAPT_ADDPORT BIT_ULL(62)
#define IRDMA_CQPSQ_UPESD_SDCMD GENMASK_ULL(31, 0)
@@ -693,9 +692,12 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_SUSPENDQP_QPID GENMASK_ULL(23, 0)
#define IRDMA_CQPSQ_RESUMEQP_QSHANDLE GENMASK_ULL(31, 0)
#define IRDMA_CQPSQ_RESUMEQP_QPID GENMASK(23, 0)
+#define IRDMA_MANAGE_RSRC_VER2 BIT_ULL(2)
#define IRDMA_CQPSQ_MIN_STAG_INVALID 0x0001
#define IRDMA_CQPSQ_MIN_SUSPEND_PND 0x0005
+#define IRDMA_CQPSQ_MIN_DEF_CMPL 0x0006
+#define IRDMA_CQPSQ_MIN_OOO_CMPL 0x0007
#define IRDMA_CQPSQ_MAJ_NO_ERROR 0x0000
#define IRDMA_CQPSQ_MAJ_OBJCACHE_ERROR 0xF000
@@ -712,6 +714,11 @@ enum irdma_cqp_op_type {
#define IRDMAQPC_INSERTL2TAG2 BIT_ULL(11)
#define IRDMAQPC_LIMIT GENMASK_ULL(13, 12)
+#define IRDMAQPC_USE_SRQ BIT_ULL(10)
+#define IRDMAQPC_SRQ_ID GENMASK_ULL(15, 0)
+#define IRDMAQPC_PASID GENMASK_ULL(19, 0)
+#define IRDMAQPC_PASID_VALID BIT_ULL(11)
+
#define IRDMAQPC_ECN_EN BIT_ULL(14)
#define IRDMAQPC_DROPOOOSEG BIT_ULL(15)
#define IRDMAQPC_DUPACK_THRESH GENMASK_ULL(18, 16)
@@ -782,21 +789,31 @@ enum irdma_cqp_op_type {
#define IRDMAQPC_CWNDROCE GENMASK_ULL(55, 32)
#define IRDMAQPC_SNDWL1 GENMASK_ULL(31, 0)
#define IRDMAQPC_SNDWL2 GENMASK_ULL(63, 32)
-#define IRDMAQPC_ERR_RQ_IDX GENMASK_ULL(45, 32)
+#define IRDMAQPC_MINRNR_TIMER GENMASK_ULL(4, 0)
+#define IRDMAQPC_ERR_RQ_IDX GENMASK_ULL(46, 32)
#define IRDMAQPC_RTOMIN GENMASK_ULL(63, 57)
#define IRDMAQPC_MAXSNDWND GENMASK_ULL(31, 0)
#define IRDMAQPC_REXMIT_THRESH GENMASK_ULL(53, 48)
#define IRDMAQPC_RNRNAK_THRESH GENMASK_ULL(56, 54)
-#define IRDMAQPC_TXCQNUM GENMASK_ULL(18, 0)
-#define IRDMAQPC_RXCQNUM GENMASK_ULL(50, 32)
+#define IRDMAQPC_TXCQNUM GENMASK_ULL(24, 0)
+#define IRDMAQPC_RXCQNUM GENMASK_ULL(56, 32)
#define IRDMAQPC_STAT_INDEX GENMASK_ULL(6, 0)
#define IRDMAQPC_Q2ADDR GENMASK_ULL(63, 8)
#define IRDMAQPC_LASTBYTESENT GENMASK_ULL(7, 0)
#define IRDMAQPC_MACADDRESS GENMASK_ULL(63, 16)
#define IRDMAQPC_ORDSIZE GENMASK_ULL(7, 0)
+#define IRDMAQPC_LOCALACKTIMEOUT GENMASK_ULL(12, 8)
+#define IRDMAQPC_RNRNAK_TMR GENMASK_ULL(4, 0)
+#define IRDMAQPC_ORDSIZE_GEN3 GENMASK_ULL(10, 0)
+#define IRDMAQPC_REMOTE_ATOMIC_EN BIT_ULL(18)
+#define IRDMAQPC_STAT_INDEX_GEN3 GENMASK_ULL(47, 32)
+#define IRDMAQPC_PKT_LIMIT GENMASK_ULL(55, 48)
+
#define IRDMAQPC_IRDSIZE GENMASK_ULL(18, 16)
+#define IRDMAQPC_IRDSIZE_GEN3 GENMASK_ULL(17, 14)
+
#define IRDMAQPC_UDPRIVCQENABLE BIT_ULL(19)
#define IRDMAQPC_WRRDRSPOK BIT_ULL(20)
#define IRDMAQPC_RDOK BIT_ULL(21)
@@ -833,6 +850,7 @@ enum irdma_cqp_op_type {
#define IRDMA_FEATURE_INFO GENMASK_ULL(47, 0)
#define IRDMA_FEATURE_CNT GENMASK_ULL(47, 32)
#define IRDMA_FEATURE_TYPE GENMASK_ULL(63, 48)
+#define IRDMA_FEATURE_RSRC_MAX GENMASK_ULL(31, 0)
#define IRDMAQPSQ_OPCODE GENMASK_ULL(37, 32)
#define IRDMAQPSQ_COPY_HOST_PBL BIT_ULL(43)
@@ -856,7 +874,7 @@ enum irdma_cqp_op_type {
#define IRDMAQPSQ_REMSTAGINV GENMASK_ULL(31, 0)
#define IRDMAQPSQ_DESTQKEY GENMASK_ULL(31, 0)
#define IRDMAQPSQ_DESTQPN GENMASK_ULL(55, 32)
-#define IRDMAQPSQ_AHID GENMASK_ULL(16, 0)
+#define IRDMAQPSQ_AHID GENMASK_ULL(24, 0)
#define IRDMAQPSQ_INLINEDATAFLAG BIT_ULL(57)
#define IRDMA_INLINE_VALID_S 7
@@ -869,6 +887,9 @@ enum irdma_cqp_op_type {
#define IRDMAQPSQ_REMTO IRDMA_CQPHC_QPCTX
+#define IRDMAQPSQ_STAG GENMASK_ULL(31, 0)
+#define IRDMAQPSQ_REMOTE_STAG GENMASK_ULL(31, 0)
+
#define IRDMAQPSQ_STAGRIGHTS GENMASK_ULL(52, 48)
#define IRDMAQPSQ_VABASEDTO BIT_ULL(53)
#define IRDMAQPSQ_MEMWINDOWTYPE BIT_ULL(54)
@@ -879,6 +900,8 @@ enum irdma_cqp_op_type {
#define IRDMAQPSQ_BASEVA_TO_FBO IRDMA_CQPHC_QPCTX
+#define IRDMAQPSQ_REMOTE_ATOMICS_EN BIT_ULL(55)
+
#define IRDMAQPSQ_LOCSTAG GENMASK_ULL(31, 0)
#define IRDMAQPSQ_STAGKEY GENMASK_ULL(7, 0)
@@ -903,11 +926,14 @@ enum irdma_cqp_op_type {
#define IRDMAPFINT_OICR_PE_PUSH_M BIT(27)
#define IRDMAPFINT_OICR_PE_CRITERR_M BIT(28)
-#define IRDMA_QUERY_FPM_MAX_QPS GENMASK_ULL(18, 0)
-#define IRDMA_QUERY_FPM_MAX_CQS GENMASK_ULL(19, 0)
+#define IRDMA_QUERY_FPM_LOC_MEM_PAGES GENMASK_ULL(63, 32)
+#define IRDMA_QUERY_FPM_MAX_QPS GENMASK_ULL(31, 0)
+#define IRDMA_QUERY_FPM_MAX_CQS GENMASK_ULL(31, 0)
#define IRDMA_QUERY_FPM_FIRST_PE_SD_INDEX GENMASK_ULL(13, 0)
-#define IRDMA_QUERY_FPM_MAX_PE_SDS GENMASK_ULL(45, 32)
+#define IRDMA_QUERY_FPM_MAX_PE_SDS GENMASK_ULL(44, 32)
+#define IRDMA_QUERY_FPM_MAX_PE_SDS_GEN3 GENMASK_ULL(47, 32)
#define IRDMA_QUERY_FPM_MAX_CEQS GENMASK_ULL(9, 0)
+#define IRDMA_QUERY_FPM_MAX_IRD GENMASK_ULL(53, 50)
#define IRDMA_QUERY_FPM_XFBLOCKSIZE GENMASK_ULL(63, 32)
#define IRDMA_QUERY_FPM_Q1BLOCKSIZE GENMASK_ULL(63, 32)
#define IRDMA_QUERY_FPM_HTMULTIPLIER GENMASK_ULL(19, 16)
@@ -1103,7 +1129,7 @@ enum irdma_alignment {
IRDMA_CEQ_ALIGNMENT = 0x100,
IRDMA_CQ0_ALIGNMENT = 0x100,
IRDMA_SD_BUF_ALIGNMENT = 0x80,
- IRDMA_FEATURE_BUF_ALIGNMENT = 0x8,
+ IRDMA_FEATURE_BUF_ALIGNMENT = 0x10,
};
enum icrdma_protocol_used {
diff --git a/drivers/infiniband/hw/irdma/hmc.c b/drivers/infiniband/hw/irdma/hmc.c
index ac58088a8e41..da18add141da 100644
--- a/drivers/infiniband/hw/irdma/hmc.c
+++ b/drivers/infiniband/hw/irdma/hmc.c
@@ -5,6 +5,7 @@
#include "defs.h"
#include "type.h"
#include "protos.h"
+#include "virtchnl.h"
/**
* irdma_find_sd_index_limit - finds segment descriptor index limit
@@ -228,6 +229,10 @@ int irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
bool pd_error = false;
int ret_code = 0;
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3 &&
+ dev->hmc_info->hmc_obj[info->rsrc_type].mem_loc == IRDMA_LOC_MEM)
+ return 0;
+
if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
return -EINVAL;
@@ -330,7 +335,7 @@ static int irdma_finish_del_sd_reg(struct irdma_sc_dev *dev,
u32 i, sd_idx;
struct irdma_dma_mem *mem;
- if (!reset)
+ if (dev->privileged && !reset)
ret_code = irdma_hmc_sd_grp(dev, info->hmc_info,
info->hmc_info->sd_indexes[0],
info->del_sd_cnt, false);
@@ -376,6 +381,9 @@ int irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
u32 i, j;
int ret_code = 0;
+ if (dev->hmc_info->hmc_obj[info->rsrc_type].mem_loc == IRDMA_LOC_MEM)
+ return 0;
+
if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
ibdev_dbg(to_ibdev(dev),
"HMC: error start_idx[%04d] >= [type %04d].cnt[%04d]\n",
@@ -589,7 +597,10 @@ int irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
pd_entry->sd_index = sd_idx;
pd_entry->valid = true;
pd_table->use_cnt++;
- irdma_invalidate_pf_hmc_pd(dev, sd_idx, rel_pd_idx);
+
+ if (hmc_info->hmc_fn_id < dev->hw_attrs.first_hw_vf_fpm_id &&
+ dev->privileged)
+ irdma_invalidate_pf_hmc_pd(dev, sd_idx, rel_pd_idx);
}
pd_entry->bp.use_cnt++;
@@ -640,7 +651,8 @@ int irdma_remove_pd_bp(struct irdma_sc_dev *dev,
pd_addr = pd_table->pd_page_addr.va;
pd_addr += rel_pd_idx;
memset(pd_addr, 0, sizeof(u64));
- irdma_invalidate_pf_hmc_pd(dev, sd_idx, idx);
+ if (dev->privileged && dev->hmc_fn_id == hmc_info->hmc_fn_id)
+ irdma_invalidate_pf_hmc_pd(dev, sd_idx, idx);
if (!pd_entry->rsrc_pg) {
mem = &pd_entry->bp.addr;
diff --git a/drivers/infiniband/hw/irdma/hmc.h b/drivers/infiniband/hw/irdma/hmc.h
index 415f9e23bbf6..257a5d22aa96 100644
--- a/drivers/infiniband/hw/irdma/hmc.h
+++ b/drivers/infiniband/hw/irdma/hmc.h
@@ -16,11 +16,21 @@
#define IRDMA_HMC_PD_BP_BUF_ALIGNMENT 4096
#define IRDMA_FIRST_VF_FPM_ID 8
#define FPM_MULTIPLIER 1024
+#define IRDMA_OBJ_LOC_MEM_BIT 0x4
+#define IRDMA_XF_MULTIPLIER 16
+#define IRDMA_RRF_MULTIPLIER 8
+#define IRDMA_MIN_PBLE_PAGES 3
+#define IRDMA_HMC_PAGE_SIZE 2097152
+#define IRDMA_MIN_MR_PER_QP 4
+#define IRDMA_MIN_QP_CNT 64
+#define IRDMA_FSIAV_CNT_MAX 1048576
+#define IRDMA_MIN_IRD 8
+#define IRDMA_HMC_MIN_RRF 16
enum irdma_hmc_rsrc_type {
IRDMA_HMC_IW_QP = 0,
IRDMA_HMC_IW_CQ = 1,
- IRDMA_HMC_IW_RESERVED = 2,
+ IRDMA_HMC_IW_SRQ = 2,
IRDMA_HMC_IW_HTE = 3,
IRDMA_HMC_IW_ARP = 4,
IRDMA_HMC_IW_APBVT_ENTRY = 5,
@@ -48,11 +58,17 @@ enum irdma_sd_entry_type {
IRDMA_SD_TYPE_DIRECT = 2,
};
+enum irdma_hmc_obj_mem {
+ IRDMA_HOST_MEM = 0,
+ IRDMA_LOC_MEM = 1,
+};
+
struct irdma_hmc_obj_info {
u64 base;
u32 max_cnt;
u32 cnt;
u64 size;
+ enum irdma_hmc_obj_mem mem_loc;
};
struct irdma_hmc_bp {
@@ -117,6 +133,7 @@ struct irdma_update_sds_info {
struct irdma_ccq_cqe_info;
struct irdma_hmc_fcn_info {
u32 vf_id;
+ u8 protocol_used;
u8 free_fcn;
};
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index 69ce1862eabe..7bad0e38786a 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -33,6 +33,7 @@ static struct irdma_rsrc_limits rsrc_limits_table[] = {
static enum irdma_hmc_rsrc_type iw_hmc_obj_types[] = {
IRDMA_HMC_IW_QP,
IRDMA_HMC_IW_CQ,
+ IRDMA_HMC_IW_SRQ,
IRDMA_HMC_IW_HTE,
IRDMA_HMC_IW_ARP,
IRDMA_HMC_IW_APBVT_ENTRY,
@@ -134,75 +135,68 @@ static void irdma_process_ceq(struct irdma_pci_f *rf, struct irdma_ceq *ceq)
static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
struct irdma_aeqe_info *info)
{
+ struct qp_err_code qp_err;
+
qp->sq_flush_code = info->sq;
qp->rq_flush_code = info->rq;
- qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
-
- switch (info->ae_id) {
- case IRDMA_AE_AMP_BOUNDS_VIOLATION:
- case IRDMA_AE_AMP_INVALID_STAG:
- case IRDMA_AE_AMP_RIGHTS_VIOLATION:
- case IRDMA_AE_AMP_UNALLOCATED_STAG:
- case IRDMA_AE_AMP_BAD_PD:
- case IRDMA_AE_AMP_BAD_QP:
- case IRDMA_AE_AMP_BAD_STAG_KEY:
- case IRDMA_AE_AMP_BAD_STAG_INDEX:
- case IRDMA_AE_AMP_TO_WRAP:
- case IRDMA_AE_PRIV_OPERATION_DENIED:
- qp->flush_code = FLUSH_PROT_ERR;
- qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
- break;
- case IRDMA_AE_UDA_XMIT_BAD_PD:
- case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
- qp->flush_code = FLUSH_LOC_QP_OP_ERR;
- qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
- break;
- case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
- case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
- case IRDMA_AE_UDA_L4LEN_INVALID:
- case IRDMA_AE_DDP_UBE_INVALID_MO:
- case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
- qp->flush_code = FLUSH_LOC_LEN_ERR;
- qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
- break;
- case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
- case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
- qp->flush_code = FLUSH_REM_ACCESS_ERR;
- qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
- break;
- case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
- case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
- case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
- case IRDMA_AE_IB_REMOTE_OP_ERROR:
- qp->flush_code = FLUSH_REM_OP_ERR;
- qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
- break;
- case IRDMA_AE_LCE_QP_CATASTROPHIC:
- qp->flush_code = FLUSH_FATAL_ERR;
- qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
- break;
- case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
- qp->flush_code = FLUSH_GENERAL_ERR;
- break;
- case IRDMA_AE_LLP_TOO_MANY_RETRIES:
- qp->flush_code = FLUSH_RETRY_EXC_ERR;
- qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
- break;
- case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
- case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
- case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
- case IRDMA_AE_AMP_MWBIND_VALID_STAG:
- qp->flush_code = FLUSH_MW_BIND_ERR;
- qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
- break;
- case IRDMA_AE_IB_INVALID_REQUEST:
- qp->flush_code = FLUSH_REM_INV_REQ_ERR;
- qp->event_type = IRDMA_QP_EVENT_REQ_ERR;
- break;
- default:
- qp->flush_code = FLUSH_GENERAL_ERR;
- qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
- break;
+ if (qp->qp_uk.uk_attrs->hw_rev >= IRDMA_GEN_3) {
+ if (info->sq) {
+ qp->err_sq_idx_valid = true;
+ qp->err_sq_idx = info->wqe_idx;
+ }
+ if (info->rq) {
+ qp->err_rq_idx_valid = true;
+ qp->err_rq_idx = info->wqe_idx;
+ }
+ }
+
+ qp_err = irdma_ae_to_qp_err_code(info->ae_id);
+ qp->flush_code = qp_err.flush_code;
+ qp->event_type = qp_err.event_type;
+}
+
+/**
+ * irdma_complete_cqp_request - perform post-completion cleanup
+ * @cqp: device CQP
+ * @cqp_request: CQP request
+ *
+ * Mark the CQP request as done, wake up the waiting thread or invoke
+ * the callback function, then release/free the CQP request.
+ */
+static void irdma_complete_cqp_request(struct irdma_cqp *cqp,
+ struct irdma_cqp_request *cqp_request)
+{
+ if (cqp_request->waiting) {
+ WRITE_ONCE(cqp_request->request_done, true);
+ wake_up(&cqp_request->waitq);
+ } else if (cqp_request->callback_fcn) {
+ cqp_request->callback_fcn(cqp_request);
+ }
+ irdma_put_cqp_request(cqp, cqp_request);
+}
+
+/**
+ * irdma_process_ae_def_cmpl - handle IRDMA_AE_CQP_DEFERRED_COMPLETE event
+ * @rf: RDMA PCI function
+ * @info: AEQ entry info
+ */
+static void irdma_process_ae_def_cmpl(struct irdma_pci_f *rf,
+ struct irdma_aeqe_info *info)
+{
+ u32 sw_def_info;
+ u64 scratch;
+
+ irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
+
+ irdma_sc_cqp_def_cmpl_ae_handler(&rf->sc_dev, info, true,
+ &scratch, &sw_def_info);
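+ /* Each deferred completion hands back its CQP request pointer via
+ * scratch; drain the list until none remain.
+ */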
+ while (scratch) {
+ struct irdma_cqp_request *cqp_request =
+ (struct irdma_cqp_request *)(uintptr_t)scratch;
+
+ irdma_complete_cqp_request(&rf->cqp, cqp_request);
+ irdma_sc_cqp_def_cmpl_ae_handler(&rf->sc_dev, info, false,
+ &scratch, &sw_def_info);
}
}
@@ -223,6 +217,7 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
struct irdma_sc_qp *qp = NULL;
struct irdma_qp_host_ctx_info *ctx_info = NULL;
struct irdma_device *iwdev = rf->iwdev;
+ struct irdma_sc_srq *srq;
unsigned long flags;
u32 aeqcnt = 0;
@@ -236,6 +231,13 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
if (ret)
break;
+ if (info->aeqe_overflow) {
+ ibdev_err(&iwdev->ibdev, "AEQ has overflowed\n");
+ rf->reset = true;
+ rf->gen_ops.request_reset(rf);
+ return;
+ }
+
aeqcnt++;
ibdev_dbg(&iwdev->ibdev,
"AEQ: ae_id = 0x%x bool qp=%d qp_id = %d tcp_state=%d iwarp_state=%d ae_src=%d\n",
@@ -266,9 +268,12 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
if (info->ae_id != IRDMA_AE_QP_SUSPEND_COMPLETE)
iwqp->last_aeq = info->ae_id;
spin_unlock_irqrestore(&iwqp->lock, flags);
- ctx_info = &iwqp->ctx_info;
+ } else if (info->srq) {
+ if (info->ae_id != IRDMA_AE_SRQ_LIMIT)
+ continue;
} else {
- if (info->ae_id != IRDMA_AE_CQ_OPERATION_ERROR)
+ if (info->ae_id != IRDMA_AE_CQ_OPERATION_ERROR &&
+ info->ae_id != IRDMA_AE_CQP_DEFERRED_COMPLETE)
continue;
}
@@ -363,6 +368,18 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
}
irdma_cq_rem_ref(&iwcq->ibcq);
break;
+ case IRDMA_AE_SRQ_LIMIT:
+ srq = (struct irdma_sc_srq *)(uintptr_t)info->compl_ctx;
+ irdma_srq_event(srq);
+ break;
+ case IRDMA_AE_SRQ_CATASTROPHIC_ERROR:
+ break;
+ case IRDMA_AE_CQP_DEFERRED_COMPLETE:
+ /* Remove completed CQP requests from the pending list
+ * and notify waiters of their completion.
+ */
+ irdma_process_ae_def_cmpl(rf, info);
+ break;
case IRDMA_AE_RESET_NOT_SENT:
case IRDMA_AE_LLP_DOUBT_REACHABILITY:
case IRDMA_AE_RESOURCE_EXHAUSTION:
@@ -389,13 +406,18 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
case IRDMA_AE_LLP_TOO_MANY_RNRS:
case IRDMA_AE_LCE_CQ_CATASTROPHIC:
+ case IRDMA_AE_REMOTE_QP_CATASTROPHIC:
+ case IRDMA_AE_LOCAL_QP_CATASTROPHIC:
+ case IRDMA_AE_RCE_QP_CATASTROPHIC:
case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
default:
ibdev_err(&iwdev->ibdev, "abnormal ae_id = 0x%x bool qp=%d qp_id = %d, ae_src=%d\n",
info->ae_id, info->qp, info->qp_cq_id, info->ae_src);
- if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
- ctx_info->roce_info->err_rq_idx_valid = info->rq;
- if (info->rq) {
+ ctx_info = &iwqp->ctx_info;
+ if (rdma_protocol_roce(&iwqp->iwdev->ibdev, 1)) {
+ ctx_info->roce_info->err_rq_idx_valid =
+ ctx_info->srq_valid ? false : info->err_rq_idx_valid;
+ if (ctx_info->roce_info->err_rq_idx_valid) {
ctx_info->roce_info->err_rq_idx = info->wqe_idx;
irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va,
ctx_info);
@@ -599,6 +621,8 @@ static void irdma_destroy_cqp(struct irdma_pci_f *rf)
dma_free_coherent(dev->hw->device, cqp->sq.size, cqp->sq.va,
cqp->sq.pa);
cqp->sq.va = NULL;
+ kfree(cqp->oop_op_array);
+ cqp->oop_op_array = NULL;
kfree(cqp->scratch_array);
cqp->scratch_array = NULL;
kfree(cqp->cqp_requests);
@@ -631,7 +655,9 @@ static void irdma_destroy_aeq(struct irdma_pci_f *rf)
int status = -EBUSY;
if (!rf->msix_shared) {
- rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, rf->iw_msixtbl->idx, false);
+ if (rf->sc_dev.privileged)
+ rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev,
+ rf->iw_msixtbl->idx, false);
irdma_destroy_irq(rf, rf->iw_msixtbl, rf);
}
if (rf->reset)
@@ -697,9 +723,10 @@ static void irdma_del_ceq_0(struct irdma_pci_f *rf)
if (rf->msix_shared) {
msix_vec = &rf->iw_msixtbl[0];
- rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev,
- msix_vec->ceq_id,
- msix_vec->idx, false);
+ if (rf->sc_dev.privileged)
+ rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev,
+ msix_vec->ceq_id,
+ msix_vec->idx, false);
irdma_destroy_irq(rf, msix_vec, rf);
} else {
msix_vec = &rf->iw_msixtbl[1];
@@ -730,8 +757,10 @@ static void irdma_del_ceqs(struct irdma_pci_f *rf)
msix_vec = &rf->iw_msixtbl[2];
for (i = 1; i < rf->ceqs_count; i++, msix_vec++, iwceq++) {
- rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, msix_vec->ceq_id,
- msix_vec->idx, false);
+ if (rf->sc_dev.privileged)
+ rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev,
+ msix_vec->ceq_id,
+ msix_vec->idx, false);
irdma_destroy_irq(rf, msix_vec, iwceq);
irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
IRDMA_OP_CEQ_DESTROY);
@@ -942,6 +971,13 @@ static int irdma_create_cqp(struct irdma_pci_f *rf)
goto err_scratch;
}
+ cqp->oop_op_array = kcalloc(sqsize, sizeof(*cqp->oop_op_array),
+ GFP_KERNEL);
+ if (!cqp->oop_op_array) {
+ status = -ENOMEM;
+ goto err_oop;
+ }
+ cqp_init_info.ooo_op_array = cqp->oop_op_array;
dev->cqp = &cqp->sc_cqp;
dev->cqp->dev = dev;
cqp->sq.size = ALIGN(sizeof(struct irdma_cqp_sq_wqe) * sqsize,
@@ -978,6 +1014,10 @@ static int irdma_create_cqp(struct irdma_pci_f *rf)
case IRDMA_GEN_2:
cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_2;
break;
+ case IRDMA_GEN_3:
+ cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_3;
+ cqp_init_info.ts_override = 1;
+ break;
}
status = irdma_sc_cqp_init(dev->cqp, &cqp_init_info);
if (status) {
@@ -1012,6 +1052,9 @@ err_ctx:
cqp->sq.va, cqp->sq.pa);
cqp->sq.va = NULL;
err_sq:
+ kfree(cqp->oop_op_array);
+ cqp->oop_op_array = NULL;
+err_oop:
kfree(cqp->scratch_array);
cqp->scratch_array = NULL;
err_scratch:
@@ -1033,13 +1076,15 @@ static int irdma_create_ccq(struct irdma_pci_f *rf)
struct irdma_sc_dev *dev = &rf->sc_dev;
struct irdma_ccq_init_info info = {};
struct irdma_ccq *ccq = &rf->ccq;
+ int ccq_size;
int status;
dev->ccq = &ccq->sc_cq;
dev->ccq->dev = dev;
info.dev = dev;
+ ccq_size = (rf->rdma_ver >= IRDMA_GEN_3) ? IW_GEN_3_CCQ_SIZE : IW_CCQ_SIZE;
ccq->shadow_area.size = sizeof(struct irdma_cq_shadow_area);
- ccq->mem_cq.size = ALIGN(sizeof(struct irdma_cqe) * IW_CCQ_SIZE,
+ ccq->mem_cq.size = ALIGN(sizeof(struct irdma_cqe) * ccq_size,
IRDMA_CQ0_ALIGNMENT);
ccq->mem_cq.va = dma_alloc_coherent(dev->hw->device, ccq->mem_cq.size,
&ccq->mem_cq.pa, GFP_KERNEL);
@@ -1056,7 +1101,7 @@ static int irdma_create_ccq(struct irdma_pci_f *rf)
/* populate the ccq init info */
info.cq_base = ccq->mem_cq.va;
info.cq_pa = ccq->mem_cq.pa;
- info.num_elem = IW_CCQ_SIZE;
+ info.num_elem = ccq_size;
info.shadow_area = ccq->shadow_area.va;
info.shadow_area_pa = ccq->shadow_area.pa;
info.ceqe_mask = false;
@@ -1140,9 +1185,13 @@ static int irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
}
msix_vec->ceq_id = ceq_id;
- rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id, msix_vec->idx, true);
-
- return 0;
+ if (rf->sc_dev.privileged)
+ rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id,
+ msix_vec->idx, true);
+ else
+ status = irdma_vchnl_req_ceq_vec_map(&rf->sc_dev, ceq_id,
+ msix_vec->idx);
+ return status;
}
/**
@@ -1155,7 +1204,7 @@ static int irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
static int irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
{
struct irdma_msix_vector *msix_vec = rf->iw_msixtbl;
- u32 ret = 0;
+ int ret = 0;
if (!rf->msix_shared) {
snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
@@ -1166,12 +1215,16 @@ static int irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
}
if (ret) {
ibdev_dbg(&rf->iwdev->ibdev, "ERR: aeq irq config fail\n");
- return -EINVAL;
+ return ret;
}
- rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx, true);
+ if (rf->sc_dev.privileged)
+ rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx,
+ true);
+ else
+ ret = irdma_vchnl_req_aeq_vec_map(&rf->sc_dev, msix_vec->idx);
- return 0;
+ return ret;
}
/**
@@ -1179,13 +1232,13 @@ static int irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
* @rf: RDMA PCI function
* @iwceq: pointer to the ceq resources to be created
* @ceq_id: the id number of the iwceq
- * @vsi: SC vsi struct
+ * @vsi_idx: vsi idx
*
* Return 0, if the ceq and the resources associated with it
* are successfully created, otherwise return error
*/
static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
- u32 ceq_id, struct irdma_sc_vsi *vsi)
+ u32 ceq_id, u16 vsi_idx)
{
int status;
struct irdma_ceq_init_info info = {};
@@ -1209,7 +1262,7 @@ static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
info.elem_cnt = ceq_size;
iwceq->sc_ceq.ceq_id = ceq_id;
info.dev = dev;
- info.vsi = vsi;
+ info.vsi_idx = vsi_idx;
status = irdma_sc_ceq_init(&iwceq->sc_ceq, &info);
if (!status) {
if (dev->ceq_valid)
@@ -1252,7 +1305,7 @@ static int irdma_setup_ceq_0(struct irdma_pci_f *rf)
}
iwceq = &rf->ceqlist[0];
- status = irdma_create_ceq(rf, iwceq, 0, &rf->default_vsi);
+ status = irdma_create_ceq(rf, iwceq, 0, rf->default_vsi.vsi_idx);
if (status) {
ibdev_dbg(&rf->iwdev->ibdev, "ERR: create ceq status = %d\n",
status);
@@ -1287,13 +1340,13 @@ exit:
/**
* irdma_setup_ceqs - manage the device ceq's and their interrupt resources
* @rf: RDMA PCI function
- * @vsi: VSI structure for this CEQ
+ * @vsi_idx: vsi_idx for this CEQ
*
* Allocate a list for all device completion event queues
* Create the ceq's and configure their msix interrupt vectors
* Return 0, if ceqs are successfully set up, otherwise return error
*/
-static int irdma_setup_ceqs(struct irdma_pci_f *rf, struct irdma_sc_vsi *vsi)
+static int irdma_setup_ceqs(struct irdma_pci_f *rf, u16 vsi_idx)
{
u32 i;
u32 ceq_id;
@@ -1306,7 +1359,7 @@ static int irdma_setup_ceqs(struct irdma_pci_f *rf, struct irdma_sc_vsi *vsi)
i = (rf->msix_shared) ? 1 : 2;
for (ceq_id = 1; i < num_ceqs; i++, ceq_id++) {
iwceq = &rf->ceqlist[ceq_id];
- status = irdma_create_ceq(rf, iwceq, ceq_id, vsi);
+ status = irdma_create_ceq(rf, iwceq, ceq_id, vsi_idx);
if (status) {
ibdev_dbg(&rf->iwdev->ibdev,
"ERR: create ceq status = %d\n", status);
@@ -1387,7 +1440,10 @@ static int irdma_create_aeq(struct irdma_pci_f *rf)
aeq_size = multiplier * hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt +
hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
aeq_size = min(aeq_size, dev->hw_attrs.max_hw_aeq_size);
-
+ /* GEN_3 does not support a virtual AEQ. Cap at the max kernel alloc size. */
+ if (rf->rdma_ver == IRDMA_GEN_3)
+ aeq_size = min(aeq_size, (u32)((PAGE_SIZE << MAX_PAGE_ORDER) /
+ sizeof(struct irdma_sc_aeqe)));
aeq->mem.size = ALIGN(sizeof(struct irdma_sc_aeqe) * aeq_size,
IRDMA_AEQ_ALIGNMENT);
aeq->mem.va = dma_alloc_coherent(dev->hw->device, aeq->mem.size,
@@ -1395,6 +1451,8 @@ static int irdma_create_aeq(struct irdma_pci_f *rf)
GFP_KERNEL | __GFP_NOWARN);
if (aeq->mem.va)
goto skip_virt_aeq;
+ else if (rf->rdma_ver == IRDMA_GEN_3)
+ return -ENOMEM;
/* physically mapped aeq failed. setup virtual aeq */
status = irdma_create_virt_aeq(rf, aeq_size);
@@ -1569,6 +1627,8 @@ static void irdma_del_init_mem(struct irdma_pci_f *rf)
{
struct irdma_sc_dev *dev = &rf->sc_dev;
+ if (!rf->sc_dev.privileged)
+ irdma_vchnl_req_put_hmc_fcn(&rf->sc_dev);
kfree(dev->hmc_info->sd_table.sd_entry);
dev->hmc_info->sd_table.sd_entry = NULL;
vfree(rf->mem_rsrc);
@@ -1635,6 +1695,7 @@ static int irdma_initialize_dev(struct irdma_pci_f *rf)
info.bar0 = rf->hw.hw_addr;
info.hmc_fn_id = rf->pf_id;
+ info.protocol_used = rf->protocol_used;
info.hw = &rf->hw;
status = irdma_sc_dev_init(rf->rdma_ver, &rf->sc_dev, &info);
if (status)
@@ -1665,9 +1726,6 @@ void irdma_rt_deinit_hw(struct irdma_device *iwdev)
irdma_del_local_mac_entry(iwdev->rf,
(u8)iwdev->mac_ip_table_idx);
fallthrough;
- case AEQ_CREATED:
- case PBLE_CHUNK_MEM:
- case CEQS_CREATED:
case IEQ_CREATED:
if (!iwdev->roce_mode)
irdma_puda_dele_rsrc(&iwdev->vsi, IRDMA_PUDA_RSRC_TYPE_IEQ,
@@ -1740,7 +1798,9 @@ static void irdma_get_used_rsrc(struct irdma_device *iwdev)
iwdev->rf->used_qps = find_first_zero_bit(iwdev->rf->allocated_qps,
iwdev->rf->max_qp);
iwdev->rf->used_cqs = find_first_zero_bit(iwdev->rf->allocated_cqs,
- iwdev->rf->max_cq);
+ iwdev->rf->max_cq);
+ iwdev->rf->used_srqs = find_first_zero_bit(iwdev->rf->allocated_srqs,
+ iwdev->rf->max_srq);
iwdev->rf->used_mrs = find_first_zero_bit(iwdev->rf->allocated_mrs,
iwdev->rf->max_mr);
}
@@ -1750,13 +1810,17 @@ void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf)
enum init_completion_state state = rf->init_state;
rf->init_state = INVALID_STATE;
- if (rf->rsrc_created) {
+
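+ /* Tear down in reverse order of creation; each case falls through. */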
+ switch (state) {
+ case AEQ_CREATED:
irdma_destroy_aeq(rf);
+ fallthrough;
+ case PBLE_CHUNK_MEM:
irdma_destroy_pble_prm(rf->pble_rsrc);
+ fallthrough;
+ case CEQS_CREATED:
irdma_del_ceqs(rf);
- rf->rsrc_created = false;
- }
- switch (state) {
+ fallthrough;
case CEQ0_CREATED:
irdma_del_ceq_0(rf);
fallthrough;
@@ -1835,32 +1899,6 @@ int irdma_rt_init_hw(struct irdma_device *iwdev,
break;
iwdev->init_state = IEQ_CREATED;
}
- if (!rf->rsrc_created) {
- status = irdma_setup_ceqs(rf, &iwdev->vsi);
- if (status)
- break;
-
- iwdev->init_state = CEQS_CREATED;
-
- status = irdma_hmc_init_pble(&rf->sc_dev,
- rf->pble_rsrc);
- if (status) {
- irdma_del_ceqs(rf);
- break;
- }
-
- iwdev->init_state = PBLE_CHUNK_MEM;
-
- status = irdma_setup_aeq(rf);
- if (status) {
- irdma_destroy_pble_prm(rf->pble_rsrc);
- irdma_del_ceqs(rf);
- break;
- }
- iwdev->init_state = AEQ_CREATED;
- rf->rsrc_created = true;
- }
-
if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
irdma_alloc_set_mac(iwdev);
irdma_add_ip(iwdev);
@@ -1907,6 +1945,13 @@ int irdma_ctrl_init_hw(struct irdma_pci_f *rf)
break;
rf->init_state = CQP_CREATED;
+ dev->feature_info[IRDMA_FEATURE_FW_INFO] = IRDMA_FW_VER_DEFAULT;
+ if (rf->rdma_ver != IRDMA_GEN_1) {
+ status = irdma_get_rdma_features(dev);
+ if (status)
+ break;
+ }
+
status = irdma_hmc_setup(rf);
if (status)
break;
@@ -1922,13 +1967,6 @@ int irdma_ctrl_init_hw(struct irdma_pci_f *rf)
break;
rf->init_state = CCQ_CREATED;
- dev->feature_info[IRDMA_FEATURE_FW_INFO] = IRDMA_FW_VER_DEFAULT;
- if (rf->rdma_ver != IRDMA_GEN_1) {
- status = irdma_get_rdma_features(dev);
- if (status)
- break;
- }
-
status = irdma_setup_ceq_0(rf);
if (status)
break;
@@ -1942,6 +1980,25 @@ int irdma_ctrl_init_hw(struct irdma_pci_f *rf)
}
INIT_WORK(&rf->cqp_cmpl_work, cqp_compl_worker);
irdma_sc_ccq_arm(dev->ccq);
+
+ status = irdma_setup_ceqs(rf, rf->iwdev ? rf->iwdev->vsi_num : 0);
+ if (status)
+ break;
+
+ rf->init_state = CEQS_CREATED;
+
+ status = irdma_hmc_init_pble(&rf->sc_dev,
+ rf->pble_rsrc);
+ if (status)
+ break;
+
+ rf->init_state = PBLE_CHUNK_MEM;
+
+ status = irdma_setup_aeq(rf);
+ if (status)
+ break;
+ rf->init_state = AEQ_CREATED;
+
return 0;
} while (0);
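
The do { ... } while (0) above is the init-side counterpart of that teardown ladder: each stage that succeeds advances rf->init_state, and any failure simply breaks out so the error path can hand the recorded state to irdma_ctrl_deinit_hw(). A hedged sketch of the pairing, continuing the hypothetical names from the teardown sketch above:

	struct ctx { enum init_state state; };

	static int setup_a(struct ctx *c) { return 0; }
	static int setup_b(struct ctx *c) { return 0; }

	static int init(struct ctx *c)
	{
		int status;

		do {
			status = setup_a(c);
			if (status)
				break;
			c->state = STAGE_A;

			status = setup_b(c);
			if (status)
				break;
			c->state = STAGE_B;

			return 0;
		} while (0);

		deinit(c->state);	/* unwinds only the stages that ran */
		return status;
	}
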
@@ -1960,7 +2017,8 @@ static void irdma_set_hw_rsrc(struct irdma_pci_f *rf)
rf->allocated_qps = (void *)(rf->mem_rsrc +
(sizeof(struct irdma_arp_entry) * rf->arp_table_size));
rf->allocated_cqs = &rf->allocated_qps[BITS_TO_LONGS(rf->max_qp)];
- rf->allocated_mrs = &rf->allocated_cqs[BITS_TO_LONGS(rf->max_cq)];
+ rf->allocated_srqs = &rf->allocated_cqs[BITS_TO_LONGS(rf->max_cq)];
+ rf->allocated_mrs = &rf->allocated_srqs[BITS_TO_LONGS(rf->max_srq)];
rf->allocated_pds = &rf->allocated_mrs[BITS_TO_LONGS(rf->max_mr)];
rf->allocated_ahs = &rf->allocated_pds[BITS_TO_LONGS(rf->max_pd)];
rf->allocated_mcgs = &rf->allocated_ahs[BITS_TO_LONGS(rf->max_ah)];
@@ -1988,12 +2046,14 @@ static u32 irdma_calc_mem_rsrc_size(struct irdma_pci_f *rf)
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_qp);
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mr);
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_cq);
+ rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_srq);
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_pd);
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->arp_table_size);
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_ah);
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mcg);
rsrc_size += sizeof(struct irdma_qp **) * rf->max_qp;
rsrc_size += sizeof(struct irdma_cq **) * rf->max_cq;
+ rsrc_size += sizeof(struct irdma_srq **) * rf->max_srq;
return rsrc_size;
}
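
The max_srq term added here must stay in lockstep with the pointer carving in irdma_set_hw_rsrc() above, since both walk the same single allocation. A minimal sketch of the layout, with illustrative names:

	#include <linux/bitmap.h>
	#include <linux/slab.h>

	struct rsrc {
		unsigned long *qps, *cqs, *srqs;
		u8 *mem;
	};

	static int alloc_bitmaps(struct rsrc *r, u32 max_qp, u32 max_cq,
				 u32 max_srq)
	{
		size_t sz = sizeof(unsigned long) *
			    (BITS_TO_LONGS(max_qp) + BITS_TO_LONGS(max_cq) +
			     BITS_TO_LONGS(max_srq));

		r->mem = kzalloc(sz, GFP_KERNEL);	/* one buffer for all bitmaps */
		if (!r->mem)
			return -ENOMEM;

		r->qps = (unsigned long *)r->mem;
		r->cqs = &r->qps[BITS_TO_LONGS(max_qp)];
		r->srqs = &r->cqs[BITS_TO_LONGS(max_cq)];
		return 0;
	}
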
@@ -2021,6 +2081,7 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
rf->max_qp = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt;
rf->max_mr = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt;
rf->max_cq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
+ rf->max_srq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_SRQ].cnt;
rf->max_pd = rf->sc_dev.hw_attrs.max_hw_pds;
rf->arp_table_size = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt;
rf->max_ah = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt;
@@ -2040,6 +2101,7 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
set_bit(0, rf->allocated_mrs);
set_bit(0, rf->allocated_qps);
set_bit(0, rf->allocated_cqs);
+ set_bit(0, rf->allocated_srqs);
set_bit(0, rf->allocated_pds);
set_bit(0, rf->allocated_arps);
set_bit(0, rf->allocated_ahs);
@@ -2100,15 +2162,16 @@ void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
cqp_request->compl_info.op_ret_val = info.op_ret_val;
cqp_request->compl_info.error = info.error;
- if (cqp_request->waiting) {
- WRITE_ONCE(cqp_request->request_done, true);
- wake_up(&cqp_request->waitq);
- irdma_put_cqp_request(&rf->cqp, cqp_request);
- } else {
- if (cqp_request->callback_fcn)
- cqp_request->callback_fcn(cqp_request);
- irdma_put_cqp_request(&rf->cqp, cqp_request);
- }
+ /*
+ * If this is a deferred or pending completion, mark the
+ * CQP request as pending so it does not block the CQ, but
+ * do not release it, as it is still on the OOO list.
+ */
+ if (info.pending)
+ cqp_request->pending = true;
+ else
+ irdma_complete_cqp_request(&rf->cqp,
+ cqp_request);
}
cqe_count++;
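
On GEN3 the CQP can report completions out of order, and the rewritten branch above only flags such entries instead of finishing them: a pending request stays on the out-of-order (OOO) list until its turn comes. A reduced sketch of the split (the real irdma_complete_cqp_request() wakes waiters or runs the callback, then drops the request reference):

	static void handle_cqe(struct irdma_cqp *cqp,
			       struct irdma_cqp_request *req, bool pending)
	{
		if (pending)
			req->pending = true;	/* still on the OOO list */
		else
			irdma_complete_cqp_request(cqp, req);
	}
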
@@ -2718,7 +2781,9 @@ void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask)
struct irdma_pci_f *rf = iwqp->iwdev->rf;
u8 flush_code = iwqp->sc_qp.flush_code;
- if (!(flush_mask & IRDMA_FLUSH_SQ) && !(flush_mask & IRDMA_FLUSH_RQ))
+ if ((!(flush_mask & IRDMA_FLUSH_SQ) &&
+ !(flush_mask & IRDMA_FLUSH_RQ)) ||
+ ((flush_mask & IRDMA_REFLUSH) && rf->rdma_ver >= IRDMA_GEN_3))
return;
/* Set flush info fields*/
@@ -2731,6 +2796,10 @@ void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask)
info.rq_major_code = IRDMA_FLUSH_MAJOR_ERR;
info.rq_minor_code = FLUSH_GENERAL_ERR;
info.userflushcode = true;
+ info.err_sq_idx_valid = iwqp->sc_qp.err_sq_idx_valid;
+ info.err_sq_idx = iwqp->sc_qp.err_sq_idx;
+ info.err_rq_idx_valid = iwqp->sc_qp.err_rq_idx_valid;
+ info.err_rq_idx = iwqp->sc_qp.err_rq_idx;
if (flush_mask & IRDMA_REFLUSH) {
if (info.sq)
diff --git a/drivers/infiniband/hw/irdma/i40iw_hw.c b/drivers/infiniband/hw/irdma/i40iw_hw.c
index ce61a27cb1f6..60c1f2b1811d 100644
--- a/drivers/infiniband/hw/irdma/i40iw_hw.c
+++ b/drivers/infiniband/hw/irdma/i40iw_hw.c
@@ -85,6 +85,7 @@ static u64 i40iw_masks[IRDMA_MAX_MASKS] = {
I40E_CQPSQ_CQ_CEQID,
I40E_CQPSQ_CQ_CQID,
I40E_COMMIT_FPM_CQCNT,
+ I40E_CQPSQ_UPESD_HMCFNID,
};
static u64 i40iw_shifts[IRDMA_MAX_SHIFTS] = {
@@ -94,6 +95,7 @@ static u64 i40iw_shifts[IRDMA_MAX_SHIFTS] = {
I40E_CQPSQ_CQ_CEQID_S,
I40E_CQPSQ_CQ_CQID_S,
I40E_COMMIT_FPM_CQCNT_S,
+ I40E_CQPSQ_UPESD_HMCFNID_S,
};
/**
diff --git a/drivers/infiniband/hw/irdma/i40iw_hw.h b/drivers/infiniband/hw/irdma/i40iw_hw.h
index e1db84d8a62c..0095b327afcc 100644
--- a/drivers/infiniband/hw/irdma/i40iw_hw.h
+++ b/drivers/infiniband/hw/irdma/i40iw_hw.h
@@ -123,6 +123,8 @@
#define I40E_CQPSQ_CQ_CQID GENMASK_ULL(15, 0)
#define I40E_COMMIT_FPM_CQCNT_S 0
#define I40E_COMMIT_FPM_CQCNT GENMASK_ULL(17, 0)
+#define I40E_CQPSQ_UPESD_HMCFNID_S 0
+#define I40E_CQPSQ_UPESD_HMCFNID GENMASK_ULL(5, 0)
#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4))
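
Each generation contributes its own mask/shift pair for the same logical field, and common code indexes the per-generation tables through the IRDMA_CQPSQ_UPESD_HMCFNID_* enum values added to irdma.h further below. A hedged sketch of a consumer, assuming the tables are exposed as dev->hw_masks[]/dev->hw_shifts[] like the existing fields:

	static u64 encode_upesd_hmc_fn_id(struct irdma_sc_dev *dev, u64 fn_id)
	{
		return (fn_id << dev->hw_shifts[IRDMA_CQPSQ_UPESD_HMCFNID_S]) &
		       dev->hw_masks[IRDMA_CQPSQ_UPESD_HMCFNID_M];
	}
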
diff --git a/drivers/infiniband/hw/irdma/i40iw_if.c b/drivers/infiniband/hw/irdma/i40iw_if.c
index cc50a7070371..15e036ddaffb 100644
--- a/drivers/infiniband/hw/irdma/i40iw_if.c
+++ b/drivers/infiniband/hw/irdma/i40iw_if.c
@@ -75,6 +75,9 @@ static void i40iw_fill_device_info(struct irdma_device *iwdev, struct i40e_info
struct irdma_pci_f *rf = iwdev->rf;
rf->rdma_ver = IRDMA_GEN_1;
+ rf->sc_dev.hw = &rf->hw;
+ rf->sc_dev.hw_attrs.uk_attrs.hw_rev = IRDMA_GEN_1;
+ rf->sc_dev.privileged = true;
rf->gen_ops.request_reset = i40iw_request_reset;
rf->pcidev = cdev_info->pcidev;
rf->pf_id = cdev_info->fid;
diff --git a/drivers/infiniband/hw/irdma/icrdma_hw.c b/drivers/infiniband/hw/irdma/icrdma_hw.c
index 941d3edffadb..32f26284a788 100644
--- a/drivers/infiniband/hw/irdma/icrdma_hw.c
+++ b/drivers/infiniband/hw/irdma/icrdma_hw.c
@@ -38,6 +38,7 @@ static u64 icrdma_masks[IRDMA_MAX_MASKS] = {
ICRDMA_CQPSQ_CQ_CEQID,
ICRDMA_CQPSQ_CQ_CQID,
ICRDMA_COMMIT_FPM_CQCNT,
+ ICRDMA_CQPSQ_UPESD_HMCFNID,
};
static u64 icrdma_shifts[IRDMA_MAX_SHIFTS] = {
@@ -47,6 +48,7 @@ static u64 icrdma_shifts[IRDMA_MAX_SHIFTS] = {
ICRDMA_CQPSQ_CQ_CEQID_S,
ICRDMA_CQPSQ_CQ_CQID_S,
ICRDMA_COMMIT_FPM_CQCNT_S,
+ ICRDMA_CQPSQ_UPESD_HMCFNID_S,
};
/**
@@ -194,6 +196,7 @@ void icrdma_init_hw(struct irdma_sc_dev *dev)
dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE;
dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;
dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_2;
+ dev->hw_attrs.max_hw_device_pages = ICRDMA_MAX_PUSH_PAGE_COUNT;
dev->hw_attrs.uk_attrs.min_hw_wq_size = ICRDMA_MIN_WQ_SIZE;
dev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR;
diff --git a/drivers/infiniband/hw/irdma/icrdma_hw.h b/drivers/infiniband/hw/irdma/icrdma_hw.h
index 697b9572b5c6..d97944ab45da 100644
--- a/drivers/infiniband/hw/irdma/icrdma_hw.h
+++ b/drivers/infiniband/hw/irdma/icrdma_hw.h
@@ -58,14 +58,15 @@
#define ICRDMA_CQPSQ_CQ_CQID GENMASK_ULL(18, 0)
#define ICRDMA_COMMIT_FPM_CQCNT_S 0
#define ICRDMA_COMMIT_FPM_CQCNT GENMASK_ULL(19, 0)
-
+#define ICRDMA_CQPSQ_UPESD_HMCFNID_S 0
+#define ICRDMA_CQPSQ_UPESD_HMCFNID GENMASK_ULL(5, 0)
enum icrdma_device_caps_const {
ICRDMA_MAX_STATS_COUNT = 128,
ICRDMA_MAX_IRD_SIZE = 127,
ICRDMA_MAX_ORD_SIZE = 255,
ICRDMA_MIN_WQ_SIZE = 8 /* WQEs */,
-
+ ICRDMA_MAX_PUSH_PAGE_COUNT = 256,
};
void icrdma_init_hw(struct irdma_sc_dev *dev);
diff --git a/drivers/infiniband/hw/irdma/icrdma_if.c b/drivers/infiniband/hw/irdma/icrdma_if.c
new file mode 100644
index 000000000000..27b191f61caf
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/icrdma_if.c
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2015 - 2024 Intel Corporation */
+
+#include "main.h"
+#include <linux/net/intel/iidc_rdma_ice.h>
+
+static void icrdma_prep_tc_change(struct irdma_device *iwdev)
+{
+ iwdev->vsi.tc_change_pending = true;
+ irdma_sc_suspend_resume_qps(&iwdev->vsi, IRDMA_OP_SUSPEND);
+
+ /* Wait for all qp's to suspend */
+ wait_event_timeout(iwdev->suspend_wq,
+ !atomic_read(&iwdev->vsi.qp_suspend_reqs),
+ msecs_to_jiffies(IRDMA_EVENT_TIMEOUT_MS));
+ irdma_ws_reset(&iwdev->vsi);
+}
+
+static void icrdma_fill_qos_info(struct irdma_l2params *l2params,
+ struct iidc_rdma_qos_params *qos_info)
+{
+ int i;
+
+ l2params->num_tc = qos_info->num_tc;
+ l2params->vsi_prio_type = qos_info->vport_priority_type;
+ l2params->vsi_rel_bw = qos_info->vport_relative_bw;
+ for (i = 0; i < l2params->num_tc; i++) {
+ l2params->tc_info[i].egress_virt_up =
+ qos_info->tc_info[i].egress_virt_up;
+ l2params->tc_info[i].ingress_virt_up =
+ qos_info->tc_info[i].ingress_virt_up;
+ l2params->tc_info[i].prio_type = qos_info->tc_info[i].prio_type;
+ l2params->tc_info[i].rel_bw = qos_info->tc_info[i].rel_bw;
+ l2params->tc_info[i].tc_ctx = qos_info->tc_info[i].tc_ctx;
+ }
+ for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
+ l2params->up2tc[i] = qos_info->up2tc[i];
+ if (qos_info->pfc_mode == IIDC_DSCP_PFC_MODE) {
+ l2params->dscp_mode = true;
+ memcpy(l2params->dscp_map, qos_info->dscp_map, sizeof(l2params->dscp_map));
+ }
+}
+
+static void icrdma_iidc_event_handler(struct iidc_rdma_core_dev_info *cdev_info,
+ struct iidc_rdma_event *event)
+{
+ struct irdma_device *iwdev = dev_get_drvdata(&cdev_info->adev->dev);
+ struct irdma_l2params l2params = {};
+
+ if (*event->type & BIT(IIDC_RDMA_EVENT_AFTER_MTU_CHANGE)) {
+ ibdev_dbg(&iwdev->ibdev, "CLNT: new MTU = %d\n", iwdev->netdev->mtu);
+ if (iwdev->vsi.mtu != iwdev->netdev->mtu) {
+ l2params.mtu = iwdev->netdev->mtu;
+ l2params.mtu_changed = true;
+ irdma_log_invalid_mtu(l2params.mtu, &iwdev->rf->sc_dev);
+ irdma_change_l2params(&iwdev->vsi, &l2params);
+ }
+ } else if (*event->type & BIT(IIDC_RDMA_EVENT_BEFORE_TC_CHANGE)) {
+ if (iwdev->vsi.tc_change_pending)
+ return;
+
+ icrdma_prep_tc_change(iwdev);
+ } else if (*event->type & BIT(IIDC_RDMA_EVENT_AFTER_TC_CHANGE)) {
+ struct iidc_rdma_priv_dev_info *idc_priv = cdev_info->iidc_priv;
+
+ if (!iwdev->vsi.tc_change_pending)
+ return;
+
+ l2params.tc_changed = true;
+ ibdev_dbg(&iwdev->ibdev, "CLNT: TC Change\n");
+
+ icrdma_fill_qos_info(&l2params, &idc_priv->qos_info);
+ if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
+ iwdev->dcb_vlan_mode =
+ l2params.num_tc > 1 && !l2params.dscp_mode;
+ irdma_change_l2params(&iwdev->vsi, &l2params);
+ } else if (*event->type & BIT(IIDC_RDMA_EVENT_CRIT_ERR)) {
+ ibdev_warn(&iwdev->ibdev, "ICE OICR event notification: oicr = 0x%08x\n",
+ event->reg);
+ if (event->reg & IRDMAPFINT_OICR_PE_CRITERR_M) {
+ u32 pe_criterr;
+
+ pe_criterr = readl(iwdev->rf->sc_dev.hw_regs[IRDMA_GLPE_CRITERR]);
+#define IRDMA_Q1_RESOURCE_ERR 0x0001024d
+ if (pe_criterr != IRDMA_Q1_RESOURCE_ERR) {
+ ibdev_err(&iwdev->ibdev, "critical PE Error, GLPE_CRITERR=0x%08x\n",
+ pe_criterr);
+ iwdev->rf->reset = true;
+ } else {
+ ibdev_warn(&iwdev->ibdev, "Q1 Resource Check\n");
+ }
+ }
+ if (event->reg & IRDMAPFINT_OICR_HMC_ERR_M) {
+ ibdev_err(&iwdev->ibdev, "HMC Error\n");
+ iwdev->rf->reset = true;
+ }
+ if (event->reg & IRDMAPFINT_OICR_PE_PUSH_M) {
+ ibdev_err(&iwdev->ibdev, "PE Push Error\n");
+ iwdev->rf->reset = true;
+ }
+ if (iwdev->rf->reset)
+ iwdev->rf->gen_ops.request_reset(iwdev->rf);
+ }
+}
+
+/**
+ * icrdma_lan_register_qset - Register qset with LAN driver
+ * @vsi: vsi structure
+ * @tc_node: Traffic class node
+ */
+static int icrdma_lan_register_qset(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node)
+{
+ struct irdma_device *iwdev = vsi->back_vsi;
+ struct iidc_rdma_core_dev_info *cdev_info = iwdev->rf->cdev;
+ struct iidc_rdma_qset_params qset = {};
+ int ret;
+
+ qset.qs_handle = tc_node->qs_handle;
+ qset.tc = tc_node->traffic_class;
+ qset.vport_id = vsi->vsi_idx;
+ ret = ice_add_rdma_qset(cdev_info, &qset);
+ if (ret) {
+ ibdev_dbg(&iwdev->ibdev, "WS: LAN alloc_res for rdma qset failed.\n");
+ return ret;
+ }
+
+ tc_node->l2_sched_node_id = qset.teid;
+ vsi->qos[tc_node->user_pri].l2_sched_node_id = qset.teid;
+
+ return 0;
+}
+
+/**
+ * icrdma_lan_unregister_qset - Unregister qset with LAN driver
+ * @vsi: vsi structure
+ * @tc_node: Traffic class node
+ */
+static void icrdma_lan_unregister_qset(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node)
+{
+ struct irdma_device *iwdev = vsi->back_vsi;
+ struct iidc_rdma_core_dev_info *cdev_info = iwdev->rf->cdev;
+ struct iidc_rdma_qset_params qset = {};
+
+ qset.qs_handle = tc_node->qs_handle;
+ qset.tc = tc_node->traffic_class;
+ qset.vport_id = vsi->vsi_idx;
+ qset.teid = tc_node->l2_sched_node_id;
+
+ if (ice_del_rdma_qset(cdev_info, &qset))
+ ibdev_dbg(&iwdev->ibdev, "WS: LAN free_res for rdma qset failed.\n");
+}
+
+/**
+ * icrdma_request_reset - Request a reset
+ * @rf: RDMA PCI function
+ */
+static void icrdma_request_reset(struct irdma_pci_f *rf)
+{
+ ibdev_warn(&rf->iwdev->ibdev, "Requesting a reset\n");
+ ice_rdma_request_reset(rf->cdev, IIDC_FUNC_RESET);
+}
+
+static int icrdma_init_interrupts(struct irdma_pci_f *rf, struct iidc_rdma_core_dev_info *cdev)
+{
+ int i;
+
+ rf->msix_count = num_online_cpus() + IRDMA_NUM_AEQ_MSIX;
+ rf->msix_entries = kcalloc(rf->msix_count, sizeof(*rf->msix_entries),
+ GFP_KERNEL);
+ if (!rf->msix_entries)
+ return -ENOMEM;
+
+ for (i = 0; i < rf->msix_count; i++)
+ if (ice_alloc_rdma_qvector(cdev, &rf->msix_entries[i]))
+ break;
+
+ if (i < IRDMA_MIN_MSIX) {
+ while (--i >= 0)
+ ice_free_rdma_qvector(cdev, &rf->msix_entries[i]);
+
+ kfree(rf->msix_entries);
+ return -ENOMEM;
+ }
+
+ rf->msix_count = i;
+
+ return 0;
+}
+
+static void icrdma_deinit_interrupts(struct irdma_pci_f *rf, struct iidc_rdma_core_dev_info *cdev)
+{
+ int i;
+
+ for (i = 0; i < rf->msix_count; i++)
+ ice_free_rdma_qvector(cdev, &rf->msix_entries[i]);
+
+ kfree(rf->msix_entries);
+}
+
+static void icrdma_fill_device_info(struct irdma_device *iwdev,
+ struct iidc_rdma_core_dev_info *cdev_info)
+{
+ struct iidc_rdma_priv_dev_info *idc_priv = cdev_info->iidc_priv;
+ struct irdma_pci_f *rf = iwdev->rf;
+
+ rf->sc_dev.hw = &rf->hw;
+ rf->iwdev = iwdev;
+ rf->cdev = cdev_info;
+ rf->hw.hw_addr = idc_priv->hw_addr;
+ rf->pcidev = cdev_info->pdev;
+ rf->hw.device = &rf->pcidev->dev;
+ rf->pf_id = idc_priv->pf_id;
+ rf->rdma_ver = IRDMA_GEN_2;
+ rf->sc_dev.hw_attrs.uk_attrs.hw_rev = IRDMA_GEN_2;
+ rf->sc_dev.is_pf = true;
+ rf->sc_dev.privileged = true;
+
+ rf->gen_ops.register_qset = icrdma_lan_register_qset;
+ rf->gen_ops.unregister_qset = icrdma_lan_unregister_qset;
+
+ rf->default_vsi.vsi_idx = idc_priv->vport_id;
+ rf->protocol_used =
+ cdev_info->rdma_protocol == IIDC_RDMA_PROTOCOL_ROCEV2 ?
+ IRDMA_ROCE_PROTOCOL_ONLY : IRDMA_IWARP_PROTOCOL_ONLY;
+ rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT;
+ rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
+ rf->gen_ops.request_reset = icrdma_request_reset;
+ rf->limits_sel = 7;
+ mutex_init(&rf->ah_tbl_lock);
+
+ iwdev->netdev = idc_priv->netdev;
+ iwdev->vsi_num = idc_priv->vport_id;
+ iwdev->init_state = INITIAL_STATE;
+ iwdev->roce_cwnd = IRDMA_ROCE_CWND_DEFAULT;
+ iwdev->roce_ackcreds = IRDMA_ROCE_ACKCREDS_DEFAULT;
+ iwdev->rcv_wnd = IRDMA_CM_DEFAULT_RCV_WND_SCALED;
+ iwdev->rcv_wscale = IRDMA_CM_DEFAULT_RCV_WND_SCALE;
+ if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
+ iwdev->roce_mode = true;
+}
+
+static int icrdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_device_id *id)
+{
+ struct iidc_rdma_core_auxiliary_dev *iidc_adev;
+ struct iidc_rdma_core_dev_info *cdev_info;
+ struct iidc_rdma_priv_dev_info *idc_priv;
+ struct irdma_l2params l2params = {};
+ struct irdma_device *iwdev;
+ struct irdma_pci_f *rf;
+ int err;
+
+ iidc_adev = container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
+ cdev_info = iidc_adev->cdev_info;
+ idc_priv = cdev_info->iidc_priv;
+
+ iwdev = ib_alloc_device(irdma_device, ibdev);
+ if (!iwdev)
+ return -ENOMEM;
+ iwdev->rf = kzalloc(sizeof(*rf), GFP_KERNEL);
+ if (!iwdev->rf) {
+ ib_dealloc_device(&iwdev->ibdev);
+ return -ENOMEM;
+ }
+
+ icrdma_fill_device_info(iwdev, cdev_info);
+ rf = iwdev->rf;
+
+ err = icrdma_init_interrupts(rf, cdev_info);
+ if (err)
+ goto err_init_interrupts;
+
+ err = irdma_ctrl_init_hw(rf);
+ if (err)
+ goto err_ctrl_init;
+
+ l2params.mtu = iwdev->netdev->mtu;
+ icrdma_fill_qos_info(&l2params, &idc_priv->qos_info);
+ if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
+ iwdev->dcb_vlan_mode = l2params.num_tc > 1 && !l2params.dscp_mode;
+
+ err = irdma_rt_init_hw(iwdev, &l2params);
+ if (err)
+ goto err_rt_init;
+
+ err = irdma_ib_register_device(iwdev);
+ if (err)
+ goto err_ibreg;
+
+ ice_rdma_update_vsi_filter(cdev_info, iwdev->vsi_num, true);
+
+ ibdev_dbg(&iwdev->ibdev, "INIT: Gen2 PF[%d] device probe success\n", PCI_FUNC(rf->pcidev->devfn));
+ auxiliary_set_drvdata(aux_dev, iwdev);
+
+ return 0;
+
+err_ibreg:
+ irdma_rt_deinit_hw(iwdev);
+err_rt_init:
+ irdma_ctrl_deinit_hw(rf);
+err_ctrl_init:
+ icrdma_deinit_interrupts(rf, cdev_info);
+err_init_interrupts:
+ kfree(iwdev->rf);
+ ib_dealloc_device(&iwdev->ibdev);
+
+ return err;
+}
+
+static void icrdma_remove(struct auxiliary_device *aux_dev)
+{
+ struct iidc_rdma_core_auxiliary_dev *idc_adev =
+ container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
+ struct iidc_rdma_core_dev_info *cdev_info = idc_adev->cdev_info;
+ struct irdma_device *iwdev = auxiliary_get_drvdata(aux_dev);
+ u8 rdma_ver = iwdev->rf->rdma_ver;
+
+ ice_rdma_update_vsi_filter(cdev_info, iwdev->vsi_num, false);
+ irdma_ib_unregister_device(iwdev);
+ icrdma_deinit_interrupts(iwdev->rf, cdev_info);
+
+ pr_debug("INIT: Gen[%d] func[%d] device remove success\n",
+ rdma_ver, PCI_FUNC(cdev_info->pdev->devfn));
+}
+
+static const struct auxiliary_device_id icrdma_auxiliary_id_table[] = {
+ {.name = "ice.iwarp", },
+ {.name = "ice.roce", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, icrdma_auxiliary_id_table);
+
+struct iidc_rdma_core_auxiliary_drv icrdma_core_auxiliary_drv = {
+ .adrv = {
+ .name = "gen_2",
+ .id_table = icrdma_auxiliary_id_table,
+ .probe = icrdma_probe,
+ .remove = icrdma_remove,
+ },
+ .event_handler = icrdma_iidc_event_handler,
+};
diff --git a/drivers/infiniband/hw/irdma/ig3rdma_hw.c b/drivers/infiniband/hw/irdma/ig3rdma_hw.c
new file mode 100644
index 000000000000..2e8bb475e22a
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/ig3rdma_hw.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2018 - 2024 Intel Corporation */
+#include "osdep.h"
+#include "type.h"
+#include "protos.h"
+#include "ig3rdma_hw.h"
+
+/**
+ * ig3rdma_ena_irq - Enable interrupt
+ * @dev: pointer to the device structure
+ * @idx: vector index
+ */
+static void ig3rdma_ena_irq(struct irdma_sc_dev *dev, u32 idx)
+{
+ u32 val;
+ u32 int_stride = 1; /* one u32 per register */
+
+ if (dev->is_pf)
+ int_stride = 0x400;
+ else
+ idx--; /* VFs use DYN_CTL_N */
+
+ val = FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTENA, 1) |
+ FIELD_PREP(IRDMA_GLINT_DYN_CTL_CLEARPBA, 1);
+
+ writel(val, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + (idx * int_stride));
+}
+
+/**
+ * ig3rdma_disable_irq - Disable interrupt
+ * @dev: pointer to the device structure
+ * @idx: vector index
+ */
+static void ig3rdma_disable_irq(struct irdma_sc_dev *dev, u32 idx)
+{
+ u32 int_stride = 1; /* one u32 per register */
+
+ if (dev->is_pf)
+ int_stride = 0x400;
+ else
+ idx--; /* VFs use DYN_CTL_N */
+
+ writel(0, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + (idx * int_stride));
+}
+
+static const struct irdma_irq_ops ig3rdma_irq_ops = {
+ .irdma_dis_irq = ig3rdma_disable_irq,
+ .irdma_en_irq = ig3rdma_ena_irq,
+};
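
The ops table keeps generation-specific interrupt control behind one interface, so common code never checks the hardware generation directly. A hypothetical call site:

	static void irdma_enable_vector(struct irdma_sc_dev *dev, u32 idx)
	{
		dev->irq_ops->irdma_en_irq(dev, idx);
	}
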
+
+static const struct irdma_hw_stat_map ig3rdma_hw_stat_map[] = {
+ [IRDMA_HW_STAT_INDEX_RXVLANERR] = { 0, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4RXOCTS] = { 8, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4RXPKTS] = { 16, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = { 24, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = { 32, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4RXFRAGS] = { 40, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS] = { 48, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] = { 56, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6RXOCTS] = { 64, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6RXPKTS] = { 72, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = { 80, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = { 88, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6RXFRAGS] = { 96, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS] = { 104, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] = { 112, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4TXOCTS] = { 120, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4TXPKTS] = { 128, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4TXFRAGS] = { 136, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS] = { 144, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] = { 152, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6TXOCTS] = { 160, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6TXPKTS] = { 168, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6TXFRAGS] = { 176, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS] = { 184, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] = { 192, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = { 200, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = { 208, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_TCPRTXSEG] = { 216, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = { 224, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = { 232, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_TCPTXSEG] = { 240, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_TCPRXSEGS] = { 248, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_UDPRXPKTS] = { 256, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_UDPTXPKTS] = { 264, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMARXWRS] = { 272, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMARXRDS] = { 280, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMARXSNDS] = { 288, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMATXWRS] = { 296, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMATXRDS] = { 304, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMATXSNDS] = { 312, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMAVBND] = { 320, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMAVINV] = { 328, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS] = { 336, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = { 344, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = { 352, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = { 360, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RNR_SENT] = { 368, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RNR_RCVD] = { 376, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMAORDLMTCNT] = { 384, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMAIRDLMTCNT] = { 392, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMARXATS] = { 408, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMATXATS] = { 416, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_NAKSEQERR] = { 424, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_NAKSEQERR_IMPLIED] = { 432, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RTO] = { 440, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RXOOOPKTS] = { 448, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_ICRCERR] = { 456, 0, 0 },
+};
+
+void ig3rdma_init_hw(struct irdma_sc_dev *dev)
+{
+ dev->irq_ops = &ig3rdma_irq_ops;
+ dev->hw_stats_map = ig3rdma_hw_stat_map;
+
+ dev->hw_attrs.uk_attrs.hw_rev = IRDMA_GEN_3;
+ dev->hw_attrs.uk_attrs.max_hw_wq_frags = IG3RDMA_MAX_WQ_FRAGMENT_COUNT;
+ dev->hw_attrs.uk_attrs.max_hw_read_sges = IG3RDMA_MAX_SGE_RD;
+ dev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR;
+ dev->hw_attrs.first_hw_vf_fpm_id = 0;
+ dev->hw_attrs.max_hw_vf_fpm_id = IG3_MAX_APFS + IG3_MAX_AVFS;
+ dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_64_BYTE_CQE;
+ dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_CQE_TIMESTAMPING;
+
+ dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_SRQ;
+ dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_RTS_AE |
+ IRDMA_FEATURE_CQ_RESIZE;
+ dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M | SZ_1G;
+ dev->hw_attrs.max_hw_ird = IG3RDMA_MAX_IRD_SIZE;
+ dev->hw_attrs.max_hw_ord = IG3RDMA_MAX_ORD_SIZE;
+ dev->hw_attrs.max_stat_inst = IG3RDMA_MAX_STATS_COUNT;
+ dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_3;
+ dev->hw_attrs.uk_attrs.min_hw_wq_size = IG3RDMA_MIN_WQ_SIZE;
+ dev->hw_attrs.uk_attrs.max_hw_srq_quanta = IRDMA_SRQ_MAX_QUANTA;
+ dev->hw_attrs.uk_attrs.max_hw_inline = IG3RDMA_MAX_INLINE_DATA_SIZE;
+ dev->hw_attrs.max_hw_device_pages =
+ dev->is_pf ? IG3RDMA_MAX_PF_PUSH_PAGE_COUNT : IG3RDMA_MAX_VF_PUSH_PAGE_COUNT;
+}
+
+static void __iomem *__ig3rdma_get_reg_addr(struct irdma_mmio_region *region, u64 reg_offset)
+{
+ if (reg_offset >= region->offset &&
+ reg_offset < (region->offset + region->len)) {
+ reg_offset -= region->offset;
+
+ return region->addr + reg_offset;
+ }
+
+ return NULL;
+}
+
+void __iomem *ig3rdma_get_reg_addr(struct irdma_hw *hw, u64 reg_offset)
+{
+ u8 __iomem *reg_addr;
+ int i;
+
+ reg_addr = __ig3rdma_get_reg_addr(&hw->rdma_reg, reg_offset);
+ if (reg_addr)
+ return reg_addr;
+
+ for (i = 0; i < hw->num_io_regions; i++) {
+ reg_addr = __ig3rdma_get_reg_addr(&hw->io_regs[i], reg_offset);
+ if (reg_addr)
+ return reg_addr;
+ }
+
+ WARN_ON_ONCE(1);
+
+ return NULL;
+}
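
GEN3 registers are spread across several discontiguous BAR regions, so every access first resolves the absolute offset to whichever mapped region contains it; the WARN fires if an offset matches no region. A hypothetical read wrapper showing the intended usage:

	static u32 ig3rdma_rd32(struct irdma_hw *hw, u64 offset)
	{
		void __iomem *addr = ig3rdma_get_reg_addr(hw, offset);

		return addr ? readl(addr) : 0;
	}
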
diff --git a/drivers/infiniband/hw/irdma/ig3rdma_hw.h b/drivers/infiniband/hw/irdma/ig3rdma_hw.h
new file mode 100644
index 000000000000..03d5f1188789
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/ig3rdma_hw.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2021 - 2024 Intel Corporation */
+#ifndef IG3RDMA_HW_H
+#define IG3RDMA_HW_H
+
+#define IG3_MAX_APFS 1
+#define IG3_MAX_AVFS 0
+
+#define IG3_PF_RDMA_REGION_OFFSET 0xBC00000
+#define IG3_PF_RDMA_REGION_LEN 0x401000
+#define IG3_VF_RDMA_REGION_OFFSET 0x8C00
+#define IG3_VF_RDMA_REGION_LEN 0x8400
+
+enum ig3rdma_device_caps_const {
+ IG3RDMA_MAX_WQ_FRAGMENT_COUNT = 14,
+ IG3RDMA_MAX_SGE_RD = 14,
+
+ IG3RDMA_MAX_STATS_COUNT = 128,
+
+ IG3RDMA_MAX_IRD_SIZE = 64,
+ IG3RDMA_MAX_ORD_SIZE = 64,
+ IG3RDMA_MIN_WQ_SIZE = 16 /* WQEs */,
+ IG3RDMA_MAX_INLINE_DATA_SIZE = 216,
+ IG3RDMA_MAX_PF_PUSH_PAGE_COUNT = 8192,
+ IG3RDMA_MAX_VF_PUSH_PAGE_COUNT = 16,
+};
+
+void __iomem *ig3rdma_get_reg_addr(struct irdma_hw *hw, u64 reg_offset);
+int ig3rdma_vchnl_send_sync(struct irdma_sc_dev *dev, u8 *msg, u16 len,
+ u8 *recv_msg, u16 *recv_len);
+
+#endif /* IG3RDMA_HW_H*/
diff --git a/drivers/infiniband/hw/irdma/ig3rdma_if.c b/drivers/infiniband/hw/irdma/ig3rdma_if.c
new file mode 100644
index 000000000000..1bb42eb298ba
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/ig3rdma_if.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2023 - 2024 Intel Corporation */
+
+#include "main.h"
+#include <linux/net/intel/iidc_rdma_idpf.h>
+#include "ig3rdma_hw.h"
+
+static void ig3rdma_idc_core_event_handler(struct iidc_rdma_core_dev_info *cdev_info,
+ struct iidc_rdma_event *event)
+{
+ struct irdma_pci_f *rf = auxiliary_get_drvdata(cdev_info->adev);
+
+ if (*event->type & BIT(IIDC_RDMA_EVENT_WARN_RESET)) {
+ rf->reset = true;
+ rf->sc_dev.vchnl_up = false;
+ }
+}
+
+int ig3rdma_vchnl_send_sync(struct irdma_sc_dev *dev, u8 *msg, u16 len,
+ u8 *recv_msg, u16 *recv_len)
+{
+ struct iidc_rdma_core_dev_info *cdev_info = dev_to_rf(dev)->cdev;
+ int ret;
+
+ ret = idpf_idc_rdma_vc_send_sync(cdev_info, msg, len, recv_msg,
+ recv_len);
+ if (ret == -ETIMEDOUT) {
+ ibdev_err(&(dev_to_rf(dev)->iwdev->ibdev),
+ "Virtual channel Req <-> Resp completion timeout\n");
+ dev->vchnl_up = false;
+ }
+
+ return ret;
+}
+
+static int ig3rdma_vchnl_init(struct irdma_pci_f *rf,
+ struct iidc_rdma_core_dev_info *cdev_info,
+ u8 *rdma_ver)
+{
+ struct iidc_rdma_priv_dev_info *idc_priv = cdev_info->iidc_priv;
+ struct irdma_vchnl_init_info virt_info;
+ u8 gen = rf->rdma_ver;
+ int ret;
+
+ rf->vchnl_wq = alloc_ordered_workqueue("irdma-virtchnl-wq", 0);
+ if (!rf->vchnl_wq)
+ return -ENOMEM;
+
+ mutex_init(&rf->sc_dev.vchnl_mutex);
+
+ virt_info.is_pf = !idc_priv->ftype;
+ virt_info.hw_rev = gen;
+ virt_info.privileged = gen == IRDMA_GEN_2;
+ virt_info.vchnl_wq = rf->vchnl_wq;
+ ret = irdma_sc_vchnl_init(&rf->sc_dev, &virt_info);
+ if (ret) {
+ destroy_workqueue(rf->vchnl_wq);
+ return ret;
+ }
+
+ *rdma_ver = rf->sc_dev.hw_attrs.uk_attrs.hw_rev;
+
+ return 0;
+}
+
+/**
+ * ig3rdma_request_reset - Request a reset
+ * @rf: RDMA PCI function
+ */
+static void ig3rdma_request_reset(struct irdma_pci_f *rf)
+{
+ ibdev_warn(&rf->iwdev->ibdev, "Requesting a reset\n");
+ idpf_idc_request_reset(rf->cdev, IIDC_FUNC_RESET);
+}
+
+static int ig3rdma_cfg_regions(struct irdma_hw *hw,
+ struct iidc_rdma_core_dev_info *cdev_info)
+{
+ struct iidc_rdma_priv_dev_info *idc_priv = cdev_info->iidc_priv;
+ struct pci_dev *pdev = cdev_info->pdev;
+ int i;
+
+ switch (idc_priv->ftype) {
+ case IIDC_FUNCTION_TYPE_PF:
+ hw->rdma_reg.len = IG3_PF_RDMA_REGION_LEN;
+ hw->rdma_reg.offset = IG3_PF_RDMA_REGION_OFFSET;
+ break;
+ case IIDC_FUNCTION_TYPE_VF:
+ hw->rdma_reg.len = IG3_VF_RDMA_REGION_LEN;
+ hw->rdma_reg.offset = IG3_VF_RDMA_REGION_OFFSET;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ hw->rdma_reg.addr = ioremap(pci_resource_start(pdev, 0) + hw->rdma_reg.offset,
+ hw->rdma_reg.len);
+
+ if (!hw->rdma_reg.addr)
+ return -ENOMEM;
+
+ hw->num_io_regions = le16_to_cpu(idc_priv->num_memory_regions);
+ hw->io_regs = kcalloc(hw->num_io_regions,
+ sizeof(struct irdma_mmio_region), GFP_KERNEL);
+
+ if (!hw->io_regs) {
+ iounmap(hw->rdma_reg.addr);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < hw->num_io_regions; i++) {
+ hw->io_regs[i].addr =
+ idc_priv->mapped_mem_regions[i].region_addr;
+ hw->io_regs[i].len =
+ le64_to_cpu(idc_priv->mapped_mem_regions[i].size);
+ hw->io_regs[i].offset =
+ le64_to_cpu(idc_priv->mapped_mem_regions[i].start_offset);
+ }
+
+ return 0;
+}
+
+static void ig3rdma_decfg_rf(struct irdma_pci_f *rf)
+{
+ struct irdma_hw *hw = &rf->hw;
+
+ destroy_workqueue(rf->vchnl_wq);
+ kfree(hw->io_regs);
+ iounmap(hw->rdma_reg.addr);
+}
+
+static int ig3rdma_cfg_rf(struct irdma_pci_f *rf,
+ struct iidc_rdma_core_dev_info *cdev_info)
+{
+ struct iidc_rdma_priv_dev_info *idc_priv = cdev_info->iidc_priv;
+ int err;
+
+ rf->sc_dev.hw = &rf->hw;
+ rf->cdev = cdev_info;
+ rf->pcidev = cdev_info->pdev;
+ rf->hw.device = &rf->pcidev->dev;
+ rf->msix_count = idc_priv->msix_count;
+ rf->msix_entries = idc_priv->msix_entries;
+
+ err = ig3rdma_vchnl_init(rf, cdev_info, &rf->rdma_ver);
+ if (err)
+ return err;
+
+ err = ig3rdma_cfg_regions(&rf->hw, cdev_info);
+ if (err) {
+ destroy_workqueue(rf->vchnl_wq);
+ return err;
+ }
+
+ rf->protocol_used = IRDMA_ROCE_PROTOCOL_ONLY;
+ rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT;
+ rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
+ rf->gen_ops.request_reset = ig3rdma_request_reset;
+ rf->limits_sel = 7;
+ mutex_init(&rf->ah_tbl_lock);
+
+ return 0;
+}
+
+static int ig3rdma_core_probe(struct auxiliary_device *aux_dev,
+ const struct auxiliary_device_id *id)
+{
+ struct iidc_rdma_core_auxiliary_dev *idc_adev =
+ container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
+ struct iidc_rdma_core_dev_info *cdev_info = idc_adev->cdev_info;
+ struct irdma_pci_f *rf;
+ int err;
+
+ rf = kzalloc(sizeof(*rf), GFP_KERNEL);
+ if (!rf)
+ return -ENOMEM;
+
+ err = ig3rdma_cfg_rf(rf, cdev_info);
+ if (err)
+ goto err_cfg_rf;
+
+ err = irdma_ctrl_init_hw(rf);
+ if (err)
+ goto err_ctrl_init;
+
+ auxiliary_set_drvdata(aux_dev, rf);
+
+ err = idpf_idc_vport_dev_ctrl(cdev_info, true);
+ if (err)
+ goto err_vport_ctrl;
+
+ return 0;
+
+err_vport_ctrl:
+ irdma_ctrl_deinit_hw(rf);
+err_ctrl_init:
+ ig3rdma_decfg_rf(rf);
+err_cfg_rf:
+ kfree(rf);
+
+ return err;
+}
+
+static void ig3rdma_core_remove(struct auxiliary_device *aux_dev)
+{
+ struct iidc_rdma_core_auxiliary_dev *idc_adev =
+ container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
+ struct iidc_rdma_core_dev_info *cdev_info = idc_adev->cdev_info;
+ struct irdma_pci_f *rf = auxiliary_get_drvdata(aux_dev);
+
+ idpf_idc_vport_dev_ctrl(cdev_info, false);
+ irdma_ctrl_deinit_hw(rf);
+ ig3rdma_decfg_rf(rf);
+ kfree(rf);
+}
+
+static const struct auxiliary_device_id ig3rdma_core_auxiliary_id_table[] = {
+ {.name = "idpf.8086.rdma.core", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, ig3rdma_core_auxiliary_id_table);
+
+struct iidc_rdma_core_auxiliary_drv ig3rdma_core_auxiliary_drv = {
+ .adrv = {
+ .name = "core",
+ .id_table = ig3rdma_core_auxiliary_id_table,
+ .probe = ig3rdma_core_probe,
+ .remove = ig3rdma_core_remove,
+ },
+ .event_handler = ig3rdma_idc_core_event_handler,
+};
diff --git a/drivers/infiniband/hw/irdma/irdma.h b/drivers/infiniband/hw/irdma/irdma.h
index 20d2e7393e3d..ff938a01d70c 100644
--- a/drivers/infiniband/hw/irdma/irdma.h
+++ b/drivers/infiniband/hw/irdma/irdma.h
@@ -32,7 +32,16 @@
#define IRDMA_PFHMC_SDDATALOW_PMSDDATALOW GENMASK(31, 12)
#define IRDMA_PFHMC_SDCMD_PMSDWR BIT(31)
-#define IRDMA_INVALID_CQ_IDX 0xffffffff
+#define IRDMA_INVALID_CQ_IDX 0xffffffff
+#define IRDMA_Q_INVALID_IDX 0xffff
+
+enum irdma_dyn_idx_t {
+ IRDMA_IDX_ITR0 = 0,
+ IRDMA_IDX_ITR1 = 1,
+ IRDMA_IDX_ITR2 = 2,
+ IRDMA_IDX_NOITR = 3,
+};
+
enum irdma_registers {
IRDMA_CQPTAIL,
IRDMA_CQPDB,
@@ -67,6 +76,7 @@ enum irdma_shifts {
IRDMA_CQPSQ_CQ_CEQID_S,
IRDMA_CQPSQ_CQ_CQID_S,
IRDMA_COMMIT_FPM_CQCNT_S,
+ IRDMA_CQPSQ_UPESD_HMCFNID_S,
IRDMA_MAX_SHIFTS,
};
@@ -77,6 +87,7 @@ enum irdma_masks {
IRDMA_CQPSQ_CQ_CEQID_M,
IRDMA_CQPSQ_CQ_CQID_M,
IRDMA_COMMIT_FPM_CQCNT_M,
+ IRDMA_CQPSQ_UPESD_HMCFNID_M,
IRDMA_MAX_MASKS, /* Must be last entry */
};
@@ -92,7 +103,7 @@ struct irdma_mcast_grp_ctx_entry_info {
struct irdma_mcast_grp_info {
u8 dest_mac_addr[ETH_ALEN];
u16 vlan_id;
- u8 hmc_fcn_id;
+ u16 hmc_fcn_id;
bool ipv4_valid:1;
bool vlan_valid:1;
u16 mg_id;
@@ -107,6 +118,9 @@ enum irdma_vers {
IRDMA_GEN_RSVD,
IRDMA_GEN_1,
IRDMA_GEN_2,
+ IRDMA_GEN_3,
+ IRDMA_GEN_NEXT,
+ IRDMA_GEN_MAX = IRDMA_GEN_NEXT - 1,
};
struct irdma_uk_attrs {
@@ -118,6 +132,7 @@ struct irdma_uk_attrs {
u32 max_hw_wq_quanta;
u32 min_hw_cq_size;
u32 max_hw_cq_size;
+ u32 max_hw_srq_quanta;
u16 max_hw_sq_chunk;
u16 min_hw_wq_size;
u8 hw_rev;
@@ -147,10 +162,13 @@ struct irdma_hw_attrs {
u32 max_done_count;
u32 max_sleep_count;
u32 max_cqp_compl_wait_time_ms;
+ u32 min_hw_srq_id;
u16 max_stat_inst;
u16 max_stat_idx;
};
void i40iw_init_hw(struct irdma_sc_dev *dev);
void icrdma_init_hw(struct irdma_sc_dev *dev);
+void ig3rdma_init_hw(struct irdma_sc_dev *dev);
+void __iomem *ig3rdma_get_reg_addr(struct irdma_hw *hw, u64 reg_offset);
#endif /* IRDMA_H*/
diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c
index 1e840bbd619d..95957d52883d 100644
--- a/drivers/infiniband/hw/irdma/main.c
+++ b/drivers/infiniband/hw/irdma/main.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "main.h"
+#include <linux/net/intel/iidc_rdma_idpf.h>
MODULE_ALIAS("i40iw");
MODULE_DESCRIPTION("Intel(R) Ethernet Protocol Driver for RDMA");
@@ -38,19 +39,7 @@ static void irdma_unregister_notifiers(void)
unregister_netdevice_notifier(&irdma_netdevice_notifier);
}
-static void irdma_prep_tc_change(struct irdma_device *iwdev)
-{
- iwdev->vsi.tc_change_pending = true;
- irdma_sc_suspend_resume_qps(&iwdev->vsi, IRDMA_OP_SUSPEND);
-
- /* Wait for all qp's to suspend */
- wait_event_timeout(iwdev->suspend_wq,
- !atomic_read(&iwdev->vsi.qp_suspend_reqs),
- msecs_to_jiffies(IRDMA_EVENT_TIMEOUT_MS));
- irdma_ws_reset(&iwdev->vsi);
-}
-
-static void irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev)
+void irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev)
{
if (mtu < IRDMA_MIN_MTU_IPV4)
ibdev_warn(to_ibdev(dev), "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 576 for IPv4\n", mtu);
@@ -58,35 +47,10 @@ static void irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev)
ibdev_warn(to_ibdev(dev), "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 1280 for IPv6\n", mtu);
}
-static void irdma_fill_qos_info(struct irdma_l2params *l2params,
- struct iidc_rdma_qos_params *qos_info)
+static void ig3rdma_idc_vport_event_handler(struct iidc_rdma_vport_dev_info *cdev_info,
+ struct iidc_rdma_event *event)
{
- int i;
-
- l2params->num_tc = qos_info->num_tc;
- l2params->vsi_prio_type = qos_info->vport_priority_type;
- l2params->vsi_rel_bw = qos_info->vport_relative_bw;
- for (i = 0; i < l2params->num_tc; i++) {
- l2params->tc_info[i].egress_virt_up =
- qos_info->tc_info[i].egress_virt_up;
- l2params->tc_info[i].ingress_virt_up =
- qos_info->tc_info[i].ingress_virt_up;
- l2params->tc_info[i].prio_type = qos_info->tc_info[i].prio_type;
- l2params->tc_info[i].rel_bw = qos_info->tc_info[i].rel_bw;
- l2params->tc_info[i].tc_ctx = qos_info->tc_info[i].tc_ctx;
- }
- for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
- l2params->up2tc[i] = qos_info->up2tc[i];
- if (qos_info->pfc_mode == IIDC_DSCP_PFC_MODE) {
- l2params->dscp_mode = true;
- memcpy(l2params->dscp_map, qos_info->dscp_map, sizeof(l2params->dscp_map));
- }
-}
-
-static void irdma_iidc_event_handler(struct iidc_rdma_core_dev_info *cdev_info,
- struct iidc_rdma_event *event)
-{
- struct irdma_device *iwdev = dev_get_drvdata(&cdev_info->adev->dev);
+ struct irdma_device *iwdev = auxiliary_get_drvdata(cdev_info->adev);
struct irdma_l2params l2params = {};
if (*event->type & BIT(IIDC_RDMA_EVENT_AFTER_MTU_CHANGE)) {
@@ -97,248 +61,39 @@ static void irdma_iidc_event_handler(struct iidc_rdma_core_dev_info *cdev_info,
irdma_log_invalid_mtu(l2params.mtu, &iwdev->rf->sc_dev);
irdma_change_l2params(&iwdev->vsi, &l2params);
}
- } else if (*event->type & BIT(IIDC_RDMA_EVENT_BEFORE_TC_CHANGE)) {
- if (iwdev->vsi.tc_change_pending)
- return;
-
- irdma_prep_tc_change(iwdev);
- } else if (*event->type & BIT(IIDC_RDMA_EVENT_AFTER_TC_CHANGE)) {
- struct iidc_rdma_priv_dev_info *iidc_priv = cdev_info->iidc_priv;
-
- if (!iwdev->vsi.tc_change_pending)
- return;
-
- l2params.tc_changed = true;
- ibdev_dbg(&iwdev->ibdev, "CLNT: TC Change\n");
-
- irdma_fill_qos_info(&l2params, &iidc_priv->qos_info);
- if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
- iwdev->dcb_vlan_mode =
- l2params.num_tc > 1 && !l2params.dscp_mode;
- irdma_change_l2params(&iwdev->vsi, &l2params);
- } else if (*event->type & BIT(IIDC_RDMA_EVENT_CRIT_ERR)) {
- ibdev_warn(&iwdev->ibdev, "ICE OICR event notification: oicr = 0x%08x\n",
- event->reg);
- if (event->reg & IRDMAPFINT_OICR_PE_CRITERR_M) {
- u32 pe_criterr;
-
- pe_criterr = readl(iwdev->rf->sc_dev.hw_regs[IRDMA_GLPE_CRITERR]);
-#define IRDMA_Q1_RESOURCE_ERR 0x0001024d
- if (pe_criterr != IRDMA_Q1_RESOURCE_ERR) {
- ibdev_err(&iwdev->ibdev, "critical PE Error, GLPE_CRITERR=0x%08x\n",
- pe_criterr);
- iwdev->rf->reset = true;
- } else {
- ibdev_warn(&iwdev->ibdev, "Q1 Resource Check\n");
- }
- }
- if (event->reg & IRDMAPFINT_OICR_HMC_ERR_M) {
- ibdev_err(&iwdev->ibdev, "HMC Error\n");
- iwdev->rf->reset = true;
- }
- if (event->reg & IRDMAPFINT_OICR_PE_PUSH_M) {
- ibdev_err(&iwdev->ibdev, "PE Push Error\n");
- iwdev->rf->reset = true;
- }
- if (iwdev->rf->reset)
- iwdev->rf->gen_ops.request_reset(iwdev->rf);
}
}
-/**
- * irdma_request_reset - Request a reset
- * @rf: RDMA PCI function
- */
-static void irdma_request_reset(struct irdma_pci_f *rf)
-{
- ibdev_warn(&rf->iwdev->ibdev, "Requesting a reset\n");
- ice_rdma_request_reset(rf->cdev, IIDC_FUNC_RESET);
-}
-
-/**
- * irdma_lan_register_qset - Register qset with LAN driver
- * @vsi: vsi structure
- * @tc_node: Traffic class node
- */
-static int irdma_lan_register_qset(struct irdma_sc_vsi *vsi,
- struct irdma_ws_node *tc_node)
-{
- struct irdma_device *iwdev = vsi->back_vsi;
- struct iidc_rdma_core_dev_info *cdev_info;
- struct iidc_rdma_qset_params qset = {};
- int ret;
-
- cdev_info = iwdev->rf->cdev;
- qset.qs_handle = tc_node->qs_handle;
- qset.tc = tc_node->traffic_class;
- qset.vport_id = vsi->vsi_idx;
- ret = ice_add_rdma_qset(cdev_info, &qset);
- if (ret) {
- ibdev_dbg(&iwdev->ibdev, "WS: LAN alloc_res for rdma qset failed.\n");
- return ret;
- }
-
- tc_node->l2_sched_node_id = qset.teid;
- vsi->qos[tc_node->user_pri].l2_sched_node_id = qset.teid;
-
- return 0;
-}
-
-/**
- * irdma_lan_unregister_qset - Unregister qset with LAN driver
- * @vsi: vsi structure
- * @tc_node: Traffic class node
- */
-static void irdma_lan_unregister_qset(struct irdma_sc_vsi *vsi,
- struct irdma_ws_node *tc_node)
+static int ig3rdma_vport_probe(struct auxiliary_device *aux_dev,
+ const struct auxiliary_device_id *id)
{
- struct irdma_device *iwdev = vsi->back_vsi;
- struct iidc_rdma_core_dev_info *cdev_info;
- struct iidc_rdma_qset_params qset = {};
-
- cdev_info = iwdev->rf->cdev;
- qset.qs_handle = tc_node->qs_handle;
- qset.tc = tc_node->traffic_class;
- qset.vport_id = vsi->vsi_idx;
- qset.teid = tc_node->l2_sched_node_id;
-
- if (ice_del_rdma_qset(cdev_info, &qset))
- ibdev_dbg(&iwdev->ibdev, "WS: LAN free_res for rdma qset failed.\n");
-}
-
-static int irdma_init_interrupts(struct irdma_pci_f *rf, struct iidc_rdma_core_dev_info *cdev)
-{
- int i;
-
- rf->msix_count = num_online_cpus() + IRDMA_NUM_AEQ_MSIX;
- rf->msix_entries = kcalloc(rf->msix_count, sizeof(*rf->msix_entries),
- GFP_KERNEL);
- if (!rf->msix_entries)
- return -ENOMEM;
-
- for (i = 0; i < rf->msix_count; i++)
- if (ice_alloc_rdma_qvector(cdev, &rf->msix_entries[i]))
- break;
-
- if (i < IRDMA_MIN_MSIX) {
- while (--i >= 0)
- ice_free_rdma_qvector(cdev, &rf->msix_entries[i]);
+ struct iidc_rdma_vport_auxiliary_dev *idc_adev =
+ container_of(aux_dev, struct iidc_rdma_vport_auxiliary_dev, adev);
+ struct auxiliary_device *aux_core_dev = idc_adev->vdev_info->core_adev;
+ struct irdma_pci_f *rf = auxiliary_get_drvdata(aux_core_dev);
+ struct irdma_l2params l2params = {};
+ struct irdma_device *iwdev;
+ int err;
- kfree(rf->msix_entries);
+ if (!rf) {
+ WARN_ON_ONCE(1);
return -ENOMEM;
}
-
- rf->msix_count = i;
-
- return 0;
-}
-
-static void irdma_deinit_interrupts(struct irdma_pci_f *rf, struct iidc_rdma_core_dev_info *cdev)
-{
- int i;
-
- for (i = 0; i < rf->msix_count; i++)
- ice_free_rdma_qvector(cdev, &rf->msix_entries[i]);
-
- kfree(rf->msix_entries);
-}
-
-static void irdma_remove(struct auxiliary_device *aux_dev)
-{
- struct irdma_device *iwdev = auxiliary_get_drvdata(aux_dev);
- struct iidc_rdma_core_auxiliary_dev *iidc_adev;
- struct iidc_rdma_core_dev_info *cdev_info;
-
- iidc_adev = container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
- cdev_info = iidc_adev->cdev_info;
-
- ice_rdma_update_vsi_filter(cdev_info, iwdev->vsi_num, false);
- irdma_ib_unregister_device(iwdev);
- irdma_deinit_interrupts(iwdev->rf, cdev_info);
-
- kfree(iwdev->rf);
-
- pr_debug("INIT: Gen2 PF[%d] device remove success\n", PCI_FUNC(cdev_info->pdev->devfn));
-}
-
-static void irdma_fill_device_info(struct irdma_device *iwdev,
- struct iidc_rdma_core_dev_info *cdev_info)
-{
- struct iidc_rdma_priv_dev_info *iidc_priv = cdev_info->iidc_priv;
- struct irdma_pci_f *rf = iwdev->rf;
-
- rf->sc_dev.hw = &rf->hw;
- rf->iwdev = iwdev;
- rf->cdev = cdev_info;
- rf->hw.hw_addr = iidc_priv->hw_addr;
- rf->pcidev = cdev_info->pdev;
- rf->hw.device = &rf->pcidev->dev;
- rf->pf_id = iidc_priv->pf_id;
- rf->gen_ops.register_qset = irdma_lan_register_qset;
- rf->gen_ops.unregister_qset = irdma_lan_unregister_qset;
-
- rf->default_vsi.vsi_idx = iidc_priv->vport_id;
- rf->protocol_used =
- cdev_info->rdma_protocol == IIDC_RDMA_PROTOCOL_ROCEV2 ?
- IRDMA_ROCE_PROTOCOL_ONLY : IRDMA_IWARP_PROTOCOL_ONLY;
- rf->rdma_ver = IRDMA_GEN_2;
- rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT;
- rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
- rf->gen_ops.request_reset = irdma_request_reset;
- rf->limits_sel = 7;
- rf->iwdev = iwdev;
-
- mutex_init(&iwdev->ah_tbl_lock);
-
- iwdev->netdev = iidc_priv->netdev;
- iwdev->vsi_num = iidc_priv->vport_id;
+ iwdev = ib_alloc_device(irdma_device, ibdev);
+ if (!iwdev)
+ return -ENOMEM;
+
+ /* Fill iwdev info */
+ iwdev->is_vport = true;
+ iwdev->rf = rf;
+ iwdev->vport_id = idc_adev->vdev_info->vport_id;
+ iwdev->netdev = idc_adev->vdev_info->netdev;
iwdev->init_state = INITIAL_STATE;
iwdev->roce_cwnd = IRDMA_ROCE_CWND_DEFAULT;
iwdev->roce_ackcreds = IRDMA_ROCE_ACKCREDS_DEFAULT;
iwdev->rcv_wnd = IRDMA_CM_DEFAULT_RCV_WND_SCALED;
iwdev->rcv_wscale = IRDMA_CM_DEFAULT_RCV_WND_SCALE;
- if (rf->protocol_used == IRDMA_ROCE_PROTOCOL_ONLY)
- iwdev->roce_mode = true;
-}
-
-static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_device_id *id)
-{
- struct iidc_rdma_core_auxiliary_dev *iidc_adev;
- struct iidc_rdma_core_dev_info *cdev_info;
- struct iidc_rdma_priv_dev_info *iidc_priv;
- struct irdma_l2params l2params = {};
- struct irdma_device *iwdev;
- struct irdma_pci_f *rf;
- int err;
-
- iidc_adev = container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
- cdev_info = iidc_adev->cdev_info;
- iidc_priv = cdev_info->iidc_priv;
-
- iwdev = ib_alloc_device(irdma_device, ibdev);
- if (!iwdev)
- return -ENOMEM;
- iwdev->rf = kzalloc(sizeof(*rf), GFP_KERNEL);
- if (!iwdev->rf) {
- ib_dealloc_device(&iwdev->ibdev);
- return -ENOMEM;
- }
-
- irdma_fill_device_info(iwdev, cdev_info);
- rf = iwdev->rf;
-
- err = irdma_init_interrupts(rf, cdev_info);
- if (err)
- goto err_init_interrupts;
-
- err = irdma_ctrl_init_hw(rf);
- if (err)
- goto err_ctrl_init;
+ iwdev->roce_mode = true;
+ iwdev->push_mode = false;
l2params.mtu = iwdev->netdev->mtu;
- irdma_fill_qos_info(&l2params, &iidc_priv->qos_info);
- if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
- iwdev->dcb_vlan_mode = l2params.num_tc > 1 && !l2params.dscp_mode;
err = irdma_rt_init_hw(iwdev, &l2params);
if (err)
@@ -348,43 +103,57 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
if (err)
goto err_ibreg;
- ice_rdma_update_vsi_filter(cdev_info, iwdev->vsi_num, true);
-
- ibdev_dbg(&iwdev->ibdev, "INIT: Gen2 PF[%d] device probe success\n", PCI_FUNC(rf->pcidev->devfn));
auxiliary_set_drvdata(aux_dev, iwdev);
- return 0;
+ ibdev_dbg(&iwdev->ibdev,
+ "INIT: Gen[%d] vport[%d] probe success. dev_name = %s, core_dev_name = %s, netdev=%s\n",
+ rf->rdma_ver, idc_adev->vdev_info->vport_id,
+ dev_name(&aux_dev->dev),
+ dev_name(&idc_adev->vdev_info->core_adev->dev),
+ netdev_name(idc_adev->vdev_info->netdev));
+ return 0;
err_ibreg:
irdma_rt_deinit_hw(iwdev);
err_rt_init:
- irdma_ctrl_deinit_hw(rf);
-err_ctrl_init:
- irdma_deinit_interrupts(rf, cdev_info);
-err_init_interrupts:
- kfree(iwdev->rf);
ib_dealloc_device(&iwdev->ibdev);
return err;
}
-static const struct auxiliary_device_id irdma_auxiliary_id_table[] = {
- {.name = "ice.iwarp", },
- {.name = "ice.roce", },
+static void ig3rdma_vport_remove(struct auxiliary_device *aux_dev)
+{
+ struct iidc_rdma_vport_auxiliary_dev *idc_adev =
+ container_of(aux_dev, struct iidc_rdma_vport_auxiliary_dev, adev);
+ struct irdma_device *iwdev = auxiliary_get_drvdata(aux_dev);
+
+ ibdev_dbg(&iwdev->ibdev,
+ "INIT: Gen[%d] dev_name = %s, core_dev_name = %s, netdev=%s\n",
+ iwdev->rf->rdma_ver, dev_name(&aux_dev->dev),
+ dev_name(&idc_adev->vdev_info->core_adev->dev),
+ netdev_name(idc_adev->vdev_info->netdev));
+
+ irdma_ib_unregister_device(iwdev);
+}
+
+static const struct auxiliary_device_id ig3rdma_vport_auxiliary_id_table[] = {
+ {.name = "idpf.8086.rdma.vdev", },
{},
};
-MODULE_DEVICE_TABLE(auxiliary, irdma_auxiliary_id_table);
+MODULE_DEVICE_TABLE(auxiliary, ig3rdma_vport_auxiliary_id_table);
-static struct iidc_rdma_core_auxiliary_drv irdma_auxiliary_drv = {
+static struct iidc_rdma_vport_auxiliary_drv ig3rdma_vport_auxiliary_drv = {
.adrv = {
- .id_table = irdma_auxiliary_id_table,
- .probe = irdma_probe,
- .remove = irdma_remove,
+ .name = "vdev",
+ .id_table = ig3rdma_vport_auxiliary_id_table,
+ .probe = ig3rdma_vport_probe,
+ .remove = ig3rdma_vport_remove,
},
- .event_handler = irdma_iidc_event_handler,
+ .event_handler = ig3rdma_idc_vport_event_handler,
};
+
static int __init irdma_init_module(void)
{
int ret;
@@ -396,14 +165,34 @@ static int __init irdma_init_module(void)
return ret;
}
- ret = auxiliary_driver_register(&irdma_auxiliary_drv.adrv);
+ ret = auxiliary_driver_register(&icrdma_core_auxiliary_drv.adrv);
+ if (ret) {
+ auxiliary_driver_unregister(&i40iw_auxiliary_drv);
+ pr_err("Failed icrdma(gen_2) auxiliary_driver_register() ret=%d\n",
+ ret);
+ return ret;
+ }
+
+ ret = auxiliary_driver_register(&ig3rdma_core_auxiliary_drv.adrv);
if (ret) {
+ auxiliary_driver_unregister(&icrdma_core_auxiliary_drv.adrv);
auxiliary_driver_unregister(&i40iw_auxiliary_drv);
- pr_err("Failed irdma auxiliary_driver_register() ret=%d\n",
+ pr_err("Failed ig3rdma(gen_3) core auxiliary_driver_register() ret=%d\n",
ret);
+
return ret;
}
+ ret = auxiliary_driver_register(&ig3rdma_vport_auxiliary_drv.adrv);
+ if (ret) {
+ auxiliary_driver_unregister(&ig3rdma_core_auxiliary_drv.adrv);
+ auxiliary_driver_unregister(&icrdma_core_auxiliary_drv.adrv);
+ auxiliary_driver_unregister(&i40iw_auxiliary_drv);
+ pr_err("Failed ig3rdma vport auxiliary_driver_register() ret=%d\n",
+ ret);
+
+ return ret;
+ }
irdma_register_notifiers();
return 0;
@@ -412,8 +201,10 @@ static int __init irdma_init_module(void)
static void __exit irdma_exit_module(void)
{
irdma_unregister_notifiers();
- auxiliary_driver_unregister(&irdma_auxiliary_drv.adrv);
+ auxiliary_driver_unregister(&icrdma_core_auxiliary_drv.adrv);
auxiliary_driver_unregister(&i40iw_auxiliary_drv);
+ auxiliary_driver_unregister(&ig3rdma_core_auxiliary_drv.adrv);
+ auxiliary_driver_unregister(&ig3rdma_vport_auxiliary_drv.adrv);
}
module_init(irdma_init_module);
diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
index 674acc952168..886b30da188a 100644
--- a/drivers/infiniband/hw/irdma/main.h
+++ b/drivers/infiniband/hw/irdma/main.h
@@ -30,7 +30,6 @@
#endif
#include <linux/auxiliary_bus.h>
#include <linux/net/intel/iidc_rdma.h>
-#include <linux/net/intel/iidc_rdma_ice.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
@@ -54,6 +53,8 @@
#include "puda.h"
extern struct auxiliary_driver i40iw_auxiliary_drv;
+extern struct iidc_rdma_core_auxiliary_drv icrdma_core_auxiliary_drv;
+extern struct iidc_rdma_core_auxiliary_drv ig3rdma_core_auxiliary_drv;
#define IRDMA_FW_VER_DEFAULT 2
#define IRDMA_HW_VER 2
@@ -65,7 +66,8 @@ extern struct auxiliary_driver i40iw_auxiliary_drv;
#define IRDMA_MACIP_ADD 1
#define IRDMA_MACIP_DELETE 2
-#define IW_CCQ_SIZE (IRDMA_CQP_SW_SQSIZE_2048 + 1)
+#define IW_GEN_3_CCQ_SIZE (2 * IRDMA_CQP_SW_SQSIZE_2048 + 2)
+#define IW_CCQ_SIZE (IRDMA_CQP_SW_SQSIZE_2048 + 2)
#define IW_CEQ_SIZE 2048
#define IW_AEQ_SIZE 2048
@@ -127,12 +129,12 @@ enum init_completion_state {
HMC_OBJS_CREATED,
HW_RSRC_INITIALIZED,
CCQ_CREATED,
- CEQ0_CREATED, /* Last state of probe */
- ILQ_CREATED,
- IEQ_CREATED,
+ CEQ0_CREATED,
CEQS_CREATED,
PBLE_CHUNK_MEM,
AEQ_CREATED,
+ ILQ_CREATED,
+ IEQ_CREATED, /* Last state of probe */
IP_ADDR_REGISTERED, /* Last state of open */
};
@@ -167,6 +169,7 @@ struct irdma_cqp_request {
bool request_done; /* READ/WRITE_ONCE macros operate on it */
bool waiting:1;
bool dynamic:1;
+ bool pending:1;
};
struct irdma_cqp {
@@ -179,6 +182,7 @@ struct irdma_cqp {
struct irdma_dma_mem host_ctx;
u64 *scratch_array;
struct irdma_cqp_request *cqp_requests;
+ struct irdma_ooo_cqp_op *ooo_op_array;
struct list_head cqp_avail_reqs;
struct list_head cqp_pending_reqs;
};
@@ -257,6 +261,7 @@ struct irdma_pci_f {
bool reset:1;
bool rsrc_created:1;
bool msix_shared:1;
+ bool hwqp1_rsvd:1;
u8 rsrc_profile;
u8 *hmc_info_mem;
u8 *mem_rsrc;
@@ -269,6 +274,8 @@ struct irdma_pci_f {
u32 max_mr;
u32 max_qp;
u32 max_cq;
+ u32 max_srq;
+ u32 next_srq;
u32 max_ah;
u32 next_ah;
u32 max_mcg;
@@ -282,6 +289,7 @@ struct irdma_pci_f {
u32 mr_stagmask;
u32 used_pds;
u32 used_cqs;
+ u32 used_srqs;
u32 used_mrs;
u32 used_qps;
u32 arp_table_size;
@@ -293,6 +301,7 @@ struct irdma_pci_f {
unsigned long *allocated_ws_nodes;
unsigned long *allocated_qps;
unsigned long *allocated_cqs;
+ unsigned long *allocated_srqs;
unsigned long *allocated_mrs;
unsigned long *allocated_pds;
unsigned long *allocated_mcgs;
@@ -327,10 +336,13 @@ struct irdma_pci_f {
wait_queue_head_t vchnl_waitq;
struct workqueue_struct *cqp_cmpl_wq;
struct work_struct cqp_cmpl_work;
+ struct workqueue_struct *vchnl_wq;
struct irdma_sc_vsi default_vsi;
void *back_fcn;
struct irdma_gen_ops gen_ops;
struct irdma_device *iwdev;
+ DECLARE_HASHTABLE(ah_hash_tbl, 8);
+ struct mutex ah_tbl_lock; /* protect AH hash table access */
};
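
Moving ah_hash_tbl and ah_tbl_lock from struct irdma_device into struct irdma_pci_f lets every device (including GEN3 vports) on one PCI function share a single AH cache. A sketch of the lookup idiom under the mutex, with a hypothetical entry type:

	#include <linux/hashtable.h>

	struct ah_entry {
		struct hlist_node node;
		u32 key;
	};

	static struct ah_entry *ah_lookup(struct irdma_pci_f *rf, u32 key)
	{
		struct ah_entry *e;

		lockdep_assert_held(&rf->ah_tbl_lock);
		hash_for_each_possible(rf->ah_hash_tbl, e, node, key)
			if (e->key == key)
				return e;
		return NULL;
	}
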
struct irdma_device {
@@ -340,8 +352,6 @@ struct irdma_device {
struct workqueue_struct *cleanup_wq;
struct irdma_sc_vsi vsi;
struct irdma_cm_core cm_core;
- DECLARE_HASHTABLE(ah_hash_tbl, 8);
- struct mutex ah_tbl_lock; /* protect AH hash table access */
u32 roce_cwnd;
u32 roce_ackcreds;
u32 vendor_id;
@@ -350,12 +360,14 @@ struct irdma_device {
u32 rcv_wnd;
u16 mac_ip_table_idx;
u16 vsi_num;
+ u16 vport_id;
u8 rcv_wscale;
u8 iw_status;
bool roce_mode:1;
bool roce_dcqcn_en:1;
bool dcb_vlan_mode:1;
bool iw_ooo:1;
+ bool is_vport:1;
enum init_completion_state init_state;
wait_queue_head_t suspend_wq;
@@ -413,6 +425,11 @@ static inline struct irdma_pci_f *dev_to_rf(struct irdma_sc_dev *dev)
return container_of(dev, struct irdma_pci_f, sc_dev);
}
+static inline struct irdma_srq *to_iwsrq(struct ib_srq *ibsrq)
+{
+ return container_of(ibsrq, struct irdma_srq, ibsrq);
+}
+
/**
* irdma_alloc_resource - allocate a resource
* @iwdev: device pointer
@@ -508,7 +525,8 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
void irdma_cq_add_ref(struct ib_cq *ibcq);
void irdma_cq_rem_ref(struct ib_cq *ibcq);
void irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq);
-
+void irdma_srq_event(struct irdma_sc_srq *srq);
+void irdma_srq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_srq *srq);
void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf);
int irdma_hw_modify_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp,
struct irdma_modify_qp_info *info, bool wait);
@@ -557,4 +575,5 @@ int irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,
void *ptr);
void irdma_add_ip(struct irdma_device *iwdev);
void cqp_compl_worker(struct work_struct *work);
+void irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev);
#endif /* IRDMA_MAIN_H */
diff --git a/drivers/infiniband/hw/irdma/pble.c b/drivers/infiniband/hw/irdma/pble.c
index 37ce35cb10e7..3091f9345f12 100644
--- a/drivers/infiniband/hw/irdma/pble.c
+++ b/drivers/infiniband/hw/irdma/pble.c
@@ -193,8 +193,15 @@ static enum irdma_sd_entry_type irdma_get_type(struct irdma_sc_dev *dev,
{
enum irdma_sd_entry_type sd_entry_type;
- sd_entry_type = !idx->rel_pd_idx && pages == IRDMA_HMC_PD_CNT_IN_SD ?
- IRDMA_SD_TYPE_DIRECT : IRDMA_SD_TYPE_PAGED;
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ sd_entry_type = (!idx->rel_pd_idx &&
+ pages == IRDMA_HMC_PD_CNT_IN_SD) ?
+ IRDMA_SD_TYPE_DIRECT : IRDMA_SD_TYPE_PAGED;
+ else
+ sd_entry_type = (!idx->rel_pd_idx &&
+ pages == IRDMA_HMC_PD_CNT_IN_SD &&
+ dev->privileged) ?
+ IRDMA_SD_TYPE_DIRECT : IRDMA_SD_TYPE_PAGED;
return sd_entry_type;
}
@@ -279,10 +286,11 @@ static int add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
sd_reg_val = (sd_entry_type == IRDMA_SD_TYPE_PAGED) ?
sd_entry->u.pd_table.pd_page_addr.pa :
sd_entry->u.bp.addr.pa;
-
- if (!sd_entry->valid) {
- ret_code = irdma_hmc_sd_one(dev, hmc_info->hmc_fn_id, sd_reg_val,
- idx->sd_idx, sd_entry->entry_type, true);
+ if ((dev->privileged && !sd_entry->valid) ||
+ dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ ret_code = irdma_hmc_sd_one(dev, hmc_info->hmc_fn_id,
+ sd_reg_val, idx->sd_idx,
+ sd_entry->entry_type, true);
if (ret_code)
goto error;
}
diff --git a/drivers/infiniband/hw/irdma/protos.h b/drivers/infiniband/hw/irdma/protos.h
index c0c9441885d3..324cfbf21764 100644
--- a/drivers/infiniband/hw/irdma/protos.h
+++ b/drivers/infiniband/hw/irdma/protos.h
@@ -10,6 +10,7 @@
#define ALL_TC2PFC 0xff
#define CQP_COMPL_WAIT_TIME_MS 10
#define CQP_TIMEOUT_THRESHOLD 500
+#define CQP_DEF_CMPL_TIMEOUT_THRESHOLD 2500
/* init operations */
int irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev,
diff --git a/drivers/infiniband/hw/irdma/puda.h b/drivers/infiniband/hw/irdma/puda.h
index 2fc638f2b143..d65041bee667 100644
--- a/drivers/infiniband/hw/irdma/puda.h
+++ b/drivers/infiniband/hw/irdma/puda.h
@@ -91,7 +91,7 @@ struct irdma_puda_rsrc_info {
u32 rq_size;
u32 tx_buf_cnt; /* total bufs allocated will be rq_size + tx_buf_cnt */
u16 buf_size;
- u8 stats_idx;
+ u16 stats_idx;
bool stats_idx_valid:1;
int abi_ver;
};
@@ -140,7 +140,7 @@ struct irdma_puda_rsrc {
u64 crc_err;
u64 pmode_count;
u64 partials_handled;
- u8 stats_idx;
+ u16 stats_idx;
bool check_crc:1;
bool stats_idx_valid:1;
};
diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h
index 527c6da2c1ac..4ae77cdde9dc 100644
--- a/drivers/infiniband/hw/irdma/type.h
+++ b/drivers/infiniband/hw/irdma/type.h
@@ -8,6 +8,8 @@
#include "hmc.h"
#include "uda.h"
#include "ws.h"
+#include "virtchnl.h"
+
#define IRDMA_DEBUG_ERR "ERR"
#define IRDMA_DEBUG_INIT "INIT"
#define IRDMA_DEBUG_DEV "DEV"
@@ -95,12 +97,6 @@ enum irdma_term_mpa_errors {
MPA_REQ_RSP = 0x04,
};
-enum irdma_qp_event_type {
- IRDMA_QP_EVENT_CATASTROPHIC,
- IRDMA_QP_EVENT_ACCESS_ERR,
- IRDMA_QP_EVENT_REQ_ERR,
-};
-
enum irdma_hw_stats_index {
/* gen1 - 32-bit */
IRDMA_HW_STAT_INDEX_IP4RXDISCARD = 0,
@@ -154,12 +150,46 @@ enum irdma_hw_stats_index {
IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED = 44,
IRDMA_HW_STAT_INDEX_TXNPCNPSENT = 45,
IRDMA_HW_STAT_INDEX_MAX_GEN_2 = 46,
+
+ /* gen3 */
+ IRDMA_HW_STAT_INDEX_RNR_SENT = 46,
+ IRDMA_HW_STAT_INDEX_RNR_RCVD = 47,
+ IRDMA_HW_STAT_INDEX_RDMAORDLMTCNT = 48,
+ IRDMA_HW_STAT_INDEX_RDMAIRDLMTCNT = 49,
+ IRDMA_HW_STAT_INDEX_RDMARXATS = 50,
+ IRDMA_HW_STAT_INDEX_RDMATXATS = 51,
+ IRDMA_HW_STAT_INDEX_NAKSEQERR = 52,
+ IRDMA_HW_STAT_INDEX_NAKSEQERR_IMPLIED = 53,
+ IRDMA_HW_STAT_INDEX_RTO = 54,
+ IRDMA_HW_STAT_INDEX_RXOOOPKTS = 55,
+ IRDMA_HW_STAT_INDEX_ICRCERR = 56,
+
+ IRDMA_HW_STAT_INDEX_MAX_GEN_3 = 57,
};
enum irdma_feature_type {
IRDMA_FEATURE_FW_INFO = 0,
IRDMA_HW_VERSION_INFO = 1,
+ IRDMA_QP_MAX_INCR = 2,
+ IRDMA_CQ_MAX_INCR = 3,
+ IRDMA_CEQ_MAX_INCR = 4,
+ IRDMA_SD_MAX_INCR = 5,
+ IRDMA_MR_MAX_INCR = 6,
+ IRDMA_Q1_MAX_INCR = 7,
+ IRDMA_AH_MAX_INCR = 8,
+ IRDMA_SRQ_MAX_INCR = 9,
+ IRDMA_TIMER_MAX_INCR = 10,
+ IRDMA_XF_MAX_INCR = 11,
+ IRDMA_RRF_MAX_INCR = 12,
+ IRDMA_PBLE_MAX_INCR = 13,
+ IRDMA_OBJ_1 = 22,
+ IRDMA_OBJ_2 = 23,
+ IRDMA_ENDPT_TRK = 24,
+ IRDMA_FTN_INLINE_MAX = 25,
IRDMA_QSETS_MAX = 26,
+ IRDMA_ASO = 27,
+ IRDMA_FTN_FLAGS = 32,
+ IRDMA_FTN_NOP = 33,
IRDMA_MAX_FEATURES, /* Must be last entry */
};
@@ -206,6 +236,7 @@ enum irdma_syn_rst_handling {
enum irdma_queue_type {
IRDMA_QUEUE_TYPE_SQ_RQ = 0,
IRDMA_QUEUE_TYPE_CQP,
+ IRDMA_QUEUE_TYPE_SRQ,
};
struct irdma_sc_dev;
@@ -233,12 +264,22 @@ struct irdma_cqp_init_info {
__le64 *host_ctx;
u64 *scratch_array;
u32 sq_size;
+ struct irdma_ooo_cqp_op *ooo_op_array;
+ u32 pe_en_vf_cnt;
u16 hw_maj_ver;
u16 hw_min_ver;
u8 struct_ver;
u8 hmc_profile;
u8 ena_vf_count;
u8 ceqs_per_vf;
+ u8 ooisc_blksize;
+ u8 rrsp_blksize;
+ u8 q1_blksize;
+ u8 xmit_blksize;
+ u8 ts_override;
+ u8 ts_shift;
+ u8 en_fine_grained_timers;
+ u8 blksizes_valid;
bool en_datacenter_tcp:1;
bool disable_packed:1;
bool rocev2_rto_policy:1;
@@ -310,9 +351,21 @@ struct irdma_vsi_pestat {
spinlock_t lock; /* rdma stats lock */
};
+struct irdma_mmio_region {
+ u8 __iomem *addr;
+ resource_size_t len;
+ resource_size_t offset;
+};
+
struct irdma_hw {
- u8 __iomem *hw_addr;
- u8 __iomem *priv_hw_addr;
+ union {
+ u8 __iomem *hw_addr;
+ struct {
+ struct irdma_mmio_region rdma_reg; /* RDMA region */
+ struct irdma_mmio_region *io_regs; /* Non-RDMA MMIO regions */
+ u16 num_io_regions; /* Number of Non-RDMA MMIO regions */
+ };
+ };
struct device *device;
struct irdma_hmc_info hmc;
};
@@ -351,7 +404,21 @@ struct irdma_cqp_quanta {
__le64 elem[IRDMA_CQP_WQE_SIZE];
};
+struct irdma_ooo_cqp_op {
+ struct list_head list_entry;
+ u64 scratch;
+ u32 def_info;
+ u32 sw_def_info;
+ u32 wqe_idx;
+ bool deferred:1;
+};
+
struct irdma_sc_cqp {
+ spinlock_t ooo_list_lock; /* protects list of pending completions */
+ struct list_head ooo_avail;
+ struct list_head ooo_pnd;
+ u32 last_def_cmpl_ticket;
+ u32 sw_def_cmpl_ticket;
u32 size;
u64 sq_pa;
u64 host_ctx_pa;
@@ -367,8 +434,10 @@ struct irdma_sc_cqp {
u64 *scratch_array;
u64 requested_ops;
atomic64_t completed_ops;
+ struct irdma_ooo_cqp_op *ooo_op_array;
u32 cqp_id;
u32 sq_size;
+ u32 pe_en_vf_cnt;
u32 hw_sq_size;
u16 hw_maj_ver;
u16 hw_min_ver;
@@ -378,6 +447,14 @@ struct irdma_sc_cqp {
u8 ena_vf_count;
u8 timeout_count;
u8 ceqs_per_vf;
+ u8 ooisc_blksize;
+ u8 rrsp_blksize;
+ u8 q1_blksize;
+ u8 xmit_blksize;
+ u8 ts_override;
+ u8 ts_shift;
+ u8 en_fine_grained_timers;
+ u8 blksizes_valid;
bool en_datacenter_tcp:1;
bool disable_packed:1;
bool rocev2_rto_policy:1;
@@ -397,6 +474,8 @@ struct irdma_sc_aeq {
u32 msix_idx;
u8 polarity;
bool virtual_map:1;
+ bool pasid_valid:1;
+ u32 pasid;
};
struct irdma_sc_ceq {
@@ -412,13 +491,15 @@ struct irdma_sc_ceq {
u8 tph_val;
u32 first_pm_pbl_idx;
u8 polarity;
- struct irdma_sc_vsi *vsi;
+ u16 vsi_idx;
struct irdma_sc_cq **reg_cq;
u32 reg_cq_size;
spinlock_t req_cq_lock; /* protect access to reg_cq array */
bool virtual_map:1;
bool tph_en:1;
bool itr_no_expire:1;
+ bool pasid_valid:1;
+ u32 pasid;
};
struct irdma_sc_cq {
@@ -426,6 +507,7 @@ struct irdma_sc_cq {
u64 cq_pa;
u64 shadow_area_pa;
struct irdma_sc_dev *dev;
+ u16 vsi_idx;
struct irdma_sc_vsi *vsi;
void *pbl_list;
void *back_cq;
@@ -477,8 +559,13 @@ struct irdma_sc_qp {
bool virtual_map:1;
bool flush_sq:1;
bool flush_rq:1;
+ bool err_sq_idx_valid:1;
+ bool err_rq_idx_valid:1;
+ u32 err_sq_idx;
+ u32 err_rq_idx;
bool sq_flush_code:1;
bool rq_flush_code:1;
+ u32 pkt_limit;
enum irdma_flush_opcode flush_code;
enum irdma_qp_event_type event_type;
u8 term_flags;
@@ -489,13 +576,13 @@ struct irdma_sc_qp {
struct irdma_stats_inst_info {
bool use_hmc_fcn_index;
u8 hmc_fn_id;
- u8 stats_idx;
+ u16 stats_idx;
};
struct irdma_up_info {
u8 map[8];
u8 cnp_up_override;
- u8 hmc_fcn_idx;
+ u16 hmc_fcn_idx;
bool use_vlan:1;
bool use_cnp_up_override:1;
};
@@ -518,6 +605,8 @@ struct irdma_ws_node_info {
struct irdma_hmc_fpm_misc {
u32 max_ceqs;
u32 max_sds;
+ u32 loc_mem_pages;
+ u8 ird;
u32 xf_block_size;
u32 q1_block_size;
u32 ht_multiplier;
@@ -526,6 +615,7 @@ struct irdma_hmc_fpm_misc {
u32 ooiscf_block_size;
};
+#define IRDMA_VCHNL_MAX_MSG_SIZE 512
#define IRDMA_LEAF_DEFAULT_REL_BW 64
#define IRDMA_PARENT_DEFAULT_REL_BW 1
@@ -601,19 +691,28 @@ struct irdma_sc_dev {
u64 cqp_cmd_stats[IRDMA_MAX_CQP_OPS];
struct irdma_hw_attrs hw_attrs;
struct irdma_hmc_info *hmc_info;
+ struct irdma_vchnl_rdma_caps vc_caps;
+ u8 vc_recv_buf[IRDMA_VCHNL_MAX_MSG_SIZE];
+ u16 vc_recv_len;
struct irdma_sc_cqp *cqp;
struct irdma_sc_aeq *aeq;
struct irdma_sc_ceq *ceq[IRDMA_CEQ_MAX_COUNT];
struct irdma_sc_cq *ccq;
const struct irdma_irq_ops *irq_ops;
+ struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY];
struct irdma_hmc_fpm_misc hmc_fpm_misc;
struct irdma_ws_node *ws_tree_root;
struct mutex ws_mutex; /* ws tree mutex */
+ u32 vchnl_ver;
u16 num_vfs;
- u8 hmc_fn_id;
+ u16 hmc_fn_id;
u8 vf_id;
+ bool privileged:1;
bool vchnl_up:1;
bool ceq_valid:1;
+ bool is_pf:1;
+ u8 protocol_used;
+ struct mutex vchnl_mutex; /* mutex to synchronize RDMA virtual channel messages */
u8 pci_rev;
int (*ws_add)(struct irdma_sc_vsi *vsi, u8 user_pri);
void (*ws_remove)(struct irdma_sc_vsi *vsi, u8 user_pri);
@@ -632,6 +731,51 @@ struct irdma_modify_cq_info {
bool cq_resize:1;
};
+struct irdma_srq_init_info {
+ struct irdma_sc_pd *pd;
+ struct irdma_sc_vsi *vsi;
+ u64 srq_pa;
+ u64 shadow_area_pa;
+ u32 first_pm_pbl_idx;
+ u32 pasid;
+ u32 srq_size;
+ u16 srq_limit;
+ u8 pasid_valid;
+ u8 wqe_size;
+ u8 leaf_pbl_size;
+ u8 virtual_map;
+ u8 tph_en;
+ u8 arm_limit_event;
+ u8 tph_value;
+ u8 pbl_chunk_size;
+ struct irdma_srq_uk_init_info srq_uk_init_info;
+};
+
+struct irdma_sc_srq {
+ struct irdma_sc_dev *dev;
+ struct irdma_sc_vsi *vsi;
+ struct irdma_sc_pd *pd;
+ struct irdma_srq_uk srq_uk;
+ void *back_srq;
+ u64 srq_pa;
+ u64 shadow_area_pa;
+ u32 first_pm_pbl_idx;
+ u32 pasid;
+ u32 hw_srq_size;
+ u16 srq_limit;
+ u8 pasid_valid;
+ u8 leaf_pbl_size;
+ u8 virtual_map;
+ u8 tph_en;
+ u8 arm_limit_event;
+ u8 tph_val;
+};
+
+struct irdma_modify_srq_info {
+ u16 srq_limit;
+ u8 arm_limit_event;
+};
+
struct irdma_create_qp_info {
bool ord_valid:1;
bool tcp_ctx_valid:1;
@@ -671,7 +815,8 @@ struct irdma_ccq_cqe_info {
u16 maj_err_code;
u16 min_err_code;
u8 op_code;
- bool error;
+ bool error:1;
+ bool pending:1;
};
struct irdma_dcb_app_info {
@@ -720,7 +865,7 @@ struct irdma_vsi_init_info {
struct irdma_vsi_stats_info {
struct irdma_vsi_pestat *pestat;
- u8 fcn_id;
+ u16 fcn_id;
bool alloc_stats_inst;
};
@@ -731,7 +876,8 @@ struct irdma_device_init_info {
__le64 *fpm_commit_buf;
struct irdma_hw *hw;
void __iomem *bar0;
- u8 hmc_fn_id;
+ enum irdma_protocol_used protocol_used;
+ u16 hmc_fn_id;
};
struct irdma_ceq_init_info {
@@ -746,8 +892,8 @@ struct irdma_ceq_init_info {
bool itr_no_expire:1;
u8 pbl_chunk_size;
u8 tph_val;
+ u16 vsi_idx;
u32 first_pm_pbl_idx;
- struct irdma_sc_vsi *vsi;
struct irdma_sc_cq **reg_cq;
u32 reg_cq_idx;
};
@@ -807,6 +953,8 @@ struct irdma_udp_offload_info {
u32 cwnd;
u8 rexmit_thresh;
u8 rnr_nak_thresh;
+ u8 rnr_nak_tmr;
+ u8 min_rnr_timer;
};
struct irdma_roce_offload_info {
@@ -833,6 +981,7 @@ struct irdma_roce_offload_info {
bool dctcp_en:1;
bool fw_cc_enable:1;
bool use_stats_inst:1;
+ u8 local_ack_timeout;
u16 t_high;
u16 t_low;
u8 last_byte_sent;
@@ -933,8 +1082,10 @@ struct irdma_qp_host_ctx_info {
};
u32 send_cq_num;
u32 rcv_cq_num;
+ u32 srq_id;
u32 rem_endpoint_idx;
- u8 stats_idx;
+ u16 stats_idx;
+ bool remote_atomics_en:1;
bool srq_valid:1;
bool tcp_info_valid:1;
bool iwarp_info_valid:1;
@@ -945,6 +1096,7 @@ struct irdma_qp_host_ctx_info {
struct irdma_aeqe_info {
u64 compl_ctx;
u32 qp_cq_id;
+ u32 def_info; /* only valid for DEF_CMPL */
u16 ae_id;
u16 wqe_idx;
u8 tcp_state;
@@ -953,9 +1105,11 @@ struct irdma_aeqe_info {
bool cq:1;
bool sq:1;
bool rq:1;
+ bool srq:1;
bool in_rdrsp_wr:1;
bool out_rdrsp:1;
bool aeqe_overflow:1;
+ bool err_rq_idx_valid:1;
u8 q2_data_written;
u8 ae_src;
};
@@ -972,7 +1126,8 @@ struct irdma_allocate_stag_info {
bool use_hmc_fcn_index:1;
bool use_pf_rid:1;
bool all_memory:1;
- u8 hmc_fcn_index;
+ bool remote_atomics_en:1;
+ u16 hmc_fcn_index;
};
struct irdma_mw_alloc_info {
@@ -1000,6 +1155,7 @@ struct irdma_reg_ns_stag_info {
u8 hmc_fcn_index;
bool use_pf_rid:1;
bool all_memory:1;
+ bool remote_atomics_en:1;
};
struct irdma_fast_reg_stag_info {
@@ -1023,6 +1179,7 @@ struct irdma_fast_reg_stag_info {
u8 hmc_fcn_index;
bool use_pf_rid:1;
bool defer_flag:1;
+ bool remote_atomics_en:1;
};
struct irdma_dealloc_stag_info {
@@ -1130,6 +1287,8 @@ struct irdma_cqp_manage_push_page_info {
};
struct irdma_qp_flush_info {
+ u32 err_sq_idx;
+ u32 err_rq_idx;
u16 sq_minor_code;
u16 sq_major_code;
u16 rq_minor_code;
@@ -1140,6 +1299,8 @@ struct irdma_qp_flush_info {
bool rq:1;
bool userflushcode:1;
bool generate_ae:1;
+ bool err_sq_idx_valid:1;
+ bool err_rq_idx_valid:1;
};
struct irdma_gen_ae_info {
@@ -1189,6 +1350,11 @@ void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_i
void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable);
void irdma_check_cqp_progress(struct irdma_cqp_timeout *cqp_timeout,
struct irdma_sc_dev *dev);
+void irdma_sc_cqp_def_cmpl_ae_handler(struct irdma_sc_dev *dev,
+ struct irdma_aeqe_info *info,
+ bool first, u64 *scratch,
+ u32 *sw_def_info);
+u64 irdma_sc_cqp_cleanup_handler(struct irdma_sc_dev *dev);
int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err);
int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp);
int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
@@ -1224,6 +1390,8 @@ void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *inf
int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
u8 hmc_fn_id, bool post_sq,
bool poll_registers);
+int irdma_sc_srq_init(struct irdma_sc_srq *srq,
+ struct irdma_srq_init_info *info);
void sc_vsi_update_stats(struct irdma_sc_vsi *vsi);
struct cqp_info {
@@ -1467,6 +1635,23 @@ struct cqp_info {
struct irdma_dma_mem query_buff_mem;
u64 scratch;
} query_rdma;
+
+ struct {
+ struct irdma_sc_srq *srq;
+ u64 scratch;
+ } srq_create;
+
+ struct {
+ struct irdma_sc_srq *srq;
+ struct irdma_modify_srq_info info;
+ u64 scratch;
+ } srq_modify;
+
+ struct {
+ struct irdma_sc_srq *srq;
+ u64 scratch;
+ } srq_destroy;
+
} u;
};
diff --git a/drivers/infiniband/hw/irdma/uda_d.h b/drivers/infiniband/hw/irdma/uda_d.h
index 5a9e6eabf032..4fb4daa20722 100644
--- a/drivers/infiniband/hw/irdma/uda_d.h
+++ b/drivers/infiniband/hw/irdma/uda_d.h
@@ -78,8 +78,7 @@
#define IRDMA_UDAQPC_IPID GENMASK_ULL(47, 32)
#define IRDMA_UDAQPC_SNDMSS GENMASK_ULL(29, 16)
#define IRDMA_UDAQPC_VLANTAG GENMASK_ULL(15, 0)
-
-#define IRDMA_UDA_CQPSQ_MAV_PDINDEXHI GENMASK_ULL(21, 20)
+#define IRDMA_UDA_CQPSQ_MAV_PDINDEXHI GENMASK_ULL(27, 20)
#define IRDMA_UDA_CQPSQ_MAV_PDINDEXLO GENMASK_ULL(63, 48)
#define IRDMA_UDA_CQPSQ_MAV_SRCMACADDRINDEX GENMASK_ULL(29, 24)
#define IRDMA_UDA_CQPSQ_MAV_ARPINDEX GENMASK_ULL(63, 48)
@@ -94,7 +93,7 @@
#define IRDMA_UDA_CQPSQ_MAV_OPCODE GENMASK_ULL(37, 32)
#define IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK BIT_ULL(62)
#define IRDMA_UDA_CQPSQ_MAV_IPV4VALID BIT_ULL(59)
-#define IRDMA_UDA_CQPSQ_MAV_AVIDX GENMASK_ULL(16, 0)
+#define IRDMA_UDA_CQPSQ_MAV_AVIDX GENMASK_ULL(23, 0)
#define IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG BIT_ULL(60)
#define IRDMA_UDA_MGCTX_VFFLAG BIT_ULL(29)
#define IRDMA_UDA_MGCTX_DESTPORT GENMASK_ULL(47, 32)
diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
index 38c54e59cc2e..ce1ae10c30fc 100644
--- a/drivers/infiniband/hw/irdma/uk.c
+++ b/drivers/infiniband/hw/irdma/uk.c
@@ -198,6 +198,26 @@ __le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
return wqe;
}
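+/**
+ * irdma_srq_get_next_recv_wqe - get next srq's rcv wqe
+ * @srq: hw srq ptr
+ * @wqe_idx: return wqe index
+ */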
+__le64 *irdma_srq_get_next_recv_wqe(struct irdma_srq_uk *srq, u32 *wqe_idx)
+{
+ int ret_code;
+ __le64 *wqe;
+
+ if (IRDMA_RING_FULL_ERR(srq->srq_ring))
+ return NULL;
+
+ IRDMA_ATOMIC_RING_MOVE_HEAD(srq->srq_ring, *wqe_idx, ret_code);
+ if (ret_code)
+ return NULL;
+
+ if (!*wqe_idx)
+ srq->srwqe_polarity = !srq->srwqe_polarity;
+ /* wqe_size_multiplier is the number of 32-byte quanta in one SRQ WQE */
+ wqe = srq->srq_base[*wqe_idx * (srq->wqe_size_multiplier)].elem;
+
+ return wqe;
+}
+
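
The head advance above wraps modulo the ring size and toggles srwqe_polarity each time slot 0 is claimed again, so the valid bit distinguishes this lap's WQEs from last lap's without zeroing the ring. A standalone sketch of the wrap-and-toggle (a ring size of 8 is hypothetical; the full-ring check and the driver's ring macros are omitted):

#include <stdio.h>

#define RING_SIZE 8

int main(void)
{
	unsigned int polarity = 0;	/* toggles once per lap */

	for (unsigned int post = 0; post < 20; post++) {
		unsigned int slot = post % RING_SIZE;

		if (post && !slot)
			polarity ^= 1;
		printf("post %2u -> slot %u, valid bit %u\n",
		       post, slot, polarity);
	}
	return 0;
}
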
/**
* irdma_qp_get_next_recv_wqe - get next qp's rcv wqe
* @qp: hw qp ptr
@@ -318,6 +338,160 @@ int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
}
/**
+ * irdma_uk_atomic_fetch_add - atomic fetch and add operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+int irdma_uk_atomic_fetch_add(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq)
+{
+ struct irdma_atomic_fetch_add *op_info;
+ u32 total_size = 0;
+ u16 quanta = 2;
+ u32 wqe_idx;
+ __le64 *wqe;
+ u64 hdr;
+
+ op_info = &info->op.atomic_fetch_add;
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
+ info);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 0, op_info->tagged_offset);
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_STAG, op_info->stag));
+ set_64bit_val(wqe, 16, op_info->remote_tagged_offset);
+
+ hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, 1) |
+ FIELD_PREP(IRDMAQPSQ_REMOTE_STAG, op_info->remote_stag) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_ATOMIC_FETCH_ADD) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ set_64bit_val(wqe, 32, op_info->fetch_add_data_bytes);
+ set_64bit_val(wqe, 40, 0);
+ set_64bit_val(wqe, 48, 0);
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity));
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+
+ return 0;
+}
+
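
A hedged sketch of a kernel-side caller for the new verb; the op.atomic_fetch_add fields come from the struct this series adds to user.h, while the helper name, STag and VA values are placeholders and error handling is elided:

/* Hypothetical caller (not part of this patch): post a 64-bit
 * fetch-and-add of add_val against a remote MR; the completion is
 * reaped via the usual CQ poll.
 */
static int post_fetch_add(struct irdma_qp_uk *qp, u64 local_va, u32 lkey,
			  u64 remote_va, u32 rkey, u64 add_val, u64 wr_id)
{
	struct irdma_post_sq_info info = {};

	info.wr_id = wr_id;
	info.op_type = IRDMA_OP_TYPE_ATOMIC_FETCH_AND_ADD;
	info.signaled = true;
	info.op.atomic_fetch_add.tagged_offset = local_va; /* original value lands here */
	info.op.atomic_fetch_add.stag = lkey;
	info.op.atomic_fetch_add.remote_tagged_offset = remote_va;
	info.op.atomic_fetch_add.remote_stag = rkey;
	info.op.atomic_fetch_add.fetch_add_data_bytes = add_val;

	return irdma_uk_atomic_fetch_add(qp, &info, true);
}
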
+/**
+ * irdma_uk_atomic_compare_swap - atomic compare and swap operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+int irdma_uk_atomic_compare_swap(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq)
+{
+ struct irdma_atomic_compare_swap *op_info;
+ u32 total_size = 0;
+ u16 quanta = 2;
+ u32 wqe_idx;
+ __le64 *wqe;
+ u64 hdr;
+
+ op_info = &info->op.atomic_compare_swap;
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
+ info);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 0, op_info->tagged_offset);
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_STAG, op_info->stag));
+ set_64bit_val(wqe, 16, op_info->remote_tagged_offset);
+
+ hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, 1) |
+ FIELD_PREP(IRDMAQPSQ_REMOTE_STAG, op_info->remote_stag) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_ATOMIC_COMPARE_SWAP_ADD) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ set_64bit_val(wqe, 32, op_info->swap_data_bytes);
+ set_64bit_val(wqe, 40, op_info->compare_data_bytes);
+ set_64bit_val(wqe, 48, 0);
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity));
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+
+ return 0;
+}
+
+/**
+ * irdma_uk_srq_post_receive - post a receive wqe to a shared rq
+ * @srq: shared rq ptr
+ * @info: post rq information
+ */
+int irdma_uk_srq_post_receive(struct irdma_srq_uk *srq,
+ struct irdma_post_rq_info *info)
+{
+ u32 wqe_idx, i, byte_off;
+ u32 addl_frag_cnt;
+ __le64 *wqe;
+ u64 hdr;
+
+ if (srq->max_srq_frag_cnt < info->num_sges)
+ return -EINVAL;
+
+ wqe = irdma_srq_get_next_recv_wqe(srq, &wqe_idx);
+ if (!wqe)
+ return -ENOMEM;
+
+ addl_frag_cnt = info->num_sges > 1 ? info->num_sges - 1 : 0;
+ srq->wqe_ops.iw_set_fragment(wqe, 0, info->sg_list,
+ srq->srwqe_polarity);
+
+ for (i = 1, byte_off = 32; i < info->num_sges; i++) {
+ srq->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],
+ srq->srwqe_polarity);
+ byte_off += 16;
+ }
+
+ /* for an even, nonzero SGE count, set the valid bit in the next fragment */
+ if (srq->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) &&
+ info->num_sges) {
+ srq->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
+ srq->srwqe_polarity);
+ if (srq->uk_attrs->hw_rev == IRDMA_GEN_2)
+ ++addl_frag_cnt;
+ }
+
+ set_64bit_val(wqe, 16, (u64)info->wr_id);
+ hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
+ FIELD_PREP(IRDMAQPSQ_VALID, srq->srwqe_polarity);
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ set_64bit_val(srq->shadow_area, 0, (wqe_idx + 1) % srq->srq_ring.size);
+
+ return 0;
+}
+
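
A hedged caller sketch for replenishing one SRQ buffer; it assumes irdma_post_rq_info keeps its existing wr_id/sg_list/num_sges layout with struct ib_sge entries (that struct is not shown in this hunk), and all values are placeholders:

static int srq_replenish(struct irdma_srq_uk *srq, u64 buf_va, u32 lkey,
			 u32 len, u64 wr_id)
{
	struct ib_sge sge = { .addr = buf_va, .length = len, .lkey = lkey };
	struct irdma_post_rq_info info = {
		.wr_id = wr_id,
		.sg_list = &sge,
		.num_sges = 1,
	};

	return irdma_uk_srq_post_receive(srq, &info);
}
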
+/**
* irdma_uk_rdma_read - rdma read command
* @qp: hw qp ptr
* @info: post sq information
@@ -973,6 +1147,9 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
u64 comp_ctx, qword0, qword2, qword3;
__le64 *cqe;
struct irdma_qp_uk *qp;
+ struct irdma_srq_uk *srq;
+ struct qp_err_code qp_err;
+ u8 is_srq;
struct irdma_ring *pring = NULL;
u32 wqe_idx;
int ret_code;
@@ -1046,21 +1223,46 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
}
info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
+ is_srq = (u8)FIELD_GET(IRDMA_CQ_SRQ, qword3);
info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
+ get_64bit_val(cqe, 8, &comp_ctx);
+ if (is_srq)
+ get_64bit_val(cqe, 40, (u64 *)&qp);
+ else
+ qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
if (info->error) {
info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
- if (info->major_err == IRDMA_FLUSH_MAJOR_ERR) {
- info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
+ switch (info->major_err) {
+ case IRDMA_SRQFLUSH_RSVD_MAJOR_ERR:
+ qp_err = irdma_ae_to_qp_err_code(info->minor_err);
+ info->minor_err = qp_err.flush_code;
+ fallthrough;
+ case IRDMA_FLUSH_MAJOR_ERR:
/* Set the min error to standard flush error code for remaining cqes */
if (info->minor_err != FLUSH_GENERAL_ERR) {
qword3 &= ~IRDMA_CQ_MINERR;
qword3 |= FIELD_PREP(IRDMA_CQ_MINERR, FLUSH_GENERAL_ERR);
set_64bit_val(cqe, 24, qword3);
}
- } else {
- info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
+ info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
+ break;
+ default:
+#define IRDMA_CIE_SIGNATURE 0xE
+#define IRDMA_CQMAJERR_HIGH_NIBBLE GENMASK(15, 12)
+ if (info->q_type == IRDMA_CQE_QTYPE_SQ &&
+ qp->qp_type == IRDMA_QP_TYPE_ROCE_UD &&
+ FIELD_GET(IRDMA_CQMAJERR_HIGH_NIBBLE, info->major_err)
+ == IRDMA_CIE_SIGNATURE) {
+ info->error = 0;
+ info->major_err = 0;
+ info->minor_err = 0;
+ info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
+ } else {
+ info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
+ }
+ break;
}
} else {
info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
@@ -1069,7 +1271,6 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
get_64bit_val(cqe, 0, &qword0);
get_64bit_val(cqe, 16, &qword2);
- info->tcp_seq_num_rtt = (u32)FIELD_GET(IRDMACQ_TCPSEQNUMRTT, qword0);
info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);
@@ -1085,7 +1286,22 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
info->qp_handle = (irdma_qp_handle)(unsigned long)qp;
info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
- if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
+ if (info->q_type == IRDMA_CQE_QTYPE_RQ && is_srq) {
+ srq = qp->srq_uk;
+
+ get_64bit_val(cqe, 8, &info->wr_id);
+ info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
+
+ if (qword3 & IRDMACQ_STAG) {
+ info->stag_invalid_set = true;
+ info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG,
+ qword2);
+ } else {
+ info->stag_invalid_set = false;
+ }
+ IRDMA_RING_MOVE_TAIL(srq->srq_ring);
+ pring = &srq->srq_ring;
+ } else if (info->q_type == IRDMA_CQE_QTYPE_RQ && !is_srq) {
u32 array_idx;
array_idx = wqe_idx / qp->rq_wqe_size_multiplier;
@@ -1180,9 +1396,15 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
ret_code = 0;
exit:
- if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED)
+ if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
if (pring && IRDMA_RING_MORE_WORK(*pring))
- move_cq_head = false;
+ /* Park the CQ head during a flush so that SW can generate
+ * additional CQEs for all unprocessed WQEs. On GEN3 and
+ * beyond, FW generates the flush CQEs, so move to the next CQE.
+ */
+ move_cq_head = qp->uk_attrs->hw_rev >= IRDMA_GEN_3;
+ }
if (move_cq_head) {
IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
@@ -1210,10 +1432,10 @@ exit:
}
/**
- * irdma_qp_round_up - return round up qp wq depth
+ * irdma_round_up_wq - return round up qp wq depth
* @wqdepth: wq depth in quanta to round up
*/
-static int irdma_qp_round_up(u32 wqdepth)
+static int irdma_round_up_wq(u32 wqdepth)
{
int scount = 1;
@@ -1268,7 +1490,7 @@ int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
{
u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
- *sqdepth = irdma_qp_round_up((sq_size << shift) + IRDMA_SQ_RSVD);
+ *sqdepth = irdma_round_up_wq((sq_size << shift) + IRDMA_SQ_RSVD);
if (*sqdepth < min_size)
*sqdepth = min_size;
@@ -1290,7 +1512,7 @@ int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
{
u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
- *rqdepth = irdma_qp_round_up((rq_size << shift) + IRDMA_RQ_RSVD);
+ *rqdepth = irdma_round_up_wq((rq_size << shift) + IRDMA_RQ_RSVD);
if (*rqdepth < min_size)
*rqdepth = min_size;
@@ -1300,6 +1522,26 @@ int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
return 0;
}
+/**
+ * irdma_get_srqdepth - get SRQ depth (quanta)
+ * @uk_attrs: device HW attributes
+ * @srq_size: SRQ size
+ * @shift: shift which determines the size of a WQE
+ * @srqdepth: returned depth of the SRQ
+ */
+int irdma_get_srqdepth(struct irdma_uk_attrs *uk_attrs, u32 srq_size, u8 shift,
+ u32 *srqdepth)
+{
+ *srqdepth = irdma_round_up_wq((srq_size << shift) + IRDMA_RQ_RSVD);
+
+ if (*srqdepth < ((u32)uk_attrs->min_hw_wq_size << shift))
+ *srqdepth = uk_attrs->min_hw_wq_size << shift;
+ else if (*srqdepth > uk_attrs->max_hw_srq_quanta)
+ return -EINVAL;
+
+ return 0;
+}
+
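
A standalone model of the depth math, reusing the power-of-two round-up that irdma_round_up_wq() performs; IRDMA_RQ_RSVD = 1 and min_hw_wq_size = 8 are illustrative stand-ins for the driver's values:

#include <stdio.h>

#define RQ_RSVD 1	/* reserved quanta; illustrative */

static unsigned int round_up_wq(unsigned int wqdepth)
{
	/* round up to the next power of two, as irdma_round_up_wq() does */
	wqdepth--;
	for (int scount = 1; scount <= 16; scount <<= 1)
		wqdepth |= wqdepth >> scount;
	return ++wqdepth;
}

int main(void)
{
	unsigned int srq_size = 100, shift = 1, min_wq = 8;
	unsigned int depth = round_up_wq((srq_size << shift) + RQ_RSVD);

	if (depth < (min_wq << shift))
		depth = min_wq << shift;
	printf("srqdepth = %u\n", depth);	/* (100<<1)+1 = 201 -> 256 */
	return 0;
}
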
static const struct irdma_wqe_uk_ops iw_wqe_uk_ops = {
.iw_copy_inline_data = irdma_copy_inline_data,
.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta,
@@ -1336,6 +1578,42 @@ static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
}
/**
+ * irdma_uk_srq_init - initialize shared receive queue
+ * @srq: hw srq (user and kernel)
+ * @info: srq initialization info
+ *
+ * Initializes the vars used in both user and kernel mode.
+ * The WQE size depends on the maximum number of fragments
+ * allowed, and the WQE size times the number of WQEs must
+ * match the amount of memory allocated for the SRQ.
+ */
+int irdma_uk_srq_init(struct irdma_srq_uk *srq,
+ struct irdma_srq_uk_init_info *info)
+{
+ u8 rqshift;
+
+ srq->uk_attrs = info->uk_attrs;
+ if (info->max_srq_frag_cnt > srq->uk_attrs->max_hw_wq_frags)
+ return -EINVAL;
+
+ irdma_get_wqe_shift(srq->uk_attrs, info->max_srq_frag_cnt, 0, &rqshift);
+ srq->srq_caps = info->srq_caps;
+ srq->srq_base = info->srq;
+ srq->shadow_area = info->shadow_area;
+ srq->srq_id = info->srq_id;
+ srq->srwqe_polarity = 0;
+ srq->srq_size = info->srq_size;
+ srq->wqe_size = rqshift;
+ srq->max_srq_frag_cnt = min(srq->uk_attrs->max_hw_wq_frags,
+ ((u32)2 << rqshift) - 1);
+ IRDMA_RING_INIT(srq->srq_ring, srq->srq_size);
+ srq->wqe_size_multiplier = 1 << rqshift;
+ srq->wqe_ops = iw_wqe_uk_ops;
+
+ return 0;
+}
+
+/**
* irdma_uk_calc_shift_wq - calculate WQE shift for both SQ and RQ
* @ukinfo: qp initialization info
* @sq_shift: Returns shift of SQ
@@ -1461,6 +1739,7 @@ int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
qp->wqe_ops = iw_wqe_uk_ops_gen_1;
else
qp->wqe_ops = iw_wqe_uk_ops;
+ qp->srq_uk = info->srq_uk;
return ret_code;
}
diff --git a/drivers/infiniband/hw/irdma/user.h b/drivers/infiniband/hw/irdma/user.h
index 380e4a47aede..ab57f689827a 100644
--- a/drivers/infiniband/hw/irdma/user.h
+++ b/drivers/infiniband/hw/irdma/user.h
@@ -41,10 +41,114 @@
#define IRDMA_OP_TYPE_INV_STAG 0x0a
#define IRDMA_OP_TYPE_RDMA_READ_INV_STAG 0x0b
#define IRDMA_OP_TYPE_NOP 0x0c
+#define IRDMA_OP_TYPE_ATOMIC_FETCH_AND_ADD 0x0f
+#define IRDMA_OP_TYPE_ATOMIC_COMPARE_AND_SWAP 0x11
#define IRDMA_OP_TYPE_REC 0x3e
#define IRDMA_OP_TYPE_REC_IMM 0x3f
-#define IRDMA_FLUSH_MAJOR_ERR 1
+#define IRDMA_FLUSH_MAJOR_ERR 1
+#define IRDMA_SRQFLUSH_RSVD_MAJOR_ERR 0xfffe
+
+/* Async Events codes */
+#define IRDMA_AE_AMP_UNALLOCATED_STAG 0x0102
+#define IRDMA_AE_AMP_INVALID_STAG 0x0103
+#define IRDMA_AE_AMP_BAD_QP 0x0104
+#define IRDMA_AE_AMP_BAD_PD 0x0105
+#define IRDMA_AE_AMP_BAD_STAG_KEY 0x0106
+#define IRDMA_AE_AMP_BAD_STAG_INDEX 0x0107
+#define IRDMA_AE_AMP_BOUNDS_VIOLATION 0x0108
+#define IRDMA_AE_AMP_RIGHTS_VIOLATION 0x0109
+#define IRDMA_AE_AMP_TO_WRAP 0x010a
+#define IRDMA_AE_AMP_FASTREG_VALID_STAG 0x010c
+#define IRDMA_AE_AMP_FASTREG_MW_STAG 0x010d
+#define IRDMA_AE_AMP_FASTREG_INVALID_RIGHTS 0x010e
+#define IRDMA_AE_AMP_FASTREG_INVALID_LENGTH 0x0110
+#define IRDMA_AE_AMP_INVALIDATE_SHARED 0x0111
+#define IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS 0x0112
+#define IRDMA_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS 0x0113
+#define IRDMA_AE_AMP_MWBIND_VALID_STAG 0x0114
+#define IRDMA_AE_AMP_MWBIND_OF_MR_STAG 0x0115
+#define IRDMA_AE_AMP_MWBIND_TO_ZERO_BASED_STAG 0x0116
+#define IRDMA_AE_AMP_MWBIND_TO_MW_STAG 0x0117
+#define IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS 0x0118
+#define IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS 0x0119
+#define IRDMA_AE_AMP_MWBIND_TO_INVALID_PARENT 0x011a
+#define IRDMA_AE_AMP_MWBIND_BIND_DISABLED 0x011b
+#define IRDMA_AE_PRIV_OPERATION_DENIED 0x011c
+#define IRDMA_AE_AMP_INVALIDATE_TYPE1_MW 0x011d
+#define IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW 0x011e
+#define IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG 0x011f
+#define IRDMA_AE_AMP_MWBIND_WRONG_TYPE 0x0120
+#define IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH 0x0121
+#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG 0x0132
+#define IRDMA_AE_UDA_XMIT_BAD_PD 0x0133
+#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT 0x0134
+#define IRDMA_AE_UDA_L4LEN_INVALID 0x0135
+#define IRDMA_AE_BAD_CLOSE 0x0201
+#define IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE 0x0202
+#define IRDMA_AE_CQ_OPERATION_ERROR 0x0203
+#define IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO 0x0205
+#define IRDMA_AE_STAG_ZERO_INVALID 0x0206
+#define IRDMA_AE_IB_RREQ_AND_Q1_FULL 0x0207
+#define IRDMA_AE_IB_INVALID_REQUEST 0x0208
+#define IRDMA_AE_SRQ_LIMIT 0x0209
+#define IRDMA_AE_WQE_UNEXPECTED_OPCODE 0x020a
+#define IRDMA_AE_WQE_INVALID_PARAMETER 0x020b
+#define IRDMA_AE_WQE_INVALID_FRAG_DATA 0x020c
+#define IRDMA_AE_IB_REMOTE_ACCESS_ERROR 0x020d
+#define IRDMA_AE_IB_REMOTE_OP_ERROR 0x020e
+#define IRDMA_AE_SRQ_CATASTROPHIC_ERROR 0x020f
+#define IRDMA_AE_WQE_LSMM_TOO_LONG 0x0220
+#define IRDMA_AE_ATOMIC_ALIGNMENT 0x0221
+#define IRDMA_AE_ATOMIC_MASK 0x0222
+#define IRDMA_AE_INVALID_REQUEST 0x0223
+#define IRDMA_AE_PCIE_ATOMIC_DISABLE 0x0224
+#define IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301
+#define IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303
+#define IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304
+#define IRDMA_AE_DDP_UBE_INVALID_MO 0x0305
+#define IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE 0x0306
+#define IRDMA_AE_DDP_UBE_INVALID_QN 0x0307
+#define IRDMA_AE_DDP_NO_L_BIT 0x0308
+#define IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION 0x0311
+#define IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE 0x0312
+#define IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST 0x0313
+#define IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP 0x0314
+#define IRDMA_AE_ROCE_RSP_LENGTH_ERROR 0x0316
+#define IRDMA_AE_ROCE_EMPTY_MCG 0x0380
+#define IRDMA_AE_ROCE_BAD_MC_IP_ADDR 0x0381
+#define IRDMA_AE_ROCE_BAD_MC_QPID 0x0382
+#define IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH 0x0383
+#define IRDMA_AE_INVALID_ARP_ENTRY 0x0401
+#define IRDMA_AE_INVALID_TCP_OPTION_RCVD 0x0402
+#define IRDMA_AE_STALE_ARP_ENTRY 0x0403
+#define IRDMA_AE_INVALID_AH_ENTRY 0x0406
+#define IRDMA_AE_LLP_CLOSE_COMPLETE 0x0501
+#define IRDMA_AE_LLP_CONNECTION_RESET 0x0502
+#define IRDMA_AE_LLP_FIN_RECEIVED 0x0503
+#define IRDMA_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH 0x0504
+#define IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR 0x0505
+#define IRDMA_AE_LLP_SEGMENT_TOO_SMALL 0x0507
+#define IRDMA_AE_LLP_SYN_RECEIVED 0x0508
+#define IRDMA_AE_LLP_TERMINATE_RECEIVED 0x0509
+#define IRDMA_AE_LLP_TOO_MANY_RETRIES 0x050a
+#define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b
+#define IRDMA_AE_LLP_DOUBT_REACHABILITY 0x050c
+#define IRDMA_AE_LLP_CONNECTION_ESTABLISHED 0x050e
+#define IRDMA_AE_LLP_TOO_MANY_RNRS 0x050f
+#define IRDMA_AE_RESOURCE_EXHAUSTION 0x0520
+#define IRDMA_AE_RESET_SENT 0x0601
+#define IRDMA_AE_TERMINATE_SENT 0x0602
+#define IRDMA_AE_RESET_NOT_SENT 0x0603
+#define IRDMA_AE_LCE_QP_CATASTROPHIC 0x0700
+#define IRDMA_AE_LCE_FUNCTION_CATASTROPHIC 0x0701
+#define IRDMA_AE_LCE_CQ_CATASTROPHIC 0x0702
+#define IRDMA_AE_REMOTE_QP_CATASTROPHIC 0x0703
+#define IRDMA_AE_LOCAL_QP_CATASTROPHIC 0x0704
+#define IRDMA_AE_RCE_QP_CATASTROPHIC 0x0705
+#define IRDMA_AE_QP_SUSPEND_COMPLETE 0x0900
+#define IRDMA_AE_CQP_DEFERRED_COMPLETE 0x0901
+#define IRDMA_AE_ADAPTER_CATASTROPHIC 0x0B0B
enum irdma_device_caps_const {
IRDMA_WQE_SIZE = 4,
@@ -55,11 +159,12 @@ enum irdma_device_caps_const {
IRDMA_CEQE_SIZE = 1,
IRDMA_CQP_CTX_SIZE = 8,
IRDMA_SHADOW_AREA_SIZE = 8,
- IRDMA_QUERY_FPM_BUF_SIZE = 176,
- IRDMA_COMMIT_FPM_BUF_SIZE = 176,
+ IRDMA_QUERY_FPM_BUF_SIZE = 192,
+ IRDMA_COMMIT_FPM_BUF_SIZE = 192,
IRDMA_GATHER_STATS_BUF_SIZE = 1024,
IRDMA_MIN_IW_QP_ID = 0,
IRDMA_MAX_IW_QP_ID = 262143,
+ IRDMA_MIN_IW_SRQ_ID = 0,
IRDMA_MIN_CEQID = 0,
IRDMA_MAX_CEQID = 1023,
IRDMA_CEQ_MAX_COUNT = IRDMA_MAX_CEQID + 1,
@@ -67,6 +172,7 @@ enum irdma_device_caps_const {
IRDMA_MAX_CQID = 524287,
IRDMA_MIN_AEQ_ENTRIES = 1,
IRDMA_MAX_AEQ_ENTRIES = 524287,
+ IRDMA_MAX_AEQ_ENTRIES_GEN_3 = 262144,
IRDMA_MIN_CEQ_ENTRIES = 1,
IRDMA_MAX_CEQ_ENTRIES = 262143,
IRDMA_MIN_CQ_SIZE = 1,
@@ -105,6 +211,13 @@ enum irdma_flush_opcode {
FLUSH_RETRY_EXC_ERR,
FLUSH_MW_BIND_ERR,
FLUSH_REM_INV_REQ_ERR,
+ FLUSH_RNR_RETRY_EXC_ERR,
+};
+
+enum irdma_qp_event_type {
+ IRDMA_QP_EVENT_CATASTROPHIC,
+ IRDMA_QP_EVENT_ACCESS_ERR,
+ IRDMA_QP_EVENT_REQ_ERR,
};
enum irdma_cmpl_status {
@@ -147,6 +260,8 @@ enum irdma_qp_caps {
IRDMA_PUSH_MODE = 8,
};
+struct irdma_srq_uk;
+struct irdma_srq_uk_init_info;
struct irdma_qp_uk;
struct irdma_cq_uk;
struct irdma_qp_uk_init_info;
@@ -201,6 +316,24 @@ struct irdma_bind_window {
bool ena_writes:1;
irdma_stag mw_stag;
bool mem_window_type_1:1;
+ bool remote_atomics_en:1;
+};
+
+struct irdma_atomic_fetch_add {
+ u64 tagged_offset;
+ u64 remote_tagged_offset;
+ u64 fetch_add_data_bytes;
+ u32 stag;
+ u32 remote_stag;
+};
+
+struct irdma_atomic_compare_swap {
+ u64 tagged_offset;
+ u64 remote_tagged_offset;
+ u64 swap_data_bytes;
+ u64 compare_data_bytes;
+ u32 stag;
+ u32 remote_stag;
};
struct irdma_inv_local_stag {
@@ -219,6 +352,7 @@ struct irdma_post_sq_info {
bool report_rtt:1;
bool udp_hdr:1;
bool defer_flag:1;
+ bool remote_atomic_en:1;
u32 imm_data;
u32 stag_to_inv;
union {
@@ -227,6 +361,8 @@ struct irdma_post_sq_info {
struct irdma_rdma_read rdma_read;
struct irdma_bind_window bind_window;
struct irdma_inv_local_stag inv_local_stag;
+ struct irdma_atomic_fetch_add atomic_fetch_add;
+ struct irdma_atomic_compare_swap atomic_compare_swap;
} op;
};
@@ -255,6 +391,15 @@ struct irdma_cq_poll_info {
bool imm_valid:1;
};
+struct qp_err_code {
+ enum irdma_flush_opcode flush_code;
+ enum irdma_qp_event_type event_type;
+};
+
+int irdma_uk_atomic_compare_swap(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq);
+int irdma_uk_atomic_fetch_add(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq);
int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
struct irdma_post_sq_info *info, bool post_sq);
int irdma_uk_inline_send(struct irdma_qp_uk *qp,
@@ -300,6 +445,39 @@ int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
u32 *sq_depth, u8 *sq_shift);
int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
u32 *rq_depth, u8 *rq_shift);
+int irdma_uk_srq_init(struct irdma_srq_uk *srq,
+ struct irdma_srq_uk_init_info *info);
+int irdma_uk_srq_post_receive(struct irdma_srq_uk *srq,
+ struct irdma_post_rq_info *info);
+
+struct irdma_srq_uk {
+ u32 srq_caps;
+ struct irdma_qp_quanta *srq_base;
+ struct irdma_uk_attrs *uk_attrs;
+ __le64 *shadow_area;
+ struct irdma_ring srq_ring;
+ struct irdma_ring initial_ring;
+ u32 srq_id;
+ u32 srq_size;
+ u32 max_srq_frag_cnt;
+ struct irdma_wqe_uk_ops wqe_ops;
+ u8 srwqe_polarity;
+ u8 wqe_size;
+ u8 wqe_size_multiplier;
+ u8 deferred_flag;
+};
+
+struct irdma_srq_uk_init_info {
+ struct irdma_qp_quanta *srq;
+ struct irdma_uk_attrs *uk_attrs;
+ __le64 *shadow_area;
+ u64 *srq_wrid_array;
+ u32 srq_id;
+ u32 srq_caps;
+ u32 srq_size;
+ u32 max_srq_frag_cnt;
+};
+
struct irdma_sq_uk_wr_trk_info {
u64 wrid;
u32 wr_len;
@@ -344,6 +522,7 @@ struct irdma_qp_uk {
bool destroy_pending:1; /* Indicates the QP is being destroyed */
void *back_qp;
u8 dbg_rq_flushed;
+ struct irdma_srq_uk *srq_uk;
u8 sq_flush_seen;
u8 rq_flush_seen;
};
@@ -383,6 +562,7 @@ struct irdma_qp_uk_init_info {
u8 rq_shift;
int abi_ver;
bool legacy_mode;
+ struct irdma_srq_uk *srq_uk;
};
struct irdma_cq_uk_init_info {
@@ -398,6 +578,7 @@ struct irdma_cq_uk_init_info {
__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
u16 quanta, u32 total_size,
struct irdma_post_sq_info *info);
+__le64 *irdma_srq_get_next_recv_wqe(struct irdma_srq_uk *srq, u32 *wqe_idx);
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx);
void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq);
int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq);
@@ -409,5 +590,85 @@ int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
u32 *wqdepth);
int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
u32 *wqdepth);
+int irdma_get_srqdepth(struct irdma_uk_attrs *uk_attrs, u32 srq_size, u8 shift,
+ u32 *srqdepth);
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx);
+
+static inline struct qp_err_code irdma_ae_to_qp_err_code(u16 ae_id)
+{
+ struct qp_err_code qp_err = {};
+
+ switch (ae_id) {
+ case IRDMA_AE_AMP_BOUNDS_VIOLATION:
+ case IRDMA_AE_AMP_INVALID_STAG:
+ case IRDMA_AE_AMP_RIGHTS_VIOLATION:
+ case IRDMA_AE_AMP_UNALLOCATED_STAG:
+ case IRDMA_AE_AMP_BAD_PD:
+ case IRDMA_AE_AMP_BAD_QP:
+ case IRDMA_AE_AMP_BAD_STAG_KEY:
+ case IRDMA_AE_AMP_BAD_STAG_INDEX:
+ case IRDMA_AE_AMP_TO_WRAP:
+ case IRDMA_AE_PRIV_OPERATION_DENIED:
+ qp_err.flush_code = FLUSH_PROT_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ break;
+ case IRDMA_AE_UDA_XMIT_BAD_PD:
+ case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
+ qp_err.flush_code = FLUSH_LOC_QP_OP_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
+ case IRDMA_AE_UDA_L4LEN_INVALID:
+ case IRDMA_AE_DDP_UBE_INVALID_MO:
+ case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
+ qp_err.flush_code = FLUSH_LOC_LEN_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
+ case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
+ qp_err.flush_code = FLUSH_REM_ACCESS_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ break;
+ case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
+ case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
+ case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
+ case IRDMA_AE_AMP_MWBIND_VALID_STAG:
+ qp_err.flush_code = FLUSH_MW_BIND_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ break;
+ case IRDMA_AE_LLP_TOO_MANY_RETRIES:
+ qp_err.flush_code = FLUSH_RETRY_EXC_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_IB_INVALID_REQUEST:
+ qp_err.flush_code = FLUSH_REM_INV_REQ_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_REQ_ERR;
+ break;
+ case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
+ case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
+ case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
+ case IRDMA_AE_IB_REMOTE_OP_ERROR:
+ qp_err.flush_code = FLUSH_REM_OP_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_LLP_TOO_MANY_RNRS:
+ qp_err.flush_code = FLUSH_RNR_RETRY_EXC_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_LCE_QP_CATASTROPHIC:
+ case IRDMA_AE_REMOTE_QP_CATASTROPHIC:
+ case IRDMA_AE_LOCAL_QP_CATASTROPHIC:
+ case IRDMA_AE_RCE_QP_CATASTROPHIC:
+ qp_err.flush_code = FLUSH_FATAL_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ default:
+ qp_err.flush_code = FLUSH_GENERAL_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ }
+
+ return qp_err;
+}
#endif /* IRDMA_USER_H */
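
irdma_ae_to_qp_err_code() gives the GEN3 SRQ flush path (see the IRDMA_SRQFLUSH_RSVD_MAJOR_ERR case in uk.c above) a single table for turning an asynchronous-event id into a flush code plus a QP event. A trimmed userspace model with just three of the AE codes, to show the shape of the mapping:

#include <stdio.h>

#define AE_LLP_TOO_MANY_RETRIES 0x050a
#define AE_IB_INVALID_REQUEST   0x0208
#define AE_LCE_QP_CATASTROPHIC  0x0700

enum flush { FLUSH_GENERAL_ERR, FLUSH_RETRY_EXC_ERR,
	     FLUSH_REM_INV_REQ_ERR, FLUSH_FATAL_ERR };

static enum flush ae_to_flush(unsigned short ae_id)
{
	switch (ae_id) {
	case AE_LLP_TOO_MANY_RETRIES:
		return FLUSH_RETRY_EXC_ERR;
	case AE_IB_INVALID_REQUEST:
		return FLUSH_REM_INV_REQ_ERR;
	case AE_LCE_QP_CATASTROPHIC:
		return FLUSH_FATAL_ERR;
	default:
		return FLUSH_GENERAL_ERR;
	}
}

int main(void)
{
	printf("%d\n", ae_to_flush(AE_LLP_TOO_MANY_RETRIES));	/* 1 */
	return 0;
}
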
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index b510ef747399..8b94d87b0192 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -481,6 +481,7 @@ void irdma_free_cqp_request(struct irdma_cqp *cqp,
WRITE_ONCE(cqp_request->request_done, false);
cqp_request->callback_fcn = NULL;
cqp_request->waiting = false;
+ cqp_request->pending = false;
spin_lock_irqsave(&cqp->req_lock, flags);
list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
@@ -521,6 +522,22 @@ irdma_free_pending_cqp_request(struct irdma_cqp *cqp,
}
/**
+ * irdma_cleanup_deferred_cqp_ops - clean-up deferred cqp ops with no completions
+ * @dev: sc_dev
+ * @cqp: cqp
+ */
+static void irdma_cleanup_deferred_cqp_ops(struct irdma_sc_dev *dev,
+ struct irdma_cqp *cqp)
+{
+ u64 scratch;
+
+ /* process all CQP requests with deferred/pending completions */
+ while ((scratch = irdma_sc_cqp_cleanup_handler(dev)))
+ irdma_free_pending_cqp_request(cqp, (struct irdma_cqp_request *)
+ (uintptr_t)scratch);
+}
+
+/**
* irdma_cleanup_pending_cqp_op - clean-up cqp with no
* completions
* @rf: RDMA PCI function
@@ -533,6 +550,8 @@ void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf)
struct cqp_cmds_info *pcmdinfo = NULL;
u32 i, pending_work, wqe_idx;
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ irdma_cleanup_deferred_cqp_ops(dev, cqp);
pending_work = IRDMA_RING_USED_QUANTA(cqp->sc_cqp.sq_ring);
wqe_idx = IRDMA_RING_CURRENT_TAIL(cqp->sc_cqp.sq_ring);
for (i = 0; i < pending_work; i++) {
@@ -552,6 +571,26 @@ void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf)
}
}
+static int irdma_get_timeout_threshold(struct irdma_sc_dev *dev)
+{
+ u16 time_s = dev->vc_caps.cqp_timeout_s;
+
+ if (!time_s)
+ return CQP_TIMEOUT_THRESHOLD;
+
+ return time_s * 1000 / dev->hw_attrs.max_cqp_compl_wait_time_ms;
+}
+
+static int irdma_get_def_timeout_threshold(struct irdma_sc_dev *dev)
+{
+ u16 time_s = dev->vc_caps.cqp_def_timeout_s;
+
+ if (!time_s)
+ return CQP_DEF_CMPL_TIMEOUT_THRESHOLD;
+
+ return time_s * 1000 / dev->hw_attrs.max_cqp_compl_wait_time_ms;
+}
+
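
These helpers convert a channel-advertised budget in seconds into loop iterations of irdma_wait_event() below, which sleeps CQP_COMPL_WAIT_TIME_MS (10 ms) per pass: for example, a cqp_timeout_s of 8 with max_cqp_compl_wait_time_ms = 10 yields 8 * 1000 / 10 = 800 iterations, about 8 seconds. With the fallback defaults, 500 iterations is roughly 5 s for ordinary ops and 2500 roughly 25 s once a completion is known to be deferred.
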
/**
* irdma_wait_event - wait for completion
* @rf: RDMA PCI function
@@ -561,6 +600,7 @@ static int irdma_wait_event(struct irdma_pci_f *rf,
struct irdma_cqp_request *cqp_request)
{
struct irdma_cqp_timeout cqp_timeout = {};
+ int timeout_threshold = irdma_get_timeout_threshold(&rf->sc_dev);
bool cqp_error = false;
int err_code = 0;
@@ -572,9 +612,17 @@ static int irdma_wait_event(struct irdma_pci_f *rf,
msecs_to_jiffies(CQP_COMPL_WAIT_TIME_MS)))
break;
+ if (cqp_request->pending) {
+ /* There was a deferred or pending completion
+ * received for this CQP request, so we need
+ * to wait longer than usual.
+ */
+ timeout_threshold =
+ irdma_get_def_timeout_threshold(&rf->sc_dev);
+ }
+
irdma_check_cqp_progress(&cqp_timeout, &rf->sc_dev);
- if (cqp_timeout.count < CQP_TIMEOUT_THRESHOLD)
+ if (cqp_timeout.count < timeout_threshold)
continue;
if (!rf->reset) {
@@ -649,6 +697,9 @@ static const char *const irdma_cqp_cmd_names[IRDMA_MAX_CQP_OPS] = {
[IRDMA_OP_ADD_LOCAL_MAC_ENTRY] = "Add Local MAC Entry Cmd",
[IRDMA_OP_DELETE_LOCAL_MAC_ENTRY] = "Delete Local MAC Entry Cmd",
[IRDMA_OP_CQ_MODIFY] = "CQ Modify Cmd",
+ [IRDMA_OP_SRQ_CREATE] = "Create SRQ Cmd",
+ [IRDMA_OP_SRQ_MODIFY] = "Modify SRQ Cmd",
+ [IRDMA_OP_SRQ_DESTROY] = "Destroy SRQ Cmd",
};
static const struct irdma_cqp_err_info irdma_noncrit_err_list[] = {
@@ -1065,6 +1116,26 @@ static void irdma_dealloc_push_page(struct irdma_pci_f *rf,
irdma_put_cqp_request(&rf->cqp, cqp_request);
}
+static void irdma_free_gsi_qp_rsrc(struct irdma_qp *iwqp, u32 qp_num)
+{
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct irdma_pci_f *rf = iwdev->rf;
+ unsigned long flags;
+
+ if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev < IRDMA_GEN_3)
+ return;
+
+ irdma_vchnl_req_del_vport(&rf->sc_dev, iwdev->vport_id, qp_num);
+
+ if (qp_num == 1) {
+ spin_lock_irqsave(&rf->rsrc_lock, flags);
+ rf->hwqp1_rsvd = false;
+ spin_unlock_irqrestore(&rf->rsrc_lock, flags);
+ } else if (qp_num > 2) {
+ irdma_free_rsrc(rf, rf->allocated_qps, qp_num);
+ }
+}
+
/**
* irdma_free_qp_rsrc - free up memory resources for qp
* @iwqp: qp ptr (user or kernel)
@@ -1073,7 +1144,7 @@ void irdma_free_qp_rsrc(struct irdma_qp *iwqp)
{
struct irdma_device *iwdev = iwqp->iwdev;
struct irdma_pci_f *rf = iwdev->rf;
- u32 qp_num = iwqp->ibqp.qp_num;
+ u32 qp_num = iwqp->sc_qp.qp_uk.qp_id;
irdma_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
irdma_dealloc_push_page(rf, &iwqp->sc_qp);
@@ -1083,8 +1154,12 @@ void irdma_free_qp_rsrc(struct irdma_qp *iwqp)
iwqp->sc_qp.user_pri);
}
- if (qp_num > 2)
- irdma_free_rsrc(rf, rf->allocated_qps, qp_num);
+ if (iwqp->ibqp.qp_type == IB_QPT_GSI) {
+ irdma_free_gsi_qp_rsrc(iwqp, qp_num);
+ } else {
+ if (qp_num > 2)
+ irdma_free_rsrc(rf, rf->allocated_qps, qp_num);
+ }
dma_free_coherent(rf->sc_dev.hw->device, iwqp->q2_ctx_mem.size,
iwqp->q2_ctx_mem.va, iwqp->q2_ctx_mem.pa);
iwqp->q2_ctx_mem.va = NULL;
@@ -1096,6 +1171,30 @@ void irdma_free_qp_rsrc(struct irdma_qp *iwqp)
}
/**
+ * irdma_srq_wq_destroy - send srq destroy cqp
+ * @rf: RDMA PCI function
+ * @srq: hardware control srq
+ */
+void irdma_srq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_srq *srq)
+{
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = IRDMA_OP_SRQ_DESTROY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.srq_destroy.srq = srq;
+ cqp_info->in.u.srq_destroy.scratch = (uintptr_t)cqp_request;
+
+ irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+}
+
+/**
* irdma_cq_wq_destroy - send cq destroy cqp
* @rf: RDMA PCI function
* @cq: hardware control cq
@@ -2266,7 +2365,10 @@ bool irdma_cq_empty(struct irdma_cq *iwcq)
u8 polarity;
ukcq = &iwcq->sc_cq.cq_uk;
- cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
+ if (ukcq->avoid_mem_cflct)
+ cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(ukcq);
+ else
+ cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
get_64bit_val(cqe, 24, &qword3);
polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index da5a41b275d8..76ce6137f2ba 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -41,7 +41,8 @@ static int irdma_query_device(struct ib_device *ibdev,
props->max_cq = rf->max_cq - rf->used_cqs;
props->max_cqe = rf->max_cqe - 1;
props->max_mr = rf->max_mr - rf->used_mrs;
- props->max_mw = props->max_mr;
+ if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_3)
+ props->max_mw = props->max_mr;
props->max_pd = rf->max_pd - rf->used_pds;
props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;
props->max_qp_rd_atom = hw_attrs->max_hw_ird;
@@ -56,9 +57,21 @@ static int irdma_query_device(struct ib_device *ibdev,
props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX;
props->max_total_mcast_qp_attach = rf->max_qp * IRDMA_MAX_MGS_PER_CTX;
props->max_fast_reg_page_list_len = IRDMA_MAX_PAGES_PER_FMR;
-#define HCA_CLOCK_TIMESTAMP_MASK 0x1ffff
- if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_2)
- props->timestamp_mask = HCA_CLOCK_TIMESTAMP_MASK;
+ props->max_srq = rf->max_srq - rf->used_srqs;
+ props->max_srq_wr = IRDMA_MAX_SRQ_WRS;
+ props->max_srq_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
+ if (hw_attrs->uk_attrs.feature_flags & IRDMA_FEATURE_ATOMIC_OPS)
+ props->atomic_cap = IB_ATOMIC_HCA;
+ else
+ props->atomic_cap = IB_ATOMIC_NONE;
+ props->masked_atomic_cap = props->atomic_cap;
+ if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_3) {
+#define HCA_CORE_CLOCK_KHZ 1000000UL
+ props->timestamp_mask = GENMASK(31, 0);
+ props->hca_core_clock = HCA_CORE_CLOCK_KHZ;
+ props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
+ }
return 0;
}
@@ -292,6 +305,10 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
ucontext->iwdev = iwdev;
ucontext->abi_ver = req.userspace_ver;
+ if (!(req.comp_mask & IRDMA_SUPPORT_WQE_FORMAT_V2) &&
+ uk_attrs->hw_rev >= IRDMA_GEN_3)
+ return -EOPNOTSUPP;
+
if (req.comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR)
ucontext->use_raw_attrs = true;
@@ -332,6 +349,8 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
uresp.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR;
uresp.min_hw_wq_size = uk_attrs->min_hw_wq_size;
uresp.comp_mask |= IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE;
+ uresp.max_hw_srq_quanta = uk_attrs->max_hw_srq_quanta;
+ uresp.comp_mask |= IRDMA_ALLOC_UCTX_MAX_HW_SRQ_QUANTA;
if (ib_copy_to_udata(udata, &uresp,
min(sizeof(uresp), udata->outlen))) {
rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
@@ -343,6 +362,8 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
spin_lock_init(&ucontext->cq_reg_mem_list_lock);
INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
spin_lock_init(&ucontext->qp_reg_mem_list_lock);
+ INIT_LIST_HEAD(&ucontext->srq_reg_mem_list);
+ spin_lock_init(&ucontext->srq_reg_mem_list_lock);
return 0;
@@ -521,7 +542,7 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
iwqp->sc_qp.qp_uk.destroy_pending = true;
- if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS)
+ if (iwqp->iwarp_state >= IRDMA_QP_STATE_IDLE)
irdma_modify_qp_to_err(&iwqp->sc_qp);
if (!iwqp->user_mode)
@@ -541,6 +562,9 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
irdma_remove_push_mmap_entries(iwqp);
+
+ if (iwqp->sc_qp.qp_uk.qp_id == 1)
+ iwdev->rf->hwqp1_rsvd = false;
irdma_free_qp_rsrc(iwqp);
return 0;
@@ -564,7 +588,11 @@ static void irdma_setup_virt_qp(struct irdma_device *iwdev,
if (iwpbl->pbl_allocated) {
init_info->virtual_map = true;
init_info->sq_pa = qpmr->sq_pbl.idx;
- init_info->rq_pa = qpmr->rq_pbl.idx;
+ /* The RQ of a QP that is associated with an SRQ
+ * must use a physically contiguous buffer.
+ */
+ init_info->rq_pa = init_info->qp_uk_init_info.srq_uk ?
+ qpmr->rq_pa : qpmr->rq_pbl.idx;
} else {
init_info->sq_pa = qpmr->sq_pbl.addr;
init_info->rq_pa = qpmr->rq_pbl.addr;
@@ -719,6 +747,7 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE);
ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
+ ukinfo->qp_id = info->qp_uk_init_info.qp_id;
iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
@@ -775,9 +804,12 @@ static void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
roce_info = &iwqp->roce_info;
ether_addr_copy(roce_info->mac_addr, iwdev->netdev->dev_addr);
+ if (iwqp->ibqp.qp_type == IB_QPT_GSI && iwqp->ibqp.qp_num != 1)
+ roce_info->is_qp1 = true;
roce_info->rd_en = true;
roce_info->wr_rdresp_en = true;
- roce_info->bind_en = true;
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ roce_info->bind_en = true;
roce_info->dcqcn_en = false;
roce_info->rtomin = 5;
@@ -808,7 +840,6 @@ static void irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
ether_addr_copy(iwarp_info->mac_addr, iwdev->netdev->dev_addr);
iwarp_info->rd_en = true;
iwarp_info->wr_rdresp_en = true;
- iwarp_info->bind_en = true;
iwarp_info->ecn_en = true;
iwarp_info->rtomin = 5;
@@ -864,6 +895,47 @@ static void irdma_flush_worker(struct work_struct *work)
irdma_generate_flush_completions(iwqp);
}
+static int irdma_setup_gsi_qp_rsrc(struct irdma_qp *iwqp, u32 *qp_num)
+{
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct irdma_pci_f *rf = iwdev->rf;
+ unsigned long flags;
+ int ret;
+
+ if (rf->rdma_ver <= IRDMA_GEN_2) {
+ *qp_num = 1;
+ return 0;
+ }
+
+ spin_lock_irqsave(&rf->rsrc_lock, flags);
+ if (!rf->hwqp1_rsvd) {
+ *qp_num = 1;
+ rf->hwqp1_rsvd = true;
+ spin_unlock_irqrestore(&rf->rsrc_lock, flags);
+ } else {
+ spin_unlock_irqrestore(&rf->rsrc_lock, flags);
+ ret = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp,
+ qp_num, &rf->next_qp);
+ if (ret)
+ return ret;
+ }
+
+ ret = irdma_vchnl_req_add_vport(&rf->sc_dev, iwdev->vport_id, *qp_num,
+ (&iwdev->vsi)->qos);
+ if (ret) {
+ if (*qp_num != 1) {
+ irdma_free_rsrc(rf, rf->allocated_qps, *qp_num);
+ } else {
+ spin_lock_irqsave(&rf->rsrc_lock, flags);
+ rf->hwqp1_rsvd = false;
+ spin_unlock_irqrestore(&rf->rsrc_lock, flags);
+ }
+ return ret;
+ }
+
+ return 0;
+}
+
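
A userspace model of the GEN_3 GSI policy above: the first GSI QP claims reserved HW QP 1 under the resource lock, and any later one falls back to the general allocator (the vchnl add-vport step and the unwind paths are omitted; next_free is a stand-in for irdma_alloc_rsrc()):

#include <stdbool.h>
#include <stdio.h>

static bool hwqp1_rsvd;
static unsigned int next_free = 3;	/* stand-in for irdma_alloc_rsrc() */

static unsigned int setup_gsi_qp_num(void)
{
	if (!hwqp1_rsvd) {
		hwqp1_rsvd = true;
		return 1;
	}
	return next_free++;
}

int main(void)
{
	unsigned int a = setup_gsi_qp_num();
	unsigned int b = setup_gsi_qp_num();
	unsigned int c = setup_gsi_qp_num();

	printf("%u %u %u\n", a, b, c);	/* 1 3 4 */
	return 0;
}
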
/**
* irdma_create_qp - create qp
* @ibqp: ptr of qp
@@ -889,6 +961,18 @@ static int irdma_create_qp(struct ib_qp *ibqp,
struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
struct irdma_qp_init_info init_info = {};
struct irdma_qp_host_ctx_info *ctx_info;
+ struct irdma_srq *iwsrq;
+ bool srq_valid = false;
+ u32 srq_id = 0;
+
+ if (init_attr->srq) {
+ iwsrq = to_iwsrq(init_attr->srq);
+ srq_valid = true;
+ srq_id = iwsrq->srq_num;
+ init_attr->cap.max_recv_sge = uk_attrs->max_hw_wq_frags;
+ init_attr->cap.max_recv_wr = 4;
+ init_info.qp_uk_init_info.srq_uk = &iwsrq->sc_srq.srq_uk;
+ }
err_code = irdma_validate_qp_attrs(init_attr, iwdev);
if (err_code)
@@ -925,16 +1009,20 @@ static int irdma_create_qp(struct ib_qp *ibqp,
init_info.host_ctx = (__le64 *)(init_info.q2 + IRDMA_Q2_BUF_SIZE);
init_info.host_ctx_pa = init_info.q2_pa + IRDMA_Q2_BUF_SIZE;
- if (init_attr->qp_type == IB_QPT_GSI)
- qp_num = 1;
- else
+ if (init_attr->qp_type == IB_QPT_GSI) {
+ err_code = irdma_setup_gsi_qp_rsrc(iwqp, &qp_num);
+ if (err_code)
+ goto error;
+ iwqp->ibqp.qp_num = 1;
+ } else {
err_code = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp,
&qp_num, &rf->next_qp);
- if (err_code)
- goto error;
+ if (err_code)
+ goto error;
+ iwqp->ibqp.qp_num = qp_num;
+ }
iwqp->iwpd = iwpd;
- iwqp->ibqp.qp_num = qp_num;
qp = &iwqp->sc_qp;
iwqp->iwscq = to_iwcq(init_attr->send_cq);
iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
@@ -991,13 +1079,22 @@ static int irdma_create_qp(struct ib_qp *ibqp,
}
ctx_info = &iwqp->ctx_info;
+ ctx_info->srq_valid = srq_valid;
+ ctx_info->srq_id = srq_id;
ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
- if (rdma_protocol_roce(&iwdev->ibdev, 1))
+ if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
+ if (dev->ws_add(&iwdev->vsi, 0)) {
+ irdma_cqp_qp_destroy_cmd(&rf->sc_dev, &iwqp->sc_qp);
+ err_code = -EINVAL;
+ goto error;
+ }
+ irdma_qp_add_qos(&iwqp->sc_qp);
irdma_roce_fill_and_set_qpctx_info(iwqp, ctx_info);
- else
+ } else {
irdma_iw_fill_and_set_qpctx_info(iwqp, ctx_info);
+ }
err_code = irdma_cqp_create_qp_cmd(iwqp);
if (err_code)
@@ -1009,16 +1106,6 @@ static int irdma_create_qp(struct ib_qp *ibqp,
iwqp->sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
rf->qp_table[qp_num] = iwqp;
- if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
- if (dev->ws_add(&iwdev->vsi, 0)) {
- irdma_cqp_qp_destroy_cmd(&rf->sc_dev, &iwqp->sc_qp);
- err_code = -EINVAL;
- goto error;
- }
-
- irdma_qp_add_qos(&iwqp->sc_qp);
- }
-
if (udata) {
/* GEN_1 legacy support with libi40iw does not have expanded uresp struct */
if (udata->outlen < sizeof(uresp)) {
@@ -1063,6 +1150,8 @@ static int irdma_get_ib_acc_flags(struct irdma_qp *iwqp)
acc_flags |= IB_ACCESS_REMOTE_READ;
if (iwqp->roce_info.bind_en)
acc_flags |= IB_ACCESS_MW_BIND;
+ if (iwqp->ctx_info.remote_atomics_en)
+ acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
} else {
if (iwqp->iwarp_info.wr_rdresp_en) {
acc_flags |= IB_ACCESS_LOCAL_WRITE;
@@ -1070,8 +1159,8 @@ static int irdma_get_ib_acc_flags(struct irdma_qp *iwqp)
}
if (iwqp->iwarp_info.rd_en)
acc_flags |= IB_ACCESS_REMOTE_READ;
- if (iwqp->iwarp_info.bind_en)
- acc_flags |= IB_ACCESS_MW_BIND;
+ if (iwqp->ctx_info.remote_atomics_en)
+ acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
}
return acc_flags;
}
@@ -1110,6 +1199,7 @@ static int irdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
attr->pkey_index = iwqp->roce_info.p_key;
attr->retry_cnt = iwqp->udp_info.rexmit_thresh;
attr->rnr_retry = iwqp->udp_info.rnr_nak_thresh;
+ attr->min_rnr_timer = iwqp->udp_info.min_rnr_timer;
attr->max_rd_atomic = iwqp->roce_info.ord_size;
attr->max_dest_rd_atomic = iwqp->roce_info.ird_size;
}
@@ -1118,6 +1208,7 @@ static int irdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
init_attr->qp_context = iwqp->ibqp.qp_context;
init_attr->send_cq = iwqp->ibqp.send_cq;
init_attr->recv_cq = iwqp->ibqp.recv_cq;
+ init_attr->srq = iwqp->ibqp.srq;
init_attr->cap = attr->cap;
return 0;
@@ -1242,6 +1333,10 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (attr_mask & IB_QP_RNR_RETRY)
udp_info->rnr_nak_thresh = attr->rnr_retry;
+ if (attr_mask & IB_QP_MIN_RNR_TIMER &&
+ dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ udp_info->min_rnr_timer = attr->min_rnr_timer;
+
if (attr_mask & IB_QP_RETRY_CNT)
udp_info->rexmit_thresh = attr->retry_cnt;
@@ -1362,6 +1457,9 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
roce_info->wr_rdresp_en = true;
if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
roce_info->rd_en = true;
+ if (dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_ATOMIC_OPS)
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)
+ ctx_info->remote_atomics_en = true;
}
wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
@@ -1777,6 +1875,24 @@ exit:
}
/**
+ * irdma_srq_free_rsrc - free up resources for srq
+ * @rf: RDMA PCI function
+ * @iwsrq: srq ptr
+ */
+static void irdma_srq_free_rsrc(struct irdma_pci_f *rf, struct irdma_srq *iwsrq)
+{
+ struct irdma_sc_srq *srq = &iwsrq->sc_srq;
+
+ if (!iwsrq->user_mode) {
+ dma_free_coherent(rf->sc_dev.hw->device, iwsrq->kmem.size,
+ iwsrq->kmem.va, iwsrq->kmem.pa);
+ iwsrq->kmem.va = NULL;
+ }
+
+ irdma_free_rsrc(rf, rf->allocated_srqs, srq->srq_uk.srq_id);
+}
+
+/**
* irdma_cq_free_rsrc - free up resources for cq
* @rf: RDMA PCI function
* @iwcq: cq ptr
@@ -1840,6 +1956,22 @@ static int irdma_process_resize_list(struct irdma_cq *iwcq,
}
/**
+ * irdma_destroy_srq - destroy srq
+ * @ibsrq: srq pointer
+ * @udata: user data
+ */
+static int irdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
+{
+ struct irdma_device *iwdev = to_iwdev(ibsrq->device);
+ struct irdma_srq *iwsrq = to_iwsrq(ibsrq);
+ struct irdma_sc_srq *srq = &iwsrq->sc_srq;
+
+ irdma_srq_wq_destroy(iwdev->rf, srq);
+ irdma_srq_free_rsrc(iwdev->rf, iwsrq);
+ return 0;
+}
+
+/**
* irdma_destroy_cq - destroy cq
* @ib_cq: cq pointer
* @udata: user data
@@ -1914,8 +2046,13 @@ static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
if (!iwcq->user_mode) {
entries++;
- if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+
+ if (!iwcq->sc_cq.cq_uk.avoid_mem_cflct &&
+ dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
entries *= 2;
+
+ if (entries & 1)
+ entries += 1; /* cq size must be an even number */
}
info.cq_size = max(entries, 4);
@@ -2022,10 +2159,297 @@ error:
return ret;
}
+/**
+ * irdma_srq_event - event notification for srq limit
+ * @srq: shared srq struct
+ */
+void irdma_srq_event(struct irdma_sc_srq *srq)
+{
+ struct irdma_srq *iwsrq = container_of(srq, struct irdma_srq, sc_srq);
+ struct ib_srq *ibsrq = &iwsrq->ibsrq;
+ struct ib_event event;
+
+ srq->srq_limit = 0;
+
+ if (!ibsrq->event_handler)
+ return;
+
+ event.device = ibsrq->device;
+ event.element.port_num = 1;
+ event.element.srq = ibsrq;
+ event.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ ibsrq->event_handler(&event, ibsrq->srq_context);
+}
+
+/**
+ * irdma_modify_srq - modify srq request
+ * @ibsrq: srq's pointer for modify
+ * @attr: access attributes
+ * @attr_mask: state mask
+ * @udata: user data
+ */
+static int irdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask,
+ struct ib_udata *udata)
+{
+ struct irdma_device *iwdev = to_iwdev(ibsrq->device);
+ struct irdma_srq *iwsrq = to_iwsrq(ibsrq);
+ struct irdma_cqp_request *cqp_request;
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct irdma_modify_srq_info *info;
+ struct cqp_cmds_info *cqp_info;
+ int status;
+
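+ /* resizing via IB_SRQ_MAX_WR is not supported; only the limit can change */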
+ if (attr_mask & IB_SRQ_MAX_WR)
+ return -EINVAL;
+
+ if (!(attr_mask & IB_SRQ_LIMIT))
+ return 0;
+
+ if (attr->srq_limit > iwsrq->sc_srq.srq_uk.srq_size)
+ return -EINVAL;
+
+ /* Execute this cqp op synchronously, so we can update srq_limit
+ * upon successful completion.
+ */
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ info = &cqp_info->in.u.srq_modify.info;
+ info->srq_limit = attr->srq_limit;
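+ /* the limit field holds at most 0xFFF, so clamp larger requests */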
+ if (info->srq_limit > 0xFFF)
+ info->srq_limit = 0xFFF;
+ info->arm_limit_event = 1;
+
+ cqp_info->cqp_cmd = IRDMA_OP_SRQ_MODIFY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.srq_modify.srq = &iwsrq->sc_srq;
+ cqp_info->in.u.srq_modify.scratch = (uintptr_t)cqp_request;
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+ if (status)
+ return status;
+
+ iwsrq->sc_srq.srq_limit = info->srq_limit;
+
+ return 0;
+}
+
+static int irdma_setup_umode_srq(struct irdma_device *iwdev,
+ struct irdma_srq *iwsrq,
+ struct irdma_srq_init_info *info,
+ struct ib_udata *udata)
+{
+#define IRDMA_CREATE_SRQ_MIN_REQ_LEN \
+ offsetofend(struct irdma_create_srq_req, user_shadow_area)
+ struct irdma_create_srq_req req = {};
+ struct irdma_ucontext *ucontext;
+ struct irdma_srq_mr *srqmr;
+ struct irdma_pbl *iwpbl;
+ unsigned long flags;
+
+ iwsrq->user_mode = true;
+ ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+ ibucontext);
+
+ if (udata->inlen < IRDMA_CREATE_SRQ_MIN_REQ_LEN)
+ return -EINVAL;
+
+ if (ib_copy_from_udata(&req, udata,
+ min(sizeof(req), udata->inlen)))
+ return -EFAULT;
+
+ spin_lock_irqsave(&ucontext->srq_reg_mem_list_lock, flags);
+ iwpbl = irdma_get_pbl((unsigned long)req.user_srq_buf,
+ &ucontext->srq_reg_mem_list);
+ spin_unlock_irqrestore(&ucontext->srq_reg_mem_list_lock, flags);
+ if (!iwpbl)
+ return -EPROTO;
+
+ iwsrq->iwpbl = iwpbl;
+ srqmr = &iwpbl->srq_mr;
+
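+ /* scattered buffers are referenced by PBLE index, contiguous ones by physical address */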
+ if (iwpbl->pbl_allocated) {
+ info->virtual_map = true;
+ info->pbl_chunk_size = 1;
+ info->first_pm_pbl_idx = srqmr->srq_pbl.idx;
+ info->leaf_pbl_size = 1;
+ } else {
+ info->srq_pa = srqmr->srq_pbl.addr;
+ }
+ info->shadow_area_pa = srqmr->shadow;
+
+ return 0;
+}
+
+static int irdma_setup_kmode_srq(struct irdma_device *iwdev,
+ struct irdma_srq *iwsrq,
+ struct irdma_srq_init_info *info, u32 depth,
+ u8 shift)
+{
+ struct irdma_srq_uk_init_info *ukinfo = &info->srq_uk_init_info;
+ struct irdma_dma_mem *mem = &iwsrq->kmem;
+ u32 size, ring_size;
+
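+ /* one DMA buffer: the SRQ ring followed by its shadow area */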
+ ring_size = depth * IRDMA_QP_WQE_MIN_SIZE;
+ size = ring_size + (IRDMA_SHADOW_AREA_SIZE << 3);
+
+ mem->size = ALIGN(size, 256);
+ mem->va = dma_alloc_coherent(iwdev->rf->hw.device, mem->size,
+ &mem->pa, GFP_KERNEL);
+ if (!mem->va)
+ return -ENOMEM;
+
+ ukinfo->srq = mem->va;
+ ukinfo->srq_size = depth >> shift;
+ ukinfo->shadow_area = mem->va + ring_size;
+
+ info->srq_pa = mem->pa;
+ info->shadow_area_pa = info->srq_pa + ring_size;
+
+ return 0;
+}
+
+/**
+ * irdma_create_srq - create srq
+ * @ibsrq: ib's srq pointer
+ * @initattrs: attributes for srq
+ * @udata: user data for create srq
+ */
+static int irdma_create_srq(struct ib_srq *ibsrq,
+ struct ib_srq_init_attr *initattrs,
+ struct ib_udata *udata)
+{
+ struct irdma_device *iwdev = to_iwdev(ibsrq->device);
+ struct ib_srq_attr *attr = &initattrs->attr;
+ struct irdma_pd *iwpd = to_iwpd(ibsrq->pd);
+ struct irdma_srq *iwsrq = to_iwsrq(ibsrq);
+ struct irdma_srq_uk_init_info *ukinfo;
+ struct irdma_cqp_request *cqp_request;
+ struct irdma_srq_init_info info = {};
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct irdma_uk_attrs *uk_attrs;
+ struct cqp_cmds_info *cqp_info;
+ int err_code = 0;
+ u32 depth;
+ u8 shift;
+
+ uk_attrs = &rf->sc_dev.hw_attrs.uk_attrs;
+ ukinfo = &info.srq_uk_init_info;
+
+ if (initattrs->srq_type != IB_SRQT_BASIC)
+ return -EOPNOTSUPP;
+
+ if (!(uk_attrs->feature_flags & IRDMA_FEATURE_SRQ) ||
+ attr->max_sge > uk_attrs->max_hw_wq_frags)
+ return -EINVAL;
+
+ refcount_set(&iwsrq->refcnt, 1);
+ spin_lock_init(&iwsrq->lock);
+ err_code = irdma_alloc_rsrc(rf, rf->allocated_srqs, rf->max_srq,
+ &iwsrq->srq_num, &rf->next_srq);
+ if (err_code)
+ return err_code;
+
+ ukinfo->max_srq_frag_cnt = attr->max_sge;
+ ukinfo->uk_attrs = uk_attrs;
+ ukinfo->srq_id = iwsrq->srq_num;
+
+ irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_srq_frag_cnt, 0,
+ &shift);
+
+ err_code = irdma_get_srqdepth(ukinfo->uk_attrs, attr->max_wr,
+ shift, &depth);
+ if (err_code)
+ return err_code;
+
+ /* Actual SRQ size in WRs for ring and HW */
+ ukinfo->srq_size = depth >> shift;
+
+ /* Max postable WRs to SRQ */
+ iwsrq->max_wr = (depth - IRDMA_RQ_RSVD) >> shift;
+ attr->max_wr = iwsrq->max_wr;
+
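+ /* user-mode SRQs are backed by registered user memory, kernel-mode SRQs by DMA memory */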
+ if (udata)
+ err_code = irdma_setup_umode_srq(iwdev, iwsrq, &info, udata);
+ else
+ err_code = irdma_setup_kmode_srq(iwdev, iwsrq, &info, depth,
+ shift);
+
+ if (err_code)
+ goto free_rsrc;
+
+ info.vsi = &iwdev->vsi;
+ info.pd = &iwpd->sc_pd;
+
+ err_code = irdma_sc_srq_init(&iwsrq->sc_srq, &info);
+ if (err_code)
+ goto free_dmem;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request) {
+ err_code = -ENOMEM;
+ goto free_dmem;
+ }
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = IRDMA_OP_SRQ_CREATE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.srq_create.srq = &iwsrq->sc_srq;
+ cqp_info->in.u.srq_create.scratch = (uintptr_t)cqp_request;
+ err_code = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+ if (err_code)
+ goto free_dmem;
+
+ if (udata) {
+ struct irdma_create_srq_resp resp = {};
+
+ resp.srq_id = iwsrq->srq_num;
+ resp.srq_size = ukinfo->srq_size;
+ if (ib_copy_to_udata(udata, &resp,
+ min(sizeof(resp), udata->outlen))) {
+ err_code = -EPROTO;
+ goto srq_destroy;
+ }
+ }
+
+ return 0;
+
+srq_destroy:
+ irdma_srq_wq_destroy(rf, &iwsrq->sc_srq);
+
+free_dmem:
+ if (!iwsrq->user_mode)
+ dma_free_coherent(rf->hw.device, iwsrq->kmem.size,
+ iwsrq->kmem.va, iwsrq->kmem.pa);
+free_rsrc:
+ irdma_free_rsrc(rf, rf->allocated_srqs, iwsrq->srq_num);
+ return err_code;
+}
+
+/**
+ * irdma_query_srq - get SRQ attributes
+ * @ibsrq: the SRQ to query
+ * @attr: the attributes of the SRQ
+ */
+static int irdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
+{
+ struct irdma_srq *iwsrq = to_iwsrq(ibsrq);
+
+ attr->max_wr = iwsrq->max_wr;
+ attr->max_sge = iwsrq->sc_srq.srq_uk.max_srq_frag_cnt;
+ attr->srq_limit = iwsrq->sc_srq.srq_limit;
+
+ return 0;
+}
+
static inline int cq_validate_flags(u32 flags, u8 hw_rev)
{
- /* GEN1 does not support CQ create flags */
- if (hw_rev == IRDMA_GEN_1)
+ /* GEN1 and GEN2 do not support CQ create flags */
+ if (hw_rev <= IRDMA_GEN_2)
return flags ? -EOPNOTSUPP : 0;
return flags & ~IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION ? -EOPNOTSUPP : 0;
@@ -2058,6 +2482,7 @@ static int irdma_create_cq(struct ib_cq *ibcq,
unsigned long flags;
int err_code;
int entries = attr->cqe;
+ bool cqe_64byte_ena;
err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
if (err_code)
@@ -2081,6 +2506,9 @@ static int irdma_create_cq(struct ib_cq *ibcq,
info.dev = dev;
ukinfo->cq_size = max(entries, 4);
ukinfo->cq_id = cq_num;
+ cqe_64byte_ena = dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_64_BYTE_CQE ?
+ true : false;
+ ukinfo->avoid_mem_cflct = cqe_64byte_ena;
iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
if (attr->comp_vector < rf->ceqs_count)
info.ceq_id = attr->comp_vector;
@@ -2116,8 +2544,6 @@ static int irdma_create_cq(struct ib_cq *ibcq,
goto cq_free_rsrc;
}
- iwcq->iwpbl = iwpbl;
- iwcq->cq_mem_size = 0;
cqmr = &iwpbl->cq_mr;
if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
@@ -2132,7 +2558,6 @@ static int irdma_create_cq(struct ib_cq *ibcq,
err_code = -EPROTO;
goto cq_free_rsrc;
}
- iwcq->iwpbl_shadow = iwpbl_shadow;
cqmr_shadow = &iwpbl_shadow->cq_mr;
info.shadow_area_pa = cqmr_shadow->cq_pbl.addr;
cqmr->split = true;
@@ -2156,11 +2581,18 @@ static int irdma_create_cq(struct ib_cq *ibcq,
}
entries++;
- if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ if (!cqe_64byte_ena && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
entries *= 2;
+
+ if (entries & 1)
+ entries += 1; /* cq size must be an even number */
+
ukinfo->cq_size = entries;
- rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
+ if (cqe_64byte_ena)
+ rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_extended_cqe);
+ else
+ rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
iwcq->kmem.size = ALIGN(round_up(rsize, 256), 256);
iwcq->kmem.va = dma_alloc_coherent(dev->hw->device,
iwcq->kmem.size,
@@ -2240,8 +2672,9 @@ cq_free_rsrc:
/**
* irdma_get_mr_access - get hw MR access permissions from IB access flags
* @access: IB access flags
+ * @hw_rev: Hardware version
*/
-static inline u16 irdma_get_mr_access(int access)
+static inline u16 irdma_get_mr_access(int access, u8 hw_rev)
{
u16 hw_access = 0;
@@ -2251,8 +2684,10 @@ static inline u16 irdma_get_mr_access(int access)
IRDMA_ACCESS_FLAGS_REMOTEWRITE : 0;
hw_access |= (access & IB_ACCESS_REMOTE_READ) ?
IRDMA_ACCESS_FLAGS_REMOTEREAD : 0;
- hw_access |= (access & IB_ACCESS_MW_BIND) ?
- IRDMA_ACCESS_FLAGS_BIND_WINDOW : 0;
+ if (hw_rev >= IRDMA_GEN_3) {
+ hw_access |= (access & IB_ACCESS_MW_BIND) ?
+ IRDMA_ACCESS_FLAGS_BIND_WINDOW : 0;
+ }
hw_access |= (access & IB_ZERO_BASED) ?
IRDMA_ACCESS_FLAGS_ZERO_BASED : 0;
hw_access |= IRDMA_ACCESS_FLAGS_LOCALREAD;
@@ -2463,6 +2898,7 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev,
struct irdma_mr *iwmr = iwpbl->iwmr;
struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
struct irdma_cq_mr *cqmr = &iwpbl->cq_mr;
+ struct irdma_srq_mr *srqmr = &iwpbl->srq_mr;
struct irdma_hmc_pble *hmc_p;
u64 *arr = iwmr->pgaddrmem;
u32 pg_size, total;
@@ -2482,7 +2918,10 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev,
total = req->sq_pages + req->rq_pages;
hmc_p = &qpmr->sq_pbl;
qpmr->shadow = (dma_addr_t)arr[total];
-
+ /* Need to use physical address for RQ of QP
+ * in case it is associated with SRQ.
+ */
+ qpmr->rq_pa = (dma_addr_t)arr[req->sq_pages];
if (lvl) {
ret = irdma_check_mem_contiguous(arr, req->sq_pages,
pg_size);
@@ -2502,6 +2941,18 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev,
hmc_p->addr = arr[req->sq_pages];
}
break;
+ case IRDMA_MEMREG_TYPE_SRQ:
+ hmc_p = &srqmr->srq_pbl;
+ srqmr->shadow = (dma_addr_t)arr[req->rq_pages];
+ if (lvl)
+ ret = irdma_check_mem_contiguous(arr, req->rq_pages,
+ pg_size);
+
+ if (!ret)
+ hmc_p->idx = palloc->level1.idx;
+ else
+ hmc_p->addr = arr[0];
+ break;
case IRDMA_MEMREG_TYPE_CQ:
hmc_p = &cqmr->cq_pbl;
@@ -2806,7 +3257,10 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
stag_info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
stag_info->stag_key = (u8)iwmr->stag;
stag_info->total_len = iwmr->len;
- stag_info->access_rights = irdma_get_mr_access(access);
+ stag_info->access_rights = irdma_get_mr_access(access,
+ iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev);
+ if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_ATOMIC_OPS)
+ stag_info->remote_atomics_en = (access & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
stag_info->pd_id = iwpd->sc_pd.pd_id;
stag_info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED)
@@ -2972,6 +3426,37 @@ static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
return 0;
}
+static int irdma_reg_user_mr_type_srq(struct irdma_mem_reg_req req,
+ struct ib_udata *udata,
+ struct irdma_mr *iwmr)
+{
+ struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ struct irdma_ucontext *ucontext;
+ unsigned long flags;
+ u32 total;
+ int err;
+ u8 lvl;
+
+ total = req.rq_pages + IRDMA_SHADOW_PGCNT;
+ if (total > iwmr->page_cnt)
+ return -EINVAL;
+
+ lvl = req.rq_pages > 1 ? PBLE_LEVEL_1 : PBLE_LEVEL_0;
+ err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
+ if (err)
+ return err;
+
+ ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+ ibucontext);
+ spin_lock_irqsave(&ucontext->srq_reg_mem_list_lock, flags);
+ list_add_tail(&iwpbl->list, &ucontext->srq_reg_mem_list);
+ iwpbl->on_list = true;
+ spin_unlock_irqrestore(&ucontext->srq_reg_mem_list_lock, flags);
+
+ return 0;
+}
+
static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
struct ib_udata *udata,
struct irdma_mr *iwmr)
@@ -3063,6 +3548,12 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
goto error;
break;
+ case IRDMA_MEMREG_TYPE_SRQ:
+ err = irdma_reg_user_mr_type_srq(req, udata, iwmr);
+ if (err)
+ goto error;
+
+ break;
case IRDMA_MEMREG_TYPE_CQ:
err = irdma_reg_user_mr_type_cq(req, udata, iwmr);
if (err)
@@ -3106,9 +3597,9 @@ static struct ib_mr *irdma_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
umem_dmabuf = ib_umem_dmabuf_get_pinned(pd->device, start, len, fd, access);
if (IS_ERR(umem_dmabuf)) {
- err = PTR_ERR(umem_dmabuf);
- ibdev_dbg(&iwdev->ibdev, "Failed to get dmabuf umem[%d]\n", err);
- return ERR_PTR(err);
+ ibdev_dbg(&iwdev->ibdev, "Failed to get dmabuf umem[%pe]\n",
+ umem_dmabuf);
+ return ERR_CAST(umem_dmabuf);
}
iwmr = irdma_alloc_iwmr(&umem_dmabuf->umem, pd, virt, IRDMA_MEMREG_TYPE_MEM);
@@ -3382,6 +3873,14 @@ static void irdma_del_memlist(struct irdma_mr *iwmr,
}
spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
break;
+ case IRDMA_MEMREG_TYPE_SRQ:
+ spin_lock_irqsave(&ucontext->srq_reg_mem_list_lock, flags);
+ if (iwpbl->on_list) {
+ iwpbl->on_list = false;
+ list_del(&iwpbl->list);
+ }
+ spin_unlock_irqrestore(&ucontext->srq_reg_mem_list_lock, flags);
+ break;
default:
break;
}
@@ -3461,6 +3960,40 @@ static int irdma_post_send(struct ib_qp *ibqp,
if (ib_wr->send_flags & IB_SEND_FENCE)
info.read_fence = true;
switch (ib_wr->opcode) {
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ if (unlikely(!(dev->hw_attrs.uk_attrs.feature_flags &
+ IRDMA_FEATURE_ATOMIC_OPS))) {
+ err = -EINVAL;
+ break;
+ }
+ info.op_type = IRDMA_OP_TYPE_ATOMIC_COMPARE_AND_SWAP;
+ info.op.atomic_compare_swap.tagged_offset = ib_wr->sg_list[0].addr;
+ info.op.atomic_compare_swap.remote_tagged_offset =
+ atomic_wr(ib_wr)->remote_addr;
+ info.op.atomic_compare_swap.swap_data_bytes = atomic_wr(ib_wr)->swap;
+ info.op.atomic_compare_swap.compare_data_bytes =
+ atomic_wr(ib_wr)->compare_add;
+ info.op.atomic_compare_swap.stag = ib_wr->sg_list[0].lkey;
+ info.op.atomic_compare_swap.remote_stag = atomic_wr(ib_wr)->rkey;
+ err = irdma_uk_atomic_compare_swap(ukqp, &info, false);
+ break;
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ if (unlikely(!(dev->hw_attrs.uk_attrs.feature_flags &
+ IRDMA_FEATURE_ATOMIC_OPS))) {
+ err = -EINVAL;
+ break;
+ }
+ info.op_type = IRDMA_OP_TYPE_ATOMIC_FETCH_AND_ADD;
+ info.op.atomic_fetch_add.tagged_offset = ib_wr->sg_list[0].addr;
+ info.op.atomic_fetch_add.remote_tagged_offset =
+ atomic_wr(ib_wr)->remote_addr;
+ info.op.atomic_fetch_add.fetch_add_data_bytes =
+ atomic_wr(ib_wr)->compare_add;
+ info.op.atomic_fetch_add.stag = ib_wr->sg_list[0].lkey;
+ info.op.atomic_fetch_add.remote_stag =
+ atomic_wr(ib_wr)->rkey;
+ err = irdma_uk_atomic_fetch_add(ukqp, &info, false);
+ break;
case IB_WR_SEND_WITH_IMM:
if (ukqp->qp_caps & IRDMA_SEND_WITH_IMM) {
info.imm_data_valid = true;
@@ -3555,7 +4088,9 @@ static int irdma_post_send(struct ib_qp *ibqp,
stag_info.signaled = info.signaled;
stag_info.read_fence = info.read_fence;
- stag_info.access_rights = irdma_get_mr_access(reg_wr(ib_wr)->access);
+ stag_info.access_rights =
+ irdma_get_mr_access(reg_wr(ib_wr)->access,
+ dev->hw_attrs.uk_attrs.hw_rev);
stag_info.stag_key = reg_wr(ib_wr)->key & 0xff;
stag_info.stag_idx = reg_wr(ib_wr)->key >> 8;
stag_info.page_size = reg_wr(ib_wr)->mr->page_size;
@@ -3594,6 +4129,48 @@ static int irdma_post_send(struct ib_qp *ibqp,
mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
}
+
+ if (err)
+ *bad_wr = ib_wr;
+
+ return err;
+}
+
+/**
+ * irdma_post_srq_recv - post receive wr for kernel application
+ * @ibsrq: ib srq pointer
+ * @ib_wr: work request for receive
+ * @bad_wr: bad wr caused an error
+ */
+static int irdma_post_srq_recv(struct ib_srq *ibsrq,
+ const struct ib_recv_wr *ib_wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct irdma_srq *iwsrq = to_iwsrq(ibsrq);
+ struct irdma_srq_uk *uksrq = &iwsrq->sc_srq.srq_uk;
+ struct irdma_post_rq_info post_recv = {};
+ unsigned long flags;
+ int err = 0;
+
+ spin_lock_irqsave(&iwsrq->lock, flags);
+ while (ib_wr) {
+ if (ib_wr->num_sge > uksrq->max_srq_frag_cnt) {
+ err = -EINVAL;
+ goto out;
+ }
+ post_recv.num_sges = ib_wr->num_sge;
+ post_recv.wr_id = ib_wr->wr_id;
+ post_recv.sg_list = ib_wr->sg_list;
+ err = irdma_uk_srq_post_receive(uksrq, &post_recv);
+ if (err)
+ goto out;
+
+ ib_wr = ib_wr->next;
+ }
+
+out:
+ spin_unlock_irqrestore(&iwsrq->lock, flags);
+
if (err)
*bad_wr = ib_wr;
@@ -3619,6 +4196,11 @@ static int irdma_post_recv(struct ib_qp *ibqp,
iwqp = to_iwqp(ibqp);
ukqp = &iwqp->sc_qp.qp_uk;
+ if (ukqp->srq_uk) {
+ *bad_wr = ib_wr;
+ return -EINVAL;
+ }
+
spin_lock_irqsave(&iwqp->lock, flags);
while (ib_wr) {
post_recv.num_sges = ib_wr->num_sge;
@@ -3671,6 +4253,8 @@ static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode
return IB_WC_MW_BIND_ERR;
case FLUSH_REM_INV_REQ_ERR:
return IB_WC_REM_INV_REQ_ERR;
+ case FLUSH_RNR_RETRY_EXC_ERR:
+ return IB_WC_RNR_RETRY_EXC_ERR;
case FLUSH_FATAL_ERR:
default:
return IB_WC_FATAL_ERR;
@@ -3727,8 +4311,12 @@ static void irdma_process_cqe(struct ib_wc *entry,
if (cq_poll_info->q_type == IRDMA_CQE_QTYPE_SQ) {
set_ib_wc_op_sq(cq_poll_info, entry);
} else {
- set_ib_wc_op_rq(cq_poll_info, entry,
- qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM);
+ if (qp->dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2)
+ set_ib_wc_op_rq(cq_poll_info, entry,
+ qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM ?
+ true : false);
+ else
+ set_ib_wc_op_rq_gen_3(cq_poll_info, entry);
if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD &&
cq_poll_info->stag_invalid_set) {
entry->ex.invalidate_rkey = cq_poll_info->inv_stag;
@@ -3923,40 +4511,7 @@ static int irdma_req_notify_cq(struct ib_cq *ibcq,
return ret;
}
-static int irdma_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
- struct ib_port_immutable *immutable)
-{
- struct ib_port_attr attr;
- int err;
-
- immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
- err = ib_query_port(ibdev, port_num, &attr);
- if (err)
- return err;
-
- immutable->max_mad_size = IB_MGMT_MAD_SIZE;
- immutable->pkey_tbl_len = attr.pkey_tbl_len;
- immutable->gid_tbl_len = attr.gid_tbl_len;
-
- return 0;
-}
-
-static int irdma_iw_port_immutable(struct ib_device *ibdev, u32 port_num,
- struct ib_port_immutable *immutable)
-{
- struct ib_port_attr attr;
- int err;
-
- immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
- err = ib_query_port(ibdev, port_num, &attr);
- if (err)
- return err;
- immutable->gid_tbl_len = attr.gid_tbl_len;
-
- return 0;
-}
-
-static const struct rdma_stat_desc irdma_hw_stat_names[] = {
+static const struct rdma_stat_desc irdma_hw_stat_descs[] = {
/* gen1 - 32-bit */
[IRDMA_HW_STAT_INDEX_IP4RXDISCARD].name = "ip4InDiscards",
[IRDMA_HW_STAT_INDEX_IP4RXTRUNC].name = "ip4InTruncatedPkts",
@@ -3964,9 +4519,6 @@ static const struct rdma_stat_desc irdma_hw_stat_names[] = {
[IRDMA_HW_STAT_INDEX_IP6RXDISCARD].name = "ip6InDiscards",
[IRDMA_HW_STAT_INDEX_IP6RXTRUNC].name = "ip6InTruncatedPkts",
[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE].name = "ip6OutNoRoutes",
- [IRDMA_HW_STAT_INDEX_TCPRTXSEG].name = "tcpRetransSegs",
- [IRDMA_HW_STAT_INDEX_TCPRXOPTERR].name = "tcpInOptErrors",
- [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR].name = "tcpInProtoErrors",
[IRDMA_HW_STAT_INDEX_RXVLANERR].name = "rxVlanErrors",
/* gen1 - 64-bit */
[IRDMA_HW_STAT_INDEX_IP4RXOCTS].name = "ip4InOctets",
@@ -3985,16 +4537,14 @@ static const struct rdma_stat_desc irdma_hw_stat_names[] = {
[IRDMA_HW_STAT_INDEX_IP6TXPKTS].name = "ip6OutPkts",
[IRDMA_HW_STAT_INDEX_IP6TXFRAGS].name = "ip6OutSegRqd",
[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS].name = "ip6OutMcastPkts",
- [IRDMA_HW_STAT_INDEX_TCPRXSEGS].name = "tcpInSegs",
- [IRDMA_HW_STAT_INDEX_TCPTXSEG].name = "tcpOutSegs",
- [IRDMA_HW_STAT_INDEX_RDMARXRDS].name = "iwInRdmaReads",
- [IRDMA_HW_STAT_INDEX_RDMARXSNDS].name = "iwInRdmaSends",
- [IRDMA_HW_STAT_INDEX_RDMARXWRS].name = "iwInRdmaWrites",
- [IRDMA_HW_STAT_INDEX_RDMATXRDS].name = "iwOutRdmaReads",
- [IRDMA_HW_STAT_INDEX_RDMATXSNDS].name = "iwOutRdmaSends",
- [IRDMA_HW_STAT_INDEX_RDMATXWRS].name = "iwOutRdmaWrites",
- [IRDMA_HW_STAT_INDEX_RDMAVBND].name = "iwRdmaBnd",
- [IRDMA_HW_STAT_INDEX_RDMAVINV].name = "iwRdmaInv",
+ [IRDMA_HW_STAT_INDEX_RDMARXRDS].name = "InRdmaReads",
+ [IRDMA_HW_STAT_INDEX_RDMARXSNDS].name = "InRdmaSends",
+ [IRDMA_HW_STAT_INDEX_RDMARXWRS].name = "InRdmaWrites",
+ [IRDMA_HW_STAT_INDEX_RDMATXRDS].name = "OutRdmaReads",
+ [IRDMA_HW_STAT_INDEX_RDMATXSNDS].name = "OutRdmaSends",
+ [IRDMA_HW_STAT_INDEX_RDMATXWRS].name = "OutRdmaWrites",
+ [IRDMA_HW_STAT_INDEX_RDMAVBND].name = "RdmaBnd",
+ [IRDMA_HW_STAT_INDEX_RDMAVINV].name = "RdmaInv",
/* gen2 - 32-bit */
[IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED].name = "cnpHandled",
@@ -4008,9 +4558,59 @@ static const struct rdma_stat_desc irdma_hw_stat_names[] = {
[IRDMA_HW_STAT_INDEX_UDPRXPKTS].name = "RxUDP",
[IRDMA_HW_STAT_INDEX_UDPTXPKTS].name = "TxUDP",
[IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS].name = "RxECNMrkd",
-
+ [IRDMA_HW_STAT_INDEX_TCPRTXSEG].name = "RetransSegs",
+ [IRDMA_HW_STAT_INDEX_TCPRXOPTERR].name = "InOptErrors",
+ [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR].name = "InProtoErrors",
+ [IRDMA_HW_STAT_INDEX_TCPRXSEGS].name = "InSegs",
+ [IRDMA_HW_STAT_INDEX_TCPTXSEG].name = "OutSegs",
+
+ /* gen3 */
+ [IRDMA_HW_STAT_INDEX_RNR_SENT].name = "RNR sent",
+ [IRDMA_HW_STAT_INDEX_RNR_RCVD].name = "RNR received",
+ [IRDMA_HW_STAT_INDEX_RDMAORDLMTCNT].name = "ord limit count",
+ [IRDMA_HW_STAT_INDEX_RDMAIRDLMTCNT].name = "ird limit count",
+ [IRDMA_HW_STAT_INDEX_RDMARXATS].name = "Rx atomics",
+ [IRDMA_HW_STAT_INDEX_RDMATXATS].name = "Tx atomics",
+ [IRDMA_HW_STAT_INDEX_NAKSEQERR].name = "Nak Sequence Error",
+ [IRDMA_HW_STAT_INDEX_NAKSEQERR_IMPLIED].name = "Nak Sequence Error Implied",
+ [IRDMA_HW_STAT_INDEX_RTO].name = "RTO",
+ [IRDMA_HW_STAT_INDEX_RXOOOPKTS].name = "Rcvd Out of order packets",
+ [IRDMA_HW_STAT_INDEX_ICRCERR].name = "CRC errors",
};
+static int irdma_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ int err;
+
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+ err = ib_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+
+ return 0;
+}
+
+static int irdma_iw_port_immutable(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ int err;
+
+ immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+ err = ib_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+
+ return 0;
+}
+
static void irdma_get_dev_fw_str(struct ib_device *dev, char *str)
{
struct irdma_device *iwdev = to_iwdev(dev);
@@ -4034,7 +4634,7 @@ static struct rdma_hw_stats *irdma_alloc_hw_port_stats(struct ib_device *ibdev,
int num_counters = dev->hw_attrs.max_stat_idx;
unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
- return rdma_alloc_hw_stats_struct(irdma_hw_stat_names, num_counters,
+ return rdma_alloc_hw_stats_struct(irdma_hw_stat_descs, num_counters,
lifespan);
}
@@ -4539,7 +5139,7 @@ static bool irdma_ah_exists(struct irdma_device *iwdev,
new_ah->sc_ah.ah_info.dest_ip_addr[2] ^
new_ah->sc_ah.ah_info.dest_ip_addr[3];
- hash_for_each_possible(iwdev->ah_hash_tbl, ah, list, key) {
+ hash_for_each_possible(iwdev->rf->ah_hash_tbl, ah, list, key) {
/* Set ah_valid and ah_id the same so memcmp can work */
new_ah->sc_ah.ah_info.ah_idx = ah->sc_ah.ah_info.ah_idx;
new_ah->sc_ah.ah_info.ah_valid = ah->sc_ah.ah_info.ah_valid;
@@ -4565,14 +5165,14 @@ static int irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags)
struct irdma_ah *ah = to_iwah(ibah);
if ((ah_flags & RDMA_DESTROY_AH_SLEEPABLE) && ah->parent_ah) {
- mutex_lock(&iwdev->ah_tbl_lock);
+ mutex_lock(&iwdev->rf->ah_tbl_lock);
if (!refcount_dec_and_test(&ah->parent_ah->refcnt)) {
- mutex_unlock(&iwdev->ah_tbl_lock);
+ mutex_unlock(&iwdev->rf->ah_tbl_lock);
return 0;
}
hash_del(&ah->parent_ah->list);
kfree(ah->parent_ah);
- mutex_unlock(&iwdev->ah_tbl_lock);
+ mutex_unlock(&iwdev->rf->ah_tbl_lock);
}
irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY,
@@ -4609,11 +5209,11 @@ static int irdma_create_user_ah(struct ib_ah *ibah,
err = irdma_setup_ah(ibah, attr);
if (err)
return err;
- mutex_lock(&iwdev->ah_tbl_lock);
+ mutex_lock(&iwdev->rf->ah_tbl_lock);
if (!irdma_ah_exists(iwdev, ah)) {
err = irdma_create_hw_ah(iwdev, ah, true);
if (err) {
- mutex_unlock(&iwdev->ah_tbl_lock);
+ mutex_unlock(&iwdev->rf->ah_tbl_lock);
return err;
}
/* Add new AH to list */
@@ -4625,11 +5225,11 @@ static int irdma_create_user_ah(struct ib_ah *ibah,
parent_ah->sc_ah.ah_info.dest_ip_addr[3];
ah->parent_ah = parent_ah;
- hash_add(iwdev->ah_hash_tbl, &parent_ah->list, key);
+ hash_add(iwdev->rf->ah_hash_tbl, &parent_ah->list, key);
refcount_set(&parent_ah->refcnt, 1);
}
}
- mutex_unlock(&iwdev->ah_tbl_lock);
+ mutex_unlock(&iwdev->rf->ah_tbl_lock);
uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen));
@@ -4691,6 +5291,20 @@ static enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,
return IB_LINK_LAYER_ETHERNET;
}
+static const struct ib_device_ops irdma_gen1_dev_ops = {
+ .dealloc_driver = irdma_ib_dealloc_device,
+};
+
+static const struct ib_device_ops irdma_gen3_dev_ops = {
+ .alloc_mw = irdma_alloc_mw,
+ .create_srq = irdma_create_srq,
+ .dealloc_mw = irdma_dealloc_mw,
+ .destroy_srq = irdma_destroy_srq,
+ .modify_srq = irdma_modify_srq,
+ .post_srq_recv = irdma_post_srq_recv,
+ .query_srq = irdma_query_srq,
+};
+
static const struct ib_device_ops irdma_roce_dev_ops = {
.attach_mcast = irdma_attach_mcast,
.create_ah = irdma_create_ah,
@@ -4725,7 +5339,6 @@ static const struct ib_device_ops irdma_dev_ops = {
.alloc_hw_port_stats = irdma_alloc_hw_port_stats,
.alloc_mr = irdma_alloc_mr,
- .alloc_mw = irdma_alloc_mw,
.alloc_pd = irdma_alloc_pd,
.alloc_ucontext = irdma_alloc_ucontext,
.create_cq = irdma_create_cq,
@@ -4761,6 +5374,7 @@ static const struct ib_device_ops irdma_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_mw, irdma_mr, ibmw),
INIT_RDMA_OBJ_SIZE(ib_qp, irdma_qp, ibqp),
+ INIT_RDMA_OBJ_SIZE(ib_srq, irdma_srq, ibsrq),
};
/**
@@ -4808,6 +5422,10 @@ static void irdma_init_rdma_device(struct irdma_device *iwdev)
iwdev->ibdev.num_comp_vectors = iwdev->rf->ceqs_count;
iwdev->ibdev.dev.parent = &pcidev->dev;
ib_set_device_ops(&iwdev->ibdev, &irdma_dev_ops);
+ if (iwdev->rf->rdma_ver == IRDMA_GEN_1)
+ ib_set_device_ops(&iwdev->ibdev, &irdma_gen1_dev_ops);
+ if (iwdev->rf->rdma_ver >= IRDMA_GEN_3)
+ ib_set_device_ops(&iwdev->ibdev, &irdma_gen3_dev_ops);
}
/**
@@ -4879,5 +5497,9 @@ void irdma_ib_dealloc_device(struct ib_device *ibdev)
struct irdma_device *iwdev = to_iwdev(ibdev);
irdma_rt_deinit_hw(iwdev);
- irdma_ctrl_deinit_hw(iwdev->rf);
+ if (!iwdev->is_vport) {
+ irdma_ctrl_deinit_hw(iwdev->rf);
+ if (iwdev->rf->vchnl_wq)
+ destroy_workqueue(iwdev->rf->vchnl_wq);
+ }
}
diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
index cfa140b36395..ed21c1b56e8e 100644
--- a/drivers/infiniband/hw/irdma/verbs.h
+++ b/drivers/infiniband/hw/irdma/verbs.h
@@ -8,6 +8,7 @@
#define IRDMA_PKEY_TBL_SZ 1
#define IRDMA_DEFAULT_PKEY 0xFFFF
+#define IRDMA_SHADOW_PGCNT 1
struct irdma_ucontext {
struct ib_ucontext ibucontext;
@@ -17,6 +18,8 @@ struct irdma_ucontext {
spinlock_t cq_reg_mem_list_lock; /* protect CQ memory list */
struct list_head qp_reg_mem_list;
spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */
+ struct list_head srq_reg_mem_list;
+ spinlock_t srq_reg_mem_list_lock; /* protect SRQ memory list */
int abi_ver;
u8 legacy_mode : 1;
u8 use_raw_attrs : 1;
@@ -65,10 +68,16 @@ struct irdma_cq_mr {
bool split;
};
+struct irdma_srq_mr {
+ struct irdma_hmc_pble srq_pbl;
+ dma_addr_t shadow;
+};
+
struct irdma_qp_mr {
struct irdma_hmc_pble sq_pbl;
struct irdma_hmc_pble rq_pbl;
dma_addr_t shadow;
+ dma_addr_t rq_pa;
struct page *sq_page;
};
@@ -85,6 +94,7 @@ struct irdma_pbl {
union {
struct irdma_qp_mr qp_mr;
struct irdma_cq_mr cq_mr;
+ struct irdma_srq_mr srq_mr;
};
bool pbl_allocated:1;
@@ -112,24 +122,33 @@ struct irdma_mr {
struct irdma_pbl iwpbl;
};
+struct irdma_srq {
+ struct ib_srq ibsrq;
+ struct irdma_sc_srq sc_srq __aligned(64);
+ struct irdma_dma_mem kmem;
+ u64 *srq_wrid_mem;
+ refcount_t refcnt;
+ spinlock_t lock; /* for poll srq */
+ struct irdma_pbl *iwpbl;
+ struct irdma_sge *sg_list;
+ u16 srq_head;
+ u32 srq_num;
+ u32 max_wr;
+ bool user_mode:1;
+};
+
struct irdma_cq {
struct ib_cq ibcq;
struct irdma_sc_cq sc_cq;
- u16 cq_head;
- u16 cq_size;
u16 cq_num;
bool user_mode;
atomic_t armed;
enum irdma_cmpl_notify last_notify;
- u32 polled_cmpls;
- u32 cq_mem_size;
struct irdma_dma_mem kmem;
struct irdma_dma_mem kmem_shadow;
struct completion free_cq;
refcount_t refcnt;
spinlock_t lock; /* for poll cq */
- struct irdma_pbl *iwpbl;
- struct irdma_pbl *iwpbl_shadow;
struct list_head resize_list;
struct irdma_cq_poll_info cur_cqe;
struct list_head cmpl_generated;
@@ -259,6 +278,12 @@ static inline void set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
case IRDMA_OP_TYPE_FAST_REG_NSMR:
entry->opcode = IB_WC_REG_MR;
break;
+ case IRDMA_OP_TYPE_ATOMIC_COMPARE_AND_SWAP:
+ entry->opcode = IB_WC_COMP_SWAP;
+ break;
+ case IRDMA_OP_TYPE_ATOMIC_FETCH_AND_ADD:
+ entry->opcode = IB_WC_FETCH_ADD;
+ break;
case IRDMA_OP_TYPE_INV_STAG:
entry->opcode = IB_WC_LOCAL_INV;
break;
@@ -267,6 +292,19 @@ static inline void set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
}
}
+static inline void set_ib_wc_op_rq_gen_3(struct irdma_cq_poll_info *info,
+ struct ib_wc *entry)
+{
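+ /* on GEN3, only RDMA writes with immediate map to a distinct receive opcode */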
+ switch (info->op_type) {
+ case IRDMA_OP_TYPE_RDMA_WRITE:
+ case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
+ entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ break;
+ default:
+ entry->opcode = IB_WC_RECV;
+ }
+}
+
static inline void set_ib_wc_op_rq(struct irdma_cq_poll_info *cq_poll_info,
struct ib_wc *entry, bool send_imm_support)
{
diff --git a/drivers/infiniband/hw/irdma/virtchnl.c b/drivers/infiniband/hw/irdma/virtchnl.c
new file mode 100644
index 000000000000..16ad27247527
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/virtchnl.c
@@ -0,0 +1,618 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2015 - 2024 Intel Corporation */
+
+#include "osdep.h"
+#include "hmc.h"
+#include "defs.h"
+#include "type.h"
+#include "protos.h"
+#include "virtchnl.h"
+#include "ws.h"
+#include "i40iw_hw.h"
+#include "ig3rdma_hw.h"
+
+struct vchnl_reg_map_elem {
+ u16 reg_id;
+ u16 reg_idx;
+ bool pg_rel;
+};
+
+struct vchnl_regfld_map_elem {
+ u16 regfld_id;
+ u16 regfld_idx;
+};
+
+static struct vchnl_reg_map_elem vchnl_reg_map[] = {
+ {IRDMA_VCHNL_REG_ID_CQPTAIL, IRDMA_CQPTAIL, false},
+ {IRDMA_VCHNL_REG_ID_CQPDB, IRDMA_CQPDB, false},
+ {IRDMA_VCHNL_REG_ID_CCQPSTATUS, IRDMA_CCQPSTATUS, false},
+ {IRDMA_VCHNL_REG_ID_CCQPHIGH, IRDMA_CCQPHIGH, false},
+ {IRDMA_VCHNL_REG_ID_CCQPLOW, IRDMA_CCQPLOW, false},
+ {IRDMA_VCHNL_REG_ID_CQARM, IRDMA_CQARM, false},
+ {IRDMA_VCHNL_REG_ID_CQACK, IRDMA_CQACK, false},
+ {IRDMA_VCHNL_REG_ID_AEQALLOC, IRDMA_AEQALLOC, false},
+ {IRDMA_VCHNL_REG_ID_CQPERRCODES, IRDMA_CQPERRCODES, false},
+ {IRDMA_VCHNL_REG_ID_WQEALLOC, IRDMA_WQEALLOC, false},
+ {IRDMA_VCHNL_REG_ID_DB_ADDR_OFFSET, IRDMA_DB_ADDR_OFFSET, false },
+ {IRDMA_VCHNL_REG_ID_DYN_CTL, IRDMA_GLINT_DYN_CTL, false },
+ {IRDMA_VCHNL_REG_INV_ID, IRDMA_VCHNL_REG_INV_ID, false }
+};
+
+static struct vchnl_regfld_map_elem vchnl_regfld_map[] = {
+ {IRDMA_VCHNL_REGFLD_ID_CCQPSTATUS_CQP_OP_ERR, IRDMA_CCQPSTATUS_CCQP_ERR_M},
+ {IRDMA_VCHNL_REGFLD_ID_CCQPSTATUS_CCQP_DONE, IRDMA_CCQPSTATUS_CCQP_DONE_M},
+ {IRDMA_VCHNL_REGFLD_ID_CQPSQ_STAG_PDID, IRDMA_CQPSQ_STAG_PDID_M},
+ {IRDMA_VCHNL_REGFLD_ID_CQPSQ_CQ_CEQID, IRDMA_CQPSQ_CQ_CEQID_M},
+ {IRDMA_VCHNL_REGFLD_ID_CQPSQ_CQ_CQID, IRDMA_CQPSQ_CQ_CQID_M},
+ {IRDMA_VCHNL_REGFLD_ID_COMMIT_FPM_CQCNT, IRDMA_COMMIT_FPM_CQCNT_M},
+ {IRDMA_VCHNL_REGFLD_ID_UPESD_HMCN_ID, IRDMA_CQPSQ_UPESD_HMCFNID_M},
+ {IRDMA_VCHNL_REGFLD_INV_ID, IRDMA_VCHNL_REGFLD_INV_ID}
+};
+
+#define IRDMA_VCHNL_REG_COUNT ARRAY_SIZE(vchnl_reg_map)
+#define IRDMA_VCHNL_REGFLD_COUNT ARRAY_SIZE(vchnl_regfld_map)
+#define IRDMA_VCHNL_REGFLD_BUF_SIZE \
+ (IRDMA_VCHNL_REG_COUNT * sizeof(struct irdma_vchnl_reg_info) + \
+ IRDMA_VCHNL_REGFLD_COUNT * sizeof(struct irdma_vchnl_reg_field_info))
+#define IRDMA_REGMAP_RESP_BUF_SIZE (IRDMA_VCHNL_RESP_MIN_SIZE + IRDMA_VCHNL_REGFLD_BUF_SIZE)
+
+/**
+ * irdma_sc_vchnl_init - Initialize dev virtchannel and get hw_rev
+ * @dev: dev structure to update
+ * @info: virtchannel info parameters to fill into the dev structure
+ */
+int irdma_sc_vchnl_init(struct irdma_sc_dev *dev,
+ struct irdma_vchnl_init_info *info)
+{
+ dev->vchnl_up = true;
+ dev->privileged = info->privileged;
+ dev->is_pf = info->is_pf;
+ dev->hw_attrs.uk_attrs.hw_rev = info->hw_rev;
+
+ if (!dev->privileged) {
+ int ret = irdma_vchnl_req_get_ver(dev, IRDMA_VCHNL_CHNL_VER_MAX,
+ &dev->vchnl_ver);
+
+ ibdev_dbg(to_ibdev(dev),
+ "DEV: Get Channel version ret = %d, version is %u\n",
+ ret, dev->vchnl_ver);
+
+ if (ret)
+ return ret;
+
+ ret = irdma_vchnl_req_get_caps(dev);
+ if (ret)
+ return ret;
+
+ dev->hw_attrs.uk_attrs.hw_rev = dev->vc_caps.hw_rev;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_vchnl_req_verify_resp - Verify requested response size
+ * @vchnl_req: vchnl message requested
+ * @resp_len: response length sent from vchnl peer
+ */
+static int irdma_vchnl_req_verify_resp(struct irdma_vchnl_req *vchnl_req,
+ u16 resp_len)
+{
+ switch (vchnl_req->vchnl_msg->op_code) {
+ case IRDMA_VCHNL_OP_GET_VER:
+ case IRDMA_VCHNL_OP_GET_HMC_FCN:
+ case IRDMA_VCHNL_OP_PUT_HMC_FCN:
+ if (resp_len != vchnl_req->parm_len)
+ return -EBADMSG;
+ break;
+ case IRDMA_VCHNL_OP_GET_RDMA_CAPS:
+ if (resp_len < IRDMA_VCHNL_OP_GET_RDMA_CAPS_MIN_SIZE)
+ return -EBADMSG;
+ break;
+ case IRDMA_VCHNL_OP_GET_REG_LAYOUT:
+ case IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP:
+ case IRDMA_VCHNL_OP_QUEUE_VECTOR_UNMAP:
+ case IRDMA_VCHNL_OP_ADD_VPORT:
+ case IRDMA_VCHNL_OP_DEL_VPORT:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void irdma_free_vchnl_req_msg(struct irdma_vchnl_req *vchnl_req)
+{
+ kfree(vchnl_req->vchnl_msg);
+}
+
+static int irdma_alloc_vchnl_req_msg(struct irdma_vchnl_req *vchnl_req,
+ struct irdma_vchnl_req_init_info *info)
+{
+ struct irdma_vchnl_op_buf *vchnl_msg;
+
+ vchnl_msg = kzalloc(IRDMA_VCHNL_MAX_MSG_SIZE, GFP_KERNEL);
+
+ if (!vchnl_msg)
+ return -ENOMEM;
+
+ vchnl_msg->op_ctx = (uintptr_t)vchnl_req;
+ vchnl_msg->buf_len = sizeof(*vchnl_msg) + info->req_parm_len;
+ if (info->req_parm_len)
+ memcpy(vchnl_msg->buf, info->req_parm, info->req_parm_len);
+ vchnl_msg->op_code = info->op_code;
+ vchnl_msg->op_ver = info->op_ver;
+
+ vchnl_req->vchnl_msg = vchnl_msg;
+ vchnl_req->parm = info->resp_parm;
+ vchnl_req->parm_len = info->resp_parm_len;
+
+ return 0;
+}
+
+static int irdma_vchnl_req_send_sync(struct irdma_sc_dev *dev,
+ struct irdma_vchnl_req_init_info *info)
+{
+ u16 resp_len = sizeof(dev->vc_recv_buf);
+ struct irdma_vchnl_req vchnl_req = {};
+ u16 msg_len;
+ u8 *msg;
+ int ret;
+
+ ret = irdma_alloc_vchnl_req_msg(&vchnl_req, info);
+ if (ret)
+ return ret;
+
+ msg_len = vchnl_req.vchnl_msg->buf_len;
+ msg = (u8 *)vchnl_req.vchnl_msg;
+
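+ /* serialize channel use; the peer's response lands in dev->vc_recv_buf */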
+ mutex_lock(&dev->vchnl_mutex);
+ ret = ig3rdma_vchnl_send_sync(dev, msg, msg_len, dev->vc_recv_buf,
+ &resp_len);
+ dev->vc_recv_len = resp_len;
+ if (ret)
+ goto exit;
+
+ ret = irdma_vchnl_req_get_resp(dev, &vchnl_req);
+exit:
+ mutex_unlock(&dev->vchnl_mutex);
+ ibdev_dbg(to_ibdev(dev),
+ "VIRT: virtual channel send %s caller: %pS ret=%d op=%u op_ver=%u req_len=%u parm_len=%u resp_len=%u\n",
+ !ret ? "SUCCEEDS" : "FAILS", __builtin_return_address(0),
+ ret, vchnl_req.vchnl_msg->op_code,
+ vchnl_req.vchnl_msg->op_ver, vchnl_req.vchnl_msg->buf_len,
+ vchnl_req.parm_len, vchnl_req.resp_len);
+ irdma_free_vchnl_req_msg(&vchnl_req);
+
+ return ret;
+}
+
+/**
+ * irdma_vchnl_req_get_reg_layout - Get Register Layout
+ * @dev: RDMA device pointer
+ */
+int irdma_vchnl_req_get_reg_layout(struct irdma_sc_dev *dev)
+{
+ u16 reg_idx, reg_id, tmp_reg_id, regfld_idx, regfld_id, tmp_regfld_id;
+ struct irdma_vchnl_reg_field_info *regfld_array = NULL;
+ u8 resp_buffer[IRDMA_REGMAP_RESP_BUF_SIZE] = {};
+ struct vchnl_regfld_map_elem *regfld_map_array;
+ struct irdma_vchnl_req_init_info info = {};
+ struct vchnl_reg_map_elem *reg_map_array;
+ struct irdma_vchnl_reg_info *reg_array;
+ u8 num_bits, shift_cnt;
+ u16 buf_len = 0;
+ u64 bitmask;
+ u32 rindex;
+ int ret;
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ info.op_code = IRDMA_VCHNL_OP_GET_REG_LAYOUT;
+ info.op_ver = IRDMA_VCHNL_OP_GET_REG_LAYOUT_V0;
+ info.resp_parm = resp_buffer;
+ info.resp_parm_len = sizeof(resp_buffer);
+
+ ret = irdma_vchnl_req_send_sync(dev, &info);
+
+ if (ret)
+ return ret;
+
+ /* Parse the response buffer and update reg info: walk the registers
+  * until an invalid ID, then the register fields until an invalid ID.
+  */
+ reg_array = (struct irdma_vchnl_reg_info *)resp_buffer;
+ for (rindex = 0; rindex < IRDMA_VCHNL_REG_COUNT; rindex++) {
+ buf_len += sizeof(struct irdma_vchnl_reg_info);
+ if (buf_len >= sizeof(resp_buffer))
+ return -ENOMEM;
+
+ regfld_array =
+ (struct irdma_vchnl_reg_field_info *)&reg_array[rindex + 1];
+ reg_id = reg_array[rindex].reg_id;
+ if (reg_id == IRDMA_VCHNL_REG_INV_ID)
+ break;
+
+ reg_id &= ~IRDMA_VCHNL_REG_PAGE_REL;
+ if (reg_id >= IRDMA_VCHNL_REG_COUNT)
+ return -EINVAL;
+
+ /* search regmap for register index in hw_regs.*/
+ reg_map_array = vchnl_reg_map;
+ do {
+ tmp_reg_id = reg_map_array->reg_id;
+ if (tmp_reg_id == reg_id)
+ break;
+
+ reg_map_array++;
+ } while (tmp_reg_id != IRDMA_VCHNL_REG_INV_ID);
+ if (tmp_reg_id != reg_id)
+ continue;
+
+ reg_idx = reg_map_array->reg_idx;
+
+ /* Page relative, DB Offset do not need bar offset */
+ if (reg_idx == IRDMA_DB_ADDR_OFFSET ||
+ (reg_array[rindex].reg_id & IRDMA_VCHNL_REG_PAGE_REL)) {
+ dev->hw_regs[reg_idx] =
+ (u32 __iomem *)(uintptr_t)reg_array[rindex].reg_offset;
+ continue;
+ }
+
+ /* Update the local HW struct */
+ dev->hw_regs[reg_idx] = ig3rdma_get_reg_addr(dev->hw,
+ reg_array[rindex].reg_offset);
+ if (!dev->hw_regs[reg_idx])
+ return -EINVAL;
+ }
+
+ if (!regfld_array)
+ return -ENOMEM;
+
+ /* set up doorbell variables using mapped DB page */
+ dev->wqe_alloc_db = dev->hw_regs[IRDMA_WQEALLOC];
+ dev->cq_arm_db = dev->hw_regs[IRDMA_CQARM];
+ dev->aeq_alloc_db = dev->hw_regs[IRDMA_AEQALLOC];
+ dev->cqp_db = dev->hw_regs[IRDMA_CQPDB];
+ dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
+
+ for (rindex = 0; rindex < IRDMA_VCHNL_REGFLD_COUNT; rindex++) {
+ buf_len += sizeof(struct irdma_vchnl_reg_field_info);
+ if ((buf_len - 1) > sizeof(resp_buffer))
+ break;
+
+ if (regfld_array[rindex].fld_id == IRDMA_VCHNL_REGFLD_INV_ID)
+ break;
+
+ regfld_id = regfld_array[rindex].fld_id;
+ regfld_map_array = vchnl_regfld_map;
+ do {
+ tmp_regfld_id = regfld_map_array->regfld_id;
+ if (tmp_regfld_id == regfld_id)
+ break;
+
+ regfld_map_array++;
+ } while (tmp_regfld_id != IRDMA_VCHNL_REGFLD_INV_ID);
+
+ if (tmp_regfld_id != regfld_id)
+ continue;
+
+ regfld_idx = regfld_map_array->regfld_idx;
+
+ num_bits = regfld_array[rindex].fld_bits;
+ shift_cnt = regfld_array[rindex].fld_shift;
+ if ((num_bits + shift_cnt > 64) || !num_bits) {
+ ibdev_dbg(to_ibdev(dev),
+ "ERR: Invalid field mask id %d bits %d shift %d",
+ regfld_id, num_bits, shift_cnt);
+
+ continue;
+ }
+
+ bitmask = (1ULL << num_bits) - 1;
+ dev->hw_masks[regfld_idx] = bitmask << shift_cnt;
+ dev->hw_shifts[regfld_idx] = shift_cnt;
+ }
+
+ return 0;
+}
+
+int irdma_vchnl_req_add_vport(struct irdma_sc_dev *dev, u16 vport_id,
+ u32 qp1_id, struct irdma_qos *qos)
+{
+ struct irdma_vchnl_resp_vport_info resp_vport = { 0 };
+ struct irdma_vchnl_req_vport_info req_vport = { 0 };
+ struct irdma_vchnl_req_init_info info = { 0 };
+ int ret, i;
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ info.op_code = IRDMA_VCHNL_OP_ADD_VPORT;
+ info.op_ver = IRDMA_VCHNL_OP_ADD_VPORT_V0;
+ req_vport.vport_id = vport_id;
+ req_vport.qp1_id = qp1_id;
+ info.req_parm_len = sizeof(req_vport);
+ info.req_parm = &req_vport;
+ info.resp_parm = &resp_vport;
+ info.resp_parm_len = sizeof(resp_vport);
+
+ ret = irdma_vchnl_req_send_sync(dev, &info);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ qos[i].qs_handle = resp_vport.qs_handle[i];
+ qos[i].valid = true;
+ }
+
+ return 0;
+}
+
+int irdma_vchnl_req_del_vport(struct irdma_sc_dev *dev, u16 vport_id, u32 qp1_id)
+{
+ struct irdma_vchnl_req_init_info info = { 0 };
+ struct irdma_vchnl_req_vport_info req_vport = { 0 };
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ info.op_code = IRDMA_VCHNL_OP_DEL_VPORT;
+ info.op_ver = IRDMA_VCHNL_OP_DEL_VPORT_V0;
+ req_vport.vport_id = vport_id;
+ req_vport.qp1_id = qp1_id;
+ info.req_parm_len = sizeof(req_vport);
+ info.req_parm = &req_vport;
+
+ return irdma_vchnl_req_send_sync(dev, &info);
+}
+
+/**
+ * irdma_vchnl_req_aeq_vec_map - Map AEQ to vector on this function
+ * @dev: RDMA device pointer
+ * @v_idx: vector index
+ */
+int irdma_vchnl_req_aeq_vec_map(struct irdma_sc_dev *dev, u32 v_idx)
+{
+ struct irdma_vchnl_req_init_info info = {};
+ struct irdma_vchnl_qvlist_info *qvl;
+ struct irdma_vchnl_qv_info *qv;
+ u16 qvl_size, num_vectors = 1;
+ int ret;
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ qvl_size = struct_size(qvl, qv_info, num_vectors);
+
+ qvl = kzalloc(qvl_size, GFP_KERNEL);
+ if (!qvl)
+ return -ENOMEM;
+
+ qvl->num_vectors = 1;
+ qv = qvl->qv_info;
+
+ qv->ceq_idx = IRDMA_Q_INVALID_IDX;
+ qv->v_idx = v_idx;
+ qv->itr_idx = IRDMA_IDX_ITR0;
+
+ info.op_code = IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP;
+ info.op_ver = IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP_V0;
+ info.req_parm = qvl;
+ info.req_parm_len = qvl_size;
+
+ ret = irdma_vchnl_req_send_sync(dev, &info);
+ kfree(qvl);
+
+ return ret;
+}
+
+/**
+ * irdma_vchnl_req_ceq_vec_map - Map CEQ to vector on this function
+ * @dev: RDMA device pointer
+ * @ceq_id: CEQ index
+ * @v_idx: vector index
+ */
+int irdma_vchnl_req_ceq_vec_map(struct irdma_sc_dev *dev, u16 ceq_id, u32 v_idx)
+{
+ struct irdma_vchnl_req_init_info info = {};
+ struct irdma_vchnl_qvlist_info *qvl;
+ struct irdma_vchnl_qv_info *qv;
+ u16 qvl_size, num_vectors = 1;
+ int ret;
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ qvl_size = struct_size(qvl, qv_info, num_vectors);
+
+ qvl = kzalloc(qvl_size, GFP_KERNEL);
+ if (!qvl)
+ return -ENOMEM;
+
+ qvl->num_vectors = num_vectors;
+ qv = qvl->qv_info;
+
+ qv->aeq_idx = IRDMA_Q_INVALID_IDX;
+ qv->ceq_idx = ceq_id;
+ qv->v_idx = v_idx;
+ qv->itr_idx = IRDMA_IDX_ITR0;
+
+ info.op_code = IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP;
+ info.op_ver = IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP_V0;
+ info.req_parm = qvl;
+ info.req_parm_len = qvl_size;
+
+ ret = irdma_vchnl_req_send_sync(dev, &info);
+ kfree(qvl);
+
+ return ret;
+}
+
+/**
+ * irdma_vchnl_req_get_ver - Request Channel version
+ * @dev: RDMA device pointer
+ * @ver_req: Virtual channel version requested
+ * @ver_res: Virtual channel version response
+ */
+int irdma_vchnl_req_get_ver(struct irdma_sc_dev *dev, u16 ver_req, u32 *ver_res)
+{
+ struct irdma_vchnl_req_init_info info = {};
+ int ret;
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ info.op_code = IRDMA_VCHNL_OP_GET_VER;
+ info.op_ver = ver_req;
+ info.resp_parm = ver_res;
+ info.resp_parm_len = sizeof(*ver_res);
+
+ ret = irdma_vchnl_req_send_sync(dev, &info);
+ if (ret)
+ return ret;
+
+ if (*ver_res < IRDMA_VCHNL_CHNL_VER_MIN) {
+ ibdev_dbg(to_ibdev(dev),
+ "VIRT: %s unsupported vchnl version 0x%0x\n",
+ __func__, *ver_res);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_vchnl_req_get_hmc_fcn - Request VF HMC Function
+ * @dev: RDMA device pointer
+ */
+int irdma_vchnl_req_get_hmc_fcn(struct irdma_sc_dev *dev)
+{
+ struct irdma_vchnl_req_hmc_info req_hmc = {};
+ struct irdma_vchnl_resp_hmc_info resp_hmc = {};
+ struct irdma_vchnl_req_init_info info = {};
+ int ret;
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ info.op_code = IRDMA_VCHNL_OP_GET_HMC_FCN;
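+ /* GEN3 uses the V2 op to exchange protocol and QoS info; older HW sends the bare op */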
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ info.op_ver = IRDMA_VCHNL_OP_GET_HMC_FCN_V2;
+ req_hmc.protocol_used = dev->protocol_used;
+ info.req_parm_len = sizeof(req_hmc);
+ info.req_parm = &req_hmc;
+ info.resp_parm = &resp_hmc;
+ info.resp_parm_len = sizeof(resp_hmc);
+ }
+
+ ret = irdma_vchnl_req_send_sync(dev, &info);
+
+ if (ret)
+ return ret;
+
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ int i;
+
+ dev->hmc_fn_id = resp_hmc.hmc_func;
+
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ dev->qos[i].qs_handle = resp_hmc.qs_handle[i];
+ dev->qos[i].valid = true;
+ }
+ }
+ return 0;
+}
+
+/**
+ * irdma_vchnl_req_put_hmc_fcn - Free VF HMC Function
+ * @dev: RDMA device pointer
+ */
+int irdma_vchnl_req_put_hmc_fcn(struct irdma_sc_dev *dev)
+{
+ struct irdma_vchnl_req_init_info info = {};
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ info.op_code = IRDMA_VCHNL_OP_PUT_HMC_FCN;
+ info.op_ver = IRDMA_VCHNL_OP_PUT_HMC_FCN_V0;
+
+ return irdma_vchnl_req_send_sync(dev, &info);
+}
+
+/**
+ * irdma_vchnl_req_get_caps - Request RDMA capabilities
+ * @dev: RDMA device pointer
+ */
+int irdma_vchnl_req_get_caps(struct irdma_sc_dev *dev)
+{
+ struct irdma_vchnl_req_init_info info = {};
+ int ret;
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ info.op_code = IRDMA_VCHNL_OP_GET_RDMA_CAPS;
+ info.op_ver = IRDMA_VCHNL_OP_GET_RDMA_CAPS_V0;
+ info.resp_parm = &dev->vc_caps;
+ info.resp_parm_len = sizeof(dev->vc_caps);
+
+ ret = irdma_vchnl_req_send_sync(dev, &info);
+
+ if (ret)
+ return ret;
+
+ if (dev->vc_caps.hw_rev > IRDMA_GEN_MAX ||
+ dev->vc_caps.hw_rev < IRDMA_GEN_2) {
+ ibdev_dbg(to_ibdev(dev),
+ "ERR: %s unsupported hw_rev version 0x%0x\n",
+ __func__, dev->vc_caps.hw_rev);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_vchnl_req_get_resp - Receive the inbound vchnl response.
+ * @dev: Dev pointer
+ * @vchnl_req: Vchannel request
+ */
+int irdma_vchnl_req_get_resp(struct irdma_sc_dev *dev,
+ struct irdma_vchnl_req *vchnl_req)
+{
+ struct irdma_vchnl_resp_buf *vchnl_msg_resp =
+ (struct irdma_vchnl_resp_buf *)dev->vc_recv_buf;
+ u16 resp_len;
+ int ret;
+
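+ /* op_ctx echoes the request pointer, tying this response to its request */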
+ if ((uintptr_t)vchnl_req != (uintptr_t)vchnl_msg_resp->op_ctx) {
+ ibdev_dbg(to_ibdev(dev),
+ "VIRT: error vchnl context value does not match\n");
+ return -EBADMSG;
+ }
+
+ resp_len = dev->vc_recv_len - sizeof(*vchnl_msg_resp);
+ resp_len = min(resp_len, vchnl_req->parm_len);
+
+ ret = irdma_vchnl_req_verify_resp(vchnl_req, resp_len);
+ if (ret)
+ return ret;
+
+ ret = (int)vchnl_msg_resp->op_ret;
+ if (ret)
+ return ret;
+
+ vchnl_req->resp_len = 0;
+ if (vchnl_req->parm_len && vchnl_req->parm && resp_len) {
+ memcpy(vchnl_req->parm, vchnl_msg_resp->buf, resp_len);
+ vchnl_req->resp_len = resp_len;
+ ibdev_dbg(to_ibdev(dev), "VIRT: Got response, data size %u\n",
+ resp_len);
+ }
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/irdma/virtchnl.h b/drivers/infiniband/hw/irdma/virtchnl.h
new file mode 100644
index 000000000000..aa955a9125bd
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/virtchnl.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2015 - 2024 Intel Corporation */
+#ifndef IRDMA_VIRTCHNL_H
+#define IRDMA_VIRTCHNL_H
+
+#include "hmc.h"
+#include "irdma.h"
+
+/* IRDMA_VCHNL_CHNL_VER_V0 is for legacy hw, no longer supported. */
+#define IRDMA_VCHNL_CHNL_VER_V2 2
+#define IRDMA_VCHNL_CHNL_VER_MIN IRDMA_VCHNL_CHNL_VER_V2
+#define IRDMA_VCHNL_CHNL_VER_MAX IRDMA_VCHNL_CHNL_VER_V2
+#define IRDMA_VCHNL_OP_GET_HMC_FCN_V0 0
+#define IRDMA_VCHNL_OP_GET_HMC_FCN_V1 1
+#define IRDMA_VCHNL_OP_GET_HMC_FCN_V2 2
+#define IRDMA_VCHNL_OP_PUT_HMC_FCN_V0 0
+#define IRDMA_VCHNL_OP_GET_REG_LAYOUT_V0 0
+#define IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP_V0 0
+#define IRDMA_VCHNL_OP_QUEUE_VECTOR_UNMAP_V0 0
+#define IRDMA_VCHNL_OP_ADD_VPORT_V0 0
+#define IRDMA_VCHNL_OP_DEL_VPORT_V0 0
+#define IRDMA_VCHNL_OP_GET_RDMA_CAPS_V0 0
+#define IRDMA_VCHNL_OP_GET_RDMA_CAPS_MIN_SIZE 1
+
+#define IRDMA_VCHNL_REG_ID_CQPTAIL 0
+#define IRDMA_VCHNL_REG_ID_CQPDB 1
+#define IRDMA_VCHNL_REG_ID_CCQPSTATUS 2
+#define IRDMA_VCHNL_REG_ID_CCQPHIGH 3
+#define IRDMA_VCHNL_REG_ID_CCQPLOW 4
+#define IRDMA_VCHNL_REG_ID_CQARM 5
+#define IRDMA_VCHNL_REG_ID_CQACK 6
+#define IRDMA_VCHNL_REG_ID_AEQALLOC 7
+#define IRDMA_VCHNL_REG_ID_CQPERRCODES 8
+#define IRDMA_VCHNL_REG_ID_WQEALLOC 9
+#define IRDMA_VCHNL_REG_ID_IPCONFIG0 10
+#define IRDMA_VCHNL_REG_ID_DB_ADDR_OFFSET 11
+#define IRDMA_VCHNL_REG_ID_DYN_CTL 12
+#define IRDMA_VCHNL_REG_ID_AEQITRMASK 13
+#define IRDMA_VCHNL_REG_ID_CEQITRMASK 14
+#define IRDMA_VCHNL_REG_INV_ID 0xFFFF
+#define IRDMA_VCHNL_REG_PAGE_REL 0x8000
+
+#define IRDMA_VCHNL_REGFLD_ID_CCQPSTATUS_CQP_OP_ERR 2
+#define IRDMA_VCHNL_REGFLD_ID_CCQPSTATUS_CCQP_DONE 5
+#define IRDMA_VCHNL_REGFLD_ID_CQPSQ_STAG_PDID 6
+#define IRDMA_VCHNL_REGFLD_ID_CQPSQ_CQ_CEQID 7
+#define IRDMA_VCHNL_REGFLD_ID_CQPSQ_CQ_CQID 8
+#define IRDMA_VCHNL_REGFLD_ID_COMMIT_FPM_CQCNT 9
+#define IRDMA_VCHNL_REGFLD_ID_UPESD_HMCN_ID 10
+#define IRDMA_VCHNL_REGFLD_INV_ID 0xFFFF
+
+#define IRDMA_VCHNL_RESP_MIN_SIZE (sizeof(struct irdma_vchnl_resp_buf))
+
+enum irdma_vchnl_ops {
+ IRDMA_VCHNL_OP_GET_VER = 0,
+ IRDMA_VCHNL_OP_GET_HMC_FCN = 1,
+ IRDMA_VCHNL_OP_PUT_HMC_FCN = 2,
+ IRDMA_VCHNL_OP_GET_REG_LAYOUT = 11,
+ IRDMA_VCHNL_OP_GET_RDMA_CAPS = 13,
+ IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP = 14,
+ IRDMA_VCHNL_OP_QUEUE_VECTOR_UNMAP = 15,
+ IRDMA_VCHNL_OP_ADD_VPORT = 16,
+ IRDMA_VCHNL_OP_DEL_VPORT = 17,
+};
+
+struct irdma_vchnl_req_hmc_info {
+ u8 protocol_used;
+ u8 disable_qos;
+} __packed;
+
+struct irdma_vchnl_resp_hmc_info {
+ u16 hmc_func;
+ u16 qs_handle[IRDMA_MAX_USER_PRIORITY];
+} __packed;
+
+struct irdma_vchnl_qv_info {
+ u32 v_idx;
+ u16 ceq_idx;
+ u16 aeq_idx;
+ u8 itr_idx;
+};
+
+struct irdma_vchnl_qvlist_info {
+ u32 num_vectors;
+ struct irdma_vchnl_qv_info qv_info[];
+};
+
+struct irdma_vchnl_req_vport_info {
+ u16 vport_id;
+ u32 qp1_id;
+};
+
+struct irdma_vchnl_resp_vport_info {
+ u16 qs_handle[IRDMA_MAX_USER_PRIORITY];
+};
+
+struct irdma_vchnl_op_buf {
+ u16 op_code;
+ u16 op_ver;
+ u16 buf_len;
+ u16 rsvd;
+ u64 op_ctx;
+ u8 buf[];
+} __packed;
+
+struct irdma_vchnl_resp_buf {
+ u64 op_ctx;
+ u16 buf_len;
+ s16 op_ret;
+ u16 rsvd[2];
+ u8 buf[];
+} __packed;
+
+struct irdma_vchnl_rdma_caps {
+ u8 hw_rev;
+ u16 cqp_timeout_s;
+ u16 cqp_def_timeout_s;
+ u16 max_hw_push_len;
+} __packed;
+
+struct irdma_vchnl_init_info {
+ struct workqueue_struct *vchnl_wq;
+ enum irdma_vers hw_rev;
+ bool privileged;
+ bool is_pf;
+};
+
+struct irdma_vchnl_reg_info {
+ u32 reg_offset;
+ u16 field_cnt;
+ u16 reg_id; /* High bit of reg_id: bar or page relative */
+};
+
+struct irdma_vchnl_reg_field_info {
+ u8 fld_shift;
+ u8 fld_bits;
+ u16 fld_id;
+};
+
+struct irdma_vchnl_req {
+ struct irdma_vchnl_op_buf *vchnl_msg;
+ void *parm;
+ u32 vf_id;
+ u16 parm_len;
+ u16 resp_len;
+};
+
+struct irdma_vchnl_req_init_info {
+ void *req_parm;
+ void *resp_parm;
+ u16 req_parm_len;
+ u16 resp_parm_len;
+ u16 op_code;
+ u16 op_ver;
+} __packed;
+
+struct irdma_qos;
+
+int irdma_sc_vchnl_init(struct irdma_sc_dev *dev,
+ struct irdma_vchnl_init_info *info);
+int irdma_vchnl_req_get_ver(struct irdma_sc_dev *dev, u16 ver_req,
+ u32 *ver_res);
+int irdma_vchnl_req_get_hmc_fcn(struct irdma_sc_dev *dev);
+int irdma_vchnl_req_put_hmc_fcn(struct irdma_sc_dev *dev);
+int irdma_vchnl_req_get_caps(struct irdma_sc_dev *dev);
+int irdma_vchnl_req_get_resp(struct irdma_sc_dev *dev,
+ struct irdma_vchnl_req *vc_req);
+int irdma_vchnl_req_get_reg_layout(struct irdma_sc_dev *dev);
+int irdma_vchnl_req_aeq_vec_map(struct irdma_sc_dev *dev, u32 v_idx);
+int irdma_vchnl_req_ceq_vec_map(struct irdma_sc_dev *dev, u16 ceq_id,
+ u32 v_idx);
+int irdma_vchnl_req_add_vport(struct irdma_sc_dev *dev, u16 vport_id,
+ u32 qp1_id, struct irdma_qos *qos);
+int irdma_vchnl_req_del_vport(struct irdma_sc_dev *dev, u16 vport_id,
+ u32 qp1_id);
+#endif /* IRDMA_VIRTCHNL_H */
diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
index 28e154bbb50f..1becc8779123 100644
--- a/drivers/infiniband/hw/mana/cq.c
+++ b/drivers/infiniband/hw/mana/cq.c
@@ -291,6 +291,32 @@ out:
return wc_index;
}
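+/* Complete any pending GSI send WQEs with a generic error so that
+ * consumers are not left waiting when the device is removed.
+ */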
+void mana_drain_gsi_sqs(struct mana_ib_dev *mdev)
+{
+ struct mana_ib_qp *qp = mana_get_qp_ref(mdev, MANA_GSI_QPN, false);
+ struct ud_sq_shadow_wqe *shadow_wqe;
+ struct mana_ib_cq *cq;
+ unsigned long flags;
+
+ if (!qp)
+ return;
+
+ cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
+ while ((shadow_wqe = shadow_queue_get_next_to_complete(&qp->shadow_sq))
+ != NULL) {
+ shadow_wqe->header.error_code = IB_WC_GENERAL_ERR;
+ shadow_queue_advance_next_to_complete(&qp->shadow_sq);
+ }
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+ if (cq->ibcq.comp_handler)
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+
+ mana_put_qp_ref(qp);
+}
+
int mana_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
diff --git a/drivers/infiniband/hw/mana/device.c b/drivers/infiniband/hw/mana/device.c
index fa60872f169f..bdeddb642b87 100644
--- a/drivers/infiniband/hw/mana/device.c
+++ b/drivers/infiniband/hw/mana/device.c
@@ -230,6 +230,9 @@ static void mana_ib_remove(struct auxiliary_device *adev)
{
struct mana_ib_dev *dev = dev_get_drvdata(&adev->dev);
+ if (mana_ib_is_rnic(dev))
+ mana_drain_gsi_sqs(dev);
+
ib_unregister_device(&dev->ib_dev);
dma_pool_destroy(dev->av_pool);
if (mana_ib_is_rnic(dev)) {
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 6a2471f2e804..fac159f7128d 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -273,9 +273,8 @@ int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
umem = ib_umem_get(&mdev->ib_dev, addr, size, IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem)) {
- err = PTR_ERR(umem);
- ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %d\n", err);
- return err;
+ ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %pe\n", umem);
+ return PTR_ERR(umem);
}
err = mana_ib_create_zero_offset_dma_region(mdev, umem, &queue->gdma_region);
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index 5d31034ac7fb..9d36232ed880 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -43,6 +43,8 @@
*/
#define MANA_AV_BUFFER_SIZE 64
+#define MANA_GSI_QPN (1)
+
struct mana_ib_adapter_caps {
u32 max_sq_id;
u32 max_rq_id;
@@ -410,7 +412,7 @@ struct mana_ib_ah_attr {
u8 traffic_class;
u16 src_port;
u16 dest_port;
- u32 reserved;
+ u32 flow_label;
};
struct mana_rnic_set_qp_state_req {
@@ -427,8 +429,15 @@ struct mana_rnic_set_qp_state_req {
u32 retry_cnt;
u32 rnr_retry;
u32 min_rnr_timer;
- u32 reserved;
+ u32 rate_limit;
struct mana_ib_ah_attr ah_attr;
+ u64 reserved1;
+ u32 qkey;
+ u32 qp_access_flags;
+ u8 local_ack_timeout;
+ u8 max_rd_atomic;
+ u16 reserved2;
+ u32 reserved3;
}; /* HW Data */
struct mana_rnic_set_qp_state_resp {
@@ -718,6 +727,7 @@ int mana_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
int mana_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr);
+void mana_drain_gsi_sqs(struct mana_ib_dev *mdev);
int mana_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mana_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c
index 55701046ffba..3d0245a4c1ed 100644
--- a/drivers/infiniband/hw/mana/mr.c
+++ b/drivers/infiniband/hw/mana/mr.c
@@ -138,7 +138,8 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
ibdev_dbg(ibdev,
- "Failed to get umem for register user-mr, %d\n", err);
+ "Failed to get umem for register user-mr, %pe\n",
+ mr->umem);
goto err_free;
}
@@ -220,7 +221,8 @@ struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 leng
umem_dmabuf = ib_umem_dmabuf_get_pinned(ibdev, start, length, fd, access_flags);
if (IS_ERR(umem_dmabuf)) {
err = PTR_ERR(umem_dmabuf);
- ibdev_dbg(ibdev, "Failed to get dmabuf umem, %d\n", err);
+ ibdev_dbg(ibdev, "Failed to get dmabuf umem, %pe\n",
+ umem_dmabuf);
goto err_free;
}
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index a6bf4d539e67..48c1f4977f21 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -735,6 +735,8 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_SET_QP_STATE, sizeof(req), sizeof(resp));
+
+ req.hdr.req.msg_version = GDMA_MESSAGE_V3;
req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.qp_handle = qp->qp_handle;
@@ -748,6 +750,12 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
req.retry_cnt = attr->retry_cnt;
req.rnr_retry = attr->rnr_retry;
req.min_rnr_timer = attr->min_rnr_timer;
+ req.rate_limit = attr->rate_limit;
+ req.qkey = attr->qkey;
+ req.local_ack_timeout = attr->timeout;
+ req.qp_access_flags = attr->qp_access_flags;
+ req.max_rd_atomic = attr->max_rd_atomic;
+
if (attr_mask & IB_QP_AV) {
ndev = mana_ib_get_netdev(&mdev->ib_dev, ibqp->port);
if (!ndev) {
@@ -774,6 +782,7 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
ibqp->qp_num, attr->dest_qp_num);
req.ah_attr.traffic_class = attr->ah_attr.grh.traffic_class >> 2;
req.ah_attr.hop_limit = attr->ah_attr.grh.hop_limit;
+ req.ah_attr.flow_label = attr->ah_attr.grh.flow_label;
}
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index e6e132f10625..91c714f72099 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1836,9 +1836,9 @@ static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr);
if (IS_ERR(tun_qp->qp)) {
ret = PTR_ERR(tun_qp->qp);
+ pr_err("Couldn't create %s QP (%pe)\n",
+ create_tun ? "tunnel" : "special", tun_qp->qp);
tun_qp->qp = NULL;
- pr_err("Couldn't create %s QP (%d)\n",
- create_tun ? "tunnel" : "special", ret);
return ret;
}
@@ -2017,14 +2017,14 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
NULL, ctx, &cq_attr);
if (IS_ERR(ctx->cq)) {
ret = PTR_ERR(ctx->cq);
- pr_err("Couldn't create tunnel CQ (%d)\n", ret);
+ pr_err("Couldn't create tunnel CQ (%pe)\n", ctx->cq);
goto err_buf;
}
ctx->pd = ib_alloc_pd(ctx->ib_dev, 0);
if (IS_ERR(ctx->pd)) {
ret = PTR_ERR(ctx->pd);
- pr_err("Couldn't create tunnel PD (%d)\n", ret);
+ pr_err("Couldn't create tunnel PD (%pe)\n", ctx->pd);
goto err_cq;
}
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 50fd407103c7..f2887ae6390e 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1652,7 +1652,8 @@ int mlx4_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
sqp->roce_v2_gsi = ib_create_qp(pd, init_attr);
if (IS_ERR(sqp->roce_v2_gsi)) {
- pr_err("Failed to create GSI QP for RoCEv2 (%ld)\n", PTR_ERR(sqp->roce_v2_gsi));
+ pr_err("Failed to create GSI QP for RoCEv2 (%pe)\n",
+ sqp->roce_v2_gsi);
sqp->roce_v2_gsi = NULL;
} else {
to_mqp(sqp->roce_v2_gsi)->flags |=
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 9c8003a78334..a23b364e24ff 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -648,7 +648,7 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
struct mlx5_ib_cq *cq = to_mcq(ibcq);
- void __iomem *uar_page = mdev->priv.uar->map;
+ void __iomem *uar_page = mdev->priv.bfreg.up->map;
unsigned long irq_flags;
int ret = 0;
@@ -923,7 +923,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
cq->buf.frag_buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
- *index = dev->mdev->priv.uar->index;
+ *index = dev->mdev->priv.bfreg.up->index;
return 0;
diff --git a/drivers/infiniband/hw/mlx5/data_direct.c b/drivers/infiniband/hw/mlx5/data_direct.c
index b9ba84afaae2..b81ac5709b56 100644
--- a/drivers/infiniband/hw/mlx5/data_direct.c
+++ b/drivers/infiniband/hw/mlx5/data_direct.c
@@ -35,7 +35,7 @@ static int mlx5_data_direct_vpd_get_vuid(struct mlx5_data_direct_dev *dev)
vpd_data = pci_vpd_alloc(pdev, &vpd_size);
if (IS_ERR(vpd_data)) {
- pci_err(pdev, "Unable to read VPD, err=%ld\n", PTR_ERR(vpd_data));
+ pci_err(pdev, "Unable to read VPD, err=%pe\n", vpd_data);
return PTR_ERR(vpd_data);
}
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 028d9f031dde..8b506417ad2f 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -233,6 +233,7 @@ static u16 get_legacy_obj_type(u16 opcode)
{
switch (opcode) {
case MLX5_CMD_OP_CREATE_RQ:
+ case MLX5_CMD_OP_CREATE_RMP:
return MLX5_EVENT_QUEUE_TYPE_RQ;
case MLX5_CMD_OP_CREATE_QP:
return MLX5_EVENT_QUEUE_TYPE_QP;
diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c
index b804f2dd5628..d5487834ed25 100644
--- a/drivers/infiniband/hw/mlx5/gsi.c
+++ b/drivers/infiniband/hw/mlx5/gsi.c
@@ -131,8 +131,9 @@ int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
gsi->cq = ib_alloc_cq(pd->device, gsi, attr->cap.max_send_wr, 0,
IB_POLL_SOFTIRQ);
if (IS_ERR(gsi->cq)) {
- mlx5_ib_warn(dev, "unable to create send CQ for GSI QP. error %ld\n",
- PTR_ERR(gsi->cq));
+ mlx5_ib_warn(dev,
+ "unable to create send CQ for GSI QP. error %pe\n",
+ gsi->cq);
ret = PTR_ERR(gsi->cq);
goto err_free_wrs;
}
@@ -147,8 +148,9 @@ int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
gsi->rx_qp = ib_create_qp(pd, &hw_init_attr);
if (IS_ERR(gsi->rx_qp)) {
- mlx5_ib_warn(dev, "unable to create hardware GSI QP. error %ld\n",
- PTR_ERR(gsi->rx_qp));
+ mlx5_ib_warn(dev,
+ "unable to create hardware GSI QP. error %pe\n",
+ gsi->rx_qp);
ret = PTR_ERR(gsi->rx_qp);
goto err_destroy_cq;
}
@@ -294,8 +296,9 @@ static void setup_qp(struct mlx5_ib_gsi_qp *gsi, u16 qp_index)
qp = create_gsi_ud_qp(gsi);
if (IS_ERR(qp)) {
- mlx5_ib_warn(dev, "unable to create hardware UD QP for GSI: %ld\n",
- PTR_ERR(qp));
+ mlx5_ib_warn(dev,
+ "unable to create hardware UD QP for GSI: %pe\n",
+ qp);
return;
}
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index d456e4fde3e1..fc1e86f6c409 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -13,6 +13,7 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
+#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
@@ -883,6 +884,51 @@ static void fill_esw_mgr_reg_c0(struct mlx5_core_dev *mdev,
resp->reg_c0.mask = mlx5_eswitch_get_vport_metadata_mask();
}
+/*
+ * Calculate maximum SQ overhead across all QP types.
+ * Other QP types (REG_UMR, UC, RC, UD/SMI/GSI, XRC_TGT)
+ * have smaller overhead than the types calculated below,
+ * so they are implicitly included.
+ */
+static u32 mlx5_ib_calc_max_sq_overhead(void)
+{
+ u32 max_overhead_xrc, overhead_ud_lso, a, b;
+
+ /* XRC_INI */
+ max_overhead_xrc = sizeof(struct mlx5_wqe_xrc_seg);
+ max_overhead_xrc += sizeof(struct mlx5_wqe_ctrl_seg);
+ a = sizeof(struct mlx5_wqe_atomic_seg) +
+ sizeof(struct mlx5_wqe_raddr_seg);
+ b = sizeof(struct mlx5_wqe_umr_ctrl_seg) +
+ sizeof(struct mlx5_mkey_seg) +
+ MLX5_IB_SQ_UMR_INLINE_THRESHOLD / MLX5_IB_UMR_OCTOWORD;
+ max_overhead_xrc += max(a, b);
+
+ /* UD with LSO */
+ overhead_ud_lso = sizeof(struct mlx5_wqe_ctrl_seg);
+ overhead_ud_lso += sizeof(struct mlx5_wqe_eth_pad);
+ overhead_ud_lso += sizeof(struct mlx5_wqe_eth_seg);
+ overhead_ud_lso += sizeof(struct mlx5_wqe_datagram_seg);
+
+ return max(max_overhead_xrc, overhead_ud_lso);
+}
+
+static u32 mlx5_ib_calc_max_qp_wr(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ u32 max_wqe_bb_units = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
+ u32 max_wqe_size;
+ /* max QP overhead + 1 SGE, no inline, no special features */
+ max_wqe_size = mlx5_ib_calc_max_sq_overhead() +
+ sizeof(struct mlx5_wqe_data_seg);
+
+ max_wqe_size = roundup_pow_of_two(max_wqe_size);
+
+ max_wqe_size = ALIGN(max_wqe_size, MLX5_SEND_WQE_BB);
+
+ return (max_wqe_bb_units * MLX5_SEND_WQE_BB) / max_wqe_size;
+}
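A worked instance of the new cap, with illustrative numbers only (the real overhead comes from the mlx5 segment structs, and log_max_qp_sz is device-reported):

	/* Assume roundup_pow_of_two(overhead + one data seg) == 128 bytes
	 * and log_max_qp_sz == 15; both values are examples. */
	u32 max_wqe_bb_units = 1 << 15;			/* 32768 64-byte BBs */
	u32 max_qp_wr = (max_wqe_bb_units * 64) / 128;	/* MLX5_SEND_WQE_BB == 64 */
	/* -> 16384 WRs, half of the raw 1 << log_max_qp_sz reported before */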
+
static int mlx5_ib_query_device(struct ib_device *ibdev,
struct ib_device_attr *props,
struct ib_udata *uhw)
@@ -1041,7 +1087,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->max_mr_size = ~0ull;
props->page_size_cap = ~(min_page_size - 1);
props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
- props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
+ props->max_qp_wr = mlx5_ib_calc_max_qp_wr(dev);
max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
sizeof(struct mlx5_wqe_data_seg);
max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
@@ -1793,7 +1839,8 @@ static void deallocate_uars(struct mlx5_ib_dev *dev,
}
static int mlx5_ib_enable_lb_mp(struct mlx5_core_dev *master,
- struct mlx5_core_dev *slave)
+ struct mlx5_core_dev *slave,
+ struct mlx5_ib_lb_state *lb_state)
{
int err;
@@ -1805,6 +1852,7 @@ static int mlx5_ib_enable_lb_mp(struct mlx5_core_dev *master,
if (err)
goto out;
+ lb_state->force_enable = true;
return 0;
out:
@@ -1813,16 +1861,22 @@ out:
}
static void mlx5_ib_disable_lb_mp(struct mlx5_core_dev *master,
- struct mlx5_core_dev *slave)
+ struct mlx5_core_dev *slave,
+ struct mlx5_ib_lb_state *lb_state)
{
mlx5_nic_vport_update_local_lb(slave, false);
mlx5_nic_vport_update_local_lb(master, false);
+
+ lb_state->force_enable = false;
}
int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
{
int err = 0;
+ if (dev->lb.force_enable)
+ return 0;
+
mutex_lock(&dev->lb.mutex);
if (td)
dev->lb.user_td++;
@@ -1844,6 +1898,9 @@ int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
{
+ if (dev->lb.force_enable)
+ return;
+
mutex_lock(&dev->lb.mutex);
if (td)
dev->lb.user_td--;
@@ -2994,14 +3051,16 @@ int mlx5_ib_dev_res_cq_init(struct mlx5_ib_dev *dev)
pd = ib_alloc_pd(ibdev, 0);
if (IS_ERR(pd)) {
ret = PTR_ERR(pd);
- mlx5_ib_err(dev, "Couldn't allocate PD for res init, err=%d\n", ret);
+ mlx5_ib_err(dev, "Couldn't allocate PD for res init, err=%pe\n",
+ pd);
goto unlock;
}
cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
if (IS_ERR(cq)) {
ret = PTR_ERR(cq);
- mlx5_ib_err(dev, "Couldn't create CQ for res init, err=%d\n", ret);
+ mlx5_ib_err(dev, "Couldn't create CQ for res init, err=%pe\n",
+ cq);
ib_dealloc_pd(pd);
goto unlock;
}
@@ -3045,7 +3104,9 @@ int mlx5_ib_dev_res_srq_init(struct mlx5_ib_dev *dev)
s0 = ib_create_srq(devr->p0, &attr);
if (IS_ERR(s0)) {
ret = PTR_ERR(s0);
- mlx5_ib_err(dev, "Couldn't create SRQ 0 for res init, err=%d\n", ret);
+ mlx5_ib_err(dev,
+ "Couldn't create SRQ 0 for res init, err=%pe\n",
+ s0);
goto unlock;
}
@@ -3057,7 +3118,9 @@ int mlx5_ib_dev_res_srq_init(struct mlx5_ib_dev *dev)
s1 = ib_create_srq(devr->p0, &attr);
if (IS_ERR(s1)) {
ret = PTR_ERR(s1);
- mlx5_ib_err(dev, "Couldn't create SRQ 1 for res init, err=%d\n", ret);
+ mlx5_ib_err(dev,
+ "Couldn't create SRQ 1 for res init, err=%pe\n",
+ s1);
ib_destroy_srq(s0);
}
@@ -3118,6 +3181,7 @@ mlx5_ib_create_data_direct_resources(struct mlx5_ib_dev *dev)
{
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
struct mlx5_core_dev *mdev = dev->mdev;
+ bool ro_supp = false;
void *mkc;
u32 mkey;
u32 pdn;
@@ -3146,14 +3210,37 @@ mlx5_ib_create_data_direct_resources(struct mlx5_ib_dev *dev)
MLX5_SET(mkc, mkc, length64, 1);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
err = mlx5_core_create_mkey(mdev, &mkey, in, inlen);
- kvfree(in);
if (err)
- goto err;
+ goto err_mkey;
dev->ddr.mkey = mkey;
dev->ddr.pdn = pdn;
+
+ /* create another mkey with RO support */
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write)) {
+ MLX5_SET(mkc, mkc, relaxed_ordering_write, 1);
+ ro_supp = true;
+ }
+
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read)) {
+ MLX5_SET(mkc, mkc, relaxed_ordering_read, 1);
+ ro_supp = true;
+ }
+
+ if (ro_supp) {
+ err = mlx5_core_create_mkey(mdev, &mkey, in, inlen);
+ /* RO is defined as best effort */
+ if (!err) {
+ dev->ddr.mkey_ro = mkey;
+ dev->ddr.mkey_ro_valid = true;
+ }
+ }
+
+ kvfree(in);
return 0;
+err_mkey:
+ kvfree(in);
err:
mlx5_core_dealloc_pd(mdev, pdn);
return err;
@@ -3162,6 +3249,10 @@ err:
static void
mlx5_ib_free_data_direct_resources(struct mlx5_ib_dev *dev)
{
+ if (dev->ddr.mkey_ro_valid)
+ mlx5_core_destroy_mkey(dev->mdev, dev->ddr.mkey_ro);
+
mlx5_core_destroy_mkey(dev->mdev, dev->ddr.mkey);
mlx5_core_dealloc_pd(dev->mdev, dev->ddr.pdn);
}
@@ -3523,7 +3614,7 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
lockdep_assert_held(&mlx5_ib_multiport_mutex);
- mlx5_ib_disable_lb_mp(ibdev->mdev, mpi->mdev);
+ mlx5_ib_disable_lb_mp(ibdev->mdev, mpi->mdev, &ibdev->lb);
mlx5_core_mp_event_replay(ibdev->mdev,
MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
@@ -3620,7 +3711,7 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
MLX5_DRIVER_EVENT_AFFILIATION_DONE,
&key);
- err = mlx5_ib_enable_lb_mp(ibdev->mdev, mpi->mdev);
+ err = mlx5_ib_enable_lb_mp(ibdev->mdev, mpi->mdev, &ibdev->lb);
if (err)
goto unbind;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 7ffc7ee92cf0..09d82d5f95e3 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -854,6 +854,8 @@ struct mlx5_ib_port_resources {
struct mlx5_data_direct_resources {
u32 pdn;
u32 mkey;
+ u32 mkey_ro;
+ u8 mkey_ro_valid :1;
};
struct mlx5_ib_resources {
@@ -1109,6 +1111,7 @@ struct mlx5_ib_lb_state {
u32 user_td;
int qps;
bool enabled;
+ bool force_enable;
};
struct mlx5_ib_pf_eq {
@@ -1802,6 +1805,10 @@ mlx5_umem_mkc_find_best_pgsz(struct mlx5_ib_dev *dev, struct ib_umem *umem,
bitmap = GENMASK_ULL(max_log_entity_size_cap, min_log_entity_size_cap);
+ /* In KSM mode, HW requires the IOVA to be aligned to the mkey's page size */
+ if (access_mode == MLX5_MKC_ACCESS_MODE_KSM && iova)
+ bitmap &= GENMASK_ULL(__ffs64(iova), 0);
+
return ib_umem_find_best_pgsz(umem, bitmap, iova);
}
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 1317f2cb38a4..325fa04cbe8a 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1652,8 +1652,7 @@ reg_user_mr_dmabuf(struct ib_pd *pd, struct device *dma_device,
fd, access_flags);
if (IS_ERR(umem_dmabuf)) {
- mlx5_ib_dbg(dev, "umem_dmabuf get failed (%ld)\n",
- PTR_ERR(umem_dmabuf));
+ mlx5_ib_dbg(dev, "umem_dmabuf get failed (%pe)\n", umem_dmabuf);
return ERR_CAST(umem_dmabuf);
}
@@ -1717,11 +1716,11 @@ reg_user_mr_dmabuf_by_data_direct(struct ib_pd *pd, u64 offset,
goto end;
}
- /* The device's 'data direct mkey' was created without RO flags to
- * simplify things and allow for a single mkey per device.
- * Since RO is not a must, mask it out accordingly.
+ /* If the device has no 'data direct mkey' created with RO flags,
+ * mask relaxed ordering out of the access flags accordingly.
*/
- access_flags &= ~IB_ACCESS_RELAXED_ORDERING;
+ if (!dev->ddr.mkey_ro_valid)
+ access_flags &= ~IB_ACCESS_RELAXED_ORDERING;
crossed_mr = reg_user_mr_dmabuf(pd, &data_direct_dev->pdev->dev,
offset, length, virt_addr, fd,
access_flags, MLX5_MKC_ACCESS_MODE_KSM,
diff --git a/drivers/infiniband/hw/mlx5/std_types.c b/drivers/infiniband/hw/mlx5/std_types.c
index bdb568411091..2fcf553044e1 100644
--- a/drivers/infiniband/hw/mlx5/std_types.c
+++ b/drivers/infiniband/hw/mlx5/std_types.c
@@ -83,33 +83,14 @@ static int fill_vport_icm_addr(struct mlx5_core_dev *mdev, u16 vport,
static int fill_vport_vhca_id(struct mlx5_core_dev *mdev, u16 vport,
struct mlx5_ib_uapi_query_port *info)
{
- size_t out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
- void *out;
- int err;
-
- out = kzalloc(out_sz, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
+ int err = mlx5_vport_get_vhca_id(mdev, vport, &info->vport_vhca_id);
- MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
- MLX5_SET(query_hca_cap_in, in, other_function, true);
- MLX5_SET(query_hca_cap_in, in, function_id, vport);
- MLX5_SET(query_hca_cap_in, in, op_mod,
- MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
- HCA_CAP_OPMOD_GET_CUR);
-
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_sz);
if (err)
- goto out;
-
- info->vport_vhca_id = MLX5_GET(query_hca_cap_out, out,
- capability.cmd_hca_cap.vhca_id);
+ return err;
info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT_VHCA_ID;
-out:
- kfree(out);
- return err;
+
+ return 0;
}
static int fill_multiport_info(struct mlx5_ib_dev *dev, u32 port_num,
diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
index 7ef35cddce81..4e562e0dd9e1 100644
--- a/drivers/infiniband/hw/mlx5/umr.c
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -761,7 +761,11 @@ _mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags, bool dd,
if (dd) {
cur_ksm->va = cpu_to_be64(rdma_block_iter_dma_address(&biter));
- cur_ksm->key = cpu_to_be32(dev->ddr.mkey);
+ if (mr->access_flags & IB_ACCESS_RELAXED_ORDERING &&
+ dev->ddr.mkey_ro_valid)
+ cur_ksm->key = cpu_to_be32(dev->ddr.mkey_ro);
+ else
+ cur_ksm->key = cpu_to_be32(dev->ddr.mkey);
if (mr->umem->is_dmabuf &&
(flags & MLX5_IB_UPD_XLT_ZAP)) {
cur_ksm->va = 0;
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index e825e2ef7966..134a79eecfcb 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -492,7 +492,7 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
{
u32 i, offset, max_scan, qpn;
struct rvt_qpn_map *map;
- u32 ret;
+ int ret;
u32 max_qpn = exclude_prefix == RVT_AIP_QP_PREFIX ?
RVT_AIP_QPN_MAX : RVT_QPN_MAX;
@@ -510,7 +510,8 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
else
qpt->flags |= n;
spin_unlock(&qpt->lock);
- goto bail;
+
+ return ret;
}
qpn = qpt->last + qpt->incr;
@@ -530,7 +531,8 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
if (!test_and_set_bit(offset, map->page)) {
qpt->last = qpn;
ret = qpn;
- goto bail;
+
+ return ret;
}
offset += qpt->incr;
/*
@@ -565,10 +567,7 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
qpn = mk_qpn(qpt, map, offset);
}
- ret = -ENOMEM;
-
-bail:
- return ret;
+ return -ENOMEM;
}
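The u32 to int change above is not cosmetic: storing a negative errno in an unsigned local makes any signed check on it dead code. Illustration (standalone, not driver code):

	u32 uret = -ENOMEM;		/* stores 0xfffffff4 */
	bool bad = uret < 0;		/* always false: unsigned comparison */
	int ret = -ENOMEM;		/* negative errno survives as intended */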
/**
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 132a87e52d5c..ac0183a2ff7a 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -345,33 +345,15 @@ int rxe_prepare(struct rxe_av *av, struct rxe_pkt_info *pkt,
static void rxe_skb_tx_dtor(struct sk_buff *skb)
{
- struct net_device *ndev = skb->dev;
- struct rxe_dev *rxe;
- unsigned int qp_index;
- struct rxe_qp *qp;
+ struct rxe_qp *qp = skb->sk->sk_user_data;
int skb_out;
- rxe = rxe_get_dev_from_net(ndev);
- if (!rxe && is_vlan_dev(ndev))
- rxe = rxe_get_dev_from_net(vlan_dev_real_dev(ndev));
- if (WARN_ON(!rxe))
- return;
-
- qp_index = (int)(uintptr_t)skb->sk->sk_user_data;
- if (!qp_index)
- return;
-
- qp = rxe_pool_get_index(&rxe->qp_pool, qp_index);
- if (!qp)
- goto put_dev;
-
skb_out = atomic_dec_return(&qp->skb_out);
- if (qp->need_req_skb && skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW)
+ if (unlikely(qp->need_req_skb &&
+ skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
rxe_sched_task(&qp->send_task);
rxe_put(qp);
-put_dev:
- ib_device_put(&rxe->ib_dev);
sock_put(skb->sk);
}
@@ -383,6 +365,7 @@ static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt)
sock_hold(sk);
skb->sk = sk;
skb->destructor = rxe_skb_tx_dtor;
+ rxe_get(pkt->qp);
atomic_inc(&pkt->qp->skb_out);
if (skb->protocol == htons(ETH_P_IP))
@@ -405,6 +388,7 @@ static int rxe_loopback(struct sk_buff *skb, struct rxe_pkt_info *pkt)
sock_hold(sk);
skb->sk = sk;
skb->destructor = rxe_skb_tx_dtor;
+ rxe_get(pkt->qp);
atomic_inc(&pkt->qp->skb_out);
if (skb->protocol == htons(ETH_P_IP))
@@ -497,6 +481,9 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
goto out;
}
+ /* Add a timestamp to the skb. */
+ skb->tstamp = ktime_get();
+
skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(ndev));
/* FIXME: hold reference to this netdev until life of this skb. */
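The destructor rework pairs cleanly with the transmit side: each in-flight skb now pins the QP directly through sk_user_data instead of recording a pool index to look up later. The pairing introduced above, in sketch form:

	/* transmit (rxe_send()/rxe_loopback()) */
	sock_hold(sk);			/* dropped via sock_put() in the dtor */
	rxe_get(pkt->qp);		/* dropped via rxe_put() in the dtor */
	atomic_inc(&pkt->qp->skb_out);

	/* destructor (rxe_skb_tx_dtor()) */
	struct rxe_qp *qp = skb->sk->sk_user_data;

	atomic_dec(&qp->skb_out);	/* may reschedule the send task */
	rxe_put(qp);
	sock_put(skb->sk);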
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index f2af3e0aef35..95f1c1c2949d 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -244,7 +244,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
if (err < 0)
return err;
- qp->sk->sk->sk_user_data = (void *)(uintptr_t)qp->elem.index;
+ qp->sk->sk->sk_user_data = qp;
/* pick a source UDP port number for this QP based on
* the source QPN. this spreads traffic for different QPs
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index 6f8f353e9583..f522820b950c 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -132,8 +132,12 @@ static void do_task(struct rxe_task *task)
* yield the cpu and reschedule the task
*/
if (!ret) {
- task->state = TASK_STATE_IDLE;
- resched = 1;
+ if (task->state != TASK_STATE_DRAINING) {
+ task->state = TASK_STATE_IDLE;
+ resched = 1;
+ } else {
+ cont = 1;
+ }
goto exit;
}
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 35c3bde0d00a..efa2f097b582 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -769,7 +769,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
struct siw_wqe *wqe = tx_wqe(qp);
unsigned long flags;
- int rv = 0;
+ int rv = 0, imm_err = 0;
if (wr && !rdma_is_kernel_res(&qp->base_qp.res)) {
siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
@@ -955,9 +955,17 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
* Send directly if SQ processing is not in progress.
* Eventual immediate errors (rv < 0) do not affect the involved
* RI resources (Verbs, 8.3.1) and thus do not prevent from SQ
- * processing, if new work is already pending. But rv must be passed
- * to caller.
+ * processing, if new work is already pending. But rv and a pointer
+ * to the failed work request must be passed to the caller.
*/
+ if (unlikely(rv < 0)) {
+ /*
+ * Immediate error
+ */
+ siw_dbg_qp(qp, "Immediate error %d\n", rv);
+ imm_err = rv;
+ *bad_wr = wr;
+ }
if (wqe->wr_status != SIW_WR_IDLE) {
spin_unlock_irqrestore(&qp->sq_lock, flags);
goto skip_direct_sending;
@@ -982,15 +990,10 @@ skip_direct_sending:
up_read(&qp->state_lock);
- if (rv >= 0)
- return 0;
- /*
- * Immediate error
- */
- siw_dbg_qp(qp, "error %d\n", rv);
+ if (unlikely(imm_err))
+ return imm_err;
- *bad_wr = wr;
- return rv;
+ return (rv >= 0) ? 0 : rv;
}
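From the caller's side the contract is unchanged but now honored in order: the first immediate error is latched in imm_err together with *bad_wr before any direct transmission is attempted, so WRs queued ahead of the failure still make progress. Sketch (handle_failed_wr() is a hypothetical caller helper):

	const struct ib_send_wr *bad_wr;
	int rv = siw_post_send(base_qp, wr, &bad_wr);

	if (rv)		/* bad_wr points at the first failing WR */
		handle_failed_wr(bad_wr, rv);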
/*
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 7acafc5c0e09..5b4d76e97437 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -351,26 +351,27 @@ static bool ipoib_is_dev_match_addr_rcu(const struct sockaddr *addr,
}
/*
- * Find the master net_device on top of the given net_device.
+ * Find the L2 master net_device on top of the given net_device.
* @dev: base IPoIB net_device
*
- * Returns the master net_device with a reference held, or the same net_device
- * if no master exists.
+ * Returns the L2 master net_device (such as a bond netdevice) with a
+ * reference held if one exists; otherwise returns the same netdev with a
+ * reference held, including when the upper device is an L3 master (such
+ * as a VRF netdev).
*/
static struct net_device *ipoib_get_master_net_dev(struct net_device *dev)
{
struct net_device *master;
rcu_read_lock();
+
master = netdev_master_upper_dev_get_rcu(dev);
+ if (!master || netif_is_l3_master(master))
+ master = dev;
+
dev_hold(master);
rcu_read_unlock();
- if (master)
- return master;
-
- dev_hold(dev);
- return dev;
+ return master;
}
struct ipoib_walk_data {
@@ -522,7 +523,7 @@ static struct net_device *ipoib_get_net_dev_by_params(
if (ret)
return NULL;
- /* See if we can find a unique device matching the L2 parameters */
+ /* See if we can find a unique device matching the pkey and GID */
matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
gid, NULL, &net_dev);
@@ -535,7 +536,7 @@ static struct net_device *ipoib_get_net_dev_by_params(
dev_put(net_dev);
- /* Couldn't find a unique device with L2 parameters only. Use L3
+ /* Couldn't find a unique device with pkey and GID only. Use L3
* address to uniquely match the net device */
matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
gid, addr, &net_dev);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 5dfb4644446b..71269446353d 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -667,9 +667,9 @@ static int srpt_refresh_port(struct srpt_port *sport)
srpt_mad_recv_handler,
sport, 0);
if (IS_ERR(mad_agent)) {
- pr_err("%s-%d: MAD agent registration failed (%ld). Note: this is expected if SR-IOV is enabled.\n",
+ pr_err("%s-%d: MAD agent registration failed (%pe). Note: this is expected if SR-IOV is enabled.\n",
dev_name(&sport->sdev->device->dev), sport->port,
- PTR_ERR(mad_agent));
+ mad_agent);
sport->mad_agent = NULL;
memset(&port_modify, 0, sizeof(port_modify));
port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
@@ -1865,8 +1865,8 @@ retry:
IB_POLL_WORKQUEUE);
if (IS_ERR(ch->cq)) {
ret = PTR_ERR(ch->cq);
- pr_err("failed to create CQ cqe= %d ret= %d\n",
- ch->rq_size + sq_size, ret);
+ pr_err("failed to create CQ cqe= %d ret= %pe\n",
+ ch->rq_size + sq_size, ch->cq);
goto out;
}
ch->cq_size = ch->rq_size + sq_size;
@@ -3132,7 +3132,7 @@ static int srpt_alloc_srq(struct srpt_device *sdev)
WARN_ON_ONCE(sdev->srq);
srq = ib_create_srq(sdev->pd, &srq_attr);
if (IS_ERR(srq)) {
- pr_debug("ib_create_srq() failed: %ld\n", PTR_ERR(srq));
+ pr_debug("ib_create_srq() failed: %pe\n", srq);
return PTR_ERR(srq);
}
@@ -3236,8 +3236,7 @@ static int srpt_add_one(struct ib_device *device)
if (rdma_port_get_link_layer(device, 1) == IB_LINK_LAYER_INFINIBAND)
sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
if (IS_ERR(sdev->cm_id)) {
- pr_info("ib_create_cm_id() failed: %ld\n",
- PTR_ERR(sdev->cm_id));
+ pr_info("ib_create_cm_id() failed: %pe\n", sdev->cm_id);
ret = PTR_ERR(sdev->cm_id);
sdev->cm_id = NULL;
if (!rdma_cm_id)
@@ -3687,8 +3686,7 @@ static struct rdma_cm_id *srpt_create_rdma_id(struct sockaddr *listen_addr)
rdma_cm_id = rdma_create_id(&init_net, srpt_rdma_cm_handler,
NULL, RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(rdma_cm_id)) {
- pr_err("RDMA/CM ID creation failed: %ld\n",
- PTR_ERR(rdma_cm_id));
+ pr_err("RDMA/CM ID creation failed: %pe\n", rdma_cm_id);
goto out;
}
diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c
index b527308cb52e..66f7ffe8c7e0 100644
--- a/drivers/input/ff-core.c
+++ b/drivers/input/ff-core.c
@@ -8,9 +8,9 @@
/* #define DEBUG */
+#include <linux/export.h>
#include <linux/input.h>
#include <linux/limits.h>
-#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/overflow.h>
#include <linux/sched.h>
diff --git a/drivers/input/ff-memless.c b/drivers/input/ff-memless.c
index 91636479ee3c..e0c1c61aae71 100644
--- a/drivers/input/ff-memless.c
+++ b/drivers/input/ff-memless.c
@@ -10,6 +10,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/module.h>
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index a832bc46bc92..f4f12dd00fff 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -9,6 +9,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/export.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/io.h>
diff --git a/drivers/input/input-compat.c b/drivers/input/input-compat.c
index 2ccd3eedbd67..a5043193ead8 100644
--- a/drivers/input/input-compat.c
+++ b/drivers/input/input-compat.c
@@ -6,6 +6,7 @@
*/
#include <linux/export.h>
+#include <linux/sprintf.h>
#include <linux/uaccess.h>
#include "input-compat.h"
@@ -94,6 +95,28 @@ int input_ff_effect_from_user(const char __user *buffer, size_t size,
return 0;
}
+int input_bits_to_string(char *buf, int buf_size, unsigned long bits,
+ bool skip_empty)
+{
+ int len = 0;
+
+ if (in_compat_syscall()) {
+ u32 dword = bits >> 32;
+ if (dword || !skip_empty)
+ len += snprintf(buf, buf_size, "%x ", dword);
+
+ dword = bits & 0xffffffffUL;
+ if (dword || !skip_empty || len)
+ len += snprintf(buf + len, max(buf_size - len, 0),
+ "%x", dword);
+ } else {
+ if (bits || !skip_empty)
+ len += snprintf(buf, buf_size, "%lx", bits);
+ }
+
+ return len;
+}
+
#else
int input_event_from_user(const char __user *buffer,
@@ -126,6 +149,13 @@ int input_ff_effect_from_user(const char __user *buffer, size_t size,
return 0;
}
+int input_bits_to_string(char *buf, int buf_size, unsigned long bits,
+ bool skip_empty)
+{
+ return bits || !skip_empty ?
+ snprintf(buf, buf_size, "%lx", bits) : 0;
+}
+
#endif /* CONFIG_COMPAT */
EXPORT_SYMBOL_GPL(input_event_from_user);
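Worked example of the helper's compat split, on an illustrative value: a 64-bit bitmap is printed as two space-separated 32-bit hex words for a 32-bit (compat) reader and as a single %lx word natively.

	char buf[32];
	unsigned long bits = 0x0000000100000002UL;

	input_bits_to_string(buf, sizeof(buf), bits, true);
	/* in_compat_syscall():  buf == "1 2"  (high dword, then low) */
	/* native 64-bit path:   buf == "100000002" */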
diff --git a/drivers/input/input-compat.h b/drivers/input/input-compat.h
index 3b7bb12b023b..99c87ceb923d 100644
--- a/drivers/input/input-compat.h
+++ b/drivers/input/input-compat.h
@@ -75,4 +75,7 @@ int input_event_to_user(char __user *buffer,
int input_ff_effect_from_user(const char __user *buffer, size_t size,
struct ff_effect *effect);
+int input_bits_to_string(char *buf, int buf_size, unsigned long bits,
+ bool skip_empty);
+
#endif /* _INPUT_COMPAT_H */
diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c
index 337006dd9dcf..09f518897d4a 100644
--- a/drivers/input/input-mt.c
+++ b/drivers/input/input-mt.c
@@ -198,6 +198,7 @@ void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count)
struct input_mt *mt = dev->mt;
struct input_mt_slot *oldest;
int oldid, count, i;
+ int p, reported_p = 0;
if (!mt)
return;
@@ -216,6 +217,13 @@ void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count)
oldest = ps;
oldid = id;
}
+ if (test_bit(ABS_MT_PRESSURE, dev->absbit)) {
+ p = input_mt_get_value(ps, ABS_MT_PRESSURE);
+ if (mt->flags & INPUT_MT_TOTAL_FORCE)
+ reported_p += p;
+ else if (oldid == id)
+ reported_p = p;
+ }
count++;
}
@@ -245,10 +253,8 @@ void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count)
input_event(dev, EV_ABS, ABS_X, x);
input_event(dev, EV_ABS, ABS_Y, y);
- if (test_bit(ABS_MT_PRESSURE, dev->absbit)) {
- int p = input_mt_get_value(oldest, ABS_MT_PRESSURE);
- input_event(dev, EV_ABS, ABS_PRESSURE, p);
- }
+ if (test_bit(ABS_MT_PRESSURE, dev->absbit))
+ input_event(dev, EV_ABS, ABS_PRESSURE, reported_p);
} else {
if (test_bit(ABS_MT_PRESSURE, dev->absbit))
input_event(dev, EV_ABS, ABS_PRESSURE, 0);
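Behavioral example of the emulation change, with illustrative values: two active contacts report ABS_MT_PRESSURE 30 and 40, the 30 belonging to the oldest contact.

	/* INPUT_MT_TOTAL_FORCE set:   ABS_PRESSURE = 30 + 40 = 70 (sum) */
	/* INPUT_MT_TOTAL_FORCE clear: ABS_PRESSURE = 30 (oldest contact) */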
diff --git a/drivers/input/input-poller.c b/drivers/input/input-poller.c
index 9c57713a6151..1ce83d6521bb 100644
--- a/drivers/input/input-poller.c
+++ b/drivers/input/input-poller.c
@@ -4,6 +4,7 @@
*/
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/input.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 1da41324362b..a500e1e276c2 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -8,6 +8,7 @@
#define pr_fmt(fmt) KBUILD_BASENAME ": " fmt
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/idr.h>
@@ -998,41 +999,6 @@ static int input_attach_handler(struct input_dev *dev, struct input_handler *han
return error;
}
-#ifdef CONFIG_COMPAT
-
-static int input_bits_to_string(char *buf, int buf_size,
- unsigned long bits, bool skip_empty)
-{
- int len = 0;
-
- if (in_compat_syscall()) {
- u32 dword = bits >> 32;
- if (dword || !skip_empty)
- len += snprintf(buf, buf_size, "%x ", dword);
-
- dword = bits & 0xffffffffUL;
- if (dword || !skip_empty || len)
- len += snprintf(buf + len, max(buf_size - len, 0),
- "%x", dword);
- } else {
- if (bits || !skip_empty)
- len += snprintf(buf, buf_size, "%lx", bits);
- }
-
- return len;
-}
-
-#else /* !CONFIG_COMPAT */
-
-static int input_bits_to_string(char *buf, int buf_size,
- unsigned long bits, bool skip_empty)
-{
- return bits || !skip_empty ?
- snprintf(buf, buf_size, "%lx", bits) : 0;
-}
-
-#endif
-
#ifdef CONFIG_PROC_FS
static struct proc_dir_entry *proc_bus_input_dir;
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c
index 55e6321adab9..86d09faa685c 100644
--- a/drivers/input/joystick/iforce/iforce-main.c
+++ b/drivers/input/joystick/iforce/iforce-main.c
@@ -6,6 +6,7 @@
* USB/RS232 I-Force joysticks and wheels.
*/
+#include <linux/export.h>
#include <linux/unaligned.h>
#include "iforce.h"
diff --git a/drivers/input/joystick/iforce/iforce-packets.c b/drivers/input/joystick/iforce/iforce-packets.c
index 74181d5123cd..fd1cd731d781 100644
--- a/drivers/input/joystick/iforce/iforce-packets.c
+++ b/drivers/input/joystick/iforce/iforce-packets.c
@@ -6,6 +6,7 @@
* USB/RS232 I-Force joysticks and wheels.
*/
+#include <linux/export.h>
#include <linux/unaligned.h>
#include "iforce.h"
diff --git a/drivers/input/joystick/psxpad-spi.c b/drivers/input/joystick/psxpad-spi.c
index c47fc5f34bd0..f902a56d011f 100644
--- a/drivers/input/joystick/psxpad-spi.c
+++ b/drivers/input/joystick/psxpad-spi.c
@@ -344,7 +344,11 @@ static int psxpad_spi_probe(struct spi_device *spi)
/* (PlayStation 1/2 joypad might be possible works 250kHz/500kHz) */
spi->controller->min_speed_hz = 125000;
spi->controller->max_speed_hz = 125000;
- spi_setup(spi);
+ err = spi_setup(spi);
+ if (err) {
+ dev_err(&spi->dev, "failed to set up SPI: %d\n", err);
+ return err;
+ }
/* pad settings */
psxpad_set_motor_level(pad, 0, 0);
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 4c94297e17e6..d72e89c25e50 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -422,6 +422,7 @@ static const struct xpad_device {
{ 0x3537, 0x1010, "GameSir G7 SE", 0, XTYPE_XBOXONE },
{ 0x366c, 0x0005, "ByoWave Proteus Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE, FLAG_DELAY_INIT },
{ 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX },
+ { 0x37d7, 0x2501, "Flydigi Apex 5", 0, XTYPE_XBOX360 },
{ 0x413d, 0x2104, "Black Shark Green Ghost Gamepad", 0, XTYPE_XBOX360 },
{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
{ 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
@@ -578,6 +579,7 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOX360_VENDOR(0x3537), /* GameSir Controllers */
XPAD_XBOXONE_VENDOR(0x3537), /* GameSir Controllers */
XPAD_XBOXONE_VENDOR(0x366c), /* ByoWave controllers */
+ XPAD_XBOX360_VENDOR(0x37d7), /* Flydigi Controllers */
XPAD_XBOX360_VENDOR(0x413d), /* Black Shark Green Ghost Controller */
{ }
};
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 7c4f309a4cb6..2ff4fef322c2 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -262,24 +262,6 @@ config KEYBOARD_GPIO_POLLED
To compile this driver as a module, choose M here: the
module will be called gpio_keys_polled.
-config KEYBOARD_TCA6416
- tristate "TCA6416/TCA6408A Keypad Support"
- depends on I2C
- help
- This driver implements basic keypad functionality
- for keys connected through TCA6416/TCA6408A IO expanders.
-
- Say Y here if your device has keys connected to
- TCA6416/TCA6408A IO expander. Your board-specific setup logic
- must also provide pin-mask details(of which TCA6416 pins
- are used for keypad).
-
- If enabled the entire TCA6416 device will be managed through
- this driver.
-
- To compile this driver as a module, choose M here: the
- module will be called tca6416_keypad.
-
config KEYBOARD_TCA8418
tristate "TCA8418 Keypad Support"
depends on I2C
@@ -422,6 +404,18 @@ config KEYBOARD_MAX7359
To compile this driver as a module, choose M here: the
module will be called max7359_keypad.
+config KEYBOARD_MAX7360
+ tristate "Maxim MAX7360 Key Switch Controller"
+ select INPUT_MATRIXKMAP
+ depends on I2C
+ depends on MFD_MAX7360
+ help
+ If you say yes here you get support for the keypad controller on the
+ Maxim MAX7360 I/O Expander.
+
+ To compile this driver as a module, choose M here: the module will be
+ called max7360_keypad.
+
config KEYBOARD_MPR121
tristate "Freescale MPR121 Touchkey"
depends on I2C
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 8bc20ab2b103..2d906e14f3e2 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -23,7 +23,6 @@ obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o
obj-$(CONFIG_KEYBOARD_GOLDFISH_EVENTS) += goldfish_events.o
obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o
obj-$(CONFIG_KEYBOARD_GPIO_POLLED) += gpio_keys_polled.o
-obj-$(CONFIG_KEYBOARD_TCA6416) += tca6416-keypad.o
obj-$(CONFIG_KEYBOARD_TCA8418) += tca8418_keypad.o
obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o
obj-$(CONFIG_KEYBOARD_HIL_OLD) += hilkbd.o
@@ -42,6 +41,7 @@ obj-$(CONFIG_KEYBOARD_LPC32XX) += lpc32xx-keys.o
obj-$(CONFIG_KEYBOARD_MAPLE) += maple_keyb.o
obj-$(CONFIG_KEYBOARD_MATRIX) += matrix_keypad.o
obj-$(CONFIG_KEYBOARD_MAX7359) += max7359_keypad.o
+obj-$(CONFIG_KEYBOARD_MAX7360) += max7360-keypad.o
obj-$(CONFIG_KEYBOARD_MPR121) += mpr121_touchkey.o
obj-$(CONFIG_KEYBOARD_MT6779) += mt6779-keypad.o
obj-$(CONFIG_KEYBOARD_MTK_PMIC) += mtk-pmic-keys.o
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index 2b2aca08423a..414fbef4abf9 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -425,7 +425,7 @@ static int adp5588_gpio_add(struct adp5588_kpad *kpad)
kpad->gc.direction_input = adp5588_gpio_direction_input;
kpad->gc.direction_output = adp5588_gpio_direction_output;
kpad->gc.get = adp5588_gpio_get_value;
- kpad->gc.set_rv = adp5588_gpio_set_value;
+ kpad->gc.set = adp5588_gpio_set_value;
kpad->gc.set_config = adp5588_gpio_set_config;
kpad->gc.can_sleep = 1;
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index c1e53d87c8a7..f7209c8ebbcc 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -705,6 +705,12 @@ static int cros_ec_keyb_probe(struct platform_device *pdev)
ec = dev_get_drvdata(pdev->dev.parent);
if (!ec)
return -EPROBE_DEFER;
+ /*
+ * Even if the cros_ec_device pointer is available, we still need to
+ * check that the device is fully registered before using it.
+ */
+ if (!cros_ec_device_registered(ec))
+ return -EPROBE_DEFER;
ckdev = devm_kzalloc(dev, sizeof(*ckdev), GFP_KERNEL);
if (!ckdev)
diff --git a/drivers/input/keyboard/max7360-keypad.c b/drivers/input/keyboard/max7360-keypad.c
new file mode 100644
index 000000000000..503be952b0a6
--- /dev/null
+++ b/drivers/input/keyboard/max7360-keypad.c
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2025 Bootlin
+ *
+ * Author: Mathieu Dubois-Briand <mathieu.dubois-briand@bootlin.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/dev_printk.h>
+#include <linux/device/devres.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/input/matrix_keypad.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/max7360.h>
+#include <linux/mod_devicetable.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/regmap.h>
+
+struct max7360_keypad {
+ struct input_dev *input;
+ unsigned int rows;
+ unsigned int cols;
+ unsigned int debounce_ms;
+ int irq;
+ struct regmap *regmap;
+ unsigned short keycodes[MAX7360_MAX_KEY_ROWS * MAX7360_MAX_KEY_COLS];
+};
+
+static irqreturn_t max7360_keypad_irq(int irq, void *data)
+{
+ struct max7360_keypad *max7360_keypad = data;
+ struct device *dev = max7360_keypad->input->dev.parent;
+ unsigned int val;
+ unsigned int row, col;
+ unsigned int release;
+ unsigned int code;
+ int error;
+
+ error = regmap_read(max7360_keypad->regmap, MAX7360_REG_KEYFIFO, &val);
+ if (error) {
+ dev_err(dev, "Failed to read MAX7360 FIFO");
+ return IRQ_NONE;
+ }
+
+ /* FIFO overflow: ignore it and get next event. */
+ if (val == MAX7360_FIFO_OVERFLOW) {
+ dev_warn(dev, "max7360 FIFO overflow");
+ error = regmap_read_poll_timeout(max7360_keypad->regmap, MAX7360_REG_KEYFIFO,
+ val, val != MAX7360_FIFO_OVERFLOW, 0, 1000);
+ if (error) {
+ dev_err(dev, "Failed to empty MAX7360 FIFO");
+ return IRQ_NONE;
+ }
+ }
+
+ if (val == MAX7360_FIFO_EMPTY) {
+ dev_dbg(dev, "Got a spurious interrupt");
+
+ return IRQ_NONE;
+ }
+
+ row = FIELD_GET(MAX7360_FIFO_ROW, val);
+ col = FIELD_GET(MAX7360_FIFO_COL, val);
+ release = val & MAX7360_FIFO_RELEASE;
+
+ code = MATRIX_SCAN_CODE(row, col, get_count_order(max7360_keypad->cols));
+
+ dev_dbg(dev, "key[%d:%d] %s\n", row, col, release ? "release" : "press");
+
+ input_event(max7360_keypad->input, EV_MSC, MSC_SCAN, code);
+ input_report_key(max7360_keypad->input, max7360_keypad->keycodes[code], !release);
+ input_sync(max7360_keypad->input);
+
+ return IRQ_HANDLED;
+}
+
+static int max7360_keypad_open(struct input_dev *pdev)
+{
+ struct max7360_keypad *max7360_keypad = input_get_drvdata(pdev);
+ struct device *dev = max7360_keypad->input->dev.parent;
+ int error;
+
+ /* Somebody is using the device: get out of sleep. */
+ error = regmap_write_bits(max7360_keypad->regmap, MAX7360_REG_CONFIG,
+ MAX7360_CFG_SLEEP, MAX7360_CFG_SLEEP);
+ if (error)
+ dev_err(dev, "Failed to write max7360 configuration: %d\n", error);
+
+ return error;
+}
+
+static void max7360_keypad_close(struct input_dev *pdev)
+{
+ struct max7360_keypad *max7360_keypad = input_get_drvdata(pdev);
+ struct device *dev = max7360_keypad->input->dev.parent;
+ int error;
+
+ /* Nobody is using the device anymore: go to sleep. */
+ error = regmap_write_bits(max7360_keypad->regmap, MAX7360_REG_CONFIG, MAX7360_CFG_SLEEP, 0);
+ if (error)
+ dev_err(dev, "Failed to write max7360 configuration: %d\n", error);
+}
+
+static int max7360_keypad_hw_init(struct max7360_keypad *max7360_keypad)
+{
+ struct device *dev = max7360_keypad->input->dev.parent;
+ unsigned int val;
+ int error;
+
+ val = max7360_keypad->debounce_ms - MAX7360_DEBOUNCE_MIN;
+ error = regmap_write_bits(max7360_keypad->regmap, MAX7360_REG_DEBOUNCE,
+ MAX7360_DEBOUNCE,
+ FIELD_PREP(MAX7360_DEBOUNCE, val));
+ if (error)
+ return dev_err_probe(dev, error,
+ "Failed to write max7360 debounce configuration\n");
+
+ error = regmap_write_bits(max7360_keypad->regmap, MAX7360_REG_INTERRUPT,
+ MAX7360_INTERRUPT_TIME_MASK,
+ FIELD_PREP(MAX7360_INTERRUPT_TIME_MASK, 1));
+ if (error)
+ return dev_err_probe(dev, error,
+ "Failed to write max7360 keypad interrupt configuration\n");
+
+ return 0;
+}
+
+static int max7360_keypad_build_keymap(struct max7360_keypad *max7360_keypad)
+{
+ struct input_dev *input_dev = max7360_keypad->input;
+ struct device *dev = input_dev->dev.parent->parent;
+ struct matrix_keymap_data keymap_data;
+ const char *propname = "linux,keymap";
+ unsigned int max_keys;
+ int error;
+ int size;
+
+ size = device_property_count_u32(dev, propname);
+ if (size <= 0) {
+ dev_err(dev, "missing or malformed property %s: %d\n", propname, size);
+ return size < 0 ? size : -EINVAL;
+ }
+
+ max_keys = max7360_keypad->cols * max7360_keypad->rows;
+ if (size > max_keys) {
+ dev_err(dev, "%s size overflow (%d vs max %u)\n", propname, size, max_keys);
+ return -EINVAL;
+ }
+
+ u32 *keys __free(kfree) = kmalloc_array(size, sizeof(*keys), GFP_KERNEL);
+ if (!keys)
+ return -ENOMEM;
+
+ error = device_property_read_u32_array(dev, propname, keys, size);
+ if (error) {
+ dev_err(dev, "failed to read %s property: %d\n", propname, error);
+ return error;
+ }
+
+ keymap_data.keymap = keys;
+ keymap_data.keymap_size = size;
+ error = matrix_keypad_build_keymap(&keymap_data, NULL,
+ max7360_keypad->rows, max7360_keypad->cols,
+ max7360_keypad->keycodes, max7360_keypad->input);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static int max7360_keypad_parse_fw(struct device *dev,
+ struct max7360_keypad *max7360_keypad,
+ bool *autorepeat)
+{
+ int error;
+
+ error = matrix_keypad_parse_properties(dev->parent, &max7360_keypad->rows,
+ &max7360_keypad->cols);
+ if (error)
+ return error;
+
+ if (!max7360_keypad->rows || !max7360_keypad->cols ||
+ max7360_keypad->rows > MAX7360_MAX_KEY_ROWS ||
+ max7360_keypad->cols > MAX7360_MAX_KEY_COLS) {
+ dev_err(dev, "Invalid number of columns or rows (%ux%u)\n",
+ max7360_keypad->cols, max7360_keypad->rows);
+ return -EINVAL;
+ }
+
+ *autorepeat = device_property_read_bool(dev->parent, "autorepeat");
+
+ max7360_keypad->debounce_ms = MAX7360_DEBOUNCE_MIN;
+ error = device_property_read_u32(dev->parent, "keypad-debounce-delay-ms",
+ &max7360_keypad->debounce_ms);
+ if (error == -EINVAL) {
+ dev_info(dev, "Using default keypad-debounce-delay-ms: %u\n",
+ max7360_keypad->debounce_ms);
+ } else if (error < 0) {
+ dev_err(dev, "Failed to read keypad-debounce-delay-ms property\n");
+ return error;
+ }
+
+ if (!in_range(max7360_keypad->debounce_ms, MAX7360_DEBOUNCE_MIN,
+ MAX7360_DEBOUNCE_MAX - MAX7360_DEBOUNCE_MIN + 1)) {
+ dev_err(dev, "Invalid keypad-debounce-delay-ms: %u, should be between %u and %u.\n",
+ max7360_keypad->debounce_ms, MAX7360_DEBOUNCE_MIN, MAX7360_DEBOUNCE_MAX);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int max7360_keypad_probe(struct platform_device *pdev)
+{
+ struct max7360_keypad *max7360_keypad;
+ struct device *dev = &pdev->dev;
+ struct input_dev *input;
+ struct regmap *regmap;
+ bool autorepeat;
+ int error;
+ int irq;
+
+ regmap = dev_get_regmap(dev->parent, NULL);
+ if (!regmap)
+ return dev_err_probe(dev, -ENODEV, "Could not get parent regmap\n");
+
+ irq = fwnode_irq_get_byname(dev_fwnode(dev->parent), "intk");
+ if (irq < 0)
+ return dev_err_probe(dev, irq, "Failed to get IRQ\n");
+
+ max7360_keypad = devm_kzalloc(dev, sizeof(*max7360_keypad), GFP_KERNEL);
+ if (!max7360_keypad)
+ return -ENOMEM;
+
+ max7360_keypad->regmap = regmap;
+
+ error = max7360_keypad_parse_fw(dev, max7360_keypad, &autorepeat);
+ if (error)
+ return error;
+
+ input = devm_input_allocate_device(dev);
+ if (!input)
+ return -ENOMEM;
+
+ max7360_keypad->input = input;
+
+ input->id.bustype = BUS_I2C;
+ input->name = pdev->name;
+ input->open = max7360_keypad_open;
+ input->close = max7360_keypad_close;
+
+ error = max7360_keypad_build_keymap(max7360_keypad);
+ if (error)
+ return dev_err_probe(dev, error, "Failed to build keymap\n");
+
+ input_set_capability(input, EV_MSC, MSC_SCAN);
+ if (autorepeat)
+ __set_bit(EV_REP, input->evbit);
+
+ input_set_drvdata(input, max7360_keypad);
+
+ error = devm_request_threaded_irq(dev, irq, NULL, max7360_keypad_irq,
+ IRQF_ONESHOT,
+ "max7360-keypad", max7360_keypad);
+ if (error)
+ return dev_err_probe(dev, error, "Failed to register interrupt\n");
+
+ error = input_register_device(input);
+ if (error)
+ return dev_err_probe(dev, error, "Could not register input device\n");
+
+ error = max7360_keypad_hw_init(max7360_keypad);
+ if (error)
+ return dev_err_probe(dev, error, "Failed to initialize max7360 keypad\n");
+
+ device_init_wakeup(dev, true);
+ error = dev_pm_set_wake_irq(dev, irq);
+ if (error)
+ dev_warn(dev, "Failed to set up wakeup irq: %d\n", error);
+
+ return 0;
+}
+
+static void max7360_keypad_remove(struct platform_device *pdev)
+{
+ dev_pm_clear_wake_irq(&pdev->dev);
+ device_init_wakeup(&pdev->dev, false);
+}
+
+static struct platform_driver max7360_keypad_driver = {
+ .driver = {
+ .name = "max7360-keypad",
+ },
+ .probe = max7360_keypad_probe,
+ .remove = max7360_keypad_remove,
+};
+module_platform_driver(max7360_keypad_driver);
+
+MODULE_DESCRIPTION("MAX7360 Keypad driver");
+MODULE_AUTHOR("Mathieu Dubois-Briand <mathieu.dubois-briand@bootlin.com>");
+MODULE_LICENSE("GPL");
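How the interrupt handler above turns a FIFO event into a keymap index: MATRIX_SCAN_CODE(row, col, shift) expands to ((row) << (shift)) + (col), and get_count_order() rounds the column count up to a power-of-two shift. With an example 7-column geometry:

	unsigned int shift = get_count_order(7);		/* 3: next pow2 is 8 */
	unsigned int code = MATRIX_SCAN_CODE(2, 5, shift);	/* (2 << 3) + 5 = 21 */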
diff --git a/drivers/input/keyboard/mtk-pmic-keys.c b/drivers/input/keyboard/mtk-pmic-keys.c
index 50e2e792c91d..c78d9f6d97c4 100644
--- a/drivers/input/keyboard/mtk-pmic-keys.c
+++ b/drivers/input/keyboard/mtk-pmic-keys.c
@@ -55,6 +55,7 @@ struct mtk_pmic_regs {
const struct mtk_pmic_keys_regs keys_regs[MTK_PMIC_MAX_KEY_COUNT];
u32 pmic_rst_reg;
u32 rst_lprst_mask; /* Long-press reset timeout bitmask */
+ bool key_release_irq;
};
static const struct mtk_pmic_regs mt6397_regs = {
@@ -116,6 +117,7 @@ static const struct mtk_pmic_regs mt6358_regs = {
MTK_PMIC_HOMEKEY_RST),
.pmic_rst_reg = MT6358_TOP_RST_MISC,
.rst_lprst_mask = MTK_PMIC_RST_DU_MASK,
+ .key_release_irq = true,
};
static const struct mtk_pmic_regs mt6359_regs = {
@@ -129,6 +131,7 @@ static const struct mtk_pmic_regs mt6359_regs = {
MTK_PMIC_HOMEKEY_RST),
.pmic_rst_reg = MT6359_TOP_RST_MISC,
.rst_lprst_mask = MTK_PMIC_RST_DU_MASK,
+ .key_release_irq = true,
};
struct mtk_pmic_keys_info {
@@ -368,7 +371,7 @@ static int mtk_pmic_keys_probe(struct platform_device *pdev)
if (keys->keys[index].irq < 0)
return keys->keys[index].irq;
- if (of_device_is_compatible(node, "mediatek,mt6358-keys")) {
+ if (mtk_pmic_regs->key_release_irq) {
keys->keys[index].irq_r = platform_get_irq_byname(pdev,
irqnames_r[index]);
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index 38ec619aa359..4519eecb317b 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -12,7 +12,8 @@
* on some suggestions by Nicolas Pitre <nico@fluxnic.net>.
*/
-
+#include <linux/bits.h>
+#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -20,124 +21,148 @@
#include <linux/io.h>
#include <linux/device.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/input/matrix_keypad.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/platform_data/keypad-pxa27x.h>
/*
* Keypad Controller registers
*/
-#define KPC 0x0000 /* Keypad Control register */
-#define KPDK 0x0008 /* Keypad Direct Key register */
-#define KPREC 0x0010 /* Keypad Rotary Encoder register */
-#define KPMK 0x0018 /* Keypad Matrix Key register */
-#define KPAS 0x0020 /* Keypad Automatic Scan register */
+#define KPC 0x0000 /* Keypad Control register */
+#define KPDK 0x0008 /* Keypad Direct Key register */
+#define KPREC 0x0010 /* Keypad Rotary Encoder register */
+#define KPMK 0x0018 /* Keypad Matrix Key register */
+#define KPAS 0x0020 /* Keypad Automatic Scan register */
/* Keypad Automatic Scan Multiple Key Presser register 0-3 */
-#define KPASMKP0 0x0028
-#define KPASMKP1 0x0030
-#define KPASMKP2 0x0038
-#define KPASMKP3 0x0040
-#define KPKDI 0x0048
+#define KPASMKP0 0x0028
+#define KPASMKP1 0x0030
+#define KPASMKP2 0x0038
+#define KPASMKP3 0x0040
+#define KPKDI 0x0048
/* bit definitions */
-#define KPC_MKRN(n) ((((n) - 1) & 0x7) << 26) /* matrix key row number */
-#define KPC_MKCN(n) ((((n) - 1) & 0x7) << 23) /* matrix key column number */
-#define KPC_DKN(n) ((((n) - 1) & 0x7) << 6) /* direct key number */
-
-#define KPC_AS (0x1 << 30) /* Automatic Scan bit */
-#define KPC_ASACT (0x1 << 29) /* Automatic Scan on Activity */
-#define KPC_MI (0x1 << 22) /* Matrix interrupt bit */
-#define KPC_IMKP (0x1 << 21) /* Ignore Multiple Key Press */
-
-#define KPC_MS(n) (0x1 << (13 + (n))) /* Matrix scan line 'n' */
-#define KPC_MS_ALL (0xff << 13)
-
-#define KPC_ME (0x1 << 12) /* Matrix Keypad Enable */
-#define KPC_MIE (0x1 << 11) /* Matrix Interrupt Enable */
-#define KPC_DK_DEB_SEL (0x1 << 9) /* Direct Keypad Debounce Select */
-#define KPC_DI (0x1 << 5) /* Direct key interrupt bit */
-#define KPC_RE_ZERO_DEB (0x1 << 4) /* Rotary Encoder Zero Debounce */
-#define KPC_REE1 (0x1 << 3) /* Rotary Encoder1 Enable */
-#define KPC_REE0 (0x1 << 2) /* Rotary Encoder0 Enable */
-#define KPC_DE (0x1 << 1) /* Direct Keypad Enable */
-#define KPC_DIE (0x1 << 0) /* Direct Keypad interrupt Enable */
-
-#define KPDK_DKP (0x1 << 31)
-#define KPDK_DK(n) ((n) & 0xff)
-
-#define KPREC_OF1 (0x1 << 31)
-#define kPREC_UF1 (0x1 << 30)
-#define KPREC_OF0 (0x1 << 15)
-#define KPREC_UF0 (0x1 << 14)
-
-#define KPREC_RECOUNT0(n) ((n) & 0xff)
-#define KPREC_RECOUNT1(n) (((n) >> 16) & 0xff)
-
-#define KPMK_MKP (0x1 << 31)
-#define KPAS_SO (0x1 << 31)
-#define KPASMKPx_SO (0x1 << 31)
-
-#define KPAS_MUKP(n) (((n) >> 26) & 0x1f)
-#define KPAS_RP(n) (((n) >> 4) & 0xf)
-#define KPAS_CP(n) ((n) & 0xf)
-
-#define KPASMKP_MKC_MASK (0xff)
+#define KPC_MKRN_MASK GENMASK(28, 26)
+#define KPC_MKCN_MASK GENMASK(25, 23)
+#define KPC_DKN_MASK GENMASK(8, 6)
+#define KPC_MKRN(n) FIELD_PREP(KPC_MKRN_MASK, (n) - 1)
+#define KPC_MKCN(n) FIELD_PREP(KPC_MKCN_MASK, (n) - 1)
+#define KPC_DKN(n) FIELD_PREP(KPC_DKN_MASK, (n) - 1)
+
+#define KPC_AS BIT(30) /* Automatic Scan bit */
+#define KPC_ASACT BIT(29) /* Automatic Scan on Activity */
+#define KPC_MI BIT(22) /* Matrix interrupt bit */
+#define KPC_IMKP BIT(21) /* Ignore Multiple Key Press */
+
+#define KPC_MS(n) BIT(13 + (n)) /* Matrix scan line 'n' */
+#define KPC_MS_ALL GENMASK(20, 13)
+
+#define KPC_ME BIT(12) /* Matrix Keypad Enable */
+#define KPC_MIE BIT(11) /* Matrix Interrupt Enable */
+#define KPC_DK_DEB_SEL BIT(9) /* Direct Keypad Debounce Select */
+#define KPC_DI BIT(5) /* Direct key interrupt bit */
+#define KPC_RE_ZERO_DEB BIT(4) /* Rotary Encoder Zero Debounce */
+#define KPC_REE1 BIT(3) /* Rotary Encoder1 Enable */
+#define KPC_REE0 BIT(2) /* Rotary Encoder0 Enable */
+#define KPC_DE BIT(1) /* Direct Keypad Enable */
+#define KPC_DIE BIT(0) /* Direct Keypad interrupt Enable */
+
+#define KPDK_DKP BIT(31)
+#define KPDK_DK_MASK GENMASK(7, 0)
+#define KPDK_DK(n) FIELD_GET(KPDK_DK_MASK, n)
+
+#define KPREC_OF1 BIT(31)
+#define KPREC_UF1 BIT(30)
+#define KPREC_OF0 BIT(15)
+#define KPREC_UF0 BIT(14)
+
+#define KPREC_RECOUNT0_MASK GENMASK(7, 0)
+#define KPREC_RECOUNT1_MASK GENMASK(23, 16)
+#define KPREC_RECOUNT0(n) FIELD_GET(KPREC_RECOUNT0_MASK, n)
+#define KPREC_RECOUNT1(n) FIELD_GET(KPREC_RECOUNT1_MASK, n)
+
+#define KPMK_MKP BIT(31)
+#define KPAS_SO BIT(31)
+#define KPASMKPx_SO BIT(31)
+
+#define KPAS_MUKP_MASK GENMASK(30, 26)
+#define KPAS_RP_MASK GENMASK(7, 4)
+#define KPAS_CP_MASK GENMASK(3, 0)
+#define KPAS_MUKP(n) FIELD_GET(KPAS_MUKP_MASK, n)
+#define KPAS_RP(n) FIELD_GET(KPAS_RP_MASK, n)
+#define KPAS_CP(n) FIELD_GET(KPAS_CP_MASK, n)
+
+#define KPASMKP_MKC_MASK GENMASK(7, 0)
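What the GENMASK/FIELD_PREP conversion buys: each field's position and width live in a single mask, and constant overflows are caught at compile time. For the matrix-key row-number field (bits 28:26):

	/* KPC_MKRN_MASK == GENMASK(28, 26) == 0x1c000000 */
	u32 kpc = KPC_MKRN(8);				/* FIELD_PREP(mask, 7) == 7 << 26 */
	u32 rows = FIELD_GET(KPC_MKRN_MASK, kpc) + 1;	/* recovers 8 */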
#define keypad_readl(off) __raw_readl(keypad->mmio_base + (off))
#define keypad_writel(off, v) __raw_writel((v), keypad->mmio_base + (off))
+#define MAX_MATRIX_KEY_ROWS 8
+#define MAX_MATRIX_KEY_COLS 8
+#define MAX_DIRECT_KEY_NUM 8
+#define MAX_ROTARY_ENCODERS 2
+
#define MAX_MATRIX_KEY_NUM (MAX_MATRIX_KEY_ROWS * MAX_MATRIX_KEY_COLS)
#define MAX_KEYPAD_KEYS (MAX_MATRIX_KEY_NUM + MAX_DIRECT_KEY_NUM)
-struct pxa27x_keypad {
- const struct pxa27x_keypad_platform_data *pdata;
+struct pxa27x_keypad_rotary {
+ unsigned short *key_codes;
+ int rel_code;
+ bool enabled;
+};
+struct pxa27x_keypad {
struct clk *clk;
struct input_dev *input_dev;
void __iomem *mmio_base;
int irq;
- unsigned short keycodes[MAX_KEYPAD_KEYS];
- int rotary_rel_code[2];
-
+ unsigned int matrix_key_rows;
+ unsigned int matrix_key_cols;
unsigned int row_shift;
+ unsigned int direct_key_num;
+ unsigned int direct_key_mask;
+ bool direct_key_low_active;
+
+ /* key debounce interval */
+ unsigned int debounce_interval;
+
+ unsigned short keycodes[MAX_KEYPAD_KEYS];
+
/* state row bits of each column scan */
- uint32_t matrix_key_state[MAX_MATRIX_KEY_COLS];
- uint32_t direct_key_state;
+ u32 matrix_key_state[MAX_MATRIX_KEY_COLS];
+ u32 direct_key_state;
- unsigned int direct_key_mask;
+ struct pxa27x_keypad_rotary rotary[MAX_ROTARY_ENCODERS];
};
-#ifdef CONFIG_OF
-static int pxa27x_keypad_matrix_key_parse_dt(struct pxa27x_keypad *keypad,
- struct pxa27x_keypad_platform_data *pdata)
+static int pxa27x_keypad_matrix_key_parse(struct pxa27x_keypad *keypad)
{
struct input_dev *input_dev = keypad->input_dev;
struct device *dev = input_dev->dev.parent;
- u32 rows, cols;
int error;
- error = matrix_keypad_parse_properties(dev, &rows, &cols);
+ error = matrix_keypad_parse_properties(dev, &keypad->matrix_key_rows,
+ &keypad->matrix_key_cols);
if (error)
return error;
- if (rows > MAX_MATRIX_KEY_ROWS || cols > MAX_MATRIX_KEY_COLS) {
+ if (keypad->matrix_key_rows > MAX_MATRIX_KEY_ROWS ||
+ keypad->matrix_key_cols > MAX_MATRIX_KEY_COLS) {
dev_err(dev, "rows or cols exceeds maximum value\n");
return -EINVAL;
}
- pdata->matrix_key_rows = rows;
- pdata->matrix_key_cols = cols;
+ keypad->row_shift = get_count_order(keypad->matrix_key_cols);
error = matrix_keypad_build_keymap(NULL, NULL,
- pdata->matrix_key_rows,
- pdata->matrix_key_cols,
+ keypad->matrix_key_rows,
+ keypad->matrix_key_cols,
keypad->keycodes, input_dev);
if (error)
return error;
@@ -145,20 +170,17 @@ static int pxa27x_keypad_matrix_key_parse_dt(struct pxa27x_keypad *keypad,
return 0;
}
-static int pxa27x_keypad_direct_key_parse_dt(struct pxa27x_keypad *keypad,
- struct pxa27x_keypad_platform_data *pdata)
+static int pxa27x_keypad_direct_key_parse(struct pxa27x_keypad *keypad)
{
struct input_dev *input_dev = keypad->input_dev;
struct device *dev = input_dev->dev.parent;
- struct device_node *np = dev->of_node;
- const __be16 *prop;
unsigned short code;
- unsigned int proplen, size;
+ int count;
int i;
int error;
- error = of_property_read_u32(np, "marvell,direct-key-count",
- &pdata->direct_key_num);
+ error = device_property_read_u32(dev, "marvell,direct-key-count",
+ &keypad->direct_key_num);
if (error) {
/*
		 * If we do not have marvell,direct-key-count defined,
@@ -167,151 +189,121 @@ static int pxa27x_keypad_direct_key_parse_dt(struct pxa27x_keypad *keypad,
return error == -EINVAL ? 0 : error;
}
- error = of_property_read_u32(np, "marvell,direct-key-mask",
- &pdata->direct_key_mask);
+ error = device_property_read_u32(dev, "marvell,direct-key-mask",
+ &keypad->direct_key_mask);
if (error) {
if (error != -EINVAL)
return error;
/*
		 * If marvell,direct-key-mask is not defined, the driver will use
-		 * default value. Default value is set when configure the keypad.
+		 * a default value based on the number of direct keys set up.
+		 * The default value is calculated in pxa27x_keypad_config().
*/
- pdata->direct_key_mask = 0;
+ keypad->direct_key_mask = 0;
}
- pdata->direct_key_low_active = of_property_read_bool(np,
- "marvell,direct-key-low-active");
-
- prop = of_get_property(np, "marvell,direct-key-map", &proplen);
- if (!prop)
- return -EINVAL;
+ keypad->direct_key_low_active =
+ device_property_read_bool(dev, "marvell,direct-key-low-active");
- if (proplen % sizeof(u16))
+ count = device_property_count_u16(dev, "marvell,direct-key-map");
+ if (count <= 0 || count > MAX_DIRECT_KEY_NUM)
return -EINVAL;
- size = proplen / sizeof(u16);
+	error = device_property_read_u16_array(dev, "marvell,direct-key-map",
+					       &keypad->keycodes[MAX_MATRIX_KEY_NUM],
+					       count);
+	if (error)
+		return error;
- /* Only MAX_DIRECT_KEY_NUM is accepted.*/
- if (size > MAX_DIRECT_KEY_NUM)
- return -EINVAL;
-
- for (i = 0; i < size; i++) {
- code = be16_to_cpup(prop + i);
- keypad->keycodes[MAX_MATRIX_KEY_NUM + i] = code;
+ for (i = 0; i < count; i++) {
+ code = keypad->keycodes[MAX_MATRIX_KEY_NUM + i];
__set_bit(code, input_dev->keybit);
}
return 0;
}
-static int pxa27x_keypad_rotary_parse_dt(struct pxa27x_keypad *keypad,
- struct pxa27x_keypad_platform_data *pdata)
+static int pxa27x_keypad_rotary_parse(struct pxa27x_keypad *keypad)
{
- const __be32 *prop;
- int i, relkey_ret;
- unsigned int code, proplen;
- const char *rotaryname[2] = {
- "marvell,rotary0", "marvell,rotary1"};
- const char relkeyname[] = {"marvell,rotary-rel-key"};
+ static const char * const rotaryname[] = { "marvell,rotary0", "marvell,rotary1" };
struct input_dev *input_dev = keypad->input_dev;
struct device *dev = input_dev->dev.parent;
- struct device_node *np = dev->of_node;
-
- relkey_ret = of_property_read_u32(np, relkeyname, &code);
- /* if can read correct rotary key-code, we do not need this. */
- if (relkey_ret == 0) {
- unsigned short relcode;
+ struct pxa27x_keypad_rotary *encoder;
+ unsigned int code;
+ int i;
+ int error;
- /* rotary0 taks lower half, rotary1 taks upper half. */
- relcode = code & 0xffff;
- pdata->rotary0_rel_code = (code & 0xffff);
- __set_bit(relcode, input_dev->relbit);
+ error = device_property_read_u32(dev, "marvell,rotary-rel-key", &code);
+ if (!error) {
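+		/* rotary0 takes the lower 16 bits, rotary1 the upper 16 bits */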
+ for (i = 0; i < MAX_ROTARY_ENCODERS; i++, code >>= 16) {
+ encoder = &keypad->rotary[i];
+ encoder->enabled = true;
+ encoder->rel_code = code & 0xffff;
+ input_set_capability(input_dev, EV_REL, encoder->rel_code);
+ }
- relcode = code >> 16;
- pdata->rotary1_rel_code = relcode;
- __set_bit(relcode, input_dev->relbit);
+ return 0;
}
- for (i = 0; i < 2; i++) {
- prop = of_get_property(np, rotaryname[i], &proplen);
+ for (i = 0; i < MAX_ROTARY_ENCODERS; i++) {
+ encoder = &keypad->rotary[i];
+
/*
		 * If the property is not set, the keypad does not need to
		 * initialize rotaryX.
*/
- if (!prop)
+ if (!device_property_present(dev, rotaryname[i]))
continue;
- code = be32_to_cpup(prop);
+ error = device_property_read_u32(dev, rotaryname[i], &code);
+ if (error)
+ return error;
+
/*
		 * Both the up and down key codes must be valid here;
		 * otherwise the marvell,rotary-rel-key property must be used.
*/
- if ((!(code & 0xffff) || !(code >> 16)) && relkey_ret) {
- return relkey_ret;
- } else {
- unsigned int n = MAX_MATRIX_KEY_NUM + (i << 1);
- unsigned short keycode;
-
- keycode = code & 0xffff;
- keypad->keycodes[n] = keycode;
- __set_bit(keycode, input_dev->keybit);
-
- keycode = code >> 16;
- keypad->keycodes[n + 1] = keycode;
- __set_bit(keycode, input_dev->keybit);
-
- if (i == 0)
- pdata->rotary0_rel_code = -1;
- else
- pdata->rotary1_rel_code = -1;
- }
- if (i == 0)
- pdata->enable_rotary0 = 1;
- else
- pdata->enable_rotary1 = 1;
- }
+ if (!(code & 0xffff) || !(code >> 16))
+ return -EINVAL;
+
+ encoder->enabled = true;
+ encoder->rel_code = -1;
+ encoder->key_codes = &keypad->keycodes[MAX_MATRIX_KEY_NUM + i * 2];
+ encoder->key_codes[0] = code & 0xffff;
+ encoder->key_codes[1] = code >> 16;
- keypad->rotary_rel_code[0] = pdata->rotary0_rel_code;
- keypad->rotary_rel_code[1] = pdata->rotary1_rel_code;
+ input_set_capability(input_dev, EV_KEY, encoder->key_codes[0]);
+ input_set_capability(input_dev, EV_KEY, encoder->key_codes[1]);
+ }
return 0;
}
-static int pxa27x_keypad_build_keycode_from_dt(struct pxa27x_keypad *keypad)
+static int pxa27x_keypad_parse_properties(struct pxa27x_keypad *keypad)
{
struct input_dev *input_dev = keypad->input_dev;
struct device *dev = input_dev->dev.parent;
- struct device_node *np = dev->of_node;
- struct pxa27x_keypad_platform_data *pdata;
int error;
- pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- dev_err(dev, "failed to allocate memory for pdata\n");
- return -ENOMEM;
- }
-
- error = pxa27x_keypad_matrix_key_parse_dt(keypad, pdata);
+ error = pxa27x_keypad_matrix_key_parse(keypad);
if (error) {
dev_err(dev, "failed to parse matrix key\n");
return error;
}
- error = pxa27x_keypad_direct_key_parse_dt(keypad, pdata);
+ error = pxa27x_keypad_direct_key_parse(keypad);
if (error) {
dev_err(dev, "failed to parse direct key\n");
return error;
}
- error = pxa27x_keypad_rotary_parse_dt(keypad, pdata);
+ error = pxa27x_keypad_rotary_parse(keypad);
if (error) {
dev_err(dev, "failed to parse rotary key\n");
return error;
}
- error = of_property_read_u32(np, "marvell,debounce-interval",
- &pdata->debounce_interval);
+ error = device_property_read_u32(dev, "marvell,debounce-interval",
+ &keypad->debounce_interval);
if (error) {
dev_err(dev, "failed to parse debounce-interval\n");
return error;
@@ -323,95 +315,15 @@ static int pxa27x_keypad_build_keycode_from_dt(struct pxa27x_keypad *keypad)
*/
input_dev->keycodemax = ARRAY_SIZE(keypad->keycodes);
- keypad->pdata = pdata;
- return 0;
-}
-
-#else
-
-static int pxa27x_keypad_build_keycode_from_dt(struct pxa27x_keypad *keypad)
-{
- dev_info(keypad->input_dev->dev.parent, "missing platform data\n");
-
- return -EINVAL;
-}
-
-#endif
-
-static int pxa27x_keypad_build_keycode(struct pxa27x_keypad *keypad)
-{
- const struct pxa27x_keypad_platform_data *pdata = keypad->pdata;
- struct input_dev *input_dev = keypad->input_dev;
- unsigned short keycode;
- int i;
- int error;
-
- error = matrix_keypad_build_keymap(pdata->matrix_keymap_data, NULL,
- pdata->matrix_key_rows,
- pdata->matrix_key_cols,
- keypad->keycodes, input_dev);
- if (error)
- return error;
-
- /*
- * The keycodes may not only include matrix keys but also the direct
- * or rotary keys.
- */
- input_dev->keycodemax = ARRAY_SIZE(keypad->keycodes);
-
- /* For direct keys. */
- for (i = 0; i < pdata->direct_key_num; i++) {
- keycode = pdata->direct_key_map[i];
- keypad->keycodes[MAX_MATRIX_KEY_NUM + i] = keycode;
- __set_bit(keycode, input_dev->keybit);
- }
-
- if (pdata->enable_rotary0) {
- if (pdata->rotary0_up_key && pdata->rotary0_down_key) {
- keycode = pdata->rotary0_up_key;
- keypad->keycodes[MAX_MATRIX_KEY_NUM + 0] = keycode;
- __set_bit(keycode, input_dev->keybit);
-
- keycode = pdata->rotary0_down_key;
- keypad->keycodes[MAX_MATRIX_KEY_NUM + 1] = keycode;
- __set_bit(keycode, input_dev->keybit);
-
- keypad->rotary_rel_code[0] = -1;
- } else {
- keypad->rotary_rel_code[0] = pdata->rotary0_rel_code;
- __set_bit(pdata->rotary0_rel_code, input_dev->relbit);
- }
- }
-
- if (pdata->enable_rotary1) {
- if (pdata->rotary1_up_key && pdata->rotary1_down_key) {
- keycode = pdata->rotary1_up_key;
- keypad->keycodes[MAX_MATRIX_KEY_NUM + 2] = keycode;
- __set_bit(keycode, input_dev->keybit);
-
- keycode = pdata->rotary1_down_key;
- keypad->keycodes[MAX_MATRIX_KEY_NUM + 3] = keycode;
- __set_bit(keycode, input_dev->keybit);
-
- keypad->rotary_rel_code[1] = -1;
- } else {
- keypad->rotary_rel_code[1] = pdata->rotary1_rel_code;
- __set_bit(pdata->rotary1_rel_code, input_dev->relbit);
- }
- }
-
- __clear_bit(KEY_RESERVED, input_dev->keybit);
-
return 0;
}
static void pxa27x_keypad_scan_matrix(struct pxa27x_keypad *keypad)
{
- const struct pxa27x_keypad_platform_data *pdata = keypad->pdata;
struct input_dev *input_dev = keypad->input_dev;
int row, col, num_keys_pressed = 0;
- uint32_t new_state[MAX_MATRIX_KEY_COLS];
- uint32_t kpas = keypad_readl(KPAS);
+ u32 new_state[MAX_MATRIX_KEY_COLS];
+ u32 kpas = keypad_readl(KPAS);
num_keys_pressed = KPAS_MUKP(kpas);
@@ -425,19 +337,19 @@ static void pxa27x_keypad_scan_matrix(struct pxa27x_keypad *keypad)
row = KPAS_RP(kpas);
/* if invalid row/col, treat as no key pressed */
- if (col >= pdata->matrix_key_cols ||
- row >= pdata->matrix_key_rows)
+ if (col >= keypad->matrix_key_cols ||
+ row >= keypad->matrix_key_rows)
goto scan;
- new_state[col] = (1 << row);
+ new_state[col] = BIT(row);
goto scan;
}
if (num_keys_pressed > 1) {
- uint32_t kpasmkp0 = keypad_readl(KPASMKP0);
- uint32_t kpasmkp1 = keypad_readl(KPASMKP1);
- uint32_t kpasmkp2 = keypad_readl(KPASMKP2);
- uint32_t kpasmkp3 = keypad_readl(KPASMKP3);
+ u32 kpasmkp0 = keypad_readl(KPASMKP0);
+ u32 kpasmkp1 = keypad_readl(KPASMKP1);
+ u32 kpasmkp2 = keypad_readl(KPASMKP2);
+ u32 kpasmkp3 = keypad_readl(KPASMKP3);
new_state[0] = kpasmkp0 & KPASMKP_MKC_MASK;
new_state[1] = (kpasmkp0 >> 16) & KPASMKP_MKC_MASK;
@@ -449,23 +361,23 @@ static void pxa27x_keypad_scan_matrix(struct pxa27x_keypad *keypad)
new_state[7] = (kpasmkp3 >> 16) & KPASMKP_MKC_MASK;
}
scan:
- for (col = 0; col < pdata->matrix_key_cols; col++) {
- uint32_t bits_changed;
+ for (col = 0; col < keypad->matrix_key_cols; col++) {
+ u32 bits_changed;
int code;
bits_changed = keypad->matrix_key_state[col] ^ new_state[col];
if (bits_changed == 0)
continue;
- for (row = 0; row < pdata->matrix_key_rows; row++) {
- if ((bits_changed & (1 << row)) == 0)
+ for (row = 0; row < keypad->matrix_key_rows; row++) {
+ if ((bits_changed & BIT(row)) == 0)
continue;
code = MATRIX_SCAN_CODE(row, col, keypad->row_shift);
input_event(input_dev, EV_MSC, MSC_SCAN, code);
input_report_key(input_dev, keypad->keycodes[code],
- new_state[col] & (1 << row));
+ new_state[col] & BIT(row));
}
}
input_sync(input_dev);
@@ -474,7 +386,7 @@ scan:
#define DEFAULT_KPREC (0x007f007f)
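/* 0x7f is the mid-point count for each rotary encoder */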
-static inline int rotary_delta(uint32_t kprec)
+static inline int rotary_delta(u32 kprec)
{
if (kprec & KPREC_OF0)
return (kprec & 0xff) + 0x7f;
@@ -486,14 +398,16 @@ static inline int rotary_delta(uint32_t kprec)
static void report_rotary_event(struct pxa27x_keypad *keypad, int r, int delta)
{
+ struct pxa27x_keypad_rotary *encoder = &keypad->rotary[r];
struct input_dev *dev = keypad->input_dev;
- if (delta == 0)
+ if (!encoder->enabled || delta == 0)
return;
- if (keypad->rotary_rel_code[r] == -1) {
- int code = MAX_MATRIX_KEY_NUM + 2 * r + (delta > 0 ? 0 : 1);
- unsigned char keycode = keypad->keycodes[code];
+ if (encoder->rel_code == -1) {
+ int idx = delta > 0 ? 0 : 1;
+ int code = MAX_MATRIX_KEY_NUM + 2 * r + idx;
+ unsigned char keycode = encoder->key_codes[idx];
/* simulate a press-n-release */
input_event(dev, EV_MSC, MSC_SCAN, code);
@@ -503,45 +417,43 @@ static void report_rotary_event(struct pxa27x_keypad *keypad, int r, int delta)
input_report_key(dev, keycode, 0);
input_sync(dev);
} else {
- input_report_rel(dev, keypad->rotary_rel_code[r], delta);
+ input_report_rel(dev, encoder->rel_code, delta);
input_sync(dev);
}
}
static void pxa27x_keypad_scan_rotary(struct pxa27x_keypad *keypad)
{
- const struct pxa27x_keypad_platform_data *pdata = keypad->pdata;
- uint32_t kprec;
+ u32 kprec;
+ int i;
/* read and reset to default count value */
kprec = keypad_readl(KPREC);
keypad_writel(KPREC, DEFAULT_KPREC);
-	if (pdata->enable_rotary0)
-		report_rotary_event(keypad, 0, rotary_delta(kprec));
-
-	if (pdata->enable_rotary1)
-		report_rotary_event(keypad, 1, rotary_delta(kprec >> 16));
+	for (i = 0; i < MAX_ROTARY_ENCODERS; i++) {
+		report_rotary_event(keypad, i, rotary_delta(kprec));
+		kprec >>= 16;
+	}
}
static void pxa27x_keypad_scan_direct(struct pxa27x_keypad *keypad)
{
- const struct pxa27x_keypad_platform_data *pdata = keypad->pdata;
struct input_dev *input_dev = keypad->input_dev;
unsigned int new_state;
- uint32_t kpdk, bits_changed;
+ u32 kpdk, bits_changed;
int i;
kpdk = keypad_readl(KPDK);
- if (pdata->enable_rotary0 || pdata->enable_rotary1)
+ if (keypad->rotary[0].enabled || keypad->rotary[1].enabled)
pxa27x_keypad_scan_rotary(keypad);
/*
	 * KPDK_DK only outputs the key pin level, which is board specific,
	 * so a low level may be the active one.
*/
- if (pdata->direct_key_low_active)
+ if (keypad->direct_key_low_active)
new_state = ~KPDK_DK(kpdk) & keypad->direct_key_mask;
else
new_state = KPDK_DK(kpdk) & keypad->direct_key_mask;
@@ -551,34 +463,24 @@ static void pxa27x_keypad_scan_direct(struct pxa27x_keypad *keypad)
if (bits_changed == 0)
return;
- for (i = 0; i < pdata->direct_key_num; i++) {
- if (bits_changed & (1 << i)) {
+ for (i = 0; i < keypad->direct_key_num; i++) {
+ if (bits_changed & BIT(i)) {
int code = MAX_MATRIX_KEY_NUM + i;
input_event(input_dev, EV_MSC, MSC_SCAN, code);
input_report_key(input_dev, keypad->keycodes[code],
- new_state & (1 << i));
+ new_state & BIT(i));
}
}
input_sync(input_dev);
keypad->direct_key_state = new_state;
}
-static void clear_wakeup_event(struct pxa27x_keypad *keypad)
-{
- const struct pxa27x_keypad_platform_data *pdata = keypad->pdata;
-
- if (pdata->clear_wakeup_event)
- (pdata->clear_wakeup_event)();
-}
-
static irqreturn_t pxa27x_keypad_irq_handler(int irq, void *dev_id)
{
struct pxa27x_keypad *keypad = dev_id;
unsigned long kpc = keypad_readl(KPC);
- clear_wakeup_event(keypad);
-
if (kpc & KPC_DI)
pxa27x_keypad_scan_direct(keypad);
@@ -590,7 +492,6 @@ static irqreturn_t pxa27x_keypad_irq_handler(int irq, void *dev_id)
static void pxa27x_keypad_config(struct pxa27x_keypad *keypad)
{
- const struct pxa27x_keypad_platform_data *pdata = keypad->pdata;
unsigned int mask = 0, direct_key_num = 0;
unsigned long kpc = 0;
@@ -598,36 +499,34 @@ static void pxa27x_keypad_config(struct pxa27x_keypad *keypad)
keypad_readl(KPC);
/* enable matrix keys with automatic scan */
- if (pdata->matrix_key_rows && pdata->matrix_key_cols) {
+ if (keypad->matrix_key_rows && keypad->matrix_key_cols) {
kpc |= KPC_ASACT | KPC_MIE | KPC_ME | KPC_MS_ALL;
- kpc |= KPC_MKRN(pdata->matrix_key_rows) |
- KPC_MKCN(pdata->matrix_key_cols);
+ kpc |= KPC_MKRN(keypad->matrix_key_rows) |
+ KPC_MKCN(keypad->matrix_key_cols);
}
/* enable rotary key, debounce interval same as direct keys */
- if (pdata->enable_rotary0) {
+ if (keypad->rotary[0].enabled) {
mask |= 0x03;
direct_key_num = 2;
kpc |= KPC_REE0;
}
- if (pdata->enable_rotary1) {
+ if (keypad->rotary[1].enabled) {
mask |= 0x0c;
direct_key_num = 4;
kpc |= KPC_REE1;
}
- if (pdata->direct_key_num > direct_key_num)
- direct_key_num = pdata->direct_key_num;
+ if (keypad->direct_key_num > direct_key_num)
+ direct_key_num = keypad->direct_key_num;
/*
	 * Direct key usage may not start from KP_DKIN0; check the platform
	 * mask data to configure the specific pins.
*/
- if (pdata->direct_key_mask)
- keypad->direct_key_mask = pdata->direct_key_mask;
- else
- keypad->direct_key_mask = ((1 << direct_key_num) - 1) & ~mask;
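+	/* pins already claimed by the rotary encoders are excluded via 'mask' */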
+ if (!keypad->direct_key_mask)
+ keypad->direct_key_mask = GENMASK(direct_key_num - 1, 0) & ~mask;
/* enable direct key */
if (direct_key_num)
@@ -635,7 +534,7 @@ static void pxa27x_keypad_config(struct pxa27x_keypad *keypad)
keypad_writel(KPC, kpc | KPC_RE_ZERO_DEB);
keypad_writel(KPREC, DEFAULT_KPREC);
- keypad_writel(KPKDI, pdata->debounce_interval);
+ keypad_writel(KPKDI, keypad->debounce_interval);
}
static int pxa27x_keypad_open(struct input_dev *dev)
@@ -709,19 +608,12 @@ static int pxa27x_keypad_resume(struct device *dev)
static DEFINE_SIMPLE_DEV_PM_OPS(pxa27x_keypad_pm_ops,
pxa27x_keypad_suspend, pxa27x_keypad_resume);
-
static int pxa27x_keypad_probe(struct platform_device *pdev)
{
- const struct pxa27x_keypad_platform_data *pdata =
- dev_get_platdata(&pdev->dev);
- struct device_node *np = pdev->dev.of_node;
struct pxa27x_keypad *keypad;
struct input_dev *input_dev;
- int irq, error;
-
- /* Driver need build keycode from device tree or pdata */
- if (!np && !pdata)
- return -EINVAL;
+ int irq;
+ int error;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -736,7 +628,6 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
if (!input_dev)
return -ENOMEM;
- keypad->pdata = pdata;
keypad->input_dev = input_dev;
keypad->irq = irq;
@@ -765,29 +656,12 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
input_set_capability(input_dev, EV_MSC, MSC_SCAN);
- if (pdata) {
- error = pxa27x_keypad_build_keycode(keypad);
- } else {
- error = pxa27x_keypad_build_keycode_from_dt(keypad);
- /*
- * Data that we get from DT resides in dynamically
- * allocated memory so we need to update our pdata
- * pointer.
- */
- pdata = keypad->pdata;
- }
+ error = pxa27x_keypad_parse_properties(keypad);
if (error) {
- dev_err(&pdev->dev, "failed to build keycode\n");
+ dev_err(&pdev->dev, "failed to parse keypad properties\n");
return error;
}
- keypad->row_shift = get_count_order(pdata->matrix_key_cols);
-
- if ((pdata->enable_rotary0 && keypad->rotary_rel_code[0] != -1) ||
- (pdata->enable_rotary1 && keypad->rotary_rel_code[1] != -1)) {
- input_dev->evbit[0] |= BIT_MASK(EV_REL);
- }
-
error = devm_request_irq(&pdev->dev, irq, pxa27x_keypad_irq_handler,
0, pdev->name, keypad);
if (error) {
diff --git a/drivers/input/keyboard/spear-keyboard.c b/drivers/input/keyboard/spear-keyboard.c
index 2fae337562a2..53f3ac64c980 100644
--- a/drivers/input/keyboard/spear-keyboard.c
+++ b/drivers/input/keyboard/spear-keyboard.c
@@ -14,6 +14,7 @@
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/input.h>
+#include <linux/input/matrix_keypad.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
@@ -22,7 +23,6 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
-#include <linux/platform_data/keyboard-spear.h>
/* Keyboard Registers */
#define MODE_CTL_REG 0x00
@@ -56,13 +56,12 @@ struct spear_kbd {
void __iomem *io_base;
struct clk *clk;
unsigned int irq;
- unsigned int mode;
- unsigned int suspended_rate;
+ u32 mode;
+ u32 suspended_rate;
+ u32 mode_ctl_reg;
unsigned short last_key;
unsigned short keycodes[NUM_ROWS * NUM_COLS];
- bool rep;
bool irq_wake_enabled;
- u32 mode_ctl_reg;
};
static irqreturn_t spear_kbd_interrupt(int irq, void *dev_id)
@@ -143,46 +142,8 @@ static void spear_kbd_close(struct input_dev *dev)
kbd->last_key = KEY_RESERVED;
}
-#ifdef CONFIG_OF
-static int spear_kbd_parse_dt(struct platform_device *pdev,
- struct spear_kbd *kbd)
-{
- struct device_node *np = pdev->dev.of_node;
- int error;
- u32 val, suspended_rate;
-
- if (!np) {
- dev_err(&pdev->dev, "Missing DT data\n");
- return -EINVAL;
- }
-
- if (of_property_read_bool(np, "autorepeat"))
- kbd->rep = true;
-
- if (of_property_read_u32(np, "suspended_rate", &suspended_rate))
- kbd->suspended_rate = suspended_rate;
-
- error = of_property_read_u32(np, "st,mode", &val);
- if (error) {
- dev_err(&pdev->dev, "DT: Invalid or missing mode\n");
- return error;
- }
-
- kbd->mode = val;
- return 0;
-}
-#else
-static inline int spear_kbd_parse_dt(struct platform_device *pdev,
- struct spear_kbd *kbd)
-{
- return -ENOSYS;
-}
-#endif
-
static int spear_kbd_probe(struct platform_device *pdev)
{
- struct kbd_platform_data *pdata = dev_get_platdata(&pdev->dev);
- const struct matrix_keymap_data *keymap = pdata ? pdata->keymap : NULL;
struct spear_kbd *kbd;
struct input_dev *input_dev;
int irq;
@@ -198,6 +159,14 @@ static int spear_kbd_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ error = device_property_read_u32(&pdev->dev, "st,mode", &kbd->mode);
+ if (error) {
+ dev_err(&pdev->dev, "Invalid or missing mode\n");
+ return error;
+ }
+
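+	/* suspended_rate is optional; the default is kept if it is absent */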
+ device_property_read_u32(&pdev->dev, "suspended_rate", &kbd->suspended_rate);
+
input_dev = devm_input_allocate_device(&pdev->dev);
if (!input_dev) {
dev_err(&pdev->dev, "unable to allocate input device\n");
@@ -207,16 +176,6 @@ static int spear_kbd_probe(struct platform_device *pdev)
kbd->input = input_dev;
kbd->irq = irq;
- if (!pdata) {
- error = spear_kbd_parse_dt(pdev, kbd);
- if (error)
- return error;
- } else {
- kbd->mode = pdata->mode;
- kbd->rep = pdata->rep;
- kbd->suspended_rate = pdata->suspended_rate;
- }
-
kbd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(kbd->io_base))
return PTR_ERR(kbd->io_base);
@@ -234,21 +193,21 @@ static int spear_kbd_probe(struct platform_device *pdev)
input_dev->open = spear_kbd_open;
input_dev->close = spear_kbd_close;
- error = matrix_keypad_build_keymap(keymap, NULL, NUM_ROWS, NUM_COLS,
+ error = matrix_keypad_build_keymap(NULL, NULL, NUM_ROWS, NUM_COLS,
kbd->keycodes, input_dev);
if (error) {
dev_err(&pdev->dev, "Failed to build keymap\n");
return error;
}
- if (kbd->rep)
+ if (device_property_read_bool(&pdev->dev, "autorepeat"))
__set_bit(EV_REP, input_dev->evbit);
input_set_capability(input_dev, EV_MSC, MSC_SCAN);
input_set_drvdata(input_dev, kbd);
error = devm_request_irq(&pdev->dev, irq, spear_kbd_interrupt, 0,
- "keyboard", kbd);
+ "keyboard", kbd);
if (error) {
dev_err(&pdev->dev, "request_irq failed\n");
return error;
diff --git a/drivers/input/keyboard/tca6416-keypad.c b/drivers/input/keyboard/tca6416-keypad.c
deleted file mode 100644
index fbc674d7b9f0..000000000000
--- a/drivers/input/keyboard/tca6416-keypad.c
+++ /dev/null
@@ -1,305 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Driver for keys on TCA6416 I2C IO expander
- *
- * Copyright (C) 2010 Texas Instruments
- *
- * Author : Sriramakrishnan.A.G. <srk@ti.com>
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/workqueue.h>
-#include <linux/i2c.h>
-#include <linux/input.h>
-#include <linux/tca6416_keypad.h>
-
-#define TCA6416_INPUT 0
-#define TCA6416_OUTPUT 1
-#define TCA6416_INVERT 2
-#define TCA6416_DIRECTION 3
-
-#define TCA6416_POLL_INTERVAL 100 /* msec */
-
-static const struct i2c_device_id tca6416_id[] = {
- { "tca6416-keys", 16, },
- { "tca6408-keys", 8, },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, tca6416_id);
-
-struct tca6416_keypad_chip {
- uint16_t reg_output;
- uint16_t reg_direction;
- uint16_t reg_input;
-
- struct i2c_client *client;
- struct input_dev *input;
- int io_size;
- u16 pinmask;
- bool use_polling;
- struct tca6416_button buttons[];
-};
-
-static int tca6416_write_reg(struct tca6416_keypad_chip *chip, int reg, u16 val)
-{
- int error;
-
- error = chip->io_size > 8 ?
- i2c_smbus_write_word_data(chip->client, reg << 1, val) :
- i2c_smbus_write_byte_data(chip->client, reg, val);
- if (error < 0) {
- dev_err(&chip->client->dev,
- "%s failed, reg: %d, val: %d, error: %d\n",
- __func__, reg, val, error);
- return error;
- }
-
- return 0;
-}
-
-static int tca6416_read_reg(struct tca6416_keypad_chip *chip, int reg, u16 *val)
-{
- int retval;
-
- retval = chip->io_size > 8 ?
- i2c_smbus_read_word_data(chip->client, reg << 1) :
- i2c_smbus_read_byte_data(chip->client, reg);
- if (retval < 0) {
- dev_err(&chip->client->dev, "%s failed, reg: %d, error: %d\n",
- __func__, reg, retval);
- return retval;
- }
-
- *val = (u16)retval;
- return 0;
-}
-
-static void tca6416_keys_scan(struct input_dev *input)
-{
- struct tca6416_keypad_chip *chip = input_get_drvdata(input);
- u16 reg_val, val;
- int error, i, pin_index;
-
- error = tca6416_read_reg(chip, TCA6416_INPUT, &reg_val);
- if (error)
- return;
-
- reg_val &= chip->pinmask;
-
- /* Figure out which lines have changed */
- val = reg_val ^ chip->reg_input;
- chip->reg_input = reg_val;
-
- for (i = 0, pin_index = 0; i < 16; i++) {
- if (val & (1 << i)) {
- struct tca6416_button *button = &chip->buttons[pin_index];
- unsigned int type = button->type ?: EV_KEY;
- int state = ((reg_val & (1 << i)) ? 1 : 0)
- ^ button->active_low;
-
- input_event(input, type, button->code, !!state);
- input_sync(input);
- }
-
- if (chip->pinmask & (1 << i))
- pin_index++;
- }
-}
-
-/*
- * This is threaded IRQ handler and this can (and will) sleep.
- */
-static irqreturn_t tca6416_keys_isr(int irq, void *dev_id)
-{
- tca6416_keys_scan(dev_id);
-
- return IRQ_HANDLED;
-}
-
-static int tca6416_keys_open(struct input_dev *dev)
-{
- struct tca6416_keypad_chip *chip = input_get_drvdata(dev);
-
- if (!chip->use_polling) {
- /* Get initial device state in case it has switches */
- tca6416_keys_scan(dev);
- enable_irq(chip->client->irq);
- }
-
- return 0;
-}
-
-static void tca6416_keys_close(struct input_dev *dev)
-{
- struct tca6416_keypad_chip *chip = input_get_drvdata(dev);
-
- if (!chip->use_polling)
- disable_irq(chip->client->irq);
-}
-
-static int tca6416_setup_registers(struct tca6416_keypad_chip *chip)
-{
- int error;
-
- error = tca6416_read_reg(chip, TCA6416_OUTPUT, &chip->reg_output);
- if (error)
- return error;
-
- error = tca6416_read_reg(chip, TCA6416_DIRECTION, &chip->reg_direction);
- if (error)
- return error;
-
- /* ensure that keypad pins are set to input */
- error = tca6416_write_reg(chip, TCA6416_DIRECTION,
- chip->reg_direction | chip->pinmask);
- if (error)
- return error;
-
- error = tca6416_read_reg(chip, TCA6416_DIRECTION, &chip->reg_direction);
- if (error)
- return error;
-
- error = tca6416_read_reg(chip, TCA6416_INPUT, &chip->reg_input);
- if (error)
- return error;
-
- chip->reg_input &= chip->pinmask;
-
- return 0;
-}
-
-static int tca6416_keypad_probe(struct i2c_client *client)
-{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
- struct tca6416_keys_platform_data *pdata;
- struct tca6416_keypad_chip *chip;
- struct input_dev *input;
- int error;
- int i;
-
- /* Check functionality */
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) {
- dev_err(&client->dev, "%s adapter not supported\n",
- dev_driver_string(&client->adapter->dev));
- return -ENODEV;
- }
-
- pdata = dev_get_platdata(&client->dev);
- if (!pdata) {
- dev_dbg(&client->dev, "no platform data\n");
- return -EINVAL;
- }
-
- chip = devm_kzalloc(&client->dev,
- struct_size(chip, buttons, pdata->nbuttons),
- GFP_KERNEL);
- if (!chip)
- return -ENOMEM;
-
- input = devm_input_allocate_device(&client->dev);
- if (!input)
- return -ENOMEM;
-
- chip->client = client;
- chip->input = input;
- chip->io_size = id->driver_data;
- chip->pinmask = pdata->pinmask;
- chip->use_polling = pdata->use_polling;
-
- input->phys = "tca6416-keys/input0";
- input->name = client->name;
-
- input->open = tca6416_keys_open;
- input->close = tca6416_keys_close;
-
- input->id.bustype = BUS_HOST;
- input->id.vendor = 0x0001;
- input->id.product = 0x0001;
- input->id.version = 0x0100;
-
- /* Enable auto repeat feature of Linux input subsystem */
- if (pdata->rep)
- __set_bit(EV_REP, input->evbit);
-
- for (i = 0; i < pdata->nbuttons; i++) {
- unsigned int type;
-
- chip->buttons[i] = pdata->buttons[i];
- type = (pdata->buttons[i].type) ?: EV_KEY;
- input_set_capability(input, type, pdata->buttons[i].code);
- }
-
- input_set_drvdata(input, chip);
-
- /*
- * Initialize cached registers from their original values.
- * we can't share this chip with another i2c master.
- */
- error = tca6416_setup_registers(chip);
- if (error)
- return error;
-
- if (chip->use_polling) {
- error = input_setup_polling(input, tca6416_keys_scan);
- if (error) {
- dev_err(&client->dev, "Failed to setup polling\n");
- return error;
- }
-
- input_set_poll_interval(input, TCA6416_POLL_INTERVAL);
- } else {
- error = devm_request_threaded_irq(&client->dev, client->irq,
- NULL, tca6416_keys_isr,
- IRQF_TRIGGER_FALLING |
- IRQF_ONESHOT |
- IRQF_NO_AUTOEN,
- "tca6416-keypad", input);
- if (error) {
- dev_dbg(&client->dev,
- "Unable to claim irq %d; error %d\n",
- client->irq, error);
- return error;
- }
- }
-
- error = input_register_device(input);
- if (error) {
- dev_dbg(&client->dev,
- "Unable to register input device, error: %d\n", error);
- return error;
- }
-
- i2c_set_clientdata(client, chip);
-
- return 0;
-}
-
-static struct i2c_driver tca6416_keypad_driver = {
- .driver = {
- .name = "tca6416-keypad",
- },
- .probe = tca6416_keypad_probe,
- .id_table = tca6416_id,
-};
-
-static int __init tca6416_keypad_init(void)
-{
- return i2c_add_driver(&tca6416_keypad_driver);
-}
-
-subsys_initcall(tca6416_keypad_init);
-
-static void __exit tca6416_keypad_exit(void)
-{
- i2c_del_driver(&tca6416_keypad_driver);
-}
-module_exit(tca6416_keypad_exit);
-
-MODULE_AUTHOR("Sriramakrishnan <srk@ti.com>");
-MODULE_DESCRIPTION("Keypad driver over tca6416 IO expander");
-MODULE_LICENSE("GPL");
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
index 76fc19ffe21d..68c0afafee7b 100644
--- a/drivers/input/keyboard/tca8418_keypad.c
+++ b/drivers/input/keyboard/tca8418_keypad.c
@@ -373,18 +373,7 @@ static struct i2c_driver tca8418_keypad_driver = {
.probe = tca8418_keypad_probe,
.id_table = tca8418_id,
};
-
-static int __init tca8418_keypad_init(void)
-{
- return i2c_add_driver(&tca8418_keypad_driver);
-}
-subsys_initcall(tca8418_keypad_init);
-
-static void __exit tca8418_keypad_exit(void)
-{
- i2c_del_driver(&tca8418_keypad_driver);
-}
-module_exit(tca8418_keypad_exit);
+module_i2c_driver(tca8418_keypad_driver);
MODULE_AUTHOR("Kyle Manna <kyle.manna@fuel7.com>");
MODULE_DESCRIPTION("Keypad driver for TCA8418");
diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
index 77e0743a3cf8..5e3d17c5dc9b 100644
--- a/drivers/input/keyboard/twl4030_keypad.c
+++ b/drivers/input/keyboard/twl4030_keypad.c
@@ -28,10 +28,6 @@
* an internal state machine that decodes pressed keys, including
* multi-key combinations.
*
- * This driver lets boards define what keycodes they wish to report for
- * which scancodes, as part of the "struct twl4030_keypad_data" used in
- * the probe() routine.
- *
* See the TPS65950 documentation; that's the general availability
* version of the TWL5030 second generation part.
*/
@@ -47,7 +43,6 @@
struct twl4030_keypad {
unsigned short keymap[TWL4030_KEYMAP_SIZE];
u16 kp_state[TWL4030_MAX_ROWS];
- bool autorepeat;
unsigned int n_rows;
unsigned int n_cols;
int irq;
@@ -322,8 +317,6 @@ static int twl4030_kp_program(struct twl4030_keypad *kp)
*/
static int twl4030_kp_probe(struct platform_device *pdev)
{
- struct twl4030_keypad_data *pdata = dev_get_platdata(&pdev->dev);
- const struct matrix_keymap_data *keymap_data = NULL;
struct twl4030_keypad *kp;
struct input_dev *input;
u8 reg;
@@ -350,24 +343,10 @@ static int twl4030_kp_probe(struct platform_device *pdev)
input->id.product = 0x0001;
input->id.version = 0x0003;
- if (pdata) {
- if (!pdata->rows || !pdata->cols || !pdata->keymap_data) {
- dev_err(&pdev->dev, "Missing platform_data\n");
- return -EINVAL;
- }
-
- kp->n_rows = pdata->rows;
- kp->n_cols = pdata->cols;
- kp->autorepeat = pdata->rep;
- keymap_data = pdata->keymap_data;
- } else {
- error = matrix_keypad_parse_properties(&pdev->dev, &kp->n_rows,
- &kp->n_cols);
- if (error)
- return error;
-
- kp->autorepeat = true;
- }
+ error = matrix_keypad_parse_properties(&pdev->dev,
+ &kp->n_rows, &kp->n_cols);
+ if (error)
+ return error;
if (kp->n_rows > TWL4030_MAX_ROWS || kp->n_cols > TWL4030_MAX_COLS) {
dev_err(&pdev->dev,
@@ -379,7 +358,7 @@ static int twl4030_kp_probe(struct platform_device *pdev)
if (kp->irq < 0)
return kp->irq;
- error = matrix_keypad_build_keymap(keymap_data, NULL,
+ error = matrix_keypad_build_keymap(NULL, NULL,
TWL4030_MAX_ROWS,
1 << TWL4030_ROW_SHIFT,
kp->keymap, input);
@@ -389,9 +368,7 @@ static int twl4030_kp_probe(struct platform_device *pdev)
}
input_set_capability(input, EV_MSC, MSC_SCAN);
- /* Enable auto repeat feature of Linux input subsystem */
- if (kp->autorepeat)
- __set_bit(EV_REP, input->evbit);
+ __set_bit(EV_REP, input->evbit);
error = input_register_device(input);
if (error) {
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 0fb21c99a5e3..cc2558630797 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -126,6 +126,17 @@ config INPUT_ATMEL_CAPTOUCH
To compile this driver as a module, choose M here: the
module will be called atmel_captouch.
+config INPUT_AW86927
+ tristate "Awinic AW86927 Haptic Driver Support"
+ depends on I2C && INPUT
+ select INPUT_FF_MEMLESS
+ select REGMAP_I2C
+ help
+ Say Y here if you have an Awinic AW86927 haptic chip.
+
+ To compile this driver as a module, choose M here: the
+ module will be called aw86927.
+
config INPUT_BBNSM_PWRKEY
tristate "NXP BBNSM Power Key Driver"
depends on ARCH_MXC || COMPILE_TEST
@@ -230,6 +241,16 @@ config INPUT_M68K_BEEP
tristate "M68k Beeper support"
depends on M68K
+config INPUT_MAX7360_ROTARY
+ tristate "Maxim MAX7360 Rotary Encoder"
+ depends on MFD_MAX7360
+ help
+ If you say yes here you get support for the rotary encoder on the
+ Maxim MAX7360 I/O Expander.
+
+ To compile this driver as a module, choose M here: the module will be
+ called max7360_rotary.
+
config INPUT_MAX77650_ONKEY
tristate "Maxim MAX77650 ONKEY support"
depends on MFD_MAX77650
@@ -506,6 +527,16 @@ config INPUT_TPS65219_PWRBUTTON
To compile this driver as a module, choose M here. The module will
be called tps65219-pwrbutton.
+config INPUT_TPS6594_PWRBUTTON
+ tristate "TPS6594 Power button driver"
+ depends on MFD_TPS6594
+ help
+ Say Y here if you want to enable power button reporting for
+ TPS6594 Power Management IC devices.
+
+ To compile this driver as a module, choose M here. The module will
+ be called tps6594-pwrbutton.
+
config INPUT_AXP20X_PEK
tristate "X-Powers AXP20X power button driver"
depends on MFD_AXP20X
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index d468c8140b93..f5ebfa9d9983 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_INPUT_ATC260X_ONKEY) += atc260x-onkey.o
obj-$(CONFIG_INPUT_ATI_REMOTE2) += ati_remote2.o
obj-$(CONFIG_INPUT_ATLAS_BTNS) += atlas_btns.o
obj-$(CONFIG_INPUT_ATMEL_CAPTOUCH) += atmel_captouch.o
+obj-$(CONFIG_INPUT_AW86927) += aw86927.o
obj-$(CONFIG_INPUT_BBNSM_PWRKEY) += nxp-bbnsm-pwrkey.o
obj-$(CONFIG_INPUT_BMA150) += bma150.o
obj-$(CONFIG_INPUT_CM109) += cm109.o
@@ -51,6 +52,7 @@ obj-$(CONFIG_INPUT_IQS7222) += iqs7222.o
obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
obj-$(CONFIG_INPUT_KXTJ9) += kxtj9.o
obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
+obj-$(CONFIG_INPUT_MAX7360_ROTARY) += max7360-rotary.o
obj-$(CONFIG_INPUT_MAX77650_ONKEY) += max77650-onkey.o
obj-$(CONFIG_INPUT_MAX77693_HAPTIC) += max77693-haptic.o
obj-$(CONFIG_INPUT_MAX8925_ONKEY) += max8925_onkey.o
@@ -83,6 +85,7 @@ obj-$(CONFIG_INPUT_SPARCSPKR) += sparcspkr.o
obj-$(CONFIG_INPUT_STPMIC1_ONKEY) += stpmic1_onkey.o
obj-$(CONFIG_INPUT_TPS65218_PWRBUTTON) += tps65218-pwrbutton.o
obj-$(CONFIG_INPUT_TPS65219_PWRBUTTON) += tps65219-pwrbutton.o
+obj-$(CONFIG_INPUT_TPS6594_PWRBUTTON) += tps6594-pwrbutton.o
obj-$(CONFIG_INPUT_TWL4030_PWRBUTTON) += twl4030-pwrbutton.o
obj-$(CONFIG_INPUT_TWL4030_VIBRA) += twl4030-vibra.o
obj-$(CONFIG_INPUT_TWL6040_VIBRA) += twl6040-vibra.o
diff --git a/drivers/input/misc/ad714x.c b/drivers/input/misc/ad714x.c
index d106f37df6bc..c9fa789337ba 100644
--- a/drivers/input/misc/ad714x.c
+++ b/drivers/input/misc/ad714x.c
@@ -6,6 +6,7 @@
*/
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c
index 7cafbf8d5f1a..ac7674647c09 100644
--- a/drivers/input/misc/adxl34x.c
+++ b/drivers/input/misc/adxl34x.c
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
diff --git a/drivers/input/misc/aw86927.c b/drivers/input/misc/aw86927.c
new file mode 100644
index 000000000000..8ad361239cfe
--- /dev/null
+++ b/drivers/input/misc/aw86927.c
@@ -0,0 +1,846 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025 Griffin Kroah-Hartman <griffin.kroah@fairphone.com>
+ *
+ * Partially based on vendor driver:
+ * Copyright (c) 2021 AWINIC Technology CO., LTD
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+
+#define AW86927_RSTCFG_REG 0x00
+#define AW86927_RSTCFG_SOFTRST 0xaa
+
+#define AW86927_SYSINT_REG 0x02
+#define AW86927_SYSINT_BST_SCPI BIT(7)
+#define AW86927_SYSINT_BST_OVPI BIT(6)
+#define AW86927_SYSINT_UVLI BIT(5)
+#define AW86927_SYSINT_FF_AEI BIT(4)
+#define AW86927_SYSINT_FF_AFI BIT(3)
+#define AW86927_SYSINT_OCDI BIT(2)
+#define AW86927_SYSINT_OTI BIT(1)
+#define AW86927_SYSINT_DONEI BIT(0)
+
+#define AW86927_SYSINTM_REG 0x03
+#define AW86927_SYSINTM_BST_OVPM BIT(6)
+#define AW86927_SYSINTM_FF_AEM BIT(4)
+#define AW86927_SYSINTM_FF_AFM BIT(3)
+#define AW86927_SYSINTM_DONEM BIT(0)
+
+#define AW86927_PLAYCFG1_REG 0x06
+#define AW86927_PLAYCFG1_BST_MODE_MASK GENMASK(7, 7)
+#define AW86927_PLAYCFG1_BST_MODE_BYPASS 0
+#define AW86927_PLAYCFG1_BST_VOUT_VREFSET_MASK GENMASK(6, 0)
+#define AW86927_PLAYCFG1_BST_8500MV 0x50
+
+#define AW86927_PLAYCFG2_REG 0x07
+
+#define AW86927_PLAYCFG3_REG 0x08
+#define AW86927_PLAYCFG3_AUTO_BST_MASK GENMASK(4, 4)
+#define AW86927_PLAYCFG3_AUTO_BST_ENABLE 1
+#define AW86927_PLAYCFG3_AUTO_BST_DISABLE 0
+#define AW86927_PLAYCFG3_PLAY_MODE_MASK GENMASK(1, 0)
+#define AW86927_PLAYCFG3_PLAY_MODE_RAM 0
+
+#define AW86927_PLAYCFG4_REG 0x09
+#define AW86927_PLAYCFG4_STOP BIT(1)
+#define AW86927_PLAYCFG4_GO BIT(0)
+
+#define AW86927_WAVCFG1_REG 0x0a
+#define AW86927_WAVCFG1_WAVSEQ1_MASK GENMASK(6, 0)
+
+#define AW86927_WAVCFG2_REG 0x0b
+#define AW86927_WAVCFG2_WAVSEQ2_MASK GENMASK(6, 0)
+
+#define AW86927_WAVCFG9_REG 0x12
+#define AW86927_WAVCFG9_SEQ1LOOP_MASK GENMASK(7, 4)
+#define AW86927_WAVCFG9_SEQ1LOOP_INFINITELY 0x0f
+
+#define AW86927_CONTCFG1_REG 0x18
+#define AW86927_CONTCFG1_BRK_BST_MD_MASK GENMASK(6, 6)
+
+#define AW86927_CONTCFG5_REG 0x1c
+#define AW86927_CONTCFG5_BST_BRK_GAIN_MASK GENMASK(7, 4)
+#define AW86927_CONTCFG5_BRK_GAIN_MASK GENMASK(3, 0)
+
+#define AW86927_CONTCFG10_REG 0x21
+#define AW86927_CONTCFG10_BRK_TIME_MASK GENMASK(7, 0)
+#define AW86927_CONTCFG10_BRK_TIME_DEFAULT 8
+
+#define AW86927_CONTCFG13_REG 0x24
+#define AW86927_CONTCFG13_TSET_MASK GENMASK(7, 4)
+#define AW86927_CONTCFG13_BEME_SET_MASK GENMASK(3, 0)
+
+#define AW86927_BASEADDRH_REG 0x2d
+#define AW86927_BASEADDRL_REG 0x2e
+
+#define AW86927_GLBRD5_REG 0x3f
+#define AW86927_GLBRD5_STATE_MASK GENMASK(3, 0)
+#define AW86927_GLBRD5_STATE_STANDBY 0
+
+#define AW86927_RAMADDRH_REG 0x40
+
+#define AW86927_RAMADDRL_REG 0x41
+
+#define AW86927_RAMDATA_REG 0x42
+
+#define AW86927_SYSCTRL3_REG 0x45
+#define AW86927_SYSCTRL3_STANDBY_MASK GENMASK(5, 5)
+#define AW86927_SYSCTRL3_STANDBY_ON 1
+#define AW86927_SYSCTRL3_STANDBY_OFF 0
+#define AW86927_SYSCTRL3_EN_RAMINIT_MASK GENMASK(2, 2)
+#define AW86927_SYSCTRL3_EN_RAMINIT_ON 1
+#define AW86927_SYSCTRL3_EN_RAMINIT_OFF 0
+
+#define AW86927_SYSCTRL4_REG 0x46
+#define AW86927_SYSCTRL4_WAVDAT_MODE_MASK GENMASK(6, 5)
+#define AW86927_SYSCTRL4_WAVDAT_24K 0
+#define AW86927_SYSCTRL4_INT_EDGE_MODE_MASK GENMASK(4, 4)
+#define AW86927_SYSCTRL4_INT_EDGE_MODE_POS 0
+#define AW86927_SYSCTRL4_INT_MODE_MASK GENMASK(3, 3)
+#define AW86927_SYSCTRL4_INT_MODE_EDGE 1
+#define AW86927_SYSCTRL4_GAIN_BYPASS_MASK GENMASK(0, 0)
+
+#define AW86927_PWMCFG1_REG 0x48
+#define AW86927_PWMCFG1_PRC_EN_MASK GENMASK(7, 7)
+#define AW86927_PWMCFG1_PRC_DISABLE 0
+
+#define AW86927_PWMCFG3_REG 0x4a
+#define AW86927_PWMCFG3_PR_EN_MASK GENMASK(7, 7)
+#define AW86927_PWMCFG3_PRCTIME_MASK GENMASK(6, 0)
+
+#define AW86927_PWMCFG4_REG 0x4b
+#define AW86927_PWMCFG4_PRTIME_MASK GENMASK(7, 0)
+
+#define AW86927_VBATCTRL_REG 0x4c
+#define AW86927_VBATCTRL_VBAT_MODE_MASK GENMASK(6, 6)
+#define AW86927_VBATCTRL_VBAT_MODE_SW 0
+
+#define AW86927_DETCFG1_REG 0x4d
+#define AW86927_DETCFG1_DET_GO_MASK GENMASK(1, 0)
+#define AW86927_DETCFG1_DET_GO_DET_SEQ0 1
+#define AW86927_DETCFG1_DET_GO_NA 0
+
+#define AW86927_DETCFG2_REG 0x4e
+#define AW86927_DETCFG2_DET_SEQ0_MASK GENMASK(6, 3)
+#define AW86927_DETCFG2_DET_SEQ0_VBAT 0
+#define AW86927_DETCFG2_D2S_GAIN_MASK GENMASK(2, 0)
+#define AW86927_DETCFG2_D2S_GAIN_10 4
+
+#define AW86927_CHIPIDH_REG 0x57
+#define AW86927_CHIPIDL_REG 0x58
+#define AW86927_CHIPID 0x9270
+
+#define AW86927_TMCFG_REG 0x5b
+#define AW86927_TMCFG_UNLOCK 0x7d
+#define AW86927_TMCFG_LOCK 0x00
+
+#define AW86927_ANACFG11_REG 0x70
+
+#define AW86927_ANACFG12_REG 0x71
+#define AW86927_ANACFG12_BST_SKIP_MASK GENMASK(7, 7)
+#define AW86927_ANACFG12_BST_SKIP_SHUTDOWN 1
+
+#define AW86927_ANACFG13_REG 0x72
+#define AW86927_ANACFG13_BST_PC_MASK GENMASK(7, 4)
+#define AW86927_ANACFG13_BST_PEAKCUR_3P45A 6
+
+#define AW86927_ANACFG15_REG 0x74
+#define AW86927_ANACFG15_BST_PEAK_MODE_MASK GENMASK(7, 7)
+#define AW86927_ANACFG15_BST_PEAK_BACK 1
+
+#define AW86927_ANACFG16_REG 0x75
+#define AW86927_ANACFG16_BST_SRC_MASK GENMASK(4, 4)
+#define AW86927_ANACFG16_BST_SRC_3NS 0
+
+/* default value of base addr */
+#define AW86927_RAM_BASE_ADDR 0x800
+#define AW86927_BASEADDRH_VAL 0x08
+#define AW86927_BASEADDRL_VAL 0x00
+
+enum aw86927_work_mode {
+ AW86927_STANDBY_MODE,
+ AW86927_RAM_MODE,
+};
+
+struct aw86927_data {
+ struct work_struct play_work;
+ struct device *dev;
+ struct input_dev *input_dev;
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct gpio_desc *reset_gpio;
+ bool running;
+};
+
+static const struct regmap_config aw86927_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_NONE,
+ .max_register = 0x80,
+};
+
+/*
+ * Sine wave representing the magnitude of the drive to be used.
+ * Data is encoded in two's complement.
+ * round(84 * sin(x / 16.25))
+ */
+static const u8 aw86927_waveform[] = {
+ 0x00, 0x05, 0x0a, 0x0f, 0x14, 0x1a, 0x1f, 0x23, 0x28, 0x2d, 0x31, 0x35,
+ 0x39, 0x3d, 0x41, 0x44, 0x47, 0x4a, 0x4c, 0x4f, 0x51, 0x52, 0x53, 0x54,
+ 0x55, 0x55, 0x55, 0x55, 0x55, 0x54, 0x52, 0x51, 0x4f, 0x4d, 0x4a, 0x47,
+ 0x44, 0x41, 0x3d, 0x3a, 0x36, 0x31, 0x2d, 0x28, 0x24, 0x1f, 0x1a, 0x15,
+ 0x10, 0x0a, 0x05, 0x00, 0xfc, 0xf6, 0xf1, 0xec, 0xe7, 0xe2, 0xdd, 0xd8,
+ 0xd4, 0xcf, 0xcb, 0xc7, 0xc3, 0xbf, 0xbc, 0xb9, 0xb6, 0xb4, 0xb1, 0xb0,
+ 0xae, 0xad, 0xac, 0xab, 0xab, 0xab, 0xab, 0xab, 0xac, 0xae, 0xaf, 0xb1,
+ 0xb3, 0xb6, 0xb8, 0xbc, 0xbf, 0xc2, 0xc6, 0xca, 0xce, 0xd3, 0xd7, 0xdc,
+ 0xe1, 0xe6, 0xeb, 0xf0, 0xf5, 0xfb
+};
+
+struct aw86927_sram_waveform_header {
+ u8 version;
+ __be16 start_address;
+ __be16 end_address;
+} __packed;
+
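+/* Addresses are inclusive, so end_address points at the last waveform byte */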
+static const struct aw86927_sram_waveform_header sram_waveform_header = {
+ .version = 0x01,
+ .start_address = cpu_to_be16(AW86927_RAM_BASE_ADDR +
+ sizeof(struct aw86927_sram_waveform_header)),
+ .end_address = cpu_to_be16(AW86927_RAM_BASE_ADDR +
+ sizeof(struct aw86927_sram_waveform_header) +
+ ARRAY_SIZE(aw86927_waveform) - 1),
+};
+
+static int aw86927_wait_enter_standby(struct aw86927_data *haptics)
+{
+ unsigned int reg_val;
+ int err;
+
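+	/* poll every 2.5 ms, give up after 250 ms */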
+ err = regmap_read_poll_timeout(haptics->regmap, AW86927_GLBRD5_REG, reg_val,
+ (FIELD_GET(AW86927_GLBRD5_STATE_MASK, reg_val) ==
+ AW86927_GLBRD5_STATE_STANDBY),
+ 2500, 2500 * 100);
+
+ if (err) {
+ dev_err(haptics->dev, "did not enter standby: %d\n", err);
+ return err;
+ }
+ return 0;
+}
+
+static int aw86927_play_mode(struct aw86927_data *haptics, u8 play_mode)
+{
+ int err;
+
+ switch (play_mode) {
+ case AW86927_STANDBY_MODE:
+ /* Briefly toggle standby, then toggle back to standby off */
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_SYSCTRL3_REG,
+ AW86927_SYSCTRL3_STANDBY_MASK,
+ FIELD_PREP(AW86927_SYSCTRL3_STANDBY_MASK,
+ AW86927_SYSCTRL3_STANDBY_ON));
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_SYSCTRL3_REG,
+ AW86927_SYSCTRL3_STANDBY_MASK,
+ FIELD_PREP(AW86927_SYSCTRL3_STANDBY_MASK,
+ AW86927_SYSCTRL3_STANDBY_OFF));
+ if (err)
+ return err;
+
+ break;
+
+ case AW86927_RAM_MODE:
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_PLAYCFG3_REG,
+ AW86927_PLAYCFG3_PLAY_MODE_MASK,
+ FIELD_PREP(AW86927_PLAYCFG3_PLAY_MODE_MASK,
+ AW86927_PLAYCFG3_PLAY_MODE_RAM));
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_PLAYCFG1_REG,
+ AW86927_PLAYCFG1_BST_MODE_MASK,
+ FIELD_PREP(AW86927_PLAYCFG1_BST_MODE_MASK,
+ AW86927_PLAYCFG1_BST_MODE_BYPASS));
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_VBATCTRL_REG,
+ AW86927_VBATCTRL_VBAT_MODE_MASK,
+ FIELD_PREP(AW86927_VBATCTRL_VBAT_MODE_MASK,
+ AW86927_VBATCTRL_VBAT_MODE_SW));
+ if (err)
+ return err;
+
+ break;
+ }
+
+ return 0;
+}
+
+static int aw86927_stop(struct aw86927_data *haptics)
+{
+ int err;
+
+ err = regmap_write(haptics->regmap, AW86927_PLAYCFG4_REG, AW86927_PLAYCFG4_STOP);
+ if (err) {
+ dev_err(haptics->dev, "Failed to stop playback: %d\n", err);
+ return err;
+ }
+
+ err = aw86927_wait_enter_standby(haptics);
+ if (err) {
+ dev_err(haptics->dev, "Failed to enter standby, trying to force it\n");
+ err = aw86927_play_mode(haptics, AW86927_STANDBY_MODE);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int aw86927_haptics_play(struct input_dev *dev, void *data, struct ff_effect *effect)
+{
+ struct aw86927_data *haptics = input_get_drvdata(dev);
+ int level;
+
+ level = effect->u.rumble.strong_magnitude;
+ if (!level)
+ level = effect->u.rumble.weak_magnitude;
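+	/* any non-zero magnitude requests playback; zero requests stop */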
+
+ /* If already running, don't restart playback */
+ if (haptics->running && level)
+ return 0;
+
+ haptics->running = level;
+ schedule_work(&haptics->play_work);
+
+ return 0;
+}
+
+static int aw86927_play_sine(struct aw86927_data *haptics)
+{
+ int err;
+
+ err = aw86927_stop(haptics);
+ if (err)
+ return err;
+
+ err = aw86927_play_mode(haptics, AW86927_RAM_MODE);
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap, AW86927_PLAYCFG3_REG,
+ AW86927_PLAYCFG3_AUTO_BST_MASK,
+ FIELD_PREP(AW86927_PLAYCFG3_AUTO_BST_MASK,
+ AW86927_PLAYCFG3_AUTO_BST_ENABLE));
+ if (err)
+ return err;
+
+ /* Set waveseq 1 to the first wave */
+ err = regmap_update_bits(haptics->regmap, AW86927_WAVCFG1_REG,
+ AW86927_WAVCFG1_WAVSEQ1_MASK,
+ FIELD_PREP(AW86927_WAVCFG1_WAVSEQ1_MASK, 1));
+ if (err)
+ return err;
+
+ /* set wavseq 2 to zero */
+ err = regmap_update_bits(haptics->regmap, AW86927_WAVCFG2_REG,
+ AW86927_WAVCFG2_WAVSEQ2_MASK,
+ FIELD_PREP(AW86927_WAVCFG2_WAVSEQ2_MASK, 0));
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_WAVCFG9_REG,
+ AW86927_WAVCFG9_SEQ1LOOP_MASK,
+ FIELD_PREP(AW86927_WAVCFG9_SEQ1LOOP_MASK,
+ AW86927_WAVCFG9_SEQ1LOOP_INFINITELY));
+ if (err)
+ return err;
+
+	/* set the gain to a value lower than 0x80 to avoid distorted playback */
+ err = regmap_write(haptics->regmap, AW86927_PLAYCFG2_REG, 0x7c);
+ if (err)
+ return err;
+
+ /* Start playback */
+ err = regmap_write(haptics->regmap, AW86927_PLAYCFG4_REG, AW86927_PLAYCFG4_GO);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void aw86927_close(struct input_dev *input)
+{
+ struct aw86927_data *haptics = input_get_drvdata(input);
+ struct device *dev = &haptics->client->dev;
+ int err;
+
+ cancel_work_sync(&haptics->play_work);
+
+ err = aw86927_stop(haptics);
+ if (err)
+		dev_err(dev, "Failed to close the driver: %d\n", err);
+}
+
+static void aw86927_haptics_play_work(struct work_struct *work)
+{
+ struct aw86927_data *haptics =
+ container_of(work, struct aw86927_data, play_work);
+ struct device *dev = &haptics->client->dev;
+ int err;
+
+ if (haptics->running)
+ err = aw86927_play_sine(haptics);
+ else
+ err = aw86927_stop(haptics);
+
+ if (err)
+ dev_err(dev, "Failed to execute work command: %d\n", err);
+}
+
+static void aw86927_hw_reset(struct aw86927_data *haptics)
+{
+ /* Assert reset */
+ gpiod_set_value_cansleep(haptics->reset_gpio, 1);
+ /* Wait ~1ms */
+ usleep_range(1000, 2000);
+ /* Deassert reset */
+ gpiod_set_value_cansleep(haptics->reset_gpio, 0);
+ /* Wait ~8ms until I2C is accessible */
+ usleep_range(8000, 8500);
+}
+
+static int aw86927_haptic_init(struct aw86927_data *haptics)
+{
+ int err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_SYSCTRL4_REG,
+ AW86927_SYSCTRL4_WAVDAT_MODE_MASK,
+ FIELD_PREP(AW86927_SYSCTRL4_WAVDAT_MODE_MASK,
+ AW86927_SYSCTRL4_WAVDAT_24K));
+ if (err)
+ return err;
+
+ /* enable gain bypass */
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_SYSCTRL4_REG,
+ AW86927_SYSCTRL4_GAIN_BYPASS_MASK,
+ FIELD_PREP(AW86927_SYSCTRL4_GAIN_BYPASS_MASK,
+ 0x01));
+ if (err)
+ return err;
+
+ err = regmap_write(haptics->regmap,
+ AW86927_TMCFG_REG, AW86927_TMCFG_UNLOCK);
+ if (err)
+ return err;
+
+ err = regmap_write(haptics->regmap, AW86927_ANACFG11_REG, 0x0f);
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_ANACFG12_REG,
+ AW86927_ANACFG12_BST_SKIP_MASK,
+ FIELD_PREP(AW86927_ANACFG12_BST_SKIP_MASK,
+ AW86927_ANACFG12_BST_SKIP_SHUTDOWN));
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_ANACFG15_REG,
+ AW86927_ANACFG15_BST_PEAK_MODE_MASK,
+ FIELD_PREP(AW86927_ANACFG15_BST_PEAK_MODE_MASK,
+ AW86927_ANACFG15_BST_PEAK_BACK));
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_ANACFG16_REG,
+ AW86927_ANACFG16_BST_SRC_MASK,
+ FIELD_PREP(AW86927_ANACFG16_BST_SRC_MASK,
+ AW86927_ANACFG16_BST_SRC_3NS));
+ if (err)
+ return err;
+
+ err = regmap_write(haptics->regmap,
+ AW86927_TMCFG_REG, AW86927_TMCFG_LOCK);
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_CONTCFG1_REG,
+ AW86927_CONTCFG1_BRK_BST_MD_MASK,
+ FIELD_PREP(AW86927_CONTCFG1_BRK_BST_MD_MASK, 0x00));
+ if (err)
+ return err;
+
+ err = regmap_write(haptics->regmap,
+ AW86927_CONTCFG5_REG,
+ FIELD_PREP(AW86927_CONTCFG5_BST_BRK_GAIN_MASK, 0x05) |
+ FIELD_PREP(AW86927_CONTCFG5_BRK_GAIN_MASK, 0x08));
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap, AW86927_CONTCFG10_REG,
+ AW86927_CONTCFG10_BRK_TIME_MASK,
+ FIELD_PREP(AW86927_CONTCFG10_BRK_TIME_MASK,
+ AW86927_CONTCFG10_BRK_TIME_DEFAULT));
+ if (err)
+ return err;
+
+ err = regmap_write(haptics->regmap,
+ AW86927_CONTCFG13_REG,
+ FIELD_PREP(AW86927_CONTCFG13_TSET_MASK, 0x06) |
+ FIELD_PREP(AW86927_CONTCFG13_BEME_SET_MASK, 0x02));
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_DETCFG2_REG,
+ AW86927_DETCFG2_D2S_GAIN_MASK,
+ FIELD_PREP(AW86927_DETCFG2_D2S_GAIN_MASK,
+ AW86927_DETCFG2_D2S_GAIN_10));
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_PWMCFG1_REG,
+ AW86927_PWMCFG1_PRC_EN_MASK,
+ FIELD_PREP(AW86927_PWMCFG1_PRC_EN_MASK,
+ AW86927_PWMCFG1_PRC_DISABLE));
+ if (err)
+ return err;
+
+ err = regmap_write(haptics->regmap,
+ AW86927_PWMCFG3_REG,
+ FIELD_PREP(AW86927_PWMCFG3_PR_EN_MASK, 0x01) |
+ FIELD_PREP(AW86927_PWMCFG3_PRCTIME_MASK, 0x3f));
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_PWMCFG4_REG,
+ AW86927_PWMCFG4_PRTIME_MASK,
+ FIELD_PREP(AW86927_PWMCFG4_PRTIME_MASK, 0x32));
+ if (err)
+ return err;
+
+ err = regmap_write(haptics->regmap,
+ AW86927_TMCFG_REG, AW86927_TMCFG_UNLOCK);
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_ANACFG13_REG,
+ AW86927_ANACFG13_BST_PC_MASK,
+ FIELD_PREP(AW86927_ANACFG13_BST_PC_MASK,
+ AW86927_ANACFG13_BST_PEAKCUR_3P45A));
+ if (err)
+ return err;
+
+ err = regmap_write(haptics->regmap,
+ AW86927_TMCFG_REG, AW86927_TMCFG_LOCK);
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_PLAYCFG1_REG,
+ AW86927_PLAYCFG1_BST_VOUT_VREFSET_MASK,
+ FIELD_PREP(AW86927_PLAYCFG1_BST_VOUT_VREFSET_MASK,
+ AW86927_PLAYCFG1_BST_8500MV));
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_PLAYCFG3_REG,
+ AW86927_PLAYCFG3_AUTO_BST_MASK,
+ FIELD_PREP(AW86927_PLAYCFG3_AUTO_BST_MASK,
+ AW86927_PLAYCFG3_AUTO_BST_DISABLE));
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int aw86927_ram_init(struct aw86927_data *haptics)
+{
+ int err;
+
+ err = aw86927_wait_enter_standby(haptics);
+ if (err)
+ return err;
+
+ /* Enable SRAM init */
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_SYSCTRL3_REG,
+ AW86927_SYSCTRL3_EN_RAMINIT_MASK,
+ FIELD_PREP(AW86927_SYSCTRL3_EN_RAMINIT_MASK,
+					    AW86927_SYSCTRL3_EN_RAMINIT_ON));
+	if (err)
+		return err;
+
+ /* Set base address for the start of the SRAM waveforms */
+ err = regmap_write(haptics->regmap,
+ AW86927_BASEADDRH_REG, AW86927_BASEADDRH_VAL);
+ if (err)
+ return err;
+
+ err = regmap_write(haptics->regmap,
+ AW86927_BASEADDRL_REG, AW86927_BASEADDRL_VAL);
+ if (err)
+ return err;
+
+	/* Set start of SRAM; before any data is written it matches the base address */
+ err = regmap_write(haptics->regmap,
+ AW86927_RAMADDRH_REG, AW86927_BASEADDRH_VAL);
+ if (err)
+ return err;
+
+ err = regmap_write(haptics->regmap,
+ AW86927_RAMADDRL_REG, AW86927_BASEADDRL_VAL);
+ if (err)
+ return err;
+
+ /* Write waveform header to SRAM */
+ err = regmap_noinc_write(haptics->regmap, AW86927_RAMDATA_REG,
+ &sram_waveform_header, sizeof(sram_waveform_header));
+ if (err)
+ return err;
+
+ /* Write waveform to SRAM */
+ err = regmap_noinc_write(haptics->regmap, AW86927_RAMDATA_REG,
+ aw86927_waveform, ARRAY_SIZE(aw86927_waveform));
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_DETCFG2_REG,
+ AW86927_DETCFG2_DET_SEQ0_MASK,
+ FIELD_PREP(AW86927_DETCFG2_DET_SEQ0_MASK,
+ AW86927_DETCFG2_DET_SEQ0_VBAT));
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_DETCFG1_REG,
+ AW86927_DETCFG1_DET_GO_MASK,
+ FIELD_PREP(AW86927_DETCFG1_DET_GO_MASK,
+ AW86927_DETCFG1_DET_GO_DET_SEQ0));
+ if (err)
+ return err;
+
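+	/* give the detection sequence time to complete */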
+ usleep_range(3000, 3500);
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_DETCFG1_REG,
+ AW86927_DETCFG1_DET_GO_MASK,
+ FIELD_PREP(AW86927_DETCFG1_DET_GO_MASK,
+ AW86927_DETCFG1_DET_GO_NA));
+ if (err)
+ return err;
+
+ /* Disable SRAM init */
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_SYSCTRL3_REG,
+ AW86927_SYSCTRL3_EN_RAMINIT_MASK,
+ FIELD_PREP(AW86927_SYSCTRL3_EN_RAMINIT_MASK,
+ AW86927_SYSCTRL3_EN_RAMINIT_OFF));
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static irqreturn_t aw86927_irq(int irq, void *data)
+{
+ struct aw86927_data *haptics = data;
+ struct device *dev = &haptics->client->dev;
+ unsigned int reg_val;
+ int err;
+
+ err = regmap_read(haptics->regmap, AW86927_SYSINT_REG, &reg_val);
+ if (err) {
+ dev_err(dev, "Failed to read SYSINT register: %d\n", err);
+ return IRQ_NONE;
+ }
+
+ if (reg_val & AW86927_SYSINT_BST_SCPI)
+ dev_err(dev, "Received a Short Circuit Protection interrupt\n");
+ if (reg_val & AW86927_SYSINT_BST_OVPI)
+ dev_err(dev, "Received an Over Voltage Protection interrupt\n");
+ if (reg_val & AW86927_SYSINT_UVLI)
+ dev_err(dev, "Received an Under Voltage Lock Out interrupt\n");
+ if (reg_val & AW86927_SYSINT_OCDI)
+ dev_err(dev, "Received an Over Current interrupt\n");
+ if (reg_val & AW86927_SYSINT_OTI)
+ dev_err(dev, "Received an Over Temperature interrupt\n");
+
+ if (reg_val & AW86927_SYSINT_DONEI)
+ dev_dbg(dev, "Chip playback done!\n");
+ if (reg_val & AW86927_SYSINT_FF_AFI)
+ dev_dbg(dev, "The RTP mode FIFO is almost full!\n");
+ if (reg_val & AW86927_SYSINT_FF_AEI)
+ dev_dbg(dev, "The RTP mode FIFO is almost empty!\n");
+
+ return IRQ_HANDLED;
+}
+
+static int aw86927_detect(struct aw86927_data *haptics)
+{
+ __be16 read_buf;
+ u16 chip_id;
+ int err;
+
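+ /* CHIPIDH/CHIPIDL are adjacent; read both bytes as one big-endian word */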
+ err = regmap_bulk_read(haptics->regmap, AW86927_CHIPIDH_REG, &read_buf, 2);
+ if (err)
+ return dev_err_probe(haptics->dev, err, "Failed to read CHIPID registers\n");
+
+ chip_id = be16_to_cpu(read_buf);
+
+ if (chip_id != AW86927_CHIPID) {
+ dev_err(haptics->dev, "Unexpected CHIPID value 0x%x\n", chip_id);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int aw86927_probe(struct i2c_client *client)
+{
+ struct aw86927_data *haptics;
+ int err;
+
+ haptics = devm_kzalloc(&client->dev, sizeof(struct aw86927_data), GFP_KERNEL);
+ if (!haptics)
+ return -ENOMEM;
+
+ haptics->dev = &client->dev;
+ haptics->client = client;
+
+ i2c_set_clientdata(client, haptics);
+
+ haptics->regmap = devm_regmap_init_i2c(client, &aw86927_regmap_config);
+ if (IS_ERR(haptics->regmap))
+ return dev_err_probe(haptics->dev, PTR_ERR(haptics->regmap),
+ "Failed to allocate register map\n");
+
+ haptics->input_dev = devm_input_allocate_device(haptics->dev);
+ if (!haptics->input_dev)
+ return -ENOMEM;
+
+ haptics->reset_gpio = devm_gpiod_get(haptics->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(haptics->reset_gpio))
+ return dev_err_probe(haptics->dev, PTR_ERR(haptics->reset_gpio),
+ "Failed to get reset gpio\n");
+
+ /* Hardware reset */
+ aw86927_hw_reset(haptics);
+
+ /* Software reset */
+ err = regmap_write(haptics->regmap, AW86927_RSTCFG_REG, AW86927_RSTCFG_SOFTRST);
+ if (err)
+ return dev_err_probe(haptics->dev, err, "Failed Software reset\n");
+
+ /* Wait ~3ms until I2C is accessible */
+ usleep_range(3000, 3500);
+
+ err = aw86927_detect(haptics);
+ if (err)
+ return dev_err_probe(haptics->dev, err, "Failed to find chip\n");
+
+ /* IRQ config */
+ err = regmap_write(haptics->regmap, AW86927_SYSCTRL4_REG,
+ FIELD_PREP(AW86927_SYSCTRL4_INT_MODE_MASK,
+ AW86927_SYSCTRL4_INT_MODE_EDGE) |
+ FIELD_PREP(AW86927_SYSCTRL4_INT_EDGE_MODE_MASK,
+ AW86927_SYSCTRL4_INT_EDGE_MODE_POS));
+ if (err)
+ return dev_err_probe(haptics->dev, err, "Failed to configure interrupt modes\n");
+
+ err = regmap_write(haptics->regmap, AW86927_SYSINTM_REG,
+ AW86927_SYSINTM_BST_OVPM |
+ AW86927_SYSINTM_FF_AEM |
+ AW86927_SYSINTM_FF_AFM |
+ AW86927_SYSINTM_DONEM);
+ if (err)
+ return dev_err_probe(haptics->dev, err, "Failed to configure interrupt masks\n");
+
+ err = devm_request_threaded_irq(haptics->dev, client->irq, NULL,
+ aw86927_irq, IRQF_ONESHOT, NULL, haptics);
+ if (err)
+ return dev_err_probe(haptics->dev, err, "Failed to request threaded irq\n");
+
+ INIT_WORK(&haptics->play_work, aw86927_haptics_play_work);
+
+ haptics->input_dev->name = "aw86927-haptics";
+ haptics->input_dev->close = aw86927_close;
+
+ input_set_drvdata(haptics->input_dev, haptics);
+ input_set_capability(haptics->input_dev, EV_FF, FF_RUMBLE);
+
+ err = input_ff_create_memless(haptics->input_dev, NULL, aw86927_haptics_play);
+ if (err)
+ return dev_err_probe(haptics->dev, err, "Failed to create FF dev\n");
+
+ /* Set up registers */
+ err = aw86927_play_mode(haptics, AW86927_STANDBY_MODE);
+ if (err)
+ return dev_err_probe(haptics->dev, err,
+ "Failed to enter standby for Haptic init\n");
+
+ err = aw86927_haptic_init(haptics);
+ if (err)
+ return dev_err_probe(haptics->dev, err, "Haptic init failed\n");
+
+ /* RAM init, upload the waveform for playback */
+ err = aw86927_ram_init(haptics);
+ if (err)
+ return dev_err_probe(haptics->dev, err, "Failed to init aw86927 sram\n");
+
+ err = input_register_device(haptics->input_dev);
+ if (err)
+ return dev_err_probe(haptics->dev, err, "Failed to register input device\n");
+
+ return 0;
+}
+
+static const struct of_device_id aw86927_of_id[] = {
+ { .compatible = "awinic,aw86927" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, aw86927_of_id);
+
+static struct i2c_driver aw86927_driver = {
+ .driver = {
+ .name = "aw86927-haptics",
+ .of_match_table = aw86927_of_id,
+ },
+ .probe = aw86927_probe,
+};
+
+module_i2c_driver(aw86927_driver);
+
+MODULE_AUTHOR("Griffin Kroah-Hartman <griffin.kroah@fairphone.com>");
+MODULE_DESCRIPTION("AWINIC AW86927 LRA Haptic Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/cma3000_d0x.c b/drivers/input/misc/cma3000_d0x.c
index cfc12332bee1..b4232b0a3957 100644
--- a/drivers/input/misc/cma3000_d0x.c
+++ b/drivers/input/misc/cma3000_d0x.c
@@ -6,6 +6,7 @@
* Author: Hemanth V <hemanthv@ti.com>
*/
+#include <linux/export.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c
index 6fac31c0d99f..ff23219a582a 100644
--- a/drivers/input/misc/iqs7222.c
+++ b/drivers/input/misc/iqs7222.c
@@ -2427,6 +2427,9 @@ static int iqs7222_parse_chan(struct iqs7222_private *iqs7222,
if (error)
return error;
+ if (!iqs7222->kp_type[chan_index][i])
+ continue;
+
if (!dev_desc->event_offset)
continue;
diff --git a/drivers/input/misc/max7360-rotary.c b/drivers/input/misc/max7360-rotary.c
new file mode 100644
index 000000000000..385831ef34b6
--- /dev/null
+++ b/drivers/input/misc/max7360-rotary.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2025 Bootlin
+ *
+ * Author: Mathieu Dubois-Briand <mathieu.dubois-briand@bootlin.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device/devres.h>
+#include <linux/dev_printk.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/max7360.h>
+#include <linux/property.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#define MAX7360_ROTARY_DEFAULT_STEPS 24
+
+struct max7360_rotary {
+ struct input_dev *input;
+ struct regmap *regmap;
+ unsigned int debounce_ms;
+
+ unsigned int pos;
+
+ u32 steps;
+ u32 axis;
+ bool relative_axis;
+ bool rollover;
+};
+
+static void max7360_rotary_report_event(struct max7360_rotary *max7360_rotary, int steps)
+{
+ if (max7360_rotary->relative_axis) {
+ input_report_rel(max7360_rotary->input, max7360_rotary->axis, steps);
+ } else {
+ int pos = max7360_rotary->pos;
+ int maxval = max7360_rotary->steps;
+
+ /*
+ * Add steps to the position.
+ * Make sure added steps are always in ]-maxval; maxval[
+ * interval, so (pos + maxval) is always >= 0.
+ * Then set back pos to the [0; maxval[ interval.
+ */
+ pos += steps % maxval;
+ if (max7360_rotary->rollover)
+ pos = (pos + maxval) % maxval;
+ else
+ pos = clamp(pos, 0, maxval - 1);
+
+ max7360_rotary->pos = pos;
+ input_report_abs(max7360_rotary->input, max7360_rotary->axis, max7360_rotary->pos);
+ }
+
+ input_sync(max7360_rotary->input);
+}
+
+static irqreturn_t max7360_rotary_irq(int irq, void *data)
+{
+ struct max7360_rotary *max7360_rotary = data;
+ struct device *dev = max7360_rotary->input->dev.parent;
+ unsigned int val;
+ int error;
+
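+ /* RTR_CNT holds a signed 8-bit count of rotation steps */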
+ error = regmap_read(max7360_rotary->regmap, MAX7360_REG_RTR_CNT, &val);
+ if (error < 0) {
+ dev_err(dev, "Failed to read rotary counter\n");
+ return IRQ_NONE;
+ }
+
+ if (val == 0)
+ return IRQ_NONE;
+
+ max7360_rotary_report_event(max7360_rotary, sign_extend32(val, 7));
+
+ return IRQ_HANDLED;
+}
+
+static int max7360_rotary_hw_init(struct max7360_rotary *max7360_rotary)
+{
+ struct device *dev = max7360_rotary->input->dev.parent;
+ int val;
+ int error;
+
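+ /* Program debounce time and interrupt behaviour for the encoder */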
+ val = FIELD_PREP(MAX7360_ROT_DEBOUNCE, max7360_rotary->debounce_ms) |
+ FIELD_PREP(MAX7360_ROT_INTCNT, 1) | MAX7360_ROT_INTCNT_DLY;
+ error = regmap_write(max7360_rotary->regmap, MAX7360_REG_RTRCFG, val);
+ if (error)
+ dev_err(dev, "Failed to set max7360 rotary encoder configuration\n");
+
+ return error;
+}
+
+static int max7360_rotary_probe(struct platform_device *pdev)
+{
+ struct max7360_rotary *max7360_rotary;
+ struct device *dev = &pdev->dev;
+ struct input_dev *input;
+ struct regmap *regmap;
+ int irq;
+ int error;
+
+ regmap = dev_get_regmap(dev->parent, NULL);
+ if (!regmap)
+ return dev_err_probe(dev, -ENODEV, "Could not get parent regmap\n");
+
+ irq = fwnode_irq_get_byname(dev_fwnode(dev->parent), "inti");
+ if (irq < 0)
+ return dev_err_probe(dev, irq, "Failed to get IRQ\n");
+
+ max7360_rotary = devm_kzalloc(dev, sizeof(*max7360_rotary), GFP_KERNEL);
+ if (!max7360_rotary)
+ return -ENOMEM;
+
+ max7360_rotary->regmap = regmap;
+
+ device_property_read_u32(dev->parent, "linux,axis", &max7360_rotary->axis);
+ max7360_rotary->rollover = device_property_read_bool(dev->parent,
+ "rotary-encoder,rollover");
+ max7360_rotary->relative_axis =
+ device_property_read_bool(dev->parent, "rotary-encoder,relative-axis");
+
+ error = device_property_read_u32(dev->parent, "rotary-encoder,steps",
+ &max7360_rotary->steps);
+ if (error)
+ max7360_rotary->steps = MAX7360_ROTARY_DEFAULT_STEPS;
+
+ device_property_read_u32(dev->parent, "rotary-debounce-delay-ms",
+ &max7360_rotary->debounce_ms);
+ if (max7360_rotary->debounce_ms > MAX7360_ROT_DEBOUNCE_MAX)
+ return dev_err_probe(dev, -EINVAL, "Invalid debounce timing: %u\n",
+ max7360_rotary->debounce_ms);
+
+ input = devm_input_allocate_device(dev);
+ if (!input)
+ return -ENOMEM;
+
+ max7360_rotary->input = input;
+
+ input->id.bustype = BUS_I2C;
+ input->name = pdev->name;
+
+ if (max7360_rotary->relative_axis)
+ input_set_capability(input, EV_REL, max7360_rotary->axis);
+ else
+ input_set_abs_params(input, max7360_rotary->axis, 0, max7360_rotary->steps, 0, 1);
+
+ error = devm_request_threaded_irq(dev, irq, NULL, max7360_rotary_irq,
+ IRQF_ONESHOT | IRQF_SHARED,
+ "max7360-rotary", max7360_rotary);
+ if (error)
+ return dev_err_probe(dev, error, "Failed to register interrupt\n");
+
+ error = input_register_device(input);
+ if (error)
+ return dev_err_probe(dev, error, "Could not register input device\n");
+
+ error = max7360_rotary_hw_init(max7360_rotary);
+ if (error)
+ return dev_err_probe(dev, error, "Failed to initialize max7360 rotary\n");
+
+ device_init_wakeup(dev, true);
+ error = dev_pm_set_wake_irq(dev, irq);
+ if (error)
+ dev_warn(dev, "Failed to set up wakeup irq: %d\n", error);
+
+ return 0;
+}
+
+static void max7360_rotary_remove(struct platform_device *pdev)
+{
+ dev_pm_clear_wake_irq(&pdev->dev);
+ device_init_wakeup(&pdev->dev, false);
+}
+
+static struct platform_driver max7360_rotary_driver = {
+ .driver = {
+ .name = "max7360-rotary",
+ },
+ .probe = max7360_rotary_probe,
+ .remove = max7360_rotary_remove,
+};
+module_platform_driver(max7360_rotary_driver);
+
+MODULE_DESCRIPTION("MAX7360 Rotary driver");
+MODULE_AUTHOR("Mathieu Dubois-Briand <mathieu.dubois-briand@bootlin.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/mc13783-pwrbutton.c b/drivers/input/misc/mc13783-pwrbutton.c
index 1c7faa9b7afe..b83d762ae2e9 100644
--- a/drivers/input/misc/mc13783-pwrbutton.c
+++ b/drivers/input/misc/mc13783-pwrbutton.c
@@ -57,7 +57,6 @@ static irqreturn_t button_irq(int irq, void *_priv)
struct mc13783_pwrb *priv = _priv;
int val;
- mc13xxx_irq_ack(priv->mc13783, irq);
mc13xxx_reg_read(priv->mc13783, MC13783_REG_INTERRUPT_SENSE_1, &val);
switch (irq) {
diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c
index d952c16f2458..53249d2c081f 100644
--- a/drivers/input/misc/pm8941-pwrkey.c
+++ b/drivers/input/misc/pm8941-pwrkey.c
@@ -60,6 +60,7 @@ struct pm8941_data {
bool supports_ps_hold_poff_config;
bool supports_debounce_config;
bool has_pon_pbs;
+ bool wakeup_source_default;
const char *name;
const char *phys;
};
@@ -245,7 +246,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(pm8941_pwr_key_pm_ops,
static int pm8941_pwrkey_probe(struct platform_device *pdev)
{
struct pm8941_pwrkey *pwrkey;
- bool pull_up;
+ bool pull_up, wakeup;
struct device *parent;
struct device_node *regmap_node;
const __be32 *addr;
@@ -402,8 +403,11 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
}
}
+ wakeup = pwrkey->data->wakeup_source_default ||
+ of_property_read_bool(pdev->dev.of_node, "wakeup-source");
+
platform_set_drvdata(pdev, pwrkey);
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, wakeup);
return 0;
}
@@ -424,6 +428,7 @@ static const struct pm8941_data pwrkey_data = {
.supports_ps_hold_poff_config = true,
.supports_debounce_config = true,
.has_pon_pbs = false,
+ .wakeup_source_default = true,
};
static const struct pm8941_data resin_data = {
@@ -434,6 +439,7 @@ static const struct pm8941_data resin_data = {
.supports_ps_hold_poff_config = true,
.supports_debounce_config = true,
.has_pon_pbs = false,
+ .wakeup_source_default = false,
};
static const struct pm8941_data pon_gen3_pwrkey_data = {
@@ -443,6 +449,7 @@ static const struct pm8941_data pon_gen3_pwrkey_data = {
.supports_ps_hold_poff_config = false,
.supports_debounce_config = false,
.has_pon_pbs = true,
+ .wakeup_source_default = true,
};
static const struct pm8941_data pon_gen3_resin_data = {
@@ -452,6 +459,7 @@ static const struct pm8941_data pon_gen3_resin_data = {
.supports_ps_hold_poff_config = false,
.supports_debounce_config = false,
.has_pon_pbs = true,
+ .wakeup_source_default = false,
};
static const struct of_device_id pm8941_pwr_key_id_table[] = {
diff --git a/drivers/input/misc/tps6594-pwrbutton.c b/drivers/input/misc/tps6594-pwrbutton.c
new file mode 100644
index 000000000000..cd039b3866dc
--- /dev/null
+++ b/drivers/input/misc/tps6594-pwrbutton.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * power button driver for TI TPS6594 PMICs
+ *
+ * Copyright (C) 2025 Critical Link LLC - https://www.criticallink.com/
+ */
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/tps6594.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+struct tps6594_pwrbutton {
+ struct device *dev;
+ struct input_dev *idev;
+ char phys[32];
+};
+
+static irqreturn_t tps6594_pb_push_irq(int irq, void *_pwr)
+{
+ struct tps6594_pwrbutton *pwr = _pwr;
+
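+ /* Report the press and signal a PM wakeup event */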
+ input_report_key(pwr->idev, KEY_POWER, 1);
+ pm_wakeup_event(pwr->dev, 0);
+ input_sync(pwr->idev);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tps6594_pb_release_irq(int irq, void *_pwr)
+{
+ struct tps6594_pwrbutton *pwr = _pwr;
+
+ input_report_key(pwr->idev, KEY_POWER, 0);
+ input_sync(pwr->idev);
+
+ return IRQ_HANDLED;
+}
+
+static int tps6594_pb_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tps6594_pwrbutton *pwr;
+ struct input_dev *idev;
+ int error;
+ int push_irq;
+ int release_irq;
+
+ pwr = devm_kzalloc(dev, sizeof(*pwr), GFP_KERNEL);
+ if (!pwr)
+ return -ENOMEM;
+
+ idev = devm_input_allocate_device(dev);
+ if (!idev)
+ return -ENOMEM;
+
+ idev->name = pdev->name;
+ snprintf(pwr->phys, sizeof(pwr->phys), "%s/input0",
+ pdev->name);
+ idev->phys = pwr->phys;
+ idev->id.bustype = BUS_I2C;
+
+ input_set_capability(idev, EV_KEY, KEY_POWER);
+
+ pwr->dev = dev;
+ pwr->idev = idev;
+ device_init_wakeup(dev, true);
+
+ push_irq = platform_get_irq(pdev, 0);
+ if (push_irq < 0)
+ return -EINVAL;
+
+ release_irq = platform_get_irq(pdev, 1);
+ if (release_irq < 0)
+ return -EINVAL;
+
+ error = devm_request_threaded_irq(dev, push_irq, NULL,
+ tps6594_pb_push_irq,
+ IRQF_ONESHOT,
+ pdev->resource[0].name, pwr);
+ if (error) {
+ dev_err(dev, "failed to request push IRQ #%d: %d\n", push_irq,
+ error);
+ return error;
+ }
+
+ error = devm_request_threaded_irq(dev, release_irq, NULL,
+ tps6594_pb_release_irq,
+ IRQF_ONESHOT,
+ pdev->resource[1].name, pwr);
+ if (error) {
+ dev_err(dev, "failed to request release IRQ #%d: %d\n",
+ release_irq, error);
+ return error;
+ }
+
+ error = input_register_device(idev);
+ if (error) {
+ dev_err(dev, "Can't register power button: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id tps6594_pwrbtn_id_table[] = {
+ { "tps6594-pwrbutton", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, tps6594_pwrbtn_id_table);
+
+static struct platform_driver tps6594_pb_driver = {
+ .probe = tps6594_pb_probe,
+ .driver = {
+ .name = "tps6594_pwrbutton",
+ },
+ .id_table = tps6594_pwrbtn_id_table,
+};
+module_platform_driver(tps6594_pb_driver);
+
+MODULE_DESCRIPTION("TPS6594 Power Button");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 2c51ea9d01d7..13336a2fd49c 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -775,6 +775,7 @@ static int uinput_ff_upload_to_user(char __user *buffer,
if (in_compat_syscall()) {
struct uinput_ff_upload_compat ff_up_compat;
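+ /* Clear the struct first so padding bytes are not leaked to userspace */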
+ memset(&ff_up_compat, 0, sizeof(ff_up_compat));
ff_up_compat.request_id = ff_up->request_id;
ff_up_compat.retval = ff_up->retval;
/*
diff --git a/drivers/input/rmi4/rmi_2d_sensor.c b/drivers/input/rmi4/rmi_2d_sensor.c
index b7fe6eb35a4e..ea3eb87a89af 100644
--- a/drivers/input/rmi4/rmi_2d_sensor.c
+++ b/drivers/input/rmi4/rmi_2d_sensor.c
@@ -4,6 +4,7 @@
* Copyright (c) 2011 Unixphere
*/
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/of.h>
diff --git a/drivers/input/rmi4/rmi_2d_sensor.h b/drivers/input/rmi4/rmi_2d_sensor.h
index 7d335d809710..61a99c8a7a26 100644
--- a/drivers/input/rmi4/rmi_2d_sensor.h
+++ b/drivers/input/rmi4/rmi_2d_sensor.h
@@ -7,6 +7,9 @@
#ifndef _RMI_2D_SENSOR_H
#define _RMI_2D_SENSOR_H
+#include <linux/rmi.h>
+#include <linux/types.h>
+
enum rmi_2d_sensor_object_type {
RMI_2D_OBJECT_NONE,
RMI_2D_OBJECT_FINGER,
diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c
index 5f98c3bcfd46..b85ee9db87b0 100644
--- a/drivers/input/rmi4/rmi_bus.c
+++ b/drivers/input/rmi4/rmi_bus.c
@@ -4,6 +4,7 @@
* Copyright (c) 2011 Unixphere
*/
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/irq.h>
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index 2168b6cd7167..ccd9338a44db 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -21,6 +21,7 @@
#include <linux/irqdomain.h>
#include <uapi/linux/input.h>
#include <linux/rmi.h>
+#include <linux/export.h>
#include "rmi_bus.h"
#include "rmi_driver.h"
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index 17edc1597446..c7ef347a4dff 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -276,8 +276,8 @@ config SERIO_OLPC_APSP
config HYPERV_KEYBOARD
tristate "Microsoft Synthetic Keyboard driver"
- depends on HYPERV
- default HYPERV
+ depends on HYPERV_VMBUS
+ default HYPERV_VMBUS
help
Select this option to enable the Hyper-V Keyboard driver.
diff --git a/drivers/input/serio/hil_mlc.c b/drivers/input/serio/hil_mlc.c
index 94e8bcbbf94d..3fedfc5abc73 100644
--- a/drivers/input/serio/hil_mlc.c
+++ b/drivers/input/serio/hil_mlc.c
@@ -54,6 +54,7 @@
#include <linux/hil_mlc.h>
#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/input/serio/hp_sdc.c b/drivers/input/serio/hp_sdc.c
index 0eec4c5585cb..1461ef319f92 100644
--- a/drivers/input/serio/hp_sdc.c
+++ b/drivers/input/serio/hp_sdc.c
@@ -63,6 +63,7 @@
#include <linux/hp_sdc.h>
#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/ioport.h>
diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
index 6ed9fc34948c..1caa6c4ca435 100644
--- a/drivers/input/serio/i8042-acpipnpio.h
+++ b/drivers/input/serio/i8042-acpipnpio.h
@@ -1155,6 +1155,20 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "XxHP4NAx"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "XxKK4NAx_XxSP4NAx"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ },
/*
* A lot of modern Clevo barebones have touchpad and/or keyboard issues
* after suspend fixable with the forcenorestore quirk.
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index cab5a4c5baf5..c135254665b6 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index c22ea532276e..269df83a167d 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -8,6 +8,7 @@
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
diff --git a/drivers/input/serio/ps2-gpio.c b/drivers/input/serio/ps2-gpio.c
index 93769910ce24..46fb7667b244 100644
--- a/drivers/input/serio/ps2-gpio.c
+++ b/drivers/input/serio/ps2-gpio.c
@@ -50,7 +50,7 @@
* interrupt interval should be ~60us. Let's allow +/- 20us for frequency
* deviations and interrupt latency.
*
- * The data line must be samples after ~30us to 50us after the falling edge,
+ * The data line must be sampled ~30us to 50us after the falling edge,
* since the device updates the data line at the rising edge.
*
* ___ ______ ______ ______ ___
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 4468018cef66..2b5ddc5dac19 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -9,6 +9,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/export.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/serio.h>
diff --git a/drivers/input/sparse-keymap.c b/drivers/input/sparse-keymap.c
index 96f23ae57d5a..164f8fcfd1aa 100644
--- a/drivers/input/sparse-keymap.c
+++ b/drivers/input/sparse-keymap.c
@@ -10,6 +10,7 @@
* Copyright (C) 2005 Dmitry Torokhov <dtor@mail.ru>
*/
+#include <linux/export.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/module.h>
diff --git a/drivers/input/touch-overlay.c b/drivers/input/touch-overlay.c
index 8806373f7a4a..b9fd82c4829d 100644
--- a/drivers/input/touch-overlay.c
+++ b/drivers/input/touch-overlay.c
@@ -5,6 +5,7 @@
* Copyright (c) 2023 Javier Carrasco <javier.carrasco@wolfvision.net>
*/
+#include <linux/export.h>
#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/input/touch-overlay.h>
diff --git a/drivers/input/touchscreen.c b/drivers/input/touchscreen.c
index 4620e20d0190..d699b24bb548 100644
--- a/drivers/input/touchscreen.c
+++ b/drivers/input/touchscreen.c
@@ -6,6 +6,7 @@
* Copyright (c) 2014 Sebastian Reichel <sre@kernel.org>
*/
+#include <linux/export.h>
#include <linux/property.h>
#include <linux/input.h>
#include <linux/input/mt.h>
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 196905162945..7d5b72ee07fa 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -441,6 +441,16 @@ config TOUCHSCREEN_HIDEEP
To compile this driver as a module, choose M here : the
module will be called hideep_ts.
+config TOUCHSCREEN_HIMAX_HX852X
+ tristate "Himax HX852x(ES) touchscreen"
+ depends on I2C
+ help
+ Say Y here if you have a Himax HX852x(ES) touchscreen.
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the module
+ will be called himax_hx852x.
+
config TOUCHSCREEN_HYCON_HY46XX
tristate "Hycon hy46xx touchscreen support"
depends on I2C
@@ -465,6 +475,18 @@ config TOUCHSCREEN_HYNITRON_CSTXXX
To compile this driver as a module, choose M here: the
module will be called hynitron-cstxxx.
+config TOUCHSCREEN_HYNITRON_CST816X
+ tristate "Hynitron CST816x touchscreen"
+ depends on I2C
+ help
+ Say Y here if you have a touchscreen using a Hynitron
+ CST816x series touchscreen controller.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hynitron-cst816x.
+
config TOUCHSCREEN_ILI210X
tristate "Ilitek ILI210X based touchscreen"
depends on I2C
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 97a025c6a377..ab9abd151078 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -49,7 +49,9 @@ obj-$(CONFIG_TOUCHSCREEN_GOODIX_BERLIN_CORE) += goodix_berlin_core.o
obj-$(CONFIG_TOUCHSCREEN_GOODIX_BERLIN_I2C) += goodix_berlin_i2c.o
obj-$(CONFIG_TOUCHSCREEN_GOODIX_BERLIN_SPI) += goodix_berlin_spi.o
obj-$(CONFIG_TOUCHSCREEN_HIDEEP) += hideep.o
+obj-$(CONFIG_TOUCHSCREEN_HIMAX_HX852X) += himax_hx852x.o
obj-$(CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX) += hynitron_cstxxx.o
+obj-$(CONFIG_TOUCHSCREEN_HYNITRON_CST816X) += hynitron-cst816x.o
obj-$(CONFIG_TOUCHSCREEN_ILI210X) += ili210x.o
obj-$(CONFIG_TOUCHSCREEN_ILITEK) += ilitek_ts_i2c.o
obj-$(CONFIG_TOUCHSCREEN_IMAGIS) += imagis.o
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index d2a3a5e016b6..4c448f39bf57 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -22,6 +22,7 @@
#include <linux/device.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -475,7 +476,7 @@ static int ad7879_gpio_add(struct ad7879 *ts)
ts->gc.direction_input = ad7879_gpio_direction_input;
ts->gc.direction_output = ad7879_gpio_direction_output;
ts->gc.get = ad7879_gpio_get_value;
- ts->gc.set_rv = ad7879_gpio_set_value;
+ ts->gc.set = ad7879_gpio_set_value;
ts->gc.can_sleep = 1;
ts->gc.base = -1;
ts->gc.ngpio = 1;
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 322d5a3d40a0..dd0544cc1bc1 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -19,6 +19,7 @@
#include <linux/firmware.h>
#include <linux/i2c.h>
#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/of.h>
@@ -355,6 +356,8 @@ struct mxt_data {
enum mxt_suspend_mode suspend_mode;
u32 wakeup_method;
+
+ struct touchscreen_properties prop;
};
struct mxt_vb2_buffer {
@@ -888,8 +891,7 @@ static void mxt_proc_t9_message(struct mxt_data *data, u8 *message)
/* Touch active */
input_mt_report_slot_state(input_dev, MT_TOOL_FINGER, 1);
- input_report_abs(input_dev, ABS_MT_POSITION_X, x);
- input_report_abs(input_dev, ABS_MT_POSITION_Y, y);
+ touchscreen_report_pos(input_dev, &data->prop, x, y, true);
input_report_abs(input_dev, ABS_MT_PRESSURE, amplitude);
input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR, area);
} else {
@@ -1010,8 +1012,7 @@ static void mxt_proc_t100_message(struct mxt_data *data, u8 *message)
id, type, x, y, major, pressure, orientation);
input_mt_report_slot_state(input_dev, tool, 1);
- input_report_abs(input_dev, ABS_MT_POSITION_X, x);
- input_report_abs(input_dev, ABS_MT_POSITION_Y, y);
+ touchscreen_report_pos(input_dev, &data->prop, x, y, true);
input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR, major);
input_report_abs(input_dev, ABS_MT_PRESSURE, pressure);
input_report_abs(input_dev, ABS_MT_DISTANCE, distance);
@@ -2212,6 +2213,8 @@ static int mxt_initialize_input_device(struct mxt_data *data)
0, 255, 0, 0);
}
+ touchscreen_parse_properties(input_dev, true, &data->prop);
+
/* For T15 and T97 Key Array */
if (data->T15_reportid_min || data->T97_reportid_min) {
for (i = 0; i < data->t15_num_keys; i++)
@@ -3317,7 +3320,7 @@ static int mxt_probe(struct i2c_client *client)
if (data->reset_gpio) {
/* Wait a while and then de-assert the RESET GPIO line */
msleep(MXT_RESET_GPIO_TIME);
- gpiod_set_value(data->reset_gpio, 0);
+ gpiod_set_value_cansleep(data->reset_gpio, 0);
msleep(MXT_RESET_INVALID_CHG);
}
diff --git a/drivers/input/touchscreen/cyttsp_core.c b/drivers/input/touchscreen/cyttsp_core.c
index b8ce6012364c..9e729910fbc8 100644
--- a/drivers/input/touchscreen/cyttsp_core.c
+++ b/drivers/input/touchscreen/cyttsp_core.c
@@ -14,6 +14,7 @@
*/
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
diff --git a/drivers/input/touchscreen/fsl-imx25-tcq.c b/drivers/input/touchscreen/fsl-imx25-tcq.c
index a32708652d10..ff270b3b8572 100644
--- a/drivers/input/touchscreen/fsl-imx25-tcq.c
+++ b/drivers/input/touchscreen/fsl-imx25-tcq.c
@@ -39,7 +39,6 @@ struct mx25_tcq_priv {
};
static const struct regmap_config mx25_tcq_regconfig = {
- .fast_io = true,
.max_register = 0x5c,
.reg_bits = 32,
.val_bits = 32,
diff --git a/drivers/input/touchscreen/goodix_berlin_core.c b/drivers/input/touchscreen/goodix_berlin_core.c
index c78d512d97cd..83f28b870531 100644
--- a/drivers/input/touchscreen/goodix_berlin_core.c
+++ b/drivers/input/touchscreen/goodix_berlin_core.c
@@ -24,6 +24,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/input.h>
#include <linux/input/mt.h>
diff --git a/drivers/input/touchscreen/himax_hx852x.c b/drivers/input/touchscreen/himax_hx852x.c
new file mode 100644
index 000000000000..83c60e137a55
--- /dev/null
+++ b/drivers/input/touchscreen/himax_hx852x.c
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Himax HX852x(ES) Touchscreen Driver
+ * Copyright (c) 2020-2024 Stephan Gerhold <stephan@gerhold.net>
+ * Copyright (c) 2020 Jonathan Albrieux <jonathan.albrieux@gmail.com>
+ *
+ * Based on the Himax Android Driver Sample Code Ver 0.3 for HMX852xES chipset:
+ * Copyright (c) 2014 Himax Corporation.
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/unaligned.h>
+
+#define HX852X_COORD_SIZE(fingers) ((fingers) * sizeof(struct hx852x_coord))
+#define HX852X_WIDTH_SIZE(fingers) ALIGN(fingers, 4)
+#define HX852X_BUF_SIZE(fingers) (HX852X_COORD_SIZE(fingers) + \
+ HX852X_WIDTH_SIZE(fingers) + \
+ sizeof(struct hx852x_touch_info))
+
+#define HX852X_MAX_FINGERS 12
+#define HX852X_MAX_KEY_COUNT 4
+#define HX852X_MAX_BUF_SIZE HX852X_BUF_SIZE(HX852X_MAX_FINGERS)
+
+#define HX852X_TS_SLEEP_IN 0x80
+#define HX852X_TS_SLEEP_OUT 0x81
+#define HX852X_TS_SENSE_OFF 0x82
+#define HX852X_TS_SENSE_ON 0x83
+#define HX852X_READ_ONE_EVENT 0x85
+#define HX852X_READ_ALL_EVENTS 0x86
+#define HX852X_READ_LATEST_EVENT 0x87
+#define HX852X_CLEAR_EVENT_STACK 0x88
+
+#define HX852X_REG_SRAM_SWITCH 0x8c
+#define HX852X_REG_SRAM_ADDR 0x8b
+#define HX852X_REG_FLASH_RPLACE 0x5a
+
+#define HX852X_SRAM_SWITCH_TEST_MODE 0x14
+#define HX852X_SRAM_ADDR_CONFIG 0x7000
+
+struct hx852x {
+ struct i2c_client *client;
+ struct input_dev *input_dev;
+ struct touchscreen_properties props;
+ struct gpio_desc *reset_gpiod;
+ struct regulator_bulk_data supplies[2];
+ unsigned int max_fingers;
+ unsigned int keycount;
+ unsigned int keycodes[HX852X_MAX_KEY_COUNT];
+};
+
+struct hx852x_config {
+ u8 rx_num;
+ u8 tx_num;
+ u8 max_pt;
+ u8 padding1[3];
+ __be16 x_res;
+ __be16 y_res;
+ u8 padding2[2];
+} __packed __aligned(4);
+
+struct hx852x_coord {
+ __be16 x;
+ __be16 y;
+} __packed __aligned(4);
+
+struct hx852x_touch_info {
+ u8 finger_num;
+ __le16 finger_pressed;
+ u8 padding;
+} __packed __aligned(4);
+
+static int hx852x_i2c_read(struct hx852x *hx, u8 cmd, void *data, u16 len)
+{
+ struct i2c_client *client = hx->client;
+ int error;
+ int ret;
+
+ struct i2c_msg msg[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &cmd,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = data,
+ },
+ };
+
+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret != ARRAY_SIZE(msg)) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&client->dev, "failed to read %#x: %d\n", cmd, error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int hx852x_power_on(struct hx852x *hx)
+{
+ struct device *dev = &hx->client->dev;
+ int error;
+
+ error = regulator_bulk_enable(ARRAY_SIZE(hx->supplies), hx->supplies);
+ if (error) {
+ dev_err(dev, "failed to enable regulators: %d\n", error);
+ return error;
+ }
+
+ gpiod_set_value_cansleep(hx->reset_gpiod, 1);
+ msleep(20);
+ gpiod_set_value_cansleep(hx->reset_gpiod, 0);
+ msleep(50);
+
+ return 0;
+}
+
+static int hx852x_start(struct hx852x *hx)
+{
+ struct device *dev = &hx->client->dev;
+ int error;
+
+ error = i2c_smbus_write_byte(hx->client, HX852X_TS_SLEEP_OUT);
+ if (error) {
+ dev_err(dev, "failed to send TS_SLEEP_OUT: %d\n", error);
+ return error;
+ }
+ msleep(30);
+
+ error = i2c_smbus_write_byte(hx->client, HX852X_TS_SENSE_ON);
+ if (error) {
+ dev_err(dev, "failed to send TS_SENSE_ON: %d\n", error);
+ return error;
+ }
+ msleep(20);
+
+ return 0;
+}
+
+static int hx852x_stop(struct hx852x *hx)
+{
+ struct device *dev = &hx->client->dev;
+ int error;
+
+ error = i2c_smbus_write_byte(hx->client, HX852X_TS_SENSE_OFF);
+ if (error) {
+ dev_err(dev, "failed to send TS_SENSE_OFF: %d\n", error);
+ return error;
+ }
+ msleep(20);
+
+ error = i2c_smbus_write_byte(hx->client, HX852X_TS_SLEEP_IN);
+ if (error) {
+ dev_err(dev, "failed to send TS_SLEEP_IN: %d\n", error);
+ return error;
+ }
+ msleep(30);
+
+ return 0;
+}
+
+static int hx852x_power_off(struct hx852x *hx)
+{
+ struct device *dev = &hx->client->dev;
+ int error;
+
+ error = regulator_bulk_disable(ARRAY_SIZE(hx->supplies), hx->supplies);
+ if (error) {
+ dev_err(dev, "failed to disable regulators: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int hx852x_read_config(struct hx852x *hx)
+{
+ struct device *dev = &hx->client->dev;
+ struct hx852x_config conf;
+ int x_res, y_res;
+ int error, error2;
+
+ error = hx852x_power_on(hx);
+ if (error)
+ return error;
+
+ /* Sensing must be turned on briefly to load the config */
+ error = hx852x_start(hx);
+ if (error)
+ goto err_power_off;
+
+ error = hx852x_stop(hx);
+ if (error)
+ goto err_power_off;
+
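+ /* The config is exposed through SRAM test mode at HX852X_SRAM_ADDR_CONFIG */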
+ error = i2c_smbus_write_byte_data(hx->client, HX852X_REG_SRAM_SWITCH,
+ HX852X_SRAM_SWITCH_TEST_MODE);
+ if (error)
+ goto err_power_off;
+
+ error = i2c_smbus_write_word_data(hx->client, HX852X_REG_SRAM_ADDR,
+ HX852X_SRAM_ADDR_CONFIG);
+ if (error)
+ goto err_test_mode;
+
+ error = hx852x_i2c_read(hx, HX852X_REG_FLASH_RPLACE, &conf, sizeof(conf));
+ if (error)
+ goto err_test_mode;
+
+ x_res = be16_to_cpu(conf.x_res);
+ y_res = be16_to_cpu(conf.y_res);
+ hx->max_fingers = (conf.max_pt & 0xf0) >> 4;
+ dev_dbg(dev, "x res: %u, y res: %u, max fingers: %u\n",
+ x_res, y_res, hx->max_fingers);
+
+ if (hx->max_fingers > HX852X_MAX_FINGERS) {
+ dev_err(dev, "max supported fingers: %u, found: %u\n",
+ HX852X_MAX_FINGERS, hx->max_fingers);
+ error = -EINVAL;
+ goto err_test_mode;
+ }
+
+ if (x_res && y_res) {
+ input_set_abs_params(hx->input_dev, ABS_MT_POSITION_X, 0, x_res - 1, 0, 0);
+ input_set_abs_params(hx->input_dev, ABS_MT_POSITION_Y, 0, y_res - 1, 0, 0);
+ }
+
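+ /* Always leave test mode and power back off; report the first error seen */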
+err_test_mode:
+ error2 = i2c_smbus_write_byte_data(hx->client, HX852X_REG_SRAM_SWITCH, 0);
+ error = error ?: error2;
+err_power_off:
+ error2 = hx852x_power_off(hx);
+ return error ?: error2;
+}
+
+static int hx852x_handle_events(struct hx852x *hx)
+{
+ /*
+ * The event packets have variable size, depending on the amount of
+ * supported fingers (hx->max_fingers). They are laid out as follows:
+ * - struct hx852x_coord[hx->max_fingers]: Coordinates for each finger
+ * - u8[ALIGN(hx->max_fingers, 4)]: Touch width for each finger
+ * with padding for 32-bit alignment
+ * - struct hx852x_touch_info
+ *
+ * Load everything into a 32-bit aligned buffer so the coordinates
+ * can be assigned directly, without using get_unaligned_*().
+ */
+ u8 buf[HX852X_MAX_BUF_SIZE] __aligned(4);
+ struct hx852x_coord *coord = (struct hx852x_coord *)buf;
+ u8 *width = &buf[HX852X_COORD_SIZE(hx->max_fingers)];
+ struct hx852x_touch_info *info = (struct hx852x_touch_info *)
+ &width[HX852X_WIDTH_SIZE(hx->max_fingers)];
+ unsigned long finger_pressed, key_pressed;
+ unsigned int i, x, y, w;
+ int error;
+
+ error = hx852x_i2c_read(hx, HX852X_READ_ALL_EVENTS, buf,
+ HX852X_BUF_SIZE(hx->max_fingers));
+ if (error)
+ return error;
+
+ finger_pressed = get_unaligned_le16(&info->finger_pressed);
+ key_pressed = finger_pressed >> HX852X_MAX_FINGERS;
+
+ /* All bits are set when no touch is detected */
+ if (info->finger_num == 0xff || !(info->finger_num & 0x0f))
+ finger_pressed = 0;
+ if (key_pressed == 0xf)
+ key_pressed = 0;
+
+ for_each_set_bit(i, &finger_pressed, hx->max_fingers) {
+ x = be16_to_cpu(coord[i].x);
+ y = be16_to_cpu(coord[i].y);
+ w = width[i];
+
+ input_mt_slot(hx->input_dev, i);
+ input_mt_report_slot_state(hx->input_dev, MT_TOOL_FINGER, 1);
+ touchscreen_report_pos(hx->input_dev, &hx->props, x, y, true);
+ input_report_abs(hx->input_dev, ABS_MT_TOUCH_MAJOR, w);
+ }
+ input_mt_sync_frame(hx->input_dev);
+
+ for (i = 0; i < hx->keycount; i++)
+ input_report_key(hx->input_dev, hx->keycodes[i], key_pressed & BIT(i));
+
+ input_sync(hx->input_dev);
+ return 0;
+}
+
+static irqreturn_t hx852x_interrupt(int irq, void *ptr)
+{
+ struct hx852x *hx = ptr;
+ int error;
+
+ error = hx852x_handle_events(hx);
+ if (error) {
+ dev_err_ratelimited(&hx->client->dev,
+ "failed to handle events: %d\n", error);
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int hx852x_input_open(struct input_dev *dev)
+{
+ struct hx852x *hx = input_get_drvdata(dev);
+ int error;
+
+ error = hx852x_power_on(hx);
+ if (error)
+ return error;
+
+ error = hx852x_start(hx);
+ if (error) {
+ hx852x_power_off(hx);
+ return error;
+ }
+
+ enable_irq(hx->client->irq);
+ return 0;
+}
+
+static void hx852x_input_close(struct input_dev *dev)
+{
+ struct hx852x *hx = input_get_drvdata(dev);
+
+ hx852x_stop(hx);
+ disable_irq(hx->client->irq);
+ hx852x_power_off(hx);
+}
+
+static int hx852x_parse_properties(struct hx852x *hx)
+{
+ struct device *dev = &hx->client->dev;
+ int error, count;
+
+ count = device_property_count_u32(dev, "linux,keycodes");
+ if (count == -EINVAL) {
+ /* Property does not exist, keycodes are optional */
+ return 0;
+ } else if (count < 0) {
+ dev_err(dev, "Failed to read linux,keycodes: %d\n", count);
+ return count;
+ } else if (count > HX852X_MAX_KEY_COUNT) {
+ dev_err(dev, "max supported keys: %u, found: %u\n",
+ HX852X_MAX_KEY_COUNT, hx->keycount);
+ return -EINVAL;
+ }
+ hx->keycount = count;
+
+ error = device_property_read_u32_array(dev, "linux,keycodes",
+ hx->keycodes, hx->keycount);
+ if (error) {
+ dev_err(dev, "failed to read linux,keycodes: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int hx852x_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct hx852x *hx;
+ int error, i;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
+ I2C_FUNC_SMBUS_WRITE_BYTE |
+ I2C_FUNC_SMBUS_WRITE_BYTE_DATA |
+ I2C_FUNC_SMBUS_WRITE_WORD_DATA)) {
+ dev_err(dev, "not all required i2c functionality supported\n");
+ return -ENXIO;
+ }
+
+ hx = devm_kzalloc(dev, sizeof(*hx), GFP_KERNEL);
+ if (!hx)
+ return -ENOMEM;
+
+ hx->client = client;
+ hx->input_dev = devm_input_allocate_device(dev);
+ if (!hx->input_dev)
+ return -ENOMEM;
+
+ hx->input_dev->name = "Himax HX852x";
+ hx->input_dev->id.bustype = BUS_I2C;
+ hx->input_dev->open = hx852x_input_open;
+ hx->input_dev->close = hx852x_input_close;
+
+ i2c_set_clientdata(client, hx);
+ input_set_drvdata(hx->input_dev, hx);
+
+ hx->supplies[0].supply = "vcca";
+ hx->supplies[1].supply = "vccd";
+ error = devm_regulator_bulk_get(dev, ARRAY_SIZE(hx->supplies), hx->supplies);
+ if (error)
+ return dev_err_probe(dev, error, "failed to get regulators\n");
+
+ hx->reset_gpiod = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(hx->reset_gpiod))
+ return dev_err_probe(dev, PTR_ERR(hx->reset_gpiod),
+ "failed to get reset gpio\n");
+
+ error = devm_request_threaded_irq(dev, client->irq, NULL, hx852x_interrupt,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN, NULL, hx);
+ if (error)
+ return dev_err_probe(dev, error, "failed to request irq %d", client->irq);
+
+ error = hx852x_read_config(hx);
+ if (error)
+ return error;
+
+ input_set_capability(hx->input_dev, EV_ABS, ABS_MT_POSITION_X);
+ input_set_capability(hx->input_dev, EV_ABS, ABS_MT_POSITION_Y);
+ input_set_abs_params(hx->input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
+
+ touchscreen_parse_properties(hx->input_dev, true, &hx->props);
+ error = hx852x_parse_properties(hx);
+ if (error)
+ return error;
+
+ hx->input_dev->keycode = hx->keycodes;
+ hx->input_dev->keycodemax = hx->keycount;
+ hx->input_dev->keycodesize = sizeof(hx->keycodes[0]);
+ for (i = 0; i < hx->keycount; i++)
+ input_set_capability(hx->input_dev, EV_KEY, hx->keycodes[i]);
+
+ error = input_mt_init_slots(hx->input_dev, hx->max_fingers,
+ INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
+ if (error)
+ return dev_err_probe(dev, error, "failed to init MT slots\n");
+
+ error = input_register_device(hx->input_dev);
+ if (error)
+ return dev_err_probe(dev, error, "failed to register input device\n");
+
+ return 0;
+}
+
+static int hx852x_suspend(struct device *dev)
+{
+ struct hx852x *hx = dev_get_drvdata(dev);
+
+ guard(mutex)(&hx->input_dev->mutex);
+
+ if (input_device_enabled(hx->input_dev))
+ return hx852x_stop(hx);
+
+ return 0;
+}
+
+static int hx852x_resume(struct device *dev)
+{
+ struct hx852x *hx = dev_get_drvdata(dev);
+
+ guard(mutex)(&hx->input_dev->mutex);
+
+ if (input_device_enabled(hx->input_dev))
+ return hx852x_start(hx);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(hx852x_pm_ops, hx852x_suspend, hx852x_resume);
+
+#ifdef CONFIG_OF
+static const struct of_device_id hx852x_of_match[] = {
+ { .compatible = "himax,hx852es" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, hx852x_of_match);
+#endif
+
+static struct i2c_driver hx852x_driver = {
+ .probe = hx852x_probe,
+ .driver = {
+ .name = "himax_hx852x",
+ .pm = pm_sleep_ptr(&hx852x_pm_ops),
+ .of_match_table = of_match_ptr(hx852x_of_match),
+ },
+};
+module_i2c_driver(hx852x_driver);
+
+MODULE_DESCRIPTION("Himax HX852x(ES) Touchscreen Driver");
+MODULE_AUTHOR("Jonathan Albrieux <jonathan.albrieux@gmail.com>");
+MODULE_AUTHOR("Stephan Gerhold <stephan@gerhold.net>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/hynitron-cst816x.c b/drivers/input/touchscreen/hynitron-cst816x.c
new file mode 100644
index 000000000000..b64d7928e18f
--- /dev/null
+++ b/drivers/input/touchscreen/hynitron-cst816x.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for I2C connected Hynitron CST816x Series Touchscreen
+ *
+ * Copyright (C) 2025 Oleh Kuzhylnyi <kuzhylol@gmail.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/unaligned.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+
+#define CST816X_RD_REG 0x01
+#define CST816X_NUM_KEYS 5
+
+struct cst816x_touch {
+ u8 gest;
+ u8 active;
+ u16 abs_x;
+ u16 abs_y;
+} __packed;
+
+struct cst816x_priv {
+ struct i2c_client *client;
+ struct gpio_desc *reset;
+ struct input_dev *input;
+ unsigned int keycode[CST816X_NUM_KEYS];
+ unsigned int keycodemax;
+};
+
+static int cst816x_parse_keycodes(struct device *dev, struct cst816x_priv *priv)
+{
+ int count;
+ int error;
+
+ if (device_property_present(dev, "linux,keycodes")) {
+ count = device_property_count_u32(dev, "linux,keycodes");
+ if (count < 0) {
+ error = count;
+ dev_err(dev, "failed to count keys: %d\n", error);
+ return error;
+ } else if (count > ARRAY_SIZE(priv->keycode)) {
+ dev_err(dev, "too many keys defined: %d\n", count);
+ return -EINVAL;
+ }
+ priv->keycodemax = count;
+
+ error = device_property_read_u32_array(dev, "linux,keycodes",
+ priv->keycode,
+ priv->keycodemax);
+ if (error) {
+ dev_err(dev, "failed to read keycodes: %d\n", error);
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+static int cst816x_i2c_read_register(struct cst816x_priv *priv, u8 reg,
+ void *buf, size_t len)
+{
+ struct i2c_msg xfer[] = {
+ {
+ .addr = priv->client->addr,
+ .flags = 0,
+ .buf = &reg,
+ .len = sizeof(reg),
+ },
+ {
+ .addr = priv->client->addr,
+ .flags = I2C_M_RD,
+ .buf = buf,
+ .len = len,
+ },
+ };
+ int error;
+ int ret;
+
+ ret = i2c_transfer(priv->client->adapter, xfer, ARRAY_SIZE(xfer));
+ if (ret != ARRAY_SIZE(xfer)) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&priv->client->dev, "i2c rx err: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static u8 cst816x_gest_idx(u8 gest)
+{
+ u8 index;
+
+ switch (gest) {
+ case 0x01: /* Slide up gesture */
+ case 0x02: /* Slide down gesture */
+ case 0x03: /* Slide left gesture */
+ case 0x04: /* Slide right gesture */
+ index = gest;
+ break;
+ case 0x0c: /* Long press gesture */
+ default:
+ index = CST816X_NUM_KEYS;
+ break;
+ }
+
+ return index - 1;
+}
+
+static bool cst816x_process_touch(struct cst816x_priv *priv,
+ struct cst816x_touch *tch)
+{
+ if (cst816x_i2c_read_register(priv, CST816X_RD_REG, tch, sizeof(*tch)))
+ return false;
+
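+ /* Keep only the low 12 coordinate bits of each big-endian value */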
+ tch->abs_x = get_unaligned_be16(&tch->abs_x) & GENMASK(11, 0);
+ tch->abs_y = get_unaligned_be16(&tch->abs_y) & GENMASK(11, 0);
+
+ dev_dbg(&priv->client->dev, "x: %u, y: %u, t: %u, g: 0x%x\n",
+ tch->abs_x, tch->abs_y, tch->active, tch->gest);
+
+ return true;
+}
+
+static int cst816x_register_input(struct cst816x_priv *priv)
+{
+ priv->input = devm_input_allocate_device(&priv->client->dev);
+ if (!priv->input)
+ return -ENOMEM;
+
+ priv->input->name = "Hynitron CST816x Series Touchscreen";
+ priv->input->phys = "input/ts";
+ priv->input->id.bustype = BUS_I2C;
+
+ input_set_drvdata(priv->input, priv);
+
+ input_set_abs_params(priv->input, ABS_X, 0, 240, 0, 0);
+ input_set_abs_params(priv->input, ABS_Y, 0, 240, 0, 0);
+ input_set_capability(priv->input, EV_KEY, BTN_TOUCH);
+
+ priv->input->keycode = priv->keycode;
+ priv->input->keycodesize = sizeof(priv->keycode[0]);
+ priv->input->keycodemax = priv->keycodemax;
+
+ for (int i = 0; i < priv->keycodemax; i++) {
+ if (priv->keycode[i] == KEY_RESERVED)
+ continue;
+
+ input_set_capability(priv->input, EV_KEY, priv->keycode[i]);
+ }
+
+ return input_register_device(priv->input);
+}
+
+static void cst816x_reset(struct cst816x_priv *priv)
+{
+ gpiod_set_value_cansleep(priv->reset, 1);
+ msleep(50);
+ gpiod_set_value_cansleep(priv->reset, 0);
+ msleep(100);
+}
+
+static irqreturn_t cst816x_irq_cb(int irq, void *cookie)
+{
+ struct cst816x_priv *priv = cookie;
+ struct cst816x_touch tch;
+
+ if (!cst816x_process_touch(priv, &tch))
+ return IRQ_HANDLED;
+
+ input_report_abs(priv->input, ABS_X, tch.abs_x);
+ input_report_abs(priv->input, ABS_Y, tch.abs_y);
+
+ if (tch.gest)
+ input_report_key(priv->input,
+ priv->keycode[cst816x_gest_idx(tch.gest)],
+ tch.active);
+
+ input_report_key(priv->input, BTN_TOUCH, tch.active);
+
+ input_sync(priv->input);
+
+ return IRQ_HANDLED;
+}
+
+static int cst816x_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct cst816x_priv *priv;
+ int error;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->client = client;
+
+ priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->reset))
+ return dev_err_probe(dev, PTR_ERR(priv->reset),
+ "gpio reset request failed\n");
+
+ if (priv->reset)
+ cst816x_reset(priv);
+
+ error = cst816x_parse_keycodes(dev, priv);
+ if (error)
+ dev_warn(dev, "no gestures found in dt\n");
+
+ error = cst816x_register_input(priv);
+ if (error)
+ return dev_err_probe(dev, error, "input register failed\n");
+
+ error = devm_request_threaded_irq(dev, client->irq,
+ NULL, cst816x_irq_cb, IRQF_ONESHOT,
+ dev_driver_string(dev), priv);
+ if (error)
+ return dev_err_probe(dev, error, "irq request failed\n");
+
+ return 0;
+}
+
+static const struct i2c_device_id cst816x_id[] = {
+ { .name = "cst816s", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, cst816x_id);
+
+static const struct of_device_id cst816x_of_match[] = {
+ { .compatible = "hynitron,cst816s", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cst816x_of_match);
+
+static struct i2c_driver cst816x_driver = {
+ .driver = {
+ .name = "cst816x",
+ .of_match_table = cst816x_of_match,
+ },
+ .id_table = cst816x_id,
+ .probe = cst816x_probe,
+};
+
+module_i2c_driver(cst816x_driver);
+
+MODULE_AUTHOR("Oleh Kuzhylnyi <kuzhylol@gmail.com>");
+MODULE_DESCRIPTION("Hynitron CST816x Series Touchscreen Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c
index 6ac8fa84ed9f..85f697de2b7e 100644
--- a/drivers/input/touchscreen/imx6ul_tsc.c
+++ b/drivers/input/touchscreen/imx6ul_tsc.c
@@ -7,6 +7,7 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/bitfield.h>
#include <linux/gpio/consumer.h>
#include <linux/input.h>
#include <linux/slab.h>
@@ -20,25 +21,23 @@
#include <linux/log2.h>
/* ADC configuration registers field define */
-#define ADC_AIEN (0x1 << 7)
+#define ADC_AIEN BIT(7)
+#define ADC_ADCH_MASK GENMASK(4, 0)
#define ADC_CONV_DISABLE 0x1F
-#define ADC_AVGE (0x1 << 5)
-#define ADC_CAL (0x1 << 7)
-#define ADC_CALF 0x2
-#define ADC_12BIT_MODE (0x2 << 2)
-#define ADC_CONV_MODE_MASK (0x3 << 2)
+#define ADC_AVGE BIT(5)
+#define ADC_CAL BIT(7)
+#define ADC_CALF BIT(1)
+#define ADC_CONV_MODE_MASK GENMASK(3, 2)
+#define ADC_12BIT_MODE 0x2
#define ADC_IPG_CLK 0x00
-#define ADC_INPUT_CLK_MASK 0x3
-#define ADC_CLK_DIV_8 (0x03 << 5)
-#define ADC_CLK_DIV_MASK (0x3 << 5)
-#define ADC_SHORT_SAMPLE_MODE (0x0 << 4)
-#define ADC_SAMPLE_MODE_MASK (0x1 << 4)
-#define ADC_HARDWARE_TRIGGER (0x1 << 13)
-#define ADC_AVGS_SHIFT 14
-#define ADC_AVGS_MASK (0x3 << 14)
+#define ADC_INPUT_CLK_MASK GENMASK(1, 0)
+#define ADC_CLK_DIV_8 0x03
+#define ADC_CLK_DIV_MASK GENMASK(6, 5)
+#define ADC_SAMPLE_MODE BIT(4)
+#define ADC_HARDWARE_TRIGGER BIT(13)
+#define ADC_AVGS_MASK GENMASK(15, 14)
#define SELECT_CHANNEL_4 0x04
#define SELECT_CHANNEL_1 0x01
-#define DISABLE_CONVERSION_INT (0x0 << 7)
/* ADC registers */
#define REG_ADC_HC0 0x00
@@ -55,7 +54,7 @@
#define ADC_TIMEOUT msecs_to_jiffies(100)
/* TSC registers */
-#define REG_TSC_BASIC_SETING 0x00
+#define REG_TSC_BASIC_SETTING 0x00
#define REG_TSC_PRE_CHARGE_TIME 0x10
#define REG_TSC_FLOW_CONTROL 0x20
#define REG_TSC_MEASURE_VALUE 0x30
@@ -65,19 +64,26 @@
#define REG_TSC_DEBUG_MODE 0x70
#define REG_TSC_DEBUG_MODE2 0x80
+/* TSC_MEASURE_VALUE register field define */
+#define X_VALUE_MASK GENMASK(27, 16)
+#define Y_VALUE_MASK GENMASK(11, 0)
+
/* TSC configuration registers field define */
-#define DETECT_4_WIRE_MODE (0x0 << 4)
-#define AUTO_MEASURE 0x1
-#define MEASURE_SIGNAL 0x1
-#define DETECT_SIGNAL (0x1 << 4)
-#define VALID_SIGNAL (0x1 << 8)
-#define MEASURE_INT_EN 0x1
-#define MEASURE_SIG_EN 0x1
-#define VALID_SIG_EN (0x1 << 8)
-#define DE_GLITCH_2 (0x2 << 29)
-#define START_SENSE (0x1 << 12)
-#define TSC_DISABLE (0x1 << 16)
+#define MEASURE_DELAY_TIME_MASK GENMASK(31, 8)
+#define DETECT_5_WIRE_MODE BIT(4)
+#define AUTO_MEASURE BIT(0)
+#define MEASURE_SIGNAL BIT(0)
+#define DETECT_SIGNAL BIT(4)
+#define VALID_SIGNAL BIT(8)
+#define MEASURE_INT_EN BIT(0)
+#define MEASURE_SIG_EN BIT(0)
+#define VALID_SIG_EN BIT(8)
+#define DE_GLITCH_MASK GENMASK(30, 29)
+#define DE_GLITCH_DEF 0x02
+#define START_SENSE BIT(12)
+#define TSC_DISABLE BIT(16)
#define DETECT_MODE 0x2
+#define STATE_MACHINE_MASK GENMASK(22, 20)
struct imx6ul_tsc {
struct device *dev;
@@ -92,6 +98,7 @@ struct imx6ul_tsc {
u32 pre_charge_time;
bool average_enable;
u32 average_select;
+ u32 de_glitch;
struct completion completion;
};
@@ -112,19 +119,20 @@ static int imx6ul_adc_init(struct imx6ul_tsc *tsc)
adc_cfg = readl(tsc->adc_regs + REG_ADC_CFG);
adc_cfg &= ~(ADC_CONV_MODE_MASK | ADC_INPUT_CLK_MASK);
- adc_cfg |= ADC_12BIT_MODE | ADC_IPG_CLK;
- adc_cfg &= ~(ADC_CLK_DIV_MASK | ADC_SAMPLE_MODE_MASK);
- adc_cfg |= ADC_CLK_DIV_8 | ADC_SHORT_SAMPLE_MODE;
+ adc_cfg |= FIELD_PREP(ADC_CONV_MODE_MASK, ADC_12BIT_MODE) |
+ FIELD_PREP(ADC_INPUT_CLK_MASK, ADC_IPG_CLK);
+ adc_cfg &= ~(ADC_CLK_DIV_MASK | ADC_SAMPLE_MODE);
+ adc_cfg |= FIELD_PREP(ADC_CLK_DIV_MASK, ADC_CLK_DIV_8);
if (tsc->average_enable) {
adc_cfg &= ~ADC_AVGS_MASK;
- adc_cfg |= (tsc->average_select) << ADC_AVGS_SHIFT;
+ adc_cfg |= FIELD_PREP(ADC_AVGS_MASK, tsc->average_select);
}
adc_cfg &= ~ADC_HARDWARE_TRIGGER;
writel(adc_cfg, tsc->adc_regs + REG_ADC_CFG);
/* enable calibration interrupt */
adc_hc |= ADC_AIEN;
- adc_hc |= ADC_CONV_DISABLE;
+ adc_hc |= FIELD_PREP(ADC_ADCH_MASK, ADC_CONV_DISABLE);
writel(adc_hc, tsc->adc_regs + REG_ADC_HC0);
/* start ADC calibration */
@@ -164,19 +172,21 @@ static void imx6ul_tsc_channel_config(struct imx6ul_tsc *tsc)
{
u32 adc_hc0, adc_hc1, adc_hc2, adc_hc3, adc_hc4;
- adc_hc0 = DISABLE_CONVERSION_INT;
+ adc_hc0 = FIELD_PREP(ADC_AIEN, 0);
writel(adc_hc0, tsc->adc_regs + REG_ADC_HC0);
- adc_hc1 = DISABLE_CONVERSION_INT | SELECT_CHANNEL_4;
+ adc_hc1 = FIELD_PREP(ADC_AIEN, 0) |
+ FIELD_PREP(ADC_ADCH_MASK, SELECT_CHANNEL_4);
writel(adc_hc1, tsc->adc_regs + REG_ADC_HC1);
- adc_hc2 = DISABLE_CONVERSION_INT;
+ adc_hc2 = FIELD_PREP(ADC_AIEN, 0);
writel(adc_hc2, tsc->adc_regs + REG_ADC_HC2);
- adc_hc3 = DISABLE_CONVERSION_INT | SELECT_CHANNEL_1;
+ adc_hc3 = FIELD_PREP(ADC_AIEN, 0) |
+ FIELD_PREP(ADC_ADCH_MASK, SELECT_CHANNEL_1);
writel(adc_hc3, tsc->adc_regs + REG_ADC_HC3);
- adc_hc4 = DISABLE_CONVERSION_INT;
+ adc_hc4 = FIELD_PREP(ADC_AIEN, 0);
writel(adc_hc4, tsc->adc_regs + REG_ADC_HC4);
}
@@ -188,13 +198,16 @@ static void imx6ul_tsc_channel_config(struct imx6ul_tsc *tsc)
static void imx6ul_tsc_set(struct imx6ul_tsc *tsc)
{
u32 basic_setting = 0;
+ u32 debug_mode2;
u32 start;
- basic_setting |= tsc->measure_delay_time << 8;
- basic_setting |= DETECT_4_WIRE_MODE | AUTO_MEASURE;
- writel(basic_setting, tsc->tsc_regs + REG_TSC_BASIC_SETING);
+ basic_setting |= FIELD_PREP(MEASURE_DELAY_TIME_MASK,
+ tsc->measure_delay_time);
+ basic_setting |= AUTO_MEASURE;
+ writel(basic_setting, tsc->tsc_regs + REG_TSC_BASIC_SETTING);
- writel(DE_GLITCH_2, tsc->tsc_regs + REG_TSC_DEBUG_MODE2);
+ debug_mode2 = FIELD_PREP(DE_GLITCH_MASK, tsc->de_glitch);
+ writel(debug_mode2, tsc->tsc_regs + REG_TSC_DEBUG_MODE2);
writel(tsc->pre_charge_time, tsc->tsc_regs + REG_TSC_PRE_CHARGE_TIME);
writel(MEASURE_INT_EN, tsc->tsc_regs + REG_TSC_INT_EN);
@@ -250,7 +263,7 @@ static bool tsc_wait_detect_mode(struct imx6ul_tsc *tsc)
usleep_range(200, 400);
debug_mode2 = readl(tsc->tsc_regs + REG_TSC_DEBUG_MODE2);
- state_machine = (debug_mode2 >> 20) & 0x7;
+ state_machine = FIELD_GET(STATE_MACHINE_MASK, debug_mode2);
} while (state_machine != DETECT_MODE);
usleep_range(200, 400);
@@ -278,8 +291,8 @@ static irqreturn_t tsc_irq_fn(int irq, void *dev_id)
if (status & MEASURE_SIGNAL) {
value = readl(tsc->tsc_regs + REG_TSC_MEASURE_VALUE);
- x = (value >> 16) & 0x0fff;
- y = value & 0x0fff;
+ x = FIELD_GET(X_VALUE_MASK, value);
+ y = FIELD_GET(Y_VALUE_MASK, value);
/*
* In detect mode, we can get the xnur gpio value,
@@ -379,6 +392,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev)
int tsc_irq;
int adc_irq;
u32 average_samples;
+ u32 de_glitch;
tsc = devm_kzalloc(&pdev->dev, sizeof(*tsc), GFP_KERNEL);
if (!tsc)
@@ -501,6 +515,25 @@ static int imx6ul_tsc_probe(struct platform_device *pdev)
return -EINVAL;
}
+ err = of_property_read_u32(np, "debounce-delay-us", &de_glitch);
+ if (err) {
+ tsc->de_glitch = DE_GLITCH_DEF;
+ } else {
+ u64 cycles;
+ unsigned long rate = clk_get_rate(tsc->tsc_clk);
+
+ cycles = DIV64_U64_ROUND_UP((u64)de_glitch * rate, USEC_PER_SEC);
+
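+ /* Map the cycle count onto the 2-bit DE_GLITCH field (3 = shortest, 0 = longest) */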
+ if (cycles <= 0x3ff)
+ tsc->de_glitch = 3;
+ else if (cycles <= 0x7ff)
+ tsc->de_glitch = 2;
+ else if (cycles <= 0xfff)
+ tsc->de_glitch = 1;
+ else
+ tsc->de_glitch = 0;
+ }
+
err = input_register_device(tsc->input);
if (err) {
dev_err(&pdev->dev,
diff --git a/drivers/input/touchscreen/mc13783_ts.c b/drivers/input/touchscreen/mc13783_ts.c
index 33635da85079..47b8da00027f 100644
--- a/drivers/input/touchscreen/mc13783_ts.c
+++ b/drivers/input/touchscreen/mc13783_ts.c
@@ -42,8 +42,6 @@ static irqreturn_t mc13783_ts_handler(int irq, void *data)
{
struct mc13783_ts_priv *priv = data;
- mc13xxx_irq_ack(priv->mc13xxx, irq);
-
/*
* Kick off reading coordinates. Note that if work already happens
* to be queued for future execution (it rearms itself) it will not
@@ -137,8 +135,6 @@ static int mc13783_ts_open(struct input_dev *dev)
mc13xxx_lock(priv->mc13xxx);
- mc13xxx_irq_ack(priv->mc13xxx, MC13XXX_IRQ_TS);
-
ret = mc13xxx_irq_request(priv->mc13xxx, MC13XXX_IRQ_TS,
mc13783_ts_handler, MC13783_TS_NAME, priv);
if (ret)
diff --git a/drivers/input/touchscreen/tsc2007_core.c b/drivers/input/touchscreen/tsc2007_core.c
index 5252301686ec..948935de894b 100644
--- a/drivers/input/touchscreen/tsc2007_core.c
+++ b/drivers/input/touchscreen/tsc2007_core.c
@@ -23,6 +23,7 @@
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
+#include <linux/math64.h>
#include <linux/mod_devicetable.h>
#include <linux/property.h>
#include <linux/platform_data/tsc2007.h>
@@ -68,7 +69,7 @@ static void tsc2007_read_values(struct tsc2007 *tsc, struct ts_event *tc)
u32 tsc2007_calculate_resistance(struct tsc2007 *tsc, struct ts_event *tc)
{
- u32 rt = 0;
+ u64 rt = 0;
/* range filtering */
if (tc->x == MAX_12BIT)
@@ -79,11 +80,13 @@ u32 tsc2007_calculate_resistance(struct tsc2007 *tsc, struct ts_event *tc)
rt = tc->z2 - tc->z1;
rt *= tc->x;
rt *= tsc->x_plate_ohms;
- rt /= tc->z1;
+ rt = div_u64(rt, tc->z1);
rt = (rt + 2047) >> 12;
}
- return rt;
+ if (rt > U32_MAX)
+ return U32_MAX;
+ return (u32) rt;
}
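/*
 * Editorial sketch of the math above: the standard four-wire estimate
 * of touch resistance is
 *
 *	Rt = x_plate_ohms * (x / 4096) * (z2 / z1 - 1)
 *
 * which the code evaluates in fixed point as (z2 - z1) * x *
 * x_plate_ohms / z1, then divides by 4096 with rounding via
 * (rt + 2047) >> 12. Widening rt to u64 keeps the intermediate product
 * from overflowing before div_u64(), and the final clamp preserves the
 * u32 return type.
 */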
bool tsc2007_is_pen_down(struct tsc2007 *ts)
@@ -177,7 +180,8 @@ static void tsc2007_stop(struct tsc2007 *ts)
mb();
wake_up(&ts->wait);
- disable_irq(ts->irq);
+ if (ts->irq)
+ disable_irq(ts->irq);
}
static int tsc2007_open(struct input_dev *input_dev)
@@ -188,7 +192,8 @@ static int tsc2007_open(struct input_dev *input_dev)
ts->stopped = false;
mb();
- enable_irq(ts->irq);
+ if (ts->irq)
+ enable_irq(ts->irq);
/* Prepare for touch readings - power down ADC and enable PENIRQ */
err = tsc2007_xfer(ts, PWRDOWN);
@@ -253,7 +258,7 @@ static int tsc2007_probe_properties(struct device *dev, struct tsc2007 *ts)
if (ts->gpiod)
ts->get_pendown_state = tsc2007_get_pendown_state_gpio;
else
- dev_warn(dev, "Pen down GPIO is not specified in properties\n");
+ dev_dbg(dev, "Pen down GPIO is not specified in properties\n");
return 0;
}
@@ -361,17 +366,19 @@ static int tsc2007_probe(struct i2c_client *client)
pdata->init_platform_hw();
}
- err = devm_request_threaded_irq(&client->dev, ts->irq,
- NULL, tsc2007_soft_irq,
- IRQF_ONESHOT,
- client->dev.driver->name, ts);
- if (err) {
- dev_err(&client->dev, "Failed to request irq %d: %d\n",
- ts->irq, err);
- return err;
- }
+ if (ts->irq) {
+ err = devm_request_threaded_irq(&client->dev, ts->irq,
+ NULL, tsc2007_soft_irq,
+ IRQF_ONESHOT,
+ client->dev.driver->name, ts);
+ if (err) {
+ dev_err(&client->dev, "Failed to request irq %d: %d\n",
+ ts->irq, err);
+ return err;
+ }
- tsc2007_stop(ts);
+ tsc2007_stop(ts);
+ }
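/*
 * Editorial note: the new ts->irq checks treat an irq number of 0 as
 * "no interrupt wired up", so the threaded-irq request and the
 * enable_irq()/disable_irq() calls in tsc2007_open()/tsc2007_stop()
 * are skipped in that configuration instead of acting on an invalid
 * IRQ.
 */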
/* power down the chip (TSC2007_SETUP does not ACK on I2C) */
err = tsc2007_xfer(ts, PWRDOWN);
diff --git a/drivers/input/touchscreen/tsc200x-core.c b/drivers/input/touchscreen/tsc200x-core.c
index 82d7d1cf5010..eba53613b005 100644
--- a/drivers/input/touchscreen/tsc200x-core.c
+++ b/drivers/input/touchscreen/tsc200x-core.c
@@ -10,6 +10,7 @@
* based on TSC2301 driver by Klaus K. Pedersen <klaus.k.pedersen@nokia.com>
*/
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/input.h>
diff --git a/drivers/input/touchscreen/wm9705.c b/drivers/input/touchscreen/wm9705.c
index 4b55d5e1ea0f..96484aae030c 100644
--- a/drivers/input/touchscreen/wm9705.c
+++ b/drivers/input/touchscreen/wm9705.c
@@ -9,6 +9,7 @@
* Russell King <rmk@arm.linux.org.uk>
*/
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
diff --git a/drivers/input/touchscreen/wm9712.c b/drivers/input/touchscreen/wm9712.c
index 6947714dfefa..087ece57741a 100644
--- a/drivers/input/touchscreen/wm9712.c
+++ b/drivers/input/touchscreen/wm9712.c
@@ -9,6 +9,7 @@
* Russell King <rmk@arm.linux.org.uk>
*/
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
diff --git a/drivers/input/touchscreen/wm9713.c b/drivers/input/touchscreen/wm9713.c
index a67fbe304f92..6f13f46ce6e6 100644
--- a/drivers/input/touchscreen/wm9713.c
+++ b/drivers/input/touchscreen/wm9713.c
@@ -9,6 +9,7 @@
* Russell King <rmk@arm.linux.org.uk>
*/
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index b25771a8df2b..96354c44af87 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -29,6 +29,7 @@
* - Support for async sampling control for noisy LCDs.
*/
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 3ebf37ddfc18..6cc979b26151 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -385,7 +385,7 @@ struct icc_node_data *of_icc_get_from_provider(const struct of_phandle_args *spe
mutex_lock(&icc_lock);
list_for_each_entry(provider, &icc_providers, provider_list) {
- if (provider->dev->of_node == spec->np) {
+ if (device_match_of_node(provider->dev, spec->np)) {
if (provider->xlate_extended) {
data = provider->xlate_extended(spec, provider->data);
if (!IS_ERR(data)) {
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index 31dc4781abef..5b4bb9f1382b 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -8,6 +8,15 @@ config INTERCONNECT_QCOM
config INTERCONNECT_QCOM_BCM_VOTER
tristate
+config INTERCONNECT_QCOM_GLYMUR
+ tristate "Qualcomm GLYMUR interconnect driver"
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+ select INTERCONNECT_QCOM_RPMH
+ select INTERCONNECT_QCOM_BCM_VOTER
+ help
+ This is a driver for the Qualcomm Network-on-Chip on glymur-based
+ platforms.
+
config INTERCONNECT_QCOM_MSM8909
tristate "Qualcomm MSM8909 interconnect driver"
depends on INTERCONNECT_QCOM
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index f16ac242eba5..cf8cba73ee3e 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_INTERCONNECT_QCOM) += interconnect_qcom.o
interconnect_qcom-y := icc-common.o
icc-bcm-voter-objs := bcm-voter.o
+qnoc-glymur-objs := glymur.o
qnoc-milos-objs := milos.o
qnoc-msm8909-objs := msm8909.o
qnoc-msm8916-objs := msm8916.o
@@ -46,6 +47,7 @@ qnoc-x1e80100-objs := x1e80100.o
icc-smd-rpm-objs := smd-rpm.o icc-rpm.o icc-rpm-clocks.o
obj-$(CONFIG_INTERCONNECT_QCOM_BCM_VOTER) += icc-bcm-voter.o
+obj-$(CONFIG_INTERCONNECT_QCOM_GLYMUR) += qnoc-glymur.o
obj-$(CONFIG_INTERCONNECT_QCOM_MILOS) += qnoc-milos.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8909) += qnoc-msm8909.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8916) += qnoc-msm8916.o
diff --git a/drivers/interconnect/qcom/glymur.c b/drivers/interconnect/qcom/glymur.c
new file mode 100644
index 000000000000..cf20b5752dbb
--- /dev/null
+++ b/drivers/interconnect/qcom/glymur.c
@@ -0,0 +1,2543 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025, Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <dt-bindings/interconnect/qcom,glymur-rpmh.h>
+
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+
+static struct qcom_icc_node qup0_core_slave = {
+ .name = "qup0_core_slave",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qup1_core_slave = {
+ .name = "qup1_core_slave",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qup2_core_slave = {
+ .name = "qup2_core_slave",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy0 = {
+ .name = "qhs_ahb2phy0",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy1 = {
+ .name = "qhs_ahb2phy1",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy2 = {
+ .name = "qhs_ahb2phy2",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy3 = {
+ .name = "qhs_ahb2phy3",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_av1_enc_cfg = {
+ .name = "qhs_av1_enc_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_camera_cfg = {
+ .name = "qhs_camera_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_clk_ctl = {
+ .name = "qhs_clk_ctl",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_crypto0_cfg = {
+ .name = "qhs_crypto0_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_display_cfg = {
+ .name = "qhs_display_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_gpuss_cfg = {
+ .name = "qhs_gpuss_cfg",
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_imem_cfg = {
+ .name = "qhs_imem_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie0_cfg = {
+ .name = "qhs_pcie0_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie1_cfg = {
+ .name = "qhs_pcie1_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie2_cfg = {
+ .name = "qhs_pcie2_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie3a_cfg = {
+ .name = "qhs_pcie3a_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie3b_cfg = {
+ .name = "qhs_pcie3b_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie4_cfg = {
+ .name = "qhs_pcie4_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie5_cfg = {
+ .name = "qhs_pcie5_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie6_cfg = {
+ .name = "qhs_pcie6_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie_rscc = {
+ .name = "qhs_pcie_rscc",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pdm = {
+ .name = "qhs_pdm",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_prng = {
+ .name = "qhs_prng",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qdss_cfg = {
+ .name = "qhs_qdss_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qspi = {
+ .name = "qhs_qspi",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup0 = {
+ .name = "qhs_qup0",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup1 = {
+ .name = "qhs_qup1",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup2 = {
+ .name = "qhs_qup2",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sdc2 = {
+ .name = "qhs_sdc2",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sdc4 = {
+ .name = "qhs_sdc4",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_smmuv3_cfg = {
+ .name = "qhs_smmuv3_cfg",
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_tcsr = {
+ .name = "qhs_tcsr",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tlmm = {
+ .name = "qhs_tlmm",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ufs_mem_cfg = {
+ .name = "qhs_ufs_mem_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb2_0_cfg = {
+ .name = "qhs_usb2_0_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_0_cfg = {
+ .name = "qhs_usb3_0_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_1_cfg = {
+ .name = "qhs_usb3_1_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_2_cfg = {
+ .name = "qhs_usb3_2_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_mp_cfg = {
+ .name = "qhs_usb3_mp_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb4_0_cfg = {
+ .name = "qhs_usb4_0_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb4_1_cfg = {
+ .name = "qhs_usb4_1_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb4_2_cfg = {
+ .name = "qhs_usb4_2_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_venus_cfg = {
+ .name = "qhs_venus_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qss_lpass_qtb_cfg = {
+ .name = "qss_lpass_qtb_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qss_nsp_qtb_cfg = {
+ .name = "qss_nsp_qtb_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_qdss_stm = {
+ .name = "xs_qdss_stm",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_sys_tcu_cfg = {
+ .name = "xs_sys_tcu_cfg",
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_aoss = {
+ .name = "qhs_aoss",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ipc_router = {
+ .name = "qhs_ipc_router",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_soccp = {
+ .name = "qhs_soccp",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tme_cfg = {
+ .name = "qhs_tme_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_apss = {
+ .name = "qns_apss",
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qxs_boot_imem = {
+ .name = "qxs_boot_imem",
+ .channels = 1,
+ .buswidth = 16,
+};
+
+static struct qcom_icc_node qxs_imem = {
+ .name = "qxs_imem",
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node ebi = {
+ .name = "ebi",
+ .channels = 12,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_mnoc = {
+ .name = "srvc_mnoc",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_nsinoc = {
+ .name = "srvc_nsinoc",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_pcie_east_aggre_noc = {
+ .name = "srvc_pcie_east_aggre_noc",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_hscnoc_pcie_east_ms_mpu_cfg = {
+ .name = "qhs_hscnoc_pcie_east_ms_mpu_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_pcie_east = {
+ .name = "srvc_pcie_east",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_pcie_0 = {
+ .name = "xs_pcie_0",
+ .channels = 1,
+ .buswidth = 16,
+};
+
+static struct qcom_icc_node xs_pcie_1 = {
+ .name = "xs_pcie_1",
+ .channels = 1,
+ .buswidth = 32,
+};
+
+static struct qcom_icc_node xs_pcie_5 = {
+ .name = "xs_pcie_5",
+ .channels = 1,
+ .buswidth = 32,
+};
+
+static struct qcom_icc_node srvc_pcie_west_aggre_noc = {
+ .name = "srvc_pcie_west_aggre_noc",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_hscnoc_pcie_west_ms_mpu_cfg = {
+ .name = "qhs_hscnoc_pcie_west_ms_mpu_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_pcie_west = {
+ .name = "srvc_pcie_west",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_pcie_2 = {
+ .name = "xs_pcie_2",
+ .channels = 1,
+ .buswidth = 16,
+};
+
+static struct qcom_icc_node xs_pcie_3a = {
+ .name = "xs_pcie_3a",
+ .channels = 1,
+ .buswidth = 64,
+};
+
+static struct qcom_icc_node xs_pcie_3b = {
+ .name = "xs_pcie_3b",
+ .channels = 1,
+ .buswidth = 32,
+};
+
+static struct qcom_icc_node xs_pcie_4 = {
+ .name = "xs_pcie_4",
+ .channels = 1,
+ .buswidth = 16,
+};
+
+static struct qcom_icc_node xs_pcie_6 = {
+ .name = "xs_pcie_6",
+ .channels = 1,
+ .buswidth = 16,
+};
+
+static struct qcom_icc_node qup0_core_master = {
+ .name = "qup0_core_master",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qup0_core_slave },
+};
+
+static struct qcom_icc_node qup1_core_master = {
+ .name = "qup1_core_master",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qup1_core_slave },
+};
+
+static struct qcom_icc_node qup2_core_master = {
+ .name = "qup2_core_master",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qup2_core_slave },
+};
+
+static struct qcom_icc_node llcc_mc = {
+ .name = "llcc_mc",
+ .channels = 12,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &ebi },
+};
+
+static struct qcom_icc_node qsm_mnoc_cfg = {
+ .name = "qsm_mnoc_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &srvc_mnoc },
+};
+
+static struct qcom_icc_node qsm_pcie_east_anoc_cfg = {
+ .name = "qsm_pcie_east_anoc_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &srvc_pcie_east_aggre_noc },
+};
+
+static struct qcom_icc_node qnm_hscnoc_pcie_east = {
+ .name = "qnm_hscnoc_pcie_east",
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 3,
+ .link_nodes = (struct qcom_icc_node *[]) { &xs_pcie_0, &xs_pcie_1,
+ &xs_pcie_5 },
+};
+
+static struct qcom_icc_node qsm_cnoc_pcie_east_slave_cfg = {
+ .name = "qsm_cnoc_pcie_east_slave_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 2,
+ .link_nodes = (struct qcom_icc_node *[]) { &qhs_hscnoc_pcie_east_ms_mpu_cfg,
+ &srvc_pcie_east },
+};
+
+static struct qcom_icc_node qsm_pcie_west_anoc_cfg = {
+ .name = "qsm_pcie_west_anoc_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &srvc_pcie_west_aggre_noc },
+};
+
+static struct qcom_icc_node qnm_hscnoc_pcie_west = {
+ .name = "qnm_hscnoc_pcie_west",
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 5,
+ .link_nodes = (struct qcom_icc_node *[]) { &xs_pcie_2, &xs_pcie_3a,
+ &xs_pcie_3b, &xs_pcie_4,
+ &xs_pcie_6 },
+};
+
+static struct qcom_icc_node qsm_cnoc_pcie_west_slave_cfg = {
+ .name = "qsm_cnoc_pcie_west_slave_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 2,
+ .link_nodes = (struct qcom_icc_node *[]) { &qhs_hscnoc_pcie_west_ms_mpu_cfg,
+ &srvc_pcie_west },
+};
+
+static struct qcom_icc_node qss_cnoc_pcie_slave_east_cfg = {
+ .name = "qss_cnoc_pcie_slave_east_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qsm_cnoc_pcie_east_slave_cfg },
+};
+
+static struct qcom_icc_node qss_cnoc_pcie_slave_west_cfg = {
+ .name = "qss_cnoc_pcie_slave_west_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qsm_cnoc_pcie_west_slave_cfg },
+};
+
+static struct qcom_icc_node qss_mnoc_cfg = {
+ .name = "qss_mnoc_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qsm_mnoc_cfg },
+};
+
+static struct qcom_icc_node qss_pcie_east_anoc_cfg = {
+ .name = "qss_pcie_east_anoc_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qsm_pcie_east_anoc_cfg },
+};
+
+static struct qcom_icc_node qss_pcie_west_anoc_cfg = {
+ .name = "qss_pcie_west_anoc_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qsm_pcie_west_anoc_cfg },
+};
+
+static struct qcom_icc_node qns_llcc = {
+ .name = "qns_llcc",
+ .channels = 12,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &llcc_mc },
+};
+
+static struct qcom_icc_node qns_pcie_east = {
+ .name = "qns_pcie_east",
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_hscnoc_pcie_east },
+};
+
+static struct qcom_icc_node qns_pcie_west = {
+ .name = "qns_pcie_west",
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_hscnoc_pcie_west },
+};
+
+static struct qcom_icc_node qsm_cfg = {
+ .name = "qsm_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 51,
+ .link_nodes = (struct qcom_icc_node *[]) { &qhs_ahb2phy0, &qhs_ahb2phy1,
+ &qhs_ahb2phy2, &qhs_ahb2phy3,
+ &qhs_av1_enc_cfg, &qhs_camera_cfg,
+ &qhs_clk_ctl, &qhs_crypto0_cfg,
+ &qhs_display_cfg, &qhs_gpuss_cfg,
+ &qhs_imem_cfg, &qhs_pcie0_cfg,
+ &qhs_pcie1_cfg, &qhs_pcie2_cfg,
+ &qhs_pcie3a_cfg, &qhs_pcie3b_cfg,
+ &qhs_pcie4_cfg, &qhs_pcie5_cfg,
+ &qhs_pcie6_cfg, &qhs_pcie_rscc,
+ &qhs_pdm, &qhs_prng,
+ &qhs_qdss_cfg, &qhs_qspi,
+ &qhs_qup0, &qhs_qup1,
+ &qhs_qup2, &qhs_sdc2,
+ &qhs_sdc4, &qhs_smmuv3_cfg,
+ &qhs_tcsr, &qhs_tlmm,
+ &qhs_ufs_mem_cfg, &qhs_usb2_0_cfg,
+ &qhs_usb3_0_cfg, &qhs_usb3_1_cfg,
+ &qhs_usb3_2_cfg, &qhs_usb3_mp_cfg,
+ &qhs_usb4_0_cfg, &qhs_usb4_1_cfg,
+ &qhs_usb4_2_cfg, &qhs_venus_cfg,
+ &qss_cnoc_pcie_slave_east_cfg, &qss_cnoc_pcie_slave_west_cfg,
+ &qss_lpass_qtb_cfg, &qss_mnoc_cfg,
+ &qss_nsp_qtb_cfg, &qss_pcie_east_anoc_cfg,
+ &qss_pcie_west_anoc_cfg, &xs_qdss_stm,
+ &xs_sys_tcu_cfg },
+};
+
+static struct qcom_icc_node xm_gic = {
+ .name = "xm_gic",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x33000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_llcc },
+};
+
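+/*
+ * Editorial note (semantics assumed from the shared icc-rpmh code, not
+ * stated in this patch): a qosbox describes the per-port QoS registers
+ * found at port_offsets in the NoC register space. At probe time the
+ * provider programs each port with the fixed priority (prio), whether
+ * urgency is forwarded downstream (urg_fwd), and whether priority
+ * forwarding is disabled (prio_fwd_disable).
+ */
+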
+static struct qcom_icc_node qss_cfg = {
+ .name = "qss_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qsm_cfg },
+};
+
+static struct qcom_icc_node qnm_hscnoc_cnoc = {
+ .name = "qnm_hscnoc_cnoc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 8,
+ .link_nodes = (struct qcom_icc_node *[]) { &qhs_aoss, &qhs_ipc_router,
+ &qhs_soccp, &qhs_tme_cfg,
+ &qns_apss, &qss_cfg,
+ &qxs_boot_imem, &qxs_imem },
+};
+
+static struct qcom_icc_node qns_hscnoc_cnoc = {
+ .name = "qns_hscnoc_cnoc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_hscnoc_cnoc },
+};
+
+static struct qcom_icc_node alm_gpu_tcu = {
+ .name = "alm_gpu_tcu",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x933000 },
+ .prio = 1,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 2,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_hscnoc_cnoc, &qns_llcc },
+};
+
+static struct qcom_icc_node alm_pcie_qtc = {
+ .name = "alm_pcie_qtc",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x51f000 },
+ .prio = 3,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 2,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_hscnoc_cnoc, &qns_llcc },
+};
+
+static struct qcom_icc_node alm_sys_tcu = {
+ .name = "alm_sys_tcu",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x51f080 },
+ .prio = 6,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 2,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_hscnoc_cnoc, &qns_llcc },
+};
+
+static struct qcom_icc_node chm_apps = {
+ .name = "chm_apps",
+ .channels = 6,
+ .buswidth = 32,
+ .num_links = 4,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_hscnoc_cnoc, &qns_llcc,
+ &qns_pcie_east, &qns_pcie_west },
+};
+
+static struct qcom_icc_node qnm_aggre_noc_east = {
+ .name = "qnm_aggre_noc_east",
+ .channels = 1,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x934000 },
+ .prio = 2,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 4,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_hscnoc_cnoc, &qns_llcc,
+ &qns_pcie_east, &qns_pcie_west },
+};
+
+static struct qcom_icc_node qnm_gpu = {
+ .name = "qnm_gpu",
+ .channels = 4,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 4,
+ .port_offsets = { 0x935000, 0x936000, 0x937000, 0x938000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 4,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_hscnoc_cnoc, &qns_llcc,
+ &qns_pcie_east, &qns_pcie_west },
+};
+
+static struct qcom_icc_node qnm_lpass = {
+ .name = "qnm_lpass",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x939000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 4,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_hscnoc_cnoc, &qns_llcc,
+ &qns_pcie_east, &qns_pcie_west },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf = {
+ .name = "qnm_mnoc_hf",
+ .channels = 2,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 2,
+ .port_offsets = { 0x721000, 0x721080 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 4,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_hscnoc_cnoc, &qns_llcc,
+ &qns_pcie_east, &qns_pcie_west },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf = {
+ .name = "qnm_mnoc_sf",
+ .channels = 2,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 2,
+ .port_offsets = { 0x721100, 0x721180 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 4,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_hscnoc_cnoc, &qns_llcc,
+ &qns_pcie_east, &qns_pcie_west },
+};
+
+static struct qcom_icc_node qnm_nsp_noc = {
+ .name = "qnm_nsp_noc",
+ .channels = 4,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 4,
+ .port_offsets = { 0x816000, 0x816080, 0x816100, 0x816180 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 4,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_hscnoc_cnoc, &qns_llcc,
+ &qns_pcie_east, &qns_pcie_west },
+};
+
+static struct qcom_icc_node qnm_pcie_east = {
+ .name = "qnm_pcie_east",
+ .channels = 1,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x93a000 },
+ .prio = 2,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 2,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_hscnoc_cnoc, &qns_llcc },
+};
+
+static struct qcom_icc_node qnm_pcie_west = {
+ .name = "qnm_pcie_west",
+ .channels = 1,
+ .buswidth = 64,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x721200 },
+ .prio = 2,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 2,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_hscnoc_cnoc, &qns_llcc },
+};
+
+static struct qcom_icc_node qnm_snoc_sf = {
+ .name = "qnm_snoc_sf",
+ .channels = 1,
+ .buswidth = 64,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x51f100 },
+ .prio = 2,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 4,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_hscnoc_cnoc, &qns_llcc,
+ &qns_pcie_east, &qns_pcie_west },
+};
+
+static struct qcom_icc_node qxm_wlan_q6 = {
+ .name = "qxm_wlan_q6",
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 4,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_hscnoc_cnoc, &qns_llcc,
+ &qns_pcie_east, &qns_pcie_west },
+};
+
+static struct qcom_icc_node qns_a4noc_hscnoc = {
+ .name = "qns_a4noc_hscnoc",
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_aggre_noc_east },
+};
+
+static struct qcom_icc_node qns_lpass_ag_noc_gemnoc = {
+ .name = "qns_lpass_ag_noc_gemnoc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_lpass },
+};
+
+static struct qcom_icc_node qns_mem_noc_hf = {
+ .name = "qns_mem_noc_hf",
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_mnoc_hf },
+};
+
+static struct qcom_icc_node qns_mem_noc_sf = {
+ .name = "qns_mem_noc_sf",
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_mnoc_sf },
+};
+
+static struct qcom_icc_node qns_nsp_hscnoc = {
+ .name = "qns_nsp_hscnoc",
+ .channels = 4,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_nsp_noc },
+};
+
+static struct qcom_icc_node qns_pcie_east_mem_noc = {
+ .name = "qns_pcie_east_mem_noc",
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_pcie_east },
+};
+
+static struct qcom_icc_node qns_pcie_west_mem_noc = {
+ .name = "qns_pcie_west_mem_noc",
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_pcie_west },
+};
+
+static struct qcom_icc_node qns_gemnoc_sf = {
+ .name = "qns_gemnoc_sf",
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_snoc_sf },
+};
+
+static struct qcom_icc_node xm_usb3_0 = {
+ .name = "xm_usb3_0",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xa000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a4noc_hscnoc },
+};
+
+static struct qcom_icc_node xm_usb3_1 = {
+ .name = "xm_usb3_1",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xb000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a4noc_hscnoc },
+};
+
+static struct qcom_icc_node xm_usb4_0 = {
+ .name = "xm_usb4_0",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xc000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a4noc_hscnoc },
+};
+
+static struct qcom_icc_node xm_usb4_1 = {
+ .name = "xm_usb4_1",
+ .channels = 1,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xd000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a4noc_hscnoc },
+};
+
+static struct qcom_icc_node qnm_lpiaon_noc = {
+ .name = "qnm_lpiaon_noc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_lpass_ag_noc_gemnoc },
+};
+
+static struct qcom_icc_node qnm_av1_enc = {
+ .name = "qnm_av1_enc",
+ .channels = 1,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x30000 },
+ .prio = 4,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_node qnm_camnoc_hf = {
+ .name = "qnm_camnoc_hf",
+ .channels = 2,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 2,
+ .port_offsets = { 0x29000, 0x2a000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_hf },
+};
+
+static struct qcom_icc_node qnm_camnoc_icp = {
+ .name = "qnm_camnoc_icp",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x2b000 },
+ .prio = 4,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_node qnm_camnoc_sf = {
+ .name = "qnm_camnoc_sf",
+ .channels = 2,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 2,
+ .port_offsets = { 0x2c000, 0x2d000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_node qnm_eva = {
+ .name = "qnm_eva",
+ .channels = 1,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x34000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_node qnm_mdp = {
+ .name = "qnm_mdp",
+ .channels = 2,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 2,
+ .port_offsets = { 0x2e000, 0x2f000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_hf },
+};
+
+static struct qcom_icc_node qnm_vapss_hcp = {
+ .name = "qnm_vapss_hcp",
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_node qnm_video = {
+ .name = "qnm_video",
+ .channels = 4,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 4,
+ .port_offsets = { 0x31000, 0x32000, 0x37000, 0x38000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_node qnm_video_cv_cpu = {
+ .name = "qnm_video_cv_cpu",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x33000 },
+ .prio = 4,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_node qnm_video_v_cpu = {
+ .name = "qnm_video_v_cpu",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x35000 },
+ .prio = 4,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_node qnm_nsp = {
+ .name = "qnm_nsp",
+ .channels = 4,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_nsp_hscnoc },
+};
+
+static struct qcom_icc_node xm_pcie_0 = {
+ .name = "xm_pcie_0",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xb000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_pcie_east_mem_noc },
+};
+
+static struct qcom_icc_node xm_pcie_1 = {
+ .name = "xm_pcie_1",
+ .channels = 1,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xc000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_pcie_east_mem_noc },
+};
+
+static struct qcom_icc_node xm_pcie_5 = {
+ .name = "xm_pcie_5",
+ .channels = 1,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xd000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_pcie_east_mem_noc },
+};
+
+static struct qcom_icc_node xm_pcie_2 = {
+ .name = "xm_pcie_2",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xd000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_pcie_west_mem_noc },
+};
+
+static struct qcom_icc_node xm_pcie_3a = {
+ .name = "xm_pcie_3a",
+ .channels = 1,
+ .buswidth = 64,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xd200 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_pcie_west_mem_noc },
+};
+
+static struct qcom_icc_node xm_pcie_3b = {
+ .name = "xm_pcie_3b",
+ .channels = 1,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xd400 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_pcie_west_mem_noc },
+};
+
+static struct qcom_icc_node xm_pcie_4 = {
+ .name = "xm_pcie_4",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xd600 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_pcie_west_mem_noc },
+};
+
+static struct qcom_icc_node xm_pcie_6 = {
+ .name = "xm_pcie_6",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xd800 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_pcie_west_mem_noc },
+};
+
+static struct qcom_icc_node qnm_aggre1_noc = {
+ .name = "qnm_aggre1_noc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_node qnm_aggre2_noc = {
+ .name = "qnm_aggre2_noc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_node qnm_aggre3_noc = {
+ .name = "qnm_aggre3_noc",
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_node qnm_nsi_noc = {
+ .name = "qnm_nsi_noc",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x1c000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_node qnm_oobmss = {
+ .name = "qnm_oobmss",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x1b000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_node qns_a1noc_snoc = {
+ .name = "qns_a1noc_snoc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_aggre1_noc },
+};
+
+static struct qcom_icc_node qns_a2noc_snoc = {
+ .name = "qns_a2noc_snoc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_aggre2_noc },
+};
+
+static struct qcom_icc_node qns_a3noc_snoc = {
+ .name = "qns_a3noc_snoc",
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_aggre3_noc },
+};
+
+static struct qcom_icc_node qns_lpass_aggnoc = {
+ .name = "qns_lpass_aggnoc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_lpiaon_noc },
+};
+
+static struct qcom_icc_node qns_system_noc = {
+ .name = "qns_system_noc",
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_nsi_noc },
+};
+
+static struct qcom_icc_node qns_oobmss_snoc = {
+ .name = "qns_oobmss_snoc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_oobmss },
+};
+
+static struct qcom_icc_node qxm_crypto = {
+ .name = "qxm_crypto",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xb000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a1noc_snoc },
+};
+
+static struct qcom_icc_node qxm_soccp = {
+ .name = "qxm_soccp",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xe000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a1noc_snoc },
+};
+
+static struct qcom_icc_node xm_qdss_etr_0 = {
+ .name = "xm_qdss_etr_0",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xc000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a1noc_snoc },
+};
+
+static struct qcom_icc_node xm_qdss_etr_1 = {
+ .name = "xm_qdss_etr_1",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xd000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a1noc_snoc },
+};
+
+static struct qcom_icc_node xm_ufs_mem = {
+ .name = "xm_ufs_mem",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xa000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
+};
+
+static struct qcom_icc_node xm_usb3_2 = {
+ .name = "xm_usb3_2",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x8000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
+};
+
+static struct qcom_icc_node xm_usb4_2 = {
+ .name = "xm_usb4_2",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x9000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
+};
+
+static struct qcom_icc_node qhm_qspi = {
+ .name = "qhm_qspi",
+ .channels = 1,
+ .buswidth = 4,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x10000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a3noc_snoc },
+};
+
+static struct qcom_icc_node qhm_qup0 = {
+ .name = "qhm_qup0",
+ .channels = 1,
+ .buswidth = 4,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x11000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a3noc_snoc },
+};
+
+static struct qcom_icc_node qhm_qup1 = {
+ .name = "qhm_qup1",
+ .channels = 1,
+ .buswidth = 4,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x12000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a3noc_snoc },
+};
+
+static struct qcom_icc_node qhm_qup2 = {
+ .name = "qhm_qup2",
+ .channels = 1,
+ .buswidth = 4,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x13000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a3noc_snoc },
+};
+
+static struct qcom_icc_node qxm_sp = {
+ .name = "qxm_sp",
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a3noc_snoc },
+};
+
+static struct qcom_icc_node xm_sdc2 = {
+ .name = "xm_sdc2",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x18000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a3noc_snoc },
+};
+
+static struct qcom_icc_node xm_sdc4 = {
+ .name = "xm_sdc4",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x14000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a3noc_snoc },
+};
+
+static struct qcom_icc_node xm_usb2_0 = {
+ .name = "xm_usb2_0",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x15000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a3noc_snoc },
+};
+
+static struct qcom_icc_node xm_usb3_mp = {
+ .name = "xm_usb3_mp",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x16000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a3noc_snoc },
+};
+
+static struct qcom_icc_node qnm_lpass_lpinoc = {
+ .name = "qnm_lpass_lpinoc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_lpass_aggnoc },
+};
+
+static struct qcom_icc_node xm_cpucp = {
+ .name = "xm_cpucp",
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_system_noc, &srvc_nsinoc },
+};
+
+static struct qcom_icc_node xm_mem_sp = {
+ .name = "xm_mem_sp",
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_oobmss_snoc },
+};
+
+static struct qcom_icc_node qns_lpi_aon_noc = {
+ .name = "qns_lpi_aon_noc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_lpass_lpinoc },
+};
+
+static struct qcom_icc_node qnm_lpinoc_dsp_qns4m = {
+ .name = "qnm_lpinoc_dsp_qns4m",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_lpi_aon_noc },
+};
+
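+/*
+ * Editorial note (general icc-rpmh behaviour, assumed rather than
+ * documented here): each qcom_icc_bcm is a Bus Clock Manager entry.
+ * The bandwidth requests of its member nodes are aggregated into a
+ * vote sent to RPMh; keepalive keeps a minimum vote in place,
+ * enable_mask marks BCMs voted as a simple on/off bit rather than a
+ * bandwidth value, and vote_scale adjusts the scaling applied to the
+ * aggregated vote.
+ */
+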
+static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
+ .enable_mask = BIT(3),
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+ .name = "CE0",
+ .num_nodes = 1,
+ .nodes = { &qxm_crypto },
+};
+
+static struct qcom_icc_bcm bcm_cn0 = {
+ .name = "CN0",
+ .keepalive = true,
+ .enable_mask = BIT(0),
+ .num_nodes = 60,
+ .nodes = { &qsm_cfg, &qhs_ahb2phy0,
+ &qhs_ahb2phy1, &qhs_ahb2phy2,
+ &qhs_ahb2phy3, &qhs_av1_enc_cfg,
+ &qhs_camera_cfg, &qhs_clk_ctl,
+ &qhs_crypto0_cfg, &qhs_gpuss_cfg,
+ &qhs_imem_cfg, &qhs_pcie0_cfg,
+ &qhs_pcie1_cfg, &qhs_pcie2_cfg,
+ &qhs_pcie3a_cfg, &qhs_pcie3b_cfg,
+ &qhs_pcie4_cfg, &qhs_pcie5_cfg,
+ &qhs_pcie6_cfg, &qhs_pcie_rscc,
+ &qhs_pdm, &qhs_prng,
+ &qhs_qdss_cfg, &qhs_qspi,
+ &qhs_qup0, &qhs_qup1,
+ &qhs_qup2, &qhs_sdc2,
+ &qhs_sdc4, &qhs_smmuv3_cfg,
+ &qhs_tcsr, &qhs_tlmm,
+ &qhs_ufs_mem_cfg, &qhs_usb2_0_cfg,
+ &qhs_usb3_0_cfg, &qhs_usb3_1_cfg,
+ &qhs_usb3_2_cfg, &qhs_usb3_mp_cfg,
+ &qhs_usb4_0_cfg, &qhs_usb4_1_cfg,
+ &qhs_usb4_2_cfg, &qhs_venus_cfg,
+ &qss_cnoc_pcie_slave_east_cfg, &qss_cnoc_pcie_slave_west_cfg,
+ &qss_lpass_qtb_cfg, &qss_mnoc_cfg,
+ &qss_nsp_qtb_cfg, &qss_pcie_east_anoc_cfg,
+ &qss_pcie_west_anoc_cfg, &xs_qdss_stm,
+ &xs_sys_tcu_cfg, &qnm_hscnoc_cnoc,
+ &qhs_aoss, &qhs_ipc_router,
+ &qhs_soccp, &qhs_tme_cfg,
+ &qns_apss, &qss_cfg,
+ &qxs_boot_imem, &qxs_imem },
+};
+
+static struct qcom_icc_bcm bcm_cn1 = {
+ .name = "CN1",
+ .num_nodes = 1,
+ .nodes = { &qhs_display_cfg },
+};
+
+static struct qcom_icc_bcm bcm_co0 = {
+ .name = "CO0",
+ .enable_mask = BIT(0),
+ .num_nodes = 2,
+ .nodes = { &qnm_nsp, &qns_nsp_hscnoc },
+};
+
+static struct qcom_icc_bcm bcm_lp0 = {
+ .name = "LP0",
+ .num_nodes = 2,
+ .nodes = { &qnm_lpass_lpinoc, &qns_lpass_aggnoc },
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+ .name = "MC0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_mm0 = {
+ .name = "MM0",
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_hf },
+};
+
+static struct qcom_icc_bcm bcm_mm1 = {
+ .name = "MM1",
+ .enable_mask = BIT(0),
+ .num_nodes = 11,
+ .nodes = { &qnm_av1_enc, &qnm_camnoc_hf,
+ &qnm_camnoc_icp, &qnm_camnoc_sf,
+ &qnm_eva, &qnm_mdp,
+ &qnm_vapss_hcp, &qnm_video,
+ &qnm_video_cv_cpu, &qnm_video_v_cpu,
+ &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_bcm bcm_qup0 = {
+ .name = "QUP0",
+ .vote_scale = 1,
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qup0_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup1 = {
+ .name = "QUP1",
+ .vote_scale = 1,
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qup1_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup2 = {
+ .name = "QUP2",
+ .vote_scale = 1,
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qup2_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+ .name = "SH0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_bcm bcm_sh1 = {
+ .name = "SH1",
+ .enable_mask = BIT(0),
+ .num_nodes = 18,
+ .nodes = { &alm_gpu_tcu, &alm_pcie_qtc,
+ &alm_sys_tcu, &chm_apps,
+ &qnm_aggre_noc_east, &qnm_gpu,
+ &qnm_lpass, &qnm_mnoc_hf,
+ &qnm_mnoc_sf, &qnm_nsp_noc,
+ &qnm_pcie_east, &qnm_pcie_west,
+ &qnm_snoc_sf, &qxm_wlan_q6,
+ &xm_gic, &qns_hscnoc_cnoc,
+ &qns_pcie_east, &qns_pcie_west },
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+ .name = "SN0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_bcm bcm_sn1 = {
+ .name = "SN1",
+ .enable_mask = BIT(0),
+ .num_nodes = 1,
+ .nodes = { &qnm_oobmss },
+};
+
+static struct qcom_icc_bcm bcm_sn2 = {
+ .name = "SN2",
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre1_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn3 = {
+ .name = "SN3",
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre2_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn4 = {
+ .name = "SN4",
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre3_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn5 = {
+ .name = "SN5",
+ .num_nodes = 1,
+ .nodes = { &qns_a4noc_hscnoc },
+};
+
+static struct qcom_icc_bcm bcm_sn6 = {
+ .name = "SN6",
+ .num_nodes = 4,
+ .nodes = { &qns_pcie_east_mem_noc, &qnm_hscnoc_pcie_east,
+ &qns_pcie_west_mem_noc, &qnm_hscnoc_pcie_west },
+};
+
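+/*
+ * Editorial sketch of consumer usage (standard <linux/interconnect.h>
+ * API; the "memory" path name is hypothetical and would come from the
+ * consumer's DT bindings):
+ *
+ *	struct icc_path *path = of_icc_get(dev, "memory");
+ *
+ *	if (!IS_ERR(path))
+ *		icc_set_bw(path, kBps_to_icc(100000), kBps_to_icc(200000));
+ *
+ * The framework aggregates such requests across the node graph defined
+ * in this file and hands the result to the BCM voter for RPMh.
+ */
+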
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
+ &bcm_ce0,
+};
+
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
+ [MASTER_CRYPTO] = &qxm_crypto,
+ [MASTER_SOCCP_PROC] = &qxm_soccp,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr_0,
+ [MASTER_QDSS_ETR_1] = &xm_qdss_etr_1,
+ [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
+};
+
+static const struct regmap_config glymur_aggre1_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x14400,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_aggre1_noc = {
+ .config = &glymur_aggre1_noc_regmap_config,
+ .nodes = aggre1_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
+ .bcms = aggre1_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
+ .alloc_dyn_id = true,
+};
+
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
+ [MASTER_UFS_MEM] = &xm_ufs_mem,
+ [MASTER_USB3_2] = &xm_usb3_2,
+ [MASTER_USB4_2] = &xm_usb4_2,
+ [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
+};
+
+static const struct regmap_config glymur_aggre2_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x14400,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_aggre2_noc = {
+ .config = &glymur_aggre2_noc_regmap_config,
+ .nodes = aggre2_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
+ .alloc_dyn_id = true,
+ .qos_requires_clocks = true,
+};
+
+static struct qcom_icc_node * const aggre3_noc_nodes[] = {
+ [MASTER_QSPI_0] = &qhm_qspi,
+ [MASTER_QUP_0] = &qhm_qup0,
+ [MASTER_QUP_1] = &qhm_qup1,
+ [MASTER_QUP_2] = &qhm_qup2,
+ [MASTER_SP] = &qxm_sp,
+ [MASTER_SDCC_2] = &xm_sdc2,
+ [MASTER_SDCC_4] = &xm_sdc4,
+ [MASTER_USB2] = &xm_usb2_0,
+ [MASTER_USB3_MP] = &xm_usb3_mp,
+ [SLAVE_A3NOC_SNOC] = &qns_a3noc_snoc,
+};
+
+static const struct regmap_config glymur_aggre3_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1d400,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_aggre3_noc = {
+ .config = &glymur_aggre3_noc_regmap_config,
+ .nodes = aggre3_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre3_noc_nodes),
+ .alloc_dyn_id = true,
+};
+
+static struct qcom_icc_bcm * const aggre4_noc_bcms[] = {
+ &bcm_sn5,
+};
+
+static struct qcom_icc_node * const aggre4_noc_nodes[] = {
+ [MASTER_USB3_0] = &xm_usb3_0,
+ [MASTER_USB3_1] = &xm_usb3_1,
+ [MASTER_USB4_0] = &xm_usb4_0,
+ [MASTER_USB4_1] = &xm_usb4_1,
+ [SLAVE_A4NOC_HSCNOC] = &qns_a4noc_hscnoc,
+};
+
+static const struct regmap_config glymur_aggre4_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x14400,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_aggre4_noc = {
+ .config = &glymur_aggre4_noc_regmap_config,
+ .nodes = aggre4_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre4_noc_nodes),
+ .bcms = aggre4_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre4_noc_bcms),
+ .alloc_dyn_id = true,
+ .qos_requires_clocks = true,
+};
+
+static struct qcom_icc_bcm * const clk_virt_bcms[] = {
+ &bcm_qup0,
+ &bcm_qup1,
+ &bcm_qup2,
+};
+
+static struct qcom_icc_node * const clk_virt_nodes[] = {
+ [MASTER_QUP_CORE_0] = &qup0_core_master,
+ [MASTER_QUP_CORE_1] = &qup1_core_master,
+ [MASTER_QUP_CORE_2] = &qup2_core_master,
+ [SLAVE_QUP_CORE_0] = &qup0_core_slave,
+ [SLAVE_QUP_CORE_1] = &qup1_core_slave,
+ [SLAVE_QUP_CORE_2] = &qup2_core_slave,
+};
+
+static const struct qcom_icc_desc glymur_clk_virt = {
+ .nodes = clk_virt_nodes,
+ .num_nodes = ARRAY_SIZE(clk_virt_nodes),
+ .bcms = clk_virt_bcms,
+ .num_bcms = ARRAY_SIZE(clk_virt_bcms),
+ .alloc_dyn_id = true,
+};
+
+static struct qcom_icc_bcm * const cnoc_cfg_bcms[] = {
+ &bcm_cn0,
+ &bcm_cn1,
+};
+
+static struct qcom_icc_node * const cnoc_cfg_nodes[] = {
+ [MASTER_CNOC_CFG] = &qsm_cfg,
+ [SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0,
+ [SLAVE_AHB2PHY_NORTH] = &qhs_ahb2phy1,
+ [SLAVE_AHB2PHY_2] = &qhs_ahb2phy2,
+ [SLAVE_AHB2PHY_3] = &qhs_ahb2phy3,
+ [SLAVE_AV1_ENC_CFG] = &qhs_av1_enc_cfg,
+ [SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+ [SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
+ [SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
+ [SLAVE_PCIE_1_CFG] = &qhs_pcie1_cfg,
+ [SLAVE_PCIE_2_CFG] = &qhs_pcie2_cfg,
+ [SLAVE_PCIE_3A_CFG] = &qhs_pcie3a_cfg,
+ [SLAVE_PCIE_3B_CFG] = &qhs_pcie3b_cfg,
+ [SLAVE_PCIE_4_CFG] = &qhs_pcie4_cfg,
+ [SLAVE_PCIE_5_CFG] = &qhs_pcie5_cfg,
+ [SLAVE_PCIE_6_CFG] = &qhs_pcie6_cfg,
+ [SLAVE_PCIE_RSCC] = &qhs_pcie_rscc,
+ [SLAVE_PDM] = &qhs_pdm,
+ [SLAVE_PRNG] = &qhs_prng,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_QSPI_0] = &qhs_qspi,
+ [SLAVE_QUP_0] = &qhs_qup0,
+ [SLAVE_QUP_1] = &qhs_qup1,
+ [SLAVE_QUP_2] = &qhs_qup2,
+ [SLAVE_SDCC_2] = &qhs_sdc2,
+ [SLAVE_SDCC_4] = &qhs_sdc4,
+ [SLAVE_SMMUV3_CFG] = &qhs_smmuv3_cfg,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM] = &qhs_tlmm,
+ [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
+ [SLAVE_USB2] = &qhs_usb2_0_cfg,
+ [SLAVE_USB3_0] = &qhs_usb3_0_cfg,
+ [SLAVE_USB3_1] = &qhs_usb3_1_cfg,
+ [SLAVE_USB3_2] = &qhs_usb3_2_cfg,
+ [SLAVE_USB3_MP] = &qhs_usb3_mp_cfg,
+ [SLAVE_USB4_0] = &qhs_usb4_0_cfg,
+ [SLAVE_USB4_1] = &qhs_usb4_1_cfg,
+ [SLAVE_USB4_2] = &qhs_usb4_2_cfg,
+ [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+ [SLAVE_CNOC_PCIE_SLAVE_EAST_CFG] = &qss_cnoc_pcie_slave_east_cfg,
+ [SLAVE_CNOC_PCIE_SLAVE_WEST_CFG] = &qss_cnoc_pcie_slave_west_cfg,
+ [SLAVE_LPASS_QTB_CFG] = &qss_lpass_qtb_cfg,
+ [SLAVE_CNOC_MNOC_CFG] = &qss_mnoc_cfg,
+ [SLAVE_NSP_QTB_CFG] = &qss_nsp_qtb_cfg,
+ [SLAVE_PCIE_EAST_ANOC_CFG] = &qss_pcie_east_anoc_cfg,
+ [SLAVE_PCIE_WEST_ANOC_CFG] = &qss_pcie_west_anoc_cfg,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static const struct regmap_config glymur_cnoc_cfg_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x6600,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_cnoc_cfg = {
+ .config = &glymur_cnoc_cfg_regmap_config,
+ .nodes = cnoc_cfg_nodes,
+ .num_nodes = ARRAY_SIZE(cnoc_cfg_nodes),
+ .bcms = cnoc_cfg_bcms,
+ .num_bcms = ARRAY_SIZE(cnoc_cfg_bcms),
+ .alloc_dyn_id = true,
+};
+
+static struct qcom_icc_bcm * const cnoc_main_bcms[] = {
+ &bcm_cn0,
+};
+
+static struct qcom_icc_node * const cnoc_main_nodes[] = {
+ [MASTER_HSCNOC_CNOC] = &qnm_hscnoc_cnoc,
+ [SLAVE_AOSS] = &qhs_aoss,
+ [SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router,
+ [SLAVE_SOCCP] = &qhs_soccp,
+ [SLAVE_TME_CFG] = &qhs_tme_cfg,
+ [SLAVE_APPSS] = &qns_apss,
+ [SLAVE_CNOC_CFG] = &qss_cfg,
+ [SLAVE_BOOT_IMEM] = &qxs_boot_imem,
+ [SLAVE_IMEM] = &qxs_imem,
+};
+
+static const struct regmap_config glymur_cnoc_main_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x17080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_cnoc_main = {
+ .config = &glymur_cnoc_main_regmap_config,
+ .nodes = cnoc_main_nodes,
+ .num_nodes = ARRAY_SIZE(cnoc_main_nodes),
+ .bcms = cnoc_main_bcms,
+ .num_bcms = ARRAY_SIZE(cnoc_main_bcms),
+ .alloc_dyn_id = true,
+};
+
+static struct qcom_icc_bcm * const hscnoc_bcms[] = {
+ &bcm_sh0,
+ &bcm_sh1,
+};
+
+static struct qcom_icc_node * const hscnoc_nodes[] = {
+ [MASTER_GPU_TCU] = &alm_gpu_tcu,
+ [MASTER_PCIE_TCU] = &alm_pcie_qtc,
+ [MASTER_SYS_TCU] = &alm_sys_tcu,
+ [MASTER_APPSS_PROC] = &chm_apps,
+ [MASTER_AGGRE_NOC_EAST] = &qnm_aggre_noc_east,
+ [MASTER_GFX3D] = &qnm_gpu,
+ [MASTER_LPASS_GEM_NOC] = &qnm_lpass,
+ [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+ [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+ [MASTER_COMPUTE_NOC] = &qnm_nsp_noc,
+ [MASTER_PCIE_EAST] = &qnm_pcie_east,
+ [MASTER_PCIE_WEST] = &qnm_pcie_west,
+ [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+ [MASTER_WLAN_Q6] = &qxm_wlan_q6,
+ [MASTER_GIC] = &xm_gic,
+ [SLAVE_HSCNOC_CNOC] = &qns_hscnoc_cnoc,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_PCIE_EAST] = &qns_pcie_east,
+ [SLAVE_PCIE_WEST] = &qns_pcie_west,
+};
+
+static const struct regmap_config glymur_hscnoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x93a080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_hscnoc = {
+ .config = &glymur_hscnoc_regmap_config,
+ .nodes = hscnoc_nodes,
+ .num_nodes = ARRAY_SIZE(hscnoc_nodes),
+ .bcms = hscnoc_bcms,
+ .num_bcms = ARRAY_SIZE(hscnoc_bcms),
+ .alloc_dyn_id = true,
+};
+
+static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
+ [MASTER_LPIAON_NOC] = &qnm_lpiaon_noc,
+ [SLAVE_LPASS_GEM_NOC] = &qns_lpass_ag_noc_gemnoc,
+};
+
+static const struct regmap_config glymur_lpass_ag_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xe080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_lpass_ag_noc = {
+ .config = &glymur_lpass_ag_noc_regmap_config,
+ .nodes = lpass_ag_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
+ .alloc_dyn_id = true,
+};
+
+static struct qcom_icc_bcm * const lpass_lpiaon_noc_bcms[] = {
+ &bcm_lp0,
+};
+
+static struct qcom_icc_node * const lpass_lpiaon_noc_nodes[] = {
+ [MASTER_LPASS_LPINOC] = &qnm_lpass_lpinoc,
+ [SLAVE_LPIAON_NOC_LPASS_AG_NOC] = &qns_lpass_aggnoc,
+};
+
+static const struct regmap_config glymur_lpass_lpiaon_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x19080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_lpass_lpiaon_noc = {
+ .config = &glymur_lpass_lpiaon_noc_regmap_config,
+ .nodes = lpass_lpiaon_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_lpiaon_noc_nodes),
+ .bcms = lpass_lpiaon_noc_bcms,
+ .num_bcms = ARRAY_SIZE(lpass_lpiaon_noc_bcms),
+ .alloc_dyn_id = true,
+};
+
+static struct qcom_icc_node * const lpass_lpicx_noc_nodes[] = {
+ [MASTER_LPASS_PROC] = &qnm_lpinoc_dsp_qns4m,
+ [SLAVE_LPICX_NOC_LPIAON_NOC] = &qns_lpi_aon_noc,
+};
+
+static const struct regmap_config glymur_lpass_lpicx_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x44080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_lpass_lpicx_noc = {
+ .config = &glymur_lpass_lpicx_noc_regmap_config,
+ .nodes = lpass_lpicx_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_lpicx_noc_nodes),
+ .alloc_dyn_id = true,
+};
+
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
+ &bcm_acv,
+ &bcm_mc0,
+};
+
+static struct qcom_icc_node * const mc_virt_nodes[] = {
+ [MASTER_LLCC] = &llcc_mc,
+ [SLAVE_EBI1] = &ebi,
+};
+
+static const struct qcom_icc_desc glymur_mc_virt = {
+ .nodes = mc_virt_nodes,
+ .num_nodes = ARRAY_SIZE(mc_virt_nodes),
+ .bcms = mc_virt_bcms,
+ .num_bcms = ARRAY_SIZE(mc_virt_bcms),
+ .alloc_dyn_id = true,
+};
+
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
+ &bcm_mm0,
+ &bcm_mm1,
+};
+
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
+ [MASTER_AV1_ENC] = &qnm_av1_enc,
+ [MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
+ [MASTER_CAMNOC_ICP] = &qnm_camnoc_icp,
+ [MASTER_CAMNOC_SF] = &qnm_camnoc_sf,
+ [MASTER_EVA] = &qnm_eva,
+ [MASTER_MDP] = &qnm_mdp,
+ [MASTER_CDSP_HCP] = &qnm_vapss_hcp,
+ [MASTER_VIDEO] = &qnm_video,
+ [MASTER_VIDEO_CV_PROC] = &qnm_video_cv_cpu,
+ [MASTER_VIDEO_V_PROC] = &qnm_video_v_cpu,
+ [MASTER_CNOC_MNOC_CFG] = &qsm_mnoc_cfg,
+ [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+ [SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
+ [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+};
+
+static const struct regmap_config glymur_mmss_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x5b800,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_mmss_noc = {
+ .config = &glymur_mmss_noc_regmap_config,
+ .nodes = mmss_noc_nodes,
+ .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
+ .bcms = mmss_noc_bcms,
+ .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
+ .alloc_dyn_id = true,
+};
+
+static struct qcom_icc_node * const nsinoc_nodes[] = {
+ [MASTER_CPUCP] = &xm_cpucp,
+ [SLAVE_NSINOC_SYSTEM_NOC] = &qns_system_noc,
+ [SLAVE_SERVICE_NSINOC] = &srvc_nsinoc,
+};
+
+static const struct regmap_config glymur_nsinoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x14080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_nsinoc = {
+ .config = &glymur_nsinoc_regmap_config,
+ .nodes = nsinoc_nodes,
+ .num_nodes = ARRAY_SIZE(nsinoc_nodes),
+ .alloc_dyn_id = true,
+};
+
+static struct qcom_icc_bcm * const nsp_noc_bcms[] = {
+ &bcm_co0,
+};
+
+static struct qcom_icc_node * const nsp_noc_nodes[] = {
+ [MASTER_CDSP_PROC] = &qnm_nsp,
+ [SLAVE_NSP0_HSC_NOC] = &qns_nsp_hscnoc,
+};
+
+static const struct regmap_config glymur_nsp_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x21280,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_nsp_noc = {
+ .config = &glymur_nsp_noc_regmap_config,
+ .nodes = nsp_noc_nodes,
+ .num_nodes = ARRAY_SIZE(nsp_noc_nodes),
+ .bcms = nsp_noc_bcms,
+ .num_bcms = ARRAY_SIZE(nsp_noc_bcms),
+ .alloc_dyn_id = true,
+};
+
+static struct qcom_icc_node * const oobm_ss_noc_nodes[] = {
+ [MASTER_OOBMSS_SP_PROC] = &xm_mem_sp,
+ [SLAVE_OOBMSS_SNOC] = &qns_oobmss_snoc,
+};
+
+static const struct regmap_config glymur_oobm_ss_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1e080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_oobm_ss_noc = {
+ .config = &glymur_oobm_ss_noc_regmap_config,
+ .nodes = oobm_ss_noc_nodes,
+ .num_nodes = ARRAY_SIZE(oobm_ss_noc_nodes),
+ .alloc_dyn_id = true,
+};
+
+static struct qcom_icc_bcm * const pcie_east_anoc_bcms[] = {
+ &bcm_sn6,
+};
+
+static struct qcom_icc_node * const pcie_east_anoc_nodes[] = {
+ [MASTER_PCIE_EAST_ANOC_CFG] = &qsm_pcie_east_anoc_cfg,
+ [MASTER_PCIE_0] = &xm_pcie_0,
+ [MASTER_PCIE_1] = &xm_pcie_1,
+ [MASTER_PCIE_5] = &xm_pcie_5,
+ [SLAVE_PCIE_EAST_MEM_NOC] = &qns_pcie_east_mem_noc,
+ [SLAVE_SERVICE_PCIE_EAST_AGGRE_NOC] = &srvc_pcie_east_aggre_noc,
+};
+
+static const struct regmap_config glymur_pcie_east_anoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xf300,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_pcie_east_anoc = {
+ .config = &glymur_pcie_east_anoc_regmap_config,
+ .nodes = pcie_east_anoc_nodes,
+ .num_nodes = ARRAY_SIZE(pcie_east_anoc_nodes),
+ .bcms = pcie_east_anoc_bcms,
+ .num_bcms = ARRAY_SIZE(pcie_east_anoc_bcms),
+ .alloc_dyn_id = true,
+ .qos_requires_clocks = true,
+};
+
+static struct qcom_icc_bcm * const pcie_east_slv_noc_bcms[] = {
+ &bcm_sn6,
+};
+
+static struct qcom_icc_node * const pcie_east_slv_noc_nodes[] = {
+ [MASTER_HSCNOC_PCIE_EAST] = &qnm_hscnoc_pcie_east,
+ [MASTER_CNOC_PCIE_EAST_SLAVE_CFG] = &qsm_cnoc_pcie_east_slave_cfg,
+ [SLAVE_HSCNOC_PCIE_EAST_MS_MPU_CFG] = &qhs_hscnoc_pcie_east_ms_mpu_cfg,
+ [SLAVE_SERVICE_PCIE_EAST] = &srvc_pcie_east,
+ [SLAVE_PCIE_0] = &xs_pcie_0,
+ [SLAVE_PCIE_1] = &xs_pcie_1,
+ [SLAVE_PCIE_5] = &xs_pcie_5,
+};
+
+static const struct regmap_config glymur_pcie_east_slv_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xe080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_pcie_east_slv_noc = {
+ .config = &glymur_pcie_east_slv_noc_regmap_config,
+ .nodes = pcie_east_slv_noc_nodes,
+ .num_nodes = ARRAY_SIZE(pcie_east_slv_noc_nodes),
+ .bcms = pcie_east_slv_noc_bcms,
+ .num_bcms = ARRAY_SIZE(pcie_east_slv_noc_bcms),
+ .alloc_dyn_id = true,
+};
+
+static struct qcom_icc_bcm * const pcie_west_anoc_bcms[] = {
+ &bcm_sn6,
+};
+
+static struct qcom_icc_node * const pcie_west_anoc_nodes[] = {
+ [MASTER_PCIE_WEST_ANOC_CFG] = &qsm_pcie_west_anoc_cfg,
+ [MASTER_PCIE_2] = &xm_pcie_2,
+ [MASTER_PCIE_3A] = &xm_pcie_3a,
+ [MASTER_PCIE_3B] = &xm_pcie_3b,
+ [MASTER_PCIE_4] = &xm_pcie_4,
+ [MASTER_PCIE_6] = &xm_pcie_6,
+ [SLAVE_PCIE_WEST_MEM_NOC] = &qns_pcie_west_mem_noc,
+ [SLAVE_SERVICE_PCIE_WEST_AGGRE_NOC] = &srvc_pcie_west_aggre_noc,
+};
+
+static const struct regmap_config glymur_pcie_west_anoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xf580,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_pcie_west_anoc = {
+ .config = &glymur_pcie_west_anoc_regmap_config,
+ .nodes = pcie_west_anoc_nodes,
+ .num_nodes = ARRAY_SIZE(pcie_west_anoc_nodes),
+ .bcms = pcie_west_anoc_bcms,
+ .num_bcms = ARRAY_SIZE(pcie_west_anoc_bcms),
+ .alloc_dyn_id = true,
+ .qos_requires_clocks = true,
+};
+
+static struct qcom_icc_bcm * const pcie_west_slv_noc_bcms[] = {
+ &bcm_sn6,
+};
+
+static struct qcom_icc_node * const pcie_west_slv_noc_nodes[] = {
+ [MASTER_HSCNOC_PCIE_WEST] = &qnm_hscnoc_pcie_west,
+ [MASTER_CNOC_PCIE_WEST_SLAVE_CFG] = &qsm_cnoc_pcie_west_slave_cfg,
+ [SLAVE_HSCNOC_PCIE_WEST_MS_MPU_CFG] = &qhs_hscnoc_pcie_west_ms_mpu_cfg,
+ [SLAVE_SERVICE_PCIE_WEST] = &srvc_pcie_west,
+ [SLAVE_PCIE_2] = &xs_pcie_2,
+ [SLAVE_PCIE_3A] = &xs_pcie_3a,
+ [SLAVE_PCIE_3B] = &xs_pcie_3b,
+ [SLAVE_PCIE_4] = &xs_pcie_4,
+ [SLAVE_PCIE_6] = &xs_pcie_6,
+};
+
+static const struct regmap_config glymur_pcie_west_slv_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xf180,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_pcie_west_slv_noc = {
+ .config = &glymur_pcie_west_slv_noc_regmap_config,
+ .nodes = pcie_west_slv_noc_nodes,
+ .num_nodes = ARRAY_SIZE(pcie_west_slv_noc_nodes),
+ .bcms = pcie_west_slv_noc_bcms,
+ .num_bcms = ARRAY_SIZE(pcie_west_slv_noc_bcms),
+ .alloc_dyn_id = true,
+};
+
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
+ &bcm_sn0,
+ &bcm_sn1,
+ &bcm_sn2,
+ &bcm_sn3,
+ &bcm_sn4,
+};
+
+static struct qcom_icc_node * const system_noc_nodes[] = {
+ [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
+ [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
+ [MASTER_A3NOC_SNOC] = &qnm_aggre3_noc,
+ [MASTER_NSINOC_SNOC] = &qnm_nsi_noc,
+ [MASTER_OOBMSS] = &qnm_oobmss,
+ [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
+};
+
+static const struct regmap_config glymur_system_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1c080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_system_noc = {
+ .config = &glymur_system_noc_regmap_config,
+ .nodes = system_noc_nodes,
+ .num_nodes = ARRAY_SIZE(system_noc_nodes),
+ .bcms = system_noc_bcms,
+ .num_bcms = ARRAY_SIZE(system_noc_bcms),
+ .alloc_dyn_id = true,
+};
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,glymur-aggre1-noc", .data = &glymur_aggre1_noc},
+ { .compatible = "qcom,glymur-aggre2-noc", .data = &glymur_aggre2_noc},
+ { .compatible = "qcom,glymur-aggre3-noc", .data = &glymur_aggre3_noc},
+ { .compatible = "qcom,glymur-aggre4-noc", .data = &glymur_aggre4_noc},
+ { .compatible = "qcom,glymur-clk-virt", .data = &glymur_clk_virt},
+ { .compatible = "qcom,glymur-cnoc-cfg", .data = &glymur_cnoc_cfg},
+ { .compatible = "qcom,glymur-cnoc-main", .data = &glymur_cnoc_main},
+ { .compatible = "qcom,glymur-hscnoc", .data = &glymur_hscnoc},
+ { .compatible = "qcom,glymur-lpass-ag-noc", .data = &glymur_lpass_ag_noc},
+ { .compatible = "qcom,glymur-lpass-lpiaon-noc", .data = &glymur_lpass_lpiaon_noc},
+ { .compatible = "qcom,glymur-lpass-lpicx-noc", .data = &glymur_lpass_lpicx_noc},
+ { .compatible = "qcom,glymur-mc-virt", .data = &glymur_mc_virt},
+ { .compatible = "qcom,glymur-mmss-noc", .data = &glymur_mmss_noc},
+ { .compatible = "qcom,glymur-nsinoc", .data = &glymur_nsinoc},
+ { .compatible = "qcom,glymur-nsp-noc", .data = &glymur_nsp_noc},
+ { .compatible = "qcom,glymur-oobm-ss-noc", .data = &glymur_oobm_ss_noc},
+ { .compatible = "qcom,glymur-pcie-east-anoc", .data = &glymur_pcie_east_anoc},
+ { .compatible = "qcom,glymur-pcie-east-slv-noc", .data = &glymur_pcie_east_slv_noc},
+ { .compatible = "qcom,glymur-pcie-west-anoc", .data = &glymur_pcie_west_anoc},
+ { .compatible = "qcom,glymur-pcie-west-slv-noc", .data = &glymur_pcie_west_slv_noc},
+ { .compatible = "qcom,glymur-system-noc", .data = &glymur_system_noc},
+ { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qcom_icc_rpmh_probe,
+ .remove = qcom_icc_rpmh_remove,
+ .driver = {
+ .name = "qnoc-glymur",
+ .of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
+ },
+};
+
+static int __init qnoc_driver_init(void)
+{
+ return platform_driver_register(&qnoc_driver);
+}
+core_initcall(qnoc_driver_init);
+
+static void __exit qnoc_driver_exit(void)
+{
+ platform_driver_unregister(&qnoc_driver);
+}
+module_exit(qnoc_driver_exit);
+
+MODULE_DESCRIPTION("GLYMUR NoC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/interconnect/qcom/icc-rpmh.h b/drivers/interconnect/qcom/icc-rpmh.h
index bd8d730249b1..307f48412563 100644
--- a/drivers/interconnect/qcom/icc-rpmh.h
+++ b/drivers/interconnect/qcom/icc-rpmh.h
@@ -53,7 +53,7 @@ struct bcm_db {
u8 reserved;
};
-#define MAX_PORTS 2
+#define MAX_PORTS 4
/**
* struct qcom_icc_qosbox - Qualcomm specific QoS config
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 5219d7ddfdaa..a698a2e7ce2a 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -555,6 +555,7 @@ struct gcr3_tbl_info {
};
struct amd_io_pgtable {
+ seqcount_t seqcount; /* Protects root/mode update */
struct io_pgtable pgtbl;
int mode;
u64 *root;
@@ -791,6 +792,11 @@ struct amd_iommu {
u32 flags;
volatile u64 *cmd_sem;
atomic64_t cmd_sem_val;
+ /*
+ * Track the physical address so build_completion_wait() can use it
+ * directly, avoiding special-case checks and handling for kdump.
+ */
+ u64 cmd_sem_paddr;
#ifdef CONFIG_AMD_IOMMU_DEBUGFS
/* DebugFS Info */
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 7b5af6176de9..f2991c11867c 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -406,6 +406,9 @@ static void iommu_set_device_table(struct amd_iommu *iommu)
BUG_ON(iommu->mmio_base == NULL);
+ if (is_kdump_kernel())
+ return;
+
entry = iommu_virt_to_phys(dev_table);
entry |= (dev_table_size >> 12) - 1;
memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
@@ -646,7 +649,10 @@ static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg)
{
- iommu_free_pages(pci_seg->dev_table);
+ if (is_kdump_kernel())
+ memunmap((void *)pci_seg->dev_table);
+ else
+ iommu_free_pages(pci_seg->dev_table);
pci_seg->dev_table = NULL;
}
@@ -710,6 +716,26 @@ static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
pci_seg->alias_table = NULL;
}
+static inline void *iommu_memremap(unsigned long paddr, size_t size)
+{
+ phys_addr_t phys;
+
+ if (!paddr)
+ return NULL;
+
+ /*
+ * Obtain the true physical address in the kdump kernel when SME is
+ * enabled. A previous kernel with SME enabled combined with a kdump
+ * kernel that has SME support disabled is currently not supported.
+ */
+ phys = __sme_clr(paddr);
+
+ if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
+ return (__force void *)ioremap_encrypted(phys, size);
+ else
+ return memremap(phys, size, MEMREMAP_WB);
+}
+
/*
* Allocates the command buffer. This buffer is per AMD IOMMU. We can
* write commands to that buffer later and the IOMMU will execute them
@@ -795,11 +821,16 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu)
BUG_ON(iommu->cmd_buf == NULL);
- entry = iommu_virt_to_phys(iommu->cmd_buf);
- entry |= MMIO_CMD_SIZE_512;
-
- memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
- &entry, sizeof(entry));
+ if (!is_kdump_kernel()) {
+ /*
+ * The command buffer is re-used by the kdump kernel, so setting
+ * the MMIO register is not required.
+ */
+ entry = iommu_virt_to_phys(iommu->cmd_buf);
+ entry |= MMIO_CMD_SIZE_512;
+ memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
+ &entry, sizeof(entry));
+ }
amd_iommu_reset_cmd_buffer(iommu);
}
@@ -850,10 +881,15 @@ static void iommu_enable_event_buffer(struct amd_iommu *iommu)
BUG_ON(iommu->evt_buf == NULL);
- entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
-
- memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
- &entry, sizeof(entry));
+ if (!is_kdump_kernel()) {
+ /*
+ * The event buffer is re-used by the kdump kernel, so setting
+ * the MMIO register is not required.
+ */
+ entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
+ memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
+ &entry, sizeof(entry));
+ }
/* set head and tail to zero manually */
writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
@@ -942,8 +978,91 @@ err_out:
static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
{
iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL, 1);
+ if (!iommu->cmd_sem)
+ return -ENOMEM;
+ iommu->cmd_sem_paddr = iommu_virt_to_phys((void *)iommu->cmd_sem);
+ return 0;
+}
+
+static int __init remap_event_buffer(struct amd_iommu *iommu)
+{
+ u64 paddr;
+
+ pr_info_once("Re-using event buffer from the previous kernel\n");
+ paddr = readq(iommu->mmio_base + MMIO_EVT_BUF_OFFSET) & PM_ADDR_MASK;
+ iommu->evt_buf = iommu_memremap(paddr, EVT_BUFFER_SIZE);
+
+ return iommu->evt_buf ? 0 : -ENOMEM;
+}
+
+static int __init remap_command_buffer(struct amd_iommu *iommu)
+{
+ u64 paddr;
+
+ pr_info_once("Re-using command buffer from the previous kernel\n");
+ paddr = readq(iommu->mmio_base + MMIO_CMD_BUF_OFFSET) & PM_ADDR_MASK;
+ iommu->cmd_buf = iommu_memremap(paddr, CMD_BUFFER_SIZE);
+
+ return iommu->cmd_buf ? 0 : -ENOMEM;
+}
+
+static int __init remap_or_alloc_cwwb_sem(struct amd_iommu *iommu)
+{
+ u64 paddr;
+
+ if (check_feature(FEATURE_SNP)) {
+ /*
+ * When SNP is enabled, the exclusion base register is used for the
+ * completion wait buffer (CWB) address. Read and re-use it.
+ */
+ pr_info_once("Re-using CWB buffers from the previous kernel\n");
+ paddr = readq(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET) & PM_ADDR_MASK;
+ iommu->cmd_sem = iommu_memremap(paddr, PAGE_SIZE);
+ if (!iommu->cmd_sem)
+ return -ENOMEM;
+ iommu->cmd_sem_paddr = paddr;
+ } else {
+ return alloc_cwwb_sem(iommu);
+ }
- return iommu->cmd_sem ? 0 : -ENOMEM;
+ return 0;
+}
+
+static int __init alloc_iommu_buffers(struct amd_iommu *iommu)
+{
+ int ret;
+
+ /*
+ * Reuse/remap the completion-wait, command and event buffers
+ * allocated by the previous kernel for kdump boot.
+ */
+ if (is_kdump_kernel()) {
+ ret = remap_or_alloc_cwwb_sem(iommu);
+ if (ret)
+ return ret;
+
+ ret = remap_command_buffer(iommu);
+ if (ret)
+ return ret;
+
+ ret = remap_event_buffer(iommu);
+ if (ret)
+ return ret;
+ } else {
+ ret = alloc_cwwb_sem(iommu);
+ if (ret)
+ return ret;
+
+ ret = alloc_command_buffer(iommu);
+ if (ret)
+ return ret;
+
+ ret = alloc_event_buffer(iommu);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static void __init free_cwwb_sem(struct amd_iommu *iommu)
@@ -951,6 +1070,38 @@ static void __init free_cwwb_sem(struct amd_iommu *iommu)
if (iommu->cmd_sem)
iommu_free_pages((void *)iommu->cmd_sem);
}
+static void __init unmap_cwwb_sem(struct amd_iommu *iommu)
+{
+ if (iommu->cmd_sem) {
+ if (check_feature(FEATURE_SNP))
+ memunmap((void *)iommu->cmd_sem);
+ else
+ iommu_free_pages((void *)iommu->cmd_sem);
+ }
+}
+
+static void __init unmap_command_buffer(struct amd_iommu *iommu)
+{
+ memunmap((void *)iommu->cmd_buf);
+}
+
+static void __init unmap_event_buffer(struct amd_iommu *iommu)
+{
+ memunmap(iommu->evt_buf);
+}
+
+static void __init free_iommu_buffers(struct amd_iommu *iommu)
+{
+ if (is_kdump_kernel()) {
+ unmap_cwwb_sem(iommu);
+ unmap_command_buffer(iommu);
+ unmap_event_buffer(iommu);
+ } else {
+ free_cwwb_sem(iommu);
+ free_command_buffer(iommu);
+ free_event_buffer(iommu);
+ }
+}
static void iommu_enable_xt(struct amd_iommu *iommu)
{
@@ -982,15 +1133,12 @@ static void set_dte_bit(struct dev_table_entry *dte, u8 bit)
dte->data[i] |= (1UL << _bit);
}
-static bool __copy_device_table(struct amd_iommu *iommu)
+static bool __reuse_device_table(struct amd_iommu *iommu)
{
- u64 int_ctl, int_tab_len, entry = 0;
struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
- struct dev_table_entry *old_devtb = NULL;
- u32 lo, hi, devid, old_devtb_size;
+ u32 lo, hi, old_devtb_size;
phys_addr_t old_devtb_phys;
- u16 dom_id, dte_v, irq_v;
- u64 tmp;
+ u64 entry;
/* Each IOMMU use separate device table with the same size */
lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
@@ -1015,66 +1163,20 @@ static bool __copy_device_table(struct amd_iommu *iommu)
pr_err("The address of old device table is above 4G, not trustworthy!\n");
return false;
}
- old_devtb = (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) && is_kdump_kernel())
- ? (__force void *)ioremap_encrypted(old_devtb_phys,
- pci_seg->dev_table_size)
- : memremap(old_devtb_phys, pci_seg->dev_table_size, MEMREMAP_WB);
- if (!old_devtb)
- return false;
-
- pci_seg->old_dev_tbl_cpy = iommu_alloc_pages_sz(
- GFP_KERNEL | GFP_DMA32, pci_seg->dev_table_size);
+ /*
+ * Re-use the previous kernel's device table for kdump.
+ */
+ pci_seg->old_dev_tbl_cpy = iommu_memremap(old_devtb_phys, pci_seg->dev_table_size);
if (pci_seg->old_dev_tbl_cpy == NULL) {
- pr_err("Failed to allocate memory for copying old device table!\n");
- memunmap(old_devtb);
+ pr_err("Failed to remap memory for reusing old device table!\n");
return false;
}
- for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
- pci_seg->old_dev_tbl_cpy[devid] = old_devtb[devid];
- dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
- dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;
-
- if (dte_v && dom_id) {
- pci_seg->old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
- pci_seg->old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
- /* Reserve the Domain IDs used by previous kernel */
- if (ida_alloc_range(&pdom_ids, dom_id, dom_id, GFP_ATOMIC) != dom_id) {
- pr_err("Failed to reserve domain ID 0x%x\n", dom_id);
- memunmap(old_devtb);
- return false;
- }
- /* If gcr3 table existed, mask it out */
- if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
- tmp = (DTE_GCR3_30_15 | DTE_GCR3_51_31);
- pci_seg->old_dev_tbl_cpy[devid].data[1] &= ~tmp;
- tmp = (DTE_GCR3_14_12 | DTE_FLAG_GV);
- pci_seg->old_dev_tbl_cpy[devid].data[0] &= ~tmp;
- }
- }
-
- irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
- int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
- int_tab_len = old_devtb[devid].data[2] & DTE_INTTABLEN_MASK;
- if (irq_v && (int_ctl || int_tab_len)) {
- if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
- (int_tab_len != DTE_INTTABLEN_512 &&
- int_tab_len != DTE_INTTABLEN_2K)) {
- pr_err("Wrong old irq remapping flag: %#x\n", devid);
- memunmap(old_devtb);
- return false;
- }
-
- pci_seg->old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
- }
- }
- memunmap(old_devtb);
-
return true;
}
-static bool copy_device_table(void)
+static bool reuse_device_table(void)
{
struct amd_iommu *iommu;
struct amd_iommu_pci_seg *pci_seg;
@@ -1082,17 +1184,17 @@ static bool copy_device_table(void)
if (!amd_iommu_pre_enabled)
return false;
- pr_warn("Translation is already enabled - trying to copy translation structures\n");
+ pr_warn("Translation is already enabled - trying to reuse translation structures\n");
/*
* All IOMMUs within PCI segment shares common device table.
- * Hence copy device table only once per PCI segment.
+ * Hence reuse device table only once per PCI segment.
*/
for_each_pci_segment(pci_seg) {
for_each_iommu(iommu) {
if (pci_seg->id != iommu->pci_seg->id)
continue;
- if (!__copy_device_table(iommu))
+ if (!__reuse_device_table(iommu))
return false;
break;
}
@@ -1455,12 +1557,12 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
PCI_FUNC(e->devid));
devid = e->devid;
- for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
- if (alias)
+ if (alias) {
+ for (dev_i = devid_start; dev_i <= devid; ++dev_i)
pci_seg->alias_table[dev_i] = devid_to;
+ set_dev_entry_from_acpi(iommu, devid_to, flags, ext_flags);
}
set_dev_entry_from_acpi_range(iommu, devid_start, devid, flags, ext_flags);
- set_dev_entry_from_acpi(iommu, devid_to, flags, ext_flags);
break;
case IVHD_DEV_SPECIAL: {
u8 handle, type;
@@ -1655,9 +1757,7 @@ static void __init free_sysfs(struct amd_iommu *iommu)
static void __init free_iommu_one(struct amd_iommu *iommu)
{
free_sysfs(iommu);
- free_cwwb_sem(iommu);
- free_command_buffer(iommu);
- free_event_buffer(iommu);
+ free_iommu_buffers(iommu);
amd_iommu_free_ppr_log(iommu);
free_ga_log(iommu);
iommu_unmap_mmio_space(iommu);
@@ -1821,14 +1921,9 @@ static int __init init_iommu_one_late(struct amd_iommu *iommu)
{
int ret;
- if (alloc_cwwb_sem(iommu))
- return -ENOMEM;
-
- if (alloc_command_buffer(iommu))
- return -ENOMEM;
-
- if (alloc_event_buffer(iommu))
- return -ENOMEM;
+ ret = alloc_iommu_buffers(iommu);
+ if (ret)
+ return ret;
iommu->int_enabled = false;
@@ -2778,8 +2873,8 @@ static void early_enable_iommu(struct amd_iommu *iommu)
* This function finally enables all IOMMUs found in the system after
* they have been initialized.
*
- * Or if in kdump kernel and IOMMUs are all pre-enabled, try to copy
- * the old content of device table entries. Not this case or copy failed,
+ * Or, if in a kdump kernel with all IOMMUs pre-enabled, try to reuse
+ * the old device table entries. If that is not the case, or reuse fails,
* just continue as normal kernel does.
*/
static void early_enable_iommus(void)
@@ -2787,18 +2882,25 @@ static void early_enable_iommus(void)
struct amd_iommu *iommu;
struct amd_iommu_pci_seg *pci_seg;
- if (!copy_device_table()) {
+ if (!reuse_device_table()) {
/*
- * If come here because of failure in copying device table from old
+ * If we get here due to failure in reusing the device table from the old
* kernel with all IOMMUs enabled, print error message and try to
* free allocated old_dev_tbl_cpy.
*/
- if (amd_iommu_pre_enabled)
- pr_err("Failed to copy DEV table from previous kernel.\n");
+ if (amd_iommu_pre_enabled) {
+ pr_err("Failed to reuse DEV table from previous kernel.\n");
+ /*
+ * Bail out early if the DEV table from the previous kernel
+ * cannot be remapped/reused while SNP is enabled, as IOMMU
+ * commands would time out without a DEV table and panic the
+ * kdump boot.
+ */
+ BUG_ON(check_feature(FEATURE_SNP));
+ }
for_each_pci_segment(pci_seg) {
if (pci_seg->old_dev_tbl_cpy != NULL) {
- iommu_free_pages(pci_seg->old_dev_tbl_cpy);
+ memunmap((void *)pci_seg->old_dev_tbl_cpy);
pci_seg->old_dev_tbl_cpy = NULL;
}
}
@@ -2808,7 +2910,7 @@ static void early_enable_iommus(void)
early_enable_iommu(iommu);
}
} else {
- pr_info("Copied DEV table from previous kernel.\n");
+ pr_info("Reused DEV table from previous kernel.\n");
for_each_pci_segment(pci_seg) {
iommu_free_pages(pci_seg->dev_table);
@@ -3067,7 +3169,8 @@ static int __init early_amd_iommu_init(void)
if (!boot_cpu_has(X86_FEATURE_CX16)) {
pr_err("Failed to initialize. The CMPXCHG16B feature is required.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
/*
@@ -3638,7 +3741,7 @@ static int __init parse_ivrs_acpihid(char *str)
{
u32 seg = 0, bus, dev, fn;
char *hid, *uid, *p, *addr;
- char acpiid[ACPIID_LEN] = {0};
+ char acpiid[ACPIID_LEN + 1] = { }; /* sized to include the NUL terminator */
int i;
addr = strchr(str, '@');
@@ -3664,7 +3767,7 @@ static int __init parse_ivrs_acpihid(char *str)
/* We have the '@', make it the terminator to get just the acpiid */
*addr++ = 0;
- if (strlen(str) > ACPIID_LEN + 1)
+ if (strlen(str) > ACPIID_LEN)
goto not_found;
if (sscanf(str, "=%s", acpiid) != 1)
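The iommu_memremap() helper added above encodes the whole kdump remapping rule: strip the SME C-bit from whatever physical address was read back from hardware, then pick an encrypted mapping when the host runs with memory encryption active. A minimal stand-alone sketch of the same decision, assuming the usual <linux/io.h> and <linux/cc_platform.h> APIs (the helper name remap_prev_kernel_buf is hypothetical):

#include <linux/io.h>
#include <linux/cc_platform.h>
#include <linux/mem_encrypt.h>

/* Hypothetical helper: remap a buffer left behind by the crashed kernel. */
static void *remap_prev_kernel_buf(u64 reg_paddr, size_t size)
{
	/* Values read from MMIO registers may carry the SME C-bit; clear it. */
	phys_addr_t phys = __sme_clr(reg_paddr);

	if (!phys)
		return NULL;

	/* Encrypted host memory must be remapped with encryption intact. */
	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		return (__force void *)ioremap_encrypted(phys, size);

	return memremap(phys, size, MEMREMAP_WB);
}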
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index a91e71f981ef..70c2f5b1631b 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
+#include <linux/seqlock.h>
#include <asm/barrier.h>
@@ -130,8 +131,11 @@ static bool increase_address_space(struct amd_io_pgtable *pgtable,
*pte = PM_LEVEL_PDE(pgtable->mode, iommu_virt_to_phys(pgtable->root));
+ write_seqcount_begin(&pgtable->seqcount);
pgtable->root = pte;
pgtable->mode += 1;
+ write_seqcount_end(&pgtable->seqcount);
+
amd_iommu_update_and_flush_device_table(domain);
pte = NULL;
@@ -153,6 +157,7 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
{
unsigned long last_addr = address + (page_size - 1);
struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
+ unsigned int seqcount;
int level, end_lvl;
u64 *pte, *page;
@@ -170,8 +175,14 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
}
- level = pgtable->mode - 1;
- pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
+ do {
+ seqcount = read_seqcount_begin(&pgtable->seqcount);
+
+ level = pgtable->mode - 1;
+ pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
+ } while (read_seqcount_retry(&pgtable->seqcount, seqcount));
+
address = PAGE_SIZE_ALIGN(address, page_size);
end_lvl = PAGE_SIZE_LEVEL(page_size);
@@ -249,6 +260,7 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
unsigned long *page_size)
{
int level;
+ unsigned int seqcount;
u64 *pte;
*page_size = 0;
@@ -256,8 +268,12 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
if (address > PM_LEVEL_SIZE(pgtable->mode))
return NULL;
- level = pgtable->mode - 1;
- pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
+ do {
+ seqcount = read_seqcount_begin(&pgtable->seqcount);
+ level = pgtable->mode - 1;
+ pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
+ } while (read_seqcount_retry(&pgtable->seqcount, seqcount));
+
*page_size = PTE_LEVEL_PAGE_SIZE(level);
while (level > 0) {
@@ -541,6 +557,7 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
if (!pgtable->root)
return NULL;
pgtable->mode = PAGE_MODE_3_LEVEL;
+ seqcount_init(&pgtable->seqcount);
cfg->pgsize_bitmap = amd_iommu_pgsize_bitmap;
cfg->ias = IOMMU_IN_ADDR_BIT_SIZE;
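The seqcount added to struct amd_io_pgtable gives lockless walkers a consistent snapshot of the (mode, root) pair while increase_address_space() replaces both. Readers retry if a writer raced with them; writers here are already serialized by the domain lock, which plain seqcount_t requires. A generic sketch of the pattern, with hypothetical names (walk_state, first_pte, grow_one_level):

#include <linux/seqlock.h>

struct walk_state {			/* hypothetical */
	seqcount_t seqcount;
	int mode;
	u64 *root;
};

/* Reader: retry until mode and root were read from the same update. */
static u64 *first_pte(struct walk_state *w, unsigned long address)
{
	unsigned int seq;
	int level;
	u64 *pte;

	do {
		seq = read_seqcount_begin(&w->seqcount);
		level = w->mode - 1;
		pte = &w->root[(address >> (9 * level + 12)) & 0x1ff];
	} while (read_seqcount_retry(&w->seqcount, seq));

	return pte;
}

/* Writer (externally serialized): publish root and mode together. */
static void grow_one_level(struct walk_state *w, u64 *new_root)
{
	write_seqcount_begin(&w->seqcount);
	w->root = new_root;
	w->mode += 1;
	write_seqcount_end(&w->seqcount);
}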
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index eb348c63a8d0..2e1865daa1ce 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -14,6 +14,7 @@
#include <linux/pci-ats.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/dma-map-ops.h>
@@ -265,7 +266,7 @@ static inline int get_acpihid_device_id(struct device *dev,
return -EINVAL;
if (fw_bug)
dev_err_once(dev, FW_BUG "No ACPI device matched UID, but %d device%s matched HID.\n",
- hid_count, hid_count > 1 ? "s" : "");
+ hid_count, str_plural(hid_count));
if (hid_count > 1)
return -EINVAL;
if (entry)
@@ -1195,7 +1196,7 @@ static void build_completion_wait(struct iommu_cmd *cmd,
struct amd_iommu *iommu,
u64 data)
{
- u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem);
+ u64 paddr = iommu->cmd_sem_paddr;
memset(cmd, 0, sizeof(*cmd));
cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
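build_completion_wait() now reads the cached cmd_sem_paddr instead of translating cmd_sem on every command. The cache is not just an optimization: in a kdump kernel, cmd_sem may be a memremap()ed view of the previous kernel's page, for which iommu_virt_to_phys() would return garbage. A sketch of the two cases, mirroring the alloc/remap paths in init.c above:

/* Normal boot: a fresh allocation, so the translation is valid. */
iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL, 1);
iommu->cmd_sem_paddr = iommu_virt_to_phys((void *)iommu->cmd_sem);

/* kdump + SNP: a remapped view; keep the PA read back from hardware. */
paddr = readq(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET) & PM_ADDR_MASK;
iommu->cmd_sem = iommu_memremap(paddr, PAGE_SIZE);
iommu->cmd_sem_paddr = paddr;	/* virt_to_phys() on cmd_sem would lie here */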
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index 190f28d76615..95a4e62b8f63 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -122,6 +122,8 @@
#define DART_T8110_ERROR_ADDR_LO 0x170
#define DART_T8110_ERROR_ADDR_HI 0x174
+#define DART_T8110_ERROR_STREAMS 0x1c0
+
#define DART_T8110_PROTECT 0x200
#define DART_T8110_UNPROTECT 0x204
#define DART_T8110_PROTECT_LOCK 0x208
@@ -133,6 +135,7 @@
#define DART_T8110_TCR 0x1000
#define DART_T8110_TCR_REMAP GENMASK(11, 8)
#define DART_T8110_TCR_REMAP_EN BIT(7)
+#define DART_T8110_TCR_FOUR_LEVEL BIT(3)
#define DART_T8110_TCR_BYPASS_DAPF BIT(2)
#define DART_T8110_TCR_BYPASS_DART BIT(1)
#define DART_T8110_TCR_TRANSLATE_ENABLE BIT(0)
@@ -166,22 +169,23 @@ struct apple_dart_hw {
int max_sid_count;
- u64 lock;
- u64 lock_bit;
+ u32 lock;
+ u32 lock_bit;
- u64 error;
+ u32 error;
- u64 enable_streams;
+ u32 enable_streams;
- u64 tcr;
- u64 tcr_enabled;
- u64 tcr_disabled;
- u64 tcr_bypass;
+ u32 tcr;
+ u32 tcr_enabled;
+ u32 tcr_disabled;
+ u32 tcr_bypass;
+ u32 tcr_4level;
- u64 ttbr;
- u64 ttbr_valid;
- u64 ttbr_addr_field_shift;
- u64 ttbr_shift;
+ u32 ttbr;
+ u32 ttbr_valid;
+ u32 ttbr_addr_field_shift;
+ u32 ttbr_shift;
int ttbr_count;
};
@@ -217,6 +221,7 @@ struct apple_dart {
u32 pgsize;
u32 num_streams;
u32 supports_bypass : 1;
+ u32 four_level : 1;
struct iommu_group *sid2group[DART_MAX_STREAMS];
struct iommu_device iommu;
@@ -305,13 +310,19 @@ static struct apple_dart_domain *to_dart_domain(struct iommu_domain *dom)
}
static void
-apple_dart_hw_enable_translation(struct apple_dart_stream_map *stream_map)
+apple_dart_hw_enable_translation(struct apple_dart_stream_map *stream_map, int levels)
{
struct apple_dart *dart = stream_map->dart;
+ u32 tcr = dart->hw->tcr_enabled;
int sid;
+ if (levels == 4)
+ tcr |= dart->hw->tcr_4level;
+
+ WARN_ON(levels != 3 && levels != 4);
+ WARN_ON(levels == 4 && !dart->four_level);
for_each_set_bit(sid, stream_map->sidmap, dart->num_streams)
- writel(dart->hw->tcr_enabled, dart->regs + DART_TCR(dart, sid));
+ writel(tcr, dart->regs + DART_TCR(dart, sid));
}
static void apple_dart_hw_disable_dma(struct apple_dart_stream_map *stream_map)
@@ -569,7 +580,8 @@ apple_dart_setup_translation(struct apple_dart_domain *domain,
for (; i < stream_map->dart->hw->ttbr_count; ++i)
apple_dart_hw_clear_ttbr(stream_map, i);
- apple_dart_hw_enable_translation(stream_map);
+ apple_dart_hw_enable_translation(stream_map,
+ pgtbl_cfg->apple_dart_cfg.n_levels);
stream_map->dart->hw->invalidate_tlb(stream_map);
}
@@ -614,7 +626,7 @@ static int apple_dart_finalize_domain(struct apple_dart_domain *dart_domain,
dart_domain->domain.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
dart_domain->domain.geometry.aperture_start = 0;
dart_domain->domain.geometry.aperture_end =
- (dma_addr_t)DMA_BIT_MASK(dart->ias);
+ (dma_addr_t)DMA_BIT_MASK(pgtbl_cfg.ias);
dart_domain->domain.geometry.force_aperture = true;
dart_domain->finalized = true;
@@ -807,6 +819,8 @@ static int apple_dart_of_xlate(struct device *dev,
if (cfg_dart) {
if (cfg_dart->pgsize != dart->pgsize)
return -EINVAL;
+ if (cfg_dart->ias != dart->ias)
+ return -EINVAL;
}
cfg->supports_bypass &= dart->supports_bypass;
@@ -1077,6 +1091,9 @@ static irqreturn_t apple_dart_t8110_irq(int irq, void *dev)
error, stream_idx, error_code, fault_name, addr);
writel(error, dart->regs + DART_T8110_ERROR);
+ for (int i = 0; i < BITS_TO_U32(dart->num_streams); i++)
+ writel(U32_MAX, dart->regs + DART_T8110_ERROR_STREAMS + 4 * i);
+
return IRQ_HANDLED;
}
@@ -1137,6 +1154,7 @@ static int apple_dart_probe(struct platform_device *pdev)
dart->ias = FIELD_GET(DART_T8110_PARAMS3_VA_WIDTH, dart_params[2]);
dart->oas = FIELD_GET(DART_T8110_PARAMS3_PA_WIDTH, dart_params[2]);
dart->num_streams = FIELD_GET(DART_T8110_PARAMS4_NUM_SIDS, dart_params[3]);
+ dart->four_level = dart->ias > 36;
break;
}
@@ -1169,9 +1187,9 @@ static int apple_dart_probe(struct platform_device *pdev)
dev_info(
&pdev->dev,
- "DART [pagesize %x, %d streams, bypass support: %d, bypass forced: %d] initialized\n",
+ "DART [pagesize %x, %d streams, bypass support: %d, bypass forced: %d, AS %d -> %d] initialized\n",
dart->pgsize, dart->num_streams, dart->supports_bypass,
- dart->pgsize > PAGE_SIZE);
+ dart->pgsize > PAGE_SIZE, dart->ias, dart->oas);
return 0;
err_sysfs_remove:
@@ -1292,6 +1310,7 @@ static const struct apple_dart_hw apple_dart_hw_t8110 = {
.tcr_enabled = DART_T8110_TCR_TRANSLATE_ENABLE,
.tcr_disabled = 0,
.tcr_bypass = DART_T8110_TCR_BYPASS_DAPF | DART_T8110_TCR_BYPASS_DART,
+ .tcr_4level = DART_T8110_TCR_FOUR_LEVEL,
.ttbr = DART_T8110_TTBR,
.ttbr_valid = DART_T8110_TTBR_VALID,
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 5968043ac802..2a8b46b948f0 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -2997,9 +2997,9 @@ void arm_smmu_attach_commit(struct arm_smmu_attach_state *state)
/* ATS is being switched off, invalidate the entire ATC */
arm_smmu_atc_inv_master(master, IOMMU_NO_PASID);
}
- master->ats_enabled = state->ats_enabled;
arm_smmu_remove_master_domain(master, state->old_domain, state->ssid);
+ master->ats_enabled = state->ats_enabled;
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
diff --git a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
index be1aaaf8cd17..378104cd395e 100644
--- a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
+++ b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
@@ -301,9 +301,11 @@ static void tegra241_vintf_user_handle_error(struct tegra241_vintf *vintf)
struct iommu_vevent_tegra241_cmdqv vevent_data;
int i;
- for (i = 0; i < LVCMDQ_ERR_MAP_NUM_64; i++)
- vevent_data.lvcmdq_err_map[i] =
- readq_relaxed(REG_VINTF(vintf, LVCMDQ_ERR_MAP_64(i)));
+ for (i = 0; i < LVCMDQ_ERR_MAP_NUM_64; i++) {
+ u64 err = readq_relaxed(REG_VINTF(vintf, LVCMDQ_ERR_MAP_64(i)));
+
+ vevent_data.lvcmdq_err_map[i] = cpu_to_le64(err);
+ }
iommufd_viommu_report_event(viommu, IOMMU_VEVENTQ_TYPE_TEGRA241_CMDQV,
&vevent_data, sizeof(vevent_data));
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index ea2ef53bd4fe..7944a3af4545 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -724,7 +724,12 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, struct device *dev
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
unsigned long attrs)
{
- int prot = coherent ? IOMMU_CACHE : 0;
+ int prot;
+
+ if (attrs & DMA_ATTR_MMIO)
+ prot = IOMMU_MMIO;
+ else
+ prot = coherent ? IOMMU_CACHE : 0;
if (attrs & DMA_ATTR_PRIVILEGED)
prot |= IOMMU_PRIV;
@@ -1190,11 +1195,9 @@ static inline size_t iova_unaligned(struct iova_domain *iovad, phys_addr_t phys,
return iova_offset(iovad, phys | size);
}
-dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- unsigned long attrs)
+dma_addr_t iommu_dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
{
- phys_addr_t phys = page_to_phys(page) + offset;
bool coherent = dev_is_dma_coherent(dev);
int prot = dma_info_to_prot(dir, coherent, attrs);
struct iommu_domain *domain = iommu_get_dma_domain(dev);
@@ -1208,27 +1211,34 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
*/
if (dev_use_swiotlb(dev, size, dir) &&
iova_unaligned(iovad, phys, size)) {
+ if (attrs & DMA_ATTR_MMIO)
+ return DMA_MAPPING_ERROR;
+
phys = iommu_dma_map_swiotlb(dev, phys, size, dir, attrs);
if (phys == (phys_addr_t)DMA_MAPPING_ERROR)
return DMA_MAPPING_ERROR;
}
- if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ if (!coherent && !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
arch_sync_dma_for_device(phys, size, dir);
iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
- if (iova == DMA_MAPPING_ERROR)
+ if (iova == DMA_MAPPING_ERROR && !(attrs & DMA_ATTR_MMIO))
swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
return iova;
}
-void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+void iommu_dma_unmap_phys(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- struct iommu_domain *domain = iommu_get_dma_domain(dev);
phys_addr_t phys;
- phys = iommu_iova_to_phys(domain, dma_handle);
+ if (attrs & DMA_ATTR_MMIO) {
+ __iommu_dma_unmap(dev, dma_handle, size);
+ return;
+ }
+
+ phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
if (WARN_ON(!phys))
return;
@@ -1341,7 +1351,7 @@ static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *s
int i;
for_each_sg(sg, s, nents, i)
- iommu_dma_unmap_page(dev, sg_dma_address(s),
+ iommu_dma_unmap_phys(dev, sg_dma_address(s),
sg_dma_len(s), dir, attrs);
}
@@ -1354,8 +1364,8 @@ static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
sg_dma_mark_swiotlb(sg);
for_each_sg(sg, s, nents, i) {
- sg_dma_address(s) = iommu_dma_map_page(dev, sg_page(s),
- s->offset, s->length, dir, attrs);
+ sg_dma_address(s) = iommu_dma_map_phys(dev, sg_phys(s),
+ s->length, dir, attrs);
if (sg_dma_address(s) == DMA_MAPPING_ERROR)
goto out_unmap;
sg_dma_len(s) = s->length;
@@ -1546,20 +1556,6 @@ void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
__iommu_dma_unmap(dev, start, end - start);
}
-dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
- size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
- return __iommu_dma_map(dev, phys, size,
- dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
- dma_get_mask(dev));
-}
-
-void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
- __iommu_dma_unmap(dev, handle, size);
-}
-
static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
size_t alloc_size = PAGE_ALIGN(size);
@@ -1838,12 +1834,13 @@ static int __dma_iova_link(struct device *dev, dma_addr_t addr,
unsigned long attrs)
{
bool coherent = dev_is_dma_coherent(dev);
+ int prot = dma_info_to_prot(dir, coherent, attrs);
- if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ if (!coherent && !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
arch_sync_dma_for_device(phys, size, dir);
return iommu_map_nosync(iommu_get_dma_domain(dev), addr, phys, size,
- dma_info_to_prot(dir, coherent, attrs), GFP_ATOMIC);
+ prot, GFP_ATOMIC);
}
static int iommu_dma_iova_bounce_and_link(struct device *dev, dma_addr_t addr,
@@ -1949,9 +1946,13 @@ int dma_iova_link(struct device *dev, struct dma_iova_state *state,
return -EIO;
if (dev_use_swiotlb(dev, size, dir) &&
- iova_unaligned(iovad, phys, size))
+ iova_unaligned(iovad, phys, size)) {
+ if (attrs & DMA_ATTR_MMIO)
+ return -EPERM;
+
return iommu_dma_iova_link_swiotlb(dev, state, phys, offset,
size, dir, attrs);
+ }
return __dma_iova_link(dev, state->addr + offset - iova_start_pad,
phys - iova_start_pad,
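With DMA_ATTR_MMIO understood by dma_info_to_prot() and iommu_dma_map_phys(), the dedicated iommu_dma_map_resource()/iommu_dma_unmap_resource() pair removed above becomes unnecessary: callers pass the attribute instead and get IOMMU_MMIO prot, no CPU cache maintenance and no swiotlb bouncing. A caller-side sketch (the wrapper names map_peer_bar/unmap_peer_bar are hypothetical):

#include <linux/dma-mapping.h>

/* Hypothetical wrapper: map a peer device's BAR for DMA via the IOMMU. */
static dma_addr_t map_peer_bar(struct device *dev, phys_addr_t bar_phys,
			       size_t len)
{
	return iommu_dma_map_phys(dev, bar_phys, len, DMA_BIDIRECTIONAL,
				  DMA_ATTR_MMIO);
}

static void unmap_peer_bar(struct device *dev, dma_addr_t addr, size_t len)
{
	iommu_dma_unmap_phys(dev, addr, len, DMA_BIDIRECTIONAL,
			     DMA_ATTR_MMIO);
}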
diff --git a/drivers/iommu/intel/debugfs.c b/drivers/iommu/intel/debugfs.c
index affbf4a1558d..617fd81a80f0 100644
--- a/drivers/iommu/intel/debugfs.c
+++ b/drivers/iommu/intel/debugfs.c
@@ -62,8 +62,6 @@ static const struct iommu_regset iommu_regs_64[] = {
IOMMU_REGSET_ENTRY(CAP),
IOMMU_REGSET_ENTRY(ECAP),
IOMMU_REGSET_ENTRY(RTADDR),
- IOMMU_REGSET_ENTRY(CCMD),
- IOMMU_REGSET_ENTRY(AFLOG),
IOMMU_REGSET_ENTRY(PHMBASE),
IOMMU_REGSET_ENTRY(PHMLIMIT),
IOMMU_REGSET_ENTRY(IQH),
@@ -435,8 +433,21 @@ static int domain_translation_struct_show(struct seq_file *m,
}
pgd &= VTD_PAGE_MASK;
} else { /* legacy mode */
- pgd = context->lo & VTD_PAGE_MASK;
- agaw = context->hi & 7;
+ u8 tt = (u8)(context->lo & GENMASK_ULL(3, 2)) >> 2;
+
+ /*
+ * Get the page table pointer (SSPTPTR) according to the
+ * Translation Type (TT).
+ */
+ switch (tt) {
+ case CONTEXT_TT_MULTI_LEVEL:
+ case CONTEXT_TT_DEV_IOTLB:
+ pgd = context->lo & VTD_PAGE_MASK;
+ agaw = context->hi & 7;
+ break;
+ default:
+ goto iommu_unlock;
+ }
}
seq_printf(m, "Device %04x:%02x:%02x.%x ",
@@ -648,17 +659,11 @@ DEFINE_SHOW_ATTRIBUTE(ir_translation_struct);
static void latency_show_one(struct seq_file *m, struct intel_iommu *iommu,
struct dmar_drhd_unit *drhd)
{
- int ret;
-
seq_printf(m, "IOMMU: %s Register Base Address: %llx\n",
iommu->name, drhd->reg_base_addr);
- ret = dmar_latency_snapshot(iommu, debug_buf, DEBUG_BUFFER_SIZE);
- if (ret < 0)
- seq_puts(m, "Failed to get latency snapshot");
- else
- seq_puts(m, debug_buf);
- seq_puts(m, "\n");
+ dmar_latency_snapshot(iommu, debug_buf, DEBUG_BUFFER_SIZE);
+ seq_printf(m, "%s\n", debug_buf);
}
static int latency_show(struct seq_file *m, void *v)
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 9c3ab9d9f69a..e236c7ec221f 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -1575,6 +1575,10 @@ static void switch_to_super_page(struct dmar_domain *domain,
unsigned long lvl_pages = lvl_to_nr_pages(level);
struct dma_pte *pte = NULL;
+ if (WARN_ON(!IS_ALIGNED(start_pfn, lvl_pages) ||
+ !IS_ALIGNED(end_pfn + 1, lvl_pages)))
+ return;
+
while (start_pfn <= end_pfn) {
if (!pte)
pte = pfn_to_dma_pte(domain, start_pfn, &level,
@@ -1650,7 +1654,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
unsigned long pages_to_remove;
pteval |= DMA_PTE_LARGE_PAGE;
- pages_to_remove = min_t(unsigned long, nr_pages,
+ pages_to_remove = min_t(unsigned long,
+ round_down(nr_pages, lvl_pages),
nr_pte_to_next_page(pte) * lvl_pages);
end_pfn = iov_pfn + pages_to_remove - 1;
switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
@@ -3812,7 +3817,7 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
}
if (info->ats_supported && ecap_prs(iommu->ecap) &&
- pci_pri_supported(pdev))
+ ecap_pds(iommu->ecap) && pci_pri_supported(pdev))
info->pri_supported = 1;
}
}
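The round_down() in __domain_mapping() pairs with the new alignment check in switch_to_super_page(): only whole multiples of lvl_pages may be promoted, so end_pfn + 1 stays level-aligned. Concrete numbers make this easy to verify (stand-alone sketch; the values are illustrative):

#include <stdio.h>

int main(void)
{
	/* Level-2 superpage with 4K base pages: 512 * 4K = 2M. */
	unsigned long lvl_pages = 512;
	unsigned long nr_pages = 700;	/* pages remaining to map */
	unsigned long iov_pfn = 0x200;	/* already lvl_pages-aligned */

	/* round_down(700, 512) = 512: drop the partial superpage tail. */
	unsigned long pages_to_remove = nr_pages - (nr_pages % lvl_pages);
	unsigned long end_pfn = iov_pfn + pages_to_remove - 1;

	/* (end_pfn + 1) % lvl_pages == 0, so the WARN_ON stays quiet. */
	printf("pages_to_remove=%lu end_pfn=0x%lx\n", pages_to_remove, end_pfn);
	return 0;
}

The real code additionally clamps pages_to_remove to the PTEs left in the current page-table page via nr_pte_to_next_page().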
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index d09b92871659..3056583d7f56 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -77,7 +77,6 @@
#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */
#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */
#define DMAR_FEUADDR_REG 0x44 /* Upper address register */
-#define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */
#define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */
#define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */
#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */
@@ -173,8 +172,6 @@
#define cap_pgsel_inv(c) (((c) >> 39) & 1)
#define cap_super_page_val(c) (((c) >> 34) & 0xf)
-#define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \
- * OFFSET_STRIDE) + 21)
#define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16)
#define cap_max_fault_reg_offset(c) \
@@ -462,7 +459,6 @@ enum {
#define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32)
/* Page group response descriptor QW1 */
-#define QI_PGRP_LPIG(x) (((u64)(x)) << 2)
#define QI_PGRP_IDX(idx) (((u64)(idx)) << 3)
@@ -541,7 +537,8 @@ enum {
#define pasid_supported(iommu) (sm_supported(iommu) && \
ecap_pasid((iommu)->ecap))
#define ssads_supported(iommu) (sm_supported(iommu) && \
- ecap_slads((iommu)->ecap))
+ ecap_slads((iommu)->ecap) && \
+ ecap_smpwc(iommu->ecap))
#define nested_supported(iommu) (sm_supported(iommu) && \
ecap_nest((iommu)->ecap))
diff --git a/drivers/iommu/intel/perf.c b/drivers/iommu/intel/perf.c
index adc4de6bbd88..dceeadc3ee7c 100644
--- a/drivers/iommu/intel/perf.c
+++ b/drivers/iommu/intel/perf.c
@@ -113,7 +113,7 @@ static char *latency_type_names[] = {
" svm_prq"
};
-int dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size)
+void dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size)
{
struct latency_statistic *lstat = iommu->perf_statistic;
unsigned long flags;
@@ -122,7 +122,7 @@ int dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size)
memset(str, 0, size);
for (i = 0; i < COUNTS_NUM; i++)
- bytes += snprintf(str + bytes, size - bytes,
+ bytes += scnprintf(str + bytes, size - bytes,
"%s", latency_counter_names[i]);
spin_lock_irqsave(&latency_lock, flags);
@@ -130,7 +130,7 @@ int dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size)
if (!dmar_latency_enabled(iommu, i))
continue;
- bytes += snprintf(str + bytes, size - bytes,
+ bytes += scnprintf(str + bytes, size - bytes,
"\n%s", latency_type_names[i]);
for (j = 0; j < COUNTS_NUM; j++) {
@@ -156,11 +156,9 @@ int dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size)
break;
}
- bytes += snprintf(str + bytes, size - bytes,
+ bytes += scnprintf(str + bytes, size - bytes,
"%12lld", val);
}
}
spin_unlock_irqrestore(&latency_lock, flags);
-
- return bytes;
}
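The snprintf()-to-scnprintf() switch is more than a rename: snprintf() returns the length that would have been written, so after truncation the accumulating 'bytes' can exceed 'size' and the next 'size - bytes' underflows to a huge value. scnprintf() returns the number of characters actually stored, keeping the arithmetic safe. A minimal sketch of the accumulation pattern:

char buf[32];
size_t off = 0;

/* scnprintf() never returns more than the space that was left. */
off += scnprintf(buf + off, sizeof(buf) - off, "%s", "  dma_map");
off += scnprintf(buf + off, sizeof(buf) - off, "%12lld", 123456789012LL);

/*
 * With snprintf(), a truncated first call could leave off > sizeof(buf),
 * so sizeof(buf) - off would wrap around to a huge size_t on the next call.
 */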
diff --git a/drivers/iommu/intel/perf.h b/drivers/iommu/intel/perf.h
index df9a36942d64..1d4baad7e852 100644
--- a/drivers/iommu/intel/perf.h
+++ b/drivers/iommu/intel/perf.h
@@ -40,7 +40,7 @@ void dmar_latency_disable(struct intel_iommu *iommu, enum latency_type type);
bool dmar_latency_enabled(struct intel_iommu *iommu, enum latency_type type);
void dmar_latency_update(struct intel_iommu *iommu, enum latency_type type,
u64 latency);
-int dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size);
+void dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size);
#else
static inline int
dmar_latency_enable(struct intel_iommu *iommu, enum latency_type type)
@@ -64,9 +64,8 @@ dmar_latency_update(struct intel_iommu *iommu, enum latency_type type, u64 laten
{
}
-static inline int
+static inline void
dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size)
{
- return 0;
}
#endif /* CONFIG_DMAR_PERF */
diff --git a/drivers/iommu/intel/prq.c b/drivers/iommu/intel/prq.c
index 52570e42a14c..ff63c228e6e1 100644
--- a/drivers/iommu/intel/prq.c
+++ b/drivers/iommu/intel/prq.c
@@ -151,8 +151,7 @@ static void handle_bad_prq_event(struct intel_iommu *iommu,
QI_PGRP_PASID_P(req->pasid_present) |
QI_PGRP_RESP_CODE(result) |
QI_PGRP_RESP_TYPE;
- desc.qw1 = QI_PGRP_IDX(req->prg_index) |
- QI_PGRP_LPIG(req->lpig);
+ desc.qw1 = QI_PGRP_IDX(req->prg_index);
qi_submit_sync(iommu, &desc, 1, 0);
}
@@ -379,19 +378,17 @@ void intel_iommu_page_response(struct device *dev, struct iopf_fault *evt,
struct iommu_fault_page_request *prm;
struct qi_desc desc;
bool pasid_present;
- bool last_page;
u16 sid;
prm = &evt->fault.prm;
sid = PCI_DEVID(bus, devfn);
pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
- last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
desc.qw0 = QI_PGRP_PASID(prm->pasid) | QI_PGRP_DID(sid) |
QI_PGRP_PASID_P(pasid_present) |
QI_PGRP_RESP_CODE(msg->code) |
QI_PGRP_RESP_TYPE;
- desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page);
+ desc.qw1 = QI_PGRP_IDX(prm->grpid);
desc.qw2 = 0;
desc.qw3 = 0;
diff --git a/drivers/iommu/io-pgtable-dart.c b/drivers/iommu/io-pgtable-dart.c
index 679bda104797..54d287cc0dd1 100644
--- a/drivers/iommu/io-pgtable-dart.c
+++ b/drivers/iommu/io-pgtable-dart.c
@@ -27,8 +27,9 @@
#define DART1_MAX_ADDR_BITS 36
-#define DART_MAX_TABLES 4
-#define DART_LEVELS 2
+#define DART_MAX_TABLE_BITS 2
+#define DART_MAX_TABLES BIT(DART_MAX_TABLE_BITS)
+#define DART_MAX_LEVELS 4 /* Includes TTBR level */
/* Struct accessors */
#define io_pgtable_to_data(x) \
@@ -68,6 +69,7 @@
struct dart_io_pgtable {
struct io_pgtable iop;
+ int levels;
int tbl_bits;
int bits_per_level;
@@ -156,44 +158,45 @@ static dart_iopte dart_install_table(dart_iopte *table,
return old;
}
-static int dart_get_table(struct dart_io_pgtable *data, unsigned long iova)
+static int dart_get_index(struct dart_io_pgtable *data, unsigned long iova, int level)
{
- return (iova >> (3 * data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
- ((1 << data->tbl_bits) - 1);
+ return (iova >> (level * data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
+ ((1 << data->bits_per_level) - 1);
}
-static int dart_get_l1_index(struct dart_io_pgtable *data, unsigned long iova)
-{
-
- return (iova >> (2 * data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
- ((1 << data->bits_per_level) - 1);
-}
-
-static int dart_get_l2_index(struct dart_io_pgtable *data, unsigned long iova)
+static int dart_get_last_index(struct dart_io_pgtable *data, unsigned long iova)
{
return (iova >> (data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
((1 << data->bits_per_level) - 1);
}
-static dart_iopte *dart_get_l2(struct dart_io_pgtable *data, unsigned long iova)
+static dart_iopte *dart_get_last(struct dart_io_pgtable *data, unsigned long iova)
{
dart_iopte pte, *ptep;
- int tbl = dart_get_table(data, iova);
+ int level = data->levels;
+ int tbl = dart_get_index(data, iova, level);
+
+ if (tbl >= (1 << data->tbl_bits))
+ return NULL;
ptep = data->pgd[tbl];
if (!ptep)
return NULL;
- ptep += dart_get_l1_index(data, iova);
- pte = READ_ONCE(*ptep);
+ while (--level > 1) {
+ ptep += dart_get_index(data, iova, level);
+ pte = READ_ONCE(*ptep);
- /* Valid entry? */
- if (!pte)
- return NULL;
+ /* Valid entry? */
+ if (!pte)
+ return NULL;
- /* Deref to get level 2 table */
- return iopte_deref(pte, data);
+ /* Deref to get next level table */
+ ptep = iopte_deref(pte, data);
+ }
+
+ return ptep;
}
static dart_iopte dart_prot_to_pte(struct dart_io_pgtable *data,
@@ -230,6 +233,7 @@ static int dart_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
int ret = 0, tbl, num_entries, max_entries, map_idx_start;
dart_iopte pte, *cptep, *ptep;
dart_iopte prot;
+ int level = data->levels;
if (WARN_ON(pgsize != cfg->pgsize_bitmap))
return -EINVAL;
@@ -240,31 +244,36 @@ static int dart_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
return -EINVAL;
- tbl = dart_get_table(data, iova);
+ tbl = dart_get_index(data, iova, level);
+
+ if (tbl >= (1 << data->tbl_bits))
+ return -ENOMEM;
ptep = data->pgd[tbl];
- ptep += dart_get_l1_index(data, iova);
- pte = READ_ONCE(*ptep);
+ while (--level > 1) {
+ ptep += dart_get_index(data, iova, level);
+ pte = READ_ONCE(*ptep);
- /* no L2 table present */
- if (!pte) {
- cptep = iommu_alloc_pages_sz(gfp, tblsz);
- if (!cptep)
- return -ENOMEM;
+ /* no table present */
+ if (!pte) {
+ cptep = iommu_alloc_pages_sz(gfp, tblsz);
+ if (!cptep)
+ return -ENOMEM;
- pte = dart_install_table(cptep, ptep, 0, data);
- if (pte)
- iommu_free_pages(cptep);
+ pte = dart_install_table(cptep, ptep, 0, data);
+ if (pte)
+ iommu_free_pages(cptep);
- /* L2 table is present (now) */
- pte = READ_ONCE(*ptep);
- }
+ /* Next level table is present (now) */
+ pte = READ_ONCE(*ptep);
+ }
- ptep = iopte_deref(pte, data);
+ ptep = iopte_deref(pte, data);
+ }
/* install a leaf entries into L2 table */
prot = dart_prot_to_pte(data, iommu_prot);
- map_idx_start = dart_get_l2_index(data, iova);
+ map_idx_start = dart_get_last_index(data, iova);
max_entries = DART_PTES_PER_TABLE(data) - map_idx_start;
num_entries = min_t(int, pgcount, max_entries);
ptep += map_idx_start;
@@ -293,13 +302,13 @@ static size_t dart_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
if (WARN_ON(pgsize != cfg->pgsize_bitmap || !pgcount))
return 0;
- ptep = dart_get_l2(data, iova);
+ ptep = dart_get_last(data, iova);
/* Valid L2 IOPTE pointer? */
if (WARN_ON(!ptep))
return 0;
- unmap_idx_start = dart_get_l2_index(data, iova);
+ unmap_idx_start = dart_get_last_index(data, iova);
ptep += unmap_idx_start;
max_entries = DART_PTES_PER_TABLE(data) - unmap_idx_start;
@@ -330,13 +339,13 @@ static phys_addr_t dart_iova_to_phys(struct io_pgtable_ops *ops,
struct dart_io_pgtable *data = io_pgtable_ops_to_data(ops);
dart_iopte pte, *ptep;
- ptep = dart_get_l2(data, iova);
+ ptep = dart_get_last(data, iova);
/* Valid L2 IOPTE pointer? */
if (!ptep)
return 0;
- ptep += dart_get_l2_index(data, iova);
+ ptep += dart_get_last_index(data, iova);
pte = READ_ONCE(*ptep);
/* Found translation */
@@ -353,21 +362,37 @@ static struct dart_io_pgtable *
dart_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
struct dart_io_pgtable *data;
- int tbl_bits, bits_per_level, va_bits, pg_shift;
+ int levels, max_tbl_bits, tbl_bits, bits_per_level, va_bits, pg_shift;
+
+ /*
+ * Old 4K page DARTs can use up to 4 top-level tables.
+ * Newer ones only ever use a maximum of 1.
+ */
+ if (cfg->pgsize_bitmap == SZ_4K)
+ max_tbl_bits = DART_MAX_TABLE_BITS;
+ else
+ max_tbl_bits = 0;
pg_shift = __ffs(cfg->pgsize_bitmap);
bits_per_level = pg_shift - ilog2(sizeof(dart_iopte));
va_bits = cfg->ias - pg_shift;
- tbl_bits = max_t(int, 0, va_bits - (bits_per_level * DART_LEVELS));
- if ((1 << tbl_bits) > DART_MAX_TABLES)
+ levels = max_t(int, 2, (va_bits - max_tbl_bits + bits_per_level - 1) / bits_per_level);
+
+ if (levels > (DART_MAX_LEVELS - 1))
+ return NULL;
+
+ tbl_bits = max_t(int, 0, va_bits - (bits_per_level * levels));
+
+ if (tbl_bits > max_tbl_bits)
return NULL;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return NULL;
+ data->levels = levels + 1; /* Table level counts as one level */
data->tbl_bits = tbl_bits;
data->bits_per_level = bits_per_level;
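A quick sanity check of the sizing arithmetic, with illustrative values that are not taken from the patch:

/*
 * 4 KiB pages, 8-byte dart_iopte, cfg->ias = 38,
 * DART_MAX_TABLE_BITS assumed to be 2 (up to 4 top-level tables):
 *
 *   pg_shift       = 12
 *   bits_per_level = 12 - ilog2(8) = 9
 *   va_bits        = 38 - 12 = 26
 *   levels         = max(2, DIV_ROUND_UP(26 - 2, 9)) = 3
 *   tbl_bits       = max(0, 26 - 3 * 9) = 0
 *
 * i.e. a three-level walk behind a single top-level table, stored as
 * data->levels = 4 since the table level counts as one.
 */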
@@ -403,6 +428,7 @@ apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
return NULL;
cfg->apple_dart_cfg.n_ttbrs = 1 << data->tbl_bits;
+ cfg->apple_dart_cfg.n_levels = data->levels;
for (i = 0; i < cfg->apple_dart_cfg.n_ttbrs; ++i) {
data->pgd[i] =
@@ -422,24 +448,31 @@ out_free_data:
return NULL;
}
-static void apple_dart_free_pgtable(struct io_pgtable *iop)
+static void apple_dart_free_pgtables(struct dart_io_pgtable *data, dart_iopte *ptep, int level)
{
- struct dart_io_pgtable *data = io_pgtable_to_data(iop);
- dart_iopte *ptep, *end;
- int i;
+ dart_iopte *end;
+ dart_iopte *start = ptep;
- for (i = 0; i < (1 << data->tbl_bits) && data->pgd[i]; ++i) {
- ptep = data->pgd[i];
+ if (level > 1) {
end = (void *)ptep + DART_GRANULE(data);
while (ptep != end) {
dart_iopte pte = *ptep++;
if (pte)
- iommu_free_pages(iopte_deref(pte, data));
+ apple_dart_free_pgtables(data, iopte_deref(pte, data), level - 1);
}
- iommu_free_pages(data->pgd[i]);
}
+ iommu_free_pages(start);
+}
+
+static void apple_dart_free_pgtable(struct io_pgtable *iop)
+{
+ struct dart_io_pgtable *data = io_pgtable_to_data(iop);
+ int i;
+
+ for (i = 0; i < (1 << data->tbl_bits) && data->pgd[i]; ++i)
+ apple_dart_free_pgtables(data, data->pgd[i], data->levels - 1);
kfree(data);
}
diff --git a/drivers/iommu/iommu-priv.h b/drivers/iommu/iommu-priv.h
index e236b932e766..c95394cd03a7 100644
--- a/drivers/iommu/iommu-priv.h
+++ b/drivers/iommu/iommu-priv.h
@@ -37,6 +37,8 @@ void iommu_device_unregister_bus(struct iommu_device *iommu,
const struct bus_type *bus,
struct notifier_block *nb);
+int iommu_mock_device_add(struct device *dev, struct iommu_device *iommu);
+
struct iommu_attach_handle *iommu_attach_handle_get(struct iommu_group *group,
ioasid_t pasid,
unsigned int type);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 060ebe330ee1..59244c744eab 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -304,6 +304,7 @@ void iommu_device_unregister_bus(struct iommu_device *iommu,
struct notifier_block *nb)
{
bus_unregister_notifier(bus, nb);
+ fwnode_remove_software_node(iommu->fwnode);
iommu_device_unregister(iommu);
}
EXPORT_SYMBOL_GPL(iommu_device_unregister_bus);
@@ -326,6 +327,12 @@ int iommu_device_register_bus(struct iommu_device *iommu,
if (err)
return err;
+ iommu->fwnode = fwnode_create_software_node(NULL, NULL);
+ if (IS_ERR(iommu->fwnode)) {
+ bus_unregister_notifier(bus, nb);
+ return PTR_ERR(iommu->fwnode);
+ }
+
spin_lock(&iommu_device_lock);
list_add_tail(&iommu->list, &iommu_device_list);
spin_unlock(&iommu_device_lock);
@@ -335,9 +342,28 @@ int iommu_device_register_bus(struct iommu_device *iommu,
iommu_device_unregister_bus(iommu, bus, nb);
return err;
}
+ WRITE_ONCE(iommu->ready, true);
return 0;
}
EXPORT_SYMBOL_GPL(iommu_device_register_bus);
+
+int iommu_mock_device_add(struct device *dev, struct iommu_device *iommu)
+{
+ int rc;
+
+ mutex_lock(&iommu_probe_device_lock);
+ rc = iommu_fwspec_init(dev, iommu->fwnode);
+ mutex_unlock(&iommu_probe_device_lock);
+
+ if (rc)
+ return rc;
+
+ rc = device_add(dev);
+ if (rc)
+ iommu_fwspec_free(dev);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(iommu_mock_device_add);
#endif
static struct dev_iommu *dev_iommu_get(struct device *dev)
diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
index 65fbd098f9e9..4c842368289f 100644
--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -711,6 +711,8 @@ iommufd_hw_pagetable_detach(struct iommufd_device *idev, ioasid_t pasid)
iopt_remove_reserved_iova(&hwpt_paging->ioas->iopt, idev->dev);
mutex_unlock(&igroup->lock);
+ iommufd_hw_pagetable_put(idev->ictx, hwpt);
+
/* Caller must destroy hwpt */
return hwpt;
}
@@ -1057,7 +1059,6 @@ void iommufd_device_detach(struct iommufd_device *idev, ioasid_t pasid)
hwpt = iommufd_hw_pagetable_detach(idev, pasid);
if (!hwpt)
return;
- iommufd_hw_pagetable_put(idev->ictx, hwpt);
refcount_dec(&idev->obj.users);
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_detach, "IOMMUFD");
diff --git a/drivers/iommu/iommufd/eventq.c b/drivers/iommu/iommufd/eventq.c
index fc4de63b0bce..e23d9ee4fe38 100644
--- a/drivers/iommu/iommufd/eventq.c
+++ b/drivers/iommu/iommufd/eventq.c
@@ -393,12 +393,12 @@ static int iommufd_eventq_init(struct iommufd_eventq *eventq, char *name,
const struct file_operations *fops)
{
struct file *filep;
- int fdno;
spin_lock_init(&eventq->lock);
INIT_LIST_HEAD(&eventq->deliver);
init_waitqueue_head(&eventq->wait_queue);
+ /* The filep is fput() by the core code during failure */
filep = anon_inode_getfile(name, fops, eventq, O_RDWR);
if (IS_ERR(filep))
return PTR_ERR(filep);
@@ -408,10 +408,7 @@ static int iommufd_eventq_init(struct iommufd_eventq *eventq, char *name,
eventq->filep = filep;
refcount_inc(&eventq->obj.users);
- fdno = get_unused_fd_flags(O_CLOEXEC);
- if (fdno < 0)
- fput(filep);
- return fdno;
+ return get_unused_fd_flags(O_CLOEXEC);
}
static const struct file_operations iommufd_fault_fops =
@@ -452,7 +449,6 @@ int iommufd_fault_alloc(struct iommufd_ucmd *ucmd)
return 0;
out_put_fdno:
put_unused_fd(fdno);
- fput(fault->common.filep);
return rc;
}
@@ -536,7 +532,6 @@ int iommufd_veventq_alloc(struct iommufd_ucmd *ucmd)
out_put_fdno:
put_unused_fd(fdno);
- fput(veventq->common.filep);
out_abort:
iommufd_object_abort_and_destroy(ucmd->ictx, &veventq->common.obj);
out_unlock_veventqs:
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index 0da2a81eedfa..627f9b78483a 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -454,9 +454,8 @@ static inline void iommufd_hw_pagetable_put(struct iommufd_ctx *ictx,
if (hwpt->obj.type == IOMMUFD_OBJ_HWPT_PAGING) {
struct iommufd_hwpt_paging *hwpt_paging = to_hwpt_paging(hwpt);
- lockdep_assert_not_held(&hwpt_paging->ioas->mutex);
-
if (hwpt_paging->auto_domain) {
+ lockdep_assert_not_held(&hwpt_paging->ioas->mutex);
iommufd_object_put_and_try_destroy(ictx, &hwpt->obj);
return;
}
diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
index 15af7ced0501..ce775fbbae94 100644
--- a/drivers/iommu/iommufd/main.c
+++ b/drivers/iommu/iommufd/main.c
@@ -23,6 +23,7 @@
#include "iommufd_test.h"
struct iommufd_object_ops {
+ size_t file_offset;
void (*pre_destroy)(struct iommufd_object *obj);
void (*destroy)(struct iommufd_object *obj);
void (*abort)(struct iommufd_object *obj);
@@ -121,6 +122,10 @@ void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj)
old = xas_store(&xas, NULL);
xa_unlock(&ictx->objects);
WARN_ON(old != XA_ZERO_ENTRY);
+
+ if (WARN_ON(!refcount_dec_and_test(&obj->users)))
+ return;
+
kfree(obj);
}
@@ -131,10 +136,30 @@ void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj)
void iommufd_object_abort_and_destroy(struct iommufd_ctx *ictx,
struct iommufd_object *obj)
{
- if (iommufd_object_ops[obj->type].abort)
- iommufd_object_ops[obj->type].abort(obj);
+ const struct iommufd_object_ops *ops = &iommufd_object_ops[obj->type];
+
+ if (ops->file_offset) {
+ struct file **filep = ((void *)obj) + ops->file_offset;
+
+ /*
+ * A file should hold a users refcount while the file is open and put
+ * it back in its release. The file should hold a pointer to obj in
+ * its private data. A normal fput() is deferred to a workqueue and
+ * can get out of order with the following kfree(obj); using the sync
+ * version ensures the release happens immediately. During abort we
+ * require that the file refcount be exactly one at this point -
+ * meaning the object alloc function cannot do anything that would
+ * allow another thread to take a refcount prior to a guaranteed
+ * success.
+ */
+ if (*filep)
+ __fput_sync(*filep);
+ }
+
+ if (ops->abort)
+ ops->abort(obj);
else
- iommufd_object_ops[obj->type].destroy(obj);
+ ops->destroy(obj);
iommufd_object_abort(ictx, obj);
}
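The hazard the comment above describes, reduced to a schematic with generic names:

/*
 * fput(filep);         // queues ->release() on a workqueue ...
 * kfree(obj);          // ... which may run after this free: UAF
 *
 * __fput_sync(filep);  // runs ->release() right here, so ...
 * kfree(obj);          // ... the object is freed only afterwards
 */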
@@ -550,16 +575,23 @@ static int iommufd_fops_mmap(struct file *filp, struct vm_area_struct *vma)
if (vma->vm_flags & VM_EXEC)
return -EPERM;
+ mtree_lock(&ictx->mt_mmap);
/* vma->vm_pgoff carries a page-shifted start position to an immap */
immap = mtree_load(&ictx->mt_mmap, vma->vm_pgoff << PAGE_SHIFT);
- if (!immap)
+ if (!immap || !refcount_inc_not_zero(&immap->owner->users)) {
+ mtree_unlock(&ictx->mt_mmap);
return -ENXIO;
+ }
+ mtree_unlock(&ictx->mt_mmap);
+
/*
* mtree_load() returns the immap for any contained mmio_addr, so only
* allow the exact immap thing to be mapped
*/
- if (vma->vm_pgoff != immap->vm_pgoff || length != immap->length)
- return -ENXIO;
+ if (vma->vm_pgoff != immap->vm_pgoff || length != immap->length) {
+ rc = -ENXIO;
+ goto err_refcount;
+ }
vma->vm_pgoff = 0;
vma->vm_private_data = immap;
@@ -570,10 +602,11 @@ static int iommufd_fops_mmap(struct file *filp, struct vm_area_struct *vma)
immap->mmio_addr >> PAGE_SHIFT, length,
vma->vm_page_prot);
if (rc)
- return rc;
+ goto err_refcount;
+ return 0;
- /* vm_ops.open won't be called for mmap itself. */
- refcount_inc(&immap->owner->users);
+err_refcount:
+ refcount_dec(&immap->owner->users);
return rc;
}
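A minimal userspace sketch of the lookup-then-reference pattern adopted here: the reference is taken with an inc-not-zero while the lookup lock is held, so the owner cannot hit zero and be freed between mtree_load() and the increment. Types and names below are illustrative stand-ins, not iommufd's:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct owner { atomic_int users; };
struct immap { struct owner *owner; };

static pthread_mutex_t lookup_lock = PTHREAD_MUTEX_INITIALIZER;
static struct owner live_owner = { 1 };
static struct immap the_map = { &live_owner };

/* Stand-in for mtree_load(): returns the registered immap, if any. */
static struct immap *table_lookup(unsigned long key)
{
	return key == 0 ? &the_map : NULL;
}

/* refcount_inc_not_zero(): take a reference only if the count is live. */
static int get_not_zero(atomic_int *users)
{
	int old = atomic_load(users);

	while (old != 0 &&
	       !atomic_compare_exchange_weak(users, &old, old + 1))
		;
	return old != 0;
}

static struct immap *lookup_and_get(unsigned long key)
{
	struct immap *im;

	pthread_mutex_lock(&lookup_lock);	/* mtree_lock() in the patch */
	im = table_lookup(key);
	if (im && !get_not_zero(&im->owner->users))
		im = NULL;			/* owner already dying */
	pthread_mutex_unlock(&lookup_lock);
	return im;				/* caller must drop the ref */
}

int main(void)
{
	struct immap *im = lookup_and_get(0);

	printf("got immap: %s\n", im ? "yes, with reference" : "no");
	return 0;
}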
@@ -651,6 +684,12 @@ void iommufd_ctx_put(struct iommufd_ctx *ictx)
}
EXPORT_SYMBOL_NS_GPL(iommufd_ctx_put, "IOMMUFD");
+#define IOMMUFD_FILE_OFFSET(_struct, _filep, _obj) \
+ .file_offset = (offsetof(_struct, _filep) + \
+ BUILD_BUG_ON_ZERO(!__same_type( \
+ struct file *, ((_struct *)NULL)->_filep)) + \
+ BUILD_BUG_ON_ZERO(offsetof(_struct, _obj)))
+
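What the IOMMUFD_FILE_OFFSET() macro checks at build time, rebuilt with userspace stand-ins (struct thing and struct obj are hypothetical; requires GCC or Clang): each BUILD_BUG_ON_ZERO() contributes 0 to the offset but breaks the build if the member does not have the expected file-pointer type or if the object header is not the first member.

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel helpers used by the macro. */
#define BUILD_BUG_ON_ZERO(e)	((int)sizeof(struct { int:(-!!(e)); }))
#define __same_type(a, b)	__builtin_types_compatible_p(__typeof__(a), __typeof__(b))

struct obj { int users; };
struct thing {
	struct obj common;	/* checked: must sit at offset 0 */
	void *filep;		/* checked: must have the expected type */
};

#define FILE_OFFSET(_s, _f, _o)                                           \
	(offsetof(_s, _f) +                                               \
	 BUILD_BUG_ON_ZERO(!__same_type(void *, ((_s *)NULL)->_f)) +      \
	 BUILD_BUG_ON_ZERO(offsetof(_s, _o)))

int main(void)
{
	struct thing t = { .filep = (void *)0x1234 };
	void **slot = (void *)((char *)&t + FILE_OFFSET(struct thing, filep, common));

	printf("filep recovered via offset: %p\n", *slot);	/* 0x1234 */
	return 0;
}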
static const struct iommufd_object_ops iommufd_object_ops[] = {
[IOMMUFD_OBJ_ACCESS] = {
.destroy = iommufd_access_destroy_object,
@@ -661,6 +700,7 @@ static const struct iommufd_object_ops iommufd_object_ops[] = {
},
[IOMMUFD_OBJ_FAULT] = {
.destroy = iommufd_fault_destroy,
+ IOMMUFD_FILE_OFFSET(struct iommufd_fault, common.filep, common.obj),
},
[IOMMUFD_OBJ_HW_QUEUE] = {
.destroy = iommufd_hw_queue_destroy,
@@ -683,6 +723,7 @@ static const struct iommufd_object_ops iommufd_object_ops[] = {
[IOMMUFD_OBJ_VEVENTQ] = {
.destroy = iommufd_veventq_destroy,
.abort = iommufd_veventq_abort,
+ IOMMUFD_FILE_OFFSET(struct iommufd_veventq, common.filep, common.obj),
},
[IOMMUFD_OBJ_VIOMMU] = {
.destroy = iommufd_viommu_destroy,
diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
index 61686603c769..de178827a078 100644
--- a/drivers/iommu/iommufd/selftest.c
+++ b/drivers/iommu/iommufd/selftest.c
@@ -1126,7 +1126,7 @@ static struct mock_dev *mock_dev_create(unsigned long dev_flags)
goto err_put;
}
- rc = device_add(&mdev->dev);
+ rc = iommu_mock_device_add(&mdev->dev, &mock_iommu.iommu_dev);
if (rc)
goto err_put;
return mdev;
diff --git a/drivers/iommu/iommufd/viommu.c b/drivers/iommu/iommufd/viommu.c
index 2ca5809b238b..462b457ffd0c 100644
--- a/drivers/iommu/iommufd/viommu.c
+++ b/drivers/iommu/iommufd/viommu.c
@@ -339,7 +339,7 @@ iommufd_hw_queue_alloc_phys(struct iommu_hw_queue_alloc *cmd,
}
*base_pa = (page_to_pfn(pages[0]) << PAGE_SHIFT) + offset;
- kfree(pages);
+ kvfree(pages);
return access;
out_unpin:
@@ -349,7 +349,7 @@ out_detach:
out_destroy:
iommufd_access_destroy_internal(viommu->ictx, access);
out_free:
- kfree(pages);
+ kvfree(pages);
return ERR_PTR(rc);
}
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 6fb93927bdb9..5c6f5943f44b 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1303,8 +1303,8 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
struct omap_iommu_device *iommu;
struct omap_iommu *oiommu;
struct iotlb_entry e;
+ int ret = -EINVAL;
int omap_pgsz;
- u32 ret = -EINVAL;
int i;
omap_pgsz = bytes_to_iopgsz(bytes);
diff --git a/drivers/iommu/riscv/iommu-platform.c b/drivers/iommu/riscv/iommu-platform.c
index 725e919b97ef..83a28c83f991 100644
--- a/drivers/iommu/riscv/iommu-platform.c
+++ b/drivers/iommu/riscv/iommu-platform.c
@@ -10,6 +10,8 @@
* Tomasz Jeznach <tjeznach@rivosinc.com>
*/
+#include <linux/acpi.h>
+#include <linux/irqchip/riscv-imsic.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
@@ -46,6 +48,7 @@ static int riscv_iommu_platform_probe(struct platform_device *pdev)
enum riscv_iommu_igs_settings igs;
struct device *dev = &pdev->dev;
struct riscv_iommu_device *iommu = NULL;
+ struct irq_domain *msi_domain;
struct resource *res = NULL;
int vec, ret;
@@ -76,8 +79,13 @@ static int riscv_iommu_platform_probe(struct platform_device *pdev)
switch (igs) {
case RISCV_IOMMU_CAPABILITIES_IGS_BOTH:
case RISCV_IOMMU_CAPABILITIES_IGS_MSI:
- if (is_of_node(dev->fwnode))
+ if (is_of_node(dev_fwnode(dev))) {
of_msi_configure(dev, to_of_node(dev->fwnode));
+ } else {
+ msi_domain = irq_find_matching_fwnode(imsic_acpi_get_fwnode(dev),
+ DOMAIN_BUS_PLATFORM_MSI);
+ dev_set_msi_domain(dev, msi_domain);
+ }
if (!dev_get_msi_domain(dev)) {
dev_warn(dev, "failed to find an MSI domain\n");
@@ -150,6 +158,12 @@ static const struct of_device_id riscv_iommu_of_match[] = {
{},
};
+static const struct acpi_device_id riscv_iommu_acpi_match[] = {
+ { "RSCV0004", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, riscv_iommu_acpi_match);
+
static struct platform_driver riscv_iommu_platform_driver = {
.probe = riscv_iommu_platform_probe,
.remove = riscv_iommu_platform_remove,
@@ -158,6 +172,7 @@ static struct platform_driver riscv_iommu_platform_driver = {
.name = "riscv,iommu",
.of_match_table = riscv_iommu_of_match,
.suppress_bind_attrs = true,
+ .acpi_match_table = riscv_iommu_acpi_match,
},
};
diff --git a/drivers/iommu/riscv/iommu.c b/drivers/iommu/riscv/iommu.c
index 2d0d31ba2886..ebb22979075d 100644
--- a/drivers/iommu/riscv/iommu.c
+++ b/drivers/iommu/riscv/iommu.c
@@ -12,6 +12,8 @@
#define pr_fmt(fmt) "riscv-iommu: " fmt
+#include <linux/acpi.h>
+#include <linux/acpi_rimt.h>
#include <linux/compiler.h>
#include <linux/crash_dump.h>
#include <linux/init.h>
@@ -1283,7 +1285,7 @@ static phys_addr_t riscv_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
unsigned long *ptr;
ptr = riscv_iommu_pte_fetch(domain, iova, &pte_size);
- if (_io_pte_none(*ptr) || !_io_pte_present(*ptr))
+ if (!ptr)
return 0;
return pfn_to_phys(__page_val_to_pfn(*ptr)) | (iova & (pte_size - 1));
@@ -1650,6 +1652,14 @@ int riscv_iommu_init(struct riscv_iommu_device *iommu)
goto err_iodir_off;
}
+ if (!acpi_disabled) {
+ rc = rimt_iommu_register(iommu->dev);
+ if (rc) {
+ dev_err_probe(iommu->dev, rc, "cannot register iommu with RIMT\n");
+ goto err_remove_sysfs;
+ }
+ }
+
rc = iommu_device_register(&iommu->iommu, &riscv_iommu_ops, iommu->dev);
if (rc) {
dev_err_probe(iommu->dev, rc, "cannot register iommu interface\n");
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index 9c80d61deb2c..aa576736d60b 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -612,6 +612,23 @@ static u64 get_iota_region_flag(struct s390_domain *domain)
}
}
+static bool reg_ioat_propagate_error(int cc, u8 status)
+{
+ /*
+ * If the device is in the error state, the reset routine will
+ * register the IOAT of the newly set domain on re-enable.
+ */
+ if (cc == ZPCI_CC_ERR && status == ZPCI_PCI_ST_FUNC_NOT_AVAIL)
+ return false;
+ /*
+ * If the device was removed, treat registration as success and
+ * let the subsequent error event trigger the tear-down.
+ */
+ if (cc == ZPCI_CC_INVAL_HANDLE)
+ return false;
+ return cc != ZPCI_CC_OK;
+}
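The helper's decision table, restating the code above:

/*
 * cc                     status                      -> propagate error?
 * ZPCI_CC_OK             any                         -> no (success)
 * ZPCI_CC_ERR            ZPCI_PCI_ST_FUNC_NOT_AVAIL  -> no (reset re-registers)
 * ZPCI_CC_INVAL_HANDLE   any                         -> no (device gone)
 * any other non-OK cc    any                         -> yes (-EIO at callers)
 */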
+
static int s390_iommu_domain_reg_ioat(struct zpci_dev *zdev,
struct iommu_domain *domain, u8 *status)
{
@@ -696,7 +713,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
/* If we fail now DMA remains blocked via blocking domain */
cc = s390_iommu_domain_reg_ioat(zdev, domain, &status);
- if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL)
+ if (reg_ioat_propagate_error(cc, status))
return -EIO;
zdev->dma_table = s390_domain->dma_table;
zdev_s390_domain_update(zdev, domain);
@@ -1032,7 +1049,8 @@ struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev)
lockdep_assert_held(&zdev->dom_lock);
- if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED)
+ if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED ||
+ zdev->s390_domain->type == IOMMU_DOMAIN_IDENTITY)
return NULL;
s390_domain = to_s390_domain(zdev->s390_domain);
@@ -1123,12 +1141,7 @@ static int s390_attach_dev_identity(struct iommu_domain *domain,
/* If we fail now DMA remains blocked via blocking domain */
cc = s390_iommu_domain_reg_ioat(zdev, domain, &status);
-
- /*
- * If the device is undergoing error recovery the reset code
- * will re-establish the new domain.
- */
- if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL)
+ if (reg_ioat_propagate_error(cc, status))
return -EIO;
zdev_s390_domain_update(zdev, domain);
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 532db1de201b..b39d6f134ab2 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -998,8 +998,7 @@ static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
iommu_dma_get_resv_regions(dev, head);
}
-static const struct iommu_ops viommu_ops;
-static struct virtio_driver virtio_iommu_drv;
+static const struct bus_type *virtio_bus_type;
static int viommu_match_node(struct device *dev, const void *data)
{
@@ -1008,8 +1007,9 @@ static int viommu_match_node(struct device *dev, const void *data)
static struct viommu_dev *viommu_get_by_fwnode(struct fwnode_handle *fwnode)
{
- struct device *dev = driver_find_device(&virtio_iommu_drv.driver, NULL,
- fwnode, viommu_match_node);
+ struct device *dev = bus_find_device(virtio_bus_type, NULL, fwnode,
+ viommu_match_node);
+
put_device(dev);
return dev ? dev_to_virtio(dev)->priv : NULL;
@@ -1160,6 +1160,9 @@ static int viommu_probe(struct virtio_device *vdev)
if (!viommu)
return -ENOMEM;
+ /* Borrow this for easy lookups later */
+ virtio_bus_type = dev->bus;
+
spin_lock_init(&viommu->request_lock);
ida_init(&viommu->domain_ids);
viommu->dev = dev;
@@ -1229,10 +1232,10 @@ static int viommu_probe(struct virtio_device *vdev)
if (ret)
goto err_free_vqs;
- iommu_device_register(&viommu->iommu, &viommu_ops, parent_dev);
-
vdev->priv = viommu;
+ iommu_device_register(&viommu->iommu, &viommu_ops, parent_dev);
+
dev_info(dev, "input address: %u bits\n",
order_base_2(viommu->geometry.aperture_end));
dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap);
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 39a6ae1d574b..a61c6dc63c29 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -554,6 +554,7 @@ config IMX_MU_MSI
tristate "i.MX MU used as MSI controller"
depends on OF && HAS_IOMEM
depends on ARCH_MXC || COMPILE_TEST
+ depends on ARM || ARM64
default m if ARCH_MXC
select IRQ_DOMAIN
select IRQ_DOMAIN_HIERARCHY
@@ -633,6 +634,13 @@ config RISCV_IMSIC
select GENERIC_MSI_IRQ
select IRQ_MSI_LIB
+config RISCV_RPMI_SYSMSI
+ bool
+ depends on RISCV && MAILBOX
+ select IRQ_DOMAIN_HIERARCHY
+ select GENERIC_MSI_IRQ
+ default RISCV
+
config SIFIVE_PLIC
bool
depends on RISCV
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 93e3ced023bb..3de083f5484c 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -106,6 +106,7 @@ obj-$(CONFIG_RISCV_INTC) += irq-riscv-intc.o
obj-$(CONFIG_RISCV_APLIC) += irq-riscv-aplic-main.o irq-riscv-aplic-direct.o
obj-$(CONFIG_RISCV_APLIC_MSI) += irq-riscv-aplic-msi.o
obj-$(CONFIG_RISCV_IMSIC) += irq-riscv-imsic-state.o irq-riscv-imsic-early.o irq-riscv-imsic-platform.o
+obj-$(CONFIG_RISCV_RPMI_SYSMSI) += irq-riscv-rpmi-sysmsi.o
obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o
obj-$(CONFIG_STARFIVE_JH8100_INTC) += irq-starfive-jh8100-intc.o
obj-$(CONFIG_ACLINT_SSWI) += irq-aclint-sswi.o
diff --git a/drivers/irqchip/irq-aspeed-scu-ic.c b/drivers/irqchip/irq-aspeed-scu-ic.c
index 1c7045467c48..bee59c8c4c93 100644
--- a/drivers/irqchip/irq-aspeed-scu-ic.c
+++ b/drivers/irqchip/irq-aspeed-scu-ic.c
@@ -1,61 +1,78 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Aspeed AST24XX, AST25XX, and AST26XX SCU Interrupt Controller
+ * Aspeed AST24XX, AST25XX, AST26XX, and AST27XX SCU Interrupt Controller
* Copyright 2019 IBM Corporation
*
* Eddie James <eajames@linux.ibm.com>
*/
#include <linux/bitops.h>
+#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
-#include <linux/mfd/syscon.h>
+#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/regmap.h>
-#define ASPEED_SCU_IC_REG 0x018
-#define ASPEED_SCU_IC_SHIFT 0
-#define ASPEED_SCU_IC_ENABLE GENMASK(15, ASPEED_SCU_IC_SHIFT)
-#define ASPEED_SCU_IC_NUM_IRQS 7
#define ASPEED_SCU_IC_STATUS GENMASK(28, 16)
#define ASPEED_SCU_IC_STATUS_SHIFT 16
+#define AST2700_SCU_IC_STATUS GENMASK(15, 0)
+
+struct aspeed_scu_ic_variant {
+ const char *compatible;
+ unsigned long irq_enable;
+ unsigned long irq_shift;
+ unsigned int num_irqs;
+ unsigned long ier;
+ unsigned long isr;
+};
-#define ASPEED_AST2600_SCU_IC0_REG 0x560
-#define ASPEED_AST2600_SCU_IC0_SHIFT 0
-#define ASPEED_AST2600_SCU_IC0_ENABLE \
- GENMASK(5, ASPEED_AST2600_SCU_IC0_SHIFT)
-#define ASPEED_AST2600_SCU_IC0_NUM_IRQS 6
+#define SCU_VARIANT(_compat, _shift, _enable, _num, _ier, _isr) { \
+ .compatible = _compat, \
+ .irq_shift = _shift, \
+ .irq_enable = _enable, \
+ .num_irqs = _num, \
+ .ier = _ier, \
+ .isr = _isr, \
+}
-#define ASPEED_AST2600_SCU_IC1_REG 0x570
-#define ASPEED_AST2600_SCU_IC1_SHIFT 4
-#define ASPEED_AST2600_SCU_IC1_ENABLE \
- GENMASK(5, ASPEED_AST2600_SCU_IC1_SHIFT)
-#define ASPEED_AST2600_SCU_IC1_NUM_IRQS 2
+static const struct aspeed_scu_ic_variant scu_ic_variants[] __initconst = {
+ SCU_VARIANT("aspeed,ast2400-scu-ic", 0, GENMASK(15, 0), 7, 0x00, 0x00),
+ SCU_VARIANT("aspeed,ast2500-scu-ic", 0, GENMASK(15, 0), 7, 0x00, 0x00),
+ SCU_VARIANT("aspeed,ast2600-scu-ic0", 0, GENMASK(5, 0), 6, 0x00, 0x00),
+ SCU_VARIANT("aspeed,ast2600-scu-ic1", 4, GENMASK(5, 4), 2, 0x00, 0x00),
+ SCU_VARIANT("aspeed,ast2700-scu-ic0", 0, GENMASK(3, 0), 4, 0x00, 0x04),
+ SCU_VARIANT("aspeed,ast2700-scu-ic1", 0, GENMASK(3, 0), 4, 0x00, 0x04),
+ SCU_VARIANT("aspeed,ast2700-scu-ic2", 0, GENMASK(3, 0), 4, 0x04, 0x00),
+ SCU_VARIANT("aspeed,ast2700-scu-ic3", 0, GENMASK(1, 0), 2, 0x04, 0x00),
+};
struct aspeed_scu_ic {
- unsigned long irq_enable;
- unsigned long irq_shift;
- unsigned int num_irqs;
- unsigned int reg;
- struct regmap *scu;
- struct irq_domain *irq_domain;
+ unsigned long irq_enable;
+ unsigned long irq_shift;
+ unsigned int num_irqs;
+ void __iomem *base;
+ struct irq_domain *irq_domain;
+ unsigned long ier;
+ unsigned long isr;
};
-static void aspeed_scu_ic_irq_handler(struct irq_desc *desc)
+static inline bool scu_has_split_isr(struct aspeed_scu_ic *scu)
+{
+ return scu->ier != scu->isr;
+}
+
+static void aspeed_scu_ic_irq_handler_combined(struct irq_desc *desc)
{
- unsigned int sts;
- unsigned long bit;
- unsigned long enabled;
- unsigned long max;
- unsigned long status;
struct aspeed_scu_ic *scu_ic = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
- unsigned int mask = scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT;
+ unsigned long bit, enabled, max, status;
+ unsigned int sts, mask;
chained_irq_enter(chip, desc);
+ mask = scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT;
/*
* The SCU IC has just one register to control its operation and read
* status. The interrupt enable bits occupy the lower 16 bits of the
@@ -66,7 +83,7 @@ static void aspeed_scu_ic_irq_handler(struct irq_desc *desc)
* shifting the status down to get the mapping and then back up to
* clear the bit.
*/
- regmap_read(scu_ic->scu, scu_ic->reg, &sts);
+ sts = readl(scu_ic->base);
enabled = sts & scu_ic->irq_enable;
status = (sts >> ASPEED_SCU_IC_STATUS_SHIFT) & enabled;
@@ -74,43 +91,83 @@ static void aspeed_scu_ic_irq_handler(struct irq_desc *desc)
max = scu_ic->num_irqs + bit;
for_each_set_bit_from(bit, &status, max) {
- generic_handle_domain_irq(scu_ic->irq_domain,
- bit - scu_ic->irq_shift);
+ generic_handle_domain_irq(scu_ic->irq_domain, bit - scu_ic->irq_shift);
+ writel((readl(scu_ic->base) & ~mask) | BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT),
+ scu_ic->base);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void aspeed_scu_ic_irq_handler_split(struct irq_desc *desc)
+{
+ struct aspeed_scu_ic *scu_ic = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned long bit, enabled, max, status;
+ unsigned int sts, mask;
- regmap_write_bits(scu_ic->scu, scu_ic->reg, mask,
- BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT));
+ chained_irq_enter(chip, desc);
+
+ mask = scu_ic->irq_enable;
+ sts = readl(scu_ic->base + scu_ic->isr);
+ enabled = readl(scu_ic->base + scu_ic->ier) & mask;
+ status = sts & enabled;
+
+ bit = scu_ic->irq_shift;
+ max = scu_ic->num_irqs + bit;
+
+ for_each_set_bit_from(bit, &status, max) {
+ generic_handle_domain_irq(scu_ic->irq_domain, bit - scu_ic->irq_shift);
+ /* Clear interrupt */
+ writel(BIT(bit), scu_ic->base + scu_ic->isr);
}
chained_irq_exit(chip, desc);
}
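The write-one-to-clear discipline is the reason the handler is split, and differs between the two layouts (a restatement of the code above):

/*
 * combined: enables and W1C status share one register, so the enable
 *           bits must be masked out of the clearing write:
 *             writel((readl(base) & ~mask) | BIT(status_bit), base);
 * split:    status has its own ISR register, so a plain W1C write
 *           suffices:
 *             writel(BIT(bit), base + isr);
 */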
-static void aspeed_scu_ic_irq_mask(struct irq_data *data)
+static void aspeed_scu_ic_irq_mask_combined(struct irq_data *data)
{
struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data);
- unsigned int mask = BIT(data->hwirq + scu_ic->irq_shift) |
- (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT);
+ unsigned int bit = BIT(data->hwirq + scu_ic->irq_shift);
+ unsigned int mask = bit | (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT);
/*
* Status bits are cleared by writing 1. In order to prevent the mask
* operation from clearing the status bits, they should be under the
* mask and written with 0.
*/
- regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, 0);
+ writel(readl(scu_ic->base) & ~mask, scu_ic->base);
}
-static void aspeed_scu_ic_irq_unmask(struct irq_data *data)
+static void aspeed_scu_ic_irq_unmask_combined(struct irq_data *data)
{
struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data);
unsigned int bit = BIT(data->hwirq + scu_ic->irq_shift);
- unsigned int mask = bit |
- (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT);
+ unsigned int mask = bit | (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT);
/*
* Status bits are cleared by writing 1. In order to prevent the unmask
* operation from clearing the status bits, they should be under the
* mask and written with 0.
*/
- regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, bit);
+ writel((readl(scu_ic->base) & ~mask) | bit, scu_ic->base);
+}
+
+static void aspeed_scu_ic_irq_mask_split(struct irq_data *data)
+{
+ struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data);
+ unsigned int mask = BIT(data->hwirq + scu_ic->irq_shift);
+
+ writel(readl(scu_ic->base) & ~mask, scu_ic->base + scu_ic->ier);
+}
+
+static void aspeed_scu_ic_irq_unmask_split(struct irq_data *data)
+{
+ struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data);
+ unsigned int bit = BIT(data->hwirq + scu_ic->irq_shift);
+
+ writel(readl(scu_ic->base) | bit, scu_ic->base + scu_ic->ier);
}
static int aspeed_scu_ic_irq_set_affinity(struct irq_data *data,
@@ -120,17 +177,29 @@ static int aspeed_scu_ic_irq_set_affinity(struct irq_data *data,
return -EINVAL;
}
-static struct irq_chip aspeed_scu_ic_chip = {
+static struct irq_chip aspeed_scu_ic_chip_combined = {
.name = "aspeed-scu-ic",
- .irq_mask = aspeed_scu_ic_irq_mask,
- .irq_unmask = aspeed_scu_ic_irq_unmask,
- .irq_set_affinity = aspeed_scu_ic_irq_set_affinity,
+ .irq_mask = aspeed_scu_ic_irq_mask_combined,
+ .irq_unmask = aspeed_scu_ic_irq_unmask_combined,
+ .irq_set_affinity = aspeed_scu_ic_irq_set_affinity,
+};
+
+static struct irq_chip aspeed_scu_ic_chip_split = {
+ .name = "ast2700-scu-ic",
+ .irq_mask = aspeed_scu_ic_irq_mask_split,
+ .irq_unmask = aspeed_scu_ic_irq_unmask_split,
+ .irq_set_affinity = aspeed_scu_ic_irq_set_affinity,
};
static int aspeed_scu_ic_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
- irq_set_chip_and_handler(irq, &aspeed_scu_ic_chip, handle_level_irq);
+ struct aspeed_scu_ic *scu_ic = domain->host_data;
+
+ if (scu_has_split_isr(scu_ic))
+ irq_set_chip_and_handler(irq, &aspeed_scu_ic_chip_split, handle_level_irq);
+ else
+ irq_set_chip_and_handler(irq, &aspeed_scu_ic_chip_combined, handle_level_irq);
irq_set_chip_data(irq, domain->host_data);
return 0;
@@ -143,21 +212,21 @@ static const struct irq_domain_ops aspeed_scu_ic_domain_ops = {
static int aspeed_scu_ic_of_init_common(struct aspeed_scu_ic *scu_ic,
struct device_node *node)
{
- int irq;
- int rc = 0;
+ int irq, rc = 0;
- if (!node->parent) {
- rc = -ENODEV;
+ scu_ic->base = of_iomap(node, 0);
+ if (!scu_ic->base) {
+ rc = -ENOMEM;
goto err;
}
- scu_ic->scu = syscon_node_to_regmap(node->parent);
- if (IS_ERR(scu_ic->scu)) {
- rc = PTR_ERR(scu_ic->scu);
- goto err;
+ if (scu_has_split_isr(scu_ic)) {
+ writel(AST2700_SCU_IC_STATUS, scu_ic->base + scu_ic->isr);
+ writel(0, scu_ic->base + scu_ic->ier);
+ } else {
+ writel(ASPEED_SCU_IC_STATUS, scu_ic->base);
+ writel(0, scu_ic->base);
}
- regmap_write_bits(scu_ic->scu, scu_ic->reg, ASPEED_SCU_IC_STATUS, ASPEED_SCU_IC_STATUS);
- regmap_write_bits(scu_ic->scu, scu_ic->reg, ASPEED_SCU_IC_ENABLE, 0);
irq = irq_of_parse_and_map(node, 0);
if (!irq) {
@@ -166,75 +235,60 @@ static int aspeed_scu_ic_of_init_common(struct aspeed_scu_ic *scu_ic,
}
scu_ic->irq_domain = irq_domain_create_linear(of_fwnode_handle(node), scu_ic->num_irqs,
- &aspeed_scu_ic_domain_ops,
- scu_ic);
+ &aspeed_scu_ic_domain_ops, scu_ic);
if (!scu_ic->irq_domain) {
rc = -ENOMEM;
goto err;
}
- irq_set_chained_handler_and_data(irq, aspeed_scu_ic_irq_handler,
+ irq_set_chained_handler_and_data(irq, scu_has_split_isr(scu_ic) ?
+ aspeed_scu_ic_irq_handler_split :
+ aspeed_scu_ic_irq_handler_combined,
scu_ic);
return 0;
err:
kfree(scu_ic);
-
return rc;
}
-static int __init aspeed_scu_ic_of_init(struct device_node *node,
- struct device_node *parent)
+static const struct aspeed_scu_ic_variant *aspeed_scu_ic_find_variant(struct device_node *np)
{
- struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
-
- if (!scu_ic)
- return -ENOMEM;
-
- scu_ic->irq_enable = ASPEED_SCU_IC_ENABLE;
- scu_ic->irq_shift = ASPEED_SCU_IC_SHIFT;
- scu_ic->num_irqs = ASPEED_SCU_IC_NUM_IRQS;
- scu_ic->reg = ASPEED_SCU_IC_REG;
-
- return aspeed_scu_ic_of_init_common(scu_ic, node);
+ for (int i = 0; i < ARRAY_SIZE(scu_ic_variants); i++) {
+ if (of_device_is_compatible(np, scu_ic_variants[i].compatible))
+ return &scu_ic_variants[i];
+ }
+ return NULL;
}
-static int __init aspeed_ast2600_scu_ic0_of_init(struct device_node *node,
- struct device_node *parent)
+static int __init aspeed_scu_ic_of_init(struct device_node *node, struct device_node *parent)
{
- struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
+ const struct aspeed_scu_ic_variant *variant;
+ struct aspeed_scu_ic *scu_ic;
- if (!scu_ic)
- return -ENOMEM;
-
- scu_ic->irq_enable = ASPEED_AST2600_SCU_IC0_ENABLE;
- scu_ic->irq_shift = ASPEED_AST2600_SCU_IC0_SHIFT;
- scu_ic->num_irqs = ASPEED_AST2600_SCU_IC0_NUM_IRQS;
- scu_ic->reg = ASPEED_AST2600_SCU_IC0_REG;
-
- return aspeed_scu_ic_of_init_common(scu_ic, node);
-}
-
-static int __init aspeed_ast2600_scu_ic1_of_init(struct device_node *node,
- struct device_node *parent)
-{
- struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
+ variant = aspeed_scu_ic_find_variant(node);
+ if (!variant)
+ return -ENODEV;
+ scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
if (!scu_ic)
return -ENOMEM;
- scu_ic->irq_enable = ASPEED_AST2600_SCU_IC1_ENABLE;
- scu_ic->irq_shift = ASPEED_AST2600_SCU_IC1_SHIFT;
- scu_ic->num_irqs = ASPEED_AST2600_SCU_IC1_NUM_IRQS;
- scu_ic->reg = ASPEED_AST2600_SCU_IC1_REG;
+ scu_ic->irq_enable = variant->irq_enable;
+ scu_ic->irq_shift = variant->irq_shift;
+ scu_ic->num_irqs = variant->num_irqs;
+ scu_ic->ier = variant->ier;
+ scu_ic->isr = variant->isr;
return aspeed_scu_ic_of_init_common(scu_ic, node);
}
IRQCHIP_DECLARE(ast2400_scu_ic, "aspeed,ast2400-scu-ic", aspeed_scu_ic_of_init);
IRQCHIP_DECLARE(ast2500_scu_ic, "aspeed,ast2500-scu-ic", aspeed_scu_ic_of_init);
-IRQCHIP_DECLARE(ast2600_scu_ic0, "aspeed,ast2600-scu-ic0",
- aspeed_ast2600_scu_ic0_of_init);
-IRQCHIP_DECLARE(ast2600_scu_ic1, "aspeed,ast2600-scu-ic1",
- aspeed_ast2600_scu_ic1_of_init);
+IRQCHIP_DECLARE(ast2600_scu_ic0, "aspeed,ast2600-scu-ic0", aspeed_scu_ic_of_init);
+IRQCHIP_DECLARE(ast2600_scu_ic1, "aspeed,ast2600-scu-ic1", aspeed_scu_ic_of_init);
+IRQCHIP_DECLARE(ast2700_scu_ic0, "aspeed,ast2700-scu-ic0", aspeed_scu_ic_of_init);
+IRQCHIP_DECLARE(ast2700_scu_ic1, "aspeed,ast2700-scu-ic1", aspeed_scu_ic_of_init);
+IRQCHIP_DECLARE(ast2700_scu_ic2, "aspeed,ast2700-scu-ic2", aspeed_scu_ic_of_init);
+IRQCHIP_DECLARE(ast2700_scu_ic3, "aspeed,ast2700-scu-ic3", aspeed_scu_ic_of_init);
diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c
index 03aeed39a4d2..1dcc52760eca 100644
--- a/drivers/irqchip/irq-atmel-aic.c
+++ b/drivers/irqchip/irq-atmel-aic.c
@@ -188,7 +188,7 @@ static int aic_irq_domain_xlate(struct irq_domain *d,
gc = dgc->gc[idx];
- guard(raw_spinlock_irq)(&gc->lock);
+ guard(raw_spinlock_irqsave)(&gc->lock);
smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq));
aic_common_set_priority(intspec[2], &smr);
irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq));
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index 60b00d2c3d7a..1f14b401f71d 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -279,7 +279,7 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
if (ret)
return ret;
- guard(raw_spinlock_irq)(&bgc->lock);
+ guard(raw_spinlock_irqsave)(&bgc->lock);
irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR);
smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
aic_common_set_priority(intspec[2], &smr);
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index 24ef5af569fe..8a3410c2b7b5 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -153,14 +153,19 @@ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
{
msi_alloc_info_t *info = args;
struct v2m_data *v2m = NULL, *tmp;
- int hwirq, offset, i, err = 0;
+ int hwirq, i, err = 0;
+ unsigned long offset;
+ unsigned long align_mask = nr_irqs - 1;
spin_lock(&v2m_lock);
list_for_each_entry(tmp, &v2m_nodes, entry) {
- offset = bitmap_find_free_region(tmp->bm, tmp->nr_spis,
- get_count_order(nr_irqs));
- if (offset >= 0) {
+ unsigned long align_off = tmp->spi_start - (tmp->spi_start & ~align_mask);
+
+ offset = bitmap_find_next_zero_area_off(tmp->bm, tmp->nr_spis, 0,
+ nr_irqs, align_mask, align_off);
+ if (offset < tmp->nr_spis) {
v2m = tmp;
+ bitmap_set(v2m->bm, offset, nr_irqs);
break;
}
}
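The alignment bookkeeping is easiest to see with numbers (illustrative only): multi-MSI requires the absolute SPI number, not the bitmap offset, to be a multiple of the allocation size, and align_off compensates for a frame whose spi_start is not itself aligned:

/*
 * spi_start = 100, nr_irqs = 8  =>  align_mask = 7,
 * align_off = 100 - (100 & ~7UL) = 4.
 *
 * bitmap_find_next_zero_area_off() only returns offsets where
 * (offset + align_off) % 8 == 0, e.g. offset 4 => SPI 104, an
 * 8-aligned hwirq block as required for multi-MSI.
 */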
diff --git a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
index 11549d85f23b..b5785472765a 100644
--- a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
@@ -30,7 +30,7 @@ static u32 fsl_mc_msi_domain_get_msi_id(struct irq_domain *domain,
u32 out_id;
of_node = irq_domain_get_of_node(domain);
- out_id = of_node ? of_msi_map_id(&mc_dev->dev, of_node, mc_dev->icid) :
+ out_id = of_node ? of_msi_xlate(&mc_dev->dev, &of_node, mc_dev->icid) :
iort_msi_map_id(&mc_dev->dev, mc_dev->icid);
return out_id;
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index dbeb85677b08..3de351e66ee8 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1766,8 +1766,9 @@ static int gic_irq_domain_select(struct irq_domain *d,
struct irq_fwspec *fwspec,
enum irq_domain_bus_token bus_token)
{
- unsigned int type, ret, ppi_idx;
+ unsigned int type, ppi_idx;
irq_hw_number_t hwirq;
+ int ret;
/* Not for us */
if (fwspec->fwnode != d->fwnode)
diff --git a/drivers/irqchip/irq-gic-v5-irs.c b/drivers/irqchip/irq-gic-v5-irs.c
index f845415f9143..ce2732d649a3 100644
--- a/drivers/irqchip/irq-gic-v5-irs.c
+++ b/drivers/irqchip/irq-gic-v5-irs.c
@@ -5,6 +5,7 @@
#define pr_fmt(fmt) "GICv5 IRS: " fmt
+#include <linux/kmemleak.h>
#include <linux/log2.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -117,6 +118,7 @@ static int __init gicv5_irs_init_ist_linear(struct gicv5_irs_chip_data *irs_data
kfree(ist);
return ret;
}
+ kmemleak_ignore(ist);
return 0;
}
@@ -232,6 +234,7 @@ int gicv5_irs_iste_alloc(const u32 lpi)
kfree(l2ist);
return ret;
}
+ kmemleak_ignore(l2ist);
/*
* Make sure we invalidate the cache line pulled before the IRS
@@ -568,7 +571,7 @@ static void __init gicv5_irs_init_bases(struct gicv5_irs_chip_data *irs_data,
FIELD_PREP(GICV5_IRS_CR1_IST_RA, GICV5_NO_READ_ALLOC) |
FIELD_PREP(GICV5_IRS_CR1_IC, GICV5_NON_CACHE) |
FIELD_PREP(GICV5_IRS_CR1_OC, GICV5_NON_CACHE);
- irs_data->flags |= IRS_FLAGS_NON_COHERENT;
+ irs_data->flags |= IRS_FLAGS_NON_COHERENT;
} else {
cr1 = FIELD_PREP(GICV5_IRS_CR1_VPED_WA, GICV5_WRITE_ALLOC) |
FIELD_PREP(GICV5_IRS_CR1_VPED_RA, GICV5_READ_ALLOC) |
@@ -623,12 +626,14 @@ static int __init gicv5_irs_of_init_affinity(struct device_node *node,
int cpu;
cpu_node = of_parse_phandle(node, "cpus", i);
- if (WARN_ON(!cpu_node))
+ if (!cpu_node) {
+ pr_warn(FW_BUG "Erroneous CPU node phandle\n");
continue;
+ }
cpu = of_cpu_node_to_id(cpu_node);
of_node_put(cpu_node);
- if (WARN_ON(cpu < 0))
+ if (cpu < 0)
continue;
if (iaffids[i] & ~iaffid_mask) {
diff --git a/drivers/irqchip/irq-gic-v5-its.c b/drivers/irqchip/irq-gic-v5-its.c
index 340640fdbdf6..554485f0be1f 100644
--- a/drivers/irqchip/irq-gic-v5-its.c
+++ b/drivers/irqchip/irq-gic-v5-its.c
@@ -191,9 +191,9 @@ static int gicv5_its_create_itt_two_level(struct gicv5_its_chip_data *its,
unsigned int num_events)
{
unsigned int l1_bits, l2_bits, span, events_per_l2_table;
- unsigned int i, complete_tables, final_span, num_ents;
+ unsigned int complete_tables, final_span, num_ents;
__le64 *itt_l1, *itt_l2, **l2ptrs;
- int ret;
+ int i, ret;
u64 val;
ret = gicv5_its_l2sz_to_l2_bits(itt_l2sz);
@@ -768,8 +768,6 @@ static struct gicv5_its_dev *gicv5_its_alloc_device(struct gicv5_its_chip_data *
goto out_dev_free;
}
- gicv5_its_device_cache_inv(its, its_dev);
-
its_dev->its_node = its;
its_dev->event_map = (unsigned long *)bitmap_zalloc(its_dev->num_events, GFP_KERNEL);
@@ -949,15 +947,18 @@ static int gicv5_its_irq_domain_alloc(struct irq_domain *domain, unsigned int vi
device_id = its_dev->device_id;
for (i = 0; i < nr_irqs; i++) {
- lpi = gicv5_alloc_lpi();
+ ret = gicv5_alloc_lpi();
if (ret < 0) {
pr_debug("Failed to find free LPI!\n");
- goto out_eventid;
+ goto out_free_irqs;
}
+ lpi = ret;
ret = irq_domain_alloc_irqs_parent(domain, virq + i, 1, &lpi);
- if (ret)
- goto out_free_lpi;
+ if (ret) {
+ gicv5_free_lpi(lpi);
+ goto out_free_irqs;
+ }
/*
* Store eventid and deviceid into the hwirq for later use.
@@ -973,13 +974,17 @@ static int gicv5_its_irq_domain_alloc(struct irq_domain *domain, unsigned int vi
irqd = irq_get_irq_data(virq + i);
irqd_set_single_target(irqd);
irqd_set_affinity_on_activate(irqd);
- irqd_set_resend_when_in_progress(irqd);
}
return 0;
-out_free_lpi:
- gicv5_free_lpi(lpi);
+out_free_irqs:
+ while (--i >= 0) {
+ irqd = irq_domain_get_irq_data(domain, virq + i);
+ gicv5_free_lpi(irqd->parent_data->hwirq);
+ irq_domain_reset_irq_data(irqd);
+ irq_domain_free_irqs_parent(domain, virq + i, 1);
+ }
out_eventid:
gicv5_its_free_eventid(its_dev, event_id_base, nr_irqs);
return ret;
diff --git a/drivers/irqchip/irq-gic-v5-iwb.c b/drivers/irqchip/irq-gic-v5-iwb.c
index ed72fbdd4900..ad9fdc14d1c6 100644
--- a/drivers/irqchip/irq-gic-v5-iwb.c
+++ b/drivers/irqchip/irq-gic-v5-iwb.c
@@ -241,7 +241,6 @@ static int gicv5_iwb_device_probe(struct platform_device *pdev)
struct gicv5_iwb_chip_data *iwb_node;
void __iomem *iwb_base;
struct resource *res;
- int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
@@ -254,16 +253,10 @@ static int gicv5_iwb_device_probe(struct platform_device *pdev)
}
iwb_node = gicv5_iwb_init_bases(iwb_base, pdev);
- if (IS_ERR(iwb_node)) {
- ret = PTR_ERR(iwb_node);
- goto out_unmap;
- }
+ if (IS_ERR(iwb_node))
+ return PTR_ERR(iwb_node);
return 0;
-
-out_unmap:
- iounmap(iwb_base);
- return ret;
}
static const struct of_device_id gicv5_iwb_of_match[] = {
diff --git a/drivers/irqchip/irq-gic-v5.c b/drivers/irqchip/irq-gic-v5.c
index 4bd224f359a7..41ef286c4d78 100644
--- a/drivers/irqchip/irq-gic-v5.c
+++ b/drivers/irqchip/irq-gic-v5.c
@@ -1062,16 +1062,9 @@ static void gicv5_set_cpuif_idbits(void)
#ifdef CONFIG_KVM
static struct gic_kvm_info gic_v5_kvm_info __initdata;
-static bool __init gicv5_cpuif_has_gcie_legacy(void)
-{
- u64 idr0 = read_sysreg_s(SYS_ICC_IDR0_EL1);
- return !!FIELD_GET(ICC_IDR0_EL1_GCIE_LEGACY, idr0);
-}
-
static void __init gic_of_setup_kvm_info(struct device_node *node)
{
gic_v5_kvm_info.type = GIC_V5;
- gic_v5_kvm_info.has_gcie_v3_compat = gicv5_cpuif_has_gcie_legacy();
/* GIC Virtual CPU interface maintenance interrupt */
gic_v5_kvm_info.no_maint_irq_mask = false;
diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
index b2860eb2d32c..39e5a72ccd3c 100644
--- a/drivers/irqchip/irq-loongson-eiointc.c
+++ b/drivers/irqchip/irq-loongson-eiointc.c
@@ -46,6 +46,7 @@
#define EIOINTC_ALL_ENABLE_VEC_MASK(vector) (EIOINTC_ALL_ENABLE & ~BIT(vector & 0x1f))
#define EIOINTC_REG_ENABLE_VEC(vector) (EIOINTC_REG_ENABLE + ((vector >> 5) << 2))
#define EIOINTC_USE_CPU_ENCODE BIT(0)
+#define EIOINTC_ROUTE_MULT_IP BIT(1)
#define MAX_EIO_NODES (NR_CPUS / CORES_PER_EIO_NODE)
@@ -59,6 +60,14 @@
#define EIOINTC_REG_ROUTE_VEC_MASK(vector) (0xff << EIOINTC_REG_ROUTE_VEC_SHIFT(vector))
static int nr_pics;
+struct eiointc_priv;
+
+struct eiointc_ip_route {
+ struct eiointc_priv *priv;
+ /* ISR register index range routed to this destination IP */
+ int start;
+ int end;
+};
struct eiointc_priv {
u32 node;
@@ -68,6 +77,8 @@ struct eiointc_priv {
struct fwnode_handle *domain_handle;
struct irq_domain *eiointc_domain;
int flags;
+ irq_hw_number_t parent_hwirq;
+ struct eiointc_ip_route route_info[VEC_REG_COUNT];
};
static struct eiointc_priv *eiointc_priv[MAX_IO_PICS];
@@ -188,6 +199,7 @@ static int eiointc_router_init(unsigned int cpu)
{
int i, bit, cores, index, node;
unsigned int data;
+ int hwirq, mask;
node = cpu_to_eio_node(cpu);
index = eiointc_index(node);
@@ -197,6 +209,13 @@ static int eiointc_router_init(unsigned int cpu)
return -EINVAL;
}
+ /* Enable the CPU interrupt pins fed by the eiointc */
+ hwirq = eiointc_priv[index]->parent_hwirq;
+ mask = BIT(hwirq);
+ if (eiointc_priv[index]->flags & EIOINTC_ROUTE_MULT_IP)
+ mask |= BIT(hwirq + 1) | BIT(hwirq + 2) | BIT(hwirq + 3);
+ set_csr_ecfg(mask);
+
if (!(eiointc_priv[index]->flags & EIOINTC_USE_CPU_ENCODE))
cores = CORES_PER_EIO_NODE;
else
@@ -211,8 +230,31 @@ static int eiointc_router_init(unsigned int cpu)
}
for (i = 0; i < eiointc_priv[0]->vec_count / 32 / 4; i++) {
- bit = BIT(1 + index); /* Route to IP[1 + index] */
- data = bit | (bit << 8) | (bit << 16) | (bit << 24);
+ /*
+ * Route to an interrupt pin; a relative offset is used here.
+ * Offset 0 means routing to IP0, and so on.
+ *
+ * If EIOINTC_ROUTE_MULT_IP is set in flags, every 64 vectors
+ * route to different consecutive IPs; otherwise all vectors
+ * route to the same IP.
+ */
+ if (eiointc_priv[index]->flags & EIOINTC_ROUTE_MULT_IP) {
+ /* The first 64 vectors route to hwirq */
+ bit = BIT(hwirq++ - INT_HWI0);
+ data = bit | (bit << 8);
+
+ /* The second 64 vectors route to hwirq + 1 */
+ bit = BIT(hwirq++ - INT_HWI0);
+ data |= (bit << 16) | (bit << 24);
+
+ /*
+ * hwirq + 2 and hwirq + 3 are routed separately
+ * in the next loop iteration
+ */
+ } else {
+ bit = BIT(hwirq - INT_HWI0);
+ data = bit | (bit << 8) | (bit << 16) | (bit << 24);
+ }
iocsr_write32(data, EIOINTC_REG_IPMAP + i * 4);
}
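Each 32-bit IPMAP register packs four route bytes, one per 32-vector group, so one register covers 128 vectors; the EIOINTC_ROUTE_MULT_IP branch above therefore splits a register as follows (first loop iteration, hwirq == INT_HWI0):

/*
 * byte 0 (vectors   0..31) = BIT(0) -> IP0
 * byte 1 (vectors  32..63) = BIT(0) -> IP0
 * byte 2 (vectors  64..95) = BIT(1) -> IP1
 * byte 3 (vectors 96..127) = BIT(1) -> IP1
 *
 * i.e. every 64-vector block lands on its own CPU interrupt pin,
 * while the non-MULT_IP case writes the same bit into all four bytes.
 */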
@@ -241,15 +283,22 @@ static int eiointc_router_init(unsigned int cpu)
static void eiointc_irq_dispatch(struct irq_desc *desc)
{
- int i;
- u64 pending;
- bool handled = false;
+ struct eiointc_ip_route *info = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
- struct eiointc_priv *priv = irq_desc_get_handler_data(desc);
+ bool handled = false;
+ u64 pending;
+ int i;
chained_irq_enter(chip, desc);
- for (i = 0; i < eiointc_priv[0]->vec_count / VEC_COUNT_PER_REG; i++) {
+ /*
+ * If EIOINTC_ROUTE_MULT_IP is set, every 64 interrupt vectors in the
+ * eiointc interrupt controller route to a different CPU interrupt pin.
+ *
+ * Every CPU interrupt pin has its own irq handler, so it is fine to
+ * read the ISR for just these 64 interrupt vectors rather than all of them.
+ */
+ for (i = info->start; i < info->end; i++) {
pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3));
/* Skip handling if pending bitmap is zero */
@@ -262,7 +311,7 @@ static void eiointc_irq_dispatch(struct irq_desc *desc)
int bit = __ffs(pending);
int irq = bit + VEC_COUNT_PER_REG * i;
- generic_handle_domain_irq(priv->eiointc_domain, irq);
+ generic_handle_domain_irq(info->priv->eiointc_domain, irq);
pending &= ~BIT(bit);
handled = true;
}
@@ -462,8 +511,33 @@ static int __init eiointc_init(struct eiointc_priv *priv, int parent_irq,
}
eiointc_priv[nr_pics++] = priv;
+ /*
+ * Only the first eiointc device on a VM supports routing to
+ * different CPU interrupt pins. Any later eiointc devices fall
+ * back to the generic method if there are multiple eiointc
+ * devices in the future.
+ */
+ if (cpu_has_hypervisor && (nr_pics == 1)) {
+ priv->flags |= EIOINTC_ROUTE_MULT_IP;
+ priv->parent_hwirq = INT_HWI0;
+ }
+
+ if (priv->flags & EIOINTC_ROUTE_MULT_IP) {
+ for (i = 0; i < priv->vec_count / VEC_COUNT_PER_REG; i++) {
+ priv->route_info[i].start = priv->parent_hwirq - INT_HWI0 + i;
+ priv->route_info[i].end = priv->route_info[i].start + 1;
+ priv->route_info[i].priv = priv;
+ parent_irq = get_percpu_irq(priv->parent_hwirq + i);
+ irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch,
+ &priv->route_info[i]);
+ }
+ } else {
+ priv->route_info[0].start = 0;
+ priv->route_info[0].end = priv->vec_count / VEC_COUNT_PER_REG;
+ priv->route_info[0].priv = priv;
+ irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch,
+ &priv->route_info[0]);
+ }
eiointc_router_init(0);
- irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv);
if (nr_pics == 1) {
register_syscore_ops(&eiointc_syscore_ops);
@@ -495,7 +569,7 @@ int __init eiointc_acpi_init(struct irq_domain *parent,
priv->vec_count = VEC_COUNT;
priv->node = acpi_eiointc->node;
-
+ priv->parent_hwirq = acpi_eiointc->cascade;
parent_irq = irq_create_mapping(parent, acpi_eiointc->cascade);
ret = eiointc_init(priv, parent_irq, acpi_eiointc->node_map);
@@ -527,8 +601,9 @@ out_free_priv:
static int __init eiointc_of_init(struct device_node *of_node,
struct device_node *parent)
{
- int parent_irq, ret;
struct eiointc_priv *priv;
+ struct irq_data *irq_data;
+ int parent_irq, ret;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -544,6 +619,12 @@ static int __init eiointc_of_init(struct device_node *of_node,
if (ret < 0)
goto out_free_priv;
+ irq_data = irq_get_irq_data(parent_irq);
+ if (!irq_data) {
+ ret = -ENODEV;
+ goto out_free_priv;
+ }
+
/*
* In particular, the number of devices supported by the LS2K0500
* extended I/O interrupt vector is 128.
@@ -552,7 +633,7 @@ static int __init eiointc_of_init(struct device_node *of_node,
priv->vec_count = 128;
else
priv->vec_count = VEC_COUNT;
-
+ priv->parent_hwirq = irqd_to_hwirq(irq_data);
priv->node = 0;
priv->domain_handle = of_fwnode_handle(of_node);
diff --git a/drivers/irqchip/irq-loongson-pch-lpc.c b/drivers/irqchip/irq-loongson-pch-lpc.c
index 2d4c3ec128b8..912bf50a5c7c 100644
--- a/drivers/irqchip/irq-loongson-pch-lpc.c
+++ b/drivers/irqchip/irq-loongson-pch-lpc.c
@@ -200,8 +200,13 @@ int __init pch_lpc_acpi_init(struct irq_domain *parent,
goto iounmap_base;
}
- priv->lpc_domain = irq_domain_create_linear(irq_handle, LPC_COUNT,
- &pch_lpc_domain_ops, priv);
+ /*
+ * The LPC interrupt controller is a legacy i8259-compatible device,
+ * which requires a static 1:1 mapping for IRQs 0-15.
+ * Use irq_domain_create_legacy to establish this static mapping early.
+ */
+ priv->lpc_domain = irq_domain_create_legacy(irq_handle, LPC_COUNT, 0, 0,
+ &pch_lpc_domain_ops, priv);
if (!priv->lpc_domain) {
pr_err("Failed to create IRQ domain\n");
goto free_irq_handle;
diff --git a/drivers/irqchip/irq-msi-lib.c b/drivers/irqchip/irq-msi-lib.c
index 454c7f16dd4d..d5eefc3d7215 100644
--- a/drivers/irqchip/irq-msi-lib.c
+++ b/drivers/irqchip/irq-msi-lib.c
@@ -112,6 +112,20 @@ bool msi_lib_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
*/
if (!chip->irq_set_affinity && !(info->flags & MSI_FLAG_NO_AFFINITY))
chip->irq_set_affinity = msi_domain_set_affinity;
+
+ /*
+ * If the parent domain insists on being in charge of masking, obey
+ * blindly. The interrupt is un-masked at the PCI level on startup
+ * and masked on shutdown to prevent rogue interrupts after the
+ * driver freed the interrupt. Not masking it at the PCI level
+ * speeds up operation for disable/enable_irq() as it avoids
+ * getting all the way out to the PCI device.
+ */
+ if (info->flags & MSI_FLAG_PCI_MSI_MASK_PARENT) {
+ chip->irq_mask = irq_chip_mask_parent;
+ chip->irq_unmask = irq_chip_unmask_parent;
+ }
+
return true;
}
EXPORT_SYMBOL_GPL(msi_lib_init_dev_msi_info);
@@ -133,13 +147,13 @@ int msi_lib_irq_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec,
{
const struct msi_parent_ops *ops = d->msi_parent_ops;
u32 busmask = BIT(bus_token);
- struct fwnode_handle *fwh;
if (!ops)
return 0;
- fwh = d->flags & IRQ_DOMAIN_FLAG_FWNODE_PARENT ? fwnode_get_parent(fwspec->fwnode)
- : fwspec->fwnode;
+ struct fwnode_handle *fwh __free(fwnode_handle) =
+ d->flags & IRQ_DOMAIN_FLAG_FWNODE_PARENT ? fwnode_get_parent(fwspec->fwnode)
+ : fwnode_handle_get(fwspec->fwnode);
if (fwh != d->fwnode || fwspec->param_count != 0)
return 0;
diff --git a/drivers/irqchip/irq-mvebu-gicp.c b/drivers/irqchip/irq-mvebu-gicp.c
index d3232d6d8dce..667bde3c651f 100644
--- a/drivers/irqchip/irq-mvebu-gicp.c
+++ b/drivers/irqchip/irq-mvebu-gicp.c
@@ -177,6 +177,7 @@ static int mvebu_gicp_probe(struct platform_device *pdev)
.ops = &gicp_domain_ops,
};
struct mvebu_gicp *gicp;
+ void __iomem *base;
int ret, i;
gicp = devm_kzalloc(&pdev->dev, sizeof(*gicp), GFP_KERNEL);
@@ -236,6 +237,15 @@ static int mvebu_gicp_probe(struct platform_device *pdev)
return -ENODEV;
}
+ base = ioremap(gicp->res->start, resource_size(gicp->res));
+ if (!base) {
+ dev_err(&pdev->dev, "ioremap() failed. Unable to clear pending interrupts.\n");
+ } else {
+ for (i = 0; i < 64; i++)
+ writel(i, base + GICP_CLRSPI_NSR_OFFSET);
+ iounmap(base);
+ }
+
return msi_create_parent_irq_domain(&info, &gicp_msi_parent_ops) ? 0 : -ENOMEM;
}
diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c
index 76e11cac9631..2191a2b79578 100644
--- a/drivers/irqchip/irq-nvic.c
+++ b/drivers/irqchip/irq-nvic.c
@@ -73,8 +73,9 @@ static int __init nvic_of_init(struct device_node *node,
struct device_node *parent)
{
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
- unsigned int irqs, i, ret, numbanks;
+ unsigned int irqs, i, numbanks;
void __iomem *nvic_base;
+ int ret;
numbanks = (readl_relaxed(V7M_SCS_ICTR) &
V7M_SCS_ICTR_INTLINESNUM_MASK) + 1;
diff --git a/drivers/irqchip/irq-renesas-rza1.c b/drivers/irqchip/irq-renesas-rza1.c
index a697eb55ac90..6047a524ac77 100644
--- a/drivers/irqchip/irq-renesas-rza1.c
+++ b/drivers/irqchip/irq-renesas-rza1.c
@@ -142,11 +142,12 @@ static const struct irq_domain_ops rza1_irqc_domain_ops = {
static int rza1_irqc_parse_map(struct rza1_irqc_priv *priv,
struct device_node *gic_node)
{
- unsigned int imaplen, i, j, ret;
struct device *dev = priv->dev;
+ unsigned int imaplen, i, j;
struct device_node *ipar;
const __be32 *imap;
u32 intsize;
+ int ret;
imap = of_get_property(dev->of_node, "interrupt-map", &imaplen);
if (!imap)
diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
index 360d88687e4f..2a54adeb4cc7 100644
--- a/drivers/irqchip/irq-renesas-rzg2l.c
+++ b/drivers/irqchip/irq-renesas-rzg2l.c
@@ -578,7 +578,7 @@ static int rzg2l_irqc_common_init(struct device_node *node, struct device_node *
&rzg2l_irqc_domain_ops, rzg2l_irqc_data);
if (!irq_domain) {
pm_runtime_put(dev);
- return dev_err_probe(dev, -ENOMEM, "failed to add irq domain\n");
+ return -ENOMEM;
}
register_syscore_ops(&rzg2l_irqc_syscore_ops);
diff --git a/drivers/irqchip/irq-riscv-imsic-early.c b/drivers/irqchip/irq-riscv-imsic-early.c
index 2709cacf4855..2c4c682627b8 100644
--- a/drivers/irqchip/irq-riscv-imsic-early.c
+++ b/drivers/irqchip/irq-riscv-imsic-early.c
@@ -7,6 +7,7 @@
#define pr_fmt(fmt) "riscv-imsic: " fmt
#include <linux/acpi.h>
#include <linux/cpu.h>
+#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -233,6 +234,7 @@ struct fwnode_handle *imsic_acpi_get_fwnode(struct device *dev)
{
return imsic_acpi_fwnode;
}
+EXPORT_SYMBOL_GPL(imsic_acpi_get_fwnode);
static int __init imsic_early_acpi_init(union acpi_subtable_headers *header,
const unsigned long end)
diff --git a/drivers/irqchip/irq-riscv-imsic-platform.c b/drivers/irqchip/irq-riscv-imsic-platform.c
index 74a2a28f9403..643c8e459611 100644
--- a/drivers/irqchip/irq-riscv-imsic-platform.c
+++ b/drivers/irqchip/irq-riscv-imsic-platform.c
@@ -308,7 +308,6 @@ static const struct msi_parent_ops imsic_msi_parent_ops = {
int imsic_irqdomain_init(void)
{
struct irq_domain_info info = {
- .fwnode = imsic->fwnode,
.ops = &imsic_base_domain_ops,
.host_data = imsic,
};
@@ -325,6 +324,7 @@ int imsic_irqdomain_init(void)
}
/* Create Base IRQ domain */
+ info.fwnode = imsic->fwnode;
imsic->base_domain = msi_create_parent_irq_domain(&info, &imsic_msi_parent_ops);
if (!imsic->base_domain) {
pr_err("%pfwP: failed to create IMSIC base domain\n", imsic->fwnode);
diff --git a/drivers/irqchip/irq-riscv-rpmi-sysmsi.c b/drivers/irqchip/irq-riscv-rpmi-sysmsi.c
new file mode 100644
index 000000000000..5c74c561ce31
--- /dev/null
+++ b/drivers/irqchip/irq-riscv-rpmi-sysmsi.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2025 Ventana Micro Systems Inc. */
+
+#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/device/devres.h>
+#include <linux/dev_printk.h>
+#include <linux/errno.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/riscv-imsic.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox/riscv-rpmi-message.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+struct rpmi_sysmsi_get_attrs_rx {
+ __le32 status;
+ __le32 sys_num_msi;
+ __le32 flag0;
+ __le32 flag1;
+};
+
+#define RPMI_SYSMSI_MSI_ATTRIBUTES_FLAG0_PREF_PRIV BIT(0)
+
+struct rpmi_sysmsi_set_msi_state_tx {
+ __le32 sys_msi_index;
+ __le32 sys_msi_state;
+};
+
+struct rpmi_sysmsi_set_msi_state_rx {
+ __le32 status;
+};
+
+#define RPMI_SYSMSI_MSI_STATE_ENABLE BIT(0)
+#define RPMI_SYSMSI_MSI_STATE_PENDING BIT(1)
+
+struct rpmi_sysmsi_set_msi_target_tx {
+ __le32 sys_msi_index;
+ __le32 sys_msi_address_low;
+ __le32 sys_msi_address_high;
+ __le32 sys_msi_data;
+};
+
+struct rpmi_sysmsi_set_msi_target_rx {
+ __le32 status;
+};
+
+struct rpmi_sysmsi_priv {
+ struct device *dev;
+ struct mbox_client client;
+ struct mbox_chan *chan;
+ u32 nr_irqs;
+ u32 gsi_base;
+};
+
+static int rpmi_sysmsi_get_num_msi(struct rpmi_sysmsi_priv *priv)
+{
+ struct rpmi_sysmsi_get_attrs_rx rx;
+ struct rpmi_mbox_message msg;
+ int ret;
+
+ rpmi_mbox_init_send_with_response(&msg, RPMI_SYSMSI_SRV_GET_ATTRIBUTES,
+ NULL, 0, &rx, sizeof(rx));
+ ret = rpmi_mbox_send_message(priv->chan, &msg);
+ if (ret)
+ return ret;
+ if (rx.status)
+ return rpmi_to_linux_error(le32_to_cpu(rx.status));
+
+ return le32_to_cpu(rx.sys_num_msi);
+}
+
+static int rpmi_sysmsi_set_msi_state(struct rpmi_sysmsi_priv *priv,
+ u32 sys_msi_index, u32 sys_msi_state)
+{
+ struct rpmi_sysmsi_set_msi_state_tx tx;
+ struct rpmi_sysmsi_set_msi_state_rx rx;
+ struct rpmi_mbox_message msg;
+ int ret;
+
+ tx.sys_msi_index = cpu_to_le32(sys_msi_index);
+ tx.sys_msi_state = cpu_to_le32(sys_msi_state);
+ rpmi_mbox_init_send_with_response(&msg, RPMI_SYSMSI_SRV_SET_MSI_STATE,
+ &tx, sizeof(tx), &rx, sizeof(rx));
+ ret = rpmi_mbox_send_message(priv->chan, &msg);
+ if (ret)
+ return ret;
+ if (rx.status)
+ return rpmi_to_linux_error(le32_to_cpu(rx.status));
+
+ return 0;
+}
+
+static int rpmi_sysmsi_set_msi_target(struct rpmi_sysmsi_priv *priv,
+ u32 sys_msi_index, struct msi_msg *m)
+{
+ struct rpmi_sysmsi_set_msi_target_tx tx;
+ struct rpmi_sysmsi_set_msi_target_rx rx;
+ struct rpmi_mbox_message msg;
+ int ret;
+
+ tx.sys_msi_index = cpu_to_le32(sys_msi_index);
+ tx.sys_msi_address_low = cpu_to_le32(m->address_lo);
+ tx.sys_msi_address_high = cpu_to_le32(m->address_hi);
+ tx.sys_msi_data = cpu_to_le32(m->data);
+ rpmi_mbox_init_send_with_response(&msg, RPMI_SYSMSI_SRV_SET_MSI_TARGET,
+ &tx, sizeof(tx), &rx, sizeof(rx));
+ ret = rpmi_mbox_send_message(priv->chan, &msg);
+ if (ret)
+ return ret;
+ if (rx.status)
+ return rpmi_to_linux_error(le32_to_cpu(rx.status));
+
+ return 0;
+}
+
+static void rpmi_sysmsi_irq_mask(struct irq_data *d)
+{
+ struct rpmi_sysmsi_priv *priv = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ int ret;
+
+ ret = rpmi_sysmsi_set_msi_state(priv, hwirq, 0);
+ if (ret)
+ dev_warn(priv->dev, "Failed to mask hwirq %lu (error %d)\n", hwirq, ret);
+ irq_chip_mask_parent(d);
+}
+
+static void rpmi_sysmsi_irq_unmask(struct irq_data *d)
+{
+ struct rpmi_sysmsi_priv *priv = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ int ret;
+
+ irq_chip_unmask_parent(d);
+ ret = rpmi_sysmsi_set_msi_state(priv, hwirq, RPMI_SYSMSI_MSI_STATE_ENABLE);
+ if (ret)
+ dev_warn(priv->dev, "Failed to unmask hwirq %lu (error %d)\n", hwirq, ret);
+}
+
+static void rpmi_sysmsi_write_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ struct rpmi_sysmsi_priv *priv = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ int ret;
+
+ /* For a zeroed MSI message, do nothing for now */
+ if (!msg->address_hi && !msg->address_lo && !msg->data)
+ return;
+
+ ret = rpmi_sysmsi_set_msi_target(priv, hwirq, msg);
+ if (ret)
+ dev_warn(priv->dev, "Failed to set target for hwirq %lu (error %d)\n", hwirq, ret);
+}
+
+static void rpmi_sysmsi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
+{
+ arg->desc = desc;
+ arg->hwirq = desc->data.icookie.value;
+}
+
+static int rpmi_sysmsi_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *hwirq, unsigned int *type)
+{
+ struct msi_domain_info *info = d->host_data;
+ struct rpmi_sysmsi_priv *priv = info->data;
+
+ if (WARN_ON(fwspec->param_count < 1))
+ return -EINVAL;
+
+ /* For DT, gsi_base is always zero. */
+ *hwirq = fwspec->param[0] - priv->gsi_base;
+ *type = IRQ_TYPE_NONE;
+ return 0;
+}
+
+static const struct msi_domain_template rpmi_sysmsi_template = {
+ .chip = {
+ .name = "RPMI-SYSMSI",
+ .irq_mask = rpmi_sysmsi_irq_mask,
+ .irq_unmask = rpmi_sysmsi_irq_unmask,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+#endif
+ .irq_write_msi_msg = rpmi_sysmsi_write_msg,
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
+ },
+
+ .ops = {
+ .set_desc = rpmi_sysmsi_set_desc,
+ .msi_translate = rpmi_sysmsi_translate,
+ },
+
+ .info = {
+ .bus_token = DOMAIN_BUS_WIRED_TO_MSI,
+ .flags = MSI_FLAG_USE_DEV_FWNODE,
+ .handler = handle_simple_irq,
+ .handler_name = "simple",
+ },
+};
+
+static int rpmi_sysmsi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rpmi_sysmsi_priv *priv;
+ struct fwnode_handle *fwnode;
+ u32 id;
+ int rc;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ priv->dev = dev;
+
+ /* Setup mailbox client */
+ priv->client.dev = priv->dev;
+ priv->client.rx_callback = NULL;
+ priv->client.tx_block = false;
+ priv->client.knows_txdone = true;
+ priv->client.tx_tout = 0;
+
+ /* Request mailbox channel */
+ priv->chan = mbox_request_channel(&priv->client, 0);
+ if (IS_ERR(priv->chan))
+ return PTR_ERR(priv->chan);
+
+ /* Get number of system MSIs */
+ rc = rpmi_sysmsi_get_num_msi(priv);
+ if (rc < 1) {
+ mbox_free_channel(priv->chan);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to get number of system MSIs\n");
+ else
+ return dev_err_probe(dev, -ENODEV, "No system MSIs found\n");
+ }
+ priv->nr_irqs = rc;
+
+ fwnode = dev_fwnode(dev);
+ if (is_acpi_node(fwnode)) {
+ u32 nr_irqs;
+
+ rc = riscv_acpi_get_gsi_info(fwnode, &priv->gsi_base, &id,
+ &nr_irqs, NULL);
+ if (rc) {
+ dev_err(dev, "failed to find GSI mapping\n");
+ return rc;
+ }
+
+ /* Update with actual GSI range */
+ if (nr_irqs != priv->nr_irqs)
+ riscv_acpi_update_gsi_range(priv->gsi_base, priv->nr_irqs);
+ }
+
+ /*
+ * The device MSI domain for platform devices on the RISC-V
+ * architecture is only available after the MSI controller driver
+ * is probed, so configure it explicitly here.
+ */
+ if (!dev_get_msi_domain(dev)) {
+ /*
+ * The device MSI domain for OF devices is only set when the
+ * OF device is populated/created. If the device MSI domain is
+ * discovered after the OF device has been created, it must be
+ * set explicitly before any platform MSI functions are used.
+ */
+ if (is_of_node(fwnode)) {
+ of_msi_configure(dev, dev_of_node(dev));
+ } else if (is_acpi_device_node(fwnode)) {
+ struct irq_domain *msi_domain;
+
+ msi_domain = irq_find_matching_fwnode(imsic_acpi_get_fwnode(dev),
+ DOMAIN_BUS_PLATFORM_MSI);
+ dev_set_msi_domain(dev, msi_domain);
+ }
+
+ if (!dev_get_msi_domain(dev)) {
+ mbox_free_channel(priv->chan);
+ return -EPROBE_DEFER;
+ }
+ }
+
+ if (!msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN,
+ &rpmi_sysmsi_template,
+ priv->nr_irqs, priv, priv)) {
+ mbox_free_channel(priv->chan);
+ return dev_err_probe(dev, -ENOMEM, "failed to create MSI irq domain\n");
+ }
+
+#ifdef CONFIG_ACPI
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+
+ if (adev)
+ acpi_dev_clear_dependencies(adev);
+#endif
+
+ dev_info(dev, "%u system MSIs registered\n", priv->nr_irqs);
+ return 0;
+}
+
+static const struct of_device_id rpmi_sysmsi_match[] = {
+ { .compatible = "riscv,rpmi-system-msi" },
+ {}
+};
+
+static const struct acpi_device_id acpi_rpmi_sysmsi_match[] = {
+ { "RSCV0006" },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, acpi_rpmi_sysmsi_match);
+
+static struct platform_driver rpmi_sysmsi_driver = {
+ .driver = {
+ .name = "rpmi-sysmsi",
+ .of_match_table = rpmi_sysmsi_match,
+ .acpi_match_table = acpi_rpmi_sysmsi_match,
+ },
+ .probe = rpmi_sysmsi_probe,
+};
+builtin_platform_driver(rpmi_sysmsi_driver);
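The new driver above funnels every RPMI operation through the same request/response shape: a little-endian TX struct, an RX struct whose first word carries an in-band status, and the rpmi_mbox_* helpers. A minimal sketch of that round trip, using only the helpers visible in this file (MY_SERVICE_ID and the payload layout are hypothetical):

	struct my_tx { __le32 index; };
	struct my_rx { __le32 status; };

	static int my_rpmi_call(struct mbox_chan *chan, u32 index)
	{
		struct my_tx tx = { .index = cpu_to_le32(index) };
		struct rpmi_mbox_message msg;
		struct my_rx rx;
		int ret;

		rpmi_mbox_init_send_with_response(&msg, MY_SERVICE_ID,
						  &tx, sizeof(tx), &rx, sizeof(rx));
		ret = rpmi_mbox_send_message(chan, &msg);
		if (ret)		/* transport-level error */
			return ret;
		if (rx.status)		/* in-band RPMI error, mapped to an errno */
			return rpmi_to_linux_error(le32_to_cpu(rx.status));
		return 0;
	}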
diff --git a/drivers/irqchip/irq-sg2042-msi.c b/drivers/irqchip/irq-sg2042-msi.c
index bcfddc51bc6a..f7cf0dc72eab 100644
--- a/drivers/irqchip/irq-sg2042-msi.c
+++ b/drivers/irqchip/irq-sg2042-msi.c
@@ -30,6 +30,7 @@ struct sg204x_msi_chip_info {
* @doorbell_addr: see TRM, 10.1.32, GP_INTR0_SET
* @irq_first: First vector number where MSIs start
* @num_irqs: Number of vectors for MSIs
+ * @irq_type: IRQ type for MSIs
* @msi_map: mapping for allocated MSI vectors.
* @msi_map_lock: Lock for msi_map
* @chip_info: chip specific information
@@ -41,6 +42,7 @@ struct sg204x_msi_chipdata {
u32 irq_first;
u32 num_irqs;
+ unsigned int irq_type;
unsigned long *msi_map;
struct mutex msi_map_lock;
@@ -85,6 +87,8 @@ static void sg2042_msi_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *m
static const struct irq_chip sg2042_msi_middle_irq_chip = {
.name = "SG2042 MSI",
+ .irq_startup = irq_chip_startup_parent,
+ .irq_shutdown = irq_chip_shutdown_parent,
.irq_ack = sg2042_msi_irq_ack,
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
@@ -114,6 +118,8 @@ static void sg2044_msi_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *m
static struct irq_chip sg2044_msi_middle_irq_chip = {
.name = "SG2044 MSI",
+ .irq_startup = irq_chip_startup_parent,
+ .irq_shutdown = irq_chip_shutdown_parent,
.irq_ack = sg2044_msi_irq_ack,
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
@@ -133,14 +139,14 @@ static int sg204x_msi_parent_domain_alloc(struct irq_domain *domain, unsigned in
fwspec.fwnode = domain->parent->fwnode;
fwspec.param_count = 2;
fwspec.param[0] = data->irq_first + hwirq;
- fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+ fwspec.param[1] = data->irq_type;
ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
if (ret)
return ret;
d = irq_domain_get_irq_data(domain->parent, virq);
- return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
+ return d->chip->irq_set_type(d, data->irq_type);
}
static int sg204x_msi_middle_domain_alloc(struct irq_domain *domain, unsigned int virq,
@@ -185,8 +191,10 @@ static const struct irq_domain_ops sg204x_msi_middle_domain_ops = {
.select = msi_lib_irq_domain_select,
};
-#define SG2042_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
- MSI_FLAG_USE_DEF_CHIP_OPS)
+#define SG2042_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT | \
+ MSI_FLAG_PCI_MSI_STARTUP_PARENT)
#define SG2042_MSI_FLAGS_SUPPORTED MSI_GENERIC_FLAGS_MASK
@@ -200,10 +208,13 @@ static const struct msi_parent_ops sg2042_msi_parent_ops = {
.init_dev_msi_info = msi_lib_init_dev_msi_info,
};
-#define SG2044_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
- MSI_FLAG_USE_DEF_CHIP_OPS)
+#define SG2044_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT | \
+ MSI_FLAG_PCI_MSI_STARTUP_PARENT)
-#define SG2044_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+#define SG2044_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_MULTI_PCI_MSI | \
MSI_FLAG_PCI_MSIX)
static const struct msi_parent_ops sg2044_msi_parent_ops = {
@@ -289,6 +300,7 @@ static int sg2042_msi_probe(struct platform_device *pdev)
}
data->irq_first = (u32)args.args[0];
+ data->irq_type = (unsigned int)args.args[1];
data->num_irqs = (u32)args.args[args.nargs - 1];
mutex_init(&data->msi_map_lock);
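With the change above, the sg2042 driver stops hardcoding IRQ_TYPE_EDGE_RISING and instead takes the trigger type from the second cell of its parent arguments. A sketch of the expected argument layout (the binding values shown are hypothetical):

	/*
	 * msi-ranges = <&intc 64 IRQ_TYPE_LEVEL_HIGH 32>;   (hypothetical)
	 *
	 * args.args[0]              -> first parent hwirq  (irq_first)
	 * args.args[1]              -> trigger type        (irq_type)
	 * args.args[args.nargs - 1] -> number of vectors   (num_irqs)
	 */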
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index bf69a4802b71..cbd7697bc148 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -179,12 +179,14 @@ static int plic_set_affinity(struct irq_data *d,
if (cpu >= nr_cpu_ids)
return -EINVAL;
- plic_irq_disable(d);
+ /* Invalidate the original routing entry */
+ plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);
irq_data_update_effective_affinity(d, cpumask_of(cpu));
+ /* Set the new routing entry if the irq is enabled */
if (!irqd_irq_disabled(d))
- plic_irq_enable(d);
+ plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);
return IRQ_SET_MASK_OK_DONE;
}
@@ -252,12 +254,13 @@ static int plic_irq_suspend(void)
priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;
- for (i = 0; i < priv->nr_irqs; i++) {
+ /* irq ID 0 is reserved */
+ for (i = 1; i < priv->nr_irqs; i++) {
__assign_bit(i, priv->prio_save,
readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID));
}
- for_each_cpu(cpu, cpu_present_mask) {
+ for_each_present_cpu(cpu) {
struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
if (!handler->present)
@@ -283,13 +286,14 @@ static void plic_irq_resume(void)
priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;
- for (i = 0; i < priv->nr_irqs; i++) {
+ /* irq ID 0 is reserved */
+ for (i = 1; i < priv->nr_irqs; i++) {
index = BIT_WORD(i);
writel((priv->prio_save[index] & BIT_MASK(i)) ? 1 : 0,
priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID);
}
- for_each_cpu(cpu, cpu_present_mask) {
+ for_each_present_cpu(cpu) {
struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
if (!handler->present)
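The affinity change above is worth spelling out: rather than a full disable/enable cycle, routing for the old effective-affinity mask is torn down, the new mask is recorded, and routing is re-established only if the line is not disabled. A minimal sketch of that ordering, reassembled from the hunk with plic_irq_toggle() standing in for the hardware write:

	static int example_set_affinity(struct irq_data *d, const struct cpumask *mask)
	{
		unsigned int cpu = cpumask_first(mask);

		/* 1. Invalidate routing for the CPUs currently targeted. */
		plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);

		/* 2. Record the new effective affinity. */
		irq_data_update_effective_affinity(d, cpumask_of(cpu));

		/* 3. Re-route only if the interrupt is not currently disabled. */
		if (!irqd_irq_disabled(d))
			plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);

		return IRQ_SET_MASK_OK_DONE;
	}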
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 2b05722d4dbe..ea8a0ab47afd 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -39,12 +39,13 @@
#include "hfc_pci.h"
+static void hfcpci_softirq(struct timer_list *unused);
static const char *hfcpci_revision = "2.0";
static int HFC_cnt;
static uint debug;
static uint poll, tics;
-static struct timer_list hfc_tl;
+static DEFINE_TIMER(hfc_tl, hfcpci_softirq);
static unsigned long hfc_jiffies;
MODULE_AUTHOR("Karsten Keil");
@@ -2305,8 +2306,7 @@ hfcpci_softirq(struct timer_list *unused)
hfc_jiffies = jiffies + 1;
else
hfc_jiffies += tics;
- hfc_tl.expires = hfc_jiffies;
- add_timer(&hfc_tl);
+ mod_timer(&hfc_tl, hfc_jiffies);
}
static int __init
@@ -2332,10 +2332,8 @@ HFC_init(void)
if (poll != HFCPCI_BTRANS_THRESHOLD) {
printk(KERN_INFO "%s: Using alternative poll value of %d\n",
__func__, poll);
- timer_setup(&hfc_tl, hfcpci_softirq, 0);
- hfc_tl.expires = jiffies + tics;
- hfc_jiffies = hfc_tl.expires;
- add_timer(&hfc_tl);
+ hfc_jiffies = jiffies + tics;
+ mod_timer(&hfc_tl, hfc_jiffies);
} else
tics = 0; /* indicate the use of controller's timer */
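The hfcpci conversion above replaces open-coded timer_setup()/add_timer() with DEFINE_TIMER() and mod_timer(). The same pattern in isolation (my_poll and the 10 ms period are hypothetical):

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static void my_poll(struct timer_list *unused);
	static DEFINE_TIMER(my_timer, my_poll);		/* statically initialized */

	static void my_poll(struct timer_list *unused)
	{
		/* ... periodic work ... */
		mod_timer(&my_timer, jiffies + msecs_to_jiffies(10));	/* re-arm */
	}

	static int __init my_init(void)
	{
		/* mod_timer() also arms an inactive timer; no add_timer() needed. */
		mod_timer(&my_timer, jiffies + msecs_to_jiffies(10));
		return 0;
	}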
diff --git a/drivers/isdn/mISDN/dsp_hwec.c b/drivers/isdn/mISDN/dsp_hwec.c
index 0b3f29195330..0cd216e28f00 100644
--- a/drivers/isdn/mISDN/dsp_hwec.c
+++ b/drivers/isdn/mISDN/dsp_hwec.c
@@ -51,14 +51,14 @@ void dsp_hwec_enable(struct dsp *dsp, const char *arg)
goto _do;
{
- char *dup, *tok, *name, *val;
+ char *dup, *next, *tok, *name, *val;
int tmp;
- dup = kstrdup(arg, GFP_ATOMIC);
+ dup = next = kstrdup(arg, GFP_ATOMIC);
if (!dup)
return;
- while ((tok = strsep(&dup, ","))) {
+ while ((tok = strsep(&next, ","))) {
if (!strlen(tok))
continue;
name = strsep(&tok, "=");
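The dsp_hwec fix above addresses a classic strsep() pitfall: strsep() advances the pointer it is given, so the kstrdup() result must be kept in a separate variable, otherwise the original allocation can no longer be freed (the pointer ends up NULL or mid-string after the loop). The corrected shape:

	char *dup, *next, *tok;

	dup = next = kstrdup(arg, GFP_ATOMIC);
	if (!dup)
		return;

	/* strsep() advances 'next'; 'dup' still points at the allocation. */
	while ((tok = strsep(&next, ","))) {
		if (!strlen(tok))
			continue;
		/* ... parse tok ... */
	}

	kfree(dup);	/* kfree(next) here would be a bug */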
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 6e3dce7e35a4..06e6291be11b 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -674,7 +674,7 @@ config LEDS_BD2606MVV
help
This option enables support for BD2606MVV LED driver chips
accessed via the I2C bus. It supports setting brightness, with
- the limitiation that there are groups of two channels sharing
+ the limitation that there are groups of two channels sharing
a brightness setting, but not the on/off setting.
To compile this driver as a module, choose M here: the module will
diff --git a/drivers/leds/blink/leds-lgm-sso.c b/drivers/leds/blink/leds-lgm-sso.c
index c9027f9c4bb7..8923d2df4704 100644
--- a/drivers/leds/blink/leds-lgm-sso.c
+++ b/drivers/leds/blink/leds-lgm-sso.c
@@ -471,7 +471,7 @@ static int sso_gpio_gc_init(struct device *dev, struct sso_led_priv *priv)
gc->get_direction = sso_gpio_get_dir;
gc->direction_output = sso_gpio_dir_out;
gc->get = sso_gpio_get;
- gc->set_rv = sso_gpio_set;
+ gc->set = sso_gpio_set;
gc->label = "lgm-sso";
gc->base = -1;
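This and several later LED drivers (pca9532, pca955x, tca6507) switch their GPIO setter from .set_rv back to .set, which now carries the int-returning signature itself. A minimal sketch of the resulting callback, assuming a hypothetical driver:

	#include <linux/gpio/driver.h>

	static int my_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
	{
		/* ... write the output; I2C expanders can legitimately fail ... */
		return 0;
	}

	static void my_setup(struct gpio_chip *gc)
	{
		gc->set = my_gpio_set;	/* formerly assigned to gc->set_rv */
	}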
diff --git a/drivers/leds/flash/leds-qcom-flash.c b/drivers/leds/flash/leds-qcom-flash.c
index 89cf5120f5d5..b03a6833e3e3 100644
--- a/drivers/leds/flash/leds-qcom-flash.c
+++ b/drivers/leds/flash/leds-qcom-flash.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/bitfield.h>
@@ -114,36 +114,55 @@ enum {
REG_THERM_THRSH1,
REG_THERM_THRSH2,
REG_THERM_THRSH3,
+ REG_TORCH_CLAMP,
REG_MAX_COUNT,
};
+static const struct reg_field mvflash_3ch_pmi8998_regs[REG_MAX_COUNT] = {
+ [REG_STATUS1] = REG_FIELD(0x08, 0, 5),
+ [REG_STATUS2] = REG_FIELD(0x09, 0, 7),
+ [REG_STATUS3] = REG_FIELD(0x0a, 0, 7),
+ [REG_CHAN_TIMER] = REG_FIELD_ID(0x40, 0, 7, 3, 1),
+ [REG_ITARGET] = REG_FIELD_ID(0x43, 0, 6, 3, 1),
+ [REG_MODULE_EN] = REG_FIELD(0x46, 7, 7),
+ [REG_IRESOLUTION] = REG_FIELD(0x47, 0, 5),
+ [REG_CHAN_STROBE] = REG_FIELD_ID(0x49, 0, 2, 3, 1),
+ [REG_CHAN_EN] = REG_FIELD(0x4c, 0, 2),
+ [REG_THERM_THRSH1] = REG_FIELD(0x56, 0, 2),
+ [REG_THERM_THRSH2] = REG_FIELD(0x57, 0, 2),
+ [REG_THERM_THRSH3] = REG_FIELD(0x58, 0, 2),
+ [REG_TORCH_CLAMP] = REG_FIELD(0xea, 0, 6),
+};
+
static const struct reg_field mvflash_3ch_regs[REG_MAX_COUNT] = {
- REG_FIELD(0x08, 0, 7), /* status1 */
- REG_FIELD(0x09, 0, 7), /* status2 */
- REG_FIELD(0x0a, 0, 7), /* status3 */
- REG_FIELD_ID(0x40, 0, 7, 3, 1), /* chan_timer */
- REG_FIELD_ID(0x43, 0, 6, 3, 1), /* itarget */
- REG_FIELD(0x46, 7, 7), /* module_en */
- REG_FIELD(0x47, 0, 5), /* iresolution */
- REG_FIELD_ID(0x49, 0, 2, 3, 1), /* chan_strobe */
- REG_FIELD(0x4c, 0, 2), /* chan_en */
- REG_FIELD(0x56, 0, 2), /* therm_thrsh1 */
- REG_FIELD(0x57, 0, 2), /* therm_thrsh2 */
- REG_FIELD(0x58, 0, 2), /* therm_thrsh3 */
+ [REG_STATUS1] = REG_FIELD(0x08, 0, 7),
+ [REG_STATUS2] = REG_FIELD(0x09, 0, 7),
+ [REG_STATUS3] = REG_FIELD(0x0a, 0, 7),
+ [REG_CHAN_TIMER] = REG_FIELD_ID(0x40, 0, 7, 3, 1),
+ [REG_ITARGET] = REG_FIELD_ID(0x43, 0, 6, 3, 1),
+ [REG_MODULE_EN] = REG_FIELD(0x46, 7, 7),
+ [REG_IRESOLUTION] = REG_FIELD(0x47, 0, 5),
+ [REG_CHAN_STROBE] = REG_FIELD_ID(0x49, 0, 2, 3, 1),
+ [REG_CHAN_EN] = REG_FIELD(0x4c, 0, 2),
+ [REG_THERM_THRSH1] = REG_FIELD(0x56, 0, 2),
+ [REG_THERM_THRSH2] = REG_FIELD(0x57, 0, 2),
+ [REG_THERM_THRSH3] = REG_FIELD(0x58, 0, 2),
+ [REG_TORCH_CLAMP] = REG_FIELD(0xec, 0, 6),
};
static const struct reg_field mvflash_4ch_regs[REG_MAX_COUNT] = {
- REG_FIELD(0x06, 0, 7), /* status1 */
- REG_FIELD(0x07, 0, 6), /* status2 */
- REG_FIELD(0x09, 0, 7), /* status3 */
- REG_FIELD_ID(0x3e, 0, 7, 4, 1), /* chan_timer */
- REG_FIELD_ID(0x42, 0, 6, 4, 1), /* itarget */
- REG_FIELD(0x46, 7, 7), /* module_en */
- REG_FIELD(0x49, 0, 3), /* iresolution */
- REG_FIELD_ID(0x4a, 0, 6, 4, 1), /* chan_strobe */
- REG_FIELD(0x4e, 0, 3), /* chan_en */
- REG_FIELD(0x7a, 0, 2), /* therm_thrsh1 */
- REG_FIELD(0x78, 0, 2), /* therm_thrsh2 */
+ [REG_STATUS1] = REG_FIELD(0x06, 0, 7),
+ [REG_STATUS2] = REG_FIELD(0x07, 0, 6),
+ [REG_STATUS3] = REG_FIELD(0x09, 0, 7),
+ [REG_CHAN_TIMER] = REG_FIELD_ID(0x3e, 0, 7, 4, 1),
+ [REG_ITARGET] = REG_FIELD_ID(0x42, 0, 6, 4, 1),
+ [REG_MODULE_EN] = REG_FIELD(0x46, 7, 7),
+ [REG_IRESOLUTION] = REG_FIELD(0x49, 0, 3),
+ [REG_CHAN_STROBE] = REG_FIELD_ID(0x4a, 0, 6, 4, 1),
+ [REG_CHAN_EN] = REG_FIELD(0x4e, 0, 3),
+ [REG_THERM_THRSH1] = REG_FIELD(0x7a, 0, 2),
+ [REG_THERM_THRSH2] = REG_FIELD(0x78, 0, 2),
+ [REG_TORCH_CLAMP] = REG_FIELD(0xed, 0, 6),
};
struct qcom_flash_data {
@@ -156,6 +175,7 @@ struct qcom_flash_data {
u8 max_channels;
u8 chan_en_bits;
u8 revision;
+ u8 torch_clamp;
};
struct qcom_flash_led {
@@ -702,6 +722,7 @@ static int qcom_flash_register_led_device(struct device *dev,
u32 current_ua, timeout_us;
u32 channels[4];
int i, rc, count;
+ u8 torch_clamp;
count = fwnode_property_count_u32(node, "led-sources");
if (count <= 0) {
@@ -751,6 +772,12 @@ static int qcom_flash_register_led_device(struct device *dev,
current_ua = min_t(u32, current_ua, TORCH_CURRENT_MAX_UA * led->chan_count);
led->max_torch_current_ma = current_ua / UA_PER_MA;
+ torch_clamp = (current_ua / led->chan_count) / TORCH_IRES_UA;
+ if (torch_clamp != 0)
+ torch_clamp--;
+
+ flash_data->torch_clamp = max_t(u8, flash_data->torch_clamp, torch_clamp);
+
if (fwnode_property_present(node, "flash-max-microamp")) {
flash->led_cdev.flags |= LED_DEV_CAP_FLASH;
@@ -851,13 +878,20 @@ static int qcom_flash_led_probe(struct platform_device *pdev)
return rc;
}
- if (val == FLASH_SUBTYPE_3CH_PM8150_VAL || val == FLASH_SUBTYPE_3CH_PMI8998_VAL) {
+ if (val == FLASH_SUBTYPE_3CH_PM8150_VAL) {
flash_data->hw_type = QCOM_MVFLASH_3CH;
flash_data->max_channels = 3;
regs = devm_kmemdup(dev, mvflash_3ch_regs, sizeof(mvflash_3ch_regs),
GFP_KERNEL);
if (!regs)
return -ENOMEM;
+ } else if (val == FLASH_SUBTYPE_3CH_PMI8998_VAL) {
+ flash_data->hw_type = QCOM_MVFLASH_3CH;
+ flash_data->max_channels = 3;
+ regs = devm_kmemdup(dev, mvflash_3ch_pmi8998_regs,
+ sizeof(mvflash_3ch_pmi8998_regs), GFP_KERNEL);
+ if (!regs)
+ return -ENOMEM;
} else if (val == FLASH_SUBTYPE_4CH_VAL) {
flash_data->hw_type = QCOM_MVFLASH_4CH;
flash_data->max_channels = 4;
@@ -917,8 +951,7 @@ static int qcom_flash_led_probe(struct platform_device *pdev)
flash_data->leds_count++;
}
- return 0;
-
+ return regmap_field_write(flash_data->r_fields[REG_TORCH_CLAMP], flash_data->torch_clamp);
release:
while (flash_data->v4l2_flash[flash_data->leds_count] && flash_data->leds_count)
v4l2_flash_release(flash_data->v4l2_flash[flash_data->leds_count--]);
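The torch-clamp computation above converts the per-channel torch current into TORCH_IRES_UA-sized steps and subtracts one because the register field is zero-based; probe then writes the maximum clamp seen across all registered LEDs. Worked through with made-up numbers:

	/*
	 * Hypothetical values: current_ua = 500000, chan_count = 2,
	 * TORCH_IRES_UA = 5000.
	 *
	 *   per-channel current = 500000 / 2    = 250000 uA
	 *   steps               = 250000 / 5000 = 50
	 *   register value      = 50 - 1        = 49   (field is zero-based)
	 */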
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 15633fbf3c16..f3faf37f9a08 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -252,15 +252,23 @@ static const struct class leds_class = {
* of_led_get() - request a LED device via the LED framework
* @np: device node to get the LED device from
* @index: the index of the LED
+ * @name: the name of the LED used to map it to its function, if present
*
* Returns the LED device parsed from the phandle specified in the "leds"
* property of a device tree node or a negative error-code on failure.
*/
-static struct led_classdev *of_led_get(struct device_node *np, int index)
+static struct led_classdev *of_led_get(struct device_node *np, int index,
+ const char *name)
{
struct device *led_dev;
struct device_node *led_node;
+ /*
+ * For named LEDs, first look up the name in the "led-names" property.
+ * If it cannot be found, then of_parse_phandle() will propagate the error.
+ */
+ if (name)
+ index = of_property_match_string(np, "led-names", name);
led_node = of_parse_phandle(np, "leds", index);
if (!led_node)
return ERR_PTR(-ENOENT);
@@ -324,7 +332,7 @@ struct led_classdev *__must_check devm_of_led_get(struct device *dev,
if (!dev)
return ERR_PTR(-EINVAL);
- led = of_led_get(dev->of_node, index);
+ led = of_led_get(dev->of_node, index, NULL);
if (IS_ERR(led))
return led;
@@ -342,9 +350,14 @@ EXPORT_SYMBOL_GPL(devm_of_led_get);
struct led_classdev *led_get(struct device *dev, char *con_id)
{
struct led_lookup_data *lookup;
+ struct led_classdev *led_cdev;
const char *provider = NULL;
struct device *led_dev;
+ led_cdev = of_led_get(dev->of_node, -1, con_id);
+ if (!IS_ERR(led_cdev) || PTR_ERR(led_cdev) != -ENOENT)
+ return led_cdev;
+
mutex_lock(&leds_lookup_lock);
list_for_each_entry(lookup, &leds_lookup_list, list) {
if (!strcmp(lookup->dev_id, dev_name(dev)) &&
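With the led_get() change above, a consumer's con_id is first matched against the device node's "led-names" property, and the static lookup list is consulted only when the DT lookup reports -ENOENT. Consumer-side usage, with a hypothetical node layout shown in the comment:

	/*
	 * leds      = <&led_power>, <&led_status>;
	 * led-names = "power", "status";            (hypothetical consumer node)
	 */
	struct led_classdev *led = led_get(dev, "status");  /* resolves to &led_status */

	if (IS_ERR(led))
		return PTR_ERR(led);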
diff --git a/drivers/leds/leds-is31fl319x.c b/drivers/leds/leds-is31fl319x.c
index 27bfab3da479..e411cee06dab 100644
--- a/drivers/leds/leds-is31fl319x.c
+++ b/drivers/leds/leds-is31fl319x.c
@@ -483,11 +483,6 @@ static inline int is31fl3196_db_to_gain(u32 dezibel)
return dezibel / IS31FL3196_AUDIO_GAIN_DB_STEP;
}
-static void is31f1319x_mutex_destroy(void *lock)
-{
- mutex_destroy(lock);
-}
-
static int is31fl319x_probe(struct i2c_client *client)
{
struct is31fl319x_chip *is31;
@@ -503,8 +498,7 @@ static int is31fl319x_probe(struct i2c_client *client)
if (!is31)
return -ENOMEM;
- mutex_init(&is31->lock);
- err = devm_add_action_or_reset(dev, is31f1319x_mutex_destroy, &is31->lock);
+ err = devm_mutex_init(dev, &is31->lock);
if (err)
return err;
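The is31fl319x cleanup above relies on devm_mutex_init(), which registers mutex_destroy() as a devres action so the hand-rolled wrapper can go away. The pattern in isolation (my_chip is hypothetical):

	#include <linux/device.h>
	#include <linux/mutex.h>

	struct my_chip {
		struct mutex lock;
	};

	static int my_probe(struct device *dev, struct my_chip *chip)
	{
		/* Init plus automatic mutex_destroy() on driver detach. */
		int err = devm_mutex_init(dev, &chip->lock);

		if (err)
			return err;
		/* ... */
		return 0;
	}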
diff --git a/drivers/leds/leds-is31fl32xx.c b/drivers/leds/leds-is31fl32xx.c
index 8793330dd414..dc9349f9d350 100644
--- a/drivers/leds/leds-is31fl32xx.c
+++ b/drivers/leds/leds-is31fl32xx.c
@@ -32,6 +32,8 @@
#define IS31FL3216_CONFIG_SSD_ENABLE BIT(7)
#define IS31FL3216_CONFIG_SSD_DISABLE 0
+#define IS31FL32XX_PWM_FREQUENCY_22KHZ 0x01
+
struct is31fl32xx_priv;
struct is31fl32xx_led_data {
struct led_classdev cdev;
@@ -53,6 +55,7 @@ struct is31fl32xx_priv {
* @pwm_update_reg : address of PWM Update register
* @global_control_reg : address of Global Control register (optional)
* @reset_reg : address of Reset register (optional)
+ * @output_frequency_setting_reg: address of output frequency register (optional)
* @pwm_register_base : address of first PWM register
* @pwm_registers_reversed: : true if PWM registers count down instead of up
* @led_control_register_base : address of first LED control register (optional)
@@ -76,6 +79,7 @@ struct is31fl32xx_chipdef {
u8 pwm_update_reg;
u8 global_control_reg;
u8 reset_reg;
+ u8 output_frequency_setting_reg;
u8 pwm_register_base;
bool pwm_registers_reversed;
u8 led_control_register_base;
@@ -90,6 +94,19 @@ static const struct is31fl32xx_chipdef is31fl3236_cdef = {
.pwm_update_reg = 0x25,
.global_control_reg = 0x4a,
.reset_reg = 0x4f,
+ .output_frequency_setting_reg = IS31FL32XX_REG_NONE,
+ .pwm_register_base = 0x01,
+ .led_control_register_base = 0x26,
+ .enable_bits_per_led_control_register = 1,
+};
+
+static const struct is31fl32xx_chipdef is31fl3236a_cdef = {
+ .channels = 36,
+ .shutdown_reg = 0x00,
+ .pwm_update_reg = 0x25,
+ .global_control_reg = 0x4a,
+ .reset_reg = 0x4f,
+ .output_frequency_setting_reg = 0x4b,
.pwm_register_base = 0x01,
.led_control_register_base = 0x26,
.enable_bits_per_led_control_register = 1,
@@ -101,6 +118,7 @@ static const struct is31fl32xx_chipdef is31fl3235_cdef = {
.pwm_update_reg = 0x25,
.global_control_reg = 0x4a,
.reset_reg = 0x4f,
+ .output_frequency_setting_reg = IS31FL32XX_REG_NONE,
.pwm_register_base = 0x05,
.led_control_register_base = 0x2a,
.enable_bits_per_led_control_register = 1,
@@ -112,6 +130,7 @@ static const struct is31fl32xx_chipdef is31fl3218_cdef = {
.pwm_update_reg = 0x16,
.global_control_reg = IS31FL32XX_REG_NONE,
.reset_reg = 0x17,
+ .output_frequency_setting_reg = IS31FL32XX_REG_NONE,
.pwm_register_base = 0x01,
.led_control_register_base = 0x13,
.enable_bits_per_led_control_register = 6,
@@ -126,6 +145,7 @@ static const struct is31fl32xx_chipdef is31fl3216_cdef = {
.pwm_update_reg = 0xB0,
.global_control_reg = IS31FL32XX_REG_NONE,
.reset_reg = IS31FL32XX_REG_NONE,
+ .output_frequency_setting_reg = IS31FL32XX_REG_NONE,
.pwm_register_base = 0x10,
.pwm_registers_reversed = true,
.led_control_register_base = 0x01,
@@ -363,8 +383,21 @@ static struct is31fl32xx_led_data *is31fl32xx_find_led_data(
static int is31fl32xx_parse_dt(struct device *dev,
struct is31fl32xx_priv *priv)
{
+ const struct is31fl32xx_chipdef *cdef = priv->cdef;
int ret = 0;
+ if ((cdef->output_frequency_setting_reg != IS31FL32XX_REG_NONE) &&
+ of_property_read_bool(dev_of_node(dev), "issi,22khz-pwm")) {
+
+ ret = is31fl32xx_write(priv, cdef->output_frequency_setting_reg,
+ IS31FL32XX_PWM_FREQUENCY_22KHZ);
+
+ if (ret) {
+ dev_err(dev, "Failed to write output PWM frequency register\n");
+ return ret;
+ }
+ }
+
for_each_available_child_of_node_scoped(dev_of_node(dev), child) {
struct led_init_data init_data = {};
struct is31fl32xx_led_data *led_data =
@@ -404,12 +437,13 @@ static int is31fl32xx_parse_dt(struct device *dev,
}
static const struct of_device_id of_is31fl32xx_match[] = {
- { .compatible = "issi,is31fl3236", .data = &is31fl3236_cdef, },
- { .compatible = "issi,is31fl3235", .data = &is31fl3235_cdef, },
- { .compatible = "issi,is31fl3218", .data = &is31fl3218_cdef, },
- { .compatible = "si-en,sn3218", .data = &is31fl3218_cdef, },
- { .compatible = "issi,is31fl3216", .data = &is31fl3216_cdef, },
- { .compatible = "si-en,sn3216", .data = &is31fl3216_cdef, },
+ { .compatible = "issi,is31fl3236", .data = &is31fl3236_cdef, },
+ { .compatible = "issi,is31fl3236a", .data = &is31fl3236a_cdef, },
+ { .compatible = "issi,is31fl3235", .data = &is31fl3235_cdef, },
+ { .compatible = "issi,is31fl3218", .data = &is31fl3218_cdef, },
+ { .compatible = "si-en,sn3218", .data = &is31fl3218_cdef, },
+ { .compatible = "issi,is31fl3216", .data = &is31fl3216_cdef, },
+ { .compatible = "si-en,sn3216", .data = &is31fl3216_cdef, },
{},
};
@@ -466,6 +500,7 @@ static void is31fl32xx_remove(struct i2c_client *client)
*/
static const struct i2c_device_id is31fl32xx_id[] = {
{ "is31fl3236" },
+ { "is31fl3236a" },
{ "is31fl3235" },
{ "is31fl3218" },
{ "sn3218" },
diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c
index e71456a56ab8..fd447eb7eb15 100644
--- a/drivers/leds/leds-lp55xx-common.c
+++ b/drivers/leds/leds-lp55xx-common.c
@@ -212,7 +212,7 @@ int lp55xx_update_program_memory(struct lp55xx_chip *chip,
* For LED chip that support page, PAGE is already set in load_engine.
*/
if (!cfg->pages_per_engine)
- start_addr += LP55xx_BYTES_PER_PAGE * idx;
+ start_addr += LP55xx_BYTES_PER_PAGE * (idx - 1);
for (page = 0; page < program_length / LP55xx_BYTES_PER_PAGE; page++) {
/* Write to the next page each 32 bytes (if supported) */
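The lp55xx one-liner above accounts for engine indices being 1-based while page offsets are 0-based; the old expression presumably skipped one page too many. In short:

	/* Engines are numbered from 1, pages from 0, hence (idx - 1). */
	start_addr += LP55xx_BYTES_PER_PAGE * (idx - 1);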
diff --git a/drivers/leds/leds-max77705.c b/drivers/leds/leds-max77705.c
index 933cb4f19be9..b7403b3fcf5e 100644
--- a/drivers/leds/leds-max77705.c
+++ b/drivers/leds/leds-max77705.c
@@ -180,7 +180,7 @@ static int max77705_add_led(struct device *dev, struct regmap *regmap, struct fw
ret = fwnode_property_read_u32(np, "reg", &reg);
if (ret || reg >= MAX77705_LED_NUM_LEDS)
- ret = -EINVAL;
+ return -EINVAL;
info = devm_kcalloc(dev, num_channels, sizeof(*info), GFP_KERNEL);
if (!info)
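The max77705 fix above closes a fall-through bug: the error code was only assigned to ret, so probe kept running with an out-of-range reg. The corrected control flow:

	ret = fwnode_property_read_u32(np, "reg", &reg);
	if (ret || reg >= MAX77705_LED_NUM_LEDS)
		return -EINVAL;	/* previously 'ret = -EINVAL;' fell through */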
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 7d4c071a6cd0..0344189bb991 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -473,7 +473,7 @@ static int pca9532_configure(struct i2c_client *client,
data->gpio.label = "gpio-pca9532";
data->gpio.direction_input = pca9532_gpio_direction_input;
data->gpio.direction_output = pca9532_gpio_direction_output;
- data->gpio.set_rv = pca9532_gpio_set_value;
+ data->gpio.set = pca9532_gpio_set_value;
data->gpio.get = pca9532_gpio_get_value;
data->gpio.request = pca9532_gpio_request_pin;
data->gpio.can_sleep = 1;
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index 70d109246088..2007fe6217ec 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -737,7 +737,7 @@ static int pca955x_probe(struct i2c_client *client)
pca955x->gpio.label = "gpio-pca955x";
pca955x->gpio.direction_input = pca955x_gpio_direction_input;
pca955x->gpio.direction_output = pca955x_gpio_direction_output;
- pca955x->gpio.set_rv = pca955x_gpio_set_value;
+ pca955x->gpio.set = pca955x_gpio_set_value;
pca955x->gpio.get = pca955x_gpio_get_value;
pca955x->gpio.request = pca955x_gpio_request_pin;
pca955x->gpio.free = pca955x_gpio_free_pin;
diff --git a/drivers/leds/leds-qnap-mcu.c b/drivers/leds/leds-qnap-mcu.c
index 4e4709456261..6df110e33ac9 100644
--- a/drivers/leds/leds-qnap-mcu.c
+++ b/drivers/leds/leds-qnap-mcu.c
@@ -104,9 +104,9 @@ static int qnap_mcu_register_err_led(struct device *dev, struct qnap_mcu *mcu, i
}
enum qnap_mcu_usb_led_mode {
- QNAP_MCU_USB_LED_ON = 1,
- QNAP_MCU_USB_LED_OFF = 3,
- QNAP_MCU_USB_LED_BLINK = 2,
+ QNAP_MCU_USB_LED_ON = 0,
+ QNAP_MCU_USB_LED_OFF = 2,
+ QNAP_MCU_USB_LED_BLINK = 1,
};
struct qnap_mcu_usb_led {
@@ -137,7 +137,7 @@ static int qnap_mcu_usb_led_set(struct led_classdev *led_cdev,
* Byte 3 is shared between the usb led target on/off/blink
* and also the buzzer control (in the input driver)
*/
- cmd[2] = 'D' + usb_led->mode;
+ cmd[2] = 'E' + usb_led->mode;
return qnap_mcu_exec_with_ack(usb_led->mcu, cmd, sizeof(cmd));
}
@@ -161,7 +161,7 @@ static int qnap_mcu_usb_led_blink_set(struct led_classdev *led_cdev,
* Byte 3 is shared between the USB LED target on/off/blink
* and also the buzzer control (in the input driver)
*/
- cmd[2] = 'D' + usb_led->mode;
+ cmd[2] = 'E' + usb_led->mode;
return qnap_mcu_exec_with_ack(usb_led->mcu, cmd, sizeof(cmd));
}
@@ -190,6 +190,166 @@ static int qnap_mcu_register_usb_led(struct device *dev, struct qnap_mcu *mcu)
return qnap_mcu_usb_led_set(&usb_led->cdev, 0);
}
+enum qnap_mcu_status_led_mode {
+ QNAP_MCU_STATUS_LED_OFF = 0,
+ QNAP_MCU_STATUS_LED_ON = 1,
+ QNAP_MCU_STATUS_LED_BLINK_FAST = 2, /* 500ms / 500ms */
+ QNAP_MCU_STATUS_LED_BLINK_SLOW = 3, /* 1s / 1s */
+};
+
+struct qnap_mcu_status_led {
+ struct led_classdev cdev;
+ struct qnap_mcu_status_led *red;
+ u8 mode;
+};
+
+struct qnap_mcu_status {
+ struct qnap_mcu *mcu;
+ struct qnap_mcu_status_led red;
+ struct qnap_mcu_status_led green;
+};
+
+static inline struct qnap_mcu_status_led *cdev_to_qnap_mcu_status_led(struct led_classdev *led_cdev)
+{
+ return container_of(led_cdev, struct qnap_mcu_status_led, cdev);
+}
+
+static inline struct qnap_mcu_status *statusled_to_qnap_mcu_status(struct qnap_mcu_status_led *led)
+{
+ return container_of(led->red, struct qnap_mcu_status, red);
+}
+
+static u8 qnap_mcu_status_led_encode(struct qnap_mcu_status *status)
+{
+ if (status->red.mode == QNAP_MCU_STATUS_LED_OFF) {
+ switch (status->green.mode) {
+ case QNAP_MCU_STATUS_LED_OFF:
+ return '9';
+ case QNAP_MCU_STATUS_LED_ON:
+ return '6';
+ case QNAP_MCU_STATUS_LED_BLINK_FAST:
+ return '5';
+ case QNAP_MCU_STATUS_LED_BLINK_SLOW:
+ return 'A';
+ }
+ } else if (status->green.mode == QNAP_MCU_STATUS_LED_OFF) {
+ switch (status->red.mode) {
+ case QNAP_MCU_STATUS_LED_OFF:
+ return '9';
+ case QNAP_MCU_STATUS_LED_ON:
+ return '7';
+ case QNAP_MCU_STATUS_LED_BLINK_FAST:
+ return '4';
+ case QNAP_MCU_STATUS_LED_BLINK_SLOW:
+ return 'B';
+ }
+ } else if (status->green.mode == QNAP_MCU_STATUS_LED_ON &&
+ status->red.mode == QNAP_MCU_STATUS_LED_ON) {
+ return 'D';
+ } else if (status->green.mode == QNAP_MCU_STATUS_LED_BLINK_SLOW &&
+ status->red.mode == QNAP_MCU_STATUS_LED_BLINK_SLOW) {
+ return 'C';
+ }
+
+ /*
+ * Here both LEDs are on in some fashion, either both blinking fast
+ * or at different speeds, so default to fast blinking for both.
+ */
+ return '8';
+}
+
+static int qnap_mcu_status_led_update(struct qnap_mcu *mcu,
+ struct qnap_mcu_status *status)
+{
+ u8 cmd[] = { '@', 'C', 0 };
+
+ cmd[2] = qnap_mcu_status_led_encode(status);
+
+ return qnap_mcu_exec_with_ack(mcu, cmd, sizeof(cmd));
+}
+
+static int qnap_mcu_status_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct qnap_mcu_status_led *status_led = cdev_to_qnap_mcu_status_led(led_cdev);
+ struct qnap_mcu_status *base = statusled_to_qnap_mcu_status(status_led);
+
+ /* Don't disturb a possible set blink-mode if LED stays on */
+ if (brightness != 0 && status_led->mode >= QNAP_MCU_STATUS_LED_BLINK_FAST)
+ return 0;
+
+ status_led->mode = brightness ? QNAP_MCU_STATUS_LED_ON :
+ QNAP_MCU_STATUS_LED_OFF;
+
+ return qnap_mcu_status_led_update(base->mcu, base);
+}
+
+static int qnap_mcu_status_led_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct qnap_mcu_status_led *status_led = cdev_to_qnap_mcu_status_led(led_cdev);
+ struct qnap_mcu_status *base = statusled_to_qnap_mcu_status(status_led);
+
+ if (status_led->mode == QNAP_MCU_STATUS_LED_OFF)
+ return 0;
+
+ if (*delay_on <= 500) {
+ *delay_on = 500;
+ *delay_off = 500;
+ status_led->mode = QNAP_MCU_STATUS_LED_BLINK_FAST;
+ } else {
+ *delay_on = 1000;
+ *delay_off = 1000;
+ status_led->mode = QNAP_MCU_STATUS_LED_BLINK_SLOW;
+ }
+
+ return qnap_mcu_status_led_update(base->mcu, base);
+}
+
+static int qnap_mcu_register_status_leds(struct device *dev, struct qnap_mcu *mcu)
+{
+ struct qnap_mcu_status *status;
+ int ret;
+
+ status = devm_kzalloc(dev, sizeof(*status), GFP_KERNEL);
+ if (!status)
+ return -ENOMEM;
+
+ status->mcu = mcu;
+
+ /*
+ * point to the red led, so that statusled_to_qnap_mcu_status
+ * can resolve the main status struct containing both leds
+ */
+ status->red.red = &status->red;
+ status->green.red = &status->red;
+
+ status->red.mode = QNAP_MCU_STATUS_LED_OFF;
+ status->red.cdev.name = "red:status";
+ status->red.cdev.brightness_set_blocking = qnap_mcu_status_led_set;
+ status->red.cdev.blink_set = qnap_mcu_status_led_blink_set;
+ status->red.cdev.brightness = 0;
+ status->red.cdev.max_brightness = 1;
+
+ status->green.mode = QNAP_MCU_STATUS_LED_OFF;
+ status->green.cdev.name = "green:status";
+ status->green.cdev.brightness_set_blocking = qnap_mcu_status_led_set;
+ status->green.cdev.blink_set = qnap_mcu_status_led_blink_set;
+ status->green.cdev.brightness = 0;
+ status->green.cdev.max_brightness = 1;
+
+ ret = devm_led_classdev_register(dev, &status->red.cdev);
+ if (ret)
+ return ret;
+
+ ret = devm_led_classdev_register(dev, &status->green.cdev);
+ if (ret)
+ return ret;
+
+ return qnap_mcu_status_led_update(status->mcu, status);
+}
+
static int qnap_mcu_leds_probe(struct platform_device *pdev)
{
struct qnap_mcu *mcu = dev_get_drvdata(pdev->dev.parent);
@@ -210,6 +370,11 @@ static int qnap_mcu_leds_probe(struct platform_device *pdev)
"failed to register USB LED\n");
}
+ ret = qnap_mcu_register_status_leds(&pdev->dev, mcu);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to register status LEDs\n");
+
return 0;
}
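Note how qnap_mcu_status_led_blink_set() honors the LED core's blink_set contract: the MCU supports only two blink rates, so the handler snaps the requested delays to 500/500 or 1000/1000 and writes the values actually used back through the pointers. A reduced sketch of that contract:

	static int my_blink_set(struct led_classdev *cdev,
				unsigned long *delay_on, unsigned long *delay_off)
	{
		/* Report back the rates the hardware will really use. */
		if (*delay_on <= 500)
			*delay_on = *delay_off = 500;	/* fast blink */
		else
			*delay_on = *delay_off = 1000;	/* slow blink */

		return 0;
	}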
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index 89c165c8ee9c..fd0e8bab9a4b 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -637,7 +637,7 @@ static int tca6507_probe_gpios(struct device *dev,
tca->gpio.base = -1;
tca->gpio.owner = THIS_MODULE;
tca->gpio.direction_output = tca6507_gpio_direction_output;
- tca->gpio.set_rv = tca6507_gpio_set_value;
+ tca->gpio.set = tca6507_gpio_set_value;
tca->gpio.parent = dev;
err = devm_gpiochip_add_data(dev, &tca->gpio, tca);
if (err) {
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 4fef4797b110..29f16f220384 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -36,6 +36,15 @@ config ARM_MHU_V3
that provides different means of transports: supported extensions
will be discovered and possibly managed at probe-time.
+config AST2700_MBOX
+ tristate "ASPEED AST2700 IPC driver"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ help
+ Mailbox driver implementation for ASPEED AST27XX SoCs. This driver
+ can be used to send messages between different processors in the SoC.
+ The driver provides mailbox support for sending interrupts to the
+ clients. Say Y here if you want to build this driver.
+
config CV1800_MBOX
tristate "cv1800 mailbox"
depends on ARCH_SOPHGO || COMPILE_TEST
@@ -285,6 +294,16 @@ config MTK_CMDQ_MBOX
critical time limitation, such as updating display configuration
during the vblank.
+config MTK_GPUEB_MBOX
+ tristate "MediaTek GPUEB Mailbox Support"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ help
+ The MediaTek GPUEB mailbox is used to communicate with the embedded
+ controller in charge of GPU frequency and power management on some
+ MediaTek SoCs, such as the MT8196.
+ Say Y or m here if you want to support the MT8196 SoC in your kernel
+ build.
+
config ZYNQMP_IPI_MBOX
tristate "Xilinx ZynqMP IPI Mailbox"
depends on ARCH_ZYNQMP && OF
@@ -350,4 +369,25 @@ config CIX_MBOX
is unidirectional. Say Y here if you want to use the CIX Mailbox
support.
+config BCM74110_MAILBOX
+ tristate "Brcmstb BCM74110 Mailbox"
+ depends on ARCH_BRCMSTB || COMPILE_TEST
+ default ARCH_BRCMSTB
+ help
+ Broadcom STB mailbox driver, present starting with the brcmstb
+ bcm74110 SoCs. The mailbox is a communication channel between the
+ host processor and a coprocessor that handles various power
+ management tasks and more.
+
+config RISCV_SBI_MPXY_MBOX
+ tristate "RISC-V SBI Message Proxy (MPXY) Mailbox"
+ depends on RISCV_SBI
+ default RISCV
+ help
+ Mailbox driver implementation for RISC-V SBI Message Proxy (MPXY)
+ extension. This mailbox driver is used to send messages to the
+ remote processor through the SBI implementation (M-mode firmware
+ or HS-mode hypervisor). Say Y here if you want to have this support.
+ If unsure say N.
+
endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 786a46587ba1..81820a4f5528 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -11,6 +11,8 @@ obj-$(CONFIG_ARM_MHU_V2) += arm_mhuv2.o
obj-$(CONFIG_ARM_MHU_V3) += arm_mhuv3.o
+obj-$(CONFIG_AST2700_MBOX) += ast2700-mailbox.o
+
obj-$(CONFIG_CV1800_MBOX) += cv1800-mailbox.o
obj-$(CONFIG_EXYNOS_MBOX) += exynos-mailbox.o
@@ -61,6 +63,8 @@ obj-$(CONFIG_MTK_ADSP_MBOX) += mtk-adsp-mailbox.o
obj-$(CONFIG_MTK_CMDQ_MBOX) += mtk-cmdq-mailbox.o
+obj-$(CONFIG_MTK_GPUEB_MBOX) += mtk-gpueb-mailbox.o
+
obj-$(CONFIG_ZYNQMP_IPI_MBOX) += zynqmp-ipi-mailbox.o
obj-$(CONFIG_SUN6I_MSGBOX) += sun6i-msgbox.o
@@ -74,3 +78,7 @@ obj-$(CONFIG_QCOM_IPCC) += qcom-ipcc.o
obj-$(CONFIG_THEAD_TH1520_MBOX) += mailbox-th1520.o
obj-$(CONFIG_CIX_MBOX) += cix-mailbox.o
+
+obj-$(CONFIG_BCM74110_MAILBOX) += bcm74110-mailbox.o
+
+obj-$(CONFIG_RISCV_SBI_MPXY_MBOX) += riscv-sbi-mpxy-mbox.o
diff --git a/drivers/mailbox/arm_mhuv3.c b/drivers/mailbox/arm_mhuv3.c
index b97e79a5870f..0910da67f8a1 100644
--- a/drivers/mailbox/arm_mhuv3.c
+++ b/drivers/mailbox/arm_mhuv3.c
@@ -945,7 +945,7 @@ static irqreturn_t mhuv3_mbx_comb_interrupt(int irq, void *arg)
if (IS_ERR(data)) {
dev_err(dev,
"Failed to read in-band data. err:%ld\n",
- PTR_ERR(no_free_ptr(data)));
+ PTR_ERR(data));
goto rx_ack;
}
}
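The arm_mhuv3 one-liner drops a stray no_free_ptr() from a logging call. no_free_ptr() exists to transfer ownership out of a __free()-scoped variable, not to decorate an error print. Its intended use, sketched with hypothetical names:

	#include <linux/cleanup.h>
	#include <linux/slab.h>

	static void *make_buffer(size_t len)
	{
		void *buf __free(kfree) = kzalloc(len, GFP_KERNEL);

		if (!buf)
			return NULL;

		/* ... any early return above frees buf automatically ... */

		return no_free_ptr(buf);	/* disarm cleanup, hand over ownership */
	}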
diff --git a/drivers/mailbox/ast2700-mailbox.c b/drivers/mailbox/ast2700-mailbox.c
new file mode 100644
index 000000000000..83c6afe5411f
--- /dev/null
+++ b/drivers/mailbox/ast2700-mailbox.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright Aspeed Technology Inc. (C) 2025. All rights reserved
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* Each bit in the register represents an IPC ID */
+#define IPCR_TX_TRIG 0x00
+#define IPCR_ENABLE 0x04
+#define IPCR_STATUS 0x08
+#define RX_IRQ(n) BIT(n)
+#define RX_IRQ_MASK 0xf
+#define IPCR_DATA 0x10
+
+struct ast2700_mbox_data {
+ u8 num_chans;
+ u8 msg_size;
+};
+
+struct ast2700_mbox {
+ struct mbox_controller mbox;
+ u8 msg_size;
+ void __iomem *tx_regs;
+ void __iomem *rx_regs;
+ spinlock_t lock;
+};
+
+static inline int ch_num(struct mbox_chan *chan)
+{
+ return chan - chan->mbox->chans;
+}
+
+static inline bool ast2700_mbox_tx_done(struct ast2700_mbox *mb, int idx)
+{
+ return !(readl(mb->tx_regs + IPCR_STATUS) & BIT(idx));
+}
+
+static irqreturn_t ast2700_mbox_irq(int irq, void *p)
+{
+ struct ast2700_mbox *mb = p;
+ void __iomem *data_reg;
+ int num_words = mb->msg_size / sizeof(u32);
+ u32 *word_data;
+ u32 status;
+ int n, i;
+
+ /* Only examine channels that are currently enabled. */
+ status = readl(mb->rx_regs + IPCR_ENABLE) &
+ readl(mb->rx_regs + IPCR_STATUS);
+
+ if (!(status & RX_IRQ_MASK))
+ return IRQ_NONE;
+
+ for (n = 0; n < mb->mbox.num_chans; ++n) {
+ struct mbox_chan *chan = &mb->mbox.chans[n];
+
+ if (!(status & RX_IRQ(n)))
+ continue;
+
+ data_reg = mb->rx_regs + IPCR_DATA + mb->msg_size * n;
+ word_data = chan->con_priv;
+ /* Read the message data */
+ for (i = 0; i < num_words; i++)
+ word_data[i] = readl(data_reg + i * sizeof(u32));
+
+ mbox_chan_received_data(chan, chan->con_priv);
+
+ /* The IRQ can be cleared only once the FIFO is empty. */
+ writel(RX_IRQ(n), mb->rx_regs + IPCR_STATUS);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int ast2700_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+ int idx = ch_num(chan);
+ void __iomem *data_reg = mb->tx_regs + IPCR_DATA + mb->msg_size * idx;
+ u32 *word_data = data;
+ int num_words = mb->msg_size / sizeof(u32);
+ int i;
+
+ if (!(readl(mb->tx_regs + IPCR_ENABLE) & BIT(idx))) {
+ dev_warn(mb->mbox.dev, "%s: Ch-%d not enabled yet\n", __func__, idx);
+ return -ENODEV;
+ }
+
+ if (!(ast2700_mbox_tx_done(mb, idx))) {
+ dev_warn(mb->mbox.dev, "%s: Ch-%d last data has not finished\n", __func__, idx);
+ return -EBUSY;
+ }
+
+ /* Write the message data */
+ for (i = 0 ; i < num_words; i++)
+ writel(word_data[i], data_reg + i * sizeof(u32));
+
+ writel(BIT(idx), mb->tx_regs + IPCR_TX_TRIG);
+ dev_dbg(mb->mbox.dev, "%s: Ch-%d sent\n", __func__, idx);
+
+ return 0;
+}
+
+static int ast2700_mbox_startup(struct mbox_chan *chan)
+{
+ struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+ int idx = ch_num(chan);
+ void __iomem *reg = mb->rx_regs + IPCR_ENABLE;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mb->lock, flags);
+ writel(readl(reg) | BIT(idx), reg);
+ spin_unlock_irqrestore(&mb->lock, flags);
+
+ return 0;
+}
+
+static void ast2700_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+ int idx = ch_num(chan);
+ void __iomem *reg = mb->rx_regs + IPCR_ENABLE;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mb->lock, flags);
+ writel(readl(reg) & ~BIT(idx), reg);
+ spin_unlock_irqrestore(&mb->lock, flags);
+}
+
+static bool ast2700_mbox_last_tx_done(struct mbox_chan *chan)
+{
+ struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+ int idx = ch_num(chan);
+
+ return ast2700_mbox_tx_done(mb, idx);
+}
+
+static const struct mbox_chan_ops ast2700_mbox_chan_ops = {
+ .send_data = ast2700_mbox_send_data,
+ .startup = ast2700_mbox_startup,
+ .shutdown = ast2700_mbox_shutdown,
+ .last_tx_done = ast2700_mbox_last_tx_done,
+};
+
+static int ast2700_mbox_probe(struct platform_device *pdev)
+{
+ struct ast2700_mbox *mb;
+ const struct ast2700_mbox_data *dev_data;
+ struct device *dev = &pdev->dev;
+ int irq, ret;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ dev_data = device_get_match_data(&pdev->dev);
+
+ mb = devm_kzalloc(dev, sizeof(*mb), GFP_KERNEL);
+ if (!mb)
+ return -ENOMEM;
+
+ mb->mbox.chans = devm_kcalloc(&pdev->dev, dev_data->num_chans,
+ sizeof(*mb->mbox.chans), GFP_KERNEL);
+ if (!mb->mbox.chans)
+ return -ENOMEM;
+
+ /* con_priv of each channel is used to store the message received */
+ for (int i = 0; i < dev_data->num_chans; i++) {
+ mb->mbox.chans[i].con_priv = devm_kcalloc(dev, dev_data->msg_size,
+ sizeof(u8), GFP_KERNEL);
+ if (!mb->mbox.chans[i].con_priv)
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, mb);
+
+ mb->tx_regs = devm_platform_ioremap_resource_byname(pdev, "tx");
+ if (IS_ERR(mb->tx_regs))
+ return PTR_ERR(mb->tx_regs);
+
+ mb->rx_regs = devm_platform_ioremap_resource_byname(pdev, "rx");
+ if (IS_ERR(mb->rx_regs))
+ return PTR_ERR(mb->rx_regs);
+
+ mb->msg_size = dev_data->msg_size;
+ mb->mbox.dev = dev;
+ mb->mbox.num_chans = dev_data->num_chans;
+ mb->mbox.ops = &ast2700_mbox_chan_ops;
+ mb->mbox.txdone_irq = false;
+ mb->mbox.txdone_poll = true;
+ mb->mbox.txpoll_period = 5;
+ spin_lock_init(&mb->lock);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, ast2700_mbox_irq, 0, dev_name(dev), mb);
+ if (ret)
+ return ret;
+
+ return devm_mbox_controller_register(dev, &mb->mbox);
+}
+
+static const struct ast2700_mbox_data ast2700_dev_data = {
+ .num_chans = 4,
+ .msg_size = 0x20,
+};
+
+static const struct of_device_id ast2700_mbox_of_match[] = {
+ { .compatible = "aspeed,ast2700-mailbox", .data = &ast2700_dev_data },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ast2700_mbox_of_match);
+
+static struct platform_driver ast2700_mbox_driver = {
+ .driver = {
+ .name = "ast2700-mailbox",
+ .of_match_table = ast2700_mbox_of_match,
+ },
+ .probe = ast2700_mbox_probe,
+};
+module_platform_driver(ast2700_mbox_driver);
+
+MODULE_AUTHOR("Jammy Huang <jammy_huang@aspeedtech.com>");
+MODULE_DESCRIPTION("ASPEED AST2700 IPC driver");
+MODULE_LICENSE("GPL");
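Since the ast2700 controller registers with txdone_poll = true and a 5 ms poll period, a blocking client only needs to set tx_block and a timeout. A minimal consumer sketch (channel index and payload are hypothetical; messages are msg_size = 0x20 bytes):

	#include <linux/mailbox_client.h>

	static int my_send(struct device *dev)
	{
		struct mbox_client cl = {
			.dev      = dev,
			.tx_block = true,
			.tx_tout  = 100,	/* ms; framework polls last_tx_done() */
		};
		u32 payload[8] = { 0x12345678 };	/* 0x20 bytes */
		struct mbox_chan *chan;
		int ret;

		chan = mbox_request_channel(&cl, 0);
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		ret = mbox_send_message(chan, payload);
		mbox_free_channel(chan);

		return ret < 0 ? ret : 0;
	}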
diff --git a/drivers/mailbox/bcm74110-mailbox.c b/drivers/mailbox/bcm74110-mailbox.c
new file mode 100644
index 000000000000..2e7e86f3e6a4
--- /dev/null
+++ b/drivers/mailbox/bcm74110-mailbox.c
@@ -0,0 +1,656 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Broadcom BCM74110 Mailbox Driver
+ *
+ * Copyright (c) 2025 Broadcom
+ */
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/delay.h>
+#include <linux/mailbox_controller.h>
+#include <linux/bitfield.h>
+#include <linux/slab.h>
+
+#define BCM_MBOX_BASE(sel) ((sel) * 0x40)
+#define BCM_MBOX_IRQ_BASE(sel) (((sel) * 0x20) + 0x800)
+
+#define BCM_MBOX_CFGA 0x0
+#define BCM_MBOX_CFGB 0x4
+#define BCM_MBOX_CFGC 0x8
+#define BCM_MBOX_CFGD 0xc
+#define BCM_MBOX_CTRL 0x10
+#define BCM_MBOX_CTRL_EN BIT(0)
+#define BCM_MBOX_CTRL_CLR BIT(1)
+#define BCM_MBOX_STATUS0 0x14
+#define BCM_MBOX_STATUS0_NOT_EMPTY BIT(28)
+#define BCM_MBOX_STATUS0_FULL BIT(29)
+#define BCM_MBOX_STATUS1 0x18
+#define BCM_MBOX_STATUS2 0x1c
+#define BCM_MBOX_WDATA 0x20
+#define BCM_MBOX_RDATA 0x28
+
+#define BCM_MBOX_IRQ_STATUS 0x0
+#define BCM_MBOX_IRQ_SET 0x4
+#define BCM_MBOX_IRQ_CLEAR 0x8
+#define BCM_MBOX_IRQ_MASK_STATUS 0xc
+#define BCM_MBOX_IRQ_MASK_SET 0x10
+#define BCM_MBOX_IRQ_MASK_CLEAR 0x14
+#define BCM_MBOX_IRQ_TIMEOUT BIT(0)
+#define BCM_MBOX_IRQ_NOT_EMPTY BIT(1)
+#define BCM_MBOX_IRQ_FULL BIT(2)
+#define BCM_MBOX_IRQ_LOW_WM BIT(3)
+#define BCM_MBOX_IRQ_HIGH_WM BIT(4)
+
+#define BCM_LINK_CODE0 0xbe0
+#define BCM_LINK_CODE1 0xbe1
+#define BCM_LINK_CODE2 0xbe2
+
+enum {
+ BCM_MSG_FUNC_LINK_START = 0,
+ BCM_MSG_FUNC_LINK_STOP,
+ BCM_MSG_FUNC_SHMEM_TX,
+ BCM_MSG_FUNC_SHMEM_RX,
+ BCM_MSG_FUNC_SHMEM_STOP,
+ BCM_MSG_FUNC_MAX,
+};
+
+enum {
+ BCM_MSG_SVC_INIT = 0,
+ BCM_MSG_SVC_PMC,
+ BCM_MSG_SVC_SCMI,
+ BCM_MSG_SVC_DPFE,
+ BCM_MSG_SVC_MAX,
+};
+
+struct bcm74110_mbox_msg {
+ struct list_head list_entry;
+#define BCM_MSG_VERSION_MASK GENMASK(31, 29)
+#define BCM_MSG_VERSION 0x1
+#define BCM_MSG_REQ_MASK BIT(28)
+#define BCM_MSG_RPLY_MASK BIT(27)
+#define BCM_MSG_SVC_MASK GENMASK(26, 24)
+#define BCM_MSG_FUNC_MASK GENMASK(23, 16)
+#define BCM_MSG_LENGTH_MASK GENMASK(15, 4)
+#define BCM_MSG_SLOT_MASK GENMASK(3, 0)
+
+#define BCM_MSG_SET_FIELD(hdr, field, val) \
+ do { \
+ hdr &= ~BCM_MSG_##field##_MASK; \
+ hdr |= FIELD_PREP(BCM_MSG_##field##_MASK, val); \
+ } while (0)
+
+#define BCM_MSG_GET_FIELD(hdr, field) \
+ FIELD_GET(BCM_MSG_##field##_MASK, hdr)
+ u32 msg;
+};
+
+struct bcm74110_mbox_chan {
+ struct bcm74110_mbox *mbox;
+ bool en;
+ int slot;
+ int type;
+};
+
+struct bcm74110_mbox {
+ struct platform_device *pdev;
+ void __iomem *base;
+
+ int tx_chan;
+ int rx_chan;
+ int rx_irq;
+ struct list_head rx_svc_init_list;
+ spinlock_t rx_svc_list_lock;
+
+ struct mbox_controller controller;
+ struct bcm74110_mbox_chan *mbox_chan;
+};
+
+#define BCM74110_OFFSET_IO_WRITEL_MACRO(name, offset_base) \
+static void bcm74110_##name##_writel(struct bcm74110_mbox *mbox,\
+ u32 val, u32 off) \
+{ \
+ writel_relaxed(val, mbox->base + offset_base + off); \
+}
+BCM74110_OFFSET_IO_WRITEL_MACRO(tx, BCM_MBOX_BASE(mbox->tx_chan));
+BCM74110_OFFSET_IO_WRITEL_MACRO(irq, BCM_MBOX_IRQ_BASE(mbox->rx_chan));
+
+#define BCM74110_OFFSET_IO_READL_MACRO(name, offset_base) \
+static u32 bcm74110_##name##_readl(struct bcm74110_mbox *mbox, \
+ u32 off) \
+{ \
+ return readl_relaxed(mbox->base + offset_base + off); \
+}
+BCM74110_OFFSET_IO_READL_MACRO(tx, BCM_MBOX_BASE(mbox->tx_chan));
+BCM74110_OFFSET_IO_READL_MACRO(rx, BCM_MBOX_BASE(mbox->rx_chan));
+BCM74110_OFFSET_IO_READL_MACRO(irq, BCM_MBOX_IRQ_BASE(mbox->rx_chan));
+
+static inline struct bcm74110_mbox *bcm74110_mbox_from_cntrl(
+ struct mbox_controller *cntrl)
+{
+ return container_of(cntrl, struct bcm74110_mbox, controller);
+}
+
+static void bcm74110_rx_push_init_msg(struct bcm74110_mbox *mbox, u32 val)
+{
+ struct bcm74110_mbox_msg *msg;
+
+ msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
+ if (!msg)
+ return;
+
+ INIT_LIST_HEAD(&msg->list_entry);
+ msg->msg = val;
+
+ spin_lock(&mbox->rx_svc_list_lock);
+ list_add_tail(&msg->list_entry, &mbox->rx_svc_init_list);
+ spin_unlock(&mbox->rx_svc_list_lock);
+}
+
+static void bcm74110_rx_process_msg(struct bcm74110_mbox *mbox)
+{
+ struct device *dev = &mbox->pdev->dev;
+ struct bcm74110_mbox_chan *chan_priv;
+ struct mbox_chan *chan;
+ u32 msg, status;
+ int type;
+
+ do {
+ msg = bcm74110_rx_readl(mbox, BCM_MBOX_RDATA);
+ status = bcm74110_rx_readl(mbox, BCM_MBOX_STATUS0);
+
+ dev_dbg(dev, "rx: [{req=%lu|rply=%lu|srv=%lu|fn=%lu|length=%lu|slot=%lu]\n",
+ BCM_MSG_GET_FIELD(msg, REQ), BCM_MSG_GET_FIELD(msg, RPLY),
+ BCM_MSG_GET_FIELD(msg, SVC), BCM_MSG_GET_FIELD(msg, FUNC),
+ BCM_MSG_GET_FIELD(msg, LENGTH), BCM_MSG_GET_FIELD(msg, SLOT));
+
+ type = BCM_MSG_GET_FIELD(msg, SVC);
+ switch (type) {
+ case BCM_MSG_SVC_INIT:
+ bcm74110_rx_push_init_msg(mbox, msg);
+ break;
+ case BCM_MSG_SVC_PMC:
+ case BCM_MSG_SVC_SCMI:
+ case BCM_MSG_SVC_DPFE:
+ chan = &mbox->controller.chans[type];
+ chan_priv = chan->con_priv;
+ if (chan_priv->en)
+ mbox_chan_received_data(chan, NULL);
+ else
+ dev_warn(dev, "Channel not enabled\n");
+ break;
+ default:
+ dev_warn(dev, "Unsupported msg received\n");
+ }
+ } while (status & BCM_MBOX_STATUS0_NOT_EMPTY);
+}
+
+static irqreturn_t bcm74110_mbox_isr(int irq, void *data)
+{
+ struct bcm74110_mbox *mbox = data;
+ u32 status;
+
+ status = bcm74110_irq_readl(mbox, BCM_MBOX_IRQ_STATUS);
+
+ bcm74110_irq_writel(mbox, 0xffffffff, BCM_MBOX_IRQ_CLEAR);
+
+ if (status & BCM_MBOX_IRQ_NOT_EMPTY)
+ bcm74110_rx_process_msg(mbox);
+ else
+ dev_warn(&mbox->pdev->dev, "Spurious interrupt\n");
+
+ return IRQ_HANDLED;
+}
+
+static void bcm74110_mbox_mask_and_clear(struct bcm74110_mbox *mbox)
+{
+ bcm74110_irq_writel(mbox, 0xffffffff, BCM_MBOX_IRQ_MASK_SET);
+ bcm74110_irq_writel(mbox, 0xffffffff, BCM_MBOX_IRQ_CLEAR);
+}
+
+static int bcm74110_rx_pop_init_msg(struct bcm74110_mbox *mbox, u32 func_type,
+ u32 *val)
+{
+ struct bcm74110_mbox_msg *msg, *msg_tmp;
+ unsigned long flags;
+ bool found = false;
+
+ spin_lock_irqsave(&mbox->rx_svc_list_lock, flags);
+ list_for_each_entry_safe(msg, msg_tmp, &mbox->rx_svc_init_list,
+ list_entry) {
+ if (BCM_MSG_GET_FIELD(msg->msg, FUNC) == func_type) {
+ list_del(&msg->list_entry);
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&mbox->rx_svc_list_lock, flags);
+
+ if (!found)
+ return -EINVAL;
+
+ *val = msg->msg;
+ kfree(msg);
+
+ return 0;
+}
+
+static void bcm74110_rx_flush_msg(struct bcm74110_mbox *mbox)
+{
+ struct bcm74110_mbox_msg *msg, *msg_tmp;
+ LIST_HEAD(list_temp);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mbox->rx_svc_list_lock, flags);
+ list_splice_init(&mbox->rx_svc_init_list, &list_temp);
+ spin_unlock_irqrestore(&mbox->rx_svc_list_lock, flags);
+
+ list_for_each_entry_safe(msg, msg_tmp, &list_temp, list_entry) {
+ list_del(&msg->list_entry);
+ kfree(msg);
+ }
+}
+
+#define BCM_DEQUEUE_TIMEOUT_MS 30
+static int bcm74110_rx_pop_init_msg_block(struct bcm74110_mbox *mbox, u32 func_type,
+ u32 *val)
+{
+ int ret, timeout = 0;
+
+ do {
+ ret = bcm74110_rx_pop_init_msg(mbox, func_type, val);
+
+ if (!ret)
+ return 0;
+
+ /* TODO: Figure out what a good sleep interval is here. */
+ usleep_range(1000, 2000);
+ timeout++;
+ } while (timeout < BCM_DEQUEUE_TIMEOUT_MS);
+
+ dev_warn(&mbox->pdev->dev, "Timeout waiting for service init response\n");
+ return -ETIMEDOUT;
+}
+
+static int bcm74110_mbox_create_msg(int req, int rply, int svc, int func,
+ int length, int slot)
+{
+ u32 msg = 0;
+
+ BCM_MSG_SET_FIELD(msg, REQ, req);
+ BCM_MSG_SET_FIELD(msg, RPLY, rply);
+ BCM_MSG_SET_FIELD(msg, SVC, svc);
+ BCM_MSG_SET_FIELD(msg, FUNC, func);
+ BCM_MSG_SET_FIELD(msg, LENGTH, length);
+ BCM_MSG_SET_FIELD(msg, SLOT, slot);
+
+ return msg;
+}
+
+static int bcm74110_mbox_tx_msg(struct bcm74110_mbox *mbox, u32 msg)
+{
+ int val;
+
+ /* We can potentially poll with timeout here instead */
+ val = bcm74110_tx_readl(mbox, BCM_MBOX_STATUS0);
+ if (val & BCM_MBOX_STATUS0_FULL) {
+ dev_err(&mbox->pdev->dev, "Mailbox full\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(&mbox->pdev->dev, "tx: [{req=%lu|rply=%lu|srv=%lu|fn=%lu|length=%lu|slot=%lu]\n",
+ BCM_MSG_GET_FIELD(msg, REQ), BCM_MSG_GET_FIELD(msg, RPLY),
+ BCM_MSG_GET_FIELD(msg, SVC), BCM_MSG_GET_FIELD(msg, FUNC),
+ BCM_MSG_GET_FIELD(msg, LENGTH), BCM_MSG_GET_FIELD(msg, SLOT));
+
+ bcm74110_tx_writel(mbox, msg, BCM_MBOX_WDATA);
+
+ return 0;
+}
+
+#define BCM_MBOX_LINK_TRAINING_RETRIES 5
+static int bcm74110_mbox_link_training(struct bcm74110_mbox *mbox)
+{
+ int ret, retries = 0;
+ u32 msg = 0, orig_len = 0, len = BCM_LINK_CODE0;
+
+ do {
+ switch (len) {
+ case 0:
+ retries++;
+ dev_warn(&mbox->pdev->dev,
+ "Link train failed, trying again... %d\n",
+ retries);
+ if (retries > BCM_MBOX_LINK_TRAINING_RETRIES)
+ return -EINVAL;
+ len = BCM_LINK_CODE0;
+ fallthrough;
+ case BCM_LINK_CODE0:
+ case BCM_LINK_CODE1:
+ case BCM_LINK_CODE2:
+ msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT,
+ BCM_MSG_FUNC_LINK_START,
+ len, BCM_MSG_SVC_INIT);
+ break;
+ default:
+ break;
+ }
+
+ bcm74110_mbox_tx_msg(mbox, msg);
+
+ /* No response expected for LINK_CODE2 */
+ if (len == BCM_LINK_CODE2)
+ return 0;
+
+ orig_len = len;
+
+ ret = bcm74110_rx_pop_init_msg_block(mbox,
+ BCM_MSG_GET_FIELD(msg, FUNC),
+ &msg);
+ if (ret) {
+ len = 0;
+ continue;
+ }
+
+ if ((BCM_MSG_GET_FIELD(msg, SVC) != BCM_MSG_SVC_INIT) ||
+ (BCM_MSG_GET_FIELD(msg, FUNC) != BCM_MSG_FUNC_LINK_START) ||
+ (BCM_MSG_GET_FIELD(msg, SLOT) != 0) ||
+ (BCM_MSG_GET_FIELD(msg, RPLY) != 1) ||
+ (BCM_MSG_GET_FIELD(msg, REQ) != 0)) {
+ len = 0;
+ continue;
+ }
+
+ len = BCM_MSG_GET_FIELD(msg, LENGTH);
+
+ /* Make sure sequence is good */
+ if (len != (orig_len + 1)) {
+ len = 0;
+ continue;
+ }
+ } while (1);
+
+ return -EINVAL;
+}
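The retry loop above encodes a three-step handshake, with each reply expected to carry the next link code in its LENGTH field. Laid out flat:

	/*
	 * tx LINK_CODE0 (0xbe0)  ->  reply must carry LENGTH == LINK_CODE1
	 * tx LINK_CODE1 (0xbe1)  ->  reply must carry LENGTH == LINK_CODE2
	 * tx LINK_CODE2 (0xbe2)  ->  no reply expected; training is complete
	 *
	 * Any transport error or field mismatch sets len = 0, which bumps the
	 * retry counter and restarts from LINK_CODE0 (up to 5 attempts).
	 */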
+
+static int bcm74110_mbox_tx_msg_and_wait_ack(struct bcm74110_mbox *mbox, u32 msg)
+{
+ int ret;
+ u32 recv_msg;
+
+ ret = bcm74110_mbox_tx_msg(mbox, msg);
+ if (ret)
+ return ret;
+
+ ret = bcm74110_rx_pop_init_msg_block(mbox, BCM_MSG_GET_FIELD(msg, FUNC),
+ &recv_msg);
+ if (ret)
+ return ret;
+
+ /*
+ * Modify tx message to verify rx ack.
+ * Flip RPLY/REQ for synchronous messages
+ */
+ if (BCM_MSG_GET_FIELD(msg, REQ) == 1) {
+ BCM_MSG_SET_FIELD(msg, RPLY, 1);
+ BCM_MSG_SET_FIELD(msg, REQ, 0);
+ }
+
+ if (msg != recv_msg) {
+ dev_err(&mbox->pdev->dev, "Found ack, but ack is invalid\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Each index points to 0x100 bytes of HAB memory. The IDX size counts from 0. */
+#define BCM_MBOX_HAB_MEM_IDX_START 0x30
+#define BCM_MBOX_HAB_MEM_IDX_SIZE 0x0
+static int bcm74110_mbox_shmem_init(struct bcm74110_mbox *mbox)
+{
+ u32 msg = 0;
+ int ret;
+
+ msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT,
+ BCM_MSG_FUNC_SHMEM_STOP,
+ 0, BCM_MSG_SVC_INIT);
+ ret = bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg);
+ if (ret)
+ return -EINVAL;
+
+ msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT,
+ BCM_MSG_FUNC_SHMEM_TX,
+ BCM_MBOX_HAB_MEM_IDX_START,
+ BCM_MBOX_HAB_MEM_IDX_SIZE);
+ ret = bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg);
+ if (ret)
+ return -EINVAL;
+
+ msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT,
+ BCM_MSG_FUNC_SHMEM_RX,
+ BCM_MBOX_HAB_MEM_IDX_START,
+ BCM_MBOX_HAB_MEM_IDX_SIZE);
+ ret = bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg);
+ if (ret)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int bcm74110_mbox_init(struct bcm74110_mbox *mbox)
+{
+ int ret = 0;
+
+ /* Disable queues tx/rx */
+ bcm74110_tx_writel(mbox, 0x0, BCM_MBOX_CTRL);
+
+ /* Clear status & restart tx/rx */
+ bcm74110_tx_writel(mbox, BCM_MBOX_CTRL_EN | BCM_MBOX_CTRL_CLR,
+ BCM_MBOX_CTRL);
+
+ /* Unmask irq */
+ bcm74110_irq_writel(mbox, BCM_MBOX_IRQ_NOT_EMPTY, BCM_MBOX_IRQ_MASK_CLEAR);
+
+ ret = bcm74110_mbox_link_training(mbox);
+ if (ret) {
+ dev_err(&mbox->pdev->dev, "Training failed\n");
+ return ret;
+ }
+
+ return bcm74110_mbox_shmem_init(mbox);
+}
+
+static int bcm74110_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct bcm74110_mbox_chan *chan_priv = chan->con_priv;
+ u32 msg;
+
+ switch (chan_priv->type) {
+ case BCM_MSG_SVC_PMC:
+ case BCM_MSG_SVC_SCMI:
+ case BCM_MSG_SVC_DPFE:
+ msg = bcm74110_mbox_create_msg(1, 0, chan_priv->type, 0,
+ 128 + 28, chan_priv->slot);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return bcm74110_mbox_tx_msg(chan_priv->mbox, msg);
+}
+
+static int bcm74110_mbox_chan_startup(struct mbox_chan *chan)
+{
+ struct bcm74110_mbox_chan *chan_priv = chan->con_priv;
+
+ chan_priv->en = true;
+
+ return 0;
+}
+
+static void bcm74110_mbox_chan_shutdown(struct mbox_chan *chan)
+{
+ struct bcm74110_mbox_chan *chan_priv = chan->con_priv;
+
+ chan_priv->en = false;
+}
+
+static const struct mbox_chan_ops bcm74110_mbox_chan_ops = {
+ .send_data = bcm74110_mbox_send_data,
+ .startup = bcm74110_mbox_chan_startup,
+ .shutdown = bcm74110_mbox_chan_shutdown,
+};
+
+static void bcm74110_mbox_shutdown(struct platform_device *pdev)
+{
+ struct bcm74110_mbox *mbox = dev_get_drvdata(&pdev->dev);
+ u32 msg;
+
+ msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT,
+ BCM_MSG_FUNC_LINK_STOP,
+ 0, 0);
+
+ bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg);
+
+ /* Even if we don't receive an ACK, let's shut it down */
+
+ bcm74110_mbox_mask_and_clear(mbox);
+
+ /* Disable queues tx/rx */
+ bcm74110_tx_writel(mbox, 0x0, BCM_MBOX_CTRL);
+
+ /* Flush queues */
+ bcm74110_rx_flush_msg(mbox);
+}
+
+static struct mbox_chan *bcm74110_mbox_of_xlate(struct mbox_controller *cntrl,
+ const struct of_phandle_args *p)
+{
+ struct bcm74110_mbox *mbox = bcm74110_mbox_from_cntrl(cntrl);
+ struct device *dev = &mbox->pdev->dev;
+ struct bcm74110_mbox_chan *chan_priv;
+ int slot, type;
+
+ if (p->args_count != 2) {
+ dev_err(dev, "Invalid arguments\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ type = p->args[0];
+ slot = p->args[1];
+
+ switch (type) {
+ case BCM_MSG_SVC_PMC:
+ case BCM_MSG_SVC_SCMI:
+ case BCM_MSG_SVC_DPFE:
+ if (slot > BCM_MBOX_HAB_MEM_IDX_SIZE) {
+ dev_err(dev, "Not enough shared memory\n");
+ return ERR_PTR(-EINVAL);
+ }
+ chan_priv = cntrl->chans[type].con_priv;
+ chan_priv->slot = slot;
+ chan_priv->type = type;
+ break;
+ default:
+ dev_err(dev, "Invalid channel type: %d\n", type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &cntrl->chans[type];
+}
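+
+/*
+ * Consumer sketch (cell values are illustrative; the DT binding is
+ * authoritative): clients reference a channel with a two-cell
+ * specifier, service type first and shared memory slot second:
+ *
+ * mboxes = <&mbox BCM_MSG_SVC_SCMI 0>;
+ */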
+
+static int bcm74110_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bcm74110_mbox *mbox;
+ int i, ret;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ mbox->pdev = pdev;
+ platform_set_drvdata(pdev, mbox);
+
+ mbox->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mbox->base))
+ return dev_err_probe(dev, PTR_ERR(mbox->base), "Failed to iomap\n");
+
+ ret = of_property_read_u32(dev->of_node, "brcm,tx", &mbox->tx_chan);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to find tx channel\n");
+
+ ret = of_property_read_u32(dev->of_node, "brcm,rx", &mbox->rx_chan);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to find rx channel\n");
+
+ mbox->rx_irq = platform_get_irq(pdev, 0);
+ if (mbox->rx_irq < 0)
+ return mbox->rx_irq;
+
+ INIT_LIST_HEAD(&mbox->rx_svc_init_list);
+ spin_lock_init(&mbox->rx_svc_list_lock);
+ bcm74110_mbox_mask_and_clear(mbox);
+
+ ret = devm_request_irq(dev, mbox->rx_irq, bcm74110_mbox_isr,
+ IRQF_NO_SUSPEND, pdev->name, mbox);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request irq\n");
+
+ mbox->controller.ops = &bcm74110_mbox_chan_ops;
+ mbox->controller.dev = dev;
+ mbox->controller.num_chans = BCM_MSG_SVC_MAX;
+ mbox->controller.of_xlate = &bcm74110_mbox_of_xlate;
+ mbox->controller.chans = devm_kcalloc(dev, BCM_MSG_SVC_MAX,
+ sizeof(*mbox->controller.chans),
+ GFP_KERNEL);
+ if (!mbox->controller.chans)
+ return -ENOMEM;
+
+ mbox->mbox_chan = devm_kcalloc(dev, BCM_MSG_SVC_MAX,
+ sizeof(*mbox->mbox_chan),
+ GFP_KERNEL);
+ if (!mbox->mbox_chan)
+ return -ENOMEM;
+
+ for (i = 0; i < BCM_MSG_SVC_MAX; i++) {
+ mbox->mbox_chan[i].mbox = mbox;
+ mbox->controller.chans[i].con_priv = &mbox->mbox_chan[i];
+ }
+
+ ret = devm_mbox_controller_register(dev, &mbox->controller);
+ if (ret)
+ return ret;
+
+ ret = bcm74110_mbox_init(mbox);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id bcm74110_mbox_of_match[] = {
+ { .compatible = "brcm,bcm74110-mbox", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, bcm74110_mbox_of_match);
+
+static struct platform_driver bcm74110_mbox_driver = {
+ .driver = {
+ .name = "bcm74110-mbox",
+ .of_match_table = bcm74110_mbox_of_match,
+ },
+ .probe = bcm74110_mbox_probe,
+ .shutdown = bcm74110_mbox_shutdown,
+};
+module_platform_driver(bcm74110_mbox_driver);
+
+MODULE_AUTHOR("Justin Chen <justin.chen@broadcom.com>");
+MODULE_DESCRIPTION("BCM74110 mailbox driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 5cd8ae222073..2acc6ec229a4 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
+#include <linux/property.h>
#include <linux/spinlock.h>
#include "mailbox.h"
@@ -383,34 +384,56 @@ EXPORT_SYMBOL_GPL(mbox_bind_client);
*/
struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
{
- struct device *dev = cl->dev;
+ struct fwnode_reference_args fwspec;
+ struct fwnode_handle *fwnode;
struct mbox_controller *mbox;
struct of_phandle_args spec;
struct mbox_chan *chan;
+ struct device *dev;
+ unsigned int i;
int ret;
- if (!dev || !dev->of_node) {
- pr_debug("%s: No owner device node\n", __func__);
+ dev = cl->dev;
+ if (!dev) {
+ pr_debug("No owner device\n");
return ERR_PTR(-ENODEV);
}
- ret = of_parse_phandle_with_args(dev->of_node, "mboxes", "#mbox-cells",
- index, &spec);
+ fwnode = dev_fwnode(dev);
+ if (!fwnode) {
+ dev_dbg(dev, "No owner fwnode\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ ret = fwnode_property_get_reference_args(fwnode, "mboxes", "#mbox-cells",
+ 0, index, &fwspec);
if (ret) {
- dev_err(dev, "%s: can't parse \"mboxes\" property\n", __func__);
+ dev_err(dev, "%s: can't parse \"%s\" property\n", __func__, "mboxes");
return ERR_PTR(ret);
}
+ spec.np = to_of_node(fwspec.fwnode);
+ spec.args_count = fwspec.nargs;
+ for (i = 0; i < spec.args_count; i++)
+ spec.args[i] = fwspec.args[i];
+
scoped_guard(mutex, &con_mutex) {
chan = ERR_PTR(-EPROBE_DEFER);
- list_for_each_entry(mbox, &mbox_cons, node)
- if (mbox->dev->of_node == spec.np) {
- chan = mbox->of_xlate(mbox, &spec);
- if (!IS_ERR(chan))
- break;
+ list_for_each_entry(mbox, &mbox_cons, node) {
+ if (device_match_fwnode(mbox->dev, fwspec.fwnode)) {
+ if (mbox->fw_xlate) {
+ chan = mbox->fw_xlate(mbox, &fwspec);
+ if (!IS_ERR(chan))
+ break;
+ } else if (mbox->of_xlate) {
+ chan = mbox->of_xlate(mbox, &spec);
+ if (!IS_ERR(chan))
+ break;
+ }
}
+ }
- of_node_put(spec.np);
+ fwnode_handle_put(fwspec.fwnode);
if (IS_ERR(chan))
return chan;
@@ -427,15 +450,8 @@ EXPORT_SYMBOL_GPL(mbox_request_channel);
struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
const char *name)
{
- struct device_node *np = cl->dev->of_node;
- int index;
-
- if (!np) {
- dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
- return ERR_PTR(-EINVAL);
- }
+ int index = device_property_match_string(cl->dev, "mbox-names", name);
- index = of_property_match_string(np, "mbox-names", name);
if (index < 0) {
dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
__func__, name);
@@ -470,9 +486,8 @@ void mbox_free_channel(struct mbox_chan *chan)
}
EXPORT_SYMBOL_GPL(mbox_free_channel);
-static struct mbox_chan *
-of_mbox_index_xlate(struct mbox_controller *mbox,
- const struct of_phandle_args *sp)
+static struct mbox_chan *fw_mbox_index_xlate(struct mbox_controller *mbox,
+ const struct fwnode_reference_args *sp)
{
int ind = sp->args[0];
@@ -523,8 +538,8 @@ int mbox_controller_register(struct mbox_controller *mbox)
spin_lock_init(&chan->lock);
}
- if (!mbox->of_xlate)
- mbox->of_xlate = of_mbox_index_xlate;
+ if (!mbox->fw_xlate && !mbox->of_xlate)
+ mbox->fw_xlate = fw_mbox_index_xlate;
scoped_guard(mutex, &con_mutex)
list_add_tail(&mbox->node, &mbox_cons);
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index ab4e8d1954a1..654a60f63756 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -379,20 +379,13 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
struct cmdq_task *task;
unsigned long curr_pa, end_pa;
- int ret;
/* Client should not flush new tasks if suspended. */
WARN_ON(cmdq->suspended);
- ret = pm_runtime_get_sync(cmdq->mbox.dev);
- if (ret < 0)
- return ret;
-
task = kzalloc(sizeof(*task), GFP_ATOMIC);
- if (!task) {
- __pm_runtime_put_autosuspend(cmdq->mbox.dev);
+ if (!task)
return -ENOMEM;
- }
task->cmdq = cmdq;
INIT_LIST_HEAD(&task->list_entry);
@@ -439,9 +432,6 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
}
list_move_tail(&task->list_entry, &thread->task_busy_list);
- pm_runtime_mark_last_busy(cmdq->mbox.dev);
- __pm_runtime_put_autosuspend(cmdq->mbox.dev);
-
return 0;
}
@@ -488,7 +478,7 @@ done:
spin_unlock_irqrestore(&thread->chan->lock, flags);
pm_runtime_mark_last_busy(cmdq->mbox.dev);
- __pm_runtime_put_autosuspend(cmdq->mbox.dev);
+ pm_runtime_put_autosuspend(cmdq->mbox.dev);
}
static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
@@ -528,7 +518,7 @@ static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
out:
spin_unlock_irqrestore(&thread->chan->lock, flags);
pm_runtime_mark_last_busy(cmdq->mbox.dev);
- __pm_runtime_put_autosuspend(cmdq->mbox.dev);
+ pm_runtime_put_autosuspend(cmdq->mbox.dev);
return 0;
@@ -543,7 +533,7 @@ wait:
return -EFAULT;
}
pm_runtime_mark_last_busy(cmdq->mbox.dev);
- __pm_runtime_put_autosuspend(cmdq->mbox.dev);
+ pm_runtime_put_autosuspend(cmdq->mbox.dev);
return 0;
}
diff --git a/drivers/mailbox/mtk-gpueb-mailbox.c b/drivers/mailbox/mtk-gpueb-mailbox.c
new file mode 100644
index 000000000000..925bcf21f650
--- /dev/null
+++ b/drivers/mailbox/mtk-gpueb-mailbox.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * MediaTek GPUEB mailbox driver for SoCs such as the MT8196
+ *
+ * Copyright (C) 2025, Collabora Ltd.
+ *
+ * Developers harmed in the making of this driver:
+ * - Nicolas Frattaroli <nicolas.frattaroli@collabora.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define GPUEB_MBOX_CTL_TX_STS 0x00
+#define GPUEB_MBOX_CTL_IRQ_SET 0x04
+#define GPUEB_MBOX_CTL_IRQ_CLR 0x74
+#define GPUEB_MBOX_CTL_RX_STS 0x78
+
+#define GPUEB_MBOX_FULL BIT(0) /* i.e. we've received data */
+#define GPUEB_MBOX_BLOCKED BIT(1) /* i.e. the channel is shutdown */
+
+#define GPUEB_MBOX_MAX_RX_SIZE 32 /* in bytes */
+
+struct mtk_gpueb_mbox {
+ struct device *dev;
+ struct clk *clk;
+ void __iomem *mbox_mmio;
+ void __iomem *mbox_ctl;
+ struct mbox_controller mbox;
+ struct mtk_gpueb_mbox_chan *ch;
+ int irq;
+ const struct mtk_gpueb_mbox_variant *v;
+};
+
+/**
+ * struct mtk_gpueb_mbox_chan - per-channel runtime data
+ * @ebm: pointer to the parent &struct mtk_gpueb_mbox mailbox
+ * @full_name: descriptive name of channel for IRQ subsystem
+ * @num: channel number, starting at 0
+ * @rx_status: signifies whether channel reception is turned off, or full
+ * @c: pointer to the constant &struct mtk_gpueb_mbox_chan_desc channel data
+ */
+struct mtk_gpueb_mbox_chan {
+ struct mtk_gpueb_mbox *ebm;
+ char *full_name;
+ u8 num;
+ atomic_t rx_status;
+ const struct mtk_gpueb_mbox_chan_desc *c;
+};
+
+/**
+ * struct mtk_gpueb_mbox_chan_desc - per-channel constant data
+ * @name: name of this channel
+ * @num: index of this channel, starting at 0
+ * @tx_offset: byte offset measured from mmio base for outgoing data
+ * @tx_len: size, in bytes, of the outgoing data on this channel
+ * @rx_offset: byte offset measured from mmio base for incoming data
+ * @rx_len: size, in bytes, of the incoming data on this channel
+ */
+struct mtk_gpueb_mbox_chan_desc {
+ const char *name;
+ const u8 num;
+ const u16 tx_offset;
+ const u8 tx_len;
+ const u16 rx_offset;
+ const u8 rx_len;
+};
+
+struct mtk_gpueb_mbox_variant {
+ const u8 num_channels;
+ const struct mtk_gpueb_mbox_chan_desc channels[] __counted_by(num_channels);
+};
+
+/**
+ * mtk_gpueb_mbox_read_rx - read RX buffer from MMIO into channel's RX buffer
+ * @buf: buffer to read into
+ * @chan: pointer to the channel to read
+ */
+static void mtk_gpueb_mbox_read_rx(void *buf, struct mtk_gpueb_mbox_chan *chan)
+{
+ memcpy_fromio(buf, chan->ebm->mbox_mmio + chan->c->rx_offset, chan->c->rx_len);
+}
+
+static irqreturn_t mtk_gpueb_mbox_isr(int irq, void *data)
+{
+ struct mtk_gpueb_mbox_chan *ch = data;
+ u32 rx_sts;
+
+ rx_sts = readl(ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_RX_STS);
+
+ if (rx_sts & BIT(ch->num)) {
+ if (!atomic_cmpxchg(&ch->rx_status, 0, GPUEB_MBOX_FULL | GPUEB_MBOX_BLOCKED))
+ return IRQ_WAKE_THREAD;
+ }
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t mtk_gpueb_mbox_thread(int irq, void *data)
+{
+ struct mtk_gpueb_mbox_chan *ch = data;
+ int status;
+
+ status = atomic_cmpxchg(&ch->rx_status, GPUEB_MBOX_FULL | GPUEB_MBOX_BLOCKED,
+ GPUEB_MBOX_FULL);
+ if (status == (GPUEB_MBOX_FULL | GPUEB_MBOX_BLOCKED)) {
+ u8 buf[GPUEB_MBOX_MAX_RX_SIZE] = {};
+
+ mtk_gpueb_mbox_read_rx(buf, ch);
+ writel(BIT(ch->num), ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_IRQ_CLR);
+ mbox_chan_received_data(&ch->ebm->mbox.chans[ch->num], buf);
+ atomic_set(&ch->rx_status, 0);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
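+
+/*
+ * State sketch for ch->rx_status as driven by the two handlers above:
+ *
+ * 0              - idle, ready to receive
+ * FULL | BLOCKED - latched by the hard IRQ, thread not yet done
+ * FULL           - thread is delivering the data upwards
+ * BLOCKED        - channel shut down, events are ignored
+ *
+ * The paired cmpxchg calls hand each event from hard IRQ to thread
+ * context exactly once, even with IRQF_SHARED.
+ */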
+
+static int mtk_gpueb_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct mtk_gpueb_mbox_chan *ch = chan->con_priv;
+ u32 *values = data;
+ int i;
+
+ if (atomic_read(&ch->rx_status))
+ return -EBUSY;
+
+ /*
+ * We don't want any fancy nonsense, just write the 32-bit values in
+ * order. memcpy_toio/__iowrite32_copy don't work here, as they may use
+ * writes of different sizes or memory ordering characteristics depending
+ * on the architecture, alignment and the current phase of the moon.
+ */
+ for (i = 0; i < ch->c->tx_len; i += 4)
+ writel(values[i / 4], ch->ebm->mbox_mmio + ch->c->tx_offset + i);
+
+ writel(BIT(ch->num), ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_IRQ_SET);
+
+ return 0;
+}
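+
+/*
+ * Client sketch: senders pass a 32-bit aligned buffer of at least
+ * tx_len bytes; e.g. for the mt8196 "gpufreq" channel (tx_len = 32)
+ * the mbox_send_message() payload is eight u32 values, written to the
+ * mailbox verbatim.
+ */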
+
+static int mtk_gpueb_mbox_startup(struct mbox_chan *chan)
+{
+ struct mtk_gpueb_mbox_chan *ch = chan->con_priv;
+ int ret;
+
+ atomic_set(&ch->rx_status, 0);
+
+ ret = clk_enable(ch->ebm->clk);
+ if (ret) {
+ dev_err(ch->ebm->dev, "Failed to enable EB clock: %pe\n",
+ ERR_PTR(ret));
+ goto err_block;
+ }
+
+ writel(BIT(ch->num), ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_IRQ_CLR);
+
+ ret = devm_request_threaded_irq(ch->ebm->dev, ch->ebm->irq, mtk_gpueb_mbox_isr,
+ mtk_gpueb_mbox_thread, IRQF_SHARED | IRQF_ONESHOT,
+ ch->full_name, ch);
+ if (ret) {
+ dev_err(ch->ebm->dev, "Failed to request IRQ: %pe\n",
+ ERR_PTR(ret));
+ goto err_unclk;
+ }
+
+ return 0;
+
+err_unclk:
+ clk_disable(ch->ebm->clk);
+err_block:
+ atomic_set(&ch->rx_status, GPUEB_MBOX_BLOCKED);
+
+ return ret;
+}
+
+static void mtk_gpueb_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct mtk_gpueb_mbox_chan *ch = chan->con_priv;
+
+ atomic_set(&ch->rx_status, GPUEB_MBOX_BLOCKED);
+
+ devm_free_irq(ch->ebm->dev, ch->ebm->irq, ch);
+
+ clk_disable(ch->ebm->clk);
+}
+
+static bool mtk_gpueb_mbox_last_tx_done(struct mbox_chan *chan)
+{
+ struct mtk_gpueb_mbox_chan *ch = chan->con_priv;
+
+ return !(readl(ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_TX_STS) & BIT(ch->num));
+}
+
+static const struct mbox_chan_ops mtk_gpueb_mbox_ops = {
+ .send_data = mtk_gpueb_mbox_send_data,
+ .startup = mtk_gpueb_mbox_startup,
+ .shutdown = mtk_gpueb_mbox_shutdown,
+ .last_tx_done = mtk_gpueb_mbox_last_tx_done,
+};
+
+static int mtk_gpueb_mbox_probe(struct platform_device *pdev)
+{
+ struct mtk_gpueb_mbox_chan *ch;
+ struct mtk_gpueb_mbox *ebm;
+ unsigned int i;
+
+ ebm = devm_kzalloc(&pdev->dev, sizeof(*ebm), GFP_KERNEL);
+ if (!ebm)
+ return -ENOMEM;
+
+ ebm->dev = &pdev->dev;
+ ebm->v = of_device_get_match_data(ebm->dev);
+
+ ebm->irq = platform_get_irq(pdev, 0);
+ if (ebm->irq < 0)
+ return ebm->irq;
+
+ ebm->clk = devm_clk_get_prepared(ebm->dev, NULL);
+ if (IS_ERR(ebm->clk))
+ return dev_err_probe(ebm->dev, PTR_ERR(ebm->clk),
+ "Failed to get 'eb' clock\n");
+
+ ebm->mbox_mmio = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ebm->mbox_mmio))
+ return dev_err_probe(ebm->dev, PTR_ERR(ebm->mbox_mmio),
+ "Couldn't map mailbox data registers\n");
+
+ ebm->mbox_ctl = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(ebm->mbox_ctl))
+ return dev_err_probe(ebm->dev, PTR_ERR(ebm->mbox_ctl),
+ "Couldn't map mailbox control registers\n");
+
+ ebm->ch = devm_kmalloc_array(ebm->dev, ebm->v->num_channels,
+ sizeof(*ebm->ch), GFP_KERNEL);
+ if (!ebm->ch)
+ return -ENOMEM;
+
+ ebm->mbox.chans = devm_kcalloc(ebm->dev, ebm->v->num_channels,
+ sizeof(struct mbox_chan), GFP_KERNEL);
+ if (!ebm->mbox.chans)
+ return -ENOMEM;
+
+ for (i = 0; i < ebm->v->num_channels; i++) {
+ ch = &ebm->ch[i];
+ ch->c = &ebm->v->channels[i];
+ if (ch->c->rx_len > GPUEB_MBOX_MAX_RX_SIZE) {
+ dev_err(ebm->dev, "Channel %s RX size (%d) too large\n",
+ ch->c->name, ch->c->rx_len);
+ return -EINVAL;
+ }
+ ch->full_name = devm_kasprintf(ebm->dev, GFP_KERNEL, "%s:%s",
+ dev_name(ebm->dev), ch->c->name);
+ if (!ch->full_name)
+ return -ENOMEM;
+
+ ch->ebm = ebm;
+ ch->num = i;
+ spin_lock_init(&ebm->mbox.chans[i].lock);
+ ebm->mbox.chans[i].con_priv = ch;
+ atomic_set(&ch->rx_status, GPUEB_MBOX_BLOCKED);
+ }
+
+ ebm->mbox.dev = ebm->dev;
+ ebm->mbox.num_chans = ebm->v->num_channels;
+ ebm->mbox.txdone_poll = true;
+ ebm->mbox.txpoll_period = 0; /* minimum hrtimer interval */
+ ebm->mbox.ops = &mtk_gpueb_mbox_ops;
+
+ dev_set_drvdata(ebm->dev, ebm);
+
+ return devm_mbox_controller_register(ebm->dev, &ebm->mbox);
+}
+
+static const struct mtk_gpueb_mbox_variant mtk_gpueb_mbox_mt8196 = {
+ .num_channels = 12,
+ .channels = {
+ { "fast-dvfs-event", 0, 0x0000, 16, 0x00e0, 16 },
+ { "gpufreq", 1, 0x0010, 32, 0x00f0, 32 },
+ { "sleep", 2, 0x0030, 12, 0x0110, 4 },
+ { "timer", 3, 0x003c, 24, 0x0114, 4 },
+ { "fhctl", 4, 0x0054, 36, 0x0118, 4 },
+ { "ccf", 5, 0x0078, 16, 0x011c, 16 },
+ { "gpumpu", 6, 0x0088, 24, 0x012c, 4 },
+ { "fast-dvfs", 7, 0x00a0, 24, 0x0130, 24 },
+ { "ipir-c-met", 8, 0x00b8, 4, 0x0148, 16 },
+ { "ipis-c-met", 9, 0x00bc, 16, 0x0158, 4 },
+ { "brisket", 10, 0x00cc, 16, 0x015c, 16 },
+ { "ppb", 11, 0x00dc, 4, 0x016c, 4 },
+ },
+};
+
+static const struct of_device_id mtk_gpueb_mbox_of_ids[] = {
+ { .compatible = "mediatek,mt8196-gpueb-mbox", .data = &mtk_gpueb_mbox_mt8196 },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mtk_gpueb_mbox_of_ids);
+
+static struct platform_driver mtk_gpueb_mbox_drv = {
+ .probe = mtk_gpueb_mbox_probe,
+ .driver = {
+ .name = "mtk-gpueb-mbox",
+ .of_match_table = mtk_gpueb_mbox_of_ids,
+ }
+};
+module_platform_driver(mtk_gpueb_mbox_drv);
+
+MODULE_AUTHOR("Nicolas Frattaroli <nicolas.frattaroli@collabora.com>");
+MODULE_DESCRIPTION("MediaTek GPUEB mailbox driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index f6714c233f5a..0a00719b2482 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -306,6 +306,22 @@ static void pcc_chan_acknowledge(struct pcc_chan_info *pchan)
pcc_chan_reg_read_modify_write(&pchan->db);
}
+static void *write_response(struct pcc_chan_info *pchan)
+{
+ struct pcc_header pcc_header;
+ void *buffer;
+ int data_len;
+
+ memcpy_fromio(&pcc_header, pchan->chan.shmem,
+ sizeof(pcc_header));
+ data_len = pcc_header.length - sizeof(u32) + sizeof(struct pcc_header);
+
+ buffer = pchan->chan.rx_alloc(pchan->chan.mchan->cl, data_len);
+ if (buffer != NULL)
+ memcpy_fromio(buffer, pchan->chan.shmem, data_len);
+ return buffer;
+}
+
/**
* pcc_mbox_irq - PCC mailbox interrupt handler
* @irq: interrupt number
@@ -317,6 +333,8 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
{
struct pcc_chan_info *pchan;
struct mbox_chan *chan = p;
+ struct pcc_header *pcc_header = chan->active_req;
+ void *handle = NULL;
pchan = chan->con_priv;
@@ -340,7 +358,17 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
* required to avoid any possible race in updatation of this flag.
*/
pchan->chan_in_use = false;
- mbox_chan_received_data(chan, NULL);
+
+ if (pchan->chan.rx_alloc)
+ handle = write_response(pchan);
+
+ if (chan->active_req) {
+ pcc_header = chan->active_req;
+ if (pcc_header->flags & PCC_CMD_COMPLETION_NOTIFY)
+ mbox_chan_txdone(chan, 0);
+ }
+
+ mbox_chan_received_data(chan, handle);
pcc_chan_acknowledge(pchan);
@@ -384,9 +412,24 @@ pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id)
pcc_mchan = &pchan->chan;
pcc_mchan->shmem = acpi_os_ioremap(pcc_mchan->shmem_base_addr,
pcc_mchan->shmem_size);
- if (pcc_mchan->shmem)
- return pcc_mchan;
+ if (!pcc_mchan->shmem)
+ goto err;
+
+ pcc_mchan->manage_writes = false;
+
+ /*
+ * This indicates that the channel is ready to accept messages. It
+ * needs to happen after the channel has registered its callback,
+ * but the mailbox API has no hook at that point, so the mailbox
+ * client must set the rx_alloc callback before sending any
+ * messages.
+ */
+ if (pchan->type == ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE)
+ pcc_chan_reg_read_modify_write(&pchan->cmd_update);
+
+ return pcc_mchan;
+err:
mbox_free_channel(chan);
return ERR_PTR(-ENXIO);
}
@@ -417,8 +460,38 @@ void pcc_mbox_free_channel(struct pcc_mbox_chan *pchan)
}
EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
+static int pcc_write_to_buffer(struct mbox_chan *chan, void *data)
+{
+ struct pcc_chan_info *pchan = chan->con_priv;
+ struct pcc_mbox_chan *pcc_mbox_chan = &pchan->chan;
+ struct pcc_header *pcc_header = data;
+ int len;
+ u64 val;
+
+ if (!pchan->chan.manage_writes)
+ return 0;
+
+ /*
+ * The PCC header length includes the command field
+ * but not the other values from the header.
+ */
+ len = pcc_header->length - sizeof(u32) + sizeof(struct pcc_header);
+
+ pcc_chan_reg_read(&pchan->cmd_complete, &val);
+ if (!val) {
+ pr_info("%s: pchan->cmd_complete not set\n", __func__);
+ return -EIO;
+ }
+ memcpy_toio(pcc_mbox_chan->shmem, data, len);
+ return 0;
+}
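+
+/*
+ * Worked example (sketch) for the length computation above:
+ * pcc_header.length counts the command field plus the payload, so for
+ * length == 12 the region copied to shared memory is the full header
+ * plus 8 payload bytes: 12 - sizeof(u32) + sizeof(struct pcc_header).
+ */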
+
/**
- * pcc_send_data - Called from Mailbox Controller code. Used
+ * pcc_send_data - Called from Mailbox Controller code. If
+ * pchan->chan.manage_writes is set, the command complete
+ * flag is checked and the data is written to the shared
+ * buffer I/O memory.
+ *
+ * If pchan->chan.manage_writes is not set, this is used
* here only to ring the channel doorbell. The PCC client
* specific read/write is done in the client driver in
* order to maintain atomicity over PCC channel once
@@ -434,17 +507,37 @@ static int pcc_send_data(struct mbox_chan *chan, void *data)
int ret;
struct pcc_chan_info *pchan = chan->con_priv;
+ ret = pcc_write_to_buffer(chan, data);
+ if (ret)
+ return ret;
+
ret = pcc_chan_reg_read_modify_write(&pchan->cmd_update);
if (ret)
return ret;
ret = pcc_chan_reg_read_modify_write(&pchan->db);
+
if (!ret && pchan->plat_irq > 0)
pchan->chan_in_use = true;
return ret;
}
+
+static bool pcc_last_tx_done(struct mbox_chan *chan)
+{
+ struct pcc_chan_info *pchan = chan->con_priv;
+ u64 val;
+
+ pcc_chan_reg_read(&pchan->cmd_complete, &val);
+
+ return !!val;
+}
+
/**
* pcc_startup - Called from Mailbox Controller code. Used here
* to request the interrupt.
@@ -490,6 +583,7 @@ static const struct mbox_chan_ops pcc_chan_ops = {
.send_data = pcc_send_data,
.startup = pcc_startup,
.shutdown = pcc_shutdown,
+ .last_tx_done = pcc_last_tx_done,
};
/**
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index 8b24ec0fa191..d3a8f6b4a03b 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -58,7 +58,6 @@ static const struct regmap_config apcs_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x1008,
- .fast_io = true,
};
static int qcom_apcs_ipc_send_data(struct mbox_chan *chan, void *data)
diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c
index ea44ffb5ce1a..d957d989c0ce 100644
--- a/drivers/mailbox/qcom-ipcc.c
+++ b/drivers/mailbox/qcom-ipcc.c
@@ -312,8 +312,7 @@ static int qcom_ipcc_probe(struct platform_device *pdev)
if (!name)
return -ENOMEM;
- ipcc->irq_domain = irq_domain_create_tree(of_fwnode_handle(pdev->dev.of_node),
- &qcom_ipcc_irq_ops, ipcc);
+ ipcc->irq_domain = irq_domain_create_tree(dev_fwnode(&pdev->dev), &qcom_ipcc_irq_ops, ipcc);
if (!ipcc->irq_domain)
return -ENOMEM;
diff --git a/drivers/mailbox/riscv-sbi-mpxy-mbox.c b/drivers/mailbox/riscv-sbi-mpxy-mbox.c
new file mode 100644
index 000000000000..7c9c006b7244
--- /dev/null
+++ b/drivers/mailbox/riscv-sbi-mpxy-mbox.c
@@ -0,0 +1,1019 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RISC-V SBI Message Proxy (MPXY) mailbox controller driver
+ *
+ * Copyright (C) 2025 Ventana Micro Systems Inc.
+ */
+
+#include <linux/acpi.h>
+#include <linux/cpu.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/irqchip/riscv-imsic.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox/riscv-rpmi-message.h>
+#include <linux/minmax.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
+#include <linux/percpu.h>
+#include <linux/platform_device.h>
+#include <linux/smp.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/sbi.h>
+
+/* ====== SBI MPXY extension data structures ====== */
+
+/* SBI MPXY MSI related channel attributes */
+struct sbi_mpxy_msi_info {
+ /* Lower 32-bits of the MSI target address */
+ u32 msi_addr_lo;
+ /* Upper 32-bits of the MSI target address */
+ u32 msi_addr_hi;
+ /* MSI data value */
+ u32 msi_data;
+};
+
+/*
+ * SBI MPXY standard channel attributes.
+ *
+ * NOTE: The sequence of attribute fields is as per the
+ * defined sequence in the attribute table in the spec (or
+ * as per the enum sbi_mpxy_attribute_id).
+ */
+struct sbi_mpxy_channel_attrs {
+ /* Message protocol ID */
+ u32 msg_proto_id;
+ /* Message protocol version */
+ u32 msg_proto_version;
+ /* Message protocol maximum message length */
+ u32 msg_max_len;
+ /* Message protocol message send timeout in microseconds */
+ u32 msg_send_timeout;
+ /* Message protocol message completion timeout in microseconds */
+ u32 msg_completion_timeout;
+ /* Bit array for channel capabilities */
+ u32 capability;
+ /* SSE event ID */
+ u32 sse_event_id;
+ /* MSI enable/disable control knob */
+ u32 msi_control;
+ /* Channel MSI info */
+ struct sbi_mpxy_msi_info msi_info;
+ /* Events state control */
+ u32 events_state_ctrl;
+};
+
+/*
+ * RPMI specific SBI MPXY channel attributes.
+ *
+ * NOTE: The sequence of attribute fields is as per the
+ * defined sequence in the attribute table in the spec (or
+ * as per the enum sbi_mpxy_rpmi_attribute_id).
+ */
+struct sbi_mpxy_rpmi_channel_attrs {
+ /* RPMI service group ID */
+ u32 servicegroup_id;
+ /* RPMI service group version */
+ u32 servicegroup_version;
+ /* RPMI implementation ID */
+ u32 impl_id;
+ /* RPMI implementation version */
+ u32 impl_version;
+};
+
+/* SBI MPXY channel IDs data in shared memory */
+struct sbi_mpxy_channel_ids_data {
+ /* Remaining number of channel ids */
+ __le32 remaining;
+ /* Returned channel ids in current function call */
+ __le32 returned;
+ /* Returned channel id array */
+ __le32 channel_array[];
+};
+
+/* SBI MPXY notification data in shared memory */
+struct sbi_mpxy_notification_data {
+ /* Remaining number of notification events */
+ __le32 remaining;
+ /* Number of notification events returned */
+ __le32 returned;
+ /* Number of notification events lost */
+ __le32 lost;
+ /* Reserved for future use */
+ __le32 reserved;
+ /* Notification events data */
+ u8 events_data[];
+};
+
+/* ====== MPXY data structures & helper routines ====== */
+
+/* MPXY Per-CPU or local context */
+struct mpxy_local {
+ /* Shared memory base address */
+ void *shmem;
+ /* Shared memory physical address */
+ phys_addr_t shmem_phys_addr;
+ /* Flag representing whether shared memory is active or not */
+ bool shmem_active;
+};
+
+static DEFINE_PER_CPU(struct mpxy_local, mpxy_local);
+static unsigned long mpxy_shmem_size;
+static bool mpxy_shmem_init_done;
+
+static int mpxy_get_channel_count(u32 *channel_count)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ struct sbi_mpxy_channel_ids_data *sdata = mpxy->shmem;
+ u32 remaining, returned;
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!channel_count)
+ return -EINVAL;
+
+ get_cpu();
+
+ /* Get the remaining and returned fields to calculate total */
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_GET_CHANNEL_IDS,
+ 0, 0, 0, 0, 0, 0);
+ if (sret.error)
+ goto err_put_cpu;
+
+ remaining = le32_to_cpu(sdata->remaining);
+ returned = le32_to_cpu(sdata->returned);
+ *channel_count = remaining + returned;
+
+err_put_cpu:
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_get_channel_ids(u32 channel_count, u32 *channel_ids)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ struct sbi_mpxy_channel_ids_data *sdata = mpxy->shmem;
+ u32 remaining, returned, count, start_index = 0;
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!channel_count || !channel_ids)
+ return -EINVAL;
+
+ get_cpu();
+
+ do {
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_GET_CHANNEL_IDS,
+ start_index, 0, 0, 0, 0, 0);
+ if (sret.error)
+ goto err_put_cpu;
+
+ remaining = le32_to_cpu(sdata->remaining);
+ returned = le32_to_cpu(sdata->returned);
+
+ count = min(returned, channel_count - start_index);
+ memcpy_from_le32(&channel_ids[start_index], sdata->channel_array, count);
+ start_index += count;
+ } while (remaining && start_index < channel_count);
+
+err_put_cpu:
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
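+
+/*
+ * Worked example (sketch): with 10 channels and shared memory that
+ * holds 4 IDs per call, the loop above issues start_index = 0, 4, 8,
+ * copying 4 + 4 + 2 IDs and stopping once 'remaining' reaches 0.
+ */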
+
+static int mpxy_read_attrs(u32 channel_id, u32 base_attrid, u32 attr_count,
+ u32 *attrs_buf)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!attr_count || !attrs_buf)
+ return -EINVAL;
+
+ get_cpu();
+
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_READ_ATTRS,
+ channel_id, base_attrid, attr_count, 0, 0, 0);
+ if (sret.error)
+ goto err_put_cpu;
+
+ memcpy_from_le32(attrs_buf, (__le32 *)mpxy->shmem, attr_count);
+
+err_put_cpu:
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_write_attrs(u32 channel_id, u32 base_attrid, u32 attr_count,
+ u32 *attrs_buf)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!attr_count || !attrs_buf)
+ return -EINVAL;
+
+ get_cpu();
+
+ memcpy_to_le32((__le32 *)mpxy->shmem, attrs_buf, attr_count);
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_WRITE_ATTRS,
+ channel_id, base_attrid, attr_count, 0, 0, 0);
+
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_send_message_with_resp(u32 channel_id, u32 msg_id,
+ void *tx, unsigned long tx_len,
+ void *rx, unsigned long max_rx_len,
+ unsigned long *rx_len)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ unsigned long rx_bytes;
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!tx && tx_len)
+ return -EINVAL;
+
+ get_cpu();
+
+ /* Message protocols are allowed to have no data in messages */
+ if (tx_len)
+ memcpy(mpxy->shmem, tx, tx_len);
+
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_SEND_MSG_WITH_RESP,
+ channel_id, msg_id, tx_len, 0, 0, 0);
+ if (rx && !sret.error) {
+ rx_bytes = sret.value;
+ if (rx_bytes > max_rx_len) {
+ put_cpu();
+ return -ENOSPC;
+ }
+
+ memcpy(rx, mpxy->shmem, rx_bytes);
+ if (rx_len)
+ *rx_len = rx_bytes;
+ }
+
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_send_message_without_resp(u32 channel_id, u32 msg_id,
+ void *tx, unsigned long tx_len)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!tx && tx_len)
+ return -EINVAL;
+
+ get_cpu();
+
+ /* Message protocols are allowed to have no data in messages */
+ if (tx_len)
+ memcpy(mpxy->shmem, tx, tx_len);
+
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_SEND_MSG_WITHOUT_RESP,
+ channel_id, msg_id, tx_len, 0, 0, 0);
+
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_get_notifications(u32 channel_id,
+ struct sbi_mpxy_notification_data *notif_data,
+ unsigned long *events_data_len)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!notif_data || !events_data_len)
+ return -EINVAL;
+
+ get_cpu();
+
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_GET_NOTIFICATION_EVENTS,
+ channel_id, 0, 0, 0, 0, 0);
+ if (sret.error)
+ goto err_put_cpu;
+
+ memcpy(notif_data, mpxy->shmem, sret.value + sizeof(*notif_data));
+ *events_data_len = sret.value;
+
+err_put_cpu:
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_get_shmem_size(unsigned long *shmem_size)
+{
+ struct sbiret sret;
+
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_GET_SHMEM_SIZE,
+ 0, 0, 0, 0, 0, 0);
+ if (sret.error)
+ return sbi_err_map_linux_errno(sret.error);
+ if (shmem_size)
+ *shmem_size = sret.value;
+ return 0;
+}
+
+static int mpxy_setup_shmem(unsigned int cpu)
+{
+ struct page *shmem_page;
+ struct mpxy_local *mpxy;
+ struct sbiret sret;
+
+ mpxy = per_cpu_ptr(&mpxy_local, cpu);
+ if (mpxy->shmem_active)
+ return 0;
+
+ shmem_page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(mpxy_shmem_size));
+ if (!shmem_page)
+ return -ENOMEM;
+
+ /*
+ * Linux setup of shmem is done in mpxy OVERWRITE mode.
+ * flags[1:0] = 00b
+ */
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_SET_SHMEM,
+ page_to_phys(shmem_page), 0, 0, 0, 0, 0);
+ if (sret.error) {
+ free_pages((unsigned long)page_to_virt(shmem_page),
+ get_order(mpxy_shmem_size));
+ return sbi_err_map_linux_errno(sret.error);
+ }
+
+ mpxy->shmem = page_to_virt(shmem_page);
+ mpxy->shmem_phys_addr = page_to_phys(shmem_page);
+ mpxy->shmem_active = true;
+
+ return 0;
+}
+
+/* ====== MPXY mailbox data structures ====== */
+
+/* MPXY mailbox channel */
+struct mpxy_mbox_channel {
+ struct mpxy_mbox *mbox;
+ u32 channel_id;
+ struct sbi_mpxy_channel_attrs attrs;
+ struct sbi_mpxy_rpmi_channel_attrs rpmi_attrs;
+ struct sbi_mpxy_notification_data *notif;
+ u32 max_xfer_len;
+ bool have_events_state;
+ u32 msi_index;
+ u32 msi_irq;
+ bool started;
+};
+
+/* MPXY mailbox */
+struct mpxy_mbox {
+ struct device *dev;
+ u32 channel_count;
+ struct mpxy_mbox_channel *channels;
+ u32 msi_count;
+ struct mpxy_mbox_channel **msi_index_to_channel;
+ struct mbox_controller controller;
+};
+
+/* ====== MPXY RPMI processing ====== */
+
+static void mpxy_mbox_send_rpmi_data(struct mpxy_mbox_channel *mchan,
+ struct rpmi_mbox_message *msg)
+{
+ msg->error = 0;
+ switch (msg->type) {
+ case RPMI_MBOX_MSG_TYPE_GET_ATTRIBUTE:
+ switch (msg->attr.id) {
+ case RPMI_MBOX_ATTR_SPEC_VERSION:
+ msg->attr.value = mchan->attrs.msg_proto_version;
+ break;
+ case RPMI_MBOX_ATTR_MAX_MSG_DATA_SIZE:
+ msg->attr.value = mchan->max_xfer_len;
+ break;
+ case RPMI_MBOX_ATTR_SERVICEGROUP_ID:
+ msg->attr.value = mchan->rpmi_attrs.servicegroup_id;
+ break;
+ case RPMI_MBOX_ATTR_SERVICEGROUP_VERSION:
+ msg->attr.value = mchan->rpmi_attrs.servicegroup_version;
+ break;
+ case RPMI_MBOX_ATTR_IMPL_ID:
+ msg->attr.value = mchan->rpmi_attrs.impl_id;
+ break;
+ case RPMI_MBOX_ATTR_IMPL_VERSION:
+ msg->attr.value = mchan->rpmi_attrs.impl_version;
+ break;
+ default:
+ msg->error = -EOPNOTSUPP;
+ break;
+ }
+ break;
+ case RPMI_MBOX_MSG_TYPE_SET_ATTRIBUTE:
+ /* None of the RPMI linux mailbox attributes are writeable */
+ msg->error = -EOPNOTSUPP;
+ break;
+ case RPMI_MBOX_MSG_TYPE_SEND_WITH_RESPONSE:
+ if ((!msg->data.request && msg->data.request_len) ||
+ (msg->data.request && msg->data.request_len > mchan->max_xfer_len) ||
+ (!msg->data.response && msg->data.max_response_len)) {
+ msg->error = -EINVAL;
+ break;
+ }
+ if (!(mchan->attrs.capability & SBI_MPXY_CHAN_CAP_SEND_WITH_RESP)) {
+ msg->error = -EIO;
+ break;
+ }
+ msg->error = mpxy_send_message_with_resp(mchan->channel_id,
+ msg->data.service_id,
+ msg->data.request,
+ msg->data.request_len,
+ msg->data.response,
+ msg->data.max_response_len,
+ &msg->data.out_response_len);
+ break;
+ case RPMI_MBOX_MSG_TYPE_SEND_WITHOUT_RESPONSE:
+ if ((!msg->data.request && msg->data.request_len) ||
+ (msg->data.request && msg->data.request_len > mchan->max_xfer_len)) {
+ msg->error = -EINVAL;
+ break;
+ }
+ if (!(mchan->attrs.capability & SBI_MPXY_CHAN_CAP_SEND_WITHOUT_RESP)) {
+ msg->error = -EIO;
+ break;
+ }
+ msg->error = mpxy_send_message_without_resp(mchan->channel_id,
+ msg->data.service_id,
+ msg->data.request,
+ msg->data.request_len);
+ break;
+ default:
+ msg->error = -EOPNOTSUPP;
+ break;
+ }
+}
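+
+/*
+ * Client sketch (field names are the ones consumed above; treat this
+ * as illustrative, not as the canonical client API):
+ *
+ * struct rpmi_mbox_message msg = {
+ *	.type = RPMI_MBOX_MSG_TYPE_GET_ATTRIBUTE,
+ *	.attr.id = RPMI_MBOX_ATTR_SPEC_VERSION,
+ * };
+ *
+ * mbox_send_message(chan, &msg);
+ * // on return, msg.attr.value or msg.error has been filled in
+ */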
+
+static void mpxy_mbox_peek_rpmi_data(struct mbox_chan *chan,
+ struct mpxy_mbox_channel *mchan,
+ struct sbi_mpxy_notification_data *notif,
+ unsigned long events_data_len)
+{
+ struct rpmi_notification_event *event;
+ struct rpmi_mbox_message msg;
+ unsigned long pos = 0;
+
+ while (pos < events_data_len && (events_data_len - pos) >= sizeof(*event)) {
+ event = (struct rpmi_notification_event *)(notif->events_data + pos);
+
+ msg.type = RPMI_MBOX_MSG_TYPE_NOTIFICATION_EVENT;
+ msg.notif.event_datalen = le16_to_cpu(event->event_datalen);
+ msg.notif.event_id = event->event_id;
+ msg.notif.event_data = event->event_data;
+ msg.error = 0;
+
+ mbox_chan_received_data(chan, &msg);
+ pos += sizeof(*event) + msg.notif.event_datalen;
+ }
+}
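+
+/*
+ * Layout sketch of the events_data parsed above (assuming struct
+ * rpmi_notification_event is a fixed header followed by event_datalen
+ * bytes of payload):
+ *
+ * | hdr0 | data0 ... | hdr1 | data1 ... |
+ *
+ * hence each iteration advances pos by sizeof(*event) + event_datalen.
+ */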
+
+static int mpxy_mbox_read_rpmi_attrs(struct mpxy_mbox_channel *mchan)
+{
+ return mpxy_read_attrs(mchan->channel_id,
+ SBI_MPXY_ATTR_MSGPROTO_ATTR_START,
+ sizeof(mchan->rpmi_attrs) / sizeof(u32),
+ (u32 *)&mchan->rpmi_attrs);
+}
+
+/* ====== MPXY mailbox callbacks ====== */
+
+static int mpxy_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct mpxy_mbox_channel *mchan = chan->con_priv;
+
+ if (mchan->attrs.msg_proto_id == SBI_MPXY_MSGPROTO_RPMI_ID) {
+ mpxy_mbox_send_rpmi_data(mchan, data);
+ return 0;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static bool mpxy_mbox_peek_data(struct mbox_chan *chan)
+{
+ struct mpxy_mbox_channel *mchan = chan->con_priv;
+ struct sbi_mpxy_notification_data *notif = mchan->notif;
+ bool have_notifications = false;
+ unsigned long data_len;
+ int rc;
+
+ if (!(mchan->attrs.capability & SBI_MPXY_CHAN_CAP_GET_NOTIFICATIONS))
+ return false;
+
+ do {
+ rc = mpxy_get_notifications(mchan->channel_id, notif, &data_len);
+ if (rc || !data_len)
+ break;
+
+ if (mchan->attrs.msg_proto_id == SBI_MPXY_MSGPROTO_RPMI_ID)
+ mpxy_mbox_peek_rpmi_data(chan, mchan, notif, data_len);
+
+ have_notifications = true;
+ } while (1);
+
+ return have_notifications;
+}
+
+static irqreturn_t mpxy_mbox_irq_thread(int irq, void *dev_id)
+{
+ mpxy_mbox_peek_data(dev_id);
+ return IRQ_HANDLED;
+}
+
+static int mpxy_mbox_setup_msi(struct mbox_chan *chan,
+ struct mpxy_mbox_channel *mchan)
+{
+ struct device *dev = mchan->mbox->dev;
+ int rc;
+
+ /* Do nothing if MSI not supported */
+ if (mchan->msi_irq == U32_MAX)
+ return 0;
+
+ /* Fail if MSI already enabled */
+ if (mchan->attrs.msi_control)
+ return -EALREADY;
+
+ /* Request channel MSI handler */
+ rc = request_threaded_irq(mchan->msi_irq, NULL, mpxy_mbox_irq_thread,
+ 0, dev_name(dev), chan);
+ if (rc) {
+ dev_err(dev, "failed to request MPXY channel 0x%x IRQ\n",
+ mchan->channel_id);
+ return rc;
+ }
+
+ /* Enable channel MSI control */
+ mchan->attrs.msi_control = 1;
+ rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_MSI_CONTROL,
+ 1, &mchan->attrs.msi_control);
+ if (rc) {
+ dev_err(dev, "enable MSI control failed for MPXY channel 0x%x\n",
+ mchan->channel_id);
+ mchan->attrs.msi_control = 0;
+ free_irq(mchan->msi_irq, chan);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void mpxy_mbox_cleanup_msi(struct mbox_chan *chan,
+ struct mpxy_mbox_channel *mchan)
+{
+ struct device *dev = mchan->mbox->dev;
+ int rc;
+
+ /* Do nothing if MSI not supported */
+ if (mchan->msi_irq == U32_MAX)
+ return;
+
+ /* Do nothing if MSI already disabled */
+ if (!mchan->attrs.msi_control)
+ return;
+
+ /* Disable channel MSI control */
+ mchan->attrs.msi_control = 0;
+ rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_MSI_CONTROL,
+ 1, &mchan->attrs.msi_control);
+ if (rc) {
+ dev_err(dev, "disable MSI control failed for MPXY channel 0x%x\n",
+ mchan->channel_id);
+ }
+
+ /* Free channel MSI handler */
+ free_irq(mchan->msi_irq, chan);
+}
+
+static int mpxy_mbox_setup_events(struct mpxy_mbox_channel *mchan)
+{
+ struct device *dev = mchan->mbox->dev;
+ int rc;
+
+ /* Do nothing if events state not supported */
+ if (!mchan->have_events_state)
+ return 0;
+
+ /* Fail if events state already enabled */
+ if (mchan->attrs.events_state_ctrl)
+ return -EALREADY;
+
+ /* Enable channel events state */
+ mchan->attrs.events_state_ctrl = 1;
+ rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_EVENTS_STATE_CONTROL,
+ 1, &mchan->attrs.events_state_ctrl);
+ if (rc) {
+ dev_err(dev, "enable events state failed for MPXY channel 0x%x\n",
+ mchan->channel_id);
+ mchan->attrs.events_state_ctrl = 0;
+ return rc;
+ }
+
+ return 0;
+}
+
+static void mpxy_mbox_cleanup_events(struct mpxy_mbox_channel *mchan)
+{
+ struct device *dev = mchan->mbox->dev;
+ int rc;
+
+ /* Do nothing if events state not supported */
+ if (!mchan->have_events_state)
+ return;
+
+ /* Do nothing if events state already disabled */
+ if (!mchan->attrs.events_state_ctrl)
+ return;
+
+ /* Disable channel events state */
+ mchan->attrs.events_state_ctrl = 0;
+ rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_EVENTS_STATE_CONTROL,
+ 1, &mchan->attrs.events_state_ctrl);
+ if (rc)
+ dev_err(dev, "disable events state failed for MPXY channel 0x%x\n",
+ mchan->channel_id);
+}
+
+static int mpxy_mbox_startup(struct mbox_chan *chan)
+{
+ struct mpxy_mbox_channel *mchan = chan->con_priv;
+ int rc;
+
+ if (mchan->started)
+ return -EALREADY;
+
+ /* Setup channel MSI */
+ rc = mpxy_mbox_setup_msi(chan, mchan);
+ if (rc)
+ return rc;
+
+ /* Setup channel notification events */
+ rc = mpxy_mbox_setup_events(mchan);
+ if (rc) {
+ mpxy_mbox_cleanup_msi(chan, mchan);
+ return rc;
+ }
+
+ /* Mark the channel as started */
+ mchan->started = true;
+
+ return 0;
+}
+
+static void mpxy_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct mpxy_mbox_channel *mchan = chan->con_priv;
+
+ if (!mchan->started)
+ return;
+
+ /* Mark the channel as stopped */
+ mchan->started = false;
+
+ /* Cleanup channel notification events */
+ mpxy_mbox_cleanup_events(mchan);
+
+ /* Cleanup channel MSI */
+ mpxy_mbox_cleanup_msi(chan, mchan);
+}
+
+static const struct mbox_chan_ops mpxy_mbox_ops = {
+ .send_data = mpxy_mbox_send_data,
+ .peek_data = mpxy_mbox_peek_data,
+ .startup = mpxy_mbox_startup,
+ .shutdown = mpxy_mbox_shutdown,
+};
+
+/* ====== MPXY platform driver ===== */
+
+static void mpxy_mbox_msi_write(struct msi_desc *desc, struct msi_msg *msg)
+{
+ struct device *dev = msi_desc_to_dev(desc);
+ struct mpxy_mbox *mbox = dev_get_drvdata(dev);
+ struct mpxy_mbox_channel *mchan;
+ struct sbi_mpxy_msi_info *minfo;
+ int rc;
+
+ mchan = mbox->msi_index_to_channel[desc->msi_index];
+ if (!mchan) {
+ dev_warn(dev, "MPXY channel not available for MSI index %d\n",
+ desc->msi_index);
+ return;
+ }
+
+ minfo = &mchan->attrs.msi_info;
+ minfo->msi_addr_lo = msg->address_lo;
+ minfo->msi_addr_hi = msg->address_hi;
+ minfo->msi_data = msg->data;
+
+ rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_MSI_ADDR_LO,
+ sizeof(*minfo) / sizeof(u32), (u32 *)minfo);
+ if (rc) {
+ dev_warn(dev, "failed to write MSI info for MPXY channel 0x%x\n",
+ mchan->channel_id);
+ }
+}
+
+static struct mbox_chan *mpxy_mbox_fw_xlate(struct mbox_controller *ctlr,
+ const struct fwnode_reference_args *pa)
+{
+ struct mpxy_mbox *mbox = container_of(ctlr, struct mpxy_mbox, controller);
+ struct mpxy_mbox_channel *mchan;
+ u32 i;
+
+ if (pa->nargs != 2)
+ return ERR_PTR(-EINVAL);
+
+ for (i = 0; i < mbox->channel_count; i++) {
+ mchan = &mbox->channels[i];
+ if (mchan->channel_id == pa->args[0] &&
+ mchan->attrs.msg_proto_id == pa->args[1])
+ return &mbox->controller.chans[i];
+ }
+
+ return ERR_PTR(-ENOENT);
+}
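+
+/*
+ * Consumer sketch (cell values are illustrative; the binding is
+ * authoritative): channels are referenced by channel ID and message
+ * protocol ID, matching the two args checked above:
+ *
+ * mboxes = <&mpxy_mbox 0x1000 SBI_MPXY_MSGPROTO_RPMI_ID>;
+ */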
+
+static int mpxy_mbox_populate_channels(struct mpxy_mbox *mbox)
+{
+ u32 i, *channel_ids __free(kfree) = NULL;
+ struct mpxy_mbox_channel *mchan;
+ int rc;
+
+ /* Find out the number of channels */
+ rc = mpxy_get_channel_count(&mbox->channel_count);
+ if (rc)
+ return dev_err_probe(mbox->dev, rc, "failed to get number of MPXY channels\n");
+ if (!mbox->channel_count)
+ return dev_err_probe(mbox->dev, -ENODEV, "no MPXY channels available\n");
+
+ /* Allocate and fetch all channel IDs */
+ channel_ids = kcalloc(mbox->channel_count, sizeof(*channel_ids), GFP_KERNEL);
+ if (!channel_ids)
+ return -ENOMEM;
+ rc = mpxy_get_channel_ids(mbox->channel_count, channel_ids);
+ if (rc)
+ return dev_err_probe(mbox->dev, rc, "failed to get MPXY channel IDs\n");
+
+ /* Populate all channels */
+ mbox->channels = devm_kcalloc(mbox->dev, mbox->channel_count,
+ sizeof(*mbox->channels), GFP_KERNEL);
+ if (!mbox->channels)
+ return -ENOMEM;
+ for (i = 0; i < mbox->channel_count; i++) {
+ mchan = &mbox->channels[i];
+ mchan->mbox = mbox;
+ mchan->channel_id = channel_ids[i];
+
+ rc = mpxy_read_attrs(mchan->channel_id, SBI_MPXY_ATTR_MSG_PROT_ID,
+ sizeof(mchan->attrs) / sizeof(u32),
+ (u32 *)&mchan->attrs);
+ if (rc) {
+ return dev_err_probe(mbox->dev, rc,
+ "MPXY channel 0x%x read attrs failed\n",
+ mchan->channel_id);
+ }
+
+ if (mchan->attrs.msg_proto_id == SBI_MPXY_MSGPROTO_RPMI_ID) {
+ rc = mpxy_mbox_read_rpmi_attrs(mchan);
+ if (rc) {
+ return dev_err_probe(mbox->dev, rc,
+ "MPXY channel 0x%x read RPMI attrs failed\n",
+ mchan->channel_id);
+ }
+ }
+
+ mchan->notif = devm_kzalloc(mbox->dev, mpxy_shmem_size, GFP_KERNEL);
+ if (!mchan->notif)
+ return -ENOMEM;
+
+ mchan->max_xfer_len = min(mpxy_shmem_size, mchan->attrs.msg_max_len);
+
+ if ((mchan->attrs.capability & SBI_MPXY_CHAN_CAP_GET_NOTIFICATIONS) &&
+ (mchan->attrs.capability & SBI_MPXY_CHAN_CAP_EVENTS_STATE))
+ mchan->have_events_state = true;
+
+ if ((mchan->attrs.capability & SBI_MPXY_CHAN_CAP_GET_NOTIFICATIONS) &&
+ (mchan->attrs.capability & SBI_MPXY_CHAN_CAP_MSI))
+ mchan->msi_index = mbox->msi_count++;
+ else
+ mchan->msi_index = U32_MAX;
+ mchan->msi_irq = U32_MAX;
+ }
+
+ return 0;
+}
+
+static int mpxy_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mpxy_mbox_channel *mchan;
+ struct mpxy_mbox *mbox;
+ int msi_idx, rc;
+ u32 i;
+
+ /*
+ * Initialize MPXY shared memory only once. This also ensures
+ * that SBI MPXY mailbox is probed only once.
+ */
+ if (mpxy_shmem_init_done) {
+ dev_err(dev, "SBI MPXY mailbox already initialized\n");
+ return -EALREADY;
+ }
+
+ /* Probe for SBI MPXY extension */
+ if (sbi_spec_version < sbi_mk_version(1, 0) ||
+ sbi_probe_extension(SBI_EXT_MPXY) <= 0) {
+ dev_info(dev, "SBI MPXY extension not available\n");
+ return -ENODEV;
+ }
+
+ /* Find out the shared memory size */
+ rc = mpxy_get_shmem_size(&mpxy_shmem_size);
+ if (rc)
+ return dev_err_probe(dev, rc, "failed to get MPXY shared memory size\n");
+
+ /*
+ * Setup MPXY shared memory on each CPU
+ *
+ * Note: Don't cleanup MPXY shared memory upon CPU power-down
+ * because the RPMI System MSI irqchip driver needs it to be
+ * available when migrating IRQs in CPU power-down path.
+ */
+ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "riscv/sbi-mpxy-shmem",
+ mpxy_setup_shmem, NULL);
+
+ /* Mark MPXY shared memory initialization as done */
+ mpxy_shmem_init_done = true;
+
+ /* Allocate mailbox instance */
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ mbox->dev = dev;
+ platform_set_drvdata(pdev, mbox);
+
+ /* Populate mailbox channels */
+ rc = mpxy_mbox_populate_channels(mbox);
+ if (rc)
+ return rc;
+
+ /* Initialize mailbox controller */
+ mbox->controller.txdone_irq = false;
+ mbox->controller.txdone_poll = false;
+ mbox->controller.ops = &mpxy_mbox_ops;
+ mbox->controller.dev = dev;
+ mbox->controller.num_chans = mbox->channel_count;
+ mbox->controller.fw_xlate = mpxy_mbox_fw_xlate;
+ mbox->controller.chans = devm_kcalloc(dev, mbox->channel_count,
+ sizeof(*mbox->controller.chans),
+ GFP_KERNEL);
+ if (!mbox->controller.chans)
+ return -ENOMEM;
+ for (i = 0; i < mbox->channel_count; i++)
+ mbox->controller.chans[i].con_priv = &mbox->channels[i];
+
+ /* Setup MSIs for mailbox (if required) */
+ if (mbox->msi_count) {
+ /*
+ * The device MSI domain for platform devices on the RISC-V
+ * architecture is only available after the MSI controller driver
+ * is probed, so configure it explicitly here.
+ */
+ if (!dev_get_msi_domain(dev)) {
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+
+ /*
+ * The device MSI domain for OF devices is only set at the
+ * time of populating/creating OF device. If the device MSI
+ * domain is discovered later after the OF device is created
+ * then we need to set it explicitly before using any platform
+ * MSI functions.
+ */
+ if (is_of_node(fwnode)) {
+ of_msi_configure(dev, dev_of_node(dev));
+ } else if (is_acpi_device_node(fwnode)) {
+ struct irq_domain *msi_domain;
+
+ msi_domain = irq_find_matching_fwnode(imsic_acpi_get_fwnode(dev),
+ DOMAIN_BUS_PLATFORM_MSI);
+ dev_set_msi_domain(dev, msi_domain);
+ }
+
+ if (!dev_get_msi_domain(dev))
+ return -EPROBE_DEFER;
+ }
+
+ mbox->msi_index_to_channel = devm_kcalloc(dev, mbox->msi_count,
+ sizeof(*mbox->msi_index_to_channel),
+ GFP_KERNEL);
+ if (!mbox->msi_index_to_channel)
+ return -ENOMEM;
+
+ for (msi_idx = 0; msi_idx < mbox->msi_count; msi_idx++) {
+ for (i = 0; i < mbox->channel_count; i++) {
+ mchan = &mbox->channels[i];
+ if (mchan->msi_index == msi_idx) {
+ mbox->msi_index_to_channel[msi_idx] = mchan;
+ break;
+ }
+ }
+ }
+
+ rc = platform_device_msi_init_and_alloc_irqs(dev, mbox->msi_count,
+ mpxy_mbox_msi_write);
+ if (rc) {
+ return dev_err_probe(dev, rc, "Failed to allocate %d MSIs\n",
+ mbox->msi_count);
+ }
+
+ for (i = 0; i < mbox->channel_count; i++) {
+ mchan = &mbox->channels[i];
+ if (mchan->msi_index == U32_MAX)
+ continue;
+ mchan->msi_irq = msi_get_virq(dev, mchan->msi_index);
+ }
+ }
+
+ /* Register mailbox controller */
+ rc = devm_mbox_controller_register(dev, &mbox->controller);
+ if (rc) {
+ dev_err_probe(dev, rc, "Registering SBI MPXY mailbox failed\n");
+ if (mbox->msi_count)
+ platform_device_msi_free_irqs_all(dev);
+ return rc;
+ }
+
+#ifdef CONFIG_ACPI
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+
+ if (adev)
+ acpi_dev_clear_dependencies(adev);
+#endif
+
+ dev_info(dev, "mailbox registered with %d channels\n",
+ mbox->channel_count);
+ return 0;
+}
+
+static void mpxy_mbox_remove(struct platform_device *pdev)
+{
+ struct mpxy_mbox *mbox = platform_get_drvdata(pdev);
+
+ if (mbox->msi_count)
+ platform_device_msi_free_irqs_all(mbox->dev);
+}
+
+static const struct of_device_id mpxy_mbox_of_match[] = {
+ { .compatible = "riscv,sbi-mpxy-mbox" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mpxy_mbox_of_match);
+
+static const struct acpi_device_id mpxy_mbox_acpi_match[] = {
+ { "RSCV0005" },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, mpxy_mbox_acpi_match);
+
+static struct platform_driver mpxy_mbox_driver = {
+ .driver = {
+ .name = "riscv-sbi-mpxy-mbox",
+ .of_match_table = mpxy_mbox_of_match,
+ .acpi_match_table = mpxy_mbox_acpi_match,
+ },
+ .probe = mpxy_mbox_probe,
+ .remove = mpxy_mbox_remove,
+};
+module_platform_driver(mpxy_mbox_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Anup Patel <apatel@ventanamicro.com>");
+MODULE_DESCRIPTION("RISC-V SBI MPXY mailbox controller driver");
diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
index 0c143beaafda..967967b2b8a9 100644
--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
+++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
@@ -62,7 +62,8 @@
#define DST_BIT_POS 9U
#define SRC_BITMASK GENMASK(11, 8)
-#define MAX_SGI 16
+/* Macro to represent SGI type for IPI IRQs */
+#define IPI_IRQ_TYPE_SGI 2
/*
* Module parameters
@@ -121,6 +122,7 @@ struct zynqmp_ipi_mbox {
* @dev: device pointer corresponding to the Xilinx ZynqMP
* IPI agent
* @irq: IPI agent interrupt ID
+ * @irq_type: IPI SGI or SPI IRQ type
* @method: IPI SMC or HVC is going to be used
* @local_id: local IPI agent ID
* @virq_sgi: IRQ number mapped to SGI
@@ -130,6 +132,7 @@ struct zynqmp_ipi_mbox {
struct zynqmp_ipi_pdata {
struct device *dev;
int irq;
+ unsigned int irq_type;
unsigned int method;
u32 local_id;
int virq_sgi;
@@ -887,17 +890,14 @@ static void zynqmp_ipi_free_mboxes(struct zynqmp_ipi_pdata *pdata)
struct zynqmp_ipi_mbox *ipi_mbox;
int i;
- if (pdata->irq < MAX_SGI)
+ if (pdata->irq_type == IPI_IRQ_TYPE_SGI)
xlnx_mbox_cleanup_sgi(pdata);
- i = pdata->num_mboxes;
+ i = pdata->num_mboxes - 1;
for (; i >= 0; i--) {
ipi_mbox = &pdata->ipi_mboxes[i];
- if (ipi_mbox->dev.parent) {
- mbox_controller_unregister(&ipi_mbox->mbox);
- if (device_is_registered(&ipi_mbox->dev))
- device_unregister(&ipi_mbox->dev);
- }
+ if (device_is_registered(&ipi_mbox->dev))
+ device_unregister(&ipi_mbox->dev);
}
}
@@ -959,14 +959,16 @@ static int zynqmp_ipi_probe(struct platform_device *pdev)
dev_err(dev, "failed to parse interrupts\n");
goto free_mbox_dev;
}
- ret = out_irq.args[1];
+
+ /* Use interrupt type to distinguish SGI and SPI interrupts */
+ pdata->irq_type = out_irq.args[0];
/*
* If Interrupt number is in SGI range, then request SGI else request
* IPI system IRQ.
*/
- if (ret < MAX_SGI) {
- pdata->irq = ret;
+ if (pdata->irq_type == IPI_IRQ_TYPE_SGI) {
+ pdata->irq = out_irq.args[1];
ret = xlnx_mbox_init_sgi(pdev, pdata->irq, pdata);
if (ret)
goto free_mbox_dev;
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index ddb37f6670de..104aa5355090 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -37,6 +37,32 @@ config BLK_DEV_MD
If unsure, say N.
+config MD_BITMAP
+ bool "MD RAID bitmap support"
+ default y
+ depends on BLK_DEV_MD
+ help
+ If you say Y here, support for the write intent bitmap will be
+ enabled. The bitmap can be used to optimize resync speed after a
+ power failure or after re-adding a disk, by limiting the resync to
+ the dirty sectors recorded in the bitmap.
+
+ This feature can be added to an existing MD array, or an MD array
+ can be created with a bitmap, via mdadm(8).
+
+ If unsure, say Y.
+
+config MD_LLBITMAP
+ bool "MD RAID lockless bitmap support"
+ depends on BLK_DEV_MD
+ help
+ If you say Y here, support for the lockless write intent bitmap will
+ be enabled.
+
+ Note, this is an experimental feature.
+
+ If unsure, say N.
+
config MD_AUTODETECT
bool "Autodetect RAID arrays during kernel boot"
depends on BLK_DEV_MD=y
@@ -54,6 +80,7 @@ config MD_AUTODETECT
config MD_BITMAP_FILE
bool "MD bitmap file support (deprecated)"
default y
+ depends on MD_BITMAP
help
If you say Y here, support for write intent bitmaps in files on an
external file system is enabled. This is an alternative to the internal
@@ -174,6 +201,7 @@ config MD_RAID456
config MD_CLUSTER
tristate "Cluster Support for MD"
+ select MD_BITMAP
depends on BLK_DEV_MD
depends on DLM
default n
@@ -393,6 +421,7 @@ config DM_RAID
select MD_RAID1
select MD_RAID10
select MD_RAID456
+ select MD_BITMAP
select BLK_DEV_MD
help
A dm target that supports RAID1, RAID10, RAID4, RAID5 and RAID6 mappings
@@ -659,4 +688,6 @@ config DM_AUDIT
source "drivers/md/dm-vdo/Kconfig"
+source "drivers/md/dm-pcache/Kconfig"
+
endif # MD
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 87bdfc9fe14c..c338cc6fbe2e 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -27,7 +27,9 @@ dm-clone-y += dm-clone-target.o dm-clone-metadata.o
dm-verity-y += dm-verity-target.o
dm-zoned-y += dm-zoned-target.o dm-zoned-metadata.o dm-zoned-reclaim.o
-md-mod-y += md.o md-bitmap.o
+md-mod-y += md.o
+md-mod-$(CONFIG_MD_BITMAP) += md-bitmap.o
+md-mod-$(CONFIG_MD_LLBITMAP) += md-llbitmap.o
raid456-y += raid5.o raid5-cache.o raid5-ppl.o
linear-y += md-linear.o
@@ -71,6 +73,7 @@ obj-$(CONFIG_DM_RAID) += dm-raid.o
obj-$(CONFIG_DM_THIN_PROVISIONING) += dm-thin-pool.o
obj-$(CONFIG_DM_VERITY) += dm-verity.o
obj-$(CONFIG_DM_VDO) += dm-vdo/
+obj-$(CONFIG_DM_PCACHE) += dm-pcache/
obj-$(CONFIG_DM_CACHE) += dm-cache.o
obj-$(CONFIG_DM_CACHE_SMQ) += dm-cache-smq.o
obj-$(CONFIG_DM_EBS) += dm-ebs.o
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 7510d1c983a5..f327456fc4e0 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -115,8 +115,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
check = bio_kmalloc(nr_segs, GFP_NOIO);
if (!check)
return;
- bio_init(check, bio->bi_bdev, check->bi_inline_vecs, nr_segs,
- REQ_OP_READ);
+ bio_init_inline(check, bio->bi_bdev, nr_segs, REQ_OP_READ);
check->bi_iter.bi_sector = bio->bi_iter.bi_sector;
check->bi_iter.bi_size = bio->bi_iter.bi_size;
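
These call sites drop the explicit bi_inline_vecs argument in favor of bio_init_inline(). Assuming the helper is the thin wrapper its call sites imply, it amounts to the following (hedged sketch, not the verbatim block-layer definition):

	static inline void bio_init_inline(struct bio *bio, struct block_device *bdev,
					   unsigned short max_vecs, blk_opf_t opf)
	{
		/* initialize the bio using its own trailing inline bvec storage */
		bio_init(bio, bdev, bio->bi_inline_vecs, max_vecs, opf);
	}

so callers no longer have to name the bio's internal vector array themselves.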
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 020712c5203f..2386d08bf4e4 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -26,8 +26,7 @@ struct bio *bch_bbio_alloc(struct cache_set *c)
struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
struct bio *bio = &b->bio;
- bio_init(bio, NULL, bio->bi_inline_vecs,
- meta_bucket_pages(&c->cache->sb), 0);
+ bio_init_inline(bio, NULL, meta_bucket_pages(&c->cache->sb), 0);
return bio;
}
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 7ff14bd2feb8..d50eb82ccb4f 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -615,7 +615,7 @@ static void do_journal_discard(struct cache *ca)
atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
- bio_init(bio, ca->bdev, bio->bi_inline_vecs, 1, REQ_OP_DISCARD);
+ bio_init_inline(bio, ca->bdev, 1, REQ_OP_DISCARD);
bio->bi_iter.bi_sector = bucket_to_sector(ca->set,
ca->sb.d[ja->discard_idx]);
bio->bi_iter.bi_size = bucket_bytes(ca);
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 26a6a535ec32..73918e55bf04 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -79,7 +79,7 @@ static void moving_init(struct moving_io *io)
{
struct bio *bio = &io->bio.bio;
- bio_init(bio, NULL, bio->bi_inline_vecs,
+ bio_init_inline(bio, NULL,
DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS), 0);
bio_get(bio);
bio->bi_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0);
@@ -145,9 +145,9 @@ static void read_moving(struct cache_set *c)
continue;
}
- io = kzalloc(struct_size(io, bio.bio.bi_inline_vecs,
- DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS)),
- GFP_KERNEL);
+ io = kzalloc(sizeof(*io) + sizeof(struct bio_vec) *
+ DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
+ GFP_KERNEL);
if (!io)
goto err;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 1492c8552255..6d250e366412 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -2236,7 +2236,7 @@ static int cache_alloc(struct cache *ca)
__module_get(THIS_MODULE);
kobject_init(&ca->kobj, &bch_cache_ktype);
- bio_init(&ca->journal.bio, NULL, ca->journal.bio.bi_inline_vecs, 8, 0);
+ bio_init_inline(&ca->journal.bio, NULL, 8, 0);
/*
* When the cache disk is first registered, ca->sb.njournal_buckets
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 302e75f1fc4b..6ba73dc1a3df 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -331,7 +331,7 @@ static void dirty_init(struct keybuf_key *w)
struct dirty_io *io = w->private;
struct bio *bio = &io->bio;
- bio_init(bio, NULL, bio->bi_inline_vecs,
+ bio_init_inline(bio, NULL,
DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS), 0);
if (!io->dc->writeback_percent)
bio->bi_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0);
@@ -536,9 +536,9 @@ static void read_dirty(struct cached_dev *dc)
for (i = 0; i < nk; i++) {
w = keys[i];
- io = kzalloc(struct_size(io, bio.bi_inline_vecs,
- DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS)),
- GFP_KERNEL);
+ io = kzalloc(sizeof(*io) + sizeof(struct bio_vec) *
+ DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
+ GFP_KERNEL);
if (!io)
goto err;
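
With the inline vectors no longer expressed as a flexible array member usable by struct_size(), the allocation size is open-coded. The two forms request the same number of bytes; the difference is that struct_size() saturates on arithmetic overflow while the open-coded multiply does not, which is acceptable here because the vector count is bounded by the key size. Hedged sketch of the equivalence:

	size_t nvecs = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);

	/* was: kzalloc(struct_size(io, bio.bi_inline_vecs, nvecs), GFP_KERNEL) */
	io = kzalloc(sizeof(*io) + sizeof(struct bio_vec) * nvecs, GFP_KERNEL);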
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index ff7595caf440..e6d28be11c5c 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1337,12 +1337,12 @@ static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
char *ptr;
unsigned int len;
- bio = bio_kmalloc(1, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
+ bio = bio_kmalloc(1, GFP_NOWAIT);
if (!bio) {
use_dmio(b, op, sector, n_sectors, offset, ioprio);
return;
}
- bio_init(bio, b->c->bdev, bio->bi_inline_vecs, 1, op);
+ bio_init_inline(bio, b->c->bdev, 1, op);
bio->bi_iter.bi_sector = sector;
bio->bi_end_io = bio_complete;
bio->bi_private = b;
@@ -1601,18 +1601,18 @@ static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client
* dm-bufio is resistant to allocation failures (it just keeps
* one buffer reserved in cases all the allocations fail).
* So set flags to not try too hard:
- * GFP_NOWAIT: don't wait; if we need to sleep we'll release our
- * mutex and wait ourselves.
+ * GFP_NOWAIT: don't wait and don't print a warning in case of
+ * failure; if we need to sleep we'll release our mutex
+ * and wait ourselves.
* __GFP_NORETRY: don't retry and rather return failure
* __GFP_NOMEMALLOC: don't use emergency reserves
- * __GFP_NOWARN: don't print a warning in case of failure
*
* For debugging, if we set the cache size to 1, no new buffers will
* be allocated.
*/
while (1) {
if (dm_bufio_cache_size_latch != 1) {
- b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+ b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC);
if (b)
return b;
}
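
Both call sites drop __GFP_NOWARN because GFP_NOWAIT already implies it, assuming the current definition in include/linux/gfp_types.h:

	#define GFP_NOWAIT	(__GFP_KSWAPD_RECLAIM | __GFP_NOWARN)

bio_kmalloc() also drops __GFP_NORETRY, which has no effect when the allocation cannot enter direct reclaim anyway; the comment block above alloc_buffer() is reworded to match.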
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 2ed894155cab..7e1e8cc0e33a 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -590,7 +590,7 @@ static int h_init(struct smq_hash_table *ht, struct entry_space *es, unsigned in
nr_buckets = roundup_pow_of_two(max(nr_entries / 4u, 16u));
ht->hash_bits = __ffs(nr_buckets);
- ht->buckets = vmalloc(array_size(nr_buckets, sizeof(*ht->buckets)));
+ ht->buckets = vmalloc_array(nr_buckets, sizeof(*ht->buckets));
if (!ht->buckets)
return -ENOMEM;
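
vmalloc_array() is the overflow-checked counterpart of the vmalloc(array_size(...)) pattern: array_size() saturates the product to SIZE_MAX (which vmalloc() then refuses), while vmalloc_array() performs the checked multiply internally. Both are safe; the helper is simply the idiomatic spelling:

	/* n * size with overflow checking done inside the allocator */
	ht->buckets = vmalloc_array(nr_buckets, sizeof(*ht->buckets));
	if (!ht->buckets)
		return -ENOMEM;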
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index c889332e533b..a3c9f74fe2dc 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -162,6 +162,7 @@ struct mapped_device {
#define DMF_SUSPENDED_INTERNALLY 7
#define DMF_POST_SUSPENDING 8
#define DMF_EMULATE_ZONE_APPEND 9
+#define DMF_QUEUE_STOPPED 10
static inline sector_t dm_get_size(struct mapped_device *md)
{
@@ -291,6 +292,7 @@ struct dm_io {
struct dm_io *next;
struct dm_stats_aux stats_aux;
blk_status_t status;
+ bool requeue_flush_with_data;
atomic_t io_count;
struct mapped_device *md;
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index cf17fd46e255..08925aca838c 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -441,7 +441,7 @@ static struct bio *clone_bio(struct dm_target *ti, struct flakey_c *fc, struct b
if (!clone)
return NULL;
- bio_init(clone, fc->dev->bdev, clone->bi_inline_vecs, nr_iovecs, bio->bi_opf);
+ bio_init_inline(clone, fc->dev->bdev, nr_iovecs, bio->bi_opf);
clone->bi_iter.bi_sector = flakey_map_sector(ti, bio->bi_iter.bi_sector);
clone->bi_private = bio;
diff --git a/drivers/md/dm-ima.c b/drivers/md/dm-ima.c
index 8b50c908c6f4..efb3cd4f9cd4 100644
--- a/drivers/md/dm-ima.c
+++ b/drivers/md/dm-ima.c
@@ -45,7 +45,7 @@ static void fix_separator_chars(char **buf)
/*
* Internal function to allocate memory for IMA measurements.
*/
-static void *dm_ima_alloc(size_t len, gfp_t flags, bool noio)
+static void *dm_ima_alloc(size_t len, bool noio)
{
unsigned int noio_flag;
void *ptr;
@@ -53,7 +53,7 @@ static void *dm_ima_alloc(size_t len, gfp_t flags, bool noio)
if (noio)
noio_flag = memalloc_noio_save();
- ptr = kzalloc(len, flags);
+ ptr = kzalloc(len, GFP_KERNEL);
if (noio)
memalloc_noio_restore(noio_flag);
@@ -68,13 +68,13 @@ static int dm_ima_alloc_and_copy_name_uuid(struct mapped_device *md, char **dev_
char **dev_uuid, bool noio)
{
int r;
- *dev_name = dm_ima_alloc(DM_NAME_LEN*2, GFP_KERNEL, noio);
+ *dev_name = dm_ima_alloc(DM_NAME_LEN*2, noio);
if (!(*dev_name)) {
r = -ENOMEM;
goto error;
}
- *dev_uuid = dm_ima_alloc(DM_UUID_LEN*2, GFP_KERNEL, noio);
+ *dev_uuid = dm_ima_alloc(DM_UUID_LEN*2, noio);
if (!(*dev_uuid)) {
r = -ENOMEM;
goto error;
@@ -109,7 +109,7 @@ static int dm_ima_alloc_and_copy_device_data(struct mapped_device *md, char **de
if (r)
return r;
- *device_data = dm_ima_alloc(DM_IMA_DEVICE_BUF_LEN, GFP_KERNEL, noio);
+ *device_data = dm_ima_alloc(DM_IMA_DEVICE_BUF_LEN, noio);
if (!(*device_data)) {
r = -ENOMEM;
goto error;
@@ -153,14 +153,12 @@ static int dm_ima_alloc_and_copy_capacity_str(struct mapped_device *md, char **c
capacity = get_capacity(md->disk);
- *capacity_str = dm_ima_alloc(DM_IMA_DEVICE_CAPACITY_BUF_LEN, GFP_KERNEL, noio);
+ *capacity_str = dm_ima_alloc(DM_IMA_DEVICE_CAPACITY_BUF_LEN, noio);
if (!(*capacity_str))
return -ENOMEM;
- scnprintf(*capacity_str, DM_IMA_DEVICE_BUF_LEN, "current_device_capacity=%llu;",
- capacity);
-
- return 0;
+ return scnprintf(*capacity_str, DM_IMA_DEVICE_BUF_LEN, "current_device_capacity=%llu;",
+ capacity);
}
/*
@@ -195,15 +193,15 @@ void dm_ima_measure_on_table_load(struct dm_table *table, unsigned int status_fl
const size_t hash_alg_prefix_len = strlen(DM_IMA_TABLE_HASH_ALG) + 1;
char table_load_event_name[] = "dm_table_load";
- ima_buf = dm_ima_alloc(DM_IMA_MEASUREMENT_BUF_LEN, GFP_KERNEL, noio);
+ ima_buf = dm_ima_alloc(DM_IMA_MEASUREMENT_BUF_LEN, noio);
if (!ima_buf)
return;
- target_metadata_buf = dm_ima_alloc(DM_IMA_TARGET_METADATA_BUF_LEN, GFP_KERNEL, noio);
+ target_metadata_buf = dm_ima_alloc(DM_IMA_TARGET_METADATA_BUF_LEN, noio);
if (!target_metadata_buf)
goto error;
- target_data_buf = dm_ima_alloc(DM_IMA_TARGET_DATA_BUF_LEN, GFP_KERNEL, noio);
+ target_data_buf = dm_ima_alloc(DM_IMA_TARGET_DATA_BUF_LEN, noio);
if (!target_data_buf)
goto error;
@@ -218,7 +216,7 @@ void dm_ima_measure_on_table_load(struct dm_table *table, unsigned int status_fl
shash->tfm = tfm;
digest_size = crypto_shash_digestsize(tfm);
- digest = dm_ima_alloc(digest_size, GFP_KERNEL, noio);
+ digest = dm_ima_alloc(digest_size, noio);
if (!digest)
goto error;
@@ -327,7 +325,7 @@ void dm_ima_measure_on_table_load(struct dm_table *table, unsigned int status_fl
if (r < 0)
goto error;
- digest_buf = dm_ima_alloc((digest_size*2) + hash_alg_prefix_len + 1, GFP_KERNEL, noio);
+ digest_buf = dm_ima_alloc((digest_size*2) + hash_alg_prefix_len + 1, noio);
if (!digest_buf)
goto error;
@@ -371,18 +369,18 @@ void dm_ima_measure_on_device_resume(struct mapped_device *md, bool swap)
{
char *device_table_data, *dev_name = NULL, *dev_uuid = NULL, *capacity_str = NULL;
char active[] = "active_table_hash=";
- unsigned int active_len = strlen(active), capacity_len = 0;
+ unsigned int active_len = strlen(active);
unsigned int l = 0;
bool noio = true;
bool nodata = true;
- int r;
+ int capacity_len;
- device_table_data = dm_ima_alloc(DM_IMA_DEVICE_BUF_LEN, GFP_KERNEL, noio);
+ device_table_data = dm_ima_alloc(DM_IMA_DEVICE_BUF_LEN, noio);
if (!device_table_data)
return;
- r = dm_ima_alloc_and_copy_capacity_str(md, &capacity_str, noio);
- if (r)
+ capacity_len = dm_ima_alloc_and_copy_capacity_str(md, &capacity_str, noio);
+ if (capacity_len < 0)
goto error;
memcpy(device_table_data + l, DM_IMA_VERSION_STR, md->ima.dm_version_str_len);
@@ -445,8 +443,7 @@ void dm_ima_measure_on_device_resume(struct mapped_device *md, bool swap)
}
if (nodata) {
- r = dm_ima_alloc_and_copy_name_uuid(md, &dev_name, &dev_uuid, noio);
- if (r)
+ if (dm_ima_alloc_and_copy_name_uuid(md, &dev_name, &dev_uuid, noio))
goto error;
l = scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
@@ -454,7 +451,6 @@ void dm_ima_measure_on_device_resume(struct mapped_device *md, bool swap)
DM_IMA_VERSION_STR, dev_name, dev_uuid);
}
- capacity_len = strlen(capacity_str);
memcpy(device_table_data + l, capacity_str, capacity_len);
l += capacity_len;
@@ -483,18 +479,17 @@ void dm_ima_measure_on_device_remove(struct mapped_device *md, bool remove_all)
unsigned int device_active_len = strlen(device_active_str);
unsigned int device_inactive_len = strlen(device_inactive_str);
unsigned int remove_all_len = strlen(remove_all_str);
- unsigned int capacity_len = 0;
unsigned int l = 0;
bool noio = true;
bool nodata = true;
- int r;
+ int capacity_len;
- device_table_data = dm_ima_alloc(DM_IMA_DEVICE_BUF_LEN*2, GFP_KERNEL, noio);
+ device_table_data = dm_ima_alloc(DM_IMA_DEVICE_BUF_LEN*2, noio);
if (!device_table_data)
goto exit;
- r = dm_ima_alloc_and_copy_capacity_str(md, &capacity_str, noio);
- if (r) {
+ capacity_len = dm_ima_alloc_and_copy_capacity_str(md, &capacity_str, noio);
+ if (capacity_len < 0) {
kfree(device_table_data);
goto exit;
}
@@ -570,7 +565,6 @@ void dm_ima_measure_on_device_remove(struct mapped_device *md, bool remove_all)
memcpy(device_table_data + l, remove_all ? "y;" : "n;", 2);
l += 2;
- capacity_len = strlen(capacity_str);
memcpy(device_table_data + l, capacity_str, capacity_len);
l += capacity_len;
@@ -602,20 +596,20 @@ exit:
*/
void dm_ima_measure_on_table_clear(struct mapped_device *md, bool new_map)
{
- unsigned int l = 0, capacity_len = 0;
+ unsigned int l = 0;
char *device_table_data = NULL, *dev_name = NULL, *dev_uuid = NULL, *capacity_str = NULL;
char inactive_str[] = "inactive_table_hash=";
unsigned int inactive_len = strlen(inactive_str);
bool noio = true;
bool nodata = true;
- int r;
+ int capacity_len;
- device_table_data = dm_ima_alloc(DM_IMA_DEVICE_BUF_LEN, GFP_KERNEL, noio);
+ device_table_data = dm_ima_alloc(DM_IMA_DEVICE_BUF_LEN, noio);
if (!device_table_data)
return;
- r = dm_ima_alloc_and_copy_capacity_str(md, &capacity_str, noio);
- if (r)
+ capacity_len = dm_ima_alloc_and_copy_capacity_str(md, &capacity_str, noio);
+ if (capacity_len < 0)
goto error1;
memcpy(device_table_data + l, DM_IMA_VERSION_STR, md->ima.dm_version_str_len);
@@ -650,7 +644,6 @@ void dm_ima_measure_on_table_clear(struct mapped_device *md, bool new_map)
DM_IMA_VERSION_STR, dev_name, dev_uuid);
}
- capacity_len = strlen(capacity_str);
memcpy(device_table_data + l, capacity_str, capacity_len);
l += capacity_len;
@@ -703,7 +696,7 @@ void dm_ima_measure_on_device_rename(struct mapped_device *md)
char *old_device_data = NULL, *new_device_data = NULL, *combined_device_data = NULL;
char *new_dev_name = NULL, *new_dev_uuid = NULL, *capacity_str = NULL;
bool noio = true;
- int r, len;
+ int len;
if (dm_ima_alloc_and_copy_device_data(md, &new_device_data,
md->ima.active_table.num_targets, noio))
@@ -712,12 +705,11 @@ void dm_ima_measure_on_device_rename(struct mapped_device *md)
if (dm_ima_alloc_and_copy_name_uuid(md, &new_dev_name, &new_dev_uuid, noio))
goto error;
- combined_device_data = dm_ima_alloc(DM_IMA_DEVICE_BUF_LEN * 2, GFP_KERNEL, noio);
+ combined_device_data = dm_ima_alloc(DM_IMA_DEVICE_BUF_LEN * 2, noio);
if (!combined_device_data)
goto error;
- r = dm_ima_alloc_and_copy_capacity_str(md, &capacity_str, noio);
- if (r)
+ if (dm_ima_alloc_and_copy_capacity_str(md, &capacity_str, noio) < 0)
goto error;
old_device_data = md->ima.active_table.device_metadata;
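
The dm-ima refactor above removes the duplicated strlen(capacity_str) at each call site by making dm_ima_alloc_and_copy_capacity_str() return the length that scnprintf() produced. scnprintf() returns the number of characters actually written, excluding the trailing NUL, so callers can consume the value directly; a hedged sketch of the resulting call pattern:

	int capacity_len;

	capacity_len = dm_ima_alloc_and_copy_capacity_str(md, &capacity_str, noio);
	if (capacity_len < 0)
		goto error;

	memcpy(device_table_data + l, capacity_str, capacity_len);
	l += capacity_len;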
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index efeee0a873c0..170bf67a2edd 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -133,7 +133,7 @@ struct journal_sector {
commit_id_t commit_id;
};
-#define MAX_TAG_SIZE (JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))
+#define MAX_TAG_SIZE 255
#define METADATA_PADDING_SECTORS 8
@@ -219,10 +219,13 @@ struct dm_integrity_c {
__u8 log2_blocks_per_bitmap_bit;
unsigned char mode;
+ bool internal_hash;
int failed;
- struct crypto_shash *internal_hash;
+ struct crypto_shash *internal_shash;
+ struct crypto_ahash *internal_ahash;
+ unsigned int internal_hash_digestsize;
struct dm_target *ti;
@@ -277,6 +280,9 @@ struct dm_integrity_c {
bool fix_hmac;
bool legacy_recalculate;
+ mempool_t ahash_req_pool;
+ struct ahash_request *journal_ahash_req;
+
struct alg_spec internal_hash_alg;
struct alg_spec journal_crypt_alg;
struct alg_spec journal_mac_alg;
@@ -326,6 +332,8 @@ struct dm_integrity_io {
unsigned payload_len;
bool integrity_payload_from_mempool;
bool integrity_range_locked;
+
+ struct ahash_request *ahash_req;
};
struct journal_completion {
@@ -352,6 +360,7 @@ struct bitmap_block_status {
static struct kmem_cache *journal_io_cache;
#define JOURNAL_IO_MEMPOOL 32
+#define AHASH_MEMPOOL 32
#ifdef DEBUG_PRINT
#define DEBUG_print(x, ...) printk(KERN_DEBUG x, ##__VA_ARGS__)
@@ -1634,15 +1643,15 @@ static void integrity_end_io(struct bio *bio)
dec_in_flight(dio);
}
-static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
- const char *data, char *result)
+static void integrity_sector_checksum_shash(struct dm_integrity_c *ic, sector_t sector,
+ const char *data, unsigned offset, char *result)
{
__le64 sector_le = cpu_to_le64(sector);
- SHASH_DESC_ON_STACK(req, ic->internal_hash);
+ SHASH_DESC_ON_STACK(req, ic->internal_shash);
int r;
unsigned int digest_size;
- req->tfm = ic->internal_hash;
+ req->tfm = ic->internal_shash;
r = crypto_shash_init(req);
if (unlikely(r < 0)) {
@@ -1664,7 +1673,7 @@ static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector
goto failed;
}
- r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
+ r = crypto_shash_update(req, data + offset, ic->sectors_per_block << SECTOR_SHIFT);
if (unlikely(r < 0)) {
dm_integrity_io_error(ic, "crypto_shash_update", r);
goto failed;
@@ -1676,7 +1685,7 @@ static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector
goto failed;
}
- digest_size = crypto_shash_digestsize(ic->internal_hash);
+ digest_size = ic->internal_hash_digestsize;
if (unlikely(digest_size < ic->tag_size))
memset(result + digest_size, 0, ic->tag_size - digest_size);
@@ -1687,6 +1696,104 @@ failed:
get_random_bytes(result, ic->tag_size);
}
+static void integrity_sector_checksum_ahash(struct dm_integrity_c *ic, struct ahash_request **ahash_req,
+ sector_t sector, struct page *page, unsigned offset, char *result)
+{
+ __le64 sector_le = cpu_to_le64(sector);
+ struct ahash_request *req;
+ DECLARE_CRYPTO_WAIT(wait);
+ struct scatterlist sg[3], *s = sg;
+ int r;
+ unsigned int digest_size;
+ unsigned int nbytes = 0;
+
+ might_sleep();
+
+ req = *ahash_req;
+ if (unlikely(!req)) {
+ req = mempool_alloc(&ic->ahash_req_pool, GFP_NOIO);
+ *ahash_req = req;
+ }
+
+ ahash_request_set_tfm(req, ic->internal_ahash);
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done, &wait);
+
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
+ sg_init_table(sg, 3);
+ sg_set_buf(s, (const __u8 *)&ic->sb->salt, SALT_SIZE);
+ nbytes += SALT_SIZE;
+ s++;
+ } else {
+ sg_init_table(sg, 2);
+ }
+
+ if (likely(!is_vmalloc_addr(&sector_le))) {
+ sg_set_buf(s, &sector_le, sizeof(sector_le));
+ } else {
+ struct page *sec_page = vmalloc_to_page(&sector_le);
+ unsigned int sec_off = offset_in_page(&sector_le);
+ sg_set_page(s, sec_page, sizeof(sector_le), sec_off);
+ }
+ nbytes += sizeof(sector_le);
+ s++;
+
+ sg_set_page(s, page, ic->sectors_per_block << SECTOR_SHIFT, offset);
+ nbytes += ic->sectors_per_block << SECTOR_SHIFT;
+
+ ahash_request_set_crypt(req, sg, result, nbytes);
+
+ r = crypto_wait_req(crypto_ahash_digest(req), &wait);
+ if (unlikely(r)) {
+ dm_integrity_io_error(ic, "crypto_ahash_digest", r);
+ goto failed;
+ }
+
+ digest_size = ic->internal_hash_digestsize;
+ if (unlikely(digest_size < ic->tag_size))
+ memset(result + digest_size, 0, ic->tag_size - digest_size);
+
+ return;
+
+failed:
+ /* this shouldn't happen anyway, the hash functions have no reason to fail */
+ get_random_bytes(result, ic->tag_size);
+}
+
+static void integrity_sector_checksum(struct dm_integrity_c *ic, struct ahash_request **ahash_req,
+ sector_t sector, const char *data, unsigned offset, char *result)
+{
+ if (likely(ic->internal_shash != NULL))
+ integrity_sector_checksum_shash(ic, sector, data, offset, result);
+ else
+ integrity_sector_checksum_ahash(ic, ahash_req, sector, (struct page *)data, offset, result);
+}
+
+static void *integrity_kmap(struct dm_integrity_c *ic, struct page *p)
+{
+ if (likely(ic->internal_shash != NULL))
+ return kmap_local_page(p);
+ else
+ return p;
+}
+
+static void integrity_kunmap(struct dm_integrity_c *ic, const void *ptr)
+{
+ if (likely(ic->internal_shash != NULL))
+ kunmap_local(ptr);
+}
+
+static void *integrity_identity(struct dm_integrity_c *ic, void *data)
+{
+#ifdef CONFIG_DEBUG_SG
+ BUG_ON(offset_in_page(data));
+ BUG_ON(!virt_addr_valid(data));
+#endif
+ if (likely(ic->internal_shash != NULL))
+ return data;
+ else
+ return virt_to_page(data);
+}
+
static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checksum)
{
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
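
integrity_sector_checksum_ahash() above is the standard synchronous-wait usage of the asynchronous hash API. A minimal self-contained sketch of that pattern (illustrative only; tfm is an already-allocated struct crypto_ahash, and the driver pulls requests from ahash_req_pool instead of allocating one per call):

	DECLARE_CRYPTO_WAIT(wait);
	struct ahash_request *req;
	struct scatterlist sg;
	int err;

	req = ahash_request_alloc(tfm, GFP_NOIO);
	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, digest, len);
	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
	ahash_request_free(req);

Caching one request per dm_integrity_io (dio->ahash_req) avoids a mempool round trip for every sector hashed on the I/O path.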
@@ -1711,6 +1818,7 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
sector_t alignment;
char *mem;
char *buffer = page_to_virt(page);
+ unsigned int buffer_offset;
int r;
struct dm_io_request io_req;
struct dm_io_region io_loc;
@@ -1728,7 +1836,7 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
alignment &= -alignment;
io_loc.sector = round_down(io_loc.sector, alignment);
io_loc.count += sector - io_loc.sector;
- buffer += (sector - io_loc.sector) << SECTOR_SHIFT;
+ buffer_offset = (sector - io_loc.sector) << SECTOR_SHIFT;
io_loc.count = round_up(io_loc.count, alignment);
r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
@@ -1737,7 +1845,7 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
goto free_ret;
}
- integrity_sector_checksum(ic, logical_sector, buffer, checksum);
+ integrity_sector_checksum(ic, &dio->ahash_req, logical_sector, integrity_identity(ic, buffer), buffer_offset, checksum);
r = dm_integrity_rw_tag(ic, checksum, &dio->metadata_block,
&dio->metadata_offset, ic->tag_size, TAG_CMP);
if (r) {
@@ -1754,7 +1862,7 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
}
mem = bvec_kmap_local(&bv);
- memcpy(mem + pos, buffer, ic->sectors_per_block << SECTOR_SHIFT);
+ memcpy(mem + pos, buffer + buffer_offset, ic->sectors_per_block << SECTOR_SHIFT);
kunmap_local(mem);
pos += ic->sectors_per_block << SECTOR_SHIFT;
@@ -1776,7 +1884,7 @@ static void integrity_metadata(struct work_struct *w)
if (ic->internal_hash) {
struct bvec_iter iter;
struct bio_vec bv;
- unsigned int digest_size = crypto_shash_digestsize(ic->internal_hash);
+ unsigned int digest_size = ic->internal_hash_digestsize;
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
char *checksums;
unsigned int extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
@@ -1837,17 +1945,17 @@ static void integrity_metadata(struct work_struct *w)
char *mem, *checksums_ptr;
again:
- mem = bvec_kmap_local(&bv_copy);
+ mem = integrity_kmap(ic, bv_copy.bv_page);
pos = 0;
checksums_ptr = checksums;
do {
- integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
+ integrity_sector_checksum(ic, &dio->ahash_req, sector, mem, bv_copy.bv_offset + pos, checksums_ptr);
checksums_ptr += ic->tag_size;
sectors_to_process -= ic->sectors_per_block;
pos += ic->sectors_per_block << SECTOR_SHIFT;
sector += ic->sectors_per_block;
} while (pos < bv_copy.bv_len && sectors_to_process && checksums != checksums_onstack);
- kunmap_local(mem);
+ integrity_kunmap(ic, mem);
r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
@@ -1949,6 +2057,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
dio->ic = ic;
dio->bi_status = 0;
dio->op = bio_op(bio);
+ dio->ahash_req = NULL;
if (ic->mode == 'I') {
bio->bi_iter.bi_sector = dm_target_offset(ic->ti, bio->bi_iter.bi_sector);
@@ -2071,19 +2180,6 @@ retry_kmap:
js++;
mem_ptr += 1 << SECTOR_SHIFT;
} while (++s < ic->sectors_per_block);
-#ifdef INTERNAL_VERIFY
- if (ic->internal_hash) {
- char checksums_onstack[MAX_T(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
-
- integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
- if (unlikely(crypto_memneq(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
- DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
- logical_sector);
- dm_audit_log_bio(DM_MSG_PREFIX, "journal-checksum",
- bio, logical_sector, 0);
- }
- }
-#endif
}
if (!ic->internal_hash) {
@@ -2124,15 +2220,17 @@ retry_kmap:
} while (++s < ic->sectors_per_block);
if (ic->internal_hash) {
- unsigned int digest_size = crypto_shash_digestsize(ic->internal_hash);
+ unsigned int digest_size = ic->internal_hash_digestsize;
+ void *js_page = integrity_identity(ic, (char *)js - offset_in_page(js));
+ unsigned js_offset = offset_in_page(js);
if (unlikely(digest_size > ic->tag_size)) {
char checksums_onstack[HASH_MAX_DIGESTSIZE];
- integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
+ integrity_sector_checksum(ic, &dio->ahash_req, logical_sector, js_page, js_offset, checksums_onstack);
memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
} else
- integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
+ integrity_sector_checksum(ic, &dio->ahash_req, logical_sector, js_page, js_offset, journal_entry_tag(ic, je));
}
journal_entry_set_sector(je, logical_sector);
@@ -2428,7 +2526,7 @@ retry:
if (!dio->integrity_payload) {
unsigned digest_size, extra_size;
dio->payload_len = ic->tuple_size * (bio_sectors(bio) >> ic->sb->log2_sectors_per_block);
- digest_size = crypto_shash_digestsize(ic->internal_hash);
+ digest_size = ic->internal_hash_digestsize;
extra_size = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
dio->payload_len += extra_size;
dio->integrity_payload = kmalloc(dio->payload_len, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
@@ -2505,11 +2603,11 @@ skip_spinlock:
unsigned pos = 0;
while (dio->bio_details.bi_iter.bi_size) {
struct bio_vec bv = bio_iter_iovec(bio, dio->bio_details.bi_iter);
- const char *mem = bvec_kmap_local(&bv);
+ const char *mem = integrity_kmap(ic, bv.bv_page);
if (ic->tag_size < ic->tuple_size)
memset(dio->integrity_payload + pos + ic->tag_size, 0, ic->tuple_size - ic->tag_size);
- integrity_sector_checksum(ic, dio->bio_details.bi_iter.bi_sector, mem, dio->integrity_payload + pos);
- kunmap_local(mem);
+ integrity_sector_checksum(ic, &dio->ahash_req, dio->bio_details.bi_iter.bi_sector, mem, bv.bv_offset, dio->integrity_payload + pos);
+ integrity_kunmap(ic, mem);
pos += ic->tuple_size;
bio_advance_iter_single(bio, &dio->bio_details.bi_iter, ic->sectors_per_block << SECTOR_SHIFT);
}
@@ -2588,8 +2686,8 @@ static void dm_integrity_inline_recheck(struct work_struct *w)
}
bio_put(outgoing_bio);
- integrity_sector_checksum(ic, dio->bio_details.bi_iter.bi_sector, outgoing_data, digest);
- if (unlikely(crypto_memneq(digest, dio->integrity_payload, min(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)))) {
+ integrity_sector_checksum(ic, &dio->ahash_req, dio->bio_details.bi_iter.bi_sector, integrity_identity(ic, outgoing_data), 0, digest);
+ if (unlikely(crypto_memneq(digest, dio->integrity_payload, min(ic->internal_hash_digestsize, ic->tag_size)))) {
DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
ic->dev->bdev, dio->bio_details.bi_iter.bi_sector);
atomic64_inc(&ic->number_of_mismatches);
@@ -2612,33 +2710,58 @@ static void dm_integrity_inline_recheck(struct work_struct *w)
bio_endio(bio);
}
+static inline bool dm_integrity_check(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
+{
+ struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
+ unsigned pos = 0;
+
+ while (dio->bio_details.bi_iter.bi_size) {
+ char digest[HASH_MAX_DIGESTSIZE];
+ struct bio_vec bv = bio_iter_iovec(bio, dio->bio_details.bi_iter);
+ char *mem = integrity_kmap(ic, bv.bv_page);
+ integrity_sector_checksum(ic, &dio->ahash_req, dio->bio_details.bi_iter.bi_sector, mem, bv.bv_offset, digest);
+ if (unlikely(crypto_memneq(digest, dio->integrity_payload + pos,
+ min(ic->internal_hash_digestsize, ic->tag_size)))) {
+ integrity_kunmap(ic, mem);
+ dm_integrity_free_payload(dio);
+ INIT_WORK(&dio->work, dm_integrity_inline_recheck);
+ queue_work(ic->offload_wq, &dio->work);
+ return false;
+ }
+ integrity_kunmap(ic, mem);
+ pos += ic->tuple_size;
+ bio_advance_iter_single(bio, &dio->bio_details.bi_iter, ic->sectors_per_block << SECTOR_SHIFT);
+ }
+
+ return true;
+}
+
+static void dm_integrity_inline_async_check(struct work_struct *w)
+{
+ struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
+ struct dm_integrity_c *ic = dio->ic;
+ struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
+
+ if (likely(dm_integrity_check(ic, dio)))
+ bio_endio(bio);
+}
+
static int dm_integrity_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *status)
{
struct dm_integrity_c *ic = ti->private;
+ struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
if (ic->mode == 'I') {
- struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
- if (dio->op == REQ_OP_READ && likely(*status == BLK_STS_OK)) {
- unsigned pos = 0;
+ if (dio->op == REQ_OP_READ && likely(*status == BLK_STS_OK) && likely(dio->bio_details.bi_iter.bi_size != 0)) {
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
unlikely(dio->integrity_range_locked))
- goto skip_check;
- while (dio->bio_details.bi_iter.bi_size) {
- char digest[HASH_MAX_DIGESTSIZE];
- struct bio_vec bv = bio_iter_iovec(bio, dio->bio_details.bi_iter);
- char *mem = bvec_kmap_local(&bv);
- //memset(mem, 0xff, ic->sectors_per_block << SECTOR_SHIFT);
- integrity_sector_checksum(ic, dio->bio_details.bi_iter.bi_sector, mem, digest);
- if (unlikely(crypto_memneq(digest, dio->integrity_payload + pos,
- min(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)))) {
- kunmap_local(mem);
- dm_integrity_free_payload(dio);
- INIT_WORK(&dio->work, dm_integrity_inline_recheck);
- queue_work(ic->offload_wq, &dio->work);
+ goto skip_check;
+ if (likely(ic->internal_shash != NULL)) {
+ if (unlikely(!dm_integrity_check(ic, dio)))
return DM_ENDIO_INCOMPLETE;
- }
- kunmap_local(mem);
- pos += ic->tuple_size;
- bio_advance_iter_single(bio, &dio->bio_details.bi_iter, ic->sectors_per_block << SECTOR_SHIFT);
+ } else {
+ INIT_WORK(&dio->work, dm_integrity_inline_async_check);
+ queue_work(ic->offload_wq, &dio->work);
+ return DM_ENDIO_INCOMPLETE;
}
}
skip_check:
@@ -2646,6 +2769,8 @@ skip_check:
if (unlikely(dio->integrity_range_locked))
remove_range(ic, &dio->range);
}
+ if (unlikely(dio->ahash_req))
+ mempool_free(dio->ahash_req, &ic->ahash_req_pool);
return DM_ENDIO_DONE;
}
@@ -2902,9 +3027,12 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned int write_start
#endif
ic->internal_hash) {
char test_tag[MAX_T(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
+ struct journal_sector *js = access_journal_data(ic, i, l);
+ void *js_page = integrity_identity(ic, (char *)js - offset_in_page(js));
+ unsigned js_offset = offset_in_page(js);
- integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
- (char *)access_journal_data(ic, i, l), test_tag);
+ integrity_sector_checksum(ic, &ic->journal_ahash_req, sec + ((l - j) << ic->sb->log2_sectors_per_block),
+ js_page, js_offset, test_tag);
if (unlikely(crypto_memneq(test_tag, journal_entry_tag(ic, je2), ic->tag_size))) {
dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
dm_audit_log_target(DM_MSG_PREFIX, "integrity-replay-journal", ic->ti, 0);
@@ -2987,6 +3115,7 @@ static void integrity_recalc(struct work_struct *w)
size_t recalc_tags_size;
u8 *recalc_buffer = NULL;
u8 *recalc_tags = NULL;
+ struct ahash_request *ahash_req = NULL;
struct dm_integrity_range range;
struct dm_io_request io_req;
struct dm_io_region io_loc;
@@ -3001,7 +3130,7 @@ static void integrity_recalc(struct work_struct *w)
unsigned recalc_sectors = RECALC_SECTORS;
retry:
- recalc_buffer = __vmalloc(recalc_sectors << SECTOR_SHIFT, GFP_NOIO);
+ recalc_buffer = kmalloc(recalc_sectors << SECTOR_SHIFT, GFP_NOIO | __GFP_NOWARN);
if (!recalc_buffer) {
oom:
recalc_sectors >>= 1;
@@ -3011,11 +3140,11 @@ oom:
goto free_ret;
}
recalc_tags_size = (recalc_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
- if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size)
- recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size;
+ if (ic->internal_hash_digestsize > ic->tag_size)
+ recalc_tags_size += ic->internal_hash_digestsize - ic->tag_size;
recalc_tags = kvmalloc(recalc_tags_size, GFP_NOIO);
if (!recalc_tags) {
- vfree(recalc_buffer);
+ kfree(recalc_buffer);
recalc_buffer = NULL;
goto oom;
}
@@ -3081,7 +3210,7 @@ next_chunk:
goto err;
io_req.bi_opf = REQ_OP_READ;
- io_req.mem.type = DM_IO_VMA;
+ io_req.mem.type = DM_IO_KMEM;
io_req.mem.ptr.addr = recalc_buffer;
io_req.notify.fn = NULL;
io_req.client = ic->io;
@@ -3097,7 +3226,10 @@ next_chunk:
t = recalc_tags;
for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
- integrity_sector_checksum(ic, logical_sector + i, recalc_buffer + (i << SECTOR_SHIFT), t);
+ void *ptr = recalc_buffer + (i << SECTOR_SHIFT);
+ void *ptr_page = integrity_identity(ic, (char *)ptr - offset_in_page(ptr));
+ unsigned ptr_offset = offset_in_page(ptr);
+ integrity_sector_checksum(ic, &ahash_req, logical_sector + i, ptr_page, ptr_offset, t);
t += ic->tag_size;
}
@@ -3139,8 +3271,9 @@ unlock_ret:
recalc_write_super(ic);
free_ret:
- vfree(recalc_buffer);
+ kfree(recalc_buffer);
kvfree(recalc_tags);
+ mempool_free(ahash_req, &ic->ahash_req_pool);
}
static void integrity_recalc_inline(struct work_struct *w)
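
The recalculation buffer switches from __vmalloc()/DM_IO_VMA to kmalloc()/DM_IO_KMEM because the ahash path converts buffer addresses with virt_to_page(), which is valid only for linearly-mapped (kmalloc'ed) memory. Allocation failures are handled by halving the chunk size and retrying, roughly:

	unsigned recalc_sectors = RECALC_SECTORS;
retry:
	recalc_buffer = kmalloc(recalc_sectors << SECTOR_SHIFT,
				GFP_NOIO | __GFP_NOWARN);
	if (!recalc_buffer) {
		recalc_sectors >>= 1;
		if (recalc_sectors >= 1U << ic->sb->log2_sectors_per_block)
			goto retry;
		goto free_ret;		/* give up */
	}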
@@ -3149,6 +3282,7 @@ static void integrity_recalc_inline(struct work_struct *w)
size_t recalc_tags_size;
u8 *recalc_buffer = NULL;
u8 *recalc_tags = NULL;
+ struct ahash_request *ahash_req = NULL;
struct dm_integrity_range range;
struct bio *bio;
struct bio_integrity_payload *bip;
@@ -3171,8 +3305,8 @@ oom:
}
recalc_tags_size = (recalc_sectors >> ic->sb->log2_sectors_per_block) * ic->tuple_size;
- if (crypto_shash_digestsize(ic->internal_hash) > ic->tuple_size)
- recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tuple_size;
+ if (ic->internal_hash_digestsize > ic->tuple_size)
+ recalc_tags_size += ic->internal_hash_digestsize - ic->tuple_size;
recalc_tags = kmalloc(recalc_tags_size, GFP_NOIO | __GFP_NOWARN);
if (!recalc_tags) {
kfree(recalc_buffer);
@@ -3217,8 +3351,11 @@ next_chunk:
t = recalc_tags;
for (i = 0; i < range.n_sectors; i += ic->sectors_per_block) {
+ void *ptr = recalc_buffer + (i << SECTOR_SHIFT);
+ void *ptr_page = integrity_identity(ic, (char *)ptr - offset_in_page(ptr));
+ unsigned ptr_offset = offset_in_page(ptr);
memset(t, 0, ic->tuple_size);
- integrity_sector_checksum(ic, range.logical_sector + i, recalc_buffer + (i << SECTOR_SHIFT), t);
+ integrity_sector_checksum(ic, &ahash_req, range.logical_sector + i, ptr_page, ptr_offset, t);
t += ic->tuple_size;
}
@@ -3270,6 +3407,7 @@ unlock_ret:
free_ret:
kfree(recalc_buffer);
kfree(recalc_tags);
+ mempool_free(ahash_req, &ic->ahash_req_pool);
}
static void bitmap_block_work(struct work_struct *w)
@@ -4210,30 +4348,53 @@ nomem:
return -ENOMEM;
}
-static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
- char *error_alg, char *error_key)
+static int get_mac(struct crypto_shash **shash, struct crypto_ahash **ahash,
+ struct alg_spec *a, char **error, char *error_alg, char *error_key)
{
int r;
if (a->alg_string) {
- *hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
- if (IS_ERR(*hash)) {
- *error = error_alg;
- r = PTR_ERR(*hash);
- *hash = NULL;
- return r;
- }
-
- if (a->key) {
- r = crypto_shash_setkey(*hash, a->key, a->key_size);
- if (r) {
+ if (shash) {
+ *shash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
+ if (IS_ERR(*shash)) {
+ *shash = NULL;
+ goto try_ahash;
+ }
+ if (a->key) {
+ r = crypto_shash_setkey(*shash, a->key, a->key_size);
+ if (r) {
+ *error = error_key;
+ return r;
+ }
+ } else if (crypto_shash_get_flags(*shash) & CRYPTO_TFM_NEED_KEY) {
*error = error_key;
+ return -ENOKEY;
+ }
+ return 0;
+ }
+try_ahash:
+ if (ahash) {
+ *ahash = crypto_alloc_ahash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
+ if (IS_ERR(*ahash)) {
+ *error = error_alg;
+ r = PTR_ERR(*ahash);
+ *ahash = NULL;
return r;
}
- } else if (crypto_shash_get_flags(*hash) & CRYPTO_TFM_NEED_KEY) {
- *error = error_key;
- return -ENOKEY;
+ if (a->key) {
+ r = crypto_ahash_setkey(*ahash, a->key, a->key_size);
+ if (r) {
+ *error = error_key;
+ return r;
+ }
+ } else if (crypto_ahash_get_flags(*ahash) & CRYPTO_TFM_NEED_KEY) {
+ *error = error_key;
+ return -ENOKEY;
+ }
+ return 0;
}
+ *error = error_alg;
+ return -ENOENT;
}
return 0;
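
get_mac() now tries the synchronous shash API first and falls back to ahash when no shash implementation can be allocated, for example when only an asynchronous hardware driver provides the algorithm. Hedged sketch of the control flow:

	*shash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
	if (IS_ERR(*shash)) {
		*shash = NULL;	/* e.g. only an async (HW) driver exists */
		*ahash = crypto_alloc_ahash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
		/* key setup and CRYPTO_TFM_NEED_KEY checks mirror the shash path */
	}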
@@ -4690,12 +4851,26 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
buffer_sectors = 1;
ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT);
- r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
+ r = get_mac(&ic->internal_shash, &ic->internal_ahash, &ic->internal_hash_alg, &ti->error,
"Invalid internal hash", "Error setting internal hash key");
if (r)
goto bad;
+ if (ic->internal_shash) {
+ ic->internal_hash = true;
+ ic->internal_hash_digestsize = crypto_shash_digestsize(ic->internal_shash);
+ }
+ if (ic->internal_ahash) {
+ ic->internal_hash = true;
+ ic->internal_hash_digestsize = crypto_ahash_digestsize(ic->internal_ahash);
+ r = mempool_init_kmalloc_pool(&ic->ahash_req_pool, AHASH_MEMPOOL,
+ sizeof(struct ahash_request) + crypto_ahash_reqsize(ic->internal_ahash));
+ if (r) {
+ ti->error = "Cannot allocate mempool";
+ goto bad;
+ }
+ }
- r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
+ r = get_mac(&ic->journal_mac, NULL, &ic->journal_mac_alg, &ti->error,
"Invalid journal mac", "Error setting journal mac key");
if (r)
goto bad;
@@ -4706,7 +4881,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
r = -EINVAL;
goto bad;
}
- ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
+ ic->tag_size = ic->internal_hash_digestsize;
}
if (ic->tag_size > MAX_TAG_SIZE) {
ti->error = "Too big tag size";
@@ -5178,6 +5353,8 @@ static void dm_integrity_dtr(struct dm_target *ti)
kvfree(ic->bbs);
if (ic->bufio)
dm_bufio_client_destroy(ic->bufio);
+ mempool_free(ic->journal_ahash_req, &ic->ahash_req_pool);
+ mempool_exit(&ic->ahash_req_pool);
bioset_exit(&ic->recalc_bios);
bioset_exit(&ic->recheck_bios);
mempool_exit(&ic->recheck_pool);
@@ -5215,8 +5392,10 @@ static void dm_integrity_dtr(struct dm_target *ti)
if (ic->sb)
free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);
- if (ic->internal_hash)
- crypto_free_shash(ic->internal_hash);
+ if (ic->internal_shash)
+ crypto_free_shash(ic->internal_shash);
+ if (ic->internal_ahash)
+ crypto_free_ahash(ic->internal_ahash);
free_alg(&ic->internal_hash_alg);
if (ic->journal_crypt)
@@ -5233,7 +5412,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
static struct target_type integrity_target = {
.name = "integrity",
- .version = {1, 13, 0},
+ .version = {1, 14, 0},
.module = THIS_MODULE,
.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
.ctr = dm_integrity_ctr,
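
The checksum helpers in this patch hide a pointer/page duality: with a shash, data is addressed as mapped kernel virtual memory; with an ahash, it must be addressed as a struct page plus offset so it can be placed in a scatterlist. integrity_identity() bridges the two for linear kernel buffers, essentially:

	/* shash path: return the address itself; ahash path: return its page */
	void *cookie = ic->internal_shash ? data : (void *)virt_to_page(data);

which is why checksum call sites now pass a page-aligned base plus offset_in_page() rather than a raw pointer.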
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 679b07dee229..7bb7174f8f4f 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -414,7 +414,7 @@ static int log_super(struct log_writes_c *lc)
}
/*
- * Super sector should be writen in-order, otherwise the
+ * Super sector should be written in-order, otherwise the
* nr_entries could be rewritten incorrectly by an old bio.
*/
wait_for_completion_io(&lc->super_done);
diff --git a/drivers/md/dm-pcache/Kconfig b/drivers/md/dm-pcache/Kconfig
new file mode 100644
index 000000000000..0e251eca892e
--- /dev/null
+++ b/drivers/md/dm-pcache/Kconfig
@@ -0,0 +1,17 @@
+config DM_PCACHE
+ tristate "Persistent cache for Block Device (Experimental)"
+ depends on BLK_DEV_DM
+ depends on DEV_DAX
+ help
+ PCACHE provides a mechanism to use persistent memory (e.g., CXL persistent memory,
+ DAX-enabled devices) as a high-performance cache layer in front of
+ traditional block devices such as SSDs or HDDs.
+
+ PCACHE is implemented as a kernel module that integrates with the block
+ layer and supports direct access (DAX) to persistent memory for low-latency,
+ byte-addressable caching.
+
+ Note: This feature is experimental and should be tested thoroughly
+ before use in production environments.
+
+ If unsure, say 'N'.
diff --git a/drivers/md/dm-pcache/Makefile b/drivers/md/dm-pcache/Makefile
new file mode 100644
index 000000000000..86776e4acad2
--- /dev/null
+++ b/drivers/md/dm-pcache/Makefile
@@ -0,0 +1,3 @@
+dm-pcache-y := dm_pcache.o cache_dev.o segment.o backing_dev.o cache.o cache_gc.o cache_writeback.o cache_segment.o cache_key.o cache_req.o
+
+obj-m += dm-pcache.o
diff --git a/drivers/md/dm-pcache/backing_dev.c b/drivers/md/dm-pcache/backing_dev.c
new file mode 100644
index 000000000000..7165fc0364bb
--- /dev/null
+++ b/drivers/md/dm-pcache/backing_dev.c
@@ -0,0 +1,374 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/blkdev.h>
+
+#include "../dm-core.h"
+#include "pcache_internal.h"
+#include "cache_dev.h"
+#include "backing_dev.h"
+#include "cache.h"
+#include "dm_pcache.h"
+
+static struct kmem_cache *backing_req_cache;
+static struct kmem_cache *backing_bvec_cache;
+
+static void backing_dev_exit(struct pcache_backing_dev *backing_dev)
+{
+ mempool_exit(&backing_dev->req_pool);
+ mempool_exit(&backing_dev->bvec_pool);
+}
+
+static void req_submit_fn(struct work_struct *work);
+static void req_complete_fn(struct work_struct *work);
+static int backing_dev_init(struct dm_pcache *pcache)
+{
+ struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
+ int ret;
+
+ ret = mempool_init_slab_pool(&backing_dev->req_pool, 128, backing_req_cache);
+ if (ret)
+ goto err;
+
+ ret = mempool_init_slab_pool(&backing_dev->bvec_pool, 128, backing_bvec_cache);
+ if (ret)
+ goto req_pool_exit;
+
+ INIT_LIST_HEAD(&backing_dev->submit_list);
+ INIT_LIST_HEAD(&backing_dev->complete_list);
+ spin_lock_init(&backing_dev->submit_lock);
+ spin_lock_init(&backing_dev->complete_lock);
+ INIT_WORK(&backing_dev->req_submit_work, req_submit_fn);
+ INIT_WORK(&backing_dev->req_complete_work, req_complete_fn);
+ atomic_set(&backing_dev->inflight_reqs, 0);
+ init_waitqueue_head(&backing_dev->inflight_wq);
+
+ return 0;
+
+req_pool_exit:
+ mempool_exit(&backing_dev->req_pool);
+err:
+ return ret;
+}
+
+int backing_dev_start(struct dm_pcache *pcache)
+{
+ struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
+ int ret;
+
+ ret = backing_dev_init(pcache);
+ if (ret)
+ return ret;
+
+ backing_dev->dev_size = bdev_nr_sectors(backing_dev->dm_dev->bdev);
+
+ return 0;
+}
+
+void backing_dev_stop(struct dm_pcache *pcache)
+{
+ struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
+
+ /*
+ * There should not be any new requests coming in; just wait for
+ * inflight requests to complete.
+ */
+ wait_event(backing_dev->inflight_wq,
+ atomic_read(&backing_dev->inflight_reqs) == 0);
+
+ flush_work(&backing_dev->req_submit_work);
+ flush_work(&backing_dev->req_complete_work);
+
+ backing_dev_exit(backing_dev);
+}
+
+/* pcache_backing_dev_req functions */
+void backing_dev_req_end(struct pcache_backing_dev_req *backing_req)
+{
+ struct pcache_backing_dev *backing_dev = backing_req->backing_dev;
+
+ if (backing_req->end_req)
+ backing_req->end_req(backing_req, backing_req->ret);
+
+ switch (backing_req->type) {
+ case BACKING_DEV_REQ_TYPE_REQ:
+ if (backing_req->req.upper_req)
+ pcache_req_put(backing_req->req.upper_req, backing_req->ret);
+ break;
+ case BACKING_DEV_REQ_TYPE_KMEM:
+ if (backing_req->kmem.bvecs != backing_req->kmem.inline_bvecs)
+ mempool_free(backing_req->kmem.bvecs, &backing_dev->bvec_pool);
+ break;
+ default:
+ BUG();
+ }
+
+ mempool_free(backing_req, &backing_dev->req_pool);
+
+ if (atomic_dec_and_test(&backing_dev->inflight_reqs))
+ wake_up(&backing_dev->inflight_wq);
+}
+
+static void req_complete_fn(struct work_struct *work)
+{
+ struct pcache_backing_dev *backing_dev = container_of(work, struct pcache_backing_dev, req_complete_work);
+ struct pcache_backing_dev_req *backing_req;
+ LIST_HEAD(tmp_list);
+
+ spin_lock_irq(&backing_dev->complete_lock);
+ list_splice_init(&backing_dev->complete_list, &tmp_list);
+ spin_unlock_irq(&backing_dev->complete_lock);
+
+ while (!list_empty(&tmp_list)) {
+ backing_req = list_first_entry(&tmp_list,
+ struct pcache_backing_dev_req, node);
+ list_del_init(&backing_req->node);
+ backing_dev_req_end(backing_req);
+ }
+}
+
+static void backing_dev_bio_end(struct bio *bio)
+{
+ struct pcache_backing_dev_req *backing_req = bio->bi_private;
+ struct pcache_backing_dev *backing_dev = backing_req->backing_dev;
+ unsigned long flags;
+
+ backing_req->ret = blk_status_to_errno(bio->bi_status);
+
+ spin_lock_irqsave(&backing_dev->complete_lock, flags);
+ list_move_tail(&backing_req->node, &backing_dev->complete_list);
+ queue_work(BACKING_DEV_TO_PCACHE(backing_dev)->task_wq, &backing_dev->req_complete_work);
+ spin_unlock_irqrestore(&backing_dev->complete_lock, flags);
+}
+
+static void req_submit_fn(struct work_struct *work)
+{
+ struct pcache_backing_dev *backing_dev = container_of(work, struct pcache_backing_dev, req_submit_work);
+ struct pcache_backing_dev_req *backing_req;
+ LIST_HEAD(tmp_list);
+
+ spin_lock(&backing_dev->submit_lock);
+ list_splice_init(&backing_dev->submit_list, &tmp_list);
+ spin_unlock(&backing_dev->submit_lock);
+
+ while (!list_empty(&tmp_list)) {
+ backing_req = list_first_entry(&tmp_list,
+ struct pcache_backing_dev_req, node);
+ list_del_init(&backing_req->node);
+ submit_bio_noacct(&backing_req->bio);
+ }
+}
+
+void backing_dev_req_submit(struct pcache_backing_dev_req *backing_req, bool direct)
+{
+ struct pcache_backing_dev *backing_dev = backing_req->backing_dev;
+
+ if (direct) {
+ submit_bio_noacct(&backing_req->bio);
+ return;
+ }
+
+ spin_lock(&backing_dev->submit_lock);
+ list_add_tail(&backing_req->node, &backing_dev->submit_list);
+ queue_work(BACKING_DEV_TO_PCACHE(backing_dev)->task_wq, &backing_dev->req_submit_work);
+ spin_unlock(&backing_dev->submit_lock);
+}
+
+static void bio_map(struct bio *bio, void *base, size_t size)
+{
+ struct page *page;
+ unsigned int offset;
+ unsigned int len;
+
+ if (!is_vmalloc_addr(base)) {
+ page = virt_to_page(base);
+ offset = offset_in_page(base);
+
+ BUG_ON(!bio_add_page(bio, page, size, offset));
+ return;
+ }
+
+ flush_kernel_vmap_range(base, size);
+ while (size) {
+ page = vmalloc_to_page(base);
+ offset = offset_in_page(base);
+ len = min_t(size_t, PAGE_SIZE - offset, size);
+
+ BUG_ON(!bio_add_page(bio, page, len, offset));
+ size -= len;
+ base += len;
+ }
+}
+
+static struct pcache_backing_dev_req *req_type_req_alloc(struct pcache_backing_dev *backing_dev,
+ struct pcache_backing_dev_req_opts *opts)
+{
+ struct pcache_request *pcache_req = opts->req.upper_req;
+ struct pcache_backing_dev_req *backing_req;
+ struct bio *orig = pcache_req->bio;
+
+ backing_req = mempool_alloc(&backing_dev->req_pool, opts->gfp_mask);
+ if (!backing_req)
+ return NULL;
+
+ memset(backing_req, 0, sizeof(struct pcache_backing_dev_req));
+
+ bio_init_clone(backing_dev->dm_dev->bdev, &backing_req->bio, orig, opts->gfp_mask);
+
+ backing_req->type = BACKING_DEV_REQ_TYPE_REQ;
+ backing_req->backing_dev = backing_dev;
+ atomic_inc(&backing_dev->inflight_reqs);
+
+ return backing_req;
+}
+
+static struct pcache_backing_dev_req *kmem_type_req_alloc(struct pcache_backing_dev *backing_dev,
+ struct pcache_backing_dev_req_opts *opts)
+{
+ struct pcache_backing_dev_req *backing_req;
+ u32 n_vecs = bio_add_max_vecs(opts->kmem.data, opts->kmem.len);
+
+ backing_req = mempool_alloc(&backing_dev->req_pool, opts->gfp_mask);
+ if (!backing_req)
+ return NULL;
+
+ memset(backing_req, 0, sizeof(struct pcache_backing_dev_req));
+
+ if (n_vecs > BACKING_DEV_REQ_INLINE_BVECS) {
+ backing_req->kmem.bvecs = mempool_alloc(&backing_dev->bvec_pool, opts->gfp_mask);
+ if (!backing_req->kmem.bvecs)
+ goto free_backing_req;
+ } else {
+ backing_req->kmem.bvecs = backing_req->kmem.inline_bvecs;
+ }
+
+ backing_req->kmem.n_vecs = n_vecs;
+ backing_req->type = BACKING_DEV_REQ_TYPE_KMEM;
+ backing_req->backing_dev = backing_dev;
+ atomic_inc(&backing_dev->inflight_reqs);
+
+ return backing_req;
+
+free_backing_req:
+ mempool_free(backing_req, &backing_dev->req_pool);
+ return NULL;
+}
+
+struct pcache_backing_dev_req *backing_dev_req_alloc(struct pcache_backing_dev *backing_dev,
+ struct pcache_backing_dev_req_opts *opts)
+{
+ if (opts->type == BACKING_DEV_REQ_TYPE_REQ)
+ return req_type_req_alloc(backing_dev, opts);
+
+ if (opts->type == BACKING_DEV_REQ_TYPE_KMEM)
+ return kmem_type_req_alloc(backing_dev, opts);
+
+ BUG();
+}
+
+static void req_type_req_init(struct pcache_backing_dev_req *backing_req,
+ struct pcache_backing_dev_req_opts *opts)
+{
+ struct pcache_request *pcache_req = opts->req.upper_req;
+ struct bio *clone;
+ u32 off = opts->req.req_off;
+ u32 len = opts->req.len;
+
+ clone = &backing_req->bio;
+ BUG_ON(off & SECTOR_MASK);
+ BUG_ON(len & SECTOR_MASK);
+ bio_trim(clone, off >> SECTOR_SHIFT, len >> SECTOR_SHIFT);
+
+ clone->bi_iter.bi_sector = (pcache_req->off + off) >> SECTOR_SHIFT;
+ clone->bi_private = backing_req;
+ clone->bi_end_io = backing_dev_bio_end;
+
+ INIT_LIST_HEAD(&backing_req->node);
+ backing_req->end_req = opts->end_fn;
+
+ pcache_req_get(pcache_req);
+ backing_req->req.upper_req = pcache_req;
+ backing_req->req.bio_off = off;
+}
+
+static void kmem_type_req_init(struct pcache_backing_dev_req *backing_req,
+ struct pcache_backing_dev_req_opts *opts)
+{
+ struct pcache_backing_dev *backing_dev = backing_req->backing_dev;
+ struct bio *backing_bio;
+
+ bio_init(&backing_req->bio, backing_dev->dm_dev->bdev, backing_req->kmem.bvecs,
+ backing_req->kmem.n_vecs, opts->kmem.opf);
+
+ backing_bio = &backing_req->bio;
+ bio_map(backing_bio, opts->kmem.data, opts->kmem.len);
+
+ backing_bio->bi_iter.bi_sector = (opts->kmem.backing_off) >> SECTOR_SHIFT;
+ backing_bio->bi_private = backing_req;
+ backing_bio->bi_end_io = backing_dev_bio_end;
+
+ INIT_LIST_HEAD(&backing_req->node);
+ backing_req->end_req = opts->end_fn;
+ backing_req->priv_data = opts->priv_data;
+}
+
+void backing_dev_req_init(struct pcache_backing_dev_req *backing_req,
+ struct pcache_backing_dev_req_opts *opts)
+{
+ if (opts->type == BACKING_DEV_REQ_TYPE_REQ)
+ return req_type_req_init(backing_req, opts);
+
+ if (opts->type == BACKING_DEV_REQ_TYPE_KMEM)
+ return kmem_type_req_init(backing_req, opts);
+
+ BUG();
+}
+
+struct pcache_backing_dev_req *backing_dev_req_create(struct pcache_backing_dev *backing_dev,
+ struct pcache_backing_dev_req_opts *opts)
+{
+ struct pcache_backing_dev_req *backing_req;
+
+ backing_req = backing_dev_req_alloc(backing_dev, opts);
+ if (!backing_req)
+ return NULL;
+
+ backing_dev_req_init(backing_req, opts);
+
+ return backing_req;
+}
+
+void backing_dev_flush(struct pcache_backing_dev *backing_dev)
+{
+ blkdev_issue_flush(backing_dev->dm_dev->bdev);
+}
+
+int pcache_backing_init(void)
+{
+ u32 max_bvecs = (PCACHE_CACHE_SUBTREE_SIZE >> PAGE_SHIFT) + 1;
+ int ret;
+
+ backing_req_cache = KMEM_CACHE(pcache_backing_dev_req, 0);
+ if (!backing_req_cache) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ backing_bvec_cache = kmem_cache_create("pcache-bvec-slab",
+ max_bvecs * sizeof(struct bio_vec),
+ 0, 0, NULL);
+ if (!backing_bvec_cache) {
+ ret = -ENOMEM;
+ goto destroy_req_cache;
+ }
+
+ return 0;
+destroy_req_cache:
+ kmem_cache_destroy(backing_req_cache);
+err:
+ return ret;
+}
+
+void pcache_backing_exit(void)
+{
+ kmem_cache_destroy(backing_bvec_cache);
+ kmem_cache_destroy(backing_req_cache);
+}
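
A hedged usage sketch of the request API this file exports (names are from this file; my_end_fn and the buffer values are hypothetical placeholders): issue a kmem-backed write of a kernel buffer to the backing device.

	struct pcache_backing_dev_req_opts opts = {
		.type     = BACKING_DEV_REQ_TYPE_KMEM,
		.gfp_mask = GFP_NOIO,
		.end_fn   = my_end_fn,		/* hypothetical completion callback */
		.kmem = {
			.data        = buf,	/* kmalloc'ed or vmalloc'ed buffer */
			.len         = len,
			.opf         = REQ_OP_WRITE,
			.backing_off = off,	/* byte offset on the backing bdev */
		},
	};
	struct pcache_backing_dev_req *req;

	req = backing_dev_req_create(backing_dev, &opts);
	if (req)
		backing_dev_req_submit(req, false);	/* queue via req_submit_work */

Passing direct = true submits the bio immediately instead of bouncing through the submit list and workqueue.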
diff --git a/drivers/md/dm-pcache/backing_dev.h b/drivers/md/dm-pcache/backing_dev.h
new file mode 100644
index 000000000000..b371cba483b9
--- /dev/null
+++ b/drivers/md/dm-pcache/backing_dev.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _BACKING_DEV_H
+#define _BACKING_DEV_H
+
+#include <linux/device-mapper.h>
+
+#include "pcache_internal.h"
+
+struct pcache_backing_dev_req;
+typedef void (*backing_req_end_fn_t)(struct pcache_backing_dev_req *backing_req, int ret);
+
+#define BACKING_DEV_REQ_TYPE_REQ 1
+#define BACKING_DEV_REQ_TYPE_KMEM 2
+
+#define BACKING_DEV_REQ_INLINE_BVECS 4
+
+struct pcache_request;
+struct pcache_backing_dev_req {
+ u8 type;
+ struct bio bio;
+ struct pcache_backing_dev *backing_dev;
+
+ void *priv_data;
+ backing_req_end_fn_t end_req;
+
+ struct list_head node;
+ int ret;
+
+ union {
+ struct {
+ struct pcache_request *upper_req;
+ u32 bio_off;
+ } req;
+ struct {
+ struct bio_vec inline_bvecs[BACKING_DEV_REQ_INLINE_BVECS];
+ struct bio_vec *bvecs;
+ u32 n_vecs;
+ } kmem;
+ };
+};
+
+struct pcache_backing_dev {
+ struct pcache_cache *cache;
+
+ struct dm_dev *dm_dev;
+ mempool_t req_pool;
+ mempool_t bvec_pool;
+
+ struct list_head submit_list;
+ spinlock_t submit_lock;
+ struct work_struct req_submit_work;
+
+ struct list_head complete_list;
+ spinlock_t complete_lock;
+ struct work_struct req_complete_work;
+
+ atomic_t inflight_reqs;
+ wait_queue_head_t inflight_wq;
+
+ u64 dev_size;
+};
+
+struct dm_pcache;
+int backing_dev_start(struct dm_pcache *pcache);
+void backing_dev_stop(struct dm_pcache *pcache);
+
+struct pcache_backing_dev_req_opts {
+ u32 type;
+ union {
+ struct {
+ struct pcache_request *upper_req;
+ u32 req_off;
+ u32 len;
+ } req;
+ struct {
+ void *data;
+ blk_opf_t opf;
+ u32 len;
+ u64 backing_off;
+ } kmem;
+ };
+
+ gfp_t gfp_mask;
+ backing_req_end_fn_t end_fn;
+ void *priv_data;
+};
+
+static inline u32 backing_dev_req_coalesced_max_len(const void *data, u32 len)
+{
+ const void *p = data;
+ u32 done = 0, in_page, to_advance;
+ struct page *first_page, *next_page;
+
+ if (!is_vmalloc_addr(data))
+ return len;
+
+ first_page = vmalloc_to_page(p);
+advance:
+ in_page = PAGE_SIZE - offset_in_page(p);
+ to_advance = min_t(u32, in_page, len - done);
+
+ done += to_advance;
+ p += to_advance;
+
+ if (done == len)
+ return done;
+
+ next_page = vmalloc_to_page(p);
+ if (zone_device_pages_have_same_pgmap(first_page, next_page))
+ goto advance;
+
+ return done;
+}
+
+void backing_dev_req_submit(struct pcache_backing_dev_req *backing_req, bool direct);
+void backing_dev_req_end(struct pcache_backing_dev_req *backing_req);
+struct pcache_backing_dev_req *backing_dev_req_create(struct pcache_backing_dev *backing_dev,
+ struct pcache_backing_dev_req_opts *opts);
+struct pcache_backing_dev_req *backing_dev_req_alloc(struct pcache_backing_dev *backing_dev,
+ struct pcache_backing_dev_req_opts *opts);
+void backing_dev_req_init(struct pcache_backing_dev_req *backing_req,
+ struct pcache_backing_dev_req_opts *opts);
+void backing_dev_flush(struct pcache_backing_dev *backing_dev);
+
+int pcache_backing_init(void);
+void pcache_backing_exit(void);
+#endif /* _BACKING_DEV_H */
diff --git a/drivers/md/dm-pcache/cache.c b/drivers/md/dm-pcache/cache.c
new file mode 100644
index 000000000000..d8e92367d947
--- /dev/null
+++ b/drivers/md/dm-pcache/cache.c
@@ -0,0 +1,445 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/blk_types.h>
+
+#include "cache.h"
+#include "cache_dev.h"
+#include "backing_dev.h"
+#include "dm_pcache.h"
+
+struct kmem_cache *key_cache;
+
+static inline struct pcache_cache_info *get_cache_info_addr(struct pcache_cache *cache)
+{
+ return cache->cache_info_addr + cache->info_index;
+}
+
+static void cache_info_write(struct pcache_cache *cache)
+{
+ struct pcache_cache_info *cache_info = &cache->cache_info;
+
+ cache_info->header.seq++;
+ cache_info->header.crc = pcache_meta_crc(&cache_info->header,
+ sizeof(struct pcache_cache_info));
+
+ memcpy_flushcache(get_cache_info_addr(cache), cache_info,
+ sizeof(struct pcache_cache_info));
+
+ cache->info_index = (cache->info_index + 1) % PCACHE_META_INDEX_MAX;
+}
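+
+/*
+ * Note: cache_info is persisted to one of PCACHE_META_INDEX_MAX on-media
+ * slots in round-robin order, each copy stamped with an increasing seq and
+ * a crc, so a torn update can corrupt at most one slot and the reader
+ * (pcache_meta_find_latest()) can fall back to the newest copy whose crc
+ * still verifies.
+ */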
+
+static void cache_info_init_default(struct pcache_cache *cache);
+static int cache_info_init(struct pcache_cache *cache, struct pcache_cache_options *opts)
+{
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+ struct pcache_cache_info *cache_info_addr;
+
+ cache_info_addr = pcache_meta_find_latest(&cache->cache_info_addr->header,
+ sizeof(struct pcache_cache_info),
+ PCACHE_CACHE_INFO_SIZE,
+ &cache->cache_info);
+ if (IS_ERR(cache_info_addr))
+ return PTR_ERR(cache_info_addr);
+
+ if (cache_info_addr) {
+ if (opts->data_crc !=
+ (cache->cache_info.flags & PCACHE_CACHE_FLAGS_DATA_CRC)) {
+ pcache_dev_err(pcache, "invalid option for data_crc: %s, expected: %s",
+ opts->data_crc ? "true" : "false",
+ cache->cache_info.flags & PCACHE_CACHE_FLAGS_DATA_CRC ? "true" : "false");
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ /* init cache_info for new cache */
+ cache_info_init_default(cache);
+ cache_mode_set(cache, opts->cache_mode);
+ if (opts->data_crc)
+ cache->cache_info.flags |= PCACHE_CACHE_FLAGS_DATA_CRC;
+
+ return 0;
+}
+
+static void cache_info_set_gc_percent(struct pcache_cache_info *cache_info, u8 percent)
+{
+ cache_info->flags &= ~PCACHE_CACHE_FLAGS_GC_PERCENT_MASK;
+ cache_info->flags |= FIELD_PREP(PCACHE_CACHE_FLAGS_GC_PERCENT_MASK, percent);
+}
+
+int pcache_cache_set_gc_percent(struct pcache_cache *cache, u8 percent)
+{
+ if (percent > PCACHE_CACHE_GC_PERCENT_MAX || percent < PCACHE_CACHE_GC_PERCENT_MIN)
+ return -EINVAL;
+
+ mutex_lock(&cache->cache_info_lock);
+ cache_info_set_gc_percent(&cache->cache_info, percent);
+
+ cache_info_write(cache);
+ mutex_unlock(&cache->cache_info_lock);
+
+ return 0;
+}
+
+void cache_pos_encode(struct pcache_cache *cache,
+ struct pcache_cache_pos_onmedia *pos_onmedia_base,
+ struct pcache_cache_pos *pos, u64 seq, u32 *index)
+{
+ struct pcache_cache_pos_onmedia pos_onmedia;
+ struct pcache_cache_pos_onmedia *pos_onmedia_addr = pos_onmedia_base + *index;
+
+ pos_onmedia.cache_seg_id = pos->cache_seg->cache_seg_id;
+ pos_onmedia.seg_off = pos->seg_off;
+ pos_onmedia.header.seq = seq;
+ pos_onmedia.header.crc = cache_pos_onmedia_crc(&pos_onmedia);
+
+ memcpy_flushcache(pos_onmedia_addr, &pos_onmedia, sizeof(struct pcache_cache_pos_onmedia));
+ pmem_wmb();
+
+ *index = (*index + 1) % PCACHE_META_INDEX_MAX;
+}
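+
+/*
+ * cache_pos_encode() and cache_pos_decode() round-trip (a sketch): decode
+ * scans the PCACHE_META_INDEX_MAX slots, recovers the valid entry with the
+ * highest seq, and reports its slot back through *index:
+ *
+ *	cache_pos_encode(cache, base, &pos, ++seq, &index);
+ *	...
+ *	ret = cache_pos_decode(cache, base, &pos, &seq, &index);
+ */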
+
+int cache_pos_decode(struct pcache_cache *cache,
+ struct pcache_cache_pos_onmedia *pos_onmedia,
+ struct pcache_cache_pos *pos, u64 *seq, u32 *index)
+{
+ struct pcache_cache_pos_onmedia latest, *latest_addr;
+
+ latest_addr = pcache_meta_find_latest(&pos_onmedia->header,
+ sizeof(struct pcache_cache_pos_onmedia),
+ sizeof(struct pcache_cache_pos_onmedia),
+ &latest);
+ if (IS_ERR(latest_addr))
+ return PTR_ERR(latest_addr);
+
+ if (!latest_addr)
+ return -EIO;
+
+ pos->cache_seg = &cache->segments[latest.cache_seg_id];
+ pos->seg_off = latest.seg_off;
+ *seq = latest.header.seq;
+ *index = (latest_addr - pos_onmedia);
+
+ return 0;
+}
+
+static inline void cache_info_set_seg_id(struct pcache_cache *cache, u32 seg_id)
+{
+ cache->cache_info.seg_id = seg_id;
+}
+
+static int cache_init(struct dm_pcache *pcache)
+{
+ struct pcache_cache *cache = &pcache->cache;
+ struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
+ struct pcache_cache_dev *cache_dev = &pcache->cache_dev;
+ int ret;
+
+ cache->segments = kvcalloc(cache_dev->seg_num, sizeof(struct pcache_cache_segment), GFP_KERNEL);
+ if (!cache->segments) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ cache->seg_map = kvcalloc(BITS_TO_LONGS(cache_dev->seg_num), sizeof(unsigned long), GFP_KERNEL);
+ if (!cache->seg_map) {
+ ret = -ENOMEM;
+ goto free_segments;
+ }
+
+ cache->backing_dev = backing_dev;
+ cache->cache_dev = &pcache->cache_dev;
+ cache->n_segs = cache_dev->seg_num;
+ atomic_set(&cache->gc_errors, 0);
+ spin_lock_init(&cache->seg_map_lock);
+ spin_lock_init(&cache->key_head_lock);
+
+ mutex_init(&cache->cache_info_lock);
+ mutex_init(&cache->key_tail_lock);
+ mutex_init(&cache->dirty_tail_lock);
+ mutex_init(&cache->writeback_lock);
+
+ INIT_DELAYED_WORK(&cache->writeback_work, cache_writeback_fn);
+ INIT_DELAYED_WORK(&cache->gc_work, pcache_cache_gc_fn);
+ INIT_WORK(&cache->clean_work, clean_fn);
+
+ return 0;
+
+free_segments:
+ kvfree(cache->segments);
+err:
+ return ret;
+}
+
+static void cache_exit(struct pcache_cache *cache)
+{
+ kvfree(cache->seg_map);
+ kvfree(cache->segments);
+}
+
+static void cache_info_init_default(struct pcache_cache *cache)
+{
+ struct pcache_cache_info *cache_info = &cache->cache_info;
+
+ cache_info->header.seq = 0;
+ cache_info->n_segs = cache->cache_dev->seg_num;
+ cache_info_set_gc_percent(cache_info, PCACHE_CACHE_GC_PERCENT_DEFAULT);
+}
+
+static int cache_tail_init(struct pcache_cache *cache)
+{
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+ bool new_cache = !(cache->cache_info.flags & PCACHE_CACHE_FLAGS_INIT_DONE);
+
+ if (new_cache) {
+ __set_bit(0, cache->seg_map);
+
+ cache->key_head.cache_seg = &cache->segments[0];
+ cache->key_head.seg_off = 0;
+ cache_pos_copy(&cache->key_tail, &cache->key_head);
+ cache_pos_copy(&cache->dirty_tail, &cache->key_head);
+
+ cache_encode_dirty_tail(cache);
+ cache_encode_key_tail(cache);
+ } else {
+ if (cache_decode_key_tail(cache) || cache_decode_dirty_tail(cache)) {
+ pcache_dev_err(pcache, "Corrupted key tail or dirty tail.\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int get_seg_id(struct pcache_cache *cache,
+ struct pcache_cache_segment *prev_cache_seg,
+ bool new_cache, u32 *seg_id)
+{
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+ struct pcache_cache_dev *cache_dev = cache->cache_dev;
+ int ret;
+
+ if (new_cache) {
+ ret = cache_dev_get_empty_segment_id(cache_dev, seg_id);
+ if (ret) {
+ pcache_dev_err(pcache, "no available segment\n");
+ goto err;
+ }
+
+ if (prev_cache_seg)
+ cache_seg_set_next_seg(prev_cache_seg, *seg_id);
+ else
+ cache_info_set_seg_id(cache, *seg_id);
+ } else {
+ if (prev_cache_seg) {
+ struct pcache_segment_info *prev_seg_info;
+
+ prev_seg_info = &prev_cache_seg->cache_seg_info;
+ if (!segment_info_has_next(prev_seg_info)) {
+ ret = -EFAULT;
+ goto err;
+ }
+ *seg_id = prev_cache_seg->cache_seg_info.next_seg;
+ } else {
+ *seg_id = cache->cache_info.seg_id;
+ }
+ }
+ return 0;
+err:
+ return ret;
+}
+
+static int cache_segs_init(struct pcache_cache *cache)
+{
+ struct pcache_cache_segment *prev_cache_seg = NULL;
+ struct pcache_cache_info *cache_info = &cache->cache_info;
+ bool new_cache = !(cache->cache_info.flags & PCACHE_CACHE_FLAGS_INIT_DONE);
+ u32 seg_id;
+ int ret;
+ u32 i;
+
+ for (i = 0; i < cache_info->n_segs; i++) {
+ ret = get_seg_id(cache, prev_cache_seg, new_cache, &seg_id);
+ if (ret)
+ goto err;
+
+ ret = cache_seg_init(cache, seg_id, i, new_cache);
+ if (ret)
+ goto err;
+
+ prev_cache_seg = &cache->segments[i];
+ }
+ return 0;
+err:
+ return ret;
+}
+
+static int cache_init_req_keys(struct pcache_cache *cache, u32 n_paral)
+{
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+ u32 n_subtrees;
+ int ret;
+ u32 i, cpu;
+
+	/* Calculate the number of cache subtrees based on the device size */
+ n_subtrees = DIV_ROUND_UP(cache->dev_size << SECTOR_SHIFT, PCACHE_CACHE_SUBTREE_SIZE);
+ ret = cache_tree_init(cache, &cache->req_key_tree, n_subtrees);
+ if (ret)
+ goto err;
+
+ cache->n_ksets = n_paral;
+ cache->ksets = kvcalloc(cache->n_ksets, PCACHE_KSET_SIZE, GFP_KERNEL);
+ if (!cache->ksets) {
+ ret = -ENOMEM;
+ goto req_tree_exit;
+ }
+
+ /*
+ * Initialize each kset with a spinlock and delayed work for flushing.
+ * Each kset is associated with one queue to ensure independent handling
+ * of cache keys across multiple queues, maximizing multiqueue concurrency.
+ */
+ for (i = 0; i < cache->n_ksets; i++) {
+ struct pcache_cache_kset *kset = get_kset(cache, i);
+
+ kset->cache = cache;
+ spin_lock_init(&kset->kset_lock);
+ INIT_DELAYED_WORK(&kset->flush_work, kset_flush_fn);
+ }
+
+ cache->data_heads = alloc_percpu(struct pcache_cache_data_head);
+ if (!cache->data_heads) {
+ ret = -ENOMEM;
+ goto free_kset;
+ }
+
+ for_each_possible_cpu(cpu) {
+ struct pcache_cache_data_head *h =
+ per_cpu_ptr(cache->data_heads, cpu);
+ h->head_pos.cache_seg = NULL;
+ }
+
+ /*
+ * Replay persisted cache keys using cache_replay.
+ * This function loads and replays cache keys from previously stored
+ * ksets, allowing the cache to restore its state after a restart.
+ */
+ ret = cache_replay(cache);
+ if (ret) {
+ pcache_dev_err(pcache, "failed to replay keys\n");
+ goto free_heads;
+ }
+
+ return 0;
+
+free_heads:
+ free_percpu(cache->data_heads);
+free_kset:
+ kvfree(cache->ksets);
+req_tree_exit:
+ cache_tree_exit(&cache->req_key_tree);
+err:
+ return ret;
+}
+
+static void cache_destroy_req_keys(struct pcache_cache *cache)
+{
+ u32 i;
+
+ for (i = 0; i < cache->n_ksets; i++) {
+ struct pcache_cache_kset *kset = get_kset(cache, i);
+
+ cancel_delayed_work_sync(&kset->flush_work);
+ }
+
+ free_percpu(cache->data_heads);
+ kvfree(cache->ksets);
+ cache_tree_exit(&cache->req_key_tree);
+}
+
+int pcache_cache_start(struct dm_pcache *pcache)
+{
+ struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
+ struct pcache_cache *cache = &pcache->cache;
+ struct pcache_cache_options *opts = &pcache->opts;
+ int ret;
+
+ ret = cache_init(pcache);
+ if (ret)
+ return ret;
+
+ cache->cache_info_addr = CACHE_DEV_CACHE_INFO(cache->cache_dev);
+ cache->cache_ctrl = CACHE_DEV_CACHE_CTRL(cache->cache_dev);
+ backing_dev->cache = cache;
+ cache->dev_size = backing_dev->dev_size;
+
+ ret = cache_info_init(cache, opts);
+ if (ret)
+ goto cache_exit;
+
+ ret = cache_segs_init(cache);
+ if (ret)
+ goto cache_exit;
+
+ ret = cache_tail_init(cache);
+ if (ret)
+ goto cache_exit;
+
+ ret = cache_init_req_keys(cache, num_online_cpus());
+ if (ret)
+ goto cache_exit;
+
+ ret = cache_writeback_init(cache);
+ if (ret)
+ goto destroy_keys;
+
+ cache->cache_info.flags |= PCACHE_CACHE_FLAGS_INIT_DONE;
+ cache_info_write(cache);
+ queue_delayed_work(cache_get_wq(cache), &cache->gc_work, 0);
+
+ return 0;
+
+destroy_keys:
+ cache_destroy_req_keys(cache);
+cache_exit:
+ cache_exit(cache);
+
+ return ret;
+}
+
+void pcache_cache_stop(struct dm_pcache *pcache)
+{
+ struct pcache_cache *cache = &pcache->cache;
+
+ cache_flush(cache);
+
+ cancel_delayed_work_sync(&cache->gc_work);
+ flush_work(&cache->clean_work);
+ cache_writeback_exit(cache);
+
+ if (cache->req_key_tree.n_subtrees)
+ cache_destroy_req_keys(cache);
+
+ cache_exit(cache);
+}
+
+struct workqueue_struct *cache_get_wq(struct pcache_cache *cache)
+{
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+
+ return pcache->task_wq;
+}
+
+int pcache_cache_init(void)
+{
+ key_cache = KMEM_CACHE(pcache_cache_key, 0);
+ if (!key_cache)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void pcache_cache_exit(void)
+{
+ kmem_cache_destroy(key_cache);
+}
diff --git a/drivers/md/dm-pcache/cache.h b/drivers/md/dm-pcache/cache.h
new file mode 100644
index 000000000000..1136d86958c8
--- /dev/null
+++ b/drivers/md/dm-pcache/cache.h
@@ -0,0 +1,635 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _PCACHE_CACHE_H
+#define _PCACHE_CACHE_H
+
+#include "segment.h"
+
+/* Garbage collection thresholds */
+#define PCACHE_CACHE_GC_PERCENT_MIN 0 /* Minimum GC percentage */
+#define PCACHE_CACHE_GC_PERCENT_MAX 90 /* Maximum GC percentage */
+#define PCACHE_CACHE_GC_PERCENT_DEFAULT 70 /* Default GC percentage */
+
+#define PCACHE_CACHE_SUBTREE_SIZE (4 * PCACHE_MB) /* 4MB total tree size */
+#define PCACHE_CACHE_SUBTREE_SIZE_MASK 0x3FFFFF /* Mask for tree size */
+#define PCACHE_CACHE_SUBTREE_SIZE_SHIFT 22 /* Bit shift for tree size */
+
+/* Maximum number of keys per key set */
+#define PCACHE_KSET_KEYS_MAX 128
+#define PCACHE_CACHE_SEGS_MAX (1024 * 1024) /* maximum cache size for each device is 16T */
+#define PCACHE_KSET_ONMEDIA_SIZE_MAX struct_size_t(struct pcache_cache_kset_onmedia, data, PCACHE_KSET_KEYS_MAX)
+#define PCACHE_KSET_SIZE (sizeof(struct pcache_cache_kset) + sizeof(struct pcache_cache_key_onmedia) * PCACHE_KSET_KEYS_MAX)
+
+/* Maximum number of keys to clean in one round of clean_work */
+#define PCACHE_CLEAN_KEYS_MAX 10
+
+/* Writeback and garbage collection intervals in jiffies */
+#define PCACHE_CACHE_WRITEBACK_INTERVAL (5 * HZ)
+#define PCACHE_CACHE_GC_INTERVAL (5 * HZ)
+
+/* Macro to get the cache key structure from an rb_node pointer */
+#define CACHE_KEY(node) (container_of(node, struct pcache_cache_key, rb_node))
+
+struct pcache_cache_pos_onmedia {
+ struct pcache_meta_header header;
+ __u32 cache_seg_id;
+ __u32 seg_off;
+};
+
+/* Offset and size definitions for cache segment control */
+#define PCACHE_CACHE_SEG_CTRL_OFF (PCACHE_SEG_INFO_SIZE * PCACHE_META_INDEX_MAX)
+#define PCACHE_CACHE_SEG_CTRL_SIZE (4 * PCACHE_KB)
+
+struct pcache_cache_seg_gen {
+ struct pcache_meta_header header;
+ __u64 gen;
+};
+
+/* Control structure for cache segments */
+struct pcache_cache_seg_ctrl {
+ struct pcache_cache_seg_gen gen[PCACHE_META_INDEX_MAX];
+ __u64 res[64];
+};
+
+#define PCACHE_CACHE_FLAGS_DATA_CRC BIT(0)
+#define PCACHE_CACHE_FLAGS_INIT_DONE BIT(1)
+
+#define PCACHE_CACHE_FLAGS_CACHE_MODE_MASK GENMASK(5, 2)
+#define PCACHE_CACHE_MODE_WRITEBACK 0
+#define PCACHE_CACHE_MODE_WRITETHROUGH 1
+#define PCACHE_CACHE_MODE_WRITEAROUND 2
+#define PCACHE_CACHE_MODE_WRITEONLY 3
+
+#define PCACHE_CACHE_FLAGS_GC_PERCENT_MASK GENMASK(12, 6)
+
+struct pcache_cache_info {
+ struct pcache_meta_header header;
+ __u32 seg_id;
+ __u32 n_segs;
+ __u32 flags;
+ __u32 reserved;
+};
+
+struct pcache_cache_pos {
+ struct pcache_cache_segment *cache_seg;
+ u32 seg_off;
+};
+
+struct pcache_cache_segment {
+ struct pcache_cache *cache;
+ u32 cache_seg_id; /* Index in cache->segments */
+ struct pcache_segment segment;
+ atomic_t refs;
+
+ struct pcache_segment_info cache_seg_info;
+ struct mutex info_lock;
+ u32 info_index;
+
+ spinlock_t gen_lock;
+ u64 gen;
+ u64 gen_seq;
+ u32 gen_index;
+
+ struct pcache_cache_seg_ctrl *cache_seg_ctrl;
+};
+
+/* rbtree for cache entries */
+struct pcache_cache_subtree {
+ struct rb_root root;
+ spinlock_t tree_lock;
+};
+
+struct pcache_cache_tree {
+ struct pcache_cache *cache;
+ u32 n_subtrees;
+ mempool_t key_pool;
+ struct pcache_cache_subtree *subtrees;
+};
+
+extern struct kmem_cache *key_cache;
+
+struct pcache_cache_key {
+ struct pcache_cache_tree *cache_tree;
+ struct pcache_cache_subtree *cache_subtree;
+ struct kref ref;
+ struct rb_node rb_node;
+ struct list_head list_node;
+ u64 off;
+ u32 len;
+ u32 flags;
+ struct pcache_cache_pos cache_pos;
+ u64 seg_gen;
+};
+
+#define PCACHE_CACHE_KEY_FLAGS_EMPTY BIT(0)
+#define PCACHE_CACHE_KEY_FLAGS_CLEAN BIT(1)
+
+struct pcache_cache_key_onmedia {
+ __u64 off;
+ __u32 len;
+ __u32 flags;
+ __u32 cache_seg_id;
+ __u32 cache_seg_off;
+ __u64 seg_gen;
+ __u32 data_crc;
+ __u32 reserved;
+};
+
+struct pcache_cache_kset_onmedia {
+ __u32 crc;
+ union {
+ __u32 key_num;
+ __u32 next_cache_seg_id;
+ };
+ __u64 magic;
+ __u64 flags;
+ struct pcache_cache_key_onmedia data[];
+};
+
+struct pcache_cache {
+ struct pcache_backing_dev *backing_dev;
+ struct pcache_cache_dev *cache_dev;
+ struct pcache_cache_ctrl *cache_ctrl;
+ u64 dev_size;
+
+ struct pcache_cache_data_head __percpu *data_heads;
+
+ spinlock_t key_head_lock;
+ struct pcache_cache_pos key_head;
+ u32 n_ksets;
+ struct pcache_cache_kset *ksets;
+
+ struct mutex key_tail_lock;
+ struct pcache_cache_pos key_tail;
+ u64 key_tail_seq;
+ u32 key_tail_index;
+
+ struct mutex dirty_tail_lock;
+ struct pcache_cache_pos dirty_tail;
+ u64 dirty_tail_seq;
+ u32 dirty_tail_index;
+
+ struct pcache_cache_tree req_key_tree;
+ struct work_struct clean_work;
+
+ struct mutex writeback_lock;
+ char wb_kset_onmedia_buf[PCACHE_KSET_ONMEDIA_SIZE_MAX];
+ struct pcache_cache_tree writeback_key_tree;
+ struct delayed_work writeback_work;
+ struct {
+ atomic_t pending;
+ u32 advance;
+ int ret;
+ } writeback_ctx;
+
+ char gc_kset_onmedia_buf[PCACHE_KSET_ONMEDIA_SIZE_MAX];
+ struct delayed_work gc_work;
+ atomic_t gc_errors;
+
+ struct mutex cache_info_lock;
+ struct pcache_cache_info cache_info;
+ struct pcache_cache_info *cache_info_addr;
+ u32 info_index;
+
+ u32 n_segs;
+ unsigned long *seg_map;
+ u32 last_cache_seg;
+ bool cache_full;
+ spinlock_t seg_map_lock;
+ struct pcache_cache_segment *segments;
+};
+
+struct workqueue_struct *cache_get_wq(struct pcache_cache *cache);
+
+struct dm_pcache;
+struct pcache_cache_options {
+ u32 cache_mode:4;
+ u32 data_crc:1;
+};
+int pcache_cache_start(struct dm_pcache *pcache);
+void pcache_cache_stop(struct dm_pcache *pcache);
+
+struct pcache_cache_ctrl {
+ /* Updated by gc_thread */
+ struct pcache_cache_pos_onmedia key_tail_pos[PCACHE_META_INDEX_MAX];
+
+ /* Updated by writeback_thread */
+ struct pcache_cache_pos_onmedia dirty_tail_pos[PCACHE_META_INDEX_MAX];
+};
+
+struct pcache_cache_data_head {
+ struct pcache_cache_pos head_pos;
+};
+
+static inline u16 pcache_cache_get_gc_percent(struct pcache_cache *cache)
+{
+ return FIELD_GET(PCACHE_CACHE_FLAGS_GC_PERCENT_MASK, cache->cache_info.flags);
+}
+
+int pcache_cache_set_gc_percent(struct pcache_cache *cache, u8 percent);
+
+/* cache key */
+struct pcache_cache_key *cache_key_alloc(struct pcache_cache_tree *cache_tree, gfp_t gfp_mask);
+void cache_key_init(struct pcache_cache_tree *cache_tree, struct pcache_cache_key *key);
+void cache_key_get(struct pcache_cache_key *key);
+void cache_key_put(struct pcache_cache_key *key);
+int cache_key_append(struct pcache_cache *cache, struct pcache_cache_key *key, bool force_close);
+void cache_key_insert(struct pcache_cache_tree *cache_tree, struct pcache_cache_key *key, bool fixup);
+int cache_key_decode(struct pcache_cache *cache,
+ struct pcache_cache_key_onmedia *key_onmedia,
+ struct pcache_cache_key *key);
+void cache_pos_advance(struct pcache_cache_pos *pos, u32 len);
+
+#define PCACHE_KSET_FLAGS_LAST BIT(0)
+#define PCACHE_KSET_MAGIC 0x676894a64e164f1aULL
+
+struct pcache_cache_kset {
+ struct pcache_cache *cache;
+ spinlock_t kset_lock;
+ struct delayed_work flush_work;
+ struct pcache_cache_kset_onmedia kset_onmedia;
+};
+
+extern struct pcache_cache_kset_onmedia pcache_empty_kset;
+
+#define SUBTREE_WALK_RET_OK 0
+#define SUBTREE_WALK_RET_ERR 1
+#define SUBTREE_WALK_RET_NEED_KEY 2
+#define SUBTREE_WALK_RET_NEED_REQ 3
+#define SUBTREE_WALK_RET_RESEARCH 4
+
+struct pcache_cache_subtree_walk_ctx {
+ struct pcache_cache_tree *cache_tree;
+ struct rb_node *start_node;
+ struct pcache_request *pcache_req;
+ struct pcache_cache_key *key;
+ u32 req_done;
+ int ret;
+
+ /* pre-allocated key and backing_dev_req */
+ struct pcache_cache_key *pre_alloc_key;
+ struct pcache_backing_dev_req *pre_alloc_req;
+
+ struct list_head *delete_key_list;
+ struct list_head *submit_req_list;
+
+ /*
+ * |--------| key_tmp
+ * |====| key
+ */
+ int (*before)(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx);
+
+ /*
+ * |----------| key_tmp
+ * |=====| key
+ */
+ int (*after)(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx);
+
+ /*
+ * |----------------| key_tmp
+ * |===========| key
+ */
+ int (*overlap_tail)(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx);
+
+ /*
+ * |--------| key_tmp
+ * |==========| key
+ */
+ int (*overlap_head)(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx);
+
+ /*
+ * |----| key_tmp
+ * |==========| key
+ */
+ int (*overlap_contain)(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx);
+
+ /*
+ * |-----------| key_tmp
+ * |====| key
+ */
+ int (*overlap_contained)(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx);
+
+ int (*walk_finally)(struct pcache_cache_subtree_walk_ctx *ctx, int ret);
+ bool (*walk_done)(struct pcache_cache_subtree_walk_ctx *ctx);
+};
+
+int cache_subtree_walk(struct pcache_cache_subtree_walk_ctx *ctx);
+struct rb_node *cache_subtree_search(struct pcache_cache_subtree *cache_subtree, struct pcache_cache_key *key,
+ struct rb_node **parentp, struct rb_node ***newp,
+ struct list_head *delete_key_list);
+int cache_kset_close(struct pcache_cache *cache, struct pcache_cache_kset *kset);
+void clean_fn(struct work_struct *work);
+void kset_flush_fn(struct work_struct *work);
+int cache_replay(struct pcache_cache *cache);
+int cache_tree_init(struct pcache_cache *cache, struct pcache_cache_tree *cache_tree, u32 n_subtrees);
+void cache_tree_clear(struct pcache_cache_tree *cache_tree);
+void cache_tree_exit(struct pcache_cache_tree *cache_tree);
+
+/* cache segments */
+struct pcache_cache_segment *get_cache_segment(struct pcache_cache *cache);
+int cache_seg_init(struct pcache_cache *cache, u32 seg_id, u32 cache_seg_id,
+ bool new_cache);
+void cache_seg_get(struct pcache_cache_segment *cache_seg);
+void cache_seg_put(struct pcache_cache_segment *cache_seg);
+void cache_seg_set_next_seg(struct pcache_cache_segment *cache_seg, u32 seg_id);
+
+/* cache request*/
+int cache_flush(struct pcache_cache *cache);
+void miss_read_end_work_fn(struct work_struct *work);
+int pcache_cache_handle_req(struct pcache_cache *cache, struct pcache_request *pcache_req);
+
+/* gc */
+void pcache_cache_gc_fn(struct work_struct *work);
+
+/* writeback */
+void cache_writeback_exit(struct pcache_cache *cache);
+int cache_writeback_init(struct pcache_cache *cache);
+void cache_writeback_fn(struct work_struct *work);
+
+/* inline functions */
+static inline struct pcache_cache_subtree *get_subtree(struct pcache_cache_tree *cache_tree, u64 off)
+{
+ if (cache_tree->n_subtrees == 1)
+ return &cache_tree->subtrees[0];
+
+ return &cache_tree->subtrees[off >> PCACHE_CACHE_SUBTREE_SIZE_SHIFT];
+}
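+
+/*
+ * Example: with PCACHE_CACHE_SUBTREE_SIZE_SHIFT == 22 (4MB subtrees), a
+ * logical offset of 5MB maps to subtrees[0x500000 >> 22] == subtrees[1],
+ * while any offset below 4MB maps to subtrees[0].
+ */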
+
+static inline void *cache_pos_addr(struct pcache_cache_pos *pos)
+{
+ return (pos->cache_seg->segment.data + pos->seg_off);
+}
+
+static inline void *get_key_head_addr(struct pcache_cache *cache)
+{
+ return cache_pos_addr(&cache->key_head);
+}
+
+static inline u32 get_kset_id(struct pcache_cache *cache, u64 off)
+{
+ u32 kset_id;
+
+ div_u64_rem(off >> PCACHE_CACHE_SUBTREE_SIZE_SHIFT, cache->n_ksets, &kset_id);
+
+ return kset_id;
+}
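+
+/*
+ * Example: with n_ksets == 4, subtree indexes 0,1,2,3,4,5 map to kset ids
+ * 0,1,2,3,0,1, so all keys of one 4MB subtree always land in the same kset
+ * while different subtrees are spread across the ksets.
+ */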
+
+static inline struct pcache_cache_kset *get_kset(struct pcache_cache *cache, u32 kset_id)
+{
+ return (void *)cache->ksets + PCACHE_KSET_SIZE * kset_id;
+}
+
+static inline struct pcache_cache_data_head *get_data_head(struct pcache_cache *cache)
+{
+ return this_cpu_ptr(cache->data_heads);
+}
+
+static inline bool cache_key_empty(struct pcache_cache_key *key)
+{
+ return key->flags & PCACHE_CACHE_KEY_FLAGS_EMPTY;
+}
+
+static inline bool cache_key_clean(struct pcache_cache_key *key)
+{
+ return key->flags & PCACHE_CACHE_KEY_FLAGS_CLEAN;
+}
+
+static inline void cache_pos_copy(struct pcache_cache_pos *dst, struct pcache_cache_pos *src)
+{
+ memcpy(dst, src, sizeof(struct pcache_cache_pos));
+}
+
+/**
+ * cache_seg_is_ctrl_seg - Checks if a cache segment is a cache ctrl segment.
+ * @cache_seg_id: ID of the cache segment.
+ *
+ * Returns true if the cache segment ID corresponds to a cache ctrl segment.
+ *
+ * Note: We extend the segment control of the first cache segment
+ * (cache segment ID 0) to serve as the cache control (pcache_cache_ctrl)
+ * for the entire PCACHE cache. This function determines whether the given
+ * cache segment is the one storing the pcache_cache_ctrl information.
+ */
+static inline bool cache_seg_is_ctrl_seg(u32 cache_seg_id)
+{
+ return (cache_seg_id == 0);
+}
+
+/**
+ * cache_key_cutfront - Cuts a specified length from the front of a cache key.
+ * @key: Pointer to pcache_cache_key structure.
+ * @cut_len: Length to cut from the front.
+ *
+ * Advances the cache key position by cut_len and adjusts offset and length accordingly.
+ */
+static inline void cache_key_cutfront(struct pcache_cache_key *key, u32 cut_len)
+{
+ if (key->cache_pos.cache_seg)
+ cache_pos_advance(&key->cache_pos, cut_len);
+
+ key->off += cut_len;
+ key->len -= cut_len;
+}
+
+/**
+ * cache_key_cutback - Cuts a specified length from the back of a cache key.
+ * @key: Pointer to pcache_cache_key structure.
+ * @cut_len: Length to cut from the back.
+ *
+ * Reduces the length of the cache key by cut_len.
+ */
+static inline void cache_key_cutback(struct pcache_cache_key *key, u32 cut_len)
+{
+ key->len -= cut_len;
+}
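+
+/*
+ * Example: for a key covering [off=8192, len=4096),
+ * cache_key_cutfront(key, 1024) leaves off=9216, len=3072 and advances
+ * cache_pos by 1024, while cache_key_cutback(key, 1024) on the original
+ * key would leave off=8192, len=3072 with cache_pos untouched.
+ */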
+
+static inline void cache_key_delete(struct pcache_cache_key *key)
+{
+ struct pcache_cache_subtree *cache_subtree;
+
+ cache_subtree = key->cache_subtree;
+ BUG_ON(!cache_subtree);
+
+ rb_erase(&key->rb_node, &cache_subtree->root);
+ key->flags = 0;
+ cache_key_put(key);
+}
+
+static inline bool cache_data_crc_on(struct pcache_cache *cache)
+{
+ return (cache->cache_info.flags & PCACHE_CACHE_FLAGS_DATA_CRC);
+}
+
+static inline u32 cache_mode_get(struct pcache_cache *cache)
+{
+ return FIELD_GET(PCACHE_CACHE_FLAGS_CACHE_MODE_MASK, cache->cache_info.flags);
+}
+
+static inline void cache_mode_set(struct pcache_cache *cache, u32 cache_mode)
+{
+ cache->cache_info.flags &= ~PCACHE_CACHE_FLAGS_CACHE_MODE_MASK;
+ cache->cache_info.flags |= FIELD_PREP(PCACHE_CACHE_FLAGS_CACHE_MODE_MASK, cache_mode);
+}
+
+/**
+ * cache_key_data_crc - Calculates CRC for data in a cache key.
+ * @key: Pointer to the pcache_cache_key structure.
+ *
+ * Returns the CRC-32 checksum of the data within the cache key's position.
+ */
+static inline u32 cache_key_data_crc(struct pcache_cache_key *key)
+{
+ void *data;
+
+ data = cache_pos_addr(&key->cache_pos);
+
+ return crc32c(PCACHE_CRC_SEED, data, key->len);
+}
+
+static inline u32 cache_kset_crc(struct pcache_cache_kset_onmedia *kset_onmedia)
+{
+ u32 crc_size;
+
+ if (kset_onmedia->flags & PCACHE_KSET_FLAGS_LAST)
+ crc_size = sizeof(struct pcache_cache_kset_onmedia) - 4;
+ else
+ crc_size = struct_size(kset_onmedia, data, kset_onmedia->key_num) - 4;
+
+ return crc32c(PCACHE_CRC_SEED, (void *)kset_onmedia + 4, crc_size);
+}
+
+static inline u32 get_kset_onmedia_size(struct pcache_cache_kset_onmedia *kset_onmedia)
+{
+ return struct_size_t(struct pcache_cache_kset_onmedia, data, kset_onmedia->key_num);
+}
+
+/**
+ * cache_seg_remain - Computes remaining space in a cache segment.
+ * @pos: Pointer to pcache_cache_pos structure.
+ *
+ * Returns the amount of remaining space in the segment data starting from
+ * the current position offset.
+ */
+static inline u32 cache_seg_remain(struct pcache_cache_pos *pos)
+{
+ struct pcache_cache_segment *cache_seg;
+ struct pcache_segment *segment;
+ u32 seg_remain;
+
+ cache_seg = pos->cache_seg;
+ segment = &cache_seg->segment;
+ seg_remain = segment->data_size - pos->seg_off;
+
+ return seg_remain;
+}
+
+/**
+ * cache_key_invalid - Checks if a cache key is invalid.
+ * @key: Pointer to pcache_cache_key structure.
+ *
+ * Returns true if the cache key is invalid due to its generation being
+ * less than the generation of its segment; otherwise returns false.
+ *
+ * When the GC (garbage collection) thread identifies a segment
+ * as reclaimable, it increments the segment's generation (gen). However,
+ * it does not immediately remove all related cache keys. When accessing
+ * such a cache key, this function can be used to determine if the cache
+ * key has already become invalid.
+ */
+static inline bool cache_key_invalid(struct pcache_cache_key *key)
+{
+ if (cache_key_empty(key))
+ return false;
+
+ return (key->seg_gen < key->cache_pos.cache_seg->gen);
+}
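+
+/*
+ * Example: a key appended while its segment had gen 3 records seg_gen == 3;
+ * once gc reclaims that segment and bumps cache_seg->gen to 4, the key is
+ * reported invalid and gets queued on the delete_key_list during the next
+ * cache_subtree_search().
+ */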
+
+/**
+ * cache_key_lstart - Retrieves the logical start offset of a cache key.
+ * @key: Pointer to pcache_cache_key structure.
+ *
+ * Returns the logical start offset for the cache key.
+ */
+static inline u64 cache_key_lstart(struct pcache_cache_key *key)
+{
+ return key->off;
+}
+
+/**
+ * cache_key_lend - Retrieves the logical end offset of a cache key.
+ * @key: Pointer to pcache_cache_key structure.
+ *
+ * Returns the logical end offset for the cache key.
+ */
+static inline u64 cache_key_lend(struct pcache_cache_key *key)
+{
+ return key->off + key->len;
+}
+
+static inline void cache_key_copy(struct pcache_cache_key *key_dst, struct pcache_cache_key *key_src)
+{
+ key_dst->off = key_src->off;
+ key_dst->len = key_src->len;
+ key_dst->seg_gen = key_src->seg_gen;
+ key_dst->cache_tree = key_src->cache_tree;
+ key_dst->cache_subtree = key_src->cache_subtree;
+ key_dst->flags = key_src->flags;
+
+ cache_pos_copy(&key_dst->cache_pos, &key_src->cache_pos);
+}
+
+/**
+ * cache_pos_onmedia_crc - Calculates the CRC for an on-media cache position.
+ * @pos_om: Pointer to pcache_cache_pos_onmedia structure.
+ *
+ * Calculates the CRC-32 checksum of the position, excluding the first 4 bytes.
+ * Returns the computed CRC value.
+ */
+static inline u32 cache_pos_onmedia_crc(struct pcache_cache_pos_onmedia *pos_om)
+{
+ return pcache_meta_crc(&pos_om->header, sizeof(struct pcache_cache_pos_onmedia));
+}
+
+void cache_pos_encode(struct pcache_cache *cache,
+ struct pcache_cache_pos_onmedia *pos_onmedia,
+ struct pcache_cache_pos *pos, u64 seq, u32 *index);
+int cache_pos_decode(struct pcache_cache *cache,
+ struct pcache_cache_pos_onmedia *pos_onmedia,
+ struct pcache_cache_pos *pos, u64 *seq, u32 *index);
+
+static inline void cache_encode_key_tail(struct pcache_cache *cache)
+{
+ cache_pos_encode(cache, cache->cache_ctrl->key_tail_pos,
+ &cache->key_tail, ++cache->key_tail_seq,
+ &cache->key_tail_index);
+}
+
+static inline int cache_decode_key_tail(struct pcache_cache *cache)
+{
+ return cache_pos_decode(cache, cache->cache_ctrl->key_tail_pos,
+ &cache->key_tail, &cache->key_tail_seq,
+ &cache->key_tail_index);
+}
+
+static inline void cache_encode_dirty_tail(struct pcache_cache *cache)
+{
+ cache_pos_encode(cache, cache->cache_ctrl->dirty_tail_pos,
+ &cache->dirty_tail, ++cache->dirty_tail_seq,
+ &cache->dirty_tail_index);
+}
+
+static inline int cache_decode_dirty_tail(struct pcache_cache *cache)
+{
+ return cache_pos_decode(cache, cache->cache_ctrl->dirty_tail_pos,
+ &cache->dirty_tail, &cache->dirty_tail_seq,
+ &cache->dirty_tail_index);
+}
+
+int pcache_cache_init(void);
+void pcache_cache_exit(void);
+#endif /* _PCACHE_CACHE_H */
diff --git a/drivers/md/dm-pcache/cache_dev.c b/drivers/md/dm-pcache/cache_dev.c
new file mode 100644
index 000000000000..ece689e6ce59
--- /dev/null
+++ b/drivers/md/dm-pcache/cache_dev.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/blkdev.h>
+#include <linux/dax.h>
+#include <linux/vmalloc.h>
+#include <linux/parser.h>
+
+#include "cache_dev.h"
+#include "backing_dev.h"
+#include "cache.h"
+#include "dm_pcache.h"
+
+static void cache_dev_dax_exit(struct pcache_cache_dev *cache_dev)
+{
+ if (cache_dev->use_vmap)
+ vunmap(cache_dev->mapping);
+}
+
+static int build_vmap(struct dax_device *dax_dev, long total_pages, void **vaddr)
+{
+ struct page **pages;
+ long i = 0, chunk;
+ unsigned long pfn;
+ int ret;
+
+ pages = vmalloc_array(total_pages, sizeof(struct page *));
+ if (!pages)
+ return -ENOMEM;
+
+ do {
+ chunk = dax_direct_access(dax_dev, i, total_pages - i,
+ DAX_ACCESS, NULL, &pfn);
+ if (chunk <= 0) {
+ ret = chunk ? chunk : -EINVAL;
+ goto out_free;
+ }
+
+ if (!pfn_valid(pfn)) {
+ ret = -EOPNOTSUPP;
+ goto out_free;
+ }
+
+ while (chunk-- && i < total_pages) {
+ pages[i++] = pfn_to_page(pfn);
+ pfn++;
+ if (!(i & 15))
+ cond_resched();
+ }
+ } while (i < total_pages);
+
+ *vaddr = vmap(pages, total_pages, VM_MAP, PAGE_KERNEL);
+ if (!*vaddr) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ ret = 0;
+
+out_free:
+ vfree(pages);
+ return ret;
+}
+
+static int cache_dev_dax_init(struct pcache_cache_dev *cache_dev)
+{
+ struct dm_pcache *pcache = CACHE_DEV_TO_PCACHE(cache_dev);
+ struct dax_device *dax_dev;
+ long total_pages, mapped_pages;
+ u64 bdev_size;
+ void *vaddr;
+ int ret;
+ int id;
+ unsigned long pfn;
+
+ dax_dev = cache_dev->dm_dev->dax_dev;
+ /* total size check */
+ bdev_size = bdev_nr_bytes(cache_dev->dm_dev->bdev);
+ if (bdev_size < PCACHE_CACHE_DEV_SIZE_MIN) {
+ pcache_dev_err(pcache, "dax device is too small, required at least %llu",
+ PCACHE_CACHE_DEV_SIZE_MIN);
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ total_pages = bdev_size >> PAGE_SHIFT;
+ /* attempt: direct-map the whole range */
+ id = dax_read_lock();
+ mapped_pages = dax_direct_access(dax_dev, 0, total_pages,
+ DAX_ACCESS, &vaddr, &pfn);
+ if (mapped_pages < 0) {
+ pcache_dev_err(pcache, "dax_direct_access failed: %ld\n", mapped_pages);
+ ret = mapped_pages;
+ goto unlock;
+ }
+
+ if (!pfn_valid(pfn)) {
+ ret = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ if (mapped_pages == total_pages) {
+ /* success: contiguous direct mapping */
+ cache_dev->mapping = vaddr;
+ } else {
+ /* need vmap fallback */
+ ret = build_vmap(dax_dev, total_pages, &vaddr);
+ if (ret) {
+ pcache_dev_err(pcache, "vmap fallback failed: %d\n", ret);
+ goto unlock;
+ }
+
+ cache_dev->mapping = vaddr;
+ cache_dev->use_vmap = true;
+ }
+ dax_read_unlock(id);
+
+ return 0;
+unlock:
+ dax_read_unlock(id);
+out:
+ return ret;
+}
+
+void cache_dev_zero_range(struct pcache_cache_dev *cache_dev, void *pos, u32 size)
+{
+ memset(pos, 0, size);
+ dax_flush(cache_dev->dm_dev->dax_dev, pos, size);
+}
+
+static int sb_read(struct pcache_cache_dev *cache_dev, struct pcache_sb *sb)
+{
+ struct pcache_sb *sb_addr = CACHE_DEV_SB(cache_dev);
+
+ if (copy_mc_to_kernel(sb, sb_addr, sizeof(struct pcache_sb)))
+ return -EIO;
+
+ return 0;
+}
+
+static void sb_write(struct pcache_cache_dev *cache_dev, struct pcache_sb *sb)
+{
+ struct pcache_sb *sb_addr = CACHE_DEV_SB(cache_dev);
+
+ memcpy_flushcache(sb_addr, sb, sizeof(struct pcache_sb));
+ pmem_wmb();
+}
+
+static int sb_init(struct pcache_cache_dev *cache_dev, struct pcache_sb *sb)
+{
+ struct dm_pcache *pcache = CACHE_DEV_TO_PCACHE(cache_dev);
+ u64 nr_segs;
+ u64 cache_dev_size;
+ u64 magic;
+ u32 flags = 0;
+
+ magic = le64_to_cpu(sb->magic);
+ if (magic)
+ return -EEXIST;
+
+ cache_dev_size = bdev_nr_bytes(file_bdev(cache_dev->dm_dev->bdev_file));
+ if (cache_dev_size < PCACHE_CACHE_DEV_SIZE_MIN) {
+ pcache_dev_err(pcache, "dax device is too small, required at least %llu",
+ PCACHE_CACHE_DEV_SIZE_MIN);
+ return -ENOSPC;
+ }
+
+	nr_segs = (cache_dev_size - PCACHE_SEGMENTS_OFF) / PCACHE_SEG_SIZE;
+
+#if defined(__BYTE_ORDER) ? (__BIG_ENDIAN == __BYTE_ORDER) : defined(__BIG_ENDIAN)
+ flags |= PCACHE_SB_F_BIGENDIAN;
+#endif
+ sb->flags = cpu_to_le32(flags);
+ sb->magic = cpu_to_le64(PCACHE_MAGIC);
+ sb->seg_num = cpu_to_le32(nr_segs);
+ sb->crc = cpu_to_le32(crc32c(PCACHE_CRC_SEED, (void *)(sb) + 4, sizeof(struct pcache_sb) - 4));
+
+ cache_dev_zero_range(cache_dev, CACHE_DEV_CACHE_INFO(cache_dev),
+ PCACHE_CACHE_INFO_SIZE * PCACHE_META_INDEX_MAX +
+ PCACHE_CACHE_CTRL_SIZE);
+
+ return 0;
+}
+
+static int sb_validate(struct pcache_cache_dev *cache_dev, struct pcache_sb *sb)
+{
+ struct dm_pcache *pcache = CACHE_DEV_TO_PCACHE(cache_dev);
+ u32 flags;
+ u32 crc;
+
+ if (le64_to_cpu(sb->magic) != PCACHE_MAGIC) {
+ pcache_dev_err(pcache, "unexpected magic: %llx\n",
+ le64_to_cpu(sb->magic));
+ return -EINVAL;
+ }
+
+ crc = crc32c(PCACHE_CRC_SEED, (void *)(sb) + 4, sizeof(struct pcache_sb) - 4);
+ if (crc != le32_to_cpu(sb->crc)) {
+ pcache_dev_err(pcache, "corrupted sb: %u, expected: %u\n", crc, le32_to_cpu(sb->crc));
+ return -EINVAL;
+ }
+
+ flags = le32_to_cpu(sb->flags);
+#if defined(__BYTE_ORDER) ? (__BIG_ENDIAN == __BYTE_ORDER) : defined(__BIG_ENDIAN)
+ if (!(flags & PCACHE_SB_F_BIGENDIAN)) {
+ pcache_dev_err(pcache, "cache_dev is not big endian\n");
+ return -EINVAL;
+ }
+#else
+ if (flags & PCACHE_SB_F_BIGENDIAN) {
+ pcache_dev_err(pcache, "cache_dev is big endian\n");
+ return -EINVAL;
+ }
+#endif
+ return 0;
+}
+
+static int cache_dev_init(struct pcache_cache_dev *cache_dev, u32 seg_num)
+{
+ cache_dev->seg_num = seg_num;
+ cache_dev->seg_bitmap = kvcalloc(BITS_TO_LONGS(cache_dev->seg_num), sizeof(unsigned long), GFP_KERNEL);
+ if (!cache_dev->seg_bitmap)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void cache_dev_exit(struct pcache_cache_dev *cache_dev)
+{
+ kvfree(cache_dev->seg_bitmap);
+}
+
+void cache_dev_stop(struct dm_pcache *pcache)
+{
+ struct pcache_cache_dev *cache_dev = &pcache->cache_dev;
+
+ cache_dev_exit(cache_dev);
+ cache_dev_dax_exit(cache_dev);
+}
+
+int cache_dev_start(struct dm_pcache *pcache)
+{
+ struct pcache_cache_dev *cache_dev = &pcache->cache_dev;
+ struct pcache_sb sb;
+ bool format = false;
+ int ret;
+
+ mutex_init(&cache_dev->seg_lock);
+
+ ret = cache_dev_dax_init(cache_dev);
+ if (ret) {
+ pcache_dev_err(pcache, "failed to init cache_dev %s via dax way: %d.",
+ cache_dev->dm_dev->name, ret);
+ goto err;
+ }
+
+ ret = sb_read(cache_dev, &sb);
+ if (ret)
+ goto dax_release;
+
+ if (le64_to_cpu(sb.magic) == 0) {
+ format = true;
+ ret = sb_init(cache_dev, &sb);
+ if (ret < 0)
+ goto dax_release;
+ }
+
+ ret = sb_validate(cache_dev, &sb);
+ if (ret)
+ goto dax_release;
+
+ cache_dev->sb_flags = le32_to_cpu(sb.flags);
+ ret = cache_dev_init(cache_dev, le32_to_cpu(sb.seg_num));
+ if (ret)
+ goto dax_release;
+
+ if (format)
+ sb_write(cache_dev, &sb);
+
+ return 0;
+
+dax_release:
+ cache_dev_dax_exit(cache_dev);
+err:
+ return ret;
+}
+
+int cache_dev_get_empty_segment_id(struct pcache_cache_dev *cache_dev, u32 *seg_id)
+{
+ int ret;
+
+ mutex_lock(&cache_dev->seg_lock);
+ *seg_id = find_next_zero_bit(cache_dev->seg_bitmap, cache_dev->seg_num, 0);
+ if (*seg_id == cache_dev->seg_num) {
+ ret = -ENOSPC;
+ goto unlock;
+ }
+
+ __set_bit(*seg_id, cache_dev->seg_bitmap);
+ ret = 0;
+unlock:
+ mutex_unlock(&cache_dev->seg_lock);
+ return ret;
+}
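+
+/*
+ * Typical use (a sketch): reserve a free on-media segment, then address
+ * its mapped area via CACHE_DEV_SEGMENT():
+ *
+ *	u32 seg_id;
+ *
+ *	ret = cache_dev_get_empty_segment_id(cache_dev, &seg_id);
+ *	if (!ret)
+ *		seg_base = CACHE_DEV_SEGMENT(cache_dev, seg_id);
+ */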
diff --git a/drivers/md/dm-pcache/cache_dev.h b/drivers/md/dm-pcache/cache_dev.h
new file mode 100644
index 000000000000..6251eb4ebe96
--- /dev/null
+++ b/drivers/md/dm-pcache/cache_dev.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _PCACHE_CACHE_DEV_H
+#define _PCACHE_CACHE_DEV_H
+
+#include <linux/device.h>
+#include <linux/device-mapper.h>
+
+#include "pcache_internal.h"
+
+#define PCACHE_MAGIC 0x65B05EFA96C596EFULL
+
+#define PCACHE_SB_OFF (4 * PCACHE_KB)
+#define PCACHE_SB_SIZE (4 * PCACHE_KB)
+
+#define PCACHE_CACHE_INFO_OFF (PCACHE_SB_OFF + PCACHE_SB_SIZE)
+#define PCACHE_CACHE_INFO_SIZE (4 * PCACHE_KB)
+
+#define PCACHE_CACHE_CTRL_OFF (PCACHE_CACHE_INFO_OFF + (PCACHE_CACHE_INFO_SIZE * PCACHE_META_INDEX_MAX))
+#define PCACHE_CACHE_CTRL_SIZE (4 * PCACHE_KB)
+
+#define PCACHE_SEGMENTS_OFF (PCACHE_CACHE_CTRL_OFF + PCACHE_CACHE_CTRL_SIZE)
+#define PCACHE_SEG_INFO_SIZE (4 * PCACHE_KB)
+
+#define PCACHE_CACHE_DEV_SIZE_MIN (512 * PCACHE_MB) /* 512 MB */
+#define PCACHE_SEG_SIZE (16 * PCACHE_MB) /* Size of each PCACHE segment (16 MB) */
+
+#define CACHE_DEV_SB(cache_dev) ((struct pcache_sb *)(cache_dev->mapping + PCACHE_SB_OFF))
+#define CACHE_DEV_CACHE_INFO(cache_dev) ((void *)cache_dev->mapping + PCACHE_CACHE_INFO_OFF)
+#define CACHE_DEV_CACHE_CTRL(cache_dev) ((void *)cache_dev->mapping + PCACHE_CACHE_CTRL_OFF)
+#define CACHE_DEV_SEGMENTS(cache_dev) ((void *)cache_dev->mapping + PCACHE_SEGMENTS_OFF)
+#define CACHE_DEV_SEGMENT(cache_dev, id) ((void *)CACHE_DEV_SEGMENTS(cache_dev) + (u64)id * PCACHE_SEG_SIZE)
+
+/*
+ * PCACHE SB flags configured during formatting
+ *
+ * The PCACHE_SB_F_xxx flags define registration requirements based on cache_dev
+ * formatting. For a machine to register a cache_dev:
+ * - PCACHE_SB_F_BIGENDIAN: Requires a big-endian machine.
+ */
+#define PCACHE_SB_F_BIGENDIAN BIT(0)
+
+struct pcache_sb {
+ __le32 crc;
+ __le32 flags;
+ __le64 magic;
+
+ __le32 seg_num;
+};
+
+struct pcache_cache_dev {
+ u32 sb_flags;
+ u32 seg_num;
+ void *mapping;
+ bool use_vmap;
+
+ struct dm_dev *dm_dev;
+
+ struct mutex seg_lock;
+ unsigned long *seg_bitmap;
+};
+
+struct dm_pcache;
+int cache_dev_start(struct dm_pcache *pcache);
+void cache_dev_stop(struct dm_pcache *pcache);
+
+void cache_dev_zero_range(struct pcache_cache_dev *cache_dev, void *pos, u32 size);
+
+int cache_dev_get_empty_segment_id(struct pcache_cache_dev *cache_dev, u32 *seg_id);
+
+#endif /* _PCACHE_CACHE_DEV_H */
diff --git a/drivers/md/dm-pcache/cache_gc.c b/drivers/md/dm-pcache/cache_gc.c
new file mode 100644
index 000000000000..94f8b276a021
--- /dev/null
+++ b/drivers/md/dm-pcache/cache_gc.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include "cache.h"
+#include "backing_dev.h"
+#include "cache_dev.h"
+#include "dm_pcache.h"
+
+/**
+ * cache_key_gc - Releases the reference of a cache key segment.
+ * @cache: Pointer to the pcache_cache structure.
+ * @key: Pointer to the cache key to be garbage collected.
+ *
+ * This function decrements the reference count of the cache segment
+ * associated with the given key. If the reference count drops to zero,
+ * the segment may be invalidated and reused.
+ */
+static void cache_key_gc(struct pcache_cache *cache, struct pcache_cache_key *key)
+{
+ cache_seg_put(key->cache_pos.cache_seg);
+}
+
+static bool need_gc(struct pcache_cache *cache, struct pcache_cache_pos *dirty_tail, struct pcache_cache_pos *key_tail)
+{
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+ struct pcache_cache_kset_onmedia *kset_onmedia;
+ void *dirty_addr, *key_addr;
+ u32 segs_used, segs_gc_threshold, to_copy;
+ int ret;
+
+ dirty_addr = cache_pos_addr(dirty_tail);
+ key_addr = cache_pos_addr(key_tail);
+ if (dirty_addr == key_addr) {
+ pcache_dev_debug(pcache, "key tail is equal to dirty tail: %u:%u\n",
+ dirty_tail->cache_seg->cache_seg_id,
+ dirty_tail->seg_off);
+ return false;
+ }
+
+ kset_onmedia = (struct pcache_cache_kset_onmedia *)cache->gc_kset_onmedia_buf;
+
+ to_copy = min(PCACHE_KSET_ONMEDIA_SIZE_MAX, PCACHE_SEG_SIZE - key_tail->seg_off);
+ ret = copy_mc_to_kernel(kset_onmedia, key_addr, to_copy);
+ if (ret) {
+ pcache_dev_err(pcache, "error to read kset: %d", ret);
+ return false;
+ }
+
+ /* Check if kset_onmedia is corrupted */
+ if (kset_onmedia->magic != PCACHE_KSET_MAGIC) {
+ pcache_dev_debug(pcache, "gc error: magic is not as expected. key_tail: %u:%u magic: %llx, expected: %llx\n",
+ key_tail->cache_seg->cache_seg_id, key_tail->seg_off,
+ kset_onmedia->magic, PCACHE_KSET_MAGIC);
+ return false;
+ }
+
+ /* Verify the CRC of the kset_onmedia */
+ if (kset_onmedia->crc != cache_kset_crc(kset_onmedia)) {
+ pcache_dev_debug(pcache, "gc error: crc is not as expected. crc: %x, expected: %x\n",
+ cache_kset_crc(kset_onmedia), kset_onmedia->crc);
+ return false;
+ }
+
+ segs_used = bitmap_weight(cache->seg_map, cache->n_segs);
+ segs_gc_threshold = cache->n_segs * pcache_cache_get_gc_percent(cache) / 100;
+ if (segs_used < segs_gc_threshold) {
+ pcache_dev_debug(pcache, "segs_used: %u, segs_gc_threshold: %u\n", segs_used, segs_gc_threshold);
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * last_kset_gc - Advances the garbage collection for the last kset.
+ * @cache: Pointer to the pcache_cache structure.
+ * @kset_onmedia: Pointer to the kset_onmedia structure for the last kset.
+ */
+static void last_kset_gc(struct pcache_cache *cache, struct pcache_cache_kset_onmedia *kset_onmedia)
+{
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+ struct pcache_cache_segment *cur_seg, *next_seg;
+
+ cur_seg = cache->key_tail.cache_seg;
+
+ next_seg = &cache->segments[kset_onmedia->next_cache_seg_id];
+
+ mutex_lock(&cache->key_tail_lock);
+ cache->key_tail.cache_seg = next_seg;
+ cache->key_tail.seg_off = 0;
+ cache_encode_key_tail(cache);
+ mutex_unlock(&cache->key_tail_lock);
+
+ pcache_dev_debug(pcache, "gc advance kset seg: %u\n", cur_seg->cache_seg_id);
+
+ spin_lock(&cache->seg_map_lock);
+ __clear_bit(cur_seg->cache_seg_id, cache->seg_map);
+ spin_unlock(&cache->seg_map_lock);
+}
+
+void pcache_cache_gc_fn(struct work_struct *work)
+{
+ struct pcache_cache *cache = container_of(work, struct pcache_cache, gc_work.work);
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+ struct pcache_cache_pos dirty_tail, key_tail;
+ struct pcache_cache_kset_onmedia *kset_onmedia;
+ struct pcache_cache_key_onmedia *key_onmedia;
+ struct pcache_cache_key *key;
+ int ret;
+ int i;
+
+ kset_onmedia = (struct pcache_cache_kset_onmedia *)cache->gc_kset_onmedia_buf;
+
+ while (true) {
+ if (pcache_is_stopping(pcache) || atomic_read(&cache->gc_errors))
+ return;
+
+ /* Get new tail positions */
+ mutex_lock(&cache->dirty_tail_lock);
+ cache_pos_copy(&dirty_tail, &cache->dirty_tail);
+ mutex_unlock(&cache->dirty_tail_lock);
+
+ mutex_lock(&cache->key_tail_lock);
+ cache_pos_copy(&key_tail, &cache->key_tail);
+ mutex_unlock(&cache->key_tail_lock);
+
+ if (!need_gc(cache, &dirty_tail, &key_tail))
+ break;
+
+ if (kset_onmedia->flags & PCACHE_KSET_FLAGS_LAST) {
+ /* Don't move to the next segment if dirty_tail has not moved */
+ if (dirty_tail.cache_seg == key_tail.cache_seg)
+ break;
+
+ last_kset_gc(cache, kset_onmedia);
+ continue;
+ }
+
+ for (i = 0; i < kset_onmedia->key_num; i++) {
+ struct pcache_cache_key key_tmp = { 0 };
+
+ key_onmedia = &kset_onmedia->data[i];
+
+ key = &key_tmp;
+ cache_key_init(&cache->req_key_tree, key);
+
+ ret = cache_key_decode(cache, key_onmedia, key);
+ if (ret) {
+				/*
+				 * Return without re-arming the gc work, and
+				 * prevent future gc, because we can't retry a
+				 * partially gc-ed kset.
+				 */
+ atomic_inc(&cache->gc_errors);
+ pcache_dev_err(pcache, "failed to decode cache key in gc\n");
+ return;
+ }
+
+ cache_key_gc(cache, key);
+ }
+
+ pcache_dev_debug(pcache, "gc advance: %u:%u %u\n",
+ key_tail.cache_seg->cache_seg_id,
+ key_tail.seg_off,
+ get_kset_onmedia_size(kset_onmedia));
+
+ mutex_lock(&cache->key_tail_lock);
+ cache_pos_advance(&cache->key_tail, get_kset_onmedia_size(kset_onmedia));
+ cache_encode_key_tail(cache);
+ mutex_unlock(&cache->key_tail_lock);
+ }
+
+ queue_delayed_work(cache_get_wq(cache), &cache->gc_work, PCACHE_CACHE_GC_INTERVAL);
+}
diff --git a/drivers/md/dm-pcache/cache_key.c b/drivers/md/dm-pcache/cache_key.c
new file mode 100644
index 000000000000..2b77e121f89b
--- /dev/null
+++ b/drivers/md/dm-pcache/cache_key.c
@@ -0,0 +1,888 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include "cache.h"
+#include "backing_dev.h"
+#include "cache_dev.h"
+#include "dm_pcache.h"
+
+struct pcache_cache_kset_onmedia pcache_empty_kset = { 0 };
+
+void cache_key_init(struct pcache_cache_tree *cache_tree, struct pcache_cache_key *key)
+{
+ kref_init(&key->ref);
+ key->cache_tree = cache_tree;
+ INIT_LIST_HEAD(&key->list_node);
+ RB_CLEAR_NODE(&key->rb_node);
+}
+
+struct pcache_cache_key *cache_key_alloc(struct pcache_cache_tree *cache_tree, gfp_t gfp_mask)
+{
+ struct pcache_cache_key *key;
+
+ key = mempool_alloc(&cache_tree->key_pool, gfp_mask);
+ if (!key)
+ return NULL;
+
+ memset(key, 0, sizeof(struct pcache_cache_key));
+ cache_key_init(cache_tree, key);
+
+ return key;
+}
+
+/**
+ * cache_key_get - Increment the reference count of a cache key.
+ * @key: Pointer to the pcache_cache_key structure.
+ *
+ * This function increments the reference count of the specified cache key,
+ * ensuring that it is not freed while still in use.
+ */
+void cache_key_get(struct pcache_cache_key *key)
+{
+ kref_get(&key->ref);
+}
+
+/**
+ * cache_key_destroy - Free a cache key structure when its reference count drops to zero.
+ * @ref: Pointer to the kref structure.
+ *
+ * This function is called when the reference count of the cache key reaches zero.
+ * It frees the allocated cache key back to the slab cache.
+ */
+static void cache_key_destroy(struct kref *ref)
+{
+ struct pcache_cache_key *key = container_of(ref, struct pcache_cache_key, ref);
+ struct pcache_cache_tree *cache_tree = key->cache_tree;
+
+ mempool_free(key, &cache_tree->key_pool);
+}
+
+void cache_key_put(struct pcache_cache_key *key)
+{
+ kref_put(&key->ref, cache_key_destroy);
+}
+
+void cache_pos_advance(struct pcache_cache_pos *pos, u32 len)
+{
+ /* Ensure enough space remains in the current segment */
+ BUG_ON(cache_seg_remain(pos) < len);
+
+ pos->seg_off += len;
+}
+
+static void cache_key_encode(struct pcache_cache *cache,
+ struct pcache_cache_key_onmedia *key_onmedia,
+ struct pcache_cache_key *key)
+{
+ key_onmedia->off = key->off;
+ key_onmedia->len = key->len;
+
+ key_onmedia->cache_seg_id = key->cache_pos.cache_seg->cache_seg_id;
+ key_onmedia->cache_seg_off = key->cache_pos.seg_off;
+
+ key_onmedia->seg_gen = key->seg_gen;
+ key_onmedia->flags = key->flags;
+
+ if (cache_data_crc_on(cache))
+ key_onmedia->data_crc = cache_key_data_crc(key);
+}
+
+int cache_key_decode(struct pcache_cache *cache,
+ struct pcache_cache_key_onmedia *key_onmedia,
+ struct pcache_cache_key *key)
+{
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+
+ key->off = key_onmedia->off;
+ key->len = key_onmedia->len;
+
+ key->cache_pos.cache_seg = &cache->segments[key_onmedia->cache_seg_id];
+ key->cache_pos.seg_off = key_onmedia->cache_seg_off;
+
+ key->seg_gen = key_onmedia->seg_gen;
+ key->flags = key_onmedia->flags;
+
+ if (cache_data_crc_on(cache) &&
+ key_onmedia->data_crc != cache_key_data_crc(key)) {
+ pcache_dev_err(pcache, "key: %llu:%u seg %u:%u data_crc error: %x, expected: %x\n",
+ key->off, key->len, key->cache_pos.cache_seg->cache_seg_id,
+ key->cache_pos.seg_off, cache_key_data_crc(key), key_onmedia->data_crc);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void append_last_kset(struct pcache_cache *cache, u32 next_seg)
+{
+ struct pcache_cache_kset_onmedia kset_onmedia = { 0 };
+
+ kset_onmedia.flags |= PCACHE_KSET_FLAGS_LAST;
+ kset_onmedia.next_cache_seg_id = next_seg;
+ kset_onmedia.magic = PCACHE_KSET_MAGIC;
+ kset_onmedia.crc = cache_kset_crc(&kset_onmedia);
+
+ memcpy_flushcache(get_key_head_addr(cache), &kset_onmedia, sizeof(struct pcache_cache_kset_onmedia));
+ pmem_wmb();
+ cache_pos_advance(&cache->key_head, sizeof(struct pcache_cache_kset_onmedia));
+}
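+
+/*
+ * Resulting key-log layout (a sketch): the tail of a full segment holds a
+ * "last" kset whose next_cache_seg_id links to the segment continuing the
+ * log:
+ *
+ *	seg N: [kset][kset]...[kset(LAST, next=M)]
+ *	seg M: [kset][kset]...
+ */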
+
+int cache_kset_close(struct pcache_cache *cache, struct pcache_cache_kset *kset)
+{
+ struct pcache_cache_kset_onmedia *kset_onmedia;
+ u32 kset_onmedia_size;
+ int ret;
+
+ kset_onmedia = &kset->kset_onmedia;
+
+ if (!kset_onmedia->key_num)
+ return 0;
+
+ kset_onmedia_size = struct_size(kset_onmedia, data, kset_onmedia->key_num);
+
+ spin_lock(&cache->key_head_lock);
+again:
+ /* Reserve space for the last kset */
+ if (cache_seg_remain(&cache->key_head) < kset_onmedia_size + sizeof(struct pcache_cache_kset_onmedia)) {
+ struct pcache_cache_segment *next_seg;
+
+ next_seg = get_cache_segment(cache);
+ if (!next_seg) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* clear outdated kset in next seg */
+ memcpy_flushcache(next_seg->segment.data, &pcache_empty_kset,
+ sizeof(struct pcache_cache_kset_onmedia));
+ append_last_kset(cache, next_seg->cache_seg_id);
+ cache->key_head.cache_seg = next_seg;
+ cache->key_head.seg_off = 0;
+ goto again;
+ }
+
+ kset_onmedia->magic = PCACHE_KSET_MAGIC;
+ kset_onmedia->crc = cache_kset_crc(kset_onmedia);
+
+ /* clear outdated kset after current kset */
+ memcpy_flushcache(get_key_head_addr(cache) + kset_onmedia_size, &pcache_empty_kset,
+ sizeof(struct pcache_cache_kset_onmedia));
+ /* write current kset into segment */
+ memcpy_flushcache(get_key_head_addr(cache), kset_onmedia, kset_onmedia_size);
+ pmem_wmb();
+
+ /* reset kset_onmedia */
+ memset(kset_onmedia, 0, sizeof(struct pcache_cache_kset_onmedia));
+ cache_pos_advance(&cache->key_head, kset_onmedia_size);
+
+ ret = 0;
+out:
+ spin_unlock(&cache->key_head_lock);
+
+ return ret;
+}
+
+/**
+ * cache_key_append - Append a cache key to the related kset.
+ * @cache: Pointer to the pcache_cache structure.
+ * @key: Pointer to the cache key structure to append.
+ * @force_close: Close the current kset if true, even if it is not yet full.
+ *
+ * This function appends a cache key to the appropriate kset. If the kset
+ * is full, it closes the kset. If not, it queues a flush work to write
+ * the kset to media.
+ *
+ * Returns 0 on success, or a negative error code on failure.
+ */
+int cache_key_append(struct pcache_cache *cache, struct pcache_cache_key *key, bool force_close)
+{
+ struct pcache_cache_kset *kset;
+ struct pcache_cache_kset_onmedia *kset_onmedia;
+ struct pcache_cache_key_onmedia *key_onmedia;
+ u32 kset_id = get_kset_id(cache, key->off);
+ int ret = 0;
+
+ kset = get_kset(cache, kset_id);
+ kset_onmedia = &kset->kset_onmedia;
+
+ spin_lock(&kset->kset_lock);
+ key_onmedia = &kset_onmedia->data[kset_onmedia->key_num];
+ cache_key_encode(cache, key_onmedia, key);
+
+ /* Check if the current kset has reached the maximum number of keys */
+ if (++kset_onmedia->key_num == PCACHE_KSET_KEYS_MAX || force_close) {
+ /* If full, close the kset */
+ ret = cache_kset_close(cache, kset);
+ if (ret) {
+ kset_onmedia->key_num--;
+ goto out;
+ }
+ } else {
+ /* If not full, queue a delayed work to flush the kset */
+ queue_delayed_work(cache_get_wq(cache), &kset->flush_work, 1 * HZ);
+ }
+out:
+ spin_unlock(&kset->kset_lock);
+
+ return ret;
+}
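+
+/*
+ * For illustration only, a hypothetical caller forcing the kset closed,
+ * e.g. when everything staged so far must be persisted:
+ *
+ *	ret = cache_key_append(cache, key, true);
+ *	if (ret == -EBUSY)
+ *		... no free cache segment; retry or fail the request ...
+ */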
+
+/**
+ * cache_subtree_walk - Traverse the cache tree.
+ * @ctx: Pointer to the context structure for traversal.
+ *
+ * This function traverses the cache tree starting from the specified node.
+ * It calls the appropriate callback functions based on the relationships
+ * between the keys in the cache tree.
+ *
+ * Returns 0 on success, or a negative error code on failure.
+ */
+int cache_subtree_walk(struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ struct pcache_cache_key *key_tmp, *key;
+ struct rb_node *node_tmp;
+ int ret = SUBTREE_WALK_RET_OK;
+
+ key = ctx->key;
+ node_tmp = ctx->start_node;
+
+ while (node_tmp) {
+ if (ctx->walk_done && ctx->walk_done(ctx))
+ break;
+
+ key_tmp = CACHE_KEY(node_tmp);
+ /*
+ * If key_tmp ends before the start of key, continue to the next node.
+ * |----------|
+ * |=====|
+ */
+ if (cache_key_lend(key_tmp) <= cache_key_lstart(key)) {
+ if (ctx->after) {
+ ret = ctx->after(key, key_tmp, ctx);
+ if (ret)
+ goto out;
+ }
+ goto next;
+ }
+
+ /*
+ * If key_tmp starts after the end of key, stop traversing.
+ * |--------|
+ * |====|
+ */
+ if (cache_key_lstart(key_tmp) >= cache_key_lend(key)) {
+ if (ctx->before) {
+ ret = ctx->before(key, key_tmp, ctx);
+ if (ret)
+ goto out;
+ }
+ break;
+ }
+
+ /* Handle overlapping keys */
+ if (cache_key_lstart(key_tmp) >= cache_key_lstart(key)) {
+ /*
+ * If key_tmp encompasses key.
+ * |----------------| key_tmp
+ * |===========| key
+ */
+ if (cache_key_lend(key_tmp) >= cache_key_lend(key)) {
+ if (ctx->overlap_tail) {
+ ret = ctx->overlap_tail(key, key_tmp, ctx);
+ if (ret)
+ goto out;
+ }
+ break;
+ }
+
+ /*
+ * If key_tmp is contained within key.
+ * |----| key_tmp
+ * |==========| key
+ */
+ if (ctx->overlap_contain) {
+ ret = ctx->overlap_contain(key, key_tmp, ctx);
+ if (ret)
+ goto out;
+ }
+
+ goto next;
+ }
+
+ /*
+ * If key_tmp starts before key ends but ends after key.
+ * |-----------| key_tmp
+ * |====| key
+ */
+ if (cache_key_lend(key_tmp) > cache_key_lend(key)) {
+ if (ctx->overlap_contained) {
+ ret = ctx->overlap_contained(key, key_tmp, ctx);
+ if (ret)
+ goto out;
+ }
+ break;
+ }
+
+ /*
+ * If key_tmp starts before key and ends within key.
+ * |--------| key_tmp
+ * |==========| key
+ */
+ if (ctx->overlap_head) {
+ ret = ctx->overlap_head(key, key_tmp, ctx);
+ if (ret)
+ goto out;
+ }
+next:
+ node_tmp = rb_next(node_tmp);
+ }
+
+out:
+ if (ctx->walk_finally)
+ ret = ctx->walk_finally(ctx, ret);
+
+ return ret;
+}
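+
+/*
+ * For illustration only (the callback name is hypothetical): a minimal walk
+ * context that only handles keys fully contained in @key, starting from the
+ * node returned by cache_subtree_search():
+ *
+ *	struct pcache_cache_subtree_walk_ctx ctx = {
+ *		.cache_tree = cache_tree,
+ *		.start_node = prev_node,
+ *		.key = key,
+ *		.overlap_contain = my_overlap_contain,
+ *	};
+ *
+ *	ret = cache_subtree_walk(&ctx);
+ */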
+
+/**
+ * cache_subtree_search - Search for a key in the cache tree.
+ * @cache_subtree: Pointer to the cache tree structure.
+ * @key: Pointer to the cache key to search for.
+ * @parentp: Pointer to store the parent node of the found node.
+ * @newp: Pointer to store the location where the new node should be inserted.
+ * @delete_key_list: List to collect invalid keys for deletion.
+ *
+ * This function searches the cache tree for a specific key and returns
+ * the node that is the predecessor of the key, or first node if the key is
+ * less than all keys in the tree. If any invalid keys are found during
+ * the search, they are added to the delete_key_list for later cleanup.
+ *
+ * Returns a pointer to the previous node.
+ */
+struct rb_node *cache_subtree_search(struct pcache_cache_subtree *cache_subtree, struct pcache_cache_key *key,
+ struct rb_node **parentp, struct rb_node ***newp,
+ struct list_head *delete_key_list)
+{
+ struct rb_node **new, *parent = NULL;
+ struct pcache_cache_key *key_tmp;
+ struct rb_node *prev_node = NULL;
+
+ new = &(cache_subtree->root.rb_node);
+ while (*new) {
+ key_tmp = container_of(*new, struct pcache_cache_key, rb_node);
+ if (cache_key_invalid(key_tmp))
+ list_add(&key_tmp->list_node, delete_key_list);
+
+ parent = *new;
+ if (key_tmp->off >= key->off) {
+ new = &((*new)->rb_left);
+ } else {
+ prev_node = *new;
+ new = &((*new)->rb_right);
+ }
+ }
+
+ if (!prev_node)
+ prev_node = rb_first(&cache_subtree->root);
+
+ if (parentp)
+ *parentp = parent;
+
+ if (newp)
+ *newp = new;
+
+ return prev_node;
+}
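+
+/*
+ * Example (illustrative): with keys at offsets 10, 20 and 30 in a subtree,
+ * searching for a key with off = 25 records 20 as the last node passed on
+ * the right, so prev_node is the key at off = 20, the closest key starting
+ * before the new one. Searching for off = 5 finds no predecessor, and
+ * rb_first() of the subtree is returned instead.
+ */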
+
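+/*
+ * Key allocation during a walk happens under the subtree tree_lock and
+ * must not sleep: get_pre_alloc_key() uses a pre-allocated key if the
+ * caller provided one, and otherwise tries GFP_NOWAIT. When that fails,
+ * the walk returns SUBTREE_WALK_RET_NEED_KEY and the caller allocates a
+ * pre_alloc_key with GFP_NOIO outside the lock (see cache_key_insert()).
+ */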
+static struct pcache_cache_key *get_pre_alloc_key(struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ struct pcache_cache_key *key;
+
+ if (ctx->pre_alloc_key) {
+ key = ctx->pre_alloc_key;
+ ctx->pre_alloc_key = NULL;
+
+ return key;
+ }
+
+ return cache_key_alloc(ctx->cache_tree, GFP_NOWAIT);
+}
+
+/**
+ * fixup_overlap_tail - Handle overlap at the tail of the new key.
+ * @key: Pointer to the new cache key being inserted.
+ * @key_tmp: Pointer to the existing key that overlaps.
+ * @ctx: Pointer to the context for walking the cache tree.
+ *
+ * This function trims the head of the existing key (key_tmp) where the
+ * tail of the new key overlaps it. If key_tmp is an empty placeholder
+ * key, or its length drops to zero after trimming, it is deleted.
+ */
+static int fixup_overlap_tail(struct pcache_cache_key *key,
+ struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ /*
+ * |----------------| key_tmp
+ * |===========| key
+ */
+ BUG_ON(cache_key_empty(key));
+ if (cache_key_empty(key_tmp)) {
+ cache_key_delete(key_tmp);
+ return SUBTREE_WALK_RET_RESEARCH;
+ }
+
+ cache_key_cutfront(key_tmp, cache_key_lend(key) - cache_key_lstart(key_tmp));
+ if (key_tmp->len == 0) {
+ cache_key_delete(key_tmp);
+ return SUBTREE_WALK_RET_RESEARCH;
+ }
+
+ return SUBTREE_WALK_RET_OK;
+}
+
+/**
+ * fixup_overlap_contain - Handle case where new key completely contains an existing key.
+ * @key: Pointer to the new cache key being inserted.
+ * @key_tmp: Pointer to the existing key that is being contained.
+ * @ctx: Pointer to the context for walking the cache tree.
+ *
+ * This function deletes the existing key (key_tmp) when the new key
+ * completely contains it. It returns SUBTREE_WALK_RET_RESEARCH to indicate
+ * that the tree structure has changed, necessitating a re-search of the
+ * tree before the new key is inserted.
+ */
+static int fixup_overlap_contain(struct pcache_cache_key *key,
+ struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ /*
+ * |----| key_tmp
+ * |==========| key
+ */
+ BUG_ON(cache_key_empty(key));
+ cache_key_delete(key_tmp);
+
+ return SUBTREE_WALK_RET_RESEARCH;
+}
+
+/**
+ * fixup_overlap_contained - Handle overlap when a new key is contained in an existing key.
+ * @key: The new cache key being inserted.
+ * @key_tmp: The existing cache key that overlaps with the new key.
+ * @ctx: Context for the cache tree walk.
+ *
+ * This function adjusts the existing key if the new key is contained
+ * within it. If the existing key is empty, it indicates a placeholder key
+ * that was inserted during a miss read. This placeholder will later be
+ * updated with real data from the backing_dev, making it no longer an empty key.
+ *
+ * If we delete or insert a key, the structure of the entire cache tree may
+ * change, requiring a full re-search of the tree to find a new insertion point.
+ */
+static int fixup_overlap_contained(struct pcache_cache_key *key,
+ struct pcache_cache_key *key_tmp, struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ struct pcache_cache_tree *cache_tree = ctx->cache_tree;
+
+ /*
+ * |-----------| key_tmp
+ * |====| key
+ */
+ BUG_ON(cache_key_empty(key));
+ if (cache_key_empty(key_tmp)) {
+ /*
+ * If key_tmp is empty, don't split it; it's a placeholder key
+ * for miss reads that will be updated later.
+ */
+ cache_key_cutback(key_tmp, cache_key_lend(key_tmp) - cache_key_lstart(key));
+ if (key_tmp->len == 0) {
+ cache_key_delete(key_tmp);
+ return SUBTREE_WALK_RET_RESEARCH;
+ }
+ } else {
+ struct pcache_cache_key *key_fixup;
+ bool need_research = false;
+
+ key_fixup = get_pre_alloc_key(ctx);
+ if (!key_fixup)
+ return SUBTREE_WALK_RET_NEED_KEY;
+
+ cache_key_copy(key_fixup, key_tmp);
+
+ /* Split key_tmp based on the new key's range */
+ cache_key_cutback(key_tmp, cache_key_lend(key_tmp) - cache_key_lstart(key));
+ if (key_tmp->len == 0) {
+ cache_key_delete(key_tmp);
+ need_research = true;
+ }
+
+ /* Create a new portion for key_fixup */
+ cache_key_cutfront(key_fixup, cache_key_lend(key) - cache_key_lstart(key_tmp));
+ if (key_fixup->len == 0) {
+ cache_key_put(key_fixup);
+ } else {
+ /* Insert the new key into the cache */
+ cache_key_insert(cache_tree, key_fixup, false);
+ need_research = true;
+ }
+
+ if (need_research)
+ return SUBTREE_WALK_RET_RESEARCH;
+ }
+
+ return SUBTREE_WALK_RET_OK;
+}
+
+/**
+ * fixup_overlap_head - Handle overlap at the head of the new key.
+ * @key: The new cache key being inserted.
+ * @key_tmp: The existing cache key that overlaps with the new key.
+ * @ctx: Context for the cache tree walk.
+ *
+ * This function trims the tail of the existing key (key_tmp) where it
+ * overlaps the head of the new key. If the resulting key length is zero
+ * after the adjustment, the key is deleted; it no longer holds valid
+ * data, and the tree must be re-searched for a new insertion point.
+ */
+static int fixup_overlap_head(struct pcache_cache_key *key,
+ struct pcache_cache_key *key_tmp, struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ /*
+ * |--------| key_tmp
+ * |==========| key
+ */
+ BUG_ON(cache_key_empty(key));
+ /* Adjust key_tmp by cutting back based on the new key's start */
+ cache_key_cutback(key_tmp, cache_key_lend(key_tmp) - cache_key_lstart(key));
+ if (key_tmp->len == 0) {
+ /* If the adjusted key_tmp length is zero, delete it */
+ cache_key_delete(key_tmp);
+ return SUBTREE_WALK_RET_RESEARCH;
+ }
+
+ return SUBTREE_WALK_RET_OK;
+}
+
+/**
+ * cache_key_insert - Insert a new cache key into the cache tree.
+ * @cache_tree: Pointer to the cache_tree structure.
+ * @key: The cache key to insert.
+ * @fixup: Indicates whether overlap fixups should be performed during insertion.
+ *
+ * This function searches for the appropriate location to insert
+ * a new cache key into the cache tree. It handles key overlaps
+ * and ensures any invalid keys are removed before insertion.
+ */
+void cache_key_insert(struct pcache_cache_tree *cache_tree, struct pcache_cache_key *key, bool fixup)
+{
+ struct pcache_cache *cache = cache_tree->cache;
+ struct pcache_cache_subtree_walk_ctx walk_ctx = { 0 };
+ struct rb_node **new, *parent = NULL;
+ struct pcache_cache_subtree *cache_subtree;
+ struct pcache_cache_key *key_tmp = NULL, *key_next;
+ struct rb_node *prev_node = NULL;
+ LIST_HEAD(delete_key_list);
+ int ret;
+
+ cache_subtree = get_subtree(cache_tree, key->off);
+ key->cache_subtree = cache_subtree;
+search:
+ prev_node = cache_subtree_search(cache_subtree, key, &parent, &new, &delete_key_list);
+ if (!list_empty(&delete_key_list)) {
+ /* Remove invalid keys from the delete list */
+ list_for_each_entry_safe(key_tmp, key_next, &delete_key_list, list_node) {
+ list_del_init(&key_tmp->list_node);
+ cache_key_delete(key_tmp);
+ }
+ goto search;
+ }
+
+ if (fixup) {
+ /* Set up the context with the cache, start node, and new key */
+ walk_ctx.cache_tree = cache_tree;
+ walk_ctx.start_node = prev_node;
+ walk_ctx.key = key;
+
+ /* Assign overlap handling functions for different scenarios */
+ walk_ctx.overlap_tail = fixup_overlap_tail;
+ walk_ctx.overlap_head = fixup_overlap_head;
+ walk_ctx.overlap_contain = fixup_overlap_contain;
+ walk_ctx.overlap_contained = fixup_overlap_contained;
+
+ ret = cache_subtree_walk(&walk_ctx);
+ switch (ret) {
+ case SUBTREE_WALK_RET_OK:
+ break;
+ case SUBTREE_WALK_RET_RESEARCH:
+ goto search;
+ case SUBTREE_WALK_RET_NEED_KEY:
+ spin_unlock(&cache_subtree->tree_lock);
+ pcache_dev_debug(CACHE_TO_PCACHE(cache), "allocate pre_alloc_key with GFP_NOIO");
+ walk_ctx.pre_alloc_key = cache_key_alloc(cache_tree, GFP_NOIO);
+ spin_lock(&cache_subtree->tree_lock);
+ goto search;
+ default:
+ BUG();
+ }
+ }
+
+ if (walk_ctx.pre_alloc_key)
+ cache_key_put(walk_ctx.pre_alloc_key);
+
+ /* Link and insert the new key into the red-black tree */
+ rb_link_node(&key->rb_node, parent, new);
+ rb_insert_color(&key->rb_node, &cache_subtree->root);
+}
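+
+/*
+ * Insertion sketch (illustrative, assuming the key is fully initialized),
+ * matching the pattern used by kset_replay() and cache_write():
+ *
+ * cache_subtree = get_subtree(&cache->req_key_tree, key->off);
+ * spin_lock(&cache_subtree->tree_lock);
+ * cache_key_insert(&cache->req_key_tree, key, true);
+ * spin_unlock(&cache_subtree->tree_lock);
+ */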
+
+/**
+ * clean_fn - Cleanup function to remove invalid keys from the cache tree.
+ * @work: Pointer to the work_struct associated with the cleanup.
+ *
+ * This function cleans up invalid keys from the cache tree in the background
+ * after a cache segment has been invalidated during cache garbage collection.
+ * It processes a maximum of PCACHE_CLEAN_KEYS_MAX keys per iteration and holds
+ * the tree lock to ensure thread safety.
+ */
+void clean_fn(struct work_struct *work)
+{
+ struct pcache_cache *cache = container_of(work, struct pcache_cache, clean_work);
+ struct pcache_cache_subtree *cache_subtree;
+ struct rb_node *node;
+ struct pcache_cache_key *key;
+ int i, count;
+
+ for (i = 0; i < cache->req_key_tree.n_subtrees; i++) {
+ cache_subtree = &cache->req_key_tree.subtrees[i];
+
+again:
+ if (pcache_is_stopping(CACHE_TO_PCACHE(cache)))
+ return;
+
+ /* Delete up to PCACHE_CLEAN_KEYS_MAX keys in one iteration */
+ count = 0;
+ spin_lock(&cache_subtree->tree_lock);
+ node = rb_first(&cache_subtree->root);
+ while (node) {
+ key = CACHE_KEY(node);
+ node = rb_next(node);
+ if (cache_key_invalid(key)) {
+ count++;
+ cache_key_delete(key);
+ }
+
+ if (count >= PCACHE_CLEAN_KEYS_MAX) {
+ /* Unlock and pause before continuing cleanup */
+ spin_unlock(&cache_subtree->tree_lock);
+ usleep_range(1000, 2000);
+ goto again;
+ }
+ }
+ spin_unlock(&cache_subtree->tree_lock);
+ }
+}
+
+/*
+ * kset_flush_fn - Flush work for a cache kset.
+ *
+ * This function is called when a kset flush work queued from
+ * cache_key_append() fires. A full kset is closed there immediately;
+ * otherwise this flush work is queued to close the kset later.
+ *
+ * If cache_kset_close detects that a new segment is required to store
+ * the kset and there are no available segments, it will return an error.
+ * In this scenario, a retry will be attempted.
+ */
+void kset_flush_fn(struct work_struct *work)
+{
+ struct pcache_cache_kset *kset = container_of(work, struct pcache_cache_kset, flush_work.work);
+ struct pcache_cache *cache = kset->cache;
+ int ret;
+
+ if (pcache_is_stopping(CACHE_TO_PCACHE(cache)))
+ return;
+
+ spin_lock(&kset->kset_lock);
+ ret = cache_kset_close(cache, kset);
+ spin_unlock(&kset->kset_lock);
+
+ if (ret) {
+ /* Failed to flush kset, schedule a retry. */
+ queue_delayed_work(cache_get_wq(cache), &kset->flush_work, msecs_to_jiffies(100));
+ }
+}
+
+static int kset_replay(struct pcache_cache *cache, struct pcache_cache_kset_onmedia *kset_onmedia)
+{
+ struct pcache_cache_key_onmedia *key_onmedia;
+ struct pcache_cache_subtree *cache_subtree;
+ struct pcache_cache_key *key;
+ int ret;
+ int i;
+
+ for (i = 0; i < kset_onmedia->key_num; i++) {
+ key_onmedia = &kset_onmedia->data[i];
+
+ key = cache_key_alloc(&cache->req_key_tree, GFP_NOIO);
+ ret = cache_key_decode(cache, key_onmedia, key);
+ if (ret) {
+ cache_key_put(key);
+ goto err;
+ }
+
+ __set_bit(key->cache_pos.cache_seg->cache_seg_id, cache->seg_map);
+
+ /* Check if the segment generation is valid for insertion. */
+ if (key->seg_gen < key->cache_pos.cache_seg->gen) {
+ cache_key_put(key);
+ } else {
+ cache_subtree = get_subtree(&cache->req_key_tree, key->off);
+ spin_lock(&cache_subtree->tree_lock);
+ cache_key_insert(&cache->req_key_tree, key, true);
+ spin_unlock(&cache_subtree->tree_lock);
+ }
+
+ cache_seg_get(key->cache_pos.cache_seg);
+ }
+
+ return 0;
+err:
+ return ret;
+}
+
+int cache_replay(struct pcache_cache *cache)
+{
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+ struct pcache_cache_pos pos_tail;
+ struct pcache_cache_pos *pos;
+ struct pcache_cache_kset_onmedia *kset_onmedia;
+ u32 to_copy, count = 0;
+ int ret = 0;
+
+ kset_onmedia = kzalloc(PCACHE_KSET_ONMEDIA_SIZE_MAX, GFP_KERNEL);
+ if (!kset_onmedia)
+ return -ENOMEM;
+
+ cache_pos_copy(&pos_tail, &cache->key_tail);
+ pos = &pos_tail;
+
+ /*
+ * During the cache replay stage, nothing else will access
+ * cache->seg_map, so we can set bits here without cache->seg_map_lock.
+ */
+ __set_bit(pos->cache_seg->cache_seg_id, cache->seg_map);
+
+ while (true) {
+ to_copy = min(PCACHE_KSET_ONMEDIA_SIZE_MAX, PCACHE_SEG_SIZE - pos->seg_off);
+ ret = copy_mc_to_kernel(kset_onmedia, cache_pos_addr(pos), to_copy);
+ if (ret) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (kset_onmedia->magic != PCACHE_KSET_MAGIC ||
+ kset_onmedia->crc != cache_kset_crc(kset_onmedia)) {
+ break;
+ }
+
+ /* Process the last kset and prepare for the next segment. */
+ if (kset_onmedia->flags & PCACHE_KSET_FLAGS_LAST) {
+ struct pcache_cache_segment *next_seg;
+
+ pcache_dev_debug(pcache, "last kset replay, next: %u\n", kset_onmedia->next_cache_seg_id);
+
+ next_seg = &cache->segments[kset_onmedia->next_cache_seg_id];
+
+ pos->cache_seg = next_seg;
+ pos->seg_off = 0;
+
+ __set_bit(pos->cache_seg->cache_seg_id, cache->seg_map);
+ continue;
+ }
+
+ /* Replay the kset and check for errors. */
+ ret = kset_replay(cache, kset_onmedia);
+ if (ret)
+ goto out;
+
+ /* Advance the position after processing the kset. */
+ cache_pos_advance(pos, get_kset_onmedia_size(kset_onmedia));
+ if (++count > 512) {
+ cond_resched();
+ count = 0;
+ }
+ }
+
+ /* Update the key_head position after replaying. */
+ spin_lock(&cache->key_head_lock);
+ cache_pos_copy(&cache->key_head, pos);
+ spin_unlock(&cache->key_head_lock);
+out:
+ kfree(kset_onmedia);
+ return ret;
+}
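+
+/*
+ * On-media layout walked by cache_replay() above (sketch): starting at
+ * key_tail, ksets are laid out back to back,
+ *
+ * [kset][kset]...[kset marked LAST] -> next segment -> ... -> key_head
+ *
+ * each validated by magic and crc. A kset carrying PCACHE_KSET_FLAGS_LAST
+ * redirects the walk to ->next_cache_seg_id, and the first magic/crc
+ * mismatch marks the head where appending resumes.
+ */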
+
+int cache_tree_init(struct pcache_cache *cache, struct pcache_cache_tree *cache_tree, u32 n_subtrees)
+{
+ int ret;
+ u32 i;
+
+ cache_tree->cache = cache;
+ cache_tree->n_subtrees = n_subtrees;
+
+ ret = mempool_init_slab_pool(&cache_tree->key_pool, 1024, key_cache);
+ if (ret)
+ goto err;
+
+ /*
+ * Allocate and initialize the subtrees array.
+ * Each element is a cache tree structure that contains
+ * an RB tree root and a spinlock for protecting its contents.
+ */
+ cache_tree->subtrees = kvcalloc(cache_tree->n_subtrees, sizeof(struct pcache_cache_subtree), GFP_KERNEL);
+ if (!cache_tree->subtrees) {
+ ret = -ENOMEM;
+ goto key_pool_exit;
+ }
+
+ for (i = 0; i < cache_tree->n_subtrees; i++) {
+ struct pcache_cache_subtree *cache_subtree = &cache_tree->subtrees[i];
+
+ cache_subtree->root = RB_ROOT;
+ spin_lock_init(&cache_subtree->tree_lock);
+ }
+
+ return 0;
+
+key_pool_exit:
+ mempool_exit(&cache_tree->key_pool);
+err:
+ return ret;
+}
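+
+/*
+ * Usage sketch: a request tree is typically initialized with multiple
+ * subtrees to spread tree_lock contention, while the writeback path uses
+ * a single subtree (see cache_writeback_init() in cache_writeback.c):
+ *
+ * ret = cache_tree_init(cache, &cache->writeback_key_tree, 1);
+ */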
+
+void cache_tree_clear(struct pcache_cache_tree *cache_tree)
+{
+ struct pcache_cache_subtree *cache_subtree;
+ struct rb_node *node;
+ struct pcache_cache_key *key;
+ u32 i;
+
+ for (i = 0; i < cache_tree->n_subtrees; i++) {
+ cache_subtree = &cache_tree->subtrees[i];
+
+ spin_lock(&cache_subtree->tree_lock);
+ node = rb_first(&cache_subtree->root);
+ while (node) {
+ key = CACHE_KEY(node);
+ node = rb_next(node);
+
+ cache_key_delete(key);
+ }
+ spin_unlock(&cache_subtree->tree_lock);
+ }
+}
+
+void cache_tree_exit(struct pcache_cache_tree *cache_tree)
+{
+ cache_tree_clear(cache_tree);
+ kvfree(cache_tree->subtrees);
+ mempool_exit(&cache_tree->key_pool);
+}
diff --git a/drivers/md/dm-pcache/cache_req.c b/drivers/md/dm-pcache/cache_req.c
new file mode 100644
index 000000000000..27f94c1fa968
--- /dev/null
+++ b/drivers/md/dm-pcache/cache_req.c
@@ -0,0 +1,836 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "cache.h"
+#include "backing_dev.h"
+#include "cache_dev.h"
+#include "dm_pcache.h"
+
+static int cache_data_head_init(struct pcache_cache *cache)
+{
+ struct pcache_cache_segment *next_seg;
+ struct pcache_cache_data_head *data_head;
+
+ data_head = get_data_head(cache);
+ next_seg = get_cache_segment(cache);
+ if (!next_seg)
+ return -EBUSY;
+
+ cache_seg_get(next_seg);
+ data_head->head_pos.cache_seg = next_seg;
+ data_head->head_pos.seg_off = 0;
+
+ return 0;
+}
+
+/**
+ * cache_data_alloc - Allocate data for a cache key.
+ * @cache: Pointer to the cache structure.
+ * @key: Pointer to the cache key to allocate data for.
+ *
+ * This function tries to allocate space from the cache segment specified by the
+ * data head. If the remaining space in the segment is insufficient to allocate
+ * the requested length for the cache key, it will allocate whatever is available
+ * and adjust the key's length accordingly. This function does not allocate
+ * space that crosses segment boundaries.
+ */
+static int cache_data_alloc(struct pcache_cache *cache, struct pcache_cache_key *key)
+{
+ struct pcache_cache_data_head *data_head;
+ struct pcache_cache_pos *head_pos;
+ struct pcache_cache_segment *cache_seg;
+ u32 seg_remain;
+ u32 allocated = 0, to_alloc;
+ int ret = 0;
+
+ preempt_disable();
+ data_head = get_data_head(cache);
+again:
+ to_alloc = key->len - allocated;
+ if (!data_head->head_pos.cache_seg) {
+ seg_remain = 0;
+ } else {
+ cache_pos_copy(&key->cache_pos, &data_head->head_pos);
+ key->seg_gen = key->cache_pos.cache_seg->gen;
+
+ head_pos = &data_head->head_pos;
+ cache_seg = head_pos->cache_seg;
+ seg_remain = cache_seg_remain(head_pos);
+ }
+
+ if (seg_remain > to_alloc) {
+ /* If remaining space in segment is sufficient for the cache key, allocate it. */
+ cache_pos_advance(head_pos, to_alloc);
+ allocated += to_alloc;
+ cache_seg_get(cache_seg);
+ } else if (seg_remain) {
+ /* If remaining space is not enough, allocate the remaining space and adjust the cache key length. */
+ cache_pos_advance(head_pos, seg_remain);
+ key->len = seg_remain;
+
+ /* Get for key: obtain a reference to the cache segment for the key. */
+ cache_seg_get(cache_seg);
+ /* Put for head_pos->cache_seg: release the reference for the current head's segment. */
+ cache_seg_put(head_pos->cache_seg);
+ head_pos->cache_seg = NULL;
+ } else {
+ /* Initialize a new data head if no segment is available. */
+ ret = cache_data_head_init(cache);
+ if (ret)
+ goto out;
+
+ goto again;
+ }
+
+out:
+ preempt_enable();
+
+ return ret;
+}
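+
+/*
+ * Caller contract sketch for cache_data_alloc() (see cache_write() in this
+ * file): the allocation may shrink key->len to what remains in the current
+ * segment, so callers loop until the whole range is covered:
+ *
+ * while (io_done < length) {
+ * key->len = length - io_done;
+ * ret = cache_data_alloc(cache, key); (may shrink key->len)
+ * ...
+ * io_done += key->len;
+ * }
+ */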
+
+static int cache_copy_from_req_bio(struct pcache_cache *cache, struct pcache_cache_key *key,
+ struct pcache_request *pcache_req, u32 bio_off)
+{
+ struct pcache_cache_pos *pos = &key->cache_pos;
+ struct pcache_segment *segment;
+
+ segment = &pos->cache_seg->segment;
+
+ return segment_copy_from_bio(segment, pos->seg_off, key->len, pcache_req->bio, bio_off);
+}
+
+static int cache_copy_to_req_bio(struct pcache_cache *cache, struct pcache_request *pcache_req,
+ u32 bio_off, u32 len, struct pcache_cache_pos *pos, u64 key_gen)
+{
+ struct pcache_cache_segment *cache_seg = pos->cache_seg;
+ struct pcache_segment *segment = &cache_seg->segment;
+ int ret;
+
+ spin_lock(&cache_seg->gen_lock);
+ if (key_gen < cache_seg->gen) {
+ spin_unlock(&cache_seg->gen_lock);
+ return -EINVAL;
+ }
+
+ ret = segment_copy_to_bio(segment, pos->seg_off, len, pcache_req->bio, bio_off);
+ spin_unlock(&cache_seg->gen_lock);
+
+ return ret;
+}
+
+/**
+ * miss_read_end_req - Handle the end of a miss read request.
+ * @backing_req: Pointer to the request structure.
+ * @read_ret: Return value of read.
+ *
+ * This function is called when a backing request to read data from
+ * the backing_dev is completed. If the key associated with the request
+ * is empty (a placeholder), it allocates cache space for the key,
+ * copies the data read from the bio into the cache, and updates
+ * the key's status. If the key has been overwritten by a write
+ * request during this process, it will be deleted from the cache
+ * tree and no further action will be taken.
+ */
+static void miss_read_end_req(struct pcache_backing_dev_req *backing_req, int read_ret)
+{
+ void *priv_data = backing_req->priv_data;
+ struct pcache_request *pcache_req = backing_req->req.upper_req;
+ struct pcache_cache *cache = backing_req->backing_dev->cache;
+ int ret;
+
+ if (priv_data) {
+ struct pcache_cache_key *key;
+ struct pcache_cache_subtree *cache_subtree;
+
+ key = (struct pcache_cache_key *)priv_data;
+ cache_subtree = key->cache_subtree;
+
+ /*
+ * If this key was deleted from the cache_subtree by a write, its
+ * key->flags will have been cleared, so if cache_key_empty() returns
+ * true, the key is still in the cache_subtree.
+ */
+ spin_lock(&cache_subtree->tree_lock);
+ if (cache_key_empty(key)) {
+ /* Check if the backing request was successful. */
+ if (read_ret) {
+ cache_key_delete(key);
+ goto unlock;
+ }
+
+ /* Allocate cache space for the key and copy data from the backing_dev. */
+ ret = cache_data_alloc(cache, key);
+ if (ret) {
+ cache_key_delete(key);
+ goto unlock;
+ }
+
+ ret = cache_copy_from_req_bio(cache, key, pcache_req, backing_req->req.bio_off);
+ if (ret) {
+ cache_seg_put(key->cache_pos.cache_seg);
+ cache_key_delete(key);
+ goto unlock;
+ }
+ key->flags &= ~PCACHE_CACHE_KEY_FLAGS_EMPTY;
+ key->flags |= PCACHE_CACHE_KEY_FLAGS_CLEAN;
+
+ /* Append the key to the cache. */
+ ret = cache_key_append(cache, key, false);
+ if (ret) {
+ cache_seg_put(key->cache_pos.cache_seg);
+ cache_key_delete(key);
+ goto unlock;
+ }
+ }
+unlock:
+ spin_unlock(&cache_subtree->tree_lock);
+ cache_key_put(key);
+ }
+}
+
+/**
+ * submit_cache_miss_req - Submit a backing request when cache data is missing
+ * @cache: The cache context that manages cache operations
+ * @backing_req: The cache request containing information about the read request
+ *
+ * This function is used to handle cases where a cache read request cannot locate
+ * the required data in the cache. When such a miss occurs during `cache_subtree_walk`,
+ * it triggers a backing read request to fetch data from the backing storage.
+ *
+ * If `backing_req->priv_data` is set, it points to a `pcache_cache_key`,
+ * representing a new cache key to be inserted into the cache. The function
+ * calls `cache_key_insert` to add the key to the request key tree before
+ * submitting the backing request.
+ */
+static void submit_cache_miss_req(struct pcache_cache *cache, struct pcache_backing_dev_req *backing_req)
+{
+ if (backing_req->priv_data) {
+ struct pcache_cache_key *key;
+
+ /* Attempt to insert the key into the cache if priv_data is set */
+ key = (struct pcache_cache_key *)backing_req->priv_data;
+ cache_key_insert(&cache->req_key_tree, key, true);
+ }
+ backing_dev_req_submit(backing_req, false);
+}
+
+static void cache_miss_req_free(struct pcache_backing_dev_req *backing_req)
+{
+ struct pcache_cache_key *key;
+
+ if (backing_req->priv_data) {
+ key = backing_req->priv_data;
+ backing_req->priv_data = NULL;
+ cache_key_put(key); /* for ->priv_data */
+ cache_key_put(key); /* for init ref in alloc */
+ }
+
+ backing_dev_req_end(backing_req);
+}
+
+static struct pcache_backing_dev_req *cache_miss_req_alloc(struct pcache_cache *cache,
+ struct pcache_request *parent,
+ gfp_t gfp_mask)
+{
+ struct pcache_backing_dev *backing_dev = cache->backing_dev;
+ struct pcache_backing_dev_req *backing_req;
+ struct pcache_cache_key *key = NULL;
+ struct pcache_backing_dev_req_opts req_opts = { 0 };
+
+ req_opts.type = BACKING_DEV_REQ_TYPE_REQ;
+ req_opts.gfp_mask = gfp_mask;
+ req_opts.req.upper_req = parent;
+
+ backing_req = backing_dev_req_alloc(backing_dev, &req_opts);
+ if (!backing_req)
+ return NULL;
+
+ key = cache_key_alloc(&cache->req_key_tree, gfp_mask);
+ if (!key)
+ goto free_backing_req;
+
+ cache_key_get(key);
+ backing_req->priv_data = key;
+
+ return backing_req;
+
+free_backing_req:
+ cache_miss_req_free(backing_req);
+ return NULL;
+}
+
+static void cache_miss_req_init(struct pcache_cache *cache,
+ struct pcache_backing_dev_req *backing_req,
+ struct pcache_request *parent,
+ u32 off, u32 len, bool insert_key)
+{
+ struct pcache_cache_key *key;
+ struct pcache_backing_dev_req_opts req_opts = { 0 };
+
+ req_opts.type = BACKING_DEV_REQ_TYPE_REQ;
+ req_opts.req.upper_req = parent;
+ req_opts.req.req_off = off;
+ req_opts.req.len = len;
+ req_opts.end_fn = miss_read_end_req;
+
+ backing_dev_req_init(backing_req, &req_opts);
+
+ if (insert_key) {
+ key = backing_req->priv_data;
+ key->off = parent->off + off;
+ key->len = len;
+ key->flags |= PCACHE_CACHE_KEY_FLAGS_EMPTY;
+ } else {
+ key = backing_req->priv_data;
+ backing_req->priv_data = NULL;
+ cache_key_put(key);
+ cache_key_put(key);
+ }
+}
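+
+/*
+ * Miss requests are created in two phases: cache_miss_req_alloc() may run
+ * in atomic context during a walk (GFP_NOWAIT), so it only allocates the
+ * backing request and a key; cache_miss_req_init() later fills in the
+ * range once the walk knows the exact missing extent. With
+ * insert_key == false the pre-allocated key is simply discarded, because
+ * the miss overlaps an existing placeholder key already in the tree.
+ */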
+
+static struct pcache_backing_dev_req *get_pre_alloc_req(struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ struct pcache_cache *cache = ctx->cache_tree->cache;
+ struct pcache_request *pcache_req = ctx->pcache_req;
+ struct pcache_backing_dev_req *backing_req;
+
+ if (ctx->pre_alloc_req) {
+ backing_req = ctx->pre_alloc_req;
+ ctx->pre_alloc_req = NULL;
+
+ return backing_req;
+ }
+
+ return cache_miss_req_alloc(cache, pcache_req, GFP_NOWAIT);
+}
+
+/*
+ * In the process of walking the cache tree to locate cached data, this
+ * function handles the situation where the requested data range lies
+ * entirely before an existing cache node (`key_tmp`). This outcome
+ * signifies that the target data is absent from the cache (cache miss).
+ *
+ * To fulfill this portion of the read request, the function creates a
+ * backing request (`backing_req`) for the missing data range represented
+ * by `key`. It then appends this request to the submission list in the
+ * `ctx`, which will later be processed to retrieve the data from backing
+ * storage. After setting up the backing request, `req_done` in `ctx` is
+ * updated to reflect the length of the handled range, and the range
+ * in `key` is adjusted by trimming off the portion that is now handled.
+ *
+ * The scenario handled here:
+ *
+ * |--------| key_tmp (existing cached range)
+ * |====| key (requested range, preceding key_tmp)
+ *
+ * Since `key` is before `key_tmp`, it signifies that the requested data
+ * range is missing in the cache (cache miss) and needs retrieval from
+ * backing storage.
+ */
+static int read_before(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ struct pcache_backing_dev_req *backing_req;
+ struct pcache_cache *cache = ctx->cache_tree->cache;
+
+ /*
+ * In this scenario, `key` represents a range that precedes `key_tmp`,
+ * meaning the requested data range is missing from the cache tree
+ * and must be retrieved from the backing_dev.
+ */
+ backing_req = get_pre_alloc_req(ctx);
+ if (!backing_req)
+ return SUBTREE_WALK_RET_NEED_REQ;
+
+ cache_miss_req_init(cache, backing_req, ctx->pcache_req, ctx->req_done, key->len, true);
+
+ list_add(&backing_req->node, ctx->submit_req_list);
+ ctx->req_done += key->len;
+ cache_key_cutfront(key, key->len);
+
+ return SUBTREE_WALK_RET_OK;
+}
+
+/*
+ * During cache_subtree_walk, this function manages a scenario where part of the
+ * requested data range overlaps with an existing cache node (`key_tmp`).
+ *
+ * |----------------| key_tmp (existing cached range)
+ * |===========| key (requested range, overlapping the tail of key_tmp)
+ */
+static int read_overlap_tail(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ struct pcache_cache *cache = ctx->cache_tree->cache;
+ struct pcache_backing_dev_req *backing_req;
+ u32 io_len;
+ int ret;
+
+ /*
+ * Calculate the length of the non-overlapping portion of `key`
+ * before `key_tmp`, representing the data missing in the cache.
+ */
+ io_len = cache_key_lstart(key_tmp) - cache_key_lstart(key);
+ if (io_len) {
+ backing_req = get_pre_alloc_req(ctx);
+ if (!backing_req)
+ return SUBTREE_WALK_RET_NEED_REQ;
+
+ cache_miss_req_init(cache, backing_req, ctx->pcache_req, ctx->req_done, io_len, true);
+
+ list_add(&backing_req->node, ctx->submit_req_list);
+ ctx->req_done += io_len;
+ cache_key_cutfront(key, io_len);
+ }
+
+ /*
+ * Handle the overlapping portion by calculating the length of
+ * the remaining data in `key` that coincides with `key_tmp`.
+ */
+ io_len = cache_key_lend(key) - cache_key_lstart(key_tmp);
+ if (cache_key_empty(key_tmp)) {
+ backing_req = get_pre_alloc_req(ctx);
+ if (!backing_req)
+ return SUBTREE_WALK_RET_NEED_REQ;
+
+ cache_miss_req_init(cache, backing_req, ctx->pcache_req, ctx->req_done, io_len, false);
+ submit_cache_miss_req(cache, backing_req);
+ } else {
+ ret = cache_copy_to_req_bio(ctx->cache_tree->cache, ctx->pcache_req, ctx->req_done,
+ io_len, &key_tmp->cache_pos, key_tmp->seg_gen);
+ if (ret) {
+ if (ret == -EINVAL) {
+ cache_key_delete(key_tmp);
+ return SUBTREE_WALK_RET_RESEARCH;
+ }
+
+ ctx->ret = ret;
+ return SUBTREE_WALK_RET_ERR;
+ }
+ }
+
+ ctx->req_done += io_len;
+ cache_key_cutfront(key, io_len);
+
+ return SUBTREE_WALK_RET_OK;
+}
+
+/*
+ * |----| key_tmp (existing cached range)
+ * |==========| key (requested range)
+ */
+static int read_overlap_contain(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ struct pcache_cache *cache = ctx->cache_tree->cache;
+ struct pcache_backing_dev_req *backing_req;
+ u32 io_len;
+ int ret;
+
+ /*
+ * Calculate the non-overlapping part of `key` before `key_tmp`
+ * to identify the missing data length.
+ */
+ io_len = cache_key_lstart(key_tmp) - cache_key_lstart(key);
+ if (io_len) {
+ backing_req = get_pre_alloc_req(ctx);
+ if (!backing_req)
+ return SUBTREE_WALK_RET_NEED_REQ;
+
+ cache_miss_req_init(cache, backing_req, ctx->pcache_req, ctx->req_done, io_len, true);
+
+ list_add(&backing_req->node, ctx->submit_req_list);
+
+ ctx->req_done += io_len;
+ cache_key_cutfront(key, io_len);
+ }
+
+ /*
+ * Handle the overlapping portion between `key` and `key_tmp`.
+ */
+ io_len = key_tmp->len;
+ if (cache_key_empty(key_tmp)) {
+ backing_req = get_pre_alloc_req(ctx);
+ if (!backing_req)
+ return SUBTREE_WALK_RET_NEED_REQ;
+
+ cache_miss_req_init(cache, backing_req, ctx->pcache_req, ctx->req_done, io_len, false);
+ submit_cache_miss_req(cache, backing_req);
+ } else {
+ ret = cache_copy_to_req_bio(ctx->cache_tree->cache, ctx->pcache_req, ctx->req_done,
+ io_len, &key_tmp->cache_pos, key_tmp->seg_gen);
+ if (ret) {
+ if (ret == -EINVAL) {
+ cache_key_delete(key_tmp);
+ return SUBTREE_WALK_RET_RESEARCH;
+ }
+
+ ctx->ret = ret;
+ return SUBTREE_WALK_RET_ERR;
+ }
+ }
+
+ ctx->req_done += io_len;
+ cache_key_cutfront(key, io_len);
+
+ return SUBTREE_WALK_RET_OK;
+}
+
+/*
+ * |-----------| key_tmp (existing cached range)
+ * |====| key (requested range, fully within key_tmp)
+ *
+ * If `key_tmp` contains valid cached data, this function copies the relevant
+ * portion to the request's bio. Otherwise, it sends a backing request to
+ * fetch the required data range.
+ */
+static int read_overlap_contained(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ struct pcache_cache *cache = ctx->cache_tree->cache;
+ struct pcache_backing_dev_req *backing_req;
+ struct pcache_cache_pos pos;
+ int ret;
+
+ /*
+ * Check if `key_tmp` is empty, indicating a miss. If so, initiate
+ * a backing request to fetch the required data for `key`.
+ */
+ if (cache_key_empty(key_tmp)) {
+ backing_req = get_pre_alloc_req(ctx);
+ if (!backing_req)
+ return SUBTREE_WALK_RET_NEED_REQ;
+
+ cache_miss_req_init(cache, backing_req, ctx->pcache_req, ctx->req_done, key->len, false);
+ submit_cache_miss_req(cache, backing_req);
+ } else {
+ cache_pos_copy(&pos, &key_tmp->cache_pos);
+ cache_pos_advance(&pos, cache_key_lstart(key) - cache_key_lstart(key_tmp));
+
+ ret = cache_copy_to_req_bio(ctx->cache_tree->cache, ctx->pcache_req, ctx->req_done,
+ key->len, &pos, key_tmp->seg_gen);
+ if (ret) {
+ if (ret == -EINVAL) {
+ cache_key_delete(key_tmp);
+ return SUBTREE_WALK_RET_RESEARCH;
+ }
+
+ ctx->ret = ret;
+ return SUBTREE_WALK_RET_ERR;
+ }
+ }
+
+ ctx->req_done += key->len;
+ cache_key_cutfront(key, key->len);
+
+ return SUBTREE_WALK_RET_OK;
+}
+
+/*
+ * |--------| key_tmp (existing cached range)
+ * |==========| key (requested range, overlapping the head of key_tmp)
+ */
+static int read_overlap_head(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ struct pcache_cache *cache = ctx->cache_tree->cache;
+ struct pcache_backing_dev_req *backing_req;
+ struct pcache_cache_pos pos;
+ u32 io_len;
+ int ret;
+
+ io_len = cache_key_lend(key_tmp) - cache_key_lstart(key);
+
+ if (cache_key_empty(key_tmp)) {
+ backing_req = get_pre_alloc_req(ctx);
+ if (!backing_req)
+ return SUBTREE_WALK_RET_NEED_REQ;
+
+ cache_miss_req_init(cache, backing_req, ctx->pcache_req, ctx->req_done, io_len, false);
+ submit_cache_miss_req(cache, backing_req);
+ } else {
+ cache_pos_copy(&pos, &key_tmp->cache_pos);
+ cache_pos_advance(&pos, cache_key_lstart(key) - cache_key_lstart(key_tmp));
+
+ ret = cache_copy_to_req_bio(ctx->cache_tree->cache, ctx->pcache_req, ctx->req_done,
+ io_len, &pos, key_tmp->seg_gen);
+ if (ret) {
+ if (ret == -EINVAL) {
+ cache_key_delete(key_tmp);
+ return SUBTREE_WALK_RET_RESEARCH;
+ }
+
+ ctx->ret = ret;
+ return SUBTREE_WALK_RET_ERR;
+ }
+ }
+
+ ctx->req_done += io_len;
+ cache_key_cutfront(key, io_len);
+
+ return SUBTREE_WALK_RET_OK;
+}
+
+/**
+ * read_walk_finally - Finalizes the cache read tree walk by submitting any
+ * remaining backing requests
+ * @ctx: Context structure holding information about the cache,
+ * read request, and submission list
+ * @ret: The walk return value accumulated so far.
+ *
+ * This function is called at the end of the `cache_subtree_walk` during a
+ * cache read operation. It first iterates through the submission list of
+ * backing requests created during the walk, removing each request from the
+ * list and submitting it. Then, if the walk completed successfully, it
+ * checks whether any data requested by `key` was not found in the cache
+ * tree, and if so, sends a backing request to retrieve that remaining data.
+ *
+ * The scenarios managed here include:
+ * - Iterating through `ctx->submit_req_list` to submit each backing request
+ * enqueued during the walk.
+ * - Sending a backing request for the remaining length of `key` if it was
+ * not fulfilled by existing cache entries.
+ *
+ * This ensures all necessary backing requests for cache misses are submitted
+ * to the backing storage to retrieve any data that could not be found in
+ * the cache.
+ */
+static int read_walk_finally(struct pcache_cache_subtree_walk_ctx *ctx, int ret)
+{
+ struct pcache_cache *cache = ctx->cache_tree->cache;
+ struct pcache_backing_dev_req *backing_req, *next_req;
+ struct pcache_cache_key *key = ctx->key;
+
+ list_for_each_entry_safe(backing_req, next_req, ctx->submit_req_list, node) {
+ list_del_init(&backing_req->node);
+ submit_cache_miss_req(ctx->cache_tree->cache, backing_req);
+ }
+
+ if (ret != SUBTREE_WALK_RET_OK)
+ return ret;
+
+ if (key->len) {
+ backing_req = get_pre_alloc_req(ctx);
+ if (!backing_req)
+ return SUBTREE_WALK_RET_NEED_REQ;
+
+ cache_miss_req_init(cache, backing_req, ctx->pcache_req, ctx->req_done, key->len, true);
+ submit_cache_miss_req(cache, backing_req);
+ ctx->req_done += key->len;
+ }
+
+ return SUBTREE_WALK_RET_OK;
+}
+
+/*
+ * This function is used within `cache_subtree_walk` to determine whether the
+ * read operation has covered the requested data length. It compares the
+ * amount of data processed (`ctx->req_done`) with the total data length
+ * specified in the original request (`ctx->pcache_req->data_len`).
+ *
+ * If `req_done` meets or exceeds the required data length, the function
+ * returns `true`, indicating the walk is complete. Otherwise, it returns `false`,
+ * signaling that additional data processing is needed to fulfill the request.
+ */
+static bool read_walk_done(struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ return (ctx->req_done >= ctx->pcache_req->data_len);
+}
+
+/**
+ * cache_read - Process a read request by traversing the cache tree
+ * @cache: Cache structure holding cache trees and related configurations
+ * @pcache_req: Request structure with information about the data to read
+ *
+ * This function attempts to fulfill a read request by traversing the cache tree(s)
+ * to locate cached data for the requested range. If parts of the data are missing
+ * in the cache, backing requests are generated to retrieve the required segments.
+ *
+ * The function operates by initializing a key for the requested data range and
+ * preparing a context (`walk_ctx`) to manage the cache tree traversal. The context
+ * includes pointers to functions (e.g., `read_before`, `read_overlap_tail`) that handle
+ * specific conditions encountered during the traversal. The `walk_finally` and `walk_done`
+ * functions manage the end stages of the traversal, while the `delete_key_list` and
+ * `submit_req_list` lists track any keys to be deleted or requests to be submitted.
+ *
+ * The function first calculates the requested range and checks if it fits within the
+ * current cache tree (based on the tree's size limits). It then locks the cache tree
+ * and performs a search to locate any matching keys. If there are outdated keys,
+ * these are deleted, and the search is restarted to ensure accurate data retrieval.
+ *
+ * If the requested range spans multiple cache trees, the function moves on to the
+ * next tree once the current range has been processed. This continues until the
+ * entire requested data length has been handled.
+ */
+static int cache_read(struct pcache_cache *cache, struct pcache_request *pcache_req)
+{
+ struct pcache_cache_key key_data = { .off = pcache_req->off, .len = pcache_req->data_len };
+ struct pcache_cache_subtree *cache_subtree;
+ struct pcache_cache_key *key_tmp = NULL, *key_next;
+ struct rb_node *prev_node = NULL;
+ struct pcache_cache_key *key = &key_data;
+ struct pcache_cache_subtree_walk_ctx walk_ctx = { 0 };
+ struct pcache_backing_dev_req *backing_req, *next_req;
+ LIST_HEAD(delete_key_list);
+ LIST_HEAD(submit_req_list);
+ int ret;
+
+ walk_ctx.cache_tree = &cache->req_key_tree;
+ walk_ctx.req_done = 0;
+ walk_ctx.pcache_req = pcache_req;
+ walk_ctx.before = read_before;
+ walk_ctx.overlap_tail = read_overlap_tail;
+ walk_ctx.overlap_head = read_overlap_head;
+ walk_ctx.overlap_contain = read_overlap_contain;
+ walk_ctx.overlap_contained = read_overlap_contained;
+ walk_ctx.walk_finally = read_walk_finally;
+ walk_ctx.walk_done = read_walk_done;
+ walk_ctx.delete_key_list = &delete_key_list;
+ walk_ctx.submit_req_list = &submit_req_list;
+
+next:
+ key->off = pcache_req->off + walk_ctx.req_done;
+ key->len = pcache_req->data_len - walk_ctx.req_done;
+ if (key->len > PCACHE_CACHE_SUBTREE_SIZE - (key->off & PCACHE_CACHE_SUBTREE_SIZE_MASK))
+ key->len = PCACHE_CACHE_SUBTREE_SIZE - (key->off & PCACHE_CACHE_SUBTREE_SIZE_MASK);
+
+ cache_subtree = get_subtree(&cache->req_key_tree, key->off);
+ spin_lock(&cache_subtree->tree_lock);
+search:
+ prev_node = cache_subtree_search(cache_subtree, key, NULL, NULL, &delete_key_list);
+ if (!list_empty(&delete_key_list)) {
+ list_for_each_entry_safe(key_tmp, key_next, &delete_key_list, list_node) {
+ list_del_init(&key_tmp->list_node);
+ cache_key_delete(key_tmp);
+ }
+ goto search;
+ }
+
+ walk_ctx.start_node = prev_node;
+ walk_ctx.key = key;
+
+ ret = cache_subtree_walk(&walk_ctx);
+ if (ret == SUBTREE_WALK_RET_RESEARCH)
+ goto search;
+ spin_unlock(&cache_subtree->tree_lock);
+
+ if (ret == SUBTREE_WALK_RET_ERR) {
+ ret = walk_ctx.ret;
+ goto out;
+ }
+
+ if (ret == SUBTREE_WALK_RET_NEED_REQ) {
+ walk_ctx.pre_alloc_req = cache_miss_req_alloc(cache, pcache_req, GFP_NOIO);
+ pcache_dev_debug(CACHE_TO_PCACHE(cache), "allocate pre_alloc_req with GFP_NOIO");
+ }
+
+ if (walk_ctx.req_done < pcache_req->data_len)
+ goto next;
+ ret = 0;
+out:
+ if (walk_ctx.pre_alloc_req)
+ cache_miss_req_free(walk_ctx.pre_alloc_req);
+
+ list_for_each_entry_safe(backing_req, next_req, &submit_req_list, node) {
+ list_del_init(&backing_req->node);
+ backing_dev_req_end(backing_req);
+ }
+
+ return ret;
+}
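+
+/*
+ * Request chunking sketch, as used by cache_read() above and cache_write()
+ * below: a key never crosses a subtree boundary, so each iteration clamps
+ * the key to the remainder of its aligned window:
+ *
+ * remain = PCACHE_CACHE_SUBTREE_SIZE -
+ * (key->off & PCACHE_CACHE_SUBTREE_SIZE_MASK);
+ * if (key->len > remain)
+ * key->len = remain;
+ */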
+
+static int cache_write(struct pcache_cache *cache, struct pcache_request *pcache_req)
+{
+ struct pcache_cache_subtree *cache_subtree;
+ struct pcache_cache_key *key;
+ u64 offset = pcache_req->off;
+ u32 length = pcache_req->data_len;
+ u32 io_done = 0;
+ int ret;
+
+ while (true) {
+ if (io_done >= length)
+ break;
+
+ key = cache_key_alloc(&cache->req_key_tree, GFP_NOIO);
+ key->off = offset + io_done;
+ key->len = length - io_done;
+ if (key->len > PCACHE_CACHE_SUBTREE_SIZE - (key->off & PCACHE_CACHE_SUBTREE_SIZE_MASK))
+ key->len = PCACHE_CACHE_SUBTREE_SIZE - (key->off & PCACHE_CACHE_SUBTREE_SIZE_MASK);
+
+ ret = cache_data_alloc(cache, key);
+ if (ret) {
+ cache_key_put(key);
+ goto err;
+ }
+
+ ret = cache_copy_from_req_bio(cache, key, pcache_req, io_done);
+ if (ret) {
+ cache_seg_put(key->cache_pos.cache_seg);
+ cache_key_put(key);
+ goto err;
+ }
+
+ cache_subtree = get_subtree(&cache->req_key_tree, key->off);
+ spin_lock(&cache_subtree->tree_lock);
+ cache_key_insert(&cache->req_key_tree, key, true);
+ ret = cache_key_append(cache, key, pcache_req->bio->bi_opf & REQ_FUA);
+ if (ret) {
+ cache_seg_put(key->cache_pos.cache_seg);
+ cache_key_delete(key);
+ goto unlock;
+ }
+
+ io_done += key->len;
+ spin_unlock(&cache_subtree->tree_lock);
+ }
+
+ return 0;
+unlock:
+ spin_unlock(&cache_subtree->tree_lock);
+err:
+ return ret;
+}
+
+/**
+ * cache_flush - Flush all ksets to persist any pending cache data
+ * @cache: Pointer to the cache structure
+ *
+ * This function iterates through all ksets associated with the provided `cache`
+ * and ensures that any data marked for persistence is written to media. For each
+ * kset, it acquires the kset lock, then invokes `cache_kset_close`, which handles
+ * the persistence logic for that kset.
+ *
+ * If `cache_kset_close` encounters an error, the function returns immediately
+ * with that error code, preventing the flush operation from proceeding to
+ * subsequent ksets.
+ */
+int cache_flush(struct pcache_cache *cache)
+{
+ struct pcache_cache_kset *kset;
+ int ret;
+ u32 i;
+
+ for (i = 0; i < cache->n_ksets; i++) {
+ kset = get_kset(cache, i);
+
+ spin_lock(&kset->kset_lock);
+ ret = cache_kset_close(cache, kset);
+ spin_unlock(&kset->kset_lock);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int pcache_cache_handle_req(struct pcache_cache *cache, struct pcache_request *pcache_req)
+{
+ struct bio *bio = pcache_req->bio;
+
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH))
+ return cache_flush(cache);
+
+ if (bio_data_dir(bio) == READ)
+ return cache_read(cache, pcache_req);
+
+ return cache_write(cache, pcache_req);
+}
diff --git a/drivers/md/dm-pcache/cache_segment.c b/drivers/md/dm-pcache/cache_segment.c
new file mode 100644
index 000000000000..f0b58980806e
--- /dev/null
+++ b/drivers/md/dm-pcache/cache_segment.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "cache_dev.h"
+#include "cache.h"
+#include "backing_dev.h"
+#include "dm_pcache.h"
+
+static inline struct pcache_segment_info *get_seg_info_addr(struct pcache_cache_segment *cache_seg)
+{
+ struct pcache_segment_info *seg_info_addr;
+ u32 seg_id = cache_seg->segment.seg_id;
+ void *seg_addr;
+
+ seg_addr = CACHE_DEV_SEGMENT(cache_seg->cache->cache_dev, seg_id);
+ seg_info_addr = seg_addr + PCACHE_SEG_INFO_SIZE * cache_seg->info_index;
+
+ return seg_info_addr;
+}
+
+static void cache_seg_info_write(struct pcache_cache_segment *cache_seg)
+{
+ struct pcache_segment_info *seg_info_addr;
+ struct pcache_segment_info *seg_info = &cache_seg->cache_seg_info;
+
+ mutex_lock(&cache_seg->info_lock);
+ seg_info->header.seq++;
+ seg_info->header.crc = pcache_meta_crc(&seg_info->header, sizeof(struct pcache_segment_info));
+
+ seg_info_addr = get_seg_info_addr(cache_seg);
+ memcpy_flushcache(seg_info_addr, seg_info, sizeof(struct pcache_segment_info));
+ pmem_wmb();
+
+ cache_seg->info_index = (cache_seg->info_index + 1) % PCACHE_META_INDEX_MAX;
+ mutex_unlock(&cache_seg->info_lock);
+}
+
+static int cache_seg_info_load(struct pcache_cache_segment *cache_seg)
+{
+ struct pcache_segment_info *cache_seg_info_addr_base, *cache_seg_info_addr;
+ struct pcache_cache_dev *cache_dev = cache_seg->cache->cache_dev;
+ struct dm_pcache *pcache = CACHE_DEV_TO_PCACHE(cache_dev);
+ u32 seg_id = cache_seg->segment.seg_id;
+ int ret = 0;
+
+ cache_seg_info_addr_base = CACHE_DEV_SEGMENT(cache_dev, seg_id);
+
+ mutex_lock(&cache_seg->info_lock);
+ cache_seg_info_addr = pcache_meta_find_latest(&cache_seg_info_addr_base->header,
+ sizeof(struct pcache_segment_info),
+ PCACHE_SEG_INFO_SIZE,
+ &cache_seg->cache_seg_info);
+ if (IS_ERR(cache_seg_info_addr)) {
+ ret = PTR_ERR(cache_seg_info_addr);
+ goto out;
+ } else if (!cache_seg_info_addr) {
+ ret = -EIO;
+ goto out;
+ }
+ cache_seg->info_index = cache_seg_info_addr - cache_seg_info_addr_base;
+out:
+ mutex_unlock(&cache_seg->info_lock);
+
+ if (ret)
+ pcache_dev_err(pcache, "can't read segment info of segment: %u, ret: %d\n",
+ cache_seg->segment.seg_id, ret);
+ return ret;
+}
+
+static int cache_seg_ctrl_load(struct pcache_cache_segment *cache_seg)
+{
+ struct pcache_cache_seg_ctrl *cache_seg_ctrl = cache_seg->cache_seg_ctrl;
+ struct pcache_cache_seg_gen cache_seg_gen, *cache_seg_gen_addr;
+ int ret = 0;
+
+ cache_seg_gen_addr = pcache_meta_find_latest(&cache_seg_ctrl->gen->header,
+ sizeof(struct pcache_cache_seg_gen),
+ sizeof(struct pcache_cache_seg_gen),
+ &cache_seg_gen);
+ if (IS_ERR(cache_seg_gen_addr)) {
+ ret = PTR_ERR(cache_seg_gen_addr);
+ goto out;
+ }
+
+ if (!cache_seg_gen_addr) {
+ cache_seg->gen = 0;
+ cache_seg->gen_seq = 0;
+ cache_seg->gen_index = 0;
+ goto out;
+ }
+
+ cache_seg->gen = cache_seg_gen.gen;
+ cache_seg->gen_seq = cache_seg_gen.header.seq;
+ cache_seg->gen_index = (cache_seg_gen_addr - cache_seg_ctrl->gen);
+out:
+
+ return ret;
+}
+
+static inline struct pcache_cache_seg_gen *get_cache_seg_gen_addr(struct pcache_cache_segment *cache_seg)
+{
+ struct pcache_cache_seg_ctrl *cache_seg_ctrl = cache_seg->cache_seg_ctrl;
+
+ return (cache_seg_ctrl->gen + cache_seg->gen_index);
+}
+
+/*
+ * cache_seg_ctrl_write - write cache segment control information
+ * @seg: the cache segment to update
+ *
+ * This function writes the control information of a cache segment to media.
+ *
+ * Although this updates shared control data, we intentionally do not use
+ * any locking here. All accesses to control information are single-threaded:
+ *
+ * - All reads occur during the init phase, where no concurrent writes
+ * can happen.
+ * - Writes happen once during init and once when the last reference
+ * to the segment is dropped in cache_seg_put().
+ *
+ * Both cases are guaranteed to be single-threaded, so there is no risk
+ * of concurrent read/write races.
+ */
+static void cache_seg_ctrl_write(struct pcache_cache_segment *cache_seg)
+{
+ struct pcache_cache_seg_gen cache_seg_gen;
+
+ cache_seg_gen.gen = cache_seg->gen;
+ cache_seg_gen.header.seq = ++cache_seg->gen_seq;
+ cache_seg_gen.header.crc = pcache_meta_crc(&cache_seg_gen.header,
+ sizeof(struct pcache_cache_seg_gen));
+
+ memcpy_flushcache(get_cache_seg_gen_addr(cache_seg), &cache_seg_gen, sizeof(struct pcache_cache_seg_gen));
+ pmem_wmb();
+
+ cache_seg->gen_index = (cache_seg->gen_index + 1) % PCACHE_META_INDEX_MAX;
+}
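+
+/*
+ * Both cache_seg_info_write() and cache_seg_ctrl_write() follow the same
+ * crash-safe update sketch: PCACHE_META_INDEX_MAX on-media slots are
+ * written round-robin, each stamped with an increasing sequence number
+ * and a crc, so a torn write leaves the previous slot intact and
+ * pcache_meta_find_latest() can pick the newest valid copy on load:
+ *
+ * meta->header.seq++;
+ * meta->header.crc = pcache_meta_crc(&meta->header, size);
+ * memcpy_flushcache(base + index * size, meta, size);
+ * pmem_wmb();
+ * index = (index + 1) % PCACHE_META_INDEX_MAX;
+ */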
+
+static void cache_seg_ctrl_init(struct pcache_cache_segment *cache_seg)
+{
+ cache_seg->gen = 0;
+ cache_seg->gen_seq = 0;
+ cache_seg->gen_index = 0;
+ cache_seg_ctrl_write(cache_seg);
+}
+
+static int cache_seg_meta_load(struct pcache_cache_segment *cache_seg)
+{
+ int ret;
+
+ ret = cache_seg_info_load(cache_seg);
+ if (ret)
+ goto err;
+
+ ret = cache_seg_ctrl_load(cache_seg);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ return ret;
+}
+
+/**
+ * cache_seg_set_next_seg - Sets the ID of the next segment
+ * @cache_seg: Pointer to the cache segment structure.
+ * @seg_id: The segment ID to set as the next segment.
+ *
+ * A pcache_cache allocates multiple cache segments, which are linked together
+ * through next_seg. When loading a pcache_cache, the first cache segment can
+ * be found using cache->seg_id, which allows access to all the cache segments.
+ */
+void cache_seg_set_next_seg(struct pcache_cache_segment *cache_seg, u32 seg_id)
+{
+ cache_seg->cache_seg_info.flags |= PCACHE_SEG_INFO_FLAGS_HAS_NEXT;
+ cache_seg->cache_seg_info.next_seg = seg_id;
+ cache_seg_info_write(cache_seg);
+}
+
+int cache_seg_init(struct pcache_cache *cache, u32 seg_id, u32 cache_seg_id,
+ bool new_cache)
+{
+ struct pcache_cache_dev *cache_dev = cache->cache_dev;
+ struct pcache_cache_segment *cache_seg = &cache->segments[cache_seg_id];
+ struct pcache_segment_init_options seg_options = { 0 };
+ struct pcache_segment *segment = &cache_seg->segment;
+ int ret;
+
+ cache_seg->cache = cache;
+ cache_seg->cache_seg_id = cache_seg_id;
+ spin_lock_init(&cache_seg->gen_lock);
+ atomic_set(&cache_seg->refs, 0);
+ mutex_init(&cache_seg->info_lock);
+
+ /* init pcache_segment */
+ seg_options.type = PCACHE_SEGMENT_TYPE_CACHE_DATA;
+ seg_options.data_off = PCACHE_CACHE_SEG_CTRL_OFF + PCACHE_CACHE_SEG_CTRL_SIZE;
+ seg_options.seg_id = seg_id;
+ seg_options.seg_info = &cache_seg->cache_seg_info;
+ pcache_segment_init(cache_dev, segment, &seg_options);
+
+ cache_seg->cache_seg_ctrl = CACHE_DEV_SEGMENT(cache_dev, seg_id) + PCACHE_CACHE_SEG_CTRL_OFF;
+
+ if (new_cache) {
+ cache_dev_zero_range(cache_dev, CACHE_DEV_SEGMENT(cache_dev, seg_id),
+ PCACHE_SEG_INFO_SIZE * PCACHE_META_INDEX_MAX +
+ PCACHE_CACHE_SEG_CTRL_SIZE);
+
+ cache_seg_ctrl_init(cache_seg);
+
+ cache_seg->info_index = 0;
+ cache_seg_info_write(cache_seg);
+
+ /* clear outdated kset in segment */
+ memcpy_flushcache(segment->data, &pcache_empty_kset, sizeof(struct pcache_cache_kset_onmedia));
+ pmem_wmb();
+ } else {
+ ret = cache_seg_meta_load(cache_seg);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ return ret;
+}
+
+/**
+ * get_cache_segment - Retrieves a free cache segment from the cache.
+ * @cache: Pointer to the cache structure.
+ *
+ * This function attempts to find a free cache segment that can be used.
+ * It locks the segment map and checks for the next available segment ID.
+ * If a free segment is found, it initializes it and returns a pointer to the
+ * cache segment structure. Returns NULL if no segments are available.
+ */
+struct pcache_cache_segment *get_cache_segment(struct pcache_cache *cache)
+{
+ struct pcache_cache_segment *cache_seg;
+ u32 seg_id;
+
+ spin_lock(&cache->seg_map_lock);
+again:
+ seg_id = find_next_zero_bit(cache->seg_map, cache->n_segs, cache->last_cache_seg);
+ if (seg_id == cache->n_segs) {
+ /* reset the hint of ->last_cache_seg and retry */
+ if (cache->last_cache_seg) {
+ cache->last_cache_seg = 0;
+ goto again;
+ }
+ cache->cache_full = true;
+ spin_unlock(&cache->seg_map_lock);
+ return NULL;
+ }
+
+ /*
+ * found an available cache_seg, mark it used in seg_map
+ * and update the search hint ->last_cache_seg
+ */
+ __set_bit(seg_id, cache->seg_map);
+ cache->last_cache_seg = seg_id;
+ spin_unlock(&cache->seg_map_lock);
+
+ cache_seg = &cache->segments[seg_id];
+ cache_seg->cache_seg_id = seg_id;
+
+ return cache_seg;
+}
+
+static void cache_seg_gen_increase(struct pcache_cache_segment *cache_seg)
+{
+ spin_lock(&cache_seg->gen_lock);
+ cache_seg->gen++;
+ spin_unlock(&cache_seg->gen_lock);
+
+ cache_seg_ctrl_write(cache_seg);
+}
+
+void cache_seg_get(struct pcache_cache_segment *cache_seg)
+{
+ atomic_inc(&cache_seg->refs);
+}
+
+static void cache_seg_invalidate(struct pcache_cache_segment *cache_seg)
+{
+ struct pcache_cache *cache;
+
+ cache = cache_seg->cache;
+ cache_seg_gen_increase(cache_seg);
+
+ spin_lock(&cache->seg_map_lock);
+ if (cache->cache_full)
+ cache->cache_full = false;
+ __clear_bit(cache_seg->cache_seg_id, cache->seg_map);
+ spin_unlock(&cache->seg_map_lock);
+
+ pcache_defer_reqs_kick(CACHE_TO_PCACHE(cache));
+ /* clean_work will clean invalid keys in the key_tree */
+ queue_work(cache_get_wq(cache), &cache->clean_work);
+}
+
+void cache_seg_put(struct pcache_cache_segment *cache_seg)
+{
+ if (atomic_dec_and_test(&cache_seg->refs))
+ cache_seg_invalidate(cache_seg);
+}
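+
+/*
+ * Lifecycle sketch: every cache key pins its segment with cache_seg_get();
+ * when the last reference drops, cache_seg_invalidate() bumps the segment
+ * generation and returns the segment to the free map. A reader racing with
+ * the invalidation then fails the key_gen check in cache_copy_to_req_bio()
+ * with -EINVAL and deletes the stale key, while clean_fn() sweeps any
+ * remaining invalid keys in the background.
+ */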
diff --git a/drivers/md/dm-pcache/cache_writeback.c b/drivers/md/dm-pcache/cache_writeback.c
new file mode 100644
index 000000000000..87a82b3fe836
--- /dev/null
+++ b/drivers/md/dm-pcache/cache_writeback.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/bio.h>
+
+#include "cache.h"
+#include "backing_dev.h"
+#include "cache_dev.h"
+#include "dm_pcache.h"
+
+static void writeback_ctx_end(struct pcache_cache *cache, int ret)
+{
+ if (ret && !cache->writeback_ctx.ret) {
+ pcache_dev_err(CACHE_TO_PCACHE(cache), "writeback error: %d", ret);
+ cache->writeback_ctx.ret = ret;
+ }
+
+ if (!atomic_dec_and_test(&cache->writeback_ctx.pending))
+ return;
+
+ if (!cache->writeback_ctx.ret) {
+ backing_dev_flush(cache->backing_dev);
+
+ mutex_lock(&cache->dirty_tail_lock);
+ cache_pos_advance(&cache->dirty_tail, cache->writeback_ctx.advance);
+ cache_encode_dirty_tail(cache);
+ mutex_unlock(&cache->dirty_tail_lock);
+ }
+ queue_delayed_work(cache_get_wq(cache), &cache->writeback_work, 0);
+}
+
+static void writeback_end_req(struct pcache_backing_dev_req *backing_req, int ret)
+{
+ struct pcache_cache *cache = backing_req->priv_data;
+
+ mutex_lock(&cache->writeback_lock);
+ writeback_ctx_end(cache, ret);
+ mutex_unlock(&cache->writeback_lock);
+}
+
+static inline bool is_cache_clean(struct pcache_cache *cache, struct pcache_cache_pos *dirty_tail)
+{
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+ struct pcache_cache_kset_onmedia *kset_onmedia;
+ u32 to_copy;
+ void *addr;
+ int ret;
+
+ addr = cache_pos_addr(dirty_tail);
+ kset_onmedia = (struct pcache_cache_kset_onmedia *)cache->wb_kset_onmedia_buf;
+
+ to_copy = min(PCACHE_KSET_ONMEDIA_SIZE_MAX, PCACHE_SEG_SIZE - dirty_tail->seg_off);
+ ret = copy_mc_to_kernel(kset_onmedia, addr, to_copy);
+ if (ret) {
+ pcache_dev_err(pcache, "error to read kset: %d", ret);
+ return true;
+ }
+
+ /* Check if the magic number matches the expected value */
+ if (kset_onmedia->magic != PCACHE_KSET_MAGIC) {
+ pcache_dev_debug(pcache, "dirty_tail: %u:%u magic: %llx, not expected: %llx\n",
+ dirty_tail->cache_seg->cache_seg_id, dirty_tail->seg_off,
+ kset_onmedia->magic, PCACHE_KSET_MAGIC);
+ return true;
+ }
+
+ /* Verify the CRC checksum for data integrity */
+ if (kset_onmedia->crc != cache_kset_crc(kset_onmedia)) {
+ pcache_dev_debug(pcache, "dirty_tail: %u:%u crc: %x, not expected: %x\n",
+ dirty_tail->cache_seg->cache_seg_id, dirty_tail->seg_off,
+ cache_kset_crc(kset_onmedia), kset_onmedia->crc);
+ return true;
+ }
+
+ return false;
+}
+
+void cache_writeback_exit(struct pcache_cache *cache)
+{
+ cancel_delayed_work_sync(&cache->writeback_work);
+ backing_dev_flush(cache->backing_dev);
+ cache_tree_exit(&cache->writeback_key_tree);
+}
+
+int cache_writeback_init(struct pcache_cache *cache)
+{
+ int ret;
+
+ ret = cache_tree_init(cache, &cache->writeback_key_tree, 1);
+ if (ret)
+ goto err;
+
+ atomic_set(&cache->writeback_ctx.pending, 0);
+
+ /* Queue delayed work to start writeback handling */
+ queue_delayed_work(cache_get_wq(cache), &cache->writeback_work, 0);
+
+ return 0;
+err:
+ return ret;
+}
+
+static void cache_key_writeback(struct pcache_cache *cache, struct pcache_cache_key *key)
+{
+ struct pcache_backing_dev_req *writeback_req;
+ struct pcache_backing_dev_req_opts writeback_req_opts = { 0 };
+ struct pcache_cache_pos *pos;
+ void *addr;
+ u32 seg_remain, req_len, done = 0;
+
+ if (cache_key_clean(key))
+ return;
+
+ pos = &key->cache_pos;
+
+ seg_remain = cache_seg_remain(pos);
+ BUG_ON(seg_remain < key->len);
+next_req:
+ addr = cache_pos_addr(pos) + done;
+ req_len = backing_dev_req_coalesced_max_len(addr, key->len - done);
+
+ writeback_req_opts.type = BACKING_DEV_REQ_TYPE_KMEM;
+ writeback_req_opts.gfp_mask = GFP_NOIO;
+ writeback_req_opts.end_fn = writeback_end_req;
+ writeback_req_opts.priv_data = cache;
+
+ writeback_req_opts.kmem.data = addr;
+ writeback_req_opts.kmem.opf = REQ_OP_WRITE;
+ writeback_req_opts.kmem.len = req_len;
+ writeback_req_opts.kmem.backing_off = key->off + done;
+
+ writeback_req = backing_dev_req_create(cache->backing_dev, &writeback_req_opts);
+
+ atomic_inc(&cache->writeback_ctx.pending);
+ backing_dev_req_submit(writeback_req, true);
+
+ done += req_len;
+ if (done < key->len)
+ goto next_req;
+}
+
+static void cache_wb_tree_writeback(struct pcache_cache *cache, u32 advance)
+{
+ struct pcache_cache_tree *cache_tree = &cache->writeback_key_tree;
+ struct pcache_cache_subtree *cache_subtree;
+ struct rb_node *node;
+ struct pcache_cache_key *key;
+ u32 i;
+
+ cache->writeback_ctx.ret = 0;
+ cache->writeback_ctx.advance = advance;
+ atomic_set(&cache->writeback_ctx.pending, 1);
+
+ for (i = 0; i < cache_tree->n_subtrees; i++) {
+ cache_subtree = &cache_tree->subtrees[i];
+
+ node = rb_first(&cache_subtree->root);
+ while (node) {
+ key = CACHE_KEY(node);
+ node = rb_next(node);
+
+ cache_key_writeback(cache, key);
+ cache_key_delete(key);
+ }
+ }
+ writeback_ctx_end(cache, 0);
+}
+
+static int cache_kset_insert_tree(struct pcache_cache *cache, struct pcache_cache_kset_onmedia *kset_onmedia)
+{
+ struct pcache_cache_key_onmedia *key_onmedia;
+ struct pcache_cache_subtree *cache_subtree;
+ struct pcache_cache_key *key;
+ int ret;
+ u32 i;
+
+ /* Iterate through all keys in the kset and insert each into the writeback tree */
+ for (i = 0; i < kset_onmedia->key_num; i++) {
+ key_onmedia = &kset_onmedia->data[i];
+
+ key = cache_key_alloc(&cache->writeback_key_tree, GFP_NOIO);
+ ret = cache_key_decode(cache, key_onmedia, key);
+ if (ret) {
+ cache_key_put(key);
+ goto clear_tree;
+ }
+
+ cache_subtree = get_subtree(&cache->writeback_key_tree, key->off);
+ spin_lock(&cache_subtree->tree_lock);
+ cache_key_insert(&cache->writeback_key_tree, key, true);
+ spin_unlock(&cache_subtree->tree_lock);
+ }
+
+ return 0;
+clear_tree:
+ cache_tree_clear(&cache->writeback_key_tree);
+ return ret;
+}
+
+static void last_kset_writeback(struct pcache_cache *cache,
+ struct pcache_cache_kset_onmedia *last_kset_onmedia)
+{
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+ struct pcache_cache_segment *next_seg;
+
+ pcache_dev_debug(pcache, "last kset, next: %u\n", last_kset_onmedia->next_cache_seg_id);
+
+ next_seg = &cache->segments[last_kset_onmedia->next_cache_seg_id];
+
+ mutex_lock(&cache->dirty_tail_lock);
+ cache->dirty_tail.cache_seg = next_seg;
+ cache->dirty_tail.seg_off = 0;
+ cache_encode_dirty_tail(cache);
+ mutex_unlock(&cache->dirty_tail_lock);
+}
+
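+/*
+ * Writeback worker: read the kset at dirty_tail, replay its keys into
+ * writeback_key_tree and write them back to the backing device. The work
+ * is rescheduled immediately while dirty ksets remain, and backs off for
+ * PCACHE_CACHE_WRITEBACK_INTERVAL when the cache is clean or a kset
+ * cannot be decoded.
+ */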
+void cache_writeback_fn(struct work_struct *work)
+{
+ struct pcache_cache *cache = container_of(work, struct pcache_cache, writeback_work.work);
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+ struct pcache_cache_pos dirty_tail;
+ struct pcache_cache_kset_onmedia *kset_onmedia;
+ u32 delay;
+ int ret;
+
+ mutex_lock(&cache->writeback_lock);
+ if (atomic_read(&cache->writeback_ctx.pending))
+ goto unlock;
+
+ if (pcache_is_stopping(pcache))
+ goto unlock;
+
+ kset_onmedia = (struct pcache_cache_kset_onmedia *)cache->wb_kset_onmedia_buf;
+
+ mutex_lock(&cache->dirty_tail_lock);
+ cache_pos_copy(&dirty_tail, &cache->dirty_tail);
+ mutex_unlock(&cache->dirty_tail_lock);
+
+ if (is_cache_clean(cache, &dirty_tail)) {
+ delay = PCACHE_CACHE_WRITEBACK_INTERVAL;
+ goto queue_work;
+ }
+
+ if (kset_onmedia->flags & PCACHE_KSET_FLAGS_LAST) {
+ last_kset_writeback(cache, kset_onmedia);
+ delay = 0;
+ goto queue_work;
+ }
+
+ ret = cache_kset_insert_tree(cache, kset_onmedia);
+ if (ret) {
+ delay = PCACHE_CACHE_WRITEBACK_INTERVAL;
+ goto queue_work;
+ }
+
+ cache_wb_tree_writeback(cache, get_kset_onmedia_size(kset_onmedia));
+ delay = 0;
+queue_work:
+ queue_delayed_work(cache_get_wq(cache), &cache->writeback_work, delay);
+unlock:
+ mutex_unlock(&cache->writeback_lock);
+}
diff --git a/drivers/md/dm-pcache/dm_pcache.c b/drivers/md/dm-pcache/dm_pcache.c
new file mode 100644
index 000000000000..e5f5936fa6f0
--- /dev/null
+++ b/drivers/md/dm-pcache/dm_pcache.c
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+
+#include "../dm-core.h"
+#include "cache_dev.h"
+#include "backing_dev.h"
+#include "cache.h"
+#include "dm_pcache.h"
+
+void pcache_defer_reqs_kick(struct dm_pcache *pcache)
+{
+ struct pcache_cache *cache = &pcache->cache;
+
+ spin_lock(&cache->seg_map_lock);
+ if (!cache->cache_full)
+ queue_work(pcache->task_wq, &pcache->defered_req_work);
+ spin_unlock(&cache->seg_map_lock);
+}
+
+static void defer_req(struct pcache_request *pcache_req)
+{
+ struct dm_pcache *pcache = pcache_req->pcache;
+
+ BUG_ON(!list_empty(&pcache_req->list_node));
+
+ spin_lock(&pcache->defered_req_list_lock);
+ list_add(&pcache_req->list_node, &pcache->defered_req_list);
+ pcache_defer_reqs_kick(pcache);
+ spin_unlock(&pcache->defered_req_list_lock);
+}
+
+static void defered_req_fn(struct work_struct *work)
+{
+ struct dm_pcache *pcache = container_of(work, struct dm_pcache, defered_req_work);
+ struct pcache_request *pcache_req;
+ LIST_HEAD(tmp_list);
+ int ret;
+
+ if (pcache_is_stopping(pcache))
+ return;
+
+ spin_lock(&pcache->defered_req_list_lock);
+ list_splice_init(&pcache->defered_req_list, &tmp_list);
+ spin_unlock(&pcache->defered_req_list_lock);
+
+ while (!list_empty(&tmp_list)) {
+ pcache_req = list_first_entry(&tmp_list,
+ struct pcache_request, list_node);
+ list_del_init(&pcache_req->list_node);
+ pcache_req->ret = 0;
+ ret = pcache_cache_handle_req(&pcache->cache, pcache_req);
+ if (ret == -EBUSY)
+ defer_req(pcache_req);
+ else
+ pcache_req_put(pcache_req, ret);
+ }
+}
+
+void pcache_req_get(struct pcache_request *pcache_req)
+{
+ kref_get(&pcache_req->ref);
+}
+
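+/*
+ * Last reference of a pcache_request dropped: a request that failed with
+ * -EBUSY is re-queued to the deferred list with a fresh reference;
+ * everything else completes the bio.
+ */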
+static void end_req(struct kref *ref)
+{
+ struct pcache_request *pcache_req = container_of(ref, struct pcache_request, ref);
+ struct dm_pcache *pcache = pcache_req->pcache;
+ struct bio *bio = pcache_req->bio;
+ int ret = pcache_req->ret;
+
+ if (ret == -EBUSY) {
+ pcache_req_get(pcache_req);
+ defer_req(pcache_req);
+ } else {
+ bio->bi_status = errno_to_blk_status(ret);
+ bio_endio(bio);
+
+ if (atomic_dec_and_test(&pcache->inflight_reqs))
+ wake_up(&pcache->inflight_wq);
+ }
+}
+
+void pcache_req_put(struct pcache_request *pcache_req, int ret)
+{
+ /* Set the return status if it is not already set */
+ if (ret && !pcache_req->ret)
+ pcache_req->ret = ret;
+
+ kref_put(&pcache_req->ref, end_req);
+}
+
+static bool at_least_one_arg(struct dm_arg_set *as, char **error)
+{
+ if (!as->argc) {
+ *error = "Insufficient args";
+ return false;
+ }
+
+ return true;
+}
+
+static int parse_cache_dev(struct dm_pcache *pcache, struct dm_arg_set *as,
+ char **error)
+{
+ int ret;
+
+ if (!at_least_one_arg(as, error))
+ return -EINVAL;
+ ret = dm_get_device(pcache->ti, dm_shift_arg(as),
+ BLK_OPEN_READ | BLK_OPEN_WRITE,
+ &pcache->cache_dev.dm_dev);
+ if (ret) {
+ *error = "Error opening cache device";
+ return ret;
+ }
+
+ return 0;
+}
+
+static int parse_backing_dev(struct dm_pcache *pcache, struct dm_arg_set *as,
+ char **error)
+{
+ int ret;
+
+ if (!at_least_one_arg(as, error))
+ return -EINVAL;
+
+ ret = dm_get_device(pcache->ti, dm_shift_arg(as),
+ BLK_OPEN_READ | BLK_OPEN_WRITE,
+ &pcache->backing_dev.dm_dev);
+ if (ret) {
+ *error = "Error opening backing device";
+ return ret;
+ }
+
+ return 0;
+}
+
+static void pcache_init_opts(struct pcache_cache_options *opts)
+{
+ opts->cache_mode = PCACHE_CACHE_MODE_WRITEBACK;
+ opts->data_crc = false;
+}
+
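+/*
+ * Optional arguments form a dm arg group appended to the two device paths,
+ * e.g. a (hypothetical) table line:
+ *
+ *   0 <dev_sectors> pcache /dev/pmem0 /dev/vdb 4 cache_mode writeback data_crc true
+ */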
+static int parse_cache_opts(struct dm_pcache *pcache, struct dm_arg_set *as,
+ char **error)
+{
+ struct pcache_cache_options *opts = &pcache->opts;
+ static const struct dm_arg _args[] = {
+ {0, 4, "Invalid number of cache option arguments"},
+ };
+ unsigned int argc;
+ const char *arg;
+ int ret;
+
+ pcache_init_opts(opts);
+ if (!as->argc)
+ return 0;
+
+ ret = dm_read_arg_group(_args, as, &argc, error);
+ if (ret)
+ return -EINVAL;
+
+ while (argc) {
+ arg = dm_shift_arg(as);
+ argc--;
+
+ if (!strcmp(arg, "cache_mode")) {
+ arg = dm_shift_arg(as);
+ if (!strcmp(arg, "writeback")) {
+ opts->cache_mode = PCACHE_CACHE_MODE_WRITEBACK;
+ } else {
+ *error = "Invalid cache mode parameter";
+ return -EINVAL;
+ }
+ argc--;
+ } else if (!strcmp(arg, "data_crc")) {
+ arg = dm_shift_arg(as);
+ if (!strcmp(arg, "true")) {
+ opts->data_crc = true;
+ } else if (!strcmp(arg, "false")) {
+ opts->data_crc = false;
+ } else {
+ *error = "Invalid data crc parameter";
+ return -EINVAL;
+ }
+ argc--;
+ } else {
+ *error = "Unrecognised cache option requested";
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int pcache_start(struct dm_pcache *pcache, char **error)
+{
+ int ret;
+
+ ret = cache_dev_start(pcache);
+ if (ret) {
+ *error = "Failed to start cache dev";
+ return ret;
+ }
+
+ ret = backing_dev_start(pcache);
+ if (ret) {
+ *error = "Failed to start backing dev";
+ goto stop_cache;
+ }
+
+ ret = pcache_cache_start(pcache);
+ if (ret) {
+ *error = "Failed to start pcache";
+ goto stop_backing;
+ }
+
+ return 0;
+stop_backing:
+ backing_dev_stop(pcache);
+stop_cache:
+ cache_dev_stop(pcache);
+
+ return ret;
+}
+
+static void pcache_destroy_args(struct dm_pcache *pcache)
+{
+ if (pcache->cache_dev.dm_dev)
+ dm_put_device(pcache->ti, pcache->cache_dev.dm_dev);
+ if (pcache->backing_dev.dm_dev)
+ dm_put_device(pcache->ti, pcache->backing_dev.dm_dev);
+}
+
+static int pcache_parse_args(struct dm_pcache *pcache, unsigned int argc, char **argv,
+ char **error)
+{
+ struct dm_arg_set as;
+ int ret;
+
+ as.argc = argc;
+ as.argv = argv;
+
+ /*
+ * Parse cache device
+ */
+ ret = parse_cache_dev(pcache, &as, error);
+ if (ret)
+ return ret;
+ /*
+ * Parse backing device
+ */
+ ret = parse_backing_dev(pcache, &as, error);
+ if (ret)
+ goto out;
+ /*
+ * Parse optional arguments
+ */
+ ret = parse_cache_opts(pcache, &as, error);
+ if (ret)
+ goto out;
+
+ return 0;
+out:
+ pcache_destroy_args(pcache);
+ return ret;
+}
+
+static int dm_pcache_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+ struct mapped_device *md = ti->table->md;
+ struct dm_pcache *pcache;
+ int ret;
+
+ if (md->map) {
+ ti->error = "Don't support table loading for live md";
+ return -EOPNOTSUPP;
+ }
+
+ /* Allocate memory for the cache structure */
+ pcache = kzalloc(sizeof(struct dm_pcache), GFP_KERNEL);
+ if (!pcache)
+ return -ENOMEM;
+
+ pcache->task_wq = alloc_workqueue("pcache-%s-wq", WQ_UNBOUND | WQ_MEM_RECLAIM,
+ 0, md->name);
+ if (!pcache->task_wq) {
+ ret = -ENOMEM;
+ goto free_pcache;
+ }
+
+ spin_lock_init(&pcache->defered_req_list_lock);
+ INIT_LIST_HEAD(&pcache->defered_req_list);
+ INIT_WORK(&pcache->defered_req_work, defered_req_fn);
+ pcache->ti = ti;
+
+ ret = pcache_parse_args(pcache, argc, argv, &ti->error);
+ if (ret)
+ goto destroy_wq;
+
+ ret = pcache_start(pcache, &ti->error);
+ if (ret)
+ goto destroy_args;
+
+ ti->num_flush_bios = 1;
+ ti->flush_supported = true;
+ ti->per_io_data_size = sizeof(struct pcache_request);
+ ti->private = pcache;
+ atomic_set(&pcache->inflight_reqs, 0);
+ atomic_set(&pcache->state, PCACHE_STATE_RUNNING);
+ init_waitqueue_head(&pcache->inflight_wq);
+
+ return 0;
+destroy_args:
+ pcache_destroy_args(pcache);
+destroy_wq:
+ destroy_workqueue(pcache->task_wq);
+free_pcache:
+ kfree(pcache);
+
+ return ret;
+}
+
+static void defer_req_stop(struct dm_pcache *pcache)
+{
+ struct pcache_request *pcache_req;
+ LIST_HEAD(tmp_list);
+
+ flush_work(&pcache->defered_req_work);
+
+ spin_lock(&pcache->defered_req_list_lock);
+ list_splice_init(&pcache->defered_req_list, &tmp_list);
+ spin_unlock(&pcache->defered_req_list_lock);
+
+ while (!list_empty(&tmp_list)) {
+ pcache_req = list_first_entry(&tmp_list,
+ struct pcache_request, list_node);
+ list_del_init(&pcache_req->list_node);
+ pcache_req_put(pcache_req, -EIO);
+ }
+}
+
+static void dm_pcache_dtr(struct dm_target *ti)
+{
+ struct dm_pcache *pcache;
+
+ pcache = ti->private;
+ atomic_set(&pcache->state, PCACHE_STATE_STOPPING);
+ defer_req_stop(pcache);
+
+ wait_event(pcache->inflight_wq,
+ atomic_read(&pcache->inflight_reqs) == 0);
+
+ pcache_cache_stop(pcache);
+ backing_dev_stop(pcache);
+ cache_dev_stop(pcache);
+
+ pcache_destroy_args(pcache);
+ drain_workqueue(pcache->task_wq);
+ destroy_workqueue(pcache->task_wq);
+
+ kfree(pcache);
+}
+
+static int dm_pcache_map_bio(struct dm_target *ti, struct bio *bio)
+{
+ struct pcache_request *pcache_req = dm_per_bio_data(bio, sizeof(struct pcache_request));
+ struct dm_pcache *pcache = ti->private;
+ int ret;
+
+ pcache_req->pcache = pcache;
+ kref_init(&pcache_req->ref);
+ pcache_req->ret = 0;
+ pcache_req->bio = bio;
+ pcache_req->off = (u64)bio->bi_iter.bi_sector << SECTOR_SHIFT;
+ pcache_req->data_len = bio->bi_iter.bi_size;
+ INIT_LIST_HEAD(&pcache_req->list_node);
+ atomic_inc(&pcache->inflight_reqs);
+
+ ret = pcache_cache_handle_req(&pcache->cache, pcache_req);
+ if (ret == -EBUSY)
+ defer_req(pcache_req);
+ else
+ pcache_req_put(pcache_req, ret);
+
+ return DM_MAPIO_SUBMITTED;
+}
+
+static void dm_pcache_status(struct dm_target *ti, status_type_t type,
+ unsigned int status_flags, char *result,
+ unsigned int maxlen)
+{
+ struct dm_pcache *pcache = ti->private;
+ struct pcache_cache_dev *cache_dev = &pcache->cache_dev;
+ struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
+ struct pcache_cache *cache = &pcache->cache;
+ unsigned int sz = 0;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
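+ /*
+ * <sb_flags> <cache_dev_segs> <cache_segs> <segs_used>
+ * <gc_percent> <cache_flags> <key_head> <dirty_tail> <key_tail>
+ */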
+ DMEMIT("%x %u %u %u %u %x %u:%u %u:%u %u:%u",
+ cache_dev->sb_flags,
+ cache_dev->seg_num,
+ cache->n_segs,
+ bitmap_weight(cache->seg_map, cache->n_segs),
+ pcache_cache_get_gc_percent(cache),
+ cache->cache_info.flags,
+ cache->key_head.cache_seg->cache_seg_id,
+ cache->key_head.seg_off,
+ cache->dirty_tail.cache_seg->cache_seg_id,
+ cache->dirty_tail.seg_off,
+ cache->key_tail.cache_seg->cache_seg_id,
+ cache->key_tail.seg_off);
+ break;
+ case STATUSTYPE_TABLE:
+ DMEMIT("%s %s 4 cache_mode writeback crc %s",
+ cache_dev->dm_dev->name,
+ backing_dev->dm_dev->name,
+ cache_data_crc_on(cache) ? "true" : "false");
+ break;
+ case STATUSTYPE_IMA:
+ *result = '\0';
+ break;
+ }
+}
+
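+/*
+ * Message interface for runtime tunables, e.g. (hypothetical device name):
+ *
+ *   dmsetup message pcache_test 0 gc_percent 70
+ */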
+static int dm_pcache_message(struct dm_target *ti, unsigned int argc,
+ char **argv, char *result, unsigned int maxlen)
+{
+ struct dm_pcache *pcache = ti->private;
+ unsigned long val;
+
+ if (argc != 2)
+ goto err;
+
+ if (!strcasecmp(argv[0], "gc_percent")) {
+ if (kstrtoul(argv[1], 10, &val))
+ goto err;
+
+ return pcache_cache_set_gc_percent(&pcache->cache, val);
+ }
+err:
+ return -EINVAL;
+}
+
+static struct target_type dm_pcache_target = {
+ .name = "pcache",
+ .version = {0, 1, 0},
+ .module = THIS_MODULE,
+ .features = DM_TARGET_SINGLETON,
+ .ctr = dm_pcache_ctr,
+ .dtr = dm_pcache_dtr,
+ .map = dm_pcache_map_bio,
+ .status = dm_pcache_status,
+ .message = dm_pcache_message,
+};
+
+static int __init dm_pcache_init(void)
+{
+ int ret;
+
+ ret = pcache_backing_init();
+ if (ret)
+ goto err;
+
+ ret = pcache_cache_init();
+ if (ret)
+ goto backing_exit;
+
+ ret = dm_register_target(&dm_pcache_target);
+ if (ret)
+ goto cache_exit;
+ return 0;
+
+cache_exit:
+ pcache_cache_exit();
+backing_exit:
+ pcache_backing_exit();
+err:
+ return ret;
+}
+module_init(dm_pcache_init);
+
+static void __exit dm_pcache_exit(void)
+{
+ dm_unregister_target(&dm_pcache_target);
+ pcache_cache_exit();
+ pcache_backing_exit();
+}
+module_exit(dm_pcache_exit);
+
+MODULE_DESCRIPTION("dm-pcache Persistent Cache for block device");
+MODULE_AUTHOR("Dongsheng Yang <dongsheng.yang@linux.dev>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-pcache/dm_pcache.h b/drivers/md/dm-pcache/dm_pcache.h
new file mode 100644
index 000000000000..b4e06be0c0b9
--- /dev/null
+++ b/drivers/md/dm-pcache/dm_pcache.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _DM_PCACHE_H
+#define _DM_PCACHE_H
+#include <linux/device-mapper.h>
+
+#include "../dm-core.h"
+
+#define CACHE_DEV_TO_PCACHE(cache_dev) (container_of(cache_dev, struct dm_pcache, cache_dev))
+#define BACKING_DEV_TO_PCACHE(backing_dev) (container_of(backing_dev, struct dm_pcache, backing_dev))
+#define CACHE_TO_PCACHE(cache) (container_of(cache, struct dm_pcache, cache))
+
+#define PCACHE_STATE_RUNNING 1
+#define PCACHE_STATE_STOPPING 2
+
+struct pcache_cache_dev;
+struct pcache_backing_dev;
+struct pcache_cache;
+struct pcache_cache_options;
+struct dm_pcache {
+ struct dm_target *ti;
+ struct pcache_cache_dev cache_dev;
+ struct pcache_backing_dev backing_dev;
+ struct pcache_cache cache;
+ struct pcache_cache_options opts;
+
+ spinlock_t defered_req_list_lock;
+ struct list_head defered_req_list;
+ struct workqueue_struct *task_wq;
+
+ struct work_struct defered_req_work;
+
+ atomic_t state;
+ atomic_t inflight_reqs;
+ wait_queue_head_t inflight_wq;
+};
+
+static inline bool pcache_is_stopping(struct dm_pcache *pcache)
+{
+ return (atomic_read(&pcache->state) == PCACHE_STATE_STOPPING);
+}
+
+#define pcache_dev_err(pcache, fmt, ...) \
+ pcache_err("%s " fmt, pcache->ti->table->md->name, ##__VA_ARGS__)
+#define pcache_dev_info(pcache, fmt, ...) \
+ pcache_info("%s " fmt, pcache->ti->table->md->name, ##__VA_ARGS__)
+#define pcache_dev_debug(pcache, fmt, ...) \
+ pcache_debug("%s " fmt, pcache->ti->table->md->name, ##__VA_ARGS__)
+
+struct pcache_request {
+ struct dm_pcache *pcache;
+ struct bio *bio;
+
+ u64 off;
+ u32 data_len;
+
+ struct kref ref;
+ int ret;
+
+ struct list_head list_node;
+};
+
+void pcache_req_get(struct pcache_request *pcache_req);
+void pcache_req_put(struct pcache_request *pcache_req, int ret);
+
+void pcache_defer_reqs_kick(struct dm_pcache *pcache);
+
+#endif /* _DM_PCACHE_H */
diff --git a/drivers/md/dm-pcache/pcache_internal.h b/drivers/md/dm-pcache/pcache_internal.h
new file mode 100644
index 000000000000..d427e534727c
--- /dev/null
+++ b/drivers/md/dm-pcache/pcache_internal.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _PCACHE_INTERNAL_H
+#define _PCACHE_INTERNAL_H
+
+#include <linux/delay.h>
+#include <linux/crc32c.h>
+
+#define pcache_err(fmt, ...) \
+ pr_err("dm-pcache: %s:%u " fmt, __func__, __LINE__, ##__VA_ARGS__)
+#define pcache_info(fmt, ...) \
+ pr_info("dm-pcache: %s:%u " fmt, __func__, __LINE__, ##__VA_ARGS__)
+#define pcache_debug(fmt, ...) \
+ pr_debug("dm-pcache: %s:%u " fmt, __func__, __LINE__, ##__VA_ARGS__)
+
+#define PCACHE_KB (1024ULL)
+#define PCACHE_MB (1024 * PCACHE_KB)
+
+/* Maximum number of metadata indices */
+#define PCACHE_META_INDEX_MAX 2
+
+#define PCACHE_CRC_SEED 0x3B15A
+/*
+ * struct pcache_meta_header - PCACHE metadata header structure
+ * @crc: CRC checksum for validating metadata integrity.
+ * @seq: Sequence number to track metadata updates.
+ * @version: Metadata version.
+ * @res: Reserved space for future use.
+ */
+struct pcache_meta_header {
+ __u32 crc;
+ __u8 seq;
+ __u8 version;
+ __u16 res;
+};
+
+/*
+ * pcache_meta_crc - Calculate CRC for the given metadata header.
+ * @header: Pointer to the metadata header.
+ * @meta_size: Size of the metadata structure.
+ *
+ * Returns the CRC checksum calculated by excluding the CRC field itself.
+ */
+static inline u32 pcache_meta_crc(struct pcache_meta_header *header, u32 meta_size)
+{
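+ /* skip the leading 4-byte crc field itself */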
+ return crc32c(PCACHE_CRC_SEED, (void *)header + 4, meta_size - 4);
+}
+
+/*
+ * pcache_meta_seq_after - Check if a sequence number is more recent, accounting for overflow.
+ * @seq1: First sequence number.
+ * @seq2: Second sequence number.
+ *
+ * Determines if @seq1 is more recent than @seq2 by calculating the signed
+ * difference between them. This approach allows handling sequence number
+ * overflow correctly because the difference wraps naturally, and any value
+ * greater than zero indicates that @seq1 is "after" @seq2. This method
+ * assumes 8-bit unsigned sequence numbers, where the difference wraps
+ * around if seq1 overflows past seq2.
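+ *
+ * For example, with seq1 = 1 and seq2 = 250 the u8 difference 1 - 250
+ * wraps to 7, and (s8)7 > 0, so seq1 is considered the more recent of
+ * the two even though it wrapped past seq2.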
+ *
+ * Returns:
+ * - true if @seq1 is more recent than @seq2, indicating it comes "after"
+ * - false otherwise.
+ */
+static inline bool pcache_meta_seq_after(u8 seq1, u8 seq2)
+{
+ return (s8)(seq1 - seq2) > 0;
+}
+
+/*
+ * pcache_meta_find_latest - Find the latest valid metadata.
+ * @header: Pointer to the first on-media metadata copy.
+ * @meta_size: Size of each metadata block.
+ * @meta_max_size: Stride between consecutive on-media metadata copies.
+ * @meta_ret: Buffer that receives a copy of the latest valid metadata.
+ *
+ * Finds the latest valid metadata by checking sequence numbers. If a
+ * valid entry with the highest sequence number is found, its pointer
+ * is returned. Returns NULL if no valid metadata is found.
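+ * On a hardware memory error while reading a copy, ERR_PTR(-EIO) is
+ * returned instead.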
+ */
+static inline void __must_check *pcache_meta_find_latest(struct pcache_meta_header *header,
+ u32 meta_size, u32 meta_max_size,
+ void *meta_ret)
+{
+ struct pcache_meta_header *meta, *latest = NULL;
+ u32 i, seq_latest = 0;
+ void *meta_addr;
+
+ meta = meta_ret;
+
+ for (i = 0; i < PCACHE_META_INDEX_MAX; i++) {
+ meta_addr = (void *)header + (i * meta_max_size);
+ if (copy_mc_to_kernel(meta, meta_addr, meta_size)) {
+ pcache_err("hardware memory error when copy meta");
+ return ERR_PTR(-EIO);
+ }
+
+ /* Skip if the CRC check fails, which means this copy is corrupted */
+ if (meta->crc != pcache_meta_crc(meta, meta_size))
+ continue;
+
+ /* Update latest if a more recent sequence is found */
+ if (!latest || pcache_meta_seq_after(meta->seq, seq_latest)) {
+ seq_latest = meta->seq;
+ latest = (void *)header + (i * meta_max_size);
+ }
+ }
+
+ if (!latest)
+ return NULL;
+
+ if (copy_mc_to_kernel(meta_ret, latest, meta_size)) {
+ pcache_err("hardware memory error");
+ return ERR_PTR(-EIO);
+ }
+
+ return latest;
+}
+
+#endif /* _PCACHE_INTERNAL_H */
diff --git a/drivers/md/dm-pcache/segment.c b/drivers/md/dm-pcache/segment.c
new file mode 100644
index 000000000000..7e9818701445
--- /dev/null
+++ b/drivers/md/dm-pcache/segment.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/dax.h>
+
+#include "pcache_internal.h"
+#include "cache_dev.h"
+#include "segment.h"
+
+int segment_copy_to_bio(struct pcache_segment *segment,
+ u32 data_off, u32 data_len, struct bio *bio, u32 bio_off)
+{
+ struct iov_iter iter;
+ size_t copied;
+ void *src;
+
+ iov_iter_bvec(&iter, ITER_DEST, &bio->bi_io_vec[bio->bi_iter.bi_idx],
+ bio_segments(bio), bio->bi_iter.bi_size);
+ iter.iov_offset = bio->bi_iter.bi_bvec_done;
+ if (bio_off)
+ iov_iter_advance(&iter, bio_off);
+
+ src = segment->data + data_off;
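+ /* machine-check-safe copy: the source may live on DAX/pmem */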
+ copied = _copy_mc_to_iter(src, data_len, &iter);
+ if (copied != data_len)
+ return -EIO;
+
+ return 0;
+}
+
+int segment_copy_from_bio(struct pcache_segment *segment,
+ u32 data_off, u32 data_len, struct bio *bio, u32 bio_off)
+{
+ struct iov_iter iter;
+ size_t copied;
+ void *dst;
+
+ iov_iter_bvec(&iter, ITER_SOURCE, &bio->bi_io_vec[bio->bi_iter.bi_idx],
+ bio_segments(bio), bio->bi_iter.bi_size);
+ iter.iov_offset = bio->bi_iter.bi_bvec_done;
+ if (bio_off)
+ iov_iter_advance(&iter, bio_off);
+
+ dst = segment->data + data_off;
+ copied = _copy_from_iter_flushcache(dst, data_len, &iter);
+ if (copied != data_len)
+ return -EIO;
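+ /* ensure the flushcache stores are durable before returning */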
+ pmem_wmb();
+
+ return 0;
+}
+
+void pcache_segment_init(struct pcache_cache_dev *cache_dev, struct pcache_segment *segment,
+ struct pcache_segment_init_options *options)
+{
+ segment->seg_info = options->seg_info;
+ segment_info_set_type(segment->seg_info, options->type);
+
+ segment->cache_dev = cache_dev;
+ segment->seg_id = options->seg_id;
+ segment->data_size = PCACHE_SEG_SIZE - options->data_off;
+ segment->data = CACHE_DEV_SEGMENT(cache_dev, options->seg_id) + options->data_off;
+}
diff --git a/drivers/md/dm-pcache/segment.h b/drivers/md/dm-pcache/segment.h
new file mode 100644
index 000000000000..deca1ddcb02b
--- /dev/null
+++ b/drivers/md/dm-pcache/segment.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _PCACHE_SEGMENT_H
+#define _PCACHE_SEGMENT_H
+
+#include <linux/bio.h>
+#include <linux/bitfield.h>
+
+#include "pcache_internal.h"
+
+struct pcache_segment_info {
+ struct pcache_meta_header header;
+ __u32 flags;
+ __u32 next_seg;
+};
+
+#define PCACHE_SEG_INFO_FLAGS_HAS_NEXT BIT(0)
+
+#define PCACHE_SEG_INFO_FLAGS_TYPE_MASK GENMASK(4, 1)
+#define PCACHE_SEGMENT_TYPE_CACHE_DATA 1
+
+static inline bool segment_info_has_next(struct pcache_segment_info *seg_info)
+{
+ return (seg_info->flags & PCACHE_SEG_INFO_FLAGS_HAS_NEXT);
+}
+
+static inline void segment_info_set_type(struct pcache_segment_info *seg_info, u8 type)
+{
+ seg_info->flags &= ~PCACHE_SEG_INFO_FLAGS_TYPE_MASK;
+ seg_info->flags |= FIELD_PREP(PCACHE_SEG_INFO_FLAGS_TYPE_MASK, type);
+}
+
+static inline u8 segment_info_get_type(struct pcache_segment_info *seg_info)
+{
+ return FIELD_GET(PCACHE_SEG_INFO_FLAGS_TYPE_MASK, seg_info->flags);
+}
+
+struct pcache_segment_pos {
+ struct pcache_segment *segment; /* Segment associated with the position */
+ u32 off; /* Offset within the segment */
+};
+
+struct pcache_segment_init_options {
+ u8 type;
+ u32 seg_id;
+ u32 data_off;
+
+ struct pcache_segment_info *seg_info;
+};
+
+struct pcache_segment {
+ struct pcache_cache_dev *cache_dev;
+
+ void *data;
+ u32 data_size;
+ u32 seg_id;
+
+ struct pcache_segment_info *seg_info;
+};
+
+int segment_copy_to_bio(struct pcache_segment *segment,
+ u32 data_off, u32 data_len, struct bio *bio, u32 bio_off);
+int segment_copy_from_bio(struct pcache_segment *segment,
+ u32 data_off, u32 data_len, struct bio *bio, u32 bio_off);
+
+static inline void segment_pos_advance(struct pcache_segment_pos *seg_pos, u32 len)
+{
+ BUG_ON(seg_pos->off + len > seg_pos->segment->data_size);
+
+ seg_pos->off += len;
+}
+
+void pcache_segment_init(struct pcache_cache_dev *cache_dev, struct pcache_segment *segment,
+ struct pcache_segment_init_options *options);
+#endif /* _PCACHE_SEGMENT_H */
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 15c538ee9537..c6f7129e43d3 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -438,7 +438,7 @@ static bool rs_is_reshapable(struct raid_set *rs)
/* Return true, if raid set in @rs is recovering */
static bool rs_is_recovering(struct raid_set *rs)
{
- return rs->md.recovery_cp < rs->md.dev_sectors;
+ return rs->md.resync_offset < rs->md.dev_sectors;
}
/* Return true, if raid set in @rs is reshaping */
@@ -768,7 +768,7 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
rs->md.layout = raid_type->algorithm;
rs->md.new_layout = rs->md.layout;
rs->md.delta_disks = 0;
- rs->md.recovery_cp = MaxSector;
+ rs->md.resync_offset = MaxSector;
for (i = 0; i < raid_devs; i++)
md_rdev_init(&rs->dev[i].rdev);
@@ -912,7 +912,7 @@ static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as)
rs->md.external = 0;
rs->md.persistent = 1;
rs->md.major_version = 2;
- } else if (rebuild && !rs->md.recovery_cp) {
+ } else if (rebuild && !rs->md.resync_offset) {
/*
* Without metadata, we will not be able to tell if the array
* is in-sync or not - we must assume it is not. Therefore,
@@ -1695,20 +1695,20 @@ static void rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
{
/* raid0 does not recover */
if (rs_is_raid0(rs))
- rs->md.recovery_cp = MaxSector;
+ rs->md.resync_offset = MaxSector;
/*
* A raid6 set has to be recovered either
* completely or for the grown part to
* ensure proper parity and Q-Syndrome
*/
else if (rs_is_raid6(rs))
- rs->md.recovery_cp = dev_sectors;
+ rs->md.resync_offset = dev_sectors;
/*
* Other raid set types may skip recovery
* depending on the 'nosync' flag.
*/
else
- rs->md.recovery_cp = test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)
+ rs->md.resync_offset = test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)
? MaxSector : dev_sectors;
}
@@ -2143,7 +2143,7 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
sb->events = cpu_to_le64(mddev->events);
sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
- sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);
+ sb->array_resync_offset = cpu_to_le64(mddev->resync_offset);
sb->level = cpu_to_le32(mddev->level);
sb->layout = cpu_to_le32(mddev->layout);
@@ -2334,18 +2334,18 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
}
if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
- mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);
+ mddev->resync_offset = le64_to_cpu(sb->array_resync_offset);
/*
* During load, we set FirstUse if a new superblock was written.
* There are two reasons we might not have a superblock:
* 1) The raid set is brand new - in which case, all of the
* devices must have their In_sync bit set. Also,
- * recovery_cp must be 0, unless forced.
+ * resync_offset must be 0, unless forced.
* 2) This is a new device being added to an old raid set
* and the new device needs to be rebuilt - in which
* case the In_sync bit will /not/ be set and
- * recovery_cp must be MaxSector.
+ * resync_offset must be MaxSector.
* 3) This is/are a new device(s) being added to an old
* raid set during takeover to a higher raid level
* to provide capacity for redundancy or during reshape
@@ -2390,8 +2390,8 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
new_devs > 1 ? "s" : "");
return -EINVAL;
} else if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) && rs_is_recovering(rs)) {
- DMERR("'rebuild' specified while raid set is not in-sync (recovery_cp=%llu)",
- (unsigned long long) mddev->recovery_cp);
+ DMERR("'rebuild' specified while raid set is not in-sync (resync_offset=%llu)",
+ (unsigned long long) mddev->resync_offset);
return -EINVAL;
} else if (rs_is_reshaping(rs)) {
DMERR("'rebuild' specified while raid set is being reshaped (reshape_position=%llu)",
@@ -2700,11 +2700,11 @@ static int rs_adjust_data_offsets(struct raid_set *rs)
}
out:
/*
- * Raise recovery_cp in case data_offset != 0 to
+ * Raise resync_offset in case data_offset != 0 to
* avoid false recovery positives in the constructor.
*/
- if (rs->md.recovery_cp < rs->md.dev_sectors)
- rs->md.recovery_cp += rs->dev[0].rdev.data_offset;
+ if (rs->md.resync_offset < rs->md.dev_sectors)
+ rs->md.resync_offset += rs->dev[0].rdev.data_offset;
/* Adjust data offsets on all rdevs but on any raid4/5/6 journal device */
rdev_for_each(rdev, &rs->md) {
@@ -2759,7 +2759,7 @@ static int rs_setup_takeover(struct raid_set *rs)
}
clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
- mddev->recovery_cp = MaxSector;
+ mddev->resync_offset = MaxSector;
while (d--) {
rdev = &rs->dev[d].rdev;
@@ -2767,7 +2767,7 @@ static int rs_setup_takeover(struct raid_set *rs)
if (test_bit(d, (void *) rs->rebuild_disks)) {
clear_bit(In_sync, &rdev->flags);
clear_bit(Faulty, &rdev->flags);
- mddev->recovery_cp = rdev->recovery_offset = 0;
+ mddev->resync_offset = rdev->recovery_offset = 0;
/* Bitmap has to be created when we do an "up" takeover */
set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
}
@@ -3225,7 +3225,7 @@ size_check:
if (r)
goto bad;
- rs_setup_recovery(rs, rs->md.recovery_cp < rs->md.dev_sectors ? rs->md.recovery_cp : rs->md.dev_sectors);
+ rs_setup_recovery(rs, rs->md.resync_offset < rs->md.dev_sectors ? rs->md.resync_offset : rs->md.dev_sectors);
} else {
/* This is no size change or it is shrinking, update size and record in superblocks */
r = rs_set_dev_and_array_sectors(rs, rs->ti->len, false);
@@ -3247,7 +3247,7 @@ size_check:
rs_reset_inconclusive_reshape(rs);
/* Start raid set read-only and assumed clean to change in raid_resume() */
- rs->md.ro = 1;
+ rs->md.ro = MD_RDONLY;
rs->md.in_sync = 1;
/* Has to be held on running the array */
@@ -3385,7 +3385,7 @@ static enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long r
/* The MD sync thread can be done with io or be interrupted but still be running */
if (!test_bit(MD_RECOVERY_DONE, &recovery) &&
(test_bit(MD_RECOVERY_RUNNING, &recovery) ||
- (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery)))) {
+ (md_is_rdwr(mddev) && test_bit(MD_RECOVERY_NEEDED, &recovery)))) {
if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
return st_reshape;
@@ -3449,7 +3449,7 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
} else {
if (state == st_idle && !test_bit(MD_RECOVERY_INTR, &recovery))
- r = mddev->recovery_cp;
+ r = mddev->resync_offset;
else
r = mddev->curr_resync_completed;
@@ -3775,11 +3775,11 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv,
} else
return -EINVAL;
}
- if (mddev->ro == 2) {
+ if (mddev->ro == MD_AUTO_READ) {
/* A write to sync_action is enough to justify
* canceling read-auto mode
*/
- mddev->ro = 0;
+ mddev->ro = MD_RDWR;
if (!mddev->suspended)
md_wakeup_thread(mddev->sync_thread);
}
@@ -3813,8 +3813,10 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
struct raid_set *rs = ti->private;
unsigned int chunk_size_bytes = to_bytes(rs->md.chunk_sectors);
- limits->io_min = chunk_size_bytes;
- limits->io_opt = chunk_size_bytes * mddev_data_stripes(rs);
+ if (chunk_size_bytes) {
+ limits->io_min = chunk_size_bytes;
+ limits->io_opt = chunk_size_bytes * mddev_data_stripes(rs);
+ }
}
static void raid_presuspend(struct dm_target *ti)
@@ -3858,6 +3860,7 @@ static void raid_postsuspend(struct dm_target *ti)
*/
md_stop_writes(&rs->md);
mddev_suspend(&rs->md, false);
+ rs->md.ro = MD_RDONLY;
}
}
@@ -3953,9 +3956,11 @@ static int __load_dirty_region_bitmap(struct raid_set *rs)
!test_and_set_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) {
struct mddev *mddev = &rs->md;
- r = mddev->bitmap_ops->load(mddev);
- if (r)
- DMERR("Failed to load bitmap");
+ if (md_bitmap_enabled(mddev, false)) {
+ r = mddev->bitmap_ops->load(mddev);
+ if (r)
+ DMERR("Failed to load bitmap");
+ }
}
return r;
@@ -3968,7 +3973,7 @@ static void rs_update_sbs(struct raid_set *rs)
int ro = mddev->ro;
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
- mddev->ro = 0;
+ mddev->ro = MD_RDWR;
md_update_sb(mddev, 1);
mddev->ro = ro;
}
@@ -4070,16 +4075,18 @@ static int raid_preresume(struct dm_target *ti)
mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)))) {
int chunksize = to_bytes(rs->requested_bitmap_chunk_sectors) ?: mddev->bitmap_info.chunksize;
- r = mddev->bitmap_ops->resize(mddev, mddev->dev_sectors,
- chunksize, false);
- if (r)
- DMERR("Failed to resize bitmap");
+ if (md_bitmap_enabled(mddev, false)) {
+ r = mddev->bitmap_ops->resize(mddev, mddev->dev_sectors,
+ chunksize);
+ if (r)
+ DMERR("Failed to resize bitmap");
+ }
}
/* Check for any resize/reshape on @rs and adjust/initiate */
- if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) {
+ if (mddev->resync_offset && mddev->resync_offset < MaxSector) {
set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
- mddev->resync_min = mddev->recovery_cp;
+ mddev->resync_min = mddev->resync_offset;
if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags))
mddev->resync_max_sectors = mddev->dev_sectors;
}
@@ -4125,7 +4132,7 @@ static void raid_resume(struct dm_target *ti)
WARN_ON_ONCE(rcu_dereference_protected(mddev->sync_thread,
lockdep_is_held(&mddev->reconfig_mutex)));
clear_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags);
- mddev->ro = 0;
+ mddev->ro = MD_RDWR;
mddev->in_sync = 0;
md_unfrozen_sync_thread(mddev);
mddev_unlock_and_resume(mddev);
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index a4550975c27d..e9b47b659976 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -206,7 +206,7 @@ struct dm_region_hash *dm_region_hash_create(
rh->shift = RH_HASH_SHIFT;
rh->prime = RH_HASH_MULT;
- rh->buckets = vmalloc(array_size(nr_buckets, sizeof(*rh->buckets)));
+ rh->buckets = vmalloc_array(nr_buckets, sizeof(*rh->buckets));
if (!rh->buckets) {
DMERR("unable to allocate region hash bucket memory");
kfree(rh);
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 58902091bf79..1461dc740dae 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -456,11 +456,15 @@ static void stripe_io_hints(struct dm_target *ti,
struct queue_limits *limits)
{
struct stripe_c *sc = ti->private;
- unsigned int chunk_size = sc->chunk_size << SECTOR_SHIFT;
+ unsigned int io_min, io_opt;
limits->chunk_sectors = sc->chunk_size;
- limits->io_min = chunk_size;
- limits->io_opt = chunk_size * sc->stripes;
+
+ if (!check_shl_overflow(sc->chunk_size, SECTOR_SHIFT, &io_min) &&
+ !check_mul_overflow(io_min, sc->stripes, &io_opt)) {
+ limits->io_min = io_min;
+ limits->io_opt = io_opt;
+ }
}
static struct target_type stripe_target = {
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index bb1a70b5a215..50a52ca50b34 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -114,8 +114,8 @@ static int alloc_region_table(struct dm_target *ti, unsigned int nr_paths)
return -EINVAL;
}
- sctx->region_table = vmalloc(array_size(nr_slots,
- sizeof(region_table_slot_t)));
+ sctx->region_table = vmalloc_array(nr_slots,
+ sizeof(region_table_slot_t));
if (!sctx->region_table) {
ti->error = "Cannot allocate region table";
return -ENOMEM;
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 2af5a9514c05..8fede41adec0 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -263,7 +263,8 @@ static long io_err_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
static struct target_type error_target = {
.name = "error",
.version = {1, 7, 0},
- .features = DM_TARGET_WILDCARD | DM_TARGET_ZONED_HM,
+ .features = DM_TARGET_WILDCARD | DM_TARGET_ZONED_HM |
+ DM_TARGET_PASSES_INTEGRITY,
.ctr = io_err_ctr,
.dtr = io_err_dtr,
.map = io_err_map,
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 007bb93e5fca..c84149ba4e38 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -3031,8 +3031,8 @@ static struct pool *pool_create(struct mapped_device *pool_md,
}
pool->cell_sort_array =
- vmalloc(array_size(CELL_SORT_ARRAY_SIZE,
- sizeof(*pool->cell_sort_array)));
+ vmalloc_array(CELL_SORT_ARRAY_SIZE,
+ sizeof(*pool->cell_sort_array));
if (!pool->cell_sort_array) {
*error = "Error allocating cell sort array";
err_p = ERR_PTR(-ENOMEM);
diff --git a/drivers/md/dm-vdo/data-vio.c b/drivers/md/dm-vdo/data-vio.c
index 810002747091..262e11581f2d 100644
--- a/drivers/md/dm-vdo/data-vio.c
+++ b/drivers/md/dm-vdo/data-vio.c
@@ -17,6 +17,7 @@
#include <linux/minmax.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
+#include <linux/string.h>
#include <linux/wait.h>
#include "logger.h"
@@ -509,18 +510,6 @@ static void launch_data_vio(struct data_vio *data_vio, logical_block_number_t lb
vdo_enqueue_completion(completion, VDO_DEFAULT_Q_MAP_BIO_PRIORITY);
}
-static bool is_zero_block(char *block)
-{
- int i;
-
- for (i = 0; i < VDO_BLOCK_SIZE; i += sizeof(u64)) {
- if (*((u64 *) &block[i]))
- return false;
- }
-
- return true;
-}
-
static void copy_from_bio(struct bio *bio, char *data_ptr)
{
struct bio_vec biovec;
@@ -572,7 +561,7 @@ static void launch_bio(struct vdo *vdo, struct data_vio *data_vio, struct bio *b
* we acknowledge the bio.
*/
copy_from_bio(bio, data_vio->vio.data);
- data_vio->is_zero = is_zero_block(data_vio->vio.data);
+ data_vio->is_zero = mem_is_zero(data_vio->vio.data, VDO_BLOCK_SIZE);
data_vio->write = true;
}
@@ -1459,7 +1448,7 @@ static void modify_for_partial_write(struct vdo_completion *completion)
copy_from_bio(bio, data + data_vio->offset);
}
- data_vio->is_zero = is_zero_block(data);
+ data_vio->is_zero = mem_is_zero(data, VDO_BLOCK_SIZE);
data_vio->read = false;
launch_data_vio_logical_callback(data_vio,
continue_data_vio_with_block_map_slot);
diff --git a/drivers/md/dm-vdo/indexer/volume-index.c b/drivers/md/dm-vdo/indexer/volume-index.c
index 12f954a0c532..afb062e1f1fb 100644
--- a/drivers/md/dm-vdo/indexer/volume-index.c
+++ b/drivers/md/dm-vdo/indexer/volume-index.c
@@ -836,7 +836,7 @@ static int start_restoring_volume_sub_index(struct volume_sub_index *sub_index,
"%zu bytes decoded of %zu expected", offset,
sizeof(buffer));
if (result != VDO_SUCCESS)
- result = UDS_CORRUPT_DATA;
+ return UDS_CORRUPT_DATA;
if (memcmp(header.magic, MAGIC_START_5, MAGIC_SIZE) != 0) {
return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
@@ -928,7 +928,7 @@ static int start_restoring_volume_index(struct volume_index *volume_index,
"%zu bytes decoded of %zu expected", offset,
sizeof(buffer));
if (result != VDO_SUCCESS)
- result = UDS_CORRUPT_DATA;
+ return UDS_CORRUPT_DATA;
if (memcmp(header.magic, MAGIC_START_6, MAGIC_SIZE) != 0)
return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
diff --git a/drivers/md/dm-vdo/vio.c b/drivers/md/dm-vdo/vio.c
index e7f4153e55e3..8fc22fb14196 100644
--- a/drivers/md/dm-vdo/vio.c
+++ b/drivers/md/dm-vdo/vio.c
@@ -212,7 +212,7 @@ int vio_reset_bio_with_size(struct vio *vio, char *data, int size, bio_end_io_t
return VDO_SUCCESS;
bio->bi_ioprio = 0;
- bio->bi_io_vec = bio->bi_inline_vecs;
+ bio->bi_io_vec = bio_inline_vecs(bio);
bio->bi_max_vecs = vio->block_count + 1;
if (VDO_ASSERT(size <= vio_size, "specified size %d is not greater than allocated %d",
size, vio_size) != VDO_SUCCESS)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index a44e8c2dccee..f5e5e59b232b 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -403,9 +403,9 @@ static void do_deferred_remove(struct work_struct *w)
dm_deferred_remove();
}
-static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int dm_blk_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct mapped_device *md = bdev->bd_disk->private_data;
+ struct mapped_device *md = disk->private_data;
return dm_get_geometry(md, geo);
}
@@ -490,18 +490,13 @@ u64 dm_start_time_ns_from_clone(struct bio *bio)
}
EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
-static inline bool bio_is_flush_with_data(struct bio *bio)
-{
- return ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size);
-}
-
static inline unsigned int dm_io_sectors(struct dm_io *io, struct bio *bio)
{
/*
* If REQ_PREFLUSH set, don't account payload, it will be
* submitted (and accounted) after this flush completes.
*/
- if (bio_is_flush_with_data(bio))
+ if (io->requeue_flush_with_data)
return 0;
if (unlikely(dm_io_flagged(io, DM_IO_WAS_SPLIT)))
return io->sectors;
@@ -590,6 +585,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio, gfp_t g
io = container_of(tio, struct dm_io, tio);
io->magic = DM_IO_MAGIC;
io->status = BLK_STS_OK;
+ io->requeue_flush_with_data = false;
/* one ref is for submission, the other is for completion */
atomic_set(&io->io_count, 2);
@@ -948,6 +944,7 @@ static void __dm_io_complete(struct dm_io *io, bool first_stage)
struct mapped_device *md = io->md;
blk_status_t io_error;
bool requeued;
+ bool requeue_flush_with_data;
requeued = dm_handle_requeue(io, first_stage);
if (requeued && first_stage)
@@ -964,6 +961,7 @@ static void __dm_io_complete(struct dm_io *io, bool first_stage)
__dm_start_io_acct(io);
dm_end_io_acct(io);
}
+ requeue_flush_with_data = io->requeue_flush_with_data;
free_io(io);
smp_wmb();
this_cpu_dec(*md->pending_io);
@@ -976,7 +974,7 @@ static void __dm_io_complete(struct dm_io *io, bool first_stage)
if (requeued)
return;
- if (bio_is_flush_with_data(bio)) {
+ if (unlikely(requeue_flush_with_data)) {
/*
* Preflush done for flush with data, reissue
* without REQ_PREFLUSH.
@@ -1996,12 +1994,30 @@ static void dm_split_and_process_bio(struct mapped_device *md,
}
init_clone_info(&ci, io, map, bio, is_abnormal);
- if (bio->bi_opf & REQ_PREFLUSH) {
+ if (unlikely((bio->bi_opf & REQ_PREFLUSH) != 0)) {
+ /*
+ * The "flush_bypasses_map" is set on targets where it is safe
+ * to skip the map function and submit bios directly to the
+ * underlying block devices - currently, it is set for dm-linear
+ * and dm-stripe.
+ *
+ * If we have just one underlying device (i.e. there is one
+ * linear target or multiple linear targets pointing to the same
+ * device), we can send the flush with data directly to it.
+ */
+ if (map->flush_bypasses_map) {
+ struct list_head *devices = dm_table_get_devices(map);
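+ /* next == prev means the device list has at most one entry */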
+ if (devices->next == devices->prev)
+ goto send_preflush_with_data;
+ }
+ if (bio->bi_iter.bi_size)
+ io->requeue_flush_with_data = true;
__send_empty_flush(&ci);
/* dm_io_complete submits any data associated with flush */
goto out;
}
+send_preflush_with_data:
if (static_branch_unlikely(&zoned_enabled) &&
(bio_op(bio) == REQ_OP_ZONE_RESET_ALL)) {
error = __send_zone_reset_all(&ci);
@@ -2908,7 +2924,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
{
bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
- int r;
+ int r = 0;
lockdep_assert_held(&md->suspend_lock);
@@ -2960,8 +2976,10 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
* Stop md->queue before flushing md->wq in case request-based
* dm defers requests to md->wq from md->queue.
*/
- if (dm_request_based(md))
+ if (map && dm_request_based(md)) {
dm_stop_queue(md->queue);
+ set_bit(DMF_QUEUE_STOPPED, &md->flags);
+ }
flush_workqueue(md->wq);
@@ -2970,7 +2988,8 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
* We call dm_wait_for_completion to wait for all existing requests
* to finish.
*/
- r = dm_wait_for_completion(md, task_state);
+ if (map)
+ r = dm_wait_for_completion(md, task_state);
if (!r)
set_bit(dmf_suspended_flag, &md->flags);
@@ -2983,7 +3002,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
if (r < 0) {
dm_queue_flush(md);
- if (dm_request_based(md))
+ if (test_and_clear_bit(DMF_QUEUE_STOPPED, &md->flags))
dm_start_queue(md->queue);
unlock_fs(md);
@@ -3067,7 +3086,7 @@ static int __dm_resume(struct mapped_device *md, struct dm_table *map)
* so that mapping of targets can work correctly.
* Request-based dm is queueing the deferred I/Os in its request_queue.
*/
- if (dm_request_based(md))
+ if (test_and_clear_bit(DMF_QUEUE_STOPPED, &md->flags))
dm_start_queue(md->queue);
unlock_fs(md);
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 7f524a26cebc..84b7e2af6dba 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -34,15 +34,6 @@
#include "md-bitmap.h"
#include "md-cluster.h"
-#define BITMAP_MAJOR_LO 3
-/* version 4 insists the bitmap is in little-endian order
- * with version 3, it is host-endian which is non-portable
- * Version 5 is currently set only for clustered devices
- */
-#define BITMAP_MAJOR_HI 4
-#define BITMAP_MAJOR_CLUSTERED 5
-#define BITMAP_MAJOR_HOSTENDIAN 3
-
/*
* in-memory bitmap:
*
@@ -224,6 +215,8 @@ struct bitmap {
int cluster_slot;
};
+static struct workqueue_struct *md_bitmap_wq;
+
static int __bitmap_resize(struct bitmap *bitmap, sector_t blocks,
int chunksize, bool init);
@@ -232,20 +225,19 @@ static inline char *bmname(struct bitmap *bitmap)
return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
}
-static bool __bitmap_enabled(struct bitmap *bitmap)
-{
- return bitmap->storage.filemap &&
- !test_bit(BITMAP_STALE, &bitmap->flags);
-}
-
-static bool bitmap_enabled(struct mddev *mddev)
+static bool bitmap_enabled(void *data, bool flush)
{
- struct bitmap *bitmap = mddev->bitmap;
+ struct bitmap *bitmap = data;
- if (!bitmap)
- return false;
+ if (!flush)
+ return true;
- return __bitmap_enabled(bitmap);
+ /*
+ * If the caller wants to flush bitmap pages to the underlying disks,
+ * check that there are cached pages in the filemap.
+ */
+ return !test_bit(BITMAP_STALE, &bitmap->flags) &&
+ bitmap->storage.filemap != NULL;
}
/*
@@ -484,7 +476,8 @@ static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap,
return -EINVAL;
}
- md_super_write(mddev, rdev, sboff + ps, (int)min(size, bitmap_limit), page);
+ md_write_metadata(mddev, rdev, sboff + ps, (int)min(size, bitmap_limit),
+ page, 0);
return 0;
}
@@ -1244,7 +1237,7 @@ static void __bitmap_unplug(struct bitmap *bitmap)
int dirty, need_write;
int writing = 0;
- if (!__bitmap_enabled(bitmap))
+ if (!bitmap_enabled(bitmap, true))
return;
/* look at each page to see if there are any set bits that need to be
@@ -1788,15 +1781,9 @@ static bool __bitmap_start_sync(struct bitmap *bitmap, sector_t offset,
sector_t *blocks, bool degraded)
{
bitmap_counter_t *bmc;
- bool rv;
+ bool rv = false;
- if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */
- *blocks = 1024;
- return true; /* always resync if no bitmap */
- }
spin_lock_irq(&bitmap->counts.lock);
-
- rv = false;
bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
if (bmc) {
/* locked */
@@ -1845,10 +1832,6 @@ static void __bitmap_end_sync(struct bitmap *bitmap, sector_t offset,
bitmap_counter_t *bmc;
unsigned long flags;
- if (bitmap == NULL) {
- *blocks = 1024;
- return;
- }
spin_lock_irqsave(&bitmap->counts.lock, flags);
bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
if (bmc == NULL)
@@ -1987,12 +1970,12 @@ static void bitmap_dirty_bits(struct mddev *mddev, unsigned long s,
md_bitmap_set_memory_bits(bitmap, sec, 1);
md_bitmap_file_set_bit(bitmap, sec);
- if (sec < bitmap->mddev->recovery_cp)
+ if (sec < bitmap->mddev->resync_offset)
/* We are asserting that the array is dirty,
- * so move the recovery_cp address back so
+ * so move the resync_offset address back so
* that it is obvious that it is dirty
*/
- bitmap->mddev->recovery_cp = sec;
+ bitmap->mddev->resync_offset = sec;
}
}
@@ -2060,9 +2043,6 @@ static void bitmap_start_behind_write(struct mddev *mddev)
struct bitmap *bitmap = mddev->bitmap;
int bw;
- if (!bitmap)
- return;
-
atomic_inc(&bitmap->behind_writes);
bw = atomic_read(&bitmap->behind_writes);
if (bw > bitmap->behind_writes_used)
@@ -2076,9 +2056,6 @@ static void bitmap_end_behind_write(struct mddev *mddev)
{
struct bitmap *bitmap = mddev->bitmap;
- if (!bitmap)
- return;
-
if (atomic_dec_and_test(&bitmap->behind_writes))
wake_up(&bitmap->behind_wait);
pr_debug("dec write-behind count %d/%lu\n",
@@ -2258,7 +2235,7 @@ static int bitmap_load(struct mddev *mddev)
|| bitmap->events_cleared == mddev->events)
/* no need to keep dirty bits to optimise a
* re-add of a missing device */
- start = mddev->recovery_cp;
+ start = mddev->resync_offset;
mutex_lock(&mddev->bitmap_info.mutex);
err = md_bitmap_init_from_disk(bitmap, start);
@@ -2593,15 +2570,14 @@ err:
return ret;
}
-static int bitmap_resize(struct mddev *mddev, sector_t blocks, int chunksize,
- bool init)
+static int bitmap_resize(struct mddev *mddev, sector_t blocks, int chunksize)
{
struct bitmap *bitmap = mddev->bitmap;
if (!bitmap)
return 0;
- return __bitmap_resize(bitmap, blocks, chunksize, init);
+ return __bitmap_resize(bitmap, blocks, chunksize, false);
}
static ssize_t
@@ -2990,12 +2966,19 @@ static struct attribute *md_bitmap_attrs[] = {
&max_backlog_used.attr,
NULL
};
-const struct attribute_group md_bitmap_group = {
+
+static struct attribute_group md_bitmap_group = {
.name = "bitmap",
.attrs = md_bitmap_attrs,
};
static struct bitmap_operations bitmap_ops = {
+ .head = {
+ .type = MD_BITMAP,
+ .id = ID_BITMAP,
+ .name = "bitmap",
+ },
+
.enabled = bitmap_enabled,
.create = bitmap_create,
.resize = bitmap_resize,
@@ -3013,6 +2996,9 @@ static struct bitmap_operations bitmap_ops = {
.start_write = bitmap_start_write,
.end_write = bitmap_end_write,
+ .start_discard = bitmap_start_write,
+ .end_discard = bitmap_end_write,
+
.start_sync = bitmap_start_sync,
.end_sync = bitmap_end_sync,
.cond_end_sync = bitmap_cond_end_sync,
@@ -3026,9 +3012,22 @@ static struct bitmap_operations bitmap_ops = {
.copy_from_slot = bitmap_copy_from_slot,
.set_pages = bitmap_set_pages,
.free = md_bitmap_free,
+
+ .group = &md_bitmap_group,
};
-void mddev_set_bitmap_ops(struct mddev *mddev)
+int md_bitmap_init(void)
+{
+ md_bitmap_wq = alloc_workqueue("md_bitmap", WQ_MEM_RECLAIM | WQ_UNBOUND,
+ 0);
+ if (!md_bitmap_wq)
+ return -ENOMEM;
+
+ return register_md_submodule(&bitmap_ops.head);
+}
+
+void md_bitmap_exit(void)
{
- mddev->bitmap_ops = &bitmap_ops;
+ destroy_workqueue(md_bitmap_wq);
+ unregister_md_submodule(&bitmap_ops.head);
}
diff --git a/drivers/md/md-bitmap.h b/drivers/md/md-bitmap.h
index 59e9dd45cfde..b42a28fa83a0 100644
--- a/drivers/md/md-bitmap.h
+++ b/drivers/md/md-bitmap.h
@@ -9,10 +9,26 @@
#define BITMAP_MAGIC 0x6d746962
+/*
+ * version 3 is host-endian order; this is deprecated and not used for
+ * new arrays
+ */
+#define BITMAP_MAJOR_LO 3
+#define BITMAP_MAJOR_HOSTENDIAN 3
+/* version 4 is little-endian order, the default value */
+#define BITMAP_MAJOR_HI 4
+/* version 5 is only used for cluster */
+#define BITMAP_MAJOR_CLUSTERED 5
+/* version 6 is only used for lockless bitmap */
+#define BITMAP_MAJOR_LOCKLESS 6
+
/* use these for bitmap->flags and bitmap->sb->state bit-fields */
enum bitmap_state {
- BITMAP_STALE = 1, /* the bitmap file is out of date or had -EIO */
+ BITMAP_STALE = 1, /* the bitmap file is out of date or had -EIO */
BITMAP_WRITE_ERROR = 2, /* A write error has occurred */
+ BITMAP_FIRST_USE = 3, /* llbitmap is just created */
+ BITMAP_CLEAN = 4, /* llbitmap is created with assume_clean */
+ BITMAP_DAEMON_BUSY = 5, /* llbitmap daemon has not finished after daemon_sleep */
BITMAP_HOSTENDIAN =15,
};
@@ -61,11 +77,15 @@ struct md_bitmap_stats {
struct file *file;
};
+typedef void (md_bitmap_fn)(struct mddev *mddev, sector_t offset,
+ unsigned long sectors);
+
struct bitmap_operations {
- bool (*enabled)(struct mddev *mddev);
+ struct md_submodule_head head;
+
+ bool (*enabled)(void *data, bool flush);
int (*create)(struct mddev *mddev);
- int (*resize)(struct mddev *mddev, sector_t blocks, int chunksize,
- bool init);
+ int (*resize)(struct mddev *mddev, sector_t blocks, int chunksize);
int (*load)(struct mddev *mddev);
void (*destroy)(struct mddev *mddev);
@@ -80,10 +100,13 @@ struct bitmap_operations {
void (*end_behind_write)(struct mddev *mddev);
void (*wait_behind_writes)(struct mddev *mddev);
- void (*start_write)(struct mddev *mddev, sector_t offset,
- unsigned long sectors);
- void (*end_write)(struct mddev *mddev, sector_t offset,
- unsigned long sectors);
+ md_bitmap_fn *start_write;
+ md_bitmap_fn *end_write;
+ md_bitmap_fn *start_discard;
+ md_bitmap_fn *end_discard;
+
+ sector_t (*skip_sync_blocks)(struct mddev *mddev, sector_t offset);
+ bool (*blocks_synced)(struct mddev *mddev, sector_t offset);
bool (*start_sync)(struct mddev *mddev, sector_t offset,
sector_t *blocks, bool degraded);
void (*end_sync)(struct mddev *mddev, sector_t offset, sector_t *blocks);
@@ -101,9 +124,75 @@ struct bitmap_operations {
sector_t *hi, bool clear_bits);
void (*set_pages)(void *data, unsigned long pages);
void (*free)(void *data);
+
+ struct attribute_group *group;
};
/* the bitmap API */
-void mddev_set_bitmap_ops(struct mddev *mddev);
+static inline bool md_bitmap_registered(struct mddev *mddev)
+{
+ return mddev->bitmap_ops != NULL;
+}
+
+static inline bool md_bitmap_enabled(struct mddev *mddev, bool flush)
+{
+ /* bitmap_ops must be registered before creating bitmap. */
+ if (!md_bitmap_registered(mddev))
+ return false;
+
+ if (!mddev->bitmap)
+ return false;
+
+ return mddev->bitmap_ops->enabled(mddev->bitmap, flush);
+}
+
+static inline bool md_bitmap_start_sync(struct mddev *mddev, sector_t offset,
+ sector_t *blocks, bool degraded)
+{
+ /* always resync if no bitmap */
+ if (!md_bitmap_enabled(mddev, false)) {
+ *blocks = 1024;
+ return true;
+ }
+
+ return mddev->bitmap_ops->start_sync(mddev, offset, blocks, degraded);
+}
+
+static inline void md_bitmap_end_sync(struct mddev *mddev, sector_t offset,
+ sector_t *blocks)
+{
+ if (!md_bitmap_enabled(mddev, false)) {
+ *blocks = 1024;
+ return;
+ }
+
+ mddev->bitmap_ops->end_sync(mddev, offset, blocks);
+}
+
+#ifdef CONFIG_MD_BITMAP
+int md_bitmap_init(void);
+void md_bitmap_exit(void);
+#else
+static inline int md_bitmap_init(void)
+{
+ return 0;
+}
+static inline void md_bitmap_exit(void)
+{
+}
+#endif
+
+#ifdef CONFIG_MD_LLBITMAP
+int md_llbitmap_init(void);
+void md_llbitmap_exit(void);
+#else
+static inline int md_llbitmap_init(void)
+{
+ return 0;
+}
+static inline void md_llbitmap_exit(void)
+{
+}
+#endif
#endif
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 94221d964d4f..11f1e91d387d 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -337,11 +337,11 @@ static void recover_bitmaps(struct md_thread *thread)
md_wakeup_thread(mddev->sync_thread);
if (hi > 0) {
- if (lo < mddev->recovery_cp)
- mddev->recovery_cp = lo;
+ if (lo < mddev->resync_offset)
+ mddev->resync_offset = lo;
/* wake up thread to continue resync in case resync
* is not finished */
- if (mddev->recovery_cp != MaxSector) {
+ if (mddev->resync_offset != MaxSector) {
/*
* clear the REMOTE flag since we will launch
* resync thread in current node.
@@ -630,7 +630,7 @@ static int process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
if (le64_to_cpu(msg->high) != mddev->pers->size(mddev, 0, 0))
ret = mddev->bitmap_ops->resize(mddev,
le64_to_cpu(msg->high),
- 0, false);
+ 0);
break;
default:
ret = -1;
@@ -863,9 +863,9 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
lockres_free(bm_lockres);
continue;
}
- if ((hi > 0) && (lo < mddev->recovery_cp)) {
+ if ((hi > 0) && (lo < mddev->resync_offset)) {
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- mddev->recovery_cp = lo;
+ mddev->resync_offset = lo;
md_check_recovery(mddev);
}
@@ -979,7 +979,7 @@ err:
lockres_free(cinfo->resync_lockres);
lockres_free(cinfo->bitmap_lockres);
if (cinfo->lockspace)
- dlm_release_lockspace(cinfo->lockspace, 2);
+ dlm_release_lockspace(cinfo->lockspace, DLM_RELEASE_NORMAL);
mddev->cluster_info = NULL;
kfree(cinfo);
return ret;
@@ -1027,7 +1027,7 @@ static int leave(struct mddev *mddev)
* Also, we should send BITMAP_NEEDS_SYNC message in
* case reshaping is interrupted.
*/
- if ((cinfo->slot_number > 0 && mddev->recovery_cp != MaxSector) ||
+ if ((cinfo->slot_number > 0 && mddev->resync_offset != MaxSector) ||
(mddev->reshape_position != MaxSector &&
test_bit(MD_CLOSING, &mddev->flags)))
resync_bitmap(mddev);
@@ -1042,7 +1042,7 @@ static int leave(struct mddev *mddev)
lockres_free(cinfo->resync_lockres);
lockres_free(cinfo->bitmap_lockres);
unlock_all_bitmaps(mddev);
- dlm_release_lockspace(cinfo->lockspace, 2);
+ dlm_release_lockspace(cinfo->lockspace, DLM_RELEASE_NORMAL);
kfree(cinfo);
return 0;
}
@@ -1605,8 +1605,8 @@ static int gather_bitmaps(struct md_rdev *rdev)
pr_warn("md-cluster: Could not gather bitmaps from slot %d", sn);
goto out;
}
- if ((hi > 0) && (lo < mddev->recovery_cp))
- mddev->recovery_cp = lo;
+ if ((hi > 0) && (lo < mddev->resync_offset))
+ mddev->resync_offset = lo;
}
out:
return err;
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index 5d9b08115375..7033d982d377 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -73,6 +73,7 @@ static int linear_set_limits(struct mddev *mddev)
md_init_stacking_limits(&lim);
lim.max_hw_sectors = mddev->chunk_sectors;
lim.max_write_zeroes_sectors = mddev->chunk_sectors;
+ lim.max_hw_wzeroes_unmap_sectors = mddev->chunk_sectors;
lim.io_min = mddev->chunk_sectors << 9;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
if (err)
@@ -256,18 +257,10 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
if (unlikely(bio_end_sector(bio) > end_sector)) {
/* This bio crosses a device boundary, so we have to split it */
- struct bio *split = bio_split(bio, end_sector - bio_sector,
- GFP_NOIO, &mddev->bio_set);
-
- if (IS_ERR(split)) {
- bio->bi_status = errno_to_blk_status(PTR_ERR(split));
- bio_endio(bio);
+ bio = bio_submit_split_bioset(bio, end_sector - bio_sector,
+ &mddev->bio_set);
+ if (!bio)
return true;
- }
-
- bio_chain(split, bio);
- submit_bio_noacct(bio);
- bio = split;
}
md_account_bio(mddev, &bio);
diff --git a/drivers/md/md-llbitmap.c b/drivers/md/md-llbitmap.c
new file mode 100644
index 000000000000..1eb434306162
--- /dev/null
+++ b/drivers/md/md-llbitmap.c
@@ -0,0 +1,1626 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/blkdev.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/file.h>
+#include <linux/seq_file.h>
+#include <trace/events/block.h>
+
+#include "md.h"
+#include "md-bitmap.h"
+
+/*
+ * #### Background
+ *
+ * Redundant data is used to enhance data fault tolerance, and the storage
+ * method for redundant data varies depending on the RAID level; it's
+ * important to maintain the consistency of this redundant data.
+ *
+ * Bitmap is used to record which data blocks have been synchronized and which
+ * ones need to be resynchronized or recovered. Each bit in the bitmap
+ * represents a segment of data in the array. When a bit is set, it indicates
+ * that the multiple redundant copies of that data segment may not be
+ * consistent. Data synchronization can be performed based on the bitmap after
+ * power failure or re-adding a disk. If there is no bitmap, a full disk
+ * synchronization is required.
+ *
+ * #### Key Features
+ *
+ * - The IO fastpath is lockless; if the user issues lots of write IO to the
+ *   same bitmap bit in a short time, only the first write has the additional
+ *   overhead of updating the bitmap bit, with no additional overhead for the
+ *   following writes;
+ * - Support resyncing or recovering only written data, meaning that when
+ *   creating a new array or replacing with a new disk, there is no need to
+ *   do a full disk resync/recovery;
+ *
+ * #### Key Concept
+ *
+ * ##### State Machine
+ *
+ * Each bit is one byte, containing 6 different states (see llbitmap_state),
+ * and there are 8 different actions in total (see llbitmap_action) that can
+ * change the state:
+ *
+ * llbitmap state machine: transitions between states
+ *
+ * | | Startwrite | Startsync | Endsync | Abortsync|
+ * | --------- | ---------- | --------- | ------- | ------- |
+ * | Unwritten | Dirty | x | x | x |
+ * | Clean | Dirty | x | x | x |
+ * | Dirty | x | x | x | x |
+ * | NeedSync | x | Syncing | x | x |
+ * | Syncing | x | Syncing | Dirty | NeedSync |
+ *
+ * | | Reload | Daemon | Discard | Stale |
+ * | --------- | -------- | ------ | --------- | --------- |
+ * | Unwritten | x | x | x | x |
+ * | Clean | x | x | Unwritten | NeedSync |
+ * | Dirty | NeedSync | Clean | Unwritten | NeedSync |
+ * | NeedSync | x | x | Unwritten | x |
+ * | Syncing | NeedSync | x | Unwritten | NeedSync |
+ *
+ * Typical scenarios:
+ *
+ * 1) Create new array
+ * All bits will be set to Unwritten by default; if --assume-clean is set,
+ * all bits will be set to Clean instead.
+ *
+ * 2) write data: raid1/raid10 have a full copy of the data, while raid456
+ * doesn't and relies on xor data
+ *
+ * 2.1) write new data to raid1/raid10:
+ * Unwritten --StartWrite--> Dirty
+ *
+ * 2.2) write new data to raid456:
+ * Unwritten --StartWrite--> NeedSync
+ *
+ * Because the initial recovery for raid456 is skipped, the xor data is not
+ * built yet; the bit must be set to NeedSync first, and after the lazy
+ * initial recovery is finished, the bit will finally be set to Dirty (see
+ * 5.1 and 5.4);
+ *
+ * 2.3) overwrite existing data
+ * Clean --StartWrite--> Dirty
+ *
+ * 3) daemon, if the array is not degraded:
+ * Dirty --Daemon--> Clean
+ *
+ * 4) discard
+ * {Clean, Dirty, NeedSync, Syncing} --Discard--> Unwritten
+ *
+ * 5) resync and recover
+ *
+ * 5.1) common process
+ * NeedSync --Startsync--> Syncing --Endsync--> Dirty --Daemon--> Clean
+ *
+ * 5.2) resync after power failure
+ * Dirty --Reload--> NeedSync
+ *
+ * 5.3) recover while replacing with a new disk
+ * By default, the old bitmap framework will recover all data; llbitmap
+ * changes this with a new helper, llbitmap_skip_sync_blocks, which skips
+ * recovery for bits other than Dirty or Clean;
+ *
+ * 5.4) lazy initial recover for raid5:
+ * By default, the old bitmap framework will only allow a new recovery when
+ * there are spares (new disks); a new recovery flag, MD_RECOVERY_LAZY_RECOVER,
+ * is added to perform raid456 lazy recovery for set bits (from 2.2).
+ *
+ * 6) special handling for a degraded array:
+ *
+ * - Dirty bits will never be cleared; the daemon will just do nothing, so
+ *   that if a disk is re-added, Clean bits can be skipped during recovery;
+ * - Dirty bits will convert to Syncing when sync starts, to do data recovery
+ *   for newly added disks;
+ * - New writes will convert bits to NeedSync directly;
+ *
+ * ##### Bitmap IO
+ *
+ * ##### Chunksize
+ *
+ * The default bitmap size is 128k, including a 1k bitmap super block, and
+ * the default size of the segment of array data covered by each bit (the
+ * chunksize) is 64k. The chunksize is doubled each time while the total
+ * number of bits would exceed 127k (see llbitmap_init).
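+ *
+ * A rough worked example (assuming the default 127k of bitmap space, one
+ * byte per bit): a 64 GiB array has 2^27 sectors; with the initial 64k
+ * chunksize that would be 2^20 bits, so the chunksize is doubled until the
+ * bits fit: 512k chunks would still need 131072 bits (more than 127k),
+ * while 1M chunks need 65536, so each bit ends up covering 1 MiB.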
+ *
+ * ##### READ
+ *
+ * While creating the bitmap, all pages will be allocated and read for
+ * llbitmap; there won't be any reads afterwards.
+ *
+ * ##### WRITE
+ *
+ * WRITE IO is divided into blocks of the array's logical_block_size, and the
+ * dirty state of each block is tracked independently. For example:
+ *
+ * each page is 4k and contains 8 blocks; each block is 512 bytes and
+ * contains 512 bits;
+ *
+ * | page0 | page1 | ... | page 31 |
+ * | |
+ * | \-----------------------\
+ * | |
+ * | block0 | block1 | ... | block 8|
+ * | |
+ * | \-----------------\
+ * | |
+ * | bit0 | bit1 | ... | bit511 |
+ *
+ * From the IO path, if one bit is changed to Dirty or NeedSync, the
+ * corresponding subpage will be marked dirty, and such a block must be
+ * written first before the IO is issued. This behaviour will affect IO
+ * performance; to reduce the impact, if multiple bits are changed in the
+ * same block in a short time, all bits in this block will be changed to
+ * Dirty/NeedSync, so that there won't be any additional overhead until the
+ * daemon clears dirty bits.
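+ *
+ * For example (assuming 4k pages and 512-byte blocks): bit 10000 sits at
+ * byte 10000 + 1024 (the super block) = 11024, i.e. page 2 (11024 / 4096),
+ * block 5 ((11024 % 4096) / 512) of that page.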
+ *
+ * ##### Dirty Bits synchronization
+ *
+ * The IO fast path will set bits to dirty, and those dirty bits will be
+ * cleared by the daemon after the IO is done. llbitmap_page_ctl is used to
+ * synchronize between the IO path and the daemon:
+ *
+ * IO path:
+ * 1) try to grab a reference; if this succeeds, set the expire time to 5s
+ *    from now and return;
+ * 2) if grabbing a reference fails, wait for the daemon to finish clearing
+ *    dirty bits;
+ *
+ * Daemon (woken up every daemon_sleep seconds), for each page:
+ * 1) check if the page has expired; if not, skip this page; for an expired
+ *    page:
+ * 2) suspend the page and wait for inflight write IO to be done;
+ * 3) change dirty page to clean;
+ * 4) resume the page;
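+ *
+ * This maps onto the percpu_ref in llbitmap_page_ctl: the IO path grabs it
+ * with percpu_ref_tryget_live() (llbitmap_raise_barrier), while the daemon
+ * kills it, waits for it to drop to zero, and resurrects it afterwards
+ * (llbitmap_suspend_timeout and llbitmap_resume).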
+ */
+
+#define BITMAP_DATA_OFFSET 1024
+
+/* 64k is the max IO size of sync IO for raid1/raid10 */
+#define MIN_CHUNK_SIZE (64 * 2)
+
+/* By default, daemon will be woken up every 30s */
+#define DEFAULT_DAEMON_SLEEP 30
+
+/*
+ * Dirtied bits that have not been accessed for more than 5s will be cleared
+ * by the daemon.
+ */
+#define DEFAULT_BARRIER_IDLE 5
+
+enum llbitmap_state {
+	/* No valid data, init state after assembling the array */
+ BitUnwritten = 0,
+ /* data is consistent */
+ BitClean,
+ /* data will be consistent after IO is done, set directly for writes */
+ BitDirty,
+ /*
+ * data need to be resynchronized:
+	 * 1) set directly for writes if the array is degraded, to prevent
+	 *    full disk synchronization after re-adding a disk;
+ * 2) reassemble the array after power failure, and dirty bits are
+ * found after reloading the bitmap;
+ * 3) set for first write for raid5, to build initial xor data lazily
+ */
+ BitNeedSync,
+ /* data is synchronizing */
+ BitSyncing,
+ BitStateCount,
+ BitNone = 0xff,
+};
+
+enum llbitmap_action {
+	/* User writes new data; the only action from the IO fast path */
+ BitmapActionStartwrite = 0,
+ /* Start recovery */
+ BitmapActionStartsync,
+ /* Finish recovery */
+ BitmapActionEndsync,
+ /* Failed recovery */
+ BitmapActionAbortsync,
+ /* Reassemble the array */
+ BitmapActionReload,
+ /* Daemon thread is trying to clear dirty bits */
+ BitmapActionDaemon,
+ /* Data is deleted */
+ BitmapActionDiscard,
+	/*
+	 * Bitmap is stale, mark all bits except BitUnwritten as
+	 * BitNeedSync.
+	 */
+ BitmapActionStale,
+ BitmapActionCount,
+ /* Init state is BitUnwritten */
+ BitmapActionInit,
+};
+
+enum llbitmap_page_state {
+ LLPageFlush = 0,
+ LLPageDirty,
+};
+
+struct llbitmap_page_ctl {
+ char *state;
+ struct page *page;
+ unsigned long expire;
+ unsigned long flags;
+ wait_queue_head_t wait;
+ struct percpu_ref active;
+	/* Per-block dirty state; at most 64k page / 512-byte block = 128 */
+ unsigned long dirty[];
+};
+
+struct llbitmap {
+ struct mddev *mddev;
+ struct llbitmap_page_ctl **pctl;
+
+ unsigned int nr_pages;
+ unsigned int io_size;
+ unsigned int blocks_per_page;
+
+ /* shift of one chunk */
+ unsigned long chunkshift;
+	/* size of one chunk in sectors */
+ unsigned long chunksize;
+ /* total number of chunks */
+ unsigned long chunks;
+ unsigned long last_end_sync;
+ /*
+ * time in seconds that dirty bits will be cleared if the page is not
+ * accessed.
+ */
+ unsigned long barrier_idle;
+ /* fires on first BitDirty state */
+ struct timer_list pending_timer;
+ struct work_struct daemon_work;
+
+ unsigned long flags;
+ __u64 events_cleared;
+
+ /* for slow disks */
+ atomic_t behind_writes;
+ wait_queue_head_t behind_wait;
+};
+
+struct llbitmap_unplug_work {
+ struct work_struct work;
+ struct llbitmap *llbitmap;
+ struct completion *done;
+};
+
+static struct workqueue_struct *md_llbitmap_io_wq;
+static struct workqueue_struct *md_llbitmap_unplug_wq;
+
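+/*
+ * BitNone entries (the "x" cells in the tables above) leave the bit
+ * unchanged.
+ */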
+static char state_machine[BitStateCount][BitmapActionCount] = {
+ [BitUnwritten] = {
+ [BitmapActionStartwrite] = BitDirty,
+ [BitmapActionStartsync] = BitNone,
+ [BitmapActionEndsync] = BitNone,
+ [BitmapActionAbortsync] = BitNone,
+ [BitmapActionReload] = BitNone,
+ [BitmapActionDaemon] = BitNone,
+ [BitmapActionDiscard] = BitNone,
+ [BitmapActionStale] = BitNone,
+ },
+ [BitClean] = {
+ [BitmapActionStartwrite] = BitDirty,
+ [BitmapActionStartsync] = BitNone,
+ [BitmapActionEndsync] = BitNone,
+ [BitmapActionAbortsync] = BitNone,
+ [BitmapActionReload] = BitNone,
+ [BitmapActionDaemon] = BitNone,
+ [BitmapActionDiscard] = BitUnwritten,
+ [BitmapActionStale] = BitNeedSync,
+ },
+ [BitDirty] = {
+ [BitmapActionStartwrite] = BitNone,
+ [BitmapActionStartsync] = BitNone,
+ [BitmapActionEndsync] = BitNone,
+ [BitmapActionAbortsync] = BitNone,
+ [BitmapActionReload] = BitNeedSync,
+ [BitmapActionDaemon] = BitClean,
+ [BitmapActionDiscard] = BitUnwritten,
+ [BitmapActionStale] = BitNeedSync,
+ },
+ [BitNeedSync] = {
+ [BitmapActionStartwrite] = BitNone,
+ [BitmapActionStartsync] = BitSyncing,
+ [BitmapActionEndsync] = BitNone,
+ [BitmapActionAbortsync] = BitNone,
+ [BitmapActionReload] = BitNone,
+ [BitmapActionDaemon] = BitNone,
+ [BitmapActionDiscard] = BitUnwritten,
+ [BitmapActionStale] = BitNone,
+ },
+ [BitSyncing] = {
+ [BitmapActionStartwrite] = BitNone,
+ [BitmapActionStartsync] = BitSyncing,
+ [BitmapActionEndsync] = BitDirty,
+ [BitmapActionAbortsync] = BitNeedSync,
+ [BitmapActionReload] = BitNeedSync,
+ [BitmapActionDaemon] = BitNone,
+ [BitmapActionDiscard] = BitUnwritten,
+ [BitmapActionStale] = BitNeedSync,
+ },
+};
+
+static void __llbitmap_flush(struct mddev *mddev);
+
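+/*
+ * Return the state byte of chunk @pos; the first BITMAP_DATA_OFFSET bytes of
+ * page 0 hold the bitmap super block, hence the offset.
+ */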
+static enum llbitmap_state llbitmap_read(struct llbitmap *llbitmap, loff_t pos)
+{
+ unsigned int idx;
+ unsigned int offset;
+
+ pos += BITMAP_DATA_OFFSET;
+ idx = pos >> PAGE_SHIFT;
+ offset = offset_in_page(pos);
+
+ return llbitmap->pctl[idx]->state[offset];
+}
+
+/* set all the bits in the subpage as dirty */
+static void llbitmap_infect_dirty_bits(struct llbitmap *llbitmap,
+ struct llbitmap_page_ctl *pctl,
+ unsigned int block)
+{
+ bool level_456 = raid_is_456(llbitmap->mddev);
+ unsigned int io_size = llbitmap->io_size;
+ int pos;
+
+ for (pos = block * io_size; pos < (block + 1) * io_size; pos++) {
+ switch (pctl->state[pos]) {
+ case BitUnwritten:
+ pctl->state[pos] = level_456 ? BitNeedSync : BitDirty;
+ break;
+ case BitClean:
+ pctl->state[pos] = BitDirty;
+ break;
+		}
+ }
+}
+
+static void llbitmap_set_page_dirty(struct llbitmap *llbitmap, int idx,
+ int offset)
+{
+ struct llbitmap_page_ctl *pctl = llbitmap->pctl[idx];
+ unsigned int io_size = llbitmap->io_size;
+ int block = offset / io_size;
+ int pos;
+
+ if (!test_bit(LLPageDirty, &pctl->flags))
+ set_bit(LLPageDirty, &pctl->flags);
+
+ /*
+	 * For a degraded array, dirty bits will never be cleared, and we must
+	 * resync all the dirty bits; hence skip infecting new dirty bits to
+	 * avoid resyncing unnecessary data.
+ */
+ if (llbitmap->mddev->degraded) {
+ set_bit(block, pctl->dirty);
+ return;
+ }
+
+ /*
+ * The subpage usually contains a total of 512 bits. If any single bit
+ * within the subpage is marked as dirty, the entire sector will be
+ * written. To avoid impacting write performance, when multiple bits
+ * within the same sector are modified within llbitmap->barrier_idle,
+ * all bits in the sector will be collectively marked as dirty at once.
+ */
+ if (test_and_set_bit(block, pctl->dirty)) {
+ llbitmap_infect_dirty_bits(llbitmap, pctl, block);
+ return;
+ }
+
+ for (pos = block * io_size; pos < (block + 1) * io_size; pos++) {
+ if (pos == offset)
+ continue;
+ if (pctl->state[pos] == BitDirty ||
+ pctl->state[pos] == BitNeedSync) {
+ llbitmap_infect_dirty_bits(llbitmap, pctl, block);
+ return;
+ }
+ }
+}
+
+static void llbitmap_write(struct llbitmap *llbitmap, enum llbitmap_state state,
+ loff_t pos)
+{
+ unsigned int idx;
+ unsigned int bit;
+
+ pos += BITMAP_DATA_OFFSET;
+ idx = pos >> PAGE_SHIFT;
+ bit = offset_in_page(pos);
+
+ llbitmap->pctl[idx]->state[bit] = state;
+ if (state == BitDirty || state == BitNeedSync)
+ llbitmap_set_page_dirty(llbitmap, idx, bit);
+}
+
+static struct page *llbitmap_read_page(struct llbitmap *llbitmap, int idx)
+{
+ struct mddev *mddev = llbitmap->mddev;
+ struct page *page = NULL;
+ struct md_rdev *rdev;
+
+ if (llbitmap->pctl && llbitmap->pctl[idx])
+ page = llbitmap->pctl[idx]->page;
+ if (page)
+ return page;
+
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ return ERR_PTR(-ENOMEM);
+
+ rdev_for_each(rdev, mddev) {
+ sector_t sector;
+
+ if (rdev->raid_disk < 0 || test_bit(Faulty, &rdev->flags))
+ continue;
+
+ sector = mddev->bitmap_info.offset +
+ (idx << PAGE_SECTORS_SHIFT);
+
+ if (sync_page_io(rdev, sector, PAGE_SIZE, page, REQ_OP_READ,
+ true))
+ return page;
+
+ md_error(mddev, rdev);
+ }
+
+ __free_page(page);
+ return ERR_PTR(-EIO);
+}
+
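+/*
+ * Write back the io_size blocks of page @idx that are marked dirty in
+ * pctl->dirty, one block at a time, to every active rdev.
+ */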
+static void llbitmap_write_page(struct llbitmap *llbitmap, int idx)
+{
+ struct page *page = llbitmap->pctl[idx]->page;
+ struct mddev *mddev = llbitmap->mddev;
+ struct md_rdev *rdev;
+ int block;
+
+ for (block = 0; block < llbitmap->blocks_per_page; block++) {
+ struct llbitmap_page_ctl *pctl = llbitmap->pctl[idx];
+
+ if (!test_and_clear_bit(block, pctl->dirty))
+ continue;
+
+ rdev_for_each(rdev, mddev) {
+ sector_t sector;
+ sector_t bit_sector = llbitmap->io_size >> SECTOR_SHIFT;
+
+ if (rdev->raid_disk < 0 || test_bit(Faulty, &rdev->flags))
+ continue;
+
+ sector = mddev->bitmap_info.offset + rdev->sb_start +
+ (idx << PAGE_SECTORS_SHIFT) +
+ block * bit_sector;
+ md_write_metadata(mddev, rdev, sector,
+ llbitmap->io_size, page,
+ block * llbitmap->io_size);
+ }
+ }
+}
+
+static void active_release(struct percpu_ref *ref)
+{
+ struct llbitmap_page_ctl *pctl =
+ container_of(ref, struct llbitmap_page_ctl, active);
+
+ wake_up(&pctl->wait);
+}
+
+static void llbitmap_free_pages(struct llbitmap *llbitmap)
+{
+ int i;
+
+ if (!llbitmap->pctl)
+ return;
+
+ for (i = 0; i < llbitmap->nr_pages; i++) {
+ struct llbitmap_page_ctl *pctl = llbitmap->pctl[i];
+
+ if (!pctl || !pctl->page)
+ break;
+
+ __free_page(pctl->page);
+ percpu_ref_exit(&pctl->active);
+ }
+
+ kfree(llbitmap->pctl[0]);
+ kfree(llbitmap->pctl);
+ llbitmap->pctl = NULL;
+}
+
+static int llbitmap_cache_pages(struct llbitmap *llbitmap)
+{
+ struct llbitmap_page_ctl *pctl;
+ unsigned int nr_pages = DIV_ROUND_UP(llbitmap->chunks +
+ BITMAP_DATA_OFFSET, PAGE_SIZE);
+ unsigned int size = struct_size(pctl, dirty, BITS_TO_LONGS(
+ llbitmap->blocks_per_page));
+ int i;
+
+ llbitmap->pctl = kmalloc_array(nr_pages, sizeof(void *),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!llbitmap->pctl)
+ return -ENOMEM;
+
+ size = round_up(size, cache_line_size());
+ pctl = kmalloc_array(nr_pages, size, GFP_KERNEL | __GFP_ZERO);
+ if (!pctl) {
+ kfree(llbitmap->pctl);
+ return -ENOMEM;
+ }
+
+ llbitmap->nr_pages = nr_pages;
+
+ for (i = 0; i < nr_pages; i++, pctl = (void *)pctl + size) {
+ struct page *page = llbitmap_read_page(llbitmap, i);
+
+ llbitmap->pctl[i] = pctl;
+
+ if (IS_ERR(page)) {
+ llbitmap_free_pages(llbitmap);
+ return PTR_ERR(page);
+ }
+
+ if (percpu_ref_init(&pctl->active, active_release,
+ PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
+ __free_page(page);
+ llbitmap_free_pages(llbitmap);
+ return -ENOMEM;
+ }
+
+ pctl->page = page;
+ pctl->state = page_address(page);
+ init_waitqueue_head(&pctl->wait);
+ }
+
+ return 0;
+}
+
+static void llbitmap_init_state(struct llbitmap *llbitmap)
+{
+ enum llbitmap_state state = BitUnwritten;
+ unsigned long i;
+
+ if (test_and_clear_bit(BITMAP_CLEAN, &llbitmap->flags))
+ state = BitClean;
+
+ for (i = 0; i < llbitmap->chunks; i++)
+ llbitmap_write(llbitmap, state, i);
+}
+
+/* The return value is only used from resync, where @start == @end. */
+static enum llbitmap_state llbitmap_state_machine(struct llbitmap *llbitmap,
+ unsigned long start,
+ unsigned long end,
+ enum llbitmap_action action)
+{
+ struct mddev *mddev = llbitmap->mddev;
+ enum llbitmap_state state = BitNone;
+ bool level_456 = raid_is_456(llbitmap->mddev);
+ bool need_resync = false;
+ bool need_recovery = false;
+
+ if (test_bit(BITMAP_WRITE_ERROR, &llbitmap->flags))
+ return BitNone;
+
+ if (action == BitmapActionInit) {
+ llbitmap_init_state(llbitmap);
+ return BitNone;
+ }
+
+ while (start <= end) {
+ enum llbitmap_state c = llbitmap_read(llbitmap, start);
+
+ if (c < 0 || c >= BitStateCount) {
+ pr_err("%s: invalid bit %lu state %d action %d, forcing resync\n",
+ __func__, start, c, action);
+ state = BitNeedSync;
+ goto write_bitmap;
+ }
+
+ if (c == BitNeedSync)
+ need_resync = !mddev->degraded;
+
+ state = state_machine[c][action];
+
+write_bitmap:
+ if (unlikely(mddev->degraded)) {
+ /* For degraded array, mark new data as need sync. */
+ if (state == BitDirty &&
+ action == BitmapActionStartwrite)
+ state = BitNeedSync;
+ /*
+			 * For a degraded array, resync dirty data as well;
+			 * note that if the array is still degraded after
+			 * resync is done, all new data will still be dirty
+			 * until the array is clean.
+ */
+ else if (c == BitDirty &&
+ action == BitmapActionStartsync)
+ state = BitSyncing;
+ } else if (c == BitUnwritten && state == BitDirty &&
+ action == BitmapActionStartwrite && level_456) {
+ /* Delay raid456 initial recovery to first write. */
+ state = BitNeedSync;
+ }
+
+ if (state == BitNone) {
+ start++;
+ continue;
+ }
+
+ llbitmap_write(llbitmap, state, start);
+
+ if (state == BitNeedSync)
+ need_resync = !mddev->degraded;
+ else if (state == BitDirty &&
+ !timer_pending(&llbitmap->pending_timer))
+ mod_timer(&llbitmap->pending_timer,
+ jiffies + mddev->bitmap_info.daemon_sleep * HZ);
+
+ start++;
+ }
+
+ if (need_resync && level_456)
+ need_recovery = true;
+
+ if (need_recovery) {
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ set_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ } else if (need_resync) {
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ }
+
+ return state;
+}
+
+static void llbitmap_raise_barrier(struct llbitmap *llbitmap, int page_idx)
+{
+ struct llbitmap_page_ctl *pctl = llbitmap->pctl[page_idx];
+
+retry:
+ if (likely(percpu_ref_tryget_live(&pctl->active))) {
+ WRITE_ONCE(pctl->expire, jiffies + llbitmap->barrier_idle * HZ);
+ return;
+ }
+
+ wait_event(pctl->wait, !percpu_ref_is_dying(&pctl->active));
+ goto retry;
+}
+
+static void llbitmap_release_barrier(struct llbitmap *llbitmap, int page_idx)
+{
+ struct llbitmap_page_ctl *pctl = llbitmap->pctl[page_idx];
+
+ percpu_ref_put(&pctl->active);
+}
+
+static int llbitmap_suspend_timeout(struct llbitmap *llbitmap, int page_idx)
+{
+ struct llbitmap_page_ctl *pctl = llbitmap->pctl[page_idx];
+
+ percpu_ref_kill(&pctl->active);
+
+ if (!wait_event_timeout(pctl->wait, percpu_ref_is_zero(&pctl->active),
+ llbitmap->mddev->bitmap_info.daemon_sleep * HZ))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void llbitmap_resume(struct llbitmap *llbitmap, int page_idx)
+{
+ struct llbitmap_page_ctl *pctl = llbitmap->pctl[page_idx];
+
+ pctl->expire = LONG_MAX;
+ percpu_ref_resurrect(&pctl->active);
+ wake_up(&pctl->wait);
+}
+
+static int llbitmap_check_support(struct mddev *mddev)
+{
+ if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
+ pr_notice("md/llbitmap: %s: array with journal cannot have bitmap\n",
+ mdname(mddev));
+ return -EBUSY;
+ }
+
+ if (mddev->bitmap_info.space == 0) {
+ if (mddev->bitmap_info.default_space == 0) {
+ pr_notice("md/llbitmap: %s: no space for bitmap\n",
+ mdname(mddev));
+ return -ENOSPC;
+ }
+ }
+
+ if (!mddev->persistent) {
+ pr_notice("md/llbitmap: %s: array must be persistent\n",
+ mdname(mddev));
+ return -EOPNOTSUPP;
+ }
+
+ if (mddev->bitmap_info.file) {
+ pr_notice("md/llbitmap: %s: doesn't support bitmap file\n",
+ mdname(mddev));
+ return -EOPNOTSUPP;
+ }
+
+ if (mddev->bitmap_info.external) {
+ pr_notice("md/llbitmap: %s: doesn't support external metadata\n",
+ mdname(mddev));
+ return -EOPNOTSUPP;
+ }
+
+ if (mddev_is_dm(mddev)) {
+ pr_notice("md/llbitmap: %s: doesn't support dm-raid\n",
+ mdname(mddev));
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int llbitmap_init(struct llbitmap *llbitmap)
+{
+ struct mddev *mddev = llbitmap->mddev;
+ sector_t blocks = mddev->resync_max_sectors;
+ unsigned long chunksize = MIN_CHUNK_SIZE;
+ unsigned long chunks = DIV_ROUND_UP(blocks, chunksize);
+ unsigned long space = mddev->bitmap_info.space << SECTOR_SHIFT;
+ int ret;
+
+ while (chunks > space) {
+ chunksize = chunksize << 1;
+ chunks = DIV_ROUND_UP_SECTOR_T(blocks, chunksize);
+ }
+
+ llbitmap->barrier_idle = DEFAULT_BARRIER_IDLE;
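+	/* chunksize is a power of two, so ffz(~chunksize) == ilog2(chunksize) */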
+ llbitmap->chunkshift = ffz(~chunksize);
+ llbitmap->chunksize = chunksize;
+ llbitmap->chunks = chunks;
+ mddev->bitmap_info.daemon_sleep = DEFAULT_DAEMON_SLEEP;
+
+ ret = llbitmap_cache_pages(llbitmap);
+ if (ret)
+ return ret;
+
+ llbitmap_state_machine(llbitmap, 0, llbitmap->chunks - 1,
+ BitmapActionInit);
+ /* flush initial llbitmap to disk */
+ __llbitmap_flush(mddev);
+
+ return 0;
+}
+
+static int llbitmap_read_sb(struct llbitmap *llbitmap)
+{
+ struct mddev *mddev = llbitmap->mddev;
+ unsigned long daemon_sleep;
+ unsigned long chunksize;
+ unsigned long events;
+ struct page *sb_page;
+ bitmap_super_t *sb;
+ int ret = -EINVAL;
+
+ if (!mddev->bitmap_info.offset) {
+		pr_err("md/llbitmap: %s: no super block found\n", mdname(mddev));
+ return -EINVAL;
+ }
+
+ sb_page = llbitmap_read_page(llbitmap, 0);
+ if (IS_ERR(sb_page)) {
+		pr_err("md/llbitmap: %s: read super block failed\n",
+ mdname(mddev));
+ return -EIO;
+ }
+
+ sb = kmap_local_page(sb_page);
+ if (sb->magic != cpu_to_le32(BITMAP_MAGIC)) {
+		pr_err("md/llbitmap: %s: invalid super block magic number\n",
+ mdname(mddev));
+ goto out_put_page;
+ }
+
+ if (sb->version != cpu_to_le32(BITMAP_MAJOR_LOCKLESS)) {
+		pr_err("md/llbitmap: %s: invalid super block version\n",
+ mdname(mddev));
+ goto out_put_page;
+ }
+
+ if (memcmp(sb->uuid, mddev->uuid, 16)) {
+ pr_err("md/llbitmap: %s: bitmap superblock UUID mismatch\n",
+ mdname(mddev));
+ goto out_put_page;
+ }
+
+ if (mddev->bitmap_info.space == 0) {
+ int room = le32_to_cpu(sb->sectors_reserved);
+
+ if (room)
+ mddev->bitmap_info.space = room;
+ else
+ mddev->bitmap_info.space = mddev->bitmap_info.default_space;
+ }
+ llbitmap->flags = le32_to_cpu(sb->state);
+ if (test_and_clear_bit(BITMAP_FIRST_USE, &llbitmap->flags)) {
+ ret = llbitmap_init(llbitmap);
+ goto out_put_page;
+ }
+
+ chunksize = le32_to_cpu(sb->chunksize);
+ if (!is_power_of_2(chunksize)) {
+		pr_err("md/llbitmap: %s: chunksize not a power of 2\n",
+ mdname(mddev));
+ goto out_put_page;
+ }
+
+ if (chunksize < DIV_ROUND_UP_SECTOR_T(mddev->resync_max_sectors,
+ mddev->bitmap_info.space << SECTOR_SHIFT)) {
+		pr_err("md/llbitmap: %s: chunksize too small %lu < %llu / %lu\n",
+ mdname(mddev), chunksize, mddev->resync_max_sectors,
+ mddev->bitmap_info.space);
+ goto out_put_page;
+ }
+
+ daemon_sleep = le32_to_cpu(sb->daemon_sleep);
+ if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT / HZ) {
+		pr_err("md/llbitmap: %s: daemon sleep %lu period out of range\n",
+ mdname(mddev), daemon_sleep);
+ goto out_put_page;
+ }
+
+ events = le64_to_cpu(sb->events);
+ if (events < mddev->events) {
+		pr_warn("md/llbitmap: %s: bitmap file is out of date (%lu < %llu) -- forcing full recovery\n",
+ mdname(mddev), events, mddev->events);
+ set_bit(BITMAP_STALE, &llbitmap->flags);
+ }
+
+ sb->sync_size = cpu_to_le64(mddev->resync_max_sectors);
+ mddev->bitmap_info.chunksize = chunksize;
+ mddev->bitmap_info.daemon_sleep = daemon_sleep;
+
+ llbitmap->barrier_idle = DEFAULT_BARRIER_IDLE;
+ llbitmap->chunksize = chunksize;
+ llbitmap->chunks = DIV_ROUND_UP_SECTOR_T(mddev->resync_max_sectors, chunksize);
+ llbitmap->chunkshift = ffz(~chunksize);
+ ret = llbitmap_cache_pages(llbitmap);
+
+out_put_page:
+	kunmap_local(sb);
+	__free_page(sb_page);
+ return ret;
+}
+
+static void llbitmap_pending_timer_fn(struct timer_list *pending_timer)
+{
+ struct llbitmap *llbitmap =
+ container_of(pending_timer, struct llbitmap, pending_timer);
+
+ if (work_busy(&llbitmap->daemon_work)) {
+ pr_warn("md/llbitmap: %s daemon_work not finished in %lu seconds\n",
+ mdname(llbitmap->mddev),
+ llbitmap->mddev->bitmap_info.daemon_sleep);
+ set_bit(BITMAP_DAEMON_BUSY, &llbitmap->flags);
+ return;
+ }
+
+ queue_work(md_llbitmap_io_wq, &llbitmap->daemon_work);
+}
+
+static void md_llbitmap_daemon_fn(struct work_struct *work)
+{
+ struct llbitmap *llbitmap =
+ container_of(work, struct llbitmap, daemon_work);
+ unsigned long start;
+ unsigned long end;
+ bool restart;
+ int idx;
+
+ if (llbitmap->mddev->degraded)
+ return;
+retry:
+ start = 0;
+ end = min(llbitmap->chunks, PAGE_SIZE - BITMAP_DATA_OFFSET) - 1;
+ restart = false;
+
+ for (idx = 0; idx < llbitmap->nr_pages; idx++) {
+ struct llbitmap_page_ctl *pctl = llbitmap->pctl[idx];
+
+ if (idx > 0) {
+ start = end + 1;
+ end = min(end + PAGE_SIZE, llbitmap->chunks - 1);
+ }
+
+ if (!test_bit(LLPageFlush, &pctl->flags) &&
+ time_before(jiffies, pctl->expire)) {
+ restart = true;
+ continue;
+ }
+
+ if (llbitmap_suspend_timeout(llbitmap, idx) < 0) {
+ pr_warn("md/llbitmap: %s: %s waiting for page %d timeout\n",
+ mdname(llbitmap->mddev), __func__, idx);
+ continue;
+ }
+
+ llbitmap_state_machine(llbitmap, start, end, BitmapActionDaemon);
+ llbitmap_resume(llbitmap, idx);
+ }
+
+ /*
+	 * If the daemon took a long time to finish, retry so that the
+	 * clearing of dirty bits is not missed.
+ */
+ if (test_and_clear_bit(BITMAP_DAEMON_BUSY, &llbitmap->flags))
+ goto retry;
+
+	/* If some page is dirty but not expired, set up the timer again */
+ if (restart)
+ mod_timer(&llbitmap->pending_timer,
+ jiffies + llbitmap->mddev->bitmap_info.daemon_sleep * HZ);
+}
+
+static int llbitmap_create(struct mddev *mddev)
+{
+ struct llbitmap *llbitmap;
+ int ret;
+
+ ret = llbitmap_check_support(mddev);
+ if (ret)
+ return ret;
+
+ llbitmap = kzalloc(sizeof(*llbitmap), GFP_KERNEL);
+ if (!llbitmap)
+ return -ENOMEM;
+
+ llbitmap->mddev = mddev;
+ llbitmap->io_size = bdev_logical_block_size(mddev->gendisk->part0);
+ llbitmap->blocks_per_page = PAGE_SIZE / llbitmap->io_size;
+
+ timer_setup(&llbitmap->pending_timer, llbitmap_pending_timer_fn, 0);
+ INIT_WORK(&llbitmap->daemon_work, md_llbitmap_daemon_fn);
+ atomic_set(&llbitmap->behind_writes, 0);
+ init_waitqueue_head(&llbitmap->behind_wait);
+
+ mutex_lock(&mddev->bitmap_info.mutex);
+ mddev->bitmap = llbitmap;
+ ret = llbitmap_read_sb(llbitmap);
+ mutex_unlock(&mddev->bitmap_info.mutex);
+ if (ret) {
+ kfree(llbitmap);
+ mddev->bitmap = NULL;
+ }
+
+ return ret;
+}
+
+static int llbitmap_resize(struct mddev *mddev, sector_t blocks, int chunksize)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ unsigned long chunks;
+
+ if (chunksize == 0)
+ chunksize = llbitmap->chunksize;
+
+ /* If there is enough space, leave the chunksize unchanged. */
+ chunks = DIV_ROUND_UP_SECTOR_T(blocks, chunksize);
+ while (chunks > mddev->bitmap_info.space << SECTOR_SHIFT) {
+ chunksize = chunksize << 1;
+ chunks = DIV_ROUND_UP_SECTOR_T(blocks, chunksize);
+ }
+
+ llbitmap->chunkshift = ffz(~chunksize);
+ llbitmap->chunksize = chunksize;
+ llbitmap->chunks = chunks;
+
+ return 0;
+}
+
+static int llbitmap_load(struct mddev *mddev)
+{
+ enum llbitmap_action action = BitmapActionReload;
+ struct llbitmap *llbitmap = mddev->bitmap;
+
+ if (test_and_clear_bit(BITMAP_STALE, &llbitmap->flags))
+ action = BitmapActionStale;
+
+ llbitmap_state_machine(llbitmap, 0, llbitmap->chunks - 1, action);
+ return 0;
+}
+
+static void llbitmap_destroy(struct mddev *mddev)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+
+ if (!llbitmap)
+ return;
+
+ mutex_lock(&mddev->bitmap_info.mutex);
+
+ timer_delete_sync(&llbitmap->pending_timer);
+ flush_workqueue(md_llbitmap_io_wq);
+ flush_workqueue(md_llbitmap_unplug_wq);
+
+ mddev->bitmap = NULL;
+ llbitmap_free_pages(llbitmap);
+ kfree(llbitmap);
+ mutex_unlock(&mddev->bitmap_info.mutex);
+}
+
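+/*
+ * Mark the bits covering [offset, offset + sectors) through the state machine
+ * and hold the per-page barrier so the daemon can't clear them while the
+ * write is in flight; llbitmap_end_write() drops the barrier.
+ */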
+static void llbitmap_start_write(struct mddev *mddev, sector_t offset,
+ unsigned long sectors)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ unsigned long start = offset >> llbitmap->chunkshift;
+ unsigned long end = (offset + sectors - 1) >> llbitmap->chunkshift;
+ int page_start = (start + BITMAP_DATA_OFFSET) >> PAGE_SHIFT;
+ int page_end = (end + BITMAP_DATA_OFFSET) >> PAGE_SHIFT;
+
+ llbitmap_state_machine(llbitmap, start, end, BitmapActionStartwrite);
+
+ while (page_start <= page_end) {
+ llbitmap_raise_barrier(llbitmap, page_start);
+ page_start++;
+ }
+}
+
+static void llbitmap_end_write(struct mddev *mddev, sector_t offset,
+ unsigned long sectors)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ unsigned long start = offset >> llbitmap->chunkshift;
+ unsigned long end = (offset + sectors - 1) >> llbitmap->chunkshift;
+ int page_start = (start + BITMAP_DATA_OFFSET) >> PAGE_SHIFT;
+ int page_end = (end + BITMAP_DATA_OFFSET) >> PAGE_SHIFT;
+
+ while (page_start <= page_end) {
+ llbitmap_release_barrier(llbitmap, page_start);
+ page_start++;
+ }
+}
+
+static void llbitmap_start_discard(struct mddev *mddev, sector_t offset,
+ unsigned long sectors)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ unsigned long start = DIV_ROUND_UP_SECTOR_T(offset, llbitmap->chunksize);
+ unsigned long end = (offset + sectors - 1) >> llbitmap->chunkshift;
+ int page_start = (start + BITMAP_DATA_OFFSET) >> PAGE_SHIFT;
+ int page_end = (end + BITMAP_DATA_OFFSET) >> PAGE_SHIFT;
+
+ llbitmap_state_machine(llbitmap, start, end, BitmapActionDiscard);
+
+ while (page_start <= page_end) {
+ llbitmap_raise_barrier(llbitmap, page_start);
+ page_start++;
+ }
+}
+
+static void llbitmap_end_discard(struct mddev *mddev, sector_t offset,
+ unsigned long sectors)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ unsigned long start = DIV_ROUND_UP_SECTOR_T(offset, llbitmap->chunksize);
+ unsigned long end = (offset + sectors - 1) >> llbitmap->chunkshift;
+ int page_start = (start + BITMAP_DATA_OFFSET) >> PAGE_SHIFT;
+ int page_end = (end + BITMAP_DATA_OFFSET) >> PAGE_SHIFT;
+
+ while (page_start <= page_end) {
+ llbitmap_release_barrier(llbitmap, page_start);
+ page_start++;
+ }
+}
+
+static void llbitmap_unplug_fn(struct work_struct *work)
+{
+ struct llbitmap_unplug_work *unplug_work =
+ container_of(work, struct llbitmap_unplug_work, work);
+ struct llbitmap *llbitmap = unplug_work->llbitmap;
+ struct blk_plug plug;
+ int i;
+
+ blk_start_plug(&plug);
+
+ for (i = 0; i < llbitmap->nr_pages; i++) {
+ if (!test_bit(LLPageDirty, &llbitmap->pctl[i]->flags) ||
+ !test_and_clear_bit(LLPageDirty, &llbitmap->pctl[i]->flags))
+ continue;
+
+ llbitmap_write_page(llbitmap, i);
+ }
+
+ blk_finish_plug(&plug);
+ md_super_wait(llbitmap->mddev);
+ complete(unplug_work->done);
+}
+
+static bool llbitmap_dirty(struct llbitmap *llbitmap)
+{
+ int i;
+
+ for (i = 0; i < llbitmap->nr_pages; i++)
+ if (test_bit(LLPageDirty, &llbitmap->pctl[i]->flags))
+ return true;
+
+ return false;
+}
+
+static void llbitmap_unplug(struct mddev *mddev, bool sync)
+{
+ DECLARE_COMPLETION_ONSTACK(done);
+ struct llbitmap *llbitmap = mddev->bitmap;
+ struct llbitmap_unplug_work unplug_work = {
+ .llbitmap = llbitmap,
+ .done = &done,
+ };
+
+ if (!llbitmap_dirty(llbitmap))
+ return;
+
+ /*
+	 * Issuing new bitmap IO under the submit_bio() context will deadlock:
+	 * - the bio will wait for the bitmap bio to be done, before it can be
+	 *   issued;
+	 * - the bitmap bio will be added to current->bio_list and wait for
+	 *   this bio to be issued;
+ */
+ INIT_WORK_ONSTACK(&unplug_work.work, llbitmap_unplug_fn);
+ queue_work(md_llbitmap_unplug_wq, &unplug_work.work);
+ wait_for_completion(&done);
+ destroy_work_on_stack(&unplug_work.work);
+}
+
+/*
+ * Force all bitmap pages to be written to disk; called when stopping the
+ * array, or every daemon_sleep seconds while sync_thread is running.
+ */
+static void __llbitmap_flush(struct mddev *mddev)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ struct blk_plug plug;
+ int i;
+
+ blk_start_plug(&plug);
+ for (i = 0; i < llbitmap->nr_pages; i++) {
+ struct llbitmap_page_ctl *pctl = llbitmap->pctl[i];
+
+ /* mark all blocks as dirty */
+ set_bit(LLPageDirty, &pctl->flags);
+ bitmap_fill(pctl->dirty, llbitmap->blocks_per_page);
+ llbitmap_write_page(llbitmap, i);
+ }
+ blk_finish_plug(&plug);
+ md_super_wait(llbitmap->mddev);
+}
+
+static void llbitmap_flush(struct mddev *mddev)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ int i;
+
+ for (i = 0; i < llbitmap->nr_pages; i++)
+ set_bit(LLPageFlush, &llbitmap->pctl[i]->flags);
+
+ timer_delete_sync(&llbitmap->pending_timer);
+ queue_work(md_llbitmap_io_wq, &llbitmap->daemon_work);
+ flush_work(&llbitmap->daemon_work);
+
+ __llbitmap_flush(mddev);
+}
+
+/* This is used for raid5 lazy initial recovery */
+static bool llbitmap_blocks_synced(struct mddev *mddev, sector_t offset)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ unsigned long p = offset >> llbitmap->chunkshift;
+ enum llbitmap_state c = llbitmap_read(llbitmap, p);
+
+ return c == BitClean || c == BitDirty;
+}
+
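+/*
+ * Return the number of sectors (up to the end of the current chunk) that
+ * resync/recovery may skip, or 0 to not skip.
+ */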
+static sector_t llbitmap_skip_sync_blocks(struct mddev *mddev, sector_t offset)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ unsigned long p = offset >> llbitmap->chunkshift;
+ int blocks = llbitmap->chunksize - (offset & (llbitmap->chunksize - 1));
+ enum llbitmap_state c = llbitmap_read(llbitmap, p);
+
+ /* always skip unwritten blocks */
+ if (c == BitUnwritten)
+ return blocks;
+
+ /* For degraded array, don't skip */
+ if (mddev->degraded)
+ return 0;
+
+ /* For resync also skip clean/dirty blocks */
+ if ((c == BitClean || c == BitDirty) &&
+ test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
+ !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+ return blocks;
+
+ return 0;
+}
+
+static bool llbitmap_start_sync(struct mddev *mddev, sector_t offset,
+ sector_t *blocks, bool degraded)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ unsigned long p = offset >> llbitmap->chunkshift;
+
+ /*
+	 * Handle one bit at a time; this is much simpler, and it doesn't
+	 * matter if md_do_sync() loops a few more times.
+ */
+ *blocks = llbitmap->chunksize - (offset & (llbitmap->chunksize - 1));
+ return llbitmap_state_machine(llbitmap, p, p,
+ BitmapActionStartsync) == BitSyncing;
+}
+
+/* Something went wrong; sync_thread stopped at @offset */
+static void llbitmap_end_sync(struct mddev *mddev, sector_t offset,
+ sector_t *blocks)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ unsigned long p = offset >> llbitmap->chunkshift;
+
+ *blocks = llbitmap->chunksize - (offset & (llbitmap->chunksize - 1));
+ llbitmap_state_machine(llbitmap, p, llbitmap->chunks - 1,
+ BitmapActionAbortsync);
+}
+
+/* A full sync_thread is finished */
+static void llbitmap_close_sync(struct mddev *mddev)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ int i;
+
+ for (i = 0; i < llbitmap->nr_pages; i++) {
+ struct llbitmap_page_ctl *pctl = llbitmap->pctl[i];
+
+ /* let daemon_fn clear dirty bits immediately */
+ WRITE_ONCE(pctl->expire, jiffies);
+ }
+
+ llbitmap_state_machine(llbitmap, 0, llbitmap->chunks - 1,
+ BitmapActionEndsync);
+}
+
+/*
+ * sync_thread has reached @sector; update metadata every daemon_sleep
+ * seconds, just in case sync_thread has to restart after a power failure.
+ */
+static void llbitmap_cond_end_sync(struct mddev *mddev, sector_t sector,
+ bool force)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+
+ if (sector == 0) {
+ llbitmap->last_end_sync = jiffies;
+ return;
+ }
+
+ if (time_before(jiffies, llbitmap->last_end_sync +
+ HZ * mddev->bitmap_info.daemon_sleep))
+ return;
+
+ wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
+
+ mddev->curr_resync_completed = sector;
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
+ llbitmap_state_machine(llbitmap, 0, sector >> llbitmap->chunkshift,
+ BitmapActionEndsync);
+ __llbitmap_flush(mddev);
+
+ llbitmap->last_end_sync = jiffies;
+ sysfs_notify_dirent_safe(mddev->sysfs_completed);
+}
+
+static bool llbitmap_enabled(void *data, bool flush)
+{
+ struct llbitmap *llbitmap = data;
+
+ return llbitmap && !test_bit(BITMAP_WRITE_ERROR, &llbitmap->flags);
+}
+
+static void llbitmap_dirty_bits(struct mddev *mddev, unsigned long s,
+ unsigned long e)
+{
+ llbitmap_state_machine(mddev->bitmap, s, e, BitmapActionStartwrite);
+}
+
+static void llbitmap_write_sb(struct llbitmap *llbitmap)
+{
+ int nr_blocks = DIV_ROUND_UP(BITMAP_DATA_OFFSET, llbitmap->io_size);
+
+ bitmap_fill(llbitmap->pctl[0]->dirty, nr_blocks);
+ llbitmap_write_page(llbitmap, 0);
+ md_super_wait(llbitmap->mddev);
+}
+
+static void llbitmap_update_sb(void *data)
+{
+ struct llbitmap *llbitmap = data;
+ struct mddev *mddev = llbitmap->mddev;
+ struct page *sb_page;
+ bitmap_super_t *sb;
+
+ if (test_bit(BITMAP_WRITE_ERROR, &llbitmap->flags))
+ return;
+
+ sb_page = llbitmap_read_page(llbitmap, 0);
+ if (IS_ERR(sb_page)) {
+		pr_err("%s: %s: read super block failed\n", __func__,
+ mdname(mddev));
+ set_bit(BITMAP_WRITE_ERROR, &llbitmap->flags);
+ return;
+ }
+
+ if (mddev->events < llbitmap->events_cleared)
+ llbitmap->events_cleared = mddev->events;
+
+ sb = kmap_local_page(sb_page);
+ sb->events = cpu_to_le64(mddev->events);
+ sb->state = cpu_to_le32(llbitmap->flags);
+ sb->chunksize = cpu_to_le32(llbitmap->chunksize);
+ sb->sync_size = cpu_to_le64(mddev->resync_max_sectors);
+ sb->events_cleared = cpu_to_le64(llbitmap->events_cleared);
+ sb->sectors_reserved = cpu_to_le32(mddev->bitmap_info.space);
+ sb->daemon_sleep = cpu_to_le32(mddev->bitmap_info.daemon_sleep);
+
+ kunmap_local(sb);
+ llbitmap_write_sb(llbitmap);
+}
+
+static int llbitmap_get_stats(void *data, struct md_bitmap_stats *stats)
+{
+ struct llbitmap *llbitmap = data;
+
+ memset(stats, 0, sizeof(*stats));
+
+ stats->missing_pages = 0;
+ stats->pages = llbitmap->nr_pages;
+ stats->file_pages = llbitmap->nr_pages;
+
+ stats->behind_writes = atomic_read(&llbitmap->behind_writes);
+ stats->behind_wait = wq_has_sleeper(&llbitmap->behind_wait);
+ stats->events_cleared = llbitmap->events_cleared;
+
+ return 0;
+}
+
+/* just flag all pages as needing to be written */
+static void llbitmap_write_all(struct mddev *mddev)
+{
+ int i;
+ struct llbitmap *llbitmap = mddev->bitmap;
+
+ for (i = 0; i < llbitmap->nr_pages; i++) {
+ struct llbitmap_page_ctl *pctl = llbitmap->pctl[i];
+
+ set_bit(LLPageDirty, &pctl->flags);
+ bitmap_fill(pctl->dirty, llbitmap->blocks_per_page);
+ }
+}
+
+static void llbitmap_start_behind_write(struct mddev *mddev)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+
+ atomic_inc(&llbitmap->behind_writes);
+}
+
+static void llbitmap_end_behind_write(struct mddev *mddev)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+
+ if (atomic_dec_and_test(&llbitmap->behind_writes))
+ wake_up(&llbitmap->behind_wait);
+}
+
+static void llbitmap_wait_behind_writes(struct mddev *mddev)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+
+ if (!llbitmap)
+ return;
+
+ wait_event(llbitmap->behind_wait,
+ atomic_read(&llbitmap->behind_writes) == 0);
+}
+
+static ssize_t bits_show(struct mddev *mddev, char *page)
+{
+ struct llbitmap *llbitmap;
+ int bits[BitStateCount] = {0};
+ loff_t start = 0;
+
+ mutex_lock(&mddev->bitmap_info.mutex);
+ llbitmap = mddev->bitmap;
+ if (!llbitmap || !llbitmap->pctl) {
+ mutex_unlock(&mddev->bitmap_info.mutex);
+ return sprintf(page, "no bitmap\n");
+ }
+
+ if (test_bit(BITMAP_WRITE_ERROR, &llbitmap->flags)) {
+ mutex_unlock(&mddev->bitmap_info.mutex);
+ return sprintf(page, "bitmap io error\n");
+ }
+
+ while (start < llbitmap->chunks) {
+ enum llbitmap_state c = llbitmap_read(llbitmap, start);
+
+ if (c < 0 || c >= BitStateCount)
+ pr_err("%s: invalid bit %llu state %d\n",
+ __func__, start, c);
+ else
+ bits[c]++;
+ start++;
+ }
+
+ mutex_unlock(&mddev->bitmap_info.mutex);
+ return sprintf(page, "unwritten %d\nclean %d\ndirty %d\nneed sync %d\nsyncing %d\n",
+ bits[BitUnwritten], bits[BitClean], bits[BitDirty],
+ bits[BitNeedSync], bits[BitSyncing]);
+}
+
+static struct md_sysfs_entry llbitmap_bits = __ATTR_RO(bits);
+
+static ssize_t metadata_show(struct mddev *mddev, char *page)
+{
+ struct llbitmap *llbitmap;
+ ssize_t ret;
+
+ mutex_lock(&mddev->bitmap_info.mutex);
+ llbitmap = mddev->bitmap;
+ if (!llbitmap) {
+ mutex_unlock(&mddev->bitmap_info.mutex);
+ return sprintf(page, "no bitmap\n");
+ }
+
+ ret = sprintf(page, "chunksize %lu\nchunkshift %lu\nchunks %lu\noffset %llu\ndaemon_sleep %lu\n",
+ llbitmap->chunksize, llbitmap->chunkshift,
+ llbitmap->chunks, mddev->bitmap_info.offset,
+ llbitmap->mddev->bitmap_info.daemon_sleep);
+ mutex_unlock(&mddev->bitmap_info.mutex);
+
+ return ret;
+}
+
+static struct md_sysfs_entry llbitmap_metadata = __ATTR_RO(metadata);
+
+static ssize_t
+daemon_sleep_show(struct mddev *mddev, char *page)
+{
+ return sprintf(page, "%lu\n", mddev->bitmap_info.daemon_sleep);
+}
+
+static ssize_t
+daemon_sleep_store(struct mddev *mddev, const char *buf, size_t len)
+{
+ unsigned long timeout;
+ int rv = kstrtoul(buf, 10, &timeout);
+
+ if (rv)
+ return rv;
+
+ mddev->bitmap_info.daemon_sleep = timeout;
+ return len;
+}
+
+static struct md_sysfs_entry llbitmap_daemon_sleep = __ATTR_RW(daemon_sleep);
+
+static ssize_t
+barrier_idle_show(struct mddev *mddev, char *page)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+
+ return sprintf(page, "%lu\n", llbitmap->barrier_idle);
+}
+
+static ssize_t
+barrier_idle_store(struct mddev *mddev, const char *buf, size_t len)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ unsigned long timeout;
+ int rv = kstrtoul(buf, 10, &timeout);
+
+ if (rv)
+ return rv;
+
+ llbitmap->barrier_idle = timeout;
+ return len;
+}
+
+static struct md_sysfs_entry llbitmap_barrier_idle = __ATTR_RW(barrier_idle);
+
+static struct attribute *md_llbitmap_attrs[] = {
+ &llbitmap_bits.attr,
+ &llbitmap_metadata.attr,
+ &llbitmap_daemon_sleep.attr,
+ &llbitmap_barrier_idle.attr,
+ NULL
+};
+
+static struct attribute_group md_llbitmap_group = {
+ .name = "llbitmap",
+ .attrs = md_llbitmap_attrs,
+};
+
+static struct bitmap_operations llbitmap_ops = {
+ .head = {
+ .type = MD_BITMAP,
+ .id = ID_LLBITMAP,
+ .name = "llbitmap",
+ },
+
+ .enabled = llbitmap_enabled,
+ .create = llbitmap_create,
+ .resize = llbitmap_resize,
+ .load = llbitmap_load,
+ .destroy = llbitmap_destroy,
+
+ .start_write = llbitmap_start_write,
+ .end_write = llbitmap_end_write,
+ .start_discard = llbitmap_start_discard,
+ .end_discard = llbitmap_end_discard,
+ .unplug = llbitmap_unplug,
+ .flush = llbitmap_flush,
+
+ .start_behind_write = llbitmap_start_behind_write,
+ .end_behind_write = llbitmap_end_behind_write,
+ .wait_behind_writes = llbitmap_wait_behind_writes,
+
+ .blocks_synced = llbitmap_blocks_synced,
+ .skip_sync_blocks = llbitmap_skip_sync_blocks,
+ .start_sync = llbitmap_start_sync,
+ .end_sync = llbitmap_end_sync,
+ .close_sync = llbitmap_close_sync,
+ .cond_end_sync = llbitmap_cond_end_sync,
+
+ .update_sb = llbitmap_update_sb,
+ .get_stats = llbitmap_get_stats,
+ .dirty_bits = llbitmap_dirty_bits,
+ .write_all = llbitmap_write_all,
+
+ .group = &md_llbitmap_group,
+};
+
+int md_llbitmap_init(void)
+{
+ md_llbitmap_io_wq = alloc_workqueue("md_llbitmap_io",
+ WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
+ if (!md_llbitmap_io_wq)
+ return -ENOMEM;
+
+ md_llbitmap_unplug_wq = alloc_workqueue("md_llbitmap_unplug",
+ WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
+ if (!md_llbitmap_unplug_wq) {
+ destroy_workqueue(md_llbitmap_io_wq);
+ md_llbitmap_io_wq = NULL;
+ return -ENOMEM;
+ }
+
+ return register_md_submodule(&llbitmap_ops.head);
+}
+
+void md_llbitmap_exit(void)
+{
+ destroy_workqueue(md_llbitmap_io_wq);
+ md_llbitmap_io_wq = NULL;
+ destroy_workqueue(md_llbitmap_unplug_wq);
+ md_llbitmap_unplug_wq = NULL;
+ unregister_md_submodule(&llbitmap_ops.head);
+}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 046fe85c76fe..41c476b40c7a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -94,7 +94,6 @@ static struct workqueue_struct *md_wq;
 * workqueue with reconfig_mutex grabbed.
*/
static struct workqueue_struct *md_misc_wq;
-struct workqueue_struct *md_bitmap_wq;
static int remove_and_add_spares(struct mddev *mddev,
struct md_rdev *this);
@@ -339,6 +338,7 @@ static int start_readonly;
* so all the races disappear.
*/
static bool create_on_open = true;
+static bool legacy_async_del_gendisk = true;
/*
* We have a system wide 'event count' that is incremented
@@ -637,6 +637,12 @@ static void __mddev_put(struct mddev *mddev)
return;
/*
+	 * If the array is freed by stopping the array, MD_DELETED is set by
+	 * do_md_stop(); MD_DELETED is still set here in case the mddev is
+	 * freed directly by closing an mddev that was created by
+	 * create_on_open.
+ */
+ set_bit(MD_DELETED, &mddev->flags);
+ /*
* Call queue_work inside the spinlock so that flush_workqueue() after
* mddev_find will succeed in waiting for the work to be done.
*/
@@ -670,8 +676,64 @@ static void active_io_release(struct percpu_ref *ref)
static void no_op(struct percpu_ref *r) {}
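+
+/*
+ * Look up mddev->bitmap_id in the md_submodule registry and install the
+ * matching bitmap_operations, registering the bitmap's extra sysfs attribute
+ * group, if any. Returns false if the id cannot be resolved to a registered
+ * bitmap.
+ */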
+static bool mddev_set_bitmap_ops(struct mddev *mddev)
+{
+ struct bitmap_operations *old = mddev->bitmap_ops;
+ struct md_submodule_head *head;
+
+ if (mddev->bitmap_id == ID_BITMAP_NONE ||
+ (old && old->head.id == mddev->bitmap_id))
+ return true;
+
+ xa_lock(&md_submodule);
+ head = xa_load(&md_submodule, mddev->bitmap_id);
+
+ if (!head) {
+ pr_warn("md: can't find bitmap id %d\n", mddev->bitmap_id);
+ goto err;
+ }
+
+ if (head->type != MD_BITMAP) {
+ pr_warn("md: invalid bitmap id %d\n", mddev->bitmap_id);
+ goto err;
+ }
+
+ mddev->bitmap_ops = (void *)head;
+ xa_unlock(&md_submodule);
+
+ if (!mddev_is_dm(mddev) && mddev->bitmap_ops->group) {
+ if (sysfs_create_group(&mddev->kobj, mddev->bitmap_ops->group))
+ pr_warn("md: cannot register extra bitmap attributes for %s\n",
+ mdname(mddev));
+ else
+ /*
+ * Inform user with KOBJ_CHANGE about new bitmap
+ * attributes.
+ */
+ kobject_uevent(&mddev->kobj, KOBJ_CHANGE);
+ }
+ return true;
+
+err:
+ xa_unlock(&md_submodule);
+ return false;
+}
+
+static void mddev_clear_bitmap_ops(struct mddev *mddev)
+{
+ if (!mddev_is_dm(mddev) && mddev->bitmap_ops &&
+ mddev->bitmap_ops->group)
+ sysfs_remove_group(&mddev->kobj, mddev->bitmap_ops->group);
+
+ mddev->bitmap_ops = NULL;
+}
+
int mddev_init(struct mddev *mddev)
{
+ if (!IS_ENABLED(CONFIG_MD_BITMAP))
+ mddev->bitmap_id = ID_BITMAP_NONE;
+ else
+ mddev->bitmap_id = ID_BITMAP;
if (percpu_ref_init(&mddev->active_io, active_io_release,
PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
@@ -706,7 +768,6 @@ int mddev_init(struct mddev *mddev)
mddev->resync_min = 0;
mddev->resync_max = MaxSector;
mddev->level = LEVEL_NONE;
- mddev_set_bitmap_ops(mddev);
INIT_WORK(&mddev->sync_work, md_start_sync);
INIT_WORK(&mddev->del_work, mddev_delayed_delete);
@@ -871,15 +932,18 @@ void mddev_unlock(struct mddev *mddev)
export_rdev(rdev, mddev);
}
- /* Call del_gendisk after release reconfig_mutex to avoid
- * deadlock (e.g. call del_gendisk under the lock and an
- * access to sysfs files waits the lock)
- * And MD_DELETED is only used for md raid which is set in
- * do_md_stop. dm raid only uses md_stop to stop. So dm raid
- * doesn't need to check MD_DELETED when getting reconfig lock
- */
- if (test_bit(MD_DELETED, &mddev->flags))
- del_gendisk(mddev->gendisk);
+ if (!legacy_async_del_gendisk) {
+ /*
+ * Call del_gendisk after release reconfig_mutex to avoid
+ * deadlock (e.g. call del_gendisk under the lock and an
+ * access to sysfs files waits the lock)
+ * And MD_DELETED is only used for md raid which is set in
+ * do_md_stop. dm raid only uses md_stop to stop. So dm raid
+ * doesn't need to check MD_DELETED when getting reconfig lock
+ */
+ if (test_bit(MD_DELETED, &mddev->flags))
+ del_gendisk(mddev->gendisk);
+ }
}
EXPORT_SYMBOL_GPL(mddev_unlock);
@@ -1010,15 +1074,26 @@ static void super_written(struct bio *bio)
wake_up(&mddev->sb_wait);
}
-void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
- sector_t sector, int size, struct page *page)
+/**
+ * md_write_metadata - write metadata to underlying disk, including
+ * array superblock, badblocks, bitmap superblock and bitmap bits.
+ * @mddev: the array to write
+ * @rdev: the underlying disk to write
+ * @sector: the offset to @rdev
+ * @size: the length of the metadata
+ * @page: the metadata
+ * @offset: the offset to @page
+ *
+ * Write @size bytes of @page, starting at @offset, to @sector of @rdev.
+ * Increment mddev->pending_writes before returning, and decrement it on
+ * completion, waking up sb_wait. The caller must call md_super_wait() after
+ * issuing IO to all rdevs. If an error occurs, md_error() will be called,
+ * and the @rdev will be kicked out of @mddev.
+ */
+void md_write_metadata(struct mddev *mddev, struct md_rdev *rdev,
+ sector_t sector, int size, struct page *page,
+ unsigned int offset)
{
- /* write first size bytes of page to sector of rdev
- * Increment mddev->pending_writes before returning
- * and decrement it on completion, waking up sb_wait
- * if zero is reached.
- * If an error occurred, call md_error
- */
struct bio *bio;
if (!page)
@@ -1036,7 +1111,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
atomic_inc(&rdev->nr_pending);
bio->bi_iter.bi_sector = sector;
- __bio_add_page(bio, page, size, 0);
+ __bio_add_page(bio, page, size, offset);
bio->bi_private = rdev;
bio->bi_end_io = super_written;
@@ -1346,6 +1421,9 @@ static u64 md_bitmap_events_cleared(struct mddev *mddev)
struct md_bitmap_stats stats;
int err;
+ if (!md_bitmap_enabled(mddev, false))
+ return 0;
+
err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats);
if (err)
return 0;
@@ -1409,13 +1487,13 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *freshest, stru
mddev->layout = -1;
if (sb->state & (1<<MD_SB_CLEAN))
- mddev->recovery_cp = MaxSector;
+ mddev->resync_offset = MaxSector;
else {
if (sb->events_hi == sb->cp_events_hi &&
sb->events_lo == sb->cp_events_lo) {
- mddev->recovery_cp = sb->recovery_cp;
+ mddev->resync_offset = sb->recovery_cp;
} else
- mddev->recovery_cp = 0;
+ mddev->resync_offset = 0;
}
memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
@@ -1541,10 +1619,10 @@ static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
mddev->minor_version = sb->minor_version;
if (mddev->in_sync)
{
- sb->recovery_cp = mddev->recovery_cp;
+ sb->recovery_cp = mddev->resync_offset;
sb->cp_events_hi = (mddev->events>>32);
sb->cp_events_lo = (u32)mddev->events;
- if (mddev->recovery_cp == MaxSector)
+ if (mddev->resync_offset == MaxSector)
sb->state = (1<< MD_SB_CLEAN);
} else
sb->recovery_cp = 0;
@@ -1643,8 +1721,8 @@ super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
num_sectors = (sector_t)(2ULL << 32) - 2;
do {
- md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
- rdev->sb_page);
+ md_write_metadata(rdev->mddev, rdev, rdev->sb_start,
+ rdev->sb_size, rdev->sb_page, 0);
} while (md_super_wait(rdev->mddev) < 0);
return num_sectors;
}
@@ -1895,7 +1973,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *freshest, struc
mddev->bitmap_info.default_space = (4096-1024) >> 9;
mddev->reshape_backwards = 0;
- mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
+ mddev->resync_offset = le64_to_cpu(sb->resync_offset);
memcpy(mddev->uuid, sb->set_uuid, 16);
mddev->max_disks = (4096-256)/2;
@@ -2081,7 +2159,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
sb->utime = cpu_to_le64((__u64)mddev->utime);
sb->events = cpu_to_le64(mddev->events);
if (mddev->in_sync)
- sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
+ sb->resync_offset = cpu_to_le64(mddev->resync_offset);
else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
sb->resync_offset = cpu_to_le64(MaxSector);
else
@@ -2292,8 +2370,8 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
sb->super_offset = cpu_to_le64(rdev->sb_start);
sb->sb_csum = calc_sb_1_csum(sb);
do {
- md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
- rdev->sb_page);
+ md_write_metadata(rdev->mddev, rdev, rdev->sb_start,
+ rdev->sb_size, rdev->sb_page, 0);
} while (md_super_wait(rdev->mddev) < 0);
return num_sectors;
@@ -2303,13 +2381,15 @@ static int
super_1_allow_new_offset(struct md_rdev *rdev,
unsigned long long new_offset)
{
+ struct mddev *mddev = rdev->mddev;
+
/* All necessary checks on new >= old have been done */
if (new_offset >= rdev->data_offset)
return 1;
/* with 1.0 metadata, there is no metadata to tread on
* so we can always move back */
- if (rdev->mddev->minor_version == 0)
+ if (mddev->minor_version == 0)
return 1;
/* otherwise we must be sure not to step on
@@ -2321,8 +2401,7 @@ super_1_allow_new_offset(struct md_rdev *rdev,
if (rdev->sb_start + (32+4)*2 > new_offset)
return 0;
- if (!rdev->mddev->bitmap_info.file) {
- struct mddev *mddev = rdev->mddev;
+ if (md_bitmap_registered(mddev) && !mddev->bitmap_info.file) {
struct md_bitmap_stats stats;
int err;
@@ -2761,7 +2840,7 @@ repeat:
/* If this is just a dirty<->clean transition, and the array is clean
* and 'events' is odd, we can roll back to the previous clean state */
if (nospares
- && (mddev->in_sync && mddev->recovery_cp == MaxSector)
+ && (mddev->in_sync && mddev->resync_offset == MaxSector)
&& mddev->can_decrease_events
&& mddev->events != 1) {
mddev->events--;
@@ -2794,24 +2873,24 @@ repeat:
mddev_add_trace_msg(mddev, "md md_update_sb");
rewrite:
- mddev->bitmap_ops->update_sb(mddev->bitmap);
+ if (md_bitmap_enabled(mddev, false))
+ mddev->bitmap_ops->update_sb(mddev->bitmap);
rdev_for_each(rdev, mddev) {
if (rdev->sb_loaded != 1)
continue; /* no noise on spare devices */
if (!test_bit(Faulty, &rdev->flags)) {
- md_super_write(mddev,rdev,
- rdev->sb_start, rdev->sb_size,
- rdev->sb_page);
+ md_write_metadata(mddev, rdev, rdev->sb_start,
+ rdev->sb_size, rdev->sb_page, 0);
pr_debug("md: (write) %pg's sb offset: %llu\n",
rdev->bdev,
(unsigned long long)rdev->sb_start);
rdev->sb_events = mddev->events;
if (rdev->badblocks.size) {
- md_super_write(mddev, rdev,
- rdev->badblocks.sector,
- rdev->badblocks.size << 9,
- rdev->bb_page);
+ md_write_metadata(mddev, rdev,
+ rdev->badblocks.sector,
+ rdev->badblocks.size << 9,
+ rdev->bb_page, 0);
rdev->badblocks.size = 0;
}
@@ -4140,6 +4219,86 @@ static struct md_sysfs_entry md_new_level =
__ATTR(new_level, 0664, new_level_show, new_level_store);
static ssize_t
+bitmap_type_show(struct mddev *mddev, char *page)
+{
+ struct md_submodule_head *head;
+ unsigned long i;
+ ssize_t len = 0;
+
+ if (mddev->bitmap_id == ID_BITMAP_NONE)
+ len += sprintf(page + len, "[none] ");
+ else
+ len += sprintf(page + len, "none ");
+
+ xa_lock(&md_submodule);
+ xa_for_each(&md_submodule, i, head) {
+ if (head->type != MD_BITMAP)
+ continue;
+
+ if (mddev->bitmap_id == head->id)
+ len += sprintf(page + len, "[%s] ", head->name);
+ else
+ len += sprintf(page + len, "%s ", head->name);
+ }
+ xa_unlock(&md_submodule);
+
+ len += sprintf(page + len, "\n");
+ return len;
+}
+
+static ssize_t
+bitmap_type_store(struct mddev *mddev, const char *buf, size_t len)
+{
+ struct md_submodule_head *head;
+	int id;
+ unsigned long i;
+ int err = 0;
+
+ xa_lock(&md_submodule);
+
+ if (mddev->bitmap_ops) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ if (cmd_match(buf, "none")) {
+ mddev->bitmap_id = ID_BITMAP_NONE;
+ goto out;
+ }
+
+ xa_for_each(&md_submodule, i, head) {
+ if (head->type == MD_BITMAP && cmd_match(buf, head->name)) {
+ mddev->bitmap_id = head->id;
+ goto out;
+ }
+ }
+
+ err = kstrtoint(buf, 10, &id);
+ if (err)
+ goto out;
+
+ if (id == ID_BITMAP_NONE) {
+ mddev->bitmap_id = id;
+ goto out;
+ }
+
+ head = xa_load(&md_submodule, id);
+ if (head && head->type == MD_BITMAP) {
+ mddev->bitmap_id = id;
+ goto out;
+ }
+
+ err = -ENOENT;
+
+out:
+ xa_unlock(&md_submodule);
+ return err ? err : len;
+}
+
+static struct md_sysfs_entry md_bitmap_type =
+__ATTR(bitmap_type, 0664, bitmap_type_show, bitmap_type_store);
+
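The new attribute follows the usual md sysfs convention: the show side prints every registered choice with the active one bracketed (e.g. "[none] bitmap llbitmap"), and the store side accepts a name or numeric id only until bitmap_ops is bound. A minimal userspace sketch, assuming an array at md0 so the attribute sits at /sys/block/md0/md/bitmap_type (path and the "llbitmap" name are illustrative assumptions, not part of the patch):

	/* sketch: query and set bitmap_type before the array is assembled */
	#include <stdio.h>

	int main(void)
	{
		const char *attr = "/sys/block/md0/md/bitmap_type";
		char line[256];
		FILE *f = fopen(attr, "r+");

		if (!f) {
			perror("fopen");
			return 1;
		}
		if (fgets(line, sizeof(line), f))
			printf("choices: %s", line);	/* e.g. "[none] bitmap llbitmap" */
		rewind(f);
		if (fputs("llbitmap", f) == EOF || fflush(f) == EOF)
			perror("store");		/* fails with -EBUSY once bitmap_ops is set */
		fclose(f);
		return 0;
	}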
+static ssize_t
layout_show(struct mddev *mddev, char *page)
{
/* just a number, not meaningful for all levels */
@@ -4297,9 +4456,9 @@ __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
static ssize_t
resync_start_show(struct mddev *mddev, char *page)
{
- if (mddev->recovery_cp == MaxSector)
+ if (mddev->resync_offset == MaxSector)
return sprintf(page, "none\n");
- return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
+ return sprintf(page, "%llu\n", (unsigned long long)mddev->resync_offset);
}
static ssize_t
@@ -4325,7 +4484,7 @@ resync_start_store(struct mddev *mddev, const char *buf, size_t len)
err = -EBUSY;
if (!err) {
- mddev->recovery_cp = n;
+ mddev->resync_offset = n;
if (mddev->pers)
set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
}
@@ -4670,6 +4829,9 @@ bitmap_store(struct mddev *mddev, const char *buf, size_t len)
unsigned long chunk, end_chunk;
int err;
+ if (!md_bitmap_enabled(mddev, false))
+ return len;
+
err = mddev_lock(mddev);
if (err)
return err;
@@ -4829,9 +4991,42 @@ out_unlock:
static struct md_sysfs_entry md_metadata =
__ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
+static bool rdev_needs_recovery(struct md_rdev *rdev, sector_t sectors)
+{
+ return rdev->raid_disk >= 0 &&
+ !test_bit(Journal, &rdev->flags) &&
+ !test_bit(Faulty, &rdev->flags) &&
+ !test_bit(In_sync, &rdev->flags) &&
+ rdev->recovery_offset < sectors;
+}
+
+static enum sync_action md_get_active_sync_action(struct mddev *mddev)
+{
+ struct md_rdev *rdev;
+ bool is_recover = false;
+
+ if (mddev->resync_offset < MaxSector)
+ return ACTION_RESYNC;
+
+ if (mddev->reshape_position != MaxSector)
+ return ACTION_RESHAPE;
+
+ rcu_read_lock();
+ rdev_for_each_rcu(rdev, mddev) {
+ if (rdev_needs_recovery(rdev, MaxSector)) {
+ is_recover = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return is_recover ? ACTION_RECOVER : ACTION_IDLE;
+}
+
enum sync_action md_sync_action(struct mddev *mddev)
{
unsigned long recovery = mddev->recovery;
+ enum sync_action active_action;
/*
* frozen has the highest priority, meaning a running sync_thread will be
@@ -4855,8 +5050,17 @@ enum sync_action md_sync_action(struct mddev *mddev)
!test_bit(MD_RECOVERY_NEEDED, &recovery))
return ACTION_IDLE;
- if (test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
- mddev->reshape_position != MaxSector)
+ /*
+	 * Check whether any sync operation (resync/recover/reshape) is
+	 * currently active: only one sync operation may run at a time.
+	 * md_get_active_sync_action() reports the type of the active
+	 * operation, or ACTION_IDLE if none is active.
+ */
+ active_action = md_get_active_sync_action(mddev);
+ if (active_action != ACTION_IDLE)
+ return active_action;
+
+ if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
return ACTION_RESHAPE;
if (test_bit(MD_RECOVERY_RECOVER, &recovery))
@@ -5700,6 +5904,7 @@ __ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show,
static struct attribute *md_default_attrs[] = {
&md_level.attr,
&md_new_level.attr,
+ &md_bitmap_type.attr,
&md_layout.attr,
&md_raid_disks.attr,
&md_uuid.attr,
@@ -5749,7 +5954,6 @@ static const struct attribute_group md_redundancy_group = {
static const struct attribute_group *md_attr_groups[] = {
&md_default_group,
- &md_bitmap_group,
NULL,
};
@@ -5812,6 +6016,13 @@ static void md_kobj_release(struct kobject *ko)
{
struct mddev *mddev = container_of(ko, struct mddev, kobj);
+ if (legacy_async_del_gendisk) {
+ if (mddev->sysfs_state)
+ sysfs_put(mddev->sysfs_state);
+ if (mddev->sysfs_level)
+ sysfs_put(mddev->sysfs_level);
+ del_gendisk(mddev->gendisk);
+ }
put_disk(mddev->gendisk);
}
@@ -6015,6 +6226,9 @@ static int md_alloc_and_put(dev_t dev, char *name)
{
struct mddev *mddev = md_alloc(dev, name);
+ if (legacy_async_del_gendisk)
+ pr_warn("md: async del_gendisk mode will be removed in future, please upgrade to mdadm-4.5+\n");
+
if (IS_ERR(mddev))
return PTR_ERR(mddev);
mddev_put(mddev);
@@ -6071,6 +6285,26 @@ static void md_safemode_timeout(struct timer_list *t)
static int start_dirty_degraded;
+static int md_bitmap_create(struct mddev *mddev)
+{
+ if (mddev->bitmap_id == ID_BITMAP_NONE)
+ return -EINVAL;
+
+ if (!mddev_set_bitmap_ops(mddev))
+ return -ENOENT;
+
+ return mddev->bitmap_ops->create(mddev);
+}
+
+static void md_bitmap_destroy(struct mddev *mddev)
+{
+ if (!md_bitmap_registered(mddev))
+ return;
+
+ mddev->bitmap_ops->destroy(mddev);
+ mddev_clear_bitmap_ops(mddev);
+}
+
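These wrappers pair with the three gating levels used across the rest of the patch: md_bitmap_registered() (an ops table is bound), md_bitmap_enabled(mddev, false) (ops bound and a bitmap instance exists), and md_bitmap_enabled(mddev, true) (callers that also need optional ops such as flush or write_all). The helpers themselves are defined outside this hunk; the sketch below is only an assumption about their shape:

	/* assumed shape of the gating helpers -- not part of this hunk */
	static inline bool md_bitmap_registered(struct mddev *mddev)
	{
		return mddev->bitmap_ops != NULL;
	}

	static inline bool md_bitmap_enabled(struct mddev *mddev, bool want_optional_ops)
	{
		if (!md_bitmap_registered(mddev) || !mddev->bitmap)
			return false;

		/* assumption: 'true' additionally demands ops that not
		 * every bitmap implementation provides */
		return !want_optional_ops ||
		       (mddev->bitmap_ops->flush && mddev->bitmap_ops->write_all);
	}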
int md_run(struct mddev *mddev)
{
int err;
@@ -6237,7 +6471,7 @@ int md_run(struct mddev *mddev)
}
if (err == 0 && pers->sync_request &&
(mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
- err = mddev->bitmap_ops->create(mddev);
+ err = md_bitmap_create(mddev);
if (err)
pr_warn("%s: failed to create bitmap (%d)\n",
mdname(mddev), err);
@@ -6310,7 +6544,7 @@ bitmap_abort:
pers->free(mddev, mddev->private);
mddev->private = NULL;
put_pers(pers);
- mddev->bitmap_ops->destroy(mddev);
+ md_bitmap_destroy(mddev);
abort:
bioset_exit(&mddev->io_clone_set);
exit_sync_set:
@@ -6330,10 +6564,12 @@ int do_md_run(struct mddev *mddev)
if (err)
goto out;
- err = mddev->bitmap_ops->load(mddev);
- if (err) {
- mddev->bitmap_ops->destroy(mddev);
- goto out;
+ if (md_bitmap_registered(mddev)) {
+ err = mddev->bitmap_ops->load(mddev);
+ if (err) {
+ md_bitmap_destroy(mddev);
+ goto out;
+ }
}
if (mddev_is_clustered(mddev))
@@ -6417,7 +6653,7 @@ static void md_clean(struct mddev *mddev)
mddev->external_size = 0;
mddev->dev_sectors = 0;
mddev->raid_disks = 0;
- mddev->recovery_cp = 0;
+ mddev->resync_offset = 0;
mddev->resync_min = 0;
mddev->resync_max = MaxSector;
mddev->reshape_position = MaxSector;
@@ -6425,10 +6661,22 @@ static void md_clean(struct mddev *mddev)
mddev->persistent = 0;
mddev->level = LEVEL_NONE;
mddev->clevel[0] = 0;
- /* if UNTIL_STOP is set, it's cleared here */
- mddev->hold_active = 0;
- /* Don't clear MD_CLOSING, or mddev can be opened again. */
- mddev->flags &= BIT_ULL_MASK(MD_CLOSING);
+
+ /*
+	 * In legacy_async_del_gendisk mode, userspace can stop the array in
+	 * the middle of assembling it and must still be able to open it
+	 * afterwards, so MD_CLOSING is cleared here. Otherwise the array
+	 * must not be opened again once it is stopped, so MD_CLOSING is
+	 * left set.
+ */
+ if (legacy_async_del_gendisk && mddev->hold_active) {
+ clear_bit(MD_CLOSING, &mddev->flags);
+ } else {
+ /* if UNTIL_STOP is set, it's cleared here */
+ mddev->hold_active = 0;
+ /* Don't clear MD_CLOSING, or mddev can be opened again. */
+ mddev->flags &= BIT_ULL_MASK(MD_CLOSING);
+ }
mddev->sb_flags = 0;
mddev->ro = MD_RDWR;
mddev->metadata_type[0] = 0;
@@ -6472,7 +6720,8 @@ static void __md_stop_writes(struct mddev *mddev)
mddev->pers->quiesce(mddev, 0);
}
- mddev->bitmap_ops->flush(mddev);
+ if (md_bitmap_enabled(mddev, true))
+ mddev->bitmap_ops->flush(mddev);
if (md_is_rdwr(mddev) &&
((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
@@ -6499,7 +6748,8 @@ EXPORT_SYMBOL_GPL(md_stop_writes);
static void mddev_detach(struct mddev *mddev)
{
- mddev->bitmap_ops->wait_behind_writes(mddev);
+ if (md_bitmap_enabled(mddev, false))
+ mddev->bitmap_ops->wait_behind_writes(mddev);
if (mddev->pers && mddev->pers->quiesce && !is_md_suspended(mddev)) {
mddev->pers->quiesce(mddev, 1);
mddev->pers->quiesce(mddev, 0);
@@ -6515,7 +6765,7 @@ static void __md_stop(struct mddev *mddev)
{
struct md_personality *pers = mddev->pers;
- mddev->bitmap_ops->destroy(mddev);
+ md_bitmap_destroy(mddev);
mddev_detach(mddev);
spin_lock(&mddev->lock);
mddev->pers = NULL;
@@ -6652,7 +6902,8 @@ static int do_md_stop(struct mddev *mddev, int mode)
export_array(mddev);
md_clean(mddev);
- set_bit(MD_DELETED, &mddev->flags);
+ if (!legacy_async_del_gendisk)
+ set_bit(MD_DELETED, &mddev->flags);
}
md_new_event();
sysfs_notify_dirent_safe(mddev->sysfs_state);
@@ -7232,6 +7483,9 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
{
int err = 0;
+ if (!md_bitmap_registered(mddev))
+ return -EINVAL;
+
if (mddev->pers) {
if (!mddev->pers->quiesce || !mddev->thread)
return -EBUSY;
@@ -7288,16 +7542,16 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
err = 0;
if (mddev->pers) {
if (fd >= 0) {
- err = mddev->bitmap_ops->create(mddev);
+ err = md_bitmap_create(mddev);
if (!err)
err = mddev->bitmap_ops->load(mddev);
if (err) {
- mddev->bitmap_ops->destroy(mddev);
+ md_bitmap_destroy(mddev);
fd = -1;
}
} else if (fd < 0) {
- mddev->bitmap_ops->destroy(mddev);
+ md_bitmap_destroy(mddev);
}
}
@@ -7362,9 +7616,9 @@ int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info)
* opened
*/
if (info->state & (1<<MD_SB_CLEAN))
- mddev->recovery_cp = MaxSector;
+ mddev->resync_offset = MaxSector;
else
- mddev->recovery_cp = 0;
+ mddev->resync_offset = 0;
mddev->persistent = ! info->not_persistent;
mddev->external = 0;
@@ -7604,12 +7858,12 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
mddev->bitmap_info.default_offset;
mddev->bitmap_info.space =
mddev->bitmap_info.default_space;
- rv = mddev->bitmap_ops->create(mddev);
+ rv = md_bitmap_create(mddev);
if (!rv)
rv = mddev->bitmap_ops->load(mddev);
if (rv)
- mddev->bitmap_ops->destroy(mddev);
+ md_bitmap_destroy(mddev);
} else {
struct md_bitmap_stats stats;
@@ -7635,7 +7889,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
put_cluster_ops(mddev);
mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
}
- mddev->bitmap_ops->destroy(mddev);
+ md_bitmap_destroy(mddev);
mddev->bitmap_info.offset = 0;
}
}
@@ -7672,9 +7926,9 @@ static int set_disk_faulty(struct mddev *mddev, dev_t dev)
* 4 sectors (with a BIG number of cylinders...). This drives
* dosfs just mad... ;-)
*/
-static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int md_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct mddev *mddev = bdev->bd_disk->private_data;
+ struct mddev *mddev = disk->private_data;
geo->heads = 2;
geo->sectors = 4;
@@ -8303,7 +8557,7 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
seq_printf(seq, "\tresync=REMOTE");
return 1;
}
- if (mddev->recovery_cp < MaxSector) {
+ if (mddev->resync_offset < MaxSector) {
seq_printf(seq, "\tresync=PENDING");
return 1;
}
@@ -8416,6 +8670,9 @@ static void md_bitmap_status(struct seq_file *seq, struct mddev *mddev)
unsigned long chunk_kb;
int err;
+ if (!md_bitmap_enabled(mddev, false))
+ return;
+
err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats);
if (err)
return;
@@ -8798,18 +9055,24 @@ EXPORT_SYMBOL_GPL(md_submit_discard_bio);
static void md_bitmap_start(struct mddev *mddev,
struct md_io_clone *md_io_clone)
{
+ md_bitmap_fn *fn = unlikely(md_io_clone->rw == STAT_DISCARD) ?
+ mddev->bitmap_ops->start_discard :
+ mddev->bitmap_ops->start_write;
+
if (mddev->pers->bitmap_sector)
mddev->pers->bitmap_sector(mddev, &md_io_clone->offset,
&md_io_clone->sectors);
- mddev->bitmap_ops->start_write(mddev, md_io_clone->offset,
- md_io_clone->sectors);
+ fn(mddev, md_io_clone->offset, md_io_clone->sectors);
}
static void md_bitmap_end(struct mddev *mddev, struct md_io_clone *md_io_clone)
{
- mddev->bitmap_ops->end_write(mddev, md_io_clone->offset,
- md_io_clone->sectors);
+ md_bitmap_fn *fn = unlikely(md_io_clone->rw == STAT_DISCARD) ?
+ mddev->bitmap_ops->end_discard :
+ mddev->bitmap_ops->end_write;
+
+ fn(mddev, md_io_clone->offset, md_io_clone->sectors);
}
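Both helpers now dispatch through a function pointer chosen by md_io_clone->rw, which md_clone_bio() records via op_stat_group() below. For the selection to type-check, a common md_bitmap_fn typedef matching the start_/end_ signatures is assumed; its definition is expected alongside bitmap_operations but is not visible in this hunk:

	/* assumed typedef backing the dispatch above */
	typedef void (md_bitmap_fn)(struct mddev *mddev, sector_t offset,
				    unsigned long sectors);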
static void md_end_clone_io(struct bio *bio)
@@ -8818,7 +9081,7 @@ static void md_end_clone_io(struct bio *bio)
struct bio *orig_bio = md_io_clone->orig_bio;
struct mddev *mddev = md_io_clone->mddev;
- if (bio_data_dir(orig_bio) == WRITE && mddev->bitmap)
+ if (bio_data_dir(orig_bio) == WRITE && md_bitmap_enabled(mddev, false))
md_bitmap_end(mddev, md_io_clone);
if (bio->bi_status && !orig_bio->bi_status)
@@ -8845,9 +9108,10 @@ static void md_clone_bio(struct mddev *mddev, struct bio **bio)
if (blk_queue_io_stat(bdev->bd_disk->queue))
md_io_clone->start_time = bio_start_io_acct(*bio);
- if (bio_data_dir(*bio) == WRITE && mddev->bitmap) {
+ if (bio_data_dir(*bio) == WRITE && md_bitmap_enabled(mddev, false)) {
md_io_clone->offset = (*bio)->bi_iter.bi_sector;
md_io_clone->sectors = bio_sectors(*bio);
+ md_io_clone->rw = op_stat_group(bio_op(*bio));
md_bitmap_start(mddev, md_io_clone);
}
@@ -8869,7 +9133,7 @@ void md_free_cloned_bio(struct bio *bio)
struct bio *orig_bio = md_io_clone->orig_bio;
struct mddev *mddev = md_io_clone->mddev;
- if (bio_data_dir(orig_bio) == WRITE && mddev->bitmap)
+ if (bio_data_dir(orig_bio) == WRITE && md_bitmap_enabled(mddev, false))
md_bitmap_end(mddev, md_io_clone);
if (bio->bi_status && !orig_bio->bi_status)
@@ -8935,6 +9199,39 @@ static sector_t md_sync_max_sectors(struct mddev *mddev,
}
}
+/*
+ * If lazy recovery is requested and all rdevs are in sync, select the rdev with
+ * the highest index to perform recovery to build the initial xor data; this
+ * is the same behaviour as the old bitmap.
+ */
+static bool mddev_select_lazy_recover_rdev(struct mddev *mddev)
+{
+ struct md_rdev *recover_rdev = NULL;
+ struct md_rdev *rdev;
+ bool ret = false;
+
+ rcu_read_lock();
+ rdev_for_each_rcu(rdev, mddev) {
+ if (rdev->raid_disk < 0)
+ continue;
+
+ if (test_bit(Faulty, &rdev->flags) ||
+ !test_bit(In_sync, &rdev->flags))
+ break;
+
+ if (!recover_rdev || recover_rdev->raid_disk < rdev->raid_disk)
+ recover_rdev = rdev;
+ }
+
+ if (recover_rdev) {
+ clear_bit(In_sync, &recover_rdev->flags);
+ ret = true;
+ }
+
+ rcu_read_unlock();
+ return ret;
+}
+
static sector_t md_sync_position(struct mddev *mddev, enum sync_action action)
{
sector_t start = 0;
@@ -8946,7 +9243,7 @@ static sector_t md_sync_position(struct mddev *mddev, enum sync_action action)
return mddev->resync_min;
case ACTION_RESYNC:
if (!mddev->bitmap)
- return mddev->recovery_cp;
+ return mddev->resync_offset;
return 0;
case ACTION_RESHAPE:
/*
@@ -8962,14 +9259,18 @@ static sector_t md_sync_position(struct mddev *mddev, enum sync_action action)
start = MaxSector;
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev)
- if (rdev->raid_disk >= 0 &&
- !test_bit(Journal, &rdev->flags) &&
- !test_bit(Faulty, &rdev->flags) &&
- !test_bit(In_sync, &rdev->flags) &&
- rdev->recovery_offset < start)
+ if (rdev_needs_recovery(rdev, start))
start = rdev->recovery_offset;
rcu_read_unlock();
+ /*
+	 * If there are no spares and raid456 lazy initial recover is
+	 * requested, pick an in-sync rdev and rebuild it from sector 0.
+ */
+ if (test_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery) &&
+ start == MaxSector && mddev_select_lazy_recover_rdev(mddev))
+ start = 0;
+
/* If there is a bitmap, we need to make sure all
* writes that started before we added a spare
* complete before we start doing a recovery.
@@ -8990,19 +9291,12 @@ static sector_t md_sync_position(struct mddev *mddev, enum sync_action action)
static bool sync_io_within_limit(struct mddev *mddev)
{
- int io_sectors;
-
/*
* For raid456, sync IO is stripe(4k) per IO, for other levels, it's
* RESYNC_PAGES(64k) per IO.
*/
- if (mddev->level == 4 || mddev->level == 5 || mddev->level == 6)
- io_sectors = 8;
- else
- io_sectors = 128;
-
return atomic_read(&mddev->recovery_active) <
- io_sectors * sync_io_depth(mddev);
+ (raid_is_456(mddev) ? 8 : 128) * sync_io_depth(mddev);
}
#define SYNC_MARKS 10
@@ -9054,6 +9348,11 @@ void md_do_sync(struct md_thread *thread)
}
action = md_sync_action(mddev);
+ if (action == ACTION_FROZEN || action == ACTION_IDLE) {
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ goto skip;
+ }
+
desc = md_sync_action_name(action);
mddev->last_sync_action = action;
@@ -9184,8 +9483,8 @@ void md_do_sync(struct md_thread *thread)
atomic_read(&mddev->recovery_active) == 0);
mddev->curr_resync_completed = j;
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
- j > mddev->recovery_cp)
- mddev->recovery_cp = j;
+ j > mddev->resync_offset)
+ mddev->resync_offset = j;
update_time = jiffies;
set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
sysfs_notify_dirent_safe(mddev->sysfs_completed);
@@ -9207,6 +9506,12 @@ void md_do_sync(struct md_thread *thread)
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
break;
+ if (mddev->bitmap_ops && mddev->bitmap_ops->skip_sync_blocks) {
+ sectors = mddev->bitmap_ops->skip_sync_blocks(mddev, j);
+ if (sectors)
+ goto update;
+ }
+
sectors = mddev->pers->sync_request(mddev, j, max_sectors,
&skipped);
if (sectors == 0) {
@@ -9222,6 +9527,7 @@ void md_do_sync(struct md_thread *thread)
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
break;
+update:
j += sectors;
if (j > max_sectors)
/* when skipping, extra large numbers can be returned. */
@@ -9305,19 +9611,19 @@ void md_do_sync(struct md_thread *thread)
mddev->curr_resync > MD_RESYNC_ACTIVE) {
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
- if (mddev->curr_resync >= mddev->recovery_cp) {
+ if (mddev->curr_resync >= mddev->resync_offset) {
pr_debug("md: checkpointing %s of %s.\n",
desc, mdname(mddev));
if (test_bit(MD_RECOVERY_ERROR,
&mddev->recovery))
- mddev->recovery_cp =
+ mddev->resync_offset =
mddev->curr_resync_completed;
else
- mddev->recovery_cp =
+ mddev->resync_offset =
mddev->curr_resync;
}
} else
- mddev->recovery_cp = MaxSector;
+ mddev->resync_offset = MaxSector;
} else {
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
mddev->curr_resync = MaxSector;
@@ -9325,12 +9631,8 @@ void md_do_sync(struct md_thread *thread)
test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) {
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev)
- if (rdev->raid_disk >= 0 &&
- mddev->delta_disks >= 0 &&
- !test_bit(Journal, &rdev->flags) &&
- !test_bit(Faulty, &rdev->flags) &&
- !test_bit(In_sync, &rdev->flags) &&
- rdev->recovery_offset < mddev->curr_resync)
+ if (mddev->delta_disks >= 0 &&
+ rdev_needs_recovery(rdev, mddev->curr_resync))
rdev->recovery_offset = mddev->curr_resync;
rcu_read_unlock();
}
@@ -9421,6 +9723,12 @@ static bool rdev_is_spare(struct md_rdev *rdev)
static bool rdev_addable(struct md_rdev *rdev)
{
+ struct mddev *mddev;
+
+ mddev = READ_ONCE(rdev->mddev);
+ if (!mddev)
+ return false;
+
/* rdev is already used, don't add it again. */
if (test_bit(Candidate, &rdev->flags) || rdev->raid_disk >= 0 ||
test_bit(Faulty, &rdev->flags))
@@ -9431,7 +9739,7 @@ static bool rdev_addable(struct md_rdev *rdev)
return true;
/* Allow to add if array is read-write. */
- if (md_is_rdwr(rdev->mddev))
+ if (md_is_rdwr(mddev))
return true;
/*
@@ -9529,14 +9837,16 @@ static bool md_choose_sync_action(struct mddev *mddev, int *spares)
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
+ clear_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery);
return true;
}
/* Check if resync is in progress. */
- if (mddev->recovery_cp < MaxSector) {
+ if (mddev->resync_offset < MaxSector) {
remove_spares(mddev, NULL);
set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
+ clear_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery);
return true;
}
@@ -9546,7 +9856,7 @@ static bool md_choose_sync_action(struct mddev *mddev, int *spares)
* re-add.
*/
*spares = remove_and_add_spares(mddev, NULL);
- if (*spares) {
+ if (*spares || test_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery)) {
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
@@ -9604,7 +9914,7 @@ static void md_start_sync(struct work_struct *ws)
* We are adding a device or devices to an array which has the bitmap
* stored on all devices. So make sure all bitmap pages get written.
*/
- if (spares)
+ if (spares && md_bitmap_enabled(mddev, true))
mddev->bitmap_ops->write_all(mddev);
name = test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ?
@@ -9692,7 +10002,7 @@ static void unregister_sync_thread(struct mddev *mddev)
*/
void md_check_recovery(struct mddev *mddev)
{
- if (mddev->bitmap)
+ if (md_bitmap_enabled(mddev, false) && mddev->bitmap_ops->daemon_work)
mddev->bitmap_ops->daemon_work(mddev);
if (signal_pending(current)) {
@@ -9714,7 +10024,7 @@ void md_check_recovery(struct mddev *mddev)
test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
(mddev->external == 0 && mddev->safemode == 1) ||
(mddev->safemode == 2
- && !mddev->in_sync && mddev->recovery_cp == MaxSector)
+ && !mddev->in_sync && mddev->resync_offset == MaxSector)
))
return;
@@ -9759,6 +10069,7 @@ void md_check_recovery(struct mddev *mddev)
}
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
+ clear_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery);
clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
@@ -9771,8 +10082,8 @@ void md_check_recovery(struct mddev *mddev)
* remove disk.
*/
rdev_for_each_safe(rdev, tmp, mddev) {
- if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
- rdev->raid_disk < 0)
+ if (rdev->raid_disk < 0 &&
+ test_and_clear_bit(ClusterRemove, &rdev->flags))
md_kick_rdev_from_array(rdev);
}
}
@@ -9869,6 +10180,7 @@ void md_reap_sync_thread(struct mddev *mddev)
clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+ clear_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery);
/*
* We call mddev->cluster_ops->update_size here because sync_size could
* be changed by md_update_sb, and MD_RECOVERY_RESHAPE is cleared,
@@ -10016,8 +10328,16 @@ static void md_geninit(void)
static int __init md_init(void)
{
- int ret = -ENOMEM;
+ int ret = md_bitmap_init();
+ if (ret)
+ return ret;
+
+ ret = md_llbitmap_init();
+ if (ret)
+ goto err_bitmap;
+
+ ret = -ENOMEM;
md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
if (!md_wq)
goto err_wq;
@@ -10026,11 +10346,6 @@ static int __init md_init(void)
if (!md_misc_wq)
goto err_misc_wq;
- md_bitmap_wq = alloc_workqueue("md_bitmap", WQ_MEM_RECLAIM | WQ_UNBOUND,
- 0);
- if (!md_bitmap_wq)
- goto err_bitmap_wq;
-
ret = __register_blkdev(MD_MAJOR, "md", md_probe);
if (ret < 0)
goto err_md;
@@ -10049,12 +10364,13 @@ static int __init md_init(void)
err_mdp:
unregister_blkdev(MD_MAJOR, "md");
err_md:
- destroy_workqueue(md_bitmap_wq);
-err_bitmap_wq:
destroy_workqueue(md_misc_wq);
err_misc_wq:
destroy_workqueue(md_wq);
err_wq:
+ md_llbitmap_exit();
+err_bitmap:
+ md_bitmap_exit();
return ret;
}
@@ -10072,14 +10388,17 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size));
if (ret)
pr_info("md-cluster: resize failed\n");
- else
+ else if (md_bitmap_enabled(mddev, false))
mddev->bitmap_ops->update_sb(mddev->bitmap);
}
/* Check for change of roles in the active devices */
rdev_for_each_safe(rdev2, tmp, mddev) {
- if (test_bit(Faulty, &rdev2->flags))
+ if (test_bit(Faulty, &rdev2->flags)) {
+ if (test_bit(ClusterRemove, &rdev2->flags))
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
continue;
+ }
/* Check if the roles changed */
role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);
@@ -10357,8 +10676,8 @@ static __exit void md_exit(void)
spin_unlock(&all_mddevs_lock);
destroy_workqueue(md_misc_wq);
- destroy_workqueue(md_bitmap_wq);
destroy_workqueue(md_wq);
+ md_bitmap_exit();
}
subsys_initcall(md_init);
@@ -10377,6 +10696,7 @@ module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
module_param(create_on_open, bool, S_IRUSR|S_IWUSR);
+module_param(legacy_async_del_gendisk, bool, 0600);
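Mode 0600 makes the flag writable at runtime through the module parameter file. A userspace sketch, assuming md is built as md-mod so the file appears under /sys/module/md_mod/parameters/ (the path is an assumption):

	/* sketch: enable the legacy behaviour from userspace (needs root) */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/module/md_mod/parameters/legacy_async_del_gendisk", "w");

		if (!f) {
			perror("fopen");	/* parameter path assumed */
			return 1;
		}
		fputs("Y", f);			/* bool module params accept Y/N/1/0 */
		fclose(f);
		return 0;
	}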
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD RAID framework");
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 67b365621507..1979c2d4fe89 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -26,7 +26,7 @@
enum md_submodule_type {
MD_PERSONALITY = 0,
MD_CLUSTER,
- MD_BITMAP, /* TODO */
+ MD_BITMAP,
};
enum md_submodule_id {
@@ -38,8 +38,9 @@ enum md_submodule_id {
ID_RAID6 = 6,
ID_RAID10 = 10,
ID_CLUSTER,
- ID_BITMAP, /* TODO */
- ID_LLBITMAP, /* TODO */
+ ID_BITMAP,
+ ID_LLBITMAP,
+ ID_BITMAP_NONE,
};
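With the TODO markers gone, a bitmap backend is expected to announce itself through the same submodule registry the personalities use. A hypothetical registration sketch; the field layout and register_md_submodule() are assumed from surrounding context, and the init hook is illustrative only:

	/* hypothetical backend registration -- sketch, not part of this patch */
	static struct md_submodule_head llbitmap_head = {
		.type	= MD_BITMAP,
		.id	= ID_LLBITMAP,
		.name	= "llbitmap",	/* listed by the bitmap_type attribute */
		.owner	= THIS_MODULE,
	};

	static int __init llbitmap_init(void)
	{
		return register_md_submodule(&llbitmap_head);	/* assumed API */
	}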
struct md_submodule_head {
@@ -523,7 +524,7 @@ struct mddev {
unsigned long normal_io_events; /* IO event timestamp */
atomic_t recovery_active; /* blocks scheduled, but not written */
wait_queue_head_t recovery_wait;
- sector_t recovery_cp;
+ sector_t resync_offset;
sector_t resync_min; /* user requested sync
* starts here */
sector_t resync_max; /* resync should pause
@@ -565,6 +566,7 @@ struct mddev {
struct percpu_ref writes_pending;
int sync_checkers; /* # of threads checking writes_pending */
+ enum md_submodule_id bitmap_id;
void *bitmap; /* the bitmap for the device */
struct bitmap_operations *bitmap_ops;
struct {
@@ -665,6 +667,8 @@ enum recovery_flags {
MD_RECOVERY_RESHAPE,
/* remote node is running resync thread */
MD_RESYNCING_REMOTE,
+ /* raid456 lazy initial recover */
+ MD_RECOVERY_LAZY_RECOVER,
};
enum md_ro_state {
@@ -796,7 +800,6 @@ struct md_sysfs_entry {
ssize_t (*show)(struct mddev *, char *);
ssize_t (*store)(struct mddev *, const char *, size_t);
};
-extern const struct attribute_group md_bitmap_group;
static inline struct kernfs_node *sysfs_get_dirent_safe(struct kernfs_node *sd, char *name)
{
@@ -873,6 +876,7 @@ struct md_io_clone {
unsigned long start_time;
sector_t offset;
unsigned long sectors;
+ enum stat_group rw;
struct bio bio_clone;
};
@@ -909,8 +913,9 @@ void md_account_bio(struct mddev *mddev, struct bio **bio);
void md_free_cloned_bio(struct bio *bio);
extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
-extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
- sector_t sector, int size, struct page *page);
+void md_write_metadata(struct mddev *mddev, struct md_rdev *rdev,
+ sector_t sector, int size, struct page *page,
+ unsigned int offset);
extern int md_super_wait(struct mddev *mddev);
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct page *page, blk_opf_t opf, bool metadata_op);
@@ -1013,7 +1018,6 @@ struct mdu_array_info_s;
struct mdu_disk_info_s;
extern int mdp_major;
-extern struct workqueue_struct *md_bitmap_wq;
void md_autostart_arrays(int part);
int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info);
int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info);
@@ -1034,6 +1038,12 @@ static inline bool mddev_is_dm(struct mddev *mddev)
return !mddev->gendisk;
}
+static inline bool raid_is_456(struct mddev *mddev)
+{
+ return mddev->level == ID_RAID4 || mddev->level == ID_RAID5 ||
+ mddev->level == ID_RAID6;
+}
+
static inline void mddev_trace_remap(struct mddev *mddev, struct bio *bio,
sector_t sector)
{
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index cbe2a9054cb9..e443e478645a 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -382,6 +382,7 @@ static int raid0_set_limits(struct mddev *mddev)
md_init_stacking_limits(&lim);
lim.max_hw_sectors = mddev->chunk_sectors;
lim.max_write_zeroes_sectors = mddev->chunk_sectors;
+ lim.max_hw_wzeroes_unmap_sectors = mddev->chunk_sectors;
lim.io_min = mddev->chunk_sectors << 9;
lim.io_opt = lim.io_min * mddev->raid_disks;
lim.chunk_sectors = mddev->chunk_sectors;
@@ -463,21 +464,16 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
zone = find_zone(conf, &start);
if (bio_end_sector(bio) > zone->zone_end) {
- struct bio *split = bio_split(bio,
- zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
- &mddev->bio_set);
-
- if (IS_ERR(split)) {
- bio->bi_status = errno_to_blk_status(PTR_ERR(split));
- bio_endio(bio);
+ bio = bio_submit_split_bioset(bio,
+ zone->zone_end - bio->bi_iter.bi_sector,
+ &mddev->bio_set);
+ if (!bio)
return;
- }
- bio_chain(split, bio);
- submit_bio_noacct(bio);
- bio = split;
+
end = zone->zone_end;
- } else
+ } else {
end = bio_end_sector(bio);
+ }
orig_end = end;
if (zone != conf->strip_zone)
@@ -612,17 +608,10 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
: sector_div(sector, chunk_sects));
if (sectors < bio_sectors(bio)) {
- struct bio *split = bio_split(bio, sectors, GFP_NOIO,
+ bio = bio_submit_split_bioset(bio, sectors,
&mddev->bio_set);
-
- if (IS_ERR(split)) {
- bio->bi_status = errno_to_blk_status(PTR_ERR(split));
- bio_endio(bio);
+ if (!bio)
return true;
- }
- bio_chain(split, bio);
- raid0_map_submit_bio(mddev, bio);
- bio = split;
}
raid0_map_submit_bio(mddev, bio);
@@ -674,7 +663,7 @@ static void *raid0_takeover_raid45(struct mddev *mddev)
mddev->raid_disks--;
mddev->delta_disks = -1;
/* make sure it will be not marked as dirty */
- mddev->recovery_cp = MaxSector;
+ mddev->resync_offset = MaxSector;
mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
create_strip_zones(mddev, &priv_conf);
@@ -717,7 +706,7 @@ static void *raid0_takeover_raid10(struct mddev *mddev)
mddev->raid_disks += mddev->delta_disks;
mddev->degraded = 0;
/* make sure it will be not marked as dirty */
- mddev->recovery_cp = MaxSector;
+ mddev->resync_offset = MaxSector;
mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
create_strip_zones(mddev, &priv_conf);
@@ -760,7 +749,7 @@ static void *raid0_takeover_raid1(struct mddev *mddev)
mddev->delta_disks = 1 - mddev->raid_disks;
mddev->raid_disks = 1;
/* make sure it will be not marked as dirty */
- mddev->recovery_cp = MaxSector;
+ mddev->resync_offset = MaxSector;
mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
create_strip_zones(mddev, &priv_conf);
diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
index b8b3a9069701..521625756128 100644
--- a/drivers/md/raid1-10.c
+++ b/drivers/md/raid1-10.c
@@ -140,7 +140,7 @@ static inline bool raid1_add_bio_to_plug(struct mddev *mddev, struct bio *bio,
* If bitmap is not enabled, it's safe to submit the io directly, and
* this gives optimal performance.
*/
- if (!mddev->bitmap_ops->enabled(mddev)) {
+ if (!md_bitmap_enabled(mddev, true)) {
raid1_submit_write(bio);
return true;
}
@@ -283,7 +283,7 @@ static inline int raid1_check_read_range(struct md_rdev *rdev,
static inline bool raid1_should_read_first(struct mddev *mddev,
sector_t this_sector, int len)
{
- if ((mddev->recovery_cp < this_sector + len))
+ if ((mddev->resync_offset < this_sector + len))
return true;
if (mddev_is_clustered(mddev) &&
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 64b8176907a9..592a40233004 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -127,10 +127,9 @@ static inline struct r1bio *get_resync_r1bio(struct bio *bio)
return get_resync_pages(bio)->raid_bio;
}
-static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
+static void *r1bio_pool_alloc(gfp_t gfp_flags, struct r1conf *conf)
{
- struct pool_info *pi = data;
- int size = offsetof(struct r1bio, bios[pi->raid_disks]);
+ int size = offsetof(struct r1bio, bios[conf->raid_disks * 2]);
/* allocate a r1bio with room for raid_disks entries in the bios array */
return kzalloc(size, gfp_flags);
@@ -145,18 +144,18 @@ static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
- struct pool_info *pi = data;
+ struct r1conf *conf = data;
struct r1bio *r1_bio;
struct bio *bio;
int need_pages;
int j;
struct resync_pages *rps;
- r1_bio = r1bio_pool_alloc(gfp_flags, pi);
+ r1_bio = r1bio_pool_alloc(gfp_flags, conf);
if (!r1_bio)
return NULL;
- rps = kmalloc_array(pi->raid_disks, sizeof(struct resync_pages),
+ rps = kmalloc_array(conf->raid_disks * 2, sizeof(struct resync_pages),
gfp_flags);
if (!rps)
goto out_free_r1bio;
@@ -164,11 +163,11 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
/*
* Allocate bios : 1 for reading, n-1 for writing
*/
- for (j = pi->raid_disks ; j-- ; ) {
+ for (j = conf->raid_disks * 2; j-- ; ) {
bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
if (!bio)
goto out_free_bio;
- bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
+ bio_init_inline(bio, NULL, RESYNC_PAGES, 0);
r1_bio->bios[j] = bio;
}
/*
@@ -177,11 +176,11 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
* If this is a user-requested check/repair, allocate
* RESYNC_PAGES for each bio.
*/
- if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
- need_pages = pi->raid_disks;
+ if (test_bit(MD_RECOVERY_REQUESTED, &conf->mddev->recovery))
+ need_pages = conf->raid_disks * 2;
else
need_pages = 1;
- for (j = 0; j < pi->raid_disks; j++) {
+ for (j = 0; j < conf->raid_disks * 2; j++) {
struct resync_pages *rp = &rps[j];
bio = r1_bio->bios[j];
@@ -207,7 +206,7 @@ out_free_pages:
resync_free_pages(&rps[j]);
out_free_bio:
- while (++j < pi->raid_disks) {
+ while (++j < conf->raid_disks * 2) {
bio_uninit(r1_bio->bios[j]);
kfree(r1_bio->bios[j]);
}
@@ -220,12 +219,12 @@ out_free_r1bio:
static void r1buf_pool_free(void *__r1_bio, void *data)
{
- struct pool_info *pi = data;
+ struct r1conf *conf = data;
int i;
struct r1bio *r1bio = __r1_bio;
struct resync_pages *rp = NULL;
- for (i = pi->raid_disks; i--; ) {
+ for (i = conf->raid_disks * 2; i--; ) {
rp = get_resync_pages(r1bio->bios[i]);
resync_free_pages(rp);
bio_uninit(r1bio->bios[i]);
@@ -255,7 +254,7 @@ static void free_r1bio(struct r1bio *r1_bio)
struct r1conf *conf = r1_bio->mddev->private;
put_all_bios(conf, r1_bio);
- mempool_free(r1_bio, &conf->r1bio_pool);
+ mempool_free(r1_bio, conf->r1bio_pool);
}
static void put_buf(struct r1bio *r1_bio)
@@ -1226,7 +1225,7 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio,
int i = 0;
struct bio *behind_bio = NULL;
- behind_bio = bio_alloc_bioset(NULL, vcnt, 0, GFP_NOIO,
+ behind_bio = bio_alloc_bioset(NULL, vcnt, bio->bi_opf, GFP_NOIO,
&r1_bio->mddev->bio_set);
/* discard op, we don't support writezero/writesame yet */
@@ -1305,9 +1304,8 @@ alloc_r1bio(struct mddev *mddev, struct bio *bio)
struct r1conf *conf = mddev->private;
struct r1bio *r1_bio;
- r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO);
- /* Ensure no bio records IO_BLOCKED */
- memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0]));
+ r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
+ memset(r1_bio, 0, offsetof(struct r1bio, bios[conf->raid_disks * 2]));
init_r1bio(r1_bio, mddev, bio);
return r1_bio;
}
@@ -1319,7 +1317,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
struct raid1_info *mirror;
struct bio *read_bio;
int max_sectors;
- int rdisk, error;
+ int rdisk;
bool r1bio_existed = !!r1_bio;
/*
@@ -1368,7 +1366,8 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
(unsigned long long)r1_bio->sector,
mirror->rdev->bdev);
- if (test_bit(WriteMostly, &mirror->rdev->flags)) {
+ if (test_bit(WriteMostly, &mirror->rdev->flags) &&
+ md_bitmap_enabled(mddev, false)) {
/*
* Reading from a write-mostly device must take care not to
* over-take any writes that are 'behind'
@@ -1378,16 +1377,13 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
}
if (max_sectors < bio_sectors(bio)) {
- struct bio *split = bio_split(bio, max_sectors,
- gfp, &conf->bio_split);
-
- if (IS_ERR(split)) {
- error = PTR_ERR(split);
+ bio = bio_submit_split_bioset(bio, max_sectors,
+ &conf->bio_split);
+ if (!bio) {
+ set_bit(R1BIO_Returned, &r1_bio->state);
goto err_handle;
}
- bio_chain(split, bio);
- submit_bio_noacct(bio);
- bio = split;
+
r1_bio->master_bio = bio;
r1_bio->sectors = max_sectors;
}
@@ -1415,8 +1411,6 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
err_handle:
atomic_dec(&mirror->rdev->nr_pending);
- bio->bi_status = errno_to_blk_status(error);
- set_bit(R1BIO_Uptodate, &r1_bio->state);
raid_end_bio_io(r1_bio);
}
@@ -1454,12 +1448,36 @@ retry:
return true;
}
+static void raid1_start_write_behind(struct mddev *mddev, struct r1bio *r1_bio,
+ struct bio *bio)
+{
+ unsigned long max_write_behind = mddev->bitmap_info.max_write_behind;
+ struct md_bitmap_stats stats;
+ int err;
+
+ /* behind write rely on bitmap, see bitmap_operations */
+ if (!md_bitmap_enabled(mddev, false))
+ return;
+
+ err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats);
+ if (err)
+ return;
+
+ /* Don't do behind IO if reader is waiting, or there are too many. */
+ if (!stats.behind_wait && stats.behind_writes < max_write_behind)
+ alloc_behind_master_bio(r1_bio, bio);
+
+ if (test_bit(R1BIO_BehindIO, &r1_bio->state))
+ mddev->bitmap_ops->start_behind_write(mddev);
+}
+
static void raid1_write_request(struct mddev *mddev, struct bio *bio,
int max_write_sectors)
{
struct r1conf *conf = mddev->private;
struct r1bio *r1_bio;
- int i, disks, k, error;
+ int i, disks, k;
unsigned long flags;
int first_clone;
int max_sectors;
@@ -1563,10 +1581,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
* complexity of supporting that is not worth
* the benefit.
*/
- if (bio->bi_opf & REQ_ATOMIC) {
- error = -EIO;
+ if (bio->bi_opf & REQ_ATOMIC)
goto err_handle;
- }
good_sectors = first_bad - r1_bio->sector;
if (good_sectors < max_sectors)
@@ -1586,16 +1602,13 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
max_sectors = min_t(int, max_sectors,
BIO_MAX_VECS * (PAGE_SIZE >> 9));
if (max_sectors < bio_sectors(bio)) {
- struct bio *split = bio_split(bio, max_sectors,
- GFP_NOIO, &conf->bio_split);
-
- if (IS_ERR(split)) {
- error = PTR_ERR(split);
+ bio = bio_submit_split_bioset(bio, max_sectors,
+ &conf->bio_split);
+ if (!bio) {
+ set_bit(R1BIO_Returned, &r1_bio->state);
goto err_handle;
}
- bio_chain(split, bio);
- submit_bio_noacct(bio);
- bio = split;
+
r1_bio->master_bio = bio;
r1_bio->sectors = max_sectors;
}
@@ -1614,22 +1627,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
continue;
if (first_clone) {
- unsigned long max_write_behind =
- mddev->bitmap_info.max_write_behind;
- struct md_bitmap_stats stats;
- int err;
-
- /* do behind I/O ?
- * Not if there are too many, or cannot
- * allocate memory, or a reader on WriteMostly
- * is waiting for behind writes to flush */
- err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats);
- if (!err && write_behind && !stats.behind_wait &&
- stats.behind_writes < max_write_behind)
- alloc_behind_master_bio(r1_bio, bio);
-
- if (test_bit(R1BIO_BehindIO, &r1_bio->state))
- mddev->bitmap_ops->start_behind_write(mddev);
+ if (write_behind)
+ raid1_start_write_behind(mddev, r1_bio, bio);
first_clone = 0;
}
@@ -1685,8 +1684,6 @@ err_handle:
}
}
- bio->bi_status = errno_to_blk_status(error);
- set_bit(R1BIO_Uptodate, &r1_bio->state);
raid_end_bio_io(r1_bio);
}
@@ -2059,7 +2056,7 @@ static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
/* make sure these bits don't get cleared. */
do {
- mddev->bitmap_ops->end_sync(mddev, s, &sync_blocks);
+ md_bitmap_end_sync(mddev, s, &sync_blocks);
s += sync_blocks;
sectors_to_go -= sync_blocks;
} while (sectors_to_go > 0);
@@ -2747,7 +2744,7 @@ static int init_resync(struct r1conf *conf)
BUG_ON(mempool_initialized(&conf->r1buf_pool));
return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc,
- r1buf_pool_free, conf->poolinfo);
+ r1buf_pool_free, conf);
}
static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
@@ -2757,7 +2754,7 @@ static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
struct bio *bio;
int i;
- for (i = conf->poolinfo->raid_disks; i--; ) {
+ for (i = conf->raid_disks * 2; i--; ) {
bio = r1bio->bios[i];
rps = bio->bi_private;
bio_reset(bio, NULL, 0);
@@ -2806,12 +2803,13 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
* We can find the current address in mddev->curr_resync
*/
if (mddev->curr_resync < max_sector) /* aborted */
- mddev->bitmap_ops->end_sync(mddev, mddev->curr_resync,
- &sync_blocks);
+ md_bitmap_end_sync(mddev, mddev->curr_resync,
+ &sync_blocks);
else /* completed sync */
conf->fullsync = 0;
- mddev->bitmap_ops->close_sync(mddev);
+ if (md_bitmap_enabled(mddev, false))
+ mddev->bitmap_ops->close_sync(mddev);
close_sync(conf);
if (mddev_is_clustered(mddev)) {
@@ -2822,7 +2820,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
}
if (mddev->bitmap == NULL &&
- mddev->recovery_cp == MaxSector &&
+ mddev->resync_offset == MaxSector &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
conf->fullsync == 0) {
*skipped = 1;
@@ -2831,7 +2829,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
/* before building a request, check if we can skip these blocks..
* This call to bitmap_start_sync doesn't actually record anything
*/
- if (!mddev->bitmap_ops->start_sync(mddev, sector_nr, &sync_blocks, true) &&
+ if (!md_bitmap_start_sync(mddev, sector_nr, &sync_blocks, true) &&
!conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
/* We can skip this block, and probably several more */
*skipped = 1;
@@ -2848,10 +2846,11 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
/* we are incrementing sector_nr below. To be safe, we check against
* sector_nr + two times RESYNC_SECTORS
*/
-
- mddev->bitmap_ops->cond_end_sync(mddev, sector_nr,
- mddev_is_clustered(mddev) &&
- (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
+ if (md_bitmap_enabled(mddev, false))
+ mddev->bitmap_ops->cond_end_sync(mddev, sector_nr,
+ mddev_is_clustered(mddev) &&
+ (sector_nr + 2 * RESYNC_SECTORS >
+ conf->cluster_sync_high));
if (raise_barrier(conf, sector_nr))
return 0;
@@ -3006,8 +3005,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
if (len == 0)
break;
if (sync_blocks == 0) {
- if (!mddev->bitmap_ops->start_sync(mddev, sector_nr,
- &sync_blocks, still_degraded) &&
+ if (!md_bitmap_start_sync(mddev, sector_nr,
+ &sync_blocks, still_degraded) &&
!conf->fullsync &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
break;
@@ -3085,6 +3084,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
int i;
struct raid1_info *disk;
struct md_rdev *rdev;
+ size_t r1bio_size;
int err = -ENOMEM;
conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
@@ -3121,21 +3121,15 @@ static struct r1conf *setup_conf(struct mddev *mddev)
if (!conf->tmppage)
goto abort;
- conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
- if (!conf->poolinfo)
- goto abort;
- conf->poolinfo->raid_disks = mddev->raid_disks * 2;
- err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS, r1bio_pool_alloc,
- rbio_pool_free, conf->poolinfo);
- if (err)
+ r1bio_size = offsetof(struct r1bio, bios[mddev->raid_disks * 2]);
+ conf->r1bio_pool = mempool_create_kmalloc_pool(NR_RAID_BIOS, r1bio_size);
+ if (!conf->r1bio_pool)
goto abort;
err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
if (err)
goto abort;
- conf->poolinfo->mddev = mddev;
-
err = -EINVAL;
spin_lock_init(&conf->device_lock);
conf->raid_disks = mddev->raid_disks;
@@ -3198,10 +3192,9 @@ static struct r1conf *setup_conf(struct mddev *mddev)
abort:
if (conf) {
- mempool_exit(&conf->r1bio_pool);
+ mempool_destroy(conf->r1bio_pool);
kfree(conf->mirrors);
safe_put_page(conf->tmppage);
- kfree(conf->poolinfo);
kfree(conf->nr_pending);
kfree(conf->nr_waiting);
kfree(conf->nr_queued);
@@ -3219,6 +3212,7 @@ static int raid1_set_limits(struct mddev *mddev)
md_init_stacking_limits(&lim);
lim.max_write_zeroes_sectors = 0;
+ lim.max_hw_wzeroes_unmap_sectors = 0;
lim.features |= BLK_FEAT_ATOMIC_WRITES;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
if (err)
@@ -3282,9 +3276,9 @@ static int raid1_run(struct mddev *mddev)
}
if (conf->raid_disks - mddev->degraded == 1)
- mddev->recovery_cp = MaxSector;
+ mddev->resync_offset = MaxSector;
- if (mddev->recovery_cp != MaxSector)
+ if (mddev->resync_offset != MaxSector)
pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
mdname(mddev));
pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
@@ -3311,10 +3305,9 @@ static void raid1_free(struct mddev *mddev, void *priv)
{
struct r1conf *conf = priv;
- mempool_exit(&conf->r1bio_pool);
+ mempool_destroy(conf->r1bio_pool);
kfree(conf->mirrors);
safe_put_page(conf->tmppage);
- kfree(conf->poolinfo);
kfree(conf->nr_pending);
kfree(conf->nr_waiting);
kfree(conf->nr_queued);
@@ -3333,20 +3326,22 @@ static int raid1_resize(struct mddev *mddev, sector_t sectors)
* worth it.
*/
sector_t newsize = raid1_size(mddev, sectors, 0);
- int ret;
if (mddev->external_size &&
mddev->array_sectors > newsize)
return -EINVAL;
- ret = mddev->bitmap_ops->resize(mddev, newsize, 0, false);
- if (ret)
- return ret;
+ if (md_bitmap_enabled(mddev, false)) {
+ int ret = mddev->bitmap_ops->resize(mddev, newsize, 0);
+
+ if (ret)
+ return ret;
+ }
md_set_array_sectors(mddev, newsize);
if (sectors > mddev->dev_sectors &&
- mddev->recovery_cp > mddev->dev_sectors) {
- mddev->recovery_cp = mddev->dev_sectors;
+ mddev->resync_offset > mddev->dev_sectors) {
+ mddev->resync_offset = mddev->dev_sectors;
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}
mddev->dev_sectors = sectors;
@@ -3367,17 +3362,13 @@ static int raid1_reshape(struct mddev *mddev)
* At the same time, we "pack" the devices so that all the missing
* devices have the higher raid_disk numbers.
*/
- mempool_t newpool, oldpool;
- struct pool_info *newpoolinfo;
+ mempool_t *newpool, *oldpool;
+ size_t new_r1bio_size;
struct raid1_info *newmirrors;
struct r1conf *conf = mddev->private;
int cnt, raid_disks;
unsigned long flags;
int d, d2;
- int ret;
-
- memset(&newpool, 0, sizeof(newpool));
- memset(&oldpool, 0, sizeof(oldpool));
/* Cannot change chunk_size, layout, or level */
if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
@@ -3403,24 +3394,16 @@ static int raid1_reshape(struct mddev *mddev)
return -EBUSY;
}
- newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
- if (!newpoolinfo)
+ new_r1bio_size = offsetof(struct r1bio, bios[raid_disks * 2]);
+ newpool = mempool_create_kmalloc_pool(NR_RAID_BIOS, new_r1bio_size);
+ if (!newpool) {
return -ENOMEM;
- newpoolinfo->mddev = mddev;
- newpoolinfo->raid_disks = raid_disks * 2;
-
- ret = mempool_init(&newpool, NR_RAID_BIOS, r1bio_pool_alloc,
- rbio_pool_free, newpoolinfo);
- if (ret) {
- kfree(newpoolinfo);
- return ret;
}
newmirrors = kzalloc(array3_size(sizeof(struct raid1_info),
raid_disks, 2),
GFP_KERNEL);
if (!newmirrors) {
- kfree(newpoolinfo);
- mempool_exit(&newpool);
+ mempool_destroy(newpool);
return -ENOMEM;
}
@@ -3429,7 +3412,6 @@ static int raid1_reshape(struct mddev *mddev)
/* ok, everything is stopped */
oldpool = conf->r1bio_pool;
conf->r1bio_pool = newpool;
- init_waitqueue_head(&conf->r1bio_pool.wait);
for (d = d2 = 0; d < conf->raid_disks; d++) {
struct md_rdev *rdev = conf->mirrors[d].rdev;
@@ -3446,8 +3428,6 @@ static int raid1_reshape(struct mddev *mddev)
}
kfree(conf->mirrors);
conf->mirrors = newmirrors;
- kfree(conf->poolinfo);
- conf->poolinfo = newpoolinfo;
spin_lock_irqsave(&conf->device_lock, flags);
mddev->degraded += (raid_disks - conf->raid_disks);
@@ -3461,7 +3441,7 @@ static int raid1_reshape(struct mddev *mddev)
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
- mempool_exit(&oldpool);
+ mempool_destroy(oldpool);
return 0;
}
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 33f318fcc268..2ebe35aaa534 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -49,22 +49,6 @@ struct raid1_info {
sector_t seq_start;
};
-/*
- * memory pools need a pointer to the mddev, so they can force an unplug
- * when memory is tight, and a count of the number of drives that the
- * pool was allocated for, so they know how much to allocate and free.
- * mddev->raid_disks cannot be used, as it can change while a pool is active
- * These two datums are stored in a kmalloced struct.
- * The 'raid_disks' here is twice the raid_disks in r1conf.
- * This allows space for each 'real' device can have a replacement in the
- * second half of the array.
- */
-
-struct pool_info {
- struct mddev *mddev;
- int raid_disks;
-};
-
struct r1conf {
struct mddev *mddev;
struct raid1_info *mirrors; /* twice 'raid_disks' to
@@ -114,11 +98,7 @@ struct r1conf {
*/
int recovery_disabled;
- /* poolinfo contains information about the content of the
- * mempools - it changes when the array grows or shrinks
- */
- struct pool_info *poolinfo;
- mempool_t r1bio_pool;
+ mempool_t *r1bio_pool;
mempool_t r1buf_pool;
struct bio_set bio_split;
@@ -198,7 +178,9 @@ enum r1bio_state {
* any write was successful. Otherwise we call when
* any write-behind write succeeds, otherwise we call
* with failure when last write completes (and all failed).
- * Record that bi_end_io was called with this flag...
+ *
+ * And for bio_split errors, record that bi_end_io was called
+ * with this flag...
*/
R1BIO_Returned,
/* If a write for this request means we can clear some
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 95dc354a86a0..14dcd5142eb4 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -163,14 +163,14 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
if (!bio)
goto out_free_bio;
- bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
+ bio_init_inline(bio, NULL, RESYNC_PAGES, 0);
r10_bio->devs[j].bio = bio;
if (!conf->have_replacement)
continue;
bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
if (!bio)
goto out_free_bio;
- bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
+ bio_init_inline(bio, NULL, RESYNC_PAGES, 0);
r10_bio->devs[j].repl_bio = bio;
}
/*
@@ -322,10 +322,12 @@ static void raid_end_bio_io(struct r10bio *r10_bio)
struct bio *bio = r10_bio->master_bio;
struct r10conf *conf = r10_bio->mddev->private;
- if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
- bio->bi_status = BLK_STS_IOERR;
+ if (!test_and_set_bit(R10BIO_Returned, &r10_bio->state)) {
+ if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
+ bio->bi_status = BLK_STS_IOERR;
+ bio_endio(bio);
+ }
- bio_endio(bio);
/*
* Wake up any possible resync thread that waits for the device
* to go idle.
@@ -1154,7 +1156,6 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
int slot = r10_bio->read_slot;
struct md_rdev *err_rdev = NULL;
gfp_t gfp = GFP_NOIO;
- int error;
if (slot >= 0 && r10_bio->devs[slot].rdev) {
/*
@@ -1203,17 +1204,15 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
rdev->bdev,
(unsigned long long)r10_bio->sector);
if (max_sectors < bio_sectors(bio)) {
- struct bio *split = bio_split(bio, max_sectors,
- gfp, &conf->bio_split);
- if (IS_ERR(split)) {
- error = PTR_ERR(split);
- goto err_handle;
- }
- bio_chain(split, bio);
allow_barrier(conf);
- submit_bio_noacct(bio);
+ bio = bio_submit_split_bioset(bio, max_sectors,
+ &conf->bio_split);
wait_barrier(conf, false);
- bio = split;
+ if (!bio) {
+ set_bit(R10BIO_Returned, &r10_bio->state);
+ goto err_handle;
+ }
+
r10_bio->master_bio = bio;
r10_bio->sectors = max_sectors;
}
@@ -1241,8 +1240,6 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
return;
err_handle:
atomic_dec(&rdev->nr_pending);
- bio->bi_status = errno_to_blk_status(error);
- set_bit(R10BIO_Uptodate, &r10_bio->state);
raid_end_bio_io(r10_bio);
}
@@ -1351,7 +1348,6 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
int i, k;
sector_t sectors;
int max_sectors;
- int error;
if ((mddev_is_clustered(mddev) &&
mddev->cluster_ops->area_resyncing(mddev, WRITE,
@@ -1465,10 +1461,8 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
* complexity of supporting that is not worth
* the benefit.
*/
- if (bio->bi_opf & REQ_ATOMIC) {
- error = -EIO;
+ if (bio->bi_opf & REQ_ATOMIC)
goto err_handle;
- }
good_sectors = first_bad - dev_sector;
if (good_sectors < max_sectors)
@@ -1489,17 +1483,15 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
r10_bio->sectors = max_sectors;
if (r10_bio->sectors < bio_sectors(bio)) {
- struct bio *split = bio_split(bio, r10_bio->sectors,
- GFP_NOIO, &conf->bio_split);
- if (IS_ERR(split)) {
- error = PTR_ERR(split);
- goto err_handle;
- }
- bio_chain(split, bio);
allow_barrier(conf);
- submit_bio_noacct(bio);
+ bio = bio_submit_split_bioset(bio, r10_bio->sectors,
+ &conf->bio_split);
wait_barrier(conf, false);
- bio = split;
+ if (!bio) {
+ set_bit(R10BIO_Returned, &r10_bio->state);
+ goto err_handle;
+ }
+
r10_bio->master_bio = bio;
}
@@ -1531,8 +1523,6 @@ err_handle:
}
}
- bio->bi_status = errno_to_blk_status(error);
- set_bit(R10BIO_Uptodate, &r10_bio->state);
raid_end_bio_io(r10_bio);
}
@@ -1679,7 +1669,9 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
bio_endio(bio);
return 0;
}
+
bio_chain(split, bio);
+ trace_block_split(split, bio->bi_iter.bi_sector);
allow_barrier(conf);
/* Resend the first split part */
submit_bio_noacct(split);
@@ -1694,7 +1686,9 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
bio_endio(bio);
return 0;
}
+
bio_chain(split, bio);
+ trace_block_split(split, bio->bi_iter.bi_sector);
allow_barrier(conf);
/* Resend the second split part */
submit_bio_noacct(bio);
@@ -2117,7 +2111,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
int last = conf->geo.raid_disks - 1;
struct raid10_info *p;
- if (mddev->recovery_cp < MaxSector)
+ if (mddev->resync_offset < MaxSector)
/* only hot-add to in-sync arrays, as recovery is
* very different from resync
*/
@@ -3185,7 +3179,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
* of a clean array, like RAID1 does.
*/
if (mddev->bitmap == NULL &&
- mddev->recovery_cp == MaxSector &&
+ mddev->resync_offset == MaxSector &&
mddev->reshape_position == MaxSector &&
!test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
@@ -3221,15 +3215,13 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
if (mddev->curr_resync < max_sector) { /* aborted */
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
- mddev->bitmap_ops->end_sync(mddev,
- mddev->curr_resync,
- &sync_blocks);
+ md_bitmap_end_sync(mddev, mddev->curr_resync,
+ &sync_blocks);
else for (i = 0; i < conf->geo.raid_disks; i++) {
sector_t sect =
raid10_find_virt(conf, mddev->curr_resync, i);
- mddev->bitmap_ops->end_sync(mddev, sect,
- &sync_blocks);
+ md_bitmap_end_sync(mddev, sect, &sync_blocks);
}
} else {
/* completed sync */
@@ -3249,7 +3241,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
}
conf->fullsync = 0;
}
- mddev->bitmap_ops->close_sync(mddev);
+ if (md_bitmap_enabled(mddev, false))
+ mddev->bitmap_ops->close_sync(mddev);
close_sync(conf);
*skipped = 1;
return sectors_skipped;
@@ -3351,9 +3344,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
* we only need to recover the block if it is set in
* the bitmap
*/
- must_sync = mddev->bitmap_ops->start_sync(mddev, sect,
- &sync_blocks,
- true);
+ must_sync = md_bitmap_start_sync(mddev, sect,
+ &sync_blocks, true);
if (sync_blocks < max_sync)
max_sync = sync_blocks;
if (!must_sync &&
@@ -3396,9 +3388,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
}
}
- must_sync = mddev->bitmap_ops->start_sync(mddev, sect,
- &sync_blocks, still_degraded);
-
+ md_bitmap_start_sync(mddev, sect, &sync_blocks,
+ still_degraded);
any_working = 0;
for (j=0; j<conf->copies;j++) {
int k;
@@ -3570,13 +3561,13 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
* safety reason, which ensures curr_resync_completed is
* updated in bitmap_cond_end_sync.
*/
- mddev->bitmap_ops->cond_end_sync(mddev, sector_nr,
+ if (md_bitmap_enabled(mddev, false))
+ mddev->bitmap_ops->cond_end_sync(mddev, sector_nr,
mddev_is_clustered(mddev) &&
(sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
- if (!mddev->bitmap_ops->start_sync(mddev, sector_nr,
- &sync_blocks,
- mddev->degraded) &&
+ if (!md_bitmap_start_sync(mddev, sector_nr, &sync_blocks,
+ mddev->degraded) &&
!conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
&mddev->recovery)) {
/* We can skip this block */
@@ -4008,6 +3999,7 @@ static int raid10_set_queue_limits(struct mddev *mddev)
md_init_stacking_limits(&lim);
lim.max_write_zeroes_sectors = 0;
+ lim.max_hw_wzeroes_unmap_sectors = 0;
lim.io_min = mddev->chunk_sectors << 9;
lim.chunk_sectors = mddev->chunk_sectors;
lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
@@ -4145,7 +4137,7 @@ static int raid10_run(struct mddev *mddev)
disk->recovery_disabled = mddev->recovery_disabled - 1;
}
- if (mddev->recovery_cp != MaxSector)
+ if (mddev->resync_offset != MaxSector)
pr_notice("md/raid10:%s: not clean -- starting background reconstruction\n",
mdname(mddev));
pr_info("md/raid10:%s: active with %d out of %d devices\n",
@@ -4225,7 +4217,6 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
*/
struct r10conf *conf = mddev->private;
sector_t oldsize, size;
- int ret;
if (mddev->reshape_position != MaxSector)
return -EBUSY;
@@ -4239,14 +4230,17 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
mddev->array_sectors > size)
return -EINVAL;
- ret = mddev->bitmap_ops->resize(mddev, size, 0, false);
- if (ret)
- return ret;
+ if (md_bitmap_enabled(mddev, false)) {
+ int ret = mddev->bitmap_ops->resize(mddev, size, 0);
+
+ if (ret)
+ return ret;
+ }
md_set_array_sectors(mddev, size);
if (sectors > mddev->dev_sectors &&
- mddev->recovery_cp > oldsize) {
- mddev->recovery_cp = oldsize;
+ mddev->resync_offset > oldsize) {
+ mddev->resync_offset = oldsize;
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}
calc_sectors(conf, sectors);
@@ -4275,7 +4269,7 @@ static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
mddev->delta_disks = mddev->raid_disks;
mddev->raid_disks *= 2;
/* make sure it will not be marked as dirty */
- mddev->recovery_cp = MaxSector;
+ mddev->resync_offset = MaxSector;
mddev->dev_sectors = size;
conf = setup_conf(mddev);
@@ -4507,8 +4501,9 @@ static int raid10_start_reshape(struct mddev *mddev)
oldsize = raid10_size(mddev, 0, 0);
newsize = raid10_size(mddev, 0, conf->geo.raid_disks);
- if (!mddev_is_clustered(mddev)) {
- ret = mddev->bitmap_ops->resize(mddev, newsize, 0, false);
+ if (!mddev_is_clustered(mddev) &&
+ md_bitmap_enabled(mddev, false)) {
+ ret = mddev->bitmap_ops->resize(mddev, newsize, 0);
if (ret)
goto abort;
else
@@ -4530,13 +4525,14 @@ static int raid10_start_reshape(struct mddev *mddev)
MD_FEATURE_RESHAPE_ACTIVE)) || (oldsize == newsize))
goto out;
- ret = mddev->bitmap_ops->resize(mddev, newsize, 0, false);
+ /* cluster can't be set up without bitmap */
+ ret = mddev->bitmap_ops->resize(mddev, newsize, 0);
if (ret)
goto abort;
ret = mddev->cluster_ops->resize_bitmaps(mddev, newsize, oldsize);
if (ret) {
- mddev->bitmap_ops->resize(mddev, oldsize, 0, false);
+ mddev->bitmap_ops->resize(mddev, oldsize, 0);
goto abort;
}
}
@@ -5087,8 +5083,8 @@ static void raid10_finish_reshape(struct mddev *mddev)
return;
if (mddev->delta_disks > 0) {
- if (mddev->recovery_cp > mddev->resync_max_sectors) {
- mddev->recovery_cp = mddev->resync_max_sectors;
+ if (mddev->resync_offset > mddev->resync_max_sectors) {
+ mddev->resync_offset = mddev->resync_max_sectors;
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}
mddev->resync_max_sectors = mddev->array_sectors;
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 3f16ad6904a9..da00a55f7a55 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -165,6 +165,8 @@ enum r10bio_state {
* so that raid10d knows what to do with them.
*/
R10BIO_ReadError,
+/* For bio_split errors, record that bi_end_io was called. */
+ R10BIO_Returned,
/* If a write for this request means we can clear some
* known-bad-block records, we set this flag.
*/
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index c0fb335311aa..56b234683ee6 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -1163,7 +1163,7 @@ static int ppl_load_distributed(struct ppl_log *log)
le64_to_cpu(pplhdr->generation));
/* attempt to recover from log if we are starting a dirty array */
- if (pplhdr && !mddev->pers && mddev->recovery_cp != MaxSector)
+ if (pplhdr && !mddev->pers && mddev->resync_offset != MaxSector)
ret = ppl_recover(log, pplhdr, pplhdr_offset);
/* write empty header if we are starting the array */
@@ -1422,14 +1422,14 @@ int ppl_init_log(struct r5conf *conf)
if (ret) {
goto err;
- } else if (!mddev->pers && mddev->recovery_cp == 0 &&
+ } else if (!mddev->pers && mddev->resync_offset == 0 &&
ppl_conf->recovered_entries > 0 &&
ppl_conf->mismatch_count == 0) {
/*
* If we are starting a dirty array and the recovery succeeds
* without any issues, set the array as clean.
*/
- mddev->recovery_cp = MaxSector;
+ mddev->resync_offset = MaxSector;
set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
} else if (mddev->pers && ppl_conf->mismatch_count > 0) {
/* no mismatch allowed when enabling PPL for a running array */
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7ec61ee7b218..24b32a0c95b4 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3740,7 +3740,7 @@ static int want_replace(struct stripe_head *sh, int disk_idx)
&& !test_bit(Faulty, &rdev->flags)
&& !test_bit(In_sync, &rdev->flags)
&& (rdev->recovery_offset <= sh->sector
- || rdev->mddev->recovery_cp <= sh->sector))
+ || rdev->mddev->resync_offset <= sh->sector))
rv = 1;
return rv;
}
@@ -3832,7 +3832,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
* is missing/faulty, then we need to read everything we can.
*/
if (!force_rcw &&
- sh->sector < sh->raid_conf->mddev->recovery_cp)
+ sh->sector < sh->raid_conf->mddev->resync_offset)
/* reconstruct-write isn't being forced */
return 0;
for (i = 0; i < s->failed && i < 2; i++) {
@@ -4097,7 +4097,8 @@ static int handle_stripe_dirtying(struct r5conf *conf,
int disks)
{
int rmw = 0, rcw = 0, i;
- sector_t recovery_cp = conf->mddev->recovery_cp;
+ struct mddev *mddev = conf->mddev;
+ sector_t resync_offset = mddev->resync_offset;
/* Check whether resync is now happening or should start.
* If yes, then the array is dirty (after unclean shutdown or
@@ -4107,15 +4108,21 @@ static int handle_stripe_dirtying(struct r5conf *conf,
* generate correct data from the parity.
*/
if (conf->rmw_level == PARITY_DISABLE_RMW ||
- (recovery_cp < MaxSector && sh->sector >= recovery_cp &&
+ (resync_offset < MaxSector && sh->sector >= resync_offset &&
s->failed == 0)) {
/* Calculate the real rcw later - for now make it
* look like rcw is cheaper
*/
rcw = 1; rmw = 2;
- pr_debug("force RCW rmw_level=%u, recovery_cp=%llu sh->sector=%llu\n",
- conf->rmw_level, (unsigned long long)recovery_cp,
+ pr_debug("force RCW rmw_level=%u, resync_offset=%llu sh->sector=%llu\n",
+ conf->rmw_level, (unsigned long long)resync_offset,
(unsigned long long)sh->sector);
+ } else if (mddev->bitmap_ops && mddev->bitmap_ops->blocks_synced &&
+ !mddev->bitmap_ops->blocks_synced(mddev, sh->sector)) {
+ /* The initial recovery is not done, must read everything */
+ rcw = 1; rmw = 2;
+ pr_debug("force RCW by lazy recovery, sh->sector=%llu\n",
+ sh->sector);
} else for (i = disks; i--; ) {
/* would I have to read this buffer for read_modify_write */
struct r5dev *dev = &sh->dev[i];
@@ -4148,7 +4155,7 @@ static int handle_stripe_dirtying(struct r5conf *conf,
set_bit(STRIPE_HANDLE, &sh->state);
if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_PREFER_RMW)) && rmw > 0) {
/* prefer read-modify-write, but need to get some data */
- mddev_add_trace_msg(conf->mddev, "raid5 rmw %llu %d",
+ mddev_add_trace_msg(mddev, "raid5 rmw %llu %d",
sh->sector, rmw);
for (i = disks; i--; ) {
@@ -4227,8 +4234,8 @@ static int handle_stripe_dirtying(struct r5conf *conf,
set_bit(STRIPE_DELAYED, &sh->state);
}
}
- if (rcw && !mddev_is_dm(conf->mddev))
- blk_add_trace_msg(conf->mddev->gendisk->queue,
+ if (rcw && !mddev_is_dm(mddev))
+ blk_add_trace_msg(mddev->gendisk->queue,
"raid5 rcw %llu %d %d %d",
(unsigned long long)sh->sector, rcw, qread,
test_bit(STRIPE_DELAYED, &sh->state));
@@ -4698,10 +4705,21 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
}
} else if (test_bit(In_sync, &rdev->flags))
set_bit(R5_Insync, &dev->flags);
- else if (sh->sector + RAID5_STRIPE_SECTORS(conf) <= rdev->recovery_offset)
- /* in sync if before recovery_offset */
- set_bit(R5_Insync, &dev->flags);
- else if (test_bit(R5_UPTODATE, &dev->flags) &&
+ else if (sh->sector + RAID5_STRIPE_SECTORS(conf) <=
+ rdev->recovery_offset) {
+ /*
+ * in sync if:
+ * - normal IO, or
+ * - resync IO that is not lazy recovery
+ *
+ * For lazy recovery, we have to mark the rdev without
+ * In_sync as failed, to build initial xor data.
+ */
+ if (!test_bit(STRIPE_SYNCING, &sh->state) ||
+ !test_bit(MD_RECOVERY_LAZY_RECOVER,
+ &conf->mddev->recovery))
+ set_bit(R5_Insync, &dev->flags);
+ } else if (test_bit(R5_UPTODATE, &dev->flags) &&
test_bit(R5_Expanded, &dev->flags))
/* If we've reshaped into here, we assume it is Insync.
* We will shortly update recovery_offset to make
@@ -4770,14 +4788,14 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
if (test_bit(STRIPE_SYNCING, &sh->state)) {
/* If there is a failed device being replaced,
* we must be recovering.
- * else if we are after recovery_cp, we must be syncing
+ * else if we are after resync_offset, we must be syncing
* else if MD_RECOVERY_REQUESTED is set, we also are syncing.
* else we can only be replacing
* sync and recovery both need to read all devices, and so
* use the same flag.
*/
if (do_recovery ||
- sh->sector >= conf->mddev->recovery_cp ||
+ sh->sector >= conf->mddev->resync_offset ||
test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery)))
s->syncing = 1;
else
@@ -5468,17 +5486,17 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
{
- struct bio *split;
sector_t sector = raid_bio->bi_iter.bi_sector;
unsigned chunk_sects = mddev->chunk_sectors;
unsigned sectors = chunk_sects - (sector & (chunk_sects-1));
if (sectors < bio_sectors(raid_bio)) {
struct r5conf *conf = mddev->private;
- split = bio_split(raid_bio, sectors, GFP_NOIO, &conf->bio_split);
- bio_chain(split, raid_bio);
- submit_bio_noacct(raid_bio);
- raid_bio = split;
+
+ raid_bio = bio_submit_split_bioset(raid_bio, sectors,
+ &conf->bio_split);
+ if (!raid_bio)
+ return NULL;
}
if (!raid5_read_one_chunk(mddev, raid_bio))
@@ -6492,11 +6510,12 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
}
if (mddev->curr_resync < max_sector) /* aborted */
- mddev->bitmap_ops->end_sync(mddev, mddev->curr_resync,
- &sync_blocks);
+ md_bitmap_end_sync(mddev, mddev->curr_resync,
+ &sync_blocks);
else /* completed sync */
conf->fullsync = 0;
- mddev->bitmap_ops->close_sync(mddev);
+ if (md_bitmap_enabled(mddev, false))
+ mddev->bitmap_ops->close_sync(mddev);
return 0;
}
@@ -6525,8 +6544,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
}
if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
!conf->fullsync &&
- !mddev->bitmap_ops->start_sync(mddev, sector_nr, &sync_blocks,
- true) &&
+ !md_bitmap_start_sync(mddev, sector_nr, &sync_blocks, true) &&
sync_blocks >= RAID5_STRIPE_SECTORS(conf)) {
/* we can skip this block, and probably more */
do_div(sync_blocks, RAID5_STRIPE_SECTORS(conf));
@@ -6535,7 +6553,8 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
return sync_blocks * RAID5_STRIPE_SECTORS(conf);
}
- mddev->bitmap_ops->cond_end_sync(mddev, sector_nr, false);
+ if (md_bitmap_enabled(mddev, false))
+ mddev->bitmap_ops->cond_end_sync(mddev, sector_nr, false);
sh = raid5_get_active_stripe(conf, NULL, sector_nr,
R5_GAS_NOBLOCK);
@@ -6557,9 +6576,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
still_degraded = true;
}
- mddev->bitmap_ops->start_sync(mddev, sector_nr, &sync_blocks,
- still_degraded);
-
+ md_bitmap_start_sync(mddev, sector_nr, &sync_blocks, still_degraded);
set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
set_bit(STRIPE_HANDLE, &sh->state);
@@ -6763,7 +6780,8 @@ static void raid5d(struct md_thread *thread)
/* Now is a good time to flush some bitmap updates */
conf->seq_flush++;
spin_unlock_irq(&conf->device_lock);
- mddev->bitmap_ops->unplug(mddev, true);
+ if (md_bitmap_enabled(mddev, true))
+ mddev->bitmap_ops->unplug(mddev, true);
spin_lock_irq(&conf->device_lock);
conf->seq_write = conf->seq_flush;
activate_bit_delay(conf, conf->temp_inactive_list);
@@ -7732,6 +7750,7 @@ static int raid5_set_limits(struct mddev *mddev)
lim.features |= BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE;
lim.discard_granularity = stripe;
lim.max_write_zeroes_sectors = 0;
+ lim.max_hw_wzeroes_unmap_sectors = 0;
mddev_stack_rdev_limits(mddev, &lim, 0);
rdev_for_each(rdev, mddev)
queue_limits_stack_bdev(&lim, rdev->bdev, rdev->new_data_offset,
@@ -7780,7 +7799,7 @@ static int raid5_run(struct mddev *mddev)
int first = 1;
int ret = -EIO;
- if (mddev->recovery_cp != MaxSector)
+ if (mddev->resync_offset != MaxSector)
pr_notice("md/raid:%s: not clean -- starting background reconstruction\n",
mdname(mddev));
@@ -7921,7 +7940,7 @@ static int raid5_run(struct mddev *mddev)
mdname(mddev));
mddev->ro = 1;
set_disk_ro(mddev->gendisk, 1);
- } else if (mddev->recovery_cp == MaxSector)
+ } else if (mddev->resync_offset == MaxSector)
set_bit(MD_JOURNAL_CLEAN, &mddev->flags);
}
@@ -7988,7 +8007,7 @@ static int raid5_run(struct mddev *mddev)
mddev->resync_max_sectors = mddev->dev_sectors;
if (mddev->degraded > dirty_parity_disks &&
- mddev->recovery_cp != MaxSector) {
+ mddev->resync_offset != MaxSector) {
if (test_bit(MD_HAS_PPL, &mddev->flags))
pr_crit("md/raid:%s: starting dirty degraded array with PPL.\n",
mdname(mddev));
@@ -8312,7 +8331,6 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
*/
sector_t newsize;
struct r5conf *conf = mddev->private;
- int ret;
if (raid5_has_log(conf) || raid5_has_ppl(conf))
return -EINVAL;
@@ -8322,14 +8340,17 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
mddev->array_sectors > newsize)
return -EINVAL;
- ret = mddev->bitmap_ops->resize(mddev, sectors, 0, false);
- if (ret)
- return ret;
+ if (md_bitmap_enabled(mddev, false)) {
+ int ret = mddev->bitmap_ops->resize(mddev, sectors, 0);
+
+ if (ret)
+ return ret;
+ }
md_set_array_sectors(mddev, newsize);
if (sectors > mddev->dev_sectors &&
- mddev->recovery_cp > mddev->dev_sectors) {
- mddev->recovery_cp = mddev->dev_sectors;
+ mddev->resync_offset > mddev->dev_sectors) {
+ mddev->resync_offset = mddev->dev_sectors;
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}
mddev->dev_sectors = sectors;
@@ -8423,7 +8444,7 @@ static int raid5_start_reshape(struct mddev *mddev)
return -EINVAL;
/* raid5 can't handle concurrent reshape and recovery */
- if (mddev->recovery_cp < MaxSector)
+ if (mddev->resync_offset < MaxSector)
return -EBUSY;
for (i = 0; i < conf->raid_disks; i++)
if (conf->disks[i].replacement)
@@ -8648,7 +8669,7 @@ static void *raid45_takeover_raid0(struct mddev *mddev, int level)
mddev->raid_disks += 1;
mddev->delta_disks = 1;
/* make sure it will not be marked as dirty */
- mddev->recovery_cp = MaxSector;
+ mddev->resync_offset = MaxSector;
return setup_conf(mddev);
}
diff --git a/drivers/media/cec/core/cec-core.c b/drivers/media/cec/core/cec-core.c
index e10bd588a586..d7259599029f 100644
--- a/drivers/media/cec/core/cec-core.c
+++ b/drivers/media/cec/core/cec-core.c
@@ -439,6 +439,6 @@ static void __exit cec_devnode_exit(void)
subsys_initcall(cec_devnode_init);
module_exit(cec_devnode_exit)
-MODULE_AUTHOR("Hans Verkuil <hansverk@cisco.com>");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org>");
MODULE_DESCRIPTION("Device node registration for cec drivers");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/cec/platform/cec-gpio/cec-gpio.c b/drivers/media/cec/platform/cec-gpio/cec-gpio.c
index 3c27789d8657..842555ed42c7 100644
--- a/drivers/media/cec/platform/cec-gpio/cec-gpio.c
+++ b/drivers/media/cec/platform/cec-gpio/cec-gpio.c
@@ -291,6 +291,6 @@ static struct platform_driver cec_gpio_pdrv = {
module_platform_driver(cec_gpio_pdrv);
-MODULE_AUTHOR("Hans Verkuil <hansverk@cisco.com>");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CEC GPIO driver");
diff --git a/drivers/media/cec/platform/stm32/stm32-cec.c b/drivers/media/cec/platform/stm32/stm32-cec.c
index fea2d65acffc..1ec0cece0a5b 100644
--- a/drivers/media/cec/platform/stm32/stm32-cec.c
+++ b/drivers/media/cec/platform/stm32/stm32-cec.c
@@ -248,7 +248,6 @@ static const struct regmap_config stm32_cec_regmap_cfg = {
.val_bits = 32,
.reg_stride = sizeof(u32),
.max_register = 0x14,
- .fast_io = true,
};
static int stm32_cec_probe(struct platform_device *pdev)
diff --git a/drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile b/drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile
index 2e8f7f60263f..08d58524419f 100644
--- a/drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile
+++ b/drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile
@@ -1,8 +1,2 @@
extron-da-hd-4k-plus-cec-objs := extron-da-hd-4k-plus.o cec-splitter.o
obj-$(CONFIG_USB_EXTRON_DA_HD_4K_PLUS_CEC) := extron-da-hd-4k-plus-cec.o
-
-all:
- $(MAKE) -C $(KDIR) M=$(shell pwd) modules
-
-install:
- $(MAKE) -C $(KDIR) M=$(shell pwd) modules_install
diff --git a/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c b/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c
index 41d019b01ec0..e2eff17952ab 100644
--- a/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c
+++ b/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c
@@ -28,7 +28,7 @@
#include "extron-da-hd-4k-plus.h"
-MODULE_AUTHOR("Hans Verkuil <hansverk@cisco.com>");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org>");
MODULE_DESCRIPTION("Extron DA HD 4K PLUS HDMI CEC driver");
MODULE_LICENSE("GPL");
@@ -1252,7 +1252,7 @@ static int extron_s_output(struct file *file, void *priv, unsigned int o)
return o ? -EINVAL : 0;
}
-static int extron_g_edid(struct file *file, void *_fh,
+static int extron_g_edid(struct file *file, void *priv,
struct v4l2_edid *edid)
{
struct extron_port *port = video_drvdata(file);
@@ -1280,7 +1280,7 @@ static int extron_g_edid(struct file *file, void *_fh,
return 0;
}
-static int extron_s_edid(struct file *file, void *_fh, struct v4l2_edid *edid)
+static int extron_s_edid(struct file *file, void *priv, struct v4l2_edid *edid)
{
struct extron_port *port = video_drvdata(file);
diff --git a/drivers/media/cec/usb/pulse8/pulse8-cec.c b/drivers/media/cec/usb/pulse8/pulse8-cec.c
index 171366fe3544..60569f1670fe 100644
--- a/drivers/media/cec/usb/pulse8/pulse8-cec.c
+++ b/drivers/media/cec/usb/pulse8/pulse8-cec.c
@@ -2,7 +2,7 @@
/*
* Pulse Eight HDMI CEC driver
*
- * Copyright 2016 Hans Verkuil <hverkuil@xs4all.nl
+ * Copyright 2016 Hans Verkuil <hverkuil@kernel.org>
*/
/*
@@ -41,7 +41,7 @@
#include <media/cec.h>
-MODULE_AUTHOR("Hans Verkuil <hverkuil@xs4all.nl>");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org>");
MODULE_DESCRIPTION("Pulse Eight HDMI CEC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/cec/usb/rainshadow/rainshadow-cec.c b/drivers/media/cec/usb/rainshadow/rainshadow-cec.c
index 6f8d6797c614..08f58456d682 100644
--- a/drivers/media/cec/usb/rainshadow/rainshadow-cec.c
+++ b/drivers/media/cec/usb/rainshadow/rainshadow-cec.c
@@ -2,7 +2,7 @@
/*
* RainShadow Tech HDMI CEC driver
*
- * Copyright 2016 Hans Verkuil <hverkuil@xs4all.nl
+ * Copyright 2016 Hans Verkuil <hverkuil@kernel.org>
*/
/*
@@ -31,7 +31,7 @@
#include <media/cec.h>
-MODULE_AUTHOR("Hans Verkuil <hverkuil@xs4all.nl>");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org>");
MODULE_DESCRIPTION("RainShadow Tech HDMI CEC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/common/b2c2/flexcop-sram.c b/drivers/media/common/b2c2/flexcop-sram.c
index d97962eb0112..dba03769f263 100644
--- a/drivers/media/common/b2c2/flexcop-sram.c
+++ b/drivers/media/common/b2c2/flexcop-sram.c
@@ -352,7 +352,7 @@ static int flexcop_sram_detect(struct flexcop_device *fc)
sram_set_size(adapter, 0x10000);
sram_init(adapter);
write_reg_dw(adapter, 0x208, tmp);
- dprintk("%s: SRAM detection failed. Set to 32K \n", __func__);
+ dprintk("%s: SRAM detection failed. Set to 32K\n", __func__);
return 0;
}
diff --git a/drivers/media/common/b2c2/flexcop.c b/drivers/media/common/b2c2/flexcop.c
index e7a88a2d248c..8506de48ba45 100644
--- a/drivers/media/common/b2c2/flexcop.c
+++ b/drivers/media/common/b2c2/flexcop.c
@@ -170,7 +170,7 @@ static void flexcop_reset(struct flexcop_device *fc)
flexcop_ibi_value v210, v204;
/* reset the flexcop itself */
- fc->write_ibi_reg(fc,ctrl_208,ibi_zero);
+ fc->write_ibi_reg(fc, ctrl_208, ibi_zero);
v210.raw = 0;
v210.sw_reset_210.reset_block_000 = 1;
@@ -183,17 +183,17 @@ static void flexcop_reset(struct flexcop_device *fc)
v210.sw_reset_210.reset_block_700 = 1;
v210.sw_reset_210.Block_reset_enable = 0xb2;
v210.sw_reset_210.Special_controls = 0xc259;
- fc->write_ibi_reg(fc,sw_reset_210,v210);
+ fc->write_ibi_reg(fc, sw_reset_210, v210);
msleep(1);
/* reset the peripheral devices */
- v204 = fc->read_ibi_reg(fc,misc_204);
+ v204 = fc->read_ibi_reg(fc, misc_204);
v204.misc_204.Per_reset_sig = 0;
- fc->write_ibi_reg(fc,misc_204,v204);
+ fc->write_ibi_reg(fc, misc_204, v204);
msleep(1);
v204.misc_204.Per_reset_sig = 1;
- fc->write_ibi_reg(fc,misc_204,v204);
+ fc->write_ibi_reg(fc, misc_204, v204);
}
void flexcop_reset_block_300(struct flexcop_device *fc)
@@ -202,13 +202,13 @@ void flexcop_reset_block_300(struct flexcop_device *fc)
v210 = fc->read_ibi_reg(fc, sw_reset_210);
deb_rdump("208: %08x, 210: %08x\n", v208_save.raw, v210.raw);
- fc->write_ibi_reg(fc,ctrl_208,ibi_zero);
+ fc->write_ibi_reg(fc, ctrl_208, ibi_zero);
v210.sw_reset_210.reset_block_300 = 1;
v210.sw_reset_210.Block_reset_enable = 0xb2;
- fc->write_ibi_reg(fc,sw_reset_210,v210);
- fc->write_ibi_reg(fc,ctrl_208,v208_save);
+ fc->write_ibi_reg(fc, sw_reset_210, v210);
+ fc->write_ibi_reg(fc, ctrl_208, v208_save);
}
struct flexcop_device *flexcop_device_kmalloc(size_t bus_specific_len)
@@ -266,8 +266,8 @@ int flexcop_device_initialize(struct flexcop_device *fc)
if (fc->get_mac_addr(fc, 0) == 0) {
u8 *b = fc->dvb_adapter.proposed_mac;
info("MAC address = %pM", b);
- flexcop_set_mac_filter(fc,b);
- flexcop_mac_filter_ctrl(fc,1);
+ flexcop_set_mac_filter(fc, b);
+ flexcop_mac_filter_ctrl(fc, 1);
} else
warn("reading of MAC address failed.\n");
@@ -275,7 +275,7 @@ int flexcop_device_initialize(struct flexcop_device *fc)
if (ret)
goto error;
- flexcop_device_name(fc,"initialization of","complete");
+ flexcop_device_name(fc, "initialization of", "complete");
return 0;
error:
diff --git a/drivers/media/common/cx2341x.c b/drivers/media/common/cx2341x.c
index 1392bd6b0026..1ee159ef7f38 100644
--- a/drivers/media/common/cx2341x.c
+++ b/drivers/media/common/cx2341x.c
@@ -2,7 +2,7 @@
/*
* cx2341x - generic code for cx23415/6/8 based devices
*
- * Copyright (C) 2006 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2006 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index 1cd26faee503..d911021c1bb0 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -973,18 +973,14 @@ EXPORT_SYMBOL_GPL(vb2_queue_change_type);
__poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
{
- struct video_device *vfd = video_devdata(file);
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
__poll_t res;
res = vb2_core_poll(q, file, wait);
- if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
- struct v4l2_fh *fh = file->private_data;
-
- poll_wait(file, &fh->wait, wait);
- if (v4l2_event_pending(fh))
- res |= EPOLLPRI;
- }
+ poll_wait(file, &fh->wait, wait);
+ if (v4l2_event_pending(fh))
+ res |= EPOLLPRI;
return res;
}
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 2ef2ff2a38ff..bcc97ca86ed5 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -163,7 +163,7 @@ config DVB_CX24123
A DVB-S tuner module. Say Y when you want to support this frontend.
config DVB_DS3000
- tristate "Montage Tehnology DS3000 based"
+ tristate "Montage Technology DS3000 based"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
@@ -270,7 +270,7 @@ config DVB_TDA826X
A DVB-S silicon tuner module. Say Y when you want to support this tuner.
config DVB_TS2020
- tristate "Montage Tehnology TS2020 based tuners"
+ tristate "Montage Technology TS2020 based tuners"
depends on DVB_CORE && I2C
select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
index a31a8a6a4946..5aa3d45a691a 100644
--- a/drivers/media/dvb-frontends/cxd2820r_core.c
+++ b/drivers/media/dvb-frontends/cxd2820r_core.c
@@ -651,7 +651,7 @@ static int cxd2820r_probe(struct i2c_client *client)
priv->gpio_chip.parent = &client->dev;
priv->gpio_chip.owner = THIS_MODULE;
priv->gpio_chip.direction_output = cxd2820r_gpio_direction_output;
- priv->gpio_chip.set_rv = cxd2820r_gpio_set;
+ priv->gpio_chip.set = cxd2820r_gpio_set;
priv->gpio_chip.get = cxd2820r_gpio_get;
priv->gpio_chip.base = -1; /* Dynamic allocation */
priv->gpio_chip.ngpio = GPIO_COUNT;
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 6237fe804a5c..cdd7ba5da0d5 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -27,7 +27,7 @@ config VIDEO_IR_I2C
menuconfig VIDEO_CAMERA_SENSOR
bool "Camera sensor devices"
- depends on MEDIA_CAMERA_SUPPORT && I2C
+ depends on MEDIA_CAMERA_SUPPORT && I2C && HAVE_CLK
select MEDIA_CONTROLLER
select V4L2_FWNODE
select VIDEO_V4L2_SUBDEV_API
@@ -70,6 +70,16 @@ config VIDEO_GC0308
To compile this driver as a module, choose M here: the
module will be called gc0308.
+config VIDEO_GC0310
+ tristate "GalaxyCore GC0310 sensor support"
+ select V4L2_CCI_I2C
+ help
+ This is a Video4Linux2 sensor-level driver for the GalaxyCore
+ GC0310 0.3MP sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gc0310.
+
config VIDEO_GC05A2
tristate "GalaxyCore gc05a2 sensor support"
select V4L2_CCI_I2C
@@ -317,6 +327,7 @@ config VIDEO_MT9V011
config VIDEO_MT9V032
tristate "Micron MT9V032 sensor support"
+ depends on OF
select REGMAP_I2C
help
This is a Video4Linux2 sensor driver for the Micron
@@ -340,6 +351,16 @@ config VIDEO_OG01A1B
To compile this driver as a module, choose M here: the
module will be called og01a1b.
+config VIDEO_OG0VE1B
+ tristate "OmniVision OG0VE1B sensor support"
+ select V4L2_CCI_I2C
+ help
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OG0VE1B camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called og0ve1b.
+
config VIDEO_OV01A10
tristate "OmniVision OV01A10 sensor support"
help
@@ -446,6 +467,16 @@ config VIDEO_OV2685
To compile this driver as a module, choose M here: the
module will be called ov2685.
+config VIDEO_OV2735
+ tristate "OmniVision OV2735 sensor support"
+ select V4L2_CCI_I2C
+ help
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OV2735 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ov2735.
+
config VIDEO_OV2740
tristate "OmniVision OV2740 sensor support"
depends on ACPI || COMPILE_TEST
@@ -542,24 +573,25 @@ config VIDEO_OV5695
To compile this driver as a module, choose M here: the
module will be called ov5695.
-config VIDEO_OV64A40
- tristate "OmniVision OV64A40 sensor support"
+config VIDEO_OV6211
+ tristate "OmniVision OV6211 sensor support"
select V4L2_CCI_I2C
help
This is a Video4Linux2 sensor driver for the OmniVision
- OV64A40 camera.
+ OV6211 camera.
To compile this driver as a module, choose M here: the
- module will be called ov64a40.
+ module will be called ov6211.
-config VIDEO_OV6650
- tristate "OmniVision OV6650 sensor support"
+config VIDEO_OV64A40
+ tristate "OmniVision OV64A40 sensor support"
+ select V4L2_CCI_I2C
help
This is a Video4Linux2 sensor driver for the OmniVision
- OV6650 camera.
+ OV64A40 camera.
To compile this driver as a module, choose M here: the
- module will be called ov6650.
+ module will be called ov64a40.
config VIDEO_OV7251
tristate "OmniVision OV7251 sensor support"
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index 5873d29433ee..57cdd8dc96f6 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_VIDEO_DW9768) += dw9768.o
obj-$(CONFIG_VIDEO_DW9807_VCM) += dw9807-vcm.o
obj-$(CONFIG_VIDEO_ET8EK8) += et8ek8/
obj-$(CONFIG_VIDEO_GC0308) += gc0308.o
+obj-$(CONFIG_VIDEO_GC0310) += gc0310.o
obj-$(CONFIG_VIDEO_GC05A2) += gc05a2.o
obj-$(CONFIG_VIDEO_GC08A3) += gc08a3.o
obj-$(CONFIG_VIDEO_GC2145) += gc2145.o
@@ -81,6 +82,7 @@ obj-$(CONFIG_VIDEO_MT9V011) += mt9v011.o
obj-$(CONFIG_VIDEO_MT9V032) += mt9v032.o
obj-$(CONFIG_VIDEO_MT9V111) += mt9v111.o
obj-$(CONFIG_VIDEO_OG01A1B) += og01a1b.o
+obj-$(CONFIG_VIDEO_OG0VE1B) += og0ve1b.o
obj-$(CONFIG_VIDEO_OV01A10) += ov01a10.o
obj-$(CONFIG_VIDEO_OV02A10) += ov02a10.o
obj-$(CONFIG_VIDEO_OV02C10) += ov02c10.o
@@ -93,6 +95,7 @@ obj-$(CONFIG_VIDEO_OV2640) += ov2640.o
obj-$(CONFIG_VIDEO_OV2659) += ov2659.o
obj-$(CONFIG_VIDEO_OV2680) += ov2680.o
obj-$(CONFIG_VIDEO_OV2685) += ov2685.o
+obj-$(CONFIG_VIDEO_OV2735) += ov2735.o
obj-$(CONFIG_VIDEO_OV2740) += ov2740.o
obj-$(CONFIG_VIDEO_OV4689) += ov4689.o
obj-$(CONFIG_VIDEO_OV5640) += ov5640.o
@@ -103,8 +106,8 @@ obj-$(CONFIG_VIDEO_OV5670) += ov5670.o
obj-$(CONFIG_VIDEO_OV5675) += ov5675.o
obj-$(CONFIG_VIDEO_OV5693) += ov5693.o
obj-$(CONFIG_VIDEO_OV5695) += ov5695.o
+obj-$(CONFIG_VIDEO_OV6211) += ov6211.o
obj-$(CONFIG_VIDEO_OV64A40) += ov64a40.o
-obj-$(CONFIG_VIDEO_OV6650) += ov6650.o
obj-$(CONFIG_VIDEO_OV7251) += ov7251.o
obj-$(CONFIG_VIDEO_OV7640) += ov7640.o
obj-$(CONFIG_VIDEO_OV7670) += ov7670.o
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index 5d90b8ab9b6d..378f4e6af12c 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -214,7 +214,6 @@ struct adv7180_state {
struct gpio_desc *pwdn_gpio;
struct gpio_desc *rst_gpio;
v4l2_std_id curr_norm;
- bool powered;
bool streaming;
u8 input;
@@ -274,6 +273,38 @@ static int adv7180_vpp_write(struct adv7180_state *state, unsigned int reg,
return i2c_smbus_write_byte_data(state->vpp_client, reg, value);
}
+static int adv7180_set_power(struct adv7180_state *state, bool on)
+{
+ u8 val;
+ int ret;
+
+ if (on)
+ val = ADV7180_PWR_MAN_ON;
+ else
+ val = ADV7180_PWR_MAN_OFF;
+
+ ret = adv7180_write(state, ADV7180_REG_PWR_MAN, val);
+ if (ret)
+ return ret;
+
+ if (state->chip_info->flags & ADV7180_FLAG_MIPI_CSI2) {
+ if (on) {
+ adv7180_csi_write(state, 0xDE, 0x02);
+ adv7180_csi_write(state, 0xD2, 0xF7);
+ adv7180_csi_write(state, 0xD8, 0x65);
+ adv7180_csi_write(state, 0xE0, 0x09);
+ adv7180_csi_write(state, 0x2C, 0x00);
+ if (state->field == V4L2_FIELD_NONE)
+ adv7180_csi_write(state, 0x1D, 0x80);
+ adv7180_csi_write(state, 0x00, 0x00);
+ } else {
+ adv7180_csi_write(state, 0x00, 0x80);
+ }
+ }
+
+ return 0;
+}
+
static v4l2_std_id adv7180_std_to_v4l2(u8 status1)
{
/* in case V4L2_IN_ST_NO_SIGNAL */
@@ -357,32 +388,27 @@ static inline struct adv7180_state *to_state(struct v4l2_subdev *sd)
static int adv7180_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
{
struct adv7180_state *state = to_state(sd);
- int err = mutex_lock_interruptible(&state->mutex);
- if (err)
- return err;
-
- if (state->streaming) {
- err = -EBUSY;
- goto unlock;
- }
+ int ret;
- err = adv7180_set_video_standard(state,
- ADV7180_STD_AD_PAL_BG_NTSC_J_SECAM);
- if (err)
- goto unlock;
+ guard(mutex)(&state->mutex);
- msleep(100);
- __adv7180_status(state, NULL, std);
+ /*
+ * We can't sample the standard while the device is streaming, as that
+ * would touch the VID_SEL reg and interfere with the capture session.
+ */
+ if (state->streaming)
+ return -EBUSY;
- err = v4l2_std_to_adv7180(state->curr_norm);
- if (err < 0)
- goto unlock;
+ /* Set the standard to autodetect PAL B/G/H/I/D, NTSC J or SECAM */
+ ret = adv7180_set_video_standard(state,
+ ADV7180_STD_AD_PAL_BG_NTSC_J_SECAM);
+ if (ret)
+ return ret;
- err = adv7180_set_video_standard(state, err);
+ /* Allow some time for the autodetection to run. */
+ msleep(100);
-unlock:
- mutex_unlock(&state->mutex);
- return err;
+ return __adv7180_status(state, NULL, std);
}
static int adv7180_s_routing(struct v4l2_subdev *sd, u32 input,
@@ -437,22 +463,18 @@ static int adv7180_program_std(struct adv7180_state *state)
static int adv7180_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
{
struct adv7180_state *state = to_state(sd);
- int ret = mutex_lock_interruptible(&state->mutex);
+ int ret;
- if (ret)
- return ret;
+ guard(mutex)(&state->mutex);
/* Make sure we can support this std */
ret = v4l2_std_to_adv7180(std);
if (ret < 0)
- goto out;
+ return ret;
state->curr_norm = std;
- ret = adv7180_program_std(state);
-out:
- mutex_unlock(&state->mutex);
- return ret;
+ return 0;
}
static int adv7180_g_std(struct v4l2_subdev *sd, v4l2_std_id *norm)
@@ -514,55 +536,6 @@ static void adv7180_set_reset_pin(struct adv7180_state *state, bool on)
}
}
-static int adv7180_set_power(struct adv7180_state *state, bool on)
-{
- u8 val;
- int ret;
-
- if (on)
- val = ADV7180_PWR_MAN_ON;
- else
- val = ADV7180_PWR_MAN_OFF;
-
- ret = adv7180_write(state, ADV7180_REG_PWR_MAN, val);
- if (ret)
- return ret;
-
- if (state->chip_info->flags & ADV7180_FLAG_MIPI_CSI2) {
- if (on) {
- adv7180_csi_write(state, 0xDE, 0x02);
- adv7180_csi_write(state, 0xD2, 0xF7);
- adv7180_csi_write(state, 0xD8, 0x65);
- adv7180_csi_write(state, 0xE0, 0x09);
- adv7180_csi_write(state, 0x2C, 0x00);
- if (state->field == V4L2_FIELD_NONE)
- adv7180_csi_write(state, 0x1D, 0x80);
- adv7180_csi_write(state, 0x00, 0x00);
- } else {
- adv7180_csi_write(state, 0x00, 0x80);
- }
- }
-
- return 0;
-}
-
-static int adv7180_s_power(struct v4l2_subdev *sd, int on)
-{
- struct adv7180_state *state = to_state(sd);
- int ret;
-
- ret = mutex_lock_interruptible(&state->mutex);
- if (ret)
- return ret;
-
- ret = adv7180_set_power(state, on);
- if (ret == 0)
- state->powered = on;
-
- mutex_unlock(&state->mutex);
- return ret;
-}
-
static const char * const test_pattern_menu[] = {
"Single color",
"Color bars",
@@ -601,11 +574,11 @@ static int adv7180_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = to_adv7180_sd(ctrl);
struct adv7180_state *state = to_state(sd);
- int ret = mutex_lock_interruptible(&state->mutex);
+ int ret = 0;
int val;
- if (ret)
- return ret;
+ lockdep_assert_held(&state->mutex);
+
val = ctrl->val;
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
@@ -647,7 +620,6 @@ static int adv7180_s_ctrl(struct v4l2_ctrl *ctrl)
ret = -EINVAL;
}
- mutex_unlock(&state->mutex);
return ret;
}
@@ -668,6 +640,7 @@ static const struct v4l2_ctrl_config adv7180_ctrl_fast_switch = {
static int adv7180_init_controls(struct adv7180_state *state)
{
v4l2_ctrl_handler_init(&state->ctrl_hdl, 4);
+ state->ctrl_hdl.lock = &state->mutex;
v4l2_ctrl_new_std(&state->ctrl_hdl, &adv7180_ctrl_ops,
V4L2_CID_BRIGHTNESS, ADV7180_BRI_MIN,
@@ -700,7 +673,6 @@ static int adv7180_init_controls(struct adv7180_state *state)
v4l2_ctrl_handler_free(&state->ctrl_hdl);
return err;
}
- v4l2_ctrl_handler_setup(&state->ctrl_hdl);
return 0;
}
@@ -812,12 +784,7 @@ static int adv7180_set_pad_format(struct v4l2_subdev *sd,
ret = adv7180_mbus_fmt(sd, &format->format);
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
- if (state->field != format->format.field) {
- state->field = format->format.field;
- adv7180_set_power(state, false);
- adv7180_set_field_mode(state);
- adv7180_set_power(state, true);
- }
+ state->field = format->format.field;
} else {
framefmt = v4l2_subdev_state_get_format(sd_state, 0);
*framefmt = format->format;
@@ -874,23 +841,117 @@ static int adv7180_g_tvnorms(struct v4l2_subdev *sd, v4l2_std_id *norm)
return 0;
}
-static int adv7180_s_stream(struct v4l2_subdev *sd, int enable)
+static int init_device(struct adv7180_state *state)
{
- struct adv7180_state *state = to_state(sd);
int ret;
- /* It's always safe to stop streaming, no need to take the lock */
- if (!enable) {
- state->streaming = enable;
- return 0;
+ lockdep_assert_held(&state->mutex);
+
+ ret = adv7180_program_std(state);
+ if (ret)
+ return ret;
+
+ adv7180_set_field_mode(state);
+
+ __v4l2_ctrl_handler_setup(&state->ctrl_hdl);
+
+ return ret;
+}
+
+static int adv7180_reset_device(struct adv7180_state *state)
+{
+ int ret;
+
+ lockdep_assert_held(&state->mutex);
+
+ adv7180_set_power_pin(state, true);
+ adv7180_set_reset_pin(state, false);
+
+ adv7180_write(state, ADV7180_REG_PWR_MAN, ADV7180_PWR_MAN_RES);
+ usleep_range(5000, 10000);
+
+ /*
+ * If the device's decoder is powered on after reset, power it off so
+ * the device can be configured.
+ */
+ if (state->chip_info->flags & ADV7180_FLAG_RESET_POWERED)
+ adv7180_set_power(state, false);
+
+ ret = state->chip_info->init(state);
+ if (ret)
+ return ret;
+
+ ret = init_device(state);
+ if (ret)
+ return ret;
+
+ /* register for interrupts */
+ if (state->irq > 0) {
+ /* config the Interrupt pin to be active low */
+ ret = adv7180_write(state, ADV7180_REG_ICONF1,
+ ADV7180_ICONF1_ACTIVE_LOW |
+ ADV7180_ICONF1_PSYNC_ONLY);
+ if (ret < 0)
+ return ret;
+
+ ret = adv7180_write(state, ADV7180_REG_IMR1, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = adv7180_write(state, ADV7180_REG_IMR2, 0);
+ if (ret < 0)
+ return ret;
+
+ /* enable AD change interrupts */
+ ret = adv7180_write(state, ADV7180_REG_IMR3,
+ ADV7180_IRQ3_AD_CHANGE);
+ if (ret < 0)
+ return ret;
+
+ ret = adv7180_write(state, ADV7180_REG_IMR4, 0);
+ if (ret < 0)
+ return ret;
}
+ /*
+ * If the device's decoder is powered on after reset, restore the power
+ * after configuration. This preserves the driver's existing behavior;
+ * not doing this results in the first 35+ frames captured being garbage.
+ */
+ if (state->chip_info->flags & ADV7180_FLAG_RESET_POWERED)
+ adv7180_set_power(state, true);
+
+ return 0;
+}
+
+static int adv7180_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct adv7180_state *state = to_state(sd);
+ int ret;
+
/* Must wait until querystd released the lock */
- ret = mutex_lock_interruptible(&state->mutex);
+ guard(mutex)(&state->mutex);
+
+ /*
+ * Always power off the decoder even if streaming is to be enabled, the
+ * decoder needs to be off for the device to be configured.
+ */
+ ret = adv7180_set_power(state, false);
if (ret)
return ret;
+
+ if (enable) {
+ ret = init_device(state);
+ if (ret)
+ return ret;
+
+ ret = adv7180_set_power(state, true);
+ if (ret)
+ return ret;
+ }
+
state->streaming = enable;
- mutex_unlock(&state->mutex);
+
return 0;
}
@@ -919,7 +980,6 @@ static const struct v4l2_subdev_video_ops adv7180_video_ops = {
};
static const struct v4l2_subdev_core_ops adv7180_core_ops = {
- .s_power = adv7180_s_power,
.subscribe_event = adv7180_subscribe_event,
.unsubscribe_event = v4l2_event_subdev_unsubscribe,
};
@@ -1343,62 +1403,6 @@ static const struct adv7180_chip_info adv7282_m_info = {
.select_input = adv7182_select_input,
};
-static int init_device(struct adv7180_state *state)
-{
- int ret;
-
- mutex_lock(&state->mutex);
-
- adv7180_set_power_pin(state, true);
- adv7180_set_reset_pin(state, false);
-
- adv7180_write(state, ADV7180_REG_PWR_MAN, ADV7180_PWR_MAN_RES);
- usleep_range(5000, 10000);
-
- ret = state->chip_info->init(state);
- if (ret)
- goto out_unlock;
-
- ret = adv7180_program_std(state);
- if (ret)
- goto out_unlock;
-
- adv7180_set_field_mode(state);
-
- /* register for interrupts */
- if (state->irq > 0) {
- /* config the Interrupt pin to be active low */
- ret = adv7180_write(state, ADV7180_REG_ICONF1,
- ADV7180_ICONF1_ACTIVE_LOW |
- ADV7180_ICONF1_PSYNC_ONLY);
- if (ret < 0)
- goto out_unlock;
-
- ret = adv7180_write(state, ADV7180_REG_IMR1, 0);
- if (ret < 0)
- goto out_unlock;
-
- ret = adv7180_write(state, ADV7180_REG_IMR2, 0);
- if (ret < 0)
- goto out_unlock;
-
- /* enable AD change interrupts interrupts */
- ret = adv7180_write(state, ADV7180_REG_IMR3,
- ADV7180_IRQ3_AD_CHANGE);
- if (ret < 0)
- goto out_unlock;
-
- ret = adv7180_write(state, ADV7180_REG_IMR4, 0);
- if (ret < 0)
- goto out_unlock;
- }
-
-out_unlock:
- mutex_unlock(&state->mutex);
-
- return ret;
-}
-
static int adv7180_probe(struct i2c_client *client)
{
struct device_node *np = client->dev.of_node;
@@ -1457,10 +1461,7 @@ static int adv7180_probe(struct i2c_client *client)
state->irq = client->irq;
mutex_init(&state->mutex);
state->curr_norm = V4L2_STD_NTSC;
- if (state->chip_info->flags & ADV7180_FLAG_RESET_POWERED)
- state->powered = true;
- else
- state->powered = false;
+
state->input = 0;
sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &adv7180_ops);
@@ -1477,7 +1478,9 @@ static int adv7180_probe(struct i2c_client *client)
if (ret)
goto err_free_ctrl;
- ret = init_device(state);
+ mutex_lock(&state->mutex);
+ ret = adv7180_reset_device(state);
+ mutex_unlock(&state->mutex);
if (ret)
goto err_media_entity_cleanup;
@@ -1549,6 +1552,8 @@ static int adv7180_suspend(struct device *dev)
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct adv7180_state *state = to_state(sd);
+ guard(mutex)(&state->mutex);
+
return adv7180_set_power(state, false);
}
@@ -1558,13 +1563,18 @@ static int adv7180_resume(struct device *dev)
struct adv7180_state *state = to_state(sd);
int ret;
- ret = init_device(state);
+ guard(mutex)(&state->mutex);
+
+ ret = adv7180_reset_device(state);
if (ret < 0)
return ret;
- ret = adv7180_set_power(state, state->powered);
- if (ret)
- return ret;
+ /* If we were streaming when suspending, start the decoder. */
+ if (state->streaming) {
+ ret = adv7180_set_power(state, true);
+ if (ret)
+ return ret;
+ }
return 0;
}
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index afed38596362..8fe7c2f72883 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -42,7 +42,7 @@ module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "debug level (0-2)");
MODULE_DESCRIPTION("Analog Devices ADV7604/10/11/12 video decoder driver");
-MODULE_AUTHOR("Hans Verkuil <hansverk@cisco.com>");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org>");
MODULE_AUTHOR("Mats Randgaard <mats.randgaard@cisco.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 5545cd23e113..9780082db841 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -38,7 +38,7 @@ module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "debug level (0-2)");
MODULE_DESCRIPTION("Analog Devices ADV7842 video decoder driver");
-MODULE_AUTHOR("Hans Verkuil <hansverk@cisco.com>");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org>");
MODULE_AUTHOR("Martin Bugge <marbugge@cisco.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/alvium-csi2.c b/drivers/media/i2c/alvium-csi2.c
index 05b708bd0a64..1f088acecf36 100644
--- a/drivers/media/i2c/alvium-csi2.c
+++ b/drivers/media/i2c/alvium-csi2.c
@@ -1841,7 +1841,6 @@ static int alvium_s_stream(struct v4l2_subdev *sd, int enable)
} else {
alvium_set_stream_mipi(alvium, enable);
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
}
diff --git a/drivers/media/i2c/ar0521.c b/drivers/media/i2c/ar0521.c
index 24873149096c..939bf590d4b2 100644
--- a/drivers/media/i2c/ar0521.c
+++ b/drivers/media/i2c/ar0521.c
@@ -1077,11 +1077,10 @@ static int ar0521_probe(struct i2c_client *client)
}
/* Get master clock (extclk) */
- sensor->extclk = devm_clk_get(dev, "extclk");
- if (IS_ERR(sensor->extclk)) {
- dev_err(dev, "failed to get extclk\n");
- return PTR_ERR(sensor->extclk);
- }
+ sensor->extclk = devm_v4l2_sensor_clk_get(dev, "extclk");
+ if (IS_ERR(sensor->extclk))
+ return dev_err_probe(dev, PTR_ERR(sensor->extclk),
+ "failed to get extclk\n");
sensor->extclk_freq = clk_get_rate(sensor->extclk);
diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
index 487bcabb4a19..1c889c878abd 100644
--- a/drivers/media/i2c/ccs/ccs-core.c
+++ b/drivers/media/i2c/ccs/ccs-core.c
@@ -787,10 +787,8 @@ static int ccs_set_ctrl(struct v4l2_ctrl *ctrl)
rval = -EINVAL;
}
- if (pm_status > 0) {
- pm_runtime_mark_last_busy(&client->dev);
+ if (pm_status > 0)
pm_runtime_put_autosuspend(&client->dev);
- }
return rval;
}
@@ -1914,7 +1912,6 @@ static int ccs_set_stream(struct v4l2_subdev *subdev, int enable)
if (!enable) {
ccs_stop_streaming(sensor);
sensor->streaming = false;
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return 0;
@@ -1929,7 +1926,6 @@ static int ccs_set_stream(struct v4l2_subdev *subdev, int enable)
rval = ccs_start_streaming(sensor);
if (rval < 0) {
sensor->streaming = false;
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
}
@@ -2677,7 +2673,6 @@ nvm_show(struct device *dev, struct device_attribute *attr, char *buf)
return -ENODEV;
}
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
/*
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index a90a9e5705a0..a86306304330 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -9,10 +9,10 @@
* Changes by Tyler Trafford <tatrafford@comcast.net>
* - cleanup/rewrite for V4L2 API (2005)
*
- * VBI support by Hans Verkuil <hverkuil@xs4all.nl>.
+ * VBI support by Hans Verkuil <hverkuil@kernel.org>.
*
* NTSC sliced VBI support by Christopher Neufeld <television@cneufeld.ca>
- * with additional fixes by Hans Verkuil <hverkuil@xs4all.nl>.
+ * with additional fixes by Hans Verkuil <hverkuil@kernel.org>.
*
* CX23885 support by Steven Toth <stoth@linuxtv.org>.
*
diff --git a/drivers/media/i2c/ds90ub913.c b/drivers/media/i2c/ds90ub913.c
index bc74499b0a96..73150061ea45 100644
--- a/drivers/media/i2c/ds90ub913.c
+++ b/drivers/media/i2c/ds90ub913.c
@@ -235,7 +235,7 @@ static int ub913_gpiochip_probe(struct ub913_data *priv)
gc->ngpio = UB913_NUM_GPIOS;
gc->get_direction = ub913_gpio_get_direction;
gc->direction_output = ub913_gpio_direction_out;
- gc->set_rv = ub913_gpio_set;
+ gc->set = ub913_gpio_set;
gc->of_xlate = ub913_gpio_of_xlate;
gc->of_gpio_n_cells = 2;
@@ -333,8 +333,7 @@ static int _ub913_set_routing(struct v4l2_subdev *sd,
.quantization = V4L2_QUANTIZATION_LIM_RANGE,
.xfer_func = V4L2_XFER_FUNC_SRGB,
};
- struct v4l2_subdev_stream_configs *stream_configs;
- unsigned int i;
+ struct v4l2_subdev_route *route;
int ret;
ret = v4l2_subdev_routing_validate(sd, routing,
@@ -346,13 +345,15 @@ static int _ub913_set_routing(struct v4l2_subdev *sd,
if (ret)
return ret;
- stream_configs = &state->stream_configs;
+ for_each_active_route(&state->routing, route) {
+ struct v4l2_mbus_framefmt *fmt;
- for (i = 0; i < stream_configs->num_configs; i++) {
- if (stream_configs->configs[i].pad == UB913_PAD_SINK)
- stream_configs->configs[i].fmt = in_format;
- else
- stream_configs->configs[i].fmt = out_format;
+ fmt = v4l2_subdev_state_get_format(state, route->sink_pad,
+ route->sink_stream);
+ *fmt = in_format;
+ fmt = v4l2_subdev_state_get_format(state, route->source_pad,
+ route->source_stream);
+ *fmt = out_format;
}
return 0;
diff --git a/drivers/media/i2c/ds90ub953.c b/drivers/media/i2c/ds90ub953.c
index a865bfc89500..e3fc9d66970a 100644
--- a/drivers/media/i2c/ds90ub953.c
+++ b/drivers/media/i2c/ds90ub953.c
@@ -361,7 +361,7 @@ static int ub953_gpiochip_probe(struct ub953_data *priv)
gc->direction_input = ub953_gpio_direction_in;
gc->direction_output = ub953_gpio_direction_out;
gc->get = ub953_gpio_get;
- gc->set_rv = ub953_gpio_set;
+ gc->set = ub953_gpio_set;
gc->of_xlate = ub953_gpio_of_xlate;
gc->of_gpio_n_cells = 2;
diff --git a/drivers/media/i2c/dw9768.c b/drivers/media/i2c/dw9768.c
index 3a4d100b9199..d434721ba8ed 100644
--- a/drivers/media/i2c/dw9768.c
+++ b/drivers/media/i2c/dw9768.c
@@ -374,7 +374,6 @@ static int dw9768_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
static int dw9768_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- pm_runtime_mark_last_busy(sd->dev);
pm_runtime_put_autosuspend(sd->dev);
return 0;
diff --git a/drivers/media/i2c/et8ek8/et8ek8_driver.c b/drivers/media/i2c/et8ek8/et8ek8_driver.c
index 7519863d77b1..2cb7b718782b 100644
--- a/drivers/media/i2c/et8ek8/et8ek8_driver.c
+++ b/drivers/media/i2c/et8ek8/et8ek8_driver.c
@@ -816,7 +816,6 @@ static int et8ek8_power_on(struct et8ek8_sensor *sensor)
{
struct v4l2_subdev *subdev = &sensor->subdev;
struct i2c_client *client = v4l2_get_subdevdata(subdev);
- unsigned int xclk_freq;
int val, rval;
rval = regulator_enable(sensor->vana);
@@ -825,17 +824,6 @@ static int et8ek8_power_on(struct et8ek8_sensor *sensor)
return rval;
}
- if (sensor->current_reglist)
- xclk_freq = sensor->current_reglist->mode.ext_clock;
- else
- xclk_freq = sensor->xclk_freq;
-
- rval = clk_set_rate(sensor->ext_clk, xclk_freq);
- if (rval < 0) {
- dev_err(&client->dev, "unable to set extclk clock freq to %u\n",
- xclk_freq);
- goto out;
- }
rval = clk_prepare_enable(sensor->ext_clk);
if (rval < 0) {
dev_err(&client->dev, "failed to enable extclk\n");
@@ -849,7 +837,7 @@ static int et8ek8_power_on(struct et8ek8_sensor *sensor)
gpiod_set_value(sensor->reset, 1);
- msleep(5000 * 1000 / xclk_freq + 1); /* Wait 5000 cycles */
+ msleep(5000 * 1000 / sensor->xclk_freq + 1); /* Wait 5000 cycles */
rval = et8ek8_i2c_reglist_find_write(client, &meta_reglist,
ET8EK8_REGLIST_POWERON);
@@ -1085,9 +1073,6 @@ static int et8ek8_set_frame_interval(struct v4l2_subdev *subdev,
if (!reglist)
return -EINVAL;
- if (sensor->current_reglist->mode.ext_clock != reglist->mode.ext_clock)
- return -EINVAL;
-
sensor->current_reglist = reglist;
et8ek8_update_controls(sensor);
@@ -1433,18 +1418,13 @@ static int et8ek8_probe(struct i2c_client *client)
return PTR_ERR(sensor->vana);
}
- sensor->ext_clk = devm_clk_get(dev, NULL);
- if (IS_ERR(sensor->ext_clk)) {
- dev_err(&client->dev, "could not get clock\n");
- return PTR_ERR(sensor->ext_clk);
- }
+ sensor->ext_clk = devm_v4l2_sensor_clk_get_legacy(dev, NULL, true,
+ 9600000);
+ if (IS_ERR(sensor->ext_clk))
+ return dev_err_probe(&client->dev, PTR_ERR(sensor->ext_clk),
+ "could not get clock\n");
- ret = of_property_read_u32(dev->of_node, "clock-frequency",
- &sensor->xclk_freq);
- if (ret) {
- dev_warn(dev, "can't get clock-frequency\n");
- return ret;
- }
+ sensor->xclk_freq = clk_get_rate(sensor->ext_clk);
mutex_init(&sensor->power_lock);
diff --git a/drivers/media/i2c/et8ek8/et8ek8_mode.c b/drivers/media/i2c/et8ek8/et8ek8_mode.c
index c9088eb0a812..914be1007099 100644
--- a/drivers/media/i2c/et8ek8/et8ek8_mode.c
+++ b/drivers/media/i2c/et8ek8/et8ek8_mode.c
@@ -44,7 +44,6 @@ static struct et8ek8_reglist mode1_poweron_mode2_16vga_2592x1968_12_07fps = {
.window_width = 2592,
.window_height = 1968,
.pixel_clock = 80000000,
- .ext_clock = 9600000,
.timeperframe = {
.numerator = 100,
.denominator = 1207
@@ -145,7 +144,6 @@ static struct et8ek8_reglist mode1_16vga_2592x1968_13_12fps_dpcm10_8 = {
.window_width = 2592,
.window_height = 1968,
.pixel_clock = 80000000,
- .ext_clock = 9600000,
.timeperframe = {
.numerator = 100,
.denominator = 1292
@@ -201,7 +199,6 @@ static struct et8ek8_reglist mode3_4vga_1296x984_29_99fps_dpcm10_8 = {
.window_width = 1296,
.window_height = 984,
.pixel_clock = 96533333,
- .ext_clock = 9600000,
.timeperframe = {
.numerator = 100,
.denominator = 3000
@@ -257,7 +254,6 @@ static struct et8ek8_reglist mode4_svga_864x656_29_88fps = {
.window_width = 864,
.window_height = 656,
.pixel_clock = 80000000,
- .ext_clock = 9600000,
.timeperframe = {
.numerator = 100,
.denominator = 2988
@@ -313,7 +309,6 @@ static struct et8ek8_reglist mode5_vga_648x492_29_93fps = {
.window_width = 648,
.window_height = 492,
.pixel_clock = 80000000,
- .ext_clock = 9600000,
.timeperframe = {
.numerator = 100,
.denominator = 2993
@@ -369,7 +364,6 @@ static struct et8ek8_reglist mode2_16vga_2592x1968_3_99fps = {
.window_width = 2592,
.window_height = 1968,
.pixel_clock = 80000000,
- .ext_clock = 9600000,
.timeperframe = {
.numerator = 100,
.denominator = 399
@@ -424,7 +418,6 @@ static struct et8ek8_reglist mode_648x492_5fps = {
.window_width = 648,
.window_height = 492,
.pixel_clock = 13333333,
- .ext_clock = 9600000,
.timeperframe = {
.numerator = 100,
.denominator = 499
@@ -480,7 +473,6 @@ static struct et8ek8_reglist mode3_4vga_1296x984_5fps = {
.window_width = 1296,
.window_height = 984,
.pixel_clock = 49400000,
- .ext_clock = 9600000,
.timeperframe = {
.numerator = 100,
.denominator = 501
@@ -536,7 +528,6 @@ static struct et8ek8_reglist mode_4vga_1296x984_25fps_dpcm10_8 = {
.window_width = 1296,
.window_height = 984,
.pixel_clock = 84266667,
- .ext_clock = 9600000,
.timeperframe = {
.numerator = 100,
.denominator = 2500
diff --git a/drivers/media/i2c/et8ek8/et8ek8_reg.h b/drivers/media/i2c/et8ek8/et8ek8_reg.h
index c90e74935f12..3305986c7c9c 100644
--- a/drivers/media/i2c/et8ek8/et8ek8_reg.h
+++ b/drivers/media/i2c/et8ek8/et8ek8_reg.h
@@ -37,7 +37,6 @@ struct et8ek8_mode {
u16 window_height;
u32 pixel_clock; /* in Hz */
- u32 ext_clock; /* in Hz */
struct v4l2_fract timeperframe;
u32 max_exp; /* Maximum exposure value */
u32 bus_format; /* MEDIA_BUS_FMT_ */
diff --git a/drivers/media/i2c/gc0308.c b/drivers/media/i2c/gc0308.c
index 069f42785b3c..cbcda0e18ff1 100644
--- a/drivers/media/i2c/gc0308.c
+++ b/drivers/media/i2c/gc0308.c
@@ -974,7 +974,6 @@ static int gc0308_s_ctrl(struct v4l2_ctrl *ctrl)
if (ret)
dev_err(gc0308->dev, "failed to set control: %d\n", ret);
- pm_runtime_mark_last_busy(gc0308->dev);
pm_runtime_put_autosuspend(gc0308->dev);
return ret;
@@ -1157,14 +1156,12 @@ static int gc0308_start_stream(struct gc0308 *gc0308)
return 0;
disable_pm:
- pm_runtime_mark_last_busy(gc0308->dev);
pm_runtime_put_autosuspend(gc0308->dev);
return ret;
}
static int gc0308_stop_stream(struct gc0308 *gc0308)
{
- pm_runtime_mark_last_busy(gc0308->dev);
pm_runtime_put_autosuspend(gc0308->dev);
return 0;
}
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c b/drivers/media/i2c/gc0310.c
index 7af4d66f42a0..7af4d66f42a0 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
+++ b/drivers/media/i2c/gc0310.c
diff --git a/drivers/media/i2c/gc05a2.c b/drivers/media/i2c/gc05a2.c
index 3f7f3d5abeeb..8ba17f80fffe 100644
--- a/drivers/media/i2c/gc05a2.c
+++ b/drivers/media/i2c/gc05a2.c
@@ -1235,16 +1235,12 @@ static int gc05a2_probe(struct i2c_client *client)
return dev_err_probe(dev, PTR_ERR(gc05a2->regmap),
"failed to init CCI\n");
- gc05a2->xclk = devm_clk_get(dev, NULL);
+ gc05a2->xclk = devm_v4l2_sensor_clk_get_legacy(dev, NULL, true,
+ GC05A2_DEFAULT_CLK_FREQ);
if (IS_ERR(gc05a2->xclk))
return dev_err_probe(dev, PTR_ERR(gc05a2->xclk),
"failed to get xclk\n");
- ret = clk_set_rate(gc05a2->xclk, GC05A2_DEFAULT_CLK_FREQ);
- if (ret)
- return dev_err_probe(dev, ret,
- "failed to set xclk frequency\n");
-
ret = gc05a2_get_regulators(dev, gc05a2);
if (ret < 0)
return dev_err_probe(dev, ret,
diff --git a/drivers/media/i2c/gc08a3.c b/drivers/media/i2c/gc08a3.c
index 938709a677b6..11fd936db9c3 100644
--- a/drivers/media/i2c/gc08a3.c
+++ b/drivers/media/i2c/gc08a3.c
@@ -1199,16 +1199,12 @@ static int gc08a3_probe(struct i2c_client *client)
return dev_err_probe(dev, PTR_ERR(gc08a3->regmap),
"failed to init CCI\n");
- gc08a3->xclk = devm_clk_get(dev, NULL);
+ gc08a3->xclk = devm_v4l2_sensor_clk_get_legacy(dev, NULL, true,
+ GC08A3_DEFAULT_CLK_FREQ);
if (IS_ERR(gc08a3->xclk))
return dev_err_probe(dev, PTR_ERR(gc08a3->xclk),
"failed to get xclk\n");
- ret = clk_set_rate(gc08a3->xclk, GC08A3_DEFAULT_CLK_FREQ);
- if (ret)
- return dev_err_probe(dev, ret,
- "failed to set xclk frequency\n");
-
ret = gc08a3_get_regulators(dev, gc08a3);
if (ret < 0)
return dev_err_probe(dev, ret,
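
Note on the gc05a2/gc08a3 hunks above: the open-coded devm_clk_get() plus clk_set_rate() pair is replaced by one devm_v4l2_sensor_clk_get_legacy() call. The helper's exact semantics are not shown in this patch; from the call sites, the boolean argument appears to request programming the given default rate. A before/after sketch under that assumption (SENSOR_DEFAULT_CLK_FREQ stands in for the per-driver constant):

	/* before: two steps every sensor driver open-coded */
	sensor->xclk = devm_clk_get(dev, NULL);
	if (IS_ERR(sensor->xclk))
		return PTR_ERR(sensor->xclk);

	ret = clk_set_rate(sensor->xclk, SENSOR_DEFAULT_CLK_FREQ);
	if (ret)
		return ret;

	/* after: one helper call (signature inferred from the call sites) */
	sensor->xclk = devm_v4l2_sensor_clk_get_legacy(dev, NULL, true,
						       SENSOR_DEFAULT_CLK_FREQ);
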
diff --git a/drivers/media/i2c/gc2145.c b/drivers/media/i2c/gc2145.c
index ba02161d46e7..b215963a2648 100644
--- a/drivers/media/i2c/gc2145.c
+++ b/drivers/media/i2c/gc2145.c
@@ -963,7 +963,6 @@ static int gc2145_enable_streams(struct v4l2_subdev *sd,
return 0;
err_rpm_put:
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return ret;
}
@@ -985,7 +984,6 @@ static int gc2145_disable_streams(struct v4l2_subdev *sd,
if (ret)
dev_err(&client->dev, "%s failed to write regs\n", __func__);
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return ret;
@@ -1193,7 +1191,6 @@ static int gc2145_s_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return ret;
@@ -1334,7 +1331,7 @@ static int gc2145_probe(struct i2c_client *client)
return -EINVAL;
/* Get system clock (xclk) */
- gc2145->xclk = devm_clk_get(dev, NULL);
+ gc2145->xclk = devm_v4l2_sensor_clk_get(dev, NULL);
if (IS_ERR(gc2145->xclk))
return dev_err_probe(dev, PTR_ERR(gc2145->xclk),
"failed to get xclk\n");
diff --git a/drivers/media/i2c/hi556.c b/drivers/media/i2c/hi556.c
index 076c19fcf99c..de573cee4451 100644
--- a/drivers/media/i2c/hi556.c
+++ b/drivers/media/i2c/hi556.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Intel Corporation.
-#include <linux/unaligned.h>
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -10,6 +9,8 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -631,6 +632,8 @@ static const char * const hi556_supply_names[] = {
};
struct hi556 {
+ struct device *dev;
+
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler ctrl_handler;
@@ -715,7 +718,6 @@ static int hi556_write_reg(struct hi556 *hi556, u16 reg, u16 len, u32 val)
static int hi556_write_reg_list(struct hi556 *hi556,
const struct hi556_reg_list *r_list)
{
- struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
unsigned int i;
int ret;
@@ -724,7 +726,7 @@ static int hi556_write_reg_list(struct hi556 *hi556,
HI556_REG_VALUE_16BIT,
r_list->regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(hi556->dev,
"failed to write reg 0x%4.4x. error = %d\n",
r_list->regs[i].address, ret);
return ret;
@@ -785,7 +787,6 @@ static int hi556_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct hi556 *hi556 = container_of(ctrl->handler,
struct hi556, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
s64 exposure_max;
int ret = 0;
@@ -801,7 +802,7 @@ static int hi556_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 controls values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(hi556->dev))
return 0;
switch (ctrl->id) {
@@ -835,7 +836,7 @@ static int hi556_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(hi556->dev);
return ret;
}
@@ -921,7 +922,6 @@ static void hi556_assign_pad_format(const struct hi556_mode *mode,
static int hi556_identify_module(struct hi556 *hi556)
{
- struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
int ret;
u32 val;
@@ -934,7 +934,7 @@ static int hi556_identify_module(struct hi556 *hi556)
return ret;
if (val != HI556_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ dev_err(hi556->dev, "chip id mismatch: %x!=%x\n",
HI556_CHIP_ID, val);
return -ENXIO;
}
@@ -998,7 +998,6 @@ static int hi556_get_selection(struct v4l2_subdev *sd,
static int hi556_start_streaming(struct hi556 *hi556)
{
- struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
const struct hi556_reg_list *reg_list;
int link_freq_index, ret;
@@ -1010,14 +1009,14 @@ static int hi556_start_streaming(struct hi556 *hi556)
reg_list = &link_freq_configs[link_freq_index].reg_list;
ret = hi556_write_reg_list(hi556, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set plls\n");
+ dev_err(hi556->dev, "failed to set plls\n");
return ret;
}
reg_list = &hi556->cur_mode->reg_list;
ret = hi556_write_reg_list(hi556, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set mode\n");
+ dev_err(hi556->dev, "failed to set mode\n");
return ret;
}
@@ -1029,7 +1028,7 @@ static int hi556_start_streaming(struct hi556 *hi556)
HI556_REG_VALUE_16BIT, HI556_MODE_STREAMING);
if (ret) {
- dev_err(&client->dev, "failed to set stream\n");
+ dev_err(hi556->dev, "failed to set stream\n");
return ret;
}
@@ -1038,22 +1037,19 @@ static int hi556_start_streaming(struct hi556 *hi556)
static void hi556_stop_streaming(struct hi556 *hi556)
{
- struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
-
if (hi556_write_reg(hi556, HI556_REG_MODE_SELECT,
HI556_REG_VALUE_16BIT, HI556_MODE_STANDBY))
- dev_err(&client->dev, "failed to set stream\n");
+ dev_err(hi556->dev, "failed to set stream\n");
}
static int hi556_set_stream(struct v4l2_subdev *sd, int enable)
{
struct hi556 *hi556 = to_hi556(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&hi556->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(hi556->dev);
if (ret < 0) {
mutex_unlock(&hi556->mutex);
return ret;
@@ -1062,11 +1058,11 @@ static int hi556_set_stream(struct v4l2_subdev *sd, int enable)
ret = hi556_start_streaming(hi556);
if (ret) {
hi556_stop_streaming(hi556);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(hi556->dev);
}
} else {
hi556_stop_streaming(hi556);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(hi556->dev);
}
mutex_unlock(&hi556->mutex);
@@ -1217,7 +1213,6 @@ static int hi556_check_hwcfg(struct device *dev)
struct v4l2_fwnode_endpoint bus_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
- u32 mclk;
int ret = 0;
unsigned int i, j;
@@ -1235,18 +1230,6 @@ static int hi556_check_hwcfg(struct device *dev)
if (ret)
return dev_err_probe(dev, ret, "parsing endpoint failed\n");
- ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
- if (ret) {
- dev_err(dev, "can't get clock frequency\n");
- goto check_hwcfg_error;
- }
-
- if (mclk != HI556_MCLK) {
- dev_err(dev, "external clock %d is not supported\n", mclk);
- ret = -EINVAL;
- goto check_hwcfg_error;
- }
-
if (bus_cfg.bus.mipi_csi2.num_data_lanes != 2) {
dev_err(dev, "number of CSI2 data lanes %d is not supported\n",
bus_cfg.bus.mipi_csi2.num_data_lanes);
@@ -1289,7 +1272,7 @@ static void hi556_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
+ pm_runtime_disable(hi556->dev);
mutex_destroy(&hi556->mutex);
}
@@ -1336,6 +1319,7 @@ static int hi556_resume(struct device *dev)
static int hi556_probe(struct i2c_client *client)
{
struct hi556 *hi556;
+ unsigned long freq;
bool full_power;
int i, ret;
@@ -1347,40 +1331,48 @@ static int hi556_probe(struct i2c_client *client)
if (!hi556)
return -ENOMEM;
+ hi556->dev = &client->dev;
+
v4l2_i2c_subdev_init(&hi556->sd, client, &hi556_subdev_ops);
- hi556->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+ hi556->reset_gpio = devm_gpiod_get_optional(hi556->dev, "reset",
GPIOD_OUT_HIGH);
if (IS_ERR(hi556->reset_gpio))
- return dev_err_probe(&client->dev, PTR_ERR(hi556->reset_gpio),
+ return dev_err_probe(hi556->dev, PTR_ERR(hi556->reset_gpio),
"failed to get reset GPIO\n");
- hi556->clk = devm_clk_get_optional(&client->dev, "clk");
+ hi556->clk = devm_v4l2_sensor_clk_get(hi556->dev, "clk");
if (IS_ERR(hi556->clk))
- return dev_err_probe(&client->dev, PTR_ERR(hi556->clk),
+ return dev_err_probe(hi556->dev, PTR_ERR(hi556->clk),
"failed to get clock\n");
+ freq = clk_get_rate(hi556->clk);
+ if (freq != HI556_MCLK)
+ return dev_err_probe(hi556->dev, -EINVAL,
+ "external clock %lu is not supported\n",
+ freq);
+
for (i = 0; i < ARRAY_SIZE(hi556_supply_names); i++)
hi556->supplies[i].supply = hi556_supply_names[i];
- ret = devm_regulator_bulk_get(&client->dev,
+ ret = devm_regulator_bulk_get(hi556->dev,
ARRAY_SIZE(hi556_supply_names),
hi556->supplies);
if (ret)
- return dev_err_probe(&client->dev, ret,
+ return dev_err_probe(hi556->dev, ret,
"failed to get regulators\n");
- full_power = acpi_dev_state_d0(&client->dev);
+ full_power = acpi_dev_state_d0(hi556->dev);
if (full_power) {
/* Ensure non ACPI managed resources are enabled */
- ret = hi556_resume(&client->dev);
+ ret = hi556_resume(hi556->dev);
if (ret)
- return dev_err_probe(&client->dev, ret,
+ return dev_err_probe(hi556->dev, ret,
"failed to power on sensor\n");
ret = hi556_identify_module(hi556);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d\n", ret);
+ dev_err(hi556->dev, "failed to find sensor: %d\n", ret);
goto probe_error_power_off;
}
}
@@ -1389,7 +1381,7 @@ static int hi556_probe(struct i2c_client *client)
hi556->cur_mode = &supported_modes[0];
ret = hi556_init_controls(hi556);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d\n", ret);
+ dev_err(hi556->dev, "failed to init controls: %d\n", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
@@ -1400,22 +1392,22 @@ static int hi556_probe(struct i2c_client *client)
hi556->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&hi556->sd.entity, 1, &hi556->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d\n", ret);
+ dev_err(hi556->dev, "failed to init entity pads: %d\n", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
ret = v4l2_async_register_subdev_sensor(&hi556->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to register V4L2 subdev: %d\n",
+ dev_err(hi556->dev, "failed to register V4L2 subdev: %d\n",
ret);
goto probe_error_media_entity_cleanup;
}
/* Set the device's state to active if it's in D0 state. */
if (full_power)
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(hi556->dev);
+ pm_runtime_enable(hi556->dev);
+ pm_runtime_idle(hi556->dev);
return 0;
@@ -1428,7 +1420,7 @@ probe_error_v4l2_ctrl_handler_free:
probe_error_power_off:
if (full_power)
- hi556_suspend(&client->dev);
+ hi556_suspend(hi556->dev);
return ret;
}
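
Note on the hi556 conversion above: struct hi556 gains a cached struct device pointer so the helpers no longer go through v4l2_get_subdevdata() just to reach &client->dev. Minimal sketch of the pattern:

	/* old: every helper re-derived the device from the subdev
	 *
	 *	struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
	 *	dev_err(&client->dev, "...");
	 */

	/* new: cache the pointer once in probe, then use it everywhere */
	hi556->dev = &client->dev;
	dev_err(hi556->dev, "...");
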
diff --git a/drivers/media/i2c/hi846.c b/drivers/media/i2c/hi846.c
index 172772decd3d..a3f77b8434ca 100644
--- a/drivers/media/i2c/hi846.c
+++ b/drivers/media/i2c/hi846.c
@@ -2052,12 +2052,11 @@ static int hi846_probe(struct i2c_client *client)
return ret;
}
- hi846->clock = devm_clk_get(&client->dev, NULL);
- if (IS_ERR(hi846->clock)) {
- dev_err(&client->dev, "failed to get clock: %pe\n",
- hi846->clock);
- return PTR_ERR(hi846->clock);
- }
+ hi846->clock = devm_v4l2_sensor_clk_get(&client->dev, NULL);
+ if (IS_ERR(hi846->clock))
+ return dev_err_probe(&client->dev, PTR_ERR(hi846->clock),
+ "failed to get clock: %pe\n",
+ hi846->clock);
mclk_freq = clk_get_rate(hi846->clock);
if (mclk_freq != 25000000)
diff --git a/drivers/media/i2c/hi847.c b/drivers/media/i2c/hi847.c
index 546833f5b5f5..def01aa07b2f 100644
--- a/drivers/media/i2c/hi847.c
+++ b/drivers/media/i2c/hi847.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2022 Intel Corporation.
-#include <linux/unaligned.h>
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -2166,6 +2168,9 @@ static const struct hi847_mode supported_modes[] = {
};
struct hi847 {
+ struct device *dev;
+ struct clk *clk;
+
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler ctrl_handler;
@@ -2244,7 +2249,6 @@ static int hi847_write_reg(struct hi847 *hi847, u16 reg, u16 len, u32 val)
static int hi847_write_reg_list(struct hi847 *hi847,
const struct hi847_reg_list *r_list)
{
- struct i2c_client *client = v4l2_get_subdevdata(&hi847->sd);
unsigned int i;
int ret;
@@ -2253,7 +2257,7 @@ static int hi847_write_reg_list(struct hi847 *hi847,
HI847_REG_VALUE_16BIT,
r_list->regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(hi847->dev,
"failed to write reg 0x%4.4x. error = %d",
r_list->regs[i].address, ret);
return ret;
@@ -2408,7 +2412,6 @@ static int hi847_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct hi847 *hi847 = container_of(ctrl->handler,
struct hi847, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&hi847->sd);
s64 exposure_max;
int ret = 0;
@@ -2424,7 +2427,7 @@ static int hi847_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 controls values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(hi847->dev))
return 0;
switch (ctrl->id) {
@@ -2466,7 +2469,7 @@ static int hi847_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(hi847->dev);
return ret;
}
@@ -2557,7 +2560,6 @@ static void hi847_assign_pad_format(const struct hi847_mode *mode,
static int hi847_start_streaming(struct hi847 *hi847)
{
- struct i2c_client *client = v4l2_get_subdevdata(&hi847->sd);
const struct hi847_reg_list *reg_list;
int link_freq_index, ret;
@@ -2565,14 +2567,14 @@ static int hi847_start_streaming(struct hi847 *hi847)
reg_list = &link_freq_configs[link_freq_index].reg_list;
ret = hi847_write_reg_list(hi847, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set plls");
+ dev_err(hi847->dev, "failed to set plls");
return ret;
}
reg_list = &hi847->cur_mode->reg_list;
ret = hi847_write_reg_list(hi847, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set mode");
+ dev_err(hi847->dev, "failed to set mode");
return ret;
}
@@ -2587,7 +2589,7 @@ static int hi847_start_streaming(struct hi847 *hi847)
HI847_REG_VALUE_16BIT, HI847_MODE_STREAMING);
if (ret) {
- dev_err(&client->dev, "failed to set stream");
+ dev_err(hi847->dev, "failed to set stream");
return ret;
}
@@ -2596,28 +2598,25 @@ static int hi847_start_streaming(struct hi847 *hi847)
static void hi847_stop_streaming(struct hi847 *hi847)
{
- struct i2c_client *client = v4l2_get_subdevdata(&hi847->sd);
-
if (hi847_write_reg(hi847, HI847_REG_MODE_TG,
HI847_REG_VALUE_16BIT, HI847_REG_MODE_TG_DISABLE))
- dev_err(&client->dev, "failed to set stream 0x%x",
+ dev_err(hi847->dev, "failed to set stream 0x%x",
HI847_REG_MODE_TG);
if (hi847_write_reg(hi847, HI847_REG_MODE_SELECT,
HI847_REG_VALUE_16BIT, HI847_MODE_STANDBY))
- dev_err(&client->dev, "failed to set stream 0x%x",
+ dev_err(hi847->dev, "failed to set stream 0x%x",
HI847_REG_MODE_SELECT);
}
static int hi847_set_stream(struct v4l2_subdev *sd, int enable)
{
struct hi847 *hi847 = to_hi847(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&hi847->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(hi847->dev);
if (ret) {
mutex_unlock(&hi847->mutex);
return ret;
@@ -2627,11 +2626,11 @@ static int hi847_set_stream(struct v4l2_subdev *sd, int enable)
if (ret) {
enable = 0;
hi847_stop_streaming(hi847);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(hi847->dev);
}
} else {
hi847_stop_streaming(hi847);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(hi847->dev);
}
mutex_unlock(&hi847->mutex);
@@ -2768,7 +2767,6 @@ static const struct v4l2_subdev_internal_ops hi847_internal_ops = {
static int hi847_identify_module(struct hi847 *hi847)
{
- struct i2c_client *client = v4l2_get_subdevdata(&hi847->sd);
int ret;
u32 val;
@@ -2778,7 +2776,7 @@ static int hi847_identify_module(struct hi847 *hi847)
return ret;
if (val != HI847_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ dev_err(hi847->dev, "chip id mismatch: %x!=%x",
HI847_CHIP_ID, val);
return -ENXIO;
}
@@ -2793,24 +2791,12 @@ static int hi847_check_hwcfg(struct device *dev)
struct v4l2_fwnode_endpoint bus_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
- u32 mclk;
int ret;
unsigned int i, j;
if (!fwnode)
return -ENXIO;
- ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
- if (ret) {
- dev_err(dev, "can't get clock frequency");
- return ret;
- }
-
- if (mclk != HI847_MCLK) {
- dev_err(dev, "external clock %d is not supported", mclk);
- return -EINVAL;
- }
-
ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
if (!ep)
return -ENXIO;
@@ -2862,22 +2848,36 @@ static void hi847_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
+ pm_runtime_disable(hi847->dev);
mutex_destroy(&hi847->mutex);
}
static int hi847_probe(struct i2c_client *client)
{
struct hi847 *hi847;
+ unsigned long freq;
int ret;
hi847 = devm_kzalloc(&client->dev, sizeof(*hi847), GFP_KERNEL);
if (!hi847)
return -ENOMEM;
- ret = hi847_check_hwcfg(&client->dev);
+ hi847->dev = &client->dev;
+
+ hi847->clk = devm_v4l2_sensor_clk_get(hi847->dev, NULL);
+ if (IS_ERR(hi847->clk))
+ return dev_err_probe(hi847->dev, PTR_ERR(hi847->clk),
+ "failed to get clock\n");
+
+ freq = clk_get_rate(hi847->clk);
+ if (freq != HI847_MCLK)
+ return dev_err_probe(hi847->dev, -EINVAL,
+ "external clock %lu is not supported\n",
+ freq);
+
+ ret = hi847_check_hwcfg(hi847->dev);
if (ret) {
- dev_err(&client->dev, "failed to get HW configuration: %d",
+ dev_err(hi847->dev, "failed to get HW configuration: %d",
ret);
return ret;
}
@@ -2885,7 +2885,7 @@ static int hi847_probe(struct i2c_client *client)
v4l2_i2c_subdev_init(&hi847->sd, client, &hi847_subdev_ops);
ret = hi847_identify_module(hi847);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
+ dev_err(hi847->dev, "failed to find sensor: %d", ret);
return ret;
}
@@ -2893,7 +2893,7 @@ static int hi847_probe(struct i2c_client *client)
hi847->cur_mode = &supported_modes[0];
ret = hi847_init_controls(hi847);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d", ret);
+ dev_err(hi847->dev, "failed to init controls: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
@@ -2904,20 +2904,20 @@ static int hi847_probe(struct i2c_client *client)
hi847->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&hi847->sd.entity, 1, &hi847->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ dev_err(hi847->dev, "failed to init entity pads: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
ret = v4l2_async_register_subdev_sensor(&hi847->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ dev_err(hi847->dev, "failed to register V4L2 subdev: %d",
ret);
goto probe_error_media_entity_cleanup;
}
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(hi847->dev);
+ pm_runtime_enable(hi847->dev);
+ pm_runtime_idle(hi847->dev);
return 0;
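
Note on the hi556/hi847 MCLK checks: validating the "clock-frequency" firmware property only proved what the firmware claimed, not what the clock actually runs at. With the clock acquired through the new helper, the drivers validate clk_get_rate() instead. Sketch, with SENSOR_MCLK standing in for the per-driver constant:

	freq = clk_get_rate(sensor->clk);
	if (freq != SENSOR_MCLK)
		return dev_err_probe(dev, -EINVAL,
				     "external clock %lu is not supported\n",
				     freq);
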
diff --git a/drivers/media/i2c/imx208.c b/drivers/media/i2c/imx208.c
index 2b5a6ce7b1ae..d5350bb46f14 100644
--- a/drivers/media/i2c/imx208.c
+++ b/drivers/media/i2c/imx208.c
@@ -2,13 +2,15 @@
// Copyright (C) 2021 Intel Corporation
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
-#include <linux/unaligned.h>
#define IMX208_REG_MODE_SELECT 0x0100
#define IMX208_MODE_STANDBY 0x00
@@ -268,6 +270,9 @@ static const struct imx208_mode supported_modes[] = {
};
struct imx208 {
+ struct device *dev;
+ struct clk *clk;
+
struct v4l2_subdev sd;
struct media_pad pad;
@@ -372,7 +377,6 @@ static int imx208_write_reg(struct imx208 *imx208, u16 reg, u32 len, u32 val)
static int imx208_write_regs(struct imx208 *imx208,
const struct imx208_reg *regs, u32 len)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
unsigned int i;
int ret;
@@ -380,7 +384,7 @@ static int imx208_write_regs(struct imx208 *imx208,
ret = imx208_write_reg(imx208, regs[i].address, 1,
regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(imx208->dev,
"Failed to write reg 0x%4.4x. error = %d\n",
regs[i].address, ret);
@@ -431,14 +435,13 @@ static int imx208_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct imx208 *imx208 =
container_of(ctrl->handler, struct imx208, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
int ret;
/*
* Applying V4L2 control value only happens
* when power is up for streaming
*/
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(imx208->dev))
return 0;
switch (ctrl->id) {
@@ -471,13 +474,13 @@ static int imx208_set_ctrl(struct v4l2_ctrl *ctrl)
break;
default:
ret = -EINVAL;
- dev_err(&client->dev,
+ dev_err(imx208->dev,
"ctrl(id:0x%x,val:0x%x) is not handled\n",
ctrl->id, ctrl->val);
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx208->dev);
return ret;
}
@@ -620,7 +623,6 @@ static int imx208_set_pad_format(struct v4l2_subdev *sd,
static int imx208_identify_module(struct imx208 *imx208)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
int ret;
u32 val;
@@ -630,13 +632,13 @@ static int imx208_identify_module(struct imx208 *imx208)
ret = imx208_read_reg(imx208, IMX208_REG_CHIP_ID,
2, &val);
if (ret) {
- dev_err(&client->dev, "failed to read chip id %x\n",
+ dev_err(imx208->dev, "failed to read chip id %x\n",
IMX208_CHIP_ID);
return ret;
}
if (val != IMX208_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ dev_err(imx208->dev, "chip id mismatch: %x!=%x\n",
IMX208_CHIP_ID, val);
return -EIO;
}
@@ -649,7 +651,6 @@ static int imx208_identify_module(struct imx208 *imx208)
/* Start streaming */
static int imx208_start_streaming(struct imx208 *imx208)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
const struct imx208_reg_list *reg_list;
int ret, link_freq_index;
@@ -662,7 +663,7 @@ static int imx208_start_streaming(struct imx208 *imx208)
reg_list = &link_freq_configs[link_freq_index].reg_list;
ret = imx208_write_regs(imx208, reg_list->regs, reg_list->num_of_regs);
if (ret) {
- dev_err(&client->dev, "%s failed to set plls\n", __func__);
+ dev_err(imx208->dev, "%s failed to set plls\n", __func__);
return ret;
}
@@ -670,7 +671,7 @@ static int imx208_start_streaming(struct imx208 *imx208)
reg_list = &imx208->cur_mode->reg_list;
ret = imx208_write_regs(imx208, reg_list->regs, reg_list->num_of_regs);
if (ret) {
- dev_err(&client->dev, "%s failed to set mode\n", __func__);
+ dev_err(imx208->dev, "%s failed to set mode\n", __func__);
return ret;
}
@@ -687,14 +688,13 @@ static int imx208_start_streaming(struct imx208 *imx208)
/* Stop streaming */
static int imx208_stop_streaming(struct imx208 *imx208)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
int ret;
/* set stream off register */
ret = imx208_write_reg(imx208, IMX208_REG_MODE_SELECT,
1, IMX208_MODE_STANDBY);
if (ret)
- dev_err(&client->dev, "%s failed to set stream\n", __func__);
+ dev_err(imx208->dev, "%s failed to set stream\n", __func__);
/*
* Return success even if it was an error, as there is nothing the
@@ -706,13 +706,12 @@ static int imx208_stop_streaming(struct imx208 *imx208)
static int imx208_set_stream(struct v4l2_subdev *sd, int enable)
{
struct imx208 *imx208 = to_imx208(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&imx208->imx208_mx);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(imx208->dev);
if (ret) {
mutex_unlock(&imx208->imx208_mx);
return ret;
@@ -727,7 +726,7 @@ static int imx208_set_stream(struct v4l2_subdev *sd, int enable)
goto err_rpm_put;
} else {
imx208_stop_streaming(imx208);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx208->dev);
}
mutex_unlock(&imx208->imx208_mx);
@@ -739,7 +738,7 @@ static int imx208_set_stream(struct v4l2_subdev *sd, int enable)
return ret;
err_rpm_put:
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx208->dev);
mutex_unlock(&imx208->imx208_mx);
return ret;
@@ -778,7 +777,7 @@ static int imx208_read_otp(struct imx208 *imx208)
if (imx208->otp_read)
goto out_unlock;
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(imx208->dev);
if (ret)
goto out_unlock;
@@ -805,7 +804,7 @@ static int imx208_read_otp(struct imx208 *imx208)
}
out_pm_put:
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx208->dev);
out_unlock:
mutex_unlock(&imx208->imx208_mx);
@@ -835,7 +834,6 @@ static const BIN_ATTR_RO(otp, IMX208_OTP_SIZE);
/* Initialize control handlers */
static int imx208_init_controls(struct imx208 *imx208)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
struct v4l2_ctrl_handler *ctrl_hdlr = &imx208->ctrl_handler;
s64 exposure_max;
s64 vblank_def;
@@ -914,7 +912,7 @@ static int imx208_init_controls(struct imx208 *imx208)
if (ctrl_hdlr->error) {
ret = ctrl_hdlr->error;
- dev_err(&client->dev, "%s control init failed (%d)\n",
+ dev_err(imx208->dev, "%s control init failed (%d)\n",
__func__, ret);
goto error;
}
@@ -938,31 +936,36 @@ static void imx208_free_controls(struct imx208 *imx208)
static int imx208_probe(struct i2c_client *client)
{
struct imx208 *imx208;
+ unsigned long freq;
int ret;
bool full_power;
- u32 val = 0;
-
- device_property_read_u32(&client->dev, "clock-frequency", &val);
- if (val != 19200000) {
- dev_err(&client->dev,
- "Unsupported clock-frequency %u. Expected 19200000.\n",
- val);
- return -EINVAL;
- }
imx208 = devm_kzalloc(&client->dev, sizeof(*imx208), GFP_KERNEL);
if (!imx208)
return -ENOMEM;
+ imx208->dev = &client->dev;
+
+ imx208->clk = devm_v4l2_sensor_clk_get(imx208->dev, NULL);
+ if (IS_ERR(imx208->clk))
+ return dev_err_probe(imx208->dev, PTR_ERR(imx208->clk),
+ "failed to get clock\n");
+
+ freq = clk_get_rate(imx208->clk);
+ if (freq != 19200000)
+ return dev_err_probe(imx208->dev, -EINVAL,
+ "external clock %lu is not supported\n",
+ freq);
+
/* Initialize subdev */
v4l2_i2c_subdev_init(&imx208->sd, client, &imx208_subdev_ops);
- full_power = acpi_dev_state_d0(&client->dev);
+ full_power = acpi_dev_state_d0(imx208->dev);
if (full_power) {
/* Check module identity */
ret = imx208_identify_module(imx208);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
+ dev_err(imx208->dev, "failed to find sensor: %d", ret);
goto error_probe;
}
}
@@ -972,7 +975,7 @@ static int imx208_probe(struct i2c_client *client)
ret = imx208_init_controls(imx208);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d", ret);
+ dev_err(imx208->dev, "failed to init controls: %d", ret);
goto error_probe;
}
@@ -985,7 +988,7 @@ static int imx208_probe(struct i2c_client *client)
imx208->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&imx208->sd.entity, 1, &imx208->pad);
if (ret) {
- dev_err(&client->dev, "%s failed:%d\n", __func__, ret);
+ dev_err(imx208->dev, "%s failed:%d\n", __func__, ret);
goto error_handler_free;
}
@@ -993,17 +996,17 @@ static int imx208_probe(struct i2c_client *client)
if (ret < 0)
goto error_media_entity;
- ret = device_create_bin_file(&client->dev, &bin_attr_otp);
+ ret = device_create_bin_file(imx208->dev, &bin_attr_otp);
if (ret) {
- dev_err(&client->dev, "sysfs otp creation failed\n");
+ dev_err(imx208->dev, "sysfs otp creation failed\n");
goto error_async_subdev;
}
/* Set the device's state to active if it's in D0 state. */
if (full_power)
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(imx208->dev);
+ pm_runtime_enable(imx208->dev);
+ pm_runtime_idle(imx208->dev);
return 0;
@@ -1027,13 +1030,13 @@ static void imx208_remove(struct i2c_client *client)
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct imx208 *imx208 = to_imx208(sd);
- device_remove_bin_file(&client->dev, &bin_attr_otp);
+ device_remove_bin_file(imx208->dev, &bin_attr_otp);
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
imx208_free_controls(imx208);
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(imx208->dev);
+ pm_runtime_set_suspended(imx208->dev);
mutex_destroy(&imx208->imx208_mx);
}
diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c
index a0cef9e61b41..94ebe625c9e6 100644
--- a/drivers/media/i2c/imx214.c
+++ b/drivers/media/i2c/imx214.c
@@ -881,6 +881,109 @@ static const struct v4l2_ctrl_ops imx214_ctrl_ops = {
.s_ctrl = imx214_set_ctrl,
};
+static int imx214_pll_calculate(struct imx214 *imx214, struct ccs_pll *pll,
+ unsigned int link_freq)
+{
+ struct ccs_pll_limits limits = {
+ .min_ext_clk_freq_hz = 6000000,
+ .max_ext_clk_freq_hz = 27000000,
+
+ .vt_fr = {
+ .min_pre_pll_clk_div = 1,
+ .max_pre_pll_clk_div = 15,
+ /* Value is educated guess as we don't have a spec */
+ .min_pll_ip_clk_freq_hz = 6000000,
+ /* Value is educated guess as we don't have a spec */
+ .max_pll_ip_clk_freq_hz = 12000000,
+ .min_pll_multiplier = 12,
+ .max_pll_multiplier = 1200,
+ .min_pll_op_clk_freq_hz = 338000000,
+ .max_pll_op_clk_freq_hz = 1200000000,
+ },
+ .vt_bk = {
+ .min_sys_clk_div = 2,
+ .max_sys_clk_div = 4,
+ .min_pix_clk_div = 5,
+ .max_pix_clk_div = 10,
+ .min_pix_clk_freq_hz = 30000000,
+ .max_pix_clk_freq_hz = 120000000,
+ },
+ .op_bk = {
+ .min_sys_clk_div = 1,
+ .max_sys_clk_div = 2,
+ .min_pix_clk_div = 6,
+ .max_pix_clk_div = 10,
+ .min_pix_clk_freq_hz = 30000000,
+ .max_pix_clk_freq_hz = 120000000,
+ },
+
+ .min_line_length_pck_bin = IMX214_PPL_DEFAULT,
+ .min_line_length_pck = IMX214_PPL_DEFAULT,
+ };
+ unsigned int num_lanes = imx214->bus_cfg.bus.mipi_csi2.num_data_lanes;
+
+ /*
+ * There are no documented constraints on the sys clock frequency, for
+ * either branch. Recover them based on the PLL output clock frequency
+ * and sys_clk_div limits on one hand, and the pix clock frequency and
+ * the pix_clk_div limits on the other hand.
+ */
+ limits.vt_bk.min_sys_clk_freq_hz =
+ max(limits.vt_fr.min_pll_op_clk_freq_hz / limits.vt_bk.max_sys_clk_div,
+ limits.vt_bk.min_pix_clk_freq_hz * limits.vt_bk.min_pix_clk_div);
+ limits.vt_bk.max_sys_clk_freq_hz =
+ min(limits.vt_fr.max_pll_op_clk_freq_hz / limits.vt_bk.min_sys_clk_div,
+ limits.vt_bk.max_pix_clk_freq_hz * limits.vt_bk.max_pix_clk_div);
+
+ limits.op_bk.min_sys_clk_freq_hz =
+ max(limits.vt_fr.min_pll_op_clk_freq_hz / limits.op_bk.max_sys_clk_div,
+ limits.op_bk.min_pix_clk_freq_hz * limits.op_bk.min_pix_clk_div);
+ limits.op_bk.max_sys_clk_freq_hz =
+ min(limits.vt_fr.max_pll_op_clk_freq_hz / limits.op_bk.min_sys_clk_div,
+ limits.op_bk.max_pix_clk_freq_hz * limits.op_bk.max_pix_clk_div);
+
+ memset(pll, 0, sizeof(*pll));
+
+ pll->bus_type = CCS_PLL_BUS_TYPE_CSI2_DPHY;
+ pll->op_lanes = num_lanes;
+ pll->vt_lanes = num_lanes;
+ pll->csi2.lanes = num_lanes;
+
+ pll->binning_horizontal = 1;
+ pll->binning_vertical = 1;
+ pll->scale_m = 1;
+ pll->scale_n = 1;
+ pll->bits_per_pixel =
+ IMX214_CSI_DATA_FORMAT_RAW10 & IMX214_BITS_PER_PIXEL_MASK;
+ pll->flags = CCS_PLL_FLAG_LANE_SPEED_MODEL;
+ pll->link_freq = link_freq;
+ pll->ext_clk_freq_hz = clk_get_rate(imx214->xclk);
+
+ return ccs_pll_calculate(imx214->dev, &limits, pll);
+}
+
+static int imx214_pll_update(struct imx214 *imx214)
+{
+ u64 link_freq;
+ int ret;
+
+ link_freq = imx214->bus_cfg.link_frequencies[imx214->link_freq->val];
+ ret = imx214_pll_calculate(imx214, &imx214->pll, link_freq);
+ if (ret) {
+ dev_err(imx214->dev, "PLL calculations failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = v4l2_ctrl_s_ctrl_int64(imx214->pixel_rate,
+ imx214->pll.pixel_rate_pixel_array);
+ if (ret) {
+ dev_err(imx214->dev, "failed to set pixel rate\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int imx214_ctrls_init(struct imx214 *imx214)
{
static const struct v4l2_area unit_size = {
@@ -1003,6 +1106,13 @@ static int imx214_ctrls_init(struct imx214 *imx214)
return ret;
}
+ ret = imx214_pll_update(imx214);
+ if (ret < 0) {
+ v4l2_ctrl_handler_free(ctrl_hdlr);
+ dev_err(imx214->dev, "failed to update PLL\n");
+ return ret;
+ }
+
imx214->sd.ctrl_handler = ctrl_hdlr;
return 0;
@@ -1029,8 +1139,8 @@ static int imx214_start_streaming(struct imx214 *imx214)
return ret;
}
- bit_rate_mbps = (imx214->pll.pixel_rate_csi / 1000000)
- * imx214->pll.bits_per_pixel;
+ bit_rate_mbps = imx214->pll.pixel_rate_csi / 1000000
+ * imx214->pll.bits_per_pixel;
ret = cci_write(imx214->regmap, IMX214_REG_REQ_LINK_BIT_RATE,
IMX214_LINK_BIT_RATE_MBPS(bit_rate_mbps), NULL);
if (ret) {
@@ -1115,109 +1225,6 @@ err_rpm_put:
return ret;
}
-static int imx214_pll_calculate(struct imx214 *imx214, struct ccs_pll *pll,
- unsigned int link_freq)
-{
- struct ccs_pll_limits limits = {
- .min_ext_clk_freq_hz = 6000000,
- .max_ext_clk_freq_hz = 27000000,
-
- .vt_fr = {
- .min_pre_pll_clk_div = 1,
- .max_pre_pll_clk_div = 15,
- /* Value is educated guess as we don't have a spec */
- .min_pll_ip_clk_freq_hz = 6000000,
- /* Value is educated guess as we don't have a spec */
- .max_pll_ip_clk_freq_hz = 12000000,
- .min_pll_multiplier = 12,
- .max_pll_multiplier = 1200,
- .min_pll_op_clk_freq_hz = 338000000,
- .max_pll_op_clk_freq_hz = 1200000000,
- },
- .vt_bk = {
- .min_sys_clk_div = 2,
- .max_sys_clk_div = 4,
- .min_pix_clk_div = 5,
- .max_pix_clk_div = 10,
- .min_pix_clk_freq_hz = 30000000,
- .max_pix_clk_freq_hz = 120000000,
- },
- .op_bk = {
- .min_sys_clk_div = 1,
- .max_sys_clk_div = 2,
- .min_pix_clk_div = 6,
- .max_pix_clk_div = 10,
- .min_pix_clk_freq_hz = 30000000,
- .max_pix_clk_freq_hz = 120000000,
- },
-
- .min_line_length_pck_bin = IMX214_PPL_DEFAULT,
- .min_line_length_pck = IMX214_PPL_DEFAULT,
- };
- unsigned int num_lanes = imx214->bus_cfg.bus.mipi_csi2.num_data_lanes;
-
- /*
- * There are no documented constraints on the sys clock frequency, for
- * either branch. Recover them based on the PLL output clock frequency
- * and sys_clk_div limits on one hand, and the pix clock frequency and
- * the pix_clk_div limits on the other hand.
- */
- limits.vt_bk.min_sys_clk_freq_hz =
- max(limits.vt_fr.min_pll_op_clk_freq_hz / limits.vt_bk.max_sys_clk_div,
- limits.vt_bk.min_pix_clk_freq_hz * limits.vt_bk.min_pix_clk_div);
- limits.vt_bk.max_sys_clk_freq_hz =
- min(limits.vt_fr.max_pll_op_clk_freq_hz / limits.vt_bk.min_sys_clk_div,
- limits.vt_bk.max_pix_clk_freq_hz * limits.vt_bk.max_pix_clk_div);
-
- limits.op_bk.min_sys_clk_freq_hz =
- max(limits.vt_fr.min_pll_op_clk_freq_hz / limits.op_bk.max_sys_clk_div,
- limits.op_bk.min_pix_clk_freq_hz * limits.op_bk.min_pix_clk_div);
- limits.op_bk.max_sys_clk_freq_hz =
- min(limits.vt_fr.max_pll_op_clk_freq_hz / limits.op_bk.min_sys_clk_div,
- limits.op_bk.max_pix_clk_freq_hz * limits.op_bk.max_pix_clk_div);
-
- memset(pll, 0, sizeof(*pll));
-
- pll->bus_type = CCS_PLL_BUS_TYPE_CSI2_DPHY;
- pll->op_lanes = num_lanes;
- pll->vt_lanes = num_lanes;
- pll->csi2.lanes = num_lanes;
-
- pll->binning_horizontal = 1;
- pll->binning_vertical = 1;
- pll->scale_m = 1;
- pll->scale_n = 1;
- pll->bits_per_pixel =
- IMX214_CSI_DATA_FORMAT_RAW10 & IMX214_BITS_PER_PIXEL_MASK;
- pll->flags = CCS_PLL_FLAG_LANE_SPEED_MODEL;
- pll->link_freq = link_freq;
- pll->ext_clk_freq_hz = clk_get_rate(imx214->xclk);
-
- return ccs_pll_calculate(imx214->dev, &limits, pll);
-}
-
-static int imx214_pll_update(struct imx214 *imx214)
-{
- u64 link_freq;
- int ret;
-
- link_freq = imx214->bus_cfg.link_frequencies[imx214->link_freq->val];
- ret = imx214_pll_calculate(imx214, &imx214->pll, link_freq);
- if (ret) {
- dev_err(imx214->dev, "PLL calculations failed: %d\n", ret);
- return ret;
- }
-
- ret = v4l2_ctrl_s_ctrl_int64(imx214->pixel_rate,
- imx214->pll.pixel_rate_pixel_array);
- if (ret) {
- dev_err(imx214->dev, "failed to set pixel rate\n");
- return ret;
- }
-
- return 0;
-}
-
static int imx214_get_frame_interval(struct v4l2_subdev *subdev,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval *fival)
@@ -1324,10 +1331,11 @@ static int imx214_identify_module(struct imx214 *imx214)
return 0;
}
-static int imx214_parse_fwnode(struct device *dev, struct imx214 *imx214)
+static int imx214_parse_fwnode(struct imx214 *imx214)
{
+ struct fwnode_handle *endpoint __free(fwnode_handle) = NULL;
struct v4l2_fwnode_endpoint *bus_cfg = &imx214->bus_cfg;
- struct fwnode_handle *endpoint;
+ struct device *dev = imx214->dev;
unsigned int i;
int ret;
@@ -1337,11 +1345,8 @@ static int imx214_parse_fwnode(struct device *dev, struct imx214 *imx214)
bus_cfg->bus_type = V4L2_MBUS_CSI2_DPHY;
ret = v4l2_fwnode_endpoint_alloc_parse(endpoint, bus_cfg);
- fwnode_handle_put(endpoint);
- if (ret) {
- dev_err_probe(dev, ret, "parsing endpoint node failed\n");
- goto error;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "parsing endpoint node failed\n");
/* Check the number of MIPI CSI2 data lanes */
if (bus_cfg->bus.mipi_csi2.num_data_lanes != 4) {
@@ -1357,18 +1362,16 @@ static int imx214_parse_fwnode(struct device *dev, struct imx214 *imx214)
u64 freq = bus_cfg->link_frequencies[i];
struct ccs_pll pll;
- if (!imx214_pll_calculate(imx214, &pll, freq))
- break;
if (freq == IMX214_DEFAULT_LINK_FREQ_LEGACY) {
dev_warn(dev,
"link-frequencies %d not supported, please review your DT. Continuing anyway\n",
IMX214_DEFAULT_LINK_FREQ);
freq = IMX214_DEFAULT_LINK_FREQ;
- if (imx214_pll_calculate(imx214, &pll, freq))
- continue;
bus_cfg->link_frequencies[i] = freq;
- break;
}
+
+ if (!imx214_pll_calculate(imx214, &pll, freq))
+ break;
}
if (i == bus_cfg->nr_of_link_frequencies)
@@ -1396,7 +1399,7 @@ static int imx214_probe(struct i2c_client *client)
imx214->dev = dev;
- imx214->xclk = devm_clk_get(dev, NULL);
+ imx214->xclk = devm_v4l2_sensor_clk_get(dev, NULL);
if (IS_ERR(imx214->xclk))
return dev_err_probe(dev, PTR_ERR(imx214->xclk),
"failed to get xclk\n");
@@ -1415,7 +1418,7 @@ static int imx214_probe(struct i2c_client *client)
return dev_err_probe(dev, PTR_ERR(imx214->regmap),
"failed to initialize CCI\n");
- ret = imx214_parse_fwnode(dev, imx214);
+ ret = imx214_parse_fwnode(imx214);
if (ret)
return ret;
@@ -1459,12 +1462,6 @@ static int imx214_probe(struct i2c_client *client)
pm_runtime_set_active(imx214->dev);
pm_runtime_enable(imx214->dev);
- ret = imx214_pll_update(imx214);
- if (ret < 0) {
- dev_err_probe(dev, ret, "failed to update PLL\n");
- goto error_subdev_cleanup;
- }
-
ret = v4l2_async_register_subdev_sensor(&imx214->sd);
if (ret < 0) {
dev_err_probe(dev, ret,
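
Note on the imx214 hunks above: imx214_pll_calculate() and imx214_pll_update() are moved, unchanged, ahead of imx214_ctrls_init() so the PLL can be computed when the controls are created rather than late in probe, and the legacy link-frequency fixup in imx214_parse_fwnode() now rewrites the frequency before attempting the PLL calculation. The parenthesis change in the bit-rate computation is cosmetic: '/' and '*' share precedence and associate left to right, so

	bit_rate_mbps = imx214->pll.pixel_rate_csi / 1000000
		      * imx214->pll.bits_per_pixel;

evaluates exactly as the parenthesised form did.
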
diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
index 3b4f68543342..c680aa6c3a55 100644
--- a/drivers/media/i2c/imx219.c
+++ b/drivers/media/i2c/imx219.c
@@ -771,7 +771,6 @@ static int imx219_enable_streams(struct v4l2_subdev *sd,
return 0;
err_rpm_put:
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return ret;
}
@@ -793,7 +792,6 @@ static int imx219_disable_streams(struct v4l2_subdev *sd,
__v4l2_ctrl_grab(imx219->vflip, false);
__v4l2_ctrl_grab(imx219->hflip, false);
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return ret;
@@ -1034,6 +1032,10 @@ static int imx219_power_on(struct device *dev)
goto reg_off;
}
+ /*
+ * Note: misinterpretation of reset assertion - do not re-use this code.
+ * The XCLR pin is driven at the wrong logical level for a reset signal.
+ */
gpiod_set_value_cansleep(imx219->reset_gpio, 1);
usleep_range(IMX219_XCLR_MIN_DELAY_US,
IMX219_XCLR_MIN_DELAY_US + IMX219_XCLR_DELAY_RANGE_US);
@@ -1188,7 +1190,7 @@ static int imx219_probe(struct i2c_client *client)
"failed to initialize CCI\n");
/* Get system clock (xclk) */
- imx219->xclk = devm_clk_get(dev, NULL);
+ imx219->xclk = devm_v4l2_sensor_clk_get(dev, NULL);
if (IS_ERR(imx219->xclk))
return dev_err_probe(dev, PTR_ERR(imx219->xclk),
"failed to get xclk\n");
diff --git a/drivers/media/i2c/imx258.c b/drivers/media/i2c/imx258.c
index 9e30fce1f223..e50dcfd830f5 100644
--- a/drivers/media/i2c/imx258.c
+++ b/drivers/media/i2c/imx258.c
@@ -8,11 +8,12 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-cci.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
-#include <linux/unaligned.h>
#define IMX258_REG_MODE_SELECT CCI_REG8(0x0100)
#define IMX258_MODE_STANDBY 0x00
@@ -645,6 +646,8 @@ static const struct imx258_mode supported_modes[] = {
};
struct imx258 {
+ struct device *dev;
+
struct v4l2_subdev sd;
struct media_pad pad;
struct regmap *regmap;
@@ -751,7 +754,6 @@ static int imx258_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct imx258 *imx258 =
container_of(ctrl->handler, struct imx258, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&imx258->sd);
int ret = 0;
/*
@@ -765,7 +767,7 @@ static int imx258_set_ctrl(struct v4l2_ctrl *ctrl)
* Applying V4L2 control value only happens
* when power is up for streaming
*/
- if (pm_runtime_get_if_in_use(&client->dev) == 0)
+ if (pm_runtime_get_if_in_use(imx258->dev) == 0)
return 0;
switch (ctrl->id) {
@@ -811,14 +813,14 @@ static int imx258_set_ctrl(struct v4l2_ctrl *ctrl)
NULL);
break;
default:
- dev_info(&client->dev,
+ dev_info(imx258->dev,
"ctrl(id:0x%x,val:0x%x) is not handled\n",
ctrl->id, ctrl->val);
ret = -EINVAL;
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx258->dev);
return ret;
}
@@ -1013,14 +1015,13 @@ static int imx258_get_selection(struct v4l2_subdev *sd,
/* Start streaming */
static int imx258_start_streaming(struct imx258 *imx258)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx258->sd);
const struct imx258_reg_list *reg_list;
const struct imx258_link_freq_config *link_freq_cfg;
int ret, link_freq_index;
ret = cci_write(imx258->regmap, IMX258_REG_RESET, 0x01, NULL);
if (ret) {
- dev_err(&client->dev, "%s failed to reset sensor\n", __func__);
+ dev_err(imx258->dev, "%s failed to reset sensor\n", __func__);
return ret;
}
@@ -1034,21 +1035,21 @@ static int imx258_start_streaming(struct imx258 *imx258)
reg_list = &link_freq_cfg->link_cfg[imx258->lane_mode_idx].reg_list;
ret = cci_multi_reg_write(imx258->regmap, reg_list->regs, reg_list->num_of_regs, NULL);
if (ret) {
- dev_err(&client->dev, "%s failed to set plls\n", __func__);
+ dev_err(imx258->dev, "%s failed to set plls\n", __func__);
return ret;
}
ret = cci_multi_reg_write(imx258->regmap, mode_common_regs,
ARRAY_SIZE(mode_common_regs), NULL);
if (ret) {
- dev_err(&client->dev, "%s failed to set common regs\n", __func__);
+ dev_err(imx258->dev, "%s failed to set common regs\n", __func__);
return ret;
}
ret = cci_multi_reg_write(imx258->regmap, imx258->variant_cfg->regs,
imx258->variant_cfg->num_regs, NULL);
if (ret) {
- dev_err(&client->dev, "%s failed to set variant config\n",
+ dev_err(imx258->dev, "%s failed to set variant config\n",
__func__);
return ret;
}
@@ -1057,7 +1058,7 @@ static int imx258_start_streaming(struct imx258 *imx258)
!!(imx258->csi2_flags & V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK),
NULL);
if (ret) {
- dev_err(&client->dev, "%s failed to set clock lane mode\n", __func__);
+ dev_err(imx258->dev, "%s failed to set clock lane mode\n", __func__);
return ret;
}
@@ -1065,7 +1066,7 @@ static int imx258_start_streaming(struct imx258 *imx258)
reg_list = &imx258->cur_mode->reg_list;
ret = cci_multi_reg_write(imx258->regmap, reg_list->regs, reg_list->num_of_regs, NULL);
if (ret) {
- dev_err(&client->dev, "%s failed to set mode\n", __func__);
+ dev_err(imx258->dev, "%s failed to set mode\n", __func__);
return ret;
}
@@ -1082,14 +1083,13 @@ static int imx258_start_streaming(struct imx258 *imx258)
/* Stop streaming */
static int imx258_stop_streaming(struct imx258 *imx258)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx258->sd);
int ret;
/* set stream off register */
ret = cci_write(imx258->regmap, IMX258_REG_MODE_SELECT,
IMX258_MODE_STANDBY, NULL);
if (ret)
- dev_err(&client->dev, "%s failed to set stream\n", __func__);
+ dev_err(imx258->dev, "%s failed to set stream\n", __func__);
/*
* Return success even if it was an error, as there is nothing the
@@ -1135,13 +1135,12 @@ static int imx258_power_off(struct device *dev)
static int imx258_set_stream(struct v4l2_subdev *sd, int enable)
{
struct imx258 *imx258 = to_imx258(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&imx258->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(imx258->dev);
if (ret < 0)
goto err_unlock;
@@ -1154,7 +1153,7 @@ static int imx258_set_stream(struct v4l2_subdev *sd, int enable)
goto err_rpm_put;
} else {
imx258_stop_streaming(imx258);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx258->dev);
}
mutex_unlock(&imx258->mutex);
@@ -1162,7 +1161,7 @@ static int imx258_set_stream(struct v4l2_subdev *sd, int enable)
return ret;
err_rpm_put:
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx258->dev);
err_unlock:
mutex_unlock(&imx258->mutex);
@@ -1172,20 +1171,19 @@ err_unlock:
/* Verify chip ID */
static int imx258_identify_module(struct imx258 *imx258)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx258->sd);
int ret;
u64 val;
ret = cci_read(imx258->regmap, IMX258_REG_CHIP_ID,
&val, NULL);
if (ret) {
- dev_err(&client->dev, "failed to read chip id %x\n",
+ dev_err(imx258->dev, "failed to read chip id %x\n",
IMX258_CHIP_ID);
return ret;
}
if (val != IMX258_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%llx\n",
+ dev_err(imx258->dev, "chip id mismatch: %x!=%llx\n",
IMX258_CHIP_ID, val);
return -EIO;
}
@@ -1217,7 +1215,6 @@ static const struct v4l2_subdev_internal_ops imx258_internal_ops = {
/* Initialize control handlers */
static int imx258_init_controls(struct imx258 *imx258)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx258->sd);
const struct imx258_link_freq_config *link_freq_cfgs;
struct v4l2_fwnode_device_properties props;
struct v4l2_ctrl_handler *ctrl_hdlr;
@@ -1308,12 +1305,12 @@ static int imx258_init_controls(struct imx258 *imx258)
if (ctrl_hdlr->error) {
ret = ctrl_hdlr->error;
- dev_err(&client->dev, "%s control init failed (%d)\n",
+ dev_err(imx258->dev, "%s control init failed (%d)\n",
__func__, ret);
goto error;
}
- ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ ret = v4l2_fwnode_device_parse(imx258->dev, &props);
if (ret)
goto error;
@@ -1339,15 +1336,14 @@ static void imx258_free_controls(struct imx258 *imx258)
mutex_destroy(&imx258->mutex);
}
-static int imx258_get_regulators(struct imx258 *imx258,
- struct i2c_client *client)
+static int imx258_get_regulators(struct imx258 *imx258)
{
unsigned int i;
for (i = 0; i < IMX258_NUM_SUPPLIES; i++)
imx258->supplies[i].supply = imx258_supply_name[i];
- return devm_regulator_bulk_get(&client->dev,
+ return devm_regulator_bulk_get(imx258->dev,
IMX258_NUM_SUPPLIES, imx258->supplies);
}
@@ -1365,30 +1361,27 @@ static int imx258_probe(struct i2c_client *client)
if (!imx258)
return -ENOMEM;
+ imx258->dev = &client->dev;
+
imx258->regmap = devm_cci_regmap_init_i2c(client, 16);
if (IS_ERR(imx258->regmap)) {
ret = PTR_ERR(imx258->regmap);
- dev_err(&client->dev, "failed to initialize CCI: %d\n", ret);
+ dev_err(imx258->dev, "failed to initialize CCI: %d\n", ret);
return ret;
}
- ret = imx258_get_regulators(imx258, client);
+ ret = imx258_get_regulators(imx258);
if (ret)
- return dev_err_probe(&client->dev, ret,
+ return dev_err_probe(imx258->dev, ret,
"failed to get regulators\n");
- imx258->clk = devm_clk_get_optional(&client->dev, NULL);
+ imx258->clk = devm_v4l2_sensor_clk_get_legacy(imx258->dev, NULL, false,
+ 0);
if (IS_ERR(imx258->clk))
- return dev_err_probe(&client->dev, PTR_ERR(imx258->clk),
+ return dev_err_probe(imx258->dev, PTR_ERR(imx258->clk),
"error getting clock\n");
- if (!imx258->clk) {
- dev_dbg(&client->dev,
- "no clock provided, using clock-frequency property\n");
- device_property_read_u32(&client->dev, "clock-frequency", &val);
- } else {
- val = clk_get_rate(imx258->clk);
- }
+ val = clk_get_rate(imx258->clk);
switch (val) {
case 19200000:
@@ -1400,32 +1393,32 @@ static int imx258_probe(struct i2c_client *client)
imx258->link_freq_menu_items = link_freq_menu_items_24;
break;
default:
- dev_err(&client->dev, "input clock frequency of %u not supported\n",
+ dev_err(imx258->dev, "input clock frequency of %u not supported\n",
val);
return -EINVAL;
}
- endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(&client->dev), NULL);
+ endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(imx258->dev), NULL);
if (!endpoint) {
- dev_err(&client->dev, "Endpoint node not found\n");
+ dev_err(imx258->dev, "Endpoint node not found\n");
return -EINVAL;
}
ret = v4l2_fwnode_endpoint_alloc_parse(endpoint, &ep);
fwnode_handle_put(endpoint);
if (ret) {
- dev_err(&client->dev, "Parsing endpoint node failed\n");
+ dev_err(imx258->dev, "Parsing endpoint node failed\n");
return ret;
}
- ret = v4l2_link_freq_to_bitmap(&client->dev,
+ ret = v4l2_link_freq_to_bitmap(imx258->dev,
ep.link_frequencies,
ep.nr_of_link_frequencies,
imx258->link_freq_menu_items,
ARRAY_SIZE(link_freq_menu_items_19_2),
&imx258->link_freq_bitmap);
if (ret) {
- dev_err(&client->dev, "Link frequency not supported\n");
+ dev_err(imx258->dev, "Link frequency not supported\n");
goto error_endpoint_free;
}
@@ -1438,7 +1431,7 @@ static int imx258_probe(struct i2c_client *client)
imx258->lane_mode_idx = IMX258_4_LANE_MODE;
break;
default:
- dev_err(&client->dev, "Invalid data lanes: %u\n",
+ dev_err(imx258->dev, "Invalid data lanes: %u\n",
ep.bus.mipi_csi2.num_data_lanes);
ret = -EINVAL;
goto error_endpoint_free;
@@ -1446,7 +1439,7 @@ static int imx258_probe(struct i2c_client *client)
imx258->csi2_flags = ep.bus.mipi_csi2.flags;
- imx258->variant_cfg = device_get_match_data(&client->dev);
+ imx258->variant_cfg = device_get_match_data(imx258->dev);
if (!imx258->variant_cfg)
imx258->variant_cfg = &imx258_cfg;
@@ -1454,7 +1447,7 @@ static int imx258_probe(struct i2c_client *client)
v4l2_i2c_subdev_init(&imx258->sd, client, &imx258_subdev_ops);
/* Will be powered off via pm_runtime_idle */
- ret = imx258_power_on(&client->dev);
+ ret = imx258_power_on(imx258->dev);
if (ret)
goto error_endpoint_free;
@@ -1486,9 +1479,9 @@ static int imx258_probe(struct i2c_client *client)
if (ret < 0)
goto error_media_entity;
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(imx258->dev);
+ pm_runtime_enable(imx258->dev);
+ pm_runtime_idle(imx258->dev);
v4l2_fwnode_endpoint_free(&ep);
return 0;
@@ -1500,7 +1493,7 @@ error_handler_free:
imx258_free_controls(imx258);
error_identify:
- imx258_power_off(&client->dev);
+ imx258_power_off(imx258->dev);
error_endpoint_free:
v4l2_fwnode_endpoint_free(&ep);
@@ -1517,10 +1510,10 @@ static void imx258_remove(struct i2c_client *client)
media_entity_cleanup(&sd->entity);
imx258_free_controls(imx258);
- pm_runtime_disable(&client->dev);
- if (!pm_runtime_status_suspended(&client->dev))
- imx258_power_off(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(imx258->dev);
+ if (!pm_runtime_status_suspended(imx258->dev))
+ imx258_power_off(imx258->dev);
+ pm_runtime_set_suspended(imx258->dev);
}
static const struct dev_pm_ops imx258_pm_ops = {
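
Note on the imx258 clock hunks above: the driver previously treated the clock as optional and fell back to reading the "clock-frequency" property by hand. devm_v4l2_sensor_clk_get_legacy(dev, NULL, false, 0) presumably returns a usable clock even in the property-only case, so the fallback collapses to a single rate query:

	val = clk_get_rate(imx258->clk);
	switch (val) {
	case 19200000:
	case 24000000:
		/* pick the matching link-frequency menu */
		break;
	default:
		return -EINVAL;	/* unsupported input clock */
	}
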
diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
index a2b824986027..d86d08c29174 100644
--- a/drivers/media/i2c/imx274.c
+++ b/drivers/media/i2c/imx274.c
@@ -826,6 +826,8 @@ static int imx274_start_stream(struct stimx274 *priv)
* if rst = 0, keep it in reset;
* if rst = 1, bring it out of reset.
*
+ * Note: misinterpretation of reset assertion - do not re-use this code.
+ * The XCLR pin is driven at the wrong logical level for a reset signal.
*/
static void imx274_reset(struct stimx274 *priv, int rst)
{
diff --git a/drivers/media/i2c/imx283.c b/drivers/media/i2c/imx283.c
index da618c8cbadc..8ab63ad8f385 100644
--- a/drivers/media/i2c/imx283.c
+++ b/drivers/media/i2c/imx283.c
@@ -1143,7 +1143,6 @@ static int imx283_enable_streams(struct v4l2_subdev *sd,
return 0;
err_rpm_put:
- pm_runtime_mark_last_busy(imx283->dev);
pm_runtime_put_autosuspend(imx283->dev);
return ret;
@@ -1163,7 +1162,6 @@ static int imx283_disable_streams(struct v4l2_subdev *sd,
if (ret)
dev_err(imx283->dev, "Failed to stop stream\n");
- pm_runtime_mark_last_busy(imx283->dev);
pm_runtime_put_autosuspend(imx283->dev);
return ret;
@@ -1462,11 +1460,10 @@ static int imx283_probe(struct i2c_client *client)
}
/* Get system clock (xclk) */
- imx283->xclk = devm_clk_get(imx283->dev, NULL);
- if (IS_ERR(imx283->xclk)) {
+ imx283->xclk = devm_v4l2_sensor_clk_get(imx283->dev, NULL);
+ if (IS_ERR(imx283->xclk))
return dev_err_probe(imx283->dev, PTR_ERR(imx283->xclk),
"failed to get xclk\n");
- }
xclk_freq = clk_get_rate(imx283->xclk);
for (i = 0; i < ARRAY_SIZE(imx283_frequencies); i++) {
@@ -1558,7 +1555,6 @@ static int imx283_probe(struct i2c_client *client)
* Decrease the PM usage count. The device will get suspended after the
* autosuspend delay, turning the power off.
*/
- pm_runtime_mark_last_busy(imx283->dev);
pm_runtime_put_autosuspend(imx283->dev);
return 0;
diff --git a/drivers/media/i2c/imx290.c b/drivers/media/i2c/imx290.c
index 4f3f386c5353..21cbc81cb2ed 100644
--- a/drivers/media/i2c/imx290.c
+++ b/drivers/media/i2c/imx290.c
@@ -869,7 +869,6 @@ static int imx290_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(imx290->dev);
pm_runtime_put_autosuspend(imx290->dev);
return ret;
@@ -1099,7 +1098,6 @@ static int imx290_set_stream(struct v4l2_subdev *sd, int enable)
}
} else {
imx290_stop_streaming(imx290);
- pm_runtime_mark_last_busy(imx290->dev);
pm_runtime_put_autosuspend(imx290->dev);
}
@@ -1294,7 +1292,6 @@ static int imx290_subdev_init(struct imx290 *imx290)
* will already be prevented even before the delay.
*/
v4l2_i2c_subdev_init(&imx290->sd, client, &imx290_subdev_ops);
- pm_runtime_mark_last_busy(imx290->dev);
pm_runtime_put_autosuspend(imx290->dev);
imx290->sd.internal_ops = &imx290_internal_ops;
@@ -1425,14 +1422,14 @@ static int imx290_get_regulators(struct device *dev, struct imx290 *imx290)
static int imx290_init_clk(struct imx290 *imx290)
{
u32 xclk_freq;
- int ret;
- ret = device_property_read_u32(imx290->dev, "clock-frequency",
- &xclk_freq);
- if (ret) {
- dev_err(imx290->dev, "Could not get xclk frequency\n");
- return ret;
- }
+ imx290->xclk = devm_v4l2_sensor_clk_get_legacy(imx290->dev, "xclk",
+ false, 0);
+ if (IS_ERR(imx290->xclk))
+ return dev_err_probe(imx290->dev, PTR_ERR(imx290->xclk),
+ "Could not get xclk\n");
+
+ xclk_freq = clk_get_rate(imx290->xclk);
/* external clock must be 37.125 MHz or 74.25 MHz */
switch (xclk_freq) {
@@ -1448,12 +1445,6 @@ static int imx290_init_clk(struct imx290 *imx290)
return -EINVAL;
}
- ret = clk_set_rate(imx290->xclk, xclk_freq);
- if (ret) {
- dev_err(imx290->dev, "Could not set xclk frequency\n");
- return ret;
- }
-
return 0;
}
@@ -1599,11 +1590,6 @@ static int imx290_probe(struct i2c_client *client)
return ret;
/* Acquire resources. */
- imx290->xclk = devm_clk_get(dev, "xclk");
- if (IS_ERR(imx290->xclk))
- return dev_err_probe(dev, PTR_ERR(imx290->xclk),
- "Could not get xclk\n");
-
ret = imx290_get_regulators(dev, imx290);
if (ret < 0)
return dev_err_probe(dev, ret, "Cannot get regulators\n");
@@ -1614,7 +1600,7 @@ static int imx290_probe(struct i2c_client *client)
return dev_err_probe(dev, PTR_ERR(imx290->rst_gpio),
"Cannot get reset gpio\n");
- /* Initialize external clock frequency. */
+ /* Initialize external clock. */
ret = imx290_init_clk(imx290);
if (ret)
return ret;
diff --git a/drivers/media/i2c/imx296.c b/drivers/media/i2c/imx296.c
index f3bec16b527c..69636db11a2b 100644
--- a/drivers/media/i2c/imx296.c
+++ b/drivers/media/i2c/imx296.c
@@ -604,7 +604,6 @@ static int imx296_s_stream(struct v4l2_subdev *sd, int enable)
if (!enable) {
ret = imx296_stream_off(sensor);
- pm_runtime_mark_last_busy(sensor->dev);
pm_runtime_put_autosuspend(sensor->dev);
goto unlock;
@@ -922,7 +921,7 @@ static int imx296_read_temperature(struct imx296 *sensor, int *temp)
tmdout &= IMX296_TMDOUT_MASK;
- /* T(°C) = 246.312 - 0.304 * TMDOUT */;
+ /* T(°C) = 246.312 - 0.304 * TMDOUT */
*temp = 246312 - 304 * tmdout;
return imx296_write(sensor, IMX296_TMDCTRL, 0, NULL);
@@ -1044,7 +1043,7 @@ static int imx296_probe(struct i2c_client *client)
return dev_err_probe(sensor->dev, PTR_ERR(sensor->reset),
"failed to get reset GPIO\n");
- sensor->clk = devm_clk_get(sensor->dev, "inck");
+ sensor->clk = devm_v4l2_sensor_clk_get(sensor->dev, "inck");
if (IS_ERR(sensor->clk))
return dev_err_probe(sensor->dev, PTR_ERR(sensor->clk),
"failed to get clock\n");
diff --git a/drivers/media/i2c/imx319.c b/drivers/media/i2c/imx319.c
index 701840f4a5cc..953310ef3046 100644
--- a/drivers/media/i2c/imx319.c
+++ b/drivers/media/i2c/imx319.c
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Intel Corporation
-#include <linux/unaligned.h>
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
@@ -106,11 +108,12 @@ struct imx319_mode {
};
struct imx319_hwcfg {
- u32 ext_clk; /* sensor external clk */
unsigned long link_freq_bitmap;
};
struct imx319 {
+ struct device *dev;
+
struct v4l2_subdev sd;
struct media_pad pad;
@@ -1839,14 +1842,13 @@ static int imx319_write_reg(struct imx319 *imx319, u16 reg, u32 len, u32 val)
static int imx319_write_regs(struct imx319 *imx319,
const struct imx319_reg *regs, u32 len)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx319->sd);
int ret;
u32 i;
for (i = 0; i < len; i++) {
ret = imx319_write_reg(imx319, regs[i].address, 1, regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(imx319->dev,
"write reg 0x%4.4x return err %d",
regs[i].address, ret);
return ret;
@@ -1880,7 +1882,6 @@ static int imx319_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct imx319 *imx319 = container_of(ctrl->handler,
struct imx319, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&imx319->sd);
s64 max;
int ret;
@@ -1899,7 +1900,7 @@ static int imx319_set_ctrl(struct v4l2_ctrl *ctrl)
* Applying V4L2 control value only happens
* when power is up for streaming
*/
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(imx319->dev))
return 0;
switch (ctrl->id) {
@@ -1933,12 +1934,12 @@ static int imx319_set_ctrl(struct v4l2_ctrl *ctrl)
break;
default:
ret = -EINVAL;
- dev_info(&client->dev, "ctrl(id:0x%x,val:0x%x) is not handled",
+ dev_info(imx319->dev, "ctrl(id:0x%x,val:0x%x) is not handled",
ctrl->id, ctrl->val);
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx319->dev);
return ret;
}
@@ -2087,7 +2088,6 @@ imx319_set_pad_format(struct v4l2_subdev *sd,
/* Verify chip ID */
static int imx319_identify_module(struct imx319 *imx319)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx319->sd);
int ret;
u32 val;
@@ -2099,7 +2099,7 @@ static int imx319_identify_module(struct imx319 *imx319)
return ret;
if (val != IMX319_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ dev_err(imx319->dev, "chip id mismatch: %x!=%x",
IMX319_CHIP_ID, val);
return -EIO;
}
@@ -2112,7 +2112,6 @@ static int imx319_identify_module(struct imx319 *imx319)
/* Start streaming */
static int imx319_start_streaming(struct imx319 *imx319)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx319->sd);
const struct imx319_reg_list *reg_list;
int ret;
@@ -2124,7 +2123,7 @@ static int imx319_start_streaming(struct imx319 *imx319)
reg_list = &imx319_global_setting;
ret = imx319_write_regs(imx319, reg_list->regs, reg_list->num_of_regs);
if (ret) {
- dev_err(&client->dev, "failed to set global settings");
+ dev_err(imx319->dev, "failed to set global settings");
return ret;
}
@@ -2132,7 +2131,7 @@ static int imx319_start_streaming(struct imx319 *imx319)
reg_list = &imx319->cur_mode->reg_list;
ret = imx319_write_regs(imx319, reg_list->regs, reg_list->num_of_regs);
if (ret) {
- dev_err(&client->dev, "failed to set mode");
+ dev_err(imx319->dev, "failed to set mode");
return ret;
}
@@ -2160,13 +2159,12 @@ static int imx319_stop_streaming(struct imx319 *imx319)
static int imx319_set_stream(struct v4l2_subdev *sd, int enable)
{
struct imx319 *imx319 = to_imx319(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&imx319->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(imx319->dev);
if (ret < 0)
goto err_unlock;
@@ -2179,7 +2177,7 @@ static int imx319_set_stream(struct v4l2_subdev *sd, int enable)
goto err_rpm_put;
} else {
imx319_stop_streaming(imx319);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx319->dev);
}
/* vflip and hflip cannot change during streaming */
@@ -2191,7 +2189,7 @@ static int imx319_set_stream(struct v4l2_subdev *sd, int enable)
return ret;
err_rpm_put:
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx319->dev);
err_unlock:
mutex_unlock(&imx319->mutex);
@@ -2231,7 +2229,6 @@ static const struct v4l2_subdev_internal_ops imx319_internal_ops = {
/* Initialize control handlers */
static int imx319_init_controls(struct imx319 *imx319)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx319->sd);
struct v4l2_ctrl_handler *ctrl_hdlr;
s64 exposure_max;
s64 vblank_def;
@@ -2311,7 +2308,7 @@ static int imx319_init_controls(struct imx319 *imx319)
0, 0, imx319_test_pattern_menu);
if (ctrl_hdlr->error) {
ret = ctrl_hdlr->error;
- dev_err(&client->dev, "control init failed: %d", ret);
+ dev_err(imx319->dev, "control init failed: %d", ret);
goto error;
}
@@ -2350,20 +2347,6 @@ static struct imx319_hwcfg *imx319_get_hwcfg(struct device *dev)
if (!cfg)
goto out_err;
- ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
- &cfg->ext_clk);
- if (ret) {
- dev_err(dev, "can't get clock frequency");
- goto out_err;
- }
-
- dev_dbg(dev, "ext clk: %d", cfg->ext_clk);
- if (cfg->ext_clk != IMX319_EXT_CLK) {
- dev_err(dev, "external clock %d is not supported",
- cfg->ext_clk);
- goto out_err;
- }
-
ret = v4l2_link_freq_to_bitmap(dev, bus_cfg.link_frequencies,
bus_cfg.nr_of_link_frequencies,
link_freq_menu_items,
@@ -2385,6 +2368,8 @@ out_err:
static int imx319_probe(struct i2c_client *client)
{
struct imx319 *imx319;
+ unsigned long freq;
+ struct clk *clk;
bool full_power;
int ret;
@@ -2392,24 +2377,37 @@ static int imx319_probe(struct i2c_client *client)
if (!imx319)
return -ENOMEM;
+ imx319->dev = &client->dev;
+
mutex_init(&imx319->mutex);
+ clk = devm_v4l2_sensor_clk_get(imx319->dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(imx319->dev, PTR_ERR(clk),
+ "failed to acquire clock\n");
+
+ freq = clk_get_rate(clk);
+ if (freq != IMX319_EXT_CLK)
+ return dev_err_probe(imx319->dev, -EINVAL,
+ "external clock %lu is not supported",
+ freq);
+
/* Initialize subdev */
v4l2_i2c_subdev_init(&imx319->sd, client, &imx319_subdev_ops);
- full_power = acpi_dev_state_d0(&client->dev);
+ full_power = acpi_dev_state_d0(imx319->dev);
if (full_power) {
/* Check module identity */
ret = imx319_identify_module(imx319);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
+ dev_err(imx319->dev, "failed to find sensor: %d", ret);
goto error_probe;
}
}
- imx319->hwcfg = imx319_get_hwcfg(&client->dev);
+ imx319->hwcfg = imx319_get_hwcfg(imx319->dev);
if (!imx319->hwcfg) {
- dev_err(&client->dev, "failed to get hwcfg");
+ dev_err(imx319->dev, "failed to get hwcfg");
ret = -ENODEV;
goto error_probe;
}
@@ -2419,7 +2417,7 @@ static int imx319_probe(struct i2c_client *client)
ret = imx319_init_controls(imx319);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d", ret);
+ dev_err(imx319->dev, "failed to init controls: %d", ret);
goto error_probe;
}
@@ -2434,27 +2432,27 @@ static int imx319_probe(struct i2c_client *client)
imx319->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&imx319->sd.entity, 1, &imx319->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ dev_err(imx319->dev, "failed to init entity pads: %d", ret);
goto error_handler_free;
}
/* Set the device's state to active if it's in D0 state. */
if (full_power)
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
+ pm_runtime_set_active(imx319->dev);
+ pm_runtime_enable(imx319->dev);
ret = v4l2_async_register_subdev_sensor(&imx319->sd);
if (ret < 0)
goto error_media_entity_pm;
- pm_runtime_idle(&client->dev);
+ pm_runtime_idle(imx319->dev);
return 0;
error_media_entity_pm:
- pm_runtime_disable(&client->dev);
+ pm_runtime_disable(imx319->dev);
if (full_power)
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_set_suspended(imx319->dev);
media_entity_cleanup(&imx319->sd.entity);
error_handler_free:
@@ -2475,9 +2473,9 @@ static void imx319_remove(struct i2c_client *client)
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
- if (!pm_runtime_status_suspended(&client->dev))
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(imx319->dev);
+ if (!pm_runtime_status_suspended(imx319->dev))
+ pm_runtime_set_suspended(imx319->dev);
mutex_destroy(&imx319->mutex);
}
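
A recurring change in the imx319 conversion (repeated later for imx355) is caching the device pointer in the driver state instead of re-deriving it through v4l2_get_subdevdata() at every call site. A sketch of the pattern, with hypothetical names:

	#include <linux/device.h>
	#include <linux/i2c.h>
	#include <media/v4l2-subdev.h>

	struct my_sensor {
		struct device *dev;	/* cached once at probe time */
		struct v4l2_subdev sd;
	};

	static int my_sensor_probe(struct i2c_client *client)
	{
		struct my_sensor *sensor;

		sensor = devm_kzalloc(&client->dev, sizeof(*sensor),
				      GFP_KERNEL);
		if (!sensor)
			return -ENOMEM;

		/* Later call sites use sensor->dev directly. */
		sensor->dev = &client->dev;
		return 0;
	}
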
diff --git a/drivers/media/i2c/imx334.c b/drivers/media/i2c/imx334.c
index 846b9928d4e8..9654f9268056 100644
--- a/drivers/media/i2c/imx334.c
+++ b/drivers/media/i2c/imx334.c
@@ -118,6 +118,9 @@
#define IMX334_REG_TP CCI_REG8(0x329e)
#define IMX334_TP_COLOR_HBARS 0xa
#define IMX334_TP_COLOR_VBARS 0xb
+#define IMX334_TP_BLACK 0x0
+#define IMX334_TP_WHITE 0x1
+#define IMX334_TP_BLACK_GREY 0xc
#define IMX334_TPG_EN_DOUT CCI_REG8(0x329c)
#define IMX334_TP_ENABLE 0x1
@@ -398,12 +401,18 @@ static const char * const imx334_test_pattern_menu[] = {
"Disabled",
"Vertical Color Bars",
"Horizontal Color Bars",
+ "Black and Grey Bars",
+ "Black Color",
+ "White Color",
};
static const int imx334_test_pattern_val[] = {
IMX334_TP_DISABLE,
IMX334_TP_COLOR_HBARS,
IMX334_TP_COLOR_VBARS,
+ IMX334_TP_BLACK_GREY,
+ IMX334_TP_BLACK,
+ IMX334_TP_WHITE,
};
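
Since the menu strings and register values above are parallel arrays indexed by the same control value, a compile-time guard could keep them in sync. A suggestion, not part of the patch; it would sit in the driver next to the arrays:

	#include <linux/array_size.h>
	#include <linux/build_bug.h>

	static_assert(ARRAY_SIZE(imx334_test_pattern_menu) ==
		      ARRAY_SIZE(imx334_test_pattern_val));
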
static const struct cci_reg_sequence raw10_framefmt_regs[] = {
@@ -997,7 +1006,7 @@ static int imx334_parse_hw_config(struct imx334 *imx334)
"failed to get reset gpio\n");
/* Get sensor input clock */
- imx334->inclk = devm_clk_get(imx334->dev, NULL);
+ imx334->inclk = devm_v4l2_sensor_clk_get(imx334->dev, NULL);
if (IS_ERR(imx334->inclk))
return dev_err_probe(imx334->dev, PTR_ERR(imx334->inclk),
"could not get inclk\n");
@@ -1070,6 +1079,10 @@ static int imx334_power_on(struct device *dev)
struct imx334 *imx334 = to_imx334(sd);
int ret;
+ /*
+ * Note: the reset assertion is misinterpreted here - do not re-use this
+ * code. The XCLR pin is driven at the incorrect (for a reset signal)
+ * logical level.
+ */
gpiod_set_value_cansleep(imx334->reset_gpio, 1);
ret = clk_prepare_enable(imx334->inclk);
diff --git a/drivers/media/i2c/imx335.c b/drivers/media/i2c/imx335.c
index 9b4db4cd4929..c043df2f15fb 100644
--- a/drivers/media/i2c/imx335.c
+++ b/drivers/media/i2c/imx335.c
@@ -1026,11 +1026,10 @@ static int imx335_parse_hw_config(struct imx335 *imx335)
}
/* Get sensor input clock */
- imx335->inclk = devm_clk_get(imx335->dev, NULL);
- if (IS_ERR(imx335->inclk)) {
- dev_err(imx335->dev, "could not get inclk\n");
- return PTR_ERR(imx335->inclk);
- }
+ imx335->inclk = devm_v4l2_sensor_clk_get(imx335->dev, NULL);
+ if (IS_ERR(imx335->inclk))
+ return dev_err_probe(imx335->dev, PTR_ERR(imx335->inclk),
+ "could not get inclk\n");
rate = clk_get_rate(imx335->inclk);
if (rate != IMX335_INCLK_RATE) {
diff --git a/drivers/media/i2c/imx355.c b/drivers/media/i2c/imx355.c
index b2dce67c0b6b..776107efe386 100644
--- a/drivers/media/i2c/imx355.c
+++ b/drivers/media/i2c/imx355.c
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Intel Corporation
-#include <linux/unaligned.h>
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
@@ -92,11 +94,13 @@ struct imx355_mode {
};
struct imx355_hwcfg {
- u32 ext_clk; /* sensor external clk */
unsigned long link_freq_bitmap;
};
struct imx355 {
+ struct device *dev;
+ struct clk *clk;
+
struct v4l2_subdev sd;
struct media_pad pad;
@@ -1136,14 +1140,13 @@ static int imx355_write_reg(struct imx355 *imx355, u16 reg, u32 len, u32 val)
static int imx355_write_regs(struct imx355 *imx355,
const struct imx355_reg *regs, u32 len)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx355->sd);
int ret;
u32 i;
for (i = 0; i < len; i++) {
ret = imx355_write_reg(imx355, regs[i].address, 1, regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(imx355->dev,
"write reg 0x%4.4x return err %d",
regs[i].address, ret);
@@ -1178,7 +1181,6 @@ static int imx355_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct imx355 *imx355 = container_of(ctrl->handler,
struct imx355, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&imx355->sd);
s64 max;
int ret;
@@ -1197,7 +1199,7 @@ static int imx355_set_ctrl(struct v4l2_ctrl *ctrl)
* Applying V4L2 control value only happens
* when power is up for streaming
*/
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(imx355->dev))
return 0;
switch (ctrl->id) {
@@ -1231,12 +1233,12 @@ static int imx355_set_ctrl(struct v4l2_ctrl *ctrl)
break;
default:
ret = -EINVAL;
- dev_info(&client->dev, "ctrl(id:0x%x,val:0x%x) is not handled",
+ dev_info(imx355->dev, "ctrl(id:0x%x,val:0x%x) is not handled",
ctrl->id, ctrl->val);
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx355->dev);
return ret;
}
@@ -1385,7 +1387,6 @@ imx355_set_pad_format(struct v4l2_subdev *sd,
/* Start streaming */
static int imx355_start_streaming(struct imx355 *imx355)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx355->sd);
const struct imx355_reg_list *reg_list;
int ret;
@@ -1393,7 +1394,7 @@ static int imx355_start_streaming(struct imx355 *imx355)
reg_list = &imx355_global_setting;
ret = imx355_write_regs(imx355, reg_list->regs, reg_list->num_of_regs);
if (ret) {
- dev_err(&client->dev, "failed to set global settings");
+ dev_err(imx355->dev, "failed to set global settings");
return ret;
}
@@ -1401,7 +1402,7 @@ static int imx355_start_streaming(struct imx355 *imx355)
reg_list = &imx355->cur_mode->reg_list;
ret = imx355_write_regs(imx355, reg_list->regs, reg_list->num_of_regs);
if (ret) {
- dev_err(&client->dev, "failed to set mode");
+ dev_err(imx355->dev, "failed to set mode");
return ret;
}
@@ -1429,13 +1430,12 @@ static int imx355_stop_streaming(struct imx355 *imx355)
static int imx355_set_stream(struct v4l2_subdev *sd, int enable)
{
struct imx355 *imx355 = to_imx355(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&imx355->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(imx355->dev);
if (ret < 0)
goto err_unlock;
@@ -1448,7 +1448,7 @@ static int imx355_set_stream(struct v4l2_subdev *sd, int enable)
goto err_rpm_put;
} else {
imx355_stop_streaming(imx355);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx355->dev);
}
/* vflip and hflip cannot change during streaming */
@@ -1460,7 +1460,7 @@ static int imx355_set_stream(struct v4l2_subdev *sd, int enable)
return ret;
err_rpm_put:
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx355->dev);
err_unlock:
mutex_unlock(&imx355->mutex);
@@ -1470,7 +1470,6 @@ err_unlock:
/* Verify chip ID */
static int imx355_identify_module(struct imx355 *imx355)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx355->sd);
int ret;
u32 val;
@@ -1479,7 +1478,7 @@ static int imx355_identify_module(struct imx355 *imx355)
return ret;
if (val != IMX355_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ dev_err(imx355->dev, "chip id mismatch: %x!=%x",
IMX355_CHIP_ID, val);
return -EIO;
}
@@ -1519,7 +1518,6 @@ static const struct v4l2_subdev_internal_ops imx355_internal_ops = {
/* Initialize control handlers */
static int imx355_init_controls(struct imx355 *imx355)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx355->sd);
struct v4l2_fwnode_device_properties props;
struct v4l2_ctrl_handler *ctrl_hdlr;
s64 exposure_max;
@@ -1600,11 +1598,11 @@ static int imx355_init_controls(struct imx355 *imx355)
0, 0, imx355_test_pattern_menu);
if (ctrl_hdlr->error) {
ret = ctrl_hdlr->error;
- dev_err(&client->dev, "control init failed: %d", ret);
+ dev_err(imx355->dev, "control init failed: %d", ret);
goto error;
}
- ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ ret = v4l2_fwnode_device_parse(imx355->dev, &props);
if (ret)
goto error;
@@ -1648,20 +1646,6 @@ static struct imx355_hwcfg *imx355_get_hwcfg(struct device *dev)
if (!cfg)
goto out_err;
- ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
- &cfg->ext_clk);
- if (ret) {
- dev_err(dev, "can't get clock frequency");
- goto out_err;
- }
-
- dev_dbg(dev, "ext clk: %d", cfg->ext_clk);
- if (cfg->ext_clk != IMX355_EXT_CLK) {
- dev_err(dev, "external clock %d is not supported",
- cfg->ext_clk);
- goto out_err;
- }
-
ret = v4l2_link_freq_to_bitmap(dev, bus_cfg.link_frequencies,
bus_cfg.nr_of_link_frequencies,
link_freq_menu_items,
@@ -1683,27 +1667,41 @@ out_err:
static int imx355_probe(struct i2c_client *client)
{
struct imx355 *imx355;
+ unsigned long freq;
int ret;
imx355 = devm_kzalloc(&client->dev, sizeof(*imx355), GFP_KERNEL);
if (!imx355)
return -ENOMEM;
+ imx355->dev = &client->dev;
+
mutex_init(&imx355->mutex);
+ imx355->clk = devm_v4l2_sensor_clk_get(imx355->dev, NULL);
+ if (IS_ERR(imx355->clk))
+ return dev_err_probe(imx355->dev, PTR_ERR(imx355->clk),
+ "failed to get clock\n");
+
+ freq = clk_get_rate(imx355->clk);
+ if (freq != IMX355_EXT_CLK)
+ return dev_err_probe(imx355->dev, -EINVAL,
+ "external clock %lu is not supported\n",
+ freq);
+
/* Initialize subdev */
v4l2_i2c_subdev_init(&imx355->sd, client, &imx355_subdev_ops);
/* Check module identity */
ret = imx355_identify_module(imx355);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
+ dev_err(imx355->dev, "failed to find sensor: %d", ret);
goto error_probe;
}
- imx355->hwcfg = imx355_get_hwcfg(&client->dev);
+ imx355->hwcfg = imx355_get_hwcfg(imx355->dev);
if (!imx355->hwcfg) {
- dev_err(&client->dev, "failed to get hwcfg");
+ dev_err(imx355->dev, "failed to get hwcfg");
ret = -ENODEV;
goto error_probe;
}
@@ -1713,7 +1711,7 @@ static int imx355_probe(struct i2c_client *client)
ret = imx355_init_controls(imx355);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d", ret);
+ dev_err(imx355->dev, "failed to init controls: %d", ret);
goto error_probe;
}
@@ -1728,7 +1726,7 @@ static int imx355_probe(struct i2c_client *client)
imx355->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&imx355->sd.entity, 1, &imx355->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ dev_err(imx355->dev, "failed to init entity pads: %d", ret);
goto error_handler_free;
}
@@ -1736,9 +1734,9 @@ static int imx355_probe(struct i2c_client *client)
* Device is already turned on by i2c-core with ACPI domain PM.
* Enable runtime PM and turn off the device.
*/
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(imx355->dev);
+ pm_runtime_enable(imx355->dev);
+ pm_runtime_idle(imx355->dev);
ret = v4l2_async_register_subdev_sensor(&imx355->sd);
if (ret < 0)
@@ -1747,8 +1745,8 @@ static int imx355_probe(struct i2c_client *client)
return 0;
error_media_entity_runtime_pm:
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(imx355->dev);
+ pm_runtime_set_suspended(imx355->dev);
media_entity_cleanup(&imx355->sd.entity);
error_handler_free:
@@ -1769,8 +1767,8 @@ static void imx355_remove(struct i2c_client *client)
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(imx355->dev);
+ pm_runtime_set_suspended(imx355->dev);
mutex_destroy(&imx355->mutex);
}
diff --git a/drivers/media/i2c/imx412.c b/drivers/media/i2c/imx412.c
index c74097a59c42..7bbd639a9ddf 100644
--- a/drivers/media/i2c/imx412.c
+++ b/drivers/media/i2c/imx412.c
@@ -933,11 +933,10 @@ static int imx412_parse_hw_config(struct imx412 *imx412)
}
/* Get sensor input clock */
- imx412->inclk = devm_clk_get(imx412->dev, NULL);
- if (IS_ERR(imx412->inclk)) {
- dev_err(imx412->dev, "could not get inclk\n");
- return PTR_ERR(imx412->inclk);
- }
+ imx412->inclk = devm_v4l2_sensor_clk_get(imx412->dev, NULL);
+ if (IS_ERR(imx412->inclk))
+ return dev_err_probe(imx412->dev, PTR_ERR(imx412->inclk),
+ "could not get inclk\n");
rate = clk_get_rate(imx412->inclk);
if (rate != IMX412_INCLK_RATE) {
diff --git a/drivers/media/i2c/imx415.c b/drivers/media/i2c/imx415.c
index 278e743646ea..0b424c17e880 100644
--- a/drivers/media/i2c/imx415.c
+++ b/drivers/media/i2c/imx415.c
@@ -952,7 +952,6 @@ static int imx415_s_stream(struct v4l2_subdev *sd, int enable)
if (!enable) {
ret = imx415_stream_off(sensor);
- pm_runtime_mark_last_busy(sensor->dev);
pm_runtime_put_autosuspend(sensor->dev);
goto unlock;
@@ -1251,7 +1250,7 @@ static int imx415_parse_hw_config(struct imx415 *sensor)
return dev_err_probe(sensor->dev, PTR_ERR(sensor->reset),
"failed to get reset GPIO\n");
- sensor->clk = devm_clk_get(sensor->dev, NULL);
+ sensor->clk = devm_v4l2_sensor_clk_get(sensor->dev, NULL);
if (IS_ERR(sensor->clk))
return dev_err_probe(sensor->dev, PTR_ERR(sensor->clk),
"failed to get clock\n");
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index c84e1e0e6109..5588cdd7ec20 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -321,9 +321,9 @@ static int get_key_avermedia_cardbus(struct IR_i2c *ir, enum rc_proto *protocol,
static int ir_key_poll(struct IR_i2c *ir)
{
- enum rc_proto protocol;
- u32 scancode;
- u8 toggle;
+ enum rc_proto protocol = 0;
+ u32 scancode = 0;
+ u8 toggle = 0;
int rc;
dev_dbg(&ir->rc->dev, "%s\n", __func__);
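
The ir-kbd-i2c change initializes the out-parameters because the various get_key backends may return without writing them; zero-initializing avoids acting on (and being warned about) indeterminate values. A minimal sketch of the failure mode, with hypothetical names:

	#include <linux/types.h>

	/* Hypothetical backend: returns 0 (no key) without touching outputs. */
	static int demo_get_key(u32 *scancode, u8 *toggle)
	{
		return 0;	/* *scancode and *toggle left unwritten */
	}

	static int demo_poll(void)
	{
		u32 scancode = 0;	/* initialized, as in the hunk above */
		u8 toggle = 0;
		int rc = demo_get_key(&scancode, &toggle);

		if (rc <= 0)
			return rc;	/* outputs may be unwritten here */
		/* rc > 0: scancode and toggle now hold defined values */
		return 0;
	}
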
diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
index 1d0b5f56f989..7c0961688d61 100644
--- a/drivers/media/i2c/max9286.c
+++ b/drivers/media/i2c/max9286.c
@@ -1220,7 +1220,7 @@ static int max9286_register_gpio(struct max9286_priv *priv)
gpio->owner = THIS_MODULE;
gpio->ngpio = 2;
gpio->base = -1;
- gpio->set_rv = max9286_gpiochip_set;
+ gpio->set = max9286_gpiochip_set;
gpio->get = max9286_gpiochip_get;
gpio->can_sleep = true;
diff --git a/drivers/media/i2c/max96717.c b/drivers/media/i2c/max96717.c
index 015e42fbe246..c8ae7890d9fa 100644
--- a/drivers/media/i2c/max96717.c
+++ b/drivers/media/i2c/max96717.c
@@ -355,7 +355,7 @@ static int max96717_gpiochip_probe(struct max96717_priv *priv)
gc->get_direction = max96717_gpio_get_direction;
gc->direction_input = max96717_gpio_direction_in;
gc->direction_output = max96717_gpio_direction_out;
- gc->set_rv = max96717_gpiochip_set;
+ gc->set = max96717_gpiochip_set;
gc->get = max96717_gpiochip_get;
/* Disable GPIO forwarding */
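
The max9286 and max96717 hunks rename the GPIO setter hook from .set_rv back to .set; the int-returning callback appears to have taken over the .set name once the old void-returning variant was retired. A sketch of a conforming setter, with hypothetical names:

	#include <linux/gpio/driver.h>

	/* Int-returning setter: errors now propagate to gpiolib. */
	static int demo_gpiochip_set(struct gpio_chip *gc, unsigned int offset,
				     int value)
	{
		return 0;
	}

	static void demo_gpiochip_init(struct gpio_chip *gc)
	{
		gc->set = demo_gpiochip_set;	/* formerly gc->set_rv */
	}
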
diff --git a/drivers/media/i2c/mt9m001.c b/drivers/media/i2c/mt9m001.c
index 12d3e86bdc0f..7a6114d18dfc 100644
--- a/drivers/media/i2c/mt9m001.c
+++ b/drivers/media/i2c/mt9m001.c
@@ -743,9 +743,10 @@ static int mt9m001_probe(struct i2c_client *client)
if (!mt9m001)
return -ENOMEM;
- mt9m001->clk = devm_clk_get(&client->dev, NULL);
+ mt9m001->clk = devm_v4l2_sensor_clk_get(&client->dev, NULL);
if (IS_ERR(mt9m001->clk))
- return PTR_ERR(mt9m001->clk);
+ return dev_err_probe(&client->dev, PTR_ERR(mt9m001->clk),
+ "failed to get the clock\n");
mt9m001->standby_gpio = devm_gpiod_get_optional(&client->dev, "standby",
GPIOD_OUT_LOW);
diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c
index 9aa5dcda3805..05dcf37c6f01 100644
--- a/drivers/media/i2c/mt9m111.c
+++ b/drivers/media/i2c/mt9m111.c
@@ -1279,9 +1279,10 @@ static int mt9m111_probe(struct i2c_client *client)
return ret;
}
- mt9m111->clk = devm_clk_get(&client->dev, "mclk");
+ mt9m111->clk = devm_v4l2_sensor_clk_get(&client->dev, "mclk");
if (IS_ERR(mt9m111->clk))
- return PTR_ERR(mt9m111->clk);
+ return dev_err_probe(&client->dev, PTR_ERR(mt9m111->clk),
+ "failed to get mclk\n");
mt9m111->regulator = devm_regulator_get(&client->dev, "vdd");
if (IS_ERR(mt9m111->regulator)) {
diff --git a/drivers/media/i2c/mt9m114.c b/drivers/media/i2c/mt9m114.c
index 3f540ca40f3c..51ebbe7ae996 100644
--- a/drivers/media/i2c/mt9m114.c
+++ b/drivers/media/i2c/mt9m114.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/types.h>
@@ -42,6 +43,9 @@
#define MT9M114_RESET_AND_MISC_CONTROL CCI_REG16(0x001a)
#define MT9M114_RESET_SOC BIT(0)
#define MT9M114_PAD_SLEW CCI_REG16(0x001e)
+#define MT9M114_PAD_SLEW_MIN 0
+#define MT9M114_PAD_SLEW_MAX 7
+#define MT9M114_PAD_SLEW_DEFAULT 7
#define MT9M114_PAD_CONTROL CCI_REG16(0x0032)
/* XDMA registers */
@@ -388,6 +392,7 @@ struct mt9m114 {
unsigned int pixrate;
bool streaming;
+ u32 pad_slew_rate;
/* Pixel Array */
struct {
@@ -645,9 +650,6 @@ static const struct cci_reg_sequence mt9m114_init[] = {
{ MT9M114_CAM_SENSOR_CFG_FINE_INTEG_TIME_MAX, 1459 },
{ MT9M114_CAM_SENSOR_CFG_FINE_CORRECTION, 96 },
{ MT9M114_CAM_SENSOR_CFG_REG_0_DATA, 32 },
-
- /* Miscellaneous settings */
- { MT9M114_PAD_SLEW, 0x0777 },
};
/* -----------------------------------------------------------------------------
@@ -779,6 +781,13 @@ static int mt9m114_initialize(struct mt9m114 *sensor)
if (ret < 0)
return ret;
+ value = sensor->pad_slew_rate
+ | sensor->pad_slew_rate << 4
+ | sensor->pad_slew_rate << 8;
+ cci_write(sensor->regmap, MT9M114_PAD_SLEW, value, &ret);
+ if (ret < 0)
+ return ret;
+
ret = mt9m114_set_state(sensor, MT9M114_SYS_STATE_ENTER_CONFIG_CHANGE);
if (ret < 0)
return ret;
@@ -974,7 +983,6 @@ static int mt9m114_start_streaming(struct mt9m114 *sensor,
return 0;
error:
- pm_runtime_mark_last_busy(&sensor->client->dev);
pm_runtime_put_autosuspend(&sensor->client->dev);
return ret;
@@ -988,7 +996,6 @@ static int mt9m114_stop_streaming(struct mt9m114 *sensor)
ret = mt9m114_set_state(sensor, MT9M114_SYS_STATE_ENTER_SUSPEND);
- pm_runtime_mark_last_busy(&sensor->client->dev);
pm_runtime_put_autosuspend(&sensor->client->dev);
return ret;
@@ -1046,7 +1053,6 @@ static int mt9m114_pa_g_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(&sensor->client->dev);
pm_runtime_put_autosuspend(&sensor->client->dev);
return ret;
@@ -1113,7 +1119,6 @@ static int mt9m114_pa_s_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(&sensor->client->dev);
pm_runtime_put_autosuspend(&sensor->client->dev);
return ret;
@@ -1287,6 +1292,7 @@ static int mt9m114_pa_set_selection(struct v4l2_subdev *sd,
struct mt9m114 *sensor = pa_to_mt9m114(sd);
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
+ int ret = 0;
if (sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
@@ -1302,25 +1308,41 @@ static int mt9m114_pa_set_selection(struct v4l2_subdev *sd,
* binning, but binning is configured after setting the selection, so
* we can't know here if it will be used.
*/
- crop->left = ALIGN(sel->r.left, 4);
- crop->top = ALIGN(sel->r.top, 2);
- crop->width = clamp_t(unsigned int, ALIGN(sel->r.width, 4),
- MT9M114_PIXEL_ARRAY_MIN_OUTPUT_WIDTH,
- MT9M114_PIXEL_ARRAY_WIDTH - crop->left);
- crop->height = clamp_t(unsigned int, ALIGN(sel->r.height, 2),
- MT9M114_PIXEL_ARRAY_MIN_OUTPUT_HEIGHT,
- MT9M114_PIXEL_ARRAY_HEIGHT - crop->top);
-
- sel->r = *crop;
+ sel->r.left = ALIGN(sel->r.left, 4);
+ sel->r.top = ALIGN(sel->r.top, 2);
+ sel->r.width = clamp_t(unsigned int, ALIGN(sel->r.width, 4),
+ MT9M114_PIXEL_ARRAY_MIN_OUTPUT_WIDTH,
+ MT9M114_PIXEL_ARRAY_WIDTH - sel->r.left);
+ sel->r.height = clamp_t(unsigned int, ALIGN(sel->r.height, 2),
+ MT9M114_PIXEL_ARRAY_MIN_OUTPUT_HEIGHT,
+ MT9M114_PIXEL_ARRAY_HEIGHT - sel->r.top);
+
+ /* Changing the selection size is not allowed in streaming state. */
+ if (sensor->streaming &&
+ (sel->r.height != crop->height || sel->r.width != crop->width))
+ return -EBUSY;
+
+ *crop = sel->r;
/* Reset the format. */
format->width = crop->width;
format->height = crop->height;
- if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- mt9m114_pa_ctrl_update_blanking(sensor, format);
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return ret;
- return 0;
+ mt9m114_pa_ctrl_update_blanking(sensor, format);
+
+ /* Apply values immediately if streaming. */
+ if (sensor->streaming) {
+ ret = mt9m114_configure_pa(sensor, state);
+ if (ret)
+ return ret;
+ /* Changing the cropping config requires a CONFIG_CHANGE. */
+ ret = mt9m114_set_state(sensor,
+ MT9M114_SYS_STATE_ENTER_CONFIG_CHANGE);
+ }
+ return ret;
}
static const struct v4l2_subdev_pad_ops mt9m114_pa_pad_ops = {
@@ -1565,7 +1587,6 @@ static int mt9m114_ifp_s_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(&sensor->client->dev);
pm_runtime_put_autosuspend(&sensor->client->dev);
return ret;
@@ -2365,6 +2386,17 @@ static int mt9m114_parse_dt(struct mt9m114 *sensor)
goto error;
}
+ sensor->pad_slew_rate = MT9M114_PAD_SLEW_DEFAULT;
+ device_property_read_u32(&sensor->client->dev, "slew-rate",
+ &sensor->pad_slew_rate);
+
+ if (sensor->pad_slew_rate < MT9M114_PAD_SLEW_MIN ||
+ sensor->pad_slew_rate > MT9M114_PAD_SLEW_MAX) {
+ dev_err(&sensor->client->dev, "Invalid slew-rate %u\n",
+ sensor->pad_slew_rate);
+ return -EINVAL;
+ }
+
return 0;
error:
@@ -2395,10 +2427,10 @@ static int mt9m114_probe(struct i2c_client *client)
return ret;
/* Acquire clocks, GPIOs and regulators. */
- sensor->clk = devm_clk_get(dev, NULL);
+ sensor->clk = devm_v4l2_sensor_clk_get(dev, NULL);
if (IS_ERR(sensor->clk)) {
- ret = PTR_ERR(sensor->clk);
- dev_err_probe(dev, ret, "Failed to get clock\n");
+ ret = dev_err_probe(dev, PTR_ERR(sensor->clk),
+ "Failed to get clock\n");
goto error_ep_free;
}
@@ -2472,7 +2504,6 @@ static int mt9m114_probe(struct i2c_client *client)
* Decrease the PM usage count. The device will get suspended after the
* autosuspend delay, turning the power off.
*/
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
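
The mt9m114 slew-rate handling above reads an optional "slew-rate" property (defaulting to 7) and replicates the value into three 4-bit fields; with the default, 7 | 7 << 4 | 7 << 8 reproduces the 0x0777 that was previously hard-coded in the init table. A sketch of the field packing, assuming the caller has already validated the 0..7 range:

	#include <linux/types.h>

	/* Replicate one per-pad slew rate (0..7) into all three fields. */
	static u16 demo_pad_slew_value(u32 rate)
	{
		return rate | rate << 4 | rate << 8;
	}

	/* demo_pad_slew_value(7) == 0x0777, matching the old constant. */
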
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
index 4ef5fb06131d..1500ee4db47e 100644
--- a/drivers/media/i2c/mt9p031.c
+++ b/drivers/media/i2c/mt9p031.c
@@ -233,9 +233,10 @@ static int mt9p031_clk_setup(struct mt9p031 *mt9p031)
unsigned long ext_freq;
int ret;
- mt9p031->clk = devm_clk_get(&client->dev, NULL);
+ mt9p031->clk = devm_v4l2_sensor_clk_get(&client->dev, NULL);
if (IS_ERR(mt9p031->clk))
- return PTR_ERR(mt9p031->clk);
+ return dev_err_probe(&client->dev, PTR_ERR(mt9p031->clk),
+ "failed to get the clock\n");
ret = clk_set_rate(mt9p031->clk, mt9p031->ext_freq);
if (ret < 0)
@@ -1092,6 +1093,7 @@ static int mt9p031_parse_properties(struct mt9p031 *mt9p031, struct device *dev)
static int mt9p031_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
+ const struct mt9p031_model_info *info;
struct mt9p031 *mt9p031;
unsigned int i;
int ret;
@@ -1112,7 +1114,8 @@ static int mt9p031_probe(struct i2c_client *client)
mt9p031->output_control = MT9P031_OUTPUT_CONTROL_DEF;
mt9p031->mode2 = MT9P031_READ_MODE_2_ROW_BLC;
- mt9p031->code = (uintptr_t)device_get_match_data(&client->dev);
+ info = device_get_match_data(&client->dev);
+ mt9p031->code = info->code;
mt9p031->regulators[0].supply = "vdd";
mt9p031->regulators[1].supply = "vdd_io";
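
mt9p031 now expects device_get_match_data() to return a struct mt9p031_model_info rather than a media bus code cast through uintptr_t; this presumably accompanies a conversion of the match tables not shown here. A sketch of the retrieval, with a hypothetical struct layout and a defensive NULL check the hunk itself omits:

	#include <linux/property.h>
	#include <linux/types.h>

	struct demo_model_info {
		u32 code;	/* hypothetical: media bus format code */
	};

	static int demo_get_code(struct device *dev, u32 *code)
	{
		const struct demo_model_info *info = device_get_match_data(dev);

		if (!info)	/* e.g. matched via an entry without .data */
			return -ENODEV;
		*code = info->code;
		return 0;
	}
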
diff --git a/drivers/media/i2c/mt9t112.c b/drivers/media/i2c/mt9t112.c
index 878dff9b7577..2d2c840fc002 100644
--- a/drivers/media/i2c/mt9t112.c
+++ b/drivers/media/i2c/mt9t112.c
@@ -1078,13 +1078,12 @@ static int mt9t112_probe(struct i2c_client *client)
v4l2_i2c_subdev_init(&priv->subdev, client, &mt9t112_subdev_ops);
- priv->clk = devm_clk_get(&client->dev, "extclk");
- if (PTR_ERR(priv->clk) == -ENOENT) {
+ priv->clk = devm_v4l2_sensor_clk_get(&client->dev, "extclk");
+ if (PTR_ERR(priv->clk) == -ENOENT)
priv->clk = NULL;
- } else if (IS_ERR(priv->clk)) {
- dev_err(&client->dev, "Unable to get clock \"extclk\"\n");
- return PTR_ERR(priv->clk);
- }
+ else if (IS_ERR(priv->clk))
+ return dev_err_probe(&client->dev, PTR_ERR(priv->clk),
+ "Unable to get clock \"extclk\"\n");
priv->standby_gpio = devm_gpiod_get_optional(&client->dev, "standby",
GPIOD_OUT_HIGH);
diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
index 302120ff125e..d4359d5b92bb 100644
--- a/drivers/media/i2c/mt9v032.c
+++ b/drivers/media/i2c/mt9v032.c
@@ -15,16 +15,15 @@
#include <linux/i2c.h>
#include <linux/log2.h>
#include <linux/mod_devicetable.h>
+#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/regmap.h>
#include <linux/slab.h>
-#include <linux/videodev2.h>
#include <linux/v4l2-mediabus.h>
-#include <linux/module.h>
+#include <linux/videodev2.h>
-#include <media/i2c/mt9v032.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -182,7 +181,16 @@ static const struct mt9v032_model_version mt9v032_versions[] = {
{ MT9V034_CHIP_ID_REV1, "MT9V024/MT9V034 rev1" },
};
+struct mt9v032_platform_data {
+ unsigned int clk_pol:1;
+
+ const s64 *link_freqs;
+ s64 link_def_freq;
+};
+
struct mt9v032 {
+ struct device *dev;
+
struct v4l2_subdev subdev;
struct media_pad pad;
@@ -205,7 +213,7 @@ struct mt9v032 {
struct gpio_desc *reset_gpio;
struct gpio_desc *standby_gpio;
- struct mt9v032_platform_data *pdata;
+ struct mt9v032_platform_data pdata;
const struct mt9v032_model_info *model;
const struct mt9v032_model_version *version;
@@ -330,7 +338,7 @@ static int __mt9v032_set_power(struct mt9v032 *mt9v032, bool on)
return ret;
/* Configure the pixel clock polarity */
- if (mt9v032->pdata && mt9v032->pdata->clk_pol) {
+ if (mt9v032->pdata.clk_pol) {
ret = regmap_write(map, mt9v032->model->data->pclk_reg,
MT9V032_PIXEL_CLOCK_INV_PXL_CLK);
if (ret < 0)
@@ -473,13 +481,12 @@ static int mt9v032_get_format(struct v4l2_subdev *subdev,
static void mt9v032_configure_pixel_rate(struct mt9v032 *mt9v032)
{
- struct i2c_client *client = v4l2_get_subdevdata(&mt9v032->subdev);
int ret;
ret = v4l2_ctrl_s_ctrl_int64(mt9v032->pixel_rate,
mt9v032->sysclk / mt9v032->hratio);
if (ret < 0)
- dev_warn(&client->dev, "failed to set pixel rate (%d)\n", ret);
+ dev_warn(mt9v032->dev, "failed to set pixel rate (%d)\n", ret);
}
static unsigned int mt9v032_calc_ratio(unsigned int input, unsigned int output)
@@ -682,7 +689,7 @@ static int mt9v032_s_ctrl(struct v4l2_ctrl *ctrl)
if (mt9v032->link_freq == NULL)
break;
- freq = mt9v032->pdata->link_freqs[mt9v032->link_freq->val];
+ freq = mt9v032->pdata.link_freqs[mt9v032->link_freq->val];
*mt9v032->pixel_rate->p_new.p_s64 = freq;
mt9v032->sysclk = freq;
break;
@@ -883,12 +890,12 @@ static int mt9v032_registered(struct v4l2_subdev *subdev)
u32 version;
int ret;
- dev_info(&client->dev, "Probing MT9V032 at address 0x%02x\n",
+ dev_info(mt9v032->dev, "Probing MT9V032 at address 0x%02x\n",
client->addr);
ret = mt9v032_power_on(mt9v032);
if (ret < 0) {
- dev_err(&client->dev, "MT9V032 power up failed\n");
+ dev_err(mt9v032->dev, "MT9V032 power up failed\n");
return ret;
}
@@ -898,7 +905,7 @@ static int mt9v032_registered(struct v4l2_subdev *subdev)
mt9v032_power_off(mt9v032);
if (ret < 0) {
- dev_err(&client->dev, "Failed reading chip version\n");
+ dev_err(mt9v032->dev, "Failed reading chip version\n");
return ret;
}
@@ -910,12 +917,12 @@ static int mt9v032_registered(struct v4l2_subdev *subdev)
}
if (mt9v032->version == NULL) {
- dev_err(&client->dev, "Unsupported chip version 0x%04x\n",
+ dev_err(mt9v032->dev, "Unsupported chip version 0x%04x\n",
version);
return -ENODEV;
}
- dev_info(&client->dev, "%s detected at address 0x%02x\n",
+ dev_info(mt9v032->dev, "%s detected at address 0x%02x\n",
mt9v032->version->name, client->addr);
mt9v032_configure_pixel_rate(mt9v032);
@@ -995,41 +1002,33 @@ static const struct regmap_config mt9v032_regmap_config = {
* Driver initialization and probing
*/
-static struct mt9v032_platform_data *
-mt9v032_get_pdata(struct i2c_client *client)
+static int mt9v032_get_pdata(struct mt9v032 *mt9v032)
{
- struct mt9v032_platform_data *pdata = NULL;
+ struct mt9v032_platform_data *pdata = &mt9v032->pdata;
struct v4l2_fwnode_endpoint endpoint = { .bus_type = 0 };
- struct device_node *np;
+ struct device_node *np __free(device_node) = NULL;
struct property *prop;
- if (!IS_ENABLED(CONFIG_OF) || !client->dev.of_node)
- return client->dev.platform_data;
-
- np = of_graph_get_endpoint_by_regs(client->dev.of_node, 0, -1);
+ np = of_graph_get_endpoint_by_regs(mt9v032->dev->of_node, 0, -1);
if (!np)
- return NULL;
+ return -EINVAL;
if (v4l2_fwnode_endpoint_parse(of_fwnode_handle(np), &endpoint) < 0)
- goto done;
-
- pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- goto done;
+ return -EINVAL;
prop = of_find_property(np, "link-frequencies", NULL);
if (prop) {
u64 *link_freqs;
size_t size = prop->length / sizeof(*link_freqs);
- link_freqs = devm_kcalloc(&client->dev, size,
+ link_freqs = devm_kcalloc(mt9v032->dev, size,
sizeof(*link_freqs), GFP_KERNEL);
if (!link_freqs)
- goto done;
+ return -EINVAL;
if (of_property_read_u64_array(np, "link-frequencies",
link_freqs, size) < 0)
- goto done;
+ return -EINVAL;
pdata->link_freqs = link_freqs;
pdata->link_def_freq = link_freqs[0];
@@ -1038,14 +1037,11 @@ mt9v032_get_pdata(struct i2c_client *client)
pdata->clk_pol = !!(endpoint.bus.parallel.flags &
V4L2_MBUS_PCLK_SAMPLE_RISING);
-done:
- of_node_put(np);
- return pdata;
+ return 0;
}
static int mt9v032_probe(struct i2c_client *client)
{
- struct mt9v032_platform_data *pdata = mt9v032_get_pdata(client);
struct mt9v032 *mt9v032;
unsigned int i;
int ret;
@@ -1054,27 +1050,35 @@ static int mt9v032_probe(struct i2c_client *client)
if (!mt9v032)
return -ENOMEM;
+ mt9v032->dev = &client->dev;
+
mt9v032->regmap = devm_regmap_init_i2c(client, &mt9v032_regmap_config);
if (IS_ERR(mt9v032->regmap))
return PTR_ERR(mt9v032->regmap);
- mt9v032->clk = devm_clk_get(&client->dev, NULL);
+ mt9v032->clk = devm_v4l2_sensor_clk_get(mt9v032->dev, NULL);
if (IS_ERR(mt9v032->clk))
- return PTR_ERR(mt9v032->clk);
+ return dev_err_probe(mt9v032->dev, PTR_ERR(mt9v032->clk),
+ "failed to get the clock\n");
- mt9v032->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+ mt9v032->reset_gpio = devm_gpiod_get_optional(mt9v032->dev, "reset",
GPIOD_OUT_HIGH);
if (IS_ERR(mt9v032->reset_gpio))
return PTR_ERR(mt9v032->reset_gpio);
- mt9v032->standby_gpio = devm_gpiod_get_optional(&client->dev, "standby",
+ mt9v032->standby_gpio = devm_gpiod_get_optional(mt9v032->dev, "standby",
GPIOD_OUT_LOW);
if (IS_ERR(mt9v032->standby_gpio))
return PTR_ERR(mt9v032->standby_gpio);
mutex_init(&mt9v032->power_lock);
- mt9v032->pdata = pdata;
- mt9v032->model = i2c_get_match_data(client);
+
+ ret = mt9v032_get_pdata(mt9v032);
+ if (ret)
+ return dev_err_probe(mt9v032->dev, -EINVAL,
+ "Failed to parse DT properties\n");
+
+ mt9v032->model = device_get_match_data(mt9v032->dev);
v4l2_ctrl_handler_init(&mt9v032->ctrls, 11 +
ARRAY_SIZE(mt9v032_aegc_controls));
@@ -1119,7 +1123,8 @@ static int mt9v032_probe(struct i2c_client *client)
v4l2_ctrl_new_std(&mt9v032->ctrls, &mt9v032_ctrl_ops,
V4L2_CID_PIXEL_RATE, 1, INT_MAX, 1, 1);
- if (pdata && pdata->link_freqs) {
+ if (mt9v032->pdata.link_freqs) {
+ const struct mt9v032_platform_data *pdata = &mt9v032->pdata;
unsigned int def = 0;
for (i = 0; pdata->link_freqs[i]; ++i) {
@@ -1139,7 +1144,7 @@ static int mt9v032_probe(struct i2c_client *client)
mt9v032->subdev.ctrl_handler = &mt9v032->ctrls;
if (mt9v032->ctrls.error) {
- dev_err(&client->dev, "control initialization error %d\n",
+ dev_err(mt9v032->dev, "control initialization error %d\n",
mt9v032->ctrls.error);
ret = mt9v032->ctrls.error;
goto err;
@@ -1177,7 +1182,7 @@ static int mt9v032_probe(struct i2c_client *client)
if (ret < 0)
goto err;
- mt9v032->subdev.dev = &client->dev;
+ mt9v032->subdev.dev = mt9v032->dev;
ret = v4l2_async_register_subdev(&mt9v032->subdev);
if (ret < 0)
goto err;
@@ -1261,19 +1266,6 @@ static const struct mt9v032_model_info mt9v032_models[] = {
},
};
-static const struct i2c_device_id mt9v032_id[] = {
- { "mt9v022", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V022_COLOR] },
- { "mt9v022m", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V022_MONO] },
- { "mt9v024", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V024_COLOR] },
- { "mt9v024m", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V024_MONO] },
- { "mt9v032", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V032_COLOR] },
- { "mt9v032m", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V032_MONO] },
- { "mt9v034", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V034_COLOR] },
- { "mt9v034m", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V034_MONO] },
- { /* Sentinel */ }
-};
-MODULE_DEVICE_TABLE(i2c, mt9v032_id);
-
static const struct of_device_id mt9v032_of_match[] = {
{ .compatible = "aptina,mt9v022", .data = &mt9v032_models[MT9V032_MODEL_V022_COLOR] },
{ .compatible = "aptina,mt9v022m", .data = &mt9v032_models[MT9V032_MODEL_V022_MONO] },
@@ -1294,7 +1286,6 @@ static struct i2c_driver mt9v032_driver = {
},
.probe = mt9v032_probe,
.remove = mt9v032_remove,
- .id_table = mt9v032_id,
};
module_i2c_driver(mt9v032_driver);
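
With the I2C device-ID table dropped, mt9v032 matching is OF-only and the model data comes exclusively from the of_device_id entries. A sketch of the retrieval path, with names mirroring the diff:

	#include <linux/property.h>

	static int demo_lookup_model(struct mt9v032 *mt9v032)
	{
		mt9v032->model = device_get_match_data(mt9v032->dev);

		/* NULL here would mean a match entry without .data set. */
		return mt9v032->model ? 0 : -ENODEV;
	}
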
diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c
index 723fe138e7bc..b4f2703faa18 100644
--- a/drivers/media/i2c/mt9v111.c
+++ b/drivers/media/i2c/mt9v111.c
@@ -365,8 +365,6 @@ static int __mt9v111_power_on(struct v4l2_subdev *sd)
if (ret)
return ret;
- clk_set_rate(mt9v111->clk, mt9v111->sysclk);
-
gpiod_set_value(mt9v111->standby, 0);
usleep_range(500, 1000);
@@ -532,8 +530,8 @@ static int mt9v111_calc_frame_rate(struct mt9v111_dev *mt9v111,
static int mt9v111_hw_config(struct mt9v111_dev *mt9v111)
{
struct i2c_client *c = mt9v111->client;
- unsigned int ret;
u16 outfmtctrl2;
+ int ret;
/* Force device reset. */
ret = __mt9v111_hw_reset(mt9v111);
@@ -1129,9 +1127,10 @@ static int mt9v111_probe(struct i2c_client *client)
mt9v111->dev = &client->dev;
mt9v111->client = client;
- mt9v111->clk = devm_clk_get(&client->dev, NULL);
+ mt9v111->clk = devm_v4l2_sensor_clk_get(&client->dev, NULL);
if (IS_ERR(mt9v111->clk))
- return PTR_ERR(mt9v111->clk);
+ return dev_err_probe(&client->dev, PTR_ERR(mt9v111->clk),
+ "failed to get the clock\n");
mt9v111->sysclk = clk_get_rate(mt9v111->clk);
if (mt9v111->sysclk > MT9V111_MAX_CLKIN)
diff --git a/drivers/media/i2c/og01a1b.c b/drivers/media/i2c/og01a1b.c
index 78d5d406e4b7..c7184de6251a 100644
--- a/drivers/media/i2c/og01a1b.c
+++ b/drivers/media/i2c/og01a1b.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2022 Intel Corporation.
-#include <linux/unaligned.h>
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -10,6 +9,8 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -421,6 +422,7 @@ static const struct og01a1b_mode supported_modes[] = {
};
struct og01a1b {
+ struct device *dev;
struct clk *xvclk;
struct gpio_desc *reset_gpio;
struct regulator *avdd;
@@ -512,7 +514,6 @@ static int og01a1b_write_reg(struct og01a1b *og01a1b, u16 reg, u16 len, u32 val)
static int og01a1b_write_reg_list(struct og01a1b *og01a1b,
const struct og01a1b_reg_list *r_list)
{
- struct i2c_client *client = v4l2_get_subdevdata(&og01a1b->sd);
unsigned int i;
int ret;
@@ -520,7 +521,7 @@ static int og01a1b_write_reg_list(struct og01a1b *og01a1b,
ret = og01a1b_write_reg(og01a1b, r_list->regs[i].address, 1,
r_list->regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(og01a1b->dev,
"failed to write reg 0x%4.4x. error = %d",
r_list->regs[i].address, ret);
return ret;
@@ -544,7 +545,6 @@ static int og01a1b_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct og01a1b *og01a1b = container_of(ctrl->handler,
struct og01a1b, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&og01a1b->sd);
s64 exposure_max;
int ret = 0;
@@ -560,7 +560,7 @@ static int og01a1b_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 controls values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(og01a1b->dev))
return 0;
switch (ctrl->id) {
@@ -596,7 +596,7 @@ static int og01a1b_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(og01a1b->dev);
return ret;
}
@@ -682,13 +682,12 @@ static void og01a1b_update_pad_format(const struct og01a1b_mode *mode,
{
fmt->width = mode->width;
fmt->height = mode->height;
- fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ fmt->code = MEDIA_BUS_FMT_Y10_1X10;
fmt->field = V4L2_FIELD_NONE;
}
static int og01a1b_start_streaming(struct og01a1b *og01a1b)
{
- struct i2c_client *client = v4l2_get_subdevdata(&og01a1b->sd);
const struct og01a1b_reg_list *reg_list;
int link_freq_index, ret;
@@ -697,14 +696,14 @@ static int og01a1b_start_streaming(struct og01a1b *og01a1b)
ret = og01a1b_write_reg_list(og01a1b, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set plls");
+ dev_err(og01a1b->dev, "failed to set plls");
return ret;
}
reg_list = &og01a1b->cur_mode->reg_list;
ret = og01a1b_write_reg_list(og01a1b, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set mode");
+ dev_err(og01a1b->dev, "failed to set mode");
return ret;
}
@@ -716,7 +715,7 @@ static int og01a1b_start_streaming(struct og01a1b *og01a1b)
OG01A1B_REG_VALUE_08BIT,
OG01A1B_MODE_STREAMING);
if (ret) {
- dev_err(&client->dev, "failed to set stream");
+ dev_err(og01a1b->dev, "failed to set stream");
return ret;
}
@@ -725,22 +724,19 @@ static int og01a1b_start_streaming(struct og01a1b *og01a1b)
static void og01a1b_stop_streaming(struct og01a1b *og01a1b)
{
- struct i2c_client *client = v4l2_get_subdevdata(&og01a1b->sd);
-
if (og01a1b_write_reg(og01a1b, OG01A1B_REG_MODE_SELECT,
OG01A1B_REG_VALUE_08BIT, OG01A1B_MODE_STANDBY))
- dev_err(&client->dev, "failed to set stream");
+ dev_err(og01a1b->dev, "failed to set stream");
}
static int og01a1b_set_stream(struct v4l2_subdev *sd, int enable)
{
struct og01a1b *og01a1b = to_og01a1b(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&og01a1b->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(og01a1b->dev);
if (ret) {
mutex_unlock(&og01a1b->mutex);
return ret;
@@ -750,11 +746,11 @@ static int og01a1b_set_stream(struct v4l2_subdev *sd, int enable)
if (ret) {
enable = 0;
og01a1b_stop_streaming(og01a1b);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(og01a1b->dev);
}
} else {
og01a1b_stop_streaming(og01a1b);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(og01a1b->dev);
}
mutex_unlock(&og01a1b->mutex);
@@ -828,7 +824,7 @@ static int og01a1b_enum_mbus_code(struct v4l2_subdev *sd,
if (code->index > 0)
return -EINVAL;
- code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ code->code = MEDIA_BUS_FMT_Y10_1X10;
return 0;
}
@@ -840,7 +836,7 @@ static int og01a1b_enum_frame_size(struct v4l2_subdev *sd,
if (fse->index >= ARRAY_SIZE(supported_modes))
return -EINVAL;
- if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ if (fse->code != MEDIA_BUS_FMT_Y10_1X10)
return -EINVAL;
fse->min_width = supported_modes[fse->index].width;
@@ -889,7 +885,6 @@ static const struct v4l2_subdev_internal_ops og01a1b_internal_ops = {
static int og01a1b_identify_module(struct og01a1b *og01a1b)
{
- struct i2c_client *client = v4l2_get_subdevdata(&og01a1b->sd);
int ret;
u32 val;
@@ -899,7 +894,7 @@ static int og01a1b_identify_module(struct og01a1b *og01a1b)
return ret;
if (val != OG01A1B_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ dev_err(og01a1b->dev, "chip id mismatch: %x!=%x",
OG01A1B_CHIP_ID, val);
return -ENXIO;
}
@@ -909,35 +904,18 @@ static int og01a1b_identify_module(struct og01a1b *og01a1b)
static int og01a1b_check_hwcfg(struct og01a1b *og01a1b)
{
- struct i2c_client *client = v4l2_get_subdevdata(&og01a1b->sd);
- struct device *dev = &client->dev;
+ struct device *dev = og01a1b->dev;
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
struct v4l2_fwnode_endpoint bus_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
- u32 mclk;
int ret;
unsigned int i, j;
if (!fwnode)
return -ENXIO;
- ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
- if (ret) {
- if (!og01a1b->xvclk) {
- dev_err(dev, "can't get clock frequency");
- return ret;
- }
-
- mclk = clk_get_rate(og01a1b->xvclk);
- }
-
- if (mclk != OG01A1B_MCLK) {
- dev_err(dev, "external clock %d is not supported", mclk);
- return -EINVAL;
- }
-
ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
if (!ep)
return -ENXIO;
@@ -1066,47 +1044,54 @@ static void og01a1b_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
+ pm_runtime_disable(og01a1b->dev);
mutex_destroy(&og01a1b->mutex);
}
static int og01a1b_probe(struct i2c_client *client)
{
struct og01a1b *og01a1b;
+ unsigned long freq;
int ret;
og01a1b = devm_kzalloc(&client->dev, sizeof(*og01a1b), GFP_KERNEL);
if (!og01a1b)
return -ENOMEM;
+ og01a1b->dev = &client->dev;
+
v4l2_i2c_subdev_init(&og01a1b->sd, client, &og01a1b_subdev_ops);
- og01a1b->xvclk = devm_clk_get_optional(&client->dev, NULL);
- if (IS_ERR(og01a1b->xvclk)) {
- ret = PTR_ERR(og01a1b->xvclk);
- dev_err(&client->dev, "failed to get xvclk clock: %d\n", ret);
- return ret;
- }
+ og01a1b->xvclk = devm_v4l2_sensor_clk_get(og01a1b->dev, NULL);
+ if (IS_ERR(og01a1b->xvclk))
+ return dev_err_probe(og01a1b->dev, PTR_ERR(og01a1b->xvclk),
+ "failed to get xvclk clock\n");
+
+ freq = clk_get_rate(og01a1b->xvclk);
+ if (freq != OG01A1B_MCLK)
+ return dev_err_probe(og01a1b->dev, -EINVAL,
+ "external clock %lu is not supported",
+ freq);
ret = og01a1b_check_hwcfg(og01a1b);
if (ret) {
- dev_err(&client->dev, "failed to check HW configuration: %d",
+ dev_err(og01a1b->dev, "failed to check HW configuration: %d",
ret);
return ret;
}
- og01a1b->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+ og01a1b->reset_gpio = devm_gpiod_get_optional(og01a1b->dev, "reset",
GPIOD_OUT_LOW);
if (IS_ERR(og01a1b->reset_gpio)) {
- dev_err(&client->dev, "cannot get reset GPIO\n");
+ dev_err(og01a1b->dev, "cannot get reset GPIO\n");
return PTR_ERR(og01a1b->reset_gpio);
}
- og01a1b->avdd = devm_regulator_get_optional(&client->dev, "avdd");
+ og01a1b->avdd = devm_regulator_get_optional(og01a1b->dev, "avdd");
if (IS_ERR(og01a1b->avdd)) {
ret = PTR_ERR(og01a1b->avdd);
if (ret != -ENODEV) {
- dev_err_probe(&client->dev, ret,
+ dev_err_probe(og01a1b->dev, ret,
"Failed to get 'avdd' regulator\n");
return ret;
}
@@ -1114,11 +1099,11 @@ static int og01a1b_probe(struct i2c_client *client)
og01a1b->avdd = NULL;
}
- og01a1b->dovdd = devm_regulator_get_optional(&client->dev, "dovdd");
+ og01a1b->dovdd = devm_regulator_get_optional(og01a1b->dev, "dovdd");
if (IS_ERR(og01a1b->dovdd)) {
ret = PTR_ERR(og01a1b->dovdd);
if (ret != -ENODEV) {
- dev_err_probe(&client->dev, ret,
+ dev_err_probe(og01a1b->dev, ret,
"Failed to get 'dovdd' regulator\n");
return ret;
}
@@ -1126,11 +1111,11 @@ static int og01a1b_probe(struct i2c_client *client)
og01a1b->dovdd = NULL;
}
- og01a1b->dvdd = devm_regulator_get_optional(&client->dev, "dvdd");
+ og01a1b->dvdd = devm_regulator_get_optional(og01a1b->dev, "dvdd");
if (IS_ERR(og01a1b->dvdd)) {
ret = PTR_ERR(og01a1b->dvdd);
if (ret != -ENODEV) {
- dev_err_probe(&client->dev, ret,
+ dev_err_probe(og01a1b->dev, ret,
"Failed to get 'dvdd' regulator\n");
return ret;
}
@@ -1139,13 +1124,13 @@ static int og01a1b_probe(struct i2c_client *client)
}
/* The sensor must be powered on to read the CHIP_ID register */
- ret = og01a1b_power_on(&client->dev);
+ ret = og01a1b_power_on(og01a1b->dev);
if (ret)
return ret;
ret = og01a1b_identify_module(og01a1b);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
+ dev_err(og01a1b->dev, "failed to find sensor: %d", ret);
goto power_off;
}
@@ -1153,7 +1138,7 @@ static int og01a1b_probe(struct i2c_client *client)
og01a1b->cur_mode = &supported_modes[0];
ret = og01a1b_init_controls(og01a1b);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d", ret);
+ dev_err(og01a1b->dev, "failed to init controls: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
@@ -1164,21 +1149,21 @@ static int og01a1b_probe(struct i2c_client *client)
og01a1b->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&og01a1b->sd.entity, 1, &og01a1b->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ dev_err(og01a1b->dev, "failed to init entity pads: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
ret = v4l2_async_register_subdev_sensor(&og01a1b->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ dev_err(og01a1b->dev, "failed to register V4L2 subdev: %d",
ret);
goto probe_error_media_entity_cleanup;
}
/* Enable runtime PM and turn off the device */
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(og01a1b->dev);
+ pm_runtime_enable(og01a1b->dev);
+ pm_runtime_idle(og01a1b->dev);
return 0;
@@ -1190,7 +1175,7 @@ probe_error_v4l2_ctrl_handler_free:
mutex_destroy(&og01a1b->mutex);
power_off:
- og01a1b_power_off(&client->dev);
+ og01a1b_power_off(og01a1b->dev);
return ret;
}
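
The og01a1b format change swaps the Bayer SGRBG10 code for MEDIA_BUS_FMT_Y10_1X10, i.e. the sensor is now treated as a 10-bit monochrome device. The enumeration path then offers exactly that one code, as sketched here (mirroring the hunks above):

	#include <media/v4l2-subdev.h>

	static int demo_enum_mbus_code(struct v4l2_subdev *sd,
				       struct v4l2_subdev_state *state,
				       struct v4l2_subdev_mbus_code_enum *code)
	{
		if (code->index > 0)
			return -EINVAL;

		/* Monochrome sensor: a single Y10 media bus code. */
		code->code = MEDIA_BUS_FMT_Y10_1X10;
		return 0;
	}
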
diff --git a/drivers/media/i2c/og0ve1b.c b/drivers/media/i2c/og0ve1b.c
new file mode 100644
index 000000000000..262d9df766fe
--- /dev/null
+++ b/drivers/media/i2c/og0ve1b.c
@@ -0,0 +1,816 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2024-2025 Linaro Ltd
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/units.h>
+#include <media/v4l2-cci.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#define OG0VE1B_LINK_FREQ_500MHZ (500 * HZ_PER_MHZ)
+#define OG0VE1B_MCLK_FREQ_24MHZ (24 * HZ_PER_MHZ)
+
+#define OG0VE1B_REG_CHIP_ID CCI_REG24(0x300a)
+#define OG0VE1B_CHIP_ID 0xc75645
+
+#define OG0VE1B_REG_MODE_SELECT CCI_REG8(0x0100)
+#define OG0VE1B_MODE_STANDBY 0x00
+#define OG0VE1B_MODE_STREAMING BIT(0)
+
+#define OG0VE1B_REG_SOFTWARE_RST CCI_REG8(0x0103)
+#define OG0VE1B_SOFTWARE_RST BIT(0)
+
+/* Exposure controls from sensor */
+#define OG0VE1B_REG_EXPOSURE CCI_REG24(0x3500)
+#define OG0VE1B_EXPOSURE_MIN 1
+#define OG0VE1B_EXPOSURE_MAX_MARGIN 14
+#define OG0VE1B_EXPOSURE_STEP 1
+#define OG0VE1B_EXPOSURE_DEFAULT 554
+
+/* Analogue gain controls from sensor */
+#define OG0VE1B_REG_ANALOGUE_GAIN CCI_REG16(0x350a)
+#define OG0VE1B_ANALOGUE_GAIN_MIN 1
+#define OG0VE1B_ANALOGUE_GAIN_MAX 0x1ff
+#define OG0VE1B_ANALOGUE_GAIN_STEP 1
+#define OG0VE1B_ANALOGUE_GAIN_DEFAULT 16
+
+/* Test pattern */
+#define OG0VE1B_REG_PRE_ISP CCI_REG8(0x5e00)
+#define OG0VE1B_TEST_PATTERN_ENABLE BIT(7)
+
+#define to_og0ve1b(_sd) container_of(_sd, struct og0ve1b, sd)
+
+static const s64 og0ve1b_link_freq_menu[] = {
+ OG0VE1B_LINK_FREQ_500MHZ,
+};
+
+struct og0ve1b_reg_list {
+ const struct cci_reg_sequence *regs;
+ unsigned int num_regs;
+};
+
+struct og0ve1b_mode {
+ u32 width; /* Frame width in pixels */
+ u32 height; /* Frame height in pixels */
+ u32 hts; /* Horizontal timing size */
+ u32 vts; /* Default vertical timing size */
+ u32 bpp; /* Bits per pixel */
+
+ const struct og0ve1b_reg_list reg_list; /* Sensor register setting */
+};
+
+static const char * const og0ve1b_test_pattern_menu[] = {
+ "Disabled",
+ "Vertical Colour Bars",
+};
+
+static const char * const og0ve1b_supply_names[] = {
+ "avdd", /* Analog power */
+ "dovdd", /* Digital I/O power */
+ "dvdd", /* Digital core power */
+};
+
+#define OG0VE1B_NUM_SUPPLIES ARRAY_SIZE(og0ve1b_supply_names)
+
+struct og0ve1b {
+ struct device *dev;
+ struct regmap *regmap;
+ struct clk *xvclk;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data supplies[OG0VE1B_NUM_SUPPLIES];
+
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ /* Saved register value */
+ u64 pre_isp;
+};
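
/*
 * Annotation on the new driver: og0ve1b uses the V4L2 CCI helpers, where
 * the access width is encoded in the register constant (CCI_REG8/16/24
 * above), so a single cci_write() handles mixed-width registers and
 * chains errors through its last argument. A small sketch using the
 * definitions above (hypothetical helper name):
 */
static int demo_apply_defaults(struct og0ve1b *og0ve1b)
{
	int ret = 0;

	/* 24-bit write: the width comes from CCI_REG24() in the constant. */
	cci_write(og0ve1b->regmap, OG0VE1B_REG_EXPOSURE,
		  OG0VE1B_EXPOSURE_DEFAULT, &ret);
	/* 16-bit write; skipped automatically if ret is already set. */
	cci_write(og0ve1b->regmap, OG0VE1B_REG_ANALOGUE_GAIN,
		  OG0VE1B_ANALOGUE_GAIN_DEFAULT, &ret);

	return ret;
}
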
+
+static const struct cci_reg_sequence og0ve1b_640x480_120fps_mode[] = {
+ { CCI_REG8(0x30a0), 0x02 },
+ { CCI_REG8(0x30a1), 0x00 },
+ { CCI_REG8(0x30a2), 0x48 },
+ { CCI_REG8(0x30a3), 0x34 },
+ { CCI_REG8(0x30a4), 0xf7 },
+ { CCI_REG8(0x30a5), 0x00 },
+ { CCI_REG8(0x3082), 0x32 },
+ { CCI_REG8(0x3083), 0x01 },
+ { CCI_REG8(0x301c), 0xf0 },
+ { CCI_REG8(0x301e), 0x0b },
+ { CCI_REG8(0x3106), 0x10 },
+ { CCI_REG8(0x3708), 0x77 },
+ { CCI_REG8(0x3709), 0xf8 },
+ { CCI_REG8(0x3717), 0x00 },
+ { CCI_REG8(0x3782), 0x00 },
+ { CCI_REG8(0x3783), 0x47 },
+ { CCI_REG8(0x37a2), 0x00 },
+ { CCI_REG8(0x3503), 0x07 },
+ { CCI_REG8(0x3509), 0x10 },
+ { CCI_REG8(0x3600), 0x83 },
+ { CCI_REG8(0x3601), 0x21 },
+ { CCI_REG8(0x3602), 0xf1 },
+ { CCI_REG8(0x360a), 0x18 },
+ { CCI_REG8(0x360e), 0xb3 },
+ { CCI_REG8(0x3613), 0x20 },
+ { CCI_REG8(0x366a), 0x78 },
+ { CCI_REG8(0x3706), 0x63 },
+ { CCI_REG8(0x3713), 0x00 },
+ { CCI_REG8(0x3716), 0xb0 },
+ { CCI_REG8(0x37a1), 0x38 },
+ { CCI_REG8(0x3800), 0x00 },
+ { CCI_REG8(0x3801), 0x04 },
+ { CCI_REG8(0x3802), 0x00 },
+ { CCI_REG8(0x3803), 0x04 },
+ { CCI_REG8(0x3804), 0x02 },
+ { CCI_REG8(0x3805), 0x8b },
+ { CCI_REG8(0x3806), 0x01 },
+ { CCI_REG8(0x3807), 0xeb },
+ { CCI_REG8(0x3808), 0x02 }, /* output width */
+ { CCI_REG8(0x3809), 0x80 },
+ { CCI_REG8(0x380a), 0x01 }, /* output height */
+ { CCI_REG8(0x380b), 0xe0 },
+ { CCI_REG8(0x380c), 0x03 }, /* horizontal timing size */
+ { CCI_REG8(0x380d), 0x18 },
+ { CCI_REG8(0x380e), 0x02 }, /* vertical timing size */
+ { CCI_REG8(0x380f), 0x38 },
+ { CCI_REG8(0x3811), 0x04 },
+ { CCI_REG8(0x3813), 0x04 },
+ { CCI_REG8(0x3814), 0x11 },
+ { CCI_REG8(0x3815), 0x11 },
+ { CCI_REG8(0x3820), 0x00 },
+ { CCI_REG8(0x3821), 0x00 },
+ { CCI_REG8(0x3823), 0x04 },
+ { CCI_REG8(0x382a), 0x00 },
+ { CCI_REG8(0x382b), 0x03 },
+ { CCI_REG8(0x3840), 0x00 },
+ { CCI_REG8(0x389e), 0x00 },
+ { CCI_REG8(0x3c05), 0x08 },
+ { CCI_REG8(0x3c26), 0x02 },
+ { CCI_REG8(0x3c27), 0xc0 },
+ { CCI_REG8(0x3c28), 0x00 },
+ { CCI_REG8(0x3c29), 0x40 },
+ { CCI_REG8(0x3c2c), 0x00 },
+ { CCI_REG8(0x3c2d), 0x50 },
+ { CCI_REG8(0x3c2e), 0x02 },
+ { CCI_REG8(0x3c2f), 0x66 },
+ { CCI_REG8(0x3c33), 0x08 },
+ { CCI_REG8(0x3c35), 0x00 },
+ { CCI_REG8(0x3c36), 0x00 },
+ { CCI_REG8(0x3c37), 0x00 },
+ { CCI_REG8(0x3f52), 0x9b },
+ { CCI_REG8(0x4001), 0x42 },
+ { CCI_REG8(0x4004), 0x08 },
+ { CCI_REG8(0x4005), 0x00 },
+ { CCI_REG8(0x4007), 0x28 },
+ { CCI_REG8(0x4009), 0x40 },
+ { CCI_REG8(0x4307), 0x30 },
+ { CCI_REG8(0x4500), 0x80 },
+ { CCI_REG8(0x4501), 0x02 },
+ { CCI_REG8(0x4502), 0x47 },
+ { CCI_REG8(0x4504), 0x7f },
+ { CCI_REG8(0x4601), 0x48 },
+ { CCI_REG8(0x4800), 0x64 },
+ { CCI_REG8(0x4801), 0x0f },
+ { CCI_REG8(0x4806), 0x2f },
+ { CCI_REG8(0x4819), 0xaa },
+ { CCI_REG8(0x4823), 0x3e },
+ { CCI_REG8(0x5000), 0x85 },
+ { CCI_REG8(0x5e00), 0x0c },
+ { CCI_REG8(0x3899), 0x09 },
+ { CCI_REG8(0x4f00), 0x64 },
+ { CCI_REG8(0x4f02), 0x0a },
+ { CCI_REG8(0x4f05), 0x0e },
+ { CCI_REG8(0x4f06), 0x11 },
+ { CCI_REG8(0x4f08), 0x0b },
+ { CCI_REG8(0x4f0a), 0xc4 },
+ { CCI_REG8(0x4f20), 0x1f },
+ { CCI_REG8(0x4f25), 0x10 },
+ { CCI_REG8(0x3016), 0x10 },
+ { CCI_REG8(0x3017), 0x00 },
+ { CCI_REG8(0x3018), 0x00 },
+ { CCI_REG8(0x3019), 0x00 },
+ { CCI_REG8(0x301a), 0x00 },
+ { CCI_REG8(0x301b), 0x00 },
+ { CCI_REG8(0x301c), 0x72 },
+ { CCI_REG8(0x3037), 0x40 },
+ { CCI_REG8(0x4f2c), 0x00 },
+ { CCI_REG8(0x4f21), 0x00 },
+ { CCI_REG8(0x4f23), 0x00 },
+ { CCI_REG8(0x4f2a), 0x00 },
+ { CCI_REG8(0x3665), 0xe7 },
+ { CCI_REG8(0x3668), 0x48 },
+ { CCI_REG8(0x3671), 0x3c },
+ { CCI_REG8(0x389a), 0x02 },
+ { CCI_REG8(0x389b), 0x00 },
+ { CCI_REG8(0x303c), 0xa0 },
+ { CCI_REG8(0x300f), 0xf0 },
+ { CCI_REG8(0x304b), 0x0f },
+ { CCI_REG8(0x3662), 0x24 },
+ { CCI_REG8(0x3006), 0x40 },
+ { CCI_REG8(0x4f26), 0x45 },
+ { CCI_REG8(0x3607), 0x34 },
+ { CCI_REG8(0x3608), 0x01 },
+ { CCI_REG8(0x360a), 0x0c },
+ { CCI_REG8(0x360b), 0x86 },
+ { CCI_REG8(0x360c), 0xcc },
+ { CCI_REG8(0x3013), 0x00 },
+ { CCI_REG8(0x3083), 0x02 },
+ { CCI_REG8(0x3084), 0x12 },
+ { CCI_REG8(0x4601), 0x38 },
+ { CCI_REG8(0x366f), 0x3a },
+ { CCI_REG8(0x3713), 0x19 },
+ { CCI_REG8(0x37a2), 0x00 },
+ { CCI_REG8(0x3f43), 0x27 },
+ { CCI_REG8(0x3f45), 0x27 },
+ { CCI_REG8(0x3f47), 0x32 },
+ { CCI_REG8(0x3f49), 0x3e },
+ { CCI_REG8(0x3f4b), 0x20 },
+ { CCI_REG8(0x3f4d), 0x30 },
+ { CCI_REG8(0x4300), 0x3f },
+ { CCI_REG8(0x4009), 0x10 },
+ { CCI_REG8(0x3f02), 0x68 },
+ { CCI_REG8(0x3700), 0x8c },
+ { CCI_REG8(0x370b), 0x7e },
+ { CCI_REG8(0x3f47), 0x35 },
+};
+
+static const struct og0ve1b_mode supported_modes[] = {
+ {
+ .width = 640,
+ .height = 480,
+ .hts = 792,
+ .vts = 568,
+ .bpp = 8,
+ .reg_list = {
+ .regs = og0ve1b_640x480_120fps_mode,
+ .num_regs = ARRAY_SIZE(og0ve1b_640x480_120fps_mode),
+ },
+ },
+};
+
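+/* Enable or disable the vertical colour bars test pattern via PRE_ISP */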
+static int og0ve1b_enable_test_pattern(struct og0ve1b *og0ve1b, u32 pattern)
+{
+ u64 val = og0ve1b->pre_isp;
+
+ if (pattern)
+ val |= OG0VE1B_TEST_PATTERN_ENABLE;
+ else
+ val &= ~OG0VE1B_TEST_PATTERN_ENABLE;
+
+ return cci_write(og0ve1b->regmap, OG0VE1B_REG_PRE_ISP, val, NULL);
+}
+
+static int og0ve1b_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct og0ve1b *og0ve1b = container_of(ctrl->handler, struct og0ve1b,
+ ctrl_handler);
+ int ret;
+
+ /* V4L2 controls are applied only when the sensor is powered up for streaming */
+ if (!pm_runtime_get_if_active(og0ve1b->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = cci_write(og0ve1b->regmap, OG0VE1B_REG_ANALOGUE_GAIN,
+ ctrl->val, NULL);
+ break;
+ case V4L2_CID_EXPOSURE:
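+ /* Low 4 bits of the exposure register are fractional, so shift whole lines */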
+ ret = cci_write(og0ve1b->regmap, OG0VE1B_REG_EXPOSURE,
+ ctrl->val << 4, NULL);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ret = og0ve1b_enable_test_pattern(og0ve1b, ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_put(og0ve1b->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops og0ve1b_ctrl_ops = {
+ .s_ctrl = og0ve1b_set_ctrl,
+};
+
+static int og0ve1b_init_controls(struct og0ve1b *og0ve1b)
+{
+ struct v4l2_ctrl_handler *ctrl_hdlr = &og0ve1b->ctrl_handler;
+ const struct og0ve1b_mode *mode = &supported_modes[0];
+ struct v4l2_fwnode_device_properties props;
+ s64 exposure_max, pixel_rate, h_blank;
+ struct v4l2_ctrl *ctrl;
+ int ret;
+
+ v4l2_ctrl_handler_init(ctrl_hdlr, 9);
+
+ ctrl = v4l2_ctrl_new_int_menu(ctrl_hdlr, &og0ve1b_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(og0ve1b_link_freq_menu) - 1,
+ 0, og0ve1b_link_freq_menu);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
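+ /* Derive the pixel rate from the single supported link frequency and bpp */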
+ pixel_rate = og0ve1b_link_freq_menu[0] / mode->bpp;
+ v4l2_ctrl_new_std(ctrl_hdlr, &og0ve1b_ctrl_ops, V4L2_CID_PIXEL_RATE,
+ 0, pixel_rate, 1, pixel_rate);
+
+ h_blank = mode->hts - mode->width;
+ ctrl = v4l2_ctrl_new_std(ctrl_hdlr, &og0ve1b_ctrl_ops, V4L2_CID_HBLANK,
+ h_blank, h_blank, 1, h_blank);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ ctrl = v4l2_ctrl_new_std(ctrl_hdlr, &og0ve1b_ctrl_ops, V4L2_CID_VBLANK,
+ mode->vts - mode->height,
+ mode->vts - mode->height, 1,
+ mode->vts - mode->height);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &og0ve1b_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ OG0VE1B_ANALOGUE_GAIN_MIN, OG0VE1B_ANALOGUE_GAIN_MAX,
+ OG0VE1B_ANALOGUE_GAIN_STEP,
+ OG0VE1B_ANALOGUE_GAIN_DEFAULT);
+
+ exposure_max = mode->vts - OG0VE1B_EXPOSURE_MAX_MARGIN;
+ v4l2_ctrl_new_std(ctrl_hdlr, &og0ve1b_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ OG0VE1B_EXPOSURE_MIN, exposure_max,
+ OG0VE1B_EXPOSURE_STEP,
+ OG0VE1B_EXPOSURE_DEFAULT);
+
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &og0ve1b_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(og0ve1b_test_pattern_menu) - 1,
+ 0, 0, og0ve1b_test_pattern_menu);
+
+ if (ctrl_hdlr->error) {
+ ret = ctrl_hdlr->error;
+ goto error_free_hdlr;
+ }
+
+ ret = v4l2_fwnode_device_parse(og0ve1b->dev, &props);
+ if (ret)
+ goto error_free_hdlr;
+
+ ret = v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &og0ve1b_ctrl_ops,
+ &props);
+ if (ret)
+ goto error_free_hdlr;
+
+ og0ve1b->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+
+error_free_hdlr:
+ v4l2_ctrl_handler_free(ctrl_hdlr);
+
+ return ret;
+}
+
+static void og0ve1b_update_pad_format(const struct og0ve1b_mode *mode,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ fmt->code = MEDIA_BUS_FMT_Y8_1X8;
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_RAW;
+ fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+}
+
+static int og0ve1b_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ const struct og0ve1b_reg_list *reg_list = &supported_modes[0].reg_list;
+ struct og0ve1b *og0ve1b = to_og0ve1b(sd);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(og0ve1b->dev);
+ if (ret)
+ return ret;
+
+ /* A software reset makes an explicit switch to standby mode unnecessary */
+ ret = cci_write(og0ve1b->regmap, OG0VE1B_REG_SOFTWARE_RST,
+ OG0VE1B_SOFTWARE_RST, NULL);
+ if (ret) {
+ dev_err(og0ve1b->dev, "failed to software reset: %d\n", ret);
+ goto error;
+ }
+
+ ret = cci_multi_reg_write(og0ve1b->regmap, reg_list->regs,
+ reg_list->num_regs, NULL);
+ if (ret) {
+ dev_err(og0ve1b->dev, "failed to set mode: %d\n", ret);
+ goto error;
+ }
+
+ ret = __v4l2_ctrl_handler_setup(og0ve1b->sd.ctrl_handler);
+ if (ret)
+ goto error;
+
+ ret = cci_write(og0ve1b->regmap, OG0VE1B_REG_MODE_SELECT,
+ OG0VE1B_MODE_STREAMING, NULL);
+ if (ret) {
+ dev_err(og0ve1b->dev, "failed to start streaming: %d\n", ret);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ pm_runtime_put_autosuspend(og0ve1b->dev);
+
+ return ret;
+}
+
+static int og0ve1b_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct og0ve1b *og0ve1b = to_og0ve1b(sd);
+ int ret;
+
+ ret = cci_write(og0ve1b->regmap, OG0VE1B_REG_MODE_SELECT,
+ OG0VE1B_MODE_STANDBY, NULL);
+ if (ret)
+ dev_err(og0ve1b->dev, "failed to stop streaming: %d\n", ret);
+
+ pm_runtime_put_autosuspend(og0ve1b->dev);
+
+ return ret;
+}
+
+static int og0ve1b_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_mbus_framefmt *format;
+ const struct og0ve1b_mode *mode;
+
+ format = v4l2_subdev_state_get_format(state, 0);
+
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes),
+ width, height,
+ fmt->format.width,
+ fmt->format.height);
+
+ og0ve1b_update_pad_format(mode, &fmt->format);
+ *format = fmt->format;
+
+ return 0;
+}
+
+static int og0ve1b_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_Y8_1X8;
+
+ return 0;
+}
+
+static int og0ve1b_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_Y8_1X8)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static int og0ve1b_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ .pad = 0,
+ .format = {
+ .code = MEDIA_BUS_FMT_Y8_1X8,
+ .width = supported_modes[0].width,
+ .height = supported_modes[0].height,
+ },
+ };
+
+ og0ve1b_set_pad_format(sd, state, &fmt);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops og0ve1b_video_ops = {
+ .s_stream = v4l2_subdev_s_stream_helper,
+};
+
+static const struct v4l2_subdev_pad_ops og0ve1b_pad_ops = {
+ .set_fmt = og0ve1b_set_pad_format,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .enum_mbus_code = og0ve1b_enum_mbus_code,
+ .enum_frame_size = og0ve1b_enum_frame_size,
+ .enable_streams = og0ve1b_enable_streams,
+ .disable_streams = og0ve1b_disable_streams,
+};
+
+static const struct v4l2_subdev_ops og0ve1b_subdev_ops = {
+ .video = &og0ve1b_video_ops,
+ .pad = &og0ve1b_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops og0ve1b_internal_ops = {
+ .init_state = og0ve1b_init_state,
+};
+
+static const struct media_entity_operations og0ve1b_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int og0ve1b_identify_sensor(struct og0ve1b *og0ve1b)
+{
+ u64 val;
+ int ret;
+
+ ret = cci_read(og0ve1b->regmap, OG0VE1B_REG_CHIP_ID, &val, NULL);
+ if (ret) {
+ dev_err(og0ve1b->dev, "failed to read chip id: %d\n", ret);
+ return ret;
+ }
+
+ if (val != OG0VE1B_CHIP_ID) {
+ dev_err(og0ve1b->dev, "chip id mismatch: %x!=%llx\n",
+ OG0VE1B_CHIP_ID, val);
+ return -ENODEV;
+ }
+
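+ /* Cache PRE_ISP so the test-pattern control preserves its other bits */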
+ ret = cci_read(og0ve1b->regmap, OG0VE1B_REG_PRE_ISP,
+ &og0ve1b->pre_isp, NULL);
+ if (ret)
+ dev_err(og0ve1b->dev, "failed to read pre_isp: %d\n", ret);
+
+ return ret;
+}
+
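+/* Validate the fwnode endpoint and its advertised link frequencies */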
+static int og0ve1b_check_hwcfg(struct og0ve1b *og0ve1b)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(og0ve1b->dev), *ep;
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ unsigned long freq_bitmap;
+ int ret;
+
+ if (!fwnode)
+ return -ENODEV;
+
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep)
+ return -EINVAL;
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret)
+ return ret;
+
+ ret = v4l2_link_freq_to_bitmap(og0ve1b->dev,
+ bus_cfg.link_frequencies,
+ bus_cfg.nr_of_link_frequencies,
+ og0ve1b_link_freq_menu,
+ ARRAY_SIZE(og0ve1b_link_freq_menu),
+ &freq_bitmap);
+
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+
+ return ret;
+}
+
+static int og0ve1b_power_on(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct og0ve1b *og0ve1b = to_og0ve1b(sd);
+ int ret;
+
+ ret = regulator_bulk_enable(OG0VE1B_NUM_SUPPLIES, og0ve1b->supplies);
+ if (ret)
+ return ret;
+
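+ /* Release reset and let the sensor settle before enabling the clock */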
+ gpiod_set_value_cansleep(og0ve1b->reset_gpio, 0);
+ usleep_range(10 * USEC_PER_MSEC, 15 * USEC_PER_MSEC);
+
+ ret = clk_prepare_enable(og0ve1b->xvclk);
+ if (ret)
+ goto reset_gpio;
+
+ return 0;
+
+reset_gpio:
+ gpiod_set_value_cansleep(og0ve1b->reset_gpio, 1);
+
+ regulator_bulk_disable(OG0VE1B_NUM_SUPPLIES, og0ve1b->supplies);
+
+ return ret;
+}
+
+static int og0ve1b_power_off(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct og0ve1b *og0ve1b = to_og0ve1b(sd);
+
+ clk_disable_unprepare(og0ve1b->xvclk);
+
+ gpiod_set_value_cansleep(og0ve1b->reset_gpio, 1);
+
+ regulator_bulk_disable(OG0VE1B_NUM_SUPPLIES, og0ve1b->supplies);
+
+ return 0;
+}
+
+static int og0ve1b_probe(struct i2c_client *client)
+{
+ struct og0ve1b *og0ve1b;
+ unsigned long freq;
+ unsigned int i;
+ int ret;
+
+ og0ve1b = devm_kzalloc(&client->dev, sizeof(*og0ve1b), GFP_KERNEL);
+ if (!og0ve1b)
+ return -ENOMEM;
+
+ og0ve1b->dev = &client->dev;
+
+ v4l2_i2c_subdev_init(&og0ve1b->sd, client, &og0ve1b_subdev_ops);
+
+ og0ve1b->regmap = devm_cci_regmap_init_i2c(client, 16);
+ if (IS_ERR(og0ve1b->regmap))
+ return dev_err_probe(og0ve1b->dev, PTR_ERR(og0ve1b->regmap),
+ "failed to init CCI\n");
+
+ og0ve1b->xvclk = devm_v4l2_sensor_clk_get(og0ve1b->dev, NULL);
+ if (IS_ERR(og0ve1b->xvclk))
+ return dev_err_probe(og0ve1b->dev, PTR_ERR(og0ve1b->xvclk),
+ "failed to get XVCLK clock\n");
+
+ freq = clk_get_rate(og0ve1b->xvclk);
+ if (freq && freq != OG0VE1B_MCLK_FREQ_24MHZ)
+ return dev_err_probe(og0ve1b->dev, -EINVAL,
+ "XVCLK clock frequency %lu is not supported\n",
+ freq);
+
+ ret = og0ve1b_check_hwcfg(og0ve1b);
+ if (ret)
+ return dev_err_probe(og0ve1b->dev, ret,
+ "failed to check HW configuration\n");
+
+ og0ve1b->reset_gpio = devm_gpiod_get_optional(og0ve1b->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(og0ve1b->reset_gpio))
+ return dev_err_probe(og0ve1b->dev, PTR_ERR(og0ve1b->reset_gpio),
+ "cannot get reset GPIO\n");
+
+ for (i = 0; i < OG0VE1B_NUM_SUPPLIES; i++)
+ og0ve1b->supplies[i].supply = og0ve1b_supply_names[i];
+
+ ret = devm_regulator_bulk_get(og0ve1b->dev, OG0VE1B_NUM_SUPPLIES,
+ og0ve1b->supplies);
+ if (ret)
+ return dev_err_probe(og0ve1b->dev, ret,
+ "failed to get supply regulators\n");
+
+ /* The sensor must be powered on to read the CHIP_ID register */
+ ret = og0ve1b_power_on(og0ve1b->dev);
+ if (ret)
+ return ret;
+
+ ret = og0ve1b_identify_sensor(og0ve1b);
+ if (ret) {
+ dev_err_probe(og0ve1b->dev, ret, "failed to find sensor\n");
+ goto power_off;
+ }
+
+ ret = og0ve1b_init_controls(og0ve1b);
+ if (ret) {
+ dev_err_probe(og0ve1b->dev, ret, "failed to init controls\n");
+ goto power_off;
+ }
+
+ og0ve1b->sd.state_lock = og0ve1b->ctrl_handler.lock;
+ og0ve1b->sd.internal_ops = &og0ve1b_internal_ops;
+ og0ve1b->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ og0ve1b->sd.entity.ops = &og0ve1b_subdev_entity_ops;
+ og0ve1b->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ og0ve1b->pad.flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&og0ve1b->sd.entity, 1, &og0ve1b->pad);
+ if (ret) {
+ dev_err_probe(og0ve1b->dev, ret,
+ "failed to init media entity pads\n");
+ goto v4l2_ctrl_handler_free;
+ }
+
+ ret = v4l2_subdev_init_finalize(&og0ve1b->sd);
+ if (ret < 0) {
+ dev_err_probe(og0ve1b->dev, ret,
+ "failed to init media entity pads\n");
+ goto media_entity_cleanup;
+ }
+
+ pm_runtime_set_active(og0ve1b->dev);
+ pm_runtime_enable(og0ve1b->dev);
+
+ ret = v4l2_async_register_subdev_sensor(&og0ve1b->sd);
+ if (ret < 0) {
+ dev_err_probe(og0ve1b->dev, ret,
+ "failed to register V4L2 subdev\n");
+ goto subdev_cleanup;
+ }
+
+ /* Configure autosuspend and let the sensor power down when idle */
+ pm_runtime_idle(og0ve1b->dev);
+ pm_runtime_set_autosuspend_delay(og0ve1b->dev, 1000);
+ pm_runtime_use_autosuspend(og0ve1b->dev);
+
+ return 0;
+
+subdev_cleanup:
+ v4l2_subdev_cleanup(&og0ve1b->sd);
+ pm_runtime_disable(og0ve1b->dev);
+ pm_runtime_set_suspended(og0ve1b->dev);
+
+media_entity_cleanup:
+ media_entity_cleanup(&og0ve1b->sd.entity);
+
+v4l2_ctrl_handler_free:
+ v4l2_ctrl_handler_free(og0ve1b->sd.ctrl_handler);
+
+power_off:
+ og0ve1b_power_off(og0ve1b->dev);
+
+ return ret;
+}
+
+static void og0ve1b_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct og0ve1b *og0ve1b = to_og0ve1b(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ v4l2_subdev_cleanup(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ pm_runtime_disable(og0ve1b->dev);
+
+ if (!pm_runtime_status_suspended(og0ve1b->dev)) {
+ og0ve1b_power_off(og0ve1b->dev);
+ pm_runtime_set_suspended(og0ve1b->dev);
+ }
+}
+
+static const struct dev_pm_ops og0ve1b_pm_ops = {
+ SET_RUNTIME_PM_OPS(og0ve1b_power_off, og0ve1b_power_on, NULL)
+};
+
+static const struct of_device_id og0ve1b_of_match[] = {
+ { .compatible = "ovti,og0ve1b" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, og0ve1b_of_match);
+
+static struct i2c_driver og0ve1b_i2c_driver = {
+ .driver = {
+ .name = "og0ve1b",
+ .pm = &og0ve1b_pm_ops,
+ .of_match_table = og0ve1b_of_match,
+ },
+ .probe = og0ve1b_probe,
+ .remove = og0ve1b_remove,
+};
+
+module_i2c_driver(og0ve1b_i2c_driver);
+
+MODULE_AUTHOR("Vladimir Zapolskiy <vladimir.zapolskiy@linaro.org>");
+MODULE_DESCRIPTION("OmniVision OG0VE1B sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/ov02a10.c b/drivers/media/i2c/ov02a10.c
index 6c30e1a0d814..70d9d7c43f18 100644
--- a/drivers/media/i2c/ov02a10.c
+++ b/drivers/media/i2c/ov02a10.c
@@ -100,7 +100,8 @@ struct ov02a10_mode {
};
struct ov02a10 {
- u32 eclk_freq;
+ struct device *dev;
+
/* Indication of MIPI transmission speed select */
u32 mipi_clock_voltage;
@@ -392,7 +393,7 @@ static int ov02a10_check_sensor_id(struct ov02a10 *ov02a10)
chip_id = le16_to_cpu((__force __le16)ret);
if ((chip_id & OV02A10_ID_MASK) != OV02A10_ID) {
- dev_err(&client->dev, "unexpected sensor id(0x%04x)\n", chip_id);
+ dev_err(ov02a10->dev, "unexpected sensor id(0x%04x)\n", chip_id);
return -EINVAL;
}
@@ -481,7 +482,7 @@ static int __ov02a10_start_stream(struct ov02a10 *ov02a10)
ret = i2c_smbus_write_byte_data(client, REG_MIRROR_FLIP_CONTROL,
REG_MIRROR_FLIP_ENABLE);
if (ret < 0) {
- dev_err(&client->dev, "failed to set orientation\n");
+ dev_err(ov02a10->dev, "failed to set orientation\n");
return ret;
}
ret = i2c_smbus_write_byte_data(client, REG_GLOBAL_EFFECTIVE,
@@ -530,7 +531,6 @@ static int ov02a10_init_state(struct v4l2_subdev *sd,
static int ov02a10_s_stream(struct v4l2_subdev *sd, int on)
{
struct ov02a10 *ov02a10 = to_ov02a10(sd);
- struct i2c_client *client = v4l2_get_subdevdata(&ov02a10->subdev);
int ret;
mutex_lock(&ov02a10->mutex);
@@ -541,7 +541,7 @@ static int ov02a10_s_stream(struct v4l2_subdev *sd, int on)
}
if (on) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov02a10->dev);
if (ret < 0)
goto unlock_and_return;
@@ -553,7 +553,7 @@ static int ov02a10_s_stream(struct v4l2_subdev *sd, int on)
}
} else {
__ov02a10_stop_stream(ov02a10);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov02a10->dev);
}
ov02a10->streaming = on;
@@ -562,7 +562,7 @@ static int ov02a10_s_stream(struct v4l2_subdev *sd, int on)
return 0;
err_rpm_put:
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov02a10->dev);
unlock_and_return:
mutex_unlock(&ov02a10->mutex);
@@ -662,7 +662,6 @@ static int ov02a10_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov02a10 *ov02a10 = container_of(ctrl->handler,
struct ov02a10, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov02a10->subdev);
s64 max_expo;
int ret;
@@ -678,7 +677,7 @@ static int ov02a10_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 controls values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov02a10->dev))
return 0;
switch (ctrl->id) {
@@ -699,7 +698,7 @@ static int ov02a10_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov02a10->dev);
return ret;
}
@@ -734,7 +733,6 @@ static const struct v4l2_ctrl_ops ov02a10_ctrl_ops = {
static int ov02a10_initialize_controls(struct ov02a10 *ov02a10)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov02a10->subdev);
const struct ov02a10_mode *mode;
struct v4l2_ctrl_handler *handler;
struct v4l2_ctrl *ctrl;
@@ -790,7 +788,7 @@ static int ov02a10_initialize_controls(struct ov02a10 *ov02a10)
if (handler->error) {
ret = handler->error;
- dev_err(&client->dev, "failed to init controls(%d)\n", ret);
+ dev_err(ov02a10->dev, "failed to init controls(%d)\n", ret);
goto err_free_handler;
}
@@ -866,6 +864,8 @@ static int ov02a10_probe(struct i2c_client *client)
if (!ov02a10)
return -ENOMEM;
+ ov02a10->dev = dev;
+
ret = ov02a10_check_hwcfg(dev, ov02a10);
if (ret)
return dev_err_probe(dev, ret,
@@ -885,22 +885,11 @@ static int ov02a10_probe(struct i2c_client *client)
ov02a10->fmt.code = MEDIA_BUS_FMT_SRGGB10_1X10;
}
- ov02a10->eclk = devm_clk_get(dev, "eclk");
+ ov02a10->eclk = devm_v4l2_sensor_clk_get_legacy(dev, "eclk", false, 0);
if (IS_ERR(ov02a10->eclk))
return dev_err_probe(dev, PTR_ERR(ov02a10->eclk),
"failed to get eclk\n");
- ret = device_property_read_u32(dev, "clock-frequency",
- &ov02a10->eclk_freq);
- if (ret < 0)
- return dev_err_probe(dev, ret,
- "failed to get eclk frequency\n");
-
- ret = clk_set_rate(ov02a10->eclk, ov02a10->eclk_freq);
- if (ret < 0)
- return dev_err_probe(dev, ret,
- "failed to set eclk frequency (24MHz)\n");
-
if (clk_get_rate(ov02a10->eclk) != OV02A10_ECLK_FREQ)
dev_warn(dev, "eclk mismatched, mode is based on 24MHz\n");
@@ -985,10 +974,10 @@ static void ov02a10_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
- if (!pm_runtime_status_suspended(&client->dev))
- ov02a10_power_off(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(ov02a10->dev);
+ if (!pm_runtime_status_suspended(ov02a10->dev))
+ ov02a10_power_off(ov02a10->dev);
+ pm_runtime_set_suspended(ov02a10->dev);
mutex_destroy(&ov02a10->mutex);
}
diff --git a/drivers/media/i2c/ov02c10.c b/drivers/media/i2c/ov02c10.c
index 089a4fd9627c..8c4d85dc7922 100644
--- a/drivers/media/i2c/ov02c10.c
+++ b/drivers/media/i2c/ov02c10.c
@@ -9,7 +9,6 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
-#include <linux/version.h>
#include <media/v4l2-cci.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
@@ -373,6 +372,8 @@ static const char * const ov02c10_supply_names[] = {
};
struct ov02c10 {
+ struct device *dev;
+
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler ctrl_handler;
@@ -418,7 +419,6 @@ static int ov02c10_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov02c10 *ov02c10 = container_of(ctrl->handler,
struct ov02c10, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov02c10->sd);
const u32 height = supported_modes[0].height;
s64 exposure_max;
int ret = 0;
@@ -434,7 +434,7 @@ static int ov02c10_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 controls values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov02c10->dev))
return 0;
switch (ctrl->id) {
@@ -467,7 +467,7 @@ static int ov02c10_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov02c10->dev);
return ret;
}
@@ -478,7 +478,6 @@ static const struct v4l2_ctrl_ops ov02c10_ctrl_ops = {
static int ov02c10_init_controls(struct ov02c10 *ov02c10)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov02c10->sd);
struct v4l2_ctrl_handler *ctrl_hdlr = &ov02c10->ctrl_handler;
const struct ov02c10_mode *mode = &supported_modes[0];
u32 vblank_min, vblank_max, vblank_default, vts_def;
@@ -542,7 +541,7 @@ static int ov02c10_init_controls(struct ov02c10 *ov02c10)
ARRAY_SIZE(ov02c10_test_pattern_menu) - 1,
0, 0, ov02c10_test_pattern_menu);
- ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ ret = v4l2_fwnode_device_parse(ov02c10->dev, &props);
if (ret)
return ret;
@@ -570,12 +569,11 @@ static int ov02c10_enable_streams(struct v4l2_subdev *sd,
u32 pad, u64 streams_mask)
{
const struct ov02c10_mode *mode = &supported_modes[0];
- struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov02c10 *ov02c10 = to_ov02c10(sd);
const struct reg_sequence *reg_sequence;
int ret, sequence_length;
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov02c10->dev);
if (ret)
return ret;
@@ -584,7 +582,7 @@ static int ov02c10_enable_streams(struct v4l2_subdev *sd,
ret = regmap_multi_reg_write(ov02c10->regmap,
reg_sequence, sequence_length);
if (ret) {
- dev_err(&client->dev, "failed to set mode\n");
+ dev_err(ov02c10->dev, "failed to set mode\n");
goto out;
}
@@ -593,7 +591,7 @@ static int ov02c10_enable_streams(struct v4l2_subdev *sd,
ret = regmap_multi_reg_write(ov02c10->regmap,
reg_sequence, sequence_length);
if (ret) {
- dev_err(&client->dev, "failed to write lane settings\n");
+ dev_err(ov02c10->dev, "failed to write lane settings\n");
goto out;
}
@@ -604,7 +602,7 @@ static int ov02c10_enable_streams(struct v4l2_subdev *sd,
ret = cci_write(ov02c10->regmap, OV02C10_REG_STREAM_CONTROL, 1, NULL);
out:
if (ret)
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov02c10->dev);
return ret;
}
@@ -613,11 +611,10 @@ static int ov02c10_disable_streams(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
u32 pad, u64 streams_mask)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov02c10 *ov02c10 = to_ov02c10(sd);
cci_write(ov02c10->regmap, OV02C10_REG_STREAM_CONTROL, 0, NULL);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov02c10->dev);
return 0;
}
@@ -778,7 +775,6 @@ static const struct v4l2_subdev_internal_ops ov02c10_internal_ops = {
static int ov02c10_identify_module(struct ov02c10 *ov02c10)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov02c10->sd);
u64 chip_id;
int ret;
@@ -787,7 +783,7 @@ static int ov02c10_identify_module(struct ov02c10 *ov02c10)
return ret;
if (chip_id != OV02C10_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%llx",
+ dev_err(ov02c10->dev, "chip id mismatch: %x!=%llx",
OV02C10_CHIP_ID, chip_id);
return -ENXIO;
}
@@ -795,14 +791,14 @@ static int ov02c10_identify_module(struct ov02c10 *ov02c10)
return 0;
}
-static int ov02c10_check_hwcfg(struct device *dev, struct ov02c10 *ov02c10)
+static int ov02c10_check_hwcfg(struct ov02c10 *ov02c10)
{
struct v4l2_fwnode_endpoint bus_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
+ struct device *dev = ov02c10->dev;
struct fwnode_handle *ep, *fwnode = dev_fwnode(dev);
unsigned long link_freq_bitmap;
- u32 mclk;
int ret;
/*
@@ -814,31 +810,6 @@ static int ov02c10_check_hwcfg(struct device *dev, struct ov02c10 *ov02c10)
return dev_err_probe(dev, -EPROBE_DEFER,
"waiting for fwnode graph endpoint\n");
- ov02c10->img_clk = devm_clk_get_optional(dev, NULL);
- if (IS_ERR(ov02c10->img_clk)) {
- fwnode_handle_put(ep);
- return dev_err_probe(dev, PTR_ERR(ov02c10->img_clk),
- "failed to get imaging clock\n");
- }
-
- if (ov02c10->img_clk) {
- mclk = clk_get_rate(ov02c10->img_clk);
- } else {
- ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
- if (ret) {
- fwnode_handle_put(ep);
- return dev_err_probe(dev, ret,
- "reading clock-frequency property\n");
- }
- }
-
- if (mclk != OV02C10_MCLK) {
- fwnode_handle_put(ep);
- return dev_err_probe(dev, -EINVAL,
- "external clock %u is not supported\n",
- mclk);
- }
-
ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
fwnode_handle_put(ep);
if (ret)
@@ -873,35 +844,50 @@ check_hwcfg_error:
static void ov02c10_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov02c10 *ov02c10 = to_ov02c10(sd);
v4l2_async_unregister_subdev(sd);
v4l2_subdev_cleanup(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
- if (!pm_runtime_status_suspended(&client->dev)) {
- ov02c10_power_off(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(ov02c10->dev);
+ if (!pm_runtime_status_suspended(ov02c10->dev)) {
+ ov02c10_power_off(ov02c10->dev);
+ pm_runtime_set_suspended(ov02c10->dev);
}
}
static int ov02c10_probe(struct i2c_client *client)
{
struct ov02c10 *ov02c10;
+ unsigned long freq;
int ret;
ov02c10 = devm_kzalloc(&client->dev, sizeof(*ov02c10), GFP_KERNEL);
if (!ov02c10)
return -ENOMEM;
+ ov02c10->dev = &client->dev;
+
+ ov02c10->img_clk = devm_v4l2_sensor_clk_get(ov02c10->dev, NULL);
+ if (IS_ERR(ov02c10->img_clk))
+ return dev_err_probe(ov02c10->dev, PTR_ERR(ov02c10->img_clk),
+ "failed to get imaging clock\n");
+
+ freq = clk_get_rate(ov02c10->img_clk);
+ if (freq != OV02C10_MCLK)
+ return dev_err_probe(ov02c10->dev, -EINVAL,
+ "external clock %lu is not supported",
+ freq);
+
v4l2_i2c_subdev_init(&ov02c10->sd, client, &ov02c10_subdev_ops);
/* Check HW config */
- ret = ov02c10_check_hwcfg(&client->dev, ov02c10);
+ ret = ov02c10_check_hwcfg(ov02c10);
if (ret)
return ret;
- ret = ov02c10_get_pm_resources(&client->dev);
+ ret = ov02c10_get_pm_resources(ov02c10->dev);
if (ret)
return ret;
@@ -909,21 +895,21 @@ static int ov02c10_probe(struct i2c_client *client)
if (IS_ERR(ov02c10->regmap))
return PTR_ERR(ov02c10->regmap);
- ret = ov02c10_power_on(&client->dev);
+ ret = ov02c10_power_on(ov02c10->dev);
if (ret) {
- dev_err_probe(&client->dev, ret, "failed to power on\n");
+ dev_err_probe(ov02c10->dev, ret, "failed to power on\n");
return ret;
}
ret = ov02c10_identify_module(ov02c10);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
+ dev_err(ov02c10->dev, "failed to find sensor: %d", ret);
goto probe_error_power_off;
}
ret = ov02c10_init_controls(ov02c10);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d", ret);
+ dev_err(ov02c10->dev, "failed to init controls: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
@@ -934,33 +920,33 @@ static int ov02c10_probe(struct i2c_client *client)
ov02c10->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&ov02c10->sd.entity, 1, &ov02c10->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ dev_err(ov02c10->dev, "failed to init entity pads: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
ov02c10->sd.state_lock = ov02c10->ctrl_handler.lock;
ret = v4l2_subdev_init_finalize(&ov02c10->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to init subdev: %d", ret);
+ dev_err(ov02c10->dev, "failed to init subdev: %d", ret);
goto probe_error_media_entity_cleanup;
}
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
+ pm_runtime_set_active(ov02c10->dev);
+ pm_runtime_enable(ov02c10->dev);
ret = v4l2_async_register_subdev_sensor(&ov02c10->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ dev_err(ov02c10->dev, "failed to register V4L2 subdev: %d",
ret);
goto probe_error_v4l2_subdev_cleanup;
}
- pm_runtime_idle(&client->dev);
+ pm_runtime_idle(ov02c10->dev);
return 0;
probe_error_v4l2_subdev_cleanup:
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(ov02c10->dev);
+ pm_runtime_set_suspended(ov02c10->dev);
v4l2_subdev_cleanup(&ov02c10->sd);
probe_error_media_entity_cleanup:
@@ -970,7 +956,7 @@ probe_error_v4l2_ctrl_handler_free:
v4l2_ctrl_handler_free(ov02c10->sd.ctrl_handler);
probe_error_power_off:
- ov02c10_power_off(&client->dev);
+ ov02c10_power_off(ov02c10->dev);
return ret;
}
diff --git a/drivers/media/i2c/ov02e10.c b/drivers/media/i2c/ov02e10.c
index d74dc62e189d..4a64cba99991 100644
--- a/drivers/media/i2c/ov02e10.c
+++ b/drivers/media/i2c/ov02e10.c
@@ -226,6 +226,8 @@ static const char * const ov02e10_supply_names[] = {
};
struct ov02e10 {
+ struct device *dev;
+
struct regmap *regmap;
struct v4l2_subdev sd;
struct media_pad pad;
@@ -288,7 +290,6 @@ static int ov02e10_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov02e10 *ov02e10 = container_of(ctrl->handler,
struct ov02e10, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov02e10->sd);
s64 exposure_max;
int ret;
@@ -307,7 +308,7 @@ static int ov02e10_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 controls values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov02e10->dev))
return 0;
ret = cci_write(ov02e10->regmap, OV02E10_REG_COMMAND_UPDATE,
@@ -363,7 +364,7 @@ static int ov02e10_set_ctrl(struct v4l2_ctrl *ctrl)
cci_write(ov02e10->regmap, OV02E10_REG_COMMAND_UPDATE,
OV02E10_COMMAND_UPDATE, &ret);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov02e10->dev);
return ret;
}
@@ -374,7 +375,6 @@ static const struct v4l2_ctrl_ops ov02e10_ctrl_ops = {
static int ov02e10_init_controls(struct ov02e10 *ov02e10)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov02e10->sd);
struct v4l2_ctrl_handler *ctrl_hdlr = &ov02e10->ctrl_handler;
const struct ov02e10_mode *mode = ov02e10->cur_mode;
u32 vblank_min, vblank_max, vblank_def;
@@ -442,7 +442,7 @@ static int ov02e10_init_controls(struct ov02e10 *ov02e10)
ARRAY_SIZE(ov02e10_test_pattern_menu) - 1,
0, 0, ov02e10_test_pattern_menu);
- ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ ret = v4l2_fwnode_device_parse(ov02e10->dev, &props);
if (ret)
return ret;
@@ -481,12 +481,11 @@ static int ov02e10_enable_streams(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
u32 pad, u64 streams_mask)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov02e10 *ov02e10 = to_ov02e10(sd);
const struct reg_sequence_list *reg_list;
int ret;
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov02e10->dev);
if (ret)
return ret;
@@ -494,7 +493,7 @@ static int ov02e10_enable_streams(struct v4l2_subdev *sd,
ret = regmap_multi_reg_write(ov02e10->regmap, reg_list->regs,
reg_list->num_regs);
if (ret) {
- dev_err(&client->dev, "failed to set mode\n");
+ dev_err(ov02e10->dev, "failed to set mode\n");
goto out;
}
@@ -506,7 +505,7 @@ static int ov02e10_enable_streams(struct v4l2_subdev *sd,
out:
if (ret)
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov02e10->dev);
return ret;
}
@@ -515,11 +514,10 @@ static int ov02e10_disable_streams(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
u32 pad, u64 streams_mask)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov02e10 *ov02e10 = to_ov02e10(sd);
ov02e10_set_stream_mode(ov02e10, 0);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov02e10->dev);
return 0;
}
@@ -724,7 +722,6 @@ static const struct v4l2_subdev_internal_ops ov02e10_internal_ops = {
static int ov02e10_identify_module(struct ov02e10 *ov02e10)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov02e10->sd);
int ret;
u64 val;
@@ -735,7 +732,7 @@ static int ov02e10_identify_module(struct ov02e10 *ov02e10)
return ret;
if (val != OV02E10_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ dev_err(ov02e10->dev, "chip id mismatch: %x!=%x\n",
OV02E10_CHIP_ID, (u32)val);
return -ENXIO;
}
@@ -743,15 +740,15 @@ static int ov02e10_identify_module(struct ov02e10 *ov02e10)
return 0;
}
-static int ov02e10_check_hwcfg(struct device *dev, struct ov02e10 *ov02e10)
+static int ov02e10_check_hwcfg(struct ov02e10 *ov02e10)
{
struct v4l2_fwnode_endpoint bus_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
+ struct device *dev = ov02e10->dev;
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
unsigned long link_freq_bitmap;
- u32 ext_clk;
int ret;
ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
@@ -764,31 +761,6 @@ static int ov02e10_check_hwcfg(struct device *dev, struct ov02e10 *ov02e10)
if (ret)
return dev_err_probe(dev, ret, "parsing endpoint failed\n");
- ov02e10->img_clk = devm_clk_get_optional(dev, NULL);
- if (IS_ERR(ov02e10->img_clk)) {
- ret = dev_err_probe(dev, PTR_ERR(ov02e10->img_clk),
- "failed to get imaging clock\n");
- goto out_err;
- }
-
- if (ov02e10->img_clk) {
- ext_clk = clk_get_rate(ov02e10->img_clk);
- } else {
- ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
- &ext_clk);
- if (ret) {
- dev_err(dev, "can't get clock frequency\n");
- goto out_err;
- }
- }
-
- if (ext_clk != OV02E10_MCLK) {
- dev_err(dev, "external clock %d is not supported\n",
- ext_clk);
- ret = -EINVAL;
- goto out_err;
- }
-
if (bus_cfg.bus.mipi_csi2.num_data_lanes != OV02E10_DATA_LANES) {
dev_err(dev, "number of CSI2 data lanes %d is not supported\n",
bus_cfg.bus.mipi_csi2.num_data_lanes);
@@ -823,32 +795,47 @@ out_err:
static void ov02e10_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov02e10 *ov02e10 = to_ov02e10(sd);
v4l2_async_unregister_subdev(sd);
v4l2_subdev_cleanup(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
+ pm_runtime_disable(ov02e10->dev);
- if (!pm_runtime_status_suspended(&client->dev)) {
- ov02e10_power_off(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ if (!pm_runtime_status_suspended(ov02e10->dev)) {
+ ov02e10_power_off(ov02e10->dev);
+ pm_runtime_set_suspended(ov02e10->dev);
}
}
static int ov02e10_probe(struct i2c_client *client)
{
struct ov02e10 *ov02e10;
+ unsigned long freq;
int ret;
ov02e10 = devm_kzalloc(&client->dev, sizeof(*ov02e10), GFP_KERNEL);
if (!ov02e10)
return -ENOMEM;
+ ov02e10->dev = &client->dev;
+
+ ov02e10->img_clk = devm_v4l2_sensor_clk_get(ov02e10->dev, NULL);
+ if (IS_ERR(ov02e10->img_clk))
+ return dev_err_probe(ov02e10->dev, PTR_ERR(ov02e10->img_clk),
+ "failed to get imaging clock\n");
+
+ freq = clk_get_rate(ov02e10->img_clk);
+ if (freq != OV02E10_MCLK)
+ return dev_err_probe(ov02e10->dev, -EINVAL,
+ "external clock %lu is not supported",
+ freq);
+
v4l2_i2c_subdev_init(&ov02e10->sd, client, &ov02e10_subdev_ops);
/* Check HW config */
- ret = ov02e10_check_hwcfg(&client->dev, ov02e10);
+ ret = ov02e10_check_hwcfg(ov02e10);
if (ret)
return ret;
@@ -857,27 +844,27 @@ static int ov02e10_probe(struct i2c_client *client)
if (IS_ERR(ov02e10->regmap))
return PTR_ERR(ov02e10->regmap);
- ret = ov02e10_get_pm_resources(&client->dev);
+ ret = ov02e10_get_pm_resources(ov02e10->dev);
if (ret)
return ret;
- ret = ov02e10_power_on(&client->dev);
+ ret = ov02e10_power_on(ov02e10->dev);
if (ret) {
- dev_err_probe(&client->dev, ret, "failed to power on\n");
+ dev_err_probe(ov02e10->dev, ret, "failed to power on\n");
return ret;
}
/* Check module identity */
ret = ov02e10_identify_module(ov02e10);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d\n", ret);
+ dev_err(ov02e10->dev, "failed to find sensor: %d\n", ret);
goto probe_error_power_off;
}
ov02e10->cur_mode = &supported_modes[0];
ret = ov02e10_init_controls(ov02e10);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d\n", ret);
+ dev_err(ov02e10->dev, "failed to init controls: %d\n", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
@@ -891,33 +878,33 @@ static int ov02e10_probe(struct i2c_client *client)
ov02e10->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&ov02e10->sd.entity, 1, &ov02e10->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ dev_err(ov02e10->dev, "failed to init entity pads: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
ov02e10->sd.state_lock = ov02e10->ctrl_handler.lock;
ret = v4l2_subdev_init_finalize(&ov02e10->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to init subdev: %d", ret);
+ dev_err(ov02e10->dev, "failed to init subdev: %d", ret);
goto probe_error_media_entity_cleanup;
}
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
+ pm_runtime_set_active(ov02e10->dev);
+ pm_runtime_enable(ov02e10->dev);
ret = v4l2_async_register_subdev_sensor(&ov02e10->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ dev_err(ov02e10->dev, "failed to register V4L2 subdev: %d",
ret);
goto probe_error_v4l2_subdev_cleanup;
}
- pm_runtime_idle(&client->dev);
+ pm_runtime_idle(ov02e10->dev);
return 0;
probe_error_v4l2_subdev_cleanup:
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(ov02e10->dev);
+ pm_runtime_set_suspended(ov02e10->dev);
v4l2_subdev_cleanup(&ov02e10->sd);
probe_error_media_entity_cleanup:
@@ -927,7 +914,7 @@ probe_error_v4l2_ctrl_handler_free:
v4l2_ctrl_handler_free(ov02e10->sd.ctrl_handler);
probe_error_power_off:
- ov02e10_power_off(&client->dev);
+ ov02e10_power_off(ov02e10->dev);
return ret;
}
@@ -961,7 +948,7 @@ static struct i2c_driver ov02e10_i2c_driver = {
module_i2c_driver(ov02e10_i2c_driver);
-MODULE_AUTHOR("Jingjing Xiong <jingjing.xiong@intel.com>");
+MODULE_AUTHOR("Jingjing Xiong");
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_AUTHOR("Alan Stern <stern@rowland.harvard.edu>");
MODULE_AUTHOR("Bryan O'Donoghue <bryan.odonoghue@linaro.org>");
diff --git a/drivers/media/i2c/ov08d10.c b/drivers/media/i2c/ov08d10.c
index 1bacbdfa4298..43ec2a1f2fcf 100644
--- a/drivers/media/i2c/ov08d10.c
+++ b/drivers/media/i2c/ov08d10.c
@@ -515,12 +515,13 @@ static const char * const ov08d10_test_pattern_menu[] = {
};
struct ov08d10 {
+ struct device *dev;
+ struct clk *clk;
+
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler ctrl_handler;
- struct clk *xvclk;
-
/* V4L2 Controls */
struct v4l2_ctrl *link_freq;
struct v4l2_ctrl *pixel_rate;
@@ -663,7 +664,7 @@ static int ov08d10_write_reg_list(struct ov08d10 *ov08d10,
ret = i2c_smbus_write_byte_data(client, r_list->regs[i].address,
r_list->regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(ov08d10->dev,
"failed to write reg 0x%2.2x. error = %d",
r_list->regs[i].address, ret);
return ret;
@@ -849,7 +850,6 @@ static int ov08d10_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov08d10 *ov08d10 = container_of(ctrl->handler,
struct ov08d10, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov08d10->sd);
s64 exposure_max;
int ret;
@@ -865,7 +865,7 @@ static int ov08d10_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 controls values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov08d10->dev))
return 0;
switch (ctrl->id) {
@@ -901,7 +901,7 @@ static int ov08d10_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov08d10->dev);
return ret;
}
@@ -1025,32 +1025,32 @@ static int ov08d10_start_streaming(struct ov08d10 *ov08d10)
/* soft reset */
ret = i2c_smbus_write_byte_data(client, OV08D10_REG_PAGE, 0x00);
if (ret < 0) {
- dev_err(&client->dev, "failed to reset sensor");
+ dev_err(ov08d10->dev, "failed to reset sensor");
return ret;
}
ret = i2c_smbus_write_byte_data(client, 0x20, 0x0e);
if (ret < 0) {
- dev_err(&client->dev, "failed to reset sensor");
+ dev_err(ov08d10->dev, "failed to reset sensor");
return ret;
}
usleep_range(3000, 4000);
ret = i2c_smbus_write_byte_data(client, 0x20, 0x0b);
if (ret < 0) {
- dev_err(&client->dev, "failed to reset sensor");
+ dev_err(ov08d10->dev, "failed to reset sensor");
return ret;
}
/* update sensor setting */
ret = ov08d10_write_reg_list(ov08d10, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set plls");
+ dev_err(ov08d10->dev, "failed to set plls");
return ret;
}
reg_list = &ov08d10->cur_mode->reg_list;
ret = ov08d10_write_reg_list(ov08d10, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set mode");
+ dev_err(ov08d10->dev, "failed to set mode");
return ret;
}
@@ -1077,19 +1077,19 @@ static void ov08d10_stop_streaming(struct ov08d10 *ov08d10)
ret = i2c_smbus_write_byte_data(client, OV08D10_REG_PAGE, 0x00);
if (ret < 0) {
- dev_err(&client->dev, "failed to stop streaming");
+ dev_err(ov08d10->dev, "failed to stop streaming");
return;
}
ret = i2c_smbus_write_byte_data(client, OV08D10_REG_MODE_SELECT,
OV08D10_MODE_STANDBY);
if (ret < 0) {
- dev_err(&client->dev, "failed to stop streaming");
+ dev_err(ov08d10->dev, "failed to stop streaming");
return;
}
ret = i2c_smbus_write_byte_data(client, OV08D10_REG_PAGE, 0x01);
if (ret < 0) {
- dev_err(&client->dev, "failed to stop streaming");
+ dev_err(ov08d10->dev, "failed to stop streaming");
return;
}
}
@@ -1097,12 +1097,11 @@ static void ov08d10_stop_streaming(struct ov08d10 *ov08d10)
static int ov08d10_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ov08d10 *ov08d10 = to_ov08d10(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&ov08d10->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov08d10->dev);
if (ret < 0) {
mutex_unlock(&ov08d10->mutex);
return ret;
@@ -1112,11 +1111,11 @@ static int ov08d10_set_stream(struct v4l2_subdev *sd, int enable)
if (ret) {
enable = 0;
ov08d10_stop_streaming(ov08d10);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov08d10->dev);
}
} else {
ov08d10_stop_streaming(ov08d10);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov08d10->dev);
}
/* vflip and hflip cannot change during streaming */
@@ -1293,7 +1292,7 @@ static int ov08d10_identify_module(struct ov08d10 *ov08d10)
chip_id = val | ret;
if ((chip_id & OV08D10_ID_MASK) != OV08D10_CHIP_ID) {
- dev_err(&client->dev, "unexpected sensor id(0x%04x)\n",
+ dev_err(ov08d10->dev, "unexpected sensor id(0x%04x)\n",
chip_id);
return -EINVAL;
}
@@ -1301,28 +1300,20 @@ static int ov08d10_identify_module(struct ov08d10 *ov08d10)
return 0;
}
-static int ov08d10_get_hwcfg(struct ov08d10 *ov08d10, struct device *dev)
+static int ov08d10_get_hwcfg(struct ov08d10 *ov08d10)
{
+ struct device *dev = ov08d10->dev;
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
struct v4l2_fwnode_endpoint bus_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
- u32 xvclk_rate;
unsigned int i, j;
int ret;
if (!fwnode)
return -ENXIO;
- ret = fwnode_property_read_u32(fwnode, "clock-frequency", &xvclk_rate);
- if (ret)
- return ret;
-
- if (xvclk_rate != OV08D10_XVCLK_19_2)
- dev_warn(dev, "external clock rate %u is unsupported",
- xvclk_rate);
-
ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
if (!ep)
return -ENXIO;
@@ -1380,22 +1371,35 @@ static void ov08d10_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
+ pm_runtime_disable(ov08d10->dev);
mutex_destroy(&ov08d10->mutex);
}
static int ov08d10_probe(struct i2c_client *client)
{
struct ov08d10 *ov08d10;
+ unsigned long freq;
int ret;
ov08d10 = devm_kzalloc(&client->dev, sizeof(*ov08d10), GFP_KERNEL);
if (!ov08d10)
return -ENOMEM;
- ret = ov08d10_get_hwcfg(ov08d10, &client->dev);
+ ov08d10->dev = &client->dev;
+
+ ov08d10->clk = devm_v4l2_sensor_clk_get(ov08d10->dev, NULL);
+ if (IS_ERR(ov08d10->clk))
+ return dev_err_probe(ov08d10->dev, PTR_ERR(ov08d10->clk),
+ "failed to get clock\n");
+
+ freq = clk_get_rate(ov08d10->clk);
+ if (freq != OV08D10_XVCLK_19_2)
+ dev_warn(ov08d10->dev,
+ "external clock rate %lu is not supported\n", freq);
+
+ ret = ov08d10_get_hwcfg(ov08d10);
if (ret) {
- dev_err(&client->dev, "failed to get HW configuration: %d",
+ dev_err(ov08d10->dev, "failed to get HW configuration: %d",
ret);
return ret;
}
@@ -1404,7 +1408,7 @@ static int ov08d10_probe(struct i2c_client *client)
ret = ov08d10_identify_module(ov08d10);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
+ dev_err(ov08d10->dev, "failed to find sensor: %d", ret);
return ret;
}
@@ -1412,7 +1416,7 @@ static int ov08d10_probe(struct i2c_client *client)
ov08d10->cur_mode = &ov08d10->priv_lane->sp_modes[0];
ret = ov08d10_init_controls(ov08d10);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d", ret);
+ dev_err(ov08d10->dev, "failed to init controls: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
@@ -1422,13 +1426,13 @@ static int ov08d10_probe(struct i2c_client *client)
ov08d10->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&ov08d10->sd.entity, 1, &ov08d10->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ dev_err(ov08d10->dev, "failed to init entity pads: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
ret = v4l2_async_register_subdev_sensor(&ov08d10->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ dev_err(ov08d10->dev, "failed to register V4L2 subdev: %d",
ret);
goto probe_error_media_entity_cleanup;
}
@@ -1437,9 +1441,9 @@ static int ov08d10_probe(struct i2c_client *client)
* Device is already turned on by i2c-core with ACPI domain PM.
* Enable runtime PM and turn off the device.
*/
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(ov08d10->dev);
+ pm_runtime_enable(ov08d10->dev);
+ pm_runtime_idle(ov08d10->dev);
return 0;
diff --git a/drivers/media/i2c/ov08x40.c b/drivers/media/i2c/ov08x40.c
index e0094305ca2a..5eaf454f4763 100644
--- a/drivers/media/i2c/ov08x40.c
+++ b/drivers/media/i2c/ov08x40.c
@@ -1,15 +1,16 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2022 Intel Corporation.
-#include <linux/unaligned.h>
#include <linux/acpi.h>
#include <linux/clk.h>
-#include <linux/i2c.h>
+#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
@@ -1305,6 +1306,8 @@ static const char * const ov08x40_supply_names[] = {
};
struct ov08x40 {
+ struct device *dev;
+
struct v4l2_subdev sd;
struct media_pad pad;
@@ -1513,7 +1516,6 @@ static int ov08x40_write_reg(struct ov08x40 *ov08x,
static int ov08x40_write_regs(struct ov08x40 *ov08x,
const struct ov08x40_reg *regs, u32 len)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd);
int ret;
u32 i;
@@ -1522,7 +1524,7 @@ static int ov08x40_write_regs(struct ov08x40 *ov08x,
regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(ov08x->dev,
"Failed to write reg 0x%4.4x. error = %d\n",
regs[i].address, ret);
@@ -1648,7 +1650,7 @@ static int ov08x40_set_ctrl_hflip(struct ov08x40 *ov08x, u32 ctrl_val)
return ov08x40_write_reg(ov08x, OV08X40_REG_MIRROR,
OV08X40_REG_VALUE_08BIT,
- ctrl_val ? val | BIT(2) : val & ~BIT(2));
+ ctrl_val ? val & ~BIT(2) : val | BIT(2));
}
static int ov08x40_set_ctrl_vflip(struct ov08x40 *ov08x, u32 ctrl_val)
@@ -1670,7 +1672,6 @@ static int ov08x40_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov08x40 *ov08x = container_of(ctrl->handler,
struct ov08x40, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd);
s64 max;
int exp;
int fll;
@@ -1699,7 +1700,7 @@ static int ov08x40_set_ctrl(struct v4l2_ctrl *ctrl)
* Applying V4L2 control value only happens
* when power is up for streaming
*/
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov08x->dev))
return 0;
switch (ctrl->id) {
@@ -1737,13 +1738,13 @@ static int ov08x40_set_ctrl(struct v4l2_ctrl *ctrl)
ov08x40_set_ctrl_vflip(ov08x, ctrl->val);
break;
default:
- dev_info(&client->dev,
+ dev_info(ov08x->dev,
"ctrl(id:0x%x,val:0x%x) is not handled\n",
ctrl->id, ctrl->val);
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov08x->dev);
return ret;
}
@@ -1912,7 +1913,6 @@ ov08x40_set_pad_format(struct v4l2_subdev *sd,
static int ov08x40_start_streaming(struct ov08x40 *ov08x)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd);
const struct ov08x40_reg_list *reg_list;
int ret, link_freq_index;
@@ -1920,7 +1920,7 @@ static int ov08x40_start_streaming(struct ov08x40 *ov08x)
ret = ov08x40_write_reg(ov08x, OV08X40_REG_SOFTWARE_RST,
OV08X40_REG_VALUE_08BIT, OV08X40_SOFTWARE_RST);
if (ret) {
- dev_err(&client->dev, "%s failed to set powerup registers\n",
+ dev_err(ov08x->dev, "%s failed to set powerup registers\n",
__func__);
return ret;
}
@@ -1930,14 +1930,14 @@ static int ov08x40_start_streaming(struct ov08x40 *ov08x)
ret = ov08x40_write_reg_list(ov08x, reg_list);
if (ret) {
- dev_err(&client->dev, "%s failed to set plls\n", __func__);
+ dev_err(ov08x->dev, "%s failed to set plls\n", __func__);
return ret;
}
reg_list = &ov08x40_global_setting;
ret = ov08x40_write_reg_list(ov08x, reg_list);
if (ret) {
- dev_err(&client->dev, "%s failed to set global setting\n",
+ dev_err(ov08x->dev, "%s failed to set global setting\n",
__func__);
return ret;
}
@@ -1946,7 +1946,7 @@ static int ov08x40_start_streaming(struct ov08x40 *ov08x)
reg_list = &ov08x->cur_mode->reg_list;
ret = ov08x40_write_reg_list(ov08x, reg_list);
if (ret) {
- dev_err(&client->dev, "%s failed to set mode\n", __func__);
+ dev_err(ov08x->dev, "%s failed to set mode\n", __func__);
return ret;
}
@@ -1962,7 +1962,7 @@ static int ov08x40_start_streaming(struct ov08x40 *ov08x)
}
if (ret) {
- dev_err(&client->dev, "%s failed to set regs\n", __func__);
+ dev_err(ov08x->dev, "%s failed to set regs\n", __func__);
return ret;
}
@@ -1986,7 +1986,6 @@ static int ov08x40_stop_streaming(struct ov08x40 *ov08x)
/* Verify chip ID */
static int ov08x40_identify_module(struct ov08x40 *ov08x)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd);
int ret;
u32 val;
@@ -1996,17 +1995,17 @@ static int ov08x40_identify_module(struct ov08x40 *ov08x)
ret = ov08x40_read_reg(ov08x, OV08X40_REG_CHIP_ID,
OV08X40_REG_VALUE_24BIT, &val);
if (ret) {
- dev_err(&client->dev, "error reading chip-id register: %d\n", ret);
+ dev_err(ov08x->dev, "error reading chip-id register: %d\n", ret);
return ret;
}
if (val != OV08X40_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ dev_err(ov08x->dev, "chip id mismatch: %x!=%x\n",
OV08X40_CHIP_ID, val);
return -ENXIO;
}
- dev_dbg(&client->dev, "chip id 0x%x\n", val);
+ dev_dbg(ov08x->dev, "chip id 0x%x\n", val);
ov08x->identified = true;
return 0;
@@ -2015,13 +2014,12 @@ static int ov08x40_identify_module(struct ov08x40 *ov08x)
static int ov08x40_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ov08x40 *ov08x = to_ov08x40(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&ov08x->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov08x->dev);
if (ret < 0)
goto err_unlock;
@@ -2038,7 +2036,7 @@ static int ov08x40_set_stream(struct v4l2_subdev *sd, int enable)
goto err_rpm_put;
} else {
ov08x40_stop_streaming(ov08x);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov08x->dev);
}
mutex_unlock(&ov08x->mutex);
@@ -2046,7 +2044,7 @@ static int ov08x40_set_stream(struct v4l2_subdev *sd, int enable)
return ret;
err_rpm_put:
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov08x->dev);
err_unlock:
mutex_unlock(&ov08x->mutex);
@@ -2079,7 +2077,6 @@ static const struct v4l2_subdev_internal_ops ov08x40_internal_ops = {
static int ov08x40_init_controls(struct ov08x40 *ov08x)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd);
struct v4l2_fwnode_device_properties props;
struct v4l2_ctrl_handler *ctrl_hdlr;
s64 exposure_max;
@@ -2160,12 +2157,12 @@ static int ov08x40_init_controls(struct ov08x40 *ov08x)
if (ctrl_hdlr->error) {
ret = ctrl_hdlr->error;
- dev_err(&client->dev, "%s control init failed (%d)\n",
+ dev_err(ov08x->dev, "%s control init failed (%d)\n",
__func__, ret);
goto error;
}
- ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ ret = v4l2_fwnode_device_parse(ov08x->dev, &props);
if (ret)
goto error;
@@ -2191,11 +2188,12 @@ static void ov08x40_free_controls(struct ov08x40 *ov08x)
mutex_destroy(&ov08x->mutex);
}
-static int ov08x40_check_hwcfg(struct ov08x40 *ov08x, struct device *dev)
+static int ov08x40_check_hwcfg(struct ov08x40 *ov08x)
{
struct v4l2_fwnode_endpoint bus_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
+ struct device *dev = ov08x->dev;
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
unsigned int i;
@@ -2232,23 +2230,14 @@ static int ov08x40_check_hwcfg(struct ov08x40 *ov08x, struct device *dev)
if (ret)
goto out_err;
- ov08x->xvclk = devm_clk_get_optional(dev, NULL);
+ ov08x->xvclk = devm_v4l2_sensor_clk_get(dev, NULL);
if (IS_ERR(ov08x->xvclk)) {
ret = dev_err_probe(dev, PTR_ERR(ov08x->xvclk),
"getting xvclk\n");
goto out_err;
}
- if (ov08x->xvclk) {
- xvclk_rate = clk_get_rate(ov08x->xvclk);
- } else {
- ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
- &xvclk_rate);
- if (ret) {
- dev_err(dev, "can't get clock frequency\n");
- goto out_err;
- }
- }
+ xvclk_rate = clk_get_rate(ov08x->xvclk);
if (xvclk_rate != OV08X40_XVCLK) {
dev_err(dev, "external clock %d is not supported\n",
xvclk_rate);
@@ -2294,19 +2283,21 @@ static int ov08x40_probe(struct i2c_client *client)
if (!ov08x)
return -ENOMEM;
+ ov08x->dev = &client->dev;
+
/* Check HW config */
- ret = ov08x40_check_hwcfg(ov08x, &client->dev);
+ ret = ov08x40_check_hwcfg(ov08x);
if (ret)
return ret;
/* Initialize subdev */
v4l2_i2c_subdev_init(&ov08x->sd, client, &ov08x40_subdev_ops);
- full_power = acpi_dev_state_d0(&client->dev);
+ full_power = acpi_dev_state_d0(ov08x->dev);
if (full_power) {
- ret = ov08x40_power_on(&client->dev);
+ ret = ov08x40_power_on(ov08x->dev);
if (ret) {
- dev_err(&client->dev, "failed to power on\n");
+ dev_err(ov08x->dev, "failed to power on\n");
return ret;
}
@@ -2333,7 +2324,7 @@ static int ov08x40_probe(struct i2c_client *client)
ov08x->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&ov08x->sd.entity, 1, &ov08x->pad);
if (ret) {
- dev_err(&client->dev, "%s failed:%d\n", __func__, ret);
+ dev_err(ov08x->dev, "%s failed:%d\n", __func__, ret);
goto error_handler_free;
}
@@ -2342,9 +2333,9 @@ static int ov08x40_probe(struct i2c_client *client)
goto error_media_entity;
if (full_power)
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(ov08x->dev);
+ pm_runtime_enable(ov08x->dev);
+ pm_runtime_idle(ov08x->dev);
return 0;
@@ -2355,7 +2346,7 @@ error_handler_free:
ov08x40_free_controls(ov08x);
probe_power_off:
- ov08x40_power_off(&client->dev);
+ ov08x40_power_off(ov08x->dev);
return ret;
}
@@ -2369,10 +2360,10 @@ static void ov08x40_remove(struct i2c_client *client)
media_entity_cleanup(&sd->entity);
ov08x40_free_controls(ov08x);
- pm_runtime_disable(&client->dev);
- if (!pm_runtime_status_suspended(&client->dev))
- ov08x40_power_off(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(ov08x->dev);
+ if (!pm_runtime_status_suspended(ov08x->dev))
+ ov08x40_power_off(ov08x->dev);
+ pm_runtime_set_suspended(ov08x->dev);
}
static DEFINE_RUNTIME_DEV_PM_OPS(ov08x40_pm_ops, ov08x40_power_off,
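The ov08x40 hunks above establish the template the rest of the series follows: probe() stores &client->dev in the driver's private struct once, and every later call site uses the cached pointer instead of re-deriving the i2c_client through v4l2_get_subdevdata(). A minimal sketch of that pattern, using a hypothetical my_sensor struct rather than any of the real drivers:

#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <media/v4l2-subdev.h>

struct my_sensor {
	struct device *dev;	/* cached &client->dev from probe() */
	struct v4l2_subdev sd;
};

static int my_sensor_stream_on(struct my_sensor *sensor)
{
	/* No v4l2_get_subdevdata()/i2c_client round-trip needed here. */
	return pm_runtime_resume_and_get(sensor->dev);
}

static int my_sensor_probe(struct i2c_client *client)
{
	struct my_sensor *sensor;

	sensor = devm_kzalloc(&client->dev, sizeof(*sensor), GFP_KERNEL);
	if (!sensor)
		return -ENOMEM;

	sensor->dev = &client->dev;	/* cache the device pointer once */
	return 0;			/* subdev/control setup elided */
}

The gain is mostly textual (shorter call sites, one fewer local per function), but it also drops the implicit assumption that the subdev's drvdata always points back at an i2c_client.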
diff --git a/drivers/media/i2c/ov13858.c b/drivers/media/i2c/ov13858.c
index 7a3fc1d28514..162b49046990 100644
--- a/drivers/media/i2c/ov13858.c
+++ b/drivers/media/i2c/ov13858.c
@@ -2,6 +2,7 @@
// Copyright (c) 2017 Intel Corporation.
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
@@ -1028,6 +1029,9 @@ static const struct ov13858_mode supported_modes[] = {
};
struct ov13858 {
+ struct device *dev;
+ struct clk *clk;
+
struct v4l2_subdev sd;
struct media_pad pad;
@@ -1117,7 +1121,6 @@ static int ov13858_write_reg(struct ov13858 *ov13858, u16 reg, u32 len,
static int ov13858_write_regs(struct ov13858 *ov13858,
const struct ov13858_reg *regs, u32 len)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd);
int ret;
u32 i;
@@ -1126,7 +1129,7 @@ static int ov13858_write_regs(struct ov13858 *ov13858,
regs[i].val);
if (ret) {
dev_err_ratelimited(
- &client->dev,
+ ov13858->dev,
"Failed to write reg 0x%4.4x. error = %d\n",
regs[i].address, ret);
@@ -1209,7 +1212,6 @@ static int ov13858_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov13858 *ov13858 = container_of(ctrl->handler,
struct ov13858, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd);
s64 max;
int ret;
@@ -1228,7 +1230,7 @@ static int ov13858_set_ctrl(struct v4l2_ctrl *ctrl)
* Applying V4L2 control value only happens
* when power is up for streaming
*/
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov13858->dev))
return 0;
ret = 0;
@@ -1256,13 +1258,13 @@ static int ov13858_set_ctrl(struct v4l2_ctrl *ctrl)
ret = ov13858_enable_test_pattern(ov13858, ctrl->val);
break;
default:
- dev_info(&client->dev,
+ dev_info(ov13858->dev,
"ctrl(id:0x%x,val:0x%x) is not handled\n",
ctrl->id, ctrl->val);
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov13858->dev);
return ret;
}
@@ -1408,7 +1410,6 @@ static int ov13858_get_skip_frames(struct v4l2_subdev *sd, u32 *frames)
/* Start streaming */
static int ov13858_start_streaming(struct ov13858 *ov13858)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd);
const struct ov13858_reg_list *reg_list;
int ret, link_freq_index;
@@ -1416,7 +1417,7 @@ static int ov13858_start_streaming(struct ov13858 *ov13858)
ret = ov13858_write_reg(ov13858, OV13858_REG_SOFTWARE_RST,
OV13858_REG_VALUE_08BIT, OV13858_SOFTWARE_RST);
if (ret) {
- dev_err(&client->dev, "%s failed to set powerup registers\n",
+ dev_err(ov13858->dev, "%s failed to set powerup registers\n",
__func__);
return ret;
}
@@ -1426,7 +1427,7 @@ static int ov13858_start_streaming(struct ov13858 *ov13858)
reg_list = &link_freq_configs[link_freq_index].reg_list;
ret = ov13858_write_reg_list(ov13858, reg_list);
if (ret) {
- dev_err(&client->dev, "%s failed to set plls\n", __func__);
+ dev_err(ov13858->dev, "%s failed to set plls\n", __func__);
return ret;
}
@@ -1434,7 +1435,7 @@ static int ov13858_start_streaming(struct ov13858 *ov13858)
reg_list = &ov13858->cur_mode->reg_list;
ret = ov13858_write_reg_list(ov13858, reg_list);
if (ret) {
- dev_err(&client->dev, "%s failed to set mode\n", __func__);
+ dev_err(ov13858->dev, "%s failed to set mode\n", __func__);
return ret;
}
@@ -1458,13 +1459,12 @@ static int ov13858_stop_streaming(struct ov13858 *ov13858)
static int ov13858_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ov13858 *ov13858 = to_ov13858(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&ov13858->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov13858->dev);
if (ret < 0)
goto err_unlock;
@@ -1477,7 +1477,7 @@ static int ov13858_set_stream(struct v4l2_subdev *sd, int enable)
goto err_rpm_put;
} else {
ov13858_stop_streaming(ov13858);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov13858->dev);
}
mutex_unlock(&ov13858->mutex);
@@ -1485,7 +1485,7 @@ static int ov13858_set_stream(struct v4l2_subdev *sd, int enable)
return ret;
err_rpm_put:
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov13858->dev);
err_unlock:
mutex_unlock(&ov13858->mutex);
@@ -1495,7 +1495,6 @@ err_unlock:
/* Verify chip ID */
static int ov13858_identify_module(struct ov13858 *ov13858)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd);
int ret;
u32 val;
@@ -1505,7 +1504,7 @@ static int ov13858_identify_module(struct ov13858 *ov13858)
return ret;
if (val != OV13858_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ dev_err(ov13858->dev, "chip id mismatch: %x!=%x\n",
OV13858_CHIP_ID, val);
return -EIO;
}
@@ -1552,7 +1551,6 @@ static const struct v4l2_subdev_internal_ops ov13858_internal_ops = {
/* Initialize control handlers */
static int ov13858_init_controls(struct ov13858 *ov13858)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd);
struct v4l2_fwnode_device_properties props;
struct v4l2_ctrl_handler *ctrl_hdlr;
s64 exposure_max;
@@ -1626,12 +1624,12 @@ static int ov13858_init_controls(struct ov13858 *ov13858)
0, 0, ov13858_test_pattern_menu);
if (ctrl_hdlr->error) {
ret = ctrl_hdlr->error;
- dev_err(&client->dev, "%s control init failed (%d)\n",
+ dev_err(ov13858->dev, "%s control init failed (%d)\n",
__func__, ret);
goto error;
}
- ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ ret = v4l2_fwnode_device_parse(ov13858->dev, &props);
if (ret)
goto error;
@@ -1660,24 +1658,33 @@ static void ov13858_free_controls(struct ov13858 *ov13858)
static int ov13858_probe(struct i2c_client *client)
{
struct ov13858 *ov13858;
+ unsigned long freq;
int ret;
- u32 val = 0;
-
- device_property_read_u32(&client->dev, "clock-frequency", &val);
- if (val != 19200000)
- return -EINVAL;
ov13858 = devm_kzalloc(&client->dev, sizeof(*ov13858), GFP_KERNEL);
if (!ov13858)
return -ENOMEM;
+ ov13858->dev = &client->dev;
+
+ ov13858->clk = devm_v4l2_sensor_clk_get(ov13858->dev, NULL);
+ if (IS_ERR(ov13858->clk))
+ return dev_err_probe(ov13858->dev, PTR_ERR(ov13858->clk),
+ "failed to get clock\n");
+
+ freq = clk_get_rate(ov13858->clk);
+ if (freq != 19200000)
+ return dev_err_probe(ov13858->dev, -EINVAL,
+ "external clock %lu is not supported\n",
+ freq);
+
/* Initialize subdev */
v4l2_i2c_subdev_init(&ov13858->sd, client, &ov13858_subdev_ops);
/* Check module identity */
ret = ov13858_identify_module(ov13858);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d\n", ret);
+ dev_err(ov13858->dev, "failed to find sensor: %d\n", ret);
return ret;
}
@@ -1699,7 +1706,7 @@ static int ov13858_probe(struct i2c_client *client)
ov13858->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&ov13858->sd.entity, 1, &ov13858->pad);
if (ret) {
- dev_err(&client->dev, "%s failed:%d\n", __func__, ret);
+ dev_err(ov13858->dev, "%s failed:%d\n", __func__, ret);
goto error_handler_free;
}
@@ -1711,9 +1718,9 @@ static int ov13858_probe(struct i2c_client *client)
* Device is already turned on by i2c-core with ACPI domain PM.
* Enable runtime PM and turn off the device.
*/
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(ov13858->dev);
+ pm_runtime_enable(ov13858->dev);
+ pm_runtime_idle(ov13858->dev);
return 0;
@@ -1722,7 +1729,7 @@ error_media_entity:
error_handler_free:
ov13858_free_controls(ov13858);
- dev_err(&client->dev, "%s failed:%d\n", __func__, ret);
+ dev_err(ov13858->dev, "%s failed:%d\n", __func__, ret);
return ret;
}
@@ -1736,7 +1743,7 @@ static void ov13858_remove(struct i2c_client *client)
media_entity_cleanup(&sd->entity);
ov13858_free_controls(ov13858);
- pm_runtime_disable(&client->dev);
+ pm_runtime_disable(ov13858->dev);
}
static const struct i2c_device_id ov13858_id_table[] = {
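With a clock handle in hand, ov13858's probe() now validates the rate directly instead of trusting a firmware "clock-frequency" property. A hedged sketch of that validation step, assuming only what the hunk above shows about devm_v4l2_sensor_clk_get() (a devm-style getter that returns a clk pointer or an ERR_PTR):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

#define MY_XVCLK_RATE	19200000UL	/* the one rate this sensor accepts */

static int my_sensor_check_clk(struct device *dev, struct clk *xvclk)
{
	unsigned long rate;

	if (IS_ERR(xvclk))
		return dev_err_probe(dev, PTR_ERR(xvclk),
				     "failed to get clock\n");

	rate = clk_get_rate(xvclk);
	if (rate != MY_XVCLK_RATE)
		return dev_err_probe(dev, -EINVAL,
				     "external clock %lu is not supported\n",
				     rate);

	return 0;
}

dev_err_probe() keeps each failure branch to a single statement and stays quiet on -EPROBE_DEFER, which is why the series uses it for every resource-acquisition error path.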
diff --git a/drivers/media/i2c/ov13b10.c b/drivers/media/i2c/ov13b10.c
index e85c7d33a670..869bc78ed792 100644
--- a/drivers/media/i2c/ov13b10.c
+++ b/drivers/media/i2c/ov13b10.c
@@ -700,6 +700,8 @@ static const struct ov13b10_mode supported_2_lanes_modes[] = {
};
struct ov13b10 {
+ struct device *dev;
+
struct v4l2_subdev sd;
struct media_pad pad;
@@ -805,7 +807,6 @@ static int ov13b10_write_reg(struct ov13b10 *ov13b,
static int ov13b10_write_regs(struct ov13b10 *ov13b,
const struct ov13b10_reg *regs, u32 len)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
int ret;
u32 i;
@@ -813,7 +814,7 @@ static int ov13b10_write_regs(struct ov13b10 *ov13b,
ret = ov13b10_write_reg(ov13b, regs[i].address, 1,
regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(ov13b->dev,
"Failed to write reg 0x%4.4x. error = %d\n",
regs[i].address, ret);
@@ -968,7 +969,6 @@ static int ov13b10_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov13b10 *ov13b = container_of(ctrl->handler,
struct ov13b10, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
s64 max;
int ret;
@@ -987,7 +987,7 @@ static int ov13b10_set_ctrl(struct v4l2_ctrl *ctrl)
* Applying V4L2 control value only happens
* when power is up for streaming
*/
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov13b->dev))
return 0;
ret = 0;
@@ -1021,13 +1021,13 @@ static int ov13b10_set_ctrl(struct v4l2_ctrl *ctrl)
ov13b10_set_ctrl_vflip(ov13b, ctrl->val);
break;
default:
- dev_info(&client->dev,
+ dev_info(ov13b->dev,
"ctrl(id:0x%x,val:0x%x) is not handled\n",
ctrl->id, ctrl->val);
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov13b->dev);
return ret;
}
@@ -1166,7 +1166,6 @@ ov13b10_set_pad_format(struct v4l2_subdev *sd,
/* Verify chip ID */
static int ov13b10_identify_module(struct ov13b10 *ov13b)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
int ret;
u32 val;
@@ -1179,7 +1178,7 @@ static int ov13b10_identify_module(struct ov13b10 *ov13b)
return ret;
if (val != OV13B10_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ dev_err(ov13b->dev, "chip id mismatch: %x!=%x\n",
OV13B10_CHIP_ID, val);
return -EIO;
}
@@ -1234,7 +1233,6 @@ static int ov13b10_power_on(struct device *dev)
static int ov13b10_start_streaming(struct ov13b10 *ov13b)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
const struct ov13b10_reg_list *reg_list;
int ret, link_freq_index;
@@ -1246,7 +1244,7 @@ static int ov13b10_start_streaming(struct ov13b10 *ov13b)
ret = ov13b10_write_reg(ov13b, OV13B10_REG_SOFTWARE_RST,
OV13B10_REG_VALUE_08BIT, OV13B10_SOFTWARE_RST);
if (ret) {
- dev_err(&client->dev, "%s failed to set powerup registers\n",
+ dev_err(ov13b->dev, "%s failed to set powerup registers\n",
__func__);
return ret;
}
@@ -1255,7 +1253,7 @@ static int ov13b10_start_streaming(struct ov13b10 *ov13b)
reg_list = &link_freq_configs[link_freq_index].reg_list;
ret = ov13b10_write_reg_list(ov13b, reg_list);
if (ret) {
- dev_err(&client->dev, "%s failed to set plls\n", __func__);
+ dev_err(ov13b->dev, "%s failed to set plls\n", __func__);
return ret;
}
@@ -1263,7 +1261,7 @@ static int ov13b10_start_streaming(struct ov13b10 *ov13b)
reg_list = &ov13b->cur_mode->reg_list;
ret = ov13b10_write_reg_list(ov13b, reg_list);
if (ret) {
- dev_err(&client->dev, "%s failed to set mode\n", __func__);
+ dev_err(ov13b->dev, "%s failed to set mode\n", __func__);
return ret;
}
@@ -1287,13 +1285,12 @@ static int ov13b10_stop_streaming(struct ov13b10 *ov13b)
static int ov13b10_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ov13b10 *ov13b = to_ov13b10(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&ov13b->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov13b->dev);
if (ret < 0)
goto err_unlock;
@@ -1306,7 +1303,7 @@ static int ov13b10_set_stream(struct v4l2_subdev *sd, int enable)
goto err_rpm_put;
} else {
ov13b10_stop_streaming(ov13b);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov13b->dev);
}
mutex_unlock(&ov13b->mutex);
@@ -1314,7 +1311,7 @@ static int ov13b10_set_stream(struct v4l2_subdev *sd, int enable)
return ret;
err_rpm_put:
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov13b->dev);
err_unlock:
mutex_unlock(&ov13b->mutex);
@@ -1360,7 +1357,6 @@ static const struct v4l2_subdev_internal_ops ov13b10_internal_ops = {
/* Initialize control handlers */
static int ov13b10_init_controls(struct ov13b10 *ov13b)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
struct v4l2_fwnode_device_properties props;
struct v4l2_ctrl_handler *ctrl_hdlr;
s64 exposure_max;
@@ -1443,12 +1439,12 @@ static int ov13b10_init_controls(struct ov13b10 *ov13b)
if (ctrl_hdlr->error) {
ret = ctrl_hdlr->error;
- dev_err(&client->dev, "%s control init failed (%d)\n",
+ dev_err(ov13b->dev, "%s control init failed (%d)\n",
__func__, ret);
goto error;
}
- ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ ret = v4l2_fwnode_device_parse(ov13b->dev, &props);
if (ret)
goto error;
@@ -1474,44 +1470,49 @@ static void ov13b10_free_controls(struct ov13b10 *ov13b)
mutex_destroy(&ov13b->mutex);
}
-static int ov13b10_get_pm_resources(struct device *dev)
+static int ov13b10_get_pm_resources(struct ov13b10 *ov13b)
{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct ov13b10 *ov13b = to_ov13b10(sd);
+ unsigned long freq;
int ret;
- ov13b->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ ov13b->reset = devm_gpiod_get_optional(ov13b->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ov13b->reset))
- return dev_err_probe(dev, PTR_ERR(ov13b->reset),
+ return dev_err_probe(ov13b->dev, PTR_ERR(ov13b->reset),
"failed to get reset gpio\n");
- ov13b->img_clk = devm_clk_get_optional(dev, NULL);
+ ov13b->img_clk = devm_v4l2_sensor_clk_get(ov13b->dev, NULL);
if (IS_ERR(ov13b->img_clk))
- return dev_err_probe(dev, PTR_ERR(ov13b->img_clk),
+ return dev_err_probe(ov13b->dev, PTR_ERR(ov13b->img_clk),
"failed to get imaging clock\n");
- ov13b->avdd = devm_regulator_get_optional(dev, "avdd");
+ freq = clk_get_rate(ov13b->img_clk);
+ if (freq != OV13B10_EXT_CLK)
+ return dev_err_probe(ov13b->dev, -EINVAL,
+ "external clock %lu is not supported\n",
+ freq);
+
+ ov13b->avdd = devm_regulator_get_optional(ov13b->dev, "avdd");
if (IS_ERR(ov13b->avdd)) {
ret = PTR_ERR(ov13b->avdd);
ov13b->avdd = NULL;
if (ret != -ENODEV)
- return dev_err_probe(dev, ret,
+ return dev_err_probe(ov13b->dev, ret,
"failed to get avdd regulator\n");
}
return 0;
}
-static int ov13b10_check_hwcfg(struct device *dev, struct ov13b10 *ov13b)
+static int ov13b10_check_hwcfg(struct ov13b10 *ov13b)
{
struct v4l2_fwnode_endpoint bus_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
+ struct device *dev = ov13b->dev;
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
unsigned int i, j;
int ret;
- u32 ext_clk;
u8 dlane;
if (!fwnode)
@@ -1521,19 +1522,6 @@ static int ov13b10_check_hwcfg(struct device *dev, struct ov13b10 *ov13b)
if (!ep)
return -EPROBE_DEFER;
- ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
- &ext_clk);
- if (ret) {
- dev_err(dev, "can't get clock frequency");
- return ret;
- }
-
- if (ext_clk != OV13B10_EXT_CLK) {
- dev_err(dev, "external clock %d is not supported",
- ext_clk);
- return -EINVAL;
- }
-
ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
fwnode_handle_put(ep);
if (ret)
@@ -1602,32 +1590,34 @@ static int ov13b10_probe(struct i2c_client *client)
if (!ov13b)
return -ENOMEM;
+ ov13b->dev = &client->dev;
+
/* Check HW config */
- ret = ov13b10_check_hwcfg(&client->dev, ov13b);
+ ret = ov13b10_check_hwcfg(ov13b);
if (ret) {
- dev_err(&client->dev, "failed to check hwcfg: %d", ret);
+ dev_err(ov13b->dev, "failed to check hwcfg: %d", ret);
return ret;
}
/* Initialize subdev */
v4l2_i2c_subdev_init(&ov13b->sd, client, &ov13b10_subdev_ops);
- ret = ov13b10_get_pm_resources(&client->dev);
+ ret = ov13b10_get_pm_resources(ov13b);
if (ret)
return ret;
- full_power = acpi_dev_state_d0(&client->dev);
+ full_power = acpi_dev_state_d0(ov13b->dev);
if (full_power) {
- ret = ov13b10_power_on(&client->dev);
+ ret = ov13b10_power_on(ov13b->dev);
if (ret) {
- dev_err(&client->dev, "failed to power on\n");
+ dev_err(ov13b->dev, "failed to power on\n");
return ret;
}
/* Check module identity */
ret = ov13b10_identify_module(ov13b);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d\n", ret);
+ dev_err(ov13b->dev, "failed to find sensor: %d\n", ret);
goto error_power_off;
}
}
@@ -1646,7 +1636,7 @@ static int ov13b10_probe(struct i2c_client *client)
ov13b->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&ov13b->sd.entity, 1, &ov13b->pad);
if (ret) {
- dev_err(&client->dev, "%s failed:%d\n", __func__, ret);
+ dev_err(ov13b->dev, "%s failed:%d\n", __func__, ret);
goto error_handler_free;
}
@@ -1657,9 +1647,9 @@ static int ov13b10_probe(struct i2c_client *client)
*/
/* Set the device's state to active if it's in D0 state. */
if (full_power)
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(ov13b->dev);
+ pm_runtime_enable(ov13b->dev);
+ pm_runtime_idle(ov13b->dev);
ret = v4l2_async_register_subdev_sensor(&ov13b->sd);
if (ret < 0)
@@ -1668,17 +1658,17 @@ static int ov13b10_probe(struct i2c_client *client)
return 0;
error_media_entity_runtime_pm:
- pm_runtime_disable(&client->dev);
+ pm_runtime_disable(ov13b->dev);
if (full_power)
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_set_suspended(ov13b->dev);
media_entity_cleanup(&ov13b->sd.entity);
error_handler_free:
ov13b10_free_controls(ov13b);
- dev_err(&client->dev, "%s failed:%d\n", __func__, ret);
+ dev_err(ov13b->dev, "%s failed:%d\n", __func__, ret);
error_power_off:
- ov13b10_power_off(&client->dev);
+ ov13b10_power_off(ov13b->dev);
return ret;
}
@@ -1692,8 +1682,8 @@ static void ov13b10_remove(struct i2c_client *client)
media_entity_cleanup(&sd->entity);
ov13b10_free_controls(ov13b);
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(ov13b->dev);
+ pm_runtime_set_suspended(ov13b->dev);
}
static DEFINE_RUNTIME_DEV_PM_OPS(ov13b10_pm_ops, ov13b10_suspend,
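ov13b10's probe error path deserves a note: runtime PM is disabled before the media entity is cleaned up, and the status is rolled back to "suspended" only when probe had previously marked the device active. A stripped-down sketch of that unwind (the power-off helper is illustrative):

#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>

void my_sensor_power_off(struct device *dev);	/* illustrative helper */

static void sketch_probe_unwind(struct device *dev, bool full_power)
{
	pm_runtime_disable(dev);
	if (full_power)		/* only undo what probe actually did */
		pm_runtime_set_suspended(dev);
	my_sensor_power_off(dev);
}

If probe never set the device active there is nothing to roll back, hence the full_power guard.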
diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
index 586b31ba076b..061401b020fc 100644
--- a/drivers/media/i2c/ov2659.c
+++ b/drivers/media/i2c/ov2659.c
@@ -1437,9 +1437,10 @@ static int ov2659_probe(struct i2c_client *client)
ov2659->pdata = pdata;
ov2659->client = client;
- ov2659->clk = devm_clk_get(&client->dev, "xvclk");
+ ov2659->clk = devm_v4l2_sensor_clk_get(&client->dev, "xvclk");
if (IS_ERR(ov2659->clk))
- return PTR_ERR(ov2659->clk);
+ return dev_err_probe(&client->dev, PTR_ERR(ov2659->clk),
+ "failed to get xvclk\n");
ov2659->xvclk_frequency = clk_get_rate(ov2659->clk);
if (ov2659->xvclk_frequency < 6000000 ||
diff --git a/drivers/media/i2c/ov2680.c b/drivers/media/i2c/ov2680.c
index 7237fb27ecd0..78e63bd1b35b 100644
--- a/drivers/media/i2c/ov2680.c
+++ b/drivers/media/i2c/ov2680.c
@@ -1079,7 +1079,6 @@ static int ov2680_parse_dt(struct ov2680_dev *sensor)
struct device *dev = sensor->dev;
struct fwnode_handle *ep_fwnode;
struct gpio_desc *gpio;
- unsigned int rate = 0;
int i, ret;
/*
@@ -1114,38 +1113,14 @@ static int ov2680_parse_dt(struct ov2680_dev *sensor)
sensor->pwdn_gpio = gpio;
- sensor->xvclk = devm_clk_get_optional(dev, "xvclk");
+ sensor->xvclk = devm_v4l2_sensor_clk_get(dev, "xvclk");
if (IS_ERR(sensor->xvclk)) {
ret = dev_err_probe(dev, PTR_ERR(sensor->xvclk),
"xvclk clock missing or invalid\n");
goto out_free_bus_cfg;
}
- /*
- * We could have either a 24MHz or 19.2MHz clock rate from either DT or
- * ACPI... but we also need to support the weird IPU3 case which will
- * have an external clock AND a clock-frequency property. Check for the
- * clock-frequency property and if found, set that rate if we managed
- * to acquire a clock. This should cover the ACPI case. If the system
- * uses devicetree then the configured rate should already be set, so
- * we can just read it.
- */
- ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
- &rate);
- if (ret && !sensor->xvclk) {
- dev_err_probe(dev, ret, "invalid clock config\n");
- goto out_free_bus_cfg;
- }
-
- if (!ret && sensor->xvclk) {
- ret = clk_set_rate(sensor->xvclk, rate);
- if (ret) {
- dev_err_probe(dev, ret, "failed to set clock rate\n");
- goto out_free_bus_cfg;
- }
- }
-
- sensor->xvclk_freq = rate ?: clk_get_rate(sensor->xvclk);
+ sensor->xvclk_freq = clk_get_rate(sensor->xvclk);
for (i = 0; i < ARRAY_SIZE(ov2680_xvclk_freqs); i++) {
if (sensor->xvclk_freq == ov2680_xvclk_freqs[i])
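The ov2680 hunk deletes the driver-local handling of the ACPI/IPU3 case, where a real clock and a "clock-frequency" property coexist and the property decides the rate. Judging purely from the call sites in this series, devm_v4l2_sensor_clk_get() is expected to absorb that fallback centrally; a conceptual sketch of such a helper, with the caveat that its actual implementation is not shown in this section:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/property.h>

/* Conceptual only: assumes the helper applies a firmware-provided
 * "clock-frequency" to the acquired clock, as the deleted code did. */
static struct clk *sketch_sensor_clk_get(struct device *dev, const char *id)
{
	struct clk *clk;
	u32 rate;

	clk = devm_clk_get_optional(dev, id);
	if (IS_ERR(clk))
		return clk;

	if (clk && !fwnode_property_read_u32(dev_fwnode(dev),
					     "clock-frequency", &rate)) {
		int ret = clk_set_rate(clk, rate);

		if (ret)
			return ERR_PTR(ret);
	}

	return clk;
}

Whatever the helper really does, callers now read the effective rate back with clk_get_rate() and compare it against the frequencies their register tables were tuned for.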
diff --git a/drivers/media/i2c/ov2685.c b/drivers/media/i2c/ov2685.c
index 9b8481b8dcd4..4911a4eea126 100644
--- a/drivers/media/i2c/ov2685.c
+++ b/drivers/media/i2c/ov2685.c
@@ -783,16 +783,12 @@ static int ov2685_probe(struct i2c_client *client)
ov2685->client = client;
ov2685->cur_mode = &supported_modes[0];
- ov2685->xvclk = devm_clk_get(dev, "xvclk");
- if (IS_ERR(ov2685->xvclk)) {
- dev_err(dev, "Failed to get xvclk\n");
- return -EINVAL;
- }
- ret = clk_set_rate(ov2685->xvclk, OV2685_XVCLK_FREQ);
- if (ret < 0) {
- dev_err(dev, "Failed to set xvclk rate (24MHz)\n");
- return ret;
- }
+ ov2685->xvclk = devm_v4l2_sensor_clk_get_legacy(dev, "xvclk", true,
+ OV2685_XVCLK_FREQ);
+ if (IS_ERR(ov2685->xvclk))
+ return dev_err_probe(dev, PTR_ERR(ov2685->xvclk),
+ "Failed to get xvclk\n");
+
if (clk_get_rate(ov2685->xvclk) != OV2685_XVCLK_FREQ)
dev_warn(dev, "xvclk mismatched, modes are based on 24MHz\n");
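Note the small behavioral shift in ov2685: the old code failed probe outright when clk_set_rate() could not reach 24 MHz, while the converted code warns on a residual rate mismatch and carries on. The _legacy variant's extra arguments are read off this one call site; a sketch of what the deleted lines did, under the assumption that the 'true' flag means "attempt to set the requested rate":

#include <linux/clk.h>
#include <linux/err.h>

#define SKETCH_XVCLK_FREQ	24000000UL	/* stands in for OV2685_XVCLK_FREQ */

/* Roughly what devm_v4l2_sensor_clk_get_legacy(dev, "xvclk", true,
 * OV2685_XVCLK_FREQ) replaces at this call site. */
static struct clk *sketch_legacy_xvclk_get(struct device *dev)
{
	struct clk *xvclk;
	int ret;

	xvclk = devm_clk_get(dev, "xvclk");
	if (IS_ERR(xvclk))
		return xvclk;

	ret = clk_set_rate(xvclk, SKETCH_XVCLK_FREQ);
	if (ret < 0)
		return ERR_PTR(ret);

	return xvclk;
}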
diff --git a/drivers/media/i2c/ov2735.c b/drivers/media/i2c/ov2735.c
new file mode 100644
index 000000000000..b96600204141
--- /dev/null
+++ b/drivers/media/i2c/ov2735.c
@@ -0,0 +1,1109 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * V4L2 Support for the OV2735
+ *
+ * Copyright (C) 2025 Silicon Signals Pvt. Ltd.
+ *
+ * Based on Rockchip ov2735 Camera Driver
+ * Copyright (C) 2017 Fuzhou Rockchip Electronics Co., Ltd.
+ *
+ * Inspired by the ov8858, imx219 and imx283 camera drivers.
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitops.h>
+#include <linux/cleanup.h>
+#include <linux/clk.h>
+#include <linux/container_of.h>
+#include <linux/delay.h>
+#include <linux/device/devres.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#include <media/v4l2-cci.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mediabus.h>
+
+#define OV2735_XCLK_FREQ (24 * HZ_PER_MHZ)
+
+/* Encode the page number in CCI private bits [31:28] of the register address */
+#define OV2735_PAGE_REG8(p, x) (((p) << CCI_REG_PRIVATE_SHIFT) | CCI_REG8(x))
+#define OV2735_PAGE_REG16(p, x) (((p) << CCI_REG_PRIVATE_SHIFT) | CCI_REG16(x))
+
+#define OV2735_REG_PAGE_SELECT CCI_REG8(0xfd)
+
+/* Page 0 */
+#define OV2735_REG_CHIPID OV2735_PAGE_REG16(0x00, 0x02)
+#define OV2735_CHIPID 0x2735
+
+#define OV2735_REG_SOFT_RESET OV2735_PAGE_REG8(0x00, 0x20)
+
+/* Clock Settings */
+#define OV2735_REG_PLL_CTRL OV2735_PAGE_REG8(0x00, 0x2f)
+#define OV2735_PLL_CTRL_ENABLE 0x7f
+#define OV2735_REG_PLL_OUTDIV OV2735_PAGE_REG8(0x00, 0x34)
+#define OV2735_REG_CLK_MODE OV2735_PAGE_REG8(0x00, 0x30)
+#define OV2735_REG_CLOCK_REG1 OV2735_PAGE_REG8(0x00, 0x33)
+#define OV2735_REG_CLOCK_REG2 OV2735_PAGE_REG8(0x00, 0x35)
+
+/* Page 1 */
+#define OV2735_REG_STREAM_CTRL OV2735_PAGE_REG8(0x01, 0xa0)
+#define OV2735_STREAM_CTRL_ON 0x01
+#define OV2735_STREAM_CTRL_OFF 0x00
+
+#define OV2735_REG_UPDOWN_MIRROR OV2735_PAGE_REG8(0x01, 0x3f)
+#define OV2735_REG_BINNING_DAC_CODE_MODE OV2735_PAGE_REG8(0x01, 0x30)
+#define OV2735_REG_FRAME_LENGTH OV2735_PAGE_REG16(0x01, 0x0e)
+#define OV2735_FRAME_LENGTH_MAX 0x0fff
+#define OV2735_REG_FRAME_EXP_SEPERATE_EN OV2735_PAGE_REG8(0x01, 0x0d)
+#define OV2735_FRAME_EXP_SEPERATE_EN 0x10
+#define OV2735_REG_FRAME_SYNC OV2735_PAGE_REG8(0x01, 0x01)
+
+#define OV2735_REG_HBLANK OV2735_PAGE_REG16(0x01, 0x09)
+
+#define OV2735_REG_HS_MIPI OV2735_PAGE_REG8(0x01, 0xb1)
+#define OV2735_REG_MIPI_CTRL1 OV2735_PAGE_REG8(0x01, 0x92)
+#define OV2735_REG_MIPI_CTRL2 OV2735_PAGE_REG8(0x01, 0x94)
+#define OV2735_REG_MIPI_CTRL3 OV2735_PAGE_REG8(0x01, 0xa1)
+#define OV2735_REG_MIPI_CTRL4 OV2735_PAGE_REG8(0x01, 0xb2)
+#define OV2735_REG_MIPI_CTRL5 OV2735_PAGE_REG8(0x01, 0xb3)
+#define OV2735_REG_MIPI_CTRL6 OV2735_PAGE_REG8(0x01, 0xb4)
+#define OV2735_REG_MIPI_CTRL7 OV2735_PAGE_REG8(0x01, 0xb5)
+#define OV2735_REG_HIGH_SPEED OV2735_PAGE_REG8(0x01, 0x9d)
+#define OV2735_REG_PREPARE OV2735_PAGE_REG8(0x01, 0x95)
+#define OV2735_REG_R_HS_ZERO OV2735_PAGE_REG8(0x01, 0x96)
+#define OV2735_REG_TRAIL OV2735_PAGE_REG8(0x01, 0x98)
+#define OV2735_REG_R_CLK_ZERO OV2735_PAGE_REG8(0x01, 0x9c)
+#define OV2735_REG_MIPI_COLOMN_NUMBER OV2735_PAGE_REG16(0x01, 0x8e)
+#define OV2735_REG_MIPI_LINE_NUMBER OV2735_PAGE_REG16(0x01, 0x90)
+
+/* Timing control registers */
+#define OV2735_REG_TIMING_CTRL2 OV2735_PAGE_REG8(0x01, 0x1a)
+#define OV2735_REG_TIMING_CTRL3 OV2735_PAGE_REG8(0x01, 0x1c)
+#define OV2735_REG_TIMING_CTRL1 OV2735_PAGE_REG8(0x01, 0x16)
+#define OV2735_REG_RST_NUM OV2735_PAGE_REG16(0x01, 0x10)
+#define OV2735_REG_RST_NUM2 OV2735_PAGE_REG16(0x01, 0x32)
+#define OV2735_REG_BOOST_EN OV2735_PAGE_REG8(0x01, 0xd0)
+#define OV2735_REG_B2_NUM OV2735_PAGE_REG16(0x01, 0xd1)
+#define OV2735_REG_B4_NUM OV2735_PAGE_REG16(0x01, 0xd3)
+#define OV2735_REG_PIXEL_CYCLE_P0 OV2735_PAGE_REG8(0x01, 0x50)
+#define OV2735_REG_PIXEL_CYCLE_P1 OV2735_PAGE_REG8(0x01, 0x51)
+#define OV2735_REG_PIXEL_CYCLE_P2 OV2735_PAGE_REG8(0x01, 0x52)
+#define OV2735_REG_PIXEL_CYCLE_P3 OV2735_PAGE_REG8(0x01, 0x53)
+#define OV2735_REG_PIXEL_CYCLE_P5 OV2735_PAGE_REG8(0x01, 0x55)
+#define OV2735_REG_PIXEL_CYCLE_P7 OV2735_PAGE_REG16(0x01, 0x57)
+#define OV2735_REG_PIXEL_CYCLE_P9 OV2735_PAGE_REG8(0x01, 0x5a)
+#define OV2735_REG_PIXEL_CYCLE_P10 OV2735_PAGE_REG8(0x01, 0x5b)
+#define OV2735_REG_PIXEL_CYCLE_P12 OV2735_PAGE_REG8(0x01, 0x5d)
+#define OV2735_REG_PIXEL_CYCLE_P18 OV2735_PAGE_REG8(0x01, 0x64)
+#define OV2735_REG_PIXEL_CYCLE_P20 OV2735_PAGE_REG8(0x01, 0x66)
+#define OV2735_REG_PIXEL_CYCLE_P22 OV2735_PAGE_REG8(0x01, 0x68)
+#define OV2735_REG_PIXEL_CYCLE_P33 OV2735_PAGE_REG16(0x01, 0x74)
+#define OV2735_REG_PIXEL_CYCLE_P34 OV2735_PAGE_REG8(0x01, 0x76)
+#define OV2735_REG_PIXEL_CYCLE_P35_P36 OV2735_PAGE_REG8(0x01, 0x77)
+#define OV2735_REG_PIXEL_CYCLE_P37_P38 OV2735_PAGE_REG8(0x01, 0x78)
+#define OV2735_REG_PIXEL_CYCLE_P31 OV2735_PAGE_REG8(0x01, 0x72)
+#define OV2735_REG_PIXEL_CYCLE_P32 OV2735_PAGE_REG8(0x01, 0x73)
+#define OV2735_REG_PIXEL_CYCLE_P44 OV2735_PAGE_REG8(0x01, 0x7d)
+#define OV2735_REG_PIXEL_CYCLE_P45 OV2735_PAGE_REG8(0x01, 0x7e)
+#define OV2735_REG_PIXEL_BIAS_CTRL_RH_RL OV2735_PAGE_REG8(0x01, 0x8a)
+#define OV2735_REG_PIXEL_BIAS_CTRL_SH_SL OV2735_PAGE_REG8(0x01, 0x8b)
+
+/* Analog Control registers */
+#define OV2735_REG_ICOMP OV2735_PAGE_REG8(0x01, 0x19)
+#define OV2735_REG_PCP_RST_SEL OV2735_PAGE_REG8(0x01, 0x21)
+#define OV2735_REG_VNCP OV2735_PAGE_REG8(0x01, 0x20)
+#define OV2735_REG_ANALOG_CTRL3 OV2735_PAGE_REG8(0x01, 0x25)
+#define OV2735_REG_ANALOG_CTRL4 OV2735_PAGE_REG8(0x01, 0x26)
+#define OV2735_REG_ANALOG_CTRL5 OV2735_PAGE_REG8(0x01, 0x29)
+#define OV2735_REG_ANALOG_CTRL6 OV2735_PAGE_REG8(0x01, 0x2a)
+#define OV2735_REG_ANALOG_CTRL8 OV2735_PAGE_REG8(0x01, 0x2c)
+
+/* BLC registers */
+#define OV2735_REG_BLC_GAIN_BLUE OV2735_PAGE_REG8(0x01, 0x86)
+#define OV2735_REG_BLC_GAIN_RED OV2735_PAGE_REG8(0x01, 0x87)
+#define OV2735_REG_BLC_GAIN_GR OV2735_PAGE_REG8(0x01, 0x88)
+#define OV2735_REG_BLC_GAIN_GB OV2735_PAGE_REG8(0x01, 0x89)
+#define OV2735_REG_GB_SUBOFFSET OV2735_PAGE_REG8(0x01, 0xf0)
+#define OV2735_REG_BLUE_SUBOFFSET OV2735_PAGE_REG8(0x01, 0xf1)
+#define OV2735_REG_RED_SUBOFFSET OV2735_PAGE_REG8(0x01, 0xf2)
+#define OV2735_REG_GR_SUBOFFSET OV2735_PAGE_REG8(0x01, 0xf3)
+#define OV2735_REG_BLC_BPC_TH_P OV2735_PAGE_REG8(0x01, 0xfc)
+#define OV2735_REG_BLC_BPC_TH_N OV2735_PAGE_REG8(0x01, 0xfe)
+#define OV2735_REG_ABL OV2735_PAGE_REG8(0x01, 0xfb)
+
+#define OV2735_REG_TEST_PATTERN OV2735_PAGE_REG8(0x01, 0xb2)
+#define OV2735_TEST_PATTERN_ENABLE 0x01
+#define OV2735_TEST_PATTERN_DISABLE 0xfe
+
+#define OV2735_REG_LONG_EXPOSURE OV2735_PAGE_REG16(0x01, 0x03)
+#define OV2735_EXPOSURE_MIN 4
+#define OV2735_EXPOSURE_STEP 1
+#define OV2735_EXPOSURE_MARGIN 4
+
+#define OV2735_REG_ANALOG_GAIN OV2735_PAGE_REG8(0x01, 0x24)
+#define OV2735_ANALOG_GAIN_MIN 0x10
+#define OV2735_ANALOG_GAIN_MAX 0xff
+#define OV2735_ANALOG_GAIN_STEP 1
+#define OV2735_ANALOG_GAIN_DEFAULT 0x10
+
+/* Page 2 */
+#define OV2735_REG_V_START OV2735_PAGE_REG16(0x02, 0xa0)
+#define OV2735_REG_V_SIZE OV2735_PAGE_REG16(0x02, 0xa2)
+#define OV2735_REG_H_START OV2735_PAGE_REG16(0x02, 0xa4)
+#define OV2735_REG_H_SIZE OV2735_PAGE_REG16(0x02, 0xa6)
+
+#define OV2735_LINK_FREQ_420MHZ (420 * HZ_PER_MHZ)
+#define OV2735_PIXEL_RATE (168 * HZ_PER_MHZ)
+
+/* OV2735 native and active pixel array size */
+static const struct v4l2_rect ov2735_native_area = {
+ .top = 0,
+ .left = 0,
+ .width = 1936,
+ .height = 1096,
+};
+
+static const struct v4l2_rect ov2735_active_area = {
+ .top = 8,
+ .left = 8,
+ .width = 1920,
+ .height = 1080,
+};
+
+static const char * const ov2735_supply_name[] = {
+ "avdd", /* Analog power */
+ "dovdd", /* Digital I/O power */
+ "dvdd", /* Digital core power */
+};
+
+/* PLL_OUT = [PLL_IN * (pll_nc + 3)] / [(pll_mc + 1) * (pll_outdiv + 1)] */
+struct ov2735_pll_parameters {
+ u8 pll_nc;
+ u8 pll_mc;
+ u8 pll_outdiv;
+};
+
+struct ov2735 {
+ struct device *dev;
+ struct regmap *cci;
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct clk *xclk;
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *enable_gpio;
+ struct regulator_bulk_data supplies[ARRAY_SIZE(ov2735_supply_name)];
+
+ /* V4L2 Controls */
+ struct v4l2_ctrl_handler handler;
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *gain;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *test_pattern;
+
+ u32 link_freq_index;
+
+ u8 current_page;
+ struct mutex page_lock;
+};
+
+struct ov2735_mode {
+ u32 width;
+ u32 height;
+ u32 hts_def;
+ u32 vts_def;
+ u32 exp_def;
+ struct v4l2_rect crop;
+};
+
+static const struct cci_reg_sequence ov2735_common_regs[] = {
+ { OV2735_REG_CLK_MODE, 0x15 },
+ { OV2735_REG_CLOCK_REG1, 0x01 },
+ { OV2735_REG_CLOCK_REG2, 0x20 },
+ { OV2735_REG_BINNING_DAC_CODE_MODE, 0x00 },
+ { OV2735_REG_ABL, 0x73 },
+ { OV2735_REG_FRAME_SYNC, 0x01 },
+
+ /* Timing ctrl */
+ { OV2735_REG_TIMING_CTRL2, 0x6b },
+ { OV2735_REG_TIMING_CTRL3, 0xea },
+ { OV2735_REG_TIMING_CTRL1, 0x0c },
+ { OV2735_REG_RST_NUM, 0x0063 },
+ { OV2735_REG_RST_NUM2, 0x006f },
+ { OV2735_REG_BOOST_EN, 0x02 },
+ { OV2735_REG_B2_NUM, 0x0120 },
+ { OV2735_REG_B4_NUM, 0x042a },
+ { OV2735_REG_PIXEL_CYCLE_P0, 0x00 },
+ { OV2735_REG_PIXEL_CYCLE_P1, 0x2c },
+ { OV2735_REG_PIXEL_CYCLE_P2, 0x29 },
+ { OV2735_REG_PIXEL_CYCLE_P3, 0x00 },
+ { OV2735_REG_PIXEL_CYCLE_P5, 0x44 },
+ { OV2735_REG_PIXEL_CYCLE_P7, 0x0029 },
+ { OV2735_REG_PIXEL_CYCLE_P9, 0x00 },
+ { OV2735_REG_PIXEL_CYCLE_P10, 0x00 },
+ { OV2735_REG_PIXEL_CYCLE_P12, 0x00 },
+ { OV2735_REG_PIXEL_CYCLE_P18, 0x2f },
+ { OV2735_REG_PIXEL_CYCLE_P20, 0x62 },
+ { OV2735_REG_PIXEL_CYCLE_P22, 0x5b },
+ { OV2735_REG_PIXEL_CYCLE_P33, 0x0046 },
+ { OV2735_REG_PIXEL_CYCLE_P34, 0x36 },
+ { OV2735_REG_PIXEL_CYCLE_P35_P36, 0x4f },
+ { OV2735_REG_PIXEL_CYCLE_P37_P38, 0xef },
+ { OV2735_REG_PIXEL_CYCLE_P31, 0xcf },
+ { OV2735_REG_PIXEL_CYCLE_P32, 0x36 },
+ { OV2735_REG_PIXEL_CYCLE_P44, 0x0d },
+ { OV2735_REG_PIXEL_CYCLE_P45, 0x0d },
+ { OV2735_REG_PIXEL_BIAS_CTRL_RH_RL, 0x77 },
+ { OV2735_REG_PIXEL_BIAS_CTRL_SH_SL, 0x77 },
+
+ /* Analog ctrl */
+ { OV2735_REG_ANALOG_CTRL4, 0x5a },
+ { OV2735_REG_ANALOG_CTRL5, 0x01 },
+ { OV2735_REG_ANALOG_CTRL6, 0xd2 },
+ { OV2735_REG_ANALOG_CTRL8, 0x40 },
+ { OV2735_REG_PCP_RST_SEL, 0x00 },
+ { OV2735_REG_ICOMP, 0xc3 },
+
+ { OV2735_REG_HS_MIPI, 0x83 },
+ { OV2735_REG_MIPI_CTRL5, 0x0b },
+ { OV2735_REG_MIPI_CTRL6, 0x14 },
+ { OV2735_REG_HIGH_SPEED, 0x40 },
+ { OV2735_REG_MIPI_CTRL3, 0x05 },
+ { OV2735_REG_MIPI_CTRL2, 0x44 },
+ { OV2735_REG_PREPARE, 0x33 },
+ { OV2735_REG_R_HS_ZERO, 0x1f },
+ { OV2735_REG_TRAIL, 0x45 },
+ { OV2735_REG_R_CLK_ZERO, 0x10 },
+ { OV2735_REG_MIPI_CTRL7, 0x70 },
+ { OV2735_REG_ANALOG_CTRL3, 0xe0 },
+ { OV2735_REG_VNCP, 0x7b },
+
+ /* BLC */
+ { OV2735_REG_BLC_GAIN_BLUE, 0x77 },
+ { OV2735_REG_BLC_GAIN_GB, 0x77 },
+ { OV2735_REG_BLC_GAIN_RED, 0x74 },
+ { OV2735_REG_BLC_GAIN_GR, 0x74 },
+ { OV2735_REG_BLC_BPC_TH_P, 0xe0 },
+ { OV2735_REG_BLC_BPC_TH_N, 0xe0 },
+ { OV2735_REG_GB_SUBOFFSET, 0x40 },
+ { OV2735_REG_BLUE_SUBOFFSET, 0x40 },
+ { OV2735_REG_RED_SUBOFFSET, 0x40 },
+ { OV2735_REG_GR_SUBOFFSET, 0x40 },
+};
+
+static const struct ov2735_mode supported_modes[] = {
+ {
+ .width = 1920,
+ .height = 1080,
+ .exp_def = 399,
+ .hts_def = 2200,
+ .vts_def = 2545,
+ .crop = {
+ .top = 8,
+ .left = 8,
+ .width = 1920,
+ .height = 1080,
+ },
+ },
+};
+
+static const s64 link_freq_menu_items[] = {
+ OV2735_LINK_FREQ_420MHZ,
+};
+
+static const struct ov2735_pll_parameters pll_configs[] = {
+ /* PLL parameters for the 420 MHz link frequency */
+ {
+ .pll_nc = 4,
+ .pll_mc = 0,
+ .pll_outdiv = 1,
+ },
+};
+
+static const char * const ov2735_test_pattern_menu[] = {
+ "Disabled",
+ "Vertical Color",
+};
+
+static int ov2735_page_access(struct ov2735 *ov2735, u32 reg, int *err)
+{
+ u8 page = reg >> CCI_REG_PRIVATE_SHIFT;
+ int ret = 0;
+
+ if (err && *err)
+ return *err;
+
+ guard(mutex)(&ov2735->page_lock);
+
+ /* Perform page access before read/write */
+ if (ov2735->current_page == page)
+ return ret;
+
+ ret = cci_write(ov2735->cci, OV2735_REG_PAGE_SELECT, page, err);
+ if (!ret)
+ ov2735->current_page = page;
+
+ return ret;
+}
+
+static int ov2735_read(struct ov2735 *ov2735, u32 reg, u64 *val, int *err)
+{
+ u32 addr = reg & ~CCI_REG_PRIVATE_MASK;
+ int ret;
+
+ ret = ov2735_page_access(ov2735, reg, err);
+ if (ret)
+ return ret;
+
+ return cci_read(ov2735->cci, addr, val, err);
+}
+
+static int ov2735_write(struct ov2735 *ov2735, u32 reg, u64 val, int *err)
+{
+ u32 addr = reg & ~CCI_REG_PRIVATE_MASK;
+ int ret;
+
+ ret = ov2735_page_access(ov2735, reg, err);
+ if (ret)
+ return ret;
+
+ return cci_write(ov2735->cci, addr, val, err);
+}
+
+static int ov2735_multi_reg_write(struct ov2735 *ov2735,
+ const struct cci_reg_sequence *regs,
+ unsigned int num_regs, int *err)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < num_regs; i++) {
+ ret = ov2735_write(ov2735, regs[i].reg, regs[i].val, err);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline struct ov2735 *to_ov2735(struct v4l2_subdev *_sd)
+{
+ return container_of_const(_sd, struct ov2735, sd);
+}
+
+static int ov2735_enable_test_pattern(struct ov2735 *ov2735, u32 pattern)
+{
+ int ret;
+ u64 val;
+
+ ret = ov2735_read(ov2735, OV2735_REG_TEST_PATTERN, &val, NULL);
+ if (ret)
+ return ret;
+
+ switch (pattern) {
+ case 0:
+ val &= ~OV2735_TEST_PATTERN_ENABLE;
+ break;
+ case 1:
+ val |= OV2735_TEST_PATTERN_ENABLE;
+ break;
+ }
+
+ return ov2735_write(ov2735, OV2735_REG_TEST_PATTERN, val, NULL);
+}
+
+static int ov2735_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov2735 *ov2735 =
+ container_of_const(ctrl->handler, struct ov2735, handler);
+ struct v4l2_mbus_framefmt *fmt;
+ struct v4l2_subdev_state *state;
+ u64 vts;
+ int ret = 0;
+
+ state = v4l2_subdev_get_locked_active_state(&ov2735->sd);
+ fmt = v4l2_subdev_state_get_format(state, 0);
+
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ /* Honour the VBLANK limits when setting exposure */
+ s64 max = fmt->height + ctrl->val - OV2735_EXPOSURE_MARGIN;
+
+ ret = __v4l2_ctrl_modify_range(ov2735->exposure,
+ ov2735->exposure->minimum, max,
+ ov2735->exposure->step,
+ ov2735->exposure->default_value);
+ if (ret)
+ return ret;
+ }
+
+ if (pm_runtime_get_if_in_use(ov2735->dev) == 0)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE:
+ ov2735_write(ov2735, OV2735_REG_LONG_EXPOSURE, ctrl->val, &ret);
+ break;
+ case V4L2_CID_ANALOGUE_GAIN:
+ ov2735_write(ov2735, OV2735_REG_ANALOG_GAIN, ctrl->val, &ret);
+ break;
+ case V4L2_CID_HBLANK:
+ ov2735_write(ov2735, OV2735_REG_HBLANK, ctrl->val, &ret);
+ break;
+ case V4L2_CID_VBLANK:
+ vts = ctrl->val + fmt->height;
+ ov2735_write(ov2735, OV2735_REG_FRAME_EXP_SEPERATE_EN,
+ OV2735_FRAME_EXP_SEPERATE_EN, &ret);
+ ov2735_write(ov2735, OV2735_REG_FRAME_LENGTH, vts, &ret);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ret = ov2735_enable_test_pattern(ov2735, ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ ov2735_write(ov2735, OV2735_REG_FRAME_SYNC, 0x01, &ret);
+
+ pm_runtime_put(ov2735->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ov2735_ctrl_ops = {
+ .s_ctrl = ov2735_set_ctrl,
+};
+
+static int ov2735_init_controls(struct ov2735 *ov2735)
+{
+ struct v4l2_ctrl_handler *ctrl_hdlr;
+ struct v4l2_fwnode_device_properties props;
+ const struct ov2735_mode *mode = &supported_modes[0];
+ u64 hblank_def, vblank_def, exp_max;
+ int ret;
+
+ ctrl_hdlr = &ov2735->handler;
+ v4l2_ctrl_handler_init(ctrl_hdlr, 9);
+
+ ov2735->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov2735_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 0,
+ OV2735_PIXEL_RATE, 1,
+ OV2735_PIXEL_RATE);
+
+ ov2735->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &ov2735_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ov2735->link_freq_index,
+ 0, link_freq_menu_items);
+ if (ov2735->link_freq)
+ ov2735->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ hblank_def = mode->hts_def - mode->width;
+ ov2735->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov2735_ctrl_ops,
+ V4L2_CID_HBLANK, hblank_def,
+ hblank_def, 1, hblank_def);
+
+ vblank_def = mode->vts_def - mode->height;
+ ov2735->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov2735_ctrl_ops,
+ V4L2_CID_VBLANK, vblank_def,
+ OV2735_FRAME_LENGTH_MAX - mode->height,
+ 1, vblank_def);
+
+ exp_max = mode->vts_def - OV2735_EXPOSURE_MARGIN;
+ ov2735->exposure =
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov2735_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ OV2735_EXPOSURE_MIN, exp_max,
+ OV2735_EXPOSURE_STEP, mode->exp_def);
+
+ ov2735->gain =
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov2735_ctrl_ops,
+ V4L2_CID_ANALOGUE_GAIN, OV2735_ANALOG_GAIN_MIN,
+ OV2735_ANALOG_GAIN_MAX, OV2735_ANALOG_GAIN_STEP,
+ OV2735_ANALOG_GAIN_DEFAULT);
+
+ ov2735->test_pattern =
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov2735_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(ov2735_test_pattern_menu) - 1,
+ 0, 0, ov2735_test_pattern_menu);
+
+ if (ctrl_hdlr->error) {
+ ret = ctrl_hdlr->error;
+ dev_err(ov2735->dev, "control init failed (%d)\n", ret);
+ goto err_handler_free;
+ }
+
+ ret = v4l2_fwnode_device_parse(ov2735->dev, &props);
+ if (ret)
+ goto err_handler_free;
+
+ ret = v4l2_ctrl_new_fwnode_properties(ctrl_hdlr,
+ &ov2735_ctrl_ops, &props);
+ if (ret)
+ goto err_handler_free;
+
+ ov2735->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+
+err_handler_free:
+ v4l2_ctrl_handler_free(ctrl_hdlr);
+
+ return ret;
+}
+
+static int ov2735_set_pll_ctrl(struct ov2735 *ov2735)
+{
+ const struct ov2735_pll_parameters *pll_parameters;
+ u8 pll_ctrl;
+ u8 pll_outdiv;
+ int ret = 0;
+
+ pll_parameters = &pll_configs[ov2735->link_freq_index];
+
+ /* BIT[7]: pll_clk_sel, BIT[6:2]: pll_nc, BIT[1:0]: pll_mc */
+ pll_ctrl = ((pll_parameters->pll_nc << 2) | (pll_parameters->pll_mc << 0)) &
+ OV2735_PLL_CTRL_ENABLE;
+
+ pll_outdiv = pll_parameters->pll_outdiv;
+
+ ov2735_write(ov2735, OV2735_REG_PLL_CTRL, pll_ctrl, &ret);
+ ov2735_write(ov2735, OV2735_REG_PLL_OUTDIV, pll_outdiv, &ret);
+
+ return ret;
+}
+
+static int ov2735_set_framefmt(struct ov2735 *ov2735,
+ struct v4l2_subdev_state *state)
+{
+ const struct v4l2_mbus_framefmt *format;
+ const struct v4l2_rect *crop;
+ int ret = 0;
+
+ format = v4l2_subdev_state_get_format(state, 0);
+ crop = v4l2_subdev_state_get_crop(state, 0);
+
+ ov2735_write(ov2735, OV2735_REG_V_START, crop->top, &ret);
+ ov2735_write(ov2735, OV2735_REG_V_SIZE, format->height, &ret);
+ ov2735_write(ov2735, OV2735_REG_MIPI_LINE_NUMBER, format->height, &ret);
+ ov2735_write(ov2735, OV2735_REG_H_START, crop->left, &ret);
+ /* OV2735_REG_H_SIZE is programmed with half the horizontal size */
+ ov2735_write(ov2735, OV2735_REG_H_SIZE, (format->width / 2), &ret);
+ ov2735_write(ov2735, OV2735_REG_MIPI_COLOMN_NUMBER, format->width, &ret);
+
+ return ret;
+}
+
+static int ov2735_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct ov2735 *ov2735 = to_ov2735(sd);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(ov2735->dev);
+ if (ret < 0)
+ return ret;
+
+ /* Apply pll settings */
+ ret = ov2735_set_pll_ctrl(ov2735);
+ if (ret) {
+ dev_err(ov2735->dev, "failed to set frame format: %d\n", ret);
+ goto err_rpm_put;
+ }
+
+ ret = ov2735_multi_reg_write(ov2735, ov2735_common_regs,
+ ARRAY_SIZE(ov2735_common_regs), NULL);
+ if (ret) {
+ dev_err(ov2735->dev, "failed to write common registers\n");
+ goto err_rpm_put;
+ }
+
+ /* Apply format settings */
+ ret = ov2735_set_framefmt(ov2735, state);
+ if (ret) {
+ dev_err(ov2735->dev, "failed to set frame format: %d\n", ret);
+ goto err_rpm_put;
+ }
+
+ /* Apply customized values from user */
+ ret = __v4l2_ctrl_handler_setup(ov2735->sd.ctrl_handler);
+ if (ret)
+ goto err_rpm_put;
+
+ ret = ov2735_write(ov2735, OV2735_REG_STREAM_CTRL,
+ OV2735_STREAM_CTRL_ON, NULL);
+ if (ret)
+ goto err_rpm_put;
+
+ return 0;
+
+err_rpm_put:
+ pm_runtime_put(ov2735->dev);
+ return ret;
+}
+
+static int ov2735_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct ov2735 *ov2735 = to_ov2735(sd);
+ int ret;
+
+ ret = ov2735_write(ov2735, OV2735_REG_STREAM_CTRL,
+ OV2735_STREAM_CTRL_OFF, NULL);
+ if (ret)
+ dev_err(ov2735->dev, "%s failed to set stream\n", __func__);
+
+ pm_runtime_put(ov2735->dev);
+
+ return ret;
+}
+
+static int ov2735_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *v4l2_subdev_state_get_crop(sd_state, 0);
+ return 0;
+ case V4L2_SEL_TGT_NATIVE_SIZE:
+ sel->r = ov2735_native_area;
+ return 0;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r = ov2735_active_area;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ov2735_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ return 0;
+}
+
+static int ov2735_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static int ov2735_set_framing_limits(struct ov2735 *ov2735,
+ const struct ov2735_mode *mode)
+{
+ u32 hblank, vblank_def;
+ int ret;
+
+ hblank = mode->hts_def - mode->width;
+ ret = __v4l2_ctrl_modify_range(ov2735->hblank, hblank, hblank, 1,
+ hblank);
+ if (ret)
+ return ret;
+
+ vblank_def = mode->vts_def - mode->height;
+ return __v4l2_ctrl_modify_range(ov2735->vblank, vblank_def,
+ OV2735_FRAME_LENGTH_MAX - mode->height,
+ 1, vblank_def);
+}
+
+static int ov2735_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_mbus_framefmt *format;
+ const struct ov2735_mode *mode;
+ struct v4l2_rect *crop;
+ struct ov2735 *ov2735 = to_ov2735(sd);
+ int ret;
+
+ format = v4l2_subdev_state_get_format(sd_state, 0);
+
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes),
+ width, height,
+ fmt->format.width, fmt->format.height);
+
+ fmt->format.width = mode->width;
+ fmt->format.height = mode->height;
+ fmt->format.field = V4L2_FIELD_NONE;
+ fmt->format.colorspace = V4L2_COLORSPACE_RAW;
+ fmt->format.quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ fmt->format.xfer_func = V4L2_XFER_FUNC_NONE;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ ret = ov2735_set_framing_limits(ov2735, mode);
+ if (ret)
+ return ret;
+ }
+
+ *format = fmt->format;
+
+ /* Initialize crop rectangle */
+ crop = v4l2_subdev_state_get_crop(sd_state, 0);
+ *crop = mode->crop;
+
+ return 0;
+}
+
+static int ov2735_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ .format = {
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .width = supported_modes[0].width,
+ .height = supported_modes[0].height,
+ },
+ };
+
+ ov2735_set_pad_format(sd, state, &fmt);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops ov2735_video_ops = {
+ .s_stream = v4l2_subdev_s_stream_helper,
+};
+
+static const struct v4l2_subdev_pad_ops ov2735_pad_ops = {
+ .enum_mbus_code = ov2735_enum_mbus_code,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = ov2735_set_pad_format,
+ .get_selection = ov2735_get_selection,
+ .enum_frame_size = ov2735_enum_frame_size,
+ .enable_streams = ov2735_enable_streams,
+ .disable_streams = ov2735_disable_streams,
+};
+
+static const struct v4l2_subdev_ops ov2735_subdev_ops = {
+ .video = &ov2735_video_ops,
+ .pad = &ov2735_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops ov2735_internal_ops = {
+ .init_state = ov2735_init_state,
+};
+
+static int ov2735_power_on(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov2735 *ov2735 = to_ov2735(sd);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ov2735_supply_name),
+ ov2735->supplies);
+ if (ret) {
+ dev_err(ov2735->dev, "failed to enable regulators\n");
+ return ret;
+ }
+
+ gpiod_set_value_cansleep(ov2735->enable_gpio, 1);
+ /* T4: delay from PWDN pulling low to RSTB pulling high */
+ fsleep(4 * USEC_PER_MSEC);
+
+ ret = clk_prepare_enable(ov2735->xclk);
+ if (ret) {
+ dev_err(ov2735->dev, "failed to enable clock\n");
+ goto err_regulator_off;
+ }
+
+ gpiod_set_value_cansleep(ov2735->reset_gpio, 0);
+ /* T5: delay from RSTB pulling high to first I2C command */
+ fsleep(5 * USEC_PER_MSEC);
+
+ return 0;
+
+err_regulator_off:
+ regulator_bulk_disable(ARRAY_SIZE(ov2735_supply_name), ov2735->supplies);
+ return ret;
+}
+
+static int ov2735_power_off(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov2735 *ov2735 = to_ov2735(sd);
+
+ gpiod_set_value_cansleep(ov2735->enable_gpio, 0);
+ clk_disable_unprepare(ov2735->xclk);
+ gpiod_set_value_cansleep(ov2735->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(ov2735_supply_name), ov2735->supplies);
+
+ return 0;
+}
+
+static int ov2735_get_regulators(struct ov2735 *ov2735)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ov2735_supply_name); i++)
+ ov2735->supplies[i].supply = ov2735_supply_name[i];
+
+ return devm_regulator_bulk_get(ov2735->dev,
+ ARRAY_SIZE(ov2735_supply_name),
+ ov2735->supplies);
+}
+
+static int ov2735_identify_module(struct ov2735 *ov2735)
+{
+ u64 chip_id;
+ int ret;
+
+ ret = ov2735_read(ov2735, OV2735_REG_CHIPID, &chip_id, NULL);
+ if (ret)
+ return dev_err_probe(ov2735->dev, ret,
+ "failed to read chip id %x\n",
+ OV2735_CHIPID);
+
+ if (chip_id != OV2735_CHIPID)
+ return dev_err_probe(ov2735->dev, -EIO,
+ "chip id mismatch: %x!=%llx\n",
+ OV2735_CHIPID, chip_id);
+
+ return 0;
+}
+
+static int ov2735_parse_endpoint(struct ov2735 *ov2735)
+{
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ struct fwnode_handle *ep;
+ unsigned long link_freq_bitmap;
+ int ret;
+
+ ep = fwnode_graph_get_next_endpoint(dev_fwnode(ov2735->dev), NULL);
+ if (!ep)
+ return dev_err_probe(ov2735->dev, -ENXIO,
+ "Failed to get next endpoint\n");
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret)
+ return ret;
+
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes != 2) {
+ ret = dev_err_probe(ov2735->dev, -EINVAL,
+ "only 2 data lanes are supported\n");
+ goto error_out;
+ }
+
+ ret = v4l2_link_freq_to_bitmap(ov2735->dev, bus_cfg.link_frequencies,
+ bus_cfg.nr_of_link_frequencies,
+ link_freq_menu_items,
+ ARRAY_SIZE(link_freq_menu_items),
+ &link_freq_bitmap);
+ if (ret) {
+ ret = dev_err_probe(ov2735->dev, -EINVAL,
+ "only 420MHz frequency is available\n");
+ goto error_out;
+ }
+
+ ov2735->link_freq_index = __ffs(link_freq_bitmap);
+
+error_out:
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+
+ return ret;
+};
+
+static int ov2735_probe(struct i2c_client *client)
+{
+ struct ov2735 *ov2735;
+ unsigned int xclk_freq;
+ int ret;
+
+ ov2735 = devm_kzalloc(&client->dev, sizeof(*ov2735), GFP_KERNEL);
+ if (!ov2735)
+ return -ENOMEM;
+
+ ov2735->dev = &client->dev;
+
+ v4l2_i2c_subdev_init(&ov2735->sd, client, &ov2735_subdev_ops);
+ ov2735->sd.internal_ops = &ov2735_internal_ops;
+
+ ov2735->cci = devm_cci_regmap_init_i2c(client, 8);
+ if (IS_ERR(ov2735->cci))
+ return dev_err_probe(ov2735->dev, PTR_ERR(ov2735->cci),
+ "failed to initialize CCI\n");
+
+ /* Start with register page 0 selected */
+ ov2735->current_page = 0;
+
+ ret = devm_mutex_init(ov2735->dev, &ov2735->page_lock);
+ if (ret)
+ return dev_err_probe(ov2735->dev, ret,
+ "Failed to initialize lock\n");
+
+ /* Get system clock (xvclk) */
+ ov2735->xclk = devm_v4l2_sensor_clk_get(ov2735->dev, NULL);
+ if (IS_ERR(ov2735->xclk))
+ return dev_err_probe(ov2735->dev, PTR_ERR(ov2735->xclk),
+ "failed to get xclk\n");
+
+ xclk_freq = clk_get_rate(ov2735->xclk);
+ if (xclk_freq != OV2735_XCLK_FREQ)
+ return dev_err_probe(ov2735->dev, -EINVAL,
+ "xclk frequency not supported: %u Hz\n",
+ xclk_freq);
+
+ ret = ov2735_get_regulators(ov2735);
+ if (ret)
+ return dev_err_probe(ov2735->dev, ret,
+ "failed to get regulators\n");
+
+ ret = ov2735_parse_endpoint(ov2735);
+ if (ret)
+ return dev_err_probe(ov2735->dev, ret,
+ "failed to parse endpoint configuration\n");
+
+ ov2735->reset_gpio = devm_gpiod_get_optional(ov2735->dev,
+ "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ov2735->reset_gpio))
+ return dev_err_probe(ov2735->dev, PTR_ERR(ov2735->reset_gpio),
+ "failed to get reset GPIO\n");
+
+ ov2735->enable_gpio = devm_gpiod_get_optional(ov2735->dev,
+ "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(ov2735->enable_gpio))
+ return dev_err_probe(ov2735->dev, PTR_ERR(ov2735->enable_gpio),
+ "failed to get enable GPIO\n");
+
+ ret = ov2735_power_on(ov2735->dev);
+ if (ret)
+ return ret;
+
+ ret = ov2735_identify_module(ov2735);
+ if (ret)
+ goto error_power_off;
+
+ ret = ov2735_init_controls(ov2735);
+ if (ret)
+ goto error_power_off;
+
+ /* Initialize subdev */
+ ov2735->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ov2735->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ov2735->pad.flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&ov2735->sd.entity, 1, &ov2735->pad);
+ if (ret) {
+ dev_err_probe(ov2735->dev, ret, "failed to init entity pads\n");
+ goto error_handler_free;
+ }
+
+ ov2735->sd.state_lock = ov2735->handler.lock;
+ ret = v4l2_subdev_init_finalize(&ov2735->sd);
+ if (ret) {
+ dev_err_probe(ov2735->dev, ret, "subdev init error\n");
+ goto error_media_entity;
+ }
+
+ ret = devm_pm_runtime_get_noresume(ov2735->dev);
+ if (ret) {
+ dev_err_probe(ov2735->dev, ret,
+ "failed to get runtime PM noresume\n");
+ goto error_subdev_cleanup;
+ }
+
+ ret = devm_pm_runtime_set_active_enabled(ov2735->dev);
+ if (ret) {
+ dev_err_probe(ov2735->dev, ret,
+ "failed to set runtime PM active+enabled\n");
+ goto error_subdev_cleanup;
+ }
+
+ ret = v4l2_async_register_subdev_sensor(&ov2735->sd);
+ if (ret) {
+ dev_err_probe(ov2735->dev, ret,
+ "failed to register ov2735 sub-device\n");
+ goto error_subdev_cleanup;
+ }
+
+ return 0;
+
+error_subdev_cleanup:
+ v4l2_subdev_cleanup(&ov2735->sd);
+
+error_media_entity:
+ media_entity_cleanup(&ov2735->sd.entity);
+
+error_handler_free:
+ v4l2_ctrl_handler_free(ov2735->sd.ctrl_handler);
+
+error_power_off:
+ ov2735_power_off(ov2735->dev);
+
+ return ret;
+}
+
+static void ov2735_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov2735 *ov2735 = to_ov2735(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ v4l2_subdev_cleanup(&ov2735->sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(ov2735->sd.ctrl_handler);
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(ov2735_pm_ops,
+ ov2735_power_off, ov2735_power_on, NULL);
+
+static const struct of_device_id ov2735_id[] = {
+ { .compatible = "ovti,ov2735" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ov2735_id);
+
+static struct i2c_driver ov2735_driver = {
+ .driver = {
+ .name = "ov2735",
+ .pm = pm_ptr(&ov2735_pm_ops),
+ .of_match_table = ov2735_id,
+ },
+ .probe = ov2735_probe,
+ .remove = ov2735_remove,
+};
+module_i2c_driver(ov2735_driver);
+
+MODULE_DESCRIPTION("OV2735 Camera Sensor Driver");
+MODULE_AUTHOR("Hardevsinh Palaniya <hardevsinh.palaniya@siliconsignals.io>");
+MODULE_AUTHOR("Himanshu Bhavani <himanshu.bhavani@siliconsignals.io>");
+MODULE_LICENSE("GPL");
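The most sensor-specific piece of the new ov2735 driver is its register paging: several 8-bit register pages sit behind a single page-select register (0xfd), and the driver encodes the page index into the CCI address's private bits so that ov2735_read()/ov2735_write() can switch pages transparently. A small sketch of that decomposition, using only the macros the file above relies on:

#include <linux/types.h>
#include <media/v4l2-cci.h>

/* Split a paged register token the way ov2735_page_access() and the
 * read/write wrappers above do. */
static void sketch_decode_reg(u32 reg, u8 *page, u32 *cci_addr)
{
	*page = reg >> CCI_REG_PRIVATE_SHIFT;	 /* value written to 0xfd */
	*cci_addr = reg & ~CCI_REG_PRIVATE_MASK; /* width-tagged CCI address */
}

Caching current_page under page_lock then avoids a redundant page-select write on consecutive accesses to the same page.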
diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
index 4e959534e6e7..fb590dfadda1 100644
--- a/drivers/media/i2c/ov2740.c
+++ b/drivers/media/i2c/ov2740.c
@@ -1,17 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Intel Corporation.
-#include <linux/unaligned.h>
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/pm_runtime.h>
#include <linux/nvmem-provider.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
+#include <linux/unaligned.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -519,6 +519,8 @@ static const struct ov2740_mode supported_modes_180mhz[] = {
};
struct ov2740 {
+ struct device *dev;
+
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler ctrl_handler;
@@ -616,7 +618,6 @@ static int ov2740_write_reg(struct ov2740 *ov2740, u16 reg, u16 len, u32 val)
static int ov2740_write_reg_list(struct ov2740 *ov2740,
const struct ov2740_reg_list *r_list)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov2740->sd);
unsigned int i;
int ret;
@@ -624,7 +625,7 @@ static int ov2740_write_reg_list(struct ov2740 *ov2740,
ret = ov2740_write_reg(ov2740, r_list->regs[i].address, 1,
r_list->regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(ov2740->dev,
"write reg 0x%4.4x return err = %d\n",
r_list->regs[i].address, ret);
return ret;
@@ -636,7 +637,6 @@ static int ov2740_write_reg_list(struct ov2740 *ov2740,
static int ov2740_identify_module(struct ov2740 *ov2740)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov2740->sd);
int ret;
u32 val;
@@ -648,12 +648,12 @@ static int ov2740_identify_module(struct ov2740 *ov2740)
return ret;
if (val != OV2740_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x != %x\n",
+ dev_err(ov2740->dev, "chip id mismatch: %x != %x\n",
OV2740_CHIP_ID, val);
return -ENXIO;
}
- dev_dbg(&client->dev, "chip id: 0x%x\n", val);
+ dev_dbg(ov2740->dev, "chip id: 0x%x\n", val);
ov2740->identified = true;
@@ -704,7 +704,6 @@ static int ov2740_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov2740 *ov2740 = container_of(ctrl->handler,
struct ov2740, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov2740->sd);
s64 exposure_max;
int ret;
@@ -720,7 +719,7 @@ static int ov2740_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 controls values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov2740->dev))
return 0;
switch (ctrl->id) {
@@ -753,7 +752,7 @@ static int ov2740_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov2740->dev);
return ret;
}
@@ -764,7 +763,6 @@ static const struct v4l2_ctrl_ops ov2740_ctrl_ops = {
static int ov2740_init_controls(struct ov2740 *ov2740)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov2740->sd);
struct v4l2_ctrl_handler *ctrl_hdlr;
s64 exposure_max, h_blank, pixel_rate;
u32 vblank_min, vblank_max, vblank_default;
@@ -821,7 +819,7 @@ static int ov2740_init_controls(struct ov2740 *ov2740)
ARRAY_SIZE(ov2740_test_pattern_menu) - 1,
0, 0, ov2740_test_pattern_menu);
- ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ ret = v4l2_fwnode_device_parse(ov2740->dev, &props);
if (ret) {
v4l2_ctrl_handler_free(ctrl_hdlr);
return ret;
@@ -940,7 +938,6 @@ err:
static int ov2740_start_streaming(struct ov2740 *ov2740)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov2740->sd);
const struct ov2740_reg_list *reg_list;
int link_freq_index;
int ret;
@@ -955,7 +952,7 @@ static int ov2740_start_streaming(struct ov2740 *ov2740)
/* Reset the sensor */
ret = ov2740_write_reg(ov2740, 0x0103, 1, 0x01);
if (ret) {
- dev_err(&client->dev, "failed to reset\n");
+ dev_err(ov2740->dev, "failed to reset\n");
return ret;
}
@@ -965,14 +962,14 @@ static int ov2740_start_streaming(struct ov2740 *ov2740)
reg_list = &link_freq_configs[link_freq_index].reg_list;
ret = ov2740_write_reg_list(ov2740, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set plls\n");
+ dev_err(ov2740->dev, "failed to set plls\n");
return ret;
}
reg_list = &ov2740->cur_mode->reg_list;
ret = ov2740_write_reg_list(ov2740, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set mode\n");
+ dev_err(ov2740->dev, "failed to set mode\n");
return ret;
}
@@ -983,31 +980,28 @@ static int ov2740_start_streaming(struct ov2740 *ov2740)
ret = ov2740_write_reg(ov2740, OV2740_REG_MODE_SELECT, 1,
OV2740_MODE_STREAMING);
if (ret)
- dev_err(&client->dev, "failed to start streaming\n");
+ dev_err(ov2740->dev, "failed to start streaming\n");
return ret;
}
static void ov2740_stop_streaming(struct ov2740 *ov2740)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov2740->sd);
-
if (ov2740_write_reg(ov2740, OV2740_REG_MODE_SELECT, 1,
OV2740_MODE_STANDBY))
- dev_err(&client->dev, "failed to stop streaming\n");
+ dev_err(ov2740->dev, "failed to stop streaming\n");
}
static int ov2740_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ov2740 *ov2740 = to_ov2740(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
struct v4l2_subdev_state *sd_state;
int ret = 0;
sd_state = v4l2_subdev_lock_and_get_active_state(&ov2740->sd);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov2740->dev);
if (ret < 0)
goto out_unlock;
@@ -1015,11 +1009,11 @@ static int ov2740_set_stream(struct v4l2_subdev *sd, int enable)
if (ret) {
enable = 0;
ov2740_stop_streaming(ov2740);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov2740->dev);
}
} else {
ov2740_stop_streaming(ov2740);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov2740->dev);
}
out_unlock:
@@ -1131,16 +1125,14 @@ static const struct media_entity_operations ov2740_subdev_entity_ops = {
.link_validate = v4l2_subdev_link_validate,
};
-static int ov2740_check_hwcfg(struct device *dev)
+static int ov2740_check_hwcfg(struct ov2740 *ov2740)
{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct ov2740 *ov2740 = to_ov2740(sd);
+ struct device *dev = ov2740->dev;
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
struct v4l2_fwnode_endpoint bus_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
- u32 mclk;
int ret;
unsigned int i, j;
@@ -1153,20 +1145,6 @@ static int ov2740_check_hwcfg(struct device *dev)
return dev_err_probe(dev, -EPROBE_DEFER,
"waiting for fwnode graph endpoint\n");
- ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
- if (ret) {
- fwnode_handle_put(ep);
- return dev_err_probe(dev, ret,
- "reading clock-frequency property\n");
- }
-
- if (mclk != OV2740_MCLK) {
- fwnode_handle_put(ep);
- return dev_err_probe(dev, -EINVAL,
- "external clock %d is not supported\n",
- mclk);
- }
-
ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
fwnode_handle_put(ep);
if (ret)
@@ -1270,7 +1248,7 @@ static int ov2740_register_nvmem(struct i2c_client *client,
struct regmap_config regmap_config = { };
struct nvmem_config nvmem_config = { };
struct regmap *regmap;
- struct device *dev = &client->dev;
+ struct device *dev = ov2740->dev;
nvm = devm_kzalloc(dev, sizeof(*nvm), GFP_KERNEL);
if (!nvm)
@@ -1349,6 +1327,7 @@ static int ov2740_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ov2740 *ov2740;
+ unsigned long freq;
bool full_power;
unsigned int i;
int ret;
@@ -1357,10 +1336,12 @@ static int ov2740_probe(struct i2c_client *client)
if (!ov2740)
return -ENOMEM;
+ ov2740->dev = &client->dev;
+
v4l2_i2c_subdev_init(&ov2740->sd, client, &ov2740_subdev_ops);
ov2740->sd.internal_ops = &ov2740_internal_ops;
- ret = ov2740_check_hwcfg(dev);
+ ret = ov2740_check_hwcfg(ov2740);
if (ret)
return ret;
@@ -1384,11 +1365,17 @@ static int ov2740_probe(struct i2c_client *client)
msleep(20);
}
- ov2740->clk = devm_clk_get_optional(dev, "clk");
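+ /* The helper also covers systems that describe the clock rate only via a "clock-frequency" property */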
+ ov2740->clk = devm_v4l2_sensor_clk_get(dev, "clk");
if (IS_ERR(ov2740->clk))
return dev_err_probe(dev, PTR_ERR(ov2740->clk),
"failed to get clock\n");
+ freq = clk_get_rate(ov2740->clk);
+ if (freq != OV2740_MCLK)
+ return dev_err_probe(dev, -EINVAL,
+ "external clock %lu is not supported\n",
+ freq);
+
for (i = 0; i < ARRAY_SIZE(ov2740_supply_name); i++)
ov2740->supplies[i].supply = ov2740_supply_name[i];
@@ -1397,7 +1384,7 @@ static int ov2740_probe(struct i2c_client *client)
if (ret)
return dev_err_probe(dev, ret, "failed to get regulators\n");
- full_power = acpi_dev_state_d0(&client->dev);
+ full_power = acpi_dev_state_d0(ov2740->dev);
if (full_power) {
/* ACPI does not always clear the reset GPIO / enable the clock */
ret = ov2740_resume(dev);
@@ -1435,9 +1422,9 @@ static int ov2740_probe(struct i2c_client *client)
/* Set the device's state to active if it's in D0 state. */
if (full_power)
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(ov2740->dev);
+ pm_runtime_enable(ov2740->dev);
+ pm_runtime_idle(ov2740->dev);
ret = v4l2_async_register_subdev_sensor(&ov2740->sd);
if (ret < 0) {
@@ -1447,13 +1434,13 @@ static int ov2740_probe(struct i2c_client *client)
ret = ov2740_register_nvmem(client, ov2740);
if (ret)
- dev_warn(&client->dev, "register nvmem failed, ret %d\n", ret);
+ dev_warn(ov2740->dev, "register nvmem failed, ret %d\n", ret);
return 0;
probe_error_v4l2_subdev_cleanup:
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(ov2740->dev);
+ pm_runtime_set_suspended(ov2740->dev);
v4l2_subdev_cleanup(&ov2740->sd);
probe_error_media_entity_cleanup:
diff --git a/drivers/media/i2c/ov4689.c b/drivers/media/i2c/ov4689.c
index 1c3a449f9354..a59d25b09b5b 100644
--- a/drivers/media/i2c/ov4689.c
+++ b/drivers/media/i2c/ov4689.c
@@ -497,7 +497,6 @@ static int ov4689_s_stream(struct v4l2_subdev *sd, int on)
} else {
cci_write(ov4689->regmap, OV4689_REG_CTRL_MODE,
OV4689_MODE_SW_STANDBY, NULL);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
@@ -702,7 +701,6 @@ static int ov4689_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -909,20 +907,12 @@ static int ov4689_probe(struct i2c_client *client)
ov4689->cur_mode = &supported_modes[OV4689_MODE_2688_1520];
- ov4689->xvclk = devm_clk_get_optional(dev, NULL);
+ ov4689->xvclk = devm_v4l2_sensor_clk_get(dev, NULL);
if (IS_ERR(ov4689->xvclk))
return dev_err_probe(dev, PTR_ERR(ov4689->xvclk),
"Failed to get external clock\n");
- if (!ov4689->xvclk) {
- dev_dbg(dev,
- "No clock provided, using clock-frequency property\n");
- device_property_read_u32(dev, "clock-frequency",
- &ov4689->clock_rate);
- } else {
- ov4689->clock_rate = clk_get_rate(ov4689->xvclk);
- }
-
+ ov4689->clock_rate = clk_get_rate(ov4689->xvclk);
if (ov4689->clock_rate != OV4689_XVCLK_FREQ) {
dev_err(dev,
"External clock rate mismatch: got %d Hz, expected %d Hz\n",
@@ -999,7 +989,6 @@ static int ov4689_probe(struct i2c_client *client)
goto err_clean_subdev_pm;
}
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index 0dae0438aa80..85ecc23b3587 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -3341,7 +3341,6 @@ static int ov5640_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(&sensor->i2c_client->dev);
pm_runtime_put_autosuspend(&sensor->i2c_client->dev);
return 0;
@@ -3417,7 +3416,6 @@ static int ov5640_s_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(&sensor->i2c_client->dev);
pm_runtime_put_autosuspend(&sensor->i2c_client->dev);
return ret;
@@ -3754,7 +3752,6 @@ out:
mutex_unlock(&sensor->lock);
if (!enable || ret) {
- pm_runtime_mark_last_busy(&sensor->i2c_client->dev);
pm_runtime_put_autosuspend(&sensor->i2c_client->dev);
}
@@ -3898,11 +3895,10 @@ static int ov5640_probe(struct i2c_client *client)
ov5640_dvp_default_fmt;
/* get system clock (xclk) */
- sensor->xclk = devm_clk_get(dev, "xclk");
- if (IS_ERR(sensor->xclk)) {
- dev_err(dev, "failed to get xclk\n");
- return PTR_ERR(sensor->xclk);
- }
+ sensor->xclk = devm_v4l2_sensor_clk_get(dev, "xclk");
+ if (IS_ERR(sensor->xclk))
+ return dev_err_probe(dev, PTR_ERR(sensor->xclk),
+ "failed to get xclk\n");
sensor->xclk_freq = clk_get_rate(sensor->xclk);
if (sensor->xclk_freq < OV5640_XCLK_MIN ||
@@ -3965,7 +3961,6 @@ static int ov5640_probe(struct i2c_client *client)
pm_runtime_set_autosuspend_delay(dev, 1000);
pm_runtime_use_autosuspend(dev);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c
index 004d0ee5c3f5..b10d408034a1 100644
--- a/drivers/media/i2c/ov5645.c
+++ b/drivers/media/i2c/ov5645.c
@@ -808,7 +808,6 @@ static int ov5645_s_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(ov5645->dev);
pm_runtime_put_autosuspend(ov5645->dev);
return ret;
@@ -979,7 +978,6 @@ static int ov5645_disable_streams(struct v4l2_subdev *sd,
OV5645_SYSTEM_CTRL0_STOP);
rpm_put:
- pm_runtime_mark_last_busy(ov5645->dev);
pm_runtime_put_autosuspend(ov5645->dev);
return ret;
@@ -1044,27 +1042,18 @@ static int ov5645_probe(struct i2c_client *client)
"invalid bus type, must be CSI2\n");
/* get system clock (xclk) */
- ov5645->xclk = devm_clk_get(dev, NULL);
+ ov5645->xclk = devm_v4l2_sensor_clk_get_legacy(dev, NULL, false, 0);
if (IS_ERR(ov5645->xclk))
return dev_err_probe(dev, PTR_ERR(ov5645->xclk),
"could not get xclk");
- ret = of_property_read_u32(dev->of_node, "clock-frequency", &xclk_freq);
- if (ret)
- return dev_err_probe(dev, ret,
- "could not get xclk frequency\n");
-
/* external clock must be 24MHz, allow 1% tolerance */
+ xclk_freq = clk_get_rate(ov5645->xclk);
if (xclk_freq < 23760000 || xclk_freq > 24240000)
return dev_err_probe(dev, -EINVAL,
"unsupported xclk frequency %u\n",
xclk_freq);
- ret = clk_set_rate(ov5645->xclk, xclk_freq);
- if (ret)
- return dev_err_probe(dev, ret,
- "could not set xclk frequency\n");
-
for (i = 0; i < OV5645_NUM_SUPPLIES; i++)
ov5645->supplies[i].supply = ov5645_supply_name[i];
@@ -1196,7 +1185,6 @@ static int ov5645_probe(struct i2c_client *client)
pm_runtime_set_autosuspend_delay(dev, 1000);
pm_runtime_use_autosuspend(dev);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
diff --git a/drivers/media/i2c/ov5647.c b/drivers/media/i2c/ov5647.c
index a727beb9d57e..e193fef4fced 100644
--- a/drivers/media/i2c/ov5647.c
+++ b/drivers/media/i2c/ov5647.c
@@ -1398,11 +1398,10 @@ static int ov5647_probe(struct i2c_client *client)
}
}
- sensor->xclk = devm_clk_get(dev, NULL);
- if (IS_ERR(sensor->xclk)) {
- dev_err(dev, "could not get xclk");
- return PTR_ERR(sensor->xclk);
- }
+ sensor->xclk = devm_v4l2_sensor_clk_get(dev, NULL);
+ if (IS_ERR(sensor->xclk))
+ return dev_err_probe(dev, PTR_ERR(sensor->xclk),
+ "could not get xclk\n");
xclk_freq = clk_get_rate(sensor->xclk);
if (xclk_freq != 25000000) {
diff --git a/drivers/media/i2c/ov5648.c b/drivers/media/i2c/ov5648.c
index 4b86d2631bd1..f0b839cd65f1 100644
--- a/drivers/media/i2c/ov5648.c
+++ b/drivers/media/i2c/ov5648.c
@@ -1061,8 +1061,8 @@ static int ov5648_sw_standby(struct ov5648_sensor *sensor, int standby)
static int ov5648_chip_id_check(struct ov5648_sensor *sensor)
{
- u16 regs[] = { OV5648_CHIP_ID_H_REG, OV5648_CHIP_ID_L_REG };
- u8 values[] = { OV5648_CHIP_ID_H_VALUE, OV5648_CHIP_ID_L_VALUE };
+ static const u16 regs[] = { OV5648_CHIP_ID_H_REG, OV5648_CHIP_ID_L_REG };
+ static const u8 values[] = { OV5648_CHIP_ID_H_VALUE, OV5648_CHIP_ID_L_VALUE };
unsigned int i;
u8 value;
int ret;
@@ -2521,10 +2521,10 @@ static int ov5648_probe(struct i2c_client *client)
/* External Clock */
- sensor->xvclk = devm_clk_get(dev, NULL);
+ sensor->xvclk = devm_v4l2_sensor_clk_get(dev, NULL);
if (IS_ERR(sensor->xvclk)) {
- dev_err(dev, "failed to get external clock\n");
- ret = PTR_ERR(sensor->xvclk);
+ ret = dev_err_probe(dev, PTR_ERR(sensor->xvclk),
+ "failed to get external clock\n");
goto error_endpoint;
}
diff --git a/drivers/media/i2c/ov5670.c b/drivers/media/i2c/ov5670.c
index b9efb2d2276a..04b3183b7bcb 100644
--- a/drivers/media/i2c/ov5670.c
+++ b/drivers/media/i2c/ov5670.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Intel Corporation.
-#include <linux/unaligned.h>
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -12,6 +11,8 @@
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
@@ -1854,6 +1855,8 @@ static const struct ov5670_mode supported_modes[] = {
};
struct ov5670 {
+ struct device *dev;
+
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_fwnode_endpoint endpoint;
@@ -1959,7 +1962,6 @@ static int ov5670_write_reg(struct ov5670 *ov5670, u16 reg, unsigned int len,
static int ov5670_write_regs(struct ov5670 *ov5670,
const struct ov5670_reg *regs, unsigned int len)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
unsigned int i;
int ret;
@@ -1967,7 +1969,7 @@ static int ov5670_write_regs(struct ov5670 *ov5670,
ret = ov5670_write_reg(ov5670, regs[i].address, 1, regs[i].val);
if (ret) {
dev_err_ratelimited(
- &client->dev,
+ ov5670->dev,
"Failed to write reg 0x%4.4x. error = %d\n",
regs[i].address, ret);
@@ -2032,7 +2034,6 @@ static int ov5670_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov5670 *ov5670 = container_of(ctrl->handler,
struct ov5670, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
s64 max;
int ret;
@@ -2048,7 +2049,7 @@ static int ov5670_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 controls values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov5670->dev))
return 0;
switch (ctrl->id) {
@@ -2080,12 +2081,12 @@ static int ov5670_set_ctrl(struct v4l2_ctrl *ctrl)
break;
default:
ret = -EINVAL;
- dev_info(&client->dev, "%s Unhandled id:0x%x, val:0x%x\n",
+ dev_info(ov5670->dev, "%s Unhandled id:0x%x, val:0x%x\n",
__func__, ctrl->id, ctrl->val);
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov5670->dev);
return ret;
}
@@ -2099,7 +2100,6 @@ static int ov5670_init_controls(struct ov5670 *ov5670)
{
struct v4l2_mbus_config_mipi_csi2 *bus_mipi_csi2 =
&ov5670->endpoint.bus.mipi_csi2;
- struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
struct v4l2_fwnode_device_properties props;
struct v4l2_ctrl_handler *ctrl_hdlr;
unsigned int lanes_count;
@@ -2177,7 +2177,7 @@ static int ov5670_init_controls(struct ov5670 *ov5670)
goto error;
}
- ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ ret = v4l2_fwnode_device_parse(ov5670->dev, &props);
if (ret)
goto error;
@@ -2350,7 +2350,6 @@ static int ov5670_get_skip_frames(struct v4l2_subdev *sd, u32 *frames)
/* Verify chip ID */
static int ov5670_identify_module(struct ov5670 *ov5670)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
int ret;
u32 val;
@@ -2363,7 +2362,7 @@ static int ov5670_identify_module(struct ov5670 *ov5670)
return ret;
if (val != OV5670_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ dev_err(ov5670->dev, "chip id mismatch: %x!=%x\n",
OV5670_CHIP_ID, val);
return -ENXIO;
}
@@ -2389,7 +2388,6 @@ static int ov5670_mipi_configure(struct ov5670 *ov5670)
/* Prepare streaming by writing default values and customized values */
static int ov5670_start_streaming(struct ov5670 *ov5670)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
const struct ov5670_reg_list *reg_list;
int link_freq_index;
int ret;
@@ -2402,7 +2400,7 @@ static int ov5670_start_streaming(struct ov5670 *ov5670)
ret = ov5670_write_reg(ov5670, OV5670_REG_SOFTWARE_RST,
OV5670_REG_VALUE_08BIT, OV5670_SOFTWARE_RST);
if (ret) {
- dev_err(&client->dev, "%s failed to set powerup registers\n",
+ dev_err(ov5670->dev, "%s failed to set powerup registers\n",
__func__);
return ret;
}
@@ -2412,7 +2410,7 @@ static int ov5670_start_streaming(struct ov5670 *ov5670)
reg_list = &link_freq_configs[link_freq_index].reg_list;
ret = ov5670_write_reg_list(ov5670, reg_list);
if (ret) {
- dev_err(&client->dev, "%s failed to set plls\n", __func__);
+ dev_err(ov5670->dev, "%s failed to set plls\n", __func__);
return ret;
}
@@ -2420,13 +2418,13 @@ static int ov5670_start_streaming(struct ov5670 *ov5670)
reg_list = &ov5670->cur_mode->reg_list;
ret = ov5670_write_reg_list(ov5670, reg_list);
if (ret) {
- dev_err(&client->dev, "%s failed to set mode\n", __func__);
+ dev_err(ov5670->dev, "%s failed to set mode\n", __func__);
return ret;
}
ret = ov5670_mipi_configure(ov5670);
if (ret) {
- dev_err(&client->dev, "%s failed to configure MIPI\n", __func__);
+ dev_err(ov5670->dev, "%s failed to configure MIPI\n", __func__);
return ret;
}
@@ -2438,7 +2436,7 @@ static int ov5670_start_streaming(struct ov5670 *ov5670)
ret = ov5670_write_reg(ov5670, OV5670_REG_MODE_SELECT,
OV5670_REG_VALUE_08BIT, OV5670_MODE_STREAMING);
if (ret) {
- dev_err(&client->dev, "%s failed to set stream\n", __func__);
+ dev_err(ov5670->dev, "%s failed to set stream\n", __func__);
return ret;
}
@@ -2447,13 +2445,12 @@ static int ov5670_start_streaming(struct ov5670 *ov5670)
static int ov5670_stop_streaming(struct ov5670 *ov5670)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
int ret;
ret = ov5670_write_reg(ov5670, OV5670_REG_MODE_SELECT,
OV5670_REG_VALUE_08BIT, OV5670_MODE_STANDBY);
if (ret)
- dev_err(&client->dev, "%s failed to set stream\n", __func__);
+ dev_err(ov5670->dev, "%s failed to set stream\n", __func__);
/* Return success even if it was an error, as there is nothing the
* caller can do about it.
@@ -2464,13 +2461,12 @@ static int ov5670_stop_streaming(struct ov5670 *ov5670)
static int ov5670_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ov5670 *ov5670 = to_ov5670(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&ov5670->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov5670->dev);
if (ret < 0)
goto unlock_and_return;
@@ -2479,12 +2475,12 @@ static int ov5670_set_stream(struct v4l2_subdev *sd, int enable)
goto error;
} else {
ret = ov5670_stop_streaming(ov5670);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov5670->dev);
}
goto unlock_and_return;
error:
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov5670->dev);
unlock_and_return:
mutex_unlock(&ov5670->mutex);
@@ -2621,26 +2617,23 @@ static const struct media_entity_operations ov5670_subdev_entity_ops = {
static int ov5670_regulators_probe(struct ov5670 *ov5670)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
unsigned int i;
for (i = 0; i < OV5670_NUM_SUPPLIES; i++)
ov5670->supplies[i].supply = ov5670_supply_names[i];
- return devm_regulator_bulk_get(&client->dev, OV5670_NUM_SUPPLIES,
+ return devm_regulator_bulk_get(ov5670->dev, OV5670_NUM_SUPPLIES,
ov5670->supplies);
}
static int ov5670_gpio_probe(struct ov5670 *ov5670)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
-
- ov5670->pwdn_gpio = devm_gpiod_get_optional(&client->dev, "powerdown",
+ ov5670->pwdn_gpio = devm_gpiod_get_optional(ov5670->dev, "powerdown",
GPIOD_OUT_LOW);
if (IS_ERR(ov5670->pwdn_gpio))
return PTR_ERR(ov5670->pwdn_gpio);
- ov5670->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+ ov5670->reset_gpio = devm_gpiod_get_optional(ov5670->dev, "reset",
GPIOD_OUT_LOW);
if (IS_ERR(ov5670->reset_gpio))
return PTR_ERR(ov5670->reset_gpio);
@@ -2660,18 +2653,16 @@ static int ov5670_probe(struct i2c_client *client)
if (!ov5670)
return -ENOMEM;
- ov5670->xvclk = devm_clk_get_optional(&client->dev, NULL);
- if (!IS_ERR_OR_NULL(ov5670->xvclk))
- input_clk = clk_get_rate(ov5670->xvclk);
- else if (!ov5670->xvclk || PTR_ERR(ov5670->xvclk) == -ENOENT)
- device_property_read_u32(&client->dev, "clock-frequency",
- &input_clk);
- else
- return dev_err_probe(&client->dev, PTR_ERR(ov5670->xvclk),
+ ov5670->dev = &client->dev;
+
+ ov5670->xvclk = devm_v4l2_sensor_clk_get(ov5670->dev, NULL);
+ if (IS_ERR(ov5670->xvclk))
+ return dev_err_probe(ov5670->dev, PTR_ERR(ov5670->xvclk),
"error getting clock\n");
+ input_clk = clk_get_rate(ov5670->xvclk);
if (input_clk != OV5670_XVCLK_FREQ) {
- dev_err(&client->dev,
+ dev_err(ov5670->dev,
"Unsupported clock frequency %u\n", input_clk);
return -EINVAL;
}
@@ -2682,20 +2673,20 @@ static int ov5670_probe(struct i2c_client *client)
ret = ov5670_regulators_probe(ov5670);
if (ret)
- return dev_err_probe(&client->dev, ret, "Regulators probe failed\n");
+ return dev_err_probe(ov5670->dev, ret, "Regulators probe failed\n");
ret = ov5670_gpio_probe(ov5670);
if (ret)
- return dev_err_probe(&client->dev, ret, "GPIO probe failed\n");
+ return dev_err_probe(ov5670->dev, ret, "GPIO probe failed\n");
/*
* Graph Endpoint. If it's missing we defer rather than fail, as this
* sensor is known to co-exist on systems with the IPU3 and so it might
* be created by the ipu-bridge.
*/
- handle = fwnode_graph_get_next_endpoint(dev_fwnode(&client->dev), NULL);
+ handle = fwnode_graph_get_next_endpoint(dev_fwnode(ov5670->dev), NULL);
if (!handle)
- return dev_err_probe(&client->dev, -EPROBE_DEFER,
+ return dev_err_probe(ov5670->dev, -EPROBE_DEFER,
"Endpoint for node get failed\n");
ov5670->endpoint.bus_type = V4L2_MBUS_CSI2_DPHY;
@@ -2704,20 +2695,20 @@ static int ov5670_probe(struct i2c_client *client)
ret = v4l2_fwnode_endpoint_alloc_parse(handle, &ov5670->endpoint);
fwnode_handle_put(handle);
if (ret)
- return dev_err_probe(&client->dev, ret, "Endpoint parse failed\n");
+ return dev_err_probe(ov5670->dev, ret, "Endpoint parse failed\n");
- full_power = acpi_dev_state_d0(&client->dev);
+ full_power = acpi_dev_state_d0(ov5670->dev);
if (full_power) {
- ret = ov5670_runtime_resume(&client->dev);
+ ret = ov5670_runtime_resume(ov5670->dev);
if (ret) {
- dev_err_probe(&client->dev, ret, "Power up failed\n");
+ dev_err_probe(ov5670->dev, ret, "Power up failed\n");
goto error_endpoint;
}
/* Check module identity */
ret = ov5670_identify_module(ov5670);
if (ret) {
- dev_err_probe(&client->dev, ret, "ov5670_identify_module() error\n");
+ dev_err_probe(ov5670->dev, ret, "ov5670_identify_module() error\n");
goto error_power_off;
}
}
@@ -2729,7 +2720,7 @@ static int ov5670_probe(struct i2c_client *client)
ret = ov5670_init_controls(ov5670);
if (ret) {
- dev_err_probe(&client->dev, ret, "ov5670_init_controls() error\n");
+ dev_err_probe(ov5670->dev, ret, "ov5670_init_controls() error\n");
goto error_mutex_destroy;
}
@@ -2742,28 +2733,28 @@ static int ov5670_probe(struct i2c_client *client)
ov5670->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&ov5670->sd.entity, 1, &ov5670->pad);
if (ret) {
- dev_err_probe(&client->dev, ret, "media_entity_pads_init() error\n");
+ dev_err_probe(ov5670->dev, ret, "media_entity_pads_init() error\n");
goto error_handler_free;
}
/* Set the device's state to active if it's in D0 state. */
if (full_power)
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
+ pm_runtime_set_active(ov5670->dev);
+ pm_runtime_enable(ov5670->dev);
/* Async register for subdev */
ret = v4l2_async_register_subdev_sensor(&ov5670->sd);
if (ret < 0) {
- dev_err_probe(&client->dev, ret, "v4l2_async_register_subdev() error\n");
+ dev_err_probe(ov5670->dev, ret, "v4l2_async_register_subdev() error\n");
goto error_pm_disable;
}
- pm_runtime_idle(&client->dev);
+ pm_runtime_idle(ov5670->dev);
return 0;
error_pm_disable:
- pm_runtime_disable(&client->dev);
+ pm_runtime_disable(ov5670->dev);
media_entity_cleanup(&ov5670->sd.entity);
@@ -2775,7 +2766,7 @@ error_mutex_destroy:
error_power_off:
if (full_power)
- ov5670_runtime_suspend(&client->dev);
+ ov5670_runtime_suspend(ov5670->dev);
error_endpoint:
v4l2_fwnode_endpoint_free(&ov5670->endpoint);
@@ -2793,8 +2784,8 @@ static void ov5670_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(sd->ctrl_handler);
mutex_destroy(&ov5670->mutex);
- pm_runtime_disable(&client->dev);
- ov5670_runtime_suspend(&client->dev);
+ pm_runtime_disable(ov5670->dev);
+ ov5670_runtime_suspend(ov5670->dev);
v4l2_fwnode_endpoint_free(&ov5670->endpoint);
}
diff --git a/drivers/media/i2c/ov5675.c b/drivers/media/i2c/ov5675.c
index e7aec281e9a4..30e27d39ee44 100644
--- a/drivers/media/i2c/ov5675.c
+++ b/drivers/media/i2c/ov5675.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Intel Corporation.
-#include <linux/unaligned.h>
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -11,6 +10,8 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -493,6 +494,8 @@ static const struct ov5675_mode supported_modes[] = {
};
struct ov5675 {
+ struct device *dev;
+
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler ctrl_handler;
@@ -584,7 +587,6 @@ static int ov5675_write_reg(struct ov5675 *ov5675, u16 reg, u16 len, u32 val)
static int ov5675_write_reg_list(struct ov5675 *ov5675,
const struct ov5675_reg_list *r_list)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5675->sd);
unsigned int i;
int ret;
@@ -592,7 +594,7 @@ static int ov5675_write_reg_list(struct ov5675 *ov5675,
ret = ov5675_write_reg(ov5675, r_list->regs[i].address, 1,
r_list->regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(ov5675->dev,
"failed to write reg 0x%4.4x. error = %d",
r_list->regs[i].address, ret);
return ret;
@@ -700,7 +702,6 @@ static int ov5675_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov5675 *ov5675 = container_of(ctrl->handler,
struct ov5675, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov5675->sd);
s64 exposure_max;
int ret = 0;
@@ -716,7 +717,7 @@ static int ov5675_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 controls values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov5675->dev))
return 0;
switch (ctrl->id) {
@@ -765,7 +766,7 @@ static int ov5675_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov5675->dev);
return ret;
}
@@ -776,7 +777,6 @@ static const struct v4l2_ctrl_ops ov5675_ctrl_ops = {
static int ov5675_init_controls(struct ov5675 *ov5675)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5675->sd);
struct v4l2_fwnode_device_properties props;
struct v4l2_ctrl_handler *ctrl_hdlr;
s64 exposure_max, h_blank;
@@ -839,7 +839,7 @@ static int ov5675_init_controls(struct ov5675 *ov5675)
return ctrl_hdlr->error;
}
- ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ ret = v4l2_fwnode_device_parse(ov5675->dev, &props);
if (ret)
goto error;
@@ -869,7 +869,6 @@ static void ov5675_update_pad_format(const struct ov5675_mode *mode,
static int ov5675_identify_module(struct ov5675 *ov5675)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5675->sd);
int ret;
u32 val;
@@ -882,7 +881,7 @@ static int ov5675_identify_module(struct ov5675 *ov5675)
return ret;
if (val != OV5675_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ dev_err(ov5675->dev, "chip id mismatch: %x!=%x",
OV5675_CHIP_ID, val);
return -ENXIO;
}
@@ -894,7 +893,6 @@ static int ov5675_identify_module(struct ov5675 *ov5675)
static int ov5675_start_streaming(struct ov5675 *ov5675)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5675->sd);
const struct ov5675_reg_list *reg_list;
int link_freq_index, ret;
@@ -906,14 +904,14 @@ static int ov5675_start_streaming(struct ov5675 *ov5675)
reg_list = &link_freq_configs[link_freq_index].reg_list;
ret = ov5675_write_reg_list(ov5675, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set plls");
+ dev_err(ov5675->dev, "failed to set plls");
return ret;
}
reg_list = &ov5675->cur_mode->reg_list;
ret = ov5675_write_reg_list(ov5675, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set mode");
+ dev_err(ov5675->dev, "failed to set mode");
return ret;
}
@@ -924,7 +922,7 @@ static int ov5675_start_streaming(struct ov5675 *ov5675)
ret = ov5675_write_reg(ov5675, OV5675_REG_MODE_SELECT,
OV5675_REG_VALUE_08BIT, OV5675_MODE_STREAMING);
if (ret) {
- dev_err(&client->dev, "failed to set stream");
+ dev_err(ov5675->dev, "failed to set stream");
return ret;
}
@@ -933,22 +931,19 @@ static int ov5675_start_streaming(struct ov5675 *ov5675)
static void ov5675_stop_streaming(struct ov5675 *ov5675)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5675->sd);
-
if (ov5675_write_reg(ov5675, OV5675_REG_MODE_SELECT,
OV5675_REG_VALUE_08BIT, OV5675_MODE_STANDBY))
- dev_err(&client->dev, "failed to set stream");
+ dev_err(ov5675->dev, "failed to set stream");
}
static int ov5675_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ov5675 *ov5675 = to_ov5675(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&ov5675->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov5675->dev);
if (ret < 0) {
mutex_unlock(&ov5675->mutex);
return ret;
@@ -958,11 +953,11 @@ static int ov5675_set_stream(struct v4l2_subdev *sd, int enable)
if (ret) {
enable = 0;
ov5675_stop_streaming(ov5675);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov5675->dev);
}
} else {
ov5675_stop_streaming(ov5675);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov5675->dev);
}
mutex_unlock(&ov5675->mutex);
@@ -1171,8 +1166,9 @@ static const struct v4l2_subdev_internal_ops ov5675_internal_ops = {
.open = ov5675_open,
};
-static int ov5675_get_hwcfg(struct ov5675 *ov5675, struct device *dev)
+static int ov5675_get_hwcfg(struct ov5675 *ov5675)
{
+ struct device *dev = ov5675->dev;
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
struct v4l2_fwnode_endpoint bus_cfg = {
@@ -1185,24 +1181,13 @@ static int ov5675_get_hwcfg(struct ov5675 *ov5675, struct device *dev)
if (!fwnode)
return -ENXIO;
- ov5675->xvclk = devm_clk_get_optional(dev, NULL);
+ ov5675->xvclk = devm_v4l2_sensor_clk_get(dev, NULL);
if (IS_ERR(ov5675->xvclk))
return dev_err_probe(dev, PTR_ERR(ov5675->xvclk),
"failed to get xvclk: %ld\n",
PTR_ERR(ov5675->xvclk));
- if (ov5675->xvclk) {
- xvclk_rate = clk_get_rate(ov5675->xvclk);
- } else {
- ret = fwnode_property_read_u32(fwnode, "clock-frequency",
- &xvclk_rate);
-
- if (ret) {
- dev_err(dev, "can't get clock frequency");
- return ret;
- }
- }
-
+ xvclk_rate = clk_get_rate(ov5675->xvclk);
if (xvclk_rate != OV5675_XVCLK_19_2) {
dev_err(dev, "external clock rate %u is unsupported",
xvclk_rate);
@@ -1276,12 +1261,12 @@ static void ov5675_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
+ pm_runtime_disable(ov5675->dev);
mutex_destroy(&ov5675->mutex);
- if (!pm_runtime_status_suspended(&client->dev))
- ov5675_power_off(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ if (!pm_runtime_status_suspended(ov5675->dev))
+ ov5675_power_off(ov5675->dev);
+ pm_runtime_set_suspended(ov5675->dev);
}
static int ov5675_probe(struct i2c_client *client)
@@ -1294,23 +1279,25 @@ static int ov5675_probe(struct i2c_client *client)
if (!ov5675)
return -ENOMEM;
- ret = ov5675_get_hwcfg(ov5675, &client->dev);
+ ov5675->dev = &client->dev;
+
+ ret = ov5675_get_hwcfg(ov5675);
if (ret)
return ret;
v4l2_i2c_subdev_init(&ov5675->sd, client, &ov5675_subdev_ops);
- ret = ov5675_power_on(&client->dev);
+ ret = ov5675_power_on(ov5675->dev);
if (ret) {
- dev_err(&client->dev, "failed to power on: %d\n", ret);
+ dev_err(ov5675->dev, "failed to power on: %d\n", ret);
return ret;
}
- full_power = acpi_dev_state_d0(&client->dev);
+ full_power = acpi_dev_state_d0(ov5675->dev);
if (full_power) {
ret = ov5675_identify_module(ov5675);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
+ dev_err(ov5675->dev, "failed to find sensor: %d", ret);
goto probe_power_off;
}
}
@@ -1319,7 +1306,7 @@ static int ov5675_probe(struct i2c_client *client)
ov5675->cur_mode = &supported_modes[0];
ret = ov5675_init_controls(ov5675);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d", ret);
+ dev_err(ov5675->dev, "failed to init controls: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
@@ -1330,22 +1317,22 @@ static int ov5675_probe(struct i2c_client *client)
ov5675->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&ov5675->sd.entity, 1, &ov5675->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ dev_err(ov5675->dev, "failed to init entity pads: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
ret = v4l2_async_register_subdev_sensor(&ov5675->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ dev_err(ov5675->dev, "failed to register V4L2 subdev: %d",
ret);
goto probe_error_media_entity_cleanup;
}
/* Set the device's state to active if it's in D0 state. */
if (full_power)
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(ov5675->dev);
+ pm_runtime_enable(ov5675->dev);
+ pm_runtime_idle(ov5675->dev);
return 0;
@@ -1356,7 +1343,7 @@ probe_error_v4l2_ctrl_handler_free:
v4l2_ctrl_handler_free(ov5675->sd.ctrl_handler);
mutex_destroy(&ov5675->mutex);
probe_power_off:
- ov5675_power_off(&client->dev);
+ ov5675_power_off(ov5675->dev);
return ret;
}
diff --git a/drivers/media/i2c/ov5693.c b/drivers/media/i2c/ov5693.c
index 485efd15257e..d294477f9dd3 100644
--- a/drivers/media/i2c/ov5693.c
+++ b/drivers/media/i2c/ov5693.c
@@ -1289,25 +1289,13 @@ static int ov5693_probe(struct i2c_client *client)
v4l2_i2c_subdev_init(&ov5693->sd, client, &ov5693_ops);
- ov5693->xvclk = devm_clk_get_optional(&client->dev, "xvclk");
+ ov5693->xvclk = devm_v4l2_sensor_clk_get(&client->dev, "xvclk");
if (IS_ERR(ov5693->xvclk))
return dev_err_probe(&client->dev, PTR_ERR(ov5693->xvclk),
"failed to get xvclk: %ld\n",
PTR_ERR(ov5693->xvclk));
- if (ov5693->xvclk) {
- xvclk_rate = clk_get_rate(ov5693->xvclk);
- } else {
- ret = fwnode_property_read_u32(dev_fwnode(&client->dev),
- "clock-frequency",
- &xvclk_rate);
-
- if (ret) {
- dev_err(&client->dev, "can't get clock frequency");
- return ret;
- }
- }
-
+ xvclk_rate = clk_get_rate(ov5693->xvclk);
if (xvclk_rate != OV5693_XVCLK_FREQ)
dev_warn(&client->dev, "Found clk freq %u, expected %u\n",
xvclk_rate, OV5693_XVCLK_FREQ);
diff --git a/drivers/media/i2c/ov5695.c b/drivers/media/i2c/ov5695.c
index 663eccdfea6a..5bb6ce7b3237 100644
--- a/drivers/media/i2c/ov5695.c
+++ b/drivers/media/i2c/ov5695.c
@@ -1264,16 +1264,12 @@ static int ov5695_probe(struct i2c_client *client)
ov5695->client = client;
ov5695->cur_mode = &supported_modes[0];
- ov5695->xvclk = devm_clk_get(dev, "xvclk");
- if (IS_ERR(ov5695->xvclk)) {
- dev_err(dev, "Failed to get xvclk\n");
- return -EINVAL;
- }
- ret = clk_set_rate(ov5695->xvclk, OV5695_XVCLK_FREQ);
- if (ret < 0) {
- dev_err(dev, "Failed to set xvclk rate (24MHz)\n");
- return ret;
- }
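+ /* The legacy helper now also sets the 24 MHz rate, replacing the removed clk_set_rate() call */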
+ ov5695->xvclk = devm_v4l2_sensor_clk_get_legacy(dev, "xvclk", true,
+ OV5695_XVCLK_FREQ);
+ if (IS_ERR(ov5695->xvclk))
+ return dev_err_probe(dev, PTR_ERR(ov5695->xvclk),
+ "Failed to get xvclk\n");
+
if (clk_get_rate(ov5695->xvclk) != OV5695_XVCLK_FREQ)
dev_warn(dev, "xvclk mismatched, modes are based on 24MHz\n");
diff --git a/drivers/media/i2c/ov6211.c b/drivers/media/i2c/ov6211.c
new file mode 100644
index 000000000000..e3ac5ecf27d1
--- /dev/null
+++ b/drivers/media/i2c/ov6211.c
@@ -0,0 +1,793 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2024-2025 Linaro Ltd
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/units.h>
+#include <media/v4l2-cci.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#define OV6211_LINK_FREQ_480MHZ (480 * HZ_PER_MHZ)
+#define OV6211_MCLK_FREQ_24MHZ (24 * HZ_PER_MHZ)
+
+#define OV6211_REG_CHIP_ID CCI_REG16(0x300a)
+#define OV6211_CHIP_ID 0x6710
+
+#define OV6211_REG_MODE_SELECT CCI_REG8(0x0100)
+#define OV6211_MODE_STANDBY 0x00
+#define OV6211_MODE_STREAMING BIT(0)
+
+#define OV6211_REG_SOFTWARE_RST CCI_REG8(0x0103)
+#define OV6211_SOFTWARE_RST BIT(0)
+
+/* Exposure controls from sensor */
+#define OV6211_REG_EXPOSURE CCI_REG24(0x3500)
+#define OV6211_EXPOSURE_MIN 1
+#define OV6211_EXPOSURE_MAX_MARGIN 4
+#define OV6211_EXPOSURE_STEP 1
+#define OV6211_EXPOSURE_DEFAULT 210
+
+/* Analogue gain controls from sensor */
+#define OV6211_REG_ANALOGUE_GAIN CCI_REG16(0x350a)
+#define OV6211_ANALOGUE_GAIN_MIN 1
+#define OV6211_ANALOGUE_GAIN_MAX 0x3ff
+#define OV6211_ANALOGUE_GAIN_STEP 1
+#define OV6211_ANALOGUE_GAIN_DEFAULT 160
+
+/* Test pattern */
+#define OV6211_REG_PRE_ISP CCI_REG8(0x5e00)
+#define OV6211_TEST_PATTERN_ENABLE BIT(7)
+
+#define to_ov6211(_sd) container_of(_sd, struct ov6211, sd)
+
+static const s64 ov6211_link_freq_menu[] = {
+ OV6211_LINK_FREQ_480MHZ,
+};
+
+struct ov6211_reg_list {
+ const struct cci_reg_sequence *regs;
+ unsigned int num_regs;
+};
+
+struct ov6211_mode {
+ u32 width; /* Frame width in pixels */
+ u32 height; /* Frame height in pixels */
+ u32 hts; /* Horizontal timing size */
+ u32 vts; /* Default vertical timing size */
+ u32 bpp; /* Bits per pixel */
+
+ const struct ov6211_reg_list reg_list; /* Sensor register setting */
+};
+
+static const char * const ov6211_test_pattern_menu[] = {
+ "Disabled",
+ "Vertical Colour Bars",
+};
+
+static const char * const ov6211_supply_names[] = {
+ "avdd", /* Analog power */
+ "dovdd", /* Digital I/O power */
+ "dvdd", /* Digital core power */
+};
+
+#define OV6211_NUM_SUPPLIES ARRAY_SIZE(ov6211_supply_names)
+
+struct ov6211 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct clk *xvclk;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data supplies[OV6211_NUM_SUPPLIES];
+
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ /* Saved register values */
+ u64 pre_isp;
+};
+
+static const struct cci_reg_sequence ov6211_400x400_120fps_mode[] = {
+ { CCI_REG8(0x3005), 0x00 },
+ { CCI_REG8(0x3013), 0x12 },
+ { CCI_REG8(0x3014), 0x04 },
+ { CCI_REG8(0x3016), 0x10 },
+ { CCI_REG8(0x3017), 0x00 },
+ { CCI_REG8(0x3018), 0x00 },
+ { CCI_REG8(0x301a), 0x00 },
+ { CCI_REG8(0x301b), 0x00 },
+ { CCI_REG8(0x301c), 0x00 },
+ { CCI_REG8(0x3037), 0xf0 },
+ { CCI_REG8(0x3080), 0x01 },
+ { CCI_REG8(0x3081), 0x00 },
+ { CCI_REG8(0x3082), 0x01 },
+ { CCI_REG8(0x3098), 0x04 },
+ { CCI_REG8(0x3099), 0x28 },
+ { CCI_REG8(0x309a), 0x06 },
+ { CCI_REG8(0x309b), 0x04 },
+ { CCI_REG8(0x309c), 0x00 },
+ { CCI_REG8(0x309d), 0x00 },
+ { CCI_REG8(0x309e), 0x01 },
+ { CCI_REG8(0x309f), 0x00 },
+ { CCI_REG8(0x30b0), 0x08 },
+ { CCI_REG8(0x30b1), 0x02 },
+ { CCI_REG8(0x30b2), 0x00 },
+ { CCI_REG8(0x30b3), 0x28 },
+ { CCI_REG8(0x30b4), 0x02 },
+ { CCI_REG8(0x30b5), 0x00 },
+ { CCI_REG8(0x3106), 0xd9 },
+ { CCI_REG8(0x3503), 0x07 },
+ { CCI_REG8(0x3509), 0x10 },
+ { CCI_REG8(0x3600), 0xfc },
+ { CCI_REG8(0x3620), 0xb7 },
+ { CCI_REG8(0x3621), 0x05 },
+ { CCI_REG8(0x3626), 0x31 },
+ { CCI_REG8(0x3627), 0x40 },
+ { CCI_REG8(0x3632), 0xa3 },
+ { CCI_REG8(0x3633), 0x34 },
+ { CCI_REG8(0x3634), 0x40 },
+ { CCI_REG8(0x3636), 0x00 },
+ { CCI_REG8(0x3660), 0x80 },
+ { CCI_REG8(0x3662), 0x03 },
+ { CCI_REG8(0x3664), 0xf0 },
+ { CCI_REG8(0x366a), 0x10 },
+ { CCI_REG8(0x366b), 0x06 },
+ { CCI_REG8(0x3680), 0xf4 },
+ { CCI_REG8(0x3681), 0x50 },
+ { CCI_REG8(0x3682), 0x00 },
+ { CCI_REG8(0x3708), 0x20 },
+ { CCI_REG8(0x3709), 0x40 },
+ { CCI_REG8(0x370d), 0x03 },
+ { CCI_REG8(0x373b), 0x02 },
+ { CCI_REG8(0x373c), 0x08 },
+ { CCI_REG8(0x3742), 0x00 },
+ { CCI_REG8(0x3744), 0x16 },
+ { CCI_REG8(0x3745), 0x08 },
+ { CCI_REG8(0x3781), 0xfc },
+ { CCI_REG8(0x3788), 0x00 },
+ { CCI_REG8(0x3800), 0x00 },
+ { CCI_REG8(0x3801), 0x04 },
+ { CCI_REG8(0x3802), 0x00 },
+ { CCI_REG8(0x3803), 0x04 },
+ { CCI_REG8(0x3804), 0x01 },
+ { CCI_REG8(0x3805), 0x9b },
+ { CCI_REG8(0x3806), 0x01 },
+ { CCI_REG8(0x3807), 0x9b },
+ { CCI_REG8(0x3808), 0x01 }, /* output width */
+ { CCI_REG8(0x3809), 0x90 },
+ { CCI_REG8(0x380a), 0x01 }, /* output height */
+ { CCI_REG8(0x380b), 0x90 },
+ { CCI_REG8(0x380c), 0x05 }, /* horizontal timing size */
+ { CCI_REG8(0x380d), 0xf2 },
+ { CCI_REG8(0x380e), 0x01 }, /* vertical timing size */
+ { CCI_REG8(0x380f), 0xb6 },
+ { CCI_REG8(0x3810), 0x00 },
+ { CCI_REG8(0x3811), 0x04 },
+ { CCI_REG8(0x3812), 0x00 },
+ { CCI_REG8(0x3813), 0x04 },
+ { CCI_REG8(0x3814), 0x11 },
+ { CCI_REG8(0x3815), 0x11 },
+ { CCI_REG8(0x3820), 0x00 },
+ { CCI_REG8(0x3821), 0x00 },
+ { CCI_REG8(0x382b), 0xfa },
+ { CCI_REG8(0x382f), 0x04 },
+ { CCI_REG8(0x3832), 0x00 },
+ { CCI_REG8(0x3833), 0x05 },
+ { CCI_REG8(0x3834), 0x00 },
+ { CCI_REG8(0x3835), 0x05 },
+ { CCI_REG8(0x3882), 0x04 },
+ { CCI_REG8(0x3883), 0x00 },
+ { CCI_REG8(0x38a4), 0x10 },
+ { CCI_REG8(0x38a5), 0x00 },
+ { CCI_REG8(0x38b1), 0x03 },
+ { CCI_REG8(0x3b80), 0x00 },
+ { CCI_REG8(0x3b81), 0xff },
+ { CCI_REG8(0x3b82), 0x10 },
+ { CCI_REG8(0x3b83), 0x00 },
+ { CCI_REG8(0x3b84), 0x08 },
+ { CCI_REG8(0x3b85), 0x00 },
+ { CCI_REG8(0x3b86), 0x01 },
+ { CCI_REG8(0x3b87), 0x00 },
+ { CCI_REG8(0x3b88), 0x00 },
+ { CCI_REG8(0x3b89), 0x00 },
+ { CCI_REG8(0x3b8a), 0x00 },
+ { CCI_REG8(0x3b8b), 0x05 },
+ { CCI_REG8(0x3b8c), 0x00 },
+ { CCI_REG8(0x3b8d), 0x00 },
+ { CCI_REG8(0x3b8e), 0x01 },
+ { CCI_REG8(0x3b8f), 0xb2 },
+ { CCI_REG8(0x3b94), 0x05 },
+ { CCI_REG8(0x3b95), 0xf2 },
+ { CCI_REG8(0x3b96), 0xc0 },
+ { CCI_REG8(0x4004), 0x04 },
+ { CCI_REG8(0x404e), 0x01 },
+ { CCI_REG8(0x4801), 0x0f },
+ { CCI_REG8(0x4806), 0x0f },
+ { CCI_REG8(0x4837), 0x43 },
+ { CCI_REG8(0x5a08), 0x00 },
+ { CCI_REG8(0x5a01), 0x00 },
+ { CCI_REG8(0x5a03), 0x00 },
+ { CCI_REG8(0x5a04), 0x10 },
+ { CCI_REG8(0x5a05), 0xa0 },
+ { CCI_REG8(0x5a06), 0x0c },
+ { CCI_REG8(0x5a07), 0x78 },
+};
+
+static const struct ov6211_mode supported_modes[] = {
+ {
+ .width = 400,
+ .height = 400,
+ .hts = 1522,
+ .vts = 438,
+ .bpp = 8,
+ .reg_list = {
+ .regs = ov6211_400x400_120fps_mode,
+ .num_regs = ARRAY_SIZE(ov6211_400x400_120fps_mode),
+ },
+ },
+};
+
+static int ov6211_set_test_pattern(struct ov6211 *ov6211, u32 pattern)
+{
+ u64 val = ov6211->pre_isp;
+
+ if (pattern)
+ val |= OV6211_TEST_PATTERN_ENABLE;
+ else
+ val &= ~OV6211_TEST_PATTERN_ENABLE;
+
+ return cci_write(ov6211->regmap, OV6211_REG_PRE_ISP, val, NULL);
+}
+
+static int ov6211_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov6211 *ov6211 = container_of(ctrl->handler, struct ov6211,
+ ctrl_handler);
+ int ret;
+
+ /* V4L2 control values are applied only when the sensor is powered up for streaming */
+ if (!pm_runtime_get_if_active(ov6211->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = cci_write(ov6211->regmap, OV6211_REG_ANALOGUE_GAIN,
+ ctrl->val, NULL);
+ break;
+ case V4L2_CID_EXPOSURE:
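+ /* The 24-bit exposure register counts lines with four fractional low bits, hence the shift */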
+ ret = cci_write(ov6211->regmap, OV6211_REG_EXPOSURE,
+ ctrl->val << 4, NULL);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ret = ov6211_set_test_pattern(ov6211, ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_put(ov6211->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ov6211_ctrl_ops = {
+ .s_ctrl = ov6211_set_ctrl,
+};
+
+static int ov6211_init_controls(struct ov6211 *ov6211)
+{
+ struct v4l2_ctrl_handler *ctrl_hdlr = &ov6211->ctrl_handler;
+ const struct ov6211_mode *mode = &supported_modes[0];
+ struct v4l2_fwnode_device_properties props;
+ s64 exposure_max, pixel_rate, h_blank;
+ struct v4l2_ctrl *ctrl;
+ int ret;
+
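+ /* Seven sensor controls plus the standard fwnode property controls */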
+ v4l2_ctrl_handler_init(ctrl_hdlr, 9);
+
+ ctrl = v4l2_ctrl_new_int_menu(ctrl_hdlr, &ov6211_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(ov6211_link_freq_menu) - 1,
+ 0, ov6211_link_freq_menu);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ pixel_rate = ov6211_link_freq_menu[0] / mode->bpp;
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov6211_ctrl_ops, V4L2_CID_PIXEL_RATE,
+ 0, pixel_rate, 1, pixel_rate);
+
+ h_blank = mode->hts - mode->width;
+ ctrl = v4l2_ctrl_new_std(ctrl_hdlr, &ov6211_ctrl_ops, V4L2_CID_HBLANK,
+ h_blank, h_blank, 1, h_blank);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ ctrl = v4l2_ctrl_new_std(ctrl_hdlr, &ov6211_ctrl_ops, V4L2_CID_VBLANK,
+ mode->vts - mode->height,
+ mode->vts - mode->height, 1,
+ mode->vts - mode->height);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov6211_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ OV6211_ANALOGUE_GAIN_MIN, OV6211_ANALOGUE_GAIN_MAX,
+ OV6211_ANALOGUE_GAIN_STEP,
+ OV6211_ANALOGUE_GAIN_DEFAULT);
+
+ exposure_max = (mode->vts - OV6211_EXPOSURE_MAX_MARGIN);
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov6211_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ OV6211_EXPOSURE_MIN, exposure_max,
+ OV6211_EXPOSURE_STEP,
+ OV6211_EXPOSURE_DEFAULT);
+
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov6211_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(ov6211_test_pattern_menu) - 1,
+ 0, 0, ov6211_test_pattern_menu);
+
+ if (ctrl_hdlr->error)
+ return ctrl_hdlr->error;
+
+ ret = v4l2_fwnode_device_parse(ov6211->dev, &props);
+ if (ret)
+ goto error_free_hdlr;
+
+ ret = v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &ov6211_ctrl_ops,
+ &props);
+ if (ret)
+ goto error_free_hdlr;
+
+ ov6211->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+
+error_free_hdlr:
+ v4l2_ctrl_handler_free(ctrl_hdlr);
+
+ return ret;
+}
+
+static void ov6211_update_pad_format(const struct ov6211_mode *mode,
+ struct v4l2_mbus_framefmt *fmt)
+{
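+ /* The sensor outputs raw 8-bit greyscale with full-range quantization */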
+ fmt->code = MEDIA_BUS_FMT_Y8_1X8;
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_RAW;
+ fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+}
+
+static int ov6211_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ const struct ov6211_reg_list *reg_list = &supported_modes[0].reg_list;
+ struct ov6211 *ov6211 = to_ov6211(sd);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(ov6211->dev);
+ if (ret)
+ return ret;
+
+ /* A software reset is sufficient; skip explicitly entering standby mode first */
+ ret = cci_write(ov6211->regmap, OV6211_REG_SOFTWARE_RST,
+ OV6211_SOFTWARE_RST, NULL);
+ if (ret) {
+ dev_err(ov6211->dev, "failed to software reset: %d\n", ret);
+ goto error;
+ }
+
+ ret = cci_multi_reg_write(ov6211->regmap, reg_list->regs,
+ reg_list->num_regs, NULL);
+ if (ret) {
+ dev_err(ov6211->dev, "failed to set mode: %d\n", ret);
+ goto error;
+ }
+
+ ret = __v4l2_ctrl_handler_setup(ov6211->sd.ctrl_handler);
+ if (ret)
+ goto error;
+
+ ret = cci_write(ov6211->regmap, OV6211_REG_MODE_SELECT,
+ OV6211_MODE_STREAMING, NULL);
+ if (ret) {
+ dev_err(ov6211->dev, "failed to start streaming: %d\n", ret);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ pm_runtime_put_autosuspend(ov6211->dev);
+
+ return ret;
+}
+
+static int ov6211_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct ov6211 *ov6211 = to_ov6211(sd);
+ int ret;
+
+ ret = cci_write(ov6211->regmap, OV6211_REG_MODE_SELECT,
+ OV6211_MODE_STANDBY, NULL);
+ if (ret)
+ dev_err(ov6211->dev, "failed to stop streaming: %d\n", ret);
+
+ pm_runtime_put_autosuspend(ov6211->dev);
+
+ return ret;
+}
+
+static int ov6211_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_mbus_framefmt *format;
+ const struct ov6211_mode *mode;
+
+ format = v4l2_subdev_state_get_format(state, 0);
+
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes),
+ width, height,
+ fmt->format.width,
+ fmt->format.height);
+
+ ov6211_update_pad_format(mode, &fmt->format);
+ *format = fmt->format;
+
+ return 0;
+}
+
+static int ov6211_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_Y8_1X8;
+
+ return 0;
+}
+
+static int ov6211_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_Y8_1X8)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static int ov6211_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ .pad = 0,
+ .format = {
+ .code = MEDIA_BUS_FMT_Y8_1X8,
+ .width = supported_modes[0].width,
+ .height = supported_modes[0].height,
+ },
+ };
+
+ ov6211_set_pad_format(sd, state, &fmt);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops ov6211_video_ops = {
+ .s_stream = v4l2_subdev_s_stream_helper,
+};
+
+static const struct v4l2_subdev_pad_ops ov6211_pad_ops = {
+ .set_fmt = ov6211_set_pad_format,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .enum_mbus_code = ov6211_enum_mbus_code,
+ .enum_frame_size = ov6211_enum_frame_size,
+ .enable_streams = ov6211_enable_streams,
+ .disable_streams = ov6211_disable_streams,
+};
+
+static const struct v4l2_subdev_ops ov6211_subdev_ops = {
+ .video = &ov6211_video_ops,
+ .pad = &ov6211_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops ov6211_internal_ops = {
+ .init_state = ov6211_init_state,
+};
+
+static const struct media_entity_operations ov6211_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int ov6211_identify_sensor(struct ov6211 *ov6211)
+{
+ u64 val;
+ int ret;
+
+ ret = cci_read(ov6211->regmap, OV6211_REG_CHIP_ID, &val, NULL);
+ if (ret) {
+ dev_err(ov6211->dev, "failed to read chip id: %d\n", ret);
+ return ret;
+ }
+
+ if (val != OV6211_CHIP_ID) {
+ dev_err(ov6211->dev, "chip id mismatch: %x!=%llx\n",
+ OV6211_CHIP_ID, val);
+ return -ENODEV;
+ }
+
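+ /* Cache PRE_ISP so the test-pattern control can toggle only its enable bit */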
+ ret = cci_read(ov6211->regmap, OV6211_REG_PRE_ISP,
+ &ov6211->pre_isp, NULL);
+ if (ret)
+ dev_err(ov6211->dev, "failed to read pre_isp: %d\n", ret);
+
+ return ret;
+}
+
+static int ov6211_check_hwcfg(struct ov6211 *ov6211)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(ov6211->dev), *ep;
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ unsigned long freq_bitmap;
+ int ret;
+
+ if (!fwnode)
+ return -ENODEV;
+
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep)
+ return -EINVAL;
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret)
+ return ret;
+
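+ /* Fail probe early unless the endpoint lists a supported link frequency */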
+ ret = v4l2_link_freq_to_bitmap(ov6211->dev, bus_cfg.link_frequencies,
+ bus_cfg.nr_of_link_frequencies,
+ ov6211_link_freq_menu,
+ ARRAY_SIZE(ov6211_link_freq_menu),
+ &freq_bitmap);
+
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+
+ return ret;
+}
+
+static int ov6211_power_on(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov6211 *ov6211 = to_ov6211(sd);
+ int ret;
+
+ ret = regulator_bulk_enable(OV6211_NUM_SUPPLIES, ov6211->supplies);
+ if (ret)
+ return ret;
+
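+ /* Deassert reset and give the supplies time to stabilize before enabling the clock */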
+ gpiod_set_value_cansleep(ov6211->reset_gpio, 0);
+ usleep_range(10 * USEC_PER_MSEC, 15 * USEC_PER_MSEC);
+
+ ret = clk_prepare_enable(ov6211->xvclk);
+ if (ret)
+ goto reset_gpio;
+
+ return 0;
+
+reset_gpio:
+ gpiod_set_value_cansleep(ov6211->reset_gpio, 1);
+
+ regulator_bulk_disable(OV6211_NUM_SUPPLIES, ov6211->supplies);
+
+ return ret;
+}
+
+static int ov6211_power_off(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov6211 *ov6211 = to_ov6211(sd);
+
+ clk_disable_unprepare(ov6211->xvclk);
+
+ gpiod_set_value_cansleep(ov6211->reset_gpio, 1);
+
+ regulator_bulk_disable(OV6211_NUM_SUPPLIES, ov6211->supplies);
+
+ return 0;
+}
+
+static int ov6211_probe(struct i2c_client *client)
+{
+ struct ov6211 *ov6211;
+ unsigned long freq;
+ unsigned int i;
+ int ret;
+
+ ov6211 = devm_kzalloc(&client->dev, sizeof(*ov6211), GFP_KERNEL);
+ if (!ov6211)
+ return -ENOMEM;
+
+ ov6211->dev = &client->dev;
+
+ v4l2_i2c_subdev_init(&ov6211->sd, client, &ov6211_subdev_ops);
+
+ ov6211->regmap = devm_cci_regmap_init_i2c(client, 16);
+ if (IS_ERR(ov6211->regmap))
+ return dev_err_probe(ov6211->dev, PTR_ERR(ov6211->regmap),
+ "failed to init CCI\n");
+
+ ov6211->xvclk = devm_v4l2_sensor_clk_get(ov6211->dev, NULL);
+ if (IS_ERR(ov6211->xvclk))
+ return dev_err_probe(ov6211->dev, PTR_ERR(ov6211->xvclk),
+ "failed to get XVCLK clock\n");
+
+ freq = clk_get_rate(ov6211->xvclk);
+ if (freq && freq != OV6211_MCLK_FREQ_24MHZ)
+ return dev_err_probe(ov6211->dev, -EINVAL,
+ "XVCLK clock frequency %lu is not supported\n",
+ freq);
+
+ ret = ov6211_check_hwcfg(ov6211);
+ if (ret)
+ return dev_err_probe(ov6211->dev, ret,
+ "failed to check HW configuration\n");
+
+ ov6211->reset_gpio = devm_gpiod_get_optional(ov6211->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ov6211->reset_gpio))
+ return dev_err_probe(ov6211->dev, PTR_ERR(ov6211->reset_gpio),
+ "cannot get reset GPIO\n");
+
+ for (i = 0; i < OV6211_NUM_SUPPLIES; i++)
+ ov6211->supplies[i].supply = ov6211_supply_names[i];
+
+ ret = devm_regulator_bulk_get(ov6211->dev, OV6211_NUM_SUPPLIES,
+ ov6211->supplies);
+ if (ret)
+ return dev_err_probe(ov6211->dev, ret,
+ "failed to get supply regulators\n");
+
+ /* The sensor must be powered on to read the CHIP_ID register */
+ ret = ov6211_power_on(ov6211->dev);
+ if (ret)
+ return ret;
+
+ ret = ov6211_identify_sensor(ov6211);
+ if (ret) {
+ dev_err_probe(ov6211->dev, ret, "failed to find sensor\n");
+ goto power_off;
+ }
+
+ ret = ov6211_init_controls(ov6211);
+ if (ret) {
+ dev_err_probe(ov6211->dev, ret, "failed to init controls\n");
+ goto power_off;
+ }
+
+ ov6211->sd.state_lock = ov6211->ctrl_handler.lock;
+ ov6211->sd.internal_ops = &ov6211_internal_ops;
+ ov6211->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ov6211->sd.entity.ops = &ov6211_subdev_entity_ops;
+ ov6211->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ov6211->pad.flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&ov6211->sd.entity, 1, &ov6211->pad);
+ if (ret) {
+ dev_err_probe(ov6211->dev, ret,
+ "failed to init media entity pads\n");
+ goto v4l2_ctrl_handler_free;
+ }
+
+ ret = v4l2_subdev_init_finalize(&ov6211->sd);
+ if (ret < 0) {
+ dev_err_probe(ov6211->dev, ret,
+ "failed to finalize subdev initialization\n");
+ goto media_entity_cleanup;
+ }
+
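+ /* The sensor is still powered on from probe; mark it active for runtime PM */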
+ pm_runtime_set_active(ov6211->dev);
+ pm_runtime_enable(ov6211->dev);
+
+ ret = v4l2_async_register_subdev_sensor(&ov6211->sd);
+ if (ret < 0) {
+ dev_err_probe(ov6211->dev, ret,
+ "failed to register V4L2 subdev\n");
+ goto subdev_cleanup;
+ }
+
+ /* Runtime PM is enabled above; idle the device and set up autosuspend */
+ pm_runtime_idle(ov6211->dev);
+ pm_runtime_set_autosuspend_delay(ov6211->dev, 1000);
+ pm_runtime_use_autosuspend(ov6211->dev);
+
+ return 0;
+
+subdev_cleanup:
+ v4l2_subdev_cleanup(&ov6211->sd);
+ pm_runtime_disable(ov6211->dev);
+ pm_runtime_set_suspended(ov6211->dev);
+
+media_entity_cleanup:
+ media_entity_cleanup(&ov6211->sd.entity);
+
+v4l2_ctrl_handler_free:
+ v4l2_ctrl_handler_free(ov6211->sd.ctrl_handler);
+
+power_off:
+ ov6211_power_off(ov6211->dev);
+
+ return ret;
+}
+
+static void ov6211_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov6211 *ov6211 = to_ov6211(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ v4l2_subdev_cleanup(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ pm_runtime_disable(ov6211->dev);
+
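+ /* Power off now unless runtime PM already suspended the sensor */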
+ if (!pm_runtime_status_suspended(ov6211->dev)) {
+ ov6211_power_off(ov6211->dev);
+ pm_runtime_set_suspended(ov6211->dev);
+ }
+}
+
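+ /* The power helpers double as the runtime PM suspend/resume callbacks */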
+static const struct dev_pm_ops ov6211_pm_ops = {
+ SET_RUNTIME_PM_OPS(ov6211_power_off, ov6211_power_on, NULL)
+};
+
+static const struct of_device_id ov6211_of_match[] = {
+ { .compatible = "ovti,ov6211" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ov6211_of_match);
+
+static struct i2c_driver ov6211_i2c_driver = {
+ .driver = {
+ .name = "ov6211",
+ .pm = &ov6211_pm_ops,
+ .of_match_table = ov6211_of_match,
+ },
+ .probe = ov6211_probe,
+ .remove = ov6211_remove,
+};
+
+module_i2c_driver(ov6211_i2c_driver);
+
+MODULE_AUTHOR("Vladimir Zapolskiy <vladimir.zapolskiy@linaro.org>");
+MODULE_DESCRIPTION("OmniVision OV6211 sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/ov64a40.c b/drivers/media/i2c/ov64a40.c
index a5da4fe47e0b..78b62c169b99 100644
--- a/drivers/media/i2c/ov64a40.c
+++ b/drivers/media/i2c/ov64a40.c
@@ -2990,7 +2990,6 @@ static int ov64a40_start_streaming(struct ov64a40 *ov64a40,
return 0;
error_power_off:
- pm_runtime_mark_last_busy(ov64a40->dev);
pm_runtime_put_autosuspend(ov64a40->dev);
return ret;
@@ -3000,7 +2999,6 @@ static int ov64a40_stop_streaming(struct ov64a40 *ov64a40,
struct v4l2_subdev_state *state)
{
cci_update_bits(ov64a40->cci, OV64A40_REG_SMIA, BIT(0), 0, NULL);
- pm_runtime_mark_last_busy(ov64a40->dev);
pm_runtime_put_autosuspend(ov64a40->dev);
__v4l2_ctrl_grab(ov64a40->link_freq, false);
@@ -3329,10 +3327,8 @@ static int ov64a40_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- if (pm_status > 0) {
- pm_runtime_mark_last_busy(ov64a40->dev);
+ if (pm_status > 0)
pm_runtime_put_autosuspend(ov64a40->dev);
- }
return ret;
}
@@ -3550,7 +3546,7 @@ static int ov64a40_probe(struct i2c_client *client)
return PTR_ERR(ov64a40->cci);
}
- ov64a40->xclk = devm_clk_get(&client->dev, NULL);
+ ov64a40->xclk = devm_v4l2_sensor_clk_get(&client->dev, NULL);
if (IS_ERR(ov64a40->xclk))
return dev_err_probe(&client->dev, PTR_ERR(ov64a40->xclk),
"Failed to get clock\n");
@@ -3622,7 +3618,6 @@ static int ov64a40_probe(struct i2c_client *client)
goto error_subdev_cleanup;
}
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return 0;
diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
deleted file mode 100644
index 9c7627161142..000000000000
--- a/drivers/media/i2c/ov6650.c
+++ /dev/null
@@ -1,1149 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * V4L2 subdevice driver for OmniVision OV6650 Camera Sensor
- *
- * Copyright (C) 2010 Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
- *
- * Based on OmniVision OV96xx Camera Driver
- * Copyright (C) 2009 Marek Vasut <marek.vasut@gmail.com>
- *
- * Based on ov772x camera driver:
- * Copyright (C) 2008 Renesas Solutions Corp.
- * Kuninori Morimoto <morimoto.kuninori@renesas.com>
- *
- * Based on ov7670 and soc_camera_platform driver,
- * Copyright 2006-7 Jonathan Corbet <corbet@lwn.net>
- * Copyright (C) 2008 Magnus Damm
- * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
- *
- * Hardware specific bits initially based on former work by Matt Callow
- * drivers/media/video/omap/sensor_ov6650.c
- * Copyright (C) 2006 Matt Callow
- */
-
-#include <linux/bitops.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/i2c.h>
-#include <linux/slab.h>
-#include <linux/v4l2-mediabus.h>
-#include <linux/module.h>
-
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-device.h>
-
-/* Register definitions */
-#define REG_GAIN 0x00 /* range 00 - 3F */
-#define REG_BLUE 0x01
-#define REG_RED 0x02
-#define REG_SAT 0x03 /* [7:4] saturation [0:3] reserved */
-#define REG_HUE 0x04 /* [7:6] rsrvd [5] hue en [4:0] hue */
-
-#define REG_BRT 0x06
-
-#define REG_PIDH 0x0a
-#define REG_PIDL 0x0b
-
-#define REG_AECH 0x10
-#define REG_CLKRC 0x11 /* Data Format and Internal Clock */
- /* [7:6] Input system clock (MHz)*/
- /* 00=8, 01=12, 10=16, 11=24 */
- /* [5:0]: Internal Clock Pre-Scaler */
-#define REG_COMA 0x12 /* [7] Reset */
-#define REG_COMB 0x13
-#define REG_COMC 0x14
-#define REG_COMD 0x15
-#define REG_COML 0x16
-#define REG_HSTRT 0x17
-#define REG_HSTOP 0x18
-#define REG_VSTRT 0x19
-#define REG_VSTOP 0x1a
-#define REG_PSHFT 0x1b
-#define REG_MIDH 0x1c
-#define REG_MIDL 0x1d
-#define REG_HSYNS 0x1e
-#define REG_HSYNE 0x1f
-#define REG_COME 0x20
-#define REG_YOFF 0x21
-#define REG_UOFF 0x22
-#define REG_VOFF 0x23
-#define REG_AEW 0x24
-#define REG_AEB 0x25
-#define REG_COMF 0x26
-#define REG_COMG 0x27
-#define REG_COMH 0x28
-#define REG_COMI 0x29
-
-#define REG_FRARL 0x2b
-#define REG_COMJ 0x2c
-#define REG_COMK 0x2d
-#define REG_AVGY 0x2e
-#define REG_REF0 0x2f
-#define REG_REF1 0x30
-#define REG_REF2 0x31
-#define REG_FRAJH 0x32
-#define REG_FRAJL 0x33
-#define REG_FACT 0x34
-#define REG_L1AEC 0x35
-#define REG_AVGU 0x36
-#define REG_AVGV 0x37
-
-#define REG_SPCB 0x60
-#define REG_SPCC 0x61
-#define REG_GAM1 0x62
-#define REG_GAM2 0x63
-#define REG_GAM3 0x64
-#define REG_SPCD 0x65
-
-#define REG_SPCE 0x68
-#define REG_ADCL 0x69
-
-#define REG_RMCO 0x6c
-#define REG_GMCO 0x6d
-#define REG_BMCO 0x6e
-
-
-/* Register bits, values, etc. */
-#define OV6650_PIDH 0x66 /* high byte of product ID number */
-#define OV6650_PIDL 0x50 /* low byte of product ID number */
-#define OV6650_MIDH 0x7F /* high byte of mfg ID */
-#define OV6650_MIDL 0xA2 /* low byte of mfg ID */
-
-#define DEF_GAIN 0x00
-#define DEF_BLUE 0x80
-#define DEF_RED 0x80
-
-#define SAT_SHIFT 4
-#define SAT_MASK (0xf << SAT_SHIFT)
-#define SET_SAT(x) (((x) << SAT_SHIFT) & SAT_MASK)
-
-#define HUE_EN BIT(5)
-#define HUE_MASK 0x1f
-#define DEF_HUE 0x10
-#define SET_HUE(x) (HUE_EN | ((x) & HUE_MASK))
-
-#define DEF_AECH 0x4D
-
-#define CLKRC_8MHz 0x00
-#define CLKRC_12MHz 0x40
-#define CLKRC_16MHz 0x80
-#define CLKRC_24MHz 0xc0
-#define CLKRC_DIV_MASK 0x3f
-#define GET_CLKRC_DIV(x) (((x) & CLKRC_DIV_MASK) + 1)
-#define DEF_CLKRC 0x00
-
-#define COMA_RESET BIT(7)
-#define COMA_QCIF BIT(5)
-#define COMA_RAW_RGB BIT(4)
-#define COMA_RGB BIT(3)
-#define COMA_BW BIT(2)
-#define COMA_WORD_SWAP BIT(1)
-#define COMA_BYTE_SWAP BIT(0)
-#define DEF_COMA 0x00
-
-#define COMB_FLIP_V BIT(7)
-#define COMB_FLIP_H BIT(5)
-#define COMB_BAND_FILTER BIT(4)
-#define COMB_AWB BIT(2)
-#define COMB_AGC BIT(1)
-#define COMB_AEC BIT(0)
-#define DEF_COMB 0x5f
-
-#define COML_ONE_CHANNEL BIT(7)
-
-#define DEF_HSTRT 0x24
-#define DEF_HSTOP 0xd4
-#define DEF_VSTRT 0x04
-#define DEF_VSTOP 0x94
-
-#define COMF_HREF_LOW BIT(4)
-
-#define COMJ_PCLK_RISING BIT(4)
-#define COMJ_VSYNC_HIGH BIT(0)
-
-/* supported resolutions */
-#define W_QCIF (DEF_HSTOP - DEF_HSTRT)
-#define W_CIF (W_QCIF << 1)
-#define H_QCIF (DEF_VSTOP - DEF_VSTRT)
-#define H_CIF (H_QCIF << 1)
-
-#define FRAME_RATE_MAX 30
-
-
-struct ov6650_reg {
- u8 reg;
- u8 val;
-};
-
-struct ov6650 {
- struct v4l2_subdev subdev;
- struct v4l2_ctrl_handler hdl;
- struct {
- /* exposure/autoexposure cluster */
- struct v4l2_ctrl *autoexposure;
- struct v4l2_ctrl *exposure;
- };
- struct {
- /* gain/autogain cluster */
- struct v4l2_ctrl *autogain;
- struct v4l2_ctrl *gain;
- };
- struct {
- /* blue/red/autowhitebalance cluster */
- struct v4l2_ctrl *autowb;
- struct v4l2_ctrl *blue;
- struct v4l2_ctrl *red;
- };
- struct clk *clk;
- bool half_scale; /* scale down output by 2 */
- struct v4l2_rect rect; /* sensor cropping window */
- struct v4l2_fract tpf; /* as requested with set_frame_interval */
- u32 code;
-};
-
-struct ov6650_xclk {
- unsigned long rate;
- u8 clkrc;
-};
-
-static const struct ov6650_xclk ov6650_xclk[] = {
-{
- .rate = 8000000,
- .clkrc = CLKRC_8MHz,
-},
-{
- .rate = 12000000,
- .clkrc = CLKRC_12MHz,
-},
-{
- .rate = 16000000,
- .clkrc = CLKRC_16MHz,
-},
-{
- .rate = 24000000,
- .clkrc = CLKRC_24MHz,
-},
-};
-
-static u32 ov6650_codes[] = {
- MEDIA_BUS_FMT_YUYV8_2X8,
- MEDIA_BUS_FMT_UYVY8_2X8,
- MEDIA_BUS_FMT_YVYU8_2X8,
- MEDIA_BUS_FMT_VYUY8_2X8,
- MEDIA_BUS_FMT_SBGGR8_1X8,
- MEDIA_BUS_FMT_Y8_1X8,
-};
-
-static const struct v4l2_mbus_framefmt ov6650_def_fmt = {
- .width = W_CIF,
- .height = H_CIF,
- .code = MEDIA_BUS_FMT_SBGGR8_1X8,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .field = V4L2_FIELD_NONE,
- .ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
- .quantization = V4L2_QUANTIZATION_DEFAULT,
- .xfer_func = V4L2_XFER_FUNC_DEFAULT,
-};
-
-/* read a register */
-static int ov6650_reg_read(struct i2c_client *client, u8 reg, u8 *val)
-{
- int ret;
- u8 data = reg;
- struct i2c_msg msg = {
- .addr = client->addr,
- .flags = 0,
- .len = 1,
- .buf = &data,
- };
-
- ret = i2c_transfer(client->adapter, &msg, 1);
- if (ret < 0)
- goto err;
-
- msg.flags = I2C_M_RD;
- ret = i2c_transfer(client->adapter, &msg, 1);
- if (ret < 0)
- goto err;
-
- *val = data;
- return 0;
-
-err:
- dev_err(&client->dev, "Failed reading register 0x%02x!\n", reg);
- return ret;
-}
-
-/* write a register */
-static int ov6650_reg_write(struct i2c_client *client, u8 reg, u8 val)
-{
- int ret;
- unsigned char data[2] = { reg, val };
- struct i2c_msg msg = {
- .addr = client->addr,
- .flags = 0,
- .len = 2,
- .buf = data,
- };
-
- ret = i2c_transfer(client->adapter, &msg, 1);
- udelay(100);
-
- if (ret < 0) {
- dev_err(&client->dev, "Failed writing register 0x%02x!\n", reg);
- return ret;
- }
- return 0;
-}
-
-
-/* Read a register, alter its bits, write it back */
-static int ov6650_reg_rmw(struct i2c_client *client, u8 reg, u8 set, u8 mask)
-{
- u8 val;
- int ret;
-
- ret = ov6650_reg_read(client, reg, &val);
- if (ret) {
- dev_err(&client->dev,
- "[Read]-Modify-Write of register 0x%02x failed!\n",
- reg);
- return ret;
- }
-
- val &= ~mask;
- val |= set;
-
- ret = ov6650_reg_write(client, reg, val);
- if (ret)
- dev_err(&client->dev,
- "Read-Modify-[Write] of register 0x%02x failed!\n",
- reg);
-
- return ret;
-}
-
-static struct ov6650 *to_ov6650(const struct i2c_client *client)
-{
- return container_of(i2c_get_clientdata(client), struct ov6650, subdev);
-}
-
-/* Start/Stop streaming from the device */
-static int ov6650_s_stream(struct v4l2_subdev *sd, int enable)
-{
- return 0;
-}
-
-/* Get status of additional camera capabilities */
-static int ov6550_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct ov6650 *priv = container_of(ctrl->handler, struct ov6650, hdl);
- struct v4l2_subdev *sd = &priv->subdev;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- uint8_t reg, reg2;
- int ret;
-
- switch (ctrl->id) {
- case V4L2_CID_AUTOGAIN:
- ret = ov6650_reg_read(client, REG_GAIN, &reg);
- if (!ret)
- priv->gain->val = reg;
- return ret;
- case V4L2_CID_AUTO_WHITE_BALANCE:
- ret = ov6650_reg_read(client, REG_BLUE, &reg);
- if (!ret)
- ret = ov6650_reg_read(client, REG_RED, &reg2);
- if (!ret) {
- priv->blue->val = reg;
- priv->red->val = reg2;
- }
- return ret;
- case V4L2_CID_EXPOSURE_AUTO:
- ret = ov6650_reg_read(client, REG_AECH, &reg);
- if (!ret)
- priv->exposure->val = reg;
- return ret;
- }
- return -EINVAL;
-}
-
-/* Set status of additional camera capabilities */
-static int ov6550_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct ov6650 *priv = container_of(ctrl->handler, struct ov6650, hdl);
- struct v4l2_subdev *sd = &priv->subdev;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- switch (ctrl->id) {
- case V4L2_CID_AUTOGAIN:
- ret = ov6650_reg_rmw(client, REG_COMB,
- ctrl->val ? COMB_AGC : 0, COMB_AGC);
- if (!ret && !ctrl->val)
- ret = ov6650_reg_write(client, REG_GAIN, priv->gain->val);
- return ret;
- case V4L2_CID_AUTO_WHITE_BALANCE:
- ret = ov6650_reg_rmw(client, REG_COMB,
- ctrl->val ? COMB_AWB : 0, COMB_AWB);
- if (!ret && !ctrl->val) {
- ret = ov6650_reg_write(client, REG_BLUE, priv->blue->val);
- if (!ret)
- ret = ov6650_reg_write(client, REG_RED,
- priv->red->val);
- }
- return ret;
- case V4L2_CID_SATURATION:
- return ov6650_reg_rmw(client, REG_SAT, SET_SAT(ctrl->val),
- SAT_MASK);
- case V4L2_CID_HUE:
- return ov6650_reg_rmw(client, REG_HUE, SET_HUE(ctrl->val),
- HUE_MASK);
- case V4L2_CID_BRIGHTNESS:
- return ov6650_reg_write(client, REG_BRT, ctrl->val);
- case V4L2_CID_EXPOSURE_AUTO:
- ret = ov6650_reg_rmw(client, REG_COMB, ctrl->val ==
- V4L2_EXPOSURE_AUTO ? COMB_AEC : 0, COMB_AEC);
- if (!ret && ctrl->val == V4L2_EXPOSURE_MANUAL)
- ret = ov6650_reg_write(client, REG_AECH,
- priv->exposure->val);
- return ret;
- case V4L2_CID_GAMMA:
- return ov6650_reg_write(client, REG_GAM1, ctrl->val);
- case V4L2_CID_VFLIP:
- return ov6650_reg_rmw(client, REG_COMB,
- ctrl->val ? COMB_FLIP_V : 0, COMB_FLIP_V);
- case V4L2_CID_HFLIP:
- return ov6650_reg_rmw(client, REG_COMB,
- ctrl->val ? COMB_FLIP_H : 0, COMB_FLIP_H);
- }
-
- return -EINVAL;
-}
-
-#ifdef CONFIG_VIDEO_ADV_DEBUG
-static int ov6650_get_register(struct v4l2_subdev *sd,
- struct v4l2_dbg_register *reg)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
- u8 val;
-
- if (reg->reg & ~0xff)
- return -EINVAL;
-
- reg->size = 1;
-
- ret = ov6650_reg_read(client, reg->reg, &val);
- if (!ret)
- reg->val = (__u64)val;
-
- return ret;
-}
-
-static int ov6650_set_register(struct v4l2_subdev *sd,
- const struct v4l2_dbg_register *reg)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
-
- if (reg->reg & ~0xff || reg->val & ~0xff)
- return -EINVAL;
-
- return ov6650_reg_write(client, reg->reg, reg->val);
-}
-#endif
-
-static int ov6650_s_power(struct v4l2_subdev *sd, int on)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov6650 *priv = to_ov6650(client);
- int ret = 0;
-
- if (on)
- ret = clk_prepare_enable(priv->clk);
- else
- clk_disable_unprepare(priv->clk);
-
- return ret;
-}
-
-static int ov6650_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_selection *sel)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov6650 *priv = to_ov6650(client);
- struct v4l2_rect *rect;
-
- if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
- /* pre-select try crop rectangle */
- rect = v4l2_subdev_state_get_crop(sd_state, 0);
-
- } else {
- /* pre-select active crop rectangle */
- rect = &priv->rect;
- }
-
- switch (sel->target) {
- case V4L2_SEL_TGT_CROP_BOUNDS:
- sel->r.left = DEF_HSTRT << 1;
- sel->r.top = DEF_VSTRT << 1;
- sel->r.width = W_CIF;
- sel->r.height = H_CIF;
- return 0;
-
- case V4L2_SEL_TGT_CROP:
- /* use selected crop rectangle */
- sel->r = *rect;
- return 0;
-
- default:
- return -EINVAL;
- }
-}
-
-static bool is_unscaled_ok(int width, int height, struct v4l2_rect *rect)
-{
- return width > rect->width >> 1 || height > rect->height >> 1;
-}
-
-static void ov6650_bind_align_crop_rectangle(struct v4l2_rect *rect)
-{
- v4l_bound_align_image(&rect->width, 2, W_CIF, 1,
- &rect->height, 2, H_CIF, 1, 0);
- v4l_bound_align_image(&rect->left, DEF_HSTRT << 1,
- (DEF_HSTRT << 1) + W_CIF - (__s32)rect->width, 1,
- &rect->top, DEF_VSTRT << 1,
- (DEF_VSTRT << 1) + H_CIF - (__s32)rect->height,
- 1, 0);
-}
-
-static int ov6650_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_selection *sel)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov6650 *priv = to_ov6650(client);
- int ret;
-
- if (sel->target != V4L2_SEL_TGT_CROP)
- return -EINVAL;
-
- ov6650_bind_align_crop_rectangle(&sel->r);
-
- if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
- struct v4l2_rect *crop =
- v4l2_subdev_state_get_crop(sd_state, 0);
- struct v4l2_mbus_framefmt *mf =
- v4l2_subdev_state_get_format(sd_state, 0);
- /* detect current pad config scaling factor */
- bool half_scale = !is_unscaled_ok(mf->width, mf->height, crop);
-
- /* store new crop rectangle */
- *crop = sel->r;
-
- /* adjust frame size */
- mf->width = crop->width >> half_scale;
- mf->height = crop->height >> half_scale;
-
- return 0;
- }
-
- /* V4L2_SUBDEV_FORMAT_ACTIVE */
-
- /* apply new crop rectangle */
- ret = ov6650_reg_write(client, REG_HSTRT, sel->r.left >> 1);
- if (!ret) {
- priv->rect.width += priv->rect.left - sel->r.left;
- priv->rect.left = sel->r.left;
- ret = ov6650_reg_write(client, REG_HSTOP,
- (sel->r.left + sel->r.width) >> 1);
- }
- if (!ret) {
- priv->rect.width = sel->r.width;
- ret = ov6650_reg_write(client, REG_VSTRT, sel->r.top >> 1);
- }
- if (!ret) {
- priv->rect.height += priv->rect.top - sel->r.top;
- priv->rect.top = sel->r.top;
- ret = ov6650_reg_write(client, REG_VSTOP,
- (sel->r.top + sel->r.height) >> 1);
- }
- if (!ret)
- priv->rect.height = sel->r.height;
-
- return ret;
-}
-
-static int ov6650_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *format)
-{
- struct v4l2_mbus_framefmt *mf = &format->format;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov6650 *priv = to_ov6650(client);
-
- if (format->pad)
- return -EINVAL;
-
- /* initialize response with default media bus frame format */
- *mf = ov6650_def_fmt;
-
- /* update media bus format code and frame size */
- if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_state_get_format(sd_state, 0);
-
- mf->width = try_fmt->width;
- mf->height = try_fmt->height;
- mf->code = try_fmt->code;
-
- } else {
- mf->width = priv->rect.width >> priv->half_scale;
- mf->height = priv->rect.height >> priv->half_scale;
- mf->code = priv->code;
- }
- return 0;
-}
-
-#define to_clkrc(div) ((div) - 1)
-
-/* set the format we will capture in */
-static int ov6650_s_fmt(struct v4l2_subdev *sd, u32 code, bool half_scale)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov6650 *priv = to_ov6650(client);
- u8 coma_set = 0, coma_mask = 0, coml_set, coml_mask;
- int ret;
-
- /* select color matrix configuration for given color encoding */
- switch (code) {
- case MEDIA_BUS_FMT_Y8_1X8:
- dev_dbg(&client->dev, "pixel format GREY8_1X8\n");
- coma_mask |= COMA_RGB | COMA_WORD_SWAP | COMA_BYTE_SWAP;
- coma_set |= COMA_BW;
- break;
- case MEDIA_BUS_FMT_YUYV8_2X8:
- dev_dbg(&client->dev, "pixel format YUYV8_2X8_LE\n");
- coma_mask |= COMA_RGB | COMA_BW | COMA_BYTE_SWAP;
- coma_set |= COMA_WORD_SWAP;
- break;
- case MEDIA_BUS_FMT_YVYU8_2X8:
- dev_dbg(&client->dev, "pixel format YVYU8_2X8_LE (untested)\n");
- coma_mask |= COMA_RGB | COMA_BW | COMA_WORD_SWAP |
- COMA_BYTE_SWAP;
- break;
- case MEDIA_BUS_FMT_UYVY8_2X8:
- dev_dbg(&client->dev, "pixel format YUYV8_2X8_BE\n");
- if (half_scale) {
- coma_mask |= COMA_RGB | COMA_BW | COMA_WORD_SWAP;
- coma_set |= COMA_BYTE_SWAP;
- } else {
- coma_mask |= COMA_RGB | COMA_BW;
- coma_set |= COMA_BYTE_SWAP | COMA_WORD_SWAP;
- }
- break;
- case MEDIA_BUS_FMT_VYUY8_2X8:
- dev_dbg(&client->dev, "pixel format YVYU8_2X8_BE (untested)\n");
- if (half_scale) {
- coma_mask |= COMA_RGB | COMA_BW;
- coma_set |= COMA_BYTE_SWAP | COMA_WORD_SWAP;
- } else {
- coma_mask |= COMA_RGB | COMA_BW | COMA_WORD_SWAP;
- coma_set |= COMA_BYTE_SWAP;
- }
- break;
- case MEDIA_BUS_FMT_SBGGR8_1X8:
- dev_dbg(&client->dev, "pixel format SBGGR8_1X8 (untested)\n");
- coma_mask |= COMA_BW | COMA_BYTE_SWAP | COMA_WORD_SWAP;
- coma_set |= COMA_RAW_RGB | COMA_RGB;
- break;
- default:
- dev_err(&client->dev, "Pixel format not handled: 0x%x\n", code);
- return -EINVAL;
- }
-
- if (code == MEDIA_BUS_FMT_Y8_1X8 ||
- code == MEDIA_BUS_FMT_SBGGR8_1X8) {
- coml_mask = COML_ONE_CHANNEL;
- coml_set = 0;
- } else {
- coml_mask = 0;
- coml_set = COML_ONE_CHANNEL;
- }
-
- if (half_scale) {
- dev_dbg(&client->dev, "max resolution: QCIF\n");
- coma_set |= COMA_QCIF;
- } else {
- dev_dbg(&client->dev, "max resolution: CIF\n");
- coma_mask |= COMA_QCIF;
- }
-
- ret = ov6650_reg_rmw(client, REG_COMA, coma_set, coma_mask);
- if (!ret) {
- priv->half_scale = half_scale;
-
- ret = ov6650_reg_rmw(client, REG_COML, coml_set, coml_mask);
- }
- if (!ret)
- priv->code = code;
-
- return ret;
-}
-
-static int ov6650_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *format)
-{
- struct v4l2_mbus_framefmt *mf = &format->format;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov6650 *priv = to_ov6650(client);
- struct v4l2_rect *crop;
- bool half_scale;
-
- if (format->pad)
- return -EINVAL;
-
- switch (mf->code) {
- case MEDIA_BUS_FMT_Y10_1X10:
- mf->code = MEDIA_BUS_FMT_Y8_1X8;
- fallthrough;
- case MEDIA_BUS_FMT_Y8_1X8:
- case MEDIA_BUS_FMT_YVYU8_2X8:
- case MEDIA_BUS_FMT_YUYV8_2X8:
- case MEDIA_BUS_FMT_VYUY8_2X8:
- case MEDIA_BUS_FMT_UYVY8_2X8:
- break;
- default:
- mf->code = MEDIA_BUS_FMT_SBGGR8_1X8;
- fallthrough;
- case MEDIA_BUS_FMT_SBGGR8_1X8:
- break;
- }
-
- if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- crop = v4l2_subdev_state_get_crop(sd_state, 0);
- else
- crop = &priv->rect;
-
- half_scale = !is_unscaled_ok(mf->width, mf->height, crop);
-
- if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_state_get_format(sd_state, 0);
-
- /* store new mbus frame format code and size in pad config */
- try_fmt->width = crop->width >> half_scale;
- try_fmt->height = crop->height >> half_scale;
- try_fmt->code = mf->code;
-
- /* return default mbus frame format updated with pad config */
- *mf = ov6650_def_fmt;
- mf->width = try_fmt->width;
- mf->height = try_fmt->height;
- mf->code = try_fmt->code;
-
- } else {
- int ret = 0;
-
- /* apply new media bus frame format and scaling if changed */
- if (mf->code != priv->code || half_scale != priv->half_scale)
- ret = ov6650_s_fmt(sd, mf->code, half_scale);
- if (ret)
- return ret;
-
- /* return default format updated with active size and code */
- *mf = ov6650_def_fmt;
- mf->width = priv->rect.width >> priv->half_scale;
- mf->height = priv->rect.height >> priv->half_scale;
- mf->code = priv->code;
- }
- return 0;
-}
-
-static int ov6650_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- if (code->pad || code->index >= ARRAY_SIZE(ov6650_codes))
- return -EINVAL;
-
- code->code = ov6650_codes[code->index];
- return 0;
-}
-
-static int ov6650_enum_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_frame_interval_enum *fie)
-{
- int i;
-
- /* enumerate supported frame intervals not exceeding 1 second */
- if (fie->index > CLKRC_DIV_MASK ||
- GET_CLKRC_DIV(fie->index) > FRAME_RATE_MAX)
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(ov6650_codes); i++)
- if (fie->code == ov6650_codes[i])
- break;
- if (i == ARRAY_SIZE(ov6650_codes))
- return -EINVAL;
-
- if (!fie->width || fie->width > W_CIF ||
- !fie->height || fie->height > H_CIF)
- return -EINVAL;
-
- fie->interval.numerator = GET_CLKRC_DIV(fie->index);
- fie->interval.denominator = FRAME_RATE_MAX;
-
- return 0;
-}
-
-static int ov6650_get_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_frame_interval *ival)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov6650 *priv = to_ov6650(client);
-
- /*
- * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
- * subdev active state API.
- */
- if (ival->which != V4L2_SUBDEV_FORMAT_ACTIVE)
- return -EINVAL;
-
- ival->interval = priv->tpf;
-
- dev_dbg(&client->dev, "Frame interval: %u/%u s\n",
- ival->interval.numerator, ival->interval.denominator);
-
- return 0;
-}
-
-static int ov6650_set_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_frame_interval *ival)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov6650 *priv = to_ov6650(client);
- struct v4l2_fract *tpf = &ival->interval;
- int div, ret;
-
- /*
- * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
- * subdev active state API.
- */
- if (ival->which != V4L2_SUBDEV_FORMAT_ACTIVE)
- return -EINVAL;
-
- if (tpf->numerator == 0 || tpf->denominator == 0)
- div = 1; /* Reset to full rate */
- else
- div = (tpf->numerator * FRAME_RATE_MAX) / tpf->denominator;
-
- if (div == 0)
- div = 1;
- else if (div > GET_CLKRC_DIV(CLKRC_DIV_MASK))
- div = GET_CLKRC_DIV(CLKRC_DIV_MASK);
-
- ret = ov6650_reg_rmw(client, REG_CLKRC, to_clkrc(div), CLKRC_DIV_MASK);
- if (!ret) {
- priv->tpf.numerator = div;
- priv->tpf.denominator = FRAME_RATE_MAX;
-
- *tpf = priv->tpf;
- }
-
- return ret;
-}
-
-/* Soft reset the camera. This has nothing to do with the RESET pin! */
-static int ov6650_reset(struct i2c_client *client)
-{
- int ret;
-
- dev_dbg(&client->dev, "reset\n");
-
- ret = ov6650_reg_rmw(client, REG_COMA, COMA_RESET, 0);
- if (ret)
- dev_err(&client->dev,
- "An error occurred while entering soft reset!\n");
-
- return ret;
-}
-
-/* program default register values */
-static int ov6650_prog_dflt(struct i2c_client *client, u8 clkrc)
-{
- int ret;
-
- dev_dbg(&client->dev, "initializing\n");
-
- ret = ov6650_reg_write(client, REG_COMA, 0); /* ~COMA_RESET */
- if (!ret)
- ret = ov6650_reg_write(client, REG_CLKRC, clkrc);
- if (!ret)
- ret = ov6650_reg_rmw(client, REG_COMB, 0, COMB_BAND_FILTER);
-
- return ret;
-}
-
-static int ov6650_video_probe(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov6650 *priv = to_ov6650(client);
- const struct ov6650_xclk *xclk = NULL;
- unsigned long rate;
- u8 pidh, pidl, midh, midl;
- int i, ret = 0;
-
- priv->clk = devm_clk_get(&client->dev, NULL);
- if (IS_ERR(priv->clk)) {
- ret = PTR_ERR(priv->clk);
- dev_err(&client->dev, "clk request err: %d\n", ret);
- return ret;
- }
-
- rate = clk_get_rate(priv->clk);
- for (i = 0; rate && i < ARRAY_SIZE(ov6650_xclk); i++) {
- if (rate != ov6650_xclk[i].rate)
- continue;
-
- xclk = &ov6650_xclk[i];
- dev_info(&client->dev, "using host default clock rate %lukHz\n",
- rate / 1000);
- break;
- }
- for (i = 0; !xclk && i < ARRAY_SIZE(ov6650_xclk); i++) {
- ret = clk_set_rate(priv->clk, ov6650_xclk[i].rate);
- if (ret || clk_get_rate(priv->clk) != ov6650_xclk[i].rate)
- continue;
-
- xclk = &ov6650_xclk[i];
- dev_info(&client->dev, "using negotiated clock rate %lukHz\n",
- xclk->rate / 1000);
- break;
- }
- if (!xclk) {
- dev_err(&client->dev, "unable to get supported clock rate\n");
- if (!ret)
- ret = -EINVAL;
- return ret;
- }
-
- ret = ov6650_s_power(sd, 1);
- if (ret < 0)
- return ret;
-
- msleep(20);
-
- /*
- * check and show product ID and manufacturer ID
- */
- ret = ov6650_reg_read(client, REG_PIDH, &pidh);
- if (!ret)
- ret = ov6650_reg_read(client, REG_PIDL, &pidl);
- if (!ret)
- ret = ov6650_reg_read(client, REG_MIDH, &midh);
- if (!ret)
- ret = ov6650_reg_read(client, REG_MIDL, &midl);
-
- if (ret)
- goto done;
-
- if ((pidh != OV6650_PIDH) || (pidl != OV6650_PIDL)) {
- dev_err(&client->dev, "Product ID error 0x%02x:0x%02x\n",
- pidh, pidl);
- ret = -ENODEV;
- goto done;
- }
-
- dev_info(&client->dev,
- "ov6650 Product ID 0x%02x:0x%02x Manufacturer ID 0x%02x:0x%02x\n",
- pidh, pidl, midh, midl);
-
- ret = ov6650_reset(client);
- if (!ret)
- ret = ov6650_prog_dflt(client, xclk->clkrc);
- if (!ret) {
- /* driver default frame format, no scaling */
- ret = ov6650_s_fmt(sd, ov6650_def_fmt.code, false);
- }
- if (!ret)
- ret = v4l2_ctrl_handler_setup(&priv->hdl);
-
-done:
- ov6650_s_power(sd, 0);
- return ret;
-}
-
-static const struct v4l2_ctrl_ops ov6550_ctrl_ops = {
- .g_volatile_ctrl = ov6550_g_volatile_ctrl,
- .s_ctrl = ov6550_s_ctrl,
-};
-
-static const struct v4l2_subdev_core_ops ov6650_core_ops = {
-#ifdef CONFIG_VIDEO_ADV_DEBUG
- .g_register = ov6650_get_register,
- .s_register = ov6650_set_register,
-#endif
- .s_power = ov6650_s_power,
-};
-
-/* Request bus settings on camera side */
-static int ov6650_get_mbus_config(struct v4l2_subdev *sd,
- unsigned int pad,
- struct v4l2_mbus_config *cfg)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- u8 comj, comf;
- int ret;
-
- ret = ov6650_reg_read(client, REG_COMJ, &comj);
- if (ret)
- return ret;
-
- ret = ov6650_reg_read(client, REG_COMF, &comf);
- if (ret)
- return ret;
-
- cfg->type = V4L2_MBUS_PARALLEL;
-
- cfg->bus.parallel.flags = V4L2_MBUS_MASTER | V4L2_MBUS_DATA_ACTIVE_HIGH
- | ((comj & COMJ_VSYNC_HIGH) ? V4L2_MBUS_VSYNC_ACTIVE_HIGH
- : V4L2_MBUS_VSYNC_ACTIVE_LOW)
- | ((comf & COMF_HREF_LOW) ? V4L2_MBUS_HSYNC_ACTIVE_LOW
- : V4L2_MBUS_HSYNC_ACTIVE_HIGH)
- | ((comj & COMJ_PCLK_RISING) ? V4L2_MBUS_PCLK_SAMPLE_RISING
- : V4L2_MBUS_PCLK_SAMPLE_FALLING);
- return 0;
-}
-
-static const struct v4l2_subdev_video_ops ov6650_video_ops = {
- .s_stream = ov6650_s_stream,
-};
-
-static const struct v4l2_subdev_pad_ops ov6650_pad_ops = {
- .enum_mbus_code = ov6650_enum_mbus_code,
- .enum_frame_interval = ov6650_enum_frame_interval,
- .get_selection = ov6650_get_selection,
- .set_selection = ov6650_set_selection,
- .get_fmt = ov6650_get_fmt,
- .set_fmt = ov6650_set_fmt,
- .get_frame_interval = ov6650_get_frame_interval,
- .set_frame_interval = ov6650_set_frame_interval,
- .get_mbus_config = ov6650_get_mbus_config,
-};
-
-static const struct v4l2_subdev_ops ov6650_subdev_ops = {
- .core = &ov6650_core_ops,
- .video = &ov6650_video_ops,
- .pad = &ov6650_pad_ops,
-};
-
-static const struct v4l2_subdev_internal_ops ov6650_internal_ops = {
- .registered = ov6650_video_probe,
-};
-
-/*
- * i2c_driver function
- */
-static int ov6650_probe(struct i2c_client *client)
-{
- struct ov6650 *priv;
- int ret;
-
- priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- v4l2_i2c_subdev_init(&priv->subdev, client, &ov6650_subdev_ops);
- v4l2_ctrl_handler_init(&priv->hdl, 13);
- v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_VFLIP, 0, 1, 1, 0);
- v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_HFLIP, 0, 1, 1, 0);
- priv->autogain = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
- priv->gain = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_GAIN, 0, 0x3f, 1, DEF_GAIN);
- priv->autowb = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);
- priv->blue = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_BLUE_BALANCE, 0, 0xff, 1, DEF_BLUE);
- priv->red = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_RED_BALANCE, 0, 0xff, 1, DEF_RED);
- v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_SATURATION, 0, 0xf, 1, 0x8);
- v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_HUE, 0, HUE_MASK, 1, DEF_HUE);
- v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_BRIGHTNESS, 0, 0xff, 1, 0x80);
- priv->autoexposure = v4l2_ctrl_new_std_menu(&priv->hdl,
- &ov6550_ctrl_ops, V4L2_CID_EXPOSURE_AUTO,
- V4L2_EXPOSURE_MANUAL, 0, V4L2_EXPOSURE_AUTO);
- priv->exposure = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_EXPOSURE, 0, 0xff, 1, DEF_AECH);
- v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_GAMMA, 0, 0xff, 1, 0x12);
-
- priv->subdev.ctrl_handler = &priv->hdl;
- if (priv->hdl.error) {
- ret = priv->hdl.error;
- goto ectlhdlfree;
- }
-
- v4l2_ctrl_auto_cluster(2, &priv->autogain, 0, true);
- v4l2_ctrl_auto_cluster(3, &priv->autowb, 0, true);
- v4l2_ctrl_auto_cluster(2, &priv->autoexposure,
- V4L2_EXPOSURE_MANUAL, true);
-
- priv->rect.left = DEF_HSTRT << 1;
- priv->rect.top = DEF_VSTRT << 1;
- priv->rect.width = W_CIF;
- priv->rect.height = H_CIF;
-
- /* Hardware default frame interval */
- priv->tpf.numerator = GET_CLKRC_DIV(DEF_CLKRC);
- priv->tpf.denominator = FRAME_RATE_MAX;
-
- priv->subdev.internal_ops = &ov6650_internal_ops;
-
- ret = v4l2_async_register_subdev(&priv->subdev);
- if (!ret)
- return 0;
-ectlhdlfree:
- v4l2_ctrl_handler_free(&priv->hdl);
-
- return ret;
-}
-
-static void ov6650_remove(struct i2c_client *client)
-{
- struct ov6650 *priv = to_ov6650(client);
-
- v4l2_async_unregister_subdev(&priv->subdev);
- v4l2_ctrl_handler_free(&priv->hdl);
-}
-
-static const struct i2c_device_id ov6650_id[] = {
- { "ov6650" },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, ov6650_id);
-
-static struct i2c_driver ov6650_i2c_driver = {
- .driver = {
- .name = "ov6650",
- },
- .probe = ov6650_probe,
- .remove = ov6650_remove,
- .id_table = ov6650_id,
-};
-
-module_i2c_driver(ov6650_i2c_driver);
-
-MODULE_DESCRIPTION("V4L2 subdevice driver for OmniVision OV6650 camera sensor");
-MODULE_AUTHOR("Janusz Krzysztofik <jmkrzyszt@gmail.com");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/ov7251.c b/drivers/media/i2c/ov7251.c
index 31a42d81e970..27afc3fc0175 100644
--- a/drivers/media/i2c/ov7251.c
+++ b/drivers/media/i2c/ov7251.c
@@ -1630,7 +1630,6 @@ static int ov7251_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ov7251 *ov7251;
- unsigned int rate = 0, clk_rate = 0;
int ret;
int i;
@@ -1646,33 +1645,12 @@ static int ov7251_probe(struct i2c_client *client)
return ret;
/* get system clock (xclk) */
- ov7251->xclk = devm_clk_get_optional(dev, NULL);
+ ov7251->xclk = devm_v4l2_sensor_clk_get(dev, NULL);
if (IS_ERR(ov7251->xclk))
return dev_err_probe(dev, PTR_ERR(ov7251->xclk),
"could not get xclk");
- /*
- * We could have either a 24MHz or 19.2MHz clock rate from either DT or
- * ACPI. We also need to support the IPU3 case which will have both an
- * external clock AND a clock-frequency property.
- */
- ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
- &rate);
- if (ret && !ov7251->xclk)
- return dev_err_probe(dev, ret, "invalid clock config\n");
-
- clk_rate = clk_get_rate(ov7251->xclk);
- ov7251->xclk_freq = clk_rate ? clk_rate : rate;
-
- if (ov7251->xclk_freq == 0)
- return dev_err_probe(dev, -EINVAL, "invalid clock frequency\n");
-
- if (!ret && ov7251->xclk) {
- ret = clk_set_rate(ov7251->xclk, rate);
- if (ret)
- return dev_err_probe(dev, ret,
- "failed to set clock rate\n");
- }
+ ov7251->xclk_freq = clk_get_rate(ov7251->xclk);
for (i = 0; i < ARRAY_SIZE(supported_xclk_rates); i++)
if (ov7251->xclk_freq == supported_xclk_rates[i])
diff --git a/drivers/media/i2c/ov7740.c b/drivers/media/i2c/ov7740.c
index 1f1c0de8e510..632fb80469be 100644
--- a/drivers/media/i2c/ov7740.c
+++ b/drivers/media/i2c/ov7740.c
@@ -1036,13 +1036,10 @@ static int ov7740_probe(struct i2c_client *client)
if (!ov7740)
return -ENOMEM;
- ov7740->xvclk = devm_clk_get(&client->dev, "xvclk");
- if (IS_ERR(ov7740->xvclk)) {
- ret = PTR_ERR(ov7740->xvclk);
- dev_err(&client->dev,
- "OV7740: fail to get xvclk: %d\n", ret);
- return ret;
- }
+ ov7740->xvclk = devm_v4l2_sensor_clk_get(&client->dev, "xvclk");
+ if (IS_ERR(ov7740->xvclk))
+ return dev_err_probe(&client->dev, PTR_ERR(ov7740->xvclk),
+ "OV7740: fail to get xvclk\n");
ret = ov7740_probe_dt(client, ov7740);
if (ret)
diff --git a/drivers/media/i2c/ov8856.c b/drivers/media/i2c/ov8856.c
index 4b6874d2a104..e2998cfa0d18 100644
--- a/drivers/media/i2c/ov8856.c
+++ b/drivers/media/i2c/ov8856.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Intel Corporation.
-#include <linux/unaligned.h>
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -10,6 +9,8 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -1414,6 +1415,8 @@ static const struct ov8856_reg_list bayer_offset_configs[] = {
};
struct ov8856 {
+ struct device *dev;
+
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler ctrl_handler;
@@ -1668,7 +1671,6 @@ static int ov8856_write_reg(struct ov8856 *ov8856, u16 reg, u16 len, u32 val)
static int ov8856_write_reg_list(struct ov8856 *ov8856,
const struct ov8856_reg_list *r_list)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd);
unsigned int i;
int ret;
@@ -1676,7 +1678,7 @@ static int ov8856_write_reg_list(struct ov8856 *ov8856,
ret = ov8856_write_reg(ov8856, r_list->regs[i].address, 1,
r_list->regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(ov8856->dev,
"failed to write reg 0x%4.4x. error = %d",
r_list->regs[i].address, ret);
return ret;
@@ -1688,7 +1690,6 @@ static int ov8856_write_reg_list(struct ov8856 *ov8856,
static int ov8856_identify_module(struct ov8856 *ov8856)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd);
int ret;
u32 val;
@@ -1701,7 +1702,7 @@ static int ov8856_identify_module(struct ov8856 *ov8856)
return ret;
if (val != OV8856_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ dev_err(ov8856->dev, "chip id mismatch: %x!=%x",
OV8856_CHIP_ID, val);
return -ENXIO;
}
@@ -1818,7 +1819,6 @@ static int ov8856_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov8856 *ov8856 = container_of(ctrl->handler,
struct ov8856, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd);
s64 exposure_max;
int ret = 0;
@@ -1834,7 +1834,7 @@ static int ov8856_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 controls values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov8856->dev))
return 0;
switch (ctrl->id) {
@@ -1876,7 +1876,7 @@ static int ov8856_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov8856->dev);
return ret;
}
@@ -1979,7 +1979,6 @@ static void ov8856_update_pad_format(struct ov8856 *ov8856,
static int ov8856_start_streaming(struct ov8856 *ov8856)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd);
const struct ov8856_reg_list *reg_list;
int link_freq_index, ret;
@@ -1992,21 +1991,21 @@ static int ov8856_start_streaming(struct ov8856 *ov8856)
ret = ov8856_write_reg_list(ov8856, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set plls");
+ dev_err(ov8856->dev, "failed to set plls");
return ret;
}
reg_list = &ov8856->cur_mode->reg_list;
ret = ov8856_write_reg_list(ov8856, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set mode");
+ dev_err(ov8856->dev, "failed to set mode");
return ret;
}
reg_list = &bayer_offset_configs[ov8856->cur_mbus_index];
ret = ov8856_write_reg_list(ov8856, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set mbus format");
+ dev_err(ov8856->dev, "failed to set mbus format");
return ret;
}
@@ -2017,7 +2016,7 @@ static int ov8856_start_streaming(struct ov8856 *ov8856)
ret = ov8856_write_reg(ov8856, OV8856_REG_MODE_SELECT,
OV8856_REG_VALUE_08BIT, OV8856_MODE_STREAMING);
if (ret) {
- dev_err(&client->dev, "failed to set stream");
+ dev_err(ov8856->dev, "failed to set stream");
return ret;
}
@@ -2026,22 +2025,19 @@ static int ov8856_start_streaming(struct ov8856 *ov8856)
static void ov8856_stop_streaming(struct ov8856 *ov8856)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd);
-
if (ov8856_write_reg(ov8856, OV8856_REG_MODE_SELECT,
OV8856_REG_VALUE_08BIT, OV8856_MODE_STANDBY))
- dev_err(&client->dev, "failed to set stream");
+ dev_err(ov8856->dev, "failed to set stream");
}
static int ov8856_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ov8856 *ov8856 = to_ov8856(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&ov8856->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov8856->dev);
if (ret < 0) {
mutex_unlock(&ov8856->mutex);
return ret;
@@ -2051,11 +2047,11 @@ static int ov8856_set_stream(struct v4l2_subdev *sd, int enable)
if (ret) {
enable = 0;
ov8856_stop_streaming(ov8856);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov8856->dev);
}
} else {
ov8856_stop_streaming(ov8856);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov8856->dev);
}
mutex_unlock(&ov8856->mutex);
@@ -2255,8 +2251,9 @@ static const struct v4l2_subdev_internal_ops ov8856_internal_ops = {
};
-static int ov8856_get_hwcfg(struct ov8856 *ov8856, struct device *dev)
+static int ov8856_get_hwcfg(struct ov8856 *ov8856)
{
+ struct device *dev = ov8856->dev;
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
struct v4l2_fwnode_endpoint bus_cfg = {
@@ -2269,21 +2266,17 @@ static int ov8856_get_hwcfg(struct ov8856 *ov8856, struct device *dev)
if (!fwnode)
return -ENXIO;
- ret = fwnode_property_read_u32(fwnode, "clock-frequency", &xvclk_rate);
- if (ret)
- return ret;
-
- if (!is_acpi_node(fwnode)) {
- ov8856->xvclk = devm_clk_get(dev, "xvclk");
- if (IS_ERR(ov8856->xvclk)) {
- dev_err_probe(dev, PTR_ERR(ov8856->xvclk),
- "could not get xvclk clock\n");
- return PTR_ERR(ov8856->xvclk);
- }
+ ov8856->xvclk = devm_v4l2_sensor_clk_get_legacy(dev, "xvclk", false, 0);
+ if (IS_ERR(ov8856->xvclk))
+ return dev_err_probe(dev, PTR_ERR(ov8856->xvclk),
+ "could not get xvclk clock\n");
- clk_set_rate(ov8856->xvclk, xvclk_rate);
- xvclk_rate = clk_get_rate(ov8856->xvclk);
+ xvclk_rate = clk_get_rate(ov8856->xvclk);
+ if (xvclk_rate != OV8856_XVCLK_19_2)
+ dev_warn(dev, "external clock rate %u is unsupported",
+ xvclk_rate);
+ if (!is_acpi_node(fwnode)) {
ov8856->reset_gpio = devm_gpiod_get_optional(dev, "reset",
GPIOD_OUT_LOW);
if (IS_ERR(ov8856->reset_gpio))
@@ -2299,10 +2292,6 @@ static int ov8856_get_hwcfg(struct ov8856 *ov8856, struct device *dev)
return ret;
}
- if (xvclk_rate != OV8856_XVCLK_19_2)
- dev_warn(dev, "external clock rate %u is unsupported",
- xvclk_rate);
-
ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
if (!ep)
return -ENXIO;
@@ -2365,10 +2354,10 @@ static void ov8856_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
+ pm_runtime_disable(ov8856->dev);
mutex_destroy(&ov8856->mutex);
- ov8856_power_off(&client->dev);
+ ov8856_power_off(ov8856->dev);
}
static int ov8856_probe(struct i2c_client *client)
@@ -2381,23 +2370,25 @@ static int ov8856_probe(struct i2c_client *client)
if (!ov8856)
return -ENOMEM;
- ret = ov8856_get_hwcfg(ov8856, &client->dev);
+ ov8856->dev = &client->dev;
+
+ ret = ov8856_get_hwcfg(ov8856);
if (ret)
return ret;
v4l2_i2c_subdev_init(&ov8856->sd, client, &ov8856_subdev_ops);
- full_power = acpi_dev_state_d0(&client->dev);
+ full_power = acpi_dev_state_d0(ov8856->dev);
if (full_power) {
- ret = ov8856_power_on(&client->dev);
+ ret = ov8856_power_on(ov8856->dev);
if (ret) {
- dev_err(&client->dev, "failed to power on\n");
+ dev_err(ov8856->dev, "failed to power on\n");
return ret;
}
ret = ov8856_identify_module(ov8856);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
+ dev_err(ov8856->dev, "failed to find sensor: %d", ret);
goto probe_power_off;
}
}
@@ -2407,7 +2398,7 @@ static int ov8856_probe(struct i2c_client *client)
ov8856->cur_mbus_index = ov8856->cur_mode->default_mbus_index;
ret = ov8856_init_controls(ov8856);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d", ret);
+ dev_err(ov8856->dev, "failed to init controls: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
@@ -2418,22 +2409,22 @@ static int ov8856_probe(struct i2c_client *client)
ov8856->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&ov8856->sd.entity, 1, &ov8856->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ dev_err(ov8856->dev, "failed to init entity pads: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
ret = v4l2_async_register_subdev_sensor(&ov8856->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ dev_err(ov8856->dev, "failed to register V4L2 subdev: %d",
ret);
goto probe_error_media_entity_cleanup;
}
/* Set the device's state to active if it's in D0 state. */
if (full_power)
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(ov8856->dev);
+ pm_runtime_enable(ov8856->dev);
+ pm_runtime_idle(ov8856->dev);
return 0;
@@ -2445,7 +2436,7 @@ probe_error_v4l2_ctrl_handler_free:
mutex_destroy(&ov8856->mutex);
probe_power_off:
- ov8856_power_off(&client->dev);
+ ov8856_power_off(ov8856->dev);
return ret;
}
diff --git a/drivers/media/i2c/ov8858.c b/drivers/media/i2c/ov8858.c
index 95f9ae794846..3f45f7fab833 100644
--- a/drivers/media/i2c/ov8858.c
+++ b/drivers/media/i2c/ov8858.c
@@ -1391,7 +1391,6 @@ static int ov8858_s_stream(struct v4l2_subdev *sd, int on)
}
} else {
ov8858_stop_stream(ov8858);
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
}
@@ -1877,7 +1876,7 @@ static int ov8858_probe(struct i2c_client *client)
if (!ov8858)
return -ENOMEM;
- ov8858->xvclk = devm_clk_get(dev, "xvclk");
+ ov8858->xvclk = devm_v4l2_sensor_clk_get(dev, "xvclk");
if (IS_ERR(ov8858->xvclk))
return dev_err_probe(dev, PTR_ERR(ov8858->xvclk),
"Failed to get xvclk\n");
@@ -1945,7 +1944,6 @@ static int ov8858_probe(struct i2c_client *client)
goto err_power_off;
}
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
diff --git a/drivers/media/i2c/ov8865.c b/drivers/media/i2c/ov8865.c
index a2138f7988aa..a8586df14f77 100644
--- a/drivers/media/i2c/ov8865.c
+++ b/drivers/media/i2c/ov8865.c
@@ -2304,14 +2304,6 @@ static int ov8865_state_configure(struct ov8865_sensor *sensor,
if (sensor->state.streaming)
return -EBUSY;
- /* State will be configured at first power on otherwise. */
- if (pm_runtime_enabled(sensor->dev) &&
- !pm_runtime_suspended(sensor->dev)) {
- ret = ov8865_mode_configure(sensor, mode, mbus_code);
- if (ret)
- return ret;
- }
-
ret = ov8865_state_mipi_configure(sensor, mode, mbus_code);
if (ret)
return ret;
@@ -2384,10 +2376,10 @@ static int ov8865_sensor_init(struct ov8865_sensor *sensor)
}
/* Configure current mode. */
- ret = ov8865_state_configure(sensor, sensor->state.mode,
- sensor->state.mbus_code);
+ ret = ov8865_mode_configure(sensor, sensor->state.mode,
+ sensor->state.mbus_code);
if (ret) {
- dev_err(sensor->dev, "failed to configure state\n");
+ dev_err(sensor->dev, "failed to configure mode\n");
return ret;
}
@@ -2956,7 +2948,6 @@ static int ov8865_probe(struct i2c_client *client)
struct ov8865_sensor *sensor;
struct v4l2_subdev *subdev;
struct media_pad *pad;
- unsigned int rate = 0;
unsigned int i;
int ret;
@@ -3020,39 +3011,14 @@ static int ov8865_probe(struct i2c_client *client)
/* External Clock */
- sensor->extclk = devm_clk_get(dev, NULL);
- if (PTR_ERR(sensor->extclk) == -ENOENT) {
- dev_info(dev, "no external clock found, continuing...\n");
- sensor->extclk = NULL;
- } else if (IS_ERR(sensor->extclk)) {
- dev_err(dev, "failed to get external clock\n");
- ret = PTR_ERR(sensor->extclk);
- goto error_endpoint;
- }
-
- /*
- * We could have either a 24MHz or 19.2MHz clock rate from either dt or
- * ACPI...but we also need to support the weird IPU3 case which will
- * have an external clock AND a clock-frequency property. Check for the
- * clock-frequency property and if found, set that rate if we managed
- * to acquire a clock. This should cover the ACPI case. If the system
- * uses devicetree then the configured rate should already be set, so
- * we can just read it.
- */
- ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
- &rate);
- if (!ret && sensor->extclk) {
- ret = clk_set_rate(sensor->extclk, rate);
- if (ret) {
- dev_err_probe(dev, ret, "failed to set clock rate\n");
- goto error_endpoint;
- }
- } else if (ret && !sensor->extclk) {
- dev_err_probe(dev, ret, "invalid clock config\n");
+ sensor->extclk = devm_v4l2_sensor_clk_get(dev, NULL);
+ if (IS_ERR(sensor->extclk)) {
+ ret = dev_err_probe(dev, PTR_ERR(sensor->extclk),
+ "failed to get external clock\n");
goto error_endpoint;
}
- sensor->extclk_rate = rate ? rate : clk_get_rate(sensor->extclk);
+ sensor->extclk_rate = clk_get_rate(sensor->extclk);
for (i = 0; i < ARRAY_SIZE(supported_extclk_rates); i++) {
if (sensor->extclk_rate == supported_extclk_rates[i])
diff --git a/drivers/media/i2c/ov9282.c b/drivers/media/i2c/ov9282.c
index c882a021cf18..a9f6176e9729 100644
--- a/drivers/media/i2c/ov9282.c
+++ b/drivers/media/i2c/ov9282.c
@@ -1135,11 +1135,10 @@ static int ov9282_parse_hw_config(struct ov9282 *ov9282)
}
/* Get sensor input clock */
- ov9282->inclk = devm_clk_get(ov9282->dev, NULL);
- if (IS_ERR(ov9282->inclk)) {
- dev_err(ov9282->dev, "could not get inclk");
- return PTR_ERR(ov9282->inclk);
- }
+ ov9282->inclk = devm_v4l2_sensor_clk_get(ov9282->dev, NULL);
+ if (IS_ERR(ov9282->inclk))
+ return dev_err_probe(ov9282->dev, PTR_ERR(ov9282->inclk),
+ "could not get inclk\n");
ret = ov9282_configure_regulators(ov9282);
if (ret)
diff --git a/drivers/media/i2c/ov9640.c b/drivers/media/i2c/ov9640.c
index 01dbc0ba89c8..2190c52b1433 100644
--- a/drivers/media/i2c/ov9640.c
+++ b/drivers/media/i2c/ov9640.c
@@ -718,9 +718,10 @@ static int ov9640_probe(struct i2c_client *client)
priv->subdev.ctrl_handler = &priv->hdl;
- priv->clk = devm_clk_get(&client->dev, "mclk");
+ priv->clk = devm_v4l2_sensor_clk_get(&client->dev, "mclk");
if (IS_ERR(priv->clk)) {
- ret = PTR_ERR(priv->clk);
+ ret = dev_err_probe(&client->dev, PTR_ERR(priv->clk),
+ "failed to get mclk\n");
goto ectrlinit;
}
diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
index 026ea34d6291..c94e8fe29f22 100644
--- a/drivers/media/i2c/ov9650.c
+++ b/drivers/media/i2c/ov9650.c
@@ -1494,9 +1494,10 @@ static int ov965x_probe(struct i2c_client *client)
}
if (dev_fwnode(&client->dev)) {
- ov965x->clk = devm_clk_get(&client->dev, NULL);
+ ov965x->clk = devm_v4l2_sensor_clk_get(&client->dev, NULL);
if (IS_ERR(ov965x->clk))
- return PTR_ERR(ov965x->clk);
+ return dev_err_probe(&client->dev, PTR_ERR(ov965x->clk),
+ "failed to get the clock\n");
ov965x->mclk_frequency = clk_get_rate(ov965x->clk);
ret = ov965x_configure_gpios(ov965x);
diff --git a/drivers/media/i2c/ov9734.c b/drivers/media/i2c/ov9734.c
index cae3aeefb616..0eaf33807fc9 100644
--- a/drivers/media/i2c/ov9734.c
+++ b/drivers/media/i2c/ov9734.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Intel Corporation.
-#include <linux/unaligned.h>
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -321,6 +323,9 @@ static const struct ov9734_mode supported_modes[] = {
};
struct ov9734 {
+ struct device *dev;
+ struct clk *clk;
+
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler ctrl_handler;
@@ -414,7 +419,6 @@ static int ov9734_write_reg(struct ov9734 *ov9734, u16 reg, u16 len, u32 val)
static int ov9734_write_reg_list(struct ov9734 *ov9734,
const struct ov9734_reg_list *r_list)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov9734->sd);
unsigned int i;
int ret;
@@ -422,7 +426,7 @@ static int ov9734_write_reg_list(struct ov9734 *ov9734,
ret = ov9734_write_reg(ov9734, r_list->regs[i].address, 1,
r_list->regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(ov9734->dev,
"write reg 0x%4.4x return err = %d",
r_list->regs[i].address, ret);
return ret;
@@ -476,7 +480,6 @@ static int ov9734_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov9734 *ov9734 = container_of(ctrl->handler,
struct ov9734, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov9734->sd);
s64 exposure_max;
int ret = 0;
@@ -492,7 +495,7 @@ static int ov9734_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 controls values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov9734->dev))
return 0;
switch (ctrl->id) {
@@ -525,7 +528,7 @@ static int ov9734_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov9734->dev);
return ret;
}
@@ -610,7 +613,6 @@ static void ov9734_update_pad_format(const struct ov9734_mode *mode,
static int ov9734_start_streaming(struct ov9734 *ov9734)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov9734->sd);
const struct ov9734_reg_list *reg_list;
int link_freq_index, ret;
@@ -618,14 +620,14 @@ static int ov9734_start_streaming(struct ov9734 *ov9734)
reg_list = &link_freq_configs[link_freq_index].reg_list;
ret = ov9734_write_reg_list(ov9734, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set plls");
+ dev_err(ov9734->dev, "failed to set plls");
return ret;
}
reg_list = &ov9734->cur_mode->reg_list;
ret = ov9734_write_reg_list(ov9734, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set mode");
+ dev_err(ov9734->dev, "failed to set mode");
return ret;
}
@@ -636,30 +638,27 @@ static int ov9734_start_streaming(struct ov9734 *ov9734)
ret = ov9734_write_reg(ov9734, OV9734_REG_MODE_SELECT,
1, OV9734_MODE_STREAMING);
if (ret)
- dev_err(&client->dev, "failed to start stream");
+ dev_err(ov9734->dev, "failed to start stream");
return ret;
}
static void ov9734_stop_streaming(struct ov9734 *ov9734)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov9734->sd);
-
if (ov9734_write_reg(ov9734, OV9734_REG_MODE_SELECT,
1, OV9734_MODE_STANDBY))
- dev_err(&client->dev, "failed to stop stream");
+ dev_err(ov9734->dev, "failed to stop stream");
}
static int ov9734_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ov9734 *ov9734 = to_ov9734(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&ov9734->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov9734->dev);
if (ret < 0) {
mutex_unlock(&ov9734->mutex);
return ret;
@@ -669,11 +668,11 @@ static int ov9734_set_stream(struct v4l2_subdev *sd, int enable)
if (ret) {
enable = 0;
ov9734_stop_streaming(ov9734);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov9734->dev);
}
} else {
ov9734_stop_streaming(ov9734);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov9734->dev);
}
mutex_unlock(&ov9734->mutex);
@@ -808,7 +807,6 @@ static const struct v4l2_subdev_internal_ops ov9734_internal_ops = {
static int ov9734_identify_module(struct ov9734 *ov9734)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov9734->sd);
int ret;
u32 val;
@@ -817,7 +815,7 @@ static int ov9734_identify_module(struct ov9734 *ov9734)
return ret;
if (val != OV9734_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ dev_err(ov9734->dev, "chip id mismatch: %x!=%x",
OV9734_CHIP_ID, val);
return -ENXIO;
}
@@ -832,22 +830,12 @@ static int ov9734_check_hwcfg(struct device *dev)
struct v4l2_fwnode_endpoint bus_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
- u32 mclk;
int ret;
unsigned int i, j;
if (!fwnode)
return -ENXIO;
- ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
- if (ret)
- return ret;
-
- if (mclk != OV9734_MCLK) {
- dev_err(dev, "external clock %d is not supported", mclk);
- return -EINVAL;
- }
-
ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
if (!ep)
return -ENXIO;
@@ -892,14 +880,15 @@ static void ov9734_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(ov9734->dev);
+ pm_runtime_set_suspended(ov9734->dev);
mutex_destroy(&ov9734->mutex);
}
static int ov9734_probe(struct i2c_client *client)
{
struct ov9734 *ov9734;
+ unsigned long freq;
int ret;
ret = ov9734_check_hwcfg(&client->dev);
@@ -913,10 +902,23 @@ static int ov9734_probe(struct i2c_client *client)
if (!ov9734)
return -ENOMEM;
+ ov9734->dev = &client->dev;
+
+ ov9734->clk = devm_v4l2_sensor_clk_get(ov9734->dev, NULL);
+ if (IS_ERR(ov9734->clk))
+ return dev_err_probe(ov9734->dev, PTR_ERR(ov9734->clk),
+ "failed to get clock\n");
+
+ freq = clk_get_rate(ov9734->clk);
+ if (freq != OV9734_MCLK)
+ return dev_err_probe(ov9734->dev, -EINVAL,
+ "external clock %lu is not supported",
+ freq);
+
v4l2_i2c_subdev_init(&ov9734->sd, client, &ov9734_subdev_ops);
ret = ov9734_identify_module(ov9734);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
+ dev_err(ov9734->dev, "failed to find sensor: %d", ret);
return ret;
}
@@ -924,7 +926,7 @@ static int ov9734_probe(struct i2c_client *client)
ov9734->cur_mode = &supported_modes[0];
ret = ov9734_init_controls(ov9734);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d", ret);
+ dev_err(ov9734->dev, "failed to init controls: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
@@ -935,7 +937,7 @@ static int ov9734_probe(struct i2c_client *client)
ov9734->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&ov9734->sd.entity, 1, &ov9734->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ dev_err(ov9734->dev, "failed to init entity pads: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
@@ -943,13 +945,13 @@ static int ov9734_probe(struct i2c_client *client)
* Device is already turned on by i2c-core with ACPI domain PM.
* Enable runtime PM and turn off the device.
*/
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(ov9734->dev);
+ pm_runtime_enable(ov9734->dev);
+ pm_runtime_idle(ov9734->dev);
ret = v4l2_async_register_subdev_sensor(&ov9734->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ dev_err(ov9734->dev, "failed to register V4L2 subdev: %d",
ret);
goto probe_error_media_entity_cleanup_pm;
}
@@ -957,8 +959,8 @@ static int ov9734_probe(struct i2c_client *client)
return 0;
probe_error_media_entity_cleanup_pm:
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(ov9734->dev);
+ pm_runtime_set_suspended(ov9734->dev);
media_entity_cleanup(&ov9734->sd.entity);
probe_error_v4l2_ctrl_handler_free:
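
The ov9734 conversion above sets the pattern repeated throughout this series: cache the device pointer in the driver state, fetch the external clock through the new devm_v4l2_sensor_clk_get() helper, and validate its rate with clk_get_rate() in place of the old "clock-frequency" fwnode property check. A minimal sketch using the names from the hunk above (the ov9734_get_clk() wrapper itself is hypothetical):

static int ov9734_get_clk(struct ov9734 *ov9734)
{
	unsigned long freq;

	ov9734->clk = devm_v4l2_sensor_clk_get(ov9734->dev, NULL);
	if (IS_ERR(ov9734->clk))
		return dev_err_probe(ov9734->dev, PTR_ERR(ov9734->clk),
				     "failed to get clock\n");

	/* The mode tables are only valid for one master clock rate. */
	freq = clk_get_rate(ov9734->clk);
	if (freq != OV9734_MCLK)
		return dev_err_probe(ov9734->dev, -EINVAL,
				     "external clock %lu is not supported",
				     freq);

	return 0;
}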
diff --git a/drivers/media/i2c/rj54n1cb0c.c b/drivers/media/i2c/rj54n1cb0c.c
index b7ca39f63dba..6dfc91216851 100644
--- a/drivers/media/i2c/rj54n1cb0c.c
+++ b/drivers/media/i2c/rj54n1cb0c.c
@@ -1329,10 +1329,13 @@ static int rj54n1_probe(struct i2c_client *client)
V4L2_CID_GAIN, 0, 127, 1, 66);
v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops,
V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);
- rj54n1->subdev.ctrl_handler = &rj54n1->hdl;
- if (rj54n1->hdl.error)
- return rj54n1->hdl.error;
+ if (rj54n1->hdl.error) {
+ ret = rj54n1->hdl.error;
+ goto err_free_ctrl;
+ }
+
+ rj54n1->subdev.ctrl_handler = &rj54n1->hdl;
rj54n1->clk_div = clk_div;
rj54n1->rect.left = RJ54N1_COLUMN_SKIP;
rj54n1->rect.top = RJ54N1_ROW_SKIP;
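
The rj54n1 fix reorders control-handler setup so the handler's sticky error is checked before the handler is published on the subdev, with the error path freeing it (the err_free_ctrl label is assumed to do so). A sketch of the idiom with illustrative names:

static int my_init_controls(struct v4l2_subdev *sd,
			    struct v4l2_ctrl_handler *hdl)
{
	int ret;

	v4l2_ctrl_handler_init(hdl, 2);
	/* Real drivers pass their v4l2_ctrl_ops as the second argument. */
	v4l2_ctrl_new_std(hdl, NULL, V4L2_CID_GAIN, 0, 127, 1, 66);

	/* Errors accumulate in hdl->error; check before publishing. */
	if (hdl->error) {
		ret = hdl->error;
		v4l2_ctrl_handler_free(hdl);
		return ret;
	}

	sd->ctrl_handler = hdl;
	return 0;
}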
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index 7716dfe2b8c9..ab31ee2b596b 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -1368,10 +1368,6 @@ static int __s5c73m3_power_on(struct s5c73m3 *state)
goto err_reg_dis;
}
- ret = clk_set_rate(state->clock, state->mclk_frequency);
- if (ret < 0)
- goto err_reg_dis;
-
ret = clk_prepare_enable(state->clock);
if (ret < 0)
goto err_reg_dis;
@@ -1556,16 +1552,13 @@ static int s5c73m3_get_dt_data(struct s5c73m3 *state)
if (!node)
return -EINVAL;
- state->clock = devm_clk_get(dev, S5C73M3_CLK_NAME);
+ state->clock = devm_v4l2_sensor_clk_get_legacy(dev, S5C73M3_CLK_NAME,
+ false,
+ S5C73M3_DEFAULT_MCLK_FREQ);
if (IS_ERR(state->clock))
- return PTR_ERR(state->clock);
-
- if (of_property_read_u32(node, "clock-frequency",
- &state->mclk_frequency)) {
- state->mclk_frequency = S5C73M3_DEFAULT_MCLK_FREQ;
- dev_info(dev, "using default %u Hz clock frequency\n",
- state->mclk_frequency);
- }
+ return dev_err_probe(dev, PTR_ERR(state->clock),
+ "Failed to get the clock %s\n",
+ S5C73M3_CLK_NAME);
/* Request GPIO lines asserted */
state->stby = devm_gpiod_get(dev, "standby", GPIOD_OUT_HIGH);
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3.h b/drivers/media/i2c/s5c73m3/s5c73m3.h
index 627e80cf5b72..68a19c2c8db8 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3.h
+++ b/drivers/media/i2c/s5c73m3/s5c73m3.h
@@ -382,8 +382,6 @@ struct s5c73m3 {
struct clk *clock;
- /* External master clock frequency */
- u32 mclk_frequency;
/* Video bus type - MIPI-CSI2/parallel */
enum v4l2_mbus_type bus_type;
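
Here, and in the s5k5baf and s5k6a3 hunks below, open-coded clock handling (devm_clk_get(), an optional "clock-frequency" property, clk_set_rate()) collapses into devm_v4l2_sensor_clk_get_legacy(), which takes a clock name and a default frequency; the cached mclk_frequency field becomes redundant, with the rate read back via clk_get_rate() where needed. The s5k5baf hunk also propagates PTR_ERR() instead of the hard-coded -EPROBE_DEFER. A sketch of the converted call (the bool argument's semantics are not visible in this diff and are left as an assumption):

	state->clock = devm_v4l2_sensor_clk_get_legacy(dev, S5C73M3_CLK_NAME,
						       false,
						       S5C73M3_DEFAULT_MCLK_FREQ);
	if (IS_ERR(state->clock))
		return dev_err_probe(dev, PTR_ERR(state->clock),
				     "Failed to get the clock %s\n",
				     S5C73M3_CLK_NAME);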
diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
index 24f399cd2124..d1d00eca8708 100644
--- a/drivers/media/i2c/s5k5baf.c
+++ b/drivers/media/i2c/s5k5baf.c
@@ -284,7 +284,6 @@ struct s5k5baf {
struct regulator_bulk_data supplies[S5K5BAF_NUM_SUPPLIES];
struct clk *clock;
- u32 mclk_frequency;
struct s5k5baf_fw *fw;
@@ -576,7 +575,7 @@ static void s5k5baf_hw_patch(struct s5k5baf *state)
static void s5k5baf_hw_set_clocks(struct s5k5baf *state)
{
- unsigned long mclk = state->mclk_frequency / 1000;
+ unsigned long mclk = clk_get_rate(state->clock) / 1000;
u16 status;
static const u16 nseq_clk_cfg[] = {
NSEQ(REG_I_USE_NPVI_CLOCKS,
@@ -946,10 +945,6 @@ static int s5k5baf_power_on(struct s5k5baf *state)
if (ret < 0)
goto err;
- ret = clk_set_rate(state->clock, state->mclk_frequency);
- if (ret < 0)
- goto err_reg_dis;
-
ret = clk_prepare_enable(state->clock);
if (ret < 0)
goto err_reg_dis;
@@ -1841,14 +1836,6 @@ static int s5k5baf_parse_device_node(struct s5k5baf *state, struct device *dev)
return -EINVAL;
}
- ret = of_property_read_u32(node, "clock-frequency",
- &state->mclk_frequency);
- if (ret < 0) {
- state->mclk_frequency = S5K5BAF_DEFAULT_MCLK_FREQ;
- dev_info(dev, "using default %u Hz clock frequency\n",
- state->mclk_frequency);
- }
-
node_ep = of_graph_get_endpoint_by_regs(node, 0, -1);
if (!node_ep) {
dev_err(dev, "no endpoint defined at node %pOF\n", node);
@@ -1967,9 +1954,11 @@ static int s5k5baf_probe(struct i2c_client *c)
if (ret < 0)
goto err_me;
- state->clock = devm_clk_get(state->sd.dev, S5K5BAF_CLK_NAME);
+ state->clock = devm_v4l2_sensor_clk_get_legacy(state->sd.dev,
+ S5K5BAF_CLK_NAME, false,
+ S5K5BAF_DEFAULT_MCLK_FREQ);
if (IS_ERR(state->clock)) {
- ret = -EPROBE_DEFER;
+ ret = PTR_ERR(state->clock);
goto err_me;
}
diff --git a/drivers/media/i2c/s5k6a3.c b/drivers/media/i2c/s5k6a3.c
index 0c2674115b7b..ba6477e88da3 100644
--- a/drivers/media/i2c/s5k6a3.c
+++ b/drivers/media/i2c/s5k6a3.c
@@ -51,7 +51,6 @@ enum {
* @lock: mutex protecting the structure's members below
* @format: media bus format at the sensor's source pad
* @clock: pointer to &struct clk.
- * @clock_frequency: clock frequency
* @power_count: stores state if device is powered
*/
struct s5k6a3 {
@@ -63,7 +62,6 @@ struct s5k6a3 {
struct mutex lock;
struct v4l2_mbus_framefmt format;
struct clk *clock;
- u32 clock_frequency;
int power_count;
};
@@ -192,10 +190,6 @@ static int __s5k6a3_power_on(struct s5k6a3 *sensor)
int i = S5K6A3_SUPP_VDDA;
int ret;
- ret = clk_set_rate(sensor->clock, sensor->clock_frequency);
- if (ret < 0)
- return ret;
-
ret = pm_runtime_get(sensor->dev);
if (ret < 0)
goto error_rpm_put;
@@ -292,22 +286,18 @@ static int s5k6a3_probe(struct i2c_client *client)
mutex_init(&sensor->lock);
sensor->dev = dev;
- sensor->clock = devm_clk_get(sensor->dev, S5K6A3_CLK_NAME);
+ sensor->clock = devm_v4l2_sensor_clk_get_legacy(sensor->dev,
+ S5K6A3_CLK_NAME, false,
+ S5K6A3_DEFAULT_CLK_FREQ);
if (IS_ERR(sensor->clock))
- return PTR_ERR(sensor->clock);
+ return dev_err_probe(sensor->dev, PTR_ERR(sensor->clock),
+ "failed to get extclk\n");
sensor->gpio_reset = devm_gpiod_get(dev, NULL, GPIOD_OUT_HIGH);
ret = PTR_ERR_OR_ZERO(sensor->gpio_reset);
if (ret)
return ret;
- if (of_property_read_u32(dev->of_node, "clock-frequency",
- &sensor->clock_frequency)) {
- sensor->clock_frequency = S5K6A3_DEFAULT_CLK_FREQ;
- dev_info(dev, "using default %u Hz clock frequency\n",
- sensor->clock_frequency);
- }
-
for (i = 0; i < S5K6A3_NUM_SUPPLIES; i++)
sensor->supplies[i].supply = s5k6a3_supply_names[i];
diff --git a/drivers/media/i2c/saa6752hs.c b/drivers/media/i2c/saa6752hs.c
index 1ed8b5edb3fb..1c0031ba43b4 100644
--- a/drivers/media/i2c/saa6752hs.c
+++ b/drivers/media/i2c/saa6752hs.c
@@ -6,7 +6,7 @@
AC-3 support:
- Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2008 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c
index b8b8f206ec3a..48d6730d9271 100644
--- a/drivers/media/i2c/saa7115.c
+++ b/drivers/media/i2c/saa7115.c
@@ -18,7 +18,7 @@
// Added saa7115 support by Kevin Thayer <nufan_wfk at yahoo.com>
// (2/17/2003)
//
-// VBI support (2004) and cleanups (2005) by Hans Verkuil <hverkuil@xs4all.nl>
+// VBI support (2004) and cleanups (2005) by Hans Verkuil <hverkuil@kernel.org>
//
// Copyright (c) 2005-2006 Mauro Carvalho Chehab <mchehab@kernel.org>
// SAA7111, SAA7113 and SAA7118 support
diff --git a/drivers/media/i2c/saa7127.c b/drivers/media/i2c/saa7127.c
index 818ed19cf37b..a42a7ffe3768 100644
--- a/drivers/media/i2c/saa7127.c
+++ b/drivers/media/i2c/saa7127.c
@@ -25,7 +25,7 @@
* Copyright (C) 2004 Chris Kennedy <c@groovy.org>
*
* VBI additions & cleanup:
- * Copyright (C) 2004, 2005 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2004, 2005 Hans Verkuil <hverkuil@kernel.org>
*
* Note: the saa7126 is identical to the saa7127, and the saa7128 is
* identical to the saa7129, except that the saa7126 and saa7128 have
diff --git a/drivers/media/i2c/saa717x.c b/drivers/media/i2c/saa717x.c
index b0793bb0c02a..713331be947c 100644
--- a/drivers/media/i2c/saa717x.c
+++ b/drivers/media/i2c/saa717x.c
@@ -10,7 +10,7 @@
* Changes by T.Adachi (tadachi@tadachi-net.com)
* - support audio, video scaler etc, and checked the initialize sequence.
*
- * Cleaned up by Hans Verkuil <hverkuil@xs4all.nl>
+ * Cleaned up by Hans Verkuil <hverkuil@kernel.org>
*
* Note: this is a reversed engineered driver based on captures from
* the I2C bus under Windows. This chip is very similar to the saa7134,
diff --git a/drivers/media/i2c/st-mipid02.c b/drivers/media/i2c/st-mipid02.c
index f4568e87f018..41ae25b0911f 100644
--- a/drivers/media/i2c/st-mipid02.c
+++ b/drivers/media/i2c/st-mipid02.c
@@ -465,7 +465,6 @@ static int mipid02_disable_streams(struct v4l2_subdev *sd,
if (ret)
goto error;
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
error:
@@ -542,7 +541,6 @@ error:
cci_write(bridge->regmap, MIPID02_DATA_LANE0_REG1, 0, &ret);
cci_write(bridge->regmap, MIPID02_DATA_LANE1_REG1, 0, &ret);
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return ret;
}
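
The pm_runtime_mark_last_busy() deletions in this and the following hunks (tc358746, thp7312, vd55g1, vd56g3, video-i2c) share one rationale: the series assumes the autosuspend-aware put helpers now refresh the last-busy timestamp themselves, so the explicit call is redundant. Idiom sketch:

#include <linux/pm_runtime.h>

static void my_idle(struct device *dev)
{
	/*
	 * Previously:
	 *	pm_runtime_mark_last_busy(dev);
	 *	pm_runtime_put_autosuspend(dev);
	 *
	 * Assuming the PM core marks last-busy inside the
	 * *_put_autosuspend() helpers, one call now suffices:
	 */
	pm_runtime_put_autosuspend(dev);
}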
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 1cc7636e446d..a0ca19359c43 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -38,7 +38,21 @@
static int debug;
module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug, "debug level (0-3)");
+MODULE_PARM_DESC(debug, " debug level (0-3)");
+
+static int packet_type = 0x87;
+module_param(packet_type, int, 0644);
+MODULE_PARM_DESC(packet_type,
+ " Programmable Packet Type. Possible values:\n"
+ "\t\t 0x87: DRM InfoFrame (Default).\n"
+ "\t\t 0x01: Audio Clock Regeneration Packet\n"
+ "\t\t 0x02: Audio Sample Packet\n"
+ "\t\t 0x03: General Control Packet\n"
+ "\t\t 0x04: ACP Packet\n"
+ "\t\t 0x07: One Bit Audio Sample Packet\n"
+ "\t\t 0x08: DST Audio Packet\n"
+ "\t\t 0x09: High Bitrate Audio Stream Packet\n"
+ "\t\t 0x0a: Gamut Metadata Packet\n");
MODULE_DESCRIPTION("Toshiba TC358743 HDMI to CSI-2 bridge driver");
MODULE_AUTHOR("Ramakrishnan Muthukrishnan <ram@rkrishnan.org>");
@@ -466,10 +480,29 @@ tc358743_debugfs_if_read(u32 type, void *priv, struct file *filp,
if (!is_hdmi(sd))
return 0;
- if (type != V4L2_DEBUGFS_IF_AVI)
+ switch (type) {
+ case V4L2_DEBUGFS_IF_AVI:
+ i2c_rd(sd, PK_AVI_0HEAD, buf, PK_AVI_LEN);
+ break;
+ case V4L2_DEBUGFS_IF_AUDIO:
+ i2c_rd(sd, PK_AUD_0HEAD, buf, PK_AUD_LEN);
+ break;
+ case V4L2_DEBUGFS_IF_SPD:
+ i2c_rd(sd, PK_SPD_0HEAD, buf, PK_SPD_LEN);
+ break;
+ case V4L2_DEBUGFS_IF_HDMI:
+ i2c_rd(sd, PK_VS_0HEAD, buf, PK_VS_LEN);
+ break;
+ case V4L2_DEBUGFS_IF_DRM:
+ i2c_rd(sd, PK_ACP_0HEAD, buf, PK_ACP_LEN);
+ break;
+ default:
return 0;
+ }
+
+ if (!buf[2])
+ return -ENOENT;
- i2c_rd(sd, PK_AVI_0HEAD, buf, PK_AVI_16BYTE - PK_AVI_0HEAD + 1);
len = buf[2] + 4;
if (len > V4L2_DEBUGFS_IF_MAX_LEN)
len = -ENOENT;
@@ -478,26 +511,69 @@ tc358743_debugfs_if_read(u32 type, void *priv, struct file *filp,
return len < 0 ? 0 : len;
}
-static void print_avi_infoframe(struct v4l2_subdev *sd)
+static void print_infoframes(struct v4l2_subdev *sd)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct device *dev = &client->dev;
union hdmi_infoframe frame;
- u8 buffer[HDMI_INFOFRAME_SIZE(AVI)] = {};
+ u8 buffer[V4L2_DEBUGFS_IF_MAX_LEN] = {};
+
+ /*
+ * Updating the ACP type here allows the monitored packet type to
+ * be changed dynamically, without reloading the driver with a new
+ * packet_type module option value.
+ *
+ * Instead, write the new value to the module parameter, then call
+ * VIDIOC_LOG_STATUS.
+ */
+ i2c_wr8(sd, TYP_ACP_SET, packet_type);
if (!is_hdmi(sd)) {
- v4l2_info(sd, "DVI-D signal - AVI infoframe not supported\n");
+ v4l2_info(sd, "DVI-D signal - InfoFrames not supported\n");
return;
}
- i2c_rd(sd, PK_AVI_0HEAD, buffer, HDMI_INFOFRAME_SIZE(AVI));
+ i2c_rd(sd, PK_AVI_0HEAD, buffer, PK_AVI_LEN);
+ if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) >= 0)
+ hdmi_infoframe_log(KERN_INFO, dev, &frame);
- if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) < 0) {
- v4l2_err(sd, "%s: unpack of AVI infoframe failed\n", __func__);
- return;
+ i2c_rd(sd, PK_VS_0HEAD, buffer, PK_VS_LEN);
+ if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) >= 0)
+ hdmi_infoframe_log(KERN_INFO, dev, &frame);
+
+ i2c_rd(sd, PK_AUD_0HEAD, buffer, PK_AUD_LEN);
+ if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) >= 0)
+ hdmi_infoframe_log(KERN_INFO, dev, &frame);
+
+ i2c_rd(sd, PK_SPD_0HEAD, buffer, PK_SPD_LEN);
+ if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) >= 0)
+ hdmi_infoframe_log(KERN_INFO, dev, &frame);
+
+ i2c_rd(sd, PK_ACP_0HEAD, buffer, PK_ACP_LEN);
+ if (buffer[0] == packet_type) {
+ if (packet_type < 0x80)
+ v4l2_info(sd, "Packet: %*ph\n", PK_ACP_LEN, buffer);
+ else if (packet_type != 0x87)
+ v4l2_info(sd, "InfoFrame: %*ph\n", PK_ACP_LEN, buffer);
+ else if (hdmi_infoframe_unpack(&frame, buffer,
+ sizeof(buffer)) >= 0)
+ hdmi_infoframe_log(KERN_INFO, dev, &frame);
}
- hdmi_infoframe_log(KERN_INFO, dev, &frame);
+ i2c_rd(sd, PK_MS_0HEAD, buffer, PK_MS_LEN);
+ if (buffer[2] && buffer[2] + 3 <= PK_MS_LEN)
+ v4l2_info(sd, "MPEG Source InfoFrame: %*ph\n",
+ buffer[2] + 3, buffer);
+
+ i2c_rd(sd, PK_ISRC1_0HEAD, buffer, PK_ISRC1_LEN);
+ if (buffer[0] == 0x05)
+ v4l2_info(sd, "ISRC1 Packet: %*ph\n",
+ PK_ISRC1_LEN, buffer);
+
+ i2c_rd(sd, PK_ISRC2_0HEAD, buffer, PK_ISRC2_LEN);
+ if (buffer[0] == 0x06)
+ v4l2_info(sd, "ISRC2 Packet: %*ph\n",
+ PK_ISRC2_LEN, buffer);
}
/* --------------- CTRLS --------------- */
@@ -1375,7 +1451,7 @@ static int tc358743_log_status(struct v4l2_subdev *sd)
v4l2_info(sd, "Deep color mode: %d-bits per channel\n",
deep_color_mode[(i2c_rd8(sd, VI_STATUS1) &
MASK_S_DEEPCOLOR) >> 2]);
- print_avi_infoframe(sd);
+ print_infoframes(sd);
return 0;
}
@@ -2232,10 +2308,15 @@ static int tc358743_probe(struct i2c_client *client)
if (err < 0)
goto err_work_queues;
+ i2c_wr8(sd, TYP_ACP_SET, packet_type);
+ i2c_wr8(sd, PK_AUTO_CLR, 0xff);
+ i2c_wr8(sd, NO_PKT_CLR, MASK_NO_ACP_CLR);
+
state->debugfs_dir = debugfs_create_dir(sd->name, v4l2_debugfs_root());
state->infoframes = v4l2_debugfs_if_alloc(state->debugfs_dir,
- V4L2_DEBUGFS_IF_AVI, sd,
- tc358743_debugfs_if_read);
+ V4L2_DEBUGFS_IF_AVI | V4L2_DEBUGFS_IF_AUDIO |
+ V4L2_DEBUGFS_IF_SPD | V4L2_DEBUGFS_IF_HDMI |
+ V4L2_DEBUGFS_IF_DRM, sd, tc358743_debugfs_if_read);
v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
client->addr << 1, client->adapter->name);
@@ -2245,10 +2326,10 @@ static int tc358743_probe(struct i2c_client *client)
err_work_queues:
cec_unregister_adapter(state->cec_adap);
if (!state->i2c_client->irq) {
- timer_delete(&state->timer);
+ timer_delete_sync(&state->timer);
flush_work(&state->work_i2c_poll);
}
- cancel_delayed_work(&state->delayed_work_enable_hotplug);
+ cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
mutex_destroy(&state->confctl_mutex);
err_hdl:
media_entity_cleanup(&sd->entity);
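
Two tc358743 changes are worth calling out. First, the debugfs infoframe hook now dispatches on the requested type, reading the matching packet-RAM window and returning -ENOENT when the length byte is zero; second, v4l2_debugfs_if_alloc() registers the OR of all supported types rather than AVI alone. A skeleton of the dispatch (register names from tc358743_regs.h below):

	switch (type) {
	case V4L2_DEBUGFS_IF_AVI:
		i2c_rd(sd, PK_AVI_0HEAD, buf, PK_AVI_LEN);
		break;
	case V4L2_DEBUGFS_IF_DRM:
		i2c_rd(sd, PK_ACP_0HEAD, buf, PK_ACP_LEN);
		break;
	/* ... AUDIO, SPD and HDMI (vendor-specific) cases likewise ... */
	default:
		return 0;
	}
	if (!buf[2])
		return -ENOENT;		/* no such frame received */
	len = buf[2] + 4;		/* 3-byte header + checksum + payload */

Because packet_type is created with mode 0644, the monitored ACP slot can also be switched at runtime by writing the parameter under /sys/module/tc358743/parameters/ and then issuing VIDIOC_LOG_STATUS.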
diff --git a/drivers/media/i2c/tc358743_regs.h b/drivers/media/i2c/tc358743_regs.h
index 2495878dc358..aae288f8add3 100644
--- a/drivers/media/i2c/tc358743_regs.h
+++ b/drivers/media/i2c/tc358743_regs.h
@@ -692,6 +692,8 @@
#define MASK_NCO_F0_MOD_42MHZ 0x00
#define MASK_NCO_F0_MOD_27MHZ 0x01
+#define TYP_ACP_SET 0x8706
+
#define PK_INT_MODE 0x8709
#define MASK_ISRC2_INT_MODE 0x80
#define MASK_ISRC_INT_MODE 0x40
@@ -702,6 +704,8 @@
#define MASK_AUD_INT_MODE 0x02
#define MASK_AVI_INT_MODE 0x01
+#define PK_AUTO_CLR 0x870a
+
#define NO_PKT_LIMIT 0x870B
#define MASK_NO_ACP_LIMIT 0xf0
#define SET_NO_ACP_LIMIT_MS(milliseconds) ((((milliseconds) / 80) << 4) & \
@@ -720,25 +724,44 @@
#define ERR_PK_LIMIT 0x870D
#define NO_PKT_LIMIT2 0x870E
#define PK_AVI_0HEAD 0x8710
-#define PK_AVI_1HEAD 0x8711
-#define PK_AVI_2HEAD 0x8712
#define PK_AVI_0BYTE 0x8713
-#define PK_AVI_1BYTE 0x8714
-#define PK_AVI_2BYTE 0x8715
-#define PK_AVI_3BYTE 0x8716
-#define PK_AVI_4BYTE 0x8717
-#define PK_AVI_5BYTE 0x8718
-#define PK_AVI_6BYTE 0x8719
-#define PK_AVI_7BYTE 0x871A
-#define PK_AVI_8BYTE 0x871B
-#define PK_AVI_9BYTE 0x871C
-#define PK_AVI_10BYTE 0x871D
-#define PK_AVI_11BYTE 0x871E
-#define PK_AVI_12BYTE 0x871F
-#define PK_AVI_13BYTE 0x8720
-#define PK_AVI_14BYTE 0x8721
-#define PK_AVI_15BYTE 0x8722
#define PK_AVI_16BYTE 0x8723
+#define PK_AVI_LEN (PK_AVI_16BYTE - PK_AVI_0HEAD + 1)
+
+#define PK_AUD_0HEAD 0x8730
+#define PK_AUD_0BYTE 0x8733
+#define PK_AUD_10BYTE 0x873d
+#define PK_AUD_LEN (PK_AUD_10BYTE - PK_AUD_0HEAD + 1)
+
+#define PK_MS_0HEAD 0x8740
+#define PK_MS_0BYTE 0x8743
+#define PK_MS_10BYTE 0x874d
+#define PK_MS_LEN (PK_MS_10BYTE - PK_MS_0HEAD + 1)
+
+#define PK_SPD_0HEAD 0x8750
+#define PK_SPD_0BYTE 0x8753
+#define PK_SPD_27BYTE 0x876e
+#define PK_SPD_LEN (PK_SPD_27BYTE - PK_SPD_0HEAD + 1)
+
+#define PK_VS_0HEAD 0x8770
+#define PK_VS_0BYTE 0x8773
+#define PK_VS_27BYTE 0x878e
+#define PK_VS_LEN (PK_VS_27BYTE - PK_VS_0HEAD + 1)
+
+#define PK_ACP_0HEAD 0x8790
+#define PK_ACP_0BYTE 0x8793
+#define PK_ACP_27BYTE 0x87ae
+#define PK_ACP_LEN (PK_ACP_27BYTE - PK_ACP_0HEAD + 1)
+
+#define PK_ISRC1_0HEAD 0x87b0
+#define PK_ISRC1_0BYTE 0x87b3
+#define PK_ISRC1_27BYTE 0x87c2
+#define PK_ISRC1_LEN (PK_ISRC1_27BYTE - PK_ISRC1_0HEAD + 1)
+
+#define PK_ISRC2_0HEAD 0x87d0
+#define PK_ISRC2_0BYTE 0x87d3
+#define PK_ISRC2_27BYTE 0x87ee
+#define PK_ISRC2_LEN (PK_ISRC2_27BYTE - PK_ISRC2_0HEAD + 1)
#define BKSV 0x8800
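
The per-byte AVI defines could be dropped because the new *_LEN macros derive each packet buffer's size from its first and last register. Worked example:

/*
 *	PK_AVI_LEN = PK_AVI_16BYTE - PK_AVI_0HEAD + 1
 *		   = 0x8723 - 0x8710 + 1
 *		   = 0x14 = 20 bytes
 *
 * i.e. 3 header registers (0x8710..0x8712) plus 17 data registers
 * (0x8713..0x8723: checksum plus 16 payload bytes).
 */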
diff --git a/drivers/media/i2c/tc358746.c b/drivers/media/i2c/tc358746.c
index 143aa1359aba..bcfc274cf891 100644
--- a/drivers/media/i2c/tc358746.c
+++ b/drivers/media/i2c/tc358746.c
@@ -816,7 +816,6 @@ static int tc358746_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
err_out:
- pm_runtime_mark_last_busy(sd->dev);
pm_runtime_put_sync_autosuspend(sd->dev);
return err;
@@ -838,7 +837,6 @@ err_out:
if (err)
return err;
- pm_runtime_mark_last_busy(sd->dev);
pm_runtime_put_sync_autosuspend(sd->dev);
return v4l2_subdev_call(src, video, s_stream, 0);
@@ -1016,7 +1014,6 @@ tc358746_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
err = tc358746_read(tc358746, reg->reg, &val);
reg->val = val;
- pm_runtime_mark_last_busy(sd->dev);
pm_runtime_put_sync_autosuspend(sd->dev);
return err;
@@ -1032,7 +1029,6 @@ tc358746_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_register *reg)
tc358746_write(tc358746, (u32)reg->reg, (u32)reg->val);
- pm_runtime_mark_last_busy(sd->dev);
pm_runtime_put_sync_autosuspend(sd->dev);
return 0;
@@ -1395,7 +1391,6 @@ static int tc358746_init_hw(struct tc358746 *tc358746)
}
err = tc358746_read(tc358746, CHIPID_REG, &val);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_sync_autosuspend(dev);
if (err)
return -ENODEV;
diff --git a/drivers/media/i2c/tda9840.c b/drivers/media/i2c/tda9840.c
index d61da811c9da..e3b266db571f 100644
--- a/drivers/media/i2c/tda9840.c
+++ b/drivers/media/i2c/tda9840.c
@@ -3,7 +3,7 @@
tda9840 - i2c-driver for the tda9840 by SGS Thomson
Copyright (C) 1998-2003 Michael Hunold <michael@mihu.de>
- Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2008 Hans Verkuil <hverkuil@kernel.org>
The tda9840 is a stereo/dual sound processor with digital
identification. It can be found at address 0x84 on the i2c-bus.
diff --git a/drivers/media/i2c/tea6415c.c b/drivers/media/i2c/tea6415c.c
index 4aaf66353610..0cd2e6c52e20 100644
--- a/drivers/media/i2c/tea6415c.c
+++ b/drivers/media/i2c/tea6415c.c
@@ -3,7 +3,7 @@
tea6415c - i2c-driver for the tea6415c by SGS Thomson
Copyright (C) 1998-2003 Michael Hunold <michael@mihu.de>
- Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2008 Hans Verkuil <hverkuil@kernel.org>
The tea6415c is a bus controlled video-matrix-switch
with 8 inputs and 6 outputs.
diff --git a/drivers/media/i2c/tea6420.c b/drivers/media/i2c/tea6420.c
index 5c5ea3973251..400883fc0c0f 100644
--- a/drivers/media/i2c/tea6420.c
+++ b/drivers/media/i2c/tea6420.c
@@ -3,7 +3,7 @@
tea6420 - i2c-driver for the tea6420 by SGS Thomson
Copyright (C) 1998-2003 Michael Hunold <michael@mihu.de>
- Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2008 Hans Verkuil <hverkuil@kernel.org>
The tea6420 is a bus controlled audio-matrix with 5 stereo inputs,
4 stereo outputs and gain control for each output.
diff --git a/drivers/media/i2c/thp7312.c b/drivers/media/i2c/thp7312.c
index 8852c56431fe..775cfba188d8 100644
--- a/drivers/media/i2c/thp7312.c
+++ b/drivers/media/i2c/thp7312.c
@@ -808,7 +808,6 @@ static int thp7312_s_stream(struct v4l2_subdev *sd, int enable)
if (!enable) {
thp7312_stream_enable(thp7312, false);
- pm_runtime_mark_last_busy(thp7312->dev);
pm_runtime_put_autosuspend(thp7312->dev);
v4l2_subdev_unlock_state(sd_state);
@@ -839,7 +838,6 @@ static int thp7312_s_stream(struct v4l2_subdev *sd, int enable)
goto finish_unlock;
finish_pm:
- pm_runtime_mark_last_busy(thp7312->dev);
pm_runtime_put_autosuspend(thp7312->dev);
finish_unlock:
v4l2_subdev_unlock_state(sd_state);
@@ -1147,7 +1145,6 @@ static int thp7312_s_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(thp7312->dev);
pm_runtime_put_autosuspend(thp7312->dev);
return ret;
@@ -2183,7 +2180,6 @@ static int thp7312_probe(struct i2c_client *client)
* Decrease the PM usage count. The device will get suspended after the
* autosuspend delay, turning the power off.
*/
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
dev_info(dev, "THP7312 firmware version %02u.%02u\n",
diff --git a/drivers/media/i2c/ths7303.c b/drivers/media/i2c/ths7303.c
index b7cedc5b3e8e..ff268ebeb4d9 100644
--- a/drivers/media/i2c/ths7303.c
+++ b/drivers/media/i2c/ths7303.c
@@ -7,7 +7,7 @@
* Author: Chaithrika U S <chaithrika@ti.com>
*
* Contributors:
- * Hans Verkuil <hansverk@cisco.com>
+ * Hans Verkuil <hverkuil@kernel.org>
* Lad, Prabhakar <prabhakar.lad@ti.com>
* Martin Bugge <marbugge@cisco.com>
*
diff --git a/drivers/media/i2c/tlv320aic23b.c b/drivers/media/i2c/tlv320aic23b.c
index b7b31b6192af..6f6bc5236565 100644
--- a/drivers/media/i2c/tlv320aic23b.c
+++ b/drivers/media/i2c/tlv320aic23b.c
@@ -7,7 +7,7 @@
* Based on wm8775 driver
*
* Copyright (C) 2004 Ulf Eklund <ivtv at eklund.to>
- * Copyright (C) 2005 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2005 Hans Verkuil <hverkuil@kernel.org>
*/
#include <linux/module.h>
diff --git a/drivers/media/i2c/upd64031a.c b/drivers/media/i2c/upd64031a.c
index 9d0b72a213be..a178af46e695 100644
--- a/drivers/media/i2c/upd64031a.c
+++ b/drivers/media/i2c/upd64031a.c
@@ -4,7 +4,7 @@
*
* 2003 by T.Adachi <tadachi@tadachi-net.com>
* 2003 by Takeru KOMORIYA <komoriya@paken.org>
- * 2006 by Hans Verkuil <hverkuil@xs4all.nl>
+ * 2006 by Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/i2c/upd64083.c b/drivers/media/i2c/upd64083.c
index 2e99ed5da42c..5421dc5e32c9 100644
--- a/drivers/media/i2c/upd64083.c
+++ b/drivers/media/i2c/upd64083.c
@@ -4,7 +4,7 @@
*
* 2003 by T.Adachi (tadachi@tadachi-net.com)
* 2003 by Takeru KOMORIYA <komoriya@paken.org>
- * 2006 by Hans Verkuil <hverkuil@xs4all.nl>
+ * 2006 by Hans Verkuil <hverkuil@kernel.org>
*/
#include <linux/module.h>
diff --git a/drivers/media/i2c/vd55g1.c b/drivers/media/i2c/vd55g1.c
index c0754fd03b1d..f09d6bf32641 100644
--- a/drivers/media/i2c/vd55g1.c
+++ b/drivers/media/i2c/vd55g1.c
@@ -66,7 +66,7 @@
#define VD55G1_REG_READOUT_CTRL CCI_REG8(0x052e)
#define VD55G1_READOUT_CTRL_BIN_MODE_NORMAL 0
#define VD55G1_READOUT_CTRL_BIN_MODE_DIGITAL_X2 1
-#define VD55G1_REG_DUSTER_CTRL CCI_REG8(0x03ea)
+#define VD55G1_REG_DUSTER_CTRL CCI_REG8(0x03ae)
#define VD55G1_DUSTER_ENABLE BIT(0)
#define VD55G1_DUSTER_DISABLE 0
#define VD55G1_DUSTER_DYN_ENABLE BIT(1)
@@ -1104,7 +1104,6 @@ static int vd55g1_disable_streams(struct v4l2_subdev *sd,
vd55g1_grab_ctrls(sensor, false);
- pm_runtime_mark_last_busy(sensor->dev);
pm_runtime_put_autosuspend(sensor->dev);
return ret;
@@ -1338,7 +1337,6 @@ static int vd55g1_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(sensor->dev);
pm_runtime_put_autosuspend(sensor->dev);
return ret;
@@ -1433,7 +1431,6 @@ static int vd55g1_s_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(sensor->dev);
pm_runtime_put_autosuspend(sensor->dev);
return ret;
@@ -1863,7 +1860,7 @@ static int vd55g1_probe(struct i2c_client *client)
if (ret)
return dev_err_probe(dev, ret, "Failed to get regulators\n");
- sensor->xclk = devm_clk_get(dev, NULL);
+ sensor->xclk = devm_v4l2_sensor_clk_get(dev, NULL);
if (IS_ERR(sensor->xclk))
return dev_err_probe(dev, PTR_ERR(sensor->xclk),
"Failed to get xclk\n");
@@ -1895,7 +1892,6 @@ static int vd55g1_probe(struct i2c_client *client)
pm_runtime_enable(dev);
pm_runtime_set_autosuspend_delay(dev, 4000);
pm_runtime_use_autosuspend(dev);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
ret = vd55g1_subdev_init(sensor);
diff --git a/drivers/media/i2c/vd56g3.c b/drivers/media/i2c/vd56g3.c
index 5d951ad0b478..157acea9e286 100644
--- a/drivers/media/i2c/vd56g3.c
+++ b/drivers/media/i2c/vd56g3.c
@@ -493,7 +493,6 @@ static int vd56g3_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(sensor->dev);
pm_runtime_put_autosuspend(sensor->dev);
return ret;
@@ -577,7 +576,6 @@ static int vd56g3_s_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_mark_last_busy(sensor->dev);
pm_runtime_put_autosuspend(sensor->dev);
return ret;
@@ -1021,7 +1019,6 @@ static int vd56g3_disable_streams(struct v4l2_subdev *sd,
__v4l2_ctrl_grab(sensor->vflip_ctrl, false);
__v4l2_ctrl_grab(sensor->patgen_ctrl, false);
- pm_runtime_mark_last_busy(sensor->dev);
pm_runtime_put_autosuspend(sensor->dev);
return ret;
@@ -1474,7 +1471,7 @@ static int vd56g3_probe(struct i2c_client *client)
if (ret)
return dev_err_probe(dev, ret, "Failed to get regulators\n");
- sensor->xclk = devm_clk_get(dev, NULL);
+ sensor->xclk = devm_v4l2_sensor_clk_get(dev, NULL);
if (IS_ERR(sensor->xclk))
return dev_err_probe(dev, PTR_ERR(sensor->xclk),
"Failed to get xclk\n");
@@ -1527,7 +1524,6 @@ static int vd56g3_probe(struct i2c_client *client)
}
/* Sensor could now be powered off (after the autosuspend delay) */
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
dev_dbg(dev, "Successfully probe %s sensor\n",
diff --git a/drivers/media/i2c/vgxy61.c b/drivers/media/i2c/vgxy61.c
index 5b0479f3a3c0..d64d0099e6fe 100644
--- a/drivers/media/i2c/vgxy61.c
+++ b/drivers/media/i2c/vgxy61.c
@@ -1181,6 +1181,21 @@ static int vgxy61_s_stream(struct v4l2_subdev *sd, int enable)
return ret;
}
+static int vgxy61_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_mbus_frame_desc *fd)
+{
+ struct vgxy61_dev *sensor = to_vgxy61_dev(sd);
+
+ fd->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2;
+ fd->num_entries = 1;
+ fd->entry[0].pixelcode = sensor->fmt.code;
+ fd->entry[0].stream = 0;
+ fd->entry[0].bus.csi2.vc = 0;
+ fd->entry[0].bus.csi2.dt = get_data_type_by_code(sensor->fmt.code);
+
+ return 0;
+}
+
static int vgxy61_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
@@ -1402,6 +1417,7 @@ static const struct v4l2_subdev_pad_ops vgxy61_pad_ops = {
.set_fmt = vgxy61_set_fmt,
.get_selection = vgxy61_get_selection,
.enum_frame_size = vgxy61_enum_frame_size,
+ .get_frame_desc = vgxy61_get_frame_desc,
};
static const struct v4l2_subdev_ops vgxy61_subdev_ops = {
@@ -1761,11 +1777,11 @@ static int vgxy61_probe(struct i2c_client *client)
return ret;
}
- sensor->xclk = devm_clk_get(dev, NULL);
- if (IS_ERR(sensor->xclk)) {
- dev_err(dev, "failed to get xclk\n");
- return PTR_ERR(sensor->xclk);
- }
+ sensor->xclk = devm_v4l2_sensor_clk_get(dev, NULL);
+ if (IS_ERR(sensor->xclk))
+ return dev_err_probe(dev, PTR_ERR(sensor->xclk),
+ "failed to get xclk\n");
+
sensor->clk_freq = clk_get_rate(sensor->xclk);
if (sensor->clk_freq < 6 * HZ_PER_MHZ ||
sensor->clk_freq > 27 * HZ_PER_MHZ) {
diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c
index 0dd991d70d53..1eee2d4f5b40 100644
--- a/drivers/media/i2c/video-i2c.c
+++ b/drivers/media/i2c/video-i2c.c
@@ -288,7 +288,6 @@ static int amg88xx_read(struct device *dev, enum hwmon_sensor_types type,
return tmp;
tmp = regmap_bulk_read(data->regmap, AMG88XX_REG_TTHL, &buf, 2);
- pm_runtime_mark_last_busy(regmap_get_device(data->regmap));
pm_runtime_put_autosuspend(regmap_get_device(data->regmap));
if (tmp)
return tmp;
@@ -527,7 +526,6 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
return 0;
error_rpm_put:
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
error_del_list:
video_i2c_del_list(vq, VB2_BUF_STATE_QUEUED);
@@ -544,7 +542,6 @@ static void stop_streaming(struct vb2_queue *vq)
kthread_stop(data->kthread_vid_cap);
data->kthread_vid_cap = NULL;
- pm_runtime_mark_last_busy(regmap_get_device(data->regmap));
pm_runtime_put_autosuspend(regmap_get_device(data->regmap));
video_i2c_del_list(vq, VB2_BUF_STATE_ERROR);
@@ -853,7 +850,6 @@ static int video_i2c_probe(struct i2c_client *client)
if (ret < 0)
goto error_pm_disable;
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return 0;
diff --git a/drivers/media/i2c/vp27smpx.c b/drivers/media/i2c/vp27smpx.c
index 06fd46a63c72..df21950be24f 100644
--- a/drivers/media/i2c/vp27smpx.c
+++ b/drivers/media/i2c/vp27smpx.c
@@ -2,7 +2,7 @@
/*
* vp27smpx - driver version 0.0.1
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*
* Based on a tvaudio patch from Takahiro Adachi <tadachi@tadachi-net.com>
* and Kazuhiko Kawakami <kazz-0@mail.goo.ne.jp>
diff --git a/drivers/media/i2c/wm8739.c b/drivers/media/i2c/wm8739.c
index c091b78a5b41..72eb10339d06 100644
--- a/drivers/media/i2c/wm8739.c
+++ b/drivers/media/i2c/wm8739.c
@@ -4,7 +4,7 @@
*
* Copyright (C) 2005 T. Adachi <tadachi@tadachi-net.com>
*
- * Copyright (C) 2005 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2005 Hans Verkuil <hverkuil@kernel.org>
* - Cleanup
*/
diff --git a/drivers/media/i2c/wm8775.c b/drivers/media/i2c/wm8775.c
index 619b2988577c..56778d3bc28a 100644
--- a/drivers/media/i2c/wm8775.c
+++ b/drivers/media/i2c/wm8775.c
@@ -6,7 +6,7 @@
*
* Based on saa7115 driver
*
- * Copyright (C) 2005 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2005 Hans Verkuil <hverkuil@kernel.org>
* - Cleanup
* - V4L2 API update
* - sound fixes
diff --git a/drivers/media/mc/mc-devnode.c b/drivers/media/mc/mc-devnode.c
index 56444edaf136..6daa7aa99442 100644
--- a/drivers/media/mc/mc-devnode.c
+++ b/drivers/media/mc/mc-devnode.c
@@ -50,11 +50,6 @@ static void media_devnode_release(struct device *cd)
{
struct media_devnode *devnode = to_media_devnode(cd);
- mutex_lock(&media_devnode_lock);
- /* Mark device node number as free */
- clear_bit(devnode->minor, media_devnode_nums);
- mutex_unlock(&media_devnode_lock);
-
/* Release media_devnode and perform other cleanups as needed. */
if (devnode->release)
devnode->release(devnode);
@@ -281,6 +276,7 @@ void media_devnode_unregister(struct media_devnode *devnode)
/* Delete the cdev on this minor as well */
cdev_device_del(&devnode->cdev, &devnode->dev);
devnode->media_dev = NULL;
+ clear_bit(devnode->minor, media_devnode_nums);
mutex_unlock(&media_devnode_lock);
put_device(&devnode->dev);
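
This mc-devnode change moves the minor-number release out of the struct device release callback into media_devnode_unregister(), still under media_devnode_lock and immediately after the cdev is deleted, so the minor is returned as soon as the node is unregistered instead of whenever the last reference happens to drop. Simplified teardown order after the change:

void media_devnode_unregister(struct media_devnode *devnode)
{
	mutex_lock(&media_devnode_lock);
	/* ... clear the registered flag ... */
	cdev_device_del(&devnode->cdev, &devnode->dev);
	devnode->media_dev = NULL;
	clear_bit(devnode->minor, media_devnode_nums);	/* minor reusable */
	mutex_unlock(&media_devnode_lock);

	put_device(&devnode->dev);	/* release() no longer touches the minor */
}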
diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
index 045590905582..9519a537bfa2 100644
--- a/drivers/media/mc/mc-entity.c
+++ b/drivers/media/mc/mc-entity.c
@@ -682,8 +682,8 @@ done:
return 0;
dev_dbg(walk->mdev->dev,
- "media pipeline: adding unconnected pads of '%s'\n",
- local->entity->name);
+ "media pipeline: adding unconnected pads of '%s' reachable from pad %u\n",
+ origin->entity->name, origin->index);
media_entity_for_each_pad(origin->entity, local) {
/*
@@ -691,7 +691,7 @@ done:
* (already discovered through iterating over links) and pads
* not internally connected.
*/
- if (origin == local || !local->num_links ||
+ if (origin == local || local->num_links ||
!media_entity_has_pad_interdep(origin->entity, origin->index,
local->index))
continue;
diff --git a/drivers/media/mc/mc-request.c b/drivers/media/mc/mc-request.c
index 5edfc2791ce7..f66f728b1b43 100644
--- a/drivers/media/mc/mc-request.c
+++ b/drivers/media/mc/mc-request.c
@@ -6,7 +6,7 @@
* Copyright (C) 2018 Intel Corporation
* Copyright (C) 2018 Google, Inc.
*
- * Author: Hans Verkuil <hansverk@cisco.com>
+ * Author: Hans Verkuil <hverkuil@kernel.org>
* Author: Sakari Ailus <sakari.ailus@linux.intel.com>
*/
diff --git a/drivers/media/pci/b2c2/flexcop-pci.c b/drivers/media/pci/b2c2/flexcop-pci.c
index 486c8ec0fa60..ab53c5b02c48 100644
--- a/drivers/media/pci/b2c2/flexcop-pci.c
+++ b/drivers/media/pci/b2c2/flexcop-pci.c
@@ -411,7 +411,7 @@ static void flexcop_pci_remove(struct pci_dev *pdev)
struct flexcop_pci *fc_pci = pci_get_drvdata(pdev);
if (irq_chk_intv > 0)
- cancel_delayed_work(&fc_pci->irq_check_work);
+ cancel_delayed_work_sync(&fc_pci->irq_check_work);
flexcop_pci_dma_exit(fc_pci);
flexcop_device_exit(fc_pci->fc_dev);
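
Switching to cancel_delayed_work_sync() in the remove path guarantees the work handler is no longer running when the DMA and device state it uses are torn down; plain cancel_delayed_work() only prevents a pending run from starting. A minimal sketch with hypothetical names:

#include <linux/workqueue.h>

struct my_pci_dev {
	struct delayed_work irq_check_work;
	/* ... DMA and device state used by the work handler ... */
};

static void my_remove(struct my_pci_dev *d)
{
	/* Wait for a possibly in-flight handler, then free its state. */
	cancel_delayed_work_sync(&d->irq_check_work);
	/* ... DMA exit, device exit ... */
}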
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 9ce67f515843..17e4529e537a 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -1620,7 +1620,7 @@ static int bttv_g_std(struct file *file, void *priv, v4l2_std_id *id)
return 0;
}
-static int bttv_querystd(struct file *file, void *f, v4l2_std_id *id)
+static int bttv_querystd(struct file *file, void *priv, v4l2_std_id *id)
{
struct bttv *btv = video_drvdata(file);
@@ -1750,7 +1750,7 @@ static int bttv_s_frequency(struct file *file, void *priv,
return 0;
}
-static int bttv_log_status(struct file *file, void *f)
+static int bttv_log_status(struct file *file, void *priv)
{
struct video_device *vdev = video_devdata(file);
struct bttv *btv = video_drvdata(file);
@@ -1761,7 +1761,7 @@ static int bttv_log_status(struct file *file, void *f)
}
#ifdef CONFIG_VIDEO_ADV_DEBUG
-static int bttv_g_register(struct file *file, void *f,
+static int bttv_g_register(struct file *file, void *priv,
struct v4l2_dbg_register *reg)
{
struct bttv *btv = video_drvdata(file);
@@ -1774,7 +1774,7 @@ static int bttv_g_register(struct file *file, void *f,
return 0;
}
-static int bttv_s_register(struct file *file, void *f,
+static int bttv_s_register(struct file *file, void *priv,
const struct v4l2_dbg_register *reg)
{
struct bttv *btv = video_drvdata(file);
@@ -2159,7 +2159,7 @@ static int bttv_enum_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
-static int bttv_g_parm(struct file *file, void *f,
+static int bttv_g_parm(struct file *file, void *priv,
struct v4l2_streamparm *parm)
{
struct bttv *btv = video_drvdata(file);
@@ -2208,7 +2208,7 @@ static int bttv_g_pixelaspect(struct file *file, void *priv,
return 0;
}
-static int bttv_g_selection(struct file *file, void *f, struct v4l2_selection *sel)
+static int bttv_g_selection(struct file *file, void *priv, struct v4l2_selection *sel)
{
struct bttv *btv = video_drvdata(file);
@@ -2232,7 +2232,7 @@ static int bttv_g_selection(struct file *file, void *f, struct v4l2_selection *s
return 0;
}
-static int bttv_s_selection(struct file *file, void *f, struct v4l2_selection *sel)
+static int bttv_s_selection(struct file *file, void *priv, struct v4l2_selection *sel)
{
struct bttv *btv = video_drvdata(file);
const struct v4l2_rect *b;
diff --git a/drivers/media/pci/bt8xx/bttv-vbi.c b/drivers/media/pci/bt8xx/bttv-vbi.c
index a71440611e46..0ca88a2400ee 100644
--- a/drivers/media/pci/bt8xx/bttv-vbi.c
+++ b/drivers/media/pci/bt8xx/bttv-vbi.c
@@ -241,7 +241,7 @@ static int try_fmt(struct v4l2_vbi_format *f, const struct bttv_tvnorm *tvnorm,
return 0;
}
-int bttv_try_fmt_vbi_cap(struct file *file, void *f, struct v4l2_format *frt)
+int bttv_try_fmt_vbi_cap(struct file *file, void *priv, struct v4l2_format *frt)
{
struct bttv *btv = video_drvdata(file);
const struct bttv_tvnorm *tvnorm;
@@ -258,7 +258,7 @@ int bttv_try_fmt_vbi_cap(struct file *file, void *f, struct v4l2_format *frt)
}
-int bttv_s_fmt_vbi_cap(struct file *file, void *f, struct v4l2_format *frt)
+int bttv_s_fmt_vbi_cap(struct file *file, void *priv, struct v4l2_format *frt)
{
struct bttv *btv = video_drvdata(file);
const struct bttv_tvnorm *tvnorm;
@@ -301,7 +301,7 @@ int bttv_s_fmt_vbi_cap(struct file *file, void *f, struct v4l2_format *frt)
}
-int bttv_g_fmt_vbi_cap(struct file *file, void *f, struct v4l2_format *frt)
+int bttv_g_fmt_vbi_cap(struct file *file, void *priv, struct v4l2_format *frt)
{
const struct bttv_tvnorm *tvnorm;
struct bttv *btv = video_drvdata(file);
diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
index 39e25cc53edb..b7695705fdee 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.c
+++ b/drivers/media/pci/cobalt/cobalt-driver.c
@@ -44,7 +44,7 @@ module_param_named(ignore_err, cobalt_ignore_err, int, 0644);
MODULE_PARM_DESC(ignore_err,
"If set then ignore missing i2c adapters/receivers. Default: 0\n");
-MODULE_AUTHOR("Hans Verkuil <hansverk@cisco.com> & Morten Hestnes");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org> & Morten Hestnes");
MODULE_DESCRIPTION("cobalt driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/pci/cobalt/cobalt-v4l2.c b/drivers/media/pci/cobalt/cobalt-v4l2.c
index ae82427e3479..51fd9576c6c2 100644
--- a/drivers/media/pci/cobalt/cobalt-v4l2.c
+++ b/drivers/media/pci/cobalt/cobalt-v4l2.c
@@ -447,7 +447,7 @@ static int cobalt_cobaltc(struct cobalt *cobalt, unsigned int cmd, void *arg)
return 0;
}
-static int cobalt_g_register(struct file *file, void *priv_fh,
+static int cobalt_g_register(struct file *file, void *priv,
struct v4l2_dbg_register *reg)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -456,7 +456,7 @@ static int cobalt_g_register(struct file *file, void *priv_fh,
return cobalt_cobaltc(cobalt, VIDIOC_DBG_G_REGISTER, reg);
}
-static int cobalt_s_register(struct file *file, void *priv_fh,
+static int cobalt_s_register(struct file *file, void *priv,
const struct v4l2_dbg_register *reg)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -467,7 +467,7 @@ static int cobalt_s_register(struct file *file, void *priv_fh,
}
#endif
-static int cobalt_querycap(struct file *file, void *priv_fh,
+static int cobalt_querycap(struct file *file, void *priv,
struct v4l2_capability *vcap)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -562,7 +562,7 @@ static void cobalt_video_input_status_show(struct cobalt_stream *s)
cobalt_info("rx%d: Packer: %x\n", rx, ioread32(&packer->control));
}
-static int cobalt_log_status(struct file *file, void *priv_fh)
+static int cobalt_log_status(struct file *file, void *priv)
{
struct cobalt_stream *s = video_drvdata(file);
struct cobalt *cobalt = s->cobalt;
@@ -596,7 +596,7 @@ static int cobalt_log_status(struct file *file, void *priv_fh)
return 0;
}
-static int cobalt_enum_dv_timings(struct file *file, void *priv_fh,
+static int cobalt_enum_dv_timings(struct file *file, void *priv,
struct v4l2_enum_dv_timings *timings)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -613,7 +613,7 @@ static int cobalt_enum_dv_timings(struct file *file, void *priv_fh,
pad, enum_dv_timings, timings);
}
-static int cobalt_s_dv_timings(struct file *file, void *priv_fh,
+static int cobalt_s_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *timings)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -641,7 +641,7 @@ static int cobalt_s_dv_timings(struct file *file, void *priv_fh,
return err;
}
-static int cobalt_g_dv_timings(struct file *file, void *priv_fh,
+static int cobalt_g_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *timings)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -654,7 +654,7 @@ static int cobalt_g_dv_timings(struct file *file, void *priv_fh,
pad, g_dv_timings, 0, timings);
}
-static int cobalt_query_dv_timings(struct file *file, void *priv_fh,
+static int cobalt_query_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *timings)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -667,7 +667,7 @@ static int cobalt_query_dv_timings(struct file *file, void *priv_fh,
pad, query_dv_timings, 0, timings);
}
-static int cobalt_dv_timings_cap(struct file *file, void *priv_fh,
+static int cobalt_dv_timings_cap(struct file *file, void *priv,
struct v4l2_dv_timings_cap *cap)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -677,7 +677,7 @@ static int cobalt_dv_timings_cap(struct file *file, void *priv_fh,
pad, dv_timings_cap, cap);
}
-static int cobalt_enum_fmt_vid_cap(struct file *file, void *priv_fh,
+static int cobalt_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
switch (f->index) {
@@ -697,7 +697,7 @@ static int cobalt_enum_fmt_vid_cap(struct file *file, void *priv_fh,
return 0;
}
-static int cobalt_g_fmt_vid_cap(struct file *file, void *priv_fh,
+static int cobalt_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -726,7 +726,7 @@ static int cobalt_g_fmt_vid_cap(struct file *file, void *priv_fh,
return 0;
}
-static int cobalt_try_fmt_vid_cap(struct file *file, void *priv_fh,
+static int cobalt_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -787,7 +787,7 @@ static int cobalt_try_fmt_vid_cap(struct file *file, void *priv_fh,
return 0;
}
-static int cobalt_s_fmt_vid_cap(struct file *file, void *priv_fh,
+static int cobalt_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -796,7 +796,7 @@ static int cobalt_s_fmt_vid_cap(struct file *file, void *priv_fh,
if (vb2_is_busy(&s->q))
return -EBUSY;
- if (cobalt_try_fmt_vid_cap(file, priv_fh, f))
+ if (cobalt_try_fmt_vid_cap(file, priv, f))
return -EINVAL;
s->width = pix->width;
@@ -821,7 +821,7 @@ static int cobalt_s_fmt_vid_cap(struct file *file, void *priv_fh,
return 0;
}
-static int cobalt_try_fmt_vid_out(struct file *file, void *priv_fh,
+static int cobalt_try_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
struct v4l2_pix_format *pix = &f->fmt.pix;
@@ -862,7 +862,7 @@ static int cobalt_try_fmt_vid_out(struct file *file, void *priv_fh,
return 0;
}
-static int cobalt_g_fmt_vid_out(struct file *file, void *priv_fh,
+static int cobalt_g_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -882,7 +882,7 @@ static int cobalt_g_fmt_vid_out(struct file *file, void *priv_fh,
return 0;
}
-static int cobalt_enum_fmt_vid_out(struct file *file, void *priv_fh,
+static int cobalt_enum_fmt_vid_out(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
switch (f->index) {
@@ -899,7 +899,7 @@ static int cobalt_enum_fmt_vid_out(struct file *file, void *priv_fh,
return 0;
}
-static int cobalt_s_fmt_vid_out(struct file *file, void *priv_fh,
+static int cobalt_s_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -909,7 +909,7 @@ static int cobalt_s_fmt_vid_out(struct file *file, void *priv_fh,
};
u32 code;
- if (cobalt_try_fmt_vid_out(file, priv_fh, f))
+ if (cobalt_try_fmt_vid_out(file, priv, f))
return -EINVAL;
if (vb2_is_busy(&s->q) && (pix->pixelformat != s->pixfmt ||
@@ -942,7 +942,7 @@ static int cobalt_s_fmt_vid_out(struct file *file, void *priv_fh,
return 0;
}
-static int cobalt_enum_input(struct file *file, void *priv_fh,
+static int cobalt_enum_input(struct file *file, void *priv,
struct v4l2_input *inp)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -963,7 +963,7 @@ static int cobalt_enum_input(struct file *file, void *priv_fh,
video, g_input_status, &inp->status);
}
-static int cobalt_g_input(struct file *file, void *priv_fh, unsigned int *i)
+static int cobalt_g_input(struct file *file, void *priv, unsigned int *i)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -971,7 +971,7 @@ static int cobalt_g_input(struct file *file, void *priv_fh, unsigned int *i)
return 0;
}
-static int cobalt_s_input(struct file *file, void *priv_fh, unsigned int i)
+static int cobalt_s_input(struct file *file, void *priv, unsigned int i)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -990,7 +990,7 @@ static int cobalt_s_input(struct file *file, void *priv_fh, unsigned int i)
ADV76XX_PAD_HDMI_PORT_A, 0, 0);
}
-static int cobalt_enum_output(struct file *file, void *priv_fh,
+static int cobalt_enum_output(struct file *file, void *priv,
struct v4l2_output *out)
{
if (out->index)
@@ -1001,18 +1001,18 @@ static int cobalt_enum_output(struct file *file, void *priv_fh,
return 0;
}
-static int cobalt_g_output(struct file *file, void *priv_fh, unsigned int *i)
+static int cobalt_g_output(struct file *file, void *priv, unsigned int *i)
{
*i = 0;
return 0;
}
-static int cobalt_s_output(struct file *file, void *priv_fh, unsigned int i)
+static int cobalt_s_output(struct file *file, void *priv, unsigned int i)
{
return i ? -EINVAL : 0;
}
-static int cobalt_g_edid(struct file *file, void *fh, struct v4l2_edid *edid)
+static int cobalt_g_edid(struct file *file, void *priv, struct v4l2_edid *edid)
{
struct cobalt_stream *s = video_drvdata(file);
u32 pad = edid->pad;
@@ -1026,7 +1026,7 @@ static int cobalt_g_edid(struct file *file, void *fh, struct v4l2_edid *edid)
return ret;
}
-static int cobalt_s_edid(struct file *file, void *fh, struct v4l2_edid *edid)
+static int cobalt_s_edid(struct file *file, void *priv, struct v4l2_edid *edid)
{
struct cobalt_stream *s = video_drvdata(file);
u32 pad = edid->pad;
@@ -1050,7 +1050,7 @@ static int cobalt_subscribe_event(struct v4l2_fh *fh,
return v4l2_ctrl_subscribe_event(fh, sub);
}
-static int cobalt_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+static int cobalt_g_parm(struct file *file, void *priv, struct v4l2_streamparm *a)
{
struct cobalt_stream *s = video_drvdata(file);
struct v4l2_fract fps;
@@ -1065,7 +1065,7 @@ static int cobalt_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
return 0;
}
-static int cobalt_g_pixelaspect(struct file *file, void *fh,
+static int cobalt_g_pixelaspect(struct file *file, void *priv,
int type, struct v4l2_fract *f)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -1084,7 +1084,7 @@ static int cobalt_g_pixelaspect(struct file *file, void *fh,
return err;
}
-static int cobalt_g_selection(struct file *file, void *fh,
+static int cobalt_g_selection(struct file *file, void *priv,
struct v4l2_selection *sel)
{
struct cobalt_stream *s = video_drvdata(file);
diff --git a/drivers/media/pci/cx18/cx18-audio.c b/drivers/media/pci/cx18/cx18-audio.c
index 8602d088601b..1464795619f9 100644
--- a/drivers/media/pci/cx18/cx18-audio.c
+++ b/drivers/media/pci/cx18/cx18-audio.c
@@ -4,7 +4,7 @@
*
* Derived from ivtv-audio.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
#include "cx18-driver.h"
diff --git a/drivers/media/pci/cx18/cx18-audio.h b/drivers/media/pci/cx18/cx18-audio.h
index 36ce333ab07a..29cf89d38d60 100644
--- a/drivers/media/pci/cx18/cx18-audio.h
+++ b/drivers/media/pci/cx18/cx18-audio.h
@@ -4,7 +4,7 @@
*
* Derived from ivtv-audio.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
int cx18_audio_set_io(struct cx18 *cx);
diff --git a/drivers/media/pci/cx18/cx18-av-audio.c b/drivers/media/pci/cx18/cx18-av-audio.c
index 78e05df9a7ba..644d8ca4519b 100644
--- a/drivers/media/pci/cx18/cx18-av-audio.c
+++ b/drivers/media/pci/cx18/cx18-av-audio.c
@@ -4,7 +4,7 @@
*
* Derived from cx25840-audio.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-av-core.c b/drivers/media/pci/cx18/cx18-av-core.c
index ee6e71157786..4fb19d26ee29 100644
--- a/drivers/media/pci/cx18/cx18-av-core.c
+++ b/drivers/media/pci/cx18/cx18-av-core.c
@@ -4,7 +4,7 @@
*
* Derived from cx25840-core.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-av-core.h b/drivers/media/pci/cx18/cx18-av-core.h
index 55aceb064b33..71ac9d7af28f 100644
--- a/drivers/media/pci/cx18/cx18-av-core.h
+++ b/drivers/media/pci/cx18/cx18-av-core.h
@@ -4,7 +4,7 @@
*
* Derived from cx25840-core.h
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-av-firmware.c b/drivers/media/pci/cx18/cx18-av-firmware.c
index 61aeb8c9af7f..02dde685a6ad 100644
--- a/drivers/media/pci/cx18/cx18-av-firmware.c
+++ b/drivers/media/pci/cx18/cx18-av-firmware.c
@@ -2,7 +2,7 @@
/*
* cx18 ADEC firmware functions
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-av-vbi.c b/drivers/media/pci/cx18/cx18-av-vbi.c
index 1a113aad9cd4..f9beeaeaa1cb 100644
--- a/drivers/media/pci/cx18/cx18-av-vbi.c
+++ b/drivers/media/pci/cx18/cx18-av-vbi.c
@@ -4,7 +4,7 @@
*
* Derived from cx25840-vbi.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/cx18/cx18-cards.c b/drivers/media/pci/cx18/cx18-cards.c
index f5a30959a367..bddb9e0fffe0 100644
--- a/drivers/media/pci/cx18/cx18-cards.c
+++ b/drivers/media/pci/cx18/cx18-cards.c
@@ -4,7 +4,7 @@
*
* Derived from ivtv-cards.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-cards.h b/drivers/media/pci/cx18/cx18-cards.h
index ae9cf5bfdd59..511123b741d5 100644
--- a/drivers/media/pci/cx18/cx18-cards.h
+++ b/drivers/media/pci/cx18/cx18-cards.h
@@ -4,7 +4,7 @@
*
* Derived from ivtv-cards.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-controls.c b/drivers/media/pci/cx18/cx18-controls.c
index bb5fc120473c..78eadad8b6e8 100644
--- a/drivers/media/pci/cx18/cx18-controls.c
+++ b/drivers/media/pci/cx18/cx18-controls.c
@@ -4,7 +4,7 @@
*
* Derived from ivtv-controls.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
diff --git a/drivers/media/pci/cx18/cx18-controls.h b/drivers/media/pci/cx18/cx18-controls.h
index 458a3595a2ae..99de78878a76 100644
--- a/drivers/media/pci/cx18/cx18-controls.h
+++ b/drivers/media/pci/cx18/cx18-controls.h
@@ -4,7 +4,7 @@
*
* Derived from ivtv-controls.h
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
index 743fcc961374..b62fd12c93c1 100644
--- a/drivers/media/pci/cx18/cx18-driver.c
+++ b/drivers/media/pci/cx18/cx18-driver.c
@@ -4,7 +4,7 @@
*
* Derived from ivtv-driver.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-driver.h b/drivers/media/pci/cx18/cx18-driver.h
index 485ca9747c4c..ef38903709d0 100644
--- a/drivers/media/pci/cx18/cx18-driver.h
+++ b/drivers/media/pci/cx18/cx18-driver.h
@@ -4,7 +4,7 @@
*
* Derived from ivtv-driver.h
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
@@ -414,7 +414,7 @@ static inline struct cx18_open_id *fh2id(struct v4l2_fh *fh)
static inline struct cx18_open_id *file2id(struct file *file)
{
- return fh2id(file->private_data);
+ return fh2id(file_to_v4l2_fh(file));
}
/* forward declaration of struct defined in cx18-cards.h */
diff --git a/drivers/media/pci/cx18/cx18-fileops.c b/drivers/media/pci/cx18/cx18-fileops.c
index cefa91b37f89..4944033dbb20 100644
--- a/drivers/media/pci/cx18/cx18-fileops.c
+++ b/drivers/media/pci/cx18/cx18-fileops.c
@@ -4,7 +4,7 @@
*
* Derived from ivtv-fileops.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
@@ -678,7 +678,7 @@ void cx18_stop_capture(struct cx18_stream *s, int gop_end)
int cx18_v4l2_close(struct file *filp)
{
- struct v4l2_fh *fh = filp->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(filp);
struct cx18_open_id *id = fh2id(fh);
struct cx18 *cx = id->cx;
struct cx18_stream *s = &cx->streams[id->type];
@@ -709,11 +709,11 @@ int cx18_v4l2_close(struct file *filp)
}
if (id->type == CX18_ENC_STREAM_TYPE_YUV &&
- filp->private_data == vdev->queue->owner) {
+ file_to_v4l2_fh(filp) == vdev->queue->owner) {
vb2_queue_release(vdev->queue);
vdev->queue->owner = NULL;
}
- v4l2_fh_del(fh);
+ v4l2_fh_del(fh, filp);
v4l2_fh_exit(fh);
/* 'Unclaim' this stream */
@@ -743,8 +743,7 @@ static int cx18_serialized_open(struct cx18_stream *s, struct file *filp)
item->type = s->type;
item->open_id = cx->open_id++;
- filp->private_data = &item->fh;
- v4l2_fh_add(&item->fh);
+ v4l2_fh_add(&item->fh, filp);
if (item->type == CX18_ENC_STREAM_TYPE_RAD &&
v4l2_fh_is_singular_file(filp)) {
@@ -752,7 +751,7 @@ static int cx18_serialized_open(struct cx18_stream *s, struct file *filp)
if (atomic_read(&cx->ana_capturing) > 0) {
/* switching to radio while capture is
in progress is not polite */
- v4l2_fh_del(&item->fh);
+ v4l2_fh_del(&item->fh, filp);
v4l2_fh_exit(&item->fh);
kfree(item);
return -EBUSY;
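
The cx18 hunks above reflect a v4l2_fh API change this series assumes: v4l2_fh_add() and v4l2_fh_del() take the struct file and manage file->private_data themselves, with file_to_v4l2_fh() replacing open-coded private_data casts. Sketch of the resulting open/close pairing (my_open_fh/my_close_fh are illustrative):

#include <media/v4l2-fh.h>

/* open: v4l2_fh_add() now also sets filp->private_data. */
static void my_open_fh(struct file *filp, struct v4l2_fh *fh)
{
	v4l2_fh_add(fh, filp);	/* was: filp->private_data = fh; v4l2_fh_add(fh); */
}

/* close: look the fh up through the accessor, then tear it down. */
static void my_close_fh(struct file *filp)
{
	struct v4l2_fh *fh = file_to_v4l2_fh(filp);

	v4l2_fh_del(fh, filp);
	v4l2_fh_exit(fh);
}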
diff --git a/drivers/media/pci/cx18/cx18-fileops.h b/drivers/media/pci/cx18/cx18-fileops.h
index 943057b83d94..bc999ea85dc2 100644
--- a/drivers/media/pci/cx18/cx18-fileops.h
+++ b/drivers/media/pci/cx18/cx18-fileops.h
@@ -4,7 +4,7 @@
*
* Derived from ivtv-fileops.h
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
/* Testing/Debugging */
diff --git a/drivers/media/pci/cx18/cx18-firmware.c b/drivers/media/pci/cx18/cx18-firmware.c
index 1b038b2802bf..94e17948fb30 100644
--- a/drivers/media/pci/cx18/cx18-firmware.c
+++ b/drivers/media/pci/cx18/cx18-firmware.c
@@ -2,7 +2,7 @@
/*
* cx18 firmware functions
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-firmware.h b/drivers/media/pci/cx18/cx18-firmware.h
index 1357f76d613e..8e63677284bd 100644
--- a/drivers/media/pci/cx18/cx18-firmware.h
+++ b/drivers/media/pci/cx18/cx18-firmware.h
@@ -2,7 +2,7 @@
/*
* cx18 firmware functions
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
int cx18_firmware_init(struct cx18 *cx);
diff --git a/drivers/media/pci/cx18/cx18-gpio.c b/drivers/media/pci/cx18/cx18-gpio.c
index 485a6cbeb15a..4aea92639599 100644
--- a/drivers/media/pci/cx18/cx18-gpio.c
+++ b/drivers/media/pci/cx18/cx18-gpio.c
@@ -4,7 +4,7 @@
*
* Derived from ivtv-gpio.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-gpio.h b/drivers/media/pci/cx18/cx18-gpio.h
index 8d5797dea7f5..3eefc4644101 100644
--- a/drivers/media/pci/cx18/cx18-gpio.h
+++ b/drivers/media/pci/cx18/cx18-gpio.h
@@ -4,7 +4,7 @@
*
* Derived from ivtv-gpio.h
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-i2c.c b/drivers/media/pci/cx18/cx18-i2c.c
index a83435245251..a1abb0631cae 100644
--- a/drivers/media/pci/cx18/cx18-i2c.c
+++ b/drivers/media/pci/cx18/cx18-i2c.c
@@ -4,7 +4,7 @@
*
* Derived from ivtv-i2c.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-i2c.h b/drivers/media/pci/cx18/cx18-i2c.h
index 4526cb324fec..8ae3125c78c0 100644
--- a/drivers/media/pci/cx18/cx18-i2c.h
+++ b/drivers/media/pci/cx18/cx18-i2c.h
@@ -4,7 +4,7 @@
*
* Derived from ivtv-i2c.h
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
int cx18_i2c_register(struct cx18 *cx, unsigned idx);
diff --git a/drivers/media/pci/cx18/cx18-io.c b/drivers/media/pci/cx18/cx18-io.c
index 50e4e8a598d4..1d3d006e6329 100644
--- a/drivers/media/pci/cx18/cx18-io.c
+++ b/drivers/media/pci/cx18/cx18-io.c
@@ -2,7 +2,7 @@
/*
* cx18 driver PCI memory mapped IO access routines
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-io.h b/drivers/media/pci/cx18/cx18-io.h
index 190b142d047e..1f0bfad7d0f0 100644
--- a/drivers/media/pci/cx18/cx18-io.h
+++ b/drivers/media/pci/cx18/cx18-io.h
@@ -2,7 +2,7 @@
/*
* cx18 driver PCI memory mapped IO access routines
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-ioctl.c b/drivers/media/pci/cx18/cx18-ioctl.c
index 9a1512b1ccaa..0f3019739d03 100644
--- a/drivers/media/pci/cx18/cx18-ioctl.c
+++ b/drivers/media/pci/cx18/cx18-ioctl.c
@@ -4,7 +4,7 @@
*
* Derived from ivtv-ioctl.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
@@ -52,7 +52,7 @@ static const struct v4l2_fmtdesc cx18_formats_mpeg[] = {
static int cx18_g_fmt_vid_cap(struct file *file, void *fh,
struct v4l2_format *fmt)
{
- struct cx18_open_id *id = fh2id(fh);
+ struct cx18_open_id *id = file2id(file);
struct cx18 *cx = id->cx;
struct cx18_stream *s = &cx->streams[id->type];
struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
@@ -76,7 +76,7 @@ static int cx18_g_fmt_vid_cap(struct file *file, void *fh,
static int cx18_try_fmt_vid_cap(struct file *file, void *fh,
struct v4l2_format *fmt)
{
- struct cx18_open_id *id = fh2id(fh);
+ struct cx18_open_id *id = file2id(file);
struct cx18 *cx = id->cx;
struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
int w = pixfmt->width;
@@ -121,7 +121,7 @@ static int cx18_try_fmt_vid_cap(struct file *file, void *fh,
static int cx18_s_fmt_vid_cap(struct file *file, void *fh,
struct v4l2_format *fmt)
{
- struct cx18_open_id *id = fh2id(fh);
+ struct cx18_open_id *id = file2id(file);
struct cx18 *cx = id->cx;
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
@@ -261,7 +261,7 @@ u16 cx18_get_service_set(struct v4l2_sliced_vbi_format *fmt)
static int cx18_g_fmt_vbi_cap(struct file *file, void *fh,
struct v4l2_format *fmt)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
struct v4l2_vbi_format *vbifmt = &fmt->fmt.vbi;
vbifmt->sampling_rate = 27000000;
@@ -280,7 +280,7 @@ static int cx18_g_fmt_vbi_cap(struct file *file, void *fh,
static int cx18_g_fmt_sliced_vbi_cap(struct file *file, void *fh,
struct v4l2_format *fmt)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
/* sane, V4L2 spec compliant, defaults */
@@ -311,7 +311,7 @@ static int cx18_try_fmt_vbi_cap(struct file *file, void *fh,
static int cx18_try_fmt_sliced_vbi_cap(struct file *file, void *fh,
struct v4l2_format *fmt)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
vbifmt->io_size = sizeof(struct v4l2_sliced_vbi_data) * 36;
@@ -330,7 +330,7 @@ static int cx18_try_fmt_sliced_vbi_cap(struct file *file, void *fh,
static int cx18_s_fmt_vbi_cap(struct file *file, void *fh,
struct v4l2_format *fmt)
{
- struct cx18_open_id *id = fh2id(fh);
+ struct cx18_open_id *id = file2id(file);
struct cx18 *cx = id->cx;
int ret;
@@ -360,7 +360,7 @@ static int cx18_s_fmt_vbi_cap(struct file *file, void *fh,
static int cx18_s_fmt_sliced_vbi_cap(struct file *file, void *fh,
struct v4l2_format *fmt)
{
- struct cx18_open_id *id = fh2id(fh);
+ struct cx18_open_id *id = file2id(file);
struct cx18 *cx = id->cx;
int ret;
struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
@@ -392,7 +392,7 @@ static int cx18_s_fmt_sliced_vbi_cap(struct file *file, void *fh,
static int cx18_g_register(struct file *file, void *fh,
struct v4l2_dbg_register *reg)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
if (reg->reg & 0x3)
return -EINVAL;
@@ -406,7 +406,7 @@ static int cx18_g_register(struct file *file, void *fh,
static int cx18_s_register(struct file *file, void *fh,
const struct v4l2_dbg_register *reg)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
if (reg->reg & 0x3)
return -EINVAL;
@@ -420,7 +420,7 @@ static int cx18_s_register(struct file *file, void *fh,
static int cx18_querycap(struct file *file, void *fh,
struct v4l2_capability *vcap)
{
- struct cx18_open_id *id = fh2id(fh);
+ struct cx18_open_id *id = file2id(file);
struct cx18 *cx = id->cx;
strscpy(vcap->driver, CX18_DRIVER_NAME, sizeof(vcap->driver));
@@ -431,14 +431,14 @@ static int cx18_querycap(struct file *file, void *fh,
static int cx18_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
return cx18_get_audio_input(cx, vin->index, vin);
}
static int cx18_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
vin->index = cx->audio_input;
return cx18_get_audio_input(cx, vin->index, vin);
@@ -446,7 +446,7 @@ static int cx18_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
static int cx18_s_audio(struct file *file, void *fh, const struct v4l2_audio *vout)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
if (vout->index >= cx->nof_audio_inputs)
return -EINVAL;
@@ -457,7 +457,7 @@ static int cx18_s_audio(struct file *file, void *fh, const struct v4l2_audio *vo
static int cx18_enum_input(struct file *file, void *fh, struct v4l2_input *vin)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
/* set it to defaults from our table */
return cx18_get_input(cx, vin->index, vin);
@@ -466,7 +466,7 @@ static int cx18_enum_input(struct file *file, void *fh, struct v4l2_input *vin)
static int cx18_g_pixelaspect(struct file *file, void *fh,
int type, struct v4l2_fract *f)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -479,7 +479,7 @@ static int cx18_g_pixelaspect(struct file *file, void *fh,
static int cx18_g_selection(struct file *file, void *fh,
struct v4l2_selection *sel)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -499,7 +499,7 @@ static int cx18_g_selection(struct file *file, void *fh,
static int cx18_enum_fmt_vid_cap(struct file *file, void *fh,
struct v4l2_fmtdesc *fmt)
{
- struct cx18_open_id *id = fh2id(fh);
+ struct cx18_open_id *id = file2id(file);
if (id->type == CX18_ENC_STREAM_TYPE_YUV) {
if (fmt->index >= ARRAY_SIZE(cx18_formats_yuv))
@@ -515,7 +515,7 @@ static int cx18_enum_fmt_vid_cap(struct file *file, void *fh,
static int cx18_g_input(struct file *file, void *fh, unsigned int *i)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
*i = cx->active_input;
return 0;
@@ -523,7 +523,7 @@ static int cx18_g_input(struct file *file, void *fh, unsigned int *i)
int cx18_s_input(struct file *file, void *fh, unsigned int inp)
{
- struct cx18_open_id *id = fh2id(fh);
+ struct cx18_open_id *id = file2id(file);
struct cx18 *cx = id->cx;
v4l2_std_id std = V4L2_STD_ALL;
const struct cx18_card_video_input *card_input =
@@ -561,7 +561,7 @@ int cx18_s_input(struct file *file, void *fh, unsigned int inp)
static int cx18_g_frequency(struct file *file, void *fh,
struct v4l2_frequency *vf)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
if (vf->tuner != 0)
return -EINVAL;
@@ -572,7 +572,7 @@ static int cx18_g_frequency(struct file *file, void *fh,
int cx18_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
{
- struct cx18_open_id *id = fh2id(fh);
+ struct cx18_open_id *id = file2id(file);
struct cx18 *cx = id->cx;
if (vf->tuner != 0)
@@ -587,7 +587,7 @@ int cx18_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *v
static int cx18_g_std(struct file *file, void *fh, v4l2_std_id *std)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
*std = cx->std;
return 0;
@@ -595,7 +595,7 @@ static int cx18_g_std(struct file *file, void *fh, v4l2_std_id *std)
int cx18_s_std(struct file *file, void *fh, v4l2_std_id std)
{
- struct cx18_open_id *id = fh2id(fh);
+ struct cx18_open_id *id = file2id(file);
struct cx18 *cx = id->cx;
if ((std & V4L2_STD_ALL) == 0)
@@ -644,7 +644,7 @@ int cx18_s_std(struct file *file, void *fh, v4l2_std_id std)
static int cx18_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
{
- struct cx18_open_id *id = fh2id(fh);
+ struct cx18_open_id *id = file2id(file);
struct cx18 *cx = id->cx;
if (vt->index != 0)
@@ -656,7 +656,7 @@ static int cx18_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt
static int cx18_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
if (vt->index != 0)
return -EINVAL;
@@ -673,7 +673,7 @@ static int cx18_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
static int cx18_g_sliced_vbi_cap(struct file *file, void *fh,
struct v4l2_sliced_vbi_cap *cap)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
int set = cx->is_50hz ? V4L2_SLICED_VBI_625 : V4L2_SLICED_VBI_525;
int f, l;
@@ -794,7 +794,7 @@ static int cx18_process_idx_data(struct cx18_stream *s, struct cx18_mdl *mdl,
static int cx18_g_enc_index(struct file *file, void *fh,
struct v4l2_enc_idx *idx)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
struct cx18_stream *s = &cx->streams[CX18_ENC_STREAM_TYPE_IDX];
s32 tmp;
struct cx18_mdl *mdl;
@@ -841,7 +841,7 @@ static int cx18_g_enc_index(struct file *file, void *fh,
static int cx18_encoder_cmd(struct file *file, void *fh,
struct v4l2_encoder_cmd *enc)
{
- struct cx18_open_id *id = fh2id(fh);
+ struct cx18_open_id *id = file2id(file);
struct cx18 *cx = id->cx;
u32 h;
@@ -900,7 +900,7 @@ static int cx18_encoder_cmd(struct file *file, void *fh,
static int cx18_try_encoder_cmd(struct file *file, void *fh,
struct v4l2_encoder_cmd *enc)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
switch (enc->cmd) {
case V4L2_ENC_CMD_START:
@@ -932,7 +932,7 @@ static int cx18_try_encoder_cmd(struct file *file, void *fh,
static int cx18_log_status(struct file *file, void *fh)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
struct v4l2_input vidin;
struct v4l2_audio audin;
int i;
@@ -976,7 +976,7 @@ static int cx18_log_status(struct file *file, void *fh)
static long cx18_default(struct file *file, void *fh, bool valid_prio,
unsigned int cmd, void *arg)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
switch (cmd) {
case VIDIOC_INT_RESET: {
diff --git a/drivers/media/pci/cx18/cx18-ioctl.h b/drivers/media/pci/cx18/cx18-ioctl.h
index 221e2400fb3e..97cd9f99e22d 100644
--- a/drivers/media/pci/cx18/cx18-ioctl.h
+++ b/drivers/media/pci/cx18/cx18-ioctl.h
@@ -4,7 +4,7 @@
*
* Derived from ivtv-ioctl.h
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-irq.c b/drivers/media/pci/cx18/cx18-irq.c
index db63077821b1..0ef01e98255d 100644
--- a/drivers/media/pci/cx18/cx18-irq.c
+++ b/drivers/media/pci/cx18/cx18-irq.c
@@ -2,7 +2,7 @@
/*
* cx18 interrupt handling
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-irq.h b/drivers/media/pci/cx18/cx18-irq.h
index fdb585a72827..c1456d69dffa 100644
--- a/drivers/media/pci/cx18/cx18-irq.h
+++ b/drivers/media/pci/cx18/cx18-irq.h
@@ -2,7 +2,7 @@
/*
* cx18 interrupt handling
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-mailbox.c b/drivers/media/pci/cx18/cx18-mailbox.c
index a6457c23d18c..8c70b638a40c 100644
--- a/drivers/media/pci/cx18/cx18-mailbox.c
+++ b/drivers/media/pci/cx18/cx18-mailbox.c
@@ -2,7 +2,7 @@
/*
* cx18 mailbox functions
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-mailbox.h b/drivers/media/pci/cx18/cx18-mailbox.h
index 971382ac0eca..1752b8e1ca13 100644
--- a/drivers/media/pci/cx18/cx18-mailbox.h
+++ b/drivers/media/pci/cx18/cx18-mailbox.h
@@ -2,7 +2,7 @@
/*
* cx18 mailbox functions
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-queue.c b/drivers/media/pci/cx18/cx18-queue.c
index 013694bfcb1c..eeb5513b1d52 100644
--- a/drivers/media/pci/cx18/cx18-queue.c
+++ b/drivers/media/pci/cx18/cx18-queue.c
@@ -4,7 +4,7 @@
*
* Derived from ivtv-queue.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
@@ -379,15 +379,22 @@ int cx18_stream_alloc(struct cx18_stream *s)
break;
}
+ buf->dma_handle = dma_map_single(&s->cx->pci_dev->dev,
+ buf->buf, s->buf_size,
+ s->dma);
+ if (dma_mapping_error(&s->cx->pci_dev->dev, buf->dma_handle)) {
+ kfree(buf->buf);
+ kfree(mdl);
+ kfree(buf);
+ break;
+ }
+
INIT_LIST_HEAD(&mdl->list);
INIT_LIST_HEAD(&mdl->buf_list);
mdl->id = s->mdl_base_idx; /* a somewhat safe value */
cx18_enqueue(s, mdl, &s->q_idle);
INIT_LIST_HEAD(&buf->list);
- buf->dma_handle = dma_map_single(&s->cx->pci_dev->dev,
- buf->buf, s->buf_size,
- s->dma);
cx18_buf_sync_for_cpu(s, buf);
list_add_tail(&buf->list, &s->buf_pool);
}
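
This hunk is a straight application of the DMA-API rule that every dma_map_single() must be checked with dma_mapping_error() before the handle is used, and it moves the mapping ahead of the enqueue so a failure can still unwind cleanly. The general shape, with illustrative names (the real code uses s->dma for the direction):

#include <linux/dma-mapping.h>

static int map_stream_buf(struct device *dev, void *cpu_buf, size_t size,
			  dma_addr_t *handle)
{
	*handle = dma_map_single(dev, cpu_buf, size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;	/* caller frees cpu_buf, as the hunk does */
	return 0;		/* only now may *handle reach the device */
}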
diff --git a/drivers/media/pci/cx18/cx18-queue.h b/drivers/media/pci/cx18/cx18-queue.h
index 26f2097c0496..972f234ffead 100644
--- a/drivers/media/pci/cx18/cx18-queue.h
+++ b/drivers/media/pci/cx18/cx18-queue.h
@@ -4,7 +4,7 @@
*
* Derived from ivtv-queue.h
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-scb.c b/drivers/media/pci/cx18/cx18-scb.c
index 4a0edc1e42e7..670dd9b0a332 100644
--- a/drivers/media/pci/cx18/cx18-scb.c
+++ b/drivers/media/pci/cx18/cx18-scb.c
@@ -2,7 +2,7 @@
/*
* cx18 System Control Block initialization
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-scb.h b/drivers/media/pci/cx18/cx18-scb.h
index 841edc0712ab..900f7291979f 100644
--- a/drivers/media/pci/cx18/cx18-scb.h
+++ b/drivers/media/pci/cx18/cx18-scb.h
@@ -2,7 +2,7 @@
/*
* cx18 System Control Block initialization
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-streams.c b/drivers/media/pci/cx18/cx18-streams.c
index 42d6f7b90ede..48203ba16387 100644
--- a/drivers/media/pci/cx18/cx18-streams.c
+++ b/drivers/media/pci/cx18/cx18-streams.c
@@ -4,7 +4,7 @@
*
* Derived from ivtv-streams.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-streams.h b/drivers/media/pci/cx18/cx18-streams.h
index bba4349c5c2e..e01bed6b4827 100644
--- a/drivers/media/pci/cx18/cx18-streams.h
+++ b/drivers/media/pci/cx18/cx18-streams.h
@@ -4,7 +4,7 @@
*
* Derived from ivtv-streams.h
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-vbi.c b/drivers/media/pci/cx18/cx18-vbi.c
index c7cce38dd754..8dc4ce325935 100644
--- a/drivers/media/pci/cx18/cx18-vbi.c
+++ b/drivers/media/pci/cx18/cx18-vbi.c
@@ -4,7 +4,7 @@
*
* Derived from ivtv-vbi.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
#include "cx18-driver.h"
diff --git a/drivers/media/pci/cx18/cx18-vbi.h b/drivers/media/pci/cx18/cx18-vbi.h
index a5f81c6159f0..41f5026696c4 100644
--- a/drivers/media/pci/cx18/cx18-vbi.h
+++ b/drivers/media/pci/cx18/cx18-vbi.h
@@ -4,7 +4,7 @@
*
* Derived from ivtv-vbi.h
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
void cx18_process_vbi_data(struct cx18 *cx, struct cx18_mdl *mdl,
diff --git a/drivers/media/pci/cx18/cx18-version.h b/drivers/media/pci/cx18/cx18-version.h
index e7396182fc5a..e8636ac5d5a5 100644
--- a/drivers/media/pci/cx18/cx18-version.h
+++ b/drivers/media/pci/cx18/cx18-version.h
@@ -2,7 +2,7 @@
/*
* cx18 driver version information
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
#ifndef CX18_VERSION_H
diff --git a/drivers/media/pci/cx18/cx18-video.c b/drivers/media/pci/cx18/cx18-video.c
index 2fde8c2d3fdc..86cd44053d34 100644
--- a/drivers/media/pci/cx18/cx18-video.c
+++ b/drivers/media/pci/cx18/cx18-video.c
@@ -2,7 +2,7 @@
/*
* cx18 video interface functions
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
#include "cx18-driver.h"
diff --git a/drivers/media/pci/cx18/cx18-video.h b/drivers/media/pci/cx18/cx18-video.h
index f613975ca5f0..ef212f1c5b17 100644
--- a/drivers/media/pci/cx18/cx18-video.h
+++ b/drivers/media/pci/cx18/cx18-video.h
@@ -2,7 +2,7 @@
/*
* cx18 video interface functions
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
void cx18_video_set_io(struct cx18 *cx);
diff --git a/drivers/media/pci/cx18/cx23418.h b/drivers/media/pci/cx18/cx23418.h
index 8859c0e8557f..22486f39bcda 100644
--- a/drivers/media/pci/cx18/cx23418.h
+++ b/drivers/media/pci/cx18/cx23418.h
@@ -2,7 +2,7 @@
/*
* cx18 header containing common defines.
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
#ifndef CX23418_H
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c b/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c
index 6030bd23b4b9..d1fece6210ab 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c
@@ -109,7 +109,7 @@ static int csi2_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
case V4L2_EVENT_FRAME_SYNC:
return v4l2_event_subscribe(fh, sub, 10, NULL);
case V4L2_EVENT_CTRL:
- return v4l2_ctrl_subscribe_event(fh, sub);
+ return v4l2_ctrl_subdev_subscribe_event(sd, fh, sub);
default:
return -EINVAL;
}
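
The likely motivation for this one-liner is that v4l2_ctrl_subscribe_event() resolves the control through fh->ctrl_handler, which may not point at the subdev's handler for a bare subdev file handle, whereas v4l2_ctrl_subdev_subscribe_event() looks it up via sd->ctrl_handler; both helpers are existing V4L2 core API. A sketch of how the patched function is presumably wired up (the ops name here is hypothetical):

static const struct v4l2_subdev_core_ops csi2_sd_core_ops = {
	.subscribe_event   = csi2_subscribe_event,
	.unsubscribe_event = v4l2_event_subdev_unsubscribe,
};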
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c b/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c
index 0a06de5c739c..463a0adf9e13 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c
@@ -81,6 +81,12 @@ unsigned int ipu6_isys_mbus_code_to_mipi(u32 code)
case MEDIA_BUS_FMT_SGRBG8_1X8:
case MEDIA_BUS_FMT_SRGGB8_1X8:
return MIPI_CSI2_DT_RAW8;
+ case MEDIA_BUS_FMT_META_8:
+ case MEDIA_BUS_FMT_META_10:
+ case MEDIA_BUS_FMT_META_12:
+ case MEDIA_BUS_FMT_META_16:
+ case MEDIA_BUS_FMT_META_24:
+ return MIPI_CSI2_DT_EMBEDDED_8B;
default:
/* return unavailable MIPI data type - 0x3f */
WARN_ON(1);
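
With these cases added, the five MEDIA_BUS_FMT_META_* codes resolve to the CSI-2 embedded-data type instead of falling through to the WARN_ON() default that returns the 0x3f placeholder. A usage sketch:

u32 dt = ipu6_isys_mbus_code_to_mipi(MEDIA_BUS_FMT_META_8);
/* before: WARN_ON() + 0x3f; after: dt == MIPI_CSI2_DT_EMBEDDED_8B */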
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-video.c b/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
index 24a2ef93474c..f3f3bc0615e5 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
@@ -1306,7 +1306,6 @@ int ipu6_isys_video_init(struct ipu6_isys_video *av)
__ipu6_isys_vidioc_try_fmt_meta_cap(av, &format_meta);
av->meta_fmt = format_meta.fmt.meta;
- set_bit(V4L2_FL_USES_V4L2_FH, &av->vdev.flags);
video_set_drvdata(&av->vdev, av);
ret = video_register_device(&av->vdev, VFL_TYPE_VIDEO, -1);
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
index 8f346d7da9c8..269a799ec046 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
+++ b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
@@ -148,14 +148,12 @@ static int snd_ivtv_pcm_capture_open(struct snd_pcm_substream *substream)
s = &itv->streams[IVTV_ENC_STREAM_TYPE_PCM];
- v4l2_fh_init(&item.fh, &s->vdev);
item.itv = itv;
item.type = s->type;
/* See if the stream is available */
if (ivtv_claim_stream(&item, item.type)) {
/* No, it's already in use */
- v4l2_fh_exit(&item.fh);
snd_ivtv_unlock(itvsc);
return -EBUSY;
}
diff --git a/drivers/media/pci/ivtv/ivtv-cards.c b/drivers/media/pci/ivtv/ivtv-cards.c
index c8f4ed7ff2c6..f2ccf8e98664 100644
--- a/drivers/media/pci/ivtv/ivtv-cards.c
+++ b/drivers/media/pci/ivtv/ivtv-cards.c
@@ -2,7 +2,7 @@
/*
Functions to query card hardware
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-cards.h b/drivers/media/pci/ivtv/ivtv-cards.h
index c252733df340..af64e55d3b80 100644
--- a/drivers/media/pci/ivtv/ivtv-cards.h
+++ b/drivers/media/pci/ivtv/ivtv-cards.h
@@ -2,7 +2,7 @@
/*
Functions to query card hardware
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-controls.c b/drivers/media/pci/ivtv/ivtv-controls.c
index a0b9a5a5c7f1..f087a12c4ebd 100644
--- a/drivers/media/pci/ivtv/ivtv-controls.c
+++ b/drivers/media/pci/ivtv/ivtv-controls.c
@@ -2,7 +2,7 @@
/*
ioctl control functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-controls.h b/drivers/media/pci/ivtv/ivtv-controls.h
index 444c86a47e5a..d152691ea255 100644
--- a/drivers/media/pci/ivtv/ivtv-controls.h
+++ b/drivers/media/pci/ivtv/ivtv-controls.h
@@ -2,7 +2,7 @@
/*
ioctl control functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index ecc20cd89926..72a8f76a41f4 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
ivtv driver initialization and card probing
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
/* Main Driver file for the ivtv project:
diff --git a/drivers/media/pci/ivtv/ivtv-driver.h b/drivers/media/pci/ivtv/ivtv-driver.h
index a6ffa99e16bc..f1f18911332e 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.h
+++ b/drivers/media/pci/ivtv/ivtv-driver.h
@@ -1,22 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
ivtv driver internal defines and structures
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
#ifndef IVTV_DRIVER_H
@@ -322,6 +309,7 @@ struct ivtv_queue {
};
struct ivtv; /* forward reference */
+struct ivtv_open_id;
struct ivtv_stream {
/* These first four fields are always set, even if the stream
@@ -331,7 +319,7 @@ struct ivtv_stream {
const char *name; /* name of the stream */
int type; /* stream type */
- struct v4l2_fh *fh; /* pointer to the streaming filehandle */
+ struct ivtv_open_id *id; /* pointer to the streaming ivtv_open_id */
spinlock_t qlock; /* locks access to the queues */
unsigned long s_flags; /* status flags, see above */
int dma; /* can be PCI_DMA_TODEVICE, PCI_DMA_FROMDEVICE or PCI_DMA_NONE */
@@ -383,9 +371,9 @@ struct ivtv_open_id {
struct ivtv *itv;
};
-static inline struct ivtv_open_id *fh2id(struct v4l2_fh *fh)
+static inline struct ivtv_open_id *file2id(struct file *filp)
{
- return container_of(fh, struct ivtv_open_id, fh);
+ return container_of(file_to_v4l2_fh(filp), struct ivtv_open_id, fh);
}
struct yuv_frame_info
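
The file2id() above composes the new file_to_v4l2_fh() accessor with the usual container_of() step: given a pointer to the v4l2_fh member embedded in ivtv_open_id, container_of() subtracts the member's offset to recover the enclosing structure. That step is exactly what the removed fh2id() helper did:

static inline struct ivtv_open_id *fh2id(struct v4l2_fh *fh)
{
	return container_of(fh, struct ivtv_open_id, fh);
}

Switching struct ivtv_stream to carry the ivtv_open_id pointer directly then lets the claim/release code compare owners without going through the embedded fh at all.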
diff --git a/drivers/media/pci/ivtv/ivtv-fileops.c b/drivers/media/pci/ivtv/ivtv-fileops.c
index cfa28d035586..ef9ec062c03a 100644
--- a/drivers/media/pci/ivtv/ivtv-fileops.c
+++ b/drivers/media/pci/ivtv/ivtv-fileops.c
@@ -3,7 +3,7 @@
file operation functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
@@ -39,16 +39,16 @@ int ivtv_claim_stream(struct ivtv_open_id *id, int type)
if (test_and_set_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
/* someone already claimed this stream */
- if (s->fh == &id->fh) {
+ if (s->id == id) {
/* yes, this file descriptor did. So that's OK. */
return 0;
}
- if (s->fh == NULL && (type == IVTV_DEC_STREAM_TYPE_VBI ||
+ if (s->id == NULL && (type == IVTV_DEC_STREAM_TYPE_VBI ||
type == IVTV_ENC_STREAM_TYPE_VBI)) {
/* VBI is handled already internally, now also assign
the file descriptor to this stream for external
reading of the stream. */
- s->fh = &id->fh;
+ s->id = id;
IVTV_DEBUG_INFO("Start Read VBI\n");
return 0;
}
@@ -56,7 +56,7 @@ int ivtv_claim_stream(struct ivtv_open_id *id, int type)
IVTV_DEBUG_INFO("Stream %d is busy\n", type);
return -EBUSY;
}
- s->fh = &id->fh;
+ s->id = id;
if (type == IVTV_DEC_STREAM_TYPE_VBI) {
/* Enable reinsertion interrupt */
ivtv_clear_irq_mask(itv, IVTV_IRQ_DEC_VBI_RE_INSERT);
@@ -94,7 +94,7 @@ void ivtv_release_stream(struct ivtv_stream *s)
struct ivtv *itv = s->itv;
struct ivtv_stream *s_vbi;
- s->fh = NULL;
+ s->id = NULL;
if ((s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type == IVTV_ENC_STREAM_TYPE_VBI) &&
test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) {
/* this stream is still in use internally */
@@ -126,7 +126,7 @@ void ivtv_release_stream(struct ivtv_stream *s)
/* was already cleared */
return;
}
- if (s_vbi->fh) {
+ if (s_vbi->id) {
/* VBI stream still claimed by a file descriptor */
return;
}
@@ -359,7 +359,7 @@ static ssize_t ivtv_read(struct ivtv_stream *s, char __user *ubuf, size_t tot_co
size_t tot_written = 0;
int single_frame = 0;
- if (atomic_read(&itv->capturing) == 0 && s->fh == NULL) {
+ if (atomic_read(&itv->capturing) == 0 && s->id == NULL) {
/* shouldn't happen */
IVTV_DEBUG_WARN("Stream %s not initialized before read\n", s->name);
return -EIO;
@@ -502,7 +502,7 @@ int ivtv_start_capture(struct ivtv_open_id *id)
ssize_t ivtv_v4l2_read(struct file * filp, char __user *buf, size_t count, loff_t * pos)
{
- struct ivtv_open_id *id = fh2id(filp->private_data);
+ struct ivtv_open_id *id = file2id(filp);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
ssize_t rc;
@@ -564,7 +564,7 @@ static int ivtv_schedule_dma(struct ivtv_stream *s)
static ssize_t ivtv_write(struct file *filp, const char __user *user_buf, size_t count, loff_t *pos)
{
- struct ivtv_open_id *id = fh2id(filp->private_data);
+ struct ivtv_open_id *id = file2id(filp);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
struct yuv_playback_info *yi = &itv->yuv_info;
@@ -719,7 +719,7 @@ retry:
ssize_t ivtv_v4l2_write(struct file *filp, const char __user *user_buf, size_t count, loff_t *pos)
{
- struct ivtv_open_id *id = fh2id(filp->private_data);
+ struct ivtv_open_id *id = file2id(filp);
struct ivtv *itv = id->itv;
ssize_t res;
@@ -732,7 +732,7 @@ ssize_t ivtv_v4l2_write(struct file *filp, const char __user *user_buf, size_t c
__poll_t ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait)
{
- struct ivtv_open_id *id = fh2id(filp->private_data);
+ struct ivtv_open_id *id = file2id(filp);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
__poll_t res = 0;
@@ -767,7 +767,7 @@ __poll_t ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait)
__poll_t ivtv_v4l2_enc_poll(struct file *filp, poll_table *wait)
{
__poll_t req_events = poll_requested_events(wait);
- struct ivtv_open_id *id = fh2id(filp->private_data);
+ struct ivtv_open_id *id = file2id(filp);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
int eof = test_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
@@ -831,7 +831,7 @@ void ivtv_stop_capture(struct ivtv_open_id *id, int gop_end)
id->type == IVTV_ENC_STREAM_TYPE_VBI) &&
test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) {
/* Also used internally, don't stop capturing */
- s->fh = NULL;
+ s->id = NULL;
}
else {
ivtv_stop_v4l2_encode_stream(s, gop_end);
@@ -877,8 +877,8 @@ static void ivtv_stop_decoding(struct ivtv_open_id *id, int flags, u64 pts)
int ivtv_v4l2_close(struct file *filp)
{
- struct v4l2_fh *fh = filp->private_data;
- struct ivtv_open_id *id = fh2id(fh);
+ struct v4l2_fh *fh = file_to_v4l2_fh(filp);
+ struct ivtv_open_id *id = file2id(filp);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
@@ -911,11 +911,11 @@ int ivtv_v4l2_close(struct file *filp)
ivtv_unmute(itv);
}
- v4l2_fh_del(fh);
+ v4l2_fh_del(fh, filp);
v4l2_fh_exit(fh);
/* Easy case first: this stream was never claimed by us */
- if (s->fh != &id->fh)
+ if (s->id != id)
goto close_done;
/* 'Unclaim' this stream */
@@ -998,9 +998,7 @@ static int ivtv_open(struct file *filp)
v4l2_fh_init(&item->fh, &s->vdev);
item->itv = itv;
item->type = s->type;
-
- filp->private_data = &item->fh;
- v4l2_fh_add(&item->fh);
+ v4l2_fh_add(&item->fh, filp);
if (item->type == IVTV_ENC_STREAM_TYPE_RAD &&
v4l2_fh_is_singular_file(filp)) {
@@ -1008,7 +1006,7 @@ static int ivtv_open(struct file *filp)
if (atomic_read(&itv->capturing) > 0) {
/* switching to radio while capture is
in progress is not polite */
- v4l2_fh_del(&item->fh);
+ v4l2_fh_del(&item->fh, filp);
v4l2_fh_exit(&item->fh);
kfree(item);
return -EBUSY;
diff --git a/drivers/media/pci/ivtv/ivtv-fileops.h b/drivers/media/pci/ivtv/ivtv-fileops.h
index c2c01bba5d03..7bbe42b0030c 100644
--- a/drivers/media/pci/ivtv/ivtv-fileops.h
+++ b/drivers/media/pci/ivtv/ivtv-fileops.h
@@ -2,7 +2,7 @@
/*
file operation functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-firmware.c b/drivers/media/pci/ivtv/ivtv-firmware.c
index 56b25255faf5..1bc873ebef73 100644
--- a/drivers/media/pci/ivtv/ivtv-firmware.c
+++ b/drivers/media/pci/ivtv/ivtv-firmware.c
@@ -3,7 +3,7 @@
ivtv firmware functions.
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-firmware.h b/drivers/media/pci/ivtv/ivtv-firmware.h
index 393e94a8d0e4..ce94b6c385de 100644
--- a/drivers/media/pci/ivtv/ivtv-firmware.h
+++ b/drivers/media/pci/ivtv/ivtv-firmware.h
@@ -3,7 +3,7 @@
ivtv firmware functions.
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-gpio.c b/drivers/media/pci/ivtv/ivtv-gpio.c
index 6434c0d03a6d..d3477e1529c9 100644
--- a/drivers/media/pci/ivtv/ivtv-gpio.c
+++ b/drivers/media/pci/ivtv/ivtv-gpio.c
@@ -3,7 +3,7 @@
gpio functions.
Merging GPIO support into driver:
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-gpio.h b/drivers/media/pci/ivtv/ivtv-gpio.h
index 4ad817173f04..686c9b5e9c19 100644
--- a/drivers/media/pci/ivtv/ivtv-gpio.h
+++ b/drivers/media/pci/ivtv/ivtv-gpio.h
@@ -2,7 +2,7 @@
/*
gpio functions.
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-i2c.c b/drivers/media/pci/ivtv/ivtv-i2c.c
index c052c57c6dce..28cb22d6a892 100644
--- a/drivers/media/pci/ivtv/ivtv-i2c.c
+++ b/drivers/media/pci/ivtv/ivtv-i2c.c
@@ -2,7 +2,7 @@
/*
I2C functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-i2c.h b/drivers/media/pci/ivtv/ivtv-i2c.h
index 2d9cdaa682c5..504bbc1dee25 100644
--- a/drivers/media/pci/ivtv/ivtv-i2c.h
+++ b/drivers/media/pci/ivtv/ivtv-i2c.h
@@ -2,7 +2,7 @@
/*
I2C functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
index 7947dcd615e8..84c73bd22f2d 100644
--- a/drivers/media/pci/ivtv/ivtv-ioctl.c
+++ b/drivers/media/pci/ivtv/ivtv-ioctl.c
@@ -2,7 +2,7 @@
/*
ioctl system call
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
@@ -308,7 +308,7 @@ static int ivtv_video_command(struct ivtv *itv, struct ivtv_open_id *id,
static int ivtv_g_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
vbifmt->reserved[0] = 0;
@@ -330,7 +330,7 @@ static int ivtv_g_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_fo
static int ivtv_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
@@ -353,7 +353,7 @@ static int ivtv_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f
static int ivtv_g_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
struct v4l2_vbi_format *vbifmt = &fmt->fmt.vbi;
vbifmt->sampling_rate = 27000000;
@@ -372,7 +372,7 @@ static int ivtv_g_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *f
static int ivtv_g_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
vbifmt->reserved[0] = 0;
@@ -394,7 +394,7 @@ static int ivtv_g_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_fo
static int ivtv_g_fmt_vid_out(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
@@ -434,8 +434,8 @@ static int ivtv_g_fmt_vid_out(struct file *file, void *fh, struct v4l2_format *f
static int ivtv_g_fmt_vid_out_overlay(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv *itv = fh2id(fh)->itv;
- struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
+ struct ivtv *itv = file2id(file)->itv;
+ struct ivtv_stream *s = &itv->streams[file2id(file)->type];
struct v4l2_window *winfmt = &fmt->fmt.win;
if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
@@ -461,7 +461,7 @@ static int ivtv_try_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_
static int ivtv_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
int w = fmt->fmt.pix.width;
int h = fmt->fmt.pix.height;
@@ -490,7 +490,7 @@ static int ivtv_try_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format
static int ivtv_try_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
if (id->type == IVTV_DEC_STREAM_TYPE_VBI)
@@ -510,7 +510,7 @@ static int ivtv_try_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_
static int ivtv_try_fmt_vid_out(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
s32 w = fmt->fmt.pix.width;
s32 h = fmt->fmt.pix.height;
int field = fmt->fmt.pix.field;
@@ -544,8 +544,8 @@ static int ivtv_try_fmt_vid_out(struct file *file, void *fh, struct v4l2_format
static int ivtv_try_fmt_vid_out_overlay(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv *itv = fh2id(fh)->itv;
- struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
+ struct ivtv *itv = file2id(file)->itv;
+ struct ivtv_stream *s = &itv->streams[file2id(file)->type];
u32 chromakey = fmt->fmt.win.chromakey;
u8 global_alpha = fmt->fmt.win.global_alpha;
@@ -566,7 +566,7 @@ static int ivtv_s_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_fo
static int ivtv_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
@@ -597,7 +597,7 @@ static int ivtv_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f
static int ivtv_s_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
if (!ivtv_raw_vbi(itv) && atomic_read(&itv->capturing) > 0)
return -EBUSY;
@@ -610,7 +610,7 @@ static int ivtv_s_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *f
static int ivtv_s_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
int ret = ivtv_try_fmt_sliced_vbi_cap(file, fh, fmt);
@@ -628,7 +628,7 @@ static int ivtv_s_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_fo
static int ivtv_s_fmt_vid_out(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
struct yuv_playback_info *yi = &itv->yuv_info;
int ret = ivtv_try_fmt_vid_out(file, fh, fmt);
@@ -673,7 +673,7 @@ static int ivtv_s_fmt_vid_out(struct file *file, void *fh, struct v4l2_format *f
static int ivtv_s_fmt_vid_out_overlay(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
int ret = ivtv_try_fmt_vid_out_overlay(file, fh, fmt);
if (ret == 0) {
@@ -710,7 +710,7 @@ static int ivtv_itvc(struct ivtv *itv, bool get, u64 reg, u64 *val)
static int ivtv_g_register(struct file *file, void *fh, struct v4l2_dbg_register *reg)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
reg->size = 4;
return ivtv_itvc(itv, true, reg->reg, &reg->val);
@@ -718,7 +718,7 @@ static int ivtv_g_register(struct file *file, void *fh, struct v4l2_dbg_register
static int ivtv_s_register(struct file *file, void *fh, const struct v4l2_dbg_register *reg)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
u64 val = reg->val;
return ivtv_itvc(itv, false, reg->reg, &val);
@@ -727,7 +727,7 @@ static int ivtv_s_register(struct file *file, void *fh, const struct v4l2_dbg_re
static int ivtv_querycap(struct file *file, void *fh, struct v4l2_capability *vcap)
{
- struct ivtv_open_id *id = fh2id(file->private_data);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
strscpy(vcap->driver, IVTV_DRIVER_NAME, sizeof(vcap->driver));
@@ -738,14 +738,14 @@ static int ivtv_querycap(struct file *file, void *fh, struct v4l2_capability *vc
static int ivtv_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
return ivtv_get_audio_input(itv, vin->index, vin);
}
static int ivtv_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
vin->index = itv->audio_input;
return ivtv_get_audio_input(itv, vin->index, vin);
@@ -753,7 +753,7 @@ static int ivtv_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
static int ivtv_s_audio(struct file *file, void *fh, const struct v4l2_audio *vout)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
if (vout->index >= itv->nof_audio_inputs)
return -EINVAL;
@@ -766,7 +766,7 @@ static int ivtv_s_audio(struct file *file, void *fh, const struct v4l2_audio *vo
static int ivtv_enumaudout(struct file *file, void *fh, struct v4l2_audioout *vin)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
/* set it to defaults from our table */
return ivtv_get_audio_output(itv, vin->index, vin);
@@ -774,7 +774,7 @@ static int ivtv_enumaudout(struct file *file, void *fh, struct v4l2_audioout *vi
static int ivtv_g_audout(struct file *file, void *fh, struct v4l2_audioout *vin)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
vin->index = 0;
return ivtv_get_audio_output(itv, vin->index, vin);
@@ -782,7 +782,7 @@ static int ivtv_g_audout(struct file *file, void *fh, struct v4l2_audioout *vin)
static int ivtv_s_audout(struct file *file, void *fh, const struct v4l2_audioout *vout)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
if (itv->card->video_outputs == NULL || vout->index != 0)
return -EINVAL;
@@ -791,7 +791,7 @@ static int ivtv_s_audout(struct file *file, void *fh, const struct v4l2_audioout
static int ivtv_enum_input(struct file *file, void *fh, struct v4l2_input *vin)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
/* set it to defaults from our table */
return ivtv_get_input(itv, vin->index, vin);
@@ -799,7 +799,7 @@ static int ivtv_enum_input(struct file *file, void *fh, struct v4l2_input *vin)
static int ivtv_enum_output(struct file *file, void *fh, struct v4l2_output *vout)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
return ivtv_get_output(itv, vout->index, vout);
}
@@ -807,7 +807,7 @@ static int ivtv_enum_output(struct file *file, void *fh, struct v4l2_output *vou
static int ivtv_g_pixelaspect(struct file *file, void *fh,
int type, struct v4l2_fract *f)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
@@ -825,7 +825,7 @@ static int ivtv_g_pixelaspect(struct file *file, void *fh,
static int ivtv_s_selection(struct file *file, void *fh,
struct v4l2_selection *sel)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
struct yuv_playback_info *yi = &itv->yuv_info;
struct v4l2_rect r = { 0, 0, 720, 0 };
@@ -868,7 +868,7 @@ static int ivtv_s_selection(struct file *file, void *fh,
static int ivtv_g_selection(struct file *file, void *fh,
struct v4l2_selection *sel)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
struct yuv_playback_info *yi = &itv->yuv_info;
struct v4l2_rect r = { 0, 0, 720, 0 };
@@ -924,8 +924,8 @@ static int ivtv_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdes
.description = "MPEG",
.pixelformat = V4L2_PIX_FMT_MPEG,
};
- struct ivtv *itv = fh2id(fh)->itv;
- struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
+ struct ivtv *itv = file2id(file)->itv;
+ struct ivtv_stream *s = &itv->streams[file2id(file)->type];
if (fmt->index)
return -EINVAL;
@@ -951,8 +951,8 @@ static int ivtv_enum_fmt_vid_out(struct file *file, void *fh, struct v4l2_fmtdes
.description = "MPEG",
.pixelformat = V4L2_PIX_FMT_MPEG,
};
- struct ivtv *itv = fh2id(fh)->itv;
- struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
+ struct ivtv *itv = file2id(file)->itv;
+ struct ivtv_stream *s = &itv->streams[file2id(file)->type];
if (fmt->index)
return -EINVAL;
@@ -967,7 +967,7 @@ static int ivtv_enum_fmt_vid_out(struct file *file, void *fh, struct v4l2_fmtdes
static int ivtv_g_input(struct file *file, void *fh, unsigned int *i)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
*i = itv->active_input;
@@ -976,7 +976,7 @@ static int ivtv_g_input(struct file *file, void *fh, unsigned int *i)
int ivtv_s_input(struct file *file, void *fh, unsigned int inp)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
v4l2_std_id std;
int i;
@@ -1019,7 +1019,7 @@ int ivtv_s_input(struct file *file, void *fh, unsigned int inp)
static int ivtv_g_output(struct file *file, void *fh, unsigned int *i)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
return -EINVAL;
@@ -1031,7 +1031,7 @@ static int ivtv_g_output(struct file *file, void *fh, unsigned int *i)
static int ivtv_s_output(struct file *file, void *fh, unsigned int outp)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
if (outp >= itv->card->nof_outputs)
return -EINVAL;
@@ -1053,8 +1053,8 @@ static int ivtv_s_output(struct file *file, void *fh, unsigned int outp)
static int ivtv_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
{
- struct ivtv *itv = fh2id(fh)->itv;
- struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
+ struct ivtv *itv = file2id(file)->itv;
+ struct ivtv_stream *s = &itv->streams[file2id(file)->type];
if (s->vdev.vfl_dir)
return -ENOTTY;
@@ -1067,8 +1067,8 @@ static int ivtv_g_frequency(struct file *file, void *fh, struct v4l2_frequency *
int ivtv_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
{
- struct ivtv *itv = fh2id(fh)->itv;
- struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
+ struct ivtv *itv = file2id(file)->itv;
+ struct ivtv_stream *s = &itv->streams[file2id(file)->type];
if (s->vdev.vfl_dir)
return -ENOTTY;
@@ -1084,7 +1084,7 @@ int ivtv_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *v
static int ivtv_g_std(struct file *file, void *fh, v4l2_std_id *std)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
*std = itv->std;
return 0;
@@ -1157,7 +1157,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id std)
static int ivtv_s_std(struct file *file, void *fh, v4l2_std_id std)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
if ((std & V4L2_STD_ALL) == 0)
return -EINVAL;
@@ -1185,7 +1185,7 @@ static int ivtv_s_std(struct file *file, void *fh, v4l2_std_id std)
static int ivtv_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
if (vt->index != 0)
@@ -1198,7 +1198,7 @@ static int ivtv_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt
static int ivtv_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
if (vt->index != 0)
return -EINVAL;
@@ -1214,7 +1214,7 @@ static int ivtv_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
static int ivtv_g_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_sliced_vbi_cap *cap)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
int set = itv->is_50hz ? V4L2_SLICED_VBI_625 : V4L2_SLICED_VBI_525;
int f, l;
@@ -1249,7 +1249,7 @@ static int ivtv_g_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_sliced
static int ivtv_g_enc_index(struct file *file, void *fh, struct v4l2_enc_idx *idx)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
struct v4l2_enc_idx_entry *e = idx->entry;
int entries;
int i;
@@ -1275,7 +1275,7 @@ static int ivtv_g_enc_index(struct file *file, void *fh, struct v4l2_enc_idx *id
static int ivtv_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder_cmd *enc)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
@@ -1327,7 +1327,7 @@ static int ivtv_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder_cmd
static int ivtv_try_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder_cmd *enc)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
switch (enc->cmd) {
case V4L2_ENC_CMD_START:
@@ -1357,8 +1357,8 @@ static int ivtv_try_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder
static int ivtv_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *fb)
{
- struct ivtv *itv = fh2id(fh)->itv;
- struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
+ struct ivtv *itv = file2id(file)->itv;
+ struct ivtv_stream *s = &itv->streams[file2id(file)->type];
u32 data[CX2341X_MBOX_MAX_DATA];
struct yuv_playback_info *yi = &itv->yuv_info;
@@ -1444,9 +1444,9 @@ static int ivtv_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *fb)
static int ivtv_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffer *fb)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
- struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
+ struct ivtv_stream *s = &itv->streams[file2id(file)->type];
struct yuv_playback_info *yi = &itv->yuv_info;
if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
@@ -1465,9 +1465,9 @@ static int ivtv_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffe
static int ivtv_overlay(struct file *file, void *fh, unsigned int on)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
- struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
+ struct ivtv_stream *s = &itv->streams[file2id(file)->type];
if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
return -ENOTTY;
@@ -1492,7 +1492,7 @@ static int ivtv_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subs
static int ivtv_log_status(struct file *file, void *fh)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
u32 data[CX2341X_MBOX_MAX_DATA];
int has_output = itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT;
@@ -1584,7 +1584,7 @@ static int ivtv_log_status(struct file *file, void *fh)
static int ivtv_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *dec)
{
- struct ivtv_open_id *id = fh2id(file->private_data);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
IVTV_DEBUG_IOCTL("VIDIOC_DECODER_CMD %d\n", dec->cmd);
@@ -1593,7 +1593,7 @@ static int ivtv_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd
static int ivtv_try_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *dec)
{
- struct ivtv_open_id *id = fh2id(file->private_data);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
IVTV_DEBUG_IOCTL("VIDIOC_TRY_DECODER_CMD %d\n", dec->cmd);
@@ -1602,7 +1602,7 @@ static int ivtv_try_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder
static int ivtv_decoder_ioctls(struct file *filp, unsigned int cmd, void *arg)
{
- struct ivtv_open_id *id = fh2id(filp->private_data);
+ struct ivtv_open_id *id = file2id(filp);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
@@ -1645,7 +1645,7 @@ static int ivtv_decoder_ioctls(struct file *filp, unsigned int cmd, void *arg)
static long ivtv_default(struct file *file, void *fh, bool valid_prio,
unsigned int cmd, void *arg)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
if (!valid_prio) {
switch (cmd) {
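
The fh2id() -> file2id() conversions above track this series' move away from stashing driver state in file->private_data. file2id() itself is not shown in this excerpt; judging by the analogous helpers added for saa7164 and amphion below, it presumably reduces to a container_of() over file_to_v4l2_fh(), along these lines (a sketch, assuming struct ivtv_open_id embeds its v4l2_fh as 'fh'):

	static inline struct ivtv_open_id *file2id(struct file *file)
	{
		return container_of(file_to_v4l2_fh(file),
				    struct ivtv_open_id, fh);
	}

This also explains why the now-unused void *fh ioctl argument stays in the prototypes: the V4L2 ioctl signatures still pass it, but the driver stops trusting it.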
diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.h b/drivers/media/pci/ivtv/ivtv-ioctl.h
index 42c2516379fc..7f8c6f43d397 100644
--- a/drivers/media/pci/ivtv/ivtv-ioctl.h
+++ b/drivers/media/pci/ivtv/ivtv-ioctl.h
@@ -2,7 +2,7 @@
/*
ioctl system call
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-irq.c b/drivers/media/pci/ivtv/ivtv-irq.c
index 748c14e87963..05e0293b4d44 100644
--- a/drivers/media/pci/ivtv/ivtv-irq.c
+++ b/drivers/media/pci/ivtv/ivtv-irq.c
@@ -2,7 +2,7 @@
/* interrupt handling
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
@@ -305,7 +305,7 @@ static void dma_post(struct ivtv_stream *s)
ivtv_process_vbi_data(itv, buf, 0, s->type);
s->q_dma.bytesused += buf->bytesused;
}
- if (s->fh == NULL) {
+ if (s->id == NULL) {
ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
return;
}
@@ -330,7 +330,7 @@ static void dma_post(struct ivtv_stream *s)
set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
}
- if (s->fh)
+ if (s->id)
wake_up(&s->waitq);
}
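
The s->fh -> s->id rename in dma_post() suggests struct ivtv_stream now records the owning ivtv_open_id directly rather than its v4l2_fh; the NULL check keeps the same meaning ("does anyone have this stream open for reading"). The member change presumably looks like this (hypothetical, not part of this excerpt):

	struct ivtv_stream {
		...
	-	struct v4l2_fh *fh;		/* owner's file handle */
	+	struct ivtv_open_id *id;	/* owner's per-open state */
		...
	};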
@@ -351,7 +351,7 @@ void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
/* Insert buffer block for YUV if needed */
if (s->type == IVTV_DEC_STREAM_TYPE_YUV && f->offset_y) {
- if (yi->blanking_dmaptr) {
+ if (yi->blanking_ptr) {
s->sg_pending[idx].src = yi->blanking_dmaptr;
s->sg_pending[idx].dst = offset;
s->sg_pending[idx].size = 720 * 16;
diff --git a/drivers/media/pci/ivtv/ivtv-irq.h b/drivers/media/pci/ivtv/ivtv-irq.h
index b8b0703a1c82..8a780bea7de4 100644
--- a/drivers/media/pci/ivtv/ivtv-irq.h
+++ b/drivers/media/pci/ivtv/ivtv-irq.h
@@ -3,7 +3,7 @@
interrupt handling
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-mailbox.c b/drivers/media/pci/ivtv/ivtv-mailbox.c
index d3fdaaa903f1..cd7c9f2d473f 100644
--- a/drivers/media/pci/ivtv/ivtv-mailbox.c
+++ b/drivers/media/pci/ivtv/ivtv-mailbox.c
@@ -3,7 +3,7 @@
mailbox functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-mailbox.h b/drivers/media/pci/ivtv/ivtv-mailbox.h
index 537c90437e1d..364e7f51508e 100644
--- a/drivers/media/pci/ivtv/ivtv-mailbox.h
+++ b/drivers/media/pci/ivtv/ivtv-mailbox.h
@@ -2,7 +2,7 @@
/*
mailbox functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-queue.c b/drivers/media/pci/ivtv/ivtv-queue.c
index f9b192ab7e7c..f7d2d159d800 100644
--- a/drivers/media/pci/ivtv/ivtv-queue.c
+++ b/drivers/media/pci/ivtv/ivtv-queue.c
@@ -3,7 +3,7 @@
buffer queues.
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-queue.h b/drivers/media/pci/ivtv/ivtv-queue.h
index 983e99642364..9619745d6de1 100644
--- a/drivers/media/pci/ivtv/ivtv-queue.h
+++ b/drivers/media/pci/ivtv/ivtv-queue.h
@@ -3,7 +3,7 @@
buffer queues.
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-routing.c b/drivers/media/pci/ivtv/ivtv-routing.c
index 57d4d5a3cb87..b1dfc2e96d91 100644
--- a/drivers/media/pci/ivtv/ivtv-routing.c
+++ b/drivers/media/pci/ivtv/ivtv-routing.c
@@ -2,7 +2,7 @@
/*
Audio/video-routing-related ivtv functions.
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-routing.h b/drivers/media/pci/ivtv/ivtv-routing.h
index e4a0ae0694d2..69ddb66ef26f 100644
--- a/drivers/media/pci/ivtv/ivtv-routing.h
+++ b/drivers/media/pci/ivtv/ivtv-routing.h
@@ -2,7 +2,7 @@
/*
Audio/video-routing-related ivtv functions.
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c
index ac085925d3cb..d98fe0c9d9f1 100644
--- a/drivers/media/pci/ivtv/ivtv-streams.c
+++ b/drivers/media/pci/ivtv/ivtv-streams.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
init/start/stop/exit stream functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
/* License: GPL
diff --git a/drivers/media/pci/ivtv/ivtv-streams.h b/drivers/media/pci/ivtv/ivtv-streams.h
index 5f35c57fcdfd..43d4ecd6dd6f 100644
--- a/drivers/media/pci/ivtv/ivtv-streams.h
+++ b/drivers/media/pci/ivtv/ivtv-streams.h
@@ -2,7 +2,7 @@
/*
init/start/stop/exit stream functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-udma.c b/drivers/media/pci/ivtv/ivtv-udma.c
index f467a00492f4..7dedf04f9f87 100644
--- a/drivers/media/pci/ivtv/ivtv-udma.c
+++ b/drivers/media/pci/ivtv/ivtv-udma.c
@@ -4,7 +4,7 @@
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-udma.h b/drivers/media/pci/ivtv/ivtv-udma.h
index 12b9426b2db2..3030fadfdbc7 100644
--- a/drivers/media/pci/ivtv/ivtv-udma.h
+++ b/drivers/media/pci/ivtv/ivtv-udma.h
@@ -2,7 +2,7 @@
/*
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2006-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2006-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-vbi.c b/drivers/media/pci/ivtv/ivtv-vbi.c
index 80478b026d75..ae7a00f46257 100644
--- a/drivers/media/pci/ivtv/ivtv-vbi.c
+++ b/drivers/media/pci/ivtv/ivtv-vbi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
Vertical Blank Interval support functions
- Copyright (C) 2004-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2004-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-vbi.h b/drivers/media/pci/ivtv/ivtv-vbi.h
index 780f73d2ab6b..12fe27da544b 100644
--- a/drivers/media/pci/ivtv/ivtv-vbi.h
+++ b/drivers/media/pci/ivtv/ivtv-vbi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
Vertical Blank Interval support functions
- Copyright (C) 2004-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2004-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-version.h b/drivers/media/pci/ivtv/ivtv-version.h
index 996f1871e49c..21e26d1f66b8 100644
--- a/drivers/media/pci/ivtv/ivtv-version.h
+++ b/drivers/media/pci/ivtv/ivtv-version.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
ivtv driver version information
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c
index 2d9274537725..71f040106647 100644
--- a/drivers/media/pci/ivtv/ivtv-yuv.c
+++ b/drivers/media/pci/ivtv/ivtv-yuv.c
@@ -125,7 +125,7 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
ivtv_udma_fill_sg_array(dma, y_buffer_offset, uv_buffer_offset, y_size);
/* If we've offset the y plane, ensure top area is blanked */
- if (f->offset_y && yi->blanking_dmaptr) {
+ if (f->offset_y && yi->blanking_ptr) {
dma->SGarray[dma->SG_length].size = cpu_to_le32(720*16);
dma->SGarray[dma->SG_length].src = cpu_to_le32(yi->blanking_dmaptr);
dma->SGarray[dma->SG_length].dst = cpu_to_le32(IVTV_DECODER_OFFSET + yuv_offset[frame]);
@@ -929,6 +929,12 @@ static void ivtv_yuv_init(struct ivtv *itv)
yi->blanking_dmaptr = dma_map_single(&itv->pdev->dev,
yi->blanking_ptr,
720 * 16, DMA_TO_DEVICE);
+ if (dma_mapping_error(&itv->pdev->dev, yi->blanking_dmaptr)) {
+ kfree(yi->blanking_ptr);
+ yi->blanking_ptr = NULL;
+ yi->blanking_dmaptr = 0;
+ IVTV_DEBUG_WARN("Failed to dma_map yuv blanking buffer\n");
+ }
} else {
yi->blanking_dmaptr = 0;
IVTV_DEBUG_WARN("Failed to allocate yuv blanking buffer\n");
diff --git a/drivers/media/pci/mgb4/mgb4_trigger.c b/drivers/media/pci/mgb4/mgb4_trigger.c
index 923650d53d4c..d7dddc5c8728 100644
--- a/drivers/media/pci/mgb4/mgb4_trigger.c
+++ b/drivers/media/pci/mgb4/mgb4_trigger.c
@@ -91,7 +91,7 @@ static irqreturn_t trigger_handler(int irq, void *p)
struct {
u32 data;
s64 ts __aligned(8);
- } scan;
+ } scan = { };
scan.data = mgb4_read_reg(&st->mgbdev->video, 0xA0);
mgb4_write_reg(&st->mgbdev->video, 0xA0, scan.data);
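
The scan = { } change closes a potential kernel-stack infoleak: the struct has four bytes of implicit padding between the u32 and the 8-byte-aligned s64 timestamp, and since the whole object is presumably pushed to an IIO buffer and on to userspace, uninitialized padding would leak stack contents. Illustrated:

	struct {
		u32 data;		/* 4 bytes */
					/* 4 bytes of implicit padding */
		s64 ts __aligned(8);	/* timestamp slot */
	} scan = { };			/* zeroes the whole object, padding included */

Assigning the members individually would not be enough, as member assignment never touches padding bytes.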
diff --git a/drivers/media/pci/mgb4/mgb4_vin.c b/drivers/media/pci/mgb4/mgb4_vin.c
index 989e93f67f75..42c327bc50e1 100644
--- a/drivers/media/pci/mgb4/mgb4_vin.c
+++ b/drivers/media/pci/mgb4/mgb4_vin.c
@@ -610,8 +610,7 @@ static int vidioc_s_dv_timings(struct file *file, void *fh,
timings->bt.height < video_timings_cap.bt.min_height ||
timings->bt.height > video_timings_cap.bt.max_height)
return -EINVAL;
- if (timings->bt.width == vindev->timings.bt.width &&
- timings->bt.height == vindev->timings.bt.height)
+ if (v4l2_match_dv_timings(timings, &vindev->timings, 0, false))
return 0;
if (vb2_is_busy(&vindev->queue))
return -EBUSY;
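
Using v4l2_match_dv_timings() makes the "nothing to do" early-out compare the complete timings (pixel clock, blanking, sync polarities), not just the active width and height, so two modes with equal resolution but different refresh rates are no longer treated as identical. The helper's signature, for reference:

	bool v4l2_match_dv_timings(const struct v4l2_dv_timings *t1,
				   const struct v4l2_dv_timings *t2,
				   unsigned pclock_delta,
				   bool match_reduced_fps);

The call here passes pclock_delta = 0 (exact pixel-clock match) and match_reduced_fps = false (the REDUCED_FPS flag is not required to match).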
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index c88939bce56b..4a51b873e47a 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -1302,7 +1302,7 @@ static int saa7134_g_pixelaspect(struct file *file, void *priv,
return 0;
}
-static int saa7134_g_selection(struct file *file, void *f, struct v4l2_selection *sel)
+static int saa7134_g_selection(struct file *file, void *priv, struct v4l2_selection *sel)
{
struct saa7134_dev *dev = video_drvdata(file);
@@ -1325,7 +1325,7 @@ static int saa7134_g_selection(struct file *file, void *f, struct v4l2_selection
return 0;
}
-static int saa7134_s_selection(struct file *file, void *f, struct v4l2_selection *sel)
+static int saa7134_s_selection(struct file *file, void *priv, struct v4l2_selection *sel)
{
struct saa7134_dev *dev = video_drvdata(file);
struct v4l2_rect *b = &dev->crop_bounds;
diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c
index bf73e9e83f52..66d650b5f69a 100644
--- a/drivers/media/pci/saa7164/saa7164-encoder.c
+++ b/drivers/media/pci/saa7164/saa7164-encoder.c
@@ -219,7 +219,7 @@ int saa7164_s_std(struct saa7164_port *port, v4l2_std_id id)
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
return saa7164_s_std(fh->port, id);
}
@@ -232,7 +232,7 @@ int saa7164_g_std(struct saa7164_port *port, v4l2_std_id *id)
static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
return saa7164_g_std(fh->port, id);
}
@@ -277,7 +277,7 @@ int saa7164_g_input(struct saa7164_port *port, unsigned int *i)
static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
return saa7164_g_input(fh->port, i);
}
@@ -301,14 +301,14 @@ int saa7164_s_input(struct saa7164_port *port, unsigned int i)
static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
return saa7164_s_input(fh->port, i);
}
int saa7164_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
struct saa7164_port *port = fh->port;
struct saa7164_dev *dev = port->dev;
@@ -347,7 +347,7 @@ int saa7164_g_frequency(struct saa7164_port *port, struct v4l2_frequency *f)
static int vidioc_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *f)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
return saa7164_g_frequency(fh->port, f);
}
@@ -400,7 +400,7 @@ int saa7164_s_frequency(struct saa7164_port *port,
static int vidioc_s_frequency(struct file *file, void *priv,
const struct v4l2_frequency *f)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
return saa7164_s_frequency(fh->port, f);
}
@@ -483,7 +483,7 @@ static int saa7164_s_ctrl(struct v4l2_ctrl *ctrl)
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
struct saa7164_port *port = fh->port;
struct saa7164_dev *dev = port->dev;
@@ -510,7 +510,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
struct saa7164_port *port = fh->port;
f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
@@ -725,15 +725,14 @@ static int fops_open(struct file *file)
fh->port = port;
v4l2_fh_init(&fh->fh, video_devdata(file));
- v4l2_fh_add(&fh->fh);
- file->private_data = fh;
+ v4l2_fh_add(&fh->fh, file);
return 0;
}
static int fops_release(struct file *file)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
struct saa7164_port *port = fh->port;
struct saa7164_dev *dev = port->dev;
@@ -747,7 +746,7 @@ static int fops_release(struct file *file)
}
}
- v4l2_fh_del(&fh->fh);
+ v4l2_fh_del(&fh->fh, file);
v4l2_fh_exit(&fh->fh);
kfree(fh);
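
The open/release changes show the new v4l2_fh core API the whole series builds on: v4l2_fh_add() now takes the struct file and presumably sets file->private_data itself, and v4l2_fh_del() takes the same file to undo it, so drivers no longer manage private_data by hand. Before and after, schematically:

	/* before */
	v4l2_fh_add(&fh->fh);
	file->private_data = fh;
	...
	v4l2_fh_del(&fh->fh);

	/* after */
	v4l2_fh_add(&fh->fh, file);
	...
	v4l2_fh_del(&fh->fh, file);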
@@ -787,7 +786,7 @@ saa7164_user_buffer *saa7164_enc_next_buf(struct saa7164_port *port)
static ssize_t fops_read(struct file *file, char __user *buffer,
size_t count, loff_t *pos)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
struct saa7164_port *port = fh->port;
struct saa7164_user_buffer *ubuf = NULL;
struct saa7164_dev *dev = port->dev;
@@ -893,8 +892,7 @@ err:
static __poll_t fops_poll(struct file *file, poll_table *wait)
{
__poll_t req_events = poll_requested_events(wait);
- struct saa7164_encoder_fh *fh =
- (struct saa7164_encoder_fh *)file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
struct saa7164_port *port = fh->port;
__poll_t mask = v4l2_ctrl_poll(file, wait);
diff --git a/drivers/media/pci/saa7164/saa7164-vbi.c b/drivers/media/pci/saa7164/saa7164-vbi.c
index ac958a5fca78..57e4362c0d19 100644
--- a/drivers/media/pci/saa7164/saa7164-vbi.c
+++ b/drivers/media/pci/saa7164/saa7164-vbi.c
@@ -144,28 +144,28 @@ static int saa7164_vbi_initialize(struct saa7164_port *port)
/* -- V4L2 --------------------------------------------------------- */
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id)
{
- struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_vbi_fh *fh = to_saa7164_vbi_fh(file);
return saa7164_s_std(fh->port->enc_port, id);
}
static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_vbi_fh *fh = to_saa7164_vbi_fh(file);
return saa7164_g_std(fh->port->enc_port, id);
}
static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
- struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_vbi_fh *fh = to_saa7164_vbi_fh(file);
return saa7164_g_input(fh->port->enc_port, i);
}
static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
{
- struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_vbi_fh *fh = to_saa7164_vbi_fh(file);
return saa7164_s_input(fh->port->enc_port, i);
}
@@ -173,7 +173,7 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
static int vidioc_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *f)
{
- struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_vbi_fh *fh = to_saa7164_vbi_fh(file);
return saa7164_g_frequency(fh->port->enc_port, f);
}
@@ -181,7 +181,7 @@ static int vidioc_g_frequency(struct file *file, void *priv,
static int vidioc_s_frequency(struct file *file, void *priv,
const struct v4l2_frequency *f)
{
- struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_vbi_fh *fh = to_saa7164_vbi_fh(file);
int ret = saa7164_s_frequency(fh->port->enc_port, f);
if (ret == 0)
@@ -192,7 +192,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_vbi_fh *fh = to_saa7164_vbi_fh(file);
struct saa7164_port *port = fh->port;
struct saa7164_dev *dev = port->dev;
@@ -428,15 +428,14 @@ static int fops_open(struct file *file)
fh->port = port;
v4l2_fh_init(&fh->fh, video_devdata(file));
- v4l2_fh_add(&fh->fh);
- file->private_data = fh;
+ v4l2_fh_add(&fh->fh, file);
return 0;
}
static int fops_release(struct file *file)
{
- struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_vbi_fh *fh = to_saa7164_vbi_fh(file);
struct saa7164_port *port = fh->port;
struct saa7164_dev *dev = port->dev;
@@ -450,7 +449,7 @@ static int fops_release(struct file *file)
}
}
- v4l2_fh_del(&fh->fh);
+ v4l2_fh_del(&fh->fh, file);
v4l2_fh_exit(&fh->fh);
kfree(fh);
@@ -489,7 +488,7 @@ saa7164_user_buffer *saa7164_vbi_next_buf(struct saa7164_port *port)
static ssize_t fops_read(struct file *file, char __user *buffer,
size_t count, loff_t *pos)
{
- struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_vbi_fh *fh = to_saa7164_vbi_fh(file);
struct saa7164_port *port = fh->port;
struct saa7164_user_buffer *ubuf = NULL;
struct saa7164_dev *dev = port->dev;
@@ -596,7 +595,7 @@ err:
static __poll_t fops_poll(struct file *file, poll_table *wait)
{
- struct saa7164_vbi_fh *fh = (struct saa7164_vbi_fh *)file->private_data;
+ struct saa7164_vbi_fh *fh = to_saa7164_vbi_fh(file);
struct saa7164_port *port = fh->port;
__poll_t mask = 0;
diff --git a/drivers/media/pci/saa7164/saa7164.h b/drivers/media/pci/saa7164/saa7164.h
index e1bac1fe19d3..94e987e7b5e5 100644
--- a/drivers/media/pci/saa7164/saa7164.h
+++ b/drivers/media/pci/saa7164/saa7164.h
@@ -180,12 +180,22 @@ struct saa7164_encoder_fh {
atomic_t v4l_reading;
};
+static inline struct saa7164_encoder_fh *to_saa7164_encoder_fh(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct saa7164_encoder_fh, fh);
+}
+
struct saa7164_vbi_fh {
struct v4l2_fh fh;
struct saa7164_port *port;
atomic_t v4l_reading;
};
+static inline struct saa7164_vbi_fh *to_saa7164_vbi_fh(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct saa7164_vbi_fh, fh);
+}
+
struct saa7164_histogram_bucket {
u32 val;
u32 count;
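
These helpers are the standard embedded-fh pattern: file_to_v4l2_fh() recovers the struct v4l2_fh the core stashed at open time, and container_of() walks back to the enclosing per-open state. The one structural requirement is that 'fh' is embedded by value, as it is in both structs here:

	struct saa7164_vbi_fh {
		struct v4l2_fh fh;	/* embedded, so container_of() is valid */
		struct saa7164_port *port;
		atomic_t v4l_reading;
	};

Compared with the old casts from file->private_data, the inline helpers also give the compiler something to type-check; the vidioc_g_std() fix above (it was reading a saa7164_encoder_fh in the VBI file) is exactly the class of bug this catches.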
diff --git a/drivers/media/pci/solo6x10/solo6x10-gpio.c b/drivers/media/pci/solo6x10/solo6x10-gpio.c
index b16a8453a62a..71848741c55c 100644
--- a/drivers/media/pci/solo6x10/solo6x10-gpio.c
+++ b/drivers/media/pci/solo6x10/solo6x10-gpio.c
@@ -158,7 +158,7 @@ int solo_gpio_init(struct solo_dev *solo_dev)
solo_dev->gpio_dev.get_direction = solo_gpiochip_get_direction;
solo_dev->gpio_dev.get = solo_gpiochip_get;
- solo_dev->gpio_dev.set_rv = solo_gpiochip_set;
+ solo_dev->gpio_dev.set = solo_gpiochip_set;
ret = gpiochip_add_data(&solo_dev->gpio_dev, solo_dev);
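
The .set_rv -> .set rename appears to track the GPIO core's completed conversion to int-returning line setters: set_rv was the transitional callback name while the void and int variants coexisted, and once the conversion finished the int variant took back the .set name. On that assumption, solo_gpiochip_set() already has the new prototype:

	static int solo_gpiochip_set(struct gpio_chip *chip,
				     unsigned int offset, int value);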
diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c
index 35dd19b2427e..08b7ce1043aa 100644
--- a/drivers/media/pci/tw68/tw68-core.c
+++ b/drivers/media/pci/tw68/tw68-core.c
@@ -14,7 +14,7 @@
*
* Refactored and updated to the latest v4l core frameworks:
*
- * Copyright (C) 2014 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2014 Hans Verkuil <hverkuil@kernel.org>
*/
#include <linux/init.h>
@@ -37,7 +37,7 @@
MODULE_DESCRIPTION("v4l2 driver module for tw6800 based video capture cards");
MODULE_AUTHOR("William M. Brack");
-MODULE_AUTHOR("Hans Verkuil <hverkuil@xs4all.nl>");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org>");
MODULE_LICENSE("GPL");
static unsigned int latency = UNSET;
diff --git a/drivers/media/pci/tw68/tw68-reg.h b/drivers/media/pci/tw68/tw68-reg.h
index dcd9931b25cc..8aeef452ea8a 100644
--- a/drivers/media/pci/tw68/tw68-reg.h
+++ b/drivers/media/pci/tw68/tw68-reg.h
@@ -13,7 +13,7 @@
*
* Refactored and updated to the latest v4l core frameworks:
*
- * Copyright (C) 2014 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2014 Hans Verkuil <hverkuil@kernel.org>
*/
#ifndef _TW68_REG_H_
diff --git a/drivers/media/pci/tw68/tw68-risc.c b/drivers/media/pci/tw68/tw68-risc.c
index dacb136c4f3a..e793db6134e4 100644
--- a/drivers/media/pci/tw68/tw68-risc.c
+++ b/drivers/media/pci/tw68/tw68-risc.c
@@ -14,7 +14,7 @@
*
* Refactored and updated to the latest v4l core frameworks:
*
- * Copyright (C) 2014 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2014 Hans Verkuil <hverkuil@kernel.org>
*/
#include "tw68.h"
diff --git a/drivers/media/pci/tw68/tw68-video.c b/drivers/media/pci/tw68/tw68-video.c
index 77773dec48b8..6232bac170d0 100644
--- a/drivers/media/pci/tw68/tw68-video.c
+++ b/drivers/media/pci/tw68/tw68-video.c
@@ -13,7 +13,7 @@
*
* Refactored and updated to the latest v4l core frameworks:
*
- * Copyright (C) 2014 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2014 Hans Verkuil <hverkuil@kernel.org>
*/
#include <linux/module.h>
diff --git a/drivers/media/pci/tw68/tw68.h b/drivers/media/pci/tw68/tw68.h
index a1f422d6e600..66be6b3bb7b6 100644
--- a/drivers/media/pci/tw68/tw68.h
+++ b/drivers/media/pci/tw68/tw68.h
@@ -13,7 +13,7 @@
*
* Refactored and updated to the latest v4l core frameworks:
*
- * Copyright (C) 2014 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2014 Hans Verkuil <hverkuil@kernel.org>
*/
#include <linux/pci.h>
diff --git a/drivers/media/pci/zoran/zoran.h b/drivers/media/pci/zoran/zoran.h
index 1cd990468d3d..d05e222b3921 100644
--- a/drivers/media/pci/zoran/zoran.h
+++ b/drivers/media/pci/zoran/zoran.h
@@ -154,12 +154,6 @@ struct zoran_jpg_settings {
struct zoran;
-/* zoran_fh contains per-open() settings */
-struct zoran_fh {
- struct v4l2_fh fh;
- struct zoran *zr;
-};
-
struct card_info {
enum card_type type;
char name[32];
diff --git a/drivers/media/pci/zoran/zoran_card.c b/drivers/media/pci/zoran/zoran_card.c
index e31f9f19a48a..d81facf735d9 100644
--- a/drivers/media/pci/zoran/zoran_card.c
+++ b/drivers/media/pci/zoran/zoran_card.c
@@ -67,10 +67,6 @@ module_param(pass_through, int, 0644);
MODULE_PARM_DESC(pass_through,
"Pass TV signal through to TV-out when idling");
-int zr36067_debug = 1;
-module_param_named(debug, zr36067_debug, int, 0644);
-MODULE_PARM_DESC(debug, "Debug level (0-5)");
-
#define ZORAN_VERSION "0.10.1"
MODULE_DESCRIPTION("Zoran-36057/36067 JPEG codec driver");
diff --git a/drivers/media/pci/zoran/zoran_card.h b/drivers/media/pci/zoran/zoran_card.h
index 518cb426b446..c4f81777e6ce 100644
--- a/drivers/media/pci/zoran/zoran_card.h
+++ b/drivers/media/pci/zoran/zoran_card.h
@@ -12,8 +12,6 @@
#ifndef __ZORAN_CARD_H__
#define __ZORAN_CARD_H__
-extern int zr36067_debug;
-
/* Anybody who uses more than four? */
#define BUZ_MAX 4
diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c
index f42f596d3e62..5b4d5dd06edb 100644
--- a/drivers/media/pci/zoran/zoran_driver.c
+++ b/drivers/media/pci/zoran/zoran_driver.c
@@ -245,7 +245,7 @@ static int zoran_set_input(struct zoran *zr, int input)
* ioctl routine
*/
-static int zoran_querycap(struct file *file, void *__fh, struct v4l2_capability *cap)
+static int zoran_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
struct zoran *zr = video_drvdata(file);
@@ -278,7 +278,7 @@ static int zoran_enum_fmt(struct zoran *zr, struct v4l2_fmtdesc *fmt, int flag)
return -EINVAL;
}
-static int zoran_enum_fmt_vid_cap(struct file *file, void *__fh,
+static int zoran_enum_fmt_vid_cap(struct file *file, void *fh,
struct v4l2_fmtdesc *f)
{
struct zoran *zr = video_drvdata(file);
@@ -286,7 +286,7 @@ static int zoran_enum_fmt_vid_cap(struct file *file, void *__fh,
return zoran_enum_fmt(zr, f, ZORAN_FORMAT_CAPTURE);
}
-static int zoran_g_fmt_vid_out(struct file *file, void *__fh,
+static int zoran_g_fmt_vid_out(struct file *file, void *fh,
struct v4l2_format *fmt)
{
struct zoran *zr = video_drvdata(file);
@@ -308,13 +308,13 @@ static int zoran_g_fmt_vid_out(struct file *file, void *__fh,
return 0;
}
-static int zoran_g_fmt_vid_cap(struct file *file, void *__fh,
+static int zoran_g_fmt_vid_cap(struct file *file, void *fh,
struct v4l2_format *fmt)
{
struct zoran *zr = video_drvdata(file);
if (zr->map_mode != ZORAN_MAP_MODE_RAW)
- return zoran_g_fmt_vid_out(file, __fh, fmt);
+ return zoran_g_fmt_vid_out(file, fh, fmt);
fmt->fmt.pix.width = zr->v4l_settings.width;
fmt->fmt.pix.height = zr->v4l_settings.height;
fmt->fmt.pix.sizeimage = zr->buffer_size;
@@ -328,7 +328,7 @@ static int zoran_g_fmt_vid_cap(struct file *file, void *__fh,
return 0;
}
-static int zoran_try_fmt_vid_out(struct file *file, void *__fh,
+static int zoran_try_fmt_vid_out(struct file *file, void *fh,
struct v4l2_format *fmt)
{
struct zoran *zr = video_drvdata(file);
@@ -391,7 +391,7 @@ static int zoran_try_fmt_vid_out(struct file *file, void *__fh,
return res;
}
-static int zoran_try_fmt_vid_cap(struct file *file, void *__fh,
+static int zoran_try_fmt_vid_cap(struct file *file, void *fh,
struct v4l2_format *fmt)
{
struct zoran *zr = video_drvdata(file);
@@ -399,7 +399,7 @@ static int zoran_try_fmt_vid_cap(struct file *file, void *__fh,
int i;
if (fmt->fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG)
- return zoran_try_fmt_vid_out(file, __fh, fmt);
+ return zoran_try_fmt_vid_out(file, fh, fmt);
for (i = 0; i < NUM_FORMATS; i++)
if (zoran_formats[i].fourcc == fmt->fmt.pix.pixelformat)
@@ -427,7 +427,7 @@ static int zoran_try_fmt_vid_cap(struct file *file, void *__fh,
return 0;
}
-static int zoran_s_fmt_vid_out(struct file *file, void *__fh,
+static int zoran_s_fmt_vid_out(struct file *file, void *fh,
struct v4l2_format *fmt)
{
struct zoran *zr = video_drvdata(file);
@@ -507,11 +507,10 @@ static int zoran_s_fmt_vid_out(struct file *file, void *__fh,
return res;
}
-static int zoran_s_fmt_vid_cap(struct file *file, void *__fh,
+static int zoran_s_fmt_vid_cap(struct file *file, void *fh,
struct v4l2_format *fmt)
{
struct zoran *zr = video_drvdata(file);
- struct zoran_fh *fh = __fh;
int i;
int res = 0;
@@ -556,7 +555,7 @@ static int zoran_s_fmt_vid_cap(struct file *file, void *__fh,
return res;
}
-static int zoran_g_std(struct file *file, void *__fh, v4l2_std_id *std)
+static int zoran_g_std(struct file *file, void *fh, v4l2_std_id *std)
{
struct zoran *zr = video_drvdata(file);
@@ -564,7 +563,7 @@ static int zoran_g_std(struct file *file, void *__fh, v4l2_std_id *std)
return 0;
}
-static int zoran_s_std(struct file *file, void *__fh, v4l2_std_id std)
+static int zoran_s_std(struct file *file, void *fh, v4l2_std_id std)
{
struct zoran *zr = video_drvdata(file);
int res = 0;
@@ -579,7 +578,7 @@ static int zoran_s_std(struct file *file, void *__fh, v4l2_std_id std)
return res;
}
-static int zoran_enum_input(struct file *file, void *__fh,
+static int zoran_enum_input(struct file *file, void *fh,
struct v4l2_input *inp)
{
struct zoran *zr = video_drvdata(file);
@@ -596,7 +595,7 @@ static int zoran_enum_input(struct file *file, void *__fh,
return 0;
}
-static int zoran_g_input(struct file *file, void *__fh, unsigned int *input)
+static int zoran_g_input(struct file *file, void *fh, unsigned int *input)
{
struct zoran *zr = video_drvdata(file);
@@ -605,7 +604,7 @@ static int zoran_g_input(struct file *file, void *__fh, unsigned int *input)
return 0;
}
-static int zoran_s_input(struct file *file, void *__fh, unsigned int input)
+static int zoran_s_input(struct file *file, void *fh, unsigned int input)
{
struct zoran *zr = video_drvdata(file);
int res;
@@ -618,7 +617,7 @@ static int zoran_s_input(struct file *file, void *__fh, unsigned int input)
}
/* cropping (sub-frame capture) */
-static int zoran_g_selection(struct file *file, void *__fh, struct v4l2_selection *sel)
+static int zoran_g_selection(struct file *file, void *fh, struct v4l2_selection *sel)
{
struct zoran *zr = video_drvdata(file);
@@ -653,7 +652,7 @@ static int zoran_g_selection(struct file *file, void *__fh, struct v4l2_selectio
return 0;
}
-static int zoran_s_selection(struct file *file, void *__fh, struct v4l2_selection *sel)
+static int zoran_s_selection(struct file *file, void *fh, struct v4l2_selection *sel)
{
struct zoran *zr = video_drvdata(file);
struct zoran_jpg_settings settings;
diff --git a/drivers/media/platform/allegro-dvt/allegro-core.c b/drivers/media/platform/allegro-dvt/allegro-core.c
index eb03df0d8652..510c3c9661d9 100644
--- a/drivers/media/platform/allegro-dvt/allegro-core.c
+++ b/drivers/media/platform/allegro-dvt/allegro-core.c
@@ -197,8 +197,6 @@ static const struct regmap_config allegro_sram_config = {
.cache_type = REGCACHE_NONE,
};
-#define fh_to_channel(__fh) container_of(__fh, struct allegro_channel, fh)
-
struct allegro_channel {
struct allegro_dev *dev;
struct v4l2_fh fh;
@@ -302,6 +300,11 @@ struct allegro_channel {
unsigned int error;
};
+static inline struct allegro_channel *file_to_channel(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct allegro_channel, fh);
+}
+
static inline int
allegro_channel_get_i_frame_qp(struct allegro_channel *channel)
{
@@ -3214,8 +3217,7 @@ static int allegro_open(struct file *file)
}
list_add(&channel->list, &dev->channels);
- file->private_data = &channel->fh;
- v4l2_fh_add(&channel->fh);
+ v4l2_fh_add(&channel->fh, file);
allegro_channel_adjust(channel);
@@ -3229,7 +3231,7 @@ error:
static int allegro_release(struct file *file)
{
- struct allegro_channel *channel = fh_to_channel(file->private_data);
+ struct allegro_channel *channel = file_to_channel(file);
v4l2_m2m_ctx_release(channel->fh.m2m_ctx);
@@ -3237,7 +3239,7 @@ static int allegro_release(struct file *file)
v4l2_ctrl_handler_free(&channel->ctrl_handler);
- v4l2_fh_del(&channel->fh);
+ v4l2_fh_del(&channel->fh, file);
v4l2_fh_exit(&channel->fh);
kfree(channel);
@@ -3280,7 +3282,7 @@ static int allegro_enum_fmt_vid(struct file *file, void *fh,
static int allegro_g_fmt_vid_cap(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct allegro_channel *channel = fh_to_channel(fh);
+ struct allegro_channel *channel = file_to_channel(file);
f->fmt.pix.field = V4L2_FIELD_NONE;
f->fmt.pix.width = channel->width;
@@ -3322,7 +3324,7 @@ static int allegro_try_fmt_vid_cap(struct file *file, void *fh,
static int allegro_s_fmt_vid_cap(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct allegro_channel *channel = fh_to_channel(fh);
+ struct allegro_channel *channel = file_to_channel(file);
struct vb2_queue *vq;
int err;
@@ -3346,7 +3348,7 @@ static int allegro_s_fmt_vid_cap(struct file *file, void *fh,
static int allegro_g_fmt_vid_out(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct allegro_channel *channel = fh_to_channel(fh);
+ struct allegro_channel *channel = file_to_channel(file);
f->fmt.pix.field = V4L2_FIELD_NONE;
@@ -3393,7 +3395,7 @@ static int allegro_try_fmt_vid_out(struct file *file, void *fh,
static int allegro_s_fmt_vid_out(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct allegro_channel *channel = fh_to_channel(fh);
+ struct allegro_channel *channel = file_to_channel(file);
int err;
err = allegro_try_fmt_vid_out(file, fh, f);
@@ -3434,7 +3436,7 @@ static int allegro_channel_cmd_start(struct allegro_channel *channel)
static int allegro_encoder_cmd(struct file *file, void *fh,
struct v4l2_encoder_cmd *cmd)
{
- struct allegro_channel *channel = fh_to_channel(fh);
+ struct allegro_channel *channel = file_to_channel(file);
int err;
err = v4l2_m2m_ioctl_try_encoder_cmd(file, fh, cmd);
@@ -3483,8 +3485,7 @@ static int allegro_enum_framesizes(struct file *file, void *fh,
static int allegro_ioctl_streamon(struct file *file, void *priv,
enum v4l2_buf_type type)
{
- struct v4l2_fh *fh = file->private_data;
- struct allegro_channel *channel = fh_to_channel(fh);
+ struct allegro_channel *channel = file_to_channel(file);
int err;
if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
@@ -3493,13 +3494,13 @@ static int allegro_ioctl_streamon(struct file *file, void *priv,
return err;
}
- return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
+ return v4l2_m2m_streamon(file, channel->fh.m2m_ctx, type);
}
static int allegro_g_parm(struct file *file, void *fh,
struct v4l2_streamparm *a)
{
- struct allegro_channel *channel = fh_to_channel(fh);
+ struct allegro_channel *channel = file_to_channel(file);
struct v4l2_fract *timeperframe;
if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
@@ -3516,7 +3517,7 @@ static int allegro_g_parm(struct file *file, void *fh,
static int allegro_s_parm(struct file *file, void *fh,
struct v4l2_streamparm *a)
{
- struct allegro_channel *channel = fh_to_channel(fh);
+ struct allegro_channel *channel = file_to_channel(file);
struct v4l2_fract *timeperframe;
int div;
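
allegro shows the knock-on effect in ioctls that previously dereferenced the void *fh/priv argument: once file_to_channel() exists, the priv pointer is dead weight, and m2m calls take the context from the channel's own fh, e.g.:

	struct allegro_channel *channel = file_to_channel(file);

	return v4l2_m2m_streamon(file, channel->fh.m2m_ctx, type);

Replacing the old fh_to_channel() macro with a static inline also buys argument type checking for free.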
diff --git a/drivers/media/platform/amlogic/c3/mipi-csi2/c3-mipi-csi2.c b/drivers/media/platform/amlogic/c3/mipi-csi2/c3-mipi-csi2.c
index 1011ab3ebac7..b9e4ef3fc308 100644
--- a/drivers/media/platform/amlogic/c3/mipi-csi2/c3-mipi-csi2.c
+++ b/drivers/media/platform/amlogic/c3/mipi-csi2/c3-mipi-csi2.c
@@ -388,13 +388,12 @@ static void c3_mipi_csi_cfg_host(struct c3_csi_device *csi)
c3_mipi_csi_write(csi, CSI2_HOST_N_LANES, csi->bus.num_data_lanes - 1);
}
-static int c3_mipi_csi_start_stream(struct c3_csi_device *csi,
- struct v4l2_subdev *src_sd)
+static int c3_mipi_csi_start_stream(struct c3_csi_device *csi)
{
s64 link_freq;
s64 lane_rate;
- link_freq = v4l2_get_link_freq(src_sd->ctrl_handler, 0, 0);
+ link_freq = v4l2_get_link_freq(csi->src_pad, 0, 0);
if (link_freq < 0) {
dev_err(csi->dev,
"Unable to obtain link frequency: %lld\n", link_freq);
@@ -434,7 +433,7 @@ static int c3_mipi_csi_enable_streams(struct v4l2_subdev *sd,
pm_runtime_resume_and_get(csi->dev);
- c3_mipi_csi_start_stream(csi, src_sd);
+ c3_mipi_csi_start_stream(csi);
ret = v4l2_subdev_enable_streams(src_sd, csi->src_pad->index, BIT(0));
if (ret) {
diff --git a/drivers/media/platform/amlogic/meson-ge2d/ge2d.c b/drivers/media/platform/amlogic/meson-ge2d/ge2d.c
index 0c004bb8ba05..5744853a4003 100644
--- a/drivers/media/platform/amlogic/meson-ge2d/ge2d.c
+++ b/drivers/media/platform/amlogic/meson-ge2d/ge2d.c
@@ -82,6 +82,11 @@ struct ge2d_ctx {
u32 xy_swap;
};
+static inline struct ge2d_ctx *file_to_ge2d_ctx(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct ge2d_ctx, fh);
+}
+
struct meson_ge2d {
struct v4l2_device v4l2_dev;
struct v4l2_m2m_dev *m2m_dev;
@@ -452,7 +457,7 @@ static int vidioc_enum_fmt(struct file *file, void *priv, struct v4l2_fmtdesc *f
static int vidioc_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct ge2d_ctx *ctx = priv;
+ struct ge2d_ctx *ctx = file_to_ge2d_ctx(file);
struct ge2d_frame *f;
bool use_frame = false;
@@ -502,7 +507,7 @@ static int vidioc_g_selection(struct file *file, void *priv,
static int vidioc_s_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct ge2d_ctx *ctx = priv;
+ struct ge2d_ctx *ctx = file_to_ge2d_ctx(file);
struct meson_ge2d *ge2d = ctx->ge2d;
struct ge2d_frame *f;
int ret = 0;
@@ -569,8 +574,8 @@ static void vidioc_setup_cap_fmt(struct ge2d_ctx *ctx, struct v4l2_pix_format *f
static int vidioc_try_fmt_cap(struct file *file, void *priv, struct v4l2_format *f)
{
+ struct ge2d_ctx *ctx = file_to_ge2d_ctx(file);
const struct ge2d_fmt *fmt = find_fmt(f);
- struct ge2d_ctx *ctx = priv;
struct v4l2_pix_format fmt_cap;
vidioc_setup_cap_fmt(ctx, &fmt_cap);
@@ -590,7 +595,7 @@ static int vidioc_try_fmt_cap(struct file *file, void *priv, struct v4l2_format
static int vidioc_s_fmt_cap(struct file *file, void *priv, struct v4l2_format *f)
{
- struct ge2d_ctx *ctx = priv;
+ struct ge2d_ctx *ctx = file_to_ge2d_ctx(file);
struct meson_ge2d *ge2d = ctx->ge2d;
struct vb2_queue *vq;
struct ge2d_frame *frm;
@@ -626,7 +631,7 @@ static int vidioc_s_fmt_cap(struct file *file, void *priv, struct v4l2_format *f
static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct ge2d_ctx *ctx = priv;
+ struct ge2d_ctx *ctx = file_to_ge2d_ctx(file);
struct vb2_queue *vq;
struct ge2d_frame *frm;
@@ -665,7 +670,7 @@ static int vidioc_try_fmt_out(struct file *file, void *priv, struct v4l2_format
static int vidioc_s_fmt_out(struct file *file, void *priv, struct v4l2_format *f)
{
- struct ge2d_ctx *ctx = priv;
+ struct ge2d_ctx *ctx = file_to_ge2d_ctx(file);
struct meson_ge2d *ge2d = ctx->ge2d;
struct vb2_queue *vq;
struct ge2d_frame *frm, *frm_cap;
@@ -855,8 +860,7 @@ static int ge2d_open(struct file *file)
return ret;
}
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
ge2d_setup_ctrls(ctx);
@@ -871,8 +875,7 @@ static int ge2d_open(struct file *file)
static int ge2d_release(struct file *file)
{
- struct ge2d_ctx *ctx =
- container_of(file->private_data, struct ge2d_ctx, fh);
+ struct ge2d_ctx *ctx = file_to_ge2d_ctx(file);
struct meson_ge2d *ge2d = ctx->ge2d;
mutex_lock(&ge2d->mutex);
@@ -880,7 +883,7 @@ static int ge2d_release(struct file *file)
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
diff --git a/drivers/media/platform/amphion/vpu.h b/drivers/media/platform/amphion/vpu.h
index cac0f1a64fea..bfd171a3ded4 100644
--- a/drivers/media/platform/amphion/vpu.h
+++ b/drivers/media/platform/amphion/vpu.h
@@ -328,7 +328,7 @@ static inline const char *vpu_core_type_desc(enum vpu_core_type type)
static inline struct vpu_inst *to_inst(struct file *filp)
{
- return container_of(filp->private_data, struct vpu_inst, fh);
+ return container_of(file_to_v4l2_fh(filp), struct vpu_inst, fh);
}
#define ctrl_to_inst(ctrl) \
diff --git a/drivers/media/platform/amphion/vpu_v4l2.c b/drivers/media/platform/amphion/vpu_v4l2.c
index 74668fa362e2..fcb2eff813ac 100644
--- a/drivers/media/platform/amphion/vpu_v4l2.c
+++ b/drivers/media/platform/amphion/vpu_v4l2.c
@@ -24,6 +24,11 @@
#include "vpu_msgs.h"
#include "vpu_helpers.h"
+static char *vpu_type_name(u32 type)
+{
+ return V4L2_TYPE_IS_OUTPUT(type) ? "output" : "capture";
+}
+
void vpu_inst_lock(struct vpu_inst *inst)
{
mutex_lock(&inst->lock);
@@ -42,7 +47,7 @@ dma_addr_t vpu_get_vb_phy_addr(struct vb2_buffer *vb, u32 plane_no)
vb->planes[plane_no].data_offset;
}
-unsigned int vpu_get_vb_length(struct vb2_buffer *vb, u32 plane_no)
+static unsigned int vpu_get_vb_length(struct vb2_buffer *vb, u32 plane_no)
{
if (plane_no >= vb->num_planes)
return 0;
@@ -81,7 +86,7 @@ void vpu_v4l2_set_error(struct vpu_inst *inst)
vpu_inst_unlock(inst);
}
-int vpu_notify_eos(struct vpu_inst *inst)
+static int vpu_notify_eos(struct vpu_inst *inst)
{
static const struct v4l2_event ev = {
.id = 0,
@@ -573,7 +578,8 @@ static void vpu_vb2_buf_finish(struct vb2_buffer *vb)
call_void_vop(inst, on_queue_empty, q->type);
}
-void vpu_vb2_buffers_return(struct vpu_inst *inst, unsigned int type, enum vb2_buffer_state state)
+static void vpu_vb2_buffers_return(struct vpu_inst *inst, unsigned int type,
+ enum vb2_buffer_state state)
{
struct vb2_v4l2_buffer *buf;
@@ -718,8 +724,6 @@ static int vpu_v4l2_release(struct vpu_inst *inst)
v4l2_ctrl_handler_free(&inst->ctrl_handler);
mutex_destroy(&inst->lock);
- v4l2_fh_del(&inst->fh);
- v4l2_fh_exit(&inst->fh);
call_void_vop(inst, cleanup);
@@ -756,7 +760,7 @@ int vpu_v4l2_open(struct file *file, struct vpu_inst *inst)
inst->min_buffer_cap = 2;
inst->min_buffer_out = 2;
v4l2_fh_init(&inst->fh, func->vfd);
- v4l2_fh_add(&inst->fh);
+ v4l2_fh_add(&inst->fh, file);
ret = call_vop(inst, ctrl_init);
if (ret)
@@ -770,7 +774,6 @@ int vpu_v4l2_open(struct file *file, struct vpu_inst *inst)
}
inst->fh.ctrl_handler = &inst->ctrl_handler;
- file->private_data = &inst->fh;
inst->state = VPU_CODEC_STATE_DEINIT;
inst->workqueue = alloc_ordered_workqueue("vpu_inst", WQ_MEM_RECLAIM);
if (inst->workqueue) {
@@ -788,6 +791,8 @@ int vpu_v4l2_open(struct file *file, struct vpu_inst *inst)
return 0;
error:
+ v4l2_fh_del(&inst->fh, file);
+ v4l2_fh_exit(&inst->fh);
vpu_inst_put(inst);
return ret;
}
@@ -807,6 +812,9 @@ int vpu_v4l2_close(struct file *file)
call_void_vop(inst, release);
vpu_inst_unlock(inst);
+ v4l2_fh_del(&inst->fh, file);
+ v4l2_fh_exit(&inst->fh);
+
vpu_inst_unregister(inst);
vpu_inst_put(inst);
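
Moving v4l2_fh_del()/v4l2_fh_exit() out of vpu_v4l2_release() and into the open-error path and vpu_v4l2_close() keeps the fh teardown paired with the struct file that registered it: with the new two-argument v4l2_fh_add(&inst->fh, file), the matching del needs the same file pointer, which the refcounted release helper does not have. The intended pairing, schematically:

	v4l2_fh_init(&inst->fh, vfd);
	v4l2_fh_add(&inst->fh, file);	/* binds the fh to this file */
	...
	v4l2_fh_del(&inst->fh, file);	/* must mirror the add exactly */
	v4l2_fh_exit(&inst->fh);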
diff --git a/drivers/media/platform/amphion/vpu_v4l2.h b/drivers/media/platform/amphion/vpu_v4l2.h
index 56f2939fa84d..4a87b06ae520 100644
--- a/drivers/media/platform/amphion/vpu_v4l2.h
+++ b/drivers/media/platform/amphion/vpu_v4l2.h
@@ -26,15 +26,12 @@ void vpu_skip_frame(struct vpu_inst *inst, int count);
struct vb2_v4l2_buffer *vpu_find_buf_by_sequence(struct vpu_inst *inst, u32 type, u32 sequence);
struct vb2_v4l2_buffer *vpu_find_buf_by_idx(struct vpu_inst *inst, u32 type, u32 idx);
void vpu_v4l2_set_error(struct vpu_inst *inst);
-int vpu_notify_eos(struct vpu_inst *inst);
int vpu_notify_source_change(struct vpu_inst *inst);
int vpu_set_last_buffer_dequeued(struct vpu_inst *inst, bool eos);
-void vpu_vb2_buffers_return(struct vpu_inst *inst, unsigned int type, enum vb2_buffer_state state);
int vpu_get_num_buffers(struct vpu_inst *inst, u32 type);
bool vpu_is_source_empty(struct vpu_inst *inst);
dma_addr_t vpu_get_vb_phy_addr(struct vb2_buffer *vb, u32 plane_no);
-unsigned int vpu_get_vb_length(struct vb2_buffer *vb, u32 plane_no);
static inline struct vpu_format *vpu_get_format(struct vpu_inst *inst, u32 type)
{
if (V4L2_TYPE_IS_OUTPUT(type))
@@ -43,11 +40,6 @@ static inline struct vpu_format *vpu_get_format(struct vpu_inst *inst, u32 type)
return &inst->cap_format;
}
-static inline char *vpu_type_name(u32 type)
-{
- return V4L2_TYPE_IS_OUTPUT(type) ? "output" : "capture";
-}
-
static inline int vpu_vb_is_codecconfig(struct vb2_v4l2_buffer *vbuf)
{
#ifdef V4L2_BUF_FLAG_CODECCONFIG
diff --git a/drivers/media/platform/aspeed/aspeed-video.c b/drivers/media/platform/aspeed/aspeed-video.c
index 54cae0da9aca..b83e43245277 100644
--- a/drivers/media/platform/aspeed/aspeed-video.c
+++ b/drivers/media/platform/aspeed/aspeed-video.c
@@ -4,6 +4,7 @@
#include <linux/atomic.h>
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -25,6 +26,8 @@
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
@@ -203,6 +206,25 @@
#define VE_MEM_RESTRICT_START 0x310
#define VE_MEM_RESTRICT_END 0x314
+/* SCU's registers */
+#define SCU_MISC_CTRL 0xC0
+#define SCU_DPLL_SOURCE BIT(20)
+
+/* GFX's registers */
+#define GFX_CTRL 0x60
+#define GFX_CTRL_ENABLE BIT(0)
+#define GFX_CTRL_FMT GENMASK(9, 7)
+
+#define GFX_H_DISPLAY 0x70
+#define GFX_H_DISPLAY_DE GENMASK(28, 16)
+#define GFX_H_DISPLAY_TOTAL GENMASK(12, 0)
+
+#define GFX_V_DISPLAY 0x78
+#define GFX_V_DISPLAY_DE GENMASK(27, 16)
+#define GFX_V_DISPLAY_TOTAL GENMASK(11, 0)
+
+#define GFX_DISPLAY_ADDR 0x80
+
/*
* VIDEO_MODE_DETECT_DONE: a flag raised if signal lock
* VIDEO_RES_CHANGE: a flag raised if res_change work on-going
@@ -262,6 +284,7 @@ struct aspeed_video_perf {
/*
* struct aspeed_video - driver data
*
+ * version: holds the version of the Aspeed SoC
* res_work: holds the delayed_work for res-detection if unlock
* buffers: holds the list of buffer queued from user
* flags: holds the state of video
@@ -273,6 +296,7 @@ struct aspeed_video_perf {
* yuv420: a flag raised if JPEG subsampling is 420
* format: holds the video format
* hq_mode: a flag raised if HQ is enabled. Only for VIDEO_FMT_ASPEED
+ * input: holds the video input
* frame_rate: holds the frame_rate
* jpeg_quality: holds jpeg's quality (0~11)
* jpeg_hq_quality: holds hq's quality (1~12) only if hq_mode enabled
@@ -298,6 +322,9 @@ struct aspeed_video {
struct video_device vdev;
struct mutex video_lock; /* v4l2 and videobuf2 lock */
+ struct regmap *scu;
+ struct regmap *gfx;
+ u32 version;
u32 jpeg_mode;
u32 comp_size_read;
@@ -316,6 +343,7 @@ struct aspeed_video {
bool yuv420;
enum aspeed_video_format format;
bool hq_mode;
+ enum aspeed_video_input input;
unsigned int frame_rate;
unsigned int jpeg_quality;
unsigned int jpeg_hq_quality;
@@ -331,21 +359,25 @@ struct aspeed_video {
#define to_aspeed_video(x) container_of((x), struct aspeed_video, v4l2_dev)
struct aspeed_video_config {
+ u32 version;
u32 jpeg_mode;
u32 comp_size_read;
};
static const struct aspeed_video_config ast2400_config = {
+ .version = 4,
.jpeg_mode = AST2400_VE_SEQ_CTRL_JPEG_MODE,
.comp_size_read = AST2400_VE_COMP_SIZE_READ_BACK,
};
static const struct aspeed_video_config ast2500_config = {
+ .version = 5,
.jpeg_mode = AST2500_VE_SEQ_CTRL_JPEG_MODE,
.comp_size_read = AST2400_VE_COMP_SIZE_READ_BACK,
};
static const struct aspeed_video_config ast2600_config = {
+ .version = 6,
.jpeg_mode = AST2500_VE_SEQ_CTRL_JPEG_MODE,
.comp_size_read = AST2600_VE_COMP_SIZE_READ_BACK,
};
@@ -485,6 +517,7 @@ static const struct v4l2_dv_timings_cap aspeed_video_timings_cap = {
static const char * const format_str[] = {"Standard JPEG",
"Aspeed JPEG"};
+static const char * const input_str[] = {"HOST VGA", "BMC GFX"};
static unsigned int debug;
@@ -609,6 +642,14 @@ static int aspeed_video_start_frame(struct aspeed_video *video)
aspeed_video_free_buf(video, &video->bcd);
}
+ if (video->input == VIDEO_INPUT_GFX) {
+ u32 val;
+
+ // update the input buffer address to GFX's current framebuffer
+ regmap_read(video->gfx, GFX_DISPLAY_ADDR, &val);
+ aspeed_video_write(video, VE_TGS_0, val);
+ }
+
spin_lock_irqsave(&video->lock, flags);
buf = list_first_entry_or_null(&video->buffers,
struct aspeed_video_buffer, link);
@@ -1026,9 +1067,23 @@ static void aspeed_video_get_timings(struct aspeed_video *v,
}
}
+static void aspeed_video_get_resolution_gfx(struct aspeed_video *video,
+ struct v4l2_bt_timings *det)
+{
+ u32 h_val, v_val;
+
+ regmap_read(video->gfx, GFX_H_DISPLAY, &h_val);
+ regmap_read(video->gfx, GFX_V_DISPLAY, &v_val);
+
+ det->width = FIELD_GET(GFX_H_DISPLAY_DE, h_val) + 1;
+ det->height = FIELD_GET(GFX_V_DISPLAY_DE, v_val) + 1;
+ video->v4l2_input_status = 0;
+}
+
#define res_check(v) test_and_clear_bit(VIDEO_MODE_DETECT_DONE, &(v)->flags)
-static void aspeed_video_get_resolution(struct aspeed_video *video)
+static void aspeed_video_get_resolution_vga(struct aspeed_video *video,
+ struct v4l2_bt_timings *det)
{
bool invalid_resolution = true;
int rc;
@@ -1036,7 +1091,6 @@ static void aspeed_video_get_resolution(struct aspeed_video *video)
u32 mds;
u32 src_lr_edge;
u32 src_tb_edge;
- struct v4l2_bt_timings *det = &video->detected_timings;
det->width = MIN_WIDTH;
det->height = MIN_HEIGHT;
@@ -1113,14 +1167,20 @@ static void aspeed_video_get_resolution(struct aspeed_video *video)
aspeed_video_get_timings(video, det);
- /*
- * Enable mode-detect watchdog, resolution-change watchdog and
- * automatic compression after frame capture.
- */
+ /* Enable mode-detect watchdog, resolution-change watchdog */
aspeed_video_update(video, VE_INTERRUPT_CTRL, 0,
VE_INTERRUPT_MODE_DETECT_WD);
- aspeed_video_update(video, VE_SEQ_CTRL, 0,
- VE_SEQ_CTRL_AUTO_COMP | VE_SEQ_CTRL_EN_WATCHDOG);
+ aspeed_video_update(video, VE_SEQ_CTRL, 0, VE_SEQ_CTRL_EN_WATCHDOG);
+}
+
+static void aspeed_video_get_resolution(struct aspeed_video *video)
+{
+ struct v4l2_bt_timings *det = &video->detected_timings;
+
+ if (video->input == VIDEO_INPUT_GFX)
+ aspeed_video_get_resolution_gfx(video, det);
+ else
+ aspeed_video_get_resolution_vga(video, det);
v4l2_dbg(1, debug, &video->v4l2_dev, "Got resolution: %dx%d\n",
det->width, det->height);
@@ -1156,7 +1216,7 @@ static void aspeed_video_set_resolution(struct aspeed_video *video)
aspeed_video_write(video, VE_SRC_SCANLINE_OFFSET, act->width * 4);
/* Don't use direct mode below 1024 x 768 (irqs don't fire) */
- if (size < DIRECT_FETCH_THRESHOLD) {
+ if (video->input == VIDEO_INPUT_VGA && size < DIRECT_FETCH_THRESHOLD) {
v4l2_dbg(1, debug, &video->v4l2_dev, "Capture: Sync Mode\n");
aspeed_video_write(video, VE_TGS_0,
FIELD_PREP(VE_TGS_FIRST,
@@ -1171,10 +1231,20 @@ static void aspeed_video_set_resolution(struct aspeed_video *video)
VE_CTRL_INT_DE | VE_CTRL_DIRECT_FETCH,
VE_CTRL_INT_DE);
} else {
+ u32 ctrl, val, bpp;
+
v4l2_dbg(1, debug, &video->v4l2_dev, "Capture: Direct Mode\n");
+ ctrl = VE_CTRL_DIRECT_FETCH;
+ if (video->input == VIDEO_INPUT_GFX) {
+ regmap_read(video->gfx, GFX_CTRL, &val);
+ bpp = FIELD_GET(GFX_CTRL_FMT, val) ? 32 : 16;
+ if (bpp == 16)
+ ctrl |= VE_CTRL_INT_DE;
+ aspeed_video_write(video, VE_TGS_1, act->width * (bpp >> 3));
+ }
aspeed_video_update(video, VE_CTRL,
VE_CTRL_INT_DE | VE_CTRL_DIRECT_FETCH,
- VE_CTRL_DIRECT_FETCH);
+ ctrl);
}
size *= 4;
@@ -1207,6 +1277,22 @@ err_mem:
aspeed_video_free_buf(video, &video->srcs[0]);
}
+/*
+ * Update the dependent parameters when the timings change.
+ *
+ * @video: the aspeed_video instance
+ * @timings: the new timings
+ */
+static void aspeed_video_update_timings(struct aspeed_video *video, struct v4l2_bt_timings *timings)
+{
+ video->active_timings = *timings;
+ aspeed_video_set_resolution(video);
+
+ video->pix_fmt.width = timings->width;
+ video->pix_fmt.height = timings->height;
+ video->pix_fmt.sizeimage = video->max_compressed_size;
+}
+
static void aspeed_video_update_regs(struct aspeed_video *video)
{
u8 jpeg_hq_quality = clamp((int)video->jpeg_hq_quality - 1, 0,
@@ -1219,6 +1305,8 @@ static void aspeed_video_update_regs(struct aspeed_video *video)
u32 ctrl = 0;
u32 seq_ctrl = 0;
+ v4l2_dbg(1, debug, &video->v4l2_dev, "input(%s)\n",
+ input_str[video->input]);
v4l2_dbg(1, debug, &video->v4l2_dev, "framerate(%d)\n",
video->frame_rate);
v4l2_dbg(1, debug, &video->v4l2_dev, "jpeg format(%s) subsample(%s)\n",
@@ -1234,6 +1322,9 @@ static void aspeed_video_update_regs(struct aspeed_video *video)
else
aspeed_video_update(video, VE_BCD_CTRL, VE_BCD_CTRL_EN_BCD, 0);
+ if (video->input == VIDEO_INPUT_VGA)
+ ctrl |= VE_CTRL_AUTO_OR_CURSOR;
+
if (video->frame_rate)
ctrl |= FIELD_PREP(VE_CTRL_FRC, video->frame_rate);
@@ -1252,7 +1343,9 @@ static void aspeed_video_update_regs(struct aspeed_video *video)
aspeed_video_update(video, VE_SEQ_CTRL,
video->jpeg_mode | VE_SEQ_CTRL_YUV420,
seq_ctrl);
- aspeed_video_update(video, VE_CTRL, VE_CTRL_FRC, ctrl);
+ aspeed_video_update(video, VE_CTRL,
+ VE_CTRL_FRC | VE_CTRL_AUTO_OR_CURSOR |
+ VE_CTRL_SOURCE, ctrl);
aspeed_video_update(video, VE_COMP_CTRL,
VE_COMP_CTRL_DCT_LUM | VE_COMP_CTRL_DCT_CHR |
VE_COMP_CTRL_EN_HQ | VE_COMP_CTRL_HQ_DCT_LUM |
@@ -1280,6 +1373,7 @@ static void aspeed_video_init_regs(struct aspeed_video *video)
aspeed_video_write(video, VE_JPEG_ADDR, video->jpeg.dma);
/* Set control registers */
+ aspeed_video_write(video, VE_SEQ_CTRL, VE_SEQ_CTRL_AUTO_COMP);
aspeed_video_write(video, VE_CTRL, ctrl);
aspeed_video_write(video, VE_COMP_CTRL, VE_COMP_CTRL_RSVD);
@@ -1311,12 +1405,7 @@ static void aspeed_video_start(struct aspeed_video *video)
aspeed_video_get_resolution(video);
/* Set timings since the device is being opened for the first time */
- video->active_timings = video->detected_timings;
- aspeed_video_set_resolution(video);
-
- video->pix_fmt.width = video->active_timings.width;
- video->pix_fmt.height = video->active_timings.height;
- video->pix_fmt.sizeimage = video->max_compressed_size;
+ aspeed_video_update_timings(video, &video->detected_timings);
}
static void aspeed_video_stop(struct aspeed_video *video)
@@ -1401,10 +1490,10 @@ static int aspeed_video_enum_input(struct file *file, void *fh,
{
struct aspeed_video *video = video_drvdata(file);
- if (inp->index)
+ if (inp->index >= VIDEO_INPUT_MAX)
return -EINVAL;
- strscpy(inp->name, "Host VGA capture", sizeof(inp->name));
+ sprintf(inp->name, "%s capture", input_str[inp->index]);
inp->type = V4L2_INPUT_TYPE_CAMERA;
inp->capabilities = V4L2_IN_CAP_DV_TIMINGS;
inp->status = video->v4l2_input_status;
@@ -1414,16 +1503,57 @@ static int aspeed_video_enum_input(struct file *file, void *fh,
static int aspeed_video_get_input(struct file *file, void *fh, unsigned int *i)
{
- *i = 0;
+ struct aspeed_video *video = video_drvdata(file);
+
+ *i = video->input;
return 0;
}
static int aspeed_video_set_input(struct file *file, void *fh, unsigned int i)
{
- if (i)
+ struct aspeed_video *video = video_drvdata(file);
+
+ if (i >= VIDEO_INPUT_MAX)
return -EINVAL;
+ if (i == video->input)
+ return 0;
+
+ if (vb2_is_busy(&video->queue))
+ return -EBUSY;
+
+ if (IS_ERR(video->scu)) {
+ v4l2_dbg(1, debug, &video->v4l2_dev,
+ "%s: scu isn't ready for input-control\n", __func__);
+ return -EINVAL;
+ }
+
+ if (IS_ERR(video->gfx) && i == VIDEO_INPUT_GFX) {
+ v4l2_dbg(1, debug, &video->v4l2_dev,
+ "%s: gfx isn't ready for GFX input\n", __func__);
+ return -EINVAL;
+ }
+
+ video->input = i;
+
+ if (video->version == 6) {
+ /* modify dpll source per current input */
+ if (video->input == VIDEO_INPUT_VGA)
+ regmap_update_bits(video->scu, SCU_MISC_CTRL,
+ SCU_DPLL_SOURCE, 0);
+ else
+ regmap_update_bits(video->scu, SCU_MISC_CTRL,
+ SCU_DPLL_SOURCE, SCU_DPLL_SOURCE);
+ }
+
+ aspeed_video_update_regs(video);
+
+ /* update signal status */
+ aspeed_video_get_resolution(video);
+ if (!video->v4l2_input_status)
+ aspeed_video_update_timings(video, &video->detected_timings);
+
return 0;
}
@@ -1527,13 +1657,7 @@ static int aspeed_video_set_dv_timings(struct file *file, void *fh,
if (vb2_is_busy(&video->queue))
return -EBUSY;
- video->active_timings = timings->bt;
-
- aspeed_video_set_resolution(video);
-
- video->pix_fmt.width = timings->bt.width;
- video->pix_fmt.height = timings->bt.height;
- video->pix_fmt.sizeimage = video->max_compressed_size;
+ aspeed_video_update_timings(video, &timings->bt);
timings->type = V4L2_DV_BT_656_1120;
@@ -1909,6 +2033,7 @@ static int aspeed_video_debugfs_show(struct seq_file *s, void *data)
val08 = aspeed_video_read(v, VE_CTRL);
if (FIELD_GET(VE_CTRL_DIRECT_FETCH, val08)) {
seq_printf(s, " %-20s:\tDirect fetch\n", "Mode");
+ seq_printf(s, " %-20s:\t%s\n", "Input", input_str[v->input]);
seq_printf(s, " %-20s:\t%s\n", "VGA bpp mode",
FIELD_GET(VE_CTRL_INT_DE, val08) ? "16" : "32");
} else {
@@ -2068,12 +2193,29 @@ static int aspeed_video_setup_video(struct aspeed_video *video)
return 0;
}
+/*
+ * Get the regmap directly, without claiming resources such as clk/reset
+ * that could lead to conflicts with their owning drivers.
+ */
+static struct regmap *aspeed_regmap_lookup(struct device_node *np, const char *property)
+{
+ struct device_node *syscon_np __free(device_node) = of_parse_phandle(np, property, 0);
+
+ if (!syscon_np)
+ return ERR_PTR(-ENODEV);
+
+ return device_node_to_regmap(syscon_np);
+}
+
static int aspeed_video_init(struct aspeed_video *video)
{
int irq;
int rc;
struct device *dev = video->dev;
+ video->scu = aspeed_regmap_lookup(dev->of_node, "aspeed,scu");
+ video->gfx = aspeed_regmap_lookup(dev->of_node, "aspeed,gfx");
+
irq = irq_of_parse_and_map(dev->of_node, 0);
if (!irq) {
dev_err(dev, "Unable to find IRQ\n");
@@ -2165,6 +2307,7 @@ static int aspeed_video_probe(struct platform_device *pdev)
if (!config)
return -ENODEV;
+ video->version = config->version;
video->jpeg_mode = config->jpeg_mode;
video->comp_size_read = config->comp_size_read;
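
Because aspeed_regmap_lookup() above returns an ERR_PTR() instead of failing
the probe, callers gate the optional features on IS_ERR(), as
aspeed_video_set_input() does before touching the SCU. A condensed sketch of
that pattern (register offset and bit are hypothetical placeholders, not the
real SCU layout):

#include <linux/bits.h>
#include <linux/err.h>
#include <linux/regmap.h>
#include <linux/types.h>

#define EXAMPLE_MISC_CTRL	0x2c	/* hypothetical register offset */
#define EXAMPLE_DPLL_SOURCE	BIT(8)	/* hypothetical field */

static int example_select_dpll(struct regmap *scu, bool use_gfx)
{
	/* The regmap is an ERR_PTR() when the phandle was absent */
	if (IS_ERR(scu))
		return -ENODEV;

	/* Set or clear the mux bit depending on the selected input */
	return regmap_update_bits(scu, EXAMPLE_MISC_CTRL, EXAMPLE_DPLL_SOURCE,
				  use_gfx ? EXAMPLE_DPLL_SOURCE : 0);
}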
diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c
index 7f1ce95cdc3f..8c19f125da3e 100644
--- a/drivers/media/platform/cadence/cdns-csi2rx.c
+++ b/drivers/media/platform/cadence/cdns-csi2rx.c
@@ -5,8 +5,10 @@
* Copyright (C) 2017 Cadence Design Systems Inc.
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
@@ -17,6 +19,7 @@
#include <linux/reset.h>
#include <linux/slab.h>
+#include <media/cadence/cdns-csi2rx.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -52,7 +55,9 @@
#define CSI2RX_STREAM_DATA_CFG_VC_SELECT(n) BIT((n) + 16)
#define CSI2RX_STREAM_CFG_REG(n) (CSI2RX_STREAM_BASE(n) + 0x00c)
-#define CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF (1 << 8)
+#define CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF BIT(8)
+#define CSI2RX_STREAM_CFG_NUM_PIXELS_MASK GENMASK(5, 4)
+#define CSI2RX_STREAM_CFG_NUM_PIXELS(n) ((n) >> 1U)
#define CSI2RX_LANES_MAX 4
#define CSI2RX_STREAMS_MAX 4
@@ -87,7 +92,10 @@ enum csi2rx_pads {
struct csi2rx_fmt {
u32 code;
+	/* width of a single pixel on the CSI-2 bus, in bits */
	u8 bpp;
+	/* maximum pixels per clock cycle supported on the output bus */
+	u8 max_pixels;
};
struct csi2rx_event {
@@ -132,6 +140,7 @@ struct csi2rx_priv {
struct reset_control *pixel_rst[CSI2RX_STREAMS_MAX];
struct phy *dphy;
+ u8 num_pixels[CSI2RX_STREAMS_MAX];
u8 lanes[CSI2RX_LANES_MAX];
u8 num_lanes;
u8 max_lanes;
@@ -149,22 +158,22 @@ struct csi2rx_priv {
};
static const struct csi2rx_fmt formats[] = {
- { .code = MEDIA_BUS_FMT_YUYV8_1X16, .bpp = 16, },
- { .code = MEDIA_BUS_FMT_UYVY8_1X16, .bpp = 16, },
- { .code = MEDIA_BUS_FMT_YVYU8_1X16, .bpp = 16, },
- { .code = MEDIA_BUS_FMT_VYUY8_1X16, .bpp = 16, },
- { .code = MEDIA_BUS_FMT_SBGGR8_1X8, .bpp = 8, },
- { .code = MEDIA_BUS_FMT_SGBRG8_1X8, .bpp = 8, },
- { .code = MEDIA_BUS_FMT_SGRBG8_1X8, .bpp = 8, },
- { .code = MEDIA_BUS_FMT_SRGGB8_1X8, .bpp = 8, },
- { .code = MEDIA_BUS_FMT_Y8_1X8, .bpp = 8, },
- { .code = MEDIA_BUS_FMT_SBGGR10_1X10, .bpp = 10, },
- { .code = MEDIA_BUS_FMT_SGBRG10_1X10, .bpp = 10, },
- { .code = MEDIA_BUS_FMT_SGRBG10_1X10, .bpp = 10, },
- { .code = MEDIA_BUS_FMT_SRGGB10_1X10, .bpp = 10, },
- { .code = MEDIA_BUS_FMT_RGB565_1X16, .bpp = 16, },
- { .code = MEDIA_BUS_FMT_RGB888_1X24, .bpp = 24, },
- { .code = MEDIA_BUS_FMT_BGR888_1X24, .bpp = 24, },
+ { .code = MEDIA_BUS_FMT_YUYV8_1X16, .bpp = 16, .max_pixels = 2, },
+ { .code = MEDIA_BUS_FMT_UYVY8_1X16, .bpp = 16, .max_pixels = 2, },
+ { .code = MEDIA_BUS_FMT_YVYU8_1X16, .bpp = 16, .max_pixels = 2, },
+ { .code = MEDIA_BUS_FMT_VYUY8_1X16, .bpp = 16, .max_pixels = 2, },
+ { .code = MEDIA_BUS_FMT_SBGGR8_1X8, .bpp = 8, .max_pixels = 4, },
+ { .code = MEDIA_BUS_FMT_SGBRG8_1X8, .bpp = 8, .max_pixels = 4, },
+ { .code = MEDIA_BUS_FMT_SGRBG8_1X8, .bpp = 8, .max_pixels = 4, },
+ { .code = MEDIA_BUS_FMT_SRGGB8_1X8, .bpp = 8, .max_pixels = 4, },
+ { .code = MEDIA_BUS_FMT_Y8_1X8, .bpp = 8, .max_pixels = 4, },
+ { .code = MEDIA_BUS_FMT_SBGGR10_1X10, .bpp = 10, .max_pixels = 2, },
+ { .code = MEDIA_BUS_FMT_SGBRG10_1X10, .bpp = 10, .max_pixels = 2, },
+ { .code = MEDIA_BUS_FMT_SGRBG10_1X10, .bpp = 10, .max_pixels = 2, },
+ { .code = MEDIA_BUS_FMT_SRGGB10_1X10, .bpp = 10, .max_pixels = 2, },
+ { .code = MEDIA_BUS_FMT_RGB565_1X16, .bpp = 16, .max_pixels = 1, },
+ { .code = MEDIA_BUS_FMT_RGB888_1X24, .bpp = 24, .max_pixels = 1, },
+ { .code = MEDIA_BUS_FMT_BGR888_1X24, .bpp = 24, .max_pixels = 1, },
};
static void csi2rx_configure_error_irq_mask(void __iomem *base,
@@ -370,7 +379,9 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx)
reset_control_deassert(csi2rx->pixel_rst[i]);
- writel(CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF,
+ writel(CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF |
+ FIELD_PREP(CSI2RX_STREAM_CFG_NUM_PIXELS_MASK,
+ csi2rx->num_pixels[i]),
csi2rx->base + CSI2RX_STREAM_CFG_REG(i));
/*
@@ -569,6 +580,33 @@ static int csi2rx_init_state(struct v4l2_subdev *subdev,
return csi2rx_set_fmt(subdev, state, &format);
}
+int cdns_csi2rx_negotiate_ppc(struct v4l2_subdev *subdev, unsigned int pad,
+ u8 *ppc)
+{
+ struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);
+ const struct csi2rx_fmt *csi_fmt;
+ struct v4l2_subdev_state *state;
+ struct v4l2_mbus_framefmt *fmt;
+
+ if (!ppc || pad < CSI2RX_PAD_SOURCE_STREAM0 || pad >= CSI2RX_PAD_MAX)
+ return -EINVAL;
+
+ state = v4l2_subdev_lock_and_get_active_state(subdev);
+ fmt = v4l2_subdev_state_get_format(state, pad);
+ csi_fmt = csi2rx_get_fmt_by_code(fmt->code);
+
+ /* Reduce requested PPC if it is too high */
+ *ppc = min(*ppc, csi_fmt->max_pixels);
+
+ v4l2_subdev_unlock_state(state);
+
+ csi2rx->num_pixels[pad - CSI2RX_PAD_SOURCE_STREAM0] =
+ CSI2RX_STREAM_CFG_NUM_PIXELS(*ppc);
+
+ return 0;
+}
+EXPORT_SYMBOL_FOR_MODULES(cdns_csi2rx_negotiate_ppc, "j721e-csi2rx");
+
static const struct v4l2_subdev_pad_ops csi2rx_pad_ops = {
.enum_mbus_code = csi2rx_enum_mbus_code,
.get_fmt = v4l2_subdev_get_fmt,
@@ -595,6 +633,7 @@ static const struct v4l2_subdev_internal_ops csi2rx_internal_ops = {
static const struct media_entity_operations csi2rx_media_ops = {
.link_validate = v4l2_subdev_link_validate,
+ .get_fwnode_pad = v4l2_subdev_get_fwnode_pad_1_to_1,
};
static int csi2rx_async_bound(struct v4l2_async_notifier *notifier,
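
Two details worth noting in the csi2rx changes: CSI2RX_STREAM_CFG_NUM_PIXELS()
maps a pixels-per-clock value of 1, 2 or 4 onto the two-bit register field
(0, 1 or 2) with a simple right shift, and the scoped
EXPORT_SYMBOL_FOR_MODULES() makes cdns_csi2rx_negotiate_ppc() callable only
from the TI j721e-csi2rx wrapper. A self-contained sketch of composing the
stream config value with FIELD_PREP() (the EX_ names are local to this sketch,
mirroring the masks defined above):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EX_STREAM_CFG_FIFO_MODE_LARGE_BUF	BIT(8)
#define EX_STREAM_CFG_NUM_PIXELS_MASK		GENMASK(5, 4)

/* Encode pixels-per-clock (1, 2 or 4) as the 2-bit field value (0, 1 or 2) */
static u32 example_stream_cfg(u8 ppc)
{
	return EX_STREAM_CFG_FIFO_MODE_LARGE_BUF |
	       FIELD_PREP(EX_STREAM_CFG_NUM_PIXELS_MASK, ppc >> 1);
}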
diff --git a/drivers/media/platform/chips-media/coda/coda-common.c b/drivers/media/platform/chips-media/coda/coda-common.c
index e6e3f5ec24f6..9a57b042d9fd 100644
--- a/drivers/media/platform/chips-media/coda/coda-common.c
+++ b/drivers/media/platform/chips-media/coda/coda-common.c
@@ -56,6 +56,11 @@
#define fh_to_ctx(__fh) container_of(__fh, struct coda_ctx, fh)
+static inline struct coda_ctx *file_to_ctx(struct file *filp)
+{
+ return fh_to_ctx(file_to_v4l2_fh(filp));
+}
+
int coda_debug;
module_param(coda_debug, int, 0644);
MODULE_PARM_DESC(coda_debug, "Debug level (0-2)");
@@ -422,7 +427,7 @@ out:
static int coda_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct coda_ctx *ctx = fh_to_ctx(priv);
+ struct coda_ctx *ctx = file_to_ctx(file);
strscpy(cap->driver, CODA_NAME, sizeof(cap->driver));
strscpy(cap->card, coda_product_name(ctx->dev->devtype->product),
@@ -442,7 +447,7 @@ static int coda_enum_fmt(struct file *file, void *priv,
{
struct video_device *vdev = video_devdata(file);
const struct coda_video_device *cvd = to_coda_video_device(vdev);
- struct coda_ctx *ctx = fh_to_ctx(priv);
+ struct coda_ctx *ctx = file_to_ctx(file);
const u32 *formats;
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
@@ -492,7 +497,7 @@ static int coda_g_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
struct coda_q_data *q_data;
- struct coda_ctx *ctx = fh_to_ctx(priv);
+ struct coda_ctx *ctx = file_to_ctx(file);
q_data = get_q_data(ctx, f->type);
if (!q_data)
@@ -653,7 +658,7 @@ static int coda_try_fmt(struct coda_ctx *ctx, const struct coda_codec *codec,
static int coda_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct coda_ctx *ctx = fh_to_ctx(priv);
+ struct coda_ctx *ctx = file_to_ctx(file);
const struct coda_q_data *q_data_src;
const struct coda_codec *codec;
struct vb2_queue *src_vq;
@@ -759,7 +764,7 @@ static void coda_set_default_colorspace(struct v4l2_pix_format *fmt)
static int coda_try_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct coda_ctx *ctx = fh_to_ctx(priv);
+ struct coda_ctx *ctx = file_to_ctx(file);
struct coda_dev *dev = ctx->dev;
const struct coda_q_data *q_data_dst;
const struct coda_codec *codec;
@@ -853,7 +858,7 @@ static int coda_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f,
static int coda_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct coda_ctx *ctx = fh_to_ctx(priv);
+ struct coda_ctx *ctx = file_to_ctx(file);
struct coda_q_data *q_data_src;
const struct coda_codec *codec;
struct v4l2_rect r;
@@ -905,7 +910,7 @@ static int coda_s_fmt_vid_cap(struct file *file, void *priv,
static int coda_s_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct coda_ctx *ctx = fh_to_ctx(priv);
+ struct coda_ctx *ctx = file_to_ctx(file);
const struct coda_codec *codec;
struct v4l2_format f_cap;
struct vb2_queue *dst_vq;
@@ -961,7 +966,7 @@ static int coda_s_fmt_vid_out(struct file *file, void *priv,
static int coda_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *rb)
{
- struct coda_ctx *ctx = fh_to_ctx(priv);
+ struct coda_ctx *ctx = file_to_ctx(file);
int ret;
ret = v4l2_m2m_reqbufs(file, ctx->fh.m2m_ctx, rb);
@@ -981,7 +986,7 @@ static int coda_reqbufs(struct file *file, void *priv,
static int coda_qbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
- struct coda_ctx *ctx = fh_to_ctx(priv);
+ struct coda_ctx *ctx = file_to_ctx(file);
if (ctx->inst_type == CODA_INST_DECODER &&
buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
@@ -992,7 +997,7 @@ static int coda_qbuf(struct file *file, void *priv,
static int coda_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
- struct coda_ctx *ctx = fh_to_ctx(priv);
+ struct coda_ctx *ctx = file_to_ctx(file);
int ret;
ret = v4l2_m2m_dqbuf(file, ctx->fh.m2m_ctx, buf);
@@ -1020,7 +1025,7 @@ void coda_m2m_buf_done(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
static int coda_g_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
- struct coda_ctx *ctx = fh_to_ctx(fh);
+ struct coda_ctx *ctx = file_to_ctx(file);
struct coda_q_data *q_data;
struct v4l2_rect r, *rsel;
@@ -1066,7 +1071,7 @@ static int coda_g_selection(struct file *file, void *fh,
static int coda_s_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
- struct coda_ctx *ctx = fh_to_ctx(fh);
+ struct coda_ctx *ctx = file_to_ctx(file);
struct coda_q_data *q_data;
switch (s->target) {
@@ -1121,7 +1126,7 @@ static void coda_wake_up_capture_queue(struct coda_ctx *ctx)
static int coda_encoder_cmd(struct file *file, void *fh,
struct v4l2_encoder_cmd *ec)
{
- struct coda_ctx *ctx = fh_to_ctx(fh);
+ struct coda_ctx *ctx = file_to_ctx(file);
struct vb2_v4l2_buffer *buf;
int ret;
@@ -1202,7 +1207,7 @@ static bool coda_mark_last_dst_buf(struct coda_ctx *ctx)
static int coda_decoder_cmd(struct file *file, void *fh,
struct v4l2_decoder_cmd *dc)
{
- struct coda_ctx *ctx = fh_to_ctx(fh);
+ struct coda_ctx *ctx = file_to_ctx(file);
struct coda_dev *dev = ctx->dev;
struct vb2_v4l2_buffer *buf;
struct vb2_queue *dst_vq;
@@ -1281,7 +1286,7 @@ static int coda_decoder_cmd(struct file *file, void *fh,
static int coda_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
- struct coda_ctx *ctx = fh_to_ctx(fh);
+ struct coda_ctx *ctx = file_to_ctx(file);
struct coda_q_data *q_data_dst;
const struct coda_codec *codec;
@@ -1314,7 +1319,7 @@ static int coda_enum_framesizes(struct file *file, void *fh,
static int coda_enum_frameintervals(struct file *file, void *fh,
struct v4l2_frmivalenum *f)
{
- struct coda_ctx *ctx = fh_to_ctx(fh);
+ struct coda_ctx *ctx = file_to_ctx(file);
struct coda_q_data *q_data;
const struct coda_codec *codec;
@@ -1353,7 +1358,7 @@ static int coda_enum_frameintervals(struct file *file, void *fh,
static int coda_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
{
- struct coda_ctx *ctx = fh_to_ctx(fh);
+ struct coda_ctx *ctx = file_to_ctx(file);
struct v4l2_fract *tpf;
if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
@@ -1436,7 +1441,7 @@ static uint32_t coda_timeperframe_to_frate(struct v4l2_fract *timeperframe)
static int coda_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
{
- struct coda_ctx *ctx = fh_to_ctx(fh);
+ struct coda_ctx *ctx = file_to_ctx(file);
struct v4l2_fract *tpf;
if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
@@ -2637,8 +2642,7 @@ static int coda_open(struct file *file)
if (ctx->ops->seq_end_work)
INIT_WORK(&ctx->seq_end_work, ctx->ops->seq_end_work);
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
ctx->dev = dev;
ctx->idx = idx;
@@ -2721,7 +2725,7 @@ err_clk_ahb:
err_clk_enable:
pm_runtime_put_sync(dev->dev);
err_pm_get:
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
err_coda_name_init:
ida_free(&dev->ida, ctx->idx);
@@ -2733,7 +2737,7 @@ err_coda_max:
static int coda_release(struct file *file)
{
struct coda_dev *dev = video_drvdata(file);
- struct coda_ctx *ctx = fh_to_ctx(file->private_data);
+ struct coda_ctx *ctx = file_to_ctx(file);
coda_dbg(1, ctx, "release instance (%p)\n", ctx);
@@ -2759,7 +2763,7 @@ static int coda_release(struct file *file)
clk_disable_unprepare(dev->clk_ahb);
clk_disable_unprepare(dev->clk_per);
pm_runtime_put_sync(dev->dev);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
ida_free(&dev->ida, ctx->idx);
if (ctx->ops->release)
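
The coda conversion above is the template repeated across the drivers in this
series: file->private_data is no longer assigned by hand, v4l2_fh_add() and
v4l2_fh_del() take the file pointer, and the per-open context is recovered
with a container_of() helper over file_to_v4l2_fh(). A condensed sketch,
assuming the updated v4l2_fh API these hunks are built on (example_* names are
illustrative):

#include <linux/container_of.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>

struct example_ctx {
	struct v4l2_fh fh;
	/* driver-private state ... */
};

static inline struct example_ctx *example_file_to_ctx(struct file *filp)
{
	return container_of(file_to_v4l2_fh(filp), struct example_ctx, fh);
}

static int example_open(struct file *file)
{
	struct example_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;

	v4l2_fh_init(&ctx->fh, video_devdata(file));
	/* v4l2_fh_add() now also wires up file->private_data */
	v4l2_fh_add(&ctx->fh, file);
	return 0;
}

static int example_release(struct file *file)
{
	struct example_ctx *ctx = example_file_to_ctx(file);

	v4l2_fh_del(&ctx->fh, file);
	v4l2_fh_exit(&ctx->fh);
	kfree(ctx);
	return 0;
}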
diff --git a/drivers/media/platform/chips-media/wave5/wave5-helper.c b/drivers/media/platform/chips-media/wave5/wave5-helper.c
index 2c9d8cbca6e4..f03ad9c0de22 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-helper.c
+++ b/drivers/media/platform/chips-media/wave5/wave5-helper.c
@@ -27,7 +27,7 @@ const char *state_to_str(enum vpu_instance_state state)
}
}
-void wave5_cleanup_instance(struct vpu_instance *inst)
+void wave5_cleanup_instance(struct vpu_instance *inst, struct file *filp)
{
int i;
@@ -46,7 +46,7 @@ void wave5_cleanup_instance(struct vpu_instance *inst)
wave5_vdi_free_dma_memory(inst->dev, &inst->bitstream_vbuf);
v4l2_ctrl_handler_free(&inst->v4l2_ctrl_hdl);
if (inst->v4l2_fh.vdev) {
- v4l2_fh_del(&inst->v4l2_fh);
+ v4l2_fh_del(&inst->v4l2_fh, filp);
v4l2_fh_exit(&inst->v4l2_fh);
}
list_del_init(&inst->list);
@@ -59,7 +59,7 @@ int wave5_vpu_release_device(struct file *filp,
int (*close_func)(struct vpu_instance *inst, u32 *fail_res),
char *name)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(filp->private_data);
+ struct vpu_instance *inst = file_to_vpu_inst(filp);
int ret = 0;
v4l2_m2m_ctx_release(inst->v4l2_fh.m2m_ctx);
@@ -78,7 +78,7 @@ int wave5_vpu_release_device(struct file *filp,
}
}
- wave5_cleanup_instance(inst);
+ wave5_cleanup_instance(inst, filp);
return ret;
}
@@ -142,7 +142,7 @@ int wave5_vpu_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subscr
int wave5_vpu_g_fmt_out(struct file *file, void *fh, struct v4l2_format *f)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
int i;
f->fmt.pix_mp.width = inst->src_fmt.width;
diff --git a/drivers/media/platform/chips-media/wave5/wave5-helper.h b/drivers/media/platform/chips-media/wave5/wave5-helper.h
index 9937fce553fc..976a402e426f 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-helper.h
+++ b/drivers/media/platform/chips-media/wave5/wave5-helper.h
@@ -14,7 +14,7 @@
#define MAX_FMTS 12
const char *state_to_str(enum vpu_instance_state state);
-void wave5_cleanup_instance(struct vpu_instance *inst);
+void wave5_cleanup_instance(struct vpu_instance *inst, struct file *filp);
int wave5_vpu_release_device(struct file *filp,
int (*close_func)(struct vpu_instance *inst, u32 *fail_res),
char *name);
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c b/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
index fd71f0c43ac3..e3038c18ca36 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c
@@ -451,7 +451,6 @@ static void wave5_vpu_dec_finish_decode(struct vpu_instance *inst)
if (q_status.report_queue_count == 0 &&
(q_status.instance_queue_count == 0 || dec_info.sequence_changed)) {
dev_dbg(inst->dev->dev, "%s: finishing job.\n", __func__);
- pm_runtime_mark_last_busy(inst->dev->dev);
pm_runtime_put_autosuspend(inst->dev->dev);
v4l2_m2m_job_finish(inst->v4l2_m2m_dev, m2m_ctx);
}
@@ -506,7 +505,7 @@ static int wave5_vpu_dec_enum_fmt_cap(struct file *file, void *fh, struct v4l2_f
static int wave5_vpu_dec_try_fmt_cap(struct file *file, void *fh, struct v4l2_format *f)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
struct dec_info *p_dec_info = &inst->codec_info->dec_info;
const struct v4l2_frmsize_stepwise *frmsize;
const struct vpu_format *vpu_fmt;
@@ -547,7 +546,7 @@ static int wave5_vpu_dec_try_fmt_cap(struct file *file, void *fh, struct v4l2_fo
static int wave5_vpu_dec_s_fmt_cap(struct file *file, void *fh, struct v4l2_format *f)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
int i, ret;
dev_dbg(inst->dev->dev,
@@ -606,7 +605,7 @@ static int wave5_vpu_dec_s_fmt_cap(struct file *file, void *fh, struct v4l2_form
static int wave5_vpu_dec_g_fmt_cap(struct file *file, void *fh, struct v4l2_format *f)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
int i;
f->fmt.pix_mp.width = inst->dst_fmt.width;
@@ -630,7 +629,7 @@ static int wave5_vpu_dec_g_fmt_cap(struct file *file, void *fh, struct v4l2_form
static int wave5_vpu_dec_enum_fmt_out(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
const struct vpu_format *vpu_fmt;
dev_dbg(inst->dev->dev, "%s: index: %u\n", __func__, f->index);
@@ -647,7 +646,7 @@ static int wave5_vpu_dec_enum_fmt_out(struct file *file, void *fh, struct v4l2_f
static int wave5_vpu_dec_try_fmt_out(struct file *file, void *fh, struct v4l2_format *f)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
const struct v4l2_frmsize_stepwise *frmsize;
const struct vpu_format *vpu_fmt;
int width, height;
@@ -678,7 +677,7 @@ static int wave5_vpu_dec_try_fmt_out(struct file *file, void *fh, struct v4l2_fo
static int wave5_vpu_dec_s_fmt_out(struct file *file, void *fh, struct v4l2_format *f)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
const struct vpu_format *vpu_fmt;
int i, ret;
@@ -727,7 +726,7 @@ static int wave5_vpu_dec_s_fmt_out(struct file *file, void *fh, struct v4l2_form
static int wave5_vpu_dec_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
dev_dbg(inst->dev->dev, "%s: type: %u | target: %u\n", __func__, s->type, s->target);
@@ -761,7 +760,7 @@ static int wave5_vpu_dec_g_selection(struct file *file, void *fh, struct v4l2_se
static int wave5_vpu_dec_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -872,7 +871,7 @@ unlock_and_return:
static int wave5_vpu_dec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *dc)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
int ret;
@@ -1364,7 +1363,6 @@ static int wave5_vpu_dec_start_streaming(struct vb2_queue *q, unsigned int count
}
}
- pm_runtime_mark_last_busy(inst->dev->dev);
pm_runtime_put_autosuspend(inst->dev->dev);
return ret;
@@ -1498,7 +1496,6 @@ static void wave5_vpu_dec_stop_streaming(struct vb2_queue *q)
else
streamoff_capture(q);
- pm_runtime_mark_last_busy(inst->dev->dev);
pm_runtime_put_autosuspend(inst->dev->dev);
}
@@ -1662,7 +1659,6 @@ static void wave5_vpu_dec_device_run(void *priv)
finish_job_and_return:
dev_dbg(inst->dev->dev, "%s: leave and finish job", __func__);
- pm_runtime_mark_last_busy(inst->dev->dev);
pm_runtime_put_autosuspend(inst->dev->dev);
v4l2_m2m_job_finish(inst->v4l2_m2m_dev, m2m_ctx);
}
@@ -1761,8 +1757,7 @@ static int wave5_vpu_open_dec(struct file *filp)
return -ENOMEM;
v4l2_fh_init(&inst->v4l2_fh, vdev);
- filp->private_data = &inst->v4l2_fh;
- v4l2_fh_add(&inst->v4l2_fh);
+ v4l2_fh_add(&inst->v4l2_fh, filp);
INIT_LIST_HEAD(&inst->list);
@@ -1840,7 +1835,7 @@ static int wave5_vpu_open_dec(struct file *filp)
return 0;
cleanup_inst:
- wave5_cleanup_instance(inst);
+ wave5_cleanup_instance(inst, filp);
return ret;
}
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c b/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
index 1e5fc5f8b856..9bfaa9fb3ceb 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c
@@ -360,7 +360,7 @@ static int wave5_vpu_enc_enum_framesizes(struct file *f, void *fh, struct v4l2_f
static int wave5_vpu_enc_enum_fmt_cap(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
const struct vpu_format *vpu_fmt;
dev_dbg(inst->dev->dev, "%s: index: %u\n", __func__, f->index);
@@ -377,7 +377,7 @@ static int wave5_vpu_enc_enum_fmt_cap(struct file *file, void *fh, struct v4l2_f
static int wave5_vpu_enc_try_fmt_cap(struct file *file, void *fh, struct v4l2_format *f)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
const struct v4l2_frmsize_stepwise *frmsize;
const struct vpu_format *vpu_fmt;
int width, height;
@@ -411,7 +411,7 @@ static int wave5_vpu_enc_try_fmt_cap(struct file *file, void *fh, struct v4l2_fo
static int wave5_vpu_enc_s_fmt_cap(struct file *file, void *fh, struct v4l2_format *f)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
int i, ret;
dev_dbg(inst->dev->dev, "%s: fourcc: %u width: %u height: %u num_planes: %u field: %u\n",
@@ -445,7 +445,7 @@ static int wave5_vpu_enc_s_fmt_cap(struct file *file, void *fh, struct v4l2_form
static int wave5_vpu_enc_g_fmt_cap(struct file *file, void *fh, struct v4l2_format *f)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
int i;
f->fmt.pix_mp.width = inst->dst_fmt.width;
@@ -469,7 +469,7 @@ static int wave5_vpu_enc_g_fmt_cap(struct file *file, void *fh, struct v4l2_form
static int wave5_vpu_enc_enum_fmt_out(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
const struct vpu_format *vpu_fmt;
dev_dbg(inst->dev->dev, "%s: index: %u\n", __func__, f->index);
@@ -486,7 +486,7 @@ static int wave5_vpu_enc_enum_fmt_out(struct file *file, void *fh, struct v4l2_f
static int wave5_vpu_enc_try_fmt_out(struct file *file, void *fh, struct v4l2_format *f)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
const struct v4l2_frmsize_stepwise *frmsize;
const struct vpu_format *vpu_fmt;
int width, height;
@@ -515,7 +515,7 @@ static int wave5_vpu_enc_try_fmt_out(struct file *file, void *fh, struct v4l2_fo
static int wave5_vpu_enc_s_fmt_out(struct file *file, void *fh, struct v4l2_format *f)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
const struct vpu_format *vpu_fmt;
const struct v4l2_format_info *info;
int i, ret;
@@ -543,7 +543,7 @@ static int wave5_vpu_enc_s_fmt_out(struct file *file, void *fh, struct v4l2_form
if (!info)
return -EINVAL;
- inst->cbcr_interleave = (info->comp_planes == 2) ? true : false;
+ inst->cbcr_interleave = info->comp_planes == 2;
switch (inst->src_fmt.pixelformat) {
case V4L2_PIX_FMT_NV21:
@@ -576,7 +576,7 @@ static int wave5_vpu_enc_s_fmt_out(struct file *file, void *fh, struct v4l2_form
static int wave5_vpu_enc_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
dev_dbg(inst->dev->dev, "%s: type: %u | target: %u\n", __func__, s->type, s->target);
@@ -605,7 +605,7 @@ static int wave5_vpu_enc_g_selection(struct file *file, void *fh, struct v4l2_se
static int wave5_vpu_enc_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
@@ -628,7 +628,7 @@ static int wave5_vpu_enc_s_selection(struct file *file, void *fh, struct v4l2_se
static int wave5_vpu_enc_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder_cmd *ec)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
struct v4l2_m2m_ctx *m2m_ctx = inst->v4l2_fh.m2m_ctx;
int ret;
@@ -661,7 +661,7 @@ static int wave5_vpu_enc_encoder_cmd(struct file *file, void *fh, struct v4l2_en
static int wave5_vpu_enc_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
dev_dbg(inst->dev->dev, "%s: type: %u\n", __func__, a->type);
@@ -681,7 +681,7 @@ static int wave5_vpu_enc_g_parm(struct file *file, void *fh, struct v4l2_streamp
static int wave5_vpu_enc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
{
- struct vpu_instance *inst = wave5_to_vpu_inst(fh);
+ struct vpu_instance *inst = file_to_vpu_inst(file);
dev_dbg(inst->dev->dev, "%s: type: %u\n", __func__, a->type);
@@ -1391,12 +1391,10 @@ static int wave5_vpu_enc_start_streaming(struct vb2_queue *q, unsigned int count
if (ret)
goto return_buffers;
- pm_runtime_mark_last_busy(inst->dev->dev);
pm_runtime_put_autosuspend(inst->dev->dev);
return 0;
return_buffers:
wave5_return_bufs(q, VB2_BUF_STATE_QUEUED);
- pm_runtime_mark_last_busy(inst->dev->dev);
pm_runtime_put_autosuspend(inst->dev->dev);
return ret;
}
@@ -1465,7 +1463,6 @@ static void wave5_vpu_enc_stop_streaming(struct vb2_queue *q)
else
streamoff_capture(inst, q);
- pm_runtime_mark_last_busy(inst->dev->dev);
pm_runtime_put_autosuspend(inst->dev->dev);
}
@@ -1520,7 +1517,6 @@ static void wave5_vpu_enc_device_run(void *priv)
break;
}
dev_dbg(inst->dev->dev, "%s: leave with active job", __func__);
- pm_runtime_mark_last_busy(inst->dev->dev);
pm_runtime_put_autosuspend(inst->dev->dev);
return;
default:
@@ -1529,7 +1525,6 @@ static void wave5_vpu_enc_device_run(void *priv)
break;
}
dev_dbg(inst->dev->dev, "%s: leave and finish job", __func__);
- pm_runtime_mark_last_busy(inst->dev->dev);
pm_runtime_put_autosuspend(inst->dev->dev);
v4l2_m2m_job_finish(inst->v4l2_m2m_dev, m2m_ctx);
}
@@ -1587,8 +1582,7 @@ static int wave5_vpu_open_enc(struct file *filp)
return -ENOMEM;
v4l2_fh_init(&inst->v4l2_fh, vdev);
- filp->private_data = &inst->v4l2_fh;
- v4l2_fh_add(&inst->v4l2_fh);
+ v4l2_fh_add(&inst->v4l2_fh, filp);
INIT_LIST_HEAD(&inst->list);
@@ -1784,7 +1778,7 @@ static int wave5_vpu_open_enc(struct file *filp)
return 0;
cleanup_inst:
- wave5_cleanup_instance(inst);
+ wave5_cleanup_instance(inst, filp);
return ret;
}
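
The pm_runtime_mark_last_busy() deletions scattered through the wave5 hunks
above all follow the same reasoning: pm_runtime_put_autosuspend() now updates
the last-busy timestamp itself before dropping the usage count, so the
explicit call ahead of it became redundant. The resulting idle path reduces
to:

#include <linux/pm_runtime.h>

static void example_job_done(struct device *dev)
{
	/*
	 * Marks the device busy and drops the usage count; no separate
	 * pm_runtime_mark_last_busy() is needed first.
	 */
	pm_runtime_put_autosuspend(dev);
}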
diff --git a/drivers/media/platform/chips-media/wave5/wave5-vpu.h b/drivers/media/platform/chips-media/wave5/wave5-vpu.h
index 3847332551fc..5943bdaa9c4c 100644
--- a/drivers/media/platform/chips-media/wave5/wave5-vpu.h
+++ b/drivers/media/platform/chips-media/wave5/wave5-vpu.h
@@ -46,6 +46,11 @@ static inline struct vpu_instance *wave5_to_vpu_inst(struct v4l2_fh *vfh)
return container_of(vfh, struct vpu_instance, v4l2_fh);
}
+static inline struct vpu_instance *file_to_vpu_inst(struct file *filp)
+{
+ return wave5_to_vpu_inst(file_to_v4l2_fh(filp));
+}
+
static inline struct vpu_instance *wave5_ctrl_to_vpu_inst(struct v4l2_ctrl *vctrl)
{
return container_of(vctrl->handler, struct vpu_instance, v4l2_ctrl_hdl);
diff --git a/drivers/media/platform/imagination/e5010-jpeg-enc.c b/drivers/media/platform/imagination/e5010-jpeg-enc.c
index ae868d9f73e1..c4e0097cb8b7 100644
--- a/drivers/media/platform/imagination/e5010-jpeg-enc.c
+++ b/drivers/media/platform/imagination/e5010-jpeg-enc.c
@@ -253,7 +253,7 @@ static int e5010_enum_fmt(struct file *file, void *priv, struct v4l2_fmtdesc *f)
{
int i, index = 0;
struct e5010_fmt *fmt = NULL;
- struct e5010_context *ctx = file->private_data;
+ struct e5010_context *ctx = to_e5010_context(file);
if (!V4L2_TYPE_IS_MULTIPLANAR(f->type)) {
v4l2_err(&ctx->e5010->v4l2_dev, "ENUMFMT with Invalid type: %d\n", f->type);
@@ -279,7 +279,7 @@ static int e5010_enum_fmt(struct file *file, void *priv, struct v4l2_fmtdesc *f)
static int e5010_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct e5010_context *ctx = file->private_data;
+ struct e5010_context *ctx = to_e5010_context(file);
struct e5010_q_data *queue;
int i;
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
@@ -380,14 +380,14 @@ static int e5010_jpeg_try_fmt(struct v4l2_format *f, struct e5010_context *ctx)
static int e5010_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct e5010_context *ctx = file->private_data;
+ struct e5010_context *ctx = to_e5010_context(file);
return e5010_jpeg_try_fmt(f, ctx);
}
static int e5010_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct e5010_context *ctx = file->private_data;
+ struct e5010_context *ctx = to_e5010_context(file);
struct vb2_queue *vq;
int ret = 0, i = 0;
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
@@ -462,7 +462,7 @@ static int e5010_enum_framesizes(struct file *file, void *priv, struct v4l2_frms
static int e5010_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
- struct e5010_context *ctx = file->private_data;
+ struct e5010_context *ctx = to_e5010_context(file);
struct e5010_q_data *queue;
if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
@@ -490,7 +490,7 @@ static int e5010_g_selection(struct file *file, void *fh, struct v4l2_selection
static int e5010_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
- struct e5010_context *ctx = file->private_data;
+ struct e5010_context *ctx = to_e5010_context(file);
struct e5010_q_data *queue;
struct vb2_queue *vq;
struct v4l2_rect base_rect;
@@ -742,8 +742,7 @@ static int e5010_open(struct file *file)
}
v4l2_fh_init(&ctx->fh, vdev);
- file->private_data = ctx;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
ctx->e5010 = e5010;
ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(e5010->m2m_dev, ctx, queue_init);
@@ -770,7 +769,7 @@ static int e5010_open(struct file *file)
err_ctrls_setup:
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
exit:
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
mutex_unlock(&e5010->mutex);
free:
@@ -781,13 +780,13 @@ free:
static int e5010_release(struct file *file)
{
struct e5010_dev *e5010 = video_drvdata(file);
- struct e5010_context *ctx = file->private_data;
+ struct e5010_context *ctx = to_e5010_context(file);
dprintk(e5010, 1, "Releasing instance: 0x%p, m2m_ctx: 0x%p\n", ctx, ctx->fh.m2m_ctx);
mutex_lock(&e5010->mutex);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
mutex_unlock(&e5010->mutex);
@@ -1262,7 +1261,7 @@ static void e5010_buf_queue(struct vb2_buffer *vb)
static int e5010_encoder_cmd(struct file *file, void *priv,
struct v4l2_encoder_cmd *cmd)
{
- struct e5010_context *ctx = file->private_data;
+ struct e5010_context *ctx = to_e5010_context(file);
int ret;
struct vb2_queue *cap_vq;
diff --git a/drivers/media/platform/imagination/e5010-jpeg-enc.h b/drivers/media/platform/imagination/e5010-jpeg-enc.h
index 71f49ead6898..da57bc1baa46 100644
--- a/drivers/media/platform/imagination/e5010-jpeg-enc.h
+++ b/drivers/media/platform/imagination/e5010-jpeg-enc.h
@@ -120,6 +120,11 @@ struct e5010_context {
u8 chroma_qp[QP_TABLE_SIZE];
};
+static inline struct e5010_context *to_e5010_context(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct e5010_context, fh);
+}
+
/*
* Buffer structure
* Contains info for all buffers
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index 5188f3189096..e07e57d4206b 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -142,6 +142,11 @@ struct deinterlace_ctx {
struct dma_interleaved_template *xt;
};
+static inline struct deinterlace_ctx *file_to_ctx(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct deinterlace_ctx, fh);
+}
+
/*
* mem2mem callbacks
*/
@@ -512,13 +517,13 @@ static int vidioc_g_fmt(struct deinterlace_ctx *ctx, struct v4l2_format *f)
static int vidioc_g_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
- return vidioc_g_fmt(priv, f);
+ return vidioc_g_fmt(file_to_ctx(file), f);
}
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- return vidioc_g_fmt(priv, f);
+ return vidioc_g_fmt(file_to_ctx(file), f);
}
static int vidioc_try_fmt(struct v4l2_format *f, struct deinterlace_fmt *fmt)
@@ -539,8 +544,8 @@ static int vidioc_try_fmt(struct v4l2_format *f, struct deinterlace_fmt *fmt)
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
+ struct deinterlace_ctx *ctx = file_to_ctx(file);
struct deinterlace_fmt *fmt;
- struct deinterlace_ctx *ctx = priv;
fmt = find_format(f);
if (!fmt || !(fmt->types & MEM2MEM_CAPTURE))
@@ -633,20 +638,20 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
ret = vidioc_try_fmt_vid_cap(file, priv, f);
if (ret)
return ret;
- return vidioc_s_fmt(priv, f);
+ return vidioc_s_fmt(file_to_ctx(file), f);
}
static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct deinterlace_ctx *ctx = priv;
+ struct deinterlace_ctx *ctx = file_to_ctx(file);
int ret;
ret = vidioc_try_fmt_vid_out(file, priv, f);
if (ret)
return ret;
- ret = vidioc_s_fmt(priv, f);
+ ret = vidioc_s_fmt(ctx, f);
if (!ret)
ctx->colorspace = f->fmt.pix.colorspace;
@@ -656,8 +661,8 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
static int vidioc_streamon(struct file *file, void *priv,
enum v4l2_buf_type type)
{
+ struct deinterlace_ctx *ctx = file_to_ctx(file);
struct deinterlace_q_data *s_q_data, *d_q_data;
- struct deinterlace_ctx *ctx = priv;
s_q_data = get_q_data(V4L2_BUF_TYPE_VIDEO_OUTPUT);
d_q_data = get_q_data(V4L2_BUF_TYPE_VIDEO_CAPTURE);
@@ -842,7 +847,6 @@ static int deinterlace_open(struct file *file)
return -ENOMEM;
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
ctx->dev = pcdev;
ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(pcdev->m2m_dev, ctx, &queue_init);
@@ -861,7 +865,7 @@ static int deinterlace_open(struct file *file)
}
ctx->colorspace = V4L2_COLORSPACE_REC709;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
dprintk(pcdev, "Created instance %p, m2m_ctx: %p\n",
ctx, ctx->fh.m2m_ctx);
@@ -872,11 +876,11 @@ static int deinterlace_open(struct file *file)
static int deinterlace_release(struct file *file)
{
struct deinterlace_dev *pcdev = video_drvdata(file);
- struct deinterlace_ctx *ctx = file->private_data;
+ struct deinterlace_ctx *ctx = file_to_ctx(file);
dprintk(pcdev, "Releasing instance %p\n", ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
kfree(ctx->xt);
diff --git a/drivers/media/platform/marvell/cafe-driver.c b/drivers/media/platform/marvell/cafe-driver.c
index ef810249def6..f9796de92aa7 100644
--- a/drivers/media/platform/marvell/cafe-driver.c
+++ b/drivers/media/platform/marvell/cafe-driver.c
@@ -14,7 +14,7 @@
* Written by Jonathan Corbet, corbet@lwn.net.
*
* v4l2_device/v4l2_subdev conversion by:
- * Copyright (C) 2009 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2009 Hans Verkuil <hverkuil@kernel.org>
*/
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
index 7eb12449b63a..6268d651bdcf 100644
--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
@@ -119,9 +119,9 @@ static inline struct mtk_jpeg_ctx *ctrl_to_ctx(struct v4l2_ctrl *ctrl)
return container_of(ctrl->handler, struct mtk_jpeg_ctx, ctrl_hdl);
}
-static inline struct mtk_jpeg_ctx *mtk_jpeg_fh_to_ctx(struct v4l2_fh *fh)
+static inline struct mtk_jpeg_ctx *mtk_jpeg_file_to_ctx(struct file *filp)
{
- return container_of(fh, struct mtk_jpeg_ctx, fh);
+ return container_of(file_to_v4l2_fh(filp), struct mtk_jpeg_ctx, fh);
}
static inline struct mtk_jpeg_src_buf *mtk_jpeg_vb2_to_srcbuf(
@@ -212,7 +212,7 @@ static int mtk_jpeg_enum_fmt(struct mtk_jpeg_fmt *mtk_jpeg_formats, int n,
static int mtk_jpeg_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_file_to_ctx(file);
struct mtk_jpeg_dev *jpeg = ctx->jpeg;
return mtk_jpeg_enum_fmt(jpeg->variant->formats,
@@ -223,7 +223,7 @@ static int mtk_jpeg_enum_fmt_vid_cap(struct file *file, void *priv,
static int mtk_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_file_to_ctx(file);
struct mtk_jpeg_dev *jpeg = ctx->jpeg;
return mtk_jpeg_enum_fmt(jpeg->variant->formats,
@@ -305,7 +305,7 @@ static int mtk_jpeg_g_fmt_vid_mplane(struct file *file, void *priv,
struct vb2_queue *vq;
struct mtk_jpeg_q_data *q_data = NULL;
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
- struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_file_to_ctx(file);
struct mtk_jpeg_dev *jpeg = ctx->jpeg;
int i;
@@ -351,7 +351,7 @@ static int mtk_jpeg_g_fmt_vid_mplane(struct file *file, void *priv,
static int mtk_jpeg_try_fmt_vid_cap_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_file_to_ctx(file);
struct mtk_jpeg_dev *jpeg = ctx->jpeg;
struct mtk_jpeg_fmt *fmt;
@@ -380,7 +380,7 @@ static int mtk_jpeg_try_fmt_vid_cap_mplane(struct file *file, void *priv,
static int mtk_jpeg_try_fmt_vid_out_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_file_to_ctx(file);
struct mtk_jpeg_dev *jpeg = ctx->jpeg;
struct mtk_jpeg_fmt *fmt;
@@ -470,7 +470,7 @@ static int mtk_jpeg_s_fmt_vid_out_mplane(struct file *file, void *priv,
if (ret)
return ret;
- return mtk_jpeg_s_fmt_mplane(mtk_jpeg_fh_to_ctx(priv), f,
+ return mtk_jpeg_s_fmt_mplane(mtk_jpeg_file_to_ctx(file), f,
MTK_JPEG_FMT_FLAG_OUTPUT);
}
@@ -483,7 +483,7 @@ static int mtk_jpeg_s_fmt_vid_cap_mplane(struct file *file, void *priv,
if (ret)
return ret;
- return mtk_jpeg_s_fmt_mplane(mtk_jpeg_fh_to_ctx(priv), f,
+ return mtk_jpeg_s_fmt_mplane(mtk_jpeg_file_to_ctx(file), f,
MTK_JPEG_FMT_FLAG_CAPTURE);
}
@@ -512,7 +512,7 @@ static int mtk_jpeg_subscribe_event(struct v4l2_fh *fh,
static int mtk_jpeg_enc_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_file_to_ctx(file);
if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
@@ -537,7 +537,7 @@ static int mtk_jpeg_enc_g_selection(struct file *file, void *priv,
static int mtk_jpeg_dec_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_file_to_ctx(file);
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -566,7 +566,7 @@ static int mtk_jpeg_dec_g_selection(struct file *file, void *priv,
static int mtk_jpeg_enc_s_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_file_to_ctx(file);
if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
@@ -588,8 +588,8 @@ static int mtk_jpeg_enc_s_selection(struct file *file, void *priv,
static int mtk_jpeg_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
- struct v4l2_fh *fh = file->private_data;
- struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(priv);
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_file_to_ctx(file);
struct vb2_queue *vq;
struct vb2_buffer *vb;
struct mtk_jpeg_src_buf *jpeg_src_buf;
@@ -1171,8 +1171,7 @@ static int mtk_jpeg_open(struct file *file)
INIT_LIST_HEAD(&ctx->dst_done_queue);
spin_lock_init(&ctx->done_queue_lock);
v4l2_fh_init(&ctx->fh, vfd);
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
ctx->jpeg = jpeg;
ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(jpeg->m2m_dev, ctx,
@@ -1197,7 +1196,7 @@ static int mtk_jpeg_open(struct file *file)
return 0;
error:
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
mutex_unlock(&jpeg->lock);
free:
@@ -1208,12 +1207,12 @@ free:
static int mtk_jpeg_release(struct file *file)
{
struct mtk_jpeg_dev *jpeg = video_drvdata(file);
- struct mtk_jpeg_ctx *ctx = mtk_jpeg_fh_to_ctx(file->private_data);
+ struct mtk_jpeg_ctx *ctx = mtk_jpeg_file_to_ctx(file);
mutex_lock(&jpeg->lock);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
mutex_unlock(&jpeg->lock);
diff --git a/drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c b/drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c
index d0fd77dcf8e2..03c07948dfdd 100644
--- a/drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c
+++ b/drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c
@@ -348,9 +348,9 @@ static int mtk_mdp_try_crop(struct mtk_mdp_ctx *ctx, u32 type,
return 0;
}
-static inline struct mtk_mdp_ctx *fh_to_ctx(struct v4l2_fh *fh)
+static inline struct mtk_mdp_ctx *file_to_ctx(struct file *filp)
{
- return container_of(fh, struct mtk_mdp_ctx, fh);
+ return container_of(file_to_v4l2_fh(filp), struct mtk_mdp_ctx, fh);
}
static inline struct mtk_mdp_ctx *ctrl_to_ctx(struct v4l2_ctrl *ctrl)
@@ -589,7 +589,7 @@ static const struct vb2_ops mtk_mdp_m2m_qops = {
static int mtk_mdp_m2m_querycap(struct file *file, void *fh,
struct v4l2_capability *cap)
{
- struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+ struct mtk_mdp_ctx *ctx = file_to_ctx(file);
struct mtk_mdp_dev *mdp = ctx->mdp_dev;
strscpy(cap->driver, MTK_MDP_MODULE_NAME, sizeof(cap->driver));
@@ -627,7 +627,7 @@ static int mtk_mdp_m2m_enum_fmt_vid_out(struct file *file, void *priv,
static int mtk_mdp_m2m_g_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+ struct mtk_mdp_ctx *ctx = file_to_ctx(file);
struct mtk_mdp_frame *frame;
struct v4l2_pix_format_mplane *pix_mp;
int i;
@@ -666,7 +666,7 @@ static int mtk_mdp_m2m_g_fmt_mplane(struct file *file, void *fh,
static int mtk_mdp_m2m_try_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+ struct mtk_mdp_ctx *ctx = file_to_ctx(file);
if (!mtk_mdp_try_fmt_mplane(ctx, f))
return -EINVAL;
@@ -676,7 +676,7 @@ static int mtk_mdp_m2m_try_fmt_mplane(struct file *file, void *fh,
static int mtk_mdp_m2m_s_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+ struct mtk_mdp_ctx *ctx = file_to_ctx(file);
struct vb2_queue *vq;
struct mtk_mdp_frame *frame;
struct v4l2_pix_format_mplane *pix_mp;
@@ -722,7 +722,7 @@ static int mtk_mdp_m2m_s_fmt_mplane(struct file *file, void *fh,
static int mtk_mdp_m2m_reqbufs(struct file *file, void *fh,
struct v4l2_requestbuffers *reqbufs)
{
- struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+ struct mtk_mdp_ctx *ctx = file_to_ctx(file);
return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
}
@@ -730,7 +730,7 @@ static int mtk_mdp_m2m_reqbufs(struct file *file, void *fh,
static int mtk_mdp_m2m_streamon(struct file *file, void *fh,
enum v4l2_buf_type type)
{
- struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+ struct mtk_mdp_ctx *ctx = file_to_ctx(file);
int ret;
if (!mtk_mdp_ctx_state_is_set(ctx, MTK_MDP_VPU_INIT)) {
@@ -768,8 +768,8 @@ static inline bool mtk_mdp_is_target_crop(u32 target)
static int mtk_mdp_m2m_g_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
+ struct mtk_mdp_ctx *ctx = file_to_ctx(file);
struct mtk_mdp_frame *frame;
- struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
bool valid = false;
if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
@@ -835,8 +835,8 @@ static int mtk_mdp_check_scaler_ratio(struct mtk_mdp_variant *var, int src_w,
static int mtk_mdp_m2m_s_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
+ struct mtk_mdp_ctx *ctx = file_to_ctx(file);
struct mtk_mdp_frame *frame;
- struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
struct v4l2_rect new_r;
struct mtk_mdp_variant *variant = ctx->mdp_dev->variant;
int ret;
@@ -1065,14 +1065,13 @@ static int mtk_mdp_m2m_open(struct file *file)
mutex_init(&ctx->slock);
ctx->id = mdp->id_counter++;
v4l2_fh_init(&ctx->fh, vfd);
- file->private_data = &ctx->fh;
ret = mtk_mdp_ctrls_create(ctx);
if (ret)
goto error_ctrls;
/* Use separate control handler per file handle */
ctx->fh.ctrl_handler = &ctx->ctrl_handler;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
INIT_LIST_HEAD(&ctx->list);
ctx->mdp_dev = mdp;
@@ -1126,7 +1125,7 @@ err_load_vpu:
error_m2m_ctx:
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
error_ctrls:
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
mutex_unlock(&mdp->lock);
err_lock:
@@ -1137,14 +1136,14 @@ err_lock:
static int mtk_mdp_m2m_release(struct file *file)
{
- struct mtk_mdp_ctx *ctx = fh_to_ctx(file->private_data);
+ struct mtk_mdp_ctx *ctx = file_to_ctx(file);
struct mtk_mdp_dev *mdp = ctx->mdp_dev;
flush_workqueue(mdp->job_wq);
mutex_lock(&mdp->lock);
v4l2_m2m_ctx_release(ctx->m2m_ctx);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
mtk_mdp_vpu_deinit(&ctx->vpu);
mdp->ctx_num--;
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
index 683c066ed975..7fcb2fbdd64e 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
@@ -1530,6 +1530,9 @@ static const struct of_device_id mdp_comp_dt_ids[] __maybe_unused = {
}, {
.compatible = "mediatek,mt8195-mdp3-tcc",
.data = (void *)MDP_COMP_TYPE_TCC,
+ }, {
+ .compatible = "mediatek,mt8188-mdp3-rdma",
+ .data = (void *)MDP_COMP_TYPE_RDMA,
},
{}
};
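
The new mt8188 RDMA entry follows the usual of_device_id pattern of stashing a
per-compatible payload in .data and recovering it at probe time. A minimal,
self-contained sketch of that pattern (compatible strings and types are
illustrative, not the mdp3 ones):

#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/types.h>

enum example_comp_type { EXAMPLE_COMP_RDMA, EXAMPLE_COMP_TCC };

static const struct of_device_id example_dt_ids[] = {
	{ .compatible = "vendor,example-rdma", .data = (void *)EXAMPLE_COMP_RDMA },
	{ .compatible = "vendor,example-tcc",  .data = (void *)EXAMPLE_COMP_TCC },
	{ /* sentinel */ }
};

static int example_probe(struct platform_device *pdev)
{
	/* Recover the payload stashed in .data for the matched compatible */
	enum example_comp_type type =
		(enum example_comp_type)(uintptr_t)device_get_match_data(&pdev->dev);

	dev_info(&pdev->dev, "component type %d\n", type);
	return 0;
}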
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
index 8de2c8e4d333..6559d72d5d42 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
@@ -282,7 +282,7 @@ static int mdp_probe(struct platform_device *pdev)
}
mdp->rproc_handle = scp_get_rproc(mdp->scp);
- dev_dbg(&pdev->dev, "MDP rproc_handle: %pK", mdp->rproc_handle);
+ dev_dbg(&pdev->dev, "MDP rproc_handle: %p", mdp->rproc_handle);
mutex_init(&mdp->vpu_lock);
mutex_init(&mdp->m2m_lock);
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
index 59ce5cce0698..9ef956b565a7 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
@@ -10,9 +10,9 @@
#include <media/videobuf2-dma-contig.h>
#include "mtk-mdp3-m2m.h"
-static inline struct mdp_m2m_ctx *fh_to_ctx(struct v4l2_fh *fh)
+static inline struct mdp_m2m_ctx *file_to_ctx(struct file *filp)
{
- return container_of(fh, struct mdp_m2m_ctx, fh);
+ return container_of(file_to_v4l2_fh(filp), struct mdp_m2m_ctx, fh);
}
static inline struct mdp_m2m_ctx *ctrl_to_ctx(struct v4l2_ctrl *ctrl)
@@ -285,7 +285,7 @@ static int mdp_m2m_querycap(struct file *file, void *fh,
static int mdp_m2m_enum_fmt_mplane(struct file *file, void *fh,
struct v4l2_fmtdesc *f)
{
- struct mdp_m2m_ctx *ctx = fh_to_ctx(fh);
+ struct mdp_m2m_ctx *ctx = file_to_ctx(file);
return mdp_enum_fmt_mplane(ctx->mdp_dev, f);
}
@@ -293,7 +293,7 @@ static int mdp_m2m_enum_fmt_mplane(struct file *file, void *fh,
static int mdp_m2m_g_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct mdp_m2m_ctx *ctx = fh_to_ctx(fh);
+ struct mdp_m2m_ctx *ctx = file_to_ctx(file);
struct mdp_frame *frame;
struct v4l2_pix_format_mplane *pix_mp;
@@ -311,7 +311,7 @@ static int mdp_m2m_g_fmt_mplane(struct file *file, void *fh,
static int mdp_m2m_s_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct mdp_m2m_ctx *ctx = fh_to_ctx(fh);
+ struct mdp_m2m_ctx *ctx = file_to_ctx(file);
struct mdp_frame *frame = ctx_get_frame(ctx, f->type);
struct mdp_frame *capture;
const struct mdp_format *fmt;
@@ -354,7 +354,7 @@ static int mdp_m2m_s_fmt_mplane(struct file *file, void *fh,
static int mdp_m2m_try_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct mdp_m2m_ctx *ctx = fh_to_ctx(fh);
+ struct mdp_m2m_ctx *ctx = file_to_ctx(file);
if (!mdp_try_fmt_mplane(ctx->mdp_dev, f, &ctx->curr_param, ctx->id))
return -EINVAL;
@@ -365,7 +365,7 @@ static int mdp_m2m_try_fmt_mplane(struct file *file, void *fh,
static int mdp_m2m_g_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
- struct mdp_m2m_ctx *ctx = fh_to_ctx(fh);
+ struct mdp_m2m_ctx *ctx = file_to_ctx(file);
struct mdp_frame *frame;
bool valid = false;
@@ -417,7 +417,7 @@ static int mdp_m2m_g_selection(struct file *file, void *fh,
static int mdp_m2m_s_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
- struct mdp_m2m_ctx *ctx = fh_to_ctx(fh);
+ struct mdp_m2m_ctx *ctx = file_to_ctx(file);
struct mdp_frame *frame = ctx_get_frame(ctx, s->type);
struct mdp_frame *capture;
struct v4l2_rect r;
@@ -585,14 +585,13 @@ static int mdp_m2m_open(struct file *file)
ctx->mdp_dev = mdp;
v4l2_fh_init(&ctx->fh, vdev);
- file->private_data = &ctx->fh;
ret = mdp_m2m_ctrls_create(ctx);
if (ret)
goto err_exit_fh;
/* Use separate control handler per file handle */
ctx->fh.ctrl_handler = &ctx->ctrl_handler;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
mutex_init(&ctx->ctx_lock);
ctx->m2m_ctx = v4l2_m2m_ctx_init(mdp->m2m_dev, ctx, mdp_m2m_queue_init);
@@ -629,7 +628,7 @@ err_release_m2m_ctx:
v4l2_m2m_ctx_release(ctx->m2m_ctx);
err_release_handler:
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
err_exit_fh:
v4l2_fh_exit(&ctx->fh);
ida_free(&mdp->mdp_ida, ctx->id);
@@ -643,7 +642,7 @@ err_free_ctx:
static int mdp_m2m_release(struct file *file)
{
- struct mdp_m2m_ctx *ctx = fh_to_ctx(file->private_data);
+ struct mdp_m2m_ctx *ctx = file_to_ctx(file);
struct mdp_dev *mdp = video_drvdata(file);
struct device *dev = &mdp->pdev->dev;
@@ -653,7 +652,7 @@ static int mdp_m2m_release(struct file *file)
mdp_vpu_put_locked(mdp);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
ida_free(&mdp->mdp_ida, ctx->id);
mutex_unlock(&mdp->m2m_lock);
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c
index da3a892ad867..fae3e1ad2df7 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c
@@ -221,7 +221,7 @@ int mdp_vpu_dev_init(struct mdp_vpu_dev *vpu, struct mtk_scp *scp,
}
dev_dbg(&mdp->pdev->dev,
- "VPU param:%pK pa:%pad sz:%zx, work:%pK pa:%pad sz:%zx, config:%pK pa:%pad sz:%zx",
+ "VPU param:%p pa:%pad sz:%zx, work:%p pa:%pad sz:%zx, config:%p pa:%pad sz:%zx",
vpu->param, &vpu->param_addr, vpu->param_size,
vpu->work, &vpu->work_addr, vpu->work_size,
vpu->config, &vpu->config_addr, vpu->config_size);
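
A brief note on the %pK to %p conversions above: plain %p has printed hashed
pointer values by default for several kernel releases, so these debug messages
no longer need the %pK restriction to avoid exposing raw kernel addresses.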
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c
index 98838217b97d..d691bd533103 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c
@@ -87,7 +87,7 @@ static int stateful_try_decoder_cmd(struct file *file, void *priv, struct v4l2_d
static int stateful_decoder_cmd(struct file *file, void *priv, struct v4l2_decoder_cmd *cmd)
{
- struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = file_to_dec_ctx(file);
struct vb2_queue *src_vq, *dst_vq;
int ret;
@@ -132,7 +132,7 @@ static int stateless_try_decoder_cmd(struct file *file, void *priv, struct v4l2_
static int stateless_decoder_cmd(struct file *file, void *priv, struct v4l2_decoder_cmd *cmd)
{
- struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = file_to_dec_ctx(file);
int ret;
ret = v4l2_m2m_ioctl_stateless_try_decoder_cmd(file, priv, cmd);
@@ -158,7 +158,7 @@ static int stateless_decoder_cmd(struct file *file, void *priv, struct v4l2_deco
static int vidioc_try_decoder_cmd(struct file *file, void *priv, struct v4l2_decoder_cmd *cmd)
{
- struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = file_to_dec_ctx(file);
if (ctx->dev->vdec_pdata->uses_stateless_api)
return stateless_try_decoder_cmd(file, priv, cmd);
@@ -168,7 +168,7 @@ static int vidioc_try_decoder_cmd(struct file *file, void *priv, struct v4l2_dec
static int vidioc_decoder_cmd(struct file *file, void *priv, struct v4l2_decoder_cmd *cmd)
{
- struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = file_to_dec_ctx(file);
if (ctx->dev->vdec_pdata->uses_stateless_api)
return stateless_decoder_cmd(file, priv, cmd);
@@ -233,7 +233,7 @@ void mtk_vcodec_dec_set_default_params(struct mtk_vcodec_dec_ctx *ctx)
static int vidioc_vdec_qbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
- struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = file_to_dec_ctx(file);
if (ctx->state == MTK_STATE_ABORT) {
mtk_v4l2_vdec_err(ctx, "[%d] Call on QBUF after unrecoverable error", ctx->id);
@@ -246,7 +246,7 @@ static int vidioc_vdec_qbuf(struct file *file, void *priv,
static int vidioc_vdec_dqbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
- struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = file_to_dec_ctx(file);
if (ctx->state == MTK_STATE_ABORT) {
mtk_v4l2_vdec_err(ctx, "[%d] Call on DQBUF after unrecoverable error", ctx->id);
@@ -259,7 +259,7 @@ static int vidioc_vdec_dqbuf(struct file *file, void *priv,
static int vidioc_vdec_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = file_to_dec_ctx(file);
struct device *dev = &ctx->dev->plat_dev->dev;
strscpy(cap->driver, dev->driver->name, sizeof(cap->driver));
@@ -354,7 +354,7 @@ static int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
const struct mtk_video_fmt *fmt;
- struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = file_to_dec_ctx(file);
const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
fmt = mtk_vdec_find_format(f, dec_pdata);
@@ -372,7 +372,7 @@ static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
{
struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
const struct mtk_video_fmt *fmt;
- struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = file_to_dec_ctx(file);
const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
fmt = mtk_vdec_find_format(f, dec_pdata);
@@ -393,7 +393,7 @@ static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
static int vidioc_vdec_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = file_to_dec_ctx(file);
struct mtk_q_data *q_data;
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
@@ -442,7 +442,7 @@ static int vidioc_vdec_g_selection(struct file *file, void *priv,
static int vidioc_vdec_s_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = file_to_dec_ctx(file);
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -464,7 +464,7 @@ static int vidioc_vdec_s_selection(struct file *file, void *priv,
static int vidioc_vdec_s_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = file_to_dec_ctx(file);
struct v4l2_pix_format_mplane *pix_mp;
struct mtk_q_data *q_data;
int ret = 0;
@@ -594,7 +594,7 @@ static int vidioc_enum_framesizes(struct file *file, void *priv,
struct v4l2_frmsizeenum *fsize)
{
int i = 0;
- struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = file_to_dec_ctx(file);
const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
if (fsize->index != 0)
@@ -623,10 +623,10 @@ static int vidioc_enum_framesizes(struct file *file, void *priv,
return -EINVAL;
}
-static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, void *priv,
+static int vidioc_enum_fmt(struct file *file, struct v4l2_fmtdesc *f,
bool output_queue)
{
- struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = file_to_dec_ctx(file);
const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
const struct mtk_video_fmt *fmt;
int i, j = 0;
@@ -660,19 +660,19 @@ static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, void *priv,
static int vidioc_vdec_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- return vidioc_enum_fmt(f, priv, false);
+ return vidioc_enum_fmt(file, f, false);
}
static int vidioc_vdec_enum_fmt_vid_out(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- return vidioc_enum_fmt(f, priv, true);
+ return vidioc_enum_fmt(file, f, true);
}
static int vidioc_vdec_g_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = file_to_dec_ctx(file);
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
struct vb2_queue *vq;
struct mtk_q_data *q_data;
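
The hunks above are one mechanical conversion: each ioctl handler stops dereferencing the opaque priv cookie and derives its context from the struct file instead. A minimal before/after sketch of the pattern, assuming file_to_v4l2_fh() is the core helper this series converts drivers to (mtk_vdec_do_g_fmt() is a hypothetical stand-in for the handler body):

	/* Sketch only; the callee name is illustrative. */
	static int vidioc_vdec_g_fmt_sketch(struct file *file, void *priv,
					    struct v4l2_format *f)
	{
		/* Old: struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv); */
		struct mtk_vcodec_dec_ctx *ctx = file_to_dec_ctx(file);

		return mtk_vdec_do_g_fmt(ctx, f);
	}
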
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c
index 9247d92d431d..46d176e6de63 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c
@@ -206,8 +206,7 @@ static int fops_vcodec_open(struct file *file)
mutex_lock(&dev->dev_mutex);
ctx->id = dev->id_counter++;
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
INIT_LIST_HEAD(&ctx->list);
ctx->dev = dev;
if (ctx->dev->vdec_pdata->is_subdev_supported) {
@@ -283,7 +282,7 @@ err_load_fw:
err_m2m_ctx_init:
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
err_ctrls_setup:
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
mutex_unlock(&dev->dev_mutex);
@@ -294,7 +293,7 @@ err_ctrls_setup:
static int fops_vcodec_release(struct file *file)
{
struct mtk_vcodec_dec_dev *dev = video_drvdata(file);
- struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(file->private_data);
+ struct mtk_vcodec_dec_ctx *ctx = file_to_dec_ctx(file);
mtk_v4l2_vdec_dbg(0, ctx, "[%d] decoder", ctx->id);
mutex_lock(&dev->dev_mutex);
@@ -308,7 +307,7 @@ static int fops_vcodec_release(struct file *file)
v4l2_m2m_ctx_release(ctx->m2m_ctx);
mtk_vcodec_dec_release(ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
index aececca7ecf8..d047d7c580fb 100644
--- a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
@@ -314,6 +314,11 @@ static inline struct mtk_vcodec_dec_ctx *fh_to_dec_ctx(struct v4l2_fh *fh)
return container_of(fh, struct mtk_vcodec_dec_ctx, fh);
}
+static inline struct mtk_vcodec_dec_ctx *file_to_dec_ctx(struct file *filp)
+{
+ return fh_to_dec_ctx(file_to_v4l2_fh(filp));
+}
+
static inline struct mtk_vcodec_dec_ctx *ctrl_to_dec_ctx(struct v4l2_ctrl *ctrl)
{
return container_of(ctrl->handler, struct mtk_vcodec_dec_ctx, ctrl_hdl);
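
Two equivalent helper shapes appear in this series: the decoder header above keeps fh_to_dec_ctx() and composes it with file_to_v4l2_fh(), while the encoder header below folds the lookup into a single container_of(). Both reduce to the same expression; a generic sketch, with foo_ctx standing in for any driver context that embeds a struct v4l2_fh named fh:

	static inline struct foo_ctx *file_to_foo_ctx(struct file *filp)
	{
		/* Equivalent to foo_fh_to_ctx(file_to_v4l2_fh(filp)). */
		return container_of(file_to_v4l2_fh(filp), struct foo_ctx, fh);
	}
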
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
index a01dc25a7699..d815e962ab89 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
@@ -159,7 +159,7 @@ static int vidioc_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
const struct mtk_video_fmt *fmt;
- struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(fh);
+ struct mtk_vcodec_enc_ctx *ctx = file_to_enc_ctx(file);
if (fsize->index != 0)
return -EINVAL;
@@ -183,7 +183,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
const struct mtk_vcodec_enc_pdata *pdata =
- fh_to_enc_ctx(priv)->dev->venc_pdata;
+ file_to_enc_ctx(file)->dev->venc_pdata;
return vidioc_enum_fmt(f, pdata->capture_formats,
pdata->num_capture_formats);
@@ -193,15 +193,14 @@ static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
const struct mtk_vcodec_enc_pdata *pdata =
- fh_to_enc_ctx(priv)->dev->venc_pdata;
+ file_to_enc_ctx(file)->dev->venc_pdata;
return vidioc_enum_fmt(f, pdata->output_formats,
pdata->num_output_formats);
}
-static int mtk_vcodec_enc_get_chip_name(void *priv)
+static int mtk_vcodec_enc_get_chip_name(struct mtk_vcodec_enc_ctx *ctx)
{
- struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
struct device *dev = &ctx->dev->plat_dev->dev;
if (of_device_is_compatible(dev->of_node, "mediatek,mt8173-vcodec-enc"))
@@ -221,9 +220,9 @@ static int mtk_vcodec_enc_get_chip_name(void *priv)
static int vidioc_venc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = file_to_enc_ctx(file);
struct device *dev = &ctx->dev->plat_dev->dev;
- int platform_name = mtk_vcodec_enc_get_chip_name(priv);
+ int platform_name = mtk_vcodec_enc_get_chip_name(ctx);
strscpy(cap->driver, dev->driver->name, sizeof(cap->driver));
snprintf(cap->card, sizeof(cap->card), "MT%d video encoder", platform_name);
@@ -234,7 +233,7 @@ static int vidioc_venc_querycap(struct file *file, void *priv,
static int vidioc_venc_s_parm(struct file *file, void *priv,
struct v4l2_streamparm *a)
{
- struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = file_to_enc_ctx(file);
struct v4l2_fract *timeperframe = &a->parm.output.timeperframe;
if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
@@ -257,7 +256,7 @@ static int vidioc_venc_s_parm(struct file *file, void *priv,
static int vidioc_venc_g_parm(struct file *file, void *priv,
struct v4l2_streamparm *a)
{
- struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = file_to_enc_ctx(file);
if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
return -EINVAL;
@@ -414,7 +413,7 @@ static void mtk_venc_set_param(struct mtk_vcodec_enc_ctx *ctx,
static int vidioc_venc_s_fmt_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = file_to_enc_ctx(file);
const struct mtk_vcodec_enc_pdata *pdata = ctx->dev->venc_pdata;
struct vb2_queue *vq;
struct mtk_q_data *q_data = mtk_venc_get_q_data(ctx, f->type);
@@ -469,7 +468,7 @@ static int vidioc_venc_s_fmt_cap(struct file *file, void *priv,
static int vidioc_venc_s_fmt_out(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = file_to_enc_ctx(file);
const struct mtk_vcodec_enc_pdata *pdata = ctx->dev->venc_pdata;
struct vb2_queue *vq;
struct mtk_q_data *q_data = mtk_venc_get_q_data(ctx, f->type);
@@ -524,7 +523,7 @@ static int vidioc_venc_g_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
- struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = file_to_enc_ctx(file);
struct vb2_queue *vq;
struct mtk_q_data *q_data = mtk_venc_get_q_data(ctx, f->type);
int i;
@@ -557,7 +556,7 @@ static int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
const struct mtk_video_fmt *fmt;
- struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = file_to_enc_ctx(file);
const struct mtk_vcodec_enc_pdata *pdata = ctx->dev->venc_pdata;
fmt = mtk_venc_find_format(f->fmt.pix.pixelformat, pdata);
@@ -579,7 +578,7 @@ static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
const struct mtk_video_fmt *fmt;
- struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = file_to_enc_ctx(file);
const struct mtk_vcodec_enc_pdata *pdata = ctx->dev->venc_pdata;
fmt = mtk_venc_find_format(f->fmt.pix.pixelformat, pdata);
@@ -600,7 +599,7 @@ static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
static int vidioc_venc_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = file_to_enc_ctx(file);
struct mtk_q_data *q_data = mtk_venc_get_q_data(ctx, s->type);
if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
@@ -630,7 +629,7 @@ static int vidioc_venc_g_selection(struct file *file, void *priv,
static int vidioc_venc_s_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = file_to_enc_ctx(file);
struct mtk_q_data *q_data = mtk_venc_get_q_data(ctx, s->type);
if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
@@ -655,7 +654,7 @@ static int vidioc_venc_s_selection(struct file *file, void *priv,
static int vidioc_venc_qbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
- struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = file_to_enc_ctx(file);
if (ctx->state == MTK_STATE_ABORT) {
mtk_v4l2_venc_err(ctx, "[%d] Call on QBUF after unrecoverable error",
@@ -669,7 +668,7 @@ static int vidioc_venc_qbuf(struct file *file, void *priv,
static int vidioc_venc_dqbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
- struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = file_to_enc_ctx(file);
int ret;
if (ctx->state == MTK_STATE_ABORT) {
@@ -707,7 +706,7 @@ static int vidioc_venc_dqbuf(struct file *file, void *priv,
static int vidioc_encoder_cmd(struct file *file, void *priv,
struct v4l2_encoder_cmd *cmd)
{
- struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = file_to_enc_ctx(file);
struct vb2_queue *src_vq, *dst_vq;
int ret;
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c
index a1e4483abcdb..fb1c3bdc2dae 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c
@@ -129,8 +129,7 @@ static int fops_vcodec_open(struct file *file)
*/
ctx->id = dev->id_counter++;
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
INIT_LIST_HEAD(&ctx->list);
ctx->dev = dev;
init_waitqueue_head(&ctx->queue[0]);
@@ -192,7 +191,7 @@ err_load_fw:
err_m2m_ctx_init:
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
err_ctrls_setup:
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
mutex_unlock(&dev->dev_mutex);
@@ -203,14 +202,14 @@ err_ctrls_setup:
static int fops_vcodec_release(struct file *file)
{
struct mtk_vcodec_enc_dev *dev = video_drvdata(file);
- struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(file->private_data);
+ struct mtk_vcodec_enc_ctx *ctx = file_to_enc_ctx(file);
mtk_v4l2_venc_dbg(1, ctx, "[%d] encoder", ctx->id);
mutex_lock(&dev->dev_mutex);
v4l2_m2m_ctx_release(ctx->m2m_ctx);
mtk_vcodec_enc_release(ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h
index 0bd85d0fb379..5b304a551236 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h
+++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h
@@ -217,9 +217,9 @@ struct mtk_vcodec_enc_dev {
struct mtk_vcodec_dbgfs dbgfs;
};
-static inline struct mtk_vcodec_enc_ctx *fh_to_enc_ctx(struct v4l2_fh *fh)
+static inline struct mtk_vcodec_enc_ctx *file_to_enc_ctx(struct file *filp)
{
- return container_of(fh, struct mtk_vcodec_enc_ctx, fh);
+ return container_of(file_to_v4l2_fh(filp), struct mtk_vcodec_enc_ctx, fh);
}
static inline struct mtk_vcodec_enc_ctx *ctrl_to_enc_ctx(struct v4l2_ctrl *ctrl)
diff --git a/drivers/media/platform/nvidia/tegra-vde/h264.c b/drivers/media/platform/nvidia/tegra-vde/h264.c
index 0e56a4331b0d..45f8f6904867 100644
--- a/drivers/media/platform/nvidia/tegra-vde/h264.c
+++ b/drivers/media/platform/nvidia/tegra-vde/h264.c
@@ -585,7 +585,6 @@ static int tegra_vde_decode_begin(struct tegra_vde *vde,
return 0;
put_runtime_pm:
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
unlock:
@@ -612,7 +611,6 @@ static void tegra_vde_decode_abort(struct tegra_vde *vde)
if (err)
dev_err(dev, "DEC end: Failed to assert HW reset: %d\n", err);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
mutex_unlock(&vde->lock);
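
The two hunks above drop pm_runtime_mark_last_busy() immediately before pm_runtime_put_autosuspend(), which only stays correct if the put path updates the last-busy timestamp itself — an assumption this sketch makes explicit:

	static void vde_put_autosuspend_sketch(struct device *dev)
	{
		/*
		 * Old idiom:
		 *	pm_runtime_mark_last_busy(dev);
		 *	pm_runtime_put_autosuspend(dev);
		 * New idiom, assuming pm_runtime_put_autosuspend() now
		 * marks the device busy internally before scheduling the
		 * autosuspend:
		 */
		pm_runtime_put_autosuspend(dev);
	}
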
diff --git a/drivers/media/platform/nvidia/tegra-vde/v4l2.c b/drivers/media/platform/nvidia/tegra-vde/v4l2.c
index e3726cab0c82..d94978ae2baf 100644
--- a/drivers/media/platform/nvidia/tegra-vde/v4l2.c
+++ b/drivers/media/platform/nvidia/tegra-vde/v4l2.c
@@ -46,9 +46,9 @@ static const struct v4l2_ctrl_config ctrl_cfgs[] = {
},
};
-static inline struct tegra_ctx *fh_to_tegra_ctx(struct v4l2_fh *fh)
+static inline struct tegra_ctx *file_to_tegra_ctx(struct file *file)
{
- return container_of(fh, struct tegra_ctx, fh);
+ return container_of(file_to_v4l2_fh(file), struct tegra_ctx, fh);
}
static void tegra_set_control_data(struct tegra_ctx *ctx, void *data, u32 id)
@@ -506,7 +506,7 @@ static int tegra_querycap(struct file *file, void *priv,
static int tegra_enum_decoded_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct tegra_ctx *ctx = fh_to_tegra_ctx(priv);
+ struct tegra_ctx *ctx = file_to_tegra_ctx(file);
if (WARN_ON(!ctx->coded_fmt_desc))
return -EINVAL;
@@ -522,7 +522,7 @@ static int tegra_enum_decoded_fmt(struct file *file, void *priv,
static int tegra_g_decoded_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct tegra_ctx *ctx = fh_to_tegra_ctx(priv);
+ struct tegra_ctx *ctx = file_to_tegra_ctx(file);
*f = ctx->decoded_fmt;
return 0;
@@ -531,8 +531,8 @@ static int tegra_g_decoded_fmt(struct file *file, void *priv,
static int tegra_try_decoded_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
+ struct tegra_ctx *ctx = file_to_tegra_ctx(file);
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
- struct tegra_ctx *ctx = fh_to_tegra_ctx(priv);
const struct tegra_coded_fmt_desc *coded_desc;
unsigned int i;
@@ -571,7 +571,7 @@ static int tegra_try_decoded_fmt(struct file *file, void *priv,
static int tegra_s_decoded_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct tegra_ctx *ctx = fh_to_tegra_ctx(priv);
+ struct tegra_ctx *ctx = file_to_tegra_ctx(file);
struct vb2_queue *vq;
int err;
@@ -593,7 +593,7 @@ static int tegra_s_decoded_fmt(struct file *file, void *priv,
static int tegra_enum_coded_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct tegra_ctx *ctx = fh_to_tegra_ctx(priv);
+ struct tegra_ctx *ctx = file_to_tegra_ctx(file);
const struct tegra_vde_soc *soc = ctx->vde->soc;
if (f->index >= soc->num_coded_fmts)
@@ -607,7 +607,7 @@ static int tegra_enum_coded_fmt(struct file *file, void *priv,
static int tegra_g_coded_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct tegra_ctx *ctx = fh_to_tegra_ctx(priv);
+ struct tegra_ctx *ctx = file_to_tegra_ctx(file);
*f = ctx->coded_fmt;
return 0;
@@ -631,7 +631,7 @@ static int tegra_try_coded_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
- struct tegra_ctx *ctx = fh_to_tegra_ctx(priv);
+ struct tegra_ctx *ctx = file_to_tegra_ctx(file);
const struct tegra_vde_soc *soc = ctx->vde->soc;
int size = pix_mp->plane_fmt[0].sizeimage;
const struct tegra_coded_fmt_desc *desc;
@@ -656,7 +656,7 @@ static int tegra_try_coded_fmt(struct file *file, void *priv,
static int tegra_s_coded_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct tegra_ctx *ctx = fh_to_tegra_ctx(priv);
+ struct tegra_ctx *ctx = file_to_tegra_ctx(file);
struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
const struct tegra_coded_fmt_desc *desc;
struct vb2_queue *peer_vq, *vq;
@@ -718,7 +718,7 @@ static int tegra_s_coded_fmt(struct file *file, void *priv,
static int tegra_enum_framesizes(struct file *file, void *priv,
struct v4l2_frmsizeenum *fsize)
{
- struct tegra_ctx *ctx = fh_to_tegra_ctx(priv);
+ struct tegra_ctx *ctx = file_to_tegra_ctx(file);
const struct tegra_coded_fmt_desc *fmt;
if (fsize->index)
@@ -832,14 +832,13 @@ static int tegra_open(struct file *file)
goto free_ctrls;
}
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
tegra_reset_coded_fmt(ctx);
- tegra_try_coded_fmt(file, file->private_data, &ctx->coded_fmt);
+ tegra_try_coded_fmt(file, &ctx->fh, &ctx->coded_fmt);
tegra_reset_decoded_fmt(ctx);
- tegra_try_decoded_fmt(file, file->private_data, &ctx->decoded_fmt);
+ tegra_try_decoded_fmt(file, &ctx->fh, &ctx->decoded_fmt);
return 0;
@@ -853,11 +852,11 @@ free_ctx:
static int tegra_release(struct file *file)
{
- struct v4l2_fh *fh = file->private_data;
- struct tegra_ctx *ctx = fh_to_tegra_ctx(fh);
+ struct tegra_ctx *ctx = file_to_tegra_ctx(file);
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
struct tegra_vde *vde = ctx->vde;
- v4l2_fh_del(fh);
+ v4l2_fh_del(fh, file);
v4l2_m2m_ctx_release(fh->m2m_ctx);
v4l2_ctrl_handler_free(&ctx->hdl);
v4l2_fh_exit(fh);
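
Taken together, the open() and release() hunks above show the updated v4l2_fh lifecycle: v4l2_fh_add() now takes the struct file and wires up file->private_data itself, and v4l2_fh_del() takes it to undo that. A condensed skeleton under that assumption (foo_ctx as in the earlier sketch; error paths elided):

	static int foo_open_sketch(struct file *file)
	{
		struct foo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

		if (!ctx)
			return -ENOMEM;

		v4l2_fh_init(&ctx->fh, video_devdata(file));
		v4l2_fh_add(&ctx->fh, file);	/* replaces the manual
						 * file->private_data store */
		return 0;
	}

	static int foo_release_sketch(struct file *file)
	{
		struct foo_ctx *ctx = file_to_foo_ctx(file);

		v4l2_fh_del(&ctx->fh, file);
		v4l2_fh_exit(&ctx->fh);
		kfree(ctx);
		return 0;
	}
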
diff --git a/drivers/media/platform/nxp/dw100/dw100.c b/drivers/media/platform/nxp/dw100/dw100.c
index 3d1db1121bf9..97744c7b7c03 100644
--- a/drivers/media/platform/nxp/dw100/dw100.c
+++ b/drivers/media/platform/nxp/dw100/dw100.c
@@ -266,7 +266,7 @@ static inline int dw100_dump_regs(struct seq_file *m)
static inline struct dw100_ctx *dw100_file2ctx(struct file *file)
{
- return container_of(file->private_data, struct dw100_ctx, fh);
+ return container_of(file_to_v4l2_fh(file), struct dw100_ctx, fh);
}
static struct dw100_q_data *dw100_get_q_data(struct dw100_ctx *ctx,
@@ -607,7 +607,6 @@ static int dw100_open(struct file *file)
mutex_init(&ctx->vq_mutex);
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
ctx->dw_dev = dw_dev;
ctx->q_data[DW100_QUEUE_SRC].fmt = &formats[0];
@@ -651,7 +650,7 @@ static int dw100_open(struct file *file)
goto err;
}
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
return 0;
@@ -668,7 +667,7 @@ static int dw100_release(struct file *file)
{
struct dw100_ctx *ctx = dw100_file2ctx(file);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->hdl);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
index 8681dd193033..df3ccdf767ba 100644
--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
+++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
@@ -644,9 +644,9 @@ static void print_mxc_buf(struct mxc_jpeg_dev *jpeg, struct vb2_buffer *buf,
}
}
-static inline struct mxc_jpeg_ctx *mxc_jpeg_fh_to_ctx(struct v4l2_fh *fh)
+static inline struct mxc_jpeg_ctx *mxc_jpeg_file_to_ctx(struct file *filp)
{
- return container_of(fh, struct mxc_jpeg_ctx, fh);
+ return container_of(file_to_v4l2_fh(filp), struct mxc_jpeg_ctx, fh);
}
static int enum_fmt(const struct mxc_jpeg_fmt *mxc_formats, int n,
@@ -1604,8 +1604,8 @@ end:
static int mxc_jpeg_decoder_cmd(struct file *file, void *priv,
struct v4l2_decoder_cmd *cmd)
{
- struct v4l2_fh *fh = file->private_data;
- struct mxc_jpeg_ctx *ctx = mxc_jpeg_fh_to_ctx(fh);
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
+ struct mxc_jpeg_ctx *ctx = mxc_jpeg_file_to_ctx(file);
unsigned long flags;
int ret;
@@ -1637,8 +1637,8 @@ static int mxc_jpeg_decoder_cmd(struct file *file, void *priv,
static int mxc_jpeg_encoder_cmd(struct file *file, void *priv,
struct v4l2_encoder_cmd *cmd)
{
- struct v4l2_fh *fh = file->private_data;
- struct mxc_jpeg_ctx *ctx = mxc_jpeg_fh_to_ctx(fh);
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
+ struct mxc_jpeg_ctx *ctx = mxc_jpeg_file_to_ctx(file);
unsigned long flags;
int ret;
@@ -2200,8 +2200,7 @@ static int mxc_jpeg_open(struct file *file)
}
v4l2_fh_init(&ctx->fh, mxc_vfd);
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
ctx->mxc_jpeg = mxc_jpeg;
@@ -2234,7 +2233,7 @@ static int mxc_jpeg_open(struct file *file)
err_ctrls_setup:
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
error:
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
mutex_unlock(&mxc_jpeg->lock);
free:
@@ -2256,7 +2255,7 @@ static int mxc_jpeg_querycap(struct file *file, void *priv,
static int mxc_jpeg_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct mxc_jpeg_ctx *ctx = mxc_jpeg_fh_to_ctx(priv);
+ struct mxc_jpeg_ctx *ctx = mxc_jpeg_file_to_ctx(file);
struct mxc_jpeg_q_data *q_data = mxc_jpeg_get_q_data(ctx, f->type);
if (ctx->mxc_jpeg->mode == MXC_JPEG_ENCODE) {
@@ -2296,7 +2295,7 @@ static int mxc_jpeg_enum_fmt_vid_cap(struct file *file, void *priv,
static int mxc_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct mxc_jpeg_ctx *ctx = mxc_jpeg_fh_to_ctx(priv);
+ struct mxc_jpeg_ctx *ctx = mxc_jpeg_file_to_ctx(file);
u32 type = ctx->mxc_jpeg->mode == MXC_JPEG_DECODE ? MXC_JPEG_FMT_TYPE_ENC :
MXC_JPEG_FMT_TYPE_RAW;
int ret;
@@ -2437,7 +2436,7 @@ static int mxc_jpeg_try_fmt(struct v4l2_format *f,
static int mxc_jpeg_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct mxc_jpeg_ctx *ctx = mxc_jpeg_fh_to_ctx(priv);
+ struct mxc_jpeg_ctx *ctx = mxc_jpeg_file_to_ctx(file);
struct mxc_jpeg_dev *jpeg = ctx->mxc_jpeg;
struct device *dev = jpeg->dev;
struct mxc_jpeg_q_data tmp_q;
@@ -2456,7 +2455,7 @@ static int mxc_jpeg_try_fmt_vid_cap(struct file *file, void *priv,
static int mxc_jpeg_try_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct mxc_jpeg_ctx *ctx = mxc_jpeg_fh_to_ctx(priv);
+ struct mxc_jpeg_ctx *ctx = mxc_jpeg_file_to_ctx(file);
struct mxc_jpeg_dev *jpeg = ctx->mxc_jpeg;
struct device *dev = jpeg->dev;
struct mxc_jpeg_q_data tmp_q;
@@ -2508,20 +2507,20 @@ static int mxc_jpeg_s_fmt(struct mxc_jpeg_ctx *ctx,
static int mxc_jpeg_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- return mxc_jpeg_s_fmt(mxc_jpeg_fh_to_ctx(priv), f);
+ return mxc_jpeg_s_fmt(mxc_jpeg_file_to_ctx(file), f);
}
static int mxc_jpeg_s_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
int ret;
- struct mxc_jpeg_ctx *ctx = mxc_jpeg_fh_to_ctx(priv);
+ struct mxc_jpeg_ctx *ctx = mxc_jpeg_file_to_ctx(file);
struct vb2_queue *dst_vq;
struct mxc_jpeg_q_data *q_data_cap;
enum v4l2_buf_type cap_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
struct v4l2_format fc;
- ret = mxc_jpeg_s_fmt(mxc_jpeg_fh_to_ctx(priv), f);
+ ret = mxc_jpeg_s_fmt(ctx, f);
if (ret)
return ret;
@@ -2550,7 +2549,7 @@ static int mxc_jpeg_s_fmt_vid_out(struct file *file, void *priv,
static int mxc_jpeg_g_fmt_vid(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct mxc_jpeg_ctx *ctx = mxc_jpeg_fh_to_ctx(priv);
+ struct mxc_jpeg_ctx *ctx = mxc_jpeg_file_to_ctx(file);
struct mxc_jpeg_dev *jpeg = ctx->mxc_jpeg;
struct device *dev = jpeg->dev;
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
@@ -2588,7 +2587,7 @@ static int mxc_jpeg_g_fmt_vid(struct file *file, void *priv,
static int mxc_jpeg_dec_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
- struct mxc_jpeg_ctx *ctx = mxc_jpeg_fh_to_ctx(fh);
+ struct mxc_jpeg_ctx *ctx = mxc_jpeg_file_to_ctx(file);
struct mxc_jpeg_q_data *q_data_cap;
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE && s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
@@ -2617,7 +2616,7 @@ static int mxc_jpeg_dec_g_selection(struct file *file, void *fh, struct v4l2_sel
static int mxc_jpeg_enc_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
- struct mxc_jpeg_ctx *ctx = mxc_jpeg_fh_to_ctx(fh);
+ struct mxc_jpeg_ctx *ctx = mxc_jpeg_file_to_ctx(file);
struct mxc_jpeg_q_data *q_data_out;
if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT && s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
@@ -2645,7 +2644,7 @@ static int mxc_jpeg_enc_g_selection(struct file *file, void *fh, struct v4l2_sel
static int mxc_jpeg_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
- struct mxc_jpeg_ctx *ctx = mxc_jpeg_fh_to_ctx(fh);
+ struct mxc_jpeg_ctx *ctx = mxc_jpeg_file_to_ctx(file);
if (ctx->mxc_jpeg->mode == MXC_JPEG_DECODE)
return mxc_jpeg_dec_g_selection(file, fh, s);
@@ -2655,7 +2654,7 @@ static int mxc_jpeg_g_selection(struct file *file, void *fh, struct v4l2_selecti
static int mxc_jpeg_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
- struct mxc_jpeg_ctx *ctx = mxc_jpeg_fh_to_ctx(fh);
+ struct mxc_jpeg_ctx *ctx = mxc_jpeg_file_to_ctx(file);
struct mxc_jpeg_q_data *q_data_out;
if (ctx->mxc_jpeg->mode != MXC_JPEG_ENCODE)
@@ -2735,7 +2734,7 @@ static const struct v4l2_ioctl_ops mxc_jpeg_ioctl_ops = {
static int mxc_jpeg_release(struct file *file)
{
struct mxc_jpeg_dev *mxc_jpeg = video_drvdata(file);
- struct mxc_jpeg_ctx *ctx = mxc_jpeg_fh_to_ctx(file->private_data);
+ struct mxc_jpeg_ctx *ctx = mxc_jpeg_file_to_ctx(file);
struct device *dev = mxc_jpeg->dev;
mutex_lock(&mxc_jpeg->lock);
@@ -2747,7 +2746,7 @@ static int mxc_jpeg_release(struct file *file)
ctx->slot);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
mutex_unlock(&mxc_jpeg->lock);
diff --git a/drivers/media/platform/nxp/imx-mipi-csis.c b/drivers/media/platform/nxp/imx-mipi-csis.c
index 2beb5f43c2c0..d5de7854f579 100644
--- a/drivers/media/platform/nxp/imx-mipi-csis.c
+++ b/drivers/media/platform/nxp/imx-mipi-csis.c
@@ -54,23 +54,19 @@
/* CSIS common control */
#define MIPI_CSIS_CMN_CTRL 0x04
-#define MIPI_CSIS_CMN_CTRL_UPDATE_SHADOW BIT(16)
-#define MIPI_CSIS_CMN_CTRL_INTER_MODE BIT(10)
+#define MIPI_CSIS_CMN_CTRL_UPDATE_SHADOW(n) BIT((n) + 16)
+#define MIPI_CSIS_CMN_CTRL_INTERLEAVE_MODE_DT BIT(10)
+#define MIPI_CSIS_CMN_CTRL_LANE_NUMBER(n) ((n) << 8)
+#define MIPI_CSIS_CMN_CTRL_LANE_NUMBER_MASK GENMASK(9, 8)
#define MIPI_CSIS_CMN_CTRL_UPDATE_SHADOW_CTRL BIT(2)
-#define MIPI_CSIS_CMN_CTRL_RESET BIT(1)
-#define MIPI_CSIS_CMN_CTRL_ENABLE BIT(0)
-
-#define MIPI_CSIS_CMN_CTRL_LANE_NR_OFFSET 8
-#define MIPI_CSIS_CMN_CTRL_LANE_NR_MASK (3 << 8)
+#define MIPI_CSIS_CMN_CTRL_SW_RESET BIT(1)
+#define MIPI_CSIS_CMN_CTRL_CSI_EN BIT(0)
/* CSIS clock control */
#define MIPI_CSIS_CLK_CTRL 0x08
-#define MIPI_CSIS_CLK_CTRL_CLKGATE_TRAIL_CH3(x) ((x) << 28)
-#define MIPI_CSIS_CLK_CTRL_CLKGATE_TRAIL_CH2(x) ((x) << 24)
-#define MIPI_CSIS_CLK_CTRL_CLKGATE_TRAIL_CH1(x) ((x) << 20)
-#define MIPI_CSIS_CLK_CTRL_CLKGATE_TRAIL_CH0(x) ((x) << 16)
-#define MIPI_CSIS_CLK_CTRL_CLKGATE_EN_MSK (0xf << 4)
-#define MIPI_CSIS_CLK_CTRL_WCLK_SRC BIT(0)
+#define MIPI_CSIS_CLK_CTRL_CLKGATE_TRAIL(n, x) ((x) << ((n) * 4 + 16))
+#define MIPI_CSIS_CLK_CTRL_CLKGATE_EN_MASK GENMASK(7, 4)
+#define MIPI_CSIS_CLK_CTRL_WCLK_SRC(n) BIT(n)
/* CSIS Interrupt mask */
#define MIPI_CSIS_INT_MSK 0x10
@@ -87,7 +83,7 @@
#define MIPI_CSIS_INT_MSK_ERR_WRONG_CFG BIT(3)
#define MIPI_CSIS_INT_MSK_ERR_ECC BIT(2)
#define MIPI_CSIS_INT_MSK_ERR_CRC BIT(1)
-#define MIPI_CSIS_INT_MSK_ERR_UNKNOWN BIT(0)
+#define MIPI_CSIS_INT_MSK_ERR_ID BIT(0)
/* CSIS Interrupt source */
#define MIPI_CSIS_INT_SRC 0x14
@@ -98,16 +94,16 @@
#define MIPI_CSIS_INT_SRC_ODD_AFTER BIT(28)
#define MIPI_CSIS_INT_SRC_ODD (0x3 << 28)
#define MIPI_CSIS_INT_SRC_NON_IMAGE_DATA (0xf << 28)
-#define MIPI_CSIS_INT_SRC_FRAME_START BIT(24)
-#define MIPI_CSIS_INT_SRC_FRAME_END BIT(20)
-#define MIPI_CSIS_INT_SRC_ERR_SOT_HS BIT(16)
-#define MIPI_CSIS_INT_SRC_ERR_LOST_FS BIT(12)
-#define MIPI_CSIS_INT_SRC_ERR_LOST_FE BIT(8)
-#define MIPI_CSIS_INT_SRC_ERR_OVER BIT(4)
+#define MIPI_CSIS_INT_SRC_FRAME_START(n) BIT((n) + 24)
+#define MIPI_CSIS_INT_SRC_FRAME_END(n) BIT((n) + 20)
+#define MIPI_CSIS_INT_SRC_ERR_SOT_HS(n) BIT((n) + 16)
+#define MIPI_CSIS_INT_SRC_ERR_LOST_FS(n) BIT((n) + 12)
+#define MIPI_CSIS_INT_SRC_ERR_LOST_FE(n) BIT((n) + 8)
+#define MIPI_CSIS_INT_SRC_ERR_OVER(n) BIT((n) + 4)
#define MIPI_CSIS_INT_SRC_ERR_WRONG_CFG BIT(3)
#define MIPI_CSIS_INT_SRC_ERR_ECC BIT(2)
#define MIPI_CSIS_INT_SRC_ERR_CRC BIT(1)
-#define MIPI_CSIS_INT_SRC_ERR_UNKNOWN BIT(0)
+#define MIPI_CSIS_INT_SRC_ERR_ID BIT(0)
#define MIPI_CSIS_INT_SRC_ERRORS 0xfffff
/* D-PHY status control */
@@ -123,8 +119,8 @@
#define MIPI_CSIS_DPHY_CMN_CTRL_HSSETTLE_MASK GENMASK(31, 24)
#define MIPI_CSIS_DPHY_CMN_CTRL_CLKSETTLE(n) ((n) << 22)
#define MIPI_CSIS_DPHY_CMN_CTRL_CLKSETTLE_MASK GENMASK(23, 22)
-#define MIPI_CSIS_DPHY_CMN_CTRL_DPDN_SWAP_CLK BIT(6)
-#define MIPI_CSIS_DPHY_CMN_CTRL_DPDN_SWAP_DAT BIT(5)
+#define MIPI_CSIS_DPHY_CMN_CTRL_S_DPDN_SWAP_CLK BIT(6)
+#define MIPI_CSIS_DPHY_CMN_CTRL_S_DPDN_SWAP_DAT BIT(5)
#define MIPI_CSIS_DPHY_CMN_CTRL_ENABLE_DAT BIT(1)
#define MIPI_CSIS_DPHY_CMN_CTRL_ENABLE_CLK BIT(0)
#define MIPI_CSIS_DPHY_CMN_CTRL_ENABLE (0x1f << 0)
@@ -174,26 +170,28 @@
/* ISP Configuration register */
#define MIPI_CSIS_ISP_CONFIG_CH(n) (0x40 + (n) * 0x10)
-#define MIPI_CSIS_ISPCFG_MEM_FULL_GAP_MSK (0xff << 24)
+#define MIPI_CSIS_ISPCFG_MEM_FULL_GAP_MASK GENMASK(31, 24)
#define MIPI_CSIS_ISPCFG_MEM_FULL_GAP(x) ((x) << 24)
#define MIPI_CSIS_ISPCFG_PIXEL_MODE_SINGLE (0 << 12)
#define MIPI_CSIS_ISPCFG_PIXEL_MODE_DUAL (1 << 12)
#define MIPI_CSIS_ISPCFG_PIXEL_MODE_QUAD (2 << 12) /* i.MX8M[MNP] only */
-#define MIPI_CSIS_ISPCFG_PIXEL_MASK (3 << 12)
-#define MIPI_CSIS_ISPCFG_ALIGN_32BIT BIT(11)
-#define MIPI_CSIS_ISPCFG_FMT(fmt) ((fmt) << 2)
-#define MIPI_CSIS_ISPCFG_FMT_MASK (0x3f << 2)
+#define MIPI_CSIS_ISPCFG_PIXEL_MODE_MASK GENMASK(13, 12)
+#define MIPI_CSIS_ISPCFG_PARALLEL BIT(11)
+#define MIPI_CSIS_ISPCFG_DATAFORMAT(fmt) ((fmt) << 2)
+#define MIPI_CSIS_ISPCFG_DATAFORMAT_MASK GENMASK(7, 2)
/* ISP Image Resolution register */
#define MIPI_CSIS_ISP_RESOL_CH(n) (0x44 + (n) * 0x10)
+#define MIPI_CSIS_ISP_RESOL_VRESOL(n) ((n) << 16)
+#define MIPI_CSIS_ISP_RESOL_HRESOL(n) ((n) << 0)
#define CSIS_MAX_PIX_WIDTH 0xffff
#define CSIS_MAX_PIX_HEIGHT 0xffff
/* ISP SYNC register */
#define MIPI_CSIS_ISP_SYNC_CH(n) (0x48 + (n) * 0x10)
-#define MIPI_CSIS_ISP_SYNC_HSYNC_LINTV_OFFSET 18
-#define MIPI_CSIS_ISP_SYNC_VSYNC_SINTV_OFFSET 12
-#define MIPI_CSIS_ISP_SYNC_VSYNC_EINTV_OFFSET 0
+#define MIPI_CSIS_ISP_SYNC_HSYNC_LINTV(n) ((n) << 18)
+#define MIPI_CSIS_ISP_SYNC_VSYNC_SINTV(n) ((n) << 12)
+#define MIPI_CSIS_ISP_SYNC_VSYNC_EINTV(n) ((n) << 0)
/* ISP shadow registers */
#define MIPI_CSIS_SDW_CONFIG_CH(n) (0x80 + (n) * 0x10)
@@ -203,23 +201,23 @@
/* Debug control register */
#define MIPI_CSIS_DBG_CTRL 0xc0
#define MIPI_CSIS_DBG_INTR_MSK 0xc4
-#define MIPI_CSIS_DBG_INTR_MSK_DT_NOT_SUPPORT BIT(25)
-#define MIPI_CSIS_DBG_INTR_MSK_DT_IGNORE BIT(24)
-#define MIPI_CSIS_DBG_INTR_MSK_ERR_FRAME_SIZE BIT(20)
-#define MIPI_CSIS_DBG_INTR_MSK_TRUNCATED_FRAME BIT(16)
-#define MIPI_CSIS_DBG_INTR_MSK_EARLY_FE BIT(12)
-#define MIPI_CSIS_DBG_INTR_MSK_EARLY_FS BIT(8)
-#define MIPI_CSIS_DBG_INTR_MSK_CAM_VSYNC_FALL BIT(4)
-#define MIPI_CSIS_DBG_INTR_MSK_CAM_VSYNC_RISE BIT(0)
+#define MIPI_CSIS_DBG_INTR_MSK_DT_NOT_SUPPORT BIT(25)
+#define MIPI_CSIS_DBG_INTR_MSK_DT_IGNORE BIT(24)
+#define MIPI_CSIS_DBG_INTR_MSK_ERR_FRAME_SIZE(n) BIT((n) + 20)
+#define MIPI_CSIS_DBG_INTR_MSK_TRUNCATED_FRAME(n) BIT((n) + 16)
+#define MIPI_CSIS_DBG_INTR_MSK_EARLY_FE(n) BIT((n) + 12)
+#define MIPI_CSIS_DBG_INTR_MSK_EARLY_FS(n) BIT((n) + 8)
+#define MIPI_CSIS_DBG_INTR_MSK_CAM_VSYNC_FALL(n) BIT((n) + 4)
+#define MIPI_CSIS_DBG_INTR_MSK_CAM_VSYNC_RISE(n) BIT((n) + 0)
#define MIPI_CSIS_DBG_INTR_SRC 0xc8
-#define MIPI_CSIS_DBG_INTR_SRC_DT_NOT_SUPPORT BIT(25)
-#define MIPI_CSIS_DBG_INTR_SRC_DT_IGNORE BIT(24)
-#define MIPI_CSIS_DBG_INTR_SRC_ERR_FRAME_SIZE BIT(20)
-#define MIPI_CSIS_DBG_INTR_SRC_TRUNCATED_FRAME BIT(16)
-#define MIPI_CSIS_DBG_INTR_SRC_EARLY_FE BIT(12)
-#define MIPI_CSIS_DBG_INTR_SRC_EARLY_FS BIT(8)
-#define MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_FALL BIT(4)
-#define MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_RISE BIT(0)
+#define MIPI_CSIS_DBG_INTR_SRC_DT_NOT_SUPPORT BIT(25)
+#define MIPI_CSIS_DBG_INTR_SRC_DT_IGNORE BIT(24)
+#define MIPI_CSIS_DBG_INTR_SRC_ERR_FRAME_SIZE(n) BIT((n) + 20)
+#define MIPI_CSIS_DBG_INTR_SRC_TRUNCATED_FRAME(n) BIT((n) + 16)
+#define MIPI_CSIS_DBG_INTR_SRC_EARLY_FE(n) BIT((n) + 12)
+#define MIPI_CSIS_DBG_INTR_SRC_EARLY_FS(n) BIT((n) + 8)
+#define MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_FALL(n) BIT((n) + 4)
+#define MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_RISE(n) BIT((n) + 0)
#define MIPI_CSIS_FRAME_COUNTER_CH(n) (0x0100 + (n) * 4)
@@ -228,10 +226,11 @@
#define MIPI_CSIS_PKTDATA_EVEN 0x3000
#define MIPI_CSIS_PKTDATA_SIZE SZ_4K
-#define DEFAULT_SCLK_CSIS_FREQ 166000000UL
+#define MIPI_CSIS_MAX_CHANNELS 4
struct mipi_csis_event {
bool debug;
+ unsigned int channel;
u32 mask;
const char * const name;
unsigned int counter;
@@ -239,33 +238,70 @@ struct mipi_csis_event {
static const struct mipi_csis_event mipi_csis_events[] = {
/* Errors */
- { false, MIPI_CSIS_INT_SRC_ERR_SOT_HS, "SOT Error" },
- { false, MIPI_CSIS_INT_SRC_ERR_LOST_FS, "Lost Frame Start Error" },
- { false, MIPI_CSIS_INT_SRC_ERR_LOST_FE, "Lost Frame End Error" },
- { false, MIPI_CSIS_INT_SRC_ERR_OVER, "FIFO Overflow Error" },
- { false, MIPI_CSIS_INT_SRC_ERR_WRONG_CFG, "Wrong Configuration Error" },
- { false, MIPI_CSIS_INT_SRC_ERR_ECC, "ECC Error" },
- { false, MIPI_CSIS_INT_SRC_ERR_CRC, "CRC Error" },
- { false, MIPI_CSIS_INT_SRC_ERR_UNKNOWN, "Unknown Error" },
- { true, MIPI_CSIS_DBG_INTR_SRC_DT_NOT_SUPPORT, "Data Type Not Supported" },
- { true, MIPI_CSIS_DBG_INTR_SRC_DT_IGNORE, "Data Type Ignored" },
- { true, MIPI_CSIS_DBG_INTR_SRC_ERR_FRAME_SIZE, "Frame Size Error" },
- { true, MIPI_CSIS_DBG_INTR_SRC_TRUNCATED_FRAME, "Truncated Frame" },
- { true, MIPI_CSIS_DBG_INTR_SRC_EARLY_FE, "Early Frame End" },
- { true, MIPI_CSIS_DBG_INTR_SRC_EARLY_FS, "Early Frame Start" },
+ { false, 0, MIPI_CSIS_INT_SRC_ERR_SOT_HS(0), "SOT 0 Error" },
+ { false, 0, MIPI_CSIS_INT_SRC_ERR_SOT_HS(1), "SOT 1 Error" },
+ { false, 0, MIPI_CSIS_INT_SRC_ERR_SOT_HS(2), "SOT 2 Error" },
+ { false, 0, MIPI_CSIS_INT_SRC_ERR_SOT_HS(3), "SOT 3 Error" },
+ { false, 0, MIPI_CSIS_INT_SRC_ERR_LOST_FS(0), "Lost Frame Start Error 0" },
+ { false, 1, MIPI_CSIS_INT_SRC_ERR_LOST_FS(1), "Lost Frame Start Error 1" },
+ { false, 2, MIPI_CSIS_INT_SRC_ERR_LOST_FS(2), "Lost Frame Start Error 2" },
+ { false, 3, MIPI_CSIS_INT_SRC_ERR_LOST_FS(3), "Lost Frame Start Error 3" },
+ { false, 0, MIPI_CSIS_INT_SRC_ERR_LOST_FE(0), "Lost Frame End Error 0" },
+ { false, 1, MIPI_CSIS_INT_SRC_ERR_LOST_FE(1), "Lost Frame End Error 1" },
+ { false, 2, MIPI_CSIS_INT_SRC_ERR_LOST_FE(2), "Lost Frame End Error 2" },
+ { false, 3, MIPI_CSIS_INT_SRC_ERR_LOST_FE(3), "Lost Frame End Error 3" },
+ { false, 0, MIPI_CSIS_INT_SRC_ERR_OVER(0), "FIFO Overflow Error 0" },
+ { false, 1, MIPI_CSIS_INT_SRC_ERR_OVER(1), "FIFO Overflow Error 1" },
+ { false, 2, MIPI_CSIS_INT_SRC_ERR_OVER(2), "FIFO Overflow Error 2" },
+ { false, 3, MIPI_CSIS_INT_SRC_ERR_OVER(3), "FIFO Overflow Error 3" },
+ { false, 0, MIPI_CSIS_INT_SRC_ERR_WRONG_CFG, "Wrong Configuration Error" },
+ { false, 0, MIPI_CSIS_INT_SRC_ERR_ECC, "ECC Error" },
+ { false, 0, MIPI_CSIS_INT_SRC_ERR_CRC, "CRC Error" },
+ { false, 0, MIPI_CSIS_INT_SRC_ERR_ID, "Unknown ID Error" },
+ { true, 0, MIPI_CSIS_DBG_INTR_SRC_DT_NOT_SUPPORT, "Data Type Not Supported" },
+ { true, 0, MIPI_CSIS_DBG_INTR_SRC_DT_IGNORE, "Data Type Ignored" },
+ { true, 0, MIPI_CSIS_DBG_INTR_SRC_ERR_FRAME_SIZE(0), "Frame Size Error 0" },
+ { true, 1, MIPI_CSIS_DBG_INTR_SRC_ERR_FRAME_SIZE(1), "Frame Size Error 1" },
+ { true, 2, MIPI_CSIS_DBG_INTR_SRC_ERR_FRAME_SIZE(2), "Frame Size Error 2" },
+ { true, 3, MIPI_CSIS_DBG_INTR_SRC_ERR_FRAME_SIZE(3), "Frame Size Error 3" },
+ { true, 0, MIPI_CSIS_DBG_INTR_SRC_TRUNCATED_FRAME(0), "Truncated Frame 0" },
+ { true, 1, MIPI_CSIS_DBG_INTR_SRC_TRUNCATED_FRAME(1), "Truncated Frame 1" },
+ { true, 2, MIPI_CSIS_DBG_INTR_SRC_TRUNCATED_FRAME(2), "Truncated Frame 2" },
+ { true, 3, MIPI_CSIS_DBG_INTR_SRC_TRUNCATED_FRAME(3), "Truncated Frame 3" },
+ { true, 0, MIPI_CSIS_DBG_INTR_SRC_EARLY_FE(0), "Early Frame End 0" },
+ { true, 1, MIPI_CSIS_DBG_INTR_SRC_EARLY_FE(1), "Early Frame End 1" },
+ { true, 2, MIPI_CSIS_DBG_INTR_SRC_EARLY_FE(2), "Early Frame End 2" },
+ { true, 3, MIPI_CSIS_DBG_INTR_SRC_EARLY_FE(3), "Early Frame End 3" },
+ { true, 0, MIPI_CSIS_DBG_INTR_SRC_EARLY_FS(0), "Early Frame Start 0" },
+ { true, 1, MIPI_CSIS_DBG_INTR_SRC_EARLY_FS(1), "Early Frame Start 1" },
+ { true, 2, MIPI_CSIS_DBG_INTR_SRC_EARLY_FS(2), "Early Frame Start 2" },
+ { true, 3, MIPI_CSIS_DBG_INTR_SRC_EARLY_FS(3), "Early Frame Start 3" },
/* Non-image data receive events */
- { false, MIPI_CSIS_INT_SRC_EVEN_BEFORE, "Non-image data before even frame" },
- { false, MIPI_CSIS_INT_SRC_EVEN_AFTER, "Non-image data after even frame" },
- { false, MIPI_CSIS_INT_SRC_ODD_BEFORE, "Non-image data before odd frame" },
- { false, MIPI_CSIS_INT_SRC_ODD_AFTER, "Non-image data after odd frame" },
+ { false, 0, MIPI_CSIS_INT_SRC_EVEN_BEFORE, "Non-image data before even frame" },
+ { false, 0, MIPI_CSIS_INT_SRC_EVEN_AFTER, "Non-image data after even frame" },
+ { false, 0, MIPI_CSIS_INT_SRC_ODD_BEFORE, "Non-image data before odd frame" },
+ { false, 0, MIPI_CSIS_INT_SRC_ODD_AFTER, "Non-image data after odd frame" },
/* Frame start/end */
- { false, MIPI_CSIS_INT_SRC_FRAME_START, "Frame Start" },
- { false, MIPI_CSIS_INT_SRC_FRAME_END, "Frame End" },
- { true, MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_FALL, "VSYNC Falling Edge" },
- { true, MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_RISE, "VSYNC Rising Edge" },
+ { false, 0, MIPI_CSIS_INT_SRC_FRAME_START(0), "Frame Start 0" },
+ { false, 1, MIPI_CSIS_INT_SRC_FRAME_START(1), "Frame Start 1" },
+ { false, 2, MIPI_CSIS_INT_SRC_FRAME_START(2), "Frame Start 2" },
+ { false, 3, MIPI_CSIS_INT_SRC_FRAME_START(3), "Frame Start 3" },
+ { false, 0, MIPI_CSIS_INT_SRC_FRAME_END(0), "Frame End 0" },
+ { false, 1, MIPI_CSIS_INT_SRC_FRAME_END(1), "Frame End 1" },
+ { false, 2, MIPI_CSIS_INT_SRC_FRAME_END(2), "Frame End 2" },
+ { false, 3, MIPI_CSIS_INT_SRC_FRAME_END(3), "Frame End 3" },
+ { true, 0, MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_FALL(0), "VSYNC Falling Edge 0" },
+ { true, 1, MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_FALL(1), "VSYNC Falling Edge 1" },
+ { true, 2, MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_FALL(2), "VSYNC Falling Edge 2" },
+ { true, 3, MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_FALL(3), "VSYNC Falling Edge 3" },
+ { true, 0, MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_RISE(0), "VSYNC Rising Edge 0" },
+ { true, 1, MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_RISE(1), "VSYNC Rising Edge 1" },
+ { true, 2, MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_RISE(2), "VSYNC Rising Edge 2" },
+ { true, 3, MIPI_CSIS_DBG_INTR_SRC_CAM_VSYNC_RISE(3), "VSYNC Rising Edge 3" },
};
-#define MIPI_CSIS_NUM_EVENTS ARRAY_SIZE(mipi_csis_events)
+#define MIPI_CSIS_NUM_EVENTS ARRAY_SIZE(mipi_csis_events)
+#define MIPI_CSIS_NUM_ERROR_EVENTS 38
enum mipi_csis_clk {
MIPI_CSIS_CLK_PCLK,
@@ -297,7 +333,9 @@ struct mipi_csis_device {
struct clk_bulk_data *clks;
struct reset_control *mrst;
struct regulator *mipi_phy_regulator;
+
const struct mipi_csis_info *info;
+ unsigned int num_channels;
struct v4l2_subdev sd;
struct media_pad pads[CSIS_PADS_NUM];
@@ -517,7 +555,7 @@ static void mipi_csis_sw_reset(struct mipi_csis_device *csis)
u32 val = mipi_csis_read(csis, MIPI_CSIS_CMN_CTRL);
mipi_csis_write(csis, MIPI_CSIS_CMN_CTRL,
- val | MIPI_CSIS_CMN_CTRL_RESET);
+ val | MIPI_CSIS_CMN_CTRL_SW_RESET);
usleep_range(10, 20);
}
@@ -527,9 +565,9 @@ static void mipi_csis_system_enable(struct mipi_csis_device *csis, int on)
val = mipi_csis_read(csis, MIPI_CSIS_CMN_CTRL);
if (on)
- val |= MIPI_CSIS_CMN_CTRL_ENABLE;
+ val |= MIPI_CSIS_CMN_CTRL_CSI_EN;
else
- val &= ~MIPI_CSIS_CMN_CTRL_ENABLE;
+ val &= ~MIPI_CSIS_CMN_CTRL_CSI_EN;
mipi_csis_write(csis, MIPI_CSIS_CMN_CTRL, val);
val = mipi_csis_read(csis, MIPI_CSIS_DPHY_CMN_CTRL);
@@ -549,8 +587,8 @@ static void __mipi_csis_set_format(struct mipi_csis_device *csis,
/* Color format */
val = mipi_csis_read(csis, MIPI_CSIS_ISP_CONFIG_CH(0));
- val &= ~(MIPI_CSIS_ISPCFG_ALIGN_32BIT | MIPI_CSIS_ISPCFG_FMT_MASK
- | MIPI_CSIS_ISPCFG_PIXEL_MASK);
+ val &= ~(MIPI_CSIS_ISPCFG_PARALLEL | MIPI_CSIS_ISPCFG_PIXEL_MODE_MASK |
+ MIPI_CSIS_ISPCFG_DATAFORMAT_MASK);
/*
* YUV 4:2:2 can be transferred with 8 or 16 bits per clock sample
@@ -568,24 +606,23 @@ static void __mipi_csis_set_format(struct mipi_csis_device *csis,
if (csis_fmt->data_type == MIPI_CSI2_DT_YUV422_8B)
val |= MIPI_CSIS_ISPCFG_PIXEL_MODE_DUAL;
- val |= MIPI_CSIS_ISPCFG_FMT(csis_fmt->data_type);
+ val |= MIPI_CSIS_ISPCFG_DATAFORMAT(csis_fmt->data_type);
mipi_csis_write(csis, MIPI_CSIS_ISP_CONFIG_CH(0), val);
/* Pixel resolution */
- val = format->width | (format->height << 16);
- mipi_csis_write(csis, MIPI_CSIS_ISP_RESOL_CH(0), val);
+ mipi_csis_write(csis, MIPI_CSIS_ISP_RESOL_CH(0),
+ MIPI_CSIS_ISP_RESOL_VRESOL(format->height) |
+ MIPI_CSIS_ISP_RESOL_HRESOL(format->width));
}
static int mipi_csis_calculate_params(struct mipi_csis_device *csis,
const struct csis_pix_format *csis_fmt)
{
- struct media_pad *src_pad =
- &csis->source.sd->entity.pads[csis->source.pad->index];
s64 link_freq;
u32 lane_rate;
/* Calculate the line rate from the pixel rate. */
- link_freq = v4l2_get_link_freq(src_pad, csis_fmt->width,
+ link_freq = v4l2_get_link_freq(csis->source.pad, csis_fmt->width,
csis->bus.num_data_lanes * 2);
if (link_freq < 0) {
dev_err(csis->dev, "Unable to obtain link frequency: %d\n",
@@ -635,10 +672,10 @@ static void mipi_csis_set_params(struct mipi_csis_device *csis,
u32 val;
val = mipi_csis_read(csis, MIPI_CSIS_CMN_CTRL);
- val &= ~MIPI_CSIS_CMN_CTRL_LANE_NR_MASK;
- val |= (lanes - 1) << MIPI_CSIS_CMN_CTRL_LANE_NR_OFFSET;
+ val &= ~MIPI_CSIS_CMN_CTRL_LANE_NUMBER_MASK;
+ val |= MIPI_CSIS_CMN_CTRL_LANE_NUMBER(lanes - 1);
if (csis->info->version == MIPI_CSIS_V3_3)
- val |= MIPI_CSIS_CMN_CTRL_INTER_MODE;
+ val |= MIPI_CSIS_CMN_CTRL_INTERLEAVE_MODE_DT;
mipi_csis_write(csis, MIPI_CSIS_CMN_CTRL, val);
__mipi_csis_set_format(csis, format, csis_fmt);
@@ -647,15 +684,15 @@ static void mipi_csis_set_params(struct mipi_csis_device *csis,
MIPI_CSIS_DPHY_CMN_CTRL_HSSETTLE(csis->hs_settle) |
MIPI_CSIS_DPHY_CMN_CTRL_CLKSETTLE(csis->clk_settle));
- val = (0 << MIPI_CSIS_ISP_SYNC_HSYNC_LINTV_OFFSET)
- | (0 << MIPI_CSIS_ISP_SYNC_VSYNC_SINTV_OFFSET)
- | (0 << MIPI_CSIS_ISP_SYNC_VSYNC_EINTV_OFFSET);
- mipi_csis_write(csis, MIPI_CSIS_ISP_SYNC_CH(0), val);
+ mipi_csis_write(csis, MIPI_CSIS_ISP_SYNC_CH(0),
+ MIPI_CSIS_ISP_SYNC_HSYNC_LINTV(0) |
+ MIPI_CSIS_ISP_SYNC_VSYNC_SINTV(0) |
+ MIPI_CSIS_ISP_SYNC_VSYNC_EINTV(0));
val = mipi_csis_read(csis, MIPI_CSIS_CLK_CTRL);
- val |= MIPI_CSIS_CLK_CTRL_WCLK_SRC;
- val |= MIPI_CSIS_CLK_CTRL_CLKGATE_TRAIL_CH0(15);
- val &= ~MIPI_CSIS_CLK_CTRL_CLKGATE_EN_MSK;
+ val |= MIPI_CSIS_CLK_CTRL_WCLK_SRC(0);
+ val |= MIPI_CSIS_CLK_CTRL_CLKGATE_TRAIL(0, 15);
+ val &= ~MIPI_CSIS_CLK_CTRL_CLKGATE_EN_MASK;
mipi_csis_write(csis, MIPI_CSIS_CLK_CTRL, val);
mipi_csis_write(csis, MIPI_CSIS_DPHY_BCTRL_L,
@@ -671,7 +708,7 @@ static void mipi_csis_set_params(struct mipi_csis_device *csis,
/* Update the shadow register. */
val = mipi_csis_read(csis, MIPI_CSIS_CMN_CTRL);
mipi_csis_write(csis, MIPI_CSIS_CMN_CTRL,
- val | MIPI_CSIS_CMN_CTRL_UPDATE_SHADOW |
+ val | MIPI_CSIS_CMN_CTRL_UPDATE_SHADOW(0) |
MIPI_CSIS_CMN_CTRL_UPDATE_SHADOW_CTRL);
}
@@ -704,12 +741,17 @@ static int mipi_csis_clk_get(struct mipi_csis_device *csis)
if (ret < 0)
return ret;
- /* Set clock rate */
- ret = clk_set_rate(csis->clks[MIPI_CSIS_CLK_WRAP].clk,
- csis->clk_frequency);
- if (ret < 0)
- dev_err(csis->dev, "set rate=%d failed: %d\n",
- csis->clk_frequency, ret);
+ if (csis->clk_frequency) {
+ /*
+ * Set the clock rate. This is deprecated, for backward
+ * compatibility with old device trees.
+ */
+ ret = clk_set_rate(csis->clks[MIPI_CSIS_CLK_WRAP].clk,
+ csis->clk_frequency);
+ if (ret < 0)
+ dev_err(csis->dev, "set rate=%d failed: %d\n",
+ csis->clk_frequency, ret);
+ }
return ret;
}
@@ -757,16 +799,19 @@ static irqreturn_t mipi_csis_irq_handler(int irq, void *dev_id)
/* Update the event/error counters */
if ((status & MIPI_CSIS_INT_SRC_ERRORS) || csis->debug.enable) {
- for (i = 0; i < MIPI_CSIS_NUM_EVENTS; i++) {
+ for (i = 0; i < ARRAY_SIZE(csis->events); i++) {
struct mipi_csis_event *event = &csis->events[i];
+ if (event->channel >= csis->num_channels)
+ continue;
+
if ((!event->debug && (status & event->mask)) ||
(event->debug && (dbg_status & event->mask)))
event->counter++;
}
}
- if (status & MIPI_CSIS_INT_SRC_FRAME_START)
+ if (status & MIPI_CSIS_INT_SRC_FRAME_START(0))
mipi_csis_queue_event_sof(csis);
spin_unlock_irqrestore(&csis->slock, flags);
@@ -843,7 +888,7 @@ static void mipi_csis_clear_counters(struct mipi_csis_device *csis)
static void mipi_csis_log_counters(struct mipi_csis_device *csis, bool non_errors)
{
unsigned int num_events = non_errors ? MIPI_CSIS_NUM_EVENTS
- : MIPI_CSIS_NUM_EVENTS - 8;
+ : MIPI_CSIS_NUM_ERROR_EVENTS;
unsigned int counters[MIPI_CSIS_NUM_EVENTS];
unsigned long flags;
unsigned int i;
@@ -854,45 +899,67 @@ static void mipi_csis_log_counters(struct mipi_csis_device *csis, bool non_error
spin_unlock_irqrestore(&csis->slock, flags);
for (i = 0; i < num_events; ++i) {
+ const struct mipi_csis_event *event = &csis->events[i];
+
+ if (event->channel >= csis->num_channels)
+ continue;
+
if (counters[i] > 0 || csis->debug.enable)
dev_info(csis->dev, "%s events: %d\n",
- csis->events[i].name,
- counters[i]);
+ event->name, counters[i]);
}
}
+struct mipi_csis_reg_info {
+ u32 addr;
+ unsigned int offset;
+ const char * const name;
+};
+
+static void mipi_csis_dump_channel_reg(struct mipi_csis_device *csis,
+ const struct mipi_csis_reg_info *reg,
+ unsigned int channel)
+{
+ dev_info(csis->dev, "%16s%u: 0x%08x\n", reg->name, channel,
+ mipi_csis_read(csis, reg->addr + channel * reg->offset));
+}
+
static int mipi_csis_dump_regs(struct mipi_csis_device *csis)
{
- static const struct {
- u32 offset;
- const char * const name;
- } registers[] = {
- { MIPI_CSIS_CMN_CTRL, "CMN_CTRL" },
- { MIPI_CSIS_CLK_CTRL, "CLK_CTRL" },
- { MIPI_CSIS_INT_MSK, "INT_MSK" },
- { MIPI_CSIS_DPHY_STATUS, "DPHY_STATUS" },
- { MIPI_CSIS_DPHY_CMN_CTRL, "DPHY_CMN_CTRL" },
- { MIPI_CSIS_DPHY_SCTRL_L, "DPHY_SCTRL_L" },
- { MIPI_CSIS_DPHY_SCTRL_H, "DPHY_SCTRL_H" },
- { MIPI_CSIS_ISP_CONFIG_CH(0), "ISP_CONFIG_CH0" },
- { MIPI_CSIS_ISP_RESOL_CH(0), "ISP_RESOL_CH0" },
- { MIPI_CSIS_SDW_CONFIG_CH(0), "SDW_CONFIG_CH0" },
- { MIPI_CSIS_SDW_RESOL_CH(0), "SDW_RESOL_CH0" },
- { MIPI_CSIS_DBG_CTRL, "DBG_CTRL" },
- { MIPI_CSIS_FRAME_COUNTER_CH(0), "FRAME_COUNTER_CH0" },
+ static const struct mipi_csis_reg_info common_registers[] = {
+ { MIPI_CSIS_CMN_CTRL, 0, "CMN_CTRL" },
+ { MIPI_CSIS_CLK_CTRL, 0, "CLK_CTRL" },
+ { MIPI_CSIS_INT_MSK, 0, "INT_MSK" },
+ { MIPI_CSIS_DPHY_STATUS, 0, "DPHY_STATUS" },
+ { MIPI_CSIS_DPHY_CMN_CTRL, 0, "DPHY_CMN_CTRL" },
+ { MIPI_CSIS_DPHY_SCTRL_L, 0, "DPHY_SCTRL_L" },
+ { MIPI_CSIS_DPHY_SCTRL_H, 0, "DPHY_SCTRL_H" },
+ { MIPI_CSIS_DBG_CTRL, 0, "DBG_CTRL" },
+ };
+ static const struct mipi_csis_reg_info channel_registers[] = {
+ { MIPI_CSIS_ISP_CONFIG_CH(0), 0x10, "ISP_CONFIG_CH" },
+ { MIPI_CSIS_ISP_RESOL_CH(0), 0x10, "ISP_RESOL_CH" },
+ { MIPI_CSIS_SDW_CONFIG_CH(0), 0x10, "SDW_CONFIG_CH" },
+ { MIPI_CSIS_SDW_RESOL_CH(0), 0x10, "SDW_RESOL_CH" },
+ { MIPI_CSIS_FRAME_COUNTER_CH(0), 4, "FRAME_COUNTER_CH" },
};
-
- unsigned int i;
- u32 cfg;
if (!pm_runtime_get_if_in_use(csis->dev))
return 0;
dev_info(csis->dev, "--- REGISTERS ---\n");
- for (i = 0; i < ARRAY_SIZE(registers); i++) {
- cfg = mipi_csis_read(csis, registers[i].offset);
- dev_info(csis->dev, "%14s: 0x%08x\n", registers[i].name, cfg);
+ for (unsigned int i = 0; i < ARRAY_SIZE(common_registers); i++) {
+ const struct mipi_csis_reg_info *reg = &common_registers[i];
+
+ dev_info(csis->dev, "%17s: 0x%08x\n", reg->name,
+ mipi_csis_read(csis, reg->addr));
+ }
+
+ for (unsigned int chan = 0; chan < csis->num_channels; chan++) {
+ for (unsigned int i = 0; i < ARRAY_SIZE(channel_registers); ++i)
+ mipi_csis_dump_channel_reg(csis, &channel_registers[i],
+ chan);
}
pm_runtime_put(csis->dev);
@@ -998,7 +1065,7 @@ err_unlock:
}
static int mipi_csis_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_mbus_code_enum *code)
{
/*
@@ -1011,7 +1078,7 @@ static int mipi_csis_enum_mbus_code(struct v4l2_subdev *sd,
if (code->index > 0)
return -EINVAL;
- fmt = v4l2_subdev_state_get_format(sd_state, code->pad);
+ fmt = v4l2_subdev_state_get_format(state, code->pad);
code->code = fmt->code;
return 0;
}
@@ -1028,10 +1095,10 @@ static int mipi_csis_enum_mbus_code(struct v4l2_subdev *sd,
}
static int mipi_csis_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_format *sdformat)
{
- struct csis_pix_format const *csis_fmt;
+ const struct csis_pix_format *csis_fmt;
struct v4l2_mbus_framefmt *fmt;
unsigned int align;
@@ -1040,7 +1107,7 @@ static int mipi_csis_set_fmt(struct v4l2_subdev *sd,
* modified.
*/
if (sdformat->pad == CSIS_PAD_SOURCE)
- return v4l2_subdev_get_fmt(sd, sd_state, sdformat);
+ return v4l2_subdev_get_fmt(sd, state, sdformat);
if (sdformat->pad != CSIS_PAD_SINK)
return -EINVAL;
@@ -1078,7 +1145,7 @@ static int mipi_csis_set_fmt(struct v4l2_subdev *sd,
&sdformat->format.height, 1,
CSIS_MAX_PIX_HEIGHT, 0, 0);
- fmt = v4l2_subdev_state_get_format(sd_state, sdformat->pad);
+ fmt = v4l2_subdev_state_get_format(state, sdformat->pad);
fmt->code = csis_fmt->code;
fmt->width = sdformat->format.width;
@@ -1092,7 +1159,7 @@ static int mipi_csis_set_fmt(struct v4l2_subdev *sd,
sdformat->format = *fmt;
/* Propagate the format from sink to source. */
- fmt = v4l2_subdev_state_get_format(sd_state, CSIS_PAD_SOURCE);
+ fmt = v4l2_subdev_state_get_format(state, CSIS_PAD_SOURCE);
*fmt = sdformat->format;
/* The format on the source pad might change due to unpacking. */
@@ -1132,7 +1199,7 @@ static int mipi_csis_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
}
static int mipi_csis_init_state(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+ struct v4l2_subdev_state *state)
{
struct v4l2_subdev_format fmt = {
.pad = CSIS_PAD_SINK,
@@ -1149,7 +1216,7 @@ static int mipi_csis_init_state(struct v4l2_subdev *sd,
V4L2_MAP_QUANTIZATION_DEFAULT(false, fmt.format.colorspace,
fmt.format.ycbcr_enc);
- return mipi_csis_set_fmt(sd, sd_state, &fmt);
+ return mipi_csis_set_fmt(sd, state, &fmt);
}
static int mipi_csis_log_status(struct v4l2_subdev *sd)
@@ -1413,9 +1480,13 @@ static int mipi_csis_parse_dt(struct mipi_csis_device *csis)
{
struct device_node *node = csis->dev->of_node;
- if (of_property_read_u32(node, "clock-frequency",
- &csis->clk_frequency))
- csis->clk_frequency = DEFAULT_SCLK_CSIS_FREQ;
+ of_property_read_u32(node, "clock-frequency", &csis->clk_frequency);
+
+ csis->num_channels = 1;
+ of_property_read_u32(node, "fsl,num-channels", &csis->num_channels);
+ if (csis->num_channels < 1 || csis->num_channels > MIPI_CSIS_MAX_CHANNELS)
+ return dev_err_probe(csis->dev, -EINVAL,
+ "Invalid fsl,num-channels value\n");
return 0;
}
@@ -1440,10 +1511,8 @@ static int mipi_csis_probe(struct platform_device *pdev)
/* Parse DT properties. */
ret = mipi_csis_parse_dt(csis);
- if (ret < 0) {
- dev_err(dev, "Failed to parse device tree: %d\n", ret);
+ if (ret < 0)
return ret;
- }
/* Acquire resources. */
csis->regs = devm_platform_ioremap_resource(pdev, 0);
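
The register rework above replaces fixed per-channel definitions with macros parameterized on the channel index n, so one definition covers all MIPI_CSIS_MAX_CHANNELS bit positions. A quick compile-time expansion check, with the values read straight off the new definitions (BIT((n) + 24) for frame start, (n) << 8 under GENMASK(9, 8) for the lane count):

	/* Sanity-check sketch; not part of the patch. */
	static_assert(MIPI_CSIS_INT_SRC_FRAME_START(0) == BIT(24));
	static_assert(MIPI_CSIS_INT_SRC_FRAME_START(3) == BIT(27));
	static_assert((MIPI_CSIS_CMN_CTRL_LANE_NUMBER(3) &
		       MIPI_CSIS_CMN_CTRL_LANE_NUMBER_MASK) ==
		      MIPI_CSIS_CMN_CTRL_LANE_NUMBER(3));
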
diff --git a/drivers/media/platform/nxp/imx-pxp.c b/drivers/media/platform/nxp/imx-pxp.c
index 7f8ffbac582f..6cc9b07ea53a 100644
--- a/drivers/media/platform/nxp/imx-pxp.c
+++ b/drivers/media/platform/nxp/imx-pxp.c
@@ -248,7 +248,7 @@ struct pxp_ctx {
static inline struct pxp_ctx *file2ctx(struct file *file)
{
- return container_of(file->private_data, struct pxp_ctx, fh);
+ return container_of(file_to_v4l2_fh(file), struct pxp_ctx, fh);
}
static struct pxp_q_data *get_q_data(struct pxp_ctx *ctx,
@@ -1660,7 +1660,6 @@ static int pxp_open(struct file *file)
}
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
ctx->dev = dev;
hdl = &ctx->hdl;
v4l2_ctrl_handler_init(hdl, 4);
@@ -1699,7 +1698,7 @@ static int pxp_open(struct file *file)
goto open_unlock;
}
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
atomic_inc(&dev->num_inst);
dprintk(dev, "Created instance: %p, m2m_ctx: %p\n",
@@ -1717,7 +1716,7 @@ static int pxp_release(struct file *file)
dprintk(dev, "Releasing instance %p\n", ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->hdl);
mutex_lock(&dev->dev_mutex);
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
index 981648a03113..adc8d9960bf0 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
@@ -374,6 +374,8 @@ static int mxc_isi_pm_suspend(struct device *dev)
mxc_isi_video_suspend(pipe);
}
+ mxc_isi_m2m_suspend(&isi->m2m);
+
return pm_runtime_force_suspend(dev);
}
@@ -403,6 +405,12 @@ static int mxc_isi_pm_resume(struct device *dev)
}
}
+ ret = mxc_isi_m2m_resume(&isi->m2m);
+ if (ret) {
+ dev_err(dev, "Failed to resume ISI (%d) for m2m\n", ret);
+ err = ret;
+ }
+
return err;
}
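
The system-PM hunks above quiesce the new m2m side alongside the capture pipes before pm_runtime_force_suspend(), and resume records a partial failure without aborting so the remaining pipes still restart; the header diff that follows adds matching no-op stubs so !CONFIG_VIDEO_IMX8_ISI_M2M builds keep linking. The error-aggregation shape in sketch form (per-pipe handling elided):

	static int mxc_isi_pm_resume_sketch(struct device *dev)
	{
		struct mxc_isi_dev *isi = dev_get_drvdata(dev);
		int err = 0;
		int ret;

		/* ...per-pipe video resume sets err on failure... */

		ret = mxc_isi_m2m_resume(&isi->m2m);
		if (ret)
			err = ret;	/* record, but keep going */

		return err;
	}
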
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h
index 206995bedca4..e84af5127e4e 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h
@@ -202,9 +202,8 @@ struct mxc_isi_video {
struct video_device vdev;
struct media_pad pad;
- /* Protects is_streaming, and the vdev and vb2_q operations */
+ /* Protects the vdev and vb2_q operations */
struct mutex lock;
- bool is_streaming;
struct v4l2_pix_format_mplane pix;
const struct mxc_isi_format_info *fmtinfo;
@@ -343,6 +342,8 @@ int mxc_isi_video_buffer_prepare(struct mxc_isi_dev *isi, struct vb2_buffer *vb2
#ifdef CONFIG_VIDEO_IMX8_ISI_M2M
int mxc_isi_m2m_register(struct mxc_isi_dev *isi, struct v4l2_device *v4l2_dev);
int mxc_isi_m2m_unregister(struct mxc_isi_dev *isi);
+void mxc_isi_m2m_suspend(struct mxc_isi_m2m *m2m);
+int mxc_isi_m2m_resume(struct mxc_isi_m2m *m2m);
#else
static inline int mxc_isi_m2m_register(struct mxc_isi_dev *isi,
struct v4l2_device *v4l2_dev)
@@ -353,6 +354,13 @@ static inline int mxc_isi_m2m_unregister(struct mxc_isi_dev *isi)
{
return 0;
}
+static inline void mxc_isi_m2m_suspend(struct mxc_isi_m2m *m2m)
+{
+}
+static inline int mxc_isi_m2m_resume(struct mxc_isi_m2m *m2m)
+{
+ return 0;
+}
#endif
int mxc_isi_channel_acquire(struct mxc_isi_pipe *pipe,
@@ -362,7 +370,7 @@ void mxc_isi_channel_get(struct mxc_isi_pipe *pipe);
void mxc_isi_channel_put(struct mxc_isi_pipe *pipe);
void mxc_isi_channel_enable(struct mxc_isi_pipe *pipe);
void mxc_isi_channel_disable(struct mxc_isi_pipe *pipe);
-int mxc_isi_channel_chain(struct mxc_isi_pipe *pipe, bool bypass);
+int mxc_isi_channel_chain(struct mxc_isi_pipe *pipe);
void mxc_isi_channel_unchain(struct mxc_isi_pipe *pipe);
void mxc_isi_channel_config(struct mxc_isi_pipe *pipe,
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c
index 5623914f95e6..9225a7ac1c3e 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c
@@ -587,7 +587,7 @@ void mxc_isi_channel_release(struct mxc_isi_pipe *pipe)
*
* TODO: Support secondary line buffer for downscaling YUV420 images.
*/
-int mxc_isi_channel_chain(struct mxc_isi_pipe *pipe, bool bypass)
+int mxc_isi_channel_chain(struct mxc_isi_pipe *pipe)
{
/* Channel chaining requires both line and output buffer. */
const u8 resources = MXC_ISI_CHANNEL_RES_OUTPUT_BUF
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
index 22e49d3a1287..00afcbfbdde4 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c
@@ -43,7 +43,6 @@ struct mxc_isi_m2m_ctx_queue_data {
struct v4l2_pix_format_mplane format;
const struct mxc_isi_format_info *info;
u32 sequence;
- bool streaming;
};
struct mxc_isi_m2m_ctx {
@@ -74,9 +73,9 @@ to_isi_m2m_buffer(struct vb2_v4l2_buffer *buf)
return container_of(buf, struct mxc_isi_m2m_buffer, buf.vb);
}
-static inline struct mxc_isi_m2m_ctx *to_isi_m2m_ctx(struct v4l2_fh *fh)
+static inline struct mxc_isi_m2m_ctx *file_to_isi_m2m_ctx(struct file *filp)
{
- return container_of(fh, struct mxc_isi_m2m_ctx, fh);
+ return container_of(file_to_v4l2_fh(filp), struct mxc_isi_m2m_ctx, fh);
}
static inline struct mxc_isi_m2m_ctx_queue_data *
@@ -236,6 +235,70 @@ static void mxc_isi_m2m_vb2_buffer_queue(struct vb2_buffer *vb2)
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
+static int mxc_isi_m2m_vb2_prepare_streaming(struct vb2_queue *q)
+{
+ struct mxc_isi_m2m_ctx *ctx = vb2_get_drv_priv(q);
+ const struct v4l2_pix_format_mplane *out_pix = &ctx->queues.out.format;
+ const struct v4l2_pix_format_mplane *cap_pix = &ctx->queues.cap.format;
+ const struct mxc_isi_format_info *cap_info = ctx->queues.cap.info;
+ const struct mxc_isi_format_info *out_info = ctx->queues.out.info;
+ struct mxc_isi_m2m *m2m = ctx->m2m;
+ int ret;
+
+ guard(mutex)(&m2m->lock);
+
+ if (m2m->usage_count == INT_MAX)
+ return -EOVERFLOW;
+
+ ret = pm_runtime_resume_and_get(m2m->isi->dev);
+ if (ret)
+ return ret;
+
+ /*
+ * Acquire the pipe and initialize the channel with the first user of
+ * the M2M device.
+ */
+ if (m2m->usage_count == 0) {
+ bool bypass = cap_pix->width == out_pix->width &&
+ cap_pix->height == out_pix->height &&
+ cap_info->encoding == out_info->encoding;
+
+ ret = mxc_isi_channel_acquire(m2m->pipe,
+ &mxc_isi_m2m_frame_write_done,
+ bypass);
+ if (ret)
+ goto err_pm;
+
+ mxc_isi_channel_get(m2m->pipe);
+ }
+
+ m2m->usage_count++;
+
+ /*
+ * Allocate resources for the channel, counting how many users require
+ * buffer chaining.
+ */
+ if (!ctx->chained && out_pix->width > MXC_ISI_MAX_WIDTH_UNCHAINED) {
+ ret = mxc_isi_channel_chain(m2m->pipe);
+ if (ret)
+ goto err_deinit;
+
+ m2m->chained_count++;
+ ctx->chained = true;
+ }
+
+ return 0;
+
+err_deinit:
+ if (--m2m->usage_count == 0) {
+ mxc_isi_channel_put(m2m->pipe);
+ mxc_isi_channel_release(m2m->pipe);
+ }
+err_pm:
+ pm_runtime_put(m2m->isi->dev);
+ return ret;
+}
+
static int mxc_isi_m2m_vb2_start_streaming(struct vb2_queue *q,
unsigned int count)
{
@@ -265,13 +328,46 @@ static void mxc_isi_m2m_vb2_stop_streaming(struct vb2_queue *q)
}
}
+static void mxc_isi_m2m_vb2_unprepare_streaming(struct vb2_queue *q)
+{
+ struct mxc_isi_m2m_ctx *ctx = vb2_get_drv_priv(q);
+ struct mxc_isi_m2m *m2m = ctx->m2m;
+
+ guard(mutex)(&m2m->lock);
+
+ /*
+ * If the last context is this one, reset it to make sure the device
+ * will be reconfigured when streaming is restarted.
+ */
+ if (m2m->last_ctx == ctx)
+ m2m->last_ctx = NULL;
+
+ /* Free the channel resources if this is the last chained context. */
+ if (ctx->chained && --m2m->chained_count == 0)
+ mxc_isi_channel_unchain(m2m->pipe);
+ ctx->chained = false;
+
+ /* Turn off the light with the last user. */
+ if (--m2m->usage_count == 0) {
+ mxc_isi_channel_disable(m2m->pipe);
+ mxc_isi_channel_put(m2m->pipe);
+ mxc_isi_channel_release(m2m->pipe);
+ }
+
+ WARN_ON(m2m->usage_count < 0);
+
+ pm_runtime_put(m2m->isi->dev);
+}
+
static const struct vb2_ops mxc_isi_m2m_vb2_qops = {
.queue_setup = mxc_isi_m2m_vb2_queue_setup,
.buf_init = mxc_isi_m2m_vb2_buffer_init,
.buf_prepare = mxc_isi_m2m_vb2_buffer_prepare,
.buf_queue = mxc_isi_m2m_vb2_buffer_queue,
+ .prepare_streaming = mxc_isi_m2m_vb2_prepare_streaming,
.start_streaming = mxc_isi_m2m_vb2_start_streaming,
.stop_streaming = mxc_isi_m2m_vb2_stop_streaming,
+ .unprepare_streaming = mxc_isi_m2m_vb2_unprepare_streaming,
};
static int mxc_isi_m2m_queue_init(void *priv, struct vb2_queue *src_vq,
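
vb2 invokes .prepare_streaming from VIDIOC_STREAMON before any buffers are required and .unprepare_streaming once the queue is fully stopped, so setup errors still surface at STREAMON while the driver-private streaming bookkeeping goes away. The refcounting idiom adopted above, reduced to its shape (struct example_dev and the acquire/release helpers are hypothetical):

static int example_prepare_streaming(struct vb2_queue *q)
{
	struct example_dev *dev = vb2_get_drv_priv(q);

	guard(mutex)(&dev->lock);

	/* acquire the shared resource with the first prepared queue */
	if (dev->usage_count++ == 0) {
		int ret = example_acquire_hw(dev);	/* hypothetical */

		if (ret) {
			dev->usage_count--;
			return ret;
		}
	}

	return 0;
}

static void example_unprepare_streaming(struct vb2_queue *q)
{
	struct example_dev *dev = vb2_get_drv_priv(q);

	guard(mutex)(&dev->lock);

	/* release it with the last queue that unprepares */
	if (--dev->usage_count == 0)
		example_release_hw(dev);	/* hypothetical */
}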
@@ -427,7 +523,7 @@ static int mxc_isi_m2m_try_fmt_vid(struct file *file, void *fh,
const enum mxc_isi_video_type type =
f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
MXC_ISI_VIDEO_M2M_OUT : MXC_ISI_VIDEO_M2M_CAP;
- struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(fh);
+ struct mxc_isi_m2m_ctx *ctx = file_to_isi_m2m_ctx(file);
__mxc_isi_m2m_try_fmt_vid(ctx, &f->fmt.pix_mp, type);
@@ -437,7 +533,7 @@ static int mxc_isi_m2m_try_fmt_vid(struct file *file, void *fh,
static int mxc_isi_m2m_g_fmt_vid(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(fh);
+ struct mxc_isi_m2m_ctx *ctx = file_to_isi_m2m_ctx(file);
const struct mxc_isi_m2m_ctx_queue_data *qdata =
mxc_isi_m2m_ctx_qdata(ctx, f->type);
@@ -452,7 +548,7 @@ static int mxc_isi_m2m_s_fmt_vid(struct file *file, void *fh,
const enum mxc_isi_video_type type =
f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
MXC_ISI_VIDEO_M2M_OUT : MXC_ISI_VIDEO_M2M_CAP;
- struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(fh);
+ struct mxc_isi_m2m_ctx *ctx = file_to_isi_m2m_ctx(file);
struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
const struct mxc_isi_format_info *info;
struct vb2_queue *vq;
@@ -481,136 +577,6 @@ static int mxc_isi_m2m_s_fmt_vid(struct file *file, void *fh,
return 0;
}
-static int mxc_isi_m2m_streamon(struct file *file, void *fh,
- enum v4l2_buf_type type)
-{
- struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(fh);
- struct mxc_isi_m2m_ctx_queue_data *q = mxc_isi_m2m_ctx_qdata(ctx, type);
- const struct v4l2_pix_format_mplane *out_pix = &ctx->queues.out.format;
- const struct v4l2_pix_format_mplane *cap_pix = &ctx->queues.cap.format;
- const struct mxc_isi_format_info *cap_info = ctx->queues.cap.info;
- const struct mxc_isi_format_info *out_info = ctx->queues.out.info;
- struct mxc_isi_m2m *m2m = ctx->m2m;
- bool bypass;
- int ret;
-
- if (q->streaming)
- return 0;
-
- mutex_lock(&m2m->lock);
-
- if (m2m->usage_count == INT_MAX) {
- ret = -EOVERFLOW;
- goto unlock;
- }
-
- bypass = cap_pix->width == out_pix->width &&
- cap_pix->height == out_pix->height &&
- cap_info->encoding == out_info->encoding;
-
- /*
- * Acquire the pipe and initialize the channel with the first user of
- * the M2M device.
- */
- if (m2m->usage_count == 0) {
- ret = mxc_isi_channel_acquire(m2m->pipe,
- &mxc_isi_m2m_frame_write_done,
- bypass);
- if (ret)
- goto unlock;
-
- mxc_isi_channel_get(m2m->pipe);
- }
-
- m2m->usage_count++;
-
- /*
- * Allocate resources for the channel, counting how many users require
- * buffer chaining.
- */
- if (!ctx->chained && out_pix->width > MXC_ISI_MAX_WIDTH_UNCHAINED) {
- ret = mxc_isi_channel_chain(m2m->pipe, bypass);
- if (ret)
- goto deinit;
-
- m2m->chained_count++;
- ctx->chained = true;
- }
-
- /*
- * Drop the lock to start the stream, as the .device_run() operation
- * needs to acquire it.
- */
- mutex_unlock(&m2m->lock);
- ret = v4l2_m2m_ioctl_streamon(file, fh, type);
- if (ret) {
- /* Reacquire the lock for the cleanup path. */
- mutex_lock(&m2m->lock);
- goto unchain;
- }
-
- q->streaming = true;
-
- return 0;
-
-unchain:
- if (ctx->chained && --m2m->chained_count == 0)
- mxc_isi_channel_unchain(m2m->pipe);
- ctx->chained = false;
-
-deinit:
- if (--m2m->usage_count == 0) {
- mxc_isi_channel_put(m2m->pipe);
- mxc_isi_channel_release(m2m->pipe);
- }
-
-unlock:
- mutex_unlock(&m2m->lock);
- return ret;
-}
-
-static int mxc_isi_m2m_streamoff(struct file *file, void *fh,
- enum v4l2_buf_type type)
-{
- struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(fh);
- struct mxc_isi_m2m_ctx_queue_data *q = mxc_isi_m2m_ctx_qdata(ctx, type);
- struct mxc_isi_m2m *m2m = ctx->m2m;
-
- v4l2_m2m_ioctl_streamoff(file, fh, type);
-
- if (!q->streaming)
- return 0;
-
- mutex_lock(&m2m->lock);
-
- /*
- * If the last context is this one, reset it to make sure the device
- * will be reconfigured when streaming is restarted.
- */
- if (m2m->last_ctx == ctx)
- m2m->last_ctx = NULL;
-
- /* Free the channel resources if this is the last chained context. */
- if (ctx->chained && --m2m->chained_count == 0)
- mxc_isi_channel_unchain(m2m->pipe);
- ctx->chained = false;
-
- /* Turn off the light with the last user. */
- if (--m2m->usage_count == 0) {
- mxc_isi_channel_disable(m2m->pipe);
- mxc_isi_channel_put(m2m->pipe);
- mxc_isi_channel_release(m2m->pipe);
- }
-
- WARN_ON(m2m->usage_count < 0);
-
- mutex_unlock(&m2m->lock);
-
- q->streaming = false;
-
- return 0;
-}
-
static const struct v4l2_ioctl_ops mxc_isi_m2m_ioctl_ops = {
.vidioc_querycap = mxc_isi_m2m_querycap,
@@ -631,8 +597,8 @@ static const struct v4l2_ioctl_ops mxc_isi_m2m_ioctl_ops = {
.vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
.vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
- .vidioc_streamon = mxc_isi_m2m_streamon,
- .vidioc_streamoff = mxc_isi_m2m_streamoff,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
@@ -668,7 +634,6 @@ static int mxc_isi_m2m_open(struct file *file)
mutex_init(&ctx->vb2_lock);
v4l2_fh_init(&ctx->fh, vdev);
- file->private_data = &ctx->fh;
ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(m2m->m2m_dev, ctx,
&mxc_isi_m2m_queue_init);
@@ -685,16 +650,10 @@ static int mxc_isi_m2m_open(struct file *file)
if (ret)
goto err_ctx;
- ret = pm_runtime_resume_and_get(m2m->isi->dev);
- if (ret)
- goto err_ctrls;
-
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
return 0;
-err_ctrls:
- mxc_isi_m2m_ctx_ctrls_delete(ctx);
err_ctx:
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
err_fh:
@@ -706,20 +665,17 @@ err_fh:
static int mxc_isi_m2m_release(struct file *file)
{
- struct mxc_isi_m2m *m2m = video_drvdata(file);
- struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(file->private_data);
+ struct mxc_isi_m2m_ctx *ctx = file_to_isi_m2m_ctx(file);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
mxc_isi_m2m_ctx_ctrls_delete(ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
mutex_destroy(&ctx->vb2_lock);
kfree(ctx);
- pm_runtime_put(m2m->isi->dev);
-
return 0;
}
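
Net effect of the open/release hunks above: the runtime PM reference moves from the lifetime of the file handle to the streaming window, so a device that is open but idle can now runtime-suspend. The pairing, reduced to its shape (names illustrative):

static int example_start_use(struct example_dev *dev)
{
	/* power up with the first streaming queue */
	return pm_runtime_resume_and_get(dev->dev);
}

static void example_stop_use(struct example_dev *dev)
{
	/* allow autosuspend once streaming stops */
	pm_runtime_put(dev->dev);
}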
@@ -733,6 +689,40 @@ static const struct v4l2_file_operations mxc_isi_m2m_fops = {
};
/* -----------------------------------------------------------------------------
+ * Suspend & resume
+ */
+
+void mxc_isi_m2m_suspend(struct mxc_isi_m2m *m2m)
+{
+ if (m2m->usage_count == 0)
+ return;
+
+ v4l2_m2m_suspend(m2m->m2m_dev);
+
+ if (m2m->chained_count > 0)
+ mxc_isi_channel_unchain(m2m->pipe);
+
+ mxc_isi_channel_disable(m2m->pipe);
+ mxc_isi_channel_put(m2m->pipe);
+}
+
+int mxc_isi_m2m_resume(struct mxc_isi_m2m *m2m)
+{
+ if (m2m->usage_count == 0)
+ return 0;
+
+ mxc_isi_channel_get(m2m->pipe);
+
+ if (m2m->chained_count > 0)
+ mxc_isi_channel_chain(m2m->pipe);
+
+ m2m->last_ctx = NULL;
+ v4l2_m2m_resume(m2m->m2m_dev);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
* Registration
*/
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c
index d76eb58deb09..a41c51dd9ce0 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c
@@ -855,7 +855,7 @@ int mxc_isi_pipe_acquire(struct mxc_isi_pipe *pipe,
/* Chain the channel if needed for wide resolutions. */
if (sink_fmt->width > MXC_ISI_MAX_WIDTH_UNCHAINED) {
- ret = mxc_isi_channel_chain(pipe, bypass);
+ ret = mxc_isi_channel_chain(pipe);
if (ret)
mxc_isi_channel_release(pipe);
}
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
index 8654150728a8..13682bf6e9f8 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c
@@ -937,6 +937,47 @@ static void mxc_isi_video_init_channel(struct mxc_isi_video *video)
mxc_isi_channel_set_output_format(pipe, video->fmtinfo, &video->pix);
}
+static int mxc_isi_vb2_prepare_streaming(struct vb2_queue *q)
+{
+ struct mxc_isi_video *video = vb2_get_drv_priv(q);
+ struct media_device *mdev = &video->pipe->isi->media_dev;
+ struct media_pipeline *pipe;
+ int ret;
+
+ /* Get a pipeline for the video node and start it. */
+ scoped_guard(mutex, &mdev->graph_mutex) {
+ ret = mxc_isi_pipe_acquire(video->pipe,
+ &mxc_isi_video_frame_write_done);
+ if (ret)
+ return ret;
+
+ pipe = media_entity_pipeline(&video->vdev.entity)
+ ? : &video->pipe->pipe;
+
+ ret = __video_device_pipeline_start(&video->vdev, pipe);
+ if (ret)
+ goto err_release;
+ }
+
+ /* Verify that the video format matches the output of the subdev. */
+ ret = mxc_isi_video_validate_format(video);
+ if (ret)
+ goto err_stop;
+
+ /* Allocate buffers for discard operation. */
+ ret = mxc_isi_video_alloc_discard_buffers(video);
+ if (ret)
+ goto err_stop;
+
+ return 0;
+
+err_stop:
+ video_device_pipeline_stop(&video->vdev);
+err_release:
+ mxc_isi_pipe_release(video->pipe);
+ return ret;
+}
+
static int mxc_isi_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct mxc_isi_video *video = vb2_get_drv_priv(q);
@@ -985,13 +1026,24 @@ static void mxc_isi_vb2_stop_streaming(struct vb2_queue *q)
mxc_isi_video_return_buffers(video, VB2_BUF_STATE_ERROR);
}
+static void mxc_isi_vb2_unprepare_streaming(struct vb2_queue *q)
+{
+ struct mxc_isi_video *video = vb2_get_drv_priv(q);
+
+ mxc_isi_video_free_discard_buffers(video);
+ video_device_pipeline_stop(&video->vdev);
+ mxc_isi_pipe_release(video->pipe);
+}
+
static const struct vb2_ops mxc_isi_vb2_qops = {
.queue_setup = mxc_isi_vb2_queue_setup,
.buf_init = mxc_isi_vb2_buffer_init,
.buf_prepare = mxc_isi_vb2_buffer_prepare,
.buf_queue = mxc_isi_vb2_buffer_queue,
+ .prepare_streaming = mxc_isi_vb2_prepare_streaming,
.start_streaming = mxc_isi_vb2_start_streaming,
.stop_streaming = mxc_isi_vb2_stop_streaming,
+ .unprepare_streaming = mxc_isi_vb2_unprepare_streaming,
};
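
One detail worth noting in mxc_isi_vb2_prepare_streaming() above: scoped_guard() (from linux/cleanup.h) holds graph_mutex only for the enclosed block, and the compiler-generated cleanup runs on every exit from that block, including the goto to err_release, so no explicit mutex_unlock() is needed on the error paths. A reduced illustration with a hypothetical helper:

static int example_locked_step(struct mutex *lock)
{
	int ret;

	scoped_guard(mutex, lock) {
		ret = example_step();	/* hypothetical */
		if (ret)
			goto err;	/* the guard unlocks here too */
	}

	return 0;

err:
	return ret;
}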
/* -----------------------------------------------------------------------------
@@ -1145,97 +1197,6 @@ static int mxc_isi_video_s_fmt(struct file *file, void *priv,
return 0;
}
-static int mxc_isi_video_streamon(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct mxc_isi_video *video = video_drvdata(file);
- struct media_device *mdev = &video->pipe->isi->media_dev;
- struct media_pipeline *pipe;
- int ret;
-
- if (vb2_queue_is_busy(&video->vb2_q, file))
- return -EBUSY;
-
- /*
- * Get a pipeline for the video node and start it. This must be done
- * here and not in the queue .start_streaming() handler, so that
- * pipeline start errors can be reported from VIDIOC_STREAMON and not
- * delayed until subsequent VIDIOC_QBUF calls.
- */
- mutex_lock(&mdev->graph_mutex);
-
- ret = mxc_isi_pipe_acquire(video->pipe, &mxc_isi_video_frame_write_done);
- if (ret) {
- mutex_unlock(&mdev->graph_mutex);
- return ret;
- }
-
- pipe = media_entity_pipeline(&video->vdev.entity) ? : &video->pipe->pipe;
-
- ret = __video_device_pipeline_start(&video->vdev, pipe);
- if (ret) {
- mutex_unlock(&mdev->graph_mutex);
- goto err_release;
- }
-
- mutex_unlock(&mdev->graph_mutex);
-
- /* Verify that the video format matches the output of the subdev. */
- ret = mxc_isi_video_validate_format(video);
- if (ret)
- goto err_stop;
-
- /* Allocate buffers for discard operation. */
- ret = mxc_isi_video_alloc_discard_buffers(video);
- if (ret)
- goto err_stop;
-
- ret = vb2_streamon(&video->vb2_q, type);
- if (ret)
- goto err_free;
-
- video->is_streaming = true;
-
- return 0;
-
-err_free:
- mxc_isi_video_free_discard_buffers(video);
-err_stop:
- video_device_pipeline_stop(&video->vdev);
-err_release:
- mxc_isi_pipe_release(video->pipe);
- return ret;
-}
-
-static void mxc_isi_video_cleanup_streaming(struct mxc_isi_video *video)
-{
- lockdep_assert_held(&video->lock);
-
- if (!video->is_streaming)
- return;
-
- mxc_isi_video_free_discard_buffers(video);
- video_device_pipeline_stop(&video->vdev);
- mxc_isi_pipe_release(video->pipe);
-
- video->is_streaming = false;
-}
-
-static int mxc_isi_video_streamoff(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct mxc_isi_video *video = video_drvdata(file);
- int ret;
-
- ret = vb2_ioctl_streamoff(file, priv, type);
- if (ret)
- return ret;
-
- mxc_isi_video_cleanup_streaming(video);
-
- return 0;
-}
-
static int mxc_isi_video_enum_framesizes(struct file *file, void *priv,
struct v4l2_frmsizeenum *fsize)
{
@@ -1291,9 +1252,8 @@ static const struct v4l2_ioctl_ops mxc_isi_video_ioctl_ops = {
.vidioc_expbuf = vb2_ioctl_expbuf,
.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
.vidioc_create_bufs = vb2_ioctl_create_bufs,
-
- .vidioc_streamon = mxc_isi_video_streamon,
- .vidioc_streamoff = mxc_isi_video_streamoff,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_enum_framesizes = mxc_isi_video_enum_framesizes,
@@ -1332,10 +1292,6 @@ static int mxc_isi_video_release(struct file *file)
if (ret)
dev_err(video->pipe->isi->dev, "%s fail\n", __func__);
- mutex_lock(&video->lock);
- mxc_isi_video_cleanup_streaming(video);
- mutex_unlock(&video->lock);
-
pm_runtime_put(video->pipe->isi->dev);
return ret;
}
@@ -1357,7 +1313,7 @@ void mxc_isi_video_suspend(struct mxc_isi_pipe *pipe)
{
struct mxc_isi_video *video = &pipe->video;
- if (!video->is_streaming)
+ if (!vb2_is_streaming(&video->vb2_q))
return;
mxc_isi_pipe_disable(pipe);
@@ -1388,7 +1344,7 @@ int mxc_isi_video_resume(struct mxc_isi_pipe *pipe)
{
struct mxc_isi_video *video = &pipe->video;
- if (!video->is_streaming)
+ if (!vb2_is_streaming(&video->vb2_q))
return 0;
mxc_isi_video_init_channel(video);
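
With pipeline setup moved into the queue ops, the driver-side is_streaming flag becomes redundant: vb2 already tracks the queue state and exposes it through vb2_is_streaming(), so there is no driver flag left to keep in sync under video->lock. The suspend/resume checks above reduce to:

static bool example_video_active(struct mxc_isi_video *video)
{
	/* vb2 owns the state; no shadow flag needed */
	return vb2_is_streaming(&video->vb2_q);
}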
diff --git a/drivers/media/platform/nxp/mx2_emmaprp.c b/drivers/media/platform/nxp/mx2_emmaprp.c
index 0c6cc120fd2a..3aae8c0b690c 100644
--- a/drivers/media/platform/nxp/mx2_emmaprp.c
+++ b/drivers/media/platform/nxp/mx2_emmaprp.c
@@ -214,6 +214,11 @@ struct emmaprp_ctx {
struct emmaprp_q_data q_data[2];
};
+static inline struct emmaprp_ctx *file_to_emmaprp_ctx(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct emmaprp_ctx, fh);
+}
+
static struct emmaprp_q_data *get_q_data(struct emmaprp_ctx *ctx,
enum v4l2_buf_type type)
{
@@ -451,13 +456,13 @@ static int vidioc_g_fmt(struct emmaprp_ctx *ctx, struct v4l2_format *f)
static int vidioc_g_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
- return vidioc_g_fmt(priv, f);
+ return vidioc_g_fmt(file_to_emmaprp_ctx(file), f);
}
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- return vidioc_g_fmt(priv, f);
+ return vidioc_g_fmt(file_to_emmaprp_ctx(file), f);
}
static int vidioc_try_fmt(struct v4l2_format *f)
@@ -497,8 +502,8 @@ static int vidioc_try_fmt(struct v4l2_format *f)
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
+ struct emmaprp_ctx *ctx = file_to_emmaprp_ctx(file);
struct emmaprp_fmt *fmt;
- struct emmaprp_ctx *ctx = priv;
fmt = find_format(f);
if (!fmt || !(fmt->types & MEM2MEM_CAPTURE)) {
@@ -514,8 +519,8 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
+ struct emmaprp_ctx *ctx = file_to_emmaprp_ctx(file);
struct emmaprp_fmt *fmt;
- struct emmaprp_ctx *ctx = priv;
fmt = find_format(f);
if (!fmt || !(fmt->types & MEM2MEM_OUTPUT)) {
@@ -575,7 +580,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
if (ret)
return ret;
- return vidioc_s_fmt(priv, f);
+ return vidioc_s_fmt(file_to_emmaprp_ctx(file), f);
}
static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
@@ -587,7 +592,7 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
if (ret)
return ret;
- return vidioc_s_fmt(priv, f);
+ return vidioc_s_fmt(file_to_emmaprp_ctx(file), f);
}
static const struct v4l2_ioctl_ops emmaprp_ioctl_ops = {
@@ -725,7 +730,6 @@ static int emmaprp_open(struct file *file)
return -ENOMEM;
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
ctx->dev = pcdev;
if (mutex_lock_interruptible(&pcdev->dev_mutex)) {
@@ -747,7 +751,7 @@ static int emmaprp_open(struct file *file)
clk_prepare_enable(pcdev->clk_emma_ahb);
ctx->q_data[V4L2_M2M_SRC].fmt = &formats[1];
ctx->q_data[V4L2_M2M_DST].fmt = &formats[0];
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
mutex_unlock(&pcdev->dev_mutex);
dprintk(pcdev, "Created instance %p, m2m_ctx: %p\n", ctx, ctx->fh.m2m_ctx);
@@ -758,14 +762,14 @@ static int emmaprp_open(struct file *file)
static int emmaprp_release(struct file *file)
{
struct emmaprp_dev *pcdev = video_drvdata(file);
- struct emmaprp_ctx *ctx = file->private_data;
+ struct emmaprp_ctx *ctx = file_to_emmaprp_ctx(file);
dprintk(pcdev, "Releasing instance %p\n", ctx);
mutex_lock(&pcdev->dev_mutex);
clk_disable_unprepare(pcdev->clk_emma_ahb);
clk_disable_unprepare(pcdev->clk_emma_ipg);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
mutex_unlock(&pcdev->dev_mutex);
diff --git a/drivers/media/platform/qcom/camss/Makefile b/drivers/media/platform/qcom/camss/Makefile
index d26a9c24a430..23960d02877d 100644
--- a/drivers/media/platform/qcom/camss/Makefile
+++ b/drivers/media/platform/qcom/camss/Makefile
@@ -6,9 +6,10 @@ qcom-camss-objs += \
camss-csid.o \
camss-csid-4-1.o \
camss-csid-4-7.o \
+ camss-csid-340.o \
camss-csid-680.o \
camss-csid-gen2.o \
- camss-csid-780.o \
+ camss-csid-gen3.o \
camss-csiphy-2ph-1-0.o \
camss-csiphy-3ph-1-0.o \
camss-csiphy.o \
@@ -17,9 +18,10 @@ qcom-camss-objs += \
camss-vfe-4-7.o \
camss-vfe-4-8.o \
camss-vfe-17x.o \
+ camss-vfe-340.o \
camss-vfe-480.o \
camss-vfe-680.o \
- camss-vfe-780.o \
+ camss-vfe-gen3.o \
camss-vfe-gen1.o \
camss-vfe.o \
camss-video.o \
diff --git a/drivers/media/platform/qcom/camss/camss-csid-340.c b/drivers/media/platform/qcom/camss/camss-csid-340.c
new file mode 100644
index 000000000000..22a30510fb79
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-csid-340.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm MSM Camera Subsystem - CSID (CSI Decoder) Module 340
+ *
+ * Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/completion.h>
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+
+#include "camss.h"
+#include "camss-csid.h"
+#include "camss-csid-gen2.h"
+
+#define CSID_RST_STROBES (0x010)
+#define CSID_RST_SW_REGS BIT(0)
+#define CSID_RST_IRQ BIT(1)
+#define CSID_RST_IFE_CLK BIT(2)
+#define CSID_RST_PHY_CLK BIT(3)
+#define CSID_RST_CSID_CLK BIT(4)
+
+#define CSID_IRQ_STATUS (0x070)
+#define CSID_IRQ_MASK (0x074)
+#define CSID_IRQ_MASK_RST_DONE BIT(0)
+#define CSID_IRQ_CLEAR (0x078)
+#define CSID_IRQ_CMD (0x080)
+#define CSID_IRQ_CMD_CLEAR BIT(0)
+
+#define CSID_CSI2_RX_CFG0 (0x100)
+#define CSI2_RX_CFG0_NUM_ACTIVE_LANES_MASK GENMASK(1, 0)
+#define CSI2_RX_CFG0_DLX_INPUT_SEL_MASK GENMASK(17, 4)
+#define CSI2_RX_CFG0_PHY_NUM_SEL_MASK GENMASK(21, 20)
+#define CSI2_RX_CFG0_PHY_NUM_SEL_BASE_IDX 1
+#define CSI2_RX_CFG0_PHY_TYPE_SEL BIT(24)
+
+#define CSID_CSI2_RX_CFG1 (0x104)
+#define CSI2_RX_CFG1_PACKET_ECC_CORRECTION_EN BIT(0)
+#define CSI2_RX_CFG1_MISR_EN BIT(6)
+#define CSI2_RX_CFG1_CGC_MODE BIT(7)
+
+#define CSID_RDI_CFG0(rdi) (0x300 + 0x100 * (rdi))
+#define CSID_RDI_CFG0_BYTE_CNTR_EN BIT(0)
+#define CSID_RDI_CFG0_TIMESTAMP_EN BIT(1)
+#define CSID_RDI_CFG0_DECODE_FORMAT_MASK GENMASK(15, 12)
+#define CSID_RDI_CFG0_DECODE_FORMAT_NOP CSID_RDI_CFG0_DECODE_FORMAT_MASK
+#define CSID_RDI_CFG0_DT_MASK GENMASK(21, 16)
+#define CSID_RDI_CFG0_VC_MASK GENMASK(23, 22)
+#define CSID_RDI_CFG0_DTID_MASK GENMASK(28, 27)
+#define CSID_RDI_CFG0_ENABLE BIT(31)
+
+#define CSID_RDI_CTRL(rdi) (0x308 + 0x100 * (rdi))
+#define CSID_RDI_CTRL_HALT_AT_FRAME_BOUNDARY 0
+#define CSID_RDI_CTRL_RESUME_AT_FRAME_BOUNDARY 1
+
+static void __csid_configure_rx(struct csid_device *csid,
+ struct csid_phy_config *phy, int vc)
+{
+ u32 val;
+
+ val = FIELD_PREP(CSI2_RX_CFG0_NUM_ACTIVE_LANES_MASK, phy->lane_cnt - 1);
+ val |= FIELD_PREP(CSI2_RX_CFG0_DLX_INPUT_SEL_MASK, phy->lane_assign);
+ val |= FIELD_PREP(CSI2_RX_CFG0_PHY_NUM_SEL_MASK,
+ phy->csiphy_id + CSI2_RX_CFG0_PHY_NUM_SEL_BASE_IDX);
+ writel_relaxed(val, csid->base + CSID_CSI2_RX_CFG0);
+
+ val = CSI2_RX_CFG1_PACKET_ECC_CORRECTION_EN;
+ writel_relaxed(val, csid->base + CSID_CSI2_RX_CFG1);
+}
+
+static void __csid_ctrl_rdi(struct csid_device *csid, int enable, u8 rdi)
+{
+ writel_relaxed(!!enable, csid->base + CSID_RDI_CTRL(rdi));
+}
+
+static void __csid_configure_rdi_stream(struct csid_device *csid, u8 enable, u8 vc)
+{
+ struct v4l2_mbus_framefmt *input_format = &csid->fmt[MSM_CSID_PAD_FIRST_SRC + vc];
+ const struct csid_format_info *format = csid_get_fmt_entry(csid->res->formats->formats,
+ csid->res->formats->nformats,
+ input_format->code);
+ u8 lane_cnt = csid->phy.lane_cnt;
+ u8 dt_id;
+ u32 val;
+
+ if (!lane_cnt)
+ lane_cnt = 4;
+
+ /*
+ * DT_ID is a two bit bitfield that is concatenated with
+ * the four least significant bits of the five bit VC
+ * bitfield to generate an internal CID value.
+ *
+ * CSID_RDI_CFG0(vc)
+ * DT_ID : 28:27
+ * VC : 26:22
+ * DT : 21:16
+ *
+ * CID : VC 3:0 << 2 | DT_ID 1:0
+ */
+ dt_id = vc & 0x03;
+
+ val = CSID_RDI_CFG0_DECODE_FORMAT_NOP; /* only for RDI path */
+ val |= FIELD_PREP(CSID_RDI_CFG0_DT_MASK, format->data_type);
+ val |= FIELD_PREP(CSID_RDI_CFG0_VC_MASK, vc);
+ val |= FIELD_PREP(CSID_RDI_CFG0_DTID_MASK, dt_id);
+
+ if (enable)
+ val |= CSID_RDI_CFG0_ENABLE;
+
+ dev_dbg(csid->camss->dev, "CSID%u: Stream %s (dt:0x%x vc=%u)\n",
+ csid->id, enable ? "enable" : "disable", format->data_type, vc);
+
+ writel_relaxed(val, csid->base + CSID_RDI_CFG0(vc));
+}
+
+static void csid_configure_stream(struct csid_device *csid, u8 enable)
+{
+ int i;
+
+ for (i = 0; i < MSM_CSID_MAX_SRC_STREAMS; i++) {
+ if (csid->phy.en_vc & BIT(i)) {
+ __csid_configure_rdi_stream(csid, enable, i);
+ __csid_configure_rx(csid, &csid->phy, i);
+ __csid_ctrl_rdi(csid, enable, i);
+ }
+ }
+}
+
+static int csid_reset(struct csid_device *csid)
+{
+ unsigned long time;
+
+ writel_relaxed(CSID_IRQ_MASK_RST_DONE, csid->base + CSID_IRQ_MASK);
+ writel_relaxed(CSID_IRQ_MASK_RST_DONE, csid->base + CSID_IRQ_CLEAR);
+ writel_relaxed(CSID_IRQ_CMD_CLEAR, csid->base + CSID_IRQ_CMD);
+
+ reinit_completion(&csid->reset_complete);
+
+ /* Reset with registers preserved */
+ writel(CSID_RST_IRQ | CSID_RST_IFE_CLK | CSID_RST_PHY_CLK | CSID_RST_CSID_CLK,
+ csid->base + CSID_RST_STROBES);
+
+ time = wait_for_completion_timeout(&csid->reset_complete,
+ msecs_to_jiffies(CSID_RESET_TIMEOUT_MS));
+ if (!time) {
+ dev_err(csid->camss->dev, "CSID%u: reset timeout\n", csid->id);
+ return -EIO;
+ }
+
+ dev_dbg(csid->camss->dev, "CSID%u: reset done\n", csid->id);
+
+ return 0;
+}
+
+static irqreturn_t csid_isr(int irq, void *dev)
+{
+ struct csid_device *csid = dev;
+ u32 val;
+
+ val = readl_relaxed(csid->base + CSID_IRQ_STATUS);
+ writel_relaxed(val, csid->base + CSID_IRQ_CLEAR);
+ writel_relaxed(CSID_IRQ_CMD_CLEAR, csid->base + CSID_IRQ_CMD);
+
+ if (val & CSID_IRQ_MASK_RST_DONE)
+ complete(&csid->reset_complete);
+ else
+ dev_warn_ratelimited(csid->camss->dev, "Spurious CSID interrupt\n");
+
+ return IRQ_HANDLED;
+}
+
+static int csid_configure_testgen_pattern(struct csid_device *csid, s32 val)
+{
+ return -EOPNOTSUPP; /* Not part of CSID */
+}
+
+static void csid_subdev_init(struct csid_device *csid) {}
+
+const struct csid_hw_ops csid_ops_340 = {
+ .configure_testgen_pattern = csid_configure_testgen_pattern,
+ .configure_stream = csid_configure_stream,
+ .hw_version = csid_hw_version,
+ .isr = csid_isr,
+ .reset = csid_reset,
+ .src_pad_code = csid_src_pad_code,
+ .subdev_init = csid_subdev_init,
+};
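
A worked instance of the CID derivation documented in __csid_configure_rdi_stream() above, assuming the comment's formula CID = VC[3:0] << 2 | DT_ID[1:0]: for vc = 2, dt_id = vc & 0x03 = 2, giving CID = (2 << 2) | 2 = 10.

static inline u8 example_csid_cid(u8 vc)
{
	u8 dt_id = vc & 0x03;	/* as computed in the new file above */

	return ((vc & 0x0f) << 2) | dt_id;
}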
diff --git a/drivers/media/platform/qcom/camss/camss-csid-780.c b/drivers/media/platform/qcom/camss/camss-csid-gen3.c
index 4c720d177731..664245cf6eb0 100644
--- a/drivers/media/platform/qcom/camss/camss-csid-780.c
+++ b/drivers/media/platform/qcom/camss/camss-csid-gen3.c
@@ -13,7 +13,7 @@
#include "camss.h"
#include "camss-csid.h"
-#include "camss-csid-780.h"
+#include "camss-csid-gen3.h"
#define CSID_IO_PATH_CFG0(csid) (0x4 * (csid))
#define OUTPUT_IFE_EN 0x100
@@ -45,8 +45,12 @@
#define CSID_CSI2_RX_IRQ_CLEAR 0xA4
#define CSID_CSI2_RX_IRQ_SET 0xA8
+#define IS_CSID_690(csid) ((csid->camss->res->version == CAMSS_8775P) \
+ || (csid->camss->res->version == CAMSS_8300))
#define CSID_BUF_DONE_IRQ_STATUS 0x8C
-#define BUF_DONE_IRQ_STATUS_RDI_OFFSET (csid_is_lite(csid) ? 1 : 14)
+#define BUF_DONE_IRQ_STATUS_RDI_OFFSET (csid_is_lite(csid) ?\
+ 1 : (IS_CSID_690(csid) ?\
+ 13 : 14))
#define CSID_BUF_DONE_IRQ_MASK 0x90
#define CSID_BUF_DONE_IRQ_CLEAR 0x94
#define CSID_BUF_DONE_IRQ_SET 0x98
@@ -59,6 +63,7 @@
#define CSID_CSI2_RX_CFG0 0x200
#define CSI2_RX_CFG0_NUM_ACTIVE_LANES 0
+#define CSI2_RX_CFG0_VC_MODE 3
#define CSI2_RX_CFG0_DL0_INPUT_SEL 4
#define CSI2_RX_CFG0_PHY_NUM_SEL 20
@@ -66,7 +71,9 @@
#define CSI2_RX_CFG1_ECC_CORRECTION_EN BIT(0)
#define CSI2_RX_CFG1_VC_MODE BIT(2)
-#define CSID_RDI_CFG0(rdi) (0x500 + 0x100 * (rdi))
+#define CSID_RDI_CFG0(rdi) (csid_is_lite(csid) && IS_CSID_690(csid) ?\
+ (0x300 + 0x100 * (rdi)) :\
+ (0x500 + 0x100 * (rdi)))
#define RDI_CFG0_TIMESTAMP_EN BIT(6)
#define RDI_CFG0_TIMESTAMP_STB_SEL BIT(8)
#define RDI_CFG0_DECODE_FORMAT 12
@@ -75,10 +82,14 @@
#define RDI_CFG0_DT_ID 27
#define RDI_CFG0_EN BIT(31)
-#define CSID_RDI_CTRL(rdi) (0x504 + 0x100 * (rdi))
+#define CSID_RDI_CTRL(rdi) (csid_is_lite(csid) && IS_CSID_690(csid) ?\
+ (0x304 + 0x100 * (rdi)) :\
+ (0x504 + 0x100 * (rdi)))
#define RDI_CTRL_START_CMD BIT(0)
-#define CSID_RDI_CFG1(rdi) (0x510 + 0x100 * (rdi))
+#define CSID_RDI_CFG1(rdi) (csid_is_lite(csid) && IS_CSID_690(csid) ?\
+ (0x310 + 0x100 * (rdi)) :\
+ (0x510 + 0x100 * (rdi)))
#define RDI_CFG1_DROP_H_EN BIT(5)
#define RDI_CFG1_DROP_V_EN BIT(6)
#define RDI_CFG1_CROP_H_EN BIT(7)
@@ -86,9 +97,12 @@
#define RDI_CFG1_PIX_STORE BIT(10)
#define RDI_CFG1_PACKING_FORMAT_MIPI BIT(15)
-#define CSID_RDI_IRQ_SUBSAMPLE_PATTERN(rdi) (0x548 + 0x100 * (rdi))
-#define CSID_RDI_IRQ_SUBSAMPLE_PERIOD(rdi) (0x54C + 0x100 * (rdi))
-
+#define CSID_RDI_IRQ_SUBSAMPLE_PATTERN(rdi) (csid_is_lite(csid) && IS_CSID_690(csid) ?\
+ (0x348 + 0x100 * (rdi)) :\
+ (0x548 + 0x100 * (rdi)))
+#define CSID_RDI_IRQ_SUBSAMPLE_PERIOD(rdi) (csid_is_lite(csid) && IS_CSID_690(csid) ?\
+ (0x34C + 0x100 * (rdi)) :\
+ (0x54C + 0x100 * (rdi)))
#define CSI2_RX_CFG0_PHY_SEL_BASE_IDX 1
static void __csid_configure_rx(struct csid_device *csid,
@@ -259,7 +273,7 @@ static irqreturn_t csid_isr(int irq, void *dev)
if (buf_done_val & BIT(BUF_DONE_IRQ_STATUS_RDI_OFFSET + i)) {
/*
- * For Titan 780, bus done and RUP IRQ have been moved to
+ * For Titan Gen3, bus done and RUP IRQ have been moved to
* CSID from VFE. Once the CSID receives bus done, it needs to notify
* the VFE of this event and trigger the VFE to handle the bus done process.
*/
@@ -325,7 +339,7 @@ static void csid_subdev_init(struct csid_device *csid)
csid->testgen.nmodes = CSID_PAYLOAD_MODE_DISABLED;
}
-const struct csid_hw_ops csid_ops_780 = {
+const struct csid_hw_ops csid_ops_gen3 = {
.configure_stream = csid_configure_stream,
.configure_testgen_pattern = csid_configure_testgen_pattern,
.hw_version = csid_hw_version,
diff --git a/drivers/media/platform/qcom/camss/camss-csid-780.h b/drivers/media/platform/qcom/camss/camss-csid-gen3.h
index a990c66a60ff..6ee62da770c1 100644
--- a/drivers/media/platform/qcom/camss/camss-csid-780.h
+++ b/drivers/media/platform/qcom/camss/camss-csid-gen3.h
@@ -1,13 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * camss-csid-780.h
+ * camss-csid-gen3.h
*
* Qualcomm MSM Camera Subsystem - CSID (CSI Decoder) Module Generation 3
*
* Copyright (c) 2024 Qualcomm Technologies, Inc.
*/
-#ifndef __QC_MSM_CAMSS_CSID_780_H__
-#define __QC_MSM_CAMSS_CSID_780_H__
+#ifndef __QC_MSM_CAMSS_CSID_GEN3_H__
+#define __QC_MSM_CAMSS_CSID_GEN3_H__
#define DECODE_FORMAT_UNCOMPRESSED_8_BIT 0x1
#define DECODE_FORMAT_UNCOMPRESSED_10_BIT 0x2
@@ -22,4 +22,4 @@
#define PLAIN_FORMAT_PLAIN16 0x1 /* supports DPCM, UNCOMPRESSED_10/16_BIT */
#define PLAIN_FORMAT_PLAIN32 0x2 /* supports UNCOMPRESSED_20_BIT */
-#endif /* __QC_MSM_CAMSS_CSID_780_H__ */
+#endif /* __QC_MSM_CAMSS_CSID_GEN3_H__ */
diff --git a/drivers/media/platform/qcom/camss/camss-csid.h b/drivers/media/platform/qcom/camss/camss-csid.h
index 9dc826d8c8f6..aedc96ed84b2 100644
--- a/drivers/media/platform/qcom/camss/camss-csid.h
+++ b/drivers/media/platform/qcom/camss/camss-csid.h
@@ -213,9 +213,10 @@ extern const struct csid_formats csid_formats_gen2;
extern const struct csid_hw_ops csid_ops_4_1;
extern const struct csid_hw_ops csid_ops_4_7;
+extern const struct csid_hw_ops csid_ops_340;
extern const struct csid_hw_ops csid_ops_680;
extern const struct csid_hw_ops csid_ops_gen2;
-extern const struct csid_hw_ops csid_ops_780;
+extern const struct csid_hw_ops csid_ops_gen3;
/*
* csid_is_lite - Check if CSID is CSID lite.
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
index 88c0ba495c32..a229ba04b158 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
@@ -64,6 +64,85 @@ struct csiphy_lane_regs {
u32 csiphy_param_type;
};
+/* 5nm 2PH v 1.3.0 2p5Gbps 4 lane DPHY mode */
+static const struct
+csiphy_lane_regs lane_regs_sa8775p[] = {
+ {0x0724, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0700, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x070C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0734, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x071C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0720, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0708, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0024, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0000, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0008, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0224, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0200, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0234, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x021C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0220, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0208, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0424, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0400, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0408, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x0624, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0600, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0634, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x061C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0620, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0608, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x005C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0060, 0xFD, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0064, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x025C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0260, 0xFD, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0264, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x045C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0460, 0xFD, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0464, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x065C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0660, 0xFD, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0664, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+};
+
/* GEN2 1.0 2PH */
static const struct
csiphy_lane_regs lane_regs_sdm845[] = {
@@ -319,6 +398,90 @@ csiphy_lane_regs lane_regs_sm8250[] = {
{0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
};
+/* 14nm 2PH v 2.0.1 2p5Gbps 4 lane DPHY mode */
+static const struct
+csiphy_lane_regs lane_regs_qcm2290[] = {
+ {0x0030, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x002c, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0034, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0028, 0x04, 0x00, CSIPHY_DNP_PARAMS},
+ {0x003c, 0xb8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x001c, 0x0a, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0000, 0xd7, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0004, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0008, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x000c, 0xff, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0010, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0038, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0060, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0064, 0x3f, 0x00, CSIPHY_DEFAULT_PARAMS},
+
+ {0x0730, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x072c, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0734, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x073c, 0xb8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x071c, 0x0a, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0700, 0xc0, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0704, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0720, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0708, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x070c, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0710, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0738, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0760, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0764, 0x3f, 0x00, CSIPHY_DEFAULT_PARAMS},
+
+ {0x0230, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x022c, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0234, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0228, 0x04, 0x00, CSIPHY_DNP_PARAMS},
+ {0x023c, 0xb8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x021c, 0x0a, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0200, 0xd7, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0204, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0220, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0208, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x020c, 0xff, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0210, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0238, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0260, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0264, 0x3f, 0x00, CSIPHY_DEFAULT_PARAMS},
+
+ {0x0430, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x042c, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0434, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0428, 0x04, 0x00, CSIPHY_DNP_PARAMS},
+ {0x043c, 0xb8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x041c, 0x0a, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0400, 0xd7, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0404, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0408, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x040C, 0xff, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0410, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0438, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0460, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0464, 0x3f, 0x00, CSIPHY_DEFAULT_PARAMS},
+
+ {0x0630, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x062c, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0634, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0628, 0x04, 0x00, CSIPHY_DNP_PARAMS},
+ {0x063c, 0xb8, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x061c, 0x0a, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0600, 0xd7, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0604, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0620, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0608, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+ {0x060C, 0xff, 0x00, CSIPHY_DNP_PARAMS},
+ {0x0610, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0638, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0660, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+ {0x0664, 0x3f, 0x00, CSIPHY_DEFAULT_PARAMS},
+};
+
/* GEN2 2.1.2 2PH DPHY mode */
static const struct
csiphy_lane_regs lane_regs_sm8550[] = {
@@ -744,11 +907,14 @@ static bool csiphy_is_gen2(u32 version)
bool ret = false;
switch (version) {
+ case CAMSS_2290:
case CAMSS_7280:
case CAMSS_8250:
case CAMSS_8280XP:
+ case CAMSS_8300:
case CAMSS_845:
case CAMSS_8550:
+ case CAMSS_8775P:
case CAMSS_X1E80100:
ret = true;
break;
@@ -829,6 +995,10 @@ static int csiphy_init(struct csiphy_device *csiphy)
regs->lane_regs = &lane_regs_sdm845[0];
regs->lane_array_size = ARRAY_SIZE(lane_regs_sdm845);
break;
+ case CAMSS_2290:
+ regs->lane_regs = &lane_regs_qcm2290[0];
+ regs->lane_array_size = ARRAY_SIZE(lane_regs_qcm2290);
+ break;
case CAMSS_7280:
case CAMSS_8250:
regs->lane_regs = &lane_regs_sm8250[0];
@@ -848,6 +1018,11 @@ static int csiphy_init(struct csiphy_device *csiphy)
regs->lane_array_size = ARRAY_SIZE(lane_regs_sm8550);
regs->offset = 0x1000;
break;
+ case CAMSS_8300:
+ case CAMSS_8775P:
+ regs->lane_regs = &lane_regs_sa8775p[0];
+ regs->lane_array_size = ARRAY_SIZE(lane_regs_sa8775p);
+ break;
default:
break;
}
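
The lane_regs tables added above are plain (address, value, delay, param-type) tuples; the field names below are assumed from the visible struct tail, and the loop is only a sketch of how such a table is typically replayed, not the driver's actual routine:

static void example_apply_lane_regs(void __iomem *base,
				    const struct csiphy_lane_regs *regs,
				    size_t count, u8 settle_cnt)
{
	size_t i;

	for (i = 0; i < count; i++) {
		u32 val = regs[i].reg_data;	/* field name assumed */

		/* settle-count entries are patched at runtime */
		if (regs[i].csiphy_param_type == CSIPHY_SETTLE_CNT_LOWER_BYTE)
			val = settle_cnt;

		writel_relaxed(val, base + regs[i].reg_addr);
		if (regs[i].delay)		/* field name assumed */
			udelay(regs[i].delay);
	}
}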
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-340.c b/drivers/media/platform/qcom/camss/camss-vfe-340.c
new file mode 100644
index 000000000000..30d7630b3e8b
--- /dev/null
+++ b/drivers/media/platform/qcom/camss/camss-vfe-340.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module 340 (TFE)
+ *
+ * Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+
+#include "camss.h"
+#include "camss-vfe.h"
+
+#define TFE_GLOBAL_RESET_CMD (0x014)
+#define TFE_GLOBAL_RESET_CMD_CORE BIT(0)
+
+#define TFE_REG_UPDATE_CMD (0x02c)
+
+#define TFE_IRQ_CMD (0x030)
+#define TFE_IRQ_CMD_CLEAR BIT(0)
+#define TFE_IRQ_MASK_0 (0x034)
+#define TFE_IRQ_MASK_0_RST_DONE BIT(0)
+#define TFE_IRQ_MASK_0_BUS_WR BIT(1)
+#define TFE_IRQ_MASK_1 (0x038)
+#define TFE_IRQ_MASK_2 (0x03c)
+#define TFE_IRQ_CLEAR_0 (0x040)
+
+#define TFE_IRQ_STATUS_0 (0x04c)
+
+#define BUS_REG(a) (0xa00 + (a))
+
+#define TFE_BUS_IRQ_MASK_0 BUS_REG(0x18)
+#define TFE_BUS_IRQ_MASK_RUP_DONE_MASK GENMASK(3, 0)
+#define TFE_BUS_IRQ_MASK_RUP_DONE(sc) FIELD_PREP(TFE_BUS_IRQ_MASK_RUP_DONE_MASK, BIT(sc))
+#define TFE_BUS_IRQ_MASK_BUF_DONE_MASK GENMASK(15, 8)
+#define TFE_BUS_IRQ_MASK_BUF_DONE(sg) FIELD_PREP(TFE_BUS_IRQ_MASK_BUF_DONE_MASK, BIT(sg))
+#define TFE_BUS_IRQ_MASK_0_CONS_VIOL BIT(28)
+#define TFE_BUS_IRQ_MASK_0_VIOL BIT(30)
+#define TFE_BUS_IRQ_MASK_0_IMG_VIOL BIT(31)
+
+#define TFE_BUS_IRQ_MASK_1 BUS_REG(0x1c)
+#define TFE_BUS_IRQ_CLEAR_0 BUS_REG(0x20)
+#define TFE_BUS_IRQ_STATUS_0 BUS_REG(0x28)
+#define TFE_BUS_IRQ_CMD BUS_REG(0x30)
+#define TFE_BUS_IRQ_CMD_CLEAR BIT(0)
+
+#define TFE_BUS_STATUS_CLEAR BUS_REG(0x60)
+#define TFE_BUS_VIOLATION_STATUS BUS_REG(0x64)
+#define TFE_BUS_OVERFLOW_STATUS BUS_REG(0x68)
+#define TFE_BUS_IMAGE_SZ_VIOLATION_STATUS BUS_REG(0x70)
+
+#define TFE_BUS_CLIENT_CFG(c) BUS_REG(0x200 + (c) * 0x100)
+#define TFE_BUS_CLIENT_CFG_EN BIT(0)
+#define TFE_BUS_CLIENT_CFG_MODE_FRAME BIT(16)
+#define TFE_BUS_IMAGE_ADDR(c) BUS_REG(0x204 + (c) * 0x100)
+#define TFE_BUS_FRAME_INCR(c) BUS_REG(0x208 + (c) * 0x100)
+#define TFE_BUS_IMAGE_CFG_0(c) BUS_REG(0x20c + (c) * 0x100)
+#define TFE_BUS_IMAGE_CFG_0_DEFAULT 0xffff
+#define TFE_BUS_IMAGE_CFG_1(c) BUS_REG(0x210 + (c) * 0x100)
+#define TFE_BUS_IMAGE_CFG_2(c) BUS_REG(0x214 + (c) * 0x100)
+#define TFE_BUS_IMAGE_CFG_2_DEFAULT 0xffff
+#define TFE_BUS_PACKER_CFG(c) BUS_REG(0x218 + (c) * 0x100)
+#define TFE_BUS_PACKER_CFG_FMT_PLAIN64 0xa
+#define TFE_BUS_IRQ_SUBSAMPLE_CFG_0(c) BUS_REG(0x230 + (c) * 0x100)
+#define TFE_BUS_IRQ_SUBSAMPLE_CFG_1(c) BUS_REG(0x234 + (c) * 0x100)
+#define TFE_BUS_FRAMEDROP_CFG_0(c) BUS_REG(0x238 + (c) * 0x100)
+#define TFE_BUS_FRAMEDROP_CFG_1(c) BUS_REG(0x23c + (c) * 0x100)
+
+/*
+ * TODO: differentiate the port ID based on the requested type (RDI, BHIST, etc.)
+ *
+ * TFE write master IDs (clients)
+ *
+ * BAYER 0
+ * IDEAL_RAW 1
+ * STATS_TINTLESS_BG 2
+ * STATS_BHIST 3
+ * STATS_AWB_BG 4
+ * STATS_AEC_BG 5
+ * STATS_BAF 6
+ * RDI0 7
+ * RDI1 8
+ * RDI2 9
+ */
+#define RDI_WM(n) (7 + (n))
+#define TFE_WM_NUM 10
+
+enum tfe_iface {
+ TFE_IFACE_PIX,
+ TFE_IFACE_RDI0,
+ TFE_IFACE_RDI1,
+ TFE_IFACE_RDI2,
+ TFE_IFACE_NUM
+};
+
+enum tfe_subgroups {
+ TFE_SUBGROUP_BAYER,
+ TFE_SUBGROUP_IDEAL_RAW,
+ TFE_SUBGROUP_HDR,
+ TFE_SUBGROUP_BG,
+ TFE_SUBGROUP_BAF,
+ TFE_SUBGROUP_RDI0,
+ TFE_SUBGROUP_RDI1,
+ TFE_SUBGROUP_RDI2,
+ TFE_SUBGROUP_NUM
+};
+
+static enum tfe_iface tfe_line_iface_map[VFE_LINE_NUM_MAX] = {
+ [VFE_LINE_RDI0] = TFE_IFACE_RDI0,
+ [VFE_LINE_RDI1] = TFE_IFACE_RDI1,
+ [VFE_LINE_RDI2] = TFE_IFACE_RDI2,
+ [VFE_LINE_PIX] = TFE_IFACE_PIX,
+};
+
+static enum vfe_line_id tfe_subgroup_line_map[TFE_SUBGROUP_NUM] = {
+ [TFE_SUBGROUP_BAYER] = VFE_LINE_PIX,
+ [TFE_SUBGROUP_IDEAL_RAW] = VFE_LINE_PIX,
+ [TFE_SUBGROUP_HDR] = VFE_LINE_PIX,
+ [TFE_SUBGROUP_BG] = VFE_LINE_PIX,
+ [TFE_SUBGROUP_BAF] = VFE_LINE_PIX,
+ [TFE_SUBGROUP_RDI0] = VFE_LINE_RDI0,
+ [TFE_SUBGROUP_RDI1] = VFE_LINE_RDI1,
+ [TFE_SUBGROUP_RDI2] = VFE_LINE_RDI2,
+};
+
+static inline enum tfe_iface __line_to_iface(enum vfe_line_id line_id)
+{
+ if (line_id <= VFE_LINE_NONE || line_id >= VFE_LINE_NUM_MAX) {
+ pr_warn("VFE: Invalid line %d\n", line_id);
+ return TFE_IFACE_RDI0;
+ }
+
+ return tfe_line_iface_map[line_id];
+}
+
+static inline enum vfe_line_id __iface_to_line(unsigned int iface)
+{
+ int i;
+
+ for (i = 0; i < VFE_LINE_NUM_MAX; i++) {
+ if (tfe_line_iface_map[i] == iface)
+ return i;
+ }
+
+ return VFE_LINE_NONE;
+}
+
+static inline enum vfe_line_id __subgroup_to_line(enum tfe_subgroups sg)
+{
+ if (sg >= TFE_SUBGROUP_NUM)
+ return VFE_LINE_NONE;
+
+ return tfe_subgroup_line_map[sg];
+}
+
+static void vfe_global_reset(struct vfe_device *vfe)
+{
+ writel(TFE_IRQ_MASK_0_RST_DONE, vfe->base + TFE_IRQ_MASK_0);
+ writel(TFE_GLOBAL_RESET_CMD_CORE, vfe->base + TFE_GLOBAL_RESET_CMD);
+}
+
+static irqreturn_t vfe_isr(int irq, void *dev)
+{
+ struct vfe_device *vfe = dev;
+ u32 status;
+ int i;
+
+ status = readl_relaxed(vfe->base + TFE_IRQ_STATUS_0);
+ writel_relaxed(status, vfe->base + TFE_IRQ_CLEAR_0);
+ writel_relaxed(TFE_IRQ_CMD_CLEAR, vfe->base + TFE_IRQ_CMD);
+
+ if (status & TFE_IRQ_MASK_0_RST_DONE) {
+ dev_dbg(vfe->camss->dev, "VFE%u: Reset done!\n", vfe->id);

+ vfe_isr_reset_ack(vfe);
+ }
+
+ if (status & TFE_IRQ_MASK_0_BUS_WR) {
+ u32 bus_status = readl_relaxed(vfe->base + TFE_BUS_IRQ_STATUS_0);
+
+ writel_relaxed(bus_status, vfe->base + TFE_BUS_IRQ_CLEAR_0);
+ writel_relaxed(TFE_BUS_IRQ_CMD_CLEAR, vfe->base + TFE_BUS_IRQ_CMD);
+
+ for (i = 0; i < TFE_IFACE_NUM; i++) {
+ if (bus_status & TFE_BUS_IRQ_MASK_RUP_DONE(i))
+ vfe->res->hw_ops->reg_update_clear(vfe, __iface_to_line(i));
+ }
+
+ for (i = 0; i < TFE_SUBGROUP_NUM; i++) {
+ if (bus_status & TFE_BUS_IRQ_MASK_BUF_DONE(i))
+ vfe_buf_done(vfe, __subgroup_to_line(i));
+ }
+
+ if (bus_status & TFE_BUS_IRQ_MASK_0_CONS_VIOL)
+ dev_err_ratelimited(vfe->camss->dev, "VFE%u: Bad config violation",
+ vfe->id);
+
+ if (bus_status & TFE_BUS_IRQ_MASK_0_VIOL)
+ dev_err_ratelimited(vfe->camss->dev, "VFE%u: Input data violation",
+ vfe->id);
+
+ if (bus_status & TFE_BUS_IRQ_MASK_0_IMG_VIOL)
+ dev_err_ratelimited(vfe->camss->dev, "VFE%u: Image size violation",
+ vfe->id);
+ }
+
+ status = readl_relaxed(vfe->base + TFE_BUS_OVERFLOW_STATUS);
+ if (status) {
+ writel_relaxed(status, vfe->base + TFE_BUS_STATUS_CLEAR);
+ for (i = 0; i < TFE_WM_NUM; i++) {
+ if (status & BIT(i))
+ dev_err_ratelimited(vfe->camss->dev,
+ "VFE%u: bus overflow for wm %u\n",
+ vfe->id, i);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int vfe_halt(struct vfe_device *vfe)
+{
+ /* rely on vfe_disable_output() to stop the VFE */
+ return 0;
+}
+
+static void vfe_enable_irq(struct vfe_device *vfe)
+{
+ writel(TFE_IRQ_MASK_0_RST_DONE | TFE_IRQ_MASK_0_BUS_WR,
+ vfe->base + TFE_IRQ_MASK_0);
+ writel(TFE_BUS_IRQ_MASK_RUP_DONE_MASK | TFE_BUS_IRQ_MASK_BUF_DONE_MASK |
+ TFE_BUS_IRQ_MASK_0_CONS_VIOL | TFE_BUS_IRQ_MASK_0_VIOL |
+ TFE_BUS_IRQ_MASK_0_IMG_VIOL, vfe->base + TFE_BUS_IRQ_MASK_0);
+}
+
+static void vfe_wm_update(struct vfe_device *vfe, u8 rdi, u32 addr,
+ struct vfe_line *line)
+{
+ u8 wm = RDI_WM(rdi);
+
+ writel_relaxed(addr, vfe->base + TFE_BUS_IMAGE_ADDR(wm));
+}
+
+static void vfe_wm_start(struct vfe_device *vfe, u8 rdi, struct vfe_line *line)
+{
+ struct v4l2_pix_format_mplane *pix = &line->video_out.active_fmt.fmt.pix_mp;
+ u32 stride = pix->plane_fmt[0].bytesperline;
+ u8 wm = RDI_WM(rdi);
+
+ /* Configuration for plain RDI frames */
+ writel_relaxed(TFE_BUS_IMAGE_CFG_0_DEFAULT, vfe->base + TFE_BUS_IMAGE_CFG_0(wm));
+ writel_relaxed(0u, vfe->base + TFE_BUS_IMAGE_CFG_1(wm));
+ writel_relaxed(TFE_BUS_IMAGE_CFG_2_DEFAULT, vfe->base + TFE_BUS_IMAGE_CFG_2(wm));
+ writel_relaxed(stride * pix->height, vfe->base + TFE_BUS_FRAME_INCR(wm));
+ writel_relaxed(TFE_BUS_PACKER_CFG_FMT_PLAIN64, vfe->base + TFE_BUS_PACKER_CFG(wm));
+
+ /* No dropped frames, one irq per frame */
+ writel_relaxed(0, vfe->base + TFE_BUS_FRAMEDROP_CFG_0(wm));
+ writel_relaxed(1, vfe->base + TFE_BUS_FRAMEDROP_CFG_1(wm));
+ writel_relaxed(0, vfe->base + TFE_BUS_IRQ_SUBSAMPLE_CFG_0(wm));
+ writel_relaxed(1, vfe->base + TFE_BUS_IRQ_SUBSAMPLE_CFG_1(wm));
+
+ vfe_enable_irq(vfe);
+
+ writel(TFE_BUS_CLIENT_CFG_EN | TFE_BUS_CLIENT_CFG_MODE_FRAME,
+ vfe->base + TFE_BUS_CLIENT_CFG(wm));
+
+ dev_dbg(vfe->camss->dev, "VFE%u: Started RDI%u width %u height %u stride %u\n",
+ vfe->id, rdi, pix->width, pix->height, stride);
+}
+
+static void vfe_wm_stop(struct vfe_device *vfe, u8 rdi)
+{
+ u8 wm = RDI_WM(rdi);
+
+ writel(0, vfe->base + TFE_BUS_CLIENT_CFG(wm));
+
+ dev_dbg(vfe->camss->dev, "VFE%u: Stopped RDI%u\n", vfe->id, rdi);
+}
+
+static const struct camss_video_ops vfe_video_ops_520 = {
+ .queue_buffer = vfe_queue_buffer_v2,
+ .flush_buffers = vfe_flush_buffers,
+};
+
+static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
+{
+ vfe->video_ops = vfe_video_ops_520;
+}
+
+static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
+{
+ vfe->reg_update |= BIT(__line_to_iface(line_id));
+ writel_relaxed(vfe->reg_update, vfe->base + TFE_REG_UPDATE_CMD);
+}
+
+static void vfe_reg_update_clear(struct vfe_device *vfe, enum vfe_line_id line_id)
+{
+ vfe->reg_update &= ~BIT(__line_to_iface(line_id));
+}
+
+const struct vfe_hw_ops vfe_ops_340 = {
+ .global_reset = vfe_global_reset,
+ .hw_version = vfe_hw_version,
+ .isr = vfe_isr,
+ .pm_domain_off = vfe_pm_domain_off,
+ .pm_domain_on = vfe_pm_domain_on,
+ .subdev_init = vfe_subdev_init,
+ .vfe_disable = vfe_disable,
+ .vfe_enable = vfe_enable_v2,
+ .vfe_halt = vfe_halt,
+ .vfe_wm_start = vfe_wm_start,
+ .vfe_wm_stop = vfe_wm_stop,
+ .vfe_buf_done = vfe_buf_done,
+ .vfe_wm_update = vfe_wm_update,
+ .reg_update = vfe_reg_update,
+ .reg_update_clear = vfe_reg_update_clear,
+};
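
A worked instance of the buf-done encoding used in the new TFE file above: TFE_BUS_IRQ_MASK_BUF_DONE(sg) places BIT(sg) into bits 15:8 of the bus IRQ mask, so for TFE_SUBGROUP_RDI0 (= 5) the resulting bit is BIT(5) << 8 = 0x2000.

/* Requires linux/bitfield.h and linux/bits.h. */
static bool example_rdi0_buf_done(u32 bus_status)
{
	/* equivalent to bus_status & TFE_BUS_IRQ_MASK_BUF_DONE(5) */
	return bus_status & FIELD_PREP(GENMASK(15, 8), BIT(5));
}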
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-780.c b/drivers/media/platform/qcom/camss/camss-vfe-gen3.c
index b9812d70f91b..22579617def7 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-780.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-gen3.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module v780 (SM8550)
+ * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module gen3
*
* Copyright (c) 2024 Qualcomm Technologies, Inc.
*/
@@ -12,13 +12,44 @@
#include "camss.h"
#include "camss-vfe.h"
-#define BUS_REG_BASE (vfe_is_lite(vfe) ? 0x200 : 0xC00)
+#define IS_VFE_690(vfe) \
+ ((vfe->camss->res->version == CAMSS_8775P) \
+ || (vfe->camss->res->version == CAMSS_8300))
+
+#define BUS_REG_BASE_690 \
+ (vfe_is_lite(vfe) ? 0x480 : 0x400)
+#define BUS_REG_BASE_780 \
+ (vfe_is_lite(vfe) ? 0x200 : 0xC00)
+#define BUS_REG_BASE \
+ (IS_VFE_690(vfe) ? BUS_REG_BASE_690 : BUS_REG_BASE_780)
+
+#define VFE_TOP_CORE_CFG (0x24)
+#define VFE_DISABLE_DSCALING_DS4 BIT(21)
+#define VFE_DISABLE_DSCALING_DS16 BIT(22)
+
+#define VFE_BUS_WM_TEST_BUS_CTRL_690 (BUS_REG_BASE + 0xFC)
+#define VFE_BUS_WM_TEST_BUS_CTRL_780 (BUS_REG_BASE + 0xDC)
+#define VFE_BUS_WM_TEST_BUS_CTRL \
+ (IS_VFE_690(vfe) ? VFE_BUS_WM_TEST_BUS_CTRL_690 \
+ : VFE_BUS_WM_TEST_BUS_CTRL_780)
+/*
+ * Bus client mapping:
+ *
+ * Full VFE:
+ * VFE_690: 16 = RDI0, 17 = RDI1, 18 = RDI2
+ * VFE_780: 23 = RDI0, 24 = RDI1, 25 = RDI2
+ *
+ * VFE LITE:
+ * VFE_690 : 0 = RDI0, 1 = RDI1, 2 = RDI2, 3 = RDI3, 4 = RDI4, 5 = RDI5
+ * VFE_780 : 0 = RDI0, 1 = RDI1, 2 = RDI2, 3 = RDI3, 4 = RDI4
+ */
+#define RDI_WM_690(n) ((vfe_is_lite(vfe) ? 0x0 : 0x10) + (n))
+#define RDI_WM_780(n) ((vfe_is_lite(vfe) ? 0x0 : 0x17) + (n))
+#define RDI_WM(n) (IS_VFE_690(vfe) ? RDI_WM_690(n) : RDI_WM_780(n))
#define VFE_BUS_WM_CGC_OVERRIDE (BUS_REG_BASE + 0x08)
#define WM_CGC_OVERRIDE_ALL (0x7FFFFFF)
-#define VFE_BUS_WM_TEST_BUS_CTRL (BUS_REG_BASE + 0xDC)
-
#define VFE_BUS_WM_CFG(n) (BUS_REG_BASE + 0x200 + (n) * 0x100)
#define WM_CFG_EN BIT(0)
#define WM_VIR_FRM_EN BIT(1)
@@ -39,17 +70,6 @@
#define VFE_BUS_WM_MMU_PREFETCH_CFG(n) (BUS_REG_BASE + 0x260 + (n) * 0x100)
#define VFE_BUS_WM_MMU_PREFETCH_MAX_OFFSET(n) (BUS_REG_BASE + 0x264 + (n) * 0x100)
-/*
- * Bus client mapping:
- *
- * Full VFE:
- * 23 = RDI0, 24 = RDI1, 25 = RDI2
- *
- * VFE LITE:
- * 0 = RDI0, 1 = RDI1, 2 = RDI3, 4 = RDI4
- */
-#define RDI_WM(n) ((vfe_is_lite(vfe) ? 0x0 : 0x17) + (n))
-
static void vfe_wm_start(struct vfe_device *vfe, u8 wm, struct vfe_line *line)
{
struct v4l2_pix_format_mplane *pix =
@@ -62,14 +82,24 @@ static void vfe_wm_start(struct vfe_device *vfe, u8 wm, struct vfe_line *line)
writel(0x0, vfe->base + VFE_BUS_WM_TEST_BUS_CTRL);
- writel(ALIGN(pix->plane_fmt[0].bytesperline, 16) * pix->height >> 8,
- vfe->base + VFE_BUS_WM_FRAME_INCR(wm));
+ if (IS_VFE_690(vfe))
+ writel(ALIGN(pix->plane_fmt[0].bytesperline, 16) * pix->height,
+ vfe->base + VFE_BUS_WM_FRAME_INCR(wm));
+ else
+ writel(ALIGN(pix->plane_fmt[0].bytesperline, 16) * pix->height >> 8,
+ vfe->base + VFE_BUS_WM_FRAME_INCR(wm));
+
writel((WM_IMAGE_CFG_0_DEFAULT_WIDTH & 0xFFFF),
vfe->base + VFE_BUS_WM_IMAGE_CFG_0(wm));
writel(WM_IMAGE_CFG_2_DEFAULT_STRIDE,
vfe->base + VFE_BUS_WM_IMAGE_CFG_2(wm));
writel(0, vfe->base + VFE_BUS_WM_PACKER_CFG(wm));
+ /* Top core config: bypass the DS4 and DS16 downscalers */
+ if (IS_VFE_690(vfe))
+ writel(VFE_DISABLE_DSCALING_DS4 | VFE_DISABLE_DSCALING_DS16,
+ vfe->base + VFE_TOP_CORE_CFG);
+
/* no dropped frames, one irq per frame */
writel(0, vfe->base + VFE_BUS_WM_FRAMEDROP_PERIOD(wm));
writel(1, vfe->base + VFE_BUS_WM_FRAMEDROP_PATTERN(wm));
@@ -92,7 +122,11 @@ static void vfe_wm_update(struct vfe_device *vfe, u8 wm, u32 addr,
struct vfe_line *line)
{
wm = RDI_WM(wm);
- writel((addr >> 8) & 0xFFFFFFFF, vfe->base + VFE_BUS_WM_IMAGE_ADDR(wm));
+
+ if (IS_VFE_690(vfe))
+ writel(addr, vfe->base + VFE_BUS_WM_IMAGE_ADDR(wm));
+ else
+ writel((addr >> 8), vfe->base + VFE_BUS_WM_IMAGE_ADDR(wm));
dev_dbg(vfe->camss->dev, "wm:%d, image buf addr:0x%x\n",
wm, addr);
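The address split above suggests the 780-class bus programs buffer addresses (and frame increments) in 256-byte units, while the 690-class takes plain byte addresses; a minimal sketch of that assumption:

#include <stdint.h>
#include <stdio.h>

/* 690-class: byte address; 780-class: address in 256-byte units,
 * hence the >> 8 in vfe_wm_update() above. Illustrative only. */
static uint32_t image_addr_reg(int is_690, uint32_t addr)
{
	return is_690 ? addr : addr >> 8;
}

int main(void)
{
	uint32_t addr = 0xABCD0000;

	printf("690: %#x, 780: %#x\n",
	       image_addr_reg(1, addr), image_addr_reg(0, addr));
	return 0;
}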
@@ -113,14 +147,14 @@ static inline void vfe_reg_update_clear(struct vfe_device *vfe,
camss_reg_update(vfe->camss, vfe->id, port_id, true);
}
-static const struct camss_video_ops vfe_video_ops_780 = {
+static const struct camss_video_ops vfe_video_ops_gen3 = {
.queue_buffer = vfe_queue_buffer_v2,
.flush_buffers = vfe_flush_buffers,
};
static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
{
- vfe->video_ops = vfe_video_ops_780;
+ vfe->video_ops = vfe_video_ops_gen3;
}
static void vfe_global_reset(struct vfe_device *vfe)
@@ -140,7 +174,7 @@ static int vfe_halt(struct vfe_device *vfe)
return 0;
}
-const struct vfe_hw_ops vfe_ops_780 = {
+const struct vfe_hw_ops vfe_ops_gen3 = {
.global_reset = vfe_global_reset,
.hw_version = vfe_hw_version,
.isr = vfe_isr,
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
index 4bca6c3abaff..dff8d0a1e8c2 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe.c
@@ -340,12 +340,15 @@ static u32 vfe_src_pad_code(struct vfe_line *line, u32 sink_code,
}
break;
case CAMSS_660:
+ case CAMSS_2290:
case CAMSS_7280:
case CAMSS_8x96:
case CAMSS_8250:
case CAMSS_8280XP:
+ case CAMSS_8300:
case CAMSS_845:
case CAMSS_8550:
+ case CAMSS_8775P:
case CAMSS_X1E80100:
switch (sink_code) {
case MEDIA_BUS_FMT_YUYV8_1X16:
@@ -910,7 +913,24 @@ static int vfe_match_clock_names(struct vfe_device *vfe,
return (!strcmp(clock->name, vfe_name) ||
!strcmp(clock->name, vfe_lite_name) ||
- !strcmp(clock->name, "vfe_lite"));
+ !strcmp(clock->name, "vfe_lite") ||
+ !strcmp(clock->name, "camnoc_axi"));
+}
+
+/*
+ * vfe_check_clock_levels - Check whether a clock has any non-zero frequency level
+ * @clock: clocks data
+ *
+ * Return: true if at least one clock level is non-zero, false otherwise.
+ */
+static bool vfe_check_clock_levels(struct camss_clock *clock)
+{
+ int i;
+
+ for (i = 0; i < clock->nfreqs; i++)
+ if (clock->freq[i])
+ return true;
+ return false;
}
/*
@@ -936,7 +956,7 @@ static int vfe_set_clock_rates(struct vfe_device *vfe)
for (i = 0; i < vfe->nclocks; i++) {
struct camss_clock *clock = &vfe->clock[i];
- if (vfe_match_clock_names(vfe, clock)) {
+ if (vfe_match_clock_names(vfe, clock) && vfe_check_clock_levels(clock)) {
u64 min_rate = 0;
long rate;
@@ -1017,7 +1037,7 @@ static int vfe_check_clock_rates(struct vfe_device *vfe)
for (i = 0; i < vfe->nclocks; i++) {
struct camss_clock *clock = &vfe->clock[i];
- if (vfe_match_clock_names(vfe, clock)) {
+ if (vfe_match_clock_names(vfe, clock) && vfe_check_clock_levels(clock)) {
u64 min_rate = 0;
unsigned long rate;
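Taken together, a clock is now reprogrammed only if it both matches by name and carries at least one non-zero level; clocks listed with all-zero rate tables are left untouched. A small standalone sketch of that gating (struct and values hypothetical, modelled on the qcm2290 tables below):

#include <stdbool.h>
#include <stdio.h>

struct clk_sketch {
	const char *name;
	unsigned long freq[4];
	int nfreqs;
};

/* Same check as vfe_check_clock_levels(), restated stand-alone. */
static bool has_nonzero_level(const struct clk_sketch *c)
{
	for (int i = 0; i < c->nfreqs; i++)
		if (c->freq[i])
			return true;
	return false;
}

int main(void)
{
	/* e.g. the qcm2290 "axi" clock is listed with only { 0 } rates,
	 * so it is matched by name but skipped by the level check. */
	struct clk_sketch axi = { "axi", { 0 }, 1 };
	struct clk_sketch vfe0 = { "vfe0", { 19200000, 153600000 }, 2 };

	printf("%s: %d, %s: %d\n", axi.name, has_nonzero_level(&axi),
	       vfe0.name, has_nonzero_level(&vfe0));
	return 0;
}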
@@ -1972,8 +1992,10 @@ static int vfe_bpl_align(struct vfe_device *vfe)
case CAMSS_7280:
case CAMSS_8250:
case CAMSS_8280XP:
+ case CAMSS_8300:
case CAMSS_845:
case CAMSS_8550:
+ case CAMSS_8775P:
case CAMSS_X1E80100:
ret = 16;
break;
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.h b/drivers/media/platform/qcom/camss/camss-vfe.h
index a23f666be753..0300efdb1c46 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.h
+++ b/drivers/media/platform/qcom/camss/camss-vfe.h
@@ -242,9 +242,10 @@ extern const struct vfe_hw_ops vfe_ops_4_1;
extern const struct vfe_hw_ops vfe_ops_4_7;
extern const struct vfe_hw_ops vfe_ops_4_8;
extern const struct vfe_hw_ops vfe_ops_170;
+extern const struct vfe_hw_ops vfe_ops_340;
extern const struct vfe_hw_ops vfe_ops_480;
extern const struct vfe_hw_ops vfe_ops_680;
-extern const struct vfe_hw_ops vfe_ops_780;
+extern const struct vfe_hw_ops vfe_ops_gen3;
int vfe_get(struct vfe_device *vfe);
void vfe_put(struct vfe_device *vfe);
diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c
index 8d05802d1735..831486e14754 100644
--- a/drivers/media/platform/qcom/camss/camss-video.c
+++ b/drivers/media/platform/qcom/camss/camss-video.c
@@ -604,50 +604,11 @@ static const struct v4l2_ioctl_ops msm_vid_ioctl_ops = {
* V4L2 file operations
*/
-static int video_open(struct file *file)
-{
- struct video_device *vdev = video_devdata(file);
- struct camss_video *video = video_drvdata(file);
- struct v4l2_fh *vfh;
- int ret;
-
- mutex_lock(&video->lock);
-
- vfh = kzalloc(sizeof(*vfh), GFP_KERNEL);
- if (vfh == NULL) {
- ret = -ENOMEM;
- goto error_alloc;
- }
-
- v4l2_fh_init(vfh, vdev);
- v4l2_fh_add(vfh);
-
- file->private_data = vfh;
-
- mutex_unlock(&video->lock);
-
- return 0;
-
-error_alloc:
- mutex_unlock(&video->lock);
-
- return ret;
-}
-
-static int video_release(struct file *file)
-{
- vb2_fop_release(file);
-
- file->private_data = NULL;
-
- return 0;
-}
-
static const struct v4l2_file_operations msm_vid_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = video_ioctl2,
- .open = video_open,
- .release = video_release,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
.poll = vb2_fop_poll,
.mmap = vb2_fop_mmap,
.read = vb2_fop_read,
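The removed open/release pair duplicated what the core helpers already do; roughly, v4l2_fh_open() performs this sequence (a sketch of the helper's behaviour from memory, not its exact source):

/* Sketch: what video_open() hand-rolled and v4l2_fh_open() provides. */
static int open_like_v4l2_fh_open(struct file *filp)
{
	struct video_device *vdev = video_devdata(filp);
	struct v4l2_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);

	if (!fh)
		return -ENOMEM;
	v4l2_fh_init(fh, vdev);
	filp->private_data = fh;
	v4l2_fh_add(fh);
	return 0;
}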
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
index e08e70b93824..2fbcd0e343aa 100644
--- a/drivers/media/platform/qcom/camss/camss.c
+++ b/drivers/media/platform/qcom/camss/camss.c
@@ -515,6 +515,140 @@ static const struct camss_subdev_resources vfe_res_8x96[] = {
}
};
+static const struct camss_subdev_resources csiphy_res_2290[] = {
+ /* CSIPHY0 */
+ {
+ .regulators = { "vdd-csiphy-1p2", "vdd-csiphy-1p8" },
+ .clock = { "top_ahb", "ahb", "csiphy0", "csiphy0_timer" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 240000000, 341330000, 384000000 },
+ { 100000000, 200000000, 268800000 } },
+ .reg = { "csiphy0" },
+ .interrupt = { "csiphy0" },
+ .csiphy = {
+ .id = 0,
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ },
+
+ /* CSIPHY1 */
+ {
+ .regulators = { "vdd-csiphy-1p2", "vdd-csiphy-1p8" },
+ .clock = { "top_ahb", "ahb", "csiphy1", "csiphy1_timer" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 240000000, 341330000, 384000000 },
+ { 100000000, 200000000, 268800000 } },
+ .reg = { "csiphy1" },
+ .interrupt = { "csiphy1" },
+ .csiphy = {
+ .id = 1,
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ }
+};
+
+static const struct camss_subdev_resources csid_res_2290[] = {
+ /* CSID0 */
+ {
+ .regulators = {},
+ .clock = { "top_ahb", "ahb", "csi0", "vfe0_cphy_rx", "vfe0" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 192000000, 240000000, 384000000, 426400000 },
+ { 0 },
+ { 0 } },
+ .reg = { "csid0" },
+ .interrupt = { "csid0" },
+ .csid = {
+ .hw_ops = &csid_ops_340,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+
+ /* CSID1 */
+ {
+ .regulators = {},
+ .clock = { "top_ahb", "ahb", "csi1", "vfe1_cphy_rx", "vfe1" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 192000000, 240000000, 384000000, 426400000 },
+ { 0 },
+ { 0 } },
+ .reg = { "csid1" },
+ .interrupt = { "csid1" },
+ .csid = {
+ .hw_ops = &csid_ops_340,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ }
+};
+
+static const struct camss_subdev_resources vfe_res_2290[] = {
+ /* VFE0 */
+ {
+ .regulators = {},
+ .clock = { "top_ahb", "ahb", "axi", "vfe0", "camnoc_rt_axi", "camnoc_nrt_axi" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 19200000, 153600000, 192000000, 256000000, 384000000, 460800000 },
+ { 0 },
+ { 0 }, },
+ .reg = { "vfe0" },
+ .interrupt = { "vfe0" },
+ .vfe = {
+ .line_num = 4,
+ .hw_ops = &vfe_ops_340,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+
+ /* VFE1 */
+ {
+ .regulators = {},
+ .clock = { "top_ahb", "ahb", "axi", "vfe1", "camnoc_rt_axi", "camnoc_nrt_axi" },
+ .clock_rate = { { 0 },
+ { 0 },
+ { 0 },
+ { 19200000, 153600000, 192000000, 256000000, 384000000, 460800000 },
+ { 0 },
+ { 0 }, },
+ .reg = { "vfe1" },
+ .interrupt = { "vfe1" },
+ .vfe = {
+ .line_num = 4,
+ .hw_ops = &vfe_ops_340,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+};
+
+static const struct resources_icc icc_res_2290[] = {
+ {
+ .name = "ahb",
+ .icc_bw_tbl.avg = 150000,
+ .icc_bw_tbl.peak = 300000,
+ },
+ {
+ .name = "hf_mnoc",
+ .icc_bw_tbl.avg = 2097152,
+ .icc_bw_tbl.peak = 3000000,
+ },
+ {
+ .name = "sf_mnoc",
+ .icc_bw_tbl.avg = 2097152,
+ .icc_bw_tbl.peak = 3000000,
+ },
+};
+
static const struct camss_subdev_resources csiphy_res_660[] = {
/* CSIPHY0 */
{
@@ -2285,7 +2419,7 @@ static const struct camss_subdev_resources csid_res_8550[] = {
.csid = {
.is_lite = false,
.parent_dev_ops = &vfe_parent_dev_ops,
- .hw_ops = &csid_ops_780,
+ .hw_ops = &csid_ops_gen3,
.formats = &csid_formats_gen2
}
},
@@ -2300,7 +2434,7 @@ static const struct camss_subdev_resources csid_res_8550[] = {
.csid = {
.is_lite = false,
.parent_dev_ops = &vfe_parent_dev_ops,
- .hw_ops = &csid_ops_780,
+ .hw_ops = &csid_ops_gen3,
.formats = &csid_formats_gen2
}
},
@@ -2315,7 +2449,7 @@ static const struct camss_subdev_resources csid_res_8550[] = {
.csid = {
.is_lite = false,
.parent_dev_ops = &vfe_parent_dev_ops,
- .hw_ops = &csid_ops_780,
+ .hw_ops = &csid_ops_gen3,
.formats = &csid_formats_gen2
}
},
@@ -2330,7 +2464,7 @@ static const struct camss_subdev_resources csid_res_8550[] = {
.csid = {
.is_lite = true,
.parent_dev_ops = &vfe_parent_dev_ops,
- .hw_ops = &csid_ops_780,
+ .hw_ops = &csid_ops_gen3,
.formats = &csid_formats_gen2
}
},
@@ -2345,7 +2479,7 @@ static const struct camss_subdev_resources csid_res_8550[] = {
.csid = {
.is_lite = true,
.parent_dev_ops = &vfe_parent_dev_ops,
- .hw_ops = &csid_ops_780,
+ .hw_ops = &csid_ops_gen3,
.formats = &csid_formats_gen2
}
}
@@ -2371,7 +2505,7 @@ static const struct camss_subdev_resources vfe_res_8550[] = {
.is_lite = false,
.has_pd = true,
.pd_name = "ife0",
- .hw_ops = &vfe_ops_780,
+ .hw_ops = &vfe_ops_gen3,
.formats_rdi = &vfe_formats_rdi_845,
.formats_pix = &vfe_formats_pix_845
}
@@ -2395,7 +2529,7 @@ static const struct camss_subdev_resources vfe_res_8550[] = {
.is_lite = false,
.has_pd = true,
.pd_name = "ife1",
- .hw_ops = &vfe_ops_780,
+ .hw_ops = &vfe_ops_gen3,
.formats_rdi = &vfe_formats_rdi_845,
.formats_pix = &vfe_formats_pix_845
}
@@ -2419,7 +2553,7 @@ static const struct camss_subdev_resources vfe_res_8550[] = {
.is_lite = false,
.has_pd = true,
.pd_name = "ife2",
- .hw_ops = &vfe_ops_780,
+ .hw_ops = &vfe_ops_gen3,
.formats_rdi = &vfe_formats_rdi_845,
.formats_pix = &vfe_formats_pix_845
}
@@ -2441,7 +2575,7 @@ static const struct camss_subdev_resources vfe_res_8550[] = {
.vfe = {
.line_num = 4,
.is_lite = true,
- .hw_ops = &vfe_ops_780,
+ .hw_ops = &vfe_ops_gen3,
.formats_rdi = &vfe_formats_rdi_845,
.formats_pix = &vfe_formats_pix_845
}
@@ -2463,7 +2597,7 @@ static const struct camss_subdev_resources vfe_res_8550[] = {
.vfe = {
.line_num = 4,
.is_lite = true,
- .hw_ops = &vfe_ops_780,
+ .hw_ops = &vfe_ops_gen3,
.formats_rdi = &vfe_formats_rdi_845,
.formats_pix = &vfe_formats_pix_845
}
@@ -2483,6 +2617,467 @@ static const struct resources_icc icc_res_sm8550[] = {
},
};
+static const struct camss_subdev_resources csiphy_res_8300[] = {
+ /* CSIPHY0 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+
+ .clock = { "csiphy_rx", "csiphy0", "csiphy0_timer" },
+ .clock_rate = {
+ { 400000000 },
+ { 0 },
+ { 400000000 },
+ },
+ .reg = { "csiphy0" },
+ .interrupt = { "csiphy0" },
+ .csiphy = {
+ .id = 0,
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845,
+ }
+ },
+ /* CSIPHY1 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+
+ .clock = { "csiphy_rx", "csiphy1", "csiphy1_timer" },
+ .clock_rate = {
+ { 400000000 },
+ { 0 },
+ { 400000000 },
+ },
+ .reg = { "csiphy1" },
+ .interrupt = { "csiphy1" },
+ .csiphy = {
+ .id = 1,
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845,
+ }
+ },
+ /* CSIPHY2 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+
+ .clock = { "csiphy_rx", "csiphy2", "csiphy2_timer" },
+ .clock_rate = {
+ { 400000000 },
+ { 0 },
+ { 400000000 },
+ },
+ .reg = { "csiphy2" },
+ .interrupt = { "csiphy2" },
+ .csiphy = {
+ .id = 2,
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845,
+ }
+ },
+};
+
+static const struct camss_subdev_resources csiphy_res_8775p[] = {
+ /* CSIPHY0 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "csiphy_rx", "csiphy0", "csiphy0_timer"},
+ .clock_rate = {
+ { 400000000 },
+ { 0 },
+ { 400000000 },
+ },
+ .reg = { "csiphy0" },
+ .interrupt = { "csiphy0" },
+ .csiphy = {
+ .id = 0,
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ },
+ /* CSIPHY1 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "csiphy_rx", "csiphy1", "csiphy1_timer"},
+ .clock_rate = {
+ { 400000000 },
+ { 0 },
+ { 400000000 },
+ },
+ .reg = { "csiphy1" },
+ .interrupt = { "csiphy1" },
+ .csiphy = {
+ .id = 1,
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ },
+ /* CSIPHY2 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "csiphy_rx", "csiphy2", "csiphy2_timer"},
+ .clock_rate = {
+ { 400000000 },
+ { 0 },
+ { 400000000 },
+ },
+ .reg = { "csiphy2" },
+ .interrupt = { "csiphy2" },
+ .csiphy = {
+ .id = 2,
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ },
+ /* CSIPHY3 */
+ {
+ .regulators = { "vdda-phy", "vdda-pll" },
+ .clock = { "csiphy_rx", "csiphy3", "csiphy3_timer"},
+ .clock_rate = {
+ { 400000000 },
+ { 0 },
+ { 400000000 },
+ },
+ .reg = { "csiphy3" },
+ .interrupt = { "csiphy3" },
+ .csiphy = {
+ .id = 3,
+ .hw_ops = &csiphy_ops_3ph_1_0,
+ .formats = &csiphy_formats_sdm845
+ }
+ },
+};
+
+static const struct camss_subdev_resources csid_res_8775p[] = {
+ /* CSID0 */
+ {
+ .regulators = {},
+ .clock = { "csid", "csiphy_rx"},
+ .clock_rate = {
+ { 400000000, 400000000},
+ { 400000000, 400000000}
+ },
+ .reg = { "csid0" },
+ .interrupt = { "csid0" },
+ .csid = {
+ .is_lite = false,
+ .hw_ops = &csid_ops_gen3,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+ /* CSID1 */
+ {
+ .regulators = {},
+ .clock = { "csid", "csiphy_rx"},
+ .clock_rate = {
+ { 400000000, 400000000},
+ { 400000000, 400000000}
+ },
+ .reg = { "csid1" },
+ .interrupt = { "csid1" },
+ .csid = {
+ .is_lite = false,
+ .hw_ops = &csid_ops_gen3,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+
+ /* CSID2 (lite) */
+ {
+ .regulators = {},
+ .clock = { "cpas_vfe_lite", "vfe_lite_ahb",
+ "vfe_lite_csid", "vfe_lite_cphy_rx",
+ "vfe_lite"},
+ .clock_rate = {
+ { 0, 0, 400000000, 400000000, 0},
+ { 0, 0, 400000000, 480000000, 0}
+ },
+ .reg = { "csid_lite0" },
+ .interrupt = { "csid_lite0" },
+ .csid = {
+ .is_lite = true,
+ .hw_ops = &csid_ops_gen3,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+ /* CSID3 (lite) */
+ {
+ .regulators = {},
+ .clock = { "cpas_vfe_lite", "vfe_lite_ahb",
+ "vfe_lite_csid", "vfe_lite_cphy_rx",
+ "vfe_lite"},
+ .clock_rate = {
+ { 0, 0, 400000000, 400000000, 0},
+ { 0, 0, 400000000, 480000000, 0}
+ },
+ .reg = { "csid_lite1" },
+ .interrupt = { "csid_lite1" },
+ .csid = {
+ .is_lite = true,
+ .hw_ops = &csid_ops_gen3,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+ /* CSID4 (lite) */
+ {
+ .regulators = {},
+ .clock = { "cpas_vfe_lite", "vfe_lite_ahb",
+ "vfe_lite_csid", "vfe_lite_cphy_rx",
+ "vfe_lite"},
+ .clock_rate = {
+ { 0, 0, 400000000, 400000000, 0},
+ { 0, 0, 400000000, 480000000, 0}
+ },
+ .reg = { "csid_lite2" },
+ .interrupt = { "csid_lite2" },
+ .csid = {
+ .is_lite = true,
+ .hw_ops = &csid_ops_gen3,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+ /* CSID5 (lite) */
+ {
+ .regulators = {},
+ .clock = { "cpas_vfe_lite", "vfe_lite_ahb",
+ "vfe_lite_csid", "vfe_lite_cphy_rx",
+ "vfe_lite"},
+ .clock_rate = {
+ { 0, 0, 400000000, 400000000, 0},
+ { 0, 0, 400000000, 480000000, 0}
+ },
+ .reg = { "csid_lite3" },
+ .interrupt = { "csid_lite3" },
+ .csid = {
+ .is_lite = true,
+ .hw_ops = &csid_ops_gen3,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+ /* CSID6 (lite) */
+ {
+ .regulators = {},
+ .clock = { "cpas_vfe_lite", "vfe_lite_ahb",
+ "vfe_lite_csid", "vfe_lite_cphy_rx",
+ "vfe_lite"},
+ .clock_rate = {
+ { 0, 0, 400000000, 400000000, 0},
+ { 0, 0, 400000000, 480000000, 0}
+ },
+ .reg = { "csid_lite4" },
+ .interrupt = { "csid_lite4" },
+ .csid = {
+ .is_lite = true,
+ .hw_ops = &csid_ops_gen3,
+ .parent_dev_ops = &vfe_parent_dev_ops,
+ .formats = &csid_formats_gen2
+ }
+ },
+};
+
+static const struct camss_subdev_resources vfe_res_8775p[] = {
+ /* VFE0 */
+ {
+ .regulators = {},
+ .clock = { "cpas_vfe0", "vfe0", "vfe0_fast_ahb",
+ "cpas_ahb", "gcc_axi_hf",
+ "cpas_fast_ahb_clk",
+ "camnoc_axi"},
+ .clock_rate = {
+ { 0 },
+ { 480000000 },
+ { 300000000, 400000000 },
+ { 300000000, 400000000 },
+ { 0 },
+ { 300000000, 400000000 },
+ { 400000000 },
+ },
+ .reg = { "vfe0" },
+ .interrupt = { "vfe0" },
+ .vfe = {
+ .line_num = 3,
+ .is_lite = false,
+ .has_pd = false,
+ .pd_name = NULL,
+ .hw_ops = &vfe_ops_gen3,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+ /* VFE1 */
+ {
+ .regulators = {},
+ .clock = { "cpas_vfe1", "vfe1", "vfe1_fast_ahb",
+ "cpas_ahb", "gcc_axi_hf",
+ "cpas_fast_ahb_clk",
+ "camnoc_axi"},
+ .clock_rate = {
+ { 0 },
+ { 480000000 },
+ { 300000000, 400000000 },
+ { 300000000, 400000000 },
+ { 0 },
+ { 300000000, 400000000 },
+ { 400000000 },
+ },
+ .reg = { "vfe1" },
+ .interrupt = { "vfe1" },
+ .vfe = {
+ .line_num = 3,
+ .is_lite = false,
+ .has_pd = false,
+ .pd_name = NULL,
+ .hw_ops = &vfe_ops_gen3,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+ /* VFE2 (lite) */
+ {
+ .regulators = {},
+ .clock = { "cpas_vfe_lite", "vfe_lite_ahb",
+ "vfe_lite_csid", "vfe_lite_cphy_rx",
+ "vfe_lite"},
+ .clock_rate = {
+ { 0, 0, 0, 0 },
+ { 300000000, 400000000, 400000000, 400000000 },
+ { 400000000, 400000000, 400000000, 400000000 },
+ { 400000000, 400000000, 400000000, 400000000 },
+ { 480000000, 600000000, 600000000, 600000000 },
+ },
+ .reg = { "vfe_lite0" },
+ .interrupt = { "vfe_lite0" },
+ .vfe = {
+ .line_num = 4,
+ .is_lite = true,
+ .hw_ops = &vfe_ops_gen3,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+ /* VFE3 (lite) */
+ {
+ .regulators = {},
+ .clock = { "cpas_vfe_lite", "vfe_lite_ahb",
+ "vfe_lite_csid", "vfe_lite_cphy_rx",
+ "vfe_lite"},
+ .clock_rate = {
+ { 0, 0, 0, 0 },
+ { 300000000, 400000000, 400000000, 400000000 },
+ { 400000000, 400000000, 400000000, 400000000 },
+ { 400000000, 400000000, 400000000, 400000000 },
+ { 480000000, 600000000, 600000000, 600000000 },
+ },
+ .reg = { "vfe_lite1" },
+ .interrupt = { "vfe_lite1" },
+ .vfe = {
+ .line_num = 4,
+ .is_lite = true,
+ .hw_ops = &vfe_ops_gen3,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+ /* VFE4 (lite) */
+ {
+ .regulators = {},
+ .clock = { "cpas_vfe_lite", "vfe_lite_ahb",
+ "vfe_lite_csid", "vfe_lite_cphy_rx",
+ "vfe_lite"},
+ .clock_rate = {
+ { 0, 0, 0, 0 },
+ { 300000000, 400000000, 400000000, 400000000 },
+ { 400000000, 400000000, 400000000, 400000000 },
+ { 400000000, 400000000, 400000000, 400000000 },
+ { 480000000, 600000000, 600000000, 600000000 },
+ },
+ .reg = { "vfe_lite2" },
+ .interrupt = { "vfe_lite2" },
+ .vfe = {
+ .line_num = 4,
+ .is_lite = true,
+ .hw_ops = &vfe_ops_gen3,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+ /* VFE5 (lite) */
+ {
+ .regulators = {},
+ .clock = { "cpas_vfe_lite", "vfe_lite_ahb",
+ "vfe_lite_csid", "vfe_lite_cphy_rx",
+ "vfe_lite"},
+ .clock_rate = {
+ { 0, 0, 0, 0 },
+ { 300000000, 400000000, 400000000, 400000000 },
+ { 400000000, 400000000, 400000000, 400000000 },
+ { 400000000, 400000000, 400000000, 400000000 },
+ { 480000000, 600000000, 600000000, 600000000 },
+ },
+ .reg = { "vfe_lite3" },
+ .interrupt = { "vfe_lite3" },
+ .vfe = {
+ .line_num = 4,
+ .is_lite = true,
+ .hw_ops = &vfe_ops_gen3,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+ /* VFE6 (lite) */
+ {
+ .regulators = {},
+ .clock = { "cpas_vfe_lite", "vfe_lite_ahb",
+ "vfe_lite_csid", "vfe_lite_cphy_rx",
+ "vfe_lite"},
+ .clock_rate = {
+ { 0, 0, 0, 0 },
+ { 300000000, 400000000, 400000000, 400000000 },
+ { 400000000, 400000000, 400000000, 400000000 },
+ { 400000000, 400000000, 400000000, 400000000 },
+ { 480000000, 600000000, 600000000, 600000000 },
+ },
+ .reg = { "vfe_lite4" },
+ .interrupt = { "vfe_lite4" },
+ .vfe = {
+ .line_num = 4,
+ .is_lite = true,
+ .hw_ops = &vfe_ops_gen3,
+ .formats_rdi = &vfe_formats_rdi_845,
+ .formats_pix = &vfe_formats_pix_845
+ }
+ },
+};
+
+static const struct resources_icc icc_res_qcs8300[] = {
+ {
+ .name = "ahb",
+ .icc_bw_tbl.avg = 38400,
+ .icc_bw_tbl.peak = 76800,
+ },
+ {
+ .name = "hf_0",
+ .icc_bw_tbl.avg = 2097152,
+ .icc_bw_tbl.peak = 2097152,
+ },
+};
+
+static const struct resources_icc icc_res_sa8775p[] = {
+ {
+ .name = "ahb",
+ .icc_bw_tbl.avg = 38400,
+ .icc_bw_tbl.peak = 76800,
+ },
+ {
+ .name = "hf_0",
+ .icc_bw_tbl.avg = 2097152,
+ .icc_bw_tbl.peak = 2097152,
+ },
+};
+
static const struct camss_subdev_resources csiphy_res_x1e80100[] = {
/* CSIPHY0 */
{
@@ -3041,9 +3636,6 @@ static int camss_of_parse_ports(struct camss *camss)
for_each_endpoint_of_node(dev->of_node, node) {
struct camss_async_subdev *csd;
- if (!of_device_is_available(node))
- continue;
-
remote = of_graph_get_remote_port_parent(node);
if (!remote) {
dev_err(dev, "Cannot get remote parent\n");
@@ -3143,7 +3735,6 @@ static int camss_init_subdevices(struct camss *camss)
}
/*
- * camss_link_entities - Register subdev nodes and create links
* camss_link_err - print error in case link creation fails
* @src_name: name for source of the link
* @sink_name: name for sink of the link
@@ -3557,7 +4148,6 @@ static int camss_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct camss *camss;
- int num_subdevs;
int ret;
camss = devm_kzalloc(dev, sizeof(*camss), GFP_KERNEL);
@@ -3628,17 +4218,15 @@ static int camss_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
- num_subdevs = camss_of_parse_ports(camss);
- if (num_subdevs < 0) {
- ret = num_subdevs;
+ ret = camss_of_parse_ports(camss);
+ if (ret < 0)
goto err_v4l2_device_unregister;
- }
ret = camss_register_entities(camss);
if (ret < 0)
goto err_v4l2_device_unregister;
- ret = camss->res->link_entities(camss);
+ ret = camss_link_entities(camss);
if (ret < 0)
goto err_register_subdevs;
@@ -3648,23 +4236,12 @@ static int camss_probe(struct platform_device *pdev)
goto err_register_subdevs;
}
- if (num_subdevs) {
- camss->notifier.ops = &camss_subdev_notifier_ops;
-
- ret = v4l2_async_nf_register(&camss->notifier);
- if (ret) {
- dev_err(dev,
- "Failed to register async subdev nodes: %d\n",
- ret);
- goto err_media_device_unregister;
- }
- } else {
- ret = v4l2_device_register_subdev_nodes(&camss->v4l2_dev);
- if (ret < 0) {
- dev_err(dev, "Failed to register subdev nodes: %d\n",
- ret);
- goto err_media_device_unregister;
- }
+ camss->notifier.ops = &camss_subdev_notifier_ops;
+ ret = v4l2_async_nf_register(&camss->notifier);
+ if (ret) {
+ dev_err(dev,
+ "Failed to register async subdev nodes: %d\n", ret);
+ goto err_media_device_unregister;
}
return 0;
@@ -3723,7 +4300,6 @@ static const struct camss_resources msm8916_resources = {
.csiphy_num = ARRAY_SIZE(csiphy_res_8x16),
.csid_num = ARRAY_SIZE(csid_res_8x16),
.vfe_num = ARRAY_SIZE(vfe_res_8x16),
- .link_entities = camss_link_entities
};
static const struct camss_resources msm8953_resources = {
@@ -3737,7 +4313,6 @@ static const struct camss_resources msm8953_resources = {
.csiphy_num = ARRAY_SIZE(csiphy_res_8x96),
.csid_num = ARRAY_SIZE(csid_res_8x53),
.vfe_num = ARRAY_SIZE(vfe_res_8x53),
- .link_entities = camss_link_entities
};
static const struct camss_resources msm8996_resources = {
@@ -3749,7 +4324,46 @@ static const struct camss_resources msm8996_resources = {
.csiphy_num = ARRAY_SIZE(csiphy_res_8x96),
.csid_num = ARRAY_SIZE(csid_res_8x96),
.vfe_num = ARRAY_SIZE(vfe_res_8x96),
- .link_entities = camss_link_entities
+};
+
+static const struct camss_resources qcm2290_resources = {
+ .version = CAMSS_2290,
+ .csiphy_res = csiphy_res_2290,
+ .csid_res = csid_res_2290,
+ .vfe_res = vfe_res_2290,
+ .icc_res = icc_res_2290,
+ .icc_path_num = ARRAY_SIZE(icc_res_2290),
+ .csiphy_num = ARRAY_SIZE(csiphy_res_2290),
+ .csid_num = ARRAY_SIZE(csid_res_2290),
+ .vfe_num = ARRAY_SIZE(vfe_res_2290),
+};
+
+static const struct camss_resources qcs8300_resources = {
+ .version = CAMSS_8300,
+ .pd_name = "top",
+ .csiphy_res = csiphy_res_8300,
+ .csid_res = csid_res_8775p,
+ .csid_wrapper_res = &csid_wrapper_res_sm8550,
+ .vfe_res = vfe_res_8775p,
+ .icc_res = icc_res_qcs8300,
+ .csiphy_num = ARRAY_SIZE(csiphy_res_8300),
+ .csid_num = ARRAY_SIZE(csid_res_8775p),
+ .vfe_num = ARRAY_SIZE(vfe_res_8775p),
+ .icc_path_num = ARRAY_SIZE(icc_res_qcs8300),
+};
+
+static const struct camss_resources sa8775p_resources = {
+ .version = CAMSS_8775P,
+ .pd_name = "top",
+ .csiphy_res = csiphy_res_8775p,
+ .csid_res = csid_res_8775p,
+ .csid_wrapper_res = &csid_wrapper_res_sm8550,
+ .vfe_res = vfe_res_8775p,
+ .icc_res = icc_res_sa8775p,
+ .csiphy_num = ARRAY_SIZE(csiphy_res_8775p),
+ .csid_num = ARRAY_SIZE(csid_res_8775p),
+ .vfe_num = ARRAY_SIZE(vfe_res_8775p),
+ .icc_path_num = ARRAY_SIZE(icc_res_sa8775p),
};
static const struct camss_resources sdm660_resources = {
@@ -3761,7 +4375,6 @@ static const struct camss_resources sdm660_resources = {
.csiphy_num = ARRAY_SIZE(csiphy_res_660),
.csid_num = ARRAY_SIZE(csid_res_660),
.vfe_num = ARRAY_SIZE(vfe_res_660),
- .link_entities = camss_link_entities
};
static const struct camss_resources sdm670_resources = {
@@ -3772,7 +4385,6 @@ static const struct camss_resources sdm670_resources = {
.csiphy_num = ARRAY_SIZE(csiphy_res_670),
.csid_num = ARRAY_SIZE(csid_res_670),
.vfe_num = ARRAY_SIZE(vfe_res_670),
- .link_entities = camss_link_entities
};
static const struct camss_resources sdm845_resources = {
@@ -3784,7 +4396,6 @@ static const struct camss_resources sdm845_resources = {
.csiphy_num = ARRAY_SIZE(csiphy_res_845),
.csid_num = ARRAY_SIZE(csid_res_845),
.vfe_num = ARRAY_SIZE(vfe_res_845),
- .link_entities = camss_link_entities
};
static const struct camss_resources sm8250_resources = {
@@ -3798,7 +4409,6 @@ static const struct camss_resources sm8250_resources = {
.csiphy_num = ARRAY_SIZE(csiphy_res_8250),
.csid_num = ARRAY_SIZE(csid_res_8250),
.vfe_num = ARRAY_SIZE(vfe_res_8250),
- .link_entities = camss_link_entities
};
static const struct camss_resources sc8280xp_resources = {
@@ -3813,7 +4423,6 @@ static const struct camss_resources sc8280xp_resources = {
.csiphy_num = ARRAY_SIZE(csiphy_res_sc8280xp),
.csid_num = ARRAY_SIZE(csid_res_sc8280xp),
.vfe_num = ARRAY_SIZE(vfe_res_sc8280xp),
- .link_entities = camss_link_entities
};
static const struct camss_resources sc7280_resources = {
@@ -3827,7 +4436,6 @@ static const struct camss_resources sc7280_resources = {
.csiphy_num = ARRAY_SIZE(csiphy_res_7280),
.csid_num = ARRAY_SIZE(csid_res_7280),
.vfe_num = ARRAY_SIZE(vfe_res_7280),
- .link_entities = camss_link_entities
};
static const struct camss_resources sm8550_resources = {
@@ -3842,7 +4450,6 @@ static const struct camss_resources sm8550_resources = {
.csiphy_num = ARRAY_SIZE(csiphy_res_8550),
.csid_num = ARRAY_SIZE(csid_res_8550),
.vfe_num = ARRAY_SIZE(vfe_res_8550),
- .link_entities = camss_link_entities
};
static const struct camss_resources x1e80100_resources = {
@@ -3857,13 +4464,15 @@ static const struct camss_resources x1e80100_resources = {
.csiphy_num = ARRAY_SIZE(csiphy_res_x1e80100),
.csid_num = ARRAY_SIZE(csid_res_x1e80100),
.vfe_num = ARRAY_SIZE(vfe_res_x1e80100),
- .link_entities = camss_link_entities
};
static const struct of_device_id camss_dt_match[] = {
{ .compatible = "qcom,msm8916-camss", .data = &msm8916_resources },
{ .compatible = "qcom,msm8953-camss", .data = &msm8953_resources },
{ .compatible = "qcom,msm8996-camss", .data = &msm8996_resources },
+ { .compatible = "qcom,qcm2290-camss", .data = &qcm2290_resources },
+ { .compatible = "qcom,qcs8300-camss", .data = &qcs8300_resources },
+ { .compatible = "qcom,sa8775p-camss", .data = &sa8775p_resources },
{ .compatible = "qcom,sc7280-camss", .data = &sc7280_resources },
{ .compatible = "qcom,sc8280xp-camss", .data = &sc8280xp_resources },
{ .compatible = "qcom,sdm660-camss", .data = &sdm660_resources },
diff --git a/drivers/media/platform/qcom/camss/camss.h b/drivers/media/platform/qcom/camss/camss.h
index 63c0afee154a..a70fbc78ccc3 100644
--- a/drivers/media/platform/qcom/camss/camss.h
+++ b/drivers/media/platform/qcom/camss/camss.h
@@ -78,14 +78,17 @@ enum pm_domain {
enum camss_version {
CAMSS_660,
+ CAMSS_2290,
CAMSS_7280,
CAMSS_8x16,
CAMSS_8x53,
CAMSS_8x96,
CAMSS_8250,
CAMSS_8280XP,
+ CAMSS_8300,
CAMSS_845,
CAMSS_8550,
+ CAMSS_8775P,
CAMSS_X1E80100,
};
@@ -107,7 +110,6 @@ struct camss_resources {
const unsigned int csiphy_num;
const unsigned int csid_num;
const unsigned int vfe_num;
- int (*link_entities)(struct camss *camss);
};
struct camss {
diff --git a/drivers/media/platform/qcom/iris/Makefile b/drivers/media/platform/qcom/iris/Makefile
index e86d00ee6f15..13270cd6d899 100644
--- a/drivers/media/platform/qcom/iris/Makefile
+++ b/drivers/media/platform/qcom/iris/Makefile
@@ -1,5 +1,5 @@
-qcom-iris-objs += \
- iris_buffer.o \
+qcom-iris-objs += iris_buffer.o \
+ iris_common.o \
iris_core.o \
iris_ctrls.o \
iris_firmware.o \
@@ -19,6 +19,7 @@ qcom-iris-objs += \
iris_vidc.o \
iris_vb2.o \
iris_vdec.o \
+ iris_venc.o \
iris_vpu2.o \
iris_vpu3x.o \
iris_vpu_buffer.o \
diff --git a/drivers/media/platform/qcom/iris/iris_buffer.c b/drivers/media/platform/qcom/iris/iris_buffer.c
index 6425e4919e3b..c0900038e7de 100644
--- a/drivers/media/platform/qcom/iris/iris_buffer.c
+++ b/drivers/media/platform/qcom/iris/iris_buffer.c
@@ -63,7 +63,12 @@
static u32 iris_yuv_buffer_size_nv12(struct iris_inst *inst)
{
u32 y_plane, uv_plane, y_stride, uv_stride, y_scanlines, uv_scanlines;
- struct v4l2_format *f = inst->fmt_dst;
+ struct v4l2_format *f;
+
+ if (inst->domain == DECODER)
+ f = inst->fmt_dst;
+ else
+ f = inst->fmt_src;
y_stride = ALIGN(f->fmt.pix_mp.width, Y_STRIDE_ALIGN);
uv_stride = ALIGN(f->fmt.pix_mp.width, UV_STRIDE_ALIGN);
@@ -194,7 +199,7 @@ static u32 iris_yuv_buffer_size_qc08c(struct iris_inst *inst)
return ALIGN(y_meta_plane + y_plane + uv_meta_plane + uv_plane, PIXELS_4K);
}
-static u32 iris_bitstream_buffer_size(struct iris_inst *inst)
+static u32 iris_dec_bitstream_buffer_size(struct iris_inst *inst)
{
struct platform_inst_caps *caps = inst->core->iris_platform_data->inst_caps;
u32 base_res_mbs = NUM_MBS_4K;
@@ -219,18 +224,58 @@ static u32 iris_bitstream_buffer_size(struct iris_inst *inst)
return ALIGN(frame_size, PIXELS_4K);
}
+static u32 iris_enc_bitstream_buffer_size(struct iris_inst *inst)
+{
+ u32 aligned_width, aligned_height, bitstream_size, yuv_size;
+ int bitrate_mode, frame_rc;
+ struct v4l2_format *f;
+
+ f = inst->fmt_dst;
+
+ bitrate_mode = inst->fw_caps[BITRATE_MODE].value;
+ frame_rc = inst->fw_caps[FRAME_RC_ENABLE].value;
+
+ aligned_width = ALIGN(f->fmt.pix_mp.width, 32);
+ aligned_height = ALIGN(f->fmt.pix_mp.height, 32);
+ bitstream_size = aligned_width * aligned_height * 3;
+ yuv_size = (aligned_width * aligned_height * 3) >> 1;
+ if (aligned_width * aligned_height > (4096 * 2176))
+ /* bitstream_size = 0.25 * yuv_size; */
+ bitstream_size = (bitstream_size >> 3);
+ else if (aligned_width * aligned_height > (1280 * 720))
+ /* bitstream_size = 0.5 * yuv_size; */
+ bitstream_size = (bitstream_size >> 2);
+
+ if ((!frame_rc || bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ) &&
+ bitstream_size < yuv_size)
+ bitstream_size = (bitstream_size << 1);
+
+ return ALIGN(bitstream_size, 4096);
+}
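A worked example of the sizing above, assuming a 1920x1080 session (so 1920x1088 after 32-alignment), as a standalone sketch:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

/* Mirrors the shifts in iris_enc_bitstream_buffer_size() above. */
int main(void)
{
	uint32_t w = ALIGN_UP(1920, 32), h = ALIGN_UP(1080, 32);
	uint32_t size = w * h * 3;		/* 6266880 */
	uint32_t yuv = (w * h * 3) >> 1;	/* NV12 frame: 3133440 */

	if (w * h > 4096 * 2176)
		size >>= 3;			/* >4K: 0.25 * yuv */
	else if (w * h > 1280 * 720)
		size >>= 2;			/* >720p: 0.5 * yuv = 1566720 */

	printf("bitstream buf: %u (yuv %u)\n", ALIGN_UP(size, 4096), yuv);
	return 0;				/* prints 1568768 (3133440) */
}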
+
int iris_get_buffer_size(struct iris_inst *inst,
enum iris_buffer_type buffer_type)
{
- switch (buffer_type) {
- case BUF_INPUT:
- return iris_bitstream_buffer_size(inst);
- case BUF_OUTPUT:
- return iris_yuv_buffer_size_nv12(inst);
- case BUF_DPB:
- return iris_yuv_buffer_size_qc08c(inst);
- default:
- return 0;
+ if (inst->domain == DECODER) {
+ switch (buffer_type) {
+ case BUF_INPUT:
+ return iris_dec_bitstream_buffer_size(inst);
+ case BUF_OUTPUT:
+ return iris_yuv_buffer_size_nv12(inst);
+ case BUF_DPB:
+ return iris_yuv_buffer_size_qc08c(inst);
+ default:
+ return 0;
+ }
+ } else {
+ switch (buffer_type) {
+ case BUF_INPUT:
+ return iris_yuv_buffer_size_nv12(inst);
+ case BUF_OUTPUT:
+ return iris_enc_bitstream_buffer_size(inst);
+ default:
+ return 0;
+ }
}
}
@@ -239,7 +284,7 @@ static void iris_fill_internal_buf_info(struct iris_inst *inst,
{
struct iris_buffers *buffers = &inst->buffers[buffer_type];
- buffers->size = iris_vpu_buf_size(inst, buffer_type);
+ buffers->size = inst->core->iris_platform_data->get_vpu_buffer_size(inst, buffer_type);
buffers->min_count = iris_vpu_buf_count(inst, buffer_type);
}
@@ -249,16 +294,30 @@ void iris_get_internal_buffers(struct iris_inst *inst, u32 plane)
const u32 *internal_buf_type;
u32 internal_buffer_count, i;
- if (V4L2_TYPE_IS_OUTPUT(plane)) {
- internal_buf_type = platform_data->dec_ip_int_buf_tbl;
- internal_buffer_count = platform_data->dec_ip_int_buf_tbl_size;
- for (i = 0; i < internal_buffer_count; i++)
- iris_fill_internal_buf_info(inst, internal_buf_type[i]);
+ if (inst->domain == DECODER) {
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ internal_buf_type = platform_data->dec_ip_int_buf_tbl;
+ internal_buffer_count = platform_data->dec_ip_int_buf_tbl_size;
+ for (i = 0; i < internal_buffer_count; i++)
+ iris_fill_internal_buf_info(inst, internal_buf_type[i]);
+ } else {
+ internal_buf_type = platform_data->dec_op_int_buf_tbl;
+ internal_buffer_count = platform_data->dec_op_int_buf_tbl_size;
+ for (i = 0; i < internal_buffer_count; i++)
+ iris_fill_internal_buf_info(inst, internal_buf_type[i]);
+ }
} else {
- internal_buf_type = platform_data->dec_op_int_buf_tbl;
- internal_buffer_count = platform_data->dec_op_int_buf_tbl_size;
- for (i = 0; i < internal_buffer_count; i++)
- iris_fill_internal_buf_info(inst, internal_buf_type[i]);
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ internal_buf_type = platform_data->enc_ip_int_buf_tbl;
+ internal_buffer_count = platform_data->enc_ip_int_buf_tbl_size;
+ for (i = 0; i < internal_buffer_count; i++)
+ iris_fill_internal_buf_info(inst, internal_buf_type[i]);
+ } else {
+ internal_buf_type = platform_data->enc_op_int_buf_tbl;
+ internal_buffer_count = platform_data->enc_op_int_buf_tbl_size;
+ for (i = 0; i < internal_buffer_count; i++)
+ iris_fill_internal_buf_info(inst, internal_buf_type[i]);
+ }
}
}
@@ -299,12 +358,22 @@ int iris_create_internal_buffers(struct iris_inst *inst, u32 plane)
const u32 *internal_buf_type;
int ret;
- if (V4L2_TYPE_IS_OUTPUT(plane)) {
- internal_buf_type = platform_data->dec_ip_int_buf_tbl;
- internal_buffer_count = platform_data->dec_ip_int_buf_tbl_size;
+ if (inst->domain == DECODER) {
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ internal_buf_type = platform_data->dec_ip_int_buf_tbl;
+ internal_buffer_count = platform_data->dec_ip_int_buf_tbl_size;
+ } else {
+ internal_buf_type = platform_data->dec_op_int_buf_tbl;
+ internal_buffer_count = platform_data->dec_op_int_buf_tbl_size;
+ }
} else {
- internal_buf_type = platform_data->dec_op_int_buf_tbl;
- internal_buffer_count = platform_data->dec_op_int_buf_tbl_size;
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ internal_buf_type = platform_data->enc_ip_int_buf_tbl;
+ internal_buffer_count = platform_data->enc_ip_int_buf_tbl_size;
+ } else {
+ internal_buf_type = platform_data->enc_op_int_buf_tbl;
+ internal_buffer_count = platform_data->enc_op_int_buf_tbl_size;
+ }
}
for (i = 0; i < internal_buffer_count; i++) {
@@ -334,6 +403,29 @@ int iris_queue_buffer(struct iris_inst *inst, struct iris_buffer *buf)
return 0;
}
+int iris_queue_internal_deferred_buffers(struct iris_inst *inst, enum iris_buffer_type buffer_type)
+{
+ struct iris_buffer *buffer, *next;
+ struct iris_buffers *buffers;
+ int ret = 0;
+
+ buffers = &inst->buffers[buffer_type];
+ list_for_each_entry_safe(buffer, next, &buffers->list, list) {
+ if (buffer->attr & BUF_ATTR_PENDING_RELEASE)
+ continue;
+ if (buffer->attr & BUF_ATTR_QUEUED)
+ continue;
+
+ if (buffer->attr & BUF_ATTR_DEFERRED) {
+ ret = iris_queue_buffer(inst, buffer);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
int iris_queue_internal_buffers(struct iris_inst *inst, u32 plane)
{
const struct iris_platform_data *platform_data = inst->core->iris_platform_data;
@@ -343,12 +435,22 @@ int iris_queue_internal_buffers(struct iris_inst *inst, u32 plane)
u32 internal_buffer_count, i;
int ret;
- if (V4L2_TYPE_IS_OUTPUT(plane)) {
- internal_buf_type = platform_data->dec_ip_int_buf_tbl;
- internal_buffer_count = platform_data->dec_ip_int_buf_tbl_size;
+ if (inst->domain == DECODER) {
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ internal_buf_type = platform_data->dec_ip_int_buf_tbl;
+ internal_buffer_count = platform_data->dec_ip_int_buf_tbl_size;
+ } else {
+ internal_buf_type = platform_data->dec_op_int_buf_tbl;
+ internal_buffer_count = platform_data->dec_op_int_buf_tbl_size;
+ }
} else {
- internal_buf_type = platform_data->dec_op_int_buf_tbl;
- internal_buffer_count = platform_data->dec_op_int_buf_tbl_size;
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ internal_buf_type = platform_data->enc_ip_int_buf_tbl;
+ internal_buffer_count = platform_data->enc_ip_int_buf_tbl_size;
+ } else {
+ internal_buf_type = platform_data->enc_op_int_buf_tbl;
+ internal_buffer_count = platform_data->enc_op_int_buf_tbl_size;
+ }
}
for (i = 0; i < internal_buffer_count; i++) {
@@ -358,6 +460,10 @@ int iris_queue_internal_buffers(struct iris_inst *inst, u32 plane)
continue;
if (buffer->attr & BUF_ATTR_QUEUED)
continue;
+ if (buffer->type == BUF_DPB && inst->state != IRIS_INST_STREAMING) {
+ buffer->attr |= BUF_ATTR_DEFERRED;
+ continue;
+ }
ret = iris_queue_buffer(inst, buffer);
if (ret)
return ret;
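So DPB buffers queued before the instance reaches IRIS_INST_STREAMING are only marked BUF_ATTR_DEFERRED, and are handed to firmware later via iris_queue_internal_deferred_buffers(). A toy model of that two-step flow (flag names abbreviated):

#include <stdio.h>
#include <stdbool.h>

enum { ATTR_QUEUED = 1 << 0, ATTR_DEFERRED = 1 << 1 };

/* Pre-streaming: only mark the buffer; streaming: queue it. */
static void queue_internal(unsigned int *attr, bool streaming)
{
	if (!streaming)
		*attr |= ATTR_DEFERRED;
	else
		*attr |= ATTR_QUEUED;
}

/* Later pass: flush anything still marked deferred. */
static void queue_deferred(unsigned int *attr)
{
	if ((*attr & ATTR_DEFERRED) && !(*attr & ATTR_QUEUED))
		*attr |= ATTR_QUEUED;
}

int main(void)
{
	unsigned int attr = 0;

	queue_internal(&attr, false);
	queue_deferred(&attr);
	printf("attr=%#x\n", attr);	/* 0x3: deferred, then queued */
	return 0;
}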
@@ -388,12 +494,22 @@ static int iris_destroy_internal_buffers(struct iris_inst *inst, u32 plane, bool
u32 i, len;
int ret;
- if (V4L2_TYPE_IS_OUTPUT(plane)) {
- internal_buf_type = platform_data->dec_ip_int_buf_tbl;
- len = platform_data->dec_ip_int_buf_tbl_size;
+ if (inst->domain == DECODER) {
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ internal_buf_type = platform_data->dec_ip_int_buf_tbl;
+ len = platform_data->dec_ip_int_buf_tbl_size;
+ } else {
+ internal_buf_type = platform_data->dec_op_int_buf_tbl;
+ len = platform_data->dec_op_int_buf_tbl_size;
+ }
} else {
- internal_buf_type = platform_data->dec_op_int_buf_tbl;
- len = platform_data->dec_op_int_buf_tbl_size;
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ internal_buf_type = platform_data->enc_ip_int_buf_tbl;
+ len = platform_data->enc_ip_int_buf_tbl_size;
+ } else {
+ internal_buf_type = platform_data->enc_op_int_buf_tbl;
+ len = platform_data->enc_op_int_buf_tbl_size;
+ }
}
for (i = 0; i < len; i++) {
@@ -413,6 +529,19 @@ static int iris_destroy_internal_buffers(struct iris_inst *inst, u32 plane, bool
}
}
+ if (force) {
+ if (inst->domain == DECODER)
+ buffers = &inst->buffers[BUF_PERSIST];
+ else
+ buffers = &inst->buffers[BUF_ARP];
+
+ list_for_each_entry_safe(buf, next, &buffers->list, list) {
+ ret = iris_destroy_internal_buffer(inst, buf);
+ if (ret)
+ return ret;
+ }
+ }
+
return 0;
}
@@ -455,8 +584,13 @@ static int iris_release_input_internal_buffers(struct iris_inst *inst)
u32 internal_buffer_count, i;
int ret;
- internal_buf_type = platform_data->dec_ip_int_buf_tbl;
- internal_buffer_count = platform_data->dec_ip_int_buf_tbl_size;
+ if (inst->domain == DECODER) {
+ internal_buf_type = platform_data->dec_ip_int_buf_tbl;
+ internal_buffer_count = platform_data->dec_ip_int_buf_tbl_size;
+ } else {
+ internal_buf_type = platform_data->enc_ip_int_buf_tbl;
+ internal_buffer_count = platform_data->enc_ip_int_buf_tbl_size;
+ }
for (i = 0; i < internal_buffer_count; i++) {
ret = iris_release_internal_buffers(inst, internal_buf_type[i]);
@@ -467,9 +601,9 @@ static int iris_release_input_internal_buffers(struct iris_inst *inst)
return 0;
}
-int iris_alloc_and_queue_persist_bufs(struct iris_inst *inst)
+int iris_alloc_and_queue_persist_bufs(struct iris_inst *inst, enum iris_buffer_type buffer_type)
{
- struct iris_buffers *buffers = &inst->buffers[BUF_PERSIST];
+ struct iris_buffers *buffers = &inst->buffers[buffer_type];
struct iris_buffer *buffer, *next;
int ret;
u32 i;
@@ -477,10 +611,10 @@ int iris_alloc_and_queue_persist_bufs(struct iris_inst *inst)
if (!list_empty(&buffers->list))
return 0;
- iris_fill_internal_buf_info(inst, BUF_PERSIST);
+ iris_fill_internal_buf_info(inst, buffer_type);
for (i = 0; i < buffers->min_count; i++) {
- ret = iris_create_internal_buffer(inst, BUF_PERSIST, i);
+ ret = iris_create_internal_buffer(inst, buffer_type, i);
if (ret)
return ret;
}
@@ -614,6 +748,8 @@ int iris_vb2_buffer_done(struct iris_inst *inst, struct iris_buffer *buf)
vb2 = &vbuf->vb2_buf;
+ vbuf->flags |= buf->flags;
+
if (buf->flags & V4L2_BUF_FLAG_ERROR) {
state = VB2_BUF_STATE_ERROR;
vb2_set_plane_payload(vb2, 0, 0);
@@ -622,8 +758,6 @@ int iris_vb2_buffer_done(struct iris_inst *inst, struct iris_buffer *buf)
return 0;
}
- vbuf->flags |= buf->flags;
-
if (V4L2_TYPE_IS_CAPTURE(type)) {
vb2_set_plane_payload(vb2, 0, buf->data_size);
vbuf->sequence = inst->sequence_cap++;
diff --git a/drivers/media/platform/qcom/iris/iris_buffer.h b/drivers/media/platform/qcom/iris/iris_buffer.h
index 00825ad2dc3a..325d30fce5c9 100644
--- a/drivers/media/platform/qcom/iris/iris_buffer.h
+++ b/drivers/media/platform/qcom/iris/iris_buffer.h
@@ -25,6 +25,8 @@ struct iris_inst;
* @BUF_DPB: buffer to store display picture buffers for reference
* @BUF_PERSIST: buffer to store session context data
* @BUF_SCRATCH_1: buffer to store decoding/encoding context data for HW
+ * @BUF_SCRATCH_2: buffer to store encoding context data for HW
+ * @BUF_VPSS: buffer to store VPSS context data for HW
* @BUF_TYPE_MAX: max buffer types
*/
enum iris_buffer_type {
@@ -38,6 +40,8 @@ enum iris_buffer_type {
BUF_DPB,
BUF_PERSIST,
BUF_SCRATCH_1,
+ BUF_SCRATCH_2,
+ BUF_VPSS,
BUF_TYPE_MAX,
};
@@ -105,10 +109,11 @@ int iris_get_buffer_size(struct iris_inst *inst, enum iris_buffer_type buffer_ty
void iris_get_internal_buffers(struct iris_inst *inst, u32 plane);
int iris_create_internal_buffers(struct iris_inst *inst, u32 plane);
int iris_queue_internal_buffers(struct iris_inst *inst, u32 plane);
+int iris_queue_internal_deferred_buffers(struct iris_inst *inst, enum iris_buffer_type buffer_type);
int iris_destroy_internal_buffer(struct iris_inst *inst, struct iris_buffer *buffer);
int iris_destroy_all_internal_buffers(struct iris_inst *inst, u32 plane);
int iris_destroy_dequeued_internal_buffers(struct iris_inst *inst, u32 plane);
-int iris_alloc_and_queue_persist_bufs(struct iris_inst *inst);
+int iris_alloc_and_queue_persist_bufs(struct iris_inst *inst, enum iris_buffer_type buf_type);
int iris_alloc_and_queue_input_int_bufs(struct iris_inst *inst);
int iris_queue_buffer(struct iris_inst *inst, struct iris_buffer *buf);
int iris_queue_deferred_buffers(struct iris_inst *inst, enum iris_buffer_type buf_type);
diff --git a/drivers/media/platform/qcom/iris/iris_common.c b/drivers/media/platform/qcom/iris/iris_common.c
new file mode 100644
index 000000000000..9fc663bdaf3f
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_common.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <media/v4l2-mem2mem.h>
+
+#include "iris_common.h"
+#include "iris_ctrls.h"
+#include "iris_instance.h"
+#include "iris_power.h"
+
+int iris_vb2_buffer_to_driver(struct vb2_buffer *vb2, struct iris_buffer *buf)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2);
+
+ buf->type = iris_v4l2_type_to_driver(vb2->type);
+ buf->index = vb2->index;
+ buf->fd = vb2->planes[0].m.fd;
+ buf->buffer_size = vb2->planes[0].length;
+ buf->data_offset = vb2->planes[0].data_offset;
+ buf->data_size = vb2->planes[0].bytesused - vb2->planes[0].data_offset;
+ buf->flags = vbuf->flags;
+ buf->timestamp = vb2->timestamp;
+ buf->attr = 0;
+
+ return 0;
+}
+
+void iris_set_ts_metadata(struct iris_inst *inst, struct vb2_v4l2_buffer *vbuf)
+{
+ u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ struct vb2_buffer *vb = &vbuf->vb2_buf;
+ u64 ts_us = vb->timestamp;
+
+ if (inst->metadata_idx >= ARRAY_SIZE(inst->tss))
+ inst->metadata_idx = 0;
+
+ do_div(ts_us, NSEC_PER_USEC);
+
+ inst->tss[inst->metadata_idx].flags = vbuf->flags & mask;
+ inst->tss[inst->metadata_idx].tc = vbuf->timecode;
+ inst->tss[inst->metadata_idx].ts_us = ts_us;
+ inst->tss[inst->metadata_idx].ts_ns = vb->timestamp;
+
+ inst->metadata_idx++;
+}
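The metadata slots form a simple overwrite-oldest ring; a standalone sketch of the wrap-around index (ring depth hypothetical; the driver uses ARRAY_SIZE(inst->tss)):

#include <stdio.h>

#define N_TS 32	/* hypothetical depth */

int main(void)
{
	unsigned int idx = 0;

	for (int frame = 0; frame < 70; frame++) {
		if (idx >= N_TS)	/* wrap: overwrite oldest slot */
			idx = 0;
		/* tss[idx] = { flags, timecode, ts_us, ts_ns }; */
		idx++;
	}
	printf("next slot: %u\n", idx);	/* prints 6 (70 - 2 * 32) */
	return 0;
}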
+
+int iris_process_streamon_input(struct iris_inst *inst)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ enum iris_inst_sub_state set_sub_state = 0;
+ int ret;
+
+ iris_scale_power(inst);
+
+ ret = hfi_ops->session_start(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+
+ if (inst->sub_state & IRIS_INST_SUB_INPUT_PAUSE) {
+ ret = iris_inst_change_sub_state(inst, IRIS_INST_SUB_INPUT_PAUSE, 0);
+ if (ret)
+ return ret;
+ }
+
+ if (inst->domain == DECODER &&
+ (inst->sub_state & IRIS_INST_SUB_DRC ||
+ inst->sub_state & IRIS_INST_SUB_DRAIN ||
+ inst->sub_state & IRIS_INST_SUB_FIRST_IPSC)) {
+ if (!(inst->sub_state & IRIS_INST_SUB_INPUT_PAUSE)) {
+ if (hfi_ops->session_pause) {
+ ret = hfi_ops->session_pause(inst,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+ }
+ set_sub_state = IRIS_INST_SUB_INPUT_PAUSE;
+ }
+ }
+
+ ret = iris_inst_state_change_streamon(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+
+ inst->last_buffer_dequeued = false;
+
+ return iris_inst_change_sub_state(inst, 0, set_sub_state);
+}
+
+int iris_process_streamon_output(struct iris_inst *inst)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ bool drain_active = false, drc_active = false;
+ enum iris_inst_sub_state clear_sub_state = 0;
+ int ret = 0;
+
+ iris_scale_power(inst);
+
+ drain_active = inst->sub_state & IRIS_INST_SUB_DRAIN &&
+ inst->sub_state & IRIS_INST_SUB_DRAIN_LAST;
+
+ drc_active = inst->sub_state & IRIS_INST_SUB_DRC &&
+ inst->sub_state & IRIS_INST_SUB_DRC_LAST;
+
+ if (drc_active)
+ clear_sub_state = IRIS_INST_SUB_DRC | IRIS_INST_SUB_DRC_LAST;
+ else if (drain_active)
+ clear_sub_state = IRIS_INST_SUB_DRAIN | IRIS_INST_SUB_DRAIN_LAST;
+
+ if (inst->domain == DECODER && inst->sub_state & IRIS_INST_SUB_INPUT_PAUSE) {
+ ret = iris_alloc_and_queue_input_int_bufs(inst);
+ if (ret)
+ return ret;
+ ret = iris_set_stage(inst, STAGE);
+ if (ret)
+ return ret;
+ ret = iris_set_pipe(inst, PIPE);
+ if (ret)
+ return ret;
+ }
+
+ if (inst->state == IRIS_INST_INPUT_STREAMING &&
+ inst->sub_state & IRIS_INST_SUB_INPUT_PAUSE) {
+ if (!drain_active)
+ ret = hfi_ops->session_resume_drc(inst,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ else if (hfi_ops->session_resume_drain)
+ ret = hfi_ops->session_resume_drain(inst,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+ clear_sub_state |= IRIS_INST_SUB_INPUT_PAUSE;
+ }
+
+ if (inst->sub_state & IRIS_INST_SUB_FIRST_IPSC)
+ clear_sub_state |= IRIS_INST_SUB_FIRST_IPSC;
+
+ ret = hfi_ops->session_start(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret)
+ return ret;
+
+ if (inst->sub_state & IRIS_INST_SUB_OUTPUT_PAUSE)
+ clear_sub_state |= IRIS_INST_SUB_OUTPUT_PAUSE;
+
+ ret = iris_inst_state_change_streamon(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret)
+ return ret;
+
+ inst->last_buffer_dequeued = false;
+
+ return iris_inst_change_sub_state(inst, clear_sub_state, 0);
+}
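The streamon path above accumulates set/clear masks and applies them once at the end; a toy version of the mask arithmetic (names shortened, values hypothetical):

#include <stdio.h>

enum sub_state {
	SUB_DRC		= 1 << 0,
	SUB_DRC_LAST	= 1 << 1,
	SUB_DRAIN	= 1 << 2,
	SUB_DRAIN_LAST	= 1 << 3,
	SUB_INPUT_PAUSE	= 1 << 4,
};

int main(void)
{
	unsigned int sub = SUB_DRC | SUB_DRC_LAST | SUB_INPUT_PAUSE;
	unsigned int clear = 0;

	if ((sub & SUB_DRC) && (sub & SUB_DRC_LAST))
		clear = SUB_DRC | SUB_DRC_LAST;		/* DRC handled */
	else if ((sub & SUB_DRAIN) && (sub & SUB_DRAIN_LAST))
		clear = SUB_DRAIN | SUB_DRAIN_LAST;
	clear |= SUB_INPUT_PAUSE;			/* input resumed */

	sub &= ~clear;
	printf("sub_state=%#x\n", sub);			/* prints 0 */
	return 0;
}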
+
+static void iris_flush_deferred_buffers(struct iris_inst *inst,
+ enum iris_buffer_type type)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct v4l2_m2m_buffer *buffer, *n;
+ struct iris_buffer *buf;
+
+ if (type == BUF_INPUT) {
+ v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buffer, n) {
+ buf = to_iris_buffer(&buffer->vb);
+ if (buf->attr & BUF_ATTR_DEFERRED) {
+ if (!(buf->attr & BUF_ATTR_BUFFER_DONE)) {
+ buf->attr |= BUF_ATTR_BUFFER_DONE;
+ buf->data_size = 0;
+ iris_vb2_buffer_done(inst, buf);
+ }
+ }
+ }
+ } else {
+ v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, buffer, n) {
+ buf = to_iris_buffer(&buffer->vb);
+ if (buf->attr & BUF_ATTR_DEFERRED) {
+ if (!(buf->attr & BUF_ATTR_BUFFER_DONE)) {
+ buf->attr |= BUF_ATTR_BUFFER_DONE;
+ buf->data_size = 0;
+ iris_vb2_buffer_done(inst, buf);
+ }
+ }
+ }
+ }
+}
+
+static void iris_kill_session(struct iris_inst *inst)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+
+ if (!inst->session_id)
+ return;
+
+ hfi_ops->session_close(inst);
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+}
+
+int iris_session_streamoff(struct iris_inst *inst, u32 plane)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ enum iris_buffer_type buffer_type;
+ int ret;
+
+ switch (plane) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ buffer_type = BUF_INPUT;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ buffer_type = BUF_OUTPUT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = hfi_ops->session_stop(inst, plane);
+ if (ret)
+ goto error;
+
+ ret = iris_inst_state_change_streamoff(inst, plane);
+ if (ret)
+ goto error;
+
+ iris_flush_deferred_buffers(inst, buffer_type);
+
+ return 0;
+
+error:
+ iris_kill_session(inst);
+ iris_flush_deferred_buffers(inst, buffer_type);
+
+ return ret;
+}
diff --git a/drivers/media/platform/qcom/iris/iris_common.h b/drivers/media/platform/qcom/iris/iris_common.h
new file mode 100644
index 000000000000..b2a27b781c9a
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_common.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __IRIS_COMMON_H__
+#define __IRIS_COMMON_H__
+
+struct iris_inst;
+struct iris_buffer;
+
+int iris_vb2_buffer_to_driver(struct vb2_buffer *vb2, struct iris_buffer *buf);
+void iris_set_ts_metadata(struct iris_inst *inst, struct vb2_v4l2_buffer *vbuf);
+int iris_process_streamon_input(struct iris_inst *inst);
+int iris_process_streamon_output(struct iris_inst *inst);
+int iris_session_streamoff(struct iris_inst *inst, u32 plane);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_core.c b/drivers/media/platform/qcom/iris/iris_core.c
index 0fa0a3b549a2..8406c48d635b 100644
--- a/drivers/media/platform/qcom/iris/iris_core.c
+++ b/drivers/media/platform/qcom/iris/iris_core.c
@@ -15,10 +15,12 @@ void iris_core_deinit(struct iris_core *core)
pm_runtime_resume_and_get(core->dev);
mutex_lock(&core->lock);
- iris_fw_unload(core);
- iris_vpu_power_off(core);
- iris_hfi_queues_deinit(core);
- core->state = IRIS_CORE_DEINIT;
+ if (core->state != IRIS_CORE_DEINIT) {
+ iris_fw_unload(core);
+ iris_vpu_power_off(core);
+ iris_hfi_queues_deinit(core);
+ core->state = IRIS_CORE_DEINIT;
+ }
mutex_unlock(&core->lock);
pm_runtime_put_sync(core->dev);
diff --git a/drivers/media/platform/qcom/iris/iris_core.h b/drivers/media/platform/qcom/iris/iris_core.h
index aeeac32a1f6d..fb194c967ad4 100644
--- a/drivers/media/platform/qcom/iris/iris_core.h
+++ b/drivers/media/platform/qcom/iris/iris_core.h
@@ -25,6 +25,11 @@ struct icc_info {
#define IRIS_FW_VERSION_LENGTH 128
#define IFACEQ_CORE_PKT_SIZE (1024 * 4)
+enum domain_type {
+ ENCODER = BIT(0),
+ DECODER = BIT(1),
+};
+
/**
* struct iris_core - holds core parameters valid for all instances
*
@@ -33,8 +38,10 @@ struct icc_info {
* @irq: iris irq
* @v4l2_dev: a holder for v4l2 device structure
* @vdev_dec: iris video device structure for decoder
+ * @vdev_enc: iris video device structure for encoder
* @iris_v4l2_file_ops: iris v4l2 file ops
- * @iris_v4l2_ioctl_ops: iris v4l2 ioctl ops
+ * @iris_v4l2_ioctl_ops_dec: iris v4l2 ioctl ops for decoder
+ * @iris_v4l2_ioctl_ops_enc: iris v4l2 ioctl ops for encoder
* @iris_vb2_ops: iris vb2 ops
* @icc_tbl: table of iris interconnects
* @icc_count: count of iris interconnects
@@ -64,7 +71,8 @@ struct icc_info {
* @intr_status: interrupt status
* @sys_error_handler: a delayed work for handling system fatal error
* @instances: a list_head of all instances
- * @inst_fw_caps: an array of supported instance capabilities
+ * @inst_fw_caps_dec: an array of supported instance capabilities by decoder
+ * @inst_fw_caps_enc: an array of supported instance capabilities by encoder
*/
struct iris_core {
@@ -73,8 +81,10 @@ struct iris_core {
int irq;
struct v4l2_device v4l2_dev;
struct video_device *vdev_dec;
+ struct video_device *vdev_enc;
const struct v4l2_file_operations *iris_v4l2_file_ops;
- const struct v4l2_ioctl_ops *iris_v4l2_ioctl_ops;
+ const struct v4l2_ioctl_ops *iris_v4l2_ioctl_ops_dec;
+ const struct v4l2_ioctl_ops *iris_v4l2_ioctl_ops_enc;
const struct vb2_ops *iris_vb2_ops;
struct icc_bulk_data *icc_tbl;
u32 icc_count;
@@ -104,7 +114,9 @@ struct iris_core {
u32 intr_status;
struct delayed_work sys_error_handler;
struct list_head instances;
- struct platform_inst_fw_cap inst_fw_caps[INST_FW_CAP_MAX];
+ /* encoder and decoder have overlapping caps, so two different arrays are required */
+ struct platform_inst_fw_cap inst_fw_caps_dec[INST_FW_CAP_MAX];
+ struct platform_inst_fw_cap inst_fw_caps_enc[INST_FW_CAP_MAX];
};
int iris_core_init(struct iris_core *core);
diff --git a/drivers/media/platform/qcom/iris/iris_ctrls.c b/drivers/media/platform/qcom/iris/iris_ctrls.c
index 9136b723c0f2..754a5ad718bc 100644
--- a/drivers/media/platform/qcom/iris/iris_ctrls.c
+++ b/drivers/media/platform/qcom/iris/iris_ctrls.c
@@ -7,8 +7,13 @@
#include <media/v4l2-mem2mem.h>
#include "iris_ctrls.h"
+#include "iris_hfi_gen1_defines.h"
+#include "iris_hfi_gen2_defines.h"
#include "iris_instance.h"
+#define CABAC_MAX_BITRATE 160000000
+#define CAVLC_MAX_BITRATE 220000000
+
static inline bool iris_valid_cap_id(enum platform_inst_fw_cap_type cap_id)
{
return cap_id >= 1 && cap_id < INST_FW_CAP_MAX;
@@ -31,6 +36,68 @@ static enum platform_inst_fw_cap_type iris_get_cap_id(u32 id)
return LEVEL_VP9;
case V4L2_CID_MPEG_VIDEO_HEVC_TIER:
return TIER;
+ case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
+ return HEADER_MODE;
+ case V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR:
+ return PREPEND_SPSPPS_TO_IDR;
+ case V4L2_CID_MPEG_VIDEO_BITRATE:
+ return BITRATE;
+ case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
+ return BITRATE_PEAK;
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ return BITRATE_MODE;
+ case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE:
+ return FRAME_SKIP_MODE;
+ case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
+ return FRAME_RC_ENABLE;
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ return GOP_SIZE;
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+ return ENTROPY_MODE;
+ case V4L2_CID_MPEG_VIDEO_H264_MIN_QP:
+ return MIN_FRAME_QP_H264;
+ case V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP:
+ return MIN_FRAME_QP_HEVC;
+ case V4L2_CID_MPEG_VIDEO_H264_MAX_QP:
+ return MAX_FRAME_QP_H264;
+ case V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP:
+ return MAX_FRAME_QP_HEVC;
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MIN_QP:
+ return I_FRAME_MIN_QP_H264;
+ case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MIN_QP:
+ return I_FRAME_MIN_QP_HEVC;
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP:
+ return P_FRAME_MIN_QP_H264;
+ case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MIN_QP:
+ return P_FRAME_MIN_QP_HEVC;
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MIN_QP:
+ return B_FRAME_MIN_QP_H264;
+ case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MIN_QP:
+ return B_FRAME_MIN_QP_HEVC;
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP:
+ return I_FRAME_MAX_QP_H264;
+ case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MAX_QP:
+ return I_FRAME_MAX_QP_HEVC;
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP:
+ return P_FRAME_MAX_QP_H264;
+ case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MAX_QP:
+ return P_FRAME_MAX_QP_HEVC;
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MAX_QP:
+ return B_FRAME_MAX_QP_H264;
+ case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MAX_QP:
+ return B_FRAME_MAX_QP_HEVC;
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
+ return I_FRAME_QP_H264;
+ case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP:
+ return I_FRAME_QP_HEVC;
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:
+ return P_FRAME_QP_H264;
+ case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP:
+ return P_FRAME_QP_HEVC;
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP:
+ return B_FRAME_QP_H264;
+ case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP:
+ return B_FRAME_QP_HEVC;
default:
return INST_FW_CAP_MAX;
}
@@ -56,12 +123,74 @@ static u32 iris_get_v4l2_id(enum platform_inst_fw_cap_type cap_id)
return V4L2_CID_MPEG_VIDEO_VP9_LEVEL;
case TIER:
return V4L2_CID_MPEG_VIDEO_HEVC_TIER;
+ case HEADER_MODE:
+ return V4L2_CID_MPEG_VIDEO_HEADER_MODE;
+ case PREPEND_SPSPPS_TO_IDR:
+ return V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR;
+ case BITRATE:
+ return V4L2_CID_MPEG_VIDEO_BITRATE;
+ case BITRATE_PEAK:
+ return V4L2_CID_MPEG_VIDEO_BITRATE_PEAK;
+ case BITRATE_MODE:
+ return V4L2_CID_MPEG_VIDEO_BITRATE_MODE;
+ case FRAME_SKIP_MODE:
+ return V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE;
+ case FRAME_RC_ENABLE:
+ return V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE;
+ case GOP_SIZE:
+ return V4L2_CID_MPEG_VIDEO_GOP_SIZE;
+ case ENTROPY_MODE:
+ return V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE;
+ case MIN_FRAME_QP_H264:
+ return V4L2_CID_MPEG_VIDEO_H264_MIN_QP;
+ case MIN_FRAME_QP_HEVC:
+ return V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP;
+ case MAX_FRAME_QP_H264:
+ return V4L2_CID_MPEG_VIDEO_H264_MAX_QP;
+ case MAX_FRAME_QP_HEVC:
+ return V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP;
+ case I_FRAME_MIN_QP_H264:
+ return V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MIN_QP;
+ case I_FRAME_MIN_QP_HEVC:
+ return V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MIN_QP;
+ case P_FRAME_MIN_QP_H264:
+ return V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP;
+ case P_FRAME_MIN_QP_HEVC:
+ return V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MIN_QP;
+ case B_FRAME_MIN_QP_H264:
+ return V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MIN_QP;
+ case B_FRAME_MIN_QP_HEVC:
+ return V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MIN_QP;
+ case I_FRAME_MAX_QP_H264:
+ return V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP;
+ case I_FRAME_MAX_QP_HEVC:
+ return V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MAX_QP;
+ case P_FRAME_MAX_QP_H264:
+ return V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP;
+ case P_FRAME_MAX_QP_HEVC:
+ return V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MAX_QP;
+ case B_FRAME_MAX_QP_H264:
+ return V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MAX_QP;
+ case B_FRAME_MAX_QP_HEVC:
+ return V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MAX_QP;
+ case I_FRAME_QP_H264:
+ return V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP;
+ case I_FRAME_QP_HEVC:
+ return V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP;
+ case P_FRAME_QP_H264:
+ return V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP;
+ case P_FRAME_QP_HEVC:
+ return V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP;
+ case B_FRAME_QP_H264:
+ return V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP;
+ case B_FRAME_QP_HEVC:
+ return V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP;
default:
return 0;
}
}
-static int iris_vdec_op_s_ctrl(struct v4l2_ctrl *ctrl)
+static int iris_op_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct iris_inst *inst = container_of(ctrl->handler, struct iris_inst, ctrl_handler);
enum platform_inst_fw_cap_type cap_id;
@@ -82,11 +211,16 @@ static int iris_vdec_op_s_ctrl(struct v4l2_ctrl *ctrl)
inst->fw_caps[cap_id].value = ctrl->val;
+ if (vb2_is_streaming(q)) {
+ if (cap[cap_id].set)
+ cap[cap_id].set(inst, cap_id);
+ }
+
return 0;
}
static const struct v4l2_ctrl_ops iris_ctrl_ops = {
- .s_ctrl = iris_vdec_op_s_ctrl,
+ .s_ctrl = iris_op_s_ctrl,
};
int iris_ctrls_init(struct iris_inst *inst)
@@ -101,7 +235,10 @@ int iris_ctrls_init(struct iris_inst *inst)
num_ctrls++;
}
- /* Adding 1 to num_ctrls to include V4L2_CID_MIN_BUFFERS_FOR_CAPTURE */
+ /*
+ * Adding 1 to num_ctrls to include
+ * V4L2_CID_MIN_BUFFERS_FOR_CAPTURE for the decoder and
+ * V4L2_CID_MIN_BUFFERS_FOR_OUTPUT for the encoder.
+ */
ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, num_ctrls + 1);
if (ret)
@@ -143,8 +280,13 @@ int iris_ctrls_init(struct iris_inst *inst)
ctrl_idx++;
}
- v4l2_ctrl_new_std(&inst->ctrl_handler, NULL,
- V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 32, 1, 4);
+ if (inst->domain == DECODER) {
+ v4l2_ctrl_new_std(&inst->ctrl_handler, NULL,
+ V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 32, 1, 4);
+ } else {
+ v4l2_ctrl_new_std(&inst->ctrl_handler, NULL,
+ V4L2_CID_MIN_BUFFERS_FOR_OUTPUT, 1, 32, 1, 4);
+ }
ret = inst->ctrl_handler.error;
if (ret)
@@ -162,32 +304,57 @@ void iris_session_init_caps(struct iris_core *core)
struct platform_inst_fw_cap *caps;
u32 i, num_cap, cap_id;
- caps = core->iris_platform_data->inst_fw_caps;
- num_cap = core->iris_platform_data->inst_fw_caps_size;
+ caps = core->iris_platform_data->inst_fw_caps_dec;
+ num_cap = core->iris_platform_data->inst_fw_caps_dec_size;
+
+ for (i = 0; i < num_cap; i++) {
+ cap_id = caps[i].cap_id;
+ if (!iris_valid_cap_id(cap_id))
+ continue;
+
+ core->inst_fw_caps_dec[cap_id].cap_id = caps[i].cap_id;
+ core->inst_fw_caps_dec[cap_id].min = caps[i].min;
+ core->inst_fw_caps_dec[cap_id].max = caps[i].max;
+ core->inst_fw_caps_dec[cap_id].step_or_mask = caps[i].step_or_mask;
+ core->inst_fw_caps_dec[cap_id].value = caps[i].value;
+ core->inst_fw_caps_dec[cap_id].flags = caps[i].flags;
+ core->inst_fw_caps_dec[cap_id].hfi_id = caps[i].hfi_id;
+ core->inst_fw_caps_dec[cap_id].set = caps[i].set;
+ }
+
+ caps = core->iris_platform_data->inst_fw_caps_enc;
+ num_cap = core->iris_platform_data->inst_fw_caps_enc_size;
for (i = 0; i < num_cap; i++) {
cap_id = caps[i].cap_id;
if (!iris_valid_cap_id(cap_id))
continue;
- core->inst_fw_caps[cap_id].cap_id = caps[i].cap_id;
- core->inst_fw_caps[cap_id].min = caps[i].min;
- core->inst_fw_caps[cap_id].max = caps[i].max;
- core->inst_fw_caps[cap_id].step_or_mask = caps[i].step_or_mask;
- core->inst_fw_caps[cap_id].value = caps[i].value;
- core->inst_fw_caps[cap_id].flags = caps[i].flags;
- core->inst_fw_caps[cap_id].hfi_id = caps[i].hfi_id;
- core->inst_fw_caps[cap_id].set = caps[i].set;
+ core->inst_fw_caps_enc[cap_id].cap_id = caps[i].cap_id;
+ core->inst_fw_caps_enc[cap_id].min = caps[i].min;
+ core->inst_fw_caps_enc[cap_id].max = caps[i].max;
+ core->inst_fw_caps_enc[cap_id].step_or_mask = caps[i].step_or_mask;
+ core->inst_fw_caps_enc[cap_id].value = caps[i].value;
+ core->inst_fw_caps_enc[cap_id].flags = caps[i].flags;
+ core->inst_fw_caps_enc[cap_id].hfi_id = caps[i].hfi_id;
+ core->inst_fw_caps_enc[cap_id].set = caps[i].set;
}
}
static u32 iris_get_port_info(struct iris_inst *inst,
enum platform_inst_fw_cap_type cap_id)
{
- if (inst->fw_caps[cap_id].flags & CAP_FLAG_INPUT_PORT)
- return HFI_PORT_BITSTREAM;
- else if (inst->fw_caps[cap_id].flags & CAP_FLAG_OUTPUT_PORT)
- return HFI_PORT_RAW;
+ if (inst->domain == DECODER) {
+ if (inst->fw_caps[cap_id].flags & CAP_FLAG_INPUT_PORT)
+ return HFI_PORT_BITSTREAM;
+ else if (inst->fw_caps[cap_id].flags & CAP_FLAG_OUTPUT_PORT)
+ return HFI_PORT_RAW;
+ } else {
+ if (inst->fw_caps[cap_id].flags & CAP_FLAG_INPUT_PORT)
+ return HFI_PORT_RAW;
+ else if (inst->fw_caps[cap_id].flags & CAP_FLAG_OUTPUT_PORT)
+ return HFI_PORT_BITSTREAM;
+ }
return HFI_PORT_NONE;
}
@@ -227,8 +394,10 @@ int iris_set_stage(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id
u32 width = inp_f->fmt.pix_mp.width;
u32 work_mode = STAGE_2;
- if (iris_res_is_less_than(width, height, 1280, 720))
- work_mode = STAGE_1;
+ if (inst->domain == DECODER) {
+ if (iris_res_is_less_than(width, height, 1280, 720))
+ work_mode = STAGE_1;
+ }
return hfi_ops->session_set_property(inst, hfi_id,
HFI_HOST_FLAGS_NONE,
@@ -250,6 +419,470 @@ int iris_set_pipe(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
&work_route, sizeof(u32));
}
+int iris_set_profile(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 hfi_id, hfi_value;
+
+ if (inst->codec == V4L2_PIX_FMT_H264) {
+ hfi_id = inst->fw_caps[PROFILE_H264].hfi_id;
+ hfi_value = inst->fw_caps[PROFILE_H264].value;
+ } else {
+ hfi_id = inst->fw_caps[PROFILE_HEVC].hfi_id;
+ hfi_value = inst->fw_caps[PROFILE_HEVC].value;
+ }
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_U32_ENUM,
+ &hfi_value, sizeof(u32));
+}
+
+int iris_set_level(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 hfi_id, hfi_value;
+
+ if (inst->codec == V4L2_PIX_FMT_H264) {
+ hfi_id = inst->fw_caps[LEVEL_H264].hfi_id;
+ hfi_value = inst->fw_caps[LEVEL_H264].value;
+ } else {
+ hfi_id = inst->fw_caps[LEVEL_HEVC].hfi_id;
+ hfi_value = inst->fw_caps[LEVEL_HEVC].value;
+ }
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_U32_ENUM,
+ &hfi_value, sizeof(u32));
+}
+
+int iris_set_profile_level_gen1(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+ struct hfi_profile_level pl;
+
+ if (inst->codec == V4L2_PIX_FMT_H264) {
+ pl.profile = inst->fw_caps[PROFILE_H264].value;
+ pl.level = inst->fw_caps[LEVEL_H264].value;
+ } else {
+ pl.profile = inst->fw_caps[PROFILE_HEVC].value;
+ pl.level = inst->fw_caps[LEVEL_HEVC].value;
+ }
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_U32_ENUM,
+ &pl, sizeof(pl));
+}
+
+int iris_set_header_mode_gen1(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 header_mode = inst->fw_caps[cap_id].value;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+ u32 hfi_val;
+
+ if (header_mode == V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE)
+ hfi_val = 0;
+ else
+ hfi_val = 1;
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_U32,
+ &hfi_val, sizeof(u32));
+}
+
+int iris_set_header_mode_gen2(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 prepend_sps_pps = inst->fw_caps[PREPEND_SPSPPS_TO_IDR].value;
+ u32 header_mode = inst->fw_caps[cap_id].value;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+ u32 hfi_val;
+
+ if (prepend_sps_pps)
+ hfi_val = HFI_SEQ_HEADER_PREFIX_WITH_SYNC_FRAME;
+ else if (header_mode == V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME)
+ hfi_val = HFI_SEQ_HEADER_JOINED_WITH_1ST_FRAME;
+ else
+ hfi_val = HFI_SEQ_HEADER_SEPERATE_FRAME;
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_U32_ENUM,
+ &hfi_val, sizeof(u32));
+}
+
+int iris_set_bitrate(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 entropy_mode = inst->fw_caps[ENTROPY_MODE].value;
+ u32 bitrate = inst->fw_caps[cap_id].value;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+ u32 max_bitrate;
+
+ /* HEVC always uses CABAC; for H264 the cap depends on the entropy mode */
+ if (inst->codec == V4L2_PIX_FMT_HEVC ||
+     entropy_mode == V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC)
+ max_bitrate = CABAC_MAX_BITRATE;
+ else
+ max_bitrate = CAVLC_MAX_BITRATE;
+
+ bitrate = min(bitrate, max_bitrate);
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_U32,
+ &bitrate, sizeof(u32));
+}
+
+int iris_set_peak_bitrate(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 rc_mode = inst->fw_caps[BITRATE_MODE].value;
+ u32 peak_bitrate = inst->fw_caps[cap_id].value;
+ u32 bitrate = inst->fw_caps[BITRATE].value;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+
+ if (rc_mode != V4L2_MPEG_VIDEO_BITRATE_MODE_CBR)
+ return 0;
+
+ if (inst->fw_caps[cap_id].flags & CAP_FLAG_CLIENT_SET) {
+ if (peak_bitrate < bitrate)
+ peak_bitrate = bitrate;
+ } else {
+ peak_bitrate = bitrate;
+ }
+
+ inst->fw_caps[cap_id].value = peak_bitrate;
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_U32,
+ &peak_bitrate, sizeof(u32));
+}
+
+int iris_set_bitrate_mode_gen1(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 bitrate_mode = inst->fw_caps[BITRATE_MODE].value;
+ u32 frame_rc = inst->fw_caps[FRAME_RC_ENABLE].value;
+ u32 frame_skip = inst->fw_caps[FRAME_SKIP_MODE].value;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+ u32 rc_mode = 0;
+
+ if (!frame_rc)
+ rc_mode = HFI_RATE_CONTROL_OFF;
+ else if (bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR)
+ rc_mode = frame_skip ? HFI_RATE_CONTROL_VBR_VFR : HFI_RATE_CONTROL_VBR_CFR;
+ else if (bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR)
+ rc_mode = frame_skip ? HFI_RATE_CONTROL_CBR_VFR : HFI_RATE_CONTROL_CBR_CFR;
+ else if (bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ)
+ rc_mode = HFI_RATE_CONTROL_CQ;
+
+ inst->hfi_rc_type = rc_mode;
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_U32_ENUM,
+ &rc_mode, sizeof(u32));
+}
+
+int iris_set_bitrate_mode_gen2(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 bitrate_mode = inst->fw_caps[BITRATE_MODE].value;
+ u32 frame_rc = inst->fw_caps[FRAME_RC_ENABLE].value;
+ u32 frame_skip = inst->fw_caps[FRAME_SKIP_MODE].value;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+ u32 rc_mode = 0;
+
+ if (!frame_rc)
+ rc_mode = HFI_RC_OFF;
+ else if (bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR)
+ rc_mode = HFI_RC_VBR_CFR;
+ else if (bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR)
+ rc_mode = frame_skip ? HFI_RC_CBR_VFR : HFI_RC_CBR_CFR;
+ else if (bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ)
+ rc_mode = HFI_RC_CQ;
+
+ inst->hfi_rc_type = rc_mode;
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_U32_ENUM,
+ &rc_mode, sizeof(u32));
+}
+
+int iris_set_entropy_mode_gen1(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 entropy_mode = inst->fw_caps[cap_id].value;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+ u32 hfi_val;
+
+ if (inst->codec != V4L2_PIX_FMT_H264)
+ return 0;
+
+ hfi_val = (entropy_mode == V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC) ?
+ HFI_H264_ENTROPY_CAVLC : HFI_H264_ENTROPY_CABAC;
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_U32,
+ &hfi_val, sizeof(u32));
+}
+
+int iris_set_entropy_mode_gen2(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 entropy_mode = inst->fw_caps[cap_id].value;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+ u32 profile;
+
+ if (inst->codec != V4L2_PIX_FMT_H264)
+ return 0;
+
+ profile = inst->fw_caps[PROFILE_H264].value;
+
+ if (profile == V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE ||
+ profile == V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE)
+ entropy_mode = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC;
+
+ inst->fw_caps[cap_id].value = entropy_mode;
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_U32,
+ &entropy_mode, sizeof(u32));
+}
+
+int iris_set_min_qp(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 i_qp_enable = 0, p_qp_enable = 0, b_qp_enable = 0;
+ u32 i_frame_qp = 0, p_frame_qp = 0, b_frame_qp = 0;
+ u32 min_qp_enable = 0, client_qp_enable = 0;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+ u32 hfi_val;
+
+ if (inst->codec == V4L2_PIX_FMT_H264) {
+ if (inst->fw_caps[MIN_FRAME_QP_H264].flags & CAP_FLAG_CLIENT_SET)
+ min_qp_enable = 1;
+ if (min_qp_enable ||
+ (inst->fw_caps[I_FRAME_MIN_QP_H264].flags & CAP_FLAG_CLIENT_SET))
+ i_qp_enable = 1;
+ if (min_qp_enable ||
+ (inst->fw_caps[P_FRAME_MIN_QP_H264].flags & CAP_FLAG_CLIENT_SET))
+ p_qp_enable = 1;
+ if (min_qp_enable ||
+ (inst->fw_caps[B_FRAME_MIN_QP_H264].flags & CAP_FLAG_CLIENT_SET))
+ b_qp_enable = 1;
+ } else {
+ if (inst->fw_caps[MIN_FRAME_QP_HEVC].flags & CAP_FLAG_CLIENT_SET)
+ min_qp_enable = 1;
+ if (min_qp_enable ||
+ (inst->fw_caps[I_FRAME_MIN_QP_HEVC].flags & CAP_FLAG_CLIENT_SET))
+ i_qp_enable = 1;
+ if (min_qp_enable ||
+ (inst->fw_caps[P_FRAME_MIN_QP_HEVC].flags & CAP_FLAG_CLIENT_SET))
+ p_qp_enable = 1;
+ if (min_qp_enable ||
+ (inst->fw_caps[B_FRAME_MIN_QP_HEVC].flags & CAP_FLAG_CLIENT_SET))
+ b_qp_enable = 1;
+ }
+
+ client_qp_enable = i_qp_enable | p_qp_enable << 1 | b_qp_enable << 2;
+ if (!client_qp_enable)
+ return 0;
+
+ if (inst->codec == V4L2_PIX_FMT_H264) {
+ i_frame_qp = max(inst->fw_caps[I_FRAME_MIN_QP_H264].value,
+ inst->fw_caps[MIN_FRAME_QP_H264].value);
+ p_frame_qp = max(inst->fw_caps[P_FRAME_MIN_QP_H264].value,
+ inst->fw_caps[MIN_FRAME_QP_H264].value);
+ b_frame_qp = max(inst->fw_caps[B_FRAME_MIN_QP_H264].value,
+ inst->fw_caps[MIN_FRAME_QP_H264].value);
+ } else {
+ i_frame_qp = max(inst->fw_caps[I_FRAME_MIN_QP_HEVC].value,
+ inst->fw_caps[MIN_FRAME_QP_HEVC].value);
+ p_frame_qp = max(inst->fw_caps[P_FRAME_MIN_QP_HEVC].value,
+ inst->fw_caps[MIN_FRAME_QP_HEVC].value);
+ b_frame_qp = max(inst->fw_caps[B_FRAME_MIN_QP_HEVC].value,
+ inst->fw_caps[MIN_FRAME_QP_HEVC].value);
+ }
+
+ hfi_val = i_frame_qp | p_frame_qp << 8 | b_frame_qp << 16 | client_qp_enable << 24;
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_32_PACKED,
+ &hfi_val, sizeof(u32));
+}
+
+int iris_set_max_qp(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 i_qp_enable = 0, p_qp_enable = 0, b_qp_enable = 0;
+ u32 max_qp_enable = 0, client_qp_enable;
+ u32 i_frame_qp, p_frame_qp, b_frame_qp;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+ u32 hfi_val;
+
+ if (inst->codec == V4L2_PIX_FMT_H264) {
+ if (inst->fw_caps[MAX_FRAME_QP_H264].flags & CAP_FLAG_CLIENT_SET)
+ max_qp_enable = 1;
+ if (max_qp_enable ||
+ (inst->fw_caps[I_FRAME_MAX_QP_H264].flags & CAP_FLAG_CLIENT_SET))
+ i_qp_enable = 1;
+ if (max_qp_enable ||
+ (inst->fw_caps[P_FRAME_MAX_QP_H264].flags & CAP_FLAG_CLIENT_SET))
+ p_qp_enable = 1;
+ if (max_qp_enable ||
+ (inst->fw_caps[B_FRAME_MAX_QP_H264].flags & CAP_FLAG_CLIENT_SET))
+ b_qp_enable = 1;
+ } else {
+ if (inst->fw_caps[MAX_FRAME_QP_HEVC].flags & CAP_FLAG_CLIENT_SET)
+ max_qp_enable = 1;
+ if (max_qp_enable ||
+ (inst->fw_caps[I_FRAME_MAX_QP_HEVC].flags & CAP_FLAG_CLIENT_SET))
+ i_qp_enable = 1;
+ if (max_qp_enable ||
+ (inst->fw_caps[P_FRAME_MAX_QP_HEVC].flags & CAP_FLAG_CLIENT_SET))
+ p_qp_enable = 1;
+ if (max_qp_enable ||
+ (inst->fw_caps[B_FRAME_MAX_QP_HEVC].flags & CAP_FLAG_CLIENT_SET))
+ b_qp_enable = 1;
+ }
+
+ client_qp_enable = i_qp_enable | p_qp_enable << 1 | b_qp_enable << 2;
+ if (!client_qp_enable)
+ return 0;
+
+ if (inst->codec == V4L2_PIX_FMT_H264) {
+ i_frame_qp = min(inst->fw_caps[I_FRAME_MAX_QP_H264].value,
+ inst->fw_caps[MAX_FRAME_QP_H264].value);
+ p_frame_qp = min(inst->fw_caps[P_FRAME_MAX_QP_H264].value,
+ inst->fw_caps[MAX_FRAME_QP_H264].value);
+ b_frame_qp = min(inst->fw_caps[B_FRAME_MAX_QP_H264].value,
+ inst->fw_caps[MAX_FRAME_QP_H264].value);
+ } else {
+ i_frame_qp = min(inst->fw_caps[I_FRAME_MAX_QP_HEVC].value,
+ inst->fw_caps[MAX_FRAME_QP_HEVC].value);
+ p_frame_qp = min(inst->fw_caps[P_FRAME_MAX_QP_HEVC].value,
+ inst->fw_caps[MAX_FRAME_QP_HEVC].value);
+ b_frame_qp = min(inst->fw_caps[B_FRAME_MAX_QP_HEVC].value,
+ inst->fw_caps[MAX_FRAME_QP_HEVC].value);
+ }
+
+ hfi_val = i_frame_qp | p_frame_qp << 8 | b_frame_qp << 16 |
+ client_qp_enable << 24;
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_32_PACKED,
+ &hfi_val, sizeof(u32));
+}
+
+int iris_set_frame_qp(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ u32 i_qp_enable = 0, p_qp_enable = 0, b_qp_enable = 0, client_qp_enable;
+ u32 i_frame_qp, p_frame_qp, b_frame_qp;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+ struct vb2_queue *q;
+ u32 hfi_val;
+
+ q = v4l2_m2m_get_dst_vq(inst->m2m_ctx);
+ if (vb2_is_streaming(q)) {
+ if (inst->hfi_rc_type != HFI_RC_OFF)
+ return 0;
+ }
+
+ if (inst->hfi_rc_type == HFI_RC_OFF) {
+ i_qp_enable = 1;
+ p_qp_enable = 1;
+ b_qp_enable = 1;
+ } else {
+ if (inst->codec == V4L2_PIX_FMT_H264) {
+ if (inst->fw_caps[I_FRAME_QP_H264].flags & CAP_FLAG_CLIENT_SET)
+ i_qp_enable = 1;
+ if (inst->fw_caps[P_FRAME_QP_H264].flags & CAP_FLAG_CLIENT_SET)
+ p_qp_enable = 1;
+ if (inst->fw_caps[B_FRAME_QP_H264].flags & CAP_FLAG_CLIENT_SET)
+ b_qp_enable = 1;
+ } else {
+ if (inst->fw_caps[I_FRAME_QP_HEVC].flags & CAP_FLAG_CLIENT_SET)
+ i_qp_enable = 1;
+ if (inst->fw_caps[P_FRAME_QP_HEVC].flags & CAP_FLAG_CLIENT_SET)
+ p_qp_enable = 1;
+ if (inst->fw_caps[B_FRAME_QP_HEVC].flags & CAP_FLAG_CLIENT_SET)
+ b_qp_enable = 1;
+ }
+ }
+
+ client_qp_enable = i_qp_enable | p_qp_enable << 1 | b_qp_enable << 2;
+ if (!client_qp_enable)
+ return 0;
+
+ if (inst->codec == V4L2_PIX_FMT_H264) {
+ i_frame_qp = inst->fw_caps[I_FRAME_QP_H264].value;
+ p_frame_qp = inst->fw_caps[P_FRAME_QP_H264].value;
+ b_frame_qp = inst->fw_caps[B_FRAME_QP_H264].value;
+ } else {
+ i_frame_qp = inst->fw_caps[I_FRAME_QP_HEVC].value;
+ p_frame_qp = inst->fw_caps[P_FRAME_QP_HEVC].value;
+ b_frame_qp = inst->fw_caps[B_FRAME_QP_HEVC].value;
+ }
+
+ hfi_val = i_frame_qp | p_frame_qp << 8 | b_frame_qp << 16 |
+ client_qp_enable << 24;
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_32_PACKED,
+ &hfi_val, sizeof(u32));
+}
+
+int iris_set_qp_range(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ struct hfi_quantization_range_v2 range;
+ u32 hfi_id = inst->fw_caps[cap_id].hfi_id;
+
+ if (inst->codec == V4L2_PIX_FMT_HEVC) {
+ range.min_qp.qp_packed = inst->fw_caps[MIN_FRAME_QP_HEVC].value;
+ range.max_qp.qp_packed = inst->fw_caps[MAX_FRAME_QP_HEVC].value;
+ } else {
+ range.min_qp.qp_packed = inst->fw_caps[MIN_FRAME_QP_H264].value;
+ range.max_qp.qp_packed = inst->fw_caps[MAX_FRAME_QP_H264].value;
+ }
+
+ return hfi_ops->session_set_property(inst, hfi_id,
+ HFI_HOST_FLAGS_NONE,
+ iris_get_port_info(inst, cap_id),
+ HFI_PAYLOAD_32_PACKED,
+ &range, sizeof(range));
+}
+
int iris_set_properties(struct iris_inst *inst, u32 plane)
{
const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
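The three QP setters above share one packed-u32 payload layout: bits 0-7 carry the I-frame QP, bits 8-15 the P-frame QP, bits 16-23 the B-frame QP, and bits 24-26 the per-type enable mask. A worked example (illustrative only):

	u32 i_qp = 10, p_qp = 20, b_qp = 30;	/* 0x0a, 0x14, 0x1e */
	u32 enable = 0x7;			/* I, P and B all enabled */
	u32 hfi_val = i_qp | p_qp << 8 | b_qp << 16 | enable << 24;
	/* hfi_val == 0x071e140a */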
diff --git a/drivers/media/platform/qcom/iris/iris_ctrls.h b/drivers/media/platform/qcom/iris/iris_ctrls.h
index 9b5741868933..30af333cc494 100644
--- a/drivers/media/platform/qcom/iris/iris_ctrls.h
+++ b/drivers/media/platform/qcom/iris/iris_ctrls.h
@@ -17,6 +17,21 @@ int iris_set_u32_enum(struct iris_inst *inst, enum platform_inst_fw_cap_type cap
int iris_set_stage(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
int iris_set_pipe(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
int iris_set_u32(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_profile(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_level(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_profile_level_gen1(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_header_mode_gen1(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_header_mode_gen2(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_bitrate(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_peak_bitrate(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_bitrate_mode_gen1(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_bitrate_mode_gen2(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_entropy_mode_gen1(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_entropy_mode_gen2(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_min_qp(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_max_qp(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_frame_qp(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
+int iris_set_qp_range(struct iris_inst *inst, enum platform_inst_fw_cap_type cap_id);
int iris_set_properties(struct iris_inst *inst, u32 plane);
#endif
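The new setters are meant to be wired up through the per-platform capability tables rather than called directly. A hypothetical encoder table entry, shown only to illustrate the shape (the field values here are made up, not taken from a real platform file):

	{
		.cap_id = BITRATE_MODE,
		.min = V4L2_MPEG_VIDEO_BITRATE_MODE_VBR,
		.max = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR,
		.step_or_mask = BIT(V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) |
				BIT(V4L2_MPEG_VIDEO_BITRATE_MODE_CBR),
		.value = V4L2_MPEG_VIDEO_BITRATE_MODE_VBR,
		.hfi_id = HFI_PROPERTY_PARAM_VENC_RATE_CONTROL,
		.flags = CAP_FLAG_OUTPUT_PORT,
		.set = iris_set_bitrate_mode_gen1,
	},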
diff --git a/drivers/media/platform/qcom/iris/iris_firmware.c b/drivers/media/platform/qcom/iris/iris_firmware.c
index f1b5cd56db32..9ab499fad946 100644
--- a/drivers/media/platform/qcom/iris/iris_firmware.c
+++ b/drivers/media/platform/qcom/iris/iris_firmware.c
@@ -60,16 +60,7 @@ static int iris_load_fw_to_memory(struct iris_core *core, const char *fw_name)
ret = qcom_mdt_load(dev, firmware, fw_name,
pas_id, mem_virt, mem_phys, res_size, NULL);
- if (ret)
- goto err_mem_unmap;
-
- ret = qcom_scm_pas_auth_and_reset(pas_id);
- if (ret)
- goto err_mem_unmap;
-
- return ret;
-err_mem_unmap:
memunmap(mem_virt);
err_release_fw:
release_firmware(firmware);
@@ -94,6 +85,12 @@ int iris_fw_load(struct iris_core *core)
return -ENOMEM;
}
+ ret = qcom_scm_pas_auth_and_reset(core->iris_platform_data->pas_id);
+ if (ret) {
+ dev_err(core->dev, "auth and reset failed: %d\n", ret);
+ return ret;
+ }
+
ret = qcom_scm_mem_protect_video_var(cp_config->cp_start,
cp_config->cp_size,
cp_config->cp_nonpixel_start,
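The net effect in iris_fw_load() is a strict ordering: load the signed image into the carveout, authenticate and de-assert reset via SCM, then protect the content-protection ranges. A condensed sketch of the resulting flow (error handling elided, not verbatim driver code):

	ret = iris_load_fw_to_memory(core, fw_name);	/* qcom_mdt_load + unmap */
	ret = qcom_scm_pas_auth_and_reset(pas_id);	/* now done by the caller */
	ret = qcom_scm_mem_protect_video_var(cp_config->cp_start,
					     cp_config->cp_size,
					     cp_config->cp_nonpixel_start,
					     cp_config->cp_nonpixel_size);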
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_common.h b/drivers/media/platform/qcom/iris/iris_hfi_common.h
index 9e6aadb83783..b51471fb32c7 100644
--- a/drivers/media/platform/qcom/iris/iris_hfi_common.h
+++ b/drivers/media/platform/qcom/iris/iris_hfi_common.h
@@ -102,7 +102,7 @@ enum hfi_matrix_coefficients {
struct iris_hfi_prop_type_handle {
u32 type;
- int (*handle)(struct iris_inst *inst);
+ int (*handle)(struct iris_inst *inst, u32 plane);
};
struct iris_hfi_command_ops {
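With the queue type threaded through, side-specific handlers can bail out early instead of needing separate input and output tables. A self-contained sketch against the widened signature, modeled on the gen1 frame-rate setter added later in this patch (the function name is hypothetical):

	static int iris_hfi_gen1_set_example(struct iris_inst *inst, u32 plane)
	{
		const u32 ptype = HFI_PROPERTY_CONFIG_FRAME_RATE;
		struct hfi_framerate frate;

		if (V4L2_TYPE_IS_OUTPUT(plane))
			return 0;	/* this property is capture-side only */

		frate.buffer_type = HFI_BUFFER_OUTPUT;
		frate.framerate = inst->frame_rate << 16;	/* Q16.16 */

		return hfi_gen1_set_property(inst, ptype, &frate, sizeof(frate));
	}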
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c b/drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c
index 5fc30d54af4d..e1788c266bb1 100644
--- a/drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c
@@ -21,6 +21,10 @@ static u32 iris_hfi_gen1_buf_type_from_driver(enum iris_buffer_type buffer_type)
return HFI_BUFFER_INTERNAL_SCRATCH;
case BUF_SCRATCH_1:
return HFI_BUFFER_INTERNAL_SCRATCH_1;
+ case BUF_SCRATCH_2:
+ return HFI_BUFFER_INTERNAL_SCRATCH_2;
+ case BUF_ARP:
+ return HFI_BUFFER_INTERNAL_PERSIST;
default:
return -EINVAL;
}
@@ -109,7 +113,12 @@ static int iris_hfi_gen1_session_open(struct iris_inst *inst)
packet.shdr.hdr.size = sizeof(struct hfi_session_open_pkt);
packet.shdr.hdr.pkt_type = HFI_CMD_SYS_SESSION_INIT;
packet.shdr.session_id = inst->session_id;
- packet.session_domain = HFI_SESSION_TYPE_DEC;
+
+ if (inst->domain == DECODER)
+ packet.session_domain = HFI_SESSION_TYPE_DEC;
+ else
+ packet.session_domain = HFI_SESSION_TYPE_ENC;
+
packet.session_codec = codec;
reinit_completion(&inst->completion);
@@ -184,47 +193,70 @@ static int iris_hfi_gen1_session_stop(struct iris_inst *inst, u32 plane)
u32 flush_type = 0;
int ret = 0;
- if ((V4L2_TYPE_IS_OUTPUT(plane) &&
- inst->state == IRIS_INST_INPUT_STREAMING) ||
- (V4L2_TYPE_IS_CAPTURE(plane) &&
- inst->state == IRIS_INST_OUTPUT_STREAMING) ||
- inst->state == IRIS_INST_ERROR) {
- reinit_completion(&inst->completion);
- iris_hfi_gen1_packet_session_cmd(inst, &pkt, HFI_CMD_SESSION_STOP);
- ret = iris_hfi_queue_cmd_write(core, &pkt, pkt.shdr.hdr.size);
- if (!ret)
- ret = iris_wait_for_session_response(inst, false);
-
- reinit_completion(&inst->completion);
- iris_hfi_gen1_packet_session_cmd(inst, &pkt, HFI_CMD_SESSION_RELEASE_RESOURCES);
- ret = iris_hfi_queue_cmd_write(core, &pkt, pkt.shdr.hdr.size);
- if (!ret)
- ret = iris_wait_for_session_response(inst, false);
-
- iris_inst_change_sub_state(inst, IRIS_INST_SUB_LOAD_RESOURCES, 0);
+ if (inst->domain == DECODER) {
+ if (inst->state == IRIS_INST_STREAMING) {
+ if (V4L2_TYPE_IS_OUTPUT(plane))
+ flush_type = HFI_FLUSH_ALL;
+ else if (V4L2_TYPE_IS_CAPTURE(plane))
+ flush_type = HFI_FLUSH_OUTPUT;
+
+ reinit_completion(&inst->flush_completion);
+
+ flush_pkt.shdr.hdr.size = sizeof(struct hfi_session_flush_pkt);
+ flush_pkt.shdr.hdr.pkt_type = HFI_CMD_SESSION_FLUSH;
+ flush_pkt.shdr.session_id = inst->session_id;
+ flush_pkt.flush_type = flush_type;
+
+ ret = iris_hfi_queue_cmd_write(core, &flush_pkt, flush_pkt.shdr.hdr.size);
+ if (!ret) {
+ inst->flush_responses_pending++;
+ ret = iris_wait_for_session_response(inst, true);
+ }
+ } else if (inst->sub_state & IRIS_INST_SUB_LOAD_RESOURCES) {
+ reinit_completion(&inst->completion);
+ iris_hfi_gen1_packet_session_cmd(inst, &pkt, HFI_CMD_SESSION_STOP);
+ ret = iris_hfi_queue_cmd_write(core, &pkt, pkt.shdr.hdr.size);
+ if (!ret)
+ ret = iris_wait_for_session_response(inst, false);
+
+ reinit_completion(&inst->completion);
+ iris_hfi_gen1_packet_session_cmd(inst, &pkt,
+ HFI_CMD_SESSION_RELEASE_RESOURCES);
+ ret = iris_hfi_queue_cmd_write(core, &pkt, pkt.shdr.hdr.size);
+ if (!ret)
+ ret = iris_wait_for_session_response(inst, false);
+
+ iris_inst_change_sub_state(inst, IRIS_INST_SUB_LOAD_RESOURCES, 0);
+
+ iris_helper_buffers_done(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ VB2_BUF_STATE_ERROR);
+ iris_helper_buffers_done(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ VB2_BUF_STATE_ERROR);
+ }
+ } else {
+ if (inst->state == IRIS_INST_STREAMING ||
+ inst->state == IRIS_INST_INPUT_STREAMING ||
+ inst->state == IRIS_INST_ERROR) {
+ reinit_completion(&inst->completion);
+ iris_hfi_gen1_packet_session_cmd(inst, &pkt, HFI_CMD_SESSION_STOP);
+ ret = iris_hfi_queue_cmd_write(core, &pkt, pkt.shdr.hdr.size);
+ if (!ret)
+ ret = iris_wait_for_session_response(inst, false);
+
+ reinit_completion(&inst->completion);
+ iris_hfi_gen1_packet_session_cmd(inst, &pkt,
+ HFI_CMD_SESSION_RELEASE_RESOURCES);
+ ret = iris_hfi_queue_cmd_write(core, &pkt, pkt.shdr.hdr.size);
+ if (!ret)
+ ret = iris_wait_for_session_response(inst, false);
+
+ iris_inst_change_sub_state(inst, IRIS_INST_SUB_LOAD_RESOURCES, 0);
+ }
iris_helper_buffers_done(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
VB2_BUF_STATE_ERROR);
iris_helper_buffers_done(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
VB2_BUF_STATE_ERROR);
- } else if (inst->state == IRIS_INST_STREAMING) {
- if (V4L2_TYPE_IS_OUTPUT(plane))
- flush_type = HFI_FLUSH_ALL;
- else if (V4L2_TYPE_IS_CAPTURE(plane))
- flush_type = HFI_FLUSH_OUTPUT;
-
- reinit_completion(&inst->flush_completion);
-
- flush_pkt.shdr.hdr.size = sizeof(struct hfi_session_flush_pkt);
- flush_pkt.shdr.hdr.pkt_type = HFI_CMD_SESSION_FLUSH;
- flush_pkt.shdr.session_id = inst->session_id;
- flush_pkt.flush_type = flush_type;
-
- ret = iris_hfi_queue_cmd_write(core, &flush_pkt, flush_pkt.shdr.hdr.size);
- if (!ret) {
- inst->flush_responses_pending++;
- ret = iris_wait_for_session_response(inst, true);
- }
}
return ret;
@@ -241,23 +273,44 @@ static int iris_hfi_gen1_session_continue(struct iris_inst *inst, u32 plane)
static int iris_hfi_gen1_queue_input_buffer(struct iris_inst *inst, struct iris_buffer *buf)
{
- struct hfi_session_empty_buffer_compressed_pkt ip_pkt;
-
- ip_pkt.shdr.hdr.size = sizeof(struct hfi_session_empty_buffer_compressed_pkt);
- ip_pkt.shdr.hdr.pkt_type = HFI_CMD_SESSION_EMPTY_BUFFER;
- ip_pkt.shdr.session_id = inst->session_id;
- ip_pkt.time_stamp_hi = upper_32_bits(buf->timestamp);
- ip_pkt.time_stamp_lo = lower_32_bits(buf->timestamp);
- ip_pkt.flags = buf->flags;
- ip_pkt.mark_target = 0;
- ip_pkt.mark_data = 0;
- ip_pkt.offset = buf->data_offset;
- ip_pkt.alloc_len = buf->buffer_size;
- ip_pkt.filled_len = buf->data_size;
- ip_pkt.input_tag = buf->index;
- ip_pkt.packet_buffer = buf->device_addr;
-
- return iris_hfi_queue_cmd_write(inst->core, &ip_pkt, ip_pkt.shdr.hdr.size);
+ struct hfi_session_empty_buffer_compressed_pkt com_ip_pkt;
+ struct hfi_session_empty_buffer_uncompressed_pkt uncom_ip_pkt;
+
+ if (inst->domain == DECODER) {
+ com_ip_pkt.shdr.hdr.size = sizeof(struct hfi_session_empty_buffer_compressed_pkt);
+ com_ip_pkt.shdr.hdr.pkt_type = HFI_CMD_SESSION_EMPTY_BUFFER;
+ com_ip_pkt.shdr.session_id = inst->session_id;
+ com_ip_pkt.time_stamp_hi = upper_32_bits(buf->timestamp);
+ com_ip_pkt.time_stamp_lo = lower_32_bits(buf->timestamp);
+ com_ip_pkt.flags = buf->flags;
+ com_ip_pkt.mark_target = 0;
+ com_ip_pkt.mark_data = 0;
+ com_ip_pkt.offset = buf->data_offset;
+ com_ip_pkt.alloc_len = buf->buffer_size;
+ com_ip_pkt.filled_len = buf->data_size;
+ com_ip_pkt.input_tag = buf->index;
+ com_ip_pkt.packet_buffer = buf->device_addr;
+ return iris_hfi_queue_cmd_write(inst->core, &com_ip_pkt,
+ com_ip_pkt.shdr.hdr.size);
+ } else {
+ uncom_ip_pkt.shdr.hdr.size =
+ sizeof(struct hfi_session_empty_buffer_uncompressed_pkt);
+ uncom_ip_pkt.shdr.hdr.pkt_type = HFI_CMD_SESSION_EMPTY_BUFFER;
+ uncom_ip_pkt.shdr.session_id = inst->session_id;
+ uncom_ip_pkt.time_stamp_hi = upper_32_bits(buf->timestamp);
+ uncom_ip_pkt.time_stamp_lo = lower_32_bits(buf->timestamp);
+ uncom_ip_pkt.view_id = 0;
+ uncom_ip_pkt.flags = buf->flags;
+ uncom_ip_pkt.mark_target = 0;
+ uncom_ip_pkt.mark_data = 0;
+ uncom_ip_pkt.offset = buf->data_offset;
+ uncom_ip_pkt.alloc_len = buf->buffer_size;
+ uncom_ip_pkt.filled_len = buf->data_size;
+ uncom_ip_pkt.input_tag = buf->index;
+ uncom_ip_pkt.packet_buffer = buf->device_addr;
+ return iris_hfi_queue_cmd_write(inst->core, &uncom_ip_pkt,
+ uncom_ip_pkt.shdr.hdr.size);
+ }
}
static int iris_hfi_gen1_queue_output_buffer(struct iris_inst *inst, struct iris_buffer *buf)
@@ -330,6 +383,8 @@ static int iris_hfi_gen1_session_queue_buffer(struct iris_inst *inst, struct iri
case BUF_PERSIST:
case BUF_BIN:
case BUF_SCRATCH_1:
+ case BUF_SCRATCH_2:
+ case BUF_ARP:
return iris_hfi_gen1_queue_internal_buffer(inst, buf);
default:
return -EINVAL;
@@ -395,16 +450,31 @@ exit:
static int iris_hfi_gen1_session_drain(struct iris_inst *inst, u32 plane)
{
- struct hfi_session_empty_buffer_compressed_pkt ip_pkt = {0};
+ if (inst->domain == DECODER) {
+ struct hfi_session_empty_buffer_compressed_pkt ip_pkt = {0};
+
+ ip_pkt.shdr.hdr.size = sizeof(struct hfi_session_empty_buffer_compressed_pkt);
+ ip_pkt.shdr.hdr.pkt_type = HFI_CMD_SESSION_EMPTY_BUFFER;
+ ip_pkt.shdr.session_id = inst->session_id;
+ ip_pkt.flags = HFI_BUFFERFLAG_EOS;
+ /* dummy device address: this EOS-only packet carries no real buffer */
+ ip_pkt.packet_buffer = 0xdeadb000;
+
+ return iris_hfi_queue_cmd_write(inst->core, &ip_pkt, ip_pkt.shdr.hdr.size);
+ }
+
+ if (inst->domain == ENCODER) {
+ struct hfi_session_empty_buffer_uncompressed_pkt ip_pkt = {0};
- ip_pkt.shdr.hdr.size = sizeof(struct hfi_session_empty_buffer_compressed_pkt);
- ip_pkt.shdr.hdr.pkt_type = HFI_CMD_SESSION_EMPTY_BUFFER;
- ip_pkt.shdr.session_id = inst->session_id;
- ip_pkt.flags = HFI_BUFFERFLAG_EOS;
- if (inst->codec == V4L2_PIX_FMT_VP9)
+ ip_pkt.shdr.hdr.size = sizeof(struct hfi_session_empty_buffer_uncompressed_pkt);
+ ip_pkt.shdr.hdr.pkt_type = HFI_CMD_SESSION_EMPTY_BUFFER;
+ ip_pkt.shdr.session_id = inst->session_id;
+ ip_pkt.flags = HFI_BUFFERFLAG_EOS;
ip_pkt.packet_buffer = 0xdeadb000;
- return iris_hfi_queue_cmd_write(inst->core, &ip_pkt, ip_pkt.shdr.hdr.size);
+ return iris_hfi_queue_cmd_write(inst->core, &ip_pkt, ip_pkt.shdr.hdr.size);
+ }
+
+ return -EINVAL;
}
static int
@@ -507,6 +577,114 @@ iris_hfi_gen1_packet_session_set_property(struct hfi_session_set_property_pkt *p
packet->shdr.hdr.size += sizeof(u32) + sizeof(*wm);
break;
}
+ case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT: {
+ struct hfi_profile_level *in = pdata, *pl = prop_data;
+
+ pl->level = in->level;
+ pl->profile = in->profile;
+ if (!pl->profile)
+ /* Profile not supported, falling back to high */
+ pl->profile = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH;
+
+ if (!pl->level)
+ /* Level not supported, falling back to 1 */
+ pl->level = 1;
+
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*pl);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER: {
+ struct hfi_enable *en = prop_data;
+ u32 *in = pdata;
+
+ en->enable = *in;
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE: {
+ struct hfi_bitrate *brate = prop_data;
+ u32 *in = pdata;
+
+ brate->bitrate = *in;
+ brate->layer_id = 0;
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*brate);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_RATE_CONTROL: {
+ u32 *in = pdata;
+
+ switch (*in) {
+ case HFI_RATE_CONTROL_OFF:
+ case HFI_RATE_CONTROL_CBR_CFR:
+ case HFI_RATE_CONTROL_CBR_VFR:
+ case HFI_RATE_CONTROL_VBR_CFR:
+ case HFI_RATE_CONTROL_VBR_VFR:
+ case HFI_RATE_CONTROL_CQ:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ packet->data[1] = *in;
+ packet->shdr.hdr.size += sizeof(u32) * 2;
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL: {
+ struct hfi_h264_entropy_control *entropy = prop_data;
+ u32 *in = pdata;
+
+ entropy->entropy_mode = *in;
+ if (entropy->entropy_mode == HFI_H264_ENTROPY_CABAC)
+ entropy->cabac_model = HFI_H264_CABAC_MODEL_0;
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*entropy);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE_V2: {
+ struct hfi_quantization_range_v2 *range = prop_data;
+ struct hfi_quantization_range_v2 *in = pdata;
+ u32 min_qp, max_qp;
+
+ min_qp = in->min_qp.qp_packed;
+ max_qp = in->max_qp.qp_packed;
+
+ /*
+ * We'll be packing in the qp, so make sure we
+ * won't be losing data when masking.
+ */
+ if (min_qp > 0xff || max_qp > 0xff)
+ return -ERANGE;
+
+ range->min_qp.layer_id = 0xFF;
+ range->max_qp.layer_id = 0xFF;
+ range->min_qp.qp_packed = (min_qp & 0xFF) | ((min_qp & 0xFF) << 8) |
+ ((min_qp & 0xFF) << 16);
+ range->max_qp.qp_packed = (max_qp & 0xFF) | ((max_qp & 0xFF) << 8) |
+ ((max_qp & 0xFF) << 16);
+ range->min_qp.enable = 7;
+ range->max_qp.enable = 7;
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*range);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_FRAME_RATE: {
+ struct hfi_framerate *frate = prop_data;
+ struct hfi_framerate *in = pdata;
+
+ frate->buffer_type = in->buffer_type;
+ frate->framerate = in->framerate;
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*frate);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO: {
+ struct hfi_uncompressed_plane_actual_info *plane_actual_info = prop_data;
+ struct hfi_uncompressed_plane_actual_info *in = pdata;
+
+ plane_actual_info->buffer_type = in->buffer_type;
+ plane_actual_info->num_planes = in->num_planes;
+ plane_actual_info->plane_format[0] = in->plane_format[0];
+ if (in->num_planes > 1)
+ plane_actual_info->plane_format[1] = in->plane_format[1];
+ packet->shdr.hdr.size += sizeof(u32) + sizeof(*plane_actual_info);
+ break;
+ }
default:
return -EINVAL;
}
@@ -549,7 +727,7 @@ static int iris_hfi_gen1_session_set_property(struct iris_inst *inst, u32 packet
return hfi_gen1_set_property(inst, packet_type, payload, payload_size);
}
-static int iris_hfi_gen1_set_resolution(struct iris_inst *inst)
+static int iris_hfi_gen1_set_resolution(struct iris_inst *inst, u32 plane)
{
u32 ptype = HFI_PROPERTY_PARAM_FRAME_SIZE;
struct hfi_framesize fs;
@@ -564,14 +742,18 @@ static int iris_hfi_gen1_set_resolution(struct iris_inst *inst)
if (ret)
return ret;
}
- fs.buffer_type = HFI_BUFFER_OUTPUT2;
+ if (inst->domain == DECODER)
+ fs.buffer_type = HFI_BUFFER_OUTPUT2;
+ else
+ fs.buffer_type = HFI_BUFFER_OUTPUT;
+
fs.width = inst->fmt_dst->fmt.pix_mp.width;
fs.height = inst->fmt_dst->fmt.pix_mp.height;
return hfi_gen1_set_property(inst, ptype, &fs, sizeof(fs));
}
-static int iris_hfi_gen1_decide_core(struct iris_inst *inst)
+static int iris_hfi_gen1_decide_core(struct iris_inst *inst, u32 plane)
{
const u32 ptype = HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE;
struct hfi_videocores_usage_type cu;
@@ -581,36 +763,45 @@ static int iris_hfi_gen1_decide_core(struct iris_inst *inst)
return hfi_gen1_set_property(inst, ptype, &cu, sizeof(cu));
}
-static int iris_hfi_gen1_set_raw_format(struct iris_inst *inst)
+static int iris_hfi_gen1_set_raw_format(struct iris_inst *inst, u32 plane)
{
const u32 ptype = HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT;
- u32 pixelformat = inst->fmt_dst->fmt.pix_mp.pixelformat;
struct hfi_uncompressed_format_select fmt;
+ u32 pixelformat;
int ret;
- if (iris_split_mode_enabled(inst)) {
- fmt.buffer_type = HFI_BUFFER_OUTPUT;
- fmt.format = pixelformat == V4L2_PIX_FMT_NV12 ? HFI_COLOR_FORMAT_NV12_UBWC : 0;
+ if (inst->domain == DECODER) {
+ pixelformat = inst->fmt_dst->fmt.pix_mp.pixelformat;
+ if (iris_split_mode_enabled(inst)) {
+ fmt.buffer_type = HFI_BUFFER_OUTPUT;
+ fmt.format = pixelformat == V4L2_PIX_FMT_NV12 ?
+ HFI_COLOR_FORMAT_NV12_UBWC : 0;
- ret = hfi_gen1_set_property(inst, ptype, &fmt, sizeof(fmt));
- if (ret)
- return ret;
+ ret = hfi_gen1_set_property(inst, ptype, &fmt, sizeof(fmt));
+ if (ret)
+ return ret;
- fmt.buffer_type = HFI_BUFFER_OUTPUT2;
- fmt.format = pixelformat == V4L2_PIX_FMT_NV12 ? HFI_COLOR_FORMAT_NV12 : 0;
+ fmt.buffer_type = HFI_BUFFER_OUTPUT2;
+ fmt.format = pixelformat == V4L2_PIX_FMT_NV12 ? HFI_COLOR_FORMAT_NV12 : 0;
- ret = hfi_gen1_set_property(inst, ptype, &fmt, sizeof(fmt));
+ ret = hfi_gen1_set_property(inst, ptype, &fmt, sizeof(fmt));
+ } else {
+ fmt.buffer_type = HFI_BUFFER_OUTPUT;
+ fmt.format = pixelformat == V4L2_PIX_FMT_NV12 ? HFI_COLOR_FORMAT_NV12 : 0;
+
+ ret = hfi_gen1_set_property(inst, ptype, &fmt, sizeof(fmt));
+ }
} else {
- fmt.buffer_type = HFI_BUFFER_OUTPUT;
+ pixelformat = inst->fmt_src->fmt.pix_mp.pixelformat;
+ fmt.buffer_type = HFI_BUFFER_INPUT;
fmt.format = pixelformat == V4L2_PIX_FMT_NV12 ? HFI_COLOR_FORMAT_NV12 : 0;
-
ret = hfi_gen1_set_property(inst, ptype, &fmt, sizeof(fmt));
}
return ret;
}
-static int iris_hfi_gen1_set_format_constraints(struct iris_inst *inst)
+static int iris_hfi_gen1_set_format_constraints(struct iris_inst *inst, u32 plane)
{
const u32 ptype = HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO;
struct hfi_uncompressed_plane_actual_constraints_info pconstraint;
@@ -630,7 +821,7 @@ static int iris_hfi_gen1_set_format_constraints(struct iris_inst *inst)
return hfi_gen1_set_property(inst, ptype, &pconstraint, sizeof(pconstraint));
}
-static int iris_hfi_gen1_set_num_bufs(struct iris_inst *inst)
+static int iris_hfi_gen1_set_num_bufs(struct iris_inst *inst, u32 plane)
{
u32 ptype = HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL;
struct hfi_buffer_count_actual buf_count;
@@ -644,20 +835,28 @@ static int iris_hfi_gen1_set_num_bufs(struct iris_inst *inst)
if (ret)
return ret;
- if (iris_split_mode_enabled(inst)) {
- buf_count.type = HFI_BUFFER_OUTPUT;
- buf_count.count_actual = VIDEO_MAX_FRAME;
- buf_count.count_min_host = VIDEO_MAX_FRAME;
+ if (inst->domain == DECODER) {
+ if (iris_split_mode_enabled(inst)) {
+ buf_count.type = HFI_BUFFER_OUTPUT;
+ buf_count.count_actual = VIDEO_MAX_FRAME;
+ buf_count.count_min_host = VIDEO_MAX_FRAME;
- ret = hfi_gen1_set_property(inst, ptype, &buf_count, sizeof(buf_count));
- if (ret)
- return ret;
+ ret = hfi_gen1_set_property(inst, ptype, &buf_count, sizeof(buf_count));
+ if (ret)
+ return ret;
- buf_count.type = HFI_BUFFER_OUTPUT2;
- buf_count.count_actual = iris_vpu_buf_count(inst, BUF_DPB);
- buf_count.count_min_host = iris_vpu_buf_count(inst, BUF_DPB);
+ buf_count.type = HFI_BUFFER_OUTPUT2;
+ buf_count.count_actual = iris_vpu_buf_count(inst, BUF_DPB);
+ buf_count.count_min_host = iris_vpu_buf_count(inst, BUF_DPB);
- ret = hfi_gen1_set_property(inst, ptype, &buf_count, sizeof(buf_count));
+ ret = hfi_gen1_set_property(inst, ptype, &buf_count, sizeof(buf_count));
+ } else {
+ buf_count.type = HFI_BUFFER_OUTPUT;
+ buf_count.count_actual = VIDEO_MAX_FRAME;
+ buf_count.count_min_host = VIDEO_MAX_FRAME;
+
+ ret = hfi_gen1_set_property(inst, ptype, &buf_count, sizeof(buf_count));
+ }
} else {
buf_count.type = HFI_BUFFER_OUTPUT;
buf_count.count_actual = VIDEO_MAX_FRAME;
@@ -669,7 +868,7 @@ static int iris_hfi_gen1_set_num_bufs(struct iris_inst *inst)
return ret;
}
-static int iris_hfi_gen1_set_multistream(struct iris_inst *inst)
+static int iris_hfi_gen1_set_multistream(struct iris_inst *inst, u32 plane)
{
u32 ptype = HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM;
struct hfi_multi_stream multi = {0};
@@ -704,7 +903,7 @@ static int iris_hfi_gen1_set_multistream(struct iris_inst *inst)
return ret;
}
-static int iris_hfi_gen1_set_bufsize(struct iris_inst *inst)
+static int iris_hfi_gen1_set_bufsize(struct iris_inst *inst, u32 plane)
{
const u32 ptype = HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL;
struct hfi_buffer_size_actual bufsz;
@@ -712,7 +911,7 @@ static int iris_hfi_gen1_set_bufsize(struct iris_inst *inst)
if (iris_split_mode_enabled(inst)) {
bufsz.type = HFI_BUFFER_OUTPUT;
- bufsz.size = iris_vpu_buf_size(inst, BUF_DPB);
+ bufsz.size = inst->core->iris_platform_data->get_vpu_buffer_size(inst, BUF_DPB);
ret = hfi_gen1_set_property(inst, ptype, &bufsz, sizeof(bufsz));
if (ret)
@@ -739,14 +938,49 @@ static int iris_hfi_gen1_set_bufsize(struct iris_inst *inst)
return ret;
}
+static int iris_hfi_gen1_set_frame_rate(struct iris_inst *inst, u32 plane)
+{
+ const u32 ptype = HFI_PROPERTY_CONFIG_FRAME_RATE;
+ struct hfi_framerate frate;
+
+ if (V4L2_TYPE_IS_OUTPUT(plane))
+ return 0;
+
+ frate.buffer_type = HFI_BUFFER_OUTPUT;
+ /* firmware expects the frame rate in Q16.16 fixed point */
+ frate.framerate = inst->frame_rate << 16;
+
+ return hfi_gen1_set_property(inst, ptype, &frate, sizeof(frate));
+}
+
+static int iris_hfi_gen1_set_stride(struct iris_inst *inst, u32 plane)
+{
+ const u32 ptype = HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO;
+ struct hfi_uncompressed_plane_actual_info plane_actual_info;
+
+ plane_actual_info.buffer_type = HFI_BUFFER_INPUT;
+ plane_actual_info.num_planes = 2;
+ plane_actual_info.plane_format[0].actual_stride =
+ ALIGN(inst->fmt_src->fmt.pix_mp.width, 128);
+ plane_actual_info.plane_format[0].actual_plane_buffer_height =
+ ALIGN(inst->fmt_src->fmt.pix_mp.height, 32);
+ plane_actual_info.plane_format[1].actual_stride =
+ ALIGN(inst->fmt_src->fmt.pix_mp.width, 128);
+ plane_actual_info.plane_format[1].actual_plane_buffer_height =
+ (ALIGN(inst->fmt_src->fmt.pix_mp.height, 32)) / 2;
+
+ return hfi_gen1_set_property(inst, ptype, &plane_actual_info, sizeof(plane_actual_info));
+}
+
static int iris_hfi_gen1_session_set_config_params(struct iris_inst *inst, u32 plane)
{
+ const struct iris_hfi_prop_type_handle *handler = NULL;
+ u32 handler_size = 0;
struct iris_core *core = inst->core;
u32 config_params_size, i, j;
const u32 *config_params;
int ret;
- static const struct iris_hfi_prop_type_handle prop_type_handle_inp_arr[] = {
+ static const struct iris_hfi_prop_type_handle vdec_prop_type_handle_inp_arr[] = {
{HFI_PROPERTY_PARAM_FRAME_SIZE,
iris_hfi_gen1_set_resolution},
{HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE,
@@ -763,7 +997,7 @@ static int iris_hfi_gen1_session_set_config_params(struct iris_inst *inst, u32 p
iris_hfi_gen1_set_bufsize},
};
- static const struct iris_hfi_prop_type_handle prop_type_handle_out_arr[] = {
+ static const struct iris_hfi_prop_type_handle vdec_prop_type_handle_out_arr[] = {
{HFI_PROPERTY_PARAM_FRAME_SIZE,
iris_hfi_gen1_set_resolution},
{HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT,
@@ -778,29 +1012,43 @@ static int iris_hfi_gen1_session_set_config_params(struct iris_inst *inst, u32 p
iris_hfi_gen1_set_bufsize},
};
- config_params = core->iris_platform_data->input_config_params_default;
- config_params_size = core->iris_platform_data->input_config_params_default_size;
-
- if (V4L2_TYPE_IS_OUTPUT(plane)) {
- for (i = 0; i < config_params_size; i++) {
- for (j = 0; j < ARRAY_SIZE(prop_type_handle_inp_arr); j++) {
- if (prop_type_handle_inp_arr[j].type == config_params[i]) {
- ret = prop_type_handle_inp_arr[j].handle(inst);
- if (ret)
- return ret;
- break;
- }
- }
+ static const struct iris_hfi_prop_type_handle venc_prop_type_handle_inp_arr[] = {
+ {HFI_PROPERTY_CONFIG_FRAME_RATE,
+ iris_hfi_gen1_set_frame_rate},
+ {HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO,
+ iris_hfi_gen1_set_stride},
+ {HFI_PROPERTY_PARAM_FRAME_SIZE,
+ iris_hfi_gen1_set_resolution},
+ {HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT,
+ iris_hfi_gen1_set_raw_format},
+ {HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL,
+ iris_hfi_gen1_set_num_bufs},
+ };
+
+ if (inst->domain == DECODER) {
+ config_params = core->iris_platform_data->dec_input_config_params_default;
+ config_params_size = core->iris_platform_data->dec_input_config_params_default_size;
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ handler = vdec_prop_type_handle_inp_arr;
+ handler_size = ARRAY_SIZE(vdec_prop_type_handle_inp_arr);
+ } else if (V4L2_TYPE_IS_CAPTURE(plane)) {
+ handler = vdec_prop_type_handle_out_arr;
+ handler_size = ARRAY_SIZE(vdec_prop_type_handle_out_arr);
}
- } else if (V4L2_TYPE_IS_CAPTURE(plane)) {
- for (i = 0; i < config_params_size; i++) {
- for (j = 0; j < ARRAY_SIZE(prop_type_handle_out_arr); j++) {
- if (prop_type_handle_out_arr[j].type == config_params[i]) {
- ret = prop_type_handle_out_arr[j].handle(inst);
- if (ret)
- return ret;
- break;
- }
+ } else {
+ config_params = core->iris_platform_data->enc_input_config_params;
+ config_params_size = core->iris_platform_data->enc_input_config_params_size;
+ handler = venc_prop_type_handle_inp_arr;
+ handler_size = ARRAY_SIZE(venc_prop_type_handle_inp_arr);
+ }
+
+ for (i = 0; i < config_params_size; i++) {
+ for (j = 0; j < handler_size; j++) {
+ if (handler[j].type == config_params[i]) {
+ ret = handler[j].handle(inst, plane);
+ if (ret)
+ return ret;
+ break;
}
}
}
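The V2 QP-range property above replicates a single QP across the three per-frame-type bytes and turns all three on via the 3-bit enable mask (layer_id 0xff presumably meaning "all layers"). A worked example (illustrative):

	u32 min_qp = 10;	/* 0x0a */
	u32 packed = (min_qp & 0xff) | ((min_qp & 0xff) << 8) |
		     ((min_qp & 0xff) << 16);
	/* packed == 0x000a0a0a: the same QP in the I, P and B bytes;
	 * enable == 7 then applies it to all three frame types
	 */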
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen1_defines.h b/drivers/media/platform/qcom/iris/iris_hfi_gen1_defines.h
index d4d119ca98b0..42226ccee3d9 100644
--- a/drivers/media/platform/qcom/iris/iris_hfi_gen1_defines.h
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen1_defines.h
@@ -10,6 +10,7 @@
#define HFI_VIDEO_ARCH_OX 0x1
+#define HFI_SESSION_TYPE_ENC 1
#define HFI_SESSION_TYPE_DEC 2
#define HFI_VIDEO_CODEC_H264 0x00000002
@@ -73,18 +74,22 @@
#define HFI_BUFFER_INPUT 0x1
#define HFI_BUFFER_OUTPUT 0x2
#define HFI_BUFFER_OUTPUT2 0x3
+#define HFI_BUFFER_INTERNAL_PERSIST 0x4
#define HFI_BUFFER_INTERNAL_PERSIST_1 0x5
#define HFI_BUFFER_INTERNAL_SCRATCH 0x6
#define HFI_BUFFER_INTERNAL_SCRATCH_1 0x7
+#define HFI_BUFFER_INTERNAL_SCRATCH_2 0x8
#define HFI_PROPERTY_SYS_CODEC_POWER_PLANE_CTRL 0x5
#define HFI_PROPERTY_SYS_IMAGE_VERSION 0x6
#define HFI_PROPERTY_PARAM_FRAME_SIZE 0x1001
+#define HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO 0x1002
#define HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT 0x1003
#define HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT 0x1005
#define HFI_PROPERTY_PARAM_WORK_MODE 0x1015
#define HFI_PROPERTY_PARAM_WORK_ROUTE 0x1017
+#define HFI_PROPERTY_CONFIG_FRAME_RATE 0x2001
#define HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE 0x2002
#define HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM 0x1003001
@@ -111,15 +116,32 @@
#define HFI_MSG_SESSION_RELEASE_RESOURCES 0x22100a
#define HFI_MSG_SESSION_RELEASE_BUFFERS 0x22100c
-#define HFI_PICTURE_I 0x00000001
-#define HFI_PICTURE_P 0x00000002
-#define HFI_PICTURE_B 0x00000004
-#define HFI_PICTURE_IDR 0x00000008
+#define HFI_GEN1_PICTURE_I 0x00000001
+#define HFI_GEN1_PICTURE_P 0x00000002
+#define HFI_GEN1_PICTURE_B 0x00000004
+#define HFI_GEN1_PICTURE_IDR 0x00000008
#define HFI_FRAME_NOTCODED 0x7f002000
#define HFI_FRAME_YUV 0x7f004000
#define HFI_UNUSED_PICT 0x10000000
-#define HFI_BUFFERFLAG_DATACORRUPT 0x00000008
-#define HFI_BUFFERFLAG_DROP_FRAME 0x20000000
+#define HFI_BUFFERFLAG_DATACORRUPT 0x00000008
+#define HFI_BUFFERFLAG_DROP_FRAME 0x20000000
+#define HFI_RATE_CONTROL_OFF 0x1000001
+#define HFI_RATE_CONTROL_VBR_VFR 0x1000002
+#define HFI_RATE_CONTROL_VBR_CFR 0x1000003
+#define HFI_RATE_CONTROL_CBR_VFR 0x1000004
+#define HFI_RATE_CONTROL_CBR_CFR 0x1000005
+#define HFI_RATE_CONTROL_CQ 0x1000008
+
+#define HFI_H264_ENTROPY_CAVLC 0x1
+#define HFI_H264_ENTROPY_CABAC 0x2
+
+#define HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL 0x2005002
+#define HFI_PROPERTY_PARAM_VENC_H264_DEBLOCK_CONTROL 0x2005003
+#define HFI_PROPERTY_PARAM_VENC_RATE_CONTROL 0x2005004
+#define HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE_V2 0x2005009
+#define HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES 0x2005020
+#define HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE 0x2006001
+#define HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER 0x2006008
struct hfi_pkt_hdr {
u32 size;
@@ -194,6 +216,23 @@ struct hfi_session_empty_buffer_compressed_pkt {
u32 data;
};
+struct hfi_session_empty_buffer_uncompressed_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 view_id;
+ u32 time_stamp_hi;
+ u32 time_stamp_lo;
+ u32 flags;
+ u32 mark_target;
+ u32 mark_data;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 offset;
+ u32 input_tag;
+ u32 packet_buffer;
+ u32 extradata_buffer;
+ u32 data;
+};
+
struct hfi_session_fill_buffer_pkt {
struct hfi_session_hdr_pkt shdr;
u32 stream_id;
@@ -340,6 +379,17 @@ struct hfi_uncompressed_plane_actual_constraints_info {
struct hfi_uncompressed_plane_constraints plane_format[2];
};
+struct hfi_uncompressed_plane_actual {
+ int actual_stride;
+ u32 actual_plane_buffer_height;
+};
+
+struct hfi_uncompressed_plane_actual_info {
+ u32 buffer_type;
+ u32 num_planes;
+ struct hfi_uncompressed_plane_actual plane_format[2];
+};
+
struct hfi_buffer_count_actual {
u32 type;
u32 count_actual;
@@ -367,6 +417,36 @@ struct hfi_buffer_requirements {
u32 alignment;
};
+struct hfi_bitrate {
+ u32 bitrate;
+ u32 layer_id;
+};
+
+#define HFI_H264_CABAC_MODEL_0 0x1
+
+struct hfi_h264_entropy_control {
+ u32 entropy_mode;
+ u32 cabac_model;
+};
+
+struct hfi_quantization_v2 {
+ u32 qp_packed;
+ u32 layer_id;
+ u32 enable;
+ u32 reserved[3];
+};
+
+struct hfi_quantization_range_v2 {
+ struct hfi_quantization_v2 min_qp;
+ struct hfi_quantization_v2 max_qp;
+ u32 reserved[4];
+};
+
+struct hfi_framerate {
+ u32 buffer_type;
+ u32 framerate;
+};
+
struct hfi_event_data {
u32 error;
u32 height;
@@ -398,6 +478,26 @@ struct hfi_msg_session_empty_buffer_done_pkt {
u32 data[];
};
+struct hfi_msg_session_fbd_compressed_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 time_stamp_hi;
+ u32 time_stamp_lo;
+ u32 error_type;
+ u32 flags;
+ u32 mark_target;
+ u32 mark_data;
+ u32 stats;
+ u32 offset;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 input_tag;
+ u32 output_tag;
+ u32 picture_type;
+ u32 packet_buffer;
+ u32 extradata_buffer;
+ u32 data[];
+};
+
struct hfi_msg_session_fbd_uncompressed_plane0_pkt {
struct hfi_session_hdr_pkt shdr;
u32 stream_id;
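For reference, the encoder property payloads added in this header are small plain structs; an illustrative target-bitrate payload using the hfi_bitrate type defined above (the values are arbitrary examples):

	struct hfi_bitrate brate = {
		.bitrate = 6000000,	/* 6 Mbit/s */
		.layer_id = 0,		/* base layer */
	};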
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen1_response.c b/drivers/media/platform/qcom/iris/iris_hfi_gen1_response.c
index 8d1ce8a19a45..8e864c239e29 100644
--- a/drivers/media/platform/qcom/iris/iris_hfi_gen1_response.c
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen1_response.c
@@ -387,24 +387,43 @@ error:
static void iris_hfi_gen1_session_ftb_done(struct iris_inst *inst, void *packet)
{
- struct hfi_msg_session_fbd_uncompressed_plane0_pkt *pkt = packet;
+ struct hfi_msg_session_fbd_uncompressed_plane0_pkt *uncom_pkt = packet;
+ struct hfi_msg_session_fbd_compressed_pkt *com_pkt = packet;
struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
struct v4l2_m2m_buffer *m2m_buffer, *n;
struct hfi_session_flush_pkt flush_pkt;
- u32 timestamp_hi = pkt->time_stamp_hi;
- u32 timestamp_lo = pkt->time_stamp_lo;
+ u32 timestamp_hi;
+ u32 timestamp_lo;
struct iris_core *core = inst->core;
- u32 filled_len = pkt->filled_len;
- u32 pic_type = pkt->picture_type;
- u32 output_tag = pkt->output_tag;
+ u32 filled_len;
+ u32 pic_type;
+ u32 output_tag;
struct iris_buffer *buf, *iter;
struct iris_buffers *buffers;
- u32 hfi_flags = pkt->flags;
- u32 offset = pkt->offset;
+ u32 hfi_flags;
+ u32 offset;
u64 timestamp_us = 0;
bool found = false;
u32 flags = 0;
+ if (inst->domain == DECODER) {
+ timestamp_hi = uncom_pkt->time_stamp_hi;
+ timestamp_lo = uncom_pkt->time_stamp_lo;
+ filled_len = uncom_pkt->filled_len;
+ pic_type = uncom_pkt->picture_type;
+ output_tag = uncom_pkt->output_tag;
+ hfi_flags = uncom_pkt->flags;
+ offset = uncom_pkt->offset;
+ } else {
+ timestamp_hi = com_pkt->time_stamp_hi;
+ timestamp_lo = com_pkt->time_stamp_lo;
+ filled_len = com_pkt->filled_len;
+ pic_type = com_pkt->picture_type;
+ output_tag = com_pkt->output_tag;
+ hfi_flags = com_pkt->flags;
+ offset = com_pkt->offset;
+ }
+
if ((hfi_flags & HFI_BUFFERFLAG_EOS) && !filled_len) {
reinit_completion(&inst->flush_completion);
@@ -416,11 +435,10 @@ static void iris_hfi_gen1_session_ftb_done(struct iris_inst *inst, void *packet)
inst->flush_responses_pending++;
iris_inst_sub_state_change_drain_last(inst);
-
- return;
}
- if (iris_split_mode_enabled(inst) && pkt->stream_id == 0) {
+ if (iris_split_mode_enabled(inst) && inst->domain == DECODER &&
+ uncom_pkt->stream_id == 0) {
buffers = &inst->buffers[BUF_DPB];
if (!buffers)
goto error;
@@ -461,8 +479,14 @@ static void iris_hfi_gen1_session_ftb_done(struct iris_inst *inst, void *packet)
timestamp_us = timestamp_hi;
timestamp_us = (timestamp_us << 32) | timestamp_lo;
} else {
- if (pkt->stream_id == 1 && !inst->last_buffer_dequeued) {
- if (iris_drc_pending(inst)) {
+ if (inst->domain == DECODER && uncom_pkt->stream_id == 1 &&
+ !inst->last_buffer_dequeued) {
+ if (iris_drc_pending(inst) || iris_drain_pending(inst)) {
+ flags |= V4L2_BUF_FLAG_LAST;
+ inst->last_buffer_dequeued = true;
+ }
+ } else if (inst->domain == ENCODER) {
+ if (!inst->last_buffer_dequeued && iris_drain_pending(inst)) {
flags |= V4L2_BUF_FLAG_LAST;
inst->last_buffer_dequeued = true;
}
@@ -471,14 +495,14 @@ static void iris_hfi_gen1_session_ftb_done(struct iris_inst *inst, void *packet)
buf->timestamp = timestamp_us;
switch (pic_type) {
- case HFI_PICTURE_IDR:
- case HFI_PICTURE_I:
+ case HFI_GEN1_PICTURE_IDR:
+ case HFI_GEN1_PICTURE_I:
flags |= V4L2_BUF_FLAG_KEYFRAME;
break;
- case HFI_PICTURE_P:
+ case HFI_GEN1_PICTURE_P:
flags |= V4L2_BUF_FLAG_PFRAME;
break;
- case HFI_PICTURE_B:
+ case HFI_GEN1_PICTURE_B:
flags |= V4L2_BUF_FLAG_BFRAME;
break;
case HFI_FRAME_NOTCODED:
@@ -553,7 +577,7 @@ static const struct iris_hfi_gen1_response_pkt_info pkt_infos[] = {
},
{
.pkt = HFI_MSG_SESSION_FILL_BUFFER,
- .pkt_sz = sizeof(struct hfi_msg_session_fbd_uncompressed_plane0_pkt),
+ .pkt_sz = sizeof(struct hfi_msg_session_fbd_compressed_pkt),
},
{
.pkt = HFI_MSG_SESSION_FLUSH,
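
The gen2 changes that follow hinge on one inversion: for a decoder the V4L2 OUTPUT queue feeds the HFI bitstream port and CAPTURE the raw port, while an encoder maps them the other way around. A compact sketch of that mapping (port_for() is a hypothetical condensation of the iris_hfi_gen2_get_port() rework below):

/* Sketch: V4L2 queue -> HFI port, by session domain. */
static u32 port_for(u32 domain, bool is_output_queue)
{
        bool bitstream = (domain == DECODER) ? is_output_queue
                                             : !is_output_queue;

        return bitstream ? HFI_PORT_BITSTREAM : HFI_PORT_RAW;
}
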
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen2_command.c b/drivers/media/platform/qcom/iris/iris_hfi_gen2_command.c
index 7ca5ae13d62b..4ce71a142508 100644
--- a/drivers/media/platform/qcom/iris/iris_hfi_gen2_command.c
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen2_command.c
@@ -88,33 +88,63 @@ static int iris_hfi_gen2_sys_pc_prep(struct iris_core *core)
return ret;
}
-static u32 iris_hfi_gen2_get_port(u32 plane)
+static u32 iris_hfi_gen2_get_port(struct iris_inst *inst, u32 plane)
{
- switch (plane) {
- case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- return HFI_PORT_BITSTREAM;
- case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- return HFI_PORT_RAW;
- default:
- return HFI_PORT_NONE;
+ if (inst->domain == DECODER) {
+ switch (plane) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return HFI_PORT_BITSTREAM;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ return HFI_PORT_RAW;
+ default:
+ return HFI_PORT_NONE;
+ }
+ } else {
+ switch (plane) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return HFI_PORT_RAW;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ return HFI_PORT_BITSTREAM;
+ default:
+ return HFI_PORT_NONE;
+ }
}
}
-static u32 iris_hfi_gen2_get_port_from_buf_type(enum iris_buffer_type buffer_type)
+static u32 iris_hfi_gen2_get_port_from_buf_type(struct iris_inst *inst,
+ enum iris_buffer_type buffer_type)
{
- switch (buffer_type) {
- case BUF_INPUT:
- case BUF_BIN:
- case BUF_COMV:
- case BUF_NON_COMV:
- case BUF_LINE:
- return HFI_PORT_BITSTREAM;
- case BUF_OUTPUT:
- case BUF_DPB:
- return HFI_PORT_RAW;
- case BUF_PERSIST:
- default:
- return HFI_PORT_NONE;
+ if (inst->domain == DECODER) {
+ switch (buffer_type) {
+ case BUF_INPUT:
+ case BUF_BIN:
+ case BUF_COMV:
+ case BUF_NON_COMV:
+ case BUF_LINE:
+ return HFI_PORT_BITSTREAM;
+ case BUF_OUTPUT:
+ case BUF_DPB:
+ return HFI_PORT_RAW;
+ case BUF_PERSIST:
+ default:
+ return HFI_PORT_NONE;
+ }
+ } else {
+ switch (buffer_type) {
+ case BUF_INPUT:
+ case BUF_VPSS:
+ return HFI_PORT_RAW;
+ case BUF_OUTPUT:
+ case BUF_BIN:
+ case BUF_COMV:
+ case BUF_NON_COMV:
+ case BUF_LINE:
+ case BUF_SCRATCH_2:
+ return HFI_PORT_BITSTREAM;
+ case BUF_ARP:
+ default:
+ return HFI_PORT_NONE;
+ }
}
}
@@ -136,34 +166,77 @@ static int iris_hfi_gen2_session_set_property(struct iris_inst *inst, u32 packet
inst_hfi_gen2->packet->size);
}
-static int iris_hfi_gen2_set_bitstream_resolution(struct iris_inst *inst)
+static int iris_hfi_gen2_set_raw_resolution(struct iris_inst *inst, u32 plane)
{
- struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
- u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
u32 resolution = inst->fmt_src->fmt.pix_mp.width << 16 |
inst->fmt_src->fmt.pix_mp.height;
+ u32 port = iris_hfi_gen2_get_port(inst, plane);
- inst_hfi_gen2->src_subcr_params.bitstream_resolution = resolution;
+ return iris_hfi_gen2_session_set_property(inst,
+ HFI_PROP_RAW_RESOLUTION,
+ HFI_HOST_FLAGS_NONE,
+ port,
+ HFI_PAYLOAD_32_PACKED,
+ &resolution,
+ sizeof(u32));
+}
+
+static int iris_hfi_gen2_set_bitstream_resolution(struct iris_inst *inst, u32 plane)
+{
+ struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ u32 port = iris_hfi_gen2_get_port(inst, plane);
+ enum hfi_packet_payload_info payload_type;
+ u32 resolution, codec_align;
+
+ if (inst->domain == DECODER) {
+ resolution = inst->fmt_src->fmt.pix_mp.width << 16 |
+ inst->fmt_src->fmt.pix_mp.height;
+ inst_hfi_gen2->src_subcr_params.bitstream_resolution = resolution;
+ payload_type = HFI_PAYLOAD_U32;
+ } else {
+ codec_align = inst->codec == V4L2_PIX_FMT_HEVC ? 32 : 16;
+ resolution = ALIGN(inst->fmt_dst->fmt.pix_mp.width, codec_align) << 16 |
+ ALIGN(inst->fmt_dst->fmt.pix_mp.height, codec_align);
+ inst_hfi_gen2->dst_subcr_params.bitstream_resolution = resolution;
+ payload_type = HFI_PAYLOAD_32_PACKED;
+ }
return iris_hfi_gen2_session_set_property(inst,
HFI_PROP_BITSTREAM_RESOLUTION,
HFI_HOST_FLAGS_NONE,
port,
- HFI_PAYLOAD_U32,
+ payload_type,
&resolution,
sizeof(u32));
}
-static int iris_hfi_gen2_set_crop_offsets(struct iris_inst *inst)
+static int iris_hfi_gen2_set_crop_offsets(struct iris_inst *inst, u32 plane)
{
- u32 bottom_offset = (inst->fmt_src->fmt.pix_mp.height - inst->crop.height);
- u32 right_offset = (inst->fmt_src->fmt.pix_mp.width - inst->crop.width);
struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
- u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
- u32 left_offset = inst->crop.left;
- u32 top_offset = inst->crop.top;
+ u32 port = iris_hfi_gen2_get_port(inst, plane);
+ u32 bottom_offset, right_offset;
+ u32 left_offset, top_offset;
u32 payload[2];
+ if (inst->domain == DECODER) {
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ bottom_offset = (inst->fmt_src->fmt.pix_mp.height - inst->crop.height);
+ right_offset = (inst->fmt_src->fmt.pix_mp.width - inst->crop.width);
+ left_offset = inst->crop.left;
+ top_offset = inst->crop.top;
+ } else {
+ bottom_offset = (inst->fmt_dst->fmt.pix_mp.height - inst->compose.height);
+ right_offset = (inst->fmt_dst->fmt.pix_mp.width - inst->compose.width);
+ left_offset = inst->compose.left;
+ top_offset = inst->compose.top;
+ }
+ } else {
+ bottom_offset = (inst->fmt_src->fmt.pix_mp.height - inst->crop.height);
+ right_offset = (inst->fmt_src->fmt.pix_mp.width - inst->crop.width);
+ left_offset = inst->crop.left;
+ top_offset = inst->crop.top;
+ }
+
payload[0] = FIELD_PREP(GENMASK(31, 16), left_offset) | top_offset;
payload[1] = FIELD_PREP(GENMASK(31, 16), right_offset) | bottom_offset;
inst_hfi_gen2->src_subcr_params.crop_offsets[0] = payload[0];
@@ -178,10 +251,10 @@ static int iris_hfi_gen2_set_crop_offsets(struct iris_inst *inst)
sizeof(u64));
}
-static int iris_hfi_gen2_set_bit_depth(struct iris_inst *inst)
+static int iris_hfi_gen2_set_bit_depth(struct iris_inst *inst, u32 plane)
{
struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
- u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ u32 port = iris_hfi_gen2_get_port(inst, plane);
u32 bitdepth = BIT_DEPTH_8;
inst_hfi_gen2->src_subcr_params.bit_depth = bitdepth;
@@ -195,10 +268,10 @@ static int iris_hfi_gen2_set_bit_depth(struct iris_inst *inst)
sizeof(u32));
}
-static int iris_hfi_gen2_set_coded_frames(struct iris_inst *inst)
+static int iris_hfi_gen2_set_coded_frames(struct iris_inst *inst, u32 plane)
{
struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
- u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ u32 port = iris_hfi_gen2_get_port(inst, plane);
u32 coded_frames = 0;
if (inst->fw_caps[CODED_FRAMES].value == CODED_FRAMES_PROGRESSIVE)
@@ -214,11 +287,11 @@ static int iris_hfi_gen2_set_coded_frames(struct iris_inst *inst)
sizeof(u32));
}
-static int iris_hfi_gen2_set_min_output_count(struct iris_inst *inst)
+static int iris_hfi_gen2_set_min_output_count(struct iris_inst *inst, u32 plane)
{
struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
- u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
u32 min_output = inst->buffers[BUF_OUTPUT].min_count;
+ u32 port = iris_hfi_gen2_get_port(inst, plane);
inst_hfi_gen2->src_subcr_params.fw_min_count = min_output;
@@ -231,10 +304,10 @@ static int iris_hfi_gen2_set_min_output_count(struct iris_inst *inst)
sizeof(u32));
}
-static int iris_hfi_gen2_set_picture_order_count(struct iris_inst *inst)
+static int iris_hfi_gen2_set_picture_order_count(struct iris_inst *inst, u32 plane)
{
struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
- u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ u32 port = iris_hfi_gen2_get_port(inst, plane);
u32 poc = 0;
inst_hfi_gen2->src_subcr_params.pic_order_cnt = poc;
@@ -248,16 +321,16 @@ static int iris_hfi_gen2_set_picture_order_count(struct iris_inst *inst)
sizeof(u32));
}
-static int iris_hfi_gen2_set_colorspace(struct iris_inst *inst)
+static int iris_hfi_gen2_set_colorspace(struct iris_inst *inst, u32 plane)
{
struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
- u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
struct v4l2_pix_format_mplane *pixmp = &inst->fmt_src->fmt.pix_mp;
u32 video_signal_type_present_flag = 0, color_info;
u32 matrix_coeff = HFI_MATRIX_COEFF_RESERVED;
u32 video_format = UNSPECIFIED_COLOR_FORMAT;
u32 full_range = V4L2_QUANTIZATION_DEFAULT;
u32 transfer_char = HFI_TRANSFER_RESERVED;
+ u32 port = iris_hfi_gen2_get_port(inst, plane);
u32 colour_description_present_flag = 0;
u32 primaries = HFI_PRIMARIES_RESERVED;
@@ -291,10 +364,10 @@ static int iris_hfi_gen2_set_colorspace(struct iris_inst *inst)
sizeof(u32));
}
-static int iris_hfi_gen2_set_profile(struct iris_inst *inst)
+static int iris_hfi_gen2_set_profile(struct iris_inst *inst, u32 plane)
{
struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
- u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ u32 port = iris_hfi_gen2_get_port(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
u32 profile = 0;
switch (inst->codec) {
@@ -320,10 +393,10 @@ static int iris_hfi_gen2_set_profile(struct iris_inst *inst)
sizeof(u32));
}
-static int iris_hfi_gen2_set_level(struct iris_inst *inst)
+static int iris_hfi_gen2_set_level(struct iris_inst *inst, u32 plane)
{
struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
- u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ u32 port = iris_hfi_gen2_get_port(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
u32 level = 0;
switch (inst->codec) {
@@ -349,33 +422,47 @@ static int iris_hfi_gen2_set_level(struct iris_inst *inst)
sizeof(u32));
}
-static int iris_hfi_gen2_set_colorformat(struct iris_inst *inst)
+static int iris_hfi_gen2_set_colorformat(struct iris_inst *inst, u32 plane)
{
- u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ u32 port = iris_hfi_gen2_get_port(inst, plane);
u32 hfi_colorformat, pixelformat;
- pixelformat = inst->fmt_dst->fmt.pix_mp.pixelformat;
- hfi_colorformat = pixelformat == V4L2_PIX_FMT_NV12 ? HFI_COLOR_FMT_NV12 : 0;
+ if (inst->domain == DECODER) {
+ pixelformat = inst->fmt_dst->fmt.pix_mp.pixelformat;
+ hfi_colorformat = pixelformat == V4L2_PIX_FMT_NV12 ? HFI_COLOR_FMT_NV12 : 0;
+ } else {
+ pixelformat = inst->fmt_src->fmt.pix_mp.pixelformat;
+ hfi_colorformat = pixelformat == V4L2_PIX_FMT_NV12 ? HFI_COLOR_FMT_NV12 : 0;
+ }
return iris_hfi_gen2_session_set_property(inst,
HFI_PROP_COLOR_FORMAT,
HFI_HOST_FLAGS_NONE,
port,
- HFI_PAYLOAD_U32,
+ HFI_PAYLOAD_U32_ENUM,
&hfi_colorformat,
sizeof(u32));
}
-static int iris_hfi_gen2_set_linear_stride_scanline(struct iris_inst *inst)
+static int iris_hfi_gen2_set_linear_stride_scanline(struct iris_inst *inst, u32 plane)
{
- u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
- u32 pixelformat = inst->fmt_dst->fmt.pix_mp.pixelformat;
- u32 scanline_y = inst->fmt_dst->fmt.pix_mp.height;
- u32 stride_y = inst->fmt_dst->fmt.pix_mp.width;
- u32 scanline_uv = scanline_y / 2;
- u32 stride_uv = stride_y;
+ u32 pixelformat, stride_y, stride_uv, scanline_y, scanline_uv;
+ u32 port = iris_hfi_gen2_get_port(inst, plane);
u32 payload[2];
+ if (inst->domain == DECODER) {
+ pixelformat = inst->fmt_dst->fmt.pix_mp.pixelformat;
+ stride_y = inst->fmt_dst->fmt.pix_mp.width;
+ scanline_y = inst->fmt_dst->fmt.pix_mp.height;
+ } else {
+ pixelformat = inst->fmt_src->fmt.pix_mp.pixelformat;
+ stride_y = ALIGN(inst->fmt_src->fmt.pix_mp.width, 128);
+ scanline_y = ALIGN(inst->fmt_src->fmt.pix_mp.height, 32);
+ }
+
+ stride_uv = stride_y;
+ scanline_uv = scanline_y / 2;
+
if (pixelformat != V4L2_PIX_FMT_NV12)
return 0;
@@ -386,15 +473,15 @@ static int iris_hfi_gen2_set_linear_stride_scanline(struct iris_inst *inst)
HFI_PROP_LINEAR_STRIDE_SCANLINE,
HFI_HOST_FLAGS_NONE,
port,
- HFI_PAYLOAD_U64,
+ HFI_PAYLOAD_64_PACKED,
&payload,
sizeof(u64));
}
-static int iris_hfi_gen2_set_tier(struct iris_inst *inst)
+static int iris_hfi_gen2_set_tier(struct iris_inst *inst, u32 plane)
{
struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
- u32 port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ u32 port = iris_hfi_gen2_get_port(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
u32 tier = inst->fw_caps[TIER].value;
inst_hfi_gen2->src_subcr_params.tier = tier;
@@ -408,14 +495,29 @@ static int iris_hfi_gen2_set_tier(struct iris_inst *inst)
sizeof(u32));
}
+static int iris_hfi_gen2_set_frame_rate(struct iris_inst *inst, u32 plane)
+{
+ u32 port = iris_hfi_gen2_get_port(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ u32 frame_rate = inst->frame_rate << 16;
+
+ return iris_hfi_gen2_session_set_property(inst,
+ HFI_PROP_FRAME_RATE,
+ HFI_HOST_FLAGS_NONE,
+ port,
+ HFI_PAYLOAD_Q16,
+ &frame_rate,
+ sizeof(u32));
+}
+
static int iris_hfi_gen2_session_set_config_params(struct iris_inst *inst, u32 plane)
{
- struct iris_core *core = inst->core;
+ const struct iris_platform_data *pdata = inst->core->iris_platform_data;
u32 config_params_size = 0, i, j;
const u32 *config_params = NULL;
int ret;
static const struct iris_hfi_prop_type_handle prop_type_handle_arr[] = {
+ {HFI_PROP_RAW_RESOLUTION, iris_hfi_gen2_set_raw_resolution },
{HFI_PROP_BITSTREAM_RESOLUTION, iris_hfi_gen2_set_bitstream_resolution },
{HFI_PROP_CROP_OFFSETS, iris_hfi_gen2_set_crop_offsets },
{HFI_PROP_CODED_FRAMES, iris_hfi_gen2_set_coded_frames },
@@ -428,29 +530,35 @@ static int iris_hfi_gen2_session_set_config_params(struct iris_inst *inst, u32 p
{HFI_PROP_COLOR_FORMAT, iris_hfi_gen2_set_colorformat },
{HFI_PROP_LINEAR_STRIDE_SCANLINE, iris_hfi_gen2_set_linear_stride_scanline },
{HFI_PROP_TIER, iris_hfi_gen2_set_tier },
+ {HFI_PROP_FRAME_RATE, iris_hfi_gen2_set_frame_rate },
};
- if (V4L2_TYPE_IS_OUTPUT(plane)) {
- switch (inst->codec) {
- case V4L2_PIX_FMT_H264:
- config_params = core->iris_platform_data->input_config_params_default;
- config_params_size =
- core->iris_platform_data->input_config_params_default_size;
- break;
- case V4L2_PIX_FMT_HEVC:
- config_params = core->iris_platform_data->input_config_params_hevc;
- config_params_size =
- core->iris_platform_data->input_config_params_hevc_size;
- break;
- case V4L2_PIX_FMT_VP9:
- config_params = core->iris_platform_data->input_config_params_vp9;
- config_params_size =
- core->iris_platform_data->input_config_params_vp9_size;
- break;
+ if (inst->domain == DECODER) {
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ if (inst->codec == V4L2_PIX_FMT_H264) {
+ config_params = pdata->dec_input_config_params_default;
+ config_params_size = pdata->dec_input_config_params_default_size;
+ } else if (inst->codec == V4L2_PIX_FMT_HEVC) {
+ config_params = pdata->dec_input_config_params_hevc;
+ config_params_size = pdata->dec_input_config_params_hevc_size;
+ } else if (inst->codec == V4L2_PIX_FMT_VP9) {
+ config_params = pdata->dec_input_config_params_vp9;
+ config_params_size = pdata->dec_input_config_params_vp9_size;
+ } else {
+ return -EINVAL;
+ }
+ } else {
+ config_params = pdata->dec_output_config_params;
+ config_params_size = pdata->dec_output_config_params_size;
}
} else {
- config_params = core->iris_platform_data->output_config_params;
- config_params_size = core->iris_platform_data->output_config_params_size;
+ if (V4L2_TYPE_IS_OUTPUT(plane)) {
+ config_params = pdata->enc_input_config_params;
+ config_params_size = pdata->enc_input_config_params_size;
+ } else {
+ config_params = pdata->enc_output_config_params;
+ config_params_size = pdata->enc_output_config_params_size;
+ }
}
if (!config_params || !config_params_size)
@@ -459,7 +567,7 @@ static int iris_hfi_gen2_session_set_config_params(struct iris_inst *inst, u32 p
for (i = 0; i < config_params_size; i++) {
for (j = 0; j < ARRAY_SIZE(prop_type_handle_arr); j++) {
if (prop_type_handle_arr[j].type == config_params[i]) {
- ret = prop_type_handle_arr[j].handle(inst);
+ ret = prop_type_handle_arr[j].handle(inst, plane);
if (ret)
return ret;
break;
@@ -477,14 +585,19 @@ static int iris_hfi_gen2_session_set_codec(struct iris_inst *inst)
switch (inst->codec) {
case V4L2_PIX_FMT_H264:
- codec = HFI_CODEC_DECODE_AVC;
+ if (inst->domain == ENCODER)
+ codec = HFI_CODEC_ENCODE_AVC;
+ else
+ codec = HFI_CODEC_DECODE_AVC;
break;
case V4L2_PIX_FMT_HEVC:
- codec = HFI_CODEC_DECODE_HEVC;
+ if (inst->domain == ENCODER)
+ codec = HFI_CODEC_ENCODE_HEVC;
+ else
+ codec = HFI_CODEC_DECODE_HEVC;
break;
case V4L2_PIX_FMT_VP9:
codec = HFI_CODEC_DECODE_VP9;
- break;
}
iris_hfi_gen2_packet_session_property(inst,
@@ -550,9 +663,11 @@ static int iris_hfi_gen2_session_open(struct iris_inst *inst)
if (ret)
goto fail_free_packet;
- ret = iris_hfi_gen2_session_set_default_header(inst);
- if (ret)
- goto fail_free_packet;
+ if (inst->domain == DECODER) {
+ ret = iris_hfi_gen2_session_set_default_header(inst);
+ if (ret)
+ goto fail_free_packet;
+ }
return 0;
@@ -601,7 +716,7 @@ static int iris_hfi_gen2_session_subscribe_mode(struct iris_inst *inst,
cmd,
(HFI_HOST_FLAGS_RESPONSE_REQUIRED |
HFI_HOST_FLAGS_INTR_REQUIRED),
- iris_hfi_gen2_get_port(plane),
+ iris_hfi_gen2_get_port(inst, plane),
inst->session_id,
payload_type,
payload,
@@ -623,6 +738,9 @@ static int iris_hfi_gen2_subscribe_change_param(struct iris_inst *inst, u32 plan
u32 hfi_port = 0, i;
int ret;
+ if (inst->domain == ENCODER)
+ return 0;
+
if ((V4L2_TYPE_IS_OUTPUT(plane) && inst_hfi_gen2->ipsc_properties_set) ||
(V4L2_TYPE_IS_CAPTURE(plane) && inst_hfi_gen2->opsc_properties_set)) {
dev_err(core->dev, "invalid plane\n");
@@ -631,19 +749,19 @@ static int iris_hfi_gen2_subscribe_change_param(struct iris_inst *inst, u32 plan
switch (inst->codec) {
case V4L2_PIX_FMT_H264:
- change_param = core->iris_platform_data->input_config_params_default;
+ change_param = core->iris_platform_data->dec_input_config_params_default;
change_param_size =
- core->iris_platform_data->input_config_params_default_size;
+ core->iris_platform_data->dec_input_config_params_default_size;
break;
case V4L2_PIX_FMT_HEVC:
- change_param = core->iris_platform_data->input_config_params_hevc;
+ change_param = core->iris_platform_data->dec_input_config_params_hevc;
change_param_size =
- core->iris_platform_data->input_config_params_hevc_size;
+ core->iris_platform_data->dec_input_config_params_hevc_size;
break;
case V4L2_PIX_FMT_VP9:
- change_param = core->iris_platform_data->input_config_params_vp9;
+ change_param = core->iris_platform_data->dec_input_config_params_vp9;
change_param_size =
- core->iris_platform_data->input_config_params_vp9_size;
+ core->iris_platform_data->dec_input_config_params_vp9_size;
break;
}
@@ -664,7 +782,7 @@ static int iris_hfi_gen2_subscribe_change_param(struct iris_inst *inst, u32 plan
if (V4L2_TYPE_IS_OUTPUT(plane)) {
inst_hfi_gen2->ipsc_properties_set = true;
} else {
- hfi_port = iris_hfi_gen2_get_port(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ hfi_port = iris_hfi_gen2_get_port(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
memcpy(&inst_hfi_gen2->dst_subcr_params,
&inst_hfi_gen2->src_subcr_params,
sizeof(inst_hfi_gen2->src_subcr_params));
@@ -759,6 +877,9 @@ static int iris_hfi_gen2_subscribe_property(struct iris_inst *inst, u32 plane)
payload[0] = HFI_MODE_PROPERTY;
+ if (inst->domain == ENCODER)
+ return 0;
+
if (V4L2_TYPE_IS_OUTPUT(plane)) {
subscribe_prop_size = core->iris_platform_data->dec_input_prop_size;
subcribe_prop = core->iris_platform_data->dec_input_prop;
@@ -810,7 +931,7 @@ static int iris_hfi_gen2_session_start(struct iris_inst *inst, u32 plane)
HFI_CMD_START,
(HFI_HOST_FLAGS_RESPONSE_REQUIRED |
HFI_HOST_FLAGS_INTR_REQUIRED),
- iris_hfi_gen2_get_port(plane),
+ iris_hfi_gen2_get_port(inst, plane),
inst->session_id,
HFI_PAYLOAD_NONE,
NULL,
@@ -832,7 +953,7 @@ static int iris_hfi_gen2_session_stop(struct iris_inst *inst, u32 plane)
(HFI_HOST_FLAGS_RESPONSE_REQUIRED |
HFI_HOST_FLAGS_INTR_REQUIRED |
HFI_HOST_FLAGS_NON_DISCARDABLE),
- iris_hfi_gen2_get_port(plane),
+ iris_hfi_gen2_get_port(inst, plane),
inst->session_id,
HFI_PAYLOAD_NONE,
NULL,
@@ -854,7 +975,7 @@ static int iris_hfi_gen2_session_pause(struct iris_inst *inst, u32 plane)
HFI_CMD_PAUSE,
(HFI_HOST_FLAGS_RESPONSE_REQUIRED |
HFI_HOST_FLAGS_INTR_REQUIRED),
- iris_hfi_gen2_get_port(plane),
+ iris_hfi_gen2_get_port(inst, plane),
inst->session_id,
HFI_PAYLOAD_NONE,
NULL,
@@ -873,7 +994,7 @@ static int iris_hfi_gen2_session_resume_drc(struct iris_inst *inst, u32 plane)
HFI_CMD_RESUME,
(HFI_HOST_FLAGS_RESPONSE_REQUIRED |
HFI_HOST_FLAGS_INTR_REQUIRED),
- iris_hfi_gen2_get_port(plane),
+ iris_hfi_gen2_get_port(inst, plane),
inst->session_id,
HFI_PAYLOAD_U32,
&payload,
@@ -892,7 +1013,7 @@ static int iris_hfi_gen2_session_resume_drain(struct iris_inst *inst, u32 plane)
HFI_CMD_RESUME,
(HFI_HOST_FLAGS_RESPONSE_REQUIRED |
HFI_HOST_FLAGS_INTR_REQUIRED),
- iris_hfi_gen2_get_port(plane),
+ iris_hfi_gen2_get_port(inst, plane),
inst->session_id,
HFI_PAYLOAD_U32,
&payload,
@@ -914,7 +1035,7 @@ static int iris_hfi_gen2_session_drain(struct iris_inst *inst, u32 plane)
(HFI_HOST_FLAGS_RESPONSE_REQUIRED |
HFI_HOST_FLAGS_INTR_REQUIRED |
HFI_HOST_FLAGS_NON_DISCARDABLE),
- iris_hfi_gen2_get_port(plane),
+ iris_hfi_gen2_get_port(inst, plane),
inst->session_id,
HFI_PAYLOAD_NONE,
NULL,
@@ -924,13 +1045,19 @@ static int iris_hfi_gen2_session_drain(struct iris_inst *inst, u32 plane)
inst_hfi_gen2->packet->size);
}
-static u32 iris_hfi_gen2_buf_type_from_driver(enum iris_buffer_type buffer_type)
+static u32 iris_hfi_gen2_buf_type_from_driver(u32 domain, enum iris_buffer_type buffer_type)
{
switch (buffer_type) {
case BUF_INPUT:
- return HFI_BUFFER_BITSTREAM;
+ if (domain == DECODER)
+ return HFI_BUFFER_BITSTREAM;
+ else
+ return HFI_BUFFER_RAW;
case BUF_OUTPUT:
- return HFI_BUFFER_RAW;
+ if (domain == DECODER)
+ return HFI_BUFFER_RAW;
+ else
+ return HFI_BUFFER_BITSTREAM;
case BUF_BIN:
return HFI_BUFFER_BIN;
case BUF_COMV:
@@ -940,9 +1067,14 @@ static u32 iris_hfi_gen2_buf_type_from_driver(enum iris_buffer_type buffer_type)
case BUF_LINE:
return HFI_BUFFER_LINE;
case BUF_DPB:
+ case BUF_SCRATCH_2:
return HFI_BUFFER_DPB;
case BUF_PERSIST:
return HFI_BUFFER_PERSIST;
+ case BUF_ARP:
+ return HFI_BUFFER_ARP;
+ case BUF_VPSS:
+ return HFI_BUFFER_VPSS;
default:
return 0;
}
@@ -965,16 +1097,17 @@ static int iris_set_num_comv(struct iris_inst *inst)
&num_comv, sizeof(u32));
}
-static void iris_hfi_gen2_get_buffer(struct iris_buffer *buffer, struct iris_hfi_buffer *buf)
+static void iris_hfi_gen2_get_buffer(u32 domain, struct iris_buffer *buffer,
+ struct iris_hfi_buffer *buf)
{
memset(buf, 0, sizeof(*buf));
- buf->type = iris_hfi_gen2_buf_type_from_driver(buffer->type);
+ buf->type = iris_hfi_gen2_buf_type_from_driver(domain, buffer->type);
buf->index = buffer->index;
buf->base_address = buffer->device_addr;
buf->addr_offset = 0;
buf->buffer_size = buffer->buffer_size;
- if (buffer->type == BUF_INPUT)
+ if (domain == DECODER && buffer->type == BUF_INPUT)
buf->buffer_size = ALIGN(buffer->buffer_size, 256);
buf->data_offset = buffer->data_offset;
buf->data_size = buffer->data_size;
@@ -991,14 +1124,14 @@ static int iris_hfi_gen2_session_queue_buffer(struct iris_inst *inst, struct iri
u32 port;
int ret;
- iris_hfi_gen2_get_buffer(buffer, &hfi_buffer);
+ iris_hfi_gen2_get_buffer(inst->domain, buffer, &hfi_buffer);
if (buffer->type == BUF_COMV) {
ret = iris_set_num_comv(inst);
if (ret)
return ret;
}
- port = iris_hfi_gen2_get_port_from_buf_type(buffer->type);
+ port = iris_hfi_gen2_get_port_from_buf_type(inst, buffer->type);
iris_hfi_gen2_packet_session_command(inst,
HFI_CMD_BUFFER,
HFI_HOST_FLAGS_INTR_REQUIRED,
@@ -1018,9 +1151,9 @@ static int iris_hfi_gen2_session_release_buffer(struct iris_inst *inst, struct i
struct iris_hfi_buffer hfi_buffer;
u32 port;
- iris_hfi_gen2_get_buffer(buffer, &hfi_buffer);
+ iris_hfi_gen2_get_buffer(inst->domain, buffer, &hfi_buffer);
hfi_buffer.flags |= HFI_BUF_HOST_FLAG_RELEASE;
- port = iris_hfi_gen2_get_port_from_buf_type(buffer->type);
+ port = iris_hfi_gen2_get_port_from_buf_type(inst, buffer->type);
iris_hfi_gen2_packet_session_command(inst,
HFI_CMD_BUFFER,
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen2_defines.h b/drivers/media/platform/qcom/iris/iris_hfi_gen2_defines.h
index 5f13dc11bea5..aa1f795f5626 100644
--- a/drivers/media/platform/qcom/iris/iris_hfi_gen2_defines.h
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen2_defines.h
@@ -49,18 +49,48 @@
#define HFI_PROP_TIER 0x03000109
#define HFI_PROP_STAGE 0x0300010a
#define HFI_PROP_PIPE 0x0300010b
+#define HFI_PROP_FRAME_RATE 0x0300010c
#define HFI_PROP_LUMA_CHROMA_BIT_DEPTH 0x0300010f
#define HFI_PROP_CODED_FRAMES 0x03000120
#define HFI_PROP_CABAC_SESSION 0x03000121
#define HFI_PROP_BUFFER_HOST_MAX_COUNT 0x03000123
#define HFI_PROP_BUFFER_FW_MIN_OUTPUT_COUNT 0x03000124
#define HFI_PROP_PIC_ORDER_CNT_TYPE 0x03000128
+
+enum hfi_rate_control {
+ HFI_RC_VBR_CFR = 0x00000000,
+ HFI_RC_CBR_CFR = 0x00000001,
+ HFI_RC_CQ = 0x00000002,
+ HFI_RC_OFF = 0x00000003,
+ HFI_RC_CBR_VFR = 0x00000004,
+ HFI_RC_LOSSLESS = 0x00000005,
+};
+
+#define HFI_PROP_RATE_CONTROL 0x0300012a
+#define HFI_PROP_QP_PACKED 0x0300012e
+#define HFI_PROP_MIN_QP_PACKED 0x0300012f
+#define HFI_PROP_MAX_QP_PACKED 0x03000130
+#define HFI_PROP_TOTAL_BITRATE 0x0300013b
+#define HFI_PROP_MAX_GOP_FRAMES 0x03000146
+#define HFI_PROP_MAX_B_FRAMES 0x03000147
#define HFI_PROP_QUALITY_MODE 0x03000148
+
+enum hfi_seq_header_mode {
+ HFI_SEQ_HEADER_SEPERATE_FRAME = 0x00000001,
+ HFI_SEQ_HEADER_JOINED_WITH_1ST_FRAME = 0x00000002,
+ HFI_SEQ_HEADER_PREFIX_WITH_SYNC_FRAME = 0x00000004,
+ HFI_SEQ_HEADER_METADATA = 0x00000008,
+};
+
+#define HFI_PROP_SEQ_HEADER_MODE 0x03000149
#define HFI_PROP_SIGNAL_COLOR_INFO 0x03000155
#define HFI_PROP_PICTURE_TYPE 0x03000162
#define HFI_PROP_DEC_DEFAULT_HEADER 0x03000168
#define HFI_PROP_DEC_START_FROM_RAP_FRAME 0x03000169
#define HFI_PROP_NO_OUTPUT 0x0300016a
+#define HFI_PROP_BUFFER_MARK 0x0300016c
+#define HFI_PROP_RAW_RESOLUTION 0x03000178
+#define HFI_PROP_TOTAL_PEAK_BITRATE 0x0300017C
#define HFI_PROP_COMV_BUFFER_COUNT 0x03000193
#define HFI_PROP_END 0x03FFFFFF
@@ -111,13 +141,13 @@ enum hfi_codec_type {
};
enum hfi_picture_type {
- HFI_PICTURE_IDR = 0x00000001,
- HFI_PICTURE_P = 0x00000002,
- HFI_PICTURE_B = 0x00000004,
- HFI_PICTURE_I = 0x00000008,
- HFI_PICTURE_CRA = 0x00000010,
- HFI_PICTURE_BLA = 0x00000020,
- HFI_PICTURE_NOSHOW = 0x00000040,
+ HFI_GEN2_PICTURE_IDR = 0x00000001,
+ HFI_GEN2_PICTURE_P = 0x00000002,
+ HFI_GEN2_PICTURE_B = 0x00000004,
+ HFI_GEN2_PICTURE_I = 0x00000008,
+ HFI_GEN2_PICTURE_CRA = 0x00000010,
+ HFI_GEN2_PICTURE_BLA = 0x00000020,
+ HFI_GEN2_PICTURE_NOSHOW = 0x00000040,
};
enum hfi_buffer_type {
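
The HFI_GEN2_ prefixes added here mirror the HFI_GEN1_ rename earlier in the series: both generations previously defined the same HFI_PICTURE_* names, so prefixing keeps the two headers from clashing now that a single driver handles both. The keyframe mask built from the renamed bits, as used by iris_hfi_gen2_get_driver_buffer_flags() in the response diff below:

u32 keyframe = HFI_GEN2_PICTURE_IDR | HFI_GEN2_PICTURE_I |
               HFI_GEN2_PICTURE_CRA | HFI_GEN2_PICTURE_BLA;
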
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c b/drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c
index a8c30fc5c0d0..2f1f118eae4f 100644
--- a/drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c
+++ b/drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c
@@ -29,7 +29,8 @@ struct iris_hfi_gen2_packet_handle {
int (*handle)(struct iris_inst *inst, struct iris_hfi_packet *pkt);
};
-static u32 iris_hfi_gen2_buf_type_to_driver(enum hfi_buffer_type buf_type)
+static u32 iris_hfi_gen2_buf_type_to_driver(struct iris_inst *inst,
+ enum hfi_buffer_type buf_type)
{
switch (buf_type) {
case HFI_BUFFER_BITSTREAM:
@@ -47,7 +48,10 @@ static u32 iris_hfi_gen2_buf_type_to_driver(enum hfi_buffer_type buf_type)
case HFI_BUFFER_LINE:
return BUF_LINE;
case HFI_BUFFER_DPB:
- return BUF_DPB;
+ if (inst->domain == DECODER)
+ return BUF_DPB;
+ else
+ return BUF_SCRATCH_2;
case HFI_BUFFER_PERSIST:
return BUF_PERSIST;
default:
@@ -87,17 +91,18 @@ static bool iris_hfi_gen2_is_valid_hfi_port(u32 port, u32 buffer_type)
static int iris_hfi_gen2_get_driver_buffer_flags(struct iris_inst *inst, u32 hfi_flags)
{
- u32 keyframe = HFI_PICTURE_IDR | HFI_PICTURE_I | HFI_PICTURE_CRA | HFI_PICTURE_BLA;
struct iris_inst_hfi_gen2 *inst_hfi_gen2 = to_iris_inst_hfi_gen2(inst);
+ u32 keyframe = HFI_GEN2_PICTURE_IDR | HFI_GEN2_PICTURE_I |
+ HFI_GEN2_PICTURE_CRA | HFI_GEN2_PICTURE_BLA;
u32 driver_flags = 0;
- if (inst_hfi_gen2->hfi_frame_info.picture_type & HFI_PICTURE_NOSHOW)
+ if (inst_hfi_gen2->hfi_frame_info.picture_type & HFI_GEN2_PICTURE_NOSHOW)
driver_flags |= V4L2_BUF_FLAG_ERROR;
else if (inst_hfi_gen2->hfi_frame_info.picture_type & keyframe)
driver_flags |= V4L2_BUF_FLAG_KEYFRAME;
- else if (inst_hfi_gen2->hfi_frame_info.picture_type & HFI_PICTURE_P)
+ else if (inst_hfi_gen2->hfi_frame_info.picture_type & HFI_GEN2_PICTURE_P)
driver_flags |= V4L2_BUF_FLAG_PFRAME;
- else if (inst_hfi_gen2->hfi_frame_info.picture_type & HFI_PICTURE_B)
+ else if (inst_hfi_gen2->hfi_frame_info.picture_type & HFI_GEN2_PICTURE_B)
driver_flags |= V4L2_BUF_FLAG_BFRAME;
if (inst_hfi_gen2->hfi_frame_info.data_corrupt || inst_hfi_gen2->hfi_frame_info.overflow)
@@ -420,11 +425,10 @@ static void iris_hfi_gen2_handle_dequeue_buffers(struct iris_inst *inst)
static int iris_hfi_gen2_handle_release_internal_buffer(struct iris_inst *inst,
struct iris_hfi_buffer *buffer)
{
- u32 buf_type = iris_hfi_gen2_buf_type_to_driver(buffer->type);
+ u32 buf_type = iris_hfi_gen2_buf_type_to_driver(inst, buffer->type);
struct iris_buffers *buffers = &inst->buffers[buf_type];
struct iris_buffer *buf, *iter;
bool found = false;
- int ret = 0;
list_for_each_entry(iter, &buffers->list, list) {
if (iter->device_addr == buffer->base_address) {
@@ -437,10 +441,8 @@ static int iris_hfi_gen2_handle_release_internal_buffer(struct iris_inst *inst,
return -EINVAL;
buf->attr &= ~BUF_ATTR_QUEUED;
- if (buf->attr & BUF_ATTR_PENDING_RELEASE)
- ret = iris_destroy_internal_buffer(inst, buf);
- return ret;
+ return iris_destroy_internal_buffer(inst, buf);
}
static int iris_hfi_gen2_handle_session_stop(struct iris_inst *inst,
@@ -478,12 +480,22 @@ static int iris_hfi_gen2_handle_session_buffer(struct iris_inst *inst,
if (!iris_hfi_gen2_is_valid_hfi_port(pkt->port, buffer->type))
return 0;
- if (buffer->type == HFI_BUFFER_BITSTREAM)
- return iris_hfi_gen2_handle_input_buffer(inst, buffer);
- else if (buffer->type == HFI_BUFFER_RAW)
- return iris_hfi_gen2_handle_output_buffer(inst, buffer);
- else
- return iris_hfi_gen2_handle_release_internal_buffer(inst, buffer);
+ if (inst->domain == DECODER) {
+ if (buffer->type == HFI_BUFFER_BITSTREAM)
+ return iris_hfi_gen2_handle_input_buffer(inst, buffer);
+ else if (buffer->type == HFI_BUFFER_RAW)
+ return iris_hfi_gen2_handle_output_buffer(inst, buffer);
+ else
+ return iris_hfi_gen2_handle_release_internal_buffer(inst, buffer);
+ } else {
+ if (buffer->type == HFI_BUFFER_RAW)
+ return iris_hfi_gen2_handle_input_buffer(inst, buffer);
+ else if (buffer->type == HFI_BUFFER_BITSTREAM)
+ return iris_hfi_gen2_handle_output_buffer(inst, buffer);
+ else
+ return iris_hfi_gen2_handle_release_internal_buffer(inst, buffer);
+ }
+ return 0;
}
static int iris_hfi_gen2_handle_session_drain(struct iris_inst *inst,
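
One subtlety in the response path above: firmware uses the single HFI_BUFFER_DPB id for both domains, while the driver now tracks that buffer as BUF_DPB for decoders and BUF_SCRATCH_2 for encoders, matching the command-side mapping earlier. In sketch form, for the HFI_BUFFER_DPB case only:

u32 buf_type = (inst->domain == DECODER) ? BUF_DPB : BUF_SCRATCH_2;
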
diff --git a/drivers/media/platform/qcom/iris/iris_hfi_queue.c b/drivers/media/platform/qcom/iris/iris_hfi_queue.c
index 221dcd09e1e1..b3ed06297953 100644
--- a/drivers/media/platform/qcom/iris/iris_hfi_queue.c
+++ b/drivers/media/platform/qcom/iris/iris_hfi_queue.c
@@ -142,7 +142,6 @@ int iris_hfi_queue_cmd_write(struct iris_core *core, void *pkt, u32 pkt_size)
}
mutex_unlock(&core->lock);
- pm_runtime_mark_last_busy(core->dev);
pm_runtime_put_autosuspend(core->dev);
return 0;
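
The explicit pm_runtime_mark_last_busy() can be dropped above because, in current kernels, pm_runtime_put_autosuspend() updates the last-busy timestamp itself before dropping the usage count, leaving a single call with the same semantics:

/* Equivalent idiom after the change: one call marks last-busy and
 * drops the reference with autosuspend semantics. */
pm_runtime_put_autosuspend(core->dev);
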
diff --git a/drivers/media/platform/qcom/iris/iris_instance.h b/drivers/media/platform/qcom/iris/iris_instance.h
index 0e1f5799b72d..5982d7adefea 100644
--- a/drivers/media/platform/qcom/iris/iris_instance.h
+++ b/drivers/media/platform/qcom/iris/iris_instance.h
@@ -12,6 +12,20 @@
#include "iris_core.h"
#include "iris_utils.h"
+#define DEFAULT_WIDTH 320
+#define DEFAULT_HEIGHT 240
+
+enum iris_fmt_type {
+ IRIS_FMT_H264,
+ IRIS_FMT_HEVC,
+ IRIS_FMT_VP9,
+};
+
+struct iris_fmt {
+ u32 pixfmt;
+ u32 type;
+};
+
/**
* struct iris_inst - holds per video instance parameters
*
@@ -24,7 +38,9 @@
* @fmt_src: structure of v4l2_format for source
* @fmt_dst: structure of v4l2_format for destination
* @ctrl_handler: reference of v4l2 ctrl handler
+ * @domain: domain type: encoder or decoder
* @crop: structure of crop info
+ * @compose: structure of compose info
* @completion: structure of signal completions
* @flush_completion: structure of signal completions for flush cmd
* @flush_responses_pending: counter to track number of pending flush responses
@@ -45,6 +61,9 @@
* @metadata_idx: index for metadata buffer
* @codec: codec type
* @last_buffer_dequeued: a flag to indicate that last buffer is sent by driver
+ * @frame_rate: frame rate of current instance
+ * @operating_rate: operating rate of current instance
+ * @hfi_rc_type: rate control type
*/
struct iris_inst {
@@ -57,7 +76,9 @@ struct iris_inst {
struct v4l2_format *fmt_src;
struct v4l2_format *fmt_dst;
struct v4l2_ctrl_handler ctrl_handler;
+ enum domain_type domain;
struct iris_hfi_rect_desc crop;
+ struct iris_hfi_rect_desc compose;
struct completion completion;
struct completion flush_completion;
u32 flush_responses_pending;
@@ -78,6 +99,9 @@ struct iris_inst {
u32 metadata_idx;
u32 codec;
bool last_buffer_dequeued;
+ u32 frame_rate;
+ u32 operating_rate;
+ u32 hfi_rc_type;
};
#endif
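
With the new compose rectangle in place, a decoder instance carries two rectangles: inst->crop is consumed on the OUTPUT side and inst->compose on the CAPTURE side, exactly as iris_hfi_gen2_set_crop_offsets() selects them per plane earlier in this series. The offsets sent to firmware are plain differences; for the capture side, for example:

u32 right_offset  = inst->fmt_dst->fmt.pix_mp.width  - inst->compose.width;
u32 bottom_offset = inst->fmt_dst->fmt.pix_mp.height - inst->compose.height;
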
diff --git a/drivers/media/platform/qcom/iris/iris_platform_common.h b/drivers/media/platform/qcom/iris/iris_platform_common.h
index adafdce8a856..58d05e0a112e 100644
--- a/drivers/media/platform/qcom/iris/iris_platform_common.h
+++ b/drivers/media/platform/qcom/iris/iris_platform_common.h
@@ -7,6 +7,7 @@
#define __IRIS_PLATFORM_COMMON_H__
#include <linux/bits.h>
+#include "iris_buffer.h"
struct iris_core;
struct iris_inst;
@@ -21,7 +22,13 @@ struct iris_inst;
#define DEFAULT_MAX_HOST_BUF_COUNT 64
#define DEFAULT_MAX_HOST_BURST_BUF_COUNT 256
#define DEFAULT_FPS 30
-#define NUM_MBS_8K ((8192 * 4352) / 256)
+#define MAXIMUM_FPS 480
+#define NUM_MBS_8K ((8192 * 4352) / 256)
+#define MIN_QP_8BIT 1
+#define MAX_QP 51
+#define MAX_QP_HEVC 63
+#define DEFAULT_QP 20
+#define BITRATE_DEFAULT 20000000
enum stage_type {
STAGE_1 = 1,
@@ -38,11 +45,15 @@ extern struct iris_platform_data qcs8300_data;
extern struct iris_platform_data sm8250_data;
extern struct iris_platform_data sm8550_data;
extern struct iris_platform_data sm8650_data;
+extern struct iris_platform_data sm8750_data;
enum platform_clk_type {
- IRIS_AXI_CLK,
+ IRIS_AXI_CLK, /* AXI0 on platforms with multiple AXI clocks */
IRIS_CTRL_CLK,
IRIS_HW_CLK,
+ IRIS_AXI1_CLK,
+ IRIS_CTRL_FREERUN_CLK,
+ IRIS_HW_FREERUN_CLK,
};
struct platform_clk_data {
@@ -78,6 +89,8 @@ struct platform_inst_caps {
u32 mb_cycles_fw;
u32 mb_cycles_fw_vpp;
u32 num_comv;
+ u32 max_frame_rate;
+ u32 max_operating_rate;
};
enum platform_inst_fw_cap_type {
@@ -88,6 +101,7 @@ enum platform_inst_fw_cap_type {
LEVEL_HEVC,
LEVEL_VP9,
INPUT_BUF_HOST_MAX_COUNT,
+ OUTPUT_BUF_HOST_MAX_COUNT,
STAGE,
PIPE,
POC,
@@ -95,6 +109,37 @@ enum platform_inst_fw_cap_type {
BIT_DEPTH,
RAP_FRAME,
TIER,
+ HEADER_MODE,
+ PREPEND_SPSPPS_TO_IDR,
+ BITRATE,
+ BITRATE_PEAK,
+ BITRATE_MODE,
+ FRAME_SKIP_MODE,
+ FRAME_RC_ENABLE,
+ GOP_SIZE,
+ ENTROPY_MODE,
+ MIN_FRAME_QP_H264,
+ MIN_FRAME_QP_HEVC,
+ MAX_FRAME_QP_H264,
+ MAX_FRAME_QP_HEVC,
+ I_FRAME_MIN_QP_H264,
+ I_FRAME_MIN_QP_HEVC,
+ P_FRAME_MIN_QP_H264,
+ P_FRAME_MIN_QP_HEVC,
+ B_FRAME_MIN_QP_H264,
+ B_FRAME_MIN_QP_HEVC,
+ I_FRAME_MAX_QP_H264,
+ I_FRAME_MAX_QP_HEVC,
+ P_FRAME_MAX_QP_H264,
+ P_FRAME_MAX_QP_HEVC,
+ B_FRAME_MAX_QP_H264,
+ B_FRAME_MAX_QP_HEVC,
+ I_FRAME_QP_H264,
+ I_FRAME_QP_HEVC,
+ P_FRAME_QP_H264,
+ P_FRAME_QP_HEVC,
+ B_FRAME_QP_H264,
+ B_FRAME_QP_HEVC,
INST_FW_CAP_MAX,
};
@@ -149,6 +194,7 @@ struct iris_platform_data {
void (*init_hfi_command_ops)(struct iris_core *core);
void (*init_hfi_response_ops)(struct iris_core *core);
struct iris_inst *(*get_instance)(void);
+ u32 (*get_vpu_buffer_size)(struct iris_inst *inst, enum iris_buffer_type buffer_type);
const struct vpu_ops *vpu_ops;
void (*set_preset_registers)(struct iris_core *core);
const struct icc_info *icc_tbl;
@@ -169,8 +215,10 @@ struct iris_platform_data {
const char *fwname;
u32 pas_id;
struct platform_inst_caps *inst_caps;
- struct platform_inst_fw_cap *inst_fw_caps;
- u32 inst_fw_caps_size;
+ struct platform_inst_fw_cap *inst_fw_caps_dec;
+ u32 inst_fw_caps_dec_size;
+ struct platform_inst_fw_cap *inst_fw_caps_enc;
+ u32 inst_fw_caps_enc_size;
struct tz_cp_config *tz_cp_config_data;
u32 core_arch;
u32 hw_response_timeout;
@@ -179,14 +227,20 @@ struct iris_platform_data {
u32 max_session_count;
/* max number of macroblocks per frame supported */
u32 max_core_mbpf;
- const u32 *input_config_params_default;
- unsigned int input_config_params_default_size;
- const u32 *input_config_params_hevc;
- unsigned int input_config_params_hevc_size;
- const u32 *input_config_params_vp9;
- unsigned int input_config_params_vp9_size;
- const u32 *output_config_params;
- unsigned int output_config_params_size;
+ /* max number of macroblocks per second supported */
+ u32 max_core_mbps;
+ const u32 *dec_input_config_params_default;
+ unsigned int dec_input_config_params_default_size;
+ const u32 *dec_input_config_params_hevc;
+ unsigned int dec_input_config_params_hevc_size;
+ const u32 *dec_input_config_params_vp9;
+ unsigned int dec_input_config_params_vp9_size;
+ const u32 *dec_output_config_params;
+ unsigned int dec_output_config_params_size;
+ const u32 *enc_input_config_params;
+ unsigned int enc_input_config_params_size;
+ const u32 *enc_output_config_params;
+ unsigned int enc_output_config_params_size;
const u32 *dec_input_prop;
unsigned int dec_input_prop_size;
const u32 *dec_output_prop_avc;
@@ -199,6 +253,10 @@ struct iris_platform_data {
unsigned int dec_ip_int_buf_tbl_size;
const u32 *dec_op_int_buf_tbl;
unsigned int dec_op_int_buf_tbl_size;
+ const u32 *enc_ip_int_buf_tbl;
+ unsigned int enc_ip_int_buf_tbl_size;
+ const u32 *enc_op_int_buf_tbl;
+ unsigned int enc_op_int_buf_tbl_size;
};
#endif
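
The decoder/encoder split of the capability tables above implies that instance setup now selects a table per domain. A minimal sketch of that selection (iris_get_fw_caps() is a hypothetical helper; the real call site is outside this excerpt):

static void iris_get_fw_caps(struct iris_inst *inst,
                             struct platform_inst_fw_cap **caps,
                             u32 *count)
{
        const struct iris_platform_data *p = inst->core->iris_platform_data;

        if (inst->domain == DECODER) {
                *caps = p->inst_fw_caps_dec;
                *count = p->inst_fw_caps_dec_size;
        } else {
                *caps = p->inst_fw_caps_enc;
                *count = p->inst_fw_caps_enc_size;
        }
}
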
diff --git a/drivers/media/platform/qcom/iris/iris_platform_gen2.c b/drivers/media/platform/qcom/iris/iris_platform_gen2.c
index d3026b2bcb70..36d69cc73986 100644
--- a/drivers/media/platform/qcom/iris/iris_platform_gen2.c
+++ b/drivers/media/platform/qcom/iris/iris_platform_gen2.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025 Linaro Ltd
*/
#include "iris_core.h"
@@ -8,14 +9,17 @@
#include "iris_hfi_gen2.h"
#include "iris_hfi_gen2_defines.h"
#include "iris_platform_common.h"
+#include "iris_vpu_buffer.h"
#include "iris_vpu_common.h"
#include "iris_platform_qcs8300.h"
#include "iris_platform_sm8650.h"
+#include "iris_platform_sm8750.h"
#define VIDEO_ARCH_LX 1
+#define BITRATE_MAX 245000000
-static struct platform_inst_fw_cap inst_fw_cap_sm8550[] = {
+static struct platform_inst_fw_cap inst_fw_cap_sm8550_dec[] = {
{
.cap_id = PROFILE_H264,
.min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
@@ -199,6 +203,393 @@ static struct platform_inst_fw_cap inst_fw_cap_sm8550[] = {
},
};
+static struct platform_inst_fw_cap inst_fw_cap_sm8550_enc[] = {
+ {
+ .cap_id = PROFILE_H264,
+ .min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
+ .max = V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_HIGH),
+ .value = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
+ .hfi_id = HFI_PROP_PROFILE,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_profile,
+ },
+ {
+ .cap_id = PROFILE_HEVC,
+ .min = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
+ .max = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10),
+ .value = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
+ .hfi_id = HFI_PROP_PROFILE,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_profile,
+ },
+ {
+ .cap_id = LEVEL_H264,
+ .min = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
+ .max = V4L2_MPEG_VIDEO_H264_LEVEL_6_0,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1B) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_3) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_6_0),
+ .value = V4L2_MPEG_VIDEO_H264_LEVEL_5_0,
+ .hfi_id = HFI_PROP_LEVEL,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_level,
+ },
+ {
+ .cap_id = LEVEL_HEVC,
+ .min = V4L2_MPEG_VIDEO_HEVC_LEVEL_1,
+ .max = V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_2) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_3) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_4) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_5) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_6) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2),
+ .value = V4L2_MPEG_VIDEO_HEVC_LEVEL_5,
+ .hfi_id = HFI_PROP_LEVEL,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_level,
+ },
+ {
+ .cap_id = STAGE,
+ .min = STAGE_1,
+ .max = STAGE_2,
+ .step_or_mask = 1,
+ .value = STAGE_2,
+ .hfi_id = HFI_PROP_STAGE,
+ .set = iris_set_stage,
+ },
+ {
+ .cap_id = HEADER_MODE,
+ .min = V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE,
+ .max = V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) |
+ BIT(V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME),
+ .value = V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
+ .hfi_id = HFI_PROP_SEQ_HEADER_MODE,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_header_mode_gen2,
+ },
+ {
+ .cap_id = PREPEND_SPSPPS_TO_IDR,
+ .min = 0,
+ .max = 1,
+ .step_or_mask = 1,
+ .value = 0,
+ },
+ {
+ .cap_id = BITRATE,
+ .min = 1,
+ .max = BITRATE_MAX,
+ .step_or_mask = 1,
+ .value = BITRATE_DEFAULT,
+ .hfi_id = HFI_PROP_TOTAL_BITRATE,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ .set = iris_set_bitrate,
+ },
+ {
+ .cap_id = BITRATE_PEAK,
+ .min = 1,
+ .max = BITRATE_MAX,
+ .step_or_mask = 1,
+ .value = BITRATE_DEFAULT,
+ .hfi_id = HFI_PROP_TOTAL_PEAK_BITRATE,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ .set = iris_set_peak_bitrate,
+ },
+ {
+ .cap_id = BITRATE_MODE,
+ .min = V4L2_MPEG_VIDEO_BITRATE_MODE_VBR,
+ .max = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) |
+ BIT(V4L2_MPEG_VIDEO_BITRATE_MODE_CBR),
+ .value = V4L2_MPEG_VIDEO_BITRATE_MODE_VBR,
+ .hfi_id = HFI_PROP_RATE_CONTROL,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_bitrate_mode_gen2,
+ },
+ {
+ .cap_id = FRAME_SKIP_MODE,
+ .min = V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_DISABLED,
+ .max = V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_DISABLED) |
+ BIT(V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_LEVEL_LIMIT) |
+ BIT(V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT),
+ .value = V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_DISABLED,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ },
+ {
+ .cap_id = FRAME_RC_ENABLE,
+ .min = 0,
+ .max = 1,
+ .step_or_mask = 1,
+ .value = 1,
+ },
+ {
+ .cap_id = GOP_SIZE,
+ .min = 0,
+ .max = INT_MAX,
+ .step_or_mask = 1,
+ .value = 2 * DEFAULT_FPS - 1,
+ .hfi_id = HFI_PROP_MAX_GOP_FRAMES,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ .set = iris_set_u32,
+ },
+ {
+ .cap_id = ENTROPY_MODE,
+ .min = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC,
+ .max = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC) |
+ BIT(V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC),
+ .value = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC,
+ .hfi_id = HFI_PROP_CABAC_SESSION,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_entropy_mode_gen2,
+ },
+ {
+ .cap_id = MIN_FRAME_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ .hfi_id = HFI_PROP_MIN_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT,
+ .set = iris_set_min_qp,
+ },
+ {
+ .cap_id = MIN_FRAME_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ .hfi_id = HFI_PROP_MIN_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT,
+ .set = iris_set_min_qp,
+ },
+ {
+ .cap_id = MAX_FRAME_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ .hfi_id = HFI_PROP_MAX_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT,
+ .set = iris_set_max_qp,
+ },
+ {
+ .cap_id = MAX_FRAME_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ .hfi_id = HFI_PROP_MAX_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT,
+ .set = iris_set_max_qp,
+ },
+ {
+ .cap_id = I_FRAME_MIN_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ },
+ {
+ .cap_id = I_FRAME_MIN_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ },
+ {
+ .cap_id = P_FRAME_MIN_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ },
+ {
+ .cap_id = P_FRAME_MIN_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ },
+ {
+ .cap_id = B_FRAME_MIN_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ },
+ {
+ .cap_id = B_FRAME_MIN_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ },
+ {
+ .cap_id = I_FRAME_MAX_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ },
+ {
+ .cap_id = I_FRAME_MAX_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ },
+ {
+ .cap_id = P_FRAME_MAX_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ },
+ {
+ .cap_id = P_FRAME_MAX_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ },
+ {
+ .cap_id = B_FRAME_MAX_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ },
+ {
+ .cap_id = B_FRAME_MAX_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ },
+ {
+ .cap_id = I_FRAME_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = DEFAULT_QP,
+ .hfi_id = HFI_PROP_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ .set = iris_set_frame_qp,
+ },
+ {
+ .cap_id = I_FRAME_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = DEFAULT_QP,
+ .hfi_id = HFI_PROP_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ .set = iris_set_frame_qp,
+ },
+ {
+ .cap_id = P_FRAME_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = DEFAULT_QP,
+ .hfi_id = HFI_PROP_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ .set = iris_set_frame_qp,
+ },
+ {
+ .cap_id = P_FRAME_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = DEFAULT_QP,
+ .hfi_id = HFI_PROP_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ .set = iris_set_frame_qp,
+ },
+ {
+ .cap_id = B_FRAME_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = DEFAULT_QP,
+ .hfi_id = HFI_PROP_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ .set = iris_set_frame_qp,
+ },
+ {
+ .cap_id = B_FRAME_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = DEFAULT_QP,
+ .hfi_id = HFI_PROP_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ .set = iris_set_frame_qp,
+ },
+ {
+ .cap_id = INPUT_BUF_HOST_MAX_COUNT,
+ .min = DEFAULT_MAX_HOST_BUF_COUNT,
+ .max = DEFAULT_MAX_HOST_BURST_BUF_COUNT,
+ .step_or_mask = 1,
+ .value = DEFAULT_MAX_HOST_BUF_COUNT,
+ .hfi_id = HFI_PROP_BUFFER_HOST_MAX_COUNT,
+ .flags = CAP_FLAG_INPUT_PORT,
+ .set = iris_set_u32,
+ },
+ {
+ .cap_id = OUTPUT_BUF_HOST_MAX_COUNT,
+ .min = DEFAULT_MAX_HOST_BUF_COUNT,
+ .max = DEFAULT_MAX_HOST_BURST_BUF_COUNT,
+ .step_or_mask = 1,
+ .value = DEFAULT_MAX_HOST_BUF_COUNT,
+ .hfi_id = HFI_PROP_BUFFER_HOST_MAX_COUNT,
+ .flags = CAP_FLAG_OUTPUT_PORT,
+ .set = iris_set_u32,
+ },
+};
+
static struct platform_inst_caps platform_inst_cap_sm8550 = {
.min_frame_width = 96,
.max_frame_width = 8192,
@@ -209,6 +600,8 @@ static struct platform_inst_caps platform_inst_cap_sm8550 = {
.mb_cycles_fw = 489583,
.mb_cycles_fw_vpp = 66234,
.num_comv = 0,
+ .max_frame_rate = MAXIMUM_FPS,
+ .max_operating_rate = MAXIMUM_FPS,
};
static void iris_set_sm8550_preset_registers(struct iris_core *core)
@@ -289,11 +682,25 @@ static const u32 sm8550_vdec_input_config_param_vp9[] = {
HFI_PROP_LEVEL,
};
+static const u32 sm8550_venc_input_config_params[] = {
+ HFI_PROP_COLOR_FORMAT,
+ HFI_PROP_RAW_RESOLUTION,
+ HFI_PROP_CROP_OFFSETS,
+ HFI_PROP_LINEAR_STRIDE_SCANLINE,
+ HFI_PROP_SIGNAL_COLOR_INFO,
+};
+
static const u32 sm8550_vdec_output_config_params[] = {
HFI_PROP_COLOR_FORMAT,
HFI_PROP_LINEAR_STRIDE_SCANLINE,
};
+static const u32 sm8550_venc_output_config_params[] = {
+ HFI_PROP_BITSTREAM_RESOLUTION,
+ HFI_PROP_CROP_OFFSETS,
+ HFI_PROP_FRAME_RATE,
+};
+
static const u32 sm8550_vdec_subscribe_input_properties[] = {
HFI_PROP_NO_OUTPUT,
};
@@ -322,10 +729,19 @@ static const u32 sm8550_dec_op_int_buf_tbl[] = {
BUF_DPB,
};
+static const u32 sm8550_enc_op_int_buf_tbl[] = {
+ BUF_BIN,
+ BUF_COMV,
+ BUF_NON_COMV,
+ BUF_LINE,
+ BUF_SCRATCH_2,
+};
+
struct iris_platform_data sm8550_data = {
.get_instance = iris_hfi_gen2_get_instance,
.init_hfi_command_ops = iris_hfi_gen2_command_ops_init,
.init_hfi_response_ops = iris_hfi_gen2_response_ops_init,
+ .get_vpu_buffer_size = iris_vpu_buf_size,
.vpu_ops = &iris_vpu3_ops,
.set_preset_registers = iris_set_sm8550_preset_registers,
.icc_tbl = sm8550_icc_table,
@@ -345,8 +761,10 @@ struct iris_platform_data sm8550_data = {
.fwname = "qcom/vpu/vpu30_p4.mbn",
.pas_id = IRIS_PAS_ID,
.inst_caps = &platform_inst_cap_sm8550,
- .inst_fw_caps = inst_fw_cap_sm8550,
- .inst_fw_caps_size = ARRAY_SIZE(inst_fw_cap_sm8550),
+ .inst_fw_caps_dec = inst_fw_cap_sm8550_dec,
+ .inst_fw_caps_dec_size = ARRAY_SIZE(inst_fw_cap_sm8550_dec),
+ .inst_fw_caps_enc = inst_fw_cap_sm8550_enc,
+ .inst_fw_caps_enc_size = ARRAY_SIZE(inst_fw_cap_sm8550_enc),
.tz_cp_config_data = &tz_cp_config_sm8550,
.core_arch = VIDEO_ARCH_LX,
.hw_response_timeout = HW_RESPONSE_TIMEOUT_VALUE,
@@ -354,22 +772,33 @@ struct iris_platform_data sm8550_data = {
.num_vpp_pipe = 4,
.max_session_count = 16,
.max_core_mbpf = NUM_MBS_8K * 2,
- .input_config_params_default =
+ .max_core_mbps = ((7680 * 4320) / 256) * 60,
+ .dec_input_config_params_default =
sm8550_vdec_input_config_params_default,
- .input_config_params_default_size =
+ .dec_input_config_params_default_size =
ARRAY_SIZE(sm8550_vdec_input_config_params_default),
- .input_config_params_hevc =
+ .dec_input_config_params_hevc =
sm8550_vdec_input_config_param_hevc,
- .input_config_params_hevc_size =
+ .dec_input_config_params_hevc_size =
ARRAY_SIZE(sm8550_vdec_input_config_param_hevc),
- .input_config_params_vp9 =
+ .dec_input_config_params_vp9 =
sm8550_vdec_input_config_param_vp9,
- .input_config_params_vp9_size =
+ .dec_input_config_params_vp9_size =
ARRAY_SIZE(sm8550_vdec_input_config_param_vp9),
- .output_config_params =
+ .dec_output_config_params =
sm8550_vdec_output_config_params,
- .output_config_params_size =
+ .dec_output_config_params_size =
ARRAY_SIZE(sm8550_vdec_output_config_params),
+
+ .enc_input_config_params =
+ sm8550_venc_input_config_params,
+ .enc_input_config_params_size =
+ ARRAY_SIZE(sm8550_venc_input_config_params),
+ .enc_output_config_params =
+ sm8550_venc_output_config_params,
+ .enc_output_config_params_size =
+ ARRAY_SIZE(sm8550_venc_output_config_params),
+
.dec_input_prop = sm8550_vdec_subscribe_input_properties,
.dec_input_prop_size = ARRAY_SIZE(sm8550_vdec_subscribe_input_properties),
.dec_output_prop_avc = sm8550_vdec_subscribe_output_properties_avc,
@@ -386,6 +815,9 @@ struct iris_platform_data sm8550_data = {
.dec_ip_int_buf_tbl_size = ARRAY_SIZE(sm8550_dec_ip_int_buf_tbl),
.dec_op_int_buf_tbl = sm8550_dec_op_int_buf_tbl,
.dec_op_int_buf_tbl_size = ARRAY_SIZE(sm8550_dec_op_int_buf_tbl),
+
+ .enc_op_int_buf_tbl = sm8550_enc_op_int_buf_tbl,
+ .enc_op_int_buf_tbl_size = ARRAY_SIZE(sm8550_enc_op_int_buf_tbl),
};
/*
@@ -399,6 +831,7 @@ struct iris_platform_data sm8650_data = {
.get_instance = iris_hfi_gen2_get_instance,
.init_hfi_command_ops = iris_hfi_gen2_command_ops_init,
.init_hfi_response_ops = iris_hfi_gen2_response_ops_init,
+ .get_vpu_buffer_size = iris_vpu33_buf_size,
.vpu_ops = &iris_vpu33_ops,
.set_preset_registers = iris_set_sm8550_preset_registers,
.icc_tbl = sm8550_icc_table,
@@ -420,8 +853,10 @@ struct iris_platform_data sm8650_data = {
.fwname = "qcom/vpu/vpu33_p4.mbn",
.pas_id = IRIS_PAS_ID,
.inst_caps = &platform_inst_cap_sm8550,
- .inst_fw_caps = inst_fw_cap_sm8550,
- .inst_fw_caps_size = ARRAY_SIZE(inst_fw_cap_sm8550),
+ .inst_fw_caps_dec = inst_fw_cap_sm8550_dec,
+ .inst_fw_caps_dec_size = ARRAY_SIZE(inst_fw_cap_sm8550_dec),
+ .inst_fw_caps_enc = inst_fw_cap_sm8550_enc,
+ .inst_fw_caps_enc_size = ARRAY_SIZE(inst_fw_cap_sm8550_enc),
.tz_cp_config_data = &tz_cp_config_sm8550,
.core_arch = VIDEO_ARCH_LX,
.hw_response_timeout = HW_RESPONSE_TIMEOUT_VALUE,
@@ -429,22 +864,33 @@ struct iris_platform_data sm8650_data = {
.num_vpp_pipe = 4,
.max_session_count = 16,
.max_core_mbpf = NUM_MBS_8K * 2,
- .input_config_params_default =
+ .max_core_mbps = ((7680 * 4320) / 256) * 60,
+ .dec_input_config_params_default =
sm8550_vdec_input_config_params_default,
- .input_config_params_default_size =
+ .dec_input_config_params_default_size =
ARRAY_SIZE(sm8550_vdec_input_config_params_default),
- .input_config_params_hevc =
+ .dec_input_config_params_hevc =
sm8550_vdec_input_config_param_hevc,
- .input_config_params_hevc_size =
+ .dec_input_config_params_hevc_size =
ARRAY_SIZE(sm8550_vdec_input_config_param_hevc),
- .input_config_params_vp9 =
+ .dec_input_config_params_vp9 =
sm8550_vdec_input_config_param_vp9,
- .input_config_params_vp9_size =
+ .dec_input_config_params_vp9_size =
ARRAY_SIZE(sm8550_vdec_input_config_param_vp9),
- .output_config_params =
+ .dec_output_config_params =
sm8550_vdec_output_config_params,
- .output_config_params_size =
+ .dec_output_config_params_size =
ARRAY_SIZE(sm8550_vdec_output_config_params),
+
+ .enc_input_config_params =
+ sm8550_venc_input_config_params,
+ .enc_input_config_params_size =
+ ARRAY_SIZE(sm8550_venc_input_config_params),
+ .enc_output_config_params =
+ sm8550_venc_output_config_params,
+ .enc_output_config_params_size =
+ ARRAY_SIZE(sm8550_venc_output_config_params),
+
.dec_input_prop = sm8550_vdec_subscribe_input_properties,
.dec_input_prop_size = ARRAY_SIZE(sm8550_vdec_subscribe_input_properties),
.dec_output_prop_avc = sm8550_vdec_subscribe_output_properties_avc,
@@ -461,6 +907,90 @@ struct iris_platform_data sm8650_data = {
.dec_ip_int_buf_tbl_size = ARRAY_SIZE(sm8550_dec_ip_int_buf_tbl),
.dec_op_int_buf_tbl = sm8550_dec_op_int_buf_tbl,
.dec_op_int_buf_tbl_size = ARRAY_SIZE(sm8550_dec_op_int_buf_tbl),
+
+ .enc_op_int_buf_tbl = sm8550_enc_op_int_buf_tbl,
+ .enc_op_int_buf_tbl_size = ARRAY_SIZE(sm8550_enc_op_int_buf_tbl),
+};
+
+struct iris_platform_data sm8750_data = {
+ .get_instance = iris_hfi_gen2_get_instance,
+ .init_hfi_command_ops = iris_hfi_gen2_command_ops_init,
+ .init_hfi_response_ops = iris_hfi_gen2_response_ops_init,
+ .vpu_ops = &iris_vpu35_ops,
+ .set_preset_registers = iris_set_sm8550_preset_registers,
+ .icc_tbl = sm8550_icc_table,
+ .icc_tbl_size = ARRAY_SIZE(sm8550_icc_table),
+ .clk_rst_tbl = sm8750_clk_reset_table,
+ .clk_rst_tbl_size = ARRAY_SIZE(sm8750_clk_reset_table),
+ .bw_tbl_dec = sm8550_bw_table_dec,
+ .bw_tbl_dec_size = ARRAY_SIZE(sm8550_bw_table_dec),
+ .pmdomain_tbl = sm8550_pmdomain_table,
+ .pmdomain_tbl_size = ARRAY_SIZE(sm8550_pmdomain_table),
+ .opp_pd_tbl = sm8550_opp_pd_table,
+ .opp_pd_tbl_size = ARRAY_SIZE(sm8550_opp_pd_table),
+ .clk_tbl = sm8750_clk_table,
+ .clk_tbl_size = ARRAY_SIZE(sm8750_clk_table),
+ /* Upper bound of DMA address range */
+ .dma_mask = 0xe0000000 - 1,
+ .fwname = "qcom/vpu/vpu35_p4.mbn",
+ .pas_id = IRIS_PAS_ID,
+ .inst_caps = &platform_inst_cap_sm8550,
+ .inst_fw_caps_dec = inst_fw_cap_sm8550_dec,
+ .inst_fw_caps_dec_size = ARRAY_SIZE(inst_fw_cap_sm8550_dec),
+ .inst_fw_caps_enc = inst_fw_cap_sm8550_enc,
+ .inst_fw_caps_enc_size = ARRAY_SIZE(inst_fw_cap_sm8550_enc),
+ .tz_cp_config_data = &tz_cp_config_sm8550,
+ .core_arch = VIDEO_ARCH_LX,
+ .hw_response_timeout = HW_RESPONSE_TIMEOUT_VALUE,
+ .ubwc_config = &ubwc_config_sm8550,
+ .num_vpp_pipe = 4,
+ .max_session_count = 16,
+ .max_core_mbpf = NUM_MBS_8K * 2,
+ .dec_input_config_params_default =
+ sm8550_vdec_input_config_params_default,
+ .dec_input_config_params_default_size =
+ ARRAY_SIZE(sm8550_vdec_input_config_params_default),
+ .dec_input_config_params_hevc =
+ sm8550_vdec_input_config_param_hevc,
+ .dec_input_config_params_hevc_size =
+ ARRAY_SIZE(sm8550_vdec_input_config_param_hevc),
+ .dec_input_config_params_vp9 =
+ sm8550_vdec_input_config_param_vp9,
+ .dec_input_config_params_vp9_size =
+ ARRAY_SIZE(sm8550_vdec_input_config_param_vp9),
+ .dec_output_config_params =
+ sm8550_vdec_output_config_params,
+ .dec_output_config_params_size =
+ ARRAY_SIZE(sm8550_vdec_output_config_params),
+
+ .enc_input_config_params =
+ sm8550_venc_input_config_params,
+ .enc_input_config_params_size =
+ ARRAY_SIZE(sm8550_venc_input_config_params),
+ .enc_output_config_params =
+ sm8550_venc_output_config_params,
+ .enc_output_config_params_size =
+ ARRAY_SIZE(sm8550_venc_output_config_params),
+
+ .dec_input_prop = sm8550_vdec_subscribe_input_properties,
+ .dec_input_prop_size = ARRAY_SIZE(sm8550_vdec_subscribe_input_properties),
+ .dec_output_prop_avc = sm8550_vdec_subscribe_output_properties_avc,
+ .dec_output_prop_avc_size =
+ ARRAY_SIZE(sm8550_vdec_subscribe_output_properties_avc),
+ .dec_output_prop_hevc = sm8550_vdec_subscribe_output_properties_hevc,
+ .dec_output_prop_hevc_size =
+ ARRAY_SIZE(sm8550_vdec_subscribe_output_properties_hevc),
+ .dec_output_prop_vp9 = sm8550_vdec_subscribe_output_properties_vp9,
+ .dec_output_prop_vp9_size =
+ ARRAY_SIZE(sm8550_vdec_subscribe_output_properties_vp9),
+
+ .dec_ip_int_buf_tbl = sm8550_dec_ip_int_buf_tbl,
+ .dec_ip_int_buf_tbl_size = ARRAY_SIZE(sm8550_dec_ip_int_buf_tbl),
+ .dec_op_int_buf_tbl = sm8550_dec_op_int_buf_tbl,
+ .dec_op_int_buf_tbl_size = ARRAY_SIZE(sm8550_dec_op_int_buf_tbl),
+
+ .enc_op_int_buf_tbl = sm8550_enc_op_int_buf_tbl,
+ .enc_op_int_buf_tbl_size = ARRAY_SIZE(sm8550_enc_op_int_buf_tbl),
};
/*
@@ -472,6 +1002,7 @@ struct iris_platform_data qcs8300_data = {
.get_instance = iris_hfi_gen2_get_instance,
.init_hfi_command_ops = iris_hfi_gen2_command_ops_init,
.init_hfi_response_ops = iris_hfi_gen2_response_ops_init,
+ .get_vpu_buffer_size = iris_vpu_buf_size,
.vpu_ops = &iris_vpu3_ops,
.set_preset_registers = iris_set_sm8550_preset_registers,
.icc_tbl = sm8550_icc_table,
@@ -491,8 +1022,10 @@ struct iris_platform_data qcs8300_data = {
.fwname = "qcom/vpu/vpu30_p4_s6.mbn",
.pas_id = IRIS_PAS_ID,
.inst_caps = &platform_inst_cap_qcs8300,
- .inst_fw_caps = inst_fw_cap_qcs8300,
- .inst_fw_caps_size = ARRAY_SIZE(inst_fw_cap_qcs8300),
+ .inst_fw_caps_dec = inst_fw_cap_qcs8300_dec,
+ .inst_fw_caps_dec_size = ARRAY_SIZE(inst_fw_cap_qcs8300_dec),
+ .inst_fw_caps_enc = inst_fw_cap_qcs8300_enc,
+ .inst_fw_caps_enc_size = ARRAY_SIZE(inst_fw_cap_qcs8300_enc),
.tz_cp_config_data = &tz_cp_config_sm8550,
.core_arch = VIDEO_ARCH_LX,
.hw_response_timeout = HW_RESPONSE_TIMEOUT_VALUE,
@@ -500,22 +1033,33 @@ struct iris_platform_data qcs8300_data = {
.num_vpp_pipe = 2,
.max_session_count = 16,
.max_core_mbpf = ((4096 * 2176) / 256) * 4,
- .input_config_params_default =
+ .max_core_mbps = (((3840 * 2176) / 256) * 120),
+ .dec_input_config_params_default =
sm8550_vdec_input_config_params_default,
- .input_config_params_default_size =
+ .dec_input_config_params_default_size =
ARRAY_SIZE(sm8550_vdec_input_config_params_default),
- .input_config_params_hevc =
+ .dec_input_config_params_hevc =
sm8550_vdec_input_config_param_hevc,
- .input_config_params_hevc_size =
+ .dec_input_config_params_hevc_size =
ARRAY_SIZE(sm8550_vdec_input_config_param_hevc),
- .input_config_params_vp9 =
+ .dec_input_config_params_vp9 =
sm8550_vdec_input_config_param_vp9,
- .input_config_params_vp9_size =
+ .dec_input_config_params_vp9_size =
ARRAY_SIZE(sm8550_vdec_input_config_param_vp9),
- .output_config_params =
+ .dec_output_config_params =
sm8550_vdec_output_config_params,
- .output_config_params_size =
+ .dec_output_config_params_size =
ARRAY_SIZE(sm8550_vdec_output_config_params),
+
+ .enc_input_config_params =
+ sm8550_venc_input_config_params,
+ .enc_input_config_params_size =
+ ARRAY_SIZE(sm8550_venc_input_config_params),
+ .enc_output_config_params =
+ sm8550_venc_output_config_params,
+ .enc_output_config_params_size =
+ ARRAY_SIZE(sm8550_venc_output_config_params),
+
.dec_input_prop = sm8550_vdec_subscribe_input_properties,
.dec_input_prop_size = ARRAY_SIZE(sm8550_vdec_subscribe_input_properties),
.dec_output_prop_avc = sm8550_vdec_subscribe_output_properties_avc,
@@ -532,4 +1076,7 @@ struct iris_platform_data qcs8300_data = {
.dec_ip_int_buf_tbl_size = ARRAY_SIZE(sm8550_dec_ip_int_buf_tbl),
.dec_op_int_buf_tbl = sm8550_dec_op_int_buf_tbl,
.dec_op_int_buf_tbl_size = ARRAY_SIZE(sm8550_dec_op_int_buf_tbl),
+
+ .enc_op_int_buf_tbl = sm8550_enc_op_int_buf_tbl,
+ .enc_op_int_buf_tbl_size = ARRAY_SIZE(sm8550_enc_op_int_buf_tbl),
};
diff --git a/drivers/media/platform/qcom/iris/iris_platform_qcs8300.h b/drivers/media/platform/qcom/iris/iris_platform_qcs8300.h
index a8d66ed388a3..35ea0efade73 100644
--- a/drivers/media/platform/qcom/iris/iris_platform_qcs8300.h
+++ b/drivers/media/platform/qcom/iris/iris_platform_qcs8300.h
@@ -3,7 +3,9 @@
* Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
-static struct platform_inst_fw_cap inst_fw_cap_qcs8300[] = {
+#define BITRATE_MAX 245000000
+
+static struct platform_inst_fw_cap inst_fw_cap_qcs8300_dec[] = {
{
.cap_id = PROFILE_H264,
.min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
@@ -187,6 +189,352 @@ static struct platform_inst_fw_cap inst_fw_cap_qcs8300[] = {
},
};
+static struct platform_inst_fw_cap inst_fw_cap_qcs8300_enc[] = {
+ {
+ .cap_id = PROFILE_H264,
+ .min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
+ .max = V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_HIGH),
+ .value = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
+ .hfi_id = HFI_PROP_PROFILE,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ },
+ {
+ .cap_id = PROFILE_HEVC,
+ .min = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
+ .max = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10),
+ .value = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
+ .hfi_id = HFI_PROP_PROFILE,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ },
+ {
+ .cap_id = LEVEL_H264,
+ .min = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
+ .max = V4L2_MPEG_VIDEO_H264_LEVEL_6_0,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1B) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_3) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_6_0),
+ .value = V4L2_MPEG_VIDEO_H264_LEVEL_5_0,
+ .hfi_id = HFI_PROP_LEVEL,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ },
+ {
+ .cap_id = LEVEL_HEVC,
+ .min = V4L2_MPEG_VIDEO_HEVC_LEVEL_1,
+ .max = V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_2) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_3) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_4) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_5) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_6) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2),
+ .value = V4L2_MPEG_VIDEO_HEVC_LEVEL_5,
+ .hfi_id = HFI_PROP_LEVEL,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ },
+ {
+ .cap_id = STAGE,
+ .min = STAGE_1,
+ .max = STAGE_2,
+ .step_or_mask = 1,
+ .value = STAGE_2,
+ .hfi_id = HFI_PROP_STAGE,
+ },
+ {
+ .cap_id = HEADER_MODE,
+ .min = V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE,
+ .max = V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) |
+ BIT(V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME),
+ .value = V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
+ .hfi_id = HFI_PROP_SEQ_HEADER_MODE,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ },
+ {
+ .cap_id = PREPEND_SPSPPS_TO_IDR,
+ .min = 0,
+ .max = 1,
+ .step_or_mask = 1,
+ .value = 0,
+ },
+ {
+ .cap_id = BITRATE,
+ .min = 1,
+ .max = BITRATE_MAX,
+ .step_or_mask = 1,
+ .value = BITRATE_DEFAULT,
+ .hfi_id = HFI_PROP_TOTAL_BITRATE,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ },
+ {
+ .cap_id = BITRATE_PEAK,
+ .min = 1,
+ .max = BITRATE_MAX,
+ .step_or_mask = 1,
+ .value = BITRATE_DEFAULT,
+ .hfi_id = HFI_PROP_TOTAL_PEAK_BITRATE,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ },
+ {
+ .cap_id = BITRATE_MODE,
+ .min = V4L2_MPEG_VIDEO_BITRATE_MODE_VBR,
+ .max = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) |
+ BIT(V4L2_MPEG_VIDEO_BITRATE_MODE_CBR),
+ .value = V4L2_MPEG_VIDEO_BITRATE_MODE_VBR,
+ .hfi_id = HFI_PROP_RATE_CONTROL,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ },
+ {
+ .cap_id = FRAME_SKIP_MODE,
+ .min = V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_DISABLED,
+ .max = V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_DISABLED) |
+ BIT(V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_LEVEL_LIMIT) |
+ BIT(V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT),
+ .value = V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_DISABLED,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ },
+ {
+ .cap_id = FRAME_RC_ENABLE,
+ .min = 0,
+ .max = 1,
+ .step_or_mask = 1,
+ .value = 1,
+ },
+ {
+ .cap_id = GOP_SIZE,
+ .min = 0,
+ .max = INT_MAX,
+ .step_or_mask = 1,
+ .value = 2 * DEFAULT_FPS - 1,
+ .hfi_id = HFI_PROP_MAX_GOP_FRAMES,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ },
+ {
+ .cap_id = ENTROPY_MODE,
+ .min = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC,
+ .max = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC) |
+ BIT(V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC),
+ .value = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC,
+ .hfi_id = HFI_PROP_CABAC_SESSION,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ },
+ {
+ .cap_id = MIN_FRAME_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ .hfi_id = HFI_PROP_MIN_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT,
+ },
+ {
+ .cap_id = MIN_FRAME_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ .hfi_id = HFI_PROP_MIN_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT,
+ },
+ {
+ .cap_id = MAX_FRAME_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ .hfi_id = HFI_PROP_MAX_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT,
+ },
+ {
+ .cap_id = MAX_FRAME_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ .hfi_id = HFI_PROP_MAX_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT,
+ },
+ {
+ .cap_id = I_FRAME_MIN_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ },
+ {
+ .cap_id = I_FRAME_MIN_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ },
+ {
+ .cap_id = P_FRAME_MIN_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ },
+ {
+ .cap_id = P_FRAME_MIN_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ },
+ {
+ .cap_id = B_FRAME_MIN_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ },
+ {
+ .cap_id = B_FRAME_MIN_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ },
+ {
+ .cap_id = I_FRAME_MAX_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ },
+ {
+ .cap_id = I_FRAME_MAX_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ },
+ {
+ .cap_id = P_FRAME_MAX_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ },
+ {
+ .cap_id = P_FRAME_MAX_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ },
+ {
+ .cap_id = B_FRAME_MAX_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ },
+ {
+ .cap_id = B_FRAME_MAX_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ },
+ {
+ .cap_id = I_FRAME_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = DEFAULT_QP,
+ .hfi_id = HFI_PROP_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ },
+ {
+ .cap_id = I_FRAME_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = DEFAULT_QP,
+ .hfi_id = HFI_PROP_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ },
+ {
+ .cap_id = P_FRAME_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = DEFAULT_QP,
+ .hfi_id = HFI_PROP_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ },
+ {
+ .cap_id = P_FRAME_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = DEFAULT_QP,
+ .hfi_id = HFI_PROP_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ },
+ {
+ .cap_id = B_FRAME_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = DEFAULT_QP,
+ .hfi_id = HFI_PROP_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ },
+ {
+ .cap_id = B_FRAME_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = DEFAULT_QP,
+ .hfi_id = HFI_PROP_QP_PACKED,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ },
+};
+
static struct platform_inst_caps platform_inst_cap_qcs8300 = {
.min_frame_width = 96,
.max_frame_width = 4096,
@@ -197,4 +545,6 @@ static struct platform_inst_caps platform_inst_cap_qcs8300 = {
.mb_cycles_fw = 326389,
.mb_cycles_fw_vpp = 44156,
.num_comv = 0,
+ .max_frame_rate = MAXIMUM_FPS,
+ .max_operating_rate = MAXIMUM_FPS,
};
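
The max_core_mbpf and max_core_mbps limits set for qcs8300 above are macroblock budgets: a macroblock covers 16x16 pixels, so (width * height) / 256 is the per-frame macroblock count, and multiplying by a frame rate yields macroblocks per second. A minimal standalone sketch (plain userspace C, not driver code) that reproduces the qcs8300 numbers:

	#include <stdio.h>

	static unsigned long mbs_per_frame(unsigned long w, unsigned long h)
	{
		return (w * h) / 256;	/* one macroblock covers 16x16 pixels */
	}

	int main(void)
	{
		/* four concurrent 4K sessions: ((4096 * 2176) / 256) * 4 */
		printf("max_core_mbpf = %lu\n", mbs_per_frame(4096, 2176) * 4);
		/* 4K UHD at 120 fps: ((3840 * 2176) / 256) * 120 */
		printf("max_core_mbps = %lu\n", mbs_per_frame(3840, 2176) * 120);
		return 0;
	}

This prints 139264 and 3916800, the values the initializers above expand to.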
diff --git a/drivers/media/platform/qcom/iris/iris_platform_sm8250.c b/drivers/media/platform/qcom/iris/iris_platform_sm8250.c
index 8d0816a67ae0..16486284f8ac 100644
--- a/drivers/media/platform/qcom/iris/iris_platform_sm8250.c
+++ b/drivers/media/platform/qcom/iris/iris_platform_sm8250.c
@@ -9,9 +9,15 @@
#include "iris_resources.h"
#include "iris_hfi_gen1.h"
#include "iris_hfi_gen1_defines.h"
+#include "iris_vpu_buffer.h"
#include "iris_vpu_common.h"
-static struct platform_inst_fw_cap inst_fw_cap_sm8250[] = {
+#define BITRATE_MIN 32000
+#define BITRATE_MAX 160000000
+#define BITRATE_PEAK_DEFAULT (BITRATE_DEFAULT * 2)
+#define BITRATE_STEP 100
+
+static struct platform_inst_fw_cap inst_fw_cap_sm8250_dec[] = {
{
.cap_id = PIPE,
.min = PIPE_1,
@@ -32,6 +38,200 @@ static struct platform_inst_fw_cap inst_fw_cap_sm8250[] = {
},
};
+static struct platform_inst_fw_cap inst_fw_cap_sm8250_enc[] = {
+ {
+ .cap_id = STAGE,
+ .min = STAGE_1,
+ .max = STAGE_2,
+ .step_or_mask = 1,
+ .value = STAGE_2,
+ .hfi_id = HFI_PROPERTY_PARAM_WORK_MODE,
+ .set = iris_set_stage,
+ },
+ {
+ .cap_id = PROFILE_H264,
+ .min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
+ .max = V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_HIGH) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH),
+ .value = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
+ .hfi_id = HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_profile_level_gen1,
+ },
+ {
+ .cap_id = PROFILE_HEVC,
+ .min = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
+ .max = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10),
+ .value = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
+ .hfi_id = HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_profile_level_gen1,
+ },
+ {
+ .cap_id = LEVEL_H264,
+ .min = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
+ .max = V4L2_MPEG_VIDEO_H264_LEVEL_5_1,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1B) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_1_3) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_2_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_3_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_1) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_4_2) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_0) |
+ BIT(V4L2_MPEG_VIDEO_H264_LEVEL_5_1),
+ .value = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
+ .hfi_id = HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_profile_level_gen1,
+ },
+ {
+ .cap_id = LEVEL_HEVC,
+ .min = V4L2_MPEG_VIDEO_HEVC_LEVEL_1,
+ .max = V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_2) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_3) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_4) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_5) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_6) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1) |
+ BIT(V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2),
+ .value = V4L2_MPEG_VIDEO_HEVC_LEVEL_1,
+ .hfi_id = HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_profile_level_gen1,
+ },
+ {
+ .cap_id = HEADER_MODE,
+ .min = V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE,
+ .max = V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) |
+ BIT(V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME),
+ .value = V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
+ .hfi_id = HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_header_mode_gen1,
+ },
+ {
+ .cap_id = BITRATE,
+ .min = BITRATE_MIN,
+ .max = BITRATE_MAX,
+ .step_or_mask = BITRATE_STEP,
+ .value = BITRATE_DEFAULT,
+ .hfi_id = HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_INPUT_PORT |
+ CAP_FLAG_DYNAMIC_ALLOWED,
+ .set = iris_set_bitrate,
+ },
+ {
+ .cap_id = BITRATE_MODE,
+ .min = V4L2_MPEG_VIDEO_BITRATE_MODE_VBR,
+ .max = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) |
+ BIT(V4L2_MPEG_VIDEO_BITRATE_MODE_CBR),
+ .value = V4L2_MPEG_VIDEO_BITRATE_MODE_VBR,
+ .hfi_id = HFI_PROPERTY_PARAM_VENC_RATE_CONTROL,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_bitrate_mode_gen1,
+ },
+ {
+ .cap_id = FRAME_SKIP_MODE,
+ .min = V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_DISABLED,
+ .max = V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_DISABLED) |
+ BIT(V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT),
+ .value = V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_DISABLED,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ },
+ {
+ .cap_id = FRAME_RC_ENABLE,
+ .min = 0,
+ .max = 1,
+ .step_or_mask = 1,
+ .value = 1,
+ },
+ {
+ .cap_id = GOP_SIZE,
+ .min = 0,
+ .max = (1 << 16) - 1,
+ .step_or_mask = 1,
+ .value = 30,
+	.set = iris_set_u32,
+ },
+ {
+ .cap_id = ENTROPY_MODE,
+ .min = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC,
+ .max = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC,
+ .step_or_mask = BIT(V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC) |
+ BIT(V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC),
+ .value = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC,
+ .hfi_id = HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL,
+ .flags = CAP_FLAG_OUTPUT_PORT | CAP_FLAG_MENU,
+ .set = iris_set_entropy_mode_gen1,
+ },
+ {
+ .cap_id = MIN_FRAME_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ .hfi_id = HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE_V2,
+ .flags = CAP_FLAG_OUTPUT_PORT,
+ .set = iris_set_qp_range,
+ },
+ {
+ .cap_id = MIN_FRAME_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP_HEVC,
+ .step_or_mask = 1,
+ .value = MIN_QP_8BIT,
+ .hfi_id = HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE_V2,
+ .flags = CAP_FLAG_OUTPUT_PORT,
+ .set = iris_set_qp_range,
+ },
+ {
+ .cap_id = MAX_FRAME_QP_H264,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP,
+ .step_or_mask = 1,
+ .value = MAX_QP,
+ .hfi_id = HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE_V2,
+ .flags = CAP_FLAG_OUTPUT_PORT,
+ .set = iris_set_qp_range,
+ },
+ {
+ .cap_id = MAX_FRAME_QP_HEVC,
+ .min = MIN_QP_8BIT,
+ .max = MAX_QP_HEVC,
+ .step_or_mask = 1,
+ .value = MAX_QP_HEVC,
+ .hfi_id = HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE_V2,
+ .flags = CAP_FLAG_OUTPUT_PORT,
+ .set = iris_set_qp_range,
+ },
+};
+
static struct platform_inst_caps platform_inst_cap_sm8250 = {
.min_frame_width = 128,
.max_frame_width = 8192,
@@ -40,6 +240,8 @@ static struct platform_inst_caps platform_inst_cap_sm8250 = {
.max_mbpf = 138240,
.mb_cycles_vsp = 25,
.mb_cycles_vpp = 200,
+ .max_frame_rate = MAXIMUM_FPS,
+ .max_operating_rate = MAXIMUM_FPS,
};
static void iris_set_sm8250_preset_registers(struct iris_core *core)
@@ -89,6 +291,14 @@ static const u32 sm8250_vdec_input_config_param_default[] = {
HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE,
};
+static const u32 sm8250_venc_input_config_param[] = {
+ HFI_PROPERTY_CONFIG_FRAME_RATE,
+ HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO,
+ HFI_PROPERTY_PARAM_FRAME_SIZE,
+ HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT,
+ HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL,
+};
+
static const u32 sm8250_dec_ip_int_buf_tbl[] = {
BUF_BIN,
BUF_SCRATCH_1,
@@ -98,10 +308,17 @@ static const u32 sm8250_dec_op_int_buf_tbl[] = {
BUF_DPB,
};
+static const u32 sm8250_enc_ip_int_buf_tbl[] = {
+ BUF_BIN,
+ BUF_SCRATCH_1,
+ BUF_SCRATCH_2,
+};
+
struct iris_platform_data sm8250_data = {
.get_instance = iris_hfi_gen1_get_instance,
.init_hfi_command_ops = &iris_hfi_gen1_command_ops_init,
.init_hfi_response_ops = iris_hfi_gen1_response_ops_init,
+ .get_vpu_buffer_size = iris_vpu_buf_size,
.vpu_ops = &iris_vpu2_ops,
.set_preset_registers = iris_set_sm8250_preset_registers,
.icc_tbl = sm8250_icc_table,
@@ -121,20 +338,29 @@ struct iris_platform_data sm8250_data = {
.fwname = "qcom/vpu-1.0/venus.mbn",
.pas_id = IRIS_PAS_ID,
.inst_caps = &platform_inst_cap_sm8250,
- .inst_fw_caps = inst_fw_cap_sm8250,
- .inst_fw_caps_size = ARRAY_SIZE(inst_fw_cap_sm8250),
+ .inst_fw_caps_dec = inst_fw_cap_sm8250_dec,
+ .inst_fw_caps_dec_size = ARRAY_SIZE(inst_fw_cap_sm8250_dec),
+ .inst_fw_caps_enc = inst_fw_cap_sm8250_enc,
+ .inst_fw_caps_enc_size = ARRAY_SIZE(inst_fw_cap_sm8250_enc),
.tz_cp_config_data = &tz_cp_config_sm8250,
.hw_response_timeout = HW_RESPONSE_TIMEOUT_VALUE,
.num_vpp_pipe = 4,
.max_session_count = 16,
.max_core_mbpf = NUM_MBS_8K,
- .input_config_params_default =
+ .max_core_mbps = ((7680 * 4320) / 256) * 60,
+ .dec_input_config_params_default =
sm8250_vdec_input_config_param_default,
- .input_config_params_default_size =
+ .dec_input_config_params_default_size =
ARRAY_SIZE(sm8250_vdec_input_config_param_default),
+ .enc_input_config_params = sm8250_venc_input_config_param,
+ .enc_input_config_params_size =
+ ARRAY_SIZE(sm8250_venc_input_config_param),
.dec_ip_int_buf_tbl = sm8250_dec_ip_int_buf_tbl,
.dec_ip_int_buf_tbl_size = ARRAY_SIZE(sm8250_dec_ip_int_buf_tbl),
.dec_op_int_buf_tbl = sm8250_dec_op_int_buf_tbl,
.dec_op_int_buf_tbl_size = ARRAY_SIZE(sm8250_dec_op_int_buf_tbl),
+
+ .enc_ip_int_buf_tbl = sm8250_enc_ip_int_buf_tbl,
+ .enc_ip_int_buf_tbl_size = ARRAY_SIZE(sm8250_enc_ip_int_buf_tbl),
};
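
Throughout the inst_fw_cap tables above, step_or_mask does double duty: for controls flagged CAP_FLAG_MENU it holds a bitmask of the permitted menu values (hence the OR'd BIT() entries), while for plain integer controls such as BITRATE it is the step (BITRATE_STEP, 100 bps, on sm8250). A minimal standalone model of that convention — simplified struct and hypothetical helper, not the driver's actual validation path:

	#include <stdbool.h>
	#include <stdint.h>

	#define BIT(n)		(1U << (n))
	#define CAP_FLAG_MENU	BIT(0)	/* stand-in value for illustration */

	struct cap {
		int32_t min, max;
		uint32_t step_or_mask;
		uint32_t flags;
	};

	/* Hypothetical check mirroring how a menu vs. integer cap is read. */
	static bool cap_value_allowed(const struct cap *cap, int32_t val)
	{
		if (val < cap->min || val > cap->max)
			return false;
		if (cap->flags & CAP_FLAG_MENU)
			return cap->step_or_mask & BIT(val);	/* menu bitmask */
		return ((val - cap->min) % cap->step_or_mask) == 0; /* step */
	}

	int main(void)
	{
		const struct cap bitrate_mode = {
			.min = 0, .max = 1,	/* VBR..CBR, as in the table */
			.step_or_mask = BIT(0) | BIT(1),
			.flags = CAP_FLAG_MENU,
		};

		return cap_value_allowed(&bitrate_mode, 1) ? 0 : 1;
	}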
diff --git a/drivers/media/platform/qcom/iris/iris_platform_sm8750.h b/drivers/media/platform/qcom/iris/iris_platform_sm8750.h
new file mode 100644
index 000000000000..719056656a5b
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_platform_sm8750.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2025 Linaro Ltd
+ */
+
+#ifndef __MEDIA_IRIS_PLATFORM_SM8750_H__
+#define __MEDIA_IRIS_PLATFORM_SM8750_H__
+
+static const char * const sm8750_clk_reset_table[] = {
+ "bus0", "bus1", "core", "vcodec0_core"
+};
+
+static const struct platform_clk_data sm8750_clk_table[] = {
+ {IRIS_AXI_CLK, "iface" },
+ {IRIS_CTRL_CLK, "core" },
+ {IRIS_HW_CLK, "vcodec0_core" },
+ {IRIS_AXI1_CLK, "iface1" },
+ {IRIS_CTRL_FREERUN_CLK, "core_freerun" },
+ {IRIS_HW_FREERUN_CLK, "vcodec0_core_freerun" },
+};
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_probe.c b/drivers/media/platform/qcom/iris/iris_probe.c
index 4e6e92357968..00e99be16e08 100644
--- a/drivers/media/platform/qcom/iris/iris_probe.c
+++ b/drivers/media/platform/qcom/iris/iris_probe.c
@@ -146,7 +146,7 @@ static int iris_init_resources(struct iris_core *core)
return iris_init_resets(core);
}
-static int iris_register_video_device(struct iris_core *core)
+static int iris_register_video_device(struct iris_core *core, enum domain_type type)
{
struct video_device *vdev;
int ret;
@@ -155,19 +155,29 @@ static int iris_register_video_device(struct iris_core *core)
if (!vdev)
return -ENOMEM;
- strscpy(vdev->name, "qcom-iris-decoder", sizeof(vdev->name));
vdev->release = video_device_release;
vdev->fops = core->iris_v4l2_file_ops;
- vdev->ioctl_ops = core->iris_v4l2_ioctl_ops;
vdev->vfl_dir = VFL_DIR_M2M;
vdev->v4l2_dev = &core->v4l2_dev;
vdev->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+ if (type == DECODER) {
+ strscpy(vdev->name, "qcom-iris-decoder", sizeof(vdev->name));
+ vdev->ioctl_ops = core->iris_v4l2_ioctl_ops_dec;
+ core->vdev_dec = vdev;
+ } else if (type == ENCODER) {
+ strscpy(vdev->name, "qcom-iris-encoder", sizeof(vdev->name));
+ vdev->ioctl_ops = core->iris_v4l2_ioctl_ops_enc;
+ core->vdev_enc = vdev;
+ } else {
+ ret = -EINVAL;
+ goto err_vdev_release;
+ }
+
ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret)
goto err_vdev_release;
- core->vdev_dec = vdev;
video_set_drvdata(vdev, core);
return 0;
@@ -189,6 +199,7 @@ static void iris_remove(struct platform_device *pdev)
iris_core_deinit(core);
video_unregister_device(core->vdev_dec);
+ video_unregister_device(core->vdev_enc);
v4l2_device_unregister(&core->v4l2_dev);
@@ -258,17 +269,21 @@ static int iris_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = iris_register_video_device(core);
+ ret = iris_register_video_device(core, DECODER);
if (ret)
goto err_v4l2_unreg;
+ ret = iris_register_video_device(core, ENCODER);
+ if (ret)
+ goto err_vdev_unreg_dec;
+
platform_set_drvdata(pdev, core);
dma_mask = core->iris_platform_data->dma_mask;
ret = dma_set_mask_and_coherent(dev, dma_mask);
if (ret)
- goto err_vdev_unreg;
+ goto err_vdev_unreg_enc;
dma_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
dma_set_seg_boundary(&pdev->dev, DMA_BIT_MASK(32));
@@ -277,11 +292,13 @@ static int iris_probe(struct platform_device *pdev)
pm_runtime_use_autosuspend(core->dev);
ret = devm_pm_runtime_enable(core->dev);
if (ret)
- goto err_vdev_unreg;
+ goto err_vdev_unreg_enc;
return 0;
-err_vdev_unreg:
+err_vdev_unreg_enc:
+ video_unregister_device(core->vdev_enc);
+err_vdev_unreg_dec:
video_unregister_device(core->vdev_dec);
err_v4l2_unreg:
v4l2_device_unregister(&core->v4l2_dev);
@@ -353,6 +370,10 @@ static const struct of_device_id iris_dt_match[] = {
.compatible = "qcom,sm8650-iris",
.data = &sm8650_data,
},
+ {
+ .compatible = "qcom,sm8750-iris",
+ .data = &sm8750_data,
+ },
{ },
};
MODULE_DEVICE_TABLE(of, iris_dt_match);
diff --git a/drivers/media/platform/qcom/iris/iris_state.c b/drivers/media/platform/qcom/iris/iris_state.c
index 104e1687ad39..d14472414750 100644
--- a/drivers/media/platform/qcom/iris/iris_state.c
+++ b/drivers/media/platform/qcom/iris/iris_state.c
@@ -122,7 +122,8 @@ static bool iris_inst_allow_sub_state(struct iris_inst *inst, enum iris_inst_sub
return false;
case IRIS_INST_OUTPUT_STREAMING:
if (sub_state & (IRIS_INST_SUB_DRC_LAST |
- IRIS_INST_SUB_DRAIN_LAST | IRIS_INST_SUB_OUTPUT_PAUSE))
+ IRIS_INST_SUB_DRAIN_LAST | IRIS_INST_SUB_OUTPUT_PAUSE |
+ IRIS_INST_SUB_LOAD_RESOURCES))
return true;
return false;
case IRIS_INST_STREAMING:
@@ -251,7 +252,7 @@ bool iris_drc_pending(struct iris_inst *inst)
inst->sub_state & IRIS_INST_SUB_DRC_LAST;
}
-static inline bool iris_drain_pending(struct iris_inst *inst)
+bool iris_drain_pending(struct iris_inst *inst)
{
return inst->sub_state & IRIS_INST_SUB_DRAIN &&
inst->sub_state & IRIS_INST_SUB_DRAIN_LAST;
@@ -262,11 +263,11 @@ bool iris_allow_cmd(struct iris_inst *inst, u32 cmd)
struct vb2_queue *src_q = v4l2_m2m_get_src_vq(inst->m2m_ctx);
struct vb2_queue *dst_q = v4l2_m2m_get_dst_vq(inst->m2m_ctx);
- if (cmd == V4L2_DEC_CMD_START) {
+ if (cmd == V4L2_DEC_CMD_START || cmd == V4L2_ENC_CMD_START) {
if (vb2_is_streaming(src_q) || vb2_is_streaming(dst_q))
if (iris_drc_pending(inst) || iris_drain_pending(inst))
return true;
- } else if (cmd == V4L2_DEC_CMD_STOP) {
+ } else if (cmd == V4L2_DEC_CMD_STOP || cmd == V4L2_ENC_CMD_STOP) {
if (vb2_is_streaming(src_q))
if (inst->sub_state != IRIS_INST_SUB_DRAIN)
return true;
diff --git a/drivers/media/platform/qcom/iris/iris_state.h b/drivers/media/platform/qcom/iris/iris_state.h
index e718386dbe04..b09fa54cf17e 100644
--- a/drivers/media/platform/qcom/iris/iris_state.h
+++ b/drivers/media/platform/qcom/iris/iris_state.h
@@ -141,5 +141,6 @@ int iris_inst_sub_state_change_drc_last(struct iris_inst *inst);
int iris_inst_sub_state_change_pause(struct iris_inst *inst, u32 plane);
bool iris_allow_cmd(struct iris_inst *inst, u32 cmd);
bool iris_drc_pending(struct iris_inst *inst);
+bool iris_drain_pending(struct iris_inst *inst);
#endif
diff --git a/drivers/media/platform/qcom/iris/iris_utils.c b/drivers/media/platform/qcom/iris/iris_utils.c
index 83c70d6a2d90..85c70a62b1fd 100644
--- a/drivers/media/platform/qcom/iris/iris_utils.c
+++ b/drivers/media/platform/qcom/iris/iris_utils.c
@@ -88,3 +88,39 @@ struct iris_inst *iris_get_instance(struct iris_core *core, u32 session_id)
mutex_unlock(&core->lock);
return NULL;
}
+
+int iris_check_core_mbpf(struct iris_inst *inst)
+{
+ struct iris_core *core = inst->core;
+ struct iris_inst *instance;
+ u32 total_mbpf = 0;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(instance, &core->instances, list)
+ total_mbpf += iris_get_mbpf(instance);
+ mutex_unlock(&core->lock);
+
+ if (total_mbpf > core->iris_platform_data->max_core_mbpf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int iris_check_core_mbps(struct iris_inst *inst)
+{
+ struct iris_core *core = inst->core;
+ struct iris_inst *instance;
+ u32 total_mbps = 0, fps = 0;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(instance, &core->instances, list) {
+ fps = max(instance->frame_rate, instance->operating_rate);
+ total_mbps += iris_get_mbpf(instance) * fps;
+ }
+ mutex_unlock(&core->lock);
+
+ if (total_mbps > core->iris_platform_data->max_core_mbps)
+ return -ENOMEM;
+
+ return 0;
+}
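+
iris_check_core_mbpf() and iris_check_core_mbps() are admission checks: each walks every open instance on the core, totals its worst-case load, and returns -ENOMEM when the platform budget would be exceeded. The same logic as a standalone model (plain C, with the kernel list and locking simplified away):

	#include <stdio.h>

	struct inst { unsigned int mbpf, frame_rate, operating_rate; };

	static unsigned int max_u(unsigned int a, unsigned int b)
	{
		return a > b ? a : b;
	}

	/* Sum worst-case macroblocks/second over all instances. */
	static int check_core_mbps(const struct inst *insts, int n,
				   unsigned int budget)
	{
		unsigned int total = 0;

		for (int i = 0; i < n; i++)
			total += insts[i].mbpf * max_u(insts[i].frame_rate,
						       insts[i].operating_rate);
		return total > budget ? -1 : 0;	/* driver returns -ENOMEM */
	}

	int main(void)
	{
		/* two 1080p sessions: (1920 * 1088) / 256 = 8160 MBs/frame */
		struct inst insts[] = { { 8160, 30, 30 }, { 8160, 60, 30 } };
		unsigned int budget = ((7680 * 4320) / 256) * 60; /* 8K @ 60 */

		printf("%s\n", check_core_mbps(insts, 2, budget) ?
		       "reject" : "admit");
		return 0;
	}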
diff --git a/drivers/media/platform/qcom/iris/iris_utils.h b/drivers/media/platform/qcom/iris/iris_utils.h
index 49869cf7a376..75740181122f 100644
--- a/drivers/media/platform/qcom/iris/iris_utils.h
+++ b/drivers/media/platform/qcom/iris/iris_utils.h
@@ -49,5 +49,7 @@ struct iris_inst *iris_get_instance(struct iris_core *core, u32 session_id);
void iris_helper_buffers_done(struct iris_inst *inst, unsigned int type,
enum vb2_buffer_state state);
int iris_wait_for_session_response(struct iris_inst *inst, bool is_flush);
+int iris_check_core_mbpf(struct iris_inst *inst);
+int iris_check_core_mbps(struct iris_inst *inst);
#endif
diff --git a/drivers/media/platform/qcom/iris/iris_vb2.c b/drivers/media/platform/qcom/iris/iris_vb2.c
index 8b17c7c39487..139b821f7952 100644
--- a/drivers/media/platform/qcom/iris/iris_vb2.c
+++ b/drivers/media/platform/qcom/iris/iris_vb2.c
@@ -7,28 +7,13 @@
#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>
+#include "iris_common.h"
#include "iris_instance.h"
#include "iris_vb2.h"
#include "iris_vdec.h"
+#include "iris_venc.h"
#include "iris_power.h"
-static int iris_check_core_mbpf(struct iris_inst *inst)
-{
- struct iris_core *core = inst->core;
- struct iris_inst *instance;
- u32 total_mbpf = 0;
-
- mutex_lock(&core->lock);
- list_for_each_entry(instance, &core->instances, list)
- total_mbpf += iris_get_mbpf(instance);
- mutex_unlock(&core->lock);
-
- if (total_mbpf > core->iris_platform_data->max_core_mbpf)
- return -ENOMEM;
-
- return 0;
-}
-
static int iris_check_inst_mbpf(struct iris_inst *inst)
{
struct platform_inst_caps *caps;
@@ -173,9 +158,6 @@ int iris_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
inst = vb2_get_drv_priv(q);
- if (V4L2_TYPE_IS_CAPTURE(q->type) && inst->state == IRIS_INST_INIT)
- return 0;
-
mutex_lock(&inst->lock);
if (inst->state == IRIS_INST_ERROR) {
ret = -EBUSY;
@@ -194,16 +176,35 @@ int iris_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
if (ret)
goto error;
- if (V4L2_TYPE_IS_OUTPUT(q->type))
- ret = iris_vdec_streamon_input(inst);
- else if (V4L2_TYPE_IS_CAPTURE(q->type))
- ret = iris_vdec_streamon_output(inst);
+ if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+ if (inst->domain == DECODER)
+ ret = iris_vdec_streamon_input(inst);
+ else
+ ret = iris_venc_streamon_input(inst);
+ } else if (V4L2_TYPE_IS_CAPTURE(q->type)) {
+ if (inst->domain == DECODER)
+ ret = iris_vdec_streamon_output(inst);
+ else
+ ret = iris_venc_streamon_output(inst);
+ }
if (ret)
goto error;
buf_type = iris_v4l2_type_to_driver(q->type);
- ret = iris_queue_deferred_buffers(inst, buf_type);
+ if (inst->domain == DECODER) {
+ if (inst->state == IRIS_INST_STREAMING)
+ ret = iris_queue_internal_deferred_buffers(inst, BUF_DPB);
+ if (!ret)
+ ret = iris_queue_deferred_buffers(inst, buf_type);
+ } else {
+ if (inst->state == IRIS_INST_STREAMING) {
+ ret = iris_queue_deferred_buffers(inst, BUF_INPUT);
+ if (!ret)
+ ret = iris_queue_deferred_buffers(inst, BUF_OUTPUT);
+ }
+ }
+
if (ret)
goto error;
@@ -235,7 +236,7 @@ void iris_vb2_stop_streaming(struct vb2_queue *q)
!V4L2_TYPE_IS_CAPTURE(q->type))
goto exit;
- ret = iris_vdec_session_streamoff(inst, q->type);
+ ret = iris_session_streamoff(inst, q->type);
if (ret)
goto exit;
@@ -326,7 +327,10 @@ void iris_vb2_buf_queue(struct vb2_buffer *vb2)
v4l2_m2m_buf_queue(m2m_ctx, vbuf);
- ret = iris_vdec_qbuf(inst, vbuf);
+ if (inst->domain == DECODER)
+ ret = iris_vdec_qbuf(inst, vbuf);
+ else
+ ret = iris_venc_qbuf(inst, vbuf);
exit:
if (ret) {
diff --git a/drivers/media/platform/qcom/iris/iris_vdec.c b/drivers/media/platform/qcom/iris/iris_vdec.c
index d670b51c5839..ae13c3e1b426 100644
--- a/drivers/media/platform/qcom/iris/iris_vdec.c
+++ b/drivers/media/platform/qcom/iris/iris_vdec.c
@@ -7,14 +7,13 @@
#include <media/v4l2-mem2mem.h>
#include "iris_buffer.h"
+#include "iris_common.h"
#include "iris_ctrls.h"
#include "iris_instance.h"
#include "iris_power.h"
#include "iris_vdec.h"
#include "iris_vpu_buffer.h"
-#define DEFAULT_WIDTH 320
-#define DEFAULT_HEIGHT 240
#define DEFAULT_CODEC_ALIGNMENT 16
int iris_vdec_inst_init(struct iris_inst *inst)
@@ -56,7 +55,7 @@ int iris_vdec_inst_init(struct iris_inst *inst)
inst->buffers[BUF_OUTPUT].min_count = iris_vpu_buf_count(inst, BUF_OUTPUT);
inst->buffers[BUF_OUTPUT].size = f->fmt.pix_mp.plane_fmt[0].sizeimage;
- memcpy(&inst->fw_caps[0], &core->inst_fw_caps[0],
+ memcpy(&inst->fw_caps[0], &core->inst_fw_caps_dec[0],
INST_FW_CAP_MAX * sizeof(struct platform_inst_fw_cap));
return iris_ctrls_init(inst);
@@ -158,7 +157,7 @@ int iris_vdec_try_fmt(struct iris_inst *inst, struct v4l2_format *f)
}
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- if (!fmt) {
+ if (f->fmt.pix_mp.pixelformat != V4L2_PIX_FMT_NV12) {
f_inst = inst->fmt_dst;
f->fmt.pix_mp.pixelformat = f_inst->fmt.pix_mp.pixelformat;
f->fmt.pix_mp.width = f_inst->fmt.pix_mp.width;
@@ -265,6 +264,19 @@ int iris_vdec_s_fmt(struct iris_inst *inst, struct v4l2_format *f)
return 0;
}
+int iris_vdec_validate_format(struct iris_inst *inst, u32 pixelformat)
+{
+ const struct iris_fmt *fmt = NULL;
+
+ if (pixelformat != V4L2_PIX_FMT_NV12) {
+ fmt = find_format(inst, pixelformat, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (!fmt)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int iris_vdec_subscribe_event(struct iris_inst *inst, const struct v4l2_event_subscription *sub)
{
int ret = 0;
@@ -301,125 +313,6 @@ void iris_vdec_src_change(struct iris_inst *inst)
v4l2_event_queue_fh(&inst->fh, &event);
}
-
-static void iris_vdec_flush_deferred_buffers(struct iris_inst *inst,
- enum iris_buffer_type type)
-{
- struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
- struct v4l2_m2m_buffer *buffer, *n;
- struct iris_buffer *buf;
-
- if (type == BUF_INPUT) {
- v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buffer, n) {
- buf = to_iris_buffer(&buffer->vb);
- if (buf->attr & BUF_ATTR_DEFERRED) {
- if (!(buf->attr & BUF_ATTR_BUFFER_DONE)) {
- buf->attr |= BUF_ATTR_BUFFER_DONE;
- buf->data_size = 0;
- iris_vb2_buffer_done(inst, buf);
- }
- }
- }
- } else {
- v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, buffer, n) {
- buf = to_iris_buffer(&buffer->vb);
- if (buf->attr & BUF_ATTR_DEFERRED) {
- if (!(buf->attr & BUF_ATTR_BUFFER_DONE)) {
- buf->attr |= BUF_ATTR_BUFFER_DONE;
- buf->data_size = 0;
- iris_vb2_buffer_done(inst, buf);
- }
- }
- }
- }
-}
-
-static void iris_vdec_kill_session(struct iris_inst *inst)
-{
- const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
-
- if (!inst->session_id)
- return;
-
- hfi_ops->session_close(inst);
- iris_inst_change_state(inst, IRIS_INST_ERROR);
-}
-
-int iris_vdec_session_streamoff(struct iris_inst *inst, u32 plane)
-{
- const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
- enum iris_buffer_type buffer_type;
- int ret;
-
- switch (plane) {
- case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- buffer_type = BUF_INPUT;
- break;
- case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- buffer_type = BUF_OUTPUT;
- break;
- default:
- return -EINVAL;
- }
-
- ret = hfi_ops->session_stop(inst, plane);
- if (ret)
- goto error;
-
- ret = iris_inst_state_change_streamoff(inst, plane);
- if (ret)
- goto error;
-
- iris_vdec_flush_deferred_buffers(inst, buffer_type);
-
- return 0;
-
-error:
- iris_vdec_kill_session(inst);
- iris_vdec_flush_deferred_buffers(inst, buffer_type);
-
- return ret;
-}
-
-static int iris_vdec_process_streamon_input(struct iris_inst *inst)
-{
- const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
- enum iris_inst_sub_state set_sub_state = 0;
- int ret;
-
- iris_scale_power(inst);
-
- ret = hfi_ops->session_start(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
- if (ret)
- return ret;
-
- if (inst->sub_state & IRIS_INST_SUB_INPUT_PAUSE) {
- ret = iris_inst_change_sub_state(inst, IRIS_INST_SUB_INPUT_PAUSE, 0);
- if (ret)
- return ret;
- }
-
- if (inst->sub_state & IRIS_INST_SUB_DRC ||
- inst->sub_state & IRIS_INST_SUB_DRAIN ||
- inst->sub_state & IRIS_INST_SUB_FIRST_IPSC) {
- if (!(inst->sub_state & IRIS_INST_SUB_INPUT_PAUSE)) {
- if (hfi_ops->session_pause) {
- ret = hfi_ops->session_pause(inst,
- V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
- if (ret)
- return ret;
- }
- set_sub_state = IRIS_INST_SUB_INPUT_PAUSE;
- }
- }
-
- ret = iris_inst_state_change_streamon(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
- if (ret)
- return ret;
-
- return iris_inst_change_sub_state(inst, 0, set_sub_state);
-}
-
int iris_vdec_streamon_input(struct iris_inst *inst)
{
int ret;
@@ -428,7 +321,7 @@ int iris_vdec_streamon_input(struct iris_inst *inst)
if (ret)
return ret;
- ret = iris_alloc_and_queue_persist_bufs(inst);
+ ret = iris_alloc_and_queue_persist_bufs(inst, BUF_PERSIST);
if (ret)
return ret;
@@ -446,71 +339,7 @@ int iris_vdec_streamon_input(struct iris_inst *inst)
if (ret)
return ret;
- return iris_vdec_process_streamon_input(inst);
-}
-
-static int iris_vdec_process_streamon_output(struct iris_inst *inst)
-{
- const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
- bool drain_active = false, drc_active = false;
- enum iris_inst_sub_state clear_sub_state = 0;
- int ret = 0;
-
- iris_scale_power(inst);
-
- drain_active = inst->sub_state & IRIS_INST_SUB_DRAIN &&
- inst->sub_state & IRIS_INST_SUB_DRAIN_LAST;
-
- drc_active = inst->sub_state & IRIS_INST_SUB_DRC &&
- inst->sub_state & IRIS_INST_SUB_DRC_LAST;
-
- if (drc_active)
- clear_sub_state = IRIS_INST_SUB_DRC | IRIS_INST_SUB_DRC_LAST;
- else if (drain_active)
- clear_sub_state = IRIS_INST_SUB_DRAIN | IRIS_INST_SUB_DRAIN_LAST;
-
- if (inst->sub_state & IRIS_INST_SUB_INPUT_PAUSE) {
- ret = iris_alloc_and_queue_input_int_bufs(inst);
- if (ret)
- return ret;
- ret = iris_set_stage(inst, STAGE);
- if (ret)
- return ret;
- ret = iris_set_pipe(inst, PIPE);
- if (ret)
- return ret;
- }
-
- if (inst->state == IRIS_INST_INPUT_STREAMING &&
- inst->sub_state & IRIS_INST_SUB_INPUT_PAUSE) {
- if (!drain_active)
- ret = hfi_ops->session_resume_drc(inst,
- V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
- else if (hfi_ops->session_resume_drain)
- ret = hfi_ops->session_resume_drain(inst,
- V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
- if (ret)
- return ret;
- clear_sub_state |= IRIS_INST_SUB_INPUT_PAUSE;
- }
-
- if (inst->sub_state & IRIS_INST_SUB_FIRST_IPSC)
- clear_sub_state |= IRIS_INST_SUB_FIRST_IPSC;
-
- ret = hfi_ops->session_start(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
- if (ret)
- return ret;
-
- if (inst->sub_state & IRIS_INST_SUB_OUTPUT_PAUSE)
- clear_sub_state |= IRIS_INST_SUB_OUTPUT_PAUSE;
-
- ret = iris_inst_state_change_streamon(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
- if (ret)
- return ret;
-
- inst->last_buffer_dequeued = false;
-
- return iris_inst_change_sub_state(inst, clear_sub_state, 0);
+ return iris_process_streamon_input(inst);
}
int iris_vdec_streamon_output(struct iris_inst *inst)
@@ -532,7 +361,7 @@ int iris_vdec_streamon_output(struct iris_inst *inst)
if (ret)
return ret;
- ret = iris_vdec_process_streamon_output(inst);
+ ret = iris_process_streamon_output(inst);
if (ret)
goto error;
@@ -543,49 +372,11 @@ int iris_vdec_streamon_output(struct iris_inst *inst)
return ret;
error:
- iris_vdec_session_streamoff(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ iris_session_streamoff(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
return ret;
}
-static int
-iris_vdec_vb2_buffer_to_driver(struct vb2_buffer *vb2, struct iris_buffer *buf)
-{
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2);
-
- buf->type = iris_v4l2_type_to_driver(vb2->type);
- buf->index = vb2->index;
- buf->fd = vb2->planes[0].m.fd;
- buf->buffer_size = vb2->planes[0].length;
- buf->data_offset = vb2->planes[0].data_offset;
- buf->data_size = vb2->planes[0].bytesused - vb2->planes[0].data_offset;
- buf->flags = vbuf->flags;
- buf->timestamp = vb2->timestamp;
- buf->attr = 0;
-
- return 0;
-}
-
-static void
-iris_set_ts_metadata(struct iris_inst *inst, struct vb2_v4l2_buffer *vbuf)
-{
- u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
- struct vb2_buffer *vb = &vbuf->vb2_buf;
- u64 ts_us = vb->timestamp;
-
- if (inst->metadata_idx >= ARRAY_SIZE(inst->tss))
- inst->metadata_idx = 0;
-
- do_div(ts_us, NSEC_PER_USEC);
-
- inst->tss[inst->metadata_idx].flags = vbuf->flags & mask;
- inst->tss[inst->metadata_idx].tc = vbuf->timecode;
- inst->tss[inst->metadata_idx].ts_us = ts_us;
- inst->tss[inst->metadata_idx].ts_ns = vb->timestamp;
-
- inst->metadata_idx++;
-}
-
int iris_vdec_qbuf(struct iris_inst *inst, struct vb2_v4l2_buffer *vbuf)
{
struct iris_buffer *buf = to_iris_buffer(vbuf);
@@ -593,7 +384,7 @@ int iris_vdec_qbuf(struct iris_inst *inst, struct vb2_v4l2_buffer *vbuf)
struct vb2_queue *q;
int ret;
- ret = iris_vdec_vb2_buffer_to_driver(vb2, buf);
+ ret = iris_vb2_buffer_to_driver(vb2, buf);
if (ret)
return ret;
diff --git a/drivers/media/platform/qcom/iris/iris_vdec.h b/drivers/media/platform/qcom/iris/iris_vdec.h
index cd7aab66dc7c..ec1ce55d1375 100644
--- a/drivers/media/platform/qcom/iris/iris_vdec.h
+++ b/drivers/media/platform/qcom/iris/iris_vdec.h
@@ -8,22 +8,12 @@
struct iris_inst;
-enum iris_fmt_type {
- IRIS_FMT_H264,
- IRIS_FMT_HEVC,
- IRIS_FMT_VP9,
-};
-
-struct iris_fmt {
- u32 pixfmt;
- u32 type;
-};
-
int iris_vdec_inst_init(struct iris_inst *inst);
void iris_vdec_inst_deinit(struct iris_inst *inst);
int iris_vdec_enum_fmt(struct iris_inst *inst, struct v4l2_fmtdesc *f);
int iris_vdec_try_fmt(struct iris_inst *inst, struct v4l2_format *f);
int iris_vdec_s_fmt(struct iris_inst *inst, struct v4l2_format *f);
+int iris_vdec_validate_format(struct iris_inst *inst, u32 pixelformat);
int iris_vdec_subscribe_event(struct iris_inst *inst, const struct v4l2_event_subscription *sub);
void iris_vdec_src_change(struct iris_inst *inst);
int iris_vdec_streamon_input(struct iris_inst *inst);
@@ -31,6 +21,5 @@ int iris_vdec_streamon_output(struct iris_inst *inst);
int iris_vdec_qbuf(struct iris_inst *inst, struct vb2_v4l2_buffer *vbuf);
int iris_vdec_start_cmd(struct iris_inst *inst);
int iris_vdec_stop_cmd(struct iris_inst *inst);
-int iris_vdec_session_streamoff(struct iris_inst *inst, u32 plane);
#endif
diff --git a/drivers/media/platform/qcom/iris/iris_venc.c b/drivers/media/platform/qcom/iris/iris_venc.c
new file mode 100644
index 000000000000..099bd5ed4ae0
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_venc.c
@@ -0,0 +1,579 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "iris_buffer.h"
+#include "iris_common.h"
+#include "iris_ctrls.h"
+#include "iris_instance.h"
+#include "iris_power.h"
+#include "iris_venc.h"
+#include "iris_vpu_buffer.h"
+
+int iris_venc_inst_init(struct iris_inst *inst)
+{
+ struct iris_core *core = inst->core;
+ struct v4l2_format *f;
+
+ inst->fmt_src = kzalloc(sizeof(*inst->fmt_src), GFP_KERNEL);
+ inst->fmt_dst = kzalloc(sizeof(*inst->fmt_dst), GFP_KERNEL);
+ if (!inst->fmt_src || !inst->fmt_dst) {
+ kfree(inst->fmt_src);
+ kfree(inst->fmt_dst);
+ return -ENOMEM;
+ }
+
+ f = inst->fmt_dst;
+ f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ f->fmt.pix_mp.width = DEFAULT_WIDTH;
+ f->fmt.pix_mp.height = DEFAULT_HEIGHT;
+ f->fmt.pix_mp.pixelformat = V4L2_PIX_FMT_H264;
+ inst->codec = f->fmt.pix_mp.pixelformat;
+ f->fmt.pix_mp.num_planes = 1;
+ f->fmt.pix_mp.plane_fmt[0].bytesperline = 0;
+ f->fmt.pix_mp.plane_fmt[0].sizeimage = iris_get_buffer_size(inst, BUF_OUTPUT);
+ f->fmt.pix_mp.field = V4L2_FIELD_NONE;
+ f->fmt.pix_mp.colorspace = V4L2_COLORSPACE_DEFAULT;
+ f->fmt.pix_mp.xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ f->fmt.pix_mp.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ f->fmt.pix_mp.quantization = V4L2_QUANTIZATION_DEFAULT;
+ inst->buffers[BUF_OUTPUT].min_count = iris_vpu_buf_count(inst, BUF_OUTPUT);
+ inst->buffers[BUF_OUTPUT].size = f->fmt.pix_mp.plane_fmt[0].sizeimage;
+
+ f = inst->fmt_src;
+ f->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ f->fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12;
+ f->fmt.pix_mp.width = ALIGN(DEFAULT_WIDTH, 128);
+ f->fmt.pix_mp.height = ALIGN(DEFAULT_HEIGHT, 32);
+ f->fmt.pix_mp.num_planes = 1;
+ f->fmt.pix_mp.plane_fmt[0].bytesperline = ALIGN(DEFAULT_WIDTH, 128);
+ f->fmt.pix_mp.plane_fmt[0].sizeimage = iris_get_buffer_size(inst, BUF_INPUT);
+ f->fmt.pix_mp.field = V4L2_FIELD_NONE;
+ f->fmt.pix_mp.colorspace = V4L2_COLORSPACE_DEFAULT;
+ f->fmt.pix_mp.xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ f->fmt.pix_mp.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ f->fmt.pix_mp.quantization = V4L2_QUANTIZATION_DEFAULT;
+ inst->buffers[BUF_INPUT].min_count = iris_vpu_buf_count(inst, BUF_INPUT);
+ inst->buffers[BUF_INPUT].size = f->fmt.pix_mp.plane_fmt[0].sizeimage;
+
+ inst->crop.left = 0;
+ inst->crop.top = 0;
+ inst->crop.width = f->fmt.pix_mp.width;
+ inst->crop.height = f->fmt.pix_mp.height;
+
+ inst->operating_rate = DEFAULT_FPS;
+ inst->frame_rate = DEFAULT_FPS;
+
+ memcpy(&inst->fw_caps[0], &core->inst_fw_caps_enc[0],
+ INST_FW_CAP_MAX * sizeof(struct platform_inst_fw_cap));
+
+ return iris_ctrls_init(inst);
+}
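+
iris_venc_inst_init() brings the encoder up at the shared 320x240 default (the DEFAULT_WIDTH/DEFAULT_HEIGHT defines removed from iris_vdec.c above, presumably now provided by iris_common.h), with the NV12 input plane padded to a 128-pixel stride and a 32-line height alignment. The padded geometry is plain round-up arithmetic:

	#include <stdio.h>

	/* Round x up to the next multiple of a. */
	#define ALIGN(x, a)	(((x) + (a) - 1) / (a) * (a))

	int main(void)
	{
		/* DEFAULT_WIDTH x DEFAULT_HEIGHT, padded as in inst_init */
		printf("width  %d -> %d\n", 320, ALIGN(320, 128));	/* 384 */
		printf("height %d -> %d\n", 240, ALIGN(240, 32));	/* 240 */
		return 0;
	}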
+
+void iris_venc_inst_deinit(struct iris_inst *inst)
+{
+ kfree(inst->fmt_dst);
+ kfree(inst->fmt_src);
+}
+
+static const struct iris_fmt iris_venc_formats[] = {
+ [IRIS_FMT_H264] = {
+ .pixfmt = V4L2_PIX_FMT_H264,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ },
+ [IRIS_FMT_HEVC] = {
+ .pixfmt = V4L2_PIX_FMT_HEVC,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ },
+};
+
+static const struct iris_fmt *
+find_format(struct iris_inst *inst, u32 pixfmt, u32 type)
+{
+ const struct iris_fmt *fmt = iris_venc_formats;
+ unsigned int size = ARRAY_SIZE(iris_venc_formats);
+ unsigned int i;
+
+ for (i = 0; i < size; i++) {
+ if (fmt[i].pixfmt == pixfmt)
+ break;
+ }
+
+ if (i == size || fmt[i].type != type)
+ return NULL;
+
+ return &fmt[i];
+}
+
+static const struct iris_fmt *
+find_format_by_index(struct iris_inst *inst, u32 index, u32 type)
+{
+ const struct iris_fmt *fmt = iris_venc_formats;
+ unsigned int size = ARRAY_SIZE(iris_venc_formats);
+
+ if (index >= size || fmt[index].type != type)
+ return NULL;
+
+ return &fmt[index];
+}
+
+int iris_venc_enum_fmt(struct iris_inst *inst, struct v4l2_fmtdesc *f)
+{
+ const struct iris_fmt *fmt;
+
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (f->index)
+ return -EINVAL;
+ f->pixelformat = V4L2_PIX_FMT_NV12;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ fmt = find_format_by_index(inst, f->index, f->type);
+ if (!fmt)
+ return -EINVAL;
+
+ f->pixelformat = fmt->pixfmt;
+ f->flags = V4L2_FMT_FLAG_COMPRESSED | V4L2_FMT_FLAG_ENC_CAP_FRAME_INTERVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int iris_venc_try_fmt(struct iris_inst *inst, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
+ const struct iris_fmt *fmt;
+ struct v4l2_format *f_inst;
+
+ memset(pixmp->reserved, 0, sizeof(pixmp->reserved));
+ fmt = find_format(inst, pixmp->pixelformat, f->type);
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (f->fmt.pix_mp.pixelformat != V4L2_PIX_FMT_NV12) {
+ f_inst = inst->fmt_src;
+ f->fmt.pix_mp.width = f_inst->fmt.pix_mp.width;
+ f->fmt.pix_mp.height = f_inst->fmt.pix_mp.height;
+ f->fmt.pix_mp.pixelformat = f_inst->fmt.pix_mp.pixelformat;
+ }
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ if (!fmt) {
+ f_inst = inst->fmt_dst;
+ f->fmt.pix_mp.width = f_inst->fmt.pix_mp.width;
+ f->fmt.pix_mp.height = f_inst->fmt.pix_mp.height;
+ f->fmt.pix_mp.pixelformat = f_inst->fmt.pix_mp.pixelformat;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (pixmp->field == V4L2_FIELD_ANY)
+ pixmp->field = V4L2_FIELD_NONE;
+
+ pixmp->num_planes = 1;
+
+ return 0;
+}
+
+static int iris_venc_s_fmt_output(struct iris_inst *inst, struct v4l2_format *f)
+{
+ struct v4l2_format *fmt;
+
+ iris_venc_try_fmt(inst, f);
+
+ if (!(find_format(inst, f->fmt.pix_mp.pixelformat, f->type)))
+ return -EINVAL;
+
+ fmt = inst->fmt_dst;
+ fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ fmt->fmt.pix_mp.num_planes = 1;
+ fmt->fmt.pix_mp.plane_fmt[0].bytesperline = 0;
+ fmt->fmt.pix_mp.plane_fmt[0].sizeimage = iris_get_buffer_size(inst, BUF_OUTPUT);
+
+ if (f->fmt.pix_mp.colorspace != V4L2_COLORSPACE_DEFAULT &&
+ f->fmt.pix_mp.colorspace != V4L2_COLORSPACE_REC709)
+ f->fmt.pix_mp.colorspace = V4L2_COLORSPACE_DEFAULT;
+ fmt->fmt.pix_mp.colorspace = f->fmt.pix_mp.colorspace;
+ fmt->fmt.pix_mp.xfer_func = f->fmt.pix_mp.xfer_func;
+ fmt->fmt.pix_mp.ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
+ fmt->fmt.pix_mp.quantization = f->fmt.pix_mp.quantization;
+
+ inst->buffers[BUF_OUTPUT].min_count = iris_vpu_buf_count(inst, BUF_OUTPUT);
+ inst->buffers[BUF_OUTPUT].size = fmt->fmt.pix_mp.plane_fmt[0].sizeimage;
+ fmt->fmt.pix_mp.pixelformat = f->fmt.pix_mp.pixelformat;
+ inst->codec = f->fmt.pix_mp.pixelformat;
+ memcpy(f, fmt, sizeof(struct v4l2_format));
+
+ return 0;
+}
+
+static int iris_venc_s_fmt_input(struct iris_inst *inst, struct v4l2_format *f)
+{
+ struct v4l2_format *fmt, *output_fmt;
+
+ iris_venc_try_fmt(inst, f);
+
+ if (f->fmt.pix_mp.pixelformat != V4L2_PIX_FMT_NV12)
+ return -EINVAL;
+
+ fmt = inst->fmt_src;
+ fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ fmt->fmt.pix_mp.width = ALIGN(f->fmt.pix_mp.width, 128);
+ fmt->fmt.pix_mp.height = ALIGN(f->fmt.pix_mp.height, 32);
+ fmt->fmt.pix_mp.num_planes = 1;
+ fmt->fmt.pix_mp.pixelformat = f->fmt.pix_mp.pixelformat;
+ fmt->fmt.pix_mp.plane_fmt[0].bytesperline = ALIGN(f->fmt.pix_mp.width, 128);
+ fmt->fmt.pix_mp.plane_fmt[0].sizeimage = iris_get_buffer_size(inst, BUF_INPUT);
+
+ fmt->fmt.pix_mp.colorspace = f->fmt.pix_mp.colorspace;
+ fmt->fmt.pix_mp.xfer_func = f->fmt.pix_mp.xfer_func;
+ fmt->fmt.pix_mp.ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
+ fmt->fmt.pix_mp.quantization = f->fmt.pix_mp.quantization;
+
+ output_fmt = inst->fmt_dst;
+ output_fmt->fmt.pix_mp.width = fmt->fmt.pix_mp.width;
+ output_fmt->fmt.pix_mp.height = fmt->fmt.pix_mp.height;
+ output_fmt->fmt.pix_mp.colorspace = fmt->fmt.pix_mp.colorspace;
+ output_fmt->fmt.pix_mp.xfer_func = fmt->fmt.pix_mp.xfer_func;
+ output_fmt->fmt.pix_mp.ycbcr_enc = fmt->fmt.pix_mp.ycbcr_enc;
+ output_fmt->fmt.pix_mp.quantization = fmt->fmt.pix_mp.quantization;
+
+ inst->buffers[BUF_INPUT].min_count = iris_vpu_buf_count(inst, BUF_INPUT);
+ inst->buffers[BUF_INPUT].size = fmt->fmt.pix_mp.plane_fmt[0].sizeimage;
+
+ if (f->fmt.pix_mp.width != inst->crop.width ||
+ f->fmt.pix_mp.height != inst->crop.height) {
+ inst->crop.top = 0;
+ inst->crop.left = 0;
+ inst->crop.width = fmt->fmt.pix_mp.width;
+ inst->crop.height = fmt->fmt.pix_mp.height;
+
+ iris_venc_s_fmt_output(inst, output_fmt);
+ }
+
+ memcpy(f, fmt, sizeof(struct v4l2_format));
+
+ return 0;
+}
+
+int iris_venc_s_fmt(struct iris_inst *inst, struct v4l2_format *f)
+{
+ struct vb2_queue *q;
+
+ q = v4l2_m2m_get_vq(inst->m2m_ctx, f->type);
+ if (!q)
+ return -EINVAL;
+
+ if (vb2_is_busy(q))
+ return -EBUSY;
+
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ return iris_venc_s_fmt_input(inst, f);
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ return iris_venc_s_fmt_output(inst, f);
+ default:
+ return -EINVAL;
+ }
+}
+
+int iris_venc_validate_format(struct iris_inst *inst, u32 pixelformat)
+{
+ const struct iris_fmt *fmt = NULL;
+
+ if (pixelformat != V4L2_PIX_FMT_NV12) {
+ fmt = find_format(inst, pixelformat, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (!fmt)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int iris_venc_subscribe_event(struct iris_inst *inst,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_EOS:
+ return v4l2_event_subscribe(&inst->fh, sub, 0, NULL);
+ case V4L2_EVENT_CTRL:
+ return v4l2_ctrl_subscribe_event(&inst->fh, sub);
+ default:
+ return -EINVAL;
+ }
+}
+
+int iris_venc_s_selection(struct iris_inst *inst, struct v4l2_selection *s)
+{
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP:
+ s->r.left = 0;
+ s->r.top = 0;
+
+ if (s->r.width > inst->fmt_src->fmt.pix_mp.width ||
+ s->r.height > inst->fmt_src->fmt.pix_mp.height)
+ return -EINVAL;
+
+ inst->crop.left = s->r.left;
+ inst->crop.top = s->r.top;
+ inst->crop.width = s->r.width;
+ inst->crop.height = s->r.height;
+ inst->fmt_dst->fmt.pix_mp.width = inst->crop.width;
+ inst->fmt_dst->fmt.pix_mp.height = inst->crop.height;
+ return iris_venc_s_fmt_output(inst, inst->fmt_dst);
+ default:
+ return -EINVAL;
+ }
+}
+
+int iris_venc_s_param(struct iris_inst *inst, struct v4l2_streamparm *s_parm)
+{
+ struct platform_inst_caps *caps = inst->core->iris_platform_data->inst_caps;
+ struct vb2_queue *src_q = v4l2_m2m_get_src_vq(inst->m2m_ctx);
+ struct vb2_queue *dst_q = v4l2_m2m_get_dst_vq(inst->m2m_ctx);
+ struct v4l2_fract *timeperframe = NULL;
+ u32 default_rate = DEFAULT_FPS;
+ bool is_frame_rate = false;
+ u64 us_per_frame, fps;
+	u32 max_rate;
+	int ret = 0;
+
+ if (s_parm->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ timeperframe = &s_parm->parm.output.timeperframe;
+ max_rate = caps->max_operating_rate;
+ s_parm->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+ } else {
+ timeperframe = &s_parm->parm.capture.timeperframe;
+ is_frame_rate = true;
+ max_rate = caps->max_frame_rate;
+ s_parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ }
+
+	if (!timeperframe->numerator)
+		timeperframe->numerator = 1;
+	if (!timeperframe->denominator)
+		timeperframe->denominator = default_rate;
+
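+	/*
+	 * Convert the interval to a rate, e.g. timeperframe = 1/30 gives
+	 * us_per_frame = 33333 and fps = USEC_PER_SEC / 33333 = 30.
+	 */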
+ us_per_frame = timeperframe->numerator * (u64)USEC_PER_SEC;
+ do_div(us_per_frame, timeperframe->denominator);
+
+ if (!us_per_frame)
+ return -EINVAL;
+
+ fps = (u64)USEC_PER_SEC;
+ do_div(fps, us_per_frame);
+ if (fps > max_rate) {
+		ret = -EINVAL;
+ goto reset_rate;
+ }
+
+ if (is_frame_rate)
+ inst->frame_rate = (u32)fps;
+ else
+ inst->operating_rate = (u32)fps;
+
+ if ((s_parm->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE && vb2_is_streaming(src_q)) ||
+ (s_parm->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && vb2_is_streaming(dst_q))) {
+ ret = iris_check_core_mbpf(inst);
+ if (ret)
+ goto reset_rate;
+ ret = iris_check_core_mbps(inst);
+ if (ret)
+ goto reset_rate;
+ }
+
+ return 0;
+
+reset_rate:
+	if (is_frame_rate)
+		inst->frame_rate = default_rate;
+	else
+		inst->operating_rate = default_rate;
+
+ return ret;
+}
+
+int iris_venc_g_param(struct iris_inst *inst, struct v4l2_streamparm *s_parm)
+{
+ struct v4l2_fract *timeperframe = NULL;
+
+ if (s_parm->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ timeperframe = &s_parm->parm.output.timeperframe;
+ timeperframe->numerator = 1;
+ timeperframe->denominator = inst->operating_rate;
+ s_parm->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+ } else {
+ timeperframe = &s_parm->parm.capture.timeperframe;
+ timeperframe->numerator = 1;
+ timeperframe->denominator = inst->frame_rate;
+ s_parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ }
+
+ return 0;
+}
+
+int iris_venc_streamon_input(struct iris_inst *inst)
+{
+ int ret;
+
+ ret = iris_set_properties(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+
+ ret = iris_alloc_and_queue_persist_bufs(inst, BUF_ARP);
+ if (ret)
+ return ret;
+
+ iris_get_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+ ret = iris_destroy_dequeued_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+
+ ret = iris_create_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+
+ ret = iris_queue_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+
+ return iris_process_streamon_input(inst);
+}
+
+int iris_venc_streamon_output(struct iris_inst *inst)
+{
+ int ret;
+
+ ret = iris_set_properties(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret)
+ goto error;
+
+ ret = iris_alloc_and_queue_persist_bufs(inst, BUF_ARP);
+ if (ret)
+		goto error;
+
+ iris_get_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+
+ ret = iris_destroy_dequeued_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret)
+ goto error;
+
+ ret = iris_create_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret)
+ goto error;
+
+ ret = iris_queue_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret)
+ goto error;
+
+ ret = iris_process_streamon_output(inst);
+ if (ret)
+ goto error;
+
+ return ret;
+
+error:
+ iris_session_streamoff(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+
+ return ret;
+}
+
+int iris_venc_qbuf(struct iris_inst *inst, struct vb2_v4l2_buffer *vbuf)
+{
+ struct iris_buffer *buf = to_iris_buffer(vbuf);
+ struct vb2_buffer *vb2 = &vbuf->vb2_buf;
+ struct vb2_queue *q;
+ int ret;
+
+ ret = iris_vb2_buffer_to_driver(vb2, buf);
+ if (ret)
+ return ret;
+
+ if (buf->type == BUF_INPUT)
+ iris_set_ts_metadata(inst, vbuf);
+
+ q = v4l2_m2m_get_vq(inst->m2m_ctx, vb2->type);
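+	/*
+	 * Buffers queued before streamon are held as deferred and queued
+	 * to the firmware once streaming starts.
+	 */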
+ if (!vb2_is_streaming(q)) {
+ buf->attr |= BUF_ATTR_DEFERRED;
+ return 0;
+ }
+
+ iris_scale_power(inst);
+
+ return iris_queue_buffer(inst, buf);
+}
+
+int iris_venc_start_cmd(struct iris_inst *inst)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ enum iris_inst_sub_state clear_sub_state = 0;
+ struct vb2_queue *dst_vq;
+ int ret;
+
+ dst_vq = v4l2_m2m_get_dst_vq(inst->m2m_ctx);
+
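+	/*
+	 * ENC_CMD_START restarts a drained session: clear the drain
+	 * sub-states and resume whichever ports the firmware paused.
+	 */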
+ if (inst->sub_state & IRIS_INST_SUB_DRAIN &&
+ inst->sub_state & IRIS_INST_SUB_DRAIN_LAST) {
+ vb2_clear_last_buffer_dequeued(dst_vq);
+ clear_sub_state = IRIS_INST_SUB_DRAIN | IRIS_INST_SUB_DRAIN_LAST;
+ if (inst->sub_state & IRIS_INST_SUB_INPUT_PAUSE) {
+ if (hfi_ops->session_resume_drain) {
+ ret = hfi_ops->session_resume_drain(inst,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+ }
+ clear_sub_state |= IRIS_INST_SUB_INPUT_PAUSE;
+ }
+ if (inst->sub_state & IRIS_INST_SUB_OUTPUT_PAUSE) {
+ if (hfi_ops->session_resume_drain) {
+ ret = hfi_ops->session_resume_drain(inst,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret)
+ return ret;
+ }
+ clear_sub_state |= IRIS_INST_SUB_OUTPUT_PAUSE;
+ }
+ } else {
+ dev_err(inst->core->dev, "start called before receiving last_flag\n");
+ iris_inst_change_state(inst, IRIS_INST_ERROR);
+ return -EBUSY;
+ }
+
+ inst->last_buffer_dequeued = false;
+
+ return iris_inst_change_sub_state(inst, clear_sub_state, 0);
+}
+
+int iris_venc_stop_cmd(struct iris_inst *inst)
+{
+ const struct iris_hfi_command_ops *hfi_ops = inst->core->hfi_ops;
+ int ret;
+
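+	/* ENC_CMD_STOP maps to a firmware drain request on the input port. */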
+ ret = hfi_ops->session_drain(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+
+ ret = iris_inst_change_sub_state(inst, 0, IRIS_INST_SUB_DRAIN);
+
+ iris_scale_power(inst);
+
+ return ret;
+}
diff --git a/drivers/media/platform/qcom/iris/iris_venc.h b/drivers/media/platform/qcom/iris/iris_venc.h
new file mode 100644
index 000000000000..c4db7433da53
--- /dev/null
+++ b/drivers/media/platform/qcom/iris/iris_venc.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _IRIS_VENC_H_
+#define _IRIS_VENC_H_
+
+struct iris_inst;
+
+int iris_venc_inst_init(struct iris_inst *inst);
+void iris_venc_inst_deinit(struct iris_inst *inst);
+int iris_venc_enum_fmt(struct iris_inst *inst, struct v4l2_fmtdesc *f);
+int iris_venc_try_fmt(struct iris_inst *inst, struct v4l2_format *f);
+int iris_venc_s_fmt(struct iris_inst *inst, struct v4l2_format *f);
+int iris_venc_validate_format(struct iris_inst *inst, u32 pixelformat);
+int iris_venc_subscribe_event(struct iris_inst *inst, const struct v4l2_event_subscription *sub);
+int iris_venc_s_selection(struct iris_inst *inst, struct v4l2_selection *s);
+int iris_venc_g_param(struct iris_inst *inst, struct v4l2_streamparm *s_parm);
+int iris_venc_s_param(struct iris_inst *inst, struct v4l2_streamparm *s_parm);
+int iris_venc_streamon_input(struct iris_inst *inst);
+int iris_venc_streamon_output(struct iris_inst *inst);
+int iris_venc_qbuf(struct iris_inst *inst, struct vb2_v4l2_buffer *vbuf);
+int iris_venc_start_cmd(struct iris_inst *inst);
+int iris_venc_stop_cmd(struct iris_inst *inst);
+
+#endif
diff --git a/drivers/media/platform/qcom/iris/iris_vidc.c b/drivers/media/platform/qcom/iris/iris_vidc.c
index c417e8c31f80..d38d0f6961cd 100644
--- a/drivers/media/platform/qcom/iris/iris_vidc.c
+++ b/drivers/media/platform/qcom/iris/iris_vidc.c
@@ -12,6 +12,7 @@
#include "iris_vidc.h"
#include "iris_instance.h"
#include "iris_vdec.h"
+#include "iris_venc.h"
#include "iris_vb2.h"
#include "iris_vpu_buffer.h"
#include "iris_platform_common.h"
@@ -21,16 +22,19 @@
#define STEP_WIDTH 1
#define STEP_HEIGHT 1
-static void iris_v4l2_fh_init(struct iris_inst *inst)
+static void iris_v4l2_fh_init(struct iris_inst *inst, struct file *filp)
{
- v4l2_fh_init(&inst->fh, inst->core->vdev_dec);
+ if (inst->domain == ENCODER)
+ v4l2_fh_init(&inst->fh, inst->core->vdev_enc);
+ else if (inst->domain == DECODER)
+ v4l2_fh_init(&inst->fh, inst->core->vdev_dec);
inst->fh.ctrl_handler = &inst->ctrl_handler;
- v4l2_fh_add(&inst->fh);
+ v4l2_fh_add(&inst->fh, filp);
}
-static void iris_v4l2_fh_deinit(struct iris_inst *inst)
+static void iris_v4l2_fh_deinit(struct iris_inst *inst, struct file *filp)
{
- v4l2_fh_del(&inst->fh);
+ v4l2_fh_del(&inst->fh, filp);
inst->fh.ctrl_handler = NULL;
v4l2_fh_exit(&inst->fh);
}
@@ -67,9 +71,9 @@ static void iris_remove_session(struct iris_inst *inst)
mutex_unlock(&core->lock);
}
-static inline struct iris_inst *iris_get_inst(struct file *filp, void *fh)
+static inline struct iris_inst *iris_get_inst(struct file *filp)
{
- return container_of(filp->private_data, struct iris_inst, fh);
+ return container_of(file_to_v4l2_fh(filp), struct iris_inst, fh);
}
static void iris_m2m_device_run(void *priv)
@@ -126,9 +130,19 @@ iris_m2m_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_
int iris_open(struct file *filp)
{
struct iris_core *core = video_drvdata(filp);
+ struct video_device *vdev;
struct iris_inst *inst;
+ u32 session_type;
int ret;
+ vdev = video_devdata(filp);
+ if (strcmp(vdev->name, "qcom-iris-decoder") == 0)
+ session_type = DECODER;
+ else if (strcmp(vdev->name, "qcom-iris-encoder") == 0)
+ session_type = ENCODER;
+ else
+ return -EINVAL;
+
ret = pm_runtime_resume_and_get(core->dev);
if (ret < 0)
return ret;
@@ -147,6 +161,7 @@ int iris_open(struct file *filp)
return -ENOMEM;
inst->core = core;
+ inst->domain = session_type;
inst->session_id = hash32_ptr(inst);
inst->state = IRIS_INST_DEINIT;
@@ -161,10 +176,12 @@ int iris_open(struct file *filp)
INIT_LIST_HEAD(&inst->buffers[BUF_DPB].list);
INIT_LIST_HEAD(&inst->buffers[BUF_PERSIST].list);
INIT_LIST_HEAD(&inst->buffers[BUF_SCRATCH_1].list);
+ INIT_LIST_HEAD(&inst->buffers[BUF_SCRATCH_2].list);
+ INIT_LIST_HEAD(&inst->buffers[BUF_VPSS].list);
init_completion(&inst->completion);
init_completion(&inst->flush_completion);
- iris_v4l2_fh_init(inst);
+ iris_v4l2_fh_init(inst, filp);
inst->m2m_dev = v4l2_m2m_init(&iris_m2m_ops);
if (IS_ERR_OR_NULL(inst->m2m_dev)) {
@@ -178,14 +195,16 @@ int iris_open(struct file *filp)
goto fail_m2m_release;
}
- ret = iris_vdec_inst_init(inst);
+ if (inst->domain == DECODER)
+ ret = iris_vdec_inst_init(inst);
+ else if (inst->domain == ENCODER)
+ ret = iris_venc_inst_init(inst);
if (ret)
goto fail_m2m_ctx_release;
iris_add_session(inst);
inst->fh.m2m_ctx = inst->m2m_ctx;
- filp->private_data = &inst->fh;
return 0;
@@ -194,7 +213,7 @@ fail_m2m_ctx_release:
fail_m2m_release:
v4l2_m2m_release(inst->m2m_dev);
fail_v4l2_fh_deinit:
- iris_v4l2_fh_deinit(inst);
+ iris_v4l2_fh_deinit(inst, filp);
mutex_destroy(&inst->ctx_q_lock);
mutex_destroy(&inst->lock);
kfree(inst);
@@ -240,26 +259,42 @@ static void iris_check_num_queued_internal_buffers(struct iris_inst *inst, u32 p
for (i = 0; i < internal_buffer_count; i++) {
buffers = &inst->buffers[internal_buf_type[i]];
+ count = 0;
list_for_each_entry_safe(buf, next, &buffers->list, list)
count++;
if (count)
dev_err(inst->core->dev, "%d buffer of type %d not released",
count, internal_buf_type[i]);
}
+
+ if (inst->domain == DECODER)
+ buffers = &inst->buffers[BUF_PERSIST];
+ else
+ buffers = &inst->buffers[BUF_ARP];
+
+ count = 0;
+ list_for_each_entry_safe(buf, next, &buffers->list, list)
+ count++;
+ if (count)
+ dev_err(inst->core->dev, "%d buffer of type %d not released",
+ count, inst->domain == DECODER ? BUF_PERSIST : BUF_ARP);
}
int iris_close(struct file *filp)
{
- struct iris_inst *inst = iris_get_inst(filp, NULL);
+ struct iris_inst *inst = iris_get_inst(filp);
v4l2_ctrl_handler_free(&inst->ctrl_handler);
v4l2_m2m_ctx_release(inst->m2m_ctx);
v4l2_m2m_release(inst->m2m_dev);
mutex_lock(&inst->lock);
- iris_vdec_inst_deinit(inst);
+ if (inst->domain == DECODER)
+ iris_vdec_inst_deinit(inst);
+ else if (inst->domain == ENCODER)
+ iris_venc_inst_deinit(inst);
iris_session_close(inst);
iris_inst_change_state(inst, IRIS_INST_DEINIT);
- iris_v4l2_fh_deinit(inst);
+ iris_v4l2_fh_deinit(inst, filp);
iris_destroy_all_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
iris_destroy_all_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
iris_check_num_queued_internal_buffers(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
@@ -269,25 +304,34 @@ int iris_close(struct file *filp)
mutex_destroy(&inst->ctx_q_lock);
mutex_destroy(&inst->lock);
kfree(inst);
- filp->private_data = NULL;
return 0;
}
static int iris_enum_fmt(struct file *filp, void *fh, struct v4l2_fmtdesc *f)
{
- struct iris_inst *inst = iris_get_inst(filp, NULL);
+ struct iris_inst *inst = iris_get_inst(filp);
- return iris_vdec_enum_fmt(inst, f);
+ if (inst->domain == DECODER)
+ return iris_vdec_enum_fmt(inst, f);
+ else if (inst->domain == ENCODER)
+ return iris_venc_enum_fmt(inst, f);
+ else
+ return -EINVAL;
}
static int iris_try_fmt_vid_mplane(struct file *filp, void *fh, struct v4l2_format *f)
{
- struct iris_inst *inst = iris_get_inst(filp, NULL);
- int ret;
+ struct iris_inst *inst = iris_get_inst(filp);
+ int ret = 0;
mutex_lock(&inst->lock);
- ret = iris_vdec_try_fmt(inst, f);
+
+ if (inst->domain == DECODER)
+ ret = iris_vdec_try_fmt(inst, f);
+ else if (inst->domain == ENCODER)
+ ret = iris_venc_try_fmt(inst, f);
+
mutex_unlock(&inst->lock);
return ret;
@@ -295,11 +339,16 @@ static int iris_try_fmt_vid_mplane(struct file *filp, void *fh, struct v4l2_form
static int iris_s_fmt_vid_mplane(struct file *filp, void *fh, struct v4l2_format *f)
{
- struct iris_inst *inst = iris_get_inst(filp, NULL);
- int ret;
+ struct iris_inst *inst = iris_get_inst(filp);
+ int ret = 0;
mutex_lock(&inst->lock);
- ret = iris_vdec_s_fmt(inst, f);
+
+ if (inst->domain == DECODER)
+ ret = iris_vdec_s_fmt(inst, f);
+ else if (inst->domain == ENCODER)
+ ret = iris_venc_s_fmt(inst, f);
+
mutex_unlock(&inst->lock);
return ret;
@@ -307,7 +356,7 @@ static int iris_s_fmt_vid_mplane(struct file *filp, void *fh, struct v4l2_format
static int iris_g_fmt_vid_mplane(struct file *filp, void *fh, struct v4l2_format *f)
{
- struct iris_inst *inst = iris_get_inst(filp, NULL);
+ struct iris_inst *inst = iris_get_inst(filp);
int ret = 0;
mutex_lock(&inst->lock);
@@ -326,15 +375,20 @@ static int iris_g_fmt_vid_mplane(struct file *filp, void *fh, struct v4l2_format
static int iris_enum_framesizes(struct file *filp, void *fh,
struct v4l2_frmsizeenum *fsize)
{
- struct iris_inst *inst = iris_get_inst(filp, NULL);
+ struct iris_inst *inst = iris_get_inst(filp);
struct platform_inst_caps *caps;
+ int ret = 0;
if (fsize->index)
return -EINVAL;
- if (fsize->pixel_format != V4L2_PIX_FMT_H264 &&
- fsize->pixel_format != V4L2_PIX_FMT_NV12)
- return -EINVAL;
+ if (inst->domain == DECODER)
+ ret = iris_vdec_validate_format(inst, fsize->pixel_format);
+ else
+ ret = iris_venc_validate_format(inst, fsize->pixel_format);
+
+ if (ret)
+ return ret;
caps = inst->core->iris_platform_data->inst_caps;
@@ -346,55 +400,174 @@ static int iris_enum_framesizes(struct file *filp, void *fh,
fsize->stepwise.max_height = caps->max_frame_height;
fsize->stepwise.step_height = STEP_HEIGHT;
+ return ret;
+}
+
+static int iris_enum_frameintervals(struct file *filp, void *fh,
+				    struct v4l2_frmivalenum *fival)
+{
+ struct iris_inst *inst = iris_get_inst(filp);
+ struct iris_core *core = inst->core;
+ struct platform_inst_caps *caps;
+ u32 fps, mbpf;
+ int ret = 0;
+
+ if (inst->domain == DECODER)
+ return -ENOTTY;
+
+ if (fival->index)
+ return -EINVAL;
+
+ ret = iris_venc_validate_format(inst, fival->pixel_format);
+ if (ret)
+ return ret;
+
+ if (!fival->width || !fival->height)
+ return -EINVAL;
+
+ caps = inst->core->iris_platform_data->inst_caps;
+ if (fival->width > caps->max_frame_width ||
+ fival->width < caps->min_frame_width ||
+ fival->height > caps->max_frame_height ||
+ fival->height < caps->min_frame_height)
+ return -EINVAL;
+
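+	/*
+	 * The fastest supported interval is bounded by core throughput:
+	 * fps = max_core_mbps / macroblocks per frame, rounded up.
+	 */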
+ mbpf = NUM_MBS_PER_FRAME(fival->height, fival->width);
+ fps = DIV_ROUND_UP(core->iris_platform_data->max_core_mbps, mbpf);
+
+ fival->type = V4L2_FRMIVAL_TYPE_STEPWISE;
+ fival->stepwise.min.numerator = 1;
+	fival->stepwise.min.denominator = min_t(u32, fps, MAXIMUM_FPS);
+ fival->stepwise.max.numerator = 1;
+ fival->stepwise.max.denominator = 1;
+ fival->stepwise.step.numerator = 1;
+ fival->stepwise.step.denominator = MAXIMUM_FPS;
+
return 0;
}
static int iris_querycap(struct file *filp, void *fh, struct v4l2_capability *cap)
{
+ struct iris_inst *inst = iris_get_inst(filp);
+
strscpy(cap->driver, IRIS_DRV_NAME, sizeof(cap->driver));
- strscpy(cap->card, "Iris Decoder", sizeof(cap->card));
+
+ if (inst->domain == DECODER)
+ strscpy(cap->card, "Iris Decoder", sizeof(cap->card));
+ else
+ strscpy(cap->card, "Iris Encoder", sizeof(cap->card));
return 0;
}
static int iris_g_selection(struct file *filp, void *fh, struct v4l2_selection *s)
{
- struct iris_inst *inst = iris_get_inst(filp, NULL);
+ struct iris_inst *inst = iris_get_inst(filp);
- if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ inst->domain == DECODER)
return -EINVAL;
- switch (s->target) {
- case V4L2_SEL_TGT_CROP_BOUNDS:
- case V4L2_SEL_TGT_CROP_DEFAULT:
- case V4L2_SEL_TGT_CROP:
- case V4L2_SEL_TGT_COMPOSE_BOUNDS:
- case V4L2_SEL_TGT_COMPOSE_PADDED:
- case V4L2_SEL_TGT_COMPOSE_DEFAULT:
- case V4L2_SEL_TGT_COMPOSE:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
+ inst->domain == ENCODER)
+ return -EINVAL;
+
+ if (inst->domain == DECODER) {
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_PADDED:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE:
+ s->r.left = inst->crop.left;
+ s->r.top = inst->crop.top;
+ s->r.width = inst->crop.width;
+ s->r.height = inst->crop.height;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else if (inst->domain == ENCODER) {
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ s->r.width = inst->fmt_src->fmt.pix_mp.width;
+ s->r.height = inst->fmt_src->fmt.pix_mp.height;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ s->r.width = inst->crop.width;
+ s->r.height = inst->crop.height;
+ break;
+ default:
+ return -EINVAL;
+ }
s->r.left = inst->crop.left;
s->r.top = inst->crop.top;
- s->r.width = inst->crop.width;
- s->r.height = inst->crop.height;
- break;
- default:
- return -EINVAL;
}
return 0;
}
+static int iris_s_selection(struct file *filp, void *fh, struct v4l2_selection *s)
+{
+ struct iris_inst *inst = iris_get_inst(filp);
+
+ if (inst->domain == DECODER)
+ return -EINVAL;
+ else if (inst->domain == ENCODER)
+ return iris_venc_s_selection(inst, s);
+
+ return -EINVAL;
+}
+
static int iris_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subscription *sub)
{
struct iris_inst *inst = container_of(fh, struct iris_inst, fh);
- return iris_vdec_subscribe_event(inst, sub);
+ if (inst->domain == DECODER)
+ return iris_vdec_subscribe_event(inst, sub);
+ else if (inst->domain == ENCODER)
+ return iris_venc_subscribe_event(inst, sub);
+
+ return -EINVAL;
+}
+
+static int iris_s_parm(struct file *filp, void *fh, struct v4l2_streamparm *a)
+{
+ struct iris_inst *inst = iris_get_inst(filp);
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return -EINVAL;
+
+ if (inst->domain == ENCODER)
+ return iris_venc_s_param(inst, a);
+ else
+ return -EINVAL;
+}
+
+static int iris_g_parm(struct file *filp, void *fh, struct v4l2_streamparm *a)
+{
+ struct iris_inst *inst = iris_get_inst(filp);
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return -EINVAL;
+
+ if (inst->domain == ENCODER)
+ return iris_venc_g_param(inst, a);
+ else
+ return -EINVAL;
}
static int iris_dec_cmd(struct file *filp, void *fh,
struct v4l2_decoder_cmd *dec)
{
- struct iris_inst *inst = iris_get_inst(filp, NULL);
+ struct iris_inst *inst = iris_get_inst(filp);
int ret = 0;
mutex_lock(&inst->lock);
@@ -424,6 +597,39 @@ unlock:
return ret;
}
+static int iris_enc_cmd(struct file *filp, void *fh,
+ struct v4l2_encoder_cmd *enc)
+{
+ struct iris_inst *inst = iris_get_inst(filp);
+ int ret = 0;
+
+ mutex_lock(&inst->lock);
+
+ ret = v4l2_m2m_ioctl_encoder_cmd(filp, fh, enc);
+ if (ret)
+ goto unlock;
+
+ if (inst->state == IRIS_INST_DEINIT)
+ goto unlock;
+
+ if (!iris_allow_cmd(inst, enc->cmd)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ if (enc->cmd == V4L2_ENC_CMD_START)
+ ret = iris_venc_start_cmd(inst);
+ else if (enc->cmd == V4L2_ENC_CMD_STOP)
+ ret = iris_venc_stop_cmd(inst);
+ else
+ ret = -EINVAL;
+
+unlock:
+ mutex_unlock(&inst->lock);
+
+ return ret;
+}
+
static struct v4l2_file_operations iris_v4l2_file_ops = {
.owner = THIS_MODULE,
.open = iris_open,
@@ -443,7 +649,7 @@ static const struct vb2_ops iris_vb2_ops = {
.buf_queue = iris_vb2_buf_queue,
};
-static const struct v4l2_ioctl_ops iris_v4l2_ioctl_ops = {
+static const struct v4l2_ioctl_ops iris_v4l2_ioctl_ops_dec = {
.vidioc_enum_fmt_vid_cap = iris_enum_fmt,
.vidioc_enum_fmt_vid_out = iris_enum_fmt,
.vidioc_try_fmt_vid_cap_mplane = iris_try_fmt_vid_mplane,
@@ -471,9 +677,42 @@ static const struct v4l2_ioctl_ops iris_v4l2_ioctl_ops = {
.vidioc_decoder_cmd = iris_dec_cmd,
};
+static const struct v4l2_ioctl_ops iris_v4l2_ioctl_ops_enc = {
+ .vidioc_enum_fmt_vid_cap = iris_enum_fmt,
+ .vidioc_enum_fmt_vid_out = iris_enum_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = iris_try_fmt_vid_mplane,
+ .vidioc_try_fmt_vid_out_mplane = iris_try_fmt_vid_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = iris_s_fmt_vid_mplane,
+ .vidioc_s_fmt_vid_out_mplane = iris_s_fmt_vid_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = iris_g_fmt_vid_mplane,
+ .vidioc_g_fmt_vid_out_mplane = iris_g_fmt_vid_mplane,
+ .vidioc_enum_framesizes = iris_enum_framesizes,
+ .vidioc_enum_frameintervals = iris_enum_frameintervals,
+ .vidioc_querycap = iris_querycap,
+ .vidioc_subscribe_event = iris_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_g_selection = iris_g_selection,
+ .vidioc_s_selection = iris_s_selection,
+ .vidioc_s_parm = iris_s_parm,
+ .vidioc_g_parm = iris_g_parm,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_remove_bufs = v4l2_m2m_ioctl_remove_bufs,
+ .vidioc_try_encoder_cmd = v4l2_m2m_ioctl_try_encoder_cmd,
+ .vidioc_encoder_cmd = iris_enc_cmd,
+};
+
void iris_init_ops(struct iris_core *core)
{
core->iris_v4l2_file_ops = &iris_v4l2_file_ops;
core->iris_vb2_ops = &iris_vb2_ops;
- core->iris_v4l2_ioctl_ops = &iris_v4l2_ioctl_ops;
+ core->iris_v4l2_ioctl_ops_dec = &iris_v4l2_ioctl_ops_dec;
+ core->iris_v4l2_ioctl_ops_enc = &iris_v4l2_ioctl_ops_enc;
}
diff --git a/drivers/media/platform/qcom/iris/iris_vpu2.c b/drivers/media/platform/qcom/iris/iris_vpu2.c
index 7cf1bfc352d3..de7d142316d2 100644
--- a/drivers/media/platform/qcom/iris/iris_vpu2.c
+++ b/drivers/media/platform/qcom/iris/iris_vpu2.c
@@ -34,6 +34,8 @@ static u64 iris_vpu2_calc_freq(struct iris_inst *inst, size_t data_size)
const struct vpu_ops iris_vpu2_ops = {
.power_off_hw = iris_vpu_power_off_hw,
+ .power_on_hw = iris_vpu_power_on_hw,
.power_off_controller = iris_vpu_power_off_controller,
+ .power_on_controller = iris_vpu_power_on_controller,
.calc_freq = iris_vpu2_calc_freq,
};
diff --git a/drivers/media/platform/qcom/iris/iris_vpu3x.c b/drivers/media/platform/qcom/iris/iris_vpu3x.c
index 9b7c9a1495ee..339776a0b467 100644
--- a/drivers/media/platform/qcom/iris/iris_vpu3x.c
+++ b/drivers/media/platform/qcom/iris/iris_vpu3x.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025 Linaro Ltd
*/
#include <linux/iopoll.h>
@@ -19,8 +20,13 @@
#define WRAPPER_IRIS_CPU_NOC_LPI_CONTROL (WRAPPER_BASE_OFFS + 0x5C)
#define REQ_POWER_DOWN_PREP BIT(0)
#define WRAPPER_IRIS_CPU_NOC_LPI_STATUS (WRAPPER_BASE_OFFS + 0x60)
+#define NOC_LPI_STATUS_DONE BIT(0) /* Indicates the NOC handshake is complete */
+#define NOC_LPI_STATUS_DENY BIT(1) /* Indicates the NOC handshake is denied */
+#define NOC_LPI_STATUS_ACTIVE BIT(2) /* Indicates the NOC is active */
#define WRAPPER_CORE_CLOCK_CONFIG (WRAPPER_BASE_OFFS + 0x88)
#define CORE_CLK_RUN 0x0
+/* VPU v3.5 */
+#define WRAPPER_IRIS_VCODEC_VPU_WRAPPER_SPARE_0 (WRAPPER_BASE_OFFS + 0x78)
#define WRAPPER_TZ_CTL_AXI_CLOCK_CONFIG (WRAPPER_TZ_BASE_OFFS + 0x14)
#define CTL_AXI_CLK_HALT BIT(0)
@@ -52,6 +58,8 @@
#define AON_WRAPPER_MVP_NOC_CORE_CLK_CONTROL (AON_BASE_OFFS + 0x20)
#define NOC_HALT BIT(0)
#define AON_WRAPPER_SPARE (AON_BASE_OFFS + 0x28)
+#define AON_WRAPPER_MVP_VIDEO_CTL_NOC_LPI_CONTROL (AON_BASE_OFFS + 0x2C)
+#define AON_WRAPPER_MVP_VIDEO_CTL_NOC_LPI_STATUS (AON_BASE_OFFS + 0x30)
static bool iris_vpu3x_hw_power_collapsed(struct iris_core *core)
{
@@ -109,7 +117,9 @@ disable_power:
static void iris_vpu33_power_off_hardware(struct iris_core *core)
{
+ bool handshake_done = false, handshake_busy = false;
u32 reg_val = 0, value, i;
+ u32 count = 0;
int ret;
if (iris_vpu3x_hw_power_collapsed(core))
@@ -128,13 +138,36 @@ static void iris_vpu33_power_off_hardware(struct iris_core *core)
goto disable_power;
}
+ /* Retry up to 1000 times as recommended by hardware documentation */
+ do {
+ /* set MNoC to low power */
+ writel(REQ_POWER_DOWN_PREP, core->reg_base + AON_WRAPPER_MVP_NOC_LPI_CONTROL);
+
+ udelay(15);
+
+ value = readl(core->reg_base + AON_WRAPPER_MVP_NOC_LPI_STATUS);
+
+ handshake_done = value & NOC_LPI_STATUS_DONE;
+ handshake_busy = value & (NOC_LPI_STATUS_DENY | NOC_LPI_STATUS_ACTIVE);
+
+ if (handshake_done || !handshake_busy)
+ break;
+
+ writel(0, core->reg_base + AON_WRAPPER_MVP_NOC_LPI_CONTROL);
+
+ udelay(15);
+
+ } while (++count < 1000);
+
+ if (!handshake_done && handshake_busy)
+ dev_err(core->dev, "LPI handshake timeout\n");
+
ret = readl_poll_timeout(core->reg_base + AON_WRAPPER_MVP_NOC_LPI_STATUS,
reg_val, reg_val & BIT(0), 200, 2000);
if (ret)
goto disable_power;
- /* set MNoC to low power, set PD_NOC_QREQ (bit 0) */
- writel(BIT(0), core->reg_base + AON_WRAPPER_MVP_NOC_LPI_CONTROL);
+ writel(0, core->reg_base + AON_WRAPPER_MVP_NOC_LPI_CONTROL);
writel(CORE_BRIDGE_SW_RESET | CORE_BRIDGE_HW_RESET_DISABLE,
core->reg_base + CPU_CS_AHB_BRIDGE_SYNC_RESET);
@@ -225,6 +258,158 @@ disable_power:
return 0;
}
+static int iris_vpu35_power_on_hw(struct iris_core *core)
+{
+ int ret;
+
+ ret = iris_enable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN]);
+ if (ret)
+ return ret;
+
+ ret = iris_prepare_enable_clock(core, IRIS_AXI_CLK);
+ if (ret)
+ goto err_disable_power;
+
+ ret = iris_prepare_enable_clock(core, IRIS_HW_FREERUN_CLK);
+ if (ret)
+ goto err_disable_axi_clk;
+
+ ret = iris_prepare_enable_clock(core, IRIS_HW_CLK);
+ if (ret)
+ goto err_disable_hw_free_clk;
+
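+	/* With clocks running, hand the hardware power domain over to hardware control. */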
+ ret = dev_pm_genpd_set_hwmode(core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN], true);
+ if (ret)
+ goto err_disable_hw_clk;
+
+ return 0;
+
+err_disable_hw_clk:
+ iris_disable_unprepare_clock(core, IRIS_HW_CLK);
+err_disable_hw_free_clk:
+ iris_disable_unprepare_clock(core, IRIS_HW_FREERUN_CLK);
+err_disable_axi_clk:
+ iris_disable_unprepare_clock(core, IRIS_AXI_CLK);
+err_disable_power:
+ iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN]);
+
+ return ret;
+}
+
+static void iris_vpu35_power_off_hw(struct iris_core *core)
+{
+ iris_vpu33_power_off_hardware(core);
+
+ iris_disable_unprepare_clock(core, IRIS_HW_FREERUN_CLK);
+ iris_disable_unprepare_clock(core, IRIS_AXI_CLK);
+}
+
+static int iris_vpu35_power_off_controller(struct iris_core *core)
+{
+ u32 clk_rst_tbl_size = core->iris_platform_data->clk_rst_tbl_size;
+ unsigned int count = 0;
+ u32 val = 0;
+ bool handshake_done, handshake_busy;
+ int ret;
+
+ writel(MSK_SIGNAL_FROM_TENSILICA | MSK_CORE_POWER_ON, core->reg_base + CPU_CS_X2RPMH);
+
+ writel(REQ_POWER_DOWN_PREP, core->reg_base + WRAPPER_IRIS_CPU_NOC_LPI_CONTROL);
+
+ ret = readl_poll_timeout(core->reg_base + WRAPPER_IRIS_CPU_NOC_LPI_STATUS,
+ val, val & BIT(0), 200, 2000);
+ if (ret)
+ goto disable_power;
+
+ writel(0, core->reg_base + WRAPPER_IRIS_CPU_NOC_LPI_CONTROL);
+
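+	/* With the CPU NOC idled, run the same LPI handshake on the video controller NOC. */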
+ /* Retry up to 1000 times as recommended by hardware documentation */
+ do {
+ /* set MNoC to low power */
+ writel(REQ_POWER_DOWN_PREP, core->reg_base + AON_WRAPPER_MVP_VIDEO_CTL_NOC_LPI_CONTROL);
+
+ udelay(15);
+
+ val = readl(core->reg_base + AON_WRAPPER_MVP_VIDEO_CTL_NOC_LPI_STATUS);
+
+ handshake_done = val & NOC_LPI_STATUS_DONE;
+ handshake_busy = val & (NOC_LPI_STATUS_DENY | NOC_LPI_STATUS_ACTIVE);
+
+ if (handshake_done || !handshake_busy)
+ break;
+
+ writel(0, core->reg_base + AON_WRAPPER_MVP_VIDEO_CTL_NOC_LPI_CONTROL);
+
+ udelay(15);
+
+ } while (++count < 1000);
+
+ if (!handshake_done && handshake_busy)
+ dev_err(core->dev, "LPI handshake timeout\n");
+
+ ret = readl_poll_timeout(core->reg_base + AON_WRAPPER_MVP_VIDEO_CTL_NOC_LPI_STATUS,
+ val, val & BIT(0), 200, 2000);
+ if (ret)
+ goto disable_power;
+
+ writel(0, core->reg_base + AON_WRAPPER_MVP_VIDEO_CTL_NOC_LPI_CONTROL);
+
+ writel(0, core->reg_base + WRAPPER_DEBUG_BRIDGE_LPI_CONTROL);
+
+ ret = readl_poll_timeout(core->reg_base + WRAPPER_DEBUG_BRIDGE_LPI_STATUS,
+ val, val == 0, 200, 2000);
+ if (ret)
+ goto disable_power;
+
+disable_power:
+ iris_disable_unprepare_clock(core, IRIS_CTRL_CLK);
+ iris_disable_unprepare_clock(core, IRIS_CTRL_FREERUN_CLK);
+ iris_disable_unprepare_clock(core, IRIS_AXI1_CLK);
+
+ iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_CTRL_POWER_DOMAIN]);
+
+ reset_control_bulk_reset(clk_rst_tbl_size, core->resets);
+
+ return 0;
+}
+
+static int iris_vpu35_power_on_controller(struct iris_core *core)
+{
+ int ret;
+
+ ret = iris_enable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_CTRL_POWER_DOMAIN]);
+ if (ret)
+ return ret;
+
+ ret = iris_prepare_enable_clock(core, IRIS_AXI1_CLK);
+ if (ret)
+ goto err_disable_power;
+
+ ret = iris_prepare_enable_clock(core, IRIS_CTRL_FREERUN_CLK);
+ if (ret)
+ goto err_disable_axi1_clk;
+
+ ret = iris_prepare_enable_clock(core, IRIS_CTRL_CLK);
+ if (ret)
+ goto err_disable_ctrl_free_clk;
+
+ return 0;
+
+err_disable_ctrl_free_clk:
+ iris_disable_unprepare_clock(core, IRIS_CTRL_FREERUN_CLK);
+err_disable_axi1_clk:
+ iris_disable_unprepare_clock(core, IRIS_AXI1_CLK);
+err_disable_power:
+ iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_CTRL_POWER_DOMAIN]);
+
+ return ret;
+}
+
+static void iris_vpu35_program_bootup_registers(struct iris_core *core)
+{
+ writel(0x1, core->reg_base + WRAPPER_IRIS_VCODEC_VPU_WRAPPER_SPARE_0);
+}
+
static u64 iris_vpu3x_calculate_frequency(struct iris_inst *inst, size_t data_size)
{
struct platform_inst_caps *caps = inst->core->iris_platform_data->inst_caps;
@@ -264,12 +449,25 @@ static u64 iris_vpu3x_calculate_frequency(struct iris_inst *inst, size_t data_si
const struct vpu_ops iris_vpu3_ops = {
.power_off_hw = iris_vpu3_power_off_hardware,
+ .power_on_hw = iris_vpu_power_on_hw,
.power_off_controller = iris_vpu_power_off_controller,
+ .power_on_controller = iris_vpu_power_on_controller,
.calc_freq = iris_vpu3x_calculate_frequency,
};
const struct vpu_ops iris_vpu33_ops = {
.power_off_hw = iris_vpu33_power_off_hardware,
+ .power_on_hw = iris_vpu_power_on_hw,
.power_off_controller = iris_vpu33_power_off_controller,
+ .power_on_controller = iris_vpu_power_on_controller,
+ .calc_freq = iris_vpu3x_calculate_frequency,
+};
+
+const struct vpu_ops iris_vpu35_ops = {
+ .power_off_hw = iris_vpu35_power_off_hw,
+ .power_on_hw = iris_vpu35_power_on_hw,
+ .power_off_controller = iris_vpu35_power_off_controller,
+ .power_on_controller = iris_vpu35_power_on_controller,
+ .program_bootup_registers = iris_vpu35_program_bootup_registers,
.calc_freq = iris_vpu3x_calculate_frequency,
};
diff --git a/drivers/media/platform/qcom/iris/iris_vpu_buffer.c b/drivers/media/platform/qcom/iris/iris_vpu_buffer.c
index f92fd39fe310..4463be05ce16 100644
--- a/drivers/media/platform/qcom/iris/iris_vpu_buffer.c
+++ b/drivers/media/platform/qcom/iris/iris_vpu_buffer.c
@@ -5,6 +5,14 @@
#include "iris_instance.h"
#include "iris_vpu_buffer.h"
+#include "iris_hfi_gen1_defines.h"
+#include "iris_hfi_gen2_defines.h"
+
+#define HFI_MAX_COL_FRAME 6
+
+#ifndef SYSTEM_LAL_TILE10
+#define SYSTEM_LAL_TILE10 192
+#endif
static u32 size_h264d_hw_bin_buffer(u32 frame_width, u32 frame_height, u32 num_vpp_pipes)
{
@@ -548,6 +556,858 @@ static u32 iris_vpu_dec_scratch1_size(struct iris_inst *inst)
iris_vpu_dec_line_size(inst);
}
+static inline u32 size_bin_bitstream_enc(u32 width, u32 height,
+ u32 rc_type)
+{
+ u32 aligned_height = ALIGN(height, 32);
+ u32 aligned_width = ALIGN(width, 32);
+ u32 frame_size = width * height * 3;
+ u32 mbs_per_frame;
+
+	/*
+	 * Encoder output size calculation (width and height aligned to 32):
+	 * resolution < 720p        : YUV size * 4
+	 * 720p <= resolution <= 4K : YUV size / 2
+	 * resolution > 4K          : YUV size / 4
+	 * frame_size starts at YUV size * 2 (width * height * 3 bytes).
+	 */
+
+ mbs_per_frame = (ALIGN(aligned_height, 16) * ALIGN(aligned_width, 16)) / 256;
+
+ if (mbs_per_frame < NUM_MBS_720P)
+ frame_size = frame_size << 1;
+ else if (mbs_per_frame <= NUM_MBS_4K)
+ frame_size = frame_size >> 2;
+ else
+ frame_size = frame_size >> 3;
+
+ if (rc_type == HFI_RATE_CONTROL_OFF || rc_type == HFI_RATE_CONTROL_CQ ||
+ rc_type == HFI_RC_OFF || rc_type == HFI_RC_CQ)
+ frame_size = frame_size << 1;
+
+	/*
+	 * For opaque color formats the bit depth is only known with the
+	 * first ETB, and buffers already allocated for 8-bit content are
+	 * not sufficient for 10-bit, so size for 10-bit by default:
+	 * 10-bit size = 8-bit size * 1.25.
+	 */
+ frame_size *= 5;
+ frame_size /= 4;
+
+ return ALIGN(frame_size, SZ_4K);
+}
+
+static inline u32 hfi_buffer_bin_enc(u32 width, u32 height,
+ u32 work_mode, u32 lcu_size,
+ u32 num_vpp_pipes, u32 rc_type)
+{
+ u32 sao_bin_buffer_size, padded_bin_size, bitstream_size;
+ u32 total_bitbin_buffers, size_single_pipe, bitbin_size;
+ u32 aligned_height = ALIGN(height, lcu_size);
+ u32 aligned_width = ALIGN(width, lcu_size);
+
+ bitstream_size = size_bin_bitstream_enc(width, height, rc_type);
+ bitstream_size = ALIGN(bitstream_size, 256);
+
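+	/*
+	 * Two-stage (pipelined) encoding keeps three bin buffers, each
+	 * sized at 1.7x the worst-case bitstream.
+	 */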
+ if (work_mode == STAGE_2) {
+ total_bitbin_buffers = 3;
+ bitbin_size = bitstream_size * 17 / 10;
+ bitbin_size = ALIGN(bitbin_size, 256);
+ } else {
+ total_bitbin_buffers = 1;
+ bitstream_size = aligned_width * aligned_height * 3;
+ bitbin_size = ALIGN(bitstream_size, 256);
+ }
+
+ if (num_vpp_pipes > 2)
+ size_single_pipe = bitbin_size / 2;
+ else
+ size_single_pipe = bitbin_size;
+
+ size_single_pipe = ALIGN(size_single_pipe, 256);
+ sao_bin_buffer_size = (64 * (((width + 32) * (height + 32)) >> 10)) + 384;
+ padded_bin_size = ALIGN(size_single_pipe, 256);
+ size_single_pipe = sao_bin_buffer_size + padded_bin_size;
+ size_single_pipe = ALIGN(size_single_pipe, 256);
+ bitbin_size = size_single_pipe * num_vpp_pipes;
+
+ return ALIGN(bitbin_size, 256) * total_bitbin_buffers + 512;
+}
+
+static u32 iris_vpu_enc_bin_size(struct iris_inst *inst)
+{
+ u32 num_vpp_pipes = inst->core->iris_platform_data->num_vpp_pipe;
+ u32 stage = inst->fw_caps[STAGE].value;
+ struct v4l2_format *f = inst->fmt_dst;
+ u32 height = f->fmt.pix_mp.height;
+ u32 width = f->fmt.pix_mp.width;
+ u32 lcu_size;
+
+ if (inst->codec == V4L2_PIX_FMT_HEVC)
+ lcu_size = 32;
+ else
+ lcu_size = 16;
+
+ return hfi_buffer_bin_enc(width, height, stage, lcu_size,
+ num_vpp_pipes, inst->hfi_rc_type);
+}
+
+static inline
+u32 hfi_buffer_comv_enc(u32 frame_width, u32 frame_height, u32 lcu_size,
+ u32 num_recon, u32 standard)
+{
+ u32 height_in_lcus = ((frame_height) + (lcu_size) - 1) / (lcu_size);
+ u32 width_in_lcus = ((frame_width) + (lcu_size) - 1) / (lcu_size);
+ u32 num_lcu_in_frame = width_in_lcus * height_in_lcus;
+ u32 mb_height = ((frame_height) + 15) >> 4;
+ u32 mb_width = ((frame_width) + 15) >> 4;
+ u32 size_colloc_mv, size_colloc_rc;
+
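+	/*
+	 * Co-located MV data is kept per reconstructed reference, while
+	 * rate-control co-location data is kept for up to
+	 * HFI_MAX_COL_FRAME frames.
+	 */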
+ size_colloc_mv = (standard == HFI_CODEC_ENCODE_HEVC) ?
+ (16 * ((num_lcu_in_frame << 2) + 32)) :
+ (3 * 16 * (width_in_lcus * height_in_lcus + 32));
+ size_colloc_mv = ALIGN(size_colloc_mv, 256) * num_recon;
+ size_colloc_rc = (((mb_width + 7) >> 3) * 16 * 2 * mb_height);
+ size_colloc_rc = ALIGN(size_colloc_rc, 256) * HFI_MAX_COL_FRAME;
+
+ return size_colloc_mv + size_colloc_rc;
+}
+
+static u32 iris_vpu_enc_comv_size(struct iris_inst *inst)
+{
+ struct v4l2_format *f = inst->fmt_dst;
+ u32 height = f->fmt.pix_mp.height;
+ u32 width = f->fmt.pix_mp.width;
+ u32 num_recon = 1;
+ u32 lcu_size = 16;
+
+ if (inst->codec == V4L2_PIX_FMT_HEVC) {
+ lcu_size = 32;
+ return hfi_buffer_comv_enc(width, height, lcu_size,
+ num_recon + 1, HFI_CODEC_ENCODE_HEVC);
+ }
+
+ return hfi_buffer_comv_enc(width, height, lcu_size,
+ num_recon + 1, HFI_CODEC_ENCODE_AVC);
+}
+
+static inline
+u32 size_frame_rc_buf_size(u32 standard, u32 frame_height_coded,
+ u32 num_vpp_pipes_enc)
+{
+ u32 size = 0;
+
+ size = (standard == HFI_CODEC_ENCODE_HEVC) ?
+ (256 + 16 * (14 + ((((frame_height_coded) >> 5) + 7) >> 3))) :
+ (256 + 16 * (14 + ((((frame_height_coded) >> 4) + 7) >> 3)));
+ size *= 11;
+
+ if (num_vpp_pipes_enc > 1)
+ size = ALIGN(size, 256) * num_vpp_pipes_enc;
+
+ return ALIGN(size, 512) * HFI_MAX_COL_FRAME;
+}
+
+static inline
+u32 size_enc_slice_info_buf(u32 num_lcu_in_frame)
+{
+ return ALIGN((256 + (num_lcu_in_frame << 4)), 256);
+}
+
+static inline u32 enc_bitcnt_buf_size(u32 num_lcu_in_frame)
+{
+ return ALIGN((256 + (4 * (num_lcu_in_frame))), 256);
+}
+
+static inline u32 enc_bitmap_buf_size(u32 num_lcu_in_frame)
+{
+ return ALIGN((256 + ((num_lcu_in_frame) >> 3)), 256);
+}
+
+static inline u32 size_override_buf(u32 num_lcumb)
+{
+ return ALIGN(((16 * (((num_lcumb) + 7) >> 3))), 256) * 2;
+}
+
+static inline u32 size_ir_buf(u32 num_lcu_in_frame)
+{
+ return ALIGN((((((num_lcu_in_frame) << 1) + 7) & (~7)) * 3), 256);
+}
+
+static inline
+u32 size_linebuff_data(bool is_ten_bit, u32 frame_width_coded)
+{
+ return is_ten_bit ?
+ (((((10 * (frame_width_coded) + 1024) + (256 - 1)) &
+ (~(256 - 1))) * 1) +
+ (((((10 * (frame_width_coded) + 1024) >> 1) + (256 - 1)) &
+ (~(256 - 1))) * 2)) :
+ (((((8 * (frame_width_coded) + 1024) + (256 - 1)) &
+ (~(256 - 1))) * 1) +
+ (((((8 * (frame_width_coded) + 1024) >> 1) + (256 - 1)) &
+ (~(256 - 1))) * 2));
+}
+
+static inline
+u32 size_left_linebuff_ctrl(u32 standard, u32 frame_height_coded,
+ u32 num_vpp_pipes_enc)
+{
+ u32 size = 0;
+
+ size = standard == HFI_CODEC_ENCODE_HEVC ?
+ (((frame_height_coded) +
+ (32)) / 32 * 4 * 16) :
+ (((frame_height_coded) + 15) / 16 * 5 * 16);
+
+ if ((num_vpp_pipes_enc) > 1) {
+ size += 512;
+ size = ALIGN(size, 512) *
+ num_vpp_pipes_enc;
+ }
+
+ return ALIGN(size, 256);
+}
+
+static inline
+u32 size_left_linebuff_recon_pix(bool is_ten_bit, u32 frame_height_coded,
+ u32 num_vpp_pipes_enc)
+{
+ return (((is_ten_bit + 1) * 2 * (frame_height_coded) + 256) +
+ (256 << (num_vpp_pipes_enc - 1)) - 1) &
+ (~((256 << (num_vpp_pipes_enc - 1)) - 1)) * 1;
+}
+
+static inline
+u32 size_top_linebuff_ctrl_fe(u32 frame_width_coded, u32 standard)
+{
+ return standard == HFI_CODEC_ENCODE_HEVC ?
+ ALIGN((64 * ((frame_width_coded) >> 5)), 256) :
+ ALIGN((256 + 16 * ((frame_width_coded) >> 4)), 256);
+}
+
+static inline
+u32 size_left_linebuff_ctrl_fe(u32 frame_height_coded, u32 num_vpp_pipes_enc)
+{
+ return (((256 + 64 * ((frame_height_coded) >> 4)) +
+ (256 << (num_vpp_pipes_enc - 1)) - 1) &
+ (~((256 << (num_vpp_pipes_enc - 1)) - 1)) * 1) *
+ num_vpp_pipes_enc;
+}
+
+static inline
+u32 size_left_linebuff_metadata_recon_y(u32 frame_height_coded,
+ bool is_ten_bit,
+ u32 num_vpp_pipes_enc)
+{
+ return ALIGN(((256 + 64 * ((frame_height_coded) /
+ (8 * (is_ten_bit ? 4 : 8))))), 256) * num_vpp_pipes_enc;
+}
+
+static inline
+u32 size_left_linebuff_metadata_recon_uv(u32 frame_height_coded,
+ bool is_ten_bit,
+ u32 num_vpp_pipes_enc)
+{
+ return ALIGN(((256 + 64 * ((frame_height_coded) /
+ (4 * (is_ten_bit ? 4 : 8))))), 256) * num_vpp_pipes_enc;
+}
+
+static inline
+u32 size_linebuff_recon_pix(bool is_ten_bit, u32 frame_width_coded)
+{
+ return ALIGN(((is_ten_bit ? 3 : 2) * (frame_width_coded)), 256);
+}
+
+static inline
+u32 size_line_buf_ctrl(u32 frame_width_coded)
+{
+ return ALIGN(frame_width_coded, 256);
+}
+
+static inline
+u32 size_line_buf_ctrl_id2(u32 frame_width_coded)
+{
+ return ALIGN(frame_width_coded, 256);
+}
+
+static inline u32 size_line_buf_sde(u32 frame_width_coded)
+{
+ return ALIGN((256 + (16 * ((frame_width_coded) >> 4))), 256);
+}
+
+static inline
+u32 size_vpss_line_buf(u32 num_vpp_pipes_enc, u32 frame_height_coded,
+ u32 frame_width_coded)
+{
+ return ALIGN(((((((8192) >> 2) << 5) * (num_vpp_pipes_enc)) + 64) +
+ (((((max_t(u32, (frame_width_coded),
+ (frame_height_coded)) + 3) >> 2) << 5) + 256) * 16)), 256);
+}
+
+static inline
+u32 size_vpss_line_buf_vpu33(u32 num_vpp_pipes_enc, u32 frame_height_coded,
+ u32 frame_width_coded)
+{
+ u32 vpss_4tap_top, vpss_4tap_left, vpss_div2_top;
+ u32 vpss_div2_left, vpss_top_lb, vpss_left_lb;
+ u32 size_left, size_top;
+ u32 max_width_height;
+
+ max_width_height = max_t(u32, frame_width_coded, frame_height_coded);
+ vpss_4tap_top = ((((max_width_height * 2) + 3) >> 2) << 4) + 256;
+ vpss_4tap_left = (((8192 + 3) >> 2) << 5) + 64;
+ vpss_div2_top = (((max_width_height + 3) >> 2) << 4) + 256;
+ vpss_div2_left = ((((max_width_height * 2) + 3) >> 2) << 5) + 64;
+ vpss_top_lb = (frame_width_coded + 1) << 3;
+ vpss_left_lb = (frame_height_coded << 3) * num_vpp_pipes_enc;
+ size_left = (vpss_4tap_left + vpss_div2_left) * 2 * num_vpp_pipes_enc;
+ size_top = (vpss_4tap_top + vpss_div2_top) * 2;
+
+ return ALIGN(size_left + size_top + vpss_top_lb + vpss_left_lb, DMA_ALIGNMENT);
+}
+
+static inline
+u32 size_top_line_buf_first_stg_sao(u32 frame_width_coded)
+{
+ return ALIGN((16 * ((frame_width_coded) >> 5)), 256);
+}
+
+static inline
+u32 size_enc_ref_buffer(u32 frame_width, u32 frame_height)
+{
+ u32 u_chroma_buffer_height = ALIGN(frame_height >> 1, 32);
+ u32 u_buffer_height = ALIGN(frame_height, 32);
+ u32 u_buffer_width = ALIGN(frame_width, 32);
+
+ return (u_buffer_height + u_chroma_buffer_height) * u_buffer_width;
+}
+
+static inline
+u32 size_enc_ten_bit_ref_buffer(u32 frame_width, u32 frame_height)
+{
+ u32 ref_luma_stride_in_bytes = ((frame_width + SYSTEM_LAL_TILE10 - 1) / SYSTEM_LAL_TILE10) *
+ SYSTEM_LAL_TILE10;
+ u32 ref_buf_height = (frame_height + (32 - 1)) & (~(32 - 1));
+ u32 u_ref_stride, luma_size;
+ u32 ref_chrm_height_in_bytes;
+ u32 chroma_size;
+
+ u_ref_stride = 4 * (ref_luma_stride_in_bytes / 3);
+ u_ref_stride = (u_ref_stride + (128 - 1)) & (~(128 - 1));
+ luma_size = ref_buf_height * u_ref_stride;
+ luma_size = (luma_size + (4096 - 1)) & (~(4096 - 1));
+
+ ref_chrm_height_in_bytes = (((frame_height + 1) >> 1) + (32 - 1)) & (~(32 - 1));
+ chroma_size = u_ref_stride * ref_chrm_height_in_bytes;
+ chroma_size = (chroma_size + (4096 - 1)) & (~(4096 - 1));
+
+ return luma_size + chroma_size;
+}
+
+static inline
+u32 hfi_ubwc_calc_metadata_plane_stride(u32 frame_width,
+ u32 metadata_stride_multiple,
+ u32 tile_width_in_pels)
+{
+ return ALIGN(((frame_width + (tile_width_in_pels - 1)) / tile_width_in_pels),
+ metadata_stride_multiple);
+}
+
+static inline
+u32 hfi_ubwc_metadata_plane_bufheight(u32 frame_height,
+ u32 metadata_height_multiple,
+ u32 tile_height_in_pels)
+{
+ return ALIGN(((frame_height + (tile_height_in_pels - 1)) / tile_height_in_pels),
+ metadata_height_multiple);
+}
+
+static inline
+u32 hfi_ubwc_metadata_plane_buffer_size(u32 _metadata_tride, u32 _metadata_buf_height)
+{
+ return ALIGN(_metadata_tride * _metadata_buf_height, 4096);
+}
+
+static inline
+u32 hfi_buffer_non_comv_enc(u32 frame_width, u32 frame_height,
+ u32 num_vpp_pipes_enc, u32 lcu_size, u32 standard)
+{
+ u32 height_in_lcus = ((frame_height) + (lcu_size) - 1) / (lcu_size);
+ u32 width_in_lcus = ((frame_width) + (lcu_size) - 1) / (lcu_size);
+ u32 num_lcu_in_frame = width_in_lcus * height_in_lcus;
+ u32 frame_height_coded = height_in_lcus * (lcu_size);
+ u32 frame_width_coded = width_in_lcus * (lcu_size);
+ u32 num_lcumb, frame_rc_buf_size;
+
+ num_lcumb = (frame_height_coded / lcu_size) *
+ ((frame_width_coded + lcu_size * 8) / lcu_size);
+ frame_rc_buf_size = size_frame_rc_buf_size(standard, frame_height_coded,
+ num_vpp_pipes_enc);
+ return size_enc_slice_info_buf(num_lcu_in_frame) +
+ SIZE_SLICE_CMD_BUFFER +
+ SIZE_SPS_PPS_SLICE_HDR +
+ frame_rc_buf_size +
+ enc_bitcnt_buf_size(num_lcu_in_frame) +
+ enc_bitmap_buf_size(num_lcu_in_frame) +
+ SIZE_BSE_SLICE_CMD_BUF +
+ SIZE_LAMBDA_LUT +
+ size_override_buf(num_lcumb) +
+ size_ir_buf(num_lcu_in_frame);
+}
+
+static u32 iris_vpu_enc_non_comv_size(struct iris_inst *inst)
+{
+ u32 num_vpp_pipes = inst->core->iris_platform_data->num_vpp_pipe;
+ struct v4l2_format *f = inst->fmt_dst;
+ u32 height = f->fmt.pix_mp.height;
+ u32 width = f->fmt.pix_mp.width;
+ u32 lcu_size = 16;
+
+ if (inst->codec == V4L2_PIX_FMT_HEVC) {
+ lcu_size = 32;
+ return hfi_buffer_non_comv_enc(width, height, num_vpp_pipes,
+ lcu_size, HFI_CODEC_ENCODE_HEVC) +
+ SIZE_ONE_SLICE_BUF;
+ }
+
+ return hfi_buffer_non_comv_enc(width, height, num_vpp_pipes,
+ lcu_size, HFI_CODEC_ENCODE_AVC);
+}
+
+static inline
+u32 hfi_buffer_line_enc_base(u32 frame_width, u32 frame_height, bool is_ten_bit,
+ u32 num_vpp_pipes_enc, u32 lcu_size, u32 standard)
+{
+ u32 width_in_lcus = ((frame_width) + (lcu_size) - 1) / (lcu_size);
+ u32 height_in_lcus = ((frame_height) + (lcu_size) - 1) / (lcu_size);
+ u32 frame_height_coded = height_in_lcus * (lcu_size);
+ u32 frame_width_coded = width_in_lcus * (lcu_size);
+ u32 line_buff_data_size, left_line_buff_ctrl_size;
+ u32 left_line_buff_metadata_recon__uv__size;
+ u32 left_line_buff_metadata_recon__y__size;
+ u32 left_line_buff_recon_pix_size;
+ u32 top_line_buff_ctrl_fe_size;
+ u32 line_buff_recon_pix_size;
+
+ line_buff_data_size = size_linebuff_data(is_ten_bit, frame_width_coded);
+ left_line_buff_ctrl_size =
+ size_left_linebuff_ctrl(standard, frame_height_coded, num_vpp_pipes_enc);
+ left_line_buff_recon_pix_size =
+ size_left_linebuff_recon_pix(is_ten_bit, frame_height_coded,
+ num_vpp_pipes_enc);
+ top_line_buff_ctrl_fe_size =
+ size_top_linebuff_ctrl_fe(frame_width_coded, standard);
+ left_line_buff_metadata_recon__y__size =
+ size_left_linebuff_metadata_recon_y(frame_height_coded, is_ten_bit,
+ num_vpp_pipes_enc);
+ left_line_buff_metadata_recon__uv__size =
+ size_left_linebuff_metadata_recon_uv(frame_height_coded, is_ten_bit,
+ num_vpp_pipes_enc);
+ line_buff_recon_pix_size = size_linebuff_recon_pix(is_ten_bit, frame_width_coded);
+
+ return size_line_buf_ctrl(frame_width_coded) +
+ size_line_buf_ctrl_id2(frame_width_coded) +
+ line_buff_data_size +
+ left_line_buff_ctrl_size +
+ left_line_buff_recon_pix_size +
+ top_line_buff_ctrl_fe_size +
+ left_line_buff_metadata_recon__y__size +
+ left_line_buff_metadata_recon__uv__size +
+ line_buff_recon_pix_size +
+ size_left_linebuff_ctrl_fe(frame_height_coded, num_vpp_pipes_enc) +
+ size_line_buf_sde(frame_width_coded) +
+ size_top_line_buf_first_stg_sao(frame_width_coded);
+}
+
+static inline
+u32 hfi_buffer_line_enc(u32 frame_width, u32 frame_height, bool is_ten_bit,
+ u32 num_vpp_pipes_enc, u32 lcu_size, u32 standard)
+{
+ u32 width_in_lcus = ((frame_width) + (lcu_size) - 1) / (lcu_size);
+ u32 height_in_lcus = ((frame_height) + (lcu_size) - 1) / (lcu_size);
+ u32 frame_height_coded = height_in_lcus * (lcu_size);
+ u32 frame_width_coded = width_in_lcus * (lcu_size);
+
+ return hfi_buffer_line_enc_base(frame_width, frame_height, is_ten_bit,
+ num_vpp_pipes_enc, lcu_size, standard) +
+ size_vpss_line_buf(num_vpp_pipes_enc, frame_height_coded, frame_width_coded);
+}
+
+static inline
+u32 hfi_buffer_line_enc_vpu33(u32 frame_width, u32 frame_height, bool is_ten_bit,
+ u32 num_vpp_pipes_enc, u32 lcu_size, u32 standard)
+{
+ u32 width_in_lcus = ((frame_width) + (lcu_size) - 1) / (lcu_size);
+ u32 height_in_lcus = ((frame_height) + (lcu_size) - 1) / (lcu_size);
+ u32 frame_height_coded = height_in_lcus * (lcu_size);
+ u32 frame_width_coded = width_in_lcus * (lcu_size);
+
+ return hfi_buffer_line_enc_base(frame_width, frame_height, is_ten_bit,
+ num_vpp_pipes_enc, lcu_size, standard) +
+ size_vpss_line_buf_vpu33(num_vpp_pipes_enc, frame_height_coded,
+ frame_width_coded);
+}
+
+static u32 iris_vpu_enc_line_size(struct iris_inst *inst)
+{
+ u32 num_vpp_pipes = inst->core->iris_platform_data->num_vpp_pipe;
+ struct v4l2_format *f = inst->fmt_dst;
+ u32 height = f->fmt.pix_mp.height;
+ u32 width = f->fmt.pix_mp.width;
+ u32 lcu_size = 16;
+
+ if (inst->codec == V4L2_PIX_FMT_HEVC) {
+ lcu_size = 32;
+ return hfi_buffer_line_enc(width, height, 0, num_vpp_pipes,
+ lcu_size, HFI_CODEC_ENCODE_HEVC);
+ }
+
+ return hfi_buffer_line_enc(width, height, 0, num_vpp_pipes,
+ lcu_size, HFI_CODEC_ENCODE_AVC);
+}
+
+static u32 iris_vpu33_enc_line_size(struct iris_inst *inst)
+{
+ u32 num_vpp_pipes = inst->core->iris_platform_data->num_vpp_pipe;
+ struct v4l2_format *f = inst->fmt_dst;
+ u32 height = f->fmt.pix_mp.height;
+ u32 width = f->fmt.pix_mp.width;
+ u32 lcu_size = 16;
+
+ if (inst->codec == V4L2_PIX_FMT_HEVC) {
+ lcu_size = 32;
+ return hfi_buffer_line_enc_vpu33(width, height, 0, num_vpp_pipes,
+ lcu_size, HFI_CODEC_ENCODE_HEVC);
+ }
+
+ return hfi_buffer_line_enc_vpu33(width, height, 0, num_vpp_pipes,
+ lcu_size, HFI_CODEC_ENCODE_AVC);
+}
+
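+	/*
+	 * A reconstructed reference (DPB) buffer holds the pixel data plus
+	 * one UBWC metadata plane each for luma and chroma.
+	 */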
+static inline
+u32 hfi_buffer_dpb_enc(u32 frame_width, u32 frame_height, bool is_ten_bit)
+{
+ u32 metadata_stride, metadata_buf_height, meta_size_y, meta_size_c;
+ u32 ten_bit_ref_buf_size = 0, ref_buf_size = 0;
+ u32 size;
+
+ if (!is_ten_bit) {
+ ref_buf_size = size_enc_ref_buffer(frame_width, frame_height);
+ metadata_stride =
+ hfi_ubwc_calc_metadata_plane_stride(frame_width, 64,
+ HFI_COL_FMT_NV12C_Y_TILE_WIDTH);
+ metadata_buf_height =
+ hfi_ubwc_metadata_plane_bufheight(frame_height, 16,
+ HFI_COL_FMT_NV12C_Y_TILE_HEIGHT);
+ meta_size_y =
+ hfi_ubwc_metadata_plane_buffer_size(metadata_stride, metadata_buf_height);
+ meta_size_c =
+ hfi_ubwc_metadata_plane_buffer_size(metadata_stride, metadata_buf_height);
+ size = ref_buf_size + meta_size_y + meta_size_c;
+ } else {
+ ten_bit_ref_buf_size = size_enc_ten_bit_ref_buffer(frame_width, frame_height);
+ metadata_stride =
+ hfi_ubwc_calc_metadata_plane_stride(frame_width,
+ IRIS_METADATA_STRIDE_MULTIPLE,
+ HFI_COL_FMT_TP10C_Y_TILE_WIDTH);
+ metadata_buf_height =
+ hfi_ubwc_metadata_plane_bufheight(frame_height,
+ IRIS_METADATA_HEIGHT_MULTIPLE,
+ HFI_COL_FMT_TP10C_Y_TILE_HEIGHT);
+ meta_size_y =
+ hfi_ubwc_metadata_plane_buffer_size(metadata_stride, metadata_buf_height);
+ meta_size_c =
+ hfi_ubwc_metadata_plane_buffer_size(metadata_stride, metadata_buf_height);
+ size = ten_bit_ref_buf_size + meta_size_y + meta_size_c;
+ }
+
+ return size;
+}
+
+static u32 iris_vpu_enc_arp_size(struct iris_inst *inst)
+{
+ return HFI_BUFFER_ARP_ENC;
+}
+
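+	/* Scaling is in effect whenever the crop and compose rectangles differ. */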
+inline bool is_scaling_enabled(struct iris_inst *inst)
+{
+ return inst->crop.left != inst->compose.left ||
+ inst->crop.top != inst->compose.top ||
+ inst->crop.width != inst->compose.width ||
+ inst->crop.height != inst->compose.height;
+}
+
+static inline
+u32 hfi_buffer_vpss_enc(u32 dswidth, u32 dsheight, bool ds_enable,
+ u32 blur, bool is_ten_bit)
+{
+ if (ds_enable || blur)
+ return hfi_buffer_dpb_enc(dswidth, dsheight, is_ten_bit);
+
+ return 0;
+}
+
+static inline u32 hfi_buffer_scratch1_enc(u32 frame_width, u32 frame_height,
+ u32 lcu_size, u32 num_ref,
+ bool ten_bit, u32 num_vpp_pipes,
+ bool is_h265)
+{
+ u32 line_buf_ctrl_size, line_buf_data_size, leftline_buf_ctrl_size;
+ u32 line_buf_sde_size, sps_pps_slice_hdr, topline_buf_ctrl_size_FE;
+ u32 leftline_buf_ctrl_size_FE, line_buf_recon_pix_size;
+ u32 leftline_buf_recon_pix_size, lambda_lut_size, override_buffer_size;
+ u32 col_mv_buf_size, vpp_reg_buffer_size, ir_buffer_size;
+ u32 vpss_line_buf, leftline_buf_meta_recony, h265e_colrcbuf_size;
+ u32 h265e_framerc_bufsize, h265e_lcubitcnt_bufsize;
+ u32 h265e_lcubitmap_bufsize, se_stats_bufsize;
+ u32 bse_reg_buffer_size, bse_slice_cmd_buffer_size, slice_info_bufsize;
+ u32 line_buf_ctrl_size_buffid2, slice_cmd_buffer_size;
+ u32 width_lcu_num, height_lcu_num, width_coded, height_coded;
+ u32 frame_num_lcu, linebuf_meta_recon_uv, topline_bufsize_fe_1stg_sao;
+ u32 vpss_line_buffer_size_1;
+ u32 bit_depth, num_lcu_mb;
+
+ width_lcu_num = (frame_width + lcu_size - 1) / lcu_size;
+ height_lcu_num = (frame_height + lcu_size - 1) / lcu_size;
+ frame_num_lcu = width_lcu_num * height_lcu_num;
+ width_coded = width_lcu_num * lcu_size;
+ height_coded = height_lcu_num * lcu_size;
+ num_lcu_mb = (height_coded / lcu_size) *
+ ((width_coded + lcu_size * 8) / lcu_size);
+ slice_info_bufsize = 256 + (frame_num_lcu << 4);
+ slice_info_bufsize = ALIGN(slice_info_bufsize, 256);
+ line_buf_ctrl_size = ALIGN(width_coded, 256);
+ line_buf_ctrl_size_buffid2 = ALIGN(width_coded, 256);
+
+ bit_depth = ten_bit ? 10 : 8;
+ line_buf_data_size =
+ (((((bit_depth * width_coded + 1024) + (256 - 1)) &
+ (~(256 - 1))) * 1) +
+ (((((bit_depth * width_coded + 1024) >> 1) + (256 - 1)) &
+ (~(256 - 1))) * 2));
+
+ leftline_buf_ctrl_size = is_h265 ? ((height_coded + 32) / 32 * 4 * 16) :
+ ((height_coded + 15) / 16 * 5 * 16);
+
+ if (num_vpp_pipes > 1) {
+ leftline_buf_ctrl_size += 512;
+ leftline_buf_ctrl_size =
+ ALIGN(leftline_buf_ctrl_size, 512) * num_vpp_pipes;
+ }
+
+ leftline_buf_ctrl_size = ALIGN(leftline_buf_ctrl_size, 256);
+ leftline_buf_recon_pix_size =
+ (((ten_bit + 1) * 2 * (height_coded) + 256) +
+ (256 << (num_vpp_pipes - 1)) - 1) &
+ (~((256 << (num_vpp_pipes - 1)) - 1)) * 1;
+
+ topline_buf_ctrl_size_FE = is_h265 ? (64 * (width_coded >> 5)) :
+ (256 + 16 * (width_coded >> 4));
+ topline_buf_ctrl_size_FE = ALIGN(topline_buf_ctrl_size_FE, 256);
+ leftline_buf_ctrl_size_FE =
+ (((256 + 64 * (height_coded >> 4)) +
+ (256 << (num_vpp_pipes - 1)) - 1) &
+ (~((256 << (num_vpp_pipes - 1)) - 1)) * 1) *
+ num_vpp_pipes;
+ leftline_buf_meta_recony =
+ (256 + 64 * ((height_coded) / (8 * (ten_bit ? 4 : 8))));
+ leftline_buf_meta_recony = ALIGN(leftline_buf_meta_recony, 256);
+ leftline_buf_meta_recony = leftline_buf_meta_recony * num_vpp_pipes;
+ linebuf_meta_recon_uv =
+ (256 + 64 * ((height_coded) / (4 * (ten_bit ? 4 : 8))));
+ linebuf_meta_recon_uv = ALIGN(linebuf_meta_recon_uv, 256);
+ linebuf_meta_recon_uv = linebuf_meta_recon_uv * num_vpp_pipes;
+ line_buf_recon_pix_size = ((ten_bit ? 3 : 2) * width_coded);
+ line_buf_recon_pix_size = ALIGN(line_buf_recon_pix_size, 256);
+ slice_cmd_buffer_size = ALIGN(20480, 256);
+ sps_pps_slice_hdr = 2048 + 4096;
+ col_mv_buf_size =
+ is_h265 ? (16 * ((frame_num_lcu << 2) + 32)) :
+ (3 * 16 * (width_lcu_num * height_lcu_num + 32));
+ col_mv_buf_size = ALIGN(col_mv_buf_size, 256) * (num_ref + 1);
+ h265e_colrcbuf_size =
+ (((width_lcu_num + 7) >> 3) * 16 * 2 * height_lcu_num);
+ if (num_vpp_pipes > 1)
+ h265e_colrcbuf_size =
+ ALIGN(h265e_colrcbuf_size, 256) * num_vpp_pipes;
+
+ h265e_colrcbuf_size =
+ ALIGN(h265e_colrcbuf_size, 256) * HFI_MAX_COL_FRAME;
+ h265e_framerc_bufsize =
+ (is_h265) ?
+ (256 + 16 * (14 + (((height_coded >> 5) + 7) >> 3))) :
+ (256 + 16 * (14 + (((height_coded >> 4) + 7) >> 3)));
+ h265e_framerc_bufsize *= 6;
+ if (num_vpp_pipes > 1)
+ h265e_framerc_bufsize =
+ ALIGN(h265e_framerc_bufsize, 256) * num_vpp_pipes;
+
+ h265e_framerc_bufsize =
+ ALIGN(h265e_framerc_bufsize, 512) * HFI_MAX_COL_FRAME;
+ h265e_lcubitcnt_bufsize = 256 + 4 * frame_num_lcu;
+ h265e_lcubitcnt_bufsize = ALIGN(h265e_lcubitcnt_bufsize, 256);
+ h265e_lcubitmap_bufsize = 256 + (frame_num_lcu >> 3);
+ h265e_lcubitmap_bufsize = ALIGN(h265e_lcubitmap_bufsize, 256);
+ line_buf_sde_size = 256 + 16 * (width_coded >> 4);
+ line_buf_sde_size = ALIGN(line_buf_sde_size, 256);
+ if ((width_coded * height_coded) > (4096 * 2160))
+ se_stats_bufsize = 0;
+ else if ((width_coded * height_coded) > (1920 * 1088))
+ se_stats_bufsize = (40 * 4 * frame_num_lcu + 256 + 256);
+ else
+ se_stats_bufsize = (1024 * frame_num_lcu + 256 + 256);
+
+ se_stats_bufsize = ALIGN(se_stats_bufsize, 256) * 2;
+ bse_slice_cmd_buffer_size = (((8192 << 2) + 7) & (~7)) * 6;
+ bse_reg_buffer_size = (((512 << 3) + 7) & (~7)) * 4;
+ vpp_reg_buffer_size = (((2048 << 3) + 31) & (~31)) * 10;
+ lambda_lut_size = 256 * 11;
+ override_buffer_size = 16 * ((num_lcu_mb + 7) >> 3);
+ override_buffer_size = ALIGN(override_buffer_size, 256) * 2;
+ ir_buffer_size = (((frame_num_lcu << 1) + 7) & (~7)) * 3;
+ vpss_line_buffer_size_1 = (((8192 >> 2) << 5) * num_vpp_pipes) + 64;
+ vpss_line_buf =
+ (((((max(width_coded, height_coded) + 3) >> 2) << 5) + 256) *
+ 16) +
+ vpss_line_buffer_size_1;
+ topline_bufsize_fe_1stg_sao = 16 * (width_coded >> 5);
+ topline_bufsize_fe_1stg_sao = ALIGN(topline_bufsize_fe_1stg_sao, 256);
+
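+	/*
+	 * The scratch buffer is the sum of every line, control, reference and
+	 * rate-control buffer computed above, plus 1024 bytes of padding.
+	 */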
+ return line_buf_ctrl_size + line_buf_data_size +
+ line_buf_ctrl_size_buffid2 + leftline_buf_ctrl_size +
+ vpss_line_buf + col_mv_buf_size + topline_buf_ctrl_size_FE +
+ leftline_buf_ctrl_size_FE + line_buf_recon_pix_size +
+ leftline_buf_recon_pix_size + leftline_buf_meta_recony +
+ linebuf_meta_recon_uv + h265e_colrcbuf_size +
+ h265e_framerc_bufsize + h265e_lcubitcnt_bufsize +
+ h265e_lcubitmap_bufsize + line_buf_sde_size +
+ topline_bufsize_fe_1stg_sao + override_buffer_size +
+ bse_reg_buffer_size + vpp_reg_buffer_size + sps_pps_slice_hdr +
+ slice_cmd_buffer_size + bse_slice_cmd_buffer_size +
+ ir_buffer_size + slice_info_bufsize + lambda_lut_size +
+ se_stats_bufsize + 1024;
+}
+
+static u32 iris_vpu_enc_scratch1_size(struct iris_inst *inst)
+{
+ u32 num_vpp_pipes = inst->core->iris_platform_data->num_vpp_pipe;
+ struct v4l2_format *f = inst->fmt_dst;
+ u32 frame_height = f->fmt.pix_mp.height;
+ u32 frame_width = f->fmt.pix_mp.width;
+ u32 num_ref = 1;
+ u32 lcu_size;
+ bool is_h265;
+
+ if (inst->codec == V4L2_PIX_FMT_H264) {
+ lcu_size = 16;
+ is_h265 = false;
+ } else if (inst->codec == V4L2_PIX_FMT_HEVC) {
+ lcu_size = 32;
+ is_h265 = true;
+ } else {
+ return 0;
+ }
+
+ return hfi_buffer_scratch1_enc(frame_width, frame_height, lcu_size,
+ num_ref, false, num_vpp_pipes, is_h265);
+}
+
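+/*
+ * UBWC metadata planes are sized in compression tiles: the frame dimensions
+ * are divided by the tile size, aligned to the required multiples, and the
+ * resulting plane is padded to a 4K boundary.
+ */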
+static inline u32 ubwc_metadata_plane_stride(u32 width,
+ u32 metadata_stride_multi,
+ u32 tile_width_pels)
+{
+ return ALIGN(((width + (tile_width_pels - 1)) / tile_width_pels),
+ metadata_stride_multi);
+}
+
+static inline u32 ubwc_metadata_plane_bufheight(u32 height,
+ u32 metadata_height_multi,
+ u32 tile_height_pels)
+{
+ return ALIGN(((height + (tile_height_pels - 1)) / tile_height_pels),
+ metadata_height_multi);
+}
+
+static inline u32 ubwc_metadata_plane_buffer_size(u32 metadata_stride,
+ u32 metadata_buf_height)
+{
+ return ALIGN(metadata_stride * metadata_buf_height, SZ_4K);
+}
+
+static inline u32 hfi_buffer_scratch2_enc(u32 frame_width, u32 frame_height,
+ u32 num_ref, bool ten_bit)
+{
+ u32 aligned_width, aligned_height, chroma_height, ref_buf_height;
+ u32 metadata_stride, meta_buf_height, meta_size_y, meta_size_c;
+ u32 ref_luma_stride_bytes, ref_chroma_height_bytes;
+ u32 ref_buf_size, ref_stride;
+ u32 luma_size, chroma_size;
+ u32 size;
+
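+	/* 8-bit references use 32x8 (NV12C) metadata tiles; 10-bit uses 48x4 (TP10C). */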
+ if (!ten_bit) {
+ aligned_height = ALIGN(frame_height, 32);
+ chroma_height = frame_height >> 1;
+ chroma_height = ALIGN(chroma_height, 32);
+ aligned_width = ALIGN(frame_width, 128);
+ metadata_stride =
+ ubwc_metadata_plane_stride(frame_width, 64, 32);
+ meta_buf_height =
+ ubwc_metadata_plane_bufheight(frame_height, 16, 8);
+ meta_size_y = ubwc_metadata_plane_buffer_size(metadata_stride,
+ meta_buf_height);
+ meta_size_c = ubwc_metadata_plane_buffer_size(metadata_stride,
+ meta_buf_height);
+ size = (aligned_height + chroma_height) * aligned_width +
+ meta_size_y + meta_size_c;
+ size = (size * (num_ref + 3)) + 4096;
+ } else {
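+		/* TP10 packs three 10-bit samples into four bytes, hence the 4/3 stride. */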
+ ref_buf_height = (frame_height + (32 - 1)) & (~(32 - 1));
+ ref_luma_stride_bytes = ((frame_width + 192 - 1) / 192) * 192;
+ ref_stride = 4 * (ref_luma_stride_bytes / 3);
+ ref_stride = (ref_stride + (128 - 1)) & (~(128 - 1));
+ luma_size = ref_buf_height * ref_stride;
+ ref_chroma_height_bytes =
+ (((frame_height + 1) >> 1) + (32 - 1)) & (~(32 - 1));
+ chroma_size = ref_stride * ref_chroma_height_bytes;
+ luma_size = (luma_size + (SZ_4K - 1)) & (~(SZ_4K - 1));
+ chroma_size = (chroma_size + (SZ_4K - 1)) & (~(SZ_4K - 1));
+ ref_buf_size = luma_size + chroma_size;
+ metadata_stride =
+ ubwc_metadata_plane_stride(frame_width, 64, 48);
+ meta_buf_height =
+ ubwc_metadata_plane_bufheight(frame_height, 16, 4);
+ meta_size_y = ubwc_metadata_plane_buffer_size(metadata_stride,
+ meta_buf_height);
+ meta_size_c = ubwc_metadata_plane_buffer_size(metadata_stride,
+ meta_buf_height);
+ size = ref_buf_size + meta_size_y + meta_size_c;
+ size = (size * (num_ref + 3)) + 4096;
+ }
+
+ return size;
+}
+
+static u32 iris_vpu_enc_scratch2_size(struct iris_inst *inst)
+{
+ struct v4l2_format *f = inst->fmt_dst;
+ u32 frame_width = f->fmt.pix_mp.width;
+ u32 frame_height = f->fmt.pix_mp.height;
+ u32 num_ref = 1;
+
+ return hfi_buffer_scratch2_enc(frame_width, frame_height, num_ref,
+ false);
+}
+
+static u32 iris_vpu_enc_vpss_size(struct iris_inst *inst)
+{
+	bool ds_enable = is_scaling_enabled(inst);
+ struct v4l2_format *f = inst->fmt_dst;
+ u32 height = f->fmt.pix_mp.height;
+ u32 width = f->fmt.pix_mp.width;
+
+	return hfi_buffer_vpss_enc(width, height, ds_enable, 0, false);
+}
+
static int output_min_count(struct iris_inst *inst)
{
int output_min_count = 4;
@@ -571,10 +1431,10 @@ struct iris_vpu_buf_type_handle {
u32 (*handle)(struct iris_inst *inst);
};
-int iris_vpu_buf_size(struct iris_inst *inst, enum iris_buffer_type buffer_type)
+u32 iris_vpu_buf_size(struct iris_inst *inst, enum iris_buffer_type buffer_type)
{
- const struct iris_vpu_buf_type_handle *buf_type_handle_arr;
- u32 size = 0, buf_type_handle_size, i;
+ const struct iris_vpu_buf_type_handle *buf_type_handle_arr = NULL;
+ u32 size = 0, buf_type_handle_size = 0, i;
static const struct iris_vpu_buf_type_handle dec_internal_buf_type_handle[] = {
{BUF_BIN, iris_vpu_dec_bin_size },
@@ -586,8 +1446,24 @@ int iris_vpu_buf_size(struct iris_inst *inst, enum iris_buffer_type buffer_type)
{BUF_SCRATCH_1, iris_vpu_dec_scratch1_size },
};
- buf_type_handle_size = ARRAY_SIZE(dec_internal_buf_type_handle);
- buf_type_handle_arr = dec_internal_buf_type_handle;
+ static const struct iris_vpu_buf_type_handle enc_internal_buf_type_handle[] = {
+ {BUF_BIN, iris_vpu_enc_bin_size },
+ {BUF_COMV, iris_vpu_enc_comv_size },
+ {BUF_NON_COMV, iris_vpu_enc_non_comv_size },
+ {BUF_LINE, iris_vpu_enc_line_size },
+ {BUF_ARP, iris_vpu_enc_arp_size },
+ {BUF_VPSS, iris_vpu_enc_vpss_size },
+ {BUF_SCRATCH_1, iris_vpu_enc_scratch1_size },
+ {BUF_SCRATCH_2, iris_vpu_enc_scratch2_size },
+ };
+
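+	/* Pick the handler table for the session domain; others return size 0. */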
+ if (inst->domain == DECODER) {
+ buf_type_handle_size = ARRAY_SIZE(dec_internal_buf_type_handle);
+ buf_type_handle_arr = dec_internal_buf_type_handle;
+ } else if (inst->domain == ENCODER) {
+ buf_type_handle_size = ARRAY_SIZE(enc_internal_buf_type_handle);
+ buf_type_handle_arr = enc_internal_buf_type_handle;
+ }
for (i = 0; i < buf_type_handle_size; i++) {
if (buf_type_handle_arr[i].type == buffer_type) {
@@ -599,6 +1475,34 @@ int iris_vpu_buf_size(struct iris_inst *inst, enum iris_buffer_type buffer_type)
return size;
}
+u32 iris_vpu33_buf_size(struct iris_inst *inst, enum iris_buffer_type buffer_type)
+{
+ u32 size = 0, i;
+
+ static const struct iris_vpu_buf_type_handle enc_internal_buf_type_handle[] = {
+ {BUF_BIN, iris_vpu_enc_bin_size },
+ {BUF_COMV, iris_vpu_enc_comv_size },
+ {BUF_NON_COMV, iris_vpu_enc_non_comv_size },
+ {BUF_LINE, iris_vpu33_enc_line_size },
+ {BUF_ARP, iris_vpu_enc_arp_size },
+ {BUF_VPSS, iris_vpu_enc_vpss_size },
+ {BUF_SCRATCH_1, iris_vpu_enc_scratch1_size },
+ {BUF_SCRATCH_2, iris_vpu_enc_scratch2_size },
+ };
+
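+	/* Decoder buffer sizes are unchanged on VPU3.3; reuse the common helper. */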
+ if (inst->domain == DECODER)
+ return iris_vpu_buf_size(inst, buffer_type);
+
+ for (i = 0; i < ARRAY_SIZE(enc_internal_buf_type_handle); i++) {
+ if (enc_internal_buf_type_handle[i].type == buffer_type) {
+ size = enc_internal_buf_type_handle[i].handle(inst);
+ break;
+ }
+ }
+
+ return size;
+}
+
static u32 internal_buffer_count(struct iris_inst *inst,
enum iris_buffer_type buffer_type)
{
@@ -628,7 +1532,10 @@ int iris_vpu_buf_count(struct iris_inst *inst, enum iris_buffer_type buffer_type
case BUF_INPUT:
return MIN_BUFFERS;
case BUF_OUTPUT:
- return output_min_count(inst);
+ if (inst->domain == ENCODER)
+ return MIN_BUFFERS;
+ else
+ return output_min_count(inst);
case BUF_BIN:
case BUF_COMV:
case BUF_NON_COMV:
@@ -636,6 +1543,9 @@ int iris_vpu_buf_count(struct iris_inst *inst, enum iris_buffer_type buffer_type
case BUF_PERSIST:
return internal_buffer_count(inst, buffer_type);
case BUF_SCRATCH_1:
+ case BUF_SCRATCH_2:
+ case BUF_VPSS:
+ case BUF_ARP:
return 1; /* internal buffer count needed by firmware is 1 */
case BUF_DPB:
return iris_vpu_dpb_count(inst);
diff --git a/drivers/media/platform/qcom/iris/iris_vpu_buffer.h b/drivers/media/platform/qcom/iris/iris_vpu_buffer.h
index ee95fd20b794..04f0b7400a1e 100644
--- a/drivers/media/platform/qcom/iris/iris_vpu_buffer.h
+++ b/drivers/media/platform/qcom/iris/iris_vpu_buffer.h
@@ -41,6 +41,7 @@ struct iris_inst;
#define SIZE_SLIST_BUF_H265 (BIT(10))
#define H265_DISPLAY_BUF_SIZE (3072)
#define H265_NUM_FRM_INFO (48)
+#define SIZE_ONE_SLICE_BUF 256
#define VP9_NUM_FRAME_INFO_BUF 32
#define VP9_NUM_PROBABILITY_TABLE_BUF (VP9_NUM_FRAME_INFO_BUF + 4)
@@ -80,6 +81,26 @@ struct iris_inst;
#define MAX_PE_NBR_DATA_LCU64_LINE_BUFFER_SIZE 384
#define MAX_FE_NBR_DATA_LUMA_LINE_BUFFER_SIZE 640
+#define SIZE_SLICE_CMD_BUFFER (ALIGN(20480, 256))
+#define SIZE_SPS_PPS_SLICE_HDR (2048 + 4096)
+#define SIZE_BSE_SLICE_CMD_BUF ((((8192 << 2) + 7) & (~7)) * 3)
+#define SIZE_LAMBDA_LUT (256 * 11)
+
+#define HFI_COL_FMT_NV12C_Y_TILE_HEIGHT (8)
+#define HFI_COL_FMT_NV12C_Y_TILE_WIDTH (32)
+#define HFI_COL_FMT_TP10C_Y_TILE_HEIGHT (4)
+#define HFI_COL_FMT_TP10C_Y_TILE_WIDTH (48)
+
+#define IRIS_METADATA_STRIDE_MULTIPLE 64
+#define IRIS_METADATA_HEIGHT_MULTIPLE 16
+
+#define HFI_BUFFER_ARP_ENC 204800
+
+#define MAX_WIDTH 4096
+#define MAX_HEIGHT 2304
+#define NUM_MBS_4K (DIV_ROUND_UP(MAX_WIDTH, 16) * DIV_ROUND_UP(MAX_HEIGHT, 16))
+#define NUM_MBS_720P (((ALIGN(1280, 16)) >> 4) * ((ALIGN(736, 16)) >> 4))
+
static inline u32 size_h264d_lb_fe_top_data(u32 frame_width)
{
return MAX_FE_NBR_DATA_LUMA_LINE_BUFFER_SIZE * ALIGN(frame_width, 16) * 3;
@@ -125,7 +146,8 @@ static inline u32 size_h264d_qp(u32 frame_width, u32 frame_height)
return DIV_ROUND_UP(frame_width, 64) * DIV_ROUND_UP(frame_height, 64) * 128;
}
-int iris_vpu_buf_size(struct iris_inst *inst, enum iris_buffer_type buffer_type);
+u32 iris_vpu_buf_size(struct iris_inst *inst, enum iris_buffer_type buffer_type);
+u32 iris_vpu33_buf_size(struct iris_inst *inst, enum iris_buffer_type buffer_type);
int iris_vpu_buf_count(struct iris_inst *inst, enum iris_buffer_type buffer_type);
#endif
diff --git a/drivers/media/platform/qcom/iris/iris_vpu_common.c b/drivers/media/platform/qcom/iris/iris_vpu_common.c
index 268e45acaa7c..bb98950e018f 100644
--- a/drivers/media/platform/qcom/iris/iris_vpu_common.c
+++ b/drivers/media/platform/qcom/iris/iris_vpu_common.c
@@ -84,6 +84,7 @@ static void iris_vpu_interrupt_init(struct iris_core *core)
static void iris_vpu_setup_ucregion_memory_map(struct iris_core *core)
{
u32 queue_size, value;
+ const struct vpu_ops *vpu_ops = core->iris_platform_data->vpu_ops;
/* Iris hardware requires 4K queue alignment */
queue_size = ALIGN(sizeof(struct iris_hfi_queue_table_header) +
@@ -105,6 +106,9 @@ static void iris_vpu_setup_ucregion_memory_map(struct iris_core *core)
value = (u32)core->sfr_daddr + core->iris_platform_data->core_arch;
writel(value, core->reg_base + SFR_ADDR);
}
+
+ if (vpu_ops->program_bootup_registers)
+ vpu_ops->program_bootup_registers(core);
}
int iris_vpu_boot_firmware(struct iris_core *core)
@@ -271,7 +275,7 @@ void iris_vpu_power_off(struct iris_core *core)
disable_irq_nosync(core->irq);
}
-static int iris_vpu_power_on_controller(struct iris_core *core)
+int iris_vpu_power_on_controller(struct iris_core *core)
{
u32 rst_tbl_size = core->iris_platform_data->clk_rst_tbl_size;
int ret;
@@ -302,7 +306,7 @@ err_disable_power:
return ret;
}
-static int iris_vpu_power_on_hw(struct iris_core *core)
+int iris_vpu_power_on_hw(struct iris_core *core)
{
int ret;
@@ -337,11 +341,11 @@ int iris_vpu_power_on(struct iris_core *core)
if (ret)
goto err;
- ret = iris_vpu_power_on_controller(core);
+ ret = core->iris_platform_data->vpu_ops->power_on_controller(core);
if (ret)
goto err_unvote_icc;
- ret = iris_vpu_power_on_hw(core);
+ ret = core->iris_platform_data->vpu_ops->power_on_hw(core);
if (ret)
goto err_power_off_ctrl;
@@ -359,7 +363,7 @@ int iris_vpu_power_on(struct iris_core *core)
return 0;
err_power_off_ctrl:
- iris_vpu_power_off_controller(core);
+ core->iris_platform_data->vpu_ops->power_off_controller(core);
err_unvote_icc:
iris_unset_icc_bw(core);
err:
diff --git a/drivers/media/platform/qcom/iris/iris_vpu_common.h b/drivers/media/platform/qcom/iris/iris_vpu_common.h
index 93b7fa27be3b..d636e287457a 100644
--- a/drivers/media/platform/qcom/iris/iris_vpu_common.h
+++ b/drivers/media/platform/qcom/iris/iris_vpu_common.h
@@ -11,10 +11,14 @@ struct iris_core;
extern const struct vpu_ops iris_vpu2_ops;
extern const struct vpu_ops iris_vpu3_ops;
extern const struct vpu_ops iris_vpu33_ops;
+extern const struct vpu_ops iris_vpu35_ops;
struct vpu_ops {
void (*power_off_hw)(struct iris_core *core);
+ int (*power_on_hw)(struct iris_core *core);
int (*power_off_controller)(struct iris_core *core);
+ int (*power_on_controller)(struct iris_core *core);
+ void (*program_bootup_registers)(struct iris_core *core);
u64 (*calc_freq)(struct iris_inst *inst, size_t data_size);
};
@@ -23,6 +27,8 @@ void iris_vpu_raise_interrupt(struct iris_core *core);
void iris_vpu_clear_interrupt(struct iris_core *core);
int iris_vpu_watchdog(struct iris_core *core, u32 intr_status);
int iris_vpu_prepare_pc(struct iris_core *core);
+int iris_vpu_power_on_controller(struct iris_core *core);
+int iris_vpu_power_on_hw(struct iris_core *core);
int iris_vpu_power_on(struct iris_core *core);
int iris_vpu_power_off_controller(struct iris_core *core);
void iris_vpu_power_off_hw(struct iris_core *core);
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index 4c049c694d9c..abf959b8f3a6 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -254,14 +254,19 @@ err:
static void venus_assign_register_offsets(struct venus_core *core)
{
- if (IS_IRIS2(core) || IS_IRIS2_1(core)) {
- core->vbif_base = core->base + VBIF_BASE;
+ if (IS_IRIS2(core) || IS_IRIS2_1(core) || IS_AR50_LITE(core)) {
core->cpu_base = core->base + CPU_BASE_V6;
core->cpu_cs_base = core->base + CPU_CS_BASE_V6;
core->cpu_ic_base = core->base + CPU_IC_BASE_V6;
core->wrapper_base = core->base + WRAPPER_BASE_V6;
core->wrapper_tz_base = core->base + WRAPPER_TZ_BASE_V6;
- core->aon_base = core->base + AON_BASE_V6;
+ if (IS_AR50_LITE(core)) {
+ core->vbif_base = NULL;
+ core->aon_base = NULL;
+ } else {
+ core->vbif_base = core->base + VBIF_BASE;
+ core->aon_base = core->base + AON_BASE_V6;
+ }
} else {
core->vbif_base = core->base + VBIF_BASE;
core->cpu_base = core->base + CPU_BASE;
@@ -448,24 +453,18 @@ static int venus_probe(struct platform_device *pdev)
if (ret < 0)
goto err_runtime_disable;
- if (core->res->dec_nodename || core->res->enc_nodename) {
- ret = venus_add_dynamic_nodes(core);
- if (ret)
- goto err_runtime_disable;
- }
-
- ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
- if (ret)
- goto err_remove_dynamic_nodes;
-
ret = venus_firmware_init(core);
if (ret)
- goto err_of_depopulate;
+ goto err_runtime_disable;
ret = venus_boot(core);
if (ret)
goto err_firmware_deinit;
+ ret = venus_firmware_cfg(core);
+ if (ret)
+ goto err_venus_shutdown;
+
ret = hfi_core_resume(core, true);
if (ret)
goto err_venus_shutdown;
@@ -474,34 +473,48 @@ static int venus_probe(struct platform_device *pdev)
if (ret)
goto err_venus_shutdown;
- ret = venus_enumerate_codecs(core, VIDC_SESSION_TYPE_DEC);
+ ret = venus_firmware_check(core);
if (ret)
goto err_core_deinit;
+ if (core->res->dec_nodename || core->res->enc_nodename) {
+ ret = venus_add_dynamic_nodes(core);
+ if (ret)
+ goto err_core_deinit;
+ }
+
+ ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (ret)
+ goto err_remove_dynamic_nodes;
+
+ ret = venus_enumerate_codecs(core, VIDC_SESSION_TYPE_DEC);
+ if (ret)
+ goto err_of_depopulate;
+
ret = venus_enumerate_codecs(core, VIDC_SESSION_TYPE_ENC);
if (ret)
- goto err_core_deinit;
+ goto err_of_depopulate;
ret = pm_runtime_put_sync(dev);
if (ret) {
pm_runtime_get_noresume(dev);
- goto err_core_deinit;
+ goto err_of_depopulate;
}
venus_dbgfs_init(core);
return 0;
+err_of_depopulate:
+ of_platform_depopulate(dev);
+err_remove_dynamic_nodes:
+ venus_remove_dynamic_nodes(core);
err_core_deinit:
hfi_core_deinit(core, false);
err_venus_shutdown:
venus_shutdown(core);
err_firmware_deinit:
venus_firmware_deinit(core);
-err_of_depopulate:
- of_platform_depopulate(dev);
-err_remove_dynamic_nodes:
- venus_remove_dynamic_nodes(core);
err_runtime_disable:
pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
@@ -596,7 +609,7 @@ err_cpucfg_path:
return ret;
}
-void venus_close_common(struct venus_inst *inst)
+void venus_close_common(struct venus_inst *inst, struct file *filp)
{
/*
* Make sure we don't have IRQ/IRQ-thread currently running
@@ -607,7 +620,7 @@ void venus_close_common(struct venus_inst *inst)
v4l2_m2m_ctx_release(inst->m2m_ctx);
v4l2_m2m_release(inst->m2m_dev);
hfi_session_destroy(inst);
- v4l2_fh_del(&inst->fh);
+ v4l2_fh_del(&inst->fh, filp);
v4l2_fh_exit(&inst->fh);
v4l2_ctrl_handler_free(&inst->ctrl_handler);
@@ -1057,15 +1070,65 @@ static const struct venus_resources sc7280_res = {
.enc_nodename = "video-encoder",
};
+static const struct bw_tbl qcm2290_bw_table_dec[] = {
+ { 352800, 597000, 0, 746000, 0 }, /* 1080p@30 + 720p@30 */
+ { 244800, 413000, 0, 516000, 0 }, /* 1080p@30 */
+ { 216000, 364000, 0, 454000, 0 }, /* 720p@60 */
+ { 108000, 182000, 0, 227000, 0 }, /* 720p@30 */
+};
+
+static const struct bw_tbl qcm2290_bw_table_enc[] = {
+ { 352800, 396000, 0, 0, 0 }, /* 1080p@30 + 720p@30 */
+ { 244800, 275000, 0, 0, 0 }, /* 1080p@30 */
+ { 216000, 242000, 0, 0, 0 }, /* 720p@60 */
+ { 108000, 121000, 0, 0, 0 }, /* 720p@30 */
+};
+
+static const struct firmware_version min_fw = {
+ .major = 6, .minor = 0, .rev = 55,
+};
+
+static const struct venus_resources qcm2290_res = {
+ .bw_tbl_dec = qcm2290_bw_table_dec,
+ .bw_tbl_dec_size = ARRAY_SIZE(qcm2290_bw_table_dec),
+ .bw_tbl_enc = qcm2290_bw_table_enc,
+ .bw_tbl_enc_size = ARRAY_SIZE(qcm2290_bw_table_enc),
+ .clks = { "core", "iface", "bus", "throttle" },
+ .clks_num = 4,
+ .vcodec0_clks = { "vcodec0_core", "vcodec0_bus" },
+ .vcodec_clks_num = 2,
+ .vcodec_pmdomains = (const char *[]) { "venus", "vcodec0" },
+ .vcodec_pmdomains_num = 2,
+ .opp_pmdomain = (const char *[]) { "cx" },
+ .vcodec_num = 1,
+ .hfi_version = HFI_VERSION_4XX,
+ .vpu_version = VPU_VERSION_AR50_LITE,
+ .max_load = 352800,
+ .num_vpp_pipes = 1,
+ .vmem_id = VIDC_RESOURCE_NONE,
+ .vmem_size = 0,
+ .vmem_addr = 0,
+ .cp_start = 0,
+ .cp_size = 0x70800000,
+ .cp_nonpixel_start = 0x1000000,
+ .cp_nonpixel_size = 0x24800000,
+ .dma_mask = 0xe0000000 - 1,
+ .fwname = "qcom/venus-6.0/venus.mbn",
+ .dec_nodename = "video-decoder",
+ .enc_nodename = "video-encoder",
+ .min_fw = &min_fw,
+};
+
static const struct of_device_id venus_dt_match[] = {
{ .compatible = "qcom,msm8916-venus", .data = &msm8916_res, },
{ .compatible = "qcom,msm8996-venus", .data = &msm8996_res, },
{ .compatible = "qcom,msm8998-venus", .data = &msm8998_res, },
+ { .compatible = "qcom,qcm2290-venus", .data = &qcm2290_res, },
+ { .compatible = "qcom,sc7180-venus", .data = &sc7180_res, },
+ { .compatible = "qcom,sc7280-venus", .data = &sc7280_res, },
{ .compatible = "qcom,sdm660-venus", .data = &sdm660_res, },
{ .compatible = "qcom,sdm845-venus", .data = &sdm845_res, },
{ .compatible = "qcom,sdm845-venus-v2", .data = &sdm845_res_v2, },
- { .compatible = "qcom,sc7180-venus", .data = &sc7180_res, },
- { .compatible = "qcom,sc7280-venus", .data = &sc7280_res, },
{ .compatible = "qcom,sm8250-venus", .data = &sm8250_res, },
{ }
};
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index 5b1ba1c69adb..7506f5d0f609 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -58,6 +58,12 @@ enum vpu_version {
VPU_VERSION_IRIS2_1,
};
+struct firmware_version {
+ u32 major;
+ u32 minor;
+ u32 rev;
+};
+
struct venus_resources {
u64 dma_mask;
const struct freq_tbl *freq_tbl;
@@ -94,6 +100,7 @@ struct venus_resources {
const char *fwname;
const char *enc_nodename;
const char *dec_nodename;
+ const struct firmware_version *min_fw;
};
enum venus_fmt {
@@ -231,11 +238,7 @@ struct venus_core {
unsigned int core0_usage_count;
unsigned int core1_usage_count;
struct dentry *root;
- struct venus_img_version {
- u32 major;
- u32 minor;
- u32 rev;
- } venus_ver;
+ struct firmware_version venus_ver;
unsigned long dump_core;
struct of_changeset *ocs;
bool hwmode_dev;
@@ -530,12 +533,17 @@ struct venus_inst {
#define IS_IRIS2(core) ((core)->res->vpu_version == VPU_VERSION_IRIS2)
#define IS_IRIS2_1(core) ((core)->res->vpu_version == VPU_VERSION_IRIS2_1)
+static inline bool is_lite(struct venus_core *core)
+{
+ return IS_AR50_LITE(core);
+}
+
#define ctrl_to_inst(ctrl) \
container_of((ctrl)->handler, struct venus_inst, ctrl_handler)
static inline struct venus_inst *to_inst(struct file *filp)
{
- return container_of(filp->private_data, struct venus_inst, fh);
+ return container_of(file_to_v4l2_fh(filp), struct venus_inst, fh);
}
static inline void *to_hfi_priv(struct venus_core *core)
@@ -573,5 +581,5 @@ is_fw_rev_or_older(struct venus_core *core, u32 vmajor, u32 vminor, u32 vrev)
(core)->venus_ver.rev <= vrev);
}
-void venus_close_common(struct venus_inst *inst);
+void venus_close_common(struct venus_inst *inst, struct file *filp);
#endif
diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c
index 66a18830e66d..af0ac40bec9b 100644
--- a/drivers/media/platform/qcom/venus/firmware.c
+++ b/drivers/media/platform/qcom/venus/firmware.c
@@ -30,7 +30,7 @@ static void venus_reset_cpu(struct venus_core *core)
u32 fw_size = core->fw.mapped_mem_size;
void __iomem *wrapper_base;
- if (IS_IRIS2_1(core))
+ if (IS_IRIS2(core) || IS_IRIS2_1(core))
wrapper_base = core->wrapper_tz_base;
else
wrapper_base = core->wrapper_base;
@@ -42,7 +42,7 @@ static void venus_reset_cpu(struct venus_core *core)
writel(fw_size, wrapper_base + WRAPPER_NONPIX_START_ADDR);
writel(fw_size, wrapper_base + WRAPPER_NONPIX_END_ADDR);
- if (IS_IRIS2_1(core)) {
+ if (IS_IRIS2(core) || IS_IRIS2_1(core)) {
/* Bring XTSS out of reset */
writel(0, wrapper_base + WRAPPER_TZ_XTSS_SW_RESET);
} else {
@@ -68,7 +68,7 @@ int venus_set_hw_state(struct venus_core *core, bool resume)
if (resume) {
venus_reset_cpu(core);
} else {
- if (IS_IRIS2_1(core))
+ if (IS_IRIS2(core) || IS_IRIS2_1(core))
writel(WRAPPER_XTSS_SW_RESET_BIT,
core->wrapper_tz_base + WRAPPER_TZ_XTSS_SW_RESET);
else
@@ -136,8 +136,8 @@ static int venus_load_fw(struct venus_core *core, const char *fwname,
ret = qcom_mdt_load(dev, mdt, fwname, VENUS_PAS_ID,
mem_va, *mem_phys, *mem_size, NULL);
else
- ret = qcom_mdt_load_no_init(dev, mdt, fwname, VENUS_PAS_ID,
- mem_va, *mem_phys, *mem_size, NULL);
+ ret = qcom_mdt_load_no_init(dev, mdt, fwname, mem_va,
+ *mem_phys, *mem_size, NULL);
memunmap(mem_va);
err_release_fw:
@@ -181,7 +181,7 @@ static int venus_shutdown_no_tz(struct venus_core *core)
void __iomem *wrapper_base = core->wrapper_base;
void __iomem *wrapper_tz_base = core->wrapper_tz_base;
- if (IS_IRIS2_1(core)) {
+ if (IS_IRIS2(core) || IS_IRIS2_1(core)) {
/* Assert the reset to XTSS */
reg = readl(wrapper_tz_base + WRAPPER_TZ_XTSS_SW_RESET);
reg |= WRAPPER_XTSS_SW_RESET_BIT;
@@ -207,6 +207,16 @@ static int venus_shutdown_no_tz(struct venus_core *core)
return 0;
}
+int venus_firmware_cfg(struct venus_core *core)
+{
+ void __iomem *cpu_cs_base = core->cpu_cs_base;
+
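+	/* On AR50 Lite, tell the firmware to turn off ARP via the VCICMD register. */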
+ if (IS_AR50_LITE(core))
+ writel(CPU_CS_VCICMD_ARP_OFF, cpu_cs_base + CPU_CS_VCICMD);
+
+ return 0;
+}
+
int venus_boot(struct venus_core *core)
{
struct device *dev = core->dev;
@@ -280,6 +290,26 @@ int venus_shutdown(struct venus_core *core)
return ret;
}
+int venus_firmware_check(struct venus_core *core)
+{
+ const struct firmware_version *req = core->res->min_fw;
+ const struct firmware_version *run = &core->venus_ver;
+
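+	/* Platforms that do not declare a minimum firmware version always pass. */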
+ if (!req)
+ return 0;
+
+ if (!is_fw_rev_or_newer(core, req->major, req->minor, req->rev))
+ goto error;
+
+ return 0;
+error:
+	dev_err(core->dev, "firmware v%u.%u.%u is older than required v%u.%u.%u\n",
+		run->major, run->minor, run->rev,
+		req->major, req->minor, req->rev);
+
+ return -EINVAL;
+}
+
int venus_firmware_init(struct venus_core *core)
{
struct platform_device_info info;
diff --git a/drivers/media/platform/qcom/venus/firmware.h b/drivers/media/platform/qcom/venus/firmware.h
index aaccd847fa30..87e1d922b369 100644
--- a/drivers/media/platform/qcom/venus/firmware.h
+++ b/drivers/media/platform/qcom/venus/firmware.h
@@ -9,6 +9,8 @@ struct device;
int venus_firmware_init(struct venus_core *core);
void venus_firmware_deinit(struct venus_core *core);
+int venus_firmware_check(struct venus_core *core);
+int venus_firmware_cfg(struct venus_core *core);
int venus_boot(struct venus_core *core);
int venus_shutdown(struct venus_core *core);
int venus_set_hw_state(struct venus_core *core, bool suspend);
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
index 8295542e1a7c..2e4363f82231 100644
--- a/drivers/media/platform/qcom/venus/helpers.c
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -1715,11 +1715,17 @@ int venus_helper_session_init(struct venus_inst *inst)
if (ret)
return ret;
- inst->clk_data.vpp_freq = hfi_platform_get_codec_vpp_freq(version, codec,
+ inst->clk_data.vpp_freq = hfi_platform_get_codec_vpp_freq(inst->core,
+ version,
+ codec,
session_type);
- inst->clk_data.vsp_freq = hfi_platform_get_codec_vsp_freq(version, codec,
+ inst->clk_data.vsp_freq = hfi_platform_get_codec_vsp_freq(inst->core,
+ version,
+ codec,
session_type);
- inst->clk_data.low_power_freq = hfi_platform_get_codec_lp_freq(version, codec,
+ inst->clk_data.low_power_freq = hfi_platform_get_codec_lp_freq(inst->core,
+ version,
+ codec,
session_type);
return 0;
diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c
index cf0d97cbc463..47b99d5b5af7 100644
--- a/drivers/media/platform/qcom/venus/hfi_msgs.c
+++ b/drivers/media/platform/qcom/venus/hfi_msgs.c
@@ -277,7 +277,12 @@ static void hfi_sys_init_done(struct venus_core *core, struct venus_inst *inst,
done:
core->error = error;
- complete(&core->done);
+ /*
+	 * core_init may need the firmware version validated before it returns,
+	 * so completion is deferred until the version string is retrieved.
+ */
+ if (!core->res->min_fw)
+ complete(&core->done);
}
static void
@@ -328,6 +333,10 @@ done:
if (!IS_ERR(smem_tbl_ptr) && smem_blk_sz >= SMEM_IMG_OFFSET_VENUS + VER_STR_SZ)
memcpy(smem_tbl_ptr + SMEM_IMG_OFFSET_VENUS,
img_ver, VER_STR_SZ);
+
+	/* core_init may be waiting on the version check; complete it now */
+ if (core->res->min_fw)
+ complete(&core->done);
}
static void hfi_sys_property_info(struct venus_core *core,
diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c
index 1b3db2caa99f..92765f9c8873 100644
--- a/drivers/media/platform/qcom/venus/hfi_parser.c
+++ b/drivers/media/platform/qcom/venus/hfi_parser.c
@@ -282,7 +282,7 @@ static int hfi_platform_parser(struct venus_core *core, struct venus_inst *inst)
return ret;
if (plat->capabilities)
- caps = plat->capabilities(&entries);
+ caps = plat->capabilities(core, &entries);
if (!caps || !entries || !count)
return -EINVAL;
diff --git a/drivers/media/platform/qcom/venus/hfi_platform.c b/drivers/media/platform/qcom/venus/hfi_platform.c
index 643e5aa138f5..cde7f93045ac 100644
--- a/drivers/media/platform/qcom/venus/hfi_platform.c
+++ b/drivers/media/platform/qcom/venus/hfi_platform.c
@@ -21,7 +21,9 @@ const struct hfi_platform *hfi_platform_get(enum hfi_version version)
}
unsigned long
-hfi_platform_get_codec_vpp_freq(enum hfi_version version, u32 codec, u32 session_type)
+hfi_platform_get_codec_vpp_freq(struct venus_core *core,
+ enum hfi_version version, u32 codec,
+ u32 session_type)
{
const struct hfi_platform *plat;
unsigned long freq = 0;
@@ -31,13 +33,15 @@ hfi_platform_get_codec_vpp_freq(enum hfi_version version, u32 codec, u32 session
return 0;
if (plat->codec_vpp_freq)
- freq = plat->codec_vpp_freq(session_type, codec);
+ freq = plat->codec_vpp_freq(core, session_type, codec);
return freq;
}
unsigned long
-hfi_platform_get_codec_vsp_freq(enum hfi_version version, u32 codec, u32 session_type)
+hfi_platform_get_codec_vsp_freq(struct venus_core *core,
+ enum hfi_version version, u32 codec,
+ u32 session_type)
{
const struct hfi_platform *plat;
unsigned long freq = 0;
@@ -47,13 +51,15 @@ hfi_platform_get_codec_vsp_freq(enum hfi_version version, u32 codec, u32 session
return 0;
if (plat->codec_vpp_freq)
- freq = plat->codec_vsp_freq(session_type, codec);
+ freq = plat->codec_vsp_freq(core, session_type, codec);
return freq;
}
unsigned long
-hfi_platform_get_codec_lp_freq(enum hfi_version version, u32 codec, u32 session_type)
+hfi_platform_get_codec_lp_freq(struct venus_core *core,
+ enum hfi_version version, u32 codec,
+ u32 session_type)
{
const struct hfi_platform *plat;
unsigned long freq = 0;
@@ -63,13 +69,14 @@ hfi_platform_get_codec_lp_freq(enum hfi_version version, u32 codec, u32 session_
return 0;
if (plat->codec_lp_freq)
- freq = plat->codec_lp_freq(session_type, codec);
+ freq = plat->codec_lp_freq(core, session_type, codec);
return freq;
}
int
-hfi_platform_get_codecs(struct venus_core *core, u32 *enc_codecs, u32 *dec_codecs, u32 *count)
+hfi_platform_get_codecs(struct venus_core *core, u32 *enc_codecs,
+ u32 *dec_codecs, u32 *count)
{
const struct hfi_platform *plat;
@@ -78,7 +85,7 @@ hfi_platform_get_codecs(struct venus_core *core, u32 *enc_codecs, u32 *dec_codec
return -EINVAL;
if (plat->codecs)
- plat->codecs(enc_codecs, dec_codecs, count);
+ plat->codecs(core, enc_codecs, dec_codecs, count);
if (IS_IRIS2_1(core)) {
*enc_codecs &= ~HFI_VIDEO_CODEC_VP8;
diff --git a/drivers/media/platform/qcom/venus/hfi_platform.h b/drivers/media/platform/qcom/venus/hfi_platform.h
index ec89a90a8129..5e4f8013a6b1 100644
--- a/drivers/media/platform/qcom/venus/hfi_platform.h
+++ b/drivers/media/platform/qcom/venus/hfi_platform.h
@@ -47,11 +47,16 @@ struct hfi_platform_codec_freq_data {
};
struct hfi_platform {
- unsigned long (*codec_vpp_freq)(u32 session_type, u32 codec);
- unsigned long (*codec_vsp_freq)(u32 session_type, u32 codec);
- unsigned long (*codec_lp_freq)(u32 session_type, u32 codec);
- void (*codecs)(u32 *enc_codecs, u32 *dec_codecs, u32 *count);
- const struct hfi_plat_caps *(*capabilities)(unsigned int *entries);
+ unsigned long (*codec_vpp_freq)(struct venus_core *core,
+ u32 session_type, u32 codec);
+ unsigned long (*codec_vsp_freq)(struct venus_core *core,
+ u32 session_type, u32 codec);
+ unsigned long (*codec_lp_freq)(struct venus_core *core,
+ u32 session_type, u32 codec);
+ void (*codecs)(struct venus_core *core, u32 *enc_codecs,
+ u32 *dec_codecs, u32 *count);
+ const struct hfi_plat_caps *(*capabilities)(struct venus_core *core,
+ unsigned int *entries);
int (*bufreq)(struct hfi_plat_buffers_params *params, u32 session_type,
u32 buftype, struct hfi_buffer_requirements *bufreq);
};
@@ -60,12 +65,15 @@ extern const struct hfi_platform hfi_plat_v4;
extern const struct hfi_platform hfi_plat_v6;
const struct hfi_platform *hfi_platform_get(enum hfi_version version);
-unsigned long hfi_platform_get_codec_vpp_freq(enum hfi_version version, u32 codec,
- u32 session_type);
-unsigned long hfi_platform_get_codec_vsp_freq(enum hfi_version version, u32 codec,
- u32 session_type);
-unsigned long hfi_platform_get_codec_lp_freq(enum hfi_version version, u32 codec,
- u32 session_type);
-int hfi_platform_get_codecs(struct venus_core *core, u32 *enc_codecs, u32 *dec_codecs,
- u32 *count);
+unsigned long hfi_platform_get_codec_vpp_freq(struct venus_core *core,
+ enum hfi_version version,
+ u32 codec, u32 session_type);
+unsigned long hfi_platform_get_codec_vsp_freq(struct venus_core *core,
+ enum hfi_version version,
+ u32 codec, u32 session_type);
+unsigned long hfi_platform_get_codec_lp_freq(struct venus_core *core,
+ enum hfi_version version,
+ u32 codec, u32 session_type);
+int hfi_platform_get_codecs(struct venus_core *core, u32 *enc_codecs,
+ u32 *dec_codecs, u32 *count);
#endif
diff --git a/drivers/media/platform/qcom/venus/hfi_platform_v4.c b/drivers/media/platform/qcom/venus/hfi_platform_v4.c
index e3f0a90a567b..cda888b56b5d 100644
--- a/drivers/media/platform/qcom/venus/hfi_platform_v4.c
+++ b/drivers/media/platform/qcom/venus/hfi_platform_v4.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*/
+#include "core.h"
#include "hfi_platform.h"
static const struct hfi_plat_caps caps[] = {
@@ -245,20 +246,150 @@ static const struct hfi_plat_caps caps[] = {
.num_fmts = 4,
} };
-static const struct hfi_plat_caps *get_capabilities(unsigned int *entries)
+static const struct hfi_plat_caps caps_lite[] = {
{
- *entries = ARRAY_SIZE(caps);
- return caps;
+ .codec = HFI_VIDEO_CODEC_H264,
+ .domain = VIDC_SESSION_TYPE_DEC,
+ .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 128, 1920, 1},
+ .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 128, 1920, 1},
+ .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 64, 8160, 1},
+	.caps[3] = {HFI_CAPABILITY_BITRATE, 1, 60000000, 1},
+ .caps[4] = {HFI_CAPABILITY_MBS_PER_SECOND, 64, 244800, 1},
+ .caps[5] = {HFI_CAPABILITY_FRAMERATE, 1, 120, 1},
+ .caps[6] = {HFI_CAPABILITY_MAX_VIDEOCORES, 0, 1, 1},
+ .num_caps = 7,
+	.pl[0] = {HFI_H264_PROFILE_BASELINE, HFI_H264_LEVEL_5},
+ .pl[1] = {HFI_H264_PROFILE_MAIN, HFI_H264_LEVEL_5},
+ .pl[2] = {HFI_H264_PROFILE_HIGH, HFI_H264_LEVEL_5},
+ .pl[3] = {HFI_H264_PROFILE_CONSTRAINED_BASE, HFI_H264_LEVEL_5},
+ .pl[4] = {HFI_H264_PROFILE_CONSTRAINED_HIGH, HFI_H264_LEVEL_5},
+ .num_pl = 5,
+ .fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[1] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12},
+ .fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21},
+ .num_fmts = 4,
+}, {
+ .codec = HFI_VIDEO_CODEC_HEVC,
+ .domain = VIDC_SESSION_TYPE_DEC,
+ .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 128, 1920, 1},
+ .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 128, 1920, 1},
+ .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 64, 8160, 1},
+	.caps[3] = {HFI_CAPABILITY_BITRATE, 1, 60000000, 1},
+ .caps[4] = {HFI_CAPABILITY_MBS_PER_SECOND, 64, 244800, 1},
+ .caps[5] = {HFI_CAPABILITY_FRAMERATE, 1, 120, 1},
+ .caps[6] = {HFI_CAPABILITY_MAX_VIDEOCORES, 0, 1, 1},
+ .num_caps = 7,
+	.pl[0] = {HFI_HEVC_PROFILE_MAIN, HFI_HEVC_LEVEL_5 | HFI_HEVC_TIER_HIGH0 << 28},
+	.pl[1] = {HFI_HEVC_PROFILE_MAIN10, HFI_HEVC_LEVEL_5 | HFI_HEVC_TIER_HIGH0 << 28},
+ .num_pl = 2,
+ .fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[1] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12},
+ .fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21},
+ .num_fmts = 4,
+}, {
+ .codec = HFI_VIDEO_CODEC_VP9,
+ .domain = VIDC_SESSION_TYPE_DEC,
+ .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 128, 1920, 1},
+ .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 128, 1920, 1},
+ .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 64, 8160, 1},
+	.caps[3] = {HFI_CAPABILITY_BITRATE, 1, 60000000, 1},
+ .caps[4] = {HFI_CAPABILITY_MBS_PER_SECOND, 64, 244800, 1},
+ .caps[5] = {HFI_CAPABILITY_FRAMERATE, 1, 120, 1},
+ .caps[6] = {HFI_CAPABILITY_MAX_VIDEOCORES, 0, 1, 1},
+ .num_caps = 7,
+ .pl[0] = {HFI_VP9_PROFILE_P0, 200},
+ .pl[1] = {HFI_VP9_PROFILE_P2_10B, 200},
+ .num_pl = 2,
+ .fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[1] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC},
+ .fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12},
+ .fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21},
+ .num_fmts = 4,
+}, {
+ .codec = HFI_VIDEO_CODEC_H264,
+ .domain = VIDC_SESSION_TYPE_ENC,
+ .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 128, 1920, 1},
+ .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 128, 1920, 1},
+ .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 64, 8160, 1},
+	.caps[3] = {HFI_CAPABILITY_BITRATE, 1, 60000000, 1},
+ .caps[4] = {HFI_CAPABILITY_MBS_PER_SECOND, 64, 244800, 1},
+ .caps[5] = {HFI_CAPABILITY_FRAMERATE, 1, 120, 1},
+ .caps[6] = {HFI_CAPABILITY_MAX_VIDEOCORES, 0, 1, 1},
+ .caps[7] = {HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS, 0, 6, 1},
+ .caps[8] = {HFI_CAPABILITY_ENC_LTR_COUNT, 0, 4, 1},
+ .caps[9] = {HFI_CAPABILITY_MBS_PER_SECOND_POWERSAVE, 0, 244800, 1},
+ .caps[10] = {HFI_CAPABILITY_I_FRAME_QP, 0, 51, 1},
+ .caps[11] = {HFI_CAPABILITY_P_FRAME_QP, 0, 51, 1},
+ .caps[12] = {HFI_CAPABILITY_B_FRAME_QP, 0, 51, 1},
+ .caps[13] = {HFI_CAPABILITY_SLICE_BYTE, 1, 10, 1},
+ .caps[14] = {HFI_CAPABILITY_SLICE_MB, 1, 10, 1},
+ .num_caps = 15,
+ .pl[0] = {HFI_H264_PROFILE_BASELINE, HFI_H264_LEVEL_5},
+ .pl[1] = {HFI_H264_PROFILE_MAIN, HFI_H264_LEVEL_5},
+ .pl[2] = {HFI_H264_PROFILE_HIGH, HFI_H264_LEVEL_5},
+ .pl[3] = {HFI_H264_PROFILE_CONSTRAINED_BASE, HFI_H264_LEVEL_5},
+ .pl[4] = {HFI_H264_PROFILE_CONSTRAINED_HIGH, HFI_H264_LEVEL_5},
+ .num_pl = 5,
+ .fmts[0] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12},
+ .fmts[1] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12_UBWC},
+ .num_fmts = 2,
+}, {
+ .codec = HFI_VIDEO_CODEC_HEVC,
+ .domain = VIDC_SESSION_TYPE_ENC,
+ .caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 128, 1920, 1},
+ .caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 128, 1920, 1},
+ .caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 64, 8160, 1},
+	.caps[3] = {HFI_CAPABILITY_BITRATE, 1, 60000000, 1},
+ .caps[4] = {HFI_CAPABILITY_MBS_PER_SECOND, 64, 244800, 1},
+ .caps[5] = {HFI_CAPABILITY_FRAMERATE, 1, 120, 1},
+ .caps[6] = {HFI_CAPABILITY_MAX_VIDEOCORES, 0, 1, 1},
+ .caps[7] = {HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS, 0, 6, 1},
+ .caps[8] = {HFI_CAPABILITY_ENC_LTR_COUNT, 0, 4, 1},
+ .caps[9] = {HFI_CAPABILITY_MBS_PER_SECOND_POWERSAVE, 0, 244800, 1},
+ .caps[10] = {HFI_CAPABILITY_I_FRAME_QP, 0, 51, 1},
+ .caps[11] = {HFI_CAPABILITY_P_FRAME_QP, 0, 51, 1},
+ .caps[12] = {HFI_CAPABILITY_B_FRAME_QP, 0, 51, 1},
+ .caps[13] = {HFI_CAPABILITY_SLICE_BYTE, 1, 10, 1},
+ .caps[14] = {HFI_CAPABILITY_SLICE_MB, 1, 10, 1},
+ .num_caps = 15,
+ .pl[0] = {HFI_HEVC_PROFILE_MAIN, HFI_HEVC_LEVEL_5 | HFI_HEVC_TIER_HIGH0},
+ .pl[1] = {HFI_HEVC_PROFILE_MAIN10, HFI_HEVC_LEVEL_5 | HFI_HEVC_TIER_HIGH0},
+ .num_pl = 2,
+ .fmts[0] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12},
+ .fmts[1] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12_UBWC},
+ .num_fmts = 2,
+} };
+
+static const struct hfi_plat_caps *get_capabilities(struct venus_core *core,
+ unsigned int *entries)
+{
+ *entries = is_lite(core) ? ARRAY_SIZE(caps_lite) : ARRAY_SIZE(caps);
+
+ return is_lite(core) ? caps_lite : caps;
}
-static void get_codecs(u32 *enc_codecs, u32 *dec_codecs, u32 *count)
+static void get_codecs(struct venus_core *core,
+ u32 *enc_codecs, u32 *dec_codecs, u32 *count)
{
- *enc_codecs = HFI_VIDEO_CODEC_H264 | HFI_VIDEO_CODEC_HEVC |
- HFI_VIDEO_CODEC_VP8;
- *dec_codecs = HFI_VIDEO_CODEC_H264 | HFI_VIDEO_CODEC_HEVC |
- HFI_VIDEO_CODEC_VP8 | HFI_VIDEO_CODEC_VP9 |
- HFI_VIDEO_CODEC_MPEG2;
- *count = 8;
+ const struct hfi_plat_caps *caps;
+ unsigned int num;
+ size_t i;
+
+ *enc_codecs = 0;
+ *dec_codecs = 0;
+
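+	/* Accumulate the per-domain codec masks from the active capability table. */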
+ caps = get_capabilities(core, &num);
+
+ for (i = 0; i < num; caps++, i++) {
+ if (caps->domain == VIDC_SESSION_TYPE_ENC)
+ *enc_codecs |= caps->codec;
+ else
+ *dec_codecs |= caps->codec;
+ }
+
+ *count = num;
}
static const struct hfi_platform_codec_freq_data codec_freq_data[] = {
@@ -272,13 +403,29 @@ static const struct hfi_platform_codec_freq_data codec_freq_data[] = {
{ V4L2_PIX_FMT_VP9, VIDC_SESSION_TYPE_DEC, 200, 10, 200 },
};
+static const struct hfi_platform_codec_freq_data codec_freq_data_lite[] = {
+ { V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_DEC, 440, 0, 440 },
+ { V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_DEC, 440, 0, 440 },
+ { V4L2_PIX_FMT_VP9, VIDC_SESSION_TYPE_DEC, 440, 0, 440 },
+ { V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_ENC, 675, 0, 675 },
+ { V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_ENC, 675, 0, 675 },
+};
+
static const struct hfi_platform_codec_freq_data *
-get_codec_freq_data(u32 session_type, u32 pixfmt)
+get_codec_freq_data(struct venus_core *core, u32 session_type, u32 pixfmt)
{
- const struct hfi_platform_codec_freq_data *data = codec_freq_data;
- unsigned int i, data_size = ARRAY_SIZE(codec_freq_data);
+ const struct hfi_platform_codec_freq_data *data;
+ unsigned int i, data_size;
const struct hfi_platform_codec_freq_data *found = NULL;
+ if (is_lite(core)) {
+ data = codec_freq_data_lite;
+ data_size = ARRAY_SIZE(codec_freq_data_lite);
+ } else {
+ data = codec_freq_data;
+ data_size = ARRAY_SIZE(codec_freq_data);
+ }
+
for (i = 0; i < data_size; i++) {
if (data[i].pixfmt == pixfmt && data[i].session_type == session_type) {
found = &data[i];
@@ -289,33 +436,36 @@ get_codec_freq_data(u32 session_type, u32 pixfmt)
return found;
}
-static unsigned long codec_vpp_freq(u32 session_type, u32 codec)
+static unsigned long codec_vpp_freq(struct venus_core *core,
+ u32 session_type, u32 codec)
{
const struct hfi_platform_codec_freq_data *data;
- data = get_codec_freq_data(session_type, codec);
+ data = get_codec_freq_data(core, session_type, codec);
if (data)
return data->vpp_freq;
return 0;
}
-static unsigned long codec_vsp_freq(u32 session_type, u32 codec)
+static unsigned long codec_vsp_freq(struct venus_core *core,
+ u32 session_type, u32 codec)
{
const struct hfi_platform_codec_freq_data *data;
- data = get_codec_freq_data(session_type, codec);
+ data = get_codec_freq_data(core, session_type, codec);
if (data)
return data->vsp_freq;
return 0;
}
-static unsigned long codec_lp_freq(u32 session_type, u32 codec)
+static unsigned long codec_lp_freq(struct venus_core *core,
+ u32 session_type, u32 codec)
{
const struct hfi_platform_codec_freq_data *data;
- data = get_codec_freq_data(session_type, codec);
+ data = get_codec_freq_data(core, session_type, codec);
if (data)
return data->low_power_freq;
diff --git a/drivers/media/platform/qcom/venus/hfi_platform_v6.c b/drivers/media/platform/qcom/venus/hfi_platform_v6.c
index 4e8af645f8b9..d8568c08cc36 100644
--- a/drivers/media/platform/qcom/venus/hfi_platform_v6.c
+++ b/drivers/media/platform/qcom/venus/hfi_platform_v6.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*/
+#include "core.h"
#include "hfi_platform.h"
static const struct hfi_plat_caps caps[] = {
@@ -245,14 +246,22 @@ static const struct hfi_plat_caps caps[] = {
.num_fmts = 4,
} };
-static const struct hfi_plat_caps *get_capabilities(unsigned int *entries)
+static const struct hfi_plat_caps *get_capabilities(struct venus_core *core,
+ unsigned int *entries)
{
+ if (is_lite(core))
+ return NULL;
+
*entries = ARRAY_SIZE(caps);
return caps;
}
-static void get_codecs(u32 *enc_codecs, u32 *dec_codecs, u32 *count)
+static void get_codecs(struct venus_core *core, u32 *enc_codecs,
+ u32 *dec_codecs, u32 *count)
{
+ if (is_lite(core))
+ return;
+
*enc_codecs = HFI_VIDEO_CODEC_H264 | HFI_VIDEO_CODEC_HEVC |
HFI_VIDEO_CODEC_VP8;
*dec_codecs = HFI_VIDEO_CODEC_H264 | HFI_VIDEO_CODEC_HEVC |
@@ -273,12 +282,15 @@ static const struct hfi_platform_codec_freq_data codec_freq_data[] = {
};
static const struct hfi_platform_codec_freq_data *
-get_codec_freq_data(u32 session_type, u32 pixfmt)
+get_codec_freq_data(struct venus_core *core, u32 session_type, u32 pixfmt)
{
const struct hfi_platform_codec_freq_data *data = codec_freq_data;
unsigned int i, data_size = ARRAY_SIZE(codec_freq_data);
const struct hfi_platform_codec_freq_data *found = NULL;
+ if (is_lite(core))
+ return NULL;
+
for (i = 0; i < data_size; i++) {
if (data[i].pixfmt == pixfmt && data[i].session_type == session_type) {
found = &data[i];
@@ -289,33 +301,36 @@ get_codec_freq_data(u32 session_type, u32 pixfmt)
return found;
}
-static unsigned long codec_vpp_freq(u32 session_type, u32 codec)
+static unsigned long codec_vpp_freq(struct venus_core *core, u32 session_type,
+ u32 codec)
{
const struct hfi_platform_codec_freq_data *data;
- data = get_codec_freq_data(session_type, codec);
+ data = get_codec_freq_data(core, session_type, codec);
if (data)
return data->vpp_freq;
return 0;
}
-static unsigned long codec_vsp_freq(u32 session_type, u32 codec)
+static unsigned long codec_vsp_freq(struct venus_core *core, u32 session_type,
+ u32 codec)
{
const struct hfi_platform_codec_freq_data *data;
- data = get_codec_freq_data(session_type, codec);
+ data = get_codec_freq_data(core, session_type, codec);
if (data)
return data->vsp_freq;
return 0;
}
-static unsigned long codec_lp_freq(u32 session_type, u32 codec)
+static unsigned long codec_lp_freq(struct venus_core *core, u32 session_type,
+ u32 codec)
{
const struct hfi_platform_codec_freq_data *data;
- data = get_codec_freq_data(session_type, codec);
+ data = get_codec_freq_data(core, session_type, codec);
if (data)
return data->low_power_freq;
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
index cec7f5964d3d..d3da35f67fd5 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus.c
+++ b/drivers/media/platform/qcom/venus/hfi_venus.c
@@ -380,7 +380,7 @@ static void venus_soft_int(struct venus_hfi_device *hdev)
void __iomem *cpu_ic_base = hdev->core->cpu_ic_base;
u32 clear_bit;
- if (IS_V6(hdev->core))
+ if (IS_V6(hdev->core) || (IS_V4(hdev->core) && is_lite(hdev->core)))
clear_bit = BIT(CPU_IC_SOFTINT_H2A_SHIFT_V6);
else
clear_bit = BIT(CPU_IC_SOFTINT_H2A_SHIFT);
@@ -501,9 +501,11 @@ static int venus_boot_core(struct venus_hfi_device *hdev)
if (count >= max_tries)
ret = -ETIMEDOUT;
- if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core)) {
+ if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core) || IS_AR50_LITE(hdev->core)) {
writel(0x1, cpu_cs_base + CPU_CS_H2XSOFTINTEN_V6);
- writel(0x0, cpu_cs_base + CPU_CS_X2RPMH_V6);
+
+ if (!IS_AR50_LITE(hdev->core))
+ writel(0x0, cpu_cs_base + CPU_CS_X2RPMH_V6);
}
return ret;
@@ -569,6 +571,9 @@ static int venus_halt_axi(struct venus_hfi_device *hdev)
u32 mask_val;
int ret;
+ if (IS_AR50_LITE(hdev->core))
+ return 0;
+
if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core)) {
writel(0x3, cpu_cs_base + CPU_CS_X2RPMH_V6);
@@ -1138,7 +1143,13 @@ static irqreturn_t venus_isr(struct venus_core *core)
wrapper_base = hdev->core->wrapper_base;
status = readl(wrapper_base + WRAPPER_INTR_STATUS);
- if (IS_IRIS2(core) || IS_IRIS2_1(core)) {
+
+ if (IS_AR50_LITE(core)) {
+ if (status & WRAPPER_INTR_STATUS_A2H_MASK ||
+ status & WRAPPER_INTR_STATUS_A2HWD_MASK_V4_LITE ||
+ status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
+ hdev->irq_status = status;
+ } else if (IS_IRIS2(core) || IS_IRIS2_1(core)) {
if (status & WRAPPER_INTR_STATUS_A2H_MASK ||
status & WRAPPER_INTR_STATUS_A2HWD_MASK_V6 ||
status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
@@ -1150,7 +1161,7 @@ static irqreturn_t venus_isr(struct venus_core *core)
hdev->irq_status = status;
}
writel(1, cpu_cs_base + CPU_CS_A2HSOFTINTCLR);
- if (!(IS_IRIS2(core) || IS_IRIS2_1(core)))
+ if (!(IS_IRIS2(core) || IS_IRIS2_1(core) || IS_AR50_LITE(core)))
writel(status, wrapper_base + WRAPPER_INTR_CLEAR);
return IRQ_WAKE_THREAD;
@@ -1535,7 +1546,7 @@ static bool venus_cpu_and_video_core_idle(struct venus_hfi_device *hdev)
void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
u32 ctrl_status, cpu_status;
- if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core))
+ if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core) || IS_AR50_LITE(hdev->core))
cpu_status = readl(wrapper_tz_base + WRAPPER_TZ_CPU_STATUS_V6);
else
cpu_status = readl(wrapper_base + WRAPPER_CPU_STATUS);
@@ -1555,7 +1566,7 @@ static bool venus_cpu_idle_and_pc_ready(struct venus_hfi_device *hdev)
void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
u32 ctrl_status, cpu_status;
- if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core))
+ if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core) || IS_AR50_LITE(hdev->core))
cpu_status = readl(wrapper_tz_base + WRAPPER_TZ_CPU_STATUS_V6);
else
cpu_status = readl(wrapper_base + WRAPPER_CPU_STATUS);
diff --git a/drivers/media/platform/qcom/venus/hfi_venus_io.h b/drivers/media/platform/qcom/venus/hfi_venus_io.h
index 9735a246ce36..f2c3064c44ae 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus_io.h
+++ b/drivers/media/platform/qcom/venus/hfi_venus_io.h
@@ -51,6 +51,9 @@
/* Venus cpu */
#define CPU_CS_SCIACMDARG3 0x58
+#define CPU_CS_VCICMD 0x20
+#define CPU_CS_VCICMD_ARP_OFF BIT(0)
+
#define SFR_ADDR 0x5c
#define MMAP_ADDR 0x60
#define UC_REGION_ADDR 0x64
@@ -100,6 +103,7 @@
#define WRAPPER_INTR_MASK_A2HCPU_MASK 0x4
#define WRAPPER_INTR_MASK_A2HCPU_SHIFT 0x2
+#define WRAPPER_INTR_STATUS_A2HWD_MASK_V4_LITE 0x10
#define WRAPPER_INTR_STATUS_A2HWD_MASK_V6 0x8
#define WRAPPER_INTR_MASK_A2HWD_BASK_V6 0x8
diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
index e32f8862a9f9..f0269524ac70 100644
--- a/drivers/media/platform/qcom/venus/pm_helpers.c
+++ b/drivers/media/platform/qcom/venus/pm_helpers.c
@@ -40,6 +40,8 @@ static int core_clks_get(struct venus_core *core)
static int core_clks_enable(struct venus_core *core)
{
+ const struct freq_tbl *freq_tbl = core->res->freq_tbl;
+ unsigned int freq_tbl_size = core->res->freq_tbl_size;
const struct venus_resources *res = core->res;
struct device *dev = core->dev;
unsigned long freq = 0;
@@ -48,11 +50,16 @@ static int core_clks_enable(struct venus_core *core)
int ret;
opp = dev_pm_opp_find_freq_ceil(dev, &freq);
- if (!IS_ERR(opp))
+ if (IS_ERR(opp)) {
+ if (!freq_tbl)
+ return -ENODEV;
+ freq = freq_tbl[freq_tbl_size - 1].freq;
+ } else {
dev_pm_opp_put(opp);
+ }
for (i = 0; i < res->clks_num; i++) {
- if (IS_V6(core)) {
+ if (IS_V6(core) || (IS_V4(core) && is_lite(core))) {
ret = clk_set_rate(core->clks[i], freq);
if (ret)
goto err;
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
index 29b0d6a5303d..55c27345b7d8 100644
--- a/drivers/media/platform/qcom/venus/vdec.c
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -1732,9 +1732,8 @@ static int vdec_open(struct file *file)
v4l2_fh_init(&inst->fh, core->vdev_dec);
inst->fh.ctrl_handler = &inst->ctrl_handler;
- v4l2_fh_add(&inst->fh);
+ v4l2_fh_add(&inst->fh, file);
inst->fh.m2m_ctx = inst->m2m_ctx;
- file->private_data = &inst->fh;
return 0;
@@ -1755,7 +1754,7 @@ static int vdec_close(struct file *file)
vdec_pm_get(inst);
cancel_work_sync(&inst->delayed_process_work);
- venus_close_common(inst);
+ venus_close_common(inst, file);
ida_destroy(&inst->dpb_ids);
vdec_pm_put(inst, false);
diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
index c0a0ccdded80..fba07557a399 100644
--- a/drivers/media/platform/qcom/venus/venc.c
+++ b/drivers/media/platform/qcom/venus/venc.c
@@ -1515,9 +1515,8 @@ static int venc_open(struct file *file)
v4l2_fh_init(&inst->fh, core->vdev_enc);
inst->fh.ctrl_handler = &inst->ctrl_handler;
- v4l2_fh_add(&inst->fh);
+ v4l2_fh_add(&inst->fh, file);
inst->fh.m2m_ctx = inst->m2m_ctx;
- file->private_data = &inst->fh;
return 0;
@@ -1537,7 +1536,7 @@ static int venc_close(struct file *file)
struct venus_inst *inst = to_inst(file);
venc_pm_get(inst);
- venus_close_common(inst);
+ venus_close_common(inst, file);
inst->enc_state = VENUS_ENC_STATE_DEINIT;
venc_pm_put(inst, false);
diff --git a/drivers/media/platform/raspberrypi/pisp_be/pisp_be.c b/drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
index b30891718d8d..d60d92d2ffa1 100644
--- a/drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
+++ b/drivers/media/platform/raspberrypi/pisp_be/pisp_be.c
@@ -950,7 +950,6 @@ static void pispbe_node_stop_streaming(struct vb2_queue *q)
kfree(job);
}
- pm_runtime_mark_last_busy(pispbe->dev);
pm_runtime_put_autosuspend(pispbe->dev);
dev_dbg(pispbe->dev, "Nodes streaming now 0x%x\n",
@@ -1742,7 +1741,6 @@ static int pispbe_probe(struct platform_device *pdev)
if (ret)
goto disable_devs_err;
- pm_runtime_mark_last_busy(pispbe->dev);
pm_runtime_put_autosuspend(pispbe->dev);
return 0;
diff --git a/drivers/media/platform/raspberrypi/rp1-cfe/csi2.c b/drivers/media/platform/raspberrypi/rp1-cfe/csi2.c
index 35c2ab1e2cd4..2c5b4d24b4e6 100644
--- a/drivers/media/platform/raspberrypi/rp1-cfe/csi2.c
+++ b/drivers/media/platform/raspberrypi/rp1-cfe/csi2.c
@@ -525,7 +525,7 @@ static const struct v4l2_subdev_internal_ops csi2_internal_ops = {
int csi2_init(struct csi2_device *csi2, struct dentry *debugfs)
{
- unsigned int ret;
+ int ret;
spin_lock_init(&csi2->errors_lock);
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-core.c b/drivers/media/platform/renesas/rcar-vin/rcar-core.c
index f73729f59671..100105b620e3 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-core.c
@@ -849,7 +849,7 @@ static int rvin_isp_init(struct rvin_dev *vin)
* Suspend / Resume
*/
-static int __maybe_unused rvin_suspend(struct device *dev)
+static int rvin_suspend(struct device *dev)
{
struct rvin_dev *vin = dev_get_drvdata(dev);
@@ -861,7 +861,7 @@ static int __maybe_unused rvin_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused rvin_resume(struct device *dev)
+static int rvin_resume(struct device *dev)
{
struct rvin_dev *vin = dev_get_drvdata(dev);
@@ -1276,13 +1276,13 @@ static void rcar_vin_remove(struct platform_device *pdev)
rvin_dma_unregister(vin);
}
-static SIMPLE_DEV_PM_OPS(rvin_pm_ops, rvin_suspend, rvin_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(rvin_pm_ops, rvin_suspend, rvin_resume);
static struct platform_driver rcar_vin_driver = {
.driver = {
.name = "rcar-vin",
.suppress_bind_attrs = true,
- .pm = &rvin_pm_ops,
+ .pm = pm_sleep_ptr(&rvin_pm_ops),
.of_match_table = rvin_of_id_table,
},
.probe = rcar_vin_probe,
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c b/drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c
index 62eddf3a35fc..079dbaf016c2 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c
@@ -588,8 +588,6 @@ static int rvin_open(struct file *file)
if (ret)
goto err_pm;
- file->private_data = vin;
-
ret = v4l2_fh_open(file);
if (ret)
goto err_unlock;
diff --git a/drivers/media/platform/renesas/rcar_drif.c b/drivers/media/platform/renesas/rcar_drif.c
index fc8b6bbef793..11bf47fb8266 100644
--- a/drivers/media/platform/renesas/rcar_drif.c
+++ b/drivers/media/platform/renesas/rcar_drif.c
@@ -1446,18 +1446,18 @@ static void rcar_drif_remove(struct platform_device *pdev)
}
/* FIXME: Implement suspend/resume support */
-static int __maybe_unused rcar_drif_suspend(struct device *dev)
+static int rcar_drif_suspend(struct device *dev)
{
return 0;
}
-static int __maybe_unused rcar_drif_resume(struct device *dev)
+static int rcar_drif_resume(struct device *dev)
{
return 0;
}
-static SIMPLE_DEV_PM_OPS(rcar_drif_pm_ops, rcar_drif_suspend,
- rcar_drif_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(rcar_drif_pm_ops, rcar_drif_suspend,
+ rcar_drif_resume);
static const struct of_device_id rcar_drif_of_table[] = {
{ .compatible = "renesas,rcar-gen3-drif" },
@@ -1470,8 +1470,8 @@ static struct platform_driver rcar_drif_driver = {
.driver = {
.name = RCAR_DRIF_DRV_NAME,
.of_match_table = rcar_drif_of_table,
- .pm = &rcar_drif_pm_ops,
- },
+ .pm = pm_sleep_ptr(&rcar_drif_pm_ops),
+ },
.probe = rcar_drif_probe,
.remove = rcar_drif_remove,
};
diff --git a/drivers/media/platform/renesas/rcar_fdp1.c b/drivers/media/platform/renesas/rcar_fdp1.c
index 5d453a7a8988..e615c56083f1 100644
--- a/drivers/media/platform/renesas/rcar_fdp1.c
+++ b/drivers/media/platform/renesas/rcar_fdp1.c
@@ -630,9 +630,9 @@ struct fdp1_ctx {
struct fdp1_field_buffer *previous;
};
-static inline struct fdp1_ctx *fh_to_ctx(struct v4l2_fh *fh)
+static inline struct fdp1_ctx *file_to_ctx(struct file *filp)
{
- return container_of(fh, struct fdp1_ctx, fh);
+ return container_of(file_to_v4l2_fh(filp), struct fdp1_ctx, fh);
}
static struct fdp1_q_data *get_q_data(struct fdp1_ctx *ctx,
@@ -1406,8 +1406,8 @@ static int fdp1_enum_fmt_vid_out(struct file *file, void *priv,
static int fdp1_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
+ struct fdp1_ctx *ctx = file_to_ctx(file);
struct fdp1_q_data *q_data;
- struct fdp1_ctx *ctx = fh_to_ctx(priv);
if (!v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type))
return -EINVAL;
@@ -1584,7 +1584,7 @@ static void fdp1_try_fmt_capture(struct fdp1_ctx *ctx,
static int fdp1_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct fdp1_ctx *ctx = fh_to_ctx(priv);
+ struct fdp1_ctx *ctx = file_to_ctx(file);
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
fdp1_try_fmt_output(ctx, NULL, &f->fmt.pix_mp);
@@ -1655,7 +1655,7 @@ static void fdp1_set_format(struct fdp1_ctx *ctx,
static int fdp1_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct fdp1_ctx *ctx = fh_to_ctx(priv);
+ struct fdp1_ctx *ctx = file_to_ctx(file);
struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
struct vb2_queue *vq = v4l2_m2m_get_vq(m2m_ctx, f->type);
@@ -2088,7 +2088,6 @@ static int fdp1_open(struct file *file)
}
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
ctx->fdp1 = fdp1;
/* Initialise Queues */
@@ -2137,7 +2136,7 @@ static int fdp1_open(struct file *file)
if (ret < 0)
goto error_pm;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
dprintk(fdp1, "Created instance: %p, m2m_ctx: %p\n",
ctx, ctx->fh.m2m_ctx);
@@ -2158,11 +2157,11 @@ done:
static int fdp1_release(struct file *file)
{
struct fdp1_dev *fdp1 = video_drvdata(file);
- struct fdp1_ctx *ctx = fh_to_ctx(file->private_data);
+ struct fdp1_ctx *ctx = file_to_ctx(file);
dprintk(fdp1, "Releasing instance %p\n", ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->hdl);
mutex_lock(&fdp1->dev_mutex);
@@ -2409,7 +2408,7 @@ static void fdp1_remove(struct platform_device *pdev)
rcar_fcp_put(fdp1->fcp);
}
-static int __maybe_unused fdp1_pm_runtime_suspend(struct device *dev)
+static int fdp1_pm_runtime_suspend(struct device *dev)
{
struct fdp1_dev *fdp1 = dev_get_drvdata(dev);
@@ -2418,7 +2417,7 @@ static int __maybe_unused fdp1_pm_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused fdp1_pm_runtime_resume(struct device *dev)
+static int fdp1_pm_runtime_resume(struct device *dev)
{
struct fdp1_dev *fdp1 = dev_get_drvdata(dev);
@@ -2429,9 +2428,7 @@ static int __maybe_unused fdp1_pm_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops fdp1_pm_ops = {
- SET_RUNTIME_PM_OPS(fdp1_pm_runtime_suspend,
- fdp1_pm_runtime_resume,
- NULL)
+ RUNTIME_PM_OPS(fdp1_pm_runtime_suspend, fdp1_pm_runtime_resume, NULL)
};
static const struct of_device_id fdp1_dt_ids[] = {
@@ -2446,7 +2443,7 @@ static struct platform_driver fdp1_pdrv = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = fdp1_dt_ids,
- .pm = &fdp1_pm_ops,
+ .pm = pm_ptr(&fdp1_pm_ops),
},
};
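
The fdp1 hunks apply the runtime-PM counterpart of the same cleanup: RUNTIME_PM_OPS() replaces SET_RUNTIME_PM_OPS(), and the driver registers the ops through pm_ptr(), which compiles to NULL when CONFIG_PM is disabled. A sketch, placeholder names again:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int my_runtime_suspend(struct device *dev) { return 0; }
static int my_runtime_resume(struct device *dev) { return 0; }

static const struct dev_pm_ops my_pm_ops = {
	/* Unlike SET_RUNTIME_PM_OPS(), this always references the
	 * callbacks, so __maybe_unused is no longer required. */
	RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
};

static struct platform_driver my_driver = {
	.driver = {
		.name = "my-driver",
		.pm = pm_ptr(&my_pm_ops),	/* NULL unless CONFIG_PM */
	},
};

vsp1 below combines SYSTEM_SLEEP_PM_OPS() and RUNTIME_PM_OPS() in one table and still gates it with pm_ptr(); that is sound because CONFIG_PM_SLEEP selects CONFIG_PM, so a CONFIG_PM=n build has no use for either set of callbacks.
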
diff --git a/drivers/media/platform/renesas/rcar_jpu.c b/drivers/media/platform/renesas/rcar_jpu.c
index 81038df71bb5..46ea259a2bb9 100644
--- a/drivers/media/platform/renesas/rcar_jpu.c
+++ b/drivers/media/platform/renesas/rcar_jpu.c
@@ -480,9 +480,9 @@ static struct jpu_ctx *ctrl_to_ctx(struct v4l2_ctrl *c)
return container_of(c->handler, struct jpu_ctx, ctrl_handler);
}
-static struct jpu_ctx *fh_to_ctx(struct v4l2_fh *fh)
+static struct jpu_ctx *file_to_ctx(struct file *filp)
{
- return container_of(fh, struct jpu_ctx, fh);
+ return container_of(file_to_v4l2_fh(filp), struct jpu_ctx, fh);
}
static void jpu_set_tbl(struct jpu *jpu, u32 reg, const unsigned int *tbl,
@@ -656,7 +656,7 @@ static u8 jpu_parse_hdr(void *buffer, unsigned long size, unsigned int *width,
static int jpu_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct jpu_ctx *ctx = fh_to_ctx(priv);
+ struct jpu_ctx *ctx = file_to_ctx(file);
if (ctx->encoder)
strscpy(cap->card, DRV_NAME " encoder", sizeof(cap->card));
@@ -714,7 +714,7 @@ static int jpu_enum_fmt(struct v4l2_fmtdesc *f, u32 type)
static int jpu_enum_fmt_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct jpu_ctx *ctx = fh_to_ctx(priv);
+ struct jpu_ctx *ctx = file_to_ctx(file);
return jpu_enum_fmt(f, ctx->encoder ? JPU_ENC_CAPTURE :
JPU_DEC_CAPTURE);
@@ -723,7 +723,7 @@ static int jpu_enum_fmt_cap(struct file *file, void *priv,
static int jpu_enum_fmt_out(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct jpu_ctx *ctx = fh_to_ctx(priv);
+ struct jpu_ctx *ctx = file_to_ctx(file);
return jpu_enum_fmt(f, ctx->encoder ? JPU_ENC_OUTPUT : JPU_DEC_OUTPUT);
}
@@ -823,7 +823,7 @@ static int __jpu_try_fmt(struct jpu_ctx *ctx, struct jpu_fmt **fmtinfo,
static int jpu_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct jpu_ctx *ctx = fh_to_ctx(priv);
+ struct jpu_ctx *ctx = file_to_ctx(file);
if (!v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type))
return -EINVAL;
@@ -834,7 +834,7 @@ static int jpu_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
static int jpu_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct vb2_queue *vq;
- struct jpu_ctx *ctx = fh_to_ctx(priv);
+ struct jpu_ctx *ctx = file_to_ctx(file);
struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
struct jpu_fmt *fmtinfo;
struct jpu_q_data *q_data;
@@ -863,8 +863,8 @@ static int jpu_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
static int jpu_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
+ struct jpu_ctx *ctx = file_to_ctx(file);
struct jpu_q_data *q_data;
- struct jpu_ctx *ctx = fh_to_ctx(priv);
if (!v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type))
return -EINVAL;
@@ -897,8 +897,8 @@ static const struct v4l2_ctrl_ops jpu_ctrl_ops = {
static int jpu_streamon(struct file *file, void *priv, enum v4l2_buf_type type)
{
- struct jpu_ctx *ctx = fh_to_ctx(priv);
struct jpu_q_data *src_q_data, *dst_q_data, *orig, adj, *ref;
+ struct jpu_ctx *ctx = file_to_ctx(file);
enum v4l2_buf_type adj_type;
src_q_data = jpu_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
@@ -1226,8 +1226,7 @@ static int jpu_open(struct file *file)
v4l2_fh_init(&ctx->fh, vfd);
ctx->fh.ctrl_handler = &ctx->ctrl_handler;
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
ctx->jpu = jpu;
ctx->encoder = vfd == &jpu->vfd_encoder;
@@ -1272,7 +1271,7 @@ jpu_reset_rollback:
device_prepare_rollback:
mutex_unlock(&jpu->mutex);
v4l_prepare_rollback:
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
return ret;
@@ -1280,12 +1279,12 @@ v4l_prepare_rollback:
static int jpu_release(struct file *file)
{
+ struct jpu_ctx *ctx = file_to_ctx(file);
struct jpu *jpu = video_drvdata(file);
- struct jpu_ctx *ctx = fh_to_ctx(file->private_data);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
@@ -1745,6 +1744,6 @@ static struct platform_driver jpu_driver = {
module_platform_driver(jpu_driver);
MODULE_ALIAS("platform:" DRV_NAME);
-MODULE_AUTHOR("Mikhail Ulianov <mikhail.ulyanov@cogentembedded.com>");
+MODULE_AUTHOR("Mikhail Ulianov");
MODULE_DESCRIPTION("Renesas R-Car JPEG processing unit driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/renesas/renesas-ceu.c b/drivers/media/platform/renesas/renesas-ceu.c
index 8cceafe491b1..deed49d0fb10 100644
--- a/drivers/media/platform/renesas/renesas-ceu.c
+++ b/drivers/media/platform/renesas/renesas-ceu.c
@@ -1048,7 +1048,7 @@ static int ceu_init_mbus_fmt(struct ceu_device *ceudev)
/*
* ceu_runtime_resume() - soft-reset the interface and turn sensor power on.
*/
-static int __maybe_unused ceu_runtime_resume(struct device *dev)
+static int ceu_runtime_resume(struct device *dev)
{
struct ceu_device *ceudev = dev_get_drvdata(dev);
struct v4l2_subdev *v4l2_sd = ceudev->sd->v4l2_sd;
@@ -1064,7 +1064,7 @@ static int __maybe_unused ceu_runtime_resume(struct device *dev)
* ceu_runtime_suspend() - disable capture and interrupts and soft-reset.
* Turn sensor power off.
*/
-static int __maybe_unused ceu_runtime_suspend(struct device *dev)
+static int ceu_runtime_suspend(struct device *dev)
{
struct ceu_device *ceudev = dev_get_drvdata(dev);
struct v4l2_subdev *v4l2_sd = ceudev->sd->v4l2_sd;
@@ -1709,15 +1709,13 @@ static void ceu_remove(struct platform_device *pdev)
}
static const struct dev_pm_ops ceu_pm_ops = {
- SET_RUNTIME_PM_OPS(ceu_runtime_suspend,
- ceu_runtime_resume,
- NULL)
+ RUNTIME_PM_OPS(ceu_runtime_suspend, ceu_runtime_resume, NULL)
};
static struct platform_driver ceu_driver = {
.driver = {
.name = DRIVER_NAME,
- .pm = &ceu_pm_ops,
+ .pm = pm_ptr(&ceu_pm_ops),
.of_match_table = of_match_ptr(ceu_of_match),
},
.probe = ceu_probe,
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c
index 806acc8f9728..3c5fbd857371 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c
@@ -367,7 +367,6 @@ static const struct rzg2l_cru_info rzg3e_cru_info = {
.enable_interrupts = rzg3e_cru_enable_interrupts,
.disable_interrupts = rzg3e_cru_disable_interrupts,
.fifo_empty = rzg3e_fifo_empty,
- .csi_setup = rzg3e_cru_csi2_setup,
};
static const u16 rzg2l_cru_regs[] = {
@@ -412,7 +411,6 @@ static const struct rzg2l_cru_info rzg2l_cru_info = {
.enable_interrupts = rzg2l_cru_enable_interrupts,
.disable_interrupts = rzg2l_cru_disable_interrupts,
.fifo_empty = rzg2l_fifo_empty,
- .csi_setup = rzg2l_cru_csi2_setup,
};
static const struct of_device_id rzg2l_cru_of_id_table[] = {
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h
index be95b41c37df..3a200db15730 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h
@@ -92,9 +92,6 @@ struct rzg2l_cru_info {
void (*enable_interrupts)(struct rzg2l_cru_dev *cru);
void (*disable_interrupts)(struct rzg2l_cru_dev *cru);
bool (*fifo_empty)(struct rzg2l_cru_dev *cru);
- void (*csi_setup)(struct rzg2l_cru_dev *cru,
- const struct rzg2l_cru_ip_format *ip_fmt,
- u8 csi_vc);
};
/**
@@ -204,11 +201,5 @@ void rzg3e_cru_disable_interrupts(struct rzg2l_cru_dev *cru);
bool rzg2l_fifo_empty(struct rzg2l_cru_dev *cru);
bool rzg3e_fifo_empty(struct rzg2l_cru_dev *cru);
-void rzg2l_cru_csi2_setup(struct rzg2l_cru_dev *cru,
- const struct rzg2l_cru_ip_format *ip_fmt,
- u8 csi_vc);
-void rzg3e_cru_csi2_setup(struct rzg2l_cru_dev *cru,
- const struct rzg2l_cru_ip_format *ip_fmt,
- u8 csi_vc);
#endif
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
index a8817a7066b2..162e2ace6931 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
@@ -257,30 +257,18 @@ static void rzg2l_cru_initialize_axi(struct rzg2l_cru_dev *cru)
rzg2l_cru_write(cru, AMnAXIATTR, amnaxiattr);
}
-void rzg3e_cru_csi2_setup(struct rzg2l_cru_dev *cru,
- const struct rzg2l_cru_ip_format *ip_fmt,
- u8 csi_vc)
+static void rzg2l_cru_csi2_setup(struct rzg2l_cru_dev *cru,
+ const struct rzg2l_cru_ip_format *ip_fmt,
+ u8 csi_vc)
{
const struct rzg2l_cru_info *info = cru->info;
u32 icnmc = ICnMC_INF(ip_fmt->datatype);
- icnmc |= rzg2l_cru_read(cru, info->image_conv) & ~ICnMC_INF_MASK;
-
- /* Set virtual channel CSI2 */
- icnmc |= ICnMC_VCSEL(csi_vc);
-
- rzg2l_cru_write(cru, ICnSVCNUM, csi_vc);
- rzg2l_cru_write(cru, ICnSVC, ICnSVC_SVC0(0) | ICnSVC_SVC1(1) |
- ICnSVC_SVC2(2) | ICnSVC_SVC3(3));
- rzg2l_cru_write(cru, info->image_conv, icnmc);
-}
-
-void rzg2l_cru_csi2_setup(struct rzg2l_cru_dev *cru,
- const struct rzg2l_cru_ip_format *ip_fmt,
- u8 csi_vc)
-{
- const struct rzg2l_cru_info *info = cru->info;
- u32 icnmc = ICnMC_INF(ip_fmt->datatype);
+ if (cru->info->regs[ICnSVC]) {
+ rzg2l_cru_write(cru, ICnSVCNUM, csi_vc);
+ rzg2l_cru_write(cru, ICnSVC, ICnSVC_SVC0(0) | ICnSVC_SVC1(1) |
+ ICnSVC_SVC2(2) | ICnSVC_SVC3(3));
+ }
icnmc |= rzg2l_cru_read(cru, info->image_conv) & ~ICnMC_INF_MASK;
@@ -299,7 +287,7 @@ static int rzg2l_cru_initialize_image_conv(struct rzg2l_cru_dev *cru,
const struct rzg2l_cru_ip_format *cru_ip_fmt;
cru_ip_fmt = rzg2l_cru_ip_code_to_fmt(ip_sd_fmt->code);
- info->csi_setup(cru, cru_ip_fmt, csi_vc);
+ rzg2l_cru_csi2_setup(cru, cru_ip_fmt, csi_vc);
/* Output format */
cru_video_fmt = rzg2l_cru_ip_format_to_fmt(cru->format.pixelformat);
@@ -1078,7 +1066,6 @@ static int rzg2l_cru_open(struct file *file)
if (ret)
return ret;
- file->private_data = cru;
ret = v4l2_fh_open(file);
if (ret)
goto err_unlock;
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_drv.c b/drivers/media/platform/renesas/vsp1/vsp1_drv.c
index b8d06e88c475..6c64657fc4f3 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_drv.c
@@ -618,7 +618,7 @@ void vsp1_device_put(struct vsp1_device *vsp1)
* Power Management
*/
-static int __maybe_unused vsp1_pm_suspend(struct device *dev)
+static int vsp1_pm_suspend(struct device *dev)
{
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
@@ -634,7 +634,7 @@ static int __maybe_unused vsp1_pm_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused vsp1_pm_resume(struct device *dev)
+static int vsp1_pm_resume(struct device *dev)
{
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
@@ -650,7 +650,7 @@ static int __maybe_unused vsp1_pm_resume(struct device *dev)
return 0;
}
-static int __maybe_unused vsp1_pm_runtime_suspend(struct device *dev)
+static int vsp1_pm_runtime_suspend(struct device *dev)
{
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
@@ -660,7 +660,7 @@ static int __maybe_unused vsp1_pm_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused vsp1_pm_runtime_resume(struct device *dev)
+static int vsp1_pm_runtime_resume(struct device *dev)
{
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
int ret;
@@ -693,8 +693,8 @@ done:
}
static const struct dev_pm_ops vsp1_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(vsp1_pm_suspend, vsp1_pm_resume)
- SET_RUNTIME_PM_OPS(vsp1_pm_runtime_suspend, vsp1_pm_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(vsp1_pm_suspend, vsp1_pm_resume)
+ RUNTIME_PM_OPS(vsp1_pm_runtime_suspend, vsp1_pm_runtime_resume, NULL)
};
/* -----------------------------------------------------------------------------
@@ -1042,7 +1042,7 @@ static struct platform_driver vsp1_platform_driver = {
.remove = vsp1_remove,
.driver = {
.name = "vsp1",
- .pm = &vsp1_pm_ops,
+ .pm = pm_ptr(&vsp1_pm_ops),
.of_match_table = vsp1_of_match,
},
};
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_histo.c b/drivers/media/platform/renesas/vsp1/vsp1_histo.c
index c762202877ba..390ea50f1595 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_histo.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_histo.c
@@ -392,7 +392,7 @@ static const struct v4l2_subdev_ops histo_ops = {
static int histo_v4l2_querycap(struct file *file, void *fh,
struct v4l2_capability *cap)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct vsp1_histogram *histo = vdev_to_histo(vfh->vdev);
cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
@@ -409,7 +409,7 @@ static int histo_v4l2_querycap(struct file *file, void *fh,
static int histo_v4l2_enum_format(struct file *file, void *fh,
struct v4l2_fmtdesc *f)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct vsp1_histogram *histo = vdev_to_histo(vfh->vdev);
if (f->index > 0 || f->type != histo->queue.type)
@@ -423,7 +423,7 @@ static int histo_v4l2_enum_format(struct file *file, void *fh,
static int histo_v4l2_get_format(struct file *file, void *fh,
struct v4l2_format *format)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct vsp1_histogram *histo = vdev_to_histo(vfh->vdev);
struct v4l2_meta_format *meta = &format->fmt.meta;
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_video.c b/drivers/media/platform/renesas/vsp1/vsp1_video.c
index bc66fbdde3cc..75f9a1a85d55 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_video.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_video.c
@@ -896,7 +896,7 @@ static const struct vb2_ops vsp1_video_queue_qops = {
static int
vsp1_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct vsp1_video *video = to_vsp1_video(vfh->vdev);
cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
@@ -912,7 +912,7 @@ vsp1_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
static int vsp1_video_enum_format(struct file *file, void *fh,
struct v4l2_fmtdesc *f)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct vsp1_video *video = to_vsp1_video(vfh->vdev);
const struct vsp1_format_info *info;
@@ -933,7 +933,7 @@ static int vsp1_video_enum_format(struct file *file, void *fh,
static int
vsp1_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct vsp1_video *video = to_vsp1_video(vfh->vdev);
if (format->type != video->queue.type)
@@ -949,7 +949,7 @@ vsp1_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
static int
vsp1_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct vsp1_video *video = to_vsp1_video(vfh->vdev);
if (format->type != video->queue.type)
@@ -961,7 +961,7 @@ vsp1_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
static int
vsp1_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct vsp1_video *video = to_vsp1_video(vfh->vdev);
const struct vsp1_format_info *info;
int ret;
@@ -991,7 +991,7 @@ done:
static int
vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct vsp1_video *video = to_vsp1_video(vfh->vdev);
struct media_device *mdev = &video->vsp1->media_dev;
struct vsp1_pipeline *pipe;
@@ -1079,13 +1079,11 @@ static int vsp1_video_open(struct file *file)
return -ENOMEM;
v4l2_fh_init(vfh, &video->video);
- v4l2_fh_add(vfh);
-
- file->private_data = vfh;
+ v4l2_fh_add(vfh, file);
ret = vsp1_device_get(video->vsp1);
if (ret < 0) {
- v4l2_fh_del(vfh);
+ v4l2_fh_del(vfh, file);
v4l2_fh_exit(vfh);
kfree(vfh);
}
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_vspx.c b/drivers/media/platform/renesas/vsp1/vsp1_vspx.c
index a754b92232bd..1673479be0ff 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_vspx.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_vspx.c
@@ -286,6 +286,7 @@ void vsp1_isp_free_buffer(struct device *dev,
dma_free_coherent(bus_master, buffer_desc->size, buffer_desc->cpu_addr,
buffer_desc->dma_addr);
}
+EXPORT_SYMBOL_GPL(vsp1_isp_free_buffer);
/**
* vsp1_isp_start_streaming - Start processing VSPX jobs
diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
index 3dccab5fa4a1..776046de979a 100644
--- a/drivers/media/platform/rockchip/rga/rga.c
+++ b/drivers/media/platform/rockchip/rga/rga.c
@@ -395,8 +395,7 @@ static int rga_open(struct file *file)
return ret;
}
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
rga_setup_ctrls(ctx);
@@ -411,8 +410,7 @@ static int rga_open(struct file *file)
static int rga_release(struct file *file)
{
- struct rga_ctx *ctx =
- container_of(file->private_data, struct rga_ctx, fh);
+ struct rga_ctx *ctx = file_to_rga_ctx(file);
struct rockchip_rga *rga = ctx->rga;
mutex_lock(&rga->mutex);
@@ -420,7 +418,7 @@ static int rga_release(struct file *file)
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
@@ -448,7 +446,7 @@ vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap)
return 0;
}
-static int vidioc_enum_fmt(struct file *file, void *prv, struct v4l2_fmtdesc *f)
+static int vidioc_enum_fmt(struct file *file, void *priv, struct v4l2_fmtdesc *f)
{
struct rga_fmt *fmt;
@@ -461,10 +459,10 @@ static int vidioc_enum_fmt(struct file *file, void *prv, struct v4l2_fmtdesc *f)
return 0;
}
-static int vidioc_g_fmt(struct file *file, void *prv, struct v4l2_format *f)
+static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix_fmt = &f->fmt.pix_mp;
- struct rga_ctx *ctx = prv;
+ struct rga_ctx *ctx = file_to_rga_ctx(file);
struct vb2_queue *vq;
struct rga_frame *frm;
@@ -483,7 +481,7 @@ static int vidioc_g_fmt(struct file *file, void *prv, struct v4l2_format *f)
return 0;
}
-static int vidioc_try_fmt(struct file *file, void *prv, struct v4l2_format *f)
+static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix_fmt = &f->fmt.pix_mp;
struct rga_fmt *fmt;
@@ -503,10 +501,10 @@ static int vidioc_try_fmt(struct file *file, void *prv, struct v4l2_format *f)
return 0;
}
-static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
+static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix_fmt = &f->fmt.pix_mp;
- struct rga_ctx *ctx = prv;
+ struct rga_ctx *ctx = file_to_rga_ctx(file);
struct rockchip_rga *rga = ctx->rga;
struct vb2_queue *vq;
struct rga_frame *frm;
@@ -516,7 +514,7 @@ static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
/* Adjust all values according to the hardware capabilities
* and chosen format.
*/
- ret = vidioc_try_fmt(file, prv, f);
+ ret = vidioc_try_fmt(file, priv, f);
if (ret)
return ret;
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
@@ -560,10 +558,10 @@ static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
return 0;
}
-static int vidioc_g_selection(struct file *file, void *prv,
+static int vidioc_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct rga_ctx *ctx = prv;
+ struct rga_ctx *ctx = file_to_rga_ctx(file);
struct rga_frame *f;
bool use_frame = false;
@@ -608,10 +606,10 @@ static int vidioc_g_selection(struct file *file, void *prv,
return 0;
}
-static int vidioc_s_selection(struct file *file, void *prv,
+static int vidioc_s_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct rga_ctx *ctx = prv;
+ struct rga_ctx *ctx = file_to_rga_ctx(file);
struct rockchip_rga *rga = ctx->rga;
struct rga_frame *f;
int ret = 0;
diff --git a/drivers/media/platform/rockchip/rga/rga.h b/drivers/media/platform/rockchip/rga/rga.h
index 530e12de73c4..72a28b120fab 100644
--- a/drivers/media/platform/rockchip/rga/rga.h
+++ b/drivers/media/platform/rockchip/rga/rga.h
@@ -68,6 +68,11 @@ struct rga_ctx {
u32 fill_color;
};
+static inline struct rga_ctx *file_to_rga_ctx(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct rga_ctx, fh);
+}
+
struct rockchip_rga {
struct v4l2_device v4l2_dev;
struct v4l2_m2m_dev *m2m_dev;
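
Worth noting about the rga conversion: the old handlers cast the void *priv cookie straight to struct rga_ctx * ("ctx = prv" in vidioc_g_fmt above), which was only correct as long as fh stayed the first member of struct rga_ctx, since file->private_data pointed at &ctx->fh rather than at ctx. The new helper encodes the relationship explicitly; a commented restatement of it:

#include <linux/container_of.h>

static inline struct rga_ctx *file_to_rga_ctx(struct file *filp)
{
	/*
	 * container_of() subtracts the offset of 'fh' inside struct
	 * rga_ctx, so this stays correct even if 'fh' is ever moved
	 * away from offset 0, unlike the old "ctx = prv" cast.
	 */
	return container_of(file_to_v4l2_fh(filp), struct rga_ctx, fh);
}
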
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
index 5f187f9efc7b..6028ecdd23de 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
@@ -24,6 +24,7 @@
#include "rkisp1-regs.h"
struct dentry;
+struct dev_pm_domain_list;
struct regmap;
/*
@@ -55,7 +56,7 @@ struct regmap;
#define RKISP1_BUS_INFO "platform:" RKISP1_DRIVER_NAME
/* maximum number of clocks */
-#define RKISP1_MAX_BUS_CLK 8
+#define RKISP1_MAX_BUS_CLK 4
/* a bitmask of the ready stats */
#define RKISP1_STATS_MEAS_MASK (RKISP1_CIF_ISP_AWB_DONE | \
@@ -139,27 +140,31 @@ enum rkisp1_feature {
/*
* struct rkisp1_info - Model-specific ISP Information
*
- * @clks: array of ISP clock names
- * @clk_size: number of entries in the @clks array
+ * @num_clocks: number of clocks
* @isrs: array of ISP interrupt descriptors
* @isr_size: number of entries in the @isrs array
* @isp_ver: ISP version
* @features: bitmask of rkisp1_feature features implemented by the ISP
* @max_width: maximum input frame width
* @max_height: maximum input frame height
+ * @pm_domains.names: names of the power domains
+ * @pm_domains.count: number of power domains
*
* This structure contains information about the ISP specific to a particular
* ISP model, version, or integration in a particular SoC.
*/
struct rkisp1_info {
- const char * const *clks;
- unsigned int clk_size;
+ unsigned int num_clocks;
const struct rkisp1_isr_data *isrs;
unsigned int isr_size;
enum rkisp1_cif_isp_version isp_ver;
unsigned int features;
unsigned int max_width;
unsigned int max_height;
+ struct {
+ const char * const *names;
+ unsigned int count;
+ } pm_domains;
};
/*
@@ -483,6 +488,7 @@ struct rkisp1_debug {
* @dev: a pointer to the struct device
* @clk_size: number of clocks
* @clks: array of clocks
+ * @pm_domains: power domains
* @gasket: the gasket - i.MX8MP only
* @gasket_id: the gasket ID (0 or 1) - i.MX8MP only
* @v4l2_dev: v4l2_device variable
@@ -507,6 +513,7 @@ struct rkisp1_device {
struct device *dev;
unsigned int clk_size;
struct clk_bulk_data clks[RKISP1_MAX_BUS_CLK];
+ struct dev_pm_domain_list *pm_domains;
struct regmap *gasket;
unsigned int gasket_id;
struct v4l2_device v4l2_dev;
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
index dc65a7924f8a..1791c02a40ae 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
@@ -8,6 +8,7 @@
* Copyright (C) 2017 Rockchip Electronics Co., Ltd.
*/
+#include <linux/build_bug.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
@@ -16,6 +17,7 @@
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-mc.h>
@@ -491,13 +493,6 @@ static irqreturn_t rkisp1_isr(int irq, void *ctx)
return ret;
}
-static const char * const px30_isp_clks[] = {
- "isp",
- "aclk",
- "hclk",
- "pclk",
-};
-
static const struct rkisp1_isr_data px30_isp_isrs[] = {
{ "isp", rkisp1_isp_isr, BIT(RKISP1_IRQ_ISP) },
{ "mi", rkisp1_capture_isr, BIT(RKISP1_IRQ_MI) },
@@ -505,8 +500,7 @@ static const struct rkisp1_isr_data px30_isp_isrs[] = {
};
static const struct rkisp1_info px30_isp_info = {
- .clks = px30_isp_clks,
- .clk_size = ARRAY_SIZE(px30_isp_clks),
+ .num_clocks = 4,
.isrs = px30_isp_isrs,
.isr_size = ARRAY_SIZE(px30_isp_isrs),
.isp_ver = RKISP1_V12,
@@ -518,19 +512,12 @@ static const struct rkisp1_info px30_isp_info = {
.max_height = 2448,
};
-static const char * const rk3399_isp_clks[] = {
- "isp",
- "aclk",
- "hclk",
-};
-
static const struct rkisp1_isr_data rk3399_isp_isrs[] = {
{ NULL, rkisp1_isr, BIT(RKISP1_IRQ_ISP) | BIT(RKISP1_IRQ_MI) | BIT(RKISP1_IRQ_MIPI) },
};
static const struct rkisp1_info rk3399_isp_info = {
- .clks = rk3399_isp_clks,
- .clk_size = ARRAY_SIZE(rk3399_isp_clks),
+ .num_clocks = 3,
.isrs = rk3399_isp_isrs,
.isr_size = ARRAY_SIZE(rk3399_isp_isrs),
.isp_ver = RKISP1_V10,
@@ -542,19 +529,17 @@ static const struct rkisp1_info rk3399_isp_info = {
.max_height = 3312,
};
-static const char * const imx8mp_isp_clks[] = {
- "isp",
- "hclk",
- "aclk",
-};
-
static const struct rkisp1_isr_data imx8mp_isp_isrs[] = {
{ NULL, rkisp1_isr, BIT(RKISP1_IRQ_ISP) | BIT(RKISP1_IRQ_MI) },
};
+static const char * const imx8mp_isp_pm_domains[] = {
+ "isp",
+ "csi2",
+};
+
static const struct rkisp1_info imx8mp_isp_info = {
- .clks = imx8mp_isp_clks,
- .clk_size = ARRAY_SIZE(imx8mp_isp_clks),
+ .num_clocks = 3,
.isrs = imx8mp_isp_isrs,
.isr_size = ARRAY_SIZE(imx8mp_isp_isrs),
.isp_ver = RKISP1_V_IMX8MP,
@@ -563,6 +548,10 @@ static const struct rkisp1_info imx8mp_isp_info = {
| RKISP1_FEATURE_COMPAND,
.max_width = 4096,
.max_height = 3072,
+ .pm_domains = {
+ .names = imx8mp_isp_pm_domains,
+ .count = ARRAY_SIZE(imx8mp_isp_pm_domains),
+ },
};
static const struct of_device_id rkisp1_of_match[] = {
@@ -582,6 +571,81 @@ static const struct of_device_id rkisp1_of_match[] = {
};
MODULE_DEVICE_TABLE(of, rkisp1_of_match);
+static const char * const rkisp1_clk_names[] = {
+ "isp",
+ "aclk",
+ "hclk",
+ "pclk",
+};
+
+static int rkisp1_init_clocks(struct rkisp1_device *rkisp1)
+{
+ const struct rkisp1_info *info = rkisp1->info;
+ unsigned int i;
+ int ret;
+
+ static_assert(ARRAY_SIZE(rkisp1_clk_names) == ARRAY_SIZE(rkisp1->clks));
+
+ for (i = 0; i < info->num_clocks; i++)
+ rkisp1->clks[i].id = rkisp1_clk_names[i];
+
+ ret = devm_clk_bulk_get(rkisp1->dev, info->num_clocks, rkisp1->clks);
+ if (ret)
+ return ret;
+
+ rkisp1->clk_size = info->num_clocks;
+
+ /*
+ * On i.MX8MP the pclk clock is needed to access the HDR stitching
+ * registers, but wasn't required by the DT bindings. Try to acquire it as
+ * an optional clock to avoid breaking backward compatibility.
+ */
+ if (info->isp_ver == RKISP1_V_IMX8MP) {
+ struct clk *clk;
+
+ clk = devm_clk_get_optional(rkisp1->dev, "pclk");
+ if (IS_ERR(clk))
+ return dev_err_probe(rkisp1->dev, PTR_ERR(clk),
+ "Failed to acquire pclk clock\n");
+
+ if (clk)
+ rkisp1->clks[rkisp1->clk_size++].clk = clk;
+ }
+
+ return 0;
+}
+
+static int rkisp1_init_pm_domains(struct rkisp1_device *rkisp1)
+{
+ const struct rkisp1_info *info = rkisp1->info;
+ struct dev_pm_domain_attach_data pm_domain_data = {
+ .pd_names = info->pm_domains.names,
+ .num_pd_names = info->pm_domains.count,
+ };
+ int ret;
+
+ /*
+ * Most platforms have a single power domain, which the PM domain core
+ * automatically attaches at probe time. When that's the case there's
+ * nothing to do here.
+ */
+ if (rkisp1->dev->pm_domain)
+ return 0;
+
+ if (!pm_domain_data.num_pd_names)
+ return 0;
+
+ ret = devm_pm_domain_attach_list(rkisp1->dev, &pm_domain_data,
+ &rkisp1->pm_domains);
+ if (ret < 0) {
+ dev_err_probe(rkisp1->dev, ret,
+ "Failed to attach power domains\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int rkisp1_probe(struct platform_device *pdev)
{
const struct rkisp1_info *info;
@@ -639,12 +703,13 @@ static int rkisp1_probe(struct platform_device *pdev)
}
}
- for (i = 0; i < info->clk_size; i++)
- rkisp1->clks[i].id = info->clks[i];
- ret = devm_clk_bulk_get(dev, info->clk_size, rkisp1->clks);
+ ret = rkisp1_init_clocks(rkisp1);
+ if (ret)
+ return ret;
+
+ ret = rkisp1_init_pm_domains(rkisp1);
if (ret)
return ret;
- rkisp1->clk_size = info->clk_size;
if (info->isp_ver == RKISP1_V_IMX8MP) {
unsigned int id;
diff --git a/drivers/media/platform/rockchip/rkvdec/rkvdec.c b/drivers/media/platform/rockchip/rkvdec/rkvdec.c
index d707088ec0dc..6e606d73ff51 100644
--- a/drivers/media/platform/rockchip/rkvdec/rkvdec.c
+++ b/drivers/media/platform/rockchip/rkvdec/rkvdec.c
@@ -354,7 +354,7 @@ static int rkvdec_try_capture_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
- struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
+ struct rkvdec_ctx *ctx = file_to_rkvdec_ctx(file);
const struct rkvdec_coded_fmt_desc *coded_desc;
/*
@@ -387,7 +387,7 @@ static int rkvdec_try_output_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
- struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
+ struct rkvdec_ctx *ctx = file_to_rkvdec_ctx(file);
const struct rkvdec_coded_fmt_desc *desc;
desc = rkvdec_find_coded_fmt_desc(pix_mp->pixelformat);
@@ -418,7 +418,7 @@ static int rkvdec_try_output_fmt(struct file *file, void *priv,
static int rkvdec_s_capture_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
+ struct rkvdec_ctx *ctx = file_to_rkvdec_ctx(file);
struct vb2_queue *vq;
int ret;
@@ -439,7 +439,7 @@ static int rkvdec_s_capture_fmt(struct file *file, void *priv,
static int rkvdec_s_output_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
+ struct rkvdec_ctx *ctx = file_to_rkvdec_ctx(file);
struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
const struct rkvdec_coded_fmt_desc *desc;
struct v4l2_format *cap_fmt;
@@ -504,7 +504,7 @@ static int rkvdec_s_output_fmt(struct file *file, void *priv,
static int rkvdec_g_output_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
+ struct rkvdec_ctx *ctx = file_to_rkvdec_ctx(file);
*f = ctx->coded_fmt;
return 0;
@@ -513,7 +513,7 @@ static int rkvdec_g_output_fmt(struct file *file, void *priv,
static int rkvdec_g_capture_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
+ struct rkvdec_ctx *ctx = file_to_rkvdec_ctx(file);
*f = ctx->decoded_fmt;
return 0;
@@ -532,7 +532,7 @@ static int rkvdec_enum_output_fmt(struct file *file, void *priv,
static int rkvdec_enum_capture_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
+ struct rkvdec_ctx *ctx = file_to_rkvdec_ctx(file);
u32 fourcc;
fourcc = rkvdec_enum_decoded_fmt(ctx, f->index, ctx->image_fmt);
@@ -765,7 +765,6 @@ static void rkvdec_job_finish(struct rkvdec_ctx *ctx,
{
struct rkvdec_dev *rkvdec = ctx->dev;
- pm_runtime_mark_last_busy(rkvdec->dev);
pm_runtime_put_autosuspend(rkvdec->dev);
rkvdec_job_finish_no_pm(ctx, result);
}
@@ -938,8 +937,7 @@ static int rkvdec_open(struct file *filp)
if (ret)
goto err_cleanup_m2m_ctx;
- filp->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, filp);
return 0;
@@ -953,9 +951,9 @@ err_free_ctx:
static int rkvdec_release(struct file *filp)
{
- struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(filp->private_data);
+ struct rkvdec_ctx *ctx = file_to_rkvdec_ctx(filp);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, filp);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
v4l2_fh_exit(&ctx->fh);
@@ -1159,13 +1157,6 @@ static int rkvdec_probe(struct platform_device *pdev)
return ret;
}
- if (iommu_get_domain_for_dev(&pdev->dev)) {
- rkvdec->empty_domain = iommu_paging_domain_alloc(rkvdec->dev);
-
- if (!rkvdec->empty_domain)
- dev_warn(rkvdec->dev, "cannot alloc new empty domain\n");
- }
-
vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
irq = platform_get_irq(pdev, 0);
@@ -1188,6 +1179,15 @@ static int rkvdec_probe(struct platform_device *pdev)
if (ret)
goto err_disable_runtime_pm;
+ if (iommu_get_domain_for_dev(&pdev->dev)) {
+ rkvdec->empty_domain = iommu_paging_domain_alloc(rkvdec->dev);
+
+ if (IS_ERR(rkvdec->empty_domain)) {
+ rkvdec->empty_domain = NULL;
+ dev_warn(rkvdec->dev, "cannot alloc new empty domain\n");
+ }
+ }
+
return 0;
err_disable_runtime_pm:
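
Besides moving the allocation after runtime-PM setup, the rkvdec hunk fixes a broken error check: iommu_paging_domain_alloc() reports failure through ERR_PTR(), never NULL, so the old "if (!rkvdec->empty_domain)" test could never fire and a failed allocation would have been used later. A minimal sketch of the corrected handling (alloc_empty_domain is an illustrative name):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iommu.h>

static struct iommu_domain *alloc_empty_domain(struct device *dev)
{
	struct iommu_domain *domain = iommu_paging_domain_alloc(dev);

	/* Failure is signalled via ERR_PTR(), never via NULL. */
	if (IS_ERR(domain)) {
		dev_warn(dev, "cannot alloc new empty domain\n");
		return NULL;	/* treat the domain as optional */
	}

	return domain;
}
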
diff --git a/drivers/media/platform/rockchip/rkvdec/rkvdec.h b/drivers/media/platform/rockchip/rkvdec/rkvdec.h
index f6e8bf38add3..481aaa4bffe9 100644
--- a/drivers/media/platform/rockchip/rkvdec/rkvdec.h
+++ b/drivers/media/platform/rockchip/rkvdec/rkvdec.h
@@ -124,9 +124,9 @@ struct rkvdec_ctx {
void *priv;
};
-static inline struct rkvdec_ctx *fh_to_rkvdec_ctx(struct v4l2_fh *fh)
+static inline struct rkvdec_ctx *file_to_rkvdec_ctx(struct file *filp)
{
- return container_of(fh, struct rkvdec_ctx, fh);
+ return container_of(file_to_v4l2_fh(filp), struct rkvdec_ctx, fh);
}
struct rkvdec_aux_buf {
diff --git a/drivers/media/platform/samsung/exynos-gsc/gsc-core.h b/drivers/media/platform/samsung/exynos-gsc/gsc-core.h
index b9777e07fb6d..265221abf4dc 100644
--- a/drivers/media/platform/samsung/exynos-gsc/gsc-core.h
+++ b/drivers/media/platform/samsung/exynos-gsc/gsc-core.h
@@ -85,7 +85,6 @@ enum gsc_yuv_fmt {
GSC_CRCB,
};
-#define fh_to_ctx(__fh) container_of(__fh, struct gsc_ctx, fh)
#define is_rgb(x) (!!((x) & 0x1))
#define is_yuv420(x) (!!((x) & 0x2))
#define is_yuv422(x) (!!((x) & 0x4))
@@ -381,6 +380,11 @@ struct gsc_ctx {
enum v4l2_colorspace out_colorspace;
};
+static inline struct gsc_ctx *file_to_ctx(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct gsc_ctx, fh);
+}
+
void gsc_set_prefbuf(struct gsc_dev *gsc, struct gsc_frame *frm);
int gsc_register_m2m_device(struct gsc_dev *gsc);
void gsc_unregister_m2m_device(struct gsc_dev *gsc);
diff --git a/drivers/media/platform/samsung/exynos-gsc/gsc-m2m.c b/drivers/media/platform/samsung/exynos-gsc/gsc-m2m.c
index 4bda1c369c44..722e2531e23f 100644
--- a/drivers/media/platform/samsung/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/samsung/exynos-gsc/gsc-m2m.c
@@ -297,7 +297,7 @@ static int gsc_m2m_enum_fmt(struct file *file, void *priv,
static int gsc_m2m_g_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct gsc_ctx *ctx = fh_to_ctx(fh);
+ struct gsc_ctx *ctx = file_to_ctx(file);
return gsc_g_fmt_mplane(ctx, f);
}
@@ -305,7 +305,7 @@ static int gsc_m2m_g_fmt_mplane(struct file *file, void *fh,
static int gsc_m2m_try_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct gsc_ctx *ctx = fh_to_ctx(fh);
+ struct gsc_ctx *ctx = file_to_ctx(file);
return gsc_try_fmt_mplane(ctx, f);
}
@@ -313,7 +313,7 @@ static int gsc_m2m_try_fmt_mplane(struct file *file, void *fh,
static int gsc_m2m_s_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct gsc_ctx *ctx = fh_to_ctx(fh);
+ struct gsc_ctx *ctx = file_to_ctx(file);
struct vb2_queue *vq;
struct gsc_frame *frame;
struct v4l2_pix_format_mplane *pix;
@@ -359,7 +359,7 @@ static int gsc_m2m_s_fmt_mplane(struct file *file, void *fh,
static int gsc_m2m_reqbufs(struct file *file, void *fh,
struct v4l2_requestbuffers *reqbufs)
{
- struct gsc_ctx *ctx = fh_to_ctx(fh);
+ struct gsc_ctx *ctx = file_to_ctx(file);
struct gsc_dev *gsc = ctx->gsc_dev;
u32 max_cnt;
@@ -374,35 +374,35 @@ static int gsc_m2m_reqbufs(struct file *file, void *fh,
static int gsc_m2m_expbuf(struct file *file, void *fh,
struct v4l2_exportbuffer *eb)
{
- struct gsc_ctx *ctx = fh_to_ctx(fh);
+ struct gsc_ctx *ctx = file_to_ctx(file);
return v4l2_m2m_expbuf(file, ctx->m2m_ctx, eb);
}
static int gsc_m2m_querybuf(struct file *file, void *fh,
struct v4l2_buffer *buf)
{
- struct gsc_ctx *ctx = fh_to_ctx(fh);
+ struct gsc_ctx *ctx = file_to_ctx(file);
return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
}
static int gsc_m2m_qbuf(struct file *file, void *fh,
struct v4l2_buffer *buf)
{
- struct gsc_ctx *ctx = fh_to_ctx(fh);
+ struct gsc_ctx *ctx = file_to_ctx(file);
return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
}
static int gsc_m2m_dqbuf(struct file *file, void *fh,
struct v4l2_buffer *buf)
{
- struct gsc_ctx *ctx = fh_to_ctx(fh);
+ struct gsc_ctx *ctx = file_to_ctx(file);
return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
}
static int gsc_m2m_streamon(struct file *file, void *fh,
enum v4l2_buf_type type)
{
- struct gsc_ctx *ctx = fh_to_ctx(fh);
+ struct gsc_ctx *ctx = file_to_ctx(file);
/* The source and target color formats need to be set */
if (V4L2_TYPE_IS_OUTPUT(type)) {
@@ -418,7 +418,7 @@ static int gsc_m2m_streamon(struct file *file, void *fh,
static int gsc_m2m_streamoff(struct file *file, void *fh,
enum v4l2_buf_type type)
{
- struct gsc_ctx *ctx = fh_to_ctx(fh);
+ struct gsc_ctx *ctx = file_to_ctx(file);
return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
}
@@ -440,8 +440,8 @@ static int is_rectangle_enclosed(struct v4l2_rect *a, struct v4l2_rect *b)
static int gsc_m2m_g_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
+ struct gsc_ctx *ctx = file_to_ctx(file);
struct gsc_frame *frame;
- struct gsc_ctx *ctx = fh_to_ctx(fh);
if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
(s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
@@ -478,7 +478,7 @@ static int gsc_m2m_s_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
struct gsc_frame *frame;
- struct gsc_ctx *ctx = fh_to_ctx(fh);
+ struct gsc_ctx *ctx = file_to_ctx(file);
struct gsc_variant *variant = ctx->gsc_dev->variant;
struct v4l2_selection sel = *s;
int ret;
@@ -625,8 +625,7 @@ static int gsc_m2m_open(struct file *file)
/* Use separate control handler per file handle */
ctx->fh.ctrl_handler = &ctx->ctrl_handler;
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
ctx->gsc_dev = gsc;
/* Default color format */
@@ -655,7 +654,7 @@ static int gsc_m2m_open(struct file *file)
error_ctrls:
gsc_ctrls_delete(ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
error_fh:
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
@@ -666,7 +665,7 @@ unlock:
static int gsc_m2m_release(struct file *file)
{
- struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
+ struct gsc_ctx *ctx = file_to_ctx(file);
struct gsc_dev *gsc = ctx->gsc_dev;
pr_debug("pid: %d, state: 0x%lx, refcnt= %d",
@@ -676,7 +675,7 @@ static int gsc_m2m_release(struct file *file)
v4l2_m2m_ctx_release(ctx->m2m_ctx);
gsc_ctrls_delete(ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
if (--gsc->m2m.refcnt <= 0)
@@ -690,7 +689,7 @@ static int gsc_m2m_release(struct file *file)
static __poll_t gsc_m2m_poll(struct file *file,
struct poll_table_struct *wait)
{
- struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
+ struct gsc_ctx *ctx = file_to_ctx(file);
struct gsc_dev *gsc = ctx->gsc_dev;
__poll_t ret;
@@ -705,7 +704,7 @@ static __poll_t gsc_m2m_poll(struct file *file,
static int gsc_m2m_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
+ struct gsc_ctx *ctx = file_to_ctx(file);
struct gsc_dev *gsc = ctx->gsc_dev;
int ret;
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-core.h b/drivers/media/platform/samsung/exynos4-is/fimc-core.h
index 63385152a2ff..c23cbdee7afc 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-core.h
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-core.h
@@ -496,7 +496,10 @@ struct fimc_ctx {
struct fimc_ctrls ctrls;
};
-#define fh_to_ctx(__fh) container_of(__fh, struct fimc_ctx, fh)
+static inline struct fimc_ctx *file_to_ctx(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct fimc_ctx, fh);
+}
static inline void set_frame_bounds(struct fimc_frame *f, u32 width, u32 height)
{
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-m2m.c b/drivers/media/platform/samsung/exynos4-is/fimc-m2m.c
index 951433c8e92a..562c57f186c6 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-m2m.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-m2m.c
@@ -249,7 +249,7 @@ static int fimc_m2m_enum_fmt(struct file *file, void *priv,
static int fimc_m2m_g_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
+ struct fimc_ctx *ctx = file_to_ctx(file);
const struct fimc_frame *frame = ctx_get_frame(ctx, f->type);
if (IS_ERR(frame))
@@ -308,7 +308,7 @@ static int fimc_try_fmt_mplane(struct fimc_ctx *ctx, struct v4l2_format *f)
static int fimc_m2m_try_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
+ struct fimc_ctx *ctx = file_to_ctx(file);
return fimc_try_fmt_mplane(ctx, f);
}
@@ -337,7 +337,7 @@ static void __set_frame_format(struct fimc_frame *frame,
static int fimc_m2m_s_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
+ struct fimc_ctx *ctx = file_to_ctx(file);
struct fimc_dev *fimc = ctx->fimc_dev;
const struct fimc_fmt *fmt;
struct vb2_queue *vq;
@@ -376,7 +376,7 @@ static int fimc_m2m_s_fmt_mplane(struct file *file, void *fh,
static int fimc_m2m_g_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
+ struct fimc_ctx *ctx = file_to_ctx(file);
const struct fimc_frame *frame;
frame = ctx_get_frame(ctx, s->type);
@@ -484,7 +484,7 @@ static int fimc_m2m_try_selection(struct fimc_ctx *ctx,
static int fimc_m2m_s_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
+ struct fimc_ctx *ctx = file_to_ctx(file);
struct fimc_dev *fimc = ctx->fimc_dev;
struct fimc_frame *f;
int ret;
@@ -634,8 +634,7 @@ static int fimc_m2m_open(struct file *file)
/* Use separate control handler per file handle */
ctx->fh.ctrl_handler = &ctx->ctrls.handler;
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
/* Setup the device context for memory-to-memory mode */
ctx->state = FIMC_CTX_M2M;
@@ -664,7 +663,7 @@ error_m2m_ctx:
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
error_c:
fimc_ctrls_delete(ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
error_fh:
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
@@ -675,7 +674,7 @@ unlock:
static int fimc_m2m_release(struct file *file)
{
- struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
+ struct fimc_ctx *ctx = file_to_ctx(file);
struct fimc_dev *fimc = ctx->fimc_dev;
dbg("pid: %d, state: 0x%lx, refcnt= %d",
@@ -685,7 +684,7 @@ static int fimc_m2m_release(struct file *file)
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
fimc_ctrls_delete(ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
if (--fimc->m2m.refcnt <= 0)
diff --git a/drivers/media/platform/samsung/s3c-camif/camif-capture.c b/drivers/media/platform/samsung/s3c-camif/camif-capture.c
index 3e566b65f417..ed1a1d693293 100644
--- a/drivers/media/platform/samsung/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/samsung/s3c-camif/camif-capture.c
@@ -572,7 +572,7 @@ static int s3c_camif_close(struct file *file)
mutex_lock(&camif->lock);
- if (vp->owner == file->private_data) {
+ if (vp->owner == file_to_v4l2_fh(file)) {
camif_stop_capture(vp);
vb2_queue_release(&vp->vb_queue);
vp->owner = NULL;
@@ -595,7 +595,7 @@ static __poll_t s3c_camif_poll(struct file *file,
__poll_t ret;
mutex_lock(&camif->lock);
- if (vp->owner && vp->owner != file->private_data)
+ if (vp->owner && vp->owner != file_to_v4l2_fh(file))
ret = EPOLLERR;
else
ret = vb2_poll(&vp->vb_queue, file, wait);
@@ -609,7 +609,7 @@ static int s3c_camif_mmap(struct file *file, struct vm_area_struct *vma)
struct camif_vp *vp = video_drvdata(file);
int ret;
- if (vp->owner && vp->owner != file->private_data)
+ if (vp->owner && vp->owner != file_to_v4l2_fh(file))
ret = -EBUSY;
else
ret = vb2_mmap(&vp->vb_queue, vma);
@@ -791,7 +791,7 @@ static int s3c_camif_vidioc_s_fmt(struct file *file, void *priv,
out_frame->rect.top = 0;
if (vp->owner == NULL)
- vp->owner = priv;
+ vp->owner = file_to_v4l2_fh(file);
pr_debug("%ux%u. payload: %u. fmt: 0x%08x. %d %d. sizeimage: %d. bpl: %d\n",
out_frame->f_width, out_frame->f_height, vp->payload,
@@ -841,7 +841,7 @@ static int s3c_camif_streamon(struct file *file, void *priv,
if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- if (vp->owner && vp->owner != priv)
+ if (vp->owner && vp->owner != file_to_v4l2_fh(file))
return -EBUSY;
if (s3c_vp_active(vp))
@@ -872,7 +872,7 @@ static int s3c_camif_streamoff(struct file *file, void *priv,
if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- if (vp->owner && vp->owner != priv)
+ if (vp->owner && vp->owner != file_to_v4l2_fh(file))
return -EBUSY;
ret = vb2_streamoff(&vp->vb_queue, type);
@@ -888,9 +888,9 @@ static int s3c_camif_reqbufs(struct file *file, void *priv,
int ret;
pr_debug("[vp%d] rb count: %d, owner: %p, priv: %p\n",
- vp->id, rb->count, vp->owner, priv);
+ vp->id, rb->count, vp->owner, file_to_v4l2_fh(file));
- if (vp->owner && vp->owner != priv)
+ if (vp->owner && vp->owner != file_to_v4l2_fh(file))
return -EBUSY;
if (rb->count)
@@ -910,7 +910,7 @@ static int s3c_camif_reqbufs(struct file *file, void *priv,
vp->reqbufs_count = rb->count;
if (vp->owner == NULL && rb->count > 0)
- vp->owner = priv;
+ vp->owner = file_to_v4l2_fh(file);
return ret;
}
@@ -929,7 +929,7 @@ static int s3c_camif_qbuf(struct file *file, void *priv,
pr_debug("[vp%d]\n", vp->id);
- if (vp->owner && vp->owner != priv)
+ if (vp->owner && vp->owner != file_to_v4l2_fh(file))
return -EBUSY;
return vb2_qbuf(&vp->vb_queue, vp->vdev.v4l2_dev->mdev, buf);
@@ -942,7 +942,7 @@ static int s3c_camif_dqbuf(struct file *file, void *priv,
pr_debug("[vp%d] sequence: %d\n", vp->id, vp->frame_sequence);
- if (vp->owner && vp->owner != priv)
+ if (vp->owner && vp->owner != file_to_v4l2_fh(file))
return -EBUSY;
return vb2_dqbuf(&vp->vb_queue, buf, file->f_flags & O_NONBLOCK);
@@ -954,14 +954,14 @@ static int s3c_camif_create_bufs(struct file *file, void *priv,
struct camif_vp *vp = video_drvdata(file);
int ret;
- if (vp->owner && vp->owner != priv)
+ if (vp->owner && vp->owner != file_to_v4l2_fh(file))
return -EBUSY;
create->count = max_t(u32, 1, create->count);
ret = vb2_create_bufs(&vp->vb_queue, create);
if (!ret && vp->owner == NULL)
- vp->owner = priv;
+ vp->owner = file_to_v4l2_fh(file);
return ret;
}
diff --git a/drivers/media/platform/samsung/s5p-g2d/g2d.c b/drivers/media/platform/samsung/s5p-g2d/g2d.c
index ffed16a34493..ffb9bee6cb9d 100644
--- a/drivers/media/platform/samsung/s5p-g2d/g2d.c
+++ b/drivers/media/platform/samsung/s5p-g2d/g2d.c
@@ -25,7 +25,10 @@
#include "g2d.h"
#include "g2d-regs.h"
-#define fh2ctx(__fh) container_of(__fh, struct g2d_ctx, fh)
+static inline struct g2d_ctx *file2ctx(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct g2d_ctx, fh);
+}
static struct g2d_fmt formats[] = {
{
@@ -254,8 +257,7 @@ static int g2d_open(struct file *file)
return ret;
}
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
g2d_setup_ctrls(ctx);
@@ -272,13 +274,13 @@ static int g2d_open(struct file *file)
static int g2d_release(struct file *file)
{
struct g2d_dev *dev = video_drvdata(file);
- struct g2d_ctx *ctx = fh2ctx(file->private_data);
+ struct g2d_ctx *ctx = file2ctx(file);
mutex_lock(&dev->mutex);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
mutex_unlock(&dev->mutex);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
v4l2_info(&dev->v4l2_dev, "instance closed\n");
@@ -295,7 +297,7 @@ static int vidioc_querycap(struct file *file, void *priv,
return 0;
}
-static int vidioc_enum_fmt(struct file *file, void *prv, struct v4l2_fmtdesc *f)
+static int vidioc_enum_fmt(struct file *file, void *priv, struct v4l2_fmtdesc *f)
{
if (f->index >= NUM_FORMATS)
return -EINVAL;
@@ -303,9 +305,9 @@ static int vidioc_enum_fmt(struct file *file, void *prv, struct v4l2_fmtdesc *f)
return 0;
}
-static int vidioc_g_fmt(struct file *file, void *prv, struct v4l2_format *f)
+static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct g2d_ctx *ctx = prv;
+ struct g2d_ctx *ctx = file2ctx(file);
struct vb2_queue *vq;
struct g2d_frame *frm;
@@ -325,7 +327,7 @@ static int vidioc_g_fmt(struct file *file, void *prv, struct v4l2_format *f)
return 0;
}
-static int vidioc_try_fmt(struct file *file, void *prv, struct v4l2_format *f)
+static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct g2d_fmt *fmt;
enum v4l2_field *field;
@@ -355,9 +357,9 @@ static int vidioc_try_fmt(struct file *file, void *prv, struct v4l2_format *f)
return 0;
}
-static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
+static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct g2d_ctx *ctx = prv;
+ struct g2d_ctx *ctx = file2ctx(file);
struct g2d_dev *dev = ctx->dev;
struct vb2_queue *vq;
struct g2d_frame *frm;
@@ -366,7 +368,7 @@ static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
/* Adjust all values according to the hardware capabilities
* and chosen format. */
- ret = vidioc_try_fmt(file, prv, f);
+ ret = vidioc_try_fmt(file, priv, f);
if (ret)
return ret;
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
@@ -395,10 +397,10 @@ static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
return 0;
}
-static int vidioc_g_selection(struct file *file, void *prv,
+static int vidioc_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct g2d_ctx *ctx = prv;
+ struct g2d_ctx *ctx = file2ctx(file);
struct g2d_frame *f;
f = get_frame(ctx, s->type);
@@ -445,10 +447,10 @@ static int vidioc_g_selection(struct file *file, void *prv,
return 0;
}
-static int vidioc_try_selection(struct file *file, void *prv,
+static int vidioc_try_selection(struct file *file, void *priv,
const struct v4l2_selection *s)
{
- struct g2d_ctx *ctx = prv;
+ struct g2d_ctx *ctx = file2ctx(file);
struct g2d_dev *dev = ctx->dev;
struct g2d_frame *f;
@@ -473,14 +475,14 @@ static int vidioc_try_selection(struct file *file, void *prv,
return 0;
}
-static int vidioc_s_selection(struct file *file, void *prv,
+static int vidioc_s_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct g2d_ctx *ctx = prv;
+ struct g2d_ctx *ctx = file2ctx(file);
struct g2d_frame *f;
int ret;
- ret = vidioc_try_selection(file, prv, s);
+ ret = vidioc_try_selection(file, priv, s);
if (ret)
return ret;
f = get_frame(ctx, s->type);
diff --git a/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c b/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
index ac4cf269456a..81792f7f8b16 100644
--- a/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
@@ -580,9 +580,9 @@ static inline struct s5p_jpeg_ctx *ctrl_to_ctx(struct v4l2_ctrl *c)
return container_of(c->handler, struct s5p_jpeg_ctx, ctrl_handler);
}
-static inline struct s5p_jpeg_ctx *fh_to_ctx(struct v4l2_fh *fh)
+static inline struct s5p_jpeg_ctx *file_to_ctx(struct file *filp)
{
- return container_of(fh, struct s5p_jpeg_ctx, fh);
+ return container_of(file_to_v4l2_fh(filp), struct s5p_jpeg_ctx, fh);
}
static int s5p_jpeg_to_user_subsampling(struct s5p_jpeg_ctx *ctx)
@@ -965,8 +965,7 @@ static int s5p_jpeg_open(struct file *file)
v4l2_fh_init(&ctx->fh, vfd);
/* Use separate control handler per file handle */
ctx->fh.ctrl_handler = &ctx->ctrl_handler;
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
ctx->jpeg = jpeg;
if (vfd == jpeg->vfd_encoder) {
@@ -1001,7 +1000,7 @@ static int s5p_jpeg_open(struct file *file)
return 0;
error:
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
mutex_unlock(&jpeg->lock);
free:
@@ -1011,13 +1010,13 @@ free:
static int s5p_jpeg_release(struct file *file)
{
+ struct s5p_jpeg_ctx *ctx = file_to_ctx(file);
struct s5p_jpeg *jpeg = video_drvdata(file);
- struct s5p_jpeg_ctx *ctx = fh_to_ctx(file->private_data);
mutex_lock(&jpeg->lock);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
mutex_unlock(&jpeg->lock);
@@ -1249,7 +1248,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
static int s5p_jpeg_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_jpeg_ctx *ctx = file_to_ctx(file);
if (ctx->mode == S5P_JPEG_ENCODE) {
strscpy(cap->driver, S5P_JPEG_M2M_NAME,
@@ -1297,7 +1296,7 @@ static int enum_fmt(struct s5p_jpeg_ctx *ctx,
static int s5p_jpeg_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_jpeg_ctx *ctx = file_to_ctx(file);
if (ctx->mode == S5P_JPEG_ENCODE)
return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
@@ -1310,7 +1309,7 @@ static int s5p_jpeg_enum_fmt_vid_cap(struct file *file, void *priv,
static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_jpeg_ctx *ctx = file_to_ctx(file);
if (ctx->mode == S5P_JPEG_ENCODE)
return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
@@ -1336,7 +1335,7 @@ static int s5p_jpeg_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
struct vb2_queue *vq;
struct s5p_jpeg_q_data *q_data = NULL;
struct v4l2_pix_format *pix = &f->fmt.pix;
- struct s5p_jpeg_ctx *ct = fh_to_ctx(priv);
+ struct s5p_jpeg_ctx *ct = file_to_ctx(file);
vq = v4l2_m2m_get_vq(ct->fh.m2m_ctx, f->type);
if (!vq)
@@ -1476,7 +1475,7 @@ static int vidioc_try_fmt(struct v4l2_format *f, struct s5p_jpeg_fmt *fmt,
static int s5p_jpeg_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_jpeg_ctx *ctx = file_to_ctx(file);
struct v4l2_pix_format *pix = &f->fmt.pix;
struct s5p_jpeg_fmt *fmt;
int ret;
@@ -1535,7 +1534,7 @@ exit:
static int s5p_jpeg_try_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_jpeg_ctx *ctx = file_to_ctx(file);
struct s5p_jpeg_fmt *fmt;
fmt = s5p_jpeg_find_format(ctx, f->fmt.pix.pixelformat,
@@ -1682,7 +1681,7 @@ static int s5p_jpeg_s_fmt_vid_cap(struct file *file, void *priv,
if (ret)
return ret;
- return s5p_jpeg_s_fmt(fh_to_ctx(priv), f);
+ return s5p_jpeg_s_fmt(file_to_ctx(file), f);
}
static int s5p_jpeg_s_fmt_vid_out(struct file *file, void *priv,
@@ -1694,7 +1693,7 @@ static int s5p_jpeg_s_fmt_vid_out(struct file *file, void *priv,
if (ret)
return ret;
- return s5p_jpeg_s_fmt(fh_to_ctx(priv), f);
+ return s5p_jpeg_s_fmt(file_to_ctx(file), f);
}
static int s5p_jpeg_subscribe_event(struct v4l2_fh *fh,
@@ -1791,7 +1790,7 @@ static int exynos3250_jpeg_try_crop(struct s5p_jpeg_ctx *ctx,
static int s5p_jpeg_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_jpeg_ctx *ctx = file_to_ctx(file);
if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
@@ -1828,7 +1827,7 @@ static int s5p_jpeg_g_selection(struct file *file, void *priv,
static int s5p_jpeg_s_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
- struct s5p_jpeg_ctx *ctx = fh_to_ctx(file->private_data);
+ struct s5p_jpeg_ctx *ctx = file_to_ctx(file);
struct v4l2_rect *rect = &s->r;
int ret = -EINVAL;
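
The jpeg-core hunks show the matching lifecycle change: v4l2_fh_add() and v4l2_fh_del() now take the struct file, and the manual file->private_data = &ctx->fh assignment disappears, so the core owns the fh-to-file association. The resulting open/release skeleton, sketched with the same hypothetical foo_ctx and error handling trimmed:

  static int foo_open(struct file *file)
  {
          struct foo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

          if (!ctx)
                  return -ENOMEM;

          v4l2_fh_init(&ctx->fh, video_devdata(file));
          v4l2_fh_add(&ctx->fh, file);    /* also records the file binding */
          return 0;
  }

  static int foo_release(struct file *file)
  {
          struct foo_ctx *ctx = file_to_ctx(file);

          v4l2_fh_del(&ctx->fh, file);    /* mirrors v4l2_fh_add() */
          v4l2_fh_exit(&ctx->fh);
          kfree(ctx);
          return 0;
  }
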
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
index 73fdcd362265..4948d734eb02 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
@@ -801,8 +801,7 @@ static int s5p_mfc_open(struct file *file)
}
init_waitqueue_head(&ctx->queue);
v4l2_fh_init(&ctx->fh, vdev);
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
ctx->dev = dev;
INIT_LIST_HEAD(&ctx->src_queue);
INIT_LIST_HEAD(&ctx->dst_queue);
@@ -877,7 +876,7 @@ static int s5p_mfc_open(struct file *file)
/* Init videobuf2 queue for CAPTURE */
q = &ctx->vq_dst;
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- q->drv_priv = &ctx->fh;
+ q->drv_priv = ctx;
q->lock = &dev->mfc_mutex;
if (vdev == dev->vfd_dec) {
q->io_modes = VB2_MMAP;
@@ -904,7 +903,7 @@ static int s5p_mfc_open(struct file *file)
/* Init videobuf2 queue for OUTPUT */
q = &ctx->vq_src;
q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
- q->drv_priv = &ctx->fh;
+ q->drv_priv = ctx;
q->lock = &dev->mfc_mutex;
if (vdev == dev->vfd_dec) {
q->io_modes = VB2_MMAP;
@@ -956,7 +955,7 @@ err_ctrls_setup:
err_bad_node:
dev->ctx[ctx->num] = NULL;
err_no_ctx:
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
err_alloc:
@@ -970,7 +969,7 @@ err_enter:
/* Release MFC context */
static int s5p_mfc_release(struct file *file)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
struct s5p_mfc_dev *dev = ctx->dev;
/* if dev is null, do cleanup that doesn't need dev */
@@ -1011,7 +1010,7 @@ static int s5p_mfc_release(struct file *file)
if (dev)
dev->ctx[ctx->num] = NULL;
s5p_mfc_dec_ctrls_delete(ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
/* vdev is gone if dev is null */
if (dev)
v4l2_fh_exit(&ctx->fh);
@@ -1027,7 +1026,7 @@ static int s5p_mfc_release(struct file *file)
static __poll_t s5p_mfc_poll(struct file *file,
struct poll_table_struct *wait)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
struct s5p_mfc_dev *dev = ctx->dev;
struct vb2_queue *src_q, *dst_q;
struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
@@ -1078,7 +1077,7 @@ end:
/* Mmap */
static int s5p_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
int ret;
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c
index 47bc3014b5d8..f7c682fca645 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c
@@ -14,8 +14,7 @@
#include "s5p_mfc_opr.h"
#include "s5p_mfc_cmd_v6.h"
-static int s5p_mfc_cmd_host2risc_v6(struct s5p_mfc_dev *dev, int cmd,
- const struct s5p_mfc_cmd_args *args)
+static int s5p_mfc_cmd_host2risc_v6(struct s5p_mfc_dev *dev, int cmd)
{
mfc_debug(2, "Issue the command: %d\n", cmd);
@@ -31,7 +30,6 @@ static int s5p_mfc_cmd_host2risc_v6(struct s5p_mfc_dev *dev, int cmd,
static int s5p_mfc_sys_init_cmd_v6(struct s5p_mfc_dev *dev)
{
- struct s5p_mfc_cmd_args h2r_args;
const struct s5p_mfc_buf_size_v6 *buf_size = dev->variant->buf_size->priv;
int ret;
@@ -41,33 +39,23 @@ static int s5p_mfc_sys_init_cmd_v6(struct s5p_mfc_dev *dev)
mfc_write(dev, dev->ctx_buf.dma, S5P_FIMV_CONTEXT_MEM_ADDR_V6);
mfc_write(dev, buf_size->dev_ctx, S5P_FIMV_CONTEXT_MEM_SIZE_V6);
- return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_SYS_INIT_V6,
- &h2r_args);
+ return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_SYS_INIT_V6);
}
static int s5p_mfc_sleep_cmd_v6(struct s5p_mfc_dev *dev)
{
- struct s5p_mfc_cmd_args h2r_args;
-
- memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
- return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_SLEEP_V6,
- &h2r_args);
+ return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_SLEEP_V6);
}
static int s5p_mfc_wakeup_cmd_v6(struct s5p_mfc_dev *dev)
{
- struct s5p_mfc_cmd_args h2r_args;
-
- memset(&h2r_args, 0, sizeof(struct s5p_mfc_cmd_args));
- return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_WAKEUP_V6,
- &h2r_args);
+ return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_WAKEUP_V6);
}
/* Open a new instance and get its number */
static int s5p_mfc_open_inst_cmd_v6(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
- struct s5p_mfc_cmd_args h2r_args;
int codec_type;
mfc_debug(2, "Requested codec mode: %d\n", ctx->codec_mode);
@@ -129,23 +117,20 @@ static int s5p_mfc_open_inst_cmd_v6(struct s5p_mfc_ctx *ctx)
mfc_write(dev, ctx->ctx.size, S5P_FIMV_CONTEXT_MEM_SIZE_V6);
mfc_write(dev, 0, S5P_FIMV_D_CRC_CTRL_V6); /* no crc */
- return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_OPEN_INSTANCE_V6,
- &h2r_args);
+ return s5p_mfc_cmd_host2risc_v6(dev, S5P_FIMV_H2R_CMD_OPEN_INSTANCE_V6);
}
/* Close instance */
static int s5p_mfc_close_inst_cmd_v6(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
- struct s5p_mfc_cmd_args h2r_args;
int ret = 0;
dev->curr_ctx = ctx->num;
if (ctx->state != MFCINST_FREE) {
mfc_write(dev, ctx->inst_no, S5P_FIMV_INSTANCE_ID_V6);
ret = s5p_mfc_cmd_host2risc_v6(dev,
- S5P_FIMV_H2R_CMD_CLOSE_INSTANCE_V6,
- &h2r_args);
+ S5P_FIMV_H2R_CMD_CLOSE_INSTANCE_V6);
} else {
ret = -EINVAL;
}
@@ -153,9 +138,15 @@ static int s5p_mfc_close_inst_cmd_v6(struct s5p_mfc_ctx *ctx)
return ret;
}
+static int s5p_mfc_cmd_host2risc_v6_args(struct s5p_mfc_dev *dev, int cmd,
+ const struct s5p_mfc_cmd_args *ignored)
+{
+ return s5p_mfc_cmd_host2risc_v6(dev, cmd);
+}
+
/* Initialize cmd function pointers for MFC v6 */
static const struct s5p_mfc_hw_cmds s5p_mfc_cmds_v6 = {
- .cmd_host2risc = s5p_mfc_cmd_host2risc_v6,
+ .cmd_host2risc = s5p_mfc_cmd_host2risc_v6_args,
.sys_init_cmd = s5p_mfc_sys_init_cmd_v6,
.sleep_cmd = s5p_mfc_sleep_cmd_v6,
.wakeup_cmd = s5p_mfc_wakeup_cmd_v6,
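
The v6 command backend never read its args parameter, so the patch narrows the internal signature and keeps a one-line adapter, s5p_mfc_cmd_host2risc_v6_args(), purely to satisfy the shared s5p_mfc_hw_cmds ops table. The idiom in isolation, with hypothetical names:

  struct foo_dev;
  struct foo_cmd_args;                    /* opaque here; never read */

  static int foo_host2risc(struct foo_dev *dev, int cmd)
  {
          /* ... write cmd to the mailbox register ... */
          return 0;
  }

  /* Thin adapter: widens the signature back to what the ops table
   * expects while making the unused argument explicit. */
  static int foo_host2risc_args(struct foo_dev *dev, int cmd,
                                const struct foo_cmd_args *ignored)
  {
          return foo_host2risc(dev, cmd);
  }

This keeps every indirect caller of cmd_host2risc unchanged while the direct callers get the narrower, clearer API.
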
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h
index 86c316c1ff8f..58dc1768082c 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h
@@ -767,7 +767,11 @@ struct mfc_control {
#define s5p_mfc_hw_call(f, op, args...) \
((f && f->op) ? f->op(args) : (typeof(f->op(args)))(-ENODEV))
-#define fh_to_ctx(__fh) container_of(__fh, struct s5p_mfc_ctx, fh)
+static inline struct s5p_mfc_ctx *file_to_ctx(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct s5p_mfc_ctx, fh);
+}
+
#define ctrl_to_ctx(__ctrl) \
container_of((__ctrl)->handler, struct s5p_mfc_ctx, ctrl_handler)
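
The common.h change mirrors the helpers added to the other drivers in this patch, but converting the macro to a static inline also pins down what the lookup really depends on: the old macro only worked because open() stored &ctx->fh in file->private_data, while the inline asks the V4L2 core for the fh, so the lookup survives the removal of that convention. Side by side, as an illustrative fragment rather than compilable code:

  /* Old: correctness hinged on open() having done
   *      file->private_data = &ctx->fh;
   * so that every call site could do: */
  ctx = container_of(file->private_data, struct s5p_mfc_ctx, fh);

  /* New: the core tracks the fh for each open file: */
  ctx = container_of(file_to_v4l2_fh(file), struct s5p_mfc_ctx, fh);
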
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c
index 3efbc3367906..afd28beabfde 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c
@@ -330,7 +330,7 @@ static int vidioc_enum_fmt(struct file *file, struct v4l2_fmtdesc *f,
return 0;
}
-static int vidioc_enum_fmt_vid_cap(struct file *file, void *pirv,
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
return vidioc_enum_fmt(file, f, false);
@@ -345,7 +345,7 @@ static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
/* Get format */
static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
struct v4l2_pix_format_mplane *pix_mp;
mfc_debug_enter();
@@ -442,7 +442,7 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct s5p_mfc_dev *dev = video_drvdata(file);
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
int ret = 0;
struct v4l2_pix_format_mplane *pix_mp;
const struct s5p_mfc_buf_size *buf_size = dev->variant->buf_size;
@@ -598,7 +598,7 @@ static int vidioc_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *reqbufs)
{
struct s5p_mfc_dev *dev = video_drvdata(file);
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
if (reqbufs->memory != V4L2_MEMORY_MMAP) {
mfc_debug(2, "Only V4L2_MEMORY_MMAP is supported\n");
@@ -619,7 +619,7 @@ static int vidioc_reqbufs(struct file *file, void *priv,
static int vidioc_querybuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
int ret;
int i;
@@ -647,7 +647,7 @@ static int vidioc_querybuf(struct file *file, void *priv,
/* Queue a buffer */
static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
if (ctx->state == MFCINST_ERROR) {
mfc_err("Call on QBUF after unrecoverable error\n");
@@ -666,7 +666,7 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
const struct v4l2_event ev = {
.type = V4L2_EVENT_EOS
};
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
int ret;
if (ctx->state == MFCINST_ERROR) {
@@ -695,7 +695,7 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
static int vidioc_expbuf(struct file *file, void *priv,
struct v4l2_exportbuffer *eb)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
if (eb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
return vb2_expbuf(&ctx->vq_src, eb);
@@ -708,7 +708,7 @@ static int vidioc_expbuf(struct file *file, void *priv,
static int vidioc_streamon(struct file *file, void *priv,
enum v4l2_buf_type type)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
int ret = -EINVAL;
mfc_debug_enter();
@@ -724,7 +724,7 @@ static int vidioc_streamon(struct file *file, void *priv,
static int vidioc_streamoff(struct file *file, void *priv,
enum v4l2_buf_type type)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
return vb2_streamoff(&ctx->vq_src, type);
@@ -801,7 +801,7 @@ static const struct v4l2_ctrl_ops s5p_mfc_dec_ctrl_ops = {
static int vidioc_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
struct s5p_mfc_dev *dev = ctx->dev;
u32 left, right, top, bottom;
u32 width, height;
@@ -856,7 +856,7 @@ static int vidioc_g_selection(struct file *file, void *priv,
static int vidioc_decoder_cmd(struct file *file, void *priv,
struct v4l2_decoder_cmd *cmd)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
struct s5p_mfc_dev *dev = ctx->dev;
struct s5p_mfc_buf *buf;
unsigned long flags;
@@ -937,7 +937,7 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
unsigned int *plane_count, unsigned int psize[],
struct device *alloc_devs[])
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+ struct s5p_mfc_ctx *ctx = vb2_get_drv_priv(vq);
struct s5p_mfc_dev *dev = ctx->dev;
const struct v4l2_format_info *format;
@@ -1006,7 +1006,7 @@ static int s5p_mfc_buf_init(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vb2_queue *vq = vb->vb2_queue;
- struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+ struct s5p_mfc_ctx *ctx = vb2_get_drv_priv(vq);
unsigned int i;
if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
@@ -1068,7 +1068,7 @@ static int s5p_mfc_buf_init(struct vb2_buffer *vb)
static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+ struct s5p_mfc_ctx *ctx = vb2_get_drv_priv(q);
struct s5p_mfc_dev *dev = ctx->dev;
v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
@@ -1085,7 +1085,7 @@ static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
static void s5p_mfc_stop_streaming(struct vb2_queue *q)
{
unsigned long flags;
- struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+ struct s5p_mfc_ctx *ctx = vb2_get_drv_priv(q);
struct s5p_mfc_dev *dev = ctx->dev;
int aborted = 0;
@@ -1130,7 +1130,7 @@ static void s5p_mfc_stop_streaming(struct vb2_queue *q)
static void s5p_mfc_buf_queue(struct vb2_buffer *vb)
{
struct vb2_queue *vq = vb->vb2_queue;
- struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+ struct s5p_mfc_ctx *ctx = vb2_get_drv_priv(vq);
struct s5p_mfc_dev *dev = ctx->dev;
unsigned long flags;
struct s5p_mfc_buf *mfc_buf;
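
The same decoupling happens on the vb2 side of s5p-mfc: the queues previously stored &ctx->fh in drv_priv and every callback did a container_of() to get back to the context, whereas the patch stores the context pointer itself and the callbacks use the stock vb2_get_drv_priv() accessor (the enc callbacks below follow suit). A sketch of the two halves, again with hypothetical foo_* names:

  #include <media/videobuf2-core.h>

  static void foo_queue_init(struct foo_ctx *ctx, struct vb2_queue *q)
  {
          q->drv_priv = ctx;              /* the context, not &ctx->fh */
          /* ... type, io_modes, ops ... */
  }

  static int foo_queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
                             unsigned int *num_planes, unsigned int sizes[],
                             struct device *alloc_devs[])
  {
          struct foo_ctx *ctx = vb2_get_drv_priv(vq);  /* no container_of() */

          /* ... derive *num_planes and sizes[] from ctx ... */
          return 0;
  }
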
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
index 6c603dcd5664..3f8701e5614f 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
@@ -1375,7 +1375,7 @@ static int vidioc_enum_fmt(struct file *file, struct v4l2_fmtdesc *f,
return -EINVAL;
}
-static int vidioc_enum_fmt_vid_cap(struct file *file, void *pirv,
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
return vidioc_enum_fmt(file, f, false);
@@ -1389,8 +1389,8 @@ static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
mfc_debug(2, "f->type = %d ctx->state = %d\n", f->type, ctx->state);
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
@@ -1472,8 +1472,8 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
struct s5p_mfc_dev *dev = video_drvdata(file);
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
int ret = 0;
@@ -1531,7 +1531,7 @@ static int vidioc_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *reqbufs)
{
struct s5p_mfc_dev *dev = video_drvdata(file);
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
int ret = 0;
/* if memory is not mmp or userptr or dmabuf return error */
@@ -1601,7 +1601,7 @@ static int vidioc_reqbufs(struct file *file, void *priv,
static int vidioc_querybuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
int ret = 0;
/* if memory is not mmp or userptr or dmabuf return error */
@@ -1636,7 +1636,7 @@ static int vidioc_querybuf(struct file *file, void *priv,
/* Queue a buffer */
static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
if (ctx->state == MFCINST_ERROR) {
mfc_err("Call on QBUF after unrecoverable error\n");
@@ -1657,10 +1657,10 @@ static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
/* Dequeue a buffer */
static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
const struct v4l2_event ev = {
.type = V4L2_EVENT_EOS
};
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
int ret;
if (ctx->state == MFCINST_ERROR) {
@@ -1685,7 +1685,7 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
static int vidioc_expbuf(struct file *file, void *priv,
struct v4l2_exportbuffer *eb)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
if (eb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
return vb2_expbuf(&ctx->vq_src, eb);
@@ -1698,7 +1698,7 @@ static int vidioc_expbuf(struct file *file, void *priv,
static int vidioc_streamon(struct file *file, void *priv,
enum v4l2_buf_type type)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
return vb2_streamon(&ctx->vq_src, type);
@@ -1711,7 +1711,7 @@ static int vidioc_streamon(struct file *file, void *priv,
static int vidioc_streamoff(struct file *file, void *priv,
enum v4l2_buf_type type)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
return vb2_streamoff(&ctx->vq_src, type);
@@ -2284,7 +2284,7 @@ static const struct v4l2_ctrl_ops s5p_mfc_enc_ctrl_ops = {
static int vidioc_s_parm(struct file *file, void *priv,
struct v4l2_streamparm *a)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
if (a->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
ctx->enc_params.rc_framerate_num =
@@ -2301,7 +2301,7 @@ static int vidioc_s_parm(struct file *file, void *priv,
static int vidioc_g_parm(struct file *file, void *priv,
struct v4l2_streamparm *a)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
if (a->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
a->parm.output.timeperframe.denominator =
@@ -2318,7 +2318,7 @@ static int vidioc_g_parm(struct file *file, void *priv,
static int vidioc_encoder_cmd(struct file *file, void *priv,
struct v4l2_encoder_cmd *cmd)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
+ struct s5p_mfc_ctx *ctx = file_to_ctx(file);
struct s5p_mfc_dev *dev = ctx->dev;
struct s5p_mfc_buf *buf;
unsigned long flags;
@@ -2418,7 +2418,7 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
unsigned int *buf_count, unsigned int *plane_count,
unsigned int psize[], struct device *alloc_devs[])
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+ struct s5p_mfc_ctx *ctx = vb2_get_drv_priv(vq);
struct s5p_mfc_dev *dev = ctx->dev;
if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
@@ -2477,7 +2477,7 @@ static int s5p_mfc_buf_init(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vb2_queue *vq = vb->vb2_queue;
- struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+ struct s5p_mfc_ctx *ctx = vb2_get_drv_priv(vq);
unsigned int i;
int ret;
@@ -2516,7 +2516,7 @@ static int s5p_mfc_buf_init(struct vb2_buffer *vb)
static int s5p_mfc_buf_prepare(struct vb2_buffer *vb)
{
struct vb2_queue *vq = vb->vb2_queue;
- struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+ struct s5p_mfc_ctx *ctx = vb2_get_drv_priv(vq);
int ret;
if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
@@ -2557,7 +2557,7 @@ static int s5p_mfc_buf_prepare(struct vb2_buffer *vb)
static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+ struct s5p_mfc_ctx *ctx = vb2_get_drv_priv(q);
struct s5p_mfc_dev *dev = ctx->dev;
if (IS_MFCV6_PLUS(dev) &&
@@ -2588,7 +2588,7 @@ static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
static void s5p_mfc_stop_streaming(struct vb2_queue *q)
{
unsigned long flags;
- struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
+ struct s5p_mfc_ctx *ctx = vb2_get_drv_priv(q);
struct s5p_mfc_dev *dev = ctx->dev;
if ((ctx->state == MFCINST_FINISHING ||
@@ -2617,7 +2617,7 @@ static void s5p_mfc_stop_streaming(struct vb2_queue *q)
static void s5p_mfc_buf_queue(struct vb2_buffer *vb)
{
struct vb2_queue *vq = vb->vb2_queue;
- struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv);
+ struct s5p_mfc_ctx *ctx = vb2_get_drv_priv(vq);
struct s5p_mfc_dev *dev = ctx->dev;
unsigned long flags;
struct s5p_mfc_buf *mfc_buf;
diff --git a/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
index 1eb934490c0b..56169b70652d 100644
--- a/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
@@ -33,7 +33,10 @@
#define BDISP_MIN_H 1
#define BDISP_MAX_H 8191
-#define fh_to_ctx(__fh) container_of(__fh, struct bdisp_ctx, fh)
+static inline struct bdisp_ctx *file_to_ctx(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct bdisp_ctx, fh);
+}
enum bdisp_dev_flags {
ST_M2M_OPEN, /* Driver opened */
@@ -603,8 +606,7 @@ static int bdisp_open(struct file *file)
/* Use separate control handler per file handle */
ctx->fh.ctrl_handler = &ctx->ctrl_handler;
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
/* Default format */
ctx->src = bdisp_dflt_fmt;
@@ -630,7 +632,7 @@ static int bdisp_open(struct file *file)
error_ctrls:
bdisp_ctrls_delete(ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
error_fh:
v4l2_fh_exit(&ctx->fh);
bdisp_hw_free_nodes(ctx);
@@ -644,7 +646,7 @@ unlock:
static int bdisp_release(struct file *file)
{
- struct bdisp_ctx *ctx = fh_to_ctx(file->private_data);
+ struct bdisp_ctx *ctx = file_to_ctx(file);
struct bdisp_dev *bdisp = ctx->bdisp_dev;
dev_dbg(bdisp->dev, "%s\n", __func__);
@@ -655,7 +657,7 @@ static int bdisp_release(struct file *file)
bdisp_ctrls_delete(ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
if (--bdisp->m2m.refcnt <= 0)
@@ -682,7 +684,7 @@ static const struct v4l2_file_operations bdisp_fops = {
static int bdisp_querycap(struct file *file, void *fh,
struct v4l2_capability *cap)
{
- struct bdisp_ctx *ctx = fh_to_ctx(fh);
+ struct bdisp_ctx *ctx = file_to_ctx(file);
struct bdisp_dev *bdisp = ctx->bdisp_dev;
strscpy(cap->driver, bdisp->pdev->name, sizeof(cap->driver));
@@ -694,7 +696,7 @@ static int bdisp_querycap(struct file *file, void *fh,
static int bdisp_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
- struct bdisp_ctx *ctx = fh_to_ctx(fh);
+ struct bdisp_ctx *ctx = file_to_ctx(file);
const struct bdisp_fmt *fmt;
if (f->index >= ARRAY_SIZE(bdisp_formats))
@@ -714,7 +716,7 @@ static int bdisp_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
static int bdisp_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
- struct bdisp_ctx *ctx = fh_to_ctx(fh);
+ struct bdisp_ctx *ctx = file_to_ctx(file);
struct v4l2_pix_format *pix;
struct bdisp_frame *frame = ctx_get_frame(ctx, f->type);
@@ -738,7 +740,7 @@ static int bdisp_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
static int bdisp_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
- struct bdisp_ctx *ctx = fh_to_ctx(fh);
+ struct bdisp_ctx *ctx = file_to_ctx(file);
struct v4l2_pix_format *pix = &f->fmt.pix;
const struct bdisp_fmt *format;
u32 in_w, in_h;
@@ -788,7 +790,7 @@ static int bdisp_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
static int bdisp_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
- struct bdisp_ctx *ctx = fh_to_ctx(fh);
+ struct bdisp_ctx *ctx = file_to_ctx(file);
struct vb2_queue *vq;
struct bdisp_frame *frame;
struct v4l2_pix_format *pix;
@@ -841,8 +843,8 @@ static int bdisp_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
static int bdisp_g_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
+ struct bdisp_ctx *ctx = file_to_ctx(file);
struct bdisp_frame *frame;
- struct bdisp_ctx *ctx = fh_to_ctx(fh);
frame = ctx_get_frame(ctx, s->type);
if (IS_ERR(frame)) {
@@ -919,8 +921,8 @@ static int is_rect_enclosed(struct v4l2_rect *a, struct v4l2_rect *b)
static int bdisp_s_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
+ struct bdisp_ctx *ctx = file_to_ctx(file);
struct bdisp_frame *frame;
- struct bdisp_ctx *ctx = fh_to_ctx(fh);
struct v4l2_rect *in, out;
bool valid = false;
@@ -997,7 +999,7 @@ static int bdisp_s_selection(struct file *file, void *fh,
static int bdisp_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
- struct bdisp_ctx *ctx = fh_to_ctx(fh);
+ struct bdisp_ctx *ctx = file_to_ctx(file);
if ((type == V4L2_BUF_TYPE_VIDEO_OUTPUT) &&
!bdisp_ctx_state_is_set(BDISP_SRC_FMT, ctx)) {
diff --git a/drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c b/drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c
index 0533d4a083d2..a078f1107300 100644
--- a/drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c
+++ b/drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c
@@ -239,7 +239,7 @@ static int delta_mjpeg_ipc_open(struct delta_ctx *pctx)
return 0;
}
-static int delta_mjpeg_ipc_decode(struct delta_ctx *pctx, struct delta_au *au)
+static int delta_mjpeg_ipc_decode(struct delta_ctx *pctx, dma_addr_t pstart, dma_addr_t pend)
{
struct delta_dev *delta = pctx->dev;
struct delta_mjpeg_ctx *ctx = to_ctx(pctx);
@@ -256,8 +256,8 @@ static int delta_mjpeg_ipc_decode(struct delta_ctx *pctx, struct delta_au *au)
memset(params, 0, sizeof(*params));
- params->picture_start_addr_p = (u32)(au->paddr);
- params->picture_end_addr_p = (u32)(au->paddr + au->size - 1);
+ params->picture_start_addr_p = pstart;
+ params->picture_end_addr_p = pend;
/*
* !WARNING!
@@ -374,12 +374,14 @@ static int delta_mjpeg_decode(struct delta_ctx *pctx, struct delta_au *pau)
struct delta_dev *delta = pctx->dev;
struct delta_mjpeg_ctx *ctx = to_ctx(pctx);
int ret;
- struct delta_au au = *pau;
+ void *au_vaddr = pau->vaddr;
+ dma_addr_t au_dma = pau->paddr;
+ size_t au_size = pau->size;
unsigned int data_offset = 0;
struct mjpeg_header *header = &ctx->header_struct;
if (!ctx->header) {
- ret = delta_mjpeg_read_header(pctx, au.vaddr, au.size,
+ ret = delta_mjpeg_read_header(pctx, au_vaddr, au_size,
header, &data_offset);
if (ret) {
pctx->stream_errors++;
@@ -405,17 +407,17 @@ static int delta_mjpeg_decode(struct delta_ctx *pctx, struct delta_au *pau)
goto err;
}
- ret = delta_mjpeg_read_header(pctx, au.vaddr, au.size,
+ ret = delta_mjpeg_read_header(pctx, au_vaddr, au_size,
ctx->header, &data_offset);
if (ret) {
pctx->stream_errors++;
goto err;
}
- au.paddr += data_offset;
- au.vaddr += data_offset;
+ au_dma += data_offset;
+ au_vaddr += data_offset;
- ret = delta_mjpeg_ipc_decode(pctx, &au);
+ ret = delta_mjpeg_ipc_decode(pctx, au_dma, au_dma + au_size - 1);
if (ret)
goto err;
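
delta_mjpeg_ipc_decode() now takes the access unit's DMA window as two dma_addr_t values instead of a by-value struct delta_au copy; the caller advances past the parsed JPEG header once and computes the inclusive end address, preserving the old arithmetic byte for byte. Reduced to its essentials, mirroring the hunk above:

  dma_addr_t au_dma  = pau->paddr;
  size_t     au_size = pau->size;

  au_dma += data_offset;                  /* skip the parsed header */
  ret = delta_mjpeg_ipc_decode(pctx, au_dma,
                               au_dma + au_size - 1);  /* inclusive end */
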
diff --git a/drivers/media/platform/st/sti/delta/delta-v4l2.c b/drivers/media/platform/st/sti/delta/delta-v4l2.c
index 196e6a40335d..6c1a53c771f7 100644
--- a/drivers/media/platform/st/sti/delta/delta-v4l2.c
+++ b/drivers/media/platform/st/sti/delta/delta-v4l2.c
@@ -24,7 +24,11 @@
#define DELTA_PREFIX "[---:----]"
-#define to_ctx(__fh) container_of(__fh, struct delta_ctx, fh)
+static inline struct delta_ctx *file_to_ctx(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct delta_ctx, fh);
+}
+
#define to_au(__vbuf) container_of(__vbuf, struct delta_au, vbuf)
#define to_frame(__vbuf) container_of(__vbuf, struct delta_frame, vbuf)
@@ -382,7 +386,7 @@ static int delta_open_decoder(struct delta_ctx *ctx, u32 streamformat,
static int delta_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_ctx *ctx = file_to_ctx(file);
struct delta_dev *delta = ctx->dev;
strscpy(cap->driver, DELTA_NAME, sizeof(cap->driver));
@@ -396,7 +400,7 @@ static int delta_querycap(struct file *file, void *priv,
static int delta_enum_fmt_stream(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_ctx *ctx = file_to_ctx(file);
struct delta_dev *delta = ctx->dev;
if (unlikely(f->index >= delta->nb_of_streamformats))
@@ -410,7 +414,7 @@ static int delta_enum_fmt_stream(struct file *file, void *priv,
static int delta_enum_fmt_frame(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_ctx *ctx = file_to_ctx(file);
struct delta_dev *delta = ctx->dev;
if (unlikely(f->index >= delta->nb_of_pixelformats))
@@ -424,7 +428,7 @@ static int delta_enum_fmt_frame(struct file *file, void *priv,
static int delta_g_fmt_stream(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_ctx *ctx = file_to_ctx(file);
struct delta_dev *delta = ctx->dev;
struct v4l2_pix_format *pix = &f->fmt.pix;
struct delta_streaminfo *streaminfo = &ctx->streaminfo;
@@ -452,7 +456,7 @@ static int delta_g_fmt_stream(struct file *file, void *fh,
static int delta_g_fmt_frame(struct file *file, void *fh, struct v4l2_format *f)
{
- struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_ctx *ctx = file_to_ctx(file);
struct delta_dev *delta = ctx->dev;
struct v4l2_pix_format *pix = &f->fmt.pix;
struct delta_frameinfo *frameinfo = &ctx->frameinfo;
@@ -491,7 +495,7 @@ static int delta_g_fmt_frame(struct file *file, void *fh, struct v4l2_format *f)
static int delta_try_fmt_stream(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_ctx *ctx = file_to_ctx(file);
struct delta_dev *delta = ctx->dev;
struct v4l2_pix_format *pix = &f->fmt.pix;
u32 streamformat = pix->pixelformat;
@@ -545,7 +549,7 @@ static int delta_try_fmt_stream(struct file *file, void *priv,
static int delta_try_fmt_frame(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_ctx *ctx = file_to_ctx(file);
struct delta_dev *delta = ctx->dev;
struct v4l2_pix_format *pix = &f->fmt.pix;
u32 pixelformat = pix->pixelformat;
@@ -605,7 +609,7 @@ static int delta_try_fmt_frame(struct file *file, void *priv,
static int delta_s_fmt_stream(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_ctx *ctx = file_to_ctx(file);
struct delta_dev *delta = ctx->dev;
struct vb2_queue *vq;
struct v4l2_pix_format *pix = &f->fmt.pix;
@@ -641,7 +645,7 @@ static int delta_s_fmt_stream(struct file *file, void *fh,
static int delta_s_fmt_frame(struct file *file, void *fh, struct v4l2_format *f)
{
- struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_ctx *ctx = file_to_ctx(file);
struct delta_dev *delta = ctx->dev;
const struct delta_dec *dec = ctx->dec;
struct v4l2_pix_format *pix = &f->fmt.pix;
@@ -721,7 +725,7 @@ static int delta_s_fmt_frame(struct file *file, void *fh, struct v4l2_format *f)
static int delta_g_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
- struct delta_ctx *ctx = to_ctx(fh);
+ struct delta_ctx *ctx = file_to_ctx(file);
struct delta_frameinfo *frameinfo = &ctx->frameinfo;
struct v4l2_rect crop;
@@ -803,7 +807,7 @@ static int delta_try_decoder_cmd(struct file *file, void *fh,
return 0;
}
-static int delta_decoder_stop_cmd(struct delta_ctx *ctx, void *fh)
+static int delta_decoder_stop_cmd(struct delta_ctx *ctx)
{
const struct delta_dec *dec = ctx->dec;
struct delta_dev *delta = ctx->dev;
@@ -866,14 +870,14 @@ delay_eos:
static int delta_decoder_cmd(struct file *file, void *fh,
struct v4l2_decoder_cmd *cmd)
{
- struct delta_ctx *ctx = to_ctx(fh);
+ struct delta_ctx *ctx = file_to_ctx(file);
int ret = 0;
ret = delta_try_decoder_cmd(file, fh, cmd);
if (ret)
return ret;
- return delta_decoder_stop_cmd(ctx, fh);
+ return delta_decoder_stop_cmd(ctx);
}
static int delta_subscribe_event(struct v4l2_fh *fh,
@@ -1633,8 +1637,7 @@ static int delta_open(struct file *file)
ctx->dev = delta;
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
INIT_WORK(&ctx->run_work, delta_run_work);
mutex_init(&ctx->lock);
@@ -1679,7 +1682,7 @@ static int delta_open(struct file *file)
return 0;
err_fh_del:
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
err:
@@ -1690,7 +1693,7 @@ err:
static int delta_release(struct file *file)
{
- struct delta_ctx *ctx = to_ctx(file->private_data);
+ struct delta_ctx *ctx = file_to_ctx(file);
struct delta_dev *delta = ctx->dev;
const struct delta_dec *dec = ctx->dec;
@@ -1707,7 +1710,7 @@ static int delta_release(struct file *file)
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
/* disable ST231 clocks */
diff --git a/drivers/media/platform/st/sti/hva/hva-v4l2.c b/drivers/media/platform/st/sti/hva/hva-v4l2.c
index 5366c0f92549..3581b73a99b8 100644
--- a/drivers/media/platform/st/sti/hva/hva-v4l2.c
+++ b/drivers/media/platform/st/sti/hva/hva-v4l2.c
@@ -36,7 +36,10 @@
#define to_type_str(type) (type == V4L2_BUF_TYPE_VIDEO_OUTPUT ? \
"frame" : "stream")
-#define fh_to_ctx(f) (container_of(f, struct hva_ctx, fh))
+static inline struct hva_ctx *file_to_ctx(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct hva_ctx, fh);
+}
/* registry of available encoders */
static const struct hva_enc *hva_encoders[] = {
@@ -254,7 +257,7 @@ static void hva_dbg_summary(struct hva_ctx *ctx)
static int hva_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_ctx *ctx = file_to_ctx(file);
struct hva_dev *hva = ctx_to_hdev(ctx);
strscpy(cap->driver, HVA_NAME, sizeof(cap->driver));
@@ -268,7 +271,7 @@ static int hva_querycap(struct file *file, void *priv,
static int hva_enum_fmt_stream(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_ctx *ctx = file_to_ctx(file);
struct hva_dev *hva = ctx_to_hdev(ctx);
if (unlikely(f->index >= hva->nb_of_streamformats))
@@ -282,7 +285,7 @@ static int hva_enum_fmt_stream(struct file *file, void *priv,
static int hva_enum_fmt_frame(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_ctx *ctx = file_to_ctx(file);
struct hva_dev *hva = ctx_to_hdev(ctx);
if (unlikely(f->index >= hva->nb_of_pixelformats))
@@ -295,7 +298,7 @@ static int hva_enum_fmt_frame(struct file *file, void *priv,
static int hva_g_fmt_stream(struct file *file, void *fh, struct v4l2_format *f)
{
- struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_ctx *ctx = file_to_ctx(file);
struct hva_streaminfo *streaminfo = &ctx->streaminfo;
f->fmt.pix.width = streaminfo->width;
@@ -314,7 +317,7 @@ static int hva_g_fmt_stream(struct file *file, void *fh, struct v4l2_format *f)
static int hva_g_fmt_frame(struct file *file, void *fh, struct v4l2_format *f)
{
- struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_ctx *ctx = file_to_ctx(file);
struct hva_frameinfo *frameinfo = &ctx->frameinfo;
f->fmt.pix.width = frameinfo->width;
@@ -335,7 +338,7 @@ static int hva_g_fmt_frame(struct file *file, void *fh, struct v4l2_format *f)
static int hva_try_fmt_stream(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_ctx *ctx = file_to_ctx(file);
struct device *dev = ctx_to_dev(ctx);
struct v4l2_pix_format *pix = &f->fmt.pix;
u32 streamformat = pix->pixelformat;
@@ -399,7 +402,7 @@ static int hva_try_fmt_stream(struct file *file, void *priv,
static int hva_try_fmt_frame(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_ctx *ctx = file_to_ctx(file);
struct device *dev = ctx_to_dev(ctx);
struct v4l2_pix_format *pix = &f->fmt.pix;
u32 pixelformat = pix->pixelformat;
@@ -449,7 +452,7 @@ static int hva_try_fmt_frame(struct file *file, void *priv,
static int hva_s_fmt_stream(struct file *file, void *fh, struct v4l2_format *f)
{
- struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_ctx *ctx = file_to_ctx(file);
struct device *dev = ctx_to_dev(ctx);
struct vb2_queue *vq;
int ret;
@@ -479,7 +482,7 @@ static int hva_s_fmt_stream(struct file *file, void *fh, struct v4l2_format *f)
static int hva_s_fmt_frame(struct file *file, void *fh, struct v4l2_format *f)
{
- struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_ctx *ctx = file_to_ctx(file);
struct device *dev = ctx_to_dev(ctx);
struct v4l2_pix_format *pix = &f->fmt.pix;
struct vb2_queue *vq;
@@ -517,7 +520,7 @@ static int hva_s_fmt_frame(struct file *file, void *fh, struct v4l2_format *f)
static int hva_g_parm(struct file *file, void *fh, struct v4l2_streamparm *sp)
{
- struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_ctx *ctx = file_to_ctx(file);
struct v4l2_fract *time_per_frame = &ctx->ctrls.time_per_frame;
if (sp->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
@@ -533,7 +536,7 @@ static int hva_g_parm(struct file *file, void *fh, struct v4l2_streamparm *sp)
static int hva_s_parm(struct file *file, void *fh, struct v4l2_streamparm *sp)
{
- struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_ctx *ctx = file_to_ctx(file);
struct v4l2_fract *time_per_frame = &ctx->ctrls.time_per_frame;
if (sp->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
@@ -553,7 +556,7 @@ static int hva_s_parm(struct file *file, void *fh, struct v4l2_streamparm *sp)
static int hva_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
- struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_ctx *ctx = file_to_ctx(file);
struct device *dev = ctx_to_dev(ctx);
if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
@@ -1171,8 +1174,7 @@ static int hva_open(struct file *file)
INIT_WORK(&ctx->run_work, hva_run_work);
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
ret = hva_ctrls_setup(ctx);
if (ret) {
@@ -1216,7 +1218,7 @@ static int hva_open(struct file *file)
err_ctrls:
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
err_fh:
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
out:
@@ -1225,7 +1227,7 @@ out:
static int hva_release(struct file *file)
{
- struct hva_ctx *ctx = fh_to_ctx(file->private_data);
+ struct hva_ctx *ctx = file_to_ctx(file);
struct hva_dev *hva = ctx_to_hdev(ctx);
struct device *dev = ctx_to_dev(ctx);
const struct hva_enc *enc = ctx->enc;
@@ -1247,7 +1249,7 @@ static int hva_release(struct file *file)
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
diff --git a/drivers/media/platform/st/sti/hva/hva.h b/drivers/media/platform/st/sti/hva/hva.h
index ba6b893416ec..1fe561082a74 100644
--- a/drivers/media/platform/st/sti/hva/hva.h
+++ b/drivers/media/platform/st/sti/hva/hva.h
@@ -13,8 +13,6 @@
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
-#define fh_to_ctx(f) (container_of(f, struct hva_ctx, fh))
-
#define hva_to_dev(h) (h->dev)
#define ctx_to_dev(c) (c->hva_dev->dev)
diff --git a/drivers/media/platform/st/stm32/dma2d/dma2d.c b/drivers/media/platform/st/stm32/dma2d/dma2d.c
index 48fa781aab06..468c247ba328 100644
--- a/drivers/media/platform/st/stm32/dma2d/dma2d.c
+++ b/drivers/media/platform/st/stm32/dma2d/dma2d.c
@@ -45,7 +45,10 @@
* whole of a destination image with a pixel format conversion.
*/
-#define fh2ctx(__fh) container_of(__fh, struct dma2d_ctx, fh)
+static inline struct dma2d_ctx *file2ctx(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct dma2d_ctx, fh);
+}
static const struct dma2d_fmt formats[] = {
{
@@ -301,8 +304,7 @@ static int dma2d_open(struct file *file)
}
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
dma2d_setup_ctrls(ctx);
@@ -318,13 +320,13 @@ static int dma2d_open(struct file *file)
static int dma2d_release(struct file *file)
{
struct dma2d_dev *dev = video_drvdata(file);
- struct dma2d_ctx *ctx = fh2ctx(file->private_data);
+ struct dma2d_ctx *ctx = file2ctx(file);
mutex_lock(&dev->mutex);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
mutex_unlock(&dev->mutex);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
@@ -341,7 +343,7 @@ static int vidioc_querycap(struct file *file, void *priv,
return 0;
}
-static int vidioc_enum_fmt(struct file *file, void *prv, struct v4l2_fmtdesc *f)
+static int vidioc_enum_fmt(struct file *file, void *priv, struct v4l2_fmtdesc *f)
{
if (f->index >= NUM_FORMATS)
return -EINVAL;
@@ -350,9 +352,9 @@ static int vidioc_enum_fmt(struct file *file, void *prv, struct v4l2_fmtdesc *f)
return 0;
}
-static int vidioc_g_fmt(struct file *file, void *prv, struct v4l2_format *f)
+static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct dma2d_ctx *ctx = prv;
+ struct dma2d_ctx *ctx = file2ctx(file);
struct vb2_queue *vq;
struct dma2d_frame *frm;
@@ -375,9 +377,9 @@ static int vidioc_g_fmt(struct file *file, void *prv, struct v4l2_format *f)
return 0;
}
-static int vidioc_try_fmt(struct file *file, void *prv, struct v4l2_format *f)
+static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct dma2d_ctx *ctx = prv;
+ struct dma2d_ctx *ctx = file2ctx(file);
struct dma2d_fmt *fmt;
enum v4l2_field *field;
u32 fourcc = f->fmt.pix.pixelformat;
@@ -418,9 +420,9 @@ static int vidioc_try_fmt(struct file *file, void *prv, struct v4l2_format *f)
return 0;
}
-static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
+static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct dma2d_ctx *ctx = prv;
+ struct dma2d_ctx *ctx = file2ctx(file);
struct vb2_queue *vq;
struct dma2d_frame *frm;
struct dma2d_fmt *fmt;
@@ -429,7 +431,7 @@ static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
/* Adjust all values accordingly to the hardware capabilities
* and chosen format.
*/
- ret = vidioc_try_fmt(file, prv, f);
+ ret = vidioc_try_fmt(file, priv, f);
if (ret)
return ret;
diff --git a/drivers/media/platform/st/stm32/stm32-csi.c b/drivers/media/platform/st/stm32/stm32-csi.c
index b69048144cc1..fd2b6dfbd44c 100644
--- a/drivers/media/platform/st/stm32/stm32-csi.c
+++ b/drivers/media/platform/st/stm32/stm32-csi.c
@@ -443,8 +443,7 @@ static void stm32_csi_phy_reg_write(struct stm32_csi_dev *csidev,
static int stm32_csi_start(struct stm32_csi_dev *csidev,
struct v4l2_subdev_state *state)
{
- struct media_pad *src_pad =
- &csidev->s_subdev->entity.pads[csidev->s_subdev_pad_nb];
+ struct media_pad *src_pad;
const struct stm32_csi_mbps_phy_reg *phy_regs = NULL;
struct v4l2_mbus_framefmt *sink_fmt;
const struct stm32_csi_fmts *fmt;
@@ -466,6 +465,7 @@ static int stm32_csi_start(struct stm32_csi_dev *csidev,
if (!csidev->s_subdev)
return -EIO;
+ src_pad = &csidev->s_subdev->entity.pads[csidev->s_subdev_pad_nb];
link_freq = v4l2_get_link_freq(src_pad,
fmt->bpp, 2 * csidev->num_lanes);
if (link_freq < 0)
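
The stm32-csi hunk is a plain use-before-check fix: src_pad was initialized from csidev->s_subdev in its declaration, dereferencing the pointer before the !csidev->s_subdev guard could run. The corrected ordering, reduced to its skeleton:

  struct media_pad *src_pad;              /* declare, don't dereference */

  if (!csidev->s_subdev)
          return -EIO;                    /* bail while it's still safe */

  src_pad = &csidev->s_subdev->entity.pads[csidev->s_subdev_pad_nb];
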
diff --git a/drivers/media/platform/st/stm32/stm32-dcmi.c b/drivers/media/platform/st/stm32/stm32-dcmi.c
index d94c61b8569d..13762861b769 100644
--- a/drivers/media/platform/st/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/st/stm32/stm32-dcmi.c
@@ -1701,8 +1701,8 @@ static int dcmi_framesizes_init(struct stm32_dcmi *dcmi)
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
.code = dcmi->sd_format->mbus_code,
};
- unsigned int ret;
unsigned int i;
+ int ret;
/* Allocate discrete framesizes array */
while (!v4l2_subdev_call(subdev, pad, enum_frame_size,
@@ -1808,8 +1808,8 @@ static int dcmi_graph_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_async_connection *asd)
{
struct stm32_dcmi *dcmi = notifier_to_dcmi(notifier);
- unsigned int ret;
int src_pad;
+ int ret;
dev_dbg(dcmi->dev, "Subdev \"%s\" bound\n", subdev->name);
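
Both stm32-dcmi hunks fix the same latent signedness bug: subdev calls return negative errnos, but the result was stored in an unsigned int, so any "ret < 0" style check is dead code and a propagated error becomes a large positive number. A two-line demonstration:

  unsigned int ret = -ENODEV;     /* wraps to 4294967277 on 32-bit int */

  if (ret < 0)                    /* always false: ret is unsigned */
          pr_err("never reached\n");
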
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_capture.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_capture.c
index 76356bc7f10e..65879f4802c0 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_capture.c
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_capture.c
@@ -711,7 +711,7 @@ static void sun6i_csi_capture_format_prepare(struct v4l2_format *format)
pix_format->xfer_func = V4L2_XFER_FUNC_DEFAULT;
}
-static int sun6i_csi_capture_querycap(struct file *file, void *private,
+static int sun6i_csi_capture_querycap(struct file *file, void *priv,
struct v4l2_capability *capability)
{
struct sun6i_csi_device *csi_dev = video_drvdata(file);
@@ -725,7 +725,7 @@ static int sun6i_csi_capture_querycap(struct file *file, void *private,
return 0;
}
-static int sun6i_csi_capture_enum_fmt(struct file *file, void *private,
+static int sun6i_csi_capture_enum_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *fmtdesc)
{
u32 index = fmtdesc->index;
@@ -738,7 +738,7 @@ static int sun6i_csi_capture_enum_fmt(struct file *file, void *private,
return 0;
}
-static int sun6i_csi_capture_g_fmt(struct file *file, void *private,
+static int sun6i_csi_capture_g_fmt(struct file *file, void *priv,
struct v4l2_format *format)
{
struct sun6i_csi_device *csi_dev = video_drvdata(file);
@@ -748,7 +748,7 @@ static int sun6i_csi_capture_g_fmt(struct file *file, void *private,
return 0;
}
-static int sun6i_csi_capture_s_fmt(struct file *file, void *private,
+static int sun6i_csi_capture_s_fmt(struct file *file, void *priv,
struct v4l2_format *format)
{
struct sun6i_csi_device *csi_dev = video_drvdata(file);
@@ -764,7 +764,7 @@ static int sun6i_csi_capture_s_fmt(struct file *file, void *private,
return 0;
}
-static int sun6i_csi_capture_try_fmt(struct file *file, void *private,
+static int sun6i_csi_capture_try_fmt(struct file *file, void *priv,
struct v4l2_format *format)
{
sun6i_csi_capture_format_prepare(format);
@@ -772,7 +772,7 @@ static int sun6i_csi_capture_try_fmt(struct file *file, void *private,
return 0;
}
-static int sun6i_csi_capture_enum_input(struct file *file, void *private,
+static int sun6i_csi_capture_enum_input(struct file *file, void *priv,
struct v4l2_input *input)
{
if (input->index != 0)
@@ -784,7 +784,7 @@ static int sun6i_csi_capture_enum_input(struct file *file, void *private,
return 0;
}
-static int sun6i_csi_capture_g_input(struct file *file, void *private,
+static int sun6i_csi_capture_g_input(struct file *file, void *priv,
unsigned int *index)
{
*index = 0;
@@ -792,7 +792,7 @@ static int sun6i_csi_capture_g_input(struct file *file, void *private,
return 0;
}
-static int sun6i_csi_capture_s_input(struct file *file, void *private,
+static int sun6i_csi_capture_s_input(struct file *file, void *priv,
unsigned int index)
{
if (index != 0)
diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
index 3e7f2df70408..eb519afb30ca 100644
--- a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
+++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
@@ -309,7 +309,7 @@ static void deinterlace_init(struct deinterlace_dev *dev)
static inline struct deinterlace_ctx *deinterlace_file2ctx(struct file *file)
{
- return container_of(file->private_data, struct deinterlace_ctx, fh);
+ return container_of(file_to_v4l2_fh(file), struct deinterlace_ctx, fh);
}
static bool deinterlace_check_format(u32 pixelformat)
@@ -730,7 +730,6 @@ static int deinterlace_open(struct file *file)
deinterlace_prepare_format(&ctx->dst_fmt);
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
ctx->dev = dev;
ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
@@ -740,7 +739,7 @@ static int deinterlace_open(struct file *file)
goto err_free;
}
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
mutex_unlock(&dev->dev_mutex);
@@ -756,12 +755,11 @@ err_free:
static int deinterlace_release(struct file *file)
{
struct deinterlace_dev *dev = video_drvdata(file);
- struct deinterlace_ctx *ctx = container_of(file->private_data,
- struct deinterlace_ctx, fh);
+ struct deinterlace_ctx *ctx = deinterlace_file2ctx(file);
mutex_lock(&dev->dev_mutex);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
diff --git a/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c b/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c
index abd10b218aa1..89992feaab60 100644
--- a/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c
+++ b/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c
@@ -170,7 +170,7 @@ static irqreturn_t rotate_irq(int irq, void *data)
static inline struct rotate_ctx *rotate_file2ctx(struct file *file)
{
- return container_of(file->private_data, struct rotate_ctx, fh);
+ return container_of(file_to_v4l2_fh(file), struct rotate_ctx, fh);
}
static void rotate_prepare_format(struct v4l2_pix_format *pix_fmt)
@@ -659,7 +659,6 @@ static int rotate_open(struct file *file)
rotate_set_cap_format(ctx, &ctx->dst_fmt, ctx->rotate);
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
ctx->dev = dev;
ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
@@ -669,7 +668,7 @@ static int rotate_open(struct file *file)
goto err_free;
}
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
ret = rotate_setup_ctrls(ctx);
if (ret)
@@ -691,13 +690,12 @@ err_free:
static int rotate_release(struct file *file)
{
struct rotate_dev *dev = video_drvdata(file);
- struct rotate_ctx *ctx = container_of(file->private_data,
- struct rotate_ctx, fh);
+ struct rotate_ctx *ctx = rotate_file2ctx(file);
mutex_lock(&dev->dev_mutex);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
diff --git a/drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c b/drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c
index 7af6765532e3..b7d278b3889f 100644
--- a/drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c
+++ b/drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c
@@ -459,7 +459,7 @@ static bool port_no_link(struct snps_hdmirx_dev *hdmirx_dev)
return !tx_5v_power_present(hdmirx_dev);
}
-static int hdmirx_query_dv_timings(struct file *file, void *_fh,
+static int hdmirx_query_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *timings)
{
struct hdmirx_stream *stream = video_drvdata(file);
@@ -751,7 +751,7 @@ static int hdmirx_dv_timings_cap(struct file *file, void *fh,
return 0;
}
-static int hdmirx_enum_dv_timings(struct file *file, void *_fh,
+static int hdmirx_enum_dv_timings(struct file *file, void *priv,
struct v4l2_enum_dv_timings *timings)
{
return v4l2_enum_dv_timings_cap(timings, &hdmirx_timings_cap, NULL, NULL);
@@ -1323,7 +1323,7 @@ static int hdmirx_g_fmt_vid_cap_mplane(struct file *file, void *fh,
return 0;
}
-static int hdmirx_g_dv_timings(struct file *file, void *_fh,
+static int hdmirx_g_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *timings)
{
struct hdmirx_stream *stream = video_drvdata(file);
@@ -1339,7 +1339,7 @@ static int hdmirx_g_dv_timings(struct file *file, void *_fh,
return 0;
}
-static int hdmirx_s_dv_timings(struct file *file, void *_fh,
+static int hdmirx_s_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *timings)
{
struct hdmirx_stream *stream = video_drvdata(file);
diff --git a/drivers/media/platform/synopsys/hdmirx/snps_hdmirx.h b/drivers/media/platform/synopsys/hdmirx/snps_hdmirx.h
index 220ab99ca611..b13f58e31944 100644
--- a/drivers/media/platform/synopsys/hdmirx/snps_hdmirx.h
+++ b/drivers/media/platform/synopsys/hdmirx/snps_hdmirx.h
@@ -8,10 +8,12 @@
#ifndef DW_HDMIRX_H
#define DW_HDMIRX_H
+#include <linux/bitfield.h>
#include <linux/bitops.h>
+#include <linux/hw_bitfield.h>
-#define UPDATE(x, h, l) (((x) << (l)) & GENMASK((h), (l)))
-#define HIWORD_UPDATE(v, h, l) (((v) << (l)) | (GENMASK((h), (l)) << 16))
+#define UPDATE(x, h, l) FIELD_PREP(GENMASK((h), (l)), (x))
+#define HIWORD_UPDATE(v, h, l) FIELD_PREP_WM16(GENMASK((h), (l)), (v))
/* SYS_GRF */
#define SYS_GRF_SOC_CON1 0x0304
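
The snps_hdmirx header trades open-coded shift/mask arithmetic for the generic helpers: FIELD_PREP() from <linux/bitfield.h>, and FIELD_PREP_WM16() from <linux/hw_bitfield.h> for "hiword mask" registers, which carry the value in the low 16 bits and the bits-being-written mask in the high 16 bits so the hardware can apply partial updates. A worked expansion for a field occupying bits 5:4, with the constants computed by hand for illustration:

  /* Old macro:  HIWORD_UPDATE(1, 5, 4)
   *   value:  1 << 4              = 0x00000010
   *   mask:   GENMASK(5, 4) << 16 = 0x00300000
   *   result:                       0x00300010
   * New form produces the same word: */
  u32 reg = FIELD_PREP_WM16(GENMASK(5, 4), 1);
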
diff --git a/drivers/media/platform/ti/Kconfig b/drivers/media/platform/ti/Kconfig
index bab998c4179a..3bc4aa35887e 100644
--- a/drivers/media/platform/ti/Kconfig
+++ b/drivers/media/platform/ti/Kconfig
@@ -67,7 +67,8 @@ config VIDEO_TI_J721E_CSI2RX
tristate "TI J721E CSI2RX wrapper layer driver"
depends on VIDEO_DEV && VIDEO_V4L2_SUBDEV_API
depends on MEDIA_SUPPORT && MEDIA_CONTROLLER
- depends on (PHY_CADENCE_DPHY_RX && VIDEO_CADENCE_CSI2RX) || COMPILE_TEST
+ depends on VIDEO_CADENCE_CSI2RX
+ depends on PHY_CADENCE_DPHY_RX || COMPILE_TEST
depends on ARCH_K3 || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
select V4L2_FWNODE
diff --git a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
index b628d6e081db..b75aa363d1bf 100644
--- a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
+++ b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
@@ -13,7 +13,9 @@
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <media/cadence/cdns-csi2rx.h>
#include <media/mipi-csi2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
@@ -28,6 +30,7 @@
#define SHIM_DMACNTX 0x20
#define SHIM_DMACNTX_EN BIT(31)
#define SHIM_DMACNTX_YUV422 GENMASK(27, 26)
+#define SHIM_DMACNTX_DUAL_PCK_CFG BIT(24)
#define SHIM_DMACNTX_SIZE GENMASK(21, 20)
#define SHIM_DMACNTX_FMT GENMASK(5, 0)
#define SHIM_DMACNTX_YUV422_MODE_11 3
@@ -39,6 +42,7 @@
#define SHIM_PSI_CFG0_SRC_TAG GENMASK(15, 0)
#define SHIM_PSI_CFG0_DST_TAG GENMASK(31, 16)
+#define TI_CSI2RX_MAX_PIX_PER_CLK 4
#define PSIL_WORD_SIZE_BYTES 16
/*
* There are no hard limits on the width or height. The DMA engine can handle
@@ -52,6 +56,8 @@
#define DRAIN_TIMEOUT_MS 50
#define DRAIN_BUFFER_SIZE SZ_32K
+#define CSI2RX_BRIDGE_SOURCE_PAD 1
+
struct ti_csi2rx_fmt {
u32 fourcc; /* Four character code. */
u32 code; /* Mbus code. */
@@ -107,6 +113,7 @@ struct ti_csi2rx_dev {
struct v4l2_format v_fmt;
struct ti_csi2rx_dma dma;
u32 sequence;
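+	/* Pixels per clock negotiated with the bridge (1..TI_CSI2RX_MAX_PIX_PER_CLK) */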
+ u8 pix_per_clk;
};
static const struct ti_csi2rx_fmt ti_csi2rx_formats[] = {
@@ -299,7 +306,7 @@ static int ti_csi2rx_enum_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
-static int ti_csi2rx_g_fmt_vid_cap(struct file *file, void *prov,
+static int ti_csi2rx_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct ti_csi2rx_dev *csi = video_drvdata(file);
@@ -426,8 +433,9 @@ static int csi_async_notifier_complete(struct v4l2_async_notifier *notifier)
if (ret)
return ret;
- ret = v4l2_create_fwnode_links_to_pad(csi->source, &csi->pad,
- MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
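+	/* Link the bridge's source pad straight to the video device's sink pad */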
+ ret = media_create_pad_link(&csi->source->entity, CSI2RX_BRIDGE_SOURCE_PAD,
+ &vdev->entity, csi->pad.index,
+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
if (ret) {
video_unregister_device(vdev);
@@ -450,25 +458,23 @@ static int ti_csi2rx_notifier_register(struct ti_csi2rx_dev *csi)
{
struct fwnode_handle *fwnode;
struct v4l2_async_connection *asc;
- struct device_node *node;
int ret;
- node = of_get_child_by_name(csi->dev->of_node, "csi-bridge");
- if (!node)
+ fwnode = fwnode_get_named_child_node(csi->dev->fwnode, "csi-bridge");
+ if (!fwnode)
return -EINVAL;
- fwnode = of_fwnode_handle(node);
- if (!fwnode) {
- of_node_put(node);
- return -EINVAL;
- }
-
v4l2_async_nf_init(&csi->notifier, &csi->v4l2_dev);
csi->notifier.ops = &csi_async_notifier_ops;
asc = v4l2_async_nf_add_fwnode(&csi->notifier, fwnode,
struct v4l2_async_connection);
- of_node_put(node);
+	/*
+	 * Calling v4l2_async_nf_add_fwnode() takes its own reference,
+	 * so drop the one we got from fwnode_get_named_child_node().
+	 */
+ fwnode_handle_put(fwnode);
+
if (IS_ERR(asc)) {
v4l2_async_nf_cleanup(&csi->notifier);
return PTR_ERR(asc);
@@ -483,6 +489,26 @@ static int ti_csi2rx_notifier_register(struct ti_csi2rx_dev *csi)
return 0;
}
+/* Request maximum possible pixels per clock from the bridge */
+static void ti_csi2rx_request_max_ppc(struct ti_csi2rx_dev *csi)
+{
+ u8 ppc = TI_CSI2RX_MAX_PIX_PER_CLK;
+ struct media_pad *pad;
+ int ret;
+
+ pad = media_entity_remote_source_pad_unique(&csi->vdev.entity);
+ if (IS_ERR(pad))
+ return;
+
+ ret = cdns_csi2rx_negotiate_ppc(csi->source, pad->index, &ppc);
+ if (ret) {
+ dev_warn(csi->dev, "NUM_PIXELS negotiation failed: %d\n", ret);
+ csi->pix_per_clk = 1;
+ } else {
+ csi->pix_per_clk = ppc;
+ }
+}
+
static void ti_csi2rx_setup_shim(struct ti_csi2rx_dev *csi)
{
const struct ti_csi2rx_fmt *fmt;
@@ -494,6 +520,9 @@ static void ti_csi2rx_setup_shim(struct ti_csi2rx_dev *csi)
reg = SHIM_CNTL_PIX_RST;
writel(reg, csi->shim + SHIM_CNTL);
+	/* Negotiate pixels per clock with the source */
+ ti_csi2rx_request_max_ppc(csi);
+
reg = SHIM_DMACNTX_EN;
reg |= FIELD_PREP(SHIM_DMACNTX_FMT, fmt->csi_dt);
@@ -522,14 +551,18 @@ static void ti_csi2rx_setup_shim(struct ti_csi2rx_dev *csi)
case V4L2_PIX_FMT_YVYU:
reg |= FIELD_PREP(SHIM_DMACNTX_YUV422,
SHIM_DMACNTX_YUV422_MODE_11);
+ /* Multiple pixels are handled differently for packed YUV */
+ if (csi->pix_per_clk == 2)
+ reg |= SHIM_DMACNTX_DUAL_PCK_CFG;
+ reg |= FIELD_PREP(SHIM_DMACNTX_SIZE, fmt->size);
break;
default:
- /* Ignore if not YUV 4:2:2 */
+		/* By default, widen the size field with the pixel count: +0/+1/+2 for 1/2/4 pixels per clock */
+ reg |= FIELD_PREP(SHIM_DMACNTX_SIZE,
+ fmt->size + (csi->pix_per_clk >> 1));
break;
}
- reg |= FIELD_PREP(SHIM_DMACNTX_SIZE, fmt->size);
-
writel(reg, csi->shim + SHIM_DMACNTX);
reg = FIELD_PREP(SHIM_PSI_CFG0_SRC_TAG, 0) |
@@ -1120,7 +1153,7 @@ static int ti_csi2rx_probe(struct platform_device *pdev)
if (ret)
goto err_vb2q;
- ret = of_platform_populate(csi->dev->of_node, NULL, NULL, csi->dev);
+ ret = devm_of_platform_populate(csi->dev);
if (ret) {
dev_err(csi->dev, "Failed to create children: %d\n", ret);
goto err_subdev;
diff --git a/drivers/media/platform/ti/omap/omap_vout.c b/drivers/media/platform/ti/omap/omap_vout.c
index a87d5030ac35..22782e9f1f4e 100644
--- a/drivers/media/platform/ti/omap/omap_vout.c
+++ b/drivers/media/platform/ti/omap/omap_vout.c
@@ -1236,7 +1236,7 @@ static int vidioc_g_fbuf(struct file *file, void *fh,
return 0;
}
-static int vidioc_enum_output(struct file *file, void *priv_fh,
+static int vidioc_enum_output(struct file *file, void *priv,
struct v4l2_output *out)
{
if (out->index)
@@ -1246,13 +1246,13 @@ static int vidioc_enum_output(struct file *file, void *priv_fh,
return 0;
}
-static int vidioc_g_output(struct file *file, void *priv_fh, unsigned int *i)
+static int vidioc_g_output(struct file *file, void *priv, unsigned int *i)
{
*i = 0;
return 0;
}
-static int vidioc_s_output(struct file *file, void *priv_fh, unsigned int i)
+static int vidioc_s_output(struct file *file, void *priv, unsigned int i)
{
return i ? -EINVAL : 0;
}
diff --git a/drivers/media/platform/ti/omap3isp/ispccdc.c b/drivers/media/platform/ti/omap3isp/ispccdc.c
index 7d0c723dcd11..55ee14e8b449 100644
--- a/drivers/media/platform/ti/omap3isp/ispccdc.c
+++ b/drivers/media/platform/ti/omap3isp/ispccdc.c
@@ -1873,12 +1873,6 @@ static int ccdc_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
return v4l2_event_subscribe(fh, sub, OMAP3ISP_CCDC_NEVENTS, NULL);
}
-static int ccdc_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
- struct v4l2_event_subscription *sub)
-{
- return v4l2_event_unsubscribe(fh, sub);
-}
-
/*
* ccdc_set_stream - Enable/Disable streaming on the CCDC module
* @sd: ISP CCDC V4L2 subdevice
@@ -2487,7 +2481,7 @@ static int ccdc_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
static const struct v4l2_subdev_core_ops ccdc_v4l2_core_ops = {
.ioctl = ccdc_ioctl,
.subscribe_event = ccdc_subscribe_event,
- .unsubscribe_event = ccdc_unsubscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
};
/* V4L2 subdev video operations */
diff --git a/drivers/media/platform/ti/omap3isp/isph3a_aewb.c b/drivers/media/platform/ti/omap3isp/isph3a_aewb.c
index e6c54c4bbfca..ae93da9c4542 100644
--- a/drivers/media/platform/ti/omap3isp/isph3a_aewb.c
+++ b/drivers/media/platform/ti/omap3isp/isph3a_aewb.c
@@ -269,7 +269,7 @@ static const struct ispstat_ops h3a_aewb_ops = {
static const struct v4l2_subdev_core_ops h3a_aewb_subdev_core_ops = {
.ioctl = h3a_aewb_ioctl,
.subscribe_event = omap3isp_stat_subscribe_event,
- .unsubscribe_event = omap3isp_stat_unsubscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
};
static const struct v4l2_subdev_video_ops h3a_aewb_subdev_video_ops = {
diff --git a/drivers/media/platform/ti/omap3isp/isph3a_af.c b/drivers/media/platform/ti/omap3isp/isph3a_af.c
index de7b116d0122..ca478da4ad34 100644
--- a/drivers/media/platform/ti/omap3isp/isph3a_af.c
+++ b/drivers/media/platform/ti/omap3isp/isph3a_af.c
@@ -334,7 +334,7 @@ static const struct ispstat_ops h3a_af_ops = {
static const struct v4l2_subdev_core_ops h3a_af_subdev_core_ops = {
.ioctl = h3a_af_ioctl,
.subscribe_event = omap3isp_stat_subscribe_event,
- .unsubscribe_event = omap3isp_stat_unsubscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
};
static const struct v4l2_subdev_video_ops h3a_af_subdev_video_ops = {
diff --git a/drivers/media/platform/ti/omap3isp/isphist.c b/drivers/media/platform/ti/omap3isp/isphist.c
index 0ef78aace6da..7851ad13d84f 100644
--- a/drivers/media/platform/ti/omap3isp/isphist.c
+++ b/drivers/media/platform/ti/omap3isp/isphist.c
@@ -456,7 +456,7 @@ static const struct ispstat_ops hist_ops = {
static const struct v4l2_subdev_core_ops hist_subdev_core_ops = {
.ioctl = hist_ioctl,
.subscribe_event = omap3isp_stat_subscribe_event,
- .unsubscribe_event = omap3isp_stat_unsubscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
};
static const struct v4l2_subdev_video_ops hist_subdev_video_ops = {
diff --git a/drivers/media/platform/ti/omap3isp/ispstat.c b/drivers/media/platform/ti/omap3isp/ispstat.c
index d3da68408ecb..07bd62a93d99 100644
--- a/drivers/media/platform/ti/omap3isp/ispstat.c
+++ b/drivers/media/platform/ti/omap3isp/ispstat.c
@@ -1010,13 +1010,6 @@ int omap3isp_stat_subscribe_event(struct v4l2_subdev *subdev,
return v4l2_event_subscribe(fh, sub, STAT_NEVENTS, NULL);
}
-int omap3isp_stat_unsubscribe_event(struct v4l2_subdev *subdev,
- struct v4l2_fh *fh,
- struct v4l2_event_subscription *sub)
-{
- return v4l2_event_unsubscribe(fh, sub);
-}
-
void omap3isp_stat_unregister_entities(struct ispstat *stat)
{
v4l2_device_unregister_subdev(&stat->subdev);
diff --git a/drivers/media/platform/ti/omap3isp/ispstat.h b/drivers/media/platform/ti/omap3isp/ispstat.h
index b548e617cf62..59842c4a9c33 100644
--- a/drivers/media/platform/ti/omap3isp/ispstat.h
+++ b/drivers/media/platform/ti/omap3isp/ispstat.h
@@ -135,9 +135,6 @@ void omap3isp_stat_cleanup(struct ispstat *stat);
int omap3isp_stat_subscribe_event(struct v4l2_subdev *subdev,
struct v4l2_fh *fh,
struct v4l2_event_subscription *sub);
-int omap3isp_stat_unsubscribe_event(struct v4l2_subdev *subdev,
- struct v4l2_fh *fh,
- struct v4l2_event_subscription *sub);
int omap3isp_stat_s_stream(struct v4l2_subdev *subdev, int enable);
int omap3isp_stat_busy(struct ispstat *stat);
diff --git a/drivers/media/platform/ti/omap3isp/ispvideo.c b/drivers/media/platform/ti/omap3isp/ispvideo.c
index 78e30298c7ad..0e7f0bf2b346 100644
--- a/drivers/media/platform/ti/omap3isp/ispvideo.c
+++ b/drivers/media/platform/ti/omap3isp/ispvideo.c
@@ -657,7 +657,7 @@ isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
static int
isp_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
- struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video_fh *vfh = file_to_isp_video_fh(file);
struct isp_video *video = video_drvdata(file);
if (format->type != video->type)
@@ -673,7 +673,7 @@ isp_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
static int
isp_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
- struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video_fh *vfh = file_to_isp_video_fh(file);
struct isp_video *video = video_drvdata(file);
struct v4l2_mbus_framefmt fmt;
@@ -858,7 +858,7 @@ isp_video_set_selection(struct file *file, void *fh, struct v4l2_selection *sel)
static int
isp_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
{
- struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video_fh *vfh = file_to_isp_video_fh(file);
struct isp_video *video = video_drvdata(file);
if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
@@ -876,7 +876,7 @@ isp_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
static int
isp_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a)
{
- struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video_fh *vfh = file_to_isp_video_fh(file);
struct isp_video *video = video_drvdata(file);
if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
@@ -894,7 +894,7 @@ isp_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a)
static int
isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
{
- struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video_fh *vfh = file_to_isp_video_fh(file);
struct isp_video *video = video_drvdata(file);
int ret;
@@ -908,7 +908,7 @@ isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
static int
isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
- struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video_fh *vfh = file_to_isp_video_fh(file);
struct isp_video *video = video_drvdata(file);
int ret;
@@ -922,7 +922,7 @@ isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
static int
isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
- struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video_fh *vfh = file_to_isp_video_fh(file);
struct isp_video *video = video_drvdata(file);
int ret;
@@ -936,7 +936,7 @@ isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
static int
isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
- struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video_fh *vfh = file_to_isp_video_fh(file);
struct isp_video *video = video_drvdata(file);
int ret;
@@ -1074,7 +1074,7 @@ static int isp_video_check_external_subdevs(struct isp_video *video,
static int
isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
- struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video_fh *vfh = file_to_isp_video_fh(file);
struct isp_video *video = video_drvdata(file);
enum isp_pipeline_state state;
struct isp_pipeline *pipe;
@@ -1180,7 +1180,7 @@ err_enum_init:
static int
isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
{
- struct isp_video_fh *vfh = to_isp_video_fh(fh);
+ struct isp_video_fh *vfh = file_to_isp_video_fh(file);
struct isp_video *video = video_drvdata(file);
struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
enum isp_pipeline_state state;
@@ -1297,7 +1297,7 @@ static int isp_video_open(struct file *file)
return -ENOMEM;
v4l2_fh_init(&handle->vfh, &video->video);
- v4l2_fh_add(&handle->vfh);
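+	/* v4l2_fh_add() now takes the file and binds the fh to it, replacing the manual file->private_data assignment below */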
+ v4l2_fh_add(&handle->vfh, file);
/* If this is the first user, initialise the pipeline. */
if (omap3isp_get(video->isp) == NULL) {
@@ -1333,11 +1333,10 @@ static int isp_video_open(struct file *file)
handle->timeperframe.denominator = 1;
handle->video = video;
- file->private_data = &handle->vfh;
done:
if (ret < 0) {
- v4l2_fh_del(&handle->vfh);
+ v4l2_fh_del(&handle->vfh, file);
v4l2_fh_exit(&handle->vfh);
kfree(handle);
}
@@ -1348,8 +1347,8 @@ done:
static int isp_video_release(struct file *file)
{
struct isp_video *video = video_drvdata(file);
- struct v4l2_fh *vfh = file->private_data;
- struct isp_video_fh *handle = to_isp_video_fh(vfh);
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
+ struct isp_video_fh *handle = file_to_isp_video_fh(file);
/* Disable streaming and free the buffers queue resources. */
isp_video_streamoff(file, vfh, video->type);
@@ -1361,10 +1360,9 @@ static int isp_video_release(struct file *file)
v4l2_pipeline_pm_put(&video->video.entity);
/* Release the file handle. */
- v4l2_fh_del(vfh);
+ v4l2_fh_del(vfh, file);
v4l2_fh_exit(vfh);
kfree(handle);
- file->private_data = NULL;
omap3isp_put(video->isp);
@@ -1373,7 +1371,7 @@ static int isp_video_release(struct file *file)
static __poll_t isp_video_poll(struct file *file, poll_table *wait)
{
- struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
+ struct isp_video_fh *vfh = file_to_isp_video_fh(file);
struct isp_video *video = video_drvdata(file);
__poll_t ret;
@@ -1386,7 +1384,7 @@ static __poll_t isp_video_poll(struct file *file, poll_table *wait)
static int isp_video_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
+ struct isp_video_fh *vfh = file_to_isp_video_fh(file);
return vb2_mmap(&vfh->queue, vma);
}
diff --git a/drivers/media/platform/ti/omap3isp/ispvideo.h b/drivers/media/platform/ti/omap3isp/ispvideo.h
index 1d23df576e6b..537da59cff62 100644
--- a/drivers/media/platform/ti/omap3isp/ispvideo.h
+++ b/drivers/media/platform/ti/omap3isp/ispvideo.h
@@ -194,7 +194,11 @@ struct isp_video_fh {
struct v4l2_fract timeperframe;
};
-#define to_isp_video_fh(fh) container_of(fh, struct isp_video_fh, vfh)
+static inline struct isp_video_fh *file_to_isp_video_fh(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct isp_video_fh, vfh);
+}
+
#define isp_video_queue_to_isp_video_fh(q) \
container_of(q, struct isp_video_fh, queue)
diff --git a/drivers/media/platform/ti/vpe/vpe.c b/drivers/media/platform/ti/vpe/vpe.c
index 636d76ecebcd..6029d4e8e0bd 100644
--- a/drivers/media/platform/ti/vpe/vpe.c
+++ b/drivers/media/platform/ti/vpe/vpe.c
@@ -422,6 +422,10 @@ struct vpe_ctx {
unsigned int src_mv_buf_selector;
};
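+/* Retrieve the vpe_ctx wrapping the v4l2_fh bound to this file */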
+static inline struct vpe_ctx *to_vpe_ctx(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct vpe_ctx, fh);
+}
/*
* M2M devices get 2 queues.
@@ -1562,7 +1566,7 @@ static int vpe_enum_fmt(struct file *file, void *priv,
static int vpe_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
- struct vpe_ctx *ctx = file->private_data;
+ struct vpe_ctx *ctx = to_vpe_ctx(file);
struct vb2_queue *vq;
struct vpe_q_data *q_data;
@@ -1719,7 +1723,7 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
static int vpe_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct vpe_ctx *ctx = file->private_data;
+ struct vpe_ctx *ctx = to_vpe_ctx(file);
struct vpe_fmt *fmt = find_format(f);
if (V4L2_TYPE_IS_OUTPUT(f->type))
@@ -1783,7 +1787,7 @@ static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
static int vpe_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
int ret;
- struct vpe_ctx *ctx = file->private_data;
+ struct vpe_ctx *ctx = to_vpe_ctx(file);
ret = vpe_try_fmt(file, priv, f);
if (ret)
@@ -1871,7 +1875,7 @@ static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s)
static int vpe_g_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
- struct vpe_ctx *ctx = file->private_data;
+ struct vpe_ctx *ctx = to_vpe_ctx(file);
struct vpe_q_data *q_data;
struct v4l2_pix_format_mplane *pix;
bool use_c_rect = false;
@@ -1935,7 +1939,7 @@ static int vpe_g_selection(struct file *file, void *fh,
static int vpe_s_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
- struct vpe_ctx *ctx = file->private_data;
+ struct vpe_ctx *ctx = to_vpe_ctx(file);
struct vpe_q_data *q_data;
struct v4l2_selection sel = *s;
int ret;
@@ -2306,7 +2310,6 @@ static int vpe_open(struct file *file)
init_adb_hdrs(ctx);
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = ctx;
hdl = &ctx->hdl;
v4l2_ctrl_handler_init(hdl, 1);
@@ -2360,7 +2363,7 @@ static int vpe_open(struct file *file)
goto exit_fh;
}
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
/*
* for now, just report the creation of the first instance, we can later
@@ -2400,7 +2403,7 @@ free_ctx:
static int vpe_release(struct file *file)
{
struct vpe_dev *dev = video_drvdata(file);
- struct vpe_ctx *ctx = file->private_data;
+ struct vpe_ctx *ctx = to_vpe_ctx(file);
vpe_dbg(dev, "releasing instance %p\n", ctx);
@@ -2418,7 +2421,7 @@ static int vpe_release(struct file *file)
vpdma_free_desc_buf(&ctx->sc_coeff_v);
vpdma_free_desc_buf(&ctx->sc_coeff_h);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->hdl);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
diff --git a/drivers/media/platform/verisilicon/hantro.h b/drivers/media/platform/verisilicon/hantro.h
index 81328c63b796..e0fdc4535b2d 100644
--- a/drivers/media/platform/verisilicon/hantro.h
+++ b/drivers/media/platform/verisilicon/hantro.h
@@ -382,9 +382,9 @@ extern int hantro_debug;
pr_err("%s:%d: " fmt, __func__, __LINE__, ##args)
/* Structure access helpers. */
-static __always_inline struct hantro_ctx *fh_to_ctx(struct v4l2_fh *fh)
+static __always_inline struct hantro_ctx *file_to_ctx(struct file *filp)
{
- return container_of(fh, struct hantro_ctx, fh);
+ return container_of(file_to_v4l2_fh(filp), struct hantro_ctx, fh);
}
/* Register accessors. */
diff --git a/drivers/media/platform/verisilicon/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c
index 8542238e0fb1..e0c11fe8b55c 100644
--- a/drivers/media/platform/verisilicon/hantro_drv.c
+++ b/drivers/media/platform/verisilicon/hantro_drv.c
@@ -89,7 +89,6 @@ static void hantro_job_finish(struct hantro_dev *vpu,
struct hantro_ctx *ctx,
enum vb2_buffer_state result)
{
- pm_runtime_mark_last_busy(vpu->dev);
pm_runtime_put_autosuspend(vpu->dev);
clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks);
@@ -663,8 +662,7 @@ static int hantro_open(struct file *filp)
}
v4l2_fh_init(&ctx->fh, vdev);
- filp->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, filp);
hantro_reset_fmts(ctx);
@@ -678,7 +676,7 @@ static int hantro_open(struct file *filp)
return 0;
err_fh_free:
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, filp);
v4l2_fh_exit(&ctx->fh);
err_ctx_free:
kfree(ctx);
@@ -687,15 +685,14 @@ err_ctx_free:
static int hantro_release(struct file *filp)
{
- struct hantro_ctx *ctx =
- container_of(filp->private_data, struct hantro_ctx, fh);
+ struct hantro_ctx *ctx = file_to_ctx(filp);
/*
* No need for extra locking because this was the last reference
* to this file.
*/
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, filp);
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
kfree(ctx);
@@ -918,6 +915,8 @@ static int hantro_add_func(struct hantro_dev *vpu, unsigned int funcid)
vpu->decoder = func;
v4l2_disable_ioctl(vfd, VIDIOC_TRY_ENCODER_CMD);
v4l2_disable_ioctl(vfd, VIDIOC_ENCODER_CMD);
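+		/* Selection (crop) ioctls are encoder-only; hide them on the decoder node */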
+ v4l2_disable_ioctl(vfd, VIDIOC_G_SELECTION);
+ v4l2_disable_ioctl(vfd, VIDIOC_S_SELECTION);
}
video_set_drvdata(vfd, vpu);
diff --git a/drivers/media/platform/verisilicon/hantro_v4l2.c b/drivers/media/platform/verisilicon/hantro_v4l2.c
index 7c3515cf7d64..fcf3bd9bcda2 100644
--- a/drivers/media/platform/verisilicon/hantro_v4l2.c
+++ b/drivers/media/platform/verisilicon/hantro_v4l2.c
@@ -185,7 +185,7 @@ static int vidioc_querycap(struct file *file, void *priv,
static int vidioc_enum_framesizes(struct file *file, void *priv,
struct v4l2_frmsizeenum *fsize)
{
- struct hantro_ctx *ctx = fh_to_ctx(priv);
+ struct hantro_ctx *ctx = file_to_ctx(file);
const struct hantro_fmt *fmt;
fmt = hantro_find_format(ctx, fsize->pixel_format);
@@ -217,7 +217,7 @@ static int vidioc_enum_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *f, bool capture)
{
- struct hantro_ctx *ctx = fh_to_ctx(priv);
+ struct hantro_ctx *ctx = file_to_ctx(file);
const struct hantro_fmt *fmt, *formats;
unsigned int num_fmts, i, j = 0;
bool skip_mode_none, enum_all_formats;
@@ -297,7 +297,7 @@ static int vidioc_g_fmt_out_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
- struct hantro_ctx *ctx = fh_to_ctx(priv);
+ struct hantro_ctx *ctx = file_to_ctx(file);
vpu_debug(4, "f->type = %d\n", f->type);
@@ -310,7 +310,7 @@ static int vidioc_g_fmt_cap_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
- struct hantro_ctx *ctx = fh_to_ctx(priv);
+ struct hantro_ctx *ctx = file_to_ctx(file);
vpu_debug(4, "f->type = %d\n", f->type);
@@ -398,13 +398,13 @@ static int hantro_try_fmt(const struct hantro_ctx *ctx,
static int vidioc_try_fmt_cap_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
- return hantro_try_fmt(fh_to_ctx(priv), &f->fmt.pix_mp, f->type);
+ return hantro_try_fmt(file_to_ctx(file), &f->fmt.pix_mp, f->type);
}
static int vidioc_try_fmt_out_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
- return hantro_try_fmt(fh_to_ctx(priv), &f->fmt.pix_mp, f->type);
+ return hantro_try_fmt(file_to_ctx(file), &f->fmt.pix_mp, f->type);
}
static void
@@ -648,23 +648,22 @@ static int hantro_set_fmt_cap(struct hantro_ctx *ctx,
static int
vidioc_s_fmt_out_mplane(struct file *file, void *priv, struct v4l2_format *f)
{
- return hantro_set_fmt_out(fh_to_ctx(priv), &f->fmt.pix_mp, HANTRO_AUTO_POSTPROC);
+ return hantro_set_fmt_out(file_to_ctx(file), &f->fmt.pix_mp, HANTRO_AUTO_POSTPROC);
}
static int
vidioc_s_fmt_cap_mplane(struct file *file, void *priv, struct v4l2_format *f)
{
- return hantro_set_fmt_cap(fh_to_ctx(priv), &f->fmt.pix_mp);
+ return hantro_set_fmt_cap(file_to_ctx(file), &f->fmt.pix_mp);
}
static int vidioc_g_selection(struct file *file, void *priv,
struct v4l2_selection *sel)
{
- struct hantro_ctx *ctx = fh_to_ctx(priv);
+ struct hantro_ctx *ctx = file_to_ctx(file);
/* Crop only supported on source. */
- if (!ctx->is_encoder ||
- sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
switch (sel->target) {
@@ -691,13 +690,12 @@ static int vidioc_g_selection(struct file *file, void *priv,
static int vidioc_s_selection(struct file *file, void *priv,
struct v4l2_selection *sel)
{
- struct hantro_ctx *ctx = fh_to_ctx(priv);
+ struct hantro_ctx *ctx = file_to_ctx(file);
struct v4l2_rect *rect = &sel->r;
struct vb2_queue *vq;
/* Crop only supported on source. */
- if (!ctx->is_encoder ||
- sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
/* Change not allowed if the queue is streaming. */
@@ -738,7 +736,7 @@ static const struct v4l2_event hantro_eos_event = {
static int vidioc_encoder_cmd(struct file *file, void *priv,
struct v4l2_encoder_cmd *ec)
{
- struct hantro_ctx *ctx = fh_to_ctx(priv);
+ struct hantro_ctx *ctx = file_to_ctx(file);
int ret;
ret = v4l2_m2m_ioctl_try_encoder_cmd(file, priv, ec);
diff --git a/drivers/media/platform/verisilicon/imx8m_vpu_hw.c b/drivers/media/platform/verisilicon/imx8m_vpu_hw.c
index 35799da534ed..f9f276385c11 100644
--- a/drivers/media/platform/verisilicon/imx8m_vpu_hw.c
+++ b/drivers/media/platform/verisilicon/imx8m_vpu_hw.c
@@ -234,24 +234,6 @@ static const struct hantro_fmt imx8m_vpu_g2_dec_fmts[] = {
},
};
-static irqreturn_t imx8m_vpu_g1_irq(int irq, void *dev_id)
-{
- struct hantro_dev *vpu = dev_id;
- enum vb2_buffer_state state;
- u32 status;
-
- status = vdpu_read(vpu, G1_REG_INTERRUPT);
- state = (status & G1_REG_INTERRUPT_DEC_RDY_INT) ?
- VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
-
- vdpu_write(vpu, 0, G1_REG_INTERRUPT);
- vdpu_write(vpu, G1_REG_CONFIG_DEC_CLK_GATE_E, G1_REG_CONFIG);
-
- hantro_irq_done(vpu, state);
-
- return IRQ_HANDLED;
-}
-
static int imx8mq_vpu_hw_init(struct hantro_dev *vpu)
{
vpu->ctrl_base = vpu->reg_bases[vpu->variant->num_regs - 1];
@@ -328,7 +310,7 @@ static const struct hantro_codec_ops imx8mq_vpu_g2_codec_ops[] = {
*/
static const struct hantro_irq imx8mq_irqs[] = {
- { "g1", imx8m_vpu_g1_irq },
+ { "g1", hantro_g1_irq },
};
static const struct hantro_irq imx8mq_g2_irqs[] = {
diff --git a/drivers/media/platform/xilinx/xilinx-dma.c b/drivers/media/platform/xilinx/xilinx-dma.c
index 18bfa6001909..fcfe0883aba5 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.c
+++ b/drivers/media/platform/xilinx/xilinx-dma.c
@@ -469,7 +469,7 @@ static const struct vb2_ops xvip_dma_queue_qops = {
static int
xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
cap->capabilities = dma->xdev->v4l2_caps | V4L2_CAP_STREAMING |
@@ -491,7 +491,7 @@ xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
static int
xvip_dma_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
if (f->index > 0)
@@ -505,7 +505,7 @@ xvip_dma_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
static int
xvip_dma_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
format->fmt.pix = dma->format;
@@ -565,7 +565,7 @@ __xvip_dma_try_format(struct xvip_dma *dma, struct v4l2_pix_format *pix,
static int
xvip_dma_try_format(struct file *file, void *fh, struct v4l2_format *format)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
__xvip_dma_try_format(dma, &format->fmt.pix, NULL);
@@ -575,7 +575,7 @@ xvip_dma_try_format(struct file *file, void *fh, struct v4l2_format *format)
static int
xvip_dma_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
const struct xvip_video_format *info;
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index 72776d08046a..bbbdd054ba64 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -141,23 +141,6 @@ config RADIO_TIMBERDALE
found behind the Timberdale FPGA on the Russellville board.
Enabling this driver will automatically select the DSP and tuner.
-config RADIO_WL1273
- tristate "Texas Instruments WL1273 I2C FM Radio"
- depends on I2C
- select MFD_CORE
- select MFD_WL1273_CORE
- select FW_LOADER
- help
- Choose Y here if you have this FM radio chip.
-
- In order to control your radio card, you will need to use programs
- that are compatible with the Video For Linux 2 API. Information on
- this API and pointers to "v4l2" programs may be found at
- <file:Documentation/userspace-api/media/index.rst>.
-
- To compile this driver as a module, choose M here: the
- module will be called radio-wl1273.
-
config USB_DSBR
tristate "D-Link/GemTek USB FM radio support"
depends on USB
diff --git a/drivers/media/radio/Makefile b/drivers/media/radio/Makefile
index 1ff46f3a6ed3..f54693ebdb30 100644
--- a/drivers/media/radio/Makefile
+++ b/drivers/media/radio/Makefile
@@ -30,7 +30,6 @@ obj-$(CONFIG_RADIO_TERRATEC) += radio-terratec.o
obj-$(CONFIG_RADIO_TIMBERDALE) += radio-timb.o
obj-$(CONFIG_RADIO_TRUST) += radio-trust.o
obj-$(CONFIG_RADIO_TYPHOON) += radio-typhoon.o
-obj-$(CONFIG_RADIO_WL1273) += radio-wl1273.o
obj-$(CONFIG_RADIO_ZOLTRIX) += radio-zoltrix.o
obj-$(CONFIG_USB_DSBR) += dsbr100.o
diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c
index 3c8c17d64821..2c1d413e8636 100644
--- a/drivers/media/radio/radio-aimslab.c
+++ b/drivers/media/radio/radio-aimslab.c
@@ -4,7 +4,7 @@
*
* Copyright 1997 M. Kirkwood
*
- * Converted to the radio-isa framework by Hans Verkuil <hansverk@cisco.com>
+ * Converted to the radio-isa framework by Hans Verkuil <hverkuil@kernel.org>
* Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org>
* Converted to new API by Alan Cox <alan@lxorguk.ukuu.org.uk>
* Various bugfixes and enhancements by Russell Kroll <rkroll@exploits.org>
diff --git a/drivers/media/radio/radio-aztech.c b/drivers/media/radio/radio-aztech.c
index d989c0b3966f..0a4667bb7034 100644
--- a/drivers/media/radio/radio-aztech.c
+++ b/drivers/media/radio/radio-aztech.c
@@ -2,7 +2,7 @@
/*
* radio-aztech.c - Aztech radio card driver
*
- * Converted to the radio-isa framework by Hans Verkuil <hverkuil@xs4all.nl>
+ * Converted to the radio-isa framework by Hans Verkuil <hverkuil@kernel.org>
* Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org>
* Adapted to support the Video for Linux API by
* Russell Kroll <rkroll@exploits.org>. Based on original tuner code by:
diff --git a/drivers/media/radio/radio-gemtek.c b/drivers/media/radio/radio-gemtek.c
index 5ca6274c45bd..a3265f1dd189 100644
--- a/drivers/media/radio/radio-gemtek.c
+++ b/drivers/media/radio/radio-gemtek.c
@@ -15,7 +15,7 @@
* Converted to new API by Alan Cox <alan@lxorguk.ukuu.org.uk>
* Various bugfixes and enhancements by Russell Kroll <rkroll@exploits.org>
*
- * Converted to the radio-isa framework by Hans Verkuil <hansverk@cisco.com>
+ * Converted to the radio-isa framework by Hans Verkuil <hverkuil@kernel.org>
* Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org>
*
* Note: this card seems to swap the left and right audio channels!
diff --git a/drivers/media/radio/radio-isa.c b/drivers/media/radio/radio-isa.c
index 4f87c76a2a96..1a144536ffa7 100644
--- a/drivers/media/radio/radio-isa.c
+++ b/drivers/media/radio/radio-isa.c
@@ -4,7 +4,7 @@
* This takes care of all the V4L2 scaffolding, allowing the ISA drivers
* to concentrate on the actual hardware operation.
*
- * Copyright (C) 2012 Hans Verkuil <hansverk@cisco.com>
+ * Copyright (C) 2012 Hans Verkuil <hverkuil@kernel.org>
*/
#include <linux/module.h>
diff --git a/drivers/media/radio/radio-isa.h b/drivers/media/radio/radio-isa.h
index 0f3db473da5e..62ff5c3fb5d5 100644
--- a/drivers/media/radio/radio-isa.h
+++ b/drivers/media/radio/radio-isa.h
@@ -4,7 +4,7 @@
* This takes care of all the V4L2 scaffolding, allowing the ISA drivers
* to concentrate on the actual hardware operation.
*
- * Copyright (C) 2012 Hans Verkuil <hansverk@cisco.com>
+ * Copyright (C) 2012 Hans Verkuil <hverkuil@kernel.org>
*/
#ifndef _RADIO_ISA_H_
diff --git a/drivers/media/radio/radio-keene.c b/drivers/media/radio/radio-keene.c
index a35648316aa8..f3b57f0cb1ec 100644
--- a/drivers/media/radio/radio-keene.c
+++ b/drivers/media/radio/radio-keene.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Copyright (c) 2012 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (c) 2012 Hans Verkuil <hverkuil@kernel.org>
*/
/* kernel includes */
@@ -18,7 +18,7 @@
#include <linux/mutex.h>
/* driver and module definitions */
-MODULE_AUTHOR("Hans Verkuil <hverkuil@xs4all.nl>");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org>");
MODULE_DESCRIPTION("Keene FM Transmitter driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/radio/radio-miropcm20.c b/drivers/media/radio/radio-miropcm20.c
index 27f058c5e677..67712ab3d564 100644
--- a/drivers/media/radio/radio-miropcm20.c
+++ b/drivers/media/radio/radio-miropcm20.c
@@ -23,7 +23,7 @@
* This code has been reintroduced and converted to use
* the new V4L2 RDS API by:
*
- * Hans Verkuil <hansverk@cisco.com>
+ * Hans Verkuil <hverkuil@kernel.org>
*/
#include <linux/module.h>
diff --git a/drivers/media/radio/radio-raremono.c b/drivers/media/radio/radio-raremono.c
index 64c7452c05b5..f60775b005e1 100644
--- a/drivers/media/radio/radio-raremono.c
+++ b/drivers/media/radio/radio-raremono.c
@@ -25,14 +25,14 @@
*
* The USB protocol has been reverse engineered using Wireshark, initially
* by Dinesh Ram <dinesh.ram@cern.ch> and finished by Hans Verkuil
- * <hverkuil@xs4all.nl>.
+ * <hverkuil@kernel.org>.
*
* Sadly the firmware used in this product hides lots of goodies since the
* si4734 has more features than are supported by the firmware. Oh well...
*/
/* driver and module definitions */
-MODULE_AUTHOR("Hans Verkuil <hverkuil@xs4all.nl>");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org>");
MODULE_DESCRIPTION("Thanko's Raremono AM/FM/SW Receiver USB driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/radio/radio-rtrack2.c b/drivers/media/radio/radio-rtrack2.c
index 16b13a63bfed..efc02069bf9d 100644
--- a/drivers/media/radio/radio-rtrack2.c
+++ b/drivers/media/radio/radio-rtrack2.c
@@ -7,7 +7,7 @@
* Converted to new API by Alan Cox <alan@lxorguk.ukuu.org.uk>
* Various bugfixes and enhancements by Russell Kroll <rkroll@exploits.org>
*
- * Converted to the radio-isa framework by Hans Verkuil <hansverk@cisco.com>
+ * Converted to the radio-isa framework by Hans Verkuil <hverkuil@kernel.org>
* Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org>
*
* Fully tested with actual hardware and the v4l2-compliance tool.
diff --git a/drivers/media/radio/radio-terratec.c b/drivers/media/radio/radio-terratec.c
index 720080634454..43817dd0a0fe 100644
--- a/drivers/media/radio/radio-terratec.c
+++ b/drivers/media/radio/radio-terratec.c
@@ -17,7 +17,7 @@
* Frequency control is done digitally -- ie out(port,encodefreq(95.8));
* Volume Control is done digitally
*
- * Converted to the radio-isa framework by Hans Verkuil <hansverk@cisco.com>
+ * Converted to the radio-isa framework by Hans Verkuil <hverkuil@kernel.org>
* Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org>
*/
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
deleted file mode 100644
index f55217ccf2b8..000000000000
--- a/drivers/media/radio/radio-wl1273.c
+++ /dev/null
@@ -1,2159 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Driver for the Texas Instruments WL1273 FM radio.
- *
- * Copyright (C) 2011 Nokia Corporation
- * Author: Matti J. Aaltonen <matti.j.aaltonen@nokia.com>
- */
-
-#include <linux/delay.h>
-#include <linux/firmware.h>
-#include <linux/interrupt.h>
-#include <linux/mfd/wl1273-core.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ioctl.h>
-
-#define DRIVER_DESC "Wl1273 FM Radio"
-
-#define WL1273_POWER_SET_OFF 0
-#define WL1273_POWER_SET_FM BIT(0)
-#define WL1273_POWER_SET_RDS BIT(1)
-#define WL1273_POWER_SET_RETENTION BIT(4)
-
-#define WL1273_PUPD_SET_OFF 0x00
-#define WL1273_PUPD_SET_ON 0x01
-#define WL1273_PUPD_SET_RETENTION 0x10
-
-#define WL1273_FREQ(x)		((x) * 10000 / 625)
-#define WL1273_INV_FREQ(x)	((x) * 625 / 10000)
-
-/*
- * static int radio_nr - The number of the radio device
- *
- * The default is 0.
- */
-static int radio_nr;
-module_param(radio_nr, int, 0);
-MODULE_PARM_DESC(radio_nr, "The number of the radio device. Default = 0");
-
-struct wl1273_device {
- char *bus_type;
-
- u8 forbidden;
- unsigned int preemphasis;
- unsigned int spacing;
- unsigned int tx_power;
- unsigned int rx_frequency;
- unsigned int tx_frequency;
- unsigned int rangelow;
- unsigned int rangehigh;
- unsigned int band;
- bool stereo;
-
- /* RDS */
- unsigned int rds_on;
-
- wait_queue_head_t read_queue;
- struct mutex lock; /* for serializing fm radio operations */
- struct completion busy;
-
- unsigned char *buffer;
- unsigned int buf_size;
- unsigned int rd_index;
- unsigned int wr_index;
-
- /* Selected interrupts */
- u16 irq_flags;
- u16 irq_received;
-
- struct v4l2_ctrl_handler ctrl_handler;
- struct v4l2_device v4l2dev;
- struct video_device videodev;
- struct device *dev;
- struct wl1273_core *core;
- struct file *owner;
- char *write_buf;
- unsigned int rds_users;
-};
-
-#define WL1273_IRQ_MASK (WL1273_FR_EVENT | \
- WL1273_POW_ENB_EVENT)
-
-/*
- * static unsigned int rds_buf - the number of RDS buffer blocks used.
- *
- * The default number is 100.
- */
-static unsigned int rds_buf = 100;
-module_param(rds_buf, uint, 0);
-MODULE_PARM_DESC(rds_buf, "Number of RDS buffer entries. Default = 100");
-
-static int wl1273_fm_write_fw(struct wl1273_core *core,
- __u8 *fw, int len)
-{
- struct i2c_client *client = core->client;
- struct i2c_msg msg;
- int i, r = 0;
-
- msg.addr = client->addr;
- msg.flags = 0;
-
- for (i = 0; i <= len; i++) {
- msg.len = fw[0];
- msg.buf = fw + 1;
-
- fw += msg.len + 1;
- dev_dbg(&client->dev, "%s:len[%d]: %d\n", __func__, i, msg.len);
-
- r = i2c_transfer(client->adapter, &msg, 1);
- if (r < 0 && i < len + 1)
- break;
- }
-
- dev_dbg(&client->dev, "%s: i: %d\n", __func__, i);
- dev_dbg(&client->dev, "%s: len + 1: %d\n", __func__, len + 1);
-
- /* Last transfer always fails. */
- if (i == len || r == 1)
- r = 0;
-
- return r;
-}
-
-#define WL1273_FIFO_HAS_DATA(status) (1 << 5 & status)
-#define WL1273_RDS_CORRECTABLE_ERROR (1 << 3)
-#define WL1273_RDS_UNCORRECTABLE_ERROR (1 << 4)
-
-static int wl1273_fm_rds(struct wl1273_device *radio)
-{
- struct wl1273_core *core = radio->core;
- struct i2c_client *client = core->client;
- u16 val;
- u8 b0 = WL1273_RDS_DATA_GET, status;
- struct v4l2_rds_data rds = { 0, 0, 0 };
- struct i2c_msg msg[] = {
- {
- .addr = client->addr,
- .flags = 0,
- .buf = &b0,
- .len = 1,
- },
- {
- .addr = client->addr,
- .flags = I2C_M_RD,
- .buf = (u8 *) &rds,
- .len = sizeof(rds),
- }
- };
- int r;
-
- if (core->mode != WL1273_MODE_RX)
- return 0;
-
- r = core->read(core, WL1273_RDS_SYNC_GET, &val);
- if (r)
- return r;
-
- if ((val & 0x01) == 0) {
- /* RDS decoder not synchronized */
- return -EAGAIN;
- }
-
- /* copy all four RDS blocks to internal buffer */
- do {
- r = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
- if (r != ARRAY_SIZE(msg)) {
- dev_err(radio->dev, WL1273_FM_DRIVER_NAME
- ": %s: read_rds error r == %i)\n",
- __func__, r);
- }
-
- status = rds.block;
-
- if (!WL1273_FIFO_HAS_DATA(status))
- break;
-
- /* copy bits 0-2 (the block ID) to bits 3-5 */
- rds.block = V4L2_RDS_BLOCK_MSK & status;
- rds.block |= rds.block << 3;
-
- /* copy the error bits to standard positions */
- if (WL1273_RDS_UNCORRECTABLE_ERROR & status) {
- rds.block |= V4L2_RDS_BLOCK_ERROR;
- rds.block &= ~V4L2_RDS_BLOCK_CORRECTED;
- } else if (WL1273_RDS_CORRECTABLE_ERROR & status) {
- rds.block &= ~V4L2_RDS_BLOCK_ERROR;
- rds.block |= V4L2_RDS_BLOCK_CORRECTED;
- }
-
- /* copy RDS block to internal buffer */
- memcpy(&radio->buffer[radio->wr_index], &rds, RDS_BLOCK_SIZE);
- radio->wr_index += 3;
-
- /* wrap write pointer */
- if (radio->wr_index >= radio->buf_size)
- radio->wr_index = 0;
-
- /* check for overflow & start over */
- if (radio->wr_index == radio->rd_index) {
- dev_dbg(radio->dev, "RDS OVERFLOW");
-
- radio->rd_index = 0;
- radio->wr_index = 0;
- break;
- }
- } while (WL1273_FIFO_HAS_DATA(status));
-
- /* wake up read queue */
- if (radio->wr_index != radio->rd_index)
- wake_up_interruptible(&radio->read_queue);
-
- return 0;
-}
-
-static irqreturn_t wl1273_fm_irq_thread_handler(int irq, void *dev_id)
-{
- struct wl1273_device *radio = dev_id;
- struct wl1273_core *core = radio->core;
- u16 flags;
- int r;
-
- r = core->read(core, WL1273_FLAG_GET, &flags);
- if (r)
- goto out;
-
- if (flags & WL1273_BL_EVENT) {
- radio->irq_received = flags;
- dev_dbg(radio->dev, "IRQ: BL\n");
- }
-
- if (flags & WL1273_RDS_EVENT) {
- msleep(200);
-
- wl1273_fm_rds(radio);
- }
-
- if (flags & WL1273_BBLK_EVENT)
- dev_dbg(radio->dev, "IRQ: BBLK\n");
-
- if (flags & WL1273_LSYNC_EVENT)
- dev_dbg(radio->dev, "IRQ: LSYNC\n");
-
- if (flags & WL1273_LEV_EVENT) {
- u16 level;
-
- r = core->read(core, WL1273_RSSI_LVL_GET, &level);
- if (r)
- goto out;
-
- if (level > 14)
- dev_dbg(radio->dev, "IRQ: LEV: 0x%x04\n", level);
- }
-
- if (flags & WL1273_IFFR_EVENT)
- dev_dbg(radio->dev, "IRQ: IFFR\n");
-
- if (flags & WL1273_PI_EVENT)
- dev_dbg(radio->dev, "IRQ: PI\n");
-
- if (flags & WL1273_PD_EVENT)
- dev_dbg(radio->dev, "IRQ: PD\n");
-
- if (flags & WL1273_STIC_EVENT)
- dev_dbg(radio->dev, "IRQ: STIC\n");
-
- if (flags & WL1273_MAL_EVENT)
- dev_dbg(radio->dev, "IRQ: MAL\n");
-
- if (flags & WL1273_POW_ENB_EVENT) {
- complete(&radio->busy);
- dev_dbg(radio->dev, "NOT BUSY\n");
- dev_dbg(radio->dev, "IRQ: POW_ENB\n");
- }
-
- if (flags & WL1273_SCAN_OVER_EVENT)
- dev_dbg(radio->dev, "IRQ: SCAN_OVER\n");
-
- if (flags & WL1273_ERROR_EVENT)
- dev_dbg(radio->dev, "IRQ: ERROR\n");
-
- if (flags & WL1273_FR_EVENT) {
- u16 freq;
-
- dev_dbg(radio->dev, "IRQ: FR:\n");
-
- if (core->mode == WL1273_MODE_RX) {
- r = core->write(core, WL1273_TUNER_MODE_SET,
- TUNER_MODE_STOP_SEARCH);
- if (r) {
- dev_err(radio->dev,
- "%s: TUNER_MODE_SET fails: %d\n",
- __func__, r);
- goto out;
- }
-
- r = core->read(core, WL1273_FREQ_SET, &freq);
- if (r)
- goto out;
-
- if (radio->band == WL1273_BAND_JAPAN)
- radio->rx_frequency = WL1273_BAND_JAPAN_LOW +
- freq * 50;
- else
- radio->rx_frequency = WL1273_BAND_OTHER_LOW +
- freq * 50;
- /*
-			 * The driver works better with this sleep;
-			 * the documentation doesn't mention it.
- */
- usleep_range(10000, 15000);
-
- dev_dbg(radio->dev, "%dkHz\n", radio->rx_frequency);
-
- } else {
- r = core->read(core, WL1273_CHANL_SET, &freq);
- if (r)
- goto out;
-
- dev_dbg(radio->dev, "%dkHz\n", freq);
- }
- dev_dbg(radio->dev, "%s: NOT BUSY\n", __func__);
- }
-
-out:
- core->write(core, WL1273_INT_MASK_SET, radio->irq_flags);
- complete(&radio->busy);
-
- return IRQ_HANDLED;
-}
-
-static int wl1273_fm_set_tx_freq(struct wl1273_device *radio, unsigned int freq)
-{
- struct wl1273_core *core = radio->core;
- int r = 0;
- unsigned long t;
-
- if (freq < WL1273_BAND_TX_LOW) {
- dev_err(radio->dev,
- "Frequency out of range: %d < %d\n", freq,
- WL1273_BAND_TX_LOW);
- return -ERANGE;
- }
-
- if (freq > WL1273_BAND_TX_HIGH) {
- dev_err(radio->dev,
- "Frequency out of range: %d > %d\n", freq,
- WL1273_BAND_TX_HIGH);
- return -ERANGE;
- }
-
- /*
-	 * The driver works better with this sleep;
- * the documentation doesn't mention it.
- */
- usleep_range(5000, 10000);
-
- dev_dbg(radio->dev, "%s: freq: %d kHz\n", __func__, freq);
-
- /* Set the current tx channel */
- r = core->write(core, WL1273_CHANL_SET, freq / 10);
- if (r)
- return r;
-
- reinit_completion(&radio->busy);
-
- /* wait for the FR IRQ */
- t = wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(2000));
- if (!t)
- return -ETIMEDOUT;
-
- dev_dbg(radio->dev, "WL1273_CHANL_SET: %lu\n", t);
-
- /* Enable the output power */
- r = core->write(core, WL1273_POWER_ENB_SET, 1);
- if (r)
- return r;
-
- reinit_completion(&radio->busy);
-
- /* wait for the POWER_ENB IRQ */
- t = wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(1000));
- if (!t)
- return -ETIMEDOUT;
-
- radio->tx_frequency = freq;
- dev_dbg(radio->dev, "WL1273_POWER_ENB_SET: %lu\n", t);
-
- return 0;
-}
-
-static int wl1273_fm_set_rx_freq(struct wl1273_device *radio, unsigned int freq)
-{
- struct wl1273_core *core = radio->core;
- int r, f;
- unsigned long t;
-
- if (freq < radio->rangelow) {
- dev_err(radio->dev,
- "Frequency out of range: %d < %d\n", freq,
- radio->rangelow);
- r = -ERANGE;
- goto err;
- }
-
- if (freq > radio->rangehigh) {
- dev_err(radio->dev,
- "Frequency out of range: %d > %d\n", freq,
- radio->rangehigh);
- r = -ERANGE;
- goto err;
- }
-
- dev_dbg(radio->dev, "%s: %dkHz\n", __func__, freq);
-
- core->write(core, WL1273_INT_MASK_SET, radio->irq_flags);
-
- if (radio->band == WL1273_BAND_JAPAN)
- f = (freq - WL1273_BAND_JAPAN_LOW) / 50;
- else
- f = (freq - WL1273_BAND_OTHER_LOW) / 50;
-
- r = core->write(core, WL1273_FREQ_SET, f);
- if (r) {
- dev_err(radio->dev, "FREQ_SET fails\n");
- goto err;
- }
-
- r = core->write(core, WL1273_TUNER_MODE_SET, TUNER_MODE_PRESET);
- if (r) {
- dev_err(radio->dev, "TUNER_MODE_SET fails\n");
- goto err;
- }
-
- reinit_completion(&radio->busy);
-
- t = wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(2000));
- if (!t) {
- dev_err(radio->dev, "%s: TIMEOUT\n", __func__);
- return -ETIMEDOUT;
- }
-
- radio->rd_index = 0;
- radio->wr_index = 0;
- radio->rx_frequency = freq;
- return 0;
-err:
- return r;
-}
-
-static int wl1273_fm_get_freq(struct wl1273_device *radio)
-{
- struct wl1273_core *core = radio->core;
- unsigned int freq;
- u16 f;
- int r;
-
- if (core->mode == WL1273_MODE_RX) {
- r = core->read(core, WL1273_FREQ_SET, &f);
- if (r)
- return r;
-
- dev_dbg(radio->dev, "Freq get: 0x%04x\n", f);
- if (radio->band == WL1273_BAND_JAPAN)
- freq = WL1273_BAND_JAPAN_LOW + 50 * f;
- else
- freq = WL1273_BAND_OTHER_LOW + 50 * f;
- } else {
- r = core->read(core, WL1273_CHANL_SET, &f);
- if (r)
- return r;
-
- freq = f * 10;
- }
-
- return freq;
-}
-
-/**
- * wl1273_fm_upload_firmware_patch() - Upload the firmware.
- * @radio: A pointer to the device struct.
- *
- * The firmware file consists of arrays of bytes where the first byte
- * gives the array length. The first byte in the file gives the
- * number of these arrays.
- */
-static int wl1273_fm_upload_firmware_patch(struct wl1273_device *radio)
-{
- struct wl1273_core *core = radio->core;
- unsigned int packet_num;
- const struct firmware *fw_p;
- const char *fw_name = "radio-wl1273-fw.bin";
- struct device *dev = radio->dev;
- __u8 *ptr;
- int r;
-
- dev_dbg(dev, "%s:\n", __func__);
-
- /*
- * Uploading the firmware patch is not always necessary,
- * so we only print an info message.
- */
- if (request_firmware(&fw_p, fw_name, dev)) {
- dev_info(dev, "%s - %s not found\n", __func__, fw_name);
-
- return 0;
- }
-
- ptr = (__u8 *) fw_p->data;
- packet_num = ptr[0];
- dev_dbg(dev, "%s: packets: %d\n", __func__, packet_num);
-
- r = wl1273_fm_write_fw(core, ptr + 1, packet_num);
- if (r) {
- dev_err(dev, "FW upload error: %d\n", r);
- goto out;
- }
-
- /* ignore possible error here */
- core->write(core, WL1273_RESET, 0);
-
- dev_dbg(dev, "%s - download OK, r: %d\n", __func__, r);
-out:
- release_firmware(fw_p);
- return r;
-}
-
-static int wl1273_fm_stop(struct wl1273_device *radio)
-{
- struct wl1273_core *core = radio->core;
-
- if (core->mode == WL1273_MODE_RX) {
- int r = core->write(core, WL1273_POWER_SET,
- WL1273_POWER_SET_OFF);
- if (r)
- dev_err(radio->dev, "%s: POWER_SET fails: %d\n",
- __func__, r);
- } else if (core->mode == WL1273_MODE_TX) {
- int r = core->write(core, WL1273_PUPD_SET,
- WL1273_PUPD_SET_OFF);
- if (r)
- dev_err(radio->dev,
- "%s: PUPD_SET fails: %d\n", __func__, r);
- }
-
- if (core->pdata->disable) {
- core->pdata->disable();
- dev_dbg(radio->dev, "Back to reset\n");
- }
-
- return 0;
-}
-
-static int wl1273_fm_start(struct wl1273_device *radio, int new_mode)
-{
- struct wl1273_core *core = radio->core;
- struct wl1273_fm_platform_data *pdata = core->pdata;
- struct device *dev = radio->dev;
- int r = -EINVAL;
-
- if (pdata->enable && core->mode == WL1273_MODE_OFF) {
- dev_dbg(radio->dev, "Out of reset\n");
-
- pdata->enable();
- msleep(250);
- }
-
- if (new_mode == WL1273_MODE_RX) {
- u16 val = WL1273_POWER_SET_FM;
-
- if (radio->rds_on)
- val |= WL1273_POWER_SET_RDS;
-
- /* If this fails try again */
- r = core->write(core, WL1273_POWER_SET, val);
- if (r) {
- msleep(100);
-
- r = core->write(core, WL1273_POWER_SET, val);
- if (r) {
- dev_err(dev, "%s: POWER_SET fails\n", __func__);
- goto fail;
- }
- }
-
- /* rds buffer configuration */
- radio->wr_index = 0;
- radio->rd_index = 0;
-
- } else if (new_mode == WL1273_MODE_TX) {
- /* If this fails try again once */
- r = core->write(core, WL1273_PUPD_SET, WL1273_PUPD_SET_ON);
- if (r) {
- msleep(100);
- r = core->write(core, WL1273_PUPD_SET,
- WL1273_PUPD_SET_ON);
- if (r) {
- dev_err(dev, "%s: PUPD_SET fails\n", __func__);
- goto fail;
- }
- }
-
- if (radio->rds_on) {
- r = core->write(core, WL1273_RDS_DATA_ENB, 1);
- if (r) {
- dev_err(dev, "%s: RDS_DATA_ENB ON fails\n",
- __func__);
- goto fail;
- }
- } else {
- r = core->write(core, WL1273_RDS_DATA_ENB, 0);
- if (r) {
- dev_err(dev, "%s: RDS_DATA_ENB OFF fails\n",
- __func__);
- goto fail;
- }
- }
- } else {
- dev_warn(dev, "%s: Illegal mode.\n", __func__);
- }
-
- if (core->mode == WL1273_MODE_OFF) {
- r = wl1273_fm_upload_firmware_patch(radio);
- if (r)
- dev_warn(dev, "Firmware upload failed.\n");
-
- /*
- * Sometimes the chip is in a wrong power state at this point.
- * So we set the power once again.
- */
- if (new_mode == WL1273_MODE_RX) {
- u16 val = WL1273_POWER_SET_FM;
-
- if (radio->rds_on)
- val |= WL1273_POWER_SET_RDS;
-
- r = core->write(core, WL1273_POWER_SET, val);
- if (r) {
- dev_err(dev, "%s: POWER_SET fails\n", __func__);
- goto fail;
- }
- } else if (new_mode == WL1273_MODE_TX) {
- r = core->write(core, WL1273_PUPD_SET,
- WL1273_PUPD_SET_ON);
- if (r) {
- dev_err(dev, "%s: PUPD_SET fails\n", __func__);
- goto fail;
- }
- }
- }
-
- return 0;
-fail:
- if (pdata->disable)
- pdata->disable();
-
- dev_dbg(dev, "%s: return: %d\n", __func__, r);
- return r;
-}
-
-static int wl1273_fm_suspend(struct wl1273_device *radio)
-{
- struct wl1273_core *core = radio->core;
- int r;
-
- /* Cannot go from OFF to SUSPENDED */
- if (core->mode == WL1273_MODE_RX)
- r = core->write(core, WL1273_POWER_SET,
- WL1273_POWER_SET_RETENTION);
- else if (core->mode == WL1273_MODE_TX)
- r = core->write(core, WL1273_PUPD_SET,
- WL1273_PUPD_SET_RETENTION);
- else
- r = -EINVAL;
-
- if (r) {
- dev_err(radio->dev, "%s: POWER_SET fails: %d\n", __func__, r);
- goto out;
- }
-
-out:
- return r;
-}
-
-static int wl1273_fm_set_mode(struct wl1273_device *radio, int mode)
-{
- struct wl1273_core *core = radio->core;
- struct device *dev = radio->dev;
- int old_mode;
- int r;
-
- dev_dbg(dev, "%s\n", __func__);
- dev_dbg(dev, "Forbidden modes: 0x%02x\n", radio->forbidden);
-
- old_mode = core->mode;
- if (mode & radio->forbidden) {
- r = -EPERM;
- goto out;
- }
-
- switch (mode) {
- case WL1273_MODE_RX:
- case WL1273_MODE_TX:
- r = wl1273_fm_start(radio, mode);
- if (r) {
- dev_err(dev, "%s: Cannot start.\n", __func__);
- wl1273_fm_stop(radio);
- goto out;
- }
-
- core->mode = mode;
- r = core->write(core, WL1273_INT_MASK_SET, radio->irq_flags);
- if (r) {
- dev_err(dev, "INT_MASK_SET fails.\n");
- goto out;
- }
-
- /* remember previous settings */
- if (mode == WL1273_MODE_RX) {
- r = wl1273_fm_set_rx_freq(radio, radio->rx_frequency);
- if (r) {
- dev_err(dev, "set freq fails: %d.\n", r);
- goto out;
- }
-
- r = core->set_volume(core, core->volume);
- if (r) {
- dev_err(dev, "set volume fails: %d.\n", r);
- goto out;
- }
-
- dev_dbg(dev, "%s: Set vol: %d.\n", __func__,
- core->volume);
- } else {
- r = wl1273_fm_set_tx_freq(radio, radio->tx_frequency);
- if (r) {
- dev_err(dev, "set freq fails: %d.\n", r);
- goto out;
- }
- }
-
- dev_dbg(radio->dev, "%s: Set audio mode.\n", __func__);
-
- r = core->set_audio(core, core->audio_mode);
- if (r)
- dev_err(dev, "Cannot set audio mode.\n");
- break;
-
- case WL1273_MODE_OFF:
- r = wl1273_fm_stop(radio);
- if (r)
- dev_err(dev, "%s: Off fails: %d\n", __func__, r);
- else
- core->mode = WL1273_MODE_OFF;
-
- break;
-
- case WL1273_MODE_SUSPENDED:
- r = wl1273_fm_suspend(radio);
- if (r)
- dev_err(dev, "%s: Suspend fails: %d\n", __func__, r);
- else
- core->mode = WL1273_MODE_SUSPENDED;
-
- break;
-
- default:
- dev_err(dev, "%s: Unknown mode: %d\n", __func__, mode);
- r = -EINVAL;
- break;
- }
-out:
- if (r)
- core->mode = old_mode;
-
- return r;
-}
-
-static int wl1273_fm_set_seek(struct wl1273_device *radio,
- unsigned int wrap_around,
- unsigned int seek_upward,
- int level)
-{
- struct wl1273_core *core = radio->core;
- int r = 0;
- unsigned int dir = (seek_upward == 0) ? 0 : 1;
- unsigned int f;
-
- f = radio->rx_frequency;
- dev_dbg(radio->dev, "rx_frequency: %d\n", f);
-
- if (dir && f + radio->spacing <= radio->rangehigh)
- r = wl1273_fm_set_rx_freq(radio, f + radio->spacing);
- else if (dir && wrap_around)
- r = wl1273_fm_set_rx_freq(radio, radio->rangelow);
- else if (f - radio->spacing >= radio->rangelow)
- r = wl1273_fm_set_rx_freq(radio, f - radio->spacing);
- else if (wrap_around)
- r = wl1273_fm_set_rx_freq(radio, radio->rangehigh);
-
- if (r)
- goto out;
-
- if (level < SCHAR_MIN || level > SCHAR_MAX)
- return -EINVAL;
-
- reinit_completion(&radio->busy);
- dev_dbg(radio->dev, "%s: BUSY\n", __func__);
-
- r = core->write(core, WL1273_INT_MASK_SET, radio->irq_flags);
- if (r)
- goto out;
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- r = core->write(core, WL1273_SEARCH_LVL_SET, level);
- if (r)
- goto out;
-
- r = core->write(core, WL1273_SEARCH_DIR_SET, dir);
- if (r)
- goto out;
-
- r = core->write(core, WL1273_TUNER_MODE_SET, TUNER_MODE_AUTO_SEEK);
- if (r)
- goto out;
-
- /* wait for the FR IRQ */
- wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(1000));
- if (!(radio->irq_received & WL1273_BL_EVENT)) {
- r = -ETIMEDOUT;
- goto out;
- }
-
- radio->irq_received &= ~WL1273_BL_EVENT;
-
- if (!wrap_around)
- goto out;
-
- /* Wrap around */
- dev_dbg(radio->dev, "Wrap around in HW seek.\n");
-
- if (seek_upward)
- f = radio->rangelow;
- else
- f = radio->rangehigh;
-
- r = wl1273_fm_set_rx_freq(radio, f);
- if (r)
- goto out;
-
- reinit_completion(&radio->busy);
- dev_dbg(radio->dev, "%s: BUSY\n", __func__);
-
- r = core->write(core, WL1273_TUNER_MODE_SET, TUNER_MODE_AUTO_SEEK);
- if (r)
- goto out;
-
- /* wait for the FR IRQ */
- if (!wait_for_completion_timeout(&radio->busy, msecs_to_jiffies(1000)))
- r = -ETIMEDOUT;
-out:
- dev_dbg(radio->dev, "%s: Err: %d\n", __func__, r);
- return r;
-}
-
-/**
- * wl1273_fm_get_tx_ctune() - Get the TX tuning capacitor value.
- * @radio: A pointer to the device struct.
- */
-static unsigned int wl1273_fm_get_tx_ctune(struct wl1273_device *radio)
-{
- struct wl1273_core *core = radio->core;
- struct device *dev = radio->dev;
- u16 val;
- int r;
-
- if (core->mode == WL1273_MODE_OFF ||
- core->mode == WL1273_MODE_SUSPENDED)
- return -EPERM;
-
- r = core->read(core, WL1273_READ_FMANT_TUNE_VALUE, &val);
- if (r) {
- dev_err(dev, "%s: read error: %d\n", __func__, r);
- goto out;
- }
-
-out:
- return val;
-}
-
-/**
- * wl1273_fm_set_preemphasis() - Set the TX pre-emphasis value.
- * @radio: A pointer to the device struct.
- * @preemphasis: The new pre-emphasis value.
- *
- * Possible pre-emphasis values are: V4L2_PREEMPHASIS_DISABLED,
- * V4L2_PREEMPHASIS_50_uS and V4L2_PREEMPHASIS_75_uS.
- */
-static int wl1273_fm_set_preemphasis(struct wl1273_device *radio,
- unsigned int preemphasis)
-{
- struct wl1273_core *core = radio->core;
- int r;
- u16 em;
-
- if (core->mode == WL1273_MODE_OFF ||
- core->mode == WL1273_MODE_SUSPENDED)
- return -EPERM;
-
- mutex_lock(&core->lock);
-
- switch (preemphasis) {
- case V4L2_PREEMPHASIS_DISABLED:
- em = 1;
- break;
- case V4L2_PREEMPHASIS_50_uS:
- em = 0;
- break;
- case V4L2_PREEMPHASIS_75_uS:
- em = 2;
- break;
- default:
- r = -EINVAL;
- goto out;
- }
-
- r = core->write(core, WL1273_PREMPH_SET, em);
- if (r)
- goto out;
-
- radio->preemphasis = preemphasis;
-
-out:
- mutex_unlock(&core->lock);
- return r;
-}
-
-static int wl1273_fm_rds_on(struct wl1273_device *radio)
-{
- struct wl1273_core *core = radio->core;
- int r;
-
- dev_dbg(radio->dev, "%s\n", __func__);
- if (radio->rds_on)
- return 0;
-
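- /* Power up FM and RDS together; retune below, since the power write seems to disturb the current frequency */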
- r = core->write(core, WL1273_POWER_SET,
- WL1273_POWER_SET_FM | WL1273_POWER_SET_RDS);
- if (r)
- goto out;
-
- r = wl1273_fm_set_rx_freq(radio, radio->rx_frequency);
- if (r)
- dev_err(radio->dev, "set freq fails: %d.\n", r);
-out:
- return r;
-}
-
-static int wl1273_fm_rds_off(struct wl1273_device *radio)
-{
- struct wl1273_core *core = radio->core;
- int r;
-
- if (!radio->rds_on)
- return 0;
-
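- /* Mask RDS interrupts first so no new data lands while RDS power is dropped */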
- radio->irq_flags &= ~WL1273_RDS_EVENT;
-
- r = core->write(core, WL1273_INT_MASK_SET, radio->irq_flags);
- if (r)
- goto out;
-
- /* Service pending read */
- wake_up_interruptible(&radio->read_queue);
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- r = core->write(core, WL1273_POWER_SET, WL1273_POWER_SET_FM);
- if (r)
- goto out;
-
- r = wl1273_fm_set_rx_freq(radio, radio->rx_frequency);
- if (r)
- dev_err(radio->dev, "set freq fails: %d.\n", r);
-out:
- dev_dbg(radio->dev, "%s: exiting...\n", __func__);
-
- return r;
-}
-
-static int wl1273_fm_set_rds(struct wl1273_device *radio, unsigned int new_mode)
-{
- int r = 0;
- struct wl1273_core *core = radio->core;
-
- if (core->mode == WL1273_MODE_OFF ||
- core->mode == WL1273_MODE_SUSPENDED)
- return -EPERM;
-
- if (new_mode == WL1273_RDS_RESET) {
- r = core->write(core, WL1273_RDS_CNTRL_SET, 1);
- return r;
- }
-
- if (core->mode == WL1273_MODE_TX && new_mode == WL1273_RDS_OFF) {
- r = core->write(core, WL1273_RDS_DATA_ENB, 0);
- } else if (core->mode == WL1273_MODE_TX && new_mode == WL1273_RDS_ON) {
- r = core->write(core, WL1273_RDS_DATA_ENB, 1);
- } else if (core->mode == WL1273_MODE_RX && new_mode == WL1273_RDS_OFF) {
- r = wl1273_fm_rds_off(radio);
- } else if (core->mode == WL1273_MODE_RX && new_mode == WL1273_RDS_ON) {
- r = wl1273_fm_rds_on(radio);
- } else {
- dev_err(radio->dev, "%s: Unknown mode: %d\n",
- __func__, new_mode);
- r = -EINVAL;
- }
-
- if (!r)
- radio->rds_on = new_mode == WL1273_RDS_ON;
-
- return r;
-}
-
-static ssize_t wl1273_fm_fops_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
- struct wl1273_core *core = radio->core;
- u16 val;
- int r;
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
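- /* Outside TX mode writes are silently swallowed: report success without sending anything */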
- if (core->mode != WL1273_MODE_TX)
- return count;
-
- if (radio->rds_users == 0) {
- dev_warn(radio->dev, "%s: RDS not on.\n", __func__);
- return 0;
- }
-
- if (mutex_lock_interruptible(&core->lock))
- return -EINTR;
- /*
- * Multiple processes can open the device, but only
- * one gets to write to it.
- */
- if (radio->owner && radio->owner != file) {
- r = -EBUSY;
- goto out;
- }
- radio->owner = file;
-
- /* Manual Mode */
- if (count > 255)
- val = 255;
- else
- val = count;
-
- r = core->write(core, WL1273_RDS_CONFIG_DATA_SET, val);
- if (r)
- goto out;
-
- if (copy_from_user(radio->write_buf + 1, buf, val)) {
- r = -EFAULT;
- goto out;
- }
-
- dev_dbg(radio->dev, "Count: %d\n", val);
- dev_dbg(radio->dev, "From user: \"%s\"\n", radio->write_buf);
-
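- /* The first byte carries the opcode; the RDS payload follows it */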
- radio->write_buf[0] = WL1273_RDS_DATA_SET;
- r = core->write_data(core, radio->write_buf, val + 1);
- if (r)
- goto out;
-
- r = val;
-out:
- mutex_unlock(&core->lock);
-
- return r;
-}
-
-static __poll_t wl1273_fm_fops_poll(struct file *file,
- struct poll_table_struct *pts)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
- struct wl1273_core *core = radio->core;
-
- if (radio->owner && radio->owner != file)
- return EPOLLERR;
-
- radio->owner = file;
-
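- /* RX: readable once the ring indices differ; TX: always writable */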
- if (core->mode == WL1273_MODE_RX) {
- poll_wait(file, &radio->read_queue, pts);
-
- if (radio->rd_index != radio->wr_index)
- return EPOLLIN | EPOLLRDNORM;
-
- } else if (core->mode == WL1273_MODE_TX) {
- return EPOLLOUT | EPOLLWRNORM;
- }
-
- return 0;
-}
-
-static int wl1273_fm_fops_open(struct file *file)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
- struct wl1273_core *core = radio->core;
- int r = 0;
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- if (core->mode == WL1273_MODE_RX && radio->rds_on &&
- !radio->rds_users) {
- dev_dbg(radio->dev, "%s: Mode: %d\n", __func__, core->mode);
-
- if (mutex_lock_interruptible(&core->lock))
- return -EINTR;
-
- radio->irq_flags |= WL1273_RDS_EVENT;
-
- r = core->write(core, WL1273_INT_MASK_SET,
- radio->irq_flags);
- if (r) {
- mutex_unlock(&core->lock);
- goto out;
- }
-
- radio->rds_users++;
-
- mutex_unlock(&core->lock);
- }
-out:
- return r;
-}
-
-static int wl1273_fm_fops_release(struct file *file)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
- struct wl1273_core *core = radio->core;
- int r = 0;
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- if (radio->rds_users > 0) {
- radio->rds_users--;
- if (radio->rds_users == 0) {
- mutex_lock(&core->lock);
-
- radio->irq_flags &= ~WL1273_RDS_EVENT;
-
- if (core->mode == WL1273_MODE_RX) {
- r = core->write(core,
- WL1273_INT_MASK_SET,
- radio->irq_flags);
- if (r) {
- mutex_unlock(&core->lock);
- goto out;
- }
- }
- mutex_unlock(&core->lock);
- }
- }
-
- if (file == radio->owner)
- radio->owner = NULL;
-out:
- return r;
-}
-
-static ssize_t wl1273_fm_fops_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- int r = 0;
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
- struct wl1273_core *core = radio->core;
- unsigned int block_count = 0;
- u16 val;
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- if (core->mode != WL1273_MODE_RX)
- return 0;
-
- if (radio->rds_users == 0) {
- dev_warn(radio->dev, "%s: RDS not on.\n", __func__);
- return 0;
- }
-
- if (mutex_lock_interruptible(&core->lock))
- return -EINTR;
-
- /*
- * Multiple processes can open the device, but only
- * one at a time gets read access.
- */
- if (radio->owner && radio->owner != file) {
- r = -EBUSY;
- goto out;
- }
- radio->owner = file;
-
- r = core->read(core, WL1273_RDS_SYNC_GET, &val);
- if (r) {
- dev_err(radio->dev, "%s: Get RDS_SYNC fails.\n", __func__);
- goto out;
- } else if (val == 0) {
- dev_info(radio->dev, "RDS_SYNC: Not synchronized\n");
- r = -ENODATA;
- goto out;
- }
-
- /* block if no new data available */
- while (radio->wr_index == radio->rd_index) {
- if (file->f_flags & O_NONBLOCK) {
- r = -EWOULDBLOCK;
- goto out;
- }
-
- dev_dbg(radio->dev, "%s: Wait for RDS data.\n", __func__);
- if (wait_event_interruptible(radio->read_queue,
- radio->wr_index !=
- radio->rd_index) < 0) {
- r = -EINTR;
- goto out;
- }
- }
-
- /* calculate block count from byte count */
- count /= RDS_BLOCK_SIZE;
-
- /* copy RDS blocks from the internal buffer and to user buffer */
- while (block_count < count) {
- if (radio->rd_index == radio->wr_index)
- break;
-
- /* always transfer complete RDS blocks */
- if (copy_to_user(buf, &radio->buffer[radio->rd_index],
- RDS_BLOCK_SIZE))
- break;
-
- /* increment and wrap the read pointer */
- radio->rd_index += RDS_BLOCK_SIZE;
- if (radio->rd_index >= radio->buf_size)
- radio->rd_index = 0;
-
- /* increment counters */
- block_count++;
- buf += RDS_BLOCK_SIZE;
- r += RDS_BLOCK_SIZE;
- }
-
-out:
- dev_dbg(radio->dev, "%s: exit\n", __func__);
- mutex_unlock(&core->lock);
-
- return r;
-}
-
-static const struct v4l2_file_operations wl1273_fops = {
- .owner = THIS_MODULE,
- .read = wl1273_fm_fops_read,
- .write = wl1273_fm_fops_write,
- .poll = wl1273_fm_fops_poll,
- .unlocked_ioctl = video_ioctl2,
- .open = wl1273_fm_fops_open,
- .release = wl1273_fm_fops_release,
-};
-
-static int wl1273_fm_vidioc_querycap(struct file *file, void *priv,
- struct v4l2_capability *capability)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- strscpy(capability->driver, WL1273_FM_DRIVER_NAME,
- sizeof(capability->driver));
- strscpy(capability->card, "TI Wl1273 FM Radio",
- sizeof(capability->card));
- strscpy(capability->bus_info, radio->bus_type,
- sizeof(capability->bus_info));
- return 0;
-}
-
-static int wl1273_fm_vidioc_g_input(struct file *file, void *priv,
- unsigned int *i)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- *i = 0;
-
- return 0;
-}
-
-static int wl1273_fm_vidioc_s_input(struct file *file, void *priv,
- unsigned int i)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- if (i != 0)
- return -EINVAL;
-
- return 0;
-}
-
-/**
- * wl1273_fm_set_tx_power() - Set the transmission power value.
- * @radio: A pointer to the device struct.
- * @power: The new power value.
- */
-static int wl1273_fm_set_tx_power(struct wl1273_device *radio, u16 power)
-{
- struct wl1273_core *core = radio->core;
- int r;
-
- if (core->mode == WL1273_MODE_OFF ||
- core->mode == WL1273_MODE_SUSPENDED)
- return -EPERM;
-
- mutex_lock(&core->lock);
-
- /* Convert the dBuV value to chip presentation */
- r = core->write(core, WL1273_POWER_LEV_SET, 122 - power);
- if (r)
- goto out;
-
- radio->tx_power = power;
-
-out:
- mutex_unlock(&core->lock);
- return r;
-}
-
-#define WL1273_SPACING_50kHz 1
-#define WL1273_SPACING_100kHz 2
-#define WL1273_SPACING_200kHz 4
-
-static int wl1273_fm_tx_set_spacing(struct wl1273_device *radio,
- unsigned int spacing)
-{
- struct wl1273_core *core = radio->core;
- int r;
-
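- /* Map the requested spacing in Hz to the nearest supported 50/100/200 kHz step; 0 selects the 100 kHz default */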
- if (spacing == 0) {
- r = core->write(core, WL1273_SCAN_SPACING_SET,
- WL1273_SPACING_100kHz);
- radio->spacing = 100;
- } else if (spacing - 50000 < 25000) {
- r = core->write(core, WL1273_SCAN_SPACING_SET,
- WL1273_SPACING_50kHz);
- radio->spacing = 50;
- } else if (spacing - 100000 < 50000) {
- r = core->write(core, WL1273_SCAN_SPACING_SET,
- WL1273_SPACING_100kHz);
- radio->spacing = 100;
- } else {
- r = core->write(core, WL1273_SCAN_SPACING_SET,
- WL1273_SPACING_200kHz);
- radio->spacing = 200;
- }
-
- return r;
-}
-
-static int wl1273_fm_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct wl1273_device *radio = ctrl->priv;
- struct wl1273_core *core = radio->core;
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- if (mutex_lock_interruptible(&core->lock))
- return -EINTR;
-
- switch (ctrl->id) {
- case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
- ctrl->val = wl1273_fm_get_tx_ctune(radio);
- break;
-
- default:
- dev_warn(radio->dev, "%s: Unknown IOCTL: %d\n",
- __func__, ctrl->id);
- break;
- }
-
- mutex_unlock(&core->lock);
-
- return 0;
-}
-
-#define WL1273_MUTE_SOFT_ENABLE (1 << 0)
-#define WL1273_MUTE_AC (1 << 1)
-#define WL1273_MUTE_HARD_LEFT (1 << 2)
-#define WL1273_MUTE_HARD_RIGHT (1 << 3)
-#define WL1273_MUTE_SOFT_FORCE (1 << 4)
-
-static inline struct wl1273_device *to_radio(struct v4l2_ctrl *ctrl)
-{
- return container_of(ctrl->handler, struct wl1273_device, ctrl_handler);
-}
-
-static int wl1273_fm_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct wl1273_device *radio = to_radio(ctrl);
- struct wl1273_core *core = radio->core;
- int r = 0;
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- switch (ctrl->id) {
- case V4L2_CID_AUDIO_MUTE:
- if (mutex_lock_interruptible(&core->lock))
- return -EINTR;
-
- if (core->mode == WL1273_MODE_RX && ctrl->val)
- r = core->write(core,
- WL1273_MUTE_STATUS_SET,
- WL1273_MUTE_HARD_LEFT |
- WL1273_MUTE_HARD_RIGHT);
- else if (core->mode == WL1273_MODE_RX)
- r = core->write(core,
- WL1273_MUTE_STATUS_SET, 0x0);
- else if (core->mode == WL1273_MODE_TX && ctrl->val)
- r = core->write(core, WL1273_MUTE, 1);
- else if (core->mode == WL1273_MODE_TX)
- r = core->write(core, WL1273_MUTE, 0);
-
- mutex_unlock(&core->lock);
- break;
-
- case V4L2_CID_AUDIO_VOLUME:
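- /* Volume 0 doubles as a power switch: it turns the whole chip off */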
- if (ctrl->val == 0)
- r = wl1273_fm_set_mode(radio, WL1273_MODE_OFF);
- else
- r = core->set_volume(core, core->volume);
- break;
-
- case V4L2_CID_TUNE_PREEMPHASIS:
- r = wl1273_fm_set_preemphasis(radio, ctrl->val);
- break;
-
- case V4L2_CID_TUNE_POWER_LEVEL:
- r = wl1273_fm_set_tx_power(radio, ctrl->val);
- break;
-
- default:
- dev_warn(radio->dev, "%s: Unknown IOCTL: %d\n",
- __func__, ctrl->id);
- break;
- }
-
- dev_dbg(radio->dev, "%s\n", __func__);
- return r;
-}
-
-static int wl1273_fm_vidioc_g_audio(struct file *file, void *priv,
- struct v4l2_audio *audio)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- if (audio->index > 1)
- return -EINVAL;
-
- strscpy(audio->name, "Radio", sizeof(audio->name));
- audio->capability = V4L2_AUDCAP_STEREO;
-
- return 0;
-}
-
-static int wl1273_fm_vidioc_s_audio(struct file *file, void *priv,
- const struct v4l2_audio *audio)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- if (audio->index != 0)
- return -EINVAL;
-
- return 0;
-}
-
-#define WL1273_RDS_NOT_SYNCHRONIZED 0
-#define WL1273_RDS_SYNCHRONIZED 1
-
-static int wl1273_fm_vidioc_g_tuner(struct file *file, void *priv,
- struct v4l2_tuner *tuner)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
- struct wl1273_core *core = radio->core;
- u16 val;
- int r;
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- if (tuner->index > 0)
- return -EINVAL;
-
- strscpy(tuner->name, WL1273_FM_DRIVER_NAME, sizeof(tuner->name));
- tuner->type = V4L2_TUNER_RADIO;
-
- tuner->rangelow = WL1273_FREQ(WL1273_BAND_JAPAN_LOW);
- tuner->rangehigh = WL1273_FREQ(WL1273_BAND_OTHER_HIGH);
-
- tuner->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_RDS |
- V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_RDS_BLOCK_IO |
- V4L2_TUNER_CAP_HWSEEK_BOUNDED | V4L2_TUNER_CAP_HWSEEK_WRAP;
-
- if (radio->stereo)
- tuner->audmode = V4L2_TUNER_MODE_STEREO;
- else
- tuner->audmode = V4L2_TUNER_MODE_MONO;
-
- if (core->mode != WL1273_MODE_RX)
- return 0;
-
- if (mutex_lock_interruptible(&core->lock))
- return -EINTR;
-
- r = core->read(core, WL1273_STEREO_GET, &val);
- if (r)
- goto out;
-
- if (val == 1)
- tuner->rxsubchans = V4L2_TUNER_SUB_STEREO;
- else
- tuner->rxsubchans = V4L2_TUNER_SUB_MONO;
-
- r = core->read(core, WL1273_RSSI_LVL_GET, &val);
- if (r)
- goto out;
-
- tuner->signal = (s16) val;
- dev_dbg(radio->dev, "Signal: %d\n", tuner->signal);
-
- tuner->afc = 0;
-
- r = core->read(core, WL1273_RDS_SYNC_GET, &val);
- if (r)
- goto out;
-
- if (val == WL1273_RDS_SYNCHRONIZED)
- tuner->rxsubchans |= V4L2_TUNER_SUB_RDS;
-out:
- mutex_unlock(&core->lock);
-
- return r;
-}
-
-static int wl1273_fm_vidioc_s_tuner(struct file *file, void *priv,
- const struct v4l2_tuner *tuner)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
- struct wl1273_core *core = radio->core;
- int r = 0;
-
- dev_dbg(radio->dev, "%s\n", __func__);
- dev_dbg(radio->dev, "tuner->index: %d\n", tuner->index);
- dev_dbg(radio->dev, "tuner->name: %s\n", tuner->name);
- dev_dbg(radio->dev, "tuner->capability: 0x%04x\n", tuner->capability);
- dev_dbg(radio->dev, "tuner->rxsubchans: 0x%04x\n", tuner->rxsubchans);
- dev_dbg(radio->dev, "tuner->rangelow: %d\n", tuner->rangelow);
- dev_dbg(radio->dev, "tuner->rangehigh: %d\n", tuner->rangehigh);
-
- if (tuner->index > 0)
- return -EINVAL;
-
- if (mutex_lock_interruptible(&core->lock))
- return -EINTR;
-
- r = wl1273_fm_set_mode(radio, WL1273_MODE_RX);
- if (r)
- goto out;
-
- if (tuner->rxsubchans & V4L2_TUNER_SUB_RDS)
- r = wl1273_fm_set_rds(radio, WL1273_RDS_ON);
- else
- r = wl1273_fm_set_rds(radio, WL1273_RDS_OFF);
-
- if (r)
- dev_warn(radio->dev, "%s: RDS fails: %d\n", __func__, r);
-
- if (tuner->audmode == V4L2_TUNER_MODE_MONO) {
- r = core->write(core, WL1273_MOST_MODE_SET, WL1273_RX_MONO);
- if (r < 0) {
- dev_warn(radio->dev, "%s: MOST_MODE fails: %d\n",
- __func__, r);
- goto out;
- }
- radio->stereo = false;
- } else if (tuner->audmode == V4L2_TUNER_MODE_STEREO) {
- r = core->write(core, WL1273_MOST_MODE_SET, WL1273_RX_STEREO);
- if (r < 0) {
- dev_warn(radio->dev, "%s: MOST_MODE fails: %d\n",
- __func__, r);
- goto out;
- }
- radio->stereo = true;
- } else {
- dev_err(radio->dev, "%s: tuner->audmode: %d\n",
- __func__, tuner->audmode);
- r = -EINVAL;
- goto out;
- }
-
-out:
- mutex_unlock(&core->lock);
-
- return r;
-}
-
-static int wl1273_fm_vidioc_g_frequency(struct file *file, void *priv,
- struct v4l2_frequency *freq)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
- struct wl1273_core *core = radio->core;
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- if (mutex_lock_interruptible(&core->lock))
- return -EINTR;
-
- freq->type = V4L2_TUNER_RADIO;
- freq->frequency = WL1273_FREQ(wl1273_fm_get_freq(radio));
-
- mutex_unlock(&core->lock);
-
- return 0;
-}
-
-static int wl1273_fm_vidioc_s_frequency(struct file *file, void *priv,
- const struct v4l2_frequency *freq)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
- struct wl1273_core *core = radio->core;
- int r;
-
- dev_dbg(radio->dev, "%s: %d\n", __func__, freq->frequency);
-
- if (freq->type != V4L2_TUNER_RADIO) {
- dev_dbg(radio->dev,
- "freq->type != V4L2_TUNER_RADIO: %d\n", freq->type);
- return -EINVAL;
- }
-
- if (mutex_lock_interruptible(&core->lock))
- return -EINTR;
-
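- /* freq->frequency is presumably in V4L2 62.5 Hz units (V4L2_TUNER_CAP_LOW); WL1273_INV_FREQ converts it back to kHz */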
- if (core->mode == WL1273_MODE_RX) {
- dev_dbg(radio->dev, "freq: %d\n", freq->frequency);
-
- r = wl1273_fm_set_rx_freq(radio,
- WL1273_INV_FREQ(freq->frequency));
- if (r)
- dev_warn(radio->dev, WL1273_FM_DRIVER_NAME
- ": set frequency failed with %d\n", r);
- } else {
- r = wl1273_fm_set_tx_freq(radio,
- WL1273_INV_FREQ(freq->frequency));
- if (r)
- dev_warn(radio->dev, WL1273_FM_DRIVER_NAME
- ": set frequency failed with %d\n", r);
- }
-
- mutex_unlock(&core->lock);
-
- dev_dbg(radio->dev, "wl1273_vidioc_s_frequency: DONE\n");
- return r;
-}
-
-#define WL1273_DEFAULT_SEEK_LEVEL 7
-
-static int wl1273_fm_vidioc_s_hw_freq_seek(struct file *file, void *priv,
- const struct v4l2_hw_freq_seek *seek)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
- struct wl1273_core *core = radio->core;
- int r;
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- if (seek->tuner != 0 || seek->type != V4L2_TUNER_RADIO)
- return -EINVAL;
-
- if (file->f_flags & O_NONBLOCK)
- return -EWOULDBLOCK;
-
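- /* The seek below sleeps on the FR interrupt, hence the O_NONBLOCK rejection above */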
- if (mutex_lock_interruptible(&core->lock))
- return -EINTR;
-
- r = wl1273_fm_set_mode(radio, WL1273_MODE_RX);
- if (r)
- goto out;
-
- r = wl1273_fm_tx_set_spacing(radio, seek->spacing);
- if (r)
- dev_warn(radio->dev, "HW seek failed: %d\n", r);
-
- r = wl1273_fm_set_seek(radio, seek->wrap_around, seek->seek_upward,
- WL1273_DEFAULT_SEEK_LEVEL);
- if (r)
- dev_warn(radio->dev, "HW seek failed: %d\n", r);
-
-out:
- mutex_unlock(&core->lock);
- return r;
-}
-
-static int wl1273_fm_vidioc_s_modulator(struct file *file, void *priv,
- const struct v4l2_modulator *modulator)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
- struct wl1273_core *core = radio->core;
- int r = 0;
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- if (modulator->index > 0)
- return -EINVAL;
-
- if (mutex_lock_interruptible(&core->lock))
- return -EINTR;
-
- r = wl1273_fm_set_mode(radio, WL1273_MODE_TX);
- if (r)
- goto out;
-
- if (modulator->txsubchans & V4L2_TUNER_SUB_RDS)
- r = wl1273_fm_set_rds(radio, WL1273_RDS_ON);
- else
- r = wl1273_fm_set_rds(radio, WL1273_RDS_OFF);
-
- if (modulator->txsubchans & V4L2_TUNER_SUB_MONO)
- r = core->write(core, WL1273_MONO_SET, WL1273_TX_MONO);
- else
- r = core->write(core, WL1273_MONO_SET,
- WL1273_TX_STEREO);
- if (r < 0)
- dev_warn(radio->dev, WL1273_FM_DRIVER_NAME
- ": MONO_SET fails: %d\n", r);
-out:
- mutex_unlock(&core->lock);
-
- return r;
-}
-
-static int wl1273_fm_vidioc_g_modulator(struct file *file, void *priv,
- struct v4l2_modulator *modulator)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
- struct wl1273_core *core = radio->core;
- u16 val;
- int r;
-
- dev_dbg(radio->dev, "%s\n", __func__);
-
- strscpy(modulator->name, WL1273_FM_DRIVER_NAME,
- sizeof(modulator->name));
-
- modulator->rangelow = WL1273_FREQ(WL1273_BAND_JAPAN_LOW);
- modulator->rangehigh = WL1273_FREQ(WL1273_BAND_OTHER_HIGH);
-
- modulator->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_RDS |
- V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_RDS_BLOCK_IO;
-
- if (core->mode != WL1273_MODE_TX)
- return 0;
-
- if (mutex_lock_interruptible(&core->lock))
- return -EINTR;
-
- r = core->read(core, WL1273_MONO_SET, &val);
- if (r)
- goto out;
-
- if (val == WL1273_TX_STEREO)
- modulator->txsubchans = V4L2_TUNER_SUB_STEREO;
- else
- modulator->txsubchans = V4L2_TUNER_SUB_MONO;
-
- if (radio->rds_on)
- modulator->txsubchans |= V4L2_TUNER_SUB_RDS;
-out:
- mutex_unlock(&core->lock);
-
- return r;
-}
-
-static int wl1273_fm_vidioc_log_status(struct file *file, void *priv)
-{
- struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
- struct wl1273_core *core = radio->core;
- struct device *dev = radio->dev;
- u16 val;
- int r;
-
- dev_info(dev, DRIVER_DESC);
-
- if (core->mode == WL1273_MODE_OFF) {
- dev_info(dev, "Mode: Off\n");
- return 0;
- }
-
- if (core->mode == WL1273_MODE_SUSPENDED) {
- dev_info(dev, "Mode: Suspended\n");
- return 0;
- }
-
- r = core->read(core, WL1273_ASIC_ID_GET, &val);
- if (r)
- dev_err(dev, "%s: Get ASIC_ID fails.\n", __func__);
- else
- dev_info(dev, "ASIC_ID: 0x%04x\n", val);
-
- r = core->read(core, WL1273_ASIC_VER_GET, &val);
- if (r)
- dev_err(dev, "%s: Get ASIC_VER fails.\n", __func__);
- else
- dev_info(dev, "ASIC Version: 0x%04x\n", val);
-
- r = core->read(core, WL1273_FIRM_VER_GET, &val);
- if (r)
- dev_err(dev, "%s: Get FIRM_VER fails.\n", __func__);
- else
- dev_info(dev, "FW version: %d(0x%04x)\n", val, val);
-
- r = core->read(core, WL1273_BAND_SET, &val);
- if (r)
- dev_err(dev, "%s: Get BAND fails.\n", __func__);
- else
- dev_info(dev, "BAND: %d\n", val);
-
- if (core->mode == WL1273_MODE_TX) {
- r = core->read(core, WL1273_PUPD_SET, &val);
- if (r)
- dev_err(dev, "%s: Get PUPD fails.\n", __func__);
- else
- dev_info(dev, "PUPD: 0x%04x\n", val);
-
- r = core->read(core, WL1273_CHANL_SET, &val);
- if (r)
- dev_err(dev, "%s: Get CHANL fails.\n", __func__);
- else
- dev_info(dev, "Tx frequency: %dkHz\n", val*10);
- } else if (core->mode == WL1273_MODE_RX) {
- int bf = radio->rangelow;
-
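- /* FREQ_SET holds the offset from the band's low edge in 50 kHz steps */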
- r = core->read(core, WL1273_FREQ_SET, &val);
- if (r)
- dev_err(dev, "%s: Get FREQ fails.\n", __func__);
- else
- dev_info(dev, "RX Frequency: %dkHz\n", bf + val*50);
-
- r = core->read(core, WL1273_MOST_MODE_SET, &val);
- if (r)
- dev_err(dev, "%s: Get MOST_MODE fails.\n",
- __func__);
- else if (val == 0)
- dev_info(dev, "MOST_MODE: Stereo according to blend\n");
- else if (val == 1)
- dev_info(dev, "MOST_MODE: Force mono output\n");
- else
- dev_info(dev, "MOST_MODE: Unexpected value: %d\n", val);
-
- r = core->read(core, WL1273_MOST_BLEND_SET, &val);
- if (r)
- dev_err(dev, "%s: Get MOST_BLEND fails.\n", __func__);
- else if (val == 0)
- dev_info(dev,
- "MOST_BLEND: Switched blend & hysteresis.\n");
- else if (val == 1)
- dev_info(dev, "MOST_BLEND: Soft blend.\n");
- else
- dev_info(dev, "MOST_BLEND: Unexpected val: %d\n", val);
-
- r = core->read(core, WL1273_STEREO_GET, &val);
- if (r)
- dev_err(dev, "%s: Get STEREO fails.\n", __func__);
- else if (val == 0)
- dev_info(dev, "STEREO: Not detected\n");
- else if (val == 1)
- dev_info(dev, "STEREO: Detected\n");
- else
- dev_info(dev, "STEREO: Unexpected value: %d\n", val);
-
- r = core->read(core, WL1273_RSSI_LVL_GET, &val);
- if (r)
- dev_err(dev, "%s: Get RSSI_LVL fails.\n", __func__);
- else
- dev_info(dev, "RX signal strength: %d\n", (s16) val);
-
- r = core->read(core, WL1273_POWER_SET, &val);
- if (r)
- dev_err(dev, "%s: Get POWER fails.\n", __func__);
- else
- dev_info(dev, "POWER: 0x%04x\n", val);
-
- r = core->read(core, WL1273_INT_MASK_SET, &val);
- if (r)
- dev_err(dev, "%s: Get INT_MASK fails.\n", __func__);
- else
- dev_info(dev, "INT_MASK: 0x%04x\n", val);
-
- r = core->read(core, WL1273_RDS_SYNC_GET, &val);
- if (r)
- dev_err(dev, "%s: Get RDS_SYNC fails.\n",
- __func__);
- else if (val == 0)
- dev_info(dev, "RDS_SYNC: Not synchronized\n");
- else if (val == 1)
- dev_info(dev, "RDS_SYNC: Synchronized\n");
- else
- dev_info(dev, "RDS_SYNC: Unexpected value: %d\n", val);
-
- r = core->read(core, WL1273_I2S_MODE_CONFIG_SET, &val);
- if (r)
- dev_err(dev, "%s: Get I2S_MODE_CONFIG fails.\n",
- __func__);
- else
- dev_info(dev, "I2S_MODE_CONFIG: 0x%04x\n", val);
-
- r = core->read(core, WL1273_VOLUME_SET, &val);
- if (r)
- dev_err(dev, "%s: Get VOLUME fails.\n", __func__);
- else
- dev_info(dev, "VOLUME: 0x%04x\n", val);
- }
-
- return 0;
-}
-
-static void wl1273_vdev_release(struct video_device *dev)
-{
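- /* All driver state is devm-allocated, so there is nothing to release here */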
-}
-
-static const struct v4l2_ctrl_ops wl1273_ctrl_ops = {
- .s_ctrl = wl1273_fm_s_ctrl,
- .g_volatile_ctrl = wl1273_fm_g_volatile_ctrl,
-};
-
-static const struct v4l2_ioctl_ops wl1273_ioctl_ops = {
- .vidioc_querycap = wl1273_fm_vidioc_querycap,
- .vidioc_g_input = wl1273_fm_vidioc_g_input,
- .vidioc_s_input = wl1273_fm_vidioc_s_input,
- .vidioc_g_audio = wl1273_fm_vidioc_g_audio,
- .vidioc_s_audio = wl1273_fm_vidioc_s_audio,
- .vidioc_g_tuner = wl1273_fm_vidioc_g_tuner,
- .vidioc_s_tuner = wl1273_fm_vidioc_s_tuner,
- .vidioc_g_frequency = wl1273_fm_vidioc_g_frequency,
- .vidioc_s_frequency = wl1273_fm_vidioc_s_frequency,
- .vidioc_s_hw_freq_seek = wl1273_fm_vidioc_s_hw_freq_seek,
- .vidioc_g_modulator = wl1273_fm_vidioc_g_modulator,
- .vidioc_s_modulator = wl1273_fm_vidioc_s_modulator,
- .vidioc_log_status = wl1273_fm_vidioc_log_status,
-};
-
-static const struct video_device wl1273_viddev_template = {
- .fops = &wl1273_fops,
- .ioctl_ops = &wl1273_ioctl_ops,
- .name = WL1273_FM_DRIVER_NAME,
- .release = wl1273_vdev_release,
- .vfl_dir = VFL_DIR_TX,
- .device_caps = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_TUNER |
- V4L2_CAP_RADIO | V4L2_CAP_AUDIO |
- V4L2_CAP_RDS_CAPTURE | V4L2_CAP_MODULATOR |
- V4L2_CAP_RDS_OUTPUT,
-};
-
-static void wl1273_fm_radio_remove(struct platform_device *pdev)
-{
- struct wl1273_device *radio = platform_get_drvdata(pdev);
- struct wl1273_core *core = radio->core;
-
- dev_info(&pdev->dev, "%s.\n", __func__);
-
- free_irq(core->client->irq, radio);
- core->pdata->free_resources();
-
- v4l2_ctrl_handler_free(&radio->ctrl_handler);
- video_unregister_device(&radio->videodev);
- v4l2_device_unregister(&radio->v4l2dev);
-}
-
-static int wl1273_fm_radio_probe(struct platform_device *pdev)
-{
- struct wl1273_core **core = pdev->dev.platform_data;
- struct wl1273_device *radio;
- struct v4l2_ctrl *ctrl;
- int r = 0;
-
- pr_debug("%s\n", __func__);
-
- if (!core) {
- dev_err(&pdev->dev, "No platform data.\n");
- r = -EINVAL;
- goto pdata_err;
- }
-
- radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
- if (!radio) {
- r = -ENOMEM;
- goto pdata_err;
- }
-
- /* RDS buffer allocation */
- radio->buf_size = rds_buf * RDS_BLOCK_SIZE;
- radio->buffer = devm_kzalloc(&pdev->dev, radio->buf_size, GFP_KERNEL);
- if (!radio->buffer) {
- pr_err("Cannot allocate memory for RDS buffer.\n");
- r = -ENOMEM;
- goto pdata_err;
- }
-
- radio->core = *core;
- radio->irq_flags = WL1273_IRQ_MASK;
- radio->dev = &radio->core->client->dev;
- radio->rds_on = false;
- radio->core->mode = WL1273_MODE_OFF;
- radio->tx_power = 118;
- radio->core->audio_mode = WL1273_AUDIO_ANALOG;
- radio->band = WL1273_BAND_OTHER;
- radio->core->i2s_mode = WL1273_I2S_DEF_MODE;
- radio->core->channel_number = 2;
- radio->core->volume = WL1273_DEFAULT_VOLUME;
- radio->rx_frequency = WL1273_BAND_OTHER_LOW;
- radio->tx_frequency = WL1273_BAND_OTHER_HIGH;
- radio->rangelow = WL1273_BAND_OTHER_LOW;
- radio->rangehigh = WL1273_BAND_OTHER_HIGH;
- radio->stereo = true;
- radio->bus_type = "I2C";
-
- if (radio->core->pdata->request_resources) {
- r = radio->core->pdata->request_resources(radio->core->client);
- if (r) {
- dev_err(radio->dev, WL1273_FM_DRIVER_NAME
- ": Cannot request platform resources\n");
- goto pdata_err;
- }
-
- dev_dbg(radio->dev, "irq: %d\n", radio->core->client->irq);
-
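- /* A threaded handler is needed here: servicing the IRQ involves sleeping I2C transfers */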
- r = request_threaded_irq(radio->core->client->irq, NULL,
- wl1273_fm_irq_thread_handler,
- IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
- "wl1273-fm", radio);
- if (r < 0) {
- dev_err(radio->dev, WL1273_FM_DRIVER_NAME
- ": Unable to register IRQ handler: %d\n", r);
- goto err_request_irq;
- }
- } else {
- dev_err(radio->dev, WL1273_FM_DRIVER_NAME ": Core WL1273 IRQ not configured");
- r = -EINVAL;
- goto pdata_err;
- }
-
- init_completion(&radio->busy);
- init_waitqueue_head(&radio->read_queue);
-
- radio->write_buf = devm_kzalloc(&pdev->dev, 256, GFP_KERNEL);
- if (!radio->write_buf) {
- r = -ENOMEM;
- goto write_buf_err;
- }
-
- radio->dev = &pdev->dev;
- radio->v4l2dev.ctrl_handler = &radio->ctrl_handler;
- radio->rds_users = 0;
-
- r = v4l2_device_register(&pdev->dev, &radio->v4l2dev);
- if (r) {
- dev_err(&pdev->dev, "Cannot register v4l2_device.\n");
- goto write_buf_err;
- }
-
- /* V4L2 configuration */
- radio->videodev = wl1273_viddev_template;
-
- radio->videodev.v4l2_dev = &radio->v4l2dev;
-
- v4l2_ctrl_handler_init(&radio->ctrl_handler, 6);
-
- /* add in ascending ID order */
- v4l2_ctrl_new_std(&radio->ctrl_handler, &wl1273_ctrl_ops,
- V4L2_CID_AUDIO_VOLUME, 0, WL1273_MAX_VOLUME, 1,
- WL1273_DEFAULT_VOLUME);
-
- v4l2_ctrl_new_std(&radio->ctrl_handler, &wl1273_ctrl_ops,
- V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
-
- v4l2_ctrl_new_std_menu(&radio->ctrl_handler, &wl1273_ctrl_ops,
- V4L2_CID_TUNE_PREEMPHASIS,
- V4L2_PREEMPHASIS_75_uS, 0x03,
- V4L2_PREEMPHASIS_50_uS);
-
- v4l2_ctrl_new_std(&radio->ctrl_handler, &wl1273_ctrl_ops,
- V4L2_CID_TUNE_POWER_LEVEL, 91, 122, 1, 118);
-
- ctrl = v4l2_ctrl_new_std(&radio->ctrl_handler, &wl1273_ctrl_ops,
- V4L2_CID_TUNE_ANTENNA_CAPACITOR,
- 0, 255, 1, 255);
- if (ctrl)
- ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
-
- if (radio->ctrl_handler.error) {
- r = radio->ctrl_handler.error;
- dev_err(&pdev->dev, "Ctrl handler error: %d\n", r);
- goto handler_init_err;
- }
-
- video_set_drvdata(&radio->videodev, radio);
- platform_set_drvdata(pdev, radio);
-
- /* register video device */
- r = video_register_device(&radio->videodev, VFL_TYPE_RADIO, radio_nr);
- if (r) {
- dev_err(&pdev->dev, WL1273_FM_DRIVER_NAME
- ": Could not register video device\n");
- goto handler_init_err;
- }
-
- return 0;
-
-handler_init_err:
- v4l2_ctrl_handler_free(&radio->ctrl_handler);
- v4l2_device_unregister(&radio->v4l2dev);
-write_buf_err:
- free_irq(radio->core->client->irq, radio);
-err_request_irq:
- radio->core->pdata->free_resources();
-pdata_err:
- return r;
-}
-
-static struct platform_driver wl1273_fm_radio_driver = {
- .probe = wl1273_fm_radio_probe,
- .remove = wl1273_fm_radio_remove,
- .driver = {
- .name = "wl1273_fm_radio",
- },
-};
-
-module_platform_driver(wl1273_fm_radio_driver);
-
-MODULE_AUTHOR("Matti Aaltonen <matti.j.aaltonen@nokia.com>");
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:wl1273_fm_radio");
diff --git a/drivers/media/radio/radio-zoltrix.c b/drivers/media/radio/radio-zoltrix.c
index 099b7af6a410..e043bee52384 100644
--- a/drivers/media/radio/radio-zoltrix.c
+++ b/drivers/media/radio/radio-zoltrix.c
@@ -30,7 +30,7 @@
* 2006-07-24 - Converted to V4L2 API
* by Mauro Carvalho Chehab <mchehab@kernel.org>
*
- * Converted to the radio-isa framework by Hans Verkuil <hansverk@cisco.com>
+ * Converted to the radio-isa framework by Hans Verkuil <hverkuil@kernel.org>
*
* Note that this is the driver for the Zoltrix Radio Plus.
* This driver does not work for the Zoltrix Radio Plus 108 or the
diff --git a/drivers/media/radio/si4713/radio-platform-si4713.c b/drivers/media/radio/si4713/radio-platform-si4713.c
index 67b4afadc95a..4132968110e3 100644
--- a/drivers/media/radio/si4713/radio-platform-si4713.c
+++ b/drivers/media/radio/si4713/radio-platform-si4713.c
@@ -75,35 +75,35 @@ static inline struct v4l2_device *get_v4l2_dev(struct file *file)
return &((struct radio_si4713_device *)video_drvdata(file))->v4l2_dev;
}
-static int radio_si4713_g_modulator(struct file *file, void *p,
+static int radio_si4713_g_modulator(struct file *file, void *priv,
struct v4l2_modulator *vm)
{
return v4l2_device_call_until_err(get_v4l2_dev(file), 0, tuner,
g_modulator, vm);
}
-static int radio_si4713_s_modulator(struct file *file, void *p,
+static int radio_si4713_s_modulator(struct file *file, void *priv,
const struct v4l2_modulator *vm)
{
return v4l2_device_call_until_err(get_v4l2_dev(file), 0, tuner,
s_modulator, vm);
}
-static int radio_si4713_g_frequency(struct file *file, void *p,
+static int radio_si4713_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *vf)
{
return v4l2_device_call_until_err(get_v4l2_dev(file), 0, tuner,
g_frequency, vf);
}
-static int radio_si4713_s_frequency(struct file *file, void *p,
+static int radio_si4713_s_frequency(struct file *file, void *priv,
const struct v4l2_frequency *vf)
{
return v4l2_device_call_until_err(get_v4l2_dev(file), 0, tuner,
s_frequency, vf);
}
-static long radio_si4713_default(struct file *file, void *p,
+static long radio_si4713_default(struct file *file, void *priv,
bool valid_prio, unsigned int cmd, void *arg)
{
return v4l2_device_call_until_err(get_v4l2_dev(file), 0, core,
diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c
index bf6d8fa983bf..a6418ef782bc 100644
--- a/drivers/media/rc/gpio-ir-recv.c
+++ b/drivers/media/rc/gpio-ir-recv.c
@@ -48,10 +48,8 @@ static irqreturn_t gpio_ir_recv_irq(int irq, void *dev_id)
if (val >= 0)
ir_raw_event_store_edge(gpio_dev->rcdev, val == 1);
- if (pmdev) {
- pm_runtime_mark_last_busy(pmdev);
+ if (pmdev)
pm_runtime_put_autosuspend(pmdev);
- }
return IRQ_HANDLED;
}
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index f5221b018808..35b9e07003d8 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -39,11 +39,6 @@
#define DISPLAY_MINOR_BASE 144
#define DEVICE_NAME "lcd%d"
-#define BUF_CHUNK_SIZE 8
-#define BUF_SIZE 128
-
-#define BIT_DURATION 250 /* each bit received is 250us */
-
#define IMON_CLOCK_ENABLE_PACKETS 2
/*** P R O T O T Y P E S ***/
@@ -536,7 +531,9 @@ static int display_open(struct inode *inode, struct file *file)
mutex_lock(&ictx->lock);
- if (!ictx->display_supported) {
+ if (ictx->disconnected) {
+ retval = -ENODEV;
+ } else if (!ictx->display_supported) {
pr_err("display not supported by device\n");
retval = -ENODEV;
} else if (ictx->display_isopen) {
@@ -598,6 +595,11 @@ static int send_packet(struct imon_context *ictx)
int retval = 0;
struct usb_ctrlrequest *control_req = NULL;
+ lockdep_assert_held(&ictx->lock);
+
+ if (ictx->disconnected)
+ return -ENODEV;
+
/* Check if we need to use control or interrupt urb */
if (!ictx->tx_control) {
pipe = usb_sndintpipe(ictx->usbdev_intf0,
@@ -645,12 +647,15 @@ static int send_packet(struct imon_context *ictx)
smp_rmb(); /* ensure later readers know we're not busy */
pr_err_ratelimited("error submitting urb(%d)\n", retval);
} else {
- /* Wait for transmission to complete (or abort) */
- retval = wait_for_completion_interruptible(
- &ictx->tx.finished);
- if (retval) {
+ /* Wait for transmission to complete (or abort or timeout) */
+ retval = wait_for_completion_interruptible_timeout(&ictx->tx.finished, 10 * HZ);
+ if (retval <= 0) {
usb_kill_urb(ictx->tx_urb);
pr_err_ratelimited("task interrupted\n");
+ if (retval < 0)
+ ictx->tx.status = retval;
+ else
+ ictx->tx.status = -ETIMEDOUT;
}
ictx->tx.busy = false;
@@ -949,12 +954,14 @@ static ssize_t vfd_write(struct file *file, const char __user *buf,
static const unsigned char vfd_packet6[] = {
0x01, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF };
- if (ictx->disconnected)
- return -ENODEV;
-
if (mutex_lock_interruptible(&ictx->lock))
return -ERESTARTSYS;
+ if (ictx->disconnected) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
if (!ictx->dev_present_intf0) {
pr_err_ratelimited("no iMON device present\n");
retval = -ENODEV;
@@ -1029,11 +1036,13 @@ static ssize_t lcd_write(struct file *file, const char __user *buf,
int retval = 0;
struct imon_context *ictx = file->private_data;
- if (ictx->disconnected)
- return -ENODEV;
-
mutex_lock(&ictx->lock);
+ if (ictx->disconnected) {
+ retval = -ENODEV;
+ goto exit;
+ }
+
if (!ictx->display_supported) {
pr_err_ratelimited("no iMON display present\n");
retval = -ENODEV;
@@ -1121,7 +1130,7 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 *rc_proto)
int retval;
struct imon_context *ictx = rc->priv;
struct device *dev = ictx->dev;
- bool unlock = false;
+ const bool unlock = mutex_trylock(&ictx->lock);
unsigned char ir_proto_packet[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86 };
@@ -1148,8 +1157,6 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 *rc_proto)
memcpy(ictx->usb_tx_buf, &ir_proto_packet, sizeof(ir_proto_packet));
- unlock = mutex_trylock(&ictx->lock);
-
retval = send_packet(ictx);
if (retval)
goto out;
@@ -1745,14 +1752,6 @@ static void usb_rx_callback_intf0(struct urb *urb)
if (!ictx)
return;
- /*
- * if we get a callback before we're done configuring the hardware, we
- * can't yet process the data, as there's nowhere to send it, but we
- * still need to submit a new rx URB to avoid wedging the hardware
- */
- if (!ictx->dev_present_intf0)
- goto out;
-
switch (urb->status) {
case -ENOENT: /* usbcore unlink successful! */
return;
@@ -1761,16 +1760,29 @@ static void usb_rx_callback_intf0(struct urb *urb)
break;
case 0:
- imon_incoming_packet(ictx, urb, intfnum);
+ /*
+ * if we get a callback before we're done configuring the hardware, we
+ * can't yet process the data, as there's nowhere to send it, but we
+ * still need to submit a new rx URB to avoid wedging the hardware
+ */
+ if (ictx->dev_present_intf0)
+ imon_incoming_packet(ictx, urb, intfnum);
break;
+ case -ECONNRESET:
+ case -EILSEQ:
+ case -EPROTO:
+ case -EPIPE:
+ dev_warn(ictx->dev, "imon %s: status(%d)\n",
+ __func__, urb->status);
+ return;
+
default:
dev_warn(ictx->dev, "imon %s: status(%d): ignored\n",
__func__, urb->status);
break;
}
-out:
usb_submit_urb(ictx->rx_urb_intf0, GFP_ATOMIC);
}
@@ -1786,14 +1798,6 @@ static void usb_rx_callback_intf1(struct urb *urb)
if (!ictx)
return;
- /*
- * if we get a callback before we're done configuring the hardware, we
- * can't yet process the data, as there's nowhere to send it, but we
- * still need to submit a new rx URB to avoid wedging the hardware
- */
- if (!ictx->dev_present_intf1)
- goto out;
-
switch (urb->status) {
case -ENOENT: /* usbcore unlink successful! */
return;
@@ -1802,16 +1806,29 @@ static void usb_rx_callback_intf1(struct urb *urb)
break;
case 0:
- imon_incoming_packet(ictx, urb, intfnum);
+ /*
+ * if we get a callback before we're done configuring the hardware, we
+ * can't yet process the data, as there's nowhere to send it, but we
+ * still need to submit a new rx URB to avoid wedging the hardware
+ */
+ if (ictx->dev_present_intf1)
+ imon_incoming_packet(ictx, urb, intfnum);
break;
+ case -ECONNRESET:
+ case -EILSEQ:
+ case -EPROTO:
+ case -EPIPE:
+ dev_warn(ictx->dev, "imon %s: status(%d)\n",
+ __func__, urb->status);
+ return;
+
default:
dev_warn(ictx->dev, "imon %s: status(%d): ignored\n",
__func__, urb->status);
break;
}
-out:
usb_submit_urb(ictx->rx_urb_intf1, GFP_ATOMIC);
}
@@ -2499,7 +2516,11 @@ static void imon_disconnect(struct usb_interface *interface)
int ifnum;
ictx = usb_get_intfdata(interface);
+
+ mutex_lock(&ictx->lock);
ictx->disconnected = true;
+ mutex_unlock(&ictx->lock);
+
dev = ictx->dev;
ifnum = interface->cur_altsetting->desc.bInterfaceNumber;
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index a2257dc2f25d..7d4942925993 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -736,11 +736,11 @@ int lirc_register(struct rc_dev *dev)
cdev_init(&dev->lirc_cdev, &lirc_fops);
+ get_device(&dev->dev);
+
err = cdev_device_add(&dev->lirc_cdev, &dev->lirc_dev);
if (err)
- goto out_ida;
-
- get_device(&dev->dev);
+ goto out_put_device;
switch (dev->driver_type) {
case RC_DRIVER_SCANCODE:
@@ -764,7 +764,8 @@ int lirc_register(struct rc_dev *dev)
return 0;
-out_ida:
+out_put_device:
+ put_device(&dev->lirc_dev);
ida_free(&lirc_ida, minor);
return err;
}
diff --git a/drivers/media/rc/pwm-ir-tx.c b/drivers/media/rc/pwm-ir-tx.c
index 84533fdd61aa..047472dc9244 100644
--- a/drivers/media/rc/pwm-ir-tx.c
+++ b/drivers/media/rc/pwm-ir-tx.c
@@ -117,7 +117,6 @@ static int pwm_ir_tx_atomic(struct rc_dev *dev, unsigned int *txbuf,
static enum hrtimer_restart pwm_ir_timer(struct hrtimer *timer)
{
struct pwm_ir *pwm_ir = container_of(timer, struct pwm_ir, timer);
- ktime_t now;
/*
* If we happen to hit an odd latency spike, loop through the
@@ -139,9 +138,7 @@ static enum hrtimer_restart pwm_ir_timer(struct hrtimer *timer)
hrtimer_add_expires_ns(timer, ns);
pwm_ir->txbuf_index++;
-
- now = timer->base->get_time();
- } while (hrtimer_get_expires_tv64(timer) < now);
+ } while (hrtimer_expires_remaining(timer) > 0);
return HRTIMER_RESTART;
}
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index d89a4cfe3c89..a49173f54a4d 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -422,7 +422,7 @@ static int redrat3_send_cmd(int cmd, struct redrat3_dev *rr3)
static int redrat3_enable_detector(struct redrat3_dev *rr3)
{
struct device *dev = rr3->dev;
- u8 ret;
+ int ret;
ret = redrat3_send_cmd(RR3_RC_DET_ENABLE, rr3);
if (ret != 0)
diff --git a/drivers/media/test-drivers/vicodec/vicodec-core.c b/drivers/media/test-drivers/vicodec/vicodec-core.c
index c45f5cf12ded..a3df3a33237e 100644
--- a/drivers/media/test-drivers/vicodec/vicodec-core.c
+++ b/drivers/media/test-drivers/vicodec/vicodec-core.c
@@ -26,7 +26,7 @@
#include "codec-v4l2-fwht.h"
MODULE_DESCRIPTION("Virtual codec device");
-MODULE_AUTHOR("Hans Verkuil <hansverk@cisco.com>");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org>");
MODULE_LICENSE("GPL v2");
static bool multiplanar;
@@ -144,7 +144,7 @@ static const struct v4l2_event vicodec_eos_event = {
static inline struct vicodec_ctx *file2ctx(struct file *file)
{
- return container_of(file->private_data, struct vicodec_ctx, fh);
+ return container_of(file_to_v4l2_fh(file), struct vicodec_ctx, fh);
}
static struct vicodec_q_data *get_q_data(struct vicodec_ctx *ctx,
@@ -1207,20 +1207,20 @@ static int vidioc_s_selection(struct file *file, void *priv,
return 0;
}
-static int vicodec_encoder_cmd(struct file *file, void *fh,
+static int vicodec_encoder_cmd(struct file *file, void *priv,
struct v4l2_encoder_cmd *ec)
{
struct vicodec_ctx *ctx = file2ctx(file);
int ret;
- ret = v4l2_m2m_ioctl_try_encoder_cmd(file, fh, ec);
+ ret = v4l2_m2m_ioctl_try_encoder_cmd(file, priv, ec);
if (ret < 0)
return ret;
if (!vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q))
return 0;
- ret = v4l2_m2m_ioctl_encoder_cmd(file, fh, ec);
+ ret = v4l2_m2m_ioctl_encoder_cmd(file, priv, ec);
if (ret < 0)
return ret;
@@ -1235,7 +1235,7 @@ static int vicodec_encoder_cmd(struct file *file, void *fh,
return 0;
}
-static int vicodec_decoder_cmd(struct file *file, void *fh,
+static int vicodec_decoder_cmd(struct file *file, void *priv,
struct v4l2_decoder_cmd *dc)
{
struct vicodec_ctx *ctx = file2ctx(file);
@@ -1247,14 +1247,14 @@ static int vicodec_decoder_cmd(struct file *file, void *fh,
*/
WARN_ON(ctx->is_stateless);
- ret = v4l2_m2m_ioctl_try_decoder_cmd(file, fh, dc);
+ ret = v4l2_m2m_ioctl_try_decoder_cmd(file, priv, dc);
if (ret < 0)
return ret;
if (!vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q))
return 0;
- ret = v4l2_m2m_ioctl_decoder_cmd(file, fh, dc);
+ ret = v4l2_m2m_ioctl_decoder_cmd(file, priv, dc);
if (ret < 0)
return ret;
@@ -1269,7 +1269,7 @@ static int vicodec_decoder_cmd(struct file *file, void *fh,
return 0;
}
-static int vicodec_enum_framesizes(struct file *file, void *fh,
+static int vicodec_enum_framesizes(struct file *file, void *priv,
struct v4l2_frmsizeenum *fsize)
{
switch (fsize->pixel_format) {
@@ -1848,7 +1848,6 @@ static int vicodec_open(struct file *file)
ctx->is_stateless = true;
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
ctx->dev = dev;
hdl = &ctx->hdl;
v4l2_ctrl_handler_init(hdl, 5);
@@ -1932,7 +1931,7 @@ static int vicodec_open(struct file *file)
goto open_unlock;
}
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
open_unlock:
mutex_unlock(vfd->lock);
@@ -1947,7 +1946,7 @@ static int vicodec_release(struct file *file)
mutex_lock(vfd->lock);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
mutex_unlock(vfd->lock);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->hdl);
kvfree(ctx->state.compressed_frame);
diff --git a/drivers/media/test-drivers/vim2m.c b/drivers/media/test-drivers/vim2m.c
index 1d1a9e768505..86c32699111a 100644
--- a/drivers/media/test-drivers/vim2m.c
+++ b/drivers/media/test-drivers/vim2m.c
@@ -191,9 +191,7 @@ static void get_alignment(u32 fourcc,
struct vim2m_dev {
struct v4l2_device v4l2_dev;
struct video_device vfd;
-#ifdef CONFIG_MEDIA_CONTROLLER
struct media_device mdev;
-#endif
atomic_t num_inst;
struct mutex dev_mutex;
@@ -236,7 +234,7 @@ struct vim2m_ctx {
static inline struct vim2m_ctx *file2ctx(struct file *file)
{
- return container_of(file->private_data, struct vim2m_ctx, fh);
+ return container_of(file_to_v4l2_fh(file), struct vim2m_ctx, fh);
}
static struct vim2m_q_data *get_q_data(struct vim2m_ctx *ctx,
@@ -268,9 +266,6 @@ static const char *type_name(enum v4l2_buf_type type)
}
}
-#define CLIP(__color) \
- (u8)(((__color) > 0xff) ? 0xff : (((__color) < 0) ? 0 : (__color)))
-
static void copy_line(struct vim2m_q_data *q_data_out,
u8 *src, u8 *dst, bool reverse)
{
@@ -1389,7 +1384,6 @@ static int vim2m_open(struct file *file)
}
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
ctx->dev = dev;
hdl = &ctx->hdl;
v4l2_ctrl_handler_init(hdl, 4);
@@ -1433,7 +1427,7 @@ static int vim2m_open(struct file *file)
goto open_unlock;
}
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
atomic_inc(&dev->num_inst);
dprintk(dev, 1, "Created instance: %p, m2m_ctx: %p\n",
@@ -1451,7 +1445,7 @@ static int vim2m_release(struct file *file)
dprintk(dev, 1, "Releasing instance %p\n", ctx);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->hdl);
mutex_lock(&dev->dev_mutex);
@@ -1470,9 +1464,7 @@ static void vim2m_device_release(struct video_device *vdev)
v4l2_device_unregister(&dev->v4l2_dev);
v4l2_m2m_release(dev->m2m_dev);
-#ifdef CONFIG_MEDIA_CONTROLLER
media_device_cleanup(&dev->mdev);
-#endif
kfree(dev);
}
@@ -1543,7 +1535,6 @@ static int vim2m_probe(struct platform_device *pdev)
goto error_dev;
}
-#ifdef CONFIG_MEDIA_CONTROLLER
dev->mdev.dev = &pdev->dev;
strscpy(dev->mdev.model, "vim2m", sizeof(dev->mdev.model));
strscpy(dev->mdev.bus_info, "platform:vim2m",
@@ -1551,7 +1542,6 @@ static int vim2m_probe(struct platform_device *pdev)
media_device_init(&dev->mdev);
dev->mdev.ops = &m2m_media_ops;
dev->v4l2_dev.mdev = &dev->mdev;
-#endif
ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0);
if (ret) {
@@ -1562,7 +1552,6 @@ static int vim2m_probe(struct platform_device *pdev)
v4l2_info(&dev->v4l2_dev,
"Device registered as /dev/video%d\n", vfd->num);
-#ifdef CONFIG_MEDIA_CONTROLLER
ret = v4l2_m2m_register_media_controller(dev->m2m_dev, vfd,
MEDIA_ENT_F_PROC_VIDEO_SCALER);
if (ret) {
@@ -1575,13 +1564,11 @@ static int vim2m_probe(struct platform_device *pdev)
v4l2_err(&dev->v4l2_dev, "Failed to register mem2mem media device\n");
goto error_m2m_mc;
}
-#endif
+
return 0;
-#ifdef CONFIG_MEDIA_CONTROLLER
error_m2m_mc:
v4l2_m2m_unregister_media_controller(dev->m2m_dev);
-#endif
error_v4l2:
video_unregister_device(&dev->vfd);
/* vim2m_device_release called by video_unregister_device to release various objects */
@@ -1602,10 +1589,8 @@ static void vim2m_remove(struct platform_device *pdev)
v4l2_info(&dev->v4l2_dev, "Removing " MEM2MEM_NAME);
-#ifdef CONFIG_MEDIA_CONTROLLER
media_device_unregister(&dev->mdev);
v4l2_m2m_unregister_media_controller(dev->m2m_dev);
-#endif
video_unregister_device(&dev->vfd);
}
diff --git a/drivers/media/test-drivers/vimc/vimc-capture.c b/drivers/media/test-drivers/vimc/vimc-capture.c
index 10df039278e7..7f6124025fc9 100644
--- a/drivers/media/test-drivers/vimc/vimc-capture.c
+++ b/drivers/media/test-drivers/vimc/vimc-capture.c
@@ -57,8 +57,6 @@ static int vimc_capture_querycap(struct file *file, void *priv,
{
strscpy(cap->driver, VIMC_PDEV_NAME, sizeof(cap->driver));
strscpy(cap->card, KBUILD_MODNAME, sizeof(cap->card));
- snprintf(cap->bus_info, sizeof(cap->bus_info),
- "platform:%s", VIMC_PDEV_NAME);
return 0;
}
@@ -169,7 +167,7 @@ static int vimc_capture_enum_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
-static int vimc_capture_enum_framesizes(struct file *file, void *fh,
+static int vimc_capture_enum_framesizes(struct file *file, void *priv,
struct v4l2_frmsizeenum *fsize)
{
const struct vimc_pix_map *vpix;
diff --git a/drivers/media/test-drivers/vimc/vimc-core.c b/drivers/media/test-drivers/vimc/vimc-core.c
index c812fa9f0650..f632c77e52f5 100644
--- a/drivers/media/test-drivers/vimc/vimc-core.c
+++ b/drivers/media/test-drivers/vimc/vimc-core.c
@@ -366,8 +366,6 @@ static int vimc_probe(struct platform_device *pdev)
/* Initialize media device */
strscpy(vimc->mdev.model, VIMC_MDEV_MODEL_NAME,
sizeof(vimc->mdev.model));
- snprintf(vimc->mdev.bus_info, sizeof(vimc->mdev.bus_info),
- "platform:%s", VIMC_PDEV_NAME);
vimc->mdev.dev = &pdev->dev;
media_device_init(&vimc->mdev);
diff --git a/drivers/media/test-drivers/visl/visl-core.c b/drivers/media/test-drivers/visl/visl-core.c
index 5bf3136b36eb..26c6c6835f79 100644
--- a/drivers/media/test-drivers/visl/visl-core.c
+++ b/drivers/media/test-drivers/visl/visl-core.c
@@ -341,7 +341,6 @@ static int visl_open(struct file *file)
ctx->tpg_str_buf = kzalloc(TPG_STR_BUF_SZ, GFP_KERNEL);
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
ctx->dev = dev;
rc = visl_init_ctrls(ctx);
@@ -361,7 +360,7 @@ static int visl_open(struct file *file)
if (rc)
goto free_m2m_ctx;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
dprintk(dev, "Created instance: %p, m2m_ctx: %p\n",
ctx, ctx->fh.m2m_ctx);
@@ -390,7 +389,7 @@ static int visl_release(struct file *file)
dprintk(dev, "Releasing instance %p\n", ctx);
tpg_free(&ctx->tpg);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->hdl);
mutex_lock(&dev->dev_mutex);
diff --git a/drivers/media/test-drivers/visl/visl.h b/drivers/media/test-drivers/visl/visl.h
index 434e9efbf9b2..2971e8b37ff6 100644
--- a/drivers/media/test-drivers/visl/visl.h
+++ b/drivers/media/test-drivers/visl/visl.h
@@ -163,12 +163,7 @@ struct visl_ctrl_desc {
static inline struct visl_ctx *visl_file_to_ctx(struct file *file)
{
- return container_of(file->private_data, struct visl_ctx, fh);
-}
-
-static inline struct visl_ctx *visl_v4l2fh_to_ctx(struct v4l2_fh *v4l2_fh)
-{
- return container_of(v4l2_fh, struct visl_ctx, fh);
+ return container_of(file_to_v4l2_fh(file), struct visl_ctx, fh);
}
void *visl_find_control_data(struct visl_ctx *ctx, u32 id);
diff --git a/drivers/media/test-drivers/vivid/vivid-cec.c b/drivers/media/test-drivers/vivid/vivid-cec.c
index 356a988dd6a1..2d15fdd5d999 100644
--- a/drivers/media/test-drivers/vivid/vivid-cec.c
+++ b/drivers/media/test-drivers/vivid/vivid-cec.c
@@ -327,7 +327,7 @@ static int vivid_received(struct cec_adapter *adap, struct cec_msg *msg)
char osd[14];
if (!cec_is_sink(adap))
- return -ENOMSG;
+ break;
cec_ops_set_osd_string(msg, &disp_ctl, osd);
switch (disp_ctl) {
case CEC_OP_DISP_CTL_DEFAULT:
@@ -348,7 +348,7 @@ static int vivid_received(struct cec_adapter *adap, struct cec_msg *msg)
cec_transmit_msg(adap, &reply, false);
break;
}
- break;
+ return 0;
}
case CEC_MSG_VENDOR_COMMAND_WITH_ID: {
u32 vendor_id;
@@ -379,7 +379,7 @@ static int vivid_received(struct cec_adapter *adap, struct cec_msg *msg)
if (size == 1) {
// Ignore even op values
if (!(vendor_cmd[0] & 1))
- break;
+ return 0;
reply.len = msg->len;
memcpy(reply.msg + 1, msg->msg + 1, msg->len - 1);
reply.msg[msg->len - 1]++;
@@ -388,12 +388,10 @@ static int vivid_received(struct cec_adapter *adap, struct cec_msg *msg)
CEC_OP_ABORT_INVALID_OP);
}
cec_transmit_msg(adap, &reply, false);
- break;
+ return 0;
}
- default:
- return -ENOMSG;
}
- return 0;
+ return -ENOMSG;
}
static const struct cec_adap_ops vivid_cec_adap_ops = {
diff --git a/drivers/media/test-drivers/vivid/vivid-core.c b/drivers/media/test-drivers/vivid/vivid-core.c
index 8d56168c72aa..86506be36acb 100644
--- a/drivers/media/test-drivers/vivid/vivid-core.c
+++ b/drivers/media/test-drivers/vivid/vivid-core.c
@@ -277,49 +277,49 @@ static int vidioc_querycap(struct file *file, void *priv,
return 0;
}
-static int vidioc_s_hw_freq_seek(struct file *file, void *fh, const struct v4l2_hw_freq_seek *a)
+static int vidioc_s_hw_freq_seek(struct file *file, void *priv, const struct v4l2_hw_freq_seek *a)
{
struct video_device *vdev = video_devdata(file);
if (vdev->vfl_type == VFL_TYPE_RADIO)
- return vivid_radio_rx_s_hw_freq_seek(file, fh, a);
+ return vivid_radio_rx_s_hw_freq_seek(file, priv, a);
return -ENOTTY;
}
-static int vidioc_enum_freq_bands(struct file *file, void *fh, struct v4l2_frequency_band *band)
+static int vidioc_enum_freq_bands(struct file *file, void *priv, struct v4l2_frequency_band *band)
{
struct video_device *vdev = video_devdata(file);
if (vdev->vfl_type == VFL_TYPE_RADIO)
- return vivid_radio_rx_enum_freq_bands(file, fh, band);
+ return vivid_radio_rx_enum_freq_bands(file, priv, band);
if (vdev->vfl_type == VFL_TYPE_SDR)
- return vivid_sdr_enum_freq_bands(file, fh, band);
+ return vivid_sdr_enum_freq_bands(file, priv, band);
return -ENOTTY;
}
-static int vidioc_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
+static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *vt)
{
struct video_device *vdev = video_devdata(file);
if (vdev->vfl_type == VFL_TYPE_RADIO)
- return vivid_radio_rx_g_tuner(file, fh, vt);
+ return vivid_radio_rx_g_tuner(file, priv, vt);
if (vdev->vfl_type == VFL_TYPE_SDR)
- return vivid_sdr_g_tuner(file, fh, vt);
- return vivid_video_g_tuner(file, fh, vt);
+ return vivid_sdr_g_tuner(file, priv, vt);
+ return vivid_video_g_tuner(file, priv, vt);
}
-static int vidioc_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
+static int vidioc_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *vt)
{
struct video_device *vdev = video_devdata(file);
if (vdev->vfl_type == VFL_TYPE_RADIO)
- return vivid_radio_rx_s_tuner(file, fh, vt);
+ return vivid_radio_rx_s_tuner(file, priv, vt);
if (vdev->vfl_type == VFL_TYPE_SDR)
- return vivid_sdr_s_tuner(file, fh, vt);
- return vivid_video_s_tuner(file, fh, vt);
+ return vivid_sdr_s_tuner(file, priv, vt);
+ return vivid_video_s_tuner(file, priv, vt);
}
-static int vidioc_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
+static int vidioc_g_frequency(struct file *file, void *priv, struct v4l2_frequency *vf)
{
struct vivid_dev *dev = video_drvdata(file);
struct video_device *vdev = video_devdata(file);
@@ -329,11 +329,11 @@ static int vidioc_g_frequency(struct file *file, void *fh, struct v4l2_frequency
vdev->vfl_dir == VFL_DIR_RX ?
&dev->radio_rx_freq : &dev->radio_tx_freq, vf);
if (vdev->vfl_type == VFL_TYPE_SDR)
- return vivid_sdr_g_frequency(file, fh, vf);
- return vivid_video_g_frequency(file, fh, vf);
+ return vivid_sdr_g_frequency(file, priv, vf);
+ return vivid_video_g_frequency(file, priv, vf);
}
-static int vidioc_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
+static int vidioc_s_frequency(struct file *file, void *priv, const struct v4l2_frequency *vf)
{
struct vivid_dev *dev = video_drvdata(file);
struct video_device *vdev = video_devdata(file);
@@ -343,113 +343,113 @@ static int vidioc_s_frequency(struct file *file, void *fh, const struct v4l2_fre
vdev->vfl_dir == VFL_DIR_RX ?
&dev->radio_rx_freq : &dev->radio_tx_freq, vf);
if (vdev->vfl_type == VFL_TYPE_SDR)
- return vivid_sdr_s_frequency(file, fh, vf);
- return vivid_video_s_frequency(file, fh, vf);
+ return vivid_sdr_s_frequency(file, priv, vf);
+ return vivid_video_s_frequency(file, priv, vf);
}
-static int vidioc_overlay(struct file *file, void *fh, unsigned i)
+static int vidioc_overlay(struct file *file, void *priv, unsigned i)
{
struct video_device *vdev = video_devdata(file);
if (vdev->vfl_dir == VFL_DIR_RX)
return -ENOTTY;
- return vivid_vid_out_overlay(file, fh, i);
+ return vivid_vid_out_overlay(file, priv, i);
}
-static int vidioc_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *a)
+static int vidioc_g_fbuf(struct file *file, void *priv, struct v4l2_framebuffer *a)
{
struct video_device *vdev = video_devdata(file);
if (vdev->vfl_dir == VFL_DIR_RX)
return -ENOTTY;
- return vivid_vid_out_g_fbuf(file, fh, a);
+ return vivid_vid_out_g_fbuf(file, priv, a);
}
-static int vidioc_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffer *a)
+static int vidioc_s_fbuf(struct file *file, void *priv, const struct v4l2_framebuffer *a)
{
struct video_device *vdev = video_devdata(file);
if (vdev->vfl_dir == VFL_DIR_RX)
return -ENOTTY;
- return vivid_vid_out_s_fbuf(file, fh, a);
+ return vivid_vid_out_s_fbuf(file, priv, a);
}
-static int vidioc_s_std(struct file *file, void *fh, v4l2_std_id id)
+static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id)
{
struct video_device *vdev = video_devdata(file);
if (vdev->vfl_dir == VFL_DIR_RX)
- return vivid_vid_cap_s_std(file, fh, id);
- return vivid_vid_out_s_std(file, fh, id);
+ return vivid_vid_cap_s_std(file, priv, id);
+ return vivid_vid_out_s_std(file, priv, id);
}
-static int vidioc_s_dv_timings(struct file *file, void *fh, struct v4l2_dv_timings *timings)
+static int vidioc_s_dv_timings(struct file *file, void *priv, struct v4l2_dv_timings *timings)
{
struct video_device *vdev = video_devdata(file);
if (vdev->vfl_dir == VFL_DIR_RX)
- return vivid_vid_cap_s_dv_timings(file, fh, timings);
- return vivid_vid_out_s_dv_timings(file, fh, timings);
+ return vivid_vid_cap_s_dv_timings(file, priv, timings);
+ return vivid_vid_out_s_dv_timings(file, priv, timings);
}
-static int vidioc_g_pixelaspect(struct file *file, void *fh,
+static int vidioc_g_pixelaspect(struct file *file, void *priv,
int type, struct v4l2_fract *f)
{
struct video_device *vdev = video_devdata(file);
if (vdev->vfl_dir == VFL_DIR_RX)
- return vivid_vid_cap_g_pixelaspect(file, fh, type, f);
- return vivid_vid_out_g_pixelaspect(file, fh, type, f);
+ return vivid_vid_cap_g_pixelaspect(file, priv, type, f);
+ return vivid_vid_out_g_pixelaspect(file, priv, type, f);
}
-static int vidioc_g_selection(struct file *file, void *fh,
+static int vidioc_g_selection(struct file *file, void *priv,
struct v4l2_selection *sel)
{
struct video_device *vdev = video_devdata(file);
if (vdev->vfl_dir == VFL_DIR_RX)
- return vivid_vid_cap_g_selection(file, fh, sel);
- return vivid_vid_out_g_selection(file, fh, sel);
+ return vivid_vid_cap_g_selection(file, priv, sel);
+ return vivid_vid_out_g_selection(file, priv, sel);
}
-static int vidioc_s_selection(struct file *file, void *fh,
+static int vidioc_s_selection(struct file *file, void *priv,
struct v4l2_selection *sel)
{
struct video_device *vdev = video_devdata(file);
if (vdev->vfl_dir == VFL_DIR_RX)
- return vivid_vid_cap_s_selection(file, fh, sel);
- return vivid_vid_out_s_selection(file, fh, sel);
+ return vivid_vid_cap_s_selection(file, priv, sel);
+ return vivid_vid_out_s_selection(file, priv, sel);
}
-static int vidioc_g_parm(struct file *file, void *fh,
+static int vidioc_g_parm(struct file *file, void *priv,
struct v4l2_streamparm *parm)
{
struct video_device *vdev = video_devdata(file);
if (vdev->vfl_type == VFL_TYPE_TOUCH)
- return vivid_g_parm_tch(file, fh, parm);
+ return vivid_g_parm_tch(file, priv, parm);
if (vdev->vfl_dir == VFL_DIR_RX)
- return vivid_vid_cap_g_parm(file, fh, parm);
- return vivid_vid_out_g_parm(file, fh, parm);
+ return vivid_vid_cap_g_parm(file, priv, parm);
+ return vivid_vid_out_g_parm(file, priv, parm);
}
-static int vidioc_s_parm(struct file *file, void *fh,
+static int vidioc_s_parm(struct file *file, void *priv,
struct v4l2_streamparm *parm)
{
struct video_device *vdev = video_devdata(file);
if (vdev->vfl_dir == VFL_DIR_RX)
- return vivid_vid_cap_s_parm(file, fh, parm);
+ return vivid_vid_cap_s_parm(file, priv, parm);
return -ENOTTY;
}
-static int vidioc_log_status(struct file *file, void *fh)
+static int vidioc_log_status(struct file *file, void *priv)
{
struct vivid_dev *dev = video_drvdata(file);
struct video_device *vdev = video_devdata(file);
- v4l2_ctrl_log_status(file, fh);
+ v4l2_ctrl_log_status(file, priv);
if (vdev->vfl_dir == VFL_DIR_RX && vdev->vfl_type == VFL_TYPE_VIDEO)
tpg_log_status(&dev->tpg);
return 0;
@@ -654,11 +654,11 @@ static int vivid_fop_release(struct file *file)
v4l2_info(&dev->v4l2_dev, "reconnect\n");
vivid_reconnect(dev);
}
- if (file->private_data == dev->radio_rx_rds_owner) {
+ if (file_to_v4l2_fh(file) == dev->radio_rx_rds_owner) {
dev->radio_rx_rds_last_block = 0;
dev->radio_rx_rds_owner = NULL;
}
- if (file->private_data == dev->radio_tx_rds_owner) {
+ if (file_to_v4l2_fh(file) == dev->radio_tx_rds_owner) {
dev->radio_tx_rds_last_block = 0;
dev->radio_tx_rds_owner = NULL;
}
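Note: throughout this series, open-coded reads of file->private_data are replaced
by file_to_v4l2_fh(). The helper itself is introduced elsewhere in the series; a
minimal sketch of what it is assumed to look like, given that v4l2_fh_add(fh, file)
now ties the fh to the file (see the hdpvr and pvrusb2 hunks further down):

	/* Sketch, not from this patch: recover the v4l2_fh that
	 * v4l2_fh_add() associated with this file. */
	static inline struct v4l2_fh *file_to_v4l2_fh(struct file *file)
	{
		return file->private_data;
	}

This gives the vivid RDS owner comparisons a typed accessor instead of a raw
void pointer and keeps drivers from poking at private_data directly.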
diff --git a/drivers/media/test-drivers/vivid/vivid-radio-rx.c b/drivers/media/test-drivers/vivid/vivid-radio-rx.c
index 79c1723bd84c..b5e3026f883e 100644
--- a/drivers/media/test-drivers/vivid/vivid-radio-rx.c
+++ b/drivers/media/test-drivers/vivid/vivid-radio-rx.c
@@ -42,13 +42,13 @@ ssize_t vivid_radio_rx_read(struct file *file, char __user *buf,
if (mutex_lock_interruptible(&dev->mutex))
return -ERESTARTSYS;
if (dev->radio_rx_rds_owner &&
- file->private_data != dev->radio_rx_rds_owner) {
+ file_to_v4l2_fh(file) != dev->radio_rx_rds_owner) {
mutex_unlock(&dev->mutex);
return -EBUSY;
}
if (dev->radio_rx_rds_owner == NULL) {
vivid_radio_rds_init(dev);
- dev->radio_rx_rds_owner = file->private_data;
+ dev->radio_rx_rds_owner = file_to_v4l2_fh(file);
}
retry:
@@ -133,7 +133,7 @@ __poll_t vivid_radio_rx_poll(struct file *file, struct poll_table_struct *wait)
return EPOLLIN | EPOLLRDNORM | v4l2_ctrl_poll(file, wait);
}
-int vivid_radio_rx_enum_freq_bands(struct file *file, void *fh, struct v4l2_frequency_band *band)
+int vivid_radio_rx_enum_freq_bands(struct file *file, void *priv, struct v4l2_frequency_band *band)
{
if (band->tuner != 0)
return -EINVAL;
@@ -145,7 +145,7 @@ int vivid_radio_rx_enum_freq_bands(struct file *file, void *fh, struct v4l2_freq
return 0;
}
-int vivid_radio_rx_s_hw_freq_seek(struct file *file, void *fh, const struct v4l2_hw_freq_seek *a)
+int vivid_radio_rx_s_hw_freq_seek(struct file *file, void *priv, const struct v4l2_hw_freq_seek *a)
{
struct vivid_dev *dev = video_drvdata(file);
unsigned low, high;
@@ -214,7 +214,7 @@ int vivid_radio_rx_s_hw_freq_seek(struct file *file, void *fh, const struct v4l2
return 0;
}
-int vivid_radio_rx_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
+int vivid_radio_rx_g_tuner(struct file *file, void *priv, struct v4l2_tuner *vt)
{
struct vivid_dev *dev = video_drvdata(file);
int delta = 800;
@@ -267,7 +267,7 @@ int vivid_radio_rx_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
return 0;
}
-int vivid_radio_rx_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
+int vivid_radio_rx_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *vt)
{
struct vivid_dev *dev = video_drvdata(file);
diff --git a/drivers/media/test-drivers/vivid/vivid-radio-rx.h b/drivers/media/test-drivers/vivid/vivid-radio-rx.h
index c9c7849f6f99..a2ae17c78ece 100644
--- a/drivers/media/test-drivers/vivid/vivid-radio-rx.h
+++ b/drivers/media/test-drivers/vivid/vivid-radio-rx.h
@@ -11,9 +11,9 @@
ssize_t vivid_radio_rx_read(struct file *, char __user *, size_t, loff_t *);
__poll_t vivid_radio_rx_poll(struct file *file, struct poll_table_struct *wait);
-int vivid_radio_rx_enum_freq_bands(struct file *file, void *fh, struct v4l2_frequency_band *band);
-int vivid_radio_rx_s_hw_freq_seek(struct file *file, void *fh, const struct v4l2_hw_freq_seek *a);
-int vivid_radio_rx_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt);
-int vivid_radio_rx_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt);
+int vivid_radio_rx_enum_freq_bands(struct file *file, void *priv, struct v4l2_frequency_band *band);
+int vivid_radio_rx_s_hw_freq_seek(struct file *file, void *priv, const struct v4l2_hw_freq_seek *a);
+int vivid_radio_rx_g_tuner(struct file *file, void *priv, struct v4l2_tuner *vt);
+int vivid_radio_rx_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *vt);
#endif
diff --git a/drivers/media/test-drivers/vivid/vivid-radio-tx.c b/drivers/media/test-drivers/vivid/vivid-radio-tx.c
index 049d40b948bb..ada60722066e 100644
--- a/drivers/media/test-drivers/vivid/vivid-radio-tx.c
+++ b/drivers/media/test-drivers/vivid/vivid-radio-tx.c
@@ -39,11 +39,11 @@ ssize_t vivid_radio_tx_write(struct file *file, const char __user *buf,
if (mutex_lock_interruptible(&dev->mutex))
return -ERESTARTSYS;
if (dev->radio_tx_rds_owner &&
- file->private_data != dev->radio_tx_rds_owner) {
+ file_to_v4l2_fh(file) != dev->radio_tx_rds_owner) {
mutex_unlock(&dev->mutex);
return -EBUSY;
}
- dev->radio_tx_rds_owner = file->private_data;
+ dev->radio_tx_rds_owner = file_to_v4l2_fh(file);
retry:
timestamp = ktime_sub(ktime_get(), dev->radio_rds_init_time);
@@ -96,7 +96,7 @@ __poll_t vivid_radio_tx_poll(struct file *file, struct poll_table_struct *wait)
return EPOLLOUT | EPOLLWRNORM | v4l2_ctrl_poll(file, wait);
}
-int vidioc_g_modulator(struct file *file, void *fh, struct v4l2_modulator *a)
+int vidioc_g_modulator(struct file *file, void *priv, struct v4l2_modulator *a)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -115,7 +115,7 @@ int vidioc_g_modulator(struct file *file, void *fh, struct v4l2_modulator *a)
return 0;
}
-int vidioc_s_modulator(struct file *file, void *fh, const struct v4l2_modulator *a)
+int vidioc_s_modulator(struct file *file, void *priv, const struct v4l2_modulator *a)
{
struct vivid_dev *dev = video_drvdata(file);
diff --git a/drivers/media/test-drivers/vivid/vivid-radio-tx.h b/drivers/media/test-drivers/vivid/vivid-radio-tx.h
index c2bf1e7e634a..20cb6f1363ff 100644
--- a/drivers/media/test-drivers/vivid/vivid-radio-tx.h
+++ b/drivers/media/test-drivers/vivid/vivid-radio-tx.h
@@ -11,7 +11,7 @@
ssize_t vivid_radio_tx_write(struct file *, const char __user *, size_t, loff_t *);
__poll_t vivid_radio_tx_poll(struct file *file, struct poll_table_struct *wait);
-int vidioc_g_modulator(struct file *file, void *fh, struct v4l2_modulator *a);
-int vidioc_s_modulator(struct file *file, void *fh, const struct v4l2_modulator *a);
+int vidioc_g_modulator(struct file *file, void *priv, struct v4l2_modulator *a);
+int vidioc_s_modulator(struct file *file, void *priv, const struct v4l2_modulator *a);
#endif
diff --git a/drivers/media/test-drivers/vivid/vivid-sdr-cap.c b/drivers/media/test-drivers/vivid/vivid-sdr-cap.c
index c633fc2ed664..2664a593e8e1 100644
--- a/drivers/media/test-drivers/vivid/vivid-sdr-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-sdr-cap.c
@@ -344,7 +344,7 @@ const struct vb2_ops vivid_sdr_cap_qops = {
.buf_request_complete = sdr_cap_buf_request_complete,
};
-int vivid_sdr_enum_freq_bands(struct file *file, void *fh,
+int vivid_sdr_enum_freq_bands(struct file *file, void *priv,
struct v4l2_frequency_band *band)
{
switch (band->tuner) {
@@ -363,7 +363,7 @@ int vivid_sdr_enum_freq_bands(struct file *file, void *fh,
}
}
-int vivid_sdr_g_frequency(struct file *file, void *fh,
+int vivid_sdr_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *vf)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -382,7 +382,7 @@ int vivid_sdr_g_frequency(struct file *file, void *fh,
}
}
-int vivid_sdr_s_frequency(struct file *file, void *fh,
+int vivid_sdr_s_frequency(struct file *file, void *priv,
const struct v4l2_frequency *vf)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -423,7 +423,7 @@ int vivid_sdr_s_frequency(struct file *file, void *fh,
}
}
-int vivid_sdr_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
+int vivid_sdr_g_tuner(struct file *file, void *priv, struct v4l2_tuner *vt)
{
switch (vt->index) {
case 0:
@@ -447,14 +447,14 @@ int vivid_sdr_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
}
}
-int vivid_sdr_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
+int vivid_sdr_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *vt)
{
if (vt->index > 1)
return -EINVAL;
return 0;
}
-int vidioc_enum_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+int vidioc_enum_fmt_sdr_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f)
{
if (f->index >= ARRAY_SIZE(formats))
return -EINVAL;
@@ -462,7 +462,7 @@ int vidioc_enum_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_fmtdesc *f)
return 0;
}
-int vidioc_g_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_format *f)
+int vidioc_g_fmt_sdr_cap(struct file *file, void *priv, struct v4l2_format *f)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -471,7 +471,7 @@ int vidioc_g_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_format *f)
return 0;
}
-int vidioc_s_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_format *f)
+int vidioc_s_fmt_sdr_cap(struct file *file, void *priv, struct v4l2_format *f)
{
struct vivid_dev *dev = video_drvdata(file);
struct vb2_queue *q = &dev->vb_sdr_cap_q;
@@ -495,7 +495,7 @@ int vidioc_s_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_format *f)
return 0;
}
-int vidioc_try_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_format *f)
+int vidioc_try_fmt_sdr_cap(struct file *file, void *priv, struct v4l2_format *f)
{
int i;
diff --git a/drivers/media/test-drivers/vivid/vivid-sdr-cap.h b/drivers/media/test-drivers/vivid/vivid-sdr-cap.h
index 813c9248e5a7..3d8eeabbfc10 100644
--- a/drivers/media/test-drivers/vivid/vivid-sdr-cap.h
+++ b/drivers/media/test-drivers/vivid/vivid-sdr-cap.h
@@ -8,15 +8,15 @@
#ifndef _VIVID_SDR_CAP_H_
#define _VIVID_SDR_CAP_H_
-int vivid_sdr_enum_freq_bands(struct file *file, void *fh, struct v4l2_frequency_band *band);
-int vivid_sdr_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf);
-int vivid_sdr_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf);
-int vivid_sdr_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt);
-int vivid_sdr_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt);
-int vidioc_enum_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_fmtdesc *f);
-int vidioc_g_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_format *f);
-int vidioc_s_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_format *f);
-int vidioc_try_fmt_sdr_cap(struct file *file, void *fh, struct v4l2_format *f);
+int vivid_sdr_enum_freq_bands(struct file *file, void *priv, struct v4l2_frequency_band *band);
+int vivid_sdr_g_frequency(struct file *file, void *priv, struct v4l2_frequency *vf);
+int vivid_sdr_s_frequency(struct file *file, void *priv, const struct v4l2_frequency *vf);
+int vivid_sdr_g_tuner(struct file *file, void *priv, struct v4l2_tuner *vt);
+int vivid_sdr_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *vt);
+int vidioc_enum_fmt_sdr_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f);
+int vidioc_g_fmt_sdr_cap(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_s_fmt_sdr_cap(struct file *file, void *priv, struct v4l2_format *f);
+int vidioc_try_fmt_sdr_cap(struct file *file, void *priv, struct v4l2_format *f);
void vivid_sdr_cap_process(struct vivid_dev *dev, struct vivid_buffer *buf);
extern const struct vb2_ops vivid_sdr_cap_qops;
diff --git a/drivers/media/test-drivers/vivid/vivid-vbi-cap.c b/drivers/media/test-drivers/vivid/vivid-vbi-cap.c
index a09f62c66c33..791382a54b4f 100644
--- a/drivers/media/test-drivers/vivid/vivid-vbi-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-vbi-cap.c
@@ -282,7 +282,7 @@ void vivid_fill_service_lines(struct v4l2_sliced_vbi_format *vbi, u32 service_se
}
}
-int vidioc_g_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+int vidioc_g_fmt_sliced_vbi_cap(struct file *file, void *priv, struct v4l2_format *fmt)
{
struct vivid_dev *dev = video_drvdata(file);
struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced;
@@ -294,7 +294,7 @@ int vidioc_g_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format
return 0;
}
-int vidioc_try_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+int vidioc_try_fmt_sliced_vbi_cap(struct file *file, void *priv, struct v4l2_format *fmt)
{
struct vivid_dev *dev = video_drvdata(file);
struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced;
@@ -310,11 +310,11 @@ int vidioc_try_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_forma
return 0;
}
-int vidioc_s_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
+int vidioc_s_fmt_sliced_vbi_cap(struct file *file, void *priv, struct v4l2_format *fmt)
{
struct vivid_dev *dev = video_drvdata(file);
struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced;
- int ret = vidioc_try_fmt_sliced_vbi_cap(file, fh, fmt);
+ int ret = vidioc_try_fmt_sliced_vbi_cap(file, priv, fmt);
if (ret)
return ret;
@@ -324,7 +324,7 @@ int vidioc_s_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format
return 0;
}
-int vidioc_g_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_sliced_vbi_cap *cap)
+int vidioc_g_sliced_vbi_cap(struct file *file, void *priv, struct v4l2_sliced_vbi_cap *cap)
{
struct vivid_dev *dev = video_drvdata(file);
struct video_device *vdev = video_devdata(file);
diff --git a/drivers/media/test-drivers/vivid/vivid-vbi-cap.h b/drivers/media/test-drivers/vivid/vivid-vbi-cap.h
index 91d2de01381c..ec2d200c9e0d 100644
--- a/drivers/media/test-drivers/vivid/vivid-vbi-cap.h
+++ b/drivers/media/test-drivers/vivid/vivid-vbi-cap.h
@@ -16,10 +16,10 @@ int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
struct v4l2_format *f);
int vidioc_s_fmt_vbi_cap(struct file *file, void *priv,
struct v4l2_format *f);
-int vidioc_g_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt);
-int vidioc_try_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt);
-int vidioc_s_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt);
-int vidioc_g_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_sliced_vbi_cap *cap);
+int vidioc_g_fmt_sliced_vbi_cap(struct file *file, void *priv, struct v4l2_format *fmt);
+int vidioc_try_fmt_sliced_vbi_cap(struct file *file, void *priv, struct v4l2_format *fmt);
+int vidioc_s_fmt_sliced_vbi_cap(struct file *file, void *priv, struct v4l2_format *fmt);
+int vidioc_g_sliced_vbi_cap(struct file *file, void *priv, struct v4l2_sliced_vbi_cap *cap);
void vivid_fill_service_lines(struct v4l2_sliced_vbi_format *vbi, u32 service_set);
diff --git a/drivers/media/test-drivers/vivid/vivid-vbi-out.c b/drivers/media/test-drivers/vivid/vivid-vbi-out.c
index b7a09d2f394e..7b3ea96744bb 100644
--- a/drivers/media/test-drivers/vivid/vivid-vbi-out.c
+++ b/drivers/media/test-drivers/vivid/vivid-vbi-out.c
@@ -168,7 +168,7 @@ int vidioc_s_fmt_vbi_out(struct file *file, void *priv,
return 0;
}
-int vidioc_g_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt)
+int vidioc_g_fmt_sliced_vbi_out(struct file *file, void *priv, struct v4l2_format *fmt)
{
struct vivid_dev *dev = video_drvdata(file);
struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced;
@@ -180,7 +180,7 @@ int vidioc_g_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format
return 0;
}
-int vidioc_try_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt)
+int vidioc_try_fmt_sliced_vbi_out(struct file *file, void *priv, struct v4l2_format *fmt)
{
struct vivid_dev *dev = video_drvdata(file);
struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced;
@@ -196,12 +196,12 @@ int vidioc_try_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_forma
return 0;
}
-int vidioc_s_fmt_sliced_vbi_out(struct file *file, void *fh,
+int vidioc_s_fmt_sliced_vbi_out(struct file *file, void *priv,
struct v4l2_format *fmt)
{
struct vivid_dev *dev = video_drvdata(file);
struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced;
- int ret = vidioc_try_fmt_sliced_vbi_out(file, fh, fmt);
+ int ret = vidioc_try_fmt_sliced_vbi_out(file, priv, fmt);
if (ret)
return ret;
diff --git a/drivers/media/test-drivers/vivid/vivid-vbi-out.h b/drivers/media/test-drivers/vivid/vivid-vbi-out.h
index 76584940cdaf..a28e55519ade 100644
--- a/drivers/media/test-drivers/vivid/vivid-vbi-out.h
+++ b/drivers/media/test-drivers/vivid/vivid-vbi-out.h
@@ -13,9 +13,9 @@ int vidioc_g_fmt_vbi_out(struct file *file, void *priv,
struct v4l2_format *f);
int vidioc_s_fmt_vbi_out(struct file *file, void *priv,
struct v4l2_format *f);
-int vidioc_g_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt);
-int vidioc_try_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt);
-int vidioc_s_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt);
+int vidioc_g_fmt_sliced_vbi_out(struct file *file, void *priv, struct v4l2_format *fmt);
+int vidioc_try_fmt_sliced_vbi_out(struct file *file, void *priv, struct v4l2_format *fmt);
+int vidioc_s_fmt_sliced_vbi_out(struct file *file, void *priv, struct v4l2_format *fmt);
extern const struct vb2_ops vivid_vbi_out_qops;
diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
index 2e4c1ed37cd2..8b3162e82032 100644
--- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
@@ -899,7 +899,7 @@ int vivid_vid_cap_g_selection(struct file *file, void *priv,
return 0;
}
-int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
+int vivid_vid_cap_s_selection(struct file *file, void *priv, struct v4l2_selection *s)
{
struct vivid_dev *dev = video_drvdata(file);
struct v4l2_rect *crop = &dev->crop_cap;
@@ -1222,7 +1222,7 @@ int vidioc_s_input(struct file *file, void *priv, unsigned i)
return 0;
}
-int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin)
+int vidioc_enumaudio(struct file *file, void *priv, struct v4l2_audio *vin)
{
if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
return -EINVAL;
@@ -1230,7 +1230,7 @@ int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin)
return 0;
}
-int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
+int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *vin)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -1240,7 +1240,7 @@ int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
return 0;
}
-int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *vin)
+int vidioc_s_audio(struct file *file, void *priv, const struct v4l2_audio *vin)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -1252,7 +1252,7 @@ int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *vin)
return 0;
}
-int vivid_video_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
+int vivid_video_g_frequency(struct file *file, void *priv, struct v4l2_frequency *vf)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -1262,7 +1262,7 @@ int vivid_video_g_frequency(struct file *file, void *fh, struct v4l2_frequency *
return 0;
}
-int vivid_video_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
+int vivid_video_s_frequency(struct file *file, void *priv, const struct v4l2_frequency *vf)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -1274,7 +1274,7 @@ int vivid_video_s_frequency(struct file *file, void *fh, const struct v4l2_frequ
return 0;
}
-int vivid_video_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
+int vivid_video_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *vt)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -1286,7 +1286,7 @@ int vivid_video_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt
return 0;
}
-int vivid_video_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
+int vivid_video_g_tuner(struct file *file, void *priv, struct v4l2_tuner *vt)
{
struct vivid_dev *dev = video_drvdata(file);
enum tpg_quality qual;
@@ -1490,7 +1490,7 @@ static bool valid_cvt_gtf_timings(struct v4l2_dv_timings *timings)
return false;
}
-int vivid_vid_cap_s_dv_timings(struct file *file, void *_fh,
+int vivid_vid_cap_s_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *timings)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -1513,7 +1513,7 @@ int vivid_vid_cap_s_dv_timings(struct file *file, void *_fh,
return 0;
}
-int vidioc_query_dv_timings(struct file *file, void *_fh,
+int vidioc_query_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *timings)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -1600,7 +1600,7 @@ void vivid_update_connected_outputs(struct vivid_dev *dev)
}
}
-int vidioc_s_edid(struct file *file, void *_fh,
+int vidioc_s_edid(struct file *file, void *priv,
struct v4l2_edid *edid)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -1638,7 +1638,7 @@ int vidioc_s_edid(struct file *file, void *_fh,
return 0;
}
-int vidioc_enum_framesizes(struct file *file, void *fh,
+int vidioc_enum_framesizes(struct file *file, void *priv,
struct v4l2_frmsizeenum *fsize)
{
struct vivid_dev *dev = video_drvdata(file);
diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.h b/drivers/media/test-drivers/vivid/vivid-vid-cap.h
index 7a8daf0af2ca..38a99f7e038e 100644
--- a/drivers/media/test-drivers/vivid/vivid-vid-cap.h
+++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.h
@@ -29,7 +29,7 @@ int vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f);
int vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f);
int vidioc_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f);
int vivid_vid_cap_g_selection(struct file *file, void *priv, struct v4l2_selection *sel);
-int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection *s);
+int vivid_vid_cap_s_selection(struct file *file, void *priv, struct v4l2_selection *s);
int vivid_vid_cap_g_pixelaspect(struct file *file, void *priv, int type, struct v4l2_fract *f);
int vidioc_enum_fmt_vid_overlay(struct file *file, void *priv, struct v4l2_fmtdesc *f);
int vidioc_g_fmt_vid_overlay(struct file *file, void *priv, struct v4l2_format *f);
@@ -38,19 +38,19 @@ int vidioc_s_fmt_vid_overlay(struct file *file, void *priv, struct v4l2_format *
int vidioc_enum_input(struct file *file, void *priv, struct v4l2_input *inp);
int vidioc_g_input(struct file *file, void *priv, unsigned *i);
int vidioc_s_input(struct file *file, void *priv, unsigned i);
-int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin);
-int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *vin);
-int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *vin);
-int vivid_video_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf);
-int vivid_video_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf);
-int vivid_video_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt);
-int vivid_video_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt);
+int vidioc_enumaudio(struct file *file, void *priv, struct v4l2_audio *vin);
+int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *vin);
+int vidioc_s_audio(struct file *file, void *priv, const struct v4l2_audio *vin);
+int vivid_video_g_frequency(struct file *file, void *priv, struct v4l2_frequency *vf);
+int vivid_video_s_frequency(struct file *file, void *priv, const struct v4l2_frequency *vf);
+int vivid_video_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *vt);
+int vivid_video_g_tuner(struct file *file, void *priv, struct v4l2_tuner *vt);
int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *id);
int vivid_vid_cap_s_std(struct file *file, void *priv, v4l2_std_id id);
-int vivid_vid_cap_s_dv_timings(struct file *file, void *_fh, struct v4l2_dv_timings *timings);
-int vidioc_query_dv_timings(struct file *file, void *_fh, struct v4l2_dv_timings *timings);
-int vidioc_s_edid(struct file *file, void *_fh, struct v4l2_edid *edid);
-int vidioc_enum_framesizes(struct file *file, void *fh, struct v4l2_frmsizeenum *fsize);
+int vivid_vid_cap_s_dv_timings(struct file *file, void *priv, struct v4l2_dv_timings *timings);
+int vidioc_query_dv_timings(struct file *file, void *priv, struct v4l2_dv_timings *timings);
+int vidioc_s_edid(struct file *file, void *priv, struct v4l2_edid *edid);
+int vidioc_enum_framesizes(struct file *file, void *priv, struct v4l2_frmsizeenum *fsize);
int vidioc_enum_frameintervals(struct file *file, void *priv, struct v4l2_frmivalenum *fival);
int vivid_vid_cap_g_parm(struct file *file, void *priv, struct v4l2_streamparm *parm);
int vivid_vid_cap_s_parm(struct file *file, void *priv, struct v4l2_streamparm *parm);
diff --git a/drivers/media/test-drivers/vivid/vivid-vid-common.c b/drivers/media/test-drivers/vivid/vivid-vid-common.c
index df7678db67fb..786a1aa3b26b 100644
--- a/drivers/media/test-drivers/vivid/vivid-vid-common.c
+++ b/drivers/media/test-drivers/vivid/vivid-vid-common.c
@@ -1021,7 +1021,7 @@ int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id)
return 0;
}
-int vidioc_g_dv_timings(struct file *file, void *_fh,
+int vidioc_g_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *timings)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -1039,7 +1039,7 @@ int vidioc_g_dv_timings(struct file *file, void *_fh,
return 0;
}
-int vidioc_enum_dv_timings(struct file *file, void *_fh,
+int vidioc_enum_dv_timings(struct file *file, void *priv,
struct v4l2_enum_dv_timings *timings)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -1056,7 +1056,7 @@ int vidioc_enum_dv_timings(struct file *file, void *_fh,
NULL, NULL);
}
-int vidioc_dv_timings_cap(struct file *file, void *_fh,
+int vidioc_dv_timings_cap(struct file *file, void *priv,
struct v4l2_dv_timings_cap *cap)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -1073,7 +1073,7 @@ int vidioc_dv_timings_cap(struct file *file, void *_fh,
return 0;
}
-int vidioc_g_edid(struct file *file, void *_fh,
+int vidioc_g_edid(struct file *file, void *priv,
struct v4l2_edid *edid)
{
struct vivid_dev *dev = video_drvdata(file);
diff --git a/drivers/media/test-drivers/vivid/vivid-vid-common.h b/drivers/media/test-drivers/vivid/vivid-vid-common.h
index c49ac85abaed..fb5878174dba 100644
--- a/drivers/media/test-drivers/vivid/vivid-vid-common.h
+++ b/drivers/media/test-drivers/vivid/vivid-vid-common.h
@@ -32,10 +32,10 @@ int vivid_vid_adjust_sel(unsigned flags, struct v4l2_rect *r);
int vivid_enum_fmt_vid(struct file *file, void *priv, struct v4l2_fmtdesc *f);
int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id);
-int vidioc_g_dv_timings(struct file *file, void *_fh, struct v4l2_dv_timings *timings);
-int vidioc_enum_dv_timings(struct file *file, void *_fh, struct v4l2_enum_dv_timings *timings);
-int vidioc_dv_timings_cap(struct file *file, void *_fh, struct v4l2_dv_timings_cap *cap);
-int vidioc_g_edid(struct file *file, void *_fh, struct v4l2_edid *edid);
+int vidioc_g_dv_timings(struct file *file, void *priv, struct v4l2_dv_timings *timings);
+int vidioc_enum_dv_timings(struct file *file, void *priv, struct v4l2_enum_dv_timings *timings);
+int vidioc_dv_timings_cap(struct file *file, void *priv, struct v4l2_dv_timings_cap *cap);
+int vidioc_g_edid(struct file *file, void *priv, struct v4l2_edid *edid);
int vidioc_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subscription *sub);
#endif
diff --git a/drivers/media/test-drivers/vivid/vivid-vid-out.c b/drivers/media/test-drivers/vivid/vivid-vid-out.c
index c3398bce6c15..8c037b90833e 100644
--- a/drivers/media/test-drivers/vivid/vivid-vid-out.c
+++ b/drivers/media/test-drivers/vivid/vivid-vid-out.c
@@ -672,7 +672,7 @@ int vivid_vid_out_g_selection(struct file *file, void *priv,
return 0;
}
-int vivid_vid_out_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
+int vivid_vid_out_s_selection(struct file *file, void *priv, struct v4l2_selection *s)
{
struct vivid_dev *dev = video_drvdata(file);
struct v4l2_rect *crop = &dev->crop_out;
@@ -880,7 +880,7 @@ int vidioc_s_fmt_vid_out_overlay(struct file *file, void *priv,
return ret;
}
-int vivid_vid_out_overlay(struct file *file, void *fh, unsigned i)
+int vivid_vid_out_overlay(struct file *file, void *priv, unsigned i)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -893,7 +893,7 @@ int vivid_vid_out_overlay(struct file *file, void *fh, unsigned i)
return 0;
}
-int vivid_vid_out_g_fbuf(struct file *file, void *fh,
+int vivid_vid_out_g_fbuf(struct file *file, void *priv,
struct v4l2_framebuffer *a)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -920,7 +920,7 @@ int vivid_vid_out_g_fbuf(struct file *file, void *fh,
return 0;
}
-int vivid_vid_out_s_fbuf(struct file *file, void *fh,
+int vivid_vid_out_s_fbuf(struct file *file, void *priv,
const struct v4l2_framebuffer *a)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -1016,7 +1016,7 @@ int vidioc_s_output(struct file *file, void *priv, unsigned o)
return 0;
}
-int vidioc_enumaudout(struct file *file, void *fh, struct v4l2_audioout *vout)
+int vidioc_enumaudout(struct file *file, void *priv, struct v4l2_audioout *vout)
{
if (vout->index >= ARRAY_SIZE(vivid_audio_outputs))
return -EINVAL;
@@ -1024,7 +1024,7 @@ int vidioc_enumaudout(struct file *file, void *fh, struct v4l2_audioout *vout)
return 0;
}
-int vidioc_g_audout(struct file *file, void *fh, struct v4l2_audioout *vout)
+int vidioc_g_audout(struct file *file, void *priv, struct v4l2_audioout *vout)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -1034,7 +1034,7 @@ int vidioc_g_audout(struct file *file, void *fh, struct v4l2_audioout *vout)
return 0;
}
-int vidioc_s_audout(struct file *file, void *fh, const struct v4l2_audioout *vout)
+int vidioc_s_audout(struct file *file, void *priv, const struct v4l2_audioout *vout)
{
struct vivid_dev *dev = video_drvdata(file);
@@ -1072,7 +1072,7 @@ static bool valid_cvt_gtf_timings(struct v4l2_dv_timings *timings)
return false;
}
-int vivid_vid_out_s_dv_timings(struct file *file, void *_fh,
+int vivid_vid_out_s_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *timings)
{
struct vivid_dev *dev = video_drvdata(file);
diff --git a/drivers/media/test-drivers/vivid/vivid-vid-out.h b/drivers/media/test-drivers/vivid/vivid-vid-out.h
index 8d56314f4ea1..1d03891a5de5 100644
--- a/drivers/media/test-drivers/vivid/vivid-vid-out.h
+++ b/drivers/media/test-drivers/vivid/vivid-vid-out.h
@@ -22,23 +22,23 @@ int vidioc_g_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f);
int vidioc_try_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f);
int vidioc_s_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f);
int vivid_vid_out_g_selection(struct file *file, void *priv, struct v4l2_selection *sel);
-int vivid_vid_out_s_selection(struct file *file, void *fh, struct v4l2_selection *s);
+int vivid_vid_out_s_selection(struct file *file, void *priv, struct v4l2_selection *s);
int vivid_vid_out_g_pixelaspect(struct file *file, void *priv, int type, struct v4l2_fract *f);
int vidioc_enum_fmt_vid_out_overlay(struct file *file, void *priv, struct v4l2_fmtdesc *f);
int vidioc_g_fmt_vid_out_overlay(struct file *file, void *priv, struct v4l2_format *f);
int vidioc_try_fmt_vid_out_overlay(struct file *file, void *priv, struct v4l2_format *f);
int vidioc_s_fmt_vid_out_overlay(struct file *file, void *priv, struct v4l2_format *f);
-int vivid_vid_out_overlay(struct file *file, void *fh, unsigned i);
-int vivid_vid_out_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *a);
-int vivid_vid_out_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffer *a);
+int vivid_vid_out_overlay(struct file *file, void *priv, unsigned i);
+int vivid_vid_out_g_fbuf(struct file *file, void *priv, struct v4l2_framebuffer *a);
+int vivid_vid_out_s_fbuf(struct file *file, void *priv, const struct v4l2_framebuffer *a);
int vidioc_enum_output(struct file *file, void *priv, struct v4l2_output *out);
int vidioc_g_output(struct file *file, void *priv, unsigned *i);
int vidioc_s_output(struct file *file, void *priv, unsigned i);
-int vidioc_enumaudout(struct file *file, void *fh, struct v4l2_audioout *vout);
-int vidioc_g_audout(struct file *file, void *fh, struct v4l2_audioout *vout);
-int vidioc_s_audout(struct file *file, void *fh, const struct v4l2_audioout *vout);
+int vidioc_enumaudout(struct file *file, void *priv, struct v4l2_audioout *vout);
+int vidioc_g_audout(struct file *file, void *priv, struct v4l2_audioout *vout);
+int vidioc_s_audout(struct file *file, void *priv, const struct v4l2_audioout *vout);
int vivid_vid_out_s_std(struct file *file, void *priv, v4l2_std_id id);
-int vivid_vid_out_s_dv_timings(struct file *file, void *_fh, struct v4l2_dv_timings *timings);
+int vivid_vid_out_s_dv_timings(struct file *file, void *priv, struct v4l2_dv_timings *timings);
int vivid_vid_out_g_parm(struct file *file, void *priv, struct v4l2_streamparm *parm);
#endif
diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
index 3cf54d776d36..b44c97e4e5ec 100644
--- a/drivers/media/tuners/xc4000.c
+++ b/drivers/media/tuners/xc4000.c
@@ -1087,12 +1087,12 @@ fail:
static void xc_debug_dump(struct xc4000_priv *priv)
{
- u16 adc_envelope;
+ u16 adc_envelope = 0;
u32 freq_error_hz = 0;
- u16 lock_status;
+ u16 lock_status = 0;
u32 hsync_freq_hz = 0;
- u16 frame_lines;
- u16 quality;
+ u16 frame_lines = 0;
+ u16 quality = 0;
u16 signal = 0;
u16 noise = 0;
u8 hw_majorversion = 0, hw_minorversion = 0;
diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
index 30aa4ee958bd..a28481edd22e 100644
--- a/drivers/media/tuners/xc5000.c
+++ b/drivers/media/tuners/xc5000.c
@@ -622,14 +622,14 @@ static int xc5000_fwupload(struct dvb_frontend *fe,
static void xc_debug_dump(struct xc5000_priv *priv)
{
- u16 adc_envelope;
+ u16 adc_envelope = 0;
u32 freq_error_hz = 0;
- u16 lock_status;
+ u16 lock_status = 0;
u32 hsync_freq_hz = 0;
- u16 frame_lines;
- u16 quality;
- u16 snr;
- u16 totalgain;
+ u16 frame_lines = 0;
+ u16 quality = 0;
+ u16 snr = 0;
+ u16 totalgain = 0;
u8 hw_majorversion = 0, hw_minorversion = 0;
u8 fw_majorversion = 0, fw_minorversion = 0;
u16 fw_buildversion = 0;
@@ -1304,7 +1304,7 @@ static void xc5000_release(struct dvb_frontend *fe)
mutex_lock(&xc5000_list_mutex);
if (priv) {
- cancel_delayed_work(&priv->timer_sleep);
+ cancel_delayed_work_sync(&priv->timer_sleep);
hybrid_tuner_release_state(priv);
}
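Note: the xc4000 and xc5000 hunks above are the same two fixes. First, the
xc_debug_dump() locals are zero-initialized, presumably because the xc_get_*()
register accessors can fail and leave their output parameters untouched, so the
debug printout would otherwise show stack garbage (signal and noise were already
initialized this way). Second, the release path switches to the synchronous
cancel variant. A commented fragment of the window this closes, assuming the
delayed work handler dereferences priv:

	/* before: may return while the work handler is still executing */
	cancel_delayed_work(&priv->timer_sleep);
	hybrid_tuner_release_state(priv);	/* handler could still touch priv */

	/* after: blocks until any running handler has completed */
	cancel_delayed_work_sync(&priv->timer_sleep);
	hybrid_tuner_release_state(priv);	/* now safe to free the state */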
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index e5dff969ed57..fbaa542c8259 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -1921,9 +1921,8 @@ int au0828_analog_register(struct au0828_dev *dev,
iface_desc = interface->cur_altsetting;
for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
endpoint = &iface_desc->endpoint[i].desc;
- if (((endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
- == USB_DIR_IN) &&
- ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+ if (usb_endpoint_dir_in(endpoint) &&
+ (usb_endpoint_type(endpoint)
== USB_ENDPOINT_XFER_ISOC)) {
/* we find our isoc in endpoint */
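Note: the au0828 hunk swaps the open-coded descriptor mask tests for the
<linux/usb/ch9.h> helpers, which encapsulate exactly the same checks. For
reference, their shape (sketched from ch9.h):

	static inline int usb_endpoint_dir_in(const struct usb_endpoint_descriptor *epd)
	{
		return (epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN;
	}

	static inline int usb_endpoint_type(const struct usb_endpoint_descriptor *epd)
	{
		return epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
	}

The remaining compound condition could arguably be collapsed further with
usb_endpoint_is_isoc_in(endpoint), which combines both tests.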
diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c
index e585c8f6e4c5..c695a97e202b 100644
--- a/drivers/media/usb/cx231xx/cx231xx-417.c
+++ b/drivers/media/usb/cx231xx/cx231xx-417.c
@@ -1499,7 +1499,7 @@ static int vidioc_g_selection(struct file *file, void *priv,
return 0;
}
-static int vidioc_g_std(struct file *file, void *fh0, v4l2_std_id *norm)
+static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
{
struct cx231xx *dev = video_drvdata(file);
diff --git a/drivers/media/usb/em28xx/Kconfig b/drivers/media/usb/em28xx/Kconfig
index cb61fd6cc6c6..3122d4bdfc59 100644
--- a/drivers/media/usb/em28xx/Kconfig
+++ b/drivers/media/usb/em28xx/Kconfig
@@ -68,6 +68,7 @@ config VIDEO_EM28XX_DVB
select MEDIA_TUNER_XC5000 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_MT2060 if MEDIA_SUBDRV_AUTOSELECT
select DVB_MXL692 if MEDIA_SUBDRV_AUTOSELECT
+ select GPIOLIB_LEGACY if GPIOLIB && DVB_CXD2820R
help
This adds support for DVB cards based on the
Empiatech em28xx chips.
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index 9fce59979e3b..b94f5c70ab75 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -727,7 +727,7 @@ static int em28xx_pctv_290e_set_lna(struct dvb_frontend *fe)
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct em28xx_i2c_bus *i2c_bus = fe->dvb->priv;
struct em28xx *dev = i2c_bus->dev;
-#ifdef CONFIG_GPIOLIB
+#ifdef CONFIG_GPIOLIB_LEGACY
struct em28xx_dvb *dvb = dev->dvb;
int ret;
unsigned long flags;
@@ -1705,7 +1705,7 @@ static int em28xx_dvb_init(struct em28xx *dev)
goto out_free;
}
-#ifdef CONFIG_GPIOLIB
+#ifdef CONFIG_GPIOLIB_LEGACY
/* enable LNA for DVB-T, DVB-T2 and DVB-C */
result = gpio_request_one(dvb->lna_gpio,
GPIOF_OUT_INIT_LOW, NULL);
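Note: the em28xx changes track the integer-numbered GPIO calls used here
(gpio_request_one() with GPIOF_OUT_INIT_LOW, and friends) moving behind
CONFIG_GPIOLIB_LEGACY; hence both the ifdef rename and the new Kconfig select,
gated on DVB_CXD2820R since the LNA code sits in the cxd2820r-based PCTV 290e
path shown above. A hypothetical descriptor-based equivalent, purely as an
illustrative sketch (the lookup and error handling are assumptions, and
gpio_to_desc() does not reserve the line the way gpio_request_one() does):

	struct gpio_desc *lna = gpio_to_desc(dvb->lna_gpio);

	if (!lna)
		return -EINVAL;
	/* gpio_request_one(..., GPIOF_OUT_INIT_LOW, NULL) roughly becomes: */
	ret = gpiod_direction_output(lna, 0);
	if (!ret)
		gpiod_set_value_cansleep(lna, c->lna ? 1 : 0);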
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index 25edd2189654..3fc15d16df8e 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -1029,15 +1029,15 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
-static int vidioc_g_fmt_vid_cap(struct file *file, void *_priv,
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *fmt)
{
struct gspca_dev *gspca_dev = video_drvdata(file);
- u32 priv = fmt->fmt.pix.priv;
+ u32 fmt_priv = fmt->fmt.pix.priv;
fmt->fmt.pix = gspca_dev->pixfmt;
/* some drivers use priv internally, so keep the original value */
- fmt->fmt.pix.priv = priv;
+ fmt->fmt.pix.priv = fmt_priv;
return 0;
}
@@ -1075,24 +1075,24 @@ static int try_fmt_vid_cap(struct gspca_dev *gspca_dev,
return mode; /* used when s_fmt */
}
-static int vidioc_try_fmt_vid_cap(struct file *file, void *_priv,
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *fmt)
{
struct gspca_dev *gspca_dev = video_drvdata(file);
- u32 priv = fmt->fmt.pix.priv;
+ u32 fmt_priv = fmt->fmt.pix.priv;
if (try_fmt_vid_cap(gspca_dev, fmt) < 0)
return -EINVAL;
/* some drivers use priv internally, so keep the original value */
- fmt->fmt.pix.priv = priv;
+ fmt->fmt.pix.priv = fmt_priv;
return 0;
}
-static int vidioc_s_fmt_vid_cap(struct file *file, void *_priv,
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *fmt)
{
struct gspca_dev *gspca_dev = video_drvdata(file);
- u32 priv = fmt->fmt.pix.priv;
+ u32 fmt_priv = fmt->fmt.pix.priv;
int mode;
if (vb2_is_busy(&gspca_dev->queue))
@@ -1109,7 +1109,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *_priv,
else
gspca_dev->pixfmt = gspca_dev->cam.cam_mode[mode];
/* some drivers use priv internally, so keep the original value */
- fmt->fmt.pix.priv = priv;
+ fmt->fmt.pix.priv = fmt_priv;
return 0;
}
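Note: in gspca the parameter itself is renamed from _priv to priv, and the value
being saved across the format call is the unrelated v4l2_pix_format.priv field.
Since C puts parameters and the function body's top-level locals in the same
scope, keeping the old local name would no longer compile, which is why it
becomes fmt_priv. The invalid form, for illustration only:

	/* does NOT compile once the parameter is named 'priv': */
	static int example_g_fmt(struct file *file, void *priv,
				 struct v4l2_format *fmt)
	{
		u32 priv = fmt->fmt.pix.priv;	/* error: 'priv' redeclared */

		return 0;
	}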
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
index fd7d2a9d0449..8c7ae362d992 100644
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
@@ -52,6 +52,11 @@ struct hdpvr_fh {
bool legacy_mode;
};
+static inline struct hdpvr_fh *file_to_hdpvr_fh(struct file *file)
+{
+ return container_of(file_to_v4l2_fh(file), struct hdpvr_fh, fh);
+}
+
static uint list_size(struct list_head *list)
{
struct list_head *tmp;
@@ -380,8 +385,7 @@ static int hdpvr_open(struct file *file)
return -ENOMEM;
fh->legacy_mode = true;
v4l2_fh_init(&fh->fh, video_devdata(file));
- v4l2_fh_add(&fh->fh);
- file->private_data = fh;
+ v4l2_fh_add(&fh->fh, file);
return 0;
}
@@ -390,7 +394,7 @@ static int hdpvr_release(struct file *file)
struct hdpvr_device *dev = video_drvdata(file);
mutex_lock(&dev->io_mutex);
- if (file->private_data == dev->owner) {
+ if (file_to_v4l2_fh(file) == dev->owner) {
hdpvr_stop_streaming(dev);
dev->owner = NULL;
}
@@ -426,7 +430,7 @@ static ssize_t hdpvr_read(struct file *file, char __user *buffer, size_t count,
mutex_unlock(&dev->io_mutex);
goto err;
}
- dev->owner = file->private_data;
+ dev->owner = file_to_v4l2_fh(file);
print_buffer_status();
}
mutex_unlock(&dev->io_mutex);
@@ -541,7 +545,7 @@ static __poll_t hdpvr_poll(struct file *filp, poll_table *wait)
"start_streaming failed\n");
dev->status = STATUS_IDLE;
} else {
- dev->owner = filp->private_data;
+ dev->owner = file_to_v4l2_fh(filp);
}
print_buffer_status();
@@ -586,11 +590,11 @@ static int vidioc_querycap(struct file *file, void *priv,
return 0;
}
-static int vidioc_s_std(struct file *file, void *_fh,
+static int vidioc_s_std(struct file *file, void *priv,
v4l2_std_id std)
{
struct hdpvr_device *dev = video_drvdata(file);
- struct hdpvr_fh *fh = _fh;
+ struct hdpvr_fh *fh = file_to_hdpvr_fh(file);
u8 std_type = 1;
if (!fh->legacy_mode && dev->options.video_input == HDPVR_COMPONENT)
@@ -606,11 +610,12 @@ static int vidioc_s_std(struct file *file, void *_fh,
return hdpvr_config_call(dev, CTRL_VIDEO_STD_TYPE, std_type);
}
-static int vidioc_g_std(struct file *file, void *_fh,
+static int vidioc_g_std(struct file *file, void *priv,
v4l2_std_id *std)
{
struct hdpvr_device *dev = video_drvdata(file);
- struct hdpvr_fh *fh = _fh;
+ struct hdpvr_fh *fh = file_to_hdpvr_fh(file);
+
if (!fh->legacy_mode && dev->options.video_input == HDPVR_COMPONENT)
return -ENODATA;
@@ -618,11 +623,11 @@ static int vidioc_g_std(struct file *file, void *_fh,
return 0;
}
-static int vidioc_querystd(struct file *file, void *_fh, v4l2_std_id *a)
+static int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *a)
{
struct hdpvr_device *dev = video_drvdata(file);
+ struct hdpvr_fh *fh = file_to_hdpvr_fh(file);
struct hdpvr_video_info vid_info;
- struct hdpvr_fh *fh = _fh;
int ret;
*a = V4L2_STD_UNKNOWN;
@@ -637,11 +642,11 @@ static int vidioc_querystd(struct file *file, void *_fh, v4l2_std_id *a)
return ret;
}
-static int vidioc_s_dv_timings(struct file *file, void *_fh,
+static int vidioc_s_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *timings)
{
struct hdpvr_device *dev = video_drvdata(file);
- struct hdpvr_fh *fh = _fh;
+ struct hdpvr_fh *fh = file_to_hdpvr_fh(file);
int i;
fh->legacy_mode = false;
@@ -660,11 +665,11 @@ static int vidioc_s_dv_timings(struct file *file, void *_fh,
return 0;
}
-static int vidioc_g_dv_timings(struct file *file, void *_fh,
+static int vidioc_g_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *timings)
{
struct hdpvr_device *dev = video_drvdata(file);
- struct hdpvr_fh *fh = _fh;
+ struct hdpvr_fh *fh = file_to_hdpvr_fh(file);
fh->legacy_mode = false;
if (dev->options.video_input)
@@ -673,11 +678,11 @@ static int vidioc_g_dv_timings(struct file *file, void *_fh,
return 0;
}
-static int vidioc_query_dv_timings(struct file *file, void *_fh,
+static int vidioc_query_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *timings)
{
struct hdpvr_device *dev = video_drvdata(file);
- struct hdpvr_fh *fh = _fh;
+ struct hdpvr_fh *fh = file_to_hdpvr_fh(file);
struct hdpvr_video_info vid_info;
bool interlaced;
int ret = 0;
@@ -715,11 +720,11 @@ static int vidioc_query_dv_timings(struct file *file, void *_fh,
return ret;
}
-static int vidioc_enum_dv_timings(struct file *file, void *_fh,
+static int vidioc_enum_dv_timings(struct file *file, void *priv,
struct v4l2_enum_dv_timings *timings)
{
struct hdpvr_device *dev = video_drvdata(file);
- struct hdpvr_fh *fh = _fh;
+ struct hdpvr_fh *fh = file_to_hdpvr_fh(file);
fh->legacy_mode = false;
memset(timings->reserved, 0, sizeof(timings->reserved));
@@ -731,11 +736,11 @@ static int vidioc_enum_dv_timings(struct file *file, void *_fh,
return 0;
}
-static int vidioc_dv_timings_cap(struct file *file, void *_fh,
+static int vidioc_dv_timings_cap(struct file *file, void *priv,
struct v4l2_dv_timings_cap *cap)
{
struct hdpvr_device *dev = video_drvdata(file);
- struct hdpvr_fh *fh = _fh;
+ struct hdpvr_fh *fh = file_to_hdpvr_fh(file);
fh->legacy_mode = false;
if (dev->options.video_input)
@@ -758,7 +763,7 @@ static const char *iname[] = {
[HDPVR_COMPOSITE] = "Composite",
};
-static int vidioc_enum_input(struct file *file, void *_fh, struct v4l2_input *i)
+static int vidioc_enum_input(struct file *file, void *priv, struct v4l2_input *i)
{
unsigned int n;
@@ -778,7 +783,7 @@ static int vidioc_enum_input(struct file *file, void *_fh, struct v4l2_input *i)
return 0;
}
-static int vidioc_s_input(struct file *file, void *_fh,
+static int vidioc_s_input(struct file *file, void *priv,
unsigned int index)
{
struct hdpvr_device *dev = video_drvdata(file);
@@ -812,7 +817,7 @@ static int vidioc_s_input(struct file *file, void *_fh,
return retval;
}
-static int vidioc_g_input(struct file *file, void *private_data,
+static int vidioc_g_input(struct file *file, void *priv,
unsigned int *index)
{
struct hdpvr_device *dev = video_drvdata(file);
@@ -844,7 +849,7 @@ static int vidioc_enumaudio(struct file *file, void *priv,
return 0;
}
-static int vidioc_s_audio(struct file *file, void *private_data,
+static int vidioc_s_audio(struct file *file, void *priv,
const struct v4l2_audio *audio)
{
struct hdpvr_device *dev = video_drvdata(file);
@@ -863,7 +868,7 @@ static int vidioc_s_audio(struct file *file, void *private_data,
return retval;
}
-static int vidioc_g_audio(struct file *file, void *private_data,
+static int vidioc_g_audio(struct file *file, void *priv,
struct v4l2_audio *audio)
{
struct hdpvr_device *dev = video_drvdata(file);
@@ -980,7 +985,7 @@ static int hdpvr_s_ctrl(struct v4l2_ctrl *ctrl)
return ret;
}
-static int vidioc_enum_fmt_vid_cap(struct file *file, void *private_data,
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
if (f->index != 0)
@@ -991,11 +996,11 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *private_data,
return 0;
}
-static int vidioc_g_fmt_vid_cap(struct file *file, void *_fh,
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct hdpvr_device *dev = video_drvdata(file);
- struct hdpvr_fh *fh = _fh;
+ struct hdpvr_fh *fh = file_to_hdpvr_fh(file);
int ret;
/*
@@ -1048,7 +1053,7 @@ static int vidioc_encoder_cmd(struct file *filp, void *priv,
switch (a->cmd) {
case V4L2_ENC_CMD_START:
- if (dev->owner && filp->private_data != dev->owner) {
+ if (dev->owner && file_to_v4l2_fh(filp) != dev->owner) {
res = -EBUSY;
break;
}
@@ -1056,12 +1061,12 @@ static int vidioc_encoder_cmd(struct file *filp, void *priv,
break;
res = hdpvr_start_streaming(dev);
if (!res)
- dev->owner = filp->private_data;
+ dev->owner = file_to_v4l2_fh(filp);
else
dev->status = STATUS_IDLE;
break;
case V4L2_ENC_CMD_STOP:
- if (dev->owner && filp->private_data != dev->owner) {
+ if (dev->owner && file_to_v4l2_fh(filp) != dev->owner) {
res = -EBUSY;
break;
}
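Note: in hdpvr (and pvrusb2 below), handlers stop casting the opaque ioctl
argument and instead recover their fh from the file via the embedded v4l2_fh
and container_of(). The hunks also show v4l2_fh_add() and v4l2_fh_del() gaining
the file as a second argument and absorbing the manual private_data assignments
that are deleted on both sides. A sketch of what the updated core helpers are
assumed to do, inferred purely from these call sites (the real definitions live
in v4l2-fh.c):

	void v4l2_fh_add(struct v4l2_fh *fh, struct file *file)
	{
		file->private_data = fh;	/* plus the existing list/event setup */
	}

	void v4l2_fh_del(struct v4l2_fh *fh, struct file *file)
	{
		file->private_data = NULL;	/* plus the existing teardown */
	}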
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index ad38e1240541..f9535a484738 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -21,8 +21,6 @@
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
-struct pvr2_v4l2_dev;
-struct pvr2_v4l2_fh;
struct pvr2_v4l2;
struct pvr2_v4l2_dev {
@@ -48,6 +46,11 @@ struct pvr2_v4l2_fh {
unsigned int input_cnt;
};
+static inline struct pvr2_v4l2_fh *to_pvr2_v4l2_fh(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct pvr2_v4l2_fh, fh);
+}
+
struct pvr2_v4l2 {
struct pvr2_channel channel;
@@ -108,7 +111,7 @@ static struct v4l2_format pvr_format [] = {
*/
static int pvr2_querycap(struct file *file, void *priv, struct v4l2_capability *cap)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
strscpy(cap->driver, "pvrusb2", sizeof(cap->driver));
@@ -123,7 +126,7 @@ static int pvr2_querycap(struct file *file, void *priv, struct v4l2_capability *
static int pvr2_g_std(struct file *file, void *priv, v4l2_std_id *std)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
int val = 0;
int ret;
@@ -136,7 +139,7 @@ static int pvr2_g_std(struct file *file, void *priv, v4l2_std_id *std)
static int pvr2_s_std(struct file *file, void *priv, v4l2_std_id std)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
int ret;
@@ -148,7 +151,7 @@ static int pvr2_s_std(struct file *file, void *priv, v4l2_std_id std)
static int pvr2_querystd(struct file *file, void *priv, v4l2_std_id *std)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
int val = 0;
int ret;
@@ -161,7 +164,7 @@ static int pvr2_querystd(struct file *file, void *priv, v4l2_std_id *std)
static int pvr2_enum_input(struct file *file, void *priv, struct v4l2_input *vi)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
struct pvr2_ctrl *cptr;
struct v4l2_input tmp;
@@ -209,7 +212,7 @@ static int pvr2_enum_input(struct file *file, void *priv, struct v4l2_input *vi)
static int pvr2_g_input(struct file *file, void *priv, unsigned int *i)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
unsigned int idx;
struct pvr2_ctrl *cptr;
@@ -231,7 +234,7 @@ static int pvr2_g_input(struct file *file, void *priv, unsigned int *i)
static int pvr2_s_input(struct file *file, void *priv, unsigned int inp)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
int ret;
@@ -286,7 +289,7 @@ static int pvr2_s_audio(struct file *file, void *priv, const struct v4l2_audio *
static int pvr2_g_tuner(struct file *file, void *priv, struct v4l2_tuner *vt)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
if (vt->index != 0)
@@ -298,7 +301,7 @@ static int pvr2_g_tuner(struct file *file, void *priv, struct v4l2_tuner *vt)
static int pvr2_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *vt)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
int ret;
@@ -314,7 +317,7 @@ static int pvr2_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *
static int pvr2_s_frequency(struct file *file, void *priv, const struct v4l2_frequency *vf)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
unsigned long fv;
struct v4l2_tuner vt;
@@ -349,7 +352,7 @@ static int pvr2_s_frequency(struct file *file, void *priv, const struct v4l2_fre
static int pvr2_g_frequency(struct file *file, void *priv, struct v4l2_frequency *vf)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
int val = 0;
int cur_input;
@@ -391,7 +394,7 @@ static int pvr2_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtd
static int pvr2_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *vf)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
int val;
@@ -411,7 +414,7 @@ static int pvr2_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format
static int pvr2_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *vf)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
int lmin, lmax, ldef;
struct pvr2_ctrl *hcp, *vcp;
@@ -449,7 +452,7 @@ static int pvr2_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_forma
static int pvr2_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *vf)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
struct pvr2_ctrl *hcp, *vcp;
int ret = pvr2_try_fmt_vid_cap(file, fh, vf);
@@ -466,7 +469,7 @@ static int pvr2_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format
static int pvr2_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
struct pvr2_v4l2_dev *pdi = fh->pdi;
int ret;
@@ -485,7 +488,7 @@ static int pvr2_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
static int pvr2_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
if (!fh->pdi->stream) {
@@ -500,7 +503,7 @@ static int pvr2_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
static int pvr2_query_ext_ctrl(struct file *file, void *priv,
struct v4l2_query_ext_ctrl *vc)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
struct pvr2_ctrl *cptr;
int val;
@@ -561,7 +564,7 @@ static int pvr2_query_ext_ctrl(struct file *file, void *priv,
static int pvr2_querymenu(struct file *file, void *priv, struct v4l2_querymenu *vm)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
unsigned int cnt = 0;
int ret;
@@ -577,7 +580,7 @@ static int pvr2_querymenu(struct file *file, void *priv, struct v4l2_querymenu *
static int pvr2_g_ext_ctrls(struct file *file, void *priv,
struct v4l2_ext_controls *ctls)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
struct v4l2_ext_control *ctrl;
struct pvr2_ctrl *cptr;
@@ -612,7 +615,7 @@ static int pvr2_g_ext_ctrls(struct file *file, void *priv,
static int pvr2_s_ext_ctrls(struct file *file, void *priv,
struct v4l2_ext_controls *ctls)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
struct v4l2_ext_control *ctrl;
unsigned int idx;
@@ -637,7 +640,7 @@ commit:
static int pvr2_try_ext_ctrls(struct file *file, void *priv,
struct v4l2_ext_controls *ctls)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
struct v4l2_ext_control *ctrl;
struct pvr2_ctrl *pctl;
@@ -659,7 +662,7 @@ static int pvr2_try_ext_ctrls(struct file *file, void *priv,
static int pvr2_g_pixelaspect(struct file *file, void *priv,
int type, struct v4l2_fract *f)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
struct v4l2_cropcap cap = { .type = type };
int ret;
@@ -675,7 +678,7 @@ static int pvr2_g_pixelaspect(struct file *file, void *priv,
static int pvr2_g_selection(struct file *file, void *priv,
struct v4l2_selection *sel)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
struct v4l2_cropcap cap;
int val = 0;
@@ -726,7 +729,7 @@ static int pvr2_g_selection(struct file *file, void *priv,
static int pvr2_s_selection(struct file *file, void *priv,
struct v4l2_selection *sel)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
int ret;
@@ -758,7 +761,7 @@ commit:
static int pvr2_log_status(struct file *file, void *priv)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
pvr2_hdw_trigger_module_log(hdw);
@@ -882,7 +885,7 @@ static void pvr2_v4l2_internal_check(struct pvr2_channel *chp)
static int pvr2_v4l2_release(struct file *file)
{
- struct pvr2_v4l2_fh *fhp = file->private_data;
+ struct pvr2_v4l2_fh *fhp = to_pvr2_v4l2_fh(file);
struct pvr2_v4l2 *vp = fhp->pdi->v4lp;
struct pvr2_hdw *hdw = fhp->channel.mc_head->hdw;
@@ -897,9 +900,8 @@ static int pvr2_v4l2_release(struct file *file)
fhp->rhp = NULL;
}
- v4l2_fh_del(&fhp->fh);
+ v4l2_fh_del(&fhp->fh, file);
v4l2_fh_exit(&fhp->fh);
- file->private_data = NULL;
pvr2_channel_done(&fhp->channel);
pvr2_trace(PVR2_TRACE_STRUCT,
@@ -1000,10 +1002,9 @@ static int pvr2_v4l2_open(struct file *file)
}
fhp->file = file;
- file->private_data = fhp;
fhp->fw_mode_flag = pvr2_hdw_cpufw_get_enabled(hdw);
- v4l2_fh_add(&fhp->fh);
+ v4l2_fh_add(&fhp->fh, file);
return 0;
}
@@ -1055,7 +1056,7 @@ static int pvr2_v4l2_iosetup(struct pvr2_v4l2_fh *fh)
static ssize_t pvr2_v4l2_read(struct file *file,
char __user *buff, size_t count, loff_t *ppos)
{
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
int ret;
if (fh->fw_mode_flag) {
@@ -1117,7 +1118,7 @@ static ssize_t pvr2_v4l2_read(struct file *file,
static __poll_t pvr2_v4l2_poll(struct file *file, poll_table *wait)
{
__poll_t mask = 0;
- struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2_fh *fh = to_pvr2_v4l2_fh(file);
int ret;
if (fh->fw_mode_flag) {
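The mechanical substitutions above follow from the reworked v4l2_fh API: v4l2_fh_add() now takes the struct file and stores the handle in file->private_data itself, and v4l2_fh_del() takes the file so the core can clear it again — hence the dropped manual private_data assignments. A hedged sketch of an open/release pair under the new contract (hypothetical names, not pvrusb2 code):

#include <linux/slab.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>

struct my_fh {
	struct v4l2_fh vfh;	/* the handle registered with the core */
};

static int my_open(struct file *file)
{
	struct my_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);

	if (!fh)
		return -ENOMEM;
	v4l2_fh_init(&fh->vfh, video_devdata(file));
	v4l2_fh_add(&fh->vfh, file);	/* also sets file->private_data */
	return 0;
}

static int my_release(struct file *file)
{
	struct my_fh *fh = container_of(file_to_v4l2_fh(file),
					struct my_fh, vfh);

	v4l2_fh_del(&fh->vfh, file);	/* presumably clears private_data */
	v4l2_fh_exit(&fh->vfh);
	kfree(fh);
	return 0;
}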
diff --git a/drivers/media/usb/stk1160/stk1160-core.c b/drivers/media/usb/stk1160/stk1160-core.c
index ce717502ea4c..25d725c2ab3c 100644
--- a/drivers/media/usb/stk1160/stk1160-core.c
+++ b/drivers/media/usb/stk1160/stk1160-core.c
@@ -195,8 +195,7 @@ static int stk1160_scan_usb(struct usb_interface *intf, struct usb_device *udev,
if (udev->speed == USB_SPEED_HIGH)
size = size * hb_mult(sizedescr);
- if (usb_endpoint_xfer_isoc(desc) &&
- usb_endpoint_dir_in(desc)) {
+ if (usb_endpoint_is_isoc_in(desc)) {
switch (desc->bEndpointAddress) {
case STK1160_EP_AUDIO:
has_audio = true;
diff --git a/drivers/media/usb/stk1160/stk1160-video.c b/drivers/media/usb/stk1160/stk1160-video.c
index 416cb74377eb..f4baf9263286 100644
--- a/drivers/media/usb/stk1160/stk1160-video.c
+++ b/drivers/media/usb/stk1160/stk1160-video.c
@@ -408,8 +408,13 @@ static int stk1160_fill_urb(struct stk1160 *dev, struct stk1160_urb *stk_urb,
stk_urb->transfer_buffer = usb_alloc_noncoherent(dev->udev, sb_size,
GFP_KERNEL, &stk_urb->dma,
DMA_FROM_DEVICE, &stk_urb->sgt);
- if (!stk_urb->transfer_buffer)
+ if (!stk_urb->transfer_buffer) {
+ /*
+ * If the buffer allocation failed, we exit but return 0 since
+ * we allow the driver to keep working with fewer buffers.
+ */
goto free_urb;
+ }
stk_urb->dev = dev;
return 0;
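The comment added above encodes a policy rather than an error path: a transfer-buffer allocation failure is tolerated as long as earlier allocations succeeded. A hedged, generic sketch of that "degrade instead of fail" pattern (hypothetical names, not the driver's actual loop):

#include <linux/slab.h>

/* Try to allocate up to @wanted buffers; succeed with however many
 * allocations worked, fail only if none did. */
static int alloc_some_buffers(void **bufs, unsigned int wanted,
			      size_t size, unsigned int *got)
{
	unsigned int i;

	for (i = 0; i < wanted; i++) {
		bufs[i] = kzalloc(size, GFP_KERNEL);
		if (!bufs[i])
			break;	/* keep the i buffers we already have */
	}

	*got = i;
	return i ? 0 : -ENOMEM;	/* only a total failure is an error */
}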
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index efe609d70877..2905505c240c 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -376,6 +376,15 @@ static const struct uvc_control_info uvc_ctrls[] = {
| UVC_CTRL_FLAG_GET_DEF
| UVC_CTRL_FLAG_AUTO_UPDATE,
},
+ {
+ .entity = UVC_GUID_CHROMEOS_XU,
+ .selector = UVC_CROSXU_CONTROL_IQ_PROFILE,
+ .index = 3,
+ .size = 1,
+ .flags = UVC_CTRL_FLAG_SET_CUR
+ | UVC_CTRL_FLAG_GET_RANGE
+ | UVC_CTRL_FLAG_RESTORE,
+ },
};
static const u32 uvc_control_classes[] = {
@@ -384,6 +393,19 @@ static const u32 uvc_control_classes[] = {
};
static const int exposure_auto_mapping[] = { 2, 1, 4, 8 };
+static const int cros_colorfx_mapping[] = {
+ 1, /* V4L2_COLORFX_NONE */
+ -1, /* V4L2_COLORFX_BW */
+ -1, /* V4L2_COLORFX_SEPIA */
+ -1, /* V4L2_COLORFX_NEGATIVE */
+ -1, /* V4L2_COLORFX_EMBOSS */
+ -1, /* V4L2_COLORFX_SKETCH */
+ -1, /* V4L2_COLORFX_SKY_BLUE */
+ -1, /* V4L2_COLORFX_GRASS_GREEN */
+ -1, /* V4L2_COLORFX_SKIN_WHITEN */
+ 0, /* V4L2_COLORFX_VIVID */
+};
+
static bool uvc_ctrl_mapping_is_compound(struct uvc_control_mapping *mapping)
{
@@ -975,6 +997,18 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
.data_type = UVC_CTRL_DATA_TYPE_BITMASK,
.name = "Region of Interest Auto Ctrls",
},
+ {
+ .id = V4L2_CID_COLORFX,
+ .entity = UVC_GUID_CHROMEOS_XU,
+ .selector = UVC_CROSXU_CONTROL_IQ_PROFILE,
+ .size = 8,
+ .offset = 0,
+ .v4l2_type = V4L2_CTRL_TYPE_MENU,
+ .data_type = UVC_CTRL_DATA_TYPE_ENUM,
+ .menu_mapping = cros_colorfx_mapping,
+ .menu_mask = BIT(V4L2_COLORFX_VIVID) |
+ BIT(V4L2_COLORFX_NONE),
+ },
};
/* ------------------------------------------------------------------------
@@ -1619,7 +1653,7 @@ static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
}
if (ret == -EIO) {
- dev_warn_ratelimited(&chain->dev->udev->dev,
+ dev_warn_ratelimited(&chain->dev->intf->dev,
"UVC non compliance: Error %d querying master control %x (%s)\n",
ret, master_map->id,
uvc_map_get_name(master_map));
@@ -1643,7 +1677,7 @@ static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
ret = __uvc_queryctrl_boundaries(chain, ctrl, mapping, v4l2_ctrl);
if (ret && !mapping->disabled) {
- dev_warn(&chain->dev->udev->dev,
+ dev_warn(&chain->dev->intf->dev,
"UVC non compliance: permanently disabling control %x (%s), due to error %d\n",
mapping->id, uvc_map_get_name(mapping), ret);
mapping->disabled = true;
@@ -1858,7 +1892,7 @@ static int uvc_ctrl_set_handle(struct uvc_control *ctrl, struct uvc_fh *handle)
lockdep_assert_held(&handle->chain->ctrl_mutex);
if (ctrl->handle) {
- dev_warn_ratelimited(&handle->stream->dev->udev->dev,
+ dev_warn_ratelimited(&handle->stream->dev->intf->dev,
"UVC non compliance: Setting an async control with a pending operation.");
if (ctrl->handle == handle)
@@ -1956,7 +1990,7 @@ static void uvc_ctrl_status_event_work(struct work_struct *work)
w->urb->interval = dev->int_ep->desc.bInterval;
ret = usb_submit_urb(w->urb, GFP_KERNEL);
if (ret < 0)
- dev_err(&dev->udev->dev,
+ dev_err(&dev->intf->dev,
"Failed to resubmit status URB (%d).\n", ret);
}
@@ -2895,7 +2929,7 @@ int uvc_ctrl_restore_values(struct uvc_device *dev)
if (!ctrl->initialized || !ctrl->modified ||
(ctrl->info.flags & UVC_CTRL_FLAG_RESTORE) == 0)
continue;
- dev_dbg(&dev->udev->dev,
+ dev_dbg(&dev->intf->dev,
"restoring control %pUl/%u/%u\n",
ctrl->info.entity, ctrl->info.index,
ctrl->info.selector);
@@ -3181,15 +3215,6 @@ static void uvc_ctrl_init_ctrl(struct uvc_video_chain *chain,
{
unsigned int i;
- /*
- * XU controls initialization requires querying the device for control
- * information. As some buggy UVC devices will crash when queried
- * repeatedly in a tight loop, delay XU controls initialization until
- * first use.
- */
- if (UVC_ENTITY_TYPE(ctrl->entity) == UVC_VC_EXTENSION_UNIT)
- return;
-
for (i = 0; i < ARRAY_SIZE(uvc_ctrls); ++i) {
const struct uvc_control_info *info = &uvc_ctrls[i];
@@ -3307,7 +3332,6 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
void uvc_ctrl_cleanup_fh(struct uvc_fh *handle)
{
struct uvc_entity *entity;
- int i;
guard(mutex)(&handle->chain->ctrl_mutex);
@@ -3325,7 +3349,7 @@ void uvc_ctrl_cleanup_fh(struct uvc_fh *handle)
if (!WARN_ON(handle->pending_async_ctrls))
return;
- for (i = 0; i < handle->pending_async_ctrls; i++)
+ for (unsigned int i = 0; i < handle->pending_async_ctrls; i++)
uvc_pm_put(handle->stream->dev);
}
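For context on the cros_colorfx_mapping table added above: a uvc_control_mapping menu table translates a V4L2 menu index into the raw value written to the device, and menu_mask hides indices the device cannot honour (here everything except COLORFX_NONE and COLORFX_VIVID). A hedged sketch of that translation, in isolation (illustrative only; the real lookup lives in the generic mapping code):

#include <linux/bitops.h>
#include <linux/errno.h>

static int menu_index_to_device_value(const int *menu_mapping,
				      unsigned long menu_mask,
				      unsigned int index, int *out)
{
	if (index >= BITS_PER_LONG || !test_bit(index, &menu_mask))
		return -EINVAL;	/* index not exposed for this device */

	/* e.g. V4L2_COLORFX_NONE (0) -> 1, V4L2_COLORFX_VIVID (9) -> 0 */
	*out = menu_mapping[index];
	return 0;
}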
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 775bede0d93d..fb6afb8e84f0 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -137,6 +137,9 @@ struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id)
{
struct uvc_entity *entity;
+ if (id == UVC_INVALID_ENTITY_ID)
+ return NULL;
+
list_for_each_entry(entity, &dev->entities, list) {
if (entity->id == id)
return entity;
@@ -183,8 +186,6 @@ static void uvc_stream_delete(struct uvc_streaming *stream)
if (stream->async_wq)
destroy_workqueue(stream->async_wq);
- mutex_destroy(&stream->mutex);
-
usb_put_intf(stream->intf);
kfree(stream->formats);
@@ -201,8 +202,6 @@ static struct uvc_streaming *uvc_stream_new(struct uvc_device *dev,
if (stream == NULL)
return NULL;
- mutex_init(&stream->mutex);
-
stream->dev = dev;
stream->intf = usb_get_intf(intf);
stream->intfnum = intf->cur_altsetting->desc.bInterfaceNumber;
@@ -539,7 +538,7 @@ static int uvc_parse_streaming(struct uvc_device *dev,
unsigned int nformats = 0, nframes = 0, nintervals = 0;
unsigned int size, i, n, p;
u32 *interval;
- u16 psize;
+ u32 psize;
int ret = -EINVAL;
if (intf->cur_altsetting->desc.bInterfaceSubClass
@@ -775,7 +774,7 @@ static int uvc_parse_streaming(struct uvc_device *dev,
streaming->header.bEndpointAddress);
if (ep == NULL)
continue;
- psize = uvc_endpoint_max_bpi(dev->udev, ep);
+ psize = usb_endpoint_max_periodic_payload(dev->udev, ep);
if (psize > streaming->maxpsize)
streaming->maxpsize = psize;
}
@@ -795,14 +794,27 @@ static const u8 uvc_media_transport_input_guid[16] =
UVC_GUID_UVC_MEDIA_TRANSPORT_INPUT;
static const u8 uvc_processing_guid[16] = UVC_GUID_UVC_PROCESSING;
-static struct uvc_entity *uvc_alloc_entity(u16 type, u16 id,
- unsigned int num_pads, unsigned int extra_size)
+static struct uvc_entity *uvc_alloc_new_entity(struct uvc_device *dev, u16 type,
+ u16 id, unsigned int num_pads,
+ unsigned int extra_size)
{
struct uvc_entity *entity;
unsigned int num_inputs;
unsigned int size;
unsigned int i;
+ /* Per UVC 1.1+ spec 3.7.2, the ID should be non-zero. */
+ if (id == 0) {
+ dev_err(&dev->intf->dev, "Found Unit with invalid ID 0\n");
+ id = UVC_INVALID_ENTITY_ID;
+ }
+
+ /* Per UVC 1.1+ spec 3.7.2, the ID is unique. */
+ if (uvc_entity_by_id(dev, id)) {
+ dev_err(&dev->intf->dev, "Found multiple Units with ID %u\n", id);
+ id = UVC_INVALID_ENTITY_ID;
+ }
+
extra_size = roundup(extra_size, sizeof(*entity->pads));
if (num_pads)
num_inputs = type & UVC_TERM_OUTPUT ? num_pads : num_pads - 1;
@@ -812,7 +824,7 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u16 id,
+ num_inputs;
entity = kzalloc(size, GFP_KERNEL);
if (entity == NULL)
- return NULL;
+ return ERR_PTR(-ENOMEM);
entity->id = id;
entity->type = type;
@@ -882,7 +894,7 @@ static int uvc_parse_vendor_control(struct uvc_device *dev,
unsigned int n, p;
int handled = 0;
- switch (le16_to_cpu(dev->udev->descriptor.idVendor)) {
+ switch (le16_to_cpu(udev->descriptor.idVendor)) {
case 0x046d: /* Logitech */
if (buffer[1] != 0x41 || buffer[2] != 0x01)
break;
@@ -924,10 +936,10 @@ static int uvc_parse_vendor_control(struct uvc_device *dev,
break;
}
- unit = uvc_alloc_entity(UVC_VC_EXTENSION_UNIT, buffer[3],
- p + 1, 2*n);
- if (unit == NULL)
- return -ENOMEM;
+ unit = uvc_alloc_new_entity(dev, UVC_VC_EXTENSION_UNIT,
+ buffer[3], p + 1, 2 * n);
+ if (IS_ERR(unit))
+ return PTR_ERR(unit);
memcpy(unit->guid, &buffer[4], 16);
unit->extension.bNumControls = buffer[20];
@@ -1036,10 +1048,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
return -EINVAL;
}
- term = uvc_alloc_entity(type | UVC_TERM_INPUT, buffer[3],
- 1, n + p);
- if (term == NULL)
- return -ENOMEM;
+ term = uvc_alloc_new_entity(dev, type | UVC_TERM_INPUT,
+ buffer[3], 1, n + p);
+ if (IS_ERR(term))
+ return PTR_ERR(term);
if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA) {
term->camera.bControlSize = n;
@@ -1095,10 +1107,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
return 0;
}
- term = uvc_alloc_entity(type | UVC_TERM_OUTPUT, buffer[3],
- 1, 0);
- if (term == NULL)
- return -ENOMEM;
+ term = uvc_alloc_new_entity(dev, type | UVC_TERM_OUTPUT,
+ buffer[3], 1, 0);
+ if (IS_ERR(term))
+ return PTR_ERR(term);
memcpy(term->baSourceID, &buffer[7], 1);
@@ -1117,9 +1129,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
return -EINVAL;
}
- unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, 0);
- if (unit == NULL)
- return -ENOMEM;
+ unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3],
+ p + 1, 0);
+ if (IS_ERR(unit))
+ return PTR_ERR(unit);
memcpy(unit->baSourceID, &buffer[5], p);
@@ -1139,9 +1152,9 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
return -EINVAL;
}
- unit = uvc_alloc_entity(buffer[2], buffer[3], 2, n);
- if (unit == NULL)
- return -ENOMEM;
+ unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3], 2, n);
+ if (IS_ERR(unit))
+ return PTR_ERR(unit);
memcpy(unit->baSourceID, &buffer[4], 1);
unit->processing.wMaxMultiplier =
@@ -1168,9 +1181,10 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
return -EINVAL;
}
- unit = uvc_alloc_entity(buffer[2], buffer[3], p + 1, n);
- if (unit == NULL)
- return -ENOMEM;
+ unit = uvc_alloc_new_entity(dev, buffer[2], buffer[3],
+ p + 1, n);
+ if (IS_ERR(unit))
+ return PTR_ERR(unit);
memcpy(unit->guid, &buffer[4], 16);
unit->extension.bNumControls = buffer[20];
@@ -1315,9 +1329,10 @@ static int uvc_gpio_parse(struct uvc_device *dev)
return dev_err_probe(&dev->intf->dev, irq,
"No IRQ for privacy GPIO\n");
- unit = uvc_alloc_entity(UVC_EXT_GPIO_UNIT, UVC_EXT_GPIO_UNIT_ID, 0, 1);
- if (!unit)
- return -ENOMEM;
+ unit = uvc_alloc_new_entity(dev, UVC_EXT_GPIO_UNIT,
+ UVC_EXT_GPIO_UNIT_ID, 0, 1);
+ if (IS_ERR(unit))
+ return PTR_ERR(unit);
unit->gpio.gpio_privacy = gpio_privacy;
unit->gpio.irq = irq;
@@ -1868,7 +1883,7 @@ static int uvc_scan_device(struct uvc_device *dev)
uvc_scan_fallback(dev);
if (list_empty(&dev->chains)) {
- dev_info(&dev->udev->dev, "No valid video chain found.\n");
+ dev_info(&dev->intf->dev, "No valid video chain found.\n");
return -ENODEV;
}
@@ -1958,11 +1973,11 @@ static void uvc_unregister_video(struct uvc_device *dev)
list_for_each_entry(stream, &dev->streams, list) {
/* Nothing to do here, continue. */
- if (!video_is_registered(&stream->vdev))
+ if (!video_is_registered(&stream->queue.vdev))
continue;
- vb2_video_unregister_device(&stream->vdev);
- vb2_video_unregister_device(&stream->meta.vdev);
+ vb2_video_unregister_device(&stream->queue.vdev);
+ vb2_video_unregister_device(&stream->meta.queue.vdev);
/*
* Now both vdevs are not streaming and all the ioctls will
@@ -1984,12 +1999,12 @@ static void uvc_unregister_video(struct uvc_device *dev)
int uvc_register_video_device(struct uvc_device *dev,
struct uvc_streaming *stream,
- struct video_device *vdev,
struct uvc_video_queue *queue,
enum v4l2_buf_type type,
const struct v4l2_file_operations *fops,
const struct v4l2_ioctl_ops *ioctl_ops)
{
+ struct video_device *vdev = &queue->vdev;
int ret;
/* Initialize the video buffers queue. */
@@ -2071,9 +2086,9 @@ static int uvc_register_video(struct uvc_device *dev,
uvc_debugfs_init_stream(stream);
/* Register the device with V4L. */
- return uvc_register_video_device(dev, stream, &stream->vdev,
- &stream->queue, stream->type,
- &uvc_fops, &uvc_ioctl_ops);
+ return uvc_register_video_device(dev, stream, &stream->queue,
+ stream->type, &uvc_fops,
+ &uvc_ioctl_ops);
}
/*
@@ -2092,7 +2107,7 @@ static int uvc_register_terms(struct uvc_device *dev,
stream = uvc_stream_by_id(dev, term->id);
if (stream == NULL) {
- dev_info(&dev->udev->dev,
+ dev_info(&dev->intf->dev,
"No streaming interface found for terminal %u.",
term->id);
continue;
@@ -2109,7 +2124,7 @@ static int uvc_register_terms(struct uvc_device *dev,
*/
uvc_meta_register(stream);
- term->vdev = &stream->vdev;
+ term->vdev = &stream->queue.vdev;
}
return 0;
@@ -2128,7 +2143,7 @@ static int uvc_register_chains(struct uvc_device *dev)
#ifdef CONFIG_MEDIA_CONTROLLER
ret = uvc_mc_register_entities(chain);
if (ret < 0)
- dev_info(&dev->udev->dev,
+ dev_info(&dev->intf->dev,
"Failed to register entities (%d).\n", ret);
#endif
}
@@ -2229,23 +2244,23 @@ static int uvc_probe(struct usb_interface *intf,
if (ret < 0)
goto error;
- dev_info(&dev->udev->dev, "Found UVC %u.%02x device %s (%04x:%04x)\n",
+ dev_info(&dev->intf->dev, "Found UVC %u.%02x device %s (%04x:%04x)\n",
dev->uvc_version >> 8, dev->uvc_version & 0xff,
udev->product ? udev->product : "<unnamed>",
le16_to_cpu(udev->descriptor.idVendor),
le16_to_cpu(udev->descriptor.idProduct));
if (dev->quirks != dev->info->quirks) {
- dev_info(&dev->udev->dev,
+ dev_info(&dev->intf->dev,
"Forcing device quirks to 0x%x by module parameter for testing purpose.\n",
dev->quirks);
- dev_info(&dev->udev->dev,
+ dev_info(&dev->intf->dev,
"Please report required quirks to the linux-media mailing list.\n");
}
if (dev->info->uvc_version) {
dev->uvc_version = dev->info->uvc_version;
- dev_info(&dev->udev->dev, "Forcing UVC version to %u.%02x\n",
+ dev_info(&dev->intf->dev, "Forcing UVC version to %u.%02x\n",
dev->uvc_version >> 8, dev->uvc_version & 0xff);
}
@@ -2281,21 +2296,21 @@ static int uvc_probe(struct usb_interface *intf,
/* Initialize the interrupt URB. */
ret = uvc_status_init(dev);
if (ret < 0) {
- dev_info(&dev->udev->dev,
+ dev_info(&dev->intf->dev,
"Unable to initialize the status endpoint (%d), status interrupt will not be supported.\n",
ret);
}
ret = uvc_gpio_init_irq(dev);
if (ret < 0) {
- dev_err(&dev->udev->dev,
+ dev_err(&dev->intf->dev,
"Unable to request privacy GPIO IRQ (%d)\n", ret);
goto error;
}
ret = uvc_meta_init(dev);
if (ret < 0) {
- dev_err(&dev->udev->dev,
+ dev_err(&dev->intf->dev,
"Error initializing the metadata formats (%d)\n", ret);
goto error;
}
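The uvc_alloc_entity() to uvc_alloc_new_entity() conversion above also switches the failure convention from a bare NULL to the kernel's ERR_PTR encoding, so callers can propagate a precise errno instead of assuming -ENOMEM. The idiom in isolation (generic sketch, not driver code):

#include <linux/err.h>
#include <linux/slab.h>

static struct uvc_entity *alloc_entity(size_t size)
{
	struct uvc_entity *entity = kzalloc(size, GFP_KERNEL);

	if (!entity)
		return ERR_PTR(-ENOMEM);	/* encode the errno in the pointer */
	return entity;
}

static int caller(size_t size)
{
	struct uvc_entity *entity = alloc_entity(size);

	if (IS_ERR(entity))
		return PTR_ERR(entity);	/* recover the errno */
	/* ... use entity ... */
	return 0;
}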
diff --git a/drivers/media/usb/uvc/uvc_entity.c b/drivers/media/usb/uvc/uvc_entity.c
index cc68dd24eb42..3823ac9c8045 100644
--- a/drivers/media/usb/uvc/uvc_entity.c
+++ b/drivers/media/usb/uvc/uvc_entity.c
@@ -140,7 +140,7 @@ int uvc_mc_register_entities(struct uvc_video_chain *chain)
list_for_each_entry(entity, &chain->entities, chain) {
ret = uvc_mc_init_entity(chain, entity);
if (ret < 0) {
- dev_info(&chain->dev->udev->dev,
+ dev_info(&chain->dev->intf->dev,
"Failed to initialize entity for entity %u\n",
entity->id);
return ret;
@@ -150,7 +150,7 @@ int uvc_mc_register_entities(struct uvc_video_chain *chain)
list_for_each_entry(entity, &chain->entities, chain) {
ret = uvc_mc_create_links(chain, entity);
if (ret < 0) {
- dev_info(&chain->dev->udev->dev,
+ dev_info(&chain->dev->intf->dev,
"Failed to create links for entity %u\n",
entity->id);
return ret;
diff --git a/drivers/media/usb/uvc/uvc_metadata.c b/drivers/media/usb/uvc/uvc_metadata.c
index 229e08ff323e..c23b174965c3 100644
--- a/drivers/media/usb/uvc/uvc_metadata.c
+++ b/drivers/media/usb/uvc/uvc_metadata.c
@@ -23,10 +23,10 @@
* V4L2 ioctls
*/
-static int uvc_meta_v4l2_querycap(struct file *file, void *fh,
+static int uvc_meta_v4l2_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct uvc_streaming *stream = video_get_drvdata(vfh->vdev);
struct uvc_video_chain *chain = stream->chain;
@@ -39,28 +39,26 @@ static int uvc_meta_v4l2_querycap(struct file *file, void *fh,
return 0;
}
-static int uvc_meta_v4l2_get_format(struct file *file, void *fh,
+static int uvc_meta_v4l2_get_format(struct file *file, void *priv,
struct v4l2_format *format)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct uvc_streaming *stream = video_get_drvdata(vfh->vdev);
struct v4l2_meta_format *fmt = &format->fmt.meta;
if (format->type != vfh->vdev->queue->type)
return -EINVAL;
- memset(fmt, 0, sizeof(*fmt));
-
fmt->dataformat = stream->meta.format;
fmt->buffersize = UVC_METADATA_BUF_SIZE;
return 0;
}
-static int uvc_meta_v4l2_try_format(struct file *file, void *fh,
+static int uvc_meta_v4l2_try_format(struct file *file, void *priv,
struct v4l2_format *format)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct uvc_streaming *stream = video_get_drvdata(vfh->vdev);
struct uvc_device *dev = stream->dev;
struct v4l2_meta_format *fmt = &format->fmt.meta;
@@ -69,11 +67,12 @@ static int uvc_meta_v4l2_try_format(struct file *file, void *fh,
if (format->type != vfh->vdev->queue->type)
return -EINVAL;
- for (unsigned int i = 0; i < dev->nmeta_formats; i++)
+ for (unsigned int i = 0; i < dev->nmeta_formats; i++) {
if (dev->meta_formats[i] == fmt->dataformat) {
fmeta = fmt->dataformat;
break;
}
+ }
memset(fmt, 0, sizeof(*fmt));
@@ -83,15 +82,15 @@ static int uvc_meta_v4l2_try_format(struct file *file, void *fh,
return 0;
}
-static int uvc_meta_v4l2_set_format(struct file *file, void *fh,
+static int uvc_meta_v4l2_set_format(struct file *file, void *priv,
struct v4l2_format *format)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct uvc_streaming *stream = video_get_drvdata(vfh->vdev);
struct v4l2_meta_format *fmt = &format->fmt.meta;
int ret;
- ret = uvc_meta_v4l2_try_format(file, fh, format);
+ ret = uvc_meta_v4l2_try_format(file, priv, format);
if (ret < 0)
return ret;
@@ -100,37 +99,28 @@ static int uvc_meta_v4l2_set_format(struct file *file, void *fh,
* Metadata buffers would still be perfectly parseable, but it's more
* consistent and cleaner to disallow that.
*/
- mutex_lock(&stream->mutex);
-
if (vb2_is_busy(&stream->meta.queue.queue))
- ret = -EBUSY;
- else
- stream->meta.format = fmt->dataformat;
+ return -EBUSY;
- mutex_unlock(&stream->mutex);
+ stream->meta.format = fmt->dataformat;
- return ret;
+ return 0;
}
-static int uvc_meta_v4l2_enum_formats(struct file *file, void *fh,
+static int uvc_meta_v4l2_enum_formats(struct file *file, void *priv,
struct v4l2_fmtdesc *fdesc)
{
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct uvc_streaming *stream = video_get_drvdata(vfh->vdev);
struct uvc_device *dev = stream->dev;
- u32 i = fdesc->index;
if (fdesc->type != vfh->vdev->queue->type)
return -EINVAL;
- if (i >= dev->nmeta_formats)
+ if (fdesc->index >= dev->nmeta_formats)
return -EINVAL;
- memset(fdesc, 0, sizeof(*fdesc));
-
- fdesc->type = vfh->vdev->queue->type;
- fdesc->index = i;
- fdesc->pixelformat = dev->meta_formats[i];
+ fdesc->pixelformat = dev->meta_formats[fdesc->index];
return 0;
}
@@ -177,7 +167,6 @@ static struct uvc_entity *uvc_meta_find_msxu(struct uvc_device *dev)
return NULL;
}
-#define MSXU_CONTROL_METADATA 0x9
static int uvc_meta_detect_msxu(struct uvc_device *dev)
{
u32 *data __free(kfree) = NULL;
@@ -196,9 +185,12 @@ static int uvc_meta_detect_msxu(struct uvc_device *dev)
if (!data)
return -ENOMEM;
- /* Check if the metadata is already enabled. */
+ /*
+ * Check if the metadata is already enabled, or if the device always
+ * returns metadata.
+ */
ret = uvc_query_ctrl(dev, UVC_GET_CUR, entity->id, dev->intfnum,
- MSXU_CONTROL_METADATA, data, sizeof(*data));
+ UVC_MSXU_CONTROL_METADATA, data, sizeof(*data));
if (ret)
return 0;
@@ -208,21 +200,23 @@ static int uvc_meta_detect_msxu(struct uvc_device *dev)
}
/*
- * We have seen devices that require 1 to enable the metadata, others
- * requiring a value != 1 and others requiring a value >1. Luckily for
- * us, the value from GET_MAX seems to work all the time.
+ * Set the value of UVC_MSXU_CONTROL_METADATA to the value reported by
+ * GET_MAX to enable production of MSXU metadata. The GET_MAX request
+ * reports the maximum size of the metadata; if its value is 0, MSXU
+ * metadata is not supported. For more information, see
+ * https://learn.microsoft.com/en-us/windows-hardware/drivers/stream/uvc-extensions-1-5#2229-metadata-control
*/
ret = uvc_query_ctrl(dev, UVC_GET_MAX, entity->id, dev->intfnum,
- MSXU_CONTROL_METADATA, data, sizeof(*data));
+ UVC_MSXU_CONTROL_METADATA, data, sizeof(*data));
if (ret || !*data)
return 0;
/*
- * If we can set MSXU_CONTROL_METADATA, the device will report
+ * If we can set UVC_MSXU_CONTROL_METADATA, the device will report
* metadata.
*/
ret = uvc_query_ctrl(dev, UVC_SET_CUR, entity->id, dev->intfnum,
- MSXU_CONTROL_METADATA, data, sizeof(*data));
+ UVC_MSXU_CONTROL_METADATA, data, sizeof(*data));
if (!ret)
dev->quirks |= UVC_QUIRK_MSXU_META;
@@ -232,12 +226,11 @@ static int uvc_meta_detect_msxu(struct uvc_device *dev)
int uvc_meta_register(struct uvc_streaming *stream)
{
struct uvc_device *dev = stream->dev;
- struct video_device *vdev = &stream->meta.vdev;
struct uvc_video_queue *queue = &stream->meta.queue;
stream->meta.format = V4L2_META_FMT_UVC;
- return uvc_register_video_device(dev, stream, vdev, queue,
+ return uvc_register_video_device(dev, stream, queue,
V4L2_BUF_TYPE_META_CAPTURE,
&uvc_meta_fops, &uvc_meta_ioctl_ops);
}
diff --git a/drivers/media/usb/uvc/uvc_status.c b/drivers/media/usb/uvc/uvc_status.c
index ee01dce4b783..231cfee8e7c2 100644
--- a/drivers/media/usb/uvc/uvc_status.c
+++ b/drivers/media/usb/uvc/uvc_status.c
@@ -62,7 +62,8 @@ static int uvc_input_init(struct uvc_device *dev)
__set_bit(EV_KEY, input->evbit);
__set_bit(KEY_CAMERA, input->keybit);
- if ((ret = input_register_device(input)) < 0)
+ ret = input_register_device(input);
+ if (ret < 0)
goto error;
dev->input = input;
@@ -215,7 +216,7 @@ static void uvc_status_complete(struct urb *urb)
return;
default:
- dev_warn(&dev->udev->dev,
+ dev_warn(&dev->intf->dev,
"Non-zero status (%d) in status completion handler.\n",
urb->status);
return;
@@ -247,7 +248,7 @@ static void uvc_status_complete(struct urb *urb)
urb->interval = dev->int_ep->desc.bInterval;
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret < 0)
- dev_err(&dev->udev->dev,
+ dev_err(&dev->intf->dev,
"Failed to resubmit status URB (%d).\n", ret);
}
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 160f9cf6e6db..9e4a251eca88 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -329,14 +329,12 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
* developers test their webcams with the Linux driver as well as with
* the Windows driver).
*/
- mutex_lock(&stream->mutex);
if (stream->dev->quirks & UVC_QUIRK_PROBE_EXTRAFIELDS)
probe->dwMaxVideoFrameSize =
stream->ctrl.dwMaxVideoFrameSize;
/* Probe the device. */
ret = uvc_probe_video(stream, probe);
- mutex_unlock(&stream->mutex);
if (ret < 0)
return ret;
@@ -388,26 +386,22 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
return ret;
}
-static int uvc_ioctl_g_fmt(struct file *file, void *fh,
+static int uvc_ioctl_g_fmt(struct file *file, void *priv,
struct v4l2_format *fmt)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_streaming *stream = handle->stream;
const struct uvc_format *format;
const struct uvc_frame *frame;
- int ret = 0;
if (fmt->type != stream->type)
return -EINVAL;
- mutex_lock(&stream->mutex);
format = stream->cur_format;
frame = stream->cur_frame;
- if (format == NULL || frame == NULL) {
- ret = -EINVAL;
- goto done;
- }
+ if (!format || !frame)
+ return -EINVAL;
fmt->fmt.pix.pixelformat = format->fcc;
fmt->fmt.pix.width = frame->wWidth;
@@ -419,15 +413,13 @@ static int uvc_ioctl_g_fmt(struct file *file, void *fh,
fmt->fmt.pix.xfer_func = format->xfer_func;
fmt->fmt.pix.ycbcr_enc = format->ycbcr_enc;
-done:
- mutex_unlock(&stream->mutex);
- return ret;
+ return 0;
}
-static int uvc_ioctl_s_fmt(struct file *file, void *fh,
+static int uvc_ioctl_s_fmt(struct file *file, void *priv,
struct v4l2_format *fmt)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_streaming *stream = handle->stream;
struct uvc_streaming_control probe;
const struct uvc_format *format;
@@ -441,35 +433,27 @@ static int uvc_ioctl_s_fmt(struct file *file, void *fh,
if (ret < 0)
return ret;
- mutex_lock(&stream->mutex);
- if (vb2_is_busy(&stream->queue.queue)) {
- ret = -EBUSY;
- goto done;
- }
+ if (vb2_is_busy(&stream->queue.queue))
+ return -EBUSY;
stream->ctrl = probe;
stream->cur_format = format;
stream->cur_frame = frame;
-done:
- mutex_unlock(&stream->mutex);
- return ret;
+ return 0;
}
-static int uvc_ioctl_g_parm(struct file *file, void *fh,
+static int uvc_ioctl_g_parm(struct file *file, void *priv,
struct v4l2_streamparm *parm)
{
u32 numerator, denominator;
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_streaming *stream = handle->stream;
if (parm->type != stream->type)
return -EINVAL;
- mutex_lock(&stream->mutex);
numerator = stream->ctrl.dwFrameInterval;
- mutex_unlock(&stream->mutex);
-
denominator = 10000000;
v4l2_simplify_fraction(&numerator, &denominator, 8, 333);
@@ -493,10 +477,10 @@ static int uvc_ioctl_g_parm(struct file *file, void *fh,
return 0;
}
-static int uvc_ioctl_s_parm(struct file *file, void *fh,
+static int uvc_ioctl_s_parm(struct file *file, void *priv,
struct v4l2_streamparm *parm)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_streaming *stream = handle->stream;
struct uvc_streaming_control probe;
struct v4l2_fract timeperframe;
@@ -519,12 +503,8 @@ static int uvc_ioctl_s_parm(struct file *file, void *fh,
uvc_dbg(stream->dev, FORMAT, "Setting frame interval to %u/%u (%u)\n",
timeperframe.numerator, timeperframe.denominator, interval);
- mutex_lock(&stream->mutex);
-
- if (uvc_queue_streaming(&stream->queue)) {
- mutex_unlock(&stream->mutex);
+ if (uvc_queue_streaming(&stream->queue))
return -EBUSY;
- }
format = stream->cur_format;
frame = stream->cur_frame;
@@ -556,14 +536,11 @@ static int uvc_ioctl_s_parm(struct file *file, void *fh,
/* Probe the device with the new settings. */
ret = uvc_probe_video(stream, &probe);
- if (ret < 0) {
- mutex_unlock(&stream->mutex);
+ if (ret < 0)
return ret;
- }
stream->ctrl = probe;
stream->cur_frame = frame;
- mutex_unlock(&stream->mutex);
/* Return the actual frame period. */
timeperframe.numerator = probe.dwFrameInterval;
@@ -599,18 +576,17 @@ static int uvc_v4l2_open(struct file *file)
if (!handle)
return -ENOMEM;
- v4l2_fh_init(&handle->vfh, &stream->vdev);
- v4l2_fh_add(&handle->vfh);
+ v4l2_fh_init(&handle->vfh, &stream->queue.vdev);
+ v4l2_fh_add(&handle->vfh, file);
handle->chain = stream->chain;
handle->stream = stream;
- file->private_data = handle;
return 0;
}
static int uvc_v4l2_release(struct file *file)
{
- struct uvc_fh *handle = file->private_data;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_streaming *stream = handle->stream;
uvc_dbg(stream->dev, CALLS, "%s\n", __func__);
@@ -623,10 +599,10 @@ static int uvc_v4l2_release(struct file *file)
return 0;
}
-static int uvc_ioctl_querycap(struct file *file, void *fh,
+static int uvc_ioctl_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct uvc_fh *handle = file->private_data;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_video_chain *chain = handle->chain;
struct uvc_streaming *stream = handle->stream;
@@ -639,10 +615,10 @@ static int uvc_ioctl_querycap(struct file *file, void *fh,
return 0;
}
-static int uvc_ioctl_enum_fmt(struct file *file, void *fh,
+static int uvc_ioctl_enum_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *fmt)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_streaming *stream = handle->stream;
enum v4l2_buf_type type = fmt->type;
const struct uvc_format *format;
@@ -663,20 +639,20 @@ static int uvc_ioctl_enum_fmt(struct file *file, void *fh,
return 0;
}
-static int uvc_ioctl_try_fmt(struct file *file, void *fh,
+static int uvc_ioctl_try_fmt(struct file *file, void *priv,
struct v4l2_format *fmt)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_streaming *stream = handle->stream;
struct uvc_streaming_control probe;
return uvc_v4l2_try_format(stream, fmt, &probe, NULL, NULL);
}
-static int uvc_ioctl_enum_input(struct file *file, void *fh,
+static int uvc_ioctl_enum_input(struct file *file, void *priv,
struct v4l2_input *input)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_video_chain *chain = handle->chain;
const struct uvc_entity *selector = chain->selector;
struct uvc_entity *iterm = NULL;
@@ -716,9 +692,9 @@ static int uvc_ioctl_enum_input(struct file *file, void *fh,
return 0;
}
-static int uvc_ioctl_g_input(struct file *file, void *fh, unsigned int *input)
+static int uvc_ioctl_g_input(struct file *file, void *priv, unsigned int *input)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_video_chain *chain = handle->chain;
u8 *buf;
int ret;
@@ -744,9 +720,9 @@ static int uvc_ioctl_g_input(struct file *file, void *fh, unsigned int *input)
return ret;
}
-static int uvc_ioctl_s_input(struct file *file, void *fh, unsigned int input)
+static int uvc_ioctl_s_input(struct file *file, void *priv, unsigned int input)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_streaming *stream = handle->stream;
struct uvc_video_chain *chain = handle->chain;
u8 *buf;
@@ -778,10 +754,10 @@ static int uvc_ioctl_s_input(struct file *file, void *fh, unsigned int input)
return ret;
}
-static int uvc_ioctl_query_ext_ctrl(struct file *file, void *fh,
+static int uvc_ioctl_query_ext_ctrl(struct file *file, void *priv,
struct v4l2_query_ext_ctrl *qec)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_video_chain *chain = handle->chain;
return uvc_query_v4l2_ctrl(chain, qec);
@@ -806,10 +782,10 @@ static int uvc_ctrl_check_access(struct uvc_video_chain *chain,
return ret;
}
-static int uvc_ioctl_g_ext_ctrls(struct file *file, void *fh,
+static int uvc_ioctl_g_ext_ctrls(struct file *file, void *priv,
struct v4l2_ext_controls *ctrls)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_video_chain *chain = handle->chain;
struct v4l2_ext_control *ctrl = ctrls->controls;
unsigned int i;
@@ -890,35 +866,35 @@ static int uvc_ioctl_s_try_ext_ctrls(struct uvc_fh *handle,
return uvc_ctrl_rollback(handle);
}
-static int uvc_ioctl_s_ext_ctrls(struct file *file, void *fh,
+static int uvc_ioctl_s_ext_ctrls(struct file *file, void *priv,
struct v4l2_ext_controls *ctrls)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
return uvc_ioctl_s_try_ext_ctrls(handle, ctrls, VIDIOC_S_EXT_CTRLS);
}
-static int uvc_ioctl_try_ext_ctrls(struct file *file, void *fh,
+static int uvc_ioctl_try_ext_ctrls(struct file *file, void *priv,
struct v4l2_ext_controls *ctrls)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
return uvc_ioctl_s_try_ext_ctrls(handle, ctrls, VIDIOC_TRY_EXT_CTRLS);
}
-static int uvc_ioctl_querymenu(struct file *file, void *fh,
+static int uvc_ioctl_querymenu(struct file *file, void *priv,
struct v4l2_querymenu *qm)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_video_chain *chain = handle->chain;
return uvc_query_v4l2_menu(chain, qm);
}
-static int uvc_ioctl_g_selection(struct file *file, void *fh,
+static int uvc_ioctl_g_selection(struct file *file, void *priv,
struct v4l2_selection *sel)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_streaming *stream = handle->stream;
if (sel->type != stream->type)
@@ -941,18 +917,16 @@ static int uvc_ioctl_g_selection(struct file *file, void *fh,
sel->r.left = 0;
sel->r.top = 0;
- mutex_lock(&stream->mutex);
sel->r.width = stream->cur_frame->wWidth;
sel->r.height = stream->cur_frame->wHeight;
- mutex_unlock(&stream->mutex);
return 0;
}
-static int uvc_ioctl_enum_framesizes(struct file *file, void *fh,
+static int uvc_ioctl_enum_framesizes(struct file *file, void *priv,
struct v4l2_frmsizeenum *fsize)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_streaming *stream = handle->stream;
const struct uvc_format *format = NULL;
const struct uvc_frame *frame = NULL;
@@ -989,10 +963,10 @@ static int uvc_ioctl_enum_framesizes(struct file *file, void *fh,
return 0;
}
-static int uvc_ioctl_enum_frameintervals(struct file *file, void *fh,
+static int uvc_ioctl_enum_frameintervals(struct file *file, void *priv,
struct v4l2_frmivalenum *fival)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_streaming *stream = handle->stream;
const struct uvc_format *format = NULL;
const struct uvc_frame *frame = NULL;
@@ -1061,10 +1035,10 @@ static int uvc_ioctl_subscribe_event(struct v4l2_fh *fh,
}
}
-static long uvc_ioctl_default(struct file *file, void *fh, bool valid_prio,
+static long uvc_ioctl_default(struct file *file, void *priv, bool valid_prio,
unsigned int cmd, void *arg)
{
- struct uvc_fh *handle = fh;
+ struct uvc_fh *handle = to_uvc_fh(file);
struct uvc_video_chain *chain = handle->chain;
switch (cmd) {
@@ -1170,7 +1144,7 @@ static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp,
static long uvc_v4l2_compat_ioctl32(struct file *file,
unsigned int cmd, unsigned long arg)
{
- struct uvc_fh *handle = file->private_data;
+ struct uvc_fh *handle = to_uvc_fh(file);
union {
struct uvc_xu_control_mapping xmap;
struct uvc_xu_control_query xqry;
@@ -1221,7 +1195,7 @@ static long uvc_v4l2_compat_ioctl32(struct file *file,
static long uvc_v4l2_unlocked_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
- struct uvc_fh *handle = file->private_data;
+ struct uvc_fh *handle = to_uvc_fh(file);
unsigned int converted_cmd = v4l2_translate_cmd(cmd);
int ret;
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index a5013a7fbca4..2094e059d7d3 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -95,14 +95,14 @@ int uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit,
*/
if (ret > 0 && query != UVC_GET_INFO) {
memset(data + ret, 0, size - ret);
- dev_warn_once(&dev->udev->dev,
+ dev_warn_once(&dev->intf->dev,
"UVC non compliance: %s control %u on unit %u returned %d bytes when we expected %u.\n",
uvc_query_name(query), cs, unit, ret, size);
return 0;
}
if (ret != -EPIPE) {
- dev_err(&dev->udev->dev,
+ dev_err(&dev->intf->dev,
"Failed to query (%s) UVC control %u on unit %u: %d (exp. %u).\n",
uvc_query_name(query), cs, unit, ret, size);
return ret < 0 ? ret : -EPIPE;
@@ -119,7 +119,7 @@ int uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit,
*(u8 *)data = tmp;
if (ret != 1) {
- dev_err_ratelimited(&dev->udev->dev,
+ dev_err_ratelimited(&dev->intf->dev,
"Failed to query (%s) UVC error code control %u on unit %u: %d (exp. 1).\n",
uvc_query_name(query), cs, unit, ret);
return ret < 0 ? ret : -EPIPE;
@@ -266,7 +266,7 @@ static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
if (stream->intf->num_altsetting > 1 &&
ctrl->dwMaxPayloadTransferSize > stream->maxpsize) {
dev_warn_ratelimited(&stream->intf->dev,
- "UVC non compliance: the max payload transmission size (%u) exceeds the size of the ep max packet (%u). Using the max size.\n",
+ "UVC non compliance: Reducing max payload transfer size (%u) to fit endpoint limit (%u).\n",
ctrl->dwMaxPayloadTransferSize,
stream->maxpsize);
ctrl->dwMaxPayloadTransferSize = stream->maxpsize;
@@ -1691,7 +1691,7 @@ static void uvc_video_complete(struct urb *urb)
struct uvc_streaming *stream = uvc_urb->stream;
struct uvc_video_queue *queue = &stream->queue;
struct uvc_video_queue *qmeta = &stream->meta.queue;
- struct vb2_queue *vb2_qmeta = stream->meta.vdev.queue;
+ struct vb2_queue *vb2_qmeta = stream->meta.queue.vdev.queue;
struct uvc_buffer *buf = NULL;
struct uvc_buffer *buf_meta = NULL;
unsigned long flags;
@@ -1870,24 +1870,6 @@ static void uvc_video_stop_transfer(struct uvc_streaming *stream,
}
/*
- * Compute the maximum number of bytes per interval for an endpoint.
- */
-u16 uvc_endpoint_max_bpi(struct usb_device *dev, struct usb_host_endpoint *ep)
-{
- u16 psize;
-
- switch (dev->speed) {
- case USB_SPEED_SUPER:
- case USB_SPEED_SUPER_PLUS:
- return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
- default:
- psize = usb_endpoint_maxp(&ep->desc);
- psize *= usb_endpoint_maxp_mult(&ep->desc);
- return psize;
- }
-}
-
-/*
* Initialize isochronous URBs and allocate transfer buffers. The packet size
* is given by the endpoint.
*/
@@ -1897,10 +1879,10 @@ static int uvc_init_video_isoc(struct uvc_streaming *stream,
struct urb *urb;
struct uvc_urb *uvc_urb;
unsigned int npackets, i;
- u16 psize;
+ u32 psize;
u32 size;
- psize = uvc_endpoint_max_bpi(stream->dev->udev, ep);
+ psize = usb_endpoint_max_periodic_payload(stream->dev->udev, ep);
size = stream->ctrl.dwMaxVideoFrameSize;
npackets = uvc_alloc_urb_buffers(stream, size, psize, gfp_flags);
@@ -2043,7 +2025,7 @@ static int uvc_video_start_transfer(struct uvc_streaming *stream,
continue;
/* Check if the bandwidth is high enough. */
- psize = uvc_endpoint_max_bpi(stream->dev->udev, ep);
+ psize = usb_endpoint_max_periodic_payload(stream->dev->udev, ep);
if (psize >= bandwidth && psize < best_psize) {
altsetting = alts->desc.bAlternateSetting;
best_psize = psize;
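The bandwidth check in the last hunk implements a simple selection policy: among all altsettings whose per-interval payload covers the required bandwidth, pick the one that wastes the least. Isolated as a hedged sketch (hypothetical array-based form of the same loop):

#include <linux/limits.h>
#include <linux/types.h>

static int pick_altsetting(const u32 *psize, unsigned int n, u32 bandwidth)
{
	u32 best_psize = U32_MAX;
	int best = -1;
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (psize[i] >= bandwidth && psize[i] < best_psize) {
			best_psize = psize[i];
			best = i;
		}
	}
	return best;	/* -1: no altsetting provides enough bandwidth */
}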
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 757254fc4fe9..ed7bad31f75c 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -41,6 +41,8 @@
#define UVC_EXT_GPIO_UNIT 0x7ffe
#define UVC_EXT_GPIO_UNIT_ID 0x100
+#define UVC_INVALID_ENTITY_ID 0xffff
+
/* ------------------------------------------------------------------------
* Driver specific constants.
*/
@@ -328,6 +330,7 @@ struct uvc_buffer {
#define UVC_QUEUE_DISCONNECTED (1 << 0)
struct uvc_video_queue {
+ struct video_device vdev;
struct vb2_queue queue;
struct mutex mutex; /*
* Serializes vb2_queue and
@@ -450,13 +453,12 @@ struct uvc_urb {
struct uvc_streaming {
struct list_head list;
struct uvc_device *dev;
- struct video_device vdev;
struct uvc_video_chain *chain;
atomic_t active;
struct usb_interface *intf;
int intfnum;
- u16 maxpsize;
+ u32 maxpsize;
struct uvc_streaming_header header;
enum v4l2_buf_type type;
@@ -469,12 +471,6 @@ struct uvc_streaming {
const struct uvc_format *cur_format;
const struct uvc_frame *cur_frame;
- /*
- * Protect access to ctrl, cur_format, cur_frame and hardware video
- * probe control.
- */
- struct mutex mutex;
-
/* Buffers queue. */
unsigned int frozen : 1;
struct uvc_video_queue queue;
@@ -483,7 +479,6 @@ struct uvc_streaming {
struct uvc_buffer *meta_buf);
struct {
- struct video_device vdev;
struct uvc_video_queue queue;
u32 format;
} meta;
@@ -637,6 +632,11 @@ struct uvc_fh {
unsigned int pending_async_ctrls;
};
+static inline struct uvc_fh *to_uvc_fh(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct uvc_fh, vfh);
+}
+
/* ------------------------------------------------------------------------
* Debugging, printing and logging
*/
@@ -667,7 +667,7 @@ extern unsigned int uvc_hw_timestamps_param;
#define uvc_dbg(_dev, flag, fmt, ...) \
do { \
if (uvc_dbg_param & UVC_DBG_##flag) \
- dev_printk(KERN_DEBUG, &(_dev)->udev->dev, fmt, \
+ dev_printk(KERN_DEBUG, &(_dev)->intf->dev, fmt, \
##__VA_ARGS__); \
} while (0)
@@ -680,7 +680,7 @@ do { \
#define uvc_warn_once(_dev, warn, fmt, ...) \
do { \
if (!test_and_set_bit(warn, &(_dev)->warnings)) \
- dev_info(&(_dev)->udev->dev, fmt, ##__VA_ARGS__); \
+ dev_info(&(_dev)->intf->dev, fmt, ##__VA_ARGS__); \
} while (0)
/* --------------------------------------------------------------------------
@@ -733,7 +733,6 @@ int uvc_meta_register(struct uvc_streaming *stream);
int uvc_register_video_device(struct uvc_device *dev,
struct uvc_streaming *stream,
- struct video_device *vdev,
struct uvc_video_queue *queue,
enum v4l2_buf_type type,
const struct v4l2_file_operations *fops,
@@ -798,8 +797,6 @@ void uvc_ctrl_cleanup_fh(struct uvc_fh *handle);
/* Utility functions */
struct usb_host_endpoint *uvc_find_endpoint(struct usb_host_interface *alts,
u8 epaddr);
-u16 uvc_endpoint_max_bpi(struct usb_device *dev, struct usb_host_endpoint *ep);
-
/* Quirks support */
void uvc_video_decode_isight(struct uvc_urb *uvc_urb,
struct uvc_buffer *buf,
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index 6e585bc76367..b367d479d6b3 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -34,6 +34,9 @@
* Added Gerd Knorrs v4l1 enhancements (Justin Schoeman)
*/
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
@@ -509,8 +512,9 @@ int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
}
EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt);
-s64 __v4l2_get_link_freq_ctrl(struct v4l2_ctrl_handler *handler,
- unsigned int mul, unsigned int div)
+#ifdef CONFIG_MEDIA_CONTROLLER
+static s64 v4l2_get_link_freq_ctrl(struct v4l2_ctrl_handler *handler,
+ unsigned int mul, unsigned int div)
{
struct v4l2_ctrl *ctrl;
s64 freq;
@@ -545,11 +549,9 @@ s64 __v4l2_get_link_freq_ctrl(struct v4l2_ctrl_handler *handler,
return freq > 0 ? freq : -EINVAL;
}
-EXPORT_SYMBOL_GPL(__v4l2_get_link_freq_ctrl);
-#ifdef CONFIG_MEDIA_CONTROLLER
-s64 __v4l2_get_link_freq_pad(struct media_pad *pad, unsigned int mul,
- unsigned int div)
+s64 v4l2_get_link_freq(const struct media_pad *pad, unsigned int mul,
+ unsigned int div)
{
struct v4l2_mbus_config mbus_config = {};
struct v4l2_subdev *sd;
@@ -568,10 +570,10 @@ s64 __v4l2_get_link_freq_pad(struct media_pad *pad, unsigned int mul,
* Fall back to using the link frequency control if the media bus config
* doesn't provide a link frequency.
*/
- return __v4l2_get_link_freq_ctrl(sd->ctrl_handler, mul, div);
+ return v4l2_get_link_freq_ctrl(sd->ctrl_handler, mul, div);
}
-EXPORT_SYMBOL_GPL(__v4l2_get_link_freq_pad);
-#endif /* CONFIG_MEDIA_CONTROLLER */
+EXPORT_SYMBOL_GPL(v4l2_get_link_freq);
+#endif
/*
* Simplify a fraction using a simple continued fraction decomposition. The
@@ -705,3 +707,73 @@ int v4l2_link_freq_to_bitmap(struct device *dev, const u64 *fw_link_freqs,
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_link_freq_to_bitmap);
+
+struct clk *__devm_v4l2_sensor_clk_get(struct device *dev, const char *id,
+ bool legacy, bool fixed_rate,
+ unsigned long clk_rate)
+{
+ bool of_node = is_of_node(dev_fwnode(dev));
+ const char *clk_id __free(kfree) = NULL;
+ struct clk_hw *clk_hw;
+ struct clk *clk;
+ u32 rate = clk_rate;
+ int ret = 0;
+
+ clk = devm_clk_get_optional(dev, id);
+ if (IS_ERR(clk))
+ return clk;
+
+ /*
+ * If the caller didn't request a fixed rate, retrieve it from the
+ * clock-frequency property. -EINVAL indicates the property is absent,
+ * and is not a failure. Other errors, or success with a clock-frequency
+ * value of 0, are hard failures.
+ */
+ if (!fixed_rate || !clk_rate) {
+ ret = device_property_read_u32(dev, "clock-frequency", &rate);
+ if ((ret && ret != -EINVAL) || (!ret && !rate))
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (clk) {
+ /*
+ * On non-OF platforms, or when legacy behaviour is requested,
+ * set the clock rate if a rate has been specified by the caller
+ * or by the clock-frequency property.
+ */
+ if (rate && (!of_node || legacy)) {
+ ret = clk_set_rate(clk, rate);
+ if (ret) {
+ dev_err(dev, "Failed to set clock rate: %u\n",
+ rate);
+ return ERR_PTR(ret);
+ }
+ }
+ return clk;
+ }
+
+ /*
+ * Register a dummy fixed clock on non-OF platforms or when legacy
+ * behaviour is requested. This requires the common clock framework.
+ */
+ if (!IS_ENABLED(CONFIG_COMMON_CLK) || (of_node && !legacy))
+ return ERR_PTR(-ENOENT);
+
+ /* We need a rate to create a clock. */
+ if (ret)
+ return ERR_PTR(ret == -EINVAL ? -EPROBE_DEFER : ret);
+
+ if (!id) {
+ clk_id = kasprintf(GFP_KERNEL, "clk-%s", dev_name(dev));
+ if (!clk_id)
+ return ERR_PTR(-ENOMEM);
+ id = clk_id;
+ }
+
+ clk_hw = devm_clk_hw_register_fixed_rate(dev, id, NULL, 0, rate);
+ if (IS_ERR(clk_hw))
+ return ERR_CAST(clk_hw);
+
+ return clk_hw->clk;
+}
+EXPORT_SYMBOL_GPL(__devm_v4l2_sensor_clk_get);
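A hedged usage sketch for the helper added above, from a hypothetical I2C sensor probe. The in-tree wrapper name around __devm_v4l2_sensor_clk_get() is not shown in this patch, so the raw call is used; the arguments follow the definition above (default clock id, no legacy behaviour, no fixed rate):

#include <linux/clk.h>
#include <linux/i2c.h>
#include <media/v4l2-common.h>

static int mysensor_probe(struct i2c_client *client)
{
	struct device *dev = &client->dev;
	struct clk *xclk;

	xclk = __devm_v4l2_sensor_clk_get(dev, NULL, false, false, 0);
	if (IS_ERR(xclk))
		return dev_err_probe(dev, PTR_ERR(xclk),
				     "failed to get sensor clock\n");

	/* clk_get_rate() now reflects either the real clock rate or the
	 * dummy fixed-rate clock created from "clock-frequency". */
	dev_dbg(dev, "xclk: %lu Hz\n", clk_get_rate(xclk));
	return 0;
}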
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 8c07400bd280..2c88e1175a10 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -8,7 +8,7 @@
* Copyright (C) 2001,2002 Andi Kleen, SuSE Labs
* Copyright (C) 2003 Pavel Machek (pavel@ucw.cz)
* Copyright (C) 2005 Philippe De Muyter (phdm@macqel.be)
- * Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2008 Hans Verkuil <hverkuil@kernel.org>
*
* These routines maintain argument size conversion between 32bit and 64bit
* ioctls.
@@ -672,15 +672,12 @@ struct v4l2_ext_control32 {
static inline bool ctrl_is_pointer(struct file *file, u32 id)
{
struct video_device *vdev = video_devdata(file);
- struct v4l2_fh *fh = NULL;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
struct v4l2_ctrl_handler *hdl = NULL;
struct v4l2_query_ext_ctrl qec = { id };
const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops;
- if (test_bit(V4L2_FL_USES_V4L2_FH, &vdev->flags))
- fh = file->private_data;
-
- if (fh && fh->ctrl_handler)
+ if (fh->ctrl_handler)
hdl = fh->ctrl_handler;
else if (vdev->ctrl_handler)
hdl = vdev->ctrl_handler;
@@ -694,7 +691,7 @@ static inline bool ctrl_is_pointer(struct file *file, u32 id)
if (!ops || !ops->vidioc_query_ext_ctrl)
return false;
- return !ops->vidioc_query_ext_ctrl(file, fh, &qec) &&
+ return !ops->vidioc_query_ext_ctrl(file, NULL, &qec) &&
(qec.flags & V4L2_CTRL_FLAG_HAS_PAYLOAD);
}
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-api.c b/drivers/media/v4l2-core/v4l2-ctrls-api.c
index d49a68b36c28..0078a04c5445 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-api.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-api.c
@@ -2,7 +2,7 @@
/*
* V4L2 controls framework uAPI implementation:
*
- * Copyright (C) 2010-2021 Hans Verkuil <hverkuil-cisco@xs4all.nl>
+ * Copyright (C) 2010-2021 Hans Verkuil <hverkuil@kernel.org>
*/
#define pr_fmt(fmt) "v4l2-ctrls: " fmt
@@ -1250,14 +1250,17 @@ EXPORT_SYMBOL(v4l2_querymenu);
* VIDIOC_LOG_STATUS helpers
*/
-int v4l2_ctrl_log_status(struct file *file, void *fh)
+int v4l2_ctrl_log_status(struct file *file, void *priv)
{
struct video_device *vfd = video_devdata(file);
- struct v4l2_fh *vfh = file->private_data;
- if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) && vfd->v4l2_dev)
+ if (vfd->v4l2_dev) {
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
+
v4l2_ctrl_handler_log_status(vfh->ctrl_handler,
vfd->v4l2_dev->name);
+ }
+
return 0;
}
EXPORT_SYMBOL(v4l2_ctrl_log_status);
@@ -1348,7 +1351,7 @@ EXPORT_SYMBOL(v4l2_ctrl_subdev_subscribe_event);
*/
__poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
poll_wait(file, &fh->wait, wait);
if (v4l2_event_pending(fh))
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-core.c b/drivers/media/v4l2-core/v4l2-ctrls-core.c
index 98b960775e87..85d07ef44f62 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-core.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-core.c
@@ -2,7 +2,7 @@
/*
* V4L2 controls framework core implementation.
*
- * Copyright (C) 2010-2021 Hans Verkuil <hverkuil-cisco@xs4all.nl>
+ * Copyright (C) 2010-2021 Hans Verkuil <hverkuil@kernel.org>
*/
#include <linux/export.h>
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-defs.c b/drivers/media/v4l2-core/v4l2-ctrls-defs.c
index 1ea52011247a..ad41f65374e2 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-defs.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-defs.c
@@ -2,7 +2,7 @@
/*
* V4L2 controls framework control definitions.
*
- * Copyright (C) 2010-2021 Hans Verkuil <hverkuil-cisco@xs4all.nl>
+ * Copyright (C) 2010-2021 Hans Verkuil <hverkuil@kernel.org>
*/
#include <linux/export.h>
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-priv.h b/drivers/media/v4l2-core/v4l2-ctrls-priv.h
index aba6176fab6c..f4cf273ecf30 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-priv.h
+++ b/drivers/media/v4l2-core/v4l2-ctrls-priv.h
@@ -2,7 +2,7 @@
/*
* V4L2 controls framework private header.
*
- * Copyright (C) 2010-2021 Hans Verkuil <hverkuil-cisco@xs4all.nl>
+ * Copyright (C) 2010-2021 Hans Verkuil <hverkuil@kernel.org>
*/
#ifndef _V4L2_CTRLS_PRIV_H_
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-request.c b/drivers/media/v4l2-core/v4l2-ctrls-request.c
index c637049d7a2b..e77f722b36a4 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-request.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-request.c
@@ -2,7 +2,7 @@
/*
* V4L2 controls framework Request API implementation.
*
- * Copyright (C) 2018-2021 Hans Verkuil <hverkuil-cisco@xs4all.nl>
+ * Copyright (C) 2018-2021 Hans Verkuil <hverkuil@kernel.org>
*/
#define pr_fmt(fmt) "v4l2-ctrls: " fmt
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index c369235113d9..10a126e50c1c 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -411,7 +411,7 @@ static int v4l2_mmap(struct file *filp, struct vm_area_struct *vm)
static int v4l2_open(struct inode *inode, struct file *filp)
{
struct video_device *vdev;
- int ret = 0;
+ int ret;
/* Check if the video device is available */
mutex_lock(&videodev_lock);
@@ -424,16 +424,27 @@ static int v4l2_open(struct inode *inode, struct file *filp)
/* and increase the device refcount */
video_get(vdev);
mutex_unlock(&videodev_lock);
- if (vdev->fops->open) {
- if (video_is_registered(vdev))
- ret = vdev->fops->open(filp);
- else
- ret = -ENODEV;
+
+ if (!video_is_registered(vdev)) {
+ ret = -ENODEV;
+ goto done;
+ }
+
+ ret = vdev->fops->open(filp);
+ if (ret)
+ goto done;
+
+ /* All drivers must use v4l2_fh. */
+ if (WARN_ON(!test_bit(V4L2_FL_USES_V4L2_FH, &vdev->flags))) {
+ vdev->fops->release(filp);
+ ret = -ENODEV;
}
+done:
if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP)
dprintk("%s: open (%d)\n",
video_device_node_name(vdev), ret);
+
/* decrease the refcount in case of an error */
if (ret)
video_put(vdev);
@@ -444,7 +455,7 @@ static int v4l2_open(struct inode *inode, struct file *filp)
static int v4l2_release(struct inode *inode, struct file *filp)
{
struct video_device *vdev = video_devdata(filp);
- int ret = 0;
+ int ret;
/*
* We need to serialize the release() with queueing new requests.
@@ -452,14 +463,12 @@ static int v4l2_release(struct inode *inode, struct file *filp)
* operation, and that should not be mixed with queueing a new
* request at the same time.
*/
- if (vdev->fops->release) {
- if (v4l2_device_supports_requests(vdev->v4l2_dev)) {
- mutex_lock(&vdev->v4l2_dev->mdev->req_queue_mutex);
- ret = vdev->fops->release(filp);
- mutex_unlock(&vdev->v4l2_dev->mdev->req_queue_mutex);
- } else {
- ret = vdev->fops->release(filp);
- }
+ if (v4l2_device_supports_requests(vdev->v4l2_dev)) {
+ mutex_lock(&vdev->v4l2_dev->mdev->req_queue_mutex);
+ ret = vdev->fops->release(filp);
+ mutex_unlock(&vdev->v4l2_dev->mdev->req_queue_mutex);
+ } else {
+ ret = vdev->fops->release(filp);
}
if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP)
@@ -922,6 +931,9 @@ int __video_register_device(struct video_device *vdev,
/* the device_caps field MUST be set for all but subdevs */
if (WARN_ON(type != VFL_TYPE_SUBDEV && !vdev->device_caps))
return -EINVAL;
+ /* the open and release file operations are mandatory */
+ if (WARN_ON(!vdev->fops || !vdev->fops->open || !vdev->fops->release))
+ return -EINVAL;
/* v4l2_fh support */
spin_lock_init(&vdev->fh_lock);
@@ -1114,8 +1126,7 @@ void video_unregister_device(struct video_device *vdev)
*/
clear_bit(V4L2_FL_REGISTERED, &vdev->flags);
mutex_unlock(&videodev_lock);
- if (test_bit(V4L2_FL_USES_V4L2_FH, &vdev->flags))
- v4l2_event_wake_all(vdev);
+ v4l2_event_wake_all(vdev);
device_unregister(&vdev->dev);
}
EXPORT_SYMBOL(video_unregister_device);
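
With __video_register_device() now refusing registration when open or release is missing, and v4l2_open() warning when a driver does not use struct v4l2_fh, the minimal compliant file_operations reduces to the stock v4l2_fh helpers. A sketch under those assumptions (my_fops and the chosen ioctl/poll ops are illustrative names, not part of this patch):

	static const struct v4l2_file_operations my_fops = {
		.owner		= THIS_MODULE,
		/* mandatory: allocates a v4l2_fh and adds it to the device */
		.open		= v4l2_fh_open,
		/* mandatory: deletes and frees the v4l2_fh again */
		.release	= v4l2_fh_release,
		.unlocked_ioctl	= video_ioctl2,
		.poll		= v4l2_ctrl_poll,
	};

Drivers with per-open state embed a struct v4l2_fh and call the fh helpers themselves, as sketched after the v4l2-fh.c hunks below.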
diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
index 5e537454f5cd..63b12ef9d4d9 100644
--- a/drivers/media/v4l2-core/v4l2-device.c
+++ b/drivers/media/v4l2-core/v4l2-device.c
@@ -2,7 +2,7 @@
/*
V4L2 device support.
- Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2008 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index 7710cb26bea0..346d1b0e10ce 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -1226,6 +1226,7 @@ DEBUGFS_FOPS(avi, V4L2_DEBUGFS_IF_AVI);
DEBUGFS_FOPS(audio, V4L2_DEBUGFS_IF_AUDIO);
DEBUGFS_FOPS(spd, V4L2_DEBUGFS_IF_SPD);
DEBUGFS_FOPS(hdmi, V4L2_DEBUGFS_IF_HDMI);
+DEBUGFS_FOPS(drm, V4L2_DEBUGFS_IF_DRM);
struct v4l2_debugfs_if *v4l2_debugfs_if_alloc(struct dentry *root, u32 if_types,
void *priv,
@@ -1255,6 +1256,9 @@ struct v4l2_debugfs_if *v4l2_debugfs_if_alloc(struct dentry *root, u32 if_types,
if (if_types & V4L2_DEBUGFS_IF_HDMI)
debugfs_create_file("hdmi", 0400, infoframes->if_dir,
infoframes, &infoframe_hdmi_fops);
+ if (if_types & V4L2_DEBUGFS_IF_DRM)
+ debugfs_create_file("hdr_drm", 0400, infoframes->if_dir,
+ infoframes, &infoframe_drm_fops);
return infoframes;
}
EXPORT_SYMBOL_GPL(v4l2_debugfs_if_alloc);
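
The new V4L2_DEBUGFS_IF_DRM flag exposes the HDR Dynamic Range and Mastering infoframe as an "hdr_drm" debugfs file next to the existing avi/audio/spd/hdmi ones. A hedged usage sketch (state, debugfs_root and my_if_read are hypothetical driver names; the read callback is assumed to follow the existing v4l2_debugfs_if_read_t contract of this helper):

	state->infoframes =
		v4l2_debugfs_if_alloc(state->debugfs_root,
				      V4L2_DEBUGFS_IF_AVI |
				      V4L2_DEBUGFS_IF_DRM,
				      state, my_if_read);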
diff --git a/drivers/media/v4l2-core/v4l2-fh.c b/drivers/media/v4l2-core/v4l2-fh.c
index 90eec79ee995..df3ba9d4674b 100644
--- a/drivers/media/v4l2-core/v4l2-fh.c
+++ b/drivers/media/v4l2-core/v4l2-fh.c
@@ -41,10 +41,12 @@ void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
}
EXPORT_SYMBOL_GPL(v4l2_fh_init);
-void v4l2_fh_add(struct v4l2_fh *fh)
+void v4l2_fh_add(struct v4l2_fh *fh, struct file *filp)
{
unsigned long flags;
+ filp->private_data = fh;
+
v4l2_prio_open(fh->vdev->prio, &fh->prio);
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
list_add(&fh->list, &fh->vdev->fh_list);
@@ -57,16 +59,15 @@ int v4l2_fh_open(struct file *filp)
struct video_device *vdev = video_devdata(filp);
struct v4l2_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);
- filp->private_data = fh;
if (fh == NULL)
return -ENOMEM;
v4l2_fh_init(fh, vdev);
- v4l2_fh_add(fh);
+ v4l2_fh_add(fh, filp);
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fh_open);
-void v4l2_fh_del(struct v4l2_fh *fh)
+void v4l2_fh_del(struct v4l2_fh *fh, struct file *filp)
{
unsigned long flags;
@@ -74,6 +75,8 @@ void v4l2_fh_del(struct v4l2_fh *fh)
list_del_init(&fh->list);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
v4l2_prio_close(fh->vdev->prio, fh->prio);
+
+ filp->private_data = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_fh_del);
@@ -90,13 +93,12 @@ EXPORT_SYMBOL_GPL(v4l2_fh_exit);
int v4l2_fh_release(struct file *filp)
{
- struct v4l2_fh *fh = filp->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(filp);
if (fh) {
- v4l2_fh_del(fh);
+ v4l2_fh_del(fh, filp);
v4l2_fh_exit(fh);
kfree(fh);
- filp->private_data = NULL;
}
return 0;
}
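
Under the new signatures, v4l2_fh_add() stores the fh in filp->private_data itself and v4l2_fh_del() clears it again, so drivers that roll their own open() no longer touch private_data directly. A minimal sketch, assuming a hypothetical per-open struct my_fh that embeds the v4l2_fh:

	struct my_fh {
		struct v4l2_fh fh;	/* recovered later via file_to_v4l2_fh() */
		/* driver-private per-open state follows */
	};

	static int my_open(struct file *filp)
	{
		struct video_device *vdev = video_devdata(filp);
		struct my_fh *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

		if (!ctx)
			return -ENOMEM;
		v4l2_fh_init(&ctx->fh, vdev);
		/* also sets filp->private_data = &ctx->fh */
		v4l2_fh_add(&ctx->fh, filp);
		return 0;
	}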
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 46da373066f4..01cf52c3ea33 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -1089,8 +1089,8 @@ static void v4l_sanitize_format(struct v4l2_format *fmt)
}
}
-static int v4l_querycap(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_querycap(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct v4l2_capability *cap = (struct v4l2_capability *)arg;
struct video_device *vfd = video_devdata(file);
@@ -1103,7 +1103,7 @@ static int v4l_querycap(const struct v4l2_ioctl_ops *ops,
media_set_bus_info(cap->bus_info, sizeof(cap->bus_info),
vfd->dev_parent);
- ret = ops->vidioc_querycap(file, fh, cap);
+ ret = ops->vidioc_querycap(file, NULL, cap);
/*
* Drivers must not change device_caps, so check for this and
@@ -1123,8 +1123,8 @@ static int v4l_querycap(const struct v4l2_ioctl_ops *ops,
return ret;
}
-static int v4l_g_input(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_g_input(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
@@ -1133,11 +1133,11 @@ static int v4l_g_input(const struct v4l2_ioctl_ops *ops,
return 0;
}
- return ops->vidioc_g_input(file, fh, arg);
+ return ops->vidioc_g_input(file, NULL, arg);
}
-static int v4l_g_output(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_g_output(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
@@ -1146,11 +1146,11 @@ static int v4l_g_output(const struct v4l2_ioctl_ops *ops,
return 0;
}
- return ops->vidioc_g_output(file, fh, arg);
+ return ops->vidioc_g_output(file, NULL, arg);
}
-static int v4l_s_input(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_s_input(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
int ret;
@@ -1162,22 +1162,22 @@ static int v4l_s_input(const struct v4l2_ioctl_ops *ops,
if (vfd->device_caps & V4L2_CAP_IO_MC)
return *(int *)arg ? -EINVAL : 0;
- return ops->vidioc_s_input(file, fh, *(unsigned int *)arg);
+ return ops->vidioc_s_input(file, NULL, *(unsigned int *)arg);
}
-static int v4l_s_output(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_s_output(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
if (vfd->device_caps & V4L2_CAP_IO_MC)
return *(int *)arg ? -EINVAL : 0;
- return ops->vidioc_s_output(file, fh, *(unsigned int *)arg);
+ return ops->vidioc_s_output(file, NULL, *(unsigned int *)arg);
}
-static int v4l_g_priority(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_g_priority(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd;
u32 *p = arg;
@@ -1187,22 +1187,20 @@ static int v4l_g_priority(const struct v4l2_ioctl_ops *ops,
return 0;
}
-static int v4l_s_priority(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_s_priority(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd;
struct v4l2_fh *vfh;
u32 *p = arg;
vfd = video_devdata(file);
- if (!test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags))
- return -ENOTTY;
- vfh = file->private_data;
+ vfh = file_to_v4l2_fh(file);
return v4l2_prio_change(vfd->prio, &vfh->prio, *p);
}
-static int v4l_enuminput(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_enuminput(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_input *p = arg;
@@ -1224,11 +1222,11 @@ static int v4l_enuminput(const struct v4l2_ioctl_ops *ops,
return 0;
}
- return ops->vidioc_enum_input(file, fh, p);
+ return ops->vidioc_enum_input(file, NULL, p);
}
-static int v4l_enumoutput(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_enumoutput(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_output *p = arg;
@@ -1250,7 +1248,7 @@ static int v4l_enumoutput(const struct v4l2_ioctl_ops *ops,
return 0;
}
- return ops->vidioc_enum_output(file, fh, p);
+ return ops->vidioc_enum_output(file, NULL, p);
}
static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
@@ -1589,8 +1587,8 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
fmt->flags |= flags;
}
-static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vdev = video_devdata(file);
struct v4l2_fmtdesc *p = arg;
@@ -1620,12 +1618,12 @@ static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops,
if (unlikely(!ops->vidioc_enum_fmt_vid_cap))
break;
- ret = ops->vidioc_enum_fmt_vid_cap(file, fh, arg);
+ ret = ops->vidioc_enum_fmt_vid_cap(file, NULL, arg);
break;
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
if (unlikely(!ops->vidioc_enum_fmt_vid_overlay))
break;
- ret = ops->vidioc_enum_fmt_vid_overlay(file, fh, arg);
+ ret = ops->vidioc_enum_fmt_vid_overlay(file, NULL, arg);
break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
@@ -1637,27 +1635,27 @@ static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops,
if (unlikely(!ops->vidioc_enum_fmt_vid_out))
break;
- ret = ops->vidioc_enum_fmt_vid_out(file, fh, arg);
+ ret = ops->vidioc_enum_fmt_vid_out(file, NULL, arg);
break;
case V4L2_BUF_TYPE_SDR_CAPTURE:
if (unlikely(!ops->vidioc_enum_fmt_sdr_cap))
break;
- ret = ops->vidioc_enum_fmt_sdr_cap(file, fh, arg);
+ ret = ops->vidioc_enum_fmt_sdr_cap(file, NULL, arg);
break;
case V4L2_BUF_TYPE_SDR_OUTPUT:
if (unlikely(!ops->vidioc_enum_fmt_sdr_out))
break;
- ret = ops->vidioc_enum_fmt_sdr_out(file, fh, arg);
+ ret = ops->vidioc_enum_fmt_sdr_out(file, NULL, arg);
break;
case V4L2_BUF_TYPE_META_CAPTURE:
if (unlikely(!ops->vidioc_enum_fmt_meta_cap))
break;
- ret = ops->vidioc_enum_fmt_meta_cap(file, fh, arg);
+ ret = ops->vidioc_enum_fmt_meta_cap(file, NULL, arg);
break;
case V4L2_BUF_TYPE_META_OUTPUT:
if (unlikely(!ops->vidioc_enum_fmt_meta_out))
break;
- ret = ops->vidioc_enum_fmt_meta_out(file, fh, arg);
+ ret = ops->vidioc_enum_fmt_meta_out(file, NULL, arg);
break;
}
if (ret == 0)
@@ -1680,8 +1678,8 @@ static void v4l_pix_format_touch(struct v4l2_pix_format *p)
p->xfer_func = 0;
}
-static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct v4l2_format *p = arg;
struct video_device *vfd = video_devdata(file);
@@ -1697,50 +1695,50 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
if (unlikely(!ops->vidioc_g_fmt_vid_cap))
break;
p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
- ret = ops->vidioc_g_fmt_vid_cap(file, fh, arg);
+ ret = ops->vidioc_g_fmt_vid_cap(file, NULL, arg);
/* just in case the driver zeroed it again */
p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
if (vfd->vfl_type == VFL_TYPE_TOUCH)
v4l_pix_format_touch(&p->fmt.pix);
return ret;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- return ops->vidioc_g_fmt_vid_cap_mplane(file, fh, arg);
+ return ops->vidioc_g_fmt_vid_cap_mplane(file, NULL, arg);
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
- return ops->vidioc_g_fmt_vid_overlay(file, fh, arg);
+ return ops->vidioc_g_fmt_vid_overlay(file, NULL, arg);
case V4L2_BUF_TYPE_VBI_CAPTURE:
- return ops->vidioc_g_fmt_vbi_cap(file, fh, arg);
+ return ops->vidioc_g_fmt_vbi_cap(file, NULL, arg);
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
- return ops->vidioc_g_fmt_sliced_vbi_cap(file, fh, arg);
+ return ops->vidioc_g_fmt_sliced_vbi_cap(file, NULL, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
if (unlikely(!ops->vidioc_g_fmt_vid_out))
break;
p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
- ret = ops->vidioc_g_fmt_vid_out(file, fh, arg);
+ ret = ops->vidioc_g_fmt_vid_out(file, NULL, arg);
/* just in case the driver zeroed it again */
p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
return ret;
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- return ops->vidioc_g_fmt_vid_out_mplane(file, fh, arg);
+ return ops->vidioc_g_fmt_vid_out_mplane(file, NULL, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
- return ops->vidioc_g_fmt_vid_out_overlay(file, fh, arg);
+ return ops->vidioc_g_fmt_vid_out_overlay(file, NULL, arg);
case V4L2_BUF_TYPE_VBI_OUTPUT:
- return ops->vidioc_g_fmt_vbi_out(file, fh, arg);
+ return ops->vidioc_g_fmt_vbi_out(file, NULL, arg);
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
- return ops->vidioc_g_fmt_sliced_vbi_out(file, fh, arg);
+ return ops->vidioc_g_fmt_sliced_vbi_out(file, NULL, arg);
case V4L2_BUF_TYPE_SDR_CAPTURE:
- return ops->vidioc_g_fmt_sdr_cap(file, fh, arg);
+ return ops->vidioc_g_fmt_sdr_cap(file, NULL, arg);
case V4L2_BUF_TYPE_SDR_OUTPUT:
- return ops->vidioc_g_fmt_sdr_out(file, fh, arg);
+ return ops->vidioc_g_fmt_sdr_out(file, NULL, arg);
case V4L2_BUF_TYPE_META_CAPTURE:
- return ops->vidioc_g_fmt_meta_cap(file, fh, arg);
+ return ops->vidioc_g_fmt_meta_cap(file, NULL, arg);
case V4L2_BUF_TYPE_META_OUTPUT:
- return ops->vidioc_g_fmt_meta_out(file, fh, arg);
+ return ops->vidioc_g_fmt_meta_out(file, NULL, arg);
}
return -EINVAL;
}
-static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct v4l2_format *p = arg;
struct video_device *vfd = video_devdata(file);
@@ -1760,7 +1758,7 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
if (unlikely(!ops->vidioc_s_fmt_vid_cap))
break;
memset_after(p, 0, fmt.pix);
- ret = ops->vidioc_s_fmt_vid_cap(file, fh, arg);
+ ret = ops->vidioc_s_fmt_vid_cap(file, NULL, arg);
/* just in case the driver zeroed it again */
p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
if (vfd->vfl_type == VFL_TYPE_TOUCH)
@@ -1773,7 +1771,7 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
memset_after(&p->fmt.pix_mp.plane_fmt[i],
0, bytesperline);
- return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg);
+ return ops->vidioc_s_fmt_vid_cap_mplane(file, NULL, arg);
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
if (unlikely(!ops->vidioc_s_fmt_vid_overlay))
break;
@@ -1781,22 +1779,22 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
p->fmt.win.clips = NULL;
p->fmt.win.clipcount = 0;
p->fmt.win.bitmap = NULL;
- return ops->vidioc_s_fmt_vid_overlay(file, fh, arg);
+ return ops->vidioc_s_fmt_vid_overlay(file, NULL, arg);
case V4L2_BUF_TYPE_VBI_CAPTURE:
if (unlikely(!ops->vidioc_s_fmt_vbi_cap))
break;
memset_after(p, 0, fmt.vbi.flags);
- return ops->vidioc_s_fmt_vbi_cap(file, fh, arg);
+ return ops->vidioc_s_fmt_vbi_cap(file, NULL, arg);
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_cap))
break;
memset_after(p, 0, fmt.sliced.io_size);
- return ops->vidioc_s_fmt_sliced_vbi_cap(file, fh, arg);
+ return ops->vidioc_s_fmt_sliced_vbi_cap(file, NULL, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_vid_out))
break;
memset_after(p, 0, fmt.pix);
- ret = ops->vidioc_s_fmt_vid_out(file, fh, arg);
+ ret = ops->vidioc_s_fmt_vid_out(file, NULL, arg);
/* just in case the driver zeroed it again */
p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
return ret;
@@ -1807,7 +1805,7 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
memset_after(&p->fmt.pix_mp.plane_fmt[i],
0, bytesperline);
- return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg);
+ return ops->vidioc_s_fmt_vid_out_mplane(file, NULL, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay))
break;
@@ -1815,43 +1813,43 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
p->fmt.win.clips = NULL;
p->fmt.win.clipcount = 0;
p->fmt.win.bitmap = NULL;
- return ops->vidioc_s_fmt_vid_out_overlay(file, fh, arg);
+ return ops->vidioc_s_fmt_vid_out_overlay(file, NULL, arg);
case V4L2_BUF_TYPE_VBI_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_vbi_out))
break;
memset_after(p, 0, fmt.vbi.flags);
- return ops->vidioc_s_fmt_vbi_out(file, fh, arg);
+ return ops->vidioc_s_fmt_vbi_out(file, NULL, arg);
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_out))
break;
memset_after(p, 0, fmt.sliced.io_size);
- return ops->vidioc_s_fmt_sliced_vbi_out(file, fh, arg);
+ return ops->vidioc_s_fmt_sliced_vbi_out(file, NULL, arg);
case V4L2_BUF_TYPE_SDR_CAPTURE:
if (unlikely(!ops->vidioc_s_fmt_sdr_cap))
break;
memset_after(p, 0, fmt.sdr.buffersize);
- return ops->vidioc_s_fmt_sdr_cap(file, fh, arg);
+ return ops->vidioc_s_fmt_sdr_cap(file, NULL, arg);
case V4L2_BUF_TYPE_SDR_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_sdr_out))
break;
memset_after(p, 0, fmt.sdr.buffersize);
- return ops->vidioc_s_fmt_sdr_out(file, fh, arg);
+ return ops->vidioc_s_fmt_sdr_out(file, NULL, arg);
case V4L2_BUF_TYPE_META_CAPTURE:
if (unlikely(!ops->vidioc_s_fmt_meta_cap))
break;
memset_after(p, 0, fmt.meta);
- return ops->vidioc_s_fmt_meta_cap(file, fh, arg);
+ return ops->vidioc_s_fmt_meta_cap(file, NULL, arg);
case V4L2_BUF_TYPE_META_OUTPUT:
if (unlikely(!ops->vidioc_s_fmt_meta_out))
break;
memset_after(p, 0, fmt.meta);
- return ops->vidioc_s_fmt_meta_out(file, fh, arg);
+ return ops->vidioc_s_fmt_meta_out(file, NULL, arg);
}
return -EINVAL;
}
-static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct v4l2_format *p = arg;
struct video_device *vfd = video_devdata(file);
@@ -1868,7 +1866,7 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
if (unlikely(!ops->vidioc_try_fmt_vid_cap))
break;
memset_after(p, 0, fmt.pix);
- ret = ops->vidioc_try_fmt_vid_cap(file, fh, arg);
+ ret = ops->vidioc_try_fmt_vid_cap(file, NULL, arg);
/* just in case the driver zeroed it again */
p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
if (vfd->vfl_type == VFL_TYPE_TOUCH)
@@ -1881,7 +1879,7 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
memset_after(&p->fmt.pix_mp.plane_fmt[i],
0, bytesperline);
- return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg);
+ return ops->vidioc_try_fmt_vid_cap_mplane(file, NULL, arg);
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
if (unlikely(!ops->vidioc_try_fmt_vid_overlay))
break;
@@ -1889,22 +1887,22 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
p->fmt.win.clips = NULL;
p->fmt.win.clipcount = 0;
p->fmt.win.bitmap = NULL;
- return ops->vidioc_try_fmt_vid_overlay(file, fh, arg);
+ return ops->vidioc_try_fmt_vid_overlay(file, NULL, arg);
case V4L2_BUF_TYPE_VBI_CAPTURE:
if (unlikely(!ops->vidioc_try_fmt_vbi_cap))
break;
memset_after(p, 0, fmt.vbi.flags);
- return ops->vidioc_try_fmt_vbi_cap(file, fh, arg);
+ return ops->vidioc_try_fmt_vbi_cap(file, NULL, arg);
case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_cap))
break;
memset_after(p, 0, fmt.sliced.io_size);
- return ops->vidioc_try_fmt_sliced_vbi_cap(file, fh, arg);
+ return ops->vidioc_try_fmt_sliced_vbi_cap(file, NULL, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_vid_out))
break;
memset_after(p, 0, fmt.pix);
- ret = ops->vidioc_try_fmt_vid_out(file, fh, arg);
+ ret = ops->vidioc_try_fmt_vid_out(file, NULL, arg);
/* just in case the driver zeroed it again */
p->fmt.pix.priv = V4L2_PIX_FMT_PRIV_MAGIC;
return ret;
@@ -1915,7 +1913,7 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
memset_after(&p->fmt.pix_mp.plane_fmt[i],
0, bytesperline);
- return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg);
+ return ops->vidioc_try_fmt_vid_out_mplane(file, NULL, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay))
break;
@@ -1923,55 +1921,55 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
p->fmt.win.clips = NULL;
p->fmt.win.clipcount = 0;
p->fmt.win.bitmap = NULL;
- return ops->vidioc_try_fmt_vid_out_overlay(file, fh, arg);
+ return ops->vidioc_try_fmt_vid_out_overlay(file, NULL, arg);
case V4L2_BUF_TYPE_VBI_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_vbi_out))
break;
memset_after(p, 0, fmt.vbi.flags);
- return ops->vidioc_try_fmt_vbi_out(file, fh, arg);
+ return ops->vidioc_try_fmt_vbi_out(file, NULL, arg);
case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_out))
break;
memset_after(p, 0, fmt.sliced.io_size);
- return ops->vidioc_try_fmt_sliced_vbi_out(file, fh, arg);
+ return ops->vidioc_try_fmt_sliced_vbi_out(file, NULL, arg);
case V4L2_BUF_TYPE_SDR_CAPTURE:
if (unlikely(!ops->vidioc_try_fmt_sdr_cap))
break;
memset_after(p, 0, fmt.sdr.buffersize);
- return ops->vidioc_try_fmt_sdr_cap(file, fh, arg);
+ return ops->vidioc_try_fmt_sdr_cap(file, NULL, arg);
case V4L2_BUF_TYPE_SDR_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_sdr_out))
break;
memset_after(p, 0, fmt.sdr.buffersize);
- return ops->vidioc_try_fmt_sdr_out(file, fh, arg);
+ return ops->vidioc_try_fmt_sdr_out(file, NULL, arg);
case V4L2_BUF_TYPE_META_CAPTURE:
if (unlikely(!ops->vidioc_try_fmt_meta_cap))
break;
memset_after(p, 0, fmt.meta);
- return ops->vidioc_try_fmt_meta_cap(file, fh, arg);
+ return ops->vidioc_try_fmt_meta_cap(file, NULL, arg);
case V4L2_BUF_TYPE_META_OUTPUT:
if (unlikely(!ops->vidioc_try_fmt_meta_out))
break;
memset_after(p, 0, fmt.meta);
- return ops->vidioc_try_fmt_meta_out(file, fh, arg);
+ return ops->vidioc_try_fmt_meta_out(file, NULL, arg);
}
return -EINVAL;
}
-static int v4l_streamon(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_streamon(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
- return ops->vidioc_streamon(file, fh, *(unsigned int *)arg);
+ return ops->vidioc_streamon(file, NULL, *(unsigned int *)arg);
}
-static int v4l_streamoff(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_streamoff(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
- return ops->vidioc_streamoff(file, fh, *(unsigned int *)arg);
+ return ops->vidioc_streamoff(file, NULL, *(unsigned int *)arg);
}
-static int v4l_g_tuner(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_g_tuner(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_tuner *p = arg;
@@ -1979,14 +1977,14 @@ static int v4l_g_tuner(const struct v4l2_ioctl_ops *ops,
p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
- err = ops->vidioc_g_tuner(file, fh, p);
+ err = ops->vidioc_g_tuner(file, NULL, p);
if (!err)
p->capability |= V4L2_TUNER_CAP_FREQ_BANDS;
return err;
}
-static int v4l_s_tuner(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_s_tuner(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_tuner *p = arg;
@@ -1997,11 +1995,11 @@ static int v4l_s_tuner(const struct v4l2_ioctl_ops *ops,
return ret;
p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
- return ops->vidioc_s_tuner(file, fh, p);
+ return ops->vidioc_s_tuner(file, NULL, p);
}
static int v4l_g_modulator(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_modulator *p = arg;
@@ -2010,14 +2008,14 @@ static int v4l_g_modulator(const struct v4l2_ioctl_ops *ops,
if (vfd->vfl_type == VFL_TYPE_RADIO)
p->type = V4L2_TUNER_RADIO;
- err = ops->vidioc_g_modulator(file, fh, p);
+ err = ops->vidioc_g_modulator(file, NULL, p);
if (!err)
p->capability |= V4L2_TUNER_CAP_FREQ_BANDS;
return err;
}
static int v4l_s_modulator(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_modulator *p = arg;
@@ -2025,11 +2023,11 @@ static int v4l_s_modulator(const struct v4l2_ioctl_ops *ops,
if (vfd->vfl_type == VFL_TYPE_RADIO)
p->type = V4L2_TUNER_RADIO;
- return ops->vidioc_s_modulator(file, fh, p);
+ return ops->vidioc_s_modulator(file, NULL, p);
}
static int v4l_g_frequency(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_frequency *p = arg;
@@ -2039,11 +2037,11 @@ static int v4l_g_frequency(const struct v4l2_ioctl_ops *ops,
else
p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
- return ops->vidioc_g_frequency(file, fh, p);
+ return ops->vidioc_g_frequency(file, NULL, p);
}
static int v4l_s_frequency(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct video_device *vfd = video_devdata(file);
const struct v4l2_frequency *p = arg;
@@ -2062,11 +2060,11 @@ static int v4l_s_frequency(const struct v4l2_ioctl_ops *ops,
if (type != p->type)
return -EINVAL;
}
- return ops->vidioc_s_frequency(file, fh, p);
+ return ops->vidioc_s_frequency(file, NULL, p);
}
-static int v4l_enumstd(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_enumstd(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_standard *p = arg;
@@ -2074,8 +2072,8 @@ static int v4l_enumstd(const struct v4l2_ioctl_ops *ops,
return v4l_video_std_enumstd(p, vfd->tvnorms);
}
-static int v4l_s_std(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_s_std(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
v4l2_std_id id = *(v4l2_std_id *)arg, norm;
@@ -2089,11 +2087,11 @@ static int v4l_s_std(const struct v4l2_ioctl_ops *ops,
return -EINVAL;
/* Calls the specific handler */
- return ops->vidioc_s_std(file, fh, norm);
+ return ops->vidioc_s_std(file, NULL, norm);
}
-static int v4l_querystd(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_querystd(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
v4l2_std_id *p = arg;
@@ -2111,11 +2109,11 @@ static int v4l_querystd(const struct v4l2_ioctl_ops *ops,
* their efforts to improve the standards detection.
*/
*p = vfd->tvnorms;
- return ops->vidioc_querystd(file, fh, arg);
+ return ops->vidioc_querystd(file, NULL, arg);
}
static int v4l_s_hw_freq_seek(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_hw_freq_seek *p = arg;
@@ -2133,26 +2131,26 @@ static int v4l_s_hw_freq_seek(const struct v4l2_ioctl_ops *ops,
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
if (p->type != type)
return -EINVAL;
- return ops->vidioc_s_hw_freq_seek(file, fh, p);
+ return ops->vidioc_s_hw_freq_seek(file, NULL, p);
}
-static int v4l_s_fbuf(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_s_fbuf(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct v4l2_framebuffer *p = arg;
p->base = NULL;
- return ops->vidioc_s_fbuf(file, fh, p);
+ return ops->vidioc_s_fbuf(file, NULL, p);
}
-static int v4l_overlay(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_overlay(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
- return ops->vidioc_overlay(file, fh, *(unsigned int *)arg);
+ return ops->vidioc_overlay(file, NULL, *(unsigned int *)arg);
}
-static int v4l_reqbufs(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_reqbufs(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_requestbuffers *p = arg;
@@ -2167,38 +2165,38 @@ static int v4l_reqbufs(const struct v4l2_ioctl_ops *ops,
if (is_valid_ioctl(vfd, VIDIOC_REMOVE_BUFS))
p->capabilities = V4L2_BUF_CAP_SUPPORTS_REMOVE_BUFS;
- return ops->vidioc_reqbufs(file, fh, p);
+ return ops->vidioc_reqbufs(file, NULL, p);
}
-static int v4l_querybuf(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_querybuf(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct v4l2_buffer *p = arg;
int ret = check_fmt(file, p->type);
- return ret ? ret : ops->vidioc_querybuf(file, fh, p);
+ return ret ? ret : ops->vidioc_querybuf(file, NULL, p);
}
-static int v4l_qbuf(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_qbuf(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct v4l2_buffer *p = arg;
int ret = check_fmt(file, p->type);
- return ret ? ret : ops->vidioc_qbuf(file, fh, p);
+ return ret ? ret : ops->vidioc_qbuf(file, NULL, p);
}
-static int v4l_dqbuf(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_dqbuf(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct v4l2_buffer *p = arg;
int ret = check_fmt(file, p->type);
- return ret ? ret : ops->vidioc_dqbuf(file, fh, p);
+ return ret ? ret : ops->vidioc_dqbuf(file, NULL, p);
}
static int v4l_create_bufs(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_create_buffers *create = arg;
@@ -2215,7 +2213,7 @@ static int v4l_create_bufs(const struct v4l2_ioctl_ops *ops,
if (is_valid_ioctl(vfd, VIDIOC_REMOVE_BUFS))
create->capabilities = V4L2_BUF_CAP_SUPPORTS_REMOVE_BUFS;
- ret = ops->vidioc_create_bufs(file, fh, create);
+ ret = ops->vidioc_create_bufs(file, NULL, create);
if (create->format.type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
create->format.type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
@@ -2225,27 +2223,27 @@ static int v4l_create_bufs(const struct v4l2_ioctl_ops *ops,
}
static int v4l_prepare_buf(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct v4l2_buffer *b = arg;
int ret = check_fmt(file, b->type);
- return ret ? ret : ops->vidioc_prepare_buf(file, fh, b);
+ return ret ? ret : ops->vidioc_prepare_buf(file, NULL, b);
}
static int v4l_remove_bufs(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct v4l2_remove_buffers *remove = arg;
if (ops->vidioc_remove_bufs)
- return ops->vidioc_remove_bufs(file, fh, remove);
+ return ops->vidioc_remove_bufs(file, NULL, remove);
return -ENOTTY;
}
-static int v4l_g_parm(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_g_parm(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_streamparm *p = arg;
@@ -2255,20 +2253,20 @@ static int v4l_g_parm(const struct v4l2_ioctl_ops *ops,
if (ret)
return ret;
if (ops->vidioc_g_parm)
- return ops->vidioc_g_parm(file, fh, p);
+ return ops->vidioc_g_parm(file, NULL, p);
if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
return -EINVAL;
if (vfd->device_caps & V4L2_CAP_READWRITE)
p->parm.capture.readbuffers = 2;
- ret = ops->vidioc_g_std(file, fh, &std);
+ ret = ops->vidioc_g_std(file, NULL, &std);
if (ret == 0)
v4l2_video_std_frame_period(std, &p->parm.capture.timeperframe);
return ret;
}
-static int v4l_s_parm(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_s_parm(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct v4l2_streamparm *p = arg;
int ret = check_fmt(file, p->type);
@@ -2288,17 +2286,16 @@ static int v4l_s_parm(const struct v4l2_ioctl_ops *ops,
p->parm.capture.extendedmode = 0;
p->parm.capture.capturemode &= V4L2_MODE_HIGHQUALITY;
}
- return ops->vidioc_s_parm(file, fh, p);
+ return ops->vidioc_s_parm(file, NULL, p);
}
-static int v4l_queryctrl(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_queryctrl(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_query_ext_ctrl qec = {};
struct v4l2_queryctrl *p = arg;
- struct v4l2_fh *vfh =
- test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
int ret;
if (vfh && vfh->ctrl_handler)
@@ -2310,7 +2307,7 @@ static int v4l_queryctrl(const struct v4l2_ioctl_ops *ops,
/* Simulate query_ctrl using query_ext_ctrl. */
qec.id = p->id;
- ret = ops->vidioc_query_ext_ctrl(file, fh, &qec);
+ ret = ops->vidioc_query_ext_ctrl(file, NULL, &qec);
if (ret)
return ret;
v4l2_query_ext_ctrl_to_v4l2_queryctrl(p, &qec);
@@ -2318,46 +2315,43 @@ static int v4l_queryctrl(const struct v4l2_ioctl_ops *ops,
}
static int v4l_query_ext_ctrl(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_query_ext_ctrl *p = arg;
- struct v4l2_fh *vfh =
- test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
if (vfh && vfh->ctrl_handler)
return v4l2_query_ext_ctrl(vfh->ctrl_handler, p);
if (vfd->ctrl_handler)
return v4l2_query_ext_ctrl(vfd->ctrl_handler, p);
if (ops->vidioc_query_ext_ctrl)
- return ops->vidioc_query_ext_ctrl(file, fh, p);
+ return ops->vidioc_query_ext_ctrl(file, NULL, p);
return -ENOTTY;
}
-static int v4l_querymenu(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_querymenu(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_querymenu *p = arg;
- struct v4l2_fh *vfh =
- test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
if (vfh && vfh->ctrl_handler)
return v4l2_querymenu(vfh->ctrl_handler, p);
if (vfd->ctrl_handler)
return v4l2_querymenu(vfd->ctrl_handler, p);
if (ops->vidioc_querymenu)
- return ops->vidioc_querymenu(file, fh, p);
+ return ops->vidioc_querymenu(file, NULL, p);
return -ENOTTY;
}
-static int v4l_g_ctrl(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_g_ctrl(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_control *p = arg;
- struct v4l2_fh *vfh =
- test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct v4l2_ext_controls ctrls;
struct v4l2_ext_control ctrl;
@@ -2374,7 +2368,7 @@ static int v4l_g_ctrl(const struct v4l2_ioctl_ops *ops,
ctrl.id = p->id;
ctrl.value = p->value;
if (check_ext_ctrls(&ctrls, VIDIOC_G_CTRL)) {
- int ret = ops->vidioc_g_ext_ctrls(file, fh, &ctrls);
+ int ret = ops->vidioc_g_ext_ctrls(file, NULL, &ctrls);
if (ret == 0)
p->value = ctrl.value;
@@ -2383,13 +2377,12 @@ static int v4l_g_ctrl(const struct v4l2_ioctl_ops *ops,
return -EINVAL;
}
-static int v4l_s_ctrl(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_s_ctrl(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_control *p = arg;
- struct v4l2_fh *vfh =
- test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct v4l2_ext_controls ctrls;
struct v4l2_ext_control ctrl;
int ret;
@@ -2408,18 +2401,17 @@ static int v4l_s_ctrl(const struct v4l2_ioctl_ops *ops,
ctrl.value = p->value;
if (!check_ext_ctrls(&ctrls, VIDIOC_S_CTRL))
return -EINVAL;
- ret = ops->vidioc_s_ext_ctrls(file, fh, &ctrls);
+ ret = ops->vidioc_s_ext_ctrls(file, NULL, &ctrls);
p->value = ctrl.value;
return ret;
}
static int v4l_g_ext_ctrls(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_ext_controls *p = arg;
- struct v4l2_fh *vfh =
- test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
p->error_idx = p->count;
if (vfh && vfh->ctrl_handler)
@@ -2431,16 +2423,15 @@ static int v4l_g_ext_ctrls(const struct v4l2_ioctl_ops *ops,
if (ops->vidioc_g_ext_ctrls == NULL)
return -ENOTTY;
return check_ext_ctrls(p, VIDIOC_G_EXT_CTRLS) ?
- ops->vidioc_g_ext_ctrls(file, fh, p) : -EINVAL;
+ ops->vidioc_g_ext_ctrls(file, NULL, p) : -EINVAL;
}
static int v4l_s_ext_ctrls(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_ext_controls *p = arg;
- struct v4l2_fh *vfh =
- test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
p->error_idx = p->count;
if (vfh && vfh->ctrl_handler)
@@ -2452,16 +2443,15 @@ static int v4l_s_ext_ctrls(const struct v4l2_ioctl_ops *ops,
if (ops->vidioc_s_ext_ctrls == NULL)
return -ENOTTY;
return check_ext_ctrls(p, VIDIOC_S_EXT_CTRLS) ?
- ops->vidioc_s_ext_ctrls(file, fh, p) : -EINVAL;
+ ops->vidioc_s_ext_ctrls(file, NULL, p) : -EINVAL;
}
static int v4l_try_ext_ctrls(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_ext_controls *p = arg;
- struct v4l2_fh *vfh =
- test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
p->error_idx = p->count;
if (vfh && vfh->ctrl_handler)
@@ -2473,7 +2463,7 @@ static int v4l_try_ext_ctrls(const struct v4l2_ioctl_ops *ops,
if (ops->vidioc_try_ext_ctrls == NULL)
return -ENOTTY;
return check_ext_ctrls(p, VIDIOC_TRY_EXT_CTRLS) ?
- ops->vidioc_try_ext_ctrls(file, fh, p) : -EINVAL;
+ ops->vidioc_try_ext_ctrls(file, NULL, p) : -EINVAL;
}
/*
@@ -2486,7 +2476,7 @@ static int v4l_try_ext_ctrls(const struct v4l2_ioctl_ops *ops,
* type and drivers don't need to check for both.
*/
static int v4l_g_selection(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct v4l2_selection *p = arg;
u32 old_type = p->type;
@@ -2496,13 +2486,13 @@ static int v4l_g_selection(const struct v4l2_ioctl_ops *ops,
p->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
p->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- ret = ops->vidioc_g_selection(file, fh, p);
+ ret = ops->vidioc_g_selection(file, NULL, p);
p->type = old_type;
return ret;
}
static int v4l_s_selection(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct v4l2_selection *p = arg;
u32 old_type = p->type;
@@ -2512,13 +2502,13 @@ static int v4l_s_selection(const struct v4l2_ioctl_ops *ops,
p->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
p->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- ret = ops->vidioc_s_selection(file, fh, p);
+ ret = ops->vidioc_s_selection(file, NULL, p);
p->type = old_type;
return ret;
}
-static int v4l_g_crop(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_g_crop(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_crop *p = arg;
@@ -2539,7 +2529,7 @@ static int v4l_g_crop(const struct v4l2_ioctl_ops *ops,
s.target = s.target == V4L2_SEL_TGT_COMPOSE ?
V4L2_SEL_TGT_CROP : V4L2_SEL_TGT_COMPOSE;
- ret = v4l_g_selection(ops, file, fh, &s);
+ ret = v4l_g_selection(ops, file, &s);
/* copying results to old structure on success */
if (!ret)
@@ -2547,8 +2537,8 @@ static int v4l_g_crop(const struct v4l2_ioctl_ops *ops,
return ret;
}
-static int v4l_s_crop(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_s_crop(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_crop *p = arg;
@@ -2569,11 +2559,11 @@ static int v4l_s_crop(const struct v4l2_ioctl_ops *ops,
s.target = s.target == V4L2_SEL_TGT_COMPOSE ?
V4L2_SEL_TGT_CROP : V4L2_SEL_TGT_COMPOSE;
- return v4l_s_selection(ops, file, fh, &s);
+ return v4l_s_selection(ops, file, &s);
}
-static int v4l_cropcap(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_cropcap(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_cropcap *p = arg;
@@ -2597,7 +2587,7 @@ static int v4l_cropcap(const struct v4l2_ioctl_ops *ops,
return -ENOTTY;
if (ops->vidioc_g_pixelaspect)
- ret = ops->vidioc_g_pixelaspect(file, fh, s.type,
+ ret = ops->vidioc_g_pixelaspect(file, NULL, s.type,
&p->pixelaspect);
/*
@@ -2619,7 +2609,7 @@ static int v4l_cropcap(const struct v4l2_ioctl_ops *ops,
s.target = s.target == V4L2_SEL_TGT_COMPOSE_BOUNDS ?
V4L2_SEL_TGT_CROP_BOUNDS : V4L2_SEL_TGT_COMPOSE_BOUNDS;
- ret = v4l_g_selection(ops, file, fh, &s);
+ ret = v4l_g_selection(ops, file, &s);
if (ret)
return ret;
p->bounds = s.r;
@@ -2630,7 +2620,7 @@ static int v4l_cropcap(const struct v4l2_ioctl_ops *ops,
else
s.target = V4L2_SEL_TGT_CROP_DEFAULT;
- ret = v4l_g_selection(ops, file, fh, &s);
+ ret = v4l_g_selection(ops, file, &s);
if (ret)
return ret;
p->defrect = s.r;
@@ -2638,8 +2628,8 @@ static int v4l_cropcap(const struct v4l2_ioctl_ops *ops,
return 0;
}
-static int v4l_log_status(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_log_status(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
struct video_device *vfd = video_devdata(file);
int ret;
@@ -2647,7 +2637,7 @@ static int v4l_log_status(const struct v4l2_ioctl_ops *ops,
if (vfd->v4l2_dev)
pr_info("%s: ================= START STATUS =================\n",
vfd->v4l2_dev->name);
- ret = ops->vidioc_log_status(file, fh);
+ ret = ops->vidioc_log_status(file, NULL);
if (vfd->v4l2_dev)
pr_info("%s: ================== END STATUS ==================\n",
vfd->v4l2_dev->name);
@@ -2655,7 +2645,7 @@ static int v4l_log_status(const struct v4l2_ioctl_ops *ops,
}
static int v4l_dbg_g_register(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
#ifdef CONFIG_VIDEO_ADV_DEBUG
struct v4l2_dbg_register *p = arg;
@@ -2675,7 +2665,7 @@ static int v4l_dbg_g_register(const struct v4l2_ioctl_ops *ops,
}
if (ops->vidioc_g_register && p->match.type == V4L2_CHIP_MATCH_BRIDGE &&
(ops->vidioc_g_chip_info || p->match.addr == 0))
- return ops->vidioc_g_register(file, fh, p);
+ return ops->vidioc_g_register(file, NULL, p);
return -EINVAL;
#else
return -ENOTTY;
@@ -2683,7 +2673,7 @@ static int v4l_dbg_g_register(const struct v4l2_ioctl_ops *ops,
}
static int v4l_dbg_s_register(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
#ifdef CONFIG_VIDEO_ADV_DEBUG
const struct v4l2_dbg_register *p = arg;
@@ -2703,7 +2693,7 @@ static int v4l_dbg_s_register(const struct v4l2_ioctl_ops *ops,
}
if (ops->vidioc_s_register && p->match.type == V4L2_CHIP_MATCH_BRIDGE &&
(ops->vidioc_g_chip_info || p->match.addr == 0))
- return ops->vidioc_s_register(file, fh, p);
+ return ops->vidioc_s_register(file, NULL, p);
return -EINVAL;
#else
return -ENOTTY;
@@ -2711,7 +2701,7 @@ static int v4l_dbg_s_register(const struct v4l2_ioctl_ops *ops,
}
static int v4l_dbg_g_chip_info(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
#ifdef CONFIG_VIDEO_ADV_DEBUG
struct video_device *vfd = video_devdata(file);
@@ -2727,7 +2717,7 @@ static int v4l_dbg_g_chip_info(const struct v4l2_ioctl_ops *ops,
p->flags |= V4L2_CHIP_FL_READABLE;
strscpy(p->name, vfd->v4l2_dev->name, sizeof(p->name));
if (ops->vidioc_g_chip_info)
- return ops->vidioc_g_chip_info(file, fh, arg);
+ return ops->vidioc_g_chip_info(file, NULL, arg);
if (p->match.addr)
return -EINVAL;
return 0;
@@ -2753,26 +2743,32 @@ static int v4l_dbg_g_chip_info(const struct v4l2_ioctl_ops *ops,
#endif
}
-static int v4l_dqevent(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+static int v4l_dqevent(const struct v4l2_ioctl_ops *ops, struct file *file,
+ void *arg)
{
- return v4l2_event_dequeue(fh, arg, file->f_flags & O_NONBLOCK);
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
+
+ return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);
}
static int v4l_subscribe_event(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
- return ops->vidioc_subscribe_event(fh, arg);
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
+
+ return ops->vidioc_subscribe_event(vfh, arg);
}
static int v4l_unsubscribe_event(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
- return ops->vidioc_unsubscribe_event(fh, arg);
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
+
+ return ops->vidioc_unsubscribe_event(vfh, arg);
}
static int v4l_g_sliced_vbi_cap(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct v4l2_sliced_vbi_cap *p = arg;
int ret = check_fmt(file, p->type);
@@ -2783,11 +2779,11 @@ static int v4l_g_sliced_vbi_cap(const struct v4l2_ioctl_ops *ops,
/* Clear up to type, everything after type is zeroed already */
memset(p, 0, offsetof(struct v4l2_sliced_vbi_cap, type));
- return ops->vidioc_g_sliced_vbi_cap(file, fh, p);
+ return ops->vidioc_g_sliced_vbi_cap(file, NULL, p);
}
static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops,
- struct file *file, void *fh, void *arg)
+ struct file *file, void *arg)
{
struct video_device *vfd = video_devdata(file);
struct v4l2_frequency_band *p = arg;
@@ -2805,7 +2801,7 @@ static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops,
return -EINVAL;
}
if (ops->vidioc_enum_freq_bands) {
- err = ops->vidioc_enum_freq_bands(file, fh, p);
+ err = ops->vidioc_enum_freq_bands(file, NULL, p);
if (err != -ENOTTY)
return err;
}
@@ -2817,7 +2813,7 @@ static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops,
if (p->index)
return -EINVAL;
- err = ops->vidioc_g_tuner(file, fh, &t);
+ err = ops->vidioc_g_tuner(file, NULL, &t);
if (err)
return err;
p->capability = t.capability | V4L2_TUNER_CAP_FREQ_BANDS;
@@ -2836,7 +2832,7 @@ static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops,
return -EINVAL;
if (p->index)
return -EINVAL;
- err = ops->vidioc_g_modulator(file, fh, &m);
+ err = ops->vidioc_g_modulator(file, NULL, &m);
if (err)
return err;
p->capability = m.capability | V4L2_TUNER_CAP_FREQ_BANDS;
@@ -2853,7 +2849,7 @@ struct v4l2_ioctl_info {
u32 flags;
const char * const name;
int (*func)(const struct v4l2_ioctl_ops *ops, struct file *file,
- void *fh, void *p);
+ void *p);
void (*debug)(const void *arg, bool write_only);
};
@@ -2874,9 +2870,9 @@ struct v4l2_ioctl_info {
#define DEFINE_V4L_STUB_FUNC(_vidioc) \
static int v4l_stub_ ## _vidioc( \
const struct v4l2_ioctl_ops *ops, \
- struct file *file, void *fh, void *p) \
+ struct file *file, void *p) \
{ \
- return ops->vidioc_ ## _vidioc(file, fh, p); \
+ return ops->vidioc_ ## _vidioc(file, NULL, p); \
}
#define IOCTL_INFO(_ioctl, _func, _debug, _flags) \
@@ -3072,8 +3068,7 @@ static long __video_do_ioctl(struct file *file,
bool write_only = false;
struct v4l2_ioctl_info default_info;
const struct v4l2_ioctl_info *info;
- void *fh = file->private_data;
- struct v4l2_fh *vfh = NULL;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
int dev_debug = vfd->dev_debug;
long ret = -ENOTTY;
@@ -3083,9 +3078,6 @@ static long __video_do_ioctl(struct file *file,
return ret;
}
- if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags))
- vfh = file->private_data;
-
/*
* We need to serialize streamon/off with queueing new requests.
* These ioctls may trigger the cancellation of a streaming
@@ -3117,10 +3109,10 @@ static long __video_do_ioctl(struct file *file,
info = &v4l2_ioctls[_IOC_NR(cmd)];
if (!is_valid_ioctl(vfd, cmd) &&
- !((info->flags & INFO_FL_CTRL) && vfh && vfh->ctrl_handler))
+ !((info->flags & INFO_FL_CTRL) && vfh->ctrl_handler))
goto done;
- if (vfh && (info->flags & INFO_FL_PRIO)) {
+ if (info->flags & INFO_FL_PRIO) {
ret = v4l2_prio_check(vfd->prio, vfh->prio);
if (ret)
goto done;
@@ -3134,12 +3126,12 @@ static long __video_do_ioctl(struct file *file,
write_only = _IOC_DIR(cmd) == _IOC_WRITE;
if (info != &default_info) {
- ret = info->func(ops, file, fh, arg);
+ ret = info->func(ops, file, arg);
} else if (!ops->vidioc_default) {
ret = -ENOTTY;
} else {
- ret = ops->vidioc_default(file, fh,
- vfh ? v4l2_prio_check(vfd->prio, vfh->prio) >= 0 : 0,
+ ret = ops->vidioc_default(file, NULL,
+ v4l2_prio_check(vfd->prio, vfh->prio) >= 0,
cmd, arg);
}
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index eb22d6172462..21acd9bc8607 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -951,7 +951,7 @@ static __poll_t v4l2_m2m_poll_for_data(struct file *file,
__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct poll_table_struct *wait)
{
- struct video_device *vfd = video_devdata(file);
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
struct vb2_queue *src_q = v4l2_m2m_get_src_vq(m2m_ctx);
struct vb2_queue *dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);
__poll_t req_events = poll_requested_events(wait);
@@ -970,13 +970,9 @@ __poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
if (req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM))
rc = v4l2_m2m_poll_for_data(file, m2m_ctx, wait);
- if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
- struct v4l2_fh *fh = file->private_data;
-
- poll_wait(file, &fh->wait, wait);
- if (v4l2_event_pending(fh))
- rc |= EPOLLPRI;
- }
+ poll_wait(file, &fh->wait, wait);
+ if (v4l2_event_pending(fh))
+ rc |= EPOLLPRI;
return rc;
}
@@ -1004,7 +1000,7 @@ unsigned long v4l2_m2m_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
unsigned long offset = pgoff << PAGE_SHIFT;
struct vb2_queue *vq;
@@ -1371,7 +1367,7 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue);
int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *rb)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
@@ -1380,7 +1376,7 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);
int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
struct v4l2_create_buffers *create)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
@@ -1389,7 +1385,7 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);
int v4l2_m2m_ioctl_remove_bufs(struct file *file, void *priv,
struct v4l2_remove_buffers *remove)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
struct vb2_queue *q = v4l2_m2m_get_vq(fh->m2m_ctx, remove->type);
if (!q)
@@ -1404,7 +1400,7 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_remove_bufs);
int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
@@ -1413,7 +1409,7 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);
int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
@@ -1422,7 +1418,7 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);
int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
@@ -1431,7 +1427,7 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);
int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
}
@@ -1440,7 +1436,7 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);
int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
struct v4l2_exportbuffer *eb)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
@@ -1449,7 +1445,7 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);
int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
enum v4l2_buf_type type)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
@@ -1458,13 +1454,13 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);
int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
enum v4l2_buf_type type)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
-int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
+int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *priv,
struct v4l2_encoder_cmd *ec)
{
if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START)
@@ -1475,7 +1471,7 @@ int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_encoder_cmd);
-int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
+int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *priv,
struct v4l2_decoder_cmd *dc)
{
if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START)
@@ -1542,7 +1538,7 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_decoder_cmd);
int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *priv,
struct v4l2_encoder_cmd *ec)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
return v4l2_m2m_encoder_cmd(file, fh->m2m_ctx, ec);
}
@@ -1551,13 +1547,13 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_encoder_cmd);
int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *priv,
struct v4l2_decoder_cmd *dc)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
return v4l2_m2m_decoder_cmd(file, fh->m2m_ctx, dc);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_decoder_cmd);
-int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh,
+int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *priv,
struct v4l2_decoder_cmd *dc)
{
if (dc->cmd != V4L2_DEC_CMD_FLUSH)
@@ -1572,7 +1568,7 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_try_decoder_cmd);
int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
struct v4l2_decoder_cmd *dc)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
struct vb2_v4l2_buffer *out_vb, *cap_vb;
struct v4l2_m2m_dev *m2m_dev = fh->m2m_ctx->m2m_dev;
unsigned long flags;
@@ -1617,7 +1613,7 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_decoder_cmd);
int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
}
@@ -1625,7 +1621,7 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);
__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
__poll_t ret;
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 4fd25fea3b58..1da953629010 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -26,6 +26,30 @@
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
+/**
+ * struct v4l2_subdev_stream_config - Used for storing stream configuration.
+ *
+ * @pad: pad number
+ * @stream: stream number
+ * @enabled: has the stream been enabled with v4l2_subdev_enable_streams()
+ * @fmt: &struct v4l2_mbus_framefmt
+ * @crop: &struct v4l2_rect to be used for crop
+ * @compose: &struct v4l2_rect to be used for compose
+ * @interval: frame interval
+ *
+ * This structure stores configuration for a stream.
+ */
+struct v4l2_subdev_stream_config {
+ u32 pad;
+ u32 stream;
+ bool enabled;
+
+ struct v4l2_mbus_framefmt fmt;
+ struct v4l2_rect crop;
+ struct v4l2_rect compose;
+ struct v4l2_fract interval;
+};
+
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
/*
* The Streams API is an experimental feature. To use the Streams API, set
@@ -86,8 +110,7 @@ static int subdev_open(struct file *file)
}
v4l2_fh_init(&subdev_fh->vfh, vdev);
- v4l2_fh_add(&subdev_fh->vfh);
- file->private_data = &subdev_fh->vfh;
+ v4l2_fh_add(&subdev_fh->vfh, file);
if (sd->v4l2_dev->mdev && sd->entity.graph_obj.mdev->dev) {
struct module *owner;
@@ -110,7 +133,7 @@ static int subdev_open(struct file *file)
err:
module_put(subdev_fh->owner);
- v4l2_fh_del(&subdev_fh->vfh);
+ v4l2_fh_del(&subdev_fh->vfh, file);
v4l2_fh_exit(&subdev_fh->vfh);
subdev_fh_free(subdev_fh);
kfree(subdev_fh);
@@ -122,17 +145,16 @@ static int subdev_close(struct file *file)
{
struct video_device *vdev = video_devdata(file);
struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
if (sd->internal_ops && sd->internal_ops->close)
sd->internal_ops->close(sd, subdev_fh);
module_put(subdev_fh->owner);
- v4l2_fh_del(vfh);
+ v4l2_fh_del(vfh, file);
v4l2_fh_exit(vfh);
subdev_fh_free(subdev_fh);
kfree(subdev_fh);
- file->private_data = NULL;
return 0;
}
@@ -612,7 +634,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
{
struct video_device *vdev = video_devdata(file);
struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
bool ro_subdev = test_bit(V4L2_FL_SUBDEV_RO_DEVNODE, &vdev->flags);
bool streams_subdev = sd->flags & V4L2_SUBDEV_FL_STREAMS;
@@ -1135,7 +1157,7 @@ static long subdev_do_ioctl_lock(struct file *file, unsigned int cmd, void *arg)
if (video_is_registered(vdev)) {
struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
- struct v4l2_fh *vfh = file->private_data;
+ struct v4l2_fh *vfh = file_to_v4l2_fh(file);
struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
struct v4l2_subdev_state *state;
@@ -1192,7 +1214,7 @@ static __poll_t subdev_poll(struct file *file, poll_table *wait)
{
struct video_device *vdev = video_devdata(file);
struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
- struct v4l2_fh *fh = file->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
return EPOLLERR;
diff --git a/drivers/memory/samsung/exynos-srom.c b/drivers/memory/samsung/exynos-srom.c
index e73dd330af47..d913fb901973 100644
--- a/drivers/memory/samsung/exynos-srom.c
+++ b/drivers/memory/samsung/exynos-srom.c
@@ -121,20 +121,18 @@ static int exynos_srom_probe(struct platform_device *pdev)
return -ENOMEM;
srom->dev = dev;
- srom->reg_base = of_iomap(np, 0);
- if (!srom->reg_base) {
+ srom->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(srom->reg_base)) {
dev_err(&pdev->dev, "iomap of exynos srom controller failed\n");
- return -ENOMEM;
+ return PTR_ERR(srom->reg_base);
}
platform_set_drvdata(pdev, srom);
srom->reg_offset = exynos_srom_alloc_reg_dump(exynos_srom_offsets,
ARRAY_SIZE(exynos_srom_offsets));
- if (!srom->reg_offset) {
- iounmap(srom->reg_base);
+ if (!srom->reg_offset)
return -ENOMEM;
- }
for_each_child_of_node(np, child) {
if (exynos_srom_configure_bank(srom, child)) {
diff --git a/drivers/memory/stm32_omm.c b/drivers/memory/stm32_omm.c
index bee2ecc8c2b9..5d06623f3f68 100644
--- a/drivers/memory/stm32_omm.c
+++ b/drivers/memory/stm32_omm.c
@@ -238,7 +238,7 @@ static int stm32_omm_configure(struct device *dev)
if (mux & CR_MUXEN) {
ret = of_property_read_u32(dev->of_node, "st,omm-req2ack-ns",
&req2ack);
- if (!ret && !req2ack) {
+ if (!ret && req2ack) {
req2ack = DIV_ROUND_UP(req2ack, NSEC_PER_SEC / clk_rate_max) - 1;
if (req2ack > 256)
diff --git a/drivers/memory/tegra/tegra210.c b/drivers/memory/tegra/tegra210.c
index 8ab6498dbe7d..cfa61dd88557 100644
--- a/drivers/memory/tegra/tegra210.c
+++ b/drivers/memory/tegra/tegra210.c
@@ -9,11 +9,11 @@
static const struct tegra_mc_client tegra210_mc_clients[] = {
{
- .id = 0x00,
+ .id = TEGRA210_MC_PTCR,
.name = "ptcr",
.swgroup = TEGRA_SWGROUP_PTC,
}, {
- .id = 0x01,
+ .id = TEGRA210_MC_DISPLAY0A,
.name = "display0a",
.swgroup = TEGRA_SWGROUP_DC,
.regs = {
@@ -29,7 +29,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x02,
+ .id = TEGRA210_MC_DISPLAY0AB,
.name = "display0ab",
.swgroup = TEGRA_SWGROUP_DCB,
.regs = {
@@ -45,7 +45,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x03,
+ .id = TEGRA210_MC_DISPLAY0B,
.name = "display0b",
.swgroup = TEGRA_SWGROUP_DC,
.regs = {
@@ -61,7 +61,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x04,
+ .id = TEGRA210_MC_DISPLAY0BB,
.name = "display0bb",
.swgroup = TEGRA_SWGROUP_DCB,
.regs = {
@@ -77,7 +77,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x05,
+ .id = TEGRA210_MC_DISPLAY0C,
.name = "display0c",
.swgroup = TEGRA_SWGROUP_DC,
.regs = {
@@ -93,7 +93,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x06,
+ .id = TEGRA210_MC_DISPLAY0CB,
.name = "display0cb",
.swgroup = TEGRA_SWGROUP_DCB,
.regs = {
@@ -109,7 +109,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x0e,
+ .id = TEGRA210_MC_AFIR,
.name = "afir",
.swgroup = TEGRA_SWGROUP_AFI,
.regs = {
@@ -125,7 +125,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x0f,
+ .id = TEGRA210_MC_AVPCARM7R,
.name = "avpcarm7r",
.swgroup = TEGRA_SWGROUP_AVPC,
.regs = {
@@ -141,7 +141,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x10,
+ .id = TEGRA210_MC_DISPLAYHC,
.name = "displayhc",
.swgroup = TEGRA_SWGROUP_DC,
.regs = {
@@ -157,7 +157,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x11,
+ .id = TEGRA210_MC_DISPLAYHCB,
.name = "displayhcb",
.swgroup = TEGRA_SWGROUP_DCB,
.regs = {
@@ -173,7 +173,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x15,
+ .id = TEGRA210_MC_HDAR,
.name = "hdar",
.swgroup = TEGRA_SWGROUP_HDA,
.regs = {
@@ -189,7 +189,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x16,
+ .id = TEGRA210_MC_HOST1XDMAR,
.name = "host1xdmar",
.swgroup = TEGRA_SWGROUP_HC,
.regs = {
@@ -205,7 +205,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x17,
+ .id = TEGRA210_MC_HOST1XR,
.name = "host1xr",
.swgroup = TEGRA_SWGROUP_HC,
.regs = {
@@ -221,7 +221,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x1c,
+ .id = TEGRA210_MC_NVENCSRD,
.name = "nvencsrd",
.swgroup = TEGRA_SWGROUP_NVENC,
.regs = {
@@ -237,7 +237,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x1d,
+ .id = TEGRA210_MC_PPCSAHBDMAR,
.name = "ppcsahbdmar",
.swgroup = TEGRA_SWGROUP_PPCS,
.regs = {
@@ -253,7 +253,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x1e,
+ .id = TEGRA210_MC_PPCSAHBSLVR,
.name = "ppcsahbslvr",
.swgroup = TEGRA_SWGROUP_PPCS,
.regs = {
@@ -269,7 +269,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x1f,
+ .id = TEGRA210_MC_SATAR,
.name = "satar",
.swgroup = TEGRA_SWGROUP_SATA,
.regs = {
@@ -285,7 +285,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x27,
+ .id = TEGRA210_MC_MPCORER,
.name = "mpcorer",
.swgroup = TEGRA_SWGROUP_MPCORE,
.regs = {
@@ -297,7 +297,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x2b,
+ .id = TEGRA210_MC_NVENCSWR,
.name = "nvencswr",
.swgroup = TEGRA_SWGROUP_NVENC,
.regs = {
@@ -313,7 +313,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x31,
+ .id = TEGRA210_MC_AFIW,
.name = "afiw",
.swgroup = TEGRA_SWGROUP_AFI,
.regs = {
@@ -329,7 +329,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x32,
+ .id = TEGRA210_MC_AVPCARM7W,
.name = "avpcarm7w",
.swgroup = TEGRA_SWGROUP_AVPC,
.regs = {
@@ -345,7 +345,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x35,
+ .id = TEGRA210_MC_HDAW,
.name = "hdaw",
.swgroup = TEGRA_SWGROUP_HDA,
.regs = {
@@ -361,7 +361,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x36,
+ .id = TEGRA210_MC_HOST1XW,
.name = "host1xw",
.swgroup = TEGRA_SWGROUP_HC,
.regs = {
@@ -377,7 +377,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x39,
+ .id = TEGRA210_MC_MPCOREW,
.name = "mpcorew",
.swgroup = TEGRA_SWGROUP_MPCORE,
.regs = {
@@ -389,7 +389,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x3b,
+ .id = TEGRA210_MC_PPCSAHBDMAW,
.name = "ppcsahbdmaw",
.swgroup = TEGRA_SWGROUP_PPCS,
.regs = {
@@ -405,7 +405,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x3c,
+ .id = TEGRA210_MC_PPCSAHBSLVW,
.name = "ppcsahbslvw",
.swgroup = TEGRA_SWGROUP_PPCS,
.regs = {
@@ -421,7 +421,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x3d,
+ .id = TEGRA210_MC_SATAW,
.name = "sataw",
.swgroup = TEGRA_SWGROUP_SATA,
.regs = {
@@ -437,7 +437,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x44,
+ .id = TEGRA210_MC_ISPRA,
.name = "ispra",
.swgroup = TEGRA_SWGROUP_ISP2,
.regs = {
@@ -453,7 +453,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x46,
+ .id = TEGRA210_MC_ISPWA,
.name = "ispwa",
.swgroup = TEGRA_SWGROUP_ISP2,
.regs = {
@@ -469,7 +469,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x47,
+ .id = TEGRA210_MC_ISPWB,
.name = "ispwb",
.swgroup = TEGRA_SWGROUP_ISP2,
.regs = {
@@ -485,7 +485,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x4a,
+ .id = TEGRA210_MC_XUSB_HOSTR,
.name = "xusb_hostr",
.swgroup = TEGRA_SWGROUP_XUSB_HOST,
.regs = {
@@ -501,7 +501,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x4b,
+ .id = TEGRA210_MC_XUSB_HOSTW,
.name = "xusb_hostw",
.swgroup = TEGRA_SWGROUP_XUSB_HOST,
.regs = {
@@ -517,7 +517,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x4c,
+ .id = TEGRA210_MC_XUSB_DEVR,
.name = "xusb_devr",
.swgroup = TEGRA_SWGROUP_XUSB_DEV,
.regs = {
@@ -533,7 +533,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x4d,
+ .id = TEGRA210_MC_XUSB_DEVW,
.name = "xusb_devw",
.swgroup = TEGRA_SWGROUP_XUSB_DEV,
.regs = {
@@ -549,7 +549,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x4e,
+ .id = TEGRA210_MC_ISPRAB,
.name = "isprab",
.swgroup = TEGRA_SWGROUP_ISP2B,
.regs = {
@@ -565,7 +565,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x50,
+ .id = TEGRA210_MC_ISPWAB,
.name = "ispwab",
.swgroup = TEGRA_SWGROUP_ISP2B,
.regs = {
@@ -581,7 +581,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x51,
+ .id = TEGRA210_MC_ISPWBB,
.name = "ispwbb",
.swgroup = TEGRA_SWGROUP_ISP2B,
.regs = {
@@ -597,7 +597,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x54,
+ .id = TEGRA210_MC_TSECSRD,
.name = "tsecsrd",
.swgroup = TEGRA_SWGROUP_TSEC,
.regs = {
@@ -613,7 +613,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x55,
+ .id = TEGRA210_MC_TSECSWR,
.name = "tsecswr",
.swgroup = TEGRA_SWGROUP_TSEC,
.regs = {
@@ -629,7 +629,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x56,
+ .id = TEGRA210_MC_A9AVPSCR,
.name = "a9avpscr",
.swgroup = TEGRA_SWGROUP_A9AVP,
.regs = {
@@ -645,7 +645,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x57,
+ .id = TEGRA210_MC_A9AVPSCW,
.name = "a9avpscw",
.swgroup = TEGRA_SWGROUP_A9AVP,
.regs = {
@@ -661,7 +661,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x58,
+ .id = TEGRA210_MC_GPUSRD,
.name = "gpusrd",
.swgroup = TEGRA_SWGROUP_GPU,
.regs = {
@@ -678,7 +678,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x59,
+ .id = TEGRA210_MC_GPUSWR,
.name = "gpuswr",
.swgroup = TEGRA_SWGROUP_GPU,
.regs = {
@@ -695,7 +695,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x5a,
+ .id = TEGRA210_MC_DISPLAYT,
.name = "displayt",
.swgroup = TEGRA_SWGROUP_DC,
.regs = {
@@ -711,7 +711,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x60,
+ .id = TEGRA210_MC_SDMMCRA,
.name = "sdmmcra",
.swgroup = TEGRA_SWGROUP_SDMMC1A,
.regs = {
@@ -727,7 +727,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x61,
+ .id = TEGRA210_MC_SDMMCRAA,
.name = "sdmmcraa",
.swgroup = TEGRA_SWGROUP_SDMMC2A,
.regs = {
@@ -743,7 +743,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x62,
+ .id = TEGRA210_MC_SDMMCR,
.name = "sdmmcr",
.swgroup = TEGRA_SWGROUP_SDMMC3A,
.regs = {
@@ -759,7 +759,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x63,
+ .id = TEGRA210_MC_SDMMCRAB,
.swgroup = TEGRA_SWGROUP_SDMMC4A,
.name = "sdmmcrab",
.regs = {
@@ -775,7 +775,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x64,
+ .id = TEGRA210_MC_SDMMCWA,
.name = "sdmmcwa",
.swgroup = TEGRA_SWGROUP_SDMMC1A,
.regs = {
@@ -791,7 +791,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x65,
+ .id = TEGRA210_MC_SDMMCWAA,
.name = "sdmmcwaa",
.swgroup = TEGRA_SWGROUP_SDMMC2A,
.regs = {
@@ -807,7 +807,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x66,
+ .id = TEGRA210_MC_SDMMCW,
.name = "sdmmcw",
.swgroup = TEGRA_SWGROUP_SDMMC3A,
.regs = {
@@ -823,7 +823,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x67,
+ .id = TEGRA210_MC_SDMMCWAB,
.name = "sdmmcwab",
.swgroup = TEGRA_SWGROUP_SDMMC4A,
.regs = {
@@ -839,7 +839,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x6c,
+ .id = TEGRA210_MC_VICSRD,
.name = "vicsrd",
.swgroup = TEGRA_SWGROUP_VIC,
.regs = {
@@ -855,7 +855,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x6d,
+ .id = TEGRA210_MC_VICSWR,
.name = "vicswr",
.swgroup = TEGRA_SWGROUP_VIC,
.regs = {
@@ -871,7 +871,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x72,
+ .id = TEGRA210_MC_VIW,
.name = "viw",
.swgroup = TEGRA_SWGROUP_VI,
.regs = {
@@ -887,7 +887,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x73,
+ .id = TEGRA210_MC_DISPLAYD,
.name = "displayd",
.swgroup = TEGRA_SWGROUP_DC,
.regs = {
@@ -903,7 +903,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x78,
+ .id = TEGRA210_MC_NVDECSRD,
.name = "nvdecsrd",
.swgroup = TEGRA_SWGROUP_NVDEC,
.regs = {
@@ -919,7 +919,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x79,
+ .id = TEGRA210_MC_NVDECSWR,
.name = "nvdecswr",
.swgroup = TEGRA_SWGROUP_NVDEC,
.regs = {
@@ -935,7 +935,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x7a,
+ .id = TEGRA210_MC_APER,
.name = "aper",
.swgroup = TEGRA_SWGROUP_APE,
.regs = {
@@ -951,7 +951,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x7b,
+ .id = TEGRA210_MC_APEW,
.name = "apew",
.swgroup = TEGRA_SWGROUP_APE,
.regs = {
@@ -967,7 +967,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x7e,
+ .id = TEGRA210_MC_NVJPGRD,
.name = "nvjpgsrd",
.swgroup = TEGRA_SWGROUP_NVJPG,
.regs = {
@@ -983,7 +983,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x7f,
+ .id = TEGRA210_MC_NVJPGWR,
.name = "nvjpgswr",
.swgroup = TEGRA_SWGROUP_NVJPG,
.regs = {
@@ -999,7 +999,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x80,
+ .id = TEGRA210_MC_SESRD,
.name = "sesrd",
.swgroup = TEGRA_SWGROUP_SE,
.regs = {
@@ -1015,7 +1015,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x81,
+ .id = TEGRA210_MC_SESWR,
.name = "seswr",
.swgroup = TEGRA_SWGROUP_SE,
.regs = {
@@ -1031,7 +1031,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x82,
+ .id = TEGRA210_MC_AXIAPR,
.name = "axiapr",
.swgroup = TEGRA_SWGROUP_AXIAP,
.regs = {
@@ -1047,7 +1047,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x83,
+ .id = TEGRA210_MC_AXIAPW,
.name = "axiapw",
.swgroup = TEGRA_SWGROUP_AXIAP,
.regs = {
@@ -1063,7 +1063,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x84,
+ .id = TEGRA210_MC_ETRR,
.name = "etrr",
.swgroup = TEGRA_SWGROUP_ETR,
.regs = {
@@ -1079,7 +1079,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x85,
+ .id = TEGRA210_MC_ETRW,
.name = "etrw",
.swgroup = TEGRA_SWGROUP_ETR,
.regs = {
@@ -1095,7 +1095,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x86,
+ .id = TEGRA210_MC_TSECSRDB,
.name = "tsecsrdb",
.swgroup = TEGRA_SWGROUP_TSECB,
.regs = {
@@ -1111,7 +1111,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x87,
+ .id = TEGRA210_MC_TSECSWRB,
.name = "tsecswrb",
.swgroup = TEGRA_SWGROUP_TSECB,
.regs = {
@@ -1127,7 +1127,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x88,
+ .id = TEGRA210_MC_GPUSRD2,
.name = "gpusrd2",
.swgroup = TEGRA_SWGROUP_GPU,
.regs = {
@@ -1144,7 +1144,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
},
},
}, {
- .id = 0x89,
+ .id = TEGRA210_MC_GPUSWR2,
.name = "gpuswr2",
.swgroup = TEGRA_SWGROUP_GPU,
.regs = {
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index 7f3f47db4c98..acafc910bbac 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -370,7 +370,9 @@ int memstick_set_rw_addr(struct memstick_dev *card)
{
card->next_request = h_memstick_set_rw_addr;
memstick_new_req(card->host);
- wait_for_completion(&card->mrq_complete);
+ if (!wait_for_completion_timeout(&card->mrq_complete,
+ msecs_to_jiffies(500)))
+ card->current_mrq.error = -ETIMEDOUT;
return card->current_mrq.error;
}
@@ -404,7 +406,9 @@ static struct memstick_dev *memstick_alloc_card(struct memstick_host *host)
card->next_request = h_memstick_read_dev_id;
memstick_new_req(host);
- wait_for_completion(&card->mrq_complete);
+ if (!wait_for_completion_timeout(&card->mrq_complete,
+ msecs_to_jiffies(500)))
+ card->current_mrq.error = -ETIMEDOUT;
if (card->current_mrq.error)
goto err_out;
@@ -555,7 +559,6 @@ EXPORT_SYMBOL(memstick_add_host);
*/
void memstick_remove_host(struct memstick_host *host)
{
- host->removing = 1;
flush_workqueue(workqueue);
mutex_lock(&host->lock);
if (host->card)
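
[Editor's note] The bounded waits above rely on wait_for_completion_timeout()
returning 0 on timeout and the remaining jiffies otherwise, so a hung host
controller can no longer block card setup forever:

    /* sketch of the timeout pattern adopted above */
    if (!wait_for_completion_timeout(&card->mrq_complete,
                                     msecs_to_jiffies(500)))
            card->current_mrq.error = -ETIMEDOUT;  /* 0 means timed out */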
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index d34892782f6e..1af157ce0a63 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -1953,10 +1953,10 @@ static void msb_data_clear(struct msb_data *msb)
msb->card = NULL;
}
-static int msb_bd_getgeo(struct block_device *bdev,
+static int msb_bd_getgeo(struct gendisk *disk,
struct hd_geometry *geo)
{
- struct msb_data *msb = bdev->bd_disk->private_data;
+ struct msb_data *msb = disk->private_data;
*geo = msb->geometry;
return 0;
}
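
[Editor's note] This series switches the ->getgeo() block operation from
struct block_device to struct gendisk, dropping the bdev->bd_disk hop. Only
the prototype changes; the ops wiring stays roughly the same, along these
lines:

    static const struct block_device_operations msb_bdops = {
            .owner  = THIS_MODULE,
            /* now int (*getgeo)(struct gendisk *, struct hd_geometry *) */
            .getgeo = msb_bd_getgeo,
            /* other members elided */
    };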
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index c9853d887d28..e507bb11c802 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -189,10 +189,10 @@ static void mspro_block_bd_free_disk(struct gendisk *disk)
kfree(msb);
}
-static int mspro_block_bd_getgeo(struct block_device *bdev,
+static int mspro_block_bd_getgeo(struct gendisk *disk,
struct hd_geometry *geo)
{
- struct mspro_block_data *msb = bdev->bd_disk->private_data;
+ struct mspro_block_data *msb = disk->private_data;
geo->heads = msb->heads;
geo->sectors = msb->sectors_per_track;
@@ -560,8 +560,7 @@ has_int_reg:
t_offset += msb->current_page * msb->page_size;
sg_set_page(&t_sg,
- nth_page(sg_page(&(msb->req_sg[msb->current_seg])),
- t_offset >> PAGE_SHIFT),
+ sg_page(&(msb->req_sg[msb->current_seg])) + (t_offset >> PAGE_SHIFT),
msb->page_size, offset_in_page(t_offset));
memstick_init_req_sg(*mrq, msb->data_dir == READ
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index cddddb3a5a27..79e66e30417c 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -317,8 +317,7 @@ static int jmb38x_ms_transfer_data(struct jmb38x_ms_host *host)
unsigned int p_off;
if (host->req->long_data) {
- pg = nth_page(sg_page(&host->req->sg),
- off >> PAGE_SHIFT);
+ pg = sg_page(&host->req->sg) + (off >> PAGE_SHIFT);
p_off = offset_in_page(off);
p_cnt = PAGE_SIZE - p_off;
p_cnt = min(p_cnt, length);
diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c
index 3878136227e4..beadc389f15f 100644
--- a/drivers/memstick/host/rtsx_usb_ms.c
+++ b/drivers/memstick/host/rtsx_usb_ms.c
@@ -216,7 +216,10 @@ static int ms_power_off(struct rtsx_usb_ms *host)
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_CLK_EN, MS_CLK_EN, 0);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_OE, MS_OUTPUT_EN, 0);
-
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PWR_CTL,
+ POWER_MASK, POWER_OFF);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PWR_CTL,
+ POWER_MASK | LDO3318_PWR_MASK, POWER_OFF | LDO_SUSPEND);
err = rtsx_usb_send_cmd(ucr, MODE_C, 100);
if (err < 0)
return err;
@@ -812,6 +815,7 @@ static void rtsx_usb_ms_drv_remove(struct platform_device *pdev)
int err;
host->eject = true;
+ msh->removing = true;
cancel_work_sync(&host->handle_req);
cancel_delayed_work_sync(&host->poll_card);
diff --git a/drivers/memstick/host/tifm_ms.c b/drivers/memstick/host/tifm_ms.c
index db7f3a088fb0..0b6a90661eee 100644
--- a/drivers/memstick/host/tifm_ms.c
+++ b/drivers/memstick/host/tifm_ms.c
@@ -201,8 +201,7 @@ static unsigned int tifm_ms_transfer_data(struct tifm_ms *host)
unsigned int p_off;
if (host->req->long_data) {
- pg = nth_page(sg_page(&host->req->sg),
- off >> PAGE_SHIFT);
+ pg = sg_page(&host->req->sg) + (off >> PAGE_SHIFT);
p_off = offset_in_page(off);
p_cnt = PAGE_SIZE - p_off;
p_cnt = min(p_cnt, length);
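
[Editor's note] The nth_page() removals here and in jmb38x_ms/mspro_block
assume the pages backing a single scatterlist entry are contiguous in the
memmap, which makes plain pointer arithmetic equivalent:

    struct page *pg;

    /* off is a byte offset into the sg entry's buffer */
    pg = sg_page(&host->req->sg) + (off >> PAGE_SHIFT);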
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 3a64dc7a7e27..3304f8824cf7 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -2074,7 +2074,7 @@ mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
* This is anyones guess quite frankly.
*/
int
-mptscsih_bios_param(struct scsi_device * sdev, struct block_device *bdev,
+mptscsih_bios_param(struct scsi_device * sdev, struct gendisk *unused,
sector_t capacity, int geom[])
{
int heads;
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index 8c2bb2331fc1..f9678d48100c 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -123,7 +123,7 @@ extern int mptscsih_abort(struct scsi_cmnd * SCpnt);
extern int mptscsih_dev_reset(struct scsi_cmnd * SCpnt);
extern int mptscsih_bus_reset(struct scsi_cmnd * SCpnt);
extern int mptscsih_host_reset(struct scsi_cmnd *SCpnt);
-extern int mptscsih_bios_param(struct scsi_device * sdev, struct block_device *bdev, sector_t capacity, int geom[]);
+extern int mptscsih_bios_param(struct scsi_device * sdev, struct gendisk *unused, sector_t capacity, int geom[]);
extern int mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
extern int mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
extern int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
diff --git a/drivers/mfd/88pm886.c b/drivers/mfd/88pm886.c
index 39dd9a818b0f..e411d8dee554 100644
--- a/drivers/mfd/88pm886.c
+++ b/drivers/mfd/88pm886.c
@@ -35,6 +35,7 @@ static const struct resource pm886_onkey_resources[] = {
};
static const struct mfd_cell pm886_devs[] = {
+ MFD_CELL_NAME("88pm886-gpadc"),
MFD_CELL_RES("88pm886-onkey", pm886_onkey_resources),
MFD_CELL_NAME("88pm886-regulator"),
MFD_CELL_NAME("88pm886-rtc"),
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 425c5fba6cb1..6cec1858947b 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -129,6 +129,7 @@ config MFD_AAT2870_CORE
select MFD_CORE
depends on I2C=y
depends on GPIOLIB || COMPILE_TEST
+ depends on GPIOLIB_LEGACY
help
If you say yes here you get support for the AAT2870.
This driver provides common support for accessing the device,
@@ -138,7 +139,7 @@ config MFD_AAT2870_CORE
config MFD_AT91_USART
tristate "AT91 USART Driver"
select MFD_CORE
- depends on ARCH_AT91 || ARCH_LAN969X || COMPILE_TEST
+ depends on ARCH_MICROCHIP || COMPILE_TEST
help
Select this to get support for AT91 USART IP. This is a wrapper
over at91-usart-serial driver and usart-spi-driver. Only one function
@@ -1134,6 +1135,21 @@ config MFD_MENF21BMC
This driver can also be built as a module. If so the module
will be called menf21bmc.
+config MFD_NCT6694
+ tristate "Nuvoton NCT6694 support"
+ select MFD_CORE
+ depends on USB
+ help
+ This enables support for the Nuvoton NCT6694 USB device, which
+ exposes a set of shared peripherals to the host.
+ The Nuvoton NCT6694 is a peripheral expander with 16 GPIO chips,
+ 6 I2C controllers, 2 CANfd controllers, 2 Watchdog timers, ADC,
+ PWM, and RTC.
+ This driver provides core APIs to access the NCT6694 hardware
+ monitoring and control features.
+ Additional drivers must be enabled to utilize the specific
+ functionalities of the device.
+
config MFD_OCELOT
tristate "Microsemi Ocelot External Control Support"
depends on SPI_MASTER
@@ -1238,6 +1254,19 @@ config MFD_QCOM_RPM
Say M here if you want to include support for the Qualcomm RPM as a
module. This will build a module called "qcom_rpm".
+config MFD_SPACEMIT_P1
+ tristate "SpacemiT P1 PMIC"
+ depends on ARCH_SPACEMIT || COMPILE_TEST
+ depends on I2C
+ select I2C_K1
+ select MFD_SIMPLE_MFD_I2C
+ help
+ This option supports the I2C-based SpacemiT P1 PMIC, which
+ contains regulators, a power switch, GPIOs, an RTC, and more.
+ This option is selected when any of the supported sub-devices
+ is configured. The basic functionality is implemented by the
+ simple MFD I2C driver.
+
config MFD_SPMI_PMIC
tristate "Qualcomm SPMI PMICs"
depends on ARCH_QCOM || COMPILE_TEST
@@ -1411,6 +1440,7 @@ config MFD_SEC_I2C
config MFD_SI476X_CORE
tristate "Silicon Laboratories 4761/64/68 AM/FM radio."
depends on I2C
+ depends on GPIOLIB_LEGACY
select MFD_CORE
select REGMAP_I2C
help
@@ -1539,8 +1569,8 @@ config MFD_DB8500_PRCMU
through a register map.
config MFD_STMPE
- bool "STMicroelectronics STMPE"
- depends on I2C=y || SPI_MASTER=y
+ tristate "STMicroelectronics STMPE"
+ depends on I2C || SPI_MASTER
depends on OF
select MFD_CORE
help
@@ -1568,14 +1598,14 @@ menu "STMicroelectronics STMPE Interface Drivers"
depends on MFD_STMPE
config STMPE_I2C
- bool "STMicroelectronics STMPE I2C Interface"
- depends on I2C=y
+ tristate "STMicroelectronics STMPE I2C Interface"
+ depends on I2C
default y
help
This is used to enable I2C interface of STMPE
config STMPE_SPI
- bool "STMicroelectronics STMPE SPI Interface"
+ tristate "STMicroelectronics STMPE SPI Interface"
depends on SPI_MASTER
help
This is used to enable SPI interface of STMPE
@@ -1641,6 +1671,17 @@ config MFD_TI_LMU
LM36274. It consists of backlight, LED and regulator driver.
It provides consistent device controls for lighting functions.
+config MFD_BQ257XX
+ tristate "TI BQ257XX Buck/Boost Charge Controller"
+ depends on I2C
+ select MFD_CORE
+ select REGMAP_I2C
+ help
+ Support for the Texas Instruments BQ25703 buck/boost converter with
+ charge controller. It consists of regulators that provide
+ system voltage and OTG voltage, and a charger manager for
+ batteries containing one or more cells.
+
config MFD_OMAP_USB_HOST
bool "TI OMAP USBHS core and TLL driver"
depends on USB_EHCI_HCD_OMAP || USB_OHCI_HCD_OMAP3
@@ -1977,7 +2018,7 @@ config MFD_TIMBERDALE
multifunction device which exposes numerous platform devices.
The timberdale FPGA can be found on the Intel Atom development board
- for in-vehicle infontainment, called Russellville.
+ for in-vehicle infotainment, called Russellville.
config MFD_TC3589X
bool "Toshiba TC35892 and variants"
@@ -2428,6 +2469,30 @@ config MFD_INTEL_M10_BMC_PMCI
additional drivers must be enabled in order to use the functionality
of the device.
+config MFD_LOONGSON_SE
+ tristate "Loongson Security Engine chip controller driver"
+ depends on LOONGARCH && ACPI
+ select MFD_CORE
+ help
+ The Loongson Security Engine chip supports RNG, SM2, SM3 and
+ SM4 accelerator engines. Each engine has its own DMA buffer
+ provided by the controller. The kernel cannot directly send
+ commands to the engine and must first send them to the controller,
+ which will forward them to the corresponding engine.
+
+config MFD_LS2K_BMC_CORE
+ bool "Loongson-2K Board Management Controller Support"
+ depends on PCI && ACPI_GENERIC_GSI
+ select MFD_CORE
+ help
+ Say yes here to add support for the Loongson-2K BMC which is a Board
+ Management Controller connected to the PCIe bus. The device supports
+ multiple sub-devices like display and IPMI. This driver provides common
+ support for accessing the devices.
+
+ The display is enabled by default in the driver, while the IPMI interface
+ is enabled independently through the IPMI_LS2K option in the IPMI section.
+
config MFD_QNAP_MCU
tristate "QNAP microcontroller unit core driver"
depends on SERIAL_DEV_BUS
@@ -2481,5 +2546,19 @@ config MFD_UPBOARD_FPGA
To compile this driver as a module, choose M here: the module will be
called upboard-fpga.
+config MFD_MAX7360
+ tristate "Maxim MAX7360 I2C IO Expander"
+ depends on I2C
+ select MFD_CORE
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ help
+ Say yes here to add support for the Maxim MAX7360 device, which
+ embeds keypad, rotary encoder, PWM and GPIO features.
+
+ This driver provides common support for accessing the device;
+ additional drivers must be enabled in order to use the functionality
+ of the device.
+
endmenu
endif
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index f7bdedd5a66d..865e9f12faff 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_MFD_SM501) += sm501.o
obj-$(CONFIG_ARCH_BCM2835) += bcm2835-pm.o
obj-$(CONFIG_MFD_BCM590XX) += bcm590xx.o
obj-$(CONFIG_MFD_BD9571MWV) += bd9571mwv.o
+obj-$(CONFIG_MFD_BQ257XX) += bq257xx.o
obj-$(CONFIG_MFD_CGBC) += cgbc-core.o
obj-$(CONFIG_MFD_CROS_EC_DEV) += cros_ec_dev.o
obj-$(CONFIG_MFD_CS42L43) += cs42l43.o
@@ -121,6 +122,8 @@ obj-$(CONFIG_MFD_MC13XXX) += mc13xxx-core.o
obj-$(CONFIG_MFD_MC13XXX_SPI) += mc13xxx-spi.o
obj-$(CONFIG_MFD_MC13XXX_I2C) += mc13xxx-i2c.o
+obj-$(CONFIG_MFD_NCT6694) += nct6694.o
+
obj-$(CONFIG_MFD_CORE) += mfd-core.o
ocelot-soc-objs := ocelot-core.o ocelot-spi.o
@@ -163,6 +166,7 @@ obj-$(CONFIG_MFD_DA9063) += da9063.o
obj-$(CONFIG_MFD_DA9150) += da9150-core.o
obj-$(CONFIG_MFD_MAX14577) += max14577.o
+obj-$(CONFIG_MFD_MAX7360) += max7360.o
obj-$(CONFIG_MFD_MAX77541) += max77541.o
obj-$(CONFIG_MFD_MAX77620) += max77620.o
obj-$(CONFIG_MFD_MAX77650) += max77650.o
@@ -286,6 +290,8 @@ obj-$(CONFIG_MFD_INTEL_M10_BMC_CORE) += intel-m10-bmc-core.o
obj-$(CONFIG_MFD_INTEL_M10_BMC_SPI) += intel-m10-bmc-spi.o
obj-$(CONFIG_MFD_INTEL_M10_BMC_PMCI) += intel-m10-bmc-pmci.o
+obj-$(CONFIG_MFD_LS2K_BMC_CORE) += ls2k-bmc-core.o
+
obj-$(CONFIG_MFD_ATC260X) += atc260x-core.o
obj-$(CONFIG_MFD_ATC260X_I2C) += atc260x-i2c.o
@@ -295,3 +301,5 @@ obj-$(CONFIG_MFD_RSMU_I2C) += rsmu_i2c.o rsmu_core.o
obj-$(CONFIG_MFD_RSMU_SPI) += rsmu_spi.o rsmu_core.o
obj-$(CONFIG_MFD_UPBOARD_FPGA) += upboard-fpga.o
+
+obj-$(CONFIG_MFD_LOONGSON_SE) += loongson-se.o
diff --git a/drivers/mfd/adp5585.c b/drivers/mfd/adp5585.c
index 58f7cebe2ea4..46b3ce3d7bae 100644
--- a/drivers/mfd/adp5585.c
+++ b/drivers/mfd/adp5585.c
@@ -432,7 +432,6 @@ static int adp5585_reset_ev_parse(struct adp5585_dev *adp5585)
"Invalid value(%u) for adi,reset-pulse-width-us\n",
prop_val);
}
- return ret;
}
return 0;
diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c
index 3f8622ee0e59..544016d420fe 100644
--- a/drivers/mfd/arizona-irq.c
+++ b/drivers/mfd/arizona-irq.c
@@ -136,7 +136,7 @@ static irqreturn_t arizona_irq_thread(int irq, void *data)
dev_err(arizona->dev,
"Failed to read main IRQ status: %d\n", ret);
}
-
+#ifdef CONFIG_GPIOLIB_LEGACY
/*
* Poll the IRQ pin status to see if we're really done
* if the interrupt controller can't do it for us.
@@ -150,6 +150,7 @@ static irqreturn_t arizona_irq_thread(int irq, void *data)
!gpio_get_value_cansleep(arizona->pdata.irq_gpio)) {
poll = true;
}
+#endif
} while (poll);
pm_runtime_put_autosuspend(arizona->dev);
@@ -349,6 +350,7 @@ int arizona_irq_init(struct arizona *arizona)
goto err_map_main_irq;
}
+#ifdef CONFIG_GPIOLIB_LEGACY
/* Used to emulate edge trigger and to work around broken pinmux */
if (arizona->pdata.irq_gpio) {
if (gpio_to_irq(arizona->pdata.irq_gpio) != arizona->irq) {
@@ -368,6 +370,7 @@ int arizona_irq_init(struct arizona *arizona)
arizona->pdata.irq_gpio = 0;
}
}
+#endif
ret = request_threaded_irq(arizona->irq, NULL, arizona_irq_thread,
flags, "arizona", arizona);
diff --git a/drivers/mfd/bq257xx.c b/drivers/mfd/bq257xx.c
new file mode 100644
index 000000000000..e9d49dac0a16
--- /dev/null
+++ b/drivers/mfd/bq257xx.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * BQ257XX Core Driver
+ * Copyright (C) 2025 Chris Morgan <macromorgan@hotmail.com>
+ */
+
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/mfd/bq257xx.h>
+#include <linux/mfd/core.h>
+#include <linux/regmap.h>
+
+static const struct regmap_range bq25703_readonly_reg_ranges[] = {
+ regmap_reg_range(BQ25703_CHARGER_STATUS, BQ25703_MANUFACT_DEV_ID),
+};
+
+static const struct regmap_access_table bq25703_writeable_regs = {
+ .no_ranges = bq25703_readonly_reg_ranges,
+ .n_no_ranges = ARRAY_SIZE(bq25703_readonly_reg_ranges),
+};
+
+static const struct regmap_range bq25703_volatile_reg_ranges[] = {
+ regmap_reg_range(BQ25703_CHARGE_OPTION_0, BQ25703_IIN_HOST),
+ regmap_reg_range(BQ25703_CHARGER_STATUS, BQ25703_ADC_OPTION),
+};
+
+static const struct regmap_access_table bq25703_volatile_regs = {
+ .yes_ranges = bq25703_volatile_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(bq25703_volatile_reg_ranges),
+};
+
+static const struct regmap_config bq25703_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = BQ25703_ADC_OPTION,
+ .cache_type = REGCACHE_MAPLE,
+ .wr_table = &bq25703_writeable_regs,
+ .volatile_table = &bq25703_volatile_regs,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+};
+
+static const struct mfd_cell cells[] = {
+ MFD_CELL_NAME("bq257xx-regulator"),
+ MFD_CELL_NAME("bq257xx-charger"),
+};
+
+static int bq257xx_probe(struct i2c_client *client)
+{
+ struct bq257xx_device *ddata;
+ int ret;
+
+ ddata = devm_kzalloc(&client->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ ddata->client = client;
+
+ ddata->regmap = devm_regmap_init_i2c(client, &bq25703_regmap_config);
+ if (IS_ERR(ddata->regmap)) {
+ return dev_err_probe(&client->dev, PTR_ERR(ddata->regmap),
+ "Failed to allocate register map\n");
+ }
+
+ i2c_set_clientdata(client, ddata);
+
+ ret = devm_mfd_add_devices(&client->dev, PLATFORM_DEVID_AUTO,
+ cells, ARRAY_SIZE(cells), NULL, 0, NULL);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to register child devices\n");
+
+ return ret;
+}
+
+static const struct i2c_device_id bq257xx_i2c_ids[] = {
+ { "bq25703a" },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, bq257xx_i2c_ids);
+
+static const struct of_device_id bq257xx_of_match[] = {
+ { .compatible = "ti,bq25703a" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, bq257xx_of_match);
+
+static struct i2c_driver bq257xx_driver = {
+ .driver = {
+ .name = "bq257xx",
+ .of_match_table = bq257xx_of_match,
+ },
+ .probe = bq257xx_probe,
+ .id_table = bq257xx_i2c_ids,
+};
+module_i2c_driver(bq257xx_driver);
+
+MODULE_DESCRIPTION("bq257xx buck/boost/charger driver");
+MODULE_AUTHOR("Chris Morgan <macromorgan@hotmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/cs42l43.c b/drivers/mfd/cs42l43.c
index 07c8f1b8183e..107cfb983fec 100644
--- a/drivers/mfd/cs42l43.c
+++ b/drivers/mfd/cs42l43.c
@@ -1117,24 +1117,6 @@ EXPORT_SYMBOL_NS_GPL(cs42l43_dev_probe, "MFD_CS42L43");
static int cs42l43_suspend(struct device *dev)
{
struct cs42l43 *cs42l43 = dev_get_drvdata(dev);
- static const struct reg_sequence mask_all[] = {
- { CS42L43_DECIM_MASK, 0xFFFFFFFF, },
- { CS42L43_EQ_MIX_MASK, 0xFFFFFFFF, },
- { CS42L43_ASP_MASK, 0xFFFFFFFF, },
- { CS42L43_PLL_MASK, 0xFFFFFFFF, },
- { CS42L43_SOFT_MASK, 0xFFFFFFFF, },
- { CS42L43_SWIRE_MASK, 0xFFFFFFFF, },
- { CS42L43_MSM_MASK, 0xFFFFFFFF, },
- { CS42L43_ACC_DET_MASK, 0xFFFFFFFF, },
- { CS42L43_I2C_TGT_MASK, 0xFFFFFFFF, },
- { CS42L43_SPI_MSTR_MASK, 0xFFFFFFFF, },
- { CS42L43_SW_TO_SPI_BRIDGE_MASK, 0xFFFFFFFF, },
- { CS42L43_OTP_MASK, 0xFFFFFFFF, },
- { CS42L43_CLASS_D_AMP_MASK, 0xFFFFFFFF, },
- { CS42L43_GPIO_INT_MASK, 0xFFFFFFFF, },
- { CS42L43_ASRC_MASK, 0xFFFFFFFF, },
- { CS42L43_HPOUT_MASK, 0xFFFFFFFF, },
- };
int ret;
ret = pm_runtime_resume_and_get(dev);
@@ -1143,13 +1125,7 @@ static int cs42l43_suspend(struct device *dev)
return ret;
}
- /* The IRQs will be re-enabled on resume by the cache sync */
- ret = regmap_multi_reg_write_bypassed(cs42l43->regmap,
- mask_all, ARRAY_SIZE(mask_all));
- if (ret) {
- dev_err(cs42l43->dev, "Failed to mask IRQs: %d\n", ret);
- return ret;
- }
+ disable_irq(cs42l43->irq);
ret = pm_runtime_force_suspend(dev);
if (ret) {
@@ -1164,8 +1140,6 @@ static int cs42l43_suspend(struct device *dev)
if (ret)
return ret;
- disable_irq(cs42l43->irq);
-
return 0;
}
@@ -1196,14 +1170,14 @@ static int cs42l43_resume(struct device *dev)
if (ret)
return ret;
- enable_irq(cs42l43->irq);
-
ret = pm_runtime_force_resume(dev);
if (ret) {
dev_err(cs42l43->dev, "Failed to force resume: %d\n", ret);
return ret;
}
+ enable_irq(cs42l43->irq);
+
return 0;
}
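
[Editor's note] The bypassed mask-all register writes are replaced by a plain
disable_irq(), which also waits for any in-flight handler to finish; resume
re-enables the line only after the register cache has been restored:

    /* suspend: quiesce the interrupt before forcing runtime suspend */
    disable_irq(cs42l43->irq);
    ret = pm_runtime_force_suspend(dev);
    /* (rest of the suspend path) */

    /* resume: restore hardware state first, then unmask */
    ret = pm_runtime_force_resume(dev);
    enable_irq(cs42l43->irq);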
diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c
index c6235cd0dbdc..1ec9ab56442d 100644
--- a/drivers/mfd/da9063-i2c.c
+++ b/drivers/mfd/da9063-i2c.c
@@ -37,9 +37,13 @@ enum da9063_page_sel_buf_fmt {
DA9063_PAGE_SEL_BUF_SIZE,
};
+enum da9063_page_sel_msgs {
+ DA9063_PAGE_SEL_MSG = 0,
+ DA9063_PAGE_SEL_CNT,
+};
+
enum da9063_paged_read_msgs {
- DA9063_PAGED_READ_MSG_PAGE_SEL = 0,
- DA9063_PAGED_READ_MSG_REG_SEL,
+ DA9063_PAGED_READ_MSG_REG_SEL = 0,
DA9063_PAGED_READ_MSG_DATA,
DA9063_PAGED_READ_MSG_CNT,
};
@@ -65,10 +69,21 @@ static int da9063_i2c_blockreg_read(struct i2c_client *client, u16 addr,
(page_num << DA9063_I2C_PAGE_SEL_SHIFT) & DA9063_REG_PAGE_MASK;
/* Write reg address, page selection */
- xfer[DA9063_PAGED_READ_MSG_PAGE_SEL].addr = client->addr;
- xfer[DA9063_PAGED_READ_MSG_PAGE_SEL].flags = 0;
- xfer[DA9063_PAGED_READ_MSG_PAGE_SEL].len = DA9063_PAGE_SEL_BUF_SIZE;
- xfer[DA9063_PAGED_READ_MSG_PAGE_SEL].buf = page_sel_buf;
+ xfer[DA9063_PAGE_SEL_MSG].addr = client->addr;
+ xfer[DA9063_PAGE_SEL_MSG].flags = 0;
+ xfer[DA9063_PAGE_SEL_MSG].len = DA9063_PAGE_SEL_BUF_SIZE;
+ xfer[DA9063_PAGE_SEL_MSG].buf = page_sel_buf;
+
+ ret = i2c_transfer(client->adapter, xfer, DA9063_PAGE_SEL_CNT);
+ if (ret < 0) {
+ dev_err(&client->dev, "Page switch failed: %d\n", ret);
+ return ret;
+ }
+
+ if (ret != DA9063_PAGE_SEL_CNT) {
+ dev_err(&client->dev, "Page switch failed to complete\n");
+ return -EIO;
+ }
/* Select register address */
xfer[DA9063_PAGED_READ_MSG_REG_SEL].addr = client->addr;
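
[Editor's note] Issuing the page-select message in its own i2c_transfer()
call works because i2c_transfer() returns the number of messages actually
transferred, or a negative errno, hence the two-step check above:

    ret = i2c_transfer(client->adapter, xfer, DA9063_PAGE_SEL_CNT);
    if (ret < 0)
            return ret;             /* bus error */
    if (ret != DA9063_PAGE_SEL_CNT)
            return -EIO;            /* partial transfer */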
diff --git a/drivers/mfd/exynos-lpass.c b/drivers/mfd/exynos-lpass.c
index 44797001a432..9bb2687c2835 100644
--- a/drivers/mfd/exynos-lpass.c
+++ b/drivers/mfd/exynos-lpass.c
@@ -101,7 +101,6 @@ static const struct regmap_config exynos_lpass_reg_conf = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0xfc,
- .fast_io = true,
};
static void exynos_lpass_disable_lpass(void *data)
diff --git a/drivers/mfd/fsl-imx25-tsadc.c b/drivers/mfd/fsl-imx25-tsadc.c
index 0aab6428e042..467b1a23faeb 100644
--- a/drivers/mfd/fsl-imx25-tsadc.c
+++ b/drivers/mfd/fsl-imx25-tsadc.c
@@ -17,7 +17,6 @@
#include <linux/regmap.h>
static const struct regmap_config mx25_tsadc_regmap_config = {
- .fast_io = true,
.max_register = 8,
.reg_bits = 32,
.val_bits = 32,
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index 1a5b8b13f8d0..8d92c895d3ae 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -367,6 +367,19 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x4b79), (kernel_ulong_t)&ehl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x4b7a), (kernel_ulong_t)&ehl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x4b7b), (kernel_ulong_t)&ehl_i2c_info },
+ /* WCL */
+ { PCI_VDEVICE(INTEL, 0x4d25), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x4d26), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x4d27), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0x4d30), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0x4d46), (kernel_ulong_t)&tgl_spi_info },
+ { PCI_VDEVICE(INTEL, 0x4d50), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x4d51), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x4d52), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x4d78), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x4d79), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x4d7a), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x4d7b), (kernel_ulong_t)&ehl_i2c_info },
/* JSL */
{ PCI_VDEVICE(INTEL, 0x4da8), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x4da9), (kernel_ulong_t)&spt_uart_info },
diff --git a/drivers/mfd/intel_soc_pmic_chtdc_ti.c b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
index 4c1a68c9f575..6daf33e07ea0 100644
--- a/drivers/mfd/intel_soc_pmic_chtdc_ti.c
+++ b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
@@ -82,6 +82,8 @@ static const struct regmap_config chtdc_ti_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 0xff,
+ /* The hardware does not support reading multiple registers at once */
+ .use_single_read = true,
};
static const struct regmap_irq chtdc_ti_irqs[] = {
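
[Editor's note] With use_single_read set, regmap transparently splits bulk
reads into one-register transactions, so existing callers keep working
against hardware that cannot auto-increment; for example:

    u8 buf[4];

    /* register 0x10 and the length are arbitrary here; regmap issues
     * four single-register reads under the hood */
    ret = regmap_bulk_read(map, 0x10, buf, sizeof(buf));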
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index c5bfb6440a93..c2008d2dc95a 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -141,10 +141,8 @@ static int kempld_create_platform_device(const struct kempld_platform_data *pdat
};
kempld_pdev = platform_device_register_full(&pdevinfo);
- if (IS_ERR(kempld_pdev))
- return PTR_ERR(kempld_pdev);
- return 0;
+ return PTR_ERR_OR_ZERO(kempld_pdev);
}
/**
@@ -779,22 +777,26 @@ MODULE_DEVICE_TABLE(dmi, kempld_dmi_table);
static int __init kempld_init(void)
{
const struct dmi_system_id *id;
- int ret = -ENODEV;
-
- for (id = dmi_first_match(kempld_dmi_table); id; id = dmi_first_match(id + 1)) {
- /* Check, if user asked for the exact device ID match */
- if (force_device_id[0] && !strstr(id->ident, force_device_id))
- continue;
- ret = kempld_create_platform_device(&kempld_platform_data_generic);
- if (ret)
- continue;
-
- break;
+ /*
+ * This custom DMI iteration allows the driver to be initialized in three ways:
+ * - When a force_device_id string matches any ident in the kempld_dmi_table,
+ * regardless of whether the DMI device is present in the system DMI table.
+ * - When a matching entry is present in the DMI system table.
+ * - Through alternative mechanisms like ACPI.
+ */
+ if (force_device_id[0]) {
+ for (id = kempld_dmi_table; id->matches[0].slot != DMI_NONE; id++)
+ if (strstr(id->ident, force_device_id))
+ if (!kempld_create_platform_device(&kempld_platform_data_generic))
+ break;
+ if (id->matches[0].slot == DMI_NONE)
+ return -ENODEV;
+ } else {
+ for (id = dmi_first_match(kempld_dmi_table); id; id = dmi_first_match(id+1))
+ if (kempld_create_platform_device(&kempld_platform_data_generic))
+ break;
}
- if (ret)
- return ret;
-
return platform_driver_register(&kempld_driver);
}
diff --git a/drivers/mfd/loongson-se.c b/drivers/mfd/loongson-se.c
new file mode 100644
index 000000000000..3902ba377d69
--- /dev/null
+++ b/drivers/mfd/loongson-se.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2025 Loongson Technology Corporation Limited
+ *
+ * Author: Yinggang Gu <guyinggang@loongson.cn>
+ * Author: Qunqin Zhao <zhaoqunqin@loongson.cn>
+ */
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/loongson-se.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+struct loongson_se {
+ void __iomem *base;
+ spinlock_t dev_lock;
+ struct completion cmd_completion;
+
+ void *dmam_base;
+ int dmam_size;
+
+ struct mutex engine_init_lock;
+ struct loongson_se_engine engines[SE_ENGINE_MAX];
+};
+
+struct loongson_se_controller_cmd {
+ u32 command_id;
+ u32 info[7];
+};
+
+static int loongson_se_poll(struct loongson_se *se, u32 int_bit)
+{
+ u32 status;
+ int err;
+
+ spin_lock_irq(&se->dev_lock);
+
+ /* Notify the controller that the engine needs to be started */
+ writel(int_bit, se->base + SE_L2SINT_SET);
+
+ /* Polling until the controller has forwarded the engine command */
+ err = readl_relaxed_poll_timeout_atomic(se->base + SE_L2SINT_STAT, status,
+ !(status & int_bit),
+ 1, LOONGSON_ENGINE_CMD_TIMEOUT_US);
+
+ spin_unlock_irq(&se->dev_lock);
+
+ return err;
+}
+
+static int loongson_se_send_controller_cmd(struct loongson_se *se,
+ struct loongson_se_controller_cmd *cmd)
+{
+ u32 *send_cmd = (u32 *)cmd;
+ int err, i;
+
+ for (i = 0; i < SE_SEND_CMD_REG_LEN; i++)
+ writel(send_cmd[i], se->base + SE_SEND_CMD_REG + i * 4);
+
+ err = loongson_se_poll(se, SE_INT_CONTROLLER);
+ if (err)
+ return err;
+
+ return wait_for_completion_interruptible(&se->cmd_completion);
+}
+
+int loongson_se_send_engine_cmd(struct loongson_se_engine *engine)
+{
+ /*
+ * After engine initialization, the controller already knows
+ * where to obtain engine commands from. Now all we need to
+ * do is notify the controller that the engine needs to be started.
+ */
+ int err = loongson_se_poll(engine->se, BIT(engine->id));
+
+ if (err)
+ return err;
+
+ return wait_for_completion_interruptible(&engine->completion);
+}
+EXPORT_SYMBOL_GPL(loongson_se_send_engine_cmd);
+
+struct loongson_se_engine *loongson_se_init_engine(struct device *dev, int id)
+{
+ struct loongson_se *se = dev_get_drvdata(dev);
+ struct loongson_se_engine *engine = &se->engines[id];
+ struct loongson_se_controller_cmd cmd;
+
+ engine->se = se;
+ engine->id = id;
+ init_completion(&engine->completion);
+
+ /* Divide DMA memory equally among all engines */
+ engine->buffer_size = se->dmam_size / SE_ENGINE_MAX;
+ engine->buffer_off = (se->dmam_size / SE_ENGINE_MAX) * id;
+ engine->data_buffer = se->dmam_base + engine->buffer_off;
+
+ /*
+ * There is no engine0, so its data buffer is used as the command buffer for other
+ * engines. The DMA memory size is obtained from the ACPI table, which
+ * ensures that the data buffer size of engine0 is larger than the
+ * command buffer size of all engines.
+ */
+ engine->command = se->dmam_base + id * (2 * SE_ENGINE_CMD_SIZE);
+ engine->command_ret = engine->command + SE_ENGINE_CMD_SIZE;
+
+ mutex_lock(&se->engine_init_lock);
+
+ /* Tell the controller where to find engine command */
+ cmd.command_id = SE_CMD_SET_ENGINE_CMDBUF;
+ cmd.info[0] = id;
+ cmd.info[1] = engine->command - se->dmam_base;
+ cmd.info[2] = 2 * SE_ENGINE_CMD_SIZE;
+
+ if (loongson_se_send_controller_cmd(se, &cmd))
+ engine = NULL;
+
+ mutex_unlock(&se->engine_init_lock);
+
+ return engine;
+}
+EXPORT_SYMBOL_GPL(loongson_se_init_engine);
+
+static irqreturn_t se_irq_handler(int irq, void *dev_id)
+{
+ struct loongson_se *se = dev_id;
+ u32 int_status;
+ int id;
+
+ spin_lock(&se->dev_lock);
+
+ int_status = readl(se->base + SE_S2LINT_STAT);
+
+ /* For controller */
+ if (int_status & SE_INT_CONTROLLER) {
+ complete(&se->cmd_completion);
+ int_status &= ~SE_INT_CONTROLLER;
+ writel(SE_INT_CONTROLLER, se->base + SE_S2LINT_CL);
+ }
+
+ /* For engines */
+ while (int_status) {
+ id = __ffs(int_status);
+ complete(&se->engines[id].completion);
+ int_status &= ~BIT(id);
+ writel(BIT(id), se->base + SE_S2LINT_CL);
+ }
+
+ spin_unlock(&se->dev_lock);
+
+ return IRQ_HANDLED;
+}
+
+static int loongson_se_init(struct loongson_se *se, dma_addr_t addr, int size)
+{
+ struct loongson_se_controller_cmd cmd;
+ int err;
+
+ cmd.command_id = SE_CMD_START;
+ err = loongson_se_send_controller_cmd(se, &cmd);
+ if (err)
+ return err;
+
+ cmd.command_id = SE_CMD_SET_DMA;
+ cmd.info[0] = lower_32_bits(addr);
+ cmd.info[1] = upper_32_bits(addr);
+ cmd.info[2] = size;
+
+ return loongson_se_send_controller_cmd(se, &cmd);
+}
+
+static const struct mfd_cell engines[] = {
+ { .name = "loongson-rng" },
+ { .name = "tpm_loongson" },
+};
+
+static int loongson_se_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct loongson_se *se;
+ int nr_irq, irq, err, i;
+ dma_addr_t paddr;
+
+ se = devm_kmalloc(dev, sizeof(*se), GFP_KERNEL);
+ if (!se)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, se);
+ init_completion(&se->cmd_completion);
+ spin_lock_init(&se->dev_lock);
+ mutex_init(&se->engine_init_lock);
+
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (device_property_read_u32(dev, "dmam_size", &se->dmam_size))
+ return -ENODEV;
+
+ se->dmam_base = dmam_alloc_coherent(dev, se->dmam_size, &paddr, GFP_KERNEL);
+ if (!se->dmam_base)
+ return -ENOMEM;
+
+ se->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(se->base))
+ return PTR_ERR(se->base);
+
+ writel(SE_INT_ALL, se->base + SE_S2LINT_EN);
+
+ nr_irq = platform_irq_count(pdev);
+ if (nr_irq <= 0)
+ return -ENODEV;
+
+ for (i = 0; i < nr_irq; i++) {
+ irq = platform_get_irq(pdev, i);
+ err = devm_request_irq(dev, irq, se_irq_handler, 0, "loongson-se", se);
+ if (err)
+ dev_err(dev, "failed to request IRQ: %d\n", irq);
+ }
+
+ err = loongson_se_init(se, paddr, se->dmam_size);
+ if (err)
+ return err;
+
+ return devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, engines,
+ ARRAY_SIZE(engines), NULL, 0, NULL);
+}
+
+static const struct acpi_device_id loongson_se_acpi_match[] = {
+ { "LOON0011", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, loongson_se_acpi_match);
+
+static struct platform_driver loongson_se_driver = {
+ .probe = loongson_se_probe,
+ .driver = {
+ .name = "loongson-se",
+ .acpi_match_table = loongson_se_acpi_match,
+ },
+};
+module_platform_driver(loongson_se_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yinggang Gu <guyinggang@loongson.cn>");
+MODULE_AUTHOR("Qunqin Zhao <zhaoqunqin@loongson.cn>");
+MODULE_DESCRIPTION("Loongson Security Engine chip controller driver");
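
[Editor's note] A cell driver (e.g. loongson-rng) would presumably bind an
engine from its parent device and then loop write-command / kick /
read-result; a sketch under that assumption (the engine ID constant is
hypothetical):

    struct loongson_se_engine *eng;
    int err;

    eng = loongson_se_init_engine(dev->parent, SE_ENGINE_RNG /* assumed id */);
    if (!eng)
            return -ENODEV;

    /* fill eng->command, then start the engine and wait for completion */
    err = loongson_se_send_engine_cmd(eng);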
diff --git a/drivers/mfd/ls2k-bmc-core.c b/drivers/mfd/ls2k-bmc-core.c
new file mode 100644
index 000000000000..e162b3c7c9f8
--- /dev/null
+++ b/drivers/mfd/ls2k-bmc-core.c
@@ -0,0 +1,528 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Loongson-2K Board Management Controller (BMC) Core Driver.
+ *
+ * Copyright (C) 2024-2025 Loongson Technology Corporation Limited.
+ *
+ * Authors:
+ * Chong Qiao <qiaochong@loongson.cn>
+ * Binbin Zhou <zhoubinbin@loongson.cn>
+ */
+
+#include <linux/aperture.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/kbd_kern.h>
+#include <linux/kernel.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/platform_data/simplefb.h>
+#include <linux/platform_device.h>
+#include <linux/stop_machine.h>
+#include <linux/vt_kern.h>
+
+/* LS2K BMC resources */
+#define LS2K_DISPLAY_RES_START (SZ_16M + SZ_2M)
+#define LS2K_IPMI_RES_SIZE 0x1C
+#define LS2K_IPMI0_RES_START (SZ_16M + 0xF00000)
+#define LS2K_IPMI1_RES_START (LS2K_IPMI0_RES_START + LS2K_IPMI_RES_SIZE)
+#define LS2K_IPMI2_RES_START (LS2K_IPMI1_RES_START + LS2K_IPMI_RES_SIZE)
+#define LS2K_IPMI3_RES_START (LS2K_IPMI2_RES_START + LS2K_IPMI_RES_SIZE)
+#define LS2K_IPMI4_RES_START (LS2K_IPMI3_RES_START + LS2K_IPMI_RES_SIZE)
+
+#define LS7A_PCI_CFG_SIZE 0x100
+
+/* LS7A bridge registers */
+#define LS7A_PCIE_PORT_CTL0 0x0
+#define LS7A_PCIE_PORT_STS1 0xC
+#define LS7A_GEN2_CTL 0x80C
+#define LS7A_SYMBOL_TIMER 0x71C
+
+/* Bits of LS7A_PCIE_PORT_CTL0 */
+#define LS2K_BMC_PCIE_LTSSM_ENABLE BIT(3)
+
+/* Bits of LS7A_PCIE_PORT_STS1 */
+#define LS2K_BMC_PCIE_LTSSM_STS GENMASK(5, 0)
+#define LS2K_BMC_PCIE_CONNECTED 0x11
+
+#define LS2K_BMC_PCIE_DELAY_US 1000
+#define LS2K_BMC_PCIE_TIMEOUT_US 1000000
+
+/* Bits of LS7A_GEN2_CTL */
+#define LS7A_GEN2_SPEED_CHANG BIT(17)
+#define LS7A_CONF_PHY_TX BIT(18)
+
+/* Bits of LS7A_SYMBOL_TIMER */
+#define LS7A_MASK_LEN_MATCH BIT(26)
+
+/* Interval between interrupts */
+#define LS2K_BMC_INT_INTERVAL (60 * HZ)
+
+/* Maximum time to wait for U-Boot and DDR to be ready, in ms. */
+#define LS2K_BMC_RESET_WAIT_TIME 10000
+
+/* Empirically determined value */
+#define LS7A_BAR0_CHECK_MAX_TIMES 2000
+
+#define PCI_REG_STRIDE 0x4
+
+#define LS2K_BMC_RESET_GPIO 14
+#define LOONGSON_GPIO_REG_BASE 0x1FE00500
+#define LOONGSON_GPIO_REG_SIZE 0x18
+#define LOONGSON_GPIO_OEN 0x0
+#define LOONGSON_GPIO_FUNC 0x4
+#define LOONGSON_GPIO_INTPOL 0x10
+#define LOONGSON_GPIO_INTEN 0x14
+
+#define LOONGSON_IO_INT_BASE 16
+#define LS2K_BMC_RESET_GPIO_INT_VEC (LS2K_BMC_RESET_GPIO % 8)
+#define LS2K_BMC_RESET_GPIO_GSI (LOONGSON_IO_INT_BASE + LS2K_BMC_RESET_GPIO_INT_VEC)
+
+enum {
+ LS2K_BMC_DISPLAY,
+ LS2K_BMC_IPMI0,
+ LS2K_BMC_IPMI1,
+ LS2K_BMC_IPMI2,
+ LS2K_BMC_IPMI3,
+ LS2K_BMC_IPMI4,
+};
+
+static struct resource ls2k_display_resources[] = {
+ DEFINE_RES_MEM_NAMED(LS2K_DISPLAY_RES_START, SZ_4M, "simpledrm-res"),
+};
+
+static struct resource ls2k_ipmi0_resources[] = {
+ DEFINE_RES_MEM_NAMED(LS2K_IPMI0_RES_START, LS2K_IPMI_RES_SIZE, "ipmi0-res"),
+};
+
+static struct resource ls2k_ipmi1_resources[] = {
+ DEFINE_RES_MEM_NAMED(LS2K_IPMI1_RES_START, LS2K_IPMI_RES_SIZE, "ipmi1-res"),
+};
+
+static struct resource ls2k_ipmi2_resources[] = {
+ DEFINE_RES_MEM_NAMED(LS2K_IPMI2_RES_START, LS2K_IPMI_RES_SIZE, "ipmi2-res"),
+};
+
+static struct resource ls2k_ipmi3_resources[] = {
+ DEFINE_RES_MEM_NAMED(LS2K_IPMI3_RES_START, LS2K_IPMI_RES_SIZE, "ipmi3-res"),
+};
+
+static struct resource ls2k_ipmi4_resources[] = {
+ DEFINE_RES_MEM_NAMED(LS2K_IPMI4_RES_START, LS2K_IPMI_RES_SIZE, "ipmi4-res"),
+};
+
+static struct mfd_cell ls2k_bmc_cells[] = {
+ [LS2K_BMC_DISPLAY] = {
+ .name = "simple-framebuffer",
+ .num_resources = ARRAY_SIZE(ls2k_display_resources),
+ .resources = ls2k_display_resources
+ },
+ [LS2K_BMC_IPMI0] = {
+ .name = "ls2k-ipmi-si",
+ .num_resources = ARRAY_SIZE(ls2k_ipmi0_resources),
+ .resources = ls2k_ipmi0_resources
+ },
+ [LS2K_BMC_IPMI1] = {
+ .name = "ls2k-ipmi-si",
+ .num_resources = ARRAY_SIZE(ls2k_ipmi1_resources),
+ .resources = ls2k_ipmi1_resources
+ },
+ [LS2K_BMC_IPMI2] = {
+ .name = "ls2k-ipmi-si",
+ .num_resources = ARRAY_SIZE(ls2k_ipmi2_resources),
+ .resources = ls2k_ipmi2_resources
+ },
+ [LS2K_BMC_IPMI3] = {
+ .name = "ls2k-ipmi-si",
+ .num_resources = ARRAY_SIZE(ls2k_ipmi3_resources),
+ .resources = ls2k_ipmi3_resources
+ },
+ [LS2K_BMC_IPMI4] = {
+ .name = "ls2k-ipmi-si",
+ .num_resources = ARRAY_SIZE(ls2k_ipmi4_resources),
+ .resources = ls2k_ipmi4_resources
+ },
+};
+
+/* Index of the BMC PCI configuration space to be restored at BMC reset. */
+struct ls2k_bmc_pci_data {
+ u32 pci_command;
+ u32 base_address0;
+ u32 interrupt_line;
+};
+
+/* Parent bridge (LS7A) PCI configuration space registers to be restored at BMC reset. */
+struct ls2k_bmc_bridge_pci_data {
+ u32 pci_command;
+ u32 base_address[6];
+ u32 rom_address;
+ u32 interrupt_line;
+ u32 msi_hi;
+ u32 msi_lo;
+ u32 devctl;
+ u32 linkcap;
+ u32 linkctl_sts;
+ u32 symbol_timer;
+ u32 gen2_ctrl;
+};
+
+struct ls2k_bmc_ddata {
+ struct device *dev;
+ struct work_struct bmc_reset_work;
+ struct ls2k_bmc_pci_data bmc_pci_data;
+ struct ls2k_bmc_bridge_pci_data bridge_pci_data;
+};
+
+static bool ls2k_bmc_bar0_addr_is_set(struct pci_dev *pdev)
+{
+ u32 addr;
+
+ pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &addr);
+
+ return addr & PCI_BASE_ADDRESS_MEM_MASK;
+}
+
+static bool ls2k_bmc_pcie_is_connected(struct pci_dev *parent, struct ls2k_bmc_ddata *ddata)
+{
+ void __iomem *base;
+ int val, ret;
+
+ base = pci_iomap(parent, 0, LS7A_PCI_CFG_SIZE);
+ if (!base)
+ return false;
+
+ val = readl(base + LS7A_PCIE_PORT_CTL0);
+ writel(val | LS2K_BMC_PCIE_LTSSM_ENABLE, base + LS7A_PCIE_PORT_CTL0);
+
+ ret = readl_poll_timeout_atomic(base + LS7A_PCIE_PORT_STS1, val,
+ (val & LS2K_BMC_PCIE_LTSSM_STS) == LS2K_BMC_PCIE_CONNECTED,
+ LS2K_BMC_PCIE_DELAY_US, LS2K_BMC_PCIE_TIMEOUT_US);
+ if (ret) {
+ pci_iounmap(parent, base);
+ dev_err(ddata->dev, "PCI-E training failed status=0x%x\n", val);
+ return false;
+ }
+
+ pci_iounmap(parent, base);
+ return true;
+}
+
+static void ls2k_bmc_restore_bridge_pci_data(struct pci_dev *parent, struct ls2k_bmc_ddata *ddata)
+{
+ int base, i = 0;
+
+ pci_write_config_dword(parent, PCI_COMMAND, ddata->bridge_pci_data.pci_command);
+
+ for (base = PCI_BASE_ADDRESS_0; base <= PCI_BASE_ADDRESS_5; base += PCI_REG_STRIDE, i++)
+ pci_write_config_dword(parent, base, ddata->bridge_pci_data.base_address[i]);
+
+ pci_write_config_dword(parent, PCI_ROM_ADDRESS, ddata->bridge_pci_data.rom_address);
+ pci_write_config_dword(parent, PCI_INTERRUPT_LINE, ddata->bridge_pci_data.interrupt_line);
+
+ pci_write_config_dword(parent, parent->msi_cap + PCI_MSI_ADDRESS_LO,
+ ddata->bridge_pci_data.msi_lo);
+ pci_write_config_dword(parent, parent->msi_cap + PCI_MSI_ADDRESS_HI,
+ ddata->bridge_pci_data.msi_hi);
+ pci_write_config_dword(parent, parent->pcie_cap + PCI_EXP_DEVCTL,
+ ddata->bridge_pci_data.devctl);
+ pci_write_config_dword(parent, parent->pcie_cap + PCI_EXP_LNKCAP,
+ ddata->bridge_pci_data.linkcap);
+ pci_write_config_dword(parent, parent->pcie_cap + PCI_EXP_LNKCTL,
+ ddata->bridge_pci_data.linkctl_sts);
+
+ pci_write_config_dword(parent, LS7A_GEN2_CTL, ddata->bridge_pci_data.gen2_ctrl);
+ pci_write_config_dword(parent, LS7A_SYMBOL_TIMER, ddata->bridge_pci_data.symbol_timer);
+}
+
+static int ls2k_bmc_recover_pci_data(void *data)
+{
+ struct ls2k_bmc_ddata *ddata = data;
+ struct pci_dev *pdev = to_pci_dev(ddata->dev);
+ struct pci_dev *parent = pdev->bus->self;
+ u32 i;
+
+ /*
+ * Clear the bus, io and mem resources of the PCI-E bridge to zero, so that
+ * the processor can not access the LS2K PCI-E port, to avoid crashing due to
+ * the lack of return signal from accessing the LS2K PCI-E port.
+ */
+ pci_write_config_dword(parent, PCI_BASE_ADDRESS_2, 0);
+ pci_write_config_dword(parent, PCI_BASE_ADDRESS_3, 0);
+ pci_write_config_dword(parent, PCI_BASE_ADDRESS_4, 0);
+
+ /*
+ * When the LS2K BMC is reset, the LS7A PCI-E port is also reset, and its PCI
+ * BAR0 register is cleared. Due to the time gap between the GPIO interrupt
+ * generation and the LS2K BMC reset, the LS7A PCI BAR0 register is read to
+ * determine whether the reset has begun.
+ */
+ for (i = LS7A_BAR0_CHECK_MAX_TIMES; i > 0; i--) {
+ if (!ls2k_bmc_bar0_addr_is_set(parent))
+ break;
+ mdelay(1);
+ }
+
+ if (i == 0)
+ return -ETIMEDOUT;
+
+ ls2k_bmc_restore_bridge_pci_data(parent, ddata);
+
+ /* Check if PCI-E is connected */
+ if (!ls2k_bmc_pcie_is_connected(parent, ddata))
+ return -EIO;
+
+ /* Waiting for U-Boot and DDR ready */
+ mdelay(LS2K_BMC_RESET_WAIT_TIME);
+ if (!ls2k_bmc_bar0_addr_is_set(parent))
+ return -EIO;
+
+ /* Restore LS2K BMC PCI-E config data */
+ pci_write_config_dword(pdev, PCI_COMMAND, ddata->bmc_pci_data.pci_command);
+ pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, ddata->bmc_pci_data.base_address0);
+ pci_write_config_dword(pdev, PCI_INTERRUPT_LINE, ddata->bmc_pci_data.interrupt_line);
+
+ return 0;
+}
+
+static void ls2k_bmc_events_fn(struct work_struct *work)
+{
+ struct ls2k_bmc_ddata *ddata = container_of(work, struct ls2k_bmc_ddata, bmc_reset_work);
+
+ /*
+ * The PCI-E link is lost while the BMC resets, so PCI-E accesses from the
+ * other CPUs are suspended via stop_machine() to prevent a crash.
+ */
+ stop_machine(ls2k_bmc_recover_pci_data, ddata, NULL);
+
+ if (IS_ENABLED(CONFIG_VT)) {
+ /* Re-push the display due to previous PCI-E loss. */
+ set_console(vt_move_to_console(MAX_NR_CONSOLES - 1, 1));
+ }
+}
+
+static irqreturn_t ls2k_bmc_interrupt(int irq, void *arg)
+{
+ struct ls2k_bmc_ddata *ddata = arg;
+ static unsigned long last_jiffies;
+
+ if (system_state != SYSTEM_RUNNING)
+ return IRQ_HANDLED;
+
+ /* Handle at most one reset event per LS2K_BMC_INT_INTERVAL. */
+ if (time_after(jiffies, last_jiffies + LS2K_BMC_INT_INTERVAL)) {
+ schedule_work(&ddata->bmc_reset_work);
+ last_jiffies = jiffies;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Saves the BMC parent device (LS7A) and its own PCI configuration space registers
+ * that need to be restored after BMC reset.
+ */
+static void ls2k_bmc_save_pci_data(struct pci_dev *pdev, struct ls2k_bmc_ddata *ddata)
+{
+ struct pci_dev *parent = pdev->bus->self;
+ int base, i = 0;
+
+ pci_read_config_dword(parent, PCI_COMMAND, &ddata->bridge_pci_data.pci_command);
+
+ for (base = PCI_BASE_ADDRESS_0; base <= PCI_BASE_ADDRESS_5; base += PCI_REG_STRIDE, i++)
+ pci_read_config_dword(parent, base, &ddata->bridge_pci_data.base_address[i]);
+
+ pci_read_config_dword(parent, PCI_ROM_ADDRESS, &ddata->bridge_pci_data.rom_address);
+ pci_read_config_dword(parent, PCI_INTERRUPT_LINE, &ddata->bridge_pci_data.interrupt_line);
+
+ pci_read_config_dword(parent, parent->msi_cap + PCI_MSI_ADDRESS_LO,
+ &ddata->bridge_pci_data.msi_lo);
+ pci_read_config_dword(parent, parent->msi_cap + PCI_MSI_ADDRESS_HI,
+ &ddata->bridge_pci_data.msi_hi);
+
+ pci_read_config_dword(parent, parent->pcie_cap + PCI_EXP_DEVCTL,
+ &ddata->bridge_pci_data.devctl);
+ pci_read_config_dword(parent, parent->pcie_cap + PCI_EXP_LNKCAP,
+ &ddata->bridge_pci_data.linkcap);
+ pci_read_config_dword(parent, parent->pcie_cap + PCI_EXP_LNKCTL,
+ &ddata->bridge_pci_data.linkctl_sts);
+
+ pci_read_config_dword(parent, LS7A_GEN2_CTL, &ddata->bridge_pci_data.gen2_ctrl);
+ ddata->bridge_pci_data.gen2_ctrl |= FIELD_PREP(LS7A_GEN2_SPEED_CHANG, 0x1) |
+ FIELD_PREP(LS7A_CONF_PHY_TX, 0x0);
+
+ pci_read_config_dword(parent, LS7A_SYMBOL_TIMER, &ddata->bridge_pci_data.symbol_timer);
+ ddata->bridge_pci_data.symbol_timer |= LS7A_MASK_LEN_MATCH;
+
+ pci_read_config_dword(pdev, PCI_COMMAND, &ddata->bmc_pci_data.pci_command);
+ pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &ddata->bmc_pci_data.base_address0);
+ pci_read_config_dword(pdev, PCI_INTERRUPT_LINE, &ddata->bmc_pci_data.interrupt_line);
+}
+
+static int ls2k_bmc_init(struct ls2k_bmc_ddata *ddata)
+{
+ struct pci_dev *pdev = to_pci_dev(ddata->dev);
+ void __iomem *gpio_base;
+ int gpio_irq, ret, val;
+
+ ls2k_bmc_save_pci_data(pdev, ddata);
+
+ INIT_WORK(&ddata->bmc_reset_work, ls2k_bmc_events_fn);
+
+ ret = devm_request_irq(&pdev->dev, pdev->irq, ls2k_bmc_interrupt,
+ IRQF_SHARED | IRQF_TRIGGER_FALLING, "ls2kbmc pcie", ddata);
+ if (ret) {
+ dev_err(ddata->dev, "Failed to request LS2KBMC PCI-E IRQ %d.\n", pdev->irq);
+ return ret;
+ }
+
+ gpio_base = ioremap(LOONGSON_GPIO_REG_BASE, LOONGSON_GPIO_REG_SIZE);
+ if (!gpio_base)
+ return -ENOMEM;
+
+ /* Disable GPIO output */
+ val = readl(gpio_base + LOONGSON_GPIO_OEN);
+ writel(val | BIT(LS2K_BMC_RESET_GPIO), gpio_base + LOONGSON_GPIO_OEN);
+
+ /* Enable GPIO functionality */
+ val = readl(gpio_base + LOONGSON_GPIO_FUNC);
+ writel(val & ~BIT(LS2K_BMC_RESET_GPIO), gpio_base + LOONGSON_GPIO_FUNC);
+
+ /* Set GPIO interrupts to low-level active */
+ val = readl(gpio_base + LOONGSON_GPIO_INTPOL);
+ writel(val & ~BIT(LS2K_BMC_RESET_GPIO), gpio_base + LOONGSON_GPIO_INTPOL);
+
+ /* Enable GPIO interrupts */
+ val = readl(gpio_base + LOONGSON_GPIO_INTEN);
+ writel(val | BIT(LS2K_BMC_RESET_GPIO), gpio_base + LOONGSON_GPIO_INTEN);
+
+ iounmap(gpio_base);
+
+ /*
+ * Since gpio_chip->to_irq is not implemented in the Loongson-3 GPIO driver,
+ * acpi_register_gsi() is used to obtain the GPIO IRQ. The GPIO interrupt is a
+ * watchdog interrupt that is triggered when the BMC resets.
+ */
+ gpio_irq = acpi_register_gsi(NULL, LS2K_BMC_RESET_GPIO_GSI, ACPI_EDGE_SENSITIVE,
+ ACPI_ACTIVE_LOW);
+ if (gpio_irq < 0)
+ return gpio_irq;
+
+ ret = devm_request_irq(ddata->dev, gpio_irq, ls2k_bmc_interrupt,
+ IRQF_SHARED | IRQF_TRIGGER_FALLING, "ls2kbmc gpio", ddata);
+ if (ret)
+ dev_err(ddata->dev, "Failed to request LS2KBMC GPIO IRQ %d.\n", gpio_irq);
+
+ acpi_unregister_gsi(LS2K_BMC_RESET_GPIO_GSI);
+ return ret;
+}
+
+/*
+ * The Loongson-2K BMC hardware currently has no I2C interface through which the display
+ * resolution could be negotiated, so the resolution is preset by writing a string such as
+ * "video=1280x1024-16@2M" into BMC memory.
+ */
+static int ls2k_bmc_parse_mode(struct pci_dev *pdev, struct simplefb_platform_data *pd)
+{
+ char *mode;
+ int depth, ret;
+
+ /* The last 16M of PCI BAR0 is used to store the resolution string. */
+ mode = devm_ioremap(&pdev->dev, pci_resource_start(pdev, 0) + SZ_16M, SZ_16M);
+ if (!mode)
+ return -ENOMEM;
+
+ /* The resolution field starts with the flag "video=". */
+ if (!strncmp(mode, "video=", 6))
+ mode = mode + 6;
+
+ ret = kstrtoint(strsep(&mode, "x"), 10, &pd->width);
+ if (ret)
+ return ret;
+
+ ret = kstrtoint(strsep(&mode, "-"), 10, &pd->height);
+ if (ret)
+ return ret;
+
+ ret = kstrtoint(strsep(&mode, "@"), 10, &depth);
+ if (ret)
+ return ret;
+
+ pd->stride = pd->width * depth / 8;
+ pd->format = depth == 32 ? "a8r8g8b8" : "r5g6b5";
+
+ return 0;
+}
+
+static int ls2k_bmc_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ struct simplefb_platform_data pd;
+ struct ls2k_bmc_ddata *ddata;
+ resource_size_t base;
+ int ret;
+
+ ret = pci_enable_device(dev);
+ if (ret)
+ return ret;
+
+ ddata = devm_kzalloc(&dev->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata) {
+ ret = -ENOMEM;
+ goto disable_pci;
+ }
+
+ ddata->dev = &dev->dev;
+
+ ret = ls2k_bmc_init(ddata);
+ if (ret)
+ goto disable_pci;
+
+ ret = ls2k_bmc_parse_mode(dev, &pd);
+ if (ret)
+ goto disable_pci;
+
+ ls2k_bmc_cells[LS2K_BMC_DISPLAY].platform_data = &pd;
+ ls2k_bmc_cells[LS2K_BMC_DISPLAY].pdata_size = sizeof(pd);
+ base = dev->resource[0].start + LS2K_DISPLAY_RES_START;
+
+ /* Remove conflicting efifb device */
+ ret = aperture_remove_conflicting_devices(base, SZ_4M, "simple-framebuffer");
+ if (ret) {
+ dev_err(&dev->dev, "Failed to removed firmware framebuffers: %d\n", ret);
+ goto disable_pci;
+ }
+
+ return devm_mfd_add_devices(&dev->dev, PLATFORM_DEVID_AUTO,
+ ls2k_bmc_cells, ARRAY_SIZE(ls2k_bmc_cells),
+ &dev->resource[0], 0, NULL);
+
+disable_pci:
+ pci_disable_device(dev);
+ return ret;
+}
+
+static void ls2k_bmc_remove(struct pci_dev *dev)
+{
+ pci_disable_device(dev);
+}
+
+static const struct pci_device_id ls2k_bmc_devices[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_LOONGSON, 0x1a05) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, ls2k_bmc_devices);
+
+static struct pci_driver ls2k_bmc_driver = {
+ .name = "ls2k-bmc",
+ .id_table = ls2k_bmc_devices,
+ .probe = ls2k_bmc_probe,
+ .remove = ls2k_bmc_remove,
+};
+module_pci_driver(ls2k_bmc_driver);
+
+MODULE_DESCRIPTION("Loongson-2K Board Management Controller (BMC) Core driver");
+MODULE_AUTHOR("Loongson Technology Corporation Limited");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/macsmc.c b/drivers/mfd/macsmc.c
index 870c8b2028a8..e6cdae221f1d 100644
--- a/drivers/mfd/macsmc.c
+++ b/drivers/mfd/macsmc.c
@@ -429,7 +429,7 @@ static int apple_smc_probe(struct platform_device *pdev)
ret = devm_add_action_or_reset(dev, apple_smc_rtkit_shutdown, smc);
if (ret)
- return dev_err_probe(dev, ret, "Failed to register rtkit shutdown action");
+ return ret;
ret = apple_rtkit_start_ep(smc->rtk, SMC_ENDPOINT);
if (ret)
@@ -465,7 +465,7 @@ static int apple_smc_probe(struct platform_device *pdev)
apple_smc_write_flag(smc, SMC_KEY(NTAP), true);
ret = devm_add_action_or_reset(dev, apple_smc_disable_notifications, smc);
if (ret)
- return dev_err_probe(dev, ret, "Failed to register notification disable action");
+ return ret;
ret = devm_mfd_add_devices(smc->dev, PLATFORM_DEVID_NONE,
apple_smc_devs, ARRAY_SIZE(apple_smc_devs),
@@ -478,6 +478,7 @@ static int apple_smc_probe(struct platform_device *pdev)
}
static const struct of_device_id apple_smc_of_match[] = {
+ { .compatible = "apple,t8103-smc" },
{ .compatible = "apple,smc" },
{},
};
diff --git a/drivers/mfd/madera-core.c b/drivers/mfd/madera-core.c
index bdbd5bfc9714..2f74a8c644a3 100644
--- a/drivers/mfd/madera-core.c
+++ b/drivers/mfd/madera-core.c
@@ -456,7 +456,7 @@ int madera_dev_init(struct madera *madera)
struct device *dev = madera->dev;
unsigned int hwid;
int (*patch_fn)(struct madera *) = NULL;
- const struct mfd_cell *mfd_devs;
+ const struct mfd_cell *mfd_devs = NULL;
int n_devs = 0;
int i, ret;
@@ -670,7 +670,7 @@ int madera_dev_init(struct madera *madera)
goto err_reset;
}
- if (!n_devs) {
+ if (!n_devs || !mfd_devs) {
dev_err(madera->dev, "Device ID 0x%x not a %s\n", hwid,
madera->type_name);
ret = -ENODEV;
diff --git a/drivers/mfd/max7360.c b/drivers/mfd/max7360.c
new file mode 100644
index 000000000000..5ee459c490ec
--- /dev/null
+++ b/drivers/mfd/max7360.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Maxim MAX7360 Core Driver
+ *
+ * Copyright 2025 Bootlin
+ *
+ * Authors:
+ * Kamel Bouhara <kamel.bouhara@bootlin.com>
+ * Mathieu Dubois-Briand <mathieu.dubois-briand@bootlin.com>
+ */
+
+#include <linux/array_size.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/device/devres.h>
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/max7360.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+static const struct mfd_cell max7360_cells[] = {
+ { .name = "max7360-pinctrl" },
+ { .name = "max7360-pwm" },
+ { .name = "max7360-keypad" },
+ { .name = "max7360-rotary" },
+ {
+ .name = "max7360-gpo",
+ .of_compatible = "maxim,max7360-gpo",
+ },
+ {
+ .name = "max7360-gpio",
+ .of_compatible = "maxim,max7360-gpio",
+ },
+};
+
+static const struct regmap_range max7360_volatile_ranges[] = {
+ regmap_reg_range(MAX7360_REG_KEYFIFO, MAX7360_REG_KEYFIFO),
+ regmap_reg_range(MAX7360_REG_I2C_TIMEOUT, MAX7360_REG_RTR_CNT),
+};
+
+static const struct regmap_access_table max7360_volatile_table = {
+ .yes_ranges = max7360_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(max7360_volatile_ranges),
+};
+
+static const struct regmap_config max7360_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX7360_REG_PWMCFG(MAX7360_PORT_PWM_COUNT - 1),
+ .volatile_table = &max7360_volatile_table,
+ .cache_type = REGCACHE_MAPLE,
+};
+
+static int max7360_mask_irqs(struct regmap *regmap)
+{
+ struct device *dev = regmap_get_device(regmap);
+ unsigned int val;
+ int ret;
+
+ /*
+ * GPIO/PWM interrupts are not masked on reset: as the MAX7360 "INTI"
+ * interrupt line is shared between GPIOs and rotary encoder, this could
+ * result in repeated spurious interrupts on the rotary encoder driver
+ * if the GPIO driver is not loaded. Mask them now to avoid this
+ * situation.
+ */
+ for (unsigned int i = 0; i < MAX7360_PORT_PWM_COUNT; i++) {
+ ret = regmap_write_bits(regmap, MAX7360_REG_PWMCFG(i),
+ MAX7360_PORT_CFG_INTERRUPT_MASK,
+ MAX7360_PORT_CFG_INTERRUPT_MASK);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to write MAX7360 port configuration\n");
+ }
+
+ /* Read GPIO in register, to ACK any pending IRQ. */
+ ret = regmap_read(regmap, MAX7360_REG_GPIOIN, &val);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read GPIO values\n");
+
+ return 0;
+}
+
+static int max7360_reset(struct regmap *regmap)
+{
+ struct device *dev = regmap_get_device(regmap);
+ int ret;
+
+ ret = regmap_write(regmap, MAX7360_REG_GPIOCFG, MAX7360_GPIO_CFG_GPIO_RST);
+ if (ret) {
+ dev_err(dev, "Failed to reset GPIO configuration: %x\n", ret);
+ return ret;
+ }
+
+ ret = regcache_drop_region(regmap, MAX7360_REG_GPIOCFG, MAX7360_REG_GPIO_LAST);
+ if (ret) {
+ dev_err(dev, "Failed to drop regmap cache: %x\n", ret);
+ return ret;
+ }
+
+ ret = regmap_write(regmap, MAX7360_REG_SLEEP, 0);
+ if (ret) {
+ dev_err(dev, "Failed to reset autosleep configuration: %x\n", ret);
+ return ret;
+ }
+
+ ret = regmap_write(regmap, MAX7360_REG_DEBOUNCE, 0);
+ if (ret)
+ dev_err(dev, "Failed to reset GPO port count: %x\n", ret);
+
+ return ret;
+}
+
+static int max7360_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct regmap *regmap;
+ int ret;
+
+ regmap = devm_regmap_init_i2c(client, &max7360_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap), "Failed to initialise regmap\n");
+
+ ret = max7360_reset(regmap);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to reset device\n");
+
+ /* Get the device out of shutdown mode. */
+ ret = regmap_write_bits(regmap, MAX7360_REG_GPIOCFG,
+ MAX7360_GPIO_CFG_GPIO_EN,
+ MAX7360_GPIO_CFG_GPIO_EN);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable GPIO and PWM module\n");
+
+ ret = max7360_mask_irqs(regmap);
+ if (ret)
+ return dev_err_probe(dev, ret, "Could not mask interrupts\n");
+
+ ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE,
+ max7360_cells, ARRAY_SIZE(max7360_cells),
+ NULL, 0, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register child devices\n");
+
+ return 0;
+}
+
+static const struct of_device_id max7360_dt_match[] = {
+ { .compatible = "maxim,max7360" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, max7360_dt_match);
+
+static struct i2c_driver max7360_driver = {
+ .driver = {
+ .name = "max7360",
+ .of_match_table = max7360_dt_match,
+ },
+ .probe = max7360_probe,
+};
+module_i2c_driver(max7360_driver);
+
+MODULE_DESCRIPTION("Maxim MAX7360 I2C IO Expander core driver");
+MODULE_AUTHOR("Kamel Bouhara <kamel.bouhara@bootlin.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/max77705.c b/drivers/mfd/max77705.c
index 6b263bacb8c2..e1a9bfd65856 100644
--- a/drivers/mfd/max77705.c
+++ b/drivers/mfd/max77705.c
@@ -61,21 +61,21 @@ static const struct regmap_config max77705_regmap_config = {
.max_register = MAX77705_PMIC_REG_USBC_RESET,
};
-static const struct regmap_irq max77705_topsys_irqs[] = {
- { .mask = MAX77705_SYSTEM_IRQ_BSTEN_INT, },
- { .mask = MAX77705_SYSTEM_IRQ_SYSUVLO_INT, },
- { .mask = MAX77705_SYSTEM_IRQ_SYSOVLO_INT, },
- { .mask = MAX77705_SYSTEM_IRQ_TSHDN_INT, },
- { .mask = MAX77705_SYSTEM_IRQ_TM_INT, },
+static const struct regmap_irq max77705_irqs[] = {
+ { .mask = MAX77705_SRC_IRQ_CHG, },
+ { .mask = MAX77705_SRC_IRQ_TOP, },
+ { .mask = MAX77705_SRC_IRQ_FG, },
+ { .mask = MAX77705_SRC_IRQ_USBC, },
};
-static const struct regmap_irq_chip max77705_topsys_irq_chip = {
- .name = "max77705-topsys",
- .status_base = MAX77705_PMIC_REG_SYSTEM_INT,
- .mask_base = MAX77705_PMIC_REG_SYSTEM_INT_MASK,
+static const struct regmap_irq_chip max77705_irq_chip = {
+ .name = "max77705",
+ .status_base = MAX77705_PMIC_REG_INTSRC,
+ .ack_base = MAX77705_PMIC_REG_INTSRC,
+ .mask_base = MAX77705_PMIC_REG_INTSRC_MASK,
.num_regs = 1,
- .irqs = max77705_topsys_irqs,
- .num_irqs = ARRAY_SIZE(max77705_topsys_irqs),
+ .irqs = max77705_irqs,
+ .num_irqs = ARRAY_SIZE(max77705_irqs),
};
static int max77705_i2c_probe(struct i2c_client *i2c)
@@ -108,21 +108,17 @@ static int max77705_i2c_probe(struct i2c_client *i2c)
if (pmic_rev != MAX77705_PASS3)
return dev_err_probe(dev, -ENODEV, "Rev.0x%x is not tested\n", pmic_rev);
+ /* Active Discharge Enable */
+ regmap_update_bits(max77705->regmap, MAX77705_PMIC_REG_MAINCTRL1, 1, 1);
+
ret = devm_regmap_add_irq_chip(dev, max77705->regmap,
i2c->irq,
- IRQF_ONESHOT | IRQF_SHARED, 0,
- &max77705_topsys_irq_chip,
+ IRQF_ONESHOT, 0,
+ &max77705_irq_chip,
&irq_data);
if (ret)
return dev_err_probe(dev, ret, "Failed to add IRQ chip\n");
- /* Unmask interrupts from all blocks in interrupt source register */
- ret = regmap_update_bits(max77705->regmap,
- MAX77705_PMIC_REG_INTSRC_MASK,
- MAX77705_SRC_IRQ_ALL, (unsigned int)~MAX77705_SRC_IRQ_ALL);
- if (ret < 0)
- return dev_err_probe(dev, ret, "Could not unmask interrupts in INTSRC\n");
-
domain = regmap_irq_get_domain(irq_data);
ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE,
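
With the max77705 interrupt handling moved to the top-level interrupt source register, consumers resolve Linux IRQ numbers through the regmap-irq domain. As a sketch (the consumer wiring is outside this hunk, and the index below is hypothetical), a sub-device interested in the charger line could do:

#include <linux/errno.h>
#include <linux/regmap.h>

/* Hypothetical index of the charger line in the chip's irqs[] table. */
#define DEMO_IRQ_CHG	0

static int demo_get_chg_virq(struct regmap_irq_chip_data *irq_data)
{
	/* Translate the per-chip index into a Linux virtual IRQ number. */
	int virq = regmap_irq_get_virq(irq_data, DEMO_IRQ_CHG);

	return virq > 0 ? virq : -EINVAL;
}
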
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index ffe96b40368e..7ba8ed1dfde3 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -438,7 +438,7 @@ static int max8997_suspend(struct device *dev)
disable_irq(max8997->irq);
if (device_may_wakeup(dev))
- irq_set_irq_wake(max8997->irq, 1);
+ enable_irq_wake(max8997->irq);
return 0;
}
@@ -448,7 +448,7 @@ static int max8997_resume(struct device *dev)
struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
if (device_may_wakeup(dev))
- irq_set_irq_wake(max8997->irq, 0);
+ disable_irq_wake(max8997->irq);
enable_irq(max8997->irq);
return max8997_irq_resume(max8997);
}
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index 6ba27171da28..eb13bbaeda55 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -234,7 +234,7 @@ static int max8998_suspend(struct device *dev)
struct max8998_dev *max8998 = i2c_get_clientdata(i2c);
if (device_may_wakeup(dev))
- irq_set_irq_wake(max8998->irq, 1);
+ enable_irq_wake(max8998->irq);
return 0;
}
@@ -244,7 +244,7 @@ static int max8998_resume(struct device *dev)
struct max8998_dev *max8998 = i2c_get_clientdata(i2c);
if (device_may_wakeup(dev))
- irq_set_irq_wake(max8998->irq, 0);
+ disable_irq_wake(max8998->irq);
/*
* In LP3974, if IRQ registers are not "read & clear"
* when it's set during sleep, the interrupt becomes
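
The max8997/max8998 conversions prefer enable_irq_wake()/disable_irq_wake(), thin wrappers around irq_set_irq_wake() that make the arm/disarm intent explicit. The resulting suspend/resume pairing follows the usual shape; a sketch with a hypothetical driver struct:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

struct demo_chip {
	int irq;
};

static int demo_suspend(struct device *dev)
{
	struct demo_chip *chip = dev_get_drvdata(dev);

	disable_irq(chip->irq);
	if (device_may_wakeup(dev))
		enable_irq_wake(chip->irq);	/* arm the IRQ as a wake source */
	return 0;
}

static int demo_resume(struct device *dev)
{
	struct demo_chip *chip = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		disable_irq_wake(chip->irq);	/* balances the suspend-side call */
	enable_irq(chip->irq);
	return 0;
}
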
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 76bd316a50af..7d14a1e7631e 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -131,6 +131,7 @@ allocate_of_node:
of_entry->np = np;
list_add_tail(&of_entry->list, &mfd_of_node_list);
+ of_node_get(np);
device_set_node(&pdev->dev, of_fwnode_handle(np));
#endif
return 0;
diff --git a/drivers/mfd/nct6694.c b/drivers/mfd/nct6694.c
new file mode 100644
index 000000000000..308b2fda3055
--- /dev/null
+++ b/drivers/mfd/nct6694.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 Nuvoton Technology Corp.
+ *
+ * Nuvoton NCT6694 core driver using USB interface to provide
+ * access to the NCT6694 hardware monitoring and control features.
+ *
+ * The NCT6694 is an integrated controller that provides GPIO, I2C,
+ * CAN, WDT, HWMON and RTC management.
+ */
+
+#include <linux/bits.h>
+#include <linux/interrupt.h>
+#include <linux/idr.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/nct6694.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/usb.h>
+
+static const struct mfd_cell nct6694_devs[] = {
+ MFD_CELL_NAME("nct6694-gpio"),
+ MFD_CELL_NAME("nct6694-gpio"),
+ MFD_CELL_NAME("nct6694-gpio"),
+ MFD_CELL_NAME("nct6694-gpio"),
+ MFD_CELL_NAME("nct6694-gpio"),
+ MFD_CELL_NAME("nct6694-gpio"),
+ MFD_CELL_NAME("nct6694-gpio"),
+ MFD_CELL_NAME("nct6694-gpio"),
+ MFD_CELL_NAME("nct6694-gpio"),
+ MFD_CELL_NAME("nct6694-gpio"),
+ MFD_CELL_NAME("nct6694-gpio"),
+ MFD_CELL_NAME("nct6694-gpio"),
+ MFD_CELL_NAME("nct6694-gpio"),
+ MFD_CELL_NAME("nct6694-gpio"),
+ MFD_CELL_NAME("nct6694-gpio"),
+ MFD_CELL_NAME("nct6694-gpio"),
+
+ MFD_CELL_NAME("nct6694-i2c"),
+ MFD_CELL_NAME("nct6694-i2c"),
+ MFD_CELL_NAME("nct6694-i2c"),
+ MFD_CELL_NAME("nct6694-i2c"),
+ MFD_CELL_NAME("nct6694-i2c"),
+ MFD_CELL_NAME("nct6694-i2c"),
+
+ MFD_CELL_NAME("nct6694-canfd"),
+ MFD_CELL_NAME("nct6694-canfd"),
+
+ MFD_CELL_NAME("nct6694-wdt"),
+ MFD_CELL_NAME("nct6694-wdt"),
+
+ MFD_CELL_NAME("nct6694-hwmon"),
+
+ MFD_CELL_NAME("nct6694-rtc"),
+};
+
+static int nct6694_response_err_handling(struct nct6694 *nct6694, unsigned char err_status)
+{
+ switch (err_status) {
+ case NCT6694_NO_ERROR:
+ return 0;
+ case NCT6694_NOT_SUPPORT_ERROR:
+ dev_err(nct6694->dev, "Command is not supported!\n");
+ break;
+ case NCT6694_NO_RESPONSE_ERROR:
+ dev_warn(nct6694->dev, "Command received no response!\n");
+ break;
+ case NCT6694_TIMEOUT_ERROR:
+ dev_warn(nct6694->dev, "Command timed out!\n");
+ break;
+ case NCT6694_PENDING:
+ dev_err(nct6694->dev, "Command is pending!\n");
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return -EIO;
+}
+
+/**
+ * nct6694_read_msg() - Read message from NCT6694 device
+ * @nct6694: NCT6694 device pointer
+ * @cmd_hd: command header structure
+ * @buf: buffer to store the response data
+ *
+ * Sends a command to the NCT6694 device and reads the response.
+ * The command header is specified in @cmd_hd, and the response
+ * data is stored in @buf.
+ *
+ * Return: Negative value on error or 0 on success.
+ */
+int nct6694_read_msg(struct nct6694 *nct6694, const struct nct6694_cmd_header *cmd_hd, void *buf)
+{
+ union nct6694_usb_msg *msg = nct6694->usb_msg;
+ struct usb_device *udev = nct6694->udev;
+ int tx_len, rx_len, ret;
+
+ guard(mutex)(&nct6694->access_lock);
+
+ memcpy(&msg->cmd_header, cmd_hd, sizeof(*cmd_hd));
+ msg->cmd_header.hctrl = NCT6694_HCTRL_GET;
+
+ /* Send command packet to USB device */
+ ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, NCT6694_BULK_OUT_EP), &msg->cmd_header,
+ sizeof(*msg), &tx_len, NCT6694_URB_TIMEOUT);
+ if (ret)
+ return ret;
+
+ /* Receive response packet from USB device */
+ ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, NCT6694_BULK_IN_EP), &msg->response_header,
+ sizeof(*msg), &rx_len, NCT6694_URB_TIMEOUT);
+ if (ret)
+ return ret;
+
+ /* Receive data packet from USB device */
+ ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, NCT6694_BULK_IN_EP), buf,
+ le16_to_cpu(cmd_hd->len), &rx_len, NCT6694_URB_TIMEOUT);
+ if (ret)
+ return ret;
+
+ if (rx_len != le16_to_cpu(cmd_hd->len)) {
+ dev_err(nct6694->dev, "Expected received length %d, but got %d\n",
+ le16_to_cpu(cmd_hd->len), rx_len);
+ return -EIO;
+ }
+
+ return nct6694_response_err_handling(nct6694, msg->response_header.sts);
+}
+EXPORT_SYMBOL_GPL(nct6694_read_msg);
+
+/**
+ * nct6694_write_msg() - Write message to NCT6694 device
+ * @nct6694: NCT6694 device pointer
+ * @cmd_hd: command header structure
+ * @buf: buffer containing the data to be sent
+ *
+ * Sends a command to the NCT6694 device and writes the data
+ * from @buf. The command header is specified in @cmd_hd.
+ *
+ * Return: Negative value on error or 0 on success.
+ */
+int nct6694_write_msg(struct nct6694 *nct6694, const struct nct6694_cmd_header *cmd_hd, void *buf)
+{
+ union nct6694_usb_msg *msg = nct6694->usb_msg;
+ struct usb_device *udev = nct6694->udev;
+ int tx_len, rx_len, ret;
+
+ guard(mutex)(&nct6694->access_lock);
+
+ memcpy(&msg->cmd_header, cmd_hd, sizeof(*cmd_hd));
+ msg->cmd_header.hctrl = NCT6694_HCTRL_SET;
+
+ /* Send command packet to USB device */
+ ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, NCT6694_BULK_OUT_EP), &msg->cmd_header,
+ sizeof(*msg), &tx_len, NCT6694_URB_TIMEOUT);
+ if (ret)
+ return ret;
+
+ /* Send data packet to USB device */
+ ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, NCT6694_BULK_OUT_EP), buf,
+ le16_to_cpu(cmd_hd->len), &tx_len, NCT6694_URB_TIMEOUT);
+ if (ret)
+ return ret;
+
+ /* Receive response packet from USB device */
+ ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, NCT6694_BULK_IN_EP), &msg->response_header,
+ sizeof(*msg), &rx_len, NCT6694_URB_TIMEOUT);
+ if (ret)
+ return ret;
+
+ /* Receive data packet from USB device */
+ ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, NCT6694_BULK_IN_EP), buf,
+ le16_to_cpu(cmd_hd->len), &rx_len, NCT6694_URB_TIMEOUT);
+ if (ret)
+ return ret;
+
+ if (rx_len != le16_to_cpu(cmd_hd->len)) {
+ dev_err(nct6694->dev, "Expected transmitted length %d, but got %d\n",
+ le16_to_cpu(cmd_hd->len), rx_len);
+ return -EIO;
+ }
+
+ return nct6694_response_err_handling(nct6694, msg->response_header.sts);
+}
+EXPORT_SYMBOL_GPL(nct6694_write_msg);
+
+static void usb_int_callback(struct urb *urb)
+{
+ struct nct6694 *nct6694 = urb->context;
+ __le32 *status_le = urb->transfer_buffer;
+ u32 int_status;
+ int ret;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ return;
+ default:
+ goto resubmit;
+ }
+
+ int_status = le32_to_cpu(*status_le);
+
+ while (int_status) {
+ int irq = __ffs(int_status);
+
+ generic_handle_irq_safe(irq_find_mapping(nct6694->domain, irq));
+ int_status &= ~BIT(irq);
+ }
+
+resubmit:
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret)
+ dev_warn(nct6694->dev, "Failed to resubmit urb, status %pe", ERR_PTR(ret));
+}
+
+static void nct6694_irq_enable(struct irq_data *data)
+{
+ struct nct6694 *nct6694 = irq_data_get_irq_chip_data(data);
+ irq_hw_number_t hwirq = irqd_to_hwirq(data);
+
+ guard(spinlock_irqsave)(&nct6694->irq_lock);
+
+ nct6694->irq_enable |= BIT(hwirq);
+}
+
+static void nct6694_irq_disable(struct irq_data *data)
+{
+ struct nct6694 *nct6694 = irq_data_get_irq_chip_data(data);
+ irq_hw_number_t hwirq = irqd_to_hwirq(data);
+
+ guard(spinlock_irqsave)(&nct6694->irq_lock);
+
+ nct6694->irq_enable &= ~BIT(hwirq);
+}
+
+static const struct irq_chip nct6694_irq_chip = {
+ .name = "nct6694-irq",
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+ .irq_enable = nct6694_irq_enable,
+ .irq_disable = nct6694_irq_disable,
+};
+
+static int nct6694_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+ struct nct6694 *nct6694 = d->host_data;
+
+ irq_set_chip_data(irq, nct6694);
+ irq_set_chip_and_handler(irq, &nct6694_irq_chip, handle_simple_irq);
+
+ return 0;
+}
+
+static void nct6694_irq_domain_unmap(struct irq_domain *d, unsigned int irq)
+{
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
+}
+
+static const struct irq_domain_ops nct6694_irq_domain_ops = {
+ .map = nct6694_irq_domain_map,
+ .unmap = nct6694_irq_domain_unmap,
+};
+
+static int nct6694_usb_probe(struct usb_interface *iface,
+ const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(iface);
+ struct usb_endpoint_descriptor *int_endpoint;
+ struct usb_host_interface *interface;
+ struct device *dev = &iface->dev;
+ struct nct6694 *nct6694;
+ int ret;
+
+ nct6694 = devm_kzalloc(dev, sizeof(*nct6694), GFP_KERNEL);
+ if (!nct6694)
+ return -ENOMEM;
+
+ nct6694->usb_msg = devm_kzalloc(dev, sizeof(union nct6694_usb_msg), GFP_KERNEL);
+ if (!nct6694->usb_msg)
+ return -ENOMEM;
+
+ nct6694->int_buffer = devm_kzalloc(dev, sizeof(*nct6694->int_buffer), GFP_KERNEL);
+ if (!nct6694->int_buffer)
+ return -ENOMEM;
+
+ nct6694->int_in_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!nct6694->int_in_urb)
+ return -ENOMEM;
+
+ nct6694->domain = irq_domain_create_simple(NULL, NCT6694_NR_IRQS, 0,
+ &nct6694_irq_domain_ops,
+ nct6694);
+ if (!nct6694->domain) {
+ ret = -ENODEV;
+ goto err_urb;
+ }
+
+ nct6694->dev = dev;
+ nct6694->udev = udev;
+
+ ida_init(&nct6694->gpio_ida);
+ ida_init(&nct6694->i2c_ida);
+ ida_init(&nct6694->canfd_ida);
+ ida_init(&nct6694->wdt_ida);
+
+ spin_lock_init(&nct6694->irq_lock);
+
+ ret = devm_mutex_init(dev, &nct6694->access_lock);
+ if (ret)
+ goto err_ida;
+
+ interface = iface->cur_altsetting;
+
+ int_endpoint = &interface->endpoint[0].desc;
+ if (!usb_endpoint_is_int_in(int_endpoint)) {
+ ret = -ENODEV;
+ goto err_ida;
+ }
+
+ usb_fill_int_urb(nct6694->int_in_urb, udev, usb_rcvintpipe(udev, NCT6694_INT_IN_EP),
+ nct6694->int_buffer, sizeof(*nct6694->int_buffer), usb_int_callback,
+ nct6694, int_endpoint->bInterval);
+
+ ret = usb_submit_urb(nct6694->int_in_urb, GFP_KERNEL);
+ if (ret)
+ goto err_ida;
+
+ usb_set_intfdata(iface, nct6694);
+
+ ret = mfd_add_hotplug_devices(dev, nct6694_devs, ARRAY_SIZE(nct6694_devs));
+ if (ret)
+ goto err_mfd;
+
+ return 0;
+
+err_mfd:
+ usb_kill_urb(nct6694->int_in_urb);
+err_ida:
+ ida_destroy(&nct6694->wdt_ida);
+ ida_destroy(&nct6694->canfd_ida);
+ ida_destroy(&nct6694->i2c_ida);
+ ida_destroy(&nct6694->gpio_ida);
+ irq_domain_remove(nct6694->domain);
+err_urb:
+ usb_free_urb(nct6694->int_in_urb);
+ return ret;
+}
+
+static void nct6694_usb_disconnect(struct usb_interface *iface)
+{
+ struct nct6694 *nct6694 = usb_get_intfdata(iface);
+
+ mfd_remove_devices(nct6694->dev);
+ usb_kill_urb(nct6694->int_in_urb);
+ ida_destroy(&nct6694->wdt_ida);
+ ida_destroy(&nct6694->canfd_ida);
+ ida_destroy(&nct6694->i2c_ida);
+ ida_destroy(&nct6694->gpio_ida);
+ irq_domain_remove(nct6694->domain);
+ usb_free_urb(nct6694->int_in_urb);
+}
+
+static const struct usb_device_id nct6694_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(NCT6694_VENDOR_ID, NCT6694_PRODUCT_ID, 0xFF, 0x00, 0x00) },
+ { }
+};
+MODULE_DEVICE_TABLE(usb, nct6694_ids);
+
+static struct usb_driver nct6694_usb_driver = {
+ .name = "nct6694",
+ .id_table = nct6694_ids,
+ .probe = nct6694_usb_probe,
+ .disconnect = nct6694_usb_disconnect,
+};
+module_usb_driver(nct6694_usb_driver);
+
+MODULE_DESCRIPTION("Nuvoton NCT6694 core driver");
+MODULE_AUTHOR("Ming Yu <tmyu0@nuvoton.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/qnap-mcu.c b/drivers/mfd/qnap-mcu.c
index 89a8a1913d42..4ec1f4cf902f 100644
--- a/drivers/mfd/qnap-mcu.c
+++ b/drivers/mfd/qnap-mcu.c
@@ -150,40 +150,40 @@ int qnap_mcu_exec(struct qnap_mcu *mcu,
size_t length = reply_data_size + QNAP_MCU_CHECKSUM_SIZE;
struct qnap_mcu_reply *reply = &mcu->reply;
int ret = 0;
+ u8 crc;
if (length > sizeof(rx)) {
dev_err(&mcu->serdev->dev, "expected data too big for receive buffer");
return -EINVAL;
}
- mutex_lock(&mcu->bus_lock);
+ guard(mutex)(&mcu->bus_lock);
reply->data = rx;
reply->length = length;
reply->received = 0;
reinit_completion(&reply->done);
- qnap_mcu_write(mcu, cmd_data, cmd_data_size);
+ ret = qnap_mcu_write(mcu, cmd_data, cmd_data_size);
+ if (ret < 0)
+ return ret;
serdev_device_wait_until_sent(mcu->serdev, msecs_to_jiffies(QNAP_MCU_TIMEOUT_MS));
if (!wait_for_completion_timeout(&reply->done, msecs_to_jiffies(QNAP_MCU_TIMEOUT_MS))) {
dev_err(&mcu->serdev->dev, "Command timeout\n");
- ret = -ETIMEDOUT;
- } else {
- u8 crc = qnap_mcu_csum(rx, reply_data_size);
-
- if (crc != rx[reply_data_size]) {
- dev_err(&mcu->serdev->dev,
- "Invalid Checksum received\n");
- ret = -EIO;
- } else {
- memcpy(reply_data, rx, reply_data_size);
- }
+ return -ETIMEDOUT;
}
- mutex_unlock(&mcu->bus_lock);
- return ret;
+ crc = qnap_mcu_csum(rx, reply_data_size);
+ if (crc != rx[reply_data_size]) {
+ dev_err(&mcu->serdev->dev, "Invalid Checksum received\n");
+ return -EIO;
+ }
+
+ memcpy(reply_data, rx, reply_data_size);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(qnap_mcu_exec);
@@ -247,6 +247,14 @@ static int qnap_mcu_power_off(struct sys_off_data *data)
return NOTIFY_DONE;
}
+static const struct qnap_mcu_variant qnap_ts233_mcu = {
+ .baud_rate = 115200,
+ .num_drives = 2,
+ .fan_pwm_min = 51, /* Specified in original model.conf */
+ .fan_pwm_max = 255,
+ .usb_led = true,
+};
+
static const struct qnap_mcu_variant qnap_ts433_mcu = {
.baud_rate = 115200,
.num_drives = 4,
@@ -319,6 +327,7 @@ static int qnap_mcu_probe(struct serdev_device *serdev)
}
static const struct of_device_id qnap_mcu_dt_ids[] = {
+ { .compatible = "qnap,ts233-mcu", .data = &qnap_ts233_mcu },
{ .compatible = "qnap,ts433-mcu", .data = &qnap_ts433_mcu },
{ /* sentinel */ }
};
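
qnap_mcu_exec() now relies on the scope-based guard(mutex)() from <linux/cleanup.h>: the lock is released automatically on every return path, which is what lets the early returns above drop the manual mutex_unlock() calls. A minimal sketch of the idiom:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static int demo_count;

static int demo_bump(bool fail)
{
	guard(mutex)(&demo_lock);	/* unlocked at every return below */

	if (fail)
		return -EIO;		/* no explicit mutex_unlock() needed */

	return ++demo_count;
}
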
diff --git a/drivers/mfd/rohm-bd71828.c b/drivers/mfd/rohm-bd71828.c
index a14b7aa69c3c..84a64c3b9c9f 100644
--- a/drivers/mfd/rohm-bd71828.c
+++ b/drivers/mfd/rohm-bd71828.c
@@ -45,8 +45,8 @@ static const struct resource bd71828_rtc_irqs[] = {
static const struct resource bd71815_power_irqs[] = {
DEFINE_RES_IRQ_NAMED(BD71815_INT_DCIN_RMV, "bd71815-dcin-rmv"),
- DEFINE_RES_IRQ_NAMED(BD71815_INT_CLPS_OUT, "bd71815-clps-out"),
- DEFINE_RES_IRQ_NAMED(BD71815_INT_CLPS_IN, "bd71815-clps-in"),
+ DEFINE_RES_IRQ_NAMED(BD71815_INT_CLPS_OUT, "bd71815-dcin-clps-out"),
+ DEFINE_RES_IRQ_NAMED(BD71815_INT_CLPS_IN, "bd71815-dcin-clps-in"),
DEFINE_RES_IRQ_NAMED(BD71815_INT_DCIN_OVP_RES, "bd71815-dcin-ovp-res"),
DEFINE_RES_IRQ_NAMED(BD71815_INT_DCIN_OVP_DET, "bd71815-dcin-ovp-det"),
DEFINE_RES_IRQ_NAMED(BD71815_INT_DCIN_MON_RES, "bd71815-dcin-mon-res"),
@@ -56,7 +56,7 @@ static const struct resource bd71815_power_irqs[] = {
DEFINE_RES_IRQ_NAMED(BD71815_INT_VSYS_LOW_RES, "bd71815-vsys-low-res"),
DEFINE_RES_IRQ_NAMED(BD71815_INT_VSYS_LOW_DET, "bd71815-vsys-low-det"),
DEFINE_RES_IRQ_NAMED(BD71815_INT_VSYS_MON_RES, "bd71815-vsys-mon-res"),
- DEFINE_RES_IRQ_NAMED(BD71815_INT_VSYS_MON_RES, "bd71815-vsys-mon-det"),
+ DEFINE_RES_IRQ_NAMED(BD71815_INT_VSYS_MON_DET, "bd71815-vsys-mon-det"),
DEFINE_RES_IRQ_NAMED(BD71815_INT_CHG_WDG_TEMP, "bd71815-chg-wdg-temp"),
DEFINE_RES_IRQ_NAMED(BD71815_INT_CHG_WDG_TIME, "bd71815-chg-wdg"),
DEFINE_RES_IRQ_NAMED(BD71815_INT_CHG_RECHARGE_RES, "bd71815-rechg-res"),
@@ -87,10 +87,10 @@ static const struct resource bd71815_power_irqs[] = {
DEFINE_RES_IRQ_NAMED(BD71815_INT_BAT_OVER_CURR_2_DET, "bd71815-bat-oc2-det"),
DEFINE_RES_IRQ_NAMED(BD71815_INT_BAT_OVER_CURR_3_RES, "bd71815-bat-oc3-res"),
DEFINE_RES_IRQ_NAMED(BD71815_INT_BAT_OVER_CURR_3_DET, "bd71815-bat-oc3-det"),
- DEFINE_RES_IRQ_NAMED(BD71815_INT_TEMP_BAT_LOW_RES, "bd71815-bat-low-res"),
- DEFINE_RES_IRQ_NAMED(BD71815_INT_TEMP_BAT_LOW_DET, "bd71815-bat-low-det"),
- DEFINE_RES_IRQ_NAMED(BD71815_INT_TEMP_BAT_HI_RES, "bd71815-bat-hi-res"),
- DEFINE_RES_IRQ_NAMED(BD71815_INT_TEMP_BAT_HI_DET, "bd71815-bat-hi-det"),
+ DEFINE_RES_IRQ_NAMED(BD71815_INT_TEMP_BAT_LOW_RES, "bd71815-temp-bat-low-res"),
+ DEFINE_RES_IRQ_NAMED(BD71815_INT_TEMP_BAT_LOW_DET, "bd71815-temp-bat-low-det"),
+ DEFINE_RES_IRQ_NAMED(BD71815_INT_TEMP_BAT_HI_RES, "bd71815-temp-bat-hi-res"),
+ DEFINE_RES_IRQ_NAMED(BD71815_INT_TEMP_BAT_HI_DET, "bd71815-temp-bat-hi-det"),
};
static const struct mfd_cell bd71815_mfd_cells[] = {
@@ -109,7 +109,30 @@ static const struct mfd_cell bd71815_mfd_cells[] = {
},
};
-static const struct mfd_cell bd71828_mfd_cells[] = {
+static const struct resource bd71828_power_irqs[] = {
+ DEFINE_RES_IRQ_NAMED(BD71828_INT_CHG_TOPOFF_TO_DONE,
+ "bd71828-chg-done"),
+ DEFINE_RES_IRQ_NAMED(BD71828_INT_DCIN_DET, "bd71828-pwr-dcin-in"),
+ DEFINE_RES_IRQ_NAMED(BD71828_INT_DCIN_RMV, "bd71828-pwr-dcin-out"),
+ DEFINE_RES_IRQ_NAMED(BD71828_INT_BAT_LOW_VOLT_RES,
+ "bd71828-vbat-normal"),
+ DEFINE_RES_IRQ_NAMED(BD71828_INT_BAT_LOW_VOLT_DET, "bd71828-vbat-low"),
+ DEFINE_RES_IRQ_NAMED(BD71828_INT_TEMP_BAT_HI_DET, "bd71828-btemp-hi"),
+ DEFINE_RES_IRQ_NAMED(BD71828_INT_TEMP_BAT_HI_RES, "bd71828-btemp-cool"),
+ DEFINE_RES_IRQ_NAMED(BD71828_INT_TEMP_BAT_LOW_DET, "bd71828-btemp-lo"),
+ DEFINE_RES_IRQ_NAMED(BD71828_INT_TEMP_BAT_LOW_RES,
+ "bd71828-btemp-warm"),
+ DEFINE_RES_IRQ_NAMED(BD71828_INT_TEMP_CHIP_OVER_VF_DET,
+ "bd71828-temp-hi"),
+ DEFINE_RES_IRQ_NAMED(BD71828_INT_TEMP_CHIP_OVER_VF_RES,
+ "bd71828-temp-norm"),
+ DEFINE_RES_IRQ_NAMED(BD71828_INT_TEMP_CHIP_OVER_125_DET,
+ "bd71828-temp-125-over"),
+ DEFINE_RES_IRQ_NAMED(BD71828_INT_TEMP_CHIP_OVER_125_RES,
+ "bd71828-temp-125-under"),
+};
+
+static struct mfd_cell bd71828_mfd_cells[] = {
{ .name = "bd71828-pmic", },
{ .name = "bd71828-gpio", },
{ .name = "bd71828-led", .of_compatible = "rohm,bd71828-leds" },
@@ -118,8 +141,11 @@ static const struct mfd_cell bd71828_mfd_cells[] = {
* BD70528 clock gate are the register address and mask.
*/
{ .name = "bd71828-clk", },
- { .name = "bd71827-power", },
{
+ .name = "bd71828-power",
+ .resources = bd71828_power_irqs,
+ .num_resources = ARRAY_SIZE(bd71828_power_irqs),
+ }, {
.name = "bd71828-rtc",
.resources = bd71828_rtc_irqs,
.num_resources = ARRAY_SIZE(bd71828_rtc_irqs),
diff --git a/drivers/mfd/rz-mtu3.c b/drivers/mfd/rz-mtu3.c
index f3dac4a29a83..9cdfef610398 100644
--- a/drivers/mfd/rz-mtu3.c
+++ b/drivers/mfd/rz-mtu3.c
@@ -32,7 +32,7 @@ static const unsigned long rz_mtu3_8bit_ch_reg_offs[][13] = {
[RZ_MTU3_CHAN_2] = MTU_8BIT_CH_1_2(0x204, 0x092, 0x205, 0x200, 0x20c, 0x201, 0x202),
[RZ_MTU3_CHAN_3] = MTU_8BIT_CH_3_4_6_7(0x008, 0x093, 0x02c, 0x000, 0x04c, 0x002, 0x004, 0x005, 0x038),
[RZ_MTU3_CHAN_4] = MTU_8BIT_CH_3_4_6_7(0x009, 0x094, 0x02d, 0x001, 0x04d, 0x003, 0x006, 0x007, 0x039),
- [RZ_MTU3_CHAN_5] = MTU_8BIT_CH_5(0xab2, 0x1eb, 0xab4, 0xab6, 0xa84, 0xa85, 0xa86, 0xa94, 0xa95, 0xa96, 0xaa4, 0xaa5, 0xaa6),
+ [RZ_MTU3_CHAN_5] = MTU_8BIT_CH_5(0xab2, 0x895, 0xab4, 0xab6, 0xa84, 0xa85, 0xa86, 0xa94, 0xa95, 0xa96, 0xaa4, 0xaa5, 0xaa6),
[RZ_MTU3_CHAN_6] = MTU_8BIT_CH_3_4_6_7(0x808, 0x893, 0x82c, 0x800, 0x84c, 0x802, 0x804, 0x805, 0x838),
[RZ_MTU3_CHAN_7] = MTU_8BIT_CH_3_4_6_7(0x809, 0x894, 0x82d, 0x801, 0x84d, 0x803, 0x806, 0x807, 0x839),
[RZ_MTU3_CHAN_8] = MTU_8BIT_CH_8(0x404, 0x098, 0x400, 0x406, 0x401, 0x402, 0x403)
diff --git a/drivers/mfd/simple-mfd-i2c.c b/drivers/mfd/simple-mfd-i2c.c
index 22159913bea0..0a607a1e3ca1 100644
--- a/drivers/mfd/simple-mfd-i2c.c
+++ b/drivers/mfd/simple-mfd-i2c.c
@@ -93,12 +93,32 @@ static const struct simple_mfd_data maxim_mon_max77705 = {
.mfd_cell_size = ARRAY_SIZE(max77705_sensor_cells),
};
+static const struct regmap_config spacemit_p1_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static const struct mfd_cell spacemit_p1_cells[] = {
+ { .name = "spacemit-p1-regulator", },
+ { .name = "spacemit-p1-rtc", },
+};
+
+static const struct simple_mfd_data spacemit_p1 = {
+ .regmap_config = &spacemit_p1_regmap_config,
+ .mfd_cell = spacemit_p1_cells,
+ .mfd_cell_size = ARRAY_SIZE(spacemit_p1_cells),
+};
+
static const struct of_device_id simple_mfd_i2c_of_match[] = {
+ { .compatible = "fsl,ls1028aqds-fpga" },
+ { .compatible = "fsl,lx2160aqds-fpga" },
+ { .compatible = "fsl,lx2160ardb-fpga" },
{ .compatible = "kontron,sl28cpld" },
- { .compatible = "silergy,sy7636a", .data = &silergy_sy7636a},
{ .compatible = "maxim,max5970", .data = &maxim_max5970},
{ .compatible = "maxim,max5978", .data = &maxim_max5970},
{ .compatible = "maxim,max77705-battery", .data = &maxim_mon_max77705},
+ { .compatible = "silergy,sy7636a", .data = &silergy_sy7636a},
+ { .compatible = "spacemit,p1", .data = &spacemit_p1, },
{}
};
MODULE_DEVICE_TABLE(of, simple_mfd_i2c_of_match);
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index a5f9241fa3f2..50bf3260f65d 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -965,7 +965,7 @@ static const struct gpio_chip gpio_chip_template = {
.ngpio = 32,
.direction_input = sm501_gpio_input,
.direction_output = sm501_gpio_output,
- .set_rv = sm501_gpio_set,
+ .set = sm501_gpio_set,
.get = sm501_gpio_get,
};
diff --git a/drivers/mfd/stm32-lptimer.c b/drivers/mfd/stm32-lptimer.c
index 09073dbc9c80..123659178cc2 100644
--- a/drivers/mfd/stm32-lptimer.c
+++ b/drivers/mfd/stm32-lptimer.c
@@ -19,7 +19,6 @@ static const struct regmap_config stm32_lptimer_regmap_cfg = {
.val_bits = 32,
.reg_stride = sizeof(u32),
.max_register = STM32_LPTIM_MAX_REGISTER,
- .fast_io = true,
};
static int stm32_lptimer_detect_encoder(struct stm32_lptimer *ddata)
diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c
index fe018bedab98..943fa363efc3 100644
--- a/drivers/mfd/stmpe-i2c.c
+++ b/drivers/mfd/stmpe-i2c.c
@@ -122,18 +122,8 @@ static struct i2c_driver stmpe_i2c_driver = {
.remove = stmpe_i2c_remove,
.id_table = stmpe_i2c_id,
};
-
-static int __init stmpe_init(void)
-{
- return i2c_add_driver(&stmpe_i2c_driver);
-}
-subsys_initcall(stmpe_init);
-
-static void __exit stmpe_exit(void)
-{
- i2c_del_driver(&stmpe_i2c_driver);
-}
-module_exit(stmpe_exit);
+module_i2c_driver(stmpe_i2c_driver);
MODULE_DESCRIPTION("STMPE MFD I2C Interface Driver");
MODULE_AUTHOR("Rabin Vincent <rabin.vincent@stericsson.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c
index b9cc85ea2c40..dea31efface6 100644
--- a/drivers/mfd/stmpe-spi.c
+++ b/drivers/mfd/stmpe-spi.c
@@ -141,18 +141,8 @@ static struct spi_driver stmpe_spi_driver = {
.remove = stmpe_spi_remove,
.id_table = stmpe_spi_id,
};
-
-static int __init stmpe_init(void)
-{
- return spi_register_driver(&stmpe_spi_driver);
-}
-subsys_initcall(stmpe_init);
-
-static void __exit stmpe_exit(void)
-{
- spi_unregister_driver(&stmpe_spi_driver);
-}
-module_exit(stmpe_exit);
+module_spi_driver(stmpe_spi_driver);
MODULE_DESCRIPTION("STMPE MFD SPI Interface Driver");
MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index 819d19dc9b4a..3c5c2f157f52 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -1482,9 +1482,13 @@ int stmpe_probe(struct stmpe_client_info *ci, enum stmpe_partnum partnum)
return ret;
}
+EXPORT_SYMBOL_GPL(stmpe_probe);
void stmpe_remove(struct stmpe *stmpe)
{
+ if (stmpe->domain)
+ irq_domain_remove(stmpe->domain);
+
if (!IS_ERR(stmpe->vio) && regulator_is_enabled(stmpe->vio))
regulator_disable(stmpe->vio);
if (!IS_ERR(stmpe->vcc) && regulator_is_enabled(stmpe->vcc))
@@ -1494,6 +1498,7 @@ void stmpe_remove(struct stmpe *stmpe)
mfd_remove_devices(stmpe->dev);
}
+EXPORT_SYMBOL_GPL(stmpe_remove);
static int stmpe_suspend(struct device *dev)
{
@@ -1517,3 +1522,7 @@ static int stmpe_resume(struct device *dev)
EXPORT_GPL_SIMPLE_DEV_PM_OPS(stmpe_dev_pm_ops,
stmpe_suspend, stmpe_resume);
+
+MODULE_DESCRIPTION("STMPE Core driver");
+MODULE_AUTHOR("Rabin Vincent <rabin.vincent@stericsson.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/sun4i-gpadc.c b/drivers/mfd/sun4i-gpadc.c
index 3029d48e982c..bf2f6fdaf8bf 100644
--- a/drivers/mfd/sun4i-gpadc.c
+++ b/drivers/mfd/sun4i-gpadc.c
@@ -72,7 +72,6 @@ static const struct regmap_config sun4i_gpadc_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .fast_io = true,
};
static const struct of_device_id sun4i_gpadc_of_match[] = {
diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c
index 03bd5cd66798..8a144ec52201 100644
--- a/drivers/mfd/tps65010.c
+++ b/drivers/mfd/tps65010.c
@@ -620,7 +620,7 @@ static int tps65010_probe(struct i2c_client *client)
tps->chip.parent = &client->dev;
tps->chip.owner = THIS_MODULE;
- tps->chip.set_rv = tps65010_gpio_set;
+ tps->chip.set = tps65010_gpio_set;
tps->chip.direction_output = tps65010_output;
/* NOTE: only partial support for inputs; nyet IRQs */
diff --git a/drivers/mfd/tps6594-core.c b/drivers/mfd/tps6594-core.c
index c16c37e36617..8b26c4127472 100644
--- a/drivers/mfd/tps6594-core.c
+++ b/drivers/mfd/tps6594-core.c
@@ -10,16 +10,20 @@
* Copyright (C) 2023 BayLibre Incorporated - https://www.baylibre.com/
*/
+#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/reboot.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tps6594.h>
#define TPS6594_CRC_SYNC_TIMEOUT_MS 150
+#define TPS65224_EN_SEL_PB 1
+#define TPS65224_GPIO3_SEL_PB 3
/* Completion to synchronize CRC feature enabling on all PMICs */
static DECLARE_COMPLETION(tps6594_crc_comp);
@@ -128,6 +132,12 @@ static const struct resource tps6594_rtc_resources[] = {
DEFINE_RES_IRQ_NAMED(TPS6594_IRQ_POWER_UP, TPS6594_IRQ_NAME_POWERUP),
};
+static const struct resource tps6594_pwrbutton_resources[] = {
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_PB_FALL, TPS65224_IRQ_NAME_PB_FALL),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_PB_RISE, TPS65224_IRQ_NAME_PB_RISE),
+ DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_PB_SHORT, TPS65224_IRQ_NAME_PB_SHORT),
+};
+
static const struct mfd_cell tps6594_common_cells[] = {
MFD_CELL_RES("tps6594-regulator", tps6594_regulator_resources),
MFD_CELL_RES("tps6594-pinctrl", tps6594_pinctrl_resources),
@@ -318,8 +328,6 @@ static const struct resource tps65224_pfsm_resources[] = {
DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_REG_UNLOCK, TPS65224_IRQ_NAME_REG_UNLOCK),
DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_TWARN, TPS65224_IRQ_NAME_TWARN),
DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_PB_LONG, TPS65224_IRQ_NAME_PB_LONG),
- DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_PB_FALL, TPS65224_IRQ_NAME_PB_FALL),
- DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_PB_RISE, TPS65224_IRQ_NAME_PB_RISE),
DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_TSD_ORD, TPS65224_IRQ_NAME_TSD_ORD),
DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_BIST_FAIL, TPS65224_IRQ_NAME_BIST_FAIL),
DEFINE_RES_IRQ_NAMED(TPS65224_IRQ_REG_CRC_ERR, TPS65224_IRQ_NAME_REG_CRC_ERR),
@@ -347,6 +355,12 @@ static const struct mfd_cell tps65224_common_cells[] = {
MFD_CELL_RES("tps6594-regulator", tps65224_regulator_resources),
};
+static const struct mfd_cell tps6594_pwrbutton_cell = {
+ .name = "tps6594-pwrbutton",
+ .resources = tps6594_pwrbutton_resources,
+ .num_resources = ARRAY_SIZE(tps6594_pwrbutton_resources),
+};
+
static const struct regmap_irq tps65224_irqs[] = {
/* INT_BUCK register */
REGMAP_IRQ_REG(TPS65224_IRQ_BUCK1_UVOV, 0, TPS65224_BIT_BUCK1_UVOV_INT),
@@ -676,11 +690,25 @@ static int tps6594_enable_crc(struct tps6594 *tps)
return ret;
}
+static int tps6594_power_off_handler(struct sys_off_data *data)
+{
+ struct tps6594 *tps = data->cb_data;
+ int ret;
+
+ ret = regmap_update_bits(tps->regmap, TPS6594_REG_FSM_I2C_TRIGGERS,
+ TPS6594_BIT_TRIGGER_I2C(0), TPS6594_BIT_TRIGGER_I2C(0));
+ if (ret)
+ return notifier_from_errno(ret);
+
+ return NOTIFY_DONE;
+}
+
int tps6594_device_init(struct tps6594 *tps, bool enable_crc)
{
struct device *dev = tps->dev;
int ret;
struct regmap_irq_chip *irq_chip;
+ unsigned int pwr_on, gpio3_cfg;
const struct mfd_cell *cells;
int n_cells;
@@ -727,6 +755,27 @@ int tps6594_device_init(struct tps6594 *tps, bool enable_crc)
if (ret)
return dev_err_probe(dev, ret, "Failed to add common child devices\n");
+ /* If either the PB/EN/VSENSE or GPIO3 is configured as PB, register a driver for it */
+ if (tps->chip_id == TPS65224 || tps->chip_id == TPS652G1) {
+ ret = regmap_read(tps->regmap, TPS6594_REG_NPWRON_CONF, &pwr_on);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read PB/EN/VSENSE config\n");
+
+ ret = regmap_read(tps->regmap, TPS6594_REG_GPIOX_CONF(2), &gpio3_cfg);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read GPIO3 config\n");
+
+ if (FIELD_GET(TPS65224_MASK_EN_PB_VSENSE_CONFIG, pwr_on) == TPS65224_EN_SEL_PB ||
+ FIELD_GET(TPS65224_MASK_GPIO_SEL, gpio3_cfg) == TPS65224_GPIO3_SEL_PB) {
+ ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO,
+ &tps6594_pwrbutton_cell, 1, NULL, 0,
+ regmap_irq_get_domain(tps->irq_data));
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to add power button device.\n");
+ }
+ }
+
/* No RTC for LP8764, TPS65224 and TPS652G1 */
if (tps->chip_id != LP8764 && tps->chip_id != TPS65224 && tps->chip_id != TPS652G1) {
ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO, tps6594_rtc_cells,
@@ -736,6 +785,12 @@ int tps6594_device_init(struct tps6594 *tps, bool enable_crc)
return dev_err_probe(dev, ret, "Failed to add RTC child device\n");
}
+ if (of_device_is_system_power_controller(dev->of_node)) {
+ ret = devm_register_power_off_handler(tps->dev, tps6594_power_off_handler, tps);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register power-off handler\n");
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(tps6594_device_init);
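
The power-button detection added above relies on FIELD_GET() from <linux/bitfield.h>, which masks out a multi-bit field and shifts it down according to its (compile-time constant) mask. A small worked example with a hypothetical 2-bit selector field:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_SEL	GENMASK(3, 2)	/* hypothetical 2-bit field at bits 3:2 */

static unsigned int demo_get_sel(u32 reg)
{
	/* For reg == 0x0c: (0x0c & 0x0c) >> 2 == 3. */
	return FIELD_GET(DEMO_SEL, reg);
}
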
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index fd71ba29f6b5..4b450d78a65f 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -570,7 +570,7 @@ static int ucb1x00_probe(struct mcp *mcp)
ucb->gpio.owner = THIS_MODULE;
ucb->gpio.base = pdata->gpio_base;
ucb->gpio.ngpio = 10;
- ucb->gpio.set_rv = ucb1x00_gpio_set;
+ ucb->gpio.set = ucb1x00_gpio_set;
ucb->gpio.get = ucb1x00_gpio_get;
ucb->gpio.direction_input = ucb1x00_gpio_direction_input;
ucb->gpio.direction_output = ucb1x00_gpio_direction_output;
diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c
index fc2daffc4352..f49cee91f71c 100644
--- a/drivers/mfd/vexpress-sysreg.c
+++ b/drivers/mfd/vexpress-sysreg.c
@@ -5,6 +5,7 @@
*/
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mfd/core.h>
@@ -96,9 +97,11 @@ static struct mfd_cell vexpress_sysreg_cells[] = {
static int vexpress_sysreg_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip *mmc_gpio_chip;
+ struct gpio_generic_chip_config config;
struct resource *mem;
void __iomem *base;
- struct gpio_chip *mmc_gpio_chip;
+ int ret;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem)
@@ -116,10 +119,22 @@ static int vexpress_sysreg_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!mmc_gpio_chip)
return -ENOMEM;
- bgpio_init(mmc_gpio_chip, &pdev->dev, 0x4, base + SYS_MCI,
- NULL, NULL, NULL, NULL, 0);
- mmc_gpio_chip->ngpio = 2;
- devm_gpiochip_add_data(&pdev->dev, mmc_gpio_chip, NULL);
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = &pdev->dev,
+ .sz = 4,
+ .dat = base + SYS_MCI,
+ };
+
+ ret = gpio_generic_chip_init(mmc_gpio_chip, &config);
+ if (ret)
+ return ret;
+
+ mmc_gpio_chip->gc.ngpio = 2;
+
+ ret = devm_gpiochip_add_data(&pdev->dev, &mmc_gpio_chip->gc, NULL);
+ if (ret)
+ return ret;
return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
vexpress_sysreg_cells,
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index e2e66f5f4fb8..b32a2597d246 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -23,7 +23,6 @@ obj-$(CONFIG_SENSORS_BH1770) += bh1770glc.o
obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o
obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
obj-$(CONFIG_KGDB_TESTS) += kgdbts.o
-obj-$(CONFIG_TEST_MISC_MINOR) += misc_minor_kunit.o
obj-$(CONFIG_SGI_XP) += sgi-xp/
obj-$(CONFIG_SGI_GRU) += sgi-gru/
obj-$(CONFIG_SMPRO_ERRMON) += smpro-errmon.o
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index 756ef6912b5a..04683b981e54 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -73,6 +73,7 @@
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include "ad525x_dpot.h"
@@ -418,10 +419,8 @@ static ssize_t sysfs_show_reg(struct device *dev,
s32 value;
if (reg & DPOT_ADDR_OTP_EN)
- return sprintf(buf, "%s\n",
- test_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask) ?
- "enabled" : "disabled");
-
+ return sprintf(buf, "%s\n", str_enabled_disabled(
+ test_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask)));
mutex_lock(&data->update_lock);
value = dpot_read(data, reg);
diff --git a/drivers/misc/amd-sbi/Kconfig b/drivers/misc/amd-sbi/Kconfig
index 4840831c84ca..4aae0733d0fc 100644
--- a/drivers/misc/amd-sbi/Kconfig
+++ b/drivers/misc/amd-sbi/Kconfig
@@ -2,6 +2,7 @@
config AMD_SBRMI_I2C
tristate "AMD side band RMI support"
depends on I2C
+ select REGMAP_I2C
help
Side band RMI over I2C support for AMD out of band management.
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c
index e7d73c972f65..58946c4ff1a5 100644
--- a/drivers/misc/apds990x.c
+++ b/drivers/misc/apds990x.c
@@ -984,7 +984,6 @@ static ssize_t apds990x_power_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", !pm_runtime_suspended(dev));
- return 0;
}
static ssize_t apds990x_power_state_store(struct device *dev,
diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c
index cd512284bfb3..46444bb47f65 100644
--- a/drivers/misc/cardreader/rts5227.c
+++ b/drivers/misc/cardreader/rts5227.c
@@ -79,6 +79,10 @@ static void rts5227_fetch_vendor_settings(struct rtsx_pcr *pcr)
pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
if (rtsx_reg_check_reverse_socket(reg))
pcr->flags |= PCR_REVERSE_SOCKET;
+ if (rtsx_reg_check_cd_reverse(reg))
+ pcr->option.sd_cd_reverse_en = 1;
+ if (rtsx_reg_check_wp_reverse(reg))
+ pcr->option.sd_wp_reverse_en = 1;
}
static void rts5227_init_from_cfg(struct rtsx_pcr *pcr)
@@ -127,8 +131,10 @@ static int rts5227_extra_init_hw(struct rtsx_pcr *pcr)
/* Configure force_clock_req */
if (pcr->flags & PCR_REVERSE_SOCKET)
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x30, 0x30);
- else
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x30, 0x00);
+ else {
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x20, option->sd_cd_reverse_en << 5);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x10, option->sd_wp_reverse_en << 4);
+ }
if (CHK_PCI_PID(pcr, 0x522A))
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RTS522A_AUTOLOAD_CFG1,
@@ -350,6 +356,8 @@ void rts5227_init_params(struct rtsx_pcr *pcr)
pcr->ms_pull_ctl_disable_tbl = rts5227_ms_pull_ctl_disable_tbl;
pcr->reg_pm_ctrl3 = PM_CTRL3;
+ pcr->option.sd_cd_reverse_en = 0;
+ pcr->option.sd_wp_reverse_en = 0;
}
static int rts522a_optimize_phy(struct rtsx_pcr *pcr)
@@ -508,5 +516,4 @@ void rts522a_init_params(struct rtsx_pcr *pcr)
pcr->hw_param.interrupt_en |= SD_OC_INT_EN;
pcr->hw_param.ocp_glitch = SD_OCP_GLITCH_10M;
pcr->option.sd_800mA_ocp_thd = RTS522A_OCP_THD_800;
-
}
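
In the PETXCFG writes above, the third and fourth arguments of rtsx_pci_add_cmd()/rtsx_pci_write_register() act as bit mask and value, so only the masked bits are updated. A sketch of how the new per-bit writes decompose the old single (0x30, 0x00) write:

	/* cd_reverse_en drives bit 5 (mask 0x20), wp_reverse_en bit 4 (mask 0x10) */
	rtsx_pci_write_register(pcr, PETXCFG, 0x20, option->sd_cd_reverse_en << 5);
	rtsx_pci_write_register(pcr, PETXCFG, 0x10, option->sd_wp_reverse_en << 4);
	/* with both flags clear this is equivalent to the old (PETXCFG, 0x30, 0x00) */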
diff --git a/drivers/misc/cardreader/rts5228.c b/drivers/misc/cardreader/rts5228.c
index 0c7f10bcf6f1..db7e735ac24f 100644
--- a/drivers/misc/cardreader/rts5228.c
+++ b/drivers/misc/cardreader/rts5228.c
@@ -84,6 +84,10 @@ static void rtsx5228_fetch_vendor_settings(struct rtsx_pcr *pcr)
pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
if (rtsx_reg_check_reverse_socket(reg))
pcr->flags |= PCR_REVERSE_SOCKET;
+ if (rtsx_reg_check_cd_reverse(reg))
+ pcr->option.sd_cd_reverse_en = 1;
+ if (rtsx_reg_check_wp_reverse(reg))
+ pcr->option.sd_wp_reverse_en = 1;
}
static int rts5228_optimize_phy(struct rtsx_pcr *pcr)
@@ -432,8 +436,10 @@ static int rts5228_extra_init_hw(struct rtsx_pcr *pcr)
if (pcr->flags & PCR_REVERSE_SOCKET)
rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x30);
- else
- rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00);
+ else {
+ rtsx_pci_write_register(pcr, PETXCFG, 0x20, option->sd_cd_reverse_en << 5);
+ rtsx_pci_write_register(pcr, PETXCFG, 0x10, option->sd_wp_reverse_en << 4);
+ }
/*
* If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
@@ -720,4 +726,6 @@ void rts5228_init_params(struct rtsx_pcr *pcr)
hw_param->interrupt_en |= SD_OC_INT_EN;
hw_param->ocp_glitch = SD_OCP_GLITCH_800U;
option->sd_800mA_ocp_thd = RTS5228_LDO1_OCP_THD_930;
+ option->sd_cd_reverse_en = 0;
+ option->sd_wp_reverse_en = 0;
}
diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c
index 6c81040e18be..38aefd8db452 100644
--- a/drivers/misc/cardreader/rts5249.c
+++ b/drivers/misc/cardreader/rts5249.c
@@ -60,6 +60,7 @@ static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
+ pci_write_config_dword(pdev, 0x718, 0x0007C000);
if (!rtsx_vendor_setting_valid(reg)) {
pcr_dbg(pcr, "skip fetch vendor setting\n");
@@ -82,6 +83,10 @@ static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
if (rtsx_reg_check_reverse_socket(reg))
pcr->flags |= PCR_REVERSE_SOCKET;
+ if (rtsx_reg_check_cd_reverse(reg))
+ pcr->option.sd_cd_reverse_en = 1;
+ if (rtsx_reg_check_wp_reverse(reg))
+ pcr->option.sd_wp_reverse_en = 1;
}
static void rts5249_init_from_cfg(struct rtsx_pcr *pcr)
@@ -254,9 +259,11 @@ static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)
/* Configure driving */
rts5249_fill_driving(pcr, OUTPUT_3V3);
if (pcr->flags & PCR_REVERSE_SOCKET)
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0xB0);
- else
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0x80);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x30, 0x30);
+ else {
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x20, option->sd_cd_reverse_en << 5);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x10, option->sd_wp_reverse_en << 4);
+ }
rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
@@ -572,6 +579,9 @@ void rts5249_init_params(struct rtsx_pcr *pcr)
option->ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5249_DEF;
option->ltr_l1off_snooze_sspwrgate =
LTR_L1OFF_SNOOZE_SSPWRGATE_5249_DEF;
+
+ option->sd_cd_reverse_en = 0;
+ option->sd_wp_reverse_en = 0;
}
static int rts524a_write_phy(struct rtsx_pcr *pcr, u8 addr, u16 val)
diff --git a/drivers/misc/cardreader/rts5264.c b/drivers/misc/cardreader/rts5264.c
index d050c9fff7ac..99a2d5ea6421 100644
--- a/drivers/misc/cardreader/rts5264.c
+++ b/drivers/misc/cardreader/rts5264.c
@@ -527,8 +527,16 @@ static void rts5264_init_from_hw(struct rtsx_pcr *pcr)
pcr->rtd3_en = rts5264_reg_to_rtd3(lval2);
- if (rts5264_reg_check_reverse_socket(lval2))
- pcr->flags |= PCR_REVERSE_SOCKET;
+ if (rts5264_reg_check_reverse_socket(lval2)) {
+ if (is_version_higher_than(pcr, PID_5264, RTS5264_IC_VER_B))
+ pcr->option.sd_cd_reverse_en = 1;
+ else
+ pcr->flags |= PCR_REVERSE_SOCKET;
+ }
+
+ if (rts5264_reg_check_wp_reverse(lval2) &&
+ is_version_higher_than(pcr, PID_5264, RTS5264_IC_VER_B))
+ pcr->option.sd_wp_reverse_en = 1;
pci_read_config_dword(pdev, setting_reg1, &lval1);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", setting_reg1, lval1);
@@ -622,8 +630,10 @@ static int rts5264_extra_init_hw(struct rtsx_pcr *pcr)
if (pcr->flags & PCR_REVERSE_SOCKET)
rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x30);
- else
- rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00);
+ else {
+ rtsx_pci_write_register(pcr, PETXCFG, 0x20, option->sd_cd_reverse_en << 5);
+ rtsx_pci_write_register(pcr, PETXCFG, 0x10, option->sd_wp_reverse_en << 4);
+ }
/*
* If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
@@ -957,4 +967,6 @@ void rts5264_init_params(struct rtsx_pcr *pcr)
hw_param->interrupt_en |= (SD_OC_INT_EN | SD_OVP_INT_EN);
hw_param->ocp_glitch = SD_OCP_GLITCH_800U | SDVIO_OCP_GLITCH_800U;
option->sd_800mA_ocp_thd = RTS5264_LDO1_OCP_THD_1150;
+ option->sd_cd_reverse_en = 0;
+ option->sd_wp_reverse_en = 0;
}
diff --git a/drivers/misc/cardreader/rts5264.h b/drivers/misc/cardreader/rts5264.h
index f3e81daa708d..611ee253367c 100644
--- a/drivers/misc/cardreader/rts5264.h
+++ b/drivers/misc/cardreader/rts5264.h
@@ -14,6 +14,7 @@
#define rts5264_reg_to_aspm(reg) \
(((~(reg) >> 28) & 0x02) | (((reg) >> 28) & 0x01))
#define rts5264_reg_check_reverse_socket(reg) ((reg) & 0x04)
+#define rts5264_reg_check_wp_reverse(reg) ((reg) & 0x8000)
#define rts5264_reg_to_sd30_drive_sel_1v8(reg) (((reg) >> 22) & 0x03)
#define rts5264_reg_to_sd30_drive_sel_3v3(reg) (((reg) >> 16) & 0x03)
#define rts5264_reg_to_rtd3(reg) ((reg) & 0x08)
diff --git a/drivers/misc/cardreader/rtsx_pcr.h b/drivers/misc/cardreader/rtsx_pcr.h
index 8e5951b61143..40562ff2be13 100644
--- a/drivers/misc/cardreader/rtsx_pcr.h
+++ b/drivers/misc/cardreader/rtsx_pcr.h
@@ -100,6 +100,8 @@ static inline u8 map_sd_drive(int idx)
#define rtsx_reg_to_sd30_drive_sel_3v3(reg) (((reg) >> 5) & 0x03)
#define rtsx_reg_to_card_drive_sel(reg) ((((reg) >> 25) & 0x01) << 6)
#define rtsx_reg_check_reverse_socket(reg) ((reg) & 0x4000)
+#define rtsx_reg_check_cd_reverse(reg) ((reg) & 0x800000)
+#define rtsx_reg_check_wp_reverse(reg) ((reg) & 0x400000)
#define rts5209_reg_to_aspm(reg) (((reg) >> 5) & 0x03)
#define rts5209_reg_check_ms_pmos(reg) (!((reg) & 0x08))
#define rts5209_reg_to_sd30_drive_sel_1v8(reg) (((reg) >> 3) & 0x07)
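
The new vendor-setting macros each test a single bit of the 32-bit configuration dword; a worked example with an illustrative value:

	u32 reg = 0x00C04000;			/* bits 23, 22 and 14 set */

	rtsx_reg_check_reverse_socket(reg);	/* reg & 0x4000   -> nonzero */
	rtsx_reg_check_cd_reverse(reg);		/* reg & 0x800000 -> nonzero */
	rtsx_reg_check_wp_reverse(reg);		/* reg & 0x400000 -> nonzero */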
diff --git a/drivers/misc/cardreader/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c
index d007a4455ce5..1830e9ed2521 100644
--- a/drivers/misc/cardreader/rtsx_usb.c
+++ b/drivers/misc/cardreader/rtsx_usb.c
@@ -552,6 +552,10 @@ static int rtsx_usb_reset_chip(struct rtsx_ucr *ucr)
ret = rtsx_usb_send_cmd(ucr, MODE_C, 100);
if (ret)
return ret;
+ /* config OCP */
+ rtsx_usb_write_register(ucr, OCPCTL, MS_OCP_DETECT_EN, MS_OCP_DETECT_EN);
+ rtsx_usb_write_register(ucr, OCPPARA1, 0xF0, 0x50);
+ rtsx_usb_write_register(ucr, OCPPARA2, 0x7, 0x3);
/* config non-crystal mode */
rtsx_usb_read_register(ucr, CFG_MODE, &val);
@@ -722,6 +726,9 @@ static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message)
if (val & (SD_CD | MS_CD)) {
device_for_each_child(&intf->dev, NULL, rtsx_usb_resume_child);
return -EAGAIN;
+ } else {
+ /* if the card does not exist, clear OCP status */

+ rtsx_usb_write_register(ucr, OCPCTL, MS_OCP_CLEAR, MS_OCP_CLEAR);
}
} else {
/* There is an ongoing operation*/
diff --git a/drivers/misc/dw-xdata-pcie.c b/drivers/misc/dw-xdata-pcie.c
index efd0ca8cc925..a604c0e9c038 100644
--- a/drivers/misc/dw-xdata-pcie.c
+++ b/drivers/misc/dw-xdata-pcie.c
@@ -16,6 +16,7 @@
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/pci.h>
+#include <linux/string_choices.h>
#define DW_XDATA_DRIVER_NAME "dw-xdata-pcie"
@@ -132,7 +133,7 @@ static void dw_xdata_start(struct dw_xdata *dw, bool write)
if (!(status & STATUS_DONE))
dev_dbg(dev, "xData: started %s direction\n",
- write ? "write" : "read");
+ str_write_read(write));
}
static void dw_xdata_perf_meas(struct dw_xdata *dw, u64 *data, bool write)
@@ -195,7 +196,7 @@ static void dw_xdata_perf(struct dw_xdata *dw, u64 *rate, bool write)
mutex_unlock(&dw->mutex);
dev_dbg(dev, "xData: time=%llu us, %s=%llu MB/s\n",
- diff, write ? "write" : "read", *rate);
+ diff, str_write_read(write), *rate);
}
static struct dw_xdata *misc_dev_to_dw(struct miscdevice *misc_dev)
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index 0bef5b93bd6d..4d0ce47aa282 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -120,4 +120,22 @@ config EEPROM_EE1004
This driver can also be built as a module. If so, the module
will be called ee1004.
+config EEPROM_M24LR
+ tristate "STMicroelectronics M24LR RFID/NFC EEPROM support"
+ depends on I2C && SYSFS
+ select REGMAP_I2C
+ select NVMEM
+ select NVMEM_SYSFS
+ help
+ This enables support for STMicroelectronics M24LR RFID/NFC EEPROM
+ chips. These dual-interface devices expose two I2C addresses:
+ one for EEPROM memory access and another for control and system
+ configuration (e.g. UID, password handling).
+
+ This driver provides a sysfs interface for control functions and
+ integrates with the nvmem subsystem for EEPROM access.
+
+ To compile this driver as a module, choose M here: the
+ module will be called m24lr.
+
endmenu
diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile
index 65794e526d5d..8f311fd6a4ce 100644
--- a/drivers/misc/eeprom/Makefile
+++ b/drivers/misc/eeprom/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_EEPROM_93XX46) += eeprom_93xx46.o
obj-$(CONFIG_EEPROM_DIGSY_MTC_CFG) += digsy_mtc_eeprom.o
obj-$(CONFIG_EEPROM_IDT_89HPESX) += idt_89hpesx.o
obj-$(CONFIG_EEPROM_EE1004) += ee1004.o
+obj-$(CONFIG_EEPROM_M24LR) += m24lr.o
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index 2d0492867054..e2868f7bdb03 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -379,37 +379,49 @@ static int at25_fram_to_chip(struct device *dev, struct spi_eeprom *chip)
struct at25_data *at25 = container_of(chip, struct at25_data, chip);
u8 sernum[FM25_SN_LEN];
u8 id[FM25_ID_LEN];
+ u32 val;
int i;
strscpy(chip->name, "fm25", sizeof(chip->name));
- /* Get ID of chip */
- fm25_aux_read(at25, id, FM25_RDID, FM25_ID_LEN);
- /* There are inside-out FRAM variations, detect them and reverse the ID bytes */
- if (id[6] == 0x7f && id[2] == 0xc2)
- for (i = 0; i < ARRAY_SIZE(id) / 2; i++) {
- u8 tmp = id[i];
- int j = ARRAY_SIZE(id) - i - 1;
+ if (!device_property_read_u32(dev, "size", &val)) {
+ chip->byte_len = val;
+ } else {
+ /* Get ID of chip */
+ fm25_aux_read(at25, id, FM25_RDID, FM25_ID_LEN);
+ /* There are inside-out FRAM variations, detect them and reverse the ID bytes */
+ if (id[6] == 0x7f && id[2] == 0xc2)
+ for (i = 0; i < ARRAY_SIZE(id) / 2; i++) {
+ u8 tmp = id[i];
+ int j = ARRAY_SIZE(id) - i - 1;
+
+ id[i] = id[j];
+ id[j] = tmp;
+ }
+ if (id[6] != 0xc2) {
+ dev_err(dev, "Error: no Cypress FRAM with device ID (manufacturer ID bank 7: %02x)\n", id[6]);
+ return -ENODEV;
+ }
- id[i] = id[j];
- id[j] = tmp;
+ switch (id[7]) {
+ case 0x21 ... 0x26:
+ chip->byte_len = BIT(id[7] - 0x21 + 4) * 1024;
+ break;
+ case 0x2a ... 0x30:
+ /* CY15B116QN ... CY15B116QN */
+ chip->byte_len = BIT(((id[7] >> 1) & 0xf) + 13);
+ break;
+ default:
+ dev_err(dev, "Error: unsupported size (id %02x)\n", id[7]);
+ return -ENODEV;
}
- if (id[6] != 0xc2) {
- dev_err(dev, "Error: no Cypress FRAM (id %02x)\n", id[6]);
- return -ENODEV;
- }
- switch (id[7]) {
- case 0x21 ... 0x26:
- chip->byte_len = BIT(id[7] - 0x21 + 4) * 1024;
- break;
- case 0x2a ... 0x30:
- /* CY15B116QN ... CY15B116QN */
- chip->byte_len = BIT(((id[7] >> 1) & 0xf) + 13);
- break;
- default:
- dev_err(dev, "Error: unsupported size (id %02x)\n", id[7]);
- return -ENODEV;
+ if (id[8]) {
+ fm25_aux_read(at25, sernum, FM25_RDSN, FM25_SN_LEN);
+ /* Swap byte order */
+ for (i = 0; i < FM25_SN_LEN; i++)
+ at25->sernum[i] = sernum[FM25_SN_LEN - 1 - i];
+ }
}
if (chip->byte_len > 64 * 1024)
@@ -417,13 +429,6 @@ static int at25_fram_to_chip(struct device *dev, struct spi_eeprom *chip)
else
chip->flags |= EE_ADDR2;
- if (id[8]) {
- fm25_aux_read(at25, sernum, FM25_RDSN, FM25_SN_LEN);
- /* Swap byte order */
- for (i = 0; i < FM25_SN_LEN; i++)
- at25->sernum[i] = sernum[FM25_SN_LEN - 1 - i];
- }
-
chip->page_size = PAGE_SIZE;
return 0;
}
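
The refactor keeps the existing Cypress density decode: IDs 0x21..0x26 map to BIT(id[7] - 0x21 + 4) * 1024 bytes and 0x2a..0x30 to BIT(((id[7] >> 1) & 0xf) + 13). Worked examples of that arithmetic:

	/* id[7] == 0x21: BIT(0 + 4) * 1024      = 16 KiB  */
	/* id[7] == 0x26: BIT(5 + 4) * 1024      = 512 KiB */
	/* id[7] == 0x30: BIT((0x18 & 0xf) + 13) = BIT(21) = 2 MiB (16 Mbit) */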
diff --git a/drivers/misc/eeprom/m24lr.c b/drivers/misc/eeprom/m24lr.c
new file mode 100644
index 000000000000..7a9fd45a8e46
--- /dev/null
+++ b/drivers/misc/eeprom/m24lr.c
@@ -0,0 +1,606 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * m24lr.c - Sysfs control interface for ST M24LR series RFID/NFC chips
+ *
+ * Copyright (c) 2025 Abd-Alrhman Masalkhi <abd.masalkhi@gmail.com>
+ *
+ * This driver implements both the sysfs-based control interface and EEPROM
+ * access for STMicroelectronics M24LR series chips (e.g., M24LR04E-R).
+ * It provides access to control registers for features such as password
+ * authentication, memory protection, and device configuration. In addition,
+ * it manages read and write operations to the EEPROM region of the chip.
+ */
+
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#define M24LR_WRITE_TIMEOUT 25u
+#define M24LR_READ_TIMEOUT (M24LR_WRITE_TIMEOUT)
+
+/**
+ * struct m24lr_chip - describes chip-specific sysfs layout
+ * @sss_len: the length of the sss region
+ * @page_size: chip-specific limit on the maximum number of bytes allowed
+ * in a single write operation.
+ * @eeprom_size: size of the EEPROM in bytes
+ *
+ * Supports multiple M24LR chip variants (e.g., M24LRxx) by allowing each
+ * to define its own set of sysfs attributes, depending on its available
+ * registers and features.
+ */
+struct m24lr_chip {
+ unsigned int sss_len;
+ unsigned int page_size;
+ unsigned int eeprom_size;
+};
+
+/**
+ * struct m24lr - core driver data for M24LR chip control
+ * @uid: 64 bits unique identifier stored in the device
+ * @sss_len: the length of the sss region
+ * @page_size: chip-specific limit on the maximum number of bytes allowed
+ * in a single write operation.
+ * @eeprom_size: size of the EEPROM in bytes
+ * @ctl_regmap: regmap interface for accessing the system parameter sector
+ * @eeprom_regmap: regmap interface for accessing the EEPROM
+ * @lock: mutex to synchronize operations to the device
+ *
+ * Central data structure holding the state and resources used by the
+ * M24LR device driver.
+ */
+struct m24lr {
+ u64 uid;
+ unsigned int sss_len;
+ unsigned int page_size;
+ unsigned int eeprom_size;
+ struct regmap *ctl_regmap;
+ struct regmap *eeprom_regmap;
+ struct mutex lock; /* synchronize operations to the device */
+};
+
+static const struct regmap_range m24lr_ctl_vo_ranges[] = {
+ regmap_reg_range(0, 63),
+};
+
+static const struct regmap_access_table m24lr_ctl_vo_table = {
+ .yes_ranges = m24lr_ctl_vo_ranges,
+ .n_yes_ranges = ARRAY_SIZE(m24lr_ctl_vo_ranges),
+};
+
+static const struct regmap_config m24lr_ctl_regmap_conf = {
+ .name = "m24lr_ctl",
+ .reg_stride = 1,
+ .reg_bits = 16,
+ .val_bits = 8,
+ .disable_locking = false,
+ .cache_type = REGCACHE_RBTREE, /* flat cache can't be used, there's a huge gap */
+ .volatile_table = &m24lr_ctl_vo_table,
+};
+
+/* Chip descriptor for M24LR04E-R variant */
+static const struct m24lr_chip m24lr04e_r_chip = {
+ .page_size = 4,
+ .eeprom_size = 512,
+ .sss_len = 4,
+};
+
+/* Chip descriptor for M24LR16E-R variant */
+static const struct m24lr_chip m24lr16e_r_chip = {
+ .page_size = 4,
+ .eeprom_size = 2048,
+ .sss_len = 16,
+};
+
+/* Chip descriptor for M24LR64E-R variant */
+static const struct m24lr_chip m24lr64e_r_chip = {
+ .page_size = 4,
+ .eeprom_size = 8192,
+ .sss_len = 64,
+};
+
+static const struct i2c_device_id m24lr_ids[] = {
+ { "m24lr04e-r", (kernel_ulong_t)&m24lr04e_r_chip},
+ { "m24lr16e-r", (kernel_ulong_t)&m24lr16e_r_chip},
+ { "m24lr64e-r", (kernel_ulong_t)&m24lr64e_r_chip},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, m24lr_ids);
+
+static const struct of_device_id m24lr_of_match[] = {
+ { .compatible = "st,m24lr04e-r", .data = &m24lr04e_r_chip},
+ { .compatible = "st,m24lr16e-r", .data = &m24lr16e_r_chip},
+ { .compatible = "st,m24lr64e-r", .data = &m24lr64e_r_chip},
+ { }
+};
+MODULE_DEVICE_TABLE(of, m24lr_of_match);
+
+/**
+ * m24lr_regmap_read - read data using regmap with retry on failure
+ * @regmap: regmap instance for the device
+ * @buf: buffer to store the read data
+ * @size: number of bytes to read
+ * @offset: starting register address
+ *
+ * Attempts to read a block of data from the device with retries and timeout.
+ * Some M24LR chips may transiently NACK reads (e.g., during internal write
+ * cycles), so this function retries with a short sleep until the timeout
+ * expires.
+ *
+ * Returns:
+ * Number of bytes read on success,
+ * -ETIMEDOUT if the read fails within the timeout window.
+ */
+static ssize_t m24lr_regmap_read(struct regmap *regmap, u8 *buf,
+ size_t size, unsigned int offset)
+{
+ int err;
+ unsigned long timeout, read_time;
+ ssize_t ret = -ETIMEDOUT;
+
+ timeout = jiffies + msecs_to_jiffies(M24LR_READ_TIMEOUT);
+ do {
+ read_time = jiffies;
+
+ err = regmap_bulk_read(regmap, offset, buf, size);
+ if (!err) {
+ ret = size;
+ break;
+ }
+
+ usleep_range(1000, 2000);
+ } while (time_before(read_time, timeout));
+
+ return ret;
+}
+
+/**
+ * m24lr_regmap_write - write data using regmap with retry on failure
+ * @regmap: regmap instance for the device
+ * @buf: buffer containing the data to write
+ * @size: number of bytes to write
+ * @offset: starting register address
+ *
+ * Attempts to write a block of data to the device with retries and a timeout.
+ * Some M24LR devices may NACK I2C writes while an internal write operation
+ * is in progress. This function retries the write operation with a short delay
+ * until it succeeds or the timeout is reached.
+ *
+ * Returns:
+ * Number of bytes written on success,
+ * -ETIMEDOUT if the write fails within the timeout window.
+ */
+static ssize_t m24lr_regmap_write(struct regmap *regmap, const u8 *buf,
+ size_t size, unsigned int offset)
+{
+ int err;
+ unsigned long timeout, write_time;
+ ssize_t ret = -ETIMEDOUT;
+
+ timeout = jiffies + msecs_to_jiffies(M24LR_WRITE_TIMEOUT);
+
+ do {
+ write_time = jiffies;
+
+ err = regmap_bulk_write(regmap, offset, buf, size);
+ if (!err) {
+ ret = size;
+ break;
+ }
+
+ usleep_range(1000, 2000);
+ } while (time_before(write_time, timeout));
+
+ return ret;
+}
+
+static ssize_t m24lr_read(struct m24lr *m24lr, u8 *buf, size_t size,
+ unsigned int offset, bool is_eeprom)
+{
+ struct regmap *regmap;
+ ssize_t ret;
+
+ if (is_eeprom)
+ regmap = m24lr->eeprom_regmap;
+ else
+ regmap = m24lr->ctl_regmap;
+
+ mutex_lock(&m24lr->lock);
+ ret = m24lr_regmap_read(regmap, buf, size, offset);
+ mutex_unlock(&m24lr->lock);
+
+ return ret;
+}
+
+/**
+ * m24lr_write - write buffer to M24LR device with page alignment handling
+ * @m24lr: pointer to driver context
+ * @buf: data buffer to write
+ * @size: number of bytes to write
+ * @offset: target register address in the device
+ * @is_eeprom: true if the write should target the EEPROM,
+ * false if it should target the system parameters sector.
+ *
+ * Writes data to the M24LR device using regmap, split into chunks no larger
+ * than page_size to respect device-specific write limitations (e.g., page
+ * size or I2C hold-time concerns). Each chunk is aligned to the page boundary
+ * defined by page_size.
+ *
+ * Returns:
+ * Total number of bytes written on success,
+ * A negative error code if any write fails.
+ */
+static ssize_t m24lr_write(struct m24lr *m24lr, const u8 *buf, size_t size,
+ unsigned int offset, bool is_eeprom)
+{
+ unsigned int n, next_sector;
+ struct regmap *regmap;
+ ssize_t ret = 0;
+ ssize_t err;
+
+ if (is_eeprom)
+ regmap = m24lr->eeprom_regmap;
+ else
+ regmap = m24lr->ctl_regmap;
+
+ n = min_t(unsigned int, size, m24lr->page_size);
+ next_sector = roundup(offset + 1, m24lr->page_size);
+ if (offset + n > next_sector)
+ n = next_sector - offset;
+
+ mutex_lock(&m24lr->lock);
+ while (n) {
+ err = m24lr_regmap_write(regmap, buf + offset, n, offset);
+ if (IS_ERR_VALUE(err)) {
+ if (!ret)
+ ret = err;
+
+ break;
+ }
+
+ offset += n;
+ size -= n;
+ ret += n;
+ n = min_t(unsigned int, size, m24lr->page_size);
+ }
+ mutex_unlock(&m24lr->lock);
+
+ return ret;
+}
+
+/**
+ * m24lr_write_pass - Write password to M24LR04E-R using secure format
+ * @m24lr: Pointer to device control structure
+ * @buf: Input buffer containing hex-encoded password
+ * @count: Number of bytes in @buf
+ * @code: Operation code to embed between password copies
+ *
+ * This function parses a 4-byte password, encodes it in big-endian format,
+ * and constructs a 9-byte sequence of the form:
+ *
+ * [BE(password), code, BE(password)]
+ *
+ * The result is written to register 0x0900 (2304), which is the password
+ * register in the M24LR04E-R chip.
+ *
+ * Return: Number of bytes written on success, or negative error code on failure
+ */
+static ssize_t m24lr_write_pass(struct m24lr *m24lr, const char *buf,
+ size_t count, u8 code)
+{
+ __be32 be_pass;
+ u8 output[9];
+ ssize_t ret;
+ u32 pass;
+ int err;
+
+ if (!count)
+ return -EINVAL;
+
+ if (count > 8)
+ return -EINVAL;
+
+ err = kstrtou32(buf, 16, &pass);
+ if (err)
+ return err;
+
+ be_pass = cpu_to_be32(pass);
+
+ memcpy(output, &be_pass, sizeof(be_pass));
+ output[4] = code;
+ memcpy(output + 5, &be_pass, sizeof(be_pass));
+
+ mutex_lock(&m24lr->lock);
+ ret = m24lr_regmap_write(m24lr->ctl_regmap, output, 9, 2304);
+ mutex_unlock(&m24lr->lock);
+
+ return ret;
+}
+
+static ssize_t m24lr_read_reg_le(struct m24lr *m24lr, u64 *val,
+ unsigned int reg_addr,
+ unsigned int reg_size)
+{
+ ssize_t ret;
+ __le64 input = 0;
+
+ ret = m24lr_read(m24lr, (u8 *)&input, reg_size, reg_addr, false);
+ if (IS_ERR_VALUE(ret))
+ return ret;
+
+ if (ret != reg_size)
+ return -EINVAL;
+
+ switch (reg_size) {
+ case 1:
+ *val = *(u8 *)&input;
+ break;
+ case 2:
+ *val = le16_to_cpu((__le16)input);
+ break;
+ case 4:
+ *val = le32_to_cpu((__le32)input);
+ break;
+ case 8:
+ *val = le64_to_cpu((__le64)input);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int m24lr_nvmem_read(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ ssize_t err;
+ struct m24lr *m24lr = priv;
+
+ if (!bytes)
+ return bytes;
+
+ if (offset + bytes > m24lr->eeprom_size)
+ return -EINVAL;
+
+ err = m24lr_read(m24lr, val, bytes, offset, true);
+ if (IS_ERR_VALUE(err))
+ return err;
+
+ return 0;
+}
+
+static int m24lr_nvmem_write(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ ssize_t err;
+ struct m24lr *m24lr = priv;
+
+ if (!bytes)
+ return -EINVAL;
+
+ if (offset + bytes > m24lr->eeprom_size)
+ return -EINVAL;
+
+ err = m24lr_write(m24lr, val, bytes, offset, true);
+ if (IS_ERR_VALUE(err))
+ return err;
+
+ return 0;
+}
+
+static ssize_t m24lr_ctl_sss_read(struct file *filep, struct kobject *kobj,
+ const struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t count)
+{
+ struct m24lr *m24lr = attr->private;
+
+ if (!count)
+ return count;
+
+ if (size_add(offset, count) > m24lr->sss_len)
+ return -EINVAL;
+
+ return m24lr_read(m24lr, buf, count, offset, false);
+}
+
+static ssize_t m24lr_ctl_sss_write(struct file *filep, struct kobject *kobj,
+ const struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t count)
+{
+ struct m24lr *m24lr = attr->private;
+
+ if (!count)
+ return -EINVAL;
+
+ if (size_add(offset, count) > m24lr->sss_len)
+ return -EINVAL;
+
+ return m24lr_write(m24lr, buf, count, offset, false);
+}
+static BIN_ATTR(sss, 0600, m24lr_ctl_sss_read, m24lr_ctl_sss_write, 0);
+
+static ssize_t new_pass_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct m24lr *m24lr = i2c_get_clientdata(to_i2c_client(dev));
+
+ return m24lr_write_pass(m24lr, buf, count, 7);
+}
+static DEVICE_ATTR_WO(new_pass);
+
+static ssize_t unlock_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct m24lr *m24lr = i2c_get_clientdata(to_i2c_client(dev));
+
+ return m24lr_write_pass(m24lr, buf, count, 9);
+}
+static DEVICE_ATTR_WO(unlock);
+
+static ssize_t uid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct m24lr *m24lr = i2c_get_clientdata(to_i2c_client(dev));
+
+ return sysfs_emit(buf, "%llx\n", m24lr->uid);
+}
+static DEVICE_ATTR_RO(uid);
+
+static ssize_t total_sectors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct m24lr *m24lr = i2c_get_clientdata(to_i2c_client(dev));
+
+ return sysfs_emit(buf, "%x\n", m24lr->sss_len);
+}
+static DEVICE_ATTR_RO(total_sectors);
+
+static struct attribute *m24lr_ctl_dev_attrs[] = {
+ &dev_attr_unlock.attr,
+ &dev_attr_new_pass.attr,
+ &dev_attr_uid.attr,
+ &dev_attr_total_sectors.attr,
+ NULL,
+};
+
+static const struct m24lr_chip *m24lr_get_chip(struct device *dev)
+{
+ const struct m24lr_chip *ret;
+ const struct i2c_device_id *id;
+
+ id = i2c_match_id(m24lr_ids, to_i2c_client(dev));
+
+ if (dev->of_node && of_match_device(m24lr_of_match, dev))
+ ret = of_device_get_match_data(dev);
+ else if (id)
+ ret = (void *)id->driver_data;
+ else
+ ret = acpi_device_get_match_data(dev);
+
+ return ret;
+}
+
+static int m24lr_probe(struct i2c_client *client)
+{
+ struct regmap_config eeprom_regmap_conf = {0};
+ struct nvmem_config nvmem_conf = {0};
+ struct device *dev = &client->dev;
+ struct i2c_client *eeprom_client;
+ const struct m24lr_chip *chip;
+ struct regmap *eeprom_regmap;
+ struct nvmem_device *nvmem;
+ struct regmap *ctl_regmap;
+ struct m24lr *m24lr;
+ u32 regs[2];
+ long err;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -EOPNOTSUPP;
+
+ chip = m24lr_get_chip(dev);
+ if (!chip)
+ return -ENODEV;
+
+ m24lr = devm_kzalloc(dev, sizeof(struct m24lr), GFP_KERNEL);
+ if (!m24lr)
+ return -ENOMEM;
+
+ err = device_property_read_u32_array(dev, "reg", regs, ARRAY_SIZE(regs));
+ if (err)
+ return dev_err_probe(dev, err, "Failed to read 'reg' property\n");
+
+ /* Create a second I2C client for the EEPROM interface */
+ eeprom_client = devm_i2c_new_dummy_device(dev, client->adapter, regs[1]);
+ if (IS_ERR(eeprom_client))
+ return dev_err_probe(dev, PTR_ERR(eeprom_client),
+ "Failed to create dummy I2C client for the EEPROM\n");
+
+ ctl_regmap = devm_regmap_init_i2c(client, &m24lr_ctl_regmap_conf);
+ if (IS_ERR(ctl_regmap))
+ return dev_err_probe(dev, PTR_ERR(ctl_regmap),
+ "Failed to init regmap\n");
+
+ eeprom_regmap_conf.name = "m24lr_eeprom";
+ eeprom_regmap_conf.reg_bits = 16;
+ eeprom_regmap_conf.val_bits = 8;
+ eeprom_regmap_conf.disable_locking = true;
+ eeprom_regmap_conf.max_register = chip->eeprom_size - 1;
+
+ eeprom_regmap = devm_regmap_init_i2c(eeprom_client,
+ &eeprom_regmap_conf);
+ if (IS_ERR(eeprom_regmap))
+ return dev_err_probe(dev, PTR_ERR(eeprom_regmap),
+ "Failed to init regmap\n");
+
+ mutex_init(&m24lr->lock);
+ m24lr->sss_len = chip->sss_len;
+ m24lr->page_size = chip->page_size;
+ m24lr->eeprom_size = chip->eeprom_size;
+ m24lr->eeprom_regmap = eeprom_regmap;
+ m24lr->ctl_regmap = ctl_regmap;
+
+ nvmem_conf.dev = &eeprom_client->dev;
+ nvmem_conf.owner = THIS_MODULE;
+ nvmem_conf.type = NVMEM_TYPE_EEPROM;
+ nvmem_conf.reg_read = m24lr_nvmem_read;
+ nvmem_conf.reg_write = m24lr_nvmem_write;
+ nvmem_conf.size = chip->eeprom_size;
+ nvmem_conf.word_size = 1;
+ nvmem_conf.stride = 1;
+ nvmem_conf.priv = m24lr;
+
+ nvmem = devm_nvmem_register(dev, &nvmem_conf);
+ if (IS_ERR(nvmem))
+ return dev_err_probe(dev, PTR_ERR(nvmem),
+ "Failed to register nvmem\n");
+
+ i2c_set_clientdata(client, m24lr);
+ i2c_set_clientdata(eeprom_client, m24lr);
+
+ bin_attr_sss.size = chip->sss_len;
+ bin_attr_sss.private = m24lr;
+ err = sysfs_create_bin_file(&dev->kobj, &bin_attr_sss);
+ if (err)
+ return dev_err_probe(dev, err,
+ "Failed to create sss bin file\n");
+
+ /* probe the device by reading the UID; on success, store it */
+ err = m24lr_read_reg_le(m24lr, &m24lr->uid, 2324, sizeof(m24lr->uid));
+ if (IS_ERR_VALUE(err))
+ goto remove_bin_file;
+
+ return 0;
+
+remove_bin_file:
+ sysfs_remove_bin_file(&dev->kobj, &bin_attr_sss);
+
+ return err;
+}
+
+static void m24lr_remove(struct i2c_client *client)
+{
+ sysfs_remove_bin_file(&client->dev.kobj, &bin_attr_sss);
+}
+
+ATTRIBUTE_GROUPS(m24lr_ctl_dev);
+
+static struct i2c_driver m24lr_driver = {
+ .driver = {
+ .name = "m24lr",
+ .of_match_table = m24lr_of_match,
+ .dev_groups = m24lr_ctl_dev_groups,
+ },
+ .probe = m24lr_probe,
+ .remove = m24lr_remove,
+ .id_table = m24lr_ids,
+};
+module_i2c_driver(m24lr_driver);
+
+MODULE_AUTHOR("Abd-Alrhman Masalkhi");
+MODULE_DESCRIPTION("ST M24LR control driver");
+MODULE_LICENSE("GPL");
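
m24lr_write_pass() above builds the 9-byte [BE(password), code, BE(password)] frame from a hex string; a worked example for password "12345678" with the opcode 7 used by new_pass_store():

	u32 pass = 0x12345678;		/* from kstrtou32(buf, 16, &pass) */
	__be32 be = cpu_to_be32(pass);
	u8 out[9];

	memcpy(out, &be, 4);		/* 12 34 56 78 */
	out[4] = 7;			/* opcode: write new password */
	memcpy(out + 5, &be, 4);	/* 12 34 56 78 */
	/* out[] is then written at register 0x0900 (2304) */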
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 53e88a1bc430..621bce7e101c 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -27,8 +27,7 @@
#define MDSP_DOMAIN_ID (1)
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)
-#define CDSP1_DOMAIN_ID (4)
-#define FASTRPC_DEV_MAX 5 /* adsp, mdsp, slpi, cdsp, cdsp1 */
+#define GDSP_DOMAIN_ID (4)
#define FASTRPC_MAX_SESSIONS 14
#define FASTRPC_MAX_VMIDS 16
#define FASTRPC_ALIGN 128
@@ -106,8 +105,6 @@
#define miscdev_to_fdevice(d) container_of(d, struct fastrpc_device, miscdev)
-static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
- "sdsp", "cdsp", "cdsp1" };
struct fastrpc_phy_page {
u64 addr; /* physical address */
u64 size; /* size of contiguous region */
@@ -323,11 +320,11 @@ static void fastrpc_free_map(struct kref *ref)
perm.vmid = QCOM_SCM_VMID_HLOS;
perm.perm = QCOM_SCM_PERM_RWX;
- err = qcom_scm_assign_mem(map->phys, map->size,
+ err = qcom_scm_assign_mem(map->phys, map->len,
&src_perms, &perm, 1);
if (err) {
dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d\n",
- map->phys, map->size, err);
+ map->phys, map->len, err);
return;
}
}
@@ -363,26 +360,21 @@ static int fastrpc_map_get(struct fastrpc_map *map)
static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd,
- struct fastrpc_map **ppmap, bool take_ref)
+ struct fastrpc_map **ppmap)
{
- struct fastrpc_session_ctx *sess = fl->sctx;
struct fastrpc_map *map = NULL;
+ struct dma_buf *buf;
int ret = -ENOENT;
+ buf = dma_buf_get(fd);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
spin_lock(&fl->lock);
list_for_each_entry(map, &fl->maps, node) {
- if (map->fd != fd)
+ if (map->fd != fd || map->buf != buf)
continue;
- if (take_ref) {
- ret = fastrpc_map_get(map);
- if (ret) {
- dev_dbg(sess->dev, "%s: Failed to get map fd=%d ret=%d\n",
- __func__, fd, ret);
- break;
- }
- }
-
*ppmap = map;
ret = 0;
break;
@@ -752,16 +744,14 @@ static const struct dma_buf_ops fastrpc_dma_buf_ops = {
.release = fastrpc_release,
};
-static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
+static int fastrpc_map_attach(struct fastrpc_user *fl, int fd,
u64 len, u32 attr, struct fastrpc_map **ppmap)
{
struct fastrpc_session_ctx *sess = fl->sctx;
struct fastrpc_map *map = NULL;
struct sg_table *table;
- int err = 0;
-
- if (!fastrpc_map_lookup(fl, fd, ppmap, true))
- return 0;
+ struct scatterlist *sgl = NULL;
+ int err = 0, sgl_index = 0;
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map)
@@ -798,7 +788,15 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
map->phys = sg_dma_address(map->table->sgl);
map->phys += ((u64)fl->sctx->sid << 32);
}
- map->size = len;
+ for_each_sg(map->table->sgl, sgl, map->table->nents,
+ sgl_index)
+ map->size += sg_dma_len(sgl);
+ if (len > map->size) {
+ dev_dbg(sess->dev, "Bad size passed len 0x%llx map size 0x%llx\n",
+ len, map->size);
+ err = -EINVAL;
+ goto map_err;
+ }
map->va = sg_virt(map->table->sgl);
map->len = len;
@@ -815,10 +813,10 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
dst_perms[1].vmid = fl->cctx->vmperms[0].vmid;
dst_perms[1].perm = QCOM_SCM_PERM_RWX;
map->attr = attr;
- err = qcom_scm_assign_mem(map->phys, (u64)map->size, &src_perms, dst_perms, 2);
+ err = qcom_scm_assign_mem(map->phys, (u64)map->len, &src_perms, dst_perms, 2);
if (err) {
dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d\n",
- map->phys, map->size, err);
+ map->phys, map->len, err);
goto map_err;
}
}
@@ -839,6 +837,24 @@ get_err:
return err;
}
+static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
+ u64 len, u32 attr, struct fastrpc_map **ppmap)
+{
+ struct fastrpc_session_ctx *sess = fl->sctx;
+ int err = 0;
+
+ if (!fastrpc_map_lookup(fl, fd, ppmap)) {
+ if (!fastrpc_map_get(*ppmap))
+ return 0;
+ dev_dbg(sess->dev, "%s: Failed to get map fd=%d\n",
+ __func__, fd);
+ }
+
+ err = fastrpc_map_attach(fl, fd, len, attr, ppmap);
+
+ return err;
+}
+
/*
* Fastrpc payload buffer with metadata looks like:
*
@@ -911,8 +927,12 @@ static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx)
ctx->args[i].length == 0)
continue;
- err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
- ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]);
+ if (i < ctx->nbufs)
+ err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
+ ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]);
+ else
+ err = fastrpc_map_attach(ctx->fl, ctx->args[i].fd,
+ ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]);
if (err) {
dev_err(dev, "Error Creating map %d\n", err);
return -EINVAL;
@@ -1071,6 +1091,7 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
struct fastrpc_phy_page *pages;
u64 *fdlist;
int i, inbufs, outbufs, handles;
+ int ret = 0;
inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
@@ -1086,23 +1107,26 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
u64 len = rpra[i].buf.len;
if (!kernel) {
- if (copy_to_user((void __user *)dst, src, len))
- return -EFAULT;
+ if (copy_to_user((void __user *)dst, src, len)) {
+ ret = -EFAULT;
+ goto cleanup_fdlist;
+ }
} else {
memcpy(dst, src, len);
}
}
}
+cleanup_fdlist:
/* Clean up fdlist which is updated by DSP */
for (i = 0; i < FASTRPC_MAX_FDLIST; i++) {
if (!fdlist[i])
break;
- if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap, false))
+ if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap))
fastrpc_map_put(mmap);
}
- return 0;
+ return ret;
}
static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
@@ -1723,7 +1747,6 @@ static int fastrpc_get_info_from_kernel(struct fastrpc_ioctl_capability *cap,
uint32_t attribute_id = cap->attribute_id;
uint32_t *dsp_attributes;
unsigned long flags;
- uint32_t domain = cap->domain;
int err;
spin_lock_irqsave(&cctx->lock, flags);
@@ -1741,7 +1764,7 @@ static int fastrpc_get_info_from_kernel(struct fastrpc_ioctl_capability *cap,
err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES);
if (err == DSP_UNSUPPORTED_API) {
dev_info(&cctx->rpdev->dev,
- "Warning: DSP capabilities not supported on domain: %d\n", domain);
+ "Warning: DSP capabilities not supported\n");
kfree(dsp_attributes);
return -EOPNOTSUPP;
} else if (err) {
@@ -1769,17 +1792,6 @@ static int fastrpc_get_dsp_info(struct fastrpc_user *fl, char __user *argp)
return -EFAULT;
cap.capability = 0;
- if (cap.domain >= FASTRPC_DEV_MAX) {
- dev_err(&fl->cctx->rpdev->dev, "Error: Invalid domain id:%d, err:%d\n",
- cap.domain, err);
- return -ECHRNG;
- }
-
- /* Fastrpc Capablities does not support modem domain */
- if (cap.domain == MDSP_DOMAIN_ID) {
- dev_err(&fl->cctx->rpdev->dev, "Error: modem not supported %d\n", err);
- return -ECHRNG;
- }
if (cap.attribute_id >= FASTRPC_MAX_DSP_ATTRIBUTES) {
dev_err(&fl->cctx->rpdev->dev, "Error: invalid attribute: %d, err: %d\n",
@@ -2046,7 +2058,7 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
args[0].length = sizeof(req_msg);
pages.addr = map->phys;
- pages.size = map->size;
+ pages.size = map->len;
args[1].ptr = (u64) (uintptr_t) &pages;
args[1].length = sizeof(pages);
@@ -2061,7 +2073,7 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, &args[0]);
if (err) {
dev_err(dev, "mem mmap error, fd %d, vaddr %llx, size %lld\n",
- req.fd, req.vaddrin, map->size);
+ req.fd, req.vaddrin, map->len);
goto err_invoke;
}
@@ -2074,7 +2086,7 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
/* unmap the memory and release the buffer */
req_unmap.vaddr = (uintptr_t) rsp_msg.vaddr;
- req_unmap.length = map->size;
+ req_unmap.length = map->len;
fastrpc_req_mem_unmap_impl(fl, &req_unmap);
return -EFAULT;
}
@@ -2255,6 +2267,22 @@ static int fastrpc_device_register(struct device *dev, struct fastrpc_channel_ct
return err;
}
+static int fastrpc_get_domain_id(const char *domain)
+{
+ if (!strncmp(domain, "adsp", 4))
+ return ADSP_DOMAIN_ID;
+ else if (!strncmp(domain, "cdsp", 4))
+ return CDSP_DOMAIN_ID;
+ else if (!strncmp(domain, "mdsp", 4))
+ return MDSP_DOMAIN_ID;
+ else if (!strncmp(domain, "sdsp", 4))
+ return SDSP_DOMAIN_ID;
+ else if (!strncmp(domain, "gdsp", 4))
+ return GDSP_DOMAIN_ID;
+
+ return -EINVAL;
+}
+
static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
{
struct device *rdev = &rpdev->dev;
@@ -2270,15 +2298,10 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
return err;
}
- for (i = 0; i < FASTRPC_DEV_MAX; i++) {
- if (!strcmp(domains[i], domain)) {
- domain_id = i;
- break;
- }
- }
+ domain_id = fastrpc_get_domain_id(domain);
if (domain_id < 0) {
- dev_info(rdev, "FastRPC Invalid Domain ID %d\n", domain_id);
+ dev_info(rdev, "FastRPC Domain %s not supported\n", domain);
return -EINVAL;
}
@@ -2325,21 +2348,21 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
case ADSP_DOMAIN_ID:
case MDSP_DOMAIN_ID:
case SDSP_DOMAIN_ID:
- /* Unsigned PD offloading is only supported on CDSP and CDSP1 */
+ /* Unsigned PD offloading is only supported on CDSP and GDSP */
data->unsigned_support = false;
- err = fastrpc_device_register(rdev, data, secure_dsp, domains[domain_id]);
+ err = fastrpc_device_register(rdev, data, secure_dsp, domain);
if (err)
goto err_free_data;
break;
case CDSP_DOMAIN_ID:
- case CDSP1_DOMAIN_ID:
+ case GDSP_DOMAIN_ID:
data->unsigned_support = true;
/* Create both device nodes so that we can allow both Signed and Unsigned PD */
- err = fastrpc_device_register(rdev, data, true, domains[domain_id]);
+ err = fastrpc_device_register(rdev, data, true, domain);
if (err)
goto err_free_data;
- err = fastrpc_device_register(rdev, data, false, domains[domain_id]);
+ err = fastrpc_device_register(rdev, data, false, domain);
if (err)
goto err_deregister_fdev;
break;
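
The new length check in fastrpc_map_attach() sums the DMA length of every scatterlist segment before trusting the caller-supplied len; a condensed sketch of that validation:

	struct scatterlist *sgl;
	u64 total = 0;
	int i;

	for_each_sg(map->table->sgl, sgl, map->table->nents, i)
		total += sg_dma_len(sgl);
	if (len > total)
		return -EINVAL;	/* caller asked for more than was mapped */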
diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c
index 500b1feaf1f6..fd7d5cd50d39 100644
--- a/drivers/misc/genwqe/card_ddcb.c
+++ b/drivers/misc/genwqe/card_ddcb.c
@@ -923,7 +923,7 @@ int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd,
}
if (cmd->asv_length > DDCB_ASV_LENGTH) {
dev_err(&pci_dev->dev, "[%s] err: wrong asv_length of %d\n",
- __func__, cmd->asiv_length);
+ __func__, cmd->asv_length);
return -EINVAL;
}
rc = __genwqe_enqueue_ddcb(cd, req, f_flags);
diff --git a/drivers/misc/hisi_hikey_usb.c b/drivers/misc/hisi_hikey_usb.c
index ffe7b945a298..2c6e448a47f1 100644
--- a/drivers/misc/hisi_hikey_usb.c
+++ b/drivers/misc/hisi_hikey_usb.c
@@ -18,6 +18,7 @@
#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/usb/role.h>
#define DEVICE_DRIVER_NAME "hisi_hikey_usb"
@@ -67,7 +68,7 @@ static void hub_power_ctrl(struct hisi_hikey_usb *hisi_hikey_usb, int value)
if (ret)
dev_err(hisi_hikey_usb->dev,
"Can't switch regulator state to %s\n",
- value ? "enabled" : "disabled");
+ str_enabled_disabled(value));
}
static void usb_switch_ctrl(struct hisi_hikey_usb *hisi_hikey_usb,
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index c44de892a61e..b26c930e3edb 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -94,7 +94,7 @@ static int ibmasmfs_init_fs_context(struct fs_context *fc)
static const struct super_operations ibmasmfs_s_ops = {
.statfs = simple_statfs,
- .drop_inode = generic_delete_inode,
+ .drop_inode = inode_just_drop,
};
static const struct file_operations *ibmasmfs_dir_ops = &simple_dir_operations;
@@ -525,15 +525,9 @@ static ssize_t remote_settings_file_write(struct file *file, const char __user *
if (*offset != 0)
return 0;
- buff = kzalloc (count + 1, GFP_KERNEL);
- if (!buff)
- return -ENOMEM;
-
-
- if (copy_from_user(buff, ubuff, count)) {
- kfree(buff);
- return -EFAULT;
- }
+ buff = memdup_user_nul(ubuff, count);
+ if (IS_ERR(buff))
+ return PTR_ERR(buff);
value = simple_strtoul(buff, NULL, 10);
writel(value, address);
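
memdup_user_nul() replaces the kzalloc()/copy_from_user() pair: it allocates count + 1 bytes, copies from user space, NUL-terminates, and returns an ERR_PTR() on failure, which is why the error path becomes PTR_ERR():

	buff = memdup_user_nul(ubuff, count);
	if (IS_ERR(buff))
		return PTR_ERR(buff);	/* -ENOMEM or -EFAULT */
	/* buff is guaranteed NUL-terminated for simple_strtoul() */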
diff --git a/drivers/misc/lis3lv02d/Kconfig b/drivers/misc/lis3lv02d/Kconfig
index 56005243a230..9d546a42a563 100644
--- a/drivers/misc/lis3lv02d/Kconfig
+++ b/drivers/misc/lis3lv02d/Kconfig
@@ -4,7 +4,7 @@
#
config SENSORS_LIS3_SPI
- tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer (SPI)"
+ tristate "STMicroelectronics LIS3LV02Dx three-axis digital accelerometer (SPI)"
depends on !ACPI && SPI_MASTER && INPUT
select SENSORS_LIS3LV02D
help
@@ -20,7 +20,7 @@ config SENSORS_LIS3_SPI
is called lis3lv02d_spi.
config SENSORS_LIS3_I2C
- tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer (I2C)"
+ tristate "STMicroelectronics LIS3LV02Dx three-axis digital accelerometer (I2C)"
depends on I2C && INPUT
select SENSORS_LIS3LV02D
help
diff --git a/drivers/misc/lkdtm/cfi.c b/drivers/misc/lkdtm/cfi.c
index 6a33889d0902..c3971f7caa65 100644
--- a/drivers/misc/lkdtm/cfi.c
+++ b/drivers/misc/lkdtm/cfi.c
@@ -43,7 +43,7 @@ static void lkdtm_CFI_FORWARD_PROTO(void)
lkdtm_indirect_call((void *)lkdtm_increment_int);
pr_err("FAIL: survived mismatched prototype function call!\n");
- pr_expected_config(CONFIG_CFI_CLANG);
+ pr_expected_config(CONFIG_CFI);
}
/*
diff --git a/drivers/misc/lkdtm/fortify.c b/drivers/misc/lkdtm/fortify.c
index 015927665678..00ed2147113e 100644
--- a/drivers/misc/lkdtm/fortify.c
+++ b/drivers/misc/lkdtm/fortify.c
@@ -44,6 +44,9 @@ static void lkdtm_FORTIFY_STR_MEMBER(void)
char *src;
src = kmalloc(size, GFP_KERNEL);
+ if (!src)
+ return;
+
strscpy(src, "over ten bytes", size);
size = strlen(src) + 1;
@@ -109,6 +112,9 @@ static void lkdtm_FORTIFY_MEM_MEMBER(void)
char *src;
src = kmalloc(size, GFP_KERNEL);
+ if (!src)
+ return;
+
strscpy(src, "over ten bytes", size);
size = strlen(src) + 1;
diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c
index 6c24426104ba..e1f5e9abb301 100644
--- a/drivers/misc/lkdtm/perms.c
+++ b/drivers/misc/lkdtm/perms.c
@@ -9,6 +9,7 @@
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
+#include <linux/objtool.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
@@ -86,6 +87,10 @@ static noinline __nocfi void execute_location(void *dst, bool write)
func();
pr_err("FAIL: func returned\n");
}
+/*
+ * Explicitly doing the wrong thing for testing.
+ */
+ANNOTATE_NOCFI_SYM(execute_location);
static void execute_user_location(void *dst)
{
diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
index ff8f4404d10f..8eddbaa1fccd 100644
--- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
+++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
@@ -438,7 +438,7 @@ static int pci1xxxx_gpio_setup(struct pci1xxxx_gpio *priv, int irq)
gchip->direction_output = pci1xxxx_gpio_direction_output;
gchip->get_direction = pci1xxxx_gpio_get_direction;
gchip->get = pci1xxxx_gpio_get;
- gchip->set_rv = pci1xxxx_gpio_set;
+ gchip->set = pci1xxxx_gpio_set;
gchip->set_config = pci1xxxx_gpio_set_config;
gchip->dbg_show = NULL;
gchip->base = -1;
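
The set_rv -> set rename presumably tracks the GPIO core's switch to an int-returning line setter; under that assumption the driver callback keeps a signature along these lines (a sketch, not verified against the gpio headers):

	/* returns 0 on success or a negative errno */
	static int pci1xxxx_gpio_set(struct gpio_chip *chip,
				     unsigned int offset, int value);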
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index 7575fee96cc6..f8b04e49e4ba 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -81,6 +81,19 @@ config INTEL_MEI_VSC
This driver can also be built as a module. If so, the module
will be called mei-vsc.
+config INTEL_MEI_LB
+ tristate "Intel Late Binding (LB) support on ME Interface"
+ depends on INTEL_MEI_ME
+ depends on DRM_XE
+ help
+ Enable support for Intel Late Binding (LB) via the MEI interface.
+
+ Late Binding is a method for applying firmware updates at runtime,
+ allowing the Intel Xe driver to load firmware payloads such as
+ fan controller or voltage regulator firmware. These updates are
+ authenticated and versioned, and do not require firmware flashing
+ or system reboot.
+
source "drivers/misc/mei/hdcp/Kconfig"
source "drivers/misc/mei/pxp/Kconfig"
source "drivers/misc/mei/gsc_proxy/Kconfig"
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index 6f9fdbf1a495..a203ed766b33 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -31,6 +31,7 @@ CFLAGS_mei-trace.o = -I$(src)
obj-$(CONFIG_INTEL_MEI_HDCP) += hdcp/
obj-$(CONFIG_INTEL_MEI_PXP) += pxp/
obj-$(CONFIG_INTEL_MEI_GSC_PROXY) += gsc_proxy/
+obj-$(CONFIG_INTEL_MEI_LB) += mei_lb.o
obj-$(CONFIG_INTEL_MEI_VSC_HW) += mei-vsc-hw.o
mei-vsc-hw-y := vsc-tp.o
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 90dba20b2de7..e6a1d3534663 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -386,7 +386,7 @@ static int mei_nfc_if_version(struct mei_cl *cl,
ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(cmd), 0,
MEI_CL_IO_TX_BLOCKING);
if (ret < 0) {
- dev_err(bus->dev, "Could not send IF version cmd ret = %d\n", ret);
+ dev_err(&bus->dev, "Could not send IF version cmd ret = %d\n", ret);
return ret;
}
@@ -401,14 +401,14 @@ static int mei_nfc_if_version(struct mei_cl *cl,
bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length, &vtag,
0, 0);
if (bytes_recv < 0 || (size_t)bytes_recv < if_version_length) {
- dev_err(bus->dev, "Could not read IF version ret = %d\n", bytes_recv);
+ dev_err(&bus->dev, "Could not read IF version ret = %d\n", bytes_recv);
ret = -EIO;
goto err;
}
memcpy(ver, reply->data, sizeof(*ver));
- dev_info(bus->dev, "NFC MEI VERSION: IVN 0x%x Vendor ID 0x%x Type 0x%x\n",
+ dev_info(&bus->dev, "NFC MEI VERSION: IVN 0x%x Vendor ID 0x%x Type 0x%x\n",
ver->fw_ivn, ver->vendor_id, ver->radio_type);
err:
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 5cc3ad07d5be..2c810ab12e62 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -615,6 +615,19 @@ u8 mei_cldev_ver(const struct mei_cl_device *cldev)
EXPORT_SYMBOL_GPL(mei_cldev_ver);
/**
+ * mei_cldev_mtu - max message that client can send and receive
+ *
+ * @cldev: mei client device
+ *
+ * Return: mtu or 0 if client is not connected
+ */
+size_t mei_cldev_mtu(const struct mei_cl_device *cldev)
+{
+ return mei_cl_mtu(cldev->cl);
+}
+EXPORT_SYMBOL_GPL(mei_cldev_mtu);
+
+/**
* mei_cldev_enabled - check whether the device is enabled
*
* @cldev: mei client device
@@ -637,7 +650,7 @@ EXPORT_SYMBOL_GPL(mei_cldev_enabled);
*/
static bool mei_cl_bus_module_get(struct mei_cl_device *cldev)
{
- return try_module_get(cldev->bus->dev->driver->owner);
+ return try_module_get(cldev->bus->parent->driver->owner);
}
/**
@@ -647,7 +660,7 @@ static bool mei_cl_bus_module_get(struct mei_cl_device *cldev)
*/
static void mei_cl_bus_module_put(struct mei_cl_device *cldev)
{
- module_put(cldev->bus->dev->driver->owner);
+ module_put(cldev->bus->parent->driver->owner);
}
/**
@@ -814,7 +827,7 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
ret = mei_cl_connect(cl, cldev->me_cl, NULL);
if (ret < 0) {
- dev_err(&cldev->dev, "cannot connect\n");
+ dev_dbg(&cldev->dev, "cannot connect\n");
mei_cl_bus_vtag_free(cldev);
}
@@ -1285,16 +1298,20 @@ static const struct bus_type mei_cl_bus_type = {
static struct mei_device *mei_dev_bus_get(struct mei_device *bus)
{
- if (bus)
- get_device(bus->dev);
+ if (bus) {
+ get_device(&bus->dev);
+ get_device(bus->parent);
+ }
return bus;
}
static void mei_dev_bus_put(struct mei_device *bus)
{
- if (bus)
- put_device(bus->dev);
+ if (bus) {
+ put_device(bus->parent);
+ put_device(&bus->dev);
+ }
}
static void mei_cl_bus_dev_release(struct device *dev)
@@ -1328,7 +1345,7 @@ static const struct device_type mei_cl_device_type = {
static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev)
{
dev_set_name(&cldev->dev, "%s-%pUl",
- dev_name(cldev->bus->dev),
+ dev_name(cldev->bus->parent),
mei_me_cl_uuid(cldev->me_cl));
}
@@ -1357,7 +1374,7 @@ static struct mei_cl_device *mei_cl_bus_dev_alloc(struct mei_device *bus,
}
device_initialize(&cldev->dev);
- cldev->dev.parent = bus->dev;
+ cldev->dev.parent = bus->parent;
cldev->dev.bus = &mei_cl_bus_type;
cldev->dev.type = &mei_cl_device_type;
cldev->bus = mei_dev_bus_get(bus);
@@ -1492,7 +1509,7 @@ static void mei_cl_bus_dev_init(struct mei_device *bus,
WARN_ON(!mutex_is_locked(&bus->cl_bus_lock));
- dev_dbg(bus->dev, "initializing %pUl", mei_me_cl_uuid(me_cl));
+ dev_dbg(&bus->dev, "initializing %pUl", mei_me_cl_uuid(me_cl));
if (me_cl->bus_added)
return;
@@ -1543,7 +1560,7 @@ static void mei_cl_bus_rescan(struct mei_device *bus)
}
mutex_unlock(&bus->cl_bus_lock);
- dev_dbg(bus->dev, "rescan end");
+ dev_dbg(&bus->dev, "rescan end");
}
void mei_cl_bus_rescan_work(struct work_struct *work)
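
The newly exported mei_cldev_mtu() lets client drivers size messages to the connection; a hypothetical caller (names illustrative):

	size_t mtu = mei_cldev_mtu(cldev);

	if (!mtu)
		return -ENOTCONN;		/* 0 means not connected */
	len = min_t(size_t, len, mtu);		/* one message at most */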
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 3db07d2a881f..159e8b841564 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -262,7 +262,7 @@ void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
{
struct mei_me_client *me_cl;
- dev_dbg(dev->dev, "remove %pUl\n", uuid);
+ dev_dbg(&dev->dev, "remove %pUl\n", uuid);
down_write(&dev->me_clients_rwsem);
me_cl = __mei_me_cl_by_uuid(dev, uuid);
@@ -635,12 +635,12 @@ int mei_cl_link(struct mei_cl *cl)
id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
if (id >= MEI_CLIENTS_MAX) {
- dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
+ dev_err(&dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
return -EMFILE;
}
if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
- dev_err(dev->dev, "open_handle_count exceeded %d",
+ dev_err(&dev->dev, "open_handle_count exceeded %d",
MEI_MAX_OPEN_HANDLE_COUNT);
return -EMFILE;
}
@@ -709,9 +709,9 @@ void mei_host_client_init(struct mei_device *dev)
schedule_work(&dev->bus_rescan_work);
- pm_runtime_mark_last_busy(dev->dev);
- dev_dbg(dev->dev, "rpm: autosuspend\n");
- pm_request_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(dev->parent);
+ dev_dbg(&dev->dev, "rpm: autosuspend\n");
+ pm_request_autosuspend(dev->parent);
}
/**
@@ -724,12 +724,12 @@ bool mei_hbuf_acquire(struct mei_device *dev)
{
if (mei_pg_state(dev) == MEI_PG_ON ||
mei_pg_in_transition(dev)) {
- dev_dbg(dev->dev, "device is in pg\n");
+ dev_dbg(&dev->dev, "device is in pg\n");
return false;
}
if (!dev->hbuf_is_ready) {
- dev_dbg(dev->dev, "hbuf is not ready\n");
+ dev_dbg(&dev->dev, "hbuf is not ready\n");
return false;
}
@@ -981,9 +981,9 @@ int mei_cl_disconnect(struct mei_cl *cl)
return 0;
}
- rets = pm_runtime_get(dev->dev);
+ rets = pm_runtime_get(dev->parent);
if (rets < 0 && rets != -EINPROGRESS) {
- pm_runtime_put_noidle(dev->dev);
+ pm_runtime_put_noidle(dev->parent);
cl_err(dev, cl, "rpm: get failed %d\n", rets);
return rets;
}
@@ -991,8 +991,8 @@ int mei_cl_disconnect(struct mei_cl *cl)
rets = __mei_cl_disconnect(cl);
cl_dbg(dev, cl, "rpm: autosuspend\n");
- pm_runtime_mark_last_busy(dev->dev);
- pm_runtime_put_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(dev->parent);
+ pm_runtime_put_autosuspend(dev->parent);
return rets;
}
@@ -1118,9 +1118,9 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
goto nortpm;
}
- rets = pm_runtime_get(dev->dev);
+ rets = pm_runtime_get(dev->parent);
if (rets < 0 && rets != -EINPROGRESS) {
- pm_runtime_put_noidle(dev->dev);
+ pm_runtime_put_noidle(dev->parent);
cl_err(dev, cl, "rpm: get failed %d\n", rets);
goto nortpm;
}
@@ -1167,8 +1167,8 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
rets = cl->status;
out:
cl_dbg(dev, cl, "rpm: autosuspend\n");
- pm_runtime_mark_last_busy(dev->dev);
- pm_runtime_put_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(dev->parent);
+ pm_runtime_put_autosuspend(dev->parent);
mei_io_cb_free(cb);
@@ -1517,9 +1517,9 @@ int mei_cl_notify_request(struct mei_cl *cl,
if (!mei_cl_is_connected(cl))
return -ENODEV;
- rets = pm_runtime_get(dev->dev);
+ rets = pm_runtime_get(dev->parent);
if (rets < 0 && rets != -EINPROGRESS) {
- pm_runtime_put_noidle(dev->dev);
+ pm_runtime_put_noidle(dev->parent);
cl_err(dev, cl, "rpm: get failed %d\n", rets);
return rets;
}
@@ -1554,8 +1554,8 @@ int mei_cl_notify_request(struct mei_cl *cl,
out:
cl_dbg(dev, cl, "rpm: autosuspend\n");
- pm_runtime_mark_last_busy(dev->dev);
- pm_runtime_put_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(dev->parent);
+ pm_runtime_put_autosuspend(dev->parent);
mei_io_cb_free(cb);
return rets;
@@ -1683,9 +1683,9 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
mei_cl_set_read_by_fp(cl, fp);
- rets = pm_runtime_get(dev->dev);
+ rets = pm_runtime_get(dev->parent);
if (rets < 0 && rets != -EINPROGRESS) {
- pm_runtime_put_noidle(dev->dev);
+ pm_runtime_put_noidle(dev->parent);
cl_err(dev, cl, "rpm: get failed %d\n", rets);
goto nortpm;
}
@@ -1702,8 +1702,8 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
out:
cl_dbg(dev, cl, "rpm: autosuspend\n");
- pm_runtime_mark_last_busy(dev->dev);
- pm_runtime_put_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(dev->parent);
+ pm_runtime_put_autosuspend(dev->parent);
nortpm:
if (rets)
mei_io_cb_free(cb);
@@ -1972,9 +1972,9 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, unsigned long time
blocking = cb->blocking;
data = buf->data;
- rets = pm_runtime_get(dev->dev);
+ rets = pm_runtime_get(dev->parent);
if (rets < 0 && rets != -EINPROGRESS) {
- pm_runtime_put_noidle(dev->dev);
+ pm_runtime_put_noidle(dev->parent);
cl_err(dev, cl, "rpm: get failed %zd\n", rets);
goto free;
}
@@ -2092,8 +2092,8 @@ out:
rets = buf_len;
err:
cl_dbg(dev, cl, "rpm: autosuspend\n");
- pm_runtime_mark_last_busy(dev->dev);
- pm_runtime_put_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(dev->parent);
+ pm_runtime_put_autosuspend(dev->parent);
free:
mei_io_cb_free(cb);
@@ -2119,8 +2119,8 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
if (waitqueue_active(&cl->tx_wait)) {
wake_up_interruptible(&cl->tx_wait);
} else {
- pm_runtime_mark_last_busy(dev->dev);
- pm_request_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(dev->parent);
+ pm_request_autosuspend(dev->parent);
}
break;
@@ -2251,7 +2251,7 @@ int mei_cl_irq_dma_unmap(struct mei_cl *cl, struct mei_cl_cb *cb,
static int mei_cl_dma_alloc(struct mei_cl *cl, u8 buf_id, size_t size)
{
- cl->dma.vaddr = dmam_alloc_coherent(cl->dev->dev, size,
+ cl->dma.vaddr = dmam_alloc_coherent(&cl->dev->dev, size,
&cl->dma.daddr, GFP_KERNEL);
if (!cl->dma.vaddr)
return -ENOMEM;
@@ -2265,7 +2265,7 @@ static int mei_cl_dma_alloc(struct mei_cl *cl, u8 buf_id, size_t size)
static void mei_cl_dma_free(struct mei_cl *cl)
{
cl->dma.buffer_id = 0;
- dmam_free_coherent(cl->dev->dev,
+ dmam_free_coherent(&cl->dev->dev,
cl->dma.size, cl->dma.vaddr, cl->dma.daddr);
cl->dma.size = 0;
cl->dma.vaddr = NULL;
@@ -2321,16 +2321,16 @@ int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp,
return -EPROTO;
}
- rets = pm_runtime_get(dev->dev);
+ rets = pm_runtime_get(dev->parent);
if (rets < 0 && rets != -EINPROGRESS) {
- pm_runtime_put_noidle(dev->dev);
+ pm_runtime_put_noidle(dev->parent);
cl_err(dev, cl, "rpm: get failed %d\n", rets);
return rets;
}
rets = mei_cl_dma_alloc(cl, buffer_id, size);
if (rets) {
- pm_runtime_put_noidle(dev->dev);
+ pm_runtime_put_noidle(dev->parent);
return rets;
}
@@ -2366,8 +2366,8 @@ out:
mei_cl_dma_free(cl);
cl_dbg(dev, cl, "rpm: autosuspend\n");
- pm_runtime_mark_last_busy(dev->dev);
- pm_runtime_put_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(dev->parent);
+ pm_runtime_put_autosuspend(dev->parent);
mei_io_cb_free(cb);
return rets;
@@ -2406,9 +2406,9 @@ int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp)
if (!cl->dma_mapped)
return -EPROTO;
- rets = pm_runtime_get(dev->dev);
+ rets = pm_runtime_get(dev->parent);
if (rets < 0 && rets != -EINPROGRESS) {
- pm_runtime_put_noidle(dev->dev);
+ pm_runtime_put_noidle(dev->parent);
cl_err(dev, cl, "rpm: get failed %d\n", rets);
return rets;
}
@@ -2444,8 +2444,8 @@ int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp)
mei_cl_dma_free(cl);
out:
cl_dbg(dev, cl, "rpm: autosuspend\n");
- pm_runtime_mark_last_busy(dev->dev);
- pm_runtime_put_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(dev->parent);
+ pm_runtime_put_autosuspend(dev->parent);
mei_io_cb_free(cb);
return rets;
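
All of the get/put pairs above move from dev->dev to dev->parent: once mei_device embeds its own struct device, runtime-PM state exists only on the physical parent (the PCI or auxiliary device), so every runtime-PM reference must be taken and dropped against the parent. A minimal sketch of the pattern, with the two-field layout assumed from this series:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/pm_runtime.h>

/* Sketch only: field names mirror the layout this series introduces. */
struct mei_device_sketch {
	struct device dev;	/* embedded character device, no RPM state */
	struct device *parent;	/* physical PCI/auxiliary device */
};

static int mei_rpm_io_sketch(struct mei_device_sketch *mdev)
{
	int ret;

	/* Take a runtime-PM reference on the parent, resuming it if idle. */
	ret = pm_runtime_get(mdev->parent);
	if (ret < 0 && ret != -EINPROGRESS) {
		/* Drop the usage count without changing the device state. */
		pm_runtime_put_noidle(mdev->parent);
		return ret;
	}

	/* ... talk to the firmware ... */

	/* Rearm the autosuspend timer, then release the reference. */
	pm_runtime_mark_last_busy(mdev->parent);
	pm_runtime_put_autosuspend(mdev->parent);
	return 0;
}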
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 01ed26a148c4..031114478bcb 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -275,12 +275,12 @@ int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp);
#define MEI_CL_PRM(cl) (cl)->host_client_id, mei_cl_me_id(cl)
#define cl_dbg(dev, cl, format, arg...) \
- dev_dbg((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
+ dev_dbg(&(dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
#define cl_warn(dev, cl, format, arg...) \
- dev_warn((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
+ dev_warn(&(dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
#define cl_err(dev, cl, format, arg...) \
- dev_err((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
+ dev_err(&(dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
#endif /* _MEI_CLIENT_H_ */
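
The logging macros change in the same spirit: dev_dbg() and friends accept any struct device, so once the device is embedded the macros pass its address instead of dereferencing a pointer. A hedged sketch of the embedding these macros rely on, with invented names:

#include <linux/device.h>

struct mei_device_sketch {
	struct device dev;	/* embedded; logging targets this device */
};

#define mei_sketch_dbg(m, fmt, ...) \
	dev_dbg(&(m)->dev, fmt, ##__VA_ARGS__)

static int mei_sketch_setup(struct mei_device_sketch *m, struct device *parent)
{
	device_initialize(&m->dev);	/* refcount starts at one */
	m->dev.parent = parent;
	return dev_set_name(&m->dev, "mei-sketch");
}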
diff --git a/drivers/misc/mei/dma-ring.c b/drivers/misc/mei/dma-ring.c
index 651e77ef82bd..6277c4a5b0fd 100644
--- a/drivers/misc/mei/dma-ring.c
+++ b/drivers/misc/mei/dma-ring.c
@@ -30,7 +30,7 @@ static int mei_dmam_dscr_alloc(struct mei_device *dev,
if (dscr->vaddr)
return 0;
- dscr->vaddr = dmam_alloc_coherent(dev->dev, dscr->size, &dscr->daddr,
+ dscr->vaddr = dmam_alloc_coherent(dev->parent, dscr->size, &dscr->daddr,
GFP_KERNEL);
if (!dscr->vaddr)
return -ENOMEM;
@@ -50,7 +50,7 @@ static void mei_dmam_dscr_free(struct mei_device *dev,
if (!dscr->vaddr)
return;
- dmam_free_coherent(dev->dev, dscr->size, dscr->vaddr, dscr->daddr);
+ dmam_free_coherent(dev->parent, dscr->size, dscr->vaddr, dscr->daddr);
dscr->vaddr = NULL;
}
@@ -177,7 +177,7 @@ void mei_dma_ring_read(struct mei_device *dev, unsigned char *buf, u32 len)
if (WARN_ON(!ctrl))
return;
- dev_dbg(dev->dev, "reading from dma %u bytes\n", len);
+ dev_dbg(&dev->dev, "reading from dma %u bytes\n", len);
if (!len)
return;
@@ -254,7 +254,7 @@ void mei_dma_ring_write(struct mei_device *dev, unsigned char *buf, u32 len)
if (WARN_ON(!ctrl))
return;
- dev_dbg(dev->dev, "writing to dma %u bytes\n", len);
+ dev_dbg(&dev->dev, "writing to dma %u bytes\n", len);
hbuf_depth = mei_dma_ring_hbuf_depth(dev);
wr_idx = READ_ONCE(ctrl->hbuf_wr_idx) & (hbuf_depth - 1);
slots = mei_data2slots(len);
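
Note that the DMA calls go the other way: dmam_alloc_coherent() consults the DMA configuration (masks, dma_ops, IOMMU bindings) of the device it is handed, and the embedded character device has none, so ring descriptors must be allocated against the physical parent. A small sketch under that assumption, with a hypothetical descriptor type:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical descriptor; fields mirror the dscr used above. */
struct mei_dscr_sketch {
	void *vaddr;
	dma_addr_t daddr;
	size_t size;
};

static int mei_dscr_alloc_sketch(struct device *parent,
				 struct mei_dscr_sketch *dscr)
{
	/* Managed: freed automatically when the parent driver unbinds. */
	dscr->vaddr = dmam_alloc_coherent(parent, dscr->size,
					  &dscr->daddr, GFP_KERNEL);
	return dscr->vaddr ? 0 : -ENOMEM;
}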
diff --git a/drivers/misc/mei/gsc-me.c b/drivers/misc/mei/gsc-me.c
index 5a8c26c3df13..93cba090ea08 100644
--- a/drivers/misc/mei/gsc-me.c
+++ b/drivers/misc/mei/gsc-me.c
@@ -106,11 +106,15 @@ static int mei_gsc_probe(struct auxiliary_device *aux_dev,
}
}
+ ret = mei_register(dev, device);
+ if (ret)
+ goto deinterrupt;
+
pm_runtime_get_noresume(device);
pm_runtime_set_active(device);
pm_runtime_enable(device);
- /* Continue to char device setup in spite of firmware handshake failure.
+ /* Continue in spite of firmware handshake failure.
* In order to provide access to the firmware status registers to the user
* space via sysfs.
*/
@@ -120,18 +124,12 @@ static int mei_gsc_probe(struct auxiliary_device *aux_dev,
pm_runtime_set_autosuspend_delay(device, MEI_GSC_RPM_TIMEOUT);
pm_runtime_use_autosuspend(device);
- ret = mei_register(dev, device);
- if (ret)
- goto register_err;
-
pm_runtime_put_noidle(device);
return 0;
-register_err:
- mei_stop(dev);
+deinterrupt:
if (!mei_me_hw_use_polling(hw))
devm_free_irq(device, hw->irq, dev);
-
err:
dev_err(device, "probe failed: %d\n", ret);
dev_set_drvdata(device, NULL);
@@ -152,13 +150,13 @@ static void mei_gsc_remove(struct auxiliary_device *aux_dev)
if (mei_me_hw_use_polling(hw))
kthread_stop(hw->polling_thread);
- mei_deregister(dev);
-
pm_runtime_disable(&aux_dev->dev);
mei_disable_interrupts(dev);
if (!mei_me_hw_use_polling(hw))
devm_free_irq(&aux_dev->dev, hw->irq, dev);
+
+ mei_deregister(dev);
}
static int __maybe_unused mei_gsc_pm_suspend(struct device *device)
@@ -252,7 +250,7 @@ static int __maybe_unused mei_gsc_pm_runtime_resume(struct device *device)
irq_ret = mei_me_irq_thread_handler(1, dev);
if (irq_ret != IRQ_HANDLED)
- dev_err(dev->dev, "thread handler fail %d\n", irq_ret);
+ dev_err(&dev->dev, "thread handler fail %d\n", irq_ret);
return 0;
}
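
The gsc-me.c reordering registers the character device before runtime PM is enabled, so a registration failure unwinds with a plain interrupt teardown (the new deinterrupt label) instead of a full mei_stop(), and remove now releases resources in strict reverse order of probe, deregistering last. A condensed sketch of the resulting shape; the helpers here are placeholders, not the driver's real functions:

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Placeholder helpers standing in for the real irq/chardev code. */
static int setup_interrupts(struct device *d) { return 0; }
static void teardown_interrupts(struct device *d) { }
static int register_chardev(struct device *d) { return 0; }
static void unregister_chardev(struct device *d) { }

static int probe_sketch(struct device *device)
{
	int ret;

	ret = setup_interrupts(device);
	if (ret)
		return ret;

	ret = register_chardev(device);
	if (ret)
		goto deinterrupt;	/* no PM state to unwind yet */

	pm_runtime_get_noresume(device);
	pm_runtime_set_active(device);
	pm_runtime_enable(device);
	pm_runtime_put_noidle(device);
	return 0;

deinterrupt:
	teardown_interrupts(device);
	return ret;
}

static void remove_sketch(struct device *device)
{
	pm_runtime_disable(device);
	teardown_interrupts(device);
	unregister_chardev(device);	/* mirrors probe in reverse */
}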
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 4fe9a2752d43..ccd9df5d1c7d 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -239,7 +239,7 @@ int mei_hbm_start_wait(struct mei_device *dev)
if (ret == 0 && (dev->hbm_state <= MEI_HBM_STARTING)) {
dev->hbm_state = MEI_HBM_IDLE;
- dev_err(dev->dev, "waiting for mei start failed\n");
+ dev_err(&dev->dev, "waiting for mei start failed\n");
return -ETIME;
}
return 0;
@@ -271,8 +271,7 @@ int mei_hbm_start_req(struct mei_device *dev)
dev->hbm_state = MEI_HBM_IDLE;
ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret) {
- dev_err(dev->dev, "version message write failed: ret = %d\n",
- ret);
+ dev_err(&dev->dev, "version message write failed: ret = %d\n", ret);
return ret;
}
@@ -312,8 +311,7 @@ static int mei_hbm_dma_setup_req(struct mei_device *dev)
ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret) {
- dev_err(dev->dev, "dma setup request write failed: ret = %d.\n",
- ret);
+ dev_err(&dev->dev, "dma setup request write failed: ret = %d.\n", ret);
return ret;
}
@@ -351,8 +349,7 @@ static int mei_hbm_capabilities_req(struct mei_device *dev)
ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret) {
- dev_err(dev->dev,
- "capabilities request write failed: ret = %d.\n", ret);
+ dev_err(&dev->dev, "capabilities request write failed: ret = %d.\n", ret);
return ret;
}
@@ -386,8 +383,7 @@ static int mei_hbm_enum_clients_req(struct mei_device *dev)
ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret) {
- dev_err(dev->dev, "enumeration request write failed: ret = %d.\n",
- ret);
+ dev_err(&dev->dev, "enumeration request write failed: ret = %d.\n", ret);
return ret;
}
dev->hbm_state = MEI_HBM_ENUM_CLIENTS;
@@ -443,7 +439,7 @@ static int mei_hbm_add_cl_resp(struct mei_device *dev, u8 addr, u8 status)
struct hbm_add_client_response resp;
int ret;
- dev_dbg(dev->dev, "adding client response\n");
+ dev_dbg(&dev->dev, "adding client response\n");
mei_hbm_hdr(&mei_hdr, sizeof(resp));
@@ -454,8 +450,7 @@ static int mei_hbm_add_cl_resp(struct mei_device *dev, u8 addr, u8 status)
ret = mei_hbm_write_message(dev, &mei_hdr, &resp);
if (ret)
- dev_err(dev->dev, "add client response write failed: ret = %d\n",
- ret);
+ dev_err(&dev->dev, "add client response write failed: ret = %d\n", ret);
return ret;
}
@@ -752,7 +747,7 @@ static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx)
ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret) {
- dev_err(dev->dev, "properties request write failed: ret = %d\n",
+ dev_err(&dev->dev, "properties request write failed: ret = %d\n",
ret);
return ret;
}
@@ -788,7 +783,7 @@ int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd)
ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret)
- dev_err(dev->dev, "power gate command write failed.\n");
+ dev_err(&dev->dev, "power gate command write failed.\n");
return ret;
}
EXPORT_SYMBOL_GPL(mei_hbm_pg);
@@ -847,7 +842,7 @@ static int mei_hbm_add_single_tx_flow_ctrl_creds(struct mei_device *dev,
me_cl = mei_me_cl_by_id(dev, fctrl->me_addr);
if (!me_cl) {
- dev_err(dev->dev, "no such me client %d\n", fctrl->me_addr);
+ dev_err(&dev->dev, "no such me client %d\n", fctrl->me_addr);
return -ENOENT;
}
@@ -857,7 +852,7 @@ static int mei_hbm_add_single_tx_flow_ctrl_creds(struct mei_device *dev,
}
me_cl->tx_flow_ctrl_creds++;
- dev_dbg(dev->dev, "recv flow ctrl msg ME %d (single) creds = %d.\n",
+ dev_dbg(&dev->dev, "recv flow ctrl msg ME %d (single) creds = %d.\n",
fctrl->me_addr, me_cl->tx_flow_ctrl_creds);
rets = 0;
@@ -1085,7 +1080,7 @@ static int mei_hbm_pg_enter_res(struct mei_device *dev)
{
if (mei_pg_state(dev) != MEI_PG_OFF ||
dev->pg_event != MEI_PG_EVENT_WAIT) {
- dev_err(dev->dev, "hbm: pg entry response: state mismatch [%s, %d]\n",
+ dev_err(&dev->dev, "hbm: pg entry response: state mismatch [%s, %d]\n",
mei_pg_state_str(mei_pg_state(dev)), dev->pg_event);
return -EPROTO;
}
@@ -1103,7 +1098,7 @@ static int mei_hbm_pg_enter_res(struct mei_device *dev)
*/
void mei_hbm_pg_resume(struct mei_device *dev)
{
- pm_request_resume(dev->dev);
+ pm_request_resume(dev->parent);
}
EXPORT_SYMBOL_GPL(mei_hbm_pg_resume);
@@ -1119,7 +1114,7 @@ static int mei_hbm_pg_exit_res(struct mei_device *dev)
if (mei_pg_state(dev) != MEI_PG_ON ||
(dev->pg_event != MEI_PG_EVENT_WAIT &&
dev->pg_event != MEI_PG_EVENT_IDLE)) {
- dev_err(dev->dev, "hbm: pg exit response: state mismatch [%s, %d]\n",
+ dev_err(&dev->dev, "hbm: pg exit response: state mismatch [%s, %d]\n",
mei_pg_state_str(mei_pg_state(dev)), dev->pg_event);
return -EPROTO;
}
@@ -1276,19 +1271,19 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
* hbm is put to idle during system reset
*/
if (dev->hbm_state == MEI_HBM_IDLE) {
- dev_dbg(dev->dev, "hbm: state is idle ignore spurious messages\n");
+ dev_dbg(&dev->dev, "hbm: state is idle ignore spurious messages\n");
return 0;
}
switch (mei_msg->hbm_cmd) {
case HOST_START_RES_CMD:
- dev_dbg(dev->dev, "hbm: start: response message received.\n");
+ dev_dbg(&dev->dev, "hbm: start: response message received.\n");
dev->init_clients_timer = 0;
version_res = (struct hbm_host_version_response *)mei_msg;
- dev_dbg(dev->dev, "HBM VERSION: DRIVER=%02d:%02d DEVICE=%02d:%02d\n",
+ dev_dbg(&dev->dev, "HBM VERSION: DRIVER=%02d:%02d DEVICE=%02d:%02d\n",
HBM_MAJOR_VERSION, HBM_MINOR_VERSION,
version_res->me_max_version.major_version,
version_res->me_max_version.minor_version);
@@ -1304,11 +1299,11 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
}
if (!mei_hbm_version_is_supported(dev)) {
- dev_warn(dev->dev, "hbm: start: version mismatch - stopping the driver.\n");
+ dev_warn(&dev->dev, "hbm: start: version mismatch - stopping the driver.\n");
dev->hbm_state = MEI_HBM_STOPPED;
if (mei_hbm_stop_req(dev)) {
- dev_err(dev->dev, "hbm: start: failed to send stop request\n");
+ dev_err(&dev->dev, "hbm: start: failed to send stop request\n");
return -EIO;
}
break;
@@ -1320,10 +1315,10 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
dev->hbm_state != MEI_HBM_STARTING) {
if (dev->dev_state == MEI_DEV_POWER_DOWN ||
dev->dev_state == MEI_DEV_POWERING_DOWN) {
- dev_dbg(dev->dev, "hbm: start: on shutdown, ignoring\n");
+ dev_dbg(&dev->dev, "hbm: start: on shutdown, ignoring\n");
return 0;
}
- dev_err(dev->dev, "hbm: start: state mismatch, [%d, %d]\n",
+ dev_err(&dev->dev, "hbm: start: state mismatch, [%d, %d]\n",
dev->dev_state, dev->hbm_state);
return -EPROTO;
}
@@ -1337,7 +1332,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
if (dev->hbm_f_dr_supported) {
if (mei_dmam_ring_alloc(dev))
- dev_info(dev->dev, "running w/o dma ring\n");
+ dev_info(&dev->dev, "running w/o dma ring\n");
if (mei_dma_ring_is_allocated(dev)) {
if (mei_hbm_dma_setup_req(dev))
return -EIO;
@@ -1357,7 +1352,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
break;
case MEI_HBM_CAPABILITIES_RES_CMD:
- dev_dbg(dev->dev, "hbm: capabilities response: message received.\n");
+ dev_dbg(&dev->dev, "hbm: capabilities response: message received.\n");
dev->init_clients_timer = 0;
@@ -1365,10 +1360,10 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
dev->hbm_state != MEI_HBM_CAP_SETUP) {
if (dev->dev_state == MEI_DEV_POWER_DOWN ||
dev->dev_state == MEI_DEV_POWERING_DOWN) {
- dev_dbg(dev->dev, "hbm: capabilities response: on shutdown, ignoring\n");
+ dev_dbg(&dev->dev, "hbm: capabilities response: on shutdown, ignoring\n");
return 0;
}
- dev_err(dev->dev, "hbm: capabilities response: state mismatch, [%d, %d]\n",
+ dev_err(&dev->dev, "hbm: capabilities response: state mismatch, [%d, %d]\n",
dev->dev_state, dev->hbm_state);
return -EPROTO;
}
@@ -1384,7 +1379,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
if (dev->hbm_f_dr_supported) {
if (mei_dmam_ring_alloc(dev))
- dev_info(dev->dev, "running w/o dma ring\n");
+ dev_info(&dev->dev, "running w/o dma ring\n");
if (mei_dma_ring_is_allocated(dev)) {
if (mei_hbm_dma_setup_req(dev))
return -EIO;
@@ -1400,7 +1395,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
break;
case MEI_HBM_DMA_SETUP_RES_CMD:
- dev_dbg(dev->dev, "hbm: dma setup response: message received.\n");
+ dev_dbg(&dev->dev, "hbm: dma setup response: message received.\n");
dev->init_clients_timer = 0;
@@ -1408,10 +1403,10 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
dev->hbm_state != MEI_HBM_DR_SETUP) {
if (dev->dev_state == MEI_DEV_POWER_DOWN ||
dev->dev_state == MEI_DEV_POWERING_DOWN) {
- dev_dbg(dev->dev, "hbm: dma setup response: on shutdown, ignoring\n");
+ dev_dbg(&dev->dev, "hbm: dma setup response: on shutdown, ignoring\n");
return 0;
}
- dev_err(dev->dev, "hbm: dma setup response: state mismatch, [%d, %d]\n",
+ dev_err(&dev->dev, "hbm: dma setup response: state mismatch, [%d, %d]\n",
dev->dev_state, dev->hbm_state);
return -EPROTO;
}
@@ -1422,9 +1417,9 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
u8 status = dma_setup_res->status;
if (status == MEI_HBMS_NOT_ALLOWED) {
- dev_dbg(dev->dev, "hbm: dma setup not allowed\n");
+ dev_dbg(&dev->dev, "hbm: dma setup not allowed\n");
} else {
- dev_info(dev->dev, "hbm: dma setup response: failure = %d %s\n",
+ dev_info(&dev->dev, "hbm: dma setup response: failure = %d %s\n",
status,
mei_hbm_status_str(status));
}
@@ -1437,38 +1432,38 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
break;
case CLIENT_CONNECT_RES_CMD:
- dev_dbg(dev->dev, "hbm: client connect response: message received.\n");
+ dev_dbg(&dev->dev, "hbm: client connect response: message received.\n");
mei_hbm_cl_res(dev, cl_cmd, MEI_FOP_CONNECT);
break;
case CLIENT_DISCONNECT_RES_CMD:
- dev_dbg(dev->dev, "hbm: client disconnect response: message received.\n");
+ dev_dbg(&dev->dev, "hbm: client disconnect response: message received.\n");
mei_hbm_cl_res(dev, cl_cmd, MEI_FOP_DISCONNECT);
break;
case MEI_FLOW_CONTROL_CMD:
- dev_dbg(dev->dev, "hbm: client flow control response: message received.\n");
+ dev_dbg(&dev->dev, "hbm: client flow control response: message received.\n");
fctrl = (struct hbm_flow_control *)mei_msg;
mei_hbm_cl_tx_flow_ctrl_creds_res(dev, fctrl);
break;
case MEI_PG_ISOLATION_ENTRY_RES_CMD:
- dev_dbg(dev->dev, "hbm: power gate isolation entry response received\n");
+ dev_dbg(&dev->dev, "hbm: power gate isolation entry response received\n");
ret = mei_hbm_pg_enter_res(dev);
if (ret)
return ret;
break;
case MEI_PG_ISOLATION_EXIT_REQ_CMD:
- dev_dbg(dev->dev, "hbm: power gate isolation exit request received\n");
+ dev_dbg(&dev->dev, "hbm: power gate isolation exit request received\n");
ret = mei_hbm_pg_exit_res(dev);
if (ret)
return ret;
break;
case HOST_CLIENT_PROPERTIES_RES_CMD:
- dev_dbg(dev->dev, "hbm: properties response: message received.\n");
+ dev_dbg(&dev->dev, "hbm: properties response: message received.\n");
dev->init_clients_timer = 0;
@@ -1476,10 +1471,10 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
dev->hbm_state != MEI_HBM_CLIENT_PROPERTIES) {
if (dev->dev_state == MEI_DEV_POWER_DOWN ||
dev->dev_state == MEI_DEV_POWERING_DOWN) {
- dev_dbg(dev->dev, "hbm: properties response: on shutdown, ignoring\n");
+ dev_dbg(&dev->dev, "hbm: properties response: on shutdown, ignoring\n");
return 0;
}
- dev_err(dev->dev, "hbm: properties response: state mismatch, [%d, %d]\n",
+ dev_err(&dev->dev, "hbm: properties response: state mismatch, [%d, %d]\n",
dev->dev_state, dev->hbm_state);
return -EPROTO;
}
@@ -1487,10 +1482,10 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
props_res = (struct hbm_props_response *)mei_msg;
if (props_res->status == MEI_HBMS_CLIENT_NOT_FOUND) {
- dev_dbg(dev->dev, "hbm: properties response: %d CLIENT_NOT_FOUND\n",
+ dev_dbg(&dev->dev, "hbm: properties response: %d CLIENT_NOT_FOUND\n",
props_res->me_addr);
} else if (props_res->status) {
- dev_err(dev->dev, "hbm: properties response: wrong status = %d %s\n",
+ dev_err(&dev->dev, "hbm: properties response: wrong status = %d %s\n",
props_res->status,
mei_hbm_status_str(props_res->status));
return -EPROTO;
@@ -1505,7 +1500,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
break;
case HOST_ENUM_RES_CMD:
- dev_dbg(dev->dev, "hbm: enumeration response: message received\n");
+ dev_dbg(&dev->dev, "hbm: enumeration response: message received\n");
dev->init_clients_timer = 0;
@@ -1519,10 +1514,10 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
dev->hbm_state != MEI_HBM_ENUM_CLIENTS) {
if (dev->dev_state == MEI_DEV_POWER_DOWN ||
dev->dev_state == MEI_DEV_POWERING_DOWN) {
- dev_dbg(dev->dev, "hbm: enumeration response: on shutdown, ignoring\n");
+ dev_dbg(&dev->dev, "hbm: enumeration response: on shutdown, ignoring\n");
return 0;
}
- dev_err(dev->dev, "hbm: enumeration response: state mismatch, [%d, %d]\n",
+ dev_err(&dev->dev, "hbm: enumeration response: state mismatch, [%d, %d]\n",
dev->dev_state, dev->hbm_state);
return -EPROTO;
}
@@ -1536,77 +1531,77 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
break;
case HOST_STOP_RES_CMD:
- dev_dbg(dev->dev, "hbm: stop response: message received\n");
+ dev_dbg(&dev->dev, "hbm: stop response: message received\n");
dev->init_clients_timer = 0;
if (dev->hbm_state != MEI_HBM_STOPPED) {
- dev_err(dev->dev, "hbm: stop response: state mismatch, [%d, %d]\n",
+ dev_err(&dev->dev, "hbm: stop response: state mismatch, [%d, %d]\n",
dev->dev_state, dev->hbm_state);
return -EPROTO;
}
mei_set_devstate(dev, MEI_DEV_POWER_DOWN);
- dev_info(dev->dev, "hbm: stop response: resetting.\n");
+ dev_info(&dev->dev, "hbm: stop response: resetting.\n");
/* force the reset */
return -EPROTO;
case CLIENT_DISCONNECT_REQ_CMD:
- dev_dbg(dev->dev, "hbm: disconnect request: message received\n");
+ dev_dbg(&dev->dev, "hbm: disconnect request: message received\n");
disconnect_req = (struct hbm_client_connect_request *)mei_msg;
mei_hbm_fw_disconnect_req(dev, disconnect_req);
break;
case ME_STOP_REQ_CMD:
- dev_dbg(dev->dev, "hbm: stop request: message received\n");
+ dev_dbg(&dev->dev, "hbm: stop request: message received\n");
dev->hbm_state = MEI_HBM_STOPPED;
if (mei_hbm_stop_req(dev)) {
- dev_err(dev->dev, "hbm: stop request: failed to send stop request\n");
+ dev_err(&dev->dev, "hbm: stop request: failed to send stop request\n");
return -EIO;
}
break;
case MEI_HBM_ADD_CLIENT_REQ_CMD:
- dev_dbg(dev->dev, "hbm: add client request received\n");
+ dev_dbg(&dev->dev, "hbm: add client request received\n");
/*
* after the host receives the enum_resp
* message clients may be added or removed
*/
if (dev->hbm_state <= MEI_HBM_ENUM_CLIENTS ||
dev->hbm_state >= MEI_HBM_STOPPED) {
- dev_err(dev->dev, "hbm: add client: state mismatch, [%d, %d]\n",
+ dev_err(&dev->dev, "hbm: add client: state mismatch, [%d, %d]\n",
dev->dev_state, dev->hbm_state);
return -EPROTO;
}
add_cl_req = (struct hbm_add_client_request *)mei_msg;
ret = mei_hbm_fw_add_cl_req(dev, add_cl_req);
if (ret) {
- dev_err(dev->dev, "hbm: add client: failed to send response %d\n",
+ dev_err(&dev->dev, "hbm: add client: failed to send response %d\n",
ret);
return -EIO;
}
- dev_dbg(dev->dev, "hbm: add client request processed\n");
+ dev_dbg(&dev->dev, "hbm: add client request processed\n");
break;
case MEI_HBM_NOTIFY_RES_CMD:
- dev_dbg(dev->dev, "hbm: notify response received\n");
+ dev_dbg(&dev->dev, "hbm: notify response received\n");
mei_hbm_cl_res(dev, cl_cmd, notify_res_to_fop(cl_cmd));
break;
case MEI_HBM_NOTIFICATION_CMD:
- dev_dbg(dev->dev, "hbm: notification\n");
+ dev_dbg(&dev->dev, "hbm: notification\n");
mei_hbm_cl_notify(dev, cl_cmd);
break;
case MEI_HBM_CLIENT_DMA_MAP_RES_CMD:
- dev_dbg(dev->dev, "hbm: client dma map response: message received.\n");
+ dev_dbg(&dev->dev, "hbm: client dma map response: message received.\n");
client_dma_res = (struct hbm_client_dma_response *)mei_msg;
mei_hbm_cl_dma_map_res(dev, client_dma_res);
break;
case MEI_HBM_CLIENT_DMA_UNMAP_RES_CMD:
- dev_dbg(dev->dev, "hbm: client dma unmap response: message received.\n");
+ dev_dbg(&dev->dev, "hbm: client dma unmap response: message received.\n");
client_dma_res = (struct hbm_client_dma_response *)mei_msg;
mei_hbm_cl_dma_unmap_res(dev, client_dma_res);
break;
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index d11a0740b47c..d4612c659784 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -84,7 +84,7 @@ static inline u32 mei_me_mecsr_read(const struct mei_device *dev)
u32 reg;
reg = mei_me_reg_read(to_me_hw(dev), ME_CSR_HA);
- trace_mei_reg_read(dev->dev, "ME_CSR_HA", ME_CSR_HA, reg);
+ trace_mei_reg_read(&dev->dev, "ME_CSR_HA", ME_CSR_HA, reg);
return reg;
}
@@ -101,7 +101,7 @@ static inline u32 mei_hcsr_read(const struct mei_device *dev)
u32 reg;
reg = mei_me_reg_read(to_me_hw(dev), H_CSR);
- trace_mei_reg_read(dev->dev, "H_CSR", H_CSR, reg);
+ trace_mei_reg_read(&dev->dev, "H_CSR", H_CSR, reg);
return reg;
}
@@ -114,7 +114,7 @@ static inline u32 mei_hcsr_read(const struct mei_device *dev)
*/
static inline void mei_hcsr_write(struct mei_device *dev, u32 reg)
{
- trace_mei_reg_write(dev->dev, "H_CSR", H_CSR, reg);
+ trace_mei_reg_write(&dev->dev, "H_CSR", H_CSR, reg);
mei_me_reg_write(to_me_hw(dev), H_CSR, reg);
}
@@ -156,7 +156,7 @@ static inline u32 mei_me_d0i3c_read(const struct mei_device *dev)
u32 reg;
reg = mei_me_reg_read(to_me_hw(dev), H_D0I3C);
- trace_mei_reg_read(dev->dev, "H_D0I3C", H_D0I3C, reg);
+ trace_mei_reg_read(&dev->dev, "H_D0I3C", H_D0I3C, reg);
return reg;
}
@@ -169,7 +169,7 @@ static inline u32 mei_me_d0i3c_read(const struct mei_device *dev)
*/
static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
{
- trace_mei_reg_write(dev->dev, "H_D0I3C", H_D0I3C, reg);
+ trace_mei_reg_write(&dev->dev, "H_D0I3C", H_D0I3C, reg);
mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg);
}
@@ -189,7 +189,7 @@ static int mei_me_trc_status(struct mei_device *dev, u32 *trc)
return -EOPNOTSUPP;
*trc = mei_me_reg_read(hw, ME_TRC);
- trace_mei_reg_read(dev->dev, "ME_TRC", ME_TRC, *trc);
+ trace_mei_reg_read(&dev->dev, "ME_TRC", ME_TRC, *trc);
return 0;
}
@@ -217,7 +217,7 @@ static int mei_me_fw_status(struct mei_device *dev,
for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
ret = hw->read_fws(dev, fw_src->status[i],
&fw_status->status[i]);
- trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_X",
+ trace_mei_pci_cfg_read(&dev->dev, "PCI_CFG_HFS_X",
fw_src->status[i],
fw_status->status[i]);
if (ret)
@@ -251,7 +251,7 @@ static int mei_me_hw_config(struct mei_device *dev)
reg = 0;
hw->read_fws(dev, PCI_CFG_HFS_1, &reg);
- trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
+ trace_mei_pci_cfg_read(&dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
hw->d0i3_supported =
((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK);
@@ -447,7 +447,7 @@ static void mei_gsc_pxp_check(struct mei_device *dev)
return;
hw->read_fws(dev, PCI_CFG_HFS_5, &fwsts5);
- trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_5", PCI_CFG_HFS_5, fwsts5);
+ trace_mei_pci_cfg_read(&dev->dev, "PCI_CFG_HFS_5", PCI_CFG_HFS_5, fwsts5);
if ((fwsts5 & GSC_CFG_HFS_5_BOOT_TYPE_MSK) == GSC_CFG_HFS_5_BOOT_TYPE_PXP) {
if (dev->gsc_reset_to_pxp == MEI_DEV_RESET_TO_PXP_DEFAULT)
@@ -460,10 +460,10 @@ static void mei_gsc_pxp_check(struct mei_device *dev)
return;
if ((fwsts5 & GSC_CFG_HFS_5_BOOT_TYPE_MSK) == GSC_CFG_HFS_5_BOOT_TYPE_PXP) {
- dev_dbg(dev->dev, "pxp mode is ready 0x%08x\n", fwsts5);
+ dev_dbg(&dev->dev, "pxp mode is ready 0x%08x\n", fwsts5);
dev->pxp_mode = MEI_DEV_PXP_READY;
} else {
- dev_dbg(dev->dev, "pxp mode is not ready 0x%08x\n", fwsts5);
+ dev_dbg(&dev->dev, "pxp mode is not ready 0x%08x\n", fwsts5);
}
}
@@ -482,7 +482,7 @@ static int mei_me_hw_ready_wait(struct mei_device *dev)
dev->timeouts.hw_ready);
mutex_lock(&dev->device_lock);
if (!dev->recvd_hw_ready) {
- dev_err(dev->dev, "wait hw ready failed\n");
+ dev_err(&dev->dev, "wait hw ready failed\n");
return -ETIME;
}
@@ -494,43 +494,6 @@ static int mei_me_hw_ready_wait(struct mei_device *dev)
}
/**
- * mei_me_check_fw_reset - check for the firmware reset error and exception conditions
- *
- * @dev: mei device
- */
-static void mei_me_check_fw_reset(struct mei_device *dev)
-{
- struct mei_fw_status fw_status;
- char fw_sts_str[MEI_FW_STATUS_STR_SZ] = {0};
- int ret;
- u32 fw_pm_event = 0;
-
- if (!dev->saved_fw_status_flag)
- goto end;
-
- if (dev->gsc_reset_to_pxp == MEI_DEV_RESET_TO_PXP_PERFORMED) {
- ret = mei_fw_status(dev, &fw_status);
- if (!ret) {
- fw_pm_event = fw_status.status[1] & PCI_CFG_HFS_2_PM_EVENT_MASK;
- if (fw_pm_event != PCI_CFG_HFS_2_PM_CMOFF_TO_CMX_ERROR &&
- fw_pm_event != PCI_CFG_HFS_2_PM_CM_RESET_ERROR)
- goto end;
- } else {
- dev_err(dev->dev, "failed to read firmware status: %d\n", ret);
- }
- }
-
- mei_fw_status2str(&dev->saved_fw_status, fw_sts_str, sizeof(fw_sts_str));
- dev_warn(dev->dev, "unexpected reset: fw_pm_event = 0x%x, dev_state = %u fw status = %s\n",
- fw_pm_event, dev->saved_dev_state, fw_sts_str);
-
-end:
- if (dev->gsc_reset_to_pxp == MEI_DEV_RESET_TO_PXP_PERFORMED)
- dev->gsc_reset_to_pxp = MEI_DEV_RESET_TO_PXP_DONE;
- dev->saved_fw_status_flag = false;
-}
-
-/**
* mei_me_hw_start - hw start routine
*
* @dev: mei device
@@ -540,11 +503,12 @@ static int mei_me_hw_start(struct mei_device *dev)
{
int ret = mei_me_hw_ready_wait(dev);
- if (kind_is_gsc(dev) || kind_is_gscfi(dev))
- mei_me_check_fw_reset(dev);
+ if ((kind_is_gsc(dev) || kind_is_gscfi(dev)) &&
+ dev->gsc_reset_to_pxp == MEI_DEV_RESET_TO_PXP_PERFORMED)
+ dev->gsc_reset_to_pxp = MEI_DEV_RESET_TO_PXP_DONE;
if (ret)
return ret;
- dev_dbg(dev->dev, "hw is ready\n");
+ dev_dbg(&dev->dev, "hw is ready\n");
mei_me_host_set_ready(dev);
return ret;
@@ -644,14 +608,14 @@ static int mei_me_hbuf_write(struct mei_device *dev,
return -EINVAL;
if (!data && data_len) {
- dev_err(dev->dev, "wrong parameters null data with data_len = %zu\n", data_len);
+ dev_err(&dev->dev, "wrong parameters null data with data_len = %zu\n", data_len);
return -EINVAL;
}
- dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr));
+ dev_dbg(&dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr));
empty_slots = mei_hbuf_empty_slots(dev);
- dev_dbg(dev->dev, "empty slots = %d.\n", empty_slots);
+ dev_dbg(&dev->dev, "empty slots = %d.\n", empty_slots);
if (empty_slots < 0)
return -EOVERFLOW;
@@ -706,7 +670,7 @@ static int mei_me_count_full_read_slots(struct mei_device *dev)
if (filled_slots > buffer_depth)
return -EOVERFLOW;
- dev_dbg(dev->dev, "filled_slots =%08x\n", filled_slots);
+ dev_dbg(&dev->dev, "filled_slots =%08x\n", filled_slots);
return (int)filled_slots;
}
@@ -748,11 +712,11 @@ static void mei_me_pg_set(struct mei_device *dev)
u32 reg;
reg = mei_me_reg_read(hw, H_HPG_CSR);
- trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
+ trace_mei_reg_read(&dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
reg |= H_HPG_CSR_PGI;
- trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
+ trace_mei_reg_write(&dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
mei_me_reg_write(hw, H_HPG_CSR, reg);
}
@@ -767,13 +731,13 @@ static void mei_me_pg_unset(struct mei_device *dev)
u32 reg;
reg = mei_me_reg_read(hw, H_HPG_CSR);
- trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
+ trace_mei_reg_read(&dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");
reg |= H_HPG_CSR_PGIHEXR;
- trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
+ trace_mei_reg_write(&dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
mei_me_reg_write(hw, H_HPG_CSR, reg);
}
@@ -905,7 +869,7 @@ static bool mei_me_pg_is_enabled(struct mei_device *dev)
return true;
notsupported:
- dev_dbg(dev->dev, "pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d\n",
+ dev_dbg(&dev->dev, "pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d\n",
hw->d0i3_supported,
!!(reg & ME_PGIC_HRA),
dev->version.major_version,
@@ -974,7 +938,7 @@ static int mei_me_d0i3_enter_sync(struct mei_device *dev)
reg = mei_me_d0i3c_read(dev);
if (reg & H_D0I3C_I3) {
/* we are in d0i3, nothing to do */
- dev_dbg(dev->dev, "d0i3 set not needed\n");
+ dev_dbg(&dev->dev, "d0i3 set not needed\n");
ret = 0;
goto on;
}
@@ -1003,7 +967,7 @@ static int mei_me_d0i3_enter_sync(struct mei_device *dev)
reg = mei_me_d0i3_set(dev, true);
if (!(reg & H_D0I3C_CIP)) {
- dev_dbg(dev->dev, "d0i3 enter wait not needed\n");
+ dev_dbg(&dev->dev, "d0i3 enter wait not needed\n");
ret = 0;
goto on;
}
@@ -1027,7 +991,7 @@ on:
hw->pg_state = MEI_PG_ON;
out:
dev->pg_event = MEI_PG_EVENT_IDLE;
- dev_dbg(dev->dev, "d0i3 enter ret = %d\n", ret);
+ dev_dbg(&dev->dev, "d0i3 enter ret = %d\n", ret);
return ret;
}
@@ -1049,7 +1013,7 @@ static int mei_me_d0i3_enter(struct mei_device *dev)
reg = mei_me_d0i3c_read(dev);
if (reg & H_D0I3C_I3) {
/* we are in d0i3, nothing to do */
- dev_dbg(dev->dev, "already d0i3 : set not needed\n");
+ dev_dbg(&dev->dev, "already d0i3 : set not needed\n");
goto on;
}
@@ -1057,7 +1021,7 @@ static int mei_me_d0i3_enter(struct mei_device *dev)
on:
hw->pg_state = MEI_PG_ON;
dev->pg_event = MEI_PG_EVENT_IDLE;
- dev_dbg(dev->dev, "d0i3 enter\n");
+ dev_dbg(&dev->dev, "d0i3 enter\n");
return 0;
}
@@ -1079,14 +1043,14 @@ static int mei_me_d0i3_exit_sync(struct mei_device *dev)
reg = mei_me_d0i3c_read(dev);
if (!(reg & H_D0I3C_I3)) {
/* we are not in d0i3, nothing to do */
- dev_dbg(dev->dev, "d0i3 exit not needed\n");
+ dev_dbg(&dev->dev, "d0i3 exit not needed\n");
ret = 0;
goto off;
}
reg = mei_me_d0i3_unset(dev);
if (!(reg & H_D0I3C_CIP)) {
- dev_dbg(dev->dev, "d0i3 exit wait not needed\n");
+ dev_dbg(&dev->dev, "d0i3 exit wait not needed\n");
ret = 0;
goto off;
}
@@ -1111,7 +1075,7 @@ off:
out:
dev->pg_event = MEI_PG_EVENT_IDLE;
- dev_dbg(dev->dev, "d0i3 exit ret = %d\n", ret);
+ dev_dbg(&dev->dev, "d0i3 exit ret = %d\n", ret);
return ret;
}
@@ -1154,7 +1118,7 @@ static void mei_me_d0i3_intr(struct mei_device *dev, u32 intr_source)
* force H_RDY because it could be
* wiped off during PG
*/
- dev_dbg(dev->dev, "d0i3 set host ready\n");
+ dev_dbg(&dev->dev, "d0i3 set host ready\n");
mei_me_host_set_ready(dev);
}
} else {
@@ -1170,7 +1134,7 @@ static void mei_me_d0i3_intr(struct mei_device *dev, u32 intr_source)
* we got here because of HW initiated exit from D0i3.
* Start runtime pm resume sequence to exit low power state.
*/
- dev_dbg(dev->dev, "d0i3 want resume\n");
+ dev_dbg(&dev->dev, "d0i3 want resume\n");
mei_hbm_pg_resume(dev);
}
}
@@ -1250,7 +1214,7 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
}
}
- pm_runtime_set_active(dev->dev);
+ pm_runtime_set_active(dev->parent);
hcsr = mei_hcsr_read(dev);
/* H_RST may be found lit before reset is started,
@@ -1259,7 +1223,7 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
* we need to clean H_RST bit to start a successful reset sequence.
*/
if ((hcsr & H_RST) == H_RST) {
- dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
+ dev_warn(&dev->dev, "H_RST is set = 0x%08X", hcsr);
hcsr &= ~H_RST;
mei_hcsr_set(dev, hcsr);
hcsr = mei_hcsr_read(dev);
@@ -1280,10 +1244,10 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
hcsr = mei_hcsr_read(dev);
if ((hcsr & H_RST) == 0)
- dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);
+ dev_warn(&dev->dev, "H_RST is not set = 0x%08X", hcsr);
if ((hcsr & H_RDY) == H_RDY)
- dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);
+ dev_warn(&dev->dev, "H_RDY is not cleared 0x%08X", hcsr);
if (!intr_enable) {
mei_me_hw_reset_release(dev);
@@ -1313,7 +1277,7 @@ irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
if (!me_intr_src(hcsr))
return IRQ_NONE;
- dev_dbg(dev->dev, "interrupt source 0x%08X\n", me_intr_src(hcsr));
+ dev_dbg(&dev->dev, "interrupt source 0x%08X\n", me_intr_src(hcsr));
/* disable interrupts on device */
me_intr_disable(dev, hcsr);
@@ -1339,7 +1303,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
u32 hcsr;
int rets = 0;
- dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n");
+ dev_dbg(&dev->dev, "function called after ISR to handle the interrupt processing.\n");
/* initialize our complete list */
mutex_lock(&dev->device_lock);
@@ -1351,10 +1315,10 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
/* check if ME wants a reset */
if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
if (kind_is_gsc(dev) || kind_is_gscfi(dev)) {
- dev_dbg(dev->dev, "FW not ready: resetting: dev_state = %d\n",
+ dev_dbg(&dev->dev, "FW not ready: resetting: dev_state = %d\n",
dev->dev_state);
} else {
- dev_warn(dev->dev, "FW not ready: resetting: dev_state = %d\n",
+ dev_warn(&dev->dev, "FW not ready: resetting: dev_state = %d\n",
dev->dev_state);
}
if (dev->dev_state == MEI_DEV_POWERING_DOWN ||
@@ -1373,18 +1337,29 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
/* check if we need to start the dev */
if (!mei_host_is_ready(dev)) {
if (mei_hw_is_ready(dev)) {
- dev_dbg(dev->dev, "we need to start the dev.\n");
- dev->recvd_hw_ready = true;
- wake_up(&dev->wait_hw_ready);
+ /* synchronized by dev mutex */
+ if (waitqueue_active(&dev->wait_hw_ready)) {
+ dev_dbg(&dev->dev, "we need to start the dev.\n");
+ dev->recvd_hw_ready = true;
+ wake_up(&dev->wait_hw_ready);
+ } else if (dev->dev_state != MEI_DEV_UNINITIALIZED &&
+ dev->dev_state != MEI_DEV_POWERING_DOWN &&
+ dev->dev_state != MEI_DEV_POWER_DOWN) {
+ dev_dbg(&dev->dev, "Force link reset.\n");
+ schedule_work(&dev->reset_work);
+ } else {
+ dev_dbg(&dev->dev, "Ignore this interrupt in state = %d\n",
+ dev->dev_state);
+ }
} else {
- dev_dbg(dev->dev, "Spurious Interrupt\n");
+ dev_dbg(&dev->dev, "Spurious Interrupt\n");
}
goto end;
}
/* check slots available for reading */
slots = mei_count_full_read_slots(dev);
while (slots > 0) {
- dev_dbg(dev->dev, "slots to read = %08x\n", slots);
+ dev_dbg(&dev->dev, "slots to read = %08x\n", slots);
rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
/* There is a race between ME write and interrupt delivery:
* Not all data is always available immediately after the
@@ -1394,7 +1369,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
break;
if (rets) {
- dev_err(dev->dev, "mei_irq_read_handler ret = %d, state = %d.\n",
+ dev_err(&dev->dev, "mei_irq_read_handler ret = %d, state = %d.\n",
rets, dev->dev_state);
if (dev->dev_state != MEI_DEV_RESETTING &&
dev->dev_state != MEI_DEV_DISABLED &&
@@ -1421,7 +1396,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
mei_irq_compl_handler(dev, &cmpl_list);
end:
- dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
+ dev_dbg(&dev->dev, "interrupt thread end ret = %d\n", rets);
mei_me_intr_enable(dev);
mutex_unlock(&dev->device_lock);
return IRQ_HANDLED;
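
The interesting hunk above is the hw-ready branch: the handler now wakes wait_hw_ready only when a waiter is actually sleeping (both sides hold device_lock, which is what makes the bare waitqueue_active() test safe), and a ready interrupt arriving with no waiter in an operational state forces a link reset instead of being silently absorbed. The decision tree in isolation, as a sketch:

#include <linux/wait.h>
#include <linux/workqueue.h>

/* Sketch: checker and sleeper serialize on the same mutex, so the
 * waitqueue_active() test cannot race with a waiter being added.
 */
static void hw_ready_irq_sketch(wait_queue_head_t *wq, bool *recvd,
				struct work_struct *reset_work,
				bool operational)
{
	if (waitqueue_active(wq)) {
		*recvd = true;			/* start-up path is waiting */
		wake_up(wq);
	} else if (operational) {
		schedule_work(reset_work);	/* unexpected ready: reset link */
	}
	/* otherwise: uninitialized or powering down, ignore */
}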
@@ -1453,7 +1428,7 @@ int mei_me_polling_thread(void *_dev)
irqreturn_t irq_ret;
long polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE;
- dev_dbg(dev->dev, "kernel thread is running\n");
+ dev_dbg(&dev->dev, "kernel thread is running\n");
while (!kthread_should_stop()) {
struct mei_me_hw *hw = to_me_hw(dev);
u32 hcsr;
@@ -1470,7 +1445,7 @@ int mei_me_polling_thread(void *_dev)
polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE;
irq_ret = mei_me_irq_thread_handler(1, dev);
if (irq_ret != IRQ_HANDLED)
- dev_err(dev->dev, "irq_ret %d\n", irq_ret);
+ dev_err(&dev->dev, "irq_ret %d\n", irq_ret);
} else {
/*
* Increase timeout by MEI_POLLING_TIMEOUT_ACTIVE
@@ -1804,7 +1779,7 @@ struct mei_device *mei_me_dev_init(struct device *parent,
struct mei_me_hw *hw;
int i;
- dev = devm_kzalloc(parent, sizeof(*dev) + sizeof(*hw), GFP_KERNEL);
+ dev = kzalloc(sizeof(*dev) + sizeof(*hw), GFP_KERNEL);
if (!dev)
return NULL;
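
Dropping devm_kzalloc() is deliberate: the allocation's lifetime is no longer tied to the parent's driver binding but to the embedded struct device's refcount, so the memory must instead be freed from a release callback once the last reference is dropped. A minimal sketch of that ownership model, assuming a release handler of this shape exists elsewhere in the series:

#include <linux/device.h>
#include <linux/slab.h>

struct mei_device_sketch {
	struct device dev;	/* embedded; its refcount owns the memory */
};

static void mei_release_sketch(struct device *d)
{
	kfree(container_of(d, struct mei_device_sketch, dev));
}

static struct mei_device_sketch *mei_alloc_sketch(void)
{
	struct mei_device_sketch *m = kzalloc(sizeof(*m), GFP_KERNEL);

	if (!m)
		return NULL;
	device_initialize(&m->dev);
	m->dev.release = mei_release_sketch;
	return m;	/* freed by the final put_device(&m->dev) */
}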
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index e9476f9ae25d..e4688c391027 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -160,7 +160,7 @@ static bool mei_txe_aliveness_set(struct mei_device *dev, u32 req)
struct mei_txe_hw *hw = to_txe_hw(dev);
bool do_req = hw->aliveness != req;
- dev_dbg(dev->dev, "Aliveness current=%d request=%d\n",
+ dev_dbg(&dev->dev, "Aliveness current=%d request=%d\n",
hw->aliveness, req);
if (do_req) {
dev->pg_event = MEI_PG_EVENT_WAIT;
@@ -227,7 +227,7 @@ static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected)
hw->aliveness = mei_txe_aliveness_get(dev);
if (hw->aliveness == expected) {
dev->pg_event = MEI_PG_EVENT_IDLE;
- dev_dbg(dev->dev, "aliveness settled after %lld usecs\n",
+ dev_dbg(&dev->dev, "aliveness settled after %lld usecs\n",
ktime_to_us(ktime_sub(ktime_get(), start)));
return 0;
}
@@ -235,7 +235,7 @@ static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected)
} while (ktime_before(ktime_get(), stop));
dev->pg_event = MEI_PG_EVENT_IDLE;
- dev_err(dev->dev, "aliveness timed out\n");
+ dev_err(&dev->dev, "aliveness timed out\n");
return -ETIME;
}
@@ -270,10 +270,10 @@ static int mei_txe_aliveness_wait(struct mei_device *dev, u32 expected)
ret = hw->aliveness == expected ? 0 : -ETIME;
if (ret)
- dev_warn(dev->dev, "aliveness timed out = %ld aliveness = %d event = %d\n",
+ dev_warn(&dev->dev, "aliveness timed out = %ld aliveness = %d event = %d\n",
err, hw->aliveness, dev->pg_event);
else
- dev_dbg(dev->dev, "aliveness settled after = %d msec aliveness = %d event = %d\n",
+ dev_dbg(&dev->dev, "aliveness settled after = %d msec aliveness = %d event = %d\n",
jiffies_to_msecs(timeout - err),
hw->aliveness, dev->pg_event);
@@ -438,7 +438,7 @@ static void mei_txe_intr_enable(struct mei_device *dev)
*/
static void mei_txe_synchronize_irq(struct mei_device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
+ struct pci_dev *pdev = to_pci_dev(dev->parent);
synchronize_irq(pdev->irq);
}
@@ -464,7 +464,7 @@ static bool mei_txe_pending_interrupts(struct mei_device *dev)
TXE_INTR_OUT_DB));
if (ret) {
- dev_dbg(dev->dev,
+ dev_dbg(&dev->dev,
"Pending Interrupts InReady=%01d Readiness=%01d, Aliveness=%01d, OutDoor=%01d\n",
!!(hw->intr_cause & TXE_INTR_IN_READY),
!!(hw->intr_cause & TXE_INTR_READINESS),
@@ -612,7 +612,7 @@ static int mei_txe_readiness_wait(struct mei_device *dev)
msecs_to_jiffies(SEC_RESET_WAIT_TIMEOUT));
mutex_lock(&dev->device_lock);
if (!dev->recvd_hw_ready) {
- dev_err(dev->dev, "wait for readiness failed\n");
+ dev_err(&dev->dev, "wait for readiness failed\n");
return -ETIME;
}
@@ -638,7 +638,7 @@ static int mei_txe_fw_status(struct mei_device *dev,
struct mei_fw_status *fw_status)
{
const struct mei_fw_status *fw_src = &mei_txe_fw_sts;
- struct pci_dev *pdev = to_pci_dev(dev->dev);
+ struct pci_dev *pdev = to_pci_dev(dev->parent);
int ret;
int i;
@@ -649,7 +649,7 @@ static int mei_txe_fw_status(struct mei_device *dev,
for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
ret = pci_read_config_dword(pdev, fw_src->status[i],
&fw_status->status[i]);
- trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HSF_X",
+ trace_mei_pci_cfg_read(&dev->dev, "PCI_CFG_HSF_X",
fw_src->status[i],
fw_status->status[i]);
if (ret)
@@ -677,7 +677,7 @@ static int mei_txe_hw_config(struct mei_device *dev)
hw->aliveness = mei_txe_aliveness_get(dev);
hw->readiness = mei_txe_readiness_get(dev);
- dev_dbg(dev->dev, "aliveness_resp = 0x%08x, readiness = 0x%08x.\n",
+ dev_dbg(&dev->dev, "aliveness_resp = 0x%08x, readiness = 0x%08x.\n",
hw->aliveness, hw->readiness);
return 0;
@@ -708,7 +708,7 @@ static int mei_txe_write(struct mei_device *dev,
if (WARN_ON(!hdr || !data || hdr_len & 0x3))
return -EINVAL;
- dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr));
+ dev_dbg(&dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr));
dw_cnt = mei_data2slots(hdr_len + data_len);
if (dw_cnt > slots)
@@ -724,7 +724,7 @@ static int mei_txe_write(struct mei_device *dev,
char fw_sts_str[MEI_FW_STATUS_STR_SZ];
mei_fw_status_str(dev, fw_sts_str, MEI_FW_STATUS_STR_SZ);
- dev_err(dev->dev, "Input is not ready %s\n", fw_sts_str);
+ dev_err(&dev->dev, "Input is not ready %s\n", fw_sts_str);
return -EAGAIN;
}
@@ -828,13 +828,13 @@ static int mei_txe_read(struct mei_device *dev,
reg_buf = (u32 *)buf;
rem = len & 0x3;
- dev_dbg(dev->dev, "buffer-length = %lu buf[0]0x%08X\n",
+ dev_dbg(&dev->dev, "buffer-length = %lu buf[0]0x%08X\n",
len, mei_txe_out_data_read(dev, 0));
for (i = 0; i < len / MEI_SLOT_SIZE; i++) {
/* skip header: index starts from 1 */
reg = mei_txe_out_data_read(dev, i + 1);
- dev_dbg(dev->dev, "buf[%d] = 0x%08X\n", i, reg);
+ dev_dbg(&dev->dev, "buf[%d] = 0x%08X\n", i, reg);
*reg_buf++ = reg;
}
@@ -879,7 +879,7 @@ static int mei_txe_hw_reset(struct mei_device *dev, bool intr_enable)
*/
if (aliveness_req != hw->aliveness)
if (mei_txe_aliveness_poll(dev, aliveness_req) < 0) {
- dev_err(dev->dev, "wait for aliveness settle failed ... bailing out\n");
+ dev_err(&dev->dev, "wait for aliveness settle failed ... bailing out\n");
return -EIO;
}
@@ -889,7 +889,7 @@ static int mei_txe_hw_reset(struct mei_device *dev, bool intr_enable)
if (aliveness_req) {
mei_txe_aliveness_set(dev, 0);
if (mei_txe_aliveness_poll(dev, 0) < 0) {
- dev_err(dev->dev, "wait for aliveness failed ... bailing out\n");
+ dev_err(&dev->dev, "wait for aliveness failed ... bailing out\n");
return -EIO;
}
}
@@ -921,7 +921,7 @@ static int mei_txe_hw_start(struct mei_device *dev)
ret = mei_txe_readiness_wait(dev);
if (ret < 0) {
- dev_err(dev->dev, "waiting for readiness failed\n");
+ dev_err(&dev->dev, "waiting for readiness failed\n");
return ret;
}
@@ -937,11 +937,11 @@ static int mei_txe_hw_start(struct mei_device *dev)
ret = mei_txe_aliveness_set_sync(dev, 1);
if (ret < 0) {
- dev_err(dev->dev, "wait for aliveness failed ... bailing out\n");
+ dev_err(&dev->dev, "wait for aliveness failed ... bailing out\n");
return ret;
}
- pm_runtime_set_active(dev->dev);
+ pm_runtime_set_active(dev->parent);
/* enable input ready interrupts:
* SEC_IPC_HOST_INT_MASK.IPC_INPUT_READY_INT_MASK
@@ -1049,7 +1049,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
s32 slots;
int rets = 0;
- dev_dbg(dev->dev, "irq thread: Interrupt Registers HHISR|HISR|SEC=%02X|%04X|%02X\n",
+ dev_dbg(&dev->dev, "irq thread: Interrupt Registers HHISR|HISR|SEC=%02X|%04X|%02X\n",
mei_txe_br_reg_read(hw, HHISR_REG),
mei_txe_br_reg_read(hw, HISR_REG),
mei_txe_sec_reg_read_silent(hw, SEC_IPC_HOST_INT_STATUS_REG));
@@ -1059,7 +1059,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
mutex_lock(&dev->device_lock);
INIT_LIST_HEAD(&cmpl_list);
- if (pci_dev_msi_enabled(to_pci_dev(dev->dev)))
+ if (pci_dev_msi_enabled(to_pci_dev(dev->parent)))
mei_txe_check_and_ack_intrs(dev, true);
/* show irq events */
@@ -1073,17 +1073,17 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
* or TXE driver resetting the HECI interface.
*/
if (test_and_clear_bit(TXE_INTR_READINESS_BIT, &hw->intr_cause)) {
- dev_dbg(dev->dev, "Readiness Interrupt was received...\n");
+ dev_dbg(&dev->dev, "Readiness Interrupt was received...\n");
/* Check if SeC is going through reset */
if (mei_txe_readiness_is_sec_rdy(hw->readiness)) {
- dev_dbg(dev->dev, "we need to start the dev.\n");
+ dev_dbg(&dev->dev, "we need to start the dev.\n");
dev->recvd_hw_ready = true;
} else {
dev->recvd_hw_ready = false;
if (dev->dev_state != MEI_DEV_RESETTING) {
- dev_warn(dev->dev, "FW not ready: resetting.\n");
+ dev_warn(&dev->dev, "FW not ready: resetting.\n");
schedule_work(&dev->reset_work);
goto end;
@@ -1100,7 +1100,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
if (test_and_clear_bit(TXE_INTR_ALIVENESS_BIT, &hw->intr_cause)) {
/* Clear the interrupt cause */
- dev_dbg(dev->dev,
+ dev_dbg(&dev->dev,
"Aliveness Interrupt: Status: %d\n", hw->aliveness);
dev->pg_event = MEI_PG_EVENT_RECEIVED;
if (waitqueue_active(&hw->wait_aliveness_resp))
@@ -1118,7 +1118,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
if (rets &&
(dev->dev_state != MEI_DEV_RESETTING &&
dev->dev_state != MEI_DEV_POWER_DOWN)) {
- dev_err(dev->dev,
+ dev_err(&dev->dev,
"mei_irq_read_handler ret = %d.\n", rets);
schedule_work(&dev->reset_work);
@@ -1136,7 +1136,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
rets = mei_irq_write_handler(dev, &cmpl_list);
if (rets && rets != -EMSGSIZE)
- dev_err(dev->dev, "mei_irq_write_handler ret = %d.\n",
+ dev_err(&dev->dev, "mei_irq_write_handler ret = %d.\n",
rets);
dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
}
@@ -1144,7 +1144,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
mei_irq_compl_handler(dev, &cmpl_list);
end:
- dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
+ dev_dbg(&dev->dev, "interrupt thread end ret = %d\n", rets);
mutex_unlock(&dev->device_lock);
@@ -1197,7 +1197,7 @@ struct mei_device *mei_txe_dev_init(struct pci_dev *pdev)
struct mei_device *dev;
struct mei_txe_hw *hw;
- dev = devm_kzalloc(&pdev->dev, sizeof(*dev) + sizeof(*hw), GFP_KERNEL);
+ dev = kzalloc(sizeof(*dev) + sizeof(*hw), GFP_KERNEL);
if (!dev)
return NULL;
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index 2e9cf6f4efb6..3771aa09c592 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -27,6 +27,8 @@
#define MKHI_RCV_TIMEOUT 500 /* receive timeout in msec */
#define MKHI_RCV_TIMEOUT_SLOW 10000 /* receive timeout in msec, slow FW */
+#define MEI_LINK_RESET_WAIT_TIMEOUT_MSEC 500 /* Max wait timeout for link reset, in msec */
+
/*
* FW page size for DMA allocations
*/
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 8ef2b1df8ac7..b789c4d9c709 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -89,22 +89,6 @@ void mei_cancel_work(struct mei_device *dev)
}
EXPORT_SYMBOL_GPL(mei_cancel_work);
-static void mei_save_fw_status(struct mei_device *dev)
-{
- struct mei_fw_status fw_status;
- int ret;
-
- ret = mei_fw_status(dev, &fw_status);
- if (ret) {
- dev_err(dev->dev, "failed to read firmware status: %d\n", ret);
- return;
- }
-
- dev->saved_dev_state = dev->dev_state;
- dev->saved_fw_status_flag = true;
- memcpy(&dev->saved_fw_status, &fw_status, sizeof(fw_status));
-}
-
/**
* mei_reset - resets host and fw.
*
@@ -126,11 +110,10 @@ int mei_reset(struct mei_device *dev)
mei_fw_status_str(dev, fw_sts_str, MEI_FW_STATUS_STR_SZ);
if (kind_is_gsc(dev) || kind_is_gscfi(dev)) {
- dev_dbg(dev->dev, "unexpected reset: dev_state = %s fw status = %s\n",
+ dev_dbg(&dev->dev, "unexpected reset: dev_state = %s fw status = %s\n",
mei_dev_state_str(state), fw_sts_str);
- mei_save_fw_status(dev);
} else {
- dev_warn(dev->dev, "unexpected reset: dev_state = %s fw status = %s\n",
+ dev_warn(&dev->dev, "unexpected reset: dev_state = %s fw status = %s\n",
mei_dev_state_str(state), fw_sts_str);
}
}
@@ -150,7 +133,7 @@ int mei_reset(struct mei_device *dev)
dev->reset_count++;
if (dev->reset_count > MEI_MAX_CONSEC_RESET) {
- dev_err(dev->dev, "reset: reached maximal consecutive resets: disabling the device\n");
+ dev_err(&dev->dev, "reset: reached maximal consecutive resets: disabling the device\n");
mei_set_devstate(dev, MEI_DEV_DISABLED);
return -ENODEV;
}
@@ -170,12 +153,12 @@ int mei_reset(struct mei_device *dev)
memset(dev->rd_msg_hdr, 0, sizeof(dev->rd_msg_hdr));
if (ret) {
- dev_err(dev->dev, "hw_reset failed ret = %d\n", ret);
+ dev_err(&dev->dev, "hw_reset failed ret = %d\n", ret);
return ret;
}
if (state == MEI_DEV_POWER_DOWN) {
- dev_dbg(dev->dev, "powering down: end of reset\n");
+ dev_dbg(&dev->dev, "powering down: end of reset\n");
mei_set_devstate(dev, MEI_DEV_DISABLED);
return 0;
}
@@ -185,21 +168,21 @@ int mei_reset(struct mei_device *dev)
char fw_sts_str[MEI_FW_STATUS_STR_SZ];
mei_fw_status_str(dev, fw_sts_str, MEI_FW_STATUS_STR_SZ);
- dev_err(dev->dev, "hw_start failed ret = %d fw status = %s\n", ret, fw_sts_str);
+ dev_err(&dev->dev, "hw_start failed ret = %d fw status = %s\n", ret, fw_sts_str);
return ret;
}
if (dev->dev_state != MEI_DEV_RESETTING) {
- dev_dbg(dev->dev, "wrong state = %d on link start\n", dev->dev_state);
+ dev_dbg(&dev->dev, "wrong state = %d on link start\n", dev->dev_state);
return 0;
}
- dev_dbg(dev->dev, "link is established start sending messages.\n");
+ dev_dbg(&dev->dev, "link is established start sending messages.\n");
mei_set_devstate(dev, MEI_DEV_INIT_CLIENTS);
ret = mei_hbm_start_req(dev);
if (ret) {
- dev_err(dev->dev, "hbm_start failed ret = %d\n", ret);
+ dev_err(&dev->dev, "hbm_start failed ret = %d\n", ret);
mei_set_devstate(dev, MEI_DEV_RESETTING);
return ret;
}
@@ -228,7 +211,7 @@ int mei_start(struct mei_device *dev)
if (ret)
goto err;
- dev_dbg(dev->dev, "reset in start the mei device.\n");
+ dev_dbg(&dev->dev, "reset in start the mei device.\n");
dev->reset_count = 0;
do {
@@ -236,27 +219,27 @@ int mei_start(struct mei_device *dev)
ret = mei_reset(dev);
if (ret == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) {
- dev_err(dev->dev, "reset failed ret = %d", ret);
+ dev_err(&dev->dev, "reset failed ret = %d", ret);
goto err;
}
} while (ret);
if (mei_hbm_start_wait(dev)) {
- dev_err(dev->dev, "HBM haven't started");
+ dev_err(&dev->dev, "HBM haven't started");
goto err;
}
if (!mei_hbm_version_is_supported(dev)) {
- dev_dbg(dev->dev, "MEI start failed.\n");
+ dev_dbg(&dev->dev, "MEI start failed.\n");
goto err;
}
- dev_dbg(dev->dev, "link layer has been established.\n");
+ dev_dbg(&dev->dev, "link layer has been established.\n");
mutex_unlock(&dev->device_lock);
return 0;
err:
- dev_err(dev->dev, "link layer initialization failed.\n");
+ dev_err(&dev->dev, "link layer initialization failed.\n");
mei_set_devstate(dev, MEI_DEV_DISABLED);
mutex_unlock(&dev->device_lock);
return -ENODEV;
@@ -284,7 +267,7 @@ int mei_restart(struct mei_device *dev)
mutex_unlock(&dev->device_lock);
if (err == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) {
- dev_err(dev->dev, "device disabled = %d\n", err);
+ dev_err(&dev->dev, "device disabled = %d\n", err);
return -ENODEV;
}
@@ -313,7 +296,7 @@ static void mei_reset_work(struct work_struct *work)
mutex_unlock(&dev->device_lock);
if (dev->dev_state == MEI_DEV_DISABLED) {
- dev_err(dev->dev, "device disabled = %d\n", ret);
+ dev_err(&dev->dev, "device disabled = %d\n", ret);
return;
}
@@ -324,7 +307,7 @@ static void mei_reset_work(struct work_struct *work)
void mei_stop(struct mei_device *dev)
{
- dev_dbg(dev->dev, "stopping the device.\n");
+ dev_dbg(&dev->dev, "stopping the device.\n");
mutex_lock(&dev->device_lock);
mei_set_devstate(dev, MEI_DEV_POWERING_DOWN);
@@ -365,7 +348,7 @@ bool mei_write_is_idle(struct mei_device *dev)
list_empty(&dev->write_list) &&
list_empty(&dev->write_waiting_list));
- dev_dbg(dev->dev, "write pg: is idle[%d] state=%s ctrl=%01d write=%01d wwait=%01d\n",
+ dev_dbg(&dev->dev, "write pg: is idle[%d] state=%s ctrl=%01d write=%01d wwait=%01d\n",
idle,
mei_dev_state_str(dev->dev_state),
list_empty(&dev->ctrl_wr_list),
@@ -380,12 +363,12 @@ EXPORT_SYMBOL_GPL(mei_write_is_idle);
* mei_device_init - initialize mei_device structure
*
* @dev: the mei device
- * @device: the device structure
+ * @parent: the parent device
* @slow_fw: configure longer timeouts as FW is slow
* @hw_ops: hw operations
*/
void mei_device_init(struct mei_device *dev,
- struct device *device,
+ struct device *parent,
bool slow_fw,
const struct mei_hw_ops *hw_ops)
{
@@ -399,7 +382,8 @@ void mei_device_init(struct mei_device *dev,
init_waitqueue_head(&dev->wait_hw_ready);
init_waitqueue_head(&dev->wait_pg);
init_waitqueue_head(&dev->wait_hbm_start);
- dev->dev_state = MEI_DEV_INITIALIZING;
+ dev->dev_state = MEI_DEV_UNINITIALIZED;
+ init_waitqueue_head(&dev->wait_dev_state);
dev->reset_count = 0;
INIT_LIST_HEAD(&dev->write_list);
@@ -426,7 +410,7 @@ void mei_device_init(struct mei_device *dev,
dev->pg_event = MEI_PG_EVENT_IDLE;
dev->ops = hw_ops;
- dev->dev = device;
+ dev->parent = parent;
dev->timeouts.hw_ready = mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT);
dev->timeouts.connect = MEI_CONNECT_TIMEOUT;
@@ -442,6 +426,6 @@ void mei_device_init(struct mei_device *dev,
dev->timeouts.hbm = mei_secs_to_jiffies(MEI_HBM_TIMEOUT);
dev->timeouts.mkhi_recv = msecs_to_jiffies(MKHI_RCV_TIMEOUT);
}
+ dev->timeouts.link_reset_wait = msecs_to_jiffies(MEI_LINK_RESET_WAIT_TIMEOUT_MSEC);
}
EXPORT_SYMBOL_GPL(mei_device_init);
-
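
mei_device_init() now starts devices in an explicit MEI_DEV_UNINITIALIZED state and gains a wait_dev_state queue plus the bounded link_reset_wait timeout from hw.h, so user-space paths can sleep until a link reset completes instead of failing immediately. A sketch of the state-change/wake pairing this implies; the wake-up side is assumed to sit in mei_set_devstate():

#include <linux/jiffies.h>
#include <linux/wait.h>

enum state_sketch { ST_UNINITIALIZED, ST_RESETTING, ST_ENABLED };

struct link_sketch {
	enum state_sketch state;
	wait_queue_head_t wait_state;
	unsigned long reset_wait;
};

static void link_sketch_init(struct link_sketch *l)
{
	l->state = ST_UNINITIALIZED;
	init_waitqueue_head(&l->wait_state);
	l->reset_wait = msecs_to_jiffies(500);	/* per hw.h above */
}

static void link_sketch_set_state(struct link_sketch *l, enum state_sketch s)
{
	l->state = s;
	wake_up(&l->wait_state);	/* let sleepers re-check the state */
}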
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index d472f6bbe767..3aa66b6b0d36 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -76,7 +76,7 @@ static void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr,
* that length fits into rd_msg_buf
*/
mei_read_slots(dev, dev->rd_msg_buf, discard_len);
- dev_dbg(dev->dev, "discarding message " MEI_HDR_FMT "\n",
+ dev_dbg(&dev->dev, "discarding message " MEI_HDR_FMT "\n",
MEI_HDR_PRM(hdr));
}
@@ -229,8 +229,8 @@ static int mei_cl_irq_read_msg(struct mei_cl *cl,
cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx);
list_move_tail(&cb->list, cmpl_list);
} else {
- pm_runtime_mark_last_busy(dev->dev);
- pm_request_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(dev->parent);
+ pm_request_autosuspend(dev->parent);
}
return 0;
@@ -310,8 +310,8 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
return ret;
}
- pm_runtime_mark_last_busy(dev->dev);
- pm_request_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(dev->parent);
+ pm_request_autosuspend(dev->parent);
list_move_tail(&cb->list, &cl->rd_pending);
@@ -373,21 +373,21 @@ int mei_irq_read_handler(struct mei_device *dev,
dev->rd_msg_hdr[0] = mei_read_hdr(dev);
dev->rd_msg_hdr_count = 1;
(*slots)--;
- dev_dbg(dev->dev, "slots =%08x.\n", *slots);
+ dev_dbg(&dev->dev, "slots =%08x.\n", *slots);
ret = hdr_is_valid(dev->rd_msg_hdr[0]);
if (ret) {
- dev_err(dev->dev, "corrupted message header 0x%08X\n",
+ dev_err(&dev->dev, "corrupted message header 0x%08X\n",
dev->rd_msg_hdr[0]);
goto end;
}
}
mei_hdr = (struct mei_msg_hdr *)dev->rd_msg_hdr;
- dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
+ dev_dbg(&dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
if (mei_slots2data(*slots) < mei_hdr->length) {
- dev_err(dev->dev, "less data available than length=%08x.\n",
+ dev_err(&dev->dev, "less data available than length=%08x.\n",
*slots);
/* we can't read the message */
ret = -ENODATA;
@@ -402,18 +402,18 @@ int mei_irq_read_handler(struct mei_device *dev,
dev->rd_msg_hdr[1] = mei_read_hdr(dev);
dev->rd_msg_hdr_count++;
(*slots)--;
- dev_dbg(dev->dev, "extended header is %08x\n", dev->rd_msg_hdr[1]);
+ dev_dbg(&dev->dev, "extended header is %08x\n", dev->rd_msg_hdr[1]);
}
meta_hdr = ((struct mei_ext_meta_hdr *)&dev->rd_msg_hdr[1]);
if (check_add_overflow((u32)sizeof(*meta_hdr),
mei_slots2data(meta_hdr->size),
&hdr_size_ext)) {
- dev_err(dev->dev, "extended message size too big %d\n",
+ dev_err(&dev->dev, "extended message size too big %d\n",
meta_hdr->size);
return -EBADMSG;
}
if (hdr_size_left < hdr_size_ext) {
- dev_err(dev->dev, "corrupted message header len %d\n",
+ dev_err(&dev->dev, "corrupted message header len %d\n",
mei_hdr->length);
return -EBADMSG;
}
@@ -422,7 +422,7 @@ int mei_irq_read_handler(struct mei_device *dev,
ext_hdr_end = meta_hdr->size + 2;
for (i = dev->rd_msg_hdr_count; i < ext_hdr_end; i++) {
dev->rd_msg_hdr[i] = mei_read_hdr(dev);
- dev_dbg(dev->dev, "extended header %d is %08x\n", i,
+ dev_dbg(&dev->dev, "extended header %d is %08x\n", i,
dev->rd_msg_hdr[i]);
dev->rd_msg_hdr_count++;
(*slots)--;
@@ -431,7 +431,7 @@ int mei_irq_read_handler(struct mei_device *dev,
if (mei_hdr->dma_ring) {
if (hdr_size_left != sizeof(dev->rd_msg_hdr[ext_hdr_end])) {
- dev_err(dev->dev, "corrupted message header len %d\n",
+ dev_err(&dev->dev, "corrupted message header len %d\n",
mei_hdr->length);
return -EBADMSG;
}
@@ -446,8 +446,7 @@ int mei_irq_read_handler(struct mei_device *dev,
if (hdr_is_hbm(mei_hdr)) {
ret = mei_hbm_dispatch(dev, mei_hdr);
if (ret) {
- dev_dbg(dev->dev, "mei_hbm_dispatch failed ret = %d\n",
- ret);
+ dev_dbg(&dev->dev, "mei_hbm_dispatch failed ret = %d\n", ret);
goto end;
}
goto reset_slots;
@@ -474,7 +473,7 @@ int mei_irq_read_handler(struct mei_device *dev,
ret = 0;
goto reset_slots;
}
- dev_err(dev->dev, "no destination client found 0x%08X\n", dev->rd_msg_hdr[0]);
+ dev_err(&dev->dev, "no destination client found 0x%08X\n", dev->rd_msg_hdr[0]);
ret = -EBADMSG;
goto end;
@@ -485,7 +484,7 @@ reset_slots:
*slots = mei_count_full_read_slots(dev);
if (*slots == -EOVERFLOW) {
/* overflow - reset */
- dev_err(dev->dev, "resetting due to slots overflow.\n");
+ dev_err(&dev->dev, "resetting due to slots overflow.\n");
/* set the event since message has been read */
ret = -ERANGE;
goto end;
@@ -525,7 +524,7 @@ int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list)
return -EMSGSIZE;
/* complete all waiting for write CB */
- dev_dbg(dev->dev, "complete all waiting for write cb.\n");
+ dev_dbg(&dev->dev, "complete all waiting for write cb.\n");
list_for_each_entry_safe(cb, next, &dev->write_waiting_list, list) {
cl = cb->cl;
@@ -537,7 +536,7 @@ int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list)
}
/* complete control write list CB */
- dev_dbg(dev->dev, "complete control write list cb.\n");
+ dev_dbg(&dev->dev, "complete control write list cb.\n");
list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list, list) {
cl = cb->cl;
switch (cb->fop_type) {
@@ -591,7 +590,7 @@ int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list)
}
/* complete write list CB */
- dev_dbg(dev->dev, "complete write list cb.\n");
+ dev_dbg(&dev->dev, "complete write list cb.\n");
list_for_each_entry_safe(cb, next, &dev->write_list, list) {
cl = cb->cl;
ret = mei_cl_irq_write(cl, cb, cmpl_list);
@@ -656,7 +655,7 @@ void mei_timer(struct work_struct *work)
if (dev->init_clients_timer) {
if (--dev->init_clients_timer == 0) {
- dev_err(dev->dev, "timer: init clients timeout hbm_state = %d.\n",
+ dev_err(&dev->dev, "timer: init clients timeout hbm_state = %d.\n",
dev->hbm_state);
mei_reset(dev);
goto out;
@@ -672,7 +671,7 @@ void mei_timer(struct work_struct *work)
list_for_each_entry(cl, &dev->file_list, link) {
if (cl->timer_count) {
if (--cl->timer_count == 0) {
- dev_err(dev->dev, "timer: connect/disconnect timeout.\n");
+ dev_err(&dev->dev, "timer: connect/disconnect timeout.\n");
mei_connect_timeout(cl);
goto out;
}
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 8a149a15b861..86a73684a373 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -51,12 +51,15 @@ static int mei_open(struct inode *inode, struct file *file)
int err;
- dev = container_of(inode->i_cdev, struct mei_device, cdev);
+ dev = idr_find(&mei_idr, iminor(inode));
+ if (!dev)
+ return -ENODEV;
+ get_device(&dev->dev);
mutex_lock(&dev->device_lock);
if (dev->dev_state != MEI_DEV_ENABLED) {
- dev_dbg(dev->dev, "dev_state != MEI_ENABLED dev_state = %s\n",
+ dev_dbg(&dev->dev, "dev_state != MEI_ENABLED dev_state = %s\n",
mei_dev_state_str(dev->dev_state));
err = -ENODEV;
goto err_unlock;
@@ -77,6 +80,7 @@ static int mei_open(struct inode *inode, struct file *file)
err_unlock:
mutex_unlock(&dev->device_lock);
+ put_device(&dev->dev);
return err;
}
@@ -152,6 +156,7 @@ out:
file->private_data = NULL;
mutex_unlock(&dev->device_lock);
+ put_device(&dev->dev);
return rets;
}
@@ -418,6 +423,7 @@ static int mei_ioctl_connect_client(struct file *file,
cl->state != MEI_FILE_DISCONNECTED)
return -EBUSY;
+retry:
/* find ME client we're trying to connect to */
me_cl = mei_me_cl_by_uuid(dev, in_client_uuid);
if (!me_cl) {
@@ -449,6 +455,28 @@ static int mei_ioctl_connect_client(struct file *file,
rets = mei_cl_connect(cl, me_cl, file);
+ if (rets && cl->status == -EFAULT &&
+ (dev->dev_state == MEI_DEV_RESETTING ||
+ dev->dev_state == MEI_DEV_INIT_CLIENTS)) {
+ /* in link reset, wait for its completion */
+ mutex_unlock(&dev->device_lock);
+ rets = wait_event_interruptible_timeout(dev->wait_dev_state,
+ dev->dev_state == MEI_DEV_ENABLED,
+ dev->timeouts.link_reset_wait);
+ mutex_lock(&dev->device_lock);
+ if (rets < 0) {
+ if (signal_pending(current))
+ rets = -EINTR;
+ goto end;
+ }
+ if (dev->dev_state != MEI_DEV_ENABLED) {
+ rets = -ETIME;
+ goto end;
+ }
+ mei_me_cl_put(me_cl);
+ goto retry;
+ }
+
end:
mei_me_cl_put(me_cl);
return rets;
@@ -477,7 +505,7 @@ static int mei_vt_support_check(struct mei_device *dev, const uuid_le *uuid)
me_cl = mei_me_cl_by_uuid(dev, uuid);
if (!me_cl) {
- dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n",
+ dev_dbg(&dev->dev, "Cannot connect to FW Client UUID = %pUl\n",
uuid);
return -ENOTTY;
}
@@ -641,7 +669,7 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
struct mei_cl *cl = file->private_data;
struct mei_connect_client_data conn;
struct mei_connect_client_data_vtag conn_vtag;
- const uuid_le *cl_uuid;
+ uuid_le cl_uuid;
struct mei_client *props;
u8 vtag;
u32 notify_get, notify_req;
@@ -669,18 +697,18 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
rets = -EFAULT;
goto out;
}
- cl_uuid = &conn.in_client_uuid;
+ cl_uuid = conn.in_client_uuid;
props = &conn.out_client_properties;
vtag = 0;
- rets = mei_vt_support_check(dev, cl_uuid);
+ rets = mei_vt_support_check(dev, &cl_uuid);
if (rets == -ENOTTY)
goto out;
if (!rets)
- rets = mei_ioctl_connect_vtag(file, cl_uuid, props,
+ rets = mei_ioctl_connect_vtag(file, &cl_uuid, props,
vtag);
else
- rets = mei_ioctl_connect_client(file, cl_uuid, props);
+ rets = mei_ioctl_connect_client(file, &cl_uuid, props);
if (rets)
goto out;
@@ -702,14 +730,14 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
goto out;
}
- cl_uuid = &conn_vtag.connect.in_client_uuid;
+ cl_uuid = conn_vtag.connect.in_client_uuid;
props = &conn_vtag.out_client_properties;
vtag = conn_vtag.connect.vtag;
- rets = mei_vt_support_check(dev, cl_uuid);
+ rets = mei_vt_support_check(dev, &cl_uuid);
if (rets == -EOPNOTSUPP)
cl_dbg(dev, cl, "FW Client %pUl does not support vtags\n",
- cl_uuid);
+ &cl_uuid);
if (rets)
goto out;
@@ -719,7 +747,7 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
goto out;
}
- rets = mei_ioctl_connect_vtag(file, cl_uuid, props, vtag);
+ rets = mei_ioctl_connect_vtag(file, &cl_uuid, props, vtag);
if (rets)
goto out;
@@ -1115,7 +1143,12 @@ void mei_set_devstate(struct mei_device *dev, enum mei_dev_state state)
dev->dev_state = state;
- clsdev = class_find_device_by_devt(&mei_class, dev->cdev.dev);
+ wake_up_interruptible_all(&dev->wait_dev_state);
+
+ if (!dev->cdev)
+ return;
+
+ clsdev = class_find_device_by_devt(&mei_class, dev->cdev->dev);
if (clsdev) {
sysfs_notify(&clsdev->kobj, NULL, "dev_state");
put_device(clsdev);
@@ -1191,7 +1224,7 @@ static int mei_minor_get(struct mei_device *dev)
if (ret >= 0)
dev->minor = ret;
else if (ret == -ENOSPC)
- dev_err(dev->dev, "too many mei devices\n");
+ dev_err(&dev->dev, "too many mei devices\n");
mutex_unlock(&mei_minor_lock);
return ret;
@@ -1200,56 +1233,81 @@ static int mei_minor_get(struct mei_device *dev)
/**
* mei_minor_free - mark device minor number as free
*
- * @dev: device pointer
+ * @minor: minor number to free
*/
-static void mei_minor_free(struct mei_device *dev)
+static void mei_minor_free(int minor)
{
mutex_lock(&mei_minor_lock);
- idr_remove(&mei_idr, dev->minor);
+ idr_remove(&mei_idr, minor);
mutex_unlock(&mei_minor_lock);
}
+static void mei_device_release(struct device *dev)
+{
+ kfree(dev_get_drvdata(dev));
+}
+
int mei_register(struct mei_device *dev, struct device *parent)
{
- struct device *clsdev; /* class device */
int ret, devno;
+ int minor;
ret = mei_minor_get(dev);
if (ret < 0)
return ret;
+ minor = dev->minor;
+
/* Fill in the data structures */
devno = MKDEV(MAJOR(mei_devt), dev->minor);
- cdev_init(&dev->cdev, &mei_fops);
- dev->cdev.owner = parent->driver->owner;
+
+ device_initialize(&dev->dev);
+ dev->dev.devt = devno;
+ dev->dev.class = &mei_class;
+ dev->dev.parent = parent;
+ dev->dev.groups = mei_groups;
+ dev->dev.release = mei_device_release;
+ dev_set_drvdata(&dev->dev, dev);
+
+ dev->cdev = cdev_alloc();
+ if (!dev->cdev) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ dev->cdev->ops = &mei_fops;
+ dev->cdev->owner = parent->driver->owner;
+ cdev_set_parent(dev->cdev, &dev->dev.kobj);
/* Add the device */
- ret = cdev_add(&dev->cdev, devno, 1);
+ ret = cdev_add(dev->cdev, devno, 1);
if (ret) {
- dev_err(parent, "unable to add device %d:%d\n",
+ dev_err(parent, "unable to add cdev for device %d:%d\n",
MAJOR(mei_devt), dev->minor);
- goto err_dev_add;
+ goto err_del_cdev;
}
- clsdev = device_create_with_groups(&mei_class, parent, devno,
- dev, mei_groups,
- "mei%d", dev->minor);
+ ret = dev_set_name(&dev->dev, "mei%d", dev->minor);
+ if (ret) {
+ dev_err(parent, "unable to set name to device %d:%d ret = %d\n",
+ MAJOR(mei_devt), dev->minor, ret);
+ goto err_del_cdev;
+ }
- if (IS_ERR(clsdev)) {
- dev_err(parent, "unable to create device %d:%d\n",
- MAJOR(mei_devt), dev->minor);
- ret = PTR_ERR(clsdev);
- goto err_dev_create;
+ ret = device_add(&dev->dev);
+ if (ret) {
+ dev_err(parent, "unable to add device %d:%d ret = %d\n",
+ MAJOR(mei_devt), dev->minor, ret);
+ goto err_del_cdev;
}
- mei_dbgfs_register(dev, dev_name(clsdev));
+ mei_dbgfs_register(dev, dev_name(&dev->dev));
return 0;
-err_dev_create:
- cdev_del(&dev->cdev);
-err_dev_add:
- mei_minor_free(dev);
+err_del_cdev:
+ cdev_del(dev->cdev);
+err:
+ mei_minor_free(minor);
return ret;
}
EXPORT_SYMBOL_GPL(mei_register);
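
/*
 * A hedged sketch (not part of this patch) of the lifetime rule the
 * embedded struct device establishes for bus glue; hw_ops is a
 * hypothetical placeholder. The mei_device must come from plain
 * kzalloc(), never devm, because the final kfree() now happens in
 * mei_device_release() once the last reference is dropped:
 *
 *	struct mei_device *mei;
 *
 *	mei = kzalloc(sizeof(*mei), GFP_KERNEL);
 *	if (!mei)
 *		return -ENOMEM;
 *	mei_device_init(mei, &pdev->dev, false, hw_ops);
 *	err = mei_register(mei, &pdev->dev);
 */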
@@ -1257,15 +1315,16 @@ EXPORT_SYMBOL_GPL(mei_register);
void mei_deregister(struct mei_device *dev)
{
int devno;
+ int minor = dev->minor;
- devno = dev->cdev.dev;
- cdev_del(&dev->cdev);
+ devno = dev->cdev->dev;
+ cdev_del(dev->cdev);
mei_dbgfs_deregister(dev);
device_destroy(&mei_class, devno);
- mei_minor_free(dev);
+ mei_minor_free(minor);
}
EXPORT_SYMBOL_GPL(mei_deregister);
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 37d7fb15cad7..0bf8d552c3ea 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -57,7 +57,8 @@ enum file_state {
/* MEI device states */
enum mei_dev_state {
- MEI_DEV_INITIALIZING = 0,
+ MEI_DEV_UNINITIALIZED = 0,
+ MEI_DEV_INITIALIZING,
MEI_DEV_INIT_CLIENTS,
MEI_DEV_ENABLED,
MEI_DEV_RESETTING,
@@ -465,13 +466,15 @@ struct mei_dev_timeouts {
unsigned int d0i3; /* D0i3 set/unset max response time, in jiffies */
unsigned long hbm; /* HBM operation timeout, in jiffies */
unsigned long mkhi_recv; /* receive timeout, in jiffies */
+ unsigned long link_reset_wait; /* link reset wait timeout, in jiffies */
};
/**
* struct mei_device - MEI private device struct
*
- * @dev : device on a bus
- * @cdev : character device
+ * @parent : device on a bus
+ * @dev : device object
+ * @cdev : character device pointer
* @minor : minor number allocated for device
*
* @write_list : write pending list
@@ -494,6 +497,7 @@ struct mei_dev_timeouts {
*
* @reset_count : number of consecutive resets
* @dev_state : device state
+ * @wait_dev_state: wait queue for device state change
* @hbm_state : state of host bus message protocol
* @pxp_mode : PXP device mode
* @init_clients_timer : HBM init handshake timeout
@@ -547,17 +551,15 @@ struct mei_dev_timeouts {
*
* @dbgfs_dir : debugfs mei root directory
*
- * @saved_fw_status : saved firmware status
- * @saved_dev_state : saved device state
- * @saved_fw_status_flag : flag indicating that firmware status was saved
* @gsc_reset_to_pxp : state of reset to the PXP mode
*
* @ops: : hw specific operations
* @hw : hw specific data
*/
struct mei_device {
- struct device *dev;
- struct cdev cdev;
+ struct device *parent;
+ struct device dev;
+ struct cdev *cdev;
int minor;
struct list_head write_list;
@@ -585,6 +587,7 @@ struct mei_device {
*/
unsigned long reset_count;
enum mei_dev_state dev_state;
+ wait_queue_head_t wait_dev_state;
enum mei_hbm_state hbm_state;
enum mei_dev_pxp_mode pxp_mode;
u16 init_clients_timer;
@@ -648,9 +651,6 @@ struct mei_device {
struct dentry *dbgfs_dir;
#endif /* CONFIG_DEBUG_FS */
- struct mei_fw_status saved_fw_status;
- enum mei_dev_state saved_dev_state;
- bool saved_fw_status_flag;
enum mei_dev_reset_to_pxp gsc_reset_to_pxp;
const struct mei_hw_ops *ops;
@@ -703,7 +703,7 @@ static inline u32 mei_slots2data(int slots)
* mei init function prototypes
*/
void mei_device_init(struct mei_device *dev,
- struct device *device,
+ struct device *parent,
bool slow_fw,
const struct mei_hw_ops *hw_ops);
int mei_reset(struct mei_device *dev);
diff --git a/drivers/misc/mei/mei_lb.c b/drivers/misc/mei/mei_lb.c
new file mode 100644
index 000000000000..77686b108d3c
--- /dev/null
+++ b/drivers/misc/mei/mei_lb.c
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 Intel Corporation
+ */
+
+#include <linux/component.h>
+#include <linux/mei_cl_bus.h>
+#include <linux/module.h>
+#include <linux/overflow.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+
+#include <drm/intel/i915_component.h>
+#include <drm/intel/intel_lb_mei_interface.h>
+
+#include "mkhi.h"
+
+/**
+ * DOC: Late Binding Firmware Update/Upload
+ *
+ * Late Binding is a firmware update/upload mechanism that allows configuration
+ * payloads to be securely delivered and applied at runtime, rather than
+ * being embedded in the system firmware image (e.g., IFWI or SPI flash).
+ *
+ * This mechanism is used to update device-level configuration such as:
+ * - Fan controller
+ * - Voltage regulator (VR)
+ *
+ * Key Characteristics:
+ * ---------------------
+ * - Runtime Delivery:
+ * Firmware blobs are loaded by the host driver (e.g., Xe KMD)
+ * after the GPU or SoC has booted.
+ *
+ * - Secure and Authenticated:
+ * All payloads are signed and verified by the authentication firmware.
+ *
+ * - No Firmware Flashing Required:
+ * Updates are applied in volatile memory and do not require SPI flash
+ * modification or system reboot.
+ *
+ * - Re-entrant:
+ * Multiple updates of the same or different types can be applied
+ * sequentially within a single boot session.
+ *
+ * - Version Controlled:
+ * Each payload includes version and security version number (SVN)
+ * metadata to support anti-rollback enforcement.
+ *
+ * Upload Flow:
+ * ------------
+ * 1. Host driver (KMD or user-space tool) loads the late binding firmware.
+ * 2. Firmware is passed to the MEI interface and forwarded to
+ * authentication firmware.
+ * 3. Authentication firmware authenticates the payload and extracts
+ * command and data arrays.
+ * 4. Authentication firmware delivers the configuration to PUnit/PCODE.
+ * 5. Status is returned back to the host via MEI.
+ */
+
+#define INTEL_LB_CMD 0x12
+#define INTEL_LB_RSP (INTEL_LB_CMD | 0x80)
+
+#define INTEL_LB_SEND_TIMEOUT_MSEC 3000
+#define INTEL_LB_RECV_TIMEOUT_MSEC 3000
+
+/**
+ * struct mei_lb_req - Late Binding request structure
+ * @header: MKHI message header (see struct mkhi_msg_hdr)
+ * @type: Type of the Late Binding payload
+ * @flags: Flags to be passed to the authentication firmware (e.g. %INTEL_LB_FLAGS_IS_PERSISTENT)
+ * @reserved: Reserved for future use by authentication firmware, must be set to 0
+ * @payload_size: Size of the payload data in bytes
+ * @payload: Payload data to be sent to the authentication firmware
+ */
+struct mei_lb_req {
+ struct mkhi_msg_hdr header;
+ __le32 type;
+ __le32 flags;
+ __le32 reserved[2];
+ __le32 payload_size;
+ u8 payload[] __counted_by(payload_size);
+} __packed;
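+
+/*
+ * Sizing sketch: struct_size() accounts for the flexible payload[] tail
+ * declared __counted_by(payload_size) above, so for an N-byte payload the
+ * wire size is sizeof(struct mei_lb_req) + N:
+ *
+ *	req_size = struct_size(req, payload, payload_size);
+ */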
+
+/**
+ * struct mei_lb_rsp - Late Binding response structure
+ * @header: MKHI message header (see struct mkhi_msg_hdr)
+ * @type: Type of the Late Binding payload
+ * @reserved: Reserved for future use by authentication firmware, must be set to 0
+ * @status: Status returned by authentication firmware (see &enum intel_lb_status)
+ */
+struct mei_lb_rsp {
+ struct mkhi_msg_hdr header;
+ __le32 type;
+ __le32 reserved[2];
+ __le32 status;
+} __packed;
+
+static bool mei_lb_check_response(const struct device *dev, ssize_t bytes,
+ struct mei_lb_rsp *rsp)
+{
+ /*
+ * Received message size may be smaller than the full message size when
+ * reply contains only MKHI header with result field set to the error code.
+ * Check the header size and content first so an exact error can be reported
+ * if needed, and then proceed to the whole message.
+ */
+ if (bytes < sizeof(rsp->header)) {
+ dev_err(dev, "Received less than header size from the firmware: %zd < %zu\n",
+ bytes, sizeof(rsp->header));
+ return false;
+ }
+ if (rsp->header.group_id != MKHI_GROUP_ID_GFX) {
+ dev_err(dev, "Mismatch group id: 0x%x instead of 0x%x\n",
+ rsp->header.group_id, MKHI_GROUP_ID_GFX);
+ return false;
+ }
+ if (rsp->header.command != INTEL_LB_RSP) {
+ dev_err(dev, "Mismatch command: 0x%x instead of 0x%x\n",
+ rsp->header.command, INTEL_LB_RSP);
+ return false;
+ }
+ if (rsp->header.result) {
+ dev_err(dev, "Error in result: 0x%x\n", rsp->header.result);
+ return false;
+ }
+ if (bytes < sizeof(*rsp)) {
+ dev_err(dev, "Received less than message size from the firmware: %zd < %zu\n",
+ bytes, sizeof(*rsp));
+ return false;
+ }
+
+ return true;
+}
+
+static int mei_lb_push_payload(struct device *dev,
+ enum intel_lb_type type, u32 flags,
+ const void *payload, size_t payload_size)
+{
+ struct mei_cl_device *cldev;
+ struct mei_lb_req *req = NULL;
+ struct mei_lb_rsp rsp;
+ size_t req_size;
+ ssize_t bytes;
+ int ret;
+
+ cldev = to_mei_cl_device(dev);
+
+ ret = mei_cldev_enable(cldev);
+ if (ret) {
+ dev_dbg(dev, "Failed to enable firmware client. %d\n", ret);
+ return ret;
+ }
+
+ req_size = struct_size(req, payload, payload_size);
+ if (req_size > mei_cldev_mtu(cldev)) {
+ dev_err(dev, "Payload is too big: %zu\n", payload_size);
+ ret = -EMSGSIZE;
+ goto end;
+ }
+
+ req = kmalloc(req_size, GFP_KERNEL);
+ if (!req) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ req->header.group_id = MKHI_GROUP_ID_GFX;
+ req->header.command = INTEL_LB_CMD;
+ req->type = cpu_to_le32(type);
+ req->flags = cpu_to_le32(flags);
+ req->reserved[0] = 0;
+ req->reserved[1] = 0;
+ req->payload_size = cpu_to_le32(payload_size);
+ memcpy(req->payload, payload, payload_size);
+
+ bytes = mei_cldev_send_timeout(cldev, (u8 *)req, req_size,
+ INTEL_LB_SEND_TIMEOUT_MSEC);
+ if (bytes < 0) {
+ dev_err(dev, "Failed to send late binding request to firmware. %zd\n", bytes);
+ ret = bytes;
+ goto end;
+ }
+
+ bytes = mei_cldev_recv_timeout(cldev, (u8 *)&rsp, sizeof(rsp),
+ INTEL_LB_RECV_TIMEOUT_MSEC);
+ if (bytes < 0) {
+ dev_err(dev, "Failed to receive late binding reply from MEI firmware. %zd\n",
+ bytes);
+ ret = bytes;
+ goto end;
+ }
+ if (!mei_lb_check_response(dev, bytes, &rsp)) {
+ dev_err(dev, "Bad response from the firmware. header: %02x %02x %02x %02x\n",
+ rsp.header.group_id, rsp.header.command,
+ rsp.header.reserved, rsp.header.result);
+ ret = -EPROTO;
+ goto end;
+ }
+
+ dev_dbg(dev, "status = %u\n", le32_to_cpu(rsp.status));
+ ret = (int)le32_to_cpu(rsp.status);
+end:
+ mei_cldev_disable(cldev);
+ kfree(req);
+ return ret;
+}
+
+static const struct intel_lb_component_ops mei_lb_ops = {
+ .push_payload = mei_lb_push_payload,
+};
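+
+/*
+ * Hedged consumer-side sketch (not part of this patch): after the
+ * component master binds, a GPU driver such as the Xe KMD receives
+ * &mei_lb_ops through component_bind_all() and can hand a signed blob
+ * to the authentication firmware. INTEL_LB_TYPE_FAN_CONTROL is assumed
+ * from intel_lb_mei_interface.h; blob/blob_size are hypothetical.
+ *
+ *	const struct intel_lb_component_ops *ops;	// from the bind callback
+ *	int ret;
+ *
+ *	ret = ops->push_payload(mei_dev, INTEL_LB_TYPE_FAN_CONTROL, 0,
+ *				blob, blob_size);	// 0 == firmware accepted
+ */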
+
+static int mei_lb_component_master_bind(struct device *dev)
+{
+ return component_bind_all(dev, (void *)&mei_lb_ops);
+}
+
+static void mei_lb_component_master_unbind(struct device *dev)
+{
+ component_unbind_all(dev, (void *)&mei_lb_ops);
+}
+
+static const struct component_master_ops mei_lb_component_master_ops = {
+ .bind = mei_lb_component_master_bind,
+ .unbind = mei_lb_component_master_unbind,
+};
+
+static int mei_lb_component_match(struct device *dev, int subcomponent,
+ void *data)
+{
+ /*
+ * This function checks whether the requester is an Intel
+ * %PCI_CLASS_DISPLAY_VGA or %PCI_CLASS_DISPLAY_OTHER device, and whether
+ * it is the grandparent of the late binding MEI device.
+ */
+ struct device *base = data;
+ struct pci_dev *pdev;
+
+ if (!dev)
+ return 0;
+
+ if (!dev_is_pci(dev))
+ return 0;
+
+ pdev = to_pci_dev(dev);
+
+ if (pdev->vendor != PCI_VENDOR_ID_INTEL)
+ return 0;
+
+ if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8) &&
+ pdev->class != (PCI_CLASS_DISPLAY_OTHER << 8))
+ return 0;
+
+ if (subcomponent != INTEL_COMPONENT_LB)
+ return 0;
+
+ base = base->parent;
+ if (!base) /* mei device */
+ return 0;
+
+ base = base->parent; /* pci device */
+
+ return !!base && dev == base;
+}
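+
+/*
+ * Device hierarchy walked above (data == &cldev->dev):
+ *
+ *	Intel PCI GPU (VGA / DISPLAY_OTHER)	<- dev must equal this
+ *	  └── MEI device			<- first ->parent hop
+ *	        └── mei_cl_device (mei_lb)	<- base, where the walk starts
+ */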
+
+static int mei_lb_probe(struct mei_cl_device *cldev,
+ const struct mei_cl_device_id *id)
+{
+ struct component_match *master_match = NULL;
+ int ret;
+
+ component_match_add_typed(&cldev->dev, &master_match,
+ mei_lb_component_match, &cldev->dev);
+ if (IS_ERR_OR_NULL(master_match))
+ return -ENOMEM;
+
+ ret = component_master_add_with_match(&cldev->dev,
+ &mei_lb_component_master_ops,
+ master_match);
+ if (ret < 0)
+ dev_err(&cldev->dev, "Failed to add late binding master component. %d\n", ret);
+
+ return ret;
+}
+
+static void mei_lb_remove(struct mei_cl_device *cldev)
+{
+ component_master_del(&cldev->dev, &mei_lb_component_master_ops);
+}
+
+#define MEI_GUID_MKHI UUID_LE(0xe2c2afa2, 0x3817, 0x4d19, \
+ 0x9d, 0x95, 0x6, 0xb1, 0x6b, 0x58, 0x8a, 0x5d)
+
+static const struct mei_cl_device_id mei_lb_tbl[] = {
+ { .uuid = MEI_GUID_MKHI, .version = MEI_CL_VERSION_ANY },
+ { }
+};
+MODULE_DEVICE_TABLE(mei, mei_lb_tbl);
+
+static struct mei_cl_driver mei_lb_driver = {
+ .id_table = mei_lb_tbl,
+ .name = "mei_lb",
+ .probe = mei_lb_probe,
+ .remove = mei_lb_remove,
+};
+
+module_mei_cl_driver(mei_lb_driver);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MEI Late Binding Firmware Update/Upload");
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 3f9c60b579ae..b108a7c22388 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -143,7 +143,7 @@ static inline void mei_me_unset_pm_domain(struct mei_device *dev) {}
static int mei_me_read_fws(const struct mei_device *dev, int where, u32 *val)
{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
+ struct pci_dev *pdev = to_pci_dev(dev->parent);
return pci_read_config_dword(pdev, where, val);
}
@@ -238,19 +238,19 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto end;
}
+ err = mei_register(dev, &pdev->dev);
+ if (err)
+ goto release_irq;
+
if (mei_start(dev)) {
dev_err(&pdev->dev, "init hw failure.\n");
err = -ENODEV;
- goto release_irq;
+ goto deregister;
}
pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
- err = mei_register(dev, &pdev->dev);
- if (err)
- goto stop;
-
pci_set_drvdata(pdev, dev);
/*
@@ -280,8 +280,8 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
-stop:
- mei_stop(dev);
+deregister:
+ mei_deregister(dev);
release_irq:
mei_cancel_work(dev);
mei_disable_interrupts(dev);
@@ -475,7 +475,7 @@ static int mei_me_pm_runtime_resume(struct device *device)
*/
static inline void mei_me_set_pm_domain(struct mei_device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
+ struct pci_dev *pdev = to_pci_dev(dev->parent);
if (pdev->dev.bus && pdev->dev.bus->pm) {
dev->pg_domain.ops = *pdev->dev.bus->pm;
@@ -496,7 +496,7 @@ static inline void mei_me_set_pm_domain(struct mei_device *dev)
static inline void mei_me_unset_pm_domain(struct mei_device *dev)
{
/* stop using pm callbacks if any */
- dev_pm_domain_set(dev->dev, NULL);
+ dev_pm_domain_set(dev->parent, NULL);
}
static const struct dev_pm_ops mei_me_pm_ops = {
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index 2a584104ba38..c9eb5c5393e4 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -321,7 +321,7 @@ static int mei_txe_pm_runtime_resume(struct device *device)
*/
static inline void mei_txe_set_pm_domain(struct mei_device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
+ struct pci_dev *pdev = to_pci_dev(dev->parent);
if (pdev->dev.bus && pdev->dev.bus->pm) {
dev->pg_domain.ops = *pdev->dev.bus->pm;
@@ -342,7 +342,7 @@ static inline void mei_txe_set_pm_domain(struct mei_device *dev)
static inline void mei_txe_unset_pm_domain(struct mei_device *dev)
{
/* stop using pm callbacks if any */
- dev_pm_domain_set(dev->dev, NULL);
+ dev_pm_domain_set(dev->parent, NULL);
}
static const struct dev_pm_ops mei_txe_pm_ops = {
diff --git a/drivers/misc/mei/platform-vsc.c b/drivers/misc/mei/platform-vsc.c
index b2b5a20ae3fa..288e7b72e942 100644
--- a/drivers/misc/mei/platform-vsc.c
+++ b/drivers/misc/mei/platform-vsc.c
@@ -152,7 +152,7 @@ static int mei_vsc_hw_start(struct mei_device *mei_dev)
MEI_VSC_POLL_TIMEOUT_US, true,
hw, &buf, sizeof(buf));
if (ret) {
- dev_err(mei_dev->dev, "wait fw ready failed: %d\n", ret);
+ dev_err(&mei_dev->dev, "wait fw ready failed: %d\n", ret);
return ret;
}
@@ -259,7 +259,7 @@ static int mei_vsc_hw_reset(struct mei_device *mei_dev, bool intr_enable)
if (!intr_enable)
return 0;
- return vsc_tp_init(hw->tp, mei_dev->dev);
+ return vsc_tp_init(hw->tp, mei_dev->parent);
}
static const struct mei_hw_ops mei_vsc_hw_ops = {
@@ -325,7 +325,7 @@ static void mei_vsc_event_cb(void *context)
mei_dev->hbuf_is_ready = mei_hbuf_is_ready(mei_dev);
ret = mei_irq_write_handler(mei_dev, &cmpl_list);
if (ret)
- dev_err(mei_dev->dev, "dispatch write request failed: %d\n", ret);
+ dev_err(&mei_dev->dev, "dispatch write request failed: %d\n", ret);
mei_dev->hbuf_is_ready = mei_hbuf_is_ready(mei_dev);
mei_irq_compl_handler(mei_dev, &cmpl_list);
@@ -343,12 +343,12 @@ static int mei_vsc_probe(struct platform_device *pdev)
if (!tp)
return dev_err_probe(dev, -ENODEV, "no platform data\n");
- mei_dev = devm_kzalloc(dev, size_add(sizeof(*mei_dev), sizeof(*hw)),
- GFP_KERNEL);
+ mei_dev = kzalloc(size_add(sizeof(*mei_dev), sizeof(*hw)), GFP_KERNEL);
if (!mei_dev)
return -ENOMEM;
mei_device_init(mei_dev, dev, false, &mei_vsc_hw_ops);
+
mei_dev->fw_f_fw_ver_supported = 0;
mei_dev->kind = "ivsc";
@@ -360,22 +360,22 @@ static int mei_vsc_probe(struct platform_device *pdev)
vsc_tp_register_event_cb(tp, mei_vsc_event_cb, mei_dev);
+ ret = mei_register(mei_dev, dev);
+ if (ret)
+ goto err_dereg;
+
ret = mei_start(mei_dev);
if (ret) {
dev_err_probe(dev, ret, "init hw failed\n");
goto err_cancel;
}
- ret = mei_register(mei_dev, dev);
- if (ret)
- goto err_stop;
-
- pm_runtime_enable(mei_dev->dev);
+ pm_runtime_enable(mei_dev->parent);
return 0;
-err_stop:
- mei_stop(mei_dev);
+err_dereg:
+ mei_deregister(mei_dev);
err_cancel:
mei_cancel_work(mei_dev);
@@ -392,7 +392,7 @@ static void mei_vsc_remove(struct platform_device *pdev)
struct mei_device *mei_dev = platform_get_drvdata(pdev);
struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
- pm_runtime_disable(mei_dev->dev);
+ pm_runtime_disable(mei_dev->parent);
mei_stop(mei_dev);
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index 1c156a3f845e..1c0fd185114f 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -436,7 +436,11 @@ static int pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
{
struct pci_dev *pdev = test->pdev;
u32 val;
- int ret;
+ int irq;
+
+ irq = pci_irq_vector(pdev, msi_num - 1);
+ if (irq < 0)
+ return irq;
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
msix ? PCITEST_IRQ_TYPE_MSIX :
@@ -450,11 +454,7 @@ static int pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
if (!val)
return -ETIMEDOUT;
- ret = pci_irq_vector(pdev, msi_num - 1);
- if (ret < 0)
- return ret;
-
- if (ret != test->last_irq)
+ if (irq != test->last_irq)
return -EIO;
return 0;
@@ -937,7 +937,7 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
switch (cmd) {
case PCITEST_BAR:
bar = arg;
- if (bar > BAR_5)
+ if (bar <= NO_BAR || bar > BAR_5)
goto ret;
if (is_am654_pci_dev(pdev) && bar == BAR_0)
goto ret;
@@ -1020,8 +1020,6 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
if (!test)
return -ENOMEM;
- test->test_reg_bar = 0;
- test->alignment = 0;
test->pdev = pdev;
test->irq_type = PCITEST_IRQ_TYPE_UNDEFINED;
diff --git a/drivers/misc/ti_fpc202.c b/drivers/misc/ti_fpc202.c
index 0b1a6350c02b..7964e46c7448 100644
--- a/drivers/misc/ti_fpc202.c
+++ b/drivers/misc/ti_fpc202.c
@@ -333,7 +333,7 @@ static int fpc202_probe(struct i2c_client *client)
priv->gpio.base = -1;
priv->gpio.direction_input = fpc202_gpio_direction_input;
priv->gpio.direction_output = fpc202_gpio_direction_output;
- priv->gpio.set_rv = fpc202_gpio_set;
+ priv->gpio.set = fpc202_gpio_set;
priv->gpio.get = fpc202_gpio_get;
priv->gpio.ngpio = FPC202_GPIO_COUNT;
priv->gpio.parent = dev;
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 6653fc53c951..6df51ee8db62 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -1806,7 +1806,7 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
* the list after acquiring the lock.
*/
get_page(newpage);
- ret = MIGRATEPAGE_SUCCESS;
+ ret = 0;
}
/* Update the balloon list under the @pages_lock */
@@ -1817,7 +1817,7 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
* If we succeed just insert it to the list and update the statistics
* under the lock.
*/
- if (ret == MIGRATEPAGE_SUCCESS) {
+ if (!ret) {
balloon_page_insert(&b->b_dev_info, newpage);
__count_vm_event(BALLOON_MIGRATE);
}
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 9cc47bf94804..9399bf6c766a 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -121,6 +121,10 @@ struct rpmb_frame {
#define RPMB_READ_DATA 0x4 /* Read data from RPMB partition */
#define RPMB_RESULT_READ 0x5 /* Read result request (Internal) */
+#define RPMB_FRAME_SIZE sizeof(struct rpmb_frame)
+#define CHECK_SIZE_NEQ(val) ((val) != sizeof(struct rpmb_frame))
+#define CHECK_SIZE_ALIGNED(val) IS_ALIGNED((val), sizeof(struct rpmb_frame))
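+
+/*
+ * CHECK_SIZE_NEQ() is true when @val is not exactly one RPMB frame;
+ * CHECK_SIZE_ALIGNED() is true when @val is a whole number of frames
+ * (callers negate it to reject ragged multi-frame buffers).
+ */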
+
static DEFINE_MUTEX(block_mutex);
/*
@@ -435,9 +439,9 @@ static void mmc_blk_release(struct gendisk *disk)
}
static int
-mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+mmc_blk_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
+ geo->cylinders = get_capacity(disk) / (4 * 16);
geo->heads = 4;
geo->sectors = 16;
return 0;
@@ -1768,8 +1772,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
* these, while retaining features like reliable writes.
*/
if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
- (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
- do_data_tag)) {
+ (do_rel_wr || !mmc_card_blk_no_cmd23(card) || do_data_tag)) {
brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
brq->sbc.arg = brq->data.blocks |
(do_rel_wr ? (1 << 31) : 0) |
@@ -2618,13 +2621,8 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
*/
md->read_only = mmc_blk_readonly(card);
- if (mmc_host_can_cmd23(card->host)) {
- if ((mmc_card_mmc(card) &&
- card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
- (mmc_card_sd(card) && !mmc_card_ult_capacity(card) &&
- card->scr.cmds & SD_SCR_CMD23_SUPPORT))
- md->flags |= MMC_BLK_CMD23;
- }
+ if (mmc_host_can_cmd23(card->host) && mmc_card_can_cmd23(card))
+ md->flags |= MMC_BLK_CMD23;
if (md->flags & MMC_BLK_CMD23 &&
((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
@@ -2864,12 +2862,12 @@ static void set_idata(struct mmc_blk_ioc_data *idata, u32 opcode,
* The size of an RPMB frame must match what's expected by the
* hardware.
*/
- BUILD_BUG_ON(sizeof(struct rpmb_frame) != 512);
+ static_assert(!CHECK_SIZE_NEQ(512), "RPMB frame size must be 512 bytes");
idata->ic.opcode = opcode;
idata->ic.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
idata->ic.write_flag = write_flag;
- idata->ic.blksz = sizeof(struct rpmb_frame);
+ idata->ic.blksz = RPMB_FRAME_SIZE;
idata->ic.blocks = buf_bytes / idata->ic.blksz;
idata->buf = buf;
idata->buf_bytes = buf_bytes;
@@ -2893,32 +2891,28 @@ static int mmc_route_rpmb_frames(struct device *dev, u8 *req,
if (IS_ERR(md->queue.card))
return PTR_ERR(md->queue.card);
- if (req_len < sizeof(*frm))
+ if (req_len < RPMB_FRAME_SIZE)
return -EINVAL;
req_type = be16_to_cpu(frm->req_resp);
switch (req_type) {
case RPMB_PROGRAM_KEY:
- if (req_len != sizeof(struct rpmb_frame) ||
- resp_len != sizeof(struct rpmb_frame))
+ if (CHECK_SIZE_NEQ(req_len) || CHECK_SIZE_NEQ(resp_len))
return -EINVAL;
write = true;
break;
case RPMB_GET_WRITE_COUNTER:
- if (req_len != sizeof(struct rpmb_frame) ||
- resp_len != sizeof(struct rpmb_frame))
+ if (CHECK_SIZE_NEQ(req_len) || CHECK_SIZE_NEQ(resp_len))
return -EINVAL;
write = false;
break;
case RPMB_WRITE_DATA:
- if (req_len % sizeof(struct rpmb_frame) ||
- resp_len != sizeof(struct rpmb_frame))
+ if (!CHECK_SIZE_ALIGNED(req_len) || CHECK_SIZE_NEQ(resp_len))
return -EINVAL;
write = true;
break;
case RPMB_READ_DATA:
- if (req_len != sizeof(struct rpmb_frame) ||
- resp_len % sizeof(struct rpmb_frame))
+ if (CHECK_SIZE_NEQ(req_len) || !CHECK_SIZE_ALIGNED(resp_len))
return -EINVAL;
write = false;
break;
@@ -2926,25 +2920,23 @@ static int mmc_route_rpmb_frames(struct device *dev, u8 *req,
return -EINVAL;
}
- if (write)
- cmd_count = 3;
- else
- cmd_count = 2;
+ /* Write operations require 3 commands, read operations require 2 */
+ cmd_count = write ? 3 : 2;
idata = alloc_idata(rpmb, cmd_count);
if (!idata)
return -ENOMEM;
if (write) {
- struct rpmb_frame *frm = (struct rpmb_frame *)resp;
+ struct rpmb_frame *resp_frm = (struct rpmb_frame *)resp;
/* Send write request frame(s) */
set_idata(idata[0], MMC_WRITE_MULTIPLE_BLOCK,
1 | MMC_CMD23_ARG_REL_WR, req, req_len);
/* Send result request frame */
- memset(frm, 0, sizeof(*frm));
- frm->req_resp = cpu_to_be16(RPMB_RESULT_READ);
+ memset(resp_frm, 0, RPMB_FRAME_SIZE);
+ resp_frm->req_resp = cpu_to_be16(RPMB_RESULT_READ);
set_idata(idata[1], MMC_WRITE_MULTIPLE_BLOCK, 1, resp,
resp_len);
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 1cf64e0952fb..ec4f3462bf80 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -19,6 +19,7 @@
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
#include "core.h"
#include "card.h"
@@ -383,6 +384,14 @@ int mmc_add_card(struct mmc_card *card)
mmc_card_set_present(card);
+ /*
+ * Register for undervoltage notification if the card supports
+ * power-off notification, enabling emergency shutdowns.
+ */
+ if (mmc_card_mmc(card) &&
+ card->ext_csd.power_off_notification == EXT_CSD_POWER_ON)
+ mmc_regulator_register_undervoltage_notifier(card->host);
+
return 0;
}
@@ -394,6 +403,9 @@ void mmc_remove_card(struct mmc_card *card)
{
struct mmc_host *host = card->host;
+ if (mmc_card_present(card))
+ mmc_regulator_unregister_undervoltage_notifier(host);
+
mmc_remove_card_debugfs(card);
if (mmc_card_present(card)) {
diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
index 9cbdd240c3a7..1200951bab08 100644
--- a/drivers/mmc/core/card.h
+++ b/drivers/mmc/core/card.h
@@ -245,14 +245,19 @@ static inline int mmc_blksz_for_byte_mode(const struct mmc_card *c)
return c->quirks & MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
}
+static inline int mmc_card_nonstd_func_interface(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_NONSTD_FUNC_IF;
+}
+
static inline int mmc_card_disable_cd(const struct mmc_card *c)
{
return c->quirks & MMC_QUIRK_DISABLE_CD;
}
-static inline int mmc_card_nonstd_func_interface(const struct mmc_card *c)
+static inline int mmc_card_blk_no_cmd23(const struct mmc_card *c)
{
- return c->quirks & MMC_QUIRK_NONSTD_FUNC_IF;
+ return c->quirks & MMC_QUIRK_BLK_NO_CMD23;
}
static inline int mmc_card_broken_byte_mode_512(const struct mmc_card *c)
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 874c6fe92855..860378bea557 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1398,6 +1398,29 @@ void mmc_power_cycle(struct mmc_host *host, u32 ocr)
mmc_power_up(host, ocr);
}
+/**
+ * mmc_handle_undervoltage - Handle an undervoltage event on the MMC bus
+ * @host: The MMC host that detected the undervoltage condition
+ *
+ * This function is called when an undervoltage event is detected on one of
+ * the MMC regulators.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int mmc_handle_undervoltage(struct mmc_host *host)
+{
+ /* Stop the host to prevent races with card removal */
+ __mmc_stop_host(host);
+
+ if (!host->bus_ops || !host->bus_ops->handle_undervoltage)
+ return 0;
+
+ dev_warn(mmc_dev(host), "%s: Undervoltage detected, initiating emergency stop\n",
+ mmc_hostname(host));
+
+ return host->bus_ops->handle_undervoltage(host);
+}
+
/*
* Assign a mmc bus handler to a host. Only one bus handler may control a
* host at any given time.
@@ -1875,6 +1898,15 @@ bool mmc_card_can_secure_erase_trim(struct mmc_card *card)
}
EXPORT_SYMBOL(mmc_card_can_secure_erase_trim);
+bool mmc_card_can_cmd23(struct mmc_card *card)
+{
+ return ((mmc_card_mmc(card) &&
+ card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
+ (mmc_card_sd(card) && !mmc_card_ult_capacity(card) &&
+ card->scr.cmds & SD_SCR_CMD23_SUPPORT));
+}
+EXPORT_SYMBOL(mmc_card_can_cmd23);
+
int mmc_erase_group_aligned(struct mmc_card *card, sector_t from,
unsigned int nr)
{
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 622085cd766f..a028b48be164 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -31,6 +31,7 @@ struct mmc_bus_ops {
int (*sw_reset)(struct mmc_host *);
bool (*cache_enabled)(struct mmc_host *);
int (*flush_cache)(struct mmc_host *);
+ int (*handle_undervoltage)(struct mmc_host *host);
};
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
@@ -59,6 +60,10 @@ void mmc_power_off(struct mmc_host *host);
void mmc_power_cycle(struct mmc_host *host, u32 ocr);
void mmc_set_initial_state(struct mmc_host *host);
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max);
+int mmc_handle_undervoltage(struct mmc_host *host);
+void mmc_regulator_register_undervoltage_notifier(struct mmc_host *host);
+void mmc_regulator_unregister_undervoltage_notifier(struct mmc_host *host);
+void mmc_undervoltage_workfn(struct work_struct *work);
static inline void mmc_delay(unsigned int ms)
{
@@ -123,6 +128,7 @@ bool mmc_card_can_trim(struct mmc_card *card);
bool mmc_card_can_discard(struct mmc_card *card);
bool mmc_card_can_sanitize(struct mmc_card *card);
bool mmc_card_can_secure_erase_trim(struct mmc_card *card);
+bool mmc_card_can_cmd23(struct mmc_card *card);
int mmc_erase_group_aligned(struct mmc_card *card, sector_t from, unsigned int nr);
unsigned int mmc_calc_max_discard(struct mmc_card *card);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index f14671ea5716..88c95dbfd9cf 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -302,6 +302,8 @@ int mmc_of_parse(struct mmc_host *host)
/* f_max is obtained from the optional "max-frequency" property */
device_property_read_u32(dev, "max-frequency", &host->f_max);
+ device_property_read_u32(dev, "max-sd-hs-hz", &host->max_sd_hs_hz);
+
/*
* Configure CD and WP pins. They are both by default active low to
* match the SDHCI spec. If GPIOs are provided for CD and / or WP, the
@@ -564,6 +566,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
INIT_WORK(&host->sdio_irq_work, sdio_irq_work);
timer_setup(&host->retune_timer, mmc_retune_timer, 0);
+ INIT_WORK(&host->supply.uv_work, mmc_undervoltage_workfn);
+
/*
* By default, hosts do not support SGIO or large requests.
* They have to set these according to their abilities.
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 5be9b42d5057..3e7d9437477c 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -36,6 +36,7 @@
enum mmc_poweroff_type {
MMC_POWEROFF_SUSPEND,
MMC_POWEROFF_SHUTDOWN,
+ MMC_POWEROFF_UNDERVOLTAGE,
MMC_POWEROFF_UNBIND,
};
@@ -2132,9 +2133,15 @@ static int _mmc_suspend(struct mmc_host *host, enum mmc_poweroff_type pm_type)
if (mmc_card_suspended(host->card))
goto out;
- err = _mmc_flush_cache(host);
- if (err)
- goto out;
+ /*
+ * For the undervoltage case, device integrity matters most: skip the
+ * cache flush and notify the device so it powers off quickly.
+ */
+ if (pm_type != MMC_POWEROFF_UNDERVOLTAGE) {
+ err = _mmc_flush_cache(host);
+ if (err)
+ goto out;
+ }
if (mmc_card_can_poweroff_notify(host->card) &&
mmc_host_can_poweroff_notify(host, pm_type))
@@ -2213,6 +2220,13 @@ static int mmc_shutdown(struct mmc_host *host)
int err = 0;
/*
+ * In case of undervoltage, the card will be powered off (removed) by
+ * _mmc_handle_undervoltage()
+ */
+ if (mmc_card_removed(host->card))
+ return 0;
+
+ /*
* If the card remains suspended at this point and it was done by using
* the sleep-cmd (CMD5), we may need to re-initialize it first, to allow
* us to send the preferred poweroff-notification cmd at shutdown.
@@ -2302,6 +2316,55 @@ static int _mmc_hw_reset(struct mmc_host *host)
return mmc_init_card(host, card->ocr, card);
}
+/**
+ * _mmc_handle_undervoltage - Handle an undervoltage event for MMC/eMMC devices
+ * @host: MMC host structure
+ *
+ * This function is triggered when an undervoltage condition is detected.
+ * It attempts to transition the device into a low-power or safe state to
+ * prevent data corruption.
+ *
+ * Steps performed:
+ * - Perform an emergency suspend using EXT_CSD_POWER_OFF_SHORT if possible.
+ * - If power-off notify is not supported, fallback mechanisms like sleep or
+ * deselecting the card are attempted.
+ * - Cache flushing is skipped to reduce execution time.
+ * - Mark the card as removed to prevent further interactions after
+ * undervoltage.
+ *
+ * Note: This function does not handle host claiming or releasing. The caller
+ * must ensure that the host is properly claimed before calling this
+ * function and released afterward.
+ *
+ * Returns: 0 on success, or a negative error code if any step fails.
+ */
+static int _mmc_handle_undervoltage(struct mmc_host *host)
+{
+ struct mmc_card *card = host->card;
+ int err;
+
+ /*
+ * Perform an emergency suspend to power off the eMMC quickly.
+ * This ensures the device enters a safe state before power is lost.
+ * We first attempt EXT_CSD_POWER_OFF_SHORT, but if power-off notify
+ * is not supported, we fall back to sleep mode or deselecting the card.
+ * Cache flushing is skipped to minimize delay.
+ */
+ err = _mmc_suspend(host, MMC_POWEROFF_UNDERVOLTAGE);
+ if (err)
+ pr_err("%s: undervoltage suspend failed: %pe\n",
+ mmc_hostname(host), ERR_PTR(err));
+
+ /*
+ * Mark the card as removed to prevent further operations.
+ * This ensures the system does not attempt to access the device
+ * after an undervoltage event, avoiding potential corruption.
+ */
+ mmc_card_set_removed(card);
+
+ return err;
+}
+
static const struct mmc_bus_ops mmc_ops = {
.remove = mmc_remove,
.detect = mmc_detect,
@@ -2314,6 +2377,7 @@ static const struct mmc_bus_ops mmc_ops = {
.hw_reset = _mmc_hw_reset,
.cache_enabled = _mmc_cache_enabled,
.flush_cache = _mmc_flush_cache,
+ .handle_undervoltage = _mmc_handle_undervoltage,
};
/*
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 66283825513c..a952cc8265af 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -1077,3 +1077,75 @@ int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms)
return err;
}
EXPORT_SYMBOL_GPL(mmc_sanitize);
+
+/**
+ * mmc_read_tuning() - read data blocks from the mmc
+ * @host: mmc host doing the read
+ * @blksz: data block size
+ * @blocks: number of blocks to read
+ *
+ * Read one or more blocks of data from the beginning of the mmc. This is a
+ * low-level helper for tuning operations. It is assumed that CMD23 can be
+ * used for a multi-block read if the host supports it.
+ *
+ * Note: a temporary buffer is allocated and freed internally to hold the
+ * data read. The data itself is not available to the caller, only the
+ * status of the read operation.
+ *
+ * Return: 0 in case of success, otherwise -EIO / -ENOMEM / -E2BIG
+ */
+int mmc_read_tuning(struct mmc_host *host, unsigned int blksz, unsigned int blocks)
+{
+ struct mmc_request mrq = {};
+ struct mmc_command sbc = {};
+ struct mmc_command cmd = {};
+ struct mmc_command stop = {};
+ struct mmc_data data = {};
+ struct scatterlist sg;
+ void *buf;
+ unsigned int len;
+
+ if (blocks > 1) {
+ if (mmc_host_can_cmd23(host)) {
+ mrq.sbc = &sbc;
+ sbc.opcode = MMC_SET_BLOCK_COUNT;
+ sbc.arg = blocks;
+ sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ }
+ cmd.opcode = MMC_READ_MULTIPLE_BLOCK;
+ mrq.stop = &stop;
+ stop.opcode = MMC_STOP_TRANSMISSION;
+ stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+ } else {
+ cmd.opcode = MMC_READ_SINGLE_BLOCK;
+ }
+
+ mrq.cmd = &cmd;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ mrq.data = &data;
+ data.flags = MMC_DATA_READ;
+ data.blksz = blksz;
+ data.blocks = blocks;
+ data.blk_addr = 0;
+ data.sg = &sg;
+ data.sg_len = 1;
+ data.timeout_ns = 1000000000;
+
+ if (check_mul_overflow(blksz, blocks, &len))
+ return -E2BIG;
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ sg_init_one(&sg, buf, len);
+
+ mmc_wait_for_req(host, &mrq);
+ kfree(buf);
+
+ if (sbc.error || cmd.error || data.error)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mmc_read_tuning);
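+
+/*
+ * Hedged usage sketch (hypothetical host driver, not part of this patch):
+ * step through the controller's delay taps and use mmc_read_tuning() as a
+ * pass/fail probe. my_set_delay_tap() and MY_NUM_TAPS are assumed names,
+ * and keeping the last passing tap is a simplification.
+ *
+ *	static int my_execute_tuning(struct mmc_host *mmc, u32 opcode)
+ *	{
+ *		int tap, best = -1;
+ *
+ *		for (tap = 0; tap < MY_NUM_TAPS; tap++) {
+ *			my_set_delay_tap(mmc, tap);
+ *			if (!mmc_read_tuning(mmc, 512, 8))
+ *				best = tap;
+ *		}
+ *		if (best < 0)
+ *			return -EIO;
+ *		my_set_delay_tap(mmc, best);
+ *		return 0;
+ *	}
+ */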
diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c
index 80e5d87a5e50..67d4a301895c 100644
--- a/drivers/mmc/core/mmc_test.c
+++ b/drivers/mmc/core/mmc_test.c
@@ -180,20 +180,14 @@ static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
return mmc_set_blocklen(test->card, size);
}
-static bool mmc_test_card_cmd23(struct mmc_card *card)
-{
- return mmc_card_mmc(card) ||
- (mmc_card_sd(card) && card->scr.cmds & SD_SCR_CMD23_SUPPORT);
-}
-
static void mmc_test_prepare_sbc(struct mmc_test_card *test,
struct mmc_request *mrq, unsigned int blocks)
{
struct mmc_card *card = test->card;
if (!mrq->sbc || !mmc_host_can_cmd23(card->host) ||
- !mmc_test_card_cmd23(card) || !mmc_op_multi(mrq->cmd->opcode) ||
- (card->quirks & MMC_QUIRK_BLK_NO_CMD23)) {
+ !mmc_card_can_cmd23(card) || !mmc_op_multi(mrq->cmd->opcode) ||
+ mmc_card_blk_no_cmd23(card)) {
mrq->sbc = NULL;
return;
}
diff --git a/drivers/mmc/core/regulator.c b/drivers/mmc/core/regulator.c
index 3dae2e9b7978..a85179f1a4de 100644
--- a/drivers/mmc/core/regulator.c
+++ b/drivers/mmc/core/regulator.c
@@ -7,6 +7,7 @@
#include <linux/err.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
+#include <linux/workqueue.h>
#include <linux/mmc/host.h>
@@ -262,6 +263,82 @@ static inline int mmc_regulator_get_ocrmask(struct regulator *supply)
#endif /* CONFIG_REGULATOR */
+/* To be called from a high-priority workqueue */
+void mmc_undervoltage_workfn(struct work_struct *work)
+{
+ struct mmc_supply *supply;
+ struct mmc_host *host;
+
+ supply = container_of(work, struct mmc_supply, uv_work);
+ host = container_of(supply, struct mmc_host, supply);
+
+ mmc_handle_undervoltage(host);
+}
+
+static int mmc_handle_regulator_event(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct mmc_supply *supply = container_of(nb, struct mmc_supply,
+ vmmc_nb);
+ struct mmc_host *host = container_of(supply, struct mmc_host, supply);
+ unsigned long flags;
+
+ switch (event) {
+ case REGULATOR_EVENT_UNDER_VOLTAGE:
+ spin_lock_irqsave(&host->lock, flags);
+ if (host->undervoltage) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ return NOTIFY_OK;
+ }
+
+ host->undervoltage = true;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ queue_work(system_highpri_wq, &host->supply.uv_work);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+/**
+ * mmc_regulator_register_undervoltage_notifier - Register for undervoltage
+ * events
+ * @host: MMC host
+ *
+ * To be called by a bus driver when a card supporting graceful shutdown
+ * is attached.
+ */
+void mmc_regulator_register_undervoltage_notifier(struct mmc_host *host)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(host->supply.vmmc))
+ return;
+
+ host->supply.vmmc_nb.notifier_call = mmc_handle_regulator_event;
+ ret = regulator_register_notifier(host->supply.vmmc,
+ &host->supply.vmmc_nb);
+ if (ret)
+ dev_warn(mmc_dev(host), "Failed to register vmmc notifier: %d\n", ret);
+}
+
+/**
+ * mmc_regulator_unregister_undervoltage_notifier - Unregister undervoltage
+ * notifier
+ * @host: MMC host
+ */
+void mmc_regulator_unregister_undervoltage_notifier(struct mmc_host *host)
+{
+ if (IS_ERR_OR_NULL(host->supply.vmmc))
+ return;
+
+ regulator_unregister_notifier(host->supply.vmmc, &host->supply.vmmc_nb);
+ cancel_work_sync(&host->supply.uv_work);
+}
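+
+/*
+ * Producer-side sketch (hedged, not part of this patch): a regulator
+ * driver that detects brown-out reports it through the regulator core,
+ * which is what invokes the vmmc notifier registered above:
+ *
+ *	regulator_notifier_call_chain(rdev, REGULATOR_EVENT_UNDER_VOLTAGE,
+ *				      NULL);
+ */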
+
/**
* mmc_regulator_get_supply - try to get VMMC and VQMMC regulators for a host
* @mmc: the host to regulate
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index ec02067f03c5..67cd63004829 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -359,7 +359,7 @@ static int mmc_read_switch(struct mmc_card *card)
}
if (status[13] & SD_MODE_HIGH_SPEED)
- card->sw_caps.hs_max_dtr = HIGH_SPEED_MAX_DTR;
+ card->sw_caps.hs_max_dtr = card->host->max_sd_hs_hz ?: HIGH_SPEED_MAX_DTR;
if (card->scr.sda_spec3) {
card->sw_caps.sd3_bus_mode = status[13];
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 0f753367aec1..83085e76486a 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -945,7 +945,11 @@ static void mmc_sdio_remove(struct mmc_host *host)
*/
static int mmc_sdio_alive(struct mmc_host *host)
{
- return mmc_select_card(host->card);
+ if (mmc_host_is_spi(host))
+ return mmc_io_rw_direct(host->card, 0, 0, SDIO_CCCR_CCCR, 0,
+ NULL);
+ return mmc_select_card(host->card);
}
/*
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 656601754966..10799772494a 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -200,7 +200,6 @@ disable_runtimepm:
atomic_dec(&func->card->sdio_funcs_probed);
if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
pm_runtime_put_noidle(dev);
- dev_pm_domain_detach(dev, false);
return ret;
}
@@ -231,8 +230,6 @@ static void sdio_bus_remove(struct device *dev)
/* Then undo the runtime PM settings in sdio_bus_probe() */
if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
pm_runtime_put_sync(dev);
-
- dev_pm_domain_detach(dev, false);
}
static const struct dev_pm_ops sdio_bus_pm_ops = {
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 7232de1c0688..2c963cb6724b 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -56,7 +56,7 @@ config MMC_STM32_SDMMC
config MMC_PXA
tristate "Intel PXA25x/26x/27x Multimedia Card Interface support"
- depends on ARCH_PXA
+ depends on ARCH_PXA || COMPILE_TEST
help
This selects the Intel(R) PXA(R) Multimedia card Interface.
If you have a PXA(R) platform with a Multimedia Card slot,
@@ -359,7 +359,7 @@ config MMC_SDHCI_S3C
depends on PLAT_SAMSUNG || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
help
This selects the Secure Digital Host Controller Interface (SDHCI)
- often referrered to as the HSMMC block in some of the Samsung
+ often referred to as the HSMMC block in some of the Samsung
S3C6410, S5Pv210 and Exynos (Exynos4210, Exynos4412) SoCs.
If you have a controller with this interface (therefore you build for
@@ -401,7 +401,7 @@ config MMC_SDHCI_SPEAR
depends on OF
help
This selects the Secure Digital Host Controller Interface (SDHCI)
- often referrered to as the HSMMC block in some of the ST SPEAR range
+ often referred to as the HSMMC block in some of the ST SPEAR range
of SoC
If you have a controller with this interface, say Y or M here.
@@ -608,7 +608,7 @@ config MMC_SDHCI_MSM
config MMC_MXC
tristate "Freescale i.MX21/27/31 or MPC512x Multimedia Card support"
- depends on ARCH_MXC || PPC_MPC512x
+ depends on ARCH_MXC || PPC_MPC512x || COMPILE_TEST
help
This selects the Freescale i.MX21, i.MX27, i.MX31 or MPC512x
Multimedia Card Interface. If you have an i.MX or MPC512x platform
@@ -866,7 +866,8 @@ config MMC_DW_PCI
config MMC_DW_ROCKCHIP
tristate "Rockchip specific extensions for Synopsys DW Memory Card Interface"
- depends on MMC_DW && ARCH_ROCKCHIP
+ depends on MMC_DW
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
select MMC_DW_PLTFM
help
This selects support for Rockchip SoC specific extensions to the
@@ -948,7 +949,7 @@ config MMC_USHC
config MMC_WMT
tristate "Wondermedia SD/MMC Host Controller support"
- depends on ARCH_VT8500
+ depends on ARCH_VT8500 || COMPILE_TEST
default y
help
This selects support for the SD/MMC Host Controller on
@@ -1115,6 +1116,7 @@ config MMC_LOONGSON2
tristate "Loongson-2K SD/SDIO/eMMC Host Interface support"
depends on LOONGARCH || COMPILE_TEST
depends on HAS_DMA
+ select REGMAP_MMIO
help
This selects support for the SD/SDIO/eMMC Host Controller on
Loongson-2K series CPUs.
diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c
index 288c3a91a0af..721db54739c1 100644
--- a/drivers/mmc/host/alcor.c
+++ b/drivers/mmc/host/alcor.c
@@ -1129,7 +1129,6 @@ static void alcor_pci_sdmmc_drv_remove(struct platform_device *pdev)
mmc_remove_host(mmc);
}
-#ifdef CONFIG_PM_SLEEP
static int alcor_pci_sdmmc_suspend(struct device *dev)
{
struct alcor_sdmmc_host *host = dev_get_drvdata(dev);
@@ -1150,10 +1149,9 @@ static int alcor_pci_sdmmc_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
-static SIMPLE_DEV_PM_OPS(alcor_mmc_pm_ops, alcor_pci_sdmmc_suspend,
- alcor_pci_sdmmc_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(alcor_mmc_pm_ops, alcor_pci_sdmmc_suspend,
+ alcor_pci_sdmmc_resume);
static const struct platform_device_id alcor_pci_sdmmc_ids[] = {
{
@@ -1171,7 +1169,7 @@ static struct platform_driver alcor_pci_sdmmc_driver = {
.driver = {
.name = DRV_NAME_ALCOR_PCI_SDMMC,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &alcor_mmc_pm_ops
+ .pm = pm_sleep_ptr(&alcor_mmc_pm_ops),
},
};
module_platform_driver(alcor_pci_sdmmc_driver);
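
/*
 * The conversion pattern applied to alcor above, and repeated below for
 * atmel-mci, au1xmmc, cb710, davinci and dw_mmc-exynos, sketched with
 * hypothetical foo_* names: the suspend/resume callbacks are now always
 * compiled (no #ifdef bitrot), and pm_sleep_ptr()/pm_ptr() resolve to
 * NULL when the matching CONFIG_PM option is off, letting the compiler
 * discard the unused code.
 */
static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		.pm = pm_sleep_ptr(&foo_pm_ops),
	},
};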
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 777342fb7657..d1fbc6811563 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -2622,7 +2622,6 @@ static void atmci_remove(struct platform_device *pdev)
pm_runtime_put_noidle(dev);
}
-#ifdef CONFIG_PM
static int atmci_runtime_suspend(struct device *dev)
{
struct atmel_mci *host = dev_get_drvdata(dev);
@@ -2642,12 +2641,10 @@ static int atmci_runtime_resume(struct device *dev)
return clk_prepare_enable(host->mck);
}
-#endif
static const struct dev_pm_ops atmci_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(atmci_runtime_suspend, atmci_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ RUNTIME_PM_OPS(atmci_runtime_suspend, atmci_runtime_resume, NULL)
};
static struct platform_driver atmci_driver = {
@@ -2657,7 +2654,7 @@ static struct platform_driver atmci_driver = {
.name = "atmel_mci",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = atmci_dt_ids,
- .pm = &atmci_dev_pm_ops,
+ .pm = pm_ptr(&atmci_dev_pm_ops),
},
};
module_platform_driver(atmci_driver);
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index 85470773650d..cc6e05f9b96f 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -1150,10 +1150,9 @@ static void au1xmmc_remove(struct platform_device *pdev)
}
}
-#ifdef CONFIG_PM
-static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state)
+static int au1xmmc_suspend(struct device *dev)
{
- struct au1xmmc_host *host = platform_get_drvdata(pdev);
+ struct au1xmmc_host *host = dev_get_drvdata(dev);
__raw_writel(0, HOST_CONFIG2(host));
__raw_writel(0, HOST_CONFIG(host));
@@ -1164,27 +1163,24 @@ static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state)
return 0;
}
-static int au1xmmc_resume(struct platform_device *pdev)
+static int au1xmmc_resume(struct device *dev)
{
- struct au1xmmc_host *host = platform_get_drvdata(pdev);
+ struct au1xmmc_host *host = dev_get_drvdata(dev);
au1xmmc_reset_controller(host);
return 0;
}
-#else
-#define au1xmmc_suspend NULL
-#define au1xmmc_resume NULL
-#endif
+
+static DEFINE_SIMPLE_DEV_PM_OPS(au1xmmc_pmops, au1xmmc_suspend, au1xmmc_resume);
static struct platform_driver au1xmmc_driver = {
.probe = au1xmmc_probe,
.remove = au1xmmc_remove,
- .suspend = au1xmmc_suspend,
- .resume = au1xmmc_resume,
.driver = {
.name = DRIVER_NAME,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .pm = pm_sleep_ptr(&au1xmmc_pmops),
},
};
diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c
index 448d2f9159ea..31daec787495 100644
--- a/drivers/mmc/host/cb710-mmc.c
+++ b/drivers/mmc/host/cb710-mmc.c
@@ -664,25 +664,25 @@ static const struct mmc_host_ops cb710_mmc_host = {
.get_cd = cb710_mmc_get_cd,
};
-#ifdef CONFIG_PM
-
-static int cb710_mmc_suspend(struct platform_device *pdev, pm_message_t state)
+static int cb710_mmc_suspend(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct cb710_slot *slot = cb710_pdev_to_slot(pdev);
cb710_mmc_enable_irq(slot, 0, ~0);
return 0;
}
-static int cb710_mmc_resume(struct platform_device *pdev)
+static int cb710_mmc_resume(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct cb710_slot *slot = cb710_pdev_to_slot(pdev);
cb710_mmc_enable_irq(slot, 0, ~0);
return 0;
}
-#endif /* CONFIG_PM */
+static DEFINE_SIMPLE_DEV_PM_OPS(cb710_mmc_pmops, cb710_mmc_suspend, cb710_mmc_resume);
static int cb710_mmc_init(struct platform_device *pdev)
{
@@ -767,13 +767,12 @@ static void cb710_mmc_exit(struct platform_device *pdev)
}
static struct platform_driver cb710_mmc_driver = {
- .driver.name = "cb710-mmc",
+ .driver = {
+ .name = "cb710-mmc",
+ .pm = pm_sleep_ptr(&cb710_mmc_pmops),
+ },
.probe = cb710_mmc_init,
.remove = cb710_mmc_exit,
-#ifdef CONFIG_PM
- .suspend = cb710_mmc_suspend,
- .resume = cb710_mmc_resume,
-#endif
};
module_platform_driver(cb710_mmc_driver);
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index c691f1b60395..2b7d6d9bcde5 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -588,7 +588,7 @@ static void mmc_davinci_request(struct mmc_host *mmc, struct mmc_request *req)
cpu_relax();
}
if (mmcst1 & MMCST1_BUSY) {
- dev_err(mmc_dev(host->mmc), "still BUSY? bad ... \n");
+ dev_err(mmc_dev(host->mmc), "still BUSY? bad ...\n");
req->cmd->error = -ETIMEDOUT;
mmc_request_done(mmc, req);
return;
@@ -1347,7 +1347,6 @@ static void davinci_mmcsd_remove(struct platform_device *pdev)
clk_disable_unprepare(host->clk);
}
-#ifdef CONFIG_PM
static int davinci_mmcsd_suspend(struct device *dev)
{
struct mmc_davinci_host *host = dev_get_drvdata(dev);
@@ -1373,21 +1372,14 @@ static int davinci_mmcsd_resume(struct device *dev)
return 0;
}
-static const struct dev_pm_ops davinci_mmcsd_pm = {
- .suspend = davinci_mmcsd_suspend,
- .resume = davinci_mmcsd_resume,
-};
-
-#define davinci_mmcsd_pm_ops (&davinci_mmcsd_pm)
-#else
-#define davinci_mmcsd_pm_ops NULL
-#endif
+static DEFINE_SIMPLE_DEV_PM_OPS(davinci_mmcsd_pm_ops,
+ davinci_mmcsd_suspend, davinci_mmcsd_resume);
static struct platform_driver davinci_mmcsd_driver = {
.driver = {
.name = "davinci_mmc",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = davinci_mmcsd_pm_ops,
+ .pm = pm_sleep_ptr(&davinci_mmcsd_pm_ops),
.of_match_table = davinci_mmc_dt_ids,
},
.probe = davinci_mmcsd_probe,
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index e3548408ca39..384609671a9a 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -189,7 +189,6 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
set_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags);
}
-#ifdef CONFIG_PM
static int dw_mci_exynos_runtime_resume(struct device *dev)
{
struct dw_mci *host = dev_get_drvdata(dev);
@@ -203,9 +202,7 @@ static int dw_mci_exynos_runtime_resume(struct device *dev)
return ret;
}
-#endif /* CONFIG_PM */
-#ifdef CONFIG_PM_SLEEP
/**
* dw_mci_exynos_suspend_noirq - Exynos-specific suspend code
* @dev: Device to suspend (this device)
@@ -265,7 +262,6 @@ static int dw_mci_exynos_resume_noirq(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
static void dw_mci_exynos_config_hs400(struct dw_mci *host, u32 timing)
{
@@ -712,11 +708,8 @@ static void dw_mci_exynos_remove(struct platform_device *pdev)
}
static const struct dev_pm_ops dw_mci_exynos_pmops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dw_mci_exynos_suspend_noirq,
- dw_mci_exynos_resume_noirq)
- SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
- dw_mci_exynos_runtime_resume,
- NULL)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(dw_mci_exynos_suspend_noirq, dw_mci_exynos_resume_noirq)
+ RUNTIME_PM_OPS(dw_mci_runtime_suspend, dw_mci_exynos_runtime_resume, NULL)
};
static struct platform_driver dw_mci_exynos_pltfm_driver = {
@@ -726,7 +719,7 @@ static struct platform_driver dw_mci_exynos_pltfm_driver = {
.name = "dwmmc_exynos",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = dw_mci_exynos_match,
- .pm = &dw_mci_exynos_pmops,
+ .pm = pm_ptr(&dw_mci_exynos_pmops),
},
};
diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c
index 0311a37dd4ab..ad6aa1aea549 100644
--- a/drivers/mmc/host/dw_mmc-k3.c
+++ b/drivers/mmc/host/dw_mmc-k3.c
@@ -461,11 +461,8 @@ static int dw_mci_k3_probe(struct platform_device *pdev)
}
static const struct dev_pm_ops dw_mci_k3_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
- dw_mci_runtime_resume,
- NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ RUNTIME_PM_OPS(dw_mci_runtime_suspend, dw_mci_runtime_resume, NULL)
};
static struct platform_driver dw_mci_k3_pltfm_driver = {
@@ -475,7 +472,7 @@ static struct platform_driver dw_mci_k3_pltfm_driver = {
.name = "dwmmc_k3",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = dw_mci_k3_match,
- .pm = &dw_mci_k3_dev_pm_ops,
+ .pm = pm_ptr(&dw_mci_k3_dev_pm_ops),
},
};
diff --git a/drivers/mmc/host/dw_mmc-pci.c b/drivers/mmc/host/dw_mmc-pci.c
index e7ab699f488e..092cc99175af 100644
--- a/drivers/mmc/host/dw_mmc-pci.c
+++ b/drivers/mmc/host/dw_mmc-pci.c
@@ -75,11 +75,8 @@ static void dw_mci_pci_remove(struct pci_dev *pdev)
}
static const struct dev_pm_ops dw_mci_pci_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
- dw_mci_runtime_resume,
- NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ RUNTIME_PM_OPS(dw_mci_runtime_suspend, dw_mci_runtime_resume, NULL)
};
static const struct pci_device_id dw_mci_pci_id[] = {
@@ -94,7 +91,7 @@ static struct pci_driver dw_mci_pci_driver = {
.probe = dw_mci_pci_probe,
.remove = dw_mci_pci_remove,
.driver = {
- .pm = &dw_mci_pci_dev_pm_ops,
+ .pm = pm_ptr(&dw_mci_pci_dev_pm_ops),
},
};
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index baa23b517731..82dd906bb002 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -6,6 +6,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/hw_bitfield.h>
#include <linux/mmc/host.h>
#include <linux/of_address.h>
#include <linux/mmc/slot-gpio.h>
@@ -24,8 +25,6 @@
#define ROCKCHIP_MMC_DELAYNUM_OFFSET 2
#define ROCKCHIP_MMC_DELAYNUM_MASK (0xff << ROCKCHIP_MMC_DELAYNUM_OFFSET)
#define ROCKCHIP_MMC_DELAY_ELEMENT_PSEC 60
-#define HIWORD_UPDATE(val, mask, shift) \
- ((val) << (shift) | (mask) << ((shift) + 16))
static const unsigned int freqs[] = { 100000, 200000, 300000, 400000 };
@@ -148,9 +147,11 @@ static int rockchip_mmc_set_internal_phase(struct dw_mci *host, bool sample, int
raw_value |= nineties;
if (sample)
- mci_writel(host, TIMING_CON1, HIWORD_UPDATE(raw_value, 0x07ff, 1));
+ mci_writel(host, TIMING_CON1,
+ FIELD_PREP_WM16(GENMASK(11, 1), raw_value));
else
- mci_writel(host, TIMING_CON0, HIWORD_UPDATE(raw_value, 0x07ff, 1));
+ mci_writel(host, TIMING_CON0,
+ FIELD_PREP_WM16(GENMASK(11, 1), raw_value));
dev_dbg(host->dev, "set %s_phase(%d) delay_nums=%u actual_degrees=%d\n",
sample ? "sample" : "drv", degrees, delay_num,
@@ -568,11 +569,8 @@ static void dw_mci_rockchip_remove(struct platform_device *pdev)
}
static const struct dev_pm_ops dw_mci_rockchip_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
- dw_mci_runtime_resume,
- NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ RUNTIME_PM_OPS(dw_mci_runtime_suspend, dw_mci_runtime_resume, NULL)
};
static struct platform_driver dw_mci_rockchip_pltfm_driver = {
@@ -582,7 +580,7 @@ static struct platform_driver dw_mci_rockchip_pltfm_driver = {
.name = "dwmmc_rockchip",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = dw_mci_rockchip_match,
- .pm = &dw_mci_rockchip_dev_pm_ops,
+ .pm = pm_ptr(&dw_mci_rockchip_dev_pm_ops),
},
};
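The Rockchip hunks replace the driver-local HIWORD_UPDATE() macro with FIELD_PREP_WM16() from the new <linux/hw_bitfield.h>. Rockchip "hiword" registers only update bits whose write-enable bit in the upper 16 bits is set; assuming FIELD_PREP_WM16(mask, val) expands to FIELD_PREP(mask, val) | (mask << 16), the two forms produce the same word for the 11-bit field at bits 11:1:

	#include <linux/bits.h>
	#include <linux/hw_bitfield.h>

	/*
	 * HIWORD_UPDATE(val, 0x07ff, 1)
	 *	== (val << 1) | (0x07ff << 17)
	 * FIELD_PREP_WM16(GENMASK(11, 1), val)
	 *	== ((val << 1) & GENMASK(11, 1)) | (GENMASK(11, 1) << 16)
	 * and GENMASK(11, 1) << 16 == 0x07ff << 17, so the write-enable
	 * bits land in the same place.
	 */
	static inline u32 timing_field(u32 val)
	{
		return FIELD_PREP_WM16(GENMASK(11, 1), val);
	}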
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 5463392dc811..648b4a5641bf 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -541,6 +541,9 @@ extern void dw_mci_remove(struct dw_mci *host);
#ifdef CONFIG_PM
extern int dw_mci_runtime_suspend(struct device *device);
extern int dw_mci_runtime_resume(struct device *device);
+#else
+static inline int dw_mci_runtime_suspend(struct device *device) { return -EOPNOTSUPP; }
+static inline int dw_mci_runtime_resume(struct device *device) { return -EOPNOTSUPP; }
#endif
/**
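The new stubs keep references to dw_mci_runtime_suspend()/dw_mci_runtime_resume() compilable when CONFIG_PM=n; the pm_ptr()-wrapped ops tables in the glue drivers are then discarded, so the stubs are never actually called. The general shape of the pattern, with a hypothetical "baz" prefix:

	#ifdef CONFIG_PM
	int baz_runtime_suspend(struct device *dev);
	int baz_runtime_resume(struct device *dev);
	#else
	/* Unreachable when CONFIG_PM=n; only keeps references compilable. */
	static inline int baz_runtime_suspend(struct device *dev) { return -EOPNOTSUPP; }
	static inline int baz_runtime_resume(struct device *dev) { return -EOPNOTSUPP; }
	#endif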
diff --git a/drivers/mmc/host/meson-mx-sdhc-clkc.c b/drivers/mmc/host/meson-mx-sdhc-clkc.c
index cbd17a596cd2..6d619bd0a8dc 100644
--- a/drivers/mmc/host/meson-mx-sdhc-clkc.c
+++ b/drivers/mmc/host/meson-mx-sdhc-clkc.c
@@ -84,10 +84,8 @@ static int meson_mx_sdhc_gate_clk_hw_register(struct device *dev,
return ret;
clk_bulk_data[bulk_index].clk = devm_clk_hw_get_clk(dev, hw, name_suffix);
- if (IS_ERR(clk_bulk_data[bulk_index].clk))
- return PTR_ERR(clk_bulk_data[bulk_index].clk);
- return 0;
+ return PTR_ERR_OR_ZERO(clk_bulk_data[bulk_index].clk);
}
int meson_mx_sdhc_register_clkc(struct device *dev, void __iomem *base,
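PTR_ERR_OR_ZERO() from <linux/err.h> collapses the common "if (IS_ERR(p)) return PTR_ERR(p); return 0;" tail into one expression, as the meson hunk above does. Sketch (qux_get_clk() is a hypothetical getter returning a pointer or an ERR_PTR):

	#include <linux/err.h>

	static int qux_init(struct device *dev)
	{
		struct clk *clk = qux_get_clk(dev);	/* hypothetical */

		/* 0 on success, PTR_ERR(clk) if clk is an error pointer */
		return PTR_ERR_OR_ZERO(clk);
	}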
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 35b0ad273b4f..42936e248c55 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -563,10 +563,10 @@ mmc_spi_setup_data_message(struct mmc_spi_host *host, bool multiple, bool write)
* the next token (next data block, or STOP_TRAN). We can try to
* minimize I/O ops by using a single read to collect end-of-busy.
*/
- if (multiple || write) {
+ if (write) {
t = &host->early_status;
memset(t, 0, sizeof(*t));
- t->len = write ? sizeof(scratch->status) : 1;
+ t->len = sizeof(scratch->status);
t->tx_buf = host->ones;
t->rx_buf = scratch->status;
t->cs_change = 1;
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 8367283647a9..e500051bd572 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -2516,7 +2516,6 @@ static void mmci_remove(struct amba_device *dev)
}
}
-#ifdef CONFIG_PM
static void mmci_save(struct mmci_host *host)
{
unsigned long flags;
@@ -2581,12 +2580,10 @@ static int mmci_runtime_resume(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops mmci_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
};
static const struct amba_id mmci_ids[] = {
@@ -2675,7 +2672,7 @@ MODULE_DEVICE_TABLE(amba, mmci_ids);
static struct amba_driver mmci_driver = {
.drv = {
.name = DRIVER_NAME,
- .pm = &mmci_dev_pm_ops,
+ .pm = pm_ptr(&mmci_dev_pm_ops),
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = mmci_probe,
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index d7020e06dd55..79074291e9d2 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -3278,7 +3278,7 @@ static void msdc_restore_reg(struct msdc_host *host)
__msdc_enable_sdio_irq(host, 1);
}
-static int __maybe_unused msdc_runtime_suspend(struct device *dev)
+static int msdc_runtime_suspend(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
struct msdc_host *host = mmc_priv(mmc);
@@ -3300,7 +3300,7 @@ static int __maybe_unused msdc_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused msdc_runtime_resume(struct device *dev)
+static int msdc_runtime_resume(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
struct msdc_host *host = mmc_priv(mmc);
@@ -3323,7 +3323,7 @@ static int __maybe_unused msdc_runtime_resume(struct device *dev)
return 0;
}
-static int __maybe_unused msdc_suspend(struct device *dev)
+static int msdc_suspend(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
struct msdc_host *host = mmc_priv(mmc);
@@ -3348,7 +3348,7 @@ static int __maybe_unused msdc_suspend(struct device *dev)
return pm_runtime_force_suspend(dev);
}
-static int __maybe_unused msdc_resume(struct device *dev)
+static int msdc_resume(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
struct msdc_host *host = mmc_priv(mmc);
@@ -3360,8 +3360,8 @@ static int __maybe_unused msdc_resume(struct device *dev)
}
static const struct dev_pm_ops msdc_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(msdc_suspend, msdc_resume)
- SET_RUNTIME_PM_OPS(msdc_runtime_suspend, msdc_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(msdc_suspend, msdc_resume)
+ RUNTIME_PM_OPS(msdc_runtime_suspend, msdc_runtime_resume, NULL)
};
static struct platform_driver mt_msdc_driver = {
@@ -3371,7 +3371,7 @@ static struct platform_driver mt_msdc_driver = {
.name = "mtk-msdc",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = msdc_of_ids,
- .pm = &msdc_dev_pm_ops,
+ .pm = pm_ptr(&msdc_dev_pm_ops),
},
};
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index a9e6277789ba..79df2fa89a3f 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -292,7 +292,7 @@ static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data,
host->pio_ptr = NULL;
host->pio_size = 0;
} else {
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags,
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
mmc_get_dma_dir(data));
}
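The mvsdio fix follows the DMA-API rule that dma_unmap_sg() must be passed the same nents that was given to dma_map_sg() (here data->sg_len), not the possibly smaller mapped count that dma_map_sg() returned (which the driver had cached in host->sg_frags). In sketch form, with hypothetical names:

	#include <linux/dma-mapping.h>

	static int quux_map_data(struct device *dev, struct scatterlist *sgl,
				 int nents)
	{
		int mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);

		if (!mapped)
			return -EIO;
		/* ...program the controller with "mapped" descriptors... */
		dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE); /* original nents */
		return 0;
	}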
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index a6e44e406106..7c7c52d9e8e7 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -680,7 +680,6 @@ static void mxs_mmc_remove(struct platform_device *pdev)
clk_disable_unprepare(ssp->clk);
}
-#ifdef CONFIG_PM_SLEEP
static int mxs_mmc_suspend(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
@@ -699,9 +698,8 @@ static int mxs_mmc_resume(struct device *dev)
return clk_prepare_enable(ssp->clk);
}
-#endif
-static SIMPLE_DEV_PM_OPS(mxs_mmc_pm_ops, mxs_mmc_suspend, mxs_mmc_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(mxs_mmc_pm_ops, mxs_mmc_suspend, mxs_mmc_resume);
static struct platform_driver mxs_mmc_driver = {
.probe = mxs_mmc_probe,
@@ -709,7 +707,7 @@ static struct platform_driver mxs_mmc_driver = {
.driver = {
.name = DRIVER_NAME,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &mxs_mmc_pm_ops,
+ .pm = pm_sleep_ptr(&mxs_mmc_pm_ops),
.of_match_table = mxs_mmc_dt_ids,
},
};
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index adc0d0b6ae37..09e4354d1f1d 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -620,8 +620,6 @@ static void omap_hsmmc_set_bus_mode(struct omap_hsmmc_host *host)
OMAP_HSMMC_WRITE(host->base, CON, con & ~OD);
}
-#ifdef CONFIG_PM
-
/*
* Restore the MMC host context, if it was lost as result of a
* power state change.
@@ -689,6 +687,7 @@ out:
return 0;
}
+#ifdef CONFIG_PM
/*
* Save the MMC host context (store the number of power state changes so far).
*/
@@ -1990,7 +1989,6 @@ static void omap_hsmmc_remove(struct platform_device *pdev)
clk_disable_unprepare(host->dbclk);
}
-#ifdef CONFIG_PM_SLEEP
static int omap_hsmmc_suspend(struct device *dev)
{
struct omap_hsmmc_host *host = dev_get_drvdata(dev);
@@ -2032,9 +2030,7 @@ static int omap_hsmmc_resume(struct device *dev)
pm_runtime_put_autosuspend(host->dev);
return 0;
}
-#endif
-#ifdef CONFIG_PM
static int omap_hsmmc_runtime_suspend(struct device *dev)
{
struct omap_hsmmc_host *host;
@@ -2102,11 +2098,10 @@ static int omap_hsmmc_runtime_resume(struct device *dev)
spin_unlock_irqrestore(&host->irq_lock, flags);
return 0;
}
-#endif
static const struct dev_pm_ops omap_hsmmc_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(omap_hsmmc_suspend, omap_hsmmc_resume)
- SET_RUNTIME_PM_OPS(omap_hsmmc_runtime_suspend, omap_hsmmc_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(omap_hsmmc_suspend, omap_hsmmc_resume)
+ RUNTIME_PM_OPS(omap_hsmmc_runtime_suspend, omap_hsmmc_runtime_resume, NULL)
};
static struct platform_driver omap_hsmmc_driver = {
@@ -2115,7 +2110,7 @@ static struct platform_driver omap_hsmmc_driver = {
.driver = {
.name = DRIVER_NAME,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &omap_hsmmc_dev_pm_ops,
+ .pm = pm_ptr(&omap_hsmmc_dev_pm_ops),
.of_match_table = of_match_ptr(omap_mmc_of_match),
},
};
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index fb8ca03f661d..f56fa2cd208d 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -222,7 +222,11 @@ static void renesas_sdhi_set_clock(struct tmio_mmc_host *host,
clk &= ~0xff;
}
- sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
+ clock = clk & CLK_CTL_DIV_MASK;
+ if (clock != CLK_CTL_DIV_MASK)
+ host->mmc->actual_clock /= (1 << (ffs(clock) + 1));
+
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clock);
if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
usleep_range(10000, 11000);
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 4b389e92399e..9e3ed0bcddd6 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -107,7 +107,8 @@ static const struct renesas_sdhi_of_data of_data_rza2 = {
static const struct renesas_sdhi_of_data of_data_rcar_gen3 = {
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL |
- TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2,
+ TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2 |
+ TMIO_MMC_64BIT_DATA_PORT,
.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY,
.capabilities2 = MMC_CAP2_NO_WRITE_PROTECT | MMC_CAP2_MERGE_CAPABLE,
diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
index c5f6b9df066b..84674659a84d 100644
--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
+++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
@@ -48,7 +48,7 @@ struct rtsx_usb_sdmmc {
bool ddr_mode;
unsigned char power_mode;
-
+ u16 ocp_stat;
#ifdef RTSX_USB_USE_LEDS_CLASS
struct led_classdev led;
char led_name[32];
@@ -789,12 +789,20 @@ static int sdmmc_get_cd(struct mmc_host *mmc)
if (err)
goto no_card;
+ /* get OCP status */
+ host->ocp_stat = (val >> 4) & 0x03;
+
if (val & SD_CD) {
host->card_exist = true;
return 1;
}
no_card:
+ /* clear OCP status */
+ if (host->ocp_stat & (MS_OCP_NOW | MS_OCP_EVER)) {
+ rtsx_usb_write_register(ucr, OCPCTL, MS_OCP_CLEAR, MS_OCP_CLEAR);
+ host->ocp_stat = 0;
+ }
host->card_exist = false;
return 0;
}
@@ -818,7 +826,11 @@ static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
cmd->error = -ENOMEDIUM;
goto finish_detect_card;
}
-
+	/* check OCP status */
+ if (host->ocp_stat & (MS_OCP_NOW | MS_OCP_EVER)) {
+ cmd->error = -ENOMEDIUM;
+ goto finish_detect_card;
+ }
mutex_lock(&ucr->dev_mutex);
mutex_lock(&host->host_mutex);
@@ -952,6 +964,10 @@ static int sd_power_on(struct rtsx_usb_sdmmc *host)
struct rtsx_ucr *ucr = host->ucr;
int err;
+ if (host->ocp_stat & (MS_OCP_NOW | MS_OCP_EVER)) {
+ dev_dbg(sdmmc_dev(host), "over current\n");
+ return -EIO;
+ }
dev_dbg(sdmmc_dev(host), "%s\n", __func__);
rtsx_usb_init_cmd(ucr);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_SELECT, 0x07, SD_MOD_SEL);
@@ -978,8 +994,18 @@ static int sd_power_on(struct rtsx_usb_sdmmc *host)
usleep_range(800, 1000);
rtsx_usb_init_cmd(ucr);
+	/* Work around an OCP issue: after OCP, re-enabling card power was unreliable */
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PWR_CTL, POWER_MASK, POWER_ON);
+ rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, FPDCTL, SSC_POWER_MASK, SSC_POWER_DOWN);
+ err = rtsx_usb_send_cmd(ucr, MODE_C, 100);
+ if (err)
+ return err;
+ msleep(20);
+ rtsx_usb_write_register(ucr, FPDCTL, SSC_POWER_MASK, SSC_POWER_ON);
+ usleep_range(180, 200);
+ rtsx_usb_init_cmd(ucr);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PWR_CTL,
- POWER_MASK|LDO3318_PWR_MASK, POWER_ON|LDO_ON);
+ LDO3318_PWR_MASK, LDO_ON);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_OE,
SD_OUTPUT_EN, SD_OUTPUT_EN);
@@ -1332,6 +1358,7 @@ static void rtsx_usb_init_host(struct rtsx_usb_sdmmc *host)
mmc->max_req_size = 524288;
host->power_mode = MMC_POWER_OFF;
+ host->ocp_stat = 0;
}
static int rtsx_usb_sdmmc_drv_probe(struct platform_device *pdev)
@@ -1428,7 +1455,6 @@ static void rtsx_usb_sdmmc_drv_remove(struct platform_device *pdev)
": Realtek USB SD/MMC module has been removed\n");
}
-#ifdef CONFIG_PM
static int rtsx_usb_sdmmc_runtime_suspend(struct device *dev)
{
struct rtsx_usb_sdmmc *host = dev_get_drvdata(dev);
@@ -1446,11 +1472,9 @@ static int rtsx_usb_sdmmc_runtime_resume(struct device *dev)
mmc_detect_change(host->mmc, 0);
return 0;
}
-#endif
static const struct dev_pm_ops rtsx_usb_sdmmc_dev_pm_ops = {
- SET_RUNTIME_PM_OPS(rtsx_usb_sdmmc_runtime_suspend,
- rtsx_usb_sdmmc_runtime_resume, NULL)
+ RUNTIME_PM_OPS(rtsx_usb_sdmmc_runtime_suspend, rtsx_usb_sdmmc_runtime_resume, NULL)
};
static const struct platform_device_id rtsx_usb_sdmmc_ids[] = {
@@ -1469,7 +1493,7 @@ static struct platform_driver rtsx_usb_sdmmc_driver = {
.driver = {
.name = "rtsx_usb_sdmmc",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &rtsx_usb_sdmmc_dev_pm_ops,
+ .pm = pm_ptr(&rtsx_usb_sdmmc_dev_pm_ops),
},
};
module_platform_driver(rtsx_usb_sdmmc_driver);
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 2d46d4854fa1..84c7054607fc 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -973,8 +973,7 @@ static void sdhci_acpi_remove(struct platform_device *pdev)
c->slot->free_slot(pdev);
}
-static void __maybe_unused sdhci_acpi_reset_signal_voltage_if_needed(
- struct device *dev)
+static void sdhci_acpi_reset_signal_voltage_if_needed(struct device *dev)
{
struct sdhci_acpi_host *c = dev_get_drvdata(dev);
struct sdhci_host *host = c->host;
@@ -989,8 +988,6 @@ static void __maybe_unused sdhci_acpi_reset_signal_voltage_if_needed(
}
}
-#ifdef CONFIG_PM_SLEEP
-
static int sdhci_acpi_suspend(struct device *dev)
{
struct sdhci_acpi_host *c = dev_get_drvdata(dev);
@@ -1017,10 +1014,6 @@ static int sdhci_acpi_resume(struct device *dev)
return sdhci_resume_host(c->host);
}
-#endif
-
-#ifdef CONFIG_PM
-
static int sdhci_acpi_runtime_suspend(struct device *dev)
{
struct sdhci_acpi_host *c = dev_get_drvdata(dev);
@@ -1045,12 +1038,9 @@ static int sdhci_acpi_runtime_resume(struct device *dev)
return 0;
}
-#endif
-
static const struct dev_pm_ops sdhci_acpi_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(sdhci_acpi_suspend, sdhci_acpi_resume)
- SET_RUNTIME_PM_OPS(sdhci_acpi_runtime_suspend,
- sdhci_acpi_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(sdhci_acpi_suspend, sdhci_acpi_resume)
+ RUNTIME_PM_OPS(sdhci_acpi_runtime_suspend, sdhci_acpi_runtime_resume, NULL)
};
static struct platform_driver sdhci_acpi_driver = {
@@ -1058,7 +1048,7 @@ static struct platform_driver sdhci_acpi_driver = {
.name = "sdhci-acpi",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.acpi_match_table = sdhci_acpi_ids,
- .pm = &sdhci_acpi_pm_ops,
+ .pm = pm_ptr(&sdhci_acpi_pm_ops),
},
.probe = sdhci_acpi_probe,
.remove = sdhci_acpi_remove,
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
index efc2f3bdc631..15705e85417f 100644
--- a/drivers/mmc/host/sdhci-brcmstb.c
+++ b/drivers/mmc/host/sdhci-brcmstb.c
@@ -496,7 +496,6 @@ static void sdhci_brcmstb_shutdown(struct platform_device *pdev)
MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match);
-#ifdef CONFIG_PM_SLEEP
static int sdhci_brcmstb_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
@@ -540,17 +539,14 @@ static int sdhci_brcmstb_resume(struct device *dev)
return ret;
}
-#endif
-static const struct dev_pm_ops sdhci_brcmstb_pmops = {
- SET_SYSTEM_SLEEP_PM_OPS(sdhci_brcmstb_suspend, sdhci_brcmstb_resume)
-};
+static DEFINE_SIMPLE_DEV_PM_OPS(sdhci_brcmstb_pmops, sdhci_brcmstb_suspend, sdhci_brcmstb_resume);
static struct platform_driver sdhci_brcmstb_driver = {
.driver = {
.name = "sdhci-brcmstb",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &sdhci_brcmstb_pmops,
+ .pm = pm_sleep_ptr(&sdhci_brcmstb_pmops),
.of_match_table = of_match_ptr(sdhci_brcm_of_match),
},
.probe = sdhci_brcmstb_probe,
diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
index 2d823e158c59..435603c8c00b 100644
--- a/drivers/mmc/host/sdhci-cadence.c
+++ b/drivers/mmc/host/sdhci-cadence.c
@@ -36,6 +36,24 @@
#define SDHCI_CDNS_HRS06_MODE_MMC_HS400 0x5
#define SDHCI_CDNS_HRS06_MODE_MMC_HS400ES 0x6
+/* Read block gap */
+#define SDHCI_CDNS_HRS37 0x94 /* interface mode select */
+#define SDHCI_CDNS_HRS37_MODE_DS 0x0
+#define SDHCI_CDNS_HRS37_MODE_HS 0x1
+#define SDHCI_CDNS_HRS37_MODE_UDS_SDR12 0x8
+#define SDHCI_CDNS_HRS37_MODE_UDS_SDR25 0x9
+#define SDHCI_CDNS_HRS37_MODE_UDS_SDR50 0xa
+#define SDHCI_CDNS_HRS37_MODE_UDS_SDR104 0xb
+#define SDHCI_CDNS_HRS37_MODE_UDS_DDR50 0xc
+#define SDHCI_CDNS_HRS37_MODE_MMC_LEGACY 0x20
+#define SDHCI_CDNS_HRS37_MODE_MMC_SDR 0x21
+#define SDHCI_CDNS_HRS37_MODE_MMC_DDR 0x22
+#define SDHCI_CDNS_HRS37_MODE_MMC_HS200 0x23
+#define SDHCI_CDNS_HRS37_MODE_MMC_HS400 0x24
+#define SDHCI_CDNS_HRS37_MODE_MMC_HS400ES 0x25
+#define SDHCI_CDNS_HRS38 0x98 /* Read block gap coefficient */
+#define SDHCI_CDNS_HRS38_BLKGAP_MAX 0xf
+
/* SRS - Slot Register Set (SDHCI-compatible) */
#define SDHCI_CDNS_SRS_BASE 0x200
@@ -251,6 +269,43 @@ static int sdhci_cdns_set_tune_val(struct sdhci_host *host, unsigned int val)
return 0;
}
+/**
+ * sdhci_cdns_tune_blkgap() - tune multi-block read gap
+ * @mmc: MMC host
+ *
+ * Tune the delay used in multi-block reads. To do so, try sending a
+ * multi-block read command with an incremented gap until it succeeds.
+ *
+ * Return: error code
+ */
+static int sdhci_cdns_tune_blkgap(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_cdns_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ void __iomem *hrs37_reg = priv->hrs_addr + SDHCI_CDNS_HRS37;
+ void __iomem *hrs38_reg = priv->hrs_addr + SDHCI_CDNS_HRS38;
+ int ret;
+ u32 gap;
+
+ /* Currently only needed in HS200 mode */
+ if (host->timing != MMC_TIMING_MMC_HS200)
+ return 0;
+
+ writel(SDHCI_CDNS_HRS37_MODE_MMC_HS200, hrs37_reg);
+
+ for (gap = 0; gap <= SDHCI_CDNS_HRS38_BLKGAP_MAX; gap++) {
+ writel(gap, hrs38_reg);
+ ret = mmc_read_tuning(mmc, 512, 32);
+ if (!ret)
+ break;
+ }
+
+ dev_dbg(mmc_dev(mmc), "read block gap tune %s, gap %d\n", ret ? "failed" : "OK", gap);
+ return ret;
+}
+
/*
* In SD mode, software must not use the hardware tuning and instead perform
* an almost identical procedure to eMMC.
@@ -261,6 +316,7 @@ static int sdhci_cdns_execute_tuning(struct sdhci_host *host, u32 opcode)
int max_streak = 0;
int end_of_streak = 0;
int i;
+ int ret;
/*
* Do not execute tuning for UHS_SDR50 or UHS_DDR50.
@@ -288,7 +344,11 @@ static int sdhci_cdns_execute_tuning(struct sdhci_host *host, u32 opcode)
return -EIO;
}
- return sdhci_cdns_set_tune_val(host, end_of_streak - max_streak / 2);
+ ret = sdhci_cdns_set_tune_val(host, end_of_streak - max_streak / 2);
+ if (ret)
+ return ret;
+
+ return sdhci_cdns_tune_blkgap(host->mmc);
}
static void sdhci_cdns_set_uhs_signaling(struct sdhci_host *host,
@@ -551,7 +611,6 @@ static int sdhci_cdns_probe(struct platform_device *pdev)
return sdhci_add_host(host);
}
-#ifdef CONFIG_PM_SLEEP
static int sdhci_cdns_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
@@ -578,11 +637,8 @@ disable_clk:
return ret;
}
-#endif
-static const struct dev_pm_ops sdhci_cdns_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(sdhci_pltfm_suspend, sdhci_cdns_resume)
-};
+static DEFINE_SIMPLE_DEV_PM_OPS(sdhci_cdns_pm_ops, sdhci_pltfm_suspend, sdhci_cdns_resume);
static const struct of_device_id sdhci_cdns_match[] = {
{
@@ -606,7 +662,7 @@ static struct platform_driver sdhci_cdns_driver = {
.driver = {
.name = "sdhci-cdns",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &sdhci_cdns_pm_ops,
+ .pm = pm_sleep_ptr(&sdhci_cdns_pm_ops),
.of_match_table = sdhci_cdns_match,
},
.probe = sdhci_cdns_probe,
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index a040c0896a7b..a7a5df673b0f 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -1650,7 +1650,6 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
}
}
-#ifdef CONFIG_PM_SLEEP
static void sdhc_esdhc_tuning_save(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -1707,7 +1706,6 @@ static void sdhc_esdhc_tuning_restore(struct sdhci_host *host)
host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
}
}
-#endif
static void esdhc_cqe_enable(struct mmc_host *mmc)
{
@@ -2016,7 +2014,6 @@ static void sdhci_esdhc_imx_remove(struct platform_device *pdev)
cpu_latency_qos_remove_request(&imx_data->pm_qos_req);
}
-#ifdef CONFIG_PM_SLEEP
static int sdhci_esdhc_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
@@ -2112,9 +2109,7 @@ static int sdhci_esdhc_resume(struct device *dev)
return ret;
}
-#endif
-#ifdef CONFIG_PM
static int sdhci_esdhc_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
@@ -2188,12 +2183,10 @@ remove_pm_qos_request:
cpu_latency_qos_remove_request(&imx_data->pm_qos_req);
return err;
}
-#endif
static const struct dev_pm_ops sdhci_esdhc_pmops = {
- SET_SYSTEM_SLEEP_PM_OPS(sdhci_esdhc_suspend, sdhci_esdhc_resume)
- SET_RUNTIME_PM_OPS(sdhci_esdhc_runtime_suspend,
- sdhci_esdhc_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(sdhci_esdhc_suspend, sdhci_esdhc_resume)
+ RUNTIME_PM_OPS(sdhci_esdhc_runtime_suspend, sdhci_esdhc_runtime_resume, NULL)
};
static struct platform_driver sdhci_esdhc_imx_driver = {
@@ -2201,7 +2194,7 @@ static struct platform_driver sdhci_esdhc_imx_driver = {
.name = "sdhci-esdhc-imx",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = imx_esdhc_dt_ids,
- .pm = &sdhci_esdhc_pmops,
+ .pm = pm_ptr(&sdhci_esdhc_pmops),
},
.probe = sdhci_esdhc_imx_probe,
.remove = sdhci_esdhc_imx_remove,
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 9d8e20dc8ca1..4e5edbf2fc9b 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -81,6 +81,7 @@
#define CORE_IO_PAD_PWR_SWITCH_EN BIT(15)
#define CORE_IO_PAD_PWR_SWITCH BIT(16)
#define CORE_HC_SELECT_IN_EN BIT(18)
+#define CORE_HC_SELECT_IN_SDR50 (4 << 19)
#define CORE_HC_SELECT_IN_HS400 (6 << 19)
#define CORE_HC_SELECT_IN_MASK (7 << 19)
@@ -1133,6 +1134,10 @@ static bool sdhci_msm_is_tuning_needed(struct sdhci_host *host)
{
struct mmc_ios *ios = &host->mmc->ios;
+ if (ios->timing == MMC_TIMING_UHS_SDR50 &&
+ host->flags & SDHCI_SDR50_NEEDS_TUNING)
+ return true;
+
/*
* Tuning is required for SDR104, HS200 and HS400 cards and
* if clock frequency is greater than 100MHz in these modes.
@@ -1201,6 +1206,8 @@ static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
struct mmc_ios ios = host->mmc->ios;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ const struct sdhci_msm_offset *msm_offset = msm_host->offset;
+ u32 config;
if (!sdhci_msm_is_tuning_needed(host)) {
msm_host->use_cdr = false;
@@ -1217,6 +1224,14 @@ static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
*/
msm_host->tuning_done = 0;
+ if (ios.timing == MMC_TIMING_UHS_SDR50 &&
+ host->flags & SDHCI_SDR50_NEEDS_TUNING) {
+ config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
+ config &= ~CORE_HC_SELECT_IN_MASK;
+ config |= CORE_HC_SELECT_IN_EN | CORE_HC_SELECT_IN_SDR50;
+ writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
+ }
+
/*
* For HS400 tuning in HS200 timing requires:
* - select MCLK/2 in VENDOR_SPEC
@@ -1943,7 +1958,7 @@ static void sdhci_msm_ice_enable(struct sdhci_msm_host *msm_host)
qcom_ice_enable(msm_host->ice);
}
-static __maybe_unused int sdhci_msm_ice_resume(struct sdhci_msm_host *msm_host)
+static int sdhci_msm_ice_resume(struct sdhci_msm_host *msm_host)
{
if (msm_host->mmc->caps2 & MMC_CAP2_CRYPTO)
return qcom_ice_resume(msm_host->ice);
@@ -1951,7 +1966,7 @@ static __maybe_unused int sdhci_msm_ice_resume(struct sdhci_msm_host *msm_host)
return 0;
}
-static __maybe_unused int sdhci_msm_ice_suspend(struct sdhci_msm_host *msm_host)
+static int sdhci_msm_ice_suspend(struct sdhci_msm_host *msm_host)
{
if (msm_host->mmc->caps2 & MMC_CAP2_CRYPTO)
return qcom_ice_suspend(msm_host->ice);
@@ -2011,13 +2026,13 @@ static inline void sdhci_msm_ice_enable(struct sdhci_msm_host *msm_host)
{
}
-static inline __maybe_unused int
+static inline int
sdhci_msm_ice_resume(struct sdhci_msm_host *msm_host)
{
return 0;
}
-static inline __maybe_unused int
+static inline int
sdhci_msm_ice_suspend(struct sdhci_msm_host *msm_host)
{
return 0;
@@ -2801,7 +2816,7 @@ static void sdhci_msm_remove(struct platform_device *pdev)
clk_disable_unprepare(msm_host->bus_clk);
}
-static __maybe_unused int sdhci_msm_runtime_suspend(struct device *dev)
+static int sdhci_msm_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -2820,7 +2835,7 @@ static __maybe_unused int sdhci_msm_runtime_suspend(struct device *dev)
return sdhci_msm_ice_suspend(msm_host);
}
-static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
+static int sdhci_msm_runtime_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -2856,11 +2871,8 @@ static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops sdhci_msm_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend,
- sdhci_msm_runtime_resume,
- NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, sdhci_msm_runtime_resume, NULL)
};
static struct platform_driver sdhci_msm_driver = {
@@ -2869,7 +2881,7 @@ static struct platform_driver sdhci_msm_driver = {
.driver = {
.name = "sdhci_msm",
.of_match_table = sdhci_msm_dt_match,
- .pm = &sdhci_msm_pm_ops,
+ .pm = pm_ptr(&sdhci_msm_pm_ops),
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 42878474e56e..c6f09b53325d 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -99,6 +99,9 @@
#define HIWORD_UPDATE(val, mask, shift) \
((val) << (shift) | (mask) << ((shift) + 16))
+#define CD_STABLE_TIMEOUT_US 1000000
+#define CD_STABLE_MAX_SLEEP_US 10
+
/**
* struct sdhci_arasan_soc_ctl_field - Field used in sdhci_arasan_soc_ctl_map
*
@@ -206,12 +209,15 @@ struct sdhci_arasan_data {
* 19MHz instead
*/
#define SDHCI_ARASAN_QUIRK_CLOCK_25_BROKEN BIT(2)
+/* Enable CD stable check before power-up */
+#define SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE BIT(3)
};
struct sdhci_arasan_of_data {
const struct sdhci_arasan_soc_ctl_map *soc_ctl_map;
const struct sdhci_pltfm_data *pdata;
const struct sdhci_arasan_clk_ops *clk_ops;
+ u32 quirks;
};
static const struct sdhci_arasan_soc_ctl_map rk3399_soc_ctl_map = {
@@ -514,6 +520,24 @@ static int sdhci_arasan_voltage_switch(struct mmc_host *mmc,
return -EINVAL;
}
+static void sdhci_arasan_set_power_and_bus_voltage(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ u32 reg;
+
+ /*
+	 * Ensure that the card-detect logic has stabilized before powering up;
+	 * this is necessary after a host controller reset.
+ */
+ if (mode == MMC_POWER_UP && sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE)
+ read_poll_timeout(sdhci_readl, reg, reg & SDHCI_CD_STABLE, CD_STABLE_MAX_SLEEP_US,
+ CD_STABLE_TIMEOUT_US, false, host, SDHCI_PRESENT_STATE);
+
+ sdhci_set_power_and_bus_voltage(host, mode, vdd);
+}
+
static const struct sdhci_ops sdhci_arasan_ops = {
.set_clock = sdhci_arasan_set_clock,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
@@ -521,7 +545,7 @@ static const struct sdhci_ops sdhci_arasan_ops = {
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_arasan_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
- .set_power = sdhci_set_power_and_bus_voltage,
+ .set_power = sdhci_arasan_set_power_and_bus_voltage,
.hw_reset = sdhci_arasan_hw_reset,
};
@@ -570,7 +594,7 @@ static const struct sdhci_ops sdhci_arasan_cqe_ops = {
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_arasan_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
- .set_power = sdhci_set_power_and_bus_voltage,
+ .set_power = sdhci_arasan_set_power_and_bus_voltage,
.irq = sdhci_arasan_cqhci_irq,
};
@@ -581,7 +605,6 @@ static const struct sdhci_pltfm_data sdhci_arasan_cqe_pdata = {
SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
};
-#ifdef CONFIG_PM_SLEEP
/**
* sdhci_arasan_suspend - Suspend method for the driver
* @dev: Address of the device structure
@@ -675,10 +698,9 @@ static int sdhci_arasan_resume(struct device *dev)
return 0;
}
-#endif /* ! CONFIG_PM_SLEEP */
-static SIMPLE_DEV_PM_OPS(sdhci_arasan_dev_pm_ops, sdhci_arasan_suspend,
- sdhci_arasan_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(sdhci_arasan_dev_pm_ops, sdhci_arasan_suspend,
+ sdhci_arasan_resume);
/**
* sdhci_arasan_sdcardclk_recalc_rate - Return the card clock rate
@@ -1447,6 +1469,7 @@ static const struct sdhci_arasan_clk_ops zynqmp_clk_ops = {
static struct sdhci_arasan_of_data sdhci_arasan_zynqmp_data = {
.pdata = &sdhci_arasan_zynqmp_pdata,
.clk_ops = &zynqmp_clk_ops,
+ .quirks = SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE,
};
static const struct sdhci_arasan_clk_ops versal_clk_ops = {
@@ -1457,6 +1480,7 @@ static const struct sdhci_arasan_clk_ops versal_clk_ops = {
static struct sdhci_arasan_of_data sdhci_arasan_versal_data = {
.pdata = &sdhci_arasan_zynqmp_pdata,
.clk_ops = &versal_clk_ops,
+ .quirks = SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE,
};
static const struct sdhci_arasan_clk_ops versal_net_clk_ops = {
@@ -1467,6 +1491,7 @@ static const struct sdhci_arasan_clk_ops versal_net_clk_ops = {
static struct sdhci_arasan_of_data sdhci_arasan_versal_net_data = {
.pdata = &sdhci_arasan_versal_net_pdata,
.clk_ops = &versal_net_clk_ops,
+ .quirks = SDHCI_ARASAN_QUIRK_ENSURE_CD_STABLE,
};
static struct sdhci_arasan_of_data intel_keembay_emmc_data = {
@@ -1937,6 +1962,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
if (of_device_is_compatible(np, "rockchip,rk3399-sdhci-5.1"))
sdhci_arasan_update_clockmultiplier(host, 0x0);
+ sdhci_arasan->quirks |= data->quirks;
+
if (of_device_is_compatible(np, "intel,keembay-sdhci-5.1-emmc") ||
of_device_is_compatible(np, "intel,keembay-sdhci-5.1-sd") ||
of_device_is_compatible(np, "intel,keembay-sdhci-5.1-sdio")) {
@@ -2051,7 +2078,7 @@ static struct platform_driver sdhci_arasan_driver = {
.name = "sdhci-arasan",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sdhci_arasan_of_match,
- .pm = &sdhci_arasan_dev_pm_ops,
+ .pm = pm_sleep_ptr(&sdhci_arasan_dev_pm_ops),
},
.probe = sdhci_arasan_probe,
.remove = sdhci_arasan_remove,
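sdhci_arasan_set_power_and_bus_voltage() above waits for card detect to settle via read_poll_timeout() from <linux/iopoll.h>, which repeatedly evaluates op(args...) into val until cond holds or timeout_us elapses, sleeping up to sleep_us between reads. A generic sketch (register layout hypothetical):

	#include <linux/iopoll.h>

	static int wait_until_ready(void __iomem *reg)
	{
		u32 val;

		/* poll readl(reg) every ~10us, give up after 1ms */
		return read_poll_timeout(readl, val, val & BIT(0),
					 10, 1000, false, reg);
	}

The return value is 0 once the condition is met, or -ETIMEDOUT; the quirk above deliberately ignores it and proceeds with the power-up either way.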
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 1ba2effaf376..7c4ac65f247d 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -229,7 +229,6 @@ static int sdhci_at91_set_clks_presets(struct device *dev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int sdhci_at91_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
@@ -243,9 +242,7 @@ static int sdhci_at91_suspend(struct device *dev)
return ret;
}
-#endif /* CONFIG_PM_SLEEP */
-#ifdef CONFIG_PM
static int sdhci_at91_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
@@ -302,13 +299,10 @@ out:
sdhci_runtime_resume_host(host, 0);
return 0;
}
-#endif /* CONFIG_PM */
static const struct dev_pm_ops sdhci_at91_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(sdhci_at91_suspend, pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(sdhci_at91_runtime_suspend,
- sdhci_at91_runtime_resume,
- NULL)
+ SYSTEM_SLEEP_PM_OPS(sdhci_at91_suspend, pm_runtime_force_resume)
+ RUNTIME_PM_OPS(sdhci_at91_runtime_suspend, sdhci_at91_runtime_resume, NULL)
};
static int sdhci_at91_probe(struct platform_device *pdev)
@@ -460,7 +454,7 @@ static struct platform_driver sdhci_at91_driver = {
.name = "sdhci-at91",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sdhci_at91_dt_match,
- .pm = &sdhci_at91_dev_pm_ops,
+ .pm = pm_ptr(&sdhci_at91_dev_pm_ops),
},
.probe = sdhci_at91_probe,
.remove = sdhci_at91_remove,
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index ee6b1096f709..eebd45389956 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -1499,7 +1499,6 @@ static void dwcmshc_remove(struct platform_device *pdev)
clk_bulk_disable_unprepare(priv->num_other_clks, priv->other_clks);
}
-#ifdef CONFIG_PM_SLEEP
static int dwcmshc_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
@@ -1570,9 +1569,6 @@ disable_clk:
clk_disable_unprepare(pltfm_host->clk);
return ret;
}
-#endif
-
-#ifdef CONFIG_PM
static void dwcmshc_enable_card_clk(struct sdhci_host *host)
{
@@ -1603,12 +1599,9 @@ static int dwcmshc_runtime_resume(struct device *dev)
return 0;
}
-#endif
-
static const struct dev_pm_ops dwcmshc_pmops = {
- SET_SYSTEM_SLEEP_PM_OPS(dwcmshc_suspend, dwcmshc_resume)
- SET_RUNTIME_PM_OPS(dwcmshc_runtime_suspend,
- dwcmshc_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(dwcmshc_suspend, dwcmshc_resume)
+ RUNTIME_PM_OPS(dwcmshc_runtime_suspend, dwcmshc_runtime_resume, NULL)
};
static struct platform_driver sdhci_dwcmshc_driver = {
@@ -1617,7 +1610,7 @@ static struct platform_driver sdhci_dwcmshc_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sdhci_dwcmshc_dt_ids,
.acpi_match_table = ACPI_PTR(sdhci_dwcmshc_acpi_ids),
- .pm = &dwcmshc_pmops,
+ .pm = pm_ptr(&dwcmshc_pmops),
},
.probe = dwcmshc_probe,
.remove = dwcmshc_remove,
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index c6ee0099ead0..8345e2c5a034 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -1234,7 +1234,6 @@ static u32 esdhc_irq(struct sdhci_host *host, u32 intmask)
return intmask;
}
-#ifdef CONFIG_PM_SLEEP
static u32 esdhc_proctl;
static int esdhc_of_suspend(struct device *dev)
{
@@ -1260,11 +1259,8 @@ static int esdhc_of_resume(struct device *dev)
}
return ret;
}
-#endif
-static SIMPLE_DEV_PM_OPS(esdhc_of_dev_pm_ops,
- esdhc_of_suspend,
- esdhc_of_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(esdhc_of_dev_pm_ops, esdhc_of_suspend, esdhc_of_resume);
static const struct sdhci_ops sdhci_esdhc_be_ops = {
.read_l = esdhc_be_readl,
@@ -1511,7 +1507,7 @@ static struct platform_driver sdhci_esdhc_driver = {
.name = "sdhci-esdhc",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sdhci_esdhc_of_match,
- .pm = &esdhc_of_dev_pm_ops,
+ .pm = pm_sleep_ptr(&esdhc_of_dev_pm_ops),
},
.probe = sdhci_esdhc_probe,
.remove = sdhci_pltfm_remove,
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index cdb09605e009..b5d7c1a80a92 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -1400,8 +1400,7 @@ static void sdhci_omap_remove(struct platform_device *pdev)
pm_runtime_force_suspend(dev);
}
-#ifdef CONFIG_PM
-static void __maybe_unused sdhci_omap_context_save(struct sdhci_omap_host *omap_host)
+static void sdhci_omap_context_save(struct sdhci_omap_host *omap_host)
{
omap_host->con = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
omap_host->hctl = sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL);
@@ -1412,7 +1411,7 @@ static void __maybe_unused sdhci_omap_context_save(struct sdhci_omap_host *omap_
}
/* Order matters here, HCTL must be restored in two phases */
-static void __maybe_unused sdhci_omap_context_restore(struct sdhci_omap_host *omap_host)
+static void sdhci_omap_context_restore(struct sdhci_omap_host *omap_host)
{
sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, omap_host->hctl);
sdhci_omap_writel(omap_host, SDHCI_OMAP_CAPA, omap_host->capa);
@@ -1424,7 +1423,7 @@ static void __maybe_unused sdhci_omap_context_restore(struct sdhci_omap_host *om
sdhci_omap_writel(omap_host, SDHCI_OMAP_ISE, omap_host->ise);
}
-static int __maybe_unused sdhci_omap_runtime_suspend(struct device *dev)
+static int sdhci_omap_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -1443,7 +1442,7 @@ static int __maybe_unused sdhci_omap_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused sdhci_omap_runtime_resume(struct device *dev)
+static int sdhci_omap_runtime_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -1458,13 +1457,10 @@ static int __maybe_unused sdhci_omap_runtime_resume(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops sdhci_omap_dev_pm_ops = {
- SET_RUNTIME_PM_OPS(sdhci_omap_runtime_suspend,
- sdhci_omap_runtime_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+ RUNTIME_PM_OPS(sdhci_omap_runtime_suspend, sdhci_omap_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};
static struct platform_driver sdhci_omap_driver = {
@@ -1473,7 +1469,7 @@ static struct platform_driver sdhci_omap_driver = {
.driver = {
.name = "sdhci-omap",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &sdhci_omap_dev_pm_ops,
+ .pm = pm_ptr(&sdhci_omap_dev_pm_ops),
.of_match_table = omap_sdhci_match,
},
};
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 826958992dfe..47a0a738862b 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -679,8 +679,19 @@ static int intel_start_signal_voltage_switch(struct mmc_host *mmc,
return 0;
}
+static void sdhci_intel_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ u16 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+
+ /* Stop card clock separately to avoid glitches on clock line */
+ if (clk & SDHCI_CLOCK_CARD_EN)
+ sdhci_writew(host, clk & ~SDHCI_CLOCK_CARD_EN, SDHCI_CLOCK_CONTROL);
+
+ sdhci_set_clock(host, clock);
+}
+
static const struct sdhci_ops sdhci_intel_byt_ops = {
- .set_clock = sdhci_set_clock,
+ .set_clock = sdhci_intel_set_clock,
.set_power = sdhci_intel_set_power,
.enable_dma = sdhci_pci_enable_dma,
.set_bus_width = sdhci_set_bus_width,
@@ -690,7 +701,7 @@ static const struct sdhci_ops sdhci_intel_byt_ops = {
};
static const struct sdhci_ops sdhci_intel_glk_ops = {
- .set_clock = sdhci_set_clock,
+ .set_clock = sdhci_intel_set_clock,
.set_power = sdhci_intel_set_power,
.enable_dma = sdhci_pci_enable_dma,
.set_bus_width = sdhci_set_bus_width,
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index 4c2ae71770f7..b0f91cc9e40e 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -283,10 +283,26 @@
#define PCIE_GLI_9767_UHS2_CTL2_ZC_VALUE 0xb
#define PCIE_GLI_9767_UHS2_CTL2_ZC_CTL BIT(6)
#define PCIE_GLI_9767_UHS2_CTL2_ZC_CTL_VALUE 0x1
+#define PCIE_GLI_9767_UHS2_CTL2_FORCE_PHY_RESETN BIT(13)
+#define PCIE_GLI_9767_UHS2_CTL2_FORCE_RESETN_VALUE BIT(14)
#define GLI_MAX_TUNING_LOOP 40
/* Genesys Logic chipset */
+static void sdhci_gli_mask_replay_timer_timeout(struct pci_dev *pdev)
+{
+ int aer;
+ u32 value;
+
+ /* mask the replay timer timeout of AER */
+ aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+ if (aer) {
+ pci_read_config_dword(pdev, aer + PCI_ERR_COR_MASK, &value);
+ value |= PCI_ERR_COR_REP_TIMER;
+ pci_write_config_dword(pdev, aer + PCI_ERR_COR_MASK, value);
+ }
+}
+
static inline void gl9750_wt_on(struct sdhci_host *host)
{
u32 wt_value;
@@ -607,7 +623,6 @@ static void gl9750_hw_setting(struct sdhci_host *host)
{
struct sdhci_pci_slot *slot = sdhci_priv(host);
struct pci_dev *pdev;
- int aer;
u32 value;
pdev = slot->chip->pdev;
@@ -626,12 +641,7 @@ static void gl9750_hw_setting(struct sdhci_host *host)
pci_set_power_state(pdev, PCI_D0);
/* mask the replay timer timeout of AER */
- aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
- if (aer) {
- pci_read_config_dword(pdev, aer + PCI_ERR_COR_MASK, &value);
- value |= PCI_ERR_COR_REP_TIMER;
- pci_write_config_dword(pdev, aer + PCI_ERR_COR_MASK, value);
- }
+ sdhci_gli_mask_replay_timer_timeout(pdev);
gl9750_wt_off(host);
}
@@ -806,7 +816,6 @@ static void sdhci_gl9755_set_clock(struct sdhci_host *host, unsigned int clock)
static void gl9755_hw_setting(struct sdhci_pci_slot *slot)
{
struct pci_dev *pdev = slot->chip->pdev;
- int aer;
u32 value;
gl9755_wt_on(pdev);
@@ -841,12 +850,7 @@ static void gl9755_hw_setting(struct sdhci_pci_slot *slot)
pci_set_power_state(pdev, PCI_D0);
/* mask the replay timer timeout of AER */
- aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
- if (aer) {
- pci_read_config_dword(pdev, aer + PCI_ERR_COR_MASK, &value);
- value |= PCI_ERR_COR_REP_TIMER;
- pci_write_config_dword(pdev, aer + PCI_ERR_COR_MASK, value);
- }
+ sdhci_gli_mask_replay_timer_timeout(pdev);
gl9755_wt_off(pdev);
}
@@ -1177,6 +1181,65 @@ static void gl9767_set_low_power_negotiation(struct pci_dev *pdev, bool enable)
gl9767_vhs_read(pdev);
}
+static void sdhci_gl9767_uhs2_phy_reset(struct sdhci_host *host, bool assert)
+{
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct pci_dev *pdev = slot->chip->pdev;
+ u32 value, set, clr;
+
+ if (assert) {
+		/* Assert reset: set RESETN and clear RESETN_VALUE */
+ set = PCIE_GLI_9767_UHS2_CTL2_FORCE_PHY_RESETN;
+ clr = PCIE_GLI_9767_UHS2_CTL2_FORCE_RESETN_VALUE;
+ } else {
+		/* De-assert reset: clear RESETN and set RESETN_VALUE */
+ set = PCIE_GLI_9767_UHS2_CTL2_FORCE_RESETN_VALUE;
+ clr = PCIE_GLI_9767_UHS2_CTL2_FORCE_PHY_RESETN;
+ }
+
+ gl9767_vhs_write(pdev);
+ pci_read_config_dword(pdev, PCIE_GLI_9767_UHS2_CTL2, &value);
+ value |= set;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_UHS2_CTL2, value);
+ value &= ~clr;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_UHS2_CTL2, value);
+ gl9767_vhs_read(pdev);
+}
+
+static void __gl9767_uhs2_set_power(struct sdhci_host *host, unsigned char mode, unsigned short vdd)
+{
+ u8 pwr = 0;
+
+ if (mode != MMC_POWER_OFF) {
+ pwr = sdhci_get_vdd_value(vdd);
+ if (!pwr)
+ WARN(1, "%s: Invalid vdd %#x\n",
+ mmc_hostname(host->mmc), vdd);
+ pwr |= SDHCI_VDD2_POWER_180;
+ }
+
+ if (host->pwr == pwr)
+ return;
+
+ host->pwr = pwr;
+
+ if (pwr == 0) {
+ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+ } else {
+ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+
+ pwr |= SDHCI_POWER_ON;
+ sdhci_writeb(host, pwr & 0xf, SDHCI_POWER_CONTROL);
+ usleep_range(5000, 6250);
+
+ /* Assert reset */
+ sdhci_gl9767_uhs2_phy_reset(host, true);
+ pwr |= SDHCI_VDD2_POWER_ON;
+ sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+ usleep_range(5000, 6250);
+ }
+}
+
static void sdhci_gl9767_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pci_slot *slot = sdhci_priv(host);
@@ -1203,6 +1266,11 @@ static void sdhci_gl9767_set_clock(struct sdhci_host *host, unsigned int clock)
}
sdhci_enable_clk(host, clk);
+
+ if (mmc_card_uhs2(host->mmc))
+ /* De-assert reset */
+ sdhci_gl9767_uhs2_phy_reset(host, false);
+
gl9767_set_low_power_negotiation(pdev, true);
}
@@ -1474,7 +1542,7 @@ static void sdhci_gl9767_set_power(struct sdhci_host *host, unsigned char mode,
gl9767_vhs_read(pdev);
sdhci_gli_overcurrent_event_enable(host, false);
- sdhci_uhs2_set_power(host, mode, vdd);
+ __gl9767_uhs2_set_power(host, mode, vdd);
sdhci_gli_overcurrent_event_enable(host, true);
} else {
gl9767_vhs_write(pdev);
@@ -1751,7 +1819,7 @@ cleanup:
return ret;
}
-static void gli_set_gl9763e(struct sdhci_pci_slot *slot)
+static void gl9763e_hw_setting(struct sdhci_pci_slot *slot)
{
struct pci_dev *pdev = slot->chip->pdev;
u32 value;
@@ -1780,6 +1848,9 @@ static void gli_set_gl9763e(struct sdhci_pci_slot *slot)
value |= FIELD_PREP(GLI_9763E_HS400_RXDLY, GLI_9763E_HS400_RXDLY_5);
pci_write_config_dword(pdev, PCIE_GLI_9763E_CLKRXDLY, value);
+ /* mask the replay timer timeout of AER */
+ sdhci_gli_mask_replay_timer_timeout(pdev);
+
pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
value &= ~GLI_9763E_VHS_REV;
value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
@@ -1923,7 +1994,7 @@ static int gli_probe_slot_gl9763e(struct sdhci_pci_slot *slot)
gli_pcie_enable_msi(slot);
host->mmc_host_ops.hs400_enhanced_strobe =
gl9763e_hs400_enhanced_strobe;
- gli_set_gl9763e(slot);
+ gl9763e_hw_setting(slot);
sdhci_enable_v4_mode(host);
return 0;
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 1371960e34eb..d082c4e21aa9 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -20,9 +20,11 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/mbus.h>
+#include <linux/units.h>
#include "sdhci.h"
#include "sdhci-pltfm.h"
@@ -51,6 +53,9 @@ struct sdhci_pxa {
struct clk *clk_io;
u8 power_mode;
void __iomem *sdio3_conf_reg;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default;
+ struct pinctrl_state *pins_uhs;
};
/*
@@ -313,8 +318,20 @@ static void pxav3_set_power(struct sdhci_host *host, unsigned char mode,
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
}
+static void pxav3_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ struct sdhci_pltfm_host *phost = sdhci_priv(host);
+ struct sdhci_pxa *pxa = sdhci_pltfm_priv(phost);
+ struct pinctrl_state *pins = clock < 100 * HZ_PER_MHZ ? pxa->pins_default : pxa->pins_uhs;
+
+ if (pins)
+ pinctrl_select_state(pxa->pinctrl, pins);
+
+ sdhci_set_clock(host, clock);
+}
+
static const struct sdhci_ops pxav3_sdhci_ops = {
- .set_clock = sdhci_set_clock,
+ .set_clock = pxav3_set_clock,
.set_power = pxav3_set_power,
.platform_send_init_74_clocks = pxav3_gen_init_74_clocks,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
@@ -366,6 +383,19 @@ static inline struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev)
}
#endif
+static struct pinctrl_state *pxav3_lookup_pinstate(struct device *dev, struct pinctrl *pinctrl,
+ const char *name)
+{
+ struct pinctrl_state *pins = pinctrl_lookup_state(pinctrl, name);
+
+ if (IS_ERR(pins)) {
+ dev_dbg(dev, "could not get pinstate '%s': %ld\n", name, PTR_ERR(pins));
+ return NULL;
+ }
+
+ return pins;
+}
+
static int sdhci_pxav3_probe(struct platform_device *pdev)
{
struct sdhci_pltfm_host *pltfm_host;
@@ -440,6 +470,15 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
host->mmc->pm_caps |= pdata->pm_caps;
}
+ pxa->pinctrl = devm_pinctrl_get(dev);
+ if (!IS_ERR(pxa->pinctrl)) {
+ pxa->pins_default = pxav3_lookup_pinstate(dev, pxa->pinctrl, "default");
+ if (pxa->pins_default)
+ pxa->pins_uhs = pxav3_lookup_pinstate(dev, pxa->pinctrl, "state_uhs");
+ } else {
+ dev_dbg(dev, "could not get pinctrl handle: %ld\n", PTR_ERR(pxa->pinctrl));
+ }
+
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, PXAV3_RPM_DELAY_MS);
@@ -484,7 +523,6 @@ static void sdhci_pxav3_remove(struct platform_device *pdev)
clk_disable_unprepare(pxa->clk_core);
}
-#ifdef CONFIG_PM_SLEEP
static int sdhci_pxav3_suspend(struct device *dev)
{
int ret;
@@ -510,9 +548,7 @@ static int sdhci_pxav3_resume(struct device *dev)
return ret;
}
-#endif
-#ifdef CONFIG_PM
static int sdhci_pxav3_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
@@ -544,12 +580,10 @@ static int sdhci_pxav3_runtime_resume(struct device *dev)
sdhci_runtime_resume_host(host, 0);
return 0;
}
-#endif
static const struct dev_pm_ops sdhci_pxav3_pmops = {
- SET_SYSTEM_SLEEP_PM_OPS(sdhci_pxav3_suspend, sdhci_pxav3_resume)
- SET_RUNTIME_PM_OPS(sdhci_pxav3_runtime_suspend,
- sdhci_pxav3_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(sdhci_pxav3_suspend, sdhci_pxav3_resume)
+ RUNTIME_PM_OPS(sdhci_pxav3_runtime_suspend, sdhci_pxav3_runtime_resume, NULL)
};
static struct platform_driver sdhci_pxav3_driver = {
@@ -557,7 +591,7 @@ static struct platform_driver sdhci_pxav3_driver = {
.name = "sdhci-pxav3",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(sdhci_pxav3_of_match),
- .pm = &sdhci_pxav3_pmops,
+ .pm = pm_ptr(&sdhci_pxav3_pmops),
},
.probe = sdhci_pxav3_probe,
.remove = sdhci_pxav3_remove,
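The pxav3 change selects a pin state based on the requested clock: devm_pinctrl_get() plus pinctrl_lookup_state() fetch the named states ("default" and "state_uhs") once at probe, and pinctrl_select_state() applies the matching one from set_clock(). A condensed sketch of that flow:

	#include <linux/pinctrl/consumer.h>

	static int apply_uhs_pins(struct device *dev, bool uhs)
	{
		struct pinctrl *p = devm_pinctrl_get(dev);
		struct pinctrl_state *s;

		if (IS_ERR(p))
			return PTR_ERR(p);

		s = pinctrl_lookup_state(p, uhs ? "state_uhs" : "default");
		if (IS_ERR(s))
			return PTR_ERR(s);	/* the driver treats states as optional */

		return pinctrl_select_state(p, s);
	}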
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 40857fc2e21b..6bf66aaa86a6 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -681,7 +681,6 @@ static void sdhci_s3c_remove(struct platform_device *pdev)
clk_disable_unprepare(sc->clk_io);
}
-#ifdef CONFIG_PM_SLEEP
static int sdhci_s3c_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
@@ -698,9 +697,7 @@ static int sdhci_s3c_resume(struct device *dev)
return sdhci_resume_host(host);
}
-#endif
-#ifdef CONFIG_PM
static int sdhci_s3c_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
@@ -730,12 +727,10 @@ static int sdhci_s3c_runtime_resume(struct device *dev)
sdhci_runtime_resume_host(host, 0);
return 0;
}
-#endif
static const struct dev_pm_ops sdhci_s3c_pmops = {
- SET_SYSTEM_SLEEP_PM_OPS(sdhci_s3c_suspend, sdhci_s3c_resume)
- SET_RUNTIME_PM_OPS(sdhci_s3c_runtime_suspend, sdhci_s3c_runtime_resume,
- NULL)
+ SYSTEM_SLEEP_PM_OPS(sdhci_s3c_suspend, sdhci_s3c_resume)
+ RUNTIME_PM_OPS(sdhci_s3c_runtime_suspend, sdhci_s3c_runtime_resume, NULL)
};
static const struct platform_device_id sdhci_s3c_driver_ids[] = {
@@ -770,7 +765,7 @@ static struct platform_driver sdhci_s3c_driver = {
.name = "s3c-sdhci",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(sdhci_s3c_dt_match),
- .pm = &sdhci_s3c_pmops,
+ .pm = pm_ptr(&sdhci_s3c_pmops),
},
};
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index fa0f8aeb7ee0..72d21dc0cb69 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -130,7 +130,6 @@ static void sdhci_remove(struct platform_device *pdev)
clk_disable_unprepare(sdhci->clk);
}
-#ifdef CONFIG_PM_SLEEP
static int sdhci_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
@@ -161,9 +160,8 @@ static int sdhci_resume(struct device *dev)
return sdhci_resume_host(host);
}
-#endif
-static SIMPLE_DEV_PM_OPS(sdhci_pm_ops, sdhci_suspend, sdhci_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(sdhci_pm_ops, sdhci_suspend, sdhci_resume);
static const struct of_device_id sdhci_spear_id_table[] = {
{ .compatible = "st,spear300-sdhci" },
@@ -175,7 +173,7 @@ static struct platform_driver sdhci_driver = {
.driver = {
.name = "sdhci",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &sdhci_pm_ops,
+ .pm = pm_sleep_ptr(&sdhci_pm_ops),
.of_match_table = sdhci_spear_id_table,
},
.probe = sdhci_probe,
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index fe2fe52b23b2..3584a2b314a9 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -903,7 +903,6 @@ static const struct of_device_id sdhci_sprd_of_match[] = {
};
MODULE_DEVICE_TABLE(of, sdhci_sprd_of_match);
-#ifdef CONFIG_PM
static int sdhci_sprd_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
@@ -950,13 +949,10 @@ clk_2x_disable:
return ret;
}
-#endif
static const struct dev_pm_ops sdhci_sprd_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(sdhci_sprd_runtime_suspend,
- sdhci_sprd_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ RUNTIME_PM_OPS(sdhci_sprd_runtime_suspend, sdhci_sprd_runtime_resume, NULL)
};
static struct platform_driver sdhci_sprd_driver = {
@@ -966,7 +962,7 @@ static struct platform_driver sdhci_sprd_driver = {
.name = "sdhci_sprd_r11",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sdhci_sprd_of_match,
- .pm = &sdhci_sprd_pm_ops,
+ .pm = pm_ptr(&sdhci_sprd_pm_ops),
},
};
module_platform_driver(sdhci_sprd_driver);
diff --git a/drivers/mmc/host/sdhci-st.c b/drivers/mmc/host/sdhci-st.c
index 9157342ff7a4..bf6685805137 100644
--- a/drivers/mmc/host/sdhci-st.c
+++ b/drivers/mmc/host/sdhci-st.c
@@ -445,7 +445,6 @@ static void sdhci_st_remove(struct platform_device *pdev)
reset_control_assert(rstc);
}
-#ifdef CONFIG_PM_SLEEP
static int sdhci_st_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
@@ -492,9 +491,8 @@ static int sdhci_st_resume(struct device *dev)
return sdhci_resume_host(host);
}
-#endif
-static SIMPLE_DEV_PM_OPS(sdhci_st_pmops, sdhci_st_suspend, sdhci_st_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(sdhci_st_pmops, sdhci_st_suspend, sdhci_st_resume);
static const struct of_device_id st_sdhci_match[] = {
{ .compatible = "st,sdhci" },
@@ -509,7 +507,7 @@ static struct platform_driver sdhci_st_driver = {
.driver = {
.name = "sdhci-st",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &sdhci_st_pmops,
+ .pm = pm_sleep_ptr(&sdhci_st_pmops),
.of_match_table = st_sdhci_match,
},
};
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index c811297185d8..820ce4dae58b 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -1831,7 +1831,7 @@ static void sdhci_tegra_remove(struct platform_device *pdev)
clk_disable_unprepare(tegra_host->tmclk);
}
-static int __maybe_unused sdhci_tegra_runtime_suspend(struct device *dev)
+static int sdhci_tegra_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -1841,7 +1841,7 @@ static int __maybe_unused sdhci_tegra_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused sdhci_tegra_runtime_resume(struct device *dev)
+static int sdhci_tegra_runtime_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -1849,7 +1849,6 @@ static int __maybe_unused sdhci_tegra_runtime_resume(struct device *dev)
return clk_prepare_enable(pltfm_host->clk);
}
-#ifdef CONFIG_PM_SLEEP
static int sdhci_tegra_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
@@ -1910,12 +1909,10 @@ disable_clk:
pm_runtime_force_suspend(dev);
return ret;
}
-#endif
static const struct dev_pm_ops sdhci_tegra_dev_pm_ops = {
- SET_RUNTIME_PM_OPS(sdhci_tegra_runtime_suspend, sdhci_tegra_runtime_resume,
- NULL)
- SET_SYSTEM_SLEEP_PM_OPS(sdhci_tegra_suspend, sdhci_tegra_resume)
+ RUNTIME_PM_OPS(sdhci_tegra_runtime_suspend, sdhci_tegra_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(sdhci_tegra_suspend, sdhci_tegra_resume)
};
static struct platform_driver sdhci_tegra_driver = {
@@ -1923,7 +1920,7 @@ static struct platform_driver sdhci_tegra_driver = {
.name = "sdhci-tegra",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sdhci_tegra_dt_match,
- .pm = &sdhci_tegra_dev_pm_ops,
+ .pm = pm_ptr(&sdhci_tegra_dev_pm_ops),
},
.probe = sdhci_tegra_probe,
.remove = sdhci_tegra_remove,
diff --git a/drivers/mmc/host/sdhci-uhs2.c b/drivers/mmc/host/sdhci-uhs2.c
index 0efeb9d0c376..c459a08d01da 100644
--- a/drivers/mmc/host/sdhci-uhs2.c
+++ b/drivers/mmc/host/sdhci-uhs2.c
@@ -295,7 +295,8 @@ static void __sdhci_uhs2_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
else
sdhci_uhs2_set_power(host, ios->power_mode, ios->vdd);
- sdhci_set_clock(host, host->clock);
+ host->ops->set_clock(host, ios->clock);
+ host->clock = ios->clock;
}
static int sdhci_uhs2_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index b12bee8342bd..046e8100dd08 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -622,7 +622,6 @@ static void xenon_remove(struct platform_device *pdev)
clk_disable_unprepare(pltfm_host->clk);
}
-#ifdef CONFIG_PM_SLEEP
static int xenon_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
@@ -635,9 +634,7 @@ static int xenon_suspend(struct device *dev)
priv->restore_needed = true;
return ret;
}
-#endif
-#ifdef CONFIG_PM
static int xenon_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
@@ -685,14 +682,10 @@ out:
clk_disable_unprepare(pltfm_host->clk);
return ret;
}
-#endif /* CONFIG_PM */
static const struct dev_pm_ops sdhci_xenon_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(xenon_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(xenon_runtime_suspend,
- xenon_runtime_resume,
- NULL)
+ SYSTEM_SLEEP_PM_OPS(xenon_suspend, pm_runtime_force_resume)
+ RUNTIME_PM_OPS(xenon_runtime_suspend, xenon_runtime_resume, NULL)
};
static const struct of_device_id sdhci_xenon_dt_ids[] = {
@@ -721,7 +714,7 @@ static struct platform_driver sdhci_xenon_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sdhci_xenon_dt_ids,
.acpi_match_table = ACPI_PTR(sdhci_xenon_acpi_ids),
- .pm = &sdhci_xenon_dev_pm_ops,
+ .pm = pm_ptr(&sdhci_xenon_dev_pm_ops),
},
.probe = xenon_probe,
.remove = xenon_remove,
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 3a17821efa5c..ac7e11f37af7 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2367,23 +2367,6 @@ void sdhci_set_ios_common(struct mmc_host *mmc, struct mmc_ios *ios)
(ios->power_mode == MMC_POWER_UP) &&
!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
sdhci_enable_preset_value(host, false);
-
- if (!ios->clock || ios->clock != host->clock) {
- host->ops->set_clock(host, ios->clock);
- host->clock = ios->clock;
-
- if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
- host->clock) {
- host->timeout_clk = mmc->actual_clock ?
- mmc->actual_clock / 1000 :
- host->clock / 1000;
- mmc->max_busy_timeout =
- host->ops->get_max_timeout_count ?
- host->ops->get_max_timeout_count(host) :
- 1 << 27;
- mmc->max_busy_timeout /= host->timeout_clk;
- }
- }
}
EXPORT_SYMBOL_GPL(sdhci_set_ios_common);
@@ -2410,6 +2393,23 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
sdhci_set_ios_common(mmc, ios);
+ if (!ios->clock || ios->clock != host->clock) {
+ host->ops->set_clock(host, ios->clock);
+ host->clock = ios->clock;
+
+ if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
+ host->clock) {
+ host->timeout_clk = mmc->actual_clock ?
+ mmc->actual_clock / 1000 :
+ host->clock / 1000;
+ mmc->max_busy_timeout =
+ host->ops->get_max_timeout_count ?
+ host->ops->get_max_timeout_count(host) :
+ 1 << 27;
+ mmc->max_busy_timeout /= host->timeout_clk;
+ }
+ }
+
if (host->ops->set_power)
host->ops->set_power(host, ios->power_mode, ios->vdd);
else
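These two sdhci.c hunks relocate the clock and busy-timeout programming from sdhci_set_ios_common() into sdhci_set_ios(); together with the sdhci-uhs2.c hunk above, the UHS-II path now programs the clock itself instead of inheriting it from the common helper. As a worked example of the timeout math that moved: with SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK set and a 200 MHz SD clock, timeout_clk = 200000000 / 1000 = 200000 (kHz), and with the default maximum timeout count of 1 << 27 the resulting max_busy_timeout is 134217728 / 200000 ≈ 671 ms.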
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 58fcbeaf281e..b6a571d866fa 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -880,6 +880,13 @@ int sdhci_suspend_host(struct sdhci_host *host);
int sdhci_resume_host(struct sdhci_host *host);
void sdhci_runtime_suspend_host(struct sdhci_host *host);
void sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset);
+#else
+static inline bool sdhci_enable_irq_wakeups(struct sdhci_host *host) { return false; }
+static inline void sdhci_disable_irq_wakeups(struct sdhci_host *host) {}
+static inline int sdhci_suspend_host(struct sdhci_host *host) { return -EOPNOTSUPP; }
+static inline int sdhci_resume_host(struct sdhci_host *host) { return -EOPNOTSUPP; }
+static inline void sdhci_runtime_suspend_host(struct sdhci_host *host) {}
+static inline void sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset) {}
#endif
void sdhci_cqe_enable(struct mmc_host *mmc);
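The new #else branch in sdhci.h supplies static inline stubs so that callers built without CONFIG_PM still compile without their own #ifdef guards; combined with pm_ptr() above, the dead call sites are then optimized away. The pattern in general form (sketch, placeholder names):

#ifdef CONFIG_FOO
int foo_do_thing(struct foo *f);
#else
static inline int foo_do_thing(struct foo *f) { return -EOPNOTSUPP; }
#endif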
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index e4fc345be7e5..d235b0aecfdb 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -95,7 +95,6 @@ static const struct regmap_config sdhci_am654_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .fast_io = true,
};
struct timing_data {
@@ -156,6 +155,7 @@ struct sdhci_am654_data {
#define SDHCI_AM654_QUIRK_FORCE_CDTEST BIT(0)
#define SDHCI_AM654_QUIRK_SUPPRESS_V1P8_ENA BIT(1)
+#define SDHCI_AM654_QUIRK_DISABLE_HS400 BIT(2)
};
struct window {
@@ -765,6 +765,7 @@ static int sdhci_am654_init(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
+ struct device *dev = mmc_dev(host->mmc);
u32 ctl_cfg_2 = 0;
u32 mask;
u32 val;
@@ -820,6 +821,12 @@ static int sdhci_am654_init(struct sdhci_host *host)
if (ret)
goto err_cleanup_host;
+ if (sdhci_am654->quirks & SDHCI_AM654_QUIRK_DISABLE_HS400 &&
+ host->mmc->caps2 & (MMC_CAP2_HS400 | MMC_CAP2_HS400_ES)) {
+ dev_info(dev, "HS400 mode not supported on this silicon revision, disabling it\n");
+ host->mmc->caps2 &= ~(MMC_CAP2_HS400 | MMC_CAP2_HS400_ES);
+ }
+
ret = __sdhci_add_host(host);
if (ret)
goto err_cleanup_host;
@@ -883,6 +890,12 @@ static int sdhci_am654_get_of_property(struct platform_device *pdev,
return 0;
}
+static const struct soc_device_attribute sdhci_am654_descope_hs400[] = {
+ { .family = "AM62PX", .revision = "SR1.0" },
+ { .family = "AM62PX", .revision = "SR1.1" },
+ { /* sentinel */ }
+};
+
static const struct of_device_id sdhci_am654_of_match[] = {
{
.compatible = "ti,am654-sdhci-5.1",
@@ -970,6 +983,10 @@ static int sdhci_am654_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "parsing dt failed\n");
+ soc = soc_device_match(sdhci_am654_descope_hs400);
+ if (soc)
+ sdhci_am654->quirks |= SDHCI_AM654_QUIRK_DISABLE_HS400;
+
host->mmc_host_ops.start_signal_voltage_switch = sdhci_am654_start_signal_voltage_switch;
host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning;
@@ -1018,7 +1035,6 @@ static void sdhci_am654_remove(struct platform_device *pdev)
pm_runtime_put_noidle(dev);
}
-#ifdef CONFIG_PM
static int sdhci_am654_restore(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -1106,20 +1122,17 @@ static int sdhci_am654_runtime_resume(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops sdhci_am654_dev_pm_ops = {
- SET_RUNTIME_PM_OPS(sdhci_am654_runtime_suspend,
- sdhci_am654_runtime_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+ RUNTIME_PM_OPS(sdhci_am654_runtime_suspend, sdhci_am654_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
};
static struct platform_driver sdhci_am654_driver = {
.driver = {
.name = "sdhci-am654",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &sdhci_am654_dev_pm_ops,
+ .pm = pm_ptr(&sdhci_am654_dev_pm_ops),
.of_match_table = sdhci_am654_of_match,
},
.probe = sdhci_am654_probe,
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 19f84584ecfa..bf899c8e38f5 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -1568,7 +1568,6 @@ static void sh_mmcif_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
}
-#ifdef CONFIG_PM_SLEEP
static int sh_mmcif_suspend(struct device *dev)
{
struct sh_mmcif_host *host = dev_get_drvdata(dev);
@@ -1580,15 +1579,7 @@ static int sh_mmcif_suspend(struct device *dev)
return 0;
}
-static int sh_mmcif_resume(struct device *dev)
-{
- return 0;
-}
-#endif
-
-static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(sh_mmcif_suspend, sh_mmcif_resume)
-};
+static DEFINE_SIMPLE_DEV_PM_OPS(sh_mmcif_dev_pm_ops, sh_mmcif_suspend, NULL);
static struct platform_driver sh_mmcif_driver = {
.probe = sh_mmcif_probe,
@@ -1596,7 +1587,7 @@ static struct platform_driver sh_mmcif_driver = {
.driver = {
.name = DRIVER_NAME,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &sh_mmcif_dev_pm_ops,
+ .pm = pm_sleep_ptr(&sh_mmcif_dev_pm_ops),
.of_match_table = sh_mmcif_of_match,
},
};
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index ee4a65b0a22d..8dbcff53a631 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1495,7 +1495,6 @@ static void sunxi_mmc_remove(struct platform_device *pdev)
dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
}
-#ifdef CONFIG_PM
static int sunxi_mmc_runtime_resume(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
@@ -1530,14 +1529,10 @@ static int sunxi_mmc_runtime_suspend(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops sunxi_mmc_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(sunxi_mmc_runtime_suspend,
- sunxi_mmc_runtime_resume,
- NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ RUNTIME_PM_OPS(sunxi_mmc_runtime_suspend, sunxi_mmc_runtime_resume, NULL)
};
static struct platform_driver sunxi_mmc_driver = {
@@ -1545,7 +1540,7 @@ static struct platform_driver sunxi_mmc_driver = {
.name = "sunxi-mmc",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sunxi_mmc_of_match,
- .pm = &sunxi_mmc_pm_ops,
+ .pm = pm_ptr(&sunxi_mmc_pm_ops),
},
.probe = sunxi_mmc_probe,
.remove = sunxi_mmc_remove,
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index ac636efd911d..2cd69c9e9571 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -191,7 +191,7 @@ static void tifm_sd_transfer_data(struct tifm_sd *host)
}
off = sg[host->sg_pos].offset + host->block_pos;
- pg = nth_page(sg_page(&sg[host->sg_pos]), off >> PAGE_SHIFT);
+ pg = sg_page(&sg[host->sg_pos]) + (off >> PAGE_SHIFT);
p_off = offset_in_page(off);
p_cnt = PAGE_SIZE - p_off;
p_cnt = min(p_cnt, cnt);
@@ -240,7 +240,7 @@ static void tifm_sd_bounce_block(struct tifm_sd *host, struct mmc_data *r_data)
}
off = sg[host->sg_pos].offset + host->block_pos;
- pg = nth_page(sg_page(&sg[host->sg_pos]), off >> PAGE_SHIFT);
+ pg = sg_page(&sg[host->sg_pos]) + (off >> PAGE_SHIFT);
p_off = offset_in_page(off);
p_cnt = PAGE_SIZE - p_off;
p_cnt = min(p_cnt, cnt);
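Both tifm_sd hunks (and the usdhi6rol0 ones further down) replace nth_page() with plain pointer arithmetic: the struct pages backing a single scatterlist segment are contiguous, so nth_page() adds nothing and is being removed tree-wide. The idiom in isolation (sketch):

struct page *pg = sg_page(sg) + (off >> PAGE_SHIFT);	/* page containing 'off' */
unsigned int p_off = offset_in_page(off);		/* offset within that page */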
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index d730b7633ae1..c8cdb1c0722e 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -16,6 +16,7 @@
#include <linux/dmaengine.h>
#include <linux/highmem.h>
+#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
@@ -242,6 +243,20 @@ static inline void sd_ctrl_read32_rep(struct tmio_mmc_host *host, int addr,
ioread32_rep(host->ctl + (addr << host->bus_shift), buf, count);
}
+#ifdef CONFIG_64BIT
+static inline void sd_ctrl_read64_rep(struct tmio_mmc_host *host, int addr,
+ u64 *buf, int count)
+{
+ readsq(host->ctl + (addr << host->bus_shift), buf, count);
+}
+
+static inline void sd_ctrl_write64_rep(struct tmio_mmc_host *host, int addr,
+ const u64 *buf, int count)
+{
+ writesq(host->ctl + (addr << host->bus_shift), buf, count);
+}
+#endif
+
static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr,
u16 val)
{
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 21c2f9095bac..775e0d9353d5 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -349,6 +349,39 @@ static void tmio_mmc_transfer_data(struct tmio_mmc_host *host,
/*
* Transfer the data
*/
+#ifdef CONFIG_64BIT
+ if (host->pdata->flags & TMIO_MMC_64BIT_DATA_PORT) {
+ u64 *buf64 = (u64 *)buf;
+ u64 data = 0;
+
+ if (count >= 8) {
+ if (is_read)
+ sd_ctrl_read64_rep(host, CTL_SD_DATA_PORT,
+ buf64, count >> 3);
+ else
+ sd_ctrl_write64_rep(host, CTL_SD_DATA_PORT,
+ buf64, count >> 3);
+ }
+
+ /* if count was multiple of 8 */
+ if (!(count & 0x7))
+ return;
+
+ buf64 += count >> 3;
+ count %= 8;
+
+ if (is_read) {
+ sd_ctrl_read64_rep(host, CTL_SD_DATA_PORT, &data, 1);
+ memcpy(buf64, &data, count);
+ } else {
+ memcpy(&data, buf64, count);
+ sd_ctrl_write64_rep(host, CTL_SD_DATA_PORT, &data, 1);
+ }
+
+ return;
+ }
+#endif
+
if (host->pdata->flags & TMIO_MMC_32BIT_DATA_PORT) {
u32 data = 0;
u32 *buf32 = (u32 *)buf;
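The new 64-bit branch mirrors the existing 32-bit one below it: transfer whole 64-bit words in bulk, then bounce the remaining 1..7 bytes through a temporary u64 so the data port only ever sees full-width accesses. The tail handling in isolation (sketch; fifo, buf64 and count are placeholders):

u64 tmp = 0;
size_t words = count >> 3, tail = count & 7;

writesq(fifo, buf64, words);			/* bulk, 8 bytes per access */
if (tail) {
	memcpy(&tmp, buf64 + words, tail);	/* zero-padded remainder */
	writesq(fifo, &tmp, 1);			/* one final full-width write */
}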
diff --git a/drivers/mmc/host/toshsd.c b/drivers/mmc/host/toshsd.c
index e5f7f8abafc0..aa5d2511a62b 100644
--- a/drivers/mmc/host/toshsd.c
+++ b/drivers/mmc/host/toshsd.c
@@ -567,7 +567,6 @@ static void toshsd_powerdown(struct toshsd_host *host)
pci_write_config_byte(host->pdev, SD_PCICFG_CLKSTOP, 0);
}
-#ifdef CONFIG_PM_SLEEP
static int toshsd_pm_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -599,7 +598,6 @@ static int toshsd_pm_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
static int toshsd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -688,16 +686,14 @@ static void toshsd_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static const struct dev_pm_ops toshsd_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(toshsd_pm_suspend, toshsd_pm_resume)
-};
+static DEFINE_SIMPLE_DEV_PM_OPS(toshsd_pm_ops, toshsd_pm_suspend, toshsd_pm_resume);
static struct pci_driver toshsd_driver = {
.name = DRIVER_NAME,
.id_table = pci_ids,
.probe = toshsd_probe,
.remove = toshsd_remove,
- .driver.pm = &toshsd_pm_ops,
+ .driver.pm = pm_sleep_ptr(&toshsd_pm_ops),
};
module_pci_driver(toshsd_driver);
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index 85b49c07918b..3bccf800339b 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -323,7 +323,7 @@ static void usdhi6_blk_bounce(struct usdhi6_host *host,
host->head_pg.page = host->pg.page;
host->head_pg.mapped = host->pg.mapped;
- host->pg.page = nth_page(host->pg.page, 1);
+ host->pg.page = host->pg.page + 1;
host->pg.mapped = kmap(host->pg.page);
host->blk_page = host->bounce_buf;
@@ -503,7 +503,7 @@ static void usdhi6_sg_advance(struct usdhi6_host *host)
/* We cannot get here after crossing a page border */
/* Next page in the same SG */
- host->pg.page = nth_page(sg_page(host->sg), host->page_idx);
+ host->pg.page = sg_page(host->sg) + host->page_idx;
host->pg.mapped = kmap(host->pg.page);
host->blk_page = host->pg.mapped;
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index 3bd49f64899d..c628b3bbfd7a 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -1218,7 +1218,7 @@ static void via_sd_remove(struct pci_dev *pcidev)
pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
}
-static void __maybe_unused via_init_sdc_pm(struct via_crdr_mmc_host *host)
+static void via_init_sdc_pm(struct via_crdr_mmc_host *host)
{
struct sdhcreg *pm_sdhcreg;
void __iomem *addrbase;
@@ -1252,7 +1252,7 @@ static void __maybe_unused via_init_sdc_pm(struct via_crdr_mmc_host *host)
via_print_sdchc(host);
}
-static int __maybe_unused via_sd_suspend(struct device *dev)
+static int via_sd_suspend(struct device *dev)
{
struct via_crdr_mmc_host *host;
unsigned long flags;
@@ -1269,7 +1269,7 @@ static int __maybe_unused via_sd_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused via_sd_resume(struct device *dev)
+static int via_sd_resume(struct device *dev)
{
struct via_crdr_mmc_host *sdhost;
u8 gatt;
@@ -1295,14 +1295,14 @@ static int __maybe_unused via_sd_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(via_sd_pm_ops, via_sd_suspend, via_sd_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(via_sd_pm_ops, via_sd_suspend, via_sd_resume);
static struct pci_driver via_sd_driver = {
.name = DRV_NAME,
.id_table = via_ids,
.probe = via_sd_probe,
.remove = via_sd_remove,
- .driver.pm = &via_sd_pm_ops,
+ .driver.pm = pm_sleep_ptr(&via_sd_pm_ops),
};
module_pci_driver(via_sd_driver);
diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
index 0d2929cfe397..1b1d691e19fc 100644
--- a/drivers/mmc/host/wmt-sdmmc.c
+++ b/drivers/mmc/host/wmt-sdmmc.c
@@ -911,7 +911,6 @@ static void wmt_mci_remove(struct platform_device *pdev)
dev_info(&pdev->dev, "WMT MCI device removed\n");
}
-#ifdef CONFIG_PM
static int wmt_mci_suspend(struct device *dev)
{
u32 reg_tmp;
@@ -963,18 +962,7 @@ static int wmt_mci_resume(struct device *dev)
return 0;
}
-static const struct dev_pm_ops wmt_mci_pm = {
- .suspend = wmt_mci_suspend,
- .resume = wmt_mci_resume,
-};
-
-#define wmt_mci_pm_ops (&wmt_mci_pm)
-
-#else /* !CONFIG_PM */
-
-#define wmt_mci_pm_ops NULL
-
-#endif
+static DEFINE_SIMPLE_DEV_PM_OPS(wmt_mci_pm_ops, wmt_mci_suspend, wmt_mci_resume);
static struct platform_driver wmt_mci_driver = {
.probe = wmt_mci_probe,
@@ -982,7 +970,7 @@ static struct platform_driver wmt_mci_driver = {
.driver = {
.name = DRIVER_NAME,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = wmt_mci_pm_ops,
+ .pm = pm_sleep_ptr(&wmt_mci_pm_ops),
.of_match_table = wmt_mci_dt_ids,
},
};
diff --git a/drivers/most/core.c b/drivers/most/core.c
index a635d5082ebb..da319d108ea1 100644
--- a/drivers/most/core.c
+++ b/drivers/most/core.c
@@ -538,8 +538,8 @@ static struct most_channel *get_channel(char *mdev, char *mdev_ch)
dev = bus_find_device_by_name(&mostbus, NULL, mdev);
if (!dev)
return NULL;
- put_device(dev);
iface = dev_get_drvdata(dev);
+ put_device(dev);
list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
if (!strcmp(dev_name(&c->dev), mdev_ch))
return c;
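The most/core.c change is a use-after-free fix: dev_get_drvdata() dereferences dev, so the reference taken by bus_find_device_by_name() must be held across the read and dropped only afterwards. The safe shape of such a lookup (sketch):

dev = bus_find_device_by_name(bus, NULL, name);	/* takes a reference */
if (!dev)
	return NULL;
priv = dev_get_drvdata(dev);	/* dereferences dev */
put_device(dev);		/* drop the reference only now */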
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c
index a04b6174181c..e254f9cd2796 100644
--- a/drivers/mtd/chips/cfi_probe.c
+++ b/drivers/mtd/chips/cfi_probe.c
@@ -208,7 +208,7 @@ static int __xipram cfi_chip_setup(struct map_info *map,
if (!num_erase_regions)
return 0;
- cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
+ cfi->cfiq = kmalloc(struct_size(cfi->cfiq, EraseRegionInfo, num_erase_regions), GFP_KERNEL);
if (!cfi->cfiq)
return 0;
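struct_size(cfi->cfiq, EraseRegionInfo, num_erase_regions) replaces the open-coded sizeof() + n * 4: it derives the element size from the flexible-array member itself and saturates to SIZE_MAX on multiplication overflow, so a corrupted count makes the allocation fail instead of silently wrapping. In general form (sketch):

struct blob {
	u32 len;
	u32 elem[];	/* flexible array member */
};

struct blob *b = kmalloc(struct_size(b, elem, n), GFP_KERNEL);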
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index 23c32fe584b7..b285962eee2a 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -1953,7 +1953,7 @@ static void jedec_reset(u32 base, struct map_info *map, struct cfi_private *cfi)
* as they will ignore the writes and don't care what address
* the F0 is written to */
if (cfi->addr_unlock1) {
- pr_debug( "reset unlock called %x %x \n",
+ pr_debug("reset unlock called %x %x\n",
cfi->addr_unlock1,cfi->addr_unlock2);
cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
@@ -1985,7 +1985,7 @@ static int cfi_jedec_setup(struct map_info *map, struct cfi_private *cfi, int in
num_erase_regions = jedec_table[index].nr_regions;
- cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
+ cfi->cfiq = kmalloc(struct_size(cfi->cfiq, EraseRegionInfo, num_erase_regions), GFP_KERNEL);
if (!cfi->cfiq) {
//xx printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name);
return 0;
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 46cebde79f34..e518dfeee654 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -185,8 +185,8 @@ config MTD_POWERNV_FLASH
config MTD_INTEL_DG
tristate "Intel Discrete Graphics non-volatile memory driver"
- depends on AUXILIARY_BUS
- depends on MTD
+ depends on AUXILIARY_BUS && MTD
+ depends on DRM_I915!=n || DRM_XE!=n || COMPILE_TEST
help
This provides an MTD device to access Intel Discrete Graphics
non-volatile memory.
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index f2bd1984609c..59a901549257 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -263,7 +263,7 @@ static int build_maps(partition_t *part)
/* Set up virtual page map */
blocks = le32_to_cpu(header.FormattedSize) >> header.BlockSize;
- part->VirtualBlockMap = vmalloc(array_size(blocks, sizeof(uint32_t)));
+ part->VirtualBlockMap = vmalloc_array(blocks, sizeof(uint32_t));
if (!part->VirtualBlockMap)
goto out_XferInfo;
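vmalloc_array(n, size) is the vmalloc counterpart of kmalloc_array(): the n * size overflow check happens inside the allocator rather than in a caller-side array_size() wrapper. The two spellings side by side (sketch):

p = vmalloc(array_size(blocks, sizeof(uint32_t)));	/* old: helper computes the size */
p = vmalloc_array(blocks, sizeof(uint32_t));		/* new: one call, same overflow check */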
diff --git a/drivers/mtd/hyperbus/hbmc-am654.c b/drivers/mtd/hyperbus/hbmc-am654.c
index 82a1e7b7e4d8..9d31464046b2 100644
--- a/drivers/mtd/hyperbus/hbmc-am654.c
+++ b/drivers/mtd/hyperbus/hbmc-am654.c
@@ -272,5 +272,4 @@ module_platform_driver(am654_hbmc_platform_driver);
MODULE_DESCRIPTION("HBMC driver for AM654 SoC");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:hbmc-am654");
MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>");
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index 14e36ae71958..290fd0119e98 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -142,7 +142,7 @@ static int wait_for_ready(struct map_info *map, struct flchip *chip,
if (dsr & DSR_READY_STATUS)
break;
if (!timeo) {
- printk(KERN_ERR "%s: Flash timeout error state %d \n",
+ printk(KERN_ERR "%s: Flash timeout error state %d\n",
map->name, chip_state);
ret = -ETIME;
break;
@@ -186,7 +186,7 @@ static int wait_for_ready(struct map_info *map, struct flchip *chip,
if (dsr & DSR_ERR) {
/* Clear DSR*/
map_write(map, CMD(~(DSR_ERR)), map->pfow_base + PFOW_DSR);
- printk(KERN_WARNING"%s: Bad status on wait: 0x%x \n",
+ printk(KERN_WARNING"%s: Bad status on wait: 0x%x\n",
map->name, dsr);
print_drs_error(dsr);
ret = -EIO;
@@ -321,7 +321,7 @@ static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
/* Resume and pretend we weren't here. */
put_chip(map, chip);
printk(KERN_ERR "%s: suspend operation failed."
- "State may be wrong \n", map->name);
+ "State may be wrong\n", map->name);
return -EIO;
}
chip->erase_suspended = 1;
@@ -468,7 +468,7 @@ static int do_write_buffer(struct map_info *map, struct flchip *chip,
chip->state = FL_WRITING;
ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->ProgBufferTime));
if (ret) {
- printk(KERN_WARNING"%s Buffer program error: %d at %lx; \n",
+ printk(KERN_WARNING"%s Buffer program error: %d at %lx\n",
map->name, ret, adr);
goto out;
}
@@ -736,7 +736,7 @@ static int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
ret = wait_for_ready(map, chip, 1);
if (ret) {
- printk(KERN_ERR "%s: block unlock error status %d \n",
+ printk(KERN_ERR "%s: block unlock error status %d\n",
map->name, ret);
goto out;
}
diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
index 137ae5f0a19b..42281e460c62 100644
--- a/drivers/mtd/lpddr/qinfo_probe.c
+++ b/drivers/mtd/lpddr/qinfo_probe.c
@@ -55,7 +55,7 @@ static long lpddr_get_qinforec_pos(struct map_info *map, char *id_str)
return minor | (major << bankwidth);
}
}
- printk(KERN_ERR"%s qinfo id string is wrong! \n", map->name);
+ printk(KERN_ERR"%s qinfo id string is wrong!\n", map->name);
BUG();
return -1;
}
@@ -112,7 +112,7 @@ static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
return 1; /* "PFOW" is found */
out:
- printk(KERN_WARNING"%s: PFOW string at 0x%lx is not found \n",
+ printk(KERN_WARNING"%s: PFOW string at 0x%lx is not found\n",
map->name, map->pfow_base);
return 0;
}
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 847c11542f02..28e09d080440 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -246,9 +246,9 @@ unlock:
blktrans_dev_put(dev);
}
-static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int blktrans_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
+ struct mtd_blktrans_dev *dev = disk->private_data;
int ret = -ENXIO;
mutex_lock(&dev->lock);
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 5ba9a741f5ac..64808493b4f5 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -384,14 +384,64 @@ EXPORT_SYMBOL_GPL(mtd_check_expert_analysis_mode);
static struct dentry *dfs_dir_mtd;
+static int mtd_ooblayout_show(struct seq_file *s, void *p,
+ int (*iter)(struct mtd_info *, int section,
+ struct mtd_oob_region *region))
+{
+ struct mtd_info *mtd = s->private;
+ int section;
+
+ for (section = 0;; section++) {
+ struct mtd_oob_region region;
+ int err;
+
+ err = iter(mtd, section, &region);
+ if (err) {
+ if (err == -ERANGE)
+ break;
+
+ return err;
+ }
+
+ seq_printf(s, "%-3d %4u %4u\n", section, region.offset,
+ region.length);
+ }
+
+ return 0;
+}
+
+static int mtd_ooblayout_ecc_show(struct seq_file *s, void *p)
+{
+ return mtd_ooblayout_show(s, p, mtd_ooblayout_ecc);
+}
+DEFINE_SHOW_ATTRIBUTE(mtd_ooblayout_ecc);
+
+static int mtd_ooblayout_free_show(struct seq_file *s, void *p)
+{
+ return mtd_ooblayout_show(s, p, mtd_ooblayout_free);
+}
+DEFINE_SHOW_ATTRIBUTE(mtd_ooblayout_free);
+
static void mtd_debugfs_populate(struct mtd_info *mtd)
{
struct device *dev = &mtd->dev;
+ struct mtd_oob_region region;
if (IS_ERR_OR_NULL(dfs_dir_mtd))
return;
mtd->dbg.dfs_dir = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
+ if (IS_ERR_OR_NULL(mtd->dbg.dfs_dir))
+ return;
+
+ /* Create ooblayout files only if at least one region is present. */
+ if (mtd_ooblayout_ecc(mtd, 0, &region) == 0)
+ debugfs_create_file("ooblayout_ecc", 0444, mtd->dbg.dfs_dir,
+ mtd, &mtd_ooblayout_ecc_fops);
+
+ if (mtd_ooblayout_free(mtd, 0, &region) == 0)
+ debugfs_create_file("ooblayout_free", 0444, mtd->dbg.dfs_dir,
+ mtd, &mtd_ooblayout_free_fops);
}
#ifndef CONFIG_MMU
@@ -2339,6 +2389,7 @@ EXPORT_SYMBOL_GPL(mtd_block_isbad);
int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
struct mtd_info *master = mtd_get_master(mtd);
+ loff_t moffs;
int ret;
if (!master->_block_markbad)
@@ -2351,7 +2402,15 @@ int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
- ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
+ moffs = mtd_get_master_ofs(mtd, ofs);
+
+ if (master->_block_isbad) {
+ ret = master->_block_isbad(master, moffs);
+ if (ret > 0)
+ return 0;
+ }
+
+ ret = master->_block_markbad(master, moffs);
if (ret)
return ret;
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 7bf3777e1f13..b88083751a0c 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -356,9 +356,8 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
/* oops_page_used is a bit field */
cxt->oops_page_used =
- vmalloc(array_size(sizeof(unsigned long),
- DIV_ROUND_UP(mtdoops_pages,
- BITS_PER_LONG)));
+ vmalloc_array(DIV_ROUND_UP(mtdoops_pages, BITS_PER_LONG),
+ sizeof(unsigned long));
if (!cxt->oops_page_used) {
pr_err("could not allocate page array\n");
return;
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index 680366616da2..d8f2e5be2d31 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -1285,11 +1285,11 @@ static int mtdswap_init(struct mtdswap_dev *d, unsigned int eblocks,
for (i = 0; i < MTDSWAP_TREE_CNT; i++)
d->trees[i].root = RB_ROOT;
- d->page_data = vmalloc(array_size(pages, sizeof(int)));
+ d->page_data = vmalloc_array(pages, sizeof(int));
if (!d->page_data)
goto page_data_fail;
- d->revmap = vmalloc(array_size(blocks, sizeof(int)));
+ d->revmap = vmalloc_array(blocks, sizeof(int));
if (!d->revmap)
goto revmap_fail;
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 5b0c2c95f10c..4a17271076bc 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -61,6 +61,14 @@ config MTD_NAND_ECC_MEDIATEK
help
This enables support for the hardware ECC engine from Mediatek.
+config MTD_NAND_ECC_REALTEK
+ tristate "Realtek RTL93xx hardware ECC engine"
+ depends on HAS_IOMEM
+ depends on MACH_REALTEK_RTL || COMPILE_TEST
+ select MTD_NAND_ECC
+ help
+ This enables support for the hardware ECC engine from Realtek.
+
endmenu
endmenu
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 44913ff1bf12..2e0e56267718 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -3,6 +3,7 @@
nandcore-objs := core.o bbt.o
obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
obj-$(CONFIG_MTD_NAND_ECC_MEDIATEK) += ecc-mtk.o
+obj-$(CONFIG_MTD_NAND_ECC_REALTEK) += ecc-realtek.o
obj-$(CONFIG_SPI_QPIC_SNAND) += qpic_common.o
obj-$(CONFIG_MTD_NAND_QCOM) += qpic_common.o
obj-y += onenand/
diff --git a/drivers/mtd/nand/core.c b/drivers/mtd/nand/core.c
index 7737b1a4a177..3e76d127715f 100644
--- a/drivers/mtd/nand/core.c
+++ b/drivers/mtd/nand/core.c
@@ -13,6 +13,137 @@
#include <linux/mtd/nand.h>
/**
+ * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
+ * @buf: buffer to test
+ * @len: buffer length
+ * @bitflips_threshold: maximum number of bitflips
+ *
+ * Check if a buffer contains only 0xff, which means the underlying region
+ * has been erased and is ready to be programmed.
+ * The bitflips_threshold specifies the maximum number of bitflips before
+ * the region is considered not erased.
+ * Note: The logic of this function has been extracted from the memweight
+ * implementation, except that nand_check_erased_buf exits before
+ * testing the whole buffer if the number of bitflips exceeds the
+ * bitflips_threshold value.
+ *
+ * Returns a positive number of bitflips less than or equal to
+ * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
+ * threshold.
+ */
+static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
+{
+ const unsigned char *bitmap = buf;
+ int bitflips = 0;
+ int weight;
+
+ for (; len && ((uintptr_t)bitmap) % sizeof(long);
+ len--, bitmap++) {
+ weight = hweight8(*bitmap);
+ bitflips += BITS_PER_BYTE - weight;
+ if (unlikely(bitflips > bitflips_threshold))
+ return -EBADMSG;
+ }
+
+ for (; len >= sizeof(long);
+ len -= sizeof(long), bitmap += sizeof(long)) {
+ unsigned long d = *((unsigned long *)bitmap);
+ if (d == ~0UL)
+ continue;
+ weight = hweight_long(d);
+ bitflips += BITS_PER_LONG - weight;
+ if (unlikely(bitflips > bitflips_threshold))
+ return -EBADMSG;
+ }
+
+ for (; len > 0; len--, bitmap++) {
+ weight = hweight8(*bitmap);
+ bitflips += BITS_PER_BYTE - weight;
+ if (unlikely(bitflips > bitflips_threshold))
+ return -EBADMSG;
+ }
+
+ return bitflips;
+}
+
+/**
+ * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
+ * 0xff data
+ * @data: data buffer to test
+ * @datalen: data length
+ * @ecc: ECC buffer
+ * @ecclen: ECC length
+ * @extraoob: extra OOB buffer
+ * @extraooblen: extra OOB length
+ * @bitflips_threshold: maximum number of bitflips
+ *
+ * Check if a data buffer and its associated ECC and OOB data contains only
+ * 0xff pattern, which means the underlying region has been erased and is
+ * ready to be programmed.
+ * The bitflips_threshold specifies the maximum number of bitflips before
+ * considering the region as not erased.
+ *
+ * Note:
+ * 1/ ECC algorithms work on pre-defined block sizes which are usually
+ * different from the NAND page size. When fixing bitflips, ECC engines will
+ * report the number of errors per chunk, and the NAND core infrastructure
+ * expects you to return the maximum number of bitflips for the whole page.
+ * This is why you should always use this function on a single chunk and
+ * not on the whole page. After checking each chunk you should update your
+ * max_bitflips value accordingly.
+ * 2/ When checking for bitflips in erased pages you should not only check
+ * the payload data but also the associated ECC data, because a user might
+ * have programmed all but a few bits to 1. In this case, we
+ * shouldn't consider the chunk as erased, and checking the ECC bytes
+ * prevents that.
+ * 3/ The extraoob argument is optional, and should be used if some of your OOB
+ * data are protected by the ECC engine.
+ * It could also be used if you support subpages and want to attach some
+ * extra OOB data to an ECC chunk.
+ *
+ * Returns a positive number of bitflips less than or equal to
+ * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
+ * threshold. In case of success, the passed buffers are filled with 0xff.
+ */
+int nand_check_erased_ecc_chunk(void *data, int datalen,
+ void *ecc, int ecclen,
+ void *extraoob, int extraooblen,
+ int bitflips_threshold)
+{
+ int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
+
+ data_bitflips = nand_check_erased_buf(data, datalen,
+ bitflips_threshold);
+ if (data_bitflips < 0)
+ return data_bitflips;
+
+ bitflips_threshold -= data_bitflips;
+
+ ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
+ if (ecc_bitflips < 0)
+ return ecc_bitflips;
+
+ bitflips_threshold -= ecc_bitflips;
+
+ extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
+ bitflips_threshold);
+ if (extraoob_bitflips < 0)
+ return extraoob_bitflips;
+
+ if (data_bitflips)
+ memset(data, 0xff, datalen);
+
+ if (ecc_bitflips)
+ memset(ecc, 0xff, ecclen);
+
+ if (extraoob_bitflips)
+ memset(extraoob, 0xff, extraooblen);
+
+ return data_bitflips + ecc_bitflips + extraoob_bitflips;
+}
+EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
+
+/**
* nanddev_isbad() - Check if a block is bad
* @nand: NAND device
* @pos: position pointing to the block we want to check
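nand_check_erased_ecc_chunk() is meant to be called once per ECC chunk in a driver's read path, exactly as the new Realtek ECC driver later in this series does. A minimal sketch of a consumer (placeholder names; engine_correct() stands in for the driver's real correction call):

ret = engine_correct(chunk, ecc);	/* hypothetical engine call */
if (ret < 0)
	ret = nand_check_erased_ecc_chunk(chunk, chunksize,
					  ecc, eccbytes,
					  NULL, 0,
					  ecc_strength);
if (ret < 0)
	mtd->ecc_stats.failed++;
else
	max_bitflips = max_t(unsigned int, max_bitflips, ret);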
diff --git a/drivers/mtd/nand/ecc-mxic.c b/drivers/mtd/nand/ecc-mxic.c
index 1bf9a5a64b87..60cdcb4175ef 100644
--- a/drivers/mtd/nand/ecc-mxic.c
+++ b/drivers/mtd/nand/ecc-mxic.c
@@ -322,14 +322,14 @@ static int mxic_ecc_init_ctx(struct nand_device *nand, struct device *dev)
sg_init_table(ctx->sg, 2);
/* Configuration dump and sanity checks */
- dev_err(dev, "DPE version number: %d\n",
+ dev_dbg(dev, "DPE version number: %d\n",
readl(mxic->regs + DP_VER) >> DP_VER_OFFSET);
- dev_err(dev, "Chunk size: %d\n", readl(mxic->regs + CHUNK_SIZE));
- dev_err(dev, "Main size: %d\n", readl(mxic->regs + MAIN_SIZE));
- dev_err(dev, "Spare size: %d\n", SPARE_SZ(spare_reg));
- dev_err(dev, "Rsv size: %ld\n", RSV_SZ(spare_reg));
- dev_err(dev, "Parity size: %d\n", ctx->parity_sz);
- dev_err(dev, "Meta size: %d\n", ctx->meta_sz);
+ dev_dbg(dev, "Chunk size: %d\n", readl(mxic->regs + CHUNK_SIZE));
+ dev_dbg(dev, "Main size: %d\n", readl(mxic->regs + MAIN_SIZE));
+ dev_dbg(dev, "Spare size: %d\n", SPARE_SZ(spare_reg));
+ dev_dbg(dev, "Rsv size: %ld\n", RSV_SZ(spare_reg));
+ dev_dbg(dev, "Parity size: %d\n", ctx->parity_sz);
+ dev_dbg(dev, "Meta size: %d\n", ctx->meta_sz);
if ((ctx->meta_sz + ctx->parity_sz + RSV_SZ(spare_reg)) !=
SPARE_SZ(spare_reg)) {
diff --git a/drivers/mtd/nand/ecc-realtek.c b/drivers/mtd/nand/ecc-realtek.c
new file mode 100644
index 000000000000..7d718934c909
--- /dev/null
+++ b/drivers/mtd/nand/ecc-realtek.c
@@ -0,0 +1,464 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Realtek hardware ECC engine in RTL93xx SoCs
+ */
+
+#include <linux/bitfield.h>
+#include <linux/dma-mapping.h>
+#include <linux/mtd/nand.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+/*
+ * The Realtek ECC engine has two operation modes.
+ *
+ * - BCH6 : Generate 10 ECC bytes from 512 data bytes plus 6 free bytes
+ * - BCH12 : Generate 20 ECC bytes from 512 data bytes plus 6 free bytes
+ *
+ * It can handle arbitrary NAND flash chips with different block and OOB sizes. Currently there
+ * are only two known devices in the wild that have NAND flash and make use of this ECC engine
+ * (Linksys LGS328C & LGS352C). To keep compatibility with vendor firmware, new modes can only
+ * be added when new data layouts have been analyzed. For now allow BCH6 on flash with 2048 byte
+ * blocks and 64 bytes of OOB.
+ *
+ * This driver aligns with kernel ECC naming conventions. Nevertheless, a short note on the
+ * Realtek naming conventions for the different structures in the OOB area.
+ *
+ * - BBI : Bad block indicator. The first two bytes of OOB. Protected by ECC!
+ * - tag : 6 User/free bytes. First tag "contains" 2 bytes BBI. Protected by ECC!
+ * - syndrome : ECC/parity bytes
+ *
+ * Altogether this currently gives the following block layout.
+ *
+ * +------+------+------+------+-----+------+------+------+------+-----+-----+-----+-----+
+ * | 512 | 512 | 512 | 512 | 2 | 4 | 6 | 6 | 6 | 10 | 10 | 10 | 10 |
+ * +------+------+------+------+-----+------+------+------+------+-----+-----+-----+-----+
+ * | data | data | data | data | BBI | free | free | free | free | ECC | ECC | ECC | ECC |
+ * +------+------+------+------+-----+------+------+------+------+-----+-----+-----+-----+
+ */
+
+#define RTL_ECC_ALLOWED_PAGE_SIZE 2048
+#define RTL_ECC_ALLOWED_OOB_SIZE 64
+#define RTL_ECC_ALLOWED_STRENGTH 6
+
+#define RTL_ECC_BLOCK_SIZE 512
+#define RTL_ECC_FREE_SIZE 6
+#define RTL_ECC_PARITY_SIZE_BCH6 10
+#define RTL_ECC_PARITY_SIZE_BCH12 20
+
+/*
+ * The engine is fed with two DMA regions. One for data (always 512 bytes) and one for free bytes
+ * and parity (either 16 bytes for BCH6 or 26 bytes for BCH12). Start and length of each must be
+ * aligned to a multiple of 4.
+ */
+
+#define RTL_ECC_DMA_FREE_PARITY_SIZE ALIGN(RTL_ECC_FREE_SIZE + RTL_ECC_PARITY_SIZE_BCH12, 4)
+#define RTL_ECC_DMA_SIZE (RTL_ECC_BLOCK_SIZE + RTL_ECC_DMA_FREE_PARITY_SIZE)
+
+#define RTL_ECC_CFG 0x00
+#define RTL_ECC_BCH6 0
+#define RTL_ECC_BCH12 BIT(28)
+#define RTL_ECC_DMA_PRECISE BIT(12)
+#define RTL_ECC_BURST_128 GENMASK(1, 0)
+#define RTL_ECC_DMA_TRIGGER 0x08
+#define RTL_ECC_OP_DECODE 0
+#define RTL_ECC_OP_ENCODE BIT(0)
+#define RTL_ECC_DMA_START 0x0c
+#define RTL_ECC_DMA_TAG 0x10
+#define RTL_ECC_STATUS 0x14
+#define RTL_ECC_CORR_COUNT GENMASK(19, 12)
+#define RTL_ECC_RESULT BIT(8)
+#define RTL_ECC_ALL_ONE BIT(4)
+#define RTL_ECC_OP_STATUS BIT(0)
+
+struct rtl_ecc_engine {
+ struct device *dev;
+ struct nand_ecc_engine engine;
+ struct mutex lock;
+ char *buf;
+ dma_addr_t buf_dma;
+ struct regmap *regmap;
+};
+
+struct rtl_ecc_ctx {
+ struct rtl_ecc_engine *rtlc;
+ struct nand_ecc_req_tweak_ctx req_ctx;
+ int steps;
+ int bch_mode;
+ int strength;
+ int parity_size;
+};
+
+static const struct regmap_config rtl_ecc_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static inline void *nand_to_ctx(struct nand_device *nand)
+{
+ return nand->ecc.ctx.priv;
+}
+
+static inline struct rtl_ecc_engine *nand_to_rtlc(struct nand_device *nand)
+{
+ struct nand_ecc_engine *eng = nand->ecc.engine;
+
+ return container_of(eng, struct rtl_ecc_engine, engine);
+}
+
+static int rtl_ecc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct rtl_ecc_ctx *ctx = nand_to_ctx(nand);
+
+ if (section < 0 || section >= ctx->steps)
+ return -ERANGE;
+
+ oobregion->offset = ctx->steps * RTL_ECC_FREE_SIZE + section * ctx->parity_size;
+ oobregion->length = ctx->parity_size;
+
+ return 0;
+}
+
+static int rtl_ecc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct rtl_ecc_ctx *ctx = nand_to_ctx(nand);
+ int bbm;
+
+ if (section < 0 || section >= ctx->steps)
+ return -ERANGE;
+
+ /* reserve 2 BBM bytes in first block */
+ bbm = section ? 0 : 2;
+ oobregion->offset = section * RTL_ECC_FREE_SIZE + bbm;
+ oobregion->length = RTL_ECC_FREE_SIZE - bbm;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops rtl_ecc_ooblayout_ops = {
+ .ecc = rtl_ecc_ooblayout_ecc,
+ .free = rtl_ecc_ooblayout_free,
+};
+
+static void rtl_ecc_kick_engine(struct rtl_ecc_ctx *ctx, int operation)
+{
+ struct rtl_ecc_engine *rtlc = ctx->rtlc;
+
+ regmap_write(rtlc->regmap, RTL_ECC_CFG,
+ ctx->bch_mode | RTL_ECC_BURST_128 | RTL_ECC_DMA_PRECISE);
+
+ regmap_write(rtlc->regmap, RTL_ECC_DMA_START, rtlc->buf_dma);
+ regmap_write(rtlc->regmap, RTL_ECC_DMA_TAG, rtlc->buf_dma + RTL_ECC_BLOCK_SIZE);
+ regmap_write(rtlc->regmap, RTL_ECC_DMA_TRIGGER, operation);
+}
+
+static int rtl_ecc_wait_for_engine(struct rtl_ecc_ctx *ctx)
+{
+ struct rtl_ecc_engine *rtlc = ctx->rtlc;
+ int ret, status, bitflips;
+ bool all_one;
+
+ /*
+ * The ECC engine needs 6-8 us to encode/decode a BCH6 syndrome for 512 bytes of data
+ * and 6 free bytes. In case the NAND area has been erased and all data and OOB are
+ * set to 0xff, decoding takes 30 us (reason unknown). Although the engine can trigger
+ * interrupts when finished, use active polling for now. A polling interval of 12 us has
+ * proven to be a good tradeoff between performance and overhead.
+ */
+
+ ret = regmap_read_poll_timeout(rtlc->regmap, RTL_ECC_STATUS, status,
+ !(status & RTL_ECC_OP_STATUS), 12, 1000000);
+ if (ret)
+ return ret;
+
+ ret = FIELD_GET(RTL_ECC_RESULT, status);
+ all_one = FIELD_GET(RTL_ECC_ALL_ONE, status);
+ bitflips = FIELD_GET(RTL_ECC_CORR_COUNT, status);
+
+ /* For erased blocks (all bits one) error status can be ignored */
+ if (all_one)
+ ret = 0;
+
+ return ret ? -EBADMSG : bitflips;
+}
+
+static int rtl_ecc_run_engine(struct rtl_ecc_ctx *ctx, char *data, char *free,
+ char *parity, int operation)
+{
+ struct rtl_ecc_engine *rtlc = ctx->rtlc;
+ char *buf_parity = rtlc->buf + RTL_ECC_BLOCK_SIZE + RTL_ECC_FREE_SIZE;
+ char *buf_free = rtlc->buf + RTL_ECC_BLOCK_SIZE;
+ char *buf_data = rtlc->buf;
+ int ret;
+
+ mutex_lock(&rtlc->lock);
+
+ memcpy(buf_data, data, RTL_ECC_BLOCK_SIZE);
+ memcpy(buf_free, free, RTL_ECC_FREE_SIZE);
+ memcpy(buf_parity, parity, ctx->parity_size);
+
+ dma_sync_single_for_device(rtlc->dev, rtlc->buf_dma, RTL_ECC_DMA_SIZE, DMA_TO_DEVICE);
+ rtl_ecc_kick_engine(ctx, operation);
+ ret = rtl_ecc_wait_for_engine(ctx);
+ dma_sync_single_for_cpu(rtlc->dev, rtlc->buf_dma, RTL_ECC_DMA_SIZE, DMA_FROM_DEVICE);
+
+ if (ret >= 0) {
+ memcpy(data, buf_data, RTL_ECC_BLOCK_SIZE);
+ memcpy(free, buf_free, RTL_ECC_FREE_SIZE);
+ memcpy(parity, buf_parity, ctx->parity_size);
+ }
+
+ mutex_unlock(&rtlc->lock);
+
+ return ret;
+}
+
+static int rtl_ecc_prepare_io_req(struct nand_device *nand, struct nand_page_io_req *req)
+{
+ struct rtl_ecc_engine *rtlc = nand_to_rtlc(nand);
+ struct rtl_ecc_ctx *ctx = nand_to_ctx(nand);
+ char *data, *free, *parity;
+ int ret = 0;
+
+ if (req->mode == MTD_OPS_RAW)
+ return 0;
+
+ nand_ecc_tweak_req(&ctx->req_ctx, req);
+
+ if (req->type == NAND_PAGE_READ)
+ return 0;
+
+ free = req->oobbuf.in;
+ data = req->databuf.in;
+ parity = req->oobbuf.in + ctx->steps * RTL_ECC_FREE_SIZE;
+
+ for (int i = 0; i < ctx->steps; i++) {
+ ret |= rtl_ecc_run_engine(ctx, data, free, parity, RTL_ECC_OP_ENCODE);
+
+ free += RTL_ECC_FREE_SIZE;
+ data += RTL_ECC_BLOCK_SIZE;
+ parity += ctx->parity_size;
+ }
+
+ if (unlikely(ret))
+ dev_dbg(rtlc->dev, "ECC calculation failed\n");
+
+ return ret ? -EBADMSG : 0;
+}
+
+static int rtl_ecc_finish_io_req(struct nand_device *nand, struct nand_page_io_req *req)
+{
+ struct rtl_ecc_engine *rtlc = nand_to_rtlc(nand);
+ struct rtl_ecc_ctx *ctx = nand_to_ctx(nand);
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ char *data, *free, *parity;
+ bool failure = false;
+ int bitflips = 0;
+
+ if (req->mode == MTD_OPS_RAW)
+ return 0;
+
+ if (req->type == NAND_PAGE_WRITE) {
+ nand_ecc_restore_req(&ctx->req_ctx, req);
+ return 0;
+ }
+
+ free = req->oobbuf.in;
+ data = req->databuf.in;
+ parity = req->oobbuf.in + ctx->steps * RTL_ECC_FREE_SIZE;
+
+ for (int i = 0; i < ctx->steps; i++) {
+ int ret = rtl_ecc_run_engine(ctx, data, free, parity, RTL_ECC_OP_DECODE);
+
+ if (unlikely(ret < 0))
+ /* ECC totally fails for bitflips in erased blocks */
+ ret = nand_check_erased_ecc_chunk(data, RTL_ECC_BLOCK_SIZE,
+ parity, ctx->parity_size,
+ free, RTL_ECC_FREE_SIZE,
+ ctx->strength);
+ if (unlikely(ret < 0)) {
+ failure = true;
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += ret;
+ bitflips = max_t(unsigned int, bitflips, ret);
+ }
+
+ free += RTL_ECC_FREE_SIZE;
+ data += RTL_ECC_BLOCK_SIZE;
+ parity += ctx->parity_size;
+ }
+
+ nand_ecc_restore_req(&ctx->req_ctx, req);
+
+ if (unlikely(failure))
+ dev_dbg(rtlc->dev, "ECC correction failed\n");
+ else if (unlikely(bitflips > 2))
+ dev_dbg(rtlc->dev, "%d bitflips detected\n", bitflips);
+
+ return failure ? -EBADMSG : bitflips;
+}
+
+static int rtl_ecc_check_support(struct nand_device *nand)
+{
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ struct device *dev = nand->ecc.engine->dev;
+
+ if (mtd->oobsize != RTL_ECC_ALLOWED_OOB_SIZE ||
+ mtd->writesize != RTL_ECC_ALLOWED_PAGE_SIZE) {
+ dev_err(dev, "only flash geometry data=%d, oob=%d supported\n",
+ RTL_ECC_ALLOWED_PAGE_SIZE, RTL_ECC_ALLOWED_OOB_SIZE);
+ return -EINVAL;
+ }
+
+ if (nand->ecc.user_conf.algo != NAND_ECC_ALGO_BCH ||
+ nand->ecc.user_conf.strength != RTL_ECC_ALLOWED_STRENGTH ||
+ nand->ecc.user_conf.placement != NAND_ECC_PLACEMENT_OOB ||
+ nand->ecc.user_conf.step_size != RTL_ECC_BLOCK_SIZE) {
+ dev_err(dev, "only algo=bch, strength=%d, placement=oob, step=%d supported\n",
+ RTL_ECC_ALLOWED_STRENGTH, RTL_ECC_BLOCK_SIZE);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rtl_ecc_init_ctx(struct nand_device *nand)
+{
+ struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
+ struct rtl_ecc_engine *rtlc = nand_to_rtlc(nand);
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ int strength = nand->ecc.user_conf.strength;
+ struct device *dev = nand->ecc.engine->dev;
+ struct rtl_ecc_ctx *ctx;
+ int ret;
+
+ ret = rtl_ecc_check_support(nand);
+ if (ret)
+ return ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ nand->ecc.ctx.priv = ctx;
+ mtd_set_ooblayout(mtd, &rtl_ecc_ooblayout_ops);
+
+ conf->algo = NAND_ECC_ALGO_BCH;
+ conf->strength = strength;
+ conf->step_size = RTL_ECC_BLOCK_SIZE;
+ conf->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+
+ ctx->rtlc = rtlc;
+ ctx->steps = mtd->writesize / RTL_ECC_BLOCK_SIZE;
+ ctx->strength = strength;
+ ctx->bch_mode = strength == 6 ? RTL_ECC_BCH6 : RTL_ECC_BCH12;
+ ctx->parity_size = strength == 6 ? RTL_ECC_PARITY_SIZE_BCH6 : RTL_ECC_PARITY_SIZE_BCH12;
+
+ ret = nand_ecc_init_req_tweaking(&ctx->req_ctx, nand);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev, "using bch%d with geometry data=%dx%d, free=%dx6, parity=%dx%d",
+ conf->strength, ctx->steps, conf->step_size,
+ ctx->steps, ctx->steps, ctx->parity_size);
+
+ return 0;
+}
+
+static void rtl_ecc_cleanup_ctx(struct nand_device *nand)
+{
+ struct rtl_ecc_ctx *ctx = nand_to_ctx(nand);
+
+ if (ctx)
+ nand_ecc_cleanup_req_tweaking(&ctx->req_ctx);
+}
+
+static struct nand_ecc_engine_ops rtl_ecc_engine_ops = {
+ .init_ctx = rtl_ecc_init_ctx,
+ .cleanup_ctx = rtl_ecc_cleanup_ctx,
+ .prepare_io_req = rtl_ecc_prepare_io_req,
+ .finish_io_req = rtl_ecc_finish_io_req,
+};
+
+static int rtl_ecc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rtl_ecc_engine *rtlc;
+ void __iomem *base;
+ int ret;
+
+ rtlc = devm_kzalloc(dev, sizeof(*rtlc), GFP_KERNEL);
+ if (!rtlc)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ ret = devm_mutex_init(dev, &rtlc->lock);
+ if (ret)
+ return ret;
+
+ rtlc->regmap = devm_regmap_init_mmio(dev, base, &rtl_ecc_regmap_config);
+ if (IS_ERR(rtlc->regmap))
+ return PTR_ERR(rtlc->regmap);
+
+ /*
+ * Focus on simplicity and use a preallocated DMA buffer for data exchange with the
+ * engine. For now make it a noncoherent memory model as invalidating/flushing caches
+ * is faster than reading/writing uncached memory on the known architectures.
+ */
+
+ rtlc->buf = dma_alloc_noncoherent(dev, RTL_ECC_DMA_SIZE, &rtlc->buf_dma,
+ DMA_BIDIRECTIONAL, GFP_KERNEL);
+ if (!rtlc->buf)
+ return -ENOMEM;
+
+ rtlc->dev = dev;
+ rtlc->engine.dev = dev;
+ rtlc->engine.ops = &rtl_ecc_engine_ops;
+ rtlc->engine.integration = NAND_ECC_ENGINE_INTEGRATION_EXTERNAL;
+
+ nand_ecc_register_on_host_hw_engine(&rtlc->engine);
+
+ platform_set_drvdata(pdev, rtlc);
+
+ return 0;
+}
+
+static void rtl_ecc_remove(struct platform_device *pdev)
+{
+ struct rtl_ecc_engine *rtlc = platform_get_drvdata(pdev);
+
+ nand_ecc_unregister_on_host_hw_engine(&rtlc->engine);
+ dma_free_noncoherent(rtlc->dev, RTL_ECC_DMA_SIZE, rtlc->buf, rtlc->buf_dma,
+ DMA_BIDIRECTIONAL);
+}
+
+static const struct of_device_id rtl_ecc_of_ids[] = {
+ {
+ .compatible = "realtek,rtl9301-ecc",
+ },
+ { /* sentinel */ },
+};
+
+static struct platform_driver rtl_ecc_driver = {
+ .driver = {
+ .name = "rtl-nand-ecc-engine",
+ .of_match_table = rtl_ecc_of_ids,
+ },
+ .probe = rtl_ecc_probe,
+ .remove = rtl_ecc_remove,
+};
+module_platform_driver(rtl_ecc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Markus Stockhausen <markus.stockhausen@gmx.de>");
+MODULE_DESCRIPTION("Realtek NAND hardware ECC controller");
diff --git a/drivers/mtd/nand/ecc.c b/drivers/mtd/nand/ecc.c
index 8f996e8d61b8..6ccdff3fc913 100644
--- a/drivers/mtd/nand/ecc.c
+++ b/drivers/mtd/nand/ecc.c
@@ -552,7 +552,7 @@ void nand_ecc_tweak_req(struct nand_ecc_req_tweak_ctx *ctx,
memset(tweak->oobbuf.in, 0xFF, ctx->oob_buffer_size);
}
- /* Copy the data that must be writen in the bounce buffers, if needed */
+ /* Copy the data that must be written in the bounce buffers, if needed */
if (orig->type == NAND_PAGE_WRITE) {
if (ctx->bounce_data)
memcpy((void *)tweak->databuf.out + orig->dataoffs,
diff --git a/drivers/mtd/nand/onenand/onenand_omap2.c b/drivers/mtd/nand/onenand/onenand_omap2.c
index f9a386b69050..0793251ada3b 100644
--- a/drivers/mtd/nand/onenand/onenand_omap2.c
+++ b/drivers/mtd/nand/onenand/onenand_omap2.c
@@ -603,7 +603,6 @@ static struct platform_driver omap2_onenand_driver = {
module_platform_driver(omap2_onenand_driver);
-MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");
diff --git a/drivers/mtd/nand/qpic_common.c b/drivers/mtd/nand/qpic_common.c
index 8e604cc22ca3..db6c46a6fe01 100644
--- a/drivers/mtd/nand/qpic_common.c
+++ b/drivers/mtd/nand/qpic_common.c
@@ -89,10 +89,8 @@ void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
memset(&bam_txn->bam_positions, 0, sizeof(bam_txn->bam_positions));
bam_txn->last_data_desc = NULL;
- sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
- QPIC_PER_CW_CMD_SGL);
- sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
- QPIC_PER_CW_DATA_SGL);
+ sg_init_table(bam_txn->cmd_sgl, bam_txn->cmd_sgl_nitems);
+ sg_init_table(bam_txn->data_sgl, bam_txn->data_sgl_nitems);
reinit_completion(&bam_txn->txn_done);
}
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index 4b99d9c422c3..7408f34f0c68 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -77,32 +77,6 @@ config MTD_NAND_NDFC
help
NDFC Nand Flash Controllers are integrated in IBM/AMCC's 4xx SoCs
-config MTD_NAND_S3C2410
- tristate "Samsung S3C NAND controller"
- depends on ARCH_S3C64XX
- help
- This enables the NAND flash controller on the S3C24xx and S3C64xx
- SoCs
-
- No board specific support is done by this driver, each board
- must advertise a platform_device for the driver to attach.
-
-config MTD_NAND_S3C2410_DEBUG
- bool "Samsung S3C NAND controller debug"
- depends on MTD_NAND_S3C2410
- help
- Enable debugging of the S3C NAND driver
-
-config MTD_NAND_S3C2410_CLKSTOP
- bool "Samsung S3C NAND IDLE clock stop"
- depends on MTD_NAND_S3C2410
- default n
- help
- Stop the clock to the NAND controller when there is no chip
- selected to save power. This will mean there is a small delay
- when the is NAND chip selected or released, but will save
- approximately 5mA of power when there is nothing happening.
-
config MTD_NAND_SHARPSL
tristate "Sharp SL Series (C7xx + others) NAND controller"
depends on ARCH_PXA || COMPILE_TEST
@@ -462,12 +436,12 @@ config MTD_NAND_NUVOTON_MA35
Enables support for the NAND controller found on
the Nuvoton MA35 series SoCs.
-config MTD_NAND_LOONGSON1
- tristate "Loongson1 NAND controller"
- depends on LOONGSON1_APB_DMA || COMPILE_TEST
+config MTD_NAND_LOONGSON
+ tristate "Loongson NAND controller"
+ depends on LOONGSON1_APB_DMA || LOONGSON2_APB_DMA || COMPILE_TEST
select REGMAP_MMIO
help
- Enables support for NAND controller on Loongson1 SoCs.
+ Enables support for the NAND controller on Loongson family chips.
comment "Misc"
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index 711d043ad4f8..619760138d32 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -9,7 +9,6 @@ obj-$(CONFIG_MTD_NAND_DENALI) += denali.o
obj-$(CONFIG_MTD_NAND_DENALI_PCI) += denali_pci.o
obj-$(CONFIG_MTD_NAND_DENALI_DT) += denali_dt.o
obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o
-obj-$(CONFIG_MTD_NAND_S3C2410) += s3c2410.o
obj-$(CONFIG_MTD_NAND_DAVINCI) += davinci_nand.o
obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o
obj-$(CONFIG_MTD_NAND_FSMC) += fsmc_nand.o
@@ -59,7 +58,7 @@ obj-$(CONFIG_MTD_NAND_ROCKCHIP) += rockchip-nand-controller.o
obj-$(CONFIG_MTD_NAND_PL35X) += pl35x-nand-controller.o
obj-$(CONFIG_MTD_NAND_RENESAS) += renesas-nand-controller.o
obj-$(CONFIG_MTD_NAND_NUVOTON_MA35) += nuvoton-ma35d1-nand-controller.o
-obj-$(CONFIG_MTD_NAND_LOONGSON1) += loongson1-nand-controller.o
+obj-$(CONFIG_MTD_NAND_LOONGSON) += loongson-nand-controller.o
nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o
nand-objs += nand_onfi.o
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index 84ab4a83cbd6..83ba4ebd02d4 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -1240,7 +1240,7 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
const struct nand_interface_config *conf,
struct atmel_smc_cs_conf *smcconf)
{
- u32 ncycles, totalcycles, timeps, mckperiodps;
+ u32 ncycles, totalcycles, timeps, mckperiodps, pulse;
struct atmel_nand_controller *nc;
int ret;
@@ -1366,11 +1366,16 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
ATMEL_SMC_MODE_TDFMODE_OPTIMIZED;
/*
- * Read pulse timing directly matches tRP:
+ * Read pulse timing would directly match tRP,
+ * but some NAND flash chips (S34ML01G2 and W29N02KVxxAF)
+ * do not work properly in timing mode 3.
+ * The workaround is to extend the SMC NRD pulse to meet tREA
+ * timing.
*
- * NRD_PULSE = tRP
+ * NRD_PULSE = max(tRP, tREA)
*/
- ncycles = DIV_ROUND_UP(conf->timings.sdr.tRP_min, mckperiodps);
+ pulse = max(conf->timings.sdr.tRP_min, conf->timings.sdr.tREA_max);
+ ncycles = DIV_ROUND_UP(pulse, mckperiodps);
totalcycles += ncycles;
ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NRD_SHIFT,
ncycles);
@@ -1378,13 +1383,23 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
return ret;
/*
+ * Read setup timing depends on the operation done on the NAND:
+ *
+ * NRD_SETUP = max(tAR, tCLR)
+ */
+ timeps = max(conf->timings.sdr.tAR_min, conf->timings.sdr.tCLR_min);
+ ncycles = DIV_ROUND_UP(timeps, mckperiodps);
+ totalcycles += ncycles;
+ ret = atmel_smc_cs_conf_set_setup(smcconf, ATMEL_SMC_NRD_SHIFT, ncycles);
+ if (ret)
+ return ret;
+
+ /*
* The read cycle timing is directly matching tRC, but is also
* dependent on the setup and hold timings we calculated earlier,
* which gives:
*
- * NRD_CYCLE = max(tRC, NRD_PULSE + NRD_HOLD)
- *
- * NRD_SETUP is always 0.
+ * NRD_CYCLE = max(tRC, NRD_SETUP + NRD_PULSE + NRD_HOLD)
*/
ncycles = DIV_ROUND_UP(conf->timings.sdr.tRC_min, mckperiodps);
ncycles = max(totalcycles, ncycles);
@@ -1848,7 +1863,7 @@ atmel_nand_controller_legacy_add_nands(struct atmel_nand_controller *nc)
static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc)
{
- struct device_node *np, *nand_np;
+ struct device_node *np;
struct device *dev = nc->dev;
int ret, reg_cells;
u32 val;
@@ -1875,7 +1890,7 @@ static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc)
reg_cells += val;
- for_each_child_of_node(np, nand_np) {
+ for_each_child_of_node_scoped(np, nand_np) {
struct atmel_nand *nand;
nand = atmel_nand_create(nc, nand_np, reg_cells);
diff --git a/drivers/mtd/nand/raw/atmel/pmecc.c b/drivers/mtd/nand/raw/atmel/pmecc.c
index 0b402823b619..1d0e93e4edb1 100644
--- a/drivers/mtd/nand/raw/atmel/pmecc.c
+++ b/drivers/mtd/nand/raw/atmel/pmecc.c
@@ -1010,4 +1010,3 @@ module_platform_driver(atmel_pmecc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_DESCRIPTION("PMECC engine driver");
-MODULE_ALIAS("platform:atmel_pmecc");
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
index df61db8ce466..b13b2b0c3f30 100644
--- a/drivers/mtd/nand/raw/fsmc_nand.c
+++ b/drivers/mtd/nand/raw/fsmc_nand.c
@@ -876,10 +876,14 @@ static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
if (!of_property_read_u32(np, "bank-width", &val)) {
if (val == 2) {
nand->options |= NAND_BUSWIDTH_16;
- } else if (val != 1) {
+ } else if (val == 1) {
+ nand->options |= NAND_BUSWIDTH_AUTO;
+ } else {
dev_err(&pdev->dev, "invalid bank-width %u\n", val);
return -EINVAL;
}
+ } else {
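+ /* No bank-width property: let the NAND core detect the bus width. */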
+ nand->options |= NAND_BUSWIDTH_AUTO;
}
if (of_property_read_bool(np, "nand-skip-bbtscan"))
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index f4e68008ea03..a750f5839e34 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -145,6 +145,9 @@ err_clk:
return ret;
}
+#define gpmi_enable_clk(x) __gpmi_enable_clk(x, true)
+#define gpmi_disable_clk(x) __gpmi_enable_clk(x, false)
+
static int gpmi_init(struct gpmi_nand_data *this)
{
struct resources *r = &this->resources;
@@ -2765,6 +2768,11 @@ static int gpmi_nand_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
pm_runtime_use_autosuspend(&pdev->dev);
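+ /*
+ * Without CONFIG_PM the runtime PM callbacks never run, so the clocks
+ * must be enabled here and disabled again in gpmi_nand_remove().
+ */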
+#ifndef CONFIG_PM
+ ret = gpmi_enable_clk(this);
+ if (ret)
+ goto exit_acquire_resources;
+#endif
ret = gpmi_init(this);
if (ret)
@@ -2800,6 +2808,9 @@ static void gpmi_nand_remove(struct platform_device *pdev)
release_resources(this);
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+#ifndef CONFIG_PM
+ gpmi_disable_clk(this);
+#endif
}
static int gpmi_pm_suspend(struct device *dev)
@@ -2846,9 +2857,6 @@ static int gpmi_pm_resume(struct device *dev)
return 0;
}
-#define gpmi_enable_clk(x) __gpmi_enable_clk(x, true)
-#define gpmi_disable_clk(x) __gpmi_enable_clk(x, false)
-
static int gpmi_runtime_suspend(struct device *dev)
{
struct gpmi_nand_data *this = dev_get_drvdata(dev);
diff --git a/drivers/mtd/nand/raw/loongson-nand-controller.c b/drivers/mtd/nand/raw/loongson-nand-controller.c
new file mode 100644
index 000000000000..8490412d5be1
--- /dev/null
+++ b/drivers/mtd/nand/raw/loongson-nand-controller.c
@@ -0,0 +1,1024 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * NAND Controller Driver for Loongson family chips
+ *
+ * Copyright (C) 2015-2025 Keguang Zhang <keguang.zhang@gmail.com>
+ * Copyright (C) 2025 Binbin Zhou <zhoubinbin@loongson.cn>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/bitfield.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/sizes.h>
+
+/* Loongson NAND Controller Registers */
+#define LOONGSON_NAND_CMD 0x0
+#define LOONGSON_NAND_ADDR1 0x4
+#define LOONGSON_NAND_ADDR2 0x8
+#define LOONGSON_NAND_TIMING 0xc
+#define LOONGSON_NAND_IDL 0x10
+#define LOONGSON_NAND_IDH_STATUS 0x14
+#define LOONGSON_NAND_PARAM 0x18
+#define LOONGSON_NAND_OP_NUM 0x1c
+#define LOONGSON_NAND_CS_RDY_MAP 0x20
+
+/* Bitfields of nand command register */
+#define LOONGSON_NAND_CMD_OP_DONE BIT(10)
+#define LOONGSON_NAND_CMD_OP_SPARE BIT(9)
+#define LOONGSON_NAND_CMD_OP_MAIN BIT(8)
+#define LOONGSON_NAND_CMD_STATUS BIT(7)
+#define LOONGSON_NAND_CMD_RESET BIT(6)
+#define LOONGSON_NAND_CMD_READID BIT(5)
+#define LOONGSON_NAND_CMD_BLOCKS_ERASE BIT(4)
+#define LOONGSON_NAND_CMD_ERASE BIT(3)
+#define LOONGSON_NAND_CMD_WRITE BIT(2)
+#define LOONGSON_NAND_CMD_READ BIT(1)
+#define LOONGSON_NAND_CMD_VALID BIT(0)
+
+/* Bitfields of nand cs/rdy map register */
+#define LOONGSON_NAND_MAP_CS1_SEL GENMASK(11, 8)
+#define LOONGSON_NAND_MAP_RDY1_SEL GENMASK(15, 12)
+#define LOONGSON_NAND_MAP_CS2_SEL GENMASK(19, 16)
+#define LOONGSON_NAND_MAP_RDY2_SEL GENMASK(23, 20)
+#define LOONGSON_NAND_MAP_CS3_SEL GENMASK(27, 24)
+#define LOONGSON_NAND_MAP_RDY3_SEL GENMASK(31, 28)
+
+#define LOONGSON_NAND_CS_SEL0 BIT(0)
+#define LOONGSON_NAND_CS_SEL1 BIT(1)
+#define LOONGSON_NAND_CS_SEL2 BIT(2)
+#define LOONGSON_NAND_CS_SEL3 BIT(3)
+#define LOONGSON_NAND_CS_RDY0 BIT(0)
+#define LOONGSON_NAND_CS_RDY1 BIT(1)
+#define LOONGSON_NAND_CS_RDY2 BIT(2)
+#define LOONGSON_NAND_CS_RDY3 BIT(3)
+
+/* Bitfields of nand timing register */
+#define LOONGSON_NAND_WAIT_CYCLE_MASK GENMASK(7, 0)
+#define LOONGSON_NAND_HOLD_CYCLE_MASK GENMASK(15, 8)
+
+/* Bitfields of nand parameter register */
+#define LOONGSON_NAND_CELL_SIZE_MASK GENMASK(11, 8)
+
+#define LOONGSON_NAND_COL_ADDR_CYC 2U
+#define LOONGSON_NAND_MAX_ADDR_CYC 5U
+
+#define LOONGSON_NAND_READ_ID_SLEEP_US 1000
+#define LOONGSON_NAND_READ_ID_TIMEOUT_US 5000
+
+#define BITS_PER_WORD (4 * BITS_PER_BYTE)
+
+/* Loongson-2K1000 NAND DMA routing register */
+#define LS2K1000_NAND_DMA_MASK GENMASK(2, 0)
+#define LS2K1000_DMA0_CONF 0x0
+#define LS2K1000_DMA1_CONF 0x1
+#define LS2K1000_DMA2_CONF 0x2
+#define LS2K1000_DMA3_CONF 0x3
+#define LS2K1000_DMA4_CONF 0x4
+
+struct loongson_nand_host;
+
+struct loongson_nand_op {
+ char addrs[LOONGSON_NAND_MAX_ADDR_CYC];
+ unsigned int naddrs;
+ unsigned int addrs_offset;
+ unsigned int aligned_offset;
+ unsigned int cmd_reg;
+ unsigned int row_start;
+ unsigned int rdy_timeout_ms;
+ unsigned int orig_len;
+ bool is_readid;
+ bool is_erase;
+ bool is_write;
+ bool is_read;
+ bool is_change_column;
+ size_t len;
+ char *buf;
+};
+
+struct loongson_nand_data {
+ unsigned int max_id_cycle;
+ unsigned int id_cycle_field;
+ unsigned int status_field;
+ unsigned int op_scope_field;
+ unsigned int hold_cycle;
+ unsigned int wait_cycle;
+ unsigned int nand_cs;
+ unsigned int dma_bits;
+ int (*dma_config)(struct device *dev);
+ void (*set_addr)(struct loongson_nand_host *host, struct loongson_nand_op *op);
+};
+
+struct loongson_nand_host {
+ struct device *dev;
+ struct nand_chip chip;
+ struct nand_controller controller;
+ const struct loongson_nand_data *data;
+ unsigned int addr_cs_field;
+ void __iomem *reg_base;
+ struct regmap *regmap;
+ /* DMA Engine stuff */
+ dma_addr_t dma_base;
+ struct dma_chan *dma_chan;
+ dma_cookie_t dma_cookie;
+ struct completion dma_complete;
+};
+
+static const struct regmap_config loongson_nand_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static int loongson_nand_op_cmd_mapping(struct nand_chip *chip, struct loongson_nand_op *op,
+ u8 opcode)
+{
+ struct loongson_nand_host *host = nand_get_controller_data(chip);
+
+ op->row_start = chip->page_shift + 1;
+
+ /* The controller abstracts the following NAND operations. */
+ switch (opcode) {
+ case NAND_CMD_STATUS:
+ op->cmd_reg = LOONGSON_NAND_CMD_STATUS;
+ break;
+ case NAND_CMD_RESET:
+ op->cmd_reg = LOONGSON_NAND_CMD_RESET;
+ break;
+ case NAND_CMD_READID:
+ op->is_readid = true;
+ op->cmd_reg = LOONGSON_NAND_CMD_READID;
+ break;
+ case NAND_CMD_ERASE1:
+ op->is_erase = true;
+ op->addrs_offset = LOONGSON_NAND_COL_ADDR_CYC;
+ break;
+ case NAND_CMD_ERASE2:
+ if (!op->is_erase)
+ return -EOPNOTSUPP;
+ /* During erasing, row_start differs from the default value. */
+ op->row_start = chip->page_shift;
+ op->cmd_reg = LOONGSON_NAND_CMD_ERASE;
+ break;
+ case NAND_CMD_SEQIN:
+ op->is_write = true;
+ break;
+ case NAND_CMD_PAGEPROG:
+ if (!op->is_write)
+ return -EOPNOTSUPP;
+ op->cmd_reg = LOONGSON_NAND_CMD_WRITE;
+ break;
+ case NAND_CMD_READ0:
+ op->is_read = true;
+ break;
+ case NAND_CMD_READSTART:
+ if (!op->is_read)
+ return -EOPNOTSUPP;
+ op->cmd_reg = LOONGSON_NAND_CMD_READ;
+ break;
+ case NAND_CMD_RNDOUT:
+ op->is_change_column = true;
+ break;
+ case NAND_CMD_RNDOUTSTART:
+ if (!op->is_change_column)
+ return -EOPNOTSUPP;
+ op->cmd_reg = LOONGSON_NAND_CMD_READ;
+ break;
+ default:
+ dev_dbg(host->dev, "unsupported opcode: %u\n", opcode);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int loongson_nand_parse_instructions(struct nand_chip *chip, const struct nand_subop *subop,
+ struct loongson_nand_op *op)
+{
+ unsigned int op_id;
+ int ret;
+
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ const struct nand_op_instr *instr = &subop->instrs[op_id];
+ unsigned int offset, naddrs;
+ const u8 *addrs;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ ret = loongson_nand_op_cmd_mapping(chip, op, instr->ctx.cmd.opcode);
+ if (ret < 0)
+ return ret;
+
+ break;
+ case NAND_OP_ADDR_INSTR:
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ if (naddrs > LOONGSON_NAND_MAX_ADDR_CYC)
+ return -EOPNOTSUPP;
+ op->naddrs = naddrs;
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+ memcpy(op->addrs + op->addrs_offset, addrs, naddrs);
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ case NAND_OP_DATA_OUT_INSTR:
+ offset = nand_subop_get_data_start_off(subop, op_id);
+ op->orig_len = nand_subop_get_data_len(subop, op_id);
+ if (instr->type == NAND_OP_DATA_IN_INSTR)
+ op->buf = instr->ctx.data.buf.in + offset;
+ else if (instr->type == NAND_OP_DATA_OUT_INSTR)
+ op->buf = (void *)instr->ctx.data.buf.out + offset;
+
+ break;
+ case NAND_OP_WAITRDY_INSTR:
+ op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static void loongson_nand_set_addr_cs(struct loongson_nand_host *host)
+{
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (!host->data->nand_cs)
+ return;
+
+ /*
+ * The Manufacturer/Chip ID read operation precedes attach_chip, at which point
+ * information such as NAND chip selection and capacity is unknown. As a
+ * workaround, we use 128MB cellsize (2KB pagesize) as a fallback.
+ */
+ if (!mtd->writesize)
+ host->addr_cs_field = GENMASK(17, 16);
+
+ regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR2, host->addr_cs_field,
+ host->data->nand_cs << __ffs(host->addr_cs_field));
+}
+
+static void ls1b_nand_set_addr(struct loongson_nand_host *host, struct loongson_nand_op *op)
+{
+ struct nand_chip *chip = &host->chip;
+ int i;
+
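+ /*
+ * LS1B address layout: the two column cycles occupy the low bits of
+ * ADDR1 (masked to the page size), the row cycles follow at row_start,
+ * and any bits of the fifth cycle that overflow the 32-bit ADDR1
+ * register spill into ADDR2.
+ */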
+ for (i = 0; i < LOONGSON_NAND_MAX_ADDR_CYC; i++) {
+ int shift, mask, val;
+
+ if (i < LOONGSON_NAND_COL_ADDR_CYC) {
+ shift = i * BITS_PER_BYTE;
+ mask = (u32)0xff << shift;
+ mask &= GENMASK(chip->page_shift, 0);
+ val = (u32)op->addrs[i] << shift;
+ regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR1, mask, val);
+ } else if (!op->is_change_column) {
+ shift = op->row_start + (i - LOONGSON_NAND_COL_ADDR_CYC) * BITS_PER_BYTE;
+ mask = (u32)0xff << shift;
+ val = (u32)op->addrs[i] << shift;
+ regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR1, mask, val);
+
+ if (i == 4) {
+ mask = (u32)0xff >> (BITS_PER_WORD - shift);
+ val = (u32)op->addrs[i] >> (BITS_PER_WORD - shift);
+ regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR2, mask, val);
+ }
+ }
+ }
+}
+
+static void ls1c_nand_set_addr(struct loongson_nand_host *host, struct loongson_nand_op *op)
+{
+ int i;
+
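+ /*
+ * The LS1C layout is simpler: column cycles go into ADDR1 and row
+ * cycles into ADDR2, so no cross-register spill handling is needed.
+ */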
+ for (i = 0; i < LOONGSON_NAND_MAX_ADDR_CYC; i++) {
+ int shift, mask, val;
+
+ if (i < LOONGSON_NAND_COL_ADDR_CYC) {
+ shift = i * BITS_PER_BYTE;
+ mask = (u32)0xff << shift;
+ val = (u32)op->addrs[i] << shift;
+ regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR1, mask, val);
+ } else if (!op->is_change_column) {
+ shift = (i - LOONGSON_NAND_COL_ADDR_CYC) * BITS_PER_BYTE;
+ mask = (u32)0xff << shift;
+ val = (u32)op->addrs[i] << shift;
+ regmap_update_bits(host->regmap, LOONGSON_NAND_ADDR2, mask, val);
+ }
+ }
+
+ loongson_nand_set_addr_cs(host);
+}
+
+static void loongson_nand_trigger_op(struct loongson_nand_host *host, struct loongson_nand_op *op)
+{
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int col0 = op->addrs[0];
+ short col;
+
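+ /*
+ * The controller transfers data in buf_align units, so round the
+ * column address down and remember the offset; the surplus leading
+ * bytes are skipped when copying out of the bounce buffer.
+ */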
+ if (!IS_ALIGNED(col0, chip->buf_align)) {
+ col0 = ALIGN_DOWN(op->addrs[0], chip->buf_align);
+ op->aligned_offset = op->addrs[0] - col0;
+ op->addrs[0] = col0;
+ }
+
+ if (host->data->set_addr)
+ host->data->set_addr(host, op);
+
+ /* set operation length */
+ if (op->is_write || op->is_read || op->is_change_column)
+ op->len = ALIGN(op->orig_len + op->aligned_offset, chip->buf_align);
+ else if (op->is_erase)
+ op->len = 1;
+ else
+ op->len = op->orig_len;
+
+ writel(op->len, host->reg_base + LOONGSON_NAND_OP_NUM);
+
+ /* set operation area and scope */
+ col = op->addrs[1] << BITS_PER_BYTE | op->addrs[0];
+ if (op->orig_len && !op->is_readid) {
+ unsigned int op_scope = 0;
+
+ if (col < mtd->writesize) {
+ op->cmd_reg |= LOONGSON_NAND_CMD_OP_MAIN;
+ op_scope = mtd->writesize;
+ }
+
+ op->cmd_reg |= LOONGSON_NAND_CMD_OP_SPARE;
+ op_scope += mtd->oobsize;
+
+ op_scope <<= __ffs(host->data->op_scope_field);
+ regmap_update_bits(host->regmap, LOONGSON_NAND_PARAM,
+ host->data->op_scope_field, op_scope);
+ }
+
+ /* set command */
+ writel(op->cmd_reg, host->reg_base + LOONGSON_NAND_CMD);
+
+ /* trigger operation */
+ regmap_write_bits(host->regmap, LOONGSON_NAND_CMD, LOONGSON_NAND_CMD_VALID,
+ LOONGSON_NAND_CMD_VALID);
+}
+
+static int loongson_nand_wait_for_op_done(struct loongson_nand_host *host,
+ struct loongson_nand_op *op)
+{
+ unsigned int val;
+ int ret = 0;
+
+ if (op->rdy_timeout_ms) {
+ ret = regmap_read_poll_timeout(host->regmap, LOONGSON_NAND_CMD,
+ val, val & LOONGSON_NAND_CMD_OP_DONE,
+ 0, op->rdy_timeout_ms * USEC_PER_MSEC);
+ if (ret)
+ dev_err(host->dev, "operation failed\n");
+ }
+
+ return ret;
+}
+
+static void loongson_nand_dma_callback(void *data)
+{
+ struct loongson_nand_host *host = (struct loongson_nand_host *)data;
+ struct dma_chan *chan = host->dma_chan;
+ struct device *dev = chan->device->dev;
+ enum dma_status status;
+
+ status = dmaengine_tx_status(chan, host->dma_cookie, NULL);
+ if (likely(status == DMA_COMPLETE)) {
+ dev_dbg(dev, "DMA complete with cookie=%d\n", host->dma_cookie);
+ complete(&host->dma_complete);
+ } else {
+ dev_err(dev, "DMA error with cookie=%d\n", host->dma_cookie);
+ }
+}
+
+static int loongson_nand_dma_transfer(struct loongson_nand_host *host, struct loongson_nand_op *op)
+{
+ struct nand_chip *chip = &host->chip;
+ struct dma_chan *chan = host->dma_chan;
+ struct device *dev = chan->device->dev;
+ struct dma_async_tx_descriptor *desc;
+ enum dma_data_direction data_dir = op->is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ enum dma_transfer_direction xfer_dir = op->is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
+ void *buf = op->buf;
+ char *dma_buf = NULL;
+ dma_addr_t dma_addr;
+ int ret;
+
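+ /*
+ * Map aligned buffers for DMA directly and bounce unaligned reads
+ * through a coherent buffer; unaligned (subpage) writes are refused.
+ */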
+ if (IS_ALIGNED((uintptr_t)buf, chip->buf_align) &&
+ IS_ALIGNED(op->orig_len, chip->buf_align)) {
+ dma_addr = dma_map_single(dev, buf, op->orig_len, data_dir);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "failed to map DMA buffer\n");
+ return -ENXIO;
+ }
+ } else if (!op->is_write) {
+ dma_buf = dma_alloc_coherent(dev, op->len, &dma_addr, GFP_KERNEL);
+ if (!dma_buf)
+ return -ENOMEM;
+ } else {
+ dev_err(dev, "subpage writing not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ desc = dmaengine_prep_slave_single(chan, dma_addr, op->len, xfer_dir, DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dev, "failed to prepare DMA descriptor\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ desc->callback = loongson_nand_dma_callback;
+ desc->callback_param = host;
+
+ host->dma_cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(host->dma_cookie);
+ if (ret) {
+ dev_err(dev, "failed to submit DMA descriptor\n");
+ goto err;
+ }
+
+ dev_dbg(dev, "issue DMA with cookie=%d\n", host->dma_cookie);
+ dma_async_issue_pending(chan);
+
+ if (!wait_for_completion_timeout(&host->dma_complete, msecs_to_jiffies(1000))) {
+ dmaengine_terminate_sync(chan);
+ reinit_completion(&host->dma_complete);
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ if (dma_buf)
+ memcpy(buf, dma_buf + op->aligned_offset, op->orig_len);
+err:
+ if (dma_buf)
+ dma_free_coherent(dev, op->len, dma_buf, dma_addr);
+ else
+ dma_unmap_single(dev, dma_addr, op->orig_len, data_dir);
+
+ return ret;
+}
+
+static int loongson_nand_data_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
+{
+ struct loongson_nand_host *host = nand_get_controller_data(chip);
+ struct loongson_nand_op op = {};
+ int ret;
+
+ ret = loongson_nand_parse_instructions(chip, subop, &op);
+ if (ret)
+ return ret;
+
+ loongson_nand_trigger_op(host, &op);
+
+ ret = loongson_nand_dma_transfer(host, &op);
+ if (ret)
+ return ret;
+
+ return loongson_nand_wait_for_op_done(host, &op);
+}
+
+static int loongson_nand_misc_type_exec(struct nand_chip *chip, const struct nand_subop *subop,
+ struct loongson_nand_op *op)
+{
+ struct loongson_nand_host *host = nand_get_controller_data(chip);
+ int ret;
+
+ ret = loongson_nand_parse_instructions(chip, subop, op);
+ if (ret)
+ return ret;
+
+ loongson_nand_trigger_op(host, op);
+
+ return loongson_nand_wait_for_op_done(host, op);
+}
+
+static int loongson_nand_zerolen_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
+{
+ struct loongson_nand_op op = {};
+
+ return loongson_nand_misc_type_exec(chip, subop, &op);
+}
+
+static int loongson_nand_read_id_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
+{
+ struct loongson_nand_host *host = nand_get_controller_data(chip);
+ struct loongson_nand_op op = {};
+ int i, ret;
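+ /*
+ * The six ID bytes are spread across the IDL register (four bytes)
+ * and IDH_STATUS (two bytes); the union lets the copy loop below
+ * hand them back to the core in reverse order.
+ */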
+ union {
+ char ids[6];
+ struct {
+ int idl;
+ u16 idh;
+ };
+ } nand_id;
+
+ ret = loongson_nand_misc_type_exec(chip, subop, &op);
+ if (ret)
+ return ret;
+
+ ret = regmap_read_poll_timeout(host->regmap, LOONGSON_NAND_IDL, nand_id.idl, nand_id.idl,
+ LOONGSON_NAND_READ_ID_SLEEP_US,
+ LOONGSON_NAND_READ_ID_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ nand_id.idh = readw(host->reg_base + LOONGSON_NAND_IDH_STATUS);
+
+ for (i = 0; i < min(host->data->max_id_cycle, op.orig_len); i++)
+ op.buf[i] = nand_id.ids[host->data->max_id_cycle - 1 - i];
+
+ return ret;
+}
+
+static int loongson_nand_read_status_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct loongson_nand_host *host = nand_get_controller_data(chip);
+ struct loongson_nand_op op = {};
+ int val, ret;
+
+ ret = loongson_nand_misc_type_exec(chip, subop, &op);
+ if (ret)
+ return ret;
+
+ val = readl(host->reg_base + LOONGSON_NAND_IDH_STATUS);
+ val &= ~host->data->status_field;
+ op.buf[0] = val << ffs(host->data->status_field);
+
+ return ret;
+}
+
+static const struct nand_op_parser loongson_nand_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(
+ loongson_nand_read_id_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, LOONGSON_NAND_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
+ NAND_OP_PARSER_PATTERN(
+ loongson_nand_read_status_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
+ NAND_OP_PARSER_PATTERN(
+ loongson_nand_zerolen_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ loongson_nand_zerolen_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, LOONGSON_NAND_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ loongson_nand_data_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, LOONGSON_NAND_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 0)),
+ NAND_OP_PARSER_PATTERN(
+ loongson_nand_data_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, LOONGSON_NAND_MAX_ADDR_CYC),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 0),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ );
+
+static int loongson_nand_is_valid_cmd(u8 opcode)
+{
+ if (opcode == NAND_CMD_STATUS || opcode == NAND_CMD_RESET || opcode == NAND_CMD_READID)
+ return 0;
+
+ return -EOPNOTSUPP;
+}
+
+static int loongson_nand_is_valid_cmd_seq(u8 opcode1, u8 opcode2)
+{
+ if (opcode1 == NAND_CMD_RNDOUT && opcode2 == NAND_CMD_RNDOUTSTART)
+ return 0;
+
+ if (opcode1 == NAND_CMD_READ0 && opcode2 == NAND_CMD_READSTART)
+ return 0;
+
+ if (opcode1 == NAND_CMD_ERASE1 && opcode2 == NAND_CMD_ERASE2)
+ return 0;
+
+ if (opcode1 == NAND_CMD_SEQIN && opcode2 == NAND_CMD_PAGEPROG)
+ return 0;
+
+ return -EOPNOTSUPP;
+}
+
+static int loongson_nand_check_op(struct nand_chip *chip, const struct nand_operation *op)
+{
+ const struct nand_op_instr *instr1 = NULL, *instr2 = NULL;
+ int op_id;
+
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ const struct nand_op_instr *instr = &op->instrs[op_id];
+
+ if (instr->type == NAND_OP_CMD_INSTR) {
+ if (!instr1)
+ instr1 = instr;
+ else if (!instr2)
+ instr2 = instr;
+ else
+ break;
+ }
+ }
+
+ if (!instr1)
+ return -EOPNOTSUPP;
+
+ if (!instr2)
+ return loongson_nand_is_valid_cmd(instr1->ctx.cmd.opcode);
+
+ return loongson_nand_is_valid_cmd_seq(instr1->ctx.cmd.opcode, instr2->ctx.cmd.opcode);
+}
+
+static int loongson_nand_exec_op(struct nand_chip *chip, const struct nand_operation *op,
+ bool check_only)
+{
+ if (check_only)
+ return loongson_nand_check_op(chip, op);
+
+ return nand_op_parser_exec_op(chip, &loongson_nand_op_parser, op, check_only);
+}
+
+static int loongson_nand_get_chip_capacity(struct nand_chip *chip)
+{
+ struct loongson_nand_host *host = nand_get_controller_data(chip);
+ u64 chipsize = nanddev_target_size(&chip->base);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
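+ /*
+ * Derive the controller CELL_SIZE encoding from the page and chip
+ * sizes, and record which ADDR2 bits carry the chip select for this
+ * geometry (consumed by loongson_nand_set_addr_cs()).
+ */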
+ switch (mtd->writesize) {
+ case SZ_512:
+ switch (chipsize) {
+ case SZ_8M:
+ host->addr_cs_field = GENMASK(15, 14);
+ return 0x9;
+ case SZ_16M:
+ host->addr_cs_field = GENMASK(16, 15);
+ return 0xa;
+ case SZ_32M:
+ host->addr_cs_field = GENMASK(17, 16);
+ return 0xb;
+ case SZ_64M:
+ host->addr_cs_field = GENMASK(18, 17);
+ return 0xc;
+ case SZ_128M:
+ host->addr_cs_field = GENMASK(19, 18);
+ return 0xd;
+ }
+ break;
+ case SZ_2K:
+ switch (chipsize) {
+ case SZ_128M:
+ host->addr_cs_field = GENMASK(17, 16);
+ return 0x0;
+ case SZ_256M:
+ host->addr_cs_field = GENMASK(18, 17);
+ return 0x1;
+ case SZ_512M:
+ host->addr_cs_field = GENMASK(19, 18);
+ return 0x2;
+ case SZ_1G:
+ host->addr_cs_field = GENMASK(20, 19);
+ return 0x3;
+ }
+ break;
+ case SZ_4K:
+ if (chipsize == SZ_2G) {
+ host->addr_cs_field = GENMASK(20, 19);
+ return 0x4;
+ }
+ break;
+ case SZ_8K:
+ switch (chipsize) {
+ case SZ_4G:
+ host->addr_cs_field = GENMASK(20, 19);
+ return 0x5;
+ case SZ_8G:
+ host->addr_cs_field = GENMASK(21, 20);
+ return 0x6;
+ case SZ_16G:
+ host->addr_cs_field = GENMASK(22, 21);
+ return 0x7;
+ }
+ break;
+ }
+
+ dev_err(host->dev, "Unsupported chip size: %llu MB with page size %u B\n",
+ chipsize, mtd->writesize);
+ return -EINVAL;
+}
+
+static int loongson_nand_attach_chip(struct nand_chip *chip)
+{
+ struct loongson_nand_host *host = nand_get_controller_data(chip);
+ int cell_size = loongson_nand_get_chip_capacity(chip);
+
+ if (cell_size < 0)
+ return cell_size;
+
+ switch (chip->ecc.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ break;
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set cell size */
+ regmap_update_bits(host->regmap, LOONGSON_NAND_PARAM, LOONGSON_NAND_CELL_SIZE_MASK,
+ FIELD_PREP(LOONGSON_NAND_CELL_SIZE_MASK, cell_size));
+
+ regmap_update_bits(host->regmap, LOONGSON_NAND_TIMING, LOONGSON_NAND_HOLD_CYCLE_MASK,
+ FIELD_PREP(LOONGSON_NAND_HOLD_CYCLE_MASK, host->data->hold_cycle));
+
+ regmap_update_bits(host->regmap, LOONGSON_NAND_TIMING, LOONGSON_NAND_WAIT_CYCLE_MASK,
+ FIELD_PREP(LOONGSON_NAND_WAIT_CYCLE_MASK, host->data->wait_cycle));
+
+ chip->ecc.read_page_raw = nand_monolithic_read_page_raw;
+ chip->ecc.write_page_raw = nand_monolithic_write_page_raw;
+
+ return 0;
+}
+
+static const struct nand_controller_ops loongson_nand_controller_ops = {
+ .exec_op = loongson_nand_exec_op,
+ .attach_chip = loongson_nand_attach_chip,
+};
+
+static void loongson_nand_controller_cleanup(struct loongson_nand_host *host)
+{
+ if (host->dma_chan)
+ dma_release_channel(host->dma_chan);
+}
+
+static int ls2k1000_nand_apbdma_config(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ void __iomem *regs;
+ int val;
+
+ regs = devm_platform_ioremap_resource_byname(pdev, "dma-config");
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
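+ /* Route APB DMA channel 0 to the NAND controller. */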
+ val = readl(regs);
+ val &= ~LS2K1000_NAND_DMA_MASK;
+ val |= FIELD_PREP(LS2K1000_NAND_DMA_MASK, LS2K1000_DMA0_CONF);
+ writel(val, regs);
+
+ return 0;
+}
+
+static int loongson_nand_controller_init(struct loongson_nand_host *host)
+{
+ struct device *dev = host->dev;
+ struct dma_chan *chan;
+ struct dma_slave_config cfg = {};
+ int ret, val;
+
+ host->regmap = devm_regmap_init_mmio(dev, host->reg_base, &loongson_nand_regmap_config);
+ if (IS_ERR(host->regmap))
+ return dev_err_probe(dev, PTR_ERR(host->regmap), "failed to init regmap\n");
+
+ if (host->data->id_cycle_field)
+ regmap_update_bits(host->regmap, LOONGSON_NAND_PARAM, host->data->id_cycle_field,
+ host->data->max_id_cycle << __ffs(host->data->id_cycle_field));
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(host->data->dma_bits));
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to set DMA mask\n");
+
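+ /* Map chip selects 1-3 and their ready/busy lines straight through. */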
+ val = FIELD_PREP(LOONGSON_NAND_MAP_CS1_SEL, LOONGSON_NAND_CS_SEL1) |
+ FIELD_PREP(LOONGSON_NAND_MAP_RDY1_SEL, LOONGSON_NAND_CS_RDY1) |
+ FIELD_PREP(LOONGSON_NAND_MAP_CS2_SEL, LOONGSON_NAND_CS_SEL2) |
+ FIELD_PREP(LOONGSON_NAND_MAP_RDY2_SEL, LOONGSON_NAND_CS_RDY2) |
+ FIELD_PREP(LOONGSON_NAND_MAP_CS3_SEL, LOONGSON_NAND_CS_SEL3) |
+ FIELD_PREP(LOONGSON_NAND_MAP_RDY3_SEL, LOONGSON_NAND_CS_RDY3);
+
+ regmap_write(host->regmap, LOONGSON_NAND_CS_RDY_MAP, val);
+
+ if (host->data->dma_config) {
+ ret = host->data->dma_config(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to config DMA routing\n");
+ }
+
+ chan = dma_request_chan(dev, "rxtx");
+ if (IS_ERR(chan))
+ return dev_err_probe(dev, PTR_ERR(chan), "failed to request DMA channel\n");
+ host->dma_chan = chan;
+
+ cfg.src_addr = host->dma_base;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr = host->dma_base;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ ret = dmaengine_slave_config(host->dma_chan, &cfg);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to config DMA channel\n");
+
+ init_completion(&host->dma_complete);
+
+ return 0;
+}
+
+static int loongson_nand_chip_init(struct loongson_nand_host *host)
+{
+ struct device *dev = host->dev;
+ int nchips = of_get_child_count(dev->of_node);
+ struct device_node *chip_np;
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ if (nchips != 1)
+ return dev_err_probe(dev, -EINVAL, "Currently one NAND chip supported\n");
+
+ chip_np = of_get_next_child(dev->of_node, NULL);
+ if (!chip_np)
+ return dev_err_probe(dev, -ENODEV, "failed to get child node for NAND chip\n");
+
+ nand_set_flash_node(chip, chip_np);
+ of_node_put(chip_np);
+ if (!mtd->name)
+ return dev_err_probe(dev, -EINVAL, "Missing MTD label\n");
+
+ nand_set_controller_data(chip, host);
+ chip->controller = &host->controller;
+ chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA | NAND_BROKEN_XD;
+ chip->buf_align = 16;
+ mtd->dev.parent = dev;
+ mtd->owner = THIS_MODULE;
+
+ ret = nand_scan(chip, 1);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to scan NAND chip\n");
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ nand_cleanup(chip);
+ return dev_err_probe(dev, ret, "failed to register MTD device\n");
+ }
+
+ return 0;
+}
+
+static int loongson_nand_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct loongson_nand_data *data;
+ struct loongson_nand_host *host;
+ struct resource *res;
+ int ret;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ host->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(host->reg_base))
+ return PTR_ERR(host->reg_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-dma");
+ if (!res)
+ return dev_err_probe(dev, -EINVAL, "Missing 'nand-dma' in reg-names property\n");
+
+ host->dma_base = dma_map_resource(dev, res->start, resource_size(res),
+ DMA_BIDIRECTIONAL, 0);
+ if (dma_mapping_error(dev, host->dma_base))
+ return -ENXIO;
+
+ host->dev = dev;
+ host->data = data;
+ host->controller.ops = &loongson_nand_controller_ops;
+
+ nand_controller_init(&host->controller);
+
+ ret = loongson_nand_controller_init(host);
+ if (ret)
+ goto err;
+
+ ret = loongson_nand_chip_init(host);
+ if (ret)
+ goto err;
+
+ platform_set_drvdata(pdev, host);
+
+ return 0;
+err:
+ loongson_nand_controller_cleanup(host);
+
+ return ret;
+}
+
+static void loongson_nand_remove(struct platform_device *pdev)
+{
+ struct loongson_nand_host *host = platform_get_drvdata(pdev);
+ struct nand_chip *chip = &host->chip;
+ int ret;
+
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ loongson_nand_controller_cleanup(host);
+}
+
+static const struct loongson_nand_data ls1b_nand_data = {
+ .max_id_cycle = 5,
+ .status_field = GENMASK(15, 8),
+ .hold_cycle = 0x2,
+ .wait_cycle = 0xc,
+ .dma_bits = 32,
+ .set_addr = ls1b_nand_set_addr,
+};
+
+static const struct loongson_nand_data ls1c_nand_data = {
+ .max_id_cycle = 6,
+ .id_cycle_field = GENMASK(14, 12),
+ .status_field = GENMASK(23, 16),
+ .op_scope_field = GENMASK(29, 16),
+ .hold_cycle = 0x2,
+ .wait_cycle = 0xc,
+ .dma_bits = 32,
+ .set_addr = ls1c_nand_set_addr,
+};
+
+static const struct loongson_nand_data ls2k0500_nand_data = {
+ .max_id_cycle = 6,
+ .id_cycle_field = GENMASK(14, 12),
+ .status_field = GENMASK(23, 16),
+ .op_scope_field = GENMASK(29, 16),
+ .hold_cycle = 0x4,
+ .wait_cycle = 0x12,
+ .dma_bits = 64,
+ .set_addr = ls1c_nand_set_addr,
+};
+
+static const struct loongson_nand_data ls2k1000_nand_data = {
+ .max_id_cycle = 6,
+ .id_cycle_field = GENMASK(14, 12),
+ .status_field = GENMASK(23, 16),
+ .op_scope_field = GENMASK(29, 16),
+ .hold_cycle = 0x4,
+ .wait_cycle = 0x12,
+ .nand_cs = 0x2,
+ .dma_bits = 64,
+ .dma_config = ls2k1000_nand_apbdma_config,
+ .set_addr = ls1c_nand_set_addr,
+};
+
+static const struct of_device_id loongson_nand_match[] = {
+ {
+ .compatible = "loongson,ls1b-nand-controller",
+ .data = &ls1b_nand_data,
+ },
+ {
+ .compatible = "loongson,ls1c-nand-controller",
+ .data = &ls1c_nand_data,
+ },
+ {
+ .compatible = "loongson,ls2k0500-nand-controller",
+ .data = &ls2k0500_nand_data,
+ },
+ {
+ .compatible = "loongson,ls2k1000-nand-controller",
+ .data = &ls2k1000_nand_data,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, loongson_nand_match);
+
+static struct platform_driver loongson_nand_driver = {
+ .probe = loongson_nand_probe,
+ .remove = loongson_nand_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = loongson_nand_match,
+ },
+};
+
+module_platform_driver(loongson_nand_driver);
+
+MODULE_AUTHOR("Keguang Zhang <keguang.zhang@gmail.com>");
+MODULE_AUTHOR("Binbin Zhou <zhoubinbin@loongson.cn>");
+MODULE_DESCRIPTION("Loongson NAND Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/raw/loongson1-nand-controller.c b/drivers/mtd/nand/raw/loongson1-nand-controller.c
deleted file mode 100644
index ef8e4f9ce287..000000000000
--- a/drivers/mtd/nand/raw/loongson1-nand-controller.c
+++ /dev/null
@@ -1,836 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * NAND Controller Driver for Loongson-1 SoC
- *
- * Copyright (C) 2015-2025 Keguang Zhang <keguang.zhang@gmail.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/iopoll.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-#include <linux/sizes.h>
-
-/* Loongson-1 NAND Controller Registers */
-#define LS1X_NAND_CMD 0x0
-#define LS1X_NAND_ADDR1 0x4
-#define LS1X_NAND_ADDR2 0x8
-#define LS1X_NAND_TIMING 0xc
-#define LS1X_NAND_IDL 0x10
-#define LS1X_NAND_IDH_STATUS 0x14
-#define LS1X_NAND_PARAM 0x18
-#define LS1X_NAND_OP_NUM 0x1c
-
-/* NAND Command Register Bits */
-#define LS1X_NAND_CMD_OP_DONE BIT(10)
-#define LS1X_NAND_CMD_OP_SPARE BIT(9)
-#define LS1X_NAND_CMD_OP_MAIN BIT(8)
-#define LS1X_NAND_CMD_STATUS BIT(7)
-#define LS1X_NAND_CMD_RESET BIT(6)
-#define LS1X_NAND_CMD_READID BIT(5)
-#define LS1X_NAND_CMD_BLOCKS_ERASE BIT(4)
-#define LS1X_NAND_CMD_ERASE BIT(3)
-#define LS1X_NAND_CMD_WRITE BIT(2)
-#define LS1X_NAND_CMD_READ BIT(1)
-#define LS1X_NAND_CMD_VALID BIT(0)
-
-#define LS1X_NAND_WAIT_CYCLE_MASK GENMASK(7, 0)
-#define LS1X_NAND_HOLD_CYCLE_MASK GENMASK(15, 8)
-#define LS1X_NAND_CELL_SIZE_MASK GENMASK(11, 8)
-
-#define LS1X_NAND_COL_ADDR_CYC 2U
-#define LS1X_NAND_MAX_ADDR_CYC 5U
-
-#define BITS_PER_WORD (4 * BITS_PER_BYTE)
-
-struct ls1x_nand_host;
-
-struct ls1x_nand_op {
- char addrs[LS1X_NAND_MAX_ADDR_CYC];
- unsigned int naddrs;
- unsigned int addrs_offset;
- unsigned int aligned_offset;
- unsigned int cmd_reg;
- unsigned int row_start;
- unsigned int rdy_timeout_ms;
- unsigned int orig_len;
- bool is_readid;
- bool is_erase;
- bool is_write;
- bool is_read;
- bool is_change_column;
- size_t len;
- char *buf;
-};
-
-struct ls1x_nand_data {
- unsigned int status_field;
- unsigned int op_scope_field;
- unsigned int hold_cycle;
- unsigned int wait_cycle;
- void (*set_addr)(struct ls1x_nand_host *host, struct ls1x_nand_op *op);
-};
-
-struct ls1x_nand_host {
- struct device *dev;
- struct nand_chip chip;
- struct nand_controller controller;
- const struct ls1x_nand_data *data;
- void __iomem *reg_base;
- struct regmap *regmap;
- /* DMA Engine stuff */
- dma_addr_t dma_base;
- struct dma_chan *dma_chan;
- dma_cookie_t dma_cookie;
- struct completion dma_complete;
-};
-
-static const struct regmap_config ls1x_nand_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
-};
-
-static int ls1x_nand_op_cmd_mapping(struct nand_chip *chip, struct ls1x_nand_op *op, u8 opcode)
-{
- struct ls1x_nand_host *host = nand_get_controller_data(chip);
-
- op->row_start = chip->page_shift + 1;
-
- /* The controller abstracts the following NAND operations. */
- switch (opcode) {
- case NAND_CMD_STATUS:
- op->cmd_reg = LS1X_NAND_CMD_STATUS;
- break;
- case NAND_CMD_RESET:
- op->cmd_reg = LS1X_NAND_CMD_RESET;
- break;
- case NAND_CMD_READID:
- op->is_readid = true;
- op->cmd_reg = LS1X_NAND_CMD_READID;
- break;
- case NAND_CMD_ERASE1:
- op->is_erase = true;
- op->addrs_offset = LS1X_NAND_COL_ADDR_CYC;
- break;
- case NAND_CMD_ERASE2:
- if (!op->is_erase)
- return -EOPNOTSUPP;
- /* During erasing, row_start differs from the default value. */
- op->row_start = chip->page_shift;
- op->cmd_reg = LS1X_NAND_CMD_ERASE;
- break;
- case NAND_CMD_SEQIN:
- op->is_write = true;
- break;
- case NAND_CMD_PAGEPROG:
- if (!op->is_write)
- return -EOPNOTSUPP;
- op->cmd_reg = LS1X_NAND_CMD_WRITE;
- break;
- case NAND_CMD_READ0:
- op->is_read = true;
- break;
- case NAND_CMD_READSTART:
- if (!op->is_read)
- return -EOPNOTSUPP;
- op->cmd_reg = LS1X_NAND_CMD_READ;
- break;
- case NAND_CMD_RNDOUT:
- op->is_change_column = true;
- break;
- case NAND_CMD_RNDOUTSTART:
- if (!op->is_change_column)
- return -EOPNOTSUPP;
- op->cmd_reg = LS1X_NAND_CMD_READ;
- break;
- default:
- dev_dbg(host->dev, "unsupported opcode: %u\n", opcode);
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static int ls1x_nand_parse_instructions(struct nand_chip *chip,
- const struct nand_subop *subop, struct ls1x_nand_op *op)
-{
- unsigned int op_id;
- int ret;
-
- for (op_id = 0; op_id < subop->ninstrs; op_id++) {
- const struct nand_op_instr *instr = &subop->instrs[op_id];
- unsigned int offset, naddrs;
- const u8 *addrs;
-
- switch (instr->type) {
- case NAND_OP_CMD_INSTR:
- ret = ls1x_nand_op_cmd_mapping(chip, op, instr->ctx.cmd.opcode);
- if (ret < 0)
- return ret;
-
- break;
- case NAND_OP_ADDR_INSTR:
- naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
- if (naddrs > LS1X_NAND_MAX_ADDR_CYC)
- return -EOPNOTSUPP;
- op->naddrs = naddrs;
- offset = nand_subop_get_addr_start_off(subop, op_id);
- addrs = &instr->ctx.addr.addrs[offset];
- memcpy(op->addrs + op->addrs_offset, addrs, naddrs);
- break;
- case NAND_OP_DATA_IN_INSTR:
- case NAND_OP_DATA_OUT_INSTR:
- offset = nand_subop_get_data_start_off(subop, op_id);
- op->orig_len = nand_subop_get_data_len(subop, op_id);
- if (instr->type == NAND_OP_DATA_IN_INSTR)
- op->buf = instr->ctx.data.buf.in + offset;
- else if (instr->type == NAND_OP_DATA_OUT_INSTR)
- op->buf = (void *)instr->ctx.data.buf.out + offset;
-
- break;
- case NAND_OP_WAITRDY_INSTR:
- op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
- break;
- default:
- break;
- }
- }
-
- return 0;
-}
-
-static void ls1b_nand_set_addr(struct ls1x_nand_host *host, struct ls1x_nand_op *op)
-{
- struct nand_chip *chip = &host->chip;
- int i;
-
- for (i = 0; i < LS1X_NAND_MAX_ADDR_CYC; i++) {
- int shift, mask, val;
-
- if (i < LS1X_NAND_COL_ADDR_CYC) {
- shift = i * BITS_PER_BYTE;
- mask = (u32)0xff << shift;
- mask &= GENMASK(chip->page_shift, 0);
- val = (u32)op->addrs[i] << shift;
- regmap_update_bits(host->regmap, LS1X_NAND_ADDR1, mask, val);
- } else if (!op->is_change_column) {
- shift = op->row_start + (i - LS1X_NAND_COL_ADDR_CYC) * BITS_PER_BYTE;
- mask = (u32)0xff << shift;
- val = (u32)op->addrs[i] << shift;
- regmap_update_bits(host->regmap, LS1X_NAND_ADDR1, mask, val);
-
- if (i == 4) {
- mask = (u32)0xff >> (BITS_PER_WORD - shift);
- val = (u32)op->addrs[i] >> (BITS_PER_WORD - shift);
- regmap_update_bits(host->regmap, LS1X_NAND_ADDR2, mask, val);
- }
- }
- }
-}
-
-static void ls1c_nand_set_addr(struct ls1x_nand_host *host, struct ls1x_nand_op *op)
-{
- int i;
-
- for (i = 0; i < LS1X_NAND_MAX_ADDR_CYC; i++) {
- int shift, mask, val;
-
- if (i < LS1X_NAND_COL_ADDR_CYC) {
- shift = i * BITS_PER_BYTE;
- mask = (u32)0xff << shift;
- val = (u32)op->addrs[i] << shift;
- regmap_update_bits(host->regmap, LS1X_NAND_ADDR1, mask, val);
- } else if (!op->is_change_column) {
- shift = (i - LS1X_NAND_COL_ADDR_CYC) * BITS_PER_BYTE;
- mask = (u32)0xff << shift;
- val = (u32)op->addrs[i] << shift;
- regmap_update_bits(host->regmap, LS1X_NAND_ADDR2, mask, val);
- }
- }
-}
-
-static void ls1x_nand_trigger_op(struct ls1x_nand_host *host, struct ls1x_nand_op *op)
-{
- struct nand_chip *chip = &host->chip;
- struct mtd_info *mtd = nand_to_mtd(chip);
- int col0 = op->addrs[0];
- short col;
-
- if (!IS_ALIGNED(col0, chip->buf_align)) {
- col0 = ALIGN_DOWN(op->addrs[0], chip->buf_align);
- op->aligned_offset = op->addrs[0] - col0;
- op->addrs[0] = col0;
- }
-
- if (host->data->set_addr)
- host->data->set_addr(host, op);
-
- /* set operation length */
- if (op->is_write || op->is_read || op->is_change_column)
- op->len = ALIGN(op->orig_len + op->aligned_offset, chip->buf_align);
- else if (op->is_erase)
- op->len = 1;
- else
- op->len = op->orig_len;
-
- writel(op->len, host->reg_base + LS1X_NAND_OP_NUM);
-
- /* set operation area and scope */
- col = op->addrs[1] << BITS_PER_BYTE | op->addrs[0];
- if (op->orig_len && !op->is_readid) {
- unsigned int op_scope = 0;
-
- if (col < mtd->writesize) {
- op->cmd_reg |= LS1X_NAND_CMD_OP_MAIN;
- op_scope = mtd->writesize;
- }
-
- op->cmd_reg |= LS1X_NAND_CMD_OP_SPARE;
- op_scope += mtd->oobsize;
-
- op_scope <<= __ffs(host->data->op_scope_field);
- regmap_update_bits(host->regmap, LS1X_NAND_PARAM,
- host->data->op_scope_field, op_scope);
- }
-
- /* set command */
- writel(op->cmd_reg, host->reg_base + LS1X_NAND_CMD);
-
- /* trigger operation */
- regmap_write_bits(host->regmap, LS1X_NAND_CMD, LS1X_NAND_CMD_VALID, LS1X_NAND_CMD_VALID);
-}
-
-static int ls1x_nand_wait_for_op_done(struct ls1x_nand_host *host, struct ls1x_nand_op *op)
-{
- unsigned int val;
- int ret = 0;
-
- if (op->rdy_timeout_ms) {
- ret = regmap_read_poll_timeout(host->regmap, LS1X_NAND_CMD,
- val, val & LS1X_NAND_CMD_OP_DONE,
- 0, op->rdy_timeout_ms * MSEC_PER_SEC);
- if (ret)
- dev_err(host->dev, "operation failed\n");
- }
-
- return ret;
-}
-
-static void ls1x_nand_dma_callback(void *data)
-{
- struct ls1x_nand_host *host = (struct ls1x_nand_host *)data;
- struct dma_chan *chan = host->dma_chan;
- struct device *dev = chan->device->dev;
- enum dma_status status;
-
- status = dmaengine_tx_status(chan, host->dma_cookie, NULL);
- if (likely(status == DMA_COMPLETE)) {
- dev_dbg(dev, "DMA complete with cookie=%d\n", host->dma_cookie);
- complete(&host->dma_complete);
- } else {
- dev_err(dev, "DMA error with cookie=%d\n", host->dma_cookie);
- }
-}
-
-static int ls1x_nand_dma_transfer(struct ls1x_nand_host *host, struct ls1x_nand_op *op)
-{
- struct nand_chip *chip = &host->chip;
- struct dma_chan *chan = host->dma_chan;
- struct device *dev = chan->device->dev;
- struct dma_async_tx_descriptor *desc;
- enum dma_data_direction data_dir = op->is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
- enum dma_transfer_direction xfer_dir = op->is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
- void *buf = op->buf;
- char *dma_buf = NULL;
- dma_addr_t dma_addr;
- int ret;
-
- if (IS_ALIGNED((uintptr_t)buf, chip->buf_align) &&
- IS_ALIGNED(op->orig_len, chip->buf_align)) {
- dma_addr = dma_map_single(dev, buf, op->orig_len, data_dir);
- if (dma_mapping_error(dev, dma_addr)) {
- dev_err(dev, "failed to map DMA buffer\n");
- return -ENXIO;
- }
- } else if (!op->is_write) {
- dma_buf = dma_alloc_coherent(dev, op->len, &dma_addr, GFP_KERNEL);
- if (!dma_buf)
- return -ENOMEM;
- } else {
- dev_err(dev, "subpage writing not supported\n");
- return -EOPNOTSUPP;
- }
-
- desc = dmaengine_prep_slave_single(chan, dma_addr, op->len, xfer_dir, DMA_PREP_INTERRUPT);
- if (!desc) {
- dev_err(dev, "failed to prepare DMA descriptor\n");
- ret = -ENOMEM;
- goto err;
- }
- desc->callback = ls1x_nand_dma_callback;
- desc->callback_param = host;
-
- host->dma_cookie = dmaengine_submit(desc);
- ret = dma_submit_error(host->dma_cookie);
- if (ret) {
- dev_err(dev, "failed to submit DMA descriptor\n");
- goto err;
- }
-
- dev_dbg(dev, "issue DMA with cookie=%d\n", host->dma_cookie);
- dma_async_issue_pending(chan);
-
- if (!wait_for_completion_timeout(&host->dma_complete, msecs_to_jiffies(1000))) {
- dmaengine_terminate_sync(chan);
- reinit_completion(&host->dma_complete);
- ret = -ETIMEDOUT;
- goto err;
- }
-
- if (dma_buf)
- memcpy(buf, dma_buf + op->aligned_offset, op->orig_len);
-err:
- if (dma_buf)
- dma_free_coherent(dev, op->len, dma_buf, dma_addr);
- else
- dma_unmap_single(dev, dma_addr, op->orig_len, data_dir);
-
- return ret;
-}
-
-static int ls1x_nand_data_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
-{
- struct ls1x_nand_host *host = nand_get_controller_data(chip);
- struct ls1x_nand_op op = {};
- int ret;
-
- ret = ls1x_nand_parse_instructions(chip, subop, &op);
- if (ret)
- return ret;
-
- ls1x_nand_trigger_op(host, &op);
-
- ret = ls1x_nand_dma_transfer(host, &op);
- if (ret)
- return ret;
-
- return ls1x_nand_wait_for_op_done(host, &op);
-}
-
-static int ls1x_nand_misc_type_exec(struct nand_chip *chip,
- const struct nand_subop *subop, struct ls1x_nand_op *op)
-{
- struct ls1x_nand_host *host = nand_get_controller_data(chip);
- int ret;
-
- ret = ls1x_nand_parse_instructions(chip, subop, op);
- if (ret)
- return ret;
-
- ls1x_nand_trigger_op(host, op);
-
- return ls1x_nand_wait_for_op_done(host, op);
-}
-
-static int ls1x_nand_zerolen_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
-{
- struct ls1x_nand_op op = {};
-
- return ls1x_nand_misc_type_exec(chip, subop, &op);
-}
-
-static int ls1x_nand_read_id_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
-{
- struct ls1x_nand_host *host = nand_get_controller_data(chip);
- struct ls1x_nand_op op = {};
- int i, ret;
- union {
- char ids[5];
- struct {
- int idl;
- char idh;
- };
- } nand_id;
-
- ret = ls1x_nand_misc_type_exec(chip, subop, &op);
- if (ret)
- return ret;
-
- nand_id.idl = readl(host->reg_base + LS1X_NAND_IDL);
- nand_id.idh = readb(host->reg_base + LS1X_NAND_IDH_STATUS);
-
- for (i = 0; i < min(sizeof(nand_id.ids), op.orig_len); i++)
- op.buf[i] = nand_id.ids[sizeof(nand_id.ids) - 1 - i];
-
- return ret;
-}
-
-static int ls1x_nand_read_status_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
-{
- struct ls1x_nand_host *host = nand_get_controller_data(chip);
- struct ls1x_nand_op op = {};
- int val, ret;
-
- ret = ls1x_nand_misc_type_exec(chip, subop, &op);
- if (ret)
- return ret;
-
- val = readl(host->reg_base + LS1X_NAND_IDH_STATUS);
- val &= ~host->data->status_field;
- op.buf[0] = val << ffs(host->data->status_field);
-
- return ret;
-}
-
-static const struct nand_op_parser ls1x_nand_op_parser = NAND_OP_PARSER(
- NAND_OP_PARSER_PATTERN(
- ls1x_nand_read_id_type_exec,
- NAND_OP_PARSER_PAT_CMD_ELEM(false),
- NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC),
- NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
- NAND_OP_PARSER_PATTERN(
- ls1x_nand_read_status_type_exec,
- NAND_OP_PARSER_PAT_CMD_ELEM(false),
- NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
- NAND_OP_PARSER_PATTERN(
- ls1x_nand_zerolen_type_exec,
- NAND_OP_PARSER_PAT_CMD_ELEM(false),
- NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
- NAND_OP_PARSER_PATTERN(
- ls1x_nand_zerolen_type_exec,
- NAND_OP_PARSER_PAT_CMD_ELEM(false),
- NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC),
- NAND_OP_PARSER_PAT_CMD_ELEM(false),
- NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
- NAND_OP_PARSER_PATTERN(
- ls1x_nand_data_type_exec,
- NAND_OP_PARSER_PAT_CMD_ELEM(false),
- NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC),
- NAND_OP_PARSER_PAT_CMD_ELEM(false),
- NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
- NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 0)),
- NAND_OP_PARSER_PATTERN(
- ls1x_nand_data_type_exec,
- NAND_OP_PARSER_PAT_CMD_ELEM(false),
- NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC),
- NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 0),
- NAND_OP_PARSER_PAT_CMD_ELEM(false),
- NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
- );
-
-static int ls1x_nand_is_valid_cmd(u8 opcode)
-{
- if (opcode == NAND_CMD_STATUS || opcode == NAND_CMD_RESET || opcode == NAND_CMD_READID)
- return 0;
-
- return -EOPNOTSUPP;
-}
-
-static int ls1x_nand_is_valid_cmd_seq(u8 opcode1, u8 opcode2)
-{
- if (opcode1 == NAND_CMD_RNDOUT && opcode2 == NAND_CMD_RNDOUTSTART)
- return 0;
-
- if (opcode1 == NAND_CMD_READ0 && opcode2 == NAND_CMD_READSTART)
- return 0;
-
- if (opcode1 == NAND_CMD_ERASE1 && opcode2 == NAND_CMD_ERASE2)
- return 0;
-
- if (opcode1 == NAND_CMD_SEQIN && opcode2 == NAND_CMD_PAGEPROG)
- return 0;
-
- return -EOPNOTSUPP;
-}
-
-static int ls1x_nand_check_op(struct nand_chip *chip, const struct nand_operation *op)
-{
- const struct nand_op_instr *instr1 = NULL, *instr2 = NULL;
- int op_id;
-
- for (op_id = 0; op_id < op->ninstrs; op_id++) {
- const struct nand_op_instr *instr = &op->instrs[op_id];
-
- if (instr->type == NAND_OP_CMD_INSTR) {
- if (!instr1)
- instr1 = instr;
- else if (!instr2)
- instr2 = instr;
- else
- break;
- }
- }
-
- if (!instr1)
- return -EOPNOTSUPP;
-
- if (!instr2)
- return ls1x_nand_is_valid_cmd(instr1->ctx.cmd.opcode);
-
- return ls1x_nand_is_valid_cmd_seq(instr1->ctx.cmd.opcode, instr2->ctx.cmd.opcode);
-}
-
-static int ls1x_nand_exec_op(struct nand_chip *chip,
- const struct nand_operation *op, bool check_only)
-{
- if (check_only)
- return ls1x_nand_check_op(chip, op);
-
- return nand_op_parser_exec_op(chip, &ls1x_nand_op_parser, op, check_only);
-}
-
-static int ls1x_nand_attach_chip(struct nand_chip *chip)
-{
- struct ls1x_nand_host *host = nand_get_controller_data(chip);
- u64 chipsize = nanddev_target_size(&chip->base);
- int cell_size = 0;
-
- switch (chipsize) {
- case SZ_128M:
- cell_size = 0x0;
- break;
- case SZ_256M:
- cell_size = 0x1;
- break;
- case SZ_512M:
- cell_size = 0x2;
- break;
- case SZ_1G:
- cell_size = 0x3;
- break;
- case SZ_2G:
- cell_size = 0x4;
- break;
- case SZ_4G:
- cell_size = 0x5;
- break;
- case SZ_8G:
- cell_size = 0x6;
- break;
- case SZ_16G:
- cell_size = 0x7;
- break;
- default:
- dev_err(host->dev, "unsupported chip size: %llu MB\n", chipsize);
- return -EINVAL;
- }
-
- switch (chip->ecc.engine_type) {
- case NAND_ECC_ENGINE_TYPE_NONE:
- break;
- case NAND_ECC_ENGINE_TYPE_SOFT:
- break;
- default:
- return -EINVAL;
- }
-
- /* set cell size */
- regmap_update_bits(host->regmap, LS1X_NAND_PARAM, LS1X_NAND_CELL_SIZE_MASK,
- FIELD_PREP(LS1X_NAND_CELL_SIZE_MASK, cell_size));
-
- regmap_update_bits(host->regmap, LS1X_NAND_TIMING, LS1X_NAND_HOLD_CYCLE_MASK,
- FIELD_PREP(LS1X_NAND_HOLD_CYCLE_MASK, host->data->hold_cycle));
-
- regmap_update_bits(host->regmap, LS1X_NAND_TIMING, LS1X_NAND_WAIT_CYCLE_MASK,
- FIELD_PREP(LS1X_NAND_WAIT_CYCLE_MASK, host->data->wait_cycle));
-
- chip->ecc.read_page_raw = nand_monolithic_read_page_raw;
- chip->ecc.write_page_raw = nand_monolithic_write_page_raw;
-
- return 0;
-}
-
-static const struct nand_controller_ops ls1x_nand_controller_ops = {
- .exec_op = ls1x_nand_exec_op,
- .attach_chip = ls1x_nand_attach_chip,
-};
-
-static void ls1x_nand_controller_cleanup(struct ls1x_nand_host *host)
-{
- if (host->dma_chan)
- dma_release_channel(host->dma_chan);
-}
-
-static int ls1x_nand_controller_init(struct ls1x_nand_host *host)
-{
- struct device *dev = host->dev;
- struct dma_chan *chan;
- struct dma_slave_config cfg = {};
- int ret;
-
- host->regmap = devm_regmap_init_mmio(dev, host->reg_base, &ls1x_nand_regmap_config);
- if (IS_ERR(host->regmap))
- return dev_err_probe(dev, PTR_ERR(host->regmap), "failed to init regmap\n");
-
- chan = dma_request_chan(dev, "rxtx");
- if (IS_ERR(chan))
- return dev_err_probe(dev, PTR_ERR(chan), "failed to request DMA channel\n");
- host->dma_chan = chan;
-
- cfg.src_addr = host->dma_base;
- cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- cfg.dst_addr = host->dma_base;
- cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- ret = dmaengine_slave_config(host->dma_chan, &cfg);
- if (ret)
- return dev_err_probe(dev, ret, "failed to config DMA channel\n");
-
- init_completion(&host->dma_complete);
-
- return 0;
-}
-
-static int ls1x_nand_chip_init(struct ls1x_nand_host *host)
-{
- struct device *dev = host->dev;
- int nchips = of_get_child_count(dev->of_node);
- struct device_node *chip_np;
- struct nand_chip *chip = &host->chip;
- struct mtd_info *mtd = nand_to_mtd(chip);
- int ret;
-
- if (nchips != 1)
- return dev_err_probe(dev, -EINVAL, "Currently one NAND chip supported\n");
-
- chip_np = of_get_next_child(dev->of_node, NULL);
- if (!chip_np)
- return dev_err_probe(dev, -ENODEV, "failed to get child node for NAND chip\n");
-
- nand_set_flash_node(chip, chip_np);
- of_node_put(chip_np);
- if (!mtd->name)
- return dev_err_probe(dev, -EINVAL, "Missing MTD label\n");
-
- nand_set_controller_data(chip, host);
- chip->controller = &host->controller;
- chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA | NAND_BROKEN_XD;
- chip->buf_align = 16;
- mtd->dev.parent = dev;
- mtd->owner = THIS_MODULE;
-
- ret = nand_scan(chip, 1);
- if (ret)
- return dev_err_probe(dev, ret, "failed to scan NAND chip\n");
-
- ret = mtd_device_register(mtd, NULL, 0);
- if (ret) {
- nand_cleanup(chip);
- return dev_err_probe(dev, ret, "failed to register MTD device\n");
- }
-
- return 0;
-}
-
-static int ls1x_nand_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- const struct ls1x_nand_data *data;
- struct ls1x_nand_host *host;
- struct resource *res;
- int ret;
-
- data = of_device_get_match_data(dev);
- if (!data)
- return -ENODEV;
-
- host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
- if (!host)
- return -ENOMEM;
-
- host->reg_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(host->reg_base))
- return PTR_ERR(host->reg_base);
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-dma");
- if (!res)
- return dev_err_probe(dev, -EINVAL, "Missing 'nand-dma' in reg-names property\n");
-
- host->dma_base = dma_map_resource(dev, res->start, resource_size(res),
- DMA_BIDIRECTIONAL, 0);
- if (dma_mapping_error(dev, host->dma_base))
- return -ENXIO;
-
- host->dev = dev;
- host->data = data;
- host->controller.ops = &ls1x_nand_controller_ops;
-
- nand_controller_init(&host->controller);
-
- ret = ls1x_nand_controller_init(host);
- if (ret)
- goto err;
-
- ret = ls1x_nand_chip_init(host);
- if (ret)
- goto err;
-
- platform_set_drvdata(pdev, host);
-
- return 0;
-err:
- ls1x_nand_controller_cleanup(host);
-
- return ret;
-}
-
-static void ls1x_nand_remove(struct platform_device *pdev)
-{
- struct ls1x_nand_host *host = platform_get_drvdata(pdev);
- struct nand_chip *chip = &host->chip;
- int ret;
-
- ret = mtd_device_unregister(nand_to_mtd(chip));
- WARN_ON(ret);
- nand_cleanup(chip);
- ls1x_nand_controller_cleanup(host);
-}
-
-static const struct ls1x_nand_data ls1b_nand_data = {
- .status_field = GENMASK(15, 8),
- .hold_cycle = 0x2,
- .wait_cycle = 0xc,
- .set_addr = ls1b_nand_set_addr,
-};
-
-static const struct ls1x_nand_data ls1c_nand_data = {
- .status_field = GENMASK(23, 16),
- .op_scope_field = GENMASK(29, 16),
- .hold_cycle = 0x2,
- .wait_cycle = 0xc,
- .set_addr = ls1c_nand_set_addr,
-};
-
-static const struct of_device_id ls1x_nand_match[] = {
- {
- .compatible = "loongson,ls1b-nand-controller",
- .data = &ls1b_nand_data,
- },
- {
- .compatible = "loongson,ls1c-nand-controller",
- .data = &ls1c_nand_data,
- },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, ls1x_nand_match);
-
-static struct platform_driver ls1x_nand_driver = {
- .probe = ls1x_nand_probe,
- .remove = ls1x_nand_remove,
- .driver = {
- .name = KBUILD_MODNAME,
- .of_match_table = ls1x_nand_match,
- },
-};
-
-module_platform_driver(ls1x_nand_driver);
-
-MODULE_AUTHOR("Keguang Zhang <keguang.zhang@gmail.com>");
-MODULE_DESCRIPTION("Loongson-1 NAND Controller Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 13e4060bd1b6..c7d9501f646b 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -2784,137 +2784,6 @@ int nand_set_features(struct nand_chip *chip, int addr,
}
/**
- * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
- * @buf: buffer to test
- * @len: buffer length
- * @bitflips_threshold: maximum number of bitflips
- *
- * Check if a buffer contains only 0xff, which means the underlying region
- * has been erased and is ready to be programmed.
- * The bitflips_threshold specifies the maximum number of bitflips before
- * the region is considered not erased.
- * Note: The logic of this function has been extracted from the memweight
- * implementation, except that nand_check_erased_buf() exits before
- * testing the whole buffer if the number of bitflips exceeds the
- * bitflips_threshold value.
- *
- * Returns a positive number of bitflips less than or equal to
- * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
- * threshold.
- */
-static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
-{
- const unsigned char *bitmap = buf;
- int bitflips = 0;
- int weight;
-
- for (; len && ((uintptr_t)bitmap) % sizeof(long);
- len--, bitmap++) {
- weight = hweight8(*bitmap);
- bitflips += BITS_PER_BYTE - weight;
- if (unlikely(bitflips > bitflips_threshold))
- return -EBADMSG;
- }
-
- for (; len >= sizeof(long);
- len -= sizeof(long), bitmap += sizeof(long)) {
- unsigned long d = *((unsigned long *)bitmap);
- if (d == ~0UL)
- continue;
- weight = hweight_long(d);
- bitflips += BITS_PER_LONG - weight;
- if (unlikely(bitflips > bitflips_threshold))
- return -EBADMSG;
- }
-
- for (; len > 0; len--, bitmap++) {
- weight = hweight8(*bitmap);
- bitflips += BITS_PER_BYTE - weight;
- if (unlikely(bitflips > bitflips_threshold))
- return -EBADMSG;
- }
-
- return bitflips;
-}
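
For readers outside the kernel tree, the head/body/tail shape of the loop above can be modelled in plain user-space C; count_bitflips() and its -1 error return are illustrative stand-ins, not kernel API, and GCC/Clang __builtin_popcount*() stands in for hweight8()/hweight_long():

#include <limits.h>
#include <stdint.h>

static int count_bitflips(const void *buf, int len, int threshold)
{
	const unsigned char *p = buf;
	int flips = 0;

	/* head: byte at a time until the pointer is long-aligned */
	for (; len && ((uintptr_t)p % sizeof(long)); len--, p++) {
		flips += CHAR_BIT - __builtin_popcount(*p);
		if (flips > threshold)
			return -1;
	}

	/* body: whole longs, with an all-ones (fully erased) fast path */
	for (; len >= (int)sizeof(long); len -= sizeof(long), p += sizeof(long)) {
		unsigned long d = *(const unsigned long *)p;

		if (d == ~0UL)
			continue;
		flips += sizeof(long) * CHAR_BIT - __builtin_popcountl(d);
		if (flips > threshold)
			return -1;
	}

	/* tail: remaining bytes */
	for (; len > 0; len--, p++) {
		flips += CHAR_BIT - __builtin_popcount(*p);
		if (flips > threshold)
			return -1;
	}

	return flips;
}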
-
-/**
- * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
- * 0xff data
- * @data: data buffer to test
- * @datalen: data length
- * @ecc: ECC buffer
- * @ecclen: ECC length
- * @extraoob: extra OOB buffer
- * @extraooblen: extra OOB length
- * @bitflips_threshold: maximum number of bitflips
- *
- * Check if a data buffer and its associated ECC and OOB data contain only
- * the 0xff pattern, which means the underlying region has been erased and
- * is ready to be programmed.
- * The bitflips_threshold specifies the maximum number of bitflips before
- * the region is considered not erased.
- *
- * Note:
- * 1/ ECC algorithms work on pre-defined block sizes which are usually
- *    different from the NAND page size. When fixing bitflips, ECC engines will
- *    report the number of errors per chunk, and the NAND core infrastructure
- *    expects you to return the maximum number of bitflips for the whole page.
- *    This is why you should always use this function on a single chunk and
- *    not on the whole page. After checking each chunk you should update your
- *    max_bitflips value accordingly.
- * 2/ When checking for bitflips in erased pages you should not only check
- *    the payload data but also its associated ECC data, because a user might
- *    have programmed almost all bits to 1, leaving only a few at 0. In this
- *    case, the chunk should not be considered erased, and checking the ECC
- *    bytes prevents that misclassification.
- * 3/ The extraoob argument is optional, and should be used if some of your OOB
- * data are protected by the ECC engine.
- * It could also be used if you support subpages and want to attach some
- * extra OOB data to an ECC chunk.
- *
- * Returns a positive number of bitflips less than or equal to
- * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
- * threshold. In case of success, the passed buffers are filled with 0xff.
- */
-int nand_check_erased_ecc_chunk(void *data, int datalen,
- void *ecc, int ecclen,
- void *extraoob, int extraooblen,
- int bitflips_threshold)
-{
- int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
-
- data_bitflips = nand_check_erased_buf(data, datalen,
- bitflips_threshold);
- if (data_bitflips < 0)
- return data_bitflips;
-
- bitflips_threshold -= data_bitflips;
-
- ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
- if (ecc_bitflips < 0)
- return ecc_bitflips;
-
- bitflips_threshold -= ecc_bitflips;
-
- extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
- bitflips_threshold);
- if (extraoob_bitflips < 0)
- return extraoob_bitflips;
-
- if (data_bitflips)
- memset(data, 0xff, datalen);
-
- if (ecc_bitflips)
- memset(ecc, 0xff, ecclen);
-
- if (extraoob_bitflips)
- memset(extraoob, 0xff, extraooblen);
-
- return data_bitflips + ecc_bitflips + extraoob_bitflips;
-}
-EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
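
A hedged usage sketch of note 1/ above — the chunk-by-chunk pattern a driver's read path would follow. foo_check_chunk() is hypothetical; only nand_check_erased_ecc_chunk() is the exported API:

#include <linux/mtd/rawnand.h>

static int foo_check_chunk(struct nand_chip *chip, u8 *data, int datalen,
			   u8 *ecc, int ecclen, unsigned int *max_bitflips)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	/* one ECC chunk at a time, never the whole page */
	ret = nand_check_erased_ecc_chunk(data, datalen, ecc, ecclen,
					  NULL, 0, chip->ecc.strength);
	if (ret < 0) {
		mtd->ecc_stats.failed++;
		return ret;
	}

	/* fold this chunk's bitflip count into the page-wide maximum */
	*max_bitflips = max_t(unsigned int, *max_bitflips, ret);

	return 0;
}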
-
-/**
* nand_read_page_raw_notsupp - dummy read raw page function
* @chip: nand chip info structure
* @buf: buffer to store read data
diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c
index df48b7d01d16..84942e7e528f 100644
--- a/drivers/mtd/nand/raw/nandsim.c
+++ b/drivers/mtd/nand/raw/nandsim.c
@@ -552,9 +552,8 @@ static int __init ns_alloc_device(struct nandsim *ns)
err = -EINVAL;
goto err_close_filp;
}
- ns->pages_written =
- vzalloc(array_size(sizeof(unsigned long),
- BITS_TO_LONGS(ns->geom.pgnum)));
+ ns->pages_written = vcalloc(BITS_TO_LONGS(ns->geom.pgnum),
+ sizeof(unsigned long));
if (!ns->pages_written) {
NS_ERR("alloc_device: unable to allocate pages written array\n");
err = -ENOMEM;
@@ -578,7 +577,7 @@ err_close_filp:
return err;
}
- ns->pages = vmalloc(array_size(sizeof(union ns_mem), ns->geom.pgnum));
+ ns->pages = vmalloc_array(ns->geom.pgnum, sizeof(union ns_mem));
if (!ns->pages) {
NS_ERR("alloc_device: unable to allocate page array\n");
return -ENOMEM;
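
Both nandsim hunks are the same conversion: the open-coded array_size() multiplication moves into an allocator that performs the overflow check itself (and, for vcalloc(), the zeroing). A minimal sketch of the resulting shape — alloc_page_bitmap() is a hypothetical helper:

#include <linux/bitops.h>	/* BITS_TO_LONGS() */
#include <linux/vmalloc.h>

static unsigned long *alloc_page_bitmap(unsigned long npages)
{
	/* was: vzalloc(array_size(sizeof(unsigned long), BITS_TO_LONGS(npages))) */
	return vcalloc(BITS_TO_LONGS(npages), sizeof(unsigned long));
}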
diff --git a/drivers/mtd/nand/raw/nuvoton-ma35d1-nand-controller.c b/drivers/mtd/nand/raw/nuvoton-ma35d1-nand-controller.c
index c23b537948d5..1a285cd8fad6 100644
--- a/drivers/mtd/nand/raw/nuvoton-ma35d1-nand-controller.c
+++ b/drivers/mtd/nand/raw/nuvoton-ma35d1-nand-controller.c
@@ -935,10 +935,10 @@ static void ma35_chips_cleanup(struct ma35_nand_info *nand)
static int ma35_nand_chips_init(struct device *dev, struct ma35_nand_info *nand)
{
- struct device_node *np = dev->of_node, *nand_np;
+ struct device_node *np = dev->of_node;
int ret;
- for_each_child_of_node(np, nand_np) {
+ for_each_child_of_node_scoped(np, nand_np) {
ret = ma35_nand_chip_init(dev, nand, nand_np);
if (ret) {
ma35_chips_cleanup(nand);
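
The _scoped iterator owns the child node reference and drops it on every exit from the loop body, which is why the early return inside the error path above no longer leaks a node. A minimal sketch under that assumption — probe_children() is hypothetical:

#include <linux/errno.h>
#include <linux/of.h>

static int probe_children(struct device_node *np)
{
	for_each_child_of_node_scoped(np, child) {
		if (!of_device_is_available(child))
			return -ENODEV;	/* child reference dropped automatically */
	}

	return 0;
}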
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index b8af3a3533fc..39e297486721 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -1979,7 +1979,7 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
err = rawnand_sw_bch_init(chip);
if (err) {
dev_err(dev, "Unable to use BCH library\n");
- return err;
+ goto err_put_elm_dev;
}
break;
@@ -2016,7 +2016,7 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
err = rawnand_sw_bch_init(chip);
if (err) {
dev_err(dev, "unable to use BCH library\n");
- return err;
+ goto err_put_elm_dev;
}
break;
@@ -2054,7 +2054,8 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
break;
default:
dev_err(dev, "Invalid or unsupported ECC scheme\n");
- return -EINVAL;
+ err = -EINVAL;
+ goto err_put_elm_dev;
}
if (elm_bch_strength >= 0) {
@@ -2073,7 +2074,7 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
info->nsteps_per_eccpg, chip->ecc.size,
chip->ecc.bytes);
if (err < 0)
- return err;
+ goto err_put_elm_dev;
}
/* Check if NAND device's OOB is enough to store ECC signatures */
@@ -2083,10 +2084,24 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
dev_err(dev,
"Not enough OOB bytes: required = %d, available=%d\n",
min_oobbytes, mtd->oobsize);
- return -EINVAL;
+ err = -EINVAL;
+ goto err_put_elm_dev;
}
return 0;
+
+err_put_elm_dev:
+ put_device(info->elm_dev);
+
+ return err;
+}
+
+static void omap_nand_detach_chip(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct omap_nand_info *info = mtd_to_omap(mtd);
+
+ put_device(info->elm_dev);
}
static void omap_nand_data_in(struct nand_chip *chip, void *buf,
@@ -2187,6 +2202,7 @@ static int omap_nand_exec_op(struct nand_chip *chip,
static const struct nand_controller_ops omap_nand_controller_ops = {
.attach_chip = omap_nand_attach_chip,
+ .detach_chip = omap_nand_detach_chip,
.exec_op = omap_nand_exec_op,
};
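
The theme of the omap2 fix: the elm_dev reference taken while setting up ECC must be dropped exactly once, either on an attach_chip error path or in the new detach_chip hook at teardown. A reference-balance sketch with hypothetical foo_* names:

#include <linux/device.h>

/* error path: undo the get before reporting failure */
static int foo_attach_fail(struct device *elm_dev, int err)
{
	put_device(elm_dev);
	return err;
}

/* success path: the reference is held until detach_chip runs */
static void foo_detach_chip_ref(struct device *elm_dev)
{
	put_device(elm_dev);
}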
@@ -2316,6 +2332,5 @@ static struct platform_driver omap_nand_driver = {
module_platform_driver(omap_nand_driver);
-MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");
diff --git a/drivers/mtd/nand/raw/pl35x-nand-controller.c b/drivers/mtd/nand/raw/pl35x-nand-controller.c
index 09440ed4652e..11bd90e3f18c 100644
--- a/drivers/mtd/nand/raw/pl35x-nand-controller.c
+++ b/drivers/mtd/nand/raw/pl35x-nand-controller.c
@@ -1137,7 +1137,7 @@ static int pl35x_nand_probe(struct platform_device *pdev)
struct device *smc_dev = pdev->dev.parent;
struct amba_device *smc_amba = to_amba_device(smc_dev);
struct pl35x_nandc *nfc;
- u32 ret;
+ int ret;
nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
if (!nfc)
@@ -1193,6 +1193,5 @@ static struct platform_driver pl35x_nandc_driver = {
module_platform_driver(pl35x_nandc_driver);
MODULE_AUTHOR("Xilinx, Inc.");
-MODULE_ALIAS("platform:" PL35X_NANDC_DRIVER_NAME);
MODULE_DESCRIPTION("ARM PL35X NAND controller driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c
index c5d7cd8a6cab..9444ba02696d 100644
--- a/drivers/mtd/nand/raw/rockchip-nand-controller.c
+++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c
@@ -1505,4 +1505,3 @@ module_platform_driver(rk_nfc_driver);
MODULE_LICENSE("Dual MIT/GPL");
MODULE_AUTHOR("Yifeng Zhao <yifeng.zhao@rock-chips.com>");
MODULE_DESCRIPTION("Rockchip Nand Flash Controller Driver");
-MODULE_ALIAS("platform:rockchip-nand-controller");
diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c
deleted file mode 100644
index 229f2e87d56e..000000000000
--- a/drivers/mtd/nand/raw/s3c2410.c
+++ /dev/null
@@ -1,1230 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright © 2004-2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- *
- * Samsung S3C2410/S3C2440/S3C2412 NAND driver
-*/
-
-#define pr_fmt(fmt) "nand-s3c2410: " fmt
-
-#ifdef CONFIG_MTD_NAND_S3C2410_DEBUG
-#define DEBUG
-#endif
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/clk.h>
-#include <linux/cpufreq.h>
-#include <linux/of.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-
-#include <linux/platform_data/mtd-nand-s3c2410.h>
-
-#define S3C2410_NFREG(x) (x)
-
-#define S3C2410_NFCONF S3C2410_NFREG(0x00)
-#define S3C2410_NFCMD S3C2410_NFREG(0x04)
-#define S3C2410_NFADDR S3C2410_NFREG(0x08)
-#define S3C2410_NFDATA S3C2410_NFREG(0x0C)
-#define S3C2410_NFSTAT S3C2410_NFREG(0x10)
-#define S3C2410_NFECC S3C2410_NFREG(0x14)
-#define S3C2440_NFCONT S3C2410_NFREG(0x04)
-#define S3C2440_NFCMD S3C2410_NFREG(0x08)
-#define S3C2440_NFADDR S3C2410_NFREG(0x0C)
-#define S3C2440_NFDATA S3C2410_NFREG(0x10)
-#define S3C2440_NFSTAT S3C2410_NFREG(0x20)
-#define S3C2440_NFMECC0 S3C2410_NFREG(0x2C)
-#define S3C2412_NFSTAT S3C2410_NFREG(0x28)
-#define S3C2412_NFMECC0 S3C2410_NFREG(0x34)
-#define S3C2410_NFCONF_EN (1<<15)
-#define S3C2410_NFCONF_INITECC (1<<12)
-#define S3C2410_NFCONF_nFCE (1<<11)
-#define S3C2410_NFCONF_TACLS(x) ((x)<<8)
-#define S3C2410_NFCONF_TWRPH0(x) ((x)<<4)
-#define S3C2410_NFCONF_TWRPH1(x) ((x)<<0)
-#define S3C2410_NFSTAT_BUSY (1<<0)
-#define S3C2440_NFCONF_TACLS(x) ((x)<<12)
-#define S3C2440_NFCONF_TWRPH0(x) ((x)<<8)
-#define S3C2440_NFCONF_TWRPH1(x) ((x)<<4)
-#define S3C2440_NFCONT_INITECC (1<<4)
-#define S3C2440_NFCONT_nFCE (1<<1)
-#define S3C2440_NFCONT_ENABLE (1<<0)
-#define S3C2440_NFSTAT_READY (1<<0)
-#define S3C2412_NFCONF_NANDBOOT (1<<31)
-#define S3C2412_NFCONT_INIT_MAIN_ECC (1<<5)
-#define S3C2412_NFCONT_nFCE0 (1<<1)
-#define S3C2412_NFSTAT_READY (1<<0)
-
-/* new OOB placement block for use with hardware ECC generation
- */
-static int s3c2410_ooblayout_ecc(struct mtd_info *mtd, int section,
- struct mtd_oob_region *oobregion)
-{
- if (section)
- return -ERANGE;
-
- oobregion->offset = 0;
- oobregion->length = 3;
-
- return 0;
-}
-
-static int s3c2410_ooblayout_free(struct mtd_info *mtd, int section,
- struct mtd_oob_region *oobregion)
-{
- if (section)
- return -ERANGE;
-
- oobregion->offset = 8;
- oobregion->length = 8;
-
- return 0;
-}
-
-static const struct mtd_ooblayout_ops s3c2410_ooblayout_ops = {
- .ecc = s3c2410_ooblayout_ecc,
- .free = s3c2410_ooblayout_free,
-};
-
-/* controller and mtd information */
-
-struct s3c2410_nand_info;
-
-/**
- * struct s3c2410_nand_mtd - driver MTD structure
- * @chip: The NAND chip information.
- * @set: The platform information supplied for this set of NAND chips.
- * @info: Link back to the hardware information.
-*/
-struct s3c2410_nand_mtd {
- struct nand_chip chip;
- struct s3c2410_nand_set *set;
- struct s3c2410_nand_info *info;
-};
-
-enum s3c_cpu_type {
- TYPE_S3C2410,
- TYPE_S3C2412,
- TYPE_S3C2440,
-};
-
-enum s3c_nand_clk_state {
- CLOCK_DISABLE = 0,
- CLOCK_ENABLE,
- CLOCK_SUSPEND,
-};
-
-/* overview of the s3c2410 nand state */
-
-/**
- * struct s3c2410_nand_info - NAND controller state.
- * @controller: Base controller structure.
- * @mtds: An array of MTD instances on this controller.
- * @platform: The platform data for this board.
- * @device: The platform device we bound to.
- * @clk: The clock resource for this controller.
- * @regs: The area mapped for the hardware registers.
- * @sel_reg: Pointer to the register controlling the NAND selection.
- * @sel_bit: The bit in @sel_reg to select the NAND chip.
- * @mtd_count: The number of MTDs created from this controller.
- * @save_sel: The contents of @sel_reg to be saved over suspend.
- * @clk_rate: The clock rate from @clk.
- * @clk_state: The current clock state.
- * @cpu_type: The exact type of this controller.
- */
-struct s3c2410_nand_info {
- /* mtd info */
- struct nand_controller controller;
- struct s3c2410_nand_mtd *mtds;
- struct s3c2410_platform_nand *platform;
-
- /* device info */
- struct device *device;
- struct clk *clk;
- void __iomem *regs;
- void __iomem *sel_reg;
- int sel_bit;
- int mtd_count;
- unsigned long save_sel;
- unsigned long clk_rate;
- enum s3c_nand_clk_state clk_state;
-
- enum s3c_cpu_type cpu_type;
-};
-
-struct s3c24XX_nand_devtype_data {
- enum s3c_cpu_type type;
-};
-
-static const struct s3c24XX_nand_devtype_data s3c2410_nand_devtype_data = {
- .type = TYPE_S3C2410,
-};
-
-static const struct s3c24XX_nand_devtype_data s3c2412_nand_devtype_data = {
- .type = TYPE_S3C2412,
-};
-
-static const struct s3c24XX_nand_devtype_data s3c2440_nand_devtype_data = {
- .type = TYPE_S3C2440,
-};
-
-/* conversion functions */
-
-static struct s3c2410_nand_mtd *s3c2410_nand_mtd_toours(struct mtd_info *mtd)
-{
- return container_of(mtd_to_nand(mtd), struct s3c2410_nand_mtd,
- chip);
-}
-
-static struct s3c2410_nand_info *s3c2410_nand_mtd_toinfo(struct mtd_info *mtd)
-{
- return s3c2410_nand_mtd_toours(mtd)->info;
-}
-
-static struct s3c2410_nand_info *to_nand_info(struct platform_device *dev)
-{
- return platform_get_drvdata(dev);
-}
-
-static struct s3c2410_platform_nand *to_nand_plat(struct platform_device *dev)
-{
- return dev_get_platdata(&dev->dev);
-}
-
-static inline int allow_clk_suspend(struct s3c2410_nand_info *info)
-{
-#ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP
- return 1;
-#else
- return 0;
-#endif
-}
-
-/**
- * s3c2410_nand_clk_set_state - Enable, disable or suspend NAND clock.
- * @info: The controller instance.
- * @new_state: State to which clock should be set.
- */
-static void s3c2410_nand_clk_set_state(struct s3c2410_nand_info *info,
- enum s3c_nand_clk_state new_state)
-{
- if (!allow_clk_suspend(info) && new_state == CLOCK_SUSPEND)
- return;
-
- if (info->clk_state == CLOCK_ENABLE) {
- if (new_state != CLOCK_ENABLE)
- clk_disable_unprepare(info->clk);
- } else {
- if (new_state == CLOCK_ENABLE)
- clk_prepare_enable(info->clk);
- }
-
- info->clk_state = new_state;
-}
-
-/* timing calculations */
-
-#define NS_IN_KHZ 1000000
-
-/**
- * s3c_nand_calc_rate - calculate timing data.
- * @wanted: The cycle time in nanoseconds.
- * @clk: The clock rate in kHz.
- * @max: The maximum divider value.
- *
- * Calculate the timing value from the given parameters.
- */
-static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max)
-{
- int result;
-
- result = DIV_ROUND_UP((wanted * clk), NS_IN_KHZ);
-
- pr_debug("result %d from %ld, %d\n", result, clk, wanted);
-
- if (result > max) {
- pr_err("%d ns is too big for current clock rate %ld\n",
- wanted, clk);
- return -1;
- }
-
- if (result < 1)
- result = 1;
-
- return result;
-}
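
A worked example of the conversion above, assuming a 66.5 MHz source clock (66500 kHz) and a wanted cycle time of 30 ns:

#include <linux/kernel.h>	/* DIV_ROUND_UP() */

static inline int example_ticks(void)
{
	/* 30 * 66500 / 1000000 = 1.995, rounded up to 2 ticks */
	return DIV_ROUND_UP(30 * 66500, 1000000);	/* = 2 */
}

Rounding up guarantees the programmed tick count is never shorter than the requested cycle time.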
-
-#define to_ns(ticks, clk) (((ticks) * NS_IN_KHZ) / (unsigned int)(clk))
-
-/* controller setup */
-
-/**
- * s3c2410_nand_setrate - setup controller timing information.
- * @info: The controller instance.
- *
- * Given the information supplied by the platform, calculate and set
- * the necessary timing registers in the hardware to generate the
- * necessary timing cycles to the hardware.
- */
-static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
-{
- struct s3c2410_platform_nand *plat = info->platform;
- int tacls_max = (info->cpu_type == TYPE_S3C2412) ? 8 : 4;
- int tacls, twrph0, twrph1;
- unsigned long clkrate = clk_get_rate(info->clk);
- unsigned long set, cfg, mask;
- unsigned long flags;
-
- /* calculate the timing information for the controller */
-
- info->clk_rate = clkrate;
- clkrate /= 1000; /* turn clock into kHz for ease of use */
-
- if (plat != NULL) {
- tacls = s3c_nand_calc_rate(plat->tacls, clkrate, tacls_max);
- twrph0 = s3c_nand_calc_rate(plat->twrph0, clkrate, 8);
- twrph1 = s3c_nand_calc_rate(plat->twrph1, clkrate, 8);
- } else {
- /* default timings */
- tacls = tacls_max;
- twrph0 = 8;
- twrph1 = 8;
- }
-
- if (tacls < 0 || twrph0 < 0 || twrph1 < 0) {
- dev_err(info->device, "cannot get suitable timings\n");
- return -EINVAL;
- }
-
- dev_info(info->device, "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n",
- tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate),
- twrph1, to_ns(twrph1, clkrate));
-
- switch (info->cpu_type) {
- case TYPE_S3C2410:
- mask = (S3C2410_NFCONF_TACLS(3) |
- S3C2410_NFCONF_TWRPH0(7) |
- S3C2410_NFCONF_TWRPH1(7));
- set = S3C2410_NFCONF_EN;
- set |= S3C2410_NFCONF_TACLS(tacls - 1);
- set |= S3C2410_NFCONF_TWRPH0(twrph0 - 1);
- set |= S3C2410_NFCONF_TWRPH1(twrph1 - 1);
- break;
-
- case TYPE_S3C2440:
- case TYPE_S3C2412:
- mask = (S3C2440_NFCONF_TACLS(tacls_max - 1) |
- S3C2440_NFCONF_TWRPH0(7) |
- S3C2440_NFCONF_TWRPH1(7));
-
- set = S3C2440_NFCONF_TACLS(tacls - 1);
- set |= S3C2440_NFCONF_TWRPH0(twrph0 - 1);
- set |= S3C2440_NFCONF_TWRPH1(twrph1 - 1);
- break;
-
- default:
- BUG();
- }
-
- local_irq_save(flags);
-
- cfg = readl(info->regs + S3C2410_NFCONF);
- cfg &= ~mask;
- cfg |= set;
- writel(cfg, info->regs + S3C2410_NFCONF);
-
- local_irq_restore(flags);
-
- dev_dbg(info->device, "NF_CONF is 0x%lx\n", cfg);
-
- return 0;
-}
-
-/**
- * s3c2410_nand_inithw - basic hardware initialisation
- * @info: The hardware state.
- *
- * Do the basic initialisation of the hardware, using s3c2410_nand_setrate()
- * to setup the hardware access speeds and set the controller to be enabled.
-*/
-static int s3c2410_nand_inithw(struct s3c2410_nand_info *info)
-{
- int ret;
-
- ret = s3c2410_nand_setrate(info);
- if (ret < 0)
- return ret;
-
- switch (info->cpu_type) {
- case TYPE_S3C2410:
- default:
- break;
-
- case TYPE_S3C2440:
- case TYPE_S3C2412:
- /* enable the controller and de-assert nFCE */
-
- writel(S3C2440_NFCONT_ENABLE, info->regs + S3C2440_NFCONT);
- }
-
- return 0;
-}
-
-/**
- * s3c2410_nand_select_chip - select the given nand chip
- * @this: NAND chip object.
- * @chip: The chip number.
- *
- * This is called by the MTD layer to either select a given chip for the
- * @mtd instance, or to indicate that the access has finished and the
- * chip can be de-selected.
- *
- * The routine ensures that the nFCE line is correctly setup, and any
- * platform specific selection code is called to route nFCE to the specific
- * chip.
- */
-static void s3c2410_nand_select_chip(struct nand_chip *this, int chip)
-{
- struct s3c2410_nand_info *info;
- struct s3c2410_nand_mtd *nmtd;
- unsigned long cur;
-
- nmtd = nand_get_controller_data(this);
- info = nmtd->info;
-
- if (chip != -1)
- s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
-
- cur = readl(info->sel_reg);
-
- if (chip == -1) {
- cur |= info->sel_bit;
- } else {
- if (nmtd->set != NULL && chip > nmtd->set->nr_chips) {
- dev_err(info->device, "invalid chip %d\n", chip);
- return;
- }
-
- if (info->platform != NULL) {
- if (info->platform->select_chip != NULL)
- (info->platform->select_chip) (nmtd->set, chip);
- }
-
- cur &= ~info->sel_bit;
- }
-
- writel(cur, info->sel_reg);
-
- if (chip == -1)
- s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
-}
-
-/* s3c2410_nand_hwcontrol
- *
- * Issue command and address cycles to the chip
-*/
-
-static void s3c2410_nand_hwcontrol(struct nand_chip *chip, int cmd,
- unsigned int ctrl)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
-
- if (cmd == NAND_CMD_NONE)
- return;
-
- if (ctrl & NAND_CLE)
- writeb(cmd, info->regs + S3C2410_NFCMD);
- else
- writeb(cmd, info->regs + S3C2410_NFADDR);
-}
-
-/* command and control functions */
-
-static void s3c2440_nand_hwcontrol(struct nand_chip *chip, int cmd,
- unsigned int ctrl)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
-
- if (cmd == NAND_CMD_NONE)
- return;
-
- if (ctrl & NAND_CLE)
- writeb(cmd, info->regs + S3C2440_NFCMD);
- else
- writeb(cmd, info->regs + S3C2440_NFADDR);
-}
-
-/* s3c2410_nand_devready()
- *
- * returns 0 if the nand is busy, 1 if it is ready
-*/
-
-static int s3c2410_nand_devready(struct nand_chip *chip)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
- return readb(info->regs + S3C2410_NFSTAT) & S3C2410_NFSTAT_BUSY;
-}
-
-static int s3c2440_nand_devready(struct nand_chip *chip)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
- return readb(info->regs + S3C2440_NFSTAT) & S3C2440_NFSTAT_READY;
-}
-
-static int s3c2412_nand_devready(struct nand_chip *chip)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
- return readb(info->regs + S3C2412_NFSTAT) & S3C2412_NFSTAT_READY;
-}
-
-/* ECC handling functions */
-
-static int s3c2410_nand_correct_data(struct nand_chip *chip, u_char *dat,
- u_char *read_ecc, u_char *calc_ecc)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
- unsigned int diff0, diff1, diff2;
- unsigned int bit, byte;
-
- pr_debug("%s(%p,%p,%p,%p)\n", __func__, mtd, dat, read_ecc, calc_ecc);
-
- diff0 = read_ecc[0] ^ calc_ecc[0];
- diff1 = read_ecc[1] ^ calc_ecc[1];
- diff2 = read_ecc[2] ^ calc_ecc[2];
-
- pr_debug("%s: rd %*phN calc %*phN diff %02x%02x%02x\n",
- __func__, 3, read_ecc, 3, calc_ecc,
- diff0, diff1, diff2);
-
- if (diff0 == 0 && diff1 == 0 && diff2 == 0)
- return 0; /* ECC is ok */
-
- /* sometimes people do not think about using the ECC, so check
- * to see if we have a 0xff,0xff,0xff read ECC and then ignore
- * the error, on the assumption that this is an un-ECCed page.
- */
- if (read_ecc[0] == 0xff && read_ecc[1] == 0xff && read_ecc[2] == 0xff
- && info->platform->ignore_unset_ecc)
- return 0;
-
- /* Can we correct this ECC (i.e., one row and one column change)?
- * Note, this is similar to the 256 error code on smartmedia */
-
- if (((diff0 ^ (diff0 >> 1)) & 0x55) == 0x55 &&
- ((diff1 ^ (diff1 >> 1)) & 0x55) == 0x55 &&
- ((diff2 ^ (diff2 >> 1)) & 0x55) == 0x55) {
- /* calculate the bit position of the error */
-
- bit = ((diff2 >> 3) & 1) |
- ((diff2 >> 4) & 2) |
- ((diff2 >> 5) & 4);
-
- /* calculate the byte position of the error */
-
- byte = ((diff2 << 7) & 0x100) |
- ((diff1 << 0) & 0x80) |
- ((diff1 << 1) & 0x40) |
- ((diff1 << 2) & 0x20) |
- ((diff1 << 3) & 0x10) |
- ((diff0 >> 4) & 0x08) |
- ((diff0 >> 3) & 0x04) |
- ((diff0 >> 2) & 0x02) |
- ((diff0 >> 1) & 0x01);
-
- dev_dbg(info->device, "correcting error bit %d, byte %d\n",
- bit, byte);
-
- dat[byte] ^= (1 << bit);
- return 1;
- }
-
- /* if there is only one bit difference in the ECC, then
- * only a single row or column parity bit has changed, which
- * means the error is most probably in the ECC itself */
-
- diff0 |= (diff1 << 8);
- diff0 |= (diff2 << 16);
-
- /* equal to "(diff0 & ~(1 << __ffs(diff0)))" */
- if ((diff0 & (diff0 - 1)) == 0)
- return 1;
-
- return -1;
-}
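
The final test relies on the power-of-two identity named in the comment: clearing the lowest set bit of a nonzero word yields zero iff exactly one bit was set, so a single-bit mismatch is attributed to the ECC bytes themselves and reported as correctable. A standalone check (single_bit_set() is illustrative):

#include <stdbool.h>
#include <stdint.h>

static bool single_bit_set(uint32_t diff)
{
	/* equivalent to diff == (1u << __ffs(diff)) for nonzero diff */
	return diff && (diff & (diff - 1)) == 0;
}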
-
-/* ECC functions
- *
- * These allow the s3c2410 and s3c2440 to use the controller's ECC
- * generator block to ECC the data as it passes through.
-*/
-
-static void s3c2410_nand_enable_hwecc(struct nand_chip *chip, int mode)
-{
- struct s3c2410_nand_info *info;
- unsigned long ctrl;
-
- info = s3c2410_nand_mtd_toinfo(nand_to_mtd(chip));
- ctrl = readl(info->regs + S3C2410_NFCONF);
- ctrl |= S3C2410_NFCONF_INITECC;
- writel(ctrl, info->regs + S3C2410_NFCONF);
-}
-
-static void s3c2412_nand_enable_hwecc(struct nand_chip *chip, int mode)
-{
- struct s3c2410_nand_info *info;
- unsigned long ctrl;
-
- info = s3c2410_nand_mtd_toinfo(nand_to_mtd(chip));
- ctrl = readl(info->regs + S3C2440_NFCONT);
- writel(ctrl | S3C2412_NFCONT_INIT_MAIN_ECC,
- info->regs + S3C2440_NFCONT);
-}
-
-static void s3c2440_nand_enable_hwecc(struct nand_chip *chip, int mode)
-{
- struct s3c2410_nand_info *info;
- unsigned long ctrl;
-
- info = s3c2410_nand_mtd_toinfo(nand_to_mtd(chip));
- ctrl = readl(info->regs + S3C2440_NFCONT);
- writel(ctrl | S3C2440_NFCONT_INITECC, info->regs + S3C2440_NFCONT);
-}
-
-static int s3c2410_nand_calculate_ecc(struct nand_chip *chip,
- const u_char *dat, u_char *ecc_code)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
-
- ecc_code[0] = readb(info->regs + S3C2410_NFECC + 0);
- ecc_code[1] = readb(info->regs + S3C2410_NFECC + 1);
- ecc_code[2] = readb(info->regs + S3C2410_NFECC + 2);
-
- pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code);
-
- return 0;
-}
-
-static int s3c2412_nand_calculate_ecc(struct nand_chip *chip,
- const u_char *dat, u_char *ecc_code)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
- unsigned long ecc = readl(info->regs + S3C2412_NFMECC0);
-
- ecc_code[0] = ecc;
- ecc_code[1] = ecc >> 8;
- ecc_code[2] = ecc >> 16;
-
- pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code);
-
- return 0;
-}
-
-static int s3c2440_nand_calculate_ecc(struct nand_chip *chip,
- const u_char *dat, u_char *ecc_code)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
- unsigned long ecc = readl(info->regs + S3C2440_NFMECC0);
-
- ecc_code[0] = ecc;
- ecc_code[1] = ecc >> 8;
- ecc_code[2] = ecc >> 16;
-
- pr_debug("%s: returning ecc %06lx\n", __func__, ecc & 0xffffff);
-
- return 0;
-}
-
-/* override the standard functions for a little more speed. We can
- * use read/write block to move the data buffers to/from the controller
-*/
-
-static void s3c2410_nand_read_buf(struct nand_chip *this, u_char *buf, int len)
-{
- readsb(this->legacy.IO_ADDR_R, buf, len);
-}
-
-static void s3c2440_nand_read_buf(struct nand_chip *this, u_char *buf, int len)
-{
- struct mtd_info *mtd = nand_to_mtd(this);
- struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
-
- readsl(info->regs + S3C2440_NFDATA, buf, len >> 2);
-
- /* cleanup if we've got less than a word to do */
- if (len & 3) {
- buf += len & ~3;
-
- for (; len & 3; len--)
- *buf++ = readb(info->regs + S3C2440_NFDATA);
- }
-}
-
-static void s3c2410_nand_write_buf(struct nand_chip *this, const u_char *buf,
- int len)
-{
- writesb(this->legacy.IO_ADDR_W, buf, len);
-}
-
-static void s3c2440_nand_write_buf(struct nand_chip *this, const u_char *buf,
- int len)
-{
- struct mtd_info *mtd = nand_to_mtd(this);
- struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
-
- writesl(info->regs + S3C2440_NFDATA, buf, len >> 2);
-
- /* cleanup any fractional write */
- if (len & 3) {
- buf += len & ~3;
-
- for (; len & 3; len--, buf++)
- writeb(*buf, info->regs + S3C2440_NFDATA);
- }
-}
-
-/* device management functions */
-
-static void s3c24xx_nand_remove(struct platform_device *pdev)
-{
- struct s3c2410_nand_info *info = to_nand_info(pdev);
-
- if (info == NULL)
- return;
-
- /* Release all our mtds and their partitions, then go through
- * freeing the resources used
- */
-
- if (info->mtds != NULL) {
- struct s3c2410_nand_mtd *ptr = info->mtds;
- int mtdno;
-
- for (mtdno = 0; mtdno < info->mtd_count; mtdno++, ptr++) {
- pr_debug("releasing mtd %d (%p)\n", mtdno, ptr);
- WARN_ON(mtd_device_unregister(nand_to_mtd(&ptr->chip)));
- nand_cleanup(&ptr->chip);
- }
- }
-
- /* free the common resources */
-
- if (!IS_ERR(info->clk))
- s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
-}
-
-static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
- struct s3c2410_nand_mtd *mtd,
- struct s3c2410_nand_set *set)
-{
- if (set) {
- struct mtd_info *mtdinfo = nand_to_mtd(&mtd->chip);
-
- mtdinfo->name = set->name;
-
- return mtd_device_register(mtdinfo, set->partitions,
- set->nr_partitions);
- }
-
- return -ENODEV;
-}
-
-static int s3c2410_nand_setup_interface(struct nand_chip *chip, int csline,
- const struct nand_interface_config *conf)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
- struct s3c2410_platform_nand *pdata = info->platform;
- const struct nand_sdr_timings *timings;
- int tacls;
-
- timings = nand_get_sdr_timings(conf);
- if (IS_ERR(timings))
- return -ENOTSUPP;
-
- tacls = timings->tCLS_min - timings->tWP_min;
- if (tacls < 0)
- tacls = 0;
-
- pdata->tacls = DIV_ROUND_UP(tacls, 1000);
- pdata->twrph0 = DIV_ROUND_UP(timings->tWP_min, 1000);
- pdata->twrph1 = DIV_ROUND_UP(timings->tCLH_min, 1000);
-
- return s3c2410_nand_setrate(info);
-}
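
The nand_sdr_timings values handed to setup_interface() are in picoseconds, hence the DIV_ROUND_UP(…, 1000) conversions to the nanosecond granularity the platform data uses. A worked example with assumed datasheet numbers (tCLS_min = 25000 ps, tWP_min = 15000 ps):

#include <linux/kernel.h>	/* DIV_ROUND_UP(), max() */

static inline u32 example_tacls_ns(void)
{
	s32 tacls = 25000 - 15000;	/* tCLS_min - tWP_min = 10000 ps */

	return DIV_ROUND_UP(max(tacls, 0), 1000);	/* = 10 ns */
}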
-
-/**
- * s3c2410_nand_init_chip - initialise a single instance of a chip
- * @info: The base NAND controller the chip is on.
- * @nmtd: The new controller MTD instance to fill in.
- * @set: The information passed from the board specific platform data.
- *
- * Initialise the given @nmtd from the information in @info and @set. This
- * readies the structure for use with the MTD layer functions by ensuring
- * all pointers are setup and the necessary control routines selected.
- */
-static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
- struct s3c2410_nand_mtd *nmtd,
- struct s3c2410_nand_set *set)
-{
- struct device_node *np = info->device->of_node;
- struct nand_chip *chip = &nmtd->chip;
- void __iomem *regs = info->regs;
-
- nand_set_flash_node(chip, set->of_node);
-
- chip->legacy.write_buf = s3c2410_nand_write_buf;
- chip->legacy.read_buf = s3c2410_nand_read_buf;
- chip->legacy.select_chip = s3c2410_nand_select_chip;
- chip->legacy.chip_delay = 50;
- nand_set_controller_data(chip, nmtd);
- chip->options = set->options;
- chip->controller = &info->controller;
-
- /*
- * let's keep behavior unchanged for legacy boards booting via pdata and
- * auto-detect timings only when booting with a device tree.
- */
- if (!np)
- chip->options |= NAND_KEEP_TIMINGS;
-
- switch (info->cpu_type) {
- case TYPE_S3C2410:
- chip->legacy.IO_ADDR_W = regs + S3C2410_NFDATA;
- info->sel_reg = regs + S3C2410_NFCONF;
- info->sel_bit = S3C2410_NFCONF_nFCE;
- chip->legacy.cmd_ctrl = s3c2410_nand_hwcontrol;
- chip->legacy.dev_ready = s3c2410_nand_devready;
- break;
-
- case TYPE_S3C2440:
- chip->legacy.IO_ADDR_W = regs + S3C2440_NFDATA;
- info->sel_reg = regs + S3C2440_NFCONT;
- info->sel_bit = S3C2440_NFCONT_nFCE;
- chip->legacy.cmd_ctrl = s3c2440_nand_hwcontrol;
- chip->legacy.dev_ready = s3c2440_nand_devready;
- chip->legacy.read_buf = s3c2440_nand_read_buf;
- chip->legacy.write_buf = s3c2440_nand_write_buf;
- break;
-
- case TYPE_S3C2412:
- chip->legacy.IO_ADDR_W = regs + S3C2440_NFDATA;
- info->sel_reg = regs + S3C2440_NFCONT;
- info->sel_bit = S3C2412_NFCONT_nFCE0;
- chip->legacy.cmd_ctrl = s3c2440_nand_hwcontrol;
- chip->legacy.dev_ready = s3c2412_nand_devready;
-
- if (readl(regs + S3C2410_NFCONF) & S3C2412_NFCONF_NANDBOOT)
- dev_info(info->device, "System booted from NAND\n");
-
- break;
- }
-
- chip->legacy.IO_ADDR_R = chip->legacy.IO_ADDR_W;
-
- nmtd->info = info;
- nmtd->set = set;
-
- chip->ecc.engine_type = info->platform->engine_type;
-
- /*
- * If you use u-boot BBT creation code, specifying this flag will
- * let the kernel fish out the BBT from the NAND.
- */
- if (set->flash_bbt)
- chip->bbt_options |= NAND_BBT_USE_FLASH;
-}
-
-/**
- * s3c2410_nand_attach_chip - Init the ECC engine after NAND scan
- * @chip: The NAND chip
- *
- * This hook is called by the core after the identification of the NAND chip,
- * once the relevant per-chip information is up to date. This call ensures
- * that we update the internal state accordingly.
- *
- * The internal state is currently limited to the ECC state information.
-*/
-static int s3c2410_nand_attach_chip(struct nand_chip *chip)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
-
- switch (chip->ecc.engine_type) {
-
- case NAND_ECC_ENGINE_TYPE_NONE:
- dev_info(info->device, "ECC disabled\n");
- break;
-
- case NAND_ECC_ENGINE_TYPE_SOFT:
- /*
- * This driver expects Hamming based ECC when engine_type is set
- * to NAND_ECC_ENGINE_TYPE_SOFT. Force ecc.algo to
- * NAND_ECC_ALGO_HAMMING to avoid adding an extra ecc_algo field
- * to s3c2410_platform_nand.
- */
- chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
- dev_info(info->device, "soft ECC\n");
- break;
-
- case NAND_ECC_ENGINE_TYPE_ON_HOST:
- chip->ecc.calculate = s3c2410_nand_calculate_ecc;
- chip->ecc.correct = s3c2410_nand_correct_data;
- chip->ecc.strength = 1;
-
- switch (info->cpu_type) {
- case TYPE_S3C2410:
- chip->ecc.hwctl = s3c2410_nand_enable_hwecc;
- chip->ecc.calculate = s3c2410_nand_calculate_ecc;
- break;
-
- case TYPE_S3C2412:
- chip->ecc.hwctl = s3c2412_nand_enable_hwecc;
- chip->ecc.calculate = s3c2412_nand_calculate_ecc;
- break;
-
- case TYPE_S3C2440:
- chip->ecc.hwctl = s3c2440_nand_enable_hwecc;
- chip->ecc.calculate = s3c2440_nand_calculate_ecc;
- break;
- }
-
- dev_dbg(info->device, "chip %p => page shift %d\n",
- chip, chip->page_shift);
-
- /* change the behaviour depending on whether we are using
- * the large or small page nand device */
- if (chip->page_shift > 10) {
- chip->ecc.size = 256;
- chip->ecc.bytes = 3;
- } else {
- chip->ecc.size = 512;
- chip->ecc.bytes = 3;
- mtd_set_ooblayout(nand_to_mtd(chip),
- &s3c2410_ooblayout_ops);
- }
-
- dev_info(info->device, "hardware ECC\n");
- break;
-
- default:
- dev_err(info->device, "invalid ECC mode!\n");
- return -EINVAL;
- }
-
- if (chip->bbt_options & NAND_BBT_USE_FLASH)
- chip->options |= NAND_SKIP_BBTSCAN;
-
- return 0;
-}
-
-static const struct nand_controller_ops s3c24xx_nand_controller_ops = {
- .attach_chip = s3c2410_nand_attach_chip,
- .setup_interface = s3c2410_nand_setup_interface,
-};
-
-static const struct of_device_id s3c24xx_nand_dt_ids[] = {
- {
- .compatible = "samsung,s3c2410-nand",
- .data = &s3c2410_nand_devtype_data,
- }, {
- /* also compatible with s3c6400 */
- .compatible = "samsung,s3c2412-nand",
- .data = &s3c2412_nand_devtype_data,
- }, {
- .compatible = "samsung,s3c2440-nand",
- .data = &s3c2440_nand_devtype_data,
- },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, s3c24xx_nand_dt_ids);
-
-static int s3c24xx_nand_probe_dt(struct platform_device *pdev)
-{
- const struct s3c24XX_nand_devtype_data *devtype_data;
- struct s3c2410_platform_nand *pdata;
- struct s3c2410_nand_info *info = platform_get_drvdata(pdev);
- struct device_node *np = pdev->dev.of_node, *child;
- struct s3c2410_nand_set *sets;
-
- devtype_data = of_device_get_match_data(&pdev->dev);
- if (!devtype_data)
- return -ENODEV;
-
- info->cpu_type = devtype_data->type;
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
-
- pdev->dev.platform_data = pdata;
-
- pdata->nr_sets = of_get_child_count(np);
- if (!pdata->nr_sets)
- return 0;
-
- sets = devm_kcalloc(&pdev->dev, pdata->nr_sets, sizeof(*sets),
- GFP_KERNEL);
- if (!sets)
- return -ENOMEM;
-
- pdata->sets = sets;
-
- for_each_available_child_of_node(np, child) {
- sets->name = (char *)child->name;
- sets->of_node = child;
- sets->nr_chips = 1;
-
- of_node_get(child);
-
- sets++;
- }
-
- return 0;
-}
-
-static int s3c24xx_nand_probe_pdata(struct platform_device *pdev)
-{
- struct s3c2410_nand_info *info = platform_get_drvdata(pdev);
-
- info->cpu_type = platform_get_device_id(pdev)->driver_data;
-
- return 0;
-}
-
-/* s3c24xx_nand_probe
- *
- * called by the device layer when it finds a device matching
- * one our driver can handle. This code checks to see if
- * it can allocate all necessary resources, then calls the
- * nand layer to look for devices
-*/
-static int s3c24xx_nand_probe(struct platform_device *pdev)
-{
- struct s3c2410_platform_nand *plat;
- struct s3c2410_nand_info *info;
- struct s3c2410_nand_mtd *nmtd;
- struct s3c2410_nand_set *sets;
- struct resource *res;
- int err = 0;
- int size;
- int nr_sets;
- int setno;
-
- info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
- if (info == NULL) {
- err = -ENOMEM;
- goto exit_error;
- }
-
- platform_set_drvdata(pdev, info);
-
- nand_controller_init(&info->controller);
- info->controller.ops = &s3c24xx_nand_controller_ops;
-
- /* get the clock source and enable it */
-
- info->clk = devm_clk_get(&pdev->dev, "nand");
- if (IS_ERR(info->clk)) {
- dev_err(&pdev->dev, "failed to get clock\n");
- err = -ENOENT;
- goto exit_error;
- }
-
- s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
-
- if (pdev->dev.of_node)
- err = s3c24xx_nand_probe_dt(pdev);
- else
- err = s3c24xx_nand_probe_pdata(pdev);
-
- if (err)
- goto exit_error;
-
- plat = to_nand_plat(pdev);
-
- /* allocate and map the resource */
-
- /* currently we assume we have the one resource */
- res = pdev->resource;
- size = resource_size(res);
-
- info->device = &pdev->dev;
- info->platform = plat;
-
- info->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(info->regs)) {
- err = PTR_ERR(info->regs);
- goto exit_error;
- }
-
- dev_dbg(&pdev->dev, "mapped registers at %p\n", info->regs);
-
- if (!plat->sets || plat->nr_sets < 1) {
- err = -EINVAL;
- goto exit_error;
- }
-
- sets = plat->sets;
- nr_sets = plat->nr_sets;
-
- info->mtd_count = nr_sets;
-
- /* allocate our information */
-
- size = nr_sets * sizeof(*info->mtds);
- info->mtds = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (info->mtds == NULL) {
- err = -ENOMEM;
- goto exit_error;
- }
-
- /* initialise all possible chips */
-
- nmtd = info->mtds;
-
- for (setno = 0; setno < nr_sets; setno++, nmtd++, sets++) {
- struct mtd_info *mtd = nand_to_mtd(&nmtd->chip);
-
- pr_debug("initialising set %d (%p, info %p)\n",
- setno, nmtd, info);
-
- mtd->dev.parent = &pdev->dev;
- s3c2410_nand_init_chip(info, nmtd, sets);
-
- err = nand_scan(&nmtd->chip, sets ? sets->nr_chips : 1);
- if (err)
- goto exit_error;
-
- s3c2410_nand_add_partition(info, nmtd, sets);
- }
-
- /* initialise the hardware */
- err = s3c2410_nand_inithw(info);
- if (err != 0)
- goto exit_error;
-
- if (allow_clk_suspend(info)) {
- dev_info(&pdev->dev, "clock idle support enabled\n");
- s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
- }
-
- return 0;
-
- exit_error:
- s3c24xx_nand_remove(pdev);
-
- if (err == 0)
- err = -EINVAL;
- return err;
-}
-
-/* PM Support */
-#ifdef CONFIG_PM
-
-static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
-{
- struct s3c2410_nand_info *info = platform_get_drvdata(dev);
-
- if (info) {
- info->save_sel = readl(info->sel_reg);
-
- /* For the moment, we must ensure nFCE is high during
- * the time we are suspended. This really should be
- * handled by suspending the MTDs we are using, but
- * that is currently not the case. */
-
- writel(info->save_sel | info->sel_bit, info->sel_reg);
-
- s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
- }
-
- return 0;
-}
-
-static int s3c24xx_nand_resume(struct platform_device *dev)
-{
- struct s3c2410_nand_info *info = platform_get_drvdata(dev);
- unsigned long sel;
-
- if (info) {
- s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
- s3c2410_nand_inithw(info);
-
- /* Restore the state of the nFCE line. */
-
- sel = readl(info->sel_reg);
- sel &= ~info->sel_bit;
- sel |= info->save_sel & info->sel_bit;
- writel(sel, info->sel_reg);
-
- s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
- }
-
- return 0;
-}
-
-#else
-#define s3c24xx_nand_suspend NULL
-#define s3c24xx_nand_resume NULL
-#endif
-
-/* driver device registration */
-
-static const struct platform_device_id s3c24xx_driver_ids[] = {
- {
- .name = "s3c2410-nand",
- .driver_data = TYPE_S3C2410,
- }, {
- .name = "s3c2440-nand",
- .driver_data = TYPE_S3C2440,
- }, {
- .name = "s3c2412-nand",
- .driver_data = TYPE_S3C2412,
- }, {
- .name = "s3c6400-nand",
- .driver_data = TYPE_S3C2412, /* compatible with 2412 */
- },
- { }
-};
-
-MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
-
-static struct platform_driver s3c24xx_nand_driver = {
- .probe = s3c24xx_nand_probe,
- .remove = s3c24xx_nand_remove,
- .suspend = s3c24xx_nand_suspend,
- .resume = s3c24xx_nand_resume,
- .id_table = s3c24xx_driver_ids,
- .driver = {
- .name = "s3c24xx-nand",
- .of_match_table = s3c24xx_nand_dt_ids,
- },
-};
-
-module_platform_driver(s3c24xx_nand_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
-MODULE_DESCRIPTION("S3C24XX MTD NAND driver");
diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
index a960403081f1..c08d6b176372 100644
--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
@@ -272,6 +272,7 @@ struct stm32_fmc2_nfc {
struct sg_table dma_data_sg;
struct sg_table dma_ecc_sg;
u8 *ecc_buf;
+ dma_addr_t dma_ecc_addr;
int dma_ecc_len;
u32 tx_dma_max_burst;
u32 rx_dma_max_burst;
@@ -902,17 +903,10 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
if (!write_data && !raw) {
/* Configure DMA ECC status */
- p = nfc->ecc_buf;
for_each_sg(nfc->dma_ecc_sg.sgl, sg, eccsteps, s) {
- sg_set_buf(sg, p, nfc->dma_ecc_len);
- p += nfc->dma_ecc_len;
- }
-
- ret = dma_map_sg(nfc->dev, nfc->dma_ecc_sg.sgl,
- eccsteps, dma_data_dir);
- if (!ret) {
- ret = -EIO;
- goto err_unmap_data;
+ sg_dma_address(sg) = nfc->dma_ecc_addr +
+ s * nfc->dma_ecc_len;
+ sg_dma_len(sg) = nfc->dma_ecc_len;
}
desc_ecc = dmaengine_prep_slave_sg(nfc->dma_ecc_ch,
@@ -921,7 +915,7 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
DMA_PREP_INTERRUPT);
if (!desc_ecc) {
ret = -ENOMEM;
- goto err_unmap_ecc;
+ goto err_unmap_data;
}
reinit_completion(&nfc->dma_ecc_complete);
@@ -929,7 +923,7 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
desc_ecc->callback_param = &nfc->dma_ecc_complete;
ret = dma_submit_error(dmaengine_submit(desc_ecc));
if (ret)
- goto err_unmap_ecc;
+ goto err_unmap_data;
dma_async_issue_pending(nfc->dma_ecc_ch);
}
@@ -949,7 +943,7 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
if (!write_data && !raw)
dmaengine_terminate_all(nfc->dma_ecc_ch);
ret = -ETIMEDOUT;
- goto err_unmap_ecc;
+ goto err_unmap_data;
}
/* Wait DMA data transfer completion */
@@ -969,11 +963,6 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf,
}
}
-err_unmap_ecc:
- if (!write_data && !raw)
- dma_unmap_sg(nfc->dev, nfc->dma_ecc_sg.sgl,
- eccsteps, dma_data_dir);
-
err_unmap_data:
dma_unmap_sg(nfc->dev, nfc->dma_data_sg.sgl, eccsteps, dma_data_dir);
@@ -996,9 +985,21 @@ static int stm32_fmc2_nfc_seq_write(struct nand_chip *chip, const u8 *buf,
/* Write oob */
if (oob_required) {
- ret = nand_change_write_column_op(chip, mtd->writesize,
- chip->oob_poi, mtd->oobsize,
- false);
+ unsigned int offset_in_page = mtd->writesize;
+ const void *buf = chip->oob_poi;
+ unsigned int len = mtd->oobsize;
+
+ if (!raw) {
+ struct mtd_oob_region oob_free;
+
+ mtd_ooblayout_free(mtd, 0, &oob_free);
+ offset_in_page += oob_free.offset;
+ buf += oob_free.offset;
+ len = oob_free.length;
+ }
+
+ ret = nand_change_write_column_op(chip, offset_in_page,
+ buf, len, false);
if (ret)
return ret;
}
@@ -1610,7 +1611,8 @@ static int stm32_fmc2_nfc_dma_setup(struct stm32_fmc2_nfc *nfc)
return ret;
/* Allocate a buffer to store ECC status registers */
- nfc->ecc_buf = devm_kzalloc(nfc->dev, FMC2_MAX_ECC_BUF_LEN, GFP_KERNEL);
+ nfc->ecc_buf = dmam_alloc_coherent(nfc->dev, FMC2_MAX_ECC_BUF_LEN,
+ &nfc->dma_ecc_addr, GFP_KERNEL);
if (!nfc->ecc_buf)
return -ENOMEM;
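
dmam_alloc_coherent() returns a CPU pointer and fills in the matching device address up front, so the buffer never needs per-transfer dma_map_sg()/dma_unmap_sg() calls — which is exactly what let the err_unmap_ecc unwinding above be deleted. A minimal sketch of the allocation side:

#include <linux/dma-mapping.h>

static void *alloc_ecc_status(struct device *dev, size_t len,
			      dma_addr_t *ecc_dma)
{
	/* CPU-visible pointer returned; *ecc_dma holds the bus address */
	return dmam_alloc_coherent(dev, len, ecc_dma, GFP_KERNEL);
}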
@@ -2156,7 +2158,6 @@ static struct platform_driver stm32_fmc2_nfc_driver = {
};
module_platform_driver(stm32_fmc2_nfc_driver);
-MODULE_ALIAS("platform:stm32_fmc2_nfc");
MODULE_AUTHOR("Christophe Kerello <christophe.kerello@st.com>");
MODULE_DESCRIPTION("STMicroelectronics STM32 FMC2 NFC driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index 162cd5f4f234..f6a8e8ae819d 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -2205,4 +2205,3 @@ module_platform_driver(sunxi_nfc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Boris BREZILLON");
MODULE_DESCRIPTION("Allwinner NAND Flash Controller driver");
-MODULE_ALIAS("platform:sunxi_nand");
diff --git a/drivers/mtd/nand/spi/Makefile b/drivers/mtd/nand/spi/Makefile
index 258da42451a4..6d3d203df048 100644
--- a/drivers/mtd/nand/spi/Makefile
+++ b/drivers/mtd/nand/spi/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
spinand-objs := core.o otp.o
-spinand-objs += alliancememory.o ato.o esmt.o foresee.o gigadevice.o macronix.o
+spinand-objs += alliancememory.o ato.o esmt.o fmsh.o foresee.o gigadevice.o macronix.o
spinand-objs += micron.o paragon.o skyhigh.o toshiba.o winbond.o xtx.o
obj-$(CONFIG_MTD_SPI_NAND) += spinand.o
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index b0898990b2a5..f92133b8e1a6 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -430,8 +430,16 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
* Dirmap accesses are allowed to toggle the CS.
* Toggling the CS during a continuous read is forbidden.
*/
- if (nbytes && req->continuous)
- return -EIO;
+ if (nbytes && req->continuous) {
+ /*
+ * A SPI controller with broken continuous read
+ * support was detected. Disable future use of
+ * continuous reads and return -EAGAIN so the read
+ * is retried in regular mode.
+ */
+ spinand->cont_read_possible = false;
+ return -EAGAIN;
+ }
}
if (req->datalen)
@@ -899,10 +907,19 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
old_stats = mtd->ecc_stats;
- if (spinand_use_cont_read(mtd, from, ops))
+ if (spinand_use_cont_read(mtd, from, ops)) {
ret = spinand_mtd_continuous_page_read(mtd, from, ops, &max_bitflips);
- else
+ if (ret == -EAGAIN && !spinand->cont_read_possible) {
+ /*
+ * A SPI controller with broken continuous read
+ * support was detected (see spinand_read_from_cache_op());
+ * repeat the read in regular mode.
+ */
+ ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips);
+ }
+ } else {
ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips);
+ }
if (ops->stats) {
ops->stats->uncorrectable_errors +=
@@ -1093,22 +1110,50 @@ static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
return ret;
}
+static struct spi_mem_dirmap_desc *spinand_create_rdesc(
+ struct spinand_device *spinand,
+ struct spi_mem_dirmap_info *info)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ struct spi_mem_dirmap_desc *desc = NULL;
+
+ if (spinand->cont_read_possible) {
+ /*
+ * the SPI controller may return an error if info->length
+ * is too large
+ */
+ info->length = nanddev_eraseblock_size(nand);
+ desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
+ spinand->spimem, info);
+ }
+
+ if (IS_ERR_OR_NULL(desc)) {
+ /*
+ * continuous reading is not supported by the flash or
+ * its SPI controller; fall back to regular reading
+ */
+ spinand->cont_read_possible = false;
+
+ info->length = nanddev_page_size(nand) +
+ nanddev_per_page_oobsize(nand);
+ desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
+ spinand->spimem, info);
+ }
+
+ return desc;
+}
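
spinand_create_rdesc() is a try-large-then-fall-back pattern: attempt an eraseblock-sized read window for continuous reads first, and on any failure drop permanently to the page-sized window every controller must handle. The shape in isolation, as plain C with a hypothetical map() callback standing in for devm_spi_mem_dirmap_create():

#include <stdbool.h>
#include <stddef.h>

static void *create_read_window(void *(*map)(size_t len), size_t big,
				size_t small, bool *big_possible)
{
	void *desc = NULL;

	if (*big_possible)
		desc = map(big);	/* the controller may reject this */

	if (!desc) {
		*big_possible = false;	/* never attempt the big window again */
		desc = map(small);	/* the always-supported fallback */
	}

	return desc;
}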
+
static int spinand_create_dirmap(struct spinand_device *spinand,
unsigned int plane)
{
struct nand_device *nand = spinand_to_nand(spinand);
- struct spi_mem_dirmap_info info = {
- .length = nanddev_page_size(nand) +
- nanddev_per_page_oobsize(nand),
- };
+ struct spi_mem_dirmap_info info = { 0 };
struct spi_mem_dirmap_desc *desc;
- if (spinand->cont_read_possible)
- info.length = nanddev_eraseblock_size(nand);
-
/* The plane number is passed in MSB just above the column address */
info.offset = plane << fls(nand->memorg.pagesize);
+ info.length = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
info.op_tmpl = *spinand->op_templates.update_cache;
desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
spinand->spimem, &info);
@@ -1118,8 +1163,7 @@ static int spinand_create_dirmap(struct spinand_device *spinand,
spinand->dirmaps[plane].wdesc = desc;
info.op_tmpl = *spinand->op_templates.read_cache;
- desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
- spinand->spimem, &info);
+ desc = spinand_create_rdesc(spinand, &info);
if (IS_ERR(desc))
return PTR_ERR(desc);
@@ -1132,6 +1176,7 @@ static int spinand_create_dirmap(struct spinand_device *spinand,
return 0;
}
+ info.length = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
info.op_tmpl = *spinand->op_templates.update_cache;
info.op_tmpl.data.ecc = true;
desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
@@ -1143,8 +1188,7 @@ static int spinand_create_dirmap(struct spinand_device *spinand,
info.op_tmpl = *spinand->op_templates.read_cache;
info.op_tmpl.data.ecc = true;
- desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
- spinand->spimem, &info);
+ desc = spinand_create_rdesc(spinand, &info);
if (IS_ERR(desc))
return PTR_ERR(desc);
@@ -1184,6 +1228,7 @@ static const struct spinand_manufacturer *spinand_manufacturers[] = {
&alliancememory_spinand_manufacturer,
&ato_spinand_manufacturer,
&esmt_c8_spinand_manufacturer,
+ &fmsh_spinand_manufacturer,
&foresee_spinand_manufacturer,
&gigadevice_spinand_manufacturer,
&macronix_spinand_manufacturer,
diff --git a/drivers/mtd/nand/spi/fmsh.c b/drivers/mtd/nand/spi/fmsh.c
new file mode 100644
index 000000000000..8b2097bfc771
--- /dev/null
+++ b/drivers/mtd/nand/spi/fmsh.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020-2021 Rockchip Electronics Co., Ltd.
+ *
+ * Author: Dingqiang Lin <jon.lin@rock-chips.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_FMSH 0xA1
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
+
+static int fm25s01a_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ return -ERANGE;
+}
+
+static int fm25s01a_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ region->offset = 2;
+ region->length = 62;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops fm25s01a_ooblayout = {
+ .ecc = fm25s01a_ooblayout_ecc,
+ .free = fm25s01a_ooblayout_free,
+};
+
+static const struct spinand_info fmsh_spinand_table[] = {
+ SPINAND_INFO("FM25S01A",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xE4),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&fm25s01a_ooblayout, NULL)),
+};
+
+static const struct spinand_manufacturer_ops fmsh_spinand_manuf_ops = {
+};
+
+const struct spinand_manufacturer fmsh_spinand_manufacturer = {
+ .id = SPINAND_MFR_FMSH,
+ .name = "Fudan Micro",
+ .chips = fmsh_spinand_table,
+ .nchips = ARRAY_SIZE(fmsh_spinand_table),
+ .ops = &fmsh_spinand_manuf_ops,
+};
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
index 93e40431dbe2..72ad36c9a126 100644
--- a/drivers/mtd/nand/spi/gigadevice.c
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -4,6 +4,7 @@
* Chuanhong Guo <gch981213@gmail.com>
*/
+#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
@@ -23,6 +24,18 @@
#define GD5FXGQ4UXFXXG_STATUS_ECC_1_3_BITFLIPS (1 << 4)
#define GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR (7 << 4)
+/* Feature bit definitions */
+#define GD_FEATURE_NR BIT(3) /* Normal Read(1=normal,0=continuous) */
+#define GD_FEATURE_CRDC BIT(2) /* Continuous Read Dummy */
+
+/* ECC status extraction helpers */
+#define GD_ECCSR_LAST_PAGE(eccsr) FIELD_GET(GENMASK(3, 0), eccsr)
+#define GD_ECCSR_ACCUMULATED(eccsr) FIELD_GET(GENMASK(7, 4), eccsr)
+
+struct gigadevice_priv {
+ bool continuous_read;
+};
+
static SPINAND_OP_VARIANTS(read_cache_variants,
SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0, 0),
SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
@@ -63,6 +76,74 @@ static SPINAND_OP_VARIANTS(update_cache_variants,
SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
+static int gd5fxgm9_get_eccsr(struct spinand_device *spinand, u8 *eccsr)
+{
+ struct gigadevice_priv *priv = spinand->priv;
+ struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x7c, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_DUMMY(1, 1),
+ SPI_MEM_OP_DATA_IN(1, eccsr, 1));
+ int ret;
+
+ ret = spi_mem_exec_op(spinand->spimem, &op);
+ if (ret)
+ return ret;
+
+ if (priv->continuous_read)
+ *eccsr = GD_ECCSR_ACCUMULATED(*eccsr);
+ else
+ *eccsr = GD_ECCSR_LAST_PAGE(*eccsr);
+
+ return 0;
+}
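The 0x7c read above fetches GigaDevice's extended ECC status byte, which packs two 4-bit bitflip counters; the helper keeps whichever half matches the current read mode. Worked out for an assumed register value:

/* Worked example of the FIELD_GET() extraction above (eccsr assumed). */
unsigned char eccsr = 0x53;			/* 0101 0011b */
unsigned int last  = eccsr & 0x0f;		/* GD_ECCSR_LAST_PAGE   -> 3 */
unsigned int accum = (eccsr >> 4) & 0x0f;	/* GD_ECCSR_ACCUMULATED -> 5 */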
+
+static int gd5fxgm9_ecc_get_status(struct spinand_device *spinand, u8 status)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ u8 eccsr;
+ int ret;
+
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS:
+ ret = gd5fxgm9_get_eccsr(spinand, spinand->scratchbuf);
+ if (ret)
+ return nanddev_get_ecc_conf(nand)->strength;
+
+ eccsr = *spinand->scratchbuf;
+ if (WARN_ON(!eccsr || eccsr > nanddev_get_ecc_conf(nand)->strength))
+ return nanddev_get_ecc_conf(nand)->strength;
+
+ return eccsr;
+
+ case GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS:
+ return 8;
+
+ case STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int gd5fxgm9_set_continuous_read(struct spinand_device *spinand, bool enable)
+{
+ struct gigadevice_priv *priv = spinand->priv;
+ int ret;
+
+ ret = spinand_upd_cfg(spinand, GD_FEATURE_NR,
+ enable ? 0 : GD_FEATURE_NR);
+ if (ret)
+ return ret;
+
+ priv->continuous_read = enable;
+
+ return 0;
+}
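spinand_upd_cfg() performs a masked read-modify-write of the configuration register: clearing GD_FEATURE_NR (the "normal read" bit) switches the chip to continuous reads, setting it restores page-at-a-time reads. A sketch of the equivalent register arithmetic, with an assumed starting value:

/* Sketch of the masked update performed above (cfg value assumed). */
int enable = 1;				/* request continuous read */
unsigned char cfg  = 0x08;		/* GD_FEATURE_NR currently set */
unsigned char mask = 0x08;		/* GD_FEATURE_NR */
unsigned char val  = enable ? 0 : mask;	/* mirrors the call above */

cfg = (cfg & ~mask) | val;		/* -> 0x00: continuous read on */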
+
static int gd5fxgq4xa_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
@@ -542,7 +623,8 @@ static const struct spinand_info gigadevice_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
- gd5fxgq4uexxg_ecc_get_status)),
+ gd5fxgm9_ecc_get_status),
+ SPINAND_CONT_READ(gd5fxgm9_set_continuous_read)),
SPINAND_INFO("GD5F1GM9RExxG",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x81, 0x01),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
@@ -552,10 +634,31 @@ static const struct spinand_info gigadevice_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
- gd5fxgq4uexxg_ecc_get_status)),
+ gd5fxgm9_ecc_get_status),
+ SPINAND_CONT_READ(gd5fxgm9_set_continuous_read)),
};
+static int gd5fxgm9_spinand_init(struct spinand_device *spinand)
+{
+ struct gigadevice_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spinand->priv = priv;
+
+ return 0;
+}
+
+static void gd5fxgm9_spinand_cleanup(struct spinand_device *spinand)
+{
+ kfree(spinand->priv);
+}
+
static const struct spinand_manufacturer_ops gigadevice_spinand_manuf_ops = {
+ .init = gd5fxgm9_spinand_init,
+ .cleanup = gd5fxgm9_spinand_cleanup,
};
const struct spinand_manufacturer gigadevice_spinand_manufacturer = {
diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c
index 87053389a1fc..4870b2d5edb2 100644
--- a/drivers/mtd/nand/spi/winbond.c
+++ b/drivers/mtd/nand/spi/winbond.c
@@ -176,6 +176,36 @@ static const struct mtd_ooblayout_ops w25n02kv_ooblayout = {
.free = w25n02kv_ooblayout_free,
};
+static int w25n01jw_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ region->offset = (16 * section) + 12;
+ region->length = 4;
+
+ return 0;
+}
+
+static int w25n01jw_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ region->offset = (16 * section);
+ region->length = 12;
+
+ /* Extract BBM */
+ if (!section) {
+ region->offset += 2;
+ region->length -= 2;
+ }
+
+ return 0;
+}
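Taken together, the two callbacks above split each 16-byte chunk of the 64-byte OOB area into 12 free bytes followed by 4 ECC bytes, with section 0 losing its first two bytes to the bad block marker:

/* Resulting W25N01JW OOB map (derived from the callbacks above):
 *   section 0: free  2..11 (BBM at 0..1), ecc 12..15
 *   section 1: free 16..27,               ecc 28..31
 *   section 2: free 32..43,               ecc 44..47
 *   section 3: free 48..59,               ecc 60..63
 */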
+
static int w35n01jw_ooblayout_ecc(struct mtd_info *mtd, int section,
struct mtd_oob_region *region)
{
@@ -206,6 +236,11 @@ static int w35n01jw_ooblayout_free(struct mtd_info *mtd, int section,
return 0;
}
+static const struct mtd_ooblayout_ops w25n01jw_ooblayout = {
+ .ecc = w25n01jw_ooblayout_ecc,
+ .free = w25n01jw_ooblayout_free,
+};
+
static const struct mtd_ooblayout_ops w35n01jw_ooblayout = {
.ecc = w35n01jw_ooblayout_ecc,
.free = w35n01jw_ooblayout_free,
@@ -394,7 +429,7 @@ static const struct spinand_info winbond_spinand_table[] = {
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL),
+ SPINAND_ECCINFO(&w25n01jw_ooblayout, NULL),
SPINAND_CONFIGURE_CHIP(w25n0xjw_hs_cfg)),
SPINAND_INFO("W25N01KV", /* 3.3V */
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xae, 0x21),
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c
index c546f8c5f24d..be26cc67a1c4 100644
--- a/drivers/mtd/rfd_ftl.c
+++ b/drivers/mtd/rfd_ftl.c
@@ -190,8 +190,8 @@ static int scan_header(struct partition *part)
if (!part->blocks)
goto err;
- part->sector_map = vmalloc(array_size(sizeof(u_long),
- part->sector_count));
+ part->sector_map = vmalloc_array(part->sector_count,
+ sizeof(u_long));
if (!part->sector_map)
goto err;
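The replacement helper bakes the multiplication-overflow check into the allocation itself; a note on the equivalence, as documented for the kernel allocator API:

/* The two forms request the same allocation:
 *   vmalloc(array_size(sizeof(u_long), n)) == vmalloc_array(n, sizeof(u_long))
 * both returning NULL if n * sizeof(u_long) would overflow.
 */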
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index ac4b960101cc..20ea80450f22 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -2014,6 +2014,76 @@ static const struct flash_info *spi_nor_detect(struct spi_nor *nor)
return info;
}
+/*
+ * On Octal DTR capable flashes, reads cannot start or end at an odd
+ * address in Octal DTR mode. Extra bytes need to be read at the start
+ * or end to make sure both the start address and length remain even.
+ */
+static int spi_nor_octal_dtr_read(struct spi_nor *nor, loff_t from, size_t len,
+ u_char *buf)
+{
+ u_char *tmp_buf;
+ size_t tmp_len;
+ loff_t start, end;
+ int ret, bytes_read;
+
+ if (IS_ALIGNED(from, 2) && IS_ALIGNED(len, 2))
+ return spi_nor_read_data(nor, from, len, buf);
+ else if (IS_ALIGNED(from, 2) && len > PAGE_SIZE)
+ return spi_nor_read_data(nor, from, round_down(len, PAGE_SIZE),
+ buf);
+
+ tmp_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!tmp_buf)
+ return -ENOMEM;
+
+ start = round_down(from, 2);
+ end = round_up(from + len, 2);
+
+ /*
+ * Avoid allocating too much memory. The requested read length might be
+ * quite large. Allocating a buffer just as large (slightly bigger, in
+ * fact) would put unnecessary memory pressure on the system.
+ *
+ * For example, if the read is from 3 to 1M, then this will read from 2
+ * to 4098. The reads from 4098 to 1M will then not need a temporary
+ * buffer so they can proceed as normal.
+ */
+ tmp_len = min_t(size_t, end - start, PAGE_SIZE);
+
+ ret = spi_nor_read_data(nor, start, tmp_len, tmp_buf);
+ if (ret == 0) {
+ ret = -EIO;
+ goto out;
+ }
+ if (ret < 0)
+ goto out;
+
+ /*
+ * More bytes are read than actually requested, but that number can't be
+ * reported to the calling function or it will confuse its calculations.
+ * Calculate how many of the _requested_ bytes were read.
+ */
+ bytes_read = ret;
+
+ if (from != start)
+ ret -= from - start;
+
+ /*
+ * Only account for extra bytes at the end if they were actually read.
+ * For example, if the total length was truncated because of temporary
+ * buffer size limit then the adjustment for the extra bytes at the end
+ * is not needed.
+ */
+ if (start + bytes_read == end)
+ ret -= end - (from + len);
+
+ memcpy(buf, tmp_buf + (from - start), ret);
+out:
+ kfree(tmp_buf);
+ return ret;
+}
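Tracing the comment's worked example through the code above, with PAGE_SIZE assumed to be 4096:

/* Worked example for spi_nor_octal_dtr_read() (PAGE_SIZE assumed 4096):
 * from = 3, len = 1048573 (the "read from 3 to 1M" case above)
 *   start   = round_down(3, 2)            = 2
 *   end     = round_up(3 + 1048573, 2)    = 1048576
 *   tmp_len = min(end - start, PAGE_SIZE) = 4096 -> reads bytes 2..4097
 *   bytes_read = 4096; ret -= (from - start)     -> 4095 requested bytes
 *   start + bytes_read = 4098 != end, so no tail adjustment; the caller
 *   resumes at 4098 (even) and needs no temporary buffer from there on.
 */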
+
static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
@@ -2031,7 +2101,11 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
while (len) {
loff_t addr = from;
- ret = spi_nor_read_data(nor, addr, len, buf);
+ if (nor->read_proto == SNOR_PROTO_8_8_8_DTR)
+ ret = spi_nor_octal_dtr_read(nor, addr, len, buf);
+ else
+ ret = spi_nor_read_data(nor, addr, len, buf);
+
if (ret == 0) {
/* We shouldn't see 0-length reads */
ret = -EIO;
@@ -2055,6 +2129,68 @@ read_err:
}
/*
+ * On Octal DTR capable flashes, writes cannot start or end at an odd address
+ * in Octal DTR mode. Extra 0xff bytes need to be appended or prepended to
+ * make sure the start address and end address are even. 0xff is used because
+ * on NOR flashes a program operation can only flip bits from 1 to 0, not the
+ * other way round. 0 to 1 flip needs to happen via erases.
+ */
+static int spi_nor_octal_dtr_write(struct spi_nor *nor, loff_t to, size_t len,
+ const u8 *buf)
+{
+ u8 *tmp_buf;
+ size_t bytes_written;
+ loff_t start, end;
+ int ret;
+
+ if (IS_ALIGNED(to, 2) && IS_ALIGNED(len, 2))
+ return spi_nor_write_data(nor, to, len, buf);
+
+ tmp_buf = kmalloc(nor->params->page_size, GFP_KERNEL);
+ if (!tmp_buf)
+ return -ENOMEM;
+
+ memset(tmp_buf, 0xff, nor->params->page_size);
+
+ start = round_down(to, 2);
+ end = round_up(to + len, 2);
+
+ memcpy(tmp_buf + (to - start), buf, len);
+
+ ret = spi_nor_write_data(nor, start, end - start, tmp_buf);
+ if (ret == 0) {
+ ret = -EIO;
+ goto out;
+ }
+ if (ret < 0)
+ goto out;
+
+ /*
+ * More bytes are written than actually requested, but that number can't
+ * be reported to the calling function or it will confuse its
+ * calculations. Calculate how many of the _requested_ bytes were
+ * written.
+ */
+ bytes_written = ret;
+
+ if (to != start)
+ ret -= to - start;
+
+ /*
+ * Only account for extra bytes at the end if they were actually
+ * written. For example, if for some reason the controller could only
+ * complete a partial write then the adjustment for the extra bytes at
+ * the end is not needed.
+ */
+ if (start + bytes_written == end)
+ ret -= end - (to + len);
+
+out:
+ kfree(tmp_buf);
+ return ret;
+}
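The write path does the same bookkeeping with 0xff padding; worked through for a small unaligned write, page size assumed 256:

/* Worked example for spi_nor_octal_dtr_write() (page_size assumed 256):
 * to = 7, len = 5
 *   start = round_down(7, 2)   = 6
 *   end   = round_up(7 + 5, 2) = 12
 *   tmp_buf = ff ff ff ff ff ff; payload lands at offset to - start = 1
 *   write 6 bytes at 6; on full success ret = 6
 *   ret -= (to - start)      -> 5
 *   ret -= end - (to + len)  -> 5 - 0 = 5 requested bytes reported back
 */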
+
+/*
* Write an address range to the nor chip. Data must be written in
* FLASH_PAGESIZE chunks. The address range may be any size provided
* it is within the physical boundaries.
@@ -2090,7 +2226,12 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
goto write_err;
}
- ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
+ if (nor->write_proto == SNOR_PROTO_8_8_8_DTR)
+ ret = spi_nor_octal_dtr_write(nor, addr, page_remain,
+ buf + i);
+ else
+ ret = spi_nor_write_data(nor, addr, page_remain,
+ buf + i);
spi_nor_unlock_device(nor);
if (ret < 0)
goto write_err;
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 39cc0a6a4d37..b53fd147fa65 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -282,12 +282,12 @@ static void ubiblock_release(struct gendisk *gd)
mutex_unlock(&dev->dev_mutex);
}
-static int ubiblock_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int ubiblock_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
/* Some tools might require this information */
geo->heads = 1;
geo->cylinders = 1;
- geo->sectors = get_capacity(bdev->bd_disk);
+ geo->sectors = get_capacity(disk);
geo->start = 0;
return 0;
}
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index b29628d46be9..ac12eaf11755 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -76,24 +76,11 @@ config WIREGUARD
tristate "WireGuard secure network tunnel"
depends on NET && INET
depends on IPV6 || !IPV6
- depends on !KMSAN # KMSAN doesn't support the crypto configs below
select NET_UDP_TUNNEL
select DST_CACHE
- select CRYPTO
select CRYPTO_LIB_CURVE25519
select CRYPTO_LIB_CHACHA20POLY1305
- select CRYPTO_CHACHA20_X86_64 if X86 && 64BIT
- select CRYPTO_POLY1305_X86_64 if X86 && 64BIT
- select CRYPTO_BLAKE2S_X86 if X86 && 64BIT
- select CRYPTO_CURVE25519_X86 if X86 && 64BIT
- select CRYPTO_CHACHA20_NEON if ARM || (ARM64 && KERNEL_MODE_NEON)
- select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON
- select CRYPTO_POLY1305_ARM if ARM
- select CRYPTO_BLAKE2S_ARM if ARM
- select CRYPTO_CURVE25519_NEON if ARM && KERNEL_MODE_NEON
- select CRYPTO_CHACHA_MIPS if CPU_MIPS32_R2
- select CRYPTO_POLY1305_MIPS if MIPS
- select CRYPTO_CHACHA_S390 if S390
+ select CRYPTO_LIB_UTILS
help
WireGuard is a secure, fast, and easy to use replacement for IPSec
that uses modern cryptography and clever networking tricks. It's
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index dc50797a2ed0..c01e2c2f7d6c 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -67,8 +67,7 @@ static int netdev_boot_setup_add(char *name, struct ifmap *map)
s = dev_boot_setup;
for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
- memset(s[i].name, 0, sizeof(s[i].name));
- strscpy(s[i].name, name, IFNAMSIZ);
+ strscpy_pad(s[i].name, name);
memcpy(&s[i].map, map, sizeof(s[i].map));
break;
}
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
index ed86537b2f61..902c817a0dea 100644
--- a/drivers/net/amt.c
+++ b/drivers/net/amt.c
@@ -11,6 +11,7 @@
#include <linux/net.h>
#include <linux/igmp.h>
#include <linux/workqueue.h>
+#include <net/flow.h>
#include <net/pkt_sched.h>
#include <net/net_namespace.h>
#include <net/ip.h>
@@ -28,6 +29,7 @@
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/inet_common.h>
+#include <net/inet_dscp.h>
#include <net/ip6_checksum.h>
static struct workqueue_struct *amt_wq;
@@ -1018,7 +1020,7 @@ static bool amt_send_membership_update(struct amt_dev *amt,
fl4.flowi4_oif = amt->stream_dev->ifindex;
fl4.daddr = amt->remote_ip;
fl4.saddr = amt->local_ip;
- fl4.flowi4_tos = AMT_TOS;
+ fl4.flowi4_dscp = inet_dsfield_to_dscp(AMT_TOS);
fl4.flowi4_proto = IPPROTO_UDP;
rt = ip_route_output_key(amt->net, &fl4);
if (IS_ERR(rt)) {
@@ -1133,7 +1135,7 @@ static bool amt_send_membership_query(struct amt_dev *amt,
fl4.flowi4_oif = amt->stream_dev->ifindex;
fl4.daddr = tunnel->ip4;
fl4.saddr = amt->local_ip;
- fl4.flowi4_tos = AMT_TOS;
+ fl4.flowi4_dscp = inet_dsfield_to_dscp(AMT_TOS);
fl4.flowi4_proto = IPPROTO_UDP;
rt = ip_route_output_key(amt->net, &fl4);
if (IS_ERR(rt)) {
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 2fca8e84ab10..49717b7b82a2 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -95,13 +95,13 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker);
static void ad_mux_machine(struct port *port, bool *update_slave_arr);
static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port);
static void ad_tx_machine(struct port *port);
-static void ad_periodic_machine(struct port *port, struct bond_params *bond_params);
+static void ad_periodic_machine(struct port *port);
static void ad_port_selection_logic(struct port *port, bool *update_slave_arr);
static void ad_agg_selection_logic(struct aggregator *aggregator,
bool *update_slave_arr);
static void ad_clear_agg(struct aggregator *aggregator);
static void ad_initialize_agg(struct aggregator *aggregator);
-static void ad_initialize_port(struct port *port, int lacp_fast);
+static void ad_initialize_port(struct port *port, const struct bond_params *bond_params);
static void ad_enable_collecting(struct port *port);
static void ad_disable_distributing(struct port *port,
bool *update_slave_arr);
@@ -436,6 +436,7 @@ static void __ad_actor_update_port(struct port *port)
port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr;
port->actor_system_priority = BOND_AD_INFO(bond).system.sys_priority;
+ port->actor_port_priority = SLAVE_AD_INFO(port->slave)->port_priority;
}
/* Conversions */
@@ -746,6 +747,18 @@ static int __agg_active_ports(struct aggregator *agg)
return active;
}
+static unsigned int __agg_ports_priority(const struct aggregator *agg)
+{
+ struct port *port = agg->lag_ports;
+ unsigned int prio = 0;
+
+ for (; port; port = port->next_port_in_aggregator)
+ if (port->is_enabled)
+ prio += port->actor_port_priority;
+
+ return prio;
+}
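With the default actor_port_prio of 255 (see the bond_actor_port_prio_tbl addition below), this sum reduces to 255 times the enabled-port count, so BOND_AD_PRIO behaves like BOND_AD_COUNT until an administrator raises individual port priorities:

/* Example: default priority 255 on every enabled port.
 *   agg A: 3 enabled ports -> 3 * 255 = 765
 *   agg B: 2 enabled ports -> 2 * 255 = 510  -> A wins
 * Raising one of B's ports to 600 gives B 855 and flips the choice.
 */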
+
/**
* __get_agg_bandwidth - get the total bandwidth of an aggregator
* @aggregator: the aggregator we're looking at
@@ -1307,10 +1320,16 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
* case of EXPIRED even if LINK_DOWN didn't arrive for
* the port.
*/
- port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION;
port->sm_vars &= ~AD_PORT_MATCHED;
+ /* Based on IEEE 802.1AX-2014, Figure 6-18 - Receive
+ * machine state diagram, the state should be
+ * Partner_Oper_Port_State.Synchronization = FALSE;
+ * Partner_Oper_Port_State.LACP_Timeout = Short Timeout;
+ * start current_while_timer(Short Timeout);
+ * Actor_Oper_Port_State.Expired = TRUE;
+ */
+ port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION;
port->partner_oper.port_state |= LACP_STATE_LACP_TIMEOUT;
- port->partner_oper.port_state |= LACP_STATE_LACP_ACTIVITY;
port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(AD_SHORT_TIMEOUT));
port->actor_oper_port_state |= LACP_STATE_EXPIRED;
port->sm_vars |= AD_PORT_CHURNED;
@@ -1417,11 +1436,10 @@ static void ad_tx_machine(struct port *port)
/**
* ad_periodic_machine - handle a port's periodic state machine
* @port: the port we're looking at
- * @bond_params: bond parameters we will use
*
* Turn the ntt flag on periodically to perform periodic transmission of LACPDUs.
*/
-static void ad_periodic_machine(struct port *port, struct bond_params *bond_params)
+static void ad_periodic_machine(struct port *port)
{
periodic_states_t last_state;
@@ -1430,8 +1448,7 @@ static void ad_periodic_machine(struct port *port, struct bond_params *bond_para
/* check if port was reinitialized */
if (((port->sm_vars & AD_PORT_BEGIN) || !(port->sm_vars & AD_PORT_LACP_ENABLED) || !port->is_enabled) ||
- (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY)) ||
- !bond_params->lacp_active) {
+ (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY))) {
port->sm_periodic_state = AD_NO_PERIODIC;
}
/* check if state machine should change state */
@@ -1703,6 +1720,9 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
* 4. Therefore, current and best both have partner replies or
* both do not, so perform selection policy:
*
+ * BOND_AD_PRIO: Select by total priority of ports. If priority
+ * is equal, select by count.
+ *
* BOND_AD_COUNT: Select by count of ports. If count is equal,
* select by bandwidth.
*
@@ -1724,6 +1744,14 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
return best;
switch (__get_agg_selection_mode(curr->lag_ports)) {
+ case BOND_AD_PRIO:
+ if (__agg_ports_priority(curr) > __agg_ports_priority(best))
+ return curr;
+
+ if (__agg_ports_priority(curr) < __agg_ports_priority(best))
+ return best;
+
+ fallthrough;
case BOND_AD_COUNT:
if (__agg_active_ports(curr) > __agg_active_ports(best))
return curr;
@@ -1789,6 +1817,10 @@ static int agg_device_up(const struct aggregator *agg)
* (slaves), and reselect whenever a link state change takes place or the
* set of slaves in the bond changes.
*
+ * BOND_AD_PRIO: select the aggregator with highest total priority of ports
+ * (slaves), and reselect whenever a link state change takes place or the
+ * set of slaves in the bond changes.
+ *
* FIXME: this function MUST be called with the first agg in the bond, or
* __get_active_agg() won't work correctly. This function should be better
* called with the bond itself, and retrieve the first agg from it.
@@ -1955,16 +1987,16 @@ static void ad_initialize_agg(struct aggregator *aggregator)
/**
* ad_initialize_port - initialize a given port's parameters
* @port: the port we're looking at
- * @lacp_fast: boolean. whether fast periodic should be used
+ * @bond_params: bond parameters we will use
*/
-static void ad_initialize_port(struct port *port, int lacp_fast)
+static void ad_initialize_port(struct port *port, const struct bond_params *bond_params)
{
static const struct port_params tmpl = {
.system_priority = 0xffff,
.key = 1,
.port_number = 1,
.port_priority = 0xff,
- .port_state = 1,
+ .port_state = 0,
};
static const struct lacpdu lacpdu = {
.subtype = 0x01,
@@ -1982,12 +2014,14 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
port->actor_port_priority = 0xff;
port->actor_port_aggregator_identifier = 0;
port->ntt = false;
- port->actor_admin_port_state = LACP_STATE_AGGREGATION |
- LACP_STATE_LACP_ACTIVITY;
- port->actor_oper_port_state = LACP_STATE_AGGREGATION |
- LACP_STATE_LACP_ACTIVITY;
+ port->actor_admin_port_state = LACP_STATE_AGGREGATION;
+ port->actor_oper_port_state = LACP_STATE_AGGREGATION;
+ if (bond_params->lacp_active) {
+ port->actor_admin_port_state |= LACP_STATE_LACP_ACTIVITY;
+ port->actor_oper_port_state |= LACP_STATE_LACP_ACTIVITY;
+ }
- if (lacp_fast)
+ if (bond_params->lacp_fast)
port->actor_oper_port_state |= LACP_STATE_LACP_TIMEOUT;
memcpy(&port->partner_admin, &tmpl, sizeof(tmpl));
@@ -2201,7 +2235,10 @@ void bond_3ad_bind_slave(struct slave *slave)
/* port initialization */
port = &(SLAVE_AD_INFO(slave)->port);
- ad_initialize_port(port, bond->params.lacp_fast);
+ ad_initialize_port(port, &bond->params);
+
+ /* Port priority has been initialized; propagate it to the slave's AD info */
+ SLAVE_AD_INFO(slave)->port_priority = port->actor_port_priority;
port->slave = slave;
port->actor_port_number = SLAVE_AD_INFO(slave)->id;
@@ -2513,7 +2550,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
}
ad_rx_machine(NULL, port);
- ad_periodic_machine(port, &bond->params);
+ ad_periodic_machine(port);
ad_port_selection_logic(port, &update_slave_arr);
ad_mux_machine(port, &update_slave_arr);
ad_tx_machine(port);
@@ -2883,6 +2920,31 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
spin_unlock_bh(&bond->mode_lock);
}
+/**
+ * bond_3ad_update_lacp_active - change the lacp active
+ * @bond: bonding struct
+ *
+ * Update actor_oper_port_state when lacp_active is modified.
+ */
+void bond_3ad_update_lacp_active(struct bonding *bond)
+{
+ struct port *port = NULL;
+ struct list_head *iter;
+ struct slave *slave;
+ int lacp_active;
+
+ lacp_active = bond->params.lacp_active;
+ spin_lock_bh(&bond->mode_lock);
+ bond_for_each_slave(bond, slave, iter) {
+ port = &(SLAVE_AD_INFO(slave)->port);
+ if (lacp_active)
+ port->actor_oper_port_state |= LACP_STATE_LACP_ACTIVITY;
+ else
+ port->actor_oper_port_state &= ~LACP_STATE_LACP_ACTIVITY;
+ }
+ spin_unlock_bh(&bond->mode_lock);
+}
+
size_t bond_3ad_stats_size(void)
{
return nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_RX */
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 257333c88710..4da619210c1f 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -142,8 +142,7 @@ module_param(downdelay, int, 0);
MODULE_PARM_DESC(downdelay, "Delay before considering link down, "
"in milliseconds");
module_param(use_carrier, int, 0);
-MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
- "0 for off, 1 for on (default)");
+MODULE_PARM_DESC(use_carrier, "option obsolete, use_carrier cannot be disabled");
module_param(mode, charp, 0);
MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, "
"1 for active-backup, 2 for balance-xor, "
@@ -830,77 +829,6 @@ const char *bond_slave_link_status(s8 link)
}
}
-/* if <dev> supports MII link status reporting, check its link status.
- *
- * We either do MII/ETHTOOL ioctls, or check netif_carrier_ok(),
- * depending upon the setting of the use_carrier parameter.
- *
- * Return either BMSR_LSTATUS, meaning that the link is up (or we
- * can't tell and just pretend it is), or 0, meaning that the link is
- * down.
- *
- * If reporting is non-zero, instead of faking link up, return -1 if
- * both ETHTOOL and MII ioctls fail (meaning the device does not
- * support them). If use_carrier is set, return whatever it says.
- * It'd be nice if there was a good way to tell if a driver supports
- * netif_carrier, but there really isn't.
- */
-static int bond_check_dev_link(struct bonding *bond,
- struct net_device *slave_dev, int reporting)
-{
- const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
- struct mii_ioctl_data *mii;
- struct ifreq ifr;
- int ret;
-
- if (!reporting && !netif_running(slave_dev))
- return 0;
-
- if (bond->params.use_carrier)
- return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0;
-
- /* Try to get link status using Ethtool first. */
- if (slave_dev->ethtool_ops->get_link) {
- netdev_lock_ops(slave_dev);
- ret = slave_dev->ethtool_ops->get_link(slave_dev);
- netdev_unlock_ops(slave_dev);
-
- return ret ? BMSR_LSTATUS : 0;
- }
-
- /* Ethtool can't be used, fallback to MII ioctls. */
- if (slave_ops->ndo_eth_ioctl) {
- /* TODO: set pointer to correct ioctl on a per team member
- * bases to make this more efficient. that is, once
- * we determine the correct ioctl, we will always
- * call it and not the others for that team
- * member.
- */
-
- /* We cannot assume that SIOCGMIIPHY will also read a
- * register; not all network drivers (e.g., e100)
- * support that.
- */
-
- /* Yes, the mii is overlaid on the ifreq.ifr_ifru */
- strscpy_pad(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
- mii = if_mii(&ifr);
-
- if (dev_eth_ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
- mii->reg_num = MII_BMSR;
- if (dev_eth_ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0)
- return mii->val_out & BMSR_LSTATUS;
- }
- }
-
- /* If reporting, report that either there's no ndo_eth_ioctl,
- * or both SIOCGMIIREG and get_link failed (meaning that we
- * cannot report link status). If not reporting, pretend
- * we're ok.
- */
- return reporting ? -1 : BMSR_LSTATUS;
-}
-
/*----------------------------- Multicast list ------------------------------*/
/* Push the promiscuity flag down to appropriate slaves */
@@ -1966,7 +1894,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
struct slave *new_slave = NULL, *prev_slave;
struct sockaddr_storage ss;
- int link_reporting;
int res = 0, i;
if (slave_dev->flags & IFF_MASTER &&
@@ -1976,12 +1903,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
return -EPERM;
}
- if (!bond->params.use_carrier &&
- slave_dev->ethtool_ops->get_link == NULL &&
- slave_ops->ndo_eth_ioctl == NULL) {
- slave_warn(bond_dev, slave_dev, "no link monitoring support\n");
- }
-
/* already in-use? */
if (netdev_is_rx_handler_busy(slave_dev)) {
SLAVE_NL_ERR(bond_dev, slave_dev, extack,
@@ -2132,6 +2053,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
} else if (bond->params.fail_over_mac == BOND_FOM_FOLLOW &&
BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
+ bond_has_slaves(bond) &&
memcmp(slave_dev->dev_addr, bond_dev->dev_addr, bond_dev->addr_len) == 0) {
/* Set slave to random address to avoid duplicate mac
* address in later fail over.
@@ -2195,29 +2117,10 @@ skip_mac_set:
new_slave->last_tx = new_slave->last_rx;
- if (bond->params.miimon && !bond->params.use_carrier) {
- link_reporting = bond_check_dev_link(bond, slave_dev, 1);
-
- if ((link_reporting == -1) && !bond->params.arp_interval) {
- /* miimon is set but a bonded network driver
- * does not support ETHTOOL/MII and
- * arp_interval is not set. Note: if
- * use_carrier is enabled, we will never go
- * here (because netif_carrier is always
- * supported); thus, we don't need to change
- * the messages for netif_carrier.
- */
- slave_warn(bond_dev, slave_dev, "MII and ETHTOOL support not available for slave, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n");
- } else if (link_reporting == -1) {
- /* unable get link status using mii/ethtool */
- slave_warn(bond_dev, slave_dev, "can't get link status from slave; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n");
- }
- }
-
/* check for initial state */
new_slave->link = BOND_LINK_NOCHANGE;
if (bond->params.miimon) {
- if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
+ if (netif_carrier_ok(slave_dev)) {
if (bond->params.updelay) {
bond_set_slave_link_state(new_slave,
BOND_LINK_BACK,
@@ -2759,7 +2662,7 @@ static int bond_miimon_inspect(struct bonding *bond)
bond_for_each_slave_rcu(bond, slave, iter) {
bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
- link_state = bond_check_dev_link(bond, slave->dev, 0);
+ link_state = netif_carrier_ok(slave->dev);
switch (slave->link) {
case BOND_LINK_UP:
@@ -3355,7 +3258,6 @@ static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
/* Find out through which dev should the packet go */
memset(&fl6, 0, sizeof(struct flowi6));
fl6.daddr = targets[i];
- fl6.flowi6_oif = bond->dev->ifindex;
dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6);
if (dst->error) {
@@ -4411,7 +4313,7 @@ void bond_work_init_all(struct bonding *bond)
INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler);
}
-static void bond_work_cancel_all(struct bonding *bond)
+void bond_work_cancel_all(struct bonding *bond)
{
cancel_delayed_work_sync(&bond->mii_work);
cancel_delayed_work_sync(&bond->arp_work);
@@ -6257,10 +6159,10 @@ static int __init bond_check_params(struct bond_params *params)
downdelay = 0;
}
- if ((use_carrier != 0) && (use_carrier != 1)) {
- pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
- use_carrier);
- use_carrier = 1;
+ if (use_carrier != 1) {
+ pr_err("Error: invalid use_carrier parameter (%d)\n",
+ use_carrier);
+ return -EINVAL;
}
if (num_peer_notif < 0 || num_peer_notif > 255) {
@@ -6507,7 +6409,6 @@ static int __init bond_check_params(struct bond_params *params)
params->updelay = updelay;
params->downdelay = downdelay;
params->peer_notif_delay = 0;
- params->use_carrier = use_carrier;
params->lacp_active = 1;
params->lacp_fast = lacp_fast;
params->primary[0] = 0;
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 57fff2421f1b..286f11c517f7 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -28,6 +28,7 @@ static size_t bond_get_slave_size(const struct net_device *bond_dev,
nla_total_size(sizeof(u8)) + /* IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE */
nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE */
nla_total_size(sizeof(s32)) + /* IFLA_BOND_SLAVE_PRIO */
+ nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_ACTOR_PORT_PRIO */
0;
}
@@ -77,6 +78,10 @@ static int bond_fill_slave_info(struct sk_buff *skb,
ad_port->partner_oper.port_state))
goto nla_put_failure;
}
+
+ if (nla_put_u16(skb, IFLA_BOND_SLAVE_ACTOR_PORT_PRIO,
+ SLAVE_AD_INFO(slave)->port_priority))
+ goto nla_put_failure;
}
return 0;
@@ -130,6 +135,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
[IFLA_BOND_SLAVE_QUEUE_ID] = { .type = NLA_U16 },
[IFLA_BOND_SLAVE_PRIO] = { .type = NLA_S32 },
+ [IFLA_BOND_SLAVE_ACTOR_PORT_PRIO] = { .type = NLA_U16 },
};
static int bond_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -180,6 +186,16 @@ static int bond_slave_changelink(struct net_device *bond_dev,
return err;
}
+ if (data[IFLA_BOND_SLAVE_ACTOR_PORT_PRIO]) {
+ u16 ad_prio = nla_get_u16(data[IFLA_BOND_SLAVE_ACTOR_PORT_PRIO]);
+
+ bond_opt_slave_initval(&newval, &slave_dev, ad_prio);
+ err = __bond_opt_set(bond, BOND_OPT_ACTOR_PORT_PRIO, &newval,
+ data[IFLA_BOND_SLAVE_ACTOR_PORT_PRIO], extack);
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -259,13 +275,11 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
return err;
}
if (data[IFLA_BOND_USE_CARRIER]) {
- int use_carrier = nla_get_u8(data[IFLA_BOND_USE_CARRIER]);
-
- bond_opt_initval(&newval, use_carrier);
- err = __bond_opt_set(bond, BOND_OPT_USE_CARRIER, &newval,
- data[IFLA_BOND_USE_CARRIER], extack);
- if (err)
- return err;
+ if (nla_get_u8(data[IFLA_BOND_USE_CARRIER]) != 1) {
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_BOND_USE_CARRIER],
+ "option obsolete, use_carrier cannot be disabled");
+ return -EINVAL;
+ }
}
if (data[IFLA_BOND_ARP_INTERVAL]) {
int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]);
@@ -579,20 +593,22 @@ static int bond_newlink(struct net_device *bond_dev,
struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack)
{
+ struct bonding *bond = netdev_priv(bond_dev);
struct nlattr **data = params->data;
struct nlattr **tb = params->tb;
int err;
- err = bond_changelink(bond_dev, tb, data, extack);
- if (err < 0)
+ err = register_netdevice(bond_dev);
+ if (err)
return err;
- err = register_netdevice(bond_dev);
- if (!err) {
- struct bonding *bond = netdev_priv(bond_dev);
+ netif_carrier_off(bond_dev);
+ bond_work_init_all(bond);
- netif_carrier_off(bond_dev);
- bond_work_init_all(bond);
+ err = bond_changelink(bond_dev, tb, data, extack);
+ if (err) {
+ bond_work_cancel_all(bond);
+ unregister_netdevice(bond_dev);
}
return err;
@@ -688,7 +704,7 @@ static int bond_fill_info(struct sk_buff *skb,
bond->params.peer_notif_delay * bond->params.miimon))
goto nla_put_failure;
- if (nla_put_u8(skb, IFLA_BOND_USE_CARRIER, bond->params.use_carrier))
+ if (nla_put_u8(skb, IFLA_BOND_USE_CARRIER, 1))
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_BOND_ARP_INTERVAL, bond->params.arp_interval))
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 1d639a3be6ba..495a87f2ea7c 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -79,6 +79,8 @@ static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_ad_actor_sys_prio_set(struct bonding *bond,
const struct bond_opt_value *newval);
+static int bond_option_actor_port_prio_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
static int bond_option_ad_actor_system_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_ad_user_port_key_set(struct bonding *bond,
@@ -160,10 +162,11 @@ static const struct bond_opt_value bond_lacp_rate_tbl[] = {
};
static const struct bond_opt_value bond_ad_select_tbl[] = {
- { "stable", BOND_AD_STABLE, BOND_VALFLAG_DEFAULT},
- { "bandwidth", BOND_AD_BANDWIDTH, 0},
- { "count", BOND_AD_COUNT, 0},
- { NULL, -1, 0},
+ { "stable", BOND_AD_STABLE, BOND_VALFLAG_DEFAULT},
+ { "bandwidth", BOND_AD_BANDWIDTH, 0},
+ { "count", BOND_AD_COUNT, 0},
+ { "actor_port_prio", BOND_AD_PRIO, 0},
+ { NULL, -1, 0},
};
static const struct bond_opt_value bond_num_peer_notif_tbl[] = {
@@ -187,7 +190,6 @@ static const struct bond_opt_value bond_primary_reselect_tbl[] = {
};
static const struct bond_opt_value bond_use_carrier_tbl[] = {
- { "off", 0, 0},
{ "on", 1, BOND_VALFLAG_DEFAULT},
{ NULL, -1, 0}
};
@@ -223,6 +225,13 @@ static const struct bond_opt_value bond_ad_actor_sys_prio_tbl[] = {
{ NULL, -1, 0},
};
+static const struct bond_opt_value bond_actor_port_prio_tbl[] = {
+ { "minval", 0, BOND_VALFLAG_MIN},
+ { "maxval", 65535, BOND_VALFLAG_MAX},
+ { "default", 255, BOND_VALFLAG_DEFAULT},
+ { NULL, -1, 0},
+};
+
static const struct bond_opt_value bond_ad_user_port_key_tbl[] = {
{ "minval", 0, BOND_VALFLAG_MIN | BOND_VALFLAG_DEFAULT},
{ "maxval", 1023, BOND_VALFLAG_MAX},
@@ -370,7 +379,7 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
[BOND_OPT_AD_SELECT] = {
.id = BOND_OPT_AD_SELECT,
.name = "ad_select",
- .desc = "803.ad aggregation selection logic",
+ .desc = "802.3ad aggregation selection logic",
.flags = BOND_OPTFLAG_IFDOWN,
.values = bond_ad_select_tbl,
.set = bond_option_ad_select_set
@@ -419,7 +428,7 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
[BOND_OPT_USE_CARRIER] = {
.id = BOND_OPT_USE_CARRIER,
.name = "use_carrier",
- .desc = "Use netif_carrier_ok (vs MII ioctls) in miimon",
+ .desc = "option obsolete, use_carrier cannot be disabled",
.values = bond_use_carrier_tbl,
.set = bond_option_use_carrier_set
},
@@ -484,6 +493,13 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.values = bond_ad_actor_sys_prio_tbl,
.set = bond_option_ad_actor_sys_prio_set,
},
+ [BOND_OPT_ACTOR_PORT_PRIO] = {
+ .id = BOND_OPT_ACTOR_PORT_PRIO,
+ .name = "actor_port_prio",
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
+ .values = bond_actor_port_prio_tbl,
+ .set = bond_option_actor_port_prio_set,
+ },
[BOND_OPT_AD_ACTOR_SYSTEM] = {
.id = BOND_OPT_AD_ACTOR_SYSTEM,
.name = "ad_actor_system",
@@ -1091,10 +1107,6 @@ static int bond_option_peer_notif_delay_set(struct bonding *bond,
static int bond_option_use_carrier_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
- netdev_dbg(bond->dev, "Setting use_carrier to %llu\n",
- newval->value);
- bond->params.use_carrier = newval->value;
-
return 0;
}
@@ -1660,6 +1672,7 @@ static int bond_option_lacp_active_set(struct bonding *bond,
netdev_dbg(bond->dev, "Setting LACP active to %s (%llu)\n",
newval->string, newval->value);
bond->params.lacp_active = newval->value;
+ bond_3ad_update_lacp_active(bond);
return 0;
}
@@ -1816,6 +1829,26 @@ static int bond_option_ad_actor_sys_prio_set(struct bonding *bond,
return 0;
}
+static int bond_option_actor_port_prio_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ struct slave *slave;
+
+ slave = bond_slave_get_rtnl(newval->slave_dev);
+ if (!slave) {
+ netdev_dbg(bond->dev, "%s called on NULL slave\n", __func__);
+ return -ENODEV;
+ }
+
+ netdev_dbg(newval->slave_dev, "Setting actor_port_prio to %llu\n",
+ newval->value);
+
+ SLAVE_AD_INFO(slave)->port_priority = newval->value;
+ bond_3ad_update_ad_actor_settings(bond);
+
+ return 0;
+}
+
static int bond_option_ad_actor_system_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 1e13bb170515..9a75ad3181ab 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -467,14 +467,12 @@ static ssize_t bonding_show_primary_reselect(struct device *d,
static DEVICE_ATTR(primary_reselect, 0644,
bonding_show_primary_reselect, bonding_sysfs_store_option);
-/* Show the use_carrier flag. */
+/* use_carrier is obsolete, but print value for compatibility */
static ssize_t bonding_show_carrier(struct device *d,
struct device_attribute *attr,
char *buf)
{
- struct bonding *bond = to_bond(d);
-
- return sysfs_emit(buf, "%d\n", bond->params.use_carrier);
+ return sysfs_emit(buf, "1\n");
}
static DEVICE_ATTR(use_carrier, 0644,
bonding_show_carrier, bonding_sysfs_store_option);
diff --git a/drivers/net/can/dev/calc_bittiming.c b/drivers/net/can/dev/calc_bittiming.c
index a94bd67c670c..394d6974f481 100644
--- a/drivers/net/can/dev/calc_bittiming.c
+++ b/drivers/net/can/dev/calc_bittiming.c
@@ -173,13 +173,15 @@ int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
const struct can_bittiming *dbt,
- u32 *ctrlmode, u32 ctrlmode_supported)
+ u32 tdc_mask, u32 *ctrlmode, u32 ctrlmode_supported)
{
- if (!tdc_const || !(ctrlmode_supported & CAN_CTRLMODE_TDC_AUTO))
+ u32 tdc_auto = tdc_mask & CAN_CTRLMODE_TDC_AUTO_MASK;
+
+ if (!tdc_const || !(ctrlmode_supported & tdc_auto))
return;
- *ctrlmode &= ~CAN_CTRLMODE_FD_TDC_MASK;
+ *ctrlmode &= ~tdc_mask;
/* As specified in ISO 11898-1 section 11.3.3 "Transmitter
* delay compensation" (TDC) is only applicable if data BRP is
@@ -193,6 +195,6 @@ void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
if (sample_point_in_tc < tdc_const->tdco_min)
return;
tdc->tdco = min(sample_point_in_tc, tdc_const->tdco_max);
- *ctrlmode |= CAN_CTRLMODE_TDC_AUTO;
+ *ctrlmode |= tdc_auto;
}
}
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index 3913971125de..15ccedbb3f8d 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -4,17 +4,17 @@
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
*/
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/workqueue.h>
#include <linux/can.h>
#include <linux/can/can-ml.h>
#include <linux/can/dev.h>
#include <linux/can/skb.h>
#include <linux/gpio/consumer.h>
+#include <linux/if_arp.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
static void can_update_state_error_stats(struct net_device *dev,
enum can_state new_state)
@@ -88,6 +88,39 @@ const char *can_get_state_str(const enum can_state state)
}
EXPORT_SYMBOL_GPL(can_get_state_str);
+const char *can_get_ctrlmode_str(u32 ctrlmode)
+{
+ switch (ctrlmode & ~(ctrlmode - 1)) {
+ case 0:
+ return "none";
+ case CAN_CTRLMODE_LOOPBACK:
+ return "loopback";
+ case CAN_CTRLMODE_LISTENONLY:
+ return "listen-only";
+ case CAN_CTRLMODE_3_SAMPLES:
+ return "triple-sampling";
+ case CAN_CTRLMODE_ONE_SHOT:
+ return "one-shot";
+ case CAN_CTRLMODE_BERR_REPORTING:
+ return "berr-reporting";
+ case CAN_CTRLMODE_FD:
+ return "fd";
+ case CAN_CTRLMODE_PRESUME_ACK:
+ return "presume-ack";
+ case CAN_CTRLMODE_FD_NON_ISO:
+ return "fd-non-iso";
+ case CAN_CTRLMODE_CC_LEN8_DLC:
+ return "cc-len8-dlc";
+ case CAN_CTRLMODE_TDC_AUTO:
+ return "fd-tdc-auto";
+ case CAN_CTRLMODE_TDC_MANUAL:
+ return "fd-tdc-manual";
+ default:
+ return "<unknown>";
+ }
+}
+EXPORT_SYMBOL_GPL(can_get_ctrlmode_str);
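The switch key ctrlmode & ~(ctrlmode - 1) isolates the lowest set bit, so when a caller passes several flags the helper names only the least-significant one, which is all the netlink error paths below need:

/* Lowest-set-bit isolation used by can_get_ctrlmode_str() above: */
u32 m = CAN_CTRLMODE_FD | CAN_CTRLMODE_ONE_SHOT;	/* 0x20 | 0x08 = 0x28 */
u32 lowest = m & ~(m - 1);				/* 0x28 & ~0x27 = 0x08 */
/* -> can_get_ctrlmode_str(m) returns "one-shot" */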
+
static enum can_state can_state_err_to_state(u16 err)
{
if (err < CAN_ERROR_WARNING_THRESHOLD)
@@ -240,6 +273,8 @@ void can_setup(struct net_device *dev)
{
dev->type = ARPHRD_CAN;
dev->mtu = CAN_MTU;
+ dev->min_mtu = CAN_MTU;
+ dev->max_mtu = CAN_MTU;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->tx_queue_len = 10;
@@ -309,6 +344,21 @@ void free_candev(struct net_device *dev)
}
EXPORT_SYMBOL_GPL(free_candev);
+void can_set_default_mtu(struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (priv->ctrlmode & CAN_CTRLMODE_FD) {
+ dev->mtu = CANFD_MTU;
+ dev->min_mtu = CANFD_MTU;
+ dev->max_mtu = CANFD_MTU;
+ } else {
+ dev->mtu = CAN_MTU;
+ dev->min_mtu = CAN_MTU;
+ dev->max_mtu = CAN_MTU;
+ }
+}
+
/* changing MTU and control mode for CAN/CANFD devices */
int can_change_mtu(struct net_device *dev, int new_mtu)
{
@@ -347,6 +397,26 @@ int can_change_mtu(struct net_device *dev, int new_mtu)
}
EXPORT_SYMBOL_GPL(can_change_mtu);
+/* helper to define static CAN controller features at device creation time */
+int can_set_static_ctrlmode(struct net_device *dev, u32 static_mode)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ /* alloc_candev() succeeded => netdev_priv() is valid at this point */
+ if (priv->ctrlmode_supported & static_mode) {
+ netdev_warn(dev,
+ "Controller features can not be supported and static at the same time\n");
+ return -EINVAL;
+ }
+ priv->ctrlmode = static_mode;
+
+ /* override MTU which was set by default in can_setup()? */
+ can_set_default_mtu(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(can_set_static_ctrlmode);
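A driver whose hardware is permanently in FD mode would call this right after allocation; a hedged sketch of such a probe fragment (struct my_priv and MY_ECHO_SKB_MAX are hypothetical placeholders):

/* Hypothetical probe fragment using can_set_static_ctrlmode() above. */
struct net_device *dev;
int err;

dev = alloc_candev(sizeof(struct my_priv), MY_ECHO_SKB_MAX);
if (!dev)
	return -ENOMEM;

err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD);
if (err) {			/* FD also advertised as switchable */
	free_candev(dev);
	return err;
}
/* dev->mtu is now CANFD_MTU via can_set_default_mtu() */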
+
/* generic implementation of netdev_ops::ndo_eth_ioctl for CAN devices
* supporting hardware timestamps
*/
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index d9f6ab3efb97..0591406b6f32 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -36,116 +36,245 @@ static const struct nla_policy can_tdc_policy[IFLA_CAN_TDC_MAX + 1] = {
[IFLA_CAN_TDC_TDCF] = { .type = NLA_U32 },
};
-static int can_validate_bittiming(const struct can_bittiming *bt,
- struct netlink_ext_ack *extack)
+static int can_validate_bittiming(struct nlattr *data[],
+ struct netlink_ext_ack *extack,
+ int ifla_can_bittiming)
{
+ struct can_bittiming *bt;
+
+ if (!data[ifla_can_bittiming])
+ return 0;
+
+ static_assert(__alignof__(*bt) <= NLA_ALIGNTO);
+ bt = nla_data(data[ifla_can_bittiming]);
+
/* sample point is in one-tenth of a percent */
if (bt->sample_point >= 1000) {
NL_SET_ERR_MSG(extack, "sample point must be between 0 and 100%");
-
return -EINVAL;
}
return 0;
}
-static int can_validate(struct nlattr *tb[], struct nlattr *data[],
- struct netlink_ext_ack *extack)
+static int can_validate_tdc(struct nlattr *data_tdc,
+ struct netlink_ext_ack *extack, u32 tdc_flags)
{
- bool is_can_fd = false;
+ bool tdc_manual = tdc_flags & CAN_CTRLMODE_TDC_MANUAL_MASK;
+ bool tdc_auto = tdc_flags & CAN_CTRLMODE_TDC_AUTO_MASK;
+ int err;
+
+ if (tdc_auto && tdc_manual) {
+ NL_SET_ERR_MSG(extack,
+ "TDC manual and auto modes are mutually exclusive");
+ return -EOPNOTSUPP;
+ }
+
+ /* If one of the CAN_CTRLMODE_TDC_* flags is set then TDC
+ * must be set and vice-versa
+ */
+ if ((tdc_auto || tdc_manual) && !data_tdc) {
+ NL_SET_ERR_MSG(extack, "TDC parameters are missing");
+ return -EOPNOTSUPP;
+ }
+ if (!(tdc_auto || tdc_manual) && data_tdc) {
+ NL_SET_ERR_MSG(extack, "TDC mode (auto or manual) is missing");
+ return -EOPNOTSUPP;
+ }
+
+ /* If providing TDC parameters, at least TDCO is needed. TDCV
+ * is needed if and only if CAN_CTRLMODE_TDC_MANUAL is set
+ */
+ if (data_tdc) {
+ struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1];
+
+ err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX,
+ data_tdc, can_tdc_policy, extack);
+ if (err)
+ return err;
+
+ if (tb_tdc[IFLA_CAN_TDC_TDCV]) {
+ if (tdc_auto) {
+ NL_SET_ERR_MSG(extack,
+ "TDCV is incompatible with TDC auto mode");
+ return -EOPNOTSUPP;
+ }
+ } else {
+ if (tdc_manual) {
+ NL_SET_ERR_MSG(extack,
+ "TDC manual mode requires TDCV");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (!tb_tdc[IFLA_CAN_TDC_TDCO]) {
+ NL_SET_ERR_MSG(extack, "TDCO is missing");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int can_validate_databittiming(struct nlattr *data[],
+ struct netlink_ext_ack *extack,
+ int ifla_can_data_bittiming, u32 flags)
+{
+ struct nlattr *data_tdc;
+ const char *type;
+ u32 tdc_flags;
+ bool is_on;
int err;
/* Make sure that valid CAN FD configurations always consist of
* - nominal/arbitration bittiming
* - data bittiming
* - control mode with CAN_CTRLMODE_FD set
- * - TDC parameters are coherent (details below)
+ * - TDC parameters are coherent (details in can_validate_tdc())
*/
+ if (ifla_can_data_bittiming == IFLA_CAN_DATA_BITTIMING) {
+ data_tdc = data[IFLA_CAN_TDC];
+ tdc_flags = flags & CAN_CTRLMODE_FD_TDC_MASK;
+ is_on = flags & CAN_CTRLMODE_FD;
+ type = "FD";
+ } else {
+ return -EOPNOTSUPP; /* Placeholder for CAN XL */
+ }
+
+ if (is_on) {
+ if (!data[IFLA_CAN_BITTIMING] || !data[ifla_can_data_bittiming]) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "Provide both nominal and %s data bittiming",
+ type);
+ return -EOPNOTSUPP;
+ }
+ } else {
+ if (data[ifla_can_data_bittiming]) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "%s data bittiming requires CAN %s",
+ type, type);
+ return -EOPNOTSUPP;
+ }
+ if (data_tdc) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "%s TDC requires CAN %s",
+ type, type);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ err = can_validate_bittiming(data, extack, ifla_can_data_bittiming);
+ if (err)
+ return err;
+
+ err = can_validate_tdc(data_tdc, extack, tdc_flags);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int can_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ u32 flags = 0;
+ int err;
+
if (!data)
return 0;
if (data[IFLA_CAN_CTRLMODE]) {
struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
- u32 tdc_flags = cm->flags & CAN_CTRLMODE_FD_TDC_MASK;
- is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
+ flags = cm->flags & cm->mask;
+ }
- /* CAN_CTRLMODE_TDC_{AUTO,MANUAL} are mutually exclusive */
- if (tdc_flags == CAN_CTRLMODE_FD_TDC_MASK)
- return -EOPNOTSUPP;
- /* If one of the CAN_CTRLMODE_TDC_* flag is set then
- * TDC must be set and vice-versa
- */
- if (!!tdc_flags != !!data[IFLA_CAN_TDC])
- return -EOPNOTSUPP;
- /* If providing TDC parameters, at least TDCO is
- * needed. TDCV is needed if and only if
- * CAN_CTRLMODE_TDC_MANUAL is set
- */
- if (data[IFLA_CAN_TDC]) {
- struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1];
+ err = can_validate_bittiming(data, extack, IFLA_CAN_BITTIMING);
+ if (err)
+ return err;
- err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX,
- data[IFLA_CAN_TDC],
- can_tdc_policy, extack);
- if (err)
- return err;
+ err = can_validate_databittiming(data, extack,
+ IFLA_CAN_DATA_BITTIMING, flags);
+ if (err)
+ return err;
- if (tb_tdc[IFLA_CAN_TDC_TDCV]) {
- if (tdc_flags & CAN_CTRLMODE_TDC_AUTO)
- return -EOPNOTSUPP;
- } else {
- if (tdc_flags & CAN_CTRLMODE_TDC_MANUAL)
- return -EOPNOTSUPP;
- }
+ return 0;
+}
- if (!tb_tdc[IFLA_CAN_TDC_TDCO])
- return -EOPNOTSUPP;
- }
- }
+static int can_ctrlmode_changelink(struct net_device *dev,
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ struct can_ctrlmode *cm;
+ u32 ctrlstatic, maskedflags, notsupp, ctrlstatic_missing;
- if (data[IFLA_CAN_BITTIMING]) {
- struct can_bittiming bt;
+ if (!data[IFLA_CAN_CTRLMODE])
+ return 0;
- memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
- err = can_validate_bittiming(&bt, extack);
- if (err)
- return err;
- }
+ /* Do not allow changing controller mode while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
- if (is_can_fd) {
- if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING])
- return -EOPNOTSUPP;
+ cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+ ctrlstatic = can_get_static_ctrlmode(priv);
+ maskedflags = cm->flags & cm->mask;
+ notsupp = maskedflags & ~(priv->ctrlmode_supported | ctrlstatic);
+
+ if (notsupp) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "requested control mode %s not supported",
+ can_get_ctrlmode_str(notsupp));
+ return -EOPNOTSUPP;
}
- if (data[IFLA_CAN_DATA_BITTIMING] || data[IFLA_CAN_TDC]) {
- if (!is_can_fd)
- return -EOPNOTSUPP;
+ /* do not check for static fd-non-iso if 'fd' is disabled */
+ if (!(maskedflags & CAN_CTRLMODE_FD))
+ ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
+
+ ctrlstatic_missing = (maskedflags & ctrlstatic) ^ ctrlstatic;
+ if (ctrlstatic_missing) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "missing required %s static control mode",
+ can_get_ctrlmode_str(ctrlstatic_missing));
+ return -EOPNOTSUPP;
}
- if (data[IFLA_CAN_DATA_BITTIMING]) {
- struct can_bittiming bt;
+ /* If a top dependency flag is provided, reset all its dependencies */
+ if (cm->mask & CAN_CTRLMODE_FD)
+ priv->ctrlmode &= ~CAN_CTRLMODE_FD_TDC_MASK;
- memcpy(&bt, nla_data(data[IFLA_CAN_DATA_BITTIMING]), sizeof(bt));
- err = can_validate_bittiming(&bt, extack);
- if (err)
- return err;
+ /* clear bits to be modified and copy the flag values */
+ priv->ctrlmode &= ~cm->mask;
+ priv->ctrlmode |= maskedflags;
+
+ /* Wipe potential leftovers from previous CAN FD config */
+ if (!(priv->ctrlmode & CAN_CTRLMODE_FD)) {
+ memset(&priv->fd.data_bittiming, 0,
+ sizeof(priv->fd.data_bittiming));
+ priv->ctrlmode &= ~CAN_CTRLMODE_FD_TDC_MASK;
+ memset(&priv->fd.tdc, 0, sizeof(priv->fd.tdc));
}
+ can_set_default_mtu(dev);
+
return 0;
}
-static int can_tdc_changelink(struct can_priv *priv, const struct nlattr *nla,
+static int can_tdc_changelink(struct data_bittiming_params *dbt_params,
+ const struct nlattr *nla,
struct netlink_ext_ack *extack)
{
struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1];
struct can_tdc tdc = { 0 };
- const struct can_tdc_const *tdc_const = priv->fd.tdc_const;
+ const struct can_tdc_const *tdc_const = dbt_params->tdc_const;
int err;
- if (!tdc_const || !can_fd_tdc_is_enabled(priv))
+ if (!tdc_const) {
+ NL_SET_ERR_MSG(extack, "The device does not support TDC");
return -EOPNOTSUPP;
+ }
err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX, nla,
can_tdc_policy, extack);
@@ -179,69 +308,107 @@ static int can_tdc_changelink(struct can_priv *priv, const struct nlattr *nla,
tdc.tdcf = tdcf;
}
- priv->fd.tdc = tdc;
+ dbt_params->tdc = tdc;
return 0;
}
-static int can_changelink(struct net_device *dev, struct nlattr *tb[],
- struct nlattr *data[],
- struct netlink_ext_ack *extack)
+static int can_dbt_changelink(struct net_device *dev, struct nlattr *data[],
+ bool fd, struct netlink_ext_ack *extack)
{
+ struct nlattr *data_bittiming, *data_tdc;
struct can_priv *priv = netdev_priv(dev);
- bool fd_tdc_flag_provided = false;
+ struct data_bittiming_params *dbt_params;
+ struct can_bittiming dbt;
+ bool need_tdc_calc = false;
+ u32 tdc_mask;
int err;
- /* We need synchronization with dev->stop() */
- ASSERT_RTNL();
+ if (fd) {
+ data_bittiming = data[IFLA_CAN_DATA_BITTIMING];
+ data_tdc = data[IFLA_CAN_TDC];
+ dbt_params = &priv->fd;
+ tdc_mask = CAN_CTRLMODE_FD_TDC_MASK;
+ } else {
+ return -EOPNOTSUPP; /* Placeholder for CAN XL */
+ }
- if (data[IFLA_CAN_CTRLMODE]) {
- struct can_ctrlmode *cm;
- u32 ctrlstatic;
- u32 maskedflags;
+ if (!data_bittiming)
+ return 0;
- /* Do not allow changing controller mode while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
- cm = nla_data(data[IFLA_CAN_CTRLMODE]);
- ctrlstatic = can_get_static_ctrlmode(priv);
- maskedflags = cm->flags & cm->mask;
+ /* Do not allow changing bittiming while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
- /* check whether provided bits are allowed to be passed */
- if (maskedflags & ~(priv->ctrlmode_supported | ctrlstatic))
- return -EOPNOTSUPP;
+ /* Calculate bittiming parameters based on data_bittiming_const
+ * if set, otherwise pass bitrate directly via do_set_bitrate().
+ * Bail out if neither is given.
+ */
+ if (!dbt_params->data_bittiming_const && !dbt_params->do_set_data_bittiming &&
+ !dbt_params->data_bitrate_const)
+ return -EOPNOTSUPP;
- /* do not check for static fd-non-iso if 'fd' is disabled */
- if (!(maskedflags & CAN_CTRLMODE_FD))
- ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
+ memcpy(&dbt, nla_data(data_bittiming), sizeof(dbt));
+ err = can_get_bittiming(dev, &dbt, dbt_params->data_bittiming_const,
+ dbt_params->data_bitrate_const,
+ dbt_params->data_bitrate_const_cnt, extack);
+ if (err)
+ return err;
- /* make sure static options are provided by configuration */
- if ((maskedflags & ctrlstatic) != ctrlstatic)
- return -EOPNOTSUPP;
+ if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "CAN data bitrate %u bps surpasses transceiver capabilities of %u bps",
+ dbt.bitrate, priv->bitrate_max);
+ return -EINVAL;
+ }
- /* clear bits to be modified and copy the flag values */
- priv->ctrlmode &= ~cm->mask;
- priv->ctrlmode |= maskedflags;
+ memset(&dbt_params->tdc, 0, sizeof(dbt_params->tdc));
+ if (data[IFLA_CAN_CTRLMODE]) {
+ struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
- /* CAN_CTRLMODE_FD can only be set when driver supports FD */
- if (priv->ctrlmode & CAN_CTRLMODE_FD) {
- dev->mtu = CANFD_MTU;
- } else {
- dev->mtu = CAN_MTU;
- memset(&priv->fd.data_bittiming, 0,
- sizeof(priv->fd.data_bittiming));
- priv->ctrlmode &= ~CAN_CTRLMODE_FD_TDC_MASK;
- memset(&priv->fd.tdc, 0, sizeof(priv->fd.tdc));
+ need_tdc_calc = !(cm->mask & tdc_mask);
+ }
+ if (data_tdc) {
+ /* TDC parameters are provided: use them */
+ err = can_tdc_changelink(dbt_params, data_tdc, extack);
+ if (err) {
+ priv->ctrlmode &= ~tdc_mask;
+ return err;
}
-
- fd_tdc_flag_provided = cm->mask & CAN_CTRLMODE_FD_TDC_MASK;
- /* CAN_CTRLMODE_TDC_{AUTO,MANUAL} are mutually
- * exclusive: make sure to turn the other one off
+ } else if (need_tdc_calc) {
+ /* Neither TDC parameters nor TDC flags are provided:
+ * do the calculation
*/
- if (fd_tdc_flag_provided)
- priv->ctrlmode &= cm->flags | ~CAN_CTRLMODE_FD_TDC_MASK;
+ can_calc_tdco(&dbt_params->tdc, dbt_params->tdc_const, &dbt,
+ tdc_mask, &priv->ctrlmode, priv->ctrlmode_supported);
+ } /* else: both CAN_CTRLMODE_TDC_{AUTO,MANUAL} are explicitly
+ * turned off. TDC is disabled: do nothing
+ */
+
+ memcpy(&dbt_params->data_bittiming, &dbt, sizeof(dbt));
+
+ if (dbt_params->do_set_data_bittiming) {
+ /* Finally, set the bit-timing registers */
+ err = dbt_params->do_set_data_bittiming(dev);
+ if (err)
+ return err;
}
+ return 0;
+}
+
+static int can_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ int err;
+
+ /* We need synchronization with dev->stop() */
+ ASSERT_RTNL();
+
+ err = can_ctrlmode_changelink(dev, data, extack);
+ if (err)
+ return err;
+
if (data[IFLA_CAN_BITTIMING]) {
struct can_bittiming bt;
@@ -312,75 +479,21 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
return err;
}
- if (data[IFLA_CAN_DATA_BITTIMING]) {
- struct can_bittiming dbt;
-
- /* Do not allow changing bittiming while running */
- if (dev->flags & IFF_UP)
- return -EBUSY;
-
- /* Calculate bittiming parameters based on
- * data_bittiming_const if set, otherwise pass bitrate
- * directly via do_set_bitrate(). Bail out if neither
- * is given.
- */
- if (!priv->fd.data_bittiming_const && !priv->fd.do_set_data_bittiming &&
- !priv->fd.data_bitrate_const)
- return -EOPNOTSUPP;
-
- memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
- sizeof(dbt));
- err = can_get_bittiming(dev, &dbt,
- priv->fd.data_bittiming_const,
- priv->fd.data_bitrate_const,
- priv->fd.data_bitrate_const_cnt,
- extack);
- if (err)
- return err;
-
- if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) {
- NL_SET_ERR_MSG_FMT(extack,
- "CANFD data bitrate %u bps surpasses transceiver capabilities of %u bps",
- dbt.bitrate, priv->bitrate_max);
- return -EINVAL;
- }
-
- memset(&priv->fd.tdc, 0, sizeof(priv->fd.tdc));
- if (data[IFLA_CAN_TDC]) {
- /* TDC parameters are provided: use them */
- err = can_tdc_changelink(priv, data[IFLA_CAN_TDC],
- extack);
- if (err) {
- priv->ctrlmode &= ~CAN_CTRLMODE_FD_TDC_MASK;
- return err;
- }
- } else if (!fd_tdc_flag_provided) {
- /* Neither of TDC parameters nor TDC flags are
- * provided: do calculation
- */
- can_calc_tdco(&priv->fd.tdc, priv->fd.tdc_const, &dbt,
- &priv->ctrlmode, priv->ctrlmode_supported);
- } /* else: both CAN_CTRLMODE_TDC_{AUTO,MANUAL} are explicitly
- * turned off. TDC is disabled: do nothing
- */
-
- memcpy(&priv->fd.data_bittiming, &dbt, sizeof(dbt));
-
- if (priv->fd.do_set_data_bittiming) {
- /* Finally, set the bit-timing registers */
- err = priv->fd.do_set_data_bittiming(dev);
- if (err)
- return err;
- }
- }
+ /* CAN FD */
+ err = can_dbt_changelink(dev, data, true, extack);
+ if (err)
+ return err;
if (data[IFLA_CAN_TERMINATION]) {
const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]);
const unsigned int num_term = priv->termination_const_cnt;
unsigned int i;
- if (!priv->do_set_termination)
+ if (!priv->do_set_termination) {
+ NL_SET_ERR_MSG(extack,
+ "Termination is not configurable on this device");
return -EOPNOTSUPP;
+ }
/* check whether given value is supported by the interface */
for (i = 0; i < num_term; i++) {
@@ -401,38 +514,55 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
return 0;
}
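
For reference: the FD-specific block that previously lived inline here is now
the generic can_dbt_changelink() helper above. A minimal sketch of a possible
future call site once CAN XL support lands; the 'false' selector and the
comment are assumptions, not part of this patch:

	/* Hypothetical CAN XL path, mirroring the "CAN FD" call above: */
	err = can_dbt_changelink(dev, data, false, extack);
	if (err)
		return err;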
-static size_t can_tdc_get_size(const struct net_device *dev)
+static size_t can_tdc_get_size(struct data_bittiming_params *dbt_params,
+ u32 tdc_flags)
{
- struct can_priv *priv = netdev_priv(dev);
+ bool tdc_manual = tdc_flags & CAN_CTRLMODE_TDC_MANUAL_MASK;
size_t size;
- if (!priv->fd.tdc_const)
+ if (!dbt_params->tdc_const)
return 0;
size = nla_total_size(0); /* nest IFLA_CAN_TDC */
- if (priv->ctrlmode_supported & CAN_CTRLMODE_TDC_MANUAL) {
+ if (tdc_manual) {
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV_MIN */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV_MAX */
}
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO_MIN */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO_MAX */
- if (priv->fd.tdc_const->tdcf_max) {
+ if (dbt_params->tdc_const->tdcf_max) {
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MIN */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MAX */
}
- if (can_fd_tdc_is_enabled(priv)) {
- if (priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL ||
- priv->fd.do_get_auto_tdcv)
+ if (tdc_flags) {
+ if (tdc_manual || dbt_params->do_get_auto_tdcv)
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV */
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO */
- if (priv->fd.tdc_const->tdcf_max)
+ if (dbt_params->tdc_const->tdcf_max)
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF */
}
return size;
}
+static size_t can_data_bittiming_get_size(struct data_bittiming_params *dbt_params,
+ u32 tdc_flags)
+{
+ size_t size = 0;
+
+ if (dbt_params->data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */
+ size += nla_total_size(sizeof(dbt_params->data_bittiming));
+ if (dbt_params->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */
+ size += nla_total_size(sizeof(*dbt_params->data_bittiming_const));
+ if (dbt_params->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */
+ size += nla_total_size(sizeof(*dbt_params->data_bitrate_const) *
+ dbt_params->data_bitrate_const_cnt);
+ size += can_tdc_get_size(dbt_params, tdc_flags);/* IFLA_CAN_TDC */
+
+ return size;
+}
+
static size_t can_ctrlmode_ext_get_size(void)
{
return nla_total_size(0) + /* nest IFLA_CAN_CTRLMODE_EXT */
@@ -454,10 +584,6 @@ static size_t can_get_size(const struct net_device *dev)
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
size += nla_total_size(sizeof(struct can_berr_counter));
- if (priv->fd.data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */
- size += nla_total_size(sizeof(struct can_bittiming));
- if (priv->fd.data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */
- size += nla_total_size(sizeof(struct can_bittiming_const));
if (priv->termination_const) {
size += nla_total_size(sizeof(priv->termination)); /* IFLA_CAN_TERMINATION */
size += nla_total_size(sizeof(*priv->termination_const) * /* IFLA_CAN_TERMINATION_CONST */
@@ -466,31 +592,69 @@ static size_t can_get_size(const struct net_device *dev)
if (priv->bitrate_const) /* IFLA_CAN_BITRATE_CONST */
size += nla_total_size(sizeof(*priv->bitrate_const) *
priv->bitrate_const_cnt);
- if (priv->fd.data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */
- size += nla_total_size(sizeof(*priv->fd.data_bitrate_const) *
- priv->fd.data_bitrate_const_cnt);
size += sizeof(priv->bitrate_max); /* IFLA_CAN_BITRATE_MAX */
- size += can_tdc_get_size(dev); /* IFLA_CAN_TDC */
size += can_ctrlmode_ext_get_size(); /* IFLA_CAN_CTRLMODE_EXT */
+ size += can_data_bittiming_get_size(&priv->fd,
+ priv->ctrlmode & CAN_CTRLMODE_FD_TDC_MASK);
+
return size;
}
-static int can_tdc_fill_info(struct sk_buff *skb, const struct net_device *dev)
+static int can_bittiming_fill_info(struct sk_buff *skb, int ifla_can_bittiming,
+ struct can_bittiming *bittiming)
+{
+ return bittiming->bitrate != CAN_BITRATE_UNSET &&
+ bittiming->bitrate != CAN_BITRATE_UNKNOWN &&
+ nla_put(skb, ifla_can_bittiming, sizeof(*bittiming), bittiming);
+}
+
+static int can_bittiming_const_fill_info(struct sk_buff *skb,
+ int ifla_can_bittiming_const,
+ const struct can_bittiming_const *bittiming_const)
+{
+ return bittiming_const &&
+ nla_put(skb, ifla_can_bittiming_const,
+ sizeof(*bittiming_const), bittiming_const);
+}
+
+static int can_bitrate_const_fill_info(struct sk_buff *skb,
+ int ifla_can_bitrate_const,
+ const u32 *bitrate_const, unsigned int cnt)
+{
+ return bitrate_const &&
+ nla_put(skb, ifla_can_bitrate_const,
+ sizeof(*bitrate_const) * cnt, bitrate_const);
+}
+
+static int can_tdc_fill_info(struct sk_buff *skb, const struct net_device *dev,
+ int ifla_can_tdc)
{
- struct nlattr *nest;
struct can_priv *priv = netdev_priv(dev);
- struct can_tdc *tdc = &priv->fd.tdc;
- const struct can_tdc_const *tdc_const = priv->fd.tdc_const;
+ struct data_bittiming_params *dbt_params;
+ const struct can_tdc_const *tdc_const;
+ struct can_tdc *tdc;
+ struct nlattr *nest;
+ bool tdc_is_enabled, tdc_manual;
+
+ if (ifla_can_tdc == IFLA_CAN_TDC) {
+ dbt_params = &priv->fd;
+ tdc_is_enabled = can_fd_tdc_is_enabled(priv);
+ tdc_manual = priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL;
+ } else {
+		return -EOPNOTSUPP; /* Placeholder for CAN XL */
+ }
+ tdc_const = dbt_params->tdc_const;
+ tdc = &dbt_params->tdc;
if (!tdc_const)
return 0;
- nest = nla_nest_start(skb, IFLA_CAN_TDC);
+ nest = nla_nest_start(skb, ifla_can_tdc);
if (!nest)
return -EMSGSIZE;
- if (priv->ctrlmode_supported & CAN_CTRLMODE_TDC_MANUAL &&
+ if (tdc_manual &&
(nla_put_u32(skb, IFLA_CAN_TDC_TDCV_MIN, tdc_const->tdcv_min) ||
nla_put_u32(skb, IFLA_CAN_TDC_TDCV_MAX, tdc_const->tdcv_max)))
goto err_cancel;
@@ -502,15 +666,15 @@ static int can_tdc_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u32(skb, IFLA_CAN_TDC_TDCF_MAX, tdc_const->tdcf_max)))
goto err_cancel;
- if (can_fd_tdc_is_enabled(priv)) {
+ if (tdc_is_enabled) {
u32 tdcv;
int err = -EINVAL;
- if (priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL) {
+ if (tdc_manual) {
tdcv = tdc->tdcv;
err = 0;
- } else if (priv->fd.do_get_auto_tdcv) {
- err = priv->fd.do_get_auto_tdcv(dev, &tdcv);
+ } else if (dbt_params->do_get_auto_tdcv) {
+ err = dbt_params->do_get_auto_tdcv(dev, &tdcv);
}
if (!err && nla_put_u32(skb, IFLA_CAN_TDC_TDCV, tdcv))
goto err_cancel;
@@ -558,14 +722,11 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (priv->do_get_state)
priv->do_get_state(dev, &state);
- if ((priv->bittiming.bitrate != CAN_BITRATE_UNSET &&
- priv->bittiming.bitrate != CAN_BITRATE_UNKNOWN &&
- nla_put(skb, IFLA_CAN_BITTIMING,
- sizeof(priv->bittiming), &priv->bittiming)) ||
+ if (can_bittiming_fill_info(skb, IFLA_CAN_BITTIMING,
+ &priv->bittiming) ||
- (priv->bittiming_const &&
- nla_put(skb, IFLA_CAN_BITTIMING_CONST,
- sizeof(*priv->bittiming_const), priv->bittiming_const)) ||
+ can_bittiming_const_fill_info(skb, IFLA_CAN_BITTIMING_CONST,
+ priv->bittiming_const) ||
nla_put(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock) ||
nla_put_u32(skb, IFLA_CAN_STATE, state) ||
@@ -576,14 +737,11 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
!priv->do_get_berr_counter(dev, &bec) &&
nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
- (priv->fd.data_bittiming.bitrate &&
- nla_put(skb, IFLA_CAN_DATA_BITTIMING,
- sizeof(priv->fd.data_bittiming), &priv->fd.data_bittiming)) ||
+ can_bittiming_fill_info(skb, IFLA_CAN_DATA_BITTIMING,
+ &priv->fd.data_bittiming) ||
- (priv->fd.data_bittiming_const &&
- nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST,
- sizeof(*priv->fd.data_bittiming_const),
- priv->fd.data_bittiming_const)) ||
+ can_bittiming_const_fill_info(skb, IFLA_CAN_DATA_BITTIMING_CONST,
+ priv->fd.data_bittiming_const) ||
(priv->termination_const &&
(nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) ||
@@ -592,23 +750,19 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
priv->termination_const_cnt,
priv->termination_const))) ||
- (priv->bitrate_const &&
- nla_put(skb, IFLA_CAN_BITRATE_CONST,
- sizeof(*priv->bitrate_const) *
- priv->bitrate_const_cnt,
- priv->bitrate_const)) ||
+ can_bitrate_const_fill_info(skb, IFLA_CAN_BITRATE_CONST,
+ priv->bitrate_const,
+ priv->bitrate_const_cnt) ||
- (priv->fd.data_bitrate_const &&
- nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST,
- sizeof(*priv->fd.data_bitrate_const) *
- priv->fd.data_bitrate_const_cnt,
- priv->fd.data_bitrate_const)) ||
+ can_bitrate_const_fill_info(skb, IFLA_CAN_DATA_BITRATE_CONST,
+ priv->fd.data_bitrate_const,
+ priv->fd.data_bitrate_const_cnt) ||
(nla_put(skb, IFLA_CAN_BITRATE_MAX,
sizeof(priv->bitrate_max),
&priv->bitrate_max)) ||
- can_tdc_fill_info(skb, dev) ||
+ can_tdc_fill_info(skb, dev, IFLA_CAN_TDC) ||
can_ctrlmode_ext_fill_info(skb, priv)
)
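
A note on the can_*_fill_info() helpers above: each evaluates to non-zero only
when its attribute is applicable and nla_put() failed (nla_put() returns 0 on
success and -EMSGSIZE on overflow), which lets can_fill_info() chain them with
'||' and take the error path on the first failure. A minimal sketch of the
pattern, using a hypothetical IFLA_CAN_FOO attribute:

	static int can_foo_fill_info(struct sk_buff *skb, const u32 *foo)
	{
		/* non-zero iff foo exists and the put overflowed the skb */
		return foo && nla_put_u32(skb, IFLA_CAN_FOO, *foo);
	}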
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index fe74dbd2c966..e1d725979685 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -2213,11 +2213,9 @@ static int m_can_set_coalesce(struct net_device *dev,
cdev->tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
if (cdev->rx_coalesce_usecs_irq)
- cdev->irq_timer_wait =
- ns_to_ktime(cdev->rx_coalesce_usecs_irq * NSEC_PER_USEC);
+ cdev->irq_timer_wait = us_to_ktime(cdev->rx_coalesce_usecs_irq);
else
- cdev->irq_timer_wait =
- ns_to_ktime(cdev->tx_coalesce_usecs_irq * NSEC_PER_USEC);
+ cdev->irq_timer_wait = us_to_ktime(cdev->tx_coalesce_usecs_irq);
return 0;
}
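
The m_can change is behavior-preserving: us_to_ktime() from <linux/ktime.h>
performs the same microsecond-to-ktime scaling that was previously spelled
out by hand, roughly:

	ktime_t a = us_to_ktime(100);			/* 100 us */
	ktime_t b = ns_to_ktime(100 * NSEC_PER_USEC);	/* same value */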
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index 77292afaed22..b5bc80ac7876 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
- * Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com>
*
- * Copyright (C) 2016 PEAK System-Technik GmbH
+ * Copyright (C) 2016-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#include <linux/can.h>
diff --git a/drivers/net/can/peak_canfd/peak_canfd_user.h b/drivers/net/can/peak_canfd/peak_canfd_user.h
index a72719dc3b74..60c6542028cf 100644
--- a/drivers/net/can/peak_canfd/peak_canfd_user.h
+++ b/drivers/net/can/peak_canfd/peak_canfd_user.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* CAN driver for PEAK System micro-CAN based adapters
*
- * Copyright (C) 2003-2011 PEAK System-Technik GmbH
- * Copyright (C) 2011-2013 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#ifndef PEAK_CANFD_USER_H
#define PEAK_CANFD_USER_H
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index 1df3c4b54f03..93558e33bc02 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -1,10 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
- * Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com>
*
* Derived from the PCAN project file driver/src/pcan_pci.c:
*
- * Copyright (C) 2001-2006 PEAK System-Technik GmbH
+ * Copyright (C) 2001-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#include <linux/kernel.h>
@@ -19,7 +19,7 @@
#include "peak_canfd_user.h"
-MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
+MODULE_AUTHOR("Stéphane Grosjean <stephane.grosjean@hms-networks.com>");
MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe/M.2 FD family cards");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index 64e664f5adcc..5f85f4e27205 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -5,6 +5,8 @@
* Copyright (C) 2013 Renesas Solutions Corp.
*/
+#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -16,6 +18,7 @@
#include <linux/can/dev.h>
#include <linux/clk.h>
#include <linux/of.h>
+#include <linux/pm_runtime.h>
#define RCAR_CAN_DRV_NAME "rcar_can"
@@ -92,7 +95,6 @@ struct rcar_can_priv {
struct net_device *ndev;
struct napi_struct napi;
struct rcar_can_regs __iomem *regs;
- struct clk *clk;
struct clk *can_clk;
u32 tx_head;
u32 tx_tail;
@@ -113,100 +115,102 @@ static const struct can_bittiming_const rcar_can_bittiming_const = {
};
/* Control Register bits */
-#define RCAR_CAN_CTLR_BOM (3 << 11) /* Bus-Off Recovery Mode Bits */
-#define RCAR_CAN_CTLR_BOM_ENT (1 << 11) /* Entry to halt mode */
- /* at bus-off entry */
-#define RCAR_CAN_CTLR_SLPM (1 << 10)
-#define RCAR_CAN_CTLR_CANM (3 << 8) /* Operating Mode Select Bit */
-#define RCAR_CAN_CTLR_CANM_HALT (1 << 9)
-#define RCAR_CAN_CTLR_CANM_RESET (1 << 8)
-#define RCAR_CAN_CTLR_CANM_FORCE_RESET (3 << 8)
-#define RCAR_CAN_CTLR_MLM (1 << 3) /* Message Lost Mode Select */
-#define RCAR_CAN_CTLR_IDFM (3 << 1) /* ID Format Mode Select Bits */
-#define RCAR_CAN_CTLR_IDFM_MIXED (1 << 2) /* Mixed ID mode */
-#define RCAR_CAN_CTLR_MBM (1 << 0) /* Mailbox Mode select */
+#define RCAR_CAN_CTLR_BOM GENMASK(12, 11) /* Bus-Off Recovery Mode Bits */
+#define RCAR_CAN_CTLR_BOM_ENT 1 /* Entry to halt mode */
+ /* at bus-off entry */
+#define RCAR_CAN_CTLR_SLPM BIT(10) /* Sleep Mode */
+#define RCAR_CAN_CTLR_CANM GENMASK(9, 8) /* Operating Mode Select Bit */
+#define RCAR_CAN_CTLR_CANM_OPER 0 /* Operation Mode */
+#define RCAR_CAN_CTLR_CANM_RESET 1 /* Reset Mode */
+#define RCAR_CAN_CTLR_CANM_HALT 2 /* Halt Mode */
+#define RCAR_CAN_CTLR_CANM_FORCE_RESET 3 /* Reset Mode (forcible) */
+#define RCAR_CAN_CTLR_MLM BIT(3) /* Message Lost Mode Select */
+#define RCAR_CAN_CTLR_IDFM GENMASK(2, 1) /* ID Format Mode Select Bits */
+#define RCAR_CAN_CTLR_IDFM_STD 0 /* Standard ID mode */
+#define RCAR_CAN_CTLR_IDFM_EXT 1 /* Extended ID mode */
+#define RCAR_CAN_CTLR_IDFM_MIXED 2 /* Mixed ID mode */
+#define RCAR_CAN_CTLR_MBM BIT(0) /* Mailbox Mode select */
/* Status Register bits */
-#define RCAR_CAN_STR_RSTST (1 << 8) /* Reset Status Bit */
+#define RCAR_CAN_STR_RSTST BIT(8) /* Reset Status Bit */
/* FIFO Received ID Compare Registers 0 and 1 bits */
-#define RCAR_CAN_FIDCR_IDE (1 << 31) /* ID Extension Bit */
-#define RCAR_CAN_FIDCR_RTR (1 << 30) /* Remote Transmission Request Bit */
+#define RCAR_CAN_FIDCR_IDE BIT(31) /* ID Extension Bit */
+#define RCAR_CAN_FIDCR_RTR BIT(30) /* Remote Transmission Request Bit */
/* Receive FIFO Control Register bits */
-#define RCAR_CAN_RFCR_RFEST (1 << 7) /* Receive FIFO Empty Status Flag */
-#define RCAR_CAN_RFCR_RFE (1 << 0) /* Receive FIFO Enable */
+#define RCAR_CAN_RFCR_RFEST BIT(7) /* Receive FIFO Empty Status Flag */
+#define RCAR_CAN_RFCR_RFE BIT(0) /* Receive FIFO Enable */
/* Transmit FIFO Control Register bits */
-#define RCAR_CAN_TFCR_TFUST (7 << 1) /* Transmit FIFO Unsent Message */
- /* Number Status Bits */
-#define RCAR_CAN_TFCR_TFUST_SHIFT 1 /* Offset of Transmit FIFO Unsent */
- /* Message Number Status Bits */
-#define RCAR_CAN_TFCR_TFE (1 << 0) /* Transmit FIFO Enable */
-
-#define RCAR_CAN_N_RX_MKREGS1 2 /* Number of mask registers */
- /* for Rx mailboxes 0-31 */
+#define RCAR_CAN_TFCR_TFUST GENMASK(3, 1) /* Transmit FIFO Unsent Message */
+ /* Number Status Bits */
+#define RCAR_CAN_TFCR_TFE BIT(0) /* Transmit FIFO Enable */
+
+#define RCAR_CAN_N_RX_MKREGS1 2 /* Number of mask registers */
+ /* for Rx mailboxes 0-31 */
#define RCAR_CAN_N_RX_MKREGS2 8
/* Bit Configuration Register settings */
-#define RCAR_CAN_BCR_TSEG1(x) (((x) & 0x0f) << 20)
-#define RCAR_CAN_BCR_BPR(x) (((x) & 0x3ff) << 8)
-#define RCAR_CAN_BCR_SJW(x) (((x) & 0x3) << 4)
-#define RCAR_CAN_BCR_TSEG2(x) ((x) & 0x07)
+#define RCAR_CAN_BCR_TSEG1 GENMASK(23, 20)
+#define RCAR_CAN_BCR_BRP GENMASK(17, 8)
+#define RCAR_CAN_BCR_SJW GENMASK(5, 4)
+#define RCAR_CAN_BCR_TSEG2 GENMASK(2, 0)
/* Mailbox and Mask Registers bits */
-#define RCAR_CAN_IDE (1 << 31)
-#define RCAR_CAN_RTR (1 << 30)
-#define RCAR_CAN_SID_SHIFT 18
+#define RCAR_CAN_IDE BIT(31) /* ID Extension */
+#define RCAR_CAN_RTR BIT(30) /* Remote Transmission Request */
+#define RCAR_CAN_SID GENMASK(28, 18) /* Standard ID */
+#define RCAR_CAN_EID GENMASK(28, 0) /* Extended ID */
/* Mailbox Interrupt Enable Register 1 bits */
-#define RCAR_CAN_MIER1_RXFIE (1 << 28) /* Receive FIFO Interrupt Enable */
-#define RCAR_CAN_MIER1_TXFIE (1 << 24) /* Transmit FIFO Interrupt Enable */
+#define RCAR_CAN_MIER1_RXFIE BIT(28) /* Receive FIFO Interrupt Enable */
+#define RCAR_CAN_MIER1_TXFIE BIT(24) /* Transmit FIFO Interrupt Enable */
/* Interrupt Enable Register bits */
-#define RCAR_CAN_IER_ERSIE (1 << 5) /* Error (ERS) Interrupt Enable Bit */
-#define RCAR_CAN_IER_RXFIE (1 << 4) /* Reception FIFO Interrupt */
- /* Enable Bit */
-#define RCAR_CAN_IER_TXFIE (1 << 3) /* Transmission FIFO Interrupt */
- /* Enable Bit */
+#define RCAR_CAN_IER_ERSIE BIT(5) /* Error (ERS) Interrupt Enable Bit */
+#define RCAR_CAN_IER_RXFIE BIT(4) /* Reception FIFO Interrupt */
+ /* Enable Bit */
+#define RCAR_CAN_IER_TXFIE BIT(3) /* Transmission FIFO Interrupt */
+ /* Enable Bit */
/* Interrupt Status Register bits */
-#define RCAR_CAN_ISR_ERSF (1 << 5) /* Error (ERS) Interrupt Status Bit */
-#define RCAR_CAN_ISR_RXFF (1 << 4) /* Reception FIFO Interrupt */
- /* Status Bit */
-#define RCAR_CAN_ISR_TXFF (1 << 3) /* Transmission FIFO Interrupt */
- /* Status Bit */
+#define RCAR_CAN_ISR_ERSF BIT(5) /* Error (ERS) Interrupt Status Bit */
+#define RCAR_CAN_ISR_RXFF BIT(4) /* Reception FIFO Interrupt */
+ /* Status Bit */
+#define RCAR_CAN_ISR_TXFF BIT(3) /* Transmission FIFO Interrupt */
+ /* Status Bit */
/* Error Interrupt Enable Register bits */
-#define RCAR_CAN_EIER_BLIE (1 << 7) /* Bus Lock Interrupt Enable */
-#define RCAR_CAN_EIER_OLIE (1 << 6) /* Overload Frame Transmit */
- /* Interrupt Enable */
-#define RCAR_CAN_EIER_ORIE (1 << 5) /* Receive Overrun Interrupt Enable */
-#define RCAR_CAN_EIER_BORIE (1 << 4) /* Bus-Off Recovery Interrupt Enable */
-#define RCAR_CAN_EIER_BOEIE (1 << 3) /* Bus-Off Entry Interrupt Enable */
-#define RCAR_CAN_EIER_EPIE (1 << 2) /* Error Passive Interrupt Enable */
-#define RCAR_CAN_EIER_EWIE (1 << 1) /* Error Warning Interrupt Enable */
-#define RCAR_CAN_EIER_BEIE (1 << 0) /* Bus Error Interrupt Enable */
+#define RCAR_CAN_EIER_BLIE BIT(7) /* Bus Lock Interrupt Enable */
+#define RCAR_CAN_EIER_OLIE BIT(6) /* Overload Frame Transmit */
+ /* Interrupt Enable */
+#define RCAR_CAN_EIER_ORIE BIT(5) /* Receive Overrun Interrupt Enable */
+#define RCAR_CAN_EIER_BORIE BIT(4) /* Bus-Off Recovery Interrupt Enable */
+#define RCAR_CAN_EIER_BOEIE BIT(3) /* Bus-Off Entry Interrupt Enable */
+#define RCAR_CAN_EIER_EPIE BIT(2) /* Error Passive Interrupt Enable */
+#define RCAR_CAN_EIER_EWIE BIT(1) /* Error Warning Interrupt Enable */
+#define RCAR_CAN_EIER_BEIE BIT(0) /* Bus Error Interrupt Enable */
/* Error Interrupt Factor Judge Register bits */
-#define RCAR_CAN_EIFR_BLIF (1 << 7) /* Bus Lock Detect Flag */
-#define RCAR_CAN_EIFR_OLIF (1 << 6) /* Overload Frame Transmission */
- /* Detect Flag */
-#define RCAR_CAN_EIFR_ORIF (1 << 5) /* Receive Overrun Detect Flag */
-#define RCAR_CAN_EIFR_BORIF (1 << 4) /* Bus-Off Recovery Detect Flag */
-#define RCAR_CAN_EIFR_BOEIF (1 << 3) /* Bus-Off Entry Detect Flag */
-#define RCAR_CAN_EIFR_EPIF (1 << 2) /* Error Passive Detect Flag */
-#define RCAR_CAN_EIFR_EWIF (1 << 1) /* Error Warning Detect Flag */
-#define RCAR_CAN_EIFR_BEIF (1 << 0) /* Bus Error Detect Flag */
+#define RCAR_CAN_EIFR_BLIF BIT(7) /* Bus Lock Detect Flag */
+#define RCAR_CAN_EIFR_OLIF BIT(6) /* Overload Frame Transmission */
+ /* Detect Flag */
+#define RCAR_CAN_EIFR_ORIF BIT(5) /* Receive Overrun Detect Flag */
+#define RCAR_CAN_EIFR_BORIF BIT(4) /* Bus-Off Recovery Detect Flag */
+#define RCAR_CAN_EIFR_BOEIF BIT(3) /* Bus-Off Entry Detect Flag */
+#define RCAR_CAN_EIFR_EPIF BIT(2) /* Error Passive Detect Flag */
+#define RCAR_CAN_EIFR_EWIF BIT(1) /* Error Warning Detect Flag */
+#define RCAR_CAN_EIFR_BEIF BIT(0) /* Bus Error Detect Flag */
/* Error Code Store Register bits */
-#define RCAR_CAN_ECSR_EDPM (1 << 7) /* Error Display Mode Select Bit */
-#define RCAR_CAN_ECSR_ADEF (1 << 6) /* ACK Delimiter Error Flag */
-#define RCAR_CAN_ECSR_BE0F (1 << 5) /* Bit Error (dominant) Flag */
-#define RCAR_CAN_ECSR_BE1F (1 << 4) /* Bit Error (recessive) Flag */
-#define RCAR_CAN_ECSR_CEF (1 << 3) /* CRC Error Flag */
-#define RCAR_CAN_ECSR_AEF (1 << 2) /* ACK Error Flag */
-#define RCAR_CAN_ECSR_FEF (1 << 1) /* Form Error Flag */
-#define RCAR_CAN_ECSR_SEF (1 << 0) /* Stuff Error Flag */
+#define RCAR_CAN_ECSR_EDPM BIT(7) /* Error Display Mode Select Bit */
+#define RCAR_CAN_ECSR_ADEF BIT(6) /* ACK Delimiter Error Flag */
+#define RCAR_CAN_ECSR_BE0F BIT(5) /* Bit Error (dominant) Flag */
+#define RCAR_CAN_ECSR_BE1F BIT(4) /* Bit Error (recessive) Flag */
+#define RCAR_CAN_ECSR_CEF BIT(3) /* CRC Error Flag */
+#define RCAR_CAN_ECSR_AEF BIT(2) /* ACK Error Flag */
+#define RCAR_CAN_ECSR_FEF BIT(1) /* Form Error Flag */
+#define RCAR_CAN_ECSR_SEF BIT(0) /* Stuff Error Flag */
#define RCAR_CAN_NAPI_WEIGHT 4
#define MAX_STR_READS 0x100
@@ -248,35 +252,35 @@ static void rcar_can_error(struct net_device *ndev)
if (ecsr & RCAR_CAN_ECSR_ADEF) {
netdev_dbg(priv->ndev, "ACK Delimiter Error\n");
tx_errors++;
- writeb(~RCAR_CAN_ECSR_ADEF, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_ADEF, &priv->regs->ecsr);
if (skb)
cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL;
}
if (ecsr & RCAR_CAN_ECSR_BE0F) {
netdev_dbg(priv->ndev, "Bit Error (dominant)\n");
tx_errors++;
- writeb(~RCAR_CAN_ECSR_BE0F, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_BE0F, &priv->regs->ecsr);
if (skb)
cf->data[2] |= CAN_ERR_PROT_BIT0;
}
if (ecsr & RCAR_CAN_ECSR_BE1F) {
netdev_dbg(priv->ndev, "Bit Error (recessive)\n");
tx_errors++;
- writeb(~RCAR_CAN_ECSR_BE1F, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_BE1F, &priv->regs->ecsr);
if (skb)
cf->data[2] |= CAN_ERR_PROT_BIT1;
}
if (ecsr & RCAR_CAN_ECSR_CEF) {
netdev_dbg(priv->ndev, "CRC Error\n");
rx_errors++;
- writeb(~RCAR_CAN_ECSR_CEF, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_CEF, &priv->regs->ecsr);
if (skb)
cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
}
if (ecsr & RCAR_CAN_ECSR_AEF) {
netdev_dbg(priv->ndev, "ACK Error\n");
tx_errors++;
- writeb(~RCAR_CAN_ECSR_AEF, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_AEF, &priv->regs->ecsr);
if (skb) {
cf->can_id |= CAN_ERR_ACK;
cf->data[3] = CAN_ERR_PROT_LOC_ACK;
@@ -285,14 +289,14 @@ static void rcar_can_error(struct net_device *ndev)
if (ecsr & RCAR_CAN_ECSR_FEF) {
netdev_dbg(priv->ndev, "Form Error\n");
rx_errors++;
- writeb(~RCAR_CAN_ECSR_FEF, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_FEF, &priv->regs->ecsr);
if (skb)
cf->data[2] |= CAN_ERR_PROT_FORM;
}
if (ecsr & RCAR_CAN_ECSR_SEF) {
netdev_dbg(priv->ndev, "Stuff Error\n");
rx_errors++;
- writeb(~RCAR_CAN_ECSR_SEF, &priv->regs->ecsr);
+ writeb((u8)~RCAR_CAN_ECSR_SEF, &priv->regs->ecsr);
if (skb)
cf->data[2] |= CAN_ERR_PROT_STUFF;
}
@@ -300,14 +304,14 @@ static void rcar_can_error(struct net_device *ndev)
priv->can.can_stats.bus_error++;
ndev->stats.rx_errors += rx_errors;
ndev->stats.tx_errors += tx_errors;
- writeb(~RCAR_CAN_EIFR_BEIF, &priv->regs->eifr);
+ writeb((u8)~RCAR_CAN_EIFR_BEIF, &priv->regs->eifr);
}
if (eifr & RCAR_CAN_EIFR_EWIF) {
netdev_dbg(priv->ndev, "Error warning interrupt\n");
priv->can.state = CAN_STATE_ERROR_WARNING;
priv->can.can_stats.error_warning++;
/* Clear interrupt condition */
- writeb(~RCAR_CAN_EIFR_EWIF, &priv->regs->eifr);
+ writeb((u8)~RCAR_CAN_EIFR_EWIF, &priv->regs->eifr);
if (skb)
cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
@@ -317,7 +321,7 @@ static void rcar_can_error(struct net_device *ndev)
priv->can.state = CAN_STATE_ERROR_PASSIVE;
priv->can.can_stats.error_passive++;
/* Clear interrupt condition */
- writeb(~RCAR_CAN_EIFR_EPIF, &priv->regs->eifr);
+ writeb((u8)~RCAR_CAN_EIFR_EPIF, &priv->regs->eifr);
if (skb)
cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_PASSIVE :
CAN_ERR_CRTL_RX_PASSIVE;
@@ -329,7 +333,7 @@ static void rcar_can_error(struct net_device *ndev)
writeb(priv->ier, &priv->regs->ier);
priv->can.state = CAN_STATE_BUS_OFF;
/* Clear interrupt condition */
- writeb(~RCAR_CAN_EIFR_BOEIF, &priv->regs->eifr);
+ writeb((u8)~RCAR_CAN_EIFR_BOEIF, &priv->regs->eifr);
priv->can.can_stats.bus_off++;
can_bus_off(ndev);
if (skb)
@@ -343,7 +347,7 @@ static void rcar_can_error(struct net_device *ndev)
netdev_dbg(priv->ndev, "Receive overrun error interrupt\n");
ndev->stats.rx_over_errors++;
ndev->stats.rx_errors++;
- writeb(~RCAR_CAN_EIFR_ORIF, &priv->regs->eifr);
+ writeb((u8)~RCAR_CAN_EIFR_ORIF, &priv->regs->eifr);
if (skb) {
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
@@ -354,7 +358,7 @@ static void rcar_can_error(struct net_device *ndev)
"Overload Frame Transmission error interrupt\n");
ndev->stats.rx_over_errors++;
ndev->stats.rx_errors++;
- writeb(~RCAR_CAN_EIFR_OLIF, &priv->regs->eifr);
+ writeb((u8)~RCAR_CAN_EIFR_OLIF, &priv->regs->eifr);
if (skb) {
cf->can_id |= CAN_ERR_PROT;
cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
@@ -372,10 +376,9 @@ static void rcar_can_tx_done(struct net_device *ndev)
u8 isr;
while (1) {
- u8 unsent = readb(&priv->regs->tfcr);
+ u8 unsent = FIELD_GET(RCAR_CAN_TFCR_TFUST,
+ readb(&priv->regs->tfcr));
- unsent = (unsent & RCAR_CAN_TFCR_TFUST) >>
- RCAR_CAN_TFCR_TFUST_SHIFT;
if (priv->tx_head - priv->tx_tail <= unsent)
break;
stats->tx_packets++;
@@ -420,15 +423,16 @@ static irqreturn_t rcar_can_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void rcar_can_set_bittiming(struct net_device *dev)
+static void rcar_can_set_bittiming(struct net_device *ndev)
{
- struct rcar_can_priv *priv = netdev_priv(dev);
+ struct rcar_can_priv *priv = netdev_priv(ndev);
struct can_bittiming *bt = &priv->can.bittiming;
u32 bcr;
- bcr = RCAR_CAN_BCR_TSEG1(bt->phase_seg1 + bt->prop_seg - 1) |
- RCAR_CAN_BCR_BPR(bt->brp - 1) | RCAR_CAN_BCR_SJW(bt->sjw - 1) |
- RCAR_CAN_BCR_TSEG2(bt->phase_seg2 - 1);
+ bcr = FIELD_PREP(RCAR_CAN_BCR_TSEG1, bt->phase_seg1 + bt->prop_seg - 1) |
+ FIELD_PREP(RCAR_CAN_BCR_BRP, bt->brp - 1) |
+ FIELD_PREP(RCAR_CAN_BCR_SJW, bt->sjw - 1) |
+ FIELD_PREP(RCAR_CAN_BCR_TSEG2, bt->phase_seg2 - 1);
/* Don't overwrite CLKR with 32-bit BCR access; CLKR has 8-bit access.
* All the registers are big-endian but they get byte-swapped on 32-bit
* read/write (but not on 8-bit, contrary to the manuals)...
@@ -452,16 +456,17 @@ static void rcar_can_start(struct net_device *ndev)
ctlr &= ~RCAR_CAN_CTLR_SLPM;
writew(ctlr, &priv->regs->ctlr);
/* Go to reset mode */
- ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET;
+ ctlr |= FIELD_PREP(RCAR_CAN_CTLR_CANM, RCAR_CAN_CTLR_CANM_FORCE_RESET);
writew(ctlr, &priv->regs->ctlr);
for (i = 0; i < MAX_STR_READS; i++) {
if (readw(&priv->regs->str) & RCAR_CAN_STR_RSTST)
break;
}
rcar_can_set_bittiming(ndev);
- ctlr |= RCAR_CAN_CTLR_IDFM_MIXED; /* Select mixed ID mode */
- ctlr |= RCAR_CAN_CTLR_BOM_ENT; /* Entry to halt mode automatically */
- /* at bus-off */
+ /* Select mixed ID mode */
+ ctlr |= FIELD_PREP(RCAR_CAN_CTLR_IDFM, RCAR_CAN_CTLR_IDFM_MIXED);
+ /* Entry to halt mode automatically at bus-off */
+ ctlr |= FIELD_PREP(RCAR_CAN_CTLR_BOM, RCAR_CAN_CTLR_BOM_ENT);
ctlr |= RCAR_CAN_CTLR_MBM; /* Select FIFO mailbox mode */
ctlr |= RCAR_CAN_CTLR_MLM; /* Overrun mode */
writew(ctlr, &priv->regs->ctlr);
@@ -491,7 +496,9 @@ static void rcar_can_start(struct net_device *ndev)
priv->can.state = CAN_STATE_ERROR_ACTIVE;
/* Go to operation mode */
- writew(ctlr & ~RCAR_CAN_CTLR_CANM, &priv->regs->ctlr);
+ ctlr &= ~RCAR_CAN_CTLR_CANM;
+ ctlr |= FIELD_PREP(RCAR_CAN_CTLR_CANM, RCAR_CAN_CTLR_CANM_OPER);
+ writew(ctlr, &priv->regs->ctlr);
for (i = 0; i < MAX_STR_READS; i++) {
if (!(readw(&priv->regs->str) & RCAR_CAN_STR_RSTST))
break;
@@ -506,29 +513,28 @@ static int rcar_can_open(struct net_device *ndev)
struct rcar_can_priv *priv = netdev_priv(ndev);
int err;
- err = clk_prepare_enable(priv->clk);
+ err = pm_runtime_resume_and_get(ndev->dev.parent);
if (err) {
- netdev_err(ndev,
- "failed to enable peripheral clock, error %d\n",
- err);
+ netdev_err(ndev, "pm_runtime_resume_and_get() failed %pe\n",
+ ERR_PTR(err));
goto out;
}
err = clk_prepare_enable(priv->can_clk);
if (err) {
- netdev_err(ndev, "failed to enable CAN clock, error %d\n",
- err);
- goto out_clock;
+ netdev_err(ndev, "failed to enable CAN clock: %pe\n",
+ ERR_PTR(err));
+ goto out_rpm;
}
err = open_candev(ndev);
if (err) {
- netdev_err(ndev, "open_candev() failed, error %d\n", err);
+ netdev_err(ndev, "open_candev() failed %pe\n", ERR_PTR(err));
goto out_can_clock;
}
napi_enable(&priv->napi);
err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev);
if (err) {
- netdev_err(ndev, "request_irq(%d) failed, error %d\n",
- ndev->irq, err);
+ netdev_err(ndev, "request_irq(%d) failed %pe\n", ndev->irq,
+ ERR_PTR(err));
goto out_close;
}
rcar_can_start(ndev);
@@ -539,8 +545,8 @@ out_close:
close_candev(ndev);
out_can_clock:
clk_disable_unprepare(priv->can_clk);
-out_clock:
- clk_disable_unprepare(priv->clk);
+out_rpm:
+ pm_runtime_put(ndev->dev.parent);
out:
return err;
}
@@ -553,7 +559,7 @@ static void rcar_can_stop(struct net_device *ndev)
/* Go to (force) reset mode */
ctlr = readw(&priv->regs->ctlr);
- ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET;
+ ctlr |= FIELD_PREP(RCAR_CAN_CTLR_CANM, RCAR_CAN_CTLR_CANM_FORCE_RESET);
writew(ctlr, &priv->regs->ctlr);
for (i = 0; i < MAX_STR_READS; i++) {
if (readw(&priv->regs->str) & RCAR_CAN_STR_RSTST)
@@ -578,7 +584,7 @@ static int rcar_can_close(struct net_device *ndev)
free_irq(ndev->irq, ndev);
napi_disable(&priv->napi);
clk_disable_unprepare(priv->can_clk);
- clk_disable_unprepare(priv->clk);
+ pm_runtime_put(ndev->dev.parent);
close_candev(ndev);
return 0;
}
@@ -594,9 +600,10 @@ static netdev_tx_t rcar_can_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */
- data = (cf->can_id & CAN_EFF_MASK) | RCAR_CAN_IDE;
+ data = FIELD_PREP(RCAR_CAN_EID, cf->can_id & CAN_EFF_MASK) |
+ RCAR_CAN_IDE;
else /* Standard frame format */
- data = (cf->can_id & CAN_SFF_MASK) << RCAR_CAN_SID_SHIFT;
+ data = FIELD_PREP(RCAR_CAN_SID, cf->can_id & CAN_SFF_MASK);
if (cf->can_id & CAN_RTR_FLAG) { /* Remote transmission request */
data |= RCAR_CAN_RTR;
@@ -651,9 +658,9 @@ static void rcar_can_rx_pkt(struct rcar_can_priv *priv)
data = readl(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].id);
if (data & RCAR_CAN_IDE)
- cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ cf->can_id = FIELD_GET(RCAR_CAN_EID, data) | CAN_EFF_FLAG;
else
- cf->can_id = (data >> RCAR_CAN_SID_SHIFT) & CAN_SFF_MASK;
+ cf->can_id = FIELD_GET(RCAR_CAN_SID, data);
dlc = readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].dlc);
cf->len = can_cc_dlc2len(dlc);
@@ -715,18 +722,21 @@ static int rcar_can_do_set_mode(struct net_device *ndev, enum can_mode mode)
}
}
-static int rcar_can_get_berr_counter(const struct net_device *dev,
+static int rcar_can_get_berr_counter(const struct net_device *ndev,
struct can_berr_counter *bec)
{
- struct rcar_can_priv *priv = netdev_priv(dev);
+ struct rcar_can_priv *priv = netdev_priv(ndev);
int err;
- err = clk_prepare_enable(priv->clk);
+ err = pm_runtime_resume_and_get(ndev->dev.parent);
if (err)
return err;
+
bec->txerr = readb(&priv->regs->tecr);
bec->rxerr = readb(&priv->regs->recr);
- clk_disable_unprepare(priv->clk);
+
+ pm_runtime_put(ndev->dev.parent);
+
return 0;
}
@@ -738,6 +748,7 @@ static const char * const clock_names[] = {
static int rcar_can_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct rcar_can_priv *priv;
struct net_device *ndev;
void __iomem *addr;
@@ -745,7 +756,7 @@ static int rcar_can_probe(struct platform_device *pdev)
int err = -ENODEV;
int irq;
- of_property_read_u32(pdev->dev.of_node, "renesas,can-clock-select",
+ of_property_read_u32(dev->of_node, "renesas,can-clock-select",
&clock_select);
irq = platform_get_irq(pdev, 0);
@@ -762,30 +773,21 @@ static int rcar_can_probe(struct platform_device *pdev)
ndev = alloc_candev(sizeof(struct rcar_can_priv), RCAR_CAN_FIFO_DEPTH);
if (!ndev) {
- dev_err(&pdev->dev, "alloc_candev() failed\n");
err = -ENOMEM;
goto fail;
}
priv = netdev_priv(ndev);
- priv->clk = devm_clk_get(&pdev->dev, "clkp1");
- if (IS_ERR(priv->clk)) {
- err = PTR_ERR(priv->clk);
- dev_err(&pdev->dev, "cannot get peripheral clock, error %d\n",
- err);
- goto fail_clk;
- }
-
if (!(BIT(clock_select) & RCAR_SUPPORTED_CLOCKS)) {
err = -EINVAL;
- dev_err(&pdev->dev, "invalid CAN clock selected\n");
+ dev_err(dev, "invalid CAN clock selected\n");
goto fail_clk;
}
- priv->can_clk = devm_clk_get(&pdev->dev, clock_names[clock_select]);
+ priv->can_clk = devm_clk_get(dev, clock_names[clock_select]);
if (IS_ERR(priv->can_clk)) {
+ dev_err(dev, "cannot get CAN clock: %pe\n", priv->can_clk);
err = PTR_ERR(priv->can_clk);
- dev_err(&pdev->dev, "cannot get CAN clock, error %d\n", err);
goto fail_clk;
}
@@ -802,21 +804,24 @@ static int rcar_can_probe(struct platform_device *pdev)
priv->can.do_get_berr_counter = rcar_can_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING;
platform_set_drvdata(pdev, ndev);
- SET_NETDEV_DEV(ndev, &pdev->dev);
+ SET_NETDEV_DEV(ndev, dev);
netif_napi_add_weight(ndev, &priv->napi, rcar_can_rx_poll,
RCAR_CAN_NAPI_WEIGHT);
+
+ pm_runtime_enable(dev);
+
err = register_candev(ndev);
if (err) {
- dev_err(&pdev->dev, "register_candev() failed, error %d\n",
- err);
- goto fail_candev;
+ dev_err(dev, "register_candev() failed %pe\n", ERR_PTR(err));
+ goto fail_rpm;
}
- dev_info(&pdev->dev, "device registered (IRQ%d)\n", ndev->irq);
+ dev_info(dev, "device registered (IRQ%d)\n", ndev->irq);
return 0;
-fail_candev:
+fail_rpm:
+ pm_runtime_disable(dev);
netif_napi_del(&priv->napi);
fail_clk:
free_candev(ndev);
@@ -830,6 +835,7 @@ static void rcar_can_remove(struct platform_device *pdev)
struct rcar_can_priv *priv = netdev_priv(ndev);
unregister_candev(ndev);
+ pm_runtime_disable(&pdev->dev);
netif_napi_del(&priv->napi);
free_candev(ndev);
}
@@ -847,38 +853,32 @@ static int rcar_can_suspend(struct device *dev)
netif_device_detach(ndev);
ctlr = readw(&priv->regs->ctlr);
- ctlr |= RCAR_CAN_CTLR_CANM_HALT;
+ ctlr |= FIELD_PREP(RCAR_CAN_CTLR_CANM, RCAR_CAN_CTLR_CANM_HALT);
writew(ctlr, &priv->regs->ctlr);
ctlr |= RCAR_CAN_CTLR_SLPM;
writew(ctlr, &priv->regs->ctlr);
priv->can.state = CAN_STATE_SLEEPING;
- clk_disable(priv->clk);
+ pm_runtime_put(dev);
return 0;
}
static int rcar_can_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
- struct rcar_can_priv *priv = netdev_priv(ndev);
- u16 ctlr;
int err;
if (!netif_running(ndev))
return 0;
- err = clk_enable(priv->clk);
+ err = pm_runtime_resume_and_get(dev);
if (err) {
- netdev_err(ndev, "clk_enable() failed, error %d\n", err);
+ netdev_err(ndev, "pm_runtime_resume_and_get() failed %pe\n",
+ ERR_PTR(err));
return err;
}
- ctlr = readw(&priv->regs->ctlr);
- ctlr &= ~RCAR_CAN_CTLR_SLPM;
- writew(ctlr, &priv->regs->ctlr);
- ctlr &= ~RCAR_CAN_CTLR_CANM;
- writew(ctlr, &priv->regs->ctlr);
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ rcar_can_start(ndev);
netif_device_attach(ndev);
netif_start_queue(ndev);
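
A worked example of the bitfield conversion applied throughout rcar_can.c,
using RCAR_CAN_SID (GENMASK(28, 18)); the value is illustrative:

	u32 reg = FIELD_PREP(RCAR_CAN_SID, 0x123);	/* places 0x123 in bits 28:18 */
	u32 sid = FIELD_GET(RCAR_CAN_SID, reg);		/* recovers 0x123 */

Besides being shorter, FIELD_PREP()/FIELD_GET() type-check the mask at compile
time, which the old open-coded shift macros could not.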
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index b3c8c592fb0e..45d36adb51b7 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -103,22 +103,13 @@
/* Channel register bits */
/* RSCFDnCmCFG - Classical CAN only */
-#define RCANFD_CFG_SJW(x) (((x) & 0x3) << 24)
-#define RCANFD_CFG_TSEG2(x) (((x) & 0x7) << 20)
-#define RCANFD_CFG_TSEG1(x) (((x) & 0xf) << 16)
-#define RCANFD_CFG_BRP(x) (((x) & 0x3ff) << 0)
+#define RCANFD_CFG_SJW GENMASK(25, 24)
+#define RCANFD_CFG_TSEG2 GENMASK(22, 20)
+#define RCANFD_CFG_TSEG1 GENMASK(19, 16)
+#define RCANFD_CFG_BRP GENMASK(9, 0)
/* RSCFDnCFDCmNCFG - CAN FD only */
-#define RCANFD_NCFG_NTSEG2(gpriv, x) \
- (((x) & ((gpriv)->info->nom_bittiming->tseg2_max - 1)) << (gpriv)->info->sh->ntseg2)
-
-#define RCANFD_NCFG_NTSEG1(gpriv, x) \
- (((x) & ((gpriv)->info->nom_bittiming->tseg1_max - 1)) << (gpriv)->info->sh->ntseg1)
-
-#define RCANFD_NCFG_NSJW(gpriv, x) \
- (((x) & ((gpriv)->info->nom_bittiming->sjw_max - 1)) << (gpriv)->info->sh->nsjw)
-
-#define RCANFD_NCFG_NBRP(x) (((x) & 0x3ff) << 0)
+#define RCANFD_NCFG_NBRP GENMASK(9, 0)
/* RSCFDnCFDCmCTR / RSCFDnCmCTR */
#define RCANFD_CCTR_CTME BIT(24)
@@ -178,15 +169,7 @@
#define RCANFD_CERFL_ERR(x) ((x) & (0x7fff)) /* above bits 14:0 */
/* RSCFDnCFDCmDCFG */
-#define RCANFD_DCFG_DSJW(gpriv, x) (((x) & ((gpriv)->info->data_bittiming->sjw_max - 1)) << 24)
-
-#define RCANFD_DCFG_DTSEG2(gpriv, x) \
- (((x) & ((gpriv)->info->data_bittiming->tseg2_max - 1)) << (gpriv)->info->sh->dtseg2)
-
-#define RCANFD_DCFG_DTSEG1(gpriv, x) \
- (((x) & ((gpriv)->info->data_bittiming->tseg1_max - 1)) << (gpriv)->info->sh->dtseg1)
-
-#define RCANFD_DCFG_DBRP(x) (((x) & 0xff) << 0)
+#define RCANFD_DCFG_DBRP GENMASK(7, 0)
/* RSCFDnCFDCmFDCFG */
#define RCANFD_GEN4_FDCFG_CLOE BIT(30)
@@ -823,9 +806,6 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
/* Reset Global error flags */
rcar_canfd_write(gpriv->base, RCANFD_GERFL, 0x0);
- /* Set the controller into appropriate mode */
- rcar_canfd_set_mode(gpriv);
-
/* Transition all Channels to reset mode */
for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) {
rcar_canfd_clear_bit(gpriv->base,
@@ -844,6 +824,10 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
return err;
}
}
+
+ /* Set the controller into appropriate mode */
+ rcar_canfd_set_mode(gpriv);
+
return 0;
}
@@ -1388,6 +1372,41 @@ static irqreturn_t rcar_canfd_channel_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static inline u32 rcar_canfd_compute_nominal_bit_rate_cfg(struct rcar_canfd_channel *priv,
+ u16 tseg1, u16 tseg2, u16 sjw, u16 brp)
+{
+ struct rcar_canfd_global *gpriv = priv->gpriv;
+ const struct rcar_canfd_hw_info *info = gpriv->info;
+ u32 ntseg1, ntseg2, nsjw, nbrp;
+
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || gpriv->info->shared_can_regs) {
+ ntseg1 = (tseg1 & (info->nom_bittiming->tseg1_max - 1)) << info->sh->ntseg1;
+ ntseg2 = (tseg2 & (info->nom_bittiming->tseg2_max - 1)) << info->sh->ntseg2;
+ nsjw = (sjw & (info->nom_bittiming->sjw_max - 1)) << info->sh->nsjw;
+ nbrp = FIELD_PREP(RCANFD_NCFG_NBRP, brp);
+ } else {
+ ntseg1 = FIELD_PREP(RCANFD_CFG_TSEG1, tseg1);
+ ntseg2 = FIELD_PREP(RCANFD_CFG_TSEG2, tseg2);
+ nsjw = FIELD_PREP(RCANFD_CFG_SJW, sjw);
+ nbrp = FIELD_PREP(RCANFD_CFG_BRP, brp);
+ }
+
+ return (ntseg1 | ntseg2 | nsjw | nbrp);
+}
+
+static inline u32 rcar_canfd_compute_data_bit_rate_cfg(const struct rcar_canfd_hw_info *info,
+ u16 tseg1, u16 tseg2, u16 sjw, u16 brp)
+{
+ u32 dtseg1, dtseg2, dsjw, dbrp;
+
+ dtseg1 = (tseg1 & (info->data_bittiming->tseg1_max - 1)) << info->sh->dtseg1;
+ dtseg2 = (tseg2 & (info->data_bittiming->tseg2_max - 1)) << info->sh->dtseg2;
+ dsjw = (sjw & (info->data_bittiming->sjw_max - 1)) << 24;
+ dbrp = FIELD_PREP(RCANFD_DCFG_DBRP, brp);
+
+ return (dtseg1 | dtseg2 | dsjw | dbrp);
+}
+
static void rcar_canfd_set_bittiming(struct net_device *ndev)
{
u32 mask = RCANFD_FDCFG_TDCO | RCANFD_FDCFG_TDCE | RCANFD_FDCFG_TDCOC;
@@ -1406,15 +1425,7 @@ static void rcar_canfd_set_bittiming(struct net_device *ndev)
sjw = bt->sjw - 1;
tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
tseg2 = bt->phase_seg2 - 1;
-
- if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || gpriv->info->shared_can_regs) {
- cfg = (RCANFD_NCFG_NTSEG1(gpriv, tseg1) | RCANFD_NCFG_NBRP(brp) |
- RCANFD_NCFG_NSJW(gpriv, sjw) | RCANFD_NCFG_NTSEG2(gpriv, tseg2));
- } else {
- cfg = (RCANFD_CFG_TSEG1(tseg1) | RCANFD_CFG_BRP(brp) |
- RCANFD_CFG_SJW(sjw) | RCANFD_CFG_TSEG2(tseg2));
- }
-
+ cfg = rcar_canfd_compute_nominal_bit_rate_cfg(priv, tseg1, tseg2, sjw, brp);
rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD))
@@ -1425,10 +1436,7 @@ static void rcar_canfd_set_bittiming(struct net_device *ndev)
sjw = dbt->sjw - 1;
tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
tseg2 = dbt->phase_seg2 - 1;
-
- cfg = (RCANFD_DCFG_DTSEG1(gpriv, tseg1) | RCANFD_DCFG_DBRP(brp) |
- RCANFD_DCFG_DSJW(gpriv, sjw) | RCANFD_DCFG_DTSEG2(gpriv, tseg2));
-
+ cfg = rcar_canfd_compute_data_bit_rate_cfg(gpriv->info, tseg1, tseg2, sjw, brp);
writel(cfg, &gpriv->fcbase[ch].dcfg);
/* Transceiver Delay Compensation */
@@ -1912,7 +1920,10 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
priv->can.fd.do_get_auto_tdcv = rcar_canfd_get_auto_tdcv;
} else {
/* Controller starts in Classical CAN only mode */
- priv->can.bittiming_const = &rcar_canfd_bittiming_const;
+ if (gpriv->info->shared_can_regs)
+ priv->can.bittiming_const = gpriv->info->nom_bittiming;
+ else
+ priv->can.bittiming_const = &rcar_canfd_bittiming_const;
priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING;
}
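
Illustrative use of the new rcar_canfd helper; the numbers are made up, and
all inputs follow the usual "register value = timing value - 1" encoding
computed by the caller:

	/* e.g. tseg1 = 12, tseg2 = 3, sjw = 1, brp = 5, all already minus one */
	u32 cfg = rcar_canfd_compute_nominal_bit_rate_cfg(priv, 12, 3, 1, 5);

	rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);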
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index da396d641e24..10d88cbda465 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -1,11 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
- * Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com>
*
* Derived from the PCAN project file driver/src/pcan_pci.c:
*
- * Copyright (C) 2001-2006 PEAK System-Technik GmbH
+ * Copyright (C) 2001-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#include <linux/kernel.h>
@@ -22,7 +22,7 @@
#include "sja1000.h"
-MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
+MODULE_AUTHOR("Stéphane Grosjean <stephane.grosjean@hms-networks.com>");
MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCI family cards");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index ce18e9e56059..e1610b527d13 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -1,10 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2010-2012 Stephane Grosjean <s.grosjean@peak-system.com>
- *
* CAN driver for PEAK-System PCAN-PC Card
* Derived from the PCAN project file driver/src/pcan_pccard.c
- * Copyright (C) 2006-2010 PEAK System-Technik GmbH
+ *
+ * Copyright (C) 2006-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#include <linux/kernel.h>
#include <linux/module.h>
@@ -19,7 +19,7 @@
#include <linux/can/dev.h>
#include "sja1000.h"
-MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
+MODULE_AUTHOR("Stéphane Grosjean <stephane.grosjean@hms-networks.com>");
MODULE_DESCRIPTION("CAN driver for PEAK-System PCAN-PC Cards");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index 09ae218315d7..6d4b643e135f 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -545,8 +545,6 @@ static int hi3110_stop(struct net_device *net)
priv->force_quit = 1;
free_irq(spi->irq, priv);
- destroy_workqueue(priv->wq);
- priv->wq = NULL;
mutex_lock(&priv->hi3110_lock);
@@ -770,34 +768,23 @@ static int hi3110_open(struct net_device *net)
goto out_close;
}
- priv->wq = alloc_workqueue("hi3110_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
- 0);
- if (!priv->wq) {
- ret = -ENOMEM;
- goto out_free_irq;
- }
- INIT_WORK(&priv->tx_work, hi3110_tx_work_handler);
- INIT_WORK(&priv->restart_work, hi3110_restart_work_handler);
-
ret = hi3110_hw_reset(spi);
if (ret)
- goto out_free_wq;
+ goto out_free_irq;
ret = hi3110_setup(net);
if (ret)
- goto out_free_wq;
+ goto out_free_irq;
ret = hi3110_set_normal_mode(spi);
if (ret)
- goto out_free_wq;
+ goto out_free_irq;
netif_wake_queue(net);
mutex_unlock(&priv->hi3110_lock);
return 0;
- out_free_wq:
- destroy_workqueue(priv->wq);
out_free_irq:
free_irq(spi->irq, priv);
hi3110_hw_sleep(spi);
@@ -812,6 +799,7 @@ static const struct net_device_ops hi3110_netdev_ops = {
.ndo_open = hi3110_open,
.ndo_stop = hi3110_stop,
.ndo_start_xmit = hi3110_hard_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops hi3110_ethtool_ops = {
@@ -908,6 +896,16 @@ static int hi3110_can_probe(struct spi_device *spi)
if (ret)
goto out_clk;
+ priv->wq = alloc_workqueue("hi3110_wq",
+ WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
+ 0);
+ if (!priv->wq) {
+ ret = -ENOMEM;
+ goto out_clk;
+ }
+ INIT_WORK(&priv->tx_work, hi3110_tx_work_handler);
+ INIT_WORK(&priv->restart_work, hi3110_restart_work_handler);
+
priv->spi = spi;
mutex_init(&priv->hi3110_lock);
@@ -943,6 +941,8 @@ static int hi3110_can_probe(struct spi_device *spi)
return 0;
error_probe:
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
hi3110_power_enable(priv->power, 0);
out_clk:
@@ -963,6 +963,9 @@ static void hi3110_can_remove(struct spi_device *spi)
hi3110_power_enable(priv->power, 0);
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
+
clk_disable_unprepare(priv->clk);
free_candev(net);
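
The hi311x change moves the workqueue from the open()/stop() path to the
probe()/remove() path, so a late allocation failure can no longer strand a
half-opened interface. A sketch of the new lifetime (condensed, not the
literal driver code):

	/* probe: allocate once */
	priv->wq = alloc_workqueue("hi3110_wq",
				   WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU, 0);
	INIT_WORK(&priv->tx_work, hi3110_tx_work_handler);
	/* open()/stop(): no workqueue setup or teardown anymore */
	/* remove: destroy once */
	destroy_workqueue(priv->wq);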
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 5a95877b7419..b797e08499d7 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -607,8 +607,8 @@ static int mcp251x_gpio_setup(struct mcp251x_priv *priv)
gpio->get_direction = mcp251x_gpio_get_direction;
gpio->get = mcp251x_gpio_get;
gpio->get_multiple = mcp251x_gpio_get_multiple;
- gpio->set_rv = mcp251x_gpio_set;
- gpio->set_multiple_rv = mcp251x_gpio_set_multiple;
+ gpio->set = mcp251x_gpio_set;
+ gpio->set_multiple = mcp251x_gpio_set_multiple;
gpio->base = -1;
gpio->ngpio = ARRAY_SIZE(mcp251x_gpio_names);
gpio->names = mcp251x_gpio_names;
@@ -1378,7 +1378,8 @@ static int mcp251x_can_probe(struct spi_device *spi)
if (ret)
goto out_clk;
- priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
+ priv->wq = alloc_workqueue("mcp251x_wq",
+ WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
0);
if (!priv->wq) {
ret = -ENOMEM;
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 6fcb301ef611..53bfd873de9b 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -768,6 +768,7 @@ static const struct net_device_ops sun4ican_netdev_ops = {
.ndo_open = sun4ican_open,
.ndo_stop = sun4ican_close,
.ndo_start_xmit = sun4ican_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops sun4ican_ethtool_ops = {
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index a7547a83120e..cf65a90816b9 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -134,6 +134,17 @@ config CAN_MCBA_USB
This driver supports the CAN BUS Analyzer interface
from Microchip (http://www.microchip.com/development-tools/).
+config CAN_NCT6694
+ tristate "Nuvoton NCT6694 Socket CANfd support"
+ depends on MFD_NCT6694
+ select CAN_RX_OFFLOAD
+ help
+	  If you say yes to this option, support will be included for the
+	  Nuvoton NCT6694, a USB device that provides a socket CANfd
+	  controller.
+
+ This driver can also be built as a module. If so, the module will
+ be called nct6694_canfd.
+
config CAN_PEAK_USB
tristate "PEAK PCAN-USB/USB Pro interfaces for CAN 2.0b/CAN-FD"
help
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
index 8b11088e9a59..fcafb1ac262e 100644
--- a/drivers/net/can/usb/Makefile
+++ b/drivers/net/can/usb/Makefile
@@ -11,5 +11,6 @@ obj-$(CONFIG_CAN_F81604) += f81604.o
obj-$(CONFIG_CAN_GS_USB) += gs_usb.o
obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb/
obj-$(CONFIG_CAN_MCBA_USB) += mcba_usb.o
+obj-$(CONFIG_CAN_NCT6694) += nct6694_canfd.o
obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/
obj-$(CONFIG_CAN_UCAN) += ucan.o
diff --git a/drivers/net/can/usb/esd_usb.c b/drivers/net/can/usb/esd_usb.c
index 27a3818885c2..9bc1824d7be6 100644
--- a/drivers/net/can/usb/esd_usb.c
+++ b/drivers/net/can/usb/esd_usb.c
@@ -9,6 +9,7 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
+#include <linux/err.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -274,6 +275,7 @@ struct esd_usb {
int net_count;
u32 version;
int rxinitdone;
+ int in_usb_disconnect;
void *rxbuf[ESD_USB_MAX_RX_URBS];
dma_addr_t rxbuf_dma[ESD_USB_MAX_RX_URBS];
};
@@ -480,7 +482,7 @@ static void esd_usb_tx_done_msg(struct esd_usb_net_priv *priv,
static void esd_usb_read_bulk_callback(struct urb *urb)
{
struct esd_usb *dev = urb->context;
- int retval;
+ int err;
int pos = 0;
int i;
@@ -496,7 +498,7 @@ static void esd_usb_read_bulk_callback(struct urb *urb)
default:
dev_info(dev->udev->dev.parent,
- "Rx URB aborted (%d)\n", urb->status);
+ "Rx URB aborted (%pe)\n", ERR_PTR(urb->status));
goto resubmit_urb;
}
@@ -539,15 +541,15 @@ resubmit_urb:
urb->transfer_buffer, ESD_USB_RX_BUFFER_SIZE,
esd_usb_read_bulk_callback, dev);
- retval = usb_submit_urb(urb, GFP_ATOMIC);
- if (retval == -ENODEV) {
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err == -ENODEV) {
for (i = 0; i < dev->net_count; i++) {
if (dev->nets[i])
netif_device_detach(dev->nets[i]->netdev);
}
- } else if (retval) {
+ } else if (err) {
dev_err(dev->udev->dev.parent,
- "failed resubmitting read bulk urb: %d\n", retval);
+ "failed resubmitting read bulk urb: %pe\n", ERR_PTR(err));
}
}
@@ -572,7 +574,7 @@ static void esd_usb_write_bulk_callback(struct urb *urb)
return;
if (urb->status)
- netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status);
+ netdev_info(netdev, "Tx URB aborted (%pe)\n", ERR_PTR(urb->status));
netif_trans_update(netdev);
}
@@ -758,7 +760,7 @@ out:
if (err == -ENODEV)
netif_device_detach(netdev);
if (err)
- netdev_err(netdev, "couldn't start device: %d\n", err);
+ netdev_err(netdev, "couldn't start device: %pe\n", ERR_PTR(err));
kfree(msg);
return err;
@@ -800,7 +802,6 @@ static int esd_usb_open(struct net_device *netdev)
/* finally start device */
err = esd_usb_start(priv);
if (err) {
- netdev_warn(netdev, "couldn't start device: %d\n", err);
close_candev(netdev);
return err;
}
@@ -923,7 +924,7 @@ static netdev_tx_t esd_usb_start_xmit(struct sk_buff *skb,
if (err == -ENODEV)
netif_device_detach(netdev);
else
- netdev_warn(netdev, "failed tx_urb %d\n", err);
+ netdev_warn(netdev, "failed tx_urb %pe\n", ERR_PTR(err));
goto releasebuf;
}
@@ -947,10 +948,11 @@ nourbmem:
return ret;
}
-static int esd_usb_close(struct net_device *netdev)
+/* Stop interface */
+static int esd_usb_stop(struct esd_usb_net_priv *priv)
{
- struct esd_usb_net_priv *priv = netdev_priv(netdev);
union esd_usb_msg *msg;
+ int err;
int i;
msg = kmalloc(sizeof(*msg), GFP_KERNEL);
@@ -964,8 +966,11 @@ static int esd_usb_close(struct net_device *netdev)
msg->filter.option = ESD_USB_ID_ENABLE; /* start with segment 0 */
for (i = 0; i <= ESD_USB_MAX_ID_SEGMENT; i++)
msg->filter.mask[i] = 0;
- if (esd_usb_send_msg(priv->usb, msg) < 0)
- netdev_err(netdev, "sending idadd message failed\n");
+ err = esd_usb_send_msg(priv->usb, msg);
+ if (err < 0) {
+ netdev_err(priv->netdev, "sending idadd message failed: %pe\n", ERR_PTR(err));
+ goto bail;
+ }
/* set CAN controller to reset mode */
msg->hdr.len = sizeof(struct esd_usb_set_baudrate_msg) / sizeof(u32); /* # of 32bit words */
@@ -973,8 +978,25 @@ static int esd_usb_close(struct net_device *netdev)
msg->setbaud.net = priv->index;
msg->setbaud.rsvd = 0;
msg->setbaud.baud = cpu_to_le32(ESD_USB_NO_BAUDRATE);
- if (esd_usb_send_msg(priv->usb, msg) < 0)
- netdev_err(netdev, "sending setbaud message failed\n");
+ err = esd_usb_send_msg(priv->usb, msg);
+ if (err < 0)
+ netdev_err(priv->netdev, "sending setbaud message failed: %pe\n", ERR_PTR(err));
+
+bail:
+ kfree(msg);
+
+ return err;
+}
+
+static int esd_usb_close(struct net_device *netdev)
+{
+ struct esd_usb_net_priv *priv = netdev_priv(netdev);
+ int err = 0;
+
+ if (!priv->usb->in_usb_disconnect) {
+ /* It's moot to try this in usb_disconnect()! */
+ err = esd_usb_stop(priv);
+ }
priv->can.state = CAN_STATE_STOPPED;
@@ -982,9 +1004,7 @@ static int esd_usb_close(struct net_device *netdev)
close_candev(netdev);
- kfree(msg);
-
- return 0;
+ return err;
}
static const struct net_device_ops esd_usb_netdev_ops = {
@@ -1251,14 +1271,14 @@ static int esd_usb_probe_one_net(struct usb_interface *intf, int index)
err = register_candev(netdev);
if (err) {
- dev_err(&intf->dev, "couldn't register CAN device: %d\n", err);
+ dev_err(&intf->dev, "couldn't register CAN device: %pe\n", ERR_PTR(err));
free_candev(netdev);
err = -ENOMEM;
goto done;
}
dev->nets[index] = priv;
- netdev_info(netdev, "device %s registered\n", netdev->name);
+ netdev_info(netdev, "registered\n");
done:
return err;
@@ -1354,9 +1374,11 @@ static void esd_usb_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
if (dev) {
+ dev->in_usb_disconnect = 1;
for (i = 0; i < dev->net_count; i++) {
if (dev->nets[i]) {
netdev = dev->nets[i]->netdev;
+ netdev_info(netdev, "unregister\n");
unregister_netdev(netdev);
free_candev(netdev);
}
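
The new in_usb_disconnect flag exists because the idadd/setbaud messages sent
by esd_usb_stop() can only fail once the USB device is gone, so close skips
them in that case. Condensed control flow (sketch):

	if (!priv->usb->in_usb_disconnect)
		err = esd_usb_stop(priv);	/* still talks to the device */
	priv->can.state = CAN_STATE_STOPPED;
	close_candev(netdev);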
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c
index db1acf6d504c..adc91873c083 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.c
@@ -7,7 +7,7 @@
*
* Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved.
* Copyright (c) 2020 ETAS K.K.. All rights reserved.
- * Copyright (c) 2020-2022 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ * Copyright (c) 2020-2025 Vincent Mailhol <mailhol@kernel.org>
*/
#include <linux/unaligned.h>
@@ -1977,6 +1977,7 @@ static const struct net_device_ops es58x_netdev_ops = {
.ndo_stop = es58x_stop,
.ndo_start_xmit = es58x_start_xmit,
.ndo_eth_ioctl = can_eth_ioctl_hwts,
+ .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops es58x_ethtool_ops = {
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index 41c0a1c399bf..1f9b915094e6 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -761,6 +761,7 @@ static const struct net_device_ops mcba_netdev_ops = {
.ndo_open = mcba_usb_open,
.ndo_stop = mcba_usb_close,
.ndo_start_xmit = mcba_usb_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops mcba_ethtool_ops = {
diff --git a/drivers/net/can/usb/nct6694_canfd.c b/drivers/net/can/usb/nct6694_canfd.c
new file mode 100644
index 000000000000..8deff16491a1
--- /dev/null
+++ b/drivers/net/can/usb/nct6694_canfd.c
@@ -0,0 +1,832 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Nuvoton NCT6694 SocketCAN FD driver based on the USB interface.
+ *
+ * Copyright (C) 2025 Nuvoton Technology Corp.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/can/dev.h>
+#include <linux/can/rx-offload.h>
+#include <linux/ethtool.h>
+#include <linux/idr.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/mfd/nct6694.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+
+#define DEVICE_NAME "nct6694-canfd"
+
+/* USB command module type used to address the NCT6694 CAN FD
+ * controller over the USB interface.
+ */
+#define NCT6694_CANFD_MOD 0x05
+
+/* Command 00h - CAN Setting and Initialization */
+#define NCT6694_CANFD_SETTING 0x00
+#define NCT6694_CANFD_SETTING_ACTIVE_CTRL1 BIT(0)
+#define NCT6694_CANFD_SETTING_ACTIVE_CTRL2 BIT(1)
+#define NCT6694_CANFD_SETTING_ACTIVE_NBTP_DBTP BIT(2)
+#define NCT6694_CANFD_SETTING_CTRL1_MON BIT(0)
+#define NCT6694_CANFD_SETTING_CTRL1_NISO BIT(1)
+#define NCT6694_CANFD_SETTING_CTRL1_LBCK BIT(2)
+#define NCT6694_CANFD_SETTING_NBTP_NTSEG2 GENMASK(6, 0)
+#define NCT6694_CANFD_SETTING_NBTP_NTSEG1 GENMASK(15, 8)
+#define NCT6694_CANFD_SETTING_NBTP_NBRP GENMASK(24, 16)
+#define NCT6694_CANFD_SETTING_NBTP_NSJW GENMASK(31, 25)
+#define NCT6694_CANFD_SETTING_DBTP_DSJW GENMASK(3, 0)
+#define NCT6694_CANFD_SETTING_DBTP_DTSEG2 GENMASK(7, 4)
+#define NCT6694_CANFD_SETTING_DBTP_DTSEG1 GENMASK(12, 8)
+#define NCT6694_CANFD_SETTING_DBTP_DBRP GENMASK(20, 16)
+#define NCT6694_CANFD_SETTING_DBTP_TDC BIT(23)
+
+/* Command 01h - CAN Information */
+#define NCT6694_CANFD_INFORMATION 0x01
+#define NCT6694_CANFD_INFORMATION_SEL 0x00
+
+/* Command 02h - CAN Event */
+#define NCT6694_CANFD_EVENT 0x02
+#define NCT6694_CANFD_EVENT_SEL(idx, mask) \
+ (((idx) ? 0x80 : 0x00) | ((mask) & 0x7F))
+
+#define NCT6694_CANFD_EVENT_MASK GENMASK(5, 0)
+#define NCT6694_CANFD_EVT_TX_FIFO_EMPTY BIT(7) /* Read-clear */
+#define NCT6694_CANFD_EVT_RX_DATA_LOST BIT(5) /* Read-clear */
+#define NCT6694_CANFD_EVT_RX_DATA_IN BIT(7) /* Read-clear */
+
+/* Command 10h - CAN Deliver */
+#define NCT6694_CANFD_DELIVER 0x10
+#define NCT6694_CANFD_DELIVER_SEL(buf_cnt) \
+ ((buf_cnt) & 0xFF)
+
+/* Command 11h - CAN Receive */
+#define NCT6694_CANFD_RECEIVE 0x11
+#define NCT6694_CANFD_RECEIVE_SEL(idx, buf_cnt) \
+ (((idx) ? 0x80 : 0x00) | ((buf_cnt) & 0x7F))
+
+#define NCT6694_CANFD_FRAME_TAG(idx) (0xC0 | (idx))
+#define NCT6694_CANFD_FRAME_FLAG_EFF BIT(0)
+#define NCT6694_CANFD_FRAME_FLAG_RTR BIT(1)
+#define NCT6694_CANFD_FRAME_FLAG_FD BIT(2)
+#define NCT6694_CANFD_FRAME_FLAG_BRS BIT(3)
+#define NCT6694_CANFD_FRAME_FLAG_ERR BIT(4)
+
+#define NCT6694_NAPI_WEIGHT 32
+
+enum nct6694_event_err {
+ NCT6694_CANFD_EVT_ERR_NO_ERROR = 0,
+ NCT6694_CANFD_EVT_ERR_CRC_ERROR,
+ NCT6694_CANFD_EVT_ERR_STUFF_ERROR,
+ NCT6694_CANFD_EVT_ERR_ACK_ERROR,
+ NCT6694_CANFD_EVT_ERR_FORM_ERROR,
+ NCT6694_CANFD_EVT_ERR_BIT_ERROR,
+ NCT6694_CANFD_EVT_ERR_TIMEOUT_ERROR,
+ NCT6694_CANFD_EVT_ERR_UNKNOWN_ERROR,
+};
+
+enum nct6694_event_status {
+ NCT6694_CANFD_EVT_STS_ERROR_ACTIVE = 0,
+ NCT6694_CANFD_EVT_STS_ERROR_PASSIVE,
+ NCT6694_CANFD_EVT_STS_BUS_OFF,
+ NCT6694_CANFD_EVT_STS_WARNING,
+};
+
+struct __packed nct6694_canfd_setting {
+ __le32 nbr;
+ __le32 dbr;
+ u8 active;
+ u8 reserved[3];
+ __le16 ctrl1;
+ __le16 ctrl2;
+ __le32 nbtp;
+ __le32 dbtp;
+};
+
+struct __packed nct6694_canfd_information {
+ u8 tx_fifo_cnt;
+ u8 rx_fifo_cnt;
+ u8 reserved[2];
+ __le32 can_clk;
+};
+
+struct __packed nct6694_canfd_event {
+ u8 err;
+ u8 status;
+ u8 tx_evt;
+ u8 rx_evt;
+ u8 rec;
+ u8 tec;
+ u8 reserved[2];
+};
+
+struct __packed nct6694_canfd_frame {
+ u8 tag;
+ u8 flag;
+ u8 reserved;
+ u8 length;
+ __le32 id;
+ u8 data[CANFD_MAX_DLEN];
+};
+
+struct nct6694_canfd_priv {
+ struct can_priv can; /* must be the first member */
+ struct can_rx_offload offload;
+ struct net_device *ndev;
+ struct nct6694 *nct6694;
+ struct workqueue_struct *wq;
+ struct work_struct tx_work;
+ struct nct6694_canfd_frame tx;
+ struct nct6694_canfd_frame rx;
+ struct nct6694_canfd_event event[2];
+ struct can_berr_counter bec;
+};
+
+static inline struct nct6694_canfd_priv *rx_offload_to_priv(struct can_rx_offload *offload)
+{
+ return container_of(offload, struct nct6694_canfd_priv, offload);
+}
+
+static const struct can_bittiming_const nct6694_canfd_bittiming_nominal_const = {
+ .name = DEVICE_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 256,
+ .tseg2_min = 1,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 512,
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const nct6694_canfd_bittiming_data_const = {
+ .name = DEVICE_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 32,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 32,
+ .brp_inc = 1,
+};
+
+static void nct6694_canfd_rx_offload(struct can_rx_offload *offload,
+ struct sk_buff *skb)
+{
+ struct nct6694_canfd_priv *priv = rx_offload_to_priv(offload);
+ int ret;
+
+ ret = can_rx_offload_queue_tail(offload, skb);
+ if (ret)
+ priv->ndev->stats.rx_fifo_errors++;
+}
+
+static void nct6694_canfd_handle_lost_msg(struct net_device *ndev)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ netdev_dbg(ndev, "RX FIFO overflow, message(s) lost.\n");
+
+ stats->rx_errors++;
+ stats->rx_over_errors++;
+
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (!skb)
+ return;
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+
+ nct6694_canfd_rx_offload(&priv->offload, skb);
+}
+
+static void nct6694_canfd_handle_rx(struct net_device *ndev, u8 rx_evt)
+{
+ struct net_device_stats *stats = &ndev->stats;
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ struct nct6694_canfd_frame *frame = &priv->rx;
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_CANFD_MOD,
+ .cmd = NCT6694_CANFD_RECEIVE,
+ .sel = NCT6694_CANFD_RECEIVE_SEL(ndev->dev_port, 1),
+ .len = cpu_to_le16(sizeof(*frame))
+ };
+ struct sk_buff *skb;
+ int ret;
+
+ ret = nct6694_read_msg(priv->nct6694, &cmd_hd, frame);
+ if (ret)
+ return;
+
+ if (frame->flag & NCT6694_CANFD_FRAME_FLAG_FD) {
+ struct canfd_frame *cfd;
+
+ skb = alloc_canfd_skb(priv->ndev, &cfd);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ cfd->can_id = le32_to_cpu(frame->id);
+ cfd->len = canfd_sanitize_len(frame->length);
+ if (frame->flag & NCT6694_CANFD_FRAME_FLAG_EFF)
+ cfd->can_id |= CAN_EFF_FLAG;
+ if (frame->flag & NCT6694_CANFD_FRAME_FLAG_BRS)
+ cfd->flags |= CANFD_BRS;
+ if (frame->flag & NCT6694_CANFD_FRAME_FLAG_ERR)
+ cfd->flags |= CANFD_ESI;
+
+ memcpy(cfd->data, frame->data, cfd->len);
+ } else {
+ struct can_frame *cf;
+
+ skb = alloc_can_skb(priv->ndev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ cf->can_id = le32_to_cpu(frame->id);
+ cf->len = can_cc_dlc2len(frame->length);
+ if (frame->flag & NCT6694_CANFD_FRAME_FLAG_EFF)
+ cf->can_id |= CAN_EFF_FLAG;
+
+ if (frame->flag & NCT6694_CANFD_FRAME_FLAG_RTR)
+ cf->can_id |= CAN_RTR_FLAG;
+ else
+ memcpy(cf->data, frame->data, cf->len);
+ }
+
+ nct6694_canfd_rx_offload(&priv->offload, skb);
+}
+
+static int nct6694_canfd_get_berr_counter(const struct net_device *ndev,
+ struct can_berr_counter *bec)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+
+ *bec = priv->bec;
+
+ return 0;
+}
+
+static void nct6694_canfd_handle_state_change(struct net_device *ndev, u8 status)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ enum can_state new_state, rx_state, tx_state;
+ struct can_berr_counter bec;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ nct6694_canfd_get_berr_counter(ndev, &bec);
+ can_state_get_by_berr_counter(ndev, &bec, &tx_state, &rx_state);
+
+ new_state = max(tx_state, rx_state);
+
+ /* state hasn't changed */
+ if (new_state == priv->can.state)
+ return;
+
+ skb = alloc_can_err_skb(ndev, &cf);
+
+ can_change_state(ndev, cf, tx_state, rx_state);
+
+ if (new_state == CAN_STATE_BUS_OFF) {
+ can_bus_off(ndev);
+ } else if (cf) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
+
+ if (skb)
+ nct6694_canfd_rx_offload(&priv->offload, skb);
+}
+
+static void nct6694_canfd_handle_bus_err(struct net_device *ndev, u8 bus_err)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ priv->can.can_stats.bus_error++;
+
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (cf)
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+ switch (bus_err) {
+ case NCT6694_CANFD_EVT_ERR_CRC_ERROR:
+ netdev_dbg(ndev, "CRC error\n");
+ ndev->stats.rx_errors++;
+ if (cf)
+ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+ break;
+
+ case NCT6694_CANFD_EVT_ERR_STUFF_ERROR:
+ netdev_dbg(ndev, "Stuff error\n");
+ ndev->stats.rx_errors++;
+ if (cf)
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+
+ case NCT6694_CANFD_EVT_ERR_ACK_ERROR:
+ netdev_dbg(ndev, "Ack error\n");
+ ndev->stats.tx_errors++;
+ if (cf) {
+ cf->can_id |= CAN_ERR_ACK;
+ cf->data[2] |= CAN_ERR_PROT_TX;
+ }
+ break;
+
+ case NCT6694_CANFD_EVT_ERR_FORM_ERROR:
+ netdev_dbg(ndev, "Form error\n");
+ ndev->stats.rx_errors++;
+ if (cf)
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+
+ case NCT6694_CANFD_EVT_ERR_BIT_ERROR:
+ netdev_dbg(ndev, "Bit error\n");
+ ndev->stats.tx_errors++;
+ if (cf)
+ cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT;
+ break;
+
+ default:
+ break;
+ }
+
+ if (skb)
+ nct6694_canfd_rx_offload(&priv->offload, skb);
+}
+
+static void nct6694_canfd_handle_tx(struct net_device *ndev)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+
+ stats->tx_bytes += can_rx_offload_get_echo_skb_queue_tail(&priv->offload,
+ 0, NULL);
+ stats->tx_packets++;
+ netif_wake_queue(ndev);
+}
+
+static irqreturn_t nct6694_canfd_irq(int irq, void *data)
+{
+ struct net_device *ndev = data;
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ struct nct6694_canfd_event *event = &priv->event[ndev->dev_port];
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_CANFD_MOD,
+ .cmd = NCT6694_CANFD_EVENT,
+ .sel = NCT6694_CANFD_EVENT_SEL(ndev->dev_port, NCT6694_CANFD_EVENT_MASK),
+ .len = cpu_to_le16(sizeof(priv->event))
+ };
+ irqreturn_t handled = IRQ_NONE;
+ int ret;
+
+ ret = nct6694_read_msg(priv->nct6694, &cmd_hd, priv->event);
+ if (ret < 0)
+ return handled;
+
+ if (event->rx_evt & NCT6694_CANFD_EVT_RX_DATA_IN) {
+ nct6694_canfd_handle_rx(ndev, event->rx_evt);
+ handled = IRQ_HANDLED;
+ }
+
+ if (event->rx_evt & NCT6694_CANFD_EVT_RX_DATA_LOST) {
+ nct6694_canfd_handle_lost_msg(ndev);
+ handled = IRQ_HANDLED;
+ }
+
+ if (event->status) {
+ nct6694_canfd_handle_state_change(ndev, event->status);
+ handled = IRQ_HANDLED;
+ }
+
+ if (event->err != NCT6694_CANFD_EVT_ERR_NO_ERROR) {
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ nct6694_canfd_handle_bus_err(ndev, event->err);
+ handled = IRQ_HANDLED;
+ }
+
+ if (event->tx_evt & NCT6694_CANFD_EVT_TX_FIFO_EMPTY) {
+ nct6694_canfd_handle_tx(ndev);
+ handled = IRQ_HANDLED;
+ }
+
+ if (handled)
+ can_rx_offload_threaded_irq_finish(&priv->offload);
+
+ priv->bec.rxerr = event->rec;
+ priv->bec.txerr = event->tec;
+
+ return handled;
+}
+
+static void nct6694_canfd_tx_work(struct work_struct *work)
+{
+ struct nct6694_canfd_priv *priv = container_of(work,
+ struct nct6694_canfd_priv,
+ tx_work);
+ struct nct6694_canfd_frame *frame = &priv->tx;
+ struct net_device *ndev = priv->ndev;
+ struct net_device_stats *stats = &ndev->stats;
+ struct sk_buff *skb = priv->can.echo_skb[0];
+ static const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_CANFD_MOD,
+ .cmd = NCT6694_CANFD_DELIVER,
+ .sel = NCT6694_CANFD_DELIVER_SEL(1),
+ .len = cpu_to_le16(sizeof(*frame))
+ };
+ u32 txid;
+ int err;
+
+ memset(frame, 0, sizeof(*frame));
+
+ frame->tag = NCT6694_CANFD_FRAME_TAG(ndev->dev_port);
+
+ if (can_is_canfd_skb(skb)) {
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+
+ if (cfd->flags & CANFD_BRS)
+ frame->flag |= NCT6694_CANFD_FRAME_FLAG_BRS;
+
+ if (cfd->can_id & CAN_EFF_FLAG) {
+ txid = cfd->can_id & CAN_EFF_MASK;
+ frame->flag |= NCT6694_CANFD_FRAME_FLAG_EFF;
+ } else {
+ txid = cfd->can_id & CAN_SFF_MASK;
+ }
+ frame->flag |= NCT6694_CANFD_FRAME_FLAG_FD;
+ frame->id = cpu_to_le32(txid);
+ frame->length = canfd_sanitize_len(cfd->len);
+
+ memcpy(frame->data, cfd->data, frame->length);
+ } else {
+ struct can_frame *cf = (struct can_frame *)skb->data;
+
+ if (cf->can_id & CAN_EFF_FLAG) {
+ txid = cf->can_id & CAN_EFF_MASK;
+ frame->flag |= NCT6694_CANFD_FRAME_FLAG_EFF;
+ } else {
+ txid = cf->can_id & CAN_SFF_MASK;
+ }
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ frame->flag |= NCT6694_CANFD_FRAME_FLAG_RTR;
+ else
+ memcpy(frame->data, cf->data, cf->len);
+
+ frame->id = cpu_to_le32(txid);
+ frame->length = cf->len;
+ }
+
+ err = nct6694_write_msg(priv->nct6694, &cmd_hd, frame);
+ if (err) {
+ can_free_echo_skb(ndev, 0, NULL);
+ stats->tx_dropped++;
+ stats->tx_errors++;
+ netif_wake_queue(ndev);
+ }
+}
+
+static netdev_tx_t nct6694_canfd_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+
+ if (can_dev_dropped_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ netif_stop_queue(ndev);
+ can_put_echo_skb(skb, ndev, 0, 0);
+ queue_work(priv->wq, &priv->tx_work);
+
+ return NETDEV_TX_OK;
+}
+
+static int nct6694_canfd_start(struct net_device *ndev)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ const struct can_bittiming *n_bt = &priv->can.bittiming;
+ const struct can_bittiming *d_bt = &priv->can.fd.data_bittiming;
+ struct nct6694_canfd_setting *setting __free(kfree) = NULL;
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_CANFD_MOD,
+ .cmd = NCT6694_CANFD_SETTING,
+ .sel = ndev->dev_port,
+ .len = cpu_to_le16(sizeof(*setting))
+ };
+ u32 en_tdc;
+ int ret;
+
+ setting = kzalloc(sizeof(*setting), GFP_KERNEL);
+ if (!setting)
+ return -ENOMEM;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ setting->ctrl1 |= cpu_to_le16(NCT6694_CANFD_SETTING_CTRL1_MON);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
+ setting->ctrl1 |= cpu_to_le16(NCT6694_CANFD_SETTING_CTRL1_NISO);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+ setting->ctrl1 |= cpu_to_le16(NCT6694_CANFD_SETTING_CTRL1_LBCK);
+
+ /* Disable clock divider */
+ setting->ctrl2 = 0;
+
+ setting->nbtp = cpu_to_le32(FIELD_PREP(NCT6694_CANFD_SETTING_NBTP_NSJW,
+ n_bt->sjw - 1) |
+ FIELD_PREP(NCT6694_CANFD_SETTING_NBTP_NBRP,
+ n_bt->brp - 1) |
+ FIELD_PREP(NCT6694_CANFD_SETTING_NBTP_NTSEG2,
+ n_bt->phase_seg2 - 1) |
+ FIELD_PREP(NCT6694_CANFD_SETTING_NBTP_NTSEG1,
+ n_bt->prop_seg + n_bt->phase_seg1 - 1));
+
+ if (d_bt->brp <= 2)
+ en_tdc = NCT6694_CANFD_SETTING_DBTP_TDC;
+ else
+ en_tdc = 0;
+
+ setting->dbtp = cpu_to_le32(FIELD_PREP(NCT6694_CANFD_SETTING_DBTP_DSJW,
+ d_bt->sjw - 1) |
+ FIELD_PREP(NCT6694_CANFD_SETTING_DBTP_DBRP,
+ d_bt->brp - 1) |
+ FIELD_PREP(NCT6694_CANFD_SETTING_DBTP_DTSEG2,
+ d_bt->phase_seg2 - 1) |
+ FIELD_PREP(NCT6694_CANFD_SETTING_DBTP_DTSEG1,
+ d_bt->prop_seg + d_bt->phase_seg1 - 1) |
+ en_tdc);
+
+ setting->active = NCT6694_CANFD_SETTING_ACTIVE_CTRL1 |
+ NCT6694_CANFD_SETTING_ACTIVE_CTRL2 |
+ NCT6694_CANFD_SETTING_ACTIVE_NBTP_DBTP;
+
+ ret = nct6694_write_msg(priv->nct6694, &cmd_hd, setting);
+ if (ret)
+ return ret;
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ return 0;
+}
+
+static void nct6694_canfd_stop(struct net_device *ndev)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ struct nct6694_canfd_setting *setting __free(kfree) = NULL;
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_CANFD_MOD,
+ .cmd = NCT6694_CANFD_SETTING,
+ .sel = ndev->dev_port,
+ .len = cpu_to_le16(sizeof(*setting))
+ };
+
+ /* The NCT6694 cannot be stopped. To ensure safe operation and avoid
+ * interference, the control mode is set to Listen-Only mode. This
+ * mode allows the device to monitor bus activity without actively
+ * participating in communication.
+ */
+ setting = kzalloc(sizeof(*setting), GFP_KERNEL);
+ if (!setting)
+ return;
+
+ nct6694_read_msg(priv->nct6694, &cmd_hd, setting);
+ setting->ctrl1 = cpu_to_le16(NCT6694_CANFD_SETTING_CTRL1_MON);
+ setting->active = NCT6694_CANFD_SETTING_ACTIVE_CTRL1;
+ nct6694_write_msg(priv->nct6694, &cmd_hd, setting);
+
+ priv->can.state = CAN_STATE_STOPPED;
+}
+
+static int nct6694_canfd_close(struct net_device *ndev)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ nct6694_canfd_stop(ndev);
+ destroy_workqueue(priv->wq);
+ free_irq(ndev->irq, ndev);
+ can_rx_offload_disable(&priv->offload);
+ close_candev(ndev);
+ return 0;
+}
+
+static int nct6694_canfd_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ int ret;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ ret = nct6694_canfd_start(ndev);
+ if (ret)
+ return ret;
+
+ netif_wake_queue(ndev);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int nct6694_canfd_open(struct net_device *ndev)
+{
+ struct nct6694_canfd_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = open_candev(ndev);
+ if (ret)
+ return ret;
+
+ can_rx_offload_enable(&priv->offload);
+
+ ret = request_threaded_irq(ndev->irq, NULL,
+ nct6694_canfd_irq, IRQF_ONESHOT,
+ "nct6694_canfd", ndev);
+ if (ret) {
+ netdev_err(ndev, "Failed to request IRQ\n");
+ goto can_rx_offload_disable;
+ }
+
+ priv->wq = alloc_ordered_workqueue("%s-nct6694_wq",
+ WQ_FREEZABLE | WQ_MEM_RECLAIM,
+ ndev->name);
+ if (!priv->wq) {
+ ret = -ENOMEM;
+ goto free_irq;
+ }
+
+ ret = nct6694_canfd_start(ndev);
+ if (ret)
+ goto destroy_wq;
+
+ netif_start_queue(ndev);
+
+ return 0;
+
+destroy_wq:
+ destroy_workqueue(priv->wq);
+free_irq:
+ free_irq(ndev->irq, ndev);
+can_rx_offload_disable:
+ can_rx_offload_disable(&priv->offload);
+ close_candev(ndev);
+ return ret;
+}
+
+static const struct net_device_ops nct6694_canfd_netdev_ops = {
+ .ndo_open = nct6694_canfd_open,
+ .ndo_stop = nct6694_canfd_close,
+ .ndo_start_xmit = nct6694_canfd_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+static const struct ethtool_ops nct6694_canfd_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+static int nct6694_canfd_get_clock(struct nct6694_canfd_priv *priv)
+{
+ struct nct6694_canfd_information *info __free(kfree) = NULL;
+ static const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_CANFD_MOD,
+ .cmd = NCT6694_CANFD_INFORMATION,
+ .sel = NCT6694_CANFD_INFORMATION_SEL,
+ .len = cpu_to_le16(sizeof(*info))
+ };
+ int ret;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ ret = nct6694_read_msg(priv->nct6694, &cmd_hd, info);
+ if (ret)
+ return ret;
+
+ return le32_to_cpu(info->can_clk);
+}
+
+static int nct6694_canfd_probe(struct platform_device *pdev)
+{
+ struct nct6694 *nct6694 = dev_get_drvdata(pdev->dev.parent);
+ struct nct6694_canfd_priv *priv;
+ struct net_device *ndev;
+ int port, irq, ret, can_clk;
+
+ port = ida_alloc(&nct6694->canfd_ida, GFP_KERNEL);
+ if (port < 0)
+ return port;
+
+ irq = irq_create_mapping(nct6694->domain,
+ NCT6694_IRQ_CAN0 + port);
+ if (!irq) {
+ ret = -EINVAL;
+ goto free_ida;
+ }
+
+ ndev = alloc_candev(sizeof(struct nct6694_canfd_priv), 1);
+ if (!ndev) {
+ ret = -ENOMEM;
+ goto dispose_irq;
+ }
+
+ ndev->irq = irq;
+ ndev->flags |= IFF_ECHO;
+ ndev->dev_port = port;
+ ndev->netdev_ops = &nct6694_canfd_netdev_ops;
+ ndev->ethtool_ops = &nct6694_canfd_ethtool_ops;
+
+ priv = netdev_priv(ndev);
+ priv->nct6694 = nct6694;
+ priv->ndev = ndev;
+
+ can_clk = nct6694_canfd_get_clock(priv);
+ if (can_clk < 0) {
+ ret = dev_err_probe(&pdev->dev, can_clk,
+ "Failed to get clock\n");
+ goto free_candev;
+ }
+
+ INIT_WORK(&priv->tx_work, nct6694_canfd_tx_work);
+
+ priv->can.clock.freq = can_clk;
+ priv->can.bittiming_const = &nct6694_canfd_bittiming_nominal_const;
+ priv->can.fd.data_bittiming_const = &nct6694_canfd_bittiming_data_const;
+ priv->can.do_set_mode = nct6694_canfd_set_mode;
+ priv->can.do_get_berr_counter = nct6694_canfd_get_berr_counter;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+ CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING |
+ CAN_CTRLMODE_FD_NON_ISO;
+
+ ret = can_set_static_ctrlmode(ndev, CAN_CTRLMODE_FD);
+ if (ret)
+ goto free_candev;
+
+ ret = can_rx_offload_add_manual(ndev, &priv->offload,
+ NCT6694_NAPI_WEIGHT);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "Failed to add rx_offload\n");
+ goto free_candev;
+ }
+
+ platform_set_drvdata(pdev, priv);
+ SET_NETDEV_DEV(priv->ndev, &pdev->dev);
+
+ ret = register_candev(priv->ndev);
+ if (ret)
+ goto rx_offload_del;
+
+ return 0;
+
+rx_offload_del:
+ can_rx_offload_del(&priv->offload);
+free_candev:
+ free_candev(ndev);
+dispose_irq:
+ irq_dispose_mapping(irq);
+free_ida:
+ ida_free(&nct6694->canfd_ida, port);
+ return ret;
+}
+
+static void nct6694_canfd_remove(struct platform_device *pdev)
+{
+ struct nct6694_canfd_priv *priv = platform_get_drvdata(pdev);
+ struct nct6694 *nct6694 = priv->nct6694;
+ struct net_device *ndev = priv->ndev;
+ int port = ndev->dev_port;
+ int irq = ndev->irq;
+
+ unregister_candev(ndev);
+ can_rx_offload_del(&priv->offload);
+ free_candev(ndev);
+ irq_dispose_mapping(irq);
+ ida_free(&nct6694->canfd_ida, port);
+}
+
+static struct platform_driver nct6694_canfd_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ },
+ .probe = nct6694_canfd_probe,
+ .remove = nct6694_canfd_remove,
+};
+
+module_platform_driver(nct6694_canfd_driver);
+
+MODULE_DESCRIPTION("USB-CAN FD driver for NCT6694");
+MODULE_AUTHOR("Ming Yu <tmyu0@nuvoton.com>");
+MODULE_LICENSE("GPL");
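
To make the NBTP/DBTP packing in nct6694_canfd_start() concrete: each bit timing parameter is written minus one into the field slots declared near the top of the file (NTSEG2 in bits 6:0, NTSEG1 in bits 15:8, NBRP in bits 24:16, NSJW in bits 31:25). A standalone sketch using plain shifts instead of FIELD_PREP(); the sample values (brp=2, tseg1=14, tseg2=5, sjw=4) are invented for illustration, not taken from the driver:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative bit timing; values are made up for this sketch */
	uint32_t ntseg2 = 5 - 1;	/* phase_seg2 - 1, bits 6:0 */
	uint32_t ntseg1 = 14 - 1;	/* prop_seg + phase_seg1 - 1, bits 15:8 */
	uint32_t nbrp = 2 - 1;		/* brp - 1, bits 24:16 */
	uint32_t nsjw = 4 - 1;		/* sjw - 1, bits 31:25 */
	uint32_t nbtp = ntseg2 | (ntseg1 << 8) | (nbrp << 16) | (nsjw << 25);

	printf("NBTP = 0x%08x\n", (unsigned int)nbtp);	/* prints 0x06010d04 */
	return 0;
}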
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 6b293a9056c2..9278a1522aae 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -3,8 +3,8 @@
* CAN driver for PEAK System PCAN-USB adapter
* Derived from the PCAN project file driver/src/pcan_usb.c
*
- * Copyright (C) 2003-2010 PEAK System-Technik GmbH
- * Copyright (C) 2011-2012 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*
* Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de>
*/
@@ -919,7 +919,7 @@ static int pcan_usb_init(struct peak_usb_device *dev)
CAN_CTRLMODE_LOOPBACK;
} else {
dev_info(dev->netdev->dev.parent,
- "Firmware update available. Please contact support@peak-system.com\n");
+ "Firmware update available. Please contact support.peak@hms-networks.com\n");
}
return 0;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 117637b9b995..c74302ca7cee 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -3,8 +3,8 @@
* CAN driver for PEAK System USB adapters
* Derived from the PCAN project file driver/src/pcan_usb_core.c
*
- * Copyright (C) 2003-2010 PEAK System-Technik GmbH
- * Copyright (C) 2010-2012 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*
* Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de>
*/
@@ -24,7 +24,7 @@
#include "pcan_usb_core.h"
-MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
+MODULE_AUTHOR("Stéphane Grosjean <stephane.grosjean@hms-networks.com>");
MODULE_DESCRIPTION("CAN driver for PEAK-System USB adapters");
MODULE_LICENSE("GPL v2");
@@ -111,7 +111,7 @@ void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now)
u32 delta_ts = time_ref->ts_dev_2 - time_ref->ts_dev_1;
if (time_ref->ts_dev_2 < time_ref->ts_dev_1)
- delta_ts &= (1 << time_ref->adapter->ts_used_bits) - 1;
+ delta_ts &= (1ULL << time_ref->adapter->ts_used_bits) - 1;
time_ref->ts_total += delta_ts;
}
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index abab00930b9d..d1c1897d47b9 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -3,8 +3,8 @@
* CAN driver for PEAK System USB adapters
* Derived from the PCAN project file driver/src/pcan_usb_core.c
*
- * Copyright (C) 2003-2010 PEAK System-Technik GmbH
- * Copyright (C) 2010-2012 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*
* Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de>
*/
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index ebefc274b50a..be84191cde56 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -2,7 +2,8 @@
/*
* CAN driver for PEAK System PCAN-USB FD / PCAN-USB Pro FD adapter
*
- * Copyright (C) 2013-2014 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2013-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#include <linux/ethtool.h>
#include <linux/module.h>
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index f736196383ac..7be286293b1a 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -3,8 +3,8 @@
* CAN driver for PEAK System PCAN-USB Pro adapter
* Derived from the PCAN project file driver/src/pcan_usbpro.c
*
- * Copyright (C) 2003-2011 PEAK System-Technik GmbH
- * Copyright (C) 2011-2012 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#include <linux/ethtool.h>
#include <linux/module.h>
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
index 28e740af905d..162c7546d3a8 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
@@ -3,8 +3,8 @@
* CAN driver for PEAK System PCAN-USB Pro adapter
* Derived from the PCAN project file driver/src/pcan_usbpro_fw.h
*
- * Copyright (C) 2003-2011 PEAK System-Technik GmbH
- * Copyright (C) 2011-2012 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#ifndef PCAN_USB_PRO_H
#define PCAN_USB_PRO_H
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index f67e85807100..fdc662aea279 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -156,7 +156,7 @@ static const struct ethtool_ops vcan_ethtool_ops = {
static void vcan_setup(struct net_device *dev)
{
dev->type = ARPHRD_CAN;
- dev->mtu = CANFD_MTU;
+ dev->mtu = CANXL_MTU;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->tx_queue_len = 0;
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index 99a78a757167..b2c19f8c5f8e 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -156,7 +156,7 @@ static void vxcan_setup(struct net_device *dev)
struct can_ml_priv *can_ml;
dev->type = ARPHRD_CAN;
- dev->mtu = CANFD_MTU;
+ dev->mtu = CANXL_MTU;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->tx_queue_len = 0;
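
The vcan and vxcan hunks above raise the default MTU from the CAN FD frame size to the CAN XL frame size, so CAN XL frames pass by default. All three MTU constants are sizeof() of the respective uapi frame structs; a quick check, assuming kernel headers recent enough to define struct canxl_frame:

#include <stdio.h>
#include <linux/can.h>

int main(void)
{
	printf("CAN_MTU   = %zu\n", (size_t)CAN_MTU);	/* sizeof(struct can_frame),   16 */
	printf("CANFD_MTU = %zu\n", (size_t)CANFD_MTU);	/* sizeof(struct canfd_frame), 72 */
	printf("CANXL_MTU = %zu\n", (size_t)CANXL_MTU);	/* sizeof(struct canxl_frame), 2060 */
	return 0;
}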
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 81baec8eb1e5..a25a3ca62c12 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -690,14 +690,6 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
dlc |= XCAN_DLCR_EDL_MASK;
}
- if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
- (priv->devtype.flags & XCAN_FLAG_TXFEMP))
- can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
- else
- can_put_echo_skb(skb, ndev, 0, 0);
-
- priv->tx_head++;
-
priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
/* If the CAN frame is RTR frame this write triggers transmission
* (not on CAN FD)
@@ -730,6 +722,14 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
data[1]);
}
}
+
+ if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
+ (priv->devtype.flags & XCAN_FLAG_TXFEMP))
+ can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0);
+ else
+ can_put_echo_skb(skb, ndev, 0, 0);
+
+ priv->tx_head++;
}
/**
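
The xilinx_can move above looks cosmetic but is ordering-sensitive: can_put_echo_skb() hands the skb over to the CAN echo machinery (and may free it on error), so it is only safe to call once the frame ID and payload have been written to the hardware registers. Deferring that call and the tx_head increment to the end of xcan_write_frame() avoids dereferencing skb data the driver may no longer own.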
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index ec759f8cb0e2..4d9af691b989 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -26,13 +26,7 @@ config NET_DSA_LOOP
source "drivers/net/dsa/hirschmann/Kconfig"
-config NET_DSA_LANTIQ_GSWIP
- tristate "Lantiq / Intel GSWIP"
- depends on HAS_IOMEM
- select NET_DSA_TAG_GSWIP
- help
- This enables support for the Lantiq / Intel GSWIP 2.1 found in
- the xrx200 / VR9 SoC.
+source "drivers/net/dsa/lantiq/Kconfig"
config NET_DSA_MT7530
tristate "MediaTek MT7530 and MT7531 Ethernet switch support"
@@ -99,6 +93,14 @@ config NET_DSA_RZN1_A5PSW
This driver supports the A5PSW switch, which is embedded in Renesas
RZ/N1 SoC.
+config NET_DSA_KS8995
+ tristate "Micrel KS8995 family 5-ports 10/100 Ethernet switches"
+ depends on SPI
+ select NET_DSA_TAG_NONE
+ help
+ This driver supports the Micrel KS8995 family of 10/100 Mbit ethernet
+ switches, managed over SPI.
+
config NET_DSA_SMSC_LAN9303
tristate
select NET_DSA_TAG_LAN9303
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index cb9a97340e58..0f8ff4a1a313 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -2,10 +2,7 @@
obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm-sf2.o
bcm-sf2-objs := bcm_sf2.o bcm_sf2_cfp.o
obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o
-ifdef CONFIG_NET_DSA_LOOP
-obj-$(CONFIG_FIXED_PHY) += dsa_loop_bdinfo.o
-endif
-obj-$(CONFIG_NET_DSA_LANTIQ_GSWIP) += lantiq_gswip.o
+obj-$(CONFIG_NET_DSA_KS8995) += ks8995.o
obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
obj-$(CONFIG_NET_DSA_MT7530_MDIO) += mt7530-mdio.o
obj-$(CONFIG_NET_DSA_MT7530_MMIO) += mt7530-mmio.o
@@ -19,6 +16,7 @@ obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX_PLATFORM) += vitesse-vsc73xx-platform.o
obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX_SPI) += vitesse-vsc73xx-spi.o
obj-y += b53/
obj-y += hirschmann/
+obj-y += lantiq/
obj-y += microchip/
obj-y += mv88e6xxx/
obj-y += ocelot/
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 9942fb6f7f4b..2f846381d5a7 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1273,9 +1273,15 @@ static int b53_setup(struct dsa_switch *ds)
*/
ds->untag_vlan_aware_bridge_pvid = true;
- /* Ageing time is set in seconds */
- ds->ageing_time_min = 1 * 1000;
- ds->ageing_time_max = AGE_TIME_MAX * 1000;
+ if (dev->chip_id == BCM53101_DEVICE_ID) {
+ /* BCM53101 uses 0.5 second increments */
+ ds->ageing_time_min = 1 * 500;
+ ds->ageing_time_max = AGE_TIME_MAX * 500;
+ } else {
+ /* Everything else uses 1 second increments */
+ ds->ageing_time_min = 1 * 1000;
+ ds->ageing_time_max = AGE_TIME_MAX * 1000;
+ }
ret = b53_reset_switch(dev);
if (ret) {
@@ -2078,7 +2084,7 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
/* Start search operation */
reg = ARL_SRCH_STDN;
- b53_write8(priv, offset, B53_ARL_SRCH_CTL, reg);
+ b53_write8(priv, B53_ARLIO_PAGE, offset, reg);
do {
ret = b53_arl_search_wait(priv);
@@ -2559,7 +2565,10 @@ int b53_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
else
reg = B53_AGING_TIME_CONTROL;
- atc = DIV_ROUND_CLOSEST(msecs, 1000);
+ if (dev->chip_id == BCM53101_DEVICE_ID)
+ atc = DIV_ROUND_CLOSEST(msecs, 500);
+ else
+ atc = DIV_ROUND_CLOSEST(msecs, 1000);
if (!is5325(dev) && !is5365(dev))
atc |= AGE_CHANGE;
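
For the ageing-time hunks above, the register value is simply the requested time divided by the chip's tick, rounded to nearest. A sketch with the common 300 second bridge default; DIV_ROUND_CLOSEST is expanded here in its simple unsigned form:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned int msecs = 300000;	/* 300 s, a common bridge ageing default */

	/* BCM53101 counts in 0.5 s ticks, everything else in 1 s ticks */
	printf("BCM53101 atc = %u\n", DIV_ROUND_CLOSEST(msecs, 500));	/* 600 */
	printf("others   atc = %u\n", DIV_ROUND_CLOSEST(msecs, 1000));	/* 300 */
	return 0;
}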
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
index f06c3e0cc42a..f4a59d8fbdd6 100644
--- a/drivers/net/dsa/b53/b53_mmap.c
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -29,8 +29,13 @@
#include "b53_priv.h"
#define BCM63XX_EPHY_REG 0x3C
+#define BCM63268_GPHY_REG 0x54
+
+#define GPHY_CTRL_LOW_PWR BIT(3)
+#define GPHY_CTRL_IDDQ_BIAS BIT(0)
struct b53_phy_info {
+ u32 gphy_port_mask;
u32 ephy_enable_mask;
u32 ephy_port_mask;
u32 ephy_bias_bit;
@@ -65,6 +70,7 @@ static const struct b53_phy_info bcm6368_ephy_info = {
static const u32 bcm63268_ephy_offsets[] = {4, 9, 14};
static const struct b53_phy_info bcm63268_ephy_info = {
+ .gphy_port_mask = BIT(3),
.ephy_enable_mask = GENMASK(4, 0),
.ephy_port_mask = GENMASK((ARRAY_SIZE(bcm63268_ephy_offsets) - 1), 0),
.ephy_bias_bit = 24,
@@ -290,13 +296,30 @@ static int bcm63xx_ephy_set(struct b53_device *dev, int port, bool enable)
return regmap_update_bits(gpio_ctrl, BCM63XX_EPHY_REG, mask, val);
}
+static int bcm63268_gphy_set(struct b53_device *dev, bool enable)
+{
+ struct b53_mmap_priv *priv = dev->priv;
+ struct regmap *gpio_ctrl = priv->gpio_ctrl;
+ u32 mask = GPHY_CTRL_IDDQ_BIAS | GPHY_CTRL_LOW_PWR;
+ u32 val = 0;
+
+ if (!enable)
+ val = mask;
+
+ return regmap_update_bits(gpio_ctrl, BCM63268_GPHY_REG, mask, val);
+}
+
static void b53_mmap_phy_enable(struct b53_device *dev, int port)
{
struct b53_mmap_priv *priv = dev->priv;
int ret = 0;
- if (priv->phy_info && (BIT(port) & priv->phy_info->ephy_port_mask))
- ret = bcm63xx_ephy_set(dev, port, true);
+ if (priv->phy_info) {
+ if (BIT(port) & priv->phy_info->ephy_port_mask)
+ ret = bcm63xx_ephy_set(dev, port, true);
+ else if (BIT(port) & priv->phy_info->gphy_port_mask)
+ ret = bcm63268_gphy_set(dev, true);
+ }
if (!ret)
priv->phys_enabled |= BIT(port);
@@ -307,8 +330,12 @@ static void b53_mmap_phy_disable(struct b53_device *dev, int port)
struct b53_mmap_priv *priv = dev->priv;
int ret = 0;
- if (priv->phy_info && (BIT(port) & priv->phy_info->ephy_port_mask))
- ret = bcm63xx_ephy_set(dev, port, false);
+ if (priv->phy_info) {
+ if (BIT(port) & priv->phy_info->ephy_port_mask)
+ ret = bcm63xx_ephy_set(dev, port, false);
+ else if (BIT(port) & priv->phy_info->gphy_port_mask)
+ ret = bcm63268_gphy_set(dev, false);
+ }
if (!ret)
priv->phys_enabled &= ~BIT(port);
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index d8a35f25a4c8..650d93226d9f 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -17,7 +17,19 @@
#include <linux/dsa/loop.h>
#include <net/dsa.h>
-#include "dsa_loop.h"
+#define DSA_LOOP_NUM_PORTS 6
+#define DSA_LOOP_CPU_PORT (DSA_LOOP_NUM_PORTS - 1)
+#define NUM_FIXED_PHYS (DSA_LOOP_NUM_PORTS - 2)
+
+struct dsa_loop_pdata {
+ /* Must be first, such that dsa_register_switch() can access this
+ * without gory pointer manipulations
+ */
+ struct dsa_chip_data cd;
+ const char *name;
+ unsigned int enabled_ports;
+ const char *netdev;
+};
static struct dsa_loop_mib_entry dsa_loop_mibs[] = {
[DSA_LOOP_PHY_READ_OK] = { "phy_read_ok", },
@@ -27,6 +39,7 @@ static struct dsa_loop_mib_entry dsa_loop_mibs[] = {
};
static struct phy_device *phydevs[PHY_MAX_ADDR];
+static struct mdio_device *switch_mdiodev;
enum dsa_loop_devlink_resource_id {
DSA_LOOP_DEVLINK_PARAM_ID_VTU,
@@ -382,17 +395,48 @@ static struct mdio_driver dsa_loop_drv = {
.shutdown = dsa_loop_drv_shutdown,
};
-#define NUM_FIXED_PHYS (DSA_LOOP_NUM_PORTS - 2)
-
static void dsa_loop_phydevs_unregister(void)
{
- unsigned int i;
-
- for (i = 0; i < NUM_FIXED_PHYS; i++)
- if (!IS_ERR(phydevs[i])) {
+ for (int i = 0; i < NUM_FIXED_PHYS; i++) {
+ if (!IS_ERR(phydevs[i]))
fixed_phy_unregister(phydevs[i]);
- phy_device_free(phydevs[i]);
- }
+ }
+}
+
+static int __init dsa_loop_create_switch_mdiodev(void)
+{
+ static struct dsa_loop_pdata dsa_loop_pdata = {
+ .cd = {
+ .port_names[0] = "lan1",
+ .port_names[1] = "lan2",
+ .port_names[2] = "lan3",
+ .port_names[3] = "lan4",
+ .port_names[DSA_LOOP_CPU_PORT] = "cpu",
+ },
+ .name = "DSA mockup driver",
+ .enabled_ports = 0x1f,
+ .netdev = "eth0",
+ };
+ struct mii_bus *bus;
+ int ret = -ENODEV;
+
+ bus = mdio_find_bus("fixed-0");
+ if (WARN_ON(!bus))
+ return ret;
+
+ switch_mdiodev = mdio_device_create(bus, 31);
+ if (IS_ERR(switch_mdiodev))
+ goto out;
+
+ strscpy(switch_mdiodev->modalias, "dsa-loop");
+ switch_mdiodev->dev.platform_data = &dsa_loop_pdata;
+
+ ret = mdio_device_register(switch_mdiodev);
+ if (ret)
+ mdio_device_free(switch_mdiodev);
+out:
+ put_device(&bus->dev);
+ return ret;
}
static int __init dsa_loop_init(void)
@@ -402,14 +446,22 @@ static int __init dsa_loop_init(void)
.speed = SPEED_100,
.duplex = DUPLEX_FULL,
};
- unsigned int i, ret;
+ unsigned int i;
+ int ret;
+
+ ret = dsa_loop_create_switch_mdiodev();
+ if (ret)
+ return ret;
for (i = 0; i < NUM_FIXED_PHYS; i++)
phydevs[i] = fixed_phy_register(&status, NULL);
ret = mdio_driver_register(&dsa_loop_drv);
- if (ret)
+ if (ret) {
dsa_loop_phydevs_unregister();
+ mdio_device_remove(switch_mdiodev);
+ mdio_device_free(switch_mdiodev);
+ }
return ret;
}
@@ -419,10 +471,11 @@ static void __exit dsa_loop_exit(void)
{
mdio_driver_unregister(&dsa_loop_drv);
dsa_loop_phydevs_unregister();
+ mdio_device_remove(switch_mdiodev);
+ mdio_device_free(switch_mdiodev);
}
module_exit(dsa_loop_exit);
-MODULE_SOFTDEP("pre: dsa_loop_bdinfo");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Florian Fainelli");
MODULE_DESCRIPTION("DSA loopback driver");
diff --git a/drivers/net/dsa/dsa_loop.h b/drivers/net/dsa/dsa_loop.h
deleted file mode 100644
index 93e5c15d0efd..000000000000
--- a/drivers/net/dsa/dsa_loop.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DSA_LOOP_H
-#define __DSA_LOOP_H
-
-struct dsa_chip_data;
-
-struct dsa_loop_pdata {
- /* Must be first, such that dsa_register_switch() can access this
- * without gory pointer manipulations
- */
- struct dsa_chip_data cd;
- const char *name;
- unsigned int enabled_ports;
- const char *netdev;
-};
-
-#define DSA_LOOP_NUM_PORTS 6
-#define DSA_LOOP_CPU_PORT (DSA_LOOP_NUM_PORTS - 1)
-
-#endif /* __DSA_LOOP_H */
diff --git a/drivers/net/dsa/dsa_loop_bdinfo.c b/drivers/net/dsa/dsa_loop_bdinfo.c
deleted file mode 100644
index 14ca42491512..000000000000
--- a/drivers/net/dsa/dsa_loop_bdinfo.c
+++ /dev/null
@@ -1,36 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/phy.h>
-#include <net/dsa.h>
-
-#include "dsa_loop.h"
-
-static struct dsa_loop_pdata dsa_loop_pdata = {
- .cd = {
- .port_names[0] = "lan1",
- .port_names[1] = "lan2",
- .port_names[2] = "lan3",
- .port_names[3] = "lan4",
- .port_names[DSA_LOOP_CPU_PORT] = "cpu",
- },
- .name = "DSA mockup driver",
- .enabled_ports = 0x1f,
- .netdev = "eth0",
-};
-
-static const struct mdio_board_info bdinfo = {
- .bus_id = "fixed-0",
- .modalias = "dsa-loop",
- .mdio_addr = 31,
- .platform_data = &dsa_loop_pdata,
-};
-
-static int __init dsa_loop_bdinfo_init(void)
-{
- return mdiobus_register_board_info(&bdinfo, 1);
-}
-arch_initcall(dsa_loop_bdinfo_init)
-
-MODULE_DESCRIPTION("DSA mock-up switch driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/dsa/ks8995.c
index d135b061d810..5c4c83e00477 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/dsa/ks8995.c
@@ -3,6 +3,7 @@
* SPI driver for Micrel/Kendin KS8995M and KSZ8864RMN ethernet switches
*
* Copyright (C) 2008 Gabor Juhos <juhosg at openwrt.org>
+ * Copyright (C) 2025 Linus Walleij <linus.walleij@linaro.org>
*
* This file was based on: drivers/spi/at25.c
* Copyright (C) 2006 David Brownell
@@ -10,6 +11,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/bits.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -17,8 +21,8 @@
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
-
#include <linux/spi/spi.h>
+#include <net/dsa.h>
#define DRV_VERSION "0.1.1"
#define DRV_DESC "Micrel KS8995 Ethernet switch SPI driver"
@@ -29,18 +33,59 @@
#define KS8995_REG_ID1 0x01 /* Chip ID1 */
#define KS8995_REG_GC0 0x02 /* Global Control 0 */
+
+#define KS8995_GC0_P5_PHY BIT(3) /* Port 5 PHY enabled */
+
#define KS8995_REG_GC1 0x03 /* Global Control 1 */
#define KS8995_REG_GC2 0x04 /* Global Control 2 */
+
+#define KS8995_GC2_HUGE BIT(2) /* Huge packet support */
+#define KS8995_GC2_LEGAL BIT(1) /* Legal size override */
+
#define KS8995_REG_GC3 0x05 /* Global Control 3 */
#define KS8995_REG_GC4 0x06 /* Global Control 4 */
+
+#define KS8995_GC4_10BT BIT(4) /* Force switch to 10Mbit */
+#define KS8995_GC4_MII_FLOW BIT(5) /* MII full-duplex flow control enable */
+#define KS8995_GC4_MII_HD BIT(6) /* MII half-duplex mode enable */
+
#define KS8995_REG_GC5 0x07 /* Global Control 5 */
#define KS8995_REG_GC6 0x08 /* Global Control 6 */
#define KS8995_REG_GC7 0x09 /* Global Control 7 */
#define KS8995_REG_GC8 0x0a /* Global Control 8 */
#define KS8995_REG_GC9 0x0b /* Global Control 9 */
-#define KS8995_REG_PC(p, r) ((0x10 * p) + r) /* Port Control */
-#define KS8995_REG_PS(p, r) ((0x10 * p) + r + 0xe) /* Port Status */
+#define KS8995_GC9_SPECIAL BIT(0) /* Special tagging mode (DSA) */
+
+/* In DSA the ports 1-4 are numbered 0-3 and the CPU port is port 4 */
+#define KS8995_REG_PC(p, r) (0x10 + (0x10 * (p)) + (r)) /* Port Control */
+#define KS8995_REG_PS(p, r) (0x1e + (0x10 * (p)) + (r)) /* Port Status */
+
+#define KS8995_REG_PC0 0x00 /* Port Control 0 */
+#define KS8995_REG_PC1 0x01 /* Port Control 1 */
+#define KS8995_REG_PC2 0x02 /* Port Control 2 */
+#define KS8995_REG_PC3 0x03 /* Port Control 3 */
+#define KS8995_REG_PC4 0x04 /* Port Control 4 */
+#define KS8995_REG_PC5 0x05 /* Port Control 5 */
+#define KS8995_REG_PC6 0x06 /* Port Control 6 */
+#define KS8995_REG_PC7 0x07 /* Port Control 7 */
+#define KS8995_REG_PC8 0x08 /* Port Control 8 */
+#define KS8995_REG_PC9 0x09 /* Port Control 9 */
+#define KS8995_REG_PC10 0x0a /* Port Control 10 */
+#define KS8995_REG_PC11 0x0b /* Port Control 11 */
+#define KS8995_REG_PC12 0x0c /* Port Control 12 */
+#define KS8995_REG_PC13 0x0d /* Port Control 13 */
+
+#define KS8995_PC0_TAG_INS BIT(2) /* Enable tag insertion on port */
+#define KS8995_PC0_TAG_REM BIT(1) /* Enable tag removal on port */
+#define KS8995_PC0_PRIO_EN BIT(0) /* Enable priority handling */
+
+#define KS8995_PC2_TXEN BIT(2) /* Enable TX on port */
+#define KS8995_PC2_RXEN BIT(1) /* Enable RX on port */
+#define KS8995_PC2_LEARN_DIS BIT(0) /* Disable learning on port */
+
+#define KS8995_PC13_TXDIS BIT(6) /* Disable transmitter */
+#define KS8995_PC13_PWDN BIT(3) /* Power down */
#define KS8995_REG_TPC0 0x60 /* TOS Priority Control 0 */
#define KS8995_REG_TPC1 0x61 /* TOS Priority Control 1 */
@@ -91,6 +136,8 @@
#define KS8995_CMD_WRITE 0x02U
#define KS8995_CMD_READ 0x03U
+#define KS8995_CPU_PORT 4
+#define KS8995_NUM_PORTS 5 /* 5 ports including the CPU port */
#define KS8995_RESET_DELAY 10 /* usec */
enum ks8995_chip_variant {
@@ -138,11 +185,14 @@ static const struct ks8995_chip_params ks8995_chip[] = {
struct ks8995_switch {
struct spi_device *spi;
+ struct device *dev;
+ struct dsa_switch *ds;
struct mutex lock;
struct gpio_desc *reset_gpio;
struct bin_attribute regs_attr;
const struct ks8995_chip_params *chip;
int revision_id;
+ unsigned int max_mtu[KS8995_NUM_PORTS];
};
static const struct spi_device_id ks8995_id[] = {
@@ -288,30 +338,6 @@ static int ks8995_reset(struct ks8995_switch *ks)
return ks8995_start(ks);
}
-static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj,
- const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
-{
- struct device *dev;
- struct ks8995_switch *ks8995;
-
- dev = kobj_to_dev(kobj);
- ks8995 = dev_get_drvdata(dev);
-
- return ks8995_read(ks8995, buf, off, count);
-}
-
-static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj,
- const struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
-{
- struct device *dev;
- struct ks8995_switch *ks8995;
-
- dev = kobj_to_dev(kobj);
- ks8995 = dev_get_drvdata(dev);
-
- return ks8995_write(ks8995, buf, off, count);
-}
-
/* ks8995_get_revision - get chip revision
* @ks: pointer to switch instance
*
@@ -395,14 +421,325 @@ err_out:
return err;
}
-static const struct bin_attribute ks8995_registers_attr = {
- .attr = {
- .name = "registers",
- .mode = 0600,
- },
- .size = KS8995_REGS_SIZE,
- .read = ks8995_registers_read,
- .write = ks8995_registers_write,
+static int ks8995_check_config(struct ks8995_switch *ks)
+{
+ int ret;
+ u8 val;
+
+ ret = ks8995_read_reg(ks, KS8995_REG_GC0, &val);
+ if (ret) {
+ dev_err(ks->dev, "failed to read KS8995_REG_GC0\n");
+ return ret;
+ }
+
+ dev_dbg(ks->dev, "port 5 PHY %senabled\n",
+ (val & KS8995_GC0_P5_PHY) ? "" : "not ");
+
+ val |= KS8995_GC0_P5_PHY;
+ ret = ks8995_write_reg(ks, KS8995_REG_GC0, val);
+ if (ret)
+ dev_err(ks->dev, "failed to set KS8995_REG_GC0\n");
+
+ dev_dbg(ks->dev, "set KS8995_REG_GC0 to 0x%02x\n", val);
+
+ return 0;
+}
+
+static void
+ks8995_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static void
+ks8995_mac_link_up(struct phylink_config *config, struct phy_device *phydev,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ks8995_switch *ks = dp->ds->priv;
+ int port = dp->index;
+ int ret;
+ u8 val;
+
+ /* Allow forcing the mode on the fixed CPU port, no autonegotiation.
+ * We assume autonegotiation works on the PHY-facing ports.
+ */
+ if (port != KS8995_CPU_PORT)
+ return;
+
+ dev_dbg(ks->dev, "MAC link up on CPU port (%d)\n", port);
+
+ ret = ks8995_read_reg(ks, KS8995_REG_GC4, &val);
+ if (ret) {
+ dev_err(ks->dev, "failed to read KS8995_REG_GC4\n");
+ return;
+ }
+
+ /* Conjure port config */
+ switch (speed) {
+ case SPEED_10:
+ dev_dbg(ks->dev, "set switch MII to 10Mbit mode\n");
+ val |= KS8995_GC4_10BT;
+ break;
+ case SPEED_100:
+ default:
+ dev_dbg(ks->dev, "set switch MII to 100Mbit mode\n");
+ val &= ~KS8995_GC4_10BT;
+ break;
+ }
+
+ if (duplex == DUPLEX_HALF) {
+ dev_dbg(ks->dev, "set switch MII to half duplex\n");
+ val |= KS8995_GC4_MII_HD;
+ } else {
+ dev_dbg(ks->dev, "set switch MII to full duplex\n");
+ val &= ~KS8995_GC4_MII_HD;
+ }
+
+ dev_dbg(ks->dev, "set KS8995_REG_GC4 to %02x\n", val);
+
+ /* Enable the CPU port */
+ ret = ks8995_write_reg(ks, KS8995_REG_GC4, val);
+ if (ret)
+ dev_err(ks->dev, "failed to set KS8995_REG_GC4\n");
+}
+
+static void
+ks8995_mac_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ks8995_switch *ks = dp->ds->priv;
+ int port = dp->index;
+
+ if (port != KS8995_CPU_PORT)
+ return;
+
+ dev_dbg(ks->dev, "MAC link down on CPU port (%d)\n", port);
+
+ /* Disable the CPU port */
+}
+
+static const struct phylink_mac_ops ks8995_phylink_mac_ops = {
+ .mac_config = ks8995_mac_config,
+ .mac_link_up = ks8995_mac_link_up,
+ .mac_link_down = ks8995_mac_link_down,
+};
+
+static enum dsa_tag_protocol
+ks8995_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mp)
+{
+ /* This switch actually uses the 6 byte KS8995 protocol */
+ return DSA_TAG_PROTO_NONE;
+}
+
+static int ks8995_setup(struct dsa_switch *ds)
+{
+ return 0;
+}
+
+static int ks8995_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct ks8995_switch *ks = ds->priv;
+
+ dev_dbg(ks->dev, "enable port %d\n", port);
+
+ return 0;
+}
+
+static void ks8995_port_disable(struct dsa_switch *ds, int port)
+{
+ struct ks8995_switch *ks = ds->priv;
+
+ dev_dbg(ks->dev, "disable port %d\n", port);
+}
+
+static int ks8995_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ /* We support enabling/disabling learning */
+ if (flags.mask & ~(BR_LEARNING))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ks8995_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct ks8995_switch *ks = ds->priv;
+ int ret;
+ u8 val;
+
+ if (flags.mask & BR_LEARNING) {
+ ret = ks8995_read_reg(ks, KS8995_REG_PC(port, KS8995_REG_PC2), &val);
+ if (ret) {
+ dev_err(ks->dev, "failed to read KS8995_REG_PC2 on port %d\n", port);
+ return ret;
+ }
+
+ if (flags.val & BR_LEARNING)
+ val &= ~KS8995_PC2_LEARN_DIS;
+ else
+ val |= KS8995_PC2_LEARN_DIS;
+
+ ret = ks8995_write_reg(ks, KS8995_REG_PC(port, KS8995_REG_PC2), val);
+ if (ret) {
+ dev_err(ks->dev, "failed to write KS8995_REG_PC2 on port %d\n", port);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void ks8995_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct ks8995_switch *ks = ds->priv;
+ int ret;
+ u8 val;
+
+ ret = ks8995_read_reg(ks, KS8995_REG_PC(port, KS8995_REG_PC2), &val);
+ if (ret) {
+ dev_err(ks->dev, "failed to read KS8995_REG_PC2 on port %d\n", port);
+ return;
+ }
+
+ /* Set the bits for the different STP states in accordance with
+ * the datasheet, pages 36-37 "Spanning tree support".
+ */
+ switch (state) {
+ case BR_STATE_DISABLED:
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ val &= ~KS8995_PC2_TXEN;
+ val &= ~KS8995_PC2_RXEN;
+ val |= KS8995_PC2_LEARN_DIS;
+ break;
+ case BR_STATE_LEARNING:
+ val &= ~KS8995_PC2_TXEN;
+ val &= ~KS8995_PC2_RXEN;
+ val &= ~KS8995_PC2_LEARN_DIS;
+ break;
+ case BR_STATE_FORWARDING:
+ val |= KS8995_PC2_TXEN;
+ val |= KS8995_PC2_RXEN;
+ val &= ~KS8995_PC2_LEARN_DIS;
+ break;
+ default:
+ dev_err(ks->dev, "unknown bridge state requested\n");
+ return;
+ }
+
+ ret = ks8995_write_reg(ks, KS8995_REG_PC(port, KS8995_REG_PC2), val);
+ if (ret) {
+ dev_err(ks->dev, "failed to write KS8995_REG_PC2 on port %d\n", port);
+ return;
+ }
+
+ dev_dbg(ks->dev, "set KS8995_REG_PC2 for port %d to %02x\n", port, val);
+}
+
+static void ks8995_phylink_get_caps(struct dsa_switch *dsa, int port,
+ struct phylink_config *config)
+{
+ unsigned long *interfaces = config->supported_interfaces;
+
+ if (port == KS8995_CPU_PORT)
+ __set_bit(PHY_INTERFACE_MODE_MII, interfaces);
+
+ if (port <= 3) {
+ /* Internal PHYs */
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL, interfaces);
+ /* phylib default */
+ __set_bit(PHY_INTERFACE_MODE_MII, interfaces);
+ }
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
+}
+
+/* Huge packet support up to 1916 byte packets "inclusive",
+ * which means that tags are included. If the bit is not set
+ * the limit is 1536 bytes "inclusive". We present the length
+ * without tags or ethernet headers. The setting affects all ports.
+ */
+static int ks8995_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct ks8995_switch *ks = ds->priv;
+ unsigned int max_mtu;
+ int ret;
+ u8 val;
+ int i;
+
+ ks->max_mtu[port] = new_mtu;
+
+ /* Cap the MTU for the entire switch to the largest value
+ * set on any one port: the biggest MTU configured for any
+ * port becomes the MTU for the whole switch.
+ */
+ max_mtu = ETH_DATA_LEN;
+ for (i = 0; i < KS8995_NUM_PORTS; i++) {
+ if (ks->max_mtu[i] > max_mtu)
+ max_mtu = ks->max_mtu[i];
+ }
+
+ /* Translate to layer 2 size.
+ * Add ethernet and (possible) VLAN headers, and checksum to the size.
+ * For ETH_DATA_LEN (1500 bytes) this will add up to 1522 bytes.
+ */
+ max_mtu += VLAN_ETH_HLEN;
+ max_mtu += ETH_FCS_LEN;
+
+ ret = ks8995_read_reg(ks, KS8995_REG_GC2, &val);
+ if (ret) {
+ dev_err(ks->dev, "failed to read KS8995_REG_GC2\n");
+ return ret;
+ }
+
+ if (max_mtu <= 1522) {
+ val &= ~KS8995_GC2_HUGE;
+ val &= ~KS8995_GC2_LEGAL;
+ } else if (max_mtu > 1522 && max_mtu <= 1536) {
+ /* This accepts packets up to 1536 bytes */
+ val &= ~KS8995_GC2_HUGE;
+ val |= KS8995_GC2_LEGAL;
+ } else {
+ /* This accepts packets up to 1916 bytes */
+ val |= KS8995_GC2_HUGE;
+ val |= KS8995_GC2_LEGAL;
+ }
+
+ dev_dbg(ks->dev, "new max MTU %d bytes (inclusive)\n", max_mtu);
+
+ ret = ks8995_write_reg(ks, KS8995_REG_GC2, val);
+ if (ret)
+ dev_err(ks->dev, "failed to set KS8995_REG_GC2\n");
+
+ return ret;
+}
+
+static int ks8995_get_max_mtu(struct dsa_switch *ds, int port)
+{
+ return 1916 - ETH_HLEN - ETH_FCS_LEN;
+}
+
+static const struct dsa_switch_ops ks8995_ds_ops = {
+ .get_tag_protocol = ks8995_get_tag_protocol,
+ .setup = ks8995_setup,
+ .port_pre_bridge_flags = ks8995_port_pre_bridge_flags,
+ .port_bridge_flags = ks8995_port_bridge_flags,
+ .port_enable = ks8995_port_enable,
+ .port_disable = ks8995_port_disable,
+ .port_stp_state_set = ks8995_port_stp_state_set,
+ .port_change_mtu = ks8995_change_mtu,
+ .port_max_mtu = ks8995_get_max_mtu,
+ .phylink_get_caps = ks8995_phylink_get_caps,
};
/* ------------------------------------------------------------------------ */
@@ -423,6 +760,7 @@ static int ks8995_probe(struct spi_device *spi)
mutex_init(&ks->lock);
ks->spi = spi;
+ ks->dev = &spi->dev;
ks->chip = &ks8995_chip[variant];
ks->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset",
@@ -438,9 +776,15 @@ static int ks8995_probe(struct spi_device *spi)
if (err)
return err;
- /* de-assert switch reset */
- /* FIXME: this likely requires a delay */
- gpiod_set_value_cansleep(ks->reset_gpio, 0);
+ if (ks->reset_gpio) {
+ /*
+ * If a reset line was obtained, wait for 100us after
+ * de-asserting RESET before accessing any registers, see
+ * the KS8995MA datasheet, page 44.
+ */
+ gpiod_set_value_cansleep(ks->reset_gpio, 0);
+ udelay(100);
+ }
spi_set_drvdata(spi, ks);
@@ -456,24 +800,32 @@ static int ks8995_probe(struct spi_device *spi)
if (err)
return err;
- memcpy(&ks->regs_attr, &ks8995_registers_attr, sizeof(ks->regs_attr));
- ks->regs_attr.size = ks->chip->regs_size;
-
err = ks8995_reset(ks);
if (err)
return err;
- sysfs_attr_init(&ks->regs_attr.attr);
- err = sysfs_create_bin_file(&spi->dev.kobj, &ks->regs_attr);
- if (err) {
- dev_err(&spi->dev, "unable to create sysfs file, err=%d\n",
- err);
- return err;
- }
-
dev_info(&spi->dev, "%s device found, Chip ID:%x, Revision:%x\n",
ks->chip->name, ks->chip->chip_id, ks->revision_id);
+ err = ks8995_check_config(ks);
+ if (err)
+ return err;
+
+ ks->ds = devm_kzalloc(&spi->dev, sizeof(*ks->ds), GFP_KERNEL);
+ if (!ks->ds)
+ return -ENOMEM;
+
+ ks->ds->dev = &spi->dev;
+ ks->ds->num_ports = KS8995_NUM_PORTS;
+ ks->ds->ops = &ks8995_ds_ops;
+ ks->ds->phylink_mac_ops = &ks8995_phylink_mac_ops;
+ ks->ds->priv = ks;
+
+ err = dsa_register_switch(ks->ds);
+ if (err)
+ return dev_err_probe(&spi->dev, err,
+ "unable to register DSA switch\n");
+
return 0;
}
@@ -481,8 +833,7 @@ static void ks8995_remove(struct spi_device *spi)
{
struct ks8995_switch *ks = spi_get_drvdata(spi);
- sysfs_remove_bin_file(&spi->dev.kobj, &ks->regs_attr);
-
+ dsa_unregister_switch(ks->ds);
/* assert reset */
gpiod_set_value_cansleep(ks->reset_gpio, 1);
}
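
To make the GC2 frame-size selection in ks8995_change_mtu() concrete: the largest per-port MTU is converted to an on-the-wire size by adding VLAN_ETH_HLEN (18) and ETH_FCS_LEN (4), then one of three ceilings is chosen. A standalone sketch of that decision; the helper name and sample MTUs are invented for illustration:

#include <stdio.h>

#define VLAN_ETH_HLEN	18
#define ETH_FCS_LEN	4

/* Mirrors the HUGE/LEGAL bit selection in ks8995_change_mtu() */
static const char *ks8995_frame_limit(int mtu)
{
	int wire = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;

	if (wire <= 1522)
		return "1522 bytes (HUGE=0, LEGAL=0)";
	if (wire <= 1536)
		return "1536 bytes (HUGE=0, LEGAL=1)";
	return "1916 bytes (HUGE=1, LEGAL=1)";
}

int main(void)
{
	printf("MTU 1500 -> limit %s\n", ks8995_frame_limit(1500));	/* 1522 */
	printf("MTU 1600 -> limit %s\n", ks8995_frame_limit(1600));	/* 1916 */
	return 0;
}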
diff --git a/drivers/net/dsa/lantiq/Kconfig b/drivers/net/dsa/lantiq/Kconfig
new file mode 100644
index 000000000000..1cb053c823f7
--- /dev/null
+++ b/drivers/net/dsa/lantiq/Kconfig
@@ -0,0 +1,7 @@
+config NET_DSA_LANTIQ_GSWIP
+ tristate "Lantiq / Intel GSWIP"
+ depends on HAS_IOMEM
+ select NET_DSA_TAG_GSWIP
+ help
+ This enables support for the Lantiq / Intel GSWIP 2.1 found in
+ the xrx200 / VR9 SoC.
diff --git a/drivers/net/dsa/lantiq/Makefile b/drivers/net/dsa/lantiq/Makefile
new file mode 100644
index 000000000000..849f85ebebd6
--- /dev/null
+++ b/drivers/net/dsa/lantiq/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_NET_DSA_LANTIQ_GSWIP) += lantiq_gswip.o
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq/lantiq_gswip.c
index 6eb3140d4044..2169c0814a48 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq/lantiq_gswip.c
@@ -25,7 +25,9 @@
* between all LAN ports by default.
*/
-#include <linux/clk.h>
+#include "lantiq_gswip.h"
+#include "lantiq_pce.h"
+
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
@@ -39,258 +41,13 @@
#include <linux/of_platform.h>
#include <linux/phy.h>
#include <linux/phylink.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-#include <linux/reset.h>
-#include <net/dsa.h>
#include <dt-bindings/mips/lantiq_rcu_gphy.h>
-#include "lantiq_pce.h"
-
-/* GSWIP MDIO Registers */
-#define GSWIP_MDIO_GLOB 0x00
-#define GSWIP_MDIO_GLOB_ENABLE BIT(15)
-#define GSWIP_MDIO_CTRL 0x08
-#define GSWIP_MDIO_CTRL_BUSY BIT(12)
-#define GSWIP_MDIO_CTRL_RD BIT(11)
-#define GSWIP_MDIO_CTRL_WR BIT(10)
-#define GSWIP_MDIO_CTRL_PHYAD_MASK 0x1f
-#define GSWIP_MDIO_CTRL_PHYAD_SHIFT 5
-#define GSWIP_MDIO_CTRL_REGAD_MASK 0x1f
-#define GSWIP_MDIO_READ 0x09
-#define GSWIP_MDIO_WRITE 0x0A
-#define GSWIP_MDIO_MDC_CFG0 0x0B
-#define GSWIP_MDIO_MDC_CFG1 0x0C
-#define GSWIP_MDIO_PHYp(p) (0x15 - (p))
-#define GSWIP_MDIO_PHY_LINK_MASK 0x6000
-#define GSWIP_MDIO_PHY_LINK_AUTO 0x0000
-#define GSWIP_MDIO_PHY_LINK_DOWN 0x4000
-#define GSWIP_MDIO_PHY_LINK_UP 0x2000
-#define GSWIP_MDIO_PHY_SPEED_MASK 0x1800
-#define GSWIP_MDIO_PHY_SPEED_AUTO 0x1800
-#define GSWIP_MDIO_PHY_SPEED_M10 0x0000
-#define GSWIP_MDIO_PHY_SPEED_M100 0x0800
-#define GSWIP_MDIO_PHY_SPEED_G1 0x1000
-#define GSWIP_MDIO_PHY_FDUP_MASK 0x0600
-#define GSWIP_MDIO_PHY_FDUP_AUTO 0x0000
-#define GSWIP_MDIO_PHY_FDUP_EN 0x0200
-#define GSWIP_MDIO_PHY_FDUP_DIS 0x0600
-#define GSWIP_MDIO_PHY_FCONTX_MASK 0x0180
-#define GSWIP_MDIO_PHY_FCONTX_AUTO 0x0000
-#define GSWIP_MDIO_PHY_FCONTX_EN 0x0100
-#define GSWIP_MDIO_PHY_FCONTX_DIS 0x0180
-#define GSWIP_MDIO_PHY_FCONRX_MASK 0x0060
-#define GSWIP_MDIO_PHY_FCONRX_AUTO 0x0000
-#define GSWIP_MDIO_PHY_FCONRX_EN 0x0020
-#define GSWIP_MDIO_PHY_FCONRX_DIS 0x0060
-#define GSWIP_MDIO_PHY_ADDR_MASK 0x001f
-#define GSWIP_MDIO_PHY_MASK (GSWIP_MDIO_PHY_ADDR_MASK | \
- GSWIP_MDIO_PHY_FCONRX_MASK | \
- GSWIP_MDIO_PHY_FCONTX_MASK | \
- GSWIP_MDIO_PHY_LINK_MASK | \
- GSWIP_MDIO_PHY_SPEED_MASK | \
- GSWIP_MDIO_PHY_FDUP_MASK)
-
-/* GSWIP MII Registers */
-#define GSWIP_MII_CFGp(p) (0x2 * (p))
-#define GSWIP_MII_CFG_RESET BIT(15)
-#define GSWIP_MII_CFG_EN BIT(14)
-#define GSWIP_MII_CFG_ISOLATE BIT(13)
-#define GSWIP_MII_CFG_LDCLKDIS BIT(12)
-#define GSWIP_MII_CFG_RGMII_IBS BIT(8)
-#define GSWIP_MII_CFG_RMII_CLK BIT(7)
-#define GSWIP_MII_CFG_MODE_MIIP 0x0
-#define GSWIP_MII_CFG_MODE_MIIM 0x1
-#define GSWIP_MII_CFG_MODE_RMIIP 0x2
-#define GSWIP_MII_CFG_MODE_RMIIM 0x3
-#define GSWIP_MII_CFG_MODE_RGMII 0x4
-#define GSWIP_MII_CFG_MODE_GMII 0x9
-#define GSWIP_MII_CFG_MODE_MASK 0xf
-#define GSWIP_MII_CFG_RATE_M2P5 0x00
-#define GSWIP_MII_CFG_RATE_M25 0x10
-#define GSWIP_MII_CFG_RATE_M125 0x20
-#define GSWIP_MII_CFG_RATE_M50 0x30
-#define GSWIP_MII_CFG_RATE_AUTO 0x40
-#define GSWIP_MII_CFG_RATE_MASK 0x70
-#define GSWIP_MII_PCDU0 0x01
-#define GSWIP_MII_PCDU1 0x03
-#define GSWIP_MII_PCDU5 0x05
-#define GSWIP_MII_PCDU_TXDLY_MASK GENMASK(2, 0)
-#define GSWIP_MII_PCDU_RXDLY_MASK GENMASK(9, 7)
-
-/* GSWIP Core Registers */
-#define GSWIP_SWRES 0x000
-#define GSWIP_SWRES_R1 BIT(1) /* GSWIP Software reset */
-#define GSWIP_SWRES_R0 BIT(0) /* GSWIP Hardware reset */
-#define GSWIP_VERSION 0x013
-#define GSWIP_VERSION_REV_SHIFT 0
-#define GSWIP_VERSION_REV_MASK GENMASK(7, 0)
-#define GSWIP_VERSION_MOD_SHIFT 8
-#define GSWIP_VERSION_MOD_MASK GENMASK(15, 8)
-#define GSWIP_VERSION_2_0 0x100
-#define GSWIP_VERSION_2_1 0x021
-#define GSWIP_VERSION_2_2 0x122
-#define GSWIP_VERSION_2_2_ETC 0x022
-
-#define GSWIP_BM_RAM_VAL(x) (0x043 - (x))
-#define GSWIP_BM_RAM_ADDR 0x044
-#define GSWIP_BM_RAM_CTRL 0x045
-#define GSWIP_BM_RAM_CTRL_BAS BIT(15)
-#define GSWIP_BM_RAM_CTRL_OPMOD BIT(5)
-#define GSWIP_BM_RAM_CTRL_ADDR_MASK GENMASK(4, 0)
-#define GSWIP_BM_QUEUE_GCTRL 0x04A
-#define GSWIP_BM_QUEUE_GCTRL_GL_MOD BIT(10)
-/* buffer management Port Configuration Register */
-#define GSWIP_BM_PCFGp(p) (0x080 + ((p) * 2))
-#define GSWIP_BM_PCFG_CNTEN BIT(0) /* RMON Counter Enable */
-#define GSWIP_BM_PCFG_IGCNT BIT(1) /* Ingres Special Tag RMON count */
-/* buffer management Port Control Register */
-#define GSWIP_BM_RMON_CTRLp(p) (0x81 + ((p) * 2))
-#define GSWIP_BM_CTRL_RMON_RAM1_RES BIT(0) /* Software Reset for RMON RAM 1 */
-#define GSWIP_BM_CTRL_RMON_RAM2_RES BIT(1) /* Software Reset for RMON RAM 2 */
-
-/* PCE */
-#define GSWIP_PCE_TBL_KEY(x) (0x447 - (x))
-#define GSWIP_PCE_TBL_MASK 0x448
-#define GSWIP_PCE_TBL_VAL(x) (0x44D - (x))
-#define GSWIP_PCE_TBL_ADDR 0x44E
-#define GSWIP_PCE_TBL_CTRL 0x44F
-#define GSWIP_PCE_TBL_CTRL_BAS BIT(15)
-#define GSWIP_PCE_TBL_CTRL_TYPE BIT(13)
-#define GSWIP_PCE_TBL_CTRL_VLD BIT(12)
-#define GSWIP_PCE_TBL_CTRL_KEYFORM BIT(11)
-#define GSWIP_PCE_TBL_CTRL_GMAP_MASK GENMASK(10, 7)
-#define GSWIP_PCE_TBL_CTRL_OPMOD_MASK GENMASK(6, 5)
-#define GSWIP_PCE_TBL_CTRL_OPMOD_ADRD 0x00
-#define GSWIP_PCE_TBL_CTRL_OPMOD_ADWR 0x20
-#define GSWIP_PCE_TBL_CTRL_OPMOD_KSRD 0x40
-#define GSWIP_PCE_TBL_CTRL_OPMOD_KSWR 0x60
-#define GSWIP_PCE_TBL_CTRL_ADDR_MASK GENMASK(4, 0)
-#define GSWIP_PCE_PMAP1 0x453 /* Monitoring port map */
-#define GSWIP_PCE_PMAP2 0x454 /* Default Multicast port map */
-#define GSWIP_PCE_PMAP3 0x455 /* Default Unknown Unicast port map */
-#define GSWIP_PCE_GCTRL_0 0x456
-#define GSWIP_PCE_GCTRL_0_MTFL BIT(0) /* MAC Table Flushing */
-#define GSWIP_PCE_GCTRL_0_MC_VALID BIT(3)
-#define GSWIP_PCE_GCTRL_0_VLAN BIT(14) /* VLAN aware Switching */
-#define GSWIP_PCE_GCTRL_1 0x457
-#define GSWIP_PCE_GCTRL_1_MAC_GLOCK BIT(2) /* MAC Address table lock */
-#define GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD BIT(3) /* Mac address table lock forwarding mode */
-#define GSWIP_PCE_PCTRL_0p(p) (0x480 + ((p) * 0xA))
-#define GSWIP_PCE_PCTRL_0_TVM BIT(5) /* Transparent VLAN mode */
-#define GSWIP_PCE_PCTRL_0_VREP BIT(6) /* VLAN Replace Mode */
-#define GSWIP_PCE_PCTRL_0_INGRESS BIT(11) /* Accept special tag in ingress */
-#define GSWIP_PCE_PCTRL_0_PSTATE_LISTEN 0x0
-#define GSWIP_PCE_PCTRL_0_PSTATE_RX 0x1
-#define GSWIP_PCE_PCTRL_0_PSTATE_TX 0x2
-#define GSWIP_PCE_PCTRL_0_PSTATE_LEARNING 0x3
-#define GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING 0x7
-#define GSWIP_PCE_PCTRL_0_PSTATE_MASK GENMASK(2, 0)
-#define GSWIP_PCE_VCTRL(p) (0x485 + ((p) * 0xA))
-#define GSWIP_PCE_VCTRL_UVR BIT(0) /* Unknown VLAN Rule */
-#define GSWIP_PCE_VCTRL_VIMR BIT(3) /* VLAN Ingress Member violation rule */
-#define GSWIP_PCE_VCTRL_VEMR BIT(4) /* VLAN Egress Member violation rule */
-#define GSWIP_PCE_VCTRL_VSR BIT(5) /* VLAN Security */
-#define GSWIP_PCE_VCTRL_VID0 BIT(6) /* Priority Tagged Rule */
-#define GSWIP_PCE_DEFPVID(p) (0x486 + ((p) * 0xA))
-
-#define GSWIP_MAC_FLEN 0x8C5
-#define GSWIP_MAC_CTRL_0p(p) (0x903 + ((p) * 0xC))
-#define GSWIP_MAC_CTRL_0_PADEN BIT(8)
-#define GSWIP_MAC_CTRL_0_FCS_EN BIT(7)
-#define GSWIP_MAC_CTRL_0_FCON_MASK 0x0070
-#define GSWIP_MAC_CTRL_0_FCON_AUTO 0x0000
-#define GSWIP_MAC_CTRL_0_FCON_RX 0x0010
-#define GSWIP_MAC_CTRL_0_FCON_TX 0x0020
-#define GSWIP_MAC_CTRL_0_FCON_RXTX 0x0030
-#define GSWIP_MAC_CTRL_0_FCON_NONE 0x0040
-#define GSWIP_MAC_CTRL_0_FDUP_MASK 0x000C
-#define GSWIP_MAC_CTRL_0_FDUP_AUTO 0x0000
-#define GSWIP_MAC_CTRL_0_FDUP_EN 0x0004
-#define GSWIP_MAC_CTRL_0_FDUP_DIS 0x000C
-#define GSWIP_MAC_CTRL_0_GMII_MASK 0x0003
-#define GSWIP_MAC_CTRL_0_GMII_AUTO 0x0000
-#define GSWIP_MAC_CTRL_0_GMII_MII 0x0001
-#define GSWIP_MAC_CTRL_0_GMII_RGMII 0x0002
-#define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC))
-#define GSWIP_MAC_CTRL_2_LCHKL BIT(2) /* Frame Length Check Long Enable */
-#define GSWIP_MAC_CTRL_2_MLEN BIT(3) /* Maximum Untagged Frame Lnegth */
-
-/* Ethernet Switch Fetch DMA Port Control Register */
-#define GSWIP_FDMA_PCTRLp(p) (0xA80 + ((p) * 0x6))
-#define GSWIP_FDMA_PCTRL_EN BIT(0) /* FDMA Port Enable */
-#define GSWIP_FDMA_PCTRL_STEN BIT(1) /* Special Tag Insertion Enable */
-#define GSWIP_FDMA_PCTRL_VLANMOD_MASK GENMASK(4, 3) /* VLAN Modification Control */
-#define GSWIP_FDMA_PCTRL_VLANMOD_SHIFT 3 /* VLAN Modification Control */
-#define GSWIP_FDMA_PCTRL_VLANMOD_DIS (0x0 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
-#define GSWIP_FDMA_PCTRL_VLANMOD_PRIO (0x1 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
-#define GSWIP_FDMA_PCTRL_VLANMOD_ID (0x2 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
-#define GSWIP_FDMA_PCTRL_VLANMOD_BOTH (0x3 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
-
-/* Ethernet Switch Store DMA Port Control Register */
-#define GSWIP_SDMA_PCTRLp(p) (0xBC0 + ((p) * 0x6))
-#define GSWIP_SDMA_PCTRL_EN BIT(0) /* SDMA Port Enable */
-#define GSWIP_SDMA_PCTRL_FCEN BIT(1) /* Flow Control Enable */
-#define GSWIP_SDMA_PCTRL_PAUFWD BIT(3) /* Pause Frame Forwarding */
-
-#define GSWIP_TABLE_ACTIVE_VLAN 0x01
-#define GSWIP_TABLE_VLAN_MAPPING 0x02
-#define GSWIP_TABLE_MAC_BRIDGE 0x0b
-#define GSWIP_TABLE_MAC_BRIDGE_KEY3_FID GENMASK(5, 0) /* Filtering identifier */
-#define GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT GENMASK(7, 4) /* Port on learned entries */
-#define GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC BIT(0) /* Static, non-aging entry */
-
-#define XRX200_GPHY_FW_ALIGN (16 * 1024)
-
-/* Maximum packet size supported by the switch. In theory this should be 10240,
- * but long packets currently cause lock-ups with an MTU of over 2526. Medium
- * packets are sometimes dropped (e.g. TCP over 2477, UDP over 2516-2519, ICMP
- * over 2526), hence an MTU value of 2400 seems safe. This issue only affects
- * packet reception. This is probably caused by the PPA engine, which is on the
- * RX part of the device. Packet transmission works properly up to 10240.
- */
-#define GSWIP_MAX_PACKET_LENGTH 2400
-
-struct gswip_hw_info {
- int max_ports;
- int cpu_port;
- const struct dsa_switch_ops *ops;
-};
-
struct xway_gphy_match_data {
char *fe_firmware_name;
char *ge_firmware_name;
};
-struct gswip_gphy_fw {
- struct clk *clk_gate;
- struct reset_control *reset;
- u32 fw_addr_offset;
- char *fw_name;
-};
-
-struct gswip_vlan {
- struct net_device *bridge;
- u16 vid;
- u8 fid;
-};
-
-struct gswip_priv {
- __iomem void *gswip;
- __iomem void *mdio;
- __iomem void *mii;
- const struct gswip_hw_info *hw_info;
- const struct xway_gphy_match_data *gphy_fw_name_cfg;
- struct dsa_switch *ds;
- struct device *dev;
- struct regmap *rcu_regmap;
- struct gswip_vlan vlans[64];
- int num_gphy_fw;
- struct gswip_gphy_fw *gphy_fw;
- u32 port_vlan_filter;
- struct mutex pce_table_lock;
-};
-
struct gswip_pce_table_entry {
u16 index; // PCE_TBL_ADDR.ADDR = pData->table_index
u16 table; // PCE_TBL_CTRL.ADDR = pData->table
@@ -426,15 +183,29 @@ static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set,
static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set,
int port)
{
- /* There's no MII_CFG register for the CPU port */
- if (!dsa_is_cpu_port(priv->ds, port))
- gswip_mii_mask(priv, clear, set, GSWIP_MII_CFGp(port));
+ int reg_port;
+
+ /* MII_CFG register only exists for MII ports */
+ if (!(priv->hw_info->mii_ports & BIT(port)))
+ return;
+
+ reg_port = port + priv->hw_info->mii_port_reg_offset;
+
+ gswip_mii_mask(priv, clear, set, GSWIP_MII_CFGp(reg_port));
}
static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set,
int port)
{
- switch (port) {
+ int reg_port;
+
+ /* MII_PCDU register only exists for MII ports */
+ if (!(priv->hw_info->mii_ports & BIT(port)))
+ return;
+
+ reg_port = port + priv->hw_info->mii_port_reg_offset;
+
+ switch (reg_port) {
case 0:
gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU0);
break;
@@ -515,6 +286,9 @@ static int gswip_mdio(struct gswip_priv *priv)
int err = 0;
mdio_np = of_get_compatible_child(switch_np, "lantiq,xrx200-mdio");
+ if (!mdio_np)
+ mdio_np = of_get_child_by_name(switch_np, "mdio");
+
if (!of_device_is_available(mdio_np))
goto out_put_node;
@@ -654,7 +428,6 @@ static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add)
{
struct gswip_pce_table_entry vlan_active = {0,};
struct gswip_pce_table_entry vlan_mapping = {0,};
- unsigned int cpu_port = priv->hw_info->cpu_port;
int err;
vlan_active.index = port + 1;
@@ -674,7 +447,7 @@ static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add)
vlan_mapping.index = port + 1;
vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
vlan_mapping.val[0] = 0 /* vid */;
- vlan_mapping.val[1] = BIT(port) | BIT(cpu_port);
+ vlan_mapping.val[1] = BIT(port) | dsa_cpu_ports(priv->ds);
vlan_mapping.val[2] = 0;
err = gswip_pce_table_entry_write(priv, &vlan_mapping);
if (err) {
@@ -685,18 +458,27 @@ static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add)
return 0;
}
-static int gswip_port_enable(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
+static int gswip_port_setup(struct dsa_switch *ds, int port)
{
struct gswip_priv *priv = ds->priv;
int err;
if (!dsa_is_cpu_port(ds, port)) {
- u32 mdio_phy = 0;
-
err = gswip_add_single_port_br(priv, port, true);
if (err)
return err;
+ }
+
+ return 0;
+}
+
+static int gswip_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ if (!dsa_is_cpu_port(ds, port)) {
+ u32 mdio_phy = 0;
if (phydev)
mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;
@@ -738,15 +520,15 @@ static int gswip_pce_load_microcode(struct gswip_priv *priv)
GSWIP_PCE_TBL_CTRL_OPMOD_ADWR, GSWIP_PCE_TBL_CTRL);
gswip_switch_w(priv, 0, GSWIP_PCE_TBL_MASK);
- for (i = 0; i < ARRAY_SIZE(gswip_pce_microcode); i++) {
+ for (i = 0; i < priv->hw_info->pce_microcode_size; i++) {
gswip_switch_w(priv, i, GSWIP_PCE_TBL_ADDR);
- gswip_switch_w(priv, gswip_pce_microcode[i].val_0,
+ gswip_switch_w(priv, (*priv->hw_info->pce_microcode)[i].val_0,
GSWIP_PCE_TBL_VAL(0));
- gswip_switch_w(priv, gswip_pce_microcode[i].val_1,
+ gswip_switch_w(priv, (*priv->hw_info->pce_microcode)[i].val_1,
GSWIP_PCE_TBL_VAL(1));
- gswip_switch_w(priv, gswip_pce_microcode[i].val_2,
+ gswip_switch_w(priv, (*priv->hw_info->pce_microcode)[i].val_2,
GSWIP_PCE_TBL_VAL(2));
- gswip_switch_w(priv, gswip_pce_microcode[i].val_3,
+ gswip_switch_w(priv, (*priv->hw_info->pce_microcode)[i].val_3,
GSWIP_PCE_TBL_VAL(3));
/* start the table access: */
@@ -804,10 +586,10 @@ static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
static int gswip_setup(struct dsa_switch *ds)
{
+ unsigned int cpu_ports = dsa_cpu_ports(ds);
struct gswip_priv *priv = ds->priv;
- unsigned int cpu_port = priv->hw_info->cpu_port;
- int i;
- int err;
+ struct dsa_port *cpu_dp;
+ int err, i;
gswip_switch_w(priv, GSWIP_SWRES_R0, GSWIP_SWRES);
usleep_range(5000, 10000);
@@ -829,9 +611,9 @@ static int gswip_setup(struct dsa_switch *ds)
}
/* Default unknown Broadcast/Multicast/Unicast port maps */
- gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP1);
- gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP2);
- gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP3);
+ gswip_switch_w(priv, cpu_ports, GSWIP_PCE_PMAP1);
+ gswip_switch_w(priv, cpu_ports, GSWIP_PCE_PMAP2);
+ gswip_switch_w(priv, cpu_ports, GSWIP_PCE_PMAP3);
/* Deactivate MDIO PHY auto polling. Some PHYs as the AR8030 have an
* interoperability problem with this auto polling mechanism because
@@ -854,19 +636,28 @@ static int gswip_setup(struct dsa_switch *ds)
/* Configure the MDIO Clock 2.5 MHz */
gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
+ /* bring up the mdio bus */
+ err = gswip_mdio(priv);
+ if (err) {
+ dev_err(priv->dev, "mdio bus setup failed\n");
+ return err;
+ }
+
/* Disable the xMII interface and clear its isolation bit */
for (i = 0; i < priv->hw_info->max_ports; i++)
gswip_mii_mask_cfg(priv,
GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE,
0, i);
- /* enable special tag insertion on cpu port */
- gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
- GSWIP_FDMA_PCTRLp(cpu_port));
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ /* enable special tag insertion on cpu port */
+ gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
+ GSWIP_FDMA_PCTRLp(cpu_dp->index));
- /* accept special tag in ingress direction */
- gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
- GSWIP_PCE_PCTRL_0p(cpu_port));
+ /* accept special tag in ingress direction */
+ gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
+ GSWIP_PCE_PCTRL_0p(cpu_dp->index));
+ }
gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD,
GSWIP_BM_QUEUE_GCTRL);
@@ -895,7 +686,9 @@ static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds,
int port,
enum dsa_tag_protocol mp)
{
- return DSA_TAG_PROTO_GSWIP;
+ struct gswip_priv *priv = ds->priv;
+
+ return priv->hw_info->tag_protocol;
}
static int gswip_vlan_active_create(struct gswip_priv *priv,
@@ -962,7 +755,6 @@ static int gswip_vlan_add_unaware(struct gswip_priv *priv,
{
struct gswip_pce_table_entry vlan_mapping = {0,};
unsigned int max_ports = priv->hw_info->max_ports;
- unsigned int cpu_port = priv->hw_info->cpu_port;
bool active_vlan_created = false;
int idx = -1;
int i;
@@ -1002,7 +794,7 @@ static int gswip_vlan_add_unaware(struct gswip_priv *priv,
}
/* Update the VLAN mapping entry and write it to the switch */
- vlan_mapping.val[1] |= BIT(cpu_port);
+ vlan_mapping.val[1] |= dsa_cpu_ports(priv->ds);
vlan_mapping.val[1] |= BIT(port);
err = gswip_pce_table_entry_write(priv, &vlan_mapping);
if (err) {
@@ -1024,7 +816,7 @@ static int gswip_vlan_add_aware(struct gswip_priv *priv,
{
struct gswip_pce_table_entry vlan_mapping = {0,};
unsigned int max_ports = priv->hw_info->max_ports;
- unsigned int cpu_port = priv->hw_info->cpu_port;
+ unsigned int cpu_ports = dsa_cpu_ports(priv->ds);
bool active_vlan_created = false;
int idx = -1;
int fid = -1;
@@ -1071,8 +863,8 @@ static int gswip_vlan_add_aware(struct gswip_priv *priv,
vlan_mapping.val[0] = vid;
/* Update the VLAN mapping entry and write it to the switch */
- vlan_mapping.val[1] |= BIT(cpu_port);
- vlan_mapping.val[2] |= BIT(cpu_port);
+ vlan_mapping.val[1] |= cpu_ports;
+ vlan_mapping.val[2] |= cpu_ports;
vlan_mapping.val[1] |= BIT(port);
if (untagged)
vlan_mapping.val[2] &= ~BIT(port);
@@ -1099,7 +891,6 @@ static int gswip_vlan_remove(struct gswip_priv *priv,
{
struct gswip_pce_table_entry vlan_mapping = {0,};
unsigned int max_ports = priv->hw_info->max_ports;
- unsigned int cpu_port = priv->hw_info->cpu_port;
int idx = -1;
int i;
int err;
@@ -1135,7 +926,7 @@ static int gswip_vlan_remove(struct gswip_priv *priv,
}
/* In case all ports are removed from the bridge, remove the VLAN */
- if ((vlan_mapping.val[1] & ~BIT(cpu_port)) == 0) {
+ if (!(vlan_mapping.val[1] & ~dsa_cpu_ports(priv->ds))) {
err = gswip_vlan_active_remove(priv, idx);
if (err) {
dev_err(priv->dev, "failed to write active VLAN: %d\n",
@@ -1359,8 +1150,9 @@ static int gswip_port_fdb(struct dsa_switch *ds, int port,
int i;
int err;
+ /* Operation not supported on the CPU port, don't return an error */
if (!bridge)
- return -EINVAL;
+ return 0;
for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
if (priv->vlans[i].bridge == bridge) {
@@ -1554,6 +1346,14 @@ static void gswip_xrx300_phylink_get_caps(struct dsa_switch *ds, int port,
MAC_10 | MAC_100 | MAC_1000;
}
+static void gswip_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ priv->hw_info->phylink_get_caps(ds, port, config);
+}
+
static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link)
{
u32 mdio_phy;
@@ -1672,6 +1472,10 @@ static void gswip_phylink_mac_config(struct phylink_config *config,
miicfg |= GSWIP_MII_CFG_LDCLKDIS;
switch (state->interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return;
case PHY_INTERFACE_MODE_MII:
case PHY_INTERFACE_MODE_INTERNAL:
miicfg |= GSWIP_MII_CFG_MODE_MIIM;
@@ -1820,38 +1624,29 @@ static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset)
return ARRAY_SIZE(gswip_rmon_cnt);
}
-static const struct phylink_mac_ops gswip_phylink_mac_ops = {
- .mac_config = gswip_phylink_mac_config,
- .mac_link_down = gswip_phylink_mac_link_down,
- .mac_link_up = gswip_phylink_mac_link_up,
-};
+static struct phylink_pcs *gswip_phylink_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct gswip_priv *priv = dp->ds->priv;
-static const struct dsa_switch_ops gswip_xrx200_switch_ops = {
- .get_tag_protocol = gswip_get_tag_protocol,
- .setup = gswip_setup,
- .port_enable = gswip_port_enable,
- .port_disable = gswip_port_disable,
- .port_bridge_join = gswip_port_bridge_join,
- .port_bridge_leave = gswip_port_bridge_leave,
- .port_fast_age = gswip_port_fast_age,
- .port_vlan_filtering = gswip_port_vlan_filtering,
- .port_vlan_add = gswip_port_vlan_add,
- .port_vlan_del = gswip_port_vlan_del,
- .port_stp_state_set = gswip_port_stp_state_set,
- .port_fdb_add = gswip_port_fdb_add,
- .port_fdb_del = gswip_port_fdb_del,
- .port_fdb_dump = gswip_port_fdb_dump,
- .port_change_mtu = gswip_port_change_mtu,
- .port_max_mtu = gswip_port_max_mtu,
- .phylink_get_caps = gswip_xrx200_phylink_get_caps,
- .get_strings = gswip_get_strings,
- .get_ethtool_stats = gswip_get_ethtool_stats,
- .get_sset_count = gswip_get_sset_count,
+ if (priv->hw_info->mac_select_pcs)
+ return priv->hw_info->mac_select_pcs(config, interface);
+
+ return NULL;
+}
+
+static const struct phylink_mac_ops gswip_phylink_mac_ops = {
+ .mac_config = gswip_phylink_mac_config,
+ .mac_link_down = gswip_phylink_mac_link_down,
+ .mac_link_up = gswip_phylink_mac_link_up,
+ .mac_select_pcs = gswip_phylink_mac_select_pcs,
};
-static const struct dsa_switch_ops gswip_xrx300_switch_ops = {
+static const struct dsa_switch_ops gswip_switch_ops = {
.get_tag_protocol = gswip_get_tag_protocol,
.setup = gswip_setup,
+ .port_setup = gswip_port_setup,
.port_enable = gswip_port_enable,
.port_disable = gswip_port_disable,
.port_bridge_join = gswip_port_bridge_join,
@@ -1866,7 +1661,7 @@ static const struct dsa_switch_ops gswip_xrx300_switch_ops = {
.port_fdb_dump = gswip_port_fdb_dump,
.port_change_mtu = gswip_port_change_mtu,
.port_max_mtu = gswip_port_max_mtu,
- .phylink_get_caps = gswip_xrx300_phylink_get_caps,
+ .phylink_get_caps = gswip_phylink_get_caps,
.get_strings = gswip_get_strings,
.get_ethtool_stats = gswip_get_ethtool_stats,
.get_sset_count = gswip_get_sset_count,
@@ -1935,8 +1730,7 @@ static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gph
memcpy(fw_addr, fw->data, fw->size);
} else {
release_firmware(fw);
- return dev_err_probe(dev, -ENOMEM,
- "failed to alloc firmware memory\n");
+ return -ENOMEM;
}
release_firmware(fw);
@@ -2093,6 +1887,30 @@ remove_gphy:
return err;
}
+static int gswip_validate_cpu_port(struct dsa_switch *ds)
+{
+ struct gswip_priv *priv = ds->priv;
+ struct dsa_port *cpu_dp;
+ int cpu_port = -1;
+
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ if (cpu_port != -1)
+ return dev_err_probe(ds->dev, -EINVAL,
+ "only a single CPU port is supported\n");
+
+ cpu_port = cpu_dp->index;
+ }
+
+ if (cpu_port == -1)
+ return dev_err_probe(ds->dev, -EINVAL, "no CPU port defined\n");
+
+ if (BIT(cpu_port) & ~priv->hw_info->allowed_cpu_ports)
+ return dev_err_probe(ds->dev, -EINVAL,
+ "unsupported CPU port defined\n");
+
+ return 0;
+}
+
static int gswip_probe(struct platform_device *pdev)
{
struct device_node *np, *gphy_fw_np;
@@ -2129,12 +1947,22 @@ static int gswip_probe(struct platform_device *pdev)
priv->ds->dev = dev;
priv->ds->num_ports = priv->hw_info->max_ports;
priv->ds->priv = priv;
- priv->ds->ops = priv->hw_info->ops;
+ priv->ds->ops = &gswip_switch_ops;
priv->ds->phylink_mac_ops = &gswip_phylink_mac_ops;
priv->dev = dev;
mutex_init(&priv->pce_table_lock);
version = gswip_switch_r(priv, GSWIP_VERSION);
+ /* The hardware has the 'major/minor' version bytes in the wrong order,
+ * preventing numerical comparisons. Construct a 16-bit unsigned integer
+ * having the REV field as most significant byte and the MOD field as
+ * least significant byte. This effectively swaps the two bytes of the
+ * version value, like swab16(), but without modifying the source
+ * variable.
+ */
+ priv->version = GSWIP_VERSION_REV(version) << 8 |
+ GSWIP_VERSION_MOD(version);
+
np = dev->of_node;
switch (version) {
case GSWIP_VERSION_2_0:
@@ -2163,30 +1991,20 @@ static int gswip_probe(struct platform_device *pdev)
"gphy fw probe failed\n");
}
- /* bring up the mdio bus */
- err = gswip_mdio(priv);
- if (err) {
- dev_err_probe(dev, err, "mdio probe failed\n");
- goto gphy_fw_remove;
- }
-
err = dsa_register_switch(priv->ds);
if (err) {
dev_err_probe(dev, err, "dsa switch registration failed\n");
goto gphy_fw_remove;
}
- if (!dsa_is_cpu_port(priv->ds, priv->hw_info->cpu_port)) {
- err = dev_err_probe(dev, -EINVAL,
- "wrong CPU port defined, HW only supports port: %i\n",
- priv->hw_info->cpu_port);
+
+ err = gswip_validate_cpu_port(priv->ds);
+ if (err)
goto disable_switch;
- }
platform_set_drvdata(pdev, priv);
dev_info(dev, "probed GSWIP version %lx mod %lx\n",
- (version & GSWIP_VERSION_REV_MASK) >> GSWIP_VERSION_REV_SHIFT,
- (version & GSWIP_VERSION_MOD_MASK) >> GSWIP_VERSION_MOD_SHIFT);
+ GSWIP_VERSION_REV(version), GSWIP_VERSION_MOD(version));
return 0;
disable_switch:
@@ -2229,14 +2047,24 @@ static void gswip_shutdown(struct platform_device *pdev)
static const struct gswip_hw_info gswip_xrx200 = {
.max_ports = 7,
- .cpu_port = 6,
- .ops = &gswip_xrx200_switch_ops,
+ .allowed_cpu_ports = BIT(6),
+ .mii_ports = BIT(0) | BIT(1) | BIT(5),
+ .mii_port_reg_offset = 0,
+ .phylink_get_caps = gswip_xrx200_phylink_get_caps,
+ .pce_microcode = &gswip_pce_microcode,
+ .pce_microcode_size = ARRAY_SIZE(gswip_pce_microcode),
+ .tag_protocol = DSA_TAG_PROTO_GSWIP,
};
static const struct gswip_hw_info gswip_xrx300 = {
.max_ports = 7,
- .cpu_port = 6,
- .ops = &gswip_xrx300_switch_ops,
+ .allowed_cpu_ports = BIT(6),
+ .mii_ports = BIT(0) | BIT(5),
+ .mii_port_reg_offset = 0,
+ .phylink_get_caps = gswip_xrx300_phylink_get_caps,
+ .pce_microcode = &gswip_pce_microcode,
+ .pce_microcode_size = ARRAY_SIZE(gswip_pce_microcode),
+ .tag_protocol = DSA_TAG_PROTO_GSWIP,
};
static const struct of_device_id gswip_of_match[] = {
diff --git a/drivers/net/dsa/lantiq/lantiq_gswip.h b/drivers/net/dsa/lantiq/lantiq_gswip.h
new file mode 100644
index 000000000000..2df9c8e8cfd0
--- /dev/null
+++ b/drivers/net/dsa/lantiq/lantiq_gswip.h
@@ -0,0 +1,276 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LANTIQ_GSWIP_H
+#define __LANTIQ_GSWIP_H
+
+#include <linux/clk.h>
+#include <linux/mutex.h>
+#include <linux/phylink.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/swab.h>
+#include <net/dsa.h>
+
+/* GSWIP MDIO Registers */
+#define GSWIP_MDIO_GLOB 0x00
+#define GSWIP_MDIO_GLOB_ENABLE BIT(15)
+#define GSWIP_MDIO_CTRL 0x08
+#define GSWIP_MDIO_CTRL_BUSY BIT(12)
+#define GSWIP_MDIO_CTRL_RD BIT(11)
+#define GSWIP_MDIO_CTRL_WR BIT(10)
+#define GSWIP_MDIO_CTRL_PHYAD_MASK 0x1f
+#define GSWIP_MDIO_CTRL_PHYAD_SHIFT 5
+#define GSWIP_MDIO_CTRL_REGAD_MASK 0x1f
+#define GSWIP_MDIO_READ 0x09
+#define GSWIP_MDIO_WRITE 0x0A
+#define GSWIP_MDIO_MDC_CFG0 0x0B
+#define GSWIP_MDIO_MDC_CFG1 0x0C
+#define GSWIP_MDIO_PHYp(p) (0x15 - (p))
+#define GSWIP_MDIO_PHY_LINK_MASK 0x6000
+#define GSWIP_MDIO_PHY_LINK_AUTO 0x0000
+#define GSWIP_MDIO_PHY_LINK_DOWN 0x4000
+#define GSWIP_MDIO_PHY_LINK_UP 0x2000
+#define GSWIP_MDIO_PHY_SPEED_MASK 0x1800
+#define GSWIP_MDIO_PHY_SPEED_AUTO 0x1800
+#define GSWIP_MDIO_PHY_SPEED_M10 0x0000
+#define GSWIP_MDIO_PHY_SPEED_M100 0x0800
+#define GSWIP_MDIO_PHY_SPEED_G1 0x1000
+#define GSWIP_MDIO_PHY_FDUP_MASK 0x0600
+#define GSWIP_MDIO_PHY_FDUP_AUTO 0x0000
+#define GSWIP_MDIO_PHY_FDUP_EN 0x0200
+#define GSWIP_MDIO_PHY_FDUP_DIS 0x0600
+#define GSWIP_MDIO_PHY_FCONTX_MASK 0x0180
+#define GSWIP_MDIO_PHY_FCONTX_AUTO 0x0000
+#define GSWIP_MDIO_PHY_FCONTX_EN 0x0100
+#define GSWIP_MDIO_PHY_FCONTX_DIS 0x0180
+#define GSWIP_MDIO_PHY_FCONRX_MASK 0x0060
+#define GSWIP_MDIO_PHY_FCONRX_AUTO 0x0000
+#define GSWIP_MDIO_PHY_FCONRX_EN 0x0020
+#define GSWIP_MDIO_PHY_FCONRX_DIS 0x0060
+#define GSWIP_MDIO_PHY_ADDR_MASK 0x001f
+#define GSWIP_MDIO_PHY_MASK (GSWIP_MDIO_PHY_ADDR_MASK | \
+ GSWIP_MDIO_PHY_FCONRX_MASK | \
+ GSWIP_MDIO_PHY_FCONTX_MASK | \
+ GSWIP_MDIO_PHY_LINK_MASK | \
+ GSWIP_MDIO_PHY_SPEED_MASK | \
+ GSWIP_MDIO_PHY_FDUP_MASK)
+
+/* GSWIP MII Registers */
+#define GSWIP_MII_CFGp(p) (0x2 * (p))
+#define GSWIP_MII_CFG_RESET BIT(15)
+#define GSWIP_MII_CFG_EN BIT(14)
+#define GSWIP_MII_CFG_ISOLATE BIT(13)
+#define GSWIP_MII_CFG_LDCLKDIS BIT(12)
+#define GSWIP_MII_CFG_RGMII_IBS BIT(8)
+#define GSWIP_MII_CFG_RMII_CLK BIT(7)
+#define GSWIP_MII_CFG_MODE_MIIP 0x0
+#define GSWIP_MII_CFG_MODE_MIIM 0x1
+#define GSWIP_MII_CFG_MODE_RMIIP 0x2
+#define GSWIP_MII_CFG_MODE_RMIIM 0x3
+#define GSWIP_MII_CFG_MODE_RGMII 0x4
+#define GSWIP_MII_CFG_MODE_GMII 0x9
+#define GSWIP_MII_CFG_MODE_MASK 0xf
+#define GSWIP_MII_CFG_RATE_M2P5 0x00
+#define GSWIP_MII_CFG_RATE_M25 0x10
+#define GSWIP_MII_CFG_RATE_M125 0x20
+#define GSWIP_MII_CFG_RATE_M50 0x30
+#define GSWIP_MII_CFG_RATE_AUTO 0x40
+#define GSWIP_MII_CFG_RATE_MASK 0x70
+#define GSWIP_MII_PCDU0 0x01
+#define GSWIP_MII_PCDU1 0x03
+#define GSWIP_MII_PCDU5 0x05
+#define GSWIP_MII_PCDU_TXDLY_MASK GENMASK(2, 0)
+#define GSWIP_MII_PCDU_RXDLY_MASK GENMASK(9, 7)
+
+/* GSWIP Core Registers */
+#define GSWIP_SWRES 0x000
+#define GSWIP_SWRES_R1 BIT(1) /* GSWIP Software reset */
+#define GSWIP_SWRES_R0 BIT(0) /* GSWIP Hardware reset */
+#define GSWIP_VERSION 0x013
+#define GSWIP_VERSION_REV_MASK GENMASK(7, 0)
+#define GSWIP_VERSION_MOD_MASK GENMASK(15, 8)
+#define GSWIP_VERSION_REV(v) FIELD_GET(GSWIP_VERSION_REV_MASK, v)
+#define GSWIP_VERSION_MOD(v) FIELD_GET(GSWIP_VERSION_MOD_MASK, v)
+#define GSWIP_VERSION_2_0 0x100
+#define GSWIP_VERSION_2_1 0x021
+#define GSWIP_VERSION_2_2 0x122
+#define GSWIP_VERSION_2_2_ETC 0x022
+/* The hardware has the 'major/minor' version bytes in the wrong order,
+ * preventing numerical comparisons. Swap the bytes of the 16-bit value
+ * to end up with REV being the most significant byte and MOD being the
+ * least significant byte, which then allows comparing it with the
+ * value stored in struct gswip_priv.
+ */
+#define GSWIP_VERSION_GE(priv, ver) ((priv)->version >= swab16(ver))
+
+#define GSWIP_BM_RAM_VAL(x) (0x043 - (x))
+#define GSWIP_BM_RAM_ADDR 0x044
+#define GSWIP_BM_RAM_CTRL 0x045
+#define GSWIP_BM_RAM_CTRL_BAS BIT(15)
+#define GSWIP_BM_RAM_CTRL_OPMOD BIT(5)
+#define GSWIP_BM_RAM_CTRL_ADDR_MASK GENMASK(4, 0)
+#define GSWIP_BM_QUEUE_GCTRL 0x04A
+#define GSWIP_BM_QUEUE_GCTRL_GL_MOD BIT(10)
+/* buffer management Port Configuration Register */
+#define GSWIP_BM_PCFGp(p) (0x080 + ((p) * 2))
+#define GSWIP_BM_PCFG_CNTEN BIT(0) /* RMON Counter Enable */
+#define GSWIP_BM_PCFG_IGCNT BIT(1) /* Ingress Special Tag RMON count */
+/* buffer management Port Control Register */
+#define GSWIP_BM_RMON_CTRLp(p) (0x81 + ((p) * 2))
+#define GSWIP_BM_CTRL_RMON_RAM1_RES BIT(0) /* Software Reset for RMON RAM 1 */
+#define GSWIP_BM_CTRL_RMON_RAM2_RES BIT(1) /* Software Reset for RMON RAM 2 */
+
+/* PCE */
+#define GSWIP_PCE_TBL_KEY(x) (0x447 - (x))
+#define GSWIP_PCE_TBL_MASK 0x448
+#define GSWIP_PCE_TBL_VAL(x) (0x44D - (x))
+#define GSWIP_PCE_TBL_ADDR 0x44E
+#define GSWIP_PCE_TBL_CTRL 0x44F
+#define GSWIP_PCE_TBL_CTRL_BAS BIT(15)
+#define GSWIP_PCE_TBL_CTRL_TYPE BIT(13)
+#define GSWIP_PCE_TBL_CTRL_VLD BIT(12)
+#define GSWIP_PCE_TBL_CTRL_KEYFORM BIT(11)
+#define GSWIP_PCE_TBL_CTRL_GMAP_MASK GENMASK(10, 7)
+#define GSWIP_PCE_TBL_CTRL_OPMOD_MASK GENMASK(6, 5)
+#define GSWIP_PCE_TBL_CTRL_OPMOD_ADRD 0x00
+#define GSWIP_PCE_TBL_CTRL_OPMOD_ADWR 0x20
+#define GSWIP_PCE_TBL_CTRL_OPMOD_KSRD 0x40
+#define GSWIP_PCE_TBL_CTRL_OPMOD_KSWR 0x60
+#define GSWIP_PCE_TBL_CTRL_ADDR_MASK GENMASK(4, 0)
+#define GSWIP_PCE_PMAP1 0x453 /* Monitoring port map */
+#define GSWIP_PCE_PMAP2 0x454 /* Default Multicast port map */
+#define GSWIP_PCE_PMAP3 0x455 /* Default Unknown Unicast port map */
+#define GSWIP_PCE_GCTRL_0 0x456
+#define GSWIP_PCE_GCTRL_0_MTFL BIT(0) /* MAC Table Flushing */
+#define GSWIP_PCE_GCTRL_0_MC_VALID BIT(3)
+#define GSWIP_PCE_GCTRL_0_VLAN BIT(14) /* VLAN aware Switching */
+#define GSWIP_PCE_GCTRL_1 0x457
+#define GSWIP_PCE_GCTRL_1_MAC_GLOCK BIT(2) /* MAC Address table lock */
+#define GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD BIT(3) /* MAC address table lock forwarding mode */
+#define GSWIP_PCE_PCTRL_0p(p) (0x480 + ((p) * 0xA))
+#define GSWIP_PCE_PCTRL_0_TVM BIT(5) /* Transparent VLAN mode */
+#define GSWIP_PCE_PCTRL_0_VREP BIT(6) /* VLAN Replace Mode */
+#define GSWIP_PCE_PCTRL_0_INGRESS BIT(11) /* Accept special tag in ingress */
+#define GSWIP_PCE_PCTRL_0_PSTATE_LISTEN 0x0
+#define GSWIP_PCE_PCTRL_0_PSTATE_RX 0x1
+#define GSWIP_PCE_PCTRL_0_PSTATE_TX 0x2
+#define GSWIP_PCE_PCTRL_0_PSTATE_LEARNING 0x3
+#define GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING 0x7
+#define GSWIP_PCE_PCTRL_0_PSTATE_MASK GENMASK(2, 0)
+#define GSWIP_PCE_VCTRL(p) (0x485 + ((p) * 0xA))
+#define GSWIP_PCE_VCTRL_UVR BIT(0) /* Unknown VLAN Rule */
+#define GSWIP_PCE_VCTRL_VIMR BIT(3) /* VLAN Ingress Member violation rule */
+#define GSWIP_PCE_VCTRL_VEMR BIT(4) /* VLAN Egress Member violation rule */
+#define GSWIP_PCE_VCTRL_VSR BIT(5) /* VLAN Security */
+#define GSWIP_PCE_VCTRL_VID0 BIT(6) /* Priority Tagged Rule */
+#define GSWIP_PCE_DEFPVID(p) (0x486 + ((p) * 0xA))
+
+#define GSWIP_MAC_FLEN 0x8C5
+#define GSWIP_MAC_CTRL_0p(p) (0x903 + ((p) * 0xC))
+#define GSWIP_MAC_CTRL_0_PADEN BIT(8)
+#define GSWIP_MAC_CTRL_0_FCS_EN BIT(7)
+#define GSWIP_MAC_CTRL_0_FCON_MASK 0x0070
+#define GSWIP_MAC_CTRL_0_FCON_AUTO 0x0000
+#define GSWIP_MAC_CTRL_0_FCON_RX 0x0010
+#define GSWIP_MAC_CTRL_0_FCON_TX 0x0020
+#define GSWIP_MAC_CTRL_0_FCON_RXTX 0x0030
+#define GSWIP_MAC_CTRL_0_FCON_NONE 0x0040
+#define GSWIP_MAC_CTRL_0_FDUP_MASK 0x000C
+#define GSWIP_MAC_CTRL_0_FDUP_AUTO 0x0000
+#define GSWIP_MAC_CTRL_0_FDUP_EN 0x0004
+#define GSWIP_MAC_CTRL_0_FDUP_DIS 0x000C
+#define GSWIP_MAC_CTRL_0_GMII_MASK 0x0003
+#define GSWIP_MAC_CTRL_0_GMII_AUTO 0x0000
+#define GSWIP_MAC_CTRL_0_GMII_MII 0x0001
+#define GSWIP_MAC_CTRL_0_GMII_RGMII 0x0002
+#define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC))
+#define GSWIP_MAC_CTRL_2_LCHKL BIT(2) /* Frame Length Check Long Enable */
+#define GSWIP_MAC_CTRL_2_MLEN BIT(3) /* Maximum Untagged Frame Length */
+
+/* Ethernet Switch Fetch DMA Port Control Register */
+#define GSWIP_FDMA_PCTRLp(p) (0xA80 + ((p) * 0x6))
+#define GSWIP_FDMA_PCTRL_EN BIT(0) /* FDMA Port Enable */
+#define GSWIP_FDMA_PCTRL_STEN BIT(1) /* Special Tag Insertion Enable */
+#define GSWIP_FDMA_PCTRL_VLANMOD_MASK GENMASK(4, 3) /* VLAN Modification Control */
+#define GSWIP_FDMA_PCTRL_VLANMOD_SHIFT 3 /* VLAN Modification Control */
+#define GSWIP_FDMA_PCTRL_VLANMOD_DIS (0x0 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
+#define GSWIP_FDMA_PCTRL_VLANMOD_PRIO (0x1 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
+#define GSWIP_FDMA_PCTRL_VLANMOD_ID (0x2 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
+#define GSWIP_FDMA_PCTRL_VLANMOD_BOTH (0x3 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT)
+
+/* Ethernet Switch Store DMA Port Control Register */
+#define GSWIP_SDMA_PCTRLp(p) (0xBC0 + ((p) * 0x6))
+#define GSWIP_SDMA_PCTRL_EN BIT(0) /* SDMA Port Enable */
+#define GSWIP_SDMA_PCTRL_FCEN BIT(1) /* Flow Control Enable */
+#define GSWIP_SDMA_PCTRL_PAUFWD BIT(3) /* Pause Frame Forwarding */
+
+#define GSWIP_TABLE_ACTIVE_VLAN 0x01
+#define GSWIP_TABLE_VLAN_MAPPING 0x02
+#define GSWIP_TABLE_MAC_BRIDGE 0x0b
+#define GSWIP_TABLE_MAC_BRIDGE_KEY3_FID GENMASK(5, 0) /* Filtering identifier */
+#define GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT GENMASK(7, 4) /* Port on learned entries */
+#define GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC BIT(0) /* Static, non-aging entry */
+
+#define XRX200_GPHY_FW_ALIGN (16 * 1024)
+
+/* Maximum packet size supported by the switch. In theory this should be 10240,
+ * but long packets currently cause lock-ups with an MTU of over 2526. Medium
+ * packets are sometimes dropped (e.g. TCP over 2477, UDP over 2516-2519, ICMP
+ * over 2526), hence an MTU value of 2400 seems safe. This issue only affects
+ * packet reception. This is probably caused by the PPA engine, which is on the
+ * RX part of the device. Packet transmission works properly up to 10240.
+ */
+#define GSWIP_MAX_PACKET_LENGTH 2400
+
+struct gswip_pce_microcode {
+ u16 val_3;
+ u16 val_2;
+ u16 val_1;
+ u16 val_0;
+};
+
+struct gswip_hw_info {
+ int max_ports;
+ unsigned int allowed_cpu_ports;
+ unsigned int mii_ports;
+ int mii_port_reg_offset;
+ const struct gswip_pce_microcode (*pce_microcode)[];
+ size_t pce_microcode_size;
+ enum dsa_tag_protocol tag_protocol;
+ void (*phylink_get_caps)(struct dsa_switch *ds, int port,
+ struct phylink_config *config);
+ struct phylink_pcs *(*mac_select_pcs)(struct phylink_config *config,
+ phy_interface_t interface);
+};
+
+struct gswip_gphy_fw {
+ struct clk *clk_gate;
+ struct reset_control *reset;
+ u32 fw_addr_offset;
+ char *fw_name;
+};
+
+struct gswip_vlan {
+ struct net_device *bridge;
+ u16 vid;
+ u8 fid;
+};
+
+struct gswip_priv {
+ __iomem void *gswip;
+ __iomem void *mdio;
+ __iomem void *mii;
+ const struct gswip_hw_info *hw_info;
+ const struct xway_gphy_match_data *gphy_fw_name_cfg;
+ struct dsa_switch *ds;
+ struct device *dev;
+ struct regmap *rcu_regmap;
+ struct gswip_vlan vlans[64];
+ int num_gphy_fw;
+ struct gswip_gphy_fw *gphy_fw;
+ u32 port_vlan_filter;
+ struct mutex pce_table_lock;
+ u16 version;
+};
+
+#endif /* __LANTIQ_GSWIP_H */
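
[Editor's note: a worked example of the byte-swapped version comparison defined above; commentary only, with values taken from the GSWIP_VERSION_* constants in this header.]

/* Raw GSWIP_VERSION register values compare in the wrong order: GSWIP 2.0
 * reads back as 0x100 and GSWIP 2.1 as 0x021, so 0x100 > 0x021 would call
 * the 2.0 core "newer". After probe swaps the bytes:
 *
 *   2.0: REV = 0x00, MOD = 0x01  ->  priv->version = 0x0001
 *   2.1: REV = 0x21, MOD = 0x00  ->  priv->version = 0x2100
 *
 * a test such as
 *
 *   if (GSWIP_VERSION_GE(priv, GSWIP_VERSION_2_1))
 *
 * expands to priv->version >= swab16(0x021), i.e. >= 0x2100, which is
 * true on 2.1 and later cores and false on 2.0, as intended.
 */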
diff --git a/drivers/net/dsa/lantiq_pce.h b/drivers/net/dsa/lantiq/lantiq_pce.h
index e2be31f3672a..659f9a0638d9 100644
--- a/drivers/net/dsa/lantiq_pce.h
+++ b/drivers/net/dsa/lantiq/lantiq_pce.h
@@ -7,6 +7,8 @@
* Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de>
*/
+#include "lantiq_gswip.h"
+
enum {
OUT_MAC0 = 0,
OUT_MAC1,
@@ -74,13 +76,6 @@ enum {
FLAG_NO, /*13*/
};
-struct gswip_pce_microcode {
- u16 val_3;
- u16 val_2;
- u16 val_1;
- u16 val_0;
-};
-
#define MC_ENTRY(val, msk, ns, out, len, type, flags, ipv4_len) \
{ val, msk, ((ns) << 10 | (out) << 4 | (len) >> 1),\
((len) & 1) << 15 | (type) << 13 | (flags) << 9 | (ipv4_len) << 8 }
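
[Editor's note: one subtlety worth spelling out. The per-SoC microcode hook is declared as a pointer to an array of unspecified size, const struct gswip_pce_microcode (*pce_microcode)[], which is why the load loop above dereferences it as (*priv->hw_info->pce_microcode)[i]. A small standalone sketch of that C construct follows; the table is hypothetical and the struct is the one from lantiq_gswip.h above.]

static const struct gswip_pce_microcode my_mc[] = {	/* hypothetical */
	{ 0, 0, 0, 0 },
	{ 0, 0, 0, 0 },
};

static u16 my_read_val0(const struct gswip_pce_microcode (*mc)[], size_t i)
{
	/* dereference the array pointer first, then index into it */
	return (*mc)[i].val_0;
}

/* usage: my_read_val0(&my_mc, 0); the pointee's length is erased, so
 * differently sized microcode tables can share one hw_info field, paired
 * with a separate pce_microcode_size.
 */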
diff --git a/drivers/net/dsa/microchip/ksz8.c b/drivers/net/dsa/microchip/ksz8.c
index 76e490070e9c..c354abdafc1b 100644
--- a/drivers/net/dsa/microchip/ksz8.c
+++ b/drivers/net/dsa/microchip/ksz8.c
@@ -36,15 +36,14 @@
static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
- regmap_update_bits(ksz_regmap_8(dev), addr, bits, set ? bits : 0);
+ ksz_rmw8(dev, addr, bits, set ? bits : 0);
}
static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
bool set)
{
- regmap_update_bits(ksz_regmap_8(dev),
- dev->dev_ops->get_port_addr(port, offset),
- bits, set ? bits : 0);
+ ksz_rmw8(dev, dev->dev_ops->get_port_addr(port, offset), bits,
+ set ? bits : 0);
}
/**
@@ -1955,16 +1954,19 @@ int ksz8_setup(struct dsa_switch *ds)
ksz_cfg(dev, S_LINK_AGING_CTRL, SW_LINK_AUTO_AGING, true);
/* Enable aggressive back off algorithm in half duplex mode. */
- regmap_update_bits(ksz_regmap_8(dev), REG_SW_CTRL_1,
- SW_AGGR_BACKOFF, SW_AGGR_BACKOFF);
+ ret = ksz_rmw8(dev, REG_SW_CTRL_1, SW_AGGR_BACKOFF, SW_AGGR_BACKOFF);
+ if (ret)
+ return ret;
/*
* Make sure unicast VLAN boundary is set as default and
* enable no excessive collision drop.
*/
- regmap_update_bits(ksz_regmap_8(dev), REG_SW_CTRL_2,
- UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP,
- UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP);
+ ret = ksz_rmw8(dev, REG_SW_CTRL_2,
+ UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP,
+ UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP);
+ if (ret)
+ return ret;
ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_REPLACE_VID, false);
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 7292bfe2f7ca..a962055bfdbd 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -23,6 +23,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/micrel_phy.h>
+#include <linux/pinctrl/consumer.h>
#include <net/dsa.h>
#include <net/ieee8021q.h>
#include <net/pkt_cls.h>
@@ -1447,6 +1448,7 @@ static const struct regmap_range ksz8873_valid_regs[] = {
regmap_reg_range(0x3f, 0x3f),
/* advanced control registers */
+ regmap_reg_range(0x43, 0x43),
regmap_reg_range(0x60, 0x6f),
regmap_reg_range(0x70, 0x75),
regmap_reg_range(0x76, 0x78),
@@ -2456,6 +2458,12 @@ static void ksz_update_port_member(struct ksz_device *dev, int port)
dev->dev_ops->cfg_port_member(dev, i, val | cpu_port);
}
+ /* HSR ports are set up once, so we need to use the assigned membership
+ * when the port is enabled.
+ */
+ if (!port_member && p->stp_state == BR_STATE_FORWARDING &&
+ (dev->hsr_ports & BIT(port)))
+ port_member = dev->hsr_ports;
dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port);
}
@@ -5338,6 +5346,38 @@ static int ksz_parse_drive_strength(struct ksz_device *dev)
return 0;
}
+static int ksz8463_configure_straps_spi(struct ksz_device *dev)
+{
+ struct pinctrl *pinctrl;
+ struct gpio_desc *rxd0;
+ struct gpio_desc *rxd1;
+
+ rxd0 = devm_gpiod_get_index_optional(dev->dev, "straps-rxd", 0, GPIOD_OUT_LOW);
+ if (IS_ERR(rxd0))
+ return PTR_ERR(rxd0);
+
+ rxd1 = devm_gpiod_get_index_optional(dev->dev, "straps-rxd", 1, GPIOD_OUT_HIGH);
+ if (IS_ERR(rxd1))
+ return PTR_ERR(rxd1);
+
+ if (!rxd0 && !rxd1)
+ return 0;
+
+ if ((rxd0 && !rxd1) || (rxd1 && !rxd0))
+ return -EINVAL;
+
+ pinctrl = devm_pinctrl_get_select(dev->dev, "reset");
+ if (IS_ERR(pinctrl))
+ return PTR_ERR(pinctrl);
+
+ return 0;
+}
+
+static int ksz8463_release_straps_spi(struct ksz_device *dev)
+{
+ return pinctrl_select_default_state(dev->dev);
+}
+
int ksz_switch_register(struct ksz_device *dev)
{
const struct ksz_chip_data *info;
@@ -5353,10 +5393,22 @@ int ksz_switch_register(struct ksz_device *dev)
return PTR_ERR(dev->reset_gpio);
if (dev->reset_gpio) {
+ if (of_device_is_compatible(dev->dev->of_node, "microchip,ksz8463")) {
+ ret = ksz8463_configure_straps_spi(dev);
+ if (ret)
+ return ret;
+ }
+
gpiod_set_value_cansleep(dev->reset_gpio, 1);
usleep_range(10000, 12000);
gpiod_set_value_cansleep(dev->reset_gpio, 0);
msleep(100);
+
+ if (of_device_is_compatible(dev->dev->of_node, "microchip,ksz8463")) {
+ ret = ksz8463_release_straps_spi(dev);
+ if (ret)
+ return ret;
+ }
}
mutex_init(&dev->dev_mutex);
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index e5bed4237ff4..548b85befbf4 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -2187,7 +2187,7 @@ mt7530_setup_gpio(struct mt7530_priv *priv)
gc->direction_input = mt7530_gpio_direction_input;
gc->direction_output = mt7530_gpio_direction_output;
gc->get = mt7530_gpio_get;
- gc->set_rv = mt7530_gpio_set;
+ gc->set = mt7530_gpio_set;
gc->base = -1;
gc->ngpio = 15;
gc->can_sleep = true;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 2281d6ab8c9a..b4d48997bf46 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -3965,6 +3965,8 @@ static void mv88e6xxx_teardown(struct dsa_switch *ds)
mv88e6xxx_teardown_devlink_params(ds);
dsa_devlink_resources_unregister(ds);
mv88e6xxx_teardown_devlink_regions_global(ds);
+ mv88e6xxx_hwtstamp_free(chip);
+ mv88e6xxx_ptp_free(chip);
mv88e6xxx_mdios_unregister(chip);
}
@@ -4105,7 +4107,7 @@ unlock:
mv88e6xxx_reg_unlock(chip);
if (err)
- goto out_mdios;
+ goto out_hwtstamp;
/* Have to be called without holding the register lock, since
* they take the devlink lock, and we later take the locks in
@@ -4114,7 +4116,7 @@ unlock:
*/
err = mv88e6xxx_setup_devlink_resources(ds);
if (err)
- goto out_mdios;
+ goto out_hwtstamp;
err = mv88e6xxx_setup_devlink_params(ds);
if (err)
@@ -4130,7 +4132,9 @@ out_params:
mv88e6xxx_teardown_devlink_params(ds);
out_resources:
dsa_devlink_resources_unregister(ds);
-out_mdios:
+out_hwtstamp:
+ mv88e6xxx_hwtstamp_free(chip);
+ mv88e6xxx_ptp_free(chip);
mv88e6xxx_mdios_unregister(chip);
return err;
@@ -5088,7 +5092,7 @@ static const struct mv88e6xxx_ops mv88e6250_ops = {
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
.avb_ops = &mv88e6352_avb_ops,
- .ptp_ops = &mv88e6250_ptp_ops,
+ .ptp_ops = &mv88e6352_ptp_ops,
.phylink_get_caps = mv88e6250_phylink_get_caps,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -7439,11 +7443,6 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
chip = ds->priv;
- if (chip->info->ptp_support) {
- mv88e6xxx_hwtstamp_free(chip);
- mv88e6xxx_ptp_free(chip);
- }
-
mv88e6xxx_unregister_switch(chip);
mv88e6xxx_g1_vtu_prob_irq_free(chip);
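
[Editor's note: the mv88e6xxx hunks move the hwtstamp/PTP teardown out of the mdiodev remove callback and into the DSA teardown and setup error paths, renaming the unwind label to match. The underlying pattern is the usual reverse-order goto ladder; a hedged sketch follows, in which the acquire_*/release_* helpers are hypothetical.]

static int my_setup(struct dsa_switch *ds)
{
	int err;

	err = acquire_mdios(ds);		/* hypothetical */
	if (err)
		return err;

	err = acquire_ptp(ds);			/* hypothetical */
	if (err)
		goto out_mdios;

	err = acquire_devlink(ds);		/* hypothetical */
	if (err)
		goto out_ptp;

	return 0;

out_ptp:
	release_ptp(ds);	/* freed before the MDIO buses, reverse order */
out_mdios:
	release_mdios(ds);
	return err;
}

/* my_teardown() releases in the same reverse order, so PTP resources are
 * always freed via the DSA teardown path, never from remove().
 */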
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index feddf505c918..2f211e55cb47 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -424,8 +424,6 @@ struct mv88e6xxx_chip {
struct ptp_clock_info ptp_clock_info;
struct delayed_work tai_event_work;
struct ptp_pin_desc pin_config[MV88E6XXX_MAX_GPIO];
- u16 trig_config;
- u16 evcap_config;
u16 enable_count;
/* Current ingress and egress monitor ports */
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
index f663799b0b3b..6e6472a3b75a 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
@@ -570,7 +570,7 @@ int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip)
}
/* Set the ethertype of L2 PTP messages */
- err = mv88e6xxx_ptp_write(chip, MV88E6XXX_PTP_GC_ETYPE, ETH_P_1588);
+ err = mv88e6xxx_ptp_write(chip, MV88E6XXX_PTP_ETHERTYPE, ETH_P_1588);
if (err)
return err;
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.h b/drivers/net/dsa/mv88e6xxx/hwtstamp.h
index 22e4acc957f0..c359821d5a6e 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.h
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.h
@@ -124,6 +124,7 @@ void mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
struct kernel_ethtool_ts_info *info);
+long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp);
int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip);
void mv88e6xxx_hwtstamp_free(struct mv88e6xxx_chip *chip);
int mv88e6352_hwtstamp_port_enable(struct mv88e6xxx_chip *chip, int port);
diff --git a/drivers/net/dsa/mv88e6xxx/leds.c b/drivers/net/dsa/mv88e6xxx/leds.c
index 1c88bfaea46b..ab3bc645da56 100644
--- a/drivers/net/dsa/mv88e6xxx/leds.c
+++ b/drivers/net/dsa/mv88e6xxx/leds.c
@@ -779,7 +779,8 @@ int mv88e6xxx_port_setup_leds(struct mv88e6xxx_chip *chip, int port)
continue;
if (led_num > 1) {
dev_err(dev, "invalid LED specified port %d\n", port);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_put_led;
}
if (led_num == 0)
@@ -823,17 +824,25 @@ int mv88e6xxx_port_setup_leds(struct mv88e6xxx_chip *chip, int port)
init_data.devname_mandatory = true;
init_data.devicename = kasprintf(GFP_KERNEL, "%s:0%d:0%d", chip->info->name,
port, led_num);
- if (!init_data.devicename)
- return -ENOMEM;
+ if (!init_data.devicename) {
+ ret = -ENOMEM;
+ goto err_put_led;
+ }
ret = devm_led_classdev_register_ext(dev, l, &init_data);
kfree(init_data.devicename);
if (ret) {
dev_err(dev, "Failed to init LED %d for port %d", led_num, port);
- return ret;
+ goto err_put_led;
}
}
+ fwnode_handle_put(leds);
return 0;
+
+err_put_led:
+ fwnode_handle_put(led);
+ fwnode_handle_put(leds);
+ return ret;
}
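
[Editor's note: the LED fix above plugs fwnode reference leaks on the error paths. The child-node iterator holds a reference on the current node, and the parent "leds" node was obtained with a get as well, so every early exit must put both. A condensed sketch of the rule follows; my_register_one_led is a hypothetical helper.]

#include <linux/property.h>

static int my_register_one_led(struct device *dev, struct fwnode_handle *led);

static int my_setup_leds(struct device *dev)
{
	struct fwnode_handle *leds, *led;
	int ret;

	leds = device_get_named_child_node(dev, "leds");
	if (!leds)
		return 0;

	fwnode_for_each_child_node(leds, led) {
		ret = my_register_one_led(dev, led);
		if (ret) {
			/* the iterator's reference on 'led' must be dropped
			 * when breaking out early, plus the parent's
			 */
			fwnode_handle_put(led);
			fwnode_handle_put(leds);
			return ret;
		}
	}

	fwnode_handle_put(leds);
	return 0;
}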
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c
index e8c9207e932e..f7603573d3a9 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.c
+++ b/drivers/net/dsa/mv88e6xxx/ptp.c
@@ -144,7 +144,7 @@ static u64 mv88e6352_ptp_clock_read(struct cyclecounter *cc)
u16 phc_time[2];
int err;
- err = mv88e6xxx_tai_read(chip, MV88E6XXX_TAI_TIME_LO, phc_time,
+ err = mv88e6xxx_tai_read(chip, MV88E6352_TAI_TIME_LO, phc_time,
ARRAY_SIZE(phc_time));
if (err)
return 0;
@@ -158,7 +158,7 @@ static u64 mv88e6165_ptp_clock_read(struct cyclecounter *cc)
u16 phc_time[2];
int err;
- err = mv88e6xxx_tai_read(chip, MV88E6XXX_PTP_GC_TIME_LO, phc_time,
+ err = mv88e6xxx_tai_read(chip, MV88E6165_PTP_GC_TIME_LO, phc_time,
ARRAY_SIZE(phc_time));
if (err)
return 0;
@@ -167,42 +167,26 @@ static u64 mv88e6165_ptp_clock_read(struct cyclecounter *cc)
}
/* mv88e6352_config_eventcap - configure TAI event capture
- * @event: PTP_CLOCK_PPS (internal) or PTP_CLOCK_EXTTS (external)
* @rising: zero for falling-edge trigger, else rising-edge trigger
*
* This will also reset the capture sequence counter.
*/
-static int mv88e6352_config_eventcap(struct mv88e6xxx_chip *chip, int event,
- int rising)
+static int mv88e6352_config_eventcap(struct mv88e6xxx_chip *chip, int rising)
{
- u16 global_config;
- u16 cap_config;
+ u16 evcap_config;
int err;
- chip->evcap_config = MV88E6XXX_TAI_CFG_CAP_OVERWRITE |
- MV88E6XXX_TAI_CFG_CAP_CTR_START;
+ evcap_config = MV88E6352_TAI_CFG_CAP_OVERWRITE |
+ MV88E6352_TAI_CFG_CAP_CTR_START;
if (!rising)
- chip->evcap_config |= MV88E6XXX_TAI_CFG_EVREQ_FALLING;
+ evcap_config |= MV88E6352_TAI_CFG_EVREQ_FALLING;
- global_config = (chip->evcap_config | chip->trig_config);
- err = mv88e6xxx_tai_write(chip, MV88E6XXX_TAI_CFG, global_config);
+ err = mv88e6xxx_tai_write(chip, MV88E6352_TAI_CFG, evcap_config);
if (err)
return err;
- if (event == PTP_CLOCK_PPS) {
- cap_config = MV88E6XXX_TAI_EVENT_STATUS_CAP_TRIG;
- } else if (event == PTP_CLOCK_EXTTS) {
- /* if STATUS_CAP_TRIG is unset we capture PTP_EVREQ events */
- cap_config = 0;
- } else {
- return -EINVAL;
- }
-
/* Write the capture config; this also clears the capture counter */
- err = mv88e6xxx_tai_write(chip, MV88E6XXX_TAI_EVENT_STATUS,
- cap_config);
-
- return err;
+ return mv88e6xxx_tai_write(chip, MV88E6352_TAI_EVENT_STATUS, 0);
}
static void mv88e6352_tai_event_work(struct work_struct *ugly)
@@ -215,7 +199,7 @@ static void mv88e6352_tai_event_work(struct work_struct *ugly)
int err;
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_tai_read(chip, MV88E6XXX_TAI_EVENT_STATUS,
+ err = mv88e6xxx_tai_read(chip, MV88E6352_TAI_EVENT_STATUS,
status, ARRAY_SIZE(status));
mv88e6xxx_reg_unlock(chip);
@@ -223,19 +207,19 @@ static void mv88e6352_tai_event_work(struct work_struct *ugly)
dev_err(chip->dev, "failed to read TAI status register\n");
return;
}
- if (status[0] & MV88E6XXX_TAI_EVENT_STATUS_ERROR) {
+ if (status[0] & MV88E6352_TAI_EVENT_STATUS_ERROR) {
dev_warn(chip->dev, "missed event capture\n");
return;
}
- if (!(status[0] & MV88E6XXX_TAI_EVENT_STATUS_VALID))
+ if (!(status[0] & MV88E6352_TAI_EVENT_STATUS_VALID))
goto out;
raw_ts = ((u32)status[2] << 16) | status[1];
/* Clear the valid bit so the next timestamp can come in */
- status[0] &= ~MV88E6XXX_TAI_EVENT_STATUS_VALID;
+ status[0] &= ~MV88E6352_TAI_EVENT_STATUS_VALID;
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_tai_write(chip, MV88E6XXX_TAI_EVENT_STATUS, status[0]);
+ err = mv88e6xxx_tai_write(chip, MV88E6352_TAI_EVENT_STATUS, status[0]);
mv88e6xxx_reg_unlock(chip);
if (err) {
dev_err(chip->dev, "failed to write TAI status register\n");
@@ -355,7 +339,7 @@ static int mv88e6352_ptp_enable_extts(struct mv88e6xxx_chip *chip,
schedule_delayed_work(&chip->tai_event_work,
TAI_EVENT_WORK_INTERVAL);
- err = mv88e6352_config_eventcap(chip, PTP_CLOCK_EXTTS, rising);
+ err = mv88e6352_config_eventcap(chip, rising);
} else {
func = MV88E6352_G2_SCRATCH_GPIO_PCTL_GPIO;
@@ -413,29 +397,6 @@ const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {
(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
};
-const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = {
- .clock_read = mv88e6352_ptp_clock_read,
- .ptp_enable = mv88e6352_ptp_enable,
- .ptp_verify = mv88e6352_ptp_verify,
- .event_work = mv88e6352_tai_event_work,
- .port_enable = mv88e6352_hwtstamp_port_enable,
- .port_disable = mv88e6352_hwtstamp_port_disable,
- .n_ext_ts = 1,
- .arr0_sts_reg = MV88E6XXX_PORT_PTP_ARR0_STS,
- .arr1_sts_reg = MV88E6XXX_PORT_PTP_ARR1_STS,
- .dep_sts_reg = MV88E6XXX_PORT_PTP_DEP_STS,
- .rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
-};
-
const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {
.clock_read = mv88e6352_ptp_clock_read,
.ptp_enable = mv88e6352_ptp_enable,
@@ -590,6 +551,7 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
return 0;
}
+/* This must never be called while holding the register lock */
void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip)
{
if (chip->ptp_clock) {
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.h b/drivers/net/dsa/mv88e6xxx/ptp.h
index 6c4d09adc93c..95bdddb0bf39 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.h
+++ b/drivers/net/dsa/mv88e6xxx/ptp.h
@@ -16,132 +16,56 @@
#include "chip.h"
/* Offset 0x00: TAI Global Config */
-#define MV88E6XXX_TAI_CFG 0x00
-#define MV88E6XXX_TAI_CFG_CAP_OVERWRITE 0x8000
-#define MV88E6XXX_TAI_CFG_CAP_CTR_START 0x4000
-#define MV88E6XXX_TAI_CFG_EVREQ_FALLING 0x2000
-#define MV88E6XXX_TAI_CFG_TRIG_ACTIVE_LO 0x1000
-#define MV88E6XXX_TAI_CFG_IRL_ENABLE 0x0400
-#define MV88E6XXX_TAI_CFG_TRIG_IRQ_EN 0x0200
-#define MV88E6XXX_TAI_CFG_EVREQ_IRQ_EN 0x0100
-#define MV88E6XXX_TAI_CFG_TRIG_LOCK 0x0080
-#define MV88E6XXX_TAI_CFG_BLOCK_UPDATE 0x0008
-#define MV88E6XXX_TAI_CFG_MULTI_PTP 0x0004
-#define MV88E6XXX_TAI_CFG_TRIG_MODE_ONESHOT 0x0002
-#define MV88E6XXX_TAI_CFG_TRIG_ENABLE 0x0001
+#define MV88E6352_TAI_CFG 0x00
+#define MV88E6352_TAI_CFG_CAP_OVERWRITE 0x8000
+#define MV88E6352_TAI_CFG_CAP_CTR_START 0x4000
+#define MV88E6352_TAI_CFG_EVREQ_FALLING 0x2000
+#define MV88E6352_TAI_CFG_TRIG_ACTIVE_LO 0x1000
+#define MV88E6352_TAI_CFG_IRL_ENABLE 0x0400
+#define MV88E6352_TAI_CFG_TRIG_IRQ_EN 0x0200
+#define MV88E6352_TAI_CFG_EVREQ_IRQ_EN 0x0100
+#define MV88E6352_TAI_CFG_TRIG_LOCK 0x0080
+#define MV88E6352_TAI_CFG_BLOCK_UPDATE 0x0008
+#define MV88E6352_TAI_CFG_MULTI_PTP 0x0004
+#define MV88E6352_TAI_CFG_TRIG_MODE_ONESHOT 0x0002
+#define MV88E6352_TAI_CFG_TRIG_ENABLE 0x0001
/* Offset 0x01: Timestamp Clock Period (ps) */
#define MV88E6XXX_TAI_CLOCK_PERIOD 0x01
-/* Offset 0x02/0x03: Trigger Generation Amount */
-#define MV88E6XXX_TAI_TRIG_GEN_AMOUNT_LO 0x02
-#define MV88E6XXX_TAI_TRIG_GEN_AMOUNT_HI 0x03
-
-/* Offset 0x04: Clock Compensation */
-#define MV88E6XXX_TAI_TRIG_CLOCK_COMP 0x04
-
-/* Offset 0x05: Trigger Configuration */
-#define MV88E6XXX_TAI_TRIG_CFG 0x05
-
-/* Offset 0x06: Ingress Rate Limiter Clock Generation Amount */
-#define MV88E6XXX_TAI_IRL_AMOUNT 0x06
-
-/* Offset 0x07: Ingress Rate Limiter Compensation */
-#define MV88E6XXX_TAI_IRL_COMP 0x07
-
-/* Offset 0x08: Ingress Rate Limiter Compensation */
-#define MV88E6XXX_TAI_IRL_COMP_PS 0x08
-
/* Offset 0x09: Event Status */
-#define MV88E6XXX_TAI_EVENT_STATUS 0x09
-#define MV88E6XXX_TAI_EVENT_STATUS_CAP_TRIG 0x4000
-#define MV88E6XXX_TAI_EVENT_STATUS_ERROR 0x0200
-#define MV88E6XXX_TAI_EVENT_STATUS_VALID 0x0100
-#define MV88E6XXX_TAI_EVENT_STATUS_CTR_MASK 0x00ff
-
-/* Offset 0x0A/0x0B: Event Time */
-#define MV88E6XXX_TAI_EVENT_TIME_LO 0x0a
-#define MV88E6XXX_TAI_EVENT_TYPE_HI 0x0b
+#define MV88E6352_TAI_EVENT_STATUS 0x09
+#define MV88E6352_TAI_EVENT_STATUS_ERROR 0x0200
+#define MV88E6352_TAI_EVENT_STATUS_VALID 0x0100
+#define MV88E6352_TAI_EVENT_STATUS_CTR_MASK 0x00ff
+/* Offset 0x0A/0x0B: Event Time Lo/Hi. Always read with Event Status. */
/* Offset 0x0E/0x0F: PTP Global Time */
-#define MV88E6XXX_TAI_TIME_LO 0x0e
-#define MV88E6XXX_TAI_TIME_HI 0x0f
-
-/* Offset 0x10/0x11: Trig Generation Time */
-#define MV88E6XXX_TAI_TRIG_TIME_LO 0x10
-#define MV88E6XXX_TAI_TRIG_TIME_HI 0x11
-
-/* Offset 0x12: Lock Status */
-#define MV88E6XXX_TAI_LOCK_STATUS 0x12
-
-/* Offset 0x00: Ether Type */
-#define MV88E6XXX_PTP_GC_ETYPE 0x00
+#define MV88E6352_TAI_TIME_LO 0x0e
+#define MV88E6352_TAI_TIME_HI 0x0f
/* 6165 Global Control Registers */
-/* Offset 0x00: Ether Type */
-#define MV88E6XXX_PTP_GC_ETYPE 0x00
-
-/* Offset 0x01: Message ID */
-#define MV88E6XXX_PTP_GC_MESSAGE_ID 0x01
-
-/* Offset 0x02: Time Stamp Arrive Time */
-#define MV88E6XXX_PTP_GC_TS_ARR_PTR 0x02
-
-/* Offset 0x03: Port Arrival Interrupt Enable */
-#define MV88E6XXX_PTP_GC_PORT_ARR_INT_EN 0x03
-
-/* Offset 0x04: Port Departure Interrupt Enable */
-#define MV88E6XXX_PTP_GC_PORT_DEP_INT_EN 0x04
-
-/* Offset 0x05: Configuration */
-#define MV88E6XXX_PTP_GC_CONFIG 0x05
-#define MV88E6XXX_PTP_GC_CONFIG_DIS_OVERWRITE BIT(1)
-#define MV88E6XXX_PTP_GC_CONFIG_DIS_TS BIT(0)
-
-/* Offset 0x8: Interrupt Status */
-#define MV88E6XXX_PTP_GC_INT_STATUS 0x08
-
/* Offset 0x9/0xa: Global Time */
-#define MV88E6XXX_PTP_GC_TIME_LO 0x09
-#define MV88E6XXX_PTP_GC_TIME_HI 0x0A
+#define MV88E6165_PTP_GC_TIME_LO 0x09
+#define MV88E6165_PTP_GC_TIME_HI 0x0A
-/* 6165 Per Port Registers */
+/* 6165 Per Port Registers. The arrival and departure registers are a
+ * common block consisting of status, two time registers and the sequence ID.
+ */
/* Offset 0: Arrival Time 0 Status */
#define MV88E6165_PORT_PTP_ARR0_STS 0x00
-/* Offset 0x01/0x02: PTP Arrival 0 Time */
-#define MV88E6165_PORT_PTP_ARR0_TIME_LO 0x01
-#define MV88E6165_PORT_PTP_ARR0_TIME_HI 0x02
-
-/* Offset 0x03: PTP Arrival 0 Sequence ID */
-#define MV88E6165_PORT_PTP_ARR0_SEQID 0x03
-
/* Offset 0x04: PTP Arrival 1 Status */
#define MV88E6165_PORT_PTP_ARR1_STS 0x04
-/* Offset 0x05/0x6E: PTP Arrival 1 Time */
-#define MV88E6165_PORT_PTP_ARR1_TIME_LO 0x05
-#define MV88E6165_PORT_PTP_ARR1_TIME_HI 0x06
-
-/* Offset 0x07: PTP Arrival 1 Sequence ID */
-#define MV88E6165_PORT_PTP_ARR1_SEQID 0x07
-
/* Offset 0x08: PTP Departure Status */
#define MV88E6165_PORT_PTP_DEP_STS 0x08
-/* Offset 0x09/0x0a: PTP Deperture Time */
-#define MV88E6165_PORT_PTP_DEP_TIME_LO 0x09
-#define MV88E6165_PORT_PTP_DEP_TIME_HI 0x0a
-
-/* Offset 0x0b: PTP Departure Sequence ID */
-#define MV88E6165_PORT_PTP_DEP_SEQID 0x0b
-
/* Offset 0x0d: Port Status */
#define MV88E6164_PORT_STATUS 0x0d
#ifdef CONFIG_NET_DSA_MV88E6XXX_PTP
-long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp);
int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip);
void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip);
@@ -149,17 +73,11 @@ void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip);
ptp_clock_info)
extern const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops;
-extern const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops;
extern const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops;
extern const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops;
#else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */
-static inline long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp)
-{
- return -1;
-}
-
static inline int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
{
return 0;
@@ -170,7 +88,6 @@ static inline void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip)
}
static const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {};
-static const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = {};
static const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {};
static const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops = {};
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 2dd4e56e1cf1..20ab558fde24 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -1153,6 +1153,9 @@ static void felix_phylink_get_caps(struct dsa_switch *ds, int port,
__set_bit(ocelot->ports[port]->phy_mode,
config->supported_interfaces);
+ if (ocelot->ports[port]->phy_mode == PHY_INTERFACE_MODE_USXGMII)
+ __set_bit(PHY_INTERFACE_MODE_10G_QXGMII,
+ config->supported_interfaces);
}
static void felix_phylink_mac_config(struct phylink_config *config,
@@ -1359,6 +1362,7 @@ static const u32 felix_phy_match_table[PHY_INTERFACE_MODE_MAX] = {
[PHY_INTERFACE_MODE_SGMII] = OCELOT_PORT_MODE_SGMII,
[PHY_INTERFACE_MODE_QSGMII] = OCELOT_PORT_MODE_QSGMII,
[PHY_INTERFACE_MODE_USXGMII] = OCELOT_PORT_MODE_USXGMII,
+ [PHY_INTERFACE_MODE_10G_QXGMII] = OCELOT_PORT_MODE_10G_QXGMII,
[PHY_INTERFACE_MODE_1000BASEX] = OCELOT_PORT_MODE_1000BASEX,
[PHY_INTERFACE_MODE_2500BASEX] = OCELOT_PORT_MODE_2500BASEX,
};
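Taken together, the felix hunks wire the new mode end to end: get_caps advertises 10G-QXGMII on ports strapped for USXGMII, felix_phy_match_table translates the phy_interface_t into an OCELOT_PORT_MODE_* bit, and the per-port masks in vsc9959_port_modes gate which ports accept it. A hedged sketch of that gating, assuming the two tables above (this is the shape of the check, not the literal upstream helper):

#include <linux/phy.h>

static bool port_mode_supported(const u32 *port_modes, int port,
				phy_interface_t mode)
{
	/* a port supports a mode iff its capability mask contains the
	 * bit the match table assigns to that phy_interface_t */
	return port_modes[port] & felix_phy_match_table[mode];
}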
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index 211991f494e3..a657b190c5d7 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -12,8 +12,9 @@
#define OCELOT_PORT_MODE_SGMII BIT(1)
#define OCELOT_PORT_MODE_QSGMII BIT(2)
#define OCELOT_PORT_MODE_2500BASEX BIT(3)
-#define OCELOT_PORT_MODE_USXGMII BIT(4)
+#define OCELOT_PORT_MODE_USXGMII BIT(4) /* compatibility */
#define OCELOT_PORT_MODE_1000BASEX BIT(5)
+#define OCELOT_PORT_MODE_10G_QXGMII BIT(6)
struct device_node;
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 7b35d24c38d7..8cf4c8986587 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -34,7 +34,8 @@
OCELOT_PORT_MODE_QSGMII | \
OCELOT_PORT_MODE_1000BASEX | \
OCELOT_PORT_MODE_2500BASEX | \
- OCELOT_PORT_MODE_USXGMII)
+ OCELOT_PORT_MODE_USXGMII | \
+ OCELOT_PORT_MODE_10G_QXGMII)
static const u32 vsc9959_port_modes[VSC9959_NUM_PORTS] = {
VSC9959_PORT_MODE_SERDES,
diff --git a/drivers/net/dsa/realtek/realtek.h b/drivers/net/dsa/realtek/realtek.h
index a1b2e0b529d5..c03485a80d93 100644
--- a/drivers/net/dsa/realtek/realtek.h
+++ b/drivers/net/dsa/realtek/realtek.h
@@ -19,9 +19,6 @@
struct phylink_mac_ops;
struct realtek_ops;
-struct dentry;
-struct inode;
-struct file;
struct rtl8366_mib_counter {
unsigned int base;
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index 4f9687ab3b2b..9d31b8258268 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -2317,7 +2317,7 @@ static int vsc73xx_gpio_probe(struct vsc73xx *vsc)
vsc->gc.parent = vsc->dev;
vsc->gc.base = -1;
vsc->gc.get = vsc73xx_gpio_get;
- vsc->gc.set_rv = vsc73xx_gpio_set;
+ vsc->gc.set = vsc73xx_gpio_set;
vsc->gc.direction_input = vsc73xx_gpio_direction_input;
vsc->gc.direction_output = vsc73xx_gpio_direction_output;
vsc->gc.get_direction = vsc73xx_gpio_get_direction;
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index f86d4557d8d7..aead145dd91d 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -188,6 +188,7 @@ source "drivers/net/ethernet/sis/Kconfig"
source "drivers/net/ethernet/sfc/Kconfig"
source "drivers/net/ethernet/smsc/Kconfig"
source "drivers/net/ethernet/socionext/Kconfig"
+source "drivers/net/ethernet/spacemit/Kconfig"
source "drivers/net/ethernet/stmicro/Kconfig"
source "drivers/net/ethernet/sun/Kconfig"
source "drivers/net/ethernet/sunplus/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 67182339469a..998dd628b202 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -91,6 +91,7 @@ obj-$(CONFIG_NET_VENDOR_SOLARFLARE) += sfc/
obj-$(CONFIG_NET_VENDOR_SGI) += sgi/
obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
obj-$(CONFIG_NET_VENDOR_SOCIONEXT) += socionext/
+obj-$(CONFIG_NET_VENDOR_SPACEMIT) += spacemit/
obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/
obj-$(CONFIG_NET_VENDOR_SUN) += sun/
obj-$(CONFIG_NET_VENDOR_SUNPLUS) += sunplus/
diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c
index e6b802e3d844..833dd911980b 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.c
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
@@ -698,7 +698,8 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
reason = FIELD_GET(AIROHA_RXD4_PPE_CPU_REASON, msg1);
if (reason == PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
- airoha_ppe_check_skb(eth->ppe, q->skb, hash);
+ airoha_ppe_check_skb(&eth->ppe->dev, q->skb, hash,
+ false);
done++;
napi_gro_receive(&q->napi, q->skb);
@@ -1709,7 +1710,9 @@ static void airhoha_set_gdm2_loopback(struct airoha_gdm_port *port)
airoha_fe_wr(eth, REG_GDM_RXCHN_EN(2), 0xffff);
airoha_fe_rmw(eth, REG_GDM_LPBK_CFG(2),
LPBK_CHAN_MASK | LPBK_MODE_MASK | LPBK_EN_MASK,
- FIELD_PREP(LPBK_CHAN_MASK, chan) | LPBK_EN_MASK);
+ FIELD_PREP(LPBK_CHAN_MASK, chan) |
+ LBK_GAP_MODE_MASK | LBK_LEN_MODE_MASK |
+ LBK_CHAN_MODE_MASK | LPBK_EN_MASK);
airoha_fe_rmw(eth, REG_GDM_LEN_CFG(2),
GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
@@ -2599,13 +2602,15 @@ static int airoha_dev_setup_tc_block_cb(enum tc_setup_type type,
void *type_data, void *cb_priv)
{
struct net_device *dev = cb_priv;
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_eth *eth = port->qdma->eth;
if (!tc_can_offload(dev))
return -EOPNOTSUPP;
switch (type) {
case TC_SETUP_CLSFLOWER:
- return airoha_ppe_setup_tc_block_cb(dev, type_data);
+ return airoha_ppe_setup_tc_block_cb(&eth->ppe->dev, type_data);
case TC_SETUP_CLSMATCHALL:
return airoha_dev_tc_matchall(dev, type_data);
default:
diff --git a/drivers/net/ethernet/airoha/airoha_eth.h b/drivers/net/ethernet/airoha/airoha_eth.h
index a970b789cf23..cd13c1c1224f 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.h
+++ b/drivers/net/ethernet/airoha/airoha_eth.h
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/reset.h>
+#include <linux/soc/airoha/airoha_offload.h>
#include <net/dsa.h>
#define AIROHA_MAX_NUM_GDM_PORTS 4
@@ -229,10 +230,6 @@ struct airoha_hw_stats {
};
enum {
- PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED = 0x0f,
-};
-
-enum {
AIROHA_FOE_STATE_INVALID,
AIROHA_FOE_STATE_UNBIND,
AIROHA_FOE_STATE_BIND,
@@ -252,6 +249,10 @@ enum {
#define AIROHA_FOE_MAC_SMAC_ID GENMASK(20, 16)
#define AIROHA_FOE_MAC_PPPOE_ID GENMASK(15, 0)
+#define AIROHA_FOE_MAC_WDMA_QOS GENMASK(15, 12)
+#define AIROHA_FOE_MAC_WDMA_BAND BIT(11)
+#define AIROHA_FOE_MAC_WDMA_WCID GENMASK(10, 0)
+
struct airoha_foe_mac_info_common {
u16 vlan1;
u16 etype;
@@ -470,7 +471,6 @@ struct airoha_flow_table_entry {
};
};
- struct airoha_foe_entry data;
struct hlist_node l2_subflow_node; /* PPE L2 subflow entry */
u32 hash;
@@ -479,6 +479,16 @@ struct airoha_flow_table_entry {
struct rhash_head node;
unsigned long cookie;
+
+ /* Must be last -- ends in a flexible-array member. */
+ struct airoha_foe_entry data;
+};
+
+struct airoha_wdma_info {
+ u8 idx;
+ u8 queue;
+ u16 wcid;
+ u8 bss;
};
/* RX queue to IRQ mapping: BIT(q) in IRQ(n) */
@@ -535,6 +545,7 @@ struct airoha_gdm_port {
#define AIROHA_RXD4_FOE_ENTRY GENMASK(15, 0)
struct airoha_ppe {
+ struct airoha_ppe_dev dev;
struct airoha_eth *eth;
void *foe;
@@ -609,9 +620,9 @@ static inline bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
bool airoha_is_valid_gdm_port(struct airoha_eth *eth,
struct airoha_gdm_port *port);
-void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
- u16 hash);
-int airoha_ppe_setup_tc_block_cb(struct net_device *dev, void *type_data);
+void airoha_ppe_check_skb(struct airoha_ppe_dev *dev, struct sk_buff *skb,
+ u16 hash, bool rx_wlan);
+int airoha_ppe_setup_tc_block_cb(struct airoha_ppe_dev *dev, void *type_data);
int airoha_ppe_init(struct airoha_eth *eth);
void airoha_ppe_deinit(struct airoha_eth *eth);
void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port);
diff --git a/drivers/net/ethernet/airoha/airoha_npu.c b/drivers/net/ethernet/airoha/airoha_npu.c
index 9ab964c536e1..8c883f2b2d36 100644
--- a/drivers/net/ethernet/airoha/airoha_npu.c
+++ b/drivers/net/ethernet/airoha/airoha_npu.c
@@ -13,7 +13,6 @@
#include <linux/regmap.h>
#include "airoha_eth.h"
-#include "airoha_npu.h"
#define NPU_EN7581_FIRMWARE_DATA "airoha/en7581_npu_data.bin"
#define NPU_EN7581_FIRMWARE_RV32 "airoha/en7581_npu_rv32.bin"
@@ -42,6 +41,22 @@
#define REG_CR_MBQ8_CTRL(_n) (NPU_MBOX_BASE_ADDR + 0x0b0 + ((_n) << 2))
#define REG_CR_NPU_MIB(_n) (NPU_MBOX_BASE_ADDR + 0x140 + ((_n) << 2))
+#define NPU_WLAN_BASE_ADDR 0x30d000
+
+#define REG_IRQ_STATUS (NPU_WLAN_BASE_ADDR + 0x030)
+#define REG_IRQ_RXDONE(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 2) + 0x034)
+#define NPU_IRQ_RX_MASK(_n) ((_n) == 1 ? BIT(17) : BIT(16))
+
+#define REG_TX_BASE(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x080)
+#define REG_TX_DSCP_NUM(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x084)
+#define REG_TX_CPU_IDX(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x088)
+#define REG_TX_DMA_IDX(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x08c)
+
+#define REG_RX_BASE(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x180)
+#define REG_RX_DSCP_NUM(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x184)
+#define REG_RX_CPU_IDX(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x188)
+#define REG_RX_DMA_IDX(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x18c)
+
#define NPU_TIMER_BASE_ADDR 0x310100
#define REG_WDT_TIMER_CTRL(_n) (NPU_TIMER_BASE_ADDR + ((_n) * 0x100))
#define WDT_EN_MASK BIT(25)
@@ -124,6 +139,13 @@ struct ppe_mbox_data {
};
};
+struct wlan_mbox_data {
+ u32 ifindex:4;
+ u32 func_type:4;
+ u32 func_id;
+ DECLARE_FLEX_ARRAY(u8, d);
+};
+
static int airoha_npu_send_msg(struct airoha_npu *npu, int func_id,
void *p, int size)
{
@@ -357,15 +379,13 @@ out:
return err;
}
-static int airoha_npu_stats_setup(struct airoha_npu *npu,
- dma_addr_t foe_stats_addr)
+static int airoha_npu_ppe_stats_setup(struct airoha_npu *npu,
+ dma_addr_t foe_stats_addr,
+ u32 num_stats_entries)
{
- int err, size = PPE_STATS_NUM_ENTRIES * sizeof(*npu->stats);
+ int err, size = num_stats_entries * sizeof(*npu->stats);
struct ppe_mbox_data *ppe_data;
- if (!size) /* flow stats are disabled */
- return 0;
-
ppe_data = kzalloc(sizeof(*ppe_data), GFP_ATOMIC);
if (!ppe_data)
return -ENOMEM;
@@ -390,7 +410,137 @@ out:
return err;
}
-struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr)
+static int airoha_npu_wlan_msg_send(struct airoha_npu *npu, int ifindex,
+ enum airoha_npu_wlan_set_cmd func_id,
+ void *data, int data_len, gfp_t gfp)
+{
+ struct wlan_mbox_data *wlan_data;
+ int err, len;
+
+ len = sizeof(*wlan_data) + data_len;
+ wlan_data = kzalloc(len, gfp);
+ if (!wlan_data)
+ return -ENOMEM;
+
+ wlan_data->ifindex = ifindex;
+ wlan_data->func_type = NPU_OP_SET;
+ wlan_data->func_id = func_id;
+ memcpy(wlan_data->d, data, data_len);
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_WIFI, wlan_data, len);
+ kfree(wlan_data);
+
+ return err;
+}
+
+static int airoha_npu_wlan_msg_get(struct airoha_npu *npu, int ifindex,
+ enum airoha_npu_wlan_get_cmd func_id,
+ void *data, int data_len, gfp_t gfp)
+{
+ struct wlan_mbox_data *wlan_data;
+ int err, len;
+
+ len = sizeof(*wlan_data) + data_len;
+ wlan_data = kzalloc(len, gfp);
+ if (!wlan_data)
+ return -ENOMEM;
+
+ wlan_data->ifindex = ifindex;
+ wlan_data->func_type = NPU_OP_GET;
+ wlan_data->func_id = func_id;
+
+ err = airoha_npu_send_msg(npu, NPU_FUNC_WIFI, wlan_data, len);
+ if (!err)
+ memcpy(data, wlan_data->d, data_len);
+ kfree(wlan_data);
+
+ return err;
+}
+
+static int
+airoha_npu_wlan_set_reserved_memory(struct airoha_npu *npu,
+ int ifindex, const char *name,
+ enum airoha_npu_wlan_set_cmd func_id)
+{
+ struct device *dev = npu->dev;
+ struct resource res;
+ int err;
+ u32 val;
+
+ err = of_reserved_mem_region_to_resource_byname(dev->of_node, name,
+ &res);
+ if (err)
+ return err;
+
+ val = res.start;
+ return airoha_npu_wlan_msg_send(npu, ifindex, func_id, &val,
+ sizeof(val), GFP_KERNEL);
+}
+
+static int airoha_npu_wlan_init_memory(struct airoha_npu *npu)
+{
+ enum airoha_npu_wlan_set_cmd cmd = WLAN_FUNC_SET_WAIT_NPU_BAND0_ONCPU;
+ u32 val = 0;
+ int err;
+
+ err = airoha_npu_wlan_msg_send(npu, 1, cmd, &val, sizeof(val),
+ GFP_KERNEL);
+ if (err)
+ return err;
+
+ cmd = WLAN_FUNC_SET_WAIT_TX_BUF_CHECK_ADDR;
+ err = airoha_npu_wlan_set_reserved_memory(npu, 0, "tx-bufid", cmd);
+ if (err)
+ return err;
+
+ cmd = WLAN_FUNC_SET_WAIT_PKT_BUF_ADDR;
+ err = airoha_npu_wlan_set_reserved_memory(npu, 0, "pkt", cmd);
+ if (err)
+ return err;
+
+ cmd = WLAN_FUNC_SET_WAIT_TX_PKT_BUF_ADDR;
+ err = airoha_npu_wlan_set_reserved_memory(npu, 0, "tx-pkt", cmd);
+ if (err)
+ return err;
+
+ cmd = WLAN_FUNC_SET_WAIT_IS_FORCE_TO_CPU;
+ return airoha_npu_wlan_msg_send(npu, 0, cmd, &val, sizeof(val),
+ GFP_KERNEL);
+}
+
+static u32 airoha_npu_wlan_queue_addr_get(struct airoha_npu *npu, int qid,
+ bool xmit)
+{
+ if (xmit)
+ return REG_TX_BASE(qid + 2);
+
+ return REG_RX_BASE(qid);
+}
+
+static void airoha_npu_wlan_irq_status_set(struct airoha_npu *npu, u32 val)
+{
+ regmap_write(npu->regmap, REG_IRQ_STATUS, val);
+}
+
+static u32 airoha_npu_wlan_irq_status_get(struct airoha_npu *npu, int q)
+{
+ u32 val;
+
+ regmap_read(npu->regmap, REG_IRQ_STATUS, &val);
+ return val;
+}
+
+static void airoha_npu_wlan_irq_enable(struct airoha_npu *npu, int q)
+{
+ regmap_set_bits(npu->regmap, REG_IRQ_RXDONE(q), NPU_IRQ_RX_MASK(q));
+}
+
+static void airoha_npu_wlan_irq_disable(struct airoha_npu *npu, int q)
+{
+ regmap_clear_bits(npu->regmap, REG_IRQ_RXDONE(q), NPU_IRQ_RX_MASK(q));
+}
+
+struct airoha_npu *airoha_npu_get(struct device *dev)
{
struct platform_device *pdev;
struct device_node *np;
@@ -429,17 +579,6 @@ struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr)
goto error_module_put;
}
- if (stats_addr) {
- int err;
-
- err = airoha_npu_stats_setup(npu, *stats_addr);
- if (err) {
- dev_err(dev, "failed to allocate npu stats buffer\n");
- npu = ERR_PTR(err);
- goto error_module_put;
- }
- }
-
return npu;
error_module_put:
@@ -491,8 +630,17 @@ static int airoha_npu_probe(struct platform_device *pdev)
npu->dev = dev;
npu->ops.ppe_init = airoha_npu_ppe_init;
npu->ops.ppe_deinit = airoha_npu_ppe_deinit;
+ npu->ops.ppe_init_stats = airoha_npu_ppe_stats_setup;
npu->ops.ppe_flush_sram_entries = airoha_npu_ppe_flush_sram_entries;
npu->ops.ppe_foe_commit_entry = airoha_npu_foe_commit_entry;
+ npu->ops.wlan_init_reserved_memory = airoha_npu_wlan_init_memory;
+ npu->ops.wlan_send_msg = airoha_npu_wlan_msg_send;
+ npu->ops.wlan_get_msg = airoha_npu_wlan_msg_get;
+ npu->ops.wlan_get_queue_addr = airoha_npu_wlan_queue_addr_get;
+ npu->ops.wlan_set_irq_status = airoha_npu_wlan_irq_status_set;
+ npu->ops.wlan_get_irq_status = airoha_npu_wlan_irq_status_get;
+ npu->ops.wlan_enable_irq = airoha_npu_wlan_irq_enable;
+ npu->ops.wlan_disable_irq = airoha_npu_wlan_irq_disable;
npu->regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
if (IS_ERR(npu->regmap))
@@ -529,6 +677,15 @@ static int airoha_npu_probe(struct platform_device *pdev)
INIT_WORK(&core->wdt_work, airoha_npu_wdt_work);
}
+ /* wlan IRQ lines */
+ for (i = 0; i < ARRAY_SIZE(npu->irqs); i++) {
+ irq = platform_get_irq(pdev, i + ARRAY_SIZE(npu->cores) + 1);
+ if (irq < 0)
+ return irq;
+
+ npu->irqs[i] = irq;
+ }
+
err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
if (err)
return err;
@@ -550,8 +707,7 @@ static int airoha_npu_probe(struct platform_device *pdev)
usleep_range(1000, 2000);
/* enable NPU cores */
- /* do not start core3 since it is used for WiFi offloading */
- regmap_write(npu->regmap, REG_CR_BOOT_CONFIG, 0xf7);
+ regmap_write(npu->regmap, REG_CR_BOOT_CONFIG, 0xff);
regmap_write(npu->regmap, REG_CR_BOOT_TRIGGER, 0x1);
msleep(100);
@@ -579,6 +735,8 @@ static struct platform_driver airoha_npu_driver = {
};
module_platform_driver(airoha_npu_driver);
+MODULE_FIRMWARE(NPU_EN7581_FIRMWARE_DATA);
+MODULE_FIRMWARE(NPU_EN7581_FIRMWARE_RV32);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_DESCRIPTION("Airoha Network Processor Unit driver");
diff --git a/drivers/net/ethernet/airoha/airoha_npu.h b/drivers/net/ethernet/airoha/airoha_npu.h
deleted file mode 100644
index 98ec3be74ce4..000000000000
--- a/drivers/net/ethernet/airoha/airoha_npu.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2025 AIROHA Inc
- * Author: Lorenzo Bianconi <lorenzo@kernel.org>
- */
-
-#define NPU_NUM_CORES 8
-
-struct airoha_npu {
- struct device *dev;
- struct regmap *regmap;
-
- struct airoha_npu_core {
- struct airoha_npu *npu;
- /* protect concurrent npu memory accesses */
- spinlock_t lock;
- struct work_struct wdt_work;
- } cores[NPU_NUM_CORES];
-
- struct airoha_foe_stats __iomem *stats;
-
- struct {
- int (*ppe_init)(struct airoha_npu *npu);
- int (*ppe_deinit)(struct airoha_npu *npu);
- int (*ppe_flush_sram_entries)(struct airoha_npu *npu,
- dma_addr_t foe_addr,
- int sram_num_entries);
- int (*ppe_foe_commit_entry)(struct airoha_npu *npu,
- dma_addr_t foe_addr,
- u32 entry_size, u32 hash,
- bool ppe2);
- } ops;
-};
-
-struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr);
-void airoha_npu_put(struct airoha_npu *npu);
diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c
index c354d536bc66..691361b25407 100644
--- a/drivers/net/ethernet/airoha/airoha_ppe.c
+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
@@ -6,11 +6,12 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/rhashtable.h>
#include <net/ipv6.h>
#include <net/pkt_cls.h>
-#include "airoha_npu.h"
#include "airoha_regs.h"
#include "airoha_eth.h"
@@ -190,6 +191,31 @@ static int airoha_ppe_flow_mangle_ipv4(const struct flow_action_entry *act,
return 0;
}
+static int airoha_ppe_get_wdma_info(struct net_device *dev, const u8 *addr,
+ struct airoha_wdma_info *info)
+{
+ struct net_device_path_stack stack;
+ struct net_device_path *path;
+ int err;
+
+ if (!dev)
+ return -ENODEV;
+
+ err = dev_fill_forward_path(dev, addr, &stack);
+ if (err)
+ return err;
+
+ path = &stack.path[stack.num_paths - 1];
+ if (path->type != DEV_PATH_MTK_WDMA)
+ return -1;
+
+ info->idx = path->mtk_wdma.wdma_idx;
+ info->bss = path->mtk_wdma.bss;
+ info->wcid = path->mtk_wdma.wcid;
+
+ return 0;
+}
+
static int airoha_get_dsa_port(struct net_device **dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
@@ -220,9 +246,9 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
struct airoha_flow_data *data,
int l4proto)
{
- int dsa_port = airoha_get_dsa_port(&dev);
+ u32 qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f), ports_pad, val;
+ int wlan_etype = -EINVAL, dsa_port = airoha_get_dsa_port(&dev);
struct airoha_foe_mac_info_common *l2;
- u32 qdata, ports_pad, val;
u8 smac_id = 0xf;
memset(hwe, 0, sizeof(*hwe));
@@ -236,31 +262,47 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
AIROHA_FOE_IB1_BIND_TTL;
hwe->ib1 = val;
- val = FIELD_PREP(AIROHA_FOE_IB2_PORT_AG, 0x1f) |
- AIROHA_FOE_IB2_PSE_QOS;
- if (dsa_port >= 0)
- val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ, dsa_port);
-
+ val = FIELD_PREP(AIROHA_FOE_IB2_PORT_AG, 0x1f);
if (dev) {
- struct airoha_gdm_port *port = netdev_priv(dev);
- u8 pse_port;
-
- if (!airoha_is_valid_gdm_port(eth, port))
- return -EINVAL;
-
- if (dsa_port >= 0)
- pse_port = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
- else
- pse_port = 2; /* uplink relies on GDM2 loopback */
- val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port);
-
- /* For downlink traffic consume SRAM memory for hw forwarding
- * descriptors queue.
- */
- if (airhoa_is_lan_gdm_port(port))
- val |= AIROHA_FOE_IB2_FAST_PATH;
-
- smac_id = port->id;
+ struct airoha_wdma_info info = {};
+
+ if (!airoha_ppe_get_wdma_info(dev, data->eth.h_dest, &info)) {
+ val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ, info.idx) |
+ FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT,
+ FE_PSE_PORT_CDM4);
+ qdata |= FIELD_PREP(AIROHA_FOE_ACTDP, info.bss);
+ wlan_etype = FIELD_PREP(AIROHA_FOE_MAC_WDMA_BAND,
+ info.idx) |
+ FIELD_PREP(AIROHA_FOE_MAC_WDMA_WCID,
+ info.wcid);
+ } else {
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ u8 pse_port;
+
+ if (!airoha_is_valid_gdm_port(eth, port))
+ return -EINVAL;
+
+ if (dsa_port >= 0)
+ pse_port = port->id == 4 ? FE_PSE_PORT_GDM4
+ : port->id;
+ else
+ pse_port = 2; /* uplink relies on GDM2
+ * loopback
+ */
+
+ val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port) |
+ AIROHA_FOE_IB2_PSE_QOS;
+ /* For downlink traffic, consume SRAM memory for the
+ * hw forwarding descriptor queue.
+ */
+ if (airhoa_is_lan_gdm_port(port))
+ val |= AIROHA_FOE_IB2_FAST_PATH;
+ if (dsa_port >= 0)
+ val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ,
+ dsa_port);
+
+ smac_id = port->id;
+ }
}
if (is_multicast_ether_addr(data->eth.h_dest))
@@ -272,7 +314,6 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
if (type == PPE_PKT_TYPE_IPV6_ROUTE_3T)
hwe->ipv6.ports = ports_pad;
- qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f);
if (type == PPE_PKT_TYPE_BRIDGE) {
airoha_ppe_foe_set_bridge_addrs(&hwe->bridge, &data->eth);
hwe->bridge.data = qdata;
@@ -313,7 +354,9 @@ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
l2->vlan2 = data->vlan.hdr[1].id;
}
- if (dsa_port >= 0) {
+ if (wlan_etype >= 0) {
+ l2->etype = wlan_etype;
+ } else if (dsa_port >= 0) {
l2->etype = BIT(dsa_port);
l2->etype |= !data->vlan.num ? BIT(15) : 0;
} else if (data->pppoe.num) {
@@ -490,6 +533,10 @@ static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe,
meter = &hwe->ipv4.l2.meter;
}
+ pse_port = FIELD_GET(AIROHA_FOE_IB2_PSE_PORT, *ib2);
+ if (pse_port == FE_PSE_PORT_CDM4)
+ return;
+
airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, index);
val = FIELD_GET(AIROHA_FOE_CHANNEL | AIROHA_FOE_QID, *data);
@@ -500,7 +547,6 @@ static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe,
AIROHA_FOE_IB2_PSE_QOS | AIROHA_FOE_IB2_FAST_PATH);
*meter |= FIELD_PREP(AIROHA_FOE_TUNNEL_MTU, val);
- pse_port = FIELD_GET(AIROHA_FOE_IB2_PSE_PORT, *ib2);
nbq = pse_port == 1 ? 6 : 5;
*ib2 &= ~(AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
AIROHA_FOE_IB2_PSE_QOS);
@@ -508,9 +554,11 @@ static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe,
FIELD_PREP(AIROHA_FOE_IB2_NBQ, nbq);
}
-struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
- u32 hash)
+static struct airoha_foe_entry *
+airoha_ppe_foe_get_entry_locked(struct airoha_ppe *ppe, u32 hash)
{
+ lockdep_assert_held(&ppe_lock);
+
if (hash < PPE_SRAM_NUM_ENTRIES) {
u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry);
struct airoha_eth *eth = ppe->eth;
@@ -537,6 +585,18 @@ struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
return ppe->foe + hash * sizeof(struct airoha_foe_entry);
}
+struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
+ u32 hash)
+{
+ struct airoha_foe_entry *hwe;
+
+ spin_lock_bh(&ppe_lock);
+ hwe = airoha_ppe_foe_get_entry_locked(ppe, hash);
+ spin_unlock_bh(&ppe_lock);
+
+ return hwe;
+}
+
static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e,
struct airoha_foe_entry *hwe)
{
@@ -556,7 +616,7 @@ static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e,
static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
struct airoha_foe_entry *e,
- u32 hash)
+ u32 hash, bool rx_wlan)
{
struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
u32 ts = airoha_ppe_get_timestamp(ppe);
@@ -579,7 +639,8 @@ static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
goto unlock;
}
- airoha_ppe_foe_flow_stats_update(ppe, npu, hwe, hash);
+ if (!rx_wlan)
+ airoha_ppe_foe_flow_stats_update(ppe, npu, hwe, hash);
if (hash < PPE_SRAM_NUM_ENTRIES) {
dma_addr_t addr = ppe->foe_dma + hash * sizeof(*hwe);
@@ -605,7 +666,7 @@ static void airoha_ppe_foe_remove_flow(struct airoha_ppe *ppe,
e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE;
e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE,
AIROHA_FOE_STATE_INVALID);
- airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash);
+ airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash, false);
e->hash = 0xffff;
}
if (e->type == FLOW_TYPE_L2_SUBFLOW) {
@@ -644,14 +705,14 @@ static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe,
static int
airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe,
struct airoha_flow_table_entry *e,
- u32 hash)
+ u32 hash, bool rx_wlan)
{
u32 mask = AIROHA_FOE_IB1_BIND_PACKET_TYPE | AIROHA_FOE_IB1_BIND_UDP;
struct airoha_foe_entry *hwe_p, hwe;
struct airoha_flow_table_entry *f;
int type;
- hwe_p = airoha_ppe_foe_get_entry(ppe, hash);
+ hwe_p = airoha_ppe_foe_get_entry_locked(ppe, hash);
if (!hwe_p)
return -EINVAL;
@@ -685,14 +746,14 @@ airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe,
}
hwe.bridge.data = e->data.bridge.data;
- airoha_ppe_foe_commit_entry(ppe, &hwe, hash);
+ airoha_ppe_foe_commit_entry(ppe, &hwe, hash, rx_wlan);
return 0;
}
static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
struct sk_buff *skb,
- u32 hash)
+ u32 hash, bool rx_wlan)
{
struct airoha_flow_table_entry *e;
struct airoha_foe_bridge br = {};
@@ -703,7 +764,7 @@ static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
spin_lock_bh(&ppe_lock);
- hwe = airoha_ppe_foe_get_entry(ppe, hash);
+ hwe = airoha_ppe_foe_get_entry_locked(ppe, hash);
if (!hwe)
goto unlock;
@@ -722,12 +783,10 @@ static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
continue;
}
- if (commit_done || !airoha_ppe_foe_compare_entry(e, hwe)) {
- e->hash = 0xffff;
+ if (!airoha_ppe_foe_compare_entry(e, hwe))
continue;
- }
- airoha_ppe_foe_commit_entry(ppe, &e->data, hash);
+ airoha_ppe_foe_commit_entry(ppe, &e->data, hash, rx_wlan);
commit_done = true;
e->hash = hash;
}
@@ -739,7 +798,7 @@ static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
e = rhashtable_lookup_fast(&ppe->l2_flows, &br,
airoha_l2_flow_table_params);
if (e)
- airoha_ppe_foe_commit_subflow_entry(ppe, e, hash);
+ airoha_ppe_foe_commit_subflow_entry(ppe, e, hash, rx_wlan);
unlock:
spin_unlock_bh(&ppe_lock);
}
@@ -818,7 +877,7 @@ airoha_ppe_foe_flow_l2_entry_update(struct airoha_ppe *ppe,
u32 ib1, state;
int idle;
- hwe = airoha_ppe_foe_get_entry(ppe, iter->hash);
+ hwe = airoha_ppe_foe_get_entry_locked(ppe, iter->hash);
if (!hwe)
continue;
@@ -855,7 +914,7 @@ static void airoha_ppe_foe_flow_entry_update(struct airoha_ppe *ppe,
if (e->hash == 0xffff)
goto unlock;
- hwe_p = airoha_ppe_foe_get_entry(ppe, e->hash);
+ hwe_p = airoha_ppe_foe_get_entry_locked(ppe, e->hash);
if (!hwe_p)
goto unlock;
@@ -878,11 +937,10 @@ static int airoha_ppe_entry_idle_time(struct airoha_ppe *ppe,
return airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
}
-static int airoha_ppe_flow_offload_replace(struct airoha_gdm_port *port,
+static int airoha_ppe_flow_offload_replace(struct airoha_eth *eth,
struct flow_cls_offload *f)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
- struct airoha_eth *eth = port->qdma->eth;
struct airoha_flow_table_entry *e;
struct airoha_flow_data data = {};
struct net_device *odev = NULL;
@@ -1079,10 +1137,9 @@ free_entry:
return err;
}
-static int airoha_ppe_flow_offload_destroy(struct airoha_gdm_port *port,
+static int airoha_ppe_flow_offload_destroy(struct airoha_eth *eth,
struct flow_cls_offload *f)
{
- struct airoha_eth *eth = port->qdma->eth;
struct airoha_flow_table_entry *e;
e = rhashtable_lookup(&eth->flow_table, &f->cookie,
@@ -1125,10 +1182,9 @@ void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
rcu_read_unlock();
}
-static int airoha_ppe_flow_offload_stats(struct airoha_gdm_port *port,
+static int airoha_ppe_flow_offload_stats(struct airoha_eth *eth,
struct flow_cls_offload *f)
{
- struct airoha_eth *eth = port->qdma->eth;
struct airoha_flow_table_entry *e;
u32 idle;
@@ -1152,16 +1208,16 @@ static int airoha_ppe_flow_offload_stats(struct airoha_gdm_port *port,
return 0;
}
-static int airoha_ppe_flow_offload_cmd(struct airoha_gdm_port *port,
+static int airoha_ppe_flow_offload_cmd(struct airoha_eth *eth,
struct flow_cls_offload *f)
{
switch (f->command) {
case FLOW_CLS_REPLACE:
- return airoha_ppe_flow_offload_replace(port, f);
+ return airoha_ppe_flow_offload_replace(eth, f);
case FLOW_CLS_DESTROY:
- return airoha_ppe_flow_offload_destroy(port, f);
+ return airoha_ppe_flow_offload_destroy(eth, f);
case FLOW_CLS_STATS:
- return airoha_ppe_flow_offload_stats(port, f);
+ return airoha_ppe_flow_offload_stats(eth, f);
default:
break;
}
@@ -1187,12 +1243,11 @@ static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe,
static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth)
{
- struct airoha_npu *npu = airoha_npu_get(eth->dev,
- &eth->ppe->foe_stats_dma);
+ struct airoha_npu *npu = airoha_npu_get(eth->dev);
if (IS_ERR(npu)) {
request_module("airoha-npu");
- npu = airoha_npu_get(eth->dev, &eth->ppe->foe_stats_dma);
+ npu = airoha_npu_get(eth->dev);
}
return npu;
@@ -1201,6 +1256,7 @@ static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth)
static int airoha_ppe_offload_setup(struct airoha_eth *eth)
{
struct airoha_npu *npu = airoha_ppe_npu_get(eth);
+ struct airoha_ppe *ppe = eth->ppe;
int err;
if (IS_ERR(npu))
@@ -1210,12 +1266,19 @@ static int airoha_ppe_offload_setup(struct airoha_eth *eth)
if (err)
goto error_npu_put;
- airoha_ppe_hw_init(eth->ppe);
- err = airoha_ppe_flush_sram_entries(eth->ppe, npu);
+ if (PPE_STATS_NUM_ENTRIES) {
+ err = npu->ops.ppe_init_stats(npu, ppe->foe_stats_dma,
+ PPE_STATS_NUM_ENTRIES);
+ if (err)
+ goto error_npu_put;
+ }
+
+ airoha_ppe_hw_init(ppe);
+ err = airoha_ppe_flush_sram_entries(ppe, npu);
if (err)
goto error_npu_put;
- airoha_ppe_foe_flow_stats_reset(eth->ppe, npu);
+ airoha_ppe_foe_flow_stats_reset(ppe, npu);
rcu_assign_pointer(eth->npu, npu);
synchronize_rcu();
@@ -1228,11 +1291,10 @@ error_npu_put:
return err;
}
-int airoha_ppe_setup_tc_block_cb(struct net_device *dev, void *type_data)
+int airoha_ppe_setup_tc_block_cb(struct airoha_ppe_dev *dev, void *type_data)
{
- struct airoha_gdm_port *port = netdev_priv(dev);
- struct flow_cls_offload *cls = type_data;
- struct airoha_eth *eth = port->qdma->eth;
+ struct airoha_ppe *ppe = dev->priv;
+ struct airoha_eth *eth = ppe->eth;
int err = 0;
mutex_lock(&flow_offload_mutex);
@@ -1240,16 +1302,17 @@ int airoha_ppe_setup_tc_block_cb(struct net_device *dev, void *type_data)
if (!eth->npu)
err = airoha_ppe_offload_setup(eth);
if (!err)
- err = airoha_ppe_flow_offload_cmd(port, cls);
+ err = airoha_ppe_flow_offload_cmd(eth, type_data);
mutex_unlock(&flow_offload_mutex);
return err;
}
-void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
- u16 hash)
+void airoha_ppe_check_skb(struct airoha_ppe_dev *dev, struct sk_buff *skb,
+ u16 hash, bool rx_wlan)
{
+ struct airoha_ppe *ppe = dev->priv;
u16 now, diff;
if (hash > PPE_HASH_MASK)
@@ -1261,7 +1324,7 @@ void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
return;
ppe->foe_check_time[hash] = now;
- airoha_ppe_foe_insert_entry(ppe, skb, hash);
+ airoha_ppe_foe_insert_entry(ppe, skb, hash, rx_wlan);
}
void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port)
@@ -1285,6 +1348,61 @@ void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port)
PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
}
+struct airoha_ppe_dev *airoha_ppe_get_dev(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+ struct airoha_eth *eth;
+
+ np = of_parse_phandle(dev->of_node, "airoha,eth", 0);
+ if (!np)
+ return ERR_PTR(-ENODEV);
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ dev_err(dev, "cannot find device node %s\n", np->name);
+ of_node_put(np);
+ return ERR_PTR(-ENODEV);
+ }
+ of_node_put(np);
+
+ if (!try_module_get(THIS_MODULE)) {
+ dev_err(dev, "failed to get the device driver module\n");
+ goto error_pdev_put;
+ }
+
+ eth = platform_get_drvdata(pdev);
+ if (!eth)
+ goto error_module_put;
+
+ if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER)) {
+ dev_err(&pdev->dev,
+ "failed to create device link to consumer %s\n",
+ dev_name(dev));
+ goto error_module_put;
+ }
+
+ return &eth->ppe->dev;
+
+error_module_put:
+ module_put(THIS_MODULE);
+error_pdev_put:
+ platform_device_put(pdev);
+
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL_GPL(airoha_ppe_get_dev);
+
+void airoha_ppe_put_dev(struct airoha_ppe_dev *dev)
+{
+ struct airoha_ppe *ppe = dev->priv;
+ struct airoha_eth *eth = ppe->eth;
+
+ module_put(THIS_MODULE);
+ put_device(eth->dev);
+}
+EXPORT_SYMBOL_GPL(airoha_ppe_put_dev);
+
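A hedged sketch of how a consumer driver would pair these exports; only airoha_ppe_get_dev()/airoha_ppe_put_dev(), the "airoha,eth" phandle lookup and the offload header added earlier in this series come from the patch, while the probe/remove framing is illustrative:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/soc/airoha/airoha_offload.h>

static int wlan_offload_probe(struct platform_device *pdev)
{
	/* takes a module reference and adds a device link to the
	 * supplying ethernet driver */
	struct airoha_ppe_dev *ppe_dev = airoha_ppe_get_dev(&pdev->dev);

	if (IS_ERR(ppe_dev))
		return PTR_ERR(ppe_dev);

	platform_set_drvdata(pdev, ppe_dev);
	return 0;
}

static void wlan_offload_remove(struct platform_device *pdev)
{
	/* balances the reference taken in probe */
	airoha_ppe_put_dev(platform_get_drvdata(pdev));
}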
int airoha_ppe_init(struct airoha_eth *eth)
{
struct airoha_ppe *ppe;
@@ -1294,6 +1412,10 @@ int airoha_ppe_init(struct airoha_eth *eth)
if (!ppe)
return -ENOMEM;
+ ppe->dev.ops.setup_tc_block_cb = airoha_ppe_setup_tc_block_cb;
+ ppe->dev.ops.check_skb = airoha_ppe_check_skb;
+ ppe->dev.priv = ppe;
+
foe_size = PPE_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
ppe->foe = dmam_alloc_coherent(eth->dev, foe_size, &ppe->foe_dma,
GFP_KERNEL);
diff --git a/drivers/net/ethernet/airoha/airoha_regs.h b/drivers/net/ethernet/airoha/airoha_regs.h
index 150c85995cc1..69c5a143db8c 100644
--- a/drivers/net/ethernet/airoha/airoha_regs.h
+++ b/drivers/net/ethernet/airoha/airoha_regs.h
@@ -151,6 +151,9 @@
#define LPBK_LEN_MASK GENMASK(23, 10)
#define LPBK_CHAN_MASK GENMASK(8, 4)
#define LPBK_MODE_MASK GENMASK(3, 1)
+#define LBK_GAP_MODE_MASK BIT(3)
+#define LBK_LEN_MODE_MASK BIT(2)
+#define LBK_CHAN_MODE_MASK BIT(1)
#define LPBK_EN_MASK BIT(0)
#define REG_GDM_TXCHN_EN(_n) (GDM_BASE(_n) + 0x24)
@@ -237,8 +240,8 @@
#define PPE_FLOW_CFG_IP4_TCP_FRAG_MASK BIT(6)
#define REG_PPE_IP_PROTO_CHK(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x208)
-#define PPE_IP_PROTO_CHK_IPV4_MASK GENMASK(15, 0)
-#define PPE_IP_PROTO_CHK_IPV6_MASK GENMASK(31, 16)
+#define PPE_IP_PROTO_CHK_IPV4_MASK GENMASK(31, 16)
+#define PPE_IP_PROTO_CHK_IPV6_MASK GENMASK(15, 0)
#define REG_PPE_TB_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x21c)
#define PPE_SRAM_TB_NUM_ENTRY_MASK GENMASK(26, 24)
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index a81d3a7a3bb9..fe3479b84a1f 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -865,7 +865,10 @@ static u32 ena_get_rxfh_indir_size(struct net_device *netdev)
static u32 ena_get_rxfh_key_size(struct net_device *netdev)
{
- return ENA_HASH_KEY_SIZE;
+ struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_rss *rss = &adapter->ena_dev->rss;
+
+ return rss->hash_key ? ENA_HASH_KEY_SIZE : 0;
}
static int ena_indirection_table_set(struct ena_adapter *adapter,
diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c
index 9b81e1c260c2..c7a2eff57632 100644
--- a/drivers/net/ethernet/amd/pds_core/main.c
+++ b/drivers/net/ethernet/amd/pds_core/main.c
@@ -280,7 +280,7 @@ static int pdsc_init_pf(struct pdsc *pdsc)
goto err_out_del_dev;
}
- hr = devl_health_reporter_create(dl, &pdsc_fw_reporter_ops, 0, pdsc);
+ hr = devl_health_reporter_create(dl, &pdsc_fw_reporter_ops, pdsc);
if (IS_ERR(hr)) {
devl_unlock(dl);
dev_warn(pdsc->dev, "Failed to create fw reporter: %pe\n", hr);
diff --git a/drivers/net/ethernet/amd/xgbe/Makefile b/drivers/net/ethernet/amd/xgbe/Makefile
index 5b0ab6240cf2..980e27652237 100644
--- a/drivers/net/ethernet/amd/xgbe/Makefile
+++ b/drivers/net/ethernet/amd/xgbe/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_AMD_XGBE) += amd-xgbe.o
amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \
xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o \
- xgbe-hwtstamp.o xgbe-ptp.o \
+ xgbe-hwtstamp.o xgbe-ptp.o xgbe-pps.o \
xgbe-i2c.o xgbe-phy-v1.o xgbe-phy-v2.o \
xgbe-platform.o
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 009fbc9b11ce..62b01de93db4 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -227,7 +227,11 @@
#define MAC_TICSNR 0x0d5C
#define MAC_TECNR 0x0d60
#define MAC_TECSNR 0x0d64
-
+#define MAC_PPSCR 0x0d70
+#define MAC_PPS0_TTSR 0x0d80
+#define MAC_PPS0_TTNSR 0x0d84
+#define MAC_PPS0_INTERVAL 0x0d88
+#define MAC_PPS0_WIDTH 0x0d8C
#define MAC_QTFCR_INC 4
#define MAC_MACA_INC 4
#define MAC_HTR_INC 4
@@ -235,6 +239,18 @@
#define MAC_RQC2_INC 4
#define MAC_RQC2_Q_PER_REG 4
+/* PPS helpers */
+#define PPSEN0 BIT(4)
+#define MAC_PPSx_TTSR(x) ((MAC_PPS0_TTSR) + ((x) * 0x10))
+#define MAC_PPSx_TTNSR(x) ((MAC_PPS0_TTNSR) + ((x) * 0x10))
+#define MAC_PPSx_INTERVAL(x) ((MAC_PPS0_INTERVAL) + ((x) * 0x10))
+#define MAC_PPSx_WIDTH(x) ((MAC_PPS0_WIDTH) + ((x) * 0x10))
+#define PPS_MAXIDX(x) ((((x) + 1) * 8) - 1)
+#define PPS_MINIDX(x) ((x) * 8)
+#define XGBE_PPSCMD_STOP 0x5
+#define XGBE_PPSCMD_START 0x2
+#define XGBE_PPSTARGET_PULSE 0x2
+
/* MAC register entry bit positions and sizes */
#define MAC_HWF0R_ADDMACADRSEL_INDEX 18
#define MAC_HWF0R_ADDMACADRSEL_WIDTH 5
@@ -496,8 +512,10 @@
#define MAC_VR_SNPSVER_WIDTH 8
#define MAC_VR_USERVER_INDEX 16
#define MAC_VR_USERVER_WIDTH 8
+#define MAC_PPSx_TTNSR_TRGTBUSY0_INDEX 31
+#define MAC_PPSx_TTNSR_TRGTBUSY0_WIDTH 1
-/* MMC register offsets */
+ /* MMC register offsets */
#define MMC_CR 0x0800
#define MMC_RISR 0x0804
#define MMC_TISR 0x0808
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 2e9b95a94f89..f0989aa01855 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -691,6 +691,21 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
hw_feat->pps_out_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);
+ /* Sanity check and warn if hardware reports more than supported */
+ if (hw_feat->pps_out_num > XGBE_MAX_PPS_OUT) {
+ dev_warn(pdata->dev,
+ "Hardware reports %u PPS outputs, limiting to %u\n",
+ hw_feat->pps_out_num, XGBE_MAX_PPS_OUT);
+ hw_feat->pps_out_num = XGBE_MAX_PPS_OUT;
+ }
+
+ if (hw_feat->aux_snap_num > XGBE_MAX_AUX_SNAP) {
+ dev_warn(pdata->dev,
+ "Hardware reports %u aux snapshot inputs, limiting to %u\n",
+ hw_feat->aux_snap_num, XGBE_MAX_AUX_SNAP);
+ hw_feat->aux_snap_num = XGBE_MAX_AUX_SNAP;
+ }
+
/* Translate the Hash Table size into actual number */
switch (hw_feat->hash_table_size) {
case 0:
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index be0d2c7d08dc..b6e1b67a2d0e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -329,6 +329,7 @@ static int xgbe_get_coalesce(struct net_device *netdev,
ec->rx_coalesce_usecs = pdata->rx_usecs;
ec->rx_max_coalesced_frames = pdata->rx_frames;
+ ec->tx_coalesce_usecs = pdata->tx_usecs;
ec->tx_max_coalesced_frames = pdata->tx_frames;
return 0;
@@ -342,7 +343,8 @@ static int xgbe_set_coalesce(struct net_device *netdev,
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
unsigned int rx_frames, rx_riwt, rx_usecs;
- unsigned int tx_frames;
+ unsigned int tx_frames, tx_usecs;
+ unsigned int jiffy_us = jiffies_to_usecs(1);
rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
rx_usecs = ec->rx_coalesce_usecs;
@@ -364,20 +366,42 @@ static int xgbe_set_coalesce(struct net_device *netdev,
return -EINVAL;
}
+ tx_usecs = ec->tx_coalesce_usecs;
tx_frames = ec->tx_max_coalesced_frames;
/* Check the bounds of values for Tx */
+ if (!tx_usecs) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "tx-usecs must not be 0");
+ return -EINVAL;
+ }
+ if (tx_usecs > XGMAC_MAX_COAL_TX_TICK) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "tx-usecs is limited to %d usec",
+ XGMAC_MAX_COAL_TX_TICK);
+ return -EINVAL;
+ }
if (tx_frames > pdata->tx_desc_count) {
netdev_err(netdev, "tx-frames is limited to %d frames\n",
pdata->tx_desc_count);
return -EINVAL;
}
+ /* Round tx-usecs down to a multiple of the jiffy granularity */
+ if (tx_usecs % jiffy_us) {
+ tx_usecs = rounddown(tx_usecs, jiffy_us);
+ if (!tx_usecs)
+ tx_usecs = jiffy_us;
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "tx-usecs rounded to %u usec due to jiffy granularity (%u usec)",
+ tx_usecs, jiffy_us);
+ }
+
pdata->rx_riwt = rx_riwt;
pdata->rx_usecs = rx_usecs;
pdata->rx_frames = rx_frames;
hw_if->config_rx_coalesce(pdata);
+ pdata->tx_usecs = tx_usecs;
pdata->tx_frames = tx_frames;
hw_if->config_tx_coalesce(pdata);
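The rounding above exists because the Tx coalescing timer is jiffies-based, so sub-jiffy values cannot be honoured. A small userspace sketch of the same policy; jiffy_us = 4000 stands in for an assumed HZ=250 build:

#include <stdio.h>

static unsigned int round_tx_usecs(unsigned int tx_usecs,
				   unsigned int jiffy_us)
{
	if (tx_usecs % jiffy_us) {
		tx_usecs -= tx_usecs % jiffy_us;	/* rounddown() */
		if (!tx_usecs)
			tx_usecs = jiffy_us;	/* never below one jiffy */
	}
	return tx_usecs;
}

int main(void)
{
	printf("%u\n", round_tx_usecs(10000, 4000));	/* 8000 */
	printf("%u\n", round_tx_usecs(1500, 4000));	/* 4000 */
	return 0;
}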
@@ -440,7 +464,7 @@ static int xgbe_set_rxfh(struct net_device *netdev,
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
- unsigned int ret;
+ int ret;
if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
rxfh->hfunc != ETH_RSS_HASH_TOP) {
@@ -709,7 +733,7 @@ out:
}
static const struct ethtool_ops xgbe_ethtool_ops = {
- .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = xgbe_get_drvinfo,
.get_msglevel = xgbe_get_msglevel,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
index d40011e8ddf2..65eb7b577b65 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
@@ -70,7 +70,7 @@ static int xgbe_i2c_set_enable(struct xgbe_prv_data *pdata, bool enable)
static int xgbe_i2c_disable(struct xgbe_prv_data *pdata)
{
- unsigned int ret;
+ int ret;
ret = xgbe_i2c_set_enable(pdata, false);
if (ret) {
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 23c39e92e783..a56efc1bee33 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -2902,7 +2902,7 @@ static void xgbe_phy_sfp_setup(struct xgbe_prv_data *pdata)
static int xgbe_phy_int_mdio_reset(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- unsigned int ret;
+ int ret;
ret = pdata->hw_if.set_gpio(pdata, phy_data->mdio_reset_gpio);
if (ret)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pps.c b/drivers/net/ethernet/amd/xgbe/xgbe-pps.c
new file mode 100644
index 000000000000..6d03ae7ab36f
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pps.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
+/*
+ * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
+ * Copyright (c) 2014, Synopsys, Inc.
+ * All rights reserved
+ *
+ * Author: Raju Rangoju <Raju.Rangoju@amd.com>
+ */
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+static u32 get_pps_mask(unsigned int x)
+{
+ return GENMASK(PPS_MAXIDX(x), PPS_MINIDX(x));
+}
+
+static u32 get_pps_cmd(unsigned int x, u32 val)
+{
+ return (val & GENMASK(3, 0)) << PPS_MINIDX(x);
+}
+
+static u32 get_target_mode_sel(unsigned int x, u32 val)
+{
+ return (val & GENMASK(1, 0)) << (PPS_MAXIDX(x) - 2);
+}
+
+int xgbe_pps_config(struct xgbe_prv_data *pdata,
+ struct xgbe_pps_config *cfg, int index, bool on)
+{
+ unsigned int ppscr = 0;
+ unsigned int tnsec;
+ u64 period;
+
+ /* Check if target time register is busy */
+ tnsec = XGMAC_IOREAD(pdata, MAC_PPSx_TTNSR(index));
+ if (XGMAC_GET_BITS(tnsec, MAC_PPSx_TTNSR, TRGTBUSY0))
+ return -EBUSY;
+
+ ppscr = XGMAC_IOREAD(pdata, MAC_PPSCR);
+ ppscr &= ~get_pps_mask(index);
+
+ if (!on) {
+ /* Disable PPS output */
+ ppscr |= get_pps_cmd(index, XGBE_PPSCMD_STOP);
+ ppscr |= PPSEN0;
+ XGMAC_IOWRITE(pdata, MAC_PPSCR, ppscr);
+
+ return 0;
+ }
+
+ /* Configure start time */
+ XGMAC_IOWRITE(pdata, MAC_PPSx_TTSR(index), cfg->start.tv_sec);
+ XGMAC_IOWRITE(pdata, MAC_PPSx_TTNSR(index), cfg->start.tv_nsec);
+
+ period = cfg->period.tv_sec * NSEC_PER_SEC + cfg->period.tv_nsec;
+ period = div_u64(period, XGBE_V2_TSTAMP_SSINC);
+
+ if (period < 4)
+ return -EINVAL;
+
+ /* Configure interval and pulse width (50% duty cycle) */
+ XGMAC_IOWRITE(pdata, MAC_PPSx_INTERVAL(index), period - 1);
+ XGMAC_IOWRITE(pdata, MAC_PPSx_WIDTH(index), (period >> 1) - 1);
+
+ /* Enable PPS with pulse train mode */
+ ppscr |= get_pps_cmd(index, XGBE_PPSCMD_START);
+ ppscr |= get_target_mode_sel(index, XGBE_PPSTARGET_PULSE);
+ ppscr |= PPSEN0;
+
+ XGMAC_IOWRITE(pdata, MAC_PPSCR, ppscr);
+
+ return 0;
+}
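Worked numbers for the interval/width programming above, as a userspace sketch. The 10 ns tick is purely illustrative; the real divisor is XGBE_V2_TSTAMP_SSINC from xgbe.h:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t ssinc_ns = 10;			/* assumed tick */
	const uint64_t period_ns = 1000000000ULL;	/* 1 PPS */
	uint64_t ticks = period_ns / ssinc_ns;		/* 100000000 */

	/* registers take tick counts minus one; width is half the
	 * interval for the 50% duty cycle the driver hardcodes */
	printf("INTERVAL = %llu\n", (unsigned long long)(ticks - 1));
	printf("WIDTH    = %llu\n", (unsigned long long)((ticks >> 1) - 1));
	return 0;
}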
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
index 3658afc7801d..0e0b8ec3b504 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
@@ -106,7 +106,29 @@ static int xgbe_settime(struct ptp_clock_info *info,
static int xgbe_enable(struct ptp_clock_info *info,
struct ptp_clock_request *request, int on)
{
- return -EOPNOTSUPP;
+ struct xgbe_prv_data *pdata = container_of(info, struct xgbe_prv_data,
+ ptp_clock_info);
+ struct xgbe_pps_config *pps_cfg;
+ unsigned long flags;
+ int ret;
+
+ dev_dbg(pdata->dev, "rq->type %d on %d\n", request->type, on);
+
+ if (request->type != PTP_CLK_REQ_PEROUT)
+ return -EOPNOTSUPP;
+
+ pps_cfg = &pdata->pps[request->perout.index];
+
+ pps_cfg->start.tv_sec = request->perout.start.sec;
+ pps_cfg->start.tv_nsec = request->perout.start.nsec;
+ pps_cfg->period.tv_sec = request->perout.period.sec;
+ pps_cfg->period.tv_nsec = request->perout.period.nsec;
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+ ret = xgbe_pps_config(pdata, pps_cfg, request->perout.index, on);
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+ return ret;
}
void xgbe_ptp_register(struct xgbe_prv_data *pdata)
@@ -122,6 +144,8 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
info->adjtime = xgbe_adjtime;
info->gettimex64 = xgbe_gettimex;
info->settime64 = xgbe_settime;
+ info->n_per_out = pdata->hw_feat.pps_out_num;
+ info->n_ext_ts = pdata->hw_feat.aux_snap_num;
info->enable = xgbe_enable;
clock = ptp_clock_register(info, pdata->dev);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index d7e03e292ec4..e8bbb6805901 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -142,6 +142,10 @@
#define XGBE_V2_TSTAMP_SNSINC 0
#define XGBE_V2_PTP_ACT_CLK_FREQ 1000000000
+/* Define maximum supported values */
+#define XGBE_MAX_PPS_OUT 4
+#define XGBE_MAX_AUX_SNAP 4
+
/* Driver PMT macros */
#define XGMAC_DRIVER_CONTEXT 1
#define XGMAC_IOCTL_CONTEXT 2
@@ -168,6 +172,7 @@
/* Default coalescing parameters */
#define XGMAC_INIT_DMA_TX_USECS 1000
#define XGMAC_INIT_DMA_TX_FRAMES 25
+#define XGMAC_MAX_COAL_TX_TICK 100000
#define XGMAC_MAX_DMA_RIWT 0xff
#define XGMAC_INIT_DMA_RX_USECS 30
@@ -672,6 +677,11 @@ struct xgbe_ext_stats {
u64 rx_vxlan_csum_errors;
};
+struct xgbe_pps_config {
+ struct timespec64 start;
+ struct timespec64 period;
+};
+
struct xgbe_hw_if {
int (*tx_complete)(struct xgbe_ring_desc *);
@@ -1142,6 +1152,9 @@ struct xgbe_prv_data {
struct sk_buff *tx_tstamp_skb;
u64 tx_tstamp;
+ /* Pulse Per Second output */
+ struct xgbe_pps_config pps[XGBE_MAX_PPS_OUT];
+
/* DCB support */
struct ieee_ets *ets;
struct ieee_pfc *pfc;
@@ -1304,6 +1317,10 @@ void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
int xgbe_init_ptp(struct xgbe_prv_data *pdata);
void xgbe_update_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
unsigned int nsec);
+
+int xgbe_pps_config(struct xgbe_prv_data *pdata, struct xgbe_pps_config *cfg,
+ int index, bool on);
+
#ifdef CONFIG_DEBUG_FS
void xgbe_debugfs_init(struct xgbe_prv_data *);
void xgbe_debugfs_exit(struct xgbe_prv_data *);
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 0fc10e6c6902..9fdef874f5ca 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -257,6 +257,7 @@ config BNGE
tristate "Broadcom Ethernet device support"
depends on PCI
select NET_DEVLINK
+ select PAGE_POOL
help
This driver supports Broadcom 50/100/200/400/800 gigabit Ethernet cards.
The module will be called bng_en. To compile this driver as a module,
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge.h b/drivers/net/ethernet/broadcom/bnge/bnge.h
index 6fb3683b6b04..7aed5f81cd51 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge.h
+++ b/drivers/net/ethernet/broadcom/bnge/bnge.h
@@ -102,6 +102,10 @@ struct bnge_dev {
u16 chip_num;
u8 chip_rev;
+#if BITS_PER_LONG == 32
+ /* ensure atomic 64-bit doorbell writes on 32-bit systems. */
+ spinlock_t db_lock;
+#endif
int db_offset; /* db_offset within db_size */
int db_size;
@@ -129,6 +133,7 @@ struct bnge_dev {
unsigned long state;
#define BNGE_STATE_DRV_REGISTERED 0
+#define BNGE_STATE_OPEN 1
u64 fw_cap;
@@ -155,6 +160,7 @@ struct bnge_dev {
u16 rss_indir_tbl_entries;
u32 rss_cap;
+ u32 rss_hash_cfg;
u16 rx_nr_rings;
u16 tx_nr_rings;
@@ -213,6 +219,27 @@ static inline bool bnge_is_agg_reqd(struct bnge_dev *bd)
return true;
}
+static inline void bnge_writeq(struct bnge_dev *bd, u64 val,
+ void __iomem *addr)
+{
+#if BITS_PER_LONG == 32
+ spin_lock(&bd->db_lock);
+ lo_hi_writeq(val, addr);
+ spin_unlock(&bd->db_lock);
+#else
+ writeq(val, addr);
+#endif
+}
+
+/* For TX and RX ring doorbells */
+static inline void bnge_db_write(struct bnge_dev *bd, struct bnge_db_info *db,
+ u32 idx)
+{
+ bnge_writeq(bd, db->db_key64 | DB_RING_IDX(db, idx),
+ db->doorbell);
+}
+
bool bnge_aux_registered(struct bnge_dev *bd);
+u16 bnge_aux_get_msix(struct bnge_dev *bd);
#endif /* _BNGE_H_ */
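The db_lock above exists only because a 64-bit MMIO store is not atomic on 32-bit kernels: lo_hi_writeq() (include/linux/io-64-nonatomic-lo-hi.h) issues two 32-bit stores, so unserialized writers could interleave halves of different doorbell values. A sketch of the decomposition being protected; the helper name is invented:

#include <linux/io.h>

static inline void db_writeq_sketch(u64 val, void __iomem *addr)
{
	/* what lo_hi_writeq() boils down to on 32-bit builds */
	writel(lower_32_bits(val), addr);	/* low half first */
	writel(upper_32_bits(val), addr + 4);	/* then high half */
}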
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_core.c b/drivers/net/ethernet/broadcom/bnge/bnge_core.c
index 68da656f2894..2c72dd34d50d 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_core.c
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_core.c
@@ -96,6 +96,16 @@ static void bnge_fw_unregister_dev(struct bnge_dev *bd)
bnge_free_ctx_mem(bd);
}
+static void bnge_set_dflt_rss_hash_type(struct bnge_dev *bd)
+{
+ bd->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
+}
+
static int bnge_fw_register_dev(struct bnge_dev *bd)
{
int rc;
@@ -137,6 +147,8 @@ static int bnge_fw_register_dev(struct bnge_dev *bd)
goto err_func_unrgtr;
}
+ bnge_set_dflt_rss_hash_type(bd);
+
return 0;
err_func_unrgtr:
@@ -296,6 +308,10 @@ static int bnge_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_config_uninit;
}
+#if BITS_PER_LONG == 32
+ spin_lock_init(&bd->db_lock);
+#endif
+
rc = bnge_alloc_irqs(bd);
if (rc) {
dev_err(&pdev->dev, "Error IRQ allocation rc = %d\n", rc);
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_db.h b/drivers/net/ethernet/broadcom/bnge/bnge_db.h
new file mode 100644
index 000000000000..950ed582f1d8
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_db.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Broadcom */
+
+#ifndef _BNGE_DB_H_
+#define _BNGE_DB_H_
+
+/* 64-bit doorbell */
+#define DBR_EPOCH_SFT 24
+#define DBR_TOGGLE_SFT 25
+#define DBR_XID_SFT 32
+#define DBR_PATH_L2 (0x1ULL << 56)
+#define DBR_VALID (0x1ULL << 58)
+#define DBR_TYPE_SQ (0x0ULL << 60)
+#define DBR_TYPE_SRQ (0x2ULL << 60)
+#define DBR_TYPE_CQ (0x4ULL << 60)
+#define DBR_TYPE_CQ_ARMALL (0x6ULL << 60)
+#define DBR_TYPE_NQ (0xaULL << 60)
+#define DBR_TYPE_NQ_ARM (0xbULL << 60)
+#define DBR_TYPE_NQ_MASK (0xeULL << 60)
+
+struct bnge_db_info {
+ void __iomem *doorbell;
+ u64 db_key64;
+ u32 db_ring_mask;
+ u32 db_epoch_mask;
+ u8 db_epoch_shift;
+};
+
+#define DB_EPOCH(db, idx) (((idx) & (db)->db_epoch_mask) << \
+ ((db)->db_epoch_shift))
+#define DB_RING_IDX(db, idx) (((idx) & (db)->db_ring_mask) | \
+ DB_EPOCH(db, idx))
+
+#endif /* _BNGE_DB_H_ */
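DB_RING_IDX() masks the producer index down to the ring size while DB_EPOCH() folds in one wrap-parity bit at DBR_EPOCH_SFT, letting hardware detect a full ring wrap. A compilable sketch with an assumed 256-entry ring (mask 0xff, epoch taken from bit 8, db_epoch_shift = 24 - 8):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t ring_mask = 0xff;	/* 256-entry ring */
	const uint32_t epoch_mask = 0x100;	/* first bit above the mask */
	const int epoch_shift = 24 - 8;		/* lands at DBR_EPOCH_SFT */
	uint32_t idx;

	for (idx = 254; idx < 259; idx++) {
		uint32_t db = (idx & ring_mask) |
			      ((idx & epoch_mask) << epoch_shift);
		/* epoch flips from 0 to 1 as idx crosses 255 -> 256 */
		printf("idx %3u -> ring %3u epoch %u\n",
		       idx, db & ring_mask, (db >> 24) & 1);
	}
	return 0;
}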
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c
index 5c178fade065..198f49b40dbf 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c
@@ -6,6 +6,8 @@
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/bnxt/hsi.h>
+#include <linux/if_vlan.h>
+#include <net/netdev_queues.h>
#include "bnge.h"
#include "bnge_hwrm.h"
@@ -701,3 +703,483 @@ qportcfg_exit:
bnge_hwrm_req_drop(bd, req);
return rc;
}
+
+int bnge_hwrm_vnic_set_hds(struct bnge_net *bn, struct bnge_vnic_info *vnic)
+{
+ u16 hds_thresh = (u16)bn->netdev->cfg_pending->hds_thresh;
+ struct hwrm_vnic_plcmodes_cfg_input *req;
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_PLCMODES_CFG);
+ if (rc)
+ return rc;
+
+ req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
+ req->enables = cpu_to_le32(BNGE_PLC_EN_JUMBO_THRES_VALID);
+ req->jumbo_thresh = cpu_to_le16(bn->rx_buf_use_size);
+
+ if (bnge_is_agg_reqd(bd)) {
+ req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
+ VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
+ req->enables |=
+ cpu_to_le32(BNGE_PLC_EN_HDS_THRES_VALID);
+ req->hds_threshold = cpu_to_le16(hds_thresh);
+ }
+ req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+ return bnge_hwrm_req_send(bd, req);
+}
+
+int bnge_hwrm_vnic_ctx_alloc(struct bnge_dev *bd,
+ struct bnge_vnic_info *vnic, u16 ctx_idx)
+{
+ struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
+ struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
+ if (rc)
+ return rc;
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (!rc)
+ vnic->fw_rss_cos_lb_ctx[ctx_idx] =
+ le16_to_cpu(resp->rss_cos_lb_ctx_id);
+ bnge_hwrm_req_drop(bd, req);
+
+ return rc;
+}
+
+static void
+__bnge_hwrm_vnic_set_rss(struct bnge_net *bn,
+ struct hwrm_vnic_rss_cfg_input *req,
+ struct bnge_vnic_info *vnic)
+{
+ struct bnge_dev *bd = bn->bd;
+
+ bnge_fill_hw_rss_tbl(bn, vnic);
+ req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;
+
+ req->hash_type = cpu_to_le32(bd->rss_hash_cfg);
+ req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
+ req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
+ req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
+}
+
+int bnge_hwrm_vnic_set_rss(struct bnge_net *bn,
+ struct bnge_vnic_info *vnic, bool set_rss)
+{
+ struct hwrm_vnic_rss_cfg_input *req;
+ struct bnge_dev *bd = bn->bd;
+ dma_addr_t ring_tbl_map;
+ u32 i, nr_ctxs;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_CFG);
+ if (rc)
+ return rc;
+
+ req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+ if (!set_rss)
+ return bnge_hwrm_req_send(bd, req);
+
+ __bnge_hwrm_vnic_set_rss(bn, req, vnic);
+ ring_tbl_map = vnic->rss_table_dma_addr;
+ nr_ctxs = bnge_cal_nr_rss_ctxs(bd->rx_nr_rings);
+
+ bnge_hwrm_req_hold(bd, req);
+ for (i = 0; i < nr_ctxs; ring_tbl_map += BNGE_RSS_TABLE_SIZE, i++) {
+ req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
+ req->ring_table_pair_index = i;
+ req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ goto exit;
+ }
+
+exit:
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_vnic_cfg(struct bnge_net *bn, struct bnge_vnic_info *vnic)
+{
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[0];
+ struct hwrm_vnic_cfg_input *req;
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_CFG);
+ if (rc)
+ return rc;
+
+ req->default_rx_ring_id =
+ cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
+ req->default_cmpl_ring_id =
+ cpu_to_le16(bnge_cp_ring_for_rx(rxr));
+ req->enables =
+ cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
+ VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
+ vnic->mru = bd->netdev->mtu + ETH_HLEN + VLAN_HLEN;
+ req->mru = cpu_to_le16(vnic->mru);
+
+ req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+
+ if (bd->flags & BNGE_EN_STRIP_VLAN)
+ req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
+ if (vnic->vnic_id == BNGE_VNIC_DEFAULT && bnge_aux_registered(bd))
+ req->flags |= cpu_to_le32(BNGE_VNIC_CFG_ROCE_DUAL_MODE);
+
+ return bnge_hwrm_req_send(bd, req);
+}
+
+void bnge_hwrm_update_rss_hash_cfg(struct bnge_net *bn)
+{
+ struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ struct hwrm_vnic_rss_qcfg_output *resp;
+ struct hwrm_vnic_rss_qcfg_input *req;
+ struct bnge_dev *bd = bn->bd;
+
+ if (bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_QCFG))
+ return;
+
+ req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+ /* All contexts are configured with the same hash_type; context zero always exists */
+ req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
+ resp = bnge_hwrm_req_hold(bd, req);
+ if (!bnge_hwrm_req_send(bd, req))
+ bd->rss_hash_cfg =
+ le32_to_cpu(resp->hash_type) ?: bd->rss_hash_cfg;
+ bnge_hwrm_req_drop(bd, req);
+}
+
+int bnge_hwrm_l2_filter_free(struct bnge_dev *bd, struct bnge_l2_filter *fltr)
+{
+ struct hwrm_cfa_l2_filter_free_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_CFA_L2_FILTER_FREE);
+ if (rc)
+ return rc;
+
+ req->l2_filter_id = fltr->base.filter_id;
+ return bnge_hwrm_req_send(bd, req);
+}
+
+int bnge_hwrm_l2_filter_alloc(struct bnge_dev *bd, struct bnge_l2_filter *fltr)
+{
+ struct hwrm_cfa_l2_filter_alloc_output *resp;
+ struct hwrm_cfa_l2_filter_alloc_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_CFA_L2_FILTER_ALLOC);
+ if (rc)
+ return rc;
+
+ req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
+
+ req->flags |= cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
+ req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id);
+ req->enables =
+ cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
+ ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr);
+ eth_broadcast_addr(req->l2_addr_mask);
+
+ if (fltr->l2_key.vlan) {
+ req->enables |=
+ cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK |
+ CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS);
+ req->num_vlans = 1;
+ req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan);
+ req->l2_ivlan_mask = cpu_to_le16(0xfff);
+ }
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (!rc)
+ fltr->base.filter_id = resp->l2_filter_id;
+
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int bnge_hwrm_cfa_l2_set_rx_mask(struct bnge_dev *bd,
+ struct bnge_vnic_info *vnic)
+{
+ struct hwrm_cfa_l2_set_rx_mask_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_CFA_L2_SET_RX_MASK);
+ if (rc)
+ return rc;
+
+ req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+ if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
+ req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
+ req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
+ }
+ req->mask = cpu_to_le32(vnic->rx_mask);
+ return bnge_hwrm_req_send_silent(bd, req);
+}
+
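+/* Allocate a VNIC in firmware and record the returned id. The default
+ * VNIC is flagged as such; all RSS contexts start out as
+ * INVALID_HW_RING_ID until explicitly allocated.
+ */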
+int bnge_hwrm_vnic_alloc(struct bnge_dev *bd, struct bnge_vnic_info *vnic,
+ unsigned int nr_rings)
+{
+ struct hwrm_vnic_alloc_output *resp;
+ struct hwrm_vnic_alloc_input *req;
+ unsigned int i;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_ALLOC);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < BNGE_MAX_CTX_PER_VNIC; i++)
+ vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
+ if (vnic->vnic_id == BNGE_VNIC_DEFAULT)
+ req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ if (!rc)
+ vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+void bnge_hwrm_vnic_free_one(struct bnge_dev *bd, struct bnge_vnic_info *vnic)
+{
+ if (vnic->fw_vnic_id != INVALID_HW_RING_ID) {
+ struct hwrm_vnic_free_input *req;
+
+ if (bnge_hwrm_req_init(bd, req, HWRM_VNIC_FREE))
+ return;
+
+ req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+
+ bnge_hwrm_req_send(bd, req);
+ vnic->fw_vnic_id = INVALID_HW_RING_ID;
+ }
+}
+
+void bnge_hwrm_vnic_ctx_free_one(struct bnge_dev *bd,
+ struct bnge_vnic_info *vnic, u16 ctx_idx)
+{
+ struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
+
+ if (bnge_hwrm_req_init(bd, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
+ return;
+
+ req->rss_cos_lb_ctx_id =
+ cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]);
+
+ bnge_hwrm_req_send(bd, req);
+ vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
+}
+
+void bnge_hwrm_stat_ctx_free(struct bnge_net *bn)
+{
+ struct hwrm_stat_ctx_free_input *req;
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ if (bnge_hwrm_req_init(bd, req, HWRM_STAT_CTX_FREE))
+ return;
+
+ bnge_hwrm_req_hold(bd, req);
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
+
+ if (nqr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
+ req->stat_ctx_id = cpu_to_le32(nqr->hw_stats_ctx_id);
+ bnge_hwrm_req_send(bd, req);
+
+ nqr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
+ }
+ }
+ bnge_hwrm_req_drop(bd, req);
+}
+
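+/* Allocate one stats context per NQ, reusing a single held request and
+ * pointing stats_dma_addr at each ring's stats buffer in turn.
+ */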
+int bnge_hwrm_stat_ctx_alloc(struct bnge_net *bn)
+{
+ struct hwrm_stat_ctx_alloc_output *resp;
+ struct hwrm_stat_ctx_alloc_input *req;
+ struct bnge_dev *bd = bn->bd;
+ int rc, i;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_STAT_CTX_ALLOC);
+ if (rc)
+ return rc;
+
+ req->stats_dma_length = cpu_to_le16(bd->hw_ring_stats_size);
+ req->update_period_ms = cpu_to_le32(bn->stats_coal_ticks / 1000);
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
+
+ req->stats_dma_addr = cpu_to_le64(nqr->stats.hw_stats_map);
+
+ rc = bnge_hwrm_req_send(bd, req);
+ if (rc)
+ break;
+
+ nqr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
+ bn->grp_info[i].fw_stats_ctx = nqr->hw_stats_ctx_id;
+ }
+ bnge_hwrm_req_drop(bd, req);
+ return rc;
+}
+
+int hwrm_ring_free_send_msg(struct bnge_net *bn,
+ struct bnge_ring_struct *ring,
+ u32 ring_type, int cmpl_ring_id)
+{
+ struct hwrm_ring_free_input *req;
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_RING_FREE);
+ if (rc)
+ goto exit;
+
+ req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
+ req->ring_type = ring_type;
+ req->ring_id = cpu_to_le16(ring->fw_ring_id);
+
+ bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ bnge_hwrm_req_drop(bd, req);
+exit:
+ if (rc) {
+ netdev_err(bd->netdev, "hwrm_ring_free type %d failed. rc:%d\n", ring_type, rc);
+ return -EIO;
+ }
+ return 0;
+}
+
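+/* Common helper to allocate any ring type in firmware. Fills in the
+ * ring memory layout, then the type-specific associations (completion
+ * ring, stats context, NQ, etc.) before sending the request.
+ */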
+int hwrm_ring_alloc_send_msg(struct bnge_net *bn,
+ struct bnge_ring_struct *ring,
+ u32 ring_type, u32 map_index)
+{
+ struct bnge_ring_mem_info *rmem = &ring->ring_mem;
+ struct bnge_ring_grp_info *grp_info;
+ struct hwrm_ring_alloc_output *resp;
+ struct hwrm_ring_alloc_input *req;
+ struct bnge_dev *bd = bn->bd;
+ u16 ring_id, flags = 0;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_RING_ALLOC);
+ if (rc)
+ goto exit;
+
+ req->enables = 0;
+ if (rmem->nr_pages > 1) {
+ req->page_tbl_addr = cpu_to_le64(rmem->dma_pg_tbl);
+ /* Page size is in log2 units */
+ req->page_size = BNGE_PAGE_SHIFT;
+ req->page_tbl_depth = 1;
+ } else {
+ req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
+ }
+ req->fbo = 0;
+ /* Association of ring index with doorbell index and MSIX number */
+ req->logical_id = cpu_to_le16(map_index);
+
+ switch (ring_type) {
+ case HWRM_RING_ALLOC_TX: {
+ struct bnge_tx_ring_info *txr;
+
+ txr = container_of(ring, struct bnge_tx_ring_info,
+ tx_ring_struct);
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
+ /* Association of transmit ring with completion ring */
+ grp_info = &bn->grp_info[ring->grp_idx];
+ req->cmpl_ring_id = cpu_to_le16(bnge_cp_ring_for_tx(txr));
+ req->length = cpu_to_le32(bn->tx_ring_mask + 1);
+ req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+ req->queue_id = cpu_to_le16(ring->queue_id);
+ req->flags = cpu_to_le16(flags);
+ break;
+ }
+ case HWRM_RING_ALLOC_RX:
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+ req->length = cpu_to_le32(bn->rx_ring_mask + 1);
+
+ /* Association of rx ring with stats context */
+ grp_info = &bn->grp_info[ring->grp_idx];
+ req->rx_buf_size = cpu_to_le16(bn->rx_buf_use_size);
+ req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+ req->enables |=
+ cpu_to_le32(RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
+ if (NET_IP_ALIGN == 2)
+ flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
+ req->flags = cpu_to_le16(flags);
+ break;
+ case HWRM_RING_ALLOC_AGG:
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
+ /* Association of agg ring with rx ring */
+ grp_info = &bn->grp_info[ring->grp_idx];
+ req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
+ req->rx_buf_size = cpu_to_le16(BNGE_RX_PAGE_SIZE);
+ req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+ req->enables |=
+ cpu_to_le32(RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
+ RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
+ req->length = cpu_to_le32(bn->rx_agg_ring_mask + 1);
+ break;
+ case HWRM_RING_ALLOC_CMPL:
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
+ req->length = cpu_to_le32(bn->cp_ring_mask + 1);
+ /* Association of cp ring with nq */
+ grp_info = &bn->grp_info[map_index];
+ req->nq_ring_id = cpu_to_le16(grp_info->nq_fw_ring_id);
+ req->cq_handle = cpu_to_le64(ring->handle);
+ req->enables |=
+ cpu_to_le32(RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
+ break;
+ case HWRM_RING_ALLOC_NQ:
+ req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
+ req->length = cpu_to_le32(bn->cp_ring_mask + 1);
+ req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+ break;
+ default:
+ netdev_err(bn->netdev, "hwrm alloc invalid ring type %d\n", ring_type);
+ return -EINVAL;
+ }
+
+ resp = bnge_hwrm_req_hold(bd, req);
+ rc = bnge_hwrm_req_send(bd, req);
+ ring_id = le16_to_cpu(resp->ring_id);
+ bnge_hwrm_req_drop(bd, req);
+
+exit:
+ if (rc) {
+ netdev_err(bd->netdev, "hwrm_ring_alloc type %d failed. rc:%d\n", ring_type, rc);
+ return -EIO;
+ }
+ ring->fw_ring_id = ring_id;
+ return rc;
+}
+
+int bnge_hwrm_set_async_event_cr(struct bnge_dev *bd, int idx)
+{
+ struct hwrm_func_cfg_input *req;
+ int rc;
+
+ rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_CFG);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
+ req->async_event_cr = cpu_to_le16(idx);
+ return bnge_hwrm_req_send(bd, req);
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h
index 6c03923eb559..042f28e84a05 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h
@@ -4,6 +4,13 @@
#ifndef _BNGE_HWRM_LIB_H_
#define _BNGE_HWRM_LIB_H_
+#define BNGE_PLC_EN_JUMBO_THRES_VALID \
+ VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID
+#define BNGE_PLC_EN_HDS_THRES_VALID \
+ VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID
+#define BNGE_VNIC_CFG_ROCE_DUAL_MODE \
+ VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE
+
int bnge_hwrm_ver_get(struct bnge_dev *bd);
int bnge_hwrm_func_reset(struct bnge_dev *bd);
int bnge_hwrm_fw_set_time(struct bnge_dev *bd);
@@ -24,4 +31,28 @@ int bnge_hwrm_func_qcfg(struct bnge_dev *bd);
int bnge_hwrm_func_resc_qcaps(struct bnge_dev *bd);
int bnge_hwrm_queue_qportcfg(struct bnge_dev *bd);
+int bnge_hwrm_vnic_set_hds(struct bnge_net *bn, struct bnge_vnic_info *vnic);
+int bnge_hwrm_vnic_ctx_alloc(struct bnge_dev *bd,
+ struct bnge_vnic_info *vnic, u16 ctx_idx);
+int bnge_hwrm_vnic_set_rss(struct bnge_net *bn,
+ struct bnge_vnic_info *vnic, bool set_rss);
+int bnge_hwrm_vnic_cfg(struct bnge_net *bn, struct bnge_vnic_info *vnic);
+void bnge_hwrm_update_rss_hash_cfg(struct bnge_net *bn);
+int bnge_hwrm_vnic_alloc(struct bnge_dev *bd, struct bnge_vnic_info *vnic,
+ unsigned int nr_rings);
+void bnge_hwrm_vnic_free_one(struct bnge_dev *bd, struct bnge_vnic_info *vnic);
+void bnge_hwrm_vnic_ctx_free_one(struct bnge_dev *bd,
+ struct bnge_vnic_info *vnic, u16 ctx_idx);
+int bnge_hwrm_l2_filter_free(struct bnge_dev *bd, struct bnge_l2_filter *fltr);
+int bnge_hwrm_l2_filter_alloc(struct bnge_dev *bd, struct bnge_l2_filter *fltr);
+int bnge_hwrm_cfa_l2_set_rx_mask(struct bnge_dev *bd,
+ struct bnge_vnic_info *vnic);
+void bnge_hwrm_stat_ctx_free(struct bnge_net *bn);
+int bnge_hwrm_stat_ctx_alloc(struct bnge_net *bn);
+int hwrm_ring_free_send_msg(struct bnge_net *bn, struct bnge_ring_struct *ring,
+ u32 ring_type, int cmpl_ring_id);
+int hwrm_ring_alloc_send_msg(struct bnge_net *bn,
+ struct bnge_ring_struct *ring,
+ u32 ring_type, u32 map_index);
+int bnge_hwrm_set_async_event_cr(struct bnge_dev *bd, int idx);
#endif /* _BNGE_HWRM_LIB_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
index 02254934f3d0..832eeb960bd2 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
@@ -14,10 +14,2194 @@
#include <linux/if.h>
#include <net/ip.h>
#include <linux/skbuff.h>
+#include <net/page_pool/helpers.h>
#include "bnge.h"
#include "bnge_hwrm_lib.h"
#include "bnge_ethtool.h"
+#include "bnge_rmem.h"
+
+#define BNGE_RING_TO_TC_OFF(bd, tx) \
+ ((tx) % (bd)->tx_nr_rings_per_tc)
+
+#define BNGE_RING_TO_TC(bd, tx) \
+ ((tx) / (bd)->tx_nr_rings_per_tc)
+
+#define BNGE_TC_TO_RING_BASE(bd, tc) \
+ ((tc) * (bd)->tx_nr_rings_per_tc)
+
+static void bnge_free_stats_mem(struct bnge_net *bn,
+ struct bnge_stats_mem *stats)
+{
+ struct bnge_dev *bd = bn->bd;
+
+ if (stats->hw_stats) {
+ dma_free_coherent(bd->dev, stats->len, stats->hw_stats,
+ stats->hw_stats_map);
+ stats->hw_stats = NULL;
+ }
+}
+
+static int bnge_alloc_stats_mem(struct bnge_net *bn,
+ struct bnge_stats_mem *stats)
+{
+ struct bnge_dev *bd = bn->bd;
+
+ stats->hw_stats = dma_alloc_coherent(bd->dev, stats->len,
+ &stats->hw_stats_map, GFP_KERNEL);
+ if (!stats->hw_stats)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void bnge_free_ring_stats(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ if (!bn->bnapi)
+ return;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
+
+ bnge_free_stats_mem(bn, &nqr->stats);
+ }
+}
+
+static int bnge_alloc_ring_stats(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ u32 size, i;
+ int rc;
+
+ size = bd->hw_ring_stats_size;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
+
+ nqr->stats.len = size;
+ rc = bnge_alloc_stats_mem(bn, &nqr->stats);
+ if (rc)
+ goto err_free_ring_stats;
+
+ nqr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
+ }
+ return 0;
+
+err_free_ring_stats:
+ bnge_free_ring_stats(bn);
+ return rc;
+}
+
+static void bnge_free_nq_desc_arr(struct bnge_nq_ring_info *nqr)
+{
+ struct bnge_ring_struct *ring = &nqr->ring_struct;
+
+ kfree(nqr->desc_ring);
+ nqr->desc_ring = NULL;
+ ring->ring_mem.pg_arr = NULL;
+ kfree(nqr->desc_mapping);
+ nqr->desc_mapping = NULL;
+ ring->ring_mem.dma_arr = NULL;
+}
+
+static void bnge_free_cp_desc_arr(struct bnge_cp_ring_info *cpr)
+{
+ struct bnge_ring_struct *ring = &cpr->ring_struct;
+
+ kfree(cpr->desc_ring);
+ cpr->desc_ring = NULL;
+ ring->ring_mem.pg_arr = NULL;
+ kfree(cpr->desc_mapping);
+ cpr->desc_mapping = NULL;
+ ring->ring_mem.dma_arr = NULL;
+}
+
+static int bnge_alloc_nq_desc_arr(struct bnge_nq_ring_info *nqr, int n)
+{
+ nqr->desc_ring = kcalloc(n, sizeof(*nqr->desc_ring), GFP_KERNEL);
+ if (!nqr->desc_ring)
+ return -ENOMEM;
+
+ nqr->desc_mapping = kcalloc(n, sizeof(*nqr->desc_mapping), GFP_KERNEL);
+ if (!nqr->desc_mapping)
+ goto err_free_desc_ring;
+ return 0;
+
+err_free_desc_ring:
+ kfree(nqr->desc_ring);
+ nqr->desc_ring = NULL;
+ return -ENOMEM;
+}
+
+static int bnge_alloc_cp_desc_arr(struct bnge_cp_ring_info *cpr, int n)
+{
+ cpr->desc_ring = kcalloc(n, sizeof(*cpr->desc_ring), GFP_KERNEL);
+ if (!cpr->desc_ring)
+ return -ENOMEM;
+
+ cpr->desc_mapping = kcalloc(n, sizeof(*cpr->desc_mapping), GFP_KERNEL);
+ if (!cpr->desc_mapping)
+ goto err_free_desc_ring;
+ return 0;
+
+err_free_desc_ring:
+ kfree(cpr->desc_ring);
+ cpr->desc_ring = NULL;
+ return -ENOMEM;
+}
+
+static void bnge_free_nq_arrays(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+
+ bnge_free_nq_desc_arr(&bnapi->nq_ring);
+ }
+}
+
+static int bnge_alloc_nq_arrays(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, rc;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+
+ rc = bnge_alloc_nq_desc_arr(&bnapi->nq_ring, bn->cp_nr_pages);
+ if (rc)
+ goto err_free_nq_arrays;
+ }
+ return 0;
+
+err_free_nq_arrays:
+ bnge_free_nq_arrays(bn);
+ return rc;
+}
+
+static void bnge_free_nq_tree(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr;
+ struct bnge_ring_struct *ring;
+ int j;
+
+ nqr = &bnapi->nq_ring;
+ ring = &nqr->ring_struct;
+
+ bnge_free_ring(bd, &ring->ring_mem);
+
+ if (!nqr->cp_ring_arr)
+ continue;
+
+ for (j = 0; j < nqr->cp_ring_count; j++) {
+ struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[j];
+
+ ring = &cpr->ring_struct;
+ bnge_free_ring(bd, &ring->ring_mem);
+ bnge_free_cp_desc_arr(cpr);
+ }
+ kfree(nqr->cp_ring_arr);
+ nqr->cp_ring_arr = NULL;
+ nqr->cp_ring_count = 0;
+ }
+}
+
+static int alloc_one_cp_ring(struct bnge_net *bn,
+ struct bnge_cp_ring_info *cpr)
+{
+ struct bnge_ring_mem_info *rmem;
+ struct bnge_ring_struct *ring;
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
+ rc = bnge_alloc_cp_desc_arr(cpr, bn->cp_nr_pages);
+ if (rc)
+ return -ENOMEM;
+ ring = &cpr->ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bn->cp_nr_pages;
+ rmem->page_size = HW_CMPD_RING_SIZE;
+ rmem->pg_arr = (void **)cpr->desc_ring;
+ rmem->dma_arr = cpr->desc_mapping;
+ rmem->flags = BNGE_RMEM_RING_PTE_FLAG;
+ rc = bnge_alloc_ring(bd, rmem);
+ if (rc)
+ goto err_free_cp_desc_arr;
+ return rc;
+
+err_free_cp_desc_arr:
+ bnge_free_cp_desc_arr(cpr);
+ return rc;
+}
+
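+/* Build the NQ tree: each NQ owns an array of child completion rings,
+ * one for rx (if the NQ services an rx ring) plus one per TC when it
+ * services tx rings.
+ */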
+static int bnge_alloc_nq_tree(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, j, ulp_msix, rc;
+ int tcs = 1;
+
+ ulp_msix = bnge_aux_get_msix(bd);
+ for (i = 0, j = 0; i < bd->nq_nr_rings; i++) {
+ bool sh = !!(bd->flags & BNGE_EN_SHARED_CHNL);
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr;
+ struct bnge_cp_ring_info *cpr;
+ struct bnge_ring_struct *ring;
+ int cp_count = 0, k;
+ int rx = 0, tx = 0;
+
+ nqr = &bnapi->nq_ring;
+ nqr->bnapi = bnapi;
+ ring = &nqr->ring_struct;
+
+ rc = bnge_alloc_ring(bd, &ring->ring_mem);
+ if (rc)
+ goto err_free_nq_tree;
+
+ ring->map_idx = ulp_msix + i;
+
+ if (i < bd->rx_nr_rings) {
+ cp_count++;
+ rx = 1;
+ }
+
+ if ((sh && i < bd->tx_nr_rings) ||
+ (!sh && i >= bd->rx_nr_rings)) {
+ cp_count += tcs;
+ tx = 1;
+ }
+
+ nqr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr),
+ GFP_KERNEL);
+ if (!nqr->cp_ring_arr) {
+ rc = -ENOMEM;
+ goto err_free_nq_tree;
+ }
+
+ nqr->cp_ring_count = cp_count;
+
+ for (k = 0; k < cp_count; k++) {
+ cpr = &nqr->cp_ring_arr[k];
+ rc = alloc_one_cp_ring(bn, cpr);
+ if (rc)
+ goto err_free_nq_tree;
+
+ cpr->bnapi = bnapi;
+ cpr->cp_idx = k;
+ if (!k && rx) {
+ bn->rx_ring[i].rx_cpr = cpr;
+ cpr->cp_ring_type = BNGE_NQ_HDL_TYPE_RX;
+ } else {
+ int n, tc = k - rx;
+
+ n = BNGE_TC_TO_RING_BASE(bd, tc) + j;
+ bn->tx_ring[n].tx_cpr = cpr;
+ cpr->cp_ring_type = BNGE_NQ_HDL_TYPE_TX;
+ }
+ }
+ if (tx)
+ j++;
+ }
+ return 0;
+
+err_free_nq_tree:
+ bnge_free_nq_tree(bn);
+ return rc;
+}
+
+static bool bnge_separate_head_pool(struct bnge_rx_ring_info *rxr)
+{
+ return rxr->need_head_pool || PAGE_SIZE > BNGE_RX_PAGE_SIZE;
+}
+
+static void bnge_free_one_rx_ring_bufs(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ int i, max_idx;
+
+ if (!rxr->rx_buf_ring)
+ return;
+
+ max_idx = bn->rx_nr_pages * RX_DESC_CNT;
+
+ for (i = 0; i < max_idx; i++) {
+ struct bnge_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
+ void *data = rx_buf->data;
+
+ if (!data)
+ continue;
+
+ rx_buf->data = NULL;
+ page_pool_free_va(rxr->head_pool, data, true);
+ }
+}
+
+static void bnge_free_one_agg_ring_bufs(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ int i, max_idx;
+
+ if (!rxr->rx_agg_buf_ring)
+ return;
+
+ max_idx = bn->rx_agg_nr_pages * RX_DESC_CNT;
+
+ for (i = 0; i < max_idx; i++) {
+ struct bnge_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_buf_ring[i];
+ netmem_ref netmem = rx_agg_buf->netmem;
+
+ if (!netmem)
+ continue;
+
+ rx_agg_buf->netmem = 0;
+ __clear_bit(i, rxr->rx_agg_bmap);
+
+ page_pool_recycle_direct_netmem(rxr->page_pool, netmem);
+ }
+}
+
+static void bnge_free_one_rx_ring_pair_bufs(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ bnge_free_one_rx_ring_bufs(bn, rxr);
+ bnge_free_one_agg_ring_bufs(bn, rxr);
+}
+
+static void bnge_free_rx_ring_pair_bufs(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ if (!bn->rx_ring)
+ return;
+
+ for (i = 0; i < bd->rx_nr_rings; i++)
+ bnge_free_one_rx_ring_pair_bufs(bn, &bn->rx_ring[i]);
+}
+
+static void bnge_free_all_rings_bufs(struct bnge_net *bn)
+{
+ bnge_free_rx_ring_pair_bufs(bn);
+}
+
+static void bnge_free_rx_rings(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
+ struct bnge_ring_struct *ring;
+
+ page_pool_destroy(rxr->page_pool);
+ page_pool_destroy(rxr->head_pool);
+ rxr->page_pool = rxr->head_pool = NULL;
+
+ kfree(rxr->rx_agg_bmap);
+ rxr->rx_agg_bmap = NULL;
+
+ ring = &rxr->rx_ring_struct;
+ bnge_free_ring(bd, &ring->ring_mem);
+
+ ring = &rxr->rx_agg_ring_struct;
+ bnge_free_ring(bd, &ring->ring_mem);
+ }
+}
+
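+/* Create the page pool(s) for an rx ring. A separate head pool backs
+ * header buffers when the main pool may hand out unreadable netmem or
+ * when PAGE_SIZE exceeds BNGE_RX_PAGE_SIZE; otherwise the main pool is
+ * shared via page_pool_get().
+ */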
+static int bnge_alloc_rx_page_pool(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ int numa_node)
+{
+ const unsigned int agg_size_fac = PAGE_SIZE / BNGE_RX_PAGE_SIZE;
+ const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
+ struct page_pool_params pp = { 0 };
+ struct bnge_dev *bd = bn->bd;
+ struct page_pool *pool;
+
+ pp.pool_size = bn->rx_agg_ring_size / agg_size_fac;
+ pp.nid = numa_node;
+ pp.netdev = bn->netdev;
+ pp.dev = bd->dev;
+ pp.dma_dir = bn->rx_dir;
+ pp.max_len = PAGE_SIZE;
+ pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV |
+ PP_FLAG_ALLOW_UNREADABLE_NETMEM;
+ pp.queue_idx = rxr->bnapi->index;
+
+ pool = page_pool_create(&pp);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+ rxr->page_pool = pool;
+
+ rxr->need_head_pool = page_pool_is_unreadable(pool);
+ if (bnge_separate_head_pool(rxr)) {
+ pp.pool_size = min(bn->rx_ring_size / rx_size_fac, 1024);
+ pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pool = page_pool_create(&pp);
+ if (IS_ERR(pool))
+ goto err_destroy_pp;
+ } else {
+ page_pool_get(pool);
+ }
+ rxr->head_pool = pool;
+ return 0;
+
+err_destroy_pp:
+ page_pool_destroy(rxr->page_pool);
+ rxr->page_pool = NULL;
+ return PTR_ERR(pool);
+}
+
+static void bnge_enable_rx_page_pool(struct bnge_rx_ring_info *rxr)
+{
+ page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi);
+ page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi);
+}
+
+static int bnge_alloc_rx_agg_bmap(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ u16 mem_size;
+
+ rxr->rx_agg_bmap_size = bn->rx_agg_ring_mask + 1;
+ mem_size = rxr->rx_agg_bmap_size / 8;
+ rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
+ if (!rxr->rx_agg_bmap)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int bnge_alloc_rx_rings(struct bnge_net *bn)
+{
+ int i, rc = 0, agg_rings = 0, cpu;
+ struct bnge_dev *bd = bn->bd;
+
+ if (bnge_is_agg_reqd(bd))
+ agg_rings = 1;
+
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
+ struct bnge_ring_struct *ring;
+ int cpu_node;
+
+ ring = &rxr->rx_ring_struct;
+
+ cpu = cpumask_local_spread(i, dev_to_node(bd->dev));
+ cpu_node = cpu_to_node(cpu);
+ netdev_dbg(bn->netdev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
+ i, cpu_node);
+ rc = bnge_alloc_rx_page_pool(bn, rxr, cpu_node);
+ if (rc)
+ goto err_free_rx_rings;
+ bnge_enable_rx_page_pool(rxr);
+
+ rc = bnge_alloc_ring(bd, &ring->ring_mem);
+ if (rc)
+ goto err_free_rx_rings;
+
+ ring->grp_idx = i;
+ if (agg_rings) {
+ ring = &rxr->rx_agg_ring_struct;
+ rc = bnge_alloc_ring(bd, &ring->ring_mem);
+ if (rc)
+ goto err_free_rx_rings;
+
+ ring->grp_idx = i;
+ rc = bnge_alloc_rx_agg_bmap(bn, rxr);
+ if (rc)
+ goto err_free_rx_rings;
+ }
+ }
+ return rc;
+
+err_free_rx_rings:
+ bnge_free_rx_rings(bn);
+ return rc;
+}
+
+static void bnge_free_tx_rings(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ for (i = 0; i < bd->tx_nr_rings; i++) {
+ struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
+ struct bnge_ring_struct *ring;
+
+ ring = &txr->tx_ring_struct;
+
+ bnge_free_ring(bd, &ring->ring_mem);
+ }
+}
+
+static int bnge_alloc_tx_rings(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, j, rc;
+
+ for (i = 0, j = 0; i < bd->tx_nr_rings; i++) {
+ struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
+ struct bnge_ring_struct *ring;
+ u8 qidx;
+
+ ring = &txr->tx_ring_struct;
+
+ rc = bnge_alloc_ring(bd, &ring->ring_mem);
+ if (rc)
+ goto err_free_tx_rings;
+
+ ring->grp_idx = txr->bnapi->index;
+ qidx = bd->tc_to_qidx[j];
+ ring->queue_id = bd->q_info[qidx].queue_id;
+ if (BNGE_RING_TO_TC_OFF(bd, i) == (bd->tx_nr_rings_per_tc - 1))
+ j++;
+ }
+ return 0;
+
+err_free_tx_rings:
+ bnge_free_tx_rings(bn);
+ return rc;
+}
+
+static void bnge_free_vnic_attributes(struct bnge_net *bn)
+{
+ struct pci_dev *pdev = bn->bd->pdev;
+ struct bnge_vnic_info *vnic;
+ int i;
+
+ if (!bn->vnic_info)
+ return;
+
+ for (i = 0; i < bn->nr_vnics; i++) {
+ vnic = &bn->vnic_info[i];
+
+ kfree(vnic->uc_list);
+ vnic->uc_list = NULL;
+
+ if (vnic->mc_list) {
+ dma_free_coherent(&pdev->dev, vnic->mc_list_size,
+ vnic->mc_list, vnic->mc_list_mapping);
+ vnic->mc_list = NULL;
+ }
+
+ if (vnic->rss_table) {
+ dma_free_coherent(&pdev->dev, vnic->rss_table_size,
+ vnic->rss_table,
+ vnic->rss_table_dma_addr);
+ vnic->rss_table = NULL;
+ }
+
+ vnic->rss_hash_key = NULL;
+ vnic->flags = 0;
+ }
+}
+
+static int bnge_alloc_vnic_attributes(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ struct bnge_vnic_info *vnic;
+ int i, size;
+
+ for (i = 0; i < bn->nr_vnics; i++) {
+ vnic = &bn->vnic_info[i];
+
+ if (vnic->flags & BNGE_VNIC_UCAST_FLAG) {
+ int mem_size = (BNGE_MAX_UC_ADDRS - 1) * ETH_ALEN;
+
+ vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
+ if (!vnic->uc_list)
+ goto err_free_vnic_attributes;
+ }
+
+ if (vnic->flags & BNGE_VNIC_MCAST_FLAG) {
+ vnic->mc_list_size = BNGE_MAX_MC_ADDRS * ETH_ALEN;
+ vnic->mc_list =
+ dma_alloc_coherent(bd->dev,
+ vnic->mc_list_size,
+ &vnic->mc_list_mapping,
+ GFP_KERNEL);
+ if (!vnic->mc_list)
+ goto err_free_vnic_attributes;
+ }
+
+ /* Allocate rss table and hash key */
+ size = L1_CACHE_ALIGN(BNGE_MAX_RSS_TABLE_SIZE);
+
+ vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
+ vnic->rss_table = dma_alloc_coherent(bd->dev,
+ vnic->rss_table_size,
+ &vnic->rss_table_dma_addr,
+ GFP_KERNEL);
+ if (!vnic->rss_table)
+ goto err_free_vnic_attributes;
+
+ vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
+ vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
+ }
+ return 0;
+
+err_free_vnic_attributes:
+ bnge_free_vnic_attributes(bn);
+ return -ENOMEM;
+}
+
+static int bnge_alloc_vnics(struct bnge_net *bn)
+{
+ int num_vnics;
+
+ /* Allocate only 1 VNIC for now
+ * Additional VNICs will be added based on RFS/NTUPLE in future patches
+ */
+ num_vnics = 1;
+
+ bn->vnic_info = kcalloc(num_vnics, sizeof(struct bnge_vnic_info),
+ GFP_KERNEL);
+ if (!bn->vnic_info)
+ return -ENOMEM;
+
+ bn->nr_vnics = num_vnics;
+
+ return 0;
+}
+
+static void bnge_free_vnics(struct bnge_net *bn)
+{
+ kfree(bn->vnic_info);
+ bn->vnic_info = NULL;
+ bn->nr_vnics = 0;
+}
+
+static void bnge_free_ring_grps(struct bnge_net *bn)
+{
+ kfree(bn->grp_info);
+ bn->grp_info = NULL;
+}
+
+static int bnge_init_ring_grps(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ bn->grp_info = kcalloc(bd->nq_nr_rings,
+ sizeof(struct bnge_ring_grp_info),
+ GFP_KERNEL);
+ if (!bn->grp_info)
+ return -ENOMEM;
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ bn->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
+ bn->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
+ bn->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
+ bn->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
+ bn->grp_info[i].nq_fw_ring_id = INVALID_HW_RING_ID;
+ }
+
+ return 0;
+}
+
+static void bnge_free_core(struct bnge_net *bn)
+{
+ bnge_free_vnic_attributes(bn);
+ bnge_free_tx_rings(bn);
+ bnge_free_rx_rings(bn);
+ bnge_free_nq_tree(bn);
+ bnge_free_nq_arrays(bn);
+ bnge_free_ring_stats(bn);
+ bnge_free_ring_grps(bn);
+ bnge_free_vnics(bn);
+ kfree(bn->tx_ring_map);
+ bn->tx_ring_map = NULL;
+ kfree(bn->tx_ring);
+ bn->tx_ring = NULL;
+ kfree(bn->rx_ring);
+ bn->rx_ring = NULL;
+ kfree(bn->bnapi);
+ bn->bnapi = NULL;
+}
+
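+/* Allocate all per-netdev state: napi array, rx/tx ring info, ring
+ * stats, vnics, NQ trees and vnic attributes. Any failure unwinds the
+ * entire allocation via bnge_free_core().
+ */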
+static int bnge_alloc_core(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, j, size, arr_size;
+ int rc = -ENOMEM;
+ void *bnapi;
+
+ arr_size = L1_CACHE_ALIGN(sizeof(struct bnge_napi *) *
+ bd->nq_nr_rings);
+ size = L1_CACHE_ALIGN(sizeof(struct bnge_napi));
+ bnapi = kzalloc(arr_size + size * bd->nq_nr_rings, GFP_KERNEL);
+ if (!bnapi)
+ return rc;
+
+ bn->bnapi = bnapi;
+ bnapi += arr_size;
+ for (i = 0; i < bd->nq_nr_rings; i++, bnapi += size) {
+ struct bnge_nq_ring_info *nqr;
+
+ bn->bnapi[i] = bnapi;
+ bn->bnapi[i]->index = i;
+ bn->bnapi[i]->bn = bn;
+ nqr = &bn->bnapi[i]->nq_ring;
+ nqr->ring_struct.ring_mem.flags = BNGE_RMEM_RING_PTE_FLAG;
+ }
+
+ bn->rx_ring = kcalloc(bd->rx_nr_rings,
+ sizeof(struct bnge_rx_ring_info),
+ GFP_KERNEL);
+ if (!bn->rx_ring)
+ goto err_free_core;
+
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
+
+ rxr->rx_ring_struct.ring_mem.flags =
+ BNGE_RMEM_RING_PTE_FLAG;
+ rxr->rx_agg_ring_struct.ring_mem.flags =
+ BNGE_RMEM_RING_PTE_FLAG;
+ rxr->bnapi = bn->bnapi[i];
+ bn->bnapi[i]->rx_ring = &bn->rx_ring[i];
+ }
+
+ bn->tx_ring = kcalloc(bd->tx_nr_rings,
+ sizeof(struct bnge_tx_ring_info),
+ GFP_KERNEL);
+ if (!bn->tx_ring)
+ goto err_free_core;
+
+ bn->tx_ring_map = kcalloc(bd->tx_nr_rings, sizeof(u16),
+ GFP_KERNEL);
+ if (!bn->tx_ring_map)
+ goto err_free_core;
+
+ if (bd->flags & BNGE_EN_SHARED_CHNL)
+ j = 0;
+ else
+ j = bd->rx_nr_rings;
+
+ for (i = 0; i < bd->tx_nr_rings; i++) {
+ struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
+ struct bnge_napi *bnapi2;
+ int k;
+
+ txr->tx_ring_struct.ring_mem.flags = BNGE_RMEM_RING_PTE_FLAG;
+ bn->tx_ring_map[i] = i;
+ k = j + BNGE_RING_TO_TC_OFF(bd, i);
+
+ bnapi2 = bn->bnapi[k];
+ txr->txq_index = i;
+ txr->tx_napi_idx =
+ BNGE_RING_TO_TC(bd, txr->txq_index);
+ bnapi2->tx_ring[txr->tx_napi_idx] = txr;
+ txr->bnapi = bnapi2;
+ }
+
+ rc = bnge_alloc_ring_stats(bn);
+ if (rc)
+ goto err_free_core;
+
+ rc = bnge_alloc_vnics(bn);
+ if (rc)
+ goto err_free_core;
+
+ rc = bnge_alloc_nq_arrays(bn);
+ if (rc)
+ goto err_free_core;
+
+ bnge_init_ring_struct(bn);
+
+ rc = bnge_alloc_rx_rings(bn);
+ if (rc)
+ goto err_free_core;
+
+ rc = bnge_alloc_tx_rings(bn);
+ if (rc)
+ goto err_free_core;
+
+ rc = bnge_alloc_nq_tree(bn);
+ if (rc)
+ goto err_free_core;
+
+ bn->vnic_info[BNGE_VNIC_DEFAULT].flags |= BNGE_VNIC_RSS_FLAG |
+ BNGE_VNIC_MCAST_FLAG |
+ BNGE_VNIC_UCAST_FLAG;
+ rc = bnge_alloc_vnic_attributes(bn);
+ if (rc)
+ goto err_free_core;
+ return 0;
+
+err_free_core:
+ bnge_free_core(bn);
+ return rc;
+}
+
+u16 bnge_cp_ring_for_rx(struct bnge_rx_ring_info *rxr)
+{
+ return rxr->rx_cpr->ring_struct.fw_ring_id;
+}
+
+u16 bnge_cp_ring_for_tx(struct bnge_tx_ring_info *txr)
+{
+ return txr->tx_cpr->ring_struct.fw_ring_id;
+}
+
+static void bnge_db_nq(struct bnge_net *bn, struct bnge_db_info *db, u32 idx)
+{
+ bnge_writeq(bn->bd, db->db_key64 | DBR_TYPE_NQ_MASK |
+ DB_RING_IDX(db, idx), db->doorbell);
+}
+
+static void bnge_db_cq(struct bnge_net *bn, struct bnge_db_info *db, u32 idx)
+{
+ bnge_writeq(bn->bd, db->db_key64 | DBR_TYPE_CQ_ARMALL |
+ DB_RING_IDX(db, idx), db->doorbell);
+}
+
+static int bnge_cp_num_to_irq_num(struct bnge_net *bn, int n)
+{
+ struct bnge_napi *bnapi = bn->bnapi[n];
+ struct bnge_nq_ring_info *nqr;
+
+ nqr = &bnapi->nq_ring;
+
+ return nqr->ring_struct.map_idx;
+}
+
+static irqreturn_t bnge_msix(int irq, void *dev_instance)
+{
+ /* NAPI scheduling to be added in a future patch */
+ return IRQ_HANDLED;
+}
+
+static void bnge_init_nq_tree(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, j;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_nq_ring_info *nqr = &bn->bnapi[i]->nq_ring;
+ struct bnge_ring_struct *ring = &nqr->ring_struct;
+
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ for (j = 0; j < nqr->cp_ring_count; j++) {
+ struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[j];
+
+ ring = &cpr->ring_struct;
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ }
+ }
+}
+
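+/* Allocate rx netmem from the page pool, splitting pages into
+ * BNGE_RX_PAGE_SIZE frags when PAGE_SIZE is larger.
+ */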
+static netmem_ref __bnge_alloc_rx_netmem(struct bnge_net *bn,
+ dma_addr_t *mapping,
+ struct bnge_rx_ring_info *rxr,
+ unsigned int *offset,
+ gfp_t gfp)
+{
+ netmem_ref netmem;
+
+ if (PAGE_SIZE > BNGE_RX_PAGE_SIZE) {
+ netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset,
+ BNGE_RX_PAGE_SIZE, gfp);
+ } else {
+ netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
+ *offset = 0;
+ }
+ if (!netmem)
+ return 0;
+
+ *mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
+ return netmem;
+}
+
+static u8 *__bnge_alloc_rx_frag(struct bnge_net *bn, dma_addr_t *mapping,
+ struct bnge_rx_ring_info *rxr,
+ gfp_t gfp)
+{
+ unsigned int offset;
+ struct page *page;
+
+ page = page_pool_alloc_frag(rxr->head_pool, &offset,
+ bn->rx_buf_size, gfp);
+ if (!page)
+ return NULL;
+
+ *mapping = page_pool_get_dma_addr(page) + bn->rx_dma_offset + offset;
+ return page_address(page) + offset;
+}
+
+static int bnge_alloc_rx_data(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp)
+{
+ struct bnge_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bn, prod)];
+ struct rx_bd *rxbd;
+ dma_addr_t mapping;
+ u8 *data;
+
+ rxbd = &rxr->rx_desc_ring[RX_RING(bn, prod)][RX_IDX(prod)];
+ data = __bnge_alloc_rx_frag(bn, &mapping, rxr, gfp);
+ if (!data)
+ return -ENOMEM;
+
+ rx_buf->data = data;
+ rx_buf->data_ptr = data + bn->rx_offset;
+ rx_buf->mapping = mapping;
+
+ rxbd->rx_bd_haddr = cpu_to_le64(mapping);
+
+ return 0;
+}
+
+static int bnge_alloc_one_rx_ring_bufs(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ int ring_nr)
+{
+ u32 prod = rxr->rx_prod;
+ int i, rc = 0;
+
+ for (i = 0; i < bn->rx_ring_size; i++) {
+ rc = bnge_alloc_rx_data(bn, rxr, prod, GFP_KERNEL);
+ if (rc)
+ break;
+ prod = NEXT_RX(prod);
+ }
+
+ /* Abort if not a single buffer can be allocated */
+ if (rc && !i) {
+ netdev_err(bn->netdev,
+ "RX ring %d: allocated %d/%d buffers, abort\n",
+ ring_nr, i, bn->rx_ring_size);
+ return rc;
+ }
+
+ rxr->rx_prod = prod;
+
+ if (i < bn->rx_ring_size)
+ netdev_warn(bn->netdev,
+ "RX ring %d: allocated %d/%d buffers, continuing\n",
+ ring_nr, i, bn->rx_ring_size);
+ return 0;
+}
+
+static u16 bnge_find_next_agg_idx(struct bnge_rx_ring_info *rxr, u16 idx)
+{
+ u16 next, max = rxr->rx_agg_bmap_size;
+
+ next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
+ if (next >= max)
+ next = find_first_zero_bit(rxr->rx_agg_bmap, max);
+ return next;
+}
+
+static int bnge_alloc_rx_netmem(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ u16 prod, gfp_t gfp)
+{
+ struct bnge_sw_rx_agg_bd *rx_agg_buf;
+ u16 sw_prod = rxr->rx_sw_agg_prod;
+ unsigned int offset = 0;
+ struct rx_bd *rxbd;
+ dma_addr_t mapping;
+ netmem_ref netmem;
+
+ rxbd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bn, prod)][RX_IDX(prod)];
+ netmem = __bnge_alloc_rx_netmem(bn, &mapping, rxr, &offset, gfp);
+ if (!netmem)
+ return -ENOMEM;
+
+ if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
+ sw_prod = bnge_find_next_agg_idx(rxr, sw_prod);
+
+ __set_bit(sw_prod, rxr->rx_agg_bmap);
+ rx_agg_buf = &rxr->rx_agg_buf_ring[sw_prod];
+ rxr->rx_sw_agg_prod = RING_RX_AGG(bn, NEXT_RX_AGG(sw_prod));
+
+ rx_agg_buf->netmem = netmem;
+ rx_agg_buf->offset = offset;
+ rx_agg_buf->mapping = mapping;
+ rxbd->rx_bd_haddr = cpu_to_le64(mapping);
+ rxbd->rx_bd_opaque = sw_prod;
+ return 0;
+}
+
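+/* Fill the agg ring. Posting fewer than MAX_SKB_FRAGS buffers after an
+ * allocation failure is treated as fatal; a partially filled ring
+ * beyond that is tolerated with a warning.
+ */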
+static int bnge_alloc_one_agg_ring_bufs(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ int ring_nr)
+{
+ u32 prod = rxr->rx_agg_prod;
+ int i, rc = 0;
+
+ for (i = 0; i < bn->rx_agg_ring_size; i++) {
+ rc = bnge_alloc_rx_netmem(bn, rxr, prod, GFP_KERNEL);
+ if (rc)
+ break;
+ prod = NEXT_RX_AGG(prod);
+ }
+
+ if (rc && i < MAX_SKB_FRAGS) {
+ netdev_err(bn->netdev,
+ "Agg ring %d: allocated %d/%d buffers (min %d), abort\n",
+ ring_nr, i, bn->rx_agg_ring_size, MAX_SKB_FRAGS);
+ goto err_free_one_agg_ring_bufs;
+ }
+
+ rxr->rx_agg_prod = prod;
+
+ if (i < bn->rx_agg_ring_size)
+ netdev_warn(bn->netdev,
+ "Agg ring %d: allocated %d/%d buffers, continuing\n",
+ ring_nr, i, bn->rx_agg_ring_size);
+ return 0;
+
+err_free_one_agg_ring_bufs:
+ bnge_free_one_agg_ring_bufs(bn, rxr);
+ return -ENOMEM;
+}
+
+static int bnge_alloc_one_rx_ring_pair_bufs(struct bnge_net *bn, int ring_nr)
+{
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[ring_nr];
+ int rc;
+
+ rc = bnge_alloc_one_rx_ring_bufs(bn, rxr, ring_nr);
+ if (rc)
+ return rc;
+
+ if (bnge_is_agg_reqd(bn->bd)) {
+ rc = bnge_alloc_one_agg_ring_bufs(bn, rxr, ring_nr);
+ if (rc)
+ goto err_free_one_rx_ring_bufs;
+ }
+ return 0;
+
+err_free_one_rx_ring_bufs:
+ bnge_free_one_rx_ring_bufs(bn, rxr);
+ return rc;
+}
+
+static void bnge_init_rxbd_pages(struct bnge_ring_struct *ring, u32 type)
+{
+ struct rx_bd **rx_desc_ring;
+ u32 prod;
+ int i;
+
+ rx_desc_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
+ for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
+ struct rx_bd *rxbd = rx_desc_ring[i];
+ int j;
+
+ for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
+ rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
+ rxbd->rx_bd_opaque = prod;
+ }
+ }
+}
+
+static void bnge_init_one_rx_ring_rxbd(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ struct bnge_ring_struct *ring;
+ u32 type;
+
+ type = (bn->rx_buf_use_size << RX_BD_LEN_SHIFT) |
+ RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
+
+ if (NET_IP_ALIGN == 2)
+ type |= RX_BD_FLAGS_SOP;
+
+ ring = &rxr->rx_ring_struct;
+ bnge_init_rxbd_pages(ring, type);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnge_init_one_agg_ring_rxbd(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ struct bnge_ring_struct *ring;
+ u32 type;
+
+ ring = &rxr->rx_agg_ring_struct;
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ if (bnge_is_agg_reqd(bn->bd)) {
+ type = ((u32)BNGE_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
+ RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
+
+ bnge_init_rxbd_pages(ring, type);
+ }
+}
+
+static void bnge_init_one_rx_ring_pair(struct bnge_net *bn, int ring_nr)
+{
+ struct bnge_rx_ring_info *rxr;
+
+ rxr = &bn->rx_ring[ring_nr];
+ bnge_init_one_rx_ring_rxbd(bn, rxr);
+
+ netif_queue_set_napi(bn->netdev, ring_nr, NETDEV_QUEUE_TYPE_RX,
+ &rxr->bnapi->napi);
+
+ bnge_init_one_agg_ring_rxbd(bn, rxr);
+}
+
+static int bnge_alloc_rx_ring_pair_bufs(struct bnge_net *bn)
+{
+ int i, rc;
+
+ for (i = 0; i < bn->bd->rx_nr_rings; i++) {
+ rc = bnge_alloc_one_rx_ring_pair_bufs(bn, i);
+ if (rc)
+ goto err_free_rx_ring_pair_bufs;
+ }
+ return 0;
+
+err_free_rx_ring_pair_bufs:
+ bnge_free_rx_ring_pair_bufs(bn);
+ return rc;
+}
+
+static void bnge_init_rx_rings(struct bnge_net *bn)
+{
+ int i;
+
+#define BNGE_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
+#define BNGE_RX_DMA_OFFSET NET_SKB_PAD
+ bn->rx_offset = BNGE_RX_OFFSET;
+ bn->rx_dma_offset = BNGE_RX_DMA_OFFSET;
+
+ for (i = 0; i < bn->bd->rx_nr_rings; i++)
+ bnge_init_one_rx_ring_pair(bn, i);
+}
+
+static void bnge_init_tx_rings(struct bnge_net *bn)
+{
+ int i;
+
+ bn->tx_wake_thresh = max(bn->tx_ring_size / 2, BNGE_MIN_TX_DESC_CNT);
+
+ for (i = 0; i < bn->bd->tx_nr_rings; i++) {
+ struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
+ struct bnge_ring_struct *ring = &txr->tx_ring_struct;
+
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+
+ netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_TX,
+ &txr->bnapi->napi);
+ }
+}
+
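+/* Initialize VNIC state and RSS hash keys. The default VNIC gets a
+ * (possibly freshly generated) hash key, whose first 8 bytes are
+ * cached in bn->toeplitz_prefix; the other VNICs copy the default key.
+ */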
+static void bnge_init_vnics(struct bnge_net *bn)
+{
+ struct bnge_vnic_info *vnic0 = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ int i;
+
+ for (i = 0; i < bn->nr_vnics; i++) {
+ struct bnge_vnic_info *vnic = &bn->vnic_info[i];
+ int j;
+
+ vnic->fw_vnic_id = INVALID_HW_RING_ID;
+ vnic->vnic_id = i;
+ for (j = 0; j < BNGE_MAX_CTX_PER_VNIC; j++)
+ vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
+
+ if (bn->vnic_info[i].rss_hash_key) {
+ if (i == BNGE_VNIC_DEFAULT) {
+ u8 *key = (void *)vnic->rss_hash_key;
+ int k;
+
+ if (!bn->rss_hash_key_valid &&
+ !bn->rss_hash_key_updated) {
+ get_random_bytes(bn->rss_hash_key,
+ HW_HASH_KEY_SIZE);
+ bn->rss_hash_key_updated = true;
+ }
+
+ memcpy(vnic->rss_hash_key, bn->rss_hash_key,
+ HW_HASH_KEY_SIZE);
+
+ if (!bn->rss_hash_key_updated)
+ continue;
+
+ bn->rss_hash_key_updated = false;
+ bn->rss_hash_key_valid = true;
+
+ bn->toeplitz_prefix = 0;
+ for (k = 0; k < 8; k++) {
+ bn->toeplitz_prefix <<= 8;
+ bn->toeplitz_prefix |= key[k];
+ }
+ } else {
+ memcpy(vnic->rss_hash_key, vnic0->rss_hash_key,
+ HW_HASH_KEY_SIZE);
+ }
+ }
+ }
+}
+
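+/* Derive the doorbell ring and epoch masks from the ring size of the
+ * given ring type.
+ */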
+static void bnge_set_db_mask(struct bnge_net *bn, struct bnge_db_info *db,
+ u32 ring_type)
+{
+ switch (ring_type) {
+ case HWRM_RING_ALLOC_TX:
+ db->db_ring_mask = bn->tx_ring_mask;
+ break;
+ case HWRM_RING_ALLOC_RX:
+ db->db_ring_mask = bn->rx_ring_mask;
+ break;
+ case HWRM_RING_ALLOC_AGG:
+ db->db_ring_mask = bn->rx_agg_ring_mask;
+ break;
+ case HWRM_RING_ALLOC_CMPL:
+ case HWRM_RING_ALLOC_NQ:
+ db->db_ring_mask = bn->cp_ring_mask;
+ break;
+ }
+ db->db_epoch_mask = db->db_ring_mask + 1;
+ db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
+}
+
+static void bnge_set_db(struct bnge_net *bn, struct bnge_db_info *db,
+ u32 ring_type, u32 map_idx, u32 xid)
+{
+ struct bnge_dev *bd = bn->bd;
+
+ switch (ring_type) {
+ case HWRM_RING_ALLOC_TX:
+ db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
+ break;
+ case HWRM_RING_ALLOC_RX:
+ case HWRM_RING_ALLOC_AGG:
+ db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
+ break;
+ case HWRM_RING_ALLOC_CMPL:
+ db->db_key64 = DBR_PATH_L2;
+ break;
+ case HWRM_RING_ALLOC_NQ:
+ db->db_key64 = DBR_PATH_L2;
+ break;
+ }
+ db->db_key64 |= ((u64)xid << DBR_XID_SFT) | DBR_VALID;
+
+ db->doorbell = bd->bar1 + bd->db_offset;
+ bnge_set_db_mask(bn, db, ring_type);
+}
+
+static int bnge_hwrm_cp_ring_alloc(struct bnge_net *bn,
+ struct bnge_cp_ring_info *cpr)
+{
+ const u32 type = HWRM_RING_ALLOC_CMPL;
+ struct bnge_napi *bnapi = cpr->bnapi;
+ struct bnge_ring_struct *ring;
+ u32 map_idx = bnapi->index;
+ int rc;
+
+ ring = &cpr->ring_struct;
+ ring->handle = BNGE_SET_NQ_HDL(cpr);
+ rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
+ if (rc)
+ return rc;
+
+ bnge_set_db(bn, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
+ bnge_db_cq(bn, &cpr->cp_db, cpr->cp_raw_cons);
+
+ return 0;
+}
+
+static int bnge_hwrm_tx_ring_alloc(struct bnge_net *bn,
+ struct bnge_tx_ring_info *txr, u32 tx_idx)
+{
+ struct bnge_ring_struct *ring = &txr->tx_ring_struct;
+ const u32 type = HWRM_RING_ALLOC_TX;
+ int rc;
+
+ rc = hwrm_ring_alloc_send_msg(bn, ring, type, tx_idx);
+ if (rc)
+ return rc;
+
+ bnge_set_db(bn, &txr->tx_db, type, tx_idx, ring->fw_ring_id);
+
+ return 0;
+}
+
+static int bnge_hwrm_rx_agg_ring_alloc(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ struct bnge_ring_struct *ring = &rxr->rx_agg_ring_struct;
+ u32 type = HWRM_RING_ALLOC_AGG;
+ struct bnge_dev *bd = bn->bd;
+ u32 grp_idx = ring->grp_idx;
+ u32 map_idx;
+ int rc;
+
+ map_idx = grp_idx + bd->rx_nr_rings;
+ rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
+ if (rc)
+ return rc;
+
+ bnge_set_db(bn, &rxr->rx_agg_db, type, map_idx,
+ ring->fw_ring_id);
+ bnge_db_write(bn->bd, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ bnge_db_write(bn->bd, &rxr->rx_db, rxr->rx_prod);
+ bn->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
+
+ return 0;
+}
+
+static int bnge_hwrm_rx_ring_alloc(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr)
+{
+ struct bnge_ring_struct *ring = &rxr->rx_ring_struct;
+ struct bnge_napi *bnapi = rxr->bnapi;
+ u32 type = HWRM_RING_ALLOC_RX;
+ u32 map_idx = bnapi->index;
+ int rc;
+
+ rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
+ if (rc)
+ return rc;
+
+ bnge_set_db(bn, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
+ bn->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
+
+ return 0;
+}
+
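+/* Allocate all rings in firmware in dependency order: NQs first (with
+ * the IRQ quiesced while the doorbell is set up), then tx completion
+ * and tx rings, rx rings with their completion rings, and finally agg
+ * rings.
+ */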
+static int bnge_hwrm_ring_alloc(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ bool agg_rings;
+ int i, rc = 0;
+
+ agg_rings = !!(bnge_is_agg_reqd(bd));
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
+ struct bnge_ring_struct *ring = &nqr->ring_struct;
+ u32 type = HWRM_RING_ALLOC_NQ;
+ u32 map_idx = ring->map_idx;
+ unsigned int vector;
+
+ vector = bd->irq_tbl[map_idx].vector;
+ disable_irq_nosync(vector);
+ rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
+ if (rc) {
+ enable_irq(vector);
+ goto err_out;
+ }
+ bnge_set_db(bn, &nqr->nq_db, type, map_idx, ring->fw_ring_id);
+ bnge_db_nq(bn, &nqr->nq_db, nqr->nq_raw_cons);
+ enable_irq(vector);
+ bn->grp_info[i].nq_fw_ring_id = ring->fw_ring_id;
+
+ if (!i) {
+ rc = bnge_hwrm_set_async_event_cr(bd, ring->fw_ring_id);
+ if (rc)
+ netdev_warn(bn->netdev, "Failed to set async event completion ring.\n");
+ }
+ }
+
+ for (i = 0; i < bd->tx_nr_rings; i++) {
+ struct bnge_tx_ring_info *txr = &bn->tx_ring[i];
+
+ rc = bnge_hwrm_cp_ring_alloc(bn, txr->tx_cpr);
+ if (rc)
+ goto err_out;
+ rc = bnge_hwrm_tx_ring_alloc(bn, txr, i);
+ if (rc)
+ goto err_out;
+ }
+
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ struct bnge_rx_ring_info *rxr = &bn->rx_ring[i];
+ struct bnge_cp_ring_info *cpr;
+ struct bnge_ring_struct *ring;
+ struct bnge_napi *bnapi;
+ u32 map_idx, type;
+
+ rc = bnge_hwrm_rx_ring_alloc(bn, rxr);
+ if (rc)
+ goto err_out;
+ /* With agg rings, the rx doorbell write is deferred until agg
+ * buffers are posted in bnge_hwrm_rx_agg_ring_alloc().
+ */
+ if (!agg_rings)
+ bnge_db_write(bn->bd, &rxr->rx_db, rxr->rx_prod);
+
+ cpr = rxr->rx_cpr;
+ bnapi = rxr->bnapi;
+ type = HWRM_RING_ALLOC_CMPL;
+ map_idx = bnapi->index;
+
+ ring = &cpr->ring_struct;
+ ring->handle = BNGE_SET_NQ_HDL(cpr);
+ rc = hwrm_ring_alloc_send_msg(bn, ring, type, map_idx);
+ if (rc)
+ goto err_out;
+ bnge_set_db(bn, &cpr->cp_db, type, map_idx,
+ ring->fw_ring_id);
+ bnge_db_cq(bn, &cpr->cp_db, cpr->cp_raw_cons);
+ }
+
+ if (agg_rings) {
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ rc = bnge_hwrm_rx_agg_ring_alloc(bn, &bn->rx_ring[i]);
+ if (rc)
+ goto err_out;
+ }
+ }
+err_out:
+ return rc;
+}
+
+void bnge_fill_hw_rss_tbl(struct bnge_net *bn, struct bnge_vnic_info *vnic)
+{
+ __le16 *ring_tbl = vnic->rss_table;
+ struct bnge_rx_ring_info *rxr;
+ struct bnge_dev *bd = bn->bd;
+ u16 tbl_size, i;
+
+ tbl_size = bnge_get_rxfh_indir_size(bd);
+
+ for (i = 0; i < tbl_size; i++) {
+ u16 ring_id, j;
+
+ j = bd->rss_indir_tbl[i];
+ rxr = &bn->rx_ring[j];
+
+ ring_id = rxr->rx_ring_struct.fw_ring_id;
+ *ring_tbl++ = cpu_to_le16(ring_id);
+ ring_id = bnge_cp_ring_for_rx(rxr);
+ *ring_tbl++ = cpu_to_le16(ring_id);
+ }
+}
+
+static int bnge_hwrm_vnic_rss_cfg(struct bnge_net *bn,
+ struct bnge_vnic_info *vnic)
+{
+ int rc;
+
+ rc = bnge_hwrm_vnic_set_rss(bn, vnic, true);
+ if (rc) {
+ netdev_err(bn->netdev, "hwrm vnic %d set rss failure rc: %d\n",
+ vnic->vnic_id, rc);
+ return rc;
+ }
+ rc = bnge_hwrm_vnic_cfg(bn, vnic);
+ if (rc)
+ netdev_err(bn->netdev, "hwrm vnic %d cfg failure rc: %d\n",
+ vnic->vnic_id, rc);
+ return rc;
+}
+
+static int bnge_setup_vnic(struct bnge_net *bn, struct bnge_vnic_info *vnic)
+{
+ struct bnge_dev *bd = bn->bd;
+ int rc, i, nr_ctxs;
+
+ nr_ctxs = bnge_cal_nr_rss_ctxs(bd->rx_nr_rings);
+ for (i = 0; i < nr_ctxs; i++) {
+ rc = bnge_hwrm_vnic_ctx_alloc(bd, vnic, i);
+ if (rc) {
+ netdev_err(bn->netdev, "hwrm vnic %d ctx %d alloc failure rc: %d\n",
+ vnic->vnic_id, i, rc);
+ return -ENOMEM;
+ }
+ bn->rsscos_nr_ctxs++;
+ }
+
+ rc = bnge_hwrm_vnic_rss_cfg(bn, vnic);
+ if (rc)
+ return rc;
+
+ if (bnge_is_agg_reqd(bd)) {
+ rc = bnge_hwrm_vnic_set_hds(bn, vnic);
+ if (rc)
+ netdev_err(bn->netdev, "hwrm vnic %d set hds failure rc: %d\n",
+ vnic->vnic_id, rc);
+ }
+ return rc;
+}
+
+static void bnge_del_l2_filter(struct bnge_net *bn, struct bnge_l2_filter *fltr)
+{
+ if (!refcount_dec_and_test(&fltr->refcnt))
+ return;
+ hlist_del_rcu(&fltr->base.hash);
+ kfree_rcu(fltr, base.rcu);
+}
+
+static void bnge_init_l2_filter(struct bnge_net *bn,
+ struct bnge_l2_filter *fltr,
+ struct bnge_l2_key *key, u32 idx)
+{
+ struct hlist_head *head;
+
+ ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr);
+ fltr->l2_key.vlan = key->vlan;
+ fltr->base.type = BNGE_FLTR_TYPE_L2;
+
+ head = &bn->l2_fltr_hash_tbl[idx];
+ hlist_add_head_rcu(&fltr->base.hash, head);
+ refcount_set(&fltr->refcnt, 1);
+}
+
+static struct bnge_l2_filter *__bnge_lookup_l2_filter(struct bnge_net *bn,
+ struct bnge_l2_key *key,
+ u32 idx)
+{
+ struct bnge_l2_filter *fltr;
+ struct hlist_head *head;
+
+ head = &bn->l2_fltr_hash_tbl[idx];
+ hlist_for_each_entry_rcu(fltr, head, base.hash) {
+ struct bnge_l2_key *l2_key = &fltr->l2_key;
+
+ if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) &&
+ l2_key->vlan == key->vlan)
+ return fltr;
+ }
+ return NULL;
+}
+
+static struct bnge_l2_filter *bnge_lookup_l2_filter(struct bnge_net *bn,
+ struct bnge_l2_key *key,
+ u32 idx)
+{
+ struct bnge_l2_filter *fltr;
+
+ rcu_read_lock();
+ fltr = __bnge_lookup_l2_filter(bn, key, idx);
+ if (fltr)
+ refcount_inc(&fltr->refcnt);
+ rcu_read_unlock();
+ return fltr;
+}
+
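+/* Look up an L2 filter by (MAC, VLAN) hash; allocate and insert a new
+ * one if no match exists. Returns a reference-counted filter.
+ */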
+static struct bnge_l2_filter *bnge_alloc_l2_filter(struct bnge_net *bn,
+ struct bnge_l2_key *key,
+ gfp_t gfp)
+{
+ struct bnge_l2_filter *fltr;
+ u32 idx;
+
+ idx = jhash2(&key->filter_key, BNGE_L2_KEY_SIZE, bn->hash_seed) &
+ BNGE_L2_FLTR_HASH_MASK;
+ fltr = bnge_lookup_l2_filter(bn, key, idx);
+ if (fltr)
+ return fltr;
+
+ fltr = kzalloc(sizeof(*fltr), gfp);
+ if (!fltr)
+ return ERR_PTR(-ENOMEM);
+
+ bnge_init_l2_filter(bn, fltr, key, idx);
+ return fltr;
+}
+
+static int bnge_hwrm_set_vnic_filter(struct bnge_net *bn, u16 vnic_id, u16 idx,
+ const u8 *mac_addr)
+{
+ struct bnge_l2_filter *fltr;
+ struct bnge_l2_key key;
+ int rc;
+
+ ether_addr_copy(key.dst_mac_addr, mac_addr);
+ key.vlan = 0;
+ fltr = bnge_alloc_l2_filter(bn, &key, GFP_KERNEL);
+ if (IS_ERR(fltr))
+ return PTR_ERR(fltr);
+
+ fltr->base.fw_vnic_id = bn->vnic_info[vnic_id].fw_vnic_id;
+ rc = bnge_hwrm_l2_filter_alloc(bn->bd, fltr);
+ if (rc)
+ goto err_del_l2_filter;
+ bn->vnic_info[vnic_id].l2_filters[idx] = fltr;
+ return rc;
+
+err_del_l2_filter:
+ bnge_del_l2_filter(bn, fltr);
+ return rc;
+}
+
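+/* Copy the netdev MC list into the VNIC and report whether it changed.
+ * Overflowing BNGE_MAX_MC_ADDRS switches the rx mask to ALL_MCAST
+ * instead.
+ */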
+static bool bnge_mc_list_updated(struct bnge_net *bn, u32 *rx_mask)
+{
+ struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ struct net_device *dev = bn->netdev;
+ struct netdev_hw_addr *ha;
+ int mc_count = 0, off = 0;
+ bool update = false;
+ u8 *haddr;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ if (mc_count >= BNGE_MAX_MC_ADDRS) {
+ *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+ vnic->mc_list_count = 0;
+ return false;
+ }
+ haddr = ha->addr;
+ if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
+ memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
+ update = true;
+ }
+ off += ETH_ALEN;
+ mc_count++;
+ }
+ if (mc_count)
+ *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
+
+ if (mc_count != vnic->mc_list_count) {
+ vnic->mc_list_count = mc_count;
+ update = true;
+ }
+ return update;
+}
+
+static bool bnge_uc_list_updated(struct bnge_net *bn)
+{
+ struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ struct net_device *dev = bn->netdev;
+ struct netdev_hw_addr *ha;
+ int off = 0;
+
+ if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
+ return true;
+
+ netdev_for_each_uc_addr(ha, dev) {
+ if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
+ return true;
+
+ off += ETH_ALEN;
+ }
+ return false;
+}
+
+static bool bnge_promisc_ok(struct bnge_net *bn)
+{
+ return true;
+}
+
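+/* Sync the default VNIC with netdev state: rebuild unicast filters,
+ * fall back to promiscuous mode if the UC list overflows, and program
+ * the rx mask (degrading to ALL_MCAST if MC filters fail).
+ */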
+static int bnge_cfg_def_vnic(struct bnge_net *bn)
+{
+ struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ struct net_device *dev = bn->netdev;
+ struct bnge_dev *bd = bn->bd;
+ struct netdev_hw_addr *ha;
+ int i, off = 0, rc;
+ bool uc_update;
+
+ netif_addr_lock_bh(dev);
+ uc_update = bnge_uc_list_updated(bn);
+ netif_addr_unlock_bh(dev);
+
+ if (!uc_update)
+ goto skip_uc;
+
+ for (i = 1; i < vnic->uc_filter_count; i++) {
+ struct bnge_l2_filter *fltr = vnic->l2_filters[i];
+
+ bnge_hwrm_l2_filter_free(bd, fltr);
+ bnge_del_l2_filter(bn, fltr);
+ }
+
+ vnic->uc_filter_count = 1;
+
+ netif_addr_lock_bh(dev);
+ if (netdev_uc_count(dev) > (BNGE_MAX_UC_ADDRS - 1)) {
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+ } else {
+ netdev_for_each_uc_addr(ha, dev) {
+ memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
+ off += ETH_ALEN;
+ vnic->uc_filter_count++;
+ }
+ }
+ netif_addr_unlock_bh(dev);
+
+ for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
+ rc = bnge_hwrm_set_vnic_filter(bn, 0, i, vnic->uc_list + off);
+ if (rc) {
+ netdev_err(dev, "HWRM vnic filter failure rc: %d\n", rc);
+ vnic->uc_filter_count = i;
+ return rc;
+ }
+ }
+
+skip_uc:
+ if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
+ !bnge_promisc_ok(bn))
+ vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+ rc = bnge_hwrm_cfa_l2_set_rx_mask(bd, vnic);
+ if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
+ netdev_info(dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
+ rc);
+ vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+ vnic->mc_list_count = 0;
+ rc = bnge_hwrm_cfa_l2_set_rx_mask(bd, vnic);
+ }
+ if (rc)
+ netdev_err(dev, "HWRM cfa l2 rx mask failure rc: %d\n",
+ rc);
+
+ return rc;
+}
+
+static void bnge_hwrm_vnic_free(struct bnge_net *bn)
+{
+ int i;
+
+ for (i = 0; i < bn->nr_vnics; i++)
+ bnge_hwrm_vnic_free_one(bn->bd, &bn->vnic_info[i]);
+}
+
+static void bnge_hwrm_vnic_ctx_free(struct bnge_net *bn)
+{
+ int i, j;
+
+ for (i = 0; i < bn->nr_vnics; i++) {
+ struct bnge_vnic_info *vnic = &bn->vnic_info[i];
+
+ for (j = 0; j < BNGE_MAX_CTX_PER_VNIC; j++) {
+ if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
+ bnge_hwrm_vnic_ctx_free_one(bn->bd, vnic, j);
+ }
+ }
+ bn->rsscos_nr_ctxs = 0;
+}
+
+static void bnge_hwrm_clear_vnic_filter(struct bnge_net *bn)
+{
+ struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ int i;
+
+ for (i = 0; i < vnic->uc_filter_count; i++) {
+ struct bnge_l2_filter *fltr = vnic->l2_filters[i];
+
+ bnge_hwrm_l2_filter_free(bn->bd, fltr);
+ bnge_del_l2_filter(bn, fltr);
+ }
+
+ vnic->uc_filter_count = 0;
+}
+
+static void bnge_clear_vnic(struct bnge_net *bn)
+{
+ bnge_hwrm_clear_vnic_filter(bn);
+ bnge_hwrm_vnic_free(bn);
+ bnge_hwrm_vnic_ctx_free(bn);
+}
+
+static void bnge_hwrm_rx_ring_free(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ bool close_path)
+{
+ struct bnge_ring_struct *ring = &rxr->rx_ring_struct;
+ u32 grp_idx = rxr->bnapi->index;
+ u32 cmpl_ring_id;
+
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ cmpl_ring_id = bnge_cp_ring_for_rx(rxr);
+ hwrm_ring_free_send_msg(bn, ring,
+ RING_FREE_REQ_RING_TYPE_RX,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bn->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnge_hwrm_rx_agg_ring_free(struct bnge_net *bn,
+ struct bnge_rx_ring_info *rxr,
+ bool close_path)
+{
+ struct bnge_ring_struct *ring = &rxr->rx_agg_ring_struct;
+ u32 grp_idx = rxr->bnapi->index;
+ u32 cmpl_ring_id;
+
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ cmpl_ring_id = bnge_cp_ring_for_rx(rxr);
+ hwrm_ring_free_send_msg(bn, ring, RING_FREE_REQ_RING_TYPE_RX_AGG,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bn->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnge_hwrm_tx_ring_free(struct bnge_net *bn,
+ struct bnge_tx_ring_info *txr,
+ bool close_path)
+{
+ struct bnge_ring_struct *ring = &txr->tx_ring_struct;
+ u32 cmpl_ring_id;
+
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ cmpl_ring_id = close_path ? bnge_cp_ring_for_tx(txr) :
+ INVALID_HW_RING_ID;
+ hwrm_ring_free_send_msg(bn, ring, RING_FREE_REQ_RING_TYPE_TX,
+ cmpl_ring_id);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnge_hwrm_cp_ring_free(struct bnge_net *bn,
+ struct bnge_cp_ring_info *cpr)
+{
+ struct bnge_ring_struct *ring;
+
+ ring = &cpr->ring_struct;
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ hwrm_ring_free_send_msg(bn, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL,
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnge_hwrm_ring_free(struct bnge_net *bn, bool close_path)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ if (!bn->bnapi)
+ return;
+
+ for (i = 0; i < bd->tx_nr_rings; i++)
+ bnge_hwrm_tx_ring_free(bn, &bn->tx_ring[i], close_path);
+
+ for (i = 0; i < bd->rx_nr_rings; i++) {
+ bnge_hwrm_rx_ring_free(bn, &bn->rx_ring[i], close_path);
+ bnge_hwrm_rx_agg_ring_free(bn, &bn->rx_ring[i], close_path);
+ }
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_nq_ring_info *nqr;
+ struct bnge_ring_struct *ring;
+ int j;
+
+ nqr = &bnapi->nq_ring;
+ for (j = 0; j < nqr->cp_ring_count && nqr->cp_ring_arr; j++)
+ bnge_hwrm_cp_ring_free(bn, &nqr->cp_ring_arr[j]);
+
+ ring = &nqr->ring_struct;
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ hwrm_ring_free_send_msg(bn, ring,
+ RING_FREE_REQ_RING_TYPE_NQ,
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bn->grp_info[i].nq_fw_ring_id = INVALID_HW_RING_ID;
+ }
+ }
+}
+
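+/* Name each MSI-X vector "<ifname>-TxRx-<n>" (or -rx-/-tx- when channels
+ * are not shared) and attach the interrupt handler.
+ */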
+static void bnge_setup_msix(struct bnge_net *bn)
+{
+ struct net_device *dev = bn->netdev;
+ struct bnge_dev *bd = bn->bd;
+ int len, i;
+
+ len = sizeof(bd->irq_tbl[0].name);
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ int map_idx = bnge_cp_num_to_irq_num(bn, i);
+ char *attr;
+
+ if (bd->flags & BNGE_EN_SHARED_CHNL)
+ attr = "TxRx";
+ else if (i < bd->rx_nr_rings)
+ attr = "rx";
+ else
+ attr = "tx";
+
+ snprintf(bd->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
+ attr, i);
+ bd->irq_tbl[map_idx].handler = bnge_msix;
+ }
+}
+
+static int bnge_setup_interrupts(struct bnge_net *bn)
+{
+ struct net_device *dev = bn->netdev;
+ struct bnge_dev *bd = bn->bd;
+
+ bnge_setup_msix(bn);
+
+ return netif_set_real_num_queues(dev, bd->tx_nr_rings, bd->rx_nr_rings);
+}
+
+static void bnge_hwrm_resource_free(struct bnge_net *bn, bool close_path)
+{
+ bnge_clear_vnic(bn);
+ bnge_hwrm_ring_free(bn, close_path);
+ bnge_hwrm_stat_ctx_free(bn);
+}
+
+static void bnge_free_irq(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ struct bnge_irq *irq;
+ int i;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ int map_idx = bnge_cp_num_to_irq_num(bn, i);
+
+ irq = &bd->irq_tbl[map_idx];
+ if (irq->requested) {
+ if (irq->have_cpumask) {
+ irq_set_affinity_hint(irq->vector, NULL);
+ free_cpumask_var(irq->cpu_mask);
+ irq->have_cpumask = 0;
+ }
+ free_irq(irq->vector, bn->bnapi[i]);
+ }
+
+ irq->requested = 0;
+ }
+}
+
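+/* Request one IRQ per NQ and, when a cpumask can be allocated, spread
+ * affinity hints across CPUs local to the device's NUMA node.
+ */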
+static int bnge_request_irq(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, rc;
+
+ rc = bnge_setup_interrupts(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "bnge_setup_interrupts err: %d\n", rc);
+ return rc;
+ }
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ int map_idx = bnge_cp_num_to_irq_num(bn, i);
+ struct bnge_irq *irq = &bd->irq_tbl[map_idx];
+
+ rc = request_irq(irq->vector, irq->handler, 0, irq->name,
+ bn->bnapi[i]);
+ if (rc)
+ goto err_free_irq;
+
+ netif_napi_set_irq_locked(&bn->bnapi[i]->napi, irq->vector);
+ irq->requested = 1;
+
+ if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
+ int numa_node = dev_to_node(&bd->pdev->dev);
+
+ irq->have_cpumask = 1;
+ cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+ irq->cpu_mask);
+ rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
+ if (rc) {
+ netdev_warn(bn->netdev,
+ "Set affinity failed, IRQ = %d\n",
+ irq->vector);
+ goto err_free_irq;
+ }
+ }
+ }
+ return 0;
+
+err_free_irq:
+ bnge_free_irq(bn);
+ return rc;
+}
+
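+/* First-touch HW setup: allocate stat contexts, rings and the default
+ * VNIC, install the unicast MAC filter, then build the RX mask from the
+ * netdev flags.
+ */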
+static int bnge_init_chip(struct bnge_net *bn)
+{
+ struct bnge_vnic_info *vnic = &bn->vnic_info[BNGE_VNIC_DEFAULT];
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
+#define BNGE_DEF_STATS_COAL_TICKS 1000000
+ bn->stats_coal_ticks = BNGE_DEF_STATS_COAL_TICKS;
+
+ rc = bnge_hwrm_stat_ctx_alloc(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "hwrm stat ctx alloc failure rc: %d\n", rc);
+ goto err_out;
+ }
+
+ rc = bnge_hwrm_ring_alloc(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "hwrm ring alloc failure rc: %d\n", rc);
+ goto err_out;
+ }
+
+ rc = bnge_hwrm_vnic_alloc(bd, vnic, bd->rx_nr_rings);
+ if (rc) {
+ netdev_err(bn->netdev, "hwrm vnic alloc failure rc: %d\n", rc);
+ goto err_out;
+ }
+
+ rc = bnge_setup_vnic(bn, vnic);
+ if (rc)
+ goto err_out;
+
+ if (bd->rss_cap & BNGE_RSS_CAP_RSS_HASH_TYPE_DELTA)
+ bnge_hwrm_update_rss_hash_cfg(bn);
+
+ /* Filter for default vnic 0 */
+ rc = bnge_hwrm_set_vnic_filter(bn, 0, 0, bn->netdev->dev_addr);
+ if (rc) {
+ netdev_err(bn->netdev, "HWRM vnic filter failure rc: %d\n", rc);
+ goto err_out;
+ }
+ vnic->uc_filter_count = 1;
+
+ vnic->rx_mask = 0;
+
+ if (bn->netdev->flags & IFF_BROADCAST)
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
+
+ if (bn->netdev->flags & IFF_PROMISC)
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+
+ if (bn->netdev->flags & IFF_ALLMULTI) {
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+ vnic->mc_list_count = 0;
+ } else if (bn->netdev->flags & IFF_MULTICAST) {
+ u32 mask = 0;
+
+ bnge_mc_list_updated(bn, &mask);
+ vnic->rx_mask |= mask;
+ }
+
+ rc = bnge_cfg_def_vnic(bn);
+ if (rc)
+ goto err_out;
+ return 0;
+
+err_out:
+ bnge_hwrm_resource_free(bn, 0);
+ return rc;
+}
+
+static int bnge_napi_poll(struct napi_struct *napi, int budget)
+{
+ int work_done = 0;
+
+ /* defer NAPI implementation to next patch series */
+ napi_complete_done(napi, work_done);
+
+ return work_done;
+}
+
+static void bnge_init_napi(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ struct bnge_napi *bnapi;
+ int i;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ bnapi = bn->bnapi[i];
+ netif_napi_add_config_locked(bn->netdev, &bnapi->napi,
+ bnge_napi_poll, bnapi->index);
+ }
+}
+
+static void bnge_del_napi(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i;
+
+ for (i = 0; i < bd->rx_nr_rings; i++)
+ netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_RX, NULL);
+ for (i = 0; i < bd->tx_nr_rings; i++)
+ netif_queue_set_napi(bn->netdev, i, NETDEV_QUEUE_TYPE_TX, NULL);
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+
+ __netif_napi_del_locked(&bnapi->napi);
+ }
+
+ /* Wait for RCU grace period after removing NAPI instances */
+ synchronize_net();
+}
+
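+/* Initialize the SW ring state, then bring up the chip; failures unwind
+ * the completed steps in reverse order.
+ */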
+static int bnge_init_nic(struct bnge_net *bn)
+{
+ int rc;
+
+ bnge_init_nq_tree(bn);
+
+ bnge_init_rx_rings(bn);
+ rc = bnge_alloc_rx_ring_pair_bufs(bn);
+ if (rc)
+ return rc;
+
+ bnge_init_tx_rings(bn);
+
+ rc = bnge_init_ring_grps(bn);
+ if (rc)
+ goto err_free_rx_ring_pair_bufs;
+
+ bnge_init_vnics(bn);
+
+ rc = bnge_init_chip(bn);
+ if (rc)
+ goto err_free_ring_grps;
+	return 0;
+
+err_free_ring_grps:
+	bnge_free_ring_grps(bn);
+err_free_rx_ring_pair_bufs:
+	bnge_free_rx_ring_pair_bufs(bn);
+	return rc;
+}
+
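+/* Core open path: reserve and allocate resources, set up NAPI and IRQs,
+ * then initialize the NIC before marking the device open.
+ */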
+static int bnge_open_core(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int rc;
+
+ netif_carrier_off(bn->netdev);
+
+ rc = bnge_reserve_rings(bd);
+ if (rc) {
+ netdev_err(bn->netdev, "bnge_reserve_rings err: %d\n", rc);
+ return rc;
+ }
+
+ rc = bnge_alloc_core(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "bnge_alloc_core err: %d\n", rc);
+ return rc;
+ }
+
+ bnge_init_napi(bn);
+ rc = bnge_request_irq(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "bnge_request_irq err: %d\n", rc);
+ goto err_del_napi;
+ }
+
+ rc = bnge_init_nic(bn);
+ if (rc) {
+ netdev_err(bn->netdev, "bnge_init_nic err: %d\n", rc);
+ goto err_free_irq;
+ }
+ set_bit(BNGE_STATE_OPEN, &bd->state);
+ return 0;
+
+err_free_irq:
+ bnge_free_irq(bn);
+err_del_napi:
+ bnge_del_napi(bn);
+ bnge_free_core(bn);
+ return rc;
+}
static netdev_tx_t bnge_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
@@ -28,11 +2212,42 @@ static netdev_tx_t bnge_start_xmit(struct sk_buff *skb, struct net_device *dev)
static int bnge_open(struct net_device *dev)
{
+ struct bnge_net *bn = netdev_priv(dev);
+ int rc;
+
+ rc = bnge_open_core(bn);
+ if (rc)
+ netdev_err(dev, "bnge_open_core err: %d\n", rc);
+
+ return rc;
+}
+
+static int bnge_shutdown_nic(struct bnge_net *bn)
+{
+ /* TODO: close_path = 0 until we make NAPI functional */
+ bnge_hwrm_resource_free(bn, 0);
return 0;
}
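+/* Mirror of bnge_open_core(): quiesce FW resources first, then free host
+ * rings, IRQs and NAPI before releasing the core.
+ */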
+static void bnge_close_core(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+
+ clear_bit(BNGE_STATE_OPEN, &bd->state);
+ bnge_shutdown_nic(bn);
+ bnge_free_all_rings_bufs(bn);
+ bnge_free_irq(bn);
+ bnge_del_napi(bn);
+
+ bnge_free_core(bn);
+}
+
static int bnge_close(struct net_device *dev)
{
+ struct bnge_net *bn = netdev_priv(dev);
+
+ bnge_close_core(bn);
+
return 0;
}
@@ -238,6 +2453,7 @@ int bnge_netdev_alloc(struct bnge_dev *bd, int max_irqs)
bn->rx_ring_size = BNGE_DEFAULT_RX_RING_SIZE;
bn->tx_ring_size = BNGE_DEFAULT_TX_RING_SIZE;
+ bn->rx_dir = DMA_FROM_DEVICE;
bnge_set_tpa_flags(bd);
bnge_set_ring_params(bd);
@@ -245,6 +2461,7 @@ int bnge_netdev_alloc(struct bnge_dev *bd, int max_irqs)
bnge_init_l2_fltr_tbl(bn);
bnge_init_mac_addr(bd);
+ netdev->request_ops_lock = true;
rc = register_netdev(netdev);
if (rc) {
dev_err(bd->dev, "Register netdev failed rc: %d\n", rc);
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
index a650d71a58db..fb3b961536ba 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h
@@ -5,6 +5,9 @@
#define _BNGE_NETDEV_H_
#include <linux/bnxt/hsi.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/refcount.h>
+#include "bnge_db.h"
struct tx_bd {
__le32 tx_bd_len_flags_type;
@@ -113,11 +116,25 @@ struct bnge_sw_rx_bd {
};
struct bnge_sw_rx_agg_bd {
- struct page *page;
+ netmem_ref netmem;
unsigned int offset;
dma_addr_t mapping;
};
+#define HWRM_RING_ALLOC_TX 0x1
+#define HWRM_RING_ALLOC_RX 0x2
+#define HWRM_RING_ALLOC_AGG 0x4
+#define HWRM_RING_ALLOC_CMPL 0x8
+#define HWRM_RING_ALLOC_NQ 0x10
+
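+/* FW IDs describing one ring group (RX, aggregation and NQ rings plus the
+ * stats context); one entry per NQ.
+ */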
+struct bnge_ring_grp_info {
+ u16 fw_stats_ctx;
+ u16 fw_grp_id;
+ u16 rx_fw_ring_id;
+ u16 agg_fw_ring_id;
+ u16 nq_fw_ring_id;
+};
+
#define BNGE_RX_COPY_THRESH 256
#define BNGE_HW_FEATURE_VLAN_ALL_RX \
@@ -133,6 +150,32 @@ enum {
#define BNGE_NET_EN_TPA (BNGE_NET_EN_GRO | BNGE_NET_EN_LRO)
+/* Minimum TX BDs for a TX packet with MAX_SKB_FRAGS + 1 buffers. We need
+ * one extra BD because the first TX BD is always a long BD.
+ */
+#define BNGE_MIN_TX_DESC_CNT (MAX_SKB_FRAGS + 2)
+
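+/* BDs are 16 bytes, so one ring page holds 2^(BNGE_PAGE_SHIFT - 4) BDs;
+ * the shifts below turn a masked BD index into a ring-page index, while
+ * the *_IDX() macros give the offset within that page.
+ */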
+#define RX_RING(bn, x) (((x) & (bn)->rx_ring_mask) >> (BNGE_PAGE_SHIFT - 4))
+#define RX_AGG_RING(bn, x) (((x) & (bn)->rx_agg_ring_mask) >> \
+ (BNGE_PAGE_SHIFT - 4))
+#define RX_IDX(x) ((x) & (RX_DESC_CNT - 1))
+
+#define TX_RING(bn, x) (((x) & (bn)->tx_ring_mask) >> (BNGE_PAGE_SHIFT - 4))
+#define TX_IDX(x) ((x) & (TX_DESC_CNT - 1))
+
+#define CP_RING(x) (((x) & ~(CP_DESC_CNT - 1)) >> (BNGE_PAGE_SHIFT - 4))
+#define CP_IDX(x) ((x) & (CP_DESC_CNT - 1))
+
+#define RING_RX(bn, idx) ((idx) & (bn)->rx_ring_mask)
+#define NEXT_RX(idx) ((idx) + 1)
+
+#define RING_RX_AGG(bn, idx) ((idx) & (bn)->rx_agg_ring_mask)
+#define NEXT_RX_AGG(idx) ((idx) + 1)
+
+#define BNGE_NQ_HDL_TYPE_SHIFT 24
+#define BNGE_NQ_HDL_TYPE_RX 0x00
+#define BNGE_NQ_HDL_TYPE_TX 0x01
+
struct bnge_net {
struct bnge_dev *bd;
struct net_device *netdev;
@@ -164,6 +207,30 @@ struct bnge_net {
struct hlist_head l2_fltr_hash_tbl[BNGE_L2_FLTR_HASH_SIZE];
u32 hash_seed;
u64 toeplitz_prefix;
+
+ struct bnge_napi **bnapi;
+
+ struct bnge_rx_ring_info *rx_ring;
+ struct bnge_tx_ring_info *tx_ring;
+
+ u16 *tx_ring_map;
+ enum dma_data_direction rx_dir;
+
+ /* grp_info indexed by napi/nq index */
+ struct bnge_ring_grp_info *grp_info;
+ struct bnge_vnic_info *vnic_info;
+ int nr_vnics;
+ int total_irqs;
+
+ u32 tx_wake_thresh;
+ u16 rx_offset;
+ u16 rx_dma_offset;
+
+ u8 rss_hash_key[HW_HASH_KEY_SIZE];
+ u8 rss_hash_key_valid:1;
+ u8 rss_hash_key_updated:1;
+ int rsscos_nr_ctxs;
+ u32 stats_coal_ticks;
};
#define BNGE_DEFAULT_RX_RING_SIZE 511
@@ -203,4 +270,185 @@ void bnge_set_ring_params(struct bnge_dev *bd);
#define BNGE_MAX_RX_JUM_DESC_CNT (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)
#define BNGE_MAX_TX_DESC_CNT (TX_DESC_CNT * MAX_TX_PAGES - 1)
+#define BNGE_MAX_TXR_PER_NAPI 8
+
+#define bnge_for_each_napi_tx(iter, bnapi, txr) \
+ for (iter = 0, txr = (bnapi)->tx_ring[0]; txr; \
+ txr = (iter < BNGE_MAX_TXR_PER_NAPI - 1) ? \
+ (bnapi)->tx_ring[++iter] : NULL)
+
+#define BNGE_SET_NQ_HDL(cpr) \
+ (((cpr)->cp_ring_type << BNGE_NQ_HDL_TYPE_SHIFT) | (cpr)->cp_idx)
+
+struct bnge_stats_mem {
+ u64 *sw_stats;
+ u64 *hw_masks;
+ void *hw_stats;
+ dma_addr_t hw_stats_map;
+ int len;
+};
+
+struct bnge_cp_ring_info {
+ struct bnge_napi *bnapi;
+ dma_addr_t *desc_mapping;
+ struct tx_cmp **desc_ring;
+ struct bnge_ring_struct ring_struct;
+ u8 cp_ring_type;
+ u8 cp_idx;
+ u32 cp_raw_cons;
+ struct bnge_db_info cp_db;
+};
+
+struct bnge_nq_ring_info {
+ struct bnge_napi *bnapi;
+ dma_addr_t *desc_mapping;
+ struct nqe_cn **desc_ring;
+ struct bnge_ring_struct ring_struct;
+ u32 nq_raw_cons;
+ struct bnge_db_info nq_db;
+
+ struct bnge_stats_mem stats;
+ u32 hw_stats_ctx_id;
+
+ int cp_ring_count;
+ struct bnge_cp_ring_info *cp_ring_arr;
+};
+
+struct bnge_rx_ring_info {
+ struct bnge_napi *bnapi;
+ struct bnge_cp_ring_info *rx_cpr;
+ u16 rx_prod;
+ u16 rx_agg_prod;
+ u16 rx_sw_agg_prod;
+ u16 rx_next_cons;
+ struct bnge_db_info rx_db;
+ struct bnge_db_info rx_agg_db;
+
+ struct rx_bd *rx_desc_ring[MAX_RX_PAGES];
+ struct bnge_sw_rx_bd *rx_buf_ring;
+
+ struct rx_bd *rx_agg_desc_ring[MAX_RX_AGG_PAGES];
+ struct bnge_sw_rx_agg_bd *rx_agg_buf_ring;
+
+ unsigned long *rx_agg_bmap;
+ u16 rx_agg_bmap_size;
+
+ dma_addr_t rx_desc_mapping[MAX_RX_PAGES];
+ dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
+
+ struct bnge_ring_struct rx_ring_struct;
+ struct bnge_ring_struct rx_agg_ring_struct;
+ struct page_pool *page_pool;
+ struct page_pool *head_pool;
+ bool need_head_pool;
+};
+
+struct bnge_tx_ring_info {
+ struct bnge_napi *bnapi;
+ struct bnge_cp_ring_info *tx_cpr;
+ u16 tx_prod;
+ u16 tx_cons;
+ u16 tx_hw_cons;
+ u16 txq_index;
+ u8 tx_napi_idx;
+ u8 kick_pending;
+ struct bnge_db_info tx_db;
+
+ struct tx_bd *tx_desc_ring[MAX_TX_PAGES];
+ struct bnge_sw_tx_bd *tx_buf_ring;
+
+ dma_addr_t tx_desc_mapping[MAX_TX_PAGES];
+
+ u32 dev_state;
+#define BNGE_DEV_STATE_CLOSING 0x1
+
+ struct bnge_ring_struct tx_ring_struct;
+};
+
+struct bnge_napi {
+ struct napi_struct napi;
+ struct bnge_net *bn;
+ int index;
+
+ struct bnge_nq_ring_info nq_ring;
+ struct bnge_rx_ring_info *rx_ring;
+ struct bnge_tx_ring_info *tx_ring[BNGE_MAX_TXR_PER_NAPI];
+};
+
+#define INVALID_STATS_CTX_ID -1
+#define BNGE_VNIC_DEFAULT 0
+#define BNGE_MAX_UC_ADDRS 4
+
+struct bnge_vnic_info {
+ u16 fw_vnic_id;
+#define BNGE_MAX_CTX_PER_VNIC 8
+ u16 fw_rss_cos_lb_ctx[BNGE_MAX_CTX_PER_VNIC];
+ u16 mru;
+ /* index 0 always dev_addr */
+ struct bnge_l2_filter *l2_filters[BNGE_MAX_UC_ADDRS];
+ u16 uc_filter_count;
+ u8 *uc_list;
+ dma_addr_t rss_table_dma_addr;
+ __le16 *rss_table;
+ dma_addr_t rss_hash_key_dma_addr;
+ u64 *rss_hash_key;
+ int rss_table_size;
+#define BNGE_RSS_TABLE_ENTRIES 64
+#define BNGE_RSS_TABLE_SIZE (BNGE_RSS_TABLE_ENTRIES * 4)
+#define BNGE_RSS_TABLE_MAX_TBL 8
+#define BNGE_MAX_RSS_TABLE_SIZE \
+ (BNGE_RSS_TABLE_SIZE * BNGE_RSS_TABLE_MAX_TBL)
+ u32 rx_mask;
+
+ u8 *mc_list;
+ int mc_list_size;
+ int mc_list_count;
+ dma_addr_t mc_list_mapping;
+#define BNGE_MAX_MC_ADDRS 16
+
+ u32 flags;
+#define BNGE_VNIC_RSS_FLAG 1
+#define BNGE_VNIC_MCAST_FLAG 4
+#define BNGE_VNIC_UCAST_FLAG 8
+ u32 vnic_id;
+};
+
+struct bnge_filter_base {
+ struct hlist_node hash;
+ struct list_head list;
+ __le64 filter_id;
+ u8 type;
+#define BNGE_FLTR_TYPE_L2 2
+ u8 flags;
+ u16 rxq;
+ u16 fw_vnic_id;
+ u16 vf_idx;
+ unsigned long state;
+#define BNGE_FLTR_VALID 0
+#define BNGE_FLTR_FW_DELETED 2
+
+ struct rcu_head rcu;
+};
+
+struct bnge_l2_key {
+ union {
+ struct {
+ u8 dst_mac_addr[ETH_ALEN];
+ u16 vlan;
+ };
+ u32 filter_key;
+ };
+};
+
+#define BNGE_L2_KEY_SIZE (sizeof(struct bnge_l2_key) / 4)
+struct bnge_l2_filter {
+ /* base filter must be the first member */
+ struct bnge_filter_base base;
+ struct bnge_l2_key l2_key;
+ refcount_t refcnt;
+};
+
+u16 bnge_cp_ring_for_rx(struct bnge_rx_ring_info *rxr);
+u16 bnge_cp_ring_for_tx(struct bnge_tx_ring_info *txr);
+void bnge_fill_hw_rss_tbl(struct bnge_net *bn, struct bnge_vnic_info *vnic);
#endif /* _BNGE_NETDEV_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_resc.c b/drivers/net/ethernet/broadcom/bnge/bnge_resc.c
index c79a3607a1b7..62ebe03a0dcf 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_resc.c
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_resc.c
@@ -46,7 +46,7 @@ static int bnge_aux_get_dflt_msix(struct bnge_dev *bd)
return min_t(int, roce_msix, num_online_cpus() + 1);
}
-static u16 bnge_aux_get_msix(struct bnge_dev *bd)
+u16 bnge_aux_get_msix(struct bnge_dev *bd)
{
if (bnge_is_roce_en(bd))
return bd->aux_num_msix;
@@ -164,7 +164,7 @@ static int bnge_adjust_rings(struct bnge_dev *bd, u16 *rx,
return bnge_fix_rings_count(rx, tx, max_nq, sh);
}
-static int bnge_cal_nr_rss_ctxs(u16 rx_rings)
+int bnge_cal_nr_rss_ctxs(u16 rx_rings)
{
if (!rx_rings)
return 0;
@@ -184,7 +184,7 @@ static u16 bnge_get_total_vnics(struct bnge_dev *bd, u16 rx_rings)
return 1;
}
-static u32 bnge_get_rxfh_indir_size(struct bnge_dev *bd)
+u32 bnge_get_rxfh_indir_size(struct bnge_dev *bd)
{
return bnge_cal_nr_rss_ctxs(bd->rx_nr_rings) *
BNGE_RSS_TABLE_ENTRIES;
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_resc.h b/drivers/net/ethernet/broadcom/bnge/bnge_resc.h
index 54ef1c7d8822..0d6213b27580 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_resc.h
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_resc.h
@@ -72,6 +72,8 @@ void bnge_free_irqs(struct bnge_dev *bd);
int bnge_net_init_dflt_config(struct bnge_dev *bd);
void bnge_net_uninit_dflt_config(struct bnge_dev *bd);
void bnge_aux_init_dflt_config(struct bnge_dev *bd);
+u32 bnge_get_rxfh_indir_size(struct bnge_dev *bd);
+int bnge_cal_nr_rss_ctxs(u16 rx_rings);
static inline u32
bnge_adjust_pow_two(u32 total_ent, u16 ent_per_blk)
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c
index 52ada65943a0..79f5ce2e5d08 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c
@@ -95,7 +95,7 @@ int bnge_alloc_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem)
&rmem->dma_arr[i],
GFP_KERNEL);
if (!rmem->pg_arr[i])
- return -ENOMEM;
+ goto err_free_ring;
if (rmem->ctx_mem)
bnge_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
@@ -116,10 +116,13 @@ int bnge_alloc_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem)
if (rmem->vmem_size) {
*rmem->vmem = vzalloc(rmem->vmem_size);
if (!(*rmem->vmem))
- return -ENOMEM;
+ goto err_free_ring;
}
-
return 0;
+
+err_free_ring:
+ bnge_free_ring(bd, rmem);
+ return -ENOMEM;
}
static int bnge_alloc_ctx_one_lvl(struct bnge_dev *bd,
@@ -436,3 +439,61 @@ skip_rdma:
return 0;
}
+
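+/* Point each ring's generic ring_mem descriptor at its page array, DMA
+ * address array and SW buffer vmem so the common ring allocator can
+ * populate them.
+ */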
+void bnge_init_ring_struct(struct bnge_net *bn)
+{
+ struct bnge_dev *bd = bn->bd;
+ int i, j;
+
+ for (i = 0; i < bd->nq_nr_rings; i++) {
+ struct bnge_napi *bnapi = bn->bnapi[i];
+ struct bnge_ring_mem_info *rmem;
+ struct bnge_nq_ring_info *nqr;
+ struct bnge_rx_ring_info *rxr;
+ struct bnge_tx_ring_info *txr;
+ struct bnge_ring_struct *ring;
+
+ nqr = &bnapi->nq_ring;
+ ring = &nqr->ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bn->cp_nr_pages;
+ rmem->page_size = HW_CMPD_RING_SIZE;
+ rmem->pg_arr = (void **)nqr->desc_ring;
+ rmem->dma_arr = nqr->desc_mapping;
+ rmem->vmem_size = 0;
+
+ rxr = bnapi->rx_ring;
+ if (!rxr)
+ goto skip_rx;
+
+ ring = &rxr->rx_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bn->rx_nr_pages;
+ rmem->page_size = HW_RXBD_RING_SIZE;
+ rmem->pg_arr = (void **)rxr->rx_desc_ring;
+ rmem->dma_arr = rxr->rx_desc_mapping;
+ rmem->vmem_size = SW_RXBD_RING_SIZE * bn->rx_nr_pages;
+ rmem->vmem = (void **)&rxr->rx_buf_ring;
+
+ ring = &rxr->rx_agg_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bn->rx_agg_nr_pages;
+ rmem->page_size = HW_RXBD_RING_SIZE;
+ rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
+ rmem->dma_arr = rxr->rx_agg_desc_mapping;
+ rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bn->rx_agg_nr_pages;
+ rmem->vmem = (void **)&rxr->rx_agg_buf_ring;
+
+skip_rx:
+ bnge_for_each_napi_tx(j, bnapi, txr) {
+ ring = &txr->tx_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bn->tx_nr_pages;
+ rmem->page_size = HW_TXBD_RING_SIZE;
+ rmem->pg_arr = (void **)txr->tx_desc_ring;
+ rmem->dma_arr = txr->tx_desc_mapping;
+ rmem->vmem_size = SW_TXBD_RING_SIZE * bn->tx_nr_pages;
+ rmem->vmem = (void **)&txr->tx_buf_ring;
+ }
+ }
+}
diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_rmem.h b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.h
index 300f1d8268ef..341c7f81ed09 100644
--- a/drivers/net/ethernet/broadcom/bnge/bnge_rmem.h
+++ b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.h
@@ -6,6 +6,7 @@
struct bnge_ctx_mem_type;
struct bnge_dev;
+struct bnge_net;
#define PTU_PTE_VALID 0x1UL
#define PTU_PTE_LAST 0x2UL
@@ -180,9 +181,22 @@ struct bnge_ctx_mem_info {
struct bnge_ctx_mem_type ctx_arr[BNGE_CTX_V2_MAX];
};
+struct bnge_ring_struct {
+ struct bnge_ring_mem_info ring_mem;
+
+ u16 fw_ring_id;
+ union {
+ u16 grp_idx;
+ u16 map_idx; /* Used by NQs */
+ };
+ u32 handle;
+ u8 queue_id;
+};
+
int bnge_alloc_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem);
void bnge_free_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem);
int bnge_alloc_ctx_mem(struct bnge_dev *bd);
void bnge_free_ctx_mem(struct bnge_dev *bd);
+void bnge_init_ring_struct(struct bnge_net *bn);
#endif /* _BNGE_RMEM_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 5578ddcb465d..3fc33b1b4dfb 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -142,6 +142,7 @@ static const struct {
[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
[NETXTREME_E_P7_VF] = { "Broadcom BCM5760X Virtual Function" },
+ [NETXTREME_E_P7_VF_HV] = { "Broadcom BCM5760X Virtual Function for Hyper-V" },
};
static const struct pci_device_id bnxt_pci_tbl[] = {
@@ -217,6 +218,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF },
+ { PCI_VDEVICE(BROADCOM, 0x181b), .driver_data = NETXTREME_E_P7_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
{ 0 }
@@ -263,6 +265,8 @@ const u16 bnxt_bstore_to_trace[] = {
[BNXT_CTX_CA1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA1_TRACE,
[BNXT_CTX_CA2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA2_TRACE,
[BNXT_CTX_RIGP1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP1_TRACE,
+ [BNXT_CTX_KONG] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_AFM_KONG_HWRM_TRACE,
+ [BNXT_CTX_QPC] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ERR_QPC_TRACE,
};
static struct workqueue_struct *bnxt_pf_wq;
@@ -315,7 +319,8 @@ static bool bnxt_vf_pciid(enum board_idx idx)
return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
- idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF);
+ idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF ||
+ idx == NETXTREME_E_P7_VF_HV);
}
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
@@ -926,15 +931,21 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping,
struct bnxt_rx_ring_info *rxr,
+ unsigned int *offset,
gfp_t gfp)
{
netmem_ref netmem;
- netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
+ if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+		netmem = page_pool_alloc_frag_netmem(rxr->page_pool, offset,
+						     BNXT_RX_PAGE_SIZE, gfp);
+ } else {
+ netmem = page_pool_alloc_netmems(rxr->page_pool, gfp);
+ *offset = 0;
+ }
if (!netmem)
return 0;
- *mapping = page_pool_get_dma_addr_netmem(netmem);
+ *mapping = page_pool_get_dma_addr_netmem(netmem) + *offset;
return netmem;
}
@@ -1029,7 +1040,7 @@ static int bnxt_alloc_rx_netmem(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
dma_addr_t mapping;
netmem_ref netmem;
- netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, gfp);
+ netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, &offset, gfp);
if (!netmem)
return -ENOMEM;
@@ -3791,8 +3802,7 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
xdp_rxq_info_unreg(&rxr->xdp_rxq);
page_pool_destroy(rxr->page_pool);
- if (bnxt_separate_head_pool(rxr))
- page_pool_destroy(rxr->head_pool);
+ page_pool_destroy(rxr->head_pool);
rxr->page_pool = rxr->head_pool = NULL;
kfree(rxr->rx_agg_bmap);
@@ -3819,7 +3829,6 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
if (BNXT_RX_PAGE_MODE(bp))
pp.pool_size += bp->rx_ring_size / rx_size_fac;
pp.nid = numa_node;
- pp.napi = &rxr->bnapi->napi;
pp.netdev = bp->dev;
pp.dev = &bp->pdev->dev;
pp.dma_dir = bp->rx_dir;
@@ -3840,6 +3849,8 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
pool = page_pool_create(&pp);
if (IS_ERR(pool))
goto err_destroy_pp;
+ } else {
+ page_pool_get(pool);
}
rxr->head_pool = pool;
@@ -3851,6 +3862,12 @@ err_destroy_pp:
return PTR_ERR(pool);
}
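+/* Attach both page pools to the ring's NAPI for direct recycling; called
+ * after pool creation and again when a queue is restarted.
+ */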
+static void bnxt_enable_rx_page_pool(struct bnxt_rx_ring_info *rxr)
+{
+ page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi);
+ page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi);
+}
+
static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
u16 mem_size;
@@ -3889,6 +3906,7 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node);
if (rc)
return rc;
+ bnxt_enable_rx_page_pool(rxr);
rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
if (rc < 0)
@@ -4385,7 +4403,7 @@ static void bnxt_alloc_one_rx_ring_netmem(struct bnxt *bp,
for (i = 0; i < bp->rx_agg_ring_size; i++) {
if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_KERNEL)) {
netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n",
- ring_nr, i, bp->rx_ring_size);
+ ring_nr, i, bp->rx_agg_ring_size);
break;
}
prod = NEXT_RX_AGG(prod);
@@ -5320,7 +5338,7 @@ static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
{
int i;
- netdev_assert_locked(bp->dev);
+ netdev_assert_locked_or_invisible(bp->dev);
/* Under netdev instance lock and all our NAPIs have been disabled.
* It's safe to delete the hash table.
@@ -6820,7 +6838,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
req->lb_rule = cpu_to_le16(0xffff);
vnic_mru:
- vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
+ vnic->mru = bp->dev->mtu + VLAN_ETH_HLEN;
req->mru = cpu_to_le16(vnic->mru);
req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
@@ -6957,6 +6975,8 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP;
if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP)
+ bp->rss_cap |= BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP;
if (flags & VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP)
bp->fw_cap |= BNXT_FW_CAP_VNIC_RE_FLUSH;
}
@@ -8004,7 +8024,8 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
}
rx_rings = min_t(int, rx_rings, hwr.grp);
hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings);
- if (hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
+ if (bnxt_ulp_registered(bp->edev) &&
+ hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
hwr.stat -= bnxt_get_ulp_stat_ctxs(bp);
hwr.cp = min_t(int, hwr.cp, hwr.stat);
rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh);
@@ -8012,6 +8033,11 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
hwr.rx = rx_rings << 1;
tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx);
hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
+ if (hwr.tx != bp->tx_nr_rings) {
+ netdev_warn(bp->dev,
+ "Able to reserve only %d out of %d requested TX rings\n",
+ hwr.tx, bp->tx_nr_rings);
+ }
bp->tx_nr_rings = hwr.tx;
/* If we cannot reserve all the RX rings, reset the RSS map only
@@ -9126,7 +9152,7 @@ static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
return rc;
}
-static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena)
+static int bnxt_backing_store_cfg_v2(struct bnxt *bp)
{
struct bnxt_ctx_mem_info *ctx = bp->ctx;
struct bnxt_ctx_mem_type *ctxm;
@@ -9134,7 +9160,7 @@ static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena)
int rc = 0;
u16 type;
- for (type = BNXT_CTX_SRT; type <= BNXT_CTX_RIGP1; type++) {
+ for (type = BNXT_CTX_SRT; type <= BNXT_CTX_QPC; type++) {
ctxm = &ctx->ctx_arr[type];
if (!bnxt_bs_trace_avail(bp, type))
continue;
@@ -9152,12 +9178,13 @@ static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena)
}
if (last_type == BNXT_CTX_INV) {
- if (!ena)
+ for (type = 0; type < BNXT_CTX_MAX; type++) {
+ ctxm = &ctx->ctx_arr[type];
+ if (ctxm->mem_valid)
+ last_type = type;
+ }
+ if (last_type == BNXT_CTX_INV)
return 0;
- else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM)
- last_type = BNXT_CTX_MAX - 1;
- else
- last_type = BNXT_CTX_L2_MAX - 1;
}
ctx->ctx_arr[last_type].last = 1;
@@ -9284,6 +9311,10 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
return 0;
+ ena = 0;
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+ goto skip_legacy;
+
ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
l2_qps = ctxm->qp_l2_entries;
qp1_qps = ctxm->qp_qp1_entries;
@@ -9292,7 +9323,6 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
srqs = ctxm->srq_l2_entries;
max_srqs = ctxm->max_entries;
- ena = 0;
if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
pg_lvl = 2;
if (BNXT_SW_RES_LMT(bp)) {
@@ -9386,8 +9416,9 @@ skip_rdma:
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
+skip_legacy:
if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
- rc = bnxt_backing_store_cfg_v2(bp, ena);
+ rc = bnxt_backing_store_cfg_v2(bp);
else
rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
if (rc) {
@@ -9601,10 +9632,10 @@ no_ptp:
static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
+ u32 flags, flags_ext, flags_ext2, flags_ext3;
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
struct hwrm_func_qcaps_output *resp;
struct hwrm_func_qcaps_input *req;
- struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- u32 flags, flags_ext, flags_ext2;
int rc;
rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
@@ -9671,6 +9702,12 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
(flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED;
+ flags_ext3 = le32_to_cpu(resp->flags_ext3);
+ if (flags_ext3 & FUNC_QCAPS_RESP_FLAGS_EXT3_ROCE_VF_DYN_ALLOC_SUPPORT)
+ bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_DYN_ALLOC_SUPPORT;
+ if (flags_ext3 & FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_MIRROR_ON_ROCE;
+
bp->tx_push_thresh = 0;
if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
BNXT_FW_MAJ(bp) > 217)
@@ -12839,6 +12876,17 @@ static int bnxt_set_xps_mapping(struct bnxt *bp)
return rc;
}
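+/* TX ring accounting helpers: total TX rings across all TCs, and the
+ * per-TC count (num_tc == 0 means a single implicit TC).
+ */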
+static int bnxt_tx_nr_rings(struct bnxt *bp)
+{
+ return bp->num_tc ? bp->tx_nr_rings_per_tc * bp->num_tc :
+ bp->tx_nr_rings_per_tc;
+}
+
+static int bnxt_tx_nr_rings_per_tc(struct bnxt *bp)
+{
+ return bp->num_tc ? bp->tx_nr_rings / bp->num_tc : bp->tx_nr_rings;
+}
+
static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
@@ -12856,6 +12904,13 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
if (rc)
return rc;
+	/* Make adjustments if fewer TX rings were reserved than requested */
+ bp->tx_nr_rings -= bp->tx_nr_rings_xdp;
+ bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
+ if (bp->tx_nr_rings_xdp) {
+ bp->tx_nr_rings_xdp = bp->tx_nr_rings_per_tc;
+ bp->tx_nr_rings += bp->tx_nr_rings_xdp;
+ }
rc = bnxt_alloc_mem(bp, irq_re_init);
if (rc) {
netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
@@ -13225,12 +13280,6 @@ static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
mdio->val_in);
- case SIOCSHWTSTAMP:
- return bnxt_hwtstamp_set(dev, ifr);
-
- case SIOCGHWTSTAMP:
- return bnxt_hwtstamp_get(dev, ifr);
-
default:
/* do nothing */
break;
@@ -14695,6 +14744,23 @@ static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp)
return false;
}
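+/* Cache the maximum PFC watchdog timeout supported by FW; zero means the
+ * capability is absent.
+ */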
+static void bnxt_hwrm_pfcwd_qcaps(struct bnxt *bp)
+{
+ struct hwrm_queue_pfcwd_timeout_qcaps_output *resp;
+ struct hwrm_queue_pfcwd_timeout_qcaps_input *req;
+ int rc;
+
+ bp->max_pfcwd_tmo_ms = 0;
+ rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS);
+ if (rc)
+ return;
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send_silent(bp, req);
+ if (!rc)
+ bp->max_pfcwd_tmo_ms = le16_to_cpu(resp->max_pfcwd_timeout);
+ hwrm_req_drop(bp, req);
+}
+
static int bnxt_fw_init_one_p1(struct bnxt *bp)
{
int rc;
@@ -14772,6 +14838,7 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp)
if (bnxt_fw_pre_resv_vnics(bp))
bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS;
+ bnxt_hwrm_pfcwd_qcaps(bp);
bnxt_hwrm_func_qcfg(bp);
bnxt_hwrm_vnic_qcaps(bp);
bnxt_hwrm_port_led_qcaps(bp);
@@ -15735,6 +15802,8 @@ static const struct net_device_ops bnxt_netdev_ops = {
.ndo_xdp_xmit = bnxt_xdp_xmit,
.ndo_bridge_getlink = bnxt_bridge_getlink,
.ndo_bridge_setlink = bnxt_bridge_setlink,
+ .ndo_hwtstamp_get = bnxt_hwtstamp_get,
+ .ndo_hwtstamp_set = bnxt_hwtstamp_set,
};
static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
@@ -15886,8 +15955,7 @@ err_rxq_info_unreg:
xdp_rxq_info_unreg(&clone->xdp_rxq);
err_page_pool_destroy:
page_pool_destroy(clone->page_pool);
- if (bnxt_separate_head_pool(clone))
- page_pool_destroy(clone->head_pool);
+ page_pool_destroy(clone->head_pool);
clone->page_pool = NULL;
clone->head_pool = NULL;
return rc;
@@ -15905,8 +15973,7 @@ static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
xdp_rxq_info_unreg(&rxr->xdp_rxq);
page_pool_destroy(rxr->page_pool);
- if (bnxt_separate_head_pool(rxr))
- page_pool_destroy(rxr->head_pool);
+ page_pool_destroy(rxr->head_pool);
rxr->page_pool = NULL;
rxr->head_pool = NULL;
@@ -16031,10 +16098,11 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
goto err_reset;
}
+ bnxt_enable_rx_page_pool(rxr);
napi_enable_locked(&bnapi->napi);
bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
- mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
+ mru = bp->dev->mtu + VLAN_ETH_HLEN;
for (i = 0; i < bp->nr_vnics; i++) {
vnic = &bp->vnic_info[i];
@@ -16115,7 +16183,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
struct bnxt *bp = netdev_priv(dev);
if (BNXT_PF(bp))
- bnxt_sriov_disable(bp);
+ __bnxt_sriov_disable(bp);
bnxt_rdma_aux_device_del(bp);
@@ -16312,7 +16380,7 @@ static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
bp->rx_nr_rings = bp->cp_nr_rings;
bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
- bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+ bp->tx_nr_rings = bnxt_tx_nr_rings(bp);
}
static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
@@ -16344,7 +16412,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
bnxt_trim_dflt_sh_rings(bp);
else
bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
- bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+ bp->tx_nr_rings = bnxt_tx_nr_rings(bp);
avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings;
if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) {
@@ -16357,7 +16425,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
rc = __bnxt_reserve_rings(bp);
if (rc && rc != -ENODEV)
netdev_warn(bp->dev, "Unable to reserve tx rings\n");
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
if (sh)
bnxt_trim_dflt_sh_rings(bp);
@@ -16366,7 +16434,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
rc = __bnxt_reserve_rings(bp);
if (rc && rc != -ENODEV)
netdev_warn(bp->dev, "2nd rings reservation failed.\n");
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
}
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
bp->rx_nr_rings++;
@@ -16400,7 +16468,7 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
if (rc)
goto init_dflt_ring_err;
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
bnxt_set_dflt_rfs(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index fda0d3cc6227..741b2d854789 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1968,10 +1968,12 @@ struct bnxt_ctx_mem_type {
#define BNXT_CTX_CA1 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA1_TRACE
#define BNXT_CTX_CA2 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA2_TRACE
#define BNXT_CTX_RIGP1 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP1_TRACE
+#define BNXT_CTX_KONG FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE
+#define BNXT_CTX_QPC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ERR_QPC_TRACE
#define BNXT_CTX_MAX (BNXT_CTX_TIM + 1)
#define BNXT_CTX_L2_MAX (BNXT_CTX_FTQM + 1)
-#define BNXT_CTX_V2_MAX (BNXT_CTX_RIGP1 + 1)
+#define BNXT_CTX_V2_MAX (BNXT_CTX_QPC + 1)
#define BNXT_CTX_INV ((u16)-1)
struct bnxt_ctx_mem_info {
@@ -2130,6 +2132,7 @@ enum board_idx {
NETXTREME_E_P5_VF,
NETXTREME_E_P5_VF_HV,
NETXTREME_E_P7_VF,
+ NETXTREME_E_P7_VF_HV,
};
#define BNXT_TRACE_BUF_MAGIC_BYTE ((u8)0xbc)
@@ -2407,6 +2410,7 @@ struct bnxt {
#define BNXT_RSS_CAP_ESP_V4_RSS_CAP BIT(6)
#define BNXT_RSS_CAP_ESP_V6_RSS_CAP BIT(7)
#define BNXT_RSS_CAP_MULTI_RSS_CTX BIT(8)
+#define BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP BIT(9)
u8 rss_hash_key[HW_HASH_KEY_SIZE];
u8 rss_hash_key_valid:1;
@@ -2422,6 +2426,8 @@ struct bnxt {
u8 max_q;
u8 num_tc;
+ u16 max_pfcwd_tmo_ms;
+
u8 tph_mode;
unsigned int current_interval;
@@ -2475,6 +2481,7 @@ struct bnxt {
#define BNXT_FW_CAP_ENABLE_RDMA_SRIOV BIT_ULL(5)
#define BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED BIT_ULL(6)
#define BNXT_FW_CAP_KONG_MB_CHNL BIT_ULL(7)
+ #define BNXT_FW_CAP_ROCE_VF_DYN_ALLOC_SUPPORT BIT_ULL(8)
#define BNXT_FW_CAP_OVS_64BIT_HANDLE BIT_ULL(10)
#define BNXT_FW_CAP_TRUSTED_VF BIT_ULL(11)
#define BNXT_FW_CAP_ERROR_RECOVERY BIT_ULL(13)
@@ -2507,6 +2514,7 @@ struct bnxt {
#define BNXT_FW_CAP_VNIC_RE_FLUSH BIT_ULL(40)
#define BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS BIT_ULL(41)
#define BNXT_FW_CAP_NPAR_1_2 BIT_ULL(42)
+ #define BNXT_FW_CAP_MIRROR_ON_ROCE BIT_ULL(43)
u32 fw_dbg_cap;
@@ -2519,6 +2527,8 @@ struct bnxt {
#define BNXT_SUPPORTS_MULTI_RSS_CTX(bp) \
(BNXT_PF(bp) && BNXT_SUPPORTS_NTUPLE_VNIC(bp) && \
((bp)->rss_cap & BNXT_RSS_CAP_MULTI_RSS_CTX))
+#define BNXT_ROCE_VF_DYN_ALLOC_CAP(bp) \
+ ((bp)->fw_cap & BNXT_FW_CAP_ROCE_VF_DYN_ALLOC_SUPPORT)
#define BNXT_SUPPORTS_QUEUE_API(bp) \
(BNXT_PF(bp) && BNXT_SUPPORTS_NTUPLE_VNIC(bp) && \
((bp)->fw_cap & BNXT_FW_CAP_VNIC_RE_FLUSH))
@@ -2528,6 +2538,8 @@ struct bnxt {
((bp)->fw_cap & BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED)
#define BNXT_SW_RES_LMT(bp) \
((bp)->fw_cap & BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS)
+#define BNXT_MIRROR_ON_ROCE_CAP(bp) \
+ ((bp)->fw_cap & BNXT_FW_CAP_MIRROR_ON_ROCE)
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
@@ -2542,6 +2554,7 @@ struct bnxt {
u16 fw_rx_stats_ext_size;
u16 fw_tx_stats_ext_size;
u16 hw_ring_stats_size;
+ u16 pcie_stat_len;
u8 pri2cos_idx[8];
u8 pri2cos_valid;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
index 18d6c94d5cb8..0181ab1f2dfd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
@@ -36,6 +36,8 @@ static const u16 bnxt_bstore_to_seg_id[] = {
[BNXT_CTX_CA1] = BNXT_CTX_MEM_SEG_CA1,
[BNXT_CTX_CA2] = BNXT_CTX_MEM_SEG_CA2,
[BNXT_CTX_RIGP1] = BNXT_CTX_MEM_SEG_RIGP1,
+ [BNXT_CTX_KONG] = BNXT_CTX_MEM_SEG_KONG,
+ [BNXT_CTX_QPC] = BNXT_CTX_MEM_SEG_QPC,
};
static int bnxt_dbg_hwrm_log_buffer_flush(struct bnxt *bp, u16 type, u32 flags,
@@ -359,7 +361,7 @@ static u32 bnxt_get_ctx_coredump(struct bnxt *bp, void *buf, u32 offset,
if (buf)
buf += offset;
- for (type = 0 ; type <= BNXT_CTX_RIGP1; type++) {
+ for (type = 0; type < BNXT_CTX_V2_MAX; type++) {
struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
bool trace = bnxt_bs_trace_avail(bp, type);
u32 seg_id = bnxt_bstore_to_seg_id[type];
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
index d1cd6387f3ab..c087df88154a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
@@ -102,6 +102,8 @@ struct bnxt_driver_segment_record {
#define BNXT_CTX_MEM_SEG_CA1 0x9
#define BNXT_CTX_MEM_SEG_CA2 0xa
#define BNXT_CTX_MEM_SEG_RIGP1 0xb
+#define BNXT_CTX_MEM_SEG_QPC 0xc
+#define BNXT_CTX_MEM_SEG_KONG 0xd
#define BNXT_CRASH_DUMP_LEN (8 << 20)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 4c4581b0342e..02961d93ed35 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -40,12 +40,6 @@ bnxt_dl_flash_update(struct devlink *dl,
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
int rc;
- if (!BNXT_PF(bp)) {
- NL_SET_ERR_MSG_MOD(extack,
- "flash update not supported from a VF");
- return -EPERM;
- }
-
devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0);
rc = bnxt_flash_package_from_fw_obj(bp->dev, params->fw, 0, extack);
if (!rc)
@@ -220,7 +214,7 @@ __bnxt_dl_reporter_create(struct bnxt *bp,
{
struct devlink_health_reporter *reporter;
- reporter = devlink_health_reporter_create(bp->dl, ops, 0, bp);
+ reporter = devlink_health_reporter_create(bp->dl, ops, bp);
if (IS_ERR(reporter)) {
netdev_warn(bp->dev, "Failed to create %s health reporter, rc = %ld\n",
ops->name, PTR_ERR(reporter));
@@ -1080,16 +1074,9 @@ static int __bnxt_hwrm_nvm_req(struct bnxt *bp,
static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
union devlink_param_value *val)
{
- struct hwrm_nvm_get_variable_input *req = msg;
const struct bnxt_dl_nvm_param *nvm_param;
int i;
- /* Get/Set NVM CFG parameter is supported only on PFs */
- if (BNXT_VF(bp)) {
- hwrm_req_drop(bp, req);
- return -EPERM;
- }
-
for (i = 0; i < ARRAY_SIZE(nvm_params); i++) {
nvm_param = &nvm_params[i];
if (nvm_param->id == param_id)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 1b37612b1c01..41686a6f84b5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1584,6 +1584,8 @@ static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
{
if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
return RXH_IP_SRC | RXH_IP_DST;
+ if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL)
+ return RXH_IP_SRC | RXH_IP_DST | RXH_IP6_FL;
return 0;
}
@@ -1662,13 +1664,18 @@ static int bnxt_set_rxfh_fields(struct net_device *dev,
if (cmd->data == RXH_4TUPLE)
tuple = 4;
- else if (cmd->data == RXH_2TUPLE)
+ else if (cmd->data == RXH_2TUPLE ||
+ cmd->data == (RXH_2TUPLE | RXH_IP6_FL))
tuple = 2;
else if (!cmd->data)
tuple = 0;
else
return -EINVAL;
+ if (cmd->data & RXH_IP6_FL &&
+ !(bp->rss_cap & BNXT_RSS_CAP_IPV6_FLOW_LABEL_RSS_CAP))
+ return -EINVAL;
+
if (cmd->flow_type == TCP_V4_FLOW) {
rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
if (tuple == 4)
@@ -1732,10 +1739,15 @@ static int bnxt_set_rxfh_fields(struct net_device *dev,
case AH_V6_FLOW:
case ESP_V6_FLOW:
case IPV6_FLOW:
- if (tuple == 2)
+ rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL);
+ if (!tuple)
+ break;
+ if (cmd->data & RXH_IP6_FL)
+ rss_hash_cfg |=
+ VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL;
+ else if (tuple == 2)
rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
- else if (!tuple)
- rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
break;
}
@@ -2049,38 +2061,52 @@ static void bnxt_get_drvinfo(struct net_device *dev,
static int bnxt_get_regs_len(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
- int reg_len;
if (!BNXT_PF(bp))
return -EOPNOTSUPP;
- reg_len = BNXT_PXP_REG_LEN;
+ return BNXT_PXP_REG_LEN + bp->pcie_stat_len;
+}
+
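+/* Send HWRM_PCIE_QSTATS with a DMA slice for the response; returns the
+ * stats buffer on success or NULL on failure. The caller holds and drops
+ * the request.
+ */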
+static void *
+__bnxt_hwrm_pcie_qstats(struct bnxt *bp, struct hwrm_pcie_qstats_input *req)
+{
+ struct pcie_ctx_hw_stats_v2 *hw_pcie_stats;
+ dma_addr_t hw_pcie_stats_addr;
+ int rc;
+
+ hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats),
+ &hw_pcie_stats_addr);
+ if (!hw_pcie_stats)
+ return NULL;
- if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)
- reg_len += sizeof(struct pcie_ctx_hw_stats);
+ req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
+ req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
+ rc = hwrm_req_send(bp, req);
- return reg_len;
+ return rc ? NULL : hw_pcie_stats;
}
#define BNXT_PCIE_32B_ENTRY(start, end) \
- { offsetof(struct pcie_ctx_hw_stats, start), \
- offsetof(struct pcie_ctx_hw_stats, end) }
+ { offsetof(struct pcie_ctx_hw_stats_v2, start),\
+ offsetof(struct pcie_ctx_hw_stats_v2, end) }
static const struct {
u16 start;
u16 end;
} bnxt_pcie_32b_entries[] = {
BNXT_PCIE_32B_ENTRY(pcie_ltssm_histogram[0], pcie_ltssm_histogram[3]),
+ BNXT_PCIE_32B_ENTRY(pcie_tl_credit_nph_histogram[0], unused_1),
+ BNXT_PCIE_32B_ENTRY(pcie_rd_latency_histogram[0], unused_2),
};
static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *_p)
{
- struct pcie_ctx_hw_stats *hw_pcie_stats;
+ struct hwrm_pcie_qstats_output *resp;
struct hwrm_pcie_qstats_input *req;
struct bnxt *bp = netdev_priv(dev);
- dma_addr_t hw_pcie_stats_addr;
- int rc;
+ u8 *src;
regs->version = 0;
if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_REG_ACCESS_RESTRICTED))
@@ -2092,24 +2118,21 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS))
return;
- hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats),
- &hw_pcie_stats_addr);
- if (!hw_pcie_stats) {
- hwrm_req_drop(bp, req);
- return;
- }
-
- regs->version = 1;
- hwrm_req_hold(bp, req); /* hold on to slice */
- req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
- req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
- rc = hwrm_req_send(bp, req);
- if (!rc) {
+ resp = hwrm_req_hold(bp, req);
+ src = __bnxt_hwrm_pcie_qstats(bp, req);
+ if (src) {
u8 *dst = (u8 *)(_p + BNXT_PXP_REG_LEN);
- u8 *src = (u8 *)hw_pcie_stats;
- int i, j;
+ int i, j, len;
+
+ len = min(bp->pcie_stat_len, le16_to_cpu(resp->pcie_stat_size));
+ if (len <= sizeof(struct pcie_ctx_hw_stats))
+ regs->version = 1;
+ else if (len < sizeof(struct pcie_ctx_hw_stats_v2))
+ regs->version = 2;
+ else
+ regs->version = 3;
- for (i = 0, j = 0; i < sizeof(*hw_pcie_stats); ) {
+ for (i = 0, j = 0; i < len; ) {
if (i >= bnxt_pcie_32b_entries[j].start &&
i <= bnxt_pcie_32b_entries[j].end) {
u32 *dst32 = (u32 *)(dst + i);
@@ -3185,7 +3208,8 @@ static int bnxt_get_fecparam(struct net_device *dev,
}
static void bnxt_get_fec_stats(struct net_device *dev,
- struct ethtool_fec_stats *fec_stats)
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
struct bnxt *bp = netdev_priv(dev);
u64 *rx;
@@ -4376,12 +4400,42 @@ static int bnxt_get_eee(struct net_device *dev, struct ethtool_keee *edata)
return 0;
}
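+/* Query and set the PFC watchdog timeout via HWRM; both back the
+ * ETHTOOL_PFC_PREVENTION_TOUT tunable below.
+ */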
+static int bnxt_hwrm_pfcwd_qcfg(struct bnxt *bp, u16 *val)
+{
+ struct hwrm_queue_pfcwd_timeout_qcfg_output *resp;
+ struct hwrm_queue_pfcwd_timeout_qcfg_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_QCFG);
+ if (rc)
+ return rc;
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (!rc)
+ *val = le16_to_cpu(resp->pfcwd_timeout_value);
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
+static int bnxt_hwrm_pfcwd_cfg(struct bnxt *bp, u16 val)
+{
+ struct hwrm_queue_pfcwd_timeout_cfg_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCWD_TIMEOUT_CFG);
+ if (rc)
+ return rc;
+ req->pfcwd_timeout_value = cpu_to_le16(val);
+ rc = hwrm_req_send(bp, req);
+ return rc;
+}
+
static int bnxt_set_tunable(struct net_device *dev,
const struct ethtool_tunable *tuna,
const void *data)
{
struct bnxt *bp = netdev_priv(dev);
- u32 rx_copybreak;
+ u32 rx_copybreak, val;
switch (tuna->id) {
case ETHTOOL_RX_COPYBREAK:
@@ -4394,6 +4448,15 @@ static int bnxt_set_tunable(struct net_device *dev,
bp->rx_copybreak = rx_copybreak;
}
return 0;
+ case ETHTOOL_PFC_PREVENTION_TOUT:
+ if (BNXT_VF(bp) || !bp->max_pfcwd_tmo_ms)
+ return -EOPNOTSUPP;
+
+ val = *(u16 *)data;
+ if (val > bp->max_pfcwd_tmo_ms &&
+ val != PFC_STORM_PREVENTION_AUTO)
+ return -EINVAL;
+ return bnxt_hwrm_pfcwd_cfg(bp, val);
default:
return -EOPNOTSUPP;
}
@@ -4408,6 +4471,10 @@ static int bnxt_get_tunable(struct net_device *dev,
case ETHTOOL_RX_COPYBREAK:
*(u32 *)data = bp->rx_copybreak;
break;
+ case ETHTOOL_PFC_PREVENTION_TOUT:
+ if (!bp->max_pfcwd_tmo_ms)
+ return -EOPNOTSUPP;
+ return bnxt_hwrm_pfcwd_qcfg(bp, data);
default:
return -EOPNOTSUPP;
}
@@ -5254,6 +5321,26 @@ static int bnxt_get_ts_info(struct net_device *dev,
return 0;
}
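+/* Probe how many PCIe stats bytes FW returns so bnxt_get_regs_len() can
+ * size the register dump; capped at the v2 structure size.
+ */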
+static void bnxt_hwrm_pcie_qstats(struct bnxt *bp)
+{
+ struct hwrm_pcie_qstats_output *resp;
+ struct hwrm_pcie_qstats_input *req;
+
+ bp->pcie_stat_len = 0;
+ if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
+ return;
+
+ if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS))
+ return;
+
+ resp = hwrm_req_hold(bp, req);
+ if (__bnxt_hwrm_pcie_qstats(bp, req))
+ bp->pcie_stat_len = min_t(u16,
+ le16_to_cpu(resp->pcie_stat_size),
+ sizeof(struct pcie_ctx_hw_stats_v2));
+ hwrm_req_drop(bp, req);
+}
+
void bnxt_ethtool_init(struct bnxt *bp)
{
struct hwrm_selftest_qlist_output *resp;
@@ -5262,6 +5349,7 @@ void bnxt_ethtool_init(struct bnxt *bp)
struct net_device *dev = bp->dev;
int i, rc;
+ bnxt_hwrm_pcie_qstats(bp);
if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER))
bnxt_get_pkgver(dev);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index ca660e6d28a4..db81cf6d5289 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -560,10 +560,11 @@ static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
return bnxt_ptp_cfg_tstamp_filters(bp);
}
-int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+int bnxt_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *stmpconf,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = netdev_priv(dev);
- struct hwtstamp_config stmpconf;
struct bnxt_ptp_cfg *ptp;
u16 old_rxctl;
int old_rx_filter, rc;
@@ -573,17 +574,14 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
if (!ptp)
return -EOPNOTSUPP;
- if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
- return -EFAULT;
-
- if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
- stmpconf.tx_type != HWTSTAMP_TX_OFF)
+ if (stmpconf->tx_type != HWTSTAMP_TX_ON &&
+ stmpconf->tx_type != HWTSTAMP_TX_OFF)
return -ERANGE;
old_rx_filter = ptp->rx_filter;
old_rxctl = ptp->rxctl;
old_tx_tstamp_en = ptp->tx_tstamp_en;
- switch (stmpconf.rx_filter) {
+ switch (stmpconf->rx_filter) {
case HWTSTAMP_FILTER_NONE:
ptp->rxctl = 0;
ptp->rx_filter = HWTSTAMP_FILTER_NONE;
@@ -616,7 +614,7 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
return -ERANGE;
}
- if (stmpconf.tx_type == HWTSTAMP_TX_ON)
+ if (stmpconf->tx_type == HWTSTAMP_TX_ON)
ptp->tx_tstamp_en = 1;
else
ptp->tx_tstamp_en = 0;
@@ -625,9 +623,8 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
if (rc)
goto ts_set_err;
- stmpconf.rx_filter = ptp->rx_filter;
- return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
- -EFAULT : 0;
+ stmpconf->rx_filter = ptp->rx_filter;
+ return 0;
ts_set_err:
ptp->rx_filter = old_rx_filter;
@@ -636,22 +633,22 @@ ts_set_err:
return rc;
}
-int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+int bnxt_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *stmpconf)
{
struct bnxt *bp = netdev_priv(dev);
- struct hwtstamp_config stmpconf;
struct bnxt_ptp_cfg *ptp;
ptp = bp->ptp_cfg;
if (!ptp)
return -EOPNOTSUPP;
- stmpconf.flags = 0;
- stmpconf.tx_type = ptp->tx_tstamp_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ stmpconf->flags = 0;
+ stmpconf->tx_type = ptp->tx_tstamp_en ? HWTSTAMP_TX_ON
+ : HWTSTAMP_TX_OFF;
- stmpconf.rx_filter = ptp->rx_filter;
- return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
- -EFAULT : 0;
+ stmpconf->rx_filter = ptp->rx_filter;
+ return 0;
}
static int bnxt_map_regs(struct bnxt *bp, u32 *reg_arr, int count, int reg_win)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index 0481161d26ef..8cc2fae47644 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -160,8 +160,11 @@ void bnxt_ptp_update_current_time(struct bnxt *bp);
void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2);
int bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp);
void bnxt_ptp_reapply_pps(struct bnxt *bp);
-int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
-int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
+int bnxt_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *stmpconf,
+ struct netlink_ext_ack *extack);
+int bnxt_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *stmpconf);
void bnxt_ptp_free_txts_skbs(struct bnxt_ptp_cfg *ptp);
int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod);
void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 480e18a32caa..80fed2c07b9e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -541,6 +541,13 @@ static void bnxt_hwrm_roce_sriov_cfg(struct bnxt *bp, int num_vfs)
if (rc)
goto err;
+	/* With VF dynamic resource allocation, the driver provisions the
+	 * maximum resources to every VF and the FW hands them out on the
+	 * fly, so divide the pool by 1 (i.e., offer each VF the full pool).
+	 */
+ if (BNXT_ROCE_VF_DYN_ALLOC_CAP(bp))
+ num_vfs = 1;
+
cfg_req->fid = cpu_to_le16(0xffff);
cfg_req->enables2 =
cpu_to_le32(FUNC_CFG_REQ_ENABLES2_ROCE_MAX_AV_PER_VF |
@@ -734,7 +741,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
FUNC_CFG_REQ_ENABLES_NUM_VNICS |
FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);
- mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
+ mtu = bp->dev->mtu + VLAN_ETH_HLEN;
req->mru = cpu_to_le16(mtu);
req->admin_mtu = cpu_to_le16(mtu);
@@ -919,7 +926,7 @@ err_out1:
return rc;
}
-void bnxt_sriov_disable(struct bnxt *bp)
+void __bnxt_sriov_disable(struct bnxt *bp)
{
u16 num_vfs = pci_num_vf(bp->pdev);
@@ -943,6 +950,14 @@ void bnxt_sriov_disable(struct bnxt *bp)
devl_unlock(bp->dl);
bnxt_free_vf_resources(bp);
+}
+
+static void bnxt_sriov_disable(struct bnxt *bp)
+{
+ if (!pci_num_vf(bp->pdev))
+ return;
+
+ __bnxt_sriov_disable(bp);
/* Reclaim all resources for the PF. */
rtnl_lock();
@@ -1321,7 +1336,7 @@ int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
return 0;
}
-void bnxt_sriov_disable(struct bnxt *bp)
+void __bnxt_sriov_disable(struct bnxt *bp)
{
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index 9a4bacba477b..e4979d729312 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -38,7 +38,7 @@ bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf);
int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trust);
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset);
-void bnxt_sriov_disable(struct bnxt *);
+void __bnxt_sriov_disable(struct bnxt *bp);
void bnxt_hwrm_exec_fwd_req(struct bnxt *);
void bnxt_update_vf_mac(struct bnxt *);
int bnxt_approve_mac(struct bnxt *, const u8 *, bool);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index d72fd248f3aa..2d66bf59cd64 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -244,7 +244,7 @@ bnxt_tc_parse_pedit(struct bnxt *bp, struct bnxt_tc_actions *actions,
offset < offset_of_ip6_daddr + 16) {
actions->nat.src_xlate = false;
idx = (offset - offset_of_ip6_daddr) / 4;
- actions->nat.l3.ipv6.saddr.s6_addr32[idx] = htonl(val);
+ actions->nat.l3.ipv6.daddr.s6_addr32[idx] = htonl(val);
} else {
netdev_err(bp->dev,
"%s: IPv6_hdr: Invalid pedit field\n",
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 61cf201bb0dc..f8c2c72b382d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -100,6 +100,12 @@ void bnxt_set_dflt_ulp_stat_ctxs(struct bnxt *bp)
if (BNXT_PF(bp) && !bp->pf.port_id &&
bp->port_count > 1)
bp->edev->ulp_num_ctxs++;
+
+ /* Reserve one additional stat_ctx when the device is capable
+ * of supporting port mirroring on the RDMA device.
+ */
+ if (BNXT_MIRROR_ON_ROCE_CAP(bp))
+ bp->edev->ulp_num_ctxs++;
}
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 58d579dca3f1..3e77a96e5a3e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -468,9 +468,8 @@ bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
if (!skb)
return NULL;
- xdp_update_skb_shared_info(skb, num_frags,
- sinfo->xdp_frags_size,
- BNXT_RX_PAGE_SIZE * num_frags,
- xdp_buff_is_frag_pfmemalloc(xdp));
+ xdp_update_skb_frags_info(skb, num_frags, sinfo->xdp_frags_size,
+ BNXT_RX_PAGE_SIZE * num_frags,
+ xdp_buff_get_skb_flags(xdp));
return skb;
}
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index a9040c42d2ff..6e97a5a7daaf 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -4230,8 +4230,7 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
cnic_bnx2x_delete_wait(dev, 0);
- cancel_delayed_work(&cp->delete_task);
- flush_workqueue(cnic_wq);
+ cancel_delayed_work_sync(&cp->delete_task);
if (atomic_read(&cp->iscsi_conn) != 0)
netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index b4dc93a48718..7f00ec7fd7b9 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -13929,22 +13929,20 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
}
-static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+static int tg3_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *stmpconf,
+ struct netlink_ext_ack *extack)
{
struct tg3 *tp = netdev_priv(dev);
- struct hwtstamp_config stmpconf;
if (!tg3_flag(tp, PTP_CAPABLE))
return -EOPNOTSUPP;
- if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
- return -EFAULT;
-
- if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
- stmpconf.tx_type != HWTSTAMP_TX_OFF)
+ if (stmpconf->tx_type != HWTSTAMP_TX_ON &&
+ stmpconf->tx_type != HWTSTAMP_TX_OFF)
return -ERANGE;
- switch (stmpconf.rx_filter) {
+ switch (stmpconf->rx_filter) {
case HWTSTAMP_FILTER_NONE:
tp->rxptpctl = 0;
break;
@@ -14004,74 +14002,72 @@ static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
tw32(TG3_RX_PTP_CTL,
tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
- if (stmpconf.tx_type == HWTSTAMP_TX_ON)
+ if (stmpconf->tx_type == HWTSTAMP_TX_ON)
tg3_flag_set(tp, TX_TSTAMP_EN);
else
tg3_flag_clear(tp, TX_TSTAMP_EN);
- return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
- -EFAULT : 0;
+ return 0;
}
-static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+static int tg3_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *stmpconf)
{
struct tg3 *tp = netdev_priv(dev);
- struct hwtstamp_config stmpconf;
if (!tg3_flag(tp, PTP_CAPABLE))
return -EOPNOTSUPP;
- stmpconf.flags = 0;
- stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
- HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
+ stmpconf->flags = 0;
+ stmpconf->tx_type = tg3_flag(tp, TX_TSTAMP_EN) ?
+ HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
switch (tp->rxptpctl) {
case 0:
- stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_NONE;
break;
case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
break;
case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
break;
case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
- stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
+ stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
break;
default:
WARN_ON_ONCE(1);
return -ERANGE;
}
- return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
- -EFAULT : 0;
+ return 0;
}
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -14126,12 +14122,6 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return err;
- case SIOCSHWTSTAMP:
- return tg3_hwtstamp_set(dev, ifr);
-
- case SIOCGHWTSTAMP:
- return tg3_hwtstamp_get(dev, ifr);
-
default:
/* do nothing */
break;
@@ -14407,6 +14397,8 @@ static const struct net_device_ops tg3_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = tg3_poll_controller,
#endif
+ .ndo_hwtstamp_get = tg3_hwtstamp_get,
+ .ndo_hwtstamp_set = tg3_hwtstamp_set,
};
static void tg3_get_eeprom_size(struct tg3 *tp)
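The tg3 hunks above move hardware timestamping from the SIOCSHWTSTAMP/SIOCGHWTSTAMP cases of ->ndo_eth_ioctl to the dedicated ndo_hwtstamp_set/get callbacks, so the core performs the copy_from_user()/copy_to_user() and drivers operate on an in-kernel struct kernel_hwtstamp_config. A minimal sketch of the callback shape, not part of the patch (the "foo" driver is hypothetical; only the netdev types and HWTSTAMP constants are real kernel API):

#include <linux/net_tstamp.h>
#include <linux/netdevice.h>

/* Hypothetical driver: the core has already copied the config from user
 * space, and copies it back to user space after a 0 return. */
static int foo_hwtstamp_set(struct net_device *dev,
			    struct kernel_hwtstamp_config *cfg,
			    struct netlink_ext_ack *extack)
{
	if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;
	/* program the hardware from cfg->tx_type / cfg->rx_filter here */
	return 0;
}

static int foo_hwtstamp_get(struct net_device *dev,
			    struct kernel_hwtstamp_config *cfg)
{
	/* report the currently programmed state */
	cfg->tx_type = HWTSTAMP_TX_OFF;
	cfg->rx_filter = HWTSTAMP_FILTER_NONE;
	return 0;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_hwtstamp_set = foo_hwtstamp_set,
	.ndo_hwtstamp_get = foo_hwtstamp_get,
};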
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index c9a5c8beb2fa..0830c48973aa 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -184,6 +184,13 @@
#define GEM_DCFG8 0x029C /* Design Config 8 */
#define GEM_DCFG10 0x02A4 /* Design Config 10 */
#define GEM_DCFG12 0x02AC /* Design Config 12 */
+#define GEM_ENST_START_TIME_Q0 0x0800 /* ENST Q0 start time */
+#define GEM_ENST_START_TIME_Q1 0x0804 /* ENST Q1 start time */
+#define GEM_ENST_ON_TIME_Q0 0x0820 /* ENST Q0 on time */
+#define GEM_ENST_ON_TIME_Q1 0x0824 /* ENST Q1 on time */
+#define GEM_ENST_OFF_TIME_Q0 0x0840 /* ENST Q0 off time */
+#define GEM_ENST_OFF_TIME_Q1 0x0844 /* ENST Q1 off time */
+#define GEM_ENST_CONTROL 0x0880 /* ENST control register */
#define GEM_USX_CONTROL 0x0A80 /* High speed PCS control register */
#define GEM_USX_STATUS 0x0A88 /* High speed PCS status register */
@@ -213,14 +220,19 @@
#define GEM_ISR(hw_q) (0x0400 + ((hw_q) << 2))
#define GEM_TBQP(hw_q) (0x0440 + ((hw_q) << 2))
-#define GEM_TBQPH(hw_q) (0x04C8)
#define GEM_RBQP(hw_q) (0x0480 + ((hw_q) << 2))
#define GEM_RBQS(hw_q) (0x04A0 + ((hw_q) << 2))
-#define GEM_RBQPH(hw_q) (0x04D4)
#define GEM_IER(hw_q) (0x0600 + ((hw_q) << 2))
#define GEM_IDR(hw_q) (0x0620 + ((hw_q) << 2))
#define GEM_IMR(hw_q) (0x0640 + ((hw_q) << 2))
+#define GEM_ENST_START_TIME(hw_q) (0x0800 + ((hw_q) << 2))
+#define GEM_ENST_ON_TIME(hw_q) (0x0820 + ((hw_q) << 2))
+#define GEM_ENST_OFF_TIME(hw_q) (0x0840 + ((hw_q) << 2))
+
+/* Bitfields in ENST_CONTROL */
+#define GEM_ENST_DISABLE_QUEUE_OFFSET 16
+
/* Bitfields in NCR */
#define MACB_LB_OFFSET 0 /* reserved */
#define MACB_LB_SIZE 1
@@ -554,6 +566,23 @@
#define GEM_HIGH_SPEED_OFFSET 26
#define GEM_HIGH_SPEED_SIZE 1
+/* Bitfields in ENST_START_TIME_Qx. */
+#define GEM_START_TIME_SEC_OFFSET 30
+#define GEM_START_TIME_SEC_SIZE 2
+#define GEM_START_TIME_NSEC_OFFSET 0
+#define GEM_START_TIME_NSEC_SIZE 30
+
+/* Bitfields in ENST_ON_TIME_Qx. */
+#define GEM_ON_TIME_OFFSET 0
+#define GEM_ON_TIME_SIZE 17
+
+/* Bitfields in ENST_OFF_TIME_Qx. */
+#define GEM_OFF_TIME_OFFSET 0
+#define GEM_OFF_TIME_SIZE 17
+
+/* Hardware ENST timing registers granularity */
+#define ENST_TIME_GRANULARITY_NS 8
+
/* Bitfields in USX_CONTROL. */
#define GEM_USX_CTRL_SPEED_OFFSET 14
#define GEM_USX_CTRL_SPEED_SIZE 3
@@ -739,6 +768,7 @@
#define MACB_CAPS_MIIONRGMII 0x00000200
#define MACB_CAPS_NEED_TSUCLK 0x00000400
#define MACB_CAPS_QUEUE_DISABLE 0x00000800
+#define MACB_CAPS_QBV 0x00001000
#define MACB_CAPS_PCS 0x01000000
#define MACB_CAPS_HIGH_SPEED 0x02000000
#define MACB_CAPS_CLK_HW_CHG 0x04000000
@@ -1214,10 +1244,13 @@ struct macb_queue {
unsigned int IDR;
unsigned int IMR;
unsigned int TBQP;
- unsigned int TBQPH;
unsigned int RBQS;
unsigned int RBQP;
- unsigned int RBQPH;
+
+ /* ENST register offsets for this queue */
+ unsigned int ENST_START_TIME;
+ unsigned int ENST_ON_TIME;
+ unsigned int ENST_OFF_TIME;
/* Lock to protect tx_head and tx_tail */
spinlock_t tx_ptr_lock;
@@ -1397,6 +1430,19 @@ static inline bool gem_has_ptp(struct macb *bp)
return IS_ENABLED(CONFIG_MACB_USE_HWSTAMP) && (bp->caps & MACB_CAPS_GEM_HAS_PTP);
}
+/* ENST Helper functions */
+static inline u64 enst_ns_to_hw_units(size_t ns, u32 speed_mbps)
+{
+ return DIV_ROUND_UP((ns) * (speed_mbps),
+ (ENST_TIME_GRANULARITY_NS * 1000));
+}
+
+static inline u64 enst_max_hw_interval(u32 speed_mbps)
+{
+ return DIV_ROUND_UP(GENMASK(GEM_ON_TIME_SIZE - 1, 0) *
+ ENST_TIME_GRANULARITY_NS * 1000, (speed_mbps));
+}
+
/**
* struct macb_platform_data - platform data for MACB Ethernet used for PCI registration
* @pclk: platform clock
@@ -1407,4 +1453,21 @@ struct macb_platform_data {
struct clk *hclk;
};
+/**
+ * struct macb_queue_enst_config - Configuration for Enhanced Scheduled Traffic
+ * @start_time_mask: Bitmask representing the start time for the queue
+ * @on_time_bytes: "on" time nsec expressed in bytes
+ * @off_time_bytes: "off" time nsec expressed in bytes
+ * @queue_id: Identifier for the queue
+ *
+ * This structure holds the configuration parameters for an ENST queue,
+ * used to control time-based transmission scheduling in the MACB driver.
+ */
+struct macb_queue_enst_config {
+ u32 start_time_mask;
+ u32 on_time_bytes;
+ u32 off_time_bytes;
+ u8 queue_id;
+};
+
#endif /* _MACB_H */
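A worked example of the ENST helpers defined above, given the 17-bit on-time field and 8 ns granularity from this header: at 1000 Mbps one hardware unit corresponds to 8 ns, so enst_ns_to_hw_units(8000, 1000) yields 1000 units, and the largest representable interval is (2^17 - 1) * 8 ns, roughly 1.05 ms (about 10.5 ms at 100 Mbps). A standalone check of the arithmetic, not part of the patch:

#include <stdio.h>

int main(void)
{
	unsigned long long on_time_max = (1ULL << 17) - 1;	/* 0x1FFFF */

	/* enst_ns_to_hw_units(8000 ns, 1000 Mbps) = 8000*1000/8000 = 1000 */
	printf("8 us at 1G -> %llu units\n", 8000ULL * 1000 / 8000);
	/* enst_max_hw_interval(1000) = 131071 * 8 ns ~= 1.05 ms */
	printf("max interval at 1G: %llu ns\n", on_time_max * 8);
	/* the same 17-bit field covers ~10.5 ms at 100 Mbps */
	printf("max interval at 100M: %llu ns\n", on_time_max * 8 * 1000 / 100);
	return 0;
}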
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index ce95fad8cedd..ca2386b83473 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -36,6 +36,7 @@
#include <linux/reset.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/inetdevice.h>
+#include <net/pkt_sched.h>
#include "macb.h"
/* This structure is only used for MACB on SiFive FU540 devices */
@@ -51,14 +52,10 @@ struct sifive_fu540_macb_mgmt {
#define DEFAULT_RX_RING_SIZE 512 /* must be power of 2 */
#define MIN_RX_RING_SIZE 64
#define MAX_RX_RING_SIZE 8192
-#define RX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \
- * (bp)->rx_ring_size)
#define DEFAULT_TX_RING_SIZE 512 /* must be power of 2 */
#define MIN_TX_RING_SIZE 64
#define MAX_TX_RING_SIZE 4096
-#define TX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \
- * (bp)->tx_ring_size)
/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4)
@@ -278,9 +275,9 @@ static void macb_set_hwaddr(struct macb *bp)
u32 bottom;
u16 top;
- bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
+ bottom = get_unaligned_le32(bp->dev->dev_addr);
macb_or_gem_writel(bp, SA1B, bottom);
- top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
+ top = get_unaligned_le16(bp->dev->dev_addr + 4);
macb_or_gem_writel(bp, SA1T, top);
if (gem_has_ptp(bp)) {
@@ -495,19 +492,19 @@ static void macb_init_buffers(struct macb *bp)
struct macb_queue *queue;
unsigned int q;
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- queue_writel(queue, RBQPH,
- upper_32_bits(queue->rx_ring_dma));
+ /* Single register for all queues' high 32 bits. */
+ if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
+ macb_writel(bp, RBQPH,
+ upper_32_bits(bp->queues[0].rx_ring_dma));
+ macb_writel(bp, TBQPH,
+ upper_32_bits(bp->queues[0].tx_ring_dma));
+ }
#endif
+
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- queue_writel(queue, TBQPH,
- upper_32_bits(queue->tx_ring_dma));
-#endif
}
}
@@ -1166,10 +1163,6 @@ static void macb_tx_error_task(struct work_struct *work)
/* Reinitialize the TX desc queue */
queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B)
- queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
-#endif
/* Make TX ring reflect state of hardware */
queue->tx_head = 0;
queue->tx_tail = 0;
@@ -1223,12 +1216,13 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
{
struct macb *bp = queue->bp;
u16 queue_index = queue - bp->queues;
+ unsigned long flags;
unsigned int tail;
unsigned int head;
int packets = 0;
u32 bytes = 0;
- spin_lock(&queue->tx_ptr_lock);
+ spin_lock_irqsave(&queue->tx_ptr_lock, flags);
head = queue->tx_head;
for (tail = queue->tx_tail; tail != head && packets < budget; tail++) {
struct macb_tx_skb *tx_skb;
@@ -1291,7 +1285,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
CIRC_CNT(queue->tx_head, queue->tx_tail,
bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
netif_wake_subqueue(bp->dev, queue_index);
- spin_unlock(&queue->tx_ptr_lock);
+ spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
return packets;
}
@@ -1707,8 +1701,9 @@ static void macb_tx_restart(struct macb_queue *queue)
{
struct macb *bp = queue->bp;
unsigned int head_idx, tbqp;
+ unsigned long flags;
- spin_lock(&queue->tx_ptr_lock);
+ spin_lock_irqsave(&queue->tx_ptr_lock, flags);
if (queue->tx_head == queue->tx_tail)
goto out_tx_ptr_unlock;
@@ -1720,19 +1715,20 @@ static void macb_tx_restart(struct macb_queue *queue)
if (tbqp == head_idx)
goto out_tx_ptr_unlock;
- spin_lock_irq(&bp->lock);
+ spin_lock(&bp->lock);
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
- spin_unlock_irq(&bp->lock);
+ spin_unlock(&bp->lock);
out_tx_ptr_unlock:
- spin_unlock(&queue->tx_ptr_lock);
+ spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
}
static bool macb_tx_complete_pending(struct macb_queue *queue)
{
bool retval = false;
+ unsigned long flags;
- spin_lock(&queue->tx_ptr_lock);
+ spin_lock_irqsave(&queue->tx_ptr_lock, flags);
if (queue->tx_head != queue->tx_tail) {
/* Make hw descriptor updates visible to CPU */
rmb();
@@ -1740,7 +1736,7 @@ static bool macb_tx_complete_pending(struct macb_queue *queue)
if (macb_tx_desc(queue, queue->tx_tail)->ctrl & MACB_BIT(TX_USED))
retval = true;
}
- spin_unlock(&queue->tx_ptr_lock);
+ spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
return retval;
}
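The hunks above convert every tx_ptr_lock user to spin_lock_irqsave() and, consequently, the nested bp->lock from spin_lock_irq() to plain spin_lock(): with interrupts already disabled by the outer lock, an inner spin_unlock_irq() would re-enable them too early. A minimal sketch of the resulting nesting, not part of the patch (the function name is hypothetical; the lock names are the driver's):

static void example_nested_locking(struct macb *bp, struct macb_queue *queue)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->tx_ptr_lock, flags);
	spin_lock(&bp->lock);		/* irqs already off: no _irq variant */
	/* ... hit the TSTART doorbell ... */
	spin_unlock(&bp->lock);
	spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
}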
@@ -2308,6 +2304,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct macb_queue *queue = &bp->queues[queue_index];
unsigned int desc_cnt, nr_frags, frag_size, f;
unsigned int hdrlen;
+ unsigned long flags;
bool is_lso;
netdev_tx_t ret = NETDEV_TX_OK;
@@ -2368,7 +2365,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
}
- spin_lock_bh(&queue->tx_ptr_lock);
+ spin_lock_irqsave(&queue->tx_ptr_lock, flags);
/* This is a hard error, log it. */
if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
@@ -2392,15 +2389,15 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(netdev_get_tx_queue(bp->dev, queue_index),
skb->len);
- spin_lock_irq(&bp->lock);
+ spin_lock(&bp->lock);
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
- spin_unlock_irq(&bp->lock);
+ spin_unlock(&bp->lock);
if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
netif_stop_subqueue(dev, queue_index);
unlock:
- spin_unlock_bh(&queue->tx_ptr_lock);
+ spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
return ret;
}
@@ -2470,35 +2467,42 @@ static void macb_free_rx_buffers(struct macb *bp)
}
}
+static unsigned int macb_tx_ring_size_per_queue(struct macb *bp)
+{
+ return macb_dma_desc_get_size(bp) * bp->tx_ring_size + bp->tx_bd_rd_prefetch;
+}
+
+static unsigned int macb_rx_ring_size_per_queue(struct macb *bp)
+{
+ return macb_dma_desc_get_size(bp) * bp->rx_ring_size + bp->rx_bd_rd_prefetch;
+}
+
static void macb_free_consistent(struct macb *bp)
{
+ struct device *dev = &bp->pdev->dev;
struct macb_queue *queue;
unsigned int q;
- int size;
+ size_t size;
if (bp->rx_ring_tieoff) {
- dma_free_coherent(&bp->pdev->dev, macb_dma_desc_get_size(bp),
+ dma_free_coherent(dev, macb_dma_desc_get_size(bp),
bp->rx_ring_tieoff, bp->rx_ring_tieoff_dma);
bp->rx_ring_tieoff = NULL;
}
bp->macbgem_ops.mog_free_rx_buffers(bp);
+ size = bp->num_queues * macb_tx_ring_size_per_queue(bp);
+ dma_free_coherent(dev, size, bp->queues[0].tx_ring, bp->queues[0].tx_ring_dma);
+
+ size = bp->num_queues * macb_rx_ring_size_per_queue(bp);
+ dma_free_coherent(dev, size, bp->queues[0].rx_ring, bp->queues[0].rx_ring_dma);
+
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
kfree(queue->tx_skb);
queue->tx_skb = NULL;
- if (queue->tx_ring) {
- size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
- dma_free_coherent(&bp->pdev->dev, size,
- queue->tx_ring, queue->tx_ring_dma);
- queue->tx_ring = NULL;
- }
- if (queue->rx_ring) {
- size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
- dma_free_coherent(&bp->pdev->dev, size,
- queue->rx_ring, queue->rx_ring_dma);
- queue->rx_ring = NULL;
- }
+ queue->tx_ring = NULL;
+ queue->rx_ring = NULL;
}
}
@@ -2540,35 +2544,45 @@ static int macb_alloc_rx_buffers(struct macb *bp)
static int macb_alloc_consistent(struct macb *bp)
{
+ struct device *dev = &bp->pdev->dev;
+ dma_addr_t tx_dma, rx_dma;
struct macb_queue *queue;
unsigned int q;
- int size;
+ void *tx, *rx;
+ size_t size;
+
+ /*
+ * The upper 32 bits of the Tx/Rx DMA descriptor base must match across
+ * all queues! We cannot enforce this guarantee; the best we can do is a
+ * single allocation and hope it lands in alloc_pages(), which guarantees
+ * natural alignment of physical addresses.
+ */
+
+ size = bp->num_queues * macb_tx_ring_size_per_queue(bp);
+ tx = dma_alloc_coherent(dev, size, &tx_dma, GFP_KERNEL);
+ if (!tx || upper_32_bits(tx_dma) != upper_32_bits(tx_dma + size - 1))
+ goto out_err;
+ netdev_dbg(bp->dev, "Allocated %zu bytes for %u TX rings at %08lx (mapped %p)\n",
+ size, bp->num_queues, (unsigned long)tx_dma, tx);
+
+ size = bp->num_queues * macb_rx_ring_size_per_queue(bp);
+ rx = dma_alloc_coherent(dev, size, &rx_dma, GFP_KERNEL);
+ if (!rx || upper_32_bits(rx_dma) != upper_32_bits(rx_dma + size - 1))
+ goto out_err;
+ netdev_dbg(bp->dev, "Allocated %zu bytes for %u RX rings at %08lx (mapped %p)\n",
+ size, bp->num_queues, (unsigned long)rx_dma, rx);
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
- queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
- &queue->tx_ring_dma,
- GFP_KERNEL);
- if (!queue->tx_ring)
- goto out_err;
- netdev_dbg(bp->dev,
- "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
- q, size, (unsigned long)queue->tx_ring_dma,
- queue->tx_ring);
+ queue->tx_ring = tx + macb_tx_ring_size_per_queue(bp) * q;
+ queue->tx_ring_dma = tx_dma + macb_tx_ring_size_per_queue(bp) * q;
+
+ queue->rx_ring = rx + macb_rx_ring_size_per_queue(bp) * q;
+ queue->rx_ring_dma = rx_dma + macb_rx_ring_size_per_queue(bp) * q;
size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
queue->tx_skb = kmalloc(size, GFP_KERNEL);
if (!queue->tx_skb)
goto out_err;
-
- size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
- queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
- &queue->rx_ring_dma, GFP_KERNEL);
- if (!queue->rx_ring)
- goto out_err;
- netdev_dbg(bp->dev,
- "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
- size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
}
if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
goto out_err;
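The rewritten allocation path above makes one dma_alloc_coherent() call per direction and rejects the result if the span crosses a 4 GiB boundary, because TBQPH/RBQPH are single registers shared by all queues. A sketch of that check in isolation, assuming kernel context (not part of the patch):

/* True iff every address in [dma, dma + size) shares the same upper
 * 32 bits, i.e. the value that will be programmed into TBQPH/RBQPH. */
static bool span_has_one_upper_half(dma_addr_t dma, size_t size)
{
	return upper_32_bits(dma) == upper_32_bits(dma + size - 1);
}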
@@ -3090,7 +3104,7 @@ static void gem_update_stats(struct macb *bp)
/* Add GEM_OCTTXH, GEM_OCTRXH */
val = bp->macb_reg_readl(bp, offset + 4);
bp->ethtool_stats[i] += ((u64)val) << 32;
- *(p++) += ((u64)val) << 32;
+ *p += ((u64)val) << 32;
}
}
@@ -4084,6 +4098,223 @@ static void macb_restore_features(struct macb *bp)
macb_set_rxflow_feature(bp, features);
}
+static int macb_taprio_setup_replace(struct net_device *ndev,
+ struct tc_taprio_qopt_offload *conf)
+{
+ u64 total_on_time = 0, start_time_sec = 0, start_time = conf->base_time;
+ u32 configured_queues = 0, speed = 0, start_time_nsec;
+ struct macb_queue_enst_config *enst_queue;
+ struct tc_taprio_sched_entry *entry;
+ struct macb *bp = netdev_priv(ndev);
+ struct ethtool_link_ksettings kset;
+ struct macb_queue *queue;
+ size_t i;
+ int err;
+
+ if (conf->num_entries > bp->num_queues) {
+ netdev_err(ndev, "Too many TAPRIO entries: %zu > %d queues\n",
+ conf->num_entries, bp->num_queues);
+ return -EINVAL;
+ }
+
+ if (conf->base_time < 0) {
+ netdev_err(ndev, "Invalid base_time: must be 0 or positive, got %lld\n",
+ conf->base_time);
+ return -ERANGE;
+ }
+
+ /* Get the current link speed */
+ err = phylink_ethtool_ksettings_get(bp->phylink, &kset);
+ if (unlikely(err)) {
+ netdev_err(ndev, "Failed to get link settings: %d\n", err);
+ return err;
+ }
+
+ speed = kset.base.speed;
+ if (unlikely(speed <= 0)) {
+ netdev_err(ndev, "Invalid speed: %d\n", speed);
+ return -EINVAL;
+ }
+
+ enst_queue = kcalloc(conf->num_entries, sizeof(*enst_queue), GFP_KERNEL);
+ if (unlikely(!enst_queue))
+ return -ENOMEM;
+
+ /* Pre-validate all entries before making any hardware changes */
+ for (i = 0; i < conf->num_entries; i++) {
+ entry = &conf->entries[i];
+
+ if (entry->command != TC_TAPRIO_CMD_SET_GATES) {
+ netdev_err(ndev, "Entry %zu: unsupported command %d\n",
+ i, entry->command);
+ err = -EOPNOTSUPP;
+ goto cleanup;
+ }
+
+ /* Validate gate_mask: must be nonzero, single queue, and within range */
+ if (!is_power_of_2(entry->gate_mask)) {
+ netdev_err(ndev, "Entry %zu: gate_mask 0x%x is not a power of 2 (only one queue per entry allowed)\n",
+ i, entry->gate_mask);
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* gate_mask must not select queues outside the valid queue_mask */
+ if (entry->gate_mask & ~bp->queue_mask) {
+ netdev_err(ndev, "Entry %zu: gate_mask 0x%x exceeds queue range (max_queues=%d)\n",
+ i, entry->gate_mask, bp->num_queues);
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ /* Check for start time limits */
+ start_time_sec = start_time;
+ start_time_nsec = do_div(start_time_sec, NSEC_PER_SEC);
+ if (start_time_sec > GENMASK(GEM_START_TIME_SEC_SIZE - 1, 0)) {
+ netdev_err(ndev, "Entry %zu: Start time %llu s exceeds hardware limit\n",
+ i, start_time_sec);
+ err = -ERANGE;
+ goto cleanup;
+ }
+
+ /* Check for on time limit */
+ if (entry->interval > enst_max_hw_interval(speed)) {
+ netdev_err(ndev, "Entry %zu: interval %u ns exceeds hardware limit %llu ns\n",
+ i, entry->interval, enst_max_hw_interval(speed));
+ err = -ERANGE;
+ goto cleanup;
+ }
+
+ /* Check for off time limit */
+ if ((conf->cycle_time - entry->interval) > enst_max_hw_interval(speed)) {
+ netdev_err(ndev, "Entry %zu: off_time %llu ns exceeds hardware limit %llu ns\n",
+ i, conf->cycle_time - entry->interval,
+ enst_max_hw_interval(speed));
+ err = -ERANGE;
+ goto cleanup;
+ }
+
+ enst_queue[i].queue_id = order_base_2(entry->gate_mask);
+ enst_queue[i].start_time_mask =
+ (start_time_sec << GEM_START_TIME_SEC_OFFSET) |
+ start_time_nsec;
+ enst_queue[i].on_time_bytes =
+ enst_ns_to_hw_units(entry->interval, speed);
+ enst_queue[i].off_time_bytes =
+ enst_ns_to_hw_units(conf->cycle_time - entry->interval, speed);
+
+ configured_queues |= entry->gate_mask;
+ total_on_time += entry->interval;
+ start_time += entry->interval;
+ }
+
+ /* Check total interval doesn't exceed cycle time */
+ if (total_on_time > conf->cycle_time) {
+ netdev_err(ndev, "Total ON %llu ns exceeds cycle time %llu ns\n",
+ total_on_time, conf->cycle_time);
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ netdev_dbg(ndev, "TAPRIO setup: %zu entries, base_time=%lld ns, cycle_time=%llu ns\n",
+ conf->num_entries, conf->base_time, conf->cycle_time);
+
+ /* All validations passed - proceed with hardware configuration */
+ scoped_guard(spinlock_irqsave, &bp->lock) {
+ /* Disable ENST queues if running before configuring */
+ gem_writel(bp, ENST_CONTROL,
+ bp->queue_mask << GEM_ENST_DISABLE_QUEUE_OFFSET);
+
+ for (i = 0; i < conf->num_entries; i++) {
+ queue = &bp->queues[enst_queue[i].queue_id];
+ /* Configure queue timing registers */
+ queue_writel(queue, ENST_START_TIME,
+ enst_queue[i].start_time_mask);
+ queue_writel(queue, ENST_ON_TIME,
+ enst_queue[i].on_time_bytes);
+ queue_writel(queue, ENST_OFF_TIME,
+ enst_queue[i].off_time_bytes);
+ }
+
+ /* Enable ENST for all configured queues in one write */
+ gem_writel(bp, ENST_CONTROL, configured_queues);
+ }
+
+ netdev_info(ndev, "TAPRIO configuration completed successfully: %zu entries, %d queues configured\n",
+ conf->num_entries, hweight32(configured_queues));
+
+cleanup:
+ kfree(enst_queue);
+ return err;
+}
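macb_taprio_setup_replace() above splits the absolute base_time into the 2-bit seconds and 30-bit nanoseconds fields with do_div() and advances start_time by each entry's interval so consecutive gates open back to back. A sketch of the register encoding, reusing the patch's field names (the helper itself is hypothetical and assumes the seconds value was already validated to fit, as the function does above):

static u32 enst_start_time_reg(u64 start_time_ns)
{
	u64 sec = start_time_ns;
	u32 nsec = do_div(sec, NSEC_PER_SEC);	/* sec now holds seconds */

	/* seconds land in bits 31:30, nanoseconds in bits 29:0 */
	return (sec << GEM_START_TIME_SEC_OFFSET) | nsec;
}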
+
+static void macb_taprio_destroy(struct net_device *ndev)
+{
+ struct macb *bp = netdev_priv(ndev);
+ struct macb_queue *queue;
+ u32 enst_disable_mask;
+ unsigned int q;
+
+ netdev_reset_tc(ndev);
+ enst_disable_mask = bp->queue_mask << GEM_ENST_DISABLE_QUEUE_OFFSET;
+
+ scoped_guard(spinlock_irqsave, &bp->lock) {
+ /* Single disable command for all queues */
+ gem_writel(bp, ENST_CONTROL, enst_disable_mask);
+
+ /* Clear all queue ENST registers in batch */
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ queue_writel(queue, ENST_START_TIME, 0);
+ queue_writel(queue, ENST_ON_TIME, 0);
+ queue_writel(queue, ENST_OFF_TIME, 0);
+ }
+ }
+ netdev_info(ndev, "TAPRIO destroy: All gates disabled\n");
+}
+
+static int macb_setup_taprio(struct net_device *ndev,
+ struct tc_taprio_qopt_offload *taprio)
+{
+ struct macb *bp = netdev_priv(ndev);
+ int err = 0;
+
+ if (unlikely(!(ndev->hw_features & NETIF_F_HW_TC)))
+ return -EOPNOTSUPP;
+
+ /* Check if the device is in runtime suspend */
+ if (unlikely(pm_runtime_suspended(&bp->pdev->dev))) {
+ netdev_err(ndev, "Device is in runtime suspend\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (taprio->cmd) {
+ case TAPRIO_CMD_REPLACE:
+ err = macb_taprio_setup_replace(ndev, taprio);
+ break;
+ case TAPRIO_CMD_DESTROY:
+ macb_taprio_destroy(ndev);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static int macb_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ if (!dev || !type_data)
+ return -EINVAL;
+
+ switch (type) {
+ case TC_SETUP_QDISC_TAPRIO:
+ return macb_setup_taprio(dev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct net_device_ops macb_netdev_ops = {
.ndo_open = macb_open,
.ndo_stop = macb_close,
@@ -4101,6 +4332,7 @@ static const struct net_device_ops macb_netdev_ops = {
.ndo_features_check = macb_features_check,
.ndo_hwtstamp_set = macb_hwtstamp_set,
.ndo_hwtstamp_get = macb_hwtstamp_get,
+ .ndo_setup_tc = macb_setup_tc,
};
/* Configure peripheral capabilities according to device tree
@@ -4305,12 +4537,6 @@ static int macb_init(struct platform_device *pdev)
queue->TBQP = GEM_TBQP(hw_q - 1);
queue->RBQP = GEM_RBQP(hw_q - 1);
queue->RBQS = GEM_RBQS(hw_q - 1);
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
- queue->TBQPH = GEM_TBQPH(hw_q - 1);
- queue->RBQPH = GEM_RBQPH(hw_q - 1);
- }
-#endif
} else {
/* queue0 uses legacy registers */
queue->ISR = MACB_ISR;
@@ -4319,14 +4545,12 @@ static int macb_init(struct platform_device *pdev)
queue->IMR = MACB_IMR;
queue->TBQP = MACB_TBQP;
queue->RBQP = MACB_RBQP;
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
- queue->TBQPH = MACB_TBQPH;
- queue->RBQPH = MACB_RBQPH;
- }
-#endif
}
+ queue->ENST_START_TIME = GEM_ENST_START_TIME(hw_q);
+ queue->ENST_ON_TIME = GEM_ENST_ON_TIME(hw_q);
+ queue->ENST_OFF_TIME = GEM_ENST_OFF_TIME(hw_q);
+
/* get irq: here we use the linux queue index, not the hardware
* queue index. the queue irq definitions in the device tree
* must remove the optional gaps that could exist in the
@@ -4379,6 +4603,10 @@ static int macb_init(struct platform_device *pdev)
dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
if (bp->caps & MACB_CAPS_SG_DISABLED)
dev->hw_features &= ~NETIF_F_SG;
+ /* Enable HW_TC if hardware supports QBV */
+ if (bp->caps & MACB_CAPS_QBV)
+ dev->hw_features |= NETIF_F_HW_TC;
+
dev->features = dev->hw_features;
/* Check RX Flow Filters support.
@@ -4822,36 +5050,45 @@ static unsigned long fu540_macb_tx_recalc_rate(struct clk_hw *hw,
return mgmt->rate;
}
-static long fu540_macb_tx_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- if (WARN_ON(rate < 2500000))
- return 2500000;
- else if (rate == 2500000)
- return 2500000;
- else if (WARN_ON(rate < 13750000))
- return 2500000;
- else if (WARN_ON(rate < 25000000))
- return 25000000;
- else if (rate == 25000000)
- return 25000000;
- else if (WARN_ON(rate < 75000000))
- return 25000000;
- else if (WARN_ON(rate < 125000000))
- return 125000000;
- else if (rate == 125000000)
- return 125000000;
-
- WARN_ON(rate > 125000000);
+static int fu540_macb_tx_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ if (WARN_ON(req->rate < 2500000))
+ req->rate = 2500000;
+ else if (req->rate == 2500000)
+ req->rate = 2500000;
+ else if (WARN_ON(req->rate < 13750000))
+ req->rate = 2500000;
+ else if (WARN_ON(req->rate < 25000000))
+ req->rate = 25000000;
+ else if (req->rate == 25000000)
+ req->rate = 25000000;
+ else if (WARN_ON(req->rate < 75000000))
+ req->rate = 25000000;
+ else if (WARN_ON(req->rate < 125000000))
+ req->rate = 125000000;
+ else if (req->rate == 125000000)
+ req->rate = 125000000;
+ else if (WARN_ON(req->rate > 125000000))
+ req->rate = 125000000;
+ else
+ req->rate = 125000000;
- return 125000000;
+ return 0;
}
static int fu540_macb_tx_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
- rate = fu540_macb_tx_round_rate(hw, rate, &parent_rate);
- if (rate != 125000000)
+ struct clk_rate_request req;
+ int ret;
+
+ clk_hw_init_rate_request(hw, &req, rate);
+ ret = fu540_macb_tx_determine_rate(hw, &req);
+ if (ret != 0)
+ return ret;
+
+ if (req.rate != 125000000)
iowrite32(1, mgmt->reg);
else
iowrite32(0, mgmt->reg);
@@ -4862,7 +5099,7 @@ static int fu540_macb_tx_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops fu540_c000_ops = {
.recalc_rate = fu540_macb_tx_recalc_rate,
- .round_rate = fu540_macb_tx_round_rate,
+ .determine_rate = fu540_macb_tx_determine_rate,
.set_rate = fu540_macb_tx_set_rate,
};
@@ -5113,7 +5350,8 @@ static const struct macb_config sama7g5_gem_config = {
static const struct macb_config sama7g5_emac_config = {
.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII |
- MACB_CAPS_MIIONRGMII | MACB_CAPS_GEM_HAS_PTP,
+ MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_MIIONRGMII |
+ MACB_CAPS_GEM_HAS_PTP,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -5122,8 +5360,9 @@ static const struct macb_config sama7g5_emac_config = {
static const struct macb_config versal_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
- MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH | MACB_CAPS_NEED_TSUCLK |
- MACB_CAPS_QUEUE_DISABLE,
+ MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH |
+ MACB_CAPS_NEED_TSUCLK | MACB_CAPS_QUEUE_DISABLE |
+ MACB_CAPS_QBV,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = init_reset_optional,
@@ -5131,6 +5370,17 @@ static const struct macb_config versal_config = {
.usrio = &macb_default_usrio,
};
+static const struct macb_config raspberrypi_rp1_config = {
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG |
+ MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = macb_init,
+ .usrio = &macb_default_usrio,
+ .jumbo_max_len = 10240,
+};
+
static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
{ .compatible = "cdns,macb" },
@@ -5151,6 +5401,7 @@ static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "microchip,mpfs-macb", .data = &mpfs_config },
{ .compatible = "microchip,sama7g5-gem", .data = &sama7g5_gem_config },
{ .compatible = "microchip,sama7g5-emac", .data = &sama7g5_emac_config },
+ { .compatible = "raspberrypi,rp1-gem", .data = &raspberrypi_rp1_config },
{ .compatible = "xlnx,zynqmp-gem", .data = &zynqmp_config},
{ .compatible = "xlnx,zynq-gem", .data = &zynq_config },
{ .compatible = "xlnx,versal-gem", .data = &versal_config},
@@ -5398,19 +5649,16 @@ static void macb_remove(struct platform_device *pdev)
if (dev) {
bp = netdev_priv(dev);
+ unregister_netdev(dev);
phy_exit(bp->sgmii_phy);
mdiobus_unregister(bp->mii_bus);
mdiobus_free(bp->mii_bus);
- unregister_netdev(dev);
+ device_set_wakeup_enable(&bp->pdev->dev, 0);
cancel_work_sync(&bp->hresp_err_bh_work);
pm_runtime_disable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
- if (!pm_runtime_suspended(&pdev->dev)) {
- macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk,
- bp->rx_clk, bp->tsu_clk);
- pm_runtime_set_suspended(&pdev->dev);
- }
+ pm_runtime_set_suspended(&pdev->dev);
phylink_destroy(bp->phylink);
free_netdev(dev);
}
@@ -5450,6 +5698,11 @@ static int __maybe_unused macb_suspend(struct device *dev)
*/
tmp = macb_readl(bp, NCR);
macb_writel(bp, NCR, tmp & ~(MACB_BIT(TE) | MACB_BIT(RE)));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (!(bp->caps & MACB_CAPS_QUEUE_DISABLE))
+ macb_writel(bp, RBQPH,
+ upper_32_bits(bp->rx_ring_tieoff_dma));
+#endif
for (q = 0, queue = bp->queues; q < bp->num_queues;
++q, ++queue) {
/* Disable RX queues */
@@ -5459,10 +5712,6 @@ static int __maybe_unused macb_suspend(struct device *dev)
/* Tie off RX queues */
queue_writel(queue, RBQP,
lower_32_bits(bp->rx_ring_tieoff_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- queue_writel(queue, RBQPH,
- upper_32_bits(bp->rx_ring_tieoff_dma));
-#endif
}
/* Disable all interrupts */
queue_writel(queue, IDR, -1);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 674c54831875..215dac201b4a 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -472,7 +472,7 @@ int setup_rx_oom_poll_fn(struct net_device *netdev)
q_no = lio->linfo.rxpciq[q].s.q_no;
wq = &lio->rxq_status_wq[q_no];
wq->wq = alloc_workqueue("rxq-oom-status",
- WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!wq->wq) {
dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 1d79f6eaa41f..8e2fcec26ea1 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -526,7 +526,8 @@ static inline int setup_link_status_change_wq(struct net_device *netdev)
struct octeon_device *oct = lio->oct_dev;
lio->link_status_wq.wq = alloc_workqueue("link-status",
- WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM | WQ_PERCPU,
+ 0);
if (!lio->link_status_wq.wq) {
dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
return -1;
@@ -659,7 +660,8 @@ static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
struct octeon_device *oct = lio->oct_dev;
lio->sync_octeon_time_wq.wq =
- alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
+ alloc_workqueue("update-octeon-time",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!lio->sync_octeon_time_wq.wq) {
dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
return -1;
@@ -1734,7 +1736,7 @@ static inline int setup_tx_poll_fn(struct net_device *netdev)
struct octeon_device *oct = lio->oct_dev;
lio->txq_status_wq.wq = alloc_workqueue("txq-status",
- WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!lio->txq_status_wq.wq) {
dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
return -1;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 62c2eadc33e3..3230dff5ba05 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -304,7 +304,8 @@ static int setup_link_status_change_wq(struct net_device *netdev)
struct octeon_device *oct = lio->oct_dev;
lio->link_status_wq.wq = alloc_workqueue("link-status",
- WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM | WQ_PERCPU,
+ 0);
if (!lio->link_status_wq.wq) {
dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
return -1;
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index de8a6ce86ad7..d7cfb20eea00 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -126,13 +126,13 @@ int octeon_init_instr_queue(struct octeon_device *oct,
oct->io_qmask.iq |= BIT_ULL(iq_no);
/* Set the 32B/64B mode for each input queue */
- oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
+ oct->io_qmask.iq64B |= ((u64)(conf->instr_type == 64) << iq_no);
iq->iqcmd_64B = (conf->instr_type == 64);
oct->fn_list.setup_iq_regs(oct, iq_no);
oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db",
- WQ_MEM_RECLAIM,
+ WQ_MEM_RECLAIM | WQ_PERCPU,
0);
if (!oct->check_db_wq[iq_no].wq) {
vfree(iq->request_list);
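The request_manager.c fix above widens the shifted operand: `(conf->instr_type == 64)` has type int, so shifting it by iq_no >= 31 is undefined and the bit never reaches the upper half of the u64 mask. A standalone sketch of the safe form, not part of the patch (hypothetical helper):

#include <stdint.h>

/* A boolean expression has type int, so shifting it by 31 or more is
 * undefined before the result is widened. Casting to a 64-bit type
 * first makes the whole shift 64-bit; safe for bit up to 63. */
static inline uint64_t bit_if(int cond, unsigned int bit)
{
	return (uint64_t)(cond != 0) << bit;
}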
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c
index 861050966e18..de1a8335b545 100644
--- a/drivers/net/ethernet/cavium/liquidio/response_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c
@@ -39,7 +39,8 @@ int octeon_setup_response_list(struct octeon_device *oct)
}
spin_lock_init(&oct->cmd_resp_wqlock);
- oct->dma_comp_wq.wq = alloc_workqueue("dma-comp", WQ_MEM_RECLAIM, 0);
+ oct->dma_comp_wq.wq = alloc_workqueue("dma-comp",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!oct->dma_comp_wq.wq) {
dev_err(&oct->pci_dev->dev, "failed to create wq thread\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 21495b5dce25..9efb60842ad1 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1493,13 +1493,17 @@ static int bgx_init_of_phy(struct bgx *bgx)
* this cortina phy, for which there is no driver
* support, ignore it.
*/
- if (phy_np &&
- !of_device_is_compatible(phy_np, "cortina,cs4223-slice")) {
- /* Wait until the phy drivers are available */
- pd = of_phy_find_device(phy_np);
- if (!pd)
- goto defer;
- bgx->lmac[lmac].phydev = pd;
+ if (phy_np) {
+ if (!of_device_is_compatible(phy_np, "cortina,cs4223-slice")) {
+ /* Wait until the phy drivers are available */
+ pd = of_phy_find_device(phy_np);
+ if (!pd) {
+ of_node_put(phy_np);
+ goto defer;
+ }
+ bgx->lmac[lmac].phydev = pd;
+ }
+ of_node_put(phy_np);
}
lmac++;
@@ -1515,11 +1519,11 @@ defer:
* for phy devices we may have already found.
*/
while (lmac) {
+ lmac--;
if (bgx->lmac[lmac].phydev) {
put_device(&bgx->lmac[lmac].phydev->mdio.dev);
bgx->lmac[lmac].phydev = NULL;
}
- lmac--;
}
of_node_put(node);
return -EPROBE_DEFER;
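The thunder_bgx unwind fix above decrements lmac before using it, so the loop releases exactly the entries that were initialized (lmac-1 down to 0) instead of skipping the newest one and reading one past the set; the same hunk also adds the missing of_node_put() on the defer path. The decrement-first idiom in isolation, not part of the patch (hypothetical names):

/* Entries 0..count-1 were initialized in order, so decrementing before
 * use releases exactly those and never touches entry 'count'. */
static void unwind_entries(struct foo_entry *arr, unsigned int count)
{
	while (count) {
		count--;
		foo_release(&arr[count]);
	}
}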
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index 6f6525983130..4ee970f3bad6 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -171,7 +171,7 @@ static void chtls_purge_receive_queue(struct sock *sk)
struct sk_buff *skb;
while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
- skb_dst_set(skb, (void *)NULL);
+ skb_dstref_steal(skb);
kfree_skb(skb);
}
}
@@ -194,7 +194,7 @@ static void chtls_purge_recv_queue(struct sock *sk)
struct sk_buff *skb;
while ((skb = __skb_dequeue(&tlsk->sk_recv_queue)) != NULL) {
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
kfree_skb(skb);
}
}
@@ -505,7 +505,7 @@ static void reset_listen_child(struct sock *child)
chtls_send_reset(child, CPL_ABORT_SEND_RST, skb);
sock_orphan(child);
- INC_ORPHAN_COUNT(child);
+ tcp_orphan_count_inc();
if (child->sk_state == TCP_CLOSE)
inet_csk_destroy_sock(child);
}
@@ -870,7 +870,7 @@ static void do_abort_syn_rcv(struct sock *child, struct sock *parent)
* created only after 3 way handshake is done.
*/
sock_orphan(child);
- INC_ORPHAN_COUNT(child);
+ tcp_orphan_count_inc();
chtls_release_resources(child);
chtls_conn_done(child);
} else {
@@ -951,6 +951,7 @@ static unsigned int chtls_select_mss(const struct chtls_sock *csk,
struct tcp_sock *tp;
unsigned int mss;
struct sock *sk;
+ u16 user_mss;
mss = ntohs(req->tcpopt.mss);
sk = csk->sk;
@@ -969,8 +970,9 @@ static unsigned int chtls_select_mss(const struct chtls_sock *csk,
tcpoptsz += round_up(TCPOLEN_TIMESTAMP, 4);
tp->advmss = dst_metric_advmss(dst);
- if (USER_MSS(tp) && tp->advmss > USER_MSS(tp))
- tp->advmss = USER_MSS(tp);
+ user_mss = USER_MSS(tp);
+ if (user_mss && tp->advmss > user_mss)
+ tp->advmss = user_mss;
if (tp->advmss > pmtu - iphdrsz)
tp->advmss = pmtu - iphdrsz;
if (mss && tp->advmss > mss)
@@ -1734,7 +1736,7 @@ static int chtls_rx_data(struct chtls_dev *cdev, struct sk_buff *skb)
pr_err("can't find conn. for hwtid %u.\n", hwtid);
return -EINVAL;
}
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
process_cpl_msg(chtls_recv_data, sk, skb);
return 0;
}
@@ -1786,7 +1788,7 @@ static int chtls_rx_pdu(struct chtls_dev *cdev, struct sk_buff *skb)
pr_err("can't find conn. for hwtid %u.\n", hwtid);
return -EINVAL;
}
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
process_cpl_msg(chtls_recv_pdu, sk, skb);
return 0;
}
@@ -1855,7 +1857,7 @@ static int chtls_rx_cmp(struct chtls_dev *cdev, struct sk_buff *skb)
pr_err("can't find conn. for hwtid %u.\n", hwtid);
return -EINVAL;
}
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
process_cpl_msg(chtls_rx_hdr, sk, skb);
return 0;
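The USER_MSS() change above wraps the field in READ_ONCE() and chtls_select_mss() snapshots it into a local, so the comparison and the assignment see the same value even if user space updates the socket option concurrently. The snapshot pattern in isolation, not part of the patch (the field is the patch's; the function is hypothetical):

static void clamp_advmss(struct tcp_sock *tp)
{
	/* read the concurrently-written field exactly once */
	u16 user_mss = READ_ONCE(tp->rx_opt.user_mss);

	if (user_mss && tp->advmss > user_mss)
		tp->advmss = user_mss;
}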
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
index f61ca657601c..29ceff5a5fcb 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
@@ -90,12 +90,11 @@ struct deferred_skb_cb {
#define SND_WSCALE(tp) ((tp)->rx_opt.snd_wscale)
#define RCV_WSCALE(tp) ((tp)->rx_opt.rcv_wscale)
-#define USER_MSS(tp) ((tp)->rx_opt.user_mss)
+#define USER_MSS(tp) (READ_ONCE((tp)->rx_opt.user_mss))
#define TS_RECENT_STAMP(tp) ((tp)->rx_opt.ts_recent_stamp)
#define WSCALE_OK(tp) ((tp)->rx_opt.wscale_ok)
#define TSTAMP_OK(tp) ((tp)->rx_opt.tstamp_ok)
#define SACK_OK(tp) ((tp)->rx_opt.sack_ok)
-#define INC_ORPHAN_COUNT(sk) this_cpu_inc(*(sk)->sk_prot->orphan_count)
/* TLS SKB */
#define skb_ulp_tls_inline(skb) (ULP_SKB_CB(skb)->ulp.tls.ofld)
@@ -171,14 +170,14 @@ static inline void chtls_set_req_addr(struct request_sock *oreq,
static inline void chtls_free_skb(struct sock *sk, struct sk_buff *skb)
{
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
__skb_unlink(skb, &sk->sk_receive_queue);
__kfree_skb(skb);
}
static inline void chtls_kfree_skb(struct sock *sk, struct sk_buff *skb)
{
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
__skb_unlink(skb, &sk->sk_receive_queue);
kfree_skb(skb);
}
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
index 465fa8077964..4036db466e18 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
@@ -1434,7 +1434,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
continue;
found_ok_skb:
if (!skb->len) {
- skb_dst_set(skb, NULL);
+ skb_dstref_steal(skb);
__skb_unlink(skb, &sk->sk_receive_queue);
kfree_skb(skb);
diff --git a/drivers/net/ethernet/dlink/Kconfig b/drivers/net/ethernet/dlink/Kconfig
index e9e13654812c..0d77f84c8e7b 100644
--- a/drivers/net/ethernet/dlink/Kconfig
+++ b/drivers/net/ethernet/dlink/Kconfig
@@ -32,4 +32,24 @@ config DL2K
To compile this driver as a module, choose M here: the
module will be called dl2k.
+config SUNDANCE
+ tristate "Sundance Alta support"
+ depends on PCI
+ select CRC32
+ select MII
+ help
+ This driver is for the Sundance "Alta" chip.
+ More specific information and updates are available from
+ <http://www.scyld.com/network/sundance.html>.
+
+config SUNDANCE_MMIO
+ bool "Use MMIO instead of PIO"
+ depends on SUNDANCE
+ help
+ Enable memory-mapped I/O for interaction with Sundance NIC registers.
+ Do NOT enable this by default; PIO (used when MMIO is disabled)
+ is known to avoid bugs on certain chips.
+
+ If unsure, say N.
+
endif # NET_VENDOR_DLINK
diff --git a/drivers/net/ethernet/dlink/Makefile b/drivers/net/ethernet/dlink/Makefile
index 38c236eb6007..3ff503c747db 100644
--- a/drivers/net/ethernet/dlink/Makefile
+++ b/drivers/net/ethernet/dlink/Makefile
@@ -4,3 +4,4 @@
#
obj-$(CONFIG_DL2K) += dl2k.o
+obj-$(CONFIG_SUNDANCE) += sundance.o
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index cc60ee454bf9..1996d2e4e3e2 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -964,15 +964,18 @@ receive_packet (struct net_device *dev)
} else {
struct sk_buff *skb;
+ skb = NULL;
/* Small skbuffs for short packets */
- if (pkt_len > copy_thresh) {
+ if (pkt_len <= copy_thresh)
+ skb = netdev_alloc_skb_ip_align(dev, pkt_len);
+ if (!skb) {
dma_unmap_single(&np->pdev->dev,
desc_to_dma(desc),
np->rx_buf_sz,
DMA_FROM_DEVICE);
skb_put (skb = np->rx_skbuff[entry], pkt_len);
np->rx_skbuff[entry] = NULL;
- } else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
+ } else {
dma_sync_single_for_cpu(&np->pdev->dev,
desc_to_dma(desc),
np->rx_buf_sz,
@@ -1099,7 +1102,7 @@ get_stats (struct net_device *dev)
dev->stats.rx_bytes += dr32(OctetRcvOk);
dev->stats.tx_bytes += dr32(OctetXmtOk);
- dev->stats.multicast = dr32(McstFramesRcvdOk);
+ dev->stats.multicast += dr32(McstFramesRcvdOk);
dev->stats.collisions += dr32(SingleColFrames)
+ dr32(MultiColFrames);
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
new file mode 100644
index 000000000000..277c50ef773f
--- /dev/null
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -0,0 +1,1990 @@
+/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
+/*
+ Written 1999-2000 by Donald Becker.
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License (GPL), incorporated herein by reference.
+ Drivers based on or derived from this code fall under the GPL and must
+ retain the authorship, copyright and license notice. This file is not
+ a complete program and may only be used when the entire operating
+ system is licensed under the GPL.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Support and updates available at
+ http://www.scyld.com/network/sundance.html
+ [link no longer provides useful info -jgarzik]
+ Archives of the mailing list are still available at
+ https://www.beowulf.org/pipermail/netdrivers/
+
+*/
+
+#define DRV_NAME "sundance"
+
+/* The user-configurable values.
+ These may be modified when a driver module is loaded.*/
+static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+ Typical is a 64 element hash table based on the Ethernet CRC. */
+static const int multicast_filter_limit = 32;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature.
+ This chip can receive into offset buffers, so the Alpha does not
+ need a copy-align. */
+static int rx_copybreak;
+static int flowctrl=1;
+
+/* media[] specifies the media type the NIC operates at.
+ autosense Autosensing active media.
+ 10mbps_hd 10Mbps half duplex.
+ 10mbps_fd 10Mbps full duplex.
+ 100mbps_hd 100Mbps half duplex.
+ 100mbps_fd 100Mbps full duplex.
+ 0 Autosensing active media.
+ 1 10Mbps half duplex.
+ 2 10Mbps full duplex.
+ 3 100Mbps half duplex.
+ 4 100Mbps full duplex.
+*/
+#define MAX_UNITS 8
+static char *media[MAX_UNITS];
+
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+ Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority, and more than 128 requires modifying the
+ Tx error recovery.
+ Large receive rings merely waste memory. */
+#define TX_RING_SIZE 32
+#define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
+#define RX_RING_SIZE 64
+#define RX_BUDGET 32
+#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
+#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc)
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT (4*HZ)
+#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
+
+/* Include files, designed to support most kernel versions 2.0.0 and later. */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/uaccess.h>
+#include <asm/processor.h> /* Processor type for cache alignment. */
+#include <asm/io.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/crc32.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
+MODULE_LICENSE("GPL");
+
+module_param(debug, int, 0);
+module_param(rx_copybreak, int, 0);
+module_param_array(media, charp, NULL, 0);
+module_param(flowctrl, int, 0);
+MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
+MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This driver is designed for the Sundance Technologies "Alta" ST201 chip.
+
+II. Board-specific settings
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
+Some chips explicitly use only 2^N sized rings, while others use a
+'next descriptor' pointer that the driver forms into rings.
+
+IIIb/c. Transmit/Receive Structure
+
+This driver uses a zero-copy receive and transmit scheme.
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the chip as receive data
+buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack. Buffers consumed this way are replaced by newly allocated
+skbuffs in a later phase of receives.
+
+The RX_COPYBREAK value is chosen to trade-off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames. New boards are typically used in generously configured machines
+and the underfilled buffers have negligible impact compared to the benefit of
+a single allocation size, so the default value of zero results in never
+copying packets. When copying is done, the cost is usually mitigated by using
+a combined copy/checksum routine. Copying also preloads the cache, which is
+most useful with small frames.
+
+A subtle aspect of the operation is that the IP header at offset 14 in an
+ethernet frame isn't longword aligned for further processing.
+Unaligned buffers are permitted by the Sundance hardware, so
+frames are received into the skbuff at an offset of "+2", 16-byte aligning
+the IP header.
+
+IIId. Synchronization
+
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and interrupt handling software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished otherwise it sets
+the 'lp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
+clears both the tx_full and tbusy flags.
+
+IV. Notes
+
+IVb. References
+
+The Sundance ST201 datasheet, preliminary version.
+The Kendin KS8723 datasheet, preliminary version.
+The ICplus IP100 datasheet, preliminary version.
+http://www.scyld.com/expert/100mbps.html
+http://www.scyld.com/expert/NWay.html
+
+IVc. Errata
+
+*/
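The copy-only-tiny-frames scheme described above either copies a short frame into a right-sized skb (keeping the full-sized ring buffer in place) or hands the ring skbuff up the stack zero-copy. A sketch of that decision with hypothetical names, not part of the patch, mirroring the structure the driver uses:

static struct sk_buff *rx_copybreak_skb(struct net_device *dev,
					struct sk_buff **ring_skb,
					unsigned int pkt_len)
{
	struct sk_buff *skb = NULL;

	if (pkt_len <= rx_copybreak)
		skb = netdev_alloc_skb_ip_align(dev, pkt_len);
	if (skb) {
		/* small frame: copy and keep the big ring buffer */
		skb_copy_to_linear_data(skb, (*ring_skb)->data, pkt_len);
		skb_put(skb, pkt_len);
	} else {
		/* large frame (or alloc failure): zero-copy hand-off */
		skb = *ring_skb;
		skb_put(skb, pkt_len);
		*ring_skb = NULL;	/* slot is refilled later */
	}
	return skb;
}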
+
+/* Work-around for Kendin chip bugs. */
+#ifndef CONFIG_SUNDANCE_MMIO
+#define USE_IO_OPS 1
+#endif
+
+static const struct pci_device_id sundance_pci_tbl[] = {
+ { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
+ { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
+ { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
+ { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
+ { 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
+ { 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
+ { 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
+
+enum {
+ netdev_io_size = 128
+};
+
+struct pci_id_info {
+ const char *name;
+};
+static const struct pci_id_info pci_id_tbl[] = {
+ {"D-Link DFE-550TX FAST Ethernet Adapter"},
+ {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
+ {"D-Link DFE-580TX 4 port Server Adapter"},
+ {"D-Link DFE-530TXS FAST Ethernet Adapter"},
+ {"D-Link DL10050-based FAST Ethernet Adapter"},
+ {"Sundance Technology Alta"},
+ {"IC Plus Corporation IP100A FAST Ethernet Adapter"},
+ { } /* terminate list. */
+};
+
+/* This driver was written to use PCI memory space, however x86-oriented
+ hardware often uses I/O space accesses. */
+
+/* Offsets to the device registers.
+ Unlike software-only systems, device drivers interact with complex hardware.
+ It's not useful to define symbolic names for every register bit in the
+ device. The name can only partially document the semantics and make
+ the driver longer and more difficult to read.
+ In general, only the important configuration values or bits changed
+ multiple times should be defined symbolically.
+*/
+enum alta_offsets {
+ DMACtrl = 0x00,
+ TxListPtr = 0x04,
+ TxDMABurstThresh = 0x08,
+ TxDMAUrgentThresh = 0x09,
+ TxDMAPollPeriod = 0x0a,
+ RxDMAStatus = 0x0c,
+ RxListPtr = 0x10,
+ DebugCtrl0 = 0x1a,
+ DebugCtrl1 = 0x1c,
+ RxDMABurstThresh = 0x14,
+ RxDMAUrgentThresh = 0x15,
+ RxDMAPollPeriod = 0x16,
+ LEDCtrl = 0x1a,
+ ASICCtrl = 0x30,
+ EEData = 0x34,
+ EECtrl = 0x36,
+ FlashAddr = 0x40,
+ FlashData = 0x44,
+ WakeEvent = 0x45,
+ TxStatus = 0x46,
+ TxFrameId = 0x47,
+ DownCounter = 0x18,
+ IntrClear = 0x4a,
+ IntrEnable = 0x4c,
+ IntrStatus = 0x4e,
+ MACCtrl0 = 0x50,
+ MACCtrl1 = 0x52,
+ StationAddr = 0x54,
+ MaxFrameSize = 0x5A,
+ RxMode = 0x5c,
+ MIICtrl = 0x5e,
+ MulticastFilter0 = 0x60,
+ MulticastFilter1 = 0x64,
+ RxOctetsLow = 0x68,
+ RxOctetsHigh = 0x6a,
+ TxOctetsLow = 0x6c,
+ TxOctetsHigh = 0x6e,
+ TxFramesOK = 0x70,
+ RxFramesOK = 0x72,
+ StatsCarrierError = 0x74,
+ StatsLateColl = 0x75,
+ StatsMultiColl = 0x76,
+ StatsOneColl = 0x77,
+ StatsTxDefer = 0x78,
+ RxMissed = 0x79,
+ StatsTxXSDefer = 0x7a,
+ StatsTxAbort = 0x7b,
+ StatsBcastTx = 0x7c,
+ StatsBcastRx = 0x7d,
+ StatsMcastTx = 0x7e,
+ StatsMcastRx = 0x7f,
+ /* Aliased and bogus values! */
+ RxStatus = 0x0c,
+};
+
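+/* The reset bits live in the high 16 bits of the 32-bit ASICCtrl register;
+ * this macro yields the address of that half for 16-bit accesses. */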
+#define ASIC_HI_WORD(x) ((x) + 2)
+
+enum ASICCtrl_HiWord_bit {
+ GlobalReset = 0x0001,
+ RxReset = 0x0002,
+ TxReset = 0x0004,
+ DMAReset = 0x0008,
+ FIFOReset = 0x0010,
+ NetworkReset = 0x0020,
+ HostReset = 0x0040,
+ ResetBusy = 0x0400,
+};
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+ IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
+ IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
+ IntrDrvRqst=0x0040,
+ StatsMax=0x0080, LinkChange=0x0100,
+ IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
+};
+
+/* Bits in the RxMode register. */
+enum rx_mode_bits {
+ AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
+ AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
+};
+/* Bits in MACCtrl. */
+enum mac_ctrl0_bits {
+ EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
+ EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
+};
+enum mac_ctrl1_bits {
+ StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
+ TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
+ RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
+};
+
+/* Bits in WakeEvent register. */
+enum wake_event_bits {
+ WakePktEnable = 0x01,
+ MagicPktEnable = 0x02,
+ LinkEventEnable = 0x04,
+ WolEnable = 0x80,
+};
+
+/* The Rx and Tx buffer descriptors. */
+/* Note that using only 32-bit fields simplifies conversion to big-endian
+ architectures. */
+struct netdev_desc {
+ __le32 next_desc;
+ __le32 status;
+ struct desc_frag { __le32 addr, length; } frag;
+};
+
+/* Bits in netdev_desc.status */
+enum desc_status_bits {
+ DescOwn=0x8000,
+ DescEndPacket=0x4000,
+ DescEndRing=0x2000,
+ LastFrag=0x80000000,
+ DescIntrOnTx=0x8000,
+ DescIntrOnDMADone=0x80000000,
+ DisableAlign = 0x00000001,
+};
+
+#define PRIV_ALIGN 15 /* Required alignment mask */
+/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
+ within the structure. */
+#define MII_CNT 4
+struct netdev_private {
+ /* Descriptor rings first for alignment. */
+ struct netdev_desc *rx_ring;
+ struct netdev_desc *tx_ring;
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ dma_addr_t tx_ring_dma;
+ dma_addr_t rx_ring_dma;
+ struct timer_list timer; /* Media monitoring timer. */
+ struct net_device *ndev; /* backpointer */
+ /* ethtool extra stats */
+ struct {
+ u64 tx_multiple_collisions;
+ u64 tx_single_collisions;
+ u64 tx_late_collisions;
+ u64 tx_deferred;
+ u64 tx_deferred_excessive;
+ u64 tx_aborted;
+ u64 tx_bcasts;
+ u64 rx_bcasts;
+ u64 tx_mcasts;
+ u64 rx_mcasts;
+ } xstats;
+ /* Frequently used values: keep some adjacent for cache effect. */
+ spinlock_t lock;
+ int msg_enable;
+ int chip_id;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ struct netdev_desc *last_tx; /* Last Tx descriptor used. */
+ unsigned int cur_tx, dirty_tx;
+	/* These values keep track of the transceiver/media in use. */
+ unsigned int flowctrl:1;
+ unsigned int default_port:4; /* Last dev->if_port value. */
+ unsigned int an_enable:1;
+ unsigned int speed;
+ unsigned int wol_enabled:1; /* Wake on LAN enabled */
+ struct tasklet_struct rx_tasklet;
+ struct tasklet_struct tx_tasklet;
+ int budget;
+ int cur_task;
+ /* Multicast and receive mode. */
+ spinlock_t mcastlock; /* SMP lock multicast updates. */
+ u16 mcast_filter[4];
+ /* MII transceiver section. */
+ struct mii_if_info mii_if;
+ int mii_preamble_required;
+ unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
+ struct pci_dev *pci_dev;
+ void __iomem *base;
+ spinlock_t statlock;
+};
+
+/* The station address location in the EEPROM. */
+#define EEPROM_SA_OFFSET 0x10
+#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
+ IntrDrvRqst | IntrTxDone | StatsMax | \
+ LinkChange)
+
+static int change_mtu(struct net_device *dev, int new_mtu);
+static int eeprom_read(void __iomem *ioaddr, int location);
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
+static int mdio_wait_link(struct net_device *dev, int wait);
+static int netdev_open(struct net_device *dev);
+static void check_duplex(struct net_device *dev);
+static void netdev_timer(struct timer_list *t);
+static void tx_timeout(struct net_device *dev, unsigned int txqueue);
+static void init_ring(struct net_device *dev);
+static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
+static int reset_tx (struct net_device *dev);
+static irqreturn_t intr_handler(int irq, void *dev_instance);
+static void rx_poll(struct tasklet_struct *t);
+static void tx_poll(struct tasklet_struct *t);
+static void refill_rx (struct net_device *dev);
+static void netdev_error(struct net_device *dev, int intr_status);
+static void set_rx_mode(struct net_device *dev);
+static int __set_mac_addr(struct net_device *dev);
+static int sundance_set_mac_addr(struct net_device *dev, void *data);
+static struct net_device_stats *get_stats(struct net_device *dev);
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int netdev_close(struct net_device *dev);
+static const struct ethtool_ops ethtool_ops;
+
+static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base + ASICCtrl;
+ int countdown;
+
+	/* ST201 documentation states ASICCtrl is a 32-bit register */
+ iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
+ /* ST201 documentation states reset can take up to 1 ms */
+ countdown = 10 + 1;
+ while (ioread32 (ioaddr) & (ResetBusy << 16)) {
+ if (--countdown == 0) {
+			printk(KERN_WARNING "%s: reset not completed!\n", dev->name);
+ break;
+ }
+ udelay(100);
+ }
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void sundance_poll_controller(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ disable_irq(np->pci_dev->irq);
+ intr_handler(np->pci_dev->irq, dev);
+ enable_irq(np->pci_dev->irq);
+}
+#endif
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = netdev_open,
+ .ndo_stop = netdev_close,
+ .ndo_start_xmit = start_tx,
+ .ndo_get_stats = get_stats,
+ .ndo_set_rx_mode = set_rx_mode,
+ .ndo_eth_ioctl = netdev_ioctl,
+ .ndo_tx_timeout = tx_timeout,
+ .ndo_change_mtu = change_mtu,
+ .ndo_set_mac_address = sundance_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = sundance_poll_controller,
+#endif
+};
+
+static int sundance_probe1(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev;
+ struct netdev_private *np;
+ static int card_idx;
+ int chip_idx = ent->driver_data;
+ int irq;
+ int i;
+ void __iomem *ioaddr;
+ u16 mii_ctl;
+ void *ring_space;
+ dma_addr_t ring_dma;
+#ifdef USE_IO_OPS
+ int bar = 0;
+#else
+ int bar = 1;
+#endif
+ int phy, phy_end, phy_idx = 0;
+ __le16 addr[ETH_ALEN / 2];
+
+ if (pci_enable_device(pdev))
+ return -EIO;
+ pci_set_master(pdev);
+
+ irq = pdev->irq;
+
+ dev = alloc_etherdev(sizeof(*np));
+ if (!dev)
+ return -ENOMEM;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ if (pci_request_regions(pdev, DRV_NAME))
+ goto err_out_netdev;
+
+ ioaddr = pci_iomap(pdev, bar, netdev_io_size);
+ if (!ioaddr)
+ goto err_out_res;
+
+ for (i = 0; i < 3; i++)
+ addr[i] =
+ cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
+ eth_hw_addr_set(dev, (u8 *)addr);
+
+ np = netdev_priv(dev);
+ np->ndev = dev;
+ np->base = ioaddr;
+ np->pci_dev = pdev;
+ np->chip_id = chip_idx;
+ np->msg_enable = (1 << debug) - 1;
+ spin_lock_init(&np->lock);
+ spin_lock_init(&np->statlock);
+ tasklet_setup(&np->rx_tasklet, rx_poll);
+ tasklet_setup(&np->tx_tasklet, tx_poll);
+
+ ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
+ &ring_dma, GFP_KERNEL);
+ if (!ring_space)
+ goto err_out_cleardev;
+ np->tx_ring = (struct netdev_desc *)ring_space;
+ np->tx_ring_dma = ring_dma;
+
+ ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
+ &ring_dma, GFP_KERNEL);
+ if (!ring_space)
+ goto err_out_unmap_tx;
+ np->rx_ring = (struct netdev_desc *)ring_space;
+ np->rx_ring_dma = ring_dma;
+
+ np->mii_if.dev = dev;
+ np->mii_if.mdio_read = mdio_read;
+ np->mii_if.mdio_write = mdio_write;
+ np->mii_if.phy_id_mask = 0x1f;
+ np->mii_if.reg_num_mask = 0x1f;
+
+ /* The chip-specific entries in the device structure. */
+ dev->netdev_ops = &netdev_ops;
+ dev->ethtool_ops = &ethtool_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ /* MTU range: 68 - 8191 */
+ dev->min_mtu = ETH_MIN_MTU;
+ dev->max_mtu = 8191;
+
+ pci_set_drvdata(pdev, dev);
+
+ i = register_netdev(dev);
+ if (i)
+ goto err_out_unmap_rx;
+
+ printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
+ dev->name, pci_id_tbl[chip_idx].name, ioaddr,
+ dev->dev_addr, irq);
+
+ np->phys[0] = 1; /* Default setting */
+ np->mii_preamble_required++;
+
+ /*
+	 * It seems some PHYs don't deal well with address 0 being accessed
+ * first
+ */
+ if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
+ phy = 0;
+ phy_end = 31;
+ } else {
+ phy = 1;
+ phy_end = 32; /* wraps to zero, due to 'phy & 0x1f' */
+ }
+ for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
+ int phyx = phy & 0x1f;
+ int mii_status = mdio_read(dev, phyx, MII_BMSR);
+ if (mii_status != 0xffff && mii_status != 0x0000) {
+ np->phys[phy_idx++] = phyx;
+ np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
+ if ((mii_status & 0x0040) == 0)
+ np->mii_preamble_required++;
+ printk(KERN_INFO "%s: MII PHY found at address %d, status "
+ "0x%4.4x advertising %4.4x.\n",
+ dev->name, phyx, mii_status, np->mii_if.advertising);
+ }
+ }
+ np->mii_preamble_required--;
+
+ if (phy_idx == 0) {
+ printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n",
+ dev->name, ioread32(ioaddr + ASICCtrl));
+ goto err_out_unregister;
+ }
+
+ np->mii_if.phy_id = np->phys[0];
+
+ /* Parse override configuration */
+ np->an_enable = 1;
+ if (card_idx < MAX_UNITS) {
+ if (media[card_idx] != NULL) {
+ np->an_enable = 0;
+ if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
+ strcmp (media[card_idx], "4") == 0) {
+ np->speed = 100;
+ np->mii_if.full_duplex = 1;
+ } else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
+ strcmp (media[card_idx], "3") == 0) {
+ np->speed = 100;
+ np->mii_if.full_duplex = 0;
+ } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
+ strcmp (media[card_idx], "2") == 0) {
+ np->speed = 10;
+ np->mii_if.full_duplex = 1;
+ } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
+ strcmp (media[card_idx], "1") == 0) {
+ np->speed = 10;
+ np->mii_if.full_duplex = 0;
+ } else {
+ np->an_enable = 1;
+ }
+ }
+ if (flowctrl == 1)
+ np->flowctrl = 1;
+ }
+
+ /* Fibre PHY? */
+ if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
+ /* Default 100Mbps Full */
+ if (np->an_enable) {
+ np->speed = 100;
+ np->mii_if.full_duplex = 1;
+ np->an_enable = 0;
+ }
+ }
+ /* Reset PHY */
+ mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
+ mdelay (300);
+ /* If flow control enabled, we need to advertise it.*/
+ if (np->flowctrl)
+ mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
+ mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
+ /* Force media type */
+ if (!np->an_enable) {
+ mii_ctl = 0;
+ mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
+ mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
+ mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
+ printk (KERN_INFO "Override speed=%d, %s duplex\n",
+ np->speed, np->mii_if.full_duplex ? "Full" : "Half");
+
+ }
+
+ /* Perhaps move the reset here? */
+ /* Reset the chip to erase previous misconfiguration. */
+ if (netif_msg_hw(np))
+ printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
+ sundance_reset(dev, 0x00ff << 16);
+ if (netif_msg_hw(np))
+ printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
+
+ card_idx++;
+ return 0;
+
+err_out_unregister:
+ unregister_netdev(dev);
+err_out_unmap_rx:
+ dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
+ np->rx_ring, np->rx_ring_dma);
+err_out_unmap_tx:
+ dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
+ np->tx_ring, np->tx_ring_dma);
+err_out_cleardev:
+ pci_iounmap(pdev, ioaddr);
+err_out_res:
+ pci_release_regions(pdev);
+err_out_netdev:
+ free_netdev (dev);
+ return -ENODEV;
+}
+
+static int change_mtu(struct net_device *dev, int new_mtu)
+{
+ if (netif_running(dev))
+ return -EBUSY;
+ WRITE_ONCE(dev->mtu, new_mtu);
+ return 0;
+}
+
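+/* A dummy register read serves as the delay: it cannot complete until
+   earlier PCI writes have been posted, and it takes at least one bus cycle. */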
+#define eeprom_delay(ee_addr) ioread32(ee_addr)
+/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
+static int eeprom_read(void __iomem *ioaddr, int location)
+{
+ int boguscnt = 10000; /* Typical 1900 ticks. */
+ iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
+ do {
+ eeprom_delay(ioaddr + EECtrl);
+ if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
+ return ioread16(ioaddr + EEData);
+ }
+ } while (--boguscnt > 0);
+ return 0;
+}
+
+/* MII transceiver control section.
+ Read and write the MII registers using software-generated serial
+ MDIO protocol. See the MII specifications or DP83840A data sheet
+ for details.
+
+   The maximum data clock rate is 2.5 MHz. The minimum timing is usually
+   met by back-to-back 33 MHz PCI cycles. */
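+
+/* For reference, the 16-bit read command built in mdio_read() below appears
+ * to decompose as: two extra preamble ones, ST=01, OP=10 (read), a 5-bit
+ * PHY address and a 5-bit register number -- hence (0xf6 << 10). The 32-bit
+ * write command in mdio_write() likewise encodes ST=01, OP=01 (write), the
+ * addresses, a 10 turnaround, and 16 data bits -- hence (0x5002 << 16). */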
+#define mdio_delay() ioread8(mdio_addr)
+
+enum mii_reg_bits {
+ MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
+};
+#define MDIO_EnbIn (0)
+#define MDIO_WRITE0 (MDIO_EnbOutput)
+#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
+
+/* Generate the preamble required for initial synchronization and
+ a few older transceivers. */
+static void mdio_sync(void __iomem *mdio_addr)
+{
+ int bits = 32;
+
+ /* Establish sync by sending at least 32 logic ones. */
+ while (--bits >= 0) {
+ iowrite8(MDIO_WRITE1, mdio_addr);
+ mdio_delay();
+ iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
+ mdio_delay();
+ }
+}
+
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *mdio_addr = np->base + MIICtrl;
+ int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+ int i, retval = 0;
+
+ if (np->mii_preamble_required)
+ mdio_sync(mdio_addr);
+
+ /* Shift the read command bits out. */
+ for (i = 15; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+
+ iowrite8(dataval, mdio_addr);
+ mdio_delay();
+ iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
+ mdio_delay();
+ }
+	/* Read the two turnaround bits, 16 data bits, and the wire-idle bit. */
+ for (i = 19; i > 0; i--) {
+ iowrite8(MDIO_EnbIn, mdio_addr);
+ mdio_delay();
+ retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
+ iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+ mdio_delay();
+ }
+ return (retval>>1) & 0xffff;
+}
+
+static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *mdio_addr = np->base + MIICtrl;
+ int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
+ int i;
+
+ if (np->mii_preamble_required)
+ mdio_sync(mdio_addr);
+
+ /* Shift the command bits out. */
+ for (i = 31; i >= 0; i--) {
+ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+
+ iowrite8(dataval, mdio_addr);
+ mdio_delay();
+ iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
+ mdio_delay();
+ }
+ /* Clear out extra bits. */
+ for (i = 2; i > 0; i--) {
+ iowrite8(MDIO_EnbIn, mdio_addr);
+ mdio_delay();
+ iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+ mdio_delay();
+ }
+}
+
+static int mdio_wait_link(struct net_device *dev, int wait)
+{
+ int bmsr;
+ int phy_id;
+ struct netdev_private *np;
+
+ np = netdev_priv(dev);
+ phy_id = np->phys[0];
+
+ do {
+ bmsr = mdio_read(dev, phy_id, MII_BMSR);
+ if (bmsr & 0x0004)
+ return 0;
+ mdelay(1);
+ } while (--wait > 0);
+ return -1;
+}
+
+static int netdev_open(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ const int irq = np->pci_dev->irq;
+ unsigned long flags;
+ int i;
+
+ sundance_reset(dev, 0x00ff << 16);
+
+ i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
+ if (i)
+ return i;
+
+ if (netif_msg_ifup(np))
+ printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq);
+
+ init_ring(dev);
+
+ iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
+ /* The Tx list pointer is written as packets are queued. */
+
+ /* Initialize other registers. */
+ __set_mac_addr(dev);
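+	/* Max frame = MTU plus the 14-byte Ethernet header, plus 4 more
+	 * bytes when a VLAN tag may be present. */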
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+ iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
+#else
+ iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
+#endif
+ if (dev->mtu > 2047)
+ iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
+
+ /* Configure the PCI bus bursts and FIFO thresholds. */
+
+ if (dev->if_port == 0)
+ dev->if_port = np->default_port;
+
+ spin_lock_init(&np->mcastlock);
+
+ set_rx_mode(dev);
+ iowrite16(0, ioaddr + IntrEnable);
+ iowrite16(0, ioaddr + DownCounter);
+ /* Set the chip to poll every N*320nsec. */
+ iowrite8(100, ioaddr + RxDMAPollPeriod);
+ iowrite8(127, ioaddr + TxDMAPollPeriod);
+ /* Fix DFE-580TX packet drop issue */
+ if (np->pci_dev->revision >= 0x14)
+ iowrite8(0x01, ioaddr + DebugCtrl1);
+ netif_start_queue(dev);
+
+ spin_lock_irqsave(&np->lock, flags);
+ reset_tx(dev);
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
+
+	/* Disable WoL: clear all wake-event enable bits. */
+	iowrite8(ioread8(ioaddr + WakeEvent) &
+		 ~(WakePktEnable | MagicPktEnable | LinkEventEnable | WolEnable),
+		 ioaddr + WakeEvent);
+ np->wol_enabled = 0;
+
+ if (netif_msg_ifup(np))
+ printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
+ "MAC Control %x, %4.4x %4.4x.\n",
+ dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
+ ioread32(ioaddr + MACCtrl0),
+ ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
+
+ /* Set the timer to check for link beat. */
+ timer_setup(&np->timer, netdev_timer, 0);
+ np->timer.expires = jiffies + 3*HZ;
+ add_timer(&np->timer);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
+
+ return 0;
+}
+
+static void check_duplex(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
+ int negotiated = mii_lpa & np->mii_if.advertising;
+ int duplex;
+
+ /* Force media */
+ if (!np->an_enable || mii_lpa == 0xffff) {
+ if (np->mii_if.full_duplex)
+ iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
+ ioaddr + MACCtrl0);
+ return;
+ }
+
+ /* Autonegotiation */
+ duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
+ if (np->mii_if.full_duplex != duplex) {
+ np->mii_if.full_duplex = duplex;
+ if (netif_msg_link(np))
+ printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
+ "negotiated capability %4.4x.\n", dev->name,
+ duplex ? "full" : "half", np->phys[0], negotiated);
+ iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
+ }
+}
+
+static void netdev_timer(struct timer_list *t)
+{
+ struct netdev_private *np = timer_container_of(np, t, timer);
+ struct net_device *dev = np->mii_if.dev;
+ void __iomem *ioaddr = np->base;
+ int next_tick = 10*HZ;
+
+ if (netif_msg_timer(np)) {
+ printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
+ "Tx %x Rx %x.\n",
+ dev->name, ioread16(ioaddr + IntrEnable),
+ ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
+ }
+ check_duplex(dev);
+ np->timer.expires = jiffies + next_tick;
+ add_timer(&np->timer);
+}
+
+static void tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ unsigned long flag;
+
+ netif_stop_queue(dev);
+ tasklet_disable_in_atomic(&np->tx_tasklet);
+ iowrite16(0, ioaddr + IntrEnable);
+ printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
+ "TxFrameId %2.2x,"
+ " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
+ ioread8(ioaddr + TxFrameId));
+
+ {
+ int i;
+ for (i=0; i<TX_RING_SIZE; i++) {
+ printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
+ (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
+ le32_to_cpu(np->tx_ring[i].next_desc),
+ le32_to_cpu(np->tx_ring[i].status),
+ (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
+ le32_to_cpu(np->tx_ring[i].frag.addr),
+ le32_to_cpu(np->tx_ring[i].frag.length));
+ }
+ printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
+ ioread32(np->base + TxListPtr),
+ netif_queue_stopped(dev));
+ printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
+ np->cur_tx, np->cur_tx % TX_RING_SIZE,
+ np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
+ printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
+ printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
+ }
+ spin_lock_irqsave(&np->lock, flag);
+
+	/* Stop and restart the chip's Tx processes. */
+ reset_tx(dev);
+ spin_unlock_irqrestore(&np->lock, flag);
+
+ dev->if_port = 0;
+
+ netif_trans_update(dev); /* prevent tx timeout */
+ dev->stats.tx_errors++;
+ if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
+ netif_wake_queue(dev);
+ }
+ iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
+ tasklet_enable(&np->tx_tasklet);
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void init_ring(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int i;
+
+ np->cur_rx = np->cur_tx = 0;
+ np->dirty_rx = np->dirty_tx = 0;
+ np->cur_task = 0;
+
+ np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
+
+ /* Initialize all Rx descriptors. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
+ ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
+ np->rx_ring[i].status = 0;
+ np->rx_ring[i].frag.length = 0;
+ np->rx_skbuff[i] = NULL;
+ }
+
+ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ dma_addr_t addr;
+
+ struct sk_buff *skb =
+ netdev_alloc_skb(dev, np->rx_buf_sz + 2);
+ np->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb_reserve(skb, 2); /* 16 byte align the IP header. */
+ addr = dma_map_single(&np->pci_dev->dev, skb->data,
+ np->rx_buf_sz, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&np->pci_dev->dev, addr)) {
+ dev_kfree_skb(skb);
+ np->rx_skbuff[i] = NULL;
+ break;
+ }
+ np->rx_ring[i].frag.addr = cpu_to_le32(addr);
+ np->rx_ring[i].frag.length = cpu_to_le32(np->rx_buf_sz | LastFrag);
+ }
+ np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_skbuff[i] = NULL;
+ np->tx_ring[i].status = 0;
+ }
+}
+
+static void tx_poll(struct tasklet_struct *t)
+{
+ struct netdev_private *np = from_tasklet(np, t, tx_tasklet);
+ unsigned head = np->cur_task % TX_RING_SIZE;
+ struct netdev_desc *txdesc =
+ &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
+
+ /* Chain the next pointer */
+ for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
+ int entry = np->cur_task % TX_RING_SIZE;
+ txdesc = &np->tx_ring[entry];
+ if (np->last_tx) {
+ np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
+ entry*sizeof(struct netdev_desc));
+ }
+ np->last_tx = txdesc;
+ }
+ /* Indicate the latest descriptor of tx ring */
+ txdesc->status |= cpu_to_le32(DescIntrOnTx);
+
+ if (ioread32 (np->base + TxListPtr) == 0)
+ iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
+ np->base + TxListPtr);
+}
+
+static netdev_tx_t
+start_tx (struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ struct netdev_desc *txdesc;
+ dma_addr_t addr;
+ unsigned entry;
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = np->cur_tx % TX_RING_SIZE;
+ np->tx_skbuff[entry] = skb;
+ txdesc = &np->tx_ring[entry];
+
+ addr = dma_map_single(&np->pci_dev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&np->pci_dev->dev, addr))
+ goto drop_frame;
+
+ txdesc->next_desc = 0;
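+	/* Stash the ring index in status bits 2..9; intr_handler() reads it
+	 * back as the software frame id. */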
+ txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
+ txdesc->frag.addr = cpu_to_le32(addr);
+ txdesc->frag.length = cpu_to_le32 (skb->len | LastFrag);
+
+ /* Increment cur_tx before tasklet_schedule() */
+ np->cur_tx++;
+ mb();
+ /* Schedule a tx_poll() task */
+ tasklet_schedule(&np->tx_tasklet);
+
+ /* On some architectures: explicitly flush cache lines here. */
+ if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
+ !netif_queue_stopped(dev)) {
+ /* do nothing */
+ } else {
+ netif_stop_queue (dev);
+ }
+ if (netif_msg_tx_queued(np)) {
+ printk (KERN_DEBUG
+ "%s: Transmit frame #%d queued in slot %d.\n",
+ dev->name, np->cur_tx, entry);
+ }
+ return NETDEV_TX_OK;
+
+drop_frame:
+ dev_kfree_skb_any(skb);
+ np->tx_skbuff[entry] = NULL;
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+}
+
+/* Reset the hardware Tx path and free all queued Tx buffers */
+static int
+reset_tx (struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ struct sk_buff *skb;
+ int i;
+
+	/* Reset tx logic, TxListPtr will be cleared */
+ iowrite16 (TxDisable, ioaddr + MACCtrl1);
+ sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
+
+ /* free all tx skbuff */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_ring[i].next_desc = 0;
+
+ skb = np->tx_skbuff[i];
+ if (skb) {
+ dma_unmap_single(&np->pci_dev->dev,
+ le32_to_cpu(np->tx_ring[i].frag.addr),
+ skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ np->tx_skbuff[i] = NULL;
+ dev->stats.tx_dropped++;
+ }
+ }
+ np->cur_tx = np->dirty_tx = 0;
+ np->cur_task = 0;
+
+ np->last_tx = NULL;
+ iowrite8(127, ioaddr + TxDMAPollPeriod);
+
+ iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
+ return 0;
+}
+
+/* The interrupt handler cleans up after the Tx thread
+   and schedules Rx work. */
+static irqreturn_t intr_handler(int irq, void *dev_instance)
+{
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ int hw_frame_id;
+ int tx_cnt;
+ int tx_status;
+ int handled = 0;
+ int i;
+
+ do {
+ int intr_status = ioread16(ioaddr + IntrStatus);
+ iowrite16(intr_status, ioaddr + IntrStatus);
+
+ if (netif_msg_intr(np))
+ printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
+ dev->name, intr_status);
+
+ if (!(intr_status & DEFAULT_INTR))
+ break;
+
+ handled = 1;
+
+ if (intr_status & (IntrRxDMADone)) {
+ iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
+ ioaddr + IntrEnable);
+ if (np->budget < 0)
+ np->budget = RX_BUDGET;
+ tasklet_schedule(&np->rx_tasklet);
+ }
+ if (intr_status & (IntrTxDone | IntrDrvRqst)) {
+ tx_status = ioread16 (ioaddr + TxStatus);
+ for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
+ if (netif_msg_tx_done(np))
+ printk
+ ("%s: Transmit status is %2.2x.\n",
+ dev->name, tx_status);
+ if (tx_status & 0x1e) {
+ if (netif_msg_tx_err(np))
+ printk("%s: Transmit error status %4.4x.\n",
+ dev->name, tx_status);
+ dev->stats.tx_errors++;
+ if (tx_status & 0x10)
+ dev->stats.tx_fifo_errors++;
+ if (tx_status & 0x08)
+ dev->stats.collisions++;
+ if (tx_status & 0x04)
+ dev->stats.tx_fifo_errors++;
+ if (tx_status & 0x02)
+ dev->stats.tx_window_errors++;
+
+ /*
+ ** This reset has been verified on
+					** DFE-580TX boards! phdm@macqel.be.
+ */
+ if (tx_status & 0x10) { /* TxUnderrun */
+ /* Restart Tx FIFO and transmitter */
+ sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
+ /* No need to reset the Tx pointer here */
+ }
+ /* Restart the Tx. Need to make sure tx enabled */
+ i = 10;
+ do {
+ iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
+ if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
+ break;
+ mdelay(1);
+ } while (--i);
+ }
+ /* Yup, this is a documentation bug. It cost me *hours*. */
+ iowrite16 (0, ioaddr + TxStatus);
+ if (tx_cnt < 0) {
+ iowrite32(5000, ioaddr + DownCounter);
+ break;
+ }
+ tx_status = ioread16 (ioaddr + TxStatus);
+ }
+ hw_frame_id = (tx_status >> 8) & 0xff;
+ } else {
+ hw_frame_id = ioread8(ioaddr + TxFrameId);
+ }
+
+ if (np->pci_dev->revision >= 0x14) {
+ spin_lock(&np->lock);
+ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
+ int entry = np->dirty_tx % TX_RING_SIZE;
+ struct sk_buff *skb;
+ int sw_frame_id;
+ sw_frame_id = (le32_to_cpu(
+ np->tx_ring[entry].status) >> 2) & 0xff;
+ if (sw_frame_id == hw_frame_id &&
+ !(le32_to_cpu(np->tx_ring[entry].status)
+ & 0x00010000))
+ break;
+ if (sw_frame_id == (hw_frame_id + 1) %
+ TX_RING_SIZE)
+ break;
+ skb = np->tx_skbuff[entry];
+ /* Free the original skb. */
+ dma_unmap_single(&np->pci_dev->dev,
+ le32_to_cpu(np->tx_ring[entry].frag.addr),
+ skb->len, DMA_TO_DEVICE);
+ dev_consume_skb_irq(np->tx_skbuff[entry]);
+ np->tx_skbuff[entry] = NULL;
+ np->tx_ring[entry].frag.addr = 0;
+ np->tx_ring[entry].frag.length = 0;
+ }
+ spin_unlock(&np->lock);
+ } else {
+ spin_lock(&np->lock);
+ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
+ int entry = np->dirty_tx % TX_RING_SIZE;
+ struct sk_buff *skb;
+ if (!(le32_to_cpu(np->tx_ring[entry].status)
+ & 0x00010000))
+ break;
+ skb = np->tx_skbuff[entry];
+ /* Free the original skb. */
+ dma_unmap_single(&np->pci_dev->dev,
+ le32_to_cpu(np->tx_ring[entry].frag.addr),
+ skb->len, DMA_TO_DEVICE);
+ dev_consume_skb_irq(np->tx_skbuff[entry]);
+ np->tx_skbuff[entry] = NULL;
+ np->tx_ring[entry].frag.addr = 0;
+ np->tx_ring[entry].frag.length = 0;
+ }
+ spin_unlock(&np->lock);
+ }
+
+ if (netif_queue_stopped(dev) &&
+ np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
+ /* The ring is no longer full, clear busy flag. */
+ netif_wake_queue (dev);
+ }
+ /* Abnormal error summary/uncommon events handlers. */
+ if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
+ netdev_error(dev, intr_status);
+ } while (0);
+ if (netif_msg_intr(np))
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, ioread16(ioaddr + IntrStatus));
+ return IRQ_RETVAL(handled);
+}
+
+static void rx_poll(struct tasklet_struct *t)
+{
+ struct netdev_private *np = from_tasklet(np, t, rx_tasklet);
+ struct net_device *dev = np->ndev;
+ int entry = np->cur_rx % RX_RING_SIZE;
+ int boguscnt = np->budget;
+ void __iomem *ioaddr = np->base;
+ int received = 0;
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+ while (1) {
+ struct netdev_desc *desc = &(np->rx_ring[entry]);
+ u32 frame_status = le32_to_cpu(desc->status);
+ int pkt_len;
+
+ if (--boguscnt < 0) {
+ goto not_done;
+ }
+ if (!(frame_status & DescOwn))
+ break;
+ pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */
+ if (netif_msg_rx_status(np))
+ printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
+ frame_status);
+ if (frame_status & 0x001f4000) {
+			/* There was an error. */
+ if (netif_msg_rx_err(np))
+ printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
+ frame_status);
+ dev->stats.rx_errors++;
+ if (frame_status & 0x00100000)
+ dev->stats.rx_length_errors++;
+ if (frame_status & 0x00010000)
+ dev->stats.rx_fifo_errors++;
+ if (frame_status & 0x00060000)
+ dev->stats.rx_frame_errors++;
+ if (frame_status & 0x00080000)
+ dev->stats.rx_crc_errors++;
+ if (frame_status & 0x00100000) {
+ printk(KERN_WARNING "%s: Oversized Ethernet frame,"
+ " status %8.8x.\n",
+ dev->name, frame_status);
+ }
+ } else {
+ struct sk_buff *skb;
+#ifndef final_version
+ if (netif_msg_rx_status(np))
+ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
+ ", bogus_cnt %d.\n",
+ pkt_len, boguscnt);
+#endif
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < rx_copybreak &&
+ (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+ dma_sync_single_for_cpu(&np->pci_dev->dev,
+ le32_to_cpu(desc->frag.addr),
+ np->rx_buf_sz, DMA_FROM_DEVICE);
+ skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
+ dma_sync_single_for_device(&np->pci_dev->dev,
+ le32_to_cpu(desc->frag.addr),
+ np->rx_buf_sz, DMA_FROM_DEVICE);
+ skb_put(skb, pkt_len);
+ } else {
+ dma_unmap_single(&np->pci_dev->dev,
+ le32_to_cpu(desc->frag.addr),
+ np->rx_buf_sz, DMA_FROM_DEVICE);
+				skb = np->rx_skbuff[entry];
+				skb_put(skb, pkt_len);
+ np->rx_skbuff[entry] = NULL;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
+ netif_rx(skb);
+ }
+ entry = (entry + 1) % RX_RING_SIZE;
+ received++;
+ }
+ np->cur_rx = entry;
+ refill_rx (dev);
+ np->budget -= received;
+ iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
+ return;
+
+not_done:
+ np->cur_rx = entry;
+ refill_rx (dev);
+ if (!received)
+ received = 1;
+ np->budget -= received;
+ if (np->budget <= 0)
+ np->budget = RX_BUDGET;
+ tasklet_schedule(&np->rx_tasklet);
+}
+
+static void refill_rx (struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int entry;
+
+ /* Refill the Rx ring buffers. */
+ for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
+ np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
+ struct sk_buff *skb;
+ dma_addr_t addr;
+
+ entry = np->dirty_rx % RX_RING_SIZE;
+ if (np->rx_skbuff[entry] == NULL) {
+ skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
+ np->rx_skbuff[entry] = skb;
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ addr = dma_map_single(&np->pci_dev->dev, skb->data,
+ np->rx_buf_sz, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&np->pci_dev->dev, addr)) {
+ dev_kfree_skb_irq(skb);
+ np->rx_skbuff[entry] = NULL;
+ break;
+ }
+
+ np->rx_ring[entry].frag.addr = cpu_to_le32(addr);
+ }
+ /* Perhaps we need not reset this field. */
+ np->rx_ring[entry].frag.length =
+ cpu_to_le32(np->rx_buf_sz | LastFrag);
+ np->rx_ring[entry].status = 0;
+ }
+}
+static void netdev_error(struct net_device *dev, int intr_status)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ u16 mii_ctl, mii_advertise, mii_lpa;
+ int speed;
+
+ if (intr_status & LinkChange) {
+ if (mdio_wait_link(dev, 10) == 0) {
+ printk(KERN_INFO "%s: Link up\n", dev->name);
+ if (np->an_enable) {
+ mii_advertise = mdio_read(dev, np->phys[0],
+ MII_ADVERTISE);
+ mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
+ mii_advertise &= mii_lpa;
+ printk(KERN_INFO "%s: Link changed: ",
+ dev->name);
+ if (mii_advertise & ADVERTISE_100FULL) {
+ np->speed = 100;
+ printk("100Mbps, full duplex\n");
+ } else if (mii_advertise & ADVERTISE_100HALF) {
+ np->speed = 100;
+ printk("100Mbps, half duplex\n");
+ } else if (mii_advertise & ADVERTISE_10FULL) {
+ np->speed = 10;
+ printk("10Mbps, full duplex\n");
+ } else if (mii_advertise & ADVERTISE_10HALF) {
+ np->speed = 10;
+ printk("10Mbps, half duplex\n");
+ } else
+ printk("\n");
+
+ } else {
+ mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
+ speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
+ np->speed = speed;
+				printk(KERN_INFO "%s: Link changed: %dMbps, ",
+ dev->name, speed);
+ printk("%s duplex.\n",
+ (mii_ctl & BMCR_FULLDPLX) ?
+ "full" : "half");
+ }
+ check_duplex(dev);
+ if (np->flowctrl && np->mii_if.full_duplex) {
+ iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
+ ioaddr + MulticastFilter1+2);
+ iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
+ ioaddr + MACCtrl0);
+ }
+ netif_carrier_on(dev);
+ } else {
+ printk(KERN_INFO "%s: Link down\n", dev->name);
+ netif_carrier_off(dev);
+ }
+ }
+ if (intr_status & StatsMax) {
+ get_stats(dev);
+ }
+ if (intr_status & IntrPCIErr) {
+ printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
+ dev->name, intr_status);
+ /* We must do a global reset of DMA to continue. */
+ }
+}
+
+static struct net_device_stats *get_stats(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ unsigned long flags;
+ u8 late_coll, single_coll, mult_coll;
+
+ spin_lock_irqsave(&np->statlock, flags);
+	/* The chip only needs to report silently dropped frames. */
+ dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
+ dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
+ dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
+ dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
+
+ mult_coll = ioread8(ioaddr + StatsMultiColl);
+ np->xstats.tx_multiple_collisions += mult_coll;
+ single_coll = ioread8(ioaddr + StatsOneColl);
+ np->xstats.tx_single_collisions += single_coll;
+ late_coll = ioread8(ioaddr + StatsLateColl);
+ np->xstats.tx_late_collisions += late_coll;
+ dev->stats.collisions += mult_coll
+ + single_coll
+ + late_coll;
+
+ np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
+ np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
+ np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
+ np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
+ np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
+ np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
+ np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);
+
+ dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
+ dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
+ dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
+ dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
+
+ spin_unlock_irqrestore(&np->statlock, flags);
+
+ return &dev->stats;
+}
+
+static void set_rx_mode(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ u16 mc_filter[4]; /* Multicast hash filter */
+ u32 rx_mode;
+ int i;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ } else if (!netdev_mc_empty(dev)) {
+ struct netdev_hw_addr *ha;
+ int bit;
+ int index;
+ int crc;
+ memset (mc_filter, 0, sizeof (mc_filter));
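+		/* Index the 64-bit hash filter with the bit-reversed top six
+		 * bits of the little-endian CRC of each address. */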
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
+ for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
+ if (crc & 0x80000000) index |= 1 << bit;
+ mc_filter[index/16] |= (1 << (index % 16));
+ }
+ rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
+ } else {
+ iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
+ return;
+ }
+ if (np->mii_if.full_duplex && np->flowctrl)
+ mc_filter[3] |= 0x0200;
+
+ for (i = 0; i < 4; i++)
+ iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
+ iowrite8(rx_mode, ioaddr + RxMode);
+}
+
+static int __set_mac_addr(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ u16 addr16;
+
+ addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
+ iowrite16(addr16, np->base + StationAddr);
+ addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
+ iowrite16(addr16, np->base + StationAddr+2);
+ addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
+ iowrite16(addr16, np->base + StationAddr+4);
+ return 0;
+}
+
+/* Invoked with rtnl_lock held */
+static int sundance_set_mac_addr(struct net_device *dev, void *data)
+{
+ const struct sockaddr *addr = data;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+ eth_hw_addr_set(dev, addr->sa_data);
+ __set_mac_addr(dev);
+
+ return 0;
+}
+
+static const struct {
+ const char name[ETH_GSTRING_LEN];
+} sundance_stats[] = {
+ { "tx_multiple_collisions" },
+ { "tx_single_collisions" },
+ { "tx_late_collisions" },
+ { "tx_deferred" },
+ { "tx_deferred_excessive" },
+ { "tx_aborted" },
+ { "tx_bcasts" },
+ { "rx_bcasts" },
+ { "tx_mcasts" },
+ { "rx_mcasts" },
+};
+
+static int check_if_running(struct net_device *dev)
+{
+ if (!netif_running(dev))
+ return -EINVAL;
+ return 0;
+}
+
+static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+}
+
+static int get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ spin_lock_irq(&np->lock);
+ mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
+ spin_unlock_irq(&np->lock);
+ return 0;
+}
+
+static int set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int res;
+ spin_lock_irq(&np->lock);
+ res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
+ spin_unlock_irq(&np->lock);
+ return res;
+}
+
+static int nway_reset(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ return mii_nway_restart(&np->mii_if);
+}
+
+static u32 get_link(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ return mii_link_ok(&np->mii_if);
+}
+
+static u32 get_msglevel(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ return np->msg_enable;
+}
+
+static void set_msglevel(struct net_device *dev, u32 val)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ np->msg_enable = val;
+}
+
+static void get_strings(struct net_device *dev, u32 stringset,
+ u8 *data)
+{
+ if (stringset == ETH_SS_STATS)
+ memcpy(data, sundance_stats, sizeof(sundance_stats));
+}
+
+static int get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(sundance_stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int i = 0;
+
+ get_stats(dev);
+ data[i++] = np->xstats.tx_multiple_collisions;
+ data[i++] = np->xstats.tx_single_collisions;
+ data[i++] = np->xstats.tx_late_collisions;
+ data[i++] = np->xstats.tx_deferred;
+ data[i++] = np->xstats.tx_deferred_excessive;
+ data[i++] = np->xstats.tx_aborted;
+ data[i++] = np->xstats.tx_bcasts;
+ data[i++] = np->xstats.rx_bcasts;
+ data[i++] = np->xstats.tx_mcasts;
+ data[i++] = np->xstats.rx_mcasts;
+}
+
+#ifdef CONFIG_PM
+
+static void sundance_get_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ u8 wol_bits;
+
+ wol->wolopts = 0;
+
+ wol->supported = (WAKE_PHY | WAKE_MAGIC);
+ if (!np->wol_enabled)
+ return;
+
+ wol_bits = ioread8(ioaddr + WakeEvent);
+ if (wol_bits & MagicPktEnable)
+ wol->wolopts |= WAKE_MAGIC;
+ if (wol_bits & LinkEventEnable)
+ wol->wolopts |= WAKE_PHY;
+}
+
+static int sundance_set_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ u8 wol_bits;
+
+ if (!device_can_wakeup(&np->pci_dev->dev))
+ return -EOPNOTSUPP;
+
+ np->wol_enabled = !!(wol->wolopts);
+ wol_bits = ioread8(ioaddr + WakeEvent);
+ wol_bits &= ~(WakePktEnable | MagicPktEnable |
+ LinkEventEnable | WolEnable);
+
+ if (np->wol_enabled) {
+ if (wol->wolopts & WAKE_MAGIC)
+ wol_bits |= (MagicPktEnable | WolEnable);
+ if (wol->wolopts & WAKE_PHY)
+ wol_bits |= (LinkEventEnable | WolEnable);
+ }
+ iowrite8(wol_bits, ioaddr + WakeEvent);
+
+ device_set_wakeup_enable(&np->pci_dev->dev, np->wol_enabled);
+
+ return 0;
+}
+#else
+#define sundance_get_wol NULL
+#define sundance_set_wol NULL
+#endif /* CONFIG_PM */
+
+static const struct ethtool_ops ethtool_ops = {
+ .begin = check_if_running,
+ .get_drvinfo = get_drvinfo,
+ .nway_reset = nway_reset,
+ .get_link = get_link,
+ .get_wol = sundance_get_wol,
+ .set_wol = sundance_set_wol,
+ .get_msglevel = get_msglevel,
+ .set_msglevel = set_msglevel,
+ .get_strings = get_strings,
+ .get_sset_count = get_sset_count,
+ .get_ethtool_stats = get_ethtool_stats,
+ .get_link_ksettings = get_link_ksettings,
+ .set_link_ksettings = set_link_ksettings,
+};
+
+static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ int rc;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ spin_lock_irq(&np->lock);
+ rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
+ spin_unlock_irq(&np->lock);
+
+ return rc;
+}
+
+static int netdev_close(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+ struct sk_buff *skb;
+ int i;
+
+ /* Wait and kill tasklet */
+ tasklet_kill(&np->rx_tasklet);
+ tasklet_kill(&np->tx_tasklet);
+ np->cur_tx = 0;
+ np->dirty_tx = 0;
+ np->cur_task = 0;
+ np->last_tx = NULL;
+
+ netif_stop_queue(dev);
+
+ if (netif_msg_ifdown(np)) {
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
+ "Rx %4.4x Int %2.2x.\n",
+ dev->name, ioread8(ioaddr + TxStatus),
+ ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
+ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
+ dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
+ }
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ iowrite16(0x0000, ioaddr + IntrEnable);
+
+	/* Disable Rx and Tx DMA so resources can be released safely */
+ iowrite32(0x500, ioaddr + DMACtrl);
+
+ /* Stop the chip's Tx and Rx processes. */
+ iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
+
+ for (i = 2000; i > 0; i--) {
+ if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
+ break;
+ mdelay(1);
+ }
+
+ iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
+ ioaddr + ASIC_HI_WORD(ASICCtrl));
+
+ for (i = 2000; i > 0; i--) {
+ if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
+ break;
+ mdelay(1);
+ }
+
+#ifdef __i386__
+ if (netif_msg_hw(np)) {
+ printk(KERN_DEBUG " Tx ring at %8.8x:\n",
+ (int)(np->tx_ring_dma));
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
+ i, np->tx_ring[i].status, np->tx_ring[i].frag.addr,
+ np->tx_ring[i].frag.length);
+ printk(KERN_DEBUG " Rx ring %8.8x:\n",
+ (int)(np->rx_ring_dma));
+ for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
+ printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
+ i, np->rx_ring[i].status, np->rx_ring[i].frag.addr,
+ np->rx_ring[i].frag.length);
+ }
+ }
+#endif /* __i386__ debugging only */
+
+ free_irq(np->pci_dev->irq, dev);
+
+ timer_delete_sync(&np->timer);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].status = 0;
+ skb = np->rx_skbuff[i];
+ if (skb) {
+ dma_unmap_single(&np->pci_dev->dev,
+ le32_to_cpu(np->rx_ring[i].frag.addr),
+ np->rx_buf_sz, DMA_FROM_DEVICE);
+ dev_kfree_skb(skb);
+ np->rx_skbuff[i] = NULL;
+ }
+ np->rx_ring[i].frag.addr = cpu_to_le32(0xBADF00D0); /* poison */
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_ring[i].next_desc = 0;
+ skb = np->tx_skbuff[i];
+ if (skb) {
+ dma_unmap_single(&np->pci_dev->dev,
+ le32_to_cpu(np->tx_ring[i].frag.addr),
+ skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb(skb);
+ np->tx_skbuff[i] = NULL;
+ }
+ }
+
+ return 0;
+}
+
+static void sundance_remove1(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev) {
+ struct netdev_private *np = netdev_priv(dev);
+ unregister_netdev(dev);
+ dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
+ np->rx_ring, np->rx_ring_dma);
+ dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
+ np->tx_ring, np->tx_ring_dma);
+ pci_iounmap(pdev, np->base);
+ pci_release_regions(pdev);
+ free_netdev(dev);
+ }
+}
+
+static int __maybe_unused sundance_suspend(struct device *dev_d)
+{
+ struct net_device *dev = dev_get_drvdata(dev_d);
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base;
+
+ if (!netif_running(dev))
+ return 0;
+
+ netdev_close(dev);
+ netif_device_detach(dev);
+
+ if (np->wol_enabled) {
+ iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
+ iowrite16(RxEnable, ioaddr + MACCtrl1);
+ }
+
+ device_set_wakeup_enable(dev_d, np->wol_enabled);
+
+ return 0;
+}
+
+static int __maybe_unused sundance_resume(struct device *dev_d)
+{
+ struct net_device *dev = dev_get_drvdata(dev_d);
+ int err = 0;
+
+ if (!netif_running(dev))
+ return 0;
+
+ err = netdev_open(dev);
+ if (err) {
+ printk(KERN_ERR "%s: Can't resume interface!\n",
+ dev->name);
+ goto out;
+ }
+
+ netif_device_attach(dev);
+
+out:
+ return err;
+}
+
+static SIMPLE_DEV_PM_OPS(sundance_pm_ops, sundance_suspend, sundance_resume);
+
+static struct pci_driver sundance_driver = {
+ .name = DRV_NAME,
+ .id_table = sundance_pci_tbl,
+ .probe = sundance_probe1,
+ .remove = sundance_remove1,
+ .driver.pm = &sundance_pm_ops,
+};
+
+module_pci_driver(sundance_driver);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index d730af4a50c7..bb5d2fa15736 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -3856,8 +3856,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
status = be_mcc_notify_wait(adapter);
err:
- dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
spin_unlock_bh(&adapter->mcc_lock);
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
return status;
}
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 5d0c0906878d..a863f7841210 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1750,16 +1750,17 @@ err_register_mdiobus:
static void ftgmac100_phy_disconnect(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
- if (!netdev->phydev)
+ if (!phydev)
return;
- phy_disconnect(netdev->phydev);
+ phy_disconnect(phydev);
if (of_phy_is_fixed_link(priv->dev->of_node))
of_phy_deregister_fixed_link(priv->dev->of_node);
if (priv->use_ncsi)
- fixed_phy_unregister(netdev->phydev);
+ fixed_phy_unregister(phydev);
}
static void ftgmac100_destroy_mdio(struct net_device *netdev)
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 0c588e03b15e..d09e456f14c0 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -371,8 +371,10 @@ static int dpaa_get_ts_info(struct net_device *net_dev,
of_node_put(ptp_node);
}
- if (ptp_dev)
+ if (ptp_dev) {
ptp = platform_get_drvdata(ptp_dev);
+ put_device(&ptp_dev->dev);
+ }
if (ptp)
info->phc_index = ptp->phc_index;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 0f4efd505332..c96d1d6ba8fe 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -4884,7 +4884,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
priv->tx_tstamp_type = HWTSTAMP_TX_OFF;
priv->rx_tstamp = false;
- priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", 0, 0);
+ priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", WQ_PERCPU, 0);
if (!priv->dpaa2_ptp_wq) {
err = -ENOMEM;
goto err_wq_alloc;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index 4643a3380618..b1e1ad9e4b48 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -2736,7 +2736,7 @@ static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw)
dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n");
goto err_get_attr;
}
- ethsw->bpid = dpbp_attrs.id;
+ ethsw->bpid = dpbp_attrs.bpid;
return 0;
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index 54b0f0a5a6bb..117038104b69 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -28,6 +28,7 @@ config NXP_NTMP
config FSL_ENETC
tristate "ENETC PF driver"
+ depends on PTP_1588_CLOCK_OPTIONAL
depends on PCI_MSI
select FSL_ENETC_CORE
select FSL_ENETC_IERB
@@ -45,6 +46,7 @@ config FSL_ENETC
config NXP_ENETC4
tristate "ENETC4 PF driver"
+ depends on PTP_1588_CLOCK_OPTIONAL
depends on PCI_MSI
select FSL_ENETC_CORE
select FSL_ENETC_MDIO
@@ -62,6 +64,7 @@ config NXP_ENETC4
config FSL_ENETC_VF
tristate "ENETC VF driver"
+ depends on PTP_1588_CLOCK_OPTIONAL
depends on PCI_MSI
select FSL_ENETC_CORE
select FSL_ENETC_MDIO
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index e4287725832e..aae462a0cf5a 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -221,22 +221,111 @@ static void enetc_unwind_tx_frame(struct enetc_bdr *tx_ring, int count, int i)
}
}
+static void enetc_set_one_step_ts(struct enetc_si *si, bool udp, int offset)
+{
+ u32 val = ENETC_PM0_SINGLE_STEP_EN;
+
+ val |= ENETC_SET_SINGLE_STEP_OFFSET(offset);
+ if (udp)
+ val |= ENETC_PM0_SINGLE_STEP_CH;
+
+ /* The "Correction" field of a packet is updated based on the
+ * current time and the timestamp provided
+ */
+ enetc_port_mac_wr(si, ENETC_PM0_SINGLE_STEP, val);
+}
+
+static void enetc4_set_one_step_ts(struct enetc_si *si, bool udp, int offset)
+{
+ u32 val = PM_SINGLE_STEP_EN;
+
+ val |= PM_SINGLE_STEP_OFFSET_SET(offset);
+ if (udp)
+ val |= PM_SINGLE_STEP_CH;
+
+ enetc_port_mac_wr(si, ENETC4_PM_SINGLE_STEP(0), val);
+}
+
+static u32 enetc_update_ptp_sync_msg(struct enetc_ndev_priv *priv,
+ struct sk_buff *skb, bool csum_offload)
+{
+ struct enetc_skb_cb *enetc_cb = ENETC_SKB_CB(skb);
+ u16 tstamp_off = enetc_cb->origin_tstamp_off;
+ u16 corr_off = enetc_cb->correction_off;
+ struct enetc_si *si = priv->si;
+ struct enetc_hw *hw = &si->hw;
+ __be32 new_sec_l, new_nsec;
+ __be16 new_sec_h;
+ u32 lo, hi, nsec;
+ u8 *data;
+ u64 sec;
+
+ lo = enetc_rd_hot(hw, ENETC_SICTR0);
+ hi = enetc_rd_hot(hw, ENETC_SICTR1);
+ sec = (u64)hi << 32 | lo;
+ nsec = do_div(sec, 1000000000);
+
+ /* Update originTimestamp field of Sync packet
+ * - 48 bits seconds field
+	 * - 32 bits nanoseconds field
+ *
+ * In addition, if csum_offload is false, the UDP checksum needs
+ * to be updated by software after updating originTimestamp field,
+ * otherwise the hardware will calculate the wrong checksum when
+ * updating the correction field and update it to the packet.
+ */
+
+ data = skb_mac_header(skb);
+ new_sec_h = htons((sec >> 32) & 0xffff);
+ new_sec_l = htonl(sec & 0xffffffff);
+ new_nsec = htonl(nsec);
+ if (enetc_cb->udp && !csum_offload) {
+ struct udphdr *uh = udp_hdr(skb);
+ __be32 old_sec_l, old_nsec;
+ __be16 old_sec_h;
+
+ old_sec_h = *(__be16 *)(data + tstamp_off);
+ inet_proto_csum_replace2(&uh->check, skb, old_sec_h,
+ new_sec_h, false);
+
+ old_sec_l = *(__be32 *)(data + tstamp_off + 2);
+ inet_proto_csum_replace4(&uh->check, skb, old_sec_l,
+ new_sec_l, false);
+
+ old_nsec = *(__be32 *)(data + tstamp_off + 6);
+ inet_proto_csum_replace4(&uh->check, skb, old_nsec,
+ new_nsec, false);
+ }
+
+ *(__be16 *)(data + tstamp_off) = new_sec_h;
+ *(__be32 *)(data + tstamp_off + 2) = new_sec_l;
+ *(__be32 *)(data + tstamp_off + 6) = new_nsec;
+
+ /* Configure single-step register */
+ if (is_enetc_rev1(si))
+ enetc_set_one_step_ts(si, enetc_cb->udp, corr_off);
+ else
+ enetc4_set_one_step_ts(si, enetc_cb->udp, corr_off);
+
+ return lo & ENETC_TXBD_TSTAMP;
+}
+
static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
{
bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false;
struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev);
- struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_skb_cb *enetc_cb = ENETC_SKB_CB(skb);
struct enetc_tx_swbd *tx_swbd;
int len = skb_headlen(skb);
union enetc_tx_bd temp_bd;
- u8 msgtype, twostep, udp;
+ bool csum_offload = false;
union enetc_tx_bd *txbd;
- u16 offset1, offset2;
int i, count = 0;
skb_frag_t *frag;
unsigned int f;
dma_addr_t dma;
u8 flags = 0;
+ u32 tstamp;
enetc_clear_tx_bd(&temp_bd);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -256,11 +345,19 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
temp_bd.l4_aux = FIELD_PREP(ENETC_TX_BD_L4T,
ENETC_TXBD_L4T_UDP);
flags |= ENETC_TXBD_FLAGS_CSUM_LSO | ENETC_TXBD_FLAGS_L4CS;
+ csum_offload = true;
} else if (skb_checksum_help(skb)) {
return 0;
}
}
+ if (enetc_cb->flag & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
+ do_onestep_tstamp = true;
+ tstamp = enetc_update_ptp_sync_msg(priv, skb, csum_offload);
+ } else if (enetc_cb->flag & ENETC_F_TX_TSTAMP) {
+ do_twostep_tstamp = true;
+ }
+
i = tx_ring->next_to_use;
txbd = ENETC_TXBD(*tx_ring, i);
prefetchw(txbd);
@@ -280,17 +377,6 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
count++;
do_vlan = skb_vlan_tag_present(skb);
- if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
- if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep, &offset1,
- &offset2) ||
- msgtype != PTP_MSGTYPE_SYNC || twostep)
- WARN_ONCE(1, "Bad packet for one-step timestamping\n");
- else
- do_onestep_tstamp = true;
- } else if (skb->cb[0] & ENETC_F_TX_TSTAMP) {
- do_twostep_tstamp = true;
- }
-
tx_swbd->do_twostep_tstamp = do_twostep_tstamp;
tx_swbd->qbv_en = !!(priv->active_offloads & ENETC_F_QBV);
tx_swbd->check_wb = tx_swbd->do_twostep_tstamp || tx_swbd->qbv_en;
@@ -333,65 +419,9 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
}
if (do_onestep_tstamp) {
- __be32 new_sec_l, new_nsec;
- u32 lo, hi, nsec, val;
- __be16 new_sec_h;
- u8 *data;
- u64 sec;
-
- lo = enetc_rd_hot(hw, ENETC_SICTR0);
- hi = enetc_rd_hot(hw, ENETC_SICTR1);
- sec = (u64)hi << 32 | lo;
- nsec = do_div(sec, 1000000000);
-
/* Configure extension BD */
- temp_bd.ext.tstamp = cpu_to_le32(lo & 0x3fffffff);
+ temp_bd.ext.tstamp = cpu_to_le32(tstamp);
e_flags |= ENETC_TXBD_E_FLAGS_ONE_STEP_PTP;
-
- /* Update originTimestamp field of Sync packet
- * - 48 bits seconds field
- * - 32 bits nanseconds field
- *
- * In addition, the UDP checksum needs to be updated
- * by software after updating originTimestamp field,
- * otherwise the hardware will calculate the wrong
- * checksum when updating the correction field and
- * update it to the packet.
- */
- data = skb_mac_header(skb);
- new_sec_h = htons((sec >> 32) & 0xffff);
- new_sec_l = htonl(sec & 0xffffffff);
- new_nsec = htonl(nsec);
- if (udp) {
- struct udphdr *uh = udp_hdr(skb);
- __be32 old_sec_l, old_nsec;
- __be16 old_sec_h;
-
- old_sec_h = *(__be16 *)(data + offset2);
- inet_proto_csum_replace2(&uh->check, skb, old_sec_h,
- new_sec_h, false);
-
- old_sec_l = *(__be32 *)(data + offset2 + 2);
- inet_proto_csum_replace4(&uh->check, skb, old_sec_l,
- new_sec_l, false);
-
- old_nsec = *(__be32 *)(data + offset2 + 6);
- inet_proto_csum_replace4(&uh->check, skb, old_nsec,
- new_nsec, false);
- }
-
- *(__be16 *)(data + offset2) = new_sec_h;
- *(__be32 *)(data + offset2 + 2) = new_sec_l;
- *(__be32 *)(data + offset2 + 6) = new_nsec;
-
- /* Configure single-step register */
- val = ENETC_PM0_SINGLE_STEP_EN;
- val |= ENETC_SET_SINGLE_STEP_OFFSET(offset1);
- if (udp)
- val |= ENETC_PM0_SINGLE_STEP_CH;
-
- enetc_port_mac_wr(priv->si, ENETC_PM0_SINGLE_STEP,
- val);
} else if (do_twostep_tstamp) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
@@ -938,12 +968,13 @@ err_chained_bd:
static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
+ struct enetc_skb_cb *enetc_cb = ENETC_SKB_CB(skb);
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_bdr *tx_ring;
int count;
/* Queue one-step Sync packet if already locked */
- if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
+ if (enetc_cb->flag & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
if (test_and_set_bit_lock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS,
&priv->flags)) {
skb_queue_tail(&priv->tx_skbs, skb);
@@ -1005,24 +1036,29 @@ drop_packet_err:
netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
{
+ struct enetc_skb_cb *enetc_cb = ENETC_SKB_CB(skb);
struct enetc_ndev_priv *priv = netdev_priv(ndev);
u8 udp, msgtype, twostep;
u16 offset1, offset2;
- /* Mark tx timestamp type on skb->cb[0] if requires */
+ /* Mark the tx timestamp type on enetc_cb->flag if required */
if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- (priv->active_offloads & ENETC_F_TX_TSTAMP_MASK)) {
- skb->cb[0] = priv->active_offloads & ENETC_F_TX_TSTAMP_MASK;
- } else {
- skb->cb[0] = 0;
- }
+ (priv->active_offloads & ENETC_F_TX_TSTAMP_MASK))
+ enetc_cb->flag = priv->active_offloads & ENETC_F_TX_TSTAMP_MASK;
+ else
+ enetc_cb->flag = 0;
/* Fall back to two-step timestamp if not one-step Sync packet */
- if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
+ if (enetc_cb->flag & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep,
&offset1, &offset2) ||
- msgtype != PTP_MSGTYPE_SYNC || twostep != 0)
- skb->cb[0] = ENETC_F_TX_TSTAMP;
+ msgtype != PTP_MSGTYPE_SYNC || twostep != 0) {
+ enetc_cb->flag = ENETC_F_TX_TSTAMP;
+ } else {
+ enetc_cb->udp = !!udp;
+ enetc_cb->correction_off = offset1;
+ enetc_cb->origin_tstamp_off = offset2;
+ }
}
return enetc_start_xmit(skb, ndev);
@@ -1214,7 +1250,9 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
if (xdp_frame) {
xdp_return_frame(xdp_frame);
} else if (skb) {
- if (unlikely(skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) {
+ struct enetc_skb_cb *enetc_cb = ENETC_SKB_CB(skb);
+
+ if (unlikely(enetc_cb->flag & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) {
/* Start work to release lock for next one-step
* timestamping packet. And send one skb in
* tx_skbs queue if has.
@@ -1397,8 +1435,7 @@ static void enetc_get_offloads(struct enetc_bdr *rx_ring,
__vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt));
}
- if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK) &&
- (priv->active_offloads & ENETC_F_RX_TSTAMP))
+ if (priv->active_offloads & ENETC_F_RX_TSTAMP)
enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb);
}
@@ -3301,7 +3338,7 @@ int enetc_hwtstamp_set(struct net_device *ndev,
struct enetc_ndev_priv *priv = netdev_priv(ndev);
int err, new_offloads = priv->active_offloads;
- if (!IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK))
+ if (!enetc_ptp_clock_is_enabled(priv->si))
return -EOPNOTSUPP;
switch (config->tx_type) {
@@ -3351,7 +3388,7 @@ int enetc_hwtstamp_get(struct net_device *ndev,
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- if (!IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK))
+ if (!enetc_ptp_clock_is_enabled(priv->si))
return -EOPNOTSUPP;
if (priv->active_offloads & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 62e8ee4d2f04..0ec010a7d640 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -54,6 +54,15 @@ struct enetc_tx_swbd {
u8 qbv_en:1;
};
+struct enetc_skb_cb {
+ u8 flag;
+ bool udp;
+ u16 correction_off;
+ u16 origin_tstamp_off;
+};
+
+#define ENETC_SKB_CB(skb) ((struct enetc_skb_cb *)((skb)->cb))
+
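
skb->cb is a fixed 48-byte scratch area, so a private control block like
enetc_skb_cb is only safe while it fits. A hedged sketch of the usual
compile-time guard (not part of this patch; sizeof_field() plus
static_assert() is the standard kernel idiom):

	/* hypothetical guard, placed next to the struct definition */
	static_assert(sizeof(struct enetc_skb_cb) <=
		      sizeof_field(struct sk_buff, cb));
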
struct enetc_lso_t {
bool ipv6;
bool tcp;
@@ -217,7 +226,7 @@ static inline union enetc_rx_bd *enetc_rxbd(struct enetc_bdr *rx_ring, int i)
{
int hw_idx = i;
- if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK) && rx_ring->ext_en)
+ if (rx_ring->ext_en)
hw_idx = 2 * i;
return &(((union enetc_rx_bd *)rx_ring->bd_base)[hw_idx]);
@@ -231,7 +240,7 @@ static inline void enetc_rxbd_next(struct enetc_bdr *rx_ring,
new_rxbd++;
- if (IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK) && rx_ring->ext_en)
+ if (rx_ring->ext_en)
new_rxbd++;
if (unlikely(++new_index == rx_ring->bd_count)) {
@@ -484,9 +493,6 @@ struct enetc_msg_cmd_set_primary_mac {
#define ENETC_CBDR_TIMEOUT 1000 /* usecs */
-/* PTP driver exports */
-extern int enetc_phc_index;
-
/* SI common */
u32 enetc_port_mac_rd(struct enetc_si *si, u32 reg);
void enetc_port_mac_wr(struct enetc_si *si, u32 reg, u32 val);
@@ -589,6 +595,14 @@ static inline void enetc_cbd_free_data_mem(struct enetc_si *si, int size,
void enetc_reset_ptcmsdur(struct enetc_hw *hw);
void enetc_set_ptcmsdur(struct enetc_hw *hw, u32 *queue_max_sdu);
+static inline bool enetc_ptp_clock_is_enabled(struct enetc_si *si)
+{
+ if (is_enetc_rev1(si))
+ return IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK);
+
+ return IS_ENABLED(CONFIG_PTP_NETC_V4_TIMER);
+}
+
#ifdef CONFIG_FSL_ENETC_QOS
int enetc_qos_query_caps(struct net_device *ndev, void *type_data);
int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_hw.h b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
index aa25b445d301..19bf0e89cdc2 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_hw.h
@@ -171,6 +171,12 @@
/* Port MAC 0/1 Pause Quanta Threshold Register */
#define ENETC4_PM_PAUSE_THRESH(mac) (0x5064 + (mac) * 0x400)
+#define ENETC4_PM_SINGLE_STEP(mac) (0x50c0 + (mac) * 0x400)
+#define PM_SINGLE_STEP_CH BIT(6)
+#define PM_SINGLE_STEP_OFFSET GENMASK(15, 7)
+#define PM_SINGLE_STEP_OFFSET_SET(o) FIELD_PREP(PM_SINGLE_STEP_OFFSET, o)
+#define PM_SINGLE_STEP_EN BIT(31)
+
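
The new PM_SINGLE_STEP_* fields follow the usual GENMASK()/FIELD_PREP()
pattern. A small standalone C sketch of the packing done by
enetc4_set_one_step_ts(), with the offset value made up:

	#include <stdint.h>
	#include <stdio.h>

	#define GENMASK32(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))

	int main(void)
	{
		uint32_t offset = 34;		/* made-up correction offset */
		uint32_t val = (1u << 31)	/* PM_SINGLE_STEP_EN */
			     | ((offset << 7) & GENMASK32(15, 7)) /* OFFSET */
			     | (1u << 6);	/* PM_SINGLE_STEP_CH (UDP) */

		printf("ENETC4_PM_SINGLE_STEP = 0x%08x\n", val);
		return 0;
	}
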
/* Port MAC 0 Interface Mode Control Register */
#define ENETC4_PM_IF_MODE(mac) (0x5300 + (mac) * 0x400)
#define PM_IF_MODE_IFMODE GENMASK(2, 0)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
index b3dc1afeefd1..82c443b28b15 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
@@ -569,6 +569,9 @@ static const struct net_device_ops enetc4_ndev_ops = {
.ndo_set_features = enetc4_pf_set_features,
.ndo_vlan_rx_add_vid = enetc_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = enetc_vlan_rx_del_vid,
+ .ndo_eth_ioctl = enetc_ioctl,
+ .ndo_hwtstamp_get = enetc_hwtstamp_get,
+ .ndo_hwtstamp_set = enetc_hwtstamp_set,
};
static struct phylink_pcs *
@@ -1016,8 +1019,7 @@ static int enetc4_pf_probe(struct pci_dev *pdev,
err = devm_add_action_or_reset(dev, enetc4_pci_remove, pdev);
if (err)
- return dev_err_probe(dev, err,
- "Add enetc4_pci_remove() action failed\n");
+ return err;
/* si is the private data. */
si = pci_get_drvdata(pdev);
@@ -1030,7 +1032,7 @@ static int enetc4_pf_probe(struct pci_dev *pdev,
err = enetc_get_driver_data(si);
if (err)
return dev_err_probe(dev, err,
- "Could not get VF driver data\n");
+ "Could not get PF driver data\n");
err = enetc4_pf_struct_init(si);
if (err)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index 961e76cd8489..71d052de669a 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -4,6 +4,9 @@
#include <linux/ethtool_netlink.h>
#include <linux/net_tstamp.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/ptp_clock_kernel.h>
+
#include "enetc.h"
static const u32 enetc_si_regs[] = {
@@ -877,23 +880,58 @@ static int enetc_set_coalesce(struct net_device *ndev,
return 0;
}
-static int enetc_get_ts_info(struct net_device *ndev,
- struct kernel_ethtool_ts_info *info)
+static int enetc_get_phc_index_by_pdev(struct enetc_si *si)
{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- int *phc_idx;
-
- phc_idx = symbol_get(enetc_phc_index);
- if (phc_idx) {
- info->phc_index = *phc_idx;
- symbol_put(enetc_phc_index);
+ struct pci_bus *bus = si->pdev->bus;
+ struct pci_dev *timer_pdev;
+ unsigned int devfn;
+ int phc_index;
+
+ switch (si->revision) {
+ case ENETC_REV_1_0:
+ devfn = PCI_DEVFN(0, 4);
+ break;
+ case ENETC_REV_4_1:
+ devfn = PCI_DEVFN(24, 0);
+ break;
+ default:
+ return -1;
}
- if (!IS_ENABLED(CONFIG_FSL_ENETC_PTP_CLOCK)) {
- info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
+ timer_pdev = pci_get_domain_bus_and_slot(pci_domain_nr(bus),
+ bus->number, devfn);
+ if (!timer_pdev)
+ return -1;
- return 0;
- }
+ phc_index = ptp_clock_index_by_dev(&timer_pdev->dev);
+ pci_dev_put(timer_pdev);
+
+ return phc_index;
+}
+
+static int enetc_get_phc_index(struct enetc_si *si)
+{
+ struct device_node *np = si->pdev->dev.of_node;
+ struct device_node *timer_np;
+ int phc_index;
+
+ if (!np)
+ return enetc_get_phc_index_by_pdev(si);
+
+ timer_np = of_parse_phandle(np, "ptp-timer", 0);
+ if (!timer_np)
+ return enetc_get_phc_index_by_pdev(si);
+
+ phc_index = ptp_clock_index_by_of_node(timer_np);
+ of_node_put(timer_np);
+
+ return phc_index;
+}
+
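
When there is no DT node or no "ptp-timer" phandle, the PHC is located by its
fixed PCI function instead. A tiny sketch of the PCI_DEVFN() arithmetic used
above (the macro body is reproduced from the kernel's pci.h):

	#include <stdio.h>

	#define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))

	int main(void)
	{
		/* ENETC rev 1.0 timer at 0.4, rev 4.1 timer at 24.0 */
		printf("rev1 devfn=0x%02x rev4.1 devfn=0x%02x\n",
		       PCI_DEVFN(0, 4), PCI_DEVFN(24, 0));
		return 0;
	}
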
+static void enetc_get_ts_generic_info(struct net_device *ndev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
@@ -908,6 +946,27 @@ static int enetc_get_ts_info(struct net_device *ndev,
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_ALL);
+}
+
+static int enetc_get_ts_info(struct net_device *ndev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_si *si = priv->si;
+
+ if (!enetc_ptp_clock_is_enabled(si))
+ goto timestamp_tx_sw;
+
+ info->phc_index = enetc_get_phc_index(si);
+ if (info->phc_index < 0)
+ goto timestamp_tx_sw;
+
+ enetc_get_ts_generic_info(ndev, info);
+
+ return 0;
+
+timestamp_tx_sw:
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
return 0;
}
@@ -1296,6 +1355,7 @@ const struct ethtool_ops enetc4_pf_ethtool_ops = {
.get_rxfh = enetc_get_rxfh,
.set_rxfh = enetc_set_rxfh,
.get_rxfh_fields = enetc_get_rxfh_fields,
+ .get_ts_info = enetc_get_ts_info,
};
void enetc_set_ethtool_ops(struct net_device *ndev)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 73763e8f4879..377c96325814 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -614,6 +614,7 @@ enum enetc_txbd_flags {
#define ENETC_TXBD_STATS_WIN BIT(7)
#define ENETC_TXBD_TXSTART_MASK GENMASK(24, 0)
#define ENETC_TXBD_FLAGS_OFFSET 24
+#define ENETC_TXBD_TSTAMP GENMASK(29, 0)
static inline __le32 enetc_txbd_set_tx_start(u64 tx_start, u8 flags)
{
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index f63a29e2e031..de0fb272c847 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -829,19 +829,29 @@ static int enetc_pf_register_with_ierb(struct pci_dev *pdev)
{
struct platform_device *ierb_pdev;
struct device_node *ierb_node;
+ int ret;
ierb_node = of_find_compatible_node(NULL, NULL,
"fsl,ls1028a-enetc-ierb");
- if (!ierb_node || !of_device_is_available(ierb_node))
+ if (!ierb_node)
return -ENODEV;
+ if (!of_device_is_available(ierb_node)) {
+ of_node_put(ierb_node);
+ return -ENODEV;
+ }
+
ierb_pdev = of_find_device_by_node(ierb_node);
of_node_put(ierb_node);
if (!ierb_pdev)
return -EPROBE_DEFER;
- return enetc_ierb_register_pf(ierb_pdev, pdev);
+ ret = enetc_ierb_register_pf(ierb_pdev, pdev);
+
+ put_device(&ierb_pdev->dev);
+
+ return ret;
}
static const struct enetc_si_ops enetc_psi_ops = {
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
index 5243fc031058..b8413d3b4f16 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
@@ -7,9 +7,6 @@
#include "enetc.h"
-int enetc_phc_index = -1;
-EXPORT_SYMBOL_GPL(enetc_phc_index);
-
static struct ptp_clock_info enetc_ptp_caps = {
.owner = THIS_MODULE,
.name = "ENETC PTP clock",
@@ -92,7 +89,6 @@ static int enetc_ptp_probe(struct pci_dev *pdev,
if (err)
goto err_no_clock;
- enetc_phc_index = ptp_qoriq->phc_index;
pci_set_drvdata(pdev, ptp_qoriq);
return 0;
@@ -118,7 +114,6 @@ static void enetc_ptp_remove(struct pci_dev *pdev)
{
struct ptp_qoriq *ptp_qoriq = pci_get_drvdata(pdev);
- enetc_phc_index = -1;
ptp_qoriq_free(ptp_qoriq);
pci_free_irq_vectors(pdev);
kfree(ptp_qoriq);
diff --git a/drivers/net/ethernet/freescale/enetc/ntmp.c b/drivers/net/ethernet/freescale/enetc/ntmp.c
index ba32c1bbd9e1..0c1d343253bf 100644
--- a/drivers/net/ethernet/freescale/enetc/ntmp.c
+++ b/drivers/net/ethernet/freescale/enetc/ntmp.c
@@ -52,24 +52,19 @@ int ntmp_init_cbdr(struct netc_cbdr *cbdr, struct device *dev,
cbdr->addr_base_align = PTR_ALIGN(cbdr->addr_base,
NTMP_BASE_ADDR_ALIGN);
- cbdr->next_to_clean = 0;
- cbdr->next_to_use = 0;
spin_lock_init(&cbdr->ring_lock);
+ cbdr->next_to_use = netc_read(cbdr->regs.pir);
+ cbdr->next_to_clean = netc_read(cbdr->regs.cir);
+
/* Step 1: Configure the base address of the Control BD Ring */
netc_write(cbdr->regs.bar0, lower_32_bits(cbdr->dma_base_align));
netc_write(cbdr->regs.bar1, upper_32_bits(cbdr->dma_base_align));
- /* Step 2: Configure the producer index register */
- netc_write(cbdr->regs.pir, cbdr->next_to_clean);
-
- /* Step 3: Configure the consumer index register */
- netc_write(cbdr->regs.cir, cbdr->next_to_use);
-
- /* Step4: Configure the number of BDs of the Control BD Ring */
+ /* Step 2: Configure the number of BDs of the Control BD Ring */
netc_write(cbdr->regs.lenr, cbdr->bd_num);
- /* Step 5: Enable the Control BD Ring */
+ /* Step 3: Enable the Control BD Ring */
netc_write(cbdr->regs.mr, NETC_CBDR_MR_EN);
return 0;
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 5c8fdcef759b..41e0d85d15da 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -348,10 +348,11 @@ struct bufdesc_ex {
* the skbuffer directly.
*/
+#define FEC_DRV_RESERVE_SPACE (XDP_PACKET_HEADROOM + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define FEC_ENET_XDP_HEADROOM (XDP_PACKET_HEADROOM)
#define FEC_ENET_RX_PAGES 256
-#define FEC_ENET_RX_FRSIZE (PAGE_SIZE - FEC_ENET_XDP_HEADROOM \
- - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define FEC_ENET_RX_FRSIZE (PAGE_SIZE - FEC_DRV_RESERVE_SPACE)
#define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
#define FEC_ENET_TX_FRSIZE 2048
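
With 4 KiB pages the reworked macros give the same frame size as before, since
FEC_DRV_RESERVE_SPACE is just the old two subtractions folded into one name. A
quick check (sizeof(struct skb_shared_info) is config-dependent; 320 bytes is
a typical 64-bit value, assumed here):

	#include <stdio.h>

	int main(void)
	{
		unsigned page = 4096;
		unsigned headroom = 256;  /* XDP_PACKET_HEADROOM */
		unsigned shinfo = 320;    /* SKB_DATA_ALIGN(shinfo), approx */

		printf("FEC_ENET_RX_FRSIZE = %u\n", page - headroom - shinfo);
		return 0;
	}
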
@@ -513,6 +514,9 @@ struct bufdesc_ex {
*/
#define FEC_QUIRK_HAS_MDIO_C45 BIT(24)
+/* Jumbo Frame support */
+#define FEC_QUIRK_JUMBO_FRAME BIT(25)
+
struct bufdesc_prop {
int qid;
/* Address of Rx and Tx buffers */
@@ -619,6 +623,9 @@ struct fec_enet_private {
unsigned int total_tx_ring_size;
unsigned int total_rx_ring_size;
+ unsigned int max_buf_size;
+ unsigned int pagepool_order;
+ unsigned int rx_frame_size;
struct platform_device *pdev;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 1383918f8a3f..1edcfaee6819 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -167,7 +167,8 @@ static const struct fec_devinfo fec_imx8qm_info = {
FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
- FEC_QUIRK_DELAYED_CLKS_SUPPORT | FEC_QUIRK_HAS_MDIO_C45,
+ FEC_QUIRK_DELAYED_CLKS_SUPPORT | FEC_QUIRK_HAS_MDIO_C45 |
+ FEC_QUIRK_JUMBO_FRAME,
};
static const struct fec_devinfo fec_s32v234_info = {
@@ -233,6 +234,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
* 2048 byte skbufs are allocated. However, alignment requirements
* varies between FEC variants. Worst case is 64, so round down by 64.
*/
+#define MAX_JUMBO_BUF_SIZE (round_down(16384 - FEC_DRV_RESERVE_SPACE - 64, 64))
#define PKT_MAXBUF_SIZE (round_down(2048 - 64, 64))
#define PKT_MINBUF_SIZE 64
@@ -253,9 +255,9 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
defined(CONFIG_ARM64)
-#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
+#define OPT_ARCH_HAS_MAX_FL 1
#else
-#define OPT_FRAME_SIZE 0
+#define OPT_ARCH_HAS_MAX_FL 0
#endif
/* FEC MII MMFR bits definition */
@@ -470,14 +472,14 @@ fec_enet_create_page_pool(struct fec_enet_private *fep,
{
struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
struct page_pool_params pp_params = {
- .order = 0,
+ .order = fep->pagepool_order,
.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
.pool_size = size,
.nid = dev_to_node(&fep->pdev->dev),
.dev = &fep->pdev->dev,
.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
.offset = FEC_ENET_XDP_HEADROOM,
- .max_len = FEC_ENET_RX_FRSIZE,
+ .max_len = fep->rx_frame_size,
};
int err;
@@ -1083,7 +1085,7 @@ static void fec_enet_enable_ring(struct net_device *ndev)
for (i = 0; i < fep->num_rx_queues; i++) {
rxq = fep->rx_queue[i];
writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
- writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
+ writel(fep->max_buf_size, fep->hwp + FEC_R_BUFF_SIZE(i));
/* enable DMA1/2 */
if (i)
@@ -1145,8 +1147,11 @@ static void
fec_restart(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- u32 rcntl = OPT_FRAME_SIZE | FEC_RCR_MII;
u32 ecntl = FEC_ECR_ETHEREN;
+ u32 rcntl = FEC_RCR_MII;
+
+ if (OPT_ARCH_HAS_MAX_FL)
+ rcntl |= (fep->netdev->mtu + ETH_HLEN + ETH_FCS_LEN) << 16;
if (fep->bufdesc_ex)
fec_ptp_save_state(fep);
@@ -1191,7 +1196,7 @@ fec_restart(struct net_device *ndev)
else
val &= ~FEC_RACC_OPTIONS;
writel(val, fep->hwp + FEC_RACC);
- writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
+ writel(min(fep->rx_frame_size, fep->max_buf_size), fep->hwp + FEC_FTRL);
}
#endif
@@ -1278,8 +1283,18 @@ fec_restart(struct net_device *ndev)
if (fep->quirks & FEC_QUIRK_ENET_MAC) {
/* enable ENET endian swap */
ecntl |= FEC_ECR_BYTESWP;
- /* enable ENET store and forward mode */
- writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK);
+
+ /* When Jumbo Frame is enabled, the FIFO may not be large enough
+ * to hold an entire frame. In such cases, if the MTU exceeds
+ * (PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN), configure the interface
+ * to operate in cut-through mode, triggered by the FIFO threshold.
+ * Otherwise, enable the ENET store-and-forward mode.
+ */
+ if ((fep->quirks & FEC_QUIRK_JUMBO_FRAME) &&
+ (ndev->mtu > (PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN)))
+ writel(0xF, fep->hwp + FEC_X_WMRK);
+ else
+ writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK);
}
if (fep->bufdesc_ex)
@@ -1780,7 +1795,7 @@ fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
* These get messed up if we get called due to a busy condition.
*/
bdp = rxq->bd.cur;
- xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq);
+ xdp_init_buff(&xdp, PAGE_SIZE << fep->pagepool_order, &rxq->xdp_rxq);
while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
@@ -1850,7 +1865,8 @@ fec_enet_rx_queue(struct net_device *ndev, u16 queue_id, int budget)
* include that when passing upstream as it messes up
* bridging applications.
*/
- skb = build_skb(page_address(page), PAGE_SIZE);
+ skb = build_skb(page_address(page),
+ PAGE_SIZE << fep->pagepool_order);
if (unlikely(!skb)) {
page_pool_recycle_direct(rxq->page_pool, page);
ndev->stats.rx_dropped++;
@@ -2363,7 +2379,8 @@ static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
*/
phy_dev = of_phy_find_device(fep->phy_node);
phy_reset_after_clk_enable(phy_dev);
- put_device(&phy_dev->mdio.dev);
+ if (phy_dev)
+ put_device(&phy_dev->mdio.dev);
}
}
@@ -4020,6 +4037,23 @@ static int fec_hwtstamp_set(struct net_device *ndev,
return fec_ptp_set(ndev, config, extack);
}
+static int fec_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int order;
+
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ order = get_order(new_mtu + ETH_HLEN + ETH_FCS_LEN
+ + FEC_DRV_RESERVE_SPACE);
+ fep->rx_frame_size = (PAGE_SIZE << order) - FEC_DRV_RESERVE_SPACE;
+ fep->pagepool_order = order;
+ WRITE_ONCE(ndev->mtu, new_mtu);
+
+ return 0;
+}
+
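
fec_change_mtu() sizes the page-pool order so one (possibly compound) page
holds the frame plus the driver reserve. A standalone sketch of the math for a
made-up 9000-byte jumbo MTU, assuming 4 KiB pages and an approximate reserve:

	#include <stdio.h>

	/* kernel get_order(): smallest n with (PAGE_SIZE << n) >= size */
	static int get_order(unsigned long size)
	{
		int n = 0;

		while ((4096UL << n) < size)
			n++;
		return n;
	}

	int main(void)
	{
		unsigned long reserve = 256 + 320; /* reserve, approx */
		int mtu = 9000;                    /* made-up jumbo MTU */
		int order = get_order(mtu + 14 + 4 + reserve);
						   /* + ETH_HLEN + ETH_FCS_LEN */

		printf("order=%d rx_frame_size=%lu\n",
		       order, (4096UL << order) - reserve);
		return 0;
	}
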
static const struct net_device_ops fec_netdev_ops = {
.ndo_open = fec_enet_open,
.ndo_stop = fec_enet_close,
@@ -4029,6 +4063,7 @@ static const struct net_device_ops fec_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = fec_timeout,
.ndo_set_mac_address = fec_set_mac_address,
+ .ndo_change_mtu = fec_change_mtu,
.ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_features = fec_set_features,
.ndo_bpf = fec_enet_bpf,
@@ -4559,7 +4594,15 @@ fec_probe(struct platform_device *pdev)
fec_enet_clk_enable(ndev, false);
pinctrl_pm_select_sleep_state(&pdev->dev);
- ndev->max_mtu = PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN;
+ fep->pagepool_order = 0;
+ fep->rx_frame_size = FEC_ENET_RX_FRSIZE;
+
+ if (fep->quirks & FEC_QUIRK_JUMBO_FRAME)
+ fep->max_buf_size = MAX_JUMBO_BUF_SIZE;
+ else
+ fep->max_buf_size = PKT_MAXBUF_SIZE;
+
+ ndev->max_mtu = fep->max_buf_size - ETH_HLEN - ETH_FCS_LEN;
ret = register_netdev(ndev);
if (ret)
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index a39fcea6a77a..f27ff625fe29 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -14,8 +14,6 @@
#include <linux/device.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
-#include <linux/phy_fixed.h>
-#include <linux/phylink.h>
#include <linux/etherdevice.h>
#include <linux/libfdt_env.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 577f9b1780ad..de88776dd2a2 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -479,10 +479,12 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
"missing 'reg' property in node %pOF\n",
tbi);
err = -EBUSY;
+ of_node_put(tbi);
goto error;
}
set_tbipa(*prop, pdev,
data->get_tbipa, priv->map, &res);
+ of_node_put(tbi);
}
}
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 28f53cf2a174..5fd1f7327680 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -1475,8 +1475,10 @@ static int gfar_get_ts_info(struct net_device *dev,
if (ptp_node) {
ptp_dev = of_find_device_by_node(ptp_node);
of_node_put(ptp_node);
- if (ptp_dev)
+ if (ptp_dev) {
ptp = platform_get_drvdata(ptp_dev);
+ put_device(&ptp_dev->dev);
+ }
}
if (ptp)
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
index ba83dbf4ed22..1966dba512f8 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
@@ -930,7 +930,8 @@ static void fun_get_rmon_stats(struct net_device *netdev,
}
static void fun_get_fec_stats(struct net_device *netdev,
- struct ethtool_fec_stats *stats)
+ struct ethtool_fec_stats *stats,
+ struct ethtool_fec_hist *hist)
{
const struct funeth_priv *fp = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
index 8f5021e59e0a..0e2b703c673a 100644
--- a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c
@@ -260,6 +260,11 @@ struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
.offset = xdp ? XDP_PACKET_HEADROOM : 0,
};
+ if (priv->header_split_enabled) {
+ pp.flags |= PP_FLAG_ALLOW_UNREADABLE_NETMEM;
+ pp.queue_idx = rx->q_num;
+ }
+
return page_pool_create(&pp);
}
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 1f411d7c4373..1be1b1ef31ee 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -2870,6 +2870,8 @@ static void gve_shutdown(struct pci_dev *pdev)
struct gve_priv *priv = netdev_priv(netdev);
bool was_up = netif_running(priv->dev);
+ netif_device_detach(netdev);
+
rtnl_lock();
netdev_lock(netdev);
if (was_up && gve_close(priv->dev)) {
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 7380c2b7a2d8..55393b784317 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -718,6 +718,24 @@ static int gve_rx_xsk_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
return 0;
}
+static void gve_dma_sync(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_buf_state_dqo *buf_state, u16 buf_len)
+{
+ struct gve_rx_slot_page_info *page_info = &buf_state->page_info;
+
+ if (rx->dqo.page_pool) {
+ page_pool_dma_sync_netmem_for_cpu(rx->dqo.page_pool,
+ page_info->netmem,
+ page_info->page_offset,
+ buf_len);
+ } else {
+ dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
+ page_info->page_offset +
+ page_info->pad,
+ buf_len, DMA_FROM_DEVICE);
+ }
+}
+
/* Returns 0 if descriptor is completed successfully.
* Returns -EINVAL if descriptor is invalid.
* Returns -ENOMEM if data cannot be copied to skb.
@@ -793,13 +811,18 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
rx->rx_hsplit_unsplit_pkt += unsplit;
rx->rx_hsplit_bytes += hdr_len;
u64_stats_update_end(&rx->statss);
+ } else if (!rx->ctx.skb_head && rx->dqo.page_pool &&
+ netmem_is_net_iov(buf_state->page_info.netmem)) {
+ /* When header split is disabled, the header lands in the packet
+ * buffer. If the packet buffer is a net_iov, it cannot be easily
+ * mapped into kernel space to access the header required to
+ * process the packet.
+ */
+ goto error;
}
/* Sync the portion of dma buffer for CPU to read. */
- dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
- buf_state->page_info.page_offset +
- buf_state->page_info.pad,
- buf_len, DMA_FROM_DEVICE);
+ gve_dma_sync(priv, rx, buf_state, buf_len);
/* Append to current skb if one exists. */
if (rx->ctx.skb_head) {
@@ -837,7 +860,9 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
u64_stats_update_end(&rx->statss);
}
- if (eop && buf_len <= priv->rx_copybreak) {
+ if (eop && buf_len <= priv->rx_copybreak &&
+ !(rx->dqo.page_pool &&
+ netmem_is_net_iov(buf_state->page_info.netmem))) {
rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
&buf_state->page_info, buf_len);
if (unlikely(!rx->ctx.skb_head))
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
index 503cfbfb4a8a..83cf75bf7a17 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c
@@ -53,9 +53,11 @@ static int hbg_reset_prepare(struct hbg_priv *priv, enum hbg_reset_type type)
{
int ret;
- ASSERT_RTNL();
+ if (test_and_set_bit(HBG_NIC_STATE_RESETTING, &priv->state))
+ return -EBUSY;
if (netif_running(priv->netdev)) {
+ clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
dev_warn(&priv->pdev->dev,
"failed to reset because port is up\n");
return -EBUSY;
@@ -64,7 +66,6 @@ static int hbg_reset_prepare(struct hbg_priv *priv, enum hbg_reset_type type)
netif_device_detach(priv->netdev);
priv->reset_type = type;
- set_bit(HBG_NIC_STATE_RESETTING, &priv->state);
clear_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
ret = hbg_hw_event_notify(priv, HBG_HW_EVENT_RESET);
if (ret) {
@@ -84,29 +85,26 @@ static int hbg_reset_done(struct hbg_priv *priv, enum hbg_reset_type type)
type != priv->reset_type)
return 0;
- ASSERT_RTNL();
-
- clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
ret = hbg_rebuild(priv);
if (ret) {
priv->stats.reset_fail_cnt++;
set_bit(HBG_NIC_STATE_RESET_FAIL, &priv->state);
+ clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
dev_err(&priv->pdev->dev, "failed to rebuild after reset\n");
return ret;
}
netif_device_attach(priv->netdev);
+ clear_bit(HBG_NIC_STATE_RESETTING, &priv->state);
dev_info(&priv->pdev->dev, "reset done\n");
return ret;
}
-/* must be protected by rtnl lock */
int hbg_reset(struct hbg_priv *priv)
{
int ret;
- ASSERT_RTNL();
ret = hbg_reset_prepare(priv, HBG_RESET_TYPE_FUNCTION);
if (ret)
return ret;
@@ -171,7 +169,6 @@ static void hbg_pci_err_reset_prepare(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct hbg_priv *priv = netdev_priv(netdev);
- rtnl_lock();
hbg_reset_prepare(priv, HBG_RESET_TYPE_FLR);
}
@@ -181,7 +178,6 @@ static void hbg_pci_err_reset_done(struct pci_dev *pdev)
struct hbg_priv *priv = netdev_priv(netdev);
hbg_reset_done(priv, HBG_RESET_TYPE_FLR);
- rtnl_unlock();
}
static const struct pci_error_handlers hbg_pci_err_handler = {
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
index 8cca8316ba40..d0aa0661ecd4 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
@@ -12,6 +12,8 @@
#define HBG_HW_EVENT_WAIT_TIMEOUT_US (2 * 1000 * 1000)
#define HBG_HW_EVENT_WAIT_INTERVAL_US (10 * 1000)
+#define HBG_MAC_LINK_WAIT_TIMEOUT_US (500 * 1000)
+#define HBG_MAC_LINK_WAIT_INTERVAL_US (5 * 1000)
/* little endian or big endian.
* ctrl means packet description, data means skb packet data
*/
@@ -228,6 +230,9 @@ void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr)
void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)
{
+ u32 link_status;
+ int ret;
+
hbg_hw_mac_enable(priv, HBG_STATUS_DISABLE);
hbg_reg_write_field(priv, HBG_REG_PORT_MODE_ADDR,
@@ -239,8 +244,14 @@ void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)
hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE);
- if (!hbg_reg_read_field(priv, HBG_REG_AN_NEG_STATE_ADDR,
- HBG_REG_AN_NEG_STATE_NP_LINK_OK_B))
+ /* wait MAC link up */
+ ret = readl_poll_timeout(priv->io_base + HBG_REG_AN_NEG_STATE_ADDR,
+ link_status,
+ FIELD_GET(HBG_REG_AN_NEG_STATE_NP_LINK_OK_B,
+ link_status),
+ HBG_MAC_LINK_WAIT_INTERVAL_US,
+ HBG_MAC_LINK_WAIT_TIMEOUT_US);
+ if (ret)
hbg_np_link_fail_task_schedule(priv);
}
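
The fix replaces a single racy read of the negotiation state with a bounded
poll (5 ms interval, 500 ms budget). A userspace sketch of the
readl_poll_timeout() shape, with a fake register standing in for
HBG_REG_AN_NEG_STATE_ADDR:

	#include <stdio.h>

	static unsigned fake_an_neg_state; /* stands in for the MMIO reg */

	static int poll_link_ok(unsigned interval_us, unsigned timeout_us)
	{
		unsigned waited = 0;

		for (;;) {
			if (waited >= 15000)	/* simulate link-up at 15 ms */
				fake_an_neg_state |= 0x1;
			if (fake_an_neg_state & 0x1) /* NP_LINK_OK, assumed */
				return 0;
			if (waited >= timeout_us)
				return -110;	/* -ETIMEDOUT */
			waited += interval_us;	/* usleep() in real code */
		}
	}

	int main(void)
	{
		printf("%d\n", poll_link_ok(5000, 500000));
		return 0;
	}
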
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
index 2e64dc1ab355..0b92a2e5e986 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c
@@ -417,7 +417,7 @@ static int hbg_pci_init(struct pci_dev *pdev)
priv->io_base = pcim_iomap_table(pdev)[0];
if (!priv->io_base)
- return dev_err_probe(dev, -ENOMEM, "failed to get io base\n");
+ return -ENOMEM;
pci_set_master(pdev);
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
index 8b7b476ed7fb..37791de47f6f 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
@@ -278,8 +278,7 @@ int hbg_mdio_init(struct hbg_priv *priv)
mdio_bus = devm_mdiobus_alloc(dev);
if (!mdio_bus)
- return dev_err_probe(dev, -ENOMEM,
- "failed to alloc MDIO bus\n");
+ return -ENOMEM;
mdio_bus->parent = dev;
mdio_bus->priv = priv;
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h
index 2883a5899ae2..8b6110599e10 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.h
@@ -29,7 +29,12 @@ static inline bool hbg_fifo_is_full(struct hbg_priv *priv, enum hbg_dir dir)
static inline u32 hbg_get_queue_used_num(struct hbg_ring *ring)
{
- return (ring->ntu + ring->len - ring->ntc) % ring->len;
+ u32 len = READ_ONCE(ring->len);
+
+ if (!len)
+ return 0;
+
+ return (READ_ONCE(ring->ntu) + len - READ_ONCE(ring->ntc)) % len;
}
netdev_tx_t hbg_net_start_xmit(struct sk_buff *skb, struct net_device *netdev);
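
The READ_ONCE() copies guard against a concurrent reset tearing down the
ring: len is sampled once, checked for zero, and reused, so the modulo can no
longer divide by zero or mix two snapshots. The occupancy formula itself is
the standard wrap-safe ring calculation, sketched below:

	#include <stdio.h>

	/* entries in flight in a ring of length len, producer ntu,
	 * consumer ntc (wrap-safe as long as len is non-zero)
	 */
	static unsigned ring_used(unsigned ntu, unsigned ntc, unsigned len)
	{
		return len ? (ntu + len - ntc) % len : 0;
	}

	int main(void)
	{
		printf("%u\n", ring_used(2, 60, 64)); /* wrapped case -> 6 */
		return 0;
	}
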
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 0255c8acb744..4cce4f4ba6b0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -843,7 +843,7 @@ static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd)
entry_dir = hns3_dbg_dentry[hns3_dbg_cmd[cmd].dentry].dentry;
max_queue_num = hns3_get_max_available_channels(handle);
- data = devm_kzalloc(&handle->pdev->dev, max_queue_num * sizeof(*data),
+ data = devm_kcalloc(&handle->pdev->dev, max_queue_num, sizeof(*data),
GFP_KERNEL);
if (!data)
return -ENOMEM;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index d5454e126c85..a5eefa28454c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -1659,7 +1659,8 @@ static void hns3_set_msglevel(struct net_device *netdev, u32 msg_level)
}
static void hns3_get_fec_stats(struct net_device *netdev,
- struct ethtool_fec_stats *fec_stats)
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
@@ -1927,6 +1928,31 @@ static int hns3_set_tx_spare_buf_size(struct net_device *netdev,
return ret;
}
+static int hns3_check_tx_copybreak(struct net_device *netdev, u32 copybreak)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+ if (copybreak < priv->min_tx_copybreak) {
+ netdev_err(netdev, "tx copybreak %u should be no less than %u!\n",
+ copybreak, priv->min_tx_copybreak);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int hns3_check_tx_spare_buf_size(struct net_device *netdev, u32 buf_size)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+ if (buf_size < priv->min_tx_spare_buf_size) {
+ netdev_err(netdev,
+ "tx spare buf size %u should be no less than %u!\n",
+ buf_size, priv->min_tx_spare_buf_size);
+ return -EINVAL;
+ }
+ return 0;
+}
+
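
Both helpers enforce the same pattern: reject a tunable below the device's
minimum before it is committed. A compact sketch of that check, with made-up
numbers:

	#include <stdio.h>

	static int check_min(const char *name, unsigned val, unsigned min)
	{
		if (val < min) {
			fprintf(stderr, "%s %u should be no less than %u!\n",
				name, val, min);
			return -22;	/* -EINVAL */
		}
		return 0;
	}

	int main(void)
	{
		printf("%d\n", check_min("tx copybreak", 128, 256));
		return 0;
	}
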
static int hns3_set_tunable(struct net_device *netdev,
const struct ethtool_tunable *tuna,
const void *data)
@@ -1943,6 +1969,10 @@ static int hns3_set_tunable(struct net_device *netdev,
switch (tuna->id) {
case ETHTOOL_TX_COPYBREAK:
+ ret = hns3_check_tx_copybreak(netdev, *(u32 *)data);
+ if (ret)
+ return ret;
+
priv->tx_copybreak = *(u32 *)data;
for (i = 0; i < h->kinfo.num_tqps; i++)
@@ -1957,6 +1987,10 @@ static int hns3_set_tunable(struct net_device *netdev,
break;
case ETHTOOL_TX_COPYBREAK_BUF_SIZE:
+ ret = hns3_check_tx_spare_buf_size(netdev, *(u32 *)data);
+ if (ret)
+ return ret;
+
old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size;
new_tx_spare_buf_size = *(u32 *)data;
netdev_info(netdev, "request to set tx spare buf size from %u to %u\n",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index f209a05e2033..9d34d28ff168 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -2182,8 +2182,8 @@ static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
-static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
- struct hclge_pkt_buf_alloc *buf_alloc)
+static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
{
#define COMPENSATE_BUFFER 0x3C00
#define COMPENSATE_HALF_MPS_NUM 5
@@ -12912,7 +12912,8 @@ static int __init hclge_init(void)
{
pr_debug("%s is initializing\n", HCLGE_NAME);
- hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
+ hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0,
+ HCLGE_NAME);
if (!hclge_wq) {
pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
return -ENOMEM;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
index 03e42512a2d5..300bc267a259 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
@@ -443,8 +443,9 @@ int hinic_health_reporters_create(struct hinic_devlink_priv *priv)
struct devlink *devlink = priv_to_devlink(priv);
priv->hw_fault_reporter =
- devlink_health_reporter_create(devlink, &hinic_hw_fault_reporter_ops,
- 0, priv);
+ devlink_health_reporter_create(devlink,
+ &hinic_hw_fault_reporter_ops,
+ priv);
if (IS_ERR(priv->hw_fault_reporter)) {
dev_warn(&priv->hwdev->hwif->pdev->dev, "Failed to create hw fault reporter, err: %ld\n",
PTR_ERR(priv->hw_fault_reporter));
@@ -452,8 +453,9 @@ int hinic_health_reporters_create(struct hinic_devlink_priv *priv)
}
priv->fw_fault_reporter =
- devlink_health_reporter_create(devlink, &hinic_fw_fault_reporter_ops,
- 0, priv);
+ devlink_health_reporter_create(devlink,
+ &hinic_fw_fault_reporter_ops,
+ priv);
if (IS_ERR(priv->fw_fault_reporter)) {
dev_warn(&priv->hwdev->hwif->pdev->dev, "Failed to create fw fault reporter, err: %ld\n",
PTR_ERR(priv->fw_fault_reporter));
diff --git a/drivers/net/ethernet/huawei/hinic3/Makefile b/drivers/net/ethernet/huawei/hinic3/Makefile
index 509dfbfb0e96..c3efa45a6a42 100644
--- a/drivers/net/ethernet/huawei/hinic3/Makefile
+++ b/drivers/net/ethernet/huawei/hinic3/Makefile
@@ -3,7 +3,9 @@
obj-$(CONFIG_HINIC3) += hinic3.o
-hinic3-objs := hinic3_common.o \
+hinic3-objs := hinic3_cmdq.o \
+ hinic3_common.o \
+ hinic3_eqs.o \
hinic3_hw_cfg.o \
hinic3_hw_comm.o \
hinic3_hwdev.o \
@@ -12,10 +14,12 @@ hinic3-objs := hinic3_common.o \
hinic3_lld.o \
hinic3_main.o \
hinic3_mbox.o \
+ hinic3_mgmt.o \
hinic3_netdev_ops.o \
hinic3_nic_cfg.o \
hinic3_nic_io.o \
hinic3_queue_common.o \
+ hinic3_rss.o \
hinic3_rx.o \
hinic3_tx.o \
hinic3_wq.o
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c
new file mode 100644
index 000000000000..ef539d1b69a3
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c
@@ -0,0 +1,915 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+
+#include "hinic3_cmdq.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+
+#define CMDQ_BUF_SIZE 2048
+#define CMDQ_WQEBB_SIZE 64
+
+#define CMDQ_CMD_TIMEOUT 5000
+#define CMDQ_ENABLE_WAIT_TIMEOUT 300
+
+#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK GENMASK_ULL(51, 0)
+#define CMDQ_CTXT_EQ_ID_MASK GENMASK_ULL(60, 53)
+#define CMDQ_CTXT_CEQ_ARM_MASK BIT_ULL(61)
+#define CMDQ_CTXT_CEQ_EN_MASK BIT_ULL(62)
+#define CMDQ_CTXT_HW_BUSY_BIT_MASK BIT_ULL(63)
+
+#define CMDQ_CTXT_WQ_BLOCK_PFN_MASK GENMASK_ULL(51, 0)
+#define CMDQ_CTXT_CI_MASK GENMASK_ULL(63, 52)
+#define CMDQ_CTXT_SET(val, member) \
+ FIELD_PREP(CMDQ_CTXT_##member##_MASK, val)
+
+#define CMDQ_WQE_HDR_BUFDESC_LEN_MASK GENMASK(7, 0)
+#define CMDQ_WQE_HDR_COMPLETE_FMT_MASK BIT(15)
+#define CMDQ_WQE_HDR_DATA_FMT_MASK BIT(22)
+#define CMDQ_WQE_HDR_COMPLETE_REQ_MASK BIT(23)
+#define CMDQ_WQE_HDR_COMPLETE_SECT_LEN_MASK GENMASK(28, 27)
+#define CMDQ_WQE_HDR_CTRL_LEN_MASK GENMASK(30, 29)
+#define CMDQ_WQE_HDR_HW_BUSY_BIT_MASK BIT(31)
+#define CMDQ_WQE_HDR_SET(val, member) \
+ FIELD_PREP(CMDQ_WQE_HDR_##member##_MASK, val)
+#define CMDQ_WQE_HDR_GET(val, member) \
+ FIELD_GET(CMDQ_WQE_HDR_##member##_MASK, le32_to_cpu(val))
+
+#define CMDQ_CTRL_PI_MASK GENMASK(15, 0)
+#define CMDQ_CTRL_CMD_MASK GENMASK(23, 16)
+#define CMDQ_CTRL_MOD_MASK GENMASK(28, 24)
+#define CMDQ_CTRL_HW_BUSY_BIT_MASK BIT(31)
+#define CMDQ_CTRL_SET(val, member) \
+ FIELD_PREP(CMDQ_CTRL_##member##_MASK, val)
+#define CMDQ_CTRL_GET(val, member) \
+ FIELD_GET(CMDQ_CTRL_##member##_MASK, val)
+
+#define CMDQ_WQE_ERRCODE_VAL_MASK GENMASK(30, 0)
+#define CMDQ_WQE_ERRCODE_GET(val, member) \
+ FIELD_GET(CMDQ_WQE_ERRCODE_##member##_MASK, le32_to_cpu(val))
+
+#define CMDQ_DB_INFO_HI_PROD_IDX_MASK GENMASK(7, 0)
+#define CMDQ_DB_INFO_SET(val, member) \
+ FIELD_PREP(CMDQ_DB_INFO_##member##_MASK, val)
+
+#define CMDQ_DB_HEAD_QUEUE_TYPE_MASK BIT(23)
+#define CMDQ_DB_HEAD_CMDQ_TYPE_MASK GENMASK(26, 24)
+#define CMDQ_DB_HEAD_SET(val, member) \
+ FIELD_PREP(CMDQ_DB_HEAD_##member##_MASK, val)
+
+#define CMDQ_CEQE_TYPE_MASK GENMASK(2, 0)
+#define CMDQ_CEQE_GET(val, member) \
+ FIELD_GET(CMDQ_CEQE_##member##_MASK, le32_to_cpu(val))
+
+#define CMDQ_WQE_HEADER(wqe) ((struct cmdq_header *)(wqe))
+#define CMDQ_WQE_COMPLETED(ctrl_info) \
+ CMDQ_CTRL_GET(le32_to_cpu(ctrl_info), HW_BUSY_BIT)
+
+#define CMDQ_PFN(addr) ((addr) >> 12)
+
+/* cmdq work queue's chip logical address table is up to 512B */
+#define CMDQ_WQ_CLA_SIZE 512
+
+/* Completion codes: send, direct sync, force stop */
+#define CMDQ_SEND_CMPT_CODE 10
+#define CMDQ_DIRECT_SYNC_CMPT_CODE 11
+#define CMDQ_FORCE_STOP_CMPT_CODE 12
+
+enum cmdq_data_format {
+ CMDQ_DATA_SGE = 0,
+ CMDQ_DATA_DIRECT = 1,
+};
+
+enum cmdq_ctrl_sect_len {
+ CMDQ_CTRL_SECT_LEN = 1,
+ CMDQ_CTRL_DIRECT_SECT_LEN = 2,
+};
+
+enum cmdq_bufdesc_len {
+ CMDQ_BUFDESC_LCMD_LEN = 2,
+ CMDQ_BUFDESC_SCMD_LEN = 3,
+};
+
+enum cmdq_completion_format {
+ CMDQ_COMPLETE_DIRECT = 0,
+ CMDQ_COMPLETE_SGE = 1,
+};
+
+enum cmdq_cmd_type {
+ CMDQ_CMD_DIRECT_RESP,
+ CMDQ_CMD_SGE_RESP,
+};
+
+#define CMDQ_WQE_NUM_WQEBBS 1
+
+static struct cmdq_wqe *cmdq_read_wqe(struct hinic3_wq *wq, u16 *ci)
+{
+ if (hinic3_wq_get_used(wq) == 0)
+ return NULL;
+
+ *ci = wq->cons_idx & wq->idx_mask;
+
+ return get_q_element(&wq->qpages, wq->cons_idx, NULL);
+}
+
+struct hinic3_cmd_buf *hinic3_alloc_cmd_buf(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cmd_buf *cmd_buf;
+ struct hinic3_cmdqs *cmdqs;
+
+ cmdqs = hwdev->cmdqs;
+
+ cmd_buf = kmalloc(sizeof(*cmd_buf), GFP_ATOMIC);
+ if (!cmd_buf)
+ return NULL;
+
+ cmd_buf->buf = dma_pool_alloc(cmdqs->cmd_buf_pool, GFP_ATOMIC,
+ &cmd_buf->dma_addr);
+ if (!cmd_buf->buf) {
+ dev_err(hwdev->dev, "Failed to allocate cmdq cmd buf from the pool\n");
+ goto err_free_cmd_buf;
+ }
+
+ cmd_buf->size = cpu_to_le16(CMDQ_BUF_SIZE);
+ refcount_set(&cmd_buf->ref_cnt, 1);
+
+ return cmd_buf;
+
+err_free_cmd_buf:
+ kfree(cmd_buf);
+
+ return NULL;
+}
+
+void hinic3_free_cmd_buf(struct hinic3_hwdev *hwdev,
+ struct hinic3_cmd_buf *cmd_buf)
+{
+ struct hinic3_cmdqs *cmdqs;
+
+ if (!refcount_dec_and_test(&cmd_buf->ref_cnt))
+ return;
+
+ cmdqs = hwdev->cmdqs;
+
+ dma_pool_free(cmdqs->cmd_buf_pool, cmd_buf->buf, cmd_buf->dma_addr);
+ kfree(cmd_buf);
+}
+
+static void cmdq_clear_cmd_buf(struct hinic3_cmdq_cmd_info *cmd_info,
+ struct hinic3_hwdev *hwdev)
+{
+ if (cmd_info->buf_in) {
+ hinic3_free_cmd_buf(hwdev, cmd_info->buf_in);
+ cmd_info->buf_in = NULL;
+ }
+}
+
+static void clear_wqe_complete_bit(struct hinic3_cmdq *cmdq,
+ struct cmdq_wqe *wqe, u16 ci)
+{
+ struct cmdq_header *hdr = CMDQ_WQE_HEADER(wqe);
+ __le32 header_info = hdr->header_info;
+ enum cmdq_data_format df;
+ struct cmdq_ctrl *ctrl;
+
+ df = CMDQ_WQE_HDR_GET(header_info, DATA_FMT);
+ if (df == CMDQ_DATA_SGE)
+ ctrl = &wqe->wqe_lcmd.ctrl;
+ else
+ ctrl = &wqe->wqe_scmd.ctrl;
+
+ /* clear HW busy bit */
+ ctrl->ctrl_info = 0;
+ cmdq->cmd_infos[ci].cmd_type = HINIC3_CMD_TYPE_NONE;
+ wmb(); /* ensure the wqe is cleared before updating ci */
+ hinic3_wq_put_wqebbs(&cmdq->wq, CMDQ_WQE_NUM_WQEBBS);
+}
+
+static void cmdq_update_cmd_status(struct hinic3_cmdq *cmdq, u16 prod_idx,
+ struct cmdq_wqe *wqe)
+{
+ struct hinic3_cmdq_cmd_info *cmd_info;
+ struct cmdq_wqe_lcmd *wqe_lcmd;
+ __le32 status_info;
+
+ wqe_lcmd = &wqe->wqe_lcmd;
+ cmd_info = &cmdq->cmd_infos[prod_idx];
+ if (cmd_info->errcode) {
+ status_info = wqe_lcmd->status.status_info;
+ *cmd_info->errcode = CMDQ_WQE_ERRCODE_GET(status_info, VAL);
+ }
+
+ if (cmd_info->direct_resp)
+ *cmd_info->direct_resp = wqe_lcmd->completion.resp.direct.val;
+}
+
+static void cmdq_sync_cmd_handler(struct hinic3_cmdq *cmdq,
+ struct cmdq_wqe *wqe, u16 ci)
+{
+ spin_lock(&cmdq->cmdq_lock);
+ cmdq_update_cmd_status(cmdq, ci, wqe);
+ if (cmdq->cmd_infos[ci].cmpt_code) {
+ *cmdq->cmd_infos[ci].cmpt_code = CMDQ_DIRECT_SYNC_CMPT_CODE;
+ cmdq->cmd_infos[ci].cmpt_code = NULL;
+ }
+
+ /* Ensure that completion code has been updated before updating done */
+ smp_wmb();
+ if (cmdq->cmd_infos[ci].done) {
+ complete(cmdq->cmd_infos[ci].done);
+ cmdq->cmd_infos[ci].done = NULL;
+ }
+ spin_unlock(&cmdq->cmdq_lock);
+
+ cmdq_clear_cmd_buf(&cmdq->cmd_infos[ci], cmdq->hwdev);
+ clear_wqe_complete_bit(cmdq, wqe, ci);
+}
+
+void hinic3_cmdq_ceq_handler(struct hinic3_hwdev *hwdev, __le32 ceqe_data)
+{
+ enum hinic3_cmdq_type cmdq_type = CMDQ_CEQE_GET(ceqe_data, TYPE);
+ struct hinic3_cmdqs *cmdqs = hwdev->cmdqs;
+ struct hinic3_cmdq_cmd_info *cmd_info;
+ struct cmdq_wqe_lcmd *wqe_lcmd;
+ struct hinic3_cmdq *cmdq;
+ struct cmdq_wqe *wqe;
+ __le32 ctrl_info;
+ u16 ci;
+
+ if (unlikely(cmdq_type >= ARRAY_SIZE(cmdqs->cmdq)))
+ return;
+
+ cmdq = &cmdqs->cmdq[cmdq_type];
+ while ((wqe = cmdq_read_wqe(&cmdq->wq, &ci)) != NULL) {
+ cmd_info = &cmdq->cmd_infos[ci];
+ switch (cmd_info->cmd_type) {
+ case HINIC3_CMD_TYPE_NONE:
+ return;
+ case HINIC3_CMD_TYPE_TIMEOUT:
+ dev_warn(hwdev->dev, "Cmdq timeout, q_id: %u, ci: %u\n",
+ cmdq_type, ci);
+ fallthrough;
+ case HINIC3_CMD_TYPE_FAKE_TIMEOUT:
+ cmdq_clear_cmd_buf(cmd_info, hwdev);
+ clear_wqe_complete_bit(cmdq, wqe, ci);
+ break;
+ default:
+ /* only the arm bit uses an scmd wqe,
+ * all other wqes are lcmd
+ */
+ wqe_lcmd = &wqe->wqe_lcmd;
+ ctrl_info = wqe_lcmd->ctrl.ctrl_info;
+ if (!CMDQ_WQE_COMPLETED(ctrl_info))
+ return;
+
+ dma_rmb();
+ /* For FORCE_STOP cmd_type, we also need to wait for
+ * the firmware processing to complete to prevent the
+ * firmware from accessing the released cmd_buf
+ */
+ if (cmd_info->cmd_type == HINIC3_CMD_TYPE_FORCE_STOP) {
+ cmdq_clear_cmd_buf(cmd_info, hwdev);
+ clear_wqe_complete_bit(cmdq, wqe, ci);
+ } else {
+ cmdq_sync_cmd_handler(cmdq, wqe, ci);
+ }
+
+ break;
+ }
+ }
+}
+
+static int wait_cmdqs_enable(struct hinic3_cmdqs *cmdqs)
+{
+ unsigned long end;
+
+ end = jiffies + msecs_to_jiffies(CMDQ_ENABLE_WAIT_TIMEOUT);
+ do {
+ if (cmdqs->status & HINIC3_CMDQ_ENABLE)
+ return 0;
+ usleep_range(1000, 2000);
+ } while (time_before(jiffies, end) && !cmdqs->disable_flag);
+
+ cmdqs->disable_flag = 1;
+
+ return -EBUSY;
+}
+
+static void cmdq_set_completion(struct cmdq_completion *complete,
+ struct hinic3_cmd_buf *buf_out)
+{
+ struct hinic3_sge *sge = &complete->resp.sge;
+
+ hinic3_set_sge(sge, buf_out->dma_addr, cpu_to_le32(CMDQ_BUF_SIZE));
+}
+
+static struct cmdq_wqe *cmdq_get_wqe(struct hinic3_wq *wq, u16 *pi)
+{
+ if (!hinic3_wq_free_wqebbs(wq))
+ return NULL;
+
+ return hinic3_wq_get_one_wqebb(wq, pi);
+}
+
+static void cmdq_set_lcmd_bufdesc(struct cmdq_wqe_lcmd *wqe,
+ struct hinic3_cmd_buf *buf_in)
+{
+ hinic3_set_sge(&wqe->buf_desc.sge, buf_in->dma_addr,
+ (__force __le32)buf_in->size);
+}
+
+static void cmdq_set_db(struct hinic3_cmdq *cmdq,
+ enum hinic3_cmdq_type cmdq_type, u16 prod_idx)
+{
+ u8 __iomem *db_base = cmdq->hwdev->cmdqs->cmdqs_db_base;
+ u16 db_ofs = (prod_idx & 0xFF) << 3;
+ struct cmdq_db db;
+
+ db.db_info = cpu_to_le32(CMDQ_DB_INFO_SET(prod_idx >> 8, HI_PROD_IDX));
+ db.db_head = cpu_to_le32(CMDQ_DB_HEAD_SET(1, QUEUE_TYPE) |
+ CMDQ_DB_HEAD_SET(cmdq_type, CMDQ_TYPE));
+ writeq(*(u64 *)&db, db_base + db_ofs);
+}
+
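
cmdq_set_db() splits the producer index across the doorbell: the low 8 bits
choose one of 256 eight-byte doorbell slots, and the high bits travel in
db_info's HI_PROD_IDX field. A small sketch of that split, with a made-up
index:

	#include <stdio.h>

	int main(void)
	{
		unsigned prod_idx = 0x1a7;	/* made-up producer index */

		printf("db_ofs=0x%03x hi_prod_idx=0x%02x\n",
		       (prod_idx & 0xFF) << 3, prod_idx >> 8);
		return 0;
	}
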
+static void cmdq_wqe_fill(struct cmdq_wqe *hw_wqe,
+ const struct cmdq_wqe *shadow_wqe)
+{
+ const struct cmdq_header *src = (struct cmdq_header *)shadow_wqe;
+ struct cmdq_header *dst = (struct cmdq_header *)hw_wqe;
+ size_t len;
+
+ len = sizeof(struct cmdq_wqe) - sizeof(struct cmdq_header);
+ memcpy(dst + 1, src + 1, len);
+ /* ensure the wqe body is written before the header is updated */
+ wmb();
+ WRITE_ONCE(*dst, *src);
+}
+
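
cmdq_wqe_fill() publishes the WQE in two steps: copy everything past the
header, barrier, then store the header last, so hardware polling the busy bit
can never see a valid header over a stale body. A userspace sketch of the same
publish pattern, using a C11 release store in place of wmb() + WRITE_ONCE():

	#include <stdatomic.h>
	#include <stdio.h>
	#include <string.h>

	struct wqe {
		_Atomic unsigned int hdr;	/* busy bit lives here */
		unsigned char body[60];
	};

	static void wqe_publish(struct wqe *hw, const unsigned char *body,
				unsigned int hdr)
	{
		memcpy(hw->body, body, sizeof(hw->body));
		/* release: body is visible before the header turns valid */
		atomic_store_explicit(&hw->hdr, hdr, memory_order_release);
	}

	int main(void)
	{
		unsigned char body[60] = { 0xab };
		struct wqe w = { 0 };

		wqe_publish(&w, body, 0x80000001u);
		printf("hdr=0x%08x body[0]=0x%02x\n",
		       atomic_load(&w.hdr), w.body[0]);
		return 0;
	}
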
+static void cmdq_prepare_wqe_ctrl(struct cmdq_wqe *wqe, u8 wrapped,
+ u8 mod, u8 cmd, u16 prod_idx,
+ enum cmdq_completion_format complete_format,
+ enum cmdq_data_format data_format,
+ enum cmdq_bufdesc_len buf_len)
+{
+ struct cmdq_header *hdr = CMDQ_WQE_HEADER(wqe);
+ enum cmdq_ctrl_sect_len ctrl_len;
+ struct cmdq_wqe_lcmd *wqe_lcmd;
+ struct cmdq_wqe_scmd *wqe_scmd;
+ struct cmdq_ctrl *ctrl;
+
+ if (data_format == CMDQ_DATA_SGE) {
+ wqe_lcmd = &wqe->wqe_lcmd;
+ wqe_lcmd->status.status_info = 0;
+ ctrl = &wqe_lcmd->ctrl;
+ ctrl_len = CMDQ_CTRL_SECT_LEN;
+ } else {
+ wqe_scmd = &wqe->wqe_scmd;
+ wqe_scmd->status.status_info = 0;
+ ctrl = &wqe_scmd->ctrl;
+ ctrl_len = CMDQ_CTRL_DIRECT_SECT_LEN;
+ }
+
+ ctrl->ctrl_info =
+ cpu_to_le32(CMDQ_CTRL_SET(prod_idx, PI) |
+ CMDQ_CTRL_SET(cmd, CMD) |
+ CMDQ_CTRL_SET(mod, MOD));
+
+ hdr->header_info =
+ cpu_to_le32(CMDQ_WQE_HDR_SET(buf_len, BUFDESC_LEN) |
+ CMDQ_WQE_HDR_SET(complete_format, COMPLETE_FMT) |
+ CMDQ_WQE_HDR_SET(data_format, DATA_FMT) |
+ CMDQ_WQE_HDR_SET(1, COMPLETE_REQ) |
+ CMDQ_WQE_HDR_SET(3, COMPLETE_SECT_LEN) |
+ CMDQ_WQE_HDR_SET(ctrl_len, CTRL_LEN) |
+ CMDQ_WQE_HDR_SET(wrapped, HW_BUSY_BIT));
+}
+
+static void cmdq_set_lcmd_wqe(struct cmdq_wqe *wqe,
+ enum cmdq_cmd_type cmd_type,
+ struct hinic3_cmd_buf *buf_in,
+ struct hinic3_cmd_buf *buf_out,
+ u8 wrapped, u8 mod, u8 cmd, u16 prod_idx)
+{
+ enum cmdq_completion_format complete_format = CMDQ_COMPLETE_DIRECT;
+ struct cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;
+
+ switch (cmd_type) {
+ case CMDQ_CMD_DIRECT_RESP:
+ wqe_lcmd->completion.resp.direct.val = 0;
+ break;
+ case CMDQ_CMD_SGE_RESP:
+ if (buf_out) {
+ complete_format = CMDQ_COMPLETE_SGE;
+ cmdq_set_completion(&wqe_lcmd->completion, buf_out);
+ }
+ break;
+ }
+
+ cmdq_prepare_wqe_ctrl(wqe, wrapped, mod, cmd, prod_idx, complete_format,
+ CMDQ_DATA_SGE, CMDQ_BUFDESC_LCMD_LEN);
+ cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in);
+}
+
+static int hinic3_cmdq_sync_timeout_check(struct hinic3_cmdq *cmdq,
+ struct cmdq_wqe *wqe, u16 pi)
+{
+ struct cmdq_wqe_lcmd *wqe_lcmd;
+ struct cmdq_ctrl *ctrl;
+ __le32 ctrl_info;
+
+ wqe_lcmd = &wqe->wqe_lcmd;
+ ctrl = &wqe_lcmd->ctrl;
+ ctrl_info = ctrl->ctrl_info;
+ if (!CMDQ_WQE_COMPLETED(ctrl_info)) {
+ dev_dbg(cmdq->hwdev->dev, "Cmdq sync command check busy bit not set\n");
+ return -EFAULT;
+ }
+ cmdq_update_cmd_status(cmdq, pi, wqe);
+
+ return 0;
+}
+
+static void clear_cmd_info(struct hinic3_cmdq_cmd_info *cmd_info,
+ const struct hinic3_cmdq_cmd_info *saved_cmd_info)
+{
+ if (cmd_info->errcode == saved_cmd_info->errcode)
+ cmd_info->errcode = NULL;
+
+ if (cmd_info->done == saved_cmd_info->done)
+ cmd_info->done = NULL;
+
+ if (cmd_info->direct_resp == saved_cmd_info->direct_resp)
+ cmd_info->direct_resp = NULL;
+}
+
+static int wait_cmdq_sync_cmd_completion(struct hinic3_cmdq *cmdq,
+ struct hinic3_cmdq_cmd_info *cmd_info,
+ struct hinic3_cmdq_cmd_info *saved_cmd_info,
+ u64 curr_msg_id, u16 curr_prod_idx,
+ struct cmdq_wqe *curr_wqe,
+ u32 timeout)
+{
+ ulong timeo = msecs_to_jiffies(timeout);
+ int err;
+
+ if (wait_for_completion_timeout(saved_cmd_info->done, timeo))
+ return 0;
+
+ spin_lock_bh(&cmdq->cmdq_lock);
+ if (cmd_info->cmpt_code == saved_cmd_info->cmpt_code)
+ cmd_info->cmpt_code = NULL;
+
+ if (*saved_cmd_info->cmpt_code == CMDQ_DIRECT_SYNC_CMPT_CODE) {
+ dev_dbg(cmdq->hwdev->dev, "Cmdq direct sync command has been completed\n");
+ spin_unlock_bh(&cmdq->cmdq_lock);
+ return 0;
+ }
+
+ if (curr_msg_id == cmd_info->cmdq_msg_id) {
+ err = hinic3_cmdq_sync_timeout_check(cmdq, curr_wqe,
+ curr_prod_idx);
+ if (err)
+ cmd_info->cmd_type = HINIC3_CMD_TYPE_TIMEOUT;
+ else
+ cmd_info->cmd_type = HINIC3_CMD_TYPE_FAKE_TIMEOUT;
+ } else {
+ err = -ETIMEDOUT;
+ dev_err(cmdq->hwdev->dev,
+ "Cmdq sync command current msg id mismatch cmd_info msg id\n");
+ }
+
+ clear_cmd_info(cmd_info, saved_cmd_info);
+ spin_unlock_bh(&cmdq->cmdq_lock);
+
+ return err;
+}
+
+static int cmdq_sync_cmd_direct_resp(struct hinic3_cmdq *cmdq, u8 mod, u8 cmd,
+ struct hinic3_cmd_buf *buf_in,
+ __le64 *out_param)
+{
+ struct hinic3_cmdq_cmd_info *cmd_info, saved_cmd_info;
+ int cmpt_code = CMDQ_SEND_CMPT_CODE;
+ struct cmdq_wqe *curr_wqe, wqe = {};
+ struct hinic3_wq *wq = &cmdq->wq;
+ u16 curr_prod_idx, next_prod_idx;
+ struct completion done;
+ u64 curr_msg_id;
+ int errcode;
+ u8 wrapped;
+ int err;
+
+ spin_lock_bh(&cmdq->cmdq_lock);
+ curr_wqe = cmdq_get_wqe(wq, &curr_prod_idx);
+ if (!curr_wqe) {
+ spin_unlock_bh(&cmdq->cmdq_lock);
+ return -EBUSY;
+ }
+
+ wrapped = cmdq->wrapped;
+ next_prod_idx = curr_prod_idx + CMDQ_WQE_NUM_WQEBBS;
+ if (next_prod_idx >= wq->q_depth) {
+ cmdq->wrapped ^= 1;
+ next_prod_idx -= wq->q_depth;
+ }
+
+ cmd_info = &cmdq->cmd_infos[curr_prod_idx];
+ init_completion(&done);
+ refcount_inc(&buf_in->ref_cnt);
+ cmd_info->cmd_type = HINIC3_CMD_TYPE_DIRECT_RESP;
+ cmd_info->done = &done;
+ cmd_info->errcode = &errcode;
+ cmd_info->direct_resp = out_param;
+ cmd_info->cmpt_code = &cmpt_code;
+ cmd_info->buf_in = buf_in;
+ saved_cmd_info = *cmd_info;
+ cmdq_set_lcmd_wqe(&wqe, CMDQ_CMD_DIRECT_RESP, buf_in, NULL,
+ wrapped, mod, cmd, curr_prod_idx);
+
+ cmdq_wqe_fill(curr_wqe, &wqe);
+ (cmd_info->cmdq_msg_id)++;
+ curr_msg_id = cmd_info->cmdq_msg_id;
+ cmdq_set_db(cmdq, HINIC3_CMDQ_SYNC, next_prod_idx);
+ spin_unlock_bh(&cmdq->cmdq_lock);
+
+ err = wait_cmdq_sync_cmd_completion(cmdq, cmd_info, &saved_cmd_info,
+ curr_msg_id, curr_prod_idx,
+ curr_wqe, CMDQ_CMD_TIMEOUT);
+ if (err) {
+ dev_err(cmdq->hwdev->dev,
+ "Cmdq sync command timeout, mod: %u, cmd: %u, prod idx: 0x%x\n",
+ mod, cmd, curr_prod_idx);
+ err = -ETIMEDOUT;
+ }
+
+ if (cmpt_code == CMDQ_FORCE_STOP_CMPT_CODE) {
+ dev_dbg(cmdq->hwdev->dev,
+ "Force stop cmdq cmd, mod: %u, cmd: %u\n", mod, cmd);
+ err = -EAGAIN;
+ }
+
+ smp_rmb(); /* read error code after completion */
+
+ return err ? err : errcode;
+}
+
+int hinic3_cmdq_direct_resp(struct hinic3_hwdev *hwdev, u8 mod, u8 cmd,
+ struct hinic3_cmd_buf *buf_in, __le64 *out_param)
+{
+ struct hinic3_cmdqs *cmdqs;
+ int err;
+
+ cmdqs = hwdev->cmdqs;
+ err = wait_cmdqs_enable(cmdqs);
+ if (err) {
+ dev_err(hwdev->dev, "Cmdq is disabled\n");
+ return err;
+ }
+
+ err = cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC3_CMDQ_SYNC],
+ mod, cmd, buf_in, out_param);
+
+ return err;
+}
+
+static void cmdq_init_queue_ctxt(struct hinic3_hwdev *hwdev, u8 cmdq_id,
+ struct comm_cmdq_ctxt_info *ctxt_info)
+{
+ const struct hinic3_cmdqs *cmdqs;
+ u64 cmdq_first_block_paddr, pfn;
+ const struct hinic3_wq *wq;
+
+ cmdqs = hwdev->cmdqs;
+ wq = &cmdqs->cmdq[cmdq_id].wq;
+ pfn = CMDQ_PFN(hinic3_wq_get_first_wqe_page_addr(wq));
+
+ ctxt_info->curr_wqe_page_pfn =
+ cpu_to_le64(CMDQ_CTXT_SET(1, HW_BUSY_BIT) |
+ CMDQ_CTXT_SET(1, CEQ_EN) |
+ CMDQ_CTXT_SET(1, CEQ_ARM) |
+ CMDQ_CTXT_SET(0, EQ_ID) |
+ CMDQ_CTXT_SET(pfn, CURR_WQE_PAGE_PFN));
+
+ if (!hinic3_wq_is_0_level_cla(wq)) {
+ cmdq_first_block_paddr = cmdqs->wq_block_paddr;
+ pfn = CMDQ_PFN(cmdq_first_block_paddr);
+ }
+
+ ctxt_info->wq_block_pfn = cpu_to_le64(CMDQ_CTXT_SET(wq->cons_idx, CI) |
+ CMDQ_CTXT_SET(pfn, WQ_BLOCK_PFN));
+}
+
+static int init_cmdq(struct hinic3_cmdq *cmdq, struct hinic3_hwdev *hwdev,
+ enum hinic3_cmdq_type q_type)
+{
+ int err;
+
+ cmdq->cmdq_type = q_type;
+ cmdq->wrapped = 1;
+ cmdq->hwdev = hwdev;
+
+ spin_lock_init(&cmdq->cmdq_lock);
+
+ cmdq->cmd_infos = kcalloc(cmdq->wq.q_depth, sizeof(*cmdq->cmd_infos),
+ GFP_KERNEL);
+ if (!cmdq->cmd_infos) {
+ err = -ENOMEM;
+ return err;
+ }
+
+ return 0;
+}
+
+static int hinic3_set_cmdq_ctxt(struct hinic3_hwdev *hwdev, u8 cmdq_id)
+{
+ struct comm_cmd_set_cmdq_ctxt cmdq_ctxt = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ cmdq_init_queue_ctxt(hwdev, cmdq_id, &cmdq_ctxt.ctxt);
+ cmdq_ctxt.func_id = hinic3_global_func_id(hwdev);
+ cmdq_ctxt.cmdq_id = cmdq_id;
+
+ mgmt_msg_params_init_default(&msg_params, &cmdq_ctxt,
+ sizeof(cmdq_ctxt));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_SET_CMDQ_CTXT, &msg_params);
+ if (err || cmdq_ctxt.head.status) {
+ dev_err(hwdev->dev, "Failed to set cmdq ctxt, err: %d, status: 0x%x\n",
+ err, cmdq_ctxt.head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int hinic3_set_cmdq_ctxts(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cmdqs *cmdqs = hwdev->cmdqs;
+ u8 cmdq_type;
+ int err;
+
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
+ err = hinic3_set_cmdq_ctxt(hwdev, cmdq_type);
+ if (err)
+ return err;
+ }
+
+ cmdqs->status |= HINIC3_CMDQ_ENABLE;
+ cmdqs->disable_flag = 0;
+
+ return 0;
+}
+
+static int create_cmdq_wq(struct hinic3_hwdev *hwdev,
+ struct hinic3_cmdqs *cmdqs)
+{
+ u8 cmdq_type;
+ int err;
+
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
+ err = hinic3_wq_create(hwdev, &cmdqs->cmdq[cmdq_type].wq,
+ CMDQ_DEPTH, CMDQ_WQEBB_SIZE);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to create cmdq wq\n");
+ goto err_destroy_wq;
+ }
+ }
+
+ /* With 1-level Chip Logical Address (CLA), the wq page addresses of
+ * all cmdqs must be placed in a single wq block
+ */
+ if (!hinic3_wq_is_0_level_cla(&cmdqs->cmdq[HINIC3_CMDQ_SYNC].wq)) {
+ if (cmdqs->cmdq[HINIC3_CMDQ_SYNC].wq.qpages.num_pages >
+ CMDQ_WQ_CLA_SIZE / sizeof(u64)) {
+ err = -EINVAL;
+ dev_err(hwdev->dev,
+ "Cmdq number of wq pages exceeds limit: %lu\n",
+ CMDQ_WQ_CLA_SIZE / sizeof(u64));
+ goto err_destroy_wq;
+ }
+
+ cmdqs->wq_block_vaddr =
+ dma_alloc_coherent(hwdev->dev, HINIC3_MIN_PAGE_SIZE,
+ &cmdqs->wq_block_paddr, GFP_KERNEL);
+ if (!cmdqs->wq_block_vaddr) {
+ err = -ENOMEM;
+ goto err_destroy_wq;
+ }
+
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++)
+ memcpy((u8 *)cmdqs->wq_block_vaddr +
+ CMDQ_WQ_CLA_SIZE * cmdq_type,
+ cmdqs->cmdq[cmdq_type].wq.wq_block_vaddr,
+ cmdqs->cmdq[cmdq_type].wq.qpages.num_pages *
+ sizeof(__be64));
+ }
+
+ return 0;
+
+err_destroy_wq:
+ while (cmdq_type > 0) {
+ cmdq_type--;
+ hinic3_wq_destroy(hwdev, &cmdqs->cmdq[cmdq_type].wq);
+ }
+
+ return err;
+}
+
+static void destroy_cmdq_wq(struct hinic3_hwdev *hwdev,
+ struct hinic3_cmdqs *cmdqs)
+{
+ u8 cmdq_type;
+
+ if (cmdqs->wq_block_vaddr)
+ dma_free_coherent(hwdev->dev, HINIC3_MIN_PAGE_SIZE,
+ cmdqs->wq_block_vaddr, cmdqs->wq_block_paddr);
+
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++)
+ hinic3_wq_destroy(hwdev, &cmdqs->cmdq[cmdq_type].wq);
+}
+
+static int init_cmdqs(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cmdqs *cmdqs;
+
+ cmdqs = kzalloc(sizeof(*cmdqs), GFP_KERNEL);
+ if (!cmdqs)
+ return -ENOMEM;
+
+ hwdev->cmdqs = cmdqs;
+ cmdqs->hwdev = hwdev;
+ cmdqs->cmdq_num = hwdev->max_cmdq;
+
+ cmdqs->cmd_buf_pool = dma_pool_create("hinic3_cmdq", hwdev->dev,
+ CMDQ_BUF_SIZE, CMDQ_BUF_SIZE, 0);
+ if (!cmdqs->cmd_buf_pool) {
+ dev_err(hwdev->dev, "Failed to create cmdq buffer pool\n");
+ kfree(cmdqs);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void cmdq_flush_sync_cmd(struct hinic3_cmdq_cmd_info *cmd_info)
+{
+ if (cmd_info->cmd_type != HINIC3_CMD_TYPE_DIRECT_RESP)
+ return;
+
+ cmd_info->cmd_type = HINIC3_CMD_TYPE_FORCE_STOP;
+
+ if (cmd_info->cmpt_code &&
+ *cmd_info->cmpt_code == CMDQ_SEND_CMPT_CODE)
+ *cmd_info->cmpt_code = CMDQ_FORCE_STOP_CMPT_CODE;
+
+ if (cmd_info->done) {
+ complete(cmd_info->done);
+ cmd_info->done = NULL;
+ cmd_info->cmpt_code = NULL;
+ cmd_info->direct_resp = NULL;
+ cmd_info->errcode = NULL;
+ }
+}
+
+static void hinic3_cmdq_flush_cmd(struct hinic3_cmdq *cmdq)
+{
+ struct hinic3_cmdq_cmd_info *cmd_info;
+ u16 ci;
+
+ spin_lock_bh(&cmdq->cmdq_lock);
+ while (cmdq_read_wqe(&cmdq->wq, &ci)) {
+ hinic3_wq_put_wqebbs(&cmdq->wq, CMDQ_WQE_NUM_WQEBBS);
+ cmd_info = &cmdq->cmd_infos[ci];
+ if (cmd_info->cmd_type == HINIC3_CMD_TYPE_DIRECT_RESP)
+ cmdq_flush_sync_cmd(cmd_info);
+ }
+ spin_unlock_bh(&cmdq->cmdq_lock);
+}
+
+void hinic3_cmdq_flush_sync_cmd(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cmdq *cmdq;
+ u16 wqe_cnt, wqe_idx, i;
+ struct hinic3_wq *wq;
+
+ cmdq = &hwdev->cmdqs->cmdq[HINIC3_CMDQ_SYNC];
+ spin_lock_bh(&cmdq->cmdq_lock);
+ wq = &cmdq->wq;
+ wqe_cnt = hinic3_wq_get_used(wq);
+ for (i = 0; i < wqe_cnt; i++) {
+ wqe_idx = (wq->cons_idx + i) & wq->idx_mask;
+ cmdq_flush_sync_cmd(cmdq->cmd_infos + wqe_idx);
+ }
+ spin_unlock_bh(&cmdq->cmdq_lock);
+}
+
+static void hinic3_cmdq_reset_all_cmd_buf(struct hinic3_cmdq *cmdq)
+{
+ u16 i;
+
+ for (i = 0; i < cmdq->wq.q_depth; i++)
+ cmdq_clear_cmd_buf(&cmdq->cmd_infos[i], cmdq->hwdev);
+}
+
+int hinic3_reinit_cmdq_ctxts(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cmdqs *cmdqs = hwdev->cmdqs;
+ u8 cmdq_type;
+
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
+ hinic3_cmdq_flush_cmd(&cmdqs->cmdq[cmdq_type]);
+ hinic3_cmdq_reset_all_cmd_buf(&cmdqs->cmdq[cmdq_type]);
+ cmdqs->cmdq[cmdq_type].wrapped = 1;
+ hinic3_wq_reset(&cmdqs->cmdq[cmdq_type].wq);
+ }
+
+ return hinic3_set_cmdq_ctxts(hwdev);
+}
+
+int hinic3_cmdqs_init(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cmdqs *cmdqs;
+ void __iomem *db_base;
+ u8 cmdq_type;
+ int err;
+
+ err = init_cmdqs(hwdev);
+ if (err)
+ goto err_out;
+
+ cmdqs = hwdev->cmdqs;
+ err = create_cmdq_wq(hwdev, cmdqs);
+ if (err)
+ goto err_free_cmdqs;
+
+ err = hinic3_alloc_db_addr(hwdev, &db_base, NULL);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to allocate doorbell address\n");
+ goto err_destroy_cmdq_wq;
+ }
+ cmdqs->cmdqs_db_base = db_base;
+
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
+ err = init_cmdq(&cmdqs->cmdq[cmdq_type], hwdev, cmdq_type);
+ if (err) {
+ dev_err(hwdev->dev,
+ "Failed to initialize cmdq type: %d\n",
+ cmdq_type);
+ goto err_free_cmd_infos;
+ }
+ }
+
+ err = hinic3_set_cmdq_ctxts(hwdev);
+ if (err)
+ goto err_free_cmd_infos;
+
+ return 0;
+
+err_free_cmd_infos:
+ while (cmdq_type > 0) {
+ cmdq_type--;
+ kfree(cmdqs->cmdq[cmdq_type].cmd_infos);
+ }
+
+ hinic3_free_db_addr(hwdev, cmdqs->cmdqs_db_base);
+
+err_destroy_cmdq_wq:
+ destroy_cmdq_wq(hwdev, cmdqs);
+
+err_free_cmdqs:
+ dma_pool_destroy(cmdqs->cmd_buf_pool);
+ kfree(cmdqs);
+
+err_out:
+ return err;
+}
+
+void hinic3_cmdqs_free(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cmdqs *cmdqs = hwdev->cmdqs;
+ u8 cmdq_type;
+
+ cmdqs->status &= ~HINIC3_CMDQ_ENABLE;
+
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
+ hinic3_cmdq_flush_cmd(&cmdqs->cmdq[cmdq_type]);
+ hinic3_cmdq_reset_all_cmd_buf(&cmdqs->cmdq[cmdq_type]);
+ kfree(cmdqs->cmdq[cmdq_type].cmd_infos);
+ }
+
+ hinic3_free_db_addr(hwdev, cmdqs->cmdqs_db_base);
+ destroy_cmdq_wq(hwdev, cmdqs);
+ dma_pool_destroy(cmdqs->cmd_buf_pool);
+ kfree(cmdqs);
+}
+
+bool hinic3_cmdq_idle(struct hinic3_cmdq *cmdq)
+{
+ return hinic3_wq_get_used(&cmdq->wq) == 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.h b/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.h
new file mode 100644
index 000000000000..f99c386a2780
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_CMDQ_H_
+#define _HINIC3_CMDQ_H_
+
+#include <linux/dmapool.h>
+
+#include "hinic3_hw_intf.h"
+#include "hinic3_wq.h"
+
+#define CMDQ_DEPTH 4096
+
+struct cmdq_db {
+ __le32 db_head;
+ __le32 db_info;
+};
+
+/* hw defined cmdq wqe header */
+struct cmdq_header {
+ __le32 header_info;
+ __le32 saved_data;
+};
+
+struct cmdq_lcmd_bufdesc {
+ struct hinic3_sge sge;
+ __le64 rsvd2;
+ __le64 rsvd3;
+};
+
+struct cmdq_status {
+ __le32 status_info;
+};
+
+struct cmdq_ctrl {
+ __le32 ctrl_info;
+};
+
+struct cmdq_direct_resp {
+ __le64 val;
+ __le64 rsvd;
+};
+
+struct cmdq_completion {
+ union {
+ struct hinic3_sge sge;
+ struct cmdq_direct_resp direct;
+ } resp;
+};
+
+struct cmdq_wqe_scmd {
+ struct cmdq_header header;
+ __le64 rsvd3;
+ struct cmdq_status status;
+ struct cmdq_ctrl ctrl;
+ struct cmdq_completion completion;
+ __le32 rsvd10[6];
+};
+
+struct cmdq_wqe_lcmd {
+ struct cmdq_header header;
+ struct cmdq_status status;
+ struct cmdq_ctrl ctrl;
+ struct cmdq_completion completion;
+ struct cmdq_lcmd_bufdesc buf_desc;
+};
+
+struct cmdq_wqe {
+ union {
+ struct cmdq_wqe_scmd wqe_scmd;
+ struct cmdq_wqe_lcmd wqe_lcmd;
+ };
+};
+
+static_assert(sizeof(struct cmdq_wqe) == 64);
+
+enum hinic3_cmdq_type {
+ HINIC3_CMDQ_SYNC = 0,
+ HINIC3_MAX_CMDQ_TYPES = 4
+};
+
+enum hinic3_cmdq_status {
+ HINIC3_CMDQ_ENABLE = BIT(0),
+};
+
+enum hinic3_cmdq_cmd_type {
+ HINIC3_CMD_TYPE_NONE,
+ HINIC3_CMD_TYPE_DIRECT_RESP,
+ HINIC3_CMD_TYPE_FAKE_TIMEOUT,
+ HINIC3_CMD_TYPE_TIMEOUT,
+ HINIC3_CMD_TYPE_FORCE_STOP,
+};
+
+struct hinic3_cmd_buf {
+ void *buf;
+ dma_addr_t dma_addr;
+ __le16 size;
+ refcount_t ref_cnt;
+};
+
+struct hinic3_cmdq_cmd_info {
+ enum hinic3_cmdq_cmd_type cmd_type;
+ struct completion *done;
+ int *errcode;
+ /* completion code */
+ int *cmpt_code;
+ __le64 *direct_resp;
+ u64 cmdq_msg_id;
+ struct hinic3_cmd_buf *buf_in;
+};
+
+struct hinic3_cmdq {
+ struct hinic3_wq wq;
+ enum hinic3_cmdq_type cmdq_type;
+ u8 wrapped;
+ /* synchronize command submission with completions via event queue */
+ spinlock_t cmdq_lock;
+ struct hinic3_cmdq_cmd_info *cmd_infos;
+ struct hinic3_hwdev *hwdev;
+};
+
+struct hinic3_cmdqs {
+ struct hinic3_hwdev *hwdev;
+ struct hinic3_cmdq cmdq[HINIC3_MAX_CMDQ_TYPES];
+ struct dma_pool *cmd_buf_pool;
+ /* doorbell area */
+ u8 __iomem *cmdqs_db_base;
+
+ /* When a command queue uses multiple memory pages (1-level CLA), this
+ * block holds the aggregated indirection table for all command queues.
+ * It is not used for small cmdqs (0-level CLA).
+ */
+ dma_addr_t wq_block_paddr;
+ void *wq_block_vaddr;
+
+ u32 status;
+ u32 disable_flag;
+ u8 cmdq_num;
+};
+
+int hinic3_cmdqs_init(struct hinic3_hwdev *hwdev);
+void hinic3_cmdqs_free(struct hinic3_hwdev *hwdev);
+
+struct hinic3_cmd_buf *hinic3_alloc_cmd_buf(struct hinic3_hwdev *hwdev);
+void hinic3_free_cmd_buf(struct hinic3_hwdev *hwdev,
+ struct hinic3_cmd_buf *cmd_buf);
+void hinic3_cmdq_ceq_handler(struct hinic3_hwdev *hwdev, __le32 ceqe_data);
+
+int hinic3_cmdq_direct_resp(struct hinic3_hwdev *hwdev, u8 mod, u8 cmd,
+ struct hinic3_cmd_buf *buf_in, __le64 *out_param);
+
+void hinic3_cmdq_flush_sync_cmd(struct hinic3_hwdev *hwdev);
+int hinic3_reinit_cmdq_ctxts(struct hinic3_hwdev *hwdev);
+bool hinic3_cmdq_idle(struct hinic3_cmdq *cmdq);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_common.c b/drivers/net/ethernet/huawei/hinic3/hinic3_common.c
index 0aa42068728c..fe4778d152cf 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_common.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_common.c
@@ -3,6 +3,7 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
#include "hinic3_common.h"
@@ -51,3 +52,25 @@ void hinic3_dma_free_coherent_align(struct device *dev,
dma_free_coherent(dev, mem_align->real_size,
mem_align->ori_vaddr, mem_align->ori_paddr);
}
+
+int hinic3_wait_for_timeout(void *priv_data, wait_cpl_handler handler,
+ u32 wait_total_ms, u32 wait_once_us)
+{
+ enum hinic3_wait_return ret;
+ int err;
+
+ err = read_poll_timeout(handler, ret, ret == HINIC3_WAIT_PROCESS_CPL,
+ wait_once_us, wait_total_ms * USEC_PER_MSEC,
+ false, priv_data);
+
+ return err;
+}
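+
+/* Illustrative usage sketch for hinic3_wait_for_timeout() (not part of this
+ * patch): the handler polls a condition on priv_data and reports progress
+ * through its return value:
+ *
+ *	static enum hinic3_wait_return check_ready(void *priv_data)
+ *	{
+ *		struct my_ctx *ctx = priv_data;
+ *
+ *		return ctx->ready ? HINIC3_WAIT_PROCESS_CPL :
+ *				    HINIC3_WAIT_PROCESS_WAITING;
+ *	}
+ *
+ *	err = hinic3_wait_for_timeout(ctx, check_ready, 5000, USEC_PER_MSEC);
+ *
+ * "my_ctx", "ready" and "check_ready" are hypothetical names used only for
+ * the example.
+ */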
+
+/* Data provided to/by the cmdq is arranged in structs with little endian
+ * fields, but every dword (32 bits) must be swapped, since HW swaps it again
+ * when copying it from/to host memory.
+ */
+void hinic3_cmdq_buf_swab32(void *data, int len)
+{
+ swab32_array(data, len / sizeof(u32));
+}
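+
+/* Worked example (illustrative only): for an 8 byte buffer holding bytes
+ * 00 11 22 33 44 55 66 77, hinic3_cmdq_buf_swab32(data, 8) leaves
+ * 33 22 11 00 77 66 55 44 -- each 32 bit dword is byte swapped in place so
+ * that the HW's own per-dword swap restores the original layout.
+ */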
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_common.h b/drivers/net/ethernet/huawei/hinic3/hinic3_common.h
index bb795dace04c..a8fabfae90fb 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_common.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_common.h
@@ -18,10 +18,37 @@ struct hinic3_dma_addr_align {
dma_addr_t align_paddr;
};
+enum hinic3_wait_return {
+ HINIC3_WAIT_PROCESS_CPL = 0,
+ HINIC3_WAIT_PROCESS_WAITING = 1,
+};
+
+struct hinic3_sge {
+ __le32 hi_addr;
+ __le32 lo_addr;
+ __le32 len;
+ __le32 rsvd;
+};
+
+static inline void hinic3_set_sge(struct hinic3_sge *sge, dma_addr_t addr,
+ __le32 len)
+{
+ sge->hi_addr = cpu_to_le32(upper_32_bits(addr));
+ sge->lo_addr = cpu_to_le32(lower_32_bits(addr));
+ sge->len = len;
+ sge->rsvd = 0;
+}
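+
+/* Usage sketch (illustrative): describing a DMA buffer with an SGE, e.g.
+ * when building a cmdq buffer descriptor:
+ *
+ *	hinic3_set_sge(&sge, buf->dma_addr, cpu_to_le32(buf_len));
+ *
+ * "buf" and "buf_len" are hypothetical; the helper splits the 64 bit DMA
+ * address into hi/lo 32 bit little endian halves.
+ */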
+
int hinic3_dma_zalloc_coherent_align(struct device *dev, u32 size, u32 align,
gfp_t flag,
struct hinic3_dma_addr_align *mem_align);
void hinic3_dma_free_coherent_align(struct device *dev,
struct hinic3_dma_addr_align *mem_align);
+typedef enum hinic3_wait_return (*wait_cpl_handler)(void *priv_data);
+int hinic3_wait_for_timeout(void *priv_data, wait_cpl_handler handler,
+ u32 wait_total_ms, u32 wait_once_us);
+
+void hinic3_cmdq_buf_swab32(void *data, int len);
+
#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_csr.h b/drivers/net/ethernet/huawei/hinic3/hinic3_csr.h
new file mode 100644
index 000000000000..e7417e8efa99
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_csr.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_CSR_H_
+#define _HINIC3_CSR_H_
+
+#define HINIC3_CFG_REGS_FLAG 0x40000000
+#define HINIC3_REGS_FLAG_MASK 0x3FFFFFFF
+
+#define HINIC3_VF_CFG_REG_OFFSET 0x2000
+
+/* HW interface registers */
+#define HINIC3_CSR_FUNC_ATTR0_ADDR (HINIC3_CFG_REGS_FLAG + 0x0)
+#define HINIC3_CSR_FUNC_ATTR1_ADDR (HINIC3_CFG_REGS_FLAG + 0x4)
+#define HINIC3_CSR_FUNC_ATTR2_ADDR (HINIC3_CFG_REGS_FLAG + 0x8)
+#define HINIC3_CSR_FUNC_ATTR3_ADDR (HINIC3_CFG_REGS_FLAG + 0xC)
+#define HINIC3_CSR_FUNC_ATTR4_ADDR (HINIC3_CFG_REGS_FLAG + 0x10)
+#define HINIC3_CSR_FUNC_ATTR5_ADDR (HINIC3_CFG_REGS_FLAG + 0x14)
+#define HINIC3_CSR_FUNC_ATTR6_ADDR (HINIC3_CFG_REGS_FLAG + 0x18)
+
+#define HINIC3_FUNC_CSR_MAILBOX_DATA_OFF 0x80
+#define HINIC3_FUNC_CSR_MAILBOX_CONTROL_OFF (HINIC3_CFG_REGS_FLAG + 0x0100)
+#define HINIC3_FUNC_CSR_MAILBOX_INT_OFF (HINIC3_CFG_REGS_FLAG + 0x0104)
+#define HINIC3_FUNC_CSR_MAILBOX_RESULT_H_OFF (HINIC3_CFG_REGS_FLAG + 0x0108)
+#define HINIC3_FUNC_CSR_MAILBOX_RESULT_L_OFF (HINIC3_CFG_REGS_FLAG + 0x010C)
+
+#define HINIC3_CSR_DMA_ATTR_TBL_ADDR (HINIC3_CFG_REGS_FLAG + 0x380)
+#define HINIC3_CSR_DMA_ATTR_INDIR_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x390)
+
+/* MSI-X registers */
+#define HINIC3_CSR_FUNC_MSI_CLR_WR_ADDR (HINIC3_CFG_REGS_FLAG + 0x58)
+
+#define HINIC3_MSI_CLR_INDIR_RESEND_TIMER_CLR_MASK BIT(0)
+#define HINIC3_MSI_CLR_INDIR_INT_MSK_SET_MASK BIT(1)
+#define HINIC3_MSI_CLR_INDIR_INT_MSK_CLR_MASK BIT(2)
+#define HINIC3_MSI_CLR_INDIR_AUTO_MSK_SET_MASK BIT(3)
+#define HINIC3_MSI_CLR_INDIR_AUTO_MSK_CLR_MASK BIT(4)
+#define HINIC3_MSI_CLR_INDIR_SIMPLE_INDIR_IDX_MASK GENMASK(31, 22)
+#define HINIC3_MSI_CLR_INDIR_SET(val, member) \
+ FIELD_PREP(HINIC3_MSI_CLR_INDIR_##member##_MASK, val)
+
+/* EQ registers */
+#define HINIC3_AEQ_INDIR_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x210)
+#define HINIC3_CEQ_INDIR_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x290)
+
+#define HINIC3_EQ_INDIR_IDX_ADDR(type) \
+ ((type == HINIC3_AEQ) ? HINIC3_AEQ_INDIR_IDX_ADDR : \
+ HINIC3_CEQ_INDIR_IDX_ADDR)
+
+#define HINIC3_AEQ_MTT_OFF_BASE_ADDR (HINIC3_CFG_REGS_FLAG + 0x240)
+#define HINIC3_CEQ_MTT_OFF_BASE_ADDR (HINIC3_CFG_REGS_FLAG + 0x2C0)
+
+#define HINIC3_CSR_EQ_PAGE_OFF_STRIDE 8
+
+#define HINIC3_AEQ_HI_PHYS_ADDR_REG(pg_num) \
+ (HINIC3_AEQ_MTT_OFF_BASE_ADDR + (pg_num) * \
+ HINIC3_CSR_EQ_PAGE_OFF_STRIDE)
+
+#define HINIC3_AEQ_LO_PHYS_ADDR_REG(pg_num) \
+ (HINIC3_AEQ_MTT_OFF_BASE_ADDR + (pg_num) * \
+ HINIC3_CSR_EQ_PAGE_OFF_STRIDE + 4)
+
+#define HINIC3_CEQ_HI_PHYS_ADDR_REG(pg_num) \
+ (HINIC3_CEQ_MTT_OFF_BASE_ADDR + (pg_num) * \
+ HINIC3_CSR_EQ_PAGE_OFF_STRIDE)
+
+#define HINIC3_CEQ_LO_PHYS_ADDR_REG(pg_num) \
+ (HINIC3_CEQ_MTT_OFF_BASE_ADDR + (pg_num) * \
+ HINIC3_CSR_EQ_PAGE_OFF_STRIDE + 4)
+
+#define HINIC3_CSR_AEQ_CTRL_0_ADDR (HINIC3_CFG_REGS_FLAG + 0x200)
+#define HINIC3_CSR_AEQ_CTRL_1_ADDR (HINIC3_CFG_REGS_FLAG + 0x204)
+#define HINIC3_CSR_AEQ_PROD_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x20C)
+#define HINIC3_CSR_AEQ_CI_SIMPLE_INDIR_ADDR (HINIC3_CFG_REGS_FLAG + 0x50)
+
+#define HINIC3_CSR_CEQ_PROD_IDX_ADDR (HINIC3_CFG_REGS_FLAG + 0x28c)
+#define HINIC3_CSR_CEQ_CI_SIMPLE_INDIR_ADDR (HINIC3_CFG_REGS_FLAG + 0x54)
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c b/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c
new file mode 100644
index 000000000000..01686472985b
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c
@@ -0,0 +1,776 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/delay.h>
+
+#include "hinic3_csr.h"
+#include "hinic3_eqs.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+
+#define AEQ_CTRL_0_INTR_IDX_MASK GENMASK(9, 0)
+#define AEQ_CTRL_0_DMA_ATTR_MASK GENMASK(17, 12)
+#define AEQ_CTRL_0_PCI_INTF_IDX_MASK GENMASK(22, 20)
+#define AEQ_CTRL_0_INTR_MODE_MASK BIT(31)
+#define AEQ_CTRL_0_SET(val, member) \
+ FIELD_PREP(AEQ_CTRL_0_##member##_MASK, val)
+
+#define AEQ_CTRL_1_LEN_MASK GENMASK(20, 0)
+#define AEQ_CTRL_1_ELEM_SIZE_MASK GENMASK(25, 24)
+#define AEQ_CTRL_1_PAGE_SIZE_MASK GENMASK(31, 28)
+#define AEQ_CTRL_1_SET(val, member) \
+ FIELD_PREP(AEQ_CTRL_1_##member##_MASK, val)
+
+#define CEQ_CTRL_0_INTR_IDX_MASK GENMASK(9, 0)
+#define CEQ_CTRL_0_DMA_ATTR_MASK GENMASK(17, 12)
+#define CEQ_CTRL_0_LIMIT_KICK_MASK GENMASK(23, 20)
+#define CEQ_CTRL_0_PCI_INTF_IDX_MASK GENMASK(25, 24)
+#define CEQ_CTRL_0_PAGE_SIZE_MASK GENMASK(30, 27)
+#define CEQ_CTRL_0_INTR_MODE_MASK BIT(31)
+#define CEQ_CTRL_0_SET(val, member) \
+ FIELD_PREP(CEQ_CTRL_0_##member##_MASK, val)
+
+#define CEQ_CTRL_1_LEN_MASK GENMASK(19, 0)
+#define CEQ_CTRL_1_SET(val, member) \
+ FIELD_PREP(CEQ_CTRL_1_##member##_MASK, val)
+
+#define CEQE_TYPE_MASK GENMASK(25, 23)
+#define CEQE_TYPE(type) \
+ FIELD_GET(CEQE_TYPE_MASK, le32_to_cpu(type))
+
+#define CEQE_DATA_MASK GENMASK(25, 0)
+#define CEQE_DATA(data) ((data) & cpu_to_le32(CEQE_DATA_MASK))
+
+#define EQ_ELEM_DESC_TYPE_MASK GENMASK(6, 0)
+#define EQ_ELEM_DESC_SRC_MASK BIT(7)
+#define EQ_ELEM_DESC_SIZE_MASK GENMASK(15, 8)
+#define EQ_ELEM_DESC_WRAPPED_MASK BIT(31)
+#define EQ_ELEM_DESC_GET(val, member) \
+ FIELD_GET(EQ_ELEM_DESC_##member##_MASK, le32_to_cpu(val))
+
+#define EQ_CI_SIMPLE_INDIR_CI_MASK GENMASK(20, 0)
+#define EQ_CI_SIMPLE_INDIR_ARMED_MASK BIT(21)
+#define EQ_CI_SIMPLE_INDIR_AEQ_IDX_MASK GENMASK(31, 30)
+#define EQ_CI_SIMPLE_INDIR_CEQ_IDX_MASK GENMASK(31, 24)
+#define EQ_CI_SIMPLE_INDIR_SET(val, member) \
+ FIELD_PREP(EQ_CI_SIMPLE_INDIR_##member##_MASK, val)
+
+#define EQ_CI_SIMPLE_INDIR_REG_ADDR(eq) \
+ (((eq)->type == HINIC3_AEQ) ? \
+ HINIC3_CSR_AEQ_CI_SIMPLE_INDIR_ADDR : \
+ HINIC3_CSR_CEQ_CI_SIMPLE_INDIR_ADDR)
+
+#define EQ_PROD_IDX_REG_ADDR(eq) \
+ (((eq)->type == HINIC3_AEQ) ? \
+ HINIC3_CSR_AEQ_PROD_IDX_ADDR : HINIC3_CSR_CEQ_PROD_IDX_ADDR)
+
+#define EQ_HI_PHYS_ADDR_REG(type, pg_num) \
+ (((type) == HINIC3_AEQ) ? \
+ HINIC3_AEQ_HI_PHYS_ADDR_REG(pg_num) : \
+ HINIC3_CEQ_HI_PHYS_ADDR_REG(pg_num))
+
+#define EQ_LO_PHYS_ADDR_REG(type, pg_num) \
+ (((type) == HINIC3_AEQ) ? \
+ HINIC3_AEQ_LO_PHYS_ADDR_REG(pg_num) : \
+ HINIC3_CEQ_LO_PHYS_ADDR_REG(pg_num))
+
+#define EQ_MSIX_RESEND_TIMER_CLEAR 1
+
+#define HINIC3_EQ_MAX_PAGES(eq) \
+ ((eq)->type == HINIC3_AEQ ? \
+ HINIC3_AEQ_MAX_PAGES : HINIC3_CEQ_MAX_PAGES)
+
+#define HINIC3_TASK_PROCESS_EQE_LIMIT 1024
+#define HINIC3_EQ_UPDATE_CI_STEP 64
+#define HINIC3_EQS_WQ_NAME "hinic3_eqs"
+
+#define HINIC3_EQ_VALID_SHIFT 31
+#define HINIC3_EQ_WRAPPED(eq) \
+ ((eq)->wrapped << HINIC3_EQ_VALID_SHIFT)
+
+#define HINIC3_EQ_WRAPPED_SHIFT 20
+#define HINIC3_EQ_CONS_IDX(eq) \
+ ((eq)->cons_idx | ((eq)->wrapped << HINIC3_EQ_WRAPPED_SHIFT))
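+
+/* Example (illustrative): with cons_idx 5 and wrapped 1, HINIC3_EQ_CONS_IDX()
+ * yields 0x100005 -- the wrap bit is folded into bit 20 of the consumer index
+ * value reported to HW.
+ */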
+
+static const struct hinic3_aeq_elem *get_curr_aeq_elem(const struct hinic3_eq *eq)
+{
+ return get_q_element(&eq->qpages, eq->cons_idx, NULL);
+}
+
+static const __be32 *get_curr_ceq_elem(const struct hinic3_eq *eq)
+{
+ return get_q_element(&eq->qpages, eq->cons_idx, NULL);
+}
+
+int hinic3_aeq_register_cb(struct hinic3_hwdev *hwdev,
+ enum hinic3_aeq_type event,
+ hinic3_aeq_event_cb hwe_cb)
+{
+ struct hinic3_aeqs *aeqs;
+
+ aeqs = hwdev->aeqs;
+ aeqs->aeq_cb[event] = hwe_cb;
+ spin_lock_init(&aeqs->aeq_lock);
+
+ return 0;
+}
+
+void hinic3_aeq_unregister_cb(struct hinic3_hwdev *hwdev,
+ enum hinic3_aeq_type event)
+{
+ struct hinic3_aeqs *aeqs;
+
+ aeqs = hwdev->aeqs;
+
+ spin_lock_bh(&aeqs->aeq_lock);
+ aeqs->aeq_cb[event] = NULL;
+ spin_unlock_bh(&aeqs->aeq_lock);
+}
+
+int hinic3_ceq_register_cb(struct hinic3_hwdev *hwdev,
+ enum hinic3_ceq_event event,
+ hinic3_ceq_event_cb callback)
+{
+ struct hinic3_ceqs *ceqs;
+
+ ceqs = hwdev->ceqs;
+ ceqs->ceq_cb[event] = callback;
+ spin_lock_init(&ceqs->ceq_lock);
+
+ return 0;
+}
+
+void hinic3_ceq_unregister_cb(struct hinic3_hwdev *hwdev,
+ enum hinic3_ceq_event event)
+{
+ struct hinic3_ceqs *ceqs;
+
+ ceqs = hwdev->ceqs;
+
+ spin_lock_bh(&ceqs->ceq_lock);
+ ceqs->ceq_cb[event] = NULL;
+ spin_unlock_bh(&ceqs->ceq_lock);
+}
+
+/* Set consumer index in the hw. */
+static void set_eq_cons_idx(struct hinic3_eq *eq, u32 arm_state)
+{
+ u32 addr = EQ_CI_SIMPLE_INDIR_REG_ADDR(eq);
+ u32 eq_wrap_ci, val;
+
+ eq_wrap_ci = HINIC3_EQ_CONS_IDX(eq);
+ val = EQ_CI_SIMPLE_INDIR_SET(arm_state, ARMED);
+ if (eq->type == HINIC3_AEQ) {
+ val = val |
+ EQ_CI_SIMPLE_INDIR_SET(eq_wrap_ci, CI) |
+ EQ_CI_SIMPLE_INDIR_SET(eq->q_id, AEQ_IDX);
+ } else {
+ val = val |
+ EQ_CI_SIMPLE_INDIR_SET(eq_wrap_ci, CI) |
+ EQ_CI_SIMPLE_INDIR_SET(eq->q_id, CEQ_IDX);
+ }
+
+ hinic3_hwif_write_reg(eq->hwdev->hwif, addr, val);
+}
+
+static struct hinic3_ceqs *ceq_to_ceqs(const struct hinic3_eq *eq)
+{
+ return container_of(eq, struct hinic3_ceqs, ceq[eq->q_id]);
+}
+
+static void ceq_event_handler(struct hinic3_ceqs *ceqs, __le32 ceqe)
+{
+ enum hinic3_ceq_event event = CEQE_TYPE(ceqe);
+ struct hinic3_hwdev *hwdev = ceqs->hwdev;
+ __le32 ceqe_data = CEQE_DATA(ceqe);
+
+ if (event >= HINIC3_MAX_CEQ_EVENTS) {
+ dev_warn(hwdev->dev, "Ceq unknown event: %d, ceqe data: 0x%x\n",
+ event, ceqe_data);
+ return;
+ }
+
+ spin_lock_bh(&ceqs->ceq_lock);
+ if (ceqs->ceq_cb[event])
+ ceqs->ceq_cb[event](hwdev, ceqe_data);
+
+ spin_unlock_bh(&ceqs->ceq_lock);
+}
+
+static struct hinic3_aeqs *aeq_to_aeqs(const struct hinic3_eq *eq)
+{
+ return container_of(eq, struct hinic3_aeqs, aeq[eq->q_id]);
+}
+
+static void aeq_event_handler(struct hinic3_aeqs *aeqs, __le32 aeqe,
+ const struct hinic3_aeq_elem *aeqe_pos)
+{
+ struct hinic3_hwdev *hwdev = aeqs->hwdev;
+ u8 data[HINIC3_AEQE_DATA_SIZE], size;
+ enum hinic3_aeq_type event;
+ hinic3_aeq_event_cb hwe_cb;
+
+ if (EQ_ELEM_DESC_GET(aeqe, SRC))
+ return;
+
+ event = EQ_ELEM_DESC_GET(aeqe, TYPE);
+ if (event >= HINIC3_MAX_AEQ_EVENTS) {
+ dev_warn(hwdev->dev, "Aeq unknown event: %d\n", event);
+ return;
+ }
+
+ memcpy(data, aeqe_pos->aeqe_data, HINIC3_AEQE_DATA_SIZE);
+ swab32_array((u32 *)data, HINIC3_AEQE_DATA_SIZE / sizeof(u32));
+ size = EQ_ELEM_DESC_GET(aeqe, SIZE);
+
+ spin_lock_bh(&aeqs->aeq_lock);
+ hwe_cb = aeqs->aeq_cb[event];
+ if (hwe_cb)
+ hwe_cb(aeqs->hwdev, data, size);
+ spin_unlock_bh(&aeqs->aeq_lock);
+}
+
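+/* Drain up to HINIC3_TASK_PROCESS_EQE_LIMIT elements per invocation. Returns
+ * -EAGAIN when the limit is hit with events possibly still pending, in which
+ * case the caller leaves the EQ unarmed and the work is rescheduled.
+ */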
+static int aeq_irq_handler(struct hinic3_eq *eq)
+{
+ const struct hinic3_aeq_elem *aeqe_pos;
+ struct hinic3_aeqs *aeqs;
+ u32 i, eqe_cnt = 0;
+ __le32 aeqe;
+
+ aeqs = aeq_to_aeqs(eq);
+ for (i = 0; i < HINIC3_TASK_PROCESS_EQE_LIMIT; i++) {
+ aeqe_pos = get_curr_aeq_elem(eq);
+ aeqe = (__force __le32)swab32((__force __u32)aeqe_pos->desc);
+ /* HW toggles the wrapped bit each time it posts a new eq element */
+ if (EQ_ELEM_DESC_GET(aeqe, WRAPPED) == eq->wrapped)
+ return 0;
+
+ /* Prevent speculative reads from element */
+ dma_rmb();
+ aeq_event_handler(aeqs, aeqe, aeqe_pos);
+ eq->cons_idx++;
+ if (eq->cons_idx == eq->eq_len) {
+ eq->cons_idx = 0;
+ eq->wrapped = !eq->wrapped;
+ }
+
+ if (++eqe_cnt >= HINIC3_EQ_UPDATE_CI_STEP) {
+ eqe_cnt = 0;
+ set_eq_cons_idx(eq, HINIC3_EQ_NOT_ARMED);
+ }
+ }
+
+ return -EAGAIN;
+}
+
+static int ceq_irq_handler(struct hinic3_eq *eq)
+{
+ struct hinic3_ceqs *ceqs;
+ u32 eqe_cnt = 0;
+ __be32 ceqe_raw;
+ __le32 ceqe;
+ u32 i;
+
+ ceqs = ceq_to_ceqs(eq);
+ for (i = 0; i < HINIC3_TASK_PROCESS_EQE_LIMIT; i++) {
+ ceqe_raw = *get_curr_ceq_elem(eq);
+ ceqe = (__force __le32)swab32((__force __u32)ceqe_raw);
+
+ /* HW toggles the wrapped bit each time it posts a new eq element */
+ if (EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped)
+ return 0;
+
+ ceq_event_handler(ceqs, ceqe);
+ eq->cons_idx++;
+ if (eq->cons_idx == eq->eq_len) {
+ eq->cons_idx = 0;
+ eq->wrapped = !eq->wrapped;
+ }
+
+ if (++eqe_cnt >= HINIC3_EQ_UPDATE_CI_STEP) {
+ eqe_cnt = 0;
+ set_eq_cons_idx(eq, HINIC3_EQ_NOT_ARMED);
+ }
+ }
+
+ return -EAGAIN;
+}
+
+static void reschedule_aeq_handler(struct hinic3_eq *eq)
+{
+ struct hinic3_aeqs *aeqs = aeq_to_aeqs(eq);
+
+ queue_work(aeqs->workq, &eq->aeq_work);
+}
+
+static int eq_irq_handler(struct hinic3_eq *eq)
+{
+ int err;
+
+ if (eq->type == HINIC3_AEQ)
+ err = aeq_irq_handler(eq);
+ else
+ err = ceq_irq_handler(eq);
+
+ set_eq_cons_idx(eq, err ? HINIC3_EQ_NOT_ARMED :
+ HINIC3_EQ_ARMED);
+
+ return err;
+}
+
+static void aeq_irq_work(struct work_struct *work)
+{
+ struct hinic3_eq *eq = container_of(work, struct hinic3_eq, aeq_work);
+ int err;
+
+ err = eq_irq_handler(eq);
+ if (err)
+ reschedule_aeq_handler(eq);
+}
+
+static irqreturn_t aeq_interrupt(int irq, void *data)
+{
+ struct workqueue_struct *workq;
+ struct hinic3_eq *aeq = data;
+ struct hinic3_hwdev *hwdev;
+ struct hinic3_aeqs *aeqs;
+
+ aeqs = aeq_to_aeqs(aeq);
+ hwdev = aeq->hwdev;
+
+ /* clear resend timer cnt register */
+ workq = aeqs->workq;
+ hinic3_msix_intr_clear_resend_bit(hwdev, aeq->msix_entry_idx,
+ EQ_MSIX_RESEND_TIMER_CLEAR);
+ queue_work(workq, &aeq->aeq_work);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ceq_interrupt(int irq, void *data)
+{
+ struct hinic3_eq *ceq = data;
+ int err;
+
+ /* clear resend timer counters */
+ hinic3_msix_intr_clear_resend_bit(ceq->hwdev, ceq->msix_entry_idx,
+ EQ_MSIX_RESEND_TIMER_CLEAR);
+ err = eq_irq_handler(ceq);
+ if (err)
+ return IRQ_NONE;
+
+ return IRQ_HANDLED;
+}
+
+static int hinic3_set_ceq_ctrl_reg(struct hinic3_hwdev *hwdev, u16 q_id,
+ u32 ctrl0, u32 ctrl1)
+{
+ struct comm_cmd_set_ceq_ctrl_reg ceq_ctrl = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ ceq_ctrl.func_id = hinic3_global_func_id(hwdev);
+ ceq_ctrl.q_id = q_id;
+ ceq_ctrl.ctrl0 = ctrl0;
+ ceq_ctrl.ctrl1 = ctrl1;
+
+ mgmt_msg_params_init_default(&msg_params, &ceq_ctrl, sizeof(ceq_ctrl));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_SET_CEQ_CTRL_REG, &msg_params);
+ if (err || ceq_ctrl.head.status) {
+ dev_err(hwdev->dev, "Failed to set ceq %u ctrl reg, err: %d status: 0x%x\n",
+ q_id, err, ceq_ctrl.head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int set_eq_ctrls(struct hinic3_eq *eq)
+{
+ struct hinic3_hwif *hwif = eq->hwdev->hwif;
+ struct hinic3_queue_pages *qpages;
+ u8 pci_intf_idx, elem_size;
+ u32 mask, ctrl0, ctrl1;
+ u32 page_size_val;
+ int err;
+
+ qpages = &eq->qpages;
+ page_size_val = ilog2(qpages->page_size / HINIC3_MIN_PAGE_SIZE);
+ pci_intf_idx = hwif->attr.pci_intf_idx;
+
+ if (eq->type == HINIC3_AEQ) {
+ /* set ctrl0 using read-modify-write */
+ mask = AEQ_CTRL_0_INTR_IDX_MASK |
+ AEQ_CTRL_0_DMA_ATTR_MASK |
+ AEQ_CTRL_0_PCI_INTF_IDX_MASK |
+ AEQ_CTRL_0_INTR_MODE_MASK;
+ ctrl0 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_AEQ_CTRL_0_ADDR);
+ ctrl0 = (ctrl0 & ~mask) |
+ AEQ_CTRL_0_SET(eq->msix_entry_idx, INTR_IDX) |
+ AEQ_CTRL_0_SET(0, DMA_ATTR) |
+ AEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) |
+ AEQ_CTRL_0_SET(HINIC3_INTR_MODE_ARMED, INTR_MODE);
+ hinic3_hwif_write_reg(hwif, HINIC3_CSR_AEQ_CTRL_0_ADDR, ctrl0);
+
+ /* HW expects log2(number of 32 byte units). */
+ elem_size = qpages->elem_size_shift - 5;
+ ctrl1 = AEQ_CTRL_1_SET(eq->eq_len, LEN) |
+ AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) |
+ AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
+ hinic3_hwif_write_reg(hwif, HINIC3_CSR_AEQ_CTRL_1_ADDR, ctrl1);
+ } else {
+ ctrl0 = CEQ_CTRL_0_SET(eq->msix_entry_idx, INTR_IDX) |
+ CEQ_CTRL_0_SET(0, DMA_ATTR) |
+ CEQ_CTRL_0_SET(0, LIMIT_KICK) |
+ CEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) |
+ CEQ_CTRL_0_SET(page_size_val, PAGE_SIZE) |
+ CEQ_CTRL_0_SET(HINIC3_INTR_MODE_ARMED, INTR_MODE);
+
+ ctrl1 = CEQ_CTRL_1_SET(eq->eq_len, LEN);
+
+ /* set ceq ctrl reg through mgmt cpu */
+ err = hinic3_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, ctrl0,
+ ctrl1);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void ceq_elements_init(struct hinic3_eq *eq, u32 init_val)
+{
+ __be32 *ceqe;
+ u32 i;
+
+ for (i = 0; i < eq->eq_len; i++) {
+ ceqe = get_q_element(&eq->qpages, i, NULL);
+ *ceqe = cpu_to_be32(init_val);
+ }
+
+ wmb(); /* Ensure ceqe init writes complete before the eq is enabled */
+}
+
+static void aeq_elements_init(struct hinic3_eq *eq, u32 init_val)
+{
+ struct hinic3_aeq_elem *aeqe;
+ u32 i;
+
+ for (i = 0; i < eq->eq_len; i++) {
+ aeqe = get_q_element(&eq->qpages, i, NULL);
+ aeqe->desc = cpu_to_be32(init_val);
+ }
+
+ wmb(); /* Ensure aeqe init writes complete before the eq is enabled */
+}
+
+static void eq_elements_init(struct hinic3_eq *eq, u32 init_val)
+{
+ if (eq->type == HINIC3_AEQ)
+ aeq_elements_init(eq, init_val);
+ else
+ ceq_elements_init(eq, init_val);
+}
+
+static int alloc_eq_pages(struct hinic3_eq *eq)
+{
+ struct hinic3_hwif *hwif = eq->hwdev->hwif;
+ struct hinic3_queue_pages *qpages;
+ dma_addr_t page_paddr;
+ u32 reg, init_val;
+ u16 pg_idx;
+ int err;
+
+ qpages = &eq->qpages;
+ err = hinic3_queue_pages_alloc(eq->hwdev, qpages, HINIC3_MIN_PAGE_SIZE);
+ if (err)
+ return err;
+
+ for (pg_idx = 0; pg_idx < qpages->num_pages; pg_idx++) {
+ page_paddr = qpages->pages[pg_idx].align_paddr;
+ reg = EQ_HI_PHYS_ADDR_REG(eq->type, pg_idx);
+ hinic3_hwif_write_reg(hwif, reg, upper_32_bits(page_paddr));
+ reg = EQ_LO_PHYS_ADDR_REG(eq->type, pg_idx);
+ hinic3_hwif_write_reg(hwif, reg, lower_32_bits(page_paddr));
+ }
+
+ init_val = HINIC3_EQ_WRAPPED(eq);
+ eq_elements_init(eq, init_val);
+
+ return 0;
+}
+
+static void eq_calc_page_size_and_num(struct hinic3_eq *eq, u32 elem_size)
+{
+ u32 max_pages, min_page_size, page_size, total_size;
+
+ /* All values are powers of 2, so the multiplications and divisions
+ * below remain exact (no remainder); no complicated arithmetic is
+ * needed.
+ */
+ max_pages = HINIC3_EQ_MAX_PAGES(eq);
+ min_page_size = HINIC3_MIN_PAGE_SIZE;
+ total_size = eq->eq_len * elem_size;
+
+ if (total_size <= max_pages * min_page_size)
+ page_size = min_page_size;
+ else
+ page_size = total_size / max_pages;
+
+ hinic3_queue_pages_init(&eq->qpages, eq->eq_len, page_size, elem_size);
+}
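+
+/* Worked example for eq_calc_page_size_and_num() (illustrative): the default
+ * AEQ has eq_len 0x10000 (65536) elements of 64 bytes, so total_size is
+ * 4 MiB. That exceeds the capacity of HINIC3_AEQ_MAX_PAGES (4) minimum-size
+ * pages, so page_size becomes 4 MiB / 4 = 1 MiB.
+ */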
+
+static int request_eq_irq(struct hinic3_eq *eq)
+{
+ int err;
+
+ if (eq->type == HINIC3_AEQ) {
+ INIT_WORK(&eq->aeq_work, aeq_irq_work);
+ snprintf(eq->irq_name, sizeof(eq->irq_name),
+ "hinic3_aeq%u@pci:%s", eq->q_id,
+ pci_name(eq->hwdev->pdev));
+ err = request_irq(eq->irq_id, aeq_interrupt, 0,
+ eq->irq_name, eq);
+ } else {
+ snprintf(eq->irq_name, sizeof(eq->irq_name),
+ "hinic3_ceq%u@pci:%s", eq->q_id,
+ pci_name(eq->hwdev->pdev));
+ err = request_threaded_irq(eq->irq_id, NULL, ceq_interrupt,
+ IRQF_ONESHOT, eq->irq_name, eq);
+ }
+
+ return err;
+}
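+
+/* Design note on request_eq_irq(): the AEQ hard irq only queues work,
+ * deferring event processing to a workqueue where a long burst can be
+ * rescheduled, while CEQs are drained directly in a threaded irq handler;
+ * IRQF_ONESHOT keeps the vector masked until the thread returns.
+ */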
+
+static void reset_eq(struct hinic3_eq *eq)
+{
+ /* clear eq_len to force eqe drop in hardware */
+ if (eq->type == HINIC3_AEQ)
+ hinic3_hwif_write_reg(eq->hwdev->hwif,
+ HINIC3_CSR_AEQ_CTRL_1_ADDR, 0);
+ else
+ hinic3_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);
+
+ hinic3_hwif_write_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0);
+}
+
+static int init_eq(struct hinic3_eq *eq, struct hinic3_hwdev *hwdev, u16 q_id,
+ u32 q_len, enum hinic3_eq_type type,
+ struct msix_entry *msix_entry)
+{
+ u32 elem_size;
+ int err;
+
+ eq->hwdev = hwdev;
+ eq->q_id = q_id;
+ eq->type = type;
+ eq->eq_len = q_len;
+
+ /* Indirect access should set q_id first */
+ hinic3_hwif_write_reg(hwdev->hwif, HINIC3_EQ_INDIR_IDX_ADDR(eq->type),
+ eq->q_id);
+
+ reset_eq(eq);
+
+ eq->cons_idx = 0;
+ eq->wrapped = 0;
+
+ elem_size = (type == HINIC3_AEQ) ? HINIC3_AEQE_SIZE : HINIC3_CEQE_SIZE;
+ eq_calc_page_size_and_num(eq, elem_size);
+
+ err = alloc_eq_pages(eq);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to allocate pages for eq\n");
+ return err;
+ }
+
+ eq->msix_entry_idx = msix_entry->entry;
+ eq->irq_id = msix_entry->vector;
+
+ err = set_eq_ctrls(eq);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set ctrls for eq\n");
+ goto err_free_queue_pages;
+ }
+
+ set_eq_cons_idx(eq, HINIC3_EQ_ARMED);
+
+ err = request_eq_irq(eq);
+ if (err) {
+ dev_err(hwdev->dev,
+ "Failed to request irq for the eq, err: %d\n", err);
+ goto err_free_queue_pages;
+ }
+
+ hinic3_set_msix_state(hwdev, eq->msix_entry_idx, HINIC3_MSIX_DISABLE);
+
+ return 0;
+
+err_free_queue_pages:
+ hinic3_queue_pages_free(hwdev, &eq->qpages);
+
+ return err;
+}
+
+static void remove_eq(struct hinic3_eq *eq)
+{
+ hinic3_set_msix_state(eq->hwdev, eq->msix_entry_idx,
+ HINIC3_MSIX_DISABLE);
+ free_irq(eq->irq_id, eq);
+ /* Indirect access should set q_id first */
+ hinic3_hwif_write_reg(eq->hwdev->hwif,
+ HINIC3_EQ_INDIR_IDX_ADDR(eq->type),
+ eq->q_id);
+
+ if (eq->type == HINIC3_AEQ) {
+ disable_work_sync(&eq->aeq_work);
+ /* clear eq_len to avoid hw access host memory */
+ hinic3_hwif_write_reg(eq->hwdev->hwif,
+ HINIC3_CSR_AEQ_CTRL_1_ADDR, 0);
+ } else {
+ hinic3_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);
+ }
+
+ /* update consumer index to avoid invalid interrupt */
+ eq->cons_idx = hinic3_hwif_read_reg(eq->hwdev->hwif,
+ EQ_PROD_IDX_REG_ADDR(eq));
+ set_eq_cons_idx(eq, HINIC3_EQ_NOT_ARMED);
+ hinic3_queue_pages_free(eq->hwdev, &eq->qpages);
+}
+
+int hinic3_aeqs_init(struct hinic3_hwdev *hwdev, u16 num_aeqs,
+ struct msix_entry *msix_entries)
+{
+ struct hinic3_aeqs *aeqs;
+ u16 q_id;
+ int err;
+
+ aeqs = kzalloc(sizeof(*aeqs), GFP_KERNEL);
+ if (!aeqs)
+ return -ENOMEM;
+
+ hwdev->aeqs = aeqs;
+ aeqs->hwdev = hwdev;
+ aeqs->num_aeqs = num_aeqs;
+ aeqs->workq = alloc_workqueue(HINIC3_EQS_WQ_NAME, WQ_MEM_RECLAIM,
+ HINIC3_MAX_AEQS);
+ if (!aeqs->workq) {
+ dev_err(hwdev->dev, "Failed to initialize aeq workqueue\n");
+ err = -ENOMEM;
+ goto err_free_aeqs;
+ }
+
+ for (q_id = 0; q_id < num_aeqs; q_id++) {
+ err = init_eq(&aeqs->aeq[q_id], hwdev, q_id,
+ HINIC3_DEFAULT_AEQ_LEN, HINIC3_AEQ,
+ &msix_entries[q_id]);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init aeq %u\n",
+ q_id);
+ goto err_remove_eqs;
+ }
+ }
+ for (q_id = 0; q_id < num_aeqs; q_id++)
+ hinic3_set_msix_state(hwdev, aeqs->aeq[q_id].msix_entry_idx,
+ HINIC3_MSIX_ENABLE);
+
+ return 0;
+
+err_remove_eqs:
+ while (q_id > 0) {
+ q_id--;
+ remove_eq(&aeqs->aeq[q_id]);
+ }
+
+ destroy_workqueue(aeqs->workq);
+
+err_free_aeqs:
+ kfree(aeqs);
+
+ return err;
+}
+
+void hinic3_aeqs_free(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_aeqs *aeqs = hwdev->aeqs;
+ enum hinic3_aeq_type aeq_event;
+ struct hinic3_eq *eq;
+ u16 q_id;
+
+ for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) {
+ eq = aeqs->aeq + q_id;
+ remove_eq(eq);
+ hinic3_free_irq(hwdev, eq->irq_id);
+ }
+
+ for (aeq_event = 0; aeq_event < HINIC3_MAX_AEQ_EVENTS; aeq_event++)
+ hinic3_aeq_unregister_cb(hwdev, aeq_event);
+
+ destroy_workqueue(aeqs->workq);
+
+ kfree(aeqs);
+}
+
+int hinic3_ceqs_init(struct hinic3_hwdev *hwdev, u16 num_ceqs,
+ struct msix_entry *msix_entries)
+{
+ struct hinic3_ceqs *ceqs;
+ u16 q_id;
+ int err;
+
+ ceqs = kzalloc(sizeof(*ceqs), GFP_KERNEL);
+ if (!ceqs)
+ return -ENOMEM;
+
+ hwdev->ceqs = ceqs;
+ ceqs->hwdev = hwdev;
+ ceqs->num_ceqs = num_ceqs;
+
+ for (q_id = 0; q_id < num_ceqs; q_id++) {
+ err = init_eq(&ceqs->ceq[q_id], hwdev, q_id,
+ HINIC3_DEFAULT_CEQ_LEN, HINIC3_CEQ,
+ &msix_entries[q_id]);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init ceq %u\n",
+ q_id);
+ goto err_free_ceqs;
+ }
+ }
+ for (q_id = 0; q_id < num_ceqs; q_id++)
+ hinic3_set_msix_state(hwdev, ceqs->ceq[q_id].msix_entry_idx,
+ HINIC3_MSIX_ENABLE);
+
+ return 0;
+
+err_free_ceqs:
+ while (q_id > 0) {
+ q_id--;
+ remove_eq(&ceqs->ceq[q_id]);
+ }
+
+ kfree(ceqs);
+
+ return err;
+}
+
+void hinic3_ceqs_free(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_ceqs *ceqs = hwdev->ceqs;
+ enum hinic3_ceq_event ceq_event;
+ struct hinic3_eq *eq;
+ u16 q_id;
+
+ for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) {
+ eq = ceqs->ceq + q_id;
+ remove_eq(eq);
+ hinic3_free_irq(hwdev, eq->irq_id);
+ }
+
+ for (ceq_event = 0; ceq_event < HINIC3_MAX_CEQ_EVENTS; ceq_event++)
+ hinic3_ceq_unregister_cb(hwdev, ceq_event);
+
+ kfree(ceqs);
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.h b/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.h
new file mode 100644
index 000000000000..005a6e0745b3
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_eqs.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_EQS_H_
+#define _HINIC3_EQS_H_
+
+#include <linux/interrupt.h>
+
+#include "hinic3_hw_cfg.h"
+#include "hinic3_queue_common.h"
+
+#define HINIC3_MAX_AEQS 4
+#define HINIC3_MAX_CEQS 32
+
+#define HINIC3_AEQ_MAX_PAGES 4
+#define HINIC3_CEQ_MAX_PAGES 8
+
+#define HINIC3_AEQE_SIZE 64
+#define HINIC3_CEQE_SIZE 4
+
+#define HINIC3_AEQE_DESC_SIZE 4
+#define HINIC3_AEQE_DATA_SIZE (HINIC3_AEQE_SIZE - HINIC3_AEQE_DESC_SIZE)
+
+#define HINIC3_DEFAULT_AEQ_LEN 0x10000
+#define HINIC3_DEFAULT_CEQ_LEN 0x10000
+
+#define HINIC3_EQ_IRQ_NAME_LEN 64
+
+#define HINIC3_EQ_USLEEP_LOW_BOUND 900
+#define HINIC3_EQ_USLEEP_HIGH_BOUND 1000
+
+enum hinic3_eq_type {
+ HINIC3_AEQ = 0,
+ HINIC3_CEQ = 1,
+};
+
+enum hinic3_eq_intr_mode {
+ HINIC3_INTR_MODE_ARMED = 0,
+ HINIC3_INTR_MODE_ALWAYS = 1,
+};
+
+enum hinic3_eq_ci_arm_state {
+ HINIC3_EQ_NOT_ARMED = 0,
+ HINIC3_EQ_ARMED = 1,
+};
+
+struct hinic3_eq {
+ struct hinic3_hwdev *hwdev;
+ struct hinic3_queue_pages qpages;
+ u16 q_id;
+ enum hinic3_eq_type type;
+ u32 eq_len;
+ u32 cons_idx;
+ u8 wrapped;
+ u32 irq_id;
+ u16 msix_entry_idx;
+ char irq_name[HINIC3_EQ_IRQ_NAME_LEN];
+ struct work_struct aeq_work;
+};
+
+struct hinic3_aeq_elem {
+ u8 aeqe_data[HINIC3_AEQE_DATA_SIZE];
+ __be32 desc;
+};
+
+enum hinic3_aeq_type {
+ HINIC3_HW_INTER_INT = 0,
+ HINIC3_MBX_FROM_FUNC = 1,
+ HINIC3_MSG_FROM_FW = 2,
+ HINIC3_MAX_AEQ_EVENTS = 6,
+};
+
+typedef void (*hinic3_aeq_event_cb)(struct hinic3_hwdev *hwdev, u8 *data,
+ u8 size);
+
+struct hinic3_aeqs {
+ struct hinic3_hwdev *hwdev;
+ hinic3_aeq_event_cb aeq_cb[HINIC3_MAX_AEQ_EVENTS];
+ struct hinic3_eq aeq[HINIC3_MAX_AEQS];
+ u16 num_aeqs;
+ struct workqueue_struct *workq;
+ /* lock for aeq event flag */
+ spinlock_t aeq_lock;
+};
+
+enum hinic3_ceq_event {
+ HINIC3_CMDQ = 3,
+ HINIC3_MAX_CEQ_EVENTS = 6,
+};
+
+typedef void (*hinic3_ceq_event_cb)(struct hinic3_hwdev *hwdev,
+ __le32 ceqe_data);
+
+struct hinic3_ceqs {
+ struct hinic3_hwdev *hwdev;
+
+ hinic3_ceq_event_cb ceq_cb[HINIC3_MAX_CEQ_EVENTS];
+
+ struct hinic3_eq ceq[HINIC3_MAX_CEQS];
+ u16 num_ceqs;
+ /* lock for ceq event flag */
+ spinlock_t ceq_lock;
+};
+
+int hinic3_aeqs_init(struct hinic3_hwdev *hwdev, u16 num_aeqs,
+ struct msix_entry *msix_entries);
+void hinic3_aeqs_free(struct hinic3_hwdev *hwdev);
+int hinic3_aeq_register_cb(struct hinic3_hwdev *hwdev,
+ enum hinic3_aeq_type event,
+ hinic3_aeq_event_cb hwe_cb);
+void hinic3_aeq_unregister_cb(struct hinic3_hwdev *hwdev,
+ enum hinic3_aeq_type event);
+int hinic3_ceqs_init(struct hinic3_hwdev *hwdev, u16 num_ceqs,
+ struct msix_entry *msix_entries);
+void hinic3_ceqs_free(struct hinic3_hwdev *hwdev);
+int hinic3_ceq_register_cb(struct hinic3_hwdev *hwdev,
+ enum hinic3_ceq_event event,
+ hinic3_ceq_event_cb callback);
+void hinic3_ceq_unregister_cb(struct hinic3_hwdev *hwdev,
+ enum hinic3_ceq_event event);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c
index 87d9450c30ca..7827c1f626db 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c
@@ -8,6 +8,217 @@
#include "hinic3_hwif.h"
#include "hinic3_mbox.h"
+#define HINIC3_CFG_MAX_QP 256
+
+static void hinic3_parse_pub_res_cap(struct hinic3_hwdev *hwdev,
+ struct hinic3_dev_cap *cap,
+ const struct cfg_cmd_dev_cap *dev_cap,
+ enum hinic3_func_type type)
+{
+ cap->port_id = dev_cap->port_id;
+ cap->supp_svcs_bitmap = dev_cap->svc_cap_en;
+}
+
+static void hinic3_parse_l2nic_res_cap(struct hinic3_hwdev *hwdev,
+ struct hinic3_dev_cap *cap,
+ const struct cfg_cmd_dev_cap *dev_cap,
+ enum hinic3_func_type type)
+{
+ struct hinic3_nic_service_cap *nic_svc_cap = &cap->nic_svc_cap;
+
+ nic_svc_cap->max_sqs = min(dev_cap->nic_max_sq_id + 1,
+ HINIC3_CFG_MAX_QP);
+}
+
+static void hinic3_parse_dev_cap(struct hinic3_hwdev *hwdev,
+ const struct cfg_cmd_dev_cap *dev_cap,
+ enum hinic3_func_type type)
+{
+ struct hinic3_dev_cap *cap = &hwdev->cfg_mgmt->cap;
+
+ /* Public resource */
+ hinic3_parse_pub_res_cap(hwdev, cap, dev_cap, type);
+
+ /* L2 NIC resource */
+ if (hinic3_support_nic(hwdev))
+ hinic3_parse_l2nic_res_cap(hwdev, cap, dev_cap, type);
+}
+
+static int get_cap_from_fw(struct hinic3_hwdev *hwdev,
+ enum hinic3_func_type type)
+{
+ struct mgmt_msg_params msg_params = {};
+ struct cfg_cmd_dev_cap dev_cap = {};
+ int err;
+
+ dev_cap.func_id = hinic3_global_func_id(hwdev);
+
+ mgmt_msg_params_init_default(&msg_params, &dev_cap, sizeof(dev_cap));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_CFGM,
+ CFG_CMD_GET_DEV_CAP, &msg_params);
+ if (err || dev_cap.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to get capability from FW, err: %d, status: 0x%x\n",
+ err, dev_cap.head.status);
+ return -EIO;
+ }
+
+ hinic3_parse_dev_cap(hwdev, &dev_cap, type);
+
+ return 0;
+}
+
+static int hinic3_init_irq_info(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cfg_mgmt_info *cfg_mgmt = hwdev->cfg_mgmt;
+ struct hinic3_hwif *hwif = hwdev->hwif;
+ u16 intr_num = hwif->attr.num_irqs;
+ struct hinic3_irq_info *irq_info;
+ u16 intr_needed;
+
+ intr_needed = hwif->attr.msix_flex_en ? (hwif->attr.num_aeqs +
+ hwif->attr.num_ceqs + hwif->attr.num_sq) : intr_num;
+ if (intr_needed > intr_num) {
+ dev_warn(hwdev->dev, "Configured irq num %d is less than the needed irq num %d, msix_flex_en %d\n",
+ intr_num, intr_needed, hwif->attr.msix_flex_en);
+ intr_needed = intr_num;
+ }
+
+ irq_info = &cfg_mgmt->irq_info;
+ irq_info->irq = kcalloc(intr_num, sizeof(struct hinic3_irq),
+ GFP_KERNEL);
+ if (!irq_info->irq)
+ return -ENOMEM;
+
+ irq_info->num_irq_hw = intr_needed;
+ mutex_init(&irq_info->irq_mutex);
+
+ return 0;
+}
+
+static int hinic3_init_irq_alloc_info(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cfg_mgmt_info *cfg_mgmt = hwdev->cfg_mgmt;
+ struct hinic3_irq *irq = cfg_mgmt->irq_info.irq;
+ u16 nreq = cfg_mgmt->irq_info.num_irq_hw;
+ struct pci_dev *pdev = hwdev->pdev;
+ int actual_irq;
+ u16 i;
+
+ actual_irq = pci_alloc_irq_vectors(pdev, 2, nreq, PCI_IRQ_MSIX);
+ if (actual_irq < 0) {
+ dev_err(hwdev->dev, "Failed to alloc msix vectors (min 2), err: %d\n",
+ actual_irq);
+ return -ENOMEM;
+ }
+
+ nreq = actual_irq;
+ cfg_mgmt->irq_info.num_irq = nreq;
+
+ for (i = 0; i < nreq; ++i) {
+ irq[i].msix_entry_idx = i;
+ irq[i].irq_id = pci_irq_vector(pdev, i);
+ irq[i].allocated = false;
+ }
+
+ return 0;
+}
+
+int hinic3_init_cfg_mgmt(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cfg_mgmt_info *cfg_mgmt;
+ int err;
+
+ cfg_mgmt = kzalloc(sizeof(*cfg_mgmt), GFP_KERNEL);
+ if (!cfg_mgmt)
+ return -ENOMEM;
+
+ hwdev->cfg_mgmt = cfg_mgmt;
+
+ err = hinic3_init_irq_info(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init hinic3_irq_mgmt_info, err: %d\n",
+ err);
+ goto err_free_cfg_mgmt;
+ }
+
+ err = hinic3_init_irq_alloc_info(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init hinic3_irq_info, err: %d\n",
+ err);
+ goto err_free_irq_info;
+ }
+
+ return 0;
+
+err_free_irq_info:
+ kfree(cfg_mgmt->irq_info.irq);
+ cfg_mgmt->irq_info.irq = NULL;
+err_free_cfg_mgmt:
+ kfree(cfg_mgmt);
+
+ return err;
+}
+
+void hinic3_free_cfg_mgmt(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cfg_mgmt_info *cfg_mgmt = hwdev->cfg_mgmt;
+
+ pci_free_irq_vectors(hwdev->pdev);
+ kfree(cfg_mgmt->irq_info.irq);
+ cfg_mgmt->irq_info.irq = NULL;
+ kfree(cfg_mgmt);
+}
+
+int hinic3_alloc_irqs(struct hinic3_hwdev *hwdev, u16 num,
+ struct msix_entry *alloc_arr, u16 *act_num)
+{
+ struct hinic3_irq_info *irq_info;
+ struct hinic3_irq *curr;
+ u16 i, found = 0;
+
+ irq_info = &hwdev->cfg_mgmt->irq_info;
+ mutex_lock(&irq_info->irq_mutex);
+ for (i = 0; i < irq_info->num_irq && found < num; i++) {
+ curr = irq_info->irq + i;
+ if (curr->allocated)
+ continue;
+ curr->allocated = true;
+ alloc_arr[found].vector = curr->irq_id;
+ alloc_arr[found].entry = curr->msix_entry_idx;
+ found++;
+ }
+ mutex_unlock(&irq_info->irq_mutex);
+
+ *act_num = found;
+
+ return found == 0 ? -ENOMEM : 0;
+}
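+
+/* Usage note for hinic3_alloc_irqs(): callers must check *act_num for how
+ * many vectors were actually granted; a partial grant (0 < *act_num < num)
+ * still returns 0, only a fully exhausted pool returns -ENOMEM.
+ */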
+
+void hinic3_free_irq(struct hinic3_hwdev *hwdev, u32 irq_id)
+{
+ struct hinic3_irq_info *irq_info;
+ struct hinic3_irq *curr;
+ u16 i;
+
+ irq_info = &hwdev->cfg_mgmt->irq_info;
+ mutex_lock(&irq_info->irq_mutex);
+ for (i = 0; i < irq_info->num_irq; i++) {
+ curr = irq_info->irq + i;
+ if (curr->irq_id == irq_id) {
+ curr->allocated = false;
+ break;
+ }
+ }
+ mutex_unlock(&irq_info->irq_mutex);
+}
+
+int hinic3_init_capability(struct hinic3_hwdev *hwdev)
+{
+ return get_cap_from_fw(hwdev, HINIC3_FUNC_TYPE_VF);
+}
+
bool hinic3_support_nic(struct hinic3_hwdev *hwdev)
{
return hwdev->cfg_mgmt->cap.supp_svcs_bitmap &
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h
index e017b1ae9f05..58806199bf54 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h
@@ -42,10 +42,14 @@ struct hinic3_cfg_mgmt_info {
struct hinic3_dev_cap cap;
};
+int hinic3_init_cfg_mgmt(struct hinic3_hwdev *hwdev);
+void hinic3_free_cfg_mgmt(struct hinic3_hwdev *hwdev);
+
int hinic3_alloc_irqs(struct hinic3_hwdev *hwdev, u16 num,
struct msix_entry *alloc_arr, u16 *act_num);
void hinic3_free_irq(struct hinic3_hwdev *hwdev, u32 irq_id);
+int hinic3_init_capability(struct hinic3_hwdev *hwdev);
bool hinic3_support_nic(struct hinic3_hwdev *hwdev);
u16 hinic3_func_max_qnum(struct hinic3_hwdev *hwdev);
u8 hinic3_physical_port_id(struct hinic3_hwdev *hwdev);
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c
index 434696ce7dc2..89638813df40 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c
@@ -3,11 +3,43 @@
#include <linux/delay.h>
+#include "hinic3_cmdq.h"
#include "hinic3_hw_comm.h"
#include "hinic3_hwdev.h"
#include "hinic3_hwif.h"
#include "hinic3_mbox.h"
+int hinic3_set_interrupt_cfg_direct(struct hinic3_hwdev *hwdev,
+ const struct hinic3_interrupt_info *info)
+{
+ struct comm_cmd_cfg_msix_ctrl_reg msix_cfg = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ msix_cfg.func_id = hinic3_global_func_id(hwdev);
+ msix_cfg.msix_index = info->msix_index;
+ msix_cfg.opcode = MGMT_MSG_CMD_OP_SET;
+
+ msix_cfg.lli_credit_cnt = info->lli_credit_limit;
+ msix_cfg.lli_timer_cnt = info->lli_timer_cfg;
+ msix_cfg.pending_cnt = info->pending_limit;
+ msix_cfg.coalesce_timer_cnt = info->coalesc_timer_cfg;
+ msix_cfg.resend_timer_cnt = info->resend_timer_cfg;
+
+ mgmt_msg_params_init_default(&msg_params, &msix_cfg, sizeof(msix_cfg));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_CFG_MSIX_CTRL_REG, &msg_params);
+ if (err || msix_cfg.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set interrupt config, err: %d, status: 0x%x\n",
+ err, msix_cfg.head.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int hinic3_func_reset(struct hinic3_hwdev *hwdev, u16 func_id, u64 reset_flag)
{
struct comm_cmd_func_reset func_reset = {};
@@ -30,3 +62,365 @@ int hinic3_func_reset(struct hinic3_hwdev *hwdev, u16 func_id, u64 reset_flag)
return 0;
}
+
+static int hinic3_comm_features_nego(struct hinic3_hwdev *hwdev, u8 opcode,
+ u64 *s_feature, u16 size)
+{
+ struct comm_cmd_feature_nego feature_nego = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ feature_nego.func_id = hinic3_global_func_id(hwdev);
+ feature_nego.opcode = opcode;
+ if (opcode == MGMT_MSG_CMD_OP_SET)
+ memcpy(feature_nego.s_feature, s_feature,
+ array_size(size, sizeof(u64)));
+
+ mgmt_msg_params_init_default(&msg_params, &feature_nego,
+ sizeof(feature_nego));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_FEATURE_NEGO, &msg_params);
+ if (err || feature_nego.head.status) {
+ dev_err(hwdev->dev, "Failed to negotiate feature, err: %d, status: 0x%x\n",
+ err, feature_nego.head.status);
+ return -EINVAL;
+ }
+
+ if (opcode == MGMT_MSG_CMD_OP_GET)
+ memcpy(s_feature, feature_nego.s_feature,
+ array_size(size, sizeof(u64)));
+
+ return 0;
+}
+
+int hinic3_get_comm_features(struct hinic3_hwdev *hwdev, u64 *s_feature,
+ u16 size)
+{
+ return hinic3_comm_features_nego(hwdev, MGMT_MSG_CMD_OP_GET, s_feature,
+ size);
+}
+
+int hinic3_set_comm_features(struct hinic3_hwdev *hwdev, u64 *s_feature,
+ u16 size)
+{
+ return hinic3_comm_features_nego(hwdev, MGMT_MSG_CMD_OP_SET, s_feature,
+ size);
+}
+
+int hinic3_get_global_attr(struct hinic3_hwdev *hwdev,
+ struct comm_global_attr *attr)
+{
+ struct comm_cmd_get_glb_attr get_attr = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ mgmt_msg_params_init_default(&msg_params, &get_attr, sizeof(get_attr));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_GET_GLOBAL_ATTR, &msg_params);
+ if (err || get_attr.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to get global attribute, err: %d, status: 0x%x\n",
+ err, get_attr.head.status);
+ return -EIO;
+ }
+
+ memcpy(attr, &get_attr.attr, sizeof(*attr));
+
+ return 0;
+}
+
+int hinic3_set_func_svc_used_state(struct hinic3_hwdev *hwdev, u16 svc_type,
+ u8 state)
+{
+ struct comm_cmd_set_func_svc_used_state used_state = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ used_state.func_id = hinic3_global_func_id(hwdev);
+ used_state.svc_type = svc_type;
+ used_state.used_state = state;
+
+ mgmt_msg_params_init_default(&msg_params, &used_state,
+ sizeof(used_state));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_SET_FUNC_SVC_USED_STATE,
+ &msg_params);
+ if (err || used_state.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set func service used state, err: %d, status: 0x%x\n",
+ err, used_state.head.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic3_set_dma_attr_tbl(struct hinic3_hwdev *hwdev, u8 entry_idx, u8 st,
+ u8 at, u8 ph, u8 no_snooping, u8 tph_en)
+{
+ struct comm_cmd_set_dma_attr dma_attr = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ dma_attr.func_id = hinic3_global_func_id(hwdev);
+ dma_attr.entry_idx = entry_idx;
+ dma_attr.st = st;
+ dma_attr.at = at;
+ dma_attr.ph = ph;
+ dma_attr.no_snooping = no_snooping;
+ dma_attr.tph_en = tph_en;
+
+ mgmt_msg_params_init_default(&msg_params, &dma_attr, sizeof(dma_attr));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_SET_DMA_ATTR, &msg_params);
+ if (err || dma_attr.head.status) {
+ dev_err(hwdev->dev, "Failed to set dma attr, err: %d, status: 0x%x\n",
+ err, dma_attr.head.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hinic3_set_wq_page_size(struct hinic3_hwdev *hwdev, u16 func_idx,
+ u32 page_size)
+{
+ struct comm_cmd_cfg_wq_page_size page_size_info = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ page_size_info.func_id = func_idx;
+ page_size_info.page_size = ilog2(page_size / HINIC3_MIN_PAGE_SIZE);
+ page_size_info.opcode = MGMT_MSG_CMD_OP_SET;
+
+ mgmt_msg_params_init_default(&msg_params, &page_size_info,
+ sizeof(page_size_info));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_CFG_PAGESIZE, &msg_params);
+ if (err || page_size_info.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set wq page size, err: %d, status: 0x%x\n",
+ err, page_size_info.head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic3_set_cmdq_depth(struct hinic3_hwdev *hwdev, u16 cmdq_depth)
+{
+ struct comm_cmd_set_root_ctxt root_ctxt = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ root_ctxt.func_id = hinic3_global_func_id(hwdev);
+
+ root_ctxt.set_cmdq_depth = 1;
+ root_ctxt.cmdq_depth = ilog2(cmdq_depth);
+
+ mgmt_msg_params_init_default(&msg_params, &root_ctxt,
+ sizeof(root_ctxt));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_SET_VAT, &msg_params);
+ if (err || root_ctxt.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set cmdq depth, err: %d, status: 0x%x\n",
+ err, root_ctxt.head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+#define HINIC3_WAIT_CMDQ_IDLE_TIMEOUT 5000
+
+static enum hinic3_wait_return check_cmdq_stop_handler(void *priv_data)
+{
+ struct hinic3_hwdev *hwdev = priv_data;
+ enum hinic3_cmdq_type cmdq_type;
+ struct hinic3_cmdqs *cmdqs;
+
+ cmdqs = hwdev->cmdqs;
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
+ if (!hinic3_cmdq_idle(&cmdqs->cmdq[cmdq_type]))
+ return HINIC3_WAIT_PROCESS_WAITING;
+ }
+
+ return HINIC3_WAIT_PROCESS_CPL;
+}
+
+static int wait_cmdq_stop(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_cmdqs *cmdqs = hwdev->cmdqs;
+ enum hinic3_cmdq_type cmdq_type;
+ int err;
+
+ if (!(cmdqs->status & HINIC3_CMDQ_ENABLE))
+ return 0;
+
+ cmdqs->status &= ~HINIC3_CMDQ_ENABLE;
+ err = hinic3_wait_for_timeout(hwdev, check_cmdq_stop_handler,
+ HINIC3_WAIT_CMDQ_IDLE_TIMEOUT,
+ USEC_PER_MSEC);
+
+ if (err)
+ goto err_reenable_cmdq;
+
+ return 0;
+
+err_reenable_cmdq:
+ for (cmdq_type = 0; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
+ if (!hinic3_cmdq_idle(&cmdqs->cmdq[cmdq_type]))
+ dev_err(hwdev->dev, "Cmdq %d is busy\n", cmdq_type);
+ }
+ cmdqs->status |= HINIC3_CMDQ_ENABLE;
+
+ return err;
+}
+
+int hinic3_func_rx_tx_flush(struct hinic3_hwdev *hwdev)
+{
+ struct comm_cmd_clear_resource clear_db = {};
+ struct comm_cmd_clear_resource clr_res = {};
+ struct hinic3_hwif *hwif = hwdev->hwif;
+ struct mgmt_msg_params msg_params = {};
+ int ret = 0;
+ int err;
+
+ err = wait_cmdq_stop(hwdev);
+ if (err) {
+ dev_warn(hwdev->dev, "CMDQ is still working, CMDQ timeout value may be too small\n");
+ ret = err;
+ }
+
+ hinic3_toggle_doorbell(hwif, DISABLE_DOORBELL);
+
+ clear_db.func_id = hwif->attr.func_global_idx;
+ mgmt_msg_params_init_default(&msg_params, &clear_db, sizeof(clear_db));
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_FLUSH_DOORBELL, &msg_params);
+ if (err || clear_db.head.status) {
+ dev_warn(hwdev->dev, "Failed to flush doorbell, err: %d, status: 0x%x\n",
+ err, clear_db.head.status);
+ if (err)
+ ret = err;
+ else
+ ret = -EFAULT;
+ }
+
+ clr_res.func_id = hwif->attr.func_global_idx;
+ msg_params.buf_in = &clr_res;
+ msg_params.in_size = sizeof(clr_res);
+ err = hinic3_send_mbox_to_mgmt_no_ack(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_START_FLUSH,
+ &msg_params);
+ if (err) {
+ dev_warn(hwdev->dev, "Failed to notice flush message, err: %d\n",
+ err);
+ ret = err;
+ }
+
+ hinic3_toggle_doorbell(hwif, ENABLE_DOORBELL);
+
+ err = hinic3_reinit_cmdq_ctxts(hwdev);
+ if (err) {
+ dev_warn(hwdev->dev, "Failed to reinit cmdq\n");
+ ret = err;
+ }
+
+ return ret;
+}
+
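+/* Map a requested RX buffer size to its HW encoding, the index into the
+ * table below (e.g. the driver's 2048B default maps to index 11).
+ */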
+static int get_hw_rx_buf_size_idx(int rx_buf_sz, u16 *buf_sz_idx)
+{
+ /* Supported RX buffer sizes in bytes. Configured by array index. */
+ static const int supported_sizes[16] = {
+ [0] = 32, [1] = 64, [2] = 96, [3] = 128,
+ [4] = 192, [5] = 256, [6] = 384, [7] = 512,
+ [8] = 768, [9] = 1024, [10] = 1536, [11] = 2048,
+ [12] = 3072, [13] = 4096, [14] = 8192, [15] = 16384,
+ };
+ u16 idx;
+
+ /* Scan from biggest to smallest. Choose the supported size that is
+ * equal to or smaller than the requested one. A smaller size makes the
+ * HW under-utilize the posted buffers; a bigger one could make it
+ * overrun them.
+ */
+ idx = ARRAY_SIZE(supported_sizes);
+ while (idx > 0) {
+ idx--;
+ if (supported_sizes[idx] <= rx_buf_sz) {
+ *buf_sz_idx = idx;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+int hinic3_set_root_ctxt(struct hinic3_hwdev *hwdev, u32 rq_depth, u32 sq_depth,
+ int rx_buf_sz)
+{
+ struct comm_cmd_set_root_ctxt root_ctxt = {};
+ struct mgmt_msg_params msg_params = {};
+ u16 buf_sz_idx;
+ int err;
+
+ err = get_hw_rx_buf_size_idx(rx_buf_sz, &buf_sz_idx);
+ if (err)
+ return err;
+
+ root_ctxt.func_id = hinic3_global_func_id(hwdev);
+
+ root_ctxt.set_cmdq_depth = 0;
+ root_ctxt.cmdq_depth = 0;
+
+ root_ctxt.lro_en = 1;
+
+ root_ctxt.rq_depth = ilog2(rq_depth);
+ root_ctxt.rx_buf_sz = buf_sz_idx;
+ root_ctxt.sq_depth = ilog2(sq_depth);
+
+ mgmt_msg_params_init_default(&msg_params, &root_ctxt,
+ sizeof(root_ctxt));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_SET_VAT, &msg_params);
+ if (err || root_ctxt.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set root context, err: %d, status: 0x%x\n",
+ err, root_ctxt.head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic3_clean_root_ctxt(struct hinic3_hwdev *hwdev)
+{
+ struct comm_cmd_set_root_ctxt root_ctxt = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ root_ctxt.func_id = hinic3_global_func_id(hwdev);
+
+ mgmt_msg_params_init_default(&msg_params, &root_ctxt,
+ sizeof(root_ctxt));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM,
+ COMM_CMD_SET_VAT, &msg_params);
+ if (err || root_ctxt.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set root context, err: %d, status: 0x%x\n",
+ err, root_ctxt.head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h
index c33a1c77da9c..304f5691f0c2 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h
@@ -8,6 +8,40 @@
struct hinic3_hwdev;
+#define HINIC3_WQ_PAGE_SIZE_ORDER 8
+
+struct hinic3_interrupt_info {
+ u32 lli_set;
+ u32 interrupt_coalesc_set;
+ u16 msix_index;
+ u8 lli_credit_limit;
+ u8 lli_timer_cfg;
+ u8 pending_limit;
+ u8 coalesc_timer_cfg;
+ u8 resend_timer_cfg;
+};
+
+int hinic3_set_interrupt_cfg_direct(struct hinic3_hwdev *hwdev,
+ const struct hinic3_interrupt_info *info);
int hinic3_func_reset(struct hinic3_hwdev *hwdev, u16 func_id, u64 reset_flag);
+int hinic3_get_comm_features(struct hinic3_hwdev *hwdev, u64 *s_feature,
+ u16 size);
+int hinic3_set_comm_features(struct hinic3_hwdev *hwdev, u64 *s_feature,
+ u16 size);
+int hinic3_get_global_attr(struct hinic3_hwdev *hwdev,
+ struct comm_global_attr *attr);
+int hinic3_set_func_svc_used_state(struct hinic3_hwdev *hwdev, u16 svc_type,
+ u8 state);
+int hinic3_set_dma_attr_tbl(struct hinic3_hwdev *hwdev, u8 entry_idx, u8 st,
+ u8 at, u8 ph, u8 no_snooping, u8 tph_en);
+
+int hinic3_set_wq_page_size(struct hinic3_hwdev *hwdev, u16 func_idx,
+ u32 page_size);
+int hinic3_set_cmdq_depth(struct hinic3_hwdev *hwdev, u16 cmdq_depth);
+int hinic3_func_rx_tx_flush(struct hinic3_hwdev *hwdev);
+int hinic3_set_root_ctxt(struct hinic3_hwdev *hwdev, u32 rq_depth, u32 sq_depth,
+ int rx_buf_sz);
+int hinic3_clean_root_ctxt(struct hinic3_hwdev *hwdev);
+
#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h
index 22c84093efa2..623cf2d14cbc 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h
@@ -51,6 +51,48 @@ static inline void mgmt_msg_params_init_default(struct mgmt_msg_params *msg_para
msg_params->timeout_ms = 0;
}
+enum cfg_cmd {
+ CFG_CMD_GET_DEV_CAP = 0,
+};
+
+/* Device capabilities, defined by hw */
+struct cfg_cmd_dev_cap {
+ struct mgmt_msg_head head;
+
+ u16 func_id;
+ u16 rsvd1;
+
+ /* Public resources */
+ u8 host_id;
+ u8 ep_id;
+ u8 er_id;
+ u8 port_id;
+
+ u16 host_total_func;
+ u8 host_pf_num;
+ u8 pf_id_start;
+ u16 host_vf_num;
+ u16 vf_id_start;
+ u8 host_oq_id_mask_val;
+ u8 timer_en;
+ u8 host_valid_bitmap;
+ u8 rsvd_host;
+
+ u16 svc_cap_en;
+ u16 max_vf;
+ u8 flexq_en;
+ u8 valid_cos_bitmap;
+ u8 port_cos_valid_bitmap;
+ u8 rsvd2[45];
+
+ /* l2nic */
+ u16 nic_max_sq_id;
+ u16 nic_max_rq_id;
+ u16 nic_default_num_queues;
+
+ u8 rsvd3[250];
+};
+
/* COMM Commands between Driver to fw */
enum comm_cmd {
/* Commands for clearing FLR and resources */
@@ -70,6 +112,20 @@ enum comm_cmd {
COMM_CMD_SET_DMA_ATTR = 25,
};
+struct comm_cmd_cfg_msix_ctrl_reg {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u8 opcode;
+ u8 rsvd1;
+ u16 msix_index;
+ u8 pending_cnt;
+ u8 coalesce_timer_cnt;
+ u8 resend_timer_cnt;
+ u8 lli_timer_cnt;
+ u8 lli_credit_cnt;
+ u8 rsvd2[5];
+};
+
enum comm_func_reset_bits {
COMM_FUNC_RESET_BIT_FLUSH = BIT(0),
COMM_FUNC_RESET_BIT_MQM = BIT(1),
@@ -84,6 +140,11 @@ enum comm_func_reset_bits {
COMM_FUNC_RESET_BIT_NIC = BIT(13),
};
+#define COMM_FUNC_RESET_FLAG \
+ (COMM_FUNC_RESET_BIT_COMM | COMM_FUNC_RESET_BIT_COMM_CMD_CH | \
+ COMM_FUNC_RESET_BIT_FLUSH | COMM_FUNC_RESET_BIT_MQM | \
+ COMM_FUNC_RESET_BIT_SMF | COMM_FUNC_RESET_BIT_PF_BW_CFG)
+
struct comm_cmd_func_reset {
struct mgmt_msg_head head;
u16 func_id;
@@ -100,6 +161,96 @@ struct comm_cmd_feature_nego {
u64 s_feature[COMM_MAX_FEATURE_QWORD];
};
+struct comm_global_attr {
+ u8 max_host_num;
+ u8 max_pf_num;
+ u16 vf_id_start;
+ /* for api cmd to mgmt cpu */
+ u8 mgmt_host_node_id;
+ u8 cmdq_num;
+ u8 rsvd1[34];
+};
+
+struct comm_cmd_get_glb_attr {
+ struct mgmt_msg_head head;
+ struct comm_global_attr attr;
+};
+
+enum comm_func_svc_type {
+ COMM_FUNC_SVC_T_COMM = 0,
+ COMM_FUNC_SVC_T_NIC = 1,
+};
+
+struct comm_cmd_set_func_svc_used_state {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u16 svc_type;
+ u8 used_state;
+ u8 rsvd[35];
+};
+
+struct comm_cmd_set_dma_attr {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u8 entry_idx;
+ u8 st;
+ u8 at;
+ u8 ph;
+ u8 no_snooping;
+ u8 tph_en;
+ u32 resv1;
+};
+
+struct comm_cmd_set_ceq_ctrl_reg {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u16 q_id;
+ u32 ctrl0;
+ u32 ctrl1;
+ u32 rsvd1;
+};
+
+struct comm_cmd_cfg_wq_page_size {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u8 opcode;
+ /* real size = 4KB * 2^page_size; the driver must keep the encoded
+ * value in the range 0-20.
+ */
+ u8 page_size;
+ u32 rsvd1;
+};
+
+struct comm_cmd_set_root_ctxt {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u8 set_cmdq_depth;
+ u8 cmdq_depth;
+ u16 rx_buf_sz;
+ u8 lro_en;
+ u8 rsvd1;
+ u16 sq_depth;
+ u16 rq_depth;
+ u64 rsvd2;
+};
+
+struct comm_cmdq_ctxt_info {
+ __le64 curr_wqe_page_pfn;
+ __le64 wq_block_pfn;
+};
+
+struct comm_cmd_set_cmdq_ctxt {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u8 cmdq_id;
+ u8 rsvd1[5];
+ struct comm_cmdq_ctxt_info ctxt;
+};
+
+struct comm_cmd_clear_resource {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ u16 rsvd1[3];
+};
+
/* Services supported by HW. HW uses these values when delivering events.
* HW supports multiple services that are not yet supported by driver
* (e.g. RoCE).
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c
index 6e8788a64925..95a213133be9 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c
@@ -1,24 +1,557 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+#include "hinic3_cmdq.h"
+#include "hinic3_csr.h"
+#include "hinic3_eqs.h"
#include "hinic3_hw_comm.h"
#include "hinic3_hwdev.h"
#include "hinic3_hwif.h"
#include "hinic3_mbox.h"
#include "hinic3_mgmt.h"
+#define HINIC3_PCIE_SNOOP 0
+#define HINIC3_PCIE_TPH_DISABLE 0
+
+#define HINIC3_DMA_ATTR_INDIR_IDX_MASK GENMASK(9, 0)
+#define HINIC3_DMA_ATTR_INDIR_IDX_SET(val, member) \
+ FIELD_PREP(HINIC3_DMA_ATTR_INDIR_##member##_MASK, val)
+
+#define HINIC3_DMA_ATTR_ENTRY_ST_MASK GENMASK(7, 0)
+#define HINIC3_DMA_ATTR_ENTRY_AT_MASK GENMASK(9, 8)
+#define HINIC3_DMA_ATTR_ENTRY_PH_MASK GENMASK(11, 10)
+#define HINIC3_DMA_ATTR_ENTRY_NO_SNOOPING_MASK BIT(12)
+#define HINIC3_DMA_ATTR_ENTRY_TPH_EN_MASK BIT(13)
+#define HINIC3_DMA_ATTR_ENTRY_SET(val, member) \
+ FIELD_PREP(HINIC3_DMA_ATTR_ENTRY_##member##_MASK, val)
+
+#define HINIC3_PCIE_ST_DISABLE 0
+#define HINIC3_PCIE_AT_DISABLE 0
+#define HINIC3_PCIE_PH_DISABLE 0
+#define HINIC3_PCIE_MSIX_ATTR_ENTRY 0
+
+#define HINIC3_DEFAULT_EQ_MSIX_PENDING_LIMIT 0
+#define HINIC3_DEFAULT_EQ_MSIX_COALESC_TIMER_CFG 0xFF
+#define HINIC3_DEFAULT_EQ_MSIX_RESEND_TIMER_CFG 7
+
+#define HINIC3_HWDEV_WQ_NAME "hinic3_hardware"
+#define HINIC3_WQ_MAX_REQ 10
+
+enum hinic3_hwdev_init_state {
+ HINIC3_HWDEV_MBOX_INITED = 2,
+ HINIC3_HWDEV_CMDQ_INITED = 3,
+};
+
+static int hinic3_comm_aeqs_init(struct hinic3_hwdev *hwdev)
+{
+ struct msix_entry aeq_msix_entries[HINIC3_MAX_AEQS];
+ u16 num_aeqs, resp_num_irq, i;
+ int err;
+
+ num_aeqs = hwdev->hwif->attr.num_aeqs;
+ if (num_aeqs > HINIC3_MAX_AEQS) {
+ dev_warn(hwdev->dev, "Adjust aeq num to %d\n",
+ HINIC3_MAX_AEQS);
+ num_aeqs = HINIC3_MAX_AEQS;
+ }
+ err = hinic3_alloc_irqs(hwdev, num_aeqs, aeq_msix_entries,
+ &resp_num_irq);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to alloc aeq irqs, num_aeqs: %u\n",
+ num_aeqs);
+ return err;
+ }
+
+ if (resp_num_irq < num_aeqs) {
+ dev_warn(hwdev->dev, "Adjust aeq num to %u\n",
+ resp_num_irq);
+ num_aeqs = resp_num_irq;
+ }
+
+ err = hinic3_aeqs_init(hwdev, num_aeqs, aeq_msix_entries);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init aeqs\n");
+ goto err_free_irqs;
+ }
+
+ return 0;
+
+err_free_irqs:
+ for (i = 0; i < num_aeqs; i++)
+ hinic3_free_irq(hwdev, aeq_msix_entries[i].vector);
+
+ return err;
+}
+
+static int hinic3_comm_ceqs_init(struct hinic3_hwdev *hwdev)
+{
+ struct msix_entry ceq_msix_entries[HINIC3_MAX_CEQS];
+ u16 num_ceqs, resp_num_irq, i;
+ int err;
+
+ num_ceqs = hwdev->hwif->attr.num_ceqs;
+ if (num_ceqs > HINIC3_MAX_CEQS) {
+ dev_warn(hwdev->dev, "Adjust ceq num to %d\n",
+ HINIC3_MAX_CEQS);
+ num_ceqs = HINIC3_MAX_CEQS;
+ }
+
+ err = hinic3_alloc_irqs(hwdev, num_ceqs, ceq_msix_entries,
+ &resp_num_irq);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to alloc ceq irqs, num_ceqs: %u\n",
+ num_ceqs);
+ return err;
+ }
+
+ if (resp_num_irq < num_ceqs) {
+ dev_warn(hwdev->dev, "Adjust ceq num to %u\n",
+ resp_num_irq);
+ num_ceqs = resp_num_irq;
+ }
+
+ err = hinic3_ceqs_init(hwdev, num_ceqs, ceq_msix_entries);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init ceqs, err: %d\n", err);
+ goto err_free_irqs;
+ }
+
+ return 0;
+
+err_free_irqs:
+ for (i = 0; i < num_ceqs; i++)
+ hinic3_free_irq(hwdev, ceq_msix_entries[i].vector);
+
+ return err;
+}
+
+static int hinic3_comm_mbox_init(struct hinic3_hwdev *hwdev)
+{
+ int err;
+
+ err = hinic3_init_mbox(hwdev);
+ if (err)
+ return err;
+
+ hinic3_aeq_register_cb(hwdev, HINIC3_MBX_FROM_FUNC,
+ hinic3_mbox_func_aeqe_handler);
+ hinic3_aeq_register_cb(hwdev, HINIC3_MSG_FROM_FW,
+ hinic3_mgmt_msg_aeqe_handler);
+
+ set_bit(HINIC3_HWDEV_MBOX_INITED, &hwdev->func_state);
+
+ return 0;
+}
+
+static void hinic3_comm_mbox_free(struct hinic3_hwdev *hwdev)
+{
+ spin_lock_bh(&hwdev->channel_lock);
+ clear_bit(HINIC3_HWDEV_MBOX_INITED, &hwdev->func_state);
+ spin_unlock_bh(&hwdev->channel_lock);
+ hinic3_aeq_unregister_cb(hwdev, HINIC3_MBX_FROM_FUNC);
+ hinic3_aeq_unregister_cb(hwdev, HINIC3_MSG_FROM_FW);
+ hinic3_free_mbox(hwdev);
+}
+
+static int init_aeqs_msix_attr(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_aeqs *aeqs = hwdev->aeqs;
+ struct hinic3_interrupt_info info = {};
+ struct hinic3_eq *eq;
+ u16 q_id;
+ int err;
+
+ info.interrupt_coalesc_set = 1;
+ info.pending_limit = HINIC3_DEFAULT_EQ_MSIX_PENDING_LIMIT;
+ info.coalesc_timer_cfg = HINIC3_DEFAULT_EQ_MSIX_COALESC_TIMER_CFG;
+ info.resend_timer_cfg = HINIC3_DEFAULT_EQ_MSIX_RESEND_TIMER_CFG;
+
+ for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) {
+ eq = &aeqs->aeq[q_id];
+ info.msix_index = eq->msix_entry_idx;
+ err = hinic3_set_interrupt_cfg_direct(hwdev, &info);
+ if (err) {
+ dev_err(hwdev->dev, "Set msix attr for aeq %d failed\n",
+ q_id);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int init_ceqs_msix_attr(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_ceqs *ceqs = hwdev->ceqs;
+ struct hinic3_interrupt_info info = {};
+ struct hinic3_eq *eq;
+ u16 q_id;
+ int err;
+
+ info.interrupt_coalesc_set = 1;
+ info.pending_limit = HINIC3_DEFAULT_EQ_MSIX_PENDING_LIMIT;
+ info.coalesc_timer_cfg = HINIC3_DEFAULT_EQ_MSIX_COALESC_TIMER_CFG;
+ info.resend_timer_cfg = HINIC3_DEFAULT_EQ_MSIX_RESEND_TIMER_CFG;
+
+ for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) {
+ eq = &ceqs->ceq[q_id];
+ info.msix_index = eq->msix_entry_idx;
+ err = hinic3_set_interrupt_cfg_direct(hwdev, &info);
+ if (err) {
+ dev_err(hwdev->dev, "Set msix attr for ceq %u failed\n",
+ q_id);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int init_basic_mgmt_channel(struct hinic3_hwdev *hwdev)
+{
+ int err;
+
+ err = hinic3_comm_aeqs_init(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init async event queues\n");
+ return err;
+ }
+
+ err = hinic3_comm_mbox_init(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init mailbox\n");
+ goto err_free_comm_aeqs;
+ }
+
+ err = init_aeqs_msix_attr(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init aeqs msix attr\n");
+ goto err_free_comm_mbox;
+ }
+
+ return 0;
+
+err_free_comm_mbox:
+ hinic3_comm_mbox_free(hwdev);
+err_free_comm_aeqs:
+ hinic3_aeqs_free(hwdev);
+
+ return err;
+}
+
+static void free_base_mgmt_channel(struct hinic3_hwdev *hwdev)
+{
+ hinic3_comm_mbox_free(hwdev);
+ hinic3_aeqs_free(hwdev);
+}
+
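+/* Read the MSI-X entry of the DMA attribute table via the indirect access
+ * registers and issue a mgmt command only if it differs from the required
+ * default (ST/AT/PH/TPH disabled, snooping enabled).
+ */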
+static int dma_attr_table_init(struct hinic3_hwdev *hwdev)
+{
+ u32 addr, val, dst_attr;
+
+ /* Indirect access, set entry_idx first */
+ addr = HINIC3_CSR_DMA_ATTR_INDIR_IDX_ADDR;
+ val = hinic3_hwif_read_reg(hwdev->hwif, addr);
+ val &= ~HINIC3_DMA_ATTR_ENTRY_AT_MASK;
+ val |= HINIC3_DMA_ATTR_INDIR_IDX_SET(HINIC3_PCIE_MSIX_ATTR_ENTRY, IDX);
+ hinic3_hwif_write_reg(hwdev->hwif, addr, val);
+
+ addr = HINIC3_CSR_DMA_ATTR_TBL_ADDR;
+ val = hinic3_hwif_read_reg(hwdev->hwif, addr);
+
+ dst_attr = HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_ST_DISABLE, ST) |
+ HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_AT_DISABLE, AT) |
+ HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_PH_DISABLE, PH) |
+ HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_SNOOP, NO_SNOOPING) |
+ HINIC3_DMA_ATTR_ENTRY_SET(HINIC3_PCIE_TPH_DISABLE, TPH_EN);
+ if (val == dst_attr)
+ return 0;
+
+ return hinic3_set_dma_attr_tbl(hwdev,
+ HINIC3_PCIE_MSIX_ATTR_ENTRY,
+ HINIC3_PCIE_ST_DISABLE,
+ HINIC3_PCIE_AT_DISABLE,
+ HINIC3_PCIE_PH_DISABLE,
+ HINIC3_PCIE_SNOOP,
+ HINIC3_PCIE_TPH_DISABLE);
+}
+
+static int init_basic_attributes(struct hinic3_hwdev *hwdev)
+{
+ struct comm_global_attr glb_attr;
+ int err;
+
+ err = hinic3_func_reset(hwdev, hinic3_global_func_id(hwdev),
+ COMM_FUNC_RESET_FLAG);
+ if (err)
+ return err;
+
+ err = hinic3_get_comm_features(hwdev, hwdev->features,
+ COMM_MAX_FEATURE_QWORD);
+ if (err)
+ return err;
+
+ dev_dbg(hwdev->dev, "Comm hw features: 0x%llx\n", hwdev->features[0]);
+
+ err = hinic3_get_global_attr(hwdev, &glb_attr);
+ if (err)
+ return err;
+
+ err = hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_COMM, 1);
+ if (err)
+ return err;
+
+ err = dma_attr_table_init(hwdev);
+ if (err)
+ return err;
+
+ hwdev->max_cmdq = min(glb_attr.cmdq_num, HINIC3_MAX_CMDQ_TYPES);
+ dev_dbg(hwdev->dev,
+ "global attribute: max_host: 0x%x, max_pf: 0x%x, vf_id_start: 0x%x, mgmt node id: 0x%x, cmdq_num: 0x%x\n",
+ glb_attr.max_host_num, glb_attr.max_pf_num,
+ glb_attr.vf_id_start, glb_attr.mgmt_host_node_id,
+ glb_attr.cmdq_num);
+
+ return 0;
+}
+
+static int hinic3_comm_cmdqs_init(struct hinic3_hwdev *hwdev)
+{
+ int err;
+
+ err = hinic3_cmdqs_init(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init cmd queues\n");
+ return err;
+ }
+
+ hinic3_ceq_register_cb(hwdev, HINIC3_CMDQ, hinic3_cmdq_ceq_handler);
+
+ err = hinic3_set_cmdq_depth(hwdev, CMDQ_DEPTH);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set cmdq depth\n");
+ goto err_free_cmdqs;
+ }
+
+ set_bit(HINIC3_HWDEV_CMDQ_INITED, &hwdev->func_state);
+
+ return 0;
+
+err_free_cmdqs:
+ hinic3_cmdqs_free(hwdev);
+
+ return err;
+}
+
+static void hinic3_comm_cmdqs_free(struct hinic3_hwdev *hwdev)
+{
+ spin_lock_bh(&hwdev->channel_lock);
+ clear_bit(HINIC3_HWDEV_CMDQ_INITED, &hwdev->func_state);
+ spin_unlock_bh(&hwdev->channel_lock);
+
+ hinic3_ceq_unregister_cb(hwdev, HINIC3_CMDQ);
+ hinic3_cmdqs_free(hwdev);
+}
+
+static int init_cmdqs_channel(struct hinic3_hwdev *hwdev)
+{
+ int err;
+
+ err = hinic3_comm_ceqs_init(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init completion event queues\n");
+ return err;
+ }
+
+ err = init_ceqs_msix_attr(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init ceqs msix attr\n");
+ goto err_free_ceqs;
+ }
+
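+ /* Use large work queue pages: HINIC3_MIN_PAGE_SIZE shifted by
+ * HINIC3_WQ_PAGE_SIZE_ORDER (1MB, assuming a 4KB minimum page).
+ */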
+ hwdev->wq_page_size = HINIC3_MIN_PAGE_SIZE << HINIC3_WQ_PAGE_SIZE_ORDER;
+ err = hinic3_set_wq_page_size(hwdev, hinic3_global_func_id(hwdev),
+ hwdev->wq_page_size);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set wq page size\n");
+ goto err_free_ceqs;
+ }
+
+ err = hinic3_comm_cmdqs_init(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init cmd queues\n");
+ goto err_reset_wq_page_size;
+ }
+
+ return 0;
+
+err_reset_wq_page_size:
+ hinic3_set_wq_page_size(hwdev, hinic3_global_func_id(hwdev),
+ HINIC3_MIN_PAGE_SIZE);
+err_free_ceqs:
+ hinic3_ceqs_free(hwdev);
+
+ return err;
+}
+
+static void hinic3_free_cmdqs_channel(struct hinic3_hwdev *hwdev)
+{
+ hinic3_comm_cmdqs_free(hwdev);
+ hinic3_ceqs_free(hwdev);
+}
+
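+/* Bring up the management plane in two stages: first the aeq/mailbox
+ * channel, which is enough to talk to the fw, then, once the basic
+ * attributes are negotiated, the ceq/cmdq channel.
+ */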
+static int hinic3_init_comm_ch(struct hinic3_hwdev *hwdev)
+{
+ int err;
+
+ err = init_basic_mgmt_channel(hwdev);
+ if (err)
+ return err;
+
+ err = init_basic_attributes(hwdev);
+ if (err)
+ goto err_free_basic_mgmt_ch;
+
+ err = init_cmdqs_channel(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init cmdq channel\n");
+ goto err_clear_func_svc_used_state;
+ }
+
+ return 0;
+
+err_clear_func_svc_used_state:
+ hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_COMM, 0);
+err_free_basic_mgmt_ch:
+ free_base_mgmt_channel(hwdev);
+
+ return err;
+}
+
+static void hinic3_uninit_comm_ch(struct hinic3_hwdev *hwdev)
+{
+ hinic3_free_cmdqs_channel(hwdev);
+ hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_COMM, 0);
+ free_base_mgmt_channel(hwdev);
+}
+
+static DEFINE_IDA(hinic3_adev_ida);
+
+static int hinic3_adev_idx_alloc(void)
+{
+ return ida_alloc(&hinic3_adev_ida, GFP_KERNEL);
+}
+
+static void hinic3_adev_idx_free(int id)
+{
+ ida_free(&hinic3_adev_ida, id);
+}
+
int hinic3_init_hwdev(struct pci_dev *pdev)
{
- /* Completed by later submission due to LoC limit. */
- return -EFAULT;
+ struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev);
+ struct hinic3_hwdev *hwdev;
+ int err;
+
+ hwdev = kzalloc(sizeof(*hwdev), GFP_KERNEL);
+ if (!hwdev)
+ return -ENOMEM;
+
+ pci_adapter->hwdev = hwdev;
+ hwdev->adapter = pci_adapter;
+ hwdev->pdev = pci_adapter->pdev;
+ hwdev->dev = &pci_adapter->pdev->dev;
+ hwdev->func_state = 0;
+ hwdev->dev_id = hinic3_adev_idx_alloc();
+ spin_lock_init(&hwdev->channel_lock);
+
+ err = hinic3_init_hwif(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init hwif\n");
+ goto err_free_hwdev;
+ }
+
+ hwdev->workq = alloc_workqueue(HINIC3_HWDEV_WQ_NAME, WQ_MEM_RECLAIM,
+ HINIC3_WQ_MAX_REQ);
+ if (!hwdev->workq) {
+ dev_err(hwdev->dev, "Failed to alloc hardware workq\n");
+ err = -ENOMEM;
+ goto err_free_hwif;
+ }
+
+ err = hinic3_init_cfg_mgmt(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init config mgmt\n");
+ goto err_destroy_workqueue;
+ }
+
+ err = hinic3_init_comm_ch(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init communication channel\n");
+ goto err_free_cfg_mgmt;
+ }
+
+ err = hinic3_init_capability(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init capability\n");
+ goto err_uninit_comm_ch;
+ }
+
+ err = hinic3_set_comm_features(hwdev, hwdev->features,
+ COMM_MAX_FEATURE_QWORD);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set comm features\n");
+ goto err_uninit_comm_ch;
+ }
+
+ return 0;
+
+err_uninit_comm_ch:
+ hinic3_uninit_comm_ch(hwdev);
+err_free_cfg_mgmt:
+ hinic3_free_cfg_mgmt(hwdev);
+err_destroy_workqueue:
+ destroy_workqueue(hwdev->workq);
+err_free_hwif:
+ hinic3_free_hwif(hwdev);
+err_free_hwdev:
+ pci_adapter->hwdev = NULL;
+ hinic3_adev_idx_free(hwdev->dev_id);
+ kfree(hwdev);
+
+ return err;
}
void hinic3_free_hwdev(struct hinic3_hwdev *hwdev)
{
- /* Completed by later submission due to LoC limit. */
+ u64 drv_features[COMM_MAX_FEATURE_QWORD] = {};
+
+ hinic3_set_comm_features(hwdev, drv_features, COMM_MAX_FEATURE_QWORD);
+ hinic3_func_rx_tx_flush(hwdev);
+ hinic3_uninit_comm_ch(hwdev);
+ hinic3_free_cfg_mgmt(hwdev);
+ destroy_workqueue(hwdev->workq);
+ hinic3_free_hwif(hwdev);
+ hinic3_adev_idx_free(hwdev->dev_id);
+ kfree(hwdev);
}
void hinic3_set_api_stop(struct hinic3_hwdev *hwdev)
{
- /* Completed by later submission due to LoC limit. */
+ struct hinic3_mbox *mbox;
+
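+ /* Fail the in-flight mailbox request and flush pending synchronous
+ * cmdq commands so that blocked callers return promptly.
+ */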
+ spin_lock_bh(&hwdev->channel_lock);
+ if (test_bit(HINIC3_HWDEV_MBOX_INITED, &hwdev->func_state)) {
+ mbox = hwdev->mbox;
+ spin_lock(&mbox->mbox_lock);
+ if (mbox->event_flag == MBOX_EVENT_START)
+ mbox->event_flag = MBOX_EVENT_TIMEOUT;
+ spin_unlock(&mbox->mbox_lock);
+ }
+
+ if (test_bit(HINIC3_HWDEV_CMDQ_INITED, &hwdev->func_state))
+ hinic3_cmdq_flush_sync_cmd(hwdev);
+
+ spin_unlock_bh(&hwdev->channel_lock);
}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c
index 0865453bf0e7..f76f140fb6f7 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c
@@ -6,13 +6,428 @@
#include <linux/io.h>
#include "hinic3_common.h"
+#include "hinic3_csr.h"
#include "hinic3_hwdev.h"
#include "hinic3_hwif.h"
+#define HINIC3_HWIF_READY_TIMEOUT 10000
+#define HINIC3_DB_AND_OUTBOUND_EN_TIMEOUT 60000
+#define HINIC3_PCIE_LINK_DOWN 0xFFFFFFFF
+
+/* config BAR4/5 4MB, DB & DWQE both 2MB */
+#define HINIC3_DB_DWQE_SIZE 0x00400000
+
+/* db/dwqe page size: 4K */
+#define HINIC3_DB_PAGE_SIZE 0x00001000
+#define HINIC3_DWQE_OFFSET 0x00000800
+#define HINIC3_DB_MAX_AREAS (HINIC3_DB_DWQE_SIZE / HINIC3_DB_PAGE_SIZE)
+
+#define HINIC3_MAX_MSIX_ENTRY 2048
+
+#define HINIC3_AF0_FUNC_GLOBAL_IDX_MASK GENMASK(11, 0)
+#define HINIC3_AF0_P2P_IDX_MASK GENMASK(16, 12)
+#define HINIC3_AF0_PCI_INTF_IDX_MASK GENMASK(19, 17)
+#define HINIC3_AF0_FUNC_TYPE_MASK BIT(28)
+#define HINIC3_AF0_GET(val, member) \
+ FIELD_GET(HINIC3_AF0_##member##_MASK, val)
+
+#define HINIC3_AF1_AEQS_PER_FUNC_MASK GENMASK(9, 8)
+#define HINIC3_AF1_MGMT_INIT_STATUS_MASK BIT(30)
+#define HINIC3_AF1_GET(val, member) \
+ FIELD_GET(HINIC3_AF1_##member##_MASK, val)
+
+#define HINIC3_AF2_CEQS_PER_FUNC_MASK GENMASK(8, 0)
+#define HINIC3_AF2_IRQS_PER_FUNC_MASK GENMASK(26, 16)
+#define HINIC3_AF2_GET(val, member) \
+ FIELD_GET(HINIC3_AF2_##member##_MASK, val)
+
+#define HINIC3_AF4_DOORBELL_CTRL_MASK BIT(0)
+#define HINIC3_AF4_GET(val, member) \
+ FIELD_GET(HINIC3_AF4_##member##_MASK, val)
+#define HINIC3_AF4_SET(val, member) \
+ FIELD_PREP(HINIC3_AF4_##member##_MASK, val)
+
+#define HINIC3_AF5_OUTBOUND_CTRL_MASK BIT(0)
+#define HINIC3_AF5_GET(val, member) \
+ FIELD_GET(HINIC3_AF5_##member##_MASK, val)
+
+#define HINIC3_AF6_PF_STATUS_MASK GENMASK(15, 0)
+#define HINIC3_AF6_FUNC_MAX_SQ_MASK GENMASK(31, 23)
+#define HINIC3_AF6_MSIX_FLEX_EN_MASK BIT(22)
+#define HINIC3_AF6_GET(val, member) \
+ FIELD_GET(HINIC3_AF6_##member##_MASK, val)
+
+#define HINIC3_GET_REG_ADDR(reg) ((reg) & (HINIC3_REGS_FLAG_MASK))
+
+static void __iomem *hinic3_reg_addr(struct hinic3_hwif *hwif, u32 reg)
+{
+ return hwif->cfg_regs_base + HINIC3_GET_REG_ADDR(reg);
+}
+
+u32 hinic3_hwif_read_reg(struct hinic3_hwif *hwif, u32 reg)
+{
+ void __iomem *addr = hinic3_reg_addr(hwif, reg);
+
+ return ioread32be(addr);
+}
+
+void hinic3_hwif_write_reg(struct hinic3_hwif *hwif, u32 reg, u32 val)
+{
+ void __iomem *addr = hinic3_reg_addr(hwif, reg);
+
+ iowrite32be(val, addr);
+}
+
+static enum hinic3_wait_return check_hwif_ready_handler(void *priv_data)
+{
+ struct hinic3_hwdev *hwdev = priv_data;
+ u32 attr1;
+
+ attr1 = hinic3_hwif_read_reg(hwdev->hwif, HINIC3_CSR_FUNC_ATTR1_ADDR);
+
+ return HINIC3_AF1_GET(attr1, MGMT_INIT_STATUS) ?
+ HINIC3_WAIT_PROCESS_CPL : HINIC3_WAIT_PROCESS_WAITING;
+}
+
+static int wait_hwif_ready(struct hinic3_hwdev *hwdev)
+{
+ return hinic3_wait_for_timeout(hwdev, check_hwif_ready_handler,
+ HINIC3_HWIF_READY_TIMEOUT,
+ USEC_PER_MSEC);
+}
+
+/* Set attr struct from HW attr values. */
+static void set_hwif_attr(struct hinic3_func_attr *attr, u32 attr0, u32 attr1,
+ u32 attr2, u32 attr3, u32 attr6)
+{
+ attr->func_global_idx = HINIC3_AF0_GET(attr0, FUNC_GLOBAL_IDX);
+ attr->port_to_port_idx = HINIC3_AF0_GET(attr0, P2P_IDX);
+ attr->pci_intf_idx = HINIC3_AF0_GET(attr0, PCI_INTF_IDX);
+ attr->func_type = HINIC3_AF0_GET(attr0, FUNC_TYPE);
+
+ attr->num_aeqs = BIT(HINIC3_AF1_GET(attr1, AEQS_PER_FUNC));
+ attr->num_ceqs = HINIC3_AF2_GET(attr2, CEQS_PER_FUNC);
+ attr->num_irqs = HINIC3_AF2_GET(attr2, IRQS_PER_FUNC);
+ if (attr->num_irqs > HINIC3_MAX_MSIX_ENTRY)
+ attr->num_irqs = HINIC3_MAX_MSIX_ENTRY;
+
+ attr->num_sq = HINIC3_AF6_GET(attr6, FUNC_MAX_SQ);
+ attr->msix_flex_en = HINIC3_AF6_GET(attr6, MSIX_FLEX_EN);
+}
+
+/* Read attributes from HW and set attribute struct. */
+static int init_hwif_attr(struct hinic3_hwdev *hwdev)
+{
+ u32 attr0, attr1, attr2, attr3, attr6;
+ struct hinic3_hwif *hwif;
+
+ hwif = hwdev->hwif;
+ attr0 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR0_ADDR);
+ if (attr0 == HINIC3_PCIE_LINK_DOWN)
+ return -EFAULT;
+
+ attr1 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR1_ADDR);
+ if (attr1 == HINIC3_PCIE_LINK_DOWN)
+ return -EFAULT;
+
+ attr2 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR2_ADDR);
+ if (attr2 == HINIC3_PCIE_LINK_DOWN)
+ return -EFAULT;
+
+ attr3 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR3_ADDR);
+ if (attr3 == HINIC3_PCIE_LINK_DOWN)
+ return -EFAULT;
+
+ attr6 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR6_ADDR);
+ if (attr6 == HINIC3_PCIE_LINK_DOWN)
+ return -EFAULT;
+
+ set_hwif_attr(&hwif->attr, attr0, attr1, attr2, attr3, attr6);
+
+ if (!hwif->attr.num_ceqs) {
+ dev_err(hwdev->dev, "Ceq num cfg in fw is zero\n");
+ return -EFAULT;
+ }
+
+ if (!hwif->attr.num_irqs) {
+ dev_err(hwdev->dev,
+ "Irq num cfg in fw is zero, msix_flex_en %d\n",
+ hwif->attr.msix_flex_en);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static enum hinic3_doorbell_ctrl hinic3_get_doorbell_ctrl_status(struct hinic3_hwif *hwif)
+{
+ u32 attr4 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR4_ADDR);
+
+ return HINIC3_AF4_GET(attr4, DOORBELL_CTRL);
+}
+
+static enum hinic3_outbound_ctrl hinic3_get_outbound_ctrl_status(struct hinic3_hwif *hwif)
+{
+ u32 attr5 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR5_ADDR);
+
+ return HINIC3_AF5_GET(attr5, OUTBOUND_CTRL);
+}
+
+void hinic3_toggle_doorbell(struct hinic3_hwif *hwif,
+ enum hinic3_doorbell_ctrl flag)
+{
+ u32 addr, attr4;
+
+ addr = HINIC3_CSR_FUNC_ATTR4_ADDR;
+ attr4 = hinic3_hwif_read_reg(hwif, addr);
+
+ attr4 &= ~HINIC3_AF4_DOORBELL_CTRL_MASK;
+ attr4 |= HINIC3_AF4_SET(flag, DOORBELL_CTRL);
+
+ hinic3_hwif_write_reg(hwif, addr, attr4);
+}
+
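+/* Track doorbell page ownership with a bitmap: one bit per 4KB page of the
+ * db/dwqe BAR area, capped at HINIC3_DB_MAX_AREAS pages.
+ */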
+static int db_area_idx_init(struct hinic3_hwif *hwif, u64 db_base_phy,
+ u8 __iomem *db_base, u64 db_dwqe_len)
+{
+ struct hinic3_db_area *db_area = &hwif->db_area;
+ u32 db_max_areas;
+
+ hwif->db_base_phy = db_base_phy;
+ hwif->db_base = db_base;
+ hwif->db_dwqe_len = db_dwqe_len;
+
+ db_max_areas = db_dwqe_len > HINIC3_DB_DWQE_SIZE ?
+ HINIC3_DB_MAX_AREAS : db_dwqe_len / HINIC3_DB_PAGE_SIZE;
+ db_area->db_bitmap_array = bitmap_zalloc(db_max_areas, GFP_KERNEL);
+ if (!db_area->db_bitmap_array)
+ return -ENOMEM;
+
+ db_area->db_max_areas = db_max_areas;
+ spin_lock_init(&db_area->idx_lock);
+
+ return 0;
+}
+
+static void db_area_idx_free(struct hinic3_db_area *db_area)
+{
+ bitmap_free(db_area->db_bitmap_array);
+}
+
+static int get_db_idx(struct hinic3_hwif *hwif, u32 *idx)
+{
+ struct hinic3_db_area *db_area = &hwif->db_area;
+ u32 pg_idx;
+
+ spin_lock(&db_area->idx_lock);
+ pg_idx = find_first_zero_bit(db_area->db_bitmap_array,
+ db_area->db_max_areas);
+ if (pg_idx == db_area->db_max_areas) {
+ spin_unlock(&db_area->idx_lock);
+ return -ENOMEM;
+ }
+ set_bit(pg_idx, db_area->db_bitmap_array);
+ spin_unlock(&db_area->idx_lock);
+
+ *idx = pg_idx;
+
+ return 0;
+}
+
+static void free_db_idx(struct hinic3_hwif *hwif, u32 idx)
+{
+ struct hinic3_db_area *db_area = &hwif->db_area;
+
+ spin_lock(&db_area->idx_lock);
+ clear_bit(idx, db_area->db_bitmap_array);
+ spin_unlock(&db_area->idx_lock);
+}
+
+void hinic3_free_db_addr(struct hinic3_hwdev *hwdev, const u8 __iomem *db_base)
+{
+ struct hinic3_hwif *hwif;
+ uintptr_t distance;
+ u32 idx;
+
+ hwif = hwdev->hwif;
+ distance = db_base - hwif->db_base;
+ idx = distance / HINIC3_DB_PAGE_SIZE;
+
+ free_db_idx(hwif, idx);
+}
+
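+/* Hand out one doorbell page; the direct WQE area lives at a fixed 2KB
+ * offset inside the same page.
+ */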
+int hinic3_alloc_db_addr(struct hinic3_hwdev *hwdev, void __iomem **db_base,
+ void __iomem **dwqe_base)
+{
+ struct hinic3_hwif *hwif;
+ u8 __iomem *addr;
+ u32 idx;
+ int err;
+
+ hwif = hwdev->hwif;
+
+ err = get_db_idx(hwif, &idx);
+ if (err)
+ return err;
+
+ addr = hwif->db_base + idx * HINIC3_DB_PAGE_SIZE;
+ *db_base = addr;
+
+ if (dwqe_base)
+ *dwqe_base = addr + HINIC3_DWQE_OFFSET;
+
+ return 0;
+}
+
void hinic3_set_msix_state(struct hinic3_hwdev *hwdev, u16 msix_idx,
enum hinic3_msix_state flag)
{
- /* Completed by later submission due to LoC limit. */
+ struct hinic3_hwif *hwif;
+ u8 int_msk = 1;
+ u32 mask_bits;
+ u32 addr;
+
+ hwif = hwdev->hwif;
+
+ if (flag)
+ mask_bits = HINIC3_MSI_CLR_INDIR_SET(int_msk, INT_MSK_SET);
+ else
+ mask_bits = HINIC3_MSI_CLR_INDIR_SET(int_msk, INT_MSK_CLR);
+ mask_bits = mask_bits |
+ HINIC3_MSI_CLR_INDIR_SET(msix_idx, SIMPLE_INDIR_IDX);
+
+ addr = HINIC3_CSR_FUNC_MSI_CLR_WR_ADDR;
+ hinic3_hwif_write_reg(hwif, addr, mask_bits);
+}
+
+static void disable_all_msix(struct hinic3_hwdev *hwdev)
+{
+ u16 num_irqs = hwdev->hwif->attr.num_irqs;
+ u16 i;
+
+ for (i = 0; i < num_irqs; i++)
+ hinic3_set_msix_state(hwdev, i, HINIC3_MSIX_DISABLE);
+}
+
+void hinic3_msix_intr_clear_resend_bit(struct hinic3_hwdev *hwdev, u16 msix_idx,
+ u8 clear_resend_en)
+{
+ struct hinic3_hwif *hwif;
+ u32 msix_ctrl, addr;
+
+ hwif = hwdev->hwif;
+
+ msix_ctrl = HINIC3_MSI_CLR_INDIR_SET(msix_idx, SIMPLE_INDIR_IDX) |
+ HINIC3_MSI_CLR_INDIR_SET(clear_resend_en, RESEND_TIMER_CLR);
+
+ addr = HINIC3_CSR_FUNC_MSI_CLR_WR_ADDR;
+ hinic3_hwif_write_reg(hwif, addr, msix_ctrl);
+}
+
+void hinic3_set_msix_auto_mask_state(struct hinic3_hwdev *hwdev, u16 msix_idx,
+ enum hinic3_msix_auto_mask flag)
+{
+ struct hinic3_hwif *hwif;
+ u32 mask_bits;
+ u32 addr;
+
+ hwif = hwdev->hwif;
+
+ if (flag)
+ mask_bits = HINIC3_MSI_CLR_INDIR_SET(1, AUTO_MSK_SET);
+ else
+ mask_bits = HINIC3_MSI_CLR_INDIR_SET(1, AUTO_MSK_CLR);
+
+ mask_bits = mask_bits |
+ HINIC3_MSI_CLR_INDIR_SET(msix_idx, SIMPLE_INDIR_IDX);
+
+ addr = HINIC3_CSR_FUNC_MSI_CLR_WR_ADDR;
+ hinic3_hwif_write_reg(hwif, addr, mask_bits);
+}
+
+static enum hinic3_wait_return check_db_outbound_enable_handler(void *priv_data)
+{
+ enum hinic3_outbound_ctrl outbound_ctrl;
+ struct hinic3_hwif *hwif = priv_data;
+ enum hinic3_doorbell_ctrl db_ctrl;
+
+ db_ctrl = hinic3_get_doorbell_ctrl_status(hwif);
+ outbound_ctrl = hinic3_get_outbound_ctrl_status(hwif);
+ if (outbound_ctrl == ENABLE_OUTBOUND && db_ctrl == ENABLE_DOORBELL)
+ return HINIC3_WAIT_PROCESS_CPL;
+
+ return HINIC3_WAIT_PROCESS_WAITING;
+}
+
+static int wait_until_doorbell_and_outbound_enabled(struct hinic3_hwif *hwif)
+{
+ return hinic3_wait_for_timeout(hwif, check_db_outbound_enable_handler,
+ HINIC3_DB_AND_OUTBOUND_EN_TIMEOUT,
+ USEC_PER_MSEC);
+}
+
+int hinic3_init_hwif(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_pcidev *pci_adapter = hwdev->adapter;
+ struct hinic3_hwif *hwif;
+ u32 attr1, attr4, attr5;
+ int err;
+
+ hwif = kzalloc(sizeof(*hwif), GFP_KERNEL);
+ if (!hwif)
+ return -ENOMEM;
+
+ hwdev->hwif = hwif;
+ hwif->cfg_regs_base = (u8 __iomem *)pci_adapter->cfg_reg_base +
+ HINIC3_VF_CFG_REG_OFFSET;
+
+ err = db_area_idx_init(hwif, pci_adapter->db_base_phy,
+ pci_adapter->db_base,
+ pci_adapter->db_dwqe_len);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init db area.\n");
+ goto err_free_hwif;
+ }
+
+ err = wait_hwif_ready(hwdev);
+ if (err) {
+ attr1 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR1_ADDR);
+ dev_err(hwdev->dev, "Chip status is not ready, attr1:0x%x\n",
+ attr1);
+ goto err_free_db_area_idx;
+ }
+
+ err = init_hwif_attr(hwdev);
+ if (err) {
+ dev_err(hwdev->dev, "Init hwif attr failed\n");
+ goto err_free_db_area_idx;
+ }
+
+ err = wait_until_doorbell_and_outbound_enabled(hwif);
+ if (err) {
+ attr4 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR4_ADDR);
+ attr5 = hinic3_hwif_read_reg(hwif, HINIC3_CSR_FUNC_ATTR5_ADDR);
+ dev_err(hwdev->dev, "HW doorbell/outbound is disabled, attr4 0x%x attr5 0x%x\n",
+ attr4, attr5);
+ goto err_free_db_area_idx;
+ }
+
+ disable_all_msix(hwdev);
+
+ return 0;
+
+err_free_db_area_idx:
+ db_area_idx_free(&hwif->db_area);
+err_free_hwif:
+ kfree(hwif);
+
+ return err;
+}
+
+void hinic3_free_hwif(struct hinic3_hwdev *hwdev)
+{
+ db_area_idx_free(&hwdev->hwif->db_area);
+ kfree(hwdev->hwif);
}
u16 hinic3_global_func_id(struct hinic3_hwdev *hwdev)
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h
index 513c9680e6b6..c02904e861cc 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h
@@ -45,13 +45,45 @@ struct hinic3_hwif {
struct hinic3_func_attr attr;
};
+enum hinic3_outbound_ctrl {
+ ENABLE_OUTBOUND = 0x0,
+ DISABLE_OUTBOUND = 0x1,
+};
+
+enum hinic3_doorbell_ctrl {
+ ENABLE_DOORBELL = 0,
+ DISABLE_DOORBELL = 1,
+};
+
enum hinic3_msix_state {
HINIC3_MSIX_ENABLE,
HINIC3_MSIX_DISABLE,
};
+enum hinic3_msix_auto_mask {
+ HINIC3_CLR_MSIX_AUTO_MASK,
+ HINIC3_SET_MSIX_AUTO_MASK,
+};
+
+u32 hinic3_hwif_read_reg(struct hinic3_hwif *hwif, u32 reg);
+void hinic3_hwif_write_reg(struct hinic3_hwif *hwif, u32 reg, u32 val);
+
+void hinic3_toggle_doorbell(struct hinic3_hwif *hwif,
+ enum hinic3_doorbell_ctrl flag);
+
+int hinic3_alloc_db_addr(struct hinic3_hwdev *hwdev, void __iomem **db_base,
+ void __iomem **dwqe_base);
+void hinic3_free_db_addr(struct hinic3_hwdev *hwdev, const u8 __iomem *db_base);
+
+int hinic3_init_hwif(struct hinic3_hwdev *hwdev);
+void hinic3_free_hwif(struct hinic3_hwdev *hwdev);
+
void hinic3_set_msix_state(struct hinic3_hwdev *hwdev, u16 msix_idx,
enum hinic3_msix_state flag);
+void hinic3_msix_intr_clear_resend_bit(struct hinic3_hwdev *hwdev, u16 msix_idx,
+ u8 clear_resend_en);
+void hinic3_set_msix_auto_mask_state(struct hinic3_hwdev *hwdev, u16 msix_idx,
+ enum hinic3_msix_auto_mask flag);
u16 hinic3_global_func_id(struct hinic3_hwdev *hwdev);
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
index 8b92eed25edf..a69b361225e9 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c
@@ -38,19 +38,19 @@ static int hinic3_poll(struct napi_struct *napi, int budget)
return work_done;
}
-void qp_add_napi(struct hinic3_irq_cfg *irq_cfg)
+static void qp_add_napi(struct hinic3_irq_cfg *irq_cfg)
{
struct hinic3_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev);
+ netif_napi_add(nic_dev->netdev, &irq_cfg->napi, hinic3_poll);
netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
NETDEV_QUEUE_TYPE_RX, &irq_cfg->napi);
netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
NETDEV_QUEUE_TYPE_TX, &irq_cfg->napi);
- netif_napi_add(nic_dev->netdev, &irq_cfg->napi, hinic3_poll);
napi_enable(&irq_cfg->napi);
}
-void qp_del_napi(struct hinic3_irq_cfg *irq_cfg)
+static void qp_del_napi(struct hinic3_irq_cfg *irq_cfg)
{
napi_disable(&irq_cfg->napi);
netif_queue_set_napi(irq_cfg->netdev, irq_cfg->irq_id,
@@ -60,3 +60,135 @@ void qp_del_napi(struct hinic3_irq_cfg *irq_cfg)
netif_stop_subqueue(irq_cfg->netdev, irq_cfg->irq_id);
netif_napi_del(&irq_cfg->napi);
}
+
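+/* Queue pair interrupt: clear the MSI-X resend bit so the HW can raise the
+ * next interrupt, then hand the work to NAPI.
+ */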
+static irqreturn_t qp_irq(int irq, void *data)
+{
+ struct hinic3_irq_cfg *irq_cfg = data;
+ struct hinic3_nic_dev *nic_dev;
+
+ nic_dev = netdev_priv(irq_cfg->netdev);
+ hinic3_msix_intr_clear_resend_bit(nic_dev->hwdev,
+ irq_cfg->msix_entry_idx, 1);
+
+ napi_schedule(&irq_cfg->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int hinic3_request_irq(struct hinic3_irq_cfg *irq_cfg, u16 q_id)
+{
+ struct hinic3_interrupt_info info = {};
+ struct hinic3_nic_dev *nic_dev;
+ struct net_device *netdev;
+ int err;
+
+ netdev = irq_cfg->netdev;
+ nic_dev = netdev_priv(netdev);
+ qp_add_napi(irq_cfg);
+
+ info.msix_index = irq_cfg->msix_entry_idx;
+ info.interrupt_coalesc_set = 1;
+ info.pending_limit = nic_dev->intr_coalesce[q_id].pending_limit;
+ info.coalesc_timer_cfg =
+ nic_dev->intr_coalesce[q_id].coalesce_timer_cfg;
+ info.resend_timer_cfg = nic_dev->intr_coalesce[q_id].resend_timer_cfg;
+ err = hinic3_set_interrupt_cfg_direct(nic_dev->hwdev, &info);
+ if (err) {
+ netdev_err(netdev, "Failed to set RX interrupt coalescing attribute\n");
+ qp_del_napi(irq_cfg);
+ return err;
+ }
+
+ err = request_irq(irq_cfg->irq_id, qp_irq, 0, irq_cfg->irq_name,
+ irq_cfg);
+ if (err) {
+ qp_del_napi(irq_cfg);
+ return err;
+ }
+
+ irq_set_affinity_hint(irq_cfg->irq_id, &irq_cfg->affinity_mask);
+
+ return 0;
+}
+
+static void hinic3_release_irq(struct hinic3_irq_cfg *irq_cfg)
+{
+ irq_set_affinity_hint(irq_cfg->irq_id, NULL);
+ free_irq(irq_cfg->irq_id, irq_cfg);
+}
+
+int hinic3_qps_irq_init(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct pci_dev *pdev = nic_dev->pdev;
+ struct hinic3_irq_cfg *irq_cfg;
+ struct msix_entry *msix_entry;
+ u32 local_cpu;
+ u16 q_id;
+ int err;
+
+ for (q_id = 0; q_id < nic_dev->q_params.num_qps; q_id++) {
+ msix_entry = &nic_dev->qps_msix_entries[q_id];
+ irq_cfg = &nic_dev->q_params.irq_cfg[q_id];
+
+ irq_cfg->irq_id = msix_entry->vector;
+ irq_cfg->msix_entry_idx = msix_entry->entry;
+ irq_cfg->netdev = netdev;
+ irq_cfg->txq = &nic_dev->txqs[q_id];
+ irq_cfg->rxq = &nic_dev->rxqs[q_id];
+ nic_dev->rxqs[q_id].irq_cfg = irq_cfg;
+
+ local_cpu = cpumask_local_spread(q_id, dev_to_node(&pdev->dev));
+ cpumask_set_cpu(local_cpu, &irq_cfg->affinity_mask);
+
+ snprintf(irq_cfg->irq_name, sizeof(irq_cfg->irq_name),
+ "%s_qp%u", netdev->name, q_id);
+
+ err = hinic3_request_irq(irq_cfg, q_id);
+ if (err) {
+ netdev_err(netdev, "Failed to request Rx irq\n");
+ goto err_release_irqs;
+ }
+
+ hinic3_set_msix_auto_mask_state(nic_dev->hwdev,
+ irq_cfg->msix_entry_idx,
+ HINIC3_SET_MSIX_AUTO_MASK);
+ hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
+ HINIC3_MSIX_ENABLE);
+ }
+
+ return 0;
+
+err_release_irqs:
+ while (q_id > 0) {
+ q_id--;
+ irq_cfg = &nic_dev->q_params.irq_cfg[q_id];
+ qp_del_napi(irq_cfg);
+ hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
+ HINIC3_MSIX_DISABLE);
+ hinic3_set_msix_auto_mask_state(nic_dev->hwdev,
+ irq_cfg->msix_entry_idx,
+ HINIC3_CLR_MSIX_AUTO_MASK);
+ hinic3_release_irq(irq_cfg);
+ }
+
+ return err;
+}
+
+void hinic3_qps_irq_uninit(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_irq_cfg *irq_cfg;
+ u16 q_id;
+
+ for (q_id = 0; q_id < nic_dev->q_params.num_qps; q_id++) {
+ irq_cfg = &nic_dev->q_params.irq_cfg[q_id];
+ qp_del_napi(irq_cfg);
+ hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx,
+ HINIC3_MSIX_DISABLE);
+ hinic3_set_msix_auto_mask_state(nic_dev->hwdev,
+ irq_cfg->msix_entry_idx,
+ HINIC3_CLR_MSIX_AUTO_MASK);
+ hinic3_release_irq(irq_cfg);
+ }
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c
index 4827326e6a59..3db8241a3b0c 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c
@@ -8,6 +8,7 @@
#include "hinic3_hwdev.h"
#include "hinic3_lld.h"
#include "hinic3_mgmt.h"
+#include "hinic3_pci_id_tbl.h"
#define HINIC3_VF_PCI_CFG_REG_BAR 0
#define HINIC3_PCI_INTR_REG_BAR 2
@@ -121,6 +122,7 @@ static int hinic3_attach_aux_devices(struct hinic3_hwdev *hwdev)
goto err_del_adevs;
}
mutex_unlock(&pci_adapter->pdev_mutex);
+
return 0;
err_del_adevs:
@@ -132,6 +134,7 @@ err_del_adevs:
}
}
mutex_unlock(&pci_adapter->pdev_mutex);
+
return -ENOMEM;
}
@@ -153,6 +156,7 @@ struct hinic3_hwdev *hinic3_adev_get_hwdev(struct auxiliary_device *adev)
struct hinic3_adev *hadev;
hadev = container_of(adev, struct hinic3_adev, adev);
+
return hadev->hwdev;
}
@@ -307,6 +311,7 @@ static void hinic3_func_uninit(struct pci_dev *pdev)
{
struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev);
+ hinic3_flush_mgmt_workq(pci_adapter->hwdev);
hinic3_detach_aux_devices(pci_adapter->hwdev);
hinic3_free_hwdev(pci_adapter->hwdev);
}
@@ -333,6 +338,7 @@ err_unmap_bar:
err_out:
dev_err(&pdev->dev, "PCIe device probe function failed\n");
+
return err;
}
@@ -365,6 +371,7 @@ err_uninit_pci:
err_out:
dev_err(&pdev->dev, "PCIe device probe failed\n");
+
return err;
}
@@ -377,7 +384,7 @@ static void hinic3_remove(struct pci_dev *pdev)
}
static const struct pci_device_id hinic3_pci_table[] = {
- /* Completed by later submission due to LoC limit. */
+ {PCI_VDEVICE(HUAWEI, PCI_DEV_ID_HINIC3_VF), 0},
{0, 0}
};
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
index 497f2a36f35d..6d87d4d895ba 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c
@@ -12,17 +12,59 @@
#include "hinic3_nic_cfg.h"
#include "hinic3_nic_dev.h"
#include "hinic3_nic_io.h"
+#include "hinic3_rss.h"
#include "hinic3_rx.h"
#include "hinic3_tx.h"
#define HINIC3_NIC_DRV_DESC "Intelligent Network Interface Card Driver"
-#define HINIC3_RX_BUF_LEN 2048
-#define HINIC3_LRO_REPLENISH_THLD 256
-#define HINIC3_NIC_DEV_WQ_NAME "hinic3_nic_dev_wq"
+#define HINIC3_RX_BUF_LEN 2048
+#define HINIC3_LRO_REPLENISH_THLD 256
+#define HINIC3_NIC_DEV_WQ_NAME "hinic3_nic_dev_wq"
-#define HINIC3_SQ_DEPTH 1024
-#define HINIC3_RQ_DEPTH 1024
+#define HINIC3_SQ_DEPTH 1024
+#define HINIC3_RQ_DEPTH 1024
+
+#define HINIC3_DEFAULT_TXRX_MSIX_PENDING_LIMIT 2
+#define HINIC3_DEFAULT_TXRX_MSIX_COALESC_TIMER_CFG 25
+#define HINIC3_DEFAULT_TXRX_MSIX_RESEND_TIMER_CFG 7
+
+static void init_intr_coal_param(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_intr_coal_info *info;
+ u16 i;
+
+ for (i = 0; i < nic_dev->max_qps; i++) {
+ info = &nic_dev->intr_coalesce[i];
+ info->pending_limit = HINIC3_DEFAULT_TXRX_MSIX_PENDING_LIMIT;
+ info->coalesce_timer_cfg = HINIC3_DEFAULT_TXRX_MSIX_COALESC_TIMER_CFG;
+ info->resend_timer_cfg = HINIC3_DEFAULT_TXRX_MSIX_RESEND_TIMER_CFG;
+ }
+}
+
+static int hinic3_init_intr_coalesce(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ nic_dev->intr_coalesce = kcalloc(nic_dev->max_qps,
+ sizeof(*nic_dev->intr_coalesce),
+ GFP_KERNEL);
+
+ if (!nic_dev->intr_coalesce)
+ return -ENOMEM;
+
+ init_intr_coal_param(netdev);
+
+ return 0;
+}
+
+static void hinic3_free_intr_coalesce(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ kfree(nic_dev->intr_coalesce);
+}
static int hinic3_alloc_txrxqs(struct net_device *netdev)
{
@@ -42,8 +84,17 @@ static int hinic3_alloc_txrxqs(struct net_device *netdev)
goto err_free_txqs;
}
+ err = hinic3_init_intr_coalesce(netdev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init_intr_coalesce\n");
+ goto err_free_rxqs;
+ }
+
return 0;
+err_free_rxqs:
+ hinic3_free_rxqs(netdev);
+
err_free_txqs:
hinic3_free_txqs(netdev);
@@ -52,6 +103,7 @@ err_free_txqs:
static void hinic3_free_txrxqs(struct net_device *netdev)
{
+ hinic3_free_intr_coalesce(netdev);
hinic3_free_rxqs(netdev);
hinic3_free_txqs(netdev);
}
@@ -83,6 +135,8 @@ static int hinic3_sw_init(struct net_device *netdev)
nic_dev->q_params.sq_depth = HINIC3_SQ_DEPTH;
nic_dev->q_params.rq_depth = HINIC3_RQ_DEPTH;
+ hinic3_try_to_enable_rss(netdev);
+
/* VF driver always uses random MAC address. During VM migration to a
* new device, the new device should learn the VMs old MAC rather than
* provide its own MAC. The product design assumes that every VF is
@@ -94,7 +148,7 @@ static int hinic3_sw_init(struct net_device *netdev)
hinic3_global_func_id(hwdev));
if (err) {
dev_err(hwdev->dev, "Failed to set default MAC\n");
- return err;
+ goto err_clear_rss_config;
}
err = hinic3_alloc_txrxqs(netdev);
@@ -108,6 +162,8 @@ static int hinic3_sw_init(struct net_device *netdev)
err_del_mac:
hinic3_del_mac(hwdev, netdev->dev_addr, 0,
hinic3_global_func_id(hwdev));
+err_clear_rss_config:
+ hinic3_clear_rss_config(netdev);
return err;
}
@@ -119,6 +175,7 @@ static void hinic3_sw_uninit(struct net_device *netdev)
hinic3_free_txrxqs(netdev);
hinic3_del_mac(nic_dev->hwdev, netdev->dev_addr, 0,
hinic3_global_func_id(nic_dev->hwdev));
+ hinic3_clear_rss_config(netdev);
}
static void hinic3_assign_netdev_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c
index e74d1eb09730..cf67e26acece 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c
@@ -4,13 +4,857 @@
#include <linux/dma-mapping.h>
#include "hinic3_common.h"
+#include "hinic3_csr.h"
#include "hinic3_hwdev.h"
#include "hinic3_hwif.h"
#include "hinic3_mbox.h"
+#define MBOX_INT_DST_AEQN_MASK GENMASK(11, 10)
+#define MBOX_INT_SRC_RESP_AEQN_MASK GENMASK(13, 12)
+#define MBOX_INT_STAT_DMA_MASK GENMASK(19, 14)
+/* TX size, expressed in 4-byte units */
+#define MBOX_INT_TX_SIZE_MASK GENMASK(24, 20)
+/* SO_RO == strong order, relaxed order */
+#define MBOX_INT_STAT_DMA_SO_RO_MASK GENMASK(26, 25)
+#define MBOX_INT_WB_EN_MASK BIT(28)
+#define MBOX_INT_SET(val, field) \
+ FIELD_PREP(MBOX_INT_##field##_MASK, val)
+
+#define MBOX_CTRL_TRIGGER_AEQE_MASK BIT(0)
+#define MBOX_CTRL_TX_STATUS_MASK BIT(1)
+#define MBOX_CTRL_DST_FUNC_MASK GENMASK(28, 16)
+#define MBOX_CTRL_SET(val, field) \
+ FIELD_PREP(MBOX_CTRL_##field##_MASK, val)
+
+#define MBOX_MSG_POLLING_TIMEOUT_MS 8000 /* send one msg segment */
+#define MBOX_COMP_POLLING_TIMEOUT_MS 40000 /* wait for the response */
+
+#define MBOX_MAX_BUF_SZ 2048
+#define MBOX_HEADER_SZ 8
+
+/* Each MBOX is 64B: 8B header, 48B segment payload, 8B reserved */
+#define MBOX_SEG_LEN 48
+#define MBOX_SEG_LEN_ALIGN 4
+#define MBOX_WB_STATUS_LEN 16
+
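+/* A full 2048B message spans seq ids 0..42: 42 segments of 48B each plus a
+ * last segment of at most 32B (MBOX_MAX_BUF_SZ - 42 * MBOX_SEG_LEN).
+ */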
+#define MBOX_SEQ_ID_START_VAL 0
+#define MBOX_SEQ_ID_MAX_VAL 42
+#define MBOX_LAST_SEG_MAX_LEN \
+ (MBOX_MAX_BUF_SZ - MBOX_SEQ_ID_MAX_VAL * MBOX_SEG_LEN)
+
+/* mbox write back status is 16B, only first 4B is used */
+#define MBOX_WB_STATUS_ERRCODE_MASK 0xFFFF
+#define MBOX_WB_STATUS_MASK 0xFF
+#define MBOX_WB_ERROR_CODE_MASK 0xFF00
+#define MBOX_WB_STATUS_FINISHED_SUCCESS 0xFF
+#define MBOX_WB_STATUS_NOT_FINISHED 0x00
+
+#define MBOX_STATUS_FINISHED(wb) \
+ ((FIELD_PREP(MBOX_WB_STATUS_MASK, (wb))) != MBOX_WB_STATUS_NOT_FINISHED)
+#define MBOX_STATUS_SUCCESS(wb) \
+ ((FIELD_PREP(MBOX_WB_STATUS_MASK, (wb))) == \
+ MBOX_WB_STATUS_FINISHED_SUCCESS)
+#define MBOX_STATUS_ERRCODE(wb) \
+ ((wb) & MBOX_WB_ERROR_CODE_MASK)
+
+#define MBOX_DMA_MSG_QUEUE_DEPTH 32
+#define MBOX_AREA(hwif) \
+ ((hwif)->cfg_regs_base + HINIC3_FUNC_CSR_MAILBOX_DATA_OFF)
+
+#define MBOX_MQ_CI_OFFSET \
+ (HINIC3_CFG_REGS_FLAG + HINIC3_FUNC_CSR_MAILBOX_DATA_OFF + \
+ MBOX_HEADER_SZ + MBOX_SEG_LEN)
+
+#define MBOX_MQ_SYNC_CI_MASK GENMASK(7, 0)
+#define MBOX_MQ_ASYNC_CI_MASK GENMASK(15, 8)
+#define MBOX_MQ_CI_GET(val, field) \
+ FIELD_GET(MBOX_MQ_##field##_CI_MASK, val)
+
+#define MBOX_MGMT_FUNC_ID 0x1FFF
+#define MBOX_COMM_F_MBOX_SEGMENT BIT(3)
+
+static u8 *get_mbox_body_from_hdr(u8 *header)
+{
+ return header + MBOX_HEADER_SZ;
+}
+
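+/* Select the reassembly descriptor by message source (mgmt cpu vs. peer
+ * function) and direction (request vs. response).
+ */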
+static struct hinic3_msg_desc *get_mbox_msg_desc(struct hinic3_mbox *mbox,
+ enum mbox_msg_direction_type dir,
+ u16 src_func_id)
+{
+ struct hinic3_msg_channel *msg_ch;
+
+ msg_ch = (src_func_id == MBOX_MGMT_FUNC_ID) ?
+ &mbox->mgmt_msg : mbox->func_msg;
+
+ return (dir == MBOX_MSG_SEND) ?
+ &msg_ch->recv_msg : &msg_ch->resp_msg;
+}
+
+static void resp_mbox_handler(struct hinic3_mbox *mbox,
+ const struct hinic3_msg_desc *msg_desc)
+{
+ spin_lock(&mbox->mbox_lock);
+ if (msg_desc->msg_info.msg_id == mbox->send_msg_id &&
+ mbox->event_flag == MBOX_EVENT_START)
+ mbox->event_flag = MBOX_EVENT_SUCCESS;
+ spin_unlock(&mbox->mbox_lock);
+}
+
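+/* Validate a received segment against the reassembly state: seq id 0 starts
+ * a new message; later segments must be consecutive and carry the same msg
+ * id, module and command as the first one.
+ */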
+static bool mbox_segment_valid(struct hinic3_mbox *mbox,
+ struct hinic3_msg_desc *msg_desc,
+ __le64 mbox_header)
+{
+ u8 seq_id, seg_len, msg_id, mod;
+ __le16 src_func_idx, cmd;
+
+ seq_id = MBOX_MSG_HEADER_GET(mbox_header, SEQID);
+ seg_len = MBOX_MSG_HEADER_GET(mbox_header, SEG_LEN);
+ msg_id = MBOX_MSG_HEADER_GET(mbox_header, MSG_ID);
+ mod = MBOX_MSG_HEADER_GET(mbox_header, MODULE);
+ cmd = cpu_to_le16(MBOX_MSG_HEADER_GET(mbox_header, CMD));
+ src_func_idx = cpu_to_le16(MBOX_MSG_HEADER_GET(mbox_header,
+ SRC_GLB_FUNC_IDX));
+
+ if (seq_id > MBOX_SEQ_ID_MAX_VAL || seg_len > MBOX_SEG_LEN ||
+ (seq_id == MBOX_SEQ_ID_MAX_VAL && seg_len > MBOX_LAST_SEG_MAX_LEN))
+ goto err_seg;
+
+ if (seq_id == 0) {
+ msg_desc->seq_id = seq_id;
+ msg_desc->msg_info.msg_id = msg_id;
+ msg_desc->mod = mod;
+ msg_desc->cmd = cmd;
+ } else {
+ if (seq_id != msg_desc->seq_id + 1 ||
+ msg_id != msg_desc->msg_info.msg_id ||
+ mod != msg_desc->mod || cmd != msg_desc->cmd)
+ goto err_seg;
+
+ msg_desc->seq_id = seq_id;
+ }
+
+ return true;
+
+err_seg:
+ dev_err(mbox->hwdev->dev,
+ "Mailbox segment check failed, src func id: 0x%x, front seg info: seq id: 0x%x, msg id: 0x%x, mod: 0x%x, cmd: 0x%x\n",
+ src_func_idx, msg_desc->seq_id, msg_desc->msg_info.msg_id,
+ msg_desc->mod, msg_desc->cmd);
+ dev_err(mbox->hwdev->dev,
+ "Current seg info: seg len: 0x%x, seq id: 0x%x, msg id: 0x%x, mod: 0x%x, cmd: 0x%x\n",
+ seg_len, seq_id, msg_id, mod, cmd);
+
+ return false;
+}
+
+static void recv_mbox_handler(struct hinic3_mbox *mbox,
+ u8 *header, struct hinic3_msg_desc *msg_desc)
+{
+ __le64 mbox_header = *((__force __le64 *)header);
+ u8 *mbox_body = get_mbox_body_from_hdr(header);
+ u8 seq_id, seg_len;
+ int pos;
+
+ if (!mbox_segment_valid(mbox, msg_desc, mbox_header)) {
+ msg_desc->seq_id = MBOX_SEQ_ID_MAX_VAL;
+ return;
+ }
+
+ seq_id = MBOX_MSG_HEADER_GET(mbox_header, SEQID);
+ seg_len = MBOX_MSG_HEADER_GET(mbox_header, SEG_LEN);
+
+ pos = seq_id * MBOX_SEG_LEN;
+ memcpy(msg_desc->msg + pos, mbox_body, seg_len);
+
+ if (!MBOX_MSG_HEADER_GET(mbox_header, LAST))
+ return;
+
+ msg_desc->msg_len = cpu_to_le16(MBOX_MSG_HEADER_GET(mbox_header,
+ MSG_LEN));
+ msg_desc->msg_info.status = MBOX_MSG_HEADER_GET(mbox_header, STATUS);
+
+ if (MBOX_MSG_HEADER_GET(mbox_header, DIRECTION) == MBOX_MSG_RESP)
+ resp_mbox_handler(mbox, msg_desc);
+}
+
+void hinic3_mbox_func_aeqe_handler(struct hinic3_hwdev *hwdev, u8 *header,
+ u8 size)
+{
+ __le64 mbox_header = *((__force __le64 *)header);
+ enum mbox_msg_direction_type dir;
+ struct hinic3_msg_desc *msg_desc;
+ struct hinic3_mbox *mbox;
+ u16 src_func_id;
+
+ mbox = hwdev->mbox;
+ dir = MBOX_MSG_HEADER_GET(mbox_header, DIRECTION);
+ src_func_id = MBOX_MSG_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);
+ msg_desc = get_mbox_msg_desc(mbox, dir, src_func_id);
+ recv_mbox_handler(mbox, header, msg_desc);
+}
+
+static int init_mbox_dma_queue(struct hinic3_hwdev *hwdev,
+ struct mbox_dma_queue *mq)
+{
+ u32 size;
+
+ mq->depth = MBOX_DMA_MSG_QUEUE_DEPTH;
+ mq->prod_idx = 0;
+ mq->cons_idx = 0;
+
+ size = mq->depth * MBOX_MAX_BUF_SZ;
+ mq->dma_buf_vaddr = dma_alloc_coherent(hwdev->dev, size,
+ &mq->dma_buf_paddr,
+ GFP_KERNEL);
+ if (!mq->dma_buf_vaddr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void uninit_mbox_dma_queue(struct hinic3_hwdev *hwdev,
+ struct mbox_dma_queue *mq)
+{
+ dma_free_coherent(hwdev->dev, mq->depth * MBOX_MAX_BUF_SZ,
+ mq->dma_buf_vaddr, mq->dma_buf_paddr);
+}
+
+static int hinic3_init_mbox_dma_queue(struct hinic3_mbox *mbox)
+{
+ u32 val;
+ int err;
+
+ err = init_mbox_dma_queue(mbox->hwdev, &mbox->sync_msg_queue);
+ if (err)
+ return err;
+
+ err = init_mbox_dma_queue(mbox->hwdev, &mbox->async_msg_queue);
+ if (err) {
+ uninit_mbox_dma_queue(mbox->hwdev, &mbox->sync_msg_queue);
+ return err;
+ }
+
+ val = hinic3_hwif_read_reg(mbox->hwdev->hwif, MBOX_MQ_CI_OFFSET);
+ val &= ~MBOX_MQ_SYNC_CI_MASK;
+ val &= ~MBOX_MQ_ASYNC_CI_MASK;
+ hinic3_hwif_write_reg(mbox->hwdev->hwif, MBOX_MQ_CI_OFFSET, val);
+
+ return 0;
+}
+
+static void hinic3_uninit_mbox_dma_queue(struct hinic3_mbox *mbox)
+{
+ uninit_mbox_dma_queue(mbox->hwdev, &mbox->sync_msg_queue);
+ uninit_mbox_dma_queue(mbox->hwdev, &mbox->async_msg_queue);
+}
+
+static int alloc_mbox_msg_channel(struct hinic3_msg_channel *msg_ch)
+{
+ msg_ch->resp_msg.msg = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
+ if (!msg_ch->resp_msg.msg)
+ return -ENOMEM;
+
+ msg_ch->recv_msg.msg = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
+ if (!msg_ch->recv_msg.msg) {
+ kfree(msg_ch->resp_msg.msg);
+ return -ENOMEM;
+ }
+
+ msg_ch->resp_msg.seq_id = MBOX_SEQ_ID_MAX_VAL;
+ msg_ch->recv_msg.seq_id = MBOX_SEQ_ID_MAX_VAL;
+
+ return 0;
+}
+
+static void free_mbox_msg_channel(struct hinic3_msg_channel *msg_ch)
+{
+ kfree(msg_ch->recv_msg.msg);
+ kfree(msg_ch->resp_msg.msg);
+}
+
+static int init_mgmt_msg_channel(struct hinic3_mbox *mbox)
+{
+ int err;
+
+ err = alloc_mbox_msg_channel(&mbox->mgmt_msg);
+ if (err) {
+ dev_err(mbox->hwdev->dev, "Failed to alloc mgmt message channel\n");
+ return err;
+ }
+
+ err = hinic3_init_mbox_dma_queue(mbox);
+ if (err) {
+ dev_err(mbox->hwdev->dev, "Failed to init mbox dma queue\n");
+ free_mbox_msg_channel(&mbox->mgmt_msg);
+ return err;
+ }
+
+ return 0;
+}
+
+static void uninit_mgmt_msg_channel(struct hinic3_mbox *mbox)
+{
+ hinic3_uninit_mbox_dma_queue(mbox);
+ free_mbox_msg_channel(&mbox->mgmt_msg);
+}
+
+static int hinic3_init_func_mbox_msg_channel(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_mbox *mbox;
+ int err;
+
+ mbox = hwdev->mbox;
+ mbox->func_msg = kzalloc(sizeof(*mbox->func_msg), GFP_KERNEL);
+ if (!mbox->func_msg)
+ return -ENOMEM;
+
+ err = alloc_mbox_msg_channel(mbox->func_msg);
+ if (err)
+ goto err_free_func_msg;
+
+ return 0;
+
+err_free_func_msg:
+ kfree(mbox->func_msg);
+ mbox->func_msg = NULL;
+
+ return err;
+}
+
+static void hinic3_uninit_func_mbox_msg_channel(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_mbox *mbox = hwdev->mbox;
+
+ free_mbox_msg_channel(mbox->func_msg);
+ kfree(mbox->func_msg);
+ mbox->func_msg = NULL;
+}
+
+static void prepare_send_mbox(struct hinic3_mbox *mbox)
+{
+ struct hinic3_send_mbox *send_mbox = &mbox->send_mbox;
+
+ send_mbox->data = MBOX_AREA(mbox->hwdev->hwif);
+}
+
+static int alloc_mbox_wb_status(struct hinic3_mbox *mbox)
+{
+ struct hinic3_send_mbox *send_mbox = &mbox->send_mbox;
+ struct hinic3_hwdev *hwdev = mbox->hwdev;
+ u32 addr_h, addr_l;
+
+ send_mbox->wb_vaddr = dma_alloc_coherent(hwdev->dev,
+ MBOX_WB_STATUS_LEN,
+ &send_mbox->wb_paddr,
+ GFP_KERNEL);
+ if (!send_mbox->wb_vaddr)
+ return -ENOMEM;
+
+ addr_h = upper_32_bits(send_mbox->wb_paddr);
+ addr_l = lower_32_bits(send_mbox->wb_paddr);
+ hinic3_hwif_write_reg(hwdev->hwif, HINIC3_FUNC_CSR_MAILBOX_RESULT_H_OFF,
+ addr_h);
+ hinic3_hwif_write_reg(hwdev->hwif, HINIC3_FUNC_CSR_MAILBOX_RESULT_L_OFF,
+ addr_l);
+
+ return 0;
+}
+
+static void free_mbox_wb_status(struct hinic3_mbox *mbox)
+{
+ struct hinic3_send_mbox *send_mbox = &mbox->send_mbox;
+ struct hinic3_hwdev *hwdev = mbox->hwdev;
+
+ hinic3_hwif_write_reg(hwdev->hwif, HINIC3_FUNC_CSR_MAILBOX_RESULT_H_OFF,
+ 0);
+ hinic3_hwif_write_reg(hwdev->hwif, HINIC3_FUNC_CSR_MAILBOX_RESULT_L_OFF,
+ 0);
+
+ dma_free_coherent(hwdev->dev, MBOX_WB_STATUS_LEN,
+ send_mbox->wb_vaddr, send_mbox->wb_paddr);
+}
+
+static int hinic3_mbox_pre_init(struct hinic3_hwdev *hwdev,
+ struct hinic3_mbox *mbox)
+{
+ mbox->hwdev = hwdev;
+ mutex_init(&mbox->mbox_send_lock);
+ spin_lock_init(&mbox->mbox_lock);
+
+ mbox->workq = create_singlethread_workqueue(HINIC3_MBOX_WQ_NAME);
+ if (!mbox->workq) {
+ dev_err(hwdev->dev, "Failed to initialize MBOX workqueue\n");
+ return -ENOMEM;
+ }
+ hwdev->mbox = mbox;
+
+ return 0;
+}
+
+int hinic3_init_mbox(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_mbox *mbox;
+ int err;
+
+ mbox = kzalloc(sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ err = hinic3_mbox_pre_init(hwdev, mbox);
+ if (err)
+ goto err_free_mbox;
+
+ err = init_mgmt_msg_channel(mbox);
+ if (err)
+ goto err_destroy_workqueue;
+
+ err = hinic3_init_func_mbox_msg_channel(hwdev);
+ if (err)
+ goto err_uninit_mgmt_msg_ch;
+
+ err = alloc_mbox_wb_status(mbox);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to alloc mbox write back status\n");
+ goto err_uninit_func_mbox_msg_ch;
+ }
+
+ prepare_send_mbox(mbox);
+
+ return 0;
+
+err_uninit_func_mbox_msg_ch:
+ hinic3_uninit_func_mbox_msg_channel(hwdev);
+
+err_uninit_mgmt_msg_ch:
+ uninit_mgmt_msg_channel(mbox);
+
+err_destroy_workqueue:
+ destroy_workqueue(mbox->workq);
+
+err_free_mbox:
+ kfree(mbox);
+
+ return err;
+}
+
+void hinic3_free_mbox(struct hinic3_hwdev *hwdev)
+{
+ struct hinic3_mbox *mbox = hwdev->mbox;
+
+ destroy_workqueue(mbox->workq);
+ free_mbox_wb_status(mbox);
+ hinic3_uninit_func_mbox_msg_channel(hwdev);
+ uninit_mgmt_msg_channel(mbox);
+ kfree(mbox);
+}
+
+#define MBOX_DMA_MSG_INIT_XOR_VAL 0x5a5a5a5a
+#define MBOX_XOR_DATA_ALIGN 4
+static u32 mbox_dma_msg_xor(u32 *data, u32 msg_len)
+{
+ u32 xor = MBOX_DMA_MSG_INIT_XOR_VAL;
+ u32 dw_len = msg_len / sizeof(u32);
+ u32 i;
+
+ for (i = 0; i < dw_len; i++)
+ xor ^= data[i];
+
+ return xor;
+}
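+
+/* Worked example of the XOR above: for a two-dword payload { 0x1, 0x2 }
+ * the result is 0x5a5a5a5a ^ 0x1 ^ 0x2 = 0x5a5a5a59. The xor field
+ * presumably lets the receiver recompute the same value over the DMA
+ * buffer and reject a corrupted message.
+ */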
+
+#define MBOX_MQ_ID_MASK(mq, idx) ((idx) & ((mq)->depth - 1))
+
+static bool is_msg_queue_full(struct mbox_dma_queue *mq)
+{
+ return MBOX_MQ_ID_MASK(mq, (mq)->prod_idx + 1) ==
+ MBOX_MQ_ID_MASK(mq, (mq)->cons_idx);
+}
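+
+/* Example: with depth = 8 the index mask is 7, so prod_idx = 6 and
+ * cons_idx = 7 report the queue full with only 7 entries in use; one
+ * slot is sacrificed to tell a full ring apart from an empty one.
+ */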
+
+static int mbox_prepare_dma_entry(struct hinic3_mbox *mbox,
+ struct mbox_dma_queue *mq,
+ struct mbox_dma_msg *dma_msg,
+ const void *msg, u32 msg_len)
+{
+ u64 dma_addr, offset;
+ void *dma_vaddr;
+
+ if (is_msg_queue_full(mq)) {
+ dev_err(mbox->hwdev->dev, "Mbox sync message queue is busy, pi: %u, ci: %u\n",
+ mq->prod_idx, MBOX_MQ_ID_MASK(mq, mq->cons_idx));
+ return -EBUSY;
+ }
+
+ /* copy data to DMA buffer */
+ offset = mq->prod_idx * MBOX_MAX_BUF_SZ;
+ dma_vaddr = (u8 *)mq->dma_buf_vaddr + offset;
+ memcpy(dma_vaddr, msg, msg_len);
+ dma_addr = mq->dma_buf_paddr + offset;
+ dma_msg->dma_addr_high = cpu_to_le32(upper_32_bits(dma_addr));
+ dma_msg->dma_addr_low = cpu_to_le32(lower_32_bits(dma_addr));
+ dma_msg->msg_len = cpu_to_le32(msg_len);
+ /* The firmware reads the message in 4-byte units, hence the aligned XOR length. */
+ dma_msg->xor = cpu_to_le32(mbox_dma_msg_xor(dma_vaddr,
+ ALIGN(msg_len, MBOX_XOR_DATA_ALIGN)));
+ mq->prod_idx++;
+ mq->prod_idx = MBOX_MQ_ID_MASK(mq, mq->prod_idx);
+
+ return 0;
+}
+
+static int mbox_prepare_dma_msg(struct hinic3_mbox *mbox,
+ enum mbox_msg_ack_type ack_type,
+ struct mbox_dma_msg *dma_msg, const void *msg,
+ u32 msg_len)
+{
+ struct mbox_dma_queue *mq;
+ u32 val;
+
+ val = hinic3_hwif_read_reg(mbox->hwdev->hwif, MBOX_MQ_CI_OFFSET);
+ if (ack_type == MBOX_MSG_ACK) {
+ mq = &mbox->sync_msg_queue;
+ mq->cons_idx = MBOX_MQ_CI_GET(val, SYNC);
+ } else {
+ mq = &mbox->async_msg_queue;
+ mq->cons_idx = MBOX_MQ_CI_GET(val, ASYNC);
+ }
+
+ return mbox_prepare_dma_entry(mbox, mq, dma_msg, msg, msg_len);
+}
+
+static void clear_mbox_status(struct hinic3_send_mbox *mbox)
+{
+ __be64 *wb_status = mbox->wb_vaddr;
+
+ *wb_status = 0;
+ /* ensure the cleared write back status reaches memory before HW is triggered */
+ wmb();
+}
+
+static void mbox_dword_write(const void *src, void __iomem *dst, u32 count)
+{
+ const __le32 *src32 = src;
+ u32 __iomem *dst32 = dst;
+ u32 i;
+
+ /* Data written to the mbox is arranged in structs with little-endian
+ * fields, but every dword (32 bits) must be byte-swapped when written
+ * to HW, since the HW will swap it again.
+ */
+ for (i = 0; i < count; i++)
+ __raw_writel(swab32((__force __u32)src32[i]), dst32 + i);
+}
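+
+/* Example of the double swap: on a little-endian host a field holding
+ * cpu_to_le32(0x11223344) is read back as 0x11223344, goes out as
+ * swab32() = 0x44332211, and the HW swap restores 0x11223344 on the
+ * device side.
+ */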
+
+static void mbox_copy_header(struct hinic3_hwdev *hwdev,
+ struct hinic3_send_mbox *mbox, __le64 *header)
+{
+ mbox_dword_write(header, mbox->data, MBOX_HEADER_SZ / sizeof(__le32));
+}
+
+static void mbox_copy_send_data(struct hinic3_hwdev *hwdev,
+ struct hinic3_send_mbox *mbox, void *seg,
+ u32 seg_len)
+{
+ u32 __iomem *dst = (u32 __iomem *)(mbox->data + MBOX_HEADER_SZ);
+ u32 count, leftover, last_dword;
+ const __le32 *src = seg;
+
+ count = seg_len / sizeof(u32);
+ leftover = seg_len % sizeof(u32);
+ if (count > 0)
+ mbox_dword_write(src, dst, count);
+
+ if (leftover > 0) {
+ last_dword = 0;
+ memcpy(&last_dword, src + count, leftover);
+ mbox_dword_write(&last_dword, dst + count, 1);
+ }
+}
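+
+/* Example: a 10-byte segment is written as two full dwords plus one
+ * dword whose two trailing bytes are zero padding from last_dword; the
+ * padding is harmless since the SEG_LEN header field carries the real
+ * length.
+ */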
+
+static void write_mbox_msg_attr(struct hinic3_mbox *mbox,
+ u16 dst_func, u16 dst_aeqn, u32 seg_len)
+{
+ struct hinic3_hwif *hwif = mbox->hwdev->hwif;
+ u32 mbox_int, mbox_ctrl, tx_size;
+
+ tx_size = ALIGN(seg_len + MBOX_HEADER_SZ, MBOX_SEG_LEN_ALIGN) >> 2;
+
+ mbox_int = MBOX_INT_SET(dst_aeqn, DST_AEQN) |
+ MBOX_INT_SET(0, STAT_DMA) |
+ MBOX_INT_SET(tx_size, TX_SIZE) |
+ MBOX_INT_SET(0, STAT_DMA_SO_RO) |
+ MBOX_INT_SET(1, WB_EN);
+
+ mbox_ctrl = MBOX_CTRL_SET(1, TX_STATUS) |
+ MBOX_CTRL_SET(0, TRIGGER_AEQE) |
+ MBOX_CTRL_SET(dst_func, DST_FUNC);
+
+ hinic3_hwif_write_reg(hwif, HINIC3_FUNC_CSR_MAILBOX_INT_OFF, mbox_int);
+ hinic3_hwif_write_reg(hwif, HINIC3_FUNC_CSR_MAILBOX_CONTROL_OFF,
+ mbox_ctrl);
+}
+
+static u16 get_mbox_status(const struct hinic3_send_mbox *mbox)
+{
+ __be64 *wb_status = mbox->wb_vaddr;
+ u64 wb_val;
+
+ wb_val = be64_to_cpu(*wb_status);
+ /* ensure the status is read before it is checked */
+ rmb();
+
+ return wb_val & MBOX_WB_STATUS_ERRCODE_MASK;
+}
+
+static enum hinic3_wait_return check_mbox_wb_status(void *priv_data)
+{
+ struct hinic3_mbox *mbox = priv_data;
+ u16 wb_status;
+
+ wb_status = get_mbox_status(&mbox->send_mbox);
+
+ return MBOX_STATUS_FINISHED(wb_status) ?
+ HINIC3_WAIT_PROCESS_CPL : HINIC3_WAIT_PROCESS_WAITING;
+}
+
+static int send_mbox_seg(struct hinic3_mbox *mbox, __le64 header,
+ u16 dst_func, void *seg, u32 seg_len, void *msg_info)
+{
+ struct hinic3_send_mbox *send_mbox = &mbox->send_mbox;
+ struct hinic3_hwdev *hwdev = mbox->hwdev;
+ u8 num_aeqs = hwdev->hwif->attr.num_aeqs;
+ enum mbox_msg_direction_type dir;
+ u16 dst_aeqn, wb_status, errcode;
+ int err;
+
+ /* mbox to mgmt cpu, hardware doesn't care about dst aeq id */
+ if (num_aeqs > MBOX_MSG_AEQ_FOR_MBOX) {
+ dir = MBOX_MSG_HEADER_GET(header, DIRECTION);
+ dst_aeqn = (dir == MBOX_MSG_SEND) ?
+ MBOX_MSG_AEQ_FOR_EVENT : MBOX_MSG_AEQ_FOR_MBOX;
+ } else {
+ dst_aeqn = 0;
+ }
+
+ clear_mbox_status(send_mbox);
+ mbox_copy_header(hwdev, send_mbox, &header);
+ mbox_copy_send_data(hwdev, send_mbox, seg, seg_len);
+ write_mbox_msg_attr(mbox, dst_func, dst_aeqn, seg_len);
+
+ err = hinic3_wait_for_timeout(mbox, check_mbox_wb_status,
+ MBOX_MSG_POLLING_TIMEOUT_MS,
+ USEC_PER_MSEC);
+ wb_status = get_mbox_status(send_mbox);
+ if (err) {
+ dev_err(hwdev->dev, "Send mailbox segment timeout, wb status: 0x%x\n",
+ wb_status);
+ return err;
+ }
+
+ if (!MBOX_STATUS_SUCCESS(wb_status)) {
+ dev_err(hwdev->dev,
+ "Send mailbox segment to function %u error, wb status: 0x%x\n",
+ dst_func, wb_status);
+ errcode = MBOX_STATUS_ERRCODE(wb_status);
+ return errcode ? errcode : -EFAULT;
+ }
+
+ return 0;
+}
+
+static int send_mbox_msg(struct hinic3_mbox *mbox, u8 mod, u16 cmd,
+ const void *msg, u32 msg_len, u16 dst_func,
+ enum mbox_msg_direction_type direction,
+ enum mbox_msg_ack_type ack_type,
+ struct mbox_msg_info *msg_info)
+{
+ enum mbox_msg_data_type data_type = MBOX_MSG_DATA_INLINE;
+ struct hinic3_hwdev *hwdev = mbox->hwdev;
+ struct mbox_dma_msg dma_msg;
+ u32 seg_len = MBOX_SEG_LEN;
+ __le64 header = 0;
+ u32 seq_id = 0;
+ u16 rsp_aeq_id;
+ u8 *msg_seg;
+ int err = 0;
+ u32 left;
+
+ if (hwdev->hwif->attr.num_aeqs > MBOX_MSG_AEQ_FOR_MBOX)
+ rsp_aeq_id = MBOX_MSG_AEQ_FOR_MBOX;
+ else
+ rsp_aeq_id = 0;
+
+ if (dst_func == MBOX_MGMT_FUNC_ID &&
+ !(hwdev->features[0] & MBOX_COMM_F_MBOX_SEGMENT)) {
+ err = mbox_prepare_dma_msg(mbox, ack_type, &dma_msg,
+ msg, msg_len);
+ if (err)
+ goto err_send;
+
+ msg = &dma_msg;
+ msg_len = sizeof(dma_msg);
+ data_type = MBOX_MSG_DATA_DMA;
+ }
+
+ msg_seg = (u8 *)msg;
+ left = msg_len;
+
+ header = cpu_to_le64(MBOX_MSG_HEADER_SET(msg_len, MSG_LEN) |
+ MBOX_MSG_HEADER_SET(mod, MODULE) |
+ MBOX_MSG_HEADER_SET(seg_len, SEG_LEN) |
+ MBOX_MSG_HEADER_SET(ack_type, NO_ACK) |
+ MBOX_MSG_HEADER_SET(data_type, DATA_TYPE) |
+ MBOX_MSG_HEADER_SET(MBOX_SEQ_ID_START_VAL, SEQID) |
+ MBOX_MSG_HEADER_SET(direction, DIRECTION) |
+ MBOX_MSG_HEADER_SET(cmd, CMD) |
+ MBOX_MSG_HEADER_SET(msg_info->msg_id, MSG_ID) |
+ MBOX_MSG_HEADER_SET(rsp_aeq_id, AEQ_ID) |
+ MBOX_MSG_HEADER_SET(MBOX_MSG_FROM_MBOX, SOURCE) |
+ MBOX_MSG_HEADER_SET(!!msg_info->status, STATUS));
+
+ while (!(MBOX_MSG_HEADER_GET(header, LAST))) {
+ if (left <= MBOX_SEG_LEN) {
+ header &= cpu_to_le64(~MBOX_MSG_HEADER_SEG_LEN_MASK);
+ header |=
+ cpu_to_le64(MBOX_MSG_HEADER_SET(left, SEG_LEN) |
+ MBOX_MSG_HEADER_SET(1, LAST));
+ seg_len = left;
+ }
+
+ err = send_mbox_seg(mbox, header, dst_func, msg_seg,
+ seg_len, msg_info);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to send mbox seg, seq_id=0x%llx\n",
+ MBOX_MSG_HEADER_GET(header, SEQID));
+ goto err_send;
+ }
+
+ left -= MBOX_SEG_LEN;
+ msg_seg += MBOX_SEG_LEN;
+ seq_id++;
+ header &= cpu_to_le64(~MBOX_MSG_HEADER_SEG_LEN_MASK);
+ header |= cpu_to_le64(MBOX_MSG_HEADER_SET(seq_id, SEQID));
+ }
+
+err_send:
+ return err;
+}
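+
+/* Sketch of the segmentation loop, assuming MBOX_SEG_LEN is 48 bytes
+ * purely for illustration: a 100-byte inline message would be sent as
+ * segments of 48, 48 and 4 bytes with SEQID 0, 1 and 2, and LAST set
+ * only on the final segment.
+ */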
+
+static void set_mbox_to_func_event(struct hinic3_mbox *mbox,
+ enum mbox_event_state event_flag)
+{
+ spin_lock(&mbox->mbox_lock);
+ mbox->event_flag = event_flag;
+ spin_unlock(&mbox->mbox_lock);
+}
+
+static enum hinic3_wait_return check_mbox_msg_finish(void *priv_data)
+{
+ struct hinic3_mbox *mbox = priv_data;
+
+ return (mbox->event_flag == MBOX_EVENT_SUCCESS) ?
+ HINIC3_WAIT_PROCESS_CPL : HINIC3_WAIT_PROCESS_WAITING;
+}
+
+static int wait_mbox_msg_completion(struct hinic3_mbox *mbox,
+ u32 timeout)
+{
+ u32 wait_time;
+ int err;
+
+ wait_time = (timeout != 0) ? timeout : MBOX_COMP_POLLING_TIMEOUT_MS;
+ err = hinic3_wait_for_timeout(mbox, check_mbox_msg_finish,
+ wait_time, USEC_PER_MSEC);
+ if (err) {
+ set_mbox_to_func_event(mbox, MBOX_EVENT_TIMEOUT);
+ return err;
+ }
+ set_mbox_to_func_event(mbox, MBOX_EVENT_END);
+
+ return 0;
+}
+
int hinic3_send_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
const struct mgmt_msg_params *msg_params)
{
- /* Completed by later submission due to LoC limit. */
- return -EFAULT;
+ struct hinic3_mbox *mbox = hwdev->mbox;
+ struct mbox_msg_info msg_info = {};
+ struct hinic3_msg_desc *msg_desc;
+ u32 msg_len;
+ int err;
+
+ /* expect response message */
+ msg_desc = get_mbox_msg_desc(mbox, MBOX_MSG_RESP, MBOX_MGMT_FUNC_ID);
+ mutex_lock(&mbox->mbox_send_lock);
+ msg_info.msg_id = (mbox->send_msg_id + 1) & 0xF;
+ mbox->send_msg_id = msg_info.msg_id;
+ set_mbox_to_func_event(mbox, MBOX_EVENT_START);
+
+ err = send_mbox_msg(mbox, mod, cmd, msg_params->buf_in,
+ msg_params->in_size, MBOX_MGMT_FUNC_ID,
+ MBOX_MSG_SEND, MBOX_MSG_ACK, &msg_info);
+ if (err) {
+ dev_err(hwdev->dev, "Send mailbox mod %u, cmd %u failed, msg_id: %u, err: %d\n",
+ mod, cmd, msg_info.msg_id, err);
+ set_mbox_to_func_event(mbox, MBOX_EVENT_FAIL);
+ goto err_send;
+ }
+
+ if (wait_mbox_msg_completion(mbox, msg_params->timeout_ms)) {
+ dev_err(hwdev->dev,
+ "Send mbox msg timeout, msg_id: %u\n", msg_info.msg_id);
+ err = -ETIMEDOUT;
+ goto err_send;
+ }
+
+ if (mod != msg_desc->mod || cmd != le16_to_cpu(msg_desc->cmd)) {
+ dev_err(hwdev->dev,
+ "Invalid response mbox message, mod: 0x%x, cmd: 0x%x, expect mod: 0x%x, cmd: 0x%x\n",
+ msg_desc->mod, le16_to_cpu(msg_desc->cmd), mod, cmd);
+ err = -EFAULT;
+ goto err_send;
+ }
+
+ if (msg_desc->msg_info.status) {
+ err = msg_desc->msg_info.status;
+ goto err_send;
+ }
+
+ if (msg_params->buf_out) {
+ msg_len = le16_to_cpu(msg_desc->msg_len);
+ if (msg_len != msg_params->expected_out_size) {
+ dev_err(hwdev->dev,
+ "Invalid response mbox message length: %u for mod %d cmd %u, expected length: %u\n",
+ msg_len, mod, cmd,
+ msg_params->expected_out_size);
+ err = -EFAULT;
+ goto err_send;
+ }
+
+ memcpy(msg_params->buf_out, msg_desc->msg, msg_len);
+ }
+
+err_send:
+ mutex_unlock(&mbox->mbox_send_lock);
+
+ return err;
+}
+
+int hinic3_send_mbox_to_mgmt_no_ack(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
+ const struct mgmt_msg_params *msg_params)
+{
+ struct hinic3_mbox *mbox = hwdev->mbox;
+ struct mbox_msg_info msg_info = {};
+ int err;
+
+ mutex_lock(&mbox->mbox_send_lock);
+ err = send_mbox_msg(mbox, mod, cmd, msg_params->buf_in,
+ msg_params->in_size, MBOX_MGMT_FUNC_ID,
+ MBOX_MSG_SEND, MBOX_MSG_NO_ACK, &msg_info);
+ if (err)
+ dev_err(hwdev->dev, "Send mailbox no ack failed\n");
+
+ mutex_unlock(&mbox->mbox_send_lock);
+
+ return err;
}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h
index d7a6c37b7eff..e71629e95086 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h
@@ -8,8 +8,134 @@
#include <linux/mutex.h>
struct hinic3_hwdev;
+struct mgmt_msg_params;
+
+#define MBOX_MSG_HEADER_SRC_GLB_FUNC_IDX_MASK GENMASK_ULL(12, 0)
+#define MBOX_MSG_HEADER_STATUS_MASK BIT_ULL(13)
+#define MBOX_MSG_HEADER_SOURCE_MASK BIT_ULL(15)
+#define MBOX_MSG_HEADER_AEQ_ID_MASK GENMASK_ULL(17, 16)
+#define MBOX_MSG_HEADER_MSG_ID_MASK GENMASK_ULL(21, 18)
+#define MBOX_MSG_HEADER_CMD_MASK GENMASK_ULL(31, 22)
+#define MBOX_MSG_HEADER_MSG_LEN_MASK GENMASK_ULL(42, 32)
+#define MBOX_MSG_HEADER_MODULE_MASK GENMASK_ULL(47, 43)
+#define MBOX_MSG_HEADER_SEG_LEN_MASK GENMASK_ULL(53, 48)
+#define MBOX_MSG_HEADER_NO_ACK_MASK BIT_ULL(54)
+#define MBOX_MSG_HEADER_DATA_TYPE_MASK BIT_ULL(55)
+#define MBOX_MSG_HEADER_SEQID_MASK GENMASK_ULL(61, 56)
+#define MBOX_MSG_HEADER_LAST_MASK BIT_ULL(62)
+#define MBOX_MSG_HEADER_DIRECTION_MASK BIT_ULL(63)
+
+#define MBOX_MSG_HEADER_SET(val, member) \
+ FIELD_PREP(MBOX_MSG_HEADER_##member##_MASK, val)
+#define MBOX_MSG_HEADER_GET(val, member) \
+ FIELD_GET(MBOX_MSG_HEADER_##member##_MASK, le64_to_cpu(val))
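+
+/* e.g. MBOX_MSG_HEADER_SET(1, LAST) prepares bit 62 of the host-order
+ * header, while MBOX_MSG_HEADER_GET(hdr, LAST) extracts the same bit
+ * from a little-endian header.
+ */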
+
+/* Identifies whether a segment belongs to a message or to a response. A VF
+ * is only expected to send messages and receive responses. A PF driver
+ * could receive messages and send responses.
+ */
+enum mbox_msg_direction_type {
+ MBOX_MSG_SEND = 0,
+ MBOX_MSG_RESP = 1,
+};
+
+/* Indicates if mbox message expects a response (ack) or not */
+enum mbox_msg_ack_type {
+ MBOX_MSG_ACK = 0,
+ MBOX_MSG_NO_ACK = 1,
+};
+
+enum mbox_msg_data_type {
+ MBOX_MSG_DATA_INLINE = 0,
+ MBOX_MSG_DATA_DMA = 1,
+};
+
+enum mbox_msg_src_type {
+ MBOX_MSG_FROM_MBOX = 1,
+};
+
+enum mbox_msg_aeq_type {
+ MBOX_MSG_AEQ_FOR_EVENT = 0,
+ MBOX_MSG_AEQ_FOR_MBOX = 1,
+};
+
+#define HINIC3_MBOX_WQ_NAME "hinic3_mbox"
+
+struct mbox_msg_info {
+ u8 msg_id;
+ u8 status;
+};
+
+struct hinic3_msg_desc {
+ u8 *msg;
+ __le16 msg_len;
+ u8 seq_id;
+ u8 mod;
+ __le16 cmd;
+ struct mbox_msg_info msg_info;
+};
+
+struct hinic3_msg_channel {
+ struct hinic3_msg_desc resp_msg;
+ struct hinic3_msg_desc recv_msg;
+};
+
+struct hinic3_send_mbox {
+ u8 __iomem *data;
+ void *wb_vaddr;
+ dma_addr_t wb_paddr;
+};
+
+enum mbox_event_state {
+ MBOX_EVENT_START = 0,
+ MBOX_EVENT_FAIL = 1,
+ MBOX_EVENT_SUCCESS = 2,
+ MBOX_EVENT_TIMEOUT = 3,
+ MBOX_EVENT_END = 4,
+};
+
+struct mbox_dma_msg {
+ __le32 xor;
+ __le32 dma_addr_high;
+ __le32 dma_addr_low;
+ __le32 msg_len;
+ __le64 rsvd;
+};
+
+struct mbox_dma_queue {
+ void *dma_buf_vaddr;
+ dma_addr_t dma_buf_paddr;
+ u16 depth;
+ u16 prod_idx;
+ u16 cons_idx;
+};
+
+struct hinic3_mbox {
+ struct hinic3_hwdev *hwdev;
+ /* lock for send mbox message and ack message */
+ struct mutex mbox_send_lock;
+ struct hinic3_send_mbox send_mbox;
+ struct mbox_dma_queue sync_msg_queue;
+ struct mbox_dma_queue async_msg_queue;
+ struct workqueue_struct *workq;
+ /* driver and MGMT CPU */
+ struct hinic3_msg_channel mgmt_msg;
+ /* VF to PF */
+ struct hinic3_msg_channel *func_msg;
+ u8 send_msg_id;
+ enum mbox_event_state event_flag;
+ /* lock for mbox event flag */
+ spinlock_t mbox_lock;
+};
+
+void hinic3_mbox_func_aeqe_handler(struct hinic3_hwdev *hwdev, u8 *header,
+ u8 size);
+int hinic3_init_mbox(struct hinic3_hwdev *hwdev);
+void hinic3_free_mbox(struct hinic3_hwdev *hwdev);
int hinic3_send_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
const struct mgmt_msg_params *msg_params);
+int hinic3_send_mbox_to_mgmt_no_ack(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd,
+ const struct mgmt_msg_params *msg_params);
#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c
new file mode 100644
index 000000000000..c38d10cd7fac
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include "hinic3_eqs.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_mbox.h"
+#include "hinic3_mgmt.h"
+
+void hinic3_flush_mgmt_workq(struct hinic3_hwdev *hwdev)
+{
+ if (hwdev->aeqs)
+ flush_workqueue(hwdev->aeqs->workq);
+}
+
+void hinic3_mgmt_msg_aeqe_handler(struct hinic3_hwdev *hwdev, u8 *header,
+ u8 size)
+{
+ if (MBOX_MSG_HEADER_GET(*(__force __le64 *)header, SOURCE) ==
+ MBOX_MSG_FROM_MBOX)
+ hinic3_mbox_func_aeqe_handler(hwdev, header, size);
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h
index 4edabeb32112..bbef3b32a6ec 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h
@@ -9,5 +9,7 @@
struct hinic3_hwdev;
void hinic3_flush_mgmt_workq(struct hinic3_hwdev *hwdev);
+void hinic3_mgmt_msg_aeqe_handler(struct hinic3_hwdev *hwdev,
+ u8 *header, u8 size);
#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h
index c4434efdc7f7..6cc0345c39e4 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h
@@ -56,12 +56,105 @@ struct l2nic_cmd_update_mac {
u8 new_mac[ETH_ALEN];
};
+struct l2nic_cmd_set_ci_attr {
+ struct mgmt_msg_head msg_head;
+ u16 func_idx;
+ u8 dma_attr_off;
+ u8 pending_limit;
+ u8 coalescing_time;
+ u8 intr_en;
+ u16 intr_idx;
+ u32 l2nic_sqn;
+ u32 rsvd;
+ u64 ci_addr;
+};
+
+struct l2nic_cmd_clear_qp_resource {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u16 rsvd1;
+};
+
struct l2nic_cmd_force_pkt_drop {
struct mgmt_msg_head msg_head;
u8 port;
u8 rsvd1[3];
};
+struct l2nic_cmd_set_vport_state {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u16 rsvd1;
+ /* 0--disable, 1--enable */
+ u8 state;
+ u8 rsvd2[3];
+};
+
+struct l2nic_cmd_set_dcb_state {
+ struct mgmt_msg_head head;
+ u16 func_id;
+ /* 0 - get dcb state, 1 - set dcb state */
+ u8 op_code;
+ /* 0 - disable, 1 - enable dcb */
+ u8 state;
+ /* 0 - disable, 1 - enable dcb */
+ u8 port_state;
+ u8 rsvd[7];
+};
+
+#define L2NIC_RSS_TYPE_VALID_MASK BIT(23)
+#define L2NIC_RSS_TYPE_TCP_IPV6_EXT_MASK BIT(24)
+#define L2NIC_RSS_TYPE_IPV6_EXT_MASK BIT(25)
+#define L2NIC_RSS_TYPE_TCP_IPV6_MASK BIT(26)
+#define L2NIC_RSS_TYPE_IPV6_MASK BIT(27)
+#define L2NIC_RSS_TYPE_TCP_IPV4_MASK BIT(28)
+#define L2NIC_RSS_TYPE_IPV4_MASK BIT(29)
+#define L2NIC_RSS_TYPE_UDP_IPV6_MASK BIT(30)
+#define L2NIC_RSS_TYPE_UDP_IPV4_MASK BIT(31)
+#define L2NIC_RSS_TYPE_SET(val, member) \
+ FIELD_PREP(L2NIC_RSS_TYPE_##member##_MASK, val)
+#define L2NIC_RSS_TYPE_GET(val, member) \
+ FIELD_GET(L2NIC_RSS_TYPE_##member##_MASK, val)
+
+#define L2NIC_RSS_INDIR_SIZE 256
+#define L2NIC_RSS_KEY_SIZE 40
+
+/* IEEE 802.1Qaz std */
+#define L2NIC_DCB_COS_MAX 0x8
+
+struct l2nic_cmd_set_rss_ctx_tbl {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u16 rsvd1;
+ u32 context;
+};
+
+struct l2nic_cmd_cfg_rss_engine {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u8 opcode;
+ u8 hash_engine;
+ u8 rsvd1[4];
+};
+
+struct l2nic_cmd_cfg_rss_hash_key {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u8 opcode;
+ u8 rsvd1;
+ u8 key[L2NIC_RSS_KEY_SIZE];
+};
+
+struct l2nic_cmd_cfg_rss {
+ struct mgmt_msg_head msg_head;
+ u16 func_id;
+ u8 rss_en;
+ u8 rq_priority_number;
+ u8 prio_tc[L2NIC_DCB_COS_MAX];
+ u16 num_qps;
+ u16 rsvd1;
+};
+
/* Commands between NIC to fw */
enum l2nic_cmd {
/* FUNC CFG */
@@ -82,6 +175,32 @@ enum l2nic_cmd {
L2NIC_CMD_MAX = 256,
};
+struct l2nic_cmd_rss_set_indir_tbl {
+ __le32 rsvd[4];
+ __le16 entry[L2NIC_RSS_INDIR_SIZE];
+};
+
+/* NIC CMDQ MODE */
+enum l2nic_ucode_cmd {
+ L2NIC_UCODE_CMD_MODIFY_QUEUE_CTX = 0,
+ L2NIC_UCODE_CMD_CLEAN_QUEUE_CTX = 1,
+ L2NIC_UCODE_CMD_SET_RSS_INDIR_TBL = 4,
+};
+
+/* hilink mac group command */
+enum mag_cmd {
+ MAG_CMD_GET_LINK_STATUS = 7,
+};
+
+/* firmware also uses this cmd to report link events to the driver */
+struct mag_cmd_get_link_status {
+ struct mgmt_msg_head head;
+ u8 port_id;
+ /* 0:link down 1:link up */
+ u8 status;
+ u8 rsvd0[2];
+};
+
enum hinic3_nic_feature_cap {
HINIC3_NIC_F_CSUM = BIT(0),
HINIC3_NIC_F_SCTP_CRC = BIT(1),
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
index 71104a6b8bef..0fa3c7900225 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c
@@ -8,19 +8,437 @@
#include "hinic3_nic_cfg.h"
#include "hinic3_nic_dev.h"
#include "hinic3_nic_io.h"
+#include "hinic3_rss.h"
#include "hinic3_rx.h"
#include "hinic3_tx.h"
+/* Try to change the number of IRQs to the target number and return the
+ * actual number of IRQs obtained.
+ */
+static u16 hinic3_qp_irq_change(struct net_device *netdev,
+ u16 dst_num_qp_irq)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct msix_entry *qps_msix_entries;
+ u16 resp_irq_num, irq_num_gap, i;
+ u16 idx;
+ int err;
+
+ qps_msix_entries = nic_dev->qps_msix_entries;
+ if (dst_num_qp_irq > nic_dev->num_qp_irq) {
+ irq_num_gap = dst_num_qp_irq - nic_dev->num_qp_irq;
+ err = hinic3_alloc_irqs(nic_dev->hwdev, irq_num_gap,
+ &qps_msix_entries[nic_dev->num_qp_irq],
+ &resp_irq_num);
+ if (err) {
+ netdev_err(netdev, "Failed to alloc irqs\n");
+ return nic_dev->num_qp_irq;
+ }
+
+ nic_dev->num_qp_irq += resp_irq_num;
+ } else if (dst_num_qp_irq < nic_dev->num_qp_irq) {
+ irq_num_gap = nic_dev->num_qp_irq - dst_num_qp_irq;
+ for (i = 0; i < irq_num_gap; i++) {
+ idx = (nic_dev->num_qp_irq - i) - 1;
+ hinic3_free_irq(nic_dev->hwdev,
+ qps_msix_entries[idx].vector);
+ qps_msix_entries[idx].vector = 0;
+ qps_msix_entries[idx].entry = 0;
+ }
+ nic_dev->num_qp_irq = dst_num_qp_irq;
+ }
+
+ return nic_dev->num_qp_irq;
+}
+
+static void hinic3_config_num_qps(struct net_device *netdev,
+ struct hinic3_dyna_txrxq_params *q_params)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 alloc_num_irq, cur_num_irq;
+ u16 dst_num_irq;
+
+ if (!test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags))
+ q_params->num_qps = 1;
+
+ if (nic_dev->num_qp_irq >= q_params->num_qps)
+ goto out;
+
+ cur_num_irq = nic_dev->num_qp_irq;
+
+ alloc_num_irq = hinic3_qp_irq_change(netdev, q_params->num_qps);
+ if (alloc_num_irq < q_params->num_qps) {
+ q_params->num_qps = alloc_num_irq;
+ netdev_warn(netdev, "Can not get enough irqs, adjust num_qps to %u\n",
+ q_params->num_qps);
+
+ /* The current irq may be in use, we must keep it */
+ dst_num_irq = max_t(u16, cur_num_irq, q_params->num_qps);
+ hinic3_qp_irq_change(netdev, dst_num_irq);
+ }
+
+out:
+ netdev_dbg(netdev, "Using %u queue pairs\n",
+ q_params->num_qps);
+}
+
+static int hinic3_setup_num_qps(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ nic_dev->num_qp_irq = 0;
+
+ nic_dev->qps_msix_entries = kcalloc(nic_dev->max_qps,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!nic_dev->qps_msix_entries)
+ return -ENOMEM;
+
+ hinic3_config_num_qps(netdev, &nic_dev->q_params);
+
+ return 0;
+}
+
+static void hinic3_destroy_num_qps(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 i;
+
+ for (i = 0; i < nic_dev->num_qp_irq; i++)
+ hinic3_free_irq(nic_dev->hwdev,
+ nic_dev->qps_msix_entries[i].vector);
+
+ kfree(nic_dev->qps_msix_entries);
+}
+
+static int hinic3_alloc_txrxq_resources(struct net_device *netdev,
+ struct hinic3_dyna_txrxq_params *q_params)
+{
+ int err;
+
+ q_params->txqs_res = kcalloc(q_params->num_qps,
+ sizeof(*q_params->txqs_res), GFP_KERNEL);
+ if (!q_params->txqs_res)
+ return -ENOMEM;
+
+ q_params->rxqs_res = kcalloc(q_params->num_qps,
+ sizeof(*q_params->rxqs_res), GFP_KERNEL);
+ if (!q_params->rxqs_res) {
+ err = -ENOMEM;
+ goto err_free_txqs_res_arr;
+ }
+
+ q_params->irq_cfg = kcalloc(q_params->num_qps,
+ sizeof(*q_params->irq_cfg), GFP_KERNEL);
+ if (!q_params->irq_cfg) {
+ err = -ENOMEM;
+ goto err_free_rxqs_res_arr;
+ }
+
+ err = hinic3_alloc_txqs_res(netdev, q_params->num_qps,
+ q_params->sq_depth, q_params->txqs_res);
+ if (err) {
+ netdev_err(netdev, "Failed to alloc txqs resource\n");
+ goto err_free_irq_cfg;
+ }
+
+ err = hinic3_alloc_rxqs_res(netdev, q_params->num_qps,
+ q_params->rq_depth, q_params->rxqs_res);
+ if (err) {
+ netdev_err(netdev, "Failed to alloc rxqs resource\n");
+ goto err_free_txqs_res;
+ }
+
+ return 0;
+
+err_free_txqs_res:
+ hinic3_free_txqs_res(netdev, q_params->num_qps, q_params->sq_depth,
+ q_params->txqs_res);
+err_free_irq_cfg:
+ kfree(q_params->irq_cfg);
+ q_params->irq_cfg = NULL;
+err_free_rxqs_res_arr:
+ kfree(q_params->rxqs_res);
+ q_params->rxqs_res = NULL;
+err_free_txqs_res_arr:
+ kfree(q_params->txqs_res);
+ q_params->txqs_res = NULL;
+
+ return err;
+}
+
+static void hinic3_free_txrxq_resources(struct net_device *netdev,
+ struct hinic3_dyna_txrxq_params *q_params)
+{
+ hinic3_free_rxqs_res(netdev, q_params->num_qps, q_params->rq_depth,
+ q_params->rxqs_res);
+ hinic3_free_txqs_res(netdev, q_params->num_qps, q_params->sq_depth,
+ q_params->txqs_res);
+
+ kfree(q_params->irq_cfg);
+ q_params->irq_cfg = NULL;
+
+ kfree(q_params->rxqs_res);
+ q_params->rxqs_res = NULL;
+
+ kfree(q_params->txqs_res);
+ q_params->txqs_res = NULL;
+}
+
+static int hinic3_configure_txrxqs(struct net_device *netdev,
+ struct hinic3_dyna_txrxq_params *q_params)
+{
+ int err;
+
+ err = hinic3_configure_txqs(netdev, q_params->num_qps,
+ q_params->sq_depth, q_params->txqs_res);
+ if (err) {
+ netdev_err(netdev, "Failed to configure txqs\n");
+ return err;
+ }
+
+ err = hinic3_configure_rxqs(netdev, q_params->num_qps,
+ q_params->rq_depth, q_params->rxqs_res);
+ if (err) {
+ netdev_err(netdev, "Failed to configure rxqs\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int hinic3_configure(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ netdev->min_mtu = HINIC3_MIN_MTU_SIZE;
+ netdev->max_mtu = HINIC3_MAX_JUMBO_FRAME_SIZE;
+ err = hinic3_set_port_mtu(netdev, netdev->mtu);
+ if (err) {
+ netdev_err(netdev, "Failed to set mtu\n");
+ return err;
+ }
+
+ /* Ensure DCB is disabled */
+ hinic3_sync_dcb_state(nic_dev->hwdev, 1, 0);
+
+ if (test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags)) {
+ err = hinic3_rss_init(netdev);
+ if (err) {
+ netdev_err(netdev, "Failed to init rss\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void hinic3_remove_configure(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ if (test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags))
+ hinic3_rss_uninit(netdev);
+}
+
+static int hinic3_alloc_channel_resources(struct net_device *netdev,
+ struct hinic3_dyna_qp_params *qp_params,
+ struct hinic3_dyna_txrxq_params *trxq_params)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ qp_params->num_qps = trxq_params->num_qps;
+ qp_params->sq_depth = trxq_params->sq_depth;
+ qp_params->rq_depth = trxq_params->rq_depth;
+
+ err = hinic3_alloc_qps(nic_dev, qp_params);
+ if (err) {
+ netdev_err(netdev, "Failed to alloc qps\n");
+ return err;
+ }
+
+ err = hinic3_alloc_txrxq_resources(netdev, trxq_params);
+ if (err) {
+ netdev_err(netdev, "Failed to alloc txrxq resources\n");
+ hinic3_free_qps(nic_dev, qp_params);
+ return err;
+ }
+
+ return 0;
+}
+
+static void hinic3_free_channel_resources(struct net_device *netdev,
+ struct hinic3_dyna_qp_params *qp_params,
+ struct hinic3_dyna_txrxq_params *trxq_params)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ hinic3_free_txrxq_resources(netdev, trxq_params);
+ hinic3_free_qps(nic_dev, qp_params);
+}
+
+static int hinic3_open_channel(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ err = hinic3_init_qp_ctxts(nic_dev);
+ if (err) {
+ netdev_err(netdev, "Failed to init qps\n");
+ return err;
+ }
+
+ err = hinic3_configure_txrxqs(netdev, &nic_dev->q_params);
+ if (err) {
+ netdev_err(netdev, "Failed to configure txrxqs\n");
+ goto err_free_qp_ctxts;
+ }
+
+ err = hinic3_qps_irq_init(netdev);
+ if (err) {
+ netdev_err(netdev, "Failed to init txrxq irq\n");
+ goto err_free_qp_ctxts;
+ }
+
+ err = hinic3_configure(netdev);
+ if (err) {
+ netdev_err(netdev, "Failed to init txrxq irq\n");
+ goto err_uninit_qps_irq;
+ }
+
+ return 0;
+
+err_uninit_qps_irq:
+ hinic3_qps_irq_uninit(netdev);
+err_free_qp_ctxts:
+ hinic3_free_qp_ctxts(nic_dev);
+
+ return err;
+}
+
+static void hinic3_close_channel(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ hinic3_remove_configure(netdev);
+ hinic3_qps_irq_uninit(netdev);
+ hinic3_free_qp_ctxts(nic_dev);
+}
+
+static int hinic3_vport_up(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ bool link_status_up;
+ u16 glb_func_id;
+ int err;
+
+ glb_func_id = hinic3_global_func_id(nic_dev->hwdev);
+ err = hinic3_set_vport_enable(nic_dev->hwdev, glb_func_id, true);
+ if (err) {
+ netdev_err(netdev, "Failed to enable vport\n");
+ goto err_flush_qps_res;
+ }
+
+ err = netif_set_real_num_queues(netdev, nic_dev->q_params.num_qps,
+ nic_dev->q_params.num_qps);
+ if (err) {
+ netdev_err(netdev, "Failed to set real number of queues\n");
+ goto err_flush_qps_res;
+ }
+ netif_tx_start_all_queues(netdev);
+
+ err = hinic3_get_link_status(nic_dev->hwdev, &link_status_up);
+ if (!err && link_status_up)
+ netif_carrier_on(netdev);
+
+ return 0;
+
+err_flush_qps_res:
+ hinic3_flush_qps_res(nic_dev->hwdev);
+ /* wait to guarantee that no packets will be sent to host */
+ msleep(100);
+
+ return err;
+}
+
+static void hinic3_vport_down(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 glb_func_id;
+
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ glb_func_id = hinic3_global_func_id(nic_dev->hwdev);
+ hinic3_set_vport_enable(nic_dev->hwdev, glb_func_id, false);
+
+ hinic3_flush_txqs(netdev);
+ /* wait to guarantee that no packets will be sent to host */
+ msleep(100);
+ hinic3_flush_qps_res(nic_dev->hwdev);
+}
+
static int hinic3_open(struct net_device *netdev)
{
- /* Completed by later submission due to LoC limit. */
- return -EFAULT;
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_dyna_qp_params qp_params;
+ int err;
+
+ err = hinic3_init_nicio_res(nic_dev);
+ if (err) {
+ netdev_err(netdev, "Failed to init nicio resources\n");
+ return err;
+ }
+
+ err = hinic3_setup_num_qps(netdev);
+ if (err) {
+ netdev_err(netdev, "Failed to setup num_qps\n");
+ goto err_free_nicio_res;
+ }
+
+ err = hinic3_alloc_channel_resources(netdev, &qp_params,
+ &nic_dev->q_params);
+ if (err)
+ goto err_destroy_num_qps;
+
+ hinic3_init_qps(nic_dev, &qp_params);
+
+ err = hinic3_open_channel(netdev);
+ if (err)
+ goto err_uninit_qps;
+
+ err = hinic3_vport_up(netdev);
+ if (err)
+ goto err_close_channel;
+
+ return 0;
+
+err_close_channel:
+ hinic3_close_channel(netdev);
+err_uninit_qps:
+ hinic3_uninit_qps(nic_dev, &qp_params);
+ hinic3_free_channel_resources(netdev, &qp_params, &nic_dev->q_params);
+err_destroy_num_qps:
+ hinic3_destroy_num_qps(netdev);
+err_free_nicio_res:
+ hinic3_free_nicio_res(nic_dev);
+
+ return err;
}
static int hinic3_close(struct net_device *netdev)
{
- /* Completed by later submission due to LoC limit. */
- return -EFAULT;
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_dyna_qp_params qp_params;
+
+ hinic3_vport_down(netdev);
+ hinic3_close_channel(netdev);
+ hinic3_uninit_qps(nic_dev, &qp_params);
+ hinic3_free_channel_resources(netdev, &qp_params, &nic_dev->q_params);
+
+ return 0;
}
static int hinic3_change_mtu(struct net_device *netdev, int new_mtu)
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c
index 5b1a91a18c67..979f47ca77f9 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c
@@ -39,6 +39,12 @@ static int hinic3_feature_nego(struct hinic3_hwdev *hwdev, u8 opcode,
return 0;
}
+int hinic3_get_nic_feature_from_hw(struct hinic3_nic_dev *nic_dev)
+{
+ return hinic3_feature_nego(nic_dev->hwdev, MGMT_MSG_CMD_OP_GET,
+ &nic_dev->nic_io->feature_cap, 1);
+}
+
int hinic3_set_nic_feature_to_hw(struct hinic3_nic_dev *nic_dev)
{
return hinic3_feature_nego(nic_dev->hwdev, MGMT_MSG_CMD_OP_SET,
@@ -82,6 +88,23 @@ static int hinic3_set_function_table(struct hinic3_hwdev *hwdev, u32 cfg_bitmap,
return 0;
}
+int hinic3_init_function_table(struct hinic3_nic_dev *nic_dev)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct l2nic_func_tbl_cfg func_tbl_cfg = {};
+ u32 cfg_bitmap;
+
+ func_tbl_cfg.mtu = 0x3FFF; /* default, max mtu */
+ func_tbl_cfg.rx_wqe_buf_size = nic_io->rx_buf_len;
+
+ cfg_bitmap = BIT(L2NIC_FUNC_TBL_CFG_INIT) |
+ BIT(L2NIC_FUNC_TBL_CFG_MTU) |
+ BIT(L2NIC_FUNC_TBL_CFG_RX_BUF_SIZE);
+
+ return hinic3_set_function_table(nic_dev->hwdev, cfg_bitmap,
+ &func_tbl_cfg);
+}
+
int hinic3_set_port_mtu(struct net_device *netdev, u16 new_mtu)
{
struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
@@ -89,6 +112,7 @@ int hinic3_set_port_mtu(struct net_device *netdev, u16 new_mtu)
struct hinic3_hwdev *hwdev = nic_dev->hwdev;
func_tbl_cfg.mtu = new_mtu;
+
return hinic3_set_function_table(hwdev, BIT(L2NIC_FUNC_TBL_CFG_MTU),
&func_tbl_cfg);
}
@@ -206,6 +230,63 @@ int hinic3_update_mac(struct hinic3_hwdev *hwdev, const u8 *old_mac,
err, mac_info.msg_head.status);
return -EIO;
}
+
+ return 0;
+}
+
+int hinic3_set_ci_table(struct hinic3_hwdev *hwdev, struct hinic3_sq_attr *attr)
+{
+ struct l2nic_cmd_set_ci_attr cons_idx_attr = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ cons_idx_attr.func_idx = hinic3_global_func_id(hwdev);
+ cons_idx_attr.dma_attr_off = attr->dma_attr_off;
+ cons_idx_attr.pending_limit = attr->pending_limit;
+ cons_idx_attr.coalescing_time = attr->coalescing_time;
+
+ if (attr->intr_en) {
+ cons_idx_attr.intr_en = attr->intr_en;
+ cons_idx_attr.intr_idx = attr->intr_idx;
+ }
+
+ cons_idx_attr.l2nic_sqn = attr->l2nic_sqn;
+ cons_idx_attr.ci_addr = attr->ci_dma_base;
+
+ mgmt_msg_params_init_default(&msg_params, &cons_idx_attr,
+ sizeof(cons_idx_attr));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_SET_SQ_CI_ATTR, &msg_params);
+ if (err || cons_idx_attr.msg_head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set ci attribute table, err: %d, status: 0x%x\n",
+ err, cons_idx_attr.msg_head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic3_flush_qps_res(struct hinic3_hwdev *hwdev)
+{
+ struct l2nic_cmd_clear_qp_resource sq_res = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ sq_res.func_id = hinic3_global_func_id(hwdev);
+
+ mgmt_msg_params_init_default(&msg_params, &sq_res, sizeof(sq_res));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_CLEAR_QP_RESOURCE,
+ &msg_params);
+ if (err || sq_res.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to clear sq resources, err: %d, status: 0x%x\n",
+ err, sq_res.msg_head.status);
+ return -EINVAL;
+ }
+
return 0;
}
@@ -231,3 +312,74 @@ int hinic3_force_drop_tx_pkt(struct hinic3_hwdev *hwdev)
return pkt_drop.msg_head.status;
}
+
+int hinic3_sync_dcb_state(struct hinic3_hwdev *hwdev, u8 op_code, u8 state)
+{
+ struct l2nic_cmd_set_dcb_state dcb_state = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ dcb_state.op_code = op_code;
+ dcb_state.state = state;
+ dcb_state.func_id = hinic3_global_func_id(hwdev);
+
+ mgmt_msg_params_init_default(&msg_params, &dcb_state,
+ sizeof(dcb_state));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_QOS_DCB_STATE, &msg_params);
+ if (err || dcb_state.head.status) {
+ dev_err(hwdev->dev,
+ "Failed to set dcb state, err: %d, status: 0x%x\n",
+ err, dcb_state.head.status);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int hinic3_get_link_status(struct hinic3_hwdev *hwdev, bool *link_status_up)
+{
+ struct mag_cmd_get_link_status get_link = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ get_link.port_id = hinic3_physical_port_id(hwdev);
+
+ mgmt_msg_params_init_default(&msg_params, &get_link, sizeof(get_link));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_HILINK,
+ MAG_CMD_GET_LINK_STATUS, &msg_params);
+ if (err || get_link.head.status) {
+ dev_err(hwdev->dev, "Failed to get link state, err: %d, status: 0x%x\n",
+ err, get_link.head.status);
+ return -EIO;
+ }
+
+ *link_status_up = !!get_link.status;
+
+ return 0;
+}
+
+int hinic3_set_vport_enable(struct hinic3_hwdev *hwdev, u16 func_id,
+ bool enable)
+{
+ struct l2nic_cmd_set_vport_state en_state = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ en_state.func_id = func_id;
+ en_state.state = enable ? 1 : 0;
+
+ mgmt_msg_params_init_default(&msg_params, &en_state, sizeof(en_state));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_SET_VPORT_ENABLE, &msg_params);
+ if (err || en_state.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to set vport state, err: %d, status: 0x%x\n",
+ err, en_state.msg_head.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h
index bf9ce51dc401..b83b567fa542 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h
@@ -22,11 +22,23 @@ enum hinic3_nic_event_type {
HINIC3_NIC_EVENT_LINK_UP = 1,
};
+struct hinic3_sq_attr {
+ u8 dma_attr_off;
+ u8 pending_limit;
+ u8 coalescing_time;
+ u8 intr_en;
+ u16 intr_idx;
+ u32 l2nic_sqn;
+ u64 ci_dma_base;
+};
+
+int hinic3_get_nic_feature_from_hw(struct hinic3_nic_dev *nic_dev);
int hinic3_set_nic_feature_to_hw(struct hinic3_nic_dev *nic_dev);
bool hinic3_test_support(struct hinic3_nic_dev *nic_dev,
enum hinic3_nic_feature_cap feature_bits);
void hinic3_update_nic_feature(struct hinic3_nic_dev *nic_dev, u64 feature_cap);
+int hinic3_init_function_table(struct hinic3_nic_dev *nic_dev);
int hinic3_set_port_mtu(struct net_device *netdev, u16 new_mtu);
int hinic3_set_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
@@ -36,6 +48,14 @@ int hinic3_del_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id,
int hinic3_update_mac(struct hinic3_hwdev *hwdev, const u8 *old_mac,
u8 *new_mac, u16 vlan_id, u16 func_id);
+int hinic3_set_ci_table(struct hinic3_hwdev *hwdev,
+ struct hinic3_sq_attr *attr);
+int hinic3_flush_qps_res(struct hinic3_hwdev *hwdev);
int hinic3_force_drop_tx_pkt(struct hinic3_hwdev *hwdev);
+int hinic3_sync_dcb_state(struct hinic3_hwdev *hwdev, u8 op_code, u8 state);
+int hinic3_get_link_status(struct hinic3_hwdev *hwdev, bool *link_status_up);
+int hinic3_set_vport_enable(struct hinic3_hwdev *hwdev, u16 func_id,
+ bool enable);
+
#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
index c994fc9b6ee0..5ba83261616c 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h
@@ -51,6 +51,12 @@ struct hinic3_dyna_txrxq_params {
struct hinic3_irq_cfg *irq_cfg;
};
+struct hinic3_intr_coal_info {
+ u8 pending_limit;
+ u8 coalesce_timer_cfg;
+ u8 resend_timer_cfg;
+};
+
struct hinic3_nic_dev {
struct pci_dev *pdev;
struct net_device *netdev;
@@ -67,16 +73,21 @@ struct hinic3_nic_dev {
struct hinic3_txq *txqs;
struct hinic3_rxq *rxqs;
+ enum hinic3_rss_hash_type rss_hash_type;
+ struct hinic3_rss_type rss_type;
+ u8 *rss_hkey;
+ u16 *rss_indir;
+
u16 num_qp_irq;
struct msix_entry *qps_msix_entries;
+ struct hinic3_intr_coal_info *intr_coalesce;
+
bool link_status_up;
};
void hinic3_set_netdev_ops(struct net_device *netdev);
-
-/* Temporary prototypes. Functions become static in later submission. */
-void qp_add_napi(struct hinic3_irq_cfg *irq_cfg);
-void qp_del_napi(struct hinic3_irq_cfg *irq_cfg);
+int hinic3_qps_irq_init(struct net_device *netdev);
+void hinic3_qps_irq_uninit(struct net_device *netdev);
#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
index 34a1f5bd5ac1..d86cd1ba4605 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+#include "hinic3_cmdq.h"
#include "hinic3_hw_comm.h"
#include "hinic3_hw_intf.h"
#include "hinic3_hwdev.h"
@@ -9,13 +10,876 @@
#include "hinic3_nic_dev.h"
#include "hinic3_nic_io.h"
+#define HINIC3_DEFAULT_TX_CI_PENDING_LIMIT 1
+#define HINIC3_DEFAULT_TX_CI_COALESCING_TIME 1
+#define HINIC3_DEFAULT_DROP_THD_ON (0xFFFF)
+#define HINIC3_DEFAULT_DROP_THD_OFF 0
+
+#define HINIC3_CI_Q_ADDR_SIZE (64)
+
+#define HINIC3_CI_TABLE_SIZE(num_qps) \
+ (ALIGN((num_qps) * HINIC3_CI_Q_ADDR_SIZE, HINIC3_MIN_PAGE_SIZE))
+
+#define HINIC3_CI_VADDR(base_addr, q_id) \
+ ((u8 *)(base_addr) + (q_id) * HINIC3_CI_Q_ADDR_SIZE)
+
+#define HINIC3_CI_PADDR(base_paddr, q_id) \
+ ((base_paddr) + (q_id) * HINIC3_CI_Q_ADDR_SIZE)
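+
+/* Example: each queue owns a 64-byte slot in the CI table, so for
+ * q_id 3 the CI word lives at base + 192, and HINIC3_CI_TABLE_SIZE()
+ * rounds the whole table up to HINIC3_MIN_PAGE_SIZE.
+ */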
+
+#define SQ_WQ_PREFETCH_MAX 1
+#define SQ_WQ_PREFETCH_MIN 1
+#define SQ_WQ_PREFETCH_THRESHOLD 16
+
+#define RQ_WQ_PREFETCH_MAX 4
+#define RQ_WQ_PREFETCH_MIN 1
+#define RQ_WQ_PREFETCH_THRESHOLD 256
+
+/* Contexts per cmd buffer: (2048 byte buffer - 8 byte header) / 64 byte context */
+#define HINIC3_Q_CTXT_MAX 31
+
+enum hinic3_qp_ctxt_type {
+ HINIC3_QP_CTXT_TYPE_SQ = 0,
+ HINIC3_QP_CTXT_TYPE_RQ = 1,
+};
+
+struct hinic3_qp_ctxt_hdr {
+ __le16 num_queues;
+ __le16 queue_type;
+ __le16 start_qid;
+ __le16 rsvd;
+};
+
+struct hinic3_sq_ctxt {
+ __le32 ci_pi;
+ __le32 drop_mode_sp;
+ __le32 wq_pfn_hi_owner;
+ __le32 wq_pfn_lo;
+
+ __le32 rsvd0;
+ __le32 pkt_drop_thd;
+ __le32 global_sq_id;
+ __le32 vlan_ceq_attr;
+
+ __le32 pref_cache;
+ __le32 pref_ci_owner;
+ __le32 pref_wq_pfn_hi_ci;
+ __le32 pref_wq_pfn_lo;
+
+ __le32 rsvd8;
+ __le32 rsvd9;
+ __le32 wq_block_pfn_hi;
+ __le32 wq_block_pfn_lo;
+};
+
+struct hinic3_rq_ctxt {
+ __le32 ci_pi;
+ __le32 ceq_attr;
+ __le32 wq_pfn_hi_type_owner;
+ __le32 wq_pfn_lo;
+
+ __le32 rsvd[3];
+ __le32 cqe_sge_len;
+
+ __le32 pref_cache;
+ __le32 pref_ci_owner;
+ __le32 pref_wq_pfn_hi_ci;
+ __le32 pref_wq_pfn_lo;
+
+ __le32 pi_paddr_hi;
+ __le32 pi_paddr_lo;
+ __le32 wq_block_pfn_hi;
+ __le32 wq_block_pfn_lo;
+};
+
+struct hinic3_sq_ctxt_block {
+ struct hinic3_qp_ctxt_hdr cmdq_hdr;
+ struct hinic3_sq_ctxt sq_ctxt[HINIC3_Q_CTXT_MAX];
+};
+
+struct hinic3_rq_ctxt_block {
+ struct hinic3_qp_ctxt_hdr cmdq_hdr;
+ struct hinic3_rq_ctxt rq_ctxt[HINIC3_Q_CTXT_MAX];
+};
+
+struct hinic3_clean_queue_ctxt {
+ struct hinic3_qp_ctxt_hdr cmdq_hdr;
+ __le32 rsvd;
+};
+
+#define SQ_CTXT_SIZE(num_sqs) \
+ (sizeof(struct hinic3_qp_ctxt_hdr) + \
+ (num_sqs) * sizeof(struct hinic3_sq_ctxt))
+
+#define RQ_CTXT_SIZE(num_rqs) \
+ (sizeof(struct hinic3_qp_ctxt_hdr) + \
+ (num_rqs) * sizeof(struct hinic3_rq_ctxt))
+
+#define SQ_CTXT_PREF_CI_HI_SHIFT 12
+#define SQ_CTXT_PREF_CI_HI(val) ((val) >> SQ_CTXT_PREF_CI_HI_SHIFT)
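+
+/* The prefetch CI is split across two context words: for ci = 0x1234,
+ * SQ_CTXT_PREF_CI_HI() yields 0x1 while the low 12 bits, 0x234, are
+ * stored via SQ_CTXT_PREF_SET(ci, CI_LOW) below.
+ */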
+
+#define SQ_CTXT_PI_IDX_MASK GENMASK(15, 0)
+#define SQ_CTXT_CI_IDX_MASK GENMASK(31, 16)
+#define SQ_CTXT_CI_PI_SET(val, member) \
+ FIELD_PREP(SQ_CTXT_##member##_MASK, val)
+
+#define SQ_CTXT_MODE_SP_FLAG_MASK BIT(0)
+#define SQ_CTXT_MODE_PKT_DROP_MASK BIT(1)
+#define SQ_CTXT_MODE_SET(val, member) \
+ FIELD_PREP(SQ_CTXT_MODE_##member##_MASK, val)
+
+#define SQ_CTXT_WQ_PAGE_HI_PFN_MASK GENMASK(19, 0)
+#define SQ_CTXT_WQ_PAGE_OWNER_MASK BIT(23)
+#define SQ_CTXT_WQ_PAGE_SET(val, member) \
+ FIELD_PREP(SQ_CTXT_WQ_PAGE_##member##_MASK, val)
+
+#define SQ_CTXT_PKT_DROP_THD_ON_MASK GENMASK(15, 0)
+#define SQ_CTXT_PKT_DROP_THD_OFF_MASK GENMASK(31, 16)
+#define SQ_CTXT_PKT_DROP_THD_SET(val, member) \
+ FIELD_PREP(SQ_CTXT_PKT_DROP_##member##_MASK, val)
+
+#define SQ_CTXT_GLOBAL_SQ_ID_MASK GENMASK(12, 0)
+#define SQ_CTXT_GLOBAL_QUEUE_ID_SET(val, member) \
+ FIELD_PREP(SQ_CTXT_##member##_MASK, val)
+
+#define SQ_CTXT_VLAN_INSERT_MODE_MASK GENMASK(20, 19)
+#define SQ_CTXT_VLAN_CEQ_EN_MASK BIT(23)
+#define SQ_CTXT_VLAN_CEQ_SET(val, member) \
+ FIELD_PREP(SQ_CTXT_VLAN_##member##_MASK, val)
+
+#define SQ_CTXT_PREF_CACHE_THRESHOLD_MASK GENMASK(13, 0)
+#define SQ_CTXT_PREF_CACHE_MAX_MASK GENMASK(24, 14)
+#define SQ_CTXT_PREF_CACHE_MIN_MASK GENMASK(31, 25)
+
+#define SQ_CTXT_PREF_CI_HI_MASK GENMASK(3, 0)
+#define SQ_CTXT_PREF_OWNER_MASK BIT(4)
+
+#define SQ_CTXT_PREF_WQ_PFN_HI_MASK GENMASK(19, 0)
+#define SQ_CTXT_PREF_CI_LOW_MASK GENMASK(31, 20)
+#define SQ_CTXT_PREF_SET(val, member) \
+ FIELD_PREP(SQ_CTXT_PREF_##member##_MASK, val)
+
+#define SQ_CTXT_WQ_BLOCK_PFN_HI_MASK GENMASK(22, 0)
+#define SQ_CTXT_WQ_BLOCK_SET(val, member) \
+ FIELD_PREP(SQ_CTXT_WQ_BLOCK_##member##_MASK, val)
+
+#define RQ_CTXT_PI_IDX_MASK GENMASK(15, 0)
+#define RQ_CTXT_CI_IDX_MASK GENMASK(31, 16)
+#define RQ_CTXT_CI_PI_SET(val, member) \
+ FIELD_PREP(RQ_CTXT_##member##_MASK, val)
+
+#define RQ_CTXT_CEQ_ATTR_INTR_MASK GENMASK(30, 21)
+#define RQ_CTXT_CEQ_ATTR_EN_MASK BIT(31)
+#define RQ_CTXT_CEQ_ATTR_SET(val, member) \
+ FIELD_PREP(RQ_CTXT_CEQ_ATTR_##member##_MASK, val)
+
+#define RQ_CTXT_WQ_PAGE_HI_PFN_MASK GENMASK(19, 0)
+#define RQ_CTXT_WQ_PAGE_WQE_TYPE_MASK GENMASK(29, 28)
+#define RQ_CTXT_WQ_PAGE_OWNER_MASK BIT(31)
+#define RQ_CTXT_WQ_PAGE_SET(val, member) \
+ FIELD_PREP(RQ_CTXT_WQ_PAGE_##member##_MASK, val)
+
+#define RQ_CTXT_CQE_LEN_MASK GENMASK(29, 28)
+#define RQ_CTXT_CQE_LEN_SET(val, member) \
+ FIELD_PREP(RQ_CTXT_##member##_MASK, val)
+
+#define RQ_CTXT_PREF_CACHE_THRESHOLD_MASK GENMASK(13, 0)
+#define RQ_CTXT_PREF_CACHE_MAX_MASK GENMASK(24, 14)
+#define RQ_CTXT_PREF_CACHE_MIN_MASK GENMASK(31, 25)
+
+#define RQ_CTXT_PREF_CI_HI_MASK GENMASK(3, 0)
+#define RQ_CTXT_PREF_OWNER_MASK BIT(4)
+
+#define RQ_CTXT_PREF_WQ_PFN_HI_MASK GENMASK(19, 0)
+#define RQ_CTXT_PREF_CI_LOW_MASK GENMASK(31, 20)
+#define RQ_CTXT_PREF_SET(val, member) \
+ FIELD_PREP(RQ_CTXT_PREF_##member##_MASK, val)
+
+#define RQ_CTXT_WQ_BLOCK_PFN_HI_MASK GENMASK(22, 0)
+#define RQ_CTXT_WQ_BLOCK_SET(val, member) \
+ FIELD_PREP(RQ_CTXT_WQ_BLOCK_##member##_MASK, val)
+
+#define WQ_PAGE_PFN_SHIFT 12
+#define WQ_BLOCK_PFN_SHIFT 9
+#define WQ_PAGE_PFN(page_addr) ((page_addr) >> WQ_PAGE_PFN_SHIFT)
+#define WQ_BLOCK_PFN(page_addr) ((page_addr) >> WQ_BLOCK_PFN_SHIFT)
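+
+/* Example: a WQ page at physical address 0x5000 yields WQ_PAGE_PFN 0x5
+ * (4 KiB units), while WQ_BLOCK_PFN for the same address is 0x28
+ * (512-byte units).
+ */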
+
int hinic3_init_nic_io(struct hinic3_nic_dev *nic_dev)
{
- /* Completed by later submission due to LoC limit. */
- return -EFAULT;
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic3_nic_io *nic_io;
+ int err;
+
+ nic_io = kzalloc(sizeof(*nic_io), GFP_KERNEL);
+ if (!nic_io)
+ return -ENOMEM;
+
+ nic_dev->nic_io = nic_io;
+
+ err = hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_NIC, 1);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set function svc used state\n");
+ goto err_free_nicio;
+ }
+
+ err = hinic3_init_function_table(nic_dev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init function table\n");
+ goto err_clear_func_svc_used_state;
+ }
+
+ nic_io->rx_buf_len = nic_dev->rx_buf_len;
+
+ err = hinic3_get_nic_feature_from_hw(nic_dev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to get nic features\n");
+ goto err_clear_func_svc_used_state;
+ }
+
+ nic_io->feature_cap &= HINIC3_NIC_F_ALL_MASK;
+ nic_io->feature_cap &= HINIC3_NIC_DRV_DEFAULT_FEATURE;
+ dev_dbg(hwdev->dev, "nic features: 0x%llx\n\n", nic_io->feature_cap);
+
+ return 0;
+
+err_clear_func_svc_used_state:
+ hinic3_set_func_svc_used_state(hwdev, COMM_FUNC_SVC_T_NIC, 0);
+err_free_nicio:
+ nic_dev->nic_io = NULL;
+ kfree(nic_io);
+
+ return err;
}
void hinic3_free_nic_io(struct hinic3_nic_dev *nic_dev)
{
- /* Completed by later submission due to LoC limit. */
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+
+ hinic3_set_func_svc_used_state(nic_dev->hwdev, COMM_FUNC_SVC_T_NIC, 0);
+ nic_dev->nic_io = NULL;
+ kfree(nic_io);
+}
+
+int hinic3_init_nicio_res(struct hinic3_nic_dev *nic_dev)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ void __iomem *db_base;
+ int err;
+
+ nic_io->max_qps = hinic3_func_max_qnum(hwdev);
+
+ err = hinic3_alloc_db_addr(hwdev, &db_base, NULL);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to allocate doorbell for sqs\n");
+ return err;
+ }
+ nic_io->sqs_db_addr = db_base;
+
+ err = hinic3_alloc_db_addr(hwdev, &db_base, NULL);
+ if (err) {
+ hinic3_free_db_addr(hwdev, nic_io->sqs_db_addr);
+ dev_err(hwdev->dev, "Failed to allocate doorbell for rqs\n");
+ return err;
+ }
+ nic_io->rqs_db_addr = db_base;
+
+ nic_io->ci_vaddr_base =
+ dma_alloc_coherent(hwdev->dev,
+ HINIC3_CI_TABLE_SIZE(nic_io->max_qps),
+ &nic_io->ci_dma_base,
+ GFP_KERNEL);
+ if (!nic_io->ci_vaddr_base) {
+ hinic3_free_db_addr(hwdev, nic_io->sqs_db_addr);
+ hinic3_free_db_addr(hwdev, nic_io->rqs_db_addr);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void hinic3_free_nicio_res(struct hinic3_nic_dev *nic_dev)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+
+ dma_free_coherent(hwdev->dev,
+ HINIC3_CI_TABLE_SIZE(nic_io->max_qps),
+ nic_io->ci_vaddr_base, nic_io->ci_dma_base);
+
+ hinic3_free_db_addr(hwdev, nic_io->sqs_db_addr);
+ hinic3_free_db_addr(hwdev, nic_io->rqs_db_addr);
+}
+
+static int hinic3_create_sq(struct hinic3_hwdev *hwdev,
+ struct hinic3_io_queue *sq,
+ u16 q_id, u32 sq_depth, u16 sq_msix_idx)
+{
+ int err;
+
+ /* hardware requires the sq owner bit to be initialized to 1 */
+ sq->owner = 1;
+
+ sq->q_id = q_id;
+ sq->msix_entry_idx = sq_msix_idx;
+
+ err = hinic3_wq_create(hwdev, &sq->wq, sq_depth,
+ BIT(HINIC3_SQ_WQEBB_SHIFT));
+ if (err) {
+ dev_err(hwdev->dev, "Failed to create tx queue %u wq\n",
+ q_id);
+ return err;
+ }
+
+ return 0;
+}
+
+static int hinic3_create_rq(struct hinic3_hwdev *hwdev,
+ struct hinic3_io_queue *rq,
+ u16 q_id, u32 rq_depth, u16 rq_msix_idx)
+{
+ int err;
+
+ rq->q_id = q_id;
+ rq->msix_entry_idx = rq_msix_idx;
+
+ err = hinic3_wq_create(hwdev, &rq->wq, rq_depth,
+ BIT(HINIC3_RQ_WQEBB_SHIFT +
+ HINIC3_NORMAL_RQ_WQE));
+ if (err) {
+ dev_err(hwdev->dev, "Failed to create rx queue %u wq\n",
+ q_id);
+ return err;
+ }
+
+ return 0;
+}
+
+static int hinic3_create_qp(struct hinic3_hwdev *hwdev,
+ struct hinic3_io_queue *sq,
+ struct hinic3_io_queue *rq, u16 q_id, u32 sq_depth,
+ u32 rq_depth, u16 qp_msix_idx)
+{
+ int err;
+
+ err = hinic3_create_sq(hwdev, sq, q_id, sq_depth, qp_msix_idx);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to create sq, qid: %u\n",
+ q_id);
+ return err;
+ }
+
+ err = hinic3_create_rq(hwdev, rq, q_id, rq_depth, qp_msix_idx);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to create rq, qid: %u\n",
+ q_id);
+ goto err_destroy_sq_wq;
+ }
+
+ return 0;
+
+err_destroy_sq_wq:
+ hinic3_wq_destroy(hwdev, &sq->wq);
+
+ return err;
+}
+
+static void hinic3_destroy_qp(struct hinic3_hwdev *hwdev,
+ struct hinic3_io_queue *sq,
+ struct hinic3_io_queue *rq)
+{
+ hinic3_wq_destroy(hwdev, &sq->wq);
+ hinic3_wq_destroy(hwdev, &rq->wq);
+}
+
+int hinic3_alloc_qps(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_dyna_qp_params *qp_params)
+{
+ struct msix_entry *qps_msix_entries = nic_dev->qps_msix_entries;
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic3_io_queue *sqs;
+ struct hinic3_io_queue *rqs;
+ u16 q_id;
+ int err;
+
+ if (qp_params->num_qps > nic_io->max_qps || !qp_params->num_qps)
+ return -EINVAL;
+
+ sqs = kcalloc(qp_params->num_qps, sizeof(*sqs), GFP_KERNEL);
+ if (!sqs) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ rqs = kcalloc(qp_params->num_qps, sizeof(*rqs), GFP_KERNEL);
+ if (!rqs) {
+ err = -ENOMEM;
+ goto err_free_sqs;
+ }
+
+ for (q_id = 0; q_id < qp_params->num_qps; q_id++) {
+ err = hinic3_create_qp(hwdev, &sqs[q_id], &rqs[q_id], q_id,
+ qp_params->sq_depth, qp_params->rq_depth,
+ qps_msix_entries[q_id].entry);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to allocate qp %u, err: %d\n",
+ q_id, err);
+ goto err_destroy_qp;
+ }
+ }
+
+ qp_params->sqs = sqs;
+ qp_params->rqs = rqs;
+
+ return 0;
+
+err_destroy_qp:
+ while (q_id > 0) {
+ q_id--;
+ hinic3_destroy_qp(hwdev, &sqs[q_id], &rqs[q_id]);
+ }
+ kfree(rqs);
+err_free_sqs:
+ kfree(sqs);
+err_out:
+ return err;
+}
+
+void hinic3_free_qps(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_dyna_qp_params *qp_params)
+{
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ u16 q_id;
+
+ for (q_id = 0; q_id < qp_params->num_qps; q_id++)
+ hinic3_destroy_qp(hwdev, &qp_params->sqs[q_id],
+ &qp_params->rqs[q_id]);
+
+ kfree(qp_params->sqs);
+ kfree(qp_params->rqs);
+}
+
+void hinic3_init_qps(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_dyna_qp_params *qp_params)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct hinic3_io_queue *sqs = qp_params->sqs;
+ struct hinic3_io_queue *rqs = qp_params->rqs;
+ u16 q_id;
+
+ nic_io->num_qps = qp_params->num_qps;
+ nic_io->sq = qp_params->sqs;
+ nic_io->rq = qp_params->rqs;
+ for (q_id = 0; q_id < nic_io->num_qps; q_id++) {
+ sqs[q_id].cons_idx_addr =
+ (u16 *)HINIC3_CI_VADDR(nic_io->ci_vaddr_base, q_id);
+ /* clear ci value */
+ WRITE_ONCE(*sqs[q_id].cons_idx_addr, 0);
+
+ sqs[q_id].db_addr = nic_io->sqs_db_addr;
+ rqs[q_id].db_addr = nic_io->rqs_db_addr;
+ }
+}
+
+void hinic3_uninit_qps(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_dyna_qp_params *qp_params)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+
+ qp_params->sqs = nic_io->sq;
+ qp_params->rqs = nic_io->rq;
+ qp_params->num_qps = nic_io->num_qps;
+}
+
+static void hinic3_qp_prepare_cmdq_header(struct hinic3_qp_ctxt_hdr *qp_ctxt_hdr,
+ enum hinic3_qp_ctxt_type ctxt_type,
+ u16 num_queues, u16 q_id)
+{
+ qp_ctxt_hdr->queue_type = cpu_to_le16(ctxt_type);
+ qp_ctxt_hdr->num_queues = cpu_to_le16(num_queues);
+ qp_ctxt_hdr->start_qid = cpu_to_le16(q_id);
+ qp_ctxt_hdr->rsvd = 0;
+}
+
+static void hinic3_sq_prepare_ctxt(struct hinic3_io_queue *sq, u16 sq_id,
+ struct hinic3_sq_ctxt *sq_ctxt)
+{
+ u64 wq_page_addr, wq_page_pfn, wq_block_pfn;
+ u32 wq_block_pfn_hi, wq_block_pfn_lo;
+ u32 wq_page_pfn_hi, wq_page_pfn_lo;
+ u16 pi_start, ci_start;
+
+ ci_start = hinic3_get_sq_local_ci(sq);
+ pi_start = hinic3_get_sq_local_pi(sq);
+
+ wq_page_addr = hinic3_wq_get_first_wqe_page_addr(&sq->wq);
+
+ wq_page_pfn = WQ_PAGE_PFN(wq_page_addr);
+ wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
+ wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
+
+ wq_block_pfn = WQ_BLOCK_PFN(sq->wq.wq_block_paddr);
+ wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
+ wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
+
+ sq_ctxt->ci_pi =
+ cpu_to_le32(SQ_CTXT_CI_PI_SET(ci_start, CI_IDX) |
+ SQ_CTXT_CI_PI_SET(pi_start, PI_IDX));
+
+ sq_ctxt->drop_mode_sp =
+ cpu_to_le32(SQ_CTXT_MODE_SET(0, SP_FLAG) |
+ SQ_CTXT_MODE_SET(0, PKT_DROP));
+
+ sq_ctxt->wq_pfn_hi_owner =
+ cpu_to_le32(SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
+ SQ_CTXT_WQ_PAGE_SET(1, OWNER));
+
+ sq_ctxt->wq_pfn_lo = cpu_to_le32(wq_page_pfn_lo);
+
+ sq_ctxt->pkt_drop_thd =
+ cpu_to_le32(SQ_CTXT_PKT_DROP_THD_SET(HINIC3_DEFAULT_DROP_THD_ON, THD_ON) |
+ SQ_CTXT_PKT_DROP_THD_SET(HINIC3_DEFAULT_DROP_THD_OFF, THD_OFF));
+
+ sq_ctxt->global_sq_id =
+ cpu_to_le32(SQ_CTXT_GLOBAL_QUEUE_ID_SET((u32)sq_id,
+ GLOBAL_SQ_ID));
+
+ /* enable c-vlan insertion by default */
+ sq_ctxt->vlan_ceq_attr =
+ cpu_to_le32(SQ_CTXT_VLAN_CEQ_SET(0, CEQ_EN) |
+ SQ_CTXT_VLAN_CEQ_SET(1, INSERT_MODE));
+
+ sq_ctxt->rsvd0 = 0;
+
+ sq_ctxt->pref_cache =
+ cpu_to_le32(SQ_CTXT_PREF_SET(SQ_WQ_PREFETCH_MIN, CACHE_MIN) |
+ SQ_CTXT_PREF_SET(SQ_WQ_PREFETCH_MAX, CACHE_MAX) |
+ SQ_CTXT_PREF_SET(SQ_WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD));
+
+ sq_ctxt->pref_ci_owner =
+ cpu_to_le32(SQ_CTXT_PREF_SET(SQ_CTXT_PREF_CI_HI(ci_start), CI_HI) |
+ SQ_CTXT_PREF_SET(1, OWNER));
+
+ sq_ctxt->pref_wq_pfn_hi_ci =
+ cpu_to_le32(SQ_CTXT_PREF_SET(ci_start, CI_LOW) |
+ SQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI));
+
+ sq_ctxt->pref_wq_pfn_lo = cpu_to_le32(wq_page_pfn_lo);
+
+ sq_ctxt->wq_block_pfn_hi =
+ cpu_to_le32(SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI));
+
+ sq_ctxt->wq_block_pfn_lo = cpu_to_le32(wq_block_pfn_lo);
+}
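The SQ_CTXT_*_SET macros are not part of this patch, but by analogy with the SQ_CTRL_QUEUE_INFO_SET macro in hinic3_tx.h further below they are presumably FIELD_PREP() wrappers. A sketch of how ci_pi would pack, using hypothetical masks purely for illustration:

#include <linux/bitfield.h>
#include <linux/types.h>

/* hypothetical layout: consumer index in the low half, producer in the high */
#define EX_SQ_CTXT_CI_PI_CI_IDX_MASK	GENMASK(15, 0)
#define EX_SQ_CTXT_CI_PI_PI_IDX_MASK	GENMASK(31, 16)

static u32 ex_pack_ci_pi(u16 ci_start, u16 pi_start)
{
	/* e.g. ci_start = 3, pi_start = 7 packs to 0x00070003 */
	return FIELD_PREP(EX_SQ_CTXT_CI_PI_CI_IDX_MASK, ci_start) |
	       FIELD_PREP(EX_SQ_CTXT_CI_PI_PI_IDX_MASK, pi_start);
}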
+
+static void hinic3_rq_prepare_ctxt_get_wq_info(struct hinic3_io_queue *rq,
+ u32 *wq_page_pfn_hi,
+ u32 *wq_page_pfn_lo,
+ u32 *wq_block_pfn_hi,
+ u32 *wq_block_pfn_lo)
+{
+ u64 wq_page_addr, wq_page_pfn, wq_block_pfn;
+
+ wq_page_addr = hinic3_wq_get_first_wqe_page_addr(&rq->wq);
+
+ wq_page_pfn = WQ_PAGE_PFN(wq_page_addr);
+ *wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
+ *wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
+
+ wq_block_pfn = WQ_BLOCK_PFN(rq->wq.wq_block_paddr);
+ *wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
+ *wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
+}
+
+static void hinic3_rq_prepare_ctxt(struct hinic3_io_queue *rq,
+ struct hinic3_rq_ctxt *rq_ctxt)
+{
+ u32 wq_block_pfn_hi, wq_block_pfn_lo;
+ u32 wq_page_pfn_hi, wq_page_pfn_lo;
+ u16 pi_start, ci_start;
+
+ ci_start = (rq->wq.cons_idx & rq->wq.idx_mask) << HINIC3_NORMAL_RQ_WQE;
+ pi_start = (rq->wq.prod_idx & rq->wq.idx_mask) << HINIC3_NORMAL_RQ_WQE;
+
+ hinic3_rq_prepare_ctxt_get_wq_info(rq, &wq_page_pfn_hi, &wq_page_pfn_lo,
+ &wq_block_pfn_hi, &wq_block_pfn_lo);
+
+ rq_ctxt->ci_pi =
+ cpu_to_le32(RQ_CTXT_CI_PI_SET(ci_start, CI_IDX) |
+ RQ_CTXT_CI_PI_SET(pi_start, PI_IDX));
+
+ rq_ctxt->ceq_attr =
+ cpu_to_le32(RQ_CTXT_CEQ_ATTR_SET(0, EN) |
+ RQ_CTXT_CEQ_ATTR_SET(rq->msix_entry_idx, INTR));
+
+ rq_ctxt->wq_pfn_hi_type_owner =
+ cpu_to_le32(RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
+ RQ_CTXT_WQ_PAGE_SET(1, OWNER));
+
+ /* use 16-byte WQE */
+ rq_ctxt->wq_pfn_hi_type_owner |=
+ cpu_to_le32(RQ_CTXT_WQ_PAGE_SET(2, WQE_TYPE));
+ rq_ctxt->cqe_sge_len = cpu_to_le32(RQ_CTXT_CQE_LEN_SET(1, CQE_LEN));
+
+ rq_ctxt->wq_pfn_lo = cpu_to_le32(wq_page_pfn_lo);
+
+ rq_ctxt->pref_cache =
+ cpu_to_le32(RQ_CTXT_PREF_SET(RQ_WQ_PREFETCH_MIN, CACHE_MIN) |
+ RQ_CTXT_PREF_SET(RQ_WQ_PREFETCH_MAX, CACHE_MAX) |
+ RQ_CTXT_PREF_SET(RQ_WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD));
+
+ rq_ctxt->pref_ci_owner =
+ cpu_to_le32(RQ_CTXT_PREF_SET(SQ_CTXT_PREF_CI_HI(ci_start), CI_HI) |
+ RQ_CTXT_PREF_SET(1, OWNER));
+
+ rq_ctxt->pref_wq_pfn_hi_ci =
+ cpu_to_le32(RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI) |
+ RQ_CTXT_PREF_SET(ci_start, CI_LOW));
+
+ rq_ctxt->pref_wq_pfn_lo = cpu_to_le32(wq_page_pfn_lo);
+
+ rq_ctxt->wq_block_pfn_hi =
+ cpu_to_le32(RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI));
+
+ rq_ctxt->wq_block_pfn_lo = cpu_to_le32(wq_block_pfn_lo);
+}
+
+static int init_sq_ctxts(struct hinic3_nic_dev *nic_dev)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic3_sq_ctxt_block *sq_ctxt_block;
+ u16 q_id, curr_id, max_ctxts, i;
+ struct hinic3_sq_ctxt *sq_ctxt;
+ struct hinic3_cmd_buf *cmd_buf;
+ struct hinic3_io_queue *sq;
+ __le64 out_param;
+ int err = 0;
+
+ cmd_buf = hinic3_alloc_cmd_buf(hwdev);
+ if (!cmd_buf) {
+ dev_err(hwdev->dev, "Failed to allocate cmd buf\n");
+ return -ENOMEM;
+ }
+
+ q_id = 0;
+ while (q_id < nic_io->num_qps) {
+ sq_ctxt_block = cmd_buf->buf;
+ sq_ctxt = sq_ctxt_block->sq_ctxt;
+
+ max_ctxts = (nic_io->num_qps - q_id) > HINIC3_Q_CTXT_MAX ?
+ HINIC3_Q_CTXT_MAX : (nic_io->num_qps - q_id);
+
+ hinic3_qp_prepare_cmdq_header(&sq_ctxt_block->cmdq_hdr,
+ HINIC3_QP_CTXT_TYPE_SQ, max_ctxts,
+ q_id);
+
+ for (i = 0; i < max_ctxts; i++) {
+ curr_id = q_id + i;
+ sq = &nic_io->sq[curr_id];
+ hinic3_sq_prepare_ctxt(sq, curr_id, &sq_ctxt[i]);
+ }
+
+ hinic3_cmdq_buf_swab32(sq_ctxt_block, sizeof(*sq_ctxt_block));
+
+ cmd_buf->size = cpu_to_le16(SQ_CTXT_SIZE(max_ctxts));
+ err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_UCODE_CMD_MODIFY_QUEUE_CTX,
+ cmd_buf, &out_param);
+ if (err || out_param) {
+ dev_err(hwdev->dev, "Failed to set SQ ctxts, err: %d, out_param: 0x%llx\n",
+ err, out_param);
+ err = -EFAULT;
+ break;
+ }
+
+ q_id += max_ctxts;
+ }
+
+ hinic3_free_cmd_buf(hwdev, cmd_buf);
+
+ return err;
+}
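Contexts are pushed in batches because a single command buffer holds at most HINIC3_Q_CTXT_MAX of them. A standalone model of the batching loop (the real HINIC3_Q_CTXT_MAX value is not shown in this patch; 31 is a placeholder):

#include <stdio.h>

#define EX_Q_CTXT_MAX 31u	/* placeholder for HINIC3_Q_CTXT_MAX */

int main(void)
{
	unsigned int num_qps = 40, q_id = 0, max_ctxts;

	/* mirrors the while loop in init_sq_ctxts()/init_rq_ctxts() */
	while (q_id < num_qps) {
		max_ctxts = (num_qps - q_id) > EX_Q_CTXT_MAX ?
			    EX_Q_CTXT_MAX : (num_qps - q_id);
		printf("cmdq cmd: start_qid=%u num_queues=%u\n",
		       q_id, max_ctxts);
		q_id += max_ctxts;
	}
	return 0;	/* prints start_qid=0 num=31, then start_qid=31 num=9 */
}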
+
+static int init_rq_ctxts(struct hinic3_nic_dev *nic_dev)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic3_rq_ctxt_block *rq_ctxt_block;
+ u16 q_id, curr_id, max_ctxts, i;
+ struct hinic3_rq_ctxt *rq_ctxt;
+ struct hinic3_cmd_buf *cmd_buf;
+ struct hinic3_io_queue *rq;
+ __le64 out_param;
+ int err = 0;
+
+ cmd_buf = hinic3_alloc_cmd_buf(hwdev);
+ if (!cmd_buf) {
+ dev_err(hwdev->dev, "Failed to allocate cmd buf\n");
+ return -ENOMEM;
+ }
+
+ q_id = 0;
+ while (q_id < nic_io->num_qps) {
+ rq_ctxt_block = cmd_buf->buf;
+ rq_ctxt = rq_ctxt_block->rq_ctxt;
+
+ max_ctxts = (nic_io->num_qps - q_id) > HINIC3_Q_CTXT_MAX ?
+ HINIC3_Q_CTXT_MAX : (nic_io->num_qps - q_id);
+
+ hinic3_qp_prepare_cmdq_header(&rq_ctxt_block->cmdq_hdr,
+ HINIC3_QP_CTXT_TYPE_RQ, max_ctxts,
+ q_id);
+
+ for (i = 0; i < max_ctxts; i++) {
+ curr_id = q_id + i;
+ rq = &nic_io->rq[curr_id];
+ hinic3_rq_prepare_ctxt(rq, &rq_ctxt[i]);
+ }
+
+ hinic3_cmdq_buf_swab32(rq_ctxt_block, sizeof(*rq_ctxt_block));
+
+ cmd_buf->size = cpu_to_le16(RQ_CTXT_SIZE(max_ctxts));
+
+ err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_UCODE_CMD_MODIFY_QUEUE_CTX,
+ cmd_buf, &out_param);
+ if (err || out_param) {
+ dev_err(hwdev->dev, "Failed to set RQ ctxts, err: %d, out_param: 0x%llx\n",
+ err, out_param);
+ err = -EFAULT;
+ break;
+ }
+
+ q_id += max_ctxts;
+ }
+
+ hinic3_free_cmd_buf(hwdev, cmd_buf);
+
+ return err;
+}
+
+static int init_qp_ctxts(struct hinic3_nic_dev *nic_dev)
+{
+ int err;
+
+ err = init_sq_ctxts(nic_dev);
+ if (err)
+ return err;
+
+ err = init_rq_ctxts(nic_dev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int clean_queue_offload_ctxt(struct hinic3_nic_dev *nic_dev,
+ enum hinic3_qp_ctxt_type ctxt_type)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic3_clean_queue_ctxt *ctxt_block;
+ struct hinic3_cmd_buf *cmd_buf;
+ __le64 out_param;
+ int err;
+
+ cmd_buf = hinic3_alloc_cmd_buf(hwdev);
+ if (!cmd_buf) {
+ dev_err(hwdev->dev, "Failed to allocate cmd buf\n");
+ return -ENOMEM;
+ }
+
+ ctxt_block = cmd_buf->buf;
+ ctxt_block->cmdq_hdr.num_queues = cpu_to_le16(nic_io->max_qps);
+ ctxt_block->cmdq_hdr.queue_type = cpu_to_le16(ctxt_type);
+ ctxt_block->cmdq_hdr.start_qid = 0;
+ ctxt_block->cmdq_hdr.rsvd = 0;
+ ctxt_block->rsvd = 0;
+
+ hinic3_cmdq_buf_swab32(ctxt_block, sizeof(*ctxt_block));
+
+ cmd_buf->size = cpu_to_le16(sizeof(*ctxt_block));
+
+ err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_UCODE_CMD_CLEAN_QUEUE_CTX,
+ cmd_buf, &out_param);
+ if (err || out_param) {
+ dev_err(hwdev->dev, "Failed to clean queue offload ctxts, err: %d,out_param: 0x%llx\n",
+ err, out_param);
+
+ err = -EFAULT;
+ }
+
+ hinic3_free_cmd_buf(hwdev, cmd_buf);
+
+ return err;
+}
+
+static int clean_qp_offload_ctxt(struct hinic3_nic_dev *nic_dev)
+{
+ /* clean LRO/TSO context space */
+ return clean_queue_offload_ctxt(nic_dev, HINIC3_QP_CTXT_TYPE_SQ) ||
+ clean_queue_offload_ctxt(nic_dev, HINIC3_QP_CTXT_TYPE_RQ);
+}
+
+/* init qp ctxts, set the sq ci attributes and arm all sqs */
+int hinic3_init_qp_ctxts(struct hinic3_nic_dev *nic_dev)
+{
+ struct hinic3_nic_io *nic_io = nic_dev->nic_io;
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic3_sq_attr sq_attr;
+ u32 rq_depth;
+ u16 q_id;
+ int err;
+
+ err = init_qp_ctxts(nic_dev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to init QP ctxts\n");
+ return err;
+ }
+
+ /* clean LRO/TSO context space */
+ err = clean_qp_offload_ctxt(nic_dev);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to clean qp offload ctxts\n");
+ return err;
+ }
+
+ rq_depth = nic_io->rq[0].wq.q_depth << HINIC3_NORMAL_RQ_WQE;
+
+ err = hinic3_set_root_ctxt(hwdev, rq_depth, nic_io->sq[0].wq.q_depth,
+ nic_io->rx_buf_len);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set root context\n");
+ return err;
+ }
+
+ for (q_id = 0; q_id < nic_io->num_qps; q_id++) {
+ sq_attr.ci_dma_base =
+ HINIC3_CI_PADDR(nic_io->ci_dma_base, q_id) >> 0x2;
+ sq_attr.pending_limit = HINIC3_DEFAULT_TX_CI_PENDING_LIMIT;
+ sq_attr.coalescing_time = HINIC3_DEFAULT_TX_CI_COALESCING_TIME;
+ sq_attr.intr_en = 1;
+ sq_attr.intr_idx = nic_io->sq[q_id].msix_entry_idx;
+ sq_attr.l2nic_sqn = q_id;
+ sq_attr.dma_attr_off = 0;
+ err = hinic3_set_ci_table(hwdev, &sq_attr);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set ci table\n");
+ goto err_clean_root_ctxt;
+ }
+ }
+
+ return 0;
+
+err_clean_root_ctxt:
+ hinic3_clean_root_ctxt(hwdev);
+
+ return err;
+}
+
+void hinic3_free_qp_ctxts(struct hinic3_nic_dev *nic_dev)
+{
+ hinic3_clean_root_ctxt(nic_dev->hwdev);
}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h
index 865ba6878c48..12eefabcf1db 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h
@@ -75,8 +75,8 @@ static inline u16 hinic3_get_sq_hw_ci(const struct hinic3_io_queue *sq)
#define DB_CFLAG_DP_RQ 1
struct hinic3_nic_db {
- u32 db_info;
- u32 pi_hi;
+ __le32 db_info;
+ __le32 pi_hi;
};
static inline void hinic3_write_db(struct hinic3_io_queue *queue, int cos,
@@ -84,15 +84,25 @@ static inline void hinic3_write_db(struct hinic3_io_queue *queue, int cos,
{
struct hinic3_nic_db db;
- db.db_info = DB_INFO_SET(DB_SRC_TYPE, TYPE) |
- DB_INFO_SET(cflag, CFLAG) |
- DB_INFO_SET(cos, COS) |
- DB_INFO_SET(queue->q_id, QID);
- db.pi_hi = DB_PI_HIGH(pi);
+ db.db_info =
+ cpu_to_le32(DB_INFO_SET(DB_SRC_TYPE, TYPE) |
+ DB_INFO_SET(cflag, CFLAG) |
+ DB_INFO_SET(cos, COS) |
+ DB_INFO_SET(queue->q_id, QID));
+ db.pi_hi = cpu_to_le32(DB_PI_HIGH(pi));
writeq(*((u64 *)&db), DB_ADDR(queue, pi));
}
+struct hinic3_dyna_qp_params {
+ u16 num_qps;
+ u32 sq_depth;
+ u32 rq_depth;
+
+ struct hinic3_io_queue *sqs;
+ struct hinic3_io_queue *rqs;
+};
+
struct hinic3_nic_io {
struct hinic3_io_queue *sq;
struct hinic3_io_queue *rq;
@@ -117,4 +127,19 @@ struct hinic3_nic_io {
int hinic3_init_nic_io(struct hinic3_nic_dev *nic_dev);
void hinic3_free_nic_io(struct hinic3_nic_dev *nic_dev);
+int hinic3_init_nicio_res(struct hinic3_nic_dev *nic_dev);
+void hinic3_free_nicio_res(struct hinic3_nic_dev *nic_dev);
+
+int hinic3_alloc_qps(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_dyna_qp_params *qp_params);
+void hinic3_free_qps(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_dyna_qp_params *qp_params);
+void hinic3_init_qps(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_dyna_qp_params *qp_params);
+void hinic3_uninit_qps(struct hinic3_nic_dev *nic_dev,
+ struct hinic3_dyna_qp_params *qp_params);
+
+int hinic3_init_qp_ctxts(struct hinic3_nic_dev *nic_dev);
+void hinic3_free_qp_ctxts(struct hinic3_nic_dev *nic_dev);
+
#endif
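Taken together, the declarations above imply a bring-up ordering: allocate the queue memory, wire the CI addresses and doorbells into nic_io, then program the hardware contexts. A minimal caller sketch, assuming hinic3_init_nic_io() and hinic3_init_nicio_res() have already run (the wrapper function itself is hypothetical):

static int example_bring_up_qps(struct hinic3_nic_dev *nic_dev,
				struct hinic3_dyna_qp_params *qp_params)
{
	int err;

	/* caller fills qp_params->num_qps/sq_depth/rq_depth first */
	err = hinic3_alloc_qps(nic_dev, qp_params);
	if (err)
		return err;

	/* attach per-queue CI addresses and doorbell pages to nic_io */
	hinic3_init_qps(nic_dev, qp_params);

	/* program SQ/RQ contexts, the root context and the CI tables */
	err = hinic3_init_qp_ctxts(nic_dev);
	if (err) {
		hinic3_uninit_qps(nic_dev, qp_params);
		hinic3_free_qps(nic_dev, qp_params);
		return err;
	}

	return 0;
}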
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_pci_id_tbl.h b/drivers/net/ethernet/huawei/hinic3/hinic3_pci_id_tbl.h
new file mode 100644
index 000000000000..86c88d0bb4bd
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_pci_id_tbl.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_PCI_ID_TBL_H_
+#define _HINIC3_PCI_ID_TBL_H_
+
+#define PCI_DEV_ID_HINIC3_VF 0x375F
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rss.c b/drivers/net/ethernet/huawei/hinic3/hinic3_rss.c
new file mode 100644
index 000000000000..4ff1b2f79838
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rss.c
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
+
+#include <linux/ethtool.h>
+
+#include "hinic3_cmdq.h"
+#include "hinic3_hwdev.h"
+#include "hinic3_hwif.h"
+#include "hinic3_mbox.h"
+#include "hinic3_nic_cfg.h"
+#include "hinic3_nic_dev.h"
+#include "hinic3_rss.h"
+
+static void hinic3_fillout_indir_tbl(struct net_device *netdev, u16 *indir)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ u16 i, num_qps;
+
+ num_qps = nic_dev->q_params.num_qps;
+ for (i = 0; i < L2NIC_RSS_INDIR_SIZE; i++)
+ indir[i] = ethtool_rxfh_indir_default(i, num_qps);
+}
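ethtool_rxfh_indir_default() in <linux/ethtool.h> is simply index % n_rx_rings, so the indirection table spreads flows round-robin across the enabled RQs. A standalone sketch of the resulting layout:

#include <stdio.h>

/* mirrors ethtool_rxfh_indir_default(): round-robin over the RX rings */
static unsigned int rxfh_indir_default(unsigned int index,
				       unsigned int n_rx_rings)
{
	return index % n_rx_rings;
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < 8; i++)	/* first 8 table entries with num_qps = 4 */
		printf("indir[%u] = %u\n", i, rxfh_indir_default(i, 4));
	return 0;	/* 0 1 2 3 0 1 2 3 */
}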
+
+static int hinic3_rss_cfg(struct hinic3_hwdev *hwdev, u8 rss_en, u16 num_qps)
+{
+ struct mgmt_msg_params msg_params = {};
+ struct l2nic_cmd_cfg_rss rss_cfg = {};
+ int err;
+
+ rss_cfg.func_id = hinic3_global_func_id(hwdev);
+ rss_cfg.rss_en = rss_en;
+ rss_cfg.rq_priority_number = 0;
+ rss_cfg.num_qps = num_qps;
+
+ mgmt_msg_params_init_default(&msg_params, &rss_cfg, sizeof(rss_cfg));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_CFG_RSS, &msg_params);
+ if (err || rss_cfg.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to set rss cfg, err: %d, status: 0x%x\n",
+ err, rss_cfg.msg_head.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void hinic3_init_rss_parameters(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ nic_dev->rss_hash_type = HINIC3_RSS_HASH_ENGINE_TYPE_XOR;
+ nic_dev->rss_type.tcp_ipv6_ext = 1;
+ nic_dev->rss_type.ipv6_ext = 1;
+ nic_dev->rss_type.tcp_ipv6 = 1;
+ nic_dev->rss_type.ipv6 = 1;
+ nic_dev->rss_type.tcp_ipv4 = 1;
+ nic_dev->rss_type.ipv4 = 1;
+ nic_dev->rss_type.udp_ipv6 = 1;
+ nic_dev->rss_type.udp_ipv4 = 1;
+}
+
+static void decide_num_qps(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ unsigned int dev_cpus;
+
+ dev_cpus = netif_get_num_default_rss_queues();
+ nic_dev->q_params.num_qps = min(dev_cpus, nic_dev->max_qps);
+}
+
+static int alloc_rss_resource(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ nic_dev->rss_hkey = kmalloc_array(L2NIC_RSS_KEY_SIZE,
+ sizeof(nic_dev->rss_hkey[0]),
+ GFP_KERNEL);
+ if (!nic_dev->rss_hkey)
+ return -ENOMEM;
+
+ netdev_rss_key_fill(nic_dev->rss_hkey, L2NIC_RSS_KEY_SIZE);
+
+ nic_dev->rss_indir = kcalloc(L2NIC_RSS_INDIR_SIZE, sizeof(u16),
+ GFP_KERNEL);
+ if (!nic_dev->rss_indir) {
+ kfree(nic_dev->rss_hkey);
+ nic_dev->rss_hkey = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int hinic3_rss_set_indir_tbl(struct hinic3_hwdev *hwdev,
+ const u16 *indir_table)
+{
+ struct l2nic_cmd_rss_set_indir_tbl *indir_tbl;
+ struct hinic3_cmd_buf *cmd_buf;
+ __le64 out_param;
+ int err;
+ u32 i;
+
+ cmd_buf = hinic3_alloc_cmd_buf(hwdev);
+ if (!cmd_buf) {
+ dev_err(hwdev->dev, "Failed to allocate cmd buf\n");
+ return -ENOMEM;
+ }
+
+ cmd_buf->size = cpu_to_le16(sizeof(struct l2nic_cmd_rss_set_indir_tbl));
+ indir_tbl = cmd_buf->buf;
+ memset(indir_tbl, 0, sizeof(*indir_tbl));
+
+ for (i = 0; i < L2NIC_RSS_INDIR_SIZE; i++)
+ indir_tbl->entry[i] = cpu_to_le16(indir_table[i]);
+
+ hinic3_cmdq_buf_swab32(indir_tbl, sizeof(*indir_tbl));
+
+ err = hinic3_cmdq_direct_resp(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_UCODE_CMD_SET_RSS_INDIR_TBL,
+ cmd_buf, &out_param);
+ if (err || out_param) {
+ dev_err(hwdev->dev, "Failed to set rss indir table\n");
+ err = -EFAULT;
+ }
+
+ hinic3_free_cmd_buf(hwdev, cmd_buf);
+
+ return err;
+}
+
+static int hinic3_set_rss_type(struct hinic3_hwdev *hwdev,
+ struct hinic3_rss_type rss_type)
+{
+ struct l2nic_cmd_set_rss_ctx_tbl ctx_tbl = {};
+ struct mgmt_msg_params msg_params = {};
+ u32 ctx;
+ int err;
+
+ ctx_tbl.func_id = hinic3_global_func_id(hwdev);
+ ctx = L2NIC_RSS_TYPE_SET(1, VALID) |
+ L2NIC_RSS_TYPE_SET(rss_type.ipv4, IPV4) |
+ L2NIC_RSS_TYPE_SET(rss_type.ipv6, IPV6) |
+ L2NIC_RSS_TYPE_SET(rss_type.ipv6_ext, IPV6_EXT) |
+ L2NIC_RSS_TYPE_SET(rss_type.tcp_ipv4, TCP_IPV4) |
+ L2NIC_RSS_TYPE_SET(rss_type.tcp_ipv6, TCP_IPV6) |
+ L2NIC_RSS_TYPE_SET(rss_type.tcp_ipv6_ext, TCP_IPV6_EXT) |
+ L2NIC_RSS_TYPE_SET(rss_type.udp_ipv4, UDP_IPV4) |
+ L2NIC_RSS_TYPE_SET(rss_type.udp_ipv6, UDP_IPV6);
+ ctx_tbl.context = ctx;
+
+ mgmt_msg_params_init_default(&msg_params, &ctx_tbl, sizeof(ctx_tbl));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_SET_RSS_CTX_TBL, &msg_params);
+
+ if (ctx_tbl.msg_head.status == MGMT_STATUS_CMD_UNSUPPORTED) {
+ return MGMT_STATUS_CMD_UNSUPPORTED;
+ } else if (err || ctx_tbl.msg_head.status) {
+ dev_err(hwdev->dev, "mgmt Failed to set rss context offload, err: %d, status: 0x%x\n",
+ err, ctx_tbl.msg_head.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hinic3_rss_cfg_hash_type(struct hinic3_hwdev *hwdev, u8 opcode,
+ enum hinic3_rss_hash_type *type)
+{
+ struct l2nic_cmd_cfg_rss_engine hash_type_cmd = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ hash_type_cmd.func_id = hinic3_global_func_id(hwdev);
+ hash_type_cmd.opcode = opcode;
+
+ if (opcode == MGMT_MSG_CMD_OP_SET)
+ hash_type_cmd.hash_engine = *type;
+
+ mgmt_msg_params_init_default(&msg_params, &hash_type_cmd,
+ sizeof(hash_type_cmd));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_CFG_RSS_HASH_ENGINE,
+ &msg_params);
+ if (err || hash_type_cmd.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to %s hash engine, err: %d, status: 0x%x\n",
+ opcode == MGMT_MSG_CMD_OP_SET ? "set" : "get",
+ err, hash_type_cmd.msg_head.status);
+ return -EIO;
+ }
+
+ if (opcode == MGMT_MSG_CMD_OP_GET)
+ *type = hash_type_cmd.hash_engine;
+
+ return 0;
+}
+
+static int hinic3_rss_set_hash_type(struct hinic3_hwdev *hwdev,
+ enum hinic3_rss_hash_type type)
+{
+ return hinic3_rss_cfg_hash_type(hwdev, MGMT_MSG_CMD_OP_SET, &type);
+}
+
+static int hinic3_config_rss_hw_resource(struct net_device *netdev,
+ u16 *indir_tbl)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ err = hinic3_rss_set_indir_tbl(nic_dev->hwdev, indir_tbl);
+ if (err)
+ return err;
+
+ err = hinic3_set_rss_type(nic_dev->hwdev, nic_dev->rss_type);
+ if (err)
+ return err;
+
+ return hinic3_rss_set_hash_type(nic_dev->hwdev, nic_dev->rss_hash_type);
+}
+
+static int hinic3_rss_cfg_hash_key(struct hinic3_hwdev *hwdev, u8 opcode,
+ u8 *key)
+{
+ struct l2nic_cmd_cfg_rss_hash_key hash_key = {};
+ struct mgmt_msg_params msg_params = {};
+ int err;
+
+ hash_key.func_id = hinic3_global_func_id(hwdev);
+ hash_key.opcode = opcode;
+
+ if (opcode == MGMT_MSG_CMD_OP_SET)
+ memcpy(hash_key.key, key, L2NIC_RSS_KEY_SIZE);
+
+ mgmt_msg_params_init_default(&msg_params, &hash_key, sizeof(hash_key));
+
+ err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ L2NIC_CMD_CFG_RSS_HASH_KEY, &msg_params);
+ if (err || hash_key.msg_head.status) {
+ dev_err(hwdev->dev, "Failed to %s hash key, err: %d, status: 0x%x\n",
+ opcode == MGMT_MSG_CMD_OP_SET ? "set" : "get",
+ err, hash_key.msg_head.status);
+ return -EINVAL;
+ }
+
+ if (opcode == MGMT_MSG_CMD_OP_GET)
+ memcpy(key, hash_key.key, L2NIC_RSS_KEY_SIZE);
+
+ return 0;
+}
+
+static int hinic3_rss_set_hash_key(struct hinic3_hwdev *hwdev, u8 *key)
+{
+ return hinic3_rss_cfg_hash_key(hwdev, MGMT_MSG_CMD_OP_SET, key);
+}
+
+static int hinic3_set_hw_rss_parameters(struct net_device *netdev, u8 rss_en)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ int err;
+
+ err = hinic3_rss_set_hash_key(nic_dev->hwdev, nic_dev->rss_hkey);
+ if (err)
+ return err;
+
+ hinic3_fillout_indir_tbl(netdev, nic_dev->rss_indir);
+
+ err = hinic3_config_rss_hw_resource(netdev, nic_dev->rss_indir);
+ if (err)
+ return err;
+
+ err = hinic3_rss_cfg(nic_dev->hwdev, rss_en, nic_dev->q_params.num_qps);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int hinic3_rss_init(struct net_device *netdev)
+{
+ return hinic3_set_hw_rss_parameters(netdev, 1);
+}
+
+void hinic3_rss_uninit(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ hinic3_rss_cfg(nic_dev->hwdev, 0, 0);
+}
+
+void hinic3_clear_rss_config(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ kfree(nic_dev->rss_hkey);
+ nic_dev->rss_hkey = NULL;
+
+ kfree(nic_dev->rss_indir);
+ nic_dev->rss_indir = NULL;
+}
+
+void hinic3_try_to_enable_rss(struct net_device *netdev)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_hwdev *hwdev = nic_dev->hwdev;
+ int err;
+
+ nic_dev->max_qps = hinic3_func_max_qnum(hwdev);
+ if (nic_dev->max_qps <= 1 ||
+ !hinic3_test_support(nic_dev, HINIC3_NIC_F_RSS))
+ goto err_reset_q_params;
+
+ err = alloc_rss_resource(netdev);
+ if (err) {
+ nic_dev->max_qps = 1;
+ goto err_reset_q_params;
+ }
+
+ set_bit(HINIC3_RSS_ENABLE, &nic_dev->flags);
+ decide_num_qps(netdev);
+ hinic3_init_rss_parameters(netdev);
+ err = hinic3_set_hw_rss_parameters(netdev, 0);
+ if (err) {
+ dev_err(hwdev->dev, "Failed to set hardware rss parameters\n");
+ hinic3_clear_rss_config(netdev);
+ nic_dev->max_qps = 1;
+ goto err_reset_q_params;
+ }
+
+ return;
+
+err_reset_q_params:
+ clear_bit(HINIC3_RSS_ENABLE, &nic_dev->flags);
+ nic_dev->q_params.num_qps = nic_dev->max_qps;
+}
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rss.h b/drivers/net/ethernet/huawei/hinic3/hinic3_rss.h
new file mode 100644
index 000000000000..78d82c2aca06
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rss.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */
+
+#ifndef _HINIC3_RSS_H_
+#define _HINIC3_RSS_H_
+
+#include <linux/netdevice.h>
+
+int hinic3_rss_init(struct net_device *netdev);
+void hinic3_rss_uninit(struct net_device *netdev);
+void hinic3_try_to_enable_rss(struct net_device *netdev);
+void hinic3_clear_rss_config(struct net_device *netdev);
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
index 860163e9d66c..16c00c3bb1ed 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c
@@ -35,13 +35,35 @@
int hinic3_alloc_rxqs(struct net_device *netdev)
{
- /* Completed by later submission due to LoC limit. */
- return -EFAULT;
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct pci_dev *pdev = nic_dev->pdev;
+ u16 num_rxqs = nic_dev->max_qps;
+ struct hinic3_rxq *rxq;
+ u16 q_id;
+
+ nic_dev->rxqs = kcalloc(num_rxqs, sizeof(*nic_dev->rxqs), GFP_KERNEL);
+ if (!nic_dev->rxqs)
+ return -ENOMEM;
+
+ for (q_id = 0; q_id < num_rxqs; q_id++) {
+ rxq = &nic_dev->rxqs[q_id];
+ rxq->netdev = netdev;
+ rxq->dev = &pdev->dev;
+ rxq->q_id = q_id;
+ rxq->buf_len = nic_dev->rx_buf_len;
+ rxq->buf_len_shift = ilog2(nic_dev->rx_buf_len);
+ rxq->q_depth = nic_dev->q_params.rq_depth;
+ rxq->q_mask = nic_dev->q_params.rq_depth - 1;
+ }
+
+ return 0;
}
void hinic3_free_rxqs(struct net_device *netdev)
{
- /* Completed by later submission due to LoC limit. */
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+
+ kfree(nic_dev->rxqs);
}
static int rx_alloc_mapped_page(struct page_pool *page_pool,
@@ -50,6 +72,9 @@ static int rx_alloc_mapped_page(struct page_pool *page_pool,
struct page *page;
u32 page_offset;
+ if (likely(rx_info->page))
+ return 0;
+
page = page_pool_dev_alloc_frag(page_pool, &page_offset, buf_len);
if (unlikely(!page))
return -ENOMEM;
@@ -60,14 +85,35 @@ static int rx_alloc_mapped_page(struct page_pool *page_pool,
return 0;
}
+/* Associate a fixed completion element with every WQE in the RQ. Every RQ
+ * WQE will always post its completion to the same place.
+ */
+static void rq_associate_cqes(struct hinic3_rxq *rxq)
+{
+ struct hinic3_queue_pages *qpages;
+ struct hinic3_rq_wqe *rq_wqe;
+ dma_addr_t cqe_dma;
+ u32 i;
+
+ qpages = &rxq->rq->wq.qpages;
+
+ for (i = 0; i < rxq->q_depth; i++) {
+ rq_wqe = get_q_element(qpages, i, NULL);
+ cqe_dma = rxq->cqe_start_paddr +
+ i * sizeof(struct hinic3_rq_cqe);
+ rq_wqe->cqe_hi_addr = cpu_to_le32(upper_32_bits(cqe_dma));
+ rq_wqe->cqe_lo_addr = cpu_to_le32(lower_32_bits(cqe_dma));
+ }
+}
+
static void rq_wqe_buf_set(struct hinic3_io_queue *rq, uint32_t wqe_idx,
dma_addr_t dma_addr, u16 len)
{
struct hinic3_rq_wqe *rq_wqe;
rq_wqe = get_q_element(&rq->wq.qpages, wqe_idx, NULL);
- rq_wqe->buf_hi_addr = upper_32_bits(dma_addr);
- rq_wqe->buf_lo_addr = lower_32_bits(dma_addr);
+ rq_wqe->buf_hi_addr = cpu_to_le32(upper_32_bits(dma_addr));
+ rq_wqe->buf_lo_addr = cpu_to_le32(lower_32_bits(dma_addr));
}
static u32 hinic3_rx_fill_buffers(struct hinic3_rxq *rxq)
@@ -102,6 +148,41 @@ static u32 hinic3_rx_fill_buffers(struct hinic3_rxq *rxq)
return i;
}
+static u32 hinic3_alloc_rx_buffers(struct hinic3_dyna_rxq_res *rqres,
+ u32 rq_depth, u16 buf_len)
+{
+ u32 free_wqebbs = rq_depth - 1;
+ u32 idx;
+ int err;
+
+ for (idx = 0; idx < free_wqebbs; idx++) {
+ err = rx_alloc_mapped_page(rqres->page_pool,
+ &rqres->rx_info[idx], buf_len);
+ if (err)
+ break;
+ }
+
+ return idx;
+}
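Only rq_depth - 1 buffers are posted, which looks like the usual ring-buffer convention: keeping one slot empty means prod == cons can only ever mean "empty", never "full". A quick standalone demonstration of that invariant:

#include <stdio.h>

int main(void)
{
	unsigned int rq_depth = 8, prod = 0, cons = 0, posted;

	/* post until one more slot would make prod wrap onto cons */
	for (posted = 0; ((prod + 1) & (rq_depth - 1)) != cons; posted++)
		prod = (prod + 1) & (rq_depth - 1);

	printf("posted %u of %u slots\n", posted, rq_depth);	/* 7 of 8 */
	return 0;
}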
+
+static void hinic3_free_rx_buffers(struct hinic3_dyna_rxq_res *rqres,
+ u32 q_depth)
+{
+ struct hinic3_rx_info *rx_info;
+ u32 i;
+
+ /* Return all pages still held by the Rx ring to the page pool */
+ for (i = 0; i < q_depth; i++) {
+ rx_info = &rqres->rx_info[i];
+
+ if (rx_info->page) {
+ page_pool_put_full_page(rqres->page_pool,
+ rx_info->page, false);
+ rx_info->page = NULL;
+ }
+ }
+}
+
static void hinic3_add_rx_frag(struct hinic3_rxq *rxq,
struct hinic3_rx_info *rx_info,
struct sk_buff *skb, u32 size)
@@ -279,7 +360,7 @@ static int recv_one_pkt(struct hinic3_rxq *rxq, struct hinic3_rq_cqe *rx_cqe,
if (skb_is_nonlinear(skb))
hinic3_pull_tail(skb);
- offload_type = rx_cqe->offload_type;
+ offload_type = le32_to_cpu(rx_cqe->offload_type);
hinic3_rx_csum(rxq, offload_type, status, skb);
num_lro = RQ_CQE_STATUS_GET(status, NUM_LRO);
@@ -299,6 +380,135 @@ static int recv_one_pkt(struct hinic3_rxq *rxq, struct hinic3_rq_cqe *rx_cqe,
return 0;
}
+int hinic3_alloc_rxqs_res(struct net_device *netdev, u16 num_rq,
+ u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res)
+{
+ u64 cqe_mem_size = sizeof(struct hinic3_rq_cqe) * rq_depth;
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct page_pool_params pp_params = {};
+ struct hinic3_dyna_rxq_res *rqres;
+ u32 pkt_idx;
+ int idx;
+
+ for (idx = 0; idx < num_rq; idx++) {
+ rqres = &rxqs_res[idx];
+ rqres->rx_info = kcalloc(rq_depth, sizeof(*rqres->rx_info),
+ GFP_KERNEL);
+ if (!rqres->rx_info)
+ goto err_free_rqres;
+
+ rqres->cqe_start_vaddr =
+ dma_alloc_coherent(&nic_dev->pdev->dev, cqe_mem_size,
+ &rqres->cqe_start_paddr, GFP_KERNEL);
+ if (!rqres->cqe_start_vaddr) {
+ netdev_err(netdev, "Failed to alloc rxq%d rx cqe\n",
+ idx);
+ goto err_free_rx_info;
+ }
+
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.pool_size = rq_depth * nic_dev->rx_buf_len /
+ PAGE_SIZE;
+ pp_params.nid = dev_to_node(&nic_dev->pdev->dev);
+ pp_params.dev = &nic_dev->pdev->dev;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+ pp_params.max_len = PAGE_SIZE;
+ rqres->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rqres->page_pool)) {
+ netdev_err(netdev, "Failed to create rxq%d page pool\n",
+ idx);
+ goto err_free_cqe;
+ }
+
+ pkt_idx = hinic3_alloc_rx_buffers(rqres, rq_depth,
+ nic_dev->rx_buf_len);
+ if (!pkt_idx) {
+ netdev_err(netdev, "Failed to alloc rxq%d rx buffers\n",
+ idx);
+ goto err_destroy_page_pool;
+ }
+ rqres->next_to_alloc = pkt_idx;
+ }
+
+ return 0;
+
+err_destroy_page_pool:
+ page_pool_destroy(rqres->page_pool);
+err_free_cqe:
+ dma_free_coherent(&nic_dev->pdev->dev, cqe_mem_size,
+ rqres->cqe_start_vaddr,
+ rqres->cqe_start_paddr);
+err_free_rx_info:
+ kfree(rqres->rx_info);
+err_free_rqres:
+ hinic3_free_rxqs_res(netdev, idx, rq_depth, rxqs_res);
+
+ return -ENOMEM;
+}
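The pool is sized in whole pages while the receive buffers are sub-page frags handed out by page_pool_dev_alloc_frag(). Worked numbers, assuming a hypothetical 2 KiB rx_buf_len and 4 KiB pages:

#include <stdio.h>

int main(void)
{
	unsigned int rq_depth = 1024, rx_buf_len = 2048, page_size = 4096;

	/* each page is split into page_size / rx_buf_len frags, so the
	 * pool only needs rq_depth * rx_buf_len / page_size whole pages */
	printf("pool_size = %u pages\n", rq_depth * rx_buf_len / page_size);
	return 0;	/* 512 pages backing 1024 receive buffers */
}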
+
+void hinic3_free_rxqs_res(struct net_device *netdev, u16 num_rq,
+ u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res)
+{
+ u64 cqe_mem_size = sizeof(struct hinic3_rq_cqe) * rq_depth;
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_dyna_rxq_res *rqres;
+ int idx;
+
+ for (idx = 0; idx < num_rq; idx++) {
+ rqres = &rxqs_res[idx];
+
+ hinic3_free_rx_buffers(rqres, rq_depth);
+ page_pool_destroy(rqres->page_pool);
+ dma_free_coherent(&nic_dev->pdev->dev, cqe_mem_size,
+ rqres->cqe_start_vaddr,
+ rqres->cqe_start_paddr);
+ kfree(rqres->rx_info);
+ }
+}
+
+int hinic3_configure_rxqs(struct net_device *netdev, u16 num_rq,
+ u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_dyna_rxq_res *rqres;
+ struct msix_entry *msix_entry;
+ struct hinic3_rxq *rxq;
+ u16 q_id;
+ u32 pkts;
+
+ for (q_id = 0; q_id < num_rq; q_id++) {
+ rxq = &nic_dev->rxqs[q_id];
+ rqres = &rxqs_res[q_id];
+ msix_entry = &nic_dev->qps_msix_entries[q_id];
+
+ rxq->irq_id = msix_entry->vector;
+ rxq->msix_entry_idx = msix_entry->entry;
+ rxq->next_to_update = 0;
+ rxq->next_to_alloc = rqres->next_to_alloc;
+ rxq->q_depth = rq_depth;
+ rxq->delta = rxq->q_depth;
+ rxq->q_mask = rxq->q_depth - 1;
+ rxq->cons_idx = 0;
+
+ rxq->cqe_arr = rqres->cqe_start_vaddr;
+ rxq->cqe_start_paddr = rqres->cqe_start_paddr;
+ rxq->rx_info = rqres->rx_info;
+ rxq->page_pool = rqres->page_pool;
+
+ rxq->rq = &nic_dev->nic_io->rq[rxq->q_id];
+
+ rq_associate_cqes(rxq);
+
+ pkts = hinic3_rx_fill_buffers(rxq);
+ if (!pkts) {
+ netdev_err(netdev, "Failed to fill Rx buffer\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget)
{
struct hinic3_nic_dev *nic_dev = netdev_priv(rxq->netdev);
@@ -311,14 +521,14 @@ int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget)
while (likely(nr_pkts < budget)) {
sw_ci = rxq->cons_idx & rxq->q_mask;
rx_cqe = rxq->cqe_arr + sw_ci;
- status = rx_cqe->status;
+ status = le32_to_cpu(rx_cqe->status);
if (!RQ_CQE_STATUS_GET(status, RXDONE))
break;
/* make sure we read rx_done before packet length */
rmb();
- vlan_len = rx_cqe->vlan_len;
+ vlan_len = le32_to_cpu(rx_cqe->vlan_len);
pkt_len = RQ_CQE_SGE_GET(vlan_len, LEN);
if (recv_one_pkt(rxq, rx_cqe, pkt_len, vlan_len, status))
break;
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
index 1cca21858d40..44ae841a3648 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h
@@ -27,21 +27,21 @@
/* RX Completion information that is provided by HW for a specific RX WQE */
struct hinic3_rq_cqe {
- u32 status;
- u32 vlan_len;
- u32 offload_type;
- u32 rsvd3;
- u32 rsvd4;
- u32 rsvd5;
- u32 rsvd6;
- u32 pkt_info;
+ __le32 status;
+ __le32 vlan_len;
+ __le32 offload_type;
+ __le32 rsvd3;
+ __le32 rsvd4;
+ __le32 rsvd5;
+ __le32 rsvd6;
+ __le32 pkt_info;
};
struct hinic3_rq_wqe {
- u32 buf_hi_addr;
- u32 buf_lo_addr;
- u32 cqe_hi_addr;
- u32 cqe_lo_addr;
+ __le32 buf_hi_addr;
+ __le32 buf_lo_addr;
+ __le32 cqe_hi_addr;
+ __le32 cqe_lo_addr;
};
struct hinic3_rx_info {
@@ -82,9 +82,23 @@ struct hinic3_rxq {
dma_addr_t cqe_start_paddr;
} ____cacheline_aligned;
+struct hinic3_dyna_rxq_res {
+ u16 next_to_alloc;
+ struct hinic3_rx_info *rx_info;
+ dma_addr_t cqe_start_paddr;
+ void *cqe_start_vaddr;
+ struct page_pool *page_pool;
+};
+
int hinic3_alloc_rxqs(struct net_device *netdev);
void hinic3_free_rxqs(struct net_device *netdev);
+int hinic3_alloc_rxqs_res(struct net_device *netdev, u16 num_rq,
+ u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res);
+void hinic3_free_rxqs_res(struct net_device *netdev, u16 num_rq,
+ u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res);
+int hinic3_configure_rxqs(struct net_device *netdev, u16 num_rq,
+ u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res);
int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget);
#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
index 3f7f73430be4..92c43c05e3f2 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c
@@ -55,9 +55,9 @@ void hinic3_free_txqs(struct net_device *netdev)
static void hinic3_set_buf_desc(struct hinic3_sq_bufdesc *buf_descs,
dma_addr_t addr, u32 len)
{
- buf_descs->hi_addr = upper_32_bits(addr);
- buf_descs->lo_addr = lower_32_bits(addr);
- buf_descs->len = len;
+ buf_descs->hi_addr = cpu_to_le32(upper_32_bits(addr));
+ buf_descs->lo_addr = cpu_to_le32(lower_32_bits(addr));
+ buf_descs->len = cpu_to_le32(len);
}
static int hinic3_tx_map_skb(struct net_device *netdev, struct sk_buff *skb,
@@ -81,10 +81,10 @@ static int hinic3_tx_map_skb(struct net_device *netdev, struct sk_buff *skb,
dma_info[0].len = skb_headlen(skb);
- wqe_desc->hi_addr = upper_32_bits(dma_info[0].dma);
- wqe_desc->lo_addr = lower_32_bits(dma_info[0].dma);
+ wqe_desc->hi_addr = cpu_to_le32(upper_32_bits(dma_info[0].dma));
+ wqe_desc->lo_addr = cpu_to_le32(lower_32_bits(dma_info[0].dma));
- wqe_desc->ctrl_len = dma_info[0].len;
+ wqe_desc->ctrl_len = cpu_to_le32(dma_info[0].len);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
frag = &(skb_shinfo(skb)->frags[i]);
@@ -116,6 +116,7 @@ err_unmap_page:
}
dma_unmap_single(&pdev->dev, dma_info[0].dma, dma_info[0].len,
DMA_TO_DEVICE);
+
return err;
}
@@ -138,6 +139,23 @@ static void hinic3_tx_unmap_skb(struct net_device *netdev,
dma_info[0].len, DMA_TO_DEVICE);
}
+static void free_all_tx_skbs(struct net_device *netdev, u32 sq_depth,
+ struct hinic3_tx_info *tx_info_arr)
+{
+ struct hinic3_tx_info *tx_info;
+ u32 idx;
+
+ for (idx = 0; idx < sq_depth; idx++) {
+ tx_info = &tx_info_arr[idx];
+ if (tx_info->skb) {
+ hinic3_tx_unmap_skb(netdev, tx_info->skb,
+ tx_info->dma_info);
+ dev_kfree_skb_any(tx_info->skb);
+ tx_info->skb = NULL;
+ }
+ }
+}
+
union hinic3_ip {
struct iphdr *v4;
struct ipv6hdr *v6;
@@ -197,7 +215,8 @@ static int hinic3_tx_csum(struct hinic3_txq *txq, struct hinic3_sq_task *task,
union hinic3_ip ip;
u8 l4_proto;
- task->pkt_info0 |= SQ_TASK_INFO0_SET(1, TUNNEL_FLAG);
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1,
+ TUNNEL_FLAG));
ip.hdr = skb_network_header(skb);
if (ip.v4->version == 4) {
@@ -226,7 +245,7 @@ static int hinic3_tx_csum(struct hinic3_txq *txq, struct hinic3_sq_task *task,
}
}
- task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L4_EN);
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1, INNER_L4_EN));
return 1;
}
@@ -255,26 +274,28 @@ static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic3_ip *ip,
}
}
-static void hinic3_set_tso_info(struct hinic3_sq_task *task, u32 *queue_info,
+static void hinic3_set_tso_info(struct hinic3_sq_task *task, __le32 *queue_info,
enum hinic3_l4_offload_type l4_offload,
u32 offset, u32 mss)
{
if (l4_offload == HINIC3_L4_OFFLOAD_TCP) {
- *queue_info |= SQ_CTRL_QUEUE_INFO_SET(1, TSO);
- task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L4_EN);
+ *queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(1, TSO));
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1,
+ INNER_L4_EN));
} else if (l4_offload == HINIC3_L4_OFFLOAD_UDP) {
- *queue_info |= SQ_CTRL_QUEUE_INFO_SET(1, UFO);
- task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L4_EN);
+ *queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(1, UFO));
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1,
+ INNER_L4_EN));
}
/* enable L3 calculation */
- task->pkt_info0 |= SQ_TASK_INFO0_SET(1, INNER_L3_EN);
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1, INNER_L3_EN));
- *queue_info |= SQ_CTRL_QUEUE_INFO_SET(offset >> 1, PLDOFF);
+ *queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(offset >> 1, PLDOFF));
/* set MSS value */
- *queue_info &= ~SQ_CTRL_QUEUE_INFO_MSS_MASK;
- *queue_info |= SQ_CTRL_QUEUE_INFO_SET(mss, MSS);
+ *queue_info &= cpu_to_le32(~SQ_CTRL_QUEUE_INFO_MSS_MASK);
+ *queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(mss, MSS));
}
static __sum16 csum_magic(union hinic3_ip *ip, unsigned short proto)
@@ -284,7 +305,7 @@ static __sum16 csum_magic(union hinic3_ip *ip, unsigned short proto)
csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0);
}
-static int hinic3_tso(struct hinic3_sq_task *task, u32 *queue_info,
+static int hinic3_tso(struct hinic3_sq_task *task, __le32 *queue_info,
struct sk_buff *skb)
{
enum hinic3_l4_offload_type l4_offload;
@@ -305,15 +326,17 @@ static int hinic3_tso(struct hinic3_sq_task *task, u32 *queue_info,
if (skb->encapsulation) {
u32 gso_type = skb_shinfo(skb)->gso_type;
/* L3 checksum is always enabled */
- task->pkt_info0 |= SQ_TASK_INFO0_SET(1, OUT_L3_EN);
- task->pkt_info0 |= SQ_TASK_INFO0_SET(1, TUNNEL_FLAG);
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1, OUT_L3_EN));
+ task->pkt_info0 |= cpu_to_le32(SQ_TASK_INFO0_SET(1,
+ TUNNEL_FLAG));
l4.hdr = skb_transport_header(skb);
ip.hdr = skb_network_header(skb);
if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP);
- task->pkt_info0 |= SQ_TASK_INFO0_SET(1, OUT_L4_EN);
+ task->pkt_info0 |=
+ cpu_to_le32(SQ_TASK_INFO0_SET(1, OUT_L4_EN));
}
ip.hdr = skb_inner_network_header(skb);
@@ -343,13 +366,14 @@ static void hinic3_set_vlan_tx_offload(struct hinic3_sq_task *task,
* 2=select TPID2 in IPSU, 3=select TPID3 in IPSU,
* 4=select TPID4 in IPSU
*/
- task->vlan_offload = SQ_TASK_INFO3_SET(vlan_tag, VLAN_TAG) |
- SQ_TASK_INFO3_SET(vlan_tpid, VLAN_TPID) |
- SQ_TASK_INFO3_SET(1, VLAN_TAG_VALID);
+ task->vlan_offload =
+ cpu_to_le32(SQ_TASK_INFO3_SET(vlan_tag, VLAN_TAG) |
+ SQ_TASK_INFO3_SET(vlan_tpid, VLAN_TPID) |
+ SQ_TASK_INFO3_SET(1, VLAN_TAG_VALID));
}
static u32 hinic3_tx_offload(struct sk_buff *skb, struct hinic3_sq_task *task,
- u32 *queue_info, struct hinic3_txq *txq)
+ __le32 *queue_info, struct hinic3_txq *txq)
{
u32 offload = 0;
int tso_cs_en;
@@ -440,39 +464,41 @@ static u16 hinic3_set_wqe_combo(struct hinic3_txq *txq,
}
static void hinic3_prepare_sq_ctrl(struct hinic3_sq_wqe_combo *wqe_combo,
- u32 queue_info, int nr_descs, u16 owner)
+ __le32 queue_info, int nr_descs, u16 owner)
{
struct hinic3_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0;
if (wqe_combo->wqe_type == SQ_WQE_COMPACT_TYPE) {
wqe_desc->ctrl_len |=
- SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
- SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) |
- SQ_CTRL_SET(owner, OWNER);
+ cpu_to_le32(SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
+ SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) |
+ SQ_CTRL_SET(owner, OWNER));
/* compact wqe queue_info will transfer to chip */
wqe_desc->queue_info = 0;
return;
}
- wqe_desc->ctrl_len |= SQ_CTRL_SET(nr_descs, BUFDESC_NUM) |
- SQ_CTRL_SET(wqe_combo->task_type, TASKSECT_LEN) |
- SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
- SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) |
- SQ_CTRL_SET(owner, OWNER);
+ wqe_desc->ctrl_len |=
+ cpu_to_le32(SQ_CTRL_SET(nr_descs, BUFDESC_NUM) |
+ SQ_CTRL_SET(wqe_combo->task_type, TASKSECT_LEN) |
+ SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
+ SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) |
+ SQ_CTRL_SET(owner, OWNER));
wqe_desc->queue_info = queue_info;
- wqe_desc->queue_info |= SQ_CTRL_QUEUE_INFO_SET(1, UC);
+ wqe_desc->queue_info |= cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(1, UC));
if (!SQ_CTRL_QUEUE_INFO_GET(wqe_desc->queue_info, MSS)) {
wqe_desc->queue_info |=
- SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_DEFAULT, MSS);
+ cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_DEFAULT, MSS));
} else if (SQ_CTRL_QUEUE_INFO_GET(wqe_desc->queue_info, MSS) <
HINIC3_TX_MSS_MIN) {
/* mss should not be less than 80 */
- wqe_desc->queue_info &= ~SQ_CTRL_QUEUE_INFO_MSS_MASK;
+ wqe_desc->queue_info &=
+ cpu_to_le32(~SQ_CTRL_QUEUE_INFO_MSS_MASK);
wqe_desc->queue_info |=
- SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_MIN, MSS);
+ cpu_to_le32(SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_MIN, MSS));
}
}
@@ -482,12 +508,13 @@ static netdev_tx_t hinic3_send_one_skb(struct sk_buff *skb,
{
struct hinic3_sq_wqe_combo wqe_combo = {};
struct hinic3_tx_info *tx_info;
- u32 offload, queue_info = 0;
struct hinic3_sq_task task;
u16 wqebb_cnt, num_sge;
+ __le32 queue_info = 0;
u16 saved_wq_prod_idx;
u16 owner, pi = 0;
u8 saved_sq_owner;
+ u32 offload;
int err;
if (unlikely(skb->len < MIN_SKB_LEN)) {
@@ -575,6 +602,7 @@ netdev_tx_t hinic3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
err_drop_pkt:
dev_kfree_skb_any(skb);
+
return NETDEV_TX_OK;
}
@@ -624,6 +652,90 @@ void hinic3_flush_txqs(struct net_device *netdev)
#define HINIC3_BDS_PER_SQ_WQEBB \
(HINIC3_SQ_WQEBB_SIZE / sizeof(struct hinic3_sq_bufdesc))
+int hinic3_alloc_txqs_res(struct net_device *netdev, u16 num_sq,
+ u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res)
+{
+ struct hinic3_dyna_txq_res *tqres;
+ int idx;
+
+ for (idx = 0; idx < num_sq; idx++) {
+ tqres = &txqs_res[idx];
+
+ tqres->tx_info = kcalloc(sq_depth, sizeof(*tqres->tx_info),
+ GFP_KERNEL);
+ if (!tqres->tx_info)
+ goto err_free_tqres;
+
+ tqres->bds = kcalloc(sq_depth * HINIC3_BDS_PER_SQ_WQEBB +
+ HINIC3_MAX_SQ_SGE, sizeof(*tqres->bds),
+ GFP_KERNEL);
+ if (!tqres->bds) {
+ kfree(tqres->tx_info);
+ goto err_free_tqres;
+ }
+ }
+
+ return 0;
+
+err_free_tqres:
+ while (idx > 0) {
+ idx--;
+ tqres = &txqs_res[idx];
+
+ kfree(tqres->bds);
+ kfree(tqres->tx_info);
+ }
+
+ return -ENOMEM;
+}
+
+void hinic3_free_txqs_res(struct net_device *netdev, u16 num_sq,
+ u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res)
+{
+ struct hinic3_dyna_txq_res *tqres;
+ int idx;
+
+ for (idx = 0; idx < num_sq; idx++) {
+ tqres = &txqs_res[idx];
+
+ free_all_tx_skbs(netdev, sq_depth, tqres->tx_info);
+ kfree(tqres->bds);
+ kfree(tqres->tx_info);
+ }
+}
+
+int hinic3_configure_txqs(struct net_device *netdev, u16 num_sq,
+ u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res)
+{
+ struct hinic3_nic_dev *nic_dev = netdev_priv(netdev);
+ struct hinic3_dyna_txq_res *tqres;
+ struct hinic3_txq *txq;
+ u16 q_id;
+ u32 idx;
+
+ for (q_id = 0; q_id < num_sq; q_id++) {
+ txq = &nic_dev->txqs[q_id];
+ tqres = &txqs_res[q_id];
+
+ txq->q_depth = sq_depth;
+ txq->q_mask = sq_depth - 1;
+
+ txq->tx_stop_thrs = min(HINIC3_DEFAULT_STOP_THRS,
+ sq_depth / 20);
+ txq->tx_start_thrs = min(HINIC3_DEFAULT_START_THRS,
+ sq_depth / 10);
+
+ txq->tx_info = tqres->tx_info;
+ for (idx = 0; idx < sq_depth; idx++)
+ txq->tx_info[idx].dma_info =
+ &tqres->bds[idx * HINIC3_BDS_PER_SQ_WQEBB];
+
+ txq->sq = &nic_dev->nic_io->sq[q_id];
+ }
+
+ return 0;
+}
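The stop and start thresholds deliberately differ so the queue restarts with hysteresis rather than flapping around a single watermark. Worked numbers, using placeholder defaults since HINIC3_DEFAULT_STOP_THRS and HINIC3_DEFAULT_START_THRS are not shown in this patch:

#include <stdio.h>

#define MIN(a, b)		((a) < (b) ? (a) : (b))
#define EX_DEFAULT_STOP_THRS	256u	/* placeholder */
#define EX_DEFAULT_START_THRS	512u	/* placeholder */

int main(void)
{
	unsigned int sq_depth = 1024;

	/* mirrors hinic3_configure_txqs(): stop early, restart later */
	printf("stop=%u start=%u\n",
	       MIN(EX_DEFAULT_STOP_THRS, sq_depth / 20),	/* 51 */
	       MIN(EX_DEFAULT_START_THRS, sq_depth / 10));	/* 102 */
	return 0;
}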
+
bool hinic3_tx_poll(struct hinic3_txq *txq, int budget)
{
struct net_device *netdev = txq->netdev;
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h
index 9e505cc19dd5..7e1b872ba752 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h
@@ -58,7 +58,7 @@ enum hinic3_tx_offload_type {
#define SQ_CTRL_QUEUE_INFO_SET(val, member) \
FIELD_PREP(SQ_CTRL_QUEUE_INFO_##member##_MASK, val)
#define SQ_CTRL_QUEUE_INFO_GET(val, member) \
- FIELD_GET(SQ_CTRL_QUEUE_INFO_##member##_MASK, val)
+ FIELD_GET(SQ_CTRL_QUEUE_INFO_##member##_MASK, le32_to_cpu(val))
#define SQ_CTRL_MAX_PLDOFF 221
@@ -77,17 +77,17 @@ enum hinic3_tx_offload_type {
FIELD_PREP(SQ_TASK_INFO3_##member##_MASK, val)
struct hinic3_sq_wqe_desc {
- u32 ctrl_len;
- u32 queue_info;
- u32 hi_addr;
- u32 lo_addr;
+ __le32 ctrl_len;
+ __le32 queue_info;
+ __le32 hi_addr;
+ __le32 lo_addr;
};
struct hinic3_sq_task {
- u32 pkt_info0;
- u32 ip_identify;
- u32 rsvd;
- u32 vlan_offload;
+ __le32 pkt_info0;
+ __le32 ip_identify;
+ __le32 rsvd;
+ __le32 vlan_offload;
};
struct hinic3_sq_wqe_combo {
@@ -125,9 +125,21 @@ struct hinic3_txq {
struct hinic3_io_queue *sq;
} ____cacheline_aligned;
+struct hinic3_dyna_txq_res {
+ struct hinic3_tx_info *tx_info;
+ struct hinic3_dma_info *bds;
+};
+
int hinic3_alloc_txqs(struct net_device *netdev);
void hinic3_free_txqs(struct net_device *netdev);
+int hinic3_alloc_txqs_res(struct net_device *netdev, u16 num_sq,
+ u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res);
+void hinic3_free_txqs_res(struct net_device *netdev, u16 num_sq,
+ u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res);
+int hinic3_configure_txqs(struct net_device *netdev, u16 num_sq,
+ u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res);
+
netdev_tx_t hinic3_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
bool hinic3_tx_poll(struct hinic3_txq *txq, int budget);
void hinic3_flush_txqs(struct net_device *netdev);
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c
index 2ac7efcd1365..bc3ffdc25cf6 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c
@@ -6,6 +6,110 @@
#include "hinic3_hwdev.h"
#include "hinic3_wq.h"
+#define WQ_MIN_DEPTH 64
+#define WQ_MAX_DEPTH 65536
+#define WQ_PAGE_ADDR_SIZE sizeof(u64)
+#define WQ_MAX_NUM_PAGES (HINIC3_MIN_PAGE_SIZE / WQ_PAGE_ADDR_SIZE)
+
+static int wq_init_wq_block(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq)
+{
+ struct hinic3_queue_pages *qpages = &wq->qpages;
+ int i;
+
+ if (hinic3_wq_is_0_level_cla(wq)) {
+ wq->wq_block_paddr = qpages->pages[0].align_paddr;
+ wq->wq_block_vaddr = qpages->pages[0].align_vaddr;
+
+ return 0;
+ }
+
+ if (wq->qpages.num_pages > WQ_MAX_NUM_PAGES) {
+ dev_err(hwdev->dev, "wq num_pages exceed limit: %lu\n",
+ WQ_MAX_NUM_PAGES);
+ return -EFAULT;
+ }
+
+ wq->wq_block_vaddr = dma_alloc_coherent(hwdev->dev,
+ HINIC3_MIN_PAGE_SIZE,
+ &wq->wq_block_paddr,
+ GFP_KERNEL);
+ if (!wq->wq_block_vaddr)
+ return -ENOMEM;
+
+ for (i = 0; i < qpages->num_pages; i++)
+ wq->wq_block_vaddr[i] = cpu_to_be64(qpages->pages[i].align_paddr);
+
+ return 0;
+}
+
+static int wq_alloc_pages(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq)
+{
+ int err;
+
+ err = hinic3_queue_pages_alloc(hwdev, &wq->qpages, 0);
+ if (err)
+ return err;
+
+ err = wq_init_wq_block(hwdev, wq);
+ if (err) {
+ hinic3_queue_pages_free(hwdev, &wq->qpages);
+ return err;
+ }
+
+ return 0;
+}
+
+static void wq_free_pages(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq)
+{
+ if (!hinic3_wq_is_0_level_cla(wq))
+ dma_free_coherent(hwdev->dev,
+ HINIC3_MIN_PAGE_SIZE,
+ wq->wq_block_vaddr,
+ wq->wq_block_paddr);
+
+ hinic3_queue_pages_free(hwdev, &wq->qpages);
+}
+
+int hinic3_wq_create(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq,
+ u32 q_depth, u16 wqebb_size)
+{
+ u32 wq_page_size;
+
+ if (q_depth < WQ_MIN_DEPTH || q_depth > WQ_MAX_DEPTH ||
+ !is_power_of_2(q_depth) || !is_power_of_2(wqebb_size)) {
+ dev_err(hwdev->dev, "Invalid WQ: q_depth %u, wqebb_size %u\n",
+ q_depth, wqebb_size);
+ return -EINVAL;
+ }
+
+ wq_page_size = ALIGN(hwdev->wq_page_size, HINIC3_MIN_PAGE_SIZE);
+
+ memset(wq, 0, sizeof(*wq));
+ wq->q_depth = q_depth;
+ wq->idx_mask = q_depth - 1;
+
+ hinic3_queue_pages_init(&wq->qpages, q_depth, wq_page_size, wqebb_size);
+
+ return wq_alloc_pages(hwdev, wq);
+}
+
+void hinic3_wq_destroy(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq)
+{
+ wq_free_pages(hwdev, wq);
+}
+
+void hinic3_wq_reset(struct hinic3_wq *wq)
+{
+ struct hinic3_queue_pages *qpages = &wq->qpages;
+ u16 pg_idx;
+
+ wq->cons_idx = 0;
+ wq->prod_idx = 0;
+
+ for (pg_idx = 0; pg_idx < qpages->num_pages; pg_idx++)
+ memset(qpages->pages[pg_idx].align_vaddr, 0, qpages->page_size);
+}
+
void hinic3_wq_get_multi_wqebbs(struct hinic3_wq *wq,
u16 num_wqebbs, u16 *prod_idx,
struct hinic3_sq_bufdesc **first_part_wqebbs,
@@ -27,3 +131,8 @@ void hinic3_wq_get_multi_wqebbs(struct hinic3_wq *wq,
*second_part_wqebbs = get_q_element(&wq->qpages, idx, NULL);
}
}
+
+bool hinic3_wq_is_0_level_cla(const struct hinic3_wq *wq)
+{
+ return wq->qpages.num_pages == 1;
+}
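A work queue is "0-level CLA" when a single page holds all of its WQEBBs, so that page can double as the WQ block and no indirection page is needed. Otherwise one extra page of big-endian page addresses is allocated, which is what bounds WQ_MAX_NUM_PAGES. The arithmetic, assuming a conventional 4 KiB HINIC3_MIN_PAGE_SIZE (the value is not shown in this patch):

#include <stdio.h>

#define EX_MIN_PAGE_SIZE	4096u	/* assumed HINIC3_MIN_PAGE_SIZE */
#define EX_PAGE_ADDR_SIZE	sizeof(unsigned long long)

int main(void)
{
	/* one indirection page of 64-bit entries bounds the queue size */
	printf("WQ_MAX_NUM_PAGES = %zu\n",
	       EX_MIN_PAGE_SIZE / EX_PAGE_ADDR_SIZE);	/* 512 */
	return 0;
}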
diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h
index ab37893efd7e..9b3f012bec80 100644
--- a/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h
+++ b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h
@@ -10,10 +10,10 @@
struct hinic3_sq_bufdesc {
/* 31-bits Length, L2NIC only uses length[17:0] */
- u32 len;
- u32 rsvd;
- u32 hi_addr;
- u32 lo_addr;
+ __le32 len;
+ __le32 rsvd;
+ __le32 hi_addr;
+ __le32 lo_addr;
};
/* Work queue is used to submit elements (tx, rx, cmd) to hw.
@@ -59,6 +59,7 @@ static inline void *hinic3_wq_get_one_wqebb(struct hinic3_wq *wq, u16 *pi)
{
*pi = wq->prod_idx & wq->idx_mask;
wq->prod_idx++;
+
return get_q_element(&wq->qpages, *pi, NULL);
}
@@ -67,10 +68,20 @@ static inline void hinic3_wq_put_wqebbs(struct hinic3_wq *wq, u16 num_wqebbs)
wq->cons_idx += num_wqebbs;
}
+static inline u64 hinic3_wq_get_first_wqe_page_addr(const struct hinic3_wq *wq)
+{
+ return wq->qpages.pages[0].align_paddr;
+}
+
+int hinic3_wq_create(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq,
+ u32 q_depth, u16 wqebb_size);
+void hinic3_wq_destroy(struct hinic3_hwdev *hwdev, struct hinic3_wq *wq);
+void hinic3_wq_reset(struct hinic3_wq *wq);
void hinic3_wq_get_multi_wqebbs(struct hinic3_wq *wq,
u16 num_wqebbs, u16 *prod_idx,
struct hinic3_sq_bufdesc **first_part_wqebbs,
struct hinic3_sq_bufdesc **second_part_wqebbs,
u16 *first_part_wqebbs_num);
+bool hinic3_wq_is_0_level_cla(const struct hinic3_wq *wq);
#endif
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index eec971567aac..3808148c1fc7 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -756,6 +756,17 @@ static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
adapter->rx_pool[i].active = 0;
}
+static void ibmvnic_set_safe_max_ind_descs(struct ibmvnic_adapter *adapter)
+{
+ if (adapter->cur_max_ind_descs > IBMVNIC_SAFE_IND_DESC) {
+ netdev_info(adapter->netdev,
+ "set max ind descs from %u to safe limit %u\n",
+ adapter->cur_max_ind_descs,
+ IBMVNIC_SAFE_IND_DESC);
+ adapter->cur_max_ind_descs = IBMVNIC_SAFE_IND_DESC;
+ }
+}
+
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
struct ibmvnic_rx_pool *pool)
{
@@ -843,7 +854,7 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
/* if send_subcrq_indirect queue is full, flush to VIOS */
- if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
+ if (ind_bufp->index == adapter->cur_max_ind_descs ||
i == count - 1) {
lpar_rc =
send_subcrq_indirect(adapter, handle,
@@ -862,6 +873,14 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
failure:
if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
+
+ /* Detect platform limit H_PARAMETER */
+ if (lpar_rc == H_PARAMETER)
+ ibmvnic_set_safe_max_ind_descs(adapter);
+
+ /* For all error cases, temporarily drop only this batch.
+ * Rely on TCP/IP retransmissions to retry and recover.
+ */
for (i = ind_bufp->index - 1; i >= 0; --i) {
struct ibmvnic_rx_buff *rx_buff;
@@ -2381,16 +2400,28 @@ static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
rc = send_subcrq_direct(adapter, handle,
(u64 *)ind_bufp->indir_arr);
- if (rc)
+ if (rc) {
+ dev_err_ratelimited(&adapter->vdev->dev,
+ "tx_flush failed, rc=%u (%llu entries dma=%pad handle=%llx)\n",
+ rc, entries, &dma_addr, handle);
+ /* Detect platform limit H_PARAMETER */
+ if (rc == H_PARAMETER)
+ ibmvnic_set_safe_max_ind_descs(adapter);
+
+ /* For all error cases, temporarily drop only this batch.
+ * Rely on TCP/IP retransmissions to retry and recover.
+ */
ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
- else
+ } else {
ind_bufp->index = 0;
+ }
return rc;
}
static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ u32 cur_max_ind_descs = adapter->cur_max_ind_descs;
int queue_num = skb_get_queue_mapping(skb);
u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
struct device *dev = &adapter->vdev->dev;
@@ -2590,7 +2621,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_crq.v1.n_crq_elem = num_entries;
tx_buff->num_entries = num_entries;
/* flush buffer if current entry can not fit */
- if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
+ if (num_entries + ind_bufp->index > cur_max_ind_descs) {
lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
if (lpar_rc != H_SUCCESS)
goto tx_flush_err;
@@ -2603,7 +2634,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
ind_bufp->index += num_entries;
if (__netdev_tx_sent_queue(txq, skb->len,
netdev_xmit_more() &&
- ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
+ ind_bufp->index < cur_max_ind_descs)) {
lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true);
if (lpar_rc != H_SUCCESS)
goto tx_err;
@@ -4006,7 +4037,7 @@ static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
}
dma_free_coherent(dev,
- IBMVNIC_IND_ARR_SZ,
+ IBMVNIC_IND_MAX_ARR_SZ,
scrq->ind_buf.indir_arr,
scrq->ind_buf.indir_dma);
@@ -4063,7 +4094,7 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
scrq->ind_buf.indir_arr =
dma_alloc_coherent(dev,
- IBMVNIC_IND_ARR_SZ,
+ IBMVNIC_IND_MAX_ARR_SZ,
&scrq->ind_buf.indir_dma,
GFP_KERNEL);
@@ -6369,6 +6400,19 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
rc = reset_sub_crq_queues(adapter);
}
} else {
+ if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
+ /* After an LPM, reset the max number of indirect
+ * subcrq descriptors per H_SEND_SUB_CRQ_INDIRECT
+ * hcall to the default max (e.g. POWER8 -> POWER10).
+ *
+ * If the new destination platform does not support
+ * the higher max limit (e.g. POWER10 -> POWER8 LPM),
+ * H_PARAMETER will trigger an automatic fallback to
+ * the safe minimum limit.
+ */
+ adapter->cur_max_ind_descs = IBMVNIC_MAX_IND_DESCS;
+ }
+
rc = init_sub_crqs(adapter);
}
@@ -6520,6 +6564,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
adapter->wait_for_reset = false;
adapter->last_reset_time = jiffies;
+ adapter->cur_max_ind_descs = IBMVNIC_MAX_IND_DESCS;
rc = register_netdev(netdev);
if (rc) {
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 246ddce753f9..480dc587078f 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -29,8 +29,9 @@
#define IBMVNIC_BUFFS_PER_POOL 100
#define IBMVNIC_MAX_QUEUES 16
#define IBMVNIC_MAX_QUEUE_SZ 4096
-#define IBMVNIC_MAX_IND_DESCS 16
-#define IBMVNIC_IND_ARR_SZ (IBMVNIC_MAX_IND_DESCS * 32)
+#define IBMVNIC_MAX_IND_DESCS 128
+#define IBMVNIC_SAFE_IND_DESC 16
+#define IBMVNIC_IND_MAX_ARR_SZ (IBMVNIC_MAX_IND_DESCS * 32)
#define IBMVNIC_TSO_BUF_SZ 65536
#define IBMVNIC_TSO_BUFS 64
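The indirect buffer stays allocated at the new worst-case size, so dropping cur_max_ind_descs to the safe limit after an H_PARAMETER never forces a reallocation. A quick check of the arithmetic, with the 32-byte descriptor size taken from the definition above:

#include <assert.h>

#define IBMVNIC_MAX_IND_DESCS	128
#define IBMVNIC_SAFE_IND_DESC	16
#define IBMVNIC_IND_MAX_ARR_SZ	(IBMVNIC_MAX_IND_DESCS * 32)

int main(void)
{
	/* worst case: one 4 KiB page per sub-CRQ indirect buffer */
	assert(IBMVNIC_IND_MAX_ARR_SZ == 4096);
	/* the fallback limit uses only 512 bytes of that allocation */
	assert(IBMVNIC_SAFE_IND_DESC * 32 == 512);
	return 0;
}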
@@ -930,6 +931,7 @@ struct ibmvnic_adapter {
struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
dma_addr_t ip_offload_ctrl_tok;
u32 msg_enable;
+ u32 cur_max_ind_descs;
/* Vital Product Data (VPD) */
struct ibmvnic_vpd *vpd;
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index b05cc0d7a15d..a563a94e2780 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -146,6 +146,7 @@ config IXGBE
tristate "Intel(R) 10GbE PCI Express adapters support"
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
+ select LIBIE_FWLOG
select MDIO
select NET_DEVLINK
select PLDMFW
@@ -297,6 +298,7 @@ config ICE
select DIMLIB
select LIBIE
select LIBIE_ADMINQ
+ select LIBIE_FWLOG
select NET_DEVLINK
select PACKING
select PLDMFW
diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile
index 04c844ef4964..9a37dc76aef0 100644
--- a/drivers/net/ethernet/intel/Makefile
+++ b/drivers/net/ethernet/intel/Makefile
@@ -4,7 +4,7 @@
#
obj-$(CONFIG_LIBETH) += libeth/
-obj-$(CONFIG_LIBIE) += libie/
+obj-y += libie/
obj-$(CONFIG_E100) += e100.o
obj-$(CONFIG_E1000) += e1000/
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 75f3fd1d8d6e..ea6ccf4b728b 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -116,7 +116,7 @@ struct e1000_adapter;
#define E1000_MASTER_SLAVE e1000_ms_hw_default
#endif
-#define E1000_MNG_VLAN_NONE (-1)
+#define E1000_MNG_VLAN_NONE 0xFFFF
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index d06d29c6c037..726365c567ef 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -806,7 +806,7 @@ static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
}
/* If Checksum is not Correct return error else test passed */
- if ((checksum != (u16)EEPROM_SUM) && !(*data))
+ if (checksum != EEPROM_SUM && !(*data))
*data = 2;
return *data;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index f9328f2e669f..0e5de52b1067 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -3970,7 +3970,7 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
return E1000_SUCCESS;
#endif
- if (checksum == (u16)EEPROM_SUM)
+ if (checksum == EEPROM_SUM)
return E1000_SUCCESS;
else {
e_dbg("EEPROM Checksum Invalid\n");
@@ -3997,7 +3997,7 @@ s32 e1000_update_eeprom_checksum(struct e1000_hw *hw)
}
checksum += eeprom_data;
}
- checksum = (u16)EEPROM_SUM - checksum;
+ checksum = EEPROM_SUM - checksum;
if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
e_dbg("EEPROM Write Error\n");
return -E1000_ERR_EEPROM;
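Both checksum hunks rely on the NVM convention that all 16-bit words, including the checksum word itself, sum to EEPROM_SUM (0xBABA) modulo 2^16, so no (u16) cast is needed once the constant is 16-bit-clean. A worked sketch (word[] is illustrative):

    u16 sum = 0, i;

    for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
            sum += word[i];                         /* u16 wraps mod 65536 */
    word[EEPROM_CHECKSUM_REG] = EEPROM_SUM - sum;   /* re-sums to 0xBABA */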
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index d8595e84326d..292389aceb2d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -313,8 +313,7 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
} else {
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
}
- if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
- (vid != old_vid) &&
+ if (old_vid != E1000_MNG_VLAN_NONE && vid != old_vid &&
!test_bit(old_vid, adapter->active_vlans))
e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
old_vid);
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 952898151565..018e61aea787 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -64,7 +64,7 @@ struct e1000_info;
#define AUTO_ALL_MODES 0
#define E1000_EEPROM_APME 0x0400
-#define E1000_MNG_VLAN_NONE (-1)
+#define E1000_MNG_VLAN_NONE 0xFFFF
#define DEFAULT_JUMBO 9234
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index c0bbb12eed2e..8e40bb50a01e 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -549,12 +549,12 @@ static int e1000_set_eeprom(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ size_t total_len, max_len;
u16 *eeprom_buff;
- void *ptr;
- int max_len;
+ int ret_val = 0;
int first_word;
int last_word;
- int ret_val = 0;
+ void *ptr;
u16 i;
if (eeprom->len == 0)
@@ -569,6 +569,10 @@ static int e1000_set_eeprom(struct net_device *netdev,
max_len = hw->nvm.word_size * 2;
+ if (check_add_overflow(eeprom->offset, eeprom->len, &total_len) ||
+ total_len > max_len)
+ return -EFBIG;
+
first_word = eeprom->offset >> 1;
last_word = (eeprom->offset + eeprom->len - 1) >> 1;
eeprom_buff = kmalloc(max_len, GFP_KERNEL);
@@ -959,7 +963,7 @@ static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
}
/* If Checksum is not Correct return error else test passed */
- if ((checksum != (u16)NVM_SUM) && !(*data))
+ if (checksum != NVM_SUM && !(*data))
*data = 2;
return *data;
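In the set_eeprom hunk above, check_add_overflow() (from <linux/overflow.h>) writes the sum through its third argument and returns true if the addition wrapped, so one branch rejects both a wrapping offset + len and an in-range but oversized request; e.g. offset = SIZE_MAX - 1 with len = 4 trips the first test, offset = 0 with len = max_len + 2 trips the second:

    size_t total_len;

    if (check_add_overflow(eeprom->offset, eeprom->len, &total_len) ||
        total_len > max_len)
            return -EFBIG;  /* request extends past the NVM */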
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index b27a61fab371..201322dac233 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -2761,7 +2761,7 @@ static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
ew32(RCTL, rctl);
- if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
+ if (adapter->mng_vlan_id != E1000_MNG_VLAN_NONE) {
e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
adapter->mng_vlan_id);
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
@@ -2828,7 +2828,7 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
adapter->mng_vlan_id = vid;
}
- if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
+ if (old_vid != E1000_MNG_VLAN_NONE && vid != old_vid)
e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), old_vid);
}
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index 16369e6d245a..4bde1c9de1b9 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -564,7 +564,7 @@ s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw)
return 0;
}
- if (checksum != (u16)NVM_SUM) {
+ if (checksum != NVM_SUM) {
e_dbg("NVM Checksum Invalid\n");
return -E1000_ERR_NVM;
}
@@ -594,7 +594,7 @@ s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw)
}
checksum += nvm_data;
}
- checksum = (u16)NVM_SUM - checksum;
+ checksum = NVM_SUM - checksum;
ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
if (ret_val)
e_dbg("NVM Write Error while updating checksum.\n");
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
index f51a63fca513..1f919a50c765 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
@@ -447,17 +447,16 @@ void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
/**
* fm10k_unbind_hw_stats_q - Unbind the queue counters from their queues
* @q: pointer to the ring of hardware statistics queue
- * @idx: index pointing to the start of the ring iteration
* @count: number of queues to iterate over
*
* Function invalidates the index values for the queues so any updates that
* may have happened are ignored and the base for the queue stats is reset.
**/
-void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count)
+void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 count)
{
u32 i;
- for (i = 0; i < count; i++, idx++, q++) {
+ for (i = 0; i < count; i++, q++) {
q->rx_stats_idx = 0;
q->tx_stats_idx = 0;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.h b/drivers/net/ethernet/intel/fm10k/fm10k_common.h
index 4c48fb73b3e7..13fca6a91a01 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.h
@@ -43,6 +43,6 @@ u32 fm10k_read_hw_stats_32b(struct fm10k_hw *hw, u32 addr,
void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
u32 idx, u32 count);
#define fm10k_unbind_hw_stats_32b(s) ((s)->base_h = 0)
-void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count);
+void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 count);
s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready);
#endif /* _FM10K_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 1954a04460d1..bf2029144c1d 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -560,7 +560,7 @@ static int fm10k_set_ringparam(struct net_device *netdev,
/* allocate temporary buffer to store rings in */
i = max_t(int, interface->num_tx_queues, interface->num_rx_queues);
- temp_ring = vmalloc(array_size(i, sizeof(struct fm10k_ring)));
+ temp_ring = vmalloc_array(i, sizeof(struct fm10k_ring));
if (!temp_ring) {
err = -ENOMEM;
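vmalloc_array(n, size) is the vmalloc counterpart of kmalloc_array(): the n * size multiplication is overflow-checked internally, so the explicit array_size() wrapper is no longer needed. Hypothetical usage:

    struct fm10k_ring *rings;

    rings = vmalloc_array(num_rings, sizeof(*rings));
    if (!rings)             /* NULL on allocation failure or on overflow */
            return -ENOMEM;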
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 142f07ca8bc0..b8c15b837fda 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -37,7 +37,7 @@ static int __init fm10k_init_module(void)
pr_info("%s\n", fm10k_copyright);
/* create driver workqueue */
- fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
+ fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_PERCPU, 0,
fm10k_driver_name);
if (!fm10k_workqueue)
return -ENOMEM;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index b9dd7b719832..3394645a18fe 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1389,7 +1389,7 @@ static void fm10k_rebind_hw_stats_pf(struct fm10k_hw *hw,
fm10k_unbind_hw_stats_32b(&stats->nodesc_drop);
/* Unbind Queue Statistics */
- fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues);
+ fm10k_unbind_hw_stats_q(stats->q, hw->mac.max_queues);
/* Reinitialize bases for all stats */
fm10k_update_hw_stats_pf(hw, stats);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
index 7fb1961f2921..6861a0bdc14e 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
@@ -465,7 +465,7 @@ static void fm10k_rebind_hw_stats_vf(struct fm10k_hw *hw,
struct fm10k_hw_stats *stats)
{
/* Unbind Queue Statistics */
- fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues);
+ fm10k_unbind_hw_stats_q(stats->q, hw->mac.max_queues);
/* Reinitialize bases for all stats */
fm10k_update_hw_stats_vf(hw, stats);
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 49aa4497efce..801a57a925da 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -1278,7 +1278,8 @@ struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
const u8 *macaddr);
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr);
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
-int i40e_count_filters(struct i40e_vsi *vsi);
+int i40e_count_all_filters(struct i40e_vsi *vsi);
+int i40e_count_active_filters(struct i40e_vsi *vsi);
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
static inline bool i40e_is_sw_dcb(struct i40e_pf *pf)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 76d872b91a38..cc02a85ad42b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -1561,6 +1561,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
struct i40e_aq_set_mac_config {
__le16 max_frame_size;
u8 params;
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN BIT(2)
u8 tx_timer_priority; /* bitmap */
__le16 tx_timer_value;
__le16 fc_refresh_threshold;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 5f1a405cbbf8..518bc738ea3b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -359,8 +359,8 @@ static void i40e_client_add_instance(struct i40e_pf *pf)
if (i40e_client_get_params(vsi, &cdev->lan_info.params))
goto free_cdev;
- mac = list_first_entry(&cdev->lan_info.netdev->dev_addrs.list,
- struct netdev_hw_addr, list);
+ mac = list_first_entry_or_null(&cdev->lan_info.netdev->dev_addrs.list,
+ struct netdev_hw_addr, list);
if (mac)
ether_addr_copy(cdev->lan_info.lanmac, mac->addr);
else
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 270e7e8cf9cf..59f5c1e810eb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1190,6 +1190,40 @@ int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
}
/**
+ * i40e_aq_set_mac_config - Configure MAC settings
+ * @hw: pointer to the hw struct
+ * @max_frame_size: Maximum Frame Size to be supported by the port
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set MAC configuration (0x0603). Note that max_frame_size must be greater
+ * than zero.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+int i40e_aq_set_mac_config(struct i40e_hw *hw, u16 max_frame_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_set_mac_config *cmd;
+ struct libie_aq_desc desc;
+
+ cmd = libie_aq_raw(&desc);
+
+ if (max_frame_size == 0)
+ return -EINVAL;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_mac_config);
+
+ cmd->max_frame_size = cpu_to_le16(max_frame_size);
+ cmd->params = I40E_AQ_SET_MAC_CONFIG_CRC_EN;
+
+#define I40E_AQ_SET_MAC_CONFIG_FC_DEFAULT_THRESHOLD 0x7FFF
+ cmd->fc_refresh_threshold =
+ cpu_to_le16(I40E_AQ_SET_MAC_CONFIG_FC_DEFAULT_THRESHOLD);
+
+ return i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+}
+
+/**
* i40e_aq_clear_pxe_mode
* @hw: pointer to the hw struct
* @cmd_details: pointer to command details structure or NULL
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 6cd6f23d42a6..c17b5d290f0a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -40,48 +40,6 @@ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
* setup, adding or removing filters, or other things. Many of
* these will be useful for some forms of unit testing.
**************************************************************/
-static char i40e_dbg_command_buf[256] = "";
-
-/**
- * i40e_dbg_command_read - read for command datum
- * @filp: the opened file
- * @buffer: where to write the data for the user to read
- * @count: the size of the user's buffer
- * @ppos: file position offset
- **/
-static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct i40e_pf *pf = filp->private_data;
- struct i40e_vsi *main_vsi;
- int bytes_not_copied;
- int buf_size = 256;
- char *buf;
- int len;
-
- /* don't allow partial reads */
- if (*ppos != 0)
- return 0;
- if (count < buf_size)
- return -ENOSPC;
-
- buf = kzalloc(buf_size, GFP_KERNEL);
- if (!buf)
- return -ENOSPC;
-
- main_vsi = i40e_pf_get_main_vsi(pf);
- len = snprintf(buf, buf_size, "%s: %s\n", main_vsi->netdev->name,
- i40e_dbg_command_buf);
-
- bytes_not_copied = copy_to_user(buffer, buf, len);
- kfree(buf);
-
- if (bytes_not_copied)
- return -EFAULT;
-
- *ppos = len;
- return len;
-}
static char *i40e_filter_state_string[] = {
"INVALID",
@@ -1621,7 +1579,6 @@ command_write_done:
static const struct file_operations i40e_dbg_command_fops = {
.owner = THIS_MODULE,
.open = simple_open,
- .read = i40e_dbg_command_read,
.write = i40e_dbg_command_write,
};
@@ -1630,48 +1587,6 @@ static const struct file_operations i40e_dbg_command_fops = {
* The netdev_ops entry in debugfs is for giving the driver commands
* to be executed from the netdev operations.
**************************************************************/
-static char i40e_dbg_netdev_ops_buf[256] = "";
-
-/**
- * i40e_dbg_netdev_ops_read - read for netdev_ops datum
- * @filp: the opened file
- * @buffer: where to write the data for the user to read
- * @count: the size of the user's buffer
- * @ppos: file position offset
- **/
-static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct i40e_pf *pf = filp->private_data;
- struct i40e_vsi *main_vsi;
- int bytes_not_copied;
- int buf_size = 256;
- char *buf;
- int len;
-
- /* don't allow partal reads */
- if (*ppos != 0)
- return 0;
- if (count < buf_size)
- return -ENOSPC;
-
- buf = kzalloc(buf_size, GFP_KERNEL);
- if (!buf)
- return -ENOSPC;
-
- main_vsi = i40e_pf_get_main_vsi(pf);
- len = snprintf(buf, buf_size, "%s: %s\n", main_vsi->netdev->name,
- i40e_dbg_netdev_ops_buf);
-
- bytes_not_copied = copy_to_user(buffer, buf, len);
- kfree(buf);
-
- if (bytes_not_copied)
- return -EFAULT;
-
- *ppos = len;
- return len;
-}
/**
* i40e_dbg_netdev_ops_write - write into netdev_ops datum
@@ -1685,35 +1600,36 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
size_t count, loff_t *ppos)
{
struct i40e_pf *pf = filp->private_data;
+ char *cmd_buf, *buf_tmp;
int bytes_not_copied;
struct i40e_vsi *vsi;
- char *buf_tmp;
int vsi_seid;
int i, cnt;
/* don't allow partial writes */
if (*ppos != 0)
return 0;
- if (count >= sizeof(i40e_dbg_netdev_ops_buf))
- return -ENOSPC;
- memset(i40e_dbg_netdev_ops_buf, 0, sizeof(i40e_dbg_netdev_ops_buf));
- bytes_not_copied = copy_from_user(i40e_dbg_netdev_ops_buf,
- buffer, count);
- if (bytes_not_copied)
+ cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!cmd_buf)
+ return count;
+ bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
+ if (bytes_not_copied) {
+ kfree(cmd_buf);
return -EFAULT;
- i40e_dbg_netdev_ops_buf[count] = '\0';
+ }
+ cmd_buf[count] = '\0';
- buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n');
+ buf_tmp = strchr(cmd_buf, '\n');
if (buf_tmp) {
*buf_tmp = '\0';
- count = buf_tmp - i40e_dbg_netdev_ops_buf + 1;
+ count = buf_tmp - cmd_buf + 1;
}
- if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
+ if (strncmp(cmd_buf, "change_mtu", 10) == 0) {
int mtu;
- cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i",
+ cnt = sscanf(&cmd_buf[11], "%i %i",
&vsi_seid, &mtu);
if (cnt != 2) {
dev_info(&pf->pdev->dev, "change_mtu <vsi_seid> <mtu>\n");
@@ -1735,8 +1651,8 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
}
- } else if (strncmp(i40e_dbg_netdev_ops_buf, "set_rx_mode", 11) == 0) {
- cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
+ } else if (strncmp(cmd_buf, "set_rx_mode", 11) == 0) {
+ cnt = sscanf(&cmd_buf[11], "%i", &vsi_seid);
if (cnt != 1) {
dev_info(&pf->pdev->dev, "set_rx_mode <vsi_seid>\n");
goto netdev_ops_write_done;
@@ -1756,8 +1672,8 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
}
- } else if (strncmp(i40e_dbg_netdev_ops_buf, "napi", 4) == 0) {
- cnt = sscanf(&i40e_dbg_netdev_ops_buf[4], "%i", &vsi_seid);
+ } else if (strncmp(cmd_buf, "napi", 4) == 0) {
+ cnt = sscanf(&cmd_buf[4], "%i", &vsi_seid);
if (cnt != 1) {
dev_info(&pf->pdev->dev, "napi <vsi_seid>\n");
goto netdev_ops_write_done;
@@ -1775,21 +1691,20 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
dev_info(&pf->pdev->dev, "napi called\n");
}
} else {
- dev_info(&pf->pdev->dev, "unknown command '%s'\n",
- i40e_dbg_netdev_ops_buf);
+ dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf);
dev_info(&pf->pdev->dev, "available commands\n");
dev_info(&pf->pdev->dev, " change_mtu <vsi_seid> <mtu>\n");
dev_info(&pf->pdev->dev, " set_rx_mode <vsi_seid>\n");
dev_info(&pf->pdev->dev, " napi <vsi_seid>\n");
}
netdev_ops_write_done:
+ kfree(cmd_buf);
return count;
}
static const struct file_operations i40e_dbg_netdev_ops_fops = {
.owner = THIS_MODULE,
.open = simple_open,
- .read = i40e_dbg_netdev_ops_read,
.write = i40e_dbg_netdev_ops_write,
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index b83f823e4917..50be0a60ae13 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1243,12 +1243,30 @@ void i40e_update_stats(struct i40e_vsi *vsi)
}
/**
- * i40e_count_filters - counts VSI mac filters
+ * i40e_count_all_filters - counts VSI MAC filters
* @vsi: the VSI to be searched
*
- * Returns count of mac filters
- **/
-int i40e_count_filters(struct i40e_vsi *vsi)
+ * Return: count of MAC filters in any state.
+ */
+int i40e_count_all_filters(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+ int bkt, cnt = 0;
+
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
+ cnt++;
+
+ return cnt;
+}
+
+/**
+ * i40e_count_active_filters - counts active VSI MAC filters
+ * @vsi: the VSI to be searched
+ *
+ * Return: count of active MAC filters.
+ */
+int i40e_count_active_filters(struct i40e_vsi *vsi)
{
struct i40e_mac_filter *f;
struct hlist_node *h;
@@ -4156,7 +4174,7 @@ free_queue_irqs:
irq_num = pf->msix_entries[base + vector].vector;
irq_set_affinity_notifier(irq_num, NULL);
irq_update_affinity_hint(irq_num, NULL);
- free_irq(irq_num, &vsi->q_vectors[vector]);
+ free_irq(irq_num, vsi->q_vectors[vector]);
}
return err;
}
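The one-character fix above matters because free_irq()'s cookie must match the dev_id passed to request_irq(): vsi->q_vectors[vector] is the q_vector pointer that was registered, while &vsi->q_vectors[vector] is the address of the array slot holding it. Sketch of the required pairing (handler name as used elsewhere in the driver; treat the snippet as illustrative):

    err = request_irq(irq_num, i40e_msix_clean_rings, 0,
                      q_vector->name, vsi->q_vectors[vector]);

    /* teardown must pass the identical cookie or the action isn't found */
    free_irq(irq_num, vsi->q_vectors[vector]);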
@@ -16045,13 +16063,17 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_dbg(&pf->pdev->dev, "get supported phy types ret = %pe last_status = %s\n",
ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
- /* make sure the MFS hasn't been set lower than the default */
#define MAX_FRAME_SIZE_DEFAULT 0x2600
- val = FIELD_GET(I40E_PRTGL_SAH_MFS_MASK,
- rd32(&pf->hw, I40E_PRTGL_SAH));
- if (val < MAX_FRAME_SIZE_DEFAULT)
- dev_warn(&pdev->dev, "MFS for port %x (%d) has been set below the default (%d)\n",
- pf->hw.port, val, MAX_FRAME_SIZE_DEFAULT);
+
+ err = i40e_aq_set_mac_config(hw, MAX_FRAME_SIZE_DEFAULT, NULL);
+ if (err)
+ dev_warn(&pdev->dev, "set mac config ret = %pe last_status = %s\n",
+ ERR_PTR(err), libie_aq_str(pf->hw.aq.asq_last_status));
+
+ /* Make sure the MFS is set to the expected value */
+ val = rd32(hw, I40E_PRTGL_SAH);
+ FIELD_MODIFY(I40E_PRTGL_SAH_MFS_MASK, &val, MAX_FRAME_SIZE_DEFAULT);
+ wr32(hw, I40E_PRTGL_SAH, val);
/* Add a filter to drop all Flow control frames from any VSI from being
* transmitted. By doing so we stop a malicious VF from sending out
@@ -16613,7 +16635,7 @@ static int __init i40e_init_module(void)
* since we need to be able to guarantee forward progress even under
* memory pressure.
*/
- i40e_wq = alloc_workqueue("%s", 0, 0, i40e_driver_name);
+ i40e_wq = alloc_workqueue("%s", WQ_PERCPU, 0, i40e_driver_name);
if (!i40e_wq) {
pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
return -ENOMEM;
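In the probe hunk above, FIELD_MODIFY(mask, &reg, val) is a read-modify-write helper: it clears the bits covered by mask in the register copy and inserts val shifted into position, leaving the other I40E_PRTGL_SAH fields untouched. Open-coded equivalent, for illustration:

    u32 val = rd32(hw, I40E_PRTGL_SAH);

    val &= ~I40E_PRTGL_SAH_MFS_MASK;
    val |= FIELD_PREP(I40E_PRTGL_SAH_MFS_MASK, MAX_FRAME_SIZE_DEFAULT);
    wr32(hw, I40E_PRTGL_SAH, val);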
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index aef5de53ce3b..26bb7bffe361 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -98,6 +98,8 @@ int i40e_aq_set_mac_loopback(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_mac_config(struct i40e_hw *hw, u16 max_frame_size,
+ struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 048c33039130..cc0b9efc2637 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -948,9 +948,6 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
if (!eop_desc)
break;
- /* prevent any other reads prior to eop_desc */
- smp_rmb();
-
i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
/* we have caught up to head, no work left to do */
if (tx_head == tx_desc)
@@ -2151,10 +2148,10 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
sizeof(skb_frag_t) * nr_frags);
- xdp_update_skb_shared_info(skb, skinfo->nr_frags + nr_frags,
- sinfo->xdp_frags_size,
- nr_frags * xdp->frame_sz,
- xdp_buff_is_frag_pfmemalloc(xdp));
+ xdp_update_skb_frags_info(skb, skinfo->nr_frags + nr_frags,
+ sinfo->xdp_frags_size,
+ nr_frags * xdp->frame_sz,
+ xdp_buff_get_skb_flags(xdp));
/* First buffer has already been processed, so bump ntc */
if (++rx_ring->next_to_clean == rx_ring->count)
@@ -2206,10 +2203,9 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
skb_metadata_set(skb, metasize);
if (unlikely(xdp_buff_has_frags(xdp))) {
- xdp_update_skb_shared_info(skb, nr_frags,
- sinfo->xdp_frags_size,
- nr_frags * xdp->frame_sz,
- xdp_buff_is_frag_pfmemalloc(xdp));
+ xdp_update_skb_frags_info(skb, nr_frags, sinfo->xdp_frags_size,
+ nr_frags * xdp->frame_sz,
+ xdp_buff_get_skb_flags(xdp));
i40e_process_rx_buffs(rx_ring, I40E_XDP_PASS, xdp);
} else {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 9b8efdeafbcf..081a4526a2f0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -448,7 +448,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
(qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
(pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
- (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
+ FIELD_PREP(I40E_QINT_RQCTL_ITR_INDX_MASK, itr_idx);
wr32(hw, reg_idx, reg);
}
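FIELD_PREP(mask, val) shifts val into the field described by mask and masks off anything that does not fit, whereas the old open-coded shift would let an oversized itr_idx spill into neighbouring register fields:

    /* both forms place itr_idx at the ITR_INDX bit offset, but only
     * FIELD_PREP bounds it to the field's width
     */
    reg |= FIELD_PREP(I40E_QINT_RQCTL_ITR_INDX_MASK, itr_idx);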
@@ -653,6 +653,13 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
/* only set the required fields */
tx_ctx.base = info->dma_ring_addr / 128;
+
+ /* ring_len has to be multiple of 8 */
+ if (!IS_ALIGNED(info->ring_len, 8) ||
+ info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) {
+ ret = -EINVAL;
+ goto error_context;
+ }
tx_ctx.qlen = info->ring_len;
tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
tx_ctx.rdylist_act = 0;
@@ -716,6 +723,13 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
/* only set the required fields */
rx_ctx.base = info->dma_ring_addr / 128;
+
+ /* ring_len has to be multiple of 32 */
+ if (!IS_ALIGNED(info->ring_len, 32) ||
+ info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) {
+ ret = -EINVAL;
+ goto error_param;
+ }
rx_ctx.qlen = info->ring_len;
if (info->splithdr_enabled) {
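IS_ALIGNED(x, a) is ((x) & (a - 1)) == 0 for power-of-two a, so the two new checks reject VF-supplied ring lengths that are not multiples of 8 (Tx) or 32 (Rx), as well as lengths above the XL710 descriptor maximum. For instance:

    IS_ALIGNED(96, 32);     /* true:  96 == 3 * 32 */
    IS_ALIGNED(100, 32);    /* false: 100 & 31 == 4, request rejected */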
@@ -1450,6 +1464,7 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
* functions that may still be running at this point.
*/
clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
+ clear_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states);
/* In the case of a VFLR, the HW has already reset the VF and we
* just need to clean up, so don't hit the VFRTRIG register.
@@ -2116,7 +2131,10 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
size_t len = 0;
int ret;
- if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
+ i40e_sync_vf_state(vf, I40E_VF_STATE_INIT);
+
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) ||
+ test_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states)) {
aq_ret = -EINVAL;
goto err;
}
@@ -2219,6 +2237,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
vf->default_lan_addr.addr);
}
set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
+ set_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states);
err:
/* send the response back to the VF */
@@ -2381,7 +2400,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
}
if (vf->adq_enabled) {
- if (idx >= ARRAY_SIZE(vf->ch)) {
+ if (idx >= vf->num_tc) {
aq_ret = -ENODEV;
goto error_param;
}
@@ -2402,7 +2421,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
* to its appropriate VSIs based on TC mapping
*/
if (vf->adq_enabled) {
- if (idx >= ARRAY_SIZE(vf->ch)) {
+ if (idx >= vf->num_tc) {
aq_ret = -ENODEV;
goto error_param;
}
@@ -2452,8 +2471,10 @@ static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
u16 vsi_queue_id, queue_id;
for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
- if (vf->adq_enabled) {
- vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
+ u16 idx = vsi_queue_id / I40E_MAX_VF_VSI;
+
+ if (vf->adq_enabled && idx < vf->num_tc) {
+ vsi_id = vf->ch[idx].vsi_id;
queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
} else {
queue_id = vsi_queue_id;
@@ -2841,24 +2862,6 @@ error_param:
(u8 *)&stats, sizeof(stats));
}
-/**
- * i40e_can_vf_change_mac
- * @vf: pointer to the VF info
- *
- * Return true if the VF is allowed to change its MAC filters, false otherwise
- */
-static bool i40e_can_vf_change_mac(struct i40e_vf *vf)
-{
- /* If the VF MAC address has been set administratively (via the
- * ndo_set_vf_mac command), then deny permission to the VF to
- * add/delete unicast MAC addresses, unless the VF is trusted
- */
- if (vf->pf_set_mac && !vf->trusted)
- return false;
-
- return true;
-}
-
#define I40E_MAX_MACVLAN_PER_HW 3072
#define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \
(num_ports))
@@ -2897,8 +2900,10 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
struct i40e_hw *hw = &pf->hw;
- int mac2add_cnt = 0;
- int i;
+ int i, mac_add_max, mac_add_cnt = 0;
+ bool vf_trusted;
+
+ vf_trusted = test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
for (i = 0; i < al->num_elements; i++) {
struct i40e_mac_filter *f;
@@ -2918,9 +2923,8 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
* The VF may request to set the MAC address filter already
* assigned to it so do not return an error in that case.
*/
- if (!i40e_can_vf_change_mac(vf) &&
- !is_multicast_ether_addr(addr) &&
- !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
+ if (!vf_trusted && !is_multicast_ether_addr(addr) &&
+ vf->pf_set_mac && !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
dev_err(&pf->pdev->dev,
"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
return -EPERM;
@@ -2929,29 +2933,33 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
/*count filters that really will be added*/
f = i40e_find_mac(vsi, addr);
if (!f)
- ++mac2add_cnt;
+ ++mac_add_cnt;
}
/* If this VF is not privileged, then we can't add more than a limited
- * number of addresses. Check to make sure that the additions do not
- * push us over the limit.
- */
- if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
- if ((i40e_count_filters(vsi) + mac2add_cnt) >
- I40E_VC_MAX_MAC_ADDR_PER_VF) {
- dev_err(&pf->pdev->dev,
- "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
- return -EPERM;
- }
- /* If this VF is trusted, it can use more resources than untrusted.
+ * number of addresses.
+ *
+ * If this VF is trusted, it can use more resources than untrusted.
* However to ensure that every trusted VF has appropriate number of
* resources, divide whole pool of resources per port and then across
* all VFs.
*/
- } else {
- if ((i40e_count_filters(vsi) + mac2add_cnt) >
- I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs,
- hw->num_ports)) {
+ if (!vf_trusted)
+ mac_add_max = I40E_VC_MAX_MAC_ADDR_PER_VF;
+ else
+ mac_add_max = I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs, hw->num_ports);
+
+ /* A VF can replace all of its filters in one step; in that case up to
+ * mac_add_max filters will be added as active while another mac_add_max
+ * are still in a to-be-removed state. Account for that.
+ */
+ if ((i40e_count_active_filters(vsi) + mac_add_cnt) > mac_add_max ||
+ (i40e_count_all_filters(vsi) + mac_add_cnt) > 2 * mac_add_max) {
+ if (!vf_trusted) {
+ dev_err(&pf->pdev->dev,
+ "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
+ return -EPERM;
+ } else {
dev_err(&pf->pdev->dev,
"Cannot add more MAC addresses, trusted VF exhausted it's resources\n");
return -EPERM;
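A worked example of the accounting described in the comment above, using a hypothetical mac_add_max of 16 (the real value depends on trust state and port count):

    /* A VF atomically replacing its full set sends 16 deletes plus 16
     * adds; while the deletes are pending, the hash holds up to 16
     * to-be-removed entries alongside 16 active ones. Hence the pair of
     * bounds:
     *
     *   active_filters + mac_add_cnt <=     mac_add_max  (16)
     *   all_filters    + mac_add_cnt <= 2 * mac_add_max  (32)
     */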
@@ -3587,7 +3595,7 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf,
/* action_meta is TC number here to which the filter is applied */
if (!tc_filter->action_meta ||
- tc_filter->action_meta > vf->num_tc) {
+ tc_filter->action_meta >= vf->num_tc) {
dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
vf->vf_id, tc_filter->action_meta);
goto err;
@@ -3884,6 +3892,8 @@ err:
aq_ret);
}
+#define I40E_MAX_VF_CLOUD_FILTER 0xFF00
+
/**
* i40e_vc_add_cloud_filter
* @vf: pointer to the VF info
@@ -3923,6 +3933,14 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
goto err_out;
}
+ if (vf->num_cloud_filters >= I40E_MAX_VF_CLOUD_FILTER) {
+ dev_warn(&pf->pdev->dev,
+ "VF %d: Max number of filters reached, can't apply cloud filter\n",
+ vf->vf_id);
+ aq_ret = -ENOSPC;
+ goto err_out;
+ }
+
cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
if (!cfilter) {
aq_ret = -ENOMEM;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 5cf74f16f433..f558b45725c8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -41,7 +41,8 @@ enum i40e_vf_states {
I40E_VF_STATE_MC_PROMISC,
I40E_VF_STATE_UC_PROMISC,
I40E_VF_STATE_PRE_ENABLE,
- I40E_VF_STATE_RESETTING
+ I40E_VF_STATE_RESETTING,
+ I40E_VF_STATE_RESOURCES_LOADED,
};
/* VF capabilities */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 69054af4689a..c2fbe443ef85 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -5491,7 +5491,7 @@ static int iavf_resume(struct device *dev_d)
{
struct pci_dev *pdev = to_pci_dev(dev_d);
struct iavf_adapter *adapter;
- u32 err;
+ int err;
adapter = iavf_pdev_to_adapter(pdev);
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index d0f9c9492363..5b2c666496e7 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -42,14 +42,15 @@ ice-y := ice_main.o \
ice_ethtool.o \
ice_repr.o \
ice_tc_lib.o \
- ice_fwlog.o \
ice_debugfs.o \
ice_adapter.o
ice-$(CONFIG_PCI_IOV) += \
ice_sriov.o \
- ice_virtchnl.o \
- ice_virtchnl_allowlist.o \
- ice_virtchnl_fdir.o \
+ virt/allowlist.o \
+ virt/fdir.o \
+ virt/queues.o \
+ virt/virtchnl.o \
+ virt/rss.o \
ice_vf_mbx.o \
ice_vf_vsi_vlan_ops.o \
ice_vf_lib.o
diff --git a/drivers/net/ethernet/intel/ice/devlink/health.c b/drivers/net/ethernet/intel/ice/devlink/health.c
index ab519c0f28bf..8e9a8a8178d4 100644
--- a/drivers/net/ethernet/intel/ice/devlink/health.c
+++ b/drivers/net/ethernet/intel/ice/devlink/health.c
@@ -450,9 +450,8 @@ ice_init_devlink_rep(struct ice_pf *pf,
{
struct devlink *devlink = priv_to_devlink(pf);
struct devlink_health_reporter *rep;
- const u64 graceful_period = 0;
- rep = devl_health_reporter_create(devlink, ops, graceful_period, pf);
+ rep = devl_health_reporter_create(devlink, ops, pf);
if (IS_ERR(rep)) {
struct device *dev = ice_pf_to_dev(pf);
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 2098f00b3cd3..22b8323ff0d0 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -84,7 +84,11 @@
#define ICE_BAR0 0
#define ICE_REQ_DESC_MULTIPLE 32
#define ICE_MIN_NUM_DESC 64
-#define ICE_MAX_NUM_DESC 8160
+#define ICE_MAX_NUM_DESC_E810 8160
+#define ICE_MAX_NUM_DESC_E830 8096
+#define ICE_MAX_NUM_DESC_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? \
+ ICE_MAX_NUM_DESC_E830 : \
+ ICE_MAX_NUM_DESC_E810)
#define ICE_DFLT_MIN_RX_DESC 512
#define ICE_DFLT_NUM_TX_DESC 256
#define ICE_DFLT_NUM_RX_DESC 2048
@@ -200,9 +204,11 @@ enum ice_feature {
ICE_F_SMA_CTRL,
ICE_F_CGU,
ICE_F_GNSS,
+ ICE_F_TXTIME,
ICE_F_GCS,
ICE_F_ROCE_LAG,
ICE_F_SRIOV_LAG,
+ ICE_F_SRIOV_AA_LAG,
ICE_F_MBX_LIMIT,
ICE_F_MAX
};
@@ -510,6 +516,7 @@ enum ice_pf_flags {
ICE_FLAG_LINK_LENIENT_MODE_ENA,
ICE_FLAG_PLUG_AUX_DEV,
ICE_FLAG_UNPLUG_AUX_DEV,
+ ICE_FLAG_AUX_DEV_CREATED,
ICE_FLAG_MTU_CHANGED,
ICE_FLAG_GNSS, /* GNSS successfully initialized */
ICE_FLAG_DPLL, /* SyncE/PTP dplls initialized */
@@ -566,9 +573,6 @@ struct ice_pf {
struct ice_sw *first_sw; /* first switch created by firmware */
u16 eswitch_mode; /* current mode of eswitch */
struct dentry *ice_debugfs_pf;
- struct dentry *ice_debugfs_pf_fwlog;
- /* keep track of all the dentrys for FW log modules */
- struct dentry **ice_debugfs_pf_fwlog_modules;
struct ice_vfs vfs;
DECLARE_BITMAP(features, ICE_F_MAX);
DECLARE_BITMAP(state, ICE_STATE_NBITS);
@@ -576,6 +580,7 @@ struct ice_pf {
DECLARE_BITMAP(misc_thread, ICE_MISC_THREAD_NBITS);
unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */
unsigned long *avail_rxqs; /* bitmap to track PF Rx queue usage */
+ unsigned long *txtime_txqs; /* bitmap to track PF Tx Time queue */
unsigned long serv_tmr_period;
unsigned long serv_tmr_prev;
struct timer_list serv_tmr;
@@ -750,6 +755,31 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
}
/**
+ * ice_is_txtime_ena - check if Tx Time is enabled on the Tx ring
+ * @ring: pointer to Tx ring
+ *
+ * Return: true if the Tx ring has Tx Time enabled, false otherwise.
+ */
+static inline bool ice_is_txtime_ena(const struct ice_tx_ring *ring)
+{
+ struct ice_vsi *vsi = ring->vsi;
+ struct ice_pf *pf = vsi->back;
+
+ return test_bit(ring->q_index, pf->txtime_txqs);
+}
+
+/**
+ * ice_is_txtime_cfg - check if Tx Time is configured on the Tx ring
+ * @ring: pointer to Tx ring
+ *
+ * Return: true if the Tx ring is configured for Tx Time, false otherwise.
+ */
+static inline bool ice_is_txtime_cfg(const struct ice_tx_ring *ring)
+{
+ return !!(ring->flags & ICE_TX_FLAGS_TXTIME);
+}
+
+/**
* ice_get_xp_from_qid - get ZC XSK buffer pool bound to a queue ID
* @vsi: pointer to VSI
* @qid: index of a queue to look at XSK buff pool presence
@@ -906,11 +936,10 @@ static inline bool ice_is_adq_active(struct ice_pf *pf)
return false;
}
-void ice_debugfs_fwlog_init(struct ice_pf *pf);
+int ice_debugfs_pf_init(struct ice_pf *pf);
void ice_debugfs_pf_deinit(struct ice_pf *pf);
void ice_debugfs_init(void);
void ice_debugfs_exit(void);
-void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module);
bool netif_is_ice(const struct net_device *dev);
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c
index 9e4adc43e474..0a8a48cd4bce 100644
--- a/drivers/net/ethernet/intel/ice/ice_adapter.c
+++ b/drivers/net/ethernet/intel/ice/ice_adapter.c
@@ -13,16 +13,45 @@
static DEFINE_XARRAY(ice_adapters);
static DEFINE_MUTEX(ice_adapters_mutex);
-static unsigned long ice_adapter_index(u64 dsn)
+#define ICE_ADAPTER_FIXED_INDEX BIT_ULL(63)
+
+#define ICE_ADAPTER_INDEX_E825C \
+ (ICE_DEV_ID_E825C_BACKPLANE | ICE_ADAPTER_FIXED_INDEX)
+
+static u64 ice_adapter_index(struct pci_dev *pdev)
{
+ switch (pdev->device) {
+ case ICE_DEV_ID_E825C_BACKPLANE:
+ case ICE_DEV_ID_E825C_QSFP:
+ case ICE_DEV_ID_E825C_SFP:
+ case ICE_DEV_ID_E825C_SGMII:
+ /* E825C devices have multiple NACs which are connected to the
+ * same clock source, and which must share the same
+ * ice_adapter structure. We can't use the serial number since
+ * each NAC has its own NVM generated with its own unique
+ * Device Serial Number. Instead, rely on the embedded nature
+ * of the E825C devices, and use a fixed index. This relies on
+ * the fact that all E825C physical functions in a given
+ * system are part of the same overall device.
+ */
+ return ICE_ADAPTER_INDEX_E825C;
+ default:
+ return pci_get_dsn(pdev) & ~ICE_ADAPTER_FIXED_INDEX;
+ }
+}
+
+static unsigned long ice_adapter_xa_index(struct pci_dev *pdev)
+{
+ u64 index = ice_adapter_index(pdev);
+
#if BITS_PER_LONG == 64
- return dsn;
+ return index;
#else
- return (u32)dsn ^ (u32)(dsn >> 32);
+ return (u32)index ^ (u32)(index >> 32);
#endif
}
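On 32-bit kernels unsigned long is 32 bits, so the 64-bit adapter index cannot be an xarray key directly; the XOR fold derives a deterministic 32-bit key, and the WARN_ON_ONCE on the full 64-bit index in ice_adapter_get() catches the (unlikely) collisions the fold can introduce. For example:

    u64 index = 0x123456789abcdef0ULL;
    u32 key = (u32)index ^ (u32)(index >> 32);  /* 0x9abcdef0 ^ 0x12345678
                                                 * == 0x88888888
                                                 */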
-static struct ice_adapter *ice_adapter_new(u64 dsn)
+static struct ice_adapter *ice_adapter_new(struct pci_dev *pdev)
{
struct ice_adapter *adapter;
@@ -30,7 +59,7 @@ static struct ice_adapter *ice_adapter_new(u64 dsn)
if (!adapter)
return NULL;
- adapter->device_serial_number = dsn;
+ adapter->index = ice_adapter_index(pdev);
spin_lock_init(&adapter->ptp_gltsyn_time_lock);
spin_lock_init(&adapter->txq_ctx_lock);
refcount_set(&adapter->refcount, 1);
@@ -64,26 +93,27 @@ static void ice_adapter_free(struct ice_adapter *adapter)
*/
struct ice_adapter *ice_adapter_get(struct pci_dev *pdev)
{
- u64 dsn = pci_get_dsn(pdev);
struct ice_adapter *adapter;
unsigned long index;
int err;
- index = ice_adapter_index(dsn);
+ index = ice_adapter_xa_index(pdev);
scoped_guard(mutex, &ice_adapters_mutex) {
- err = xa_insert(&ice_adapters, index, NULL, GFP_KERNEL);
- if (err == -EBUSY) {
- adapter = xa_load(&ice_adapters, index);
+ adapter = xa_load(&ice_adapters, index);
+ if (adapter) {
refcount_inc(&adapter->refcount);
- WARN_ON_ONCE(adapter->device_serial_number != dsn);
+ WARN_ON_ONCE(adapter->index != ice_adapter_index(pdev));
return adapter;
}
+ err = xa_reserve(&ice_adapters, index, GFP_KERNEL);
if (err)
return ERR_PTR(err);
- adapter = ice_adapter_new(dsn);
- if (!adapter)
+ adapter = ice_adapter_new(pdev);
+ if (!adapter) {
+ xa_release(&ice_adapters, index);
return ERR_PTR(-ENOMEM);
+ }
xa_store(&ice_adapters, index, adapter, GFP_KERNEL);
}
return adapter;
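The rewritten lookup is a get-or-create under a single mutex: xa_load() finds an existing adapter, and xa_reserve() pre-allocates the slot so the final xa_store() cannot fail with -ENOMEM after the object is built. The pattern in isolation (xa, lock, make_obj and get_ref are placeholders):

    scoped_guard(mutex, &lock) {
            obj = xa_load(&xa, index);
            if (obj)
                    return get_ref(obj);            /* fast path */
            if (xa_reserve(&xa, index, GFP_KERNEL))
                    return ERR_PTR(-ENOMEM);
            obj = make_obj();                       /* slow path */
            if (!obj) {
                    xa_release(&xa, index);         /* drop the reservation */
                    return ERR_PTR(-ENOMEM);
            }
            xa_store(&xa, index, obj, GFP_KERNEL);  /* cannot fail now */
    }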
@@ -100,11 +130,10 @@ struct ice_adapter *ice_adapter_get(struct pci_dev *pdev)
*/
void ice_adapter_put(struct pci_dev *pdev)
{
- u64 dsn = pci_get_dsn(pdev);
struct ice_adapter *adapter;
unsigned long index;
- index = ice_adapter_index(dsn);
+ index = ice_adapter_xa_index(pdev);
scoped_guard(mutex, &ice_adapters_mutex) {
adapter = xa_load(&ice_adapters, index);
if (WARN_ON(!adapter))
diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.h b/drivers/net/ethernet/intel/ice/ice_adapter.h
index db66d03c9f96..e95266c7f20b 100644
--- a/drivers/net/ethernet/intel/ice/ice_adapter.h
+++ b/drivers/net/ethernet/intel/ice/ice_adapter.h
@@ -33,7 +33,7 @@ struct ice_port_list {
* @txq_ctx_lock: Spinlock protecting access to the GLCOMM_QTX_CNTX_CTL register
* @ctrl_pf: Control PF of the adapter
* @ports: Ports list
- * @device_serial_number: DSN cached for collision detection on 32bit systems
+ * @index: 64-bit index cached for collision detection on 32bit systems
*/
struct ice_adapter {
refcount_t refcount;
@@ -44,7 +44,7 @@ struct ice_adapter {
struct ice_pf *ctrl_pf;
struct ice_port_list ports;
- u64 device_serial_number;
+ u64 index;
};
struct ice_adapter *ice_adapter_get(struct pci_dev *pdev);
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 3bd3ea3af888..859e9c66f3e7 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -33,6 +33,10 @@ typedef struct __packed { u8 buf[ICE_TXQ_CTX_SZ]; } ice_txq_ctx_buf_t;
typedef struct __packed { u8 buf[ICE_TXQ_CTX_FULL_SZ]; } ice_txq_ctx_buf_full_t;
+#define ICE_TXTIME_CTX_SZ 25
+
+typedef struct __packed { u8 buf[ICE_TXTIME_CTX_SZ]; } ice_txtime_ctx_buf_t;
+
/* Queue Shutdown (direct 0x0003) */
struct ice_aqc_q_shutdown {
u8 driver_unloading;
@@ -2060,6 +2064,10 @@ struct ice_aqc_cfg_txqs {
#define ICE_AQC_Q_CFG_SRC_PRT_M 0x7
#define ICE_AQC_Q_CFG_DST_PRT_S 3
#define ICE_AQC_Q_CFG_DST_PRT_M (0x7 << ICE_AQC_Q_CFG_DST_PRT_S)
+#define ICE_AQC_Q_CFG_MODE_M GENMASK(7, 6)
+#define ICE_AQC_Q_CFG_MODE_SAME_PF 0x0
+#define ICE_AQC_Q_CFG_MODE_GIVE_OWN 0x1
+#define ICE_AQC_Q_CFG_MODE_KEEP_OWN 0x2
u8 time_out;
#define ICE_AQC_Q_CFG_TIMEOUT_S 2
#define ICE_AQC_Q_CFG_TIMEOUT_M (0x1F << ICE_AQC_Q_CFG_TIMEOUT_S)
@@ -2113,6 +2121,34 @@ struct ice_aqc_add_rdma_qset_data {
struct ice_aqc_add_tx_rdma_qset_entry rdma_qsets[];
};
+/* Set Tx Time LAN Queue (indirect 0x0C35) */
+struct ice_aqc_set_txtimeqs {
+ __le16 q_id;
+ __le16 q_amount;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* This is the descriptor of each queue entry for the Set Tx Time Queue
+ * command (0x0C35). Only used within struct ice_aqc_set_txtime_qgrp.
+ */
+struct ice_aqc_set_txtimeqs_perq {
+ u8 reserved[4];
+ ice_txtime_ctx_buf_t txtime_ctx;
+ u8 reserved1[3];
+};
+
+/* The format of the command buffer for Set Tx Time Queue (0x0C35)
+ * is an array of the following structs. Please note that the length of
+ * each struct ice_aqc_set_txtime_qgrp is variable due to the variable
+ * number of queues in each group!
+ */
+struct ice_aqc_set_txtime_qgrp {
+ u8 reserved[8];
+ struct ice_aqc_set_txtimeqs_perq txtimeqs[];
+};
+
/* Download Package (indirect 0x0C40) */
/* Also used for Update Package (indirect 0x0C41 and 0x0C42) */
struct ice_aqc_download_pkg {
@@ -2395,42 +2431,6 @@ struct ice_aqc_event_lan_overflow {
u8 reserved[8];
};
-enum ice_aqc_fw_logging_mod {
- ICE_AQC_FW_LOG_ID_GENERAL = 0,
- ICE_AQC_FW_LOG_ID_CTRL,
- ICE_AQC_FW_LOG_ID_LINK,
- ICE_AQC_FW_LOG_ID_LINK_TOPO,
- ICE_AQC_FW_LOG_ID_DNL,
- ICE_AQC_FW_LOG_ID_I2C,
- ICE_AQC_FW_LOG_ID_SDP,
- ICE_AQC_FW_LOG_ID_MDIO,
- ICE_AQC_FW_LOG_ID_ADMINQ,
- ICE_AQC_FW_LOG_ID_HDMA,
- ICE_AQC_FW_LOG_ID_LLDP,
- ICE_AQC_FW_LOG_ID_DCBX,
- ICE_AQC_FW_LOG_ID_DCB,
- ICE_AQC_FW_LOG_ID_XLR,
- ICE_AQC_FW_LOG_ID_NVM,
- ICE_AQC_FW_LOG_ID_AUTH,
- ICE_AQC_FW_LOG_ID_VPD,
- ICE_AQC_FW_LOG_ID_IOSF,
- ICE_AQC_FW_LOG_ID_PARSER,
- ICE_AQC_FW_LOG_ID_SW,
- ICE_AQC_FW_LOG_ID_SCHEDULER,
- ICE_AQC_FW_LOG_ID_TXQ,
- ICE_AQC_FW_LOG_ID_RSVD,
- ICE_AQC_FW_LOG_ID_POST,
- ICE_AQC_FW_LOG_ID_WATCHDOG,
- ICE_AQC_FW_LOG_ID_TASK_DISPATCH,
- ICE_AQC_FW_LOG_ID_MNG,
- ICE_AQC_FW_LOG_ID_SYNCE,
- ICE_AQC_FW_LOG_ID_HEALTH,
- ICE_AQC_FW_LOG_ID_TSDRV,
- ICE_AQC_FW_LOG_ID_PFREG,
- ICE_AQC_FW_LOG_ID_MDLVER,
- ICE_AQC_FW_LOG_ID_MAX,
-};
-
enum ice_aqc_health_status_mask {
ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK = BIT(0),
ICE_AQC_HEALTH_STATUS_SET_ALL_PF_MASK = BIT(1),
@@ -2512,48 +2512,6 @@ struct ice_aqc_health_status_elem {
__le32 internal_data2;
};
-/* Set FW Logging configuration (indirect 0xFF30)
- * Register for FW Logging (indirect 0xFF31)
- * Query FW Logging (indirect 0xFF32)
- * FW Log Event (indirect 0xFF33)
- */
-struct ice_aqc_fw_log {
- u8 cmd_flags;
-#define ICE_AQC_FW_LOG_CONF_UART_EN BIT(0)
-#define ICE_AQC_FW_LOG_CONF_AQ_EN BIT(1)
-#define ICE_AQC_FW_LOG_QUERY_REGISTERED BIT(2)
-#define ICE_AQC_FW_LOG_CONF_SET_VALID BIT(3)
-#define ICE_AQC_FW_LOG_AQ_REGISTER BIT(0)
-#define ICE_AQC_FW_LOG_AQ_QUERY BIT(2)
-
- u8 rsp_flag;
- __le16 fw_rt_msb;
- union {
- struct {
- __le32 fw_rt_lsb;
- } sync;
- struct {
- __le16 log_resolution;
-#define ICE_AQC_FW_LOG_MIN_RESOLUTION (1)
-#define ICE_AQC_FW_LOG_MAX_RESOLUTION (128)
-
- __le16 mdl_cnt;
- } cfg;
- } ops;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-/* Response Buffer for:
- * Set Firmware Logging Configuration (0xFF30)
- * Query FW Logging (0xFF32)
- */
-struct ice_aqc_fw_log_cfg_resp {
- __le16 module_identifier;
- u8 log_level;
- u8 rsvd0;
-};
-
/* Admin Queue command opcodes */
enum ice_adminq_opc {
/* AQ commands */
@@ -2688,6 +2646,9 @@ enum ice_adminq_opc {
ice_aqc_opc_cfg_txqs = 0x0C32,
ice_aqc_opc_add_rdma_qset = 0x0C33,
+ /* Tx Time queue commands */
+ ice_aqc_opc_set_txtimeqs = 0x0C35,
+
/* package commands */
ice_aqc_opc_download_pkg = 0x0C40,
ice_aqc_opc_upload_section = 0x0C41,
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index c5da8e9cc0a0..2d35a278c555 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -242,7 +242,8 @@ static void ice_cfg_itr_gran(struct ice_hw *hw)
* @ring: ring to get the absolute queue index
* @tc: traffic class number
*/
-static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 tc)
+static u16
+ice_calc_txq_handle(const struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 tc)
{
WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");
@@ -278,30 +279,20 @@ static void ice_cfg_xps_tx_ring(struct ice_tx_ring *ring)
}
/**
- * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
- * @ring: The Tx ring to configure
- * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
- * @pf_q: queue index in the PF space
+ * ice_set_txq_ctx_vmvf - set queue context VM/VF type and number by VSI type
+ * @ring: the Tx ring to configure
+ * @vmvf_type: VM/VF type
+ * @vmvf_num: VM/VF number
*
- * Configure the Tx descriptor ring in TLAN context.
+ * Return: 0 on success and a negative value on error.
*/
-static void
-ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
+static int
+ice_set_txq_ctx_vmvf(struct ice_tx_ring *ring, u8 *vmvf_type, u16 *vmvf_num)
{
struct ice_vsi *vsi = ring->vsi;
- struct ice_hw *hw = &vsi->back->hw;
-
- tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
-
- tlan_ctx->port_num = vsi->port_info->lport;
-
- /* Transmit Queue Length */
- tlan_ctx->qlen = ring->count;
-
- ice_set_cgd_num(tlan_ctx, ring->dcb_tc);
+ struct ice_hw *hw;
- /* PF number */
- tlan_ctx->pf_num = hw->pf_id;
+ hw = &vsi->back->hw;
/* queue belongs to a specific VSI type
* VF / VM index should be programmed per vmvf_type setting:
@@ -314,21 +305,60 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf
case ICE_VSI_CTRL:
case ICE_VSI_PF:
if (ring->ch)
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
+ *vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
else
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
+ *vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
break;
case ICE_VSI_VF:
/* Firmware expects vmvf_num to be absolute VF ID */
- tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
+ *vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
+ *vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
break;
case ICE_VSI_SF:
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
+ *vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
break;
default:
- return;
+ dev_info(ice_pf_to_dev(vsi->back),
+ "Unable to set VMVF type for VSI type %d\n",
+ vsi->type);
+ return -EINVAL;
}
+ return 0;
+}
+
+/**
+ * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
+ * @ring: the Tx ring to configure
+ * @tlan_ctx: pointer to the Tx LAN queue context structure to be initialized
+ * @pf_q: queue index in the PF space
+ *
+ * Configure the Tx descriptor ring in TLAN context.
+ *
+ * Return: 0 on success and a negative value on error.
+ */
+static int
+ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
+{
+ struct ice_vsi *vsi = ring->vsi;
+ struct ice_hw *hw;
+ int err;
+
+ hw = &vsi->back->hw;
+ tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
+ tlan_ctx->port_num = vsi->port_info->lport;
+
+ /* Transmit Queue Length */
+ tlan_ctx->qlen = ring->count;
+
+ ice_set_cgd_num(tlan_ctx, ring->dcb_tc);
+
+ /* PF number */
+ tlan_ctx->pf_num = hw->pf_id;
+
+ err = ice_set_txq_ctx_vmvf(ring, &tlan_ctx->vmvf_type,
+ &tlan_ctx->vmvf_num);
+ if (err)
+ return err;
/* make sure the context is associated with the right VSI */
if (ring->ch)
@@ -355,6 +385,80 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf
* 1: Legacy Host Interface
*/
tlan_ctx->legacy_int = ICE_TX_LEGACY;
+
+ return 0;
+}
+
+/**
+ * ice_setup_txtime_ctx - setup a struct ice_txtime_ctx instance
+ * @ring: the tstamp ring to configure
+ * @txtime_ctx: pointer to the Tx time queue context structure to be initialized
+ *
+ * Return: 0 on success and a negative value on error.
+ */
+static int
+ice_setup_txtime_ctx(const struct ice_tstamp_ring *ring,
+ struct ice_txtime_ctx *txtime_ctx)
+{
+ struct ice_tx_ring *tx_ring = ring->tx_ring;
+ struct ice_vsi *vsi = tx_ring->vsi;
+ struct ice_hw *hw = &vsi->back->hw;
+ int err;
+
+ txtime_ctx->base = ring->dma >> ICE_TXTIME_CTX_BASE_S;
+
+ /* Tx time Queue Length */
+ txtime_ctx->qlen = ring->count;
+ txtime_ctx->txtime_ena_q = 1;
+
+ /* PF number */
+ txtime_ctx->pf_num = hw->pf_id;
+
+ err = ice_set_txq_ctx_vmvf(tx_ring, &txtime_ctx->vmvf_type,
+ &txtime_ctx->vmvf_num);
+ if (err)
+ return err;
+
+ /* make sure the context is associated with the right VSI */
+ if (tx_ring->ch)
+ txtime_ctx->src_vsi = tx_ring->ch->vsi_num;
+ else
+ txtime_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
+
+ txtime_ctx->ts_res = ICE_TXTIME_CTX_RESOLUTION_128NS;
+ txtime_ctx->drbell_mode_32 = ICE_TXTIME_CTX_DRBELL_MODE_32;
+ txtime_ctx->ts_fetch_prof_id = ICE_TXTIME_CTX_FETCH_PROF_ID_0;
+
+ return 0;
+}
+
+/**
+ * ice_calc_ts_ring_count - calculate the number of Tx time stamp descriptors
+ * @tx_ring: Tx ring to calculate the count for
+ *
+ * Return: the number of Tx time stamp descriptors.
+ */
+u16 ice_calc_ts_ring_count(struct ice_tx_ring *tx_ring)
+{
+ u16 prof = ICE_TXTIME_CTX_FETCH_PROF_ID_0;
+ struct ice_vsi *vsi = tx_ring->vsi;
+ struct ice_hw *hw = &vsi->back->hw;
+ u16 max_fetch_desc = 0, fetch, i;
+ u32 reg;
+
+ for (i = 0; i < ICE_TXTIME_FETCH_PROFILE_CNT; i++) {
+ reg = rd32(hw, E830_GLTXTIME_FETCH_PROFILE(prof, 0));
+ fetch = FIELD_GET(E830_GLTXTIME_FETCH_PROFILE_FETCH_TS_DESC_M,
+ reg);
+ max_fetch_desc = max(fetch, max_fetch_desc);
+ }
+
+ if (!max_fetch_desc)
+ max_fetch_desc = ICE_TXTIME_FETCH_TS_DESC_DFLT;
+
+ max_fetch_desc = ALIGN(max_fetch_desc, ICE_REQ_DESC_MULTIPLE);
+
+ return tx_ring->count + max_fetch_desc;
}
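ALIGN(x, a) rounds x up to the next multiple of the power-of-two a, so the timestamp ring is sized for the Tx ring's own descriptors plus the largest fetch burst, padded to the 32-descriptor hardware granularity. With the default tx_ring->count of 256 and a largest fetch profile of, say, 100 descriptors:

    /* ALIGN(100, ICE_REQ_DESC_MULTIPLE) == ALIGN(100, 32) == 128,
     * so the timestamp ring gets 256 + 128 = 384 descriptors.
     */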
/**
@@ -882,13 +986,49 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
}
/**
+ * ice_cfg_tstamp - Configure Tx time stamp queue
+ * @tx_ring: Tx ring to be configured with timestamping
+ *
+ * Return: 0 on success and a negative value on error.
+ */
+static int
+ice_cfg_tstamp(struct ice_tx_ring *tx_ring)
+{
+ DEFINE_RAW_FLEX(struct ice_aqc_set_txtime_qgrp, txtime_qg_buf,
+ txtimeqs, 1);
+ u8 txtime_buf_len = struct_size(txtime_qg_buf, txtimeqs, 1);
+ struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
+ struct ice_txtime_ctx txtime_ctx = {};
+ struct ice_vsi *vsi = tx_ring->vsi;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u16 pf_q = tx_ring->reg_idx;
+ int err;
+
+ err = ice_setup_txtime_ctx(tstamp_ring, &txtime_ctx);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "Failed to setup Tx time queue context for queue %d, error: %d\n",
+ pf_q, err);
+ return err;
+ }
+ ice_pack_txtime_ctx(&txtime_ctx,
+ &txtime_qg_buf->txtimeqs[0].txtime_ctx);
+
+ tstamp_ring->tail = hw->hw_addr + E830_GLQTX_TXTIME_DBELL_LSB(pf_q);
+ return ice_aq_set_txtimeq(hw, pf_q, 1, txtime_qg_buf,
+ txtime_buf_len, NULL);
+}
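DEFINE_RAW_FLEX(type, name, member, count) declares an on-stack buffer sized for a flexible-array struct with count trailing elements, and struct_size() computes the matching byte count for the admin-queue buffer length; both replace the error-prone sizeof(struct) + n * sizeof(elem) open-coding. Shape of the computation:

    /* struct_size(txtime_qg_buf, txtimeqs, 1)
     *   == sizeof(struct ice_aqc_set_txtime_qgrp)
     *    + 1 * sizeof(struct ice_aqc_set_txtimeqs_perq)
     * with overflow checking on the multiplication.
     */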
+
+/**
* ice_vsi_cfg_txq - Configure single Tx queue
* @vsi: the VSI that queue belongs to
* @ring: Tx ring to be configured
* @qg_buf: queue group buffer
+ *
+ * Return: 0 on success and a negative value on error.
*/
static int
-ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
+ice_vsi_cfg_txq(const struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_aqc_add_tx_qgrp *qg_buf)
{
u8 buf_len = struct_size(qg_buf, txqs, 1);
@@ -897,15 +1037,20 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_channel *ch = ring->ch;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
+ u32 pf_q, vsi_idx;
int status;
- u16 pf_q;
u8 tc;
/* Configure XPS */
ice_cfg_xps_tx_ring(ring);
pf_q = ring->reg_idx;
- ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
+ status = ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
+ if (status) {
+ dev_err(ice_pf_to_dev(pf), "Failed to setup Tx context for queue %d, error: %d\n",
+ pf_q, status);
+ return status;
+ }
/* copy context contents into the qg_buf */
qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
ice_pack_txq_ctx(&tlan_ctx, &qg_buf->txqs[0].txq_ctx);
@@ -925,14 +1070,15 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
*/
ring->q_handle = ice_calc_txq_handle(vsi, ring, tc);
- if (ch)
- status = ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0,
- ring->q_handle, 1, qg_buf, buf_len,
- NULL);
- else
- status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
- ring->q_handle, 1, qg_buf, buf_len,
- NULL);
+ if (ch) {
+ tc = 0;
+ vsi_idx = ch->ch_vsi->idx;
+ } else {
+ vsi_idx = vsi->idx;
+ }
+
+ status = ice_ena_vsi_txq(vsi->port_info, vsi_idx, tc, ring->q_handle,
+ 1, qg_buf, buf_len, NULL);
if (status) {
dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n",
status);
@@ -947,7 +1093,32 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
if (pf_q == le16_to_cpu(txq->txq_id))
ring->txq_teid = le32_to_cpu(txq->q_teid);
+ if (ice_is_txtime_ena(ring)) {
+ status = ice_alloc_setup_tstamp_ring(ring);
+ if (status) {
+ dev_err(ice_pf_to_dev(pf),
+ "Failed to allocate Tx timestamp ring, error: %d\n",
+ status);
+ goto err_setup_tstamp;
+ }
+
+ status = ice_cfg_tstamp(ring);
+ if (status) {
+ dev_err(ice_pf_to_dev(pf), "Failed to set Tx Time queue context, error: %d\n",
+ status);
+ goto err_cfg_tstamp;
+ }
+ }
return 0;
+
+err_cfg_tstamp:
+ ice_free_tx_tstamp_ring(ring);
+err_setup_tstamp:
+ ice_dis_vsi_txq(vsi->port_info, vsi_idx, tc, 1, &ring->q_handle,
+ &ring->reg_idx, &ring->txq_teid, ICE_NO_RESET,
+ tlan_ctx.vmvf_num, NULL);
+
+ return status;
}
int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
@@ -1206,3 +1377,148 @@ ice_fill_txq_meta(const struct ice_vsi *vsi, struct ice_tx_ring *ring,
txq_meta->tc = tc;
}
}
+
+/**
+ * ice_qp_reset_stats - Resets all stats for rings of given index
+ * @vsi: VSI that contains rings of interest
+ * @q_idx: ring index in array
+ */
+static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
+{
+ struct ice_vsi_stats *vsi_stat;
+ struct ice_pf *pf;
+
+ pf = vsi->back;
+ if (!pf->vsi_stats)
+ return;
+
+ vsi_stat = pf->vsi_stats[vsi->idx];
+ if (!vsi_stat)
+ return;
+
+ memset(&vsi_stat->rx_ring_stats[q_idx]->rx_stats, 0,
+ sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats));
+ memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0,
+ sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));
+ if (vsi->xdp_rings)
+ memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0,
+ sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
+}
+
+/**
+ * ice_qp_clean_rings - Cleans all the rings of a given index
+ * @vsi: VSI that contains rings of interest
+ * @q_idx: ring index in array
+ */
+static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
+{
+ ice_clean_tx_ring(vsi->tx_rings[q_idx]);
+ if (vsi->xdp_rings)
+ ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
+ ice_clean_rx_ring(vsi->rx_rings[q_idx]);
+}
+
+/**
+ * ice_qp_dis - Disables a queue pair
+ * @vsi: VSI of interest
+ * @q_idx: ring index in array
+ *
+ * Return: 0 on success, negative on failure.
+ */
+int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
+{
+ struct ice_txq_meta txq_meta = { };
+ struct ice_q_vector *q_vector;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
+ int fail = 0;
+ int err;
+
+ if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
+ return -EINVAL;
+
+ tx_ring = vsi->tx_rings[q_idx];
+ rx_ring = vsi->rx_rings[q_idx];
+ q_vector = rx_ring->q_vector;
+
+ synchronize_net();
+ netif_carrier_off(vsi->netdev);
+ netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+
+ ice_qvec_dis_irq(vsi, rx_ring, q_vector);
+ ice_qvec_toggle_napi(vsi, q_vector, false);
+
+ ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
+ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
+ if (!fail)
+ fail = err;
+ if (vsi->xdp_rings) {
+ struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
+
+ memset(&txq_meta, 0, sizeof(txq_meta));
+ ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
+ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
+ &txq_meta);
+ if (!fail)
+ fail = err;
+ }
+
+ ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, false);
+ ice_qp_clean_rings(vsi, q_idx);
+ ice_qp_reset_stats(vsi, q_idx);
+
+ return fail;
+}
+
+/**
+ * ice_qp_ena - Enables a queue pair
+ * @vsi: VSI of interest
+ * @q_idx: ring index in array
+ *
+ * Return: 0 on success, negative on failure.
+ */
+int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
+{
+ struct ice_q_vector *q_vector;
+ int fail = 0;
+ bool link_up;
+ int err;
+
+ err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
+ if (!fail)
+ fail = err;
+
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
+
+ err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx);
+ if (!fail)
+ fail = err;
+ ice_set_ring_xdp(xdp_ring);
+ ice_tx_xsk_pool(vsi, q_idx);
+ }
+
+ err = ice_vsi_cfg_single_rxq(vsi, q_idx);
+ if (!fail)
+ fail = err;
+
+ q_vector = vsi->rx_rings[q_idx]->q_vector;
+ ice_qvec_cfg_msix(vsi, q_vector, q_idx);
+
+ err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
+ if (!fail)
+ fail = err;
+
+ ice_qvec_toggle_napi(vsi, q_vector, true);
+ ice_qvec_ena_irq(vsi, q_vector);
+
+ /* make sure NAPI sees updated ice_{t,x}_ring::xsk_pool */
+ synchronize_net();
+ ice_get_link_status(vsi->port_info, &link_up);
+ if (link_up) {
+ netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+ netif_carrier_on(vsi->netdev);
+ }
+
+ return fail;
+}
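ice_qp_dis() and ice_qp_ena() are exported via ice_base.h so that per-queue reconfiguration paths (notably AF_XDP socket setup) can quiesce and restart a single queue pair. A hedged sketch of such a caller, assuming an XSK-style entry point:

/* Sketch: swap per-queue state while the interface is running.
 * The pool-attach step is a placeholder for the caller's real work.
 */
static int example_reconfig_queue(struct ice_vsi *vsi, u16 qid,
				  bool if_running)
{
	int err = 0;

	if (if_running) {
		err = ice_qp_dis(vsi, qid);	/* IRQ off, NAPI off, rings drained */
		if (err)
			return err;
	}

	/* ... attach/detach the xsk_buff_pool or tweak ring state here ... */

	if (if_running)
		err = ice_qp_ena(vsi, qid);	/* reprogram queues, restart NAPI/IRQ */

	return err;
}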
diff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h
index b711bc921928..d28294247599 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.h
+++ b/drivers/net/ethernet/intel/ice/ice_base.h
@@ -32,4 +32,7 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
void
ice_fill_txq_meta(const struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta);
+int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx);
+int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx);
+u16 ice_calc_ts_ring_count(struct ice_tx_ring *tx_ring);
#endif /* _ICE_BASE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 003d60a4db21..2250426ec91b 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -984,6 +984,37 @@ static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
return -ETIMEDOUT;
}
+static int __fwlog_send_cmd(void *priv, struct libie_aq_desc *desc, void *buf,
+ u16 size)
+{
+ struct ice_hw *hw = priv;
+
+ return ice_aq_send_cmd(hw, desc, buf, size, NULL);
+}
+
+static int __fwlog_init(struct ice_hw *hw)
+{
+ struct ice_pf *pf = hw->back;
+ struct libie_fwlog_api api = {
+ .pdev = pf->pdev,
+ .send_cmd = __fwlog_send_cmd,
+ .priv = hw,
+ };
+ int err;
+
+ /* only support fw log commands on PF 0 */
+ if (hw->bus.func)
+ return -EINVAL;
+
+ err = ice_debugfs_pf_init(pf);
+ if (err)
+ return err;
+
+ api.debugfs_root = pf->ice_debugfs_pf;
+
+ return libie_fwlog_init(&hw->fwlog, &api);
+}
+
/**
* ice_init_hw - main hardware initialization routine
* @hw: pointer to the hardware structure
@@ -1012,7 +1043,7 @@ int ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_cqinit;
- status = ice_fwlog_init(hw);
+ status = __fwlog_init(hw);
if (status)
ice_debug(hw, ICE_DBG_FW_LOG, "Error initializing FW logging: %d\n",
status);
@@ -1159,6 +1190,16 @@ err_unroll_cqinit:
return status;
}
+static void __fwlog_deinit(struct ice_hw *hw)
+{
+ /* only support fw log commands on PF 0 */
+ if (hw->bus.func)
+ return;
+
+ ice_debugfs_pf_deinit(hw->back);
+ libie_fwlog_deinit(&hw->fwlog);
+}
+
/**
* ice_deinit_hw - unroll initialization operations done by ice_init_hw
* @hw: pointer to the hardware structure
@@ -1177,8 +1218,7 @@ void ice_deinit_hw(struct ice_hw *hw)
ice_free_seg(hw);
ice_free_hw_tbls(hw);
mutex_destroy(&hw->tnl_lock);
-
- ice_fwlog_deinit(hw);
+ __fwlog_deinit(hw);
ice_destroy_all_ctrlq(hw);
/* Clear VSI contexts if not already cleared */
@@ -1693,6 +1733,44 @@ int ice_write_txq_ctx(struct ice_hw *hw, struct ice_tlan_ctx *tlan_ctx,
return 0;
}
+/* Tx time Queue Context */
+static const struct packed_field_u8 ice_txtime_ctx_fields[] = {
+ /* Field Width LSB */
+ ICE_CTX_STORE(ice_txtime_ctx, base, 57, 0),
+ ICE_CTX_STORE(ice_txtime_ctx, pf_num, 3, 57),
+ ICE_CTX_STORE(ice_txtime_ctx, vmvf_num, 10, 60),
+ ICE_CTX_STORE(ice_txtime_ctx, vmvf_type, 2, 70),
+ ICE_CTX_STORE(ice_txtime_ctx, src_vsi, 10, 72),
+ ICE_CTX_STORE(ice_txtime_ctx, cpuid, 8, 82),
+ ICE_CTX_STORE(ice_txtime_ctx, tphrd_desc, 1, 90),
+ ICE_CTX_STORE(ice_txtime_ctx, qlen, 13, 91),
+ ICE_CTX_STORE(ice_txtime_ctx, timer_num, 1, 104),
+ ICE_CTX_STORE(ice_txtime_ctx, txtime_ena_q, 1, 105),
+ ICE_CTX_STORE(ice_txtime_ctx, drbell_mode_32, 1, 106),
+ ICE_CTX_STORE(ice_txtime_ctx, ts_res, 4, 107),
+ ICE_CTX_STORE(ice_txtime_ctx, ts_round_type, 2, 111),
+ ICE_CTX_STORE(ice_txtime_ctx, ts_pacing_slot, 3, 113),
+ ICE_CTX_STORE(ice_txtime_ctx, merging_ena, 1, 116),
+ ICE_CTX_STORE(ice_txtime_ctx, ts_fetch_prof_id, 4, 117),
+ ICE_CTX_STORE(ice_txtime_ctx, ts_fetch_cache_line_aln_thld, 4, 121),
+ ICE_CTX_STORE(ice_txtime_ctx, tx_pipe_delay_mode, 1, 125),
+};
+
+/**
+ * ice_pack_txtime_ctx - pack Tx time queue context into a HW buffer
+ * @ctx: the Tx time queue context to pack
+ * @buf: the HW buffer to pack into
+ *
+ * Pack the Tx time queue context from the CPU-friendly unpacked buffer into
+ * its bit-packed HW layout.
+ */
+void ice_pack_txtime_ctx(const struct ice_txtime_ctx *ctx,
+ ice_txtime_ctx_buf_t *buf)
+{
+ pack_fields(buf, sizeof(*buf), ctx, ice_txtime_ctx_fields,
+ QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
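ice_txtime_ctx_fields[] feeds the generic <linux/packing.h> machinery: each ICE_CTX_STORE() entry records a field's width and LSB position in the hardware image, and pack_fields() serializes the whole struct in one call. A reduced sketch of the same mechanism with an invented two-field context:

#include <linux/packing.h>
#include <linux/types.h>

struct demo_ctx {
	u64 base;
	u8 ena;
};

/* Mirrors the ICE_CTX_STORE() wrapper: (width, lsb) -> (startbit, endbit) */
#define DEMO_CTX_STORE(field, width, lsb) \
	PACKED_FIELD((lsb) + (width) - 1, (lsb), struct demo_ctx, field)

static const struct packed_field_u8 demo_ctx_fields[] = {
	/* Field Width LSB */
	DEMO_CTX_STORE(base, 57, 0),
	DEMO_CTX_STORE(ena, 1, 57),
};

/* Pack the CPU-friendly struct into its bit-packed little-endian image */
static void demo_pack(const struct demo_ctx *ctx, u8 *buf)
{
	pack_fields(buf, 8, ctx, demo_ctx_fields,
		    QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
}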
+
/* Sideband Queue command wrappers */
/**
@@ -2418,12 +2496,15 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
caps->reset_restrict_support);
break;
case LIBIE_AQC_CAPS_FW_LAG_SUPPORT:
- caps->roce_lag = !!(number & LIBIE_AQC_BIT_ROCEV2_LAG);
+ caps->roce_lag = number & LIBIE_AQC_BIT_ROCEV2_LAG;
ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n",
prefix, caps->roce_lag);
- caps->sriov_lag = !!(number & LIBIE_AQC_BIT_SRIOV_LAG);
+ caps->sriov_lag = number & LIBIE_AQC_BIT_SRIOV_LAG;
ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n",
prefix, caps->sriov_lag);
+ caps->sriov_aa_lag = number & LIBIE_AQC_BIT_SRIOV_AA_LAG;
+ ice_debug(hw, ICE_DBG_INIT, "%s: sriov_aa_lag = %u\n",
+ prefix, caps->sriov_aa_lag);
break;
case LIBIE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE:
caps->tx_sched_topo_comp_mode_en = (number == 1);
@@ -4712,24 +4793,24 @@ do_aq:
}
/**
- * ice_aq_cfg_lan_txq
+ * ice_aq_cfg_lan_txq - send AQ command 0x0C32 to FW
* @hw: pointer to the hardware structure
* @buf: buffer for command
* @buf_size: size of buffer in bytes
* @num_qs: number of queues being configured
* @oldport: origination lport
* @newport: destination lport
+ * @mode: command type (cmd_type) to use for the move/configure operation
* @cd: pointer to command details structure or NULL
*
* Move/Configure LAN Tx queue (0x0C32)
*
- * There is a better AQ command to use for moving nodes, so only coding
- * this one for configuring the node.
+ * Return: Zero on success, associated error code on failure.
*/
int
ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
u16 buf_size, u16 num_qs, u8 oldport, u8 newport,
- struct ice_sq_cd *cd)
+ u8 mode, struct ice_sq_cd *cd)
{
struct ice_aqc_cfg_txqs *cmd;
struct libie_aq_desc desc;
@@ -4742,10 +4823,12 @@ ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
if (!buf)
return -EINVAL;
- cmd->cmd_type = ICE_AQC_Q_CFG_TC_CHNG;
+ cmd->cmd_type = mode;
cmd->num_qs = num_qs;
cmd->port_num_chng = (oldport & ICE_AQC_Q_CFG_SRC_PRT_M);
cmd->port_num_chng |= FIELD_PREP(ICE_AQC_Q_CFG_DST_PRT_M, newport);
+ cmd->port_num_chng |= FIELD_PREP(ICE_AQC_Q_CFG_MODE_M,
+ ICE_AQC_Q_CFG_MODE_KEEP_OWN);
cmd->time_out = FIELD_PREP(ICE_AQC_Q_CFG_TIMEOUT_M, 5);
cmd->blocked_cgds = 0;
@@ -4801,6 +4884,46 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);
}
+/**
+ * ice_aq_set_txtimeq - set Tx time queues
+ * @hw: pointer to the hardware structure
+ * @txtimeq: first Tx time queue id to configure
+ * @q_count: number of queues to configure
+ * @txtime_qg: queue group to be set
+ * @buf_size: size of buffer for indirect command
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set Tx Time queue (0x0C35)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_set_txtimeq(struct ice_hw *hw, u16 txtimeq, u8 q_count,
+ struct ice_aqc_set_txtime_qgrp *txtime_qg, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_txtimeqs *cmd;
+ struct libie_aq_desc desc;
+ u16 size;
+
+ if (!txtime_qg || txtimeq > ICE_TXTIME_MAX_QUEUE ||
+ q_count < 1 || q_count > ICE_SET_TXTIME_MAX_Q_AMOUNT)
+ return -EINVAL;
+
+ size = struct_size(txtime_qg, txtimeqs, q_count);
+ if (buf_size != size)
+ return -EINVAL;
+
+ cmd = libie_aq_raw(&desc);
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_txtimeqs);
+
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
+
+ cmd->q_id = cpu_to_le16(txtimeq);
+ cmd->q_amount = cpu_to_le16(q_count);
+ return ice_aq_send_cmd(hw, &desc, txtime_qg, buf_size, cd);
+}
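Because the command rejects any buf_size that does not match struct_size() over the flexible txtimeqs[] array, callers must size the group buffer exactly. A hedged caller sketch for a single queue:

/* Sketch: configure one Tx time queue group entry. The context packing
 * step is a placeholder; a real caller fills the entry via
 * ice_pack_txtime_ctx() before sending the command.
 */
static int example_set_one_txtimeq(struct ice_hw *hw, u16 qid)
{
	struct ice_aqc_set_txtime_qgrp *qg;
	u16 size = struct_size(qg, txtimeqs, 1);
	int err;

	qg = kzalloc(size, GFP_KERNEL);
	if (!qg)
		return -ENOMEM;

	/* ... pack the Tx time queue context into qg->txtimeqs[0] ... */

	err = ice_aq_set_txtimeq(hw, qid, 1, qg, size, NULL);
	kfree(qg);
	return err;
}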
+
/* End of FW Admin Queue command wrappers */
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 60320cdf7804..e700ac0dc347 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -270,11 +270,17 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
int
ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
u16 buf_size, u16 num_qs, u8 oldport, u8 newport,
- struct ice_sq_cd *cd);
+ u8 mode, struct ice_sq_cd *cd);
int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
+int
+ice_aq_set_txtimeq(struct ice_hw *hw, u16 txtimeq, u8 q_count,
+ struct ice_aqc_set_txtime_qgrp *txtime_qg,
+ u16 buf_size, struct ice_sq_cd *cd);
+void ice_pack_txtime_ctx(const struct ice_txtime_ctx *ctx,
+ ice_txtime_ctx_buf_t *buf);
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flag);
int ice_aq_get_cgu_input_pin_measure(struct ice_hw *hw, u8 dpll_idx,
struct ice_cgu_input_measure *meas,
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index e2a036ce76ca..3b2d9c436979 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -2377,7 +2377,13 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
* The function will apply the new Tx topology from the package buffer
* if available.
*
- * Return: zero when update was successful, negative values otherwise.
+ * Return:
+ * * 0 - Successfully applied topology configuration.
+ * * -EBUSY - Failed to acquire global configuration lock.
+ * * -EEXIST - Topology configuration has already been applied.
+ * * -EIO - Unable to apply topology configuration.
+ * * -ENODEV - Failed to re-initialize device after applying configuration.
+ * * Other negative error codes indicate unexpected failures.
*/
int ice_cfg_tx_topo(struct ice_hw *hw, const void *buf, u32 len)
{
@@ -2410,7 +2416,7 @@ int ice_cfg_tx_topo(struct ice_hw *hw, const void *buf, u32 len)
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Get current topology is failed\n");
- return status;
+ return -EIO;
}
/* Is default topology already applied ? */
@@ -2497,31 +2503,45 @@ update_topo:
ICE_GLOBAL_CFG_LOCK_TIMEOUT);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to acquire global lock\n");
- return status;
+ return -EBUSY;
}
/* Check if reset was triggered already. */
reg = rd32(hw, GLGEN_RSTAT);
if (reg & GLGEN_RSTAT_DEVSTATE_M) {
- /* Reset is in progress, re-init the HW again */
ice_debug(hw, ICE_DBG_INIT, "Reset is in progress. Layer topology might be applied already\n");
ice_check_reset(hw);
- return 0;
+ /* Reset is in progress, re-init the HW again */
+ goto reinit_hw;
}
/* Set new topology */
status = ice_get_set_tx_topo(hw, new_topo, size, NULL, NULL, true);
if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Failed setting Tx topology\n");
- return status;
+ ice_debug(hw, ICE_DBG_INIT, "Failed to set Tx topology, status %pe\n",
+ ERR_PTR(status));
+ /* Only report -EIO here: the caller checks the error value and
+ * prints an informational message that the driver failed to
+ * program the Tx topology.
+ */
+ status = -EIO;
}
- /* New topology is updated, delay 1 second before issuing the CORER */
+ /* Even if the Tx topology config failed, issue a CORER here to
+ * clear the global configuration lock. Delay 1 second to allow
+ * hardware to settle, then trigger the reset.
+ */
msleep(1000);
ice_reset(hw, ICE_RESET_CORER);
- /* CORER will clear the global lock, so no explicit call
- * required for release.
- */
+ ice_check_reset(hw);
+
+reinit_hw:
+ /* Since we triggered a CORER, re-initialize hardware */
+ ice_deinit_hw(hw);
+ if (ice_init_hw(hw)) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to re-init hardware after setting Tx topology\n");
+ return -ENODEV;
+ }
- return 0;
+ return status;
}
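With the reworked error codes, probe-time callers can treat "already applied" as success and only warn on the informational -EIO case; roughly:

/* Sketch of caller-side handling for ice_cfg_tx_topo() return values. */
static int example_apply_tx_topo(struct ice_hw *hw, const void *pkg, u32 len)
{
	int err = ice_cfg_tx_topo(hw, pkg, len);

	if (!err || err == -EEXIST)
		return 0;	/* applied now, or already in place */
	if (err == -EIO)
		return 0;	/* informational: keep the default topology */

	return err;		/* -EBUSY, -ENODEV, ...: real failures */
}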
diff --git a/drivers/net/ethernet/intel/ice/ice_debugfs.c b/drivers/net/ethernet/intel/ice/ice_debugfs.c
index cb71eca6a85b..f450250fc827 100644
--- a/drivers/net/ethernet/intel/ice/ice_debugfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_debugfs.c
@@ -1,647 +1,20 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022, Intel Corporation. */
-#include <linux/fs.h>
#include <linux/debugfs.h>
-#include <linux/random.h>
-#include <linux/vmalloc.h>
#include "ice.h"
static struct dentry *ice_debugfs_root;
-/* create a define that has an extra module that doesn't really exist. this
- * is so we can add a module 'all' to easily enable/disable all the modules
- */
-#define ICE_NR_FW_LOG_MODULES (ICE_AQC_FW_LOG_ID_MAX + 1)
-
-/* the ordering in this array is important. it matches the ordering of the
- * values in the FW so the index is the same value as in ice_aqc_fw_logging_mod
- */
-static const char * const ice_fwlog_module_string[] = {
- "general",
- "ctrl",
- "link",
- "link_topo",
- "dnl",
- "i2c",
- "sdp",
- "mdio",
- "adminq",
- "hdma",
- "lldp",
- "dcbx",
- "dcb",
- "xlr",
- "nvm",
- "auth",
- "vpd",
- "iosf",
- "parser",
- "sw",
- "scheduler",
- "txq",
- "rsvd",
- "post",
- "watchdog",
- "task_dispatch",
- "mng",
- "synce",
- "health",
- "tsdrv",
- "pfreg",
- "mdlver",
- "all",
-};
-
-/* the ordering in this array is important. it matches the ordering of the
- * values in the FW so the index is the same value as in ice_fwlog_level
- */
-static const char * const ice_fwlog_level_string[] = {
- "none",
- "error",
- "warning",
- "normal",
- "verbose",
-};
-
-static const char * const ice_fwlog_log_size[] = {
- "128K",
- "256K",
- "512K",
- "1M",
- "2M",
-};
-
-/**
- * ice_fwlog_print_module_cfg - print current FW logging module configuration
- * @hw: pointer to the HW structure
- * @module: module to print
- * @s: the seq file to put data into
- */
-static void
-ice_fwlog_print_module_cfg(struct ice_hw *hw, int module, struct seq_file *s)
-{
- struct ice_fwlog_cfg *cfg = &hw->fwlog_cfg;
- struct ice_fwlog_module_entry *entry;
-
- if (module != ICE_AQC_FW_LOG_ID_MAX) {
- entry = &cfg->module_entries[module];
-
- seq_printf(s, "\tModule: %s, Log Level: %s\n",
- ice_fwlog_module_string[entry->module_id],
- ice_fwlog_level_string[entry->log_level]);
- } else {
- int i;
-
- for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
- entry = &cfg->module_entries[i];
-
- seq_printf(s, "\tModule: %s, Log Level: %s\n",
- ice_fwlog_module_string[entry->module_id],
- ice_fwlog_level_string[entry->log_level]);
- }
- }
-}
-
-static int ice_find_module_by_dentry(struct ice_pf *pf, struct dentry *d)
-{
- int i, module;
-
- module = -1;
- /* find the module based on the dentry */
- for (i = 0; i < ICE_NR_FW_LOG_MODULES; i++) {
- if (d == pf->ice_debugfs_pf_fwlog_modules[i]) {
- module = i;
- break;
- }
- }
-
- return module;
-}
-
-/**
- * ice_debugfs_module_show - read from 'module' file
- * @s: the opened file
- * @v: pointer to the offset
- */
-static int ice_debugfs_module_show(struct seq_file *s, void *v)
-{
- const struct file *filp = s->file;
- struct dentry *dentry;
- struct ice_pf *pf;
- int module;
-
- dentry = file_dentry(filp);
- pf = s->private;
-
- module = ice_find_module_by_dentry(pf, dentry);
- if (module < 0) {
- dev_info(ice_pf_to_dev(pf), "unknown module\n");
- return -EINVAL;
- }
-
- ice_fwlog_print_module_cfg(&pf->hw, module, s);
-
- return 0;
-}
-
-static int ice_debugfs_module_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, ice_debugfs_module_show, inode->i_private);
-}
-
-/**
- * ice_debugfs_module_write - write into 'module' file
- * @filp: the opened file
- * @buf: where to find the user's data
- * @count: the length of the user's data
- * @ppos: file position offset
- */
-static ssize_t
-ice_debugfs_module_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct ice_pf *pf = file_inode(filp)->i_private;
- struct dentry *dentry = file_dentry(filp);
- struct device *dev = ice_pf_to_dev(pf);
- char user_val[16], *cmd_buf;
- int module, log_level, cnt;
-
- /* don't allow partial writes or invalid input */
- if (*ppos != 0 || count > 8)
- return -EINVAL;
-
- cmd_buf = memdup_user_nul(buf, count);
- if (IS_ERR(cmd_buf))
- return PTR_ERR(cmd_buf);
-
- module = ice_find_module_by_dentry(pf, dentry);
- if (module < 0) {
- dev_info(dev, "unknown module\n");
- return -EINVAL;
- }
-
- cnt = sscanf(cmd_buf, "%s", user_val);
- if (cnt != 1)
- return -EINVAL;
-
- log_level = sysfs_match_string(ice_fwlog_level_string, user_val);
- if (log_level < 0) {
- dev_info(dev, "unknown log level '%s'\n", user_val);
- return -EINVAL;
- }
-
- if (module != ICE_AQC_FW_LOG_ID_MAX) {
- ice_pf_fwlog_update_module(pf, log_level, module);
- } else {
- /* the module 'all' is a shortcut so that we can set
- * all of the modules to the same level quickly
- */
- int i;
-
- for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++)
- ice_pf_fwlog_update_module(pf, log_level, i);
- }
-
- return count;
-}
-
-static const struct file_operations ice_debugfs_module_fops = {
- .owner = THIS_MODULE,
- .open = ice_debugfs_module_open,
- .read = seq_read,
- .release = single_release,
- .write = ice_debugfs_module_write,
-};
-
-/**
- * ice_debugfs_nr_messages_read - read from 'nr_messages' file
- * @filp: the opened file
- * @buffer: where to write the data for the user to read
- * @count: the size of the user's buffer
- * @ppos: file position offset
- */
-static ssize_t ice_debugfs_nr_messages_read(struct file *filp,
- char __user *buffer, size_t count,
- loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct ice_hw *hw = &pf->hw;
- char buff[32] = {};
-
- snprintf(buff, sizeof(buff), "%d\n",
- hw->fwlog_cfg.log_resolution);
-
- return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
-}
-
-/**
- * ice_debugfs_nr_messages_write - write into 'nr_messages' file
- * @filp: the opened file
- * @buf: where to find the user's data
- * @count: the length of the user's data
- * @ppos: file position offset
- */
-static ssize_t
-ice_debugfs_nr_messages_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- char user_val[8], *cmd_buf;
- s16 nr_messages;
- ssize_t ret;
-
- /* don't allow partial writes or invalid input */
- if (*ppos != 0 || count > 4)
- return -EINVAL;
-
- cmd_buf = memdup_user_nul(buf, count);
- if (IS_ERR(cmd_buf))
- return PTR_ERR(cmd_buf);
-
- ret = sscanf(cmd_buf, "%s", user_val);
- if (ret != 1)
- return -EINVAL;
-
- ret = kstrtos16(user_val, 0, &nr_messages);
- if (ret)
- return ret;
-
- if (nr_messages < ICE_AQC_FW_LOG_MIN_RESOLUTION ||
- nr_messages > ICE_AQC_FW_LOG_MAX_RESOLUTION) {
- dev_err(dev, "Invalid FW log number of messages %d, value must be between %d - %d\n",
- nr_messages, ICE_AQC_FW_LOG_MIN_RESOLUTION,
- ICE_AQC_FW_LOG_MAX_RESOLUTION);
- return -EINVAL;
- }
-
- hw->fwlog_cfg.log_resolution = nr_messages;
-
- return count;
-}
-
-static const struct file_operations ice_debugfs_nr_messages_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = ice_debugfs_nr_messages_read,
- .write = ice_debugfs_nr_messages_write,
-};
-
-/**
- * ice_debugfs_enable_read - read from 'enable' file
- * @filp: the opened file
- * @buffer: where to write the data for the user to read
- * @count: the size of the user's buffer
- * @ppos: file position offset
- */
-static ssize_t ice_debugfs_enable_read(struct file *filp,
- char __user *buffer, size_t count,
- loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct ice_hw *hw = &pf->hw;
- char buff[32] = {};
-
- snprintf(buff, sizeof(buff), "%u\n",
- (u16)(hw->fwlog_cfg.options &
- ICE_FWLOG_OPTION_IS_REGISTERED) >> 3);
-
- return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
-}
-
-/**
- * ice_debugfs_enable_write - write into 'enable' file
- * @filp: the opened file
- * @buf: where to find the user's data
- * @count: the length of the user's data
- * @ppos: file position offset
- */
-static ssize_t
-ice_debugfs_enable_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct ice_hw *hw = &pf->hw;
- char user_val[8], *cmd_buf;
- bool enable;
- ssize_t ret;
-
- /* don't allow partial writes or invalid input */
- if (*ppos != 0 || count > 2)
- return -EINVAL;
-
- cmd_buf = memdup_user_nul(buf, count);
- if (IS_ERR(cmd_buf))
- return PTR_ERR(cmd_buf);
-
- ret = sscanf(cmd_buf, "%s", user_val);
- if (ret != 1)
- return -EINVAL;
-
- ret = kstrtobool(user_val, &enable);
- if (ret)
- goto enable_write_error;
-
- if (enable)
- hw->fwlog_cfg.options |= ICE_FWLOG_OPTION_ARQ_ENA;
- else
- hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_ARQ_ENA;
-
- ret = ice_fwlog_set(hw, &hw->fwlog_cfg);
- if (ret)
- goto enable_write_error;
-
- if (enable)
- ret = ice_fwlog_register(hw);
- else
- ret = ice_fwlog_unregister(hw);
-
- if (ret)
- goto enable_write_error;
-
- /* if we get here, nothing went wrong; return count since we didn't
- * really write anything
- */
- ret = (ssize_t)count;
-
-enable_write_error:
- /* This function always consumes all of the written input, or produces
- * an error. Check and enforce this. Otherwise, the write operation
- * won't complete properly.
- */
- if (WARN_ON(ret != (ssize_t)count && ret >= 0))
- ret = -EIO;
-
- return ret;
-}
-
-static const struct file_operations ice_debugfs_enable_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = ice_debugfs_enable_read,
- .write = ice_debugfs_enable_write,
-};
-
-/**
- * ice_debugfs_log_size_read - read from 'log_size' file
- * @filp: the opened file
- * @buffer: where to write the data for the user to read
- * @count: the size of the user's buffer
- * @ppos: file position offset
- */
-static ssize_t ice_debugfs_log_size_read(struct file *filp,
- char __user *buffer, size_t count,
- loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct ice_hw *hw = &pf->hw;
- char buff[32] = {};
- int index;
-
- index = hw->fwlog_ring.index;
- snprintf(buff, sizeof(buff), "%s\n", ice_fwlog_log_size[index]);
-
- return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
-}
-
-/**
- * ice_debugfs_log_size_write - write into 'log_size' file
- * @filp: the opened file
- * @buf: where to find the user's data
- * @count: the length of the user's data
- * @ppos: file position offset
- */
-static ssize_t
-ice_debugfs_log_size_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- char user_val[8], *cmd_buf;
- ssize_t ret;
- int index;
-
- /* don't allow partial writes or invalid input */
- if (*ppos != 0 || count > 5)
- return -EINVAL;
-
- cmd_buf = memdup_user_nul(buf, count);
- if (IS_ERR(cmd_buf))
- return PTR_ERR(cmd_buf);
-
- ret = sscanf(cmd_buf, "%s", user_val);
- if (ret != 1)
- return -EINVAL;
-
- index = sysfs_match_string(ice_fwlog_log_size, user_val);
- if (index < 0) {
- dev_info(dev, "Invalid log size '%s'. The value must be one of 128K, 256K, 512K, 1M, 2M\n",
- user_val);
- ret = -EINVAL;
- goto log_size_write_error;
- } else if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED) {
- dev_info(dev, "FW logging is currently running. Please disable FW logging to change log_size\n");
- ret = -EINVAL;
- goto log_size_write_error;
- }
-
- /* free all the buffers and the tracking info and resize */
- ice_fwlog_realloc_rings(hw, index);
-
- /* if we get here, nothing went wrong; return count since we didn't
- * really write anything
- */
- ret = (ssize_t)count;
-
-log_size_write_error:
- /* This function always consumes all of the written input, or produces
- * an error. Check and enforce this. Otherwise, the write operation
- * won't complete properly.
- */
- if (WARN_ON(ret != (ssize_t)count && ret >= 0))
- ret = -EIO;
-
- return ret;
-}
-
-static const struct file_operations ice_debugfs_log_size_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = ice_debugfs_log_size_read,
- .write = ice_debugfs_log_size_write,
-};
-
-/**
- * ice_debugfs_data_read - read from 'data' file
- * @filp: the opened file
- * @buffer: where to write the data for the user to read
- * @count: the size of the user's buffer
- * @ppos: file position offset
- */
-static ssize_t ice_debugfs_data_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct ice_hw *hw = &pf->hw;
- int data_copied = 0;
- bool done = false;
-
- if (ice_fwlog_ring_empty(&hw->fwlog_ring))
- return 0;
-
- while (!ice_fwlog_ring_empty(&hw->fwlog_ring) && !done) {
- struct ice_fwlog_data *log;
- u16 cur_buf_len;
-
- log = &hw->fwlog_ring.rings[hw->fwlog_ring.head];
- cur_buf_len = log->data_size;
- if (cur_buf_len >= count) {
- done = true;
- continue;
- }
-
- if (copy_to_user(buffer, log->data, cur_buf_len)) {
- /* if there is an error then bail and return whatever
- * the driver has copied so far
- */
- done = true;
- continue;
- }
-
- data_copied += cur_buf_len;
- buffer += cur_buf_len;
- count -= cur_buf_len;
- *ppos += cur_buf_len;
- ice_fwlog_ring_increment(&hw->fwlog_ring.head,
- hw->fwlog_ring.size);
- }
-
- return data_copied;
-}
-
-/**
- * ice_debugfs_data_write - write into 'data' file
- * @filp: the opened file
- * @buf: where to find the user's data
- * @count: the length of the user's data
- * @ppos: file position offset
- */
-static ssize_t
-ice_debugfs_data_write(struct file *filp, const char __user *buf, size_t count,
- loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- ssize_t ret;
-
- /* don't allow partial writes */
- if (*ppos != 0)
- return 0;
-
- /* any value is allowed to clear the buffer so no need to even look at
- * what the value is
- */
- if (!(hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED)) {
- hw->fwlog_ring.head = 0;
- hw->fwlog_ring.tail = 0;
- } else {
- dev_info(dev, "Can't clear FW log data while FW log running\n");
- ret = -EINVAL;
- goto nr_buffs_write_error;
- }
-
- /* if we get here, nothing went wrong; return count since we didn't
- * really write anything
- */
- ret = (ssize_t)count;
-
-nr_buffs_write_error:
- /* This function always consumes all of the written input, or produces
- * an error. Check and enforce this. Otherwise, the write operation
- * won't complete properly.
- */
- if (WARN_ON(ret != (ssize_t)count && ret >= 0))
- ret = -EIO;
-
- return ret;
-}
-
-static const struct file_operations ice_debugfs_data_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = ice_debugfs_data_read,
- .write = ice_debugfs_data_write,
-};
-
-/**
- * ice_debugfs_fwlog_init - setup the debugfs directory
- * @pf: the ice that is starting up
- */
-void ice_debugfs_fwlog_init(struct ice_pf *pf)
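+/**
+ * ice_debugfs_pf_init - create the per-PF debugfs directory
+ * @pf: pointer to the PF private structure
+ *
+ * Return: 0 on success, or the debugfs_create_dir() error on failure.
+ */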
+int ice_debugfs_pf_init(struct ice_pf *pf)
{
const char *name = pci_name(pf->pdev);
- struct dentry *fw_modules_dir;
- struct dentry **fw_modules;
- int i;
-
- /* only support fw log commands on PF 0 */
- if (pf->hw.bus.func)
- return;
-
- /* allocate space for this first because if it fails then we don't
- * need to unwind
- */
- fw_modules = kcalloc(ICE_NR_FW_LOG_MODULES, sizeof(*fw_modules),
- GFP_KERNEL);
- if (!fw_modules)
- return;
pf->ice_debugfs_pf = debugfs_create_dir(name, ice_debugfs_root);
if (IS_ERR(pf->ice_debugfs_pf))
- goto err_create_module_files;
-
- pf->ice_debugfs_pf_fwlog = debugfs_create_dir("fwlog",
- pf->ice_debugfs_pf);
- if (IS_ERR(pf->ice_debugfs_pf_fwlog))
- goto err_create_module_files;
+ return PTR_ERR(pf->ice_debugfs_pf);
- fw_modules_dir = debugfs_create_dir("modules",
- pf->ice_debugfs_pf_fwlog);
- if (IS_ERR(fw_modules_dir))
- goto err_create_module_files;
-
- for (i = 0; i < ICE_NR_FW_LOG_MODULES; i++) {
- fw_modules[i] = debugfs_create_file(ice_fwlog_module_string[i],
- 0600, fw_modules_dir, pf,
- &ice_debugfs_module_fops);
- if (IS_ERR(fw_modules[i]))
- goto err_create_module_files;
- }
-
- debugfs_create_file("nr_messages", 0600,
- pf->ice_debugfs_pf_fwlog, pf,
- &ice_debugfs_nr_messages_fops);
-
- pf->ice_debugfs_pf_fwlog_modules = fw_modules;
-
- debugfs_create_file("enable", 0600, pf->ice_debugfs_pf_fwlog,
- pf, &ice_debugfs_enable_fops);
-
- debugfs_create_file("log_size", 0600, pf->ice_debugfs_pf_fwlog,
- pf, &ice_debugfs_log_size_fops);
-
- debugfs_create_file("data", 0600, pf->ice_debugfs_pf_fwlog,
- pf, &ice_debugfs_data_fops);
-
- return;
-
-err_create_module_files:
- debugfs_remove_recursive(pf->ice_debugfs_pf_fwlog);
- kfree(fw_modules);
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 55e0f2c6af9e..dc131779d426 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -3147,9 +3147,11 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
+ struct ice_hw *hw;
- ring->rx_max_pending = ICE_MAX_NUM_DESC;
- ring->tx_max_pending = ICE_MAX_NUM_DESC;
+ hw = &vsi->back->hw;
+ ring->rx_max_pending = ICE_MAX_NUM_DESC_BY_MAC(hw);
+ ring->tx_max_pending = ICE_MAX_NUM_DESC_BY_MAC(hw);
if (vsi->tx_rings && vsi->rx_rings) {
ring->rx_pending = vsi->rx_rings[0]->count;
ring->tx_pending = vsi->tx_rings[0]->count;
@@ -3177,15 +3179,16 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
int i, timeout = 50, err = 0;
+ struct ice_hw *hw = &pf->hw;
u16 new_rx_cnt, new_tx_cnt;
- if (ring->tx_pending > ICE_MAX_NUM_DESC ||
+ if (ring->tx_pending > ICE_MAX_NUM_DESC_BY_MAC(hw) ||
ring->tx_pending < ICE_MIN_NUM_DESC ||
- ring->rx_pending > ICE_MAX_NUM_DESC ||
+ ring->rx_pending > ICE_MAX_NUM_DESC_BY_MAC(hw) ||
ring->rx_pending < ICE_MIN_NUM_DESC) {
netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
ring->tx_pending, ring->rx_pending,
- ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC,
+ ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC_BY_MAC(hw),
ICE_REQ_DESC_MULTIPLE);
return -EINVAL;
}
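ICE_MAX_NUM_DESC_BY_MAC() replaces the flat ICE_MAX_NUM_DESC ceiling because newer MACs support deeper descriptor rings. Its definition is not part of this hunk; a hypothetical shape, with an invented E830 limit:

/* Hypothetical sketch of the per-MAC descriptor ceiling; the real
 * ICE_MAX_NUM_DESC_BY_MAC() lives in the ice headers and its E830
 * limit may differ.
 */
#define EXAMPLE_E830_MAX_NUM_DESC	8096
#define EXAMPLE_MAX_NUM_DESC_BY_MAC(hw)					\
	((hw)->mac_type == ICE_MAC_E830 ? EXAMPLE_E830_MAX_NUM_DESC :	\
					  ICE_MAX_NUM_DESC)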
@@ -3258,6 +3261,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
tx_rings[i].count = new_tx_cnt;
tx_rings[i].desc = NULL;
tx_rings[i].tx_buf = NULL;
+ tx_rings[i].tstamp_ring = NULL;
tx_rings[i].tx_tstamps = &pf->ptp.port.tx;
err = ice_setup_tx_ring(&tx_rings[i]);
if (err) {
@@ -4620,10 +4624,12 @@ static int ice_get_port_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
* ice_get_fec_stats - returns FEC correctable, uncorrectable stats per netdev
* @netdev: network interface device structure
* @fec_stats: buffer to hold FEC statistics for given port
+ * @hist: buffer to hold FEC histogram statistics for the given port
*
*/
static void ice_get_fec_stats(struct net_device *netdev,
- struct ethtool_fec_stats *fec_stats)
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_port_topology port_topology;
diff --git a/drivers/net/ethernet/intel/ice/ice_fwlog.c b/drivers/net/ethernet/intel/ice/ice_fwlog.c
deleted file mode 100644
index a31bb026ad34..000000000000
--- a/drivers/net/ethernet/intel/ice/ice_fwlog.c
+++ /dev/null
@@ -1,474 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2022, Intel Corporation. */
-
-#include <linux/vmalloc.h>
-#include "ice.h"
-#include "ice_common.h"
-#include "ice_fwlog.h"
-
-bool ice_fwlog_ring_full(struct ice_fwlog_ring *rings)
-{
- u16 head, tail;
-
- head = rings->head;
- tail = rings->tail;
-
- if (head < tail && (tail - head == (rings->size - 1)))
- return true;
- else if (head > tail && (tail == (head - 1)))
- return true;
-
- return false;
-}
-
-bool ice_fwlog_ring_empty(struct ice_fwlog_ring *rings)
-{
- return rings->head == rings->tail;
-}
-
-void ice_fwlog_ring_increment(u16 *item, u16 size)
-{
- *item = (*item + 1) & (size - 1);
-}
-
-static int ice_fwlog_alloc_ring_buffs(struct ice_fwlog_ring *rings)
-{
- int i, nr_bytes;
- u8 *mem;
-
- nr_bytes = rings->size * ICE_AQ_MAX_BUF_LEN;
- mem = vzalloc(nr_bytes);
- if (!mem)
- return -ENOMEM;
-
- for (i = 0; i < rings->size; i++) {
- struct ice_fwlog_data *ring = &rings->rings[i];
-
- ring->data_size = ICE_AQ_MAX_BUF_LEN;
- ring->data = mem;
- mem += ICE_AQ_MAX_BUF_LEN;
- }
-
- return 0;
-}
-
-static void ice_fwlog_free_ring_buffs(struct ice_fwlog_ring *rings)
-{
- int i;
-
- for (i = 0; i < rings->size; i++) {
- struct ice_fwlog_data *ring = &rings->rings[i];
-
- /* the first ring is the base memory for the whole range so
- * free it
- */
- if (!i)
- vfree(ring->data);
-
- ring->data = NULL;
- ring->data_size = 0;
- }
-}
-
-#define ICE_FWLOG_INDEX_TO_BYTES(n) ((128 * 1024) << (n))
-/**
- * ice_fwlog_realloc_rings - reallocate the FW log rings
- * @hw: pointer to the HW structure
- * @index: the new index to use to allocate memory for the log data
- *
- */
-void ice_fwlog_realloc_rings(struct ice_hw *hw, int index)
-{
- struct ice_fwlog_ring ring;
- int status, ring_size;
-
- /* convert the number of bytes into a number of 4K buffers. externally
- * the driver presents the interface to the FW log data as a number of
- * bytes because that's easy for users to understand. internally the
- * driver uses a ring of buffers because the driver doesn't know where
- * the beginning and end of any line of log data is so the driver has
- * to overwrite data as complete blocks. when the data is returned to
- * the user the driver knows that the data is correct and the FW log
- * can be correctly parsed by the tools
- */
- ring_size = ICE_FWLOG_INDEX_TO_BYTES(index) / ICE_AQ_MAX_BUF_LEN;
- if (ring_size == hw->fwlog_ring.size)
- return;
-
- /* allocate space for the new rings and buffers then release the
- * old rings and buffers. that way if we don't have enough
- * memory then we at least have what we had before
- */
- ring.rings = kcalloc(ring_size, sizeof(*ring.rings), GFP_KERNEL);
- if (!ring.rings)
- return;
-
- ring.size = ring_size;
-
- status = ice_fwlog_alloc_ring_buffs(&ring);
- if (status) {
- dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log ring data buffers\n");
- ice_fwlog_free_ring_buffs(&ring);
- kfree(ring.rings);
- return;
- }
-
- ice_fwlog_free_ring_buffs(&hw->fwlog_ring);
- kfree(hw->fwlog_ring.rings);
-
- hw->fwlog_ring.rings = ring.rings;
- hw->fwlog_ring.size = ring.size;
- hw->fwlog_ring.index = index;
- hw->fwlog_ring.head = 0;
- hw->fwlog_ring.tail = 0;
-}
-
-/**
- * ice_fwlog_init - Initialize FW logging configuration
- * @hw: pointer to the HW structure
- *
- * This function should be called on driver initialization during
- * ice_init_hw().
- */
-int ice_fwlog_init(struct ice_hw *hw)
-{
- /* only support fw log commands on PF 0 */
- if (hw->bus.func)
- return -EINVAL;
-
- ice_fwlog_set_supported(hw);
-
- if (ice_fwlog_supported(hw)) {
- int status;
-
- /* read the current config from the FW and store it */
- status = ice_fwlog_get(hw, &hw->fwlog_cfg);
- if (status)
- return status;
-
- hw->fwlog_ring.rings = kcalloc(ICE_FWLOG_RING_SIZE_DFLT,
- sizeof(*hw->fwlog_ring.rings),
- GFP_KERNEL);
- if (!hw->fwlog_ring.rings) {
- dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log rings\n");
- return -ENOMEM;
- }
-
- hw->fwlog_ring.size = ICE_FWLOG_RING_SIZE_DFLT;
- hw->fwlog_ring.index = ICE_FWLOG_RING_SIZE_INDEX_DFLT;
-
- status = ice_fwlog_alloc_ring_buffs(&hw->fwlog_ring);
- if (status) {
- dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log ring data buffers\n");
- ice_fwlog_free_ring_buffs(&hw->fwlog_ring);
- kfree(hw->fwlog_ring.rings);
- return status;
- }
-
- ice_debugfs_fwlog_init(hw->back);
- } else {
- dev_warn(ice_hw_to_dev(hw), "FW logging is not supported in this NVM image. Please update the NVM to get FW log support\n");
- }
-
- return 0;
-}
-
-/**
- * ice_fwlog_deinit - unroll FW logging configuration
- * @hw: pointer to the HW structure
- *
- * This function should be called in ice_deinit_hw().
- */
-void ice_fwlog_deinit(struct ice_hw *hw)
-{
- struct ice_pf *pf = hw->back;
- int status;
-
- /* only support fw log commands on PF 0 */
- if (hw->bus.func)
- return;
-
- ice_debugfs_pf_deinit(hw->back);
-
- /* make sure FW logging is disabled to not put the FW in a weird state
- * for the next driver load
- */
- hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_ARQ_ENA;
- status = ice_fwlog_set(hw, &hw->fwlog_cfg);
- if (status)
- dev_warn(ice_hw_to_dev(hw), "Unable to turn off FW logging, status: %d\n",
- status);
-
- kfree(pf->ice_debugfs_pf_fwlog_modules);
-
- pf->ice_debugfs_pf_fwlog_modules = NULL;
-
- status = ice_fwlog_unregister(hw);
- if (status)
- dev_warn(ice_hw_to_dev(hw), "Unable to unregister FW logging, status: %d\n",
- status);
-
- if (hw->fwlog_ring.rings) {
- ice_fwlog_free_ring_buffs(&hw->fwlog_ring);
- kfree(hw->fwlog_ring.rings);
- }
-}
-
-/**
- * ice_fwlog_supported - Cached for whether FW supports FW logging or not
- * @hw: pointer to the HW structure
- *
- * This will always return false if called before ice_init_hw(), so it must be
- * called after ice_init_hw().
- */
-bool ice_fwlog_supported(struct ice_hw *hw)
-{
- return hw->fwlog_supported;
-}
-
-/**
- * ice_aq_fwlog_set - Set FW logging configuration AQ command (0xFF30)
- * @hw: pointer to the HW structure
- * @entries: entries to configure
- * @num_entries: number of @entries
- * @options: options from ice_fwlog_cfg->options structure
- * @log_resolution: logging resolution
- */
-static int
-ice_aq_fwlog_set(struct ice_hw *hw, struct ice_fwlog_module_entry *entries,
- u16 num_entries, u16 options, u16 log_resolution)
-{
- struct ice_aqc_fw_log_cfg_resp *fw_modules;
- struct ice_aqc_fw_log *cmd;
- struct libie_aq_desc desc;
- int status;
- int i;
-
- fw_modules = kcalloc(num_entries, sizeof(*fw_modules), GFP_KERNEL);
- if (!fw_modules)
- return -ENOMEM;
-
- for (i = 0; i < num_entries; i++) {
- fw_modules[i].module_identifier =
- cpu_to_le16(entries[i].module_id);
- fw_modules[i].log_level = entries[i].log_level;
- }
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_config);
- desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
-
- cmd = libie_aq_raw(&desc);
-
- cmd->cmd_flags = ICE_AQC_FW_LOG_CONF_SET_VALID;
- cmd->ops.cfg.log_resolution = cpu_to_le16(log_resolution);
- cmd->ops.cfg.mdl_cnt = cpu_to_le16(num_entries);
-
- if (options & ICE_FWLOG_OPTION_ARQ_ENA)
- cmd->cmd_flags |= ICE_AQC_FW_LOG_CONF_AQ_EN;
- if (options & ICE_FWLOG_OPTION_UART_ENA)
- cmd->cmd_flags |= ICE_AQC_FW_LOG_CONF_UART_EN;
-
- status = ice_aq_send_cmd(hw, &desc, fw_modules,
- sizeof(*fw_modules) * num_entries,
- NULL);
-
- kfree(fw_modules);
-
- return status;
-}
-
-/**
- * ice_fwlog_set - Set the firmware logging settings
- * @hw: pointer to the HW structure
- * @cfg: config used to set firmware logging
- *
- * This function should be called whenever the driver needs to set the firmware
- * logging configuration. It can be called on initialization, reset, or during
- * runtime.
- *
- * If the PF wishes to receive FW logging then it must register via
- * ice_fwlog_register. Note, that ice_fwlog_register does not need to be called
- * for init.
- */
-int ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
-{
- if (!ice_fwlog_supported(hw))
- return -EOPNOTSUPP;
-
- return ice_aq_fwlog_set(hw, cfg->module_entries,
- ICE_AQC_FW_LOG_ID_MAX, cfg->options,
- cfg->log_resolution);
-}
-
-/**
- * ice_aq_fwlog_get - Get the current firmware logging configuration (0xFF32)
- * @hw: pointer to the HW structure
- * @cfg: firmware logging configuration to populate
- */
-static int ice_aq_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
-{
- struct ice_aqc_fw_log_cfg_resp *fw_modules;
- struct ice_aqc_fw_log *cmd;
- struct libie_aq_desc desc;
- u16 module_id_cnt;
- int status;
- void *buf;
- int i;
-
- memset(cfg, 0, sizeof(*cfg));
-
- buf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_query);
- cmd = libie_aq_raw(&desc);
-
- cmd->cmd_flags = ICE_AQC_FW_LOG_AQ_QUERY;
-
- status = ice_aq_send_cmd(hw, &desc, buf, ICE_AQ_MAX_BUF_LEN, NULL);
- if (status) {
- ice_debug(hw, ICE_DBG_FW_LOG, "Failed to get FW log configuration\n");
- goto status_out;
- }
-
- module_id_cnt = le16_to_cpu(cmd->ops.cfg.mdl_cnt);
- if (module_id_cnt < ICE_AQC_FW_LOG_ID_MAX) {
- ice_debug(hw, ICE_DBG_FW_LOG, "FW returned less than the expected number of FW log module IDs\n");
- } else if (module_id_cnt > ICE_AQC_FW_LOG_ID_MAX) {
- ice_debug(hw, ICE_DBG_FW_LOG, "FW returned more than expected number of FW log module IDs, setting module_id_cnt to software expected max %u\n",
- ICE_AQC_FW_LOG_ID_MAX);
- module_id_cnt = ICE_AQC_FW_LOG_ID_MAX;
- }
-
- cfg->log_resolution = le16_to_cpu(cmd->ops.cfg.log_resolution);
- if (cmd->cmd_flags & ICE_AQC_FW_LOG_CONF_AQ_EN)
- cfg->options |= ICE_FWLOG_OPTION_ARQ_ENA;
- if (cmd->cmd_flags & ICE_AQC_FW_LOG_CONF_UART_EN)
- cfg->options |= ICE_FWLOG_OPTION_UART_ENA;
- if (cmd->cmd_flags & ICE_AQC_FW_LOG_QUERY_REGISTERED)
- cfg->options |= ICE_FWLOG_OPTION_IS_REGISTERED;
-
- fw_modules = (struct ice_aqc_fw_log_cfg_resp *)buf;
-
- for (i = 0; i < module_id_cnt; i++) {
- struct ice_aqc_fw_log_cfg_resp *fw_module = &fw_modules[i];
-
- cfg->module_entries[i].module_id =
- le16_to_cpu(fw_module->module_identifier);
- cfg->module_entries[i].log_level = fw_module->log_level;
- }
-
-status_out:
- kfree(buf);
- return status;
-}
-
-/**
- * ice_fwlog_get - Get the firmware logging settings
- * @hw: pointer to the HW structure
- * @cfg: config to populate based on current firmware logging settings
- */
-int ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
-{
- if (!ice_fwlog_supported(hw))
- return -EOPNOTSUPP;
-
- return ice_aq_fwlog_get(hw, cfg);
-}
-
-/**
- * ice_aq_fwlog_register - Register PF for firmware logging events (0xFF31)
- * @hw: pointer to the HW structure
- * @reg: true to register and false to unregister
- */
-static int ice_aq_fwlog_register(struct ice_hw *hw, bool reg)
-{
- struct ice_aqc_fw_log *cmd;
- struct libie_aq_desc desc;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_register);
- cmd = libie_aq_raw(&desc);
-
- if (reg)
- cmd->cmd_flags = ICE_AQC_FW_LOG_AQ_REGISTER;
-
- return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
-}
-
-/**
- * ice_fwlog_register - Register the PF for firmware logging
- * @hw: pointer to the HW structure
- *
- * After this call the PF will start to receive firmware logging based on the
- * configuration set in ice_fwlog_set.
- */
-int ice_fwlog_register(struct ice_hw *hw)
-{
- int status;
-
- if (!ice_fwlog_supported(hw))
- return -EOPNOTSUPP;
-
- status = ice_aq_fwlog_register(hw, true);
- if (status)
- ice_debug(hw, ICE_DBG_FW_LOG, "Failed to register for firmware logging events over ARQ\n");
- else
- hw->fwlog_cfg.options |= ICE_FWLOG_OPTION_IS_REGISTERED;
-
- return status;
-}
-
-/**
- * ice_fwlog_unregister - Unregister the PF from firmware logging
- * @hw: pointer to the HW structure
- */
-int ice_fwlog_unregister(struct ice_hw *hw)
-{
- int status;
-
- if (!ice_fwlog_supported(hw))
- return -EOPNOTSUPP;
-
- status = ice_aq_fwlog_register(hw, false);
- if (status)
- ice_debug(hw, ICE_DBG_FW_LOG, "Failed to unregister from firmware logging events over ARQ\n");
- else
- hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_IS_REGISTERED;
-
- return status;
-}
-
-/**
- * ice_fwlog_set_supported - Set if FW logging is supported by FW
- * @hw: pointer to the HW struct
- *
- * If FW returns success to the ice_aq_fwlog_get call then it supports FW
- * logging, else it doesn't. Set the fwlog_supported flag accordingly.
- *
- * This function is only meant to be called during driver init to determine if
- * the FW support FW logging.
- */
-void ice_fwlog_set_supported(struct ice_hw *hw)
-{
- struct ice_fwlog_cfg *cfg;
- int status;
-
- hw->fwlog_supported = false;
-
- cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
- if (!cfg)
- return;
-
- /* don't call ice_fwlog_get() because that would check to see if FW
- * logging is supported which is what the driver is determining now
- */
- status = ice_aq_fwlog_get(hw, cfg);
- if (status)
- ice_debug(hw, ICE_DBG_FW_LOG, "ice_aq_fwlog_get failed, FW logging is not supported on this version of FW, status %d\n",
- status);
- else
- hw->fwlog_supported = true;
-
- kfree(cfg);
-}
diff --git a/drivers/net/ethernet/intel/ice/ice_fwlog.h b/drivers/net/ethernet/intel/ice/ice_fwlog.h
deleted file mode 100644
index 287e71fa4b86..000000000000
--- a/drivers/net/ethernet/intel/ice/ice_fwlog.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2022, Intel Corporation. */
-
-#ifndef _ICE_FWLOG_H_
-#define _ICE_FWLOG_H_
-#include "ice_adminq_cmd.h"
-
-struct ice_hw;
-
-/* Only a single log level should be set and all log levels under the set value
- * are enabled, e.g. if log level is set to ICE_FW_LOG_LEVEL_VERBOSE, then all
- * other log levels are included (except ICE_FW_LOG_LEVEL_NONE)
- */
-enum ice_fwlog_level {
- ICE_FWLOG_LEVEL_NONE = 0,
- ICE_FWLOG_LEVEL_ERROR = 1,
- ICE_FWLOG_LEVEL_WARNING = 2,
- ICE_FWLOG_LEVEL_NORMAL = 3,
- ICE_FWLOG_LEVEL_VERBOSE = 4,
- ICE_FWLOG_LEVEL_INVALID, /* all values >= this entry are invalid */
-};
-
-struct ice_fwlog_module_entry {
- /* module ID for the corresponding firmware logging event */
- u16 module_id;
- /* verbosity level for the module_id */
- u8 log_level;
-};
-
-struct ice_fwlog_cfg {
- /* list of modules for configuring log level */
- struct ice_fwlog_module_entry module_entries[ICE_AQC_FW_LOG_ID_MAX];
- /* options used to configure firmware logging */
- u16 options;
-#define ICE_FWLOG_OPTION_ARQ_ENA BIT(0)
-#define ICE_FWLOG_OPTION_UART_ENA BIT(1)
- /* set before calling ice_fwlog_init() so the PF registers for firmware
- * logging on initialization
- */
-#define ICE_FWLOG_OPTION_REGISTER_ON_INIT BIT(2)
- /* set in the ice_fwlog_get() response if the PF is registered for FW
- * logging events over ARQ
- */
-#define ICE_FWLOG_OPTION_IS_REGISTERED BIT(3)
-
- /* minimum number of log events sent per Admin Receive Queue event */
- u16 log_resolution;
-};
-
-struct ice_fwlog_data {
- u16 data_size;
- u8 *data;
-};
-
-struct ice_fwlog_ring {
- struct ice_fwlog_data *rings;
- u16 index;
- u16 size;
- u16 head;
- u16 tail;
-};
-
-#define ICE_FWLOG_RING_SIZE_INDEX_DFLT 3
-#define ICE_FWLOG_RING_SIZE_DFLT 256
-#define ICE_FWLOG_RING_SIZE_MAX 512
-
-bool ice_fwlog_ring_full(struct ice_fwlog_ring *rings);
-bool ice_fwlog_ring_empty(struct ice_fwlog_ring *rings);
-void ice_fwlog_ring_increment(u16 *item, u16 size);
-void ice_fwlog_set_supported(struct ice_hw *hw);
-bool ice_fwlog_supported(struct ice_hw *hw);
-int ice_fwlog_init(struct ice_hw *hw);
-void ice_fwlog_deinit(struct ice_hw *hw);
-int ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
-int ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
-int ice_fwlog_register(struct ice_hw *hw);
-int ice_fwlog_unregister(struct ice_hw *hw);
-void ice_fwlog_realloc_rings(struct ice_hw *hw, int index);
-#endif /* _ICE_FWLOG_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index dd520aa4d1d6..082ad33c53dc 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -19,6 +19,7 @@
#define QTX_COMM_HEAD_MAX_INDEX 16383
#define QTX_COMM_HEAD_HEAD_S 0
#define QTX_COMM_HEAD_HEAD_M ICE_M(0x1FFF, 0)
+#define E830_GLQTX_TXTIME_DBELL_LSB(_DBQM) (0x002E0000 + ((_DBQM) * 8))
#define PF_FW_ARQBAH 0x00080180
#define PF_FW_ARQBAL 0x00080080
#define PF_FW_ARQH 0x00080380
@@ -571,6 +572,8 @@
#define E830_PFPTM_SEM_BUSY_M BIT(0)
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1)
+#define E830_GLTXTIME_FETCH_PROFILE(_i, _j) (0x002D3500 + ((_i) * 4 + (_j) * 64))
+#define E830_GLTXTIME_FETCH_PROFILE_FETCH_TS_DESC_M ICE_M(0x1FF, 0)
#define E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH 0x00234000
#define E830_MBX_VF_DEC_TRIG(_VF) (0x00233800 + (_VF) * 4)
#define E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT(_VF) (0x00233000 + (_VF) * 4)
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
index 6ab53e430f91..420d45c2558b 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -336,6 +336,7 @@ int ice_plug_aux_dev(struct ice_pf *pf)
mutex_lock(&pf->adev_mutex);
cdev->adev = adev;
mutex_unlock(&pf->adev_mutex);
+ set_bit(ICE_FLAG_AUX_DEV_CREATED, pf->flags);
return 0;
}
@@ -347,15 +348,16 @@ void ice_unplug_aux_dev(struct ice_pf *pf)
{
struct auxiliary_device *adev;
+ if (!test_and_clear_bit(ICE_FLAG_AUX_DEV_CREATED, pf->flags))
+ return;
+
mutex_lock(&pf->adev_mutex);
adev = pf->cdev_info->adev;
pf->cdev_info->adev = NULL;
mutex_unlock(&pf->adev_mutex);
- if (adev) {
- auxiliary_device_delete(adev);
- auxiliary_device_uninit(adev);
- }
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
}
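The new ICE_FLAG_AUX_DEV_CREATED bit makes unplug idempotent: test_and_clear_bit() atomically reads and clears the flag, so only the first of any racing or repeated unplug calls performs the teardown. The guard pattern in isolation:

#include <linux/bitops.h>

/* Sketch: tear a resource down exactly once across racing callers. */
static void example_teardown_once(unsigned long *flags, int bit)
{
	/* Atomic read-and-clear: only the caller that observes the bit
	 * set proceeds; later callers see it already cleared and bail.
	 */
	if (!test_and_clear_bit(bit, flags))
		return;

	/* ... auxiliary_device_delete()/auxiliary_device_uninit() ... */
}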
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index b1129da72139..aebf8e08a297 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -10,12 +10,17 @@
#define ICE_LAG_RES_SHARED BIT(14)
#define ICE_LAG_RES_VALID BIT(15)
-#define LACP_TRAIN_PKT_LEN 16
-static const u8 lacp_train_pkt[LACP_TRAIN_PKT_LEN] = { 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0,
- 0x88, 0x09, 0, 0 };
+#define ICE_TRAIN_PKT_LEN 16
+static const u8 lacp_train_pkt[ICE_TRAIN_PKT_LEN] = { 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0x88, 0x09, 0, 0 };
+static const u8 act_act_train_pkt[ICE_TRAIN_PKT_LEN] = { 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0 };
#define ICE_RECIPE_LEN 64
+#define ICE_LAG_SRIOV_CP_RECIPE 10
+
static const u8 ice_dflt_vsi_rcp[ICE_RECIPE_LEN] = {
0x05, 0, 0, 0, 0x20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0x85, 0, 0x01, 0, 0, 0, 0xff, 0xff, 0x08, 0, 0, 0, 0, 0, 0, 0,
@@ -46,10 +51,10 @@ static void ice_lag_set_primary(struct ice_lag *lag)
}
/**
- * ice_lag_set_backup - set PF LAG state to Backup
+ * ice_lag_set_bkup - set PF LAG state to Backup
* @lag: LAG info struct
*/
-static void ice_lag_set_backup(struct ice_lag *lag)
+static void ice_lag_set_bkup(struct ice_lag *lag)
{
struct ice_pf *pf = lag->pf;
@@ -99,6 +104,28 @@ static bool netif_is_same_ice(struct ice_pf *pf, struct net_device *netdev)
}
/**
+ * ice_lag_config_eswitch - configure eswitch to work with LAG
+ * @lag: lag info struct
+ * @netdev: active network interface device struct
+ *
+ * Updates all port representors in eswitch to use @netdev for Tx.
+ *
+ * Configures the netdev to keep dst metadata (also used in representor Tx).
+ * This is required when the uplink is not configured in switchdev mode.
+ */
+static void ice_lag_config_eswitch(struct ice_lag *lag,
+ struct net_device *netdev)
+{
+ struct ice_repr *repr;
+ unsigned long id;
+
+ xa_for_each(&lag->pf->eswitch.reprs, id, repr)
+ repr->dst->u.port_info.lower_dev = netdev;
+
+ netif_keep_dst(netdev);
+}
+
+/**
* ice_netdev_to_lag - return pointer to associated lag struct from netdev
* @netdev: pointer to net_device struct to query
*/
@@ -210,13 +237,12 @@ ice_lag_cfg_fltr(struct ice_lag *lag, u32 act, u16 recipe_id, u16 *rule_idx,
u8 direction, bool add)
{
struct ice_sw_rule_lkup_rx_tx *s_rule;
+ struct ice_hw *hw = &lag->pf->hw;
u16 s_rule_sz, vsi_num;
- struct ice_hw *hw;
u8 *eth_hdr;
u32 opc;
int err;
- hw = &lag->pf->hw;
vsi_num = ice_get_hw_vsi_num(hw, 0);
s_rule_sz = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule);
@@ -314,26 +340,15 @@ ice_lag_cfg_drop_fltr(struct ice_lag *lag, bool add)
}
/**
- * ice_lag_cfg_pf_fltrs - set filters up for new active port
+ * ice_lag_cfg_pf_fltrs_act_bkup - set filters up for new active port
* @lag: local interfaces lag struct
- * @ptr: opaque data containing notifier event
+ * @bonding_info: netdev event bonding info
*/
static void
-ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr)
+ice_lag_cfg_pf_fltrs_act_bkup(struct ice_lag *lag,
+ struct netdev_bonding_info *bonding_info)
{
- struct netdev_notifier_bonding_info *info;
- struct netdev_bonding_info *bonding_info;
- struct net_device *event_netdev;
- struct device *dev;
-
- event_netdev = netdev_notifier_info_to_dev(ptr);
- /* not for this netdev */
- if (event_netdev != lag->netdev)
- return;
-
- info = (struct netdev_notifier_bonding_info *)ptr;
- bonding_info = &info->bonding_info;
- dev = ice_pf_to_dev(lag->pf);
+ struct device *dev = ice_pf_to_dev(lag->pf);
/* interface not active - remove old default VSI rule */
if (bonding_info->slave.state && lag->pf_rx_rule_id) {
@@ -354,6 +369,105 @@ ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr)
}
/**
+ * ice_lag_cfg_lp_fltr - configure lport filters
+ * @lag: local interface's lag struct
+ * @add: add or remove rule
+ * @cp: control packet only or general PF lport rule
+ */
+static void
+ice_lag_cfg_lp_fltr(struct ice_lag *lag, bool add, bool cp)
+{
+ struct ice_sw_rule_lkup_rx_tx *s_rule;
+ struct ice_vsi *vsi = lag->pf->vsi[0];
+ u16 buf_len, opc;
+
+ buf_len = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule, ICE_TRAIN_PKT_LEN);
+ s_rule = kzalloc(buf_len, GFP_KERNEL);
+ if (!s_rule) {
+ netdev_warn(lag->netdev, "-ENOMEM error configuring CP filter\n");
+ return;
+ }
+
+ if (add) {
+ if (cp) {
+ s_rule->recipe_id =
+ cpu_to_le16(ICE_LAG_SRIOV_CP_RECIPE);
+ memcpy(s_rule->hdr_data, lacp_train_pkt,
+ ICE_TRAIN_PKT_LEN);
+ } else {
+ s_rule->recipe_id = cpu_to_le16(lag->act_act_recipe);
+ memcpy(s_rule->hdr_data, act_act_train_pkt,
+ ICE_TRAIN_PKT_LEN);
+ }
+
+ s_rule->src = cpu_to_le16(vsi->port_info->lport);
+ s_rule->act = cpu_to_le32(ICE_FWD_TO_VSI |
+ ICE_SINGLE_ACT_LAN_ENABLE |
+ ICE_SINGLE_ACT_VALID_BIT |
+ FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M,
+ vsi->vsi_num));
+ s_rule->hdr_len = cpu_to_le16(ICE_TRAIN_PKT_LEN);
+ s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
+ opc = ice_aqc_opc_add_sw_rules;
+ } else {
+ opc = ice_aqc_opc_remove_sw_rules;
+ if (cp)
+ s_rule->index = cpu_to_le16(lag->cp_rule_idx);
+ else
+ s_rule->index = cpu_to_le16(lag->act_act_rule_idx);
+ }
+ if (ice_aq_sw_rules(&lag->pf->hw, s_rule, buf_len, 1, opc, NULL)) {
+ netdev_warn(lag->netdev, "Error %s %s rule for aggregate\n",
+ add ? "ADDING" : "REMOVING",
+ cp ? "CONTROL PACKET" : "LPORT");
+ goto err_cp_free;
+ }
+
+ if (add) {
+ if (cp)
+ lag->cp_rule_idx = le16_to_cpu(s_rule->index);
+ else
+ lag->act_act_rule_idx = le16_to_cpu(s_rule->index);
+ } else {
+ if (cp)
+ lag->cp_rule_idx = 0;
+ else
+ lag->act_act_rule_idx = 0;
+ }
+
+err_cp_free:
+ kfree(s_rule);
+}
+
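[Editorial aside] The (add, cp) flag pairs select four distinct operations. The block below paraphrases the call sites introduced elsewhere in this patch; it is illustrative only, not additional driver code:

/* Illustrative only -- how the (add, cp) pairs are exercised:
 *
 *   ice_lag_cfg_lp_fltr(lag, true,  true);   bond formed: trap LACP
 *                                            control packets with the
 *                                            ICE_LAG_SRIOV_CP_RECIPE rule
 *   ice_lag_cfg_lp_fltr(lag, true,  false);  A/A bond: steer PF lport
 *                                            traffic via act_act_recipe
 *   ice_lag_cfg_lp_fltr(lag, false, true);   remove rule at cp_rule_idx
 *   ice_lag_cfg_lp_fltr(lag, false, false);  remove rule at act_act_rule_idx
 */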
+/**
+ * ice_lag_cfg_pf_fltrs - set filters up for PF traffic
+ * @lag: local interface's lag struct
+ * @ptr: opaque data containing notifier event
+ */
+static void
+ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr)
+{
+ struct netdev_notifier_bonding_info *info = ptr;
+ struct netdev_bonding_info *bonding_info;
+ struct net_device *event_netdev;
+
+ event_netdev = netdev_notifier_info_to_dev(ptr);
+ if (event_netdev != lag->netdev)
+ return;
+
+ bonding_info = &info->bonding_info;
+
+ if (lag->bond_aa) {
+ if (lag->need_fltr_cfg) {
+ ice_lag_cfg_lp_fltr(lag, true, false);
+ lag->need_fltr_cfg = false;
+ }
+ } else {
+ ice_lag_cfg_pf_fltrs_act_bkup(lag, bonding_info);
+ }
+}
+
+/**
* ice_display_lag_info - print LAG info
* @lag: LAG info struct
*/
@@ -402,12 +516,11 @@ static u16
ice_lag_qbuf_recfg(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *qbuf,
u16 vsi_num, u16 numq, u8 tc)
{
+ struct ice_pf *pf = hw->back;
struct ice_q_ctx *q_ctx;
u16 qid, count = 0;
- struct ice_pf *pf;
int i;
- pf = hw->back;
for (i = 0; i < numq; i++) {
q_ctx = ice_get_lan_q_ctx(hw, vsi_num, tc, i);
if (!q_ctx) {
@@ -577,7 +690,7 @@ ice_lag_move_vf_node_tc(struct ice_lag *lag, u8 oldport, u8 newport,
}
if (ice_aq_cfg_lan_txq(&lag->pf->hw, qbuf, qbuf_size, valq, oldport,
- newport, NULL)) {
+ newport, ICE_AQC_Q_CFG_TC_CHNG, NULL)) {
dev_warn(dev, "Failure to configure queues for LAG failover\n");
goto qbuf_err;
}
@@ -677,54 +790,6 @@ ice_lag_move_single_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport,
}
/**
- * ice_lag_move_new_vf_nodes - Move Tx scheduling nodes for a VF if required
- * @vf: the VF to move Tx nodes for
- *
- * Called just after configuring new VF queues. Check whether the VF Tx
- * scheduling nodes need to be updated to fail over to the active port. If so,
- * move them now.
- */
-void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
-{
- struct ice_lag_netdev_list ndlist;
- u8 pri_port, act_port;
- struct ice_lag *lag;
- struct ice_vsi *vsi;
- struct ice_pf *pf;
-
- vsi = ice_get_vf_vsi(vf);
-
- if (WARN_ON(!vsi))
- return;
-
- if (WARN_ON(vsi->type != ICE_VSI_VF))
- return;
-
- pf = vf->pf;
- lag = pf->lag;
-
- mutex_lock(&pf->lag_mutex);
- if (!lag->bonded)
- goto new_vf_unlock;
-
- pri_port = pf->hw.port_info->lport;
- act_port = lag->active_port;
-
- if (lag->upper_netdev)
- ice_lag_build_netdev_list(lag, &ndlist);
-
- if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) &&
- lag->bonded && lag->primary && pri_port != act_port &&
- !list_empty(lag->netdev_head))
- ice_lag_move_single_vf_nodes(lag, pri_port, act_port, vsi->idx);
-
- ice_lag_destroy_netdev_list(lag, &ndlist);
-
-new_vf_unlock:
- mutex_unlock(&pf->lag_mutex);
-}
-
-/**
* ice_lag_move_vf_nodes - move Tx scheduling nodes for all VFs to new port
* @lag: lag info struct
* @oldport: lport of previous interface
@@ -767,61 +832,6 @@ void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt)
ice_lag_destroy_netdev_list(lag, &ndlist);
}
-#define ICE_LAG_SRIOV_CP_RECIPE 10
-#define ICE_LAG_SRIOV_TRAIN_PKT_LEN 16
-
-/**
- * ice_lag_cfg_cp_fltr - configure filter for control packets
- * @lag: local interface's lag struct
- * @add: add or remove rule
- */
-static void
-ice_lag_cfg_cp_fltr(struct ice_lag *lag, bool add)
-{
- struct ice_sw_rule_lkup_rx_tx *s_rule = NULL;
- struct ice_vsi *vsi;
- u16 buf_len, opc;
-
- vsi = lag->pf->vsi[0];
-
- buf_len = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule,
- ICE_LAG_SRIOV_TRAIN_PKT_LEN);
- s_rule = kzalloc(buf_len, GFP_KERNEL);
- if (!s_rule) {
- netdev_warn(lag->netdev, "-ENOMEM error configuring CP filter\n");
- return;
- }
-
- if (add) {
- s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
- s_rule->recipe_id = cpu_to_le16(ICE_LAG_SRIOV_CP_RECIPE);
- s_rule->src = cpu_to_le16(vsi->port_info->lport);
- s_rule->act = cpu_to_le32(ICE_FWD_TO_VSI |
- ICE_SINGLE_ACT_LAN_ENABLE |
- ICE_SINGLE_ACT_VALID_BIT |
- FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, vsi->vsi_num));
- s_rule->hdr_len = cpu_to_le16(ICE_LAG_SRIOV_TRAIN_PKT_LEN);
- memcpy(s_rule->hdr_data, lacp_train_pkt, LACP_TRAIN_PKT_LEN);
- opc = ice_aqc_opc_add_sw_rules;
- } else {
- opc = ice_aqc_opc_remove_sw_rules;
- s_rule->index = cpu_to_le16(lag->cp_rule_idx);
- }
- if (ice_aq_sw_rules(&lag->pf->hw, s_rule, buf_len, 1, opc, NULL)) {
- netdev_warn(lag->netdev, "Error %s CP rule for fail-over\n",
- add ? "ADDING" : "REMOVING");
- goto cp_free;
- }
-
- if (add)
- lag->cp_rule_idx = le16_to_cpu(s_rule->index);
- else
- lag->cp_rule_idx = 0;
-
-cp_free:
- kfree(s_rule);
-}
-
/**
* ice_lag_prepare_vf_reset - helper to adjust vf lag for reset
* @lag: lag struct for interface that owns VF
@@ -835,11 +845,20 @@ u8 ice_lag_prepare_vf_reset(struct ice_lag *lag)
u8 pri_prt, act_prt;
if (lag && lag->bonded && lag->primary && lag->upper_netdev) {
- pri_prt = lag->pf->hw.port_info->lport;
- act_prt = lag->active_port;
- if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT) {
- ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
- return act_prt;
+ if (!lag->bond_aa) {
+ pri_prt = lag->pf->hw.port_info->lport;
+ act_prt = lag->active_port;
+ if (act_prt != pri_prt &&
+ act_prt != ICE_LAG_INVALID_PORT) {
+ ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
+ return act_prt;
+ }
+ } else {
+ if (lag->port_bitmap & ICE_LAGS_M) {
+ lag->port_bitmap &= ~ICE_LAGS_M;
+ ice_lag_aa_failover(lag, ICE_LAGP_IDX, NULL);
+ lag->port_bitmap |= ICE_LAGS_M;
+ }
}
}
@@ -857,10 +876,15 @@ void ice_lag_complete_vf_reset(struct ice_lag *lag, u8 act_prt)
{
u8 pri_prt;
- if (lag && lag->bonded && lag->primary &&
- act_prt != ICE_LAG_INVALID_PORT) {
- pri_prt = lag->pf->hw.port_info->lport;
- ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
+ if (lag && lag->bonded && lag->primary) {
+ if (!lag->bond_aa) {
+ pri_prt = lag->pf->hw.port_info->lport;
+ if (act_prt != ICE_LAG_INVALID_PORT)
+ ice_lag_move_vf_nodes_cfg(lag, pri_prt,
+ act_prt);
+ } else {
+ ice_lag_aa_failover(lag, ICE_LAGS_IDX, NULL);
+ }
}
}
@@ -873,13 +897,12 @@ void ice_lag_complete_vf_reset(struct ice_lag *lag, u8 act_prt)
*/
static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
{
- struct netdev_notifier_bonding_info *info;
+ struct netdev_notifier_bonding_info *info = ptr;
struct netdev_bonding_info *bonding_info;
struct net_device *event_netdev;
const char *lag_netdev_name;
event_netdev = netdev_notifier_info_to_dev(ptr);
- info = ptr;
lag_netdev_name = netdev_name(lag->netdev);
bonding_info = &info->bonding_info;
@@ -897,7 +920,7 @@ static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
}
if (bonding_info->slave.state)
- ice_lag_set_backup(lag);
+ ice_lag_set_bkup(lag);
else
ice_lag_set_primary(lag);
@@ -906,6 +929,295 @@ lag_out:
}
/**
+ * ice_lag_aa_qbuf_recfg - fill a single queue buffer for recfg cmd
+ * @hw: HW struct that contains the queue context
+ * @qbuf: pointer to single queue buffer
+ * @vsi_num: index of the VF VSI in PF space
+ * @qnum: queue index
+ *
+ * Return: Zero on success, error code on failure.
+ */
+static int
+ice_lag_aa_qbuf_recfg(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *qbuf,
+ u16 vsi_num, int qnum)
+{
+ struct ice_pf *pf = hw->back;
+ struct ice_q_ctx *q_ctx;
+ u16 q_id;
+
+ q_ctx = ice_get_lan_q_ctx(hw, vsi_num, 0, qnum);
+ if (!q_ctx) {
+ dev_dbg(ice_hw_to_dev(hw), "LAG queue %d no Q context\n", qnum);
+ return -ENOENT;
+ }
+
+ if (q_ctx->q_teid == ICE_INVAL_TEID) {
+ dev_dbg(ice_hw_to_dev(hw), "LAG queue %d INVAL TEID\n", qnum);
+ return -EINVAL;
+ }
+
+ if (q_ctx->q_handle == ICE_INVAL_Q_HANDLE) {
+ dev_dbg(ice_hw_to_dev(hw), "LAG queue %d INVAL Q HANDLE\n", qnum);
+ return -EINVAL;
+ }
+
+ q_id = pf->vsi[vsi_num]->txq_map[q_ctx->q_handle];
+ qbuf->queue_info[0].q_handle = cpu_to_le16(q_id);
+ qbuf->queue_info[0].tc = 0;
+ qbuf->queue_info[0].q_teid = cpu_to_le32(q_ctx->q_teid);
+
+ return 0;
+}
+
+/**
+ * ice_lag_aa_move_vf_qs - Move some/all VF queues to destination
+ * @lag: primary interface's lag struct
+ * @dest: index of destination port
+ * @vsi_num: index of VF VSI in PF space
+ * @all: if true move all queues to destination
+ * @odd: VF-wide odd/even toggle used to split queues
+ * @e_pf: PF struct for the event interface
+ *
+ * the parameter "all" is to control whether we are splitting the queues
+ * between two interfaces or moving them all to the destination interface
+ */
+static void ice_lag_aa_move_vf_qs(struct ice_lag *lag, u8 dest, u16 vsi_num,
+ bool all, bool *odd, struct ice_pf *e_pf)
+{
+ DEFINE_RAW_FLEX(struct ice_aqc_cfg_txqs_buf, qbuf, queue_info, 1);
+ struct ice_hw *old_hw, *new_hw, *pri_hw, *sec_hw;
+ struct device *dev = ice_pf_to_dev(lag->pf);
+ struct ice_vsi_ctx *pv_ctx, *sv_ctx;
+ struct ice_lag_netdev_list ndlist;
+ u16 num_q, qbuf_size, sec_vsi_num;
+ u8 pri_lport, sec_lport;
+ u32 pvf_teid, svf_teid;
+ u16 vf_id;
+
+ vf_id = lag->pf->vsi[vsi_num]->vf->vf_id;
+ /* If sec_vf[] not defined, then no second interface to share with */
+ if (lag->sec_vf[vf_id])
+ sec_vsi_num = lag->sec_vf[vf_id]->idx;
+ else
+ return;
+
+ pri_lport = lag->bond_lport_pri;
+ sec_lport = lag->bond_lport_sec;
+
+ if (pri_lport == ICE_LAG_INVALID_PORT ||
+ sec_lport == ICE_LAG_INVALID_PORT)
+ return;
+
+ if (!e_pf)
+ ice_lag_build_netdev_list(lag, &ndlist);
+
+ pri_hw = &lag->pf->hw;
+ if (e_pf && lag->pf != e_pf)
+ sec_hw = &e_pf->hw;
+ else
+ sec_hw = ice_lag_find_hw_by_lport(lag, sec_lport);
+
+ if (!pri_hw || !sec_hw)
+ return;
+
+ if (dest == ICE_LAGP_IDX) {
+ struct ice_vsi *vsi;
+
+ vsi = ice_get_main_vsi(lag->pf);
+ if (!vsi)
+ return;
+
+ old_hw = sec_hw;
+ new_hw = pri_hw;
+ ice_lag_config_eswitch(lag, vsi->netdev);
+ } else {
+ struct ice_pf *sec_pf = sec_hw->back;
+ struct ice_vsi *vsi;
+
+ vsi = ice_get_main_vsi(sec_pf);
+ if (!vsi)
+ return;
+
+ old_hw = pri_hw;
+ new_hw = sec_hw;
+ ice_lag_config_eswitch(lag, vsi->netdev);
+ }
+
+ pv_ctx = ice_get_vsi_ctx(pri_hw, vsi_num);
+ if (!pv_ctx) {
+ dev_warn(dev, "Unable to locate primary VSI %d context for LAG failover\n",
+ vsi_num);
+ return;
+ }
+
+ sv_ctx = ice_get_vsi_ctx(sec_hw, sec_vsi_num);
+ if (!sv_ctx) {
+ dev_warn(dev, "Unable to locate secondary VSI %d context for LAG failover\n",
+ vsi_num);
+ return;
+ }
+
+ num_q = pv_ctx->num_lan_q_entries[0];
+ qbuf_size = __struct_size(qbuf);
+
+ /* Suspend traffic for primary VSI VF */
+ pvf_teid = le32_to_cpu(pv_ctx->sched.vsi_node[0]->info.node_teid);
+ ice_sched_suspend_resume_elems(pri_hw, 1, &pvf_teid, true);
+
+ /* Suspend traffic for secondary VSI VF */
+ svf_teid = le32_to_cpu(sv_ctx->sched.vsi_node[0]->info.node_teid);
+ ice_sched_suspend_resume_elems(sec_hw, 1, &svf_teid, true);
+
+ for (int i = 0; i < num_q; i++) {
+ struct ice_sched_node *n_prt, *q_node, *parent;
+ struct ice_port_info *pi, *new_pi;
+ struct ice_vsi_ctx *src_ctx;
+ struct ice_sched_node *p;
+ struct ice_q_ctx *q_ctx;
+ u16 dst_vsi_num;
+
+ pi = old_hw->port_info;
+ new_pi = new_hw->port_info;
+
+ *odd = !(*odd);
+ if ((dest == ICE_LAGP_IDX && *odd && !all) ||
+ (dest == ICE_LAGS_IDX && !(*odd) && !all) ||
+ lag->q_home[vf_id][i] == dest)
+ continue;
+
+ if (dest == ICE_LAGP_IDX)
+ dst_vsi_num = vsi_num;
+ else
+ dst_vsi_num = sec_vsi_num;
+
+ n_prt = ice_sched_get_free_qparent(new_hw->port_info,
+ dst_vsi_num, 0,
+ ICE_SCHED_NODE_OWNER_LAN);
+ if (!n_prt)
+ continue;
+
+ q_ctx = ice_get_lan_q_ctx(pri_hw, vsi_num, 0, i);
+ if (!q_ctx)
+ continue;
+
+ if (dest == ICE_LAGP_IDX)
+ src_ctx = sv_ctx;
+ else
+ src_ctx = pv_ctx;
+
+ q_node = ice_sched_find_node_by_teid(src_ctx->sched.vsi_node[0],
+ q_ctx->q_teid);
+ if (!q_node)
+ continue;
+
+ qbuf->src_parent_teid = q_node->info.parent_teid;
+ qbuf->dst_parent_teid = n_prt->info.node_teid;
+
+ /* Move the node in the HW/FW */
+ if (ice_lag_aa_qbuf_recfg(pri_hw, qbuf, vsi_num, i))
+ continue;
+
+ if (dest == ICE_LAGP_IDX)
+ ice_aq_cfg_lan_txq(pri_hw, qbuf, qbuf_size, 1,
+ sec_lport, pri_lport,
+ ICE_AQC_Q_CFG_MOVE_TC_CHNG,
+ NULL);
+ else
+ ice_aq_cfg_lan_txq(pri_hw, qbuf, qbuf_size, 1,
+ pri_lport, sec_lport,
+ ICE_AQC_Q_CFG_MOVE_TC_CHNG,
+ NULL);
+
+ /* Move the node in the SW */
+ parent = q_node->parent;
+ if (!parent)
+ continue;
+
+ for (int n = 0; n < parent->num_children; n++) {
+ int j;
+
+ if (parent->children[n] != q_node)
+ continue;
+
+ for (j = n + 1; j < parent->num_children; j++)
+ parent->children[j - 1] = parent->children[j];
+ parent->children[j - 1] = NULL;
+ parent->num_children--;
+ break;
+ }
+
+ p = pi->sib_head[0][q_node->tx_sched_layer];
+ while (p) {
+ if (p->sibling == q_node) {
+ p->sibling = q_node->sibling;
+ break;
+ }
+ p = p->sibling;
+ }
+
+ if (pi->sib_head[0][q_node->tx_sched_layer] == q_node)
+ pi->sib_head[0][q_node->tx_sched_layer] =
+ q_node->sibling;
+
+ q_node->parent = n_prt;
+ q_node->info.parent_teid = n_prt->info.node_teid;
+ q_node->sibling = NULL;
+ p = new_pi->sib_head[0][q_node->tx_sched_layer];
+ if (p) {
+ while (p) {
+ if (!p->sibling) {
+ p->sibling = q_node;
+ break;
+ }
+ p = p->sibling;
+ }
+ } else {
+ new_pi->sib_head[0][q_node->tx_sched_layer] =
+ q_node;
+ }
+
+ n_prt->children[n_prt->num_children++] = q_node;
+ lag->q_home[vf_id][i] = dest;
+ }
+
+ ice_sched_suspend_resume_elems(pri_hw, 1, &pvf_teid, false);
+ ice_sched_suspend_resume_elems(sec_hw, 1, &svf_teid, false);
+
+ if (!e_pf)
+ ice_lag_destroy_netdev_list(lag, &ndlist);
+}
+
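[Editorial aside] The per-queue skip test above is the heart of the A/A split: when "all" is false, the toggle leaves alternate queues on one port and reparents the rest, with q_home[vf][q] recording where each queue lives. A minimal standalone model (queue_moves() is a hypothetical helper, compiled independently of the driver):

#include <stdbool.h>
#include <stdio.h>

#define ICE_LAGP_IDX 0
#define ICE_LAGS_IDX 1

/* Mirror of the per-queue skip test in ice_lag_aa_move_vf_qs() */
static bool queue_moves(int dest, bool all, bool *odd, int q_home)
{
	*odd = !(*odd);
	if ((dest == ICE_LAGP_IDX && *odd && !all) ||
	    (dest == ICE_LAGS_IDX && !(*odd) && !all) ||
	    q_home == dest)
		return false;	/* queue stays where it is */
	return true;		/* queue is reparented to dest */
}

int main(void)
{
	bool odd = true;	/* matches the init in ice_lag_aa_failover() */

	/* Failover toward the secondary with both ports up: queues start
	 * homed on the primary and every other queue moves.
	 */
	for (int q = 0; q < 8; q++)
		printf("q%d %s\n", q,
		       queue_moves(ICE_LAGS_IDX, false, &odd,
				   ICE_LAGP_IDX) ? "moves" : "stays");
	return 0;
}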
+/**
+ * ice_lag_aa_failover - move VF queues in A/A mode
+ * @lag: primary lag struct
+ * @dest: index of destination port
+ * @e_pf: PF struct for event port
+ */
+void ice_lag_aa_failover(struct ice_lag *lag, u8 dest, struct ice_pf *e_pf)
+{
+ bool odd = true, all = false;
+ int i;
+
+ /* Primary can be a target if down (cleanup), but secondary can't */
+ if (dest == ICE_LAGS_IDX && !(lag->port_bitmap & ICE_LAGS_M))
+ return;
+
+ /* Move all queues to a destination if only one port is active,
+ * or no ports are active and dest is primary.
+ */
+ if ((lag->port_bitmap ^ (ICE_LAGP_M | ICE_LAGS_M)) ||
+ (!lag->port_bitmap && dest == ICE_LAGP_IDX))
+ all = true;
+
+ ice_for_each_vsi(lag->pf, i)
+ if (lag->pf->vsi[i] && lag->pf->vsi[i]->type == ICE_VSI_VF)
+ ice_lag_aa_move_vf_qs(lag, dest, i, all, &odd, e_pf);
+}
+
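[Editorial aside] A standalone enumeration of the "move all queues?" decision above, assuming only the mask values added to ice_lag.h by this patch; it confirms queues are split only when both port bits are set:

#include <stdbool.h>
#include <stdio.h>

#define ICE_LAGP_IDX 0
#define ICE_LAGP_M   0x1
#define ICE_LAGS_M   0x2

/* bit 0 = primary up, bit 1 = secondary up; destination is primary */
int main(void)
{
	for (unsigned int bm = 0; bm <= (ICE_LAGP_M | ICE_LAGS_M); bm++) {
		int dest = ICE_LAGP_IDX;
		bool all = (bm ^ (ICE_LAGP_M | ICE_LAGS_M)) ||
			   (!bm && dest == ICE_LAGP_IDX);

		printf("port_bitmap=%u -> %s\n", bm,
		       all ? "move all queues" : "split queues");
	}
	return 0;
}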
+/**
* ice_lag_reclaim_vf_tc - move scheduling nodes back to primary interface
* @lag: primary interface lag struct
* @src_hw: HW struct current node location
@@ -921,13 +1233,12 @@ ice_lag_reclaim_vf_tc(struct ice_lag *lag, struct ice_hw *src_hw, u16 vsi_num,
u16 numq, valq, num_moved, qbuf_size;
u16 buf_size = __struct_size(buf);
struct ice_aqc_cfg_txqs_buf *qbuf;
+ struct ice_hw *hw = &lag->pf->hw;
struct ice_sched_node *n_prt;
__le32 teid, parent_teid;
struct ice_vsi_ctx *ctx;
- struct ice_hw *hw;
u32 tmp_teid;
- hw = &lag->pf->hw;
ctx = ice_get_vsi_ctx(hw, vsi_num);
if (!ctx) {
dev_warn(dev, "Unable to locate VSI context for LAG reclaim\n");
@@ -968,7 +1279,7 @@ ice_lag_reclaim_vf_tc(struct ice_lag *lag, struct ice_hw *src_hw, u16 vsi_num,
if (ice_aq_cfg_lan_txq(hw, qbuf, qbuf_size, numq,
src_hw->port_info->lport, hw->port_info->lport,
- NULL)) {
+ ICE_AQC_Q_CFG_TC_CHNG, NULL)) {
dev_warn(dev, "Failure to configure queues for LAG failover\n");
goto reclaim_qerr;
}
@@ -1039,36 +1350,15 @@ static void ice_lag_link(struct ice_lag *lag)
lag->bonded = true;
lag->role = ICE_LAG_UNSET;
+ lag->need_fltr_cfg = true;
netdev_info(lag->netdev, "Shared SR-IOV resources in bond are active\n");
}
/**
- * ice_lag_config_eswitch - configure eswitch to work with LAG
- * @lag: lag info struct
- * @netdev: active network interface device struct
- *
- * Updates all port representors in eswitch to use @netdev for Tx.
- *
- * Configures the netdev to keep dst metadata (also used in representor Tx).
- * This is required for an uplink without switchdev mode configured.
- */
-static void ice_lag_config_eswitch(struct ice_lag *lag,
- struct net_device *netdev)
-{
- struct ice_repr *repr;
- unsigned long id;
-
- xa_for_each(&lag->pf->eswitch.reprs, id, repr)
- repr->dst->u.port_info.lower_dev = netdev;
-
- netif_keep_dst(netdev);
-}
-
-/**
- * ice_lag_unlink - handle unlink event
+ * ice_lag_act_bkup_unlink - handle unlink event for A/B bond
* @lag: LAG info struct
*/
-static void ice_lag_unlink(struct ice_lag *lag)
+static void ice_lag_act_bkup_unlink(struct ice_lag *lag)
{
u8 pri_port, act_port, loc_port;
struct ice_pf *pf = lag->pf;
@@ -1104,10 +1394,32 @@ static void ice_lag_unlink(struct ice_lag *lag)
}
}
}
+}
- lag->bonded = false;
- lag->role = ICE_LAG_NONE;
- lag->upper_netdev = NULL;
+/**
+ * ice_lag_aa_unlink - handle unlink event for Active-Active bond
+ * @lag: LAG info struct
+ */
+static void ice_lag_aa_unlink(struct ice_lag *lag)
+{
+ struct ice_lag *pri_lag;
+
+ if (lag->primary) {
+ pri_lag = lag;
+ lag->port_bitmap &= ~ICE_LAGP_M;
+ } else {
+ pri_lag = ice_lag_find_primary(lag);
+ if (pri_lag)
+ pri_lag->port_bitmap &= ~ICE_LAGS_M;
+ }
+
+ if (pri_lag) {
+ ice_lag_aa_failover(pri_lag, ICE_LAGP_IDX, lag->pf);
+ if (lag->primary)
+ pri_lag->bond_lport_pri = ICE_LAG_INVALID_PORT;
+ else
+ pri_lag->bond_lport_sec = ICE_LAG_INVALID_PORT;
+ }
}
/**
@@ -1123,10 +1435,20 @@ static void ice_lag_link_unlink(struct ice_lag *lag, void *ptr)
if (netdev != lag->netdev)
return;
- if (info->linking)
+ if (info->linking) {
ice_lag_link(lag);
- else
- ice_lag_unlink(lag);
+ } else {
+ if (lag->bond_aa)
+ ice_lag_aa_unlink(lag);
+ else
+ ice_lag_act_bkup_unlink(lag);
+
+ lag->bonded = false;
+ lag->role = ICE_LAG_NONE;
+ lag->upper_netdev = NULL;
+ lag->bond_aa = false;
+ lag->need_fltr_cfg = false;
+ }
}
/**
@@ -1224,11 +1546,8 @@ ice_lag_set_swid(u16 primary_swid, struct ice_lag *local_lag,
*/
static void ice_lag_primary_swid(struct ice_lag *lag, bool link)
{
- struct ice_hw *hw;
- u16 swid;
-
- hw = &lag->pf->hw;
- swid = hw->port_info->sw_id;
+ struct ice_hw *hw = &lag->pf->hw;
+ u16 swid = hw->port_info->sw_id;
if (ice_share_res(hw, ICE_AQC_RES_TYPE_SWID, link, swid))
dev_warn(ice_pf_to_dev(lag->pf), "Failure to set primary interface shared status\n");
@@ -1241,12 +1560,10 @@ static void ice_lag_primary_swid(struct ice_lag *lag, bool link)
*/
static void ice_lag_add_prune_list(struct ice_lag *lag, struct ice_pf *event_pf)
{
- u16 num_vsi, rule_buf_sz, vsi_list_id, event_vsi_num, prim_vsi_idx;
- struct ice_sw_rule_vsi_list *s_rule = NULL;
+ u16 rule_buf_sz, vsi_list_id, event_vsi_num, prim_vsi_idx, num_vsi = 1;
+ struct ice_sw_rule_vsi_list *s_rule;
struct device *dev;
- num_vsi = 1;
-
dev = ice_pf_to_dev(lag->pf);
event_vsi_num = event_pf->vsi[0]->vsi_num;
prim_vsi_idx = lag->pf->vsi[0]->idx;
@@ -1282,12 +1599,10 @@ static void ice_lag_add_prune_list(struct ice_lag *lag, struct ice_pf *event_pf)
*/
static void ice_lag_del_prune_list(struct ice_lag *lag, struct ice_pf *event_pf)
{
- u16 num_vsi, vsi_num, vsi_idx, rule_buf_sz, vsi_list_id;
- struct ice_sw_rule_vsi_list *s_rule = NULL;
+ u16 vsi_num, vsi_idx, rule_buf_sz, vsi_list_id, num_vsi = 1;
+ struct ice_sw_rule_vsi_list *s_rule;
struct device *dev;
- num_vsi = 1;
-
dev = ice_pf_to_dev(lag->pf);
vsi_num = event_pf->vsi[0]->vsi_num;
vsi_idx = lag->pf->vsi[0]->idx;
@@ -1335,6 +1650,11 @@ static void ice_lag_init_feature_support_flag(struct ice_pf *pf)
ice_set_feature_support(pf, ICE_F_SRIOV_LAG);
else
ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
+
+ if (caps->sriov_aa_lag && ice_pkg_has_lport_extract(&pf->hw))
+ ice_set_feature_support(pf, ICE_F_SRIOV_AA_LAG);
+ else
+ ice_clear_feature_support(pf, ICE_F_SRIOV_AA_LAG);
}
/**
@@ -1344,11 +1664,10 @@ static void ice_lag_init_feature_support_flag(struct ice_pf *pf)
*/
static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
{
- struct netdev_notifier_changeupper_info *info;
+ struct netdev_notifier_changeupper_info *info = ptr;
struct ice_lag *primary_lag;
struct net_device *netdev;
- info = ptr;
netdev = netdev_notifier_info_to_dev(ptr);
/* not for this netdev */
@@ -1369,6 +1688,9 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
/* Configure primary's SWID to be shared */
ice_lag_primary_swid(lag, true);
primary_lag = lag;
+ lag->bond_lport_pri = lag->pf->hw.port_info->lport;
+ lag->bond_lport_sec = ICE_LAG_INVALID_PORT;
+ lag->port_bitmap = 0;
} else {
u16 swid;
@@ -1378,16 +1700,29 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
swid = primary_lag->pf->hw.port_info->sw_id;
ice_lag_set_swid(swid, lag, true);
ice_lag_add_prune_list(primary_lag, lag->pf);
- ice_lag_cfg_drop_fltr(lag, true);
+ primary_lag->bond_lport_sec =
+ lag->pf->hw.port_info->lport;
}
/* add filter for primary control packets */
- ice_lag_cfg_cp_fltr(lag, true);
+ ice_lag_cfg_lp_fltr(lag, true, true);
} else {
if (!primary_lag && lag->primary)
primary_lag = lag;
+ if (primary_lag) {
+ for (int i = 0; i < ICE_MAX_SRIOV_VFS; i++) {
+ if (primary_lag->sec_vf[i]) {
+ ice_vsi_release(primary_lag->sec_vf[i]);
+ primary_lag->sec_vf[i] = NULL;
+ }
+ }
+ }
+
if (!lag->primary) {
ice_lag_set_swid(0, lag, false);
+ if (primary_lag)
+ primary_lag->bond_lport_sec =
+ ICE_LAG_INVALID_PORT;
} else {
if (primary_lag && lag->primary) {
ice_lag_primary_swid(lag, false);
@@ -1395,7 +1730,7 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
}
}
/* remove filter for control packets */
- ice_lag_cfg_cp_fltr(lag, false);
+ ice_lag_cfg_lp_fltr(lag, false, !lag->bond_aa);
}
}
@@ -1408,7 +1743,7 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
*/
static void ice_lag_monitor_link(struct ice_lag *lag, void *ptr)
{
- struct netdev_notifier_changeupper_info *info;
+ struct netdev_notifier_changeupper_info *info = ptr;
struct ice_hw *prim_hw, *active_hw;
struct net_device *event_netdev;
struct ice_pf *pf;
@@ -1421,19 +1756,34 @@ static void ice_lag_monitor_link(struct ice_lag *lag, void *ptr)
if (!netif_is_same_ice(lag->pf, event_netdev))
return;
+ if (info->upper_dev != lag->upper_netdev)
+ return;
+
+ if (info->linking)
+ return;
+
pf = lag->pf;
prim_hw = &pf->hw;
prim_port = prim_hw->port_info->lport;
- info = (struct netdev_notifier_changeupper_info *)ptr;
- if (info->upper_dev != lag->upper_netdev)
- return;
-
- if (!info->linking) {
- /* Since there are only two interfaces allowed in SRIOV+LAG, if
- * one port is leaving, then nodes need to be on primary
- * interface.
- */
+ /* Since there are only two interfaces allowed in SRIOV+LAG, if
+ * one port is leaving, then nodes need to be on primary
+ * interface.
+ */
+ if (lag->bond_aa) {
+ struct ice_netdev_priv *e_ndp;
+ struct ice_pf *e_pf;
+
+ e_ndp = netdev_priv(event_netdev);
+ e_pf = e_ndp->vsi->back;
+
+ if (lag->bond_lport_pri != ICE_LAG_INVALID_PORT &&
+ lag->port_bitmap & ICE_LAGS_M) {
+ lag->port_bitmap &= ~ICE_LAGS_M;
+ ice_lag_aa_failover(lag, ICE_LAGP_IDX, e_pf);
+ lag->bond_lport_sec = ICE_LAG_INVALID_PORT;
+ }
+ } else {
if (prim_port != lag->active_port &&
lag->active_port != ICE_LAG_INVALID_PORT) {
active_hw = ice_lag_find_hw_by_lport(lag,
@@ -1445,45 +1795,32 @@ static void ice_lag_monitor_link(struct ice_lag *lag, void *ptr)
}
/**
- * ice_lag_monitor_active - main PF keep track of which port is active
+ * ice_lag_monitor_act_bkup - keep track of which port is active in A/B LAG
* @lag: lag info struct
- * @ptr: opaque data containing notifier event
+ * @b_info: bonding info
+ * @event_netdev: net_device for target netdev
*
* This function is for the primary PF to monitor changes in which port is
* active and handle changes for SRIOV VF functionality
*/
-static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
+static void ice_lag_monitor_act_bkup(struct ice_lag *lag,
+ struct netdev_bonding_info *b_info,
+ struct net_device *event_netdev)
{
- struct net_device *event_netdev, *event_upper;
- struct netdev_notifier_bonding_info *info;
- struct netdev_bonding_info *bonding_info;
struct ice_netdev_priv *event_np;
struct ice_pf *pf, *event_pf;
u8 prim_port, event_port;
- if (!lag->primary)
- return;
-
pf = lag->pf;
if (!pf)
return;
- event_netdev = netdev_notifier_info_to_dev(ptr);
- rcu_read_lock();
- event_upper = netdev_master_upper_dev_get_rcu(event_netdev);
- rcu_read_unlock();
- if (!netif_is_ice(event_netdev) || event_upper != lag->upper_netdev)
- return;
-
event_np = netdev_priv(event_netdev);
event_pf = event_np->vsi->back;
event_port = event_pf->hw.port_info->lport;
prim_port = pf->hw.port_info->lport;
- info = (struct netdev_notifier_bonding_info *)ptr;
- bonding_info = &info->bonding_info;
-
- if (!bonding_info->slave.state) {
+ if (!b_info->slave.state) {
/* if no port is currently active, then nodes and filters exist
* on primary port, check if we need to move them
*/
@@ -1520,6 +1857,128 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
}
/**
+ * ice_lag_aa_clear_spoof - adjust the placeholder VSI spoofing for A/A LAG
+ * @vsi: placeholder VSI to adjust
+ */
+static void ice_lag_aa_clear_spoof(struct ice_vsi *vsi)
+{
+ ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
+}
+
+/**
+ * ice_lag_monitor_act_act - Keep track of active ports in A/A LAG
+ * @lag: lag struct for primary interface
+ * @b_info: bonding_info for event
+ * @event_netdev: net_device for target netdev
+ */
+static void ice_lag_monitor_act_act(struct ice_lag *lag,
+ struct netdev_bonding_info *b_info,
+ struct net_device *event_netdev)
+{
+ struct ice_netdev_priv *event_np;
+ u8 prim_port, event_port;
+ struct ice_pf *event_pf;
+
+ event_np = netdev_priv(event_netdev);
+ event_pf = event_np->vsi->back;
+ event_port = event_pf->hw.port_info->lport;
+ prim_port = lag->pf->hw.port_info->lport;
+
+ if (b_info->slave.link == BOND_LINK_UP) {
+ /* Port is coming up */
+ if (prim_port == event_port) {
+ /* Processing event for primary interface */
+ if (lag->bond_lport_pri == ICE_LAG_INVALID_PORT)
+ return;
+
+ if (!(lag->port_bitmap & ICE_LAGP_M)) {
+ /* Primary port was not marked up before, move
+ * some|all VF queues to it and mark as up
+ */
+ lag->port_bitmap |= ICE_LAGP_M;
+ ice_lag_aa_failover(lag, ICE_LAGP_IDX, event_pf);
+ }
+ } else {
+ if (lag->bond_lport_sec == ICE_LAG_INVALID_PORT)
+ return;
+
+ /* Create placeholder VSIs on secondary PF.
+ * The placeholder is necessary so that we have
+ * an element that represents the VF on the secondary
+ * interface's scheduling tree. This will be a tree
+ * root for scheduling nodes when they are moved to
+ * the secondary interface.
+ */
+ if (!lag->sec_vf[0]) {
+ struct ice_vsi_cfg_params params = {};
+ struct ice_vsi *nvsi;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ params.type = ICE_VSI_VF;
+ params.port_info = event_pf->hw.port_info;
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ ice_for_each_vf(lag->pf, bkt, vf) {
+ params.vf = vf;
+ nvsi = ice_vsi_setup(event_pf,
+ &params);
+ ice_lag_aa_clear_spoof(nvsi);
+ lag->sec_vf[vf->vf_id] = nvsi;
+ }
+ }
+
+ if (!(lag->port_bitmap & ICE_LAGS_M)) {
+ /* Secondary port was not marked up before,
+ * move some|all VF queues to it and mark as up
+ */
+ lag->port_bitmap |= ICE_LAGS_M;
+ ice_lag_aa_failover(lag, ICE_LAGS_IDX, event_pf);
+ }
+ }
+ } else {
+ /* Port is going down */
+ if (prim_port == event_port) {
+ lag->port_bitmap &= ~ICE_LAGP_M;
+ ice_lag_aa_failover(lag, ICE_LAGS_IDX, event_pf);
+ } else {
+ lag->port_bitmap &= ~ICE_LAGS_M;
+ ice_lag_aa_failover(lag, ICE_LAGP_IDX, event_pf);
+ }
+ }
+}
+
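[Editorial aside] The branches above reduce each bonding_info event to (which port, link direction). A compact summary, illustrative only, with the invalid-lport guards omitted:

/* Event -> action summary for ice_lag_monitor_act_act():
 *
 *   event port   link   action
 *   ----------   ----   -------------------------------------------
 *   primary      up     set ICE_LAGP_M, failover dest = primary
 *   secondary    up     create placeholder VSIs if missing, set
 *                       ICE_LAGS_M, failover dest = secondary
 *   primary      down   clear ICE_LAGP_M, failover dest = secondary
 *   secondary    down   clear ICE_LAGS_M, failover dest = primary
 */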
+/**
+ * ice_lag_monitor_info - Calls relevant A/A or A/B monitoring function
+ * @lag: lag info struct
+ * @ptr: opaque data containing notifier event
+ *
+ * This function is for the primary PF to monitor changes in which port is
+ * active and handle changes for SRIOV VF functionality
+ */
+static void ice_lag_monitor_info(struct ice_lag *lag, void *ptr)
+{
+ struct netdev_notifier_bonding_info *info = ptr;
+ struct net_device *event_netdev, *event_upper;
+ struct netdev_bonding_info *bonding_info;
+
+ if (!lag->primary)
+ return;
+
+ event_netdev = netdev_notifier_info_to_dev(ptr);
+ bonding_info = &info->bonding_info;
+ rcu_read_lock();
+ event_upper = netdev_master_upper_dev_get_rcu(event_netdev);
+ rcu_read_unlock();
+ if (!netif_is_ice(event_netdev) || event_upper != lag->upper_netdev)
+ return;
+
+ if (lag->bond_aa)
+ ice_lag_monitor_act_act(lag, bonding_info, event_netdev);
+ else
+ ice_lag_monitor_act_bkup(lag, bonding_info, event_netdev);
+}
+/**
* ice_lag_chk_comp - evaluate bonded interface for feature support
* @lag: lag info struct
* @ptr: opaque data for netdev event info
@@ -1527,13 +1986,21 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
static bool
ice_lag_chk_comp(struct ice_lag *lag, void *ptr)
{
+ struct netdev_notifier_bonding_info *info = ptr;
struct net_device *event_netdev, *event_upper;
- struct netdev_notifier_bonding_info *info;
struct netdev_bonding_info *bonding_info;
struct list_head *tmp;
struct device *dev;
int count = 0;
+ /* All members need to know whether the bond is A/A or A/B */
+ bonding_info = &info->bonding_info;
+ lag->bond_mode = bonding_info->master.bond_mode;
+ if (lag->bond_mode != BOND_MODE_ACTIVEBACKUP)
+ lag->bond_aa = true;
+ else
+ lag->bond_aa = false;
+
if (!lag->primary)
return true;
@@ -1554,13 +2021,9 @@ ice_lag_chk_comp(struct ice_lag *lag, void *ptr)
return false;
}
- info = (struct netdev_notifier_bonding_info *)ptr;
- bonding_info = &info->bonding_info;
- lag->bond_mode = bonding_info->master.bond_mode;
- if (lag->bond_mode != BOND_MODE_ACTIVEBACKUP) {
- dev_info(dev, "Bond Mode not ACTIVE-BACKUP - VF LAG disabled\n");
+ if (lag->bond_aa && !ice_is_feature_supported(lag->pf,
+ ICE_F_SRIOV_AA_LAG))
return false;
- }
list_for_each(tmp, lag->netdev_head) {
struct ice_dcbx_cfg *dcb_cfg, *peer_dcb_cfg;
@@ -1664,10 +2127,9 @@ ice_lag_unregister(struct ice_lag *lag, struct net_device *event_netdev)
static void
ice_lag_monitor_rdma(struct ice_lag *lag, void *ptr)
{
- struct netdev_notifier_changeupper_info *info;
+ struct netdev_notifier_changeupper_info *info = ptr;
struct net_device *netdev;
- info = ptr;
netdev = netdev_notifier_info_to_dev(ptr);
if (netdev != lag->netdev)
@@ -1715,12 +2177,30 @@ static void ice_lag_chk_disabled_bond(struct ice_lag *lag, void *ptr)
*/
static void ice_lag_disable_sriov_bond(struct ice_lag *lag)
{
- struct ice_netdev_priv *np;
- struct ice_pf *pf;
+ struct ice_netdev_priv *np = netdev_priv(lag->netdev);
+ struct ice_pf *pf = np->vsi->back;
- np = netdev_priv(lag->netdev);
- pf = np->vsi->back;
ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
+ ice_clear_feature_support(pf, ICE_F_SRIOV_AA_LAG);
+}
+
+/**
+ * ice_lag_preset_drop_fltr - preset drop filter for A/B bonds
+ * @lag: local lag struct
+ * @ptr: opaque data containing event
+ *
+ * Sets the initial drop filter for secondary interface in an
+ * active-backup bond
+ */
+static void ice_lag_preset_drop_fltr(struct ice_lag *lag, void *ptr)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+
+ if (netdev != lag->netdev || lag->primary || !lag->need_fltr_cfg)
+ return;
+
+ ice_lag_cfg_drop_fltr(lag, true);
+ lag->need_fltr_cfg = false;
}
/**
@@ -1761,10 +2241,12 @@ static void ice_lag_process_event(struct work_struct *work)
ice_lag_unregister(lag_work->lag, netdev);
goto lag_cleanup;
}
- ice_lag_monitor_active(lag_work->lag,
- &lag_work->info.bonding_info);
ice_lag_cfg_pf_fltrs(lag_work->lag,
&lag_work->info.bonding_info);
+ ice_lag_preset_drop_fltr(lag_work->lag,
+ &lag_work->info.bonding_info);
+ ice_lag_monitor_info(lag_work->lag,
+ &lag_work->info.bonding_info);
}
ice_lag_info_event(lag_work->lag, &lag_work->info.bonding_info);
break;
@@ -1837,9 +2319,8 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
lag_work->lag = lag;
lag_work->event = event;
if (event == NETDEV_CHANGEUPPER) {
- struct netdev_notifier_changeupper_info *info;
+ struct netdev_notifier_changeupper_info *info = ptr;
- info = ptr;
upper_netdev = info->upper_dev;
} else {
upper_netdev = netdev_master_upper_dev_get(netdev);
@@ -1889,10 +2370,8 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
*/
static int ice_register_lag_handler(struct ice_lag *lag)
{
+ struct notifier_block *notif_blk = &lag->notif_block;
struct device *dev = ice_pf_to_dev(lag->pf);
- struct notifier_block *notif_blk;
-
- notif_blk = &lag->notif_block;
if (!notif_blk->notifier_call) {
notif_blk->notifier_call = ice_lag_event_handler;
@@ -1912,10 +2391,9 @@ static int ice_register_lag_handler(struct ice_lag *lag)
*/
static void ice_unregister_lag_handler(struct ice_lag *lag)
{
+ struct notifier_block *notif_blk = &lag->notif_block;
struct device *dev = ice_pf_to_dev(lag->pf);
- struct notifier_block *notif_blk;
- notif_blk = &lag->notif_block;
if (notif_blk->notifier_call) {
unregister_netdevice_notifier(notif_blk);
dev_dbg(dev, "LAG event handler unregistered\n");
@@ -1977,13 +2455,12 @@ ice_lag_move_vf_nodes_tc_sync(struct ice_lag *lag, struct ice_hw *dest_hw,
u16 numq, valq, num_moved, qbuf_size;
u16 buf_size = __struct_size(buf);
struct ice_aqc_cfg_txqs_buf *qbuf;
+ struct ice_hw *hw = &lag->pf->hw;
struct ice_sched_node *n_prt;
__le32 teid, parent_teid;
struct ice_vsi_ctx *ctx;
- struct ice_hw *hw;
u32 tmp_teid;
- hw = &lag->pf->hw;
ctx = ice_get_vsi_ctx(hw, vsi_num);
if (!ctx) {
dev_warn(dev, "LAG rebuild failed after reset due to VSI Context failure\n");
@@ -2020,7 +2497,8 @@ ice_lag_move_vf_nodes_tc_sync(struct ice_lag *lag, struct ice_hw *dest_hw,
}
if (ice_aq_cfg_lan_txq(hw, qbuf, qbuf_size, numq, hw->port_info->lport,
- dest_hw->port_info->lport, NULL)) {
+ dest_hw->port_info->lport,
+ ICE_AQC_Q_CFG_TC_CHNG, NULL)) {
dev_warn(dev, "Failure to configure queues for LAG reset rebuild\n");
goto sync_qerr;
}
@@ -2116,9 +2594,13 @@ int ice_init_lag(struct ice_pf *pf)
lag->netdev = vsi->netdev;
lag->role = ICE_LAG_NONE;
lag->active_port = ICE_LAG_INVALID_PORT;
+ lag->port_bitmap = 0x0;
lag->bonded = false;
+ lag->bond_aa = false;
+ lag->need_fltr_cfg = false;
lag->upper_netdev = NULL;
lag->notif_block.notifier_call = NULL;
+ memset(lag->sec_vf, 0, sizeof(lag->sec_vf));
err = ice_register_lag_handler(lag);
if (err) {
@@ -2136,6 +2618,11 @@ int ice_init_lag(struct ice_pf *pf)
if (err)
goto free_rcp_res;
+ err = ice_create_lag_recipe(&pf->hw, &lag->act_act_recipe,
+ ice_lport_rcp, 1);
+ if (err)
+ goto free_lport_res;
+
/* associate recipes to profiles */
for (n = 0; n < ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER; n++) {
err = ice_aq_get_recipe_to_profile(&pf->hw, n,
@@ -2145,7 +2632,8 @@ int ice_init_lag(struct ice_pf *pf)
if (recipe_bits & BIT(ICE_SW_LKUP_DFLT)) {
recipe_bits |= BIT(lag->pf_recipe) |
- BIT(lag->lport_recipe);
+ BIT(lag->lport_recipe) |
+ BIT(lag->act_act_recipe);
ice_aq_map_recipe_to_profile(&pf->hw, n,
recipe_bits, NULL);
}
@@ -2156,9 +2644,13 @@ int ice_init_lag(struct ice_pf *pf)
dev_dbg(dev, "INIT LAG complete\n");
return 0;
+free_lport_res:
+ ice_free_hw_res(&pf->hw, ICE_AQC_RES_TYPE_RECIPE, 1,
+ &lag->lport_recipe);
+
free_rcp_res:
ice_free_hw_res(&pf->hw, ICE_AQC_RES_TYPE_RECIPE, 1,
- &pf->lag->pf_recipe);
+ &lag->pf_recipe);
lag_error:
kfree(lag);
pf->lag = NULL;
@@ -2174,9 +2666,7 @@ lag_error:
*/
void ice_deinit_lag(struct ice_pf *pf)
{
- struct ice_lag *lag;
-
- lag = pf->lag;
+ struct ice_lag *lag = pf->lag;
if (!lag)
return;
@@ -2245,11 +2735,15 @@ void ice_lag_rebuild(struct ice_pf *pf)
ice_lag_move_vf_nodes_sync(prim_lag, &pf->hw);
}
- ice_lag_cfg_cp_fltr(lag, true);
+ if (!lag->bond_aa) {
+ ice_lag_cfg_lp_fltr(lag, true, true);
+ if (lag->pf_rx_rule_id)
+ if (ice_lag_cfg_dflt_fltr(lag, true))
+ dev_err(ice_pf_to_dev(pf), "Error adding default VSI rule in rebuild\n");
+ } else {
+ ice_lag_cfg_lp_fltr(lag, true, false);
+ }
- if (lag->pf_rx_rule_id)
- if (ice_lag_cfg_dflt_fltr(lag, true))
- dev_err(ice_pf_to_dev(pf), "Error adding default VSI rule in rebuild\n");
ice_clear_rdma_cap(pf);
lag_rebuild_out:
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
index 69347d9f986b..f77ebcd61042 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.h
+++ b/drivers/net/ethernet/intel/ice/ice_lag.h
@@ -14,7 +14,11 @@ enum ice_lag_role {
ICE_LAG_UNSET
};
-#define ICE_LAG_INVALID_PORT 0xFF
+#define ICE_LAG_INVALID_PORT 0xFF
+#define ICE_LAGP_IDX 0
+#define ICE_LAGS_IDX 1
+#define ICE_LAGP_M 0x1
+#define ICE_LAGS_M 0x2
#define ICE_LAG_RESET_RETRIES 5
#define ICE_SW_DEFAULT_PROFILE 0
@@ -41,12 +45,26 @@ struct ice_lag {
u8 active_port; /* lport value for the current active port */
u8 bonded:1; /* currently bonded */
u8 primary:1; /* this is primary */
+ u8 bond_aa:1; /* is this bond active-active */
+ u8 need_fltr_cfg:1; /* fltrs for A/A bond still need to be made */
+ u8 port_bitmap:2; /* bitmap of active ports */
+ u8 bond_lport_pri; /* lport value for primary PF */
+ u8 bond_lport_sec; /* lport value for secondary PF */
+
+ /* q_home keeps track of which interface each queue currently lives on */
+ u8 q_home[ICE_MAX_SRIOV_VFS][ICE_MAX_RSS_QS_PER_VF];
+
+ /* placeholder VSIs to hang VF queues from on the secondary interface */
+ struct ice_vsi *sec_vf[ICE_MAX_SRIOV_VFS];
+
u16 pf_recipe;
u16 lport_recipe;
+ u16 act_act_recipe;
u16 pf_rx_rule_id;
u16 pf_tx_rule_id;
u16 cp_rule_idx;
u16 lport_rule_idx;
+ u16 act_act_rule_idx;
u8 role;
};
@@ -64,7 +82,7 @@ struct ice_lag_work {
} info;
};
-void ice_lag_move_new_vf_nodes(struct ice_vf *vf);
+void ice_lag_aa_failover(struct ice_lag *lag, u8 dest, struct ice_pf *e_pf);
int ice_init_lag(struct ice_pf *pf);
void ice_deinit_lag(struct ice_pf *pf);
void ice_lag_rebuild(struct ice_pf *pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 77ba26538b07..10c312d49e05 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -569,4 +569,45 @@ struct ice_tlan_ctx {
u8 pkt_shaper_prof_idx;
};
+#define ICE_TXTIME_TX_DESC_IDX_M GENMASK(12, 0)
+#define ICE_TXTIME_STAMP_M GENMASK(31, 13)
+
+/* Tx time stamp descriptor */
+struct ice_ts_desc {
+ __le32 tx_desc_idx_tstamp;
+};
+
+#define ICE_TS_DESC(R, i) (&(((struct ice_ts_desc *)((R)->desc))[i]))
+
+#define ICE_TXTIME_MAX_QUEUE 2047
+#define ICE_SET_TXTIME_MAX_Q_AMOUNT 127
+#define ICE_TXTIME_FETCH_TS_DESC_DFLT 8
+#define ICE_TXTIME_FETCH_PROFILE_CNT 16
+
+/* Tx Time queue context data */
+struct ice_txtime_ctx {
+#define ICE_TXTIME_CTX_BASE_S 7
+ u64 base; /* base is defined in 128-byte units */
+ u8 pf_num;
+ u16 vmvf_num;
+ u8 vmvf_type;
+ u16 src_vsi;
+ u8 cpuid;
+ u8 tphrd_desc;
+ u16 qlen;
+ u8 timer_num;
+ u8 txtime_ena_q;
+ u8 drbell_mode_32;
+#define ICE_TXTIME_CTX_DRBELL_MODE_32 1
+ u8 ts_res;
+#define ICE_TXTIME_CTX_RESOLUTION_128NS 7
+ u8 ts_round_type;
+ u8 ts_pacing_slot;
+#define ICE_TXTIME_CTX_FETCH_PROF_ID_0 0
+ u8 merging_ena;
+ u8 ts_fetch_prof_id;
+ u8 ts_fetch_cache_line_aln_thld;
+ u8 tx_pipe_delay_mode;
+};
+
#endif /* _ICE_LAN_TX_RX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index a439b5a61a56..4479c824561e 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -3950,6 +3950,7 @@ void ice_init_feature_support(struct ice_pf *pf)
if (pf->hw.mac_type == ICE_MAC_E830) {
ice_set_feature_support(pf, ICE_F_MBX_LIMIT);
ice_set_feature_support(pf, ICE_F_GCS);
+ ice_set_feature_support(pf, ICE_F_TXTIME);
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 8e0b06c1e02b..86f5859e88ef 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -39,6 +39,7 @@ static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_IMPORT_NS("LIBIE");
MODULE_IMPORT_NS("LIBIE_ADMINQ");
+MODULE_IMPORT_NS("LIBIE_FWLOG");
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
@@ -1251,32 +1252,6 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
}
/**
- * ice_get_fwlog_data - copy the FW log data from ARQ event
- * @pf: PF that the FW log event is associated with
- * @event: event structure containing FW log data
- */
-static void
-ice_get_fwlog_data(struct ice_pf *pf, struct ice_rq_event_info *event)
-{
- struct ice_fwlog_data *fwlog;
- struct ice_hw *hw = &pf->hw;
-
- fwlog = &hw->fwlog_ring.rings[hw->fwlog_ring.tail];
-
- memset(fwlog->data, 0, PAGE_SIZE);
- fwlog->data_size = le16_to_cpu(event->desc.datalen);
-
- memcpy(fwlog->data, event->msg_buf, fwlog->data_size);
- ice_fwlog_ring_increment(&hw->fwlog_ring.tail, hw->fwlog_ring.size);
-
- if (ice_fwlog_ring_full(&hw->fwlog_ring)) {
- /* the rings are full so bump the head to create room */
- ice_fwlog_ring_increment(&hw->fwlog_ring.head,
- hw->fwlog_ring.size);
- }
-}
-
-/**
* ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
* @pf: pointer to the PF private structure
* @task: intermediate helper storage and identifier for waiting
@@ -1566,7 +1541,8 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
}
break;
case ice_aqc_opc_fw_logs_event:
- ice_get_fwlog_data(pf, &event);
+ libie_get_fwlog_data(&hw->fwlog, event.msg_buf,
+ le16_to_cpu(event.desc.datalen));
break;
case ice_aqc_opc_lldp_set_mib_change:
ice_dcb_process_lldp_set_mib_change(pf, &event);
@@ -3176,12 +3152,14 @@ static irqreturn_t ice_ll_ts_intr(int __always_unused irq, void *data)
hw = &pf->hw;
tx = &pf->ptp.port.tx;
spin_lock_irqsave(&tx->lock, flags);
- ice_ptp_complete_tx_single_tstamp(tx);
+ if (tx->init) {
+ ice_ptp_complete_tx_single_tstamp(tx);
- idx = find_next_bit_wrap(tx->in_use, tx->len,
- tx->last_ll_ts_idx_read + 1);
- if (idx != tx->len)
- ice_ptp_req_tx_single_tstamp(tx, idx);
+ idx = find_next_bit_wrap(tx->in_use, tx->len,
+ tx->last_ll_ts_idx_read + 1);
+ if (idx != tx->len)
+ ice_ptp_req_tx_single_tstamp(tx, idx);
+ }
spin_unlock_irqrestore(&tx->lock, flags);
val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
@@ -3991,6 +3969,11 @@ static void ice_deinit_pf(struct ice_pf *pf)
pf->avail_rxqs = NULL;
}
+ if (pf->txtime_txqs) {
+ bitmap_free(pf->txtime_txqs);
+ pf->txtime_txqs = NULL;
+ }
+
if (pf->ptp.clock)
ptp_clock_unregister(pf->ptp.clock);
@@ -4084,6 +4067,15 @@ static int ice_init_pf(struct ice_pf *pf)
return -ENOMEM;
}
+ pf->txtime_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
+ if (!pf->txtime_txqs) {
+ bitmap_free(pf->avail_txqs);
+ pf->avail_txqs = NULL;
+ bitmap_free(pf->avail_rxqs);
+ pf->avail_rxqs = NULL;
+ return -ENOMEM;
+ }
+
mutex_init(&pf->vfs.table_lock);
hash_init(pf->vfs.table);
if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
@@ -4536,17 +4528,23 @@ ice_init_tx_topology(struct ice_hw *hw, const struct firmware *firmware)
dev_info(dev, "Tx scheduling layers switching feature disabled\n");
else
dev_info(dev, "Tx scheduling layers switching feature enabled\n");
- /* if there was a change in topology ice_cfg_tx_topo triggered
- * a CORER and we need to re-init hw
+ return 0;
+ } else if (err == -ENODEV) {
+ /* If we failed to re-initialize the device, we can no longer
+ * continue loading.
*/
- ice_deinit_hw(hw);
- err = ice_init_hw(hw);
-
+ dev_warn(dev, "Failed to initialize hardware after applying Tx scheduling configuration.\n");
return err;
} else if (err == -EIO) {
dev_info(dev, "DDP package does not support Tx scheduling layers switching feature - please update to the latest DDP package and try again\n");
+ return 0;
+ } else if (err == -EEXIST) {
+ return 0;
}
+ /* Do not treat this as a fatal error. */
+ dev_info(dev, "Failed to apply Tx scheduling configuration, err %pe\n",
+ ERR_PTR(err));
return 0;
}
@@ -4646,19 +4644,6 @@ static void ice_print_wake_reason(struct ice_pf *pf)
}
/**
- * ice_pf_fwlog_update_module - update 1 module
- * @pf: pointer to the PF struct
- * @log_level: log_level to use for the @module
- * @module: module to update
- */
-void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module)
-{
- struct ice_hw *hw = &pf->hw;
-
- hw->fwlog_cfg.module_entries[module].log_level = log_level;
-}
-
-/**
* ice_register_netdev - register netdev
* @vsi: pointer to the VSI struct
*/
@@ -7513,7 +7498,8 @@ int ice_vsi_open(struct ice_vsi *vsi)
if (err)
goto err_setup_rx;
- ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
+ if (bitmap_empty(pf->txtime_txqs, pf->max_pf_txqs))
+ ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_SF) {
/* Notify the stack of the actual queue counts. */
@@ -9117,7 +9103,7 @@ static int ice_create_q_channels(struct ice_vsi *vsi)
list_add_tail(&ch->list, &vsi->ch_list);
vsi->tc_map_vsi[i] = ch->ch_vsi;
dev_dbg(ice_pf_to_dev(pf),
- "successfully created channel: VSI %pK\n", ch->ch_vsi);
+ "successfully created channel: VSI %p\n", ch->ch_vsi);
}
return 0;
@@ -9302,6 +9288,96 @@ exit:
return ret;
}
+/**
+ * ice_cfg_txtime - configure Tx Time for the Tx ring
+ * @tx_ring: pointer to the Tx ring structure
+ *
+ * Return: 0 on success, negative value on failure.
+ */
+static int ice_cfg_txtime(struct ice_tx_ring *tx_ring)
+{
+ int err, timeout = 50;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ struct ice_pf *pf;
+ u32 queue;
+
+ if (!tx_ring)
+ return -EINVAL;
+
+ vsi = tx_ring->vsi;
+ pf = vsi->back;
+ while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
+ usleep_range(1000, 2000);
+ }
+
+ queue = tx_ring->q_index;
+ dev = ice_pf_to_dev(pf);
+
+ /* Ignore return value, and always attempt to enable queue. */
+ ice_qp_dis(vsi, queue);
+
+ err = ice_qp_ena(vsi, queue);
+ if (err)
+ dev_err(dev, "Failed to enable Tx queue %d for TxTime configuration\n",
+ queue);
+
+ clear_bit(ICE_CFG_BUSY, pf->state);
+ return err;
+}
+
+/**
+ * ice_offload_txtime - set earliest TxTime first
+ * @netdev: network interface device structure
+ * @qopt_off: etf queue option offload from the skb to set
+ *
+ * Return: 0 on success, negative value on failure.
+ */
+static int ice_offload_txtime(struct net_device *netdev,
+ void *qopt_off)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+ struct tc_etf_qopt_offload *qopt;
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_tx_ring *tx_ring;
+ int ret = 0;
+
+ if (!ice_is_feature_supported(pf, ICE_F_TXTIME))
+ return -EOPNOTSUPP;
+
+ qopt = qopt_off;
+ if (!qopt_off || qopt->queue < 0 || qopt->queue >= vsi->num_txq)
+ return -EINVAL;
+
+ if (qopt->enable)
+ set_bit(qopt->queue, pf->txtime_txqs);
+ else
+ clear_bit(qopt->queue, pf->txtime_txqs);
+
+ if (netif_running(vsi->netdev)) {
+ tx_ring = vsi->tx_rings[qopt->queue];
+ ret = ice_cfg_txtime(tx_ring);
+ if (ret)
+ goto err;
+ }
+
+ netdev_info(netdev, "%s TxTime on queue: %i\n",
+ str_enable_disable(qopt->enable), qopt->queue);
+ return 0;
+
+err:
+ netdev_err(netdev, "Failed to %s TxTime on queue: %i\n",
+ str_enable_disable(qopt->enable), qopt->queue);
+
+ if (qopt->enable)
+ clear_bit(qopt->queue, pf->txtime_txqs);
+ return ret;
+}
+
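[Editorial aside] For context on how this path is exercised: TC_SETUP_QDISC_ETF is dispatched from the ice_setup_tc() hunk below when an ETF qdisc is installed with hardware offload. A hedged usage sketch using standard tc-etf syntax, not anything defined in this patch; the device, parent handle, and delta are placeholders:

/* Hedged usage sketch -- standard tc-etf offload invocation:
 *
 *   tc qdisc replace dev eth0 parent 100:1 etf \
 *           clockid CLOCK_TAI delta 300000 offload
 *
 * qopt->queue identifies the Tx queue behind parent 100:1; the bit set
 * in pf->txtime_txqs then gates TxTime (tstamp ring) setup when the
 * queue is disabled and re-enabled by ice_cfg_txtime().
 */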
static LIST_HEAD(ice_block_cb_list);
static int
@@ -9365,6 +9441,8 @@ adev_unlock:
mutex_unlock(&pf->adev_mutex);
}
return err;
+ case TC_SETUP_QDISC_ETF:
+ return ice_offload_txtime(netdev, type_data);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index e358eb1d719f..fb0f6365a6d6 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -2701,16 +2701,19 @@ irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf)
*/
if (hw->dev_caps.ts_dev_info.ts_ll_int_read) {
struct ice_ptp_tx *tx = &pf->ptp.port.tx;
- u8 idx;
+ u8 idx, last;
if (!ice_pf_state_is_nominal(pf))
return IRQ_HANDLED;
spin_lock(&tx->lock);
- idx = find_next_bit_wrap(tx->in_use, tx->len,
- tx->last_ll_ts_idx_read + 1);
- if (idx != tx->len)
- ice_ptp_req_tx_single_tstamp(tx, idx);
+ if (tx->init) {
+ last = tx->last_ll_ts_idx_read + 1;
+ idx = find_next_bit_wrap(tx->in_use, tx->len,
+ last);
+ if (idx != tx->len)
+ ice_ptp_req_tx_single_tstamp(tx, idx);
+ }
spin_unlock(&tx->lock);
return IRQ_HANDLED;
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 9ce4c4db400e..843e82fd3bf9 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -9,7 +9,7 @@
#include "ice_dcb_lib.h"
#include "ice_flow.h"
#include "ice_eswitch.h"
-#include "ice_virtchnl_allowlist.h"
+#include "virt/allowlist.h"
#include "ice_flex_pipe.h"
#include "ice_vf_vsi_vlan_ops.h"
#include "ice_vlan.h"
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h
index d1a998a4bef6..6c4fad09a527 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.h
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.h
@@ -3,9 +3,9 @@
#ifndef _ICE_SRIOV_H_
#define _ICE_SRIOV_H_
-#include "ice_virtchnl_fdir.h"
+#include "virt/fdir.h"
#include "ice_vf_lib.h"
-#include "ice_virtchnl.h"
+#include "virt/virtchnl.h"
/* Static VF transaction/status register def */
#define VF_DEVICE_STATUS 0xAA
diff --git a/drivers/net/ethernet/intel/ice/ice_trace.h b/drivers/net/ethernet/intel/ice/ice_trace.h
index 07aab6e130cd..4f35ef8d6b29 100644
--- a/drivers/net/ethernet/intel/ice/ice_trace.h
+++ b/drivers/net/ethernet/intel/ice/ice_trace.h
@@ -130,7 +130,7 @@ DECLARE_EVENT_CLASS(ice_tx_template,
__entry->buf = buf;
__assign_str(devname);),
- TP_printk("netdev: %s ring: %pK desc: %pK buf %pK", __get_str(devname),
+ TP_printk("netdev: %s ring: %p desc: %p buf %p", __get_str(devname),
__entry->ring, __entry->desc, __entry->buf)
);
@@ -158,7 +158,7 @@ DECLARE_EVENT_CLASS(ice_rx_template,
__entry->desc = desc;
__assign_str(devname);),
- TP_printk("netdev: %s ring: %pK desc: %pK", __get_str(devname),
+ TP_printk("netdev: %s ring: %p desc: %p", __get_str(devname),
__entry->ring, __entry->desc)
);
DEFINE_EVENT(ice_rx_template, ice_clean_rx_irq,
@@ -182,7 +182,7 @@ DECLARE_EVENT_CLASS(ice_rx_indicate_template,
__entry->skb = skb;
__assign_str(devname);),
- TP_printk("netdev: %s ring: %pK desc: %pK skb %pK", __get_str(devname),
+ TP_printk("netdev: %s ring: %p desc: %p skb %p", __get_str(devname),
__entry->ring, __entry->desc, __entry->skb)
);
@@ -205,7 +205,7 @@ DECLARE_EVENT_CLASS(ice_xmit_template,
__entry->skb = skb;
__assign_str(devname);),
- TP_printk("netdev: %s skb: %pK ring: %pK", __get_str(devname),
+ TP_printk("netdev: %s skb: %p ring: %p", __get_str(devname),
__entry->skb, __entry->ring)
);
@@ -228,7 +228,7 @@ DECLARE_EVENT_CLASS(ice_tx_tstamp_template,
TP_fast_assign(__entry->skb = skb;
__entry->idx = idx;),
- TP_printk("skb %pK idx %d",
+ TP_printk("skb %p idx %d",
__entry->skb, __entry->idx)
);
#define DEFINE_TX_TSTAMP_OP_EVENT(name) \
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 29e0088ab6b2..73f08d02f9c7 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -144,6 +144,56 @@ static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring)
}
/**
+ * ice_clean_tstamp_ring - clean time stamp ring
+ * @tx_ring: Tx ring to clean the Time Stamp ring for
+ */
+static void ice_clean_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
+ u32 size;
+
+ if (!tstamp_ring->desc)
+ return;
+
+ size = ALIGN(tstamp_ring->count * sizeof(struct ice_ts_desc),
+ PAGE_SIZE);
+ memset(tstamp_ring->desc, 0, size);
+ tstamp_ring->next_to_use = 0;
+}
+
+/**
+ * ice_free_tstamp_ring - free time stamp resources per queue
+ * @tx_ring: Tx ring to free the Time Stamp ring for
+ */
+void ice_free_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
+ u32 size;
+
+ if (!tstamp_ring->desc)
+ return;
+
+ ice_clean_tstamp_ring(tx_ring);
+ size = ALIGN(tstamp_ring->count * sizeof(struct ice_ts_desc),
+ PAGE_SIZE);
+ dmam_free_coherent(tx_ring->dev, size, tstamp_ring->desc,
+ tstamp_ring->dma);
+ tstamp_ring->desc = NULL;
+}
+
+/**
+ * ice_free_tx_tstamp_ring - free time stamp resources per Tx ring
+ * @tx_ring: Tx ring to free the Time Stamp ring for
+ */
+void ice_free_tx_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ ice_free_tstamp_ring(tx_ring);
+ kfree_rcu(tx_ring->tstamp_ring, rcu);
+ tx_ring->tstamp_ring = NULL;
+ tx_ring->flags &= ~ICE_TX_FLAGS_TXTIME;
+}
+
+/**
* ice_clean_tx_ring - Free any empty Tx buffers
* @tx_ring: ring to be cleaned
*/
@@ -181,6 +231,9 @@ tx_skip_free:
/* cleanup Tx queue statistics */
netdev_tx_reset_queue(txring_txq(tx_ring));
+
+ if (ice_is_txtime_cfg(tx_ring))
+ ice_free_tx_tstamp_ring(tx_ring);
}
/**
@@ -332,6 +385,84 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
}
/**
+ * ice_alloc_tstamp_ring - allocate the Time Stamp ring
+ * @tx_ring: Tx ring to allocate the Time Stamp ring for
+ *
+ * Return: 0 on success, negative on error
+ */
+static int ice_alloc_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ struct ice_tstamp_ring *tstamp_ring;
+
+ /* allocate with kzalloc(), free with kfree_rcu() */
+ tstamp_ring = kzalloc(sizeof(*tstamp_ring), GFP_KERNEL);
+ if (!tstamp_ring)
+ return -ENOMEM;
+
+ tstamp_ring->tx_ring = tx_ring;
+ tx_ring->tstamp_ring = tstamp_ring;
+ tstamp_ring->desc = NULL;
+ tstamp_ring->count = ice_calc_ts_ring_count(tx_ring);
+ tx_ring->flags |= ICE_TX_FLAGS_TXTIME;
+ return 0;
+}
+
+/**
+ * ice_setup_tstamp_ring - allocate the Time Stamp ring
+ * @tx_ring: Tx ring to set up the Time Stamp ring for
+ *
+ * Return: 0 on success, negative on error
+ */
+static int ice_setup_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
+ struct device *dev = tx_ring->dev;
+ u32 size;
+
+ /* round up to nearest page */
+ size = ALIGN(tstamp_ring->count * sizeof(struct ice_ts_desc),
+ PAGE_SIZE);
+ tstamp_ring->desc = dmam_alloc_coherent(dev, size, &tstamp_ring->dma,
+ GFP_KERNEL);
+ if (!tstamp_ring->desc) {
+ dev_err(dev, "Unable to allocate memory for Time stamp Ring, size=%d\n",
+ size);
+ return -ENOMEM;
+ }
+
+ tstamp_ring->next_to_use = 0;
+ return 0;
+}
+
+/**
+ * ice_alloc_setup_tstamp_ring - Allocate and setup the Time Stamp ring
+ * @tx_ring: Tx ring to allocate and setup the Time Stamp ring for
+ *
+ * Return: 0 on success, negative on error
+ */
+int ice_alloc_setup_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ struct device *dev = tx_ring->dev;
+ int err;
+
+ err = ice_alloc_tstamp_ring(tx_ring);
+ if (err) {
+ dev_err(dev, "Unable to allocate Time stamp ring for Tx ring %d\n",
+ tx_ring->q_index);
+ return err;
+ }
+
+ err = ice_setup_tstamp_ring(tx_ring);
+ if (err) {
+ dev_err(dev, "Unable to setup Time stamp ring for Tx ring %d\n",
+ tx_ring->q_index);
+ ice_free_tx_tstamp_ring(tx_ring);
+ return err;
+ }
+ return 0;
+}
+
+/**
* ice_setup_tx_ring - Allocate the Tx descriptors
* @tx_ring: the Tx ring to set up
*
@@ -894,10 +1025,6 @@ ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
rx_buf->page_offset, size);
sinfo->xdp_frags_size += size;
- /* remember frag count before XDP prog execution; bpf_xdp_adjust_tail()
- * can pop off frags but driver has to handle it on its own
- */
- rx_ring->nr_frags = sinfo->nr_frags;
if (page_is_pfmemalloc(rx_buf->page))
xdp_buff_set_frag_pfmemalloc(xdp);
@@ -968,20 +1095,20 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
/**
* ice_get_pgcnts - grab page_count() for gathered fragments
* @rx_ring: Rx descriptor ring to store the page counts on
+ * @ntc: the next to clean element (not included in this frame!)
*
* This function is intended to be called right before running XDP
* program so that the page recycling mechanism will be able to take
* a correct decision regarding underlying pages; this is done in such
* way as XDP program can change the refcount of page
*/
-static void ice_get_pgcnts(struct ice_rx_ring *rx_ring)
+static void ice_get_pgcnts(struct ice_rx_ring *rx_ring, unsigned int ntc)
{
- u32 nr_frags = rx_ring->nr_frags + 1;
u32 idx = rx_ring->first_desc;
struct ice_rx_buf *rx_buf;
u32 cnt = rx_ring->count;
- for (int i = 0; i < nr_frags; i++) {
+ while (idx != ntc) {
rx_buf = &rx_ring->rx_buf[idx];
rx_buf->pgcnt = page_count(rx_buf->page);
@@ -1035,10 +1162,9 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
skb_metadata_set(skb, metasize);
if (unlikely(xdp_buff_has_frags(xdp)))
- xdp_update_skb_shared_info(skb, nr_frags,
- sinfo->xdp_frags_size,
- nr_frags * xdp->frame_sz,
- xdp_buff_is_frag_pfmemalloc(xdp));
+ xdp_update_skb_frags_info(skb, nr_frags, sinfo->xdp_frags_size,
+ nr_frags * xdp->frame_sz,
+ xdp_buff_get_skb_flags(xdp));
return skb;
}
@@ -1115,10 +1241,10 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
sizeof(skb_frag_t) * nr_frags);
- xdp_update_skb_shared_info(skb, skinfo->nr_frags + nr_frags,
- sinfo->xdp_frags_size,
- nr_frags * xdp->frame_sz,
- xdp_buff_is_frag_pfmemalloc(xdp));
+ xdp_update_skb_frags_info(skb, skinfo->nr_frags + nr_frags,
+ sinfo->xdp_frags_size,
+ nr_frags * xdp->frame_sz,
+ xdp_buff_get_skb_flags(xdp));
}
return skb;
@@ -1154,62 +1280,51 @@ ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
}
/**
- * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all frame frags
+ * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all buffers in frame
* @rx_ring: Rx ring with all the auxiliary data
* @xdp: XDP buffer carrying linear + frags part
- * @xdp_xmit: XDP_TX/XDP_REDIRECT verdict storage
- * @ntc: a current next_to_clean value to be stored at rx_ring
+ * @ntc: the next to clean element (not included in this frame!)
* @verdict: return code from XDP program execution
*
- * Walk through gathered fragments and satisfy internal page
- * recycle mechanism; we take here an action related to verdict
- * returned by XDP program;
+ * Called after XDP program is completed, or on error with verdict set to
+ * ICE_XDP_CONSUMED.
+ *
+ * Walk through buffers from first_desc to the end of the frame, releasing
+ * buffers and satisfying internal page recycle mechanism. The action depends
+ * on verdict from XDP program.
*/
static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
- u32 *xdp_xmit, u32 ntc, u32 verdict)
+ u32 ntc, u32 verdict)
{
- u32 nr_frags = rx_ring->nr_frags + 1;
u32 idx = rx_ring->first_desc;
u32 cnt = rx_ring->count;
- u32 post_xdp_frags = 1;
struct ice_rx_buf *buf;
- int i;
+ u32 xdp_frags = 0;
+ int i = 0;
if (unlikely(xdp_buff_has_frags(xdp)))
- post_xdp_frags += xdp_get_shared_info_from_buff(xdp)->nr_frags;
+ xdp_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
- for (i = 0; i < post_xdp_frags; i++) {
+ while (idx != ntc) {
buf = &rx_ring->rx_buf[idx];
+ if (++idx == cnt)
+ idx = 0;
- if (verdict & (ICE_XDP_TX | ICE_XDP_REDIR)) {
+ /* An XDP program could release fragments from the end of the
+ * buffer. For these, we need to keep the pagecnt_bias as-is.
+ * To do this, only adjust pagecnt_bias for fragments up to
+ * the total remaining after the XDP program has run.
+ */
+ if (verdict != ICE_XDP_CONSUMED)
ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
- *xdp_xmit |= verdict;
- } else if (verdict & ICE_XDP_CONSUMED) {
+ else if (i++ <= xdp_frags)
buf->pagecnt_bias++;
- } else if (verdict == ICE_XDP_PASS) {
- ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
- }
ice_put_rx_buf(rx_ring, buf);
-
- if (++idx == cnt)
- idx = 0;
- }
- /* handle buffers that represented frags released by XDP prog;
- * for these we keep pagecnt_bias as-is; refcount from struct page
- * has been decremented within XDP prog and we do not have to increase
- * the biased refcnt
- */
- for (; i < nr_frags; i++) {
- buf = &rx_ring->rx_buf[idx];
- ice_put_rx_buf(rx_ring, buf);
- if (++idx == cnt)
- idx = 0;
}
xdp->data = NULL;
rx_ring->first_desc = ntc;
- rx_ring->nr_frags = 0;
}
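
A minimal standalone sketch of the first_desc-to-ntc walk used above in place
of the old stored nr_frags count, showing how the index wraps at the ring
size (all values are illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int cnt = 8, first_desc = 6, ntc = 2; /* frame wraps ring */
	unsigned int idx = first_desc;

	while (idx != ntc) {
		printf("release rx_buf[%u]\n", idx);
		if (++idx == cnt)
			idx = 0;
	}
	return 0; /* visits buffers 6, 7, 0, 1 */
}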
/**
@@ -1317,6 +1432,10 @@ static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
/* retrieve a buffer from the ring */
rx_buf = ice_get_rx_buf(rx_ring, size, ntc);
+ /* Increment ntc before calls to ice_put_rx_mbuf() */
+ if (++ntc == cnt)
+ ntc = 0;
+
if (!xdp->data) {
void *hard_start;
@@ -1325,24 +1444,23 @@ static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
xdp_buff_clear_frags_flag(xdp);
} else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
- ice_put_rx_mbuf(rx_ring, xdp, NULL, ntc, ICE_XDP_CONSUMED);
+ ice_put_rx_mbuf(rx_ring, xdp, ntc, ICE_XDP_CONSUMED);
break;
}
- if (++ntc == cnt)
- ntc = 0;
/* skip if it is NOP desc */
if (ice_is_non_eop(rx_ring, rx_desc))
continue;
- ice_get_pgcnts(rx_ring);
+ ice_get_pgcnts(rx_ring, ntc);
xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc);
if (xdp_verdict == ICE_XDP_PASS)
goto construct_skb;
total_rx_bytes += xdp_get_buff_len(xdp);
total_rx_pkts++;
- ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
+ ice_put_rx_mbuf(rx_ring, xdp, ntc, xdp_verdict);
+ xdp_xmit |= xdp_verdict & (ICE_XDP_TX | ICE_XDP_REDIR);
continue;
construct_skb:
@@ -1352,10 +1470,10 @@ construct_skb:
skb = ice_construct_skb(rx_ring, xdp);
/* exit if we failed to retrieve a buffer */
if (!skb) {
- rx_ring->ring_stats->rx_stats.alloc_page_failed++;
+ rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
xdp_verdict = ICE_XDP_CONSUMED;
}
- ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
+ ice_put_rx_mbuf(rx_ring, xdp, ntc, xdp_verdict);
if (!skb)
break;
@@ -1835,10 +1953,46 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
/* notify HW of packet */
kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount,
netdev_xmit_more());
- if (kick)
- /* notify HW of packet */
- writel(i, tx_ring->tail);
+ if (!kick)
+ return;
+ if (ice_is_txtime_cfg(tx_ring)) {
+ struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
+ u32 tstamp_count = tstamp_ring->count;
+ u32 j = tstamp_ring->next_to_use;
+ struct ice_ts_desc *ts_desc;
+ struct timespec64 ts;
+ u32 tstamp;
+
+ ts = ktime_to_timespec64(first->skb->tstamp);
+ tstamp = ts.tv_nsec >> ICE_TXTIME_CTX_RESOLUTION_128NS;
+
+ ts_desc = ICE_TS_DESC(tstamp_ring, j);
+ ts_desc->tx_desc_idx_tstamp = ice_build_tstamp_desc(i, tstamp);
+
+ j++;
+ if (j == tstamp_count) {
+ u32 fetch = tstamp_count - tx_ring->count;
+
+ j = 0;
+
+ /* To prevent an MDD, when wrapping the tstamp ring
+ * create additional TS descriptors equal to the number
+ * of the fetch TS descriptors value. HW will merge the
+ * TS descriptors with the same timestamp value into a
+ * single descriptor.
+ */
+ for (; j < fetch; j++) {
+ ts_desc = ICE_TS_DESC(tstamp_ring, j);
+ ts_desc->tx_desc_idx_tstamp =
+ ice_build_tstamp_desc(i, tstamp);
+ }
+ }
+ tstamp_ring->next_to_use = j;
+ writel_relaxed(j, tstamp_ring->tail);
+ } else {
+ writel_relaxed(i, tx_ring->tail);
+ }
return;
dma_error:
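
A worked standalone example of the wrap handling above. The ring sizes are
assumptions for illustration (a 2176-entry tstamp ring over a 2048-entry Tx
ring), not values from this patch:

#include <stdio.h>

int main(void)
{
	unsigned int tx_count = 2048, tstamp_count = 2176;
	unsigned int j = tstamp_count; /* next_to_use just wrapped */

	if (j == tstamp_count) {
		unsigned int fetch = tstamp_count - tx_count; /* 128 */

		for (j = 0; j < fetch; j++)
			; /* duplicate TS descriptor written at slot j */
		printf("next_to_use resumes at %u\n", j); /* 128 */
	}
	return 0;
}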
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index fef750c5f288..841a07bfba54 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -310,6 +310,16 @@ enum ice_dynamic_itr {
#define ICE_TX_LEGACY 1
/* descriptor ring, associated with a VSI */
+struct ice_tstamp_ring {
+ struct ice_tx_ring *tx_ring; /* Backreference to associated Tx ring */
+ dma_addr_t dma; /* physical address of ring */
+ struct rcu_head rcu; /* to avoid race on free */
+ u8 __iomem *tail;
+ void *desc;
+ u16 next_to_use;
+ u16 count;
+} ____cacheline_internodealigned_in_smp;
+
struct ice_rx_ring {
/* CL1 - 1st cacheline starts here */
void *desc; /* Descriptor ring memory */
@@ -358,7 +368,6 @@ struct ice_rx_ring {
struct ice_tx_ring *xdp_ring;
struct ice_rx_ring *next; /* pointer to next ring in q_vector */
struct xsk_buff_pool *xsk_pool;
- u32 nr_frags;
u16 max_frame;
u16 rx_buf_len;
dma_addr_t dma; /* physical address of ring */
@@ -403,9 +412,11 @@ struct ice_tx_ring {
spinlock_t tx_lock;
u32 txq_teid; /* Added Tx queue TEID */
/* CL4 - 4th cacheline starts here */
+ struct ice_tstamp_ring *tstamp_ring;
#define ICE_TX_FLAGS_RING_XDP BIT(0)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG1 BIT(1)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG2 BIT(2)
+#define ICE_TX_FLAGS_TXTIME BIT(3)
u8 flags;
u8 dcb_tc; /* Traffic class of ring */
u16 quanta_prof_id;
@@ -501,6 +512,7 @@ void ice_clean_tx_ring(struct ice_tx_ring *tx_ring);
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring);
int ice_setup_tx_ring(struct ice_tx_ring *tx_ring);
int ice_setup_rx_ring(struct ice_rx_ring *rx_ring);
+int ice_alloc_setup_tstamp_ring(struct ice_tx_ring *tx_ring);
void ice_free_tx_ring(struct ice_tx_ring *tx_ring);
void ice_free_rx_ring(struct ice_rx_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);
@@ -509,4 +521,6 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
u8 *raw_packet);
void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring);
void ice_clean_ctrl_rx_irq(struct ice_rx_ring *rx_ring);
+void ice_free_tx_tstamp_ring(struct ice_tx_ring *tx_ring);
+void ice_free_tstamp_ring(struct ice_tx_ring *tx_ring);
#endif /* _ICE_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index 6cf32b404127..99717730f21a 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -54,6 +54,20 @@ ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
}
/**
+ * ice_build_tstamp_desc - build Tx time stamp descriptor
+ * @tx_desc: Tx LAN descriptor index
+ * @tstamp: time stamp
+ *
+ * Return: Tx time stamp descriptor
+ */
+static inline __le32
+ice_build_tstamp_desc(u16 tx_desc, u32 tstamp)
+{
+ return cpu_to_le32(FIELD_PREP(ICE_TXTIME_TX_DESC_IDX_M, tx_desc) |
+ FIELD_PREP(ICE_TXTIME_STAMP_M, tstamp));
+}
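
A standalone sketch of the FIELD_PREP() packing. The field split is an
assumption for illustration (a 13-bit descriptor index in the low bits); the
real ICE_TXTIME_TX_DESC_IDX_M and ICE_TXTIME_STAMP_M masks come from the ice
register headers, and the >> 7 mirrors how the caller converts nanoseconds
into 128 ns units via ICE_TXTIME_CTX_RESOLUTION_128NS:

#include <stdio.h>
#include <stdint.h>

#define IDX_M   0x00001fffu /* assumed: bits 12:0, Tx descriptor index */
#define STAMP_M 0xffffe000u /* assumed: bits 31:13, timestamp */

static uint32_t build_tstamp_desc(uint16_t tx_desc, uint32_t tstamp)
{
	return (tx_desc & IDX_M) | ((tstamp << 13) & STAMP_M);
}

int main(void)
{
	uint32_t tstamp = 500000 >> 7; /* ns -> 128 ns units */

	printf("desc = 0x%08x\n", build_tstamp_desc(42, tstamp));
	return 0;
}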
+
+/**
* ice_get_vlan_tci - get VLAN TCI from Rx flex descriptor
* @rx_desc: Rx 32b flex descriptor with RXDID=2
*
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 03c6c271865d..b0a1b67071c5 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -17,7 +17,7 @@
#include "ice_protocol_type.h"
#include "ice_sbq_cmd.h"
#include "ice_vlan_mode.h"
-#include "ice_fwlog.h"
+#include <linux/net/intel/libie/fwlog.h>
#include <linux/wait.h>
#include <net/dscp.h>
@@ -293,8 +293,10 @@ struct ice_hw_common_caps {
u8 dcb;
u8 ieee_1588;
u8 rdma;
- u8 roce_lag;
- u8 sriov_lag;
+
+ bool roce_lag;
+ bool sriov_lag;
+ bool sriov_aa_lag;
bool nvm_update_pending_nvm;
bool nvm_update_pending_orom;
@@ -946,9 +948,7 @@ struct ice_hw {
u8 fw_patch; /* firmware patch version */
u32 fw_build; /* firmware build number */
- struct ice_fwlog_cfg fwlog_cfg;
- bool fwlog_supported; /* does hardware support FW logging? */
- struct ice_fwlog_ring fwlog_ring;
+ struct libie_fwlog fwlog;
/* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL
* register. Used for determining the ITR/INTRL granularity during
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 5ee74f3e82dc..de9e81ccee66 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -5,7 +5,7 @@
#include "ice.h"
#include "ice_lib.h"
#include "ice_fltr.h"
-#include "ice_virtchnl_allowlist.h"
+#include "virt/allowlist.h"
/* Public functions which may be accessed by all driver files */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index ffe1f9f830ea..b00708907176 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -13,7 +13,7 @@
#include <linux/avf/virtchnl.h>
#include "ice_type.h"
#include "ice_flow.h"
-#include "ice_virtchnl_fdir.h"
+#include "virt/fdir.h"
#include "ice_vsi_vlan_ops.h"
#define ICE_MAX_SRIOV_VFS 256
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index a3a4eaa17739..575fd48f485f 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -19,52 +19,12 @@ static struct xdp_buff **ice_xdp_buf(struct ice_rx_ring *rx_ring, u32 idx)
}
/**
- * ice_qp_reset_stats - Resets all stats for rings of given index
- * @vsi: VSI that contains rings of interest
- * @q_idx: ring index in array
- */
-static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
-{
- struct ice_vsi_stats *vsi_stat;
- struct ice_pf *pf;
-
- pf = vsi->back;
- if (!pf->vsi_stats)
- return;
-
- vsi_stat = pf->vsi_stats[vsi->idx];
- if (!vsi_stat)
- return;
-
- memset(&vsi_stat->rx_ring_stats[q_idx]->rx_stats, 0,
- sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats));
- memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0,
- sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));
- if (vsi->xdp_rings)
- memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0,
- sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
-}
-
-/**
- * ice_qp_clean_rings - Cleans all the rings of a given index
- * @vsi: VSI that contains rings of interest
- * @q_idx: ring index in array
- */
-static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
-{
- ice_clean_tx_ring(vsi->tx_rings[q_idx]);
- if (vsi->xdp_rings)
- ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
- ice_clean_rx_ring(vsi->rx_rings[q_idx]);
-}
-
-/**
* ice_qvec_toggle_napi - Enables/disables NAPI for a given q_vector
* @vsi: VSI that has netdev
* @q_vector: q_vector that has NAPI context
* @enable: true for enable, false for disable
*/
-static void
+void
ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
bool enable)
{
@@ -83,7 +43,7 @@ ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
* @rx_ring: Rx ring that will have its IRQ disabled
* @q_vector: queue vector
*/
-static void
+void
ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
struct ice_q_vector *q_vector)
{
@@ -113,7 +73,7 @@ ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
* @q_vector: queue vector
* @qid: queue index
*/
-static void
+void
ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector, u16 qid)
{
u16 reg_idx = q_vector->reg_idx;
@@ -143,7 +103,7 @@ ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector, u16 qid)
* @vsi: the VSI that contains queue vector
* @q_vector: queue vector
*/
-static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
+void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
@@ -154,111 +114,6 @@ static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
}
/**
- * ice_qp_dis - Disables a queue pair
- * @vsi: VSI of interest
- * @q_idx: ring index in array
- *
- * Returns 0 on success, negative on failure.
- */
-static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
-{
- struct ice_txq_meta txq_meta = { };
- struct ice_q_vector *q_vector;
- struct ice_tx_ring *tx_ring;
- struct ice_rx_ring *rx_ring;
- int fail = 0;
- int err;
-
- if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
- return -EINVAL;
-
- tx_ring = vsi->tx_rings[q_idx];
- rx_ring = vsi->rx_rings[q_idx];
- q_vector = rx_ring->q_vector;
-
- synchronize_net();
- netif_carrier_off(vsi->netdev);
- netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
-
- ice_qvec_dis_irq(vsi, rx_ring, q_vector);
- ice_qvec_toggle_napi(vsi, q_vector, false);
-
- ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
- err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
- if (!fail)
- fail = err;
- if (vsi->xdp_rings) {
- struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
-
- memset(&txq_meta, 0, sizeof(txq_meta));
- ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
- err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
- &txq_meta);
- if (!fail)
- fail = err;
- }
-
- ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, false);
- ice_qp_clean_rings(vsi, q_idx);
- ice_qp_reset_stats(vsi, q_idx);
-
- return fail;
-}
-
-/**
- * ice_qp_ena - Enables a queue pair
- * @vsi: VSI of interest
- * @q_idx: ring index in array
- *
- * Returns 0 on success, negative on failure.
- */
-static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
-{
- struct ice_q_vector *q_vector;
- int fail = 0;
- bool link_up;
- int err;
-
- err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
- if (!fail)
- fail = err;
-
- if (ice_is_xdp_ena_vsi(vsi)) {
- struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
-
- err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx);
- if (!fail)
- fail = err;
- ice_set_ring_xdp(xdp_ring);
- ice_tx_xsk_pool(vsi, q_idx);
- }
-
- err = ice_vsi_cfg_single_rxq(vsi, q_idx);
- if (!fail)
- fail = err;
-
- q_vector = vsi->rx_rings[q_idx]->q_vector;
- ice_qvec_cfg_msix(vsi, q_vector, q_idx);
-
- err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
- if (!fail)
- fail = err;
-
- ice_qvec_toggle_napi(vsi, q_vector, true);
- ice_qvec_ena_irq(vsi, q_vector);
-
- /* make sure NAPI sees updated ice_{t,x}_ring::xsk_pool */
- synchronize_net();
- ice_get_link_status(vsi->port_info, &link_up);
- if (link_up) {
- netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
- netif_carrier_on(vsi->netdev);
- }
-
- return fail;
-}
-
-/**
* ice_xsk_pool_disable - disable a buffer pool region
* @vsi: Current VSI
* @qid: queue ID
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index 8dc5d55e26c5..600cbeeaa203 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -23,6 +23,13 @@ void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool);
int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc);
+void ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
+ u16 qid);
+void ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
+ bool enable);
+void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector);
+void ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
+ struct ice_q_vector *q_vector);
#else
static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring,
struct xsk_buff_pool __always_unused *xsk_pool)
@@ -75,5 +82,20 @@ ice_realloc_zc_buf(struct ice_vsi __always_unused *vsi,
{
return 0;
}
+
+static inline void
+ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
+ u16 qid) { }
+
+static inline void
+ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
+ bool enable) { }
+
+static inline void
+ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector) { }
+
+static inline void
+ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
+ struct ice_q_vector *q_vector) { }
#endif /* CONFIG_XDP_SOCKETS */
#endif /* !_ICE_XSK_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/virt/allowlist.c
index 4c2ec2337b38..a07efec19c45 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
+++ b/drivers/net/ethernet/intel/ice/virt/allowlist.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */
-#include "ice_virtchnl_allowlist.h"
+#include "allowlist.h"
/* Purpose of this file is to share functionality to allowlist or denylist
* opcodes used in PF <-> VF communication. Group of opcodes:
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.h b/drivers/net/ethernet/intel/ice/virt/allowlist.h
index d3ae86ded219..d3ae86ded219 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.h
+++ b/drivers/net/ethernet/intel/ice/virt/allowlist.h
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/virt/fdir.c
index ae83c3914e29..ae83c3914e29 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/virt/fdir.c
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h b/drivers/net/ethernet/intel/ice/virt/fdir.h
index ac6dcab454b4..ac6dcab454b4 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
+++ b/drivers/net/ethernet/intel/ice/virt/fdir.h
diff --git a/drivers/net/ethernet/intel/ice/virt/queues.c b/drivers/net/ethernet/intel/ice/virt/queues.c
new file mode 100644
index 000000000000..370f6ec2a374
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/virt/queues.c
@@ -0,0 +1,973 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022, Intel Corporation. */
+
+#include "virtchnl.h"
+#include "queues.h"
+#include "ice_vf_lib_private.h"
+#include "ice.h"
+#include "ice_base.h"
+#include "ice_lib.h"
+
+/**
+ * ice_vc_get_max_frame_size - get max frame size allowed for VF
+ * @vf: VF used to determine max frame size
+ *
+ * Max frame size is determined based on the current port's max frame size and
+ * whether a port VLAN is configured on this VF. The VF is not aware whether
+ * it's in a port VLAN, so the PF needs to account for this in max frame size
+ * checks and when sending the max frame size to the VF.
+ */
+u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
+{
+ struct ice_port_info *pi = ice_vf_get_port_info(vf);
+ u16 max_frame_size;
+
+ max_frame_size = pi->phy.link_info.max_frame_size;
+
+ if (ice_vf_is_port_vlan_ena(vf))
+ max_frame_size -= VLAN_HLEN;
+
+ return max_frame_size;
+}
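
A tiny standalone example of the port-VLAN adjustment: VLAN_HLEN is the
4-byte 802.1Q tag, and the 9728-byte port limit is an illustrative value:

#include <stdio.h>

#define VLAN_HLEN 4

int main(void)
{
	unsigned int max_frame_size = 9728; /* illustrative port limit */
	int port_vlan_ena = 1;

	if (port_vlan_ena)
		max_frame_size -= VLAN_HLEN;

	printf("VF max frame: %u\n", max_frame_size); /* 9724 */
	return 0;
}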
+
+/**
+ * ice_vc_isvalid_q_id
+ * @vsi: VSI to check queue ID against
+ * @qid: VSI relative queue ID
+ *
+ * check for a valid queue ID
+ */
+static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u16 qid)
+{
+ /* allocated Tx and Rx queues should be always equal for VF VSI */
+ return qid < vsi->alloc_txq;
+}
+
+/**
+ * ice_vc_isvalid_ring_len
+ * @ring_len: length of ring
+ *
+ * check that the ring length is valid: it must be zero, or a multiple of
+ * ICE_REQ_DESC_MULTIPLE within the supported descriptor range
+ */
+static bool ice_vc_isvalid_ring_len(u16 ring_len)
+{
+ return ring_len == 0 ||
+ (ring_len >= ICE_MIN_NUM_DESC &&
+ ring_len <= ICE_MAX_NUM_DESC_E810 &&
+ !(ring_len % ICE_REQ_DESC_MULTIPLE));
+}
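
A quick standalone check of the ring-length rule. The bounds below (minimum
64, maximum 8160, multiple of 32) are assumed values standing in for
ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC_E810 and ICE_REQ_DESC_MULTIPLE:

#include <stdbool.h>
#include <stdio.h>

#define MIN_DESC 64u
#define MAX_DESC 8160u
#define MULTIPLE 32u

static bool isvalid_ring_len(unsigned int len)
{
	return len == 0 ||
	       (len >= MIN_DESC && len <= MAX_DESC && !(len % MULTIPLE));
}

int main(void)
{
	printf("%d %d %d %d\n",
	       isvalid_ring_len(0),    /* 1: zero is explicitly allowed */
	       isvalid_ring_len(512),  /* 1: in range, multiple of 32 */
	       isvalid_ring_len(100),  /* 0: not a multiple of 32 */
	       isvalid_ring_len(16));  /* 0: below the minimum */
	return 0;
}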
+
+/**
+ * ice_vf_cfg_qs_bw - Configure per queue bandwidth
+ * @vf: pointer to the VF info
+ * @num_queues: number of queues to be configured
+ *
+ * Configure per queue bandwidth.
+ *
+ * Return: 0 on success or negative error value.
+ */
+static int ice_vf_cfg_qs_bw(struct ice_vf *vf, u16 num_queues)
+{
+ struct ice_hw *hw = &vf->pf->hw;
+ struct ice_vsi *vsi;
+ int ret;
+ u16 i;
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ return -EINVAL;
+
+ for (i = 0; i < num_queues; i++) {
+ u32 p_rate, min_rate;
+ u8 tc;
+
+ p_rate = vf->qs_bw[i].peak;
+ min_rate = vf->qs_bw[i].committed;
+ tc = vf->qs_bw[i].tc;
+ if (p_rate)
+ ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc,
+ vf->qs_bw[i].queue_id,
+ ICE_MAX_BW, p_rate);
+ else
+ ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx, tc,
+ vf->qs_bw[i].queue_id,
+ ICE_MAX_BW);
+ if (ret)
+ return ret;
+
+ if (min_rate)
+ ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc,
+ vf->qs_bw[i].queue_id,
+ ICE_MIN_BW, min_rate);
+ else
+ ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx, tc,
+ vf->qs_bw[i].queue_id,
+ ICE_MIN_BW);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vf_cfg_q_quanta_profile - Configure quanta profile
+ * @vf: pointer to the VF info
+ * @quanta_prof_idx: pointer to the quanta profile index
+ * @quanta_size: quanta size to be set
+ *
+ * This function chooses available quanta profile and configures the register.
+ * The quanta profile is evenly divided by the number of device ports, and then
+ * available to the specific PF and VFs. The first profile for each PF is a
+ * reserved default profile. Only quanta size of the rest unused profile can be
+ * modified.
+ *
+ * Return: 0 on success or negative error value.
+ */
+static int ice_vf_cfg_q_quanta_profile(struct ice_vf *vf, u16 quanta_size,
+ u16 *quanta_prof_idx)
+{
+ const u16 n_desc = calc_quanta_desc(quanta_size);
+ struct ice_hw *hw = &vf->pf->hw;
+ const u16 n_cmd = 2 * n_desc;
+ struct ice_pf *pf = vf->pf;
+ u16 per_pf, begin_id;
+ u8 n_used;
+ u32 reg;
+
+ begin_id = (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) / hw->dev_caps.num_funcs *
+ hw->logical_pf_id;
+
+ if (quanta_size == ICE_DFLT_QUANTA) {
+ *quanta_prof_idx = begin_id;
+ } else {
+ per_pf = (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) /
+ hw->dev_caps.num_funcs;
+ n_used = pf->num_quanta_prof_used;
+ if (n_used < per_pf) {
+ *quanta_prof_idx = begin_id + 1 + n_used;
+ pf->num_quanta_prof_used++;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ reg = FIELD_PREP(GLCOMM_QUANTA_PROF_QUANTA_SIZE_M, quanta_size) |
+ FIELD_PREP(GLCOMM_QUANTA_PROF_MAX_CMD_M, n_cmd) |
+ FIELD_PREP(GLCOMM_QUANTA_PROF_MAX_DESC_M, n_desc);
+ wr32(hw, GLCOMM_QUANTA_PROF(*quanta_prof_idx), reg);
+
+ return 0;
+}
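
A worked standalone example of the profile partitioning described above,
assuming GLCOMM_QUANTA_PROF_MAX_INDEX = 15 and an 8-function device (both
illustrative assumptions):

#include <stdio.h>

int main(void)
{
	unsigned int max_index = 15, num_funcs = 8, logical_pf_id = 3;
	unsigned int per_pf = (max_index + 1) / num_funcs; /* 2 */
	unsigned int begin_id = per_pf * logical_pf_id;    /* 6 */

	/* begin_id is the reserved default; the rest are configurable */
	printf("PF%u: default profile %u, configurable %u..%u\n",
	       logical_pf_id, begin_id, begin_id + 1,
	       begin_id + per_pf - 1);
	return 0;
}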
+
+/**
+ * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
+ * @vqs: virtchnl_queue_select structure containing bitmaps to validate
+ *
+ * Return true on successful validation, else false
+ */
+static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
+{
+ if ((!vqs->rx_queues && !vqs->tx_queues) ||
+ vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
+ vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
+ return false;
+
+ return true;
+}
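
A standalone sketch of the bitmap bound check, with ICE_MAX_RSS_QS_PER_VF
assumed to be 16 for illustration: any bit at position 16 or above
invalidates the request, as does an empty one:

#include <stdbool.h>
#include <stdio.h>

#define MAX_QS 16u

static bool validate(unsigned long rx, unsigned long tx)
{
	return (rx || tx) && rx < (1ul << MAX_QS) && tx < (1ul << MAX_QS);
}

int main(void)
{
	printf("%d %d %d\n",
	       validate(0x3, 0x3),      /* 1: queues 0-1 on both sides */
	       validate(0, 0),          /* 0: nothing selected */
	       validate(1ul << 16, 0)); /* 0: queue 16 is out of range */
	return 0;
}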
+
+/**
+ * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
+ * @vsi: VSI of the VF to configure
+ * @q_idx: VF queue index used to determine the queue in the PF's space
+ */
+void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ u32 pfq = vsi->txq_map[q_idx];
+ u32 reg;
+
+ reg = rd32(hw, QINT_TQCTL(pfq));
+
+ /* MSI-X index 0 in the VF's space is always for the OICR, which means
+ * this is most likely a poll mode VF driver, so don't enable an
+ * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
+ */
+ if (!(reg & QINT_TQCTL_MSIX_INDX_M))
+ return;
+
+ wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
+}
+
+/**
+ * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
+ * @vsi: VSI of the VF to configure
+ * @q_idx: VF queue index used to determine the queue in the PF's space
+ */
+void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ u32 pfq = vsi->rxq_map[q_idx];
+ u32 reg;
+
+ reg = rd32(hw, QINT_RQCTL(pfq));
+
+ /* MSI-X index 0 in the VF's space is always for the OICR, which means
+ * this is most likely a poll mode VF driver, so don't enable an
+ * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
+ */
+ if (!(reg & QINT_RQCTL_MSIX_INDX_M))
+ return;
+
+ wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
+}
+
+/**
+ * ice_vc_ena_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to enable all or specific queue(s)
+ */
+int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
+ struct ice_vsi *vsi;
+ unsigned long q_map;
+ u16 vf_q_id;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_validate_vqs_bitmaps(vqs)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Enable only Rx rings, Tx rings were enabled by the FW when the
+ * Tx queue group list was configured and the context bits were
+ * programmed using ice_vsi_cfg_txqs
+ */
+ q_map = vqs->rx_queues;
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Skip queue if enabled */
+ if (test_bit(vf_q_id, vf->rxq_ena))
+ continue;
+
+ if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
+ vf_q_id, vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
+ set_bit(vf_q_id, vf->rxq_ena);
+ }
+
+ q_map = vqs->tx_queues;
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Skip queue if enabled */
+ if (test_bit(vf_q_id, vf->txq_ena))
+ continue;
+
+ ice_vf_ena_txq_interrupt(vsi, vf_q_id);
+ set_bit(vf_q_id, vf->txq_ena);
+ }
+
+ /* Set flag to indicate that queues are enabled */
+ if (v_ret == VIRTCHNL_STATUS_SUCCESS)
+ set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vf_vsi_dis_single_txq - disable a single Tx queue
+ * @vf: VF to disable queue for
+ * @vsi: VSI for the VF
+ * @q_id: VF relative (0-based) queue ID
+ *
+ * Attempt to disable the Tx queue passed in. If the Tx queue was successfully
+ * disabled then clear q_id bit in the enabled queues bitmap and return
+ * success. Otherwise return error.
+ */
+int ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id)
+{
+ struct ice_txq_meta txq_meta = { 0 };
+ struct ice_tx_ring *ring;
+ int err;
+
+ if (!test_bit(q_id, vf->txq_ena))
+ dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
+ q_id, vsi->vsi_num);
+
+ ring = vsi->tx_rings[q_id];
+ if (!ring)
+ return -EINVAL;
+
+ ice_fill_txq_meta(vsi, ring, &txq_meta);
+
+ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring, &txq_meta);
+ if (err) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
+ q_id, vsi->vsi_num);
+ return err;
+ }
+
+ /* Clear enabled queues flag */
+ clear_bit(q_id, vf->txq_ena);
+
+ return 0;
+}
+
+/**
+ * ice_vc_dis_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to disable all or specific queue(s)
+ */
+int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
+ struct ice_vsi *vsi;
+ unsigned long q_map;
+ u16 vf_q_id;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
+ !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_validate_vqs_bitmaps(vqs)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vqs->tx_queues) {
+ q_map = vqs->tx_queues;
+
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (ice_vf_vsi_dis_single_txq(vf, vsi, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+ }
+ }
+
+ q_map = vqs->rx_queues;
+ /* speed up Rx queue disable by batching them if possible */
+ if (q_map &&
+ bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
+ if (ice_vsi_stop_all_rx_rings(vsi)) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
+ vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
+ } else if (q_map) {
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Skip queue if not enabled */
+ if (!test_bit(vf_q_id, vf->rxq_ena))
+ continue;
+
+ if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
+ true)) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
+ vf_q_id, vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Clear enabled queues flag */
+ clear_bit(vf_q_id, vf->rxq_ena);
+ }
+ }
+
+ /* Clear enabled queues flag */
+ if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
+ clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_cfg_interrupt
+ * @vf: pointer to the VF info
+ * @vsi: the VSI being configured
+ * @map: vector map for mapping vectors to queues
+ * @q_vector: structure for interrupt vector
+ * configure the IRQ to queue map
+ */
+static enum virtchnl_status_code
+ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi,
+ struct virtchnl_vector_map *map,
+ struct ice_q_vector *q_vector)
+{
+ u16 vsi_q_id, vsi_q_id_idx;
+ unsigned long qmap;
+
+ q_vector->num_ring_rx = 0;
+ q_vector->num_ring_tx = 0;
+
+ qmap = map->rxq_map;
+ for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
+ vsi_q_id = vsi_q_id_idx;
+
+ if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
+ return VIRTCHNL_STATUS_ERR_PARAM;
+
+ q_vector->num_ring_rx++;
+ q_vector->rx.itr_idx = map->rxitr_idx;
+ vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
+ ice_cfg_rxq_interrupt(vsi, vsi_q_id,
+ q_vector->vf_reg_idx,
+ q_vector->rx.itr_idx);
+ }
+
+ qmap = map->txq_map;
+ for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
+ vsi_q_id = vsi_q_id_idx;
+
+ if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
+ return VIRTCHNL_STATUS_ERR_PARAM;
+
+ q_vector->num_ring_tx++;
+ q_vector->tx.itr_idx = map->txitr_idx;
+ vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
+ ice_cfg_txq_interrupt(vsi, vsi_q_id,
+ q_vector->vf_reg_idx,
+ q_vector->tx.itr_idx);
+ }
+
+ return VIRTCHNL_STATUS_SUCCESS;
+}
+
+/**
+ * ice_vc_cfg_irq_map_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure the IRQ to queue map
+ */
+int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ u16 num_q_vectors_mapped, vsi_id, vector_id;
+ struct virtchnl_irq_map_info *irqmap_info;
+ struct virtchnl_vector_map *map;
+ struct ice_vsi *vsi;
+ int i;
+
+ irqmap_info = (struct virtchnl_irq_map_info *)msg;
+ num_q_vectors_mapped = irqmap_info->num_vectors;
+
+ /* Check to make sure number of VF vectors mapped is not greater than
+ * number of VF vectors originally allocated, and check that
+ * there is actually at least a single VF queue vector mapped
+ */
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
+ vf->num_msix < num_q_vectors_mapped ||
+ !num_q_vectors_mapped) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < num_q_vectors_mapped; i++) {
+ struct ice_q_vector *q_vector;
+
+ map = &irqmap_info->vecmap[i];
+
+ vector_id = map->vector_id;
+ vsi_id = map->vsi_id;
+ /* vector_id is always 0-based for each VF, and can never be
+ * larger than or equal to the max allowed interrupts per VF
+ */
+ if (!(vector_id < vf->num_msix) ||
+ !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
+ (!vector_id && (map->rxq_map || map->txq_map))) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* No need to map VF miscellaneous or rogue vector */
+ if (!vector_id)
+ continue;
+
+ /* Subtract non queue vector from vector_id passed by VF
+ * to get actual number of VSI queue vector array index
+ */
+ q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
+ if (!q_vector) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* look out for an invalid queue index */
+ v_ret = ice_cfg_interrupt(vf, vsi, map, q_vector);
+ if (v_ret)
+ goto error_param;
+ }
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
+ NULL, 0);
+}
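
A small standalone illustration of the vector indexing above: VF vector 0 is
the mailbox/OICR vector, so data vectors index the VSI q_vector array only
after subtracting ICE_NONQ_VECS_VF (assumed to be 1 here):

#include <stdio.h>

#define ICE_NONQ_VECS_VF 1 /* assumed value */

int main(void)
{
	for (unsigned int vector_id = 1; vector_id <= 4; vector_id++)
		printf("VF vector %u -> q_vectors[%u]\n",
		       vector_id, vector_id - ICE_NONQ_VECS_VF);
	return 0;
}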
+
+/**
+ * ice_vc_cfg_q_bw - Configure per queue bandwidth
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer which holds the command descriptor
+ *
+ * Configure VF queues bandwidth.
+ *
+ * Return: 0 on success or negative error value.
+ */
+int ice_vc_cfg_q_bw(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_queues_bw_cfg *qbw =
+ (struct virtchnl_queues_bw_cfg *)msg;
+ struct ice_vsi *vsi;
+ u16 i;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
+ !ice_vc_isvalid_vsi_id(vf, qbw->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (qbw->num_queues > ICE_MAX_RSS_QS_PER_VF ||
+ qbw->num_queues > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
+ dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n",
+ vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ for (i = 0; i < qbw->num_queues; i++) {
+ if (qbw->cfg[i].shaper.peak != 0 && vf->max_tx_rate != 0 &&
+ qbw->cfg[i].shaper.peak > vf->max_tx_rate) {
+ dev_warn(ice_pf_to_dev(vf->pf), "The maximum queue %d rate limit configuration may not take effect because the maximum TX rate for VF-%d is %d\n",
+ qbw->cfg[i].queue_id, vf->vf_id,
+ vf->max_tx_rate);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+ if (qbw->cfg[i].shaper.committed != 0 && vf->min_tx_rate != 0 &&
+ qbw->cfg[i].shaper.committed < vf->min_tx_rate) {
+ dev_warn(ice_pf_to_dev(vf->pf), "The minimum queue %d rate limit configuration may not take effect because the minimum TX rate for VF-%d is %d\n",
+ qbw->cfg[i].queue_id, vf->vf_id,
+ vf->min_tx_rate);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+ if (qbw->cfg[i].queue_id > vf->num_vf_qs) {
+ dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure invalid queue_id\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+ if (qbw->cfg[i].tc >= ICE_MAX_TRAFFIC_CLASS) {
+ dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure a traffic class higher than allowed\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+ }
+
+ for (i = 0; i < qbw->num_queues; i++) {
+ vf->qs_bw[i].queue_id = qbw->cfg[i].queue_id;
+ vf->qs_bw[i].peak = qbw->cfg[i].shaper.peak;
+ vf->qs_bw[i].committed = qbw->cfg[i].shaper.committed;
+ vf->qs_bw[i].tc = qbw->cfg[i].tc;
+ }
+
+ if (ice_vf_cfg_qs_bw(vf, qbw->num_queues))
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+
+err:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUEUE_BW,
+ v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_cfg_q_quanta - Configure per queue quanta
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer which holds the command descriptor
+ *
+ * Configure VF queues quanta.
+ *
+ * Return: 0 on success or negative error value.
+ */
+int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg)
+{
+ u16 quanta_prof_id, quanta_size, start_qid, num_queues, end_qid, i;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_quanta_cfg *qquanta =
+ (struct virtchnl_quanta_cfg *)msg;
+ struct ice_vsi *vsi;
+ int ret;
+
+ start_qid = qquanta->queue_select.start_queue_id;
+ num_queues = qquanta->queue_select.num_queues;
+
+ if (check_add_overflow(start_qid, num_queues, &end_qid)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (end_qid > ICE_MAX_RSS_QS_PER_VF ||
+ end_qid > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
+ dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n",
+ vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ quanta_size = qquanta->quanta_size;
+ if (quanta_size > ICE_MAX_QUANTA_SIZE ||
+ quanta_size < ICE_MIN_QUANTA_SIZE) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (quanta_size % 64) {
+ dev_err(ice_pf_to_dev(vf->pf), "quanta size should be the product of 64\n");
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ ret = ice_vf_cfg_q_quanta_profile(vf, quanta_size,
+ &quanta_prof_id);
+ if (ret) {
+ v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ goto err;
+ }
+
+ for (i = start_qid; i < end_qid; i++)
+ vsi->tx_rings[i]->quanta_prof_id = quanta_prof_id;
+
+err:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUANTA,
+ v_ret, NULL, 0);
+}
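
A standalone sketch of the u16 overflow guard, with a local helper standing
in for the kernel's check_add_overflow(): a start queue of 65000 plus 1000
queues wraps a u16 and must be rejected before any range comparison runs:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool add_overflows_u16(uint16_t a, uint16_t b, uint16_t *sum)
{
	*sum = (uint16_t)(a + b);
	return *sum < a; /* wrapped around */
}

int main(void)
{
	uint16_t end;

	printf("%d\n", add_overflows_u16(65000, 1000, &end)); /* 1 */
	printf("%d\n", add_overflows_u16(8, 8, &end));        /* 0, end 16 */
	return 0;
}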
+
+/**
+ * ice_vc_cfg_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure the Rx/Tx queues
+ */
+int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_vsi_queue_config_info *qci =
+ (struct virtchnl_vsi_queue_config_info *)msg;
+ struct virtchnl_queue_pair_info *qpi;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ int i = -1, q_idx;
+ bool ena_ts;
+ u8 act_prt;
+
+ mutex_lock(&pf->lag_mutex);
+ act_prt = ice_lag_prepare_vf_reset(pf->lag);
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ goto error_param;
+
+ if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id))
+ goto error_param;
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ goto error_param;
+
+ if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
+ qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
+ dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
+ vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
+ goto error_param;
+ }
+
+ for (i = 0; i < qci->num_queue_pairs; i++) {
+ if (!qci->qpair[i].rxq.crc_disable)
+ continue;
+
+ if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_CRC) ||
+ vf->vlan_strip_ena)
+ goto error_param;
+ }
+
+ for (i = 0; i < qci->num_queue_pairs; i++) {
+ qpi = &qci->qpair[i];
+ if (qpi->txq.vsi_id != qci->vsi_id ||
+ qpi->rxq.vsi_id != qci->vsi_id ||
+ qpi->rxq.queue_id != qpi->txq.queue_id ||
+ qpi->txq.headwb_enabled ||
+ !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
+ !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
+ !ice_vc_isvalid_q_id(vsi, qpi->txq.queue_id)) {
+ goto error_param;
+ }
+
+ q_idx = qpi->rxq.queue_id;
+
+ /* make sure selected "q_idx" is in valid range of queues
+ * for selected "vsi"
+ */
+ if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
+ goto error_param;
+ }
+
+ /* copy Tx queue info from VF into VSI */
+ if (qpi->txq.ring_len > 0) {
+ vsi->tx_rings[q_idx]->dma = qpi->txq.dma_ring_addr;
+ vsi->tx_rings[q_idx]->count = qpi->txq.ring_len;
+
+ /* Disable any existing queue first */
+ if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx))
+ goto error_param;
+
+ /* Configure a queue with the requested settings */
+ if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
+ dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n",
+ vf->vf_id, q_idx);
+ goto error_param;
+ }
+ }
+
+ /* copy Rx queue info from VF into VSI */
+ if (qpi->rxq.ring_len > 0) {
+ u16 max_frame_size = ice_vc_get_max_frame_size(vf);
+ struct ice_rx_ring *ring = vsi->rx_rings[q_idx];
+ u32 rxdid;
+
+ ring->dma = qpi->rxq.dma_ring_addr;
+ ring->count = qpi->rxq.ring_len;
+
+ if (qpi->rxq.crc_disable)
+ ring->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
+ else
+ ring->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
+
+ if (qpi->rxq.databuffer_size != 0 &&
+ (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
+ qpi->rxq.databuffer_size < 1024))
+ goto error_param;
+ ring->rx_buf_len = qpi->rxq.databuffer_size;
+ if (qpi->rxq.max_pkt_size > max_frame_size ||
+ qpi->rxq.max_pkt_size < 64)
+ goto error_param;
+
+ ring->max_frame = qpi->rxq.max_pkt_size;
+ /* add space for the port VLAN since the VF driver is
+ * not expected to account for it in the MTU
+ * calculation
+ */
+ if (ice_vf_is_port_vlan_ena(vf))
+ ring->max_frame += VLAN_HLEN;
+
+ if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
+ dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n",
+ vf->vf_id, q_idx);
+ goto error_param;
+ }
+
+ /* If Rx flex desc is supported, select RXDID for Rx
+ * queues. Otherwise, use legacy 32byte descriptor
+ * format. Legacy 16byte descriptor is not supported.
+ * If this RXDID is selected, return error.
+ */
+ if (vf->driver_caps &
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
+ rxdid = qpi->rxq.rxdid;
+ if (!(BIT(rxdid) & pf->supported_rxdids))
+ goto error_param;
+ } else {
+ rxdid = ICE_RXDID_LEGACY_1;
+ }
+
+ ena_ts = ((vf->driver_caps &
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) &&
+ (vf->driver_caps & VIRTCHNL_VF_CAP_PTP) &&
+ (qpi->rxq.flags & VIRTCHNL_PTP_RX_TSTAMP));
+
+ ice_write_qrxflxp_cntxt(&vsi->back->hw,
+ vsi->rxq_map[q_idx], rxdid,
+ ICE_RXDID_PRIO, ena_ts);
+ }
+ }
+
+ ice_lag_complete_vf_reset(pf->lag, act_prt);
+ mutex_unlock(&pf->lag_mutex);
+
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ VIRTCHNL_STATUS_SUCCESS, NULL, 0);
+error_param:
+ /* disable whatever we can */
+ for (; i >= 0; i--) {
+ if (ice_vsi_ctrl_one_rx_ring(vsi, false, i, true))
+ dev_err(ice_pf_to_dev(pf), "VF-%d could not disable RX queue %d\n",
+ vf->vf_id, i);
+ if (ice_vf_vsi_dis_single_txq(vf, vsi, i))
+ dev_err(ice_pf_to_dev(pf), "VF-%d could not disable TX queue %d\n",
+ vf->vf_id, i);
+ }
+
+ ice_lag_complete_vf_reset(pf->lag, act_prt);
+ mutex_unlock(&pf->lag_mutex);
+
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ VIRTCHNL_STATUS_ERR_PARAM, NULL, 0);
+}
+
+/**
+ * ice_vc_request_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * VFs get a default number of queues but can use this message to request a
+ * different number. If the request is successful, PF will reset the VF and
+ * return 0. If unsuccessful, PF will send message informing VF of number of
+ * available queue pairs via virtchnl message response to VF.
+ */
+int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vf_res_request *vfres =
+ (struct virtchnl_vf_res_request *)msg;
+ u16 req_queues = vfres->num_queue_pairs;
+ struct ice_pf *pf = vf->pf;
+ u16 max_allowed_vf_queues;
+ u16 tx_rx_queue_left;
+ struct device *dev;
+ u16 cur_queues;
+
+ dev = ice_pf_to_dev(pf);
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ cur_queues = vf->num_vf_qs;
+ tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
+ ice_get_avail_rxq_count(pf));
+ max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
+ if (!req_queues) {
+ dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
+ vf->vf_id);
+ } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
+ dev_err(dev, "VF %d tried to request more than %d queues.\n",
+ vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
+ vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
+ } else if (req_queues > cur_queues &&
+ req_queues - cur_queues > tx_rx_queue_left) {
+ dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
+ vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
+ vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
+ ICE_MAX_RSS_QS_PER_VF);
+ } else {
+ /* request is successful, then reset VF */
+ vf->num_req_qs = req_queues;
+ ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
+ dev_info(dev, "VF %d granted request of %u queues.\n",
+ vf->vf_id, req_queues);
+ return 0;
+ }
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
+ v_ret, (u8 *)vfres, sizeof(*vfres));
+}
+
diff --git a/drivers/net/ethernet/intel/ice/virt/queues.h b/drivers/net/ethernet/intel/ice/virt/queues.h
new file mode 100644
index 000000000000..c4a792cecea1
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/virt/queues.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2022, Intel Corporation. */
+
+#ifndef _ICE_VIRT_QUEUES_H_
+#define _ICE_VIRT_QUEUES_H_
+
+#include <linux/types.h>
+
+struct ice_vf;
+
+u16 ice_vc_get_max_frame_size(struct ice_vf *vf);
+int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg);
+int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg);
+int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg);
+int ice_vc_cfg_q_bw(struct ice_vf *vf, u8 *msg);
+int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg);
+int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg);
+int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg);
+
+#endif /* _ICE_VIRT_QUEUES_H_ */
diff --git a/drivers/net/ethernet/intel/ice/virt/rss.c b/drivers/net/ethernet/intel/ice/virt/rss.c
new file mode 100644
index 000000000000..cbdbb32d512b
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/virt/rss.c
@@ -0,0 +1,719 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022, Intel Corporation. */
+
+#include "rss.h"
+#include "ice_vf_lib_private.h"
+#include "ice.h"
+
+#define FIELD_SELECTOR(proto_hdr_field) \
+ BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
+
+struct ice_vc_hdr_match_type {
+ u32 vc_hdr; /* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */
+ u32 ice_hdr; /* ice headers (ICE_FLOW_SEG_HDR_XXX) */
+};
+
+static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = {
+ {VIRTCHNL_PROTO_HDR_NONE, ICE_FLOW_SEG_HDR_NONE},
+ {VIRTCHNL_PROTO_HDR_ETH, ICE_FLOW_SEG_HDR_ETH},
+ {VIRTCHNL_PROTO_HDR_S_VLAN, ICE_FLOW_SEG_HDR_VLAN},
+ {VIRTCHNL_PROTO_HDR_C_VLAN, ICE_FLOW_SEG_HDR_VLAN},
+ {VIRTCHNL_PROTO_HDR_IPV4, ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER},
+ {VIRTCHNL_PROTO_HDR_IPV6, ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER},
+ {VIRTCHNL_PROTO_HDR_TCP, ICE_FLOW_SEG_HDR_TCP},
+ {VIRTCHNL_PROTO_HDR_UDP, ICE_FLOW_SEG_HDR_UDP},
+ {VIRTCHNL_PROTO_HDR_SCTP, ICE_FLOW_SEG_HDR_SCTP},
+ {VIRTCHNL_PROTO_HDR_PPPOE, ICE_FLOW_SEG_HDR_PPPOE},
+ {VIRTCHNL_PROTO_HDR_GTPU_IP, ICE_FLOW_SEG_HDR_GTPU_IP},
+ {VIRTCHNL_PROTO_HDR_GTPU_EH, ICE_FLOW_SEG_HDR_GTPU_EH},
+ {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
+ ICE_FLOW_SEG_HDR_GTPU_DWN},
+ {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
+ ICE_FLOW_SEG_HDR_GTPU_UP},
+ {VIRTCHNL_PROTO_HDR_L2TPV3, ICE_FLOW_SEG_HDR_L2TPV3},
+ {VIRTCHNL_PROTO_HDR_ESP, ICE_FLOW_SEG_HDR_ESP},
+ {VIRTCHNL_PROTO_HDR_AH, ICE_FLOW_SEG_HDR_AH},
+ {VIRTCHNL_PROTO_HDR_PFCP, ICE_FLOW_SEG_HDR_PFCP_SESSION},
+};
+
+struct ice_vc_hash_field_match_type {
+ u32 vc_hdr; /* virtchnl headers
+ * (VIRTCHNL_PROTO_HDR_XXX)
+ */
+ u32 vc_hash_field; /* virtchnl hash fields selector
+ * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX))
+ */
+ u64 ice_hash_field; /* ice hash fields
+ * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX))
+ */
+};
+
+static const struct
+ice_vc_hash_field_match_type ice_vc_hash_field_list[] = {
+ {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
+ {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)},
+ {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
+ ICE_FLOW_HASH_ETH},
+ {VIRTCHNL_PROTO_HDR_ETH,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)},
+ {VIRTCHNL_PROTO_HDR_S_VLAN,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)},
+ {VIRTCHNL_PROTO_HDR_C_VLAN,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
+ ICE_FLOW_HASH_IPV4},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
+ ICE_FLOW_HASH_IPV6},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
+ ICE_FLOW_HASH_TCP_PORT},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
+ ICE_FLOW_HASH_UDP_PORT},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
+ ICE_FLOW_HASH_SCTP_PORT},
+ {VIRTCHNL_PROTO_HDR_PPPOE,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)},
+ {VIRTCHNL_PROTO_HDR_GTPU_IP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)},
+ {VIRTCHNL_PROTO_HDR_L2TPV3,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)},
+ {VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)},
+ {VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)},
+ {VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)},
+};
+
+/**
+ * ice_vc_validate_pattern
+ * @vf: pointer to the VF info
+ * @proto: virtchnl protocol headers
+ *
+ * Validate whether the pattern is supported.
+ *
+ * Return: true on success, false on error.
+ */
+bool
+ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto)
+{
+ bool is_ipv4 = false;
+ bool is_ipv6 = false;
+ bool is_udp = false;
+ u16 ptype = -1;
+ int i = 0;
+
+ while (i < proto->count &&
+ proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) {
+ switch (proto->proto_hdr[i].type) {
+ case VIRTCHNL_PROTO_HDR_ETH:
+ ptype = ICE_PTYPE_MAC_PAY;
+ break;
+ case VIRTCHNL_PROTO_HDR_IPV4:
+ ptype = ICE_PTYPE_IPV4_PAY;
+ is_ipv4 = true;
+ break;
+ case VIRTCHNL_PROTO_HDR_IPV6:
+ ptype = ICE_PTYPE_IPV6_PAY;
+ is_ipv6 = true;
+ break;
+ case VIRTCHNL_PROTO_HDR_UDP:
+ if (is_ipv4)
+ ptype = ICE_PTYPE_IPV4_UDP_PAY;
+ else if (is_ipv6)
+ ptype = ICE_PTYPE_IPV6_UDP_PAY;
+ is_udp = true;
+ break;
+ case VIRTCHNL_PROTO_HDR_TCP:
+ if (is_ipv4)
+ ptype = ICE_PTYPE_IPV4_TCP_PAY;
+ else if (is_ipv6)
+ ptype = ICE_PTYPE_IPV6_TCP_PAY;
+ break;
+ case VIRTCHNL_PROTO_HDR_SCTP:
+ if (is_ipv4)
+ ptype = ICE_PTYPE_IPV4_SCTP_PAY;
+ else if (is_ipv6)
+ ptype = ICE_PTYPE_IPV6_SCTP_PAY;
+ break;
+ case VIRTCHNL_PROTO_HDR_GTPU_IP:
+ case VIRTCHNL_PROTO_HDR_GTPU_EH:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_GTPU;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_GTPU;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_L2TPV3:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_L2TPV3;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_L2TPV3;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_ESP:
+ if (is_ipv4)
+ ptype = is_udp ? ICE_MAC_IPV4_NAT_T_ESP :
+ ICE_MAC_IPV4_ESP;
+ else if (is_ipv6)
+ ptype = is_udp ? ICE_MAC_IPV6_NAT_T_ESP :
+ ICE_MAC_IPV6_ESP;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_AH:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_AH;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_AH;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_PFCP:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_PFCP_SESSION;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_PFCP_SESSION;
+ goto out;
+ default:
+ break;
+ }
+ i++;
+ }
+
+out:
+ return ice_hw_ptype_ena(&vf->pf->hw, ptype);
+}
+
+/**
+ * ice_vc_parse_rss_cfg - parses hash fields and headers from
+ * a specific virtchnl RSS cfg
+ * @hw: pointer to the hardware
+ * @rss_cfg: pointer to the virtchnl RSS cfg
+ * @hash_cfg: pointer to the HW hash configuration
+ *
+ * Return: true if all the protocol headers and hash fields in the RSS cfg
+ * could be parsed, else false.
+ *
+ * This function parses the virtchnl RSS cfg into the intended hash fields
+ * and headers for the RSS configuration.
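+ *
+ * For example, a cfg with proto_hdrs = {IPV4, TCP} and both TCP port
+ * selectors set yields addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 |
+ * ICE_FLOW_SEG_HDR_IPV_OTHER | ICE_FLOW_SEG_HDR_TCP and
+ * hash_flds = ICE_FLOW_HASH_TCP_PORT.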
+ */
+static bool ice_vc_parse_rss_cfg(struct ice_hw *hw,
+ struct virtchnl_rss_cfg *rss_cfg,
+ struct ice_rss_hash_cfg *hash_cfg)
+{
+ const struct ice_vc_hash_field_match_type *hf_list;
+ const struct ice_vc_hdr_match_type *hdr_list;
+ int i, hf_list_len, hdr_list_len;
+ u32 *addl_hdrs = &hash_cfg->addl_hdrs;
+ u64 *hash_flds = &hash_cfg->hash_flds;
+
+ /* set outer layer RSS as default */
+ hash_cfg->hdr_type = ICE_RSS_OUTER_HEADERS;
+
+ if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
+ hash_cfg->symm = true;
+ else
+ hash_cfg->symm = false;
+
+ hf_list = ice_vc_hash_field_list;
+ hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list);
+ hdr_list = ice_vc_hdr_list;
+ hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list);
+
+ for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
+ struct virtchnl_proto_hdr *proto_hdr =
+ &rss_cfg->proto_hdrs.proto_hdr[i];
+ bool hdr_found = false;
+ int j;
+
+ /* Find matched ice headers according to virtchnl headers. */
+ for (j = 0; j < hdr_list_len; j++) {
+ struct ice_vc_hdr_match_type hdr_map = hdr_list[j];
+
+ if (proto_hdr->type == hdr_map.vc_hdr) {
+ *addl_hdrs |= hdr_map.ice_hdr;
+ hdr_found = true;
+ }
+ }
+
+ if (!hdr_found)
+ return false;
+
+ /* Find matched ice hash fields according to
+ * virtchnl hash fields.
+ */
+ for (j = 0; j < hf_list_len; j++) {
+ struct ice_vc_hash_field_match_type hf_map = hf_list[j];
+
+ if (proto_hdr->type == hf_map.vc_hdr &&
+ proto_hdr->field_selector == hf_map.vc_hash_field) {
+ *hash_flds |= hf_map.ice_hash_field;
+ break;
+ }
+ }
+ }
+
+ return true;
+}
+
+/**
+ * ice_vf_adv_rss_offload_ena - determine if capabilities support advanced
+ * RSS offloads
+ * @caps: VF driver negotiated capabilities
+ *
+ * Return true if VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF capability is set,
+ * else return false
+ */
+static bool ice_vf_adv_rss_offload_ena(u32 caps)
+{
+ return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF);
+}
+
+/**
+ * ice_vc_handle_rss_cfg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the message buffer
+ * @add: add an RSS config if true, otherwise delete an RSS config
+ *
+ * This function adds or deletes an RSS config.
+ */
+int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
+{
+ u32 v_opcode = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG;
+ struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_hw *hw = &vf->pf->hw;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ goto error_param;
+ }
+
+ if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) {
+ dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS ||
+ rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC ||
+ rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) {
+ dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
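+ /* The XOR algorithm (VIRTCHNL_RSS_ALG_R_ASYMMETRIC) is handled by
+ * flipping the VSI's hash function (XOR on add, back to the default
+ * Toeplitz on delete) via a VSI context update; no hash fields are
+ * parsed for it.
+ */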
+ if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) {
+ struct ice_vsi_ctx *ctx;
+ u8 lut_type, hash_type;
+ int status;
+
+ lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
+ hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR :
+ ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ goto error_param;
+ }
+
+ ctx->info.q_opt_rss =
+ FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) |
+ FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type);
+
+ /* Preserve existing queueing option setting */
+ ctx->info.q_opt_rss |= (vsi->info.q_opt_rss &
+ ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M);
+ ctx->info.q_opt_tc = vsi->info.q_opt_tc;
+ ctx->info.q_opt_flags = vsi->info.q_opt_rss;
+
+ ctx->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
+
+ status = ice_update_vsi(hw, vsi->idx, ctx, NULL);
+ if (status) {
+ dev_err(dev, "update VSI for RSS failed, err %d aq_err %s\n",
+ status, libie_aq_str(hw->adminq.sq_last_status));
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ } else {
+ vsi->info.q_opt_rss = ctx->info.q_opt_rss;
+ }
+
+ kfree(ctx);
+ } else {
+ struct ice_rss_hash_cfg cfg;
+
+ /* Only check the pattern for the non-raw case */
+ if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
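+
+ /* Start from an empty hash configuration; ice_vc_parse_rss_cfg()
+ * ORs the matched headers and hash fields into it.
+ */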
+ cfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
+ cfg.hash_flds = ICE_HASH_INVALID;
+ cfg.hdr_type = ICE_RSS_ANY_HEADERS;
+
+ if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &cfg)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (add) {
+ if (ice_add_rss_cfg(hw, vsi, &cfg)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n",
+ vsi->vsi_num, v_ret);
+ }
+ } else {
+ int status;
+
+ status = ice_rem_rss_cfg(hw, vsi->idx, &cfg);
+ /* We just ignore -ENOENT, because if two configurations
+ * share the same profile, removing one of them actually
+ * removes both, since the profile is deleted.
+ */
+ if (status && status != -ENOENT) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n",
+ vf->vf_id, status);
+ }
+ }
+ }
+
+error_param:
+ return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_config_rss_key
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS key
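+ *
+ * The key must be exactly ICE_VSIQF_HKEY_ARRAY_SIZE bytes long; any other
+ * length is rejected with VIRTCHNL_STATUS_ERR_PARAM.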
+ */
+int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_rss_key *vrk =
+ (struct virtchnl_rss_key *)msg;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (ice_set_rss_key(vsi, vrk->key))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_config_rss_lut
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS LUT
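+ *
+ * The LUT must contain exactly ICE_LUT_VSI_SIZE entries; any other size is
+ * rejected with VIRTCHNL_STATUS_ERR_PARAM.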
+ */
+int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vrl->lut_entries != ICE_LUT_VSI_SIZE) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (ice_set_rss_lut(vsi, vrl->lut, ICE_LUT_VSI_SIZE))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_config_rss_hfunc
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS Hash function
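+ *
+ * Only VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC selects the symmetric Toeplitz
+ * hash; any other algorithm value falls back to plain Toeplitz.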
+ */
+int ice_vc_config_rss_hfunc(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_hfunc *vrh = (struct virtchnl_rss_hfunc *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vrh->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vrh->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
+ hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ;
+
+ if (ice_set_rss_hfunc(vsi, hfunc))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_HFUNC, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_get_rss_hashcfg - return the RSS Hash configuration
+ * @vf: pointer to the VF info
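+ *
+ * Reports ICE_DEFAULT_RSS_HASHCFG to the VF as its supported hash
+ * configuration.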
+ */
+int ice_vc_get_rss_hashcfg(struct ice_vf *vf)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_rss_hashcfg *vrh = NULL;
+ int len = 0, ret;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ dev_err(ice_pf_to_dev(vf->pf), "RSS not supported by PF\n");
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ len = sizeof(struct virtchnl_rss_hashcfg);
+ vrh = kzalloc(len, GFP_KERNEL);
+ if (!vrh) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ len = 0;
+ goto err;
+ }
+
+ vrh->hashcfg = ICE_DEFAULT_RSS_HASHCFG;
+err:
+ /* send the response back to the VF */
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS, v_ret,
+ (u8 *)vrh, len);
+ kfree(vrh);
+ return ret;
+}
+
+/**
+ * ice_vc_set_rss_hashcfg - set RSS Hash configuration bits for the VF
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
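+ *
+ * The previously programmed RSS configuration is always cleared first, so
+ * a request with a zero hashcfg effectively disables RSS for the VF.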
+ */
+int ice_vc_set_rss_hashcfg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_hashcfg *vrh = (struct virtchnl_rss_hashcfg *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ int status;
+
+ dev = ice_pf_to_dev(pf);
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+ dev_err(dev, "RSS not supported by PF\n");
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ /* clear all previously programmed RSS configuration to allow VF
+ * drivers to customize the RSS configuration and/or completely
+ * disable RSS
+ */
+ status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
+ if (status && !vrh->hashcfg) {
+ /* only report failure to clear the current RSS configuration if
+ * that was clearly the VF's intention (i.e. vrh->hashcfg = 0)
+ */
+ v_ret = ice_err_to_virt_err(status);
+ goto err;
+ } else if (status) {
+ /* allow the VF to update the RSS configuration even on failure
+ * to clear the current RSS configuration in an attempt to keep
+ * RSS in a working state
+ */
+ dev_warn(dev, "Failed to clear the RSS configuration for VF %u\n",
+ vf->vf_id);
+ }
+
+ if (vrh->hashcfg) {
+ status = ice_add_avf_rss_cfg(&pf->hw, vsi, vrh->hashcfg);
+ v_ret = ice_err_to_virt_err(status);
+ }
+
+ /* save the requested VF configuration */
+ if (!v_ret)
+ vf->rss_hashcfg = vrh->hashcfg;
+
+ /* send the response to the VF */
+err:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_RSS_HASHCFG, v_ret,
+ NULL, 0);
+}
+
diff --git a/drivers/net/ethernet/intel/ice/virt/rss.h b/drivers/net/ethernet/intel/ice/virt/rss.h
new file mode 100644
index 000000000000..784d4c43ce8b
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/virt/rss.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2022, Intel Corporation. */
+
+#ifndef _ICE_VIRT_RSS_H_
+#define _ICE_VIRT_RSS_H_
+
+#include <linux/types.h>
+
+struct ice_vf;
+
+int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add);
+int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg);
+int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg);
+int ice_vc_config_rss_hfunc(struct ice_vf *vf, u8 *msg);
+int ice_vc_get_rss_hashcfg(struct ice_vf *vf);
+int ice_vc_set_rss_hashcfg(struct ice_vf *vf, u8 *msg);
+
+#endif /* _ICE_VIRT_RSS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/virt/virtchnl.c
index 257967273079..f3f921134379 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/virt/virtchnl.c
@@ -1,170 +1,20 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022, Intel Corporation. */
-#include "ice_virtchnl.h"
+#include "virtchnl.h"
+#include "queues.h"
+#include "rss.h"
#include "ice_vf_lib_private.h"
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
-#include "ice_virtchnl_allowlist.h"
+#include "allowlist.h"
#include "ice_vf_vsi_vlan_ops.h"
#include "ice_vlan.h"
#include "ice_flex_pipe.h"
#include "ice_dcb_lib.h"
-#define FIELD_SELECTOR(proto_hdr_field) \
- BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
-
-struct ice_vc_hdr_match_type {
- u32 vc_hdr; /* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */
- u32 ice_hdr; /* ice headers (ICE_FLOW_SEG_HDR_XXX) */
-};
-
-static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = {
- {VIRTCHNL_PROTO_HDR_NONE, ICE_FLOW_SEG_HDR_NONE},
- {VIRTCHNL_PROTO_HDR_ETH, ICE_FLOW_SEG_HDR_ETH},
- {VIRTCHNL_PROTO_HDR_S_VLAN, ICE_FLOW_SEG_HDR_VLAN},
- {VIRTCHNL_PROTO_HDR_C_VLAN, ICE_FLOW_SEG_HDR_VLAN},
- {VIRTCHNL_PROTO_HDR_IPV4, ICE_FLOW_SEG_HDR_IPV4 |
- ICE_FLOW_SEG_HDR_IPV_OTHER},
- {VIRTCHNL_PROTO_HDR_IPV6, ICE_FLOW_SEG_HDR_IPV6 |
- ICE_FLOW_SEG_HDR_IPV_OTHER},
- {VIRTCHNL_PROTO_HDR_TCP, ICE_FLOW_SEG_HDR_TCP},
- {VIRTCHNL_PROTO_HDR_UDP, ICE_FLOW_SEG_HDR_UDP},
- {VIRTCHNL_PROTO_HDR_SCTP, ICE_FLOW_SEG_HDR_SCTP},
- {VIRTCHNL_PROTO_HDR_PPPOE, ICE_FLOW_SEG_HDR_PPPOE},
- {VIRTCHNL_PROTO_HDR_GTPU_IP, ICE_FLOW_SEG_HDR_GTPU_IP},
- {VIRTCHNL_PROTO_HDR_GTPU_EH, ICE_FLOW_SEG_HDR_GTPU_EH},
- {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
- ICE_FLOW_SEG_HDR_GTPU_DWN},
- {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
- ICE_FLOW_SEG_HDR_GTPU_UP},
- {VIRTCHNL_PROTO_HDR_L2TPV3, ICE_FLOW_SEG_HDR_L2TPV3},
- {VIRTCHNL_PROTO_HDR_ESP, ICE_FLOW_SEG_HDR_ESP},
- {VIRTCHNL_PROTO_HDR_AH, ICE_FLOW_SEG_HDR_AH},
- {VIRTCHNL_PROTO_HDR_PFCP, ICE_FLOW_SEG_HDR_PFCP_SESSION},
-};
-
-struct ice_vc_hash_field_match_type {
- u32 vc_hdr; /* virtchnl headers
- * (VIRTCHNL_PROTO_HDR_XXX)
- */
- u32 vc_hash_field; /* virtchnl hash fields selector
- * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX))
- */
- u64 ice_hash_field; /* ice hash fields
- * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX))
- */
-};
-
-static const struct
-ice_vc_hash_field_match_type ice_vc_hash_field_list[] = {
- {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
- BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
- {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
- BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)},
- {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
- ICE_FLOW_HASH_ETH},
- {VIRTCHNL_PROTO_HDR_ETH,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE),
- BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)},
- {VIRTCHNL_PROTO_HDR_S_VLAN,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)},
- {VIRTCHNL_PROTO_HDR_C_VLAN,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
- ICE_FLOW_HASH_IPV4},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
- ICE_FLOW_HASH_IPV6},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_TCP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
- {VIRTCHNL_PROTO_HDR_TCP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
- {VIRTCHNL_PROTO_HDR_TCP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
- ICE_FLOW_HASH_TCP_PORT},
- {VIRTCHNL_PROTO_HDR_UDP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
- {VIRTCHNL_PROTO_HDR_UDP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
- {VIRTCHNL_PROTO_HDR_UDP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
- ICE_FLOW_HASH_UDP_PORT},
- {VIRTCHNL_PROTO_HDR_SCTP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
- {VIRTCHNL_PROTO_HDR_SCTP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
- {VIRTCHNL_PROTO_HDR_SCTP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
- ICE_FLOW_HASH_SCTP_PORT},
- {VIRTCHNL_PROTO_HDR_PPPOE,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)},
- {VIRTCHNL_PROTO_HDR_GTPU_IP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)},
- {VIRTCHNL_PROTO_HDR_L2TPV3,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)},
- {VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI),
- BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)},
- {VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI),
- BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)},
- {VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)},
-};
-
/**
* ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
* @pf: pointer to the PF structure
@@ -338,28 +188,6 @@ static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
}
/**
- * ice_vc_get_max_frame_size - get max frame size allowed for VF
- * @vf: VF used to determine max frame size
- *
- * Max frame size is determined based on the current port's max frame size and
- * whether a port VLAN is configured on this VF. The VF is not aware whether
- * it's in a port VLAN so the PF needs to account for this in max frame size
- * checks and sending the max frame size to the VF.
- */
-static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
-{
- struct ice_port_info *pi = ice_vf_get_port_info(vf);
- u16 max_frame_size;
-
- max_frame_size = pi->phy.link_info.max_frame_size;
-
- if (ice_vf_is_port_vlan_ena(vf))
- max_frame_size -= VLAN_HLEN;
-
- return max_frame_size;
-}
-
-/**
* ice_vc_get_vlan_caps
* @hw: pointer to the hw
* @vf: pointer to the VF info
@@ -559,488 +387,6 @@ bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
}
/**
- * ice_vc_isvalid_q_id
- * @vsi: VSI to check queue ID against
- * @qid: VSI relative queue ID
- *
- * check for the valid queue ID
- */
-static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u16 qid)
-{
- /* allocated Tx and Rx queues should be always equal for VF VSI */
- return qid < vsi->alloc_txq;
-}
-
-/**
- * ice_vc_isvalid_ring_len
- * @ring_len: length of ring
- *
- * check for the valid ring count, should be multiple of ICE_REQ_DESC_MULTIPLE
- * or zero
- */
-static bool ice_vc_isvalid_ring_len(u16 ring_len)
-{
- return ring_len == 0 ||
- (ring_len >= ICE_MIN_NUM_DESC &&
- ring_len <= ICE_MAX_NUM_DESC &&
- !(ring_len % ICE_REQ_DESC_MULTIPLE));
-}
-
-/**
- * ice_vc_validate_pattern
- * @vf: pointer to the VF info
- * @proto: virtchnl protocol headers
- *
- * validate the pattern is supported or not.
- *
- * Return: true on success, false on error.
- */
-bool
-ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto)
-{
- bool is_ipv4 = false;
- bool is_ipv6 = false;
- bool is_udp = false;
- u16 ptype = -1;
- int i = 0;
-
- while (i < proto->count &&
- proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) {
- switch (proto->proto_hdr[i].type) {
- case VIRTCHNL_PROTO_HDR_ETH:
- ptype = ICE_PTYPE_MAC_PAY;
- break;
- case VIRTCHNL_PROTO_HDR_IPV4:
- ptype = ICE_PTYPE_IPV4_PAY;
- is_ipv4 = true;
- break;
- case VIRTCHNL_PROTO_HDR_IPV6:
- ptype = ICE_PTYPE_IPV6_PAY;
- is_ipv6 = true;
- break;
- case VIRTCHNL_PROTO_HDR_UDP:
- if (is_ipv4)
- ptype = ICE_PTYPE_IPV4_UDP_PAY;
- else if (is_ipv6)
- ptype = ICE_PTYPE_IPV6_UDP_PAY;
- is_udp = true;
- break;
- case VIRTCHNL_PROTO_HDR_TCP:
- if (is_ipv4)
- ptype = ICE_PTYPE_IPV4_TCP_PAY;
- else if (is_ipv6)
- ptype = ICE_PTYPE_IPV6_TCP_PAY;
- break;
- case VIRTCHNL_PROTO_HDR_SCTP:
- if (is_ipv4)
- ptype = ICE_PTYPE_IPV4_SCTP_PAY;
- else if (is_ipv6)
- ptype = ICE_PTYPE_IPV6_SCTP_PAY;
- break;
- case VIRTCHNL_PROTO_HDR_GTPU_IP:
- case VIRTCHNL_PROTO_HDR_GTPU_EH:
- if (is_ipv4)
- ptype = ICE_MAC_IPV4_GTPU;
- else if (is_ipv6)
- ptype = ICE_MAC_IPV6_GTPU;
- goto out;
- case VIRTCHNL_PROTO_HDR_L2TPV3:
- if (is_ipv4)
- ptype = ICE_MAC_IPV4_L2TPV3;
- else if (is_ipv6)
- ptype = ICE_MAC_IPV6_L2TPV3;
- goto out;
- case VIRTCHNL_PROTO_HDR_ESP:
- if (is_ipv4)
- ptype = is_udp ? ICE_MAC_IPV4_NAT_T_ESP :
- ICE_MAC_IPV4_ESP;
- else if (is_ipv6)
- ptype = is_udp ? ICE_MAC_IPV6_NAT_T_ESP :
- ICE_MAC_IPV6_ESP;
- goto out;
- case VIRTCHNL_PROTO_HDR_AH:
- if (is_ipv4)
- ptype = ICE_MAC_IPV4_AH;
- else if (is_ipv6)
- ptype = ICE_MAC_IPV6_AH;
- goto out;
- case VIRTCHNL_PROTO_HDR_PFCP:
- if (is_ipv4)
- ptype = ICE_MAC_IPV4_PFCP_SESSION;
- else if (is_ipv6)
- ptype = ICE_MAC_IPV6_PFCP_SESSION;
- goto out;
- default:
- break;
- }
- i++;
- }
-
-out:
- return ice_hw_ptype_ena(&vf->pf->hw, ptype);
-}
-
-/**
- * ice_vc_parse_rss_cfg - parses hash fields and headers from
- * a specific virtchnl RSS cfg
- * @hw: pointer to the hardware
- * @rss_cfg: pointer to the virtchnl RSS cfg
- * @hash_cfg: pointer to the HW hash configuration
- *
- * Return true if all the protocol header and hash fields in the RSS cfg could
- * be parsed, else return false
- *
- * This function parses the virtchnl RSS cfg to be the intended
- * hash fields and the intended header for RSS configuration
- */
-static bool ice_vc_parse_rss_cfg(struct ice_hw *hw,
- struct virtchnl_rss_cfg *rss_cfg,
- struct ice_rss_hash_cfg *hash_cfg)
-{
- const struct ice_vc_hash_field_match_type *hf_list;
- const struct ice_vc_hdr_match_type *hdr_list;
- int i, hf_list_len, hdr_list_len;
- u32 *addl_hdrs = &hash_cfg->addl_hdrs;
- u64 *hash_flds = &hash_cfg->hash_flds;
-
- /* set outer layer RSS as default */
- hash_cfg->hdr_type = ICE_RSS_OUTER_HEADERS;
-
- if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
- hash_cfg->symm = true;
- else
- hash_cfg->symm = false;
-
- hf_list = ice_vc_hash_field_list;
- hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list);
- hdr_list = ice_vc_hdr_list;
- hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list);
-
- for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
- struct virtchnl_proto_hdr *proto_hdr =
- &rss_cfg->proto_hdrs.proto_hdr[i];
- bool hdr_found = false;
- int j;
-
- /* Find matched ice headers according to virtchnl headers. */
- for (j = 0; j < hdr_list_len; j++) {
- struct ice_vc_hdr_match_type hdr_map = hdr_list[j];
-
- if (proto_hdr->type == hdr_map.vc_hdr) {
- *addl_hdrs |= hdr_map.ice_hdr;
- hdr_found = true;
- }
- }
-
- if (!hdr_found)
- return false;
-
- /* Find matched ice hash fields according to
- * virtchnl hash fields.
- */
- for (j = 0; j < hf_list_len; j++) {
- struct ice_vc_hash_field_match_type hf_map = hf_list[j];
-
- if (proto_hdr->type == hf_map.vc_hdr &&
- proto_hdr->field_selector == hf_map.vc_hash_field) {
- *hash_flds |= hf_map.ice_hash_field;
- break;
- }
- }
- }
-
- return true;
-}
-
-/**
- * ice_vf_adv_rss_offload_ena - determine if capabilities support advanced
- * RSS offloads
- * @caps: VF driver negotiated capabilities
- *
- * Return true if VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF capability is set,
- * else return false
- */
-static bool ice_vf_adv_rss_offload_ena(u32 caps)
-{
- return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF);
-}
-
-/**
- * ice_vc_handle_rss_cfg
- * @vf: pointer to the VF info
- * @msg: pointer to the message buffer
- * @add: add a RSS config if true, otherwise delete a RSS config
- *
- * This function adds/deletes a RSS config
- */
-static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
-{
- u32 v_opcode = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG;
- struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg;
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct device *dev = ice_pf_to_dev(vf->pf);
- struct ice_hw *hw = &vf->pf->hw;
- struct ice_vsi *vsi;
-
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n",
- vf->vf_id);
- v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
- goto error_param;
- }
-
- if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) {
- dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n",
- vf->vf_id);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS ||
- rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC ||
- rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) {
- dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n",
- vf->vf_id);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) {
- struct ice_vsi_ctx *ctx;
- u8 lut_type, hash_type;
- int status;
-
- lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
- hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR :
- ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx) {
- v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
- goto error_param;
- }
-
- ctx->info.q_opt_rss =
- FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) |
- FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type);
-
- /* Preserve existing queueing option setting */
- ctx->info.q_opt_rss |= (vsi->info.q_opt_rss &
- ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M);
- ctx->info.q_opt_tc = vsi->info.q_opt_tc;
- ctx->info.q_opt_flags = vsi->info.q_opt_rss;
-
- ctx->info.valid_sections =
- cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
-
- status = ice_update_vsi(hw, vsi->idx, ctx, NULL);
- if (status) {
- dev_err(dev, "update VSI for RSS failed, err %d aq_err %s\n",
- status, libie_aq_str(hw->adminq.sq_last_status));
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- } else {
- vsi->info.q_opt_rss = ctx->info.q_opt_rss;
- }
-
- kfree(ctx);
- } else {
- struct ice_rss_hash_cfg cfg;
-
- /* Only check for none raw pattern case */
- if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
- cfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
- cfg.hash_flds = ICE_HASH_INVALID;
- cfg.hdr_type = ICE_RSS_ANY_HEADERS;
-
- if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &cfg)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (add) {
- if (ice_add_rss_cfg(hw, vsi, &cfg)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n",
- vsi->vsi_num, v_ret);
- }
- } else {
- int status;
-
- status = ice_rem_rss_cfg(hw, vsi->idx, &cfg);
- /* We just ignore -ENOENT, because if two configurations
- * share the same profile remove one of them actually
- * removes both, since the profile is deleted.
- */
- if (status && status != -ENOENT) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n",
- vf->vf_id, status);
- }
- }
- }
-
-error_param:
- return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0);
-}
-
-/**
- * ice_vc_config_rss_key
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * Configure the VF's RSS key
- */
-static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_rss_key *vrk =
- (struct virtchnl_rss_key *)msg;
- struct ice_vsi *vsi;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (ice_set_rss_key(vsi, vrk->key))
- v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
-error_param:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_vc_config_rss_lut
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * Configure the VF's RSS LUT
- */
-static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
-{
- struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct ice_vsi *vsi;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (vrl->lut_entries != ICE_LUT_VSI_SIZE) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (ice_set_rss_lut(vsi, vrl->lut, ICE_LUT_VSI_SIZE))
- v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
-error_param:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_vc_config_rss_hfunc
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * Configure the VF's RSS Hash function
- */
-static int ice_vc_config_rss_hfunc(struct ice_vf *vf, u8 *msg)
-{
- struct virtchnl_rss_hfunc *vrh = (struct virtchnl_rss_hfunc *)msg;
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
- struct ice_vsi *vsi;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vrh->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (vrh->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
- hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ;
-
- if (ice_set_rss_hfunc(vsi, hfunc))
- v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
-error_param:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_HFUNC, v_ret,
- NULL, 0);
-}
-
-/**
* ice_vc_get_qos_caps - Get current QoS caps from PF
* @vf: pointer to the VF info
*
@@ -1122,110 +468,6 @@ err:
}
/**
- * ice_vf_cfg_qs_bw - Configure per queue bandwidth
- * @vf: pointer to the VF info
- * @num_queues: number of queues to be configured
- *
- * Configure per queue bandwidth.
- *
- * Return: 0 on success or negative error value.
- */
-static int ice_vf_cfg_qs_bw(struct ice_vf *vf, u16 num_queues)
-{
- struct ice_hw *hw = &vf->pf->hw;
- struct ice_vsi *vsi;
- int ret;
- u16 i;
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi)
- return -EINVAL;
-
- for (i = 0; i < num_queues; i++) {
- u32 p_rate, min_rate;
- u8 tc;
-
- p_rate = vf->qs_bw[i].peak;
- min_rate = vf->qs_bw[i].committed;
- tc = vf->qs_bw[i].tc;
- if (p_rate)
- ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc,
- vf->qs_bw[i].queue_id,
- ICE_MAX_BW, p_rate);
- else
- ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx, tc,
- vf->qs_bw[i].queue_id,
- ICE_MAX_BW);
- if (ret)
- return ret;
-
- if (min_rate)
- ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc,
- vf->qs_bw[i].queue_id,
- ICE_MIN_BW, min_rate);
- else
- ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx, tc,
- vf->qs_bw[i].queue_id,
- ICE_MIN_BW);
-
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-/**
- * ice_vf_cfg_q_quanta_profile - Configure quanta profile
- * @vf: pointer to the VF info
- * @quanta_prof_idx: pointer to the quanta profile index
- * @quanta_size: quanta size to be set
- *
- * This function chooses available quanta profile and configures the register.
- * The quanta profile is evenly divided by the number of device ports, and then
- * available to the specific PF and VFs. The first profile for each PF is a
- * reserved default profile. Only quanta size of the rest unused profile can be
- * modified.
- *
- * Return: 0 on success or negative error value.
- */
-static int ice_vf_cfg_q_quanta_profile(struct ice_vf *vf, u16 quanta_size,
- u16 *quanta_prof_idx)
-{
- const u16 n_desc = calc_quanta_desc(quanta_size);
- struct ice_hw *hw = &vf->pf->hw;
- const u16 n_cmd = 2 * n_desc;
- struct ice_pf *pf = vf->pf;
- u16 per_pf, begin_id;
- u8 n_used;
- u32 reg;
-
- begin_id = (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) / hw->dev_caps.num_funcs *
- hw->logical_pf_id;
-
- if (quanta_size == ICE_DFLT_QUANTA) {
- *quanta_prof_idx = begin_id;
- } else {
- per_pf = (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) /
- hw->dev_caps.num_funcs;
- n_used = pf->num_quanta_prof_used;
- if (n_used < per_pf) {
- *quanta_prof_idx = begin_id + 1 + n_used;
- pf->num_quanta_prof_used++;
- } else {
- return -EINVAL;
- }
- }
-
- reg = FIELD_PREP(GLCOMM_QUANTA_PROF_QUANTA_SIZE_M, quanta_size) |
- FIELD_PREP(GLCOMM_QUANTA_PROF_MAX_CMD_M, n_cmd) |
- FIELD_PREP(GLCOMM_QUANTA_PROF_MAX_DESC_M, n_desc);
- wr32(hw, GLCOMM_QUANTA_PROF(*quanta_prof_idx), reg);
-
- return 0;
-}
-
-/**
* ice_vc_cfg_promiscuous_mode_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -1407,757 +649,6 @@ error_param:
}
/**
- * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
- * @vqs: virtchnl_queue_select structure containing bitmaps to validate
- *
- * Return true on successful validation, else false
- */
-static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
-{
- if ((!vqs->rx_queues && !vqs->tx_queues) ||
- vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
- vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
- return false;
-
- return true;
-}
-
-/**
- * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
- * @vsi: VSI of the VF to configure
- * @q_idx: VF queue index used to determine the queue in the PF's space
- */
-void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
-{
- struct ice_hw *hw = &vsi->back->hw;
- u32 pfq = vsi->txq_map[q_idx];
- u32 reg;
-
- reg = rd32(hw, QINT_TQCTL(pfq));
-
- /* MSI-X index 0 in the VF's space is always for the OICR, which means
- * this is most likely a poll mode VF driver, so don't enable an
- * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
- */
- if (!(reg & QINT_TQCTL_MSIX_INDX_M))
- return;
-
- wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
-}
-
-/**
- * ice_vf_ena_rxq_interrupt - enable Tx queue interrupt via QINT_RQCTL
- * @vsi: VSI of the VF to configure
- * @q_idx: VF queue index used to determine the queue in the PF's space
- */
-void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
-{
- struct ice_hw *hw = &vsi->back->hw;
- u32 pfq = vsi->rxq_map[q_idx];
- u32 reg;
-
- reg = rd32(hw, QINT_RQCTL(pfq));
-
- /* MSI-X index 0 in the VF's space is always for the OICR, which means
- * this is most likely a poll mode VF driver, so don't enable an
- * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
- */
- if (!(reg & QINT_RQCTL_MSIX_INDX_M))
- return;
-
- wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
-}
-
-/**
- * ice_vc_ena_qs_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to enable all or specific queue(s)
- */
-static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_queue_select *vqs =
- (struct virtchnl_queue_select *)msg;
- struct ice_vsi *vsi;
- unsigned long q_map;
- u16 vf_q_id;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_validate_vqs_bitmaps(vqs)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Enable only Rx rings, Tx rings were enabled by the FW when the
- * Tx queue group list was configured and the context bits were
- * programmed using ice_vsi_cfg_txqs
- */
- q_map = vqs->rx_queues;
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Skip queue if enabled */
- if (test_bit(vf_q_id, vf->rxq_ena))
- continue;
-
- if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
- vf_q_id, vsi->vsi_num);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
- set_bit(vf_q_id, vf->rxq_ena);
- }
-
- q_map = vqs->tx_queues;
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Skip queue if enabled */
- if (test_bit(vf_q_id, vf->txq_ena))
- continue;
-
- ice_vf_ena_txq_interrupt(vsi, vf_q_id);
- set_bit(vf_q_id, vf->txq_ena);
- }
-
- /* Set flag to indicate that queues are enabled */
- if (v_ret == VIRTCHNL_STATUS_SUCCESS)
- set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
-
-error_param:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_vf_vsi_dis_single_txq - disable a single Tx queue
- * @vf: VF to disable queue for
- * @vsi: VSI for the VF
- * @q_id: VF relative (0-based) queue ID
- *
- * Attempt to disable the Tx queue passed in. If the Tx queue was successfully
- * disabled then clear q_id bit in the enabled queues bitmap and return
- * success. Otherwise return error.
- */
-int ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id)
-{
- struct ice_txq_meta txq_meta = { 0 };
- struct ice_tx_ring *ring;
- int err;
-
- if (!test_bit(q_id, vf->txq_ena))
- dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
- q_id, vsi->vsi_num);
-
- ring = vsi->tx_rings[q_id];
- if (!ring)
- return -EINVAL;
-
- ice_fill_txq_meta(vsi, ring, &txq_meta);
-
- err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring, &txq_meta);
- if (err) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
- q_id, vsi->vsi_num);
- return err;
- }
-
- /* Clear enabled queues flag */
- clear_bit(q_id, vf->txq_ena);
-
- return 0;
-}
-
-/**
- * ice_vc_dis_qs_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to disable all or specific queue(s)
- */
-static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_queue_select *vqs =
- (struct virtchnl_queue_select *)msg;
- struct ice_vsi *vsi;
- unsigned long q_map;
- u16 vf_q_id;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
- !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_validate_vqs_bitmaps(vqs)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (vqs->tx_queues) {
- q_map = vqs->tx_queues;
-
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (ice_vf_vsi_dis_single_txq(vf, vsi, vf_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
- }
- }
-
- q_map = vqs->rx_queues;
- /* speed up Rx queue disable by batching them if possible */
- if (q_map &&
- bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
- if (ice_vsi_stop_all_rx_rings(vsi)) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
- vsi->vsi_num);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
- } else if (q_map) {
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Skip queue if not enabled */
- if (!test_bit(vf_q_id, vf->rxq_ena))
- continue;
-
- if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
- true)) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
- vf_q_id, vsi->vsi_num);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Clear enabled queues flag */
- clear_bit(vf_q_id, vf->rxq_ena);
- }
- }
-
- /* Clear enabled queues flag */
- if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
- clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
-
-error_param:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_cfg_interrupt
- * @vf: pointer to the VF info
- * @vsi: the VSI being configured
- * @map: vector map for mapping vectors to queues
- * @q_vector: structure for interrupt vector
- * configure the IRQ to queue map
- */
-static enum virtchnl_status_code
-ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi,
- struct virtchnl_vector_map *map,
- struct ice_q_vector *q_vector)
-{
- u16 vsi_q_id, vsi_q_id_idx;
- unsigned long qmap;
-
- q_vector->num_ring_rx = 0;
- q_vector->num_ring_tx = 0;
-
- qmap = map->rxq_map;
- for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
- vsi_q_id = vsi_q_id_idx;
-
- if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
- return VIRTCHNL_STATUS_ERR_PARAM;
-
- q_vector->num_ring_rx++;
- q_vector->rx.itr_idx = map->rxitr_idx;
- vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
- ice_cfg_rxq_interrupt(vsi, vsi_q_id,
- q_vector->vf_reg_idx,
- q_vector->rx.itr_idx);
- }
-
- qmap = map->txq_map;
- for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
- vsi_q_id = vsi_q_id_idx;
-
- if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
- return VIRTCHNL_STATUS_ERR_PARAM;
-
- q_vector->num_ring_tx++;
- q_vector->tx.itr_idx = map->txitr_idx;
- vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
- ice_cfg_txq_interrupt(vsi, vsi_q_id,
- q_vector->vf_reg_idx,
- q_vector->tx.itr_idx);
- }
-
- return VIRTCHNL_STATUS_SUCCESS;
-}
-
-/**
- * ice_vc_cfg_irq_map_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to configure the IRQ to queue map
- */
-static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- u16 num_q_vectors_mapped, vsi_id, vector_id;
- struct virtchnl_irq_map_info *irqmap_info;
- struct virtchnl_vector_map *map;
- struct ice_vsi *vsi;
- int i;
-
- irqmap_info = (struct virtchnl_irq_map_info *)msg;
- num_q_vectors_mapped = irqmap_info->num_vectors;
-
- /* Check to make sure number of VF vectors mapped is not greater than
- * number of VF vectors originally allocated, and check that
- * there is actually at least a single VF queue vector mapped
- */
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
- vf->num_msix < num_q_vectors_mapped ||
- !num_q_vectors_mapped) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- for (i = 0; i < num_q_vectors_mapped; i++) {
- struct ice_q_vector *q_vector;
-
- map = &irqmap_info->vecmap[i];
-
- vector_id = map->vector_id;
- vsi_id = map->vsi_id;
- /* vector_id is always 0-based for each VF, and can never be
- * larger than or equal to the max allowed interrupts per VF
- */
- if (!(vector_id < vf->num_msix) ||
- !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
- (!vector_id && (map->rxq_map || map->txq_map))) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* No need to map VF miscellaneous or rogue vector */
- if (!vector_id)
- continue;
-
- /* Subtract non queue vector from vector_id passed by VF
- * to get actual number of VSI queue vector array index
- */
- q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
- if (!q_vector) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* lookout for the invalid queue index */
- v_ret = ice_cfg_interrupt(vf, vsi, map, q_vector);
- if (v_ret)
- goto error_param;
- }
-
-error_param:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_vc_cfg_q_bw - Configure per queue bandwidth
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer which holds the command descriptor
- *
- * Configure VF queues bandwidth.
- *
- * Return: 0 on success or negative error value.
- */
-static int ice_vc_cfg_q_bw(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_queues_bw_cfg *qbw =
- (struct virtchnl_queues_bw_cfg *)msg;
- struct ice_vsi *vsi;
- u16 i;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
- !ice_vc_isvalid_vsi_id(vf, qbw->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- if (qbw->num_queues > ICE_MAX_RSS_QS_PER_VF ||
- qbw->num_queues > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
- dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n",
- vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- for (i = 0; i < qbw->num_queues; i++) {
- if (qbw->cfg[i].shaper.peak != 0 && vf->max_tx_rate != 0 &&
- qbw->cfg[i].shaper.peak > vf->max_tx_rate) {
- dev_warn(ice_pf_to_dev(vf->pf), "The maximum queue %d rate limit configuration may not take effect because the maximum TX rate for VF-%d is %d\n",
- qbw->cfg[i].queue_id, vf->vf_id,
- vf->max_tx_rate);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
- if (qbw->cfg[i].shaper.committed != 0 && vf->min_tx_rate != 0 &&
- qbw->cfg[i].shaper.committed < vf->min_tx_rate) {
- dev_warn(ice_pf_to_dev(vf->pf), "The minimum queue %d rate limit configuration may not take effect because the minimum TX rate for VF-%d is %d\n",
- qbw->cfg[i].queue_id, vf->vf_id,
- vf->min_tx_rate);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
- if (qbw->cfg[i].queue_id > vf->num_vf_qs) {
- dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure invalid queue_id\n",
- vf->vf_id);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
- if (qbw->cfg[i].tc >= ICE_MAX_TRAFFIC_CLASS) {
- dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure a traffic class higher than allowed\n",
- vf->vf_id);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
- }
-
- for (i = 0; i < qbw->num_queues; i++) {
- vf->qs_bw[i].queue_id = qbw->cfg[i].queue_id;
- vf->qs_bw[i].peak = qbw->cfg[i].shaper.peak;
- vf->qs_bw[i].committed = qbw->cfg[i].shaper.committed;
- vf->qs_bw[i].tc = qbw->cfg[i].tc;
- }
-
- if (ice_vf_cfg_qs_bw(vf, qbw->num_queues))
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-
-err:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUEUE_BW,
- v_ret, NULL, 0);
-}
-
-/**
- * ice_vc_cfg_q_quanta - Configure per queue quanta
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer which holds the command descriptor
- *
- * Configure VF queues quanta.
- *
- * Return: 0 on success or negative error value.
- */
-static int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg)
-{
- u16 quanta_prof_id, quanta_size, start_qid, num_queues, end_qid, i;
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_quanta_cfg *qquanta =
- (struct virtchnl_quanta_cfg *)msg;
- struct ice_vsi *vsi;
- int ret;
-
- start_qid = qquanta->queue_select.start_queue_id;
- num_queues = qquanta->queue_select.num_queues;
-
- if (check_add_overflow(start_qid, num_queues, &end_qid)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- if (end_qid > ICE_MAX_RSS_QS_PER_VF ||
- end_qid > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
- dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n",
- vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- quanta_size = qquanta->quanta_size;
- if (quanta_size > ICE_MAX_QUANTA_SIZE ||
- quanta_size < ICE_MIN_QUANTA_SIZE) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- if (quanta_size % 64) {
- dev_err(ice_pf_to_dev(vf->pf), "quanta size should be the product of 64\n");
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- ret = ice_vf_cfg_q_quanta_profile(vf, quanta_size,
- &quanta_prof_id);
- if (ret) {
- v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
- goto err;
- }
-
- for (i = start_qid; i < end_qid; i++)
- vsi->tx_rings[i]->quanta_prof_id = quanta_prof_id;
-
-err:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUANTA,
- v_ret, NULL, 0);
-}
-
-/**
- * ice_vc_cfg_qs_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to configure the Rx/Tx queues
- */
-static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
-{
- struct virtchnl_vsi_queue_config_info *qci =
- (struct virtchnl_vsi_queue_config_info *)msg;
- struct virtchnl_queue_pair_info *qpi;
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
- int i = -1, q_idx;
- bool ena_ts;
- u8 act_prt;
-
- mutex_lock(&pf->lag_mutex);
- act_prt = ice_lag_prepare_vf_reset(pf->lag);
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
- goto error_param;
-
- if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id))
- goto error_param;
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi)
- goto error_param;
-
- if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
- qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
- dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
- vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
- goto error_param;
- }
-
- for (i = 0; i < qci->num_queue_pairs; i++) {
- if (!qci->qpair[i].rxq.crc_disable)
- continue;
-
- if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_CRC) ||
- vf->vlan_strip_ena)
- goto error_param;
- }
-
- for (i = 0; i < qci->num_queue_pairs; i++) {
- qpi = &qci->qpair[i];
- if (qpi->txq.vsi_id != qci->vsi_id ||
- qpi->rxq.vsi_id != qci->vsi_id ||
- qpi->rxq.queue_id != qpi->txq.queue_id ||
- qpi->txq.headwb_enabled ||
- !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
- !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
- !ice_vc_isvalid_q_id(vsi, qpi->txq.queue_id)) {
- goto error_param;
- }
-
- q_idx = qpi->rxq.queue_id;
-
- /* make sure selected "q_idx" is in valid range of queues
- * for selected "vsi"
- */
- if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
- goto error_param;
- }
-
- /* copy Tx queue info from VF into VSI */
- if (qpi->txq.ring_len > 0) {
- vsi->tx_rings[q_idx]->dma = qpi->txq.dma_ring_addr;
- vsi->tx_rings[q_idx]->count = qpi->txq.ring_len;
-
- /* Disable any existing queue first */
- if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx))
- goto error_param;
-
- /* Configure a queue with the requested settings */
- if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
- dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n",
- vf->vf_id, q_idx);
- goto error_param;
- }
- }
-
- /* copy Rx queue info from VF into VSI */
- if (qpi->rxq.ring_len > 0) {
- u16 max_frame_size = ice_vc_get_max_frame_size(vf);
- struct ice_rx_ring *ring = vsi->rx_rings[q_idx];
- u32 rxdid;
-
- ring->dma = qpi->rxq.dma_ring_addr;
- ring->count = qpi->rxq.ring_len;
-
- if (qpi->rxq.crc_disable)
- ring->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
- else
- ring->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
-
- if (qpi->rxq.databuffer_size != 0 &&
- (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
- qpi->rxq.databuffer_size < 1024))
- goto error_param;
- ring->rx_buf_len = qpi->rxq.databuffer_size;
- if (qpi->rxq.max_pkt_size > max_frame_size ||
- qpi->rxq.max_pkt_size < 64)
- goto error_param;
-
- ring->max_frame = qpi->rxq.max_pkt_size;
- /* add space for the port VLAN since the VF driver is
- * not expected to account for it in the MTU
- * calculation
- */
- if (ice_vf_is_port_vlan_ena(vf))
- ring->max_frame += VLAN_HLEN;
-
- if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
- dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n",
- vf->vf_id, q_idx);
- goto error_param;
- }
-
-			/* If Rx flex descriptors are supported, select the
-			 * RXDID for the Rx queues. Otherwise, use the legacy
-			 * 32-byte descriptor format. The legacy 16-byte
-			 * descriptor is not supported; if that RXDID is
-			 * selected, return an error.
-			 */
- if (vf->driver_caps &
- VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
- rxdid = qpi->rxq.rxdid;
- if (!(BIT(rxdid) & pf->supported_rxdids))
- goto error_param;
- } else {
- rxdid = ICE_RXDID_LEGACY_1;
- }
-
- ena_ts = ((vf->driver_caps &
- VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) &&
- (vf->driver_caps & VIRTCHNL_VF_CAP_PTP) &&
- (qpi->rxq.flags & VIRTCHNL_PTP_RX_TSTAMP));
-
- ice_write_qrxflxp_cntxt(&vsi->back->hw,
- vsi->rxq_map[q_idx], rxdid,
- ICE_RXDID_PRIO, ena_ts);
- }
- }
-
- ice_lag_complete_vf_reset(pf->lag, act_prt);
- mutex_unlock(&pf->lag_mutex);
-
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
- VIRTCHNL_STATUS_SUCCESS, NULL, 0);
-error_param:
- /* disable whatever we can */
- for (; i >= 0; i--) {
- if (ice_vsi_ctrl_one_rx_ring(vsi, false, i, true))
- dev_err(ice_pf_to_dev(pf), "VF-%d could not disable RX queue %d\n",
- vf->vf_id, i);
- if (ice_vf_vsi_dis_single_txq(vf, vsi, i))
- dev_err(ice_pf_to_dev(pf), "VF-%d could not disable TX queue %d\n",
- vf->vf_id, i);
- }
-
- ice_lag_complete_vf_reset(pf->lag, act_prt);
- mutex_unlock(&pf->lag_mutex);
-
- ice_lag_move_new_vf_nodes(vf);
-
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
- VIRTCHNL_STATUS_ERR_PARAM, NULL, 0);
-}
-
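The RXDID handling described in the comment above is essentially a bitmap membership test against the descriptor IDs the PF advertises. A minimal sketch of the same logic factored out (the helper name is hypothetical):

/* Hypothetical helper: pick a flex RXDID only if the PF advertises it,
 * otherwise fall back to the 32-byte legacy descriptor format.
 */
static int pick_rxdid(u32 supported_rxdids, u32 requested, bool flex_ok,
		      u32 *rxdid)
{
	if (!flex_ok) {
		*rxdid = ICE_RXDID_LEGACY_1;
		return 0;
	}

	if (!(BIT(requested) & supported_rxdids))
		return -EINVAL;

	*rxdid = requested;
	return 0;
}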
-/**
* ice_can_vf_change_mac
* @vf: pointer to the VF info
*
@@ -2531,66 +1022,6 @@ static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
}
/**
- * ice_vc_request_qs_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * VFs get a default number of queues but can use this message to request a
- * different number. If the request is successful, the PF resets the VF and
- * returns 0. If unsuccessful, the PF sends a virtchnl response informing the
- * VF of the number of available queue pairs.
- */
-static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_vf_res_request *vfres =
- (struct virtchnl_vf_res_request *)msg;
- u16 req_queues = vfres->num_queue_pairs;
- struct ice_pf *pf = vf->pf;
- u16 max_allowed_vf_queues;
- u16 tx_rx_queue_left;
- struct device *dev;
- u16 cur_queues;
-
- dev = ice_pf_to_dev(pf);
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- cur_queues = vf->num_vf_qs;
- tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
- ice_get_avail_rxq_count(pf));
- max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
- if (!req_queues) {
- dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
- vf->vf_id);
- } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
- dev_err(dev, "VF %d tried to request more than %d queues.\n",
- vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
- vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
- } else if (req_queues > cur_queues &&
- req_queues - cur_queues > tx_rx_queue_left) {
- dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
- vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
- vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
- ICE_MAX_RSS_QS_PER_VF);
- } else {
- /* request is successful, then reset VF */
- vf->num_req_qs = req_queues;
- ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
- dev_info(dev, "VF %d granted request of %u queues.\n",
- vf->vf_id, req_queues);
- return 0;
- }
-
-error_param:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
- v_ret, (u8 *)vfres, sizeof(*vfres));
-}
-
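As a worked example of the clamping above (hypothetical numbers, and assuming ICE_MAX_RSS_QS_PER_VF is 16): a VF that currently owns 4 queue pairs while 8 Tx/Rx pairs remain free can grow by at most 8, so a request for 16 pairs fails the `req_queues - cur_queues > tx_rx_queue_left` test and the response instead advertises min(8 + 4, 16) = 12 available pairs.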
-/**
* ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
* @caps: VF driver negotiated capabilities
*
@@ -2983,112 +1414,6 @@ error_param:
}
/**
- * ice_vc_get_rss_hashcfg - return the RSS Hash configuration
- * @vf: pointer to the VF info
- */
-static int ice_vc_get_rss_hashcfg(struct ice_vf *vf)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_rss_hashcfg *vrh = NULL;
- int len = 0, ret;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- dev_err(ice_pf_to_dev(vf->pf), "RSS not supported by PF\n");
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- len = sizeof(struct virtchnl_rss_hashcfg);
- vrh = kzalloc(len, GFP_KERNEL);
- if (!vrh) {
- v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
- len = 0;
- goto err;
- }
-
- vrh->hashcfg = ICE_DEFAULT_RSS_HASHCFG;
-err:
- /* send the response back to the VF */
- ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS, v_ret,
- (u8 *)vrh, len);
- kfree(vrh);
- return ret;
-}
-
-/**
- * ice_vc_set_rss_hashcfg - set RSS Hash configuration bits for the VF
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- */
-static int ice_vc_set_rss_hashcfg(struct ice_vf *vf, u8 *msg)
-{
- struct virtchnl_rss_hashcfg *vrh = (struct virtchnl_rss_hashcfg *)msg;
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
- struct device *dev;
- int status;
-
- dev = ice_pf_to_dev(pf);
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
- dev_err(dev, "RSS not supported by PF\n");
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- /* clear all previously programmed RSS configuration to allow VF drivers
- * the ability to customize the RSS configuration and/or completely
- * disable RSS
- */
- status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
- if (status && !vrh->hashcfg) {
- /* only report failure to clear the current RSS configuration if
- * that was clearly the VF's intention (i.e. vrh->hashcfg = 0)
- */
- v_ret = ice_err_to_virt_err(status);
- goto err;
- } else if (status) {
- /* allow the VF to update the RSS configuration even on failure
-	 * to clear the current RSS configuration in an attempt to keep
- * RSS in a working state
- */
- dev_warn(dev, "Failed to clear the RSS configuration for VF %u\n",
- vf->vf_id);
- }
-
- if (vrh->hashcfg) {
- status = ice_add_avf_rss_cfg(&pf->hw, vsi, vrh->hashcfg);
- v_ret = ice_err_to_virt_err(status);
- }
-
- /* save the requested VF configuration */
- if (!v_ret)
- vf->rss_hashcfg = vrh->hashcfg;
-
- /* send the response to the VF */
-err:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_RSS_HASHCFG, v_ret,
- NULL, 0);
-}
-
-/**
* ice_vc_query_rxdid - query RXDID supported by DDP package
* @vf: pointer to VF info
*
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/virt/virtchnl.h
index 71bb456e2d71..71bb456e2d71 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h
+++ b/drivers/net/ethernet/intel/ice/virt/virtchnl.h
diff --git a/drivers/net/ethernet/intel/idpf/Kconfig b/drivers/net/ethernet/intel/idpf/Kconfig
index 2c359a8551c7..adab2154125b 100644
--- a/drivers/net/ethernet/intel/idpf/Kconfig
+++ b/drivers/net/ethernet/intel/idpf/Kconfig
@@ -6,7 +6,7 @@ config IDPF
depends on PCI_MSI
depends on PTP_1588_CLOCK_OPTIONAL
select DIMLIB
- select LIBETH
+ select LIBETH_XDP
help
This driver supports Intel(R) Infrastructure Data Path Function
devices.
diff --git a/drivers/net/ethernet/intel/idpf/Makefile b/drivers/net/ethernet/intel/idpf/Makefile
index 4ef4b2b5e37a..651ddee942bd 100644
--- a/drivers/net/ethernet/intel/idpf/Makefile
+++ b/drivers/net/ethernet/intel/idpf/Makefile
@@ -21,3 +21,6 @@ idpf-$(CONFIG_IDPF_SINGLEQ) += idpf_singleq_txrx.o
idpf-$(CONFIG_PTP_1588_CLOCK) += idpf_ptp.o
idpf-$(CONFIG_PTP_1588_CLOCK) += idpf_virtchnl_ptp.o
+
+idpf-y += xdp.o
+idpf-y += xsk.o
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index f4c0eaf9bde3..ca4da0c89979 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -40,6 +40,7 @@ struct idpf_vport_max_q;
#define IDPF_NUM_CHUNKS_PER_MSG(struct_sz, chunk_sz) \
((IDPF_CTLQ_MAX_BUF_LEN - (struct_sz)) / (chunk_sz))
+#define IDPF_WAIT_FOR_MARKER_TIMEO 500
#define IDPF_MAX_WAIT 500
/* available message levels */
@@ -148,6 +149,7 @@ enum idpf_vport_state {
* @link_speed_mbps: Link speed in mbps
* @vport_idx: Relative vport index
* @max_tx_hdr_size: Max header length hardware can support
+ * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
* @state: See enum idpf_vport_state
* @netstats: Packet and byte stats
* @stats_lock: Lock to protect stats update
@@ -159,6 +161,7 @@ struct idpf_netdev_priv {
u32 link_speed_mbps;
u16 vport_idx;
u16 max_tx_hdr_size;
+ u16 tx_max_bufs;
enum idpf_vport_state state;
struct rtnl_link_stats64 netstats;
spinlock_t stats_lock;
@@ -246,16 +249,28 @@ enum idpf_vport_reset_cause {
/**
* enum idpf_vport_flags - Vport flags
* @IDPF_VPORT_DEL_QUEUES: To send delete queues message
- * @IDPF_VPORT_SW_MARKER: Indicate TX pipe drain software marker packets
- * processing is done
* @IDPF_VPORT_FLAGS_NBITS: Must be last
*/
enum idpf_vport_flags {
IDPF_VPORT_DEL_QUEUES,
- IDPF_VPORT_SW_MARKER,
IDPF_VPORT_FLAGS_NBITS,
};
+/**
+ * struct idpf_tstamp_stats - Tx timestamp statistics
+ * @stats_sync: See struct u64_stats_sync
+ * @packets: Number of packets successfully timestamped by the hardware
+ * @discarded: Number of Tx skbs discarded due to cached PHC
+ * being too old to correctly extend timestamp
+ * @flushed: Number of Tx skbs flushed due to interface closed
+ */
+struct idpf_tstamp_stats {
+ struct u64_stats_sync stats_sync;
+ u64_stats_t packets;
+ u64_stats_t discarded;
+ u64_stats_t flushed;
+};
+
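These counters follow the usual u64_stats pattern. A minimal sketch of the writer side (the wrapper name is hypothetical; the APIs are from <linux/u64_stats_sync.h>):

/* Hypothetical helper showing the writer side; readers pair
 * u64_stats_fetch_begin()/u64_stats_fetch_retry() around
 * u64_stats_read(), as idpf_get_ts_stats() does later in this patch.
 */
static void idpf_tstamp_stats_inc_flushed(struct idpf_tstamp_stats *stats)
{
	u64_stats_update_begin(&stats->stats_sync);
	u64_stats_inc(&stats->flushed);
	u64_stats_update_end(&stats->stats_sync);
}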
struct idpf_port_stats {
struct u64_stats_sync stats_sync;
u64_stats_t rx_hw_csum_err;
@@ -287,6 +302,10 @@ struct idpf_fsteer_fltr {
* @txq_model: Split queue or single queue queuing model
* @txqs: Used only in hotpath to get to the right queue very fast
* @crc_enable: Enable CRC insertion offload
+ * @xdpsq_share: whether XDPSQ sharing is enabled
+ * @num_xdp_txq: number of XDPSQs
+ * @xdp_txq_offset: index of the first XDPSQ (== number of regular SQs)
+ * @xdp_prog: installed XDP program
* @num_rxq: Number of allocated RX queues
* @num_bufq: Number of allocated buffer queues
* @rxq_desc_count: RX queue descriptor count. *MUST* have enough descriptors
@@ -312,16 +331,19 @@ struct idpf_fsteer_fltr {
* @num_q_vectors: Number of IRQ vectors allocated
* @q_vectors: Array of queue vectors
* @q_vector_idxs: Starting index of queue vectors
+ * @noirq_dyn_ctl: register to enable/disable the vector for NOIRQ queues
+ * @noirq_dyn_ctl_ena: value to write to the above to enable it
+ * @noirq_v_idx: ID of the NOIRQ vector
* @max_mtu: device given max possible MTU
* @default_mac_addr: device will give a default MAC to use
* @rx_itr_profile: RX profiles for Dynamic Interrupt Moderation
* @tx_itr_profile: TX profiles for Dynamic Interrupt Moderation
* @port_stats: per port csum, header split, and other offload stats
* @link_up: True if link is up
- * @sw_marker_wq: workqueue for marker packets
* @tx_tstamp_caps: Capabilities negotiated for Tx timestamping
* @tstamp_config: The Tx tstamp config
* @tstamp_task: Tx timestamping task
+ * @tstamp_stats: Tx timestamping statistics
*/
struct idpf_vport {
u16 num_txq;
@@ -335,6 +357,11 @@ struct idpf_vport {
struct idpf_tx_queue **txqs;
bool crc_enable;
+ bool xdpsq_share;
+ u16 num_xdp_txq;
+ u16 xdp_txq_offset;
+ struct bpf_prog *xdp_prog;
+
u16 num_rxq;
u16 num_bufq;
u32 rxq_desc_count;
@@ -359,6 +386,11 @@ struct idpf_vport {
u16 num_q_vectors;
struct idpf_q_vector *q_vectors;
u16 *q_vector_idxs;
+
+ void __iomem *noirq_dyn_ctl;
+ u32 noirq_dyn_ctl_ena;
+ u16 noirq_v_idx;
+
u16 max_mtu;
u8 default_mac_addr[ETH_ALEN];
u16 rx_itr_profile[IDPF_DIM_PROFILE_SLOTS];
@@ -367,11 +399,10 @@ struct idpf_vport {
bool link_up;
- wait_queue_head_t sw_marker_wq;
-
struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps;
struct kernel_hwtstamp_config tstamp_config;
struct work_struct tstamp_task;
+ struct idpf_tstamp_stats tstamp_stats;
};
/**
@@ -433,6 +464,7 @@ struct idpf_q_coalesce {
* ethtool
* @num_req_rxq_desc: Number of user requested RX queue descriptors through
* ethtool
+ * @xdp_prog: requested XDP program to install
* @user_flags: User toggled config flags
* @mac_filter_list: List of MAC filters
* @num_fsteer_fltrs: number of flow steering filters
@@ -447,6 +479,7 @@ struct idpf_vport_user_config_data {
u16 num_req_rx_qs;
u32 num_req_txq_desc;
u32 num_req_rxq_desc;
+ struct bpf_prog *xdp_prog;
DECLARE_BITMAP(user_flags, __IDPF_USER_FLAGS_NBITS);
struct list_head mac_filter_list;
u32 num_fsteer_fltrs;
@@ -676,6 +709,11 @@ static inline int idpf_is_queue_model_split(u16 q_model)
q_model == VIRTCHNL2_QUEUE_MODEL_SPLIT;
}
+static inline bool idpf_xdp_enabled(const struct idpf_vport *vport)
+{
+ return vport->adapter && vport->xdp_prog;
+}
+
#define idpf_is_cap_ena(adapter, field, flag) \
idpf_is_capability_ena(adapter, false, field, flag)
#define idpf_is_cap_ena_all(adapter, field, flag) \
@@ -957,6 +995,13 @@ static inline void idpf_vport_ctrl_unlock(struct net_device *netdev)
mutex_unlock(&np->adapter->vport_ctrl_lock);
}
+static inline bool idpf_vport_ctrl_is_locked(struct net_device *netdev)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+
+ return mutex_is_locked(&np->adapter->vport_ctrl_lock);
+}
+
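One plausible use of the new helper is a lock assertion in paths that must hold the vport control lock. A sketch only (the wrapper name is hypothetical):

static inline void idpf_vport_ctrl_assert_locked(struct net_device *netdev)
{
	/* Sketch: warn once if a caller reached here without the lock */
	WARN_ON_ONCE(!idpf_vport_ctrl_is_locked(netdev));
}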
void idpf_statistics_task(struct work_struct *work);
void idpf_init_task(struct work_struct *work);
void idpf_service_task(struct work_struct *work);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c
index bfa60f7d43de..3a04a6bd0d7c 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c
@@ -77,7 +77,7 @@ static int idpf_intr_reg_init(struct idpf_vport *vport)
int num_vecs = vport->num_q_vectors;
struct idpf_vec_regs *reg_vals;
int num_regs, i, err = 0;
- u32 rx_itr, tx_itr;
+ u32 rx_itr, tx_itr, val;
u16 total_vecs;
total_vecs = idpf_get_reserved_vecs(vport->adapter);
@@ -121,6 +121,15 @@ static int idpf_intr_reg_init(struct idpf_vport *vport)
intr->tx_itr = idpf_get_reg_addr(adapter, tx_itr);
}
+ /* Data vector for NOIRQ queues */
+
+ val = reg_vals[vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC].dyn_ctl_reg;
+ vport->noirq_dyn_ctl = idpf_get_reg_addr(adapter, val);
+
+ val = PF_GLINT_DYN_CTL_WB_ON_ITR_M | PF_GLINT_DYN_CTL_INTENA_MSK_M |
+ FIELD_PREP(PF_GLINT_DYN_CTL_ITR_INDX_M, IDPF_NO_ITR_UPDATE_IDX);
+ vport->noirq_dyn_ctl_ena = val;
+
free_reg_vals:
kfree(reg_vals);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
index 0eb812ac19c2..a5a1eec9ade8 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
@@ -1245,8 +1245,8 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
*
* returns pointer to rx vector
*/
-static struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
- int q_num)
+struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
+ u32 q_num)
{
int q_grp, q_idx;
@@ -1266,8 +1266,8 @@ static struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
*
* returns pointer to tx vector
*/
-static struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport,
- int q_num)
+struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport,
+ u32 q_num)
{
int q_grp;
@@ -1685,6 +1685,61 @@ unlock:
return err;
}
+/**
+ * idpf_get_ts_stats - Collect HW tstamping statistics
+ * @netdev: network interface device structure
+ * @ts_stats: HW timestamping stats structure
+ *
+ * Collect HW timestamping statistics: packets successfully timestamped,
+ * packets discarded due to illegal values, packets flushed while releasing
+ * PTP, and packets skipped due to the lack of a free index.
+ */
+static void idpf_get_ts_stats(struct net_device *netdev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport *vport;
+ unsigned int start;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+ do {
+ start = u64_stats_fetch_begin(&vport->tstamp_stats.stats_sync);
+ ts_stats->pkts = u64_stats_read(&vport->tstamp_stats.packets);
+ ts_stats->lost = u64_stats_read(&vport->tstamp_stats.flushed);
+ ts_stats->err = u64_stats_read(&vport->tstamp_stats.discarded);
+ } while (u64_stats_fetch_retry(&vport->tstamp_stats.stats_sync, start));
+
+ if (np->state != __IDPF_VPORT_UP)
+ goto exit;
+
+ for (u16 i = 0; i < vport->num_txq_grp; i++) {
+ struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
+
+ for (u16 j = 0; j < txq_grp->num_txq; j++) {
+ struct idpf_tx_queue *txq = txq_grp->txqs[j];
+ struct idpf_tx_queue_stats *stats;
+ u64 ts;
+
+ if (!txq)
+ continue;
+
+ stats = &txq->q_stats;
+ do {
+ start = u64_stats_fetch_begin(&txq->stats_sync);
+
+ ts = u64_stats_read(&stats->tstamp_skipped);
+ } while (u64_stats_fetch_retry(&txq->stats_sync,
+ start));
+
+ ts_stats->lost += ts;
+ }
+ }
+
+exit:
+ idpf_vport_ctrl_unlock(netdev);
+}
+
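Once wired into ethtool_ops as below, these counters become visible from userspace; with a recent enough ethtool build this is typically queried via `ethtool --include-statistics -T <ifname>`, though exact flag support depends on the ethtool version.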
static const struct ethtool_ops idpf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE,
@@ -1711,6 +1766,7 @@ static const struct ethtool_ops idpf_ethtool_ops = {
.set_ringparam = idpf_set_ringparam,
.get_link_ksettings = idpf_get_link_ksettings,
.get_ts_info = idpf_get_ts_info,
+ .get_ts_stats = idpf_get_ts_stats,
};
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_idc.c b/drivers/net/ethernet/intel/idpf/idpf_idc.c
index 4d2905103215..7e20a07e98e5 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_idc.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_idc.c
@@ -247,10 +247,10 @@ static void idpf_unplug_aux_dev(struct auxiliary_device *adev)
if (!adev)
return;
+ ida_free(&idpf_idc_ida, adev->id);
+
auxiliary_device_delete(adev);
auxiliary_device_uninit(adev);
-
- ida_free(&idpf_idc_ida, adev->id);
}
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
index 7492d1713243..20d5af64e750 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
@@ -186,13 +186,17 @@ struct idpf_base_tx_desc {
__le64 qw1; /* type_cmd_offset_bsz_l2tag1 */
}; /* read used with buffer queues */
-struct idpf_splitq_tx_compl_desc {
+struct idpf_splitq_4b_tx_compl_desc {
/* qid=[10:0] comptype=[13:11] rsvd=[14] gen=[15] */
__le16 qid_comptype_gen;
union {
__le16 q_head; /* Queue head */
__le16 compl_tag; /* Completion tag */
} q_head_compl_tag;
+}; /* writeback used with completion queues */
+
+struct idpf_splitq_tx_compl_desc {
+ struct idpf_splitq_4b_tx_compl_desc common;
u8 ts[3];
u8 rsvd; /* Reserved */
}; /* writeback used with completion queues */
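Splitting the 4-byte head out this way keeps the wire layout intact: the full 8-byte writeback descriptor now simply nests the common part. Assuming no implicit padding (which holds for these __le16/u8 members), the sizes can be pinned with build-time checks such as:

/* static_assert() is available via <linux/build_bug.h> */
static_assert(sizeof(struct idpf_splitq_4b_tx_compl_desc) == 4);
static_assert(sizeof(struct idpf_splitq_tx_compl_desc) == 8);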
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 2c2a3e85d693..8a941f0fb048 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -4,6 +4,8 @@
#include "idpf.h"
#include "idpf_virtchnl.h"
#include "idpf_ptp.h"
+#include "xdp.h"
+#include "xsk.h"
static const struct net_device_ops idpf_netdev_ops;
@@ -776,6 +778,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
np->vport_idx = vport->idx;
np->vport_id = vport->vport_id;
np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter);
+ np->tx_max_bufs = idpf_get_max_tx_bufs(adapter);
spin_lock_init(&np->stats_lock);
@@ -834,6 +837,8 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
netdev->hw_features |= netdev->features | other_offloads;
netdev->vlan_features |= netdev->features | other_offloads;
netdev->hw_enc_features |= dflt_features | other_offloads;
+ idpf_xdp_set_features(vport);
+
idpf_set_ethtool_ops(netdev);
netif_set_affinity_auto(netdev);
SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
@@ -883,14 +888,18 @@ static void idpf_remove_features(struct idpf_vport *vport)
/**
* idpf_vport_stop - Disable a vport
* @vport: vport to disable
+ * @rtnl: whether to take RTNL lock
*/
-static void idpf_vport_stop(struct idpf_vport *vport)
+static void idpf_vport_stop(struct idpf_vport *vport, bool rtnl)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
if (np->state <= __IDPF_VPORT_DOWN)
return;
+ if (rtnl)
+ rtnl_lock();
+
netif_carrier_off(vport->netdev);
netif_tx_disable(vport->netdev);
@@ -909,9 +918,13 @@ static void idpf_vport_stop(struct idpf_vport *vport)
vport->link_up = false;
idpf_vport_intr_deinit(vport);
+ idpf_xdp_rxq_info_deinit_all(vport);
idpf_vport_queues_rel(vport);
idpf_vport_intr_rel(vport);
np->state = __IDPF_VPORT_DOWN;
+
+ if (rtnl)
+ rtnl_unlock();
}
/**
@@ -935,7 +948,7 @@ static int idpf_stop(struct net_device *netdev)
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
- idpf_vport_stop(vport);
+ idpf_vport_stop(vport, false);
idpf_vport_ctrl_unlock(netdev);
@@ -1028,7 +1041,7 @@ static void idpf_vport_dealloc(struct idpf_vport *vport)
idpf_idc_deinit_vport_aux_device(vport->vdev_info);
idpf_deinit_mac_addr(vport);
- idpf_vport_stop(vport);
+ idpf_vport_stop(vport, true);
if (!test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
idpf_decfg_netdev(vport);
@@ -1134,7 +1147,7 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
if (!vport)
return vport;
- num_max_q = max(max_q->max_txq, max_q->max_rxq);
+ num_max_q = max(max_q->max_txq, max_q->max_rxq) + IDPF_RESERVED_VECS;
if (!adapter->vport_config[idx]) {
struct idpf_vport_config *vport_config;
struct idpf_q_coalesce *q_coal;
@@ -1308,13 +1321,13 @@ static void idpf_restore_features(struct idpf_vport *vport)
*/
static int idpf_set_real_num_queues(struct idpf_vport *vport)
{
- int err;
+ int err, txq = vport->num_txq - vport->num_xdp_txq;
err = netif_set_real_num_rx_queues(vport->netdev, vport->num_rxq);
if (err)
return err;
- return netif_set_real_num_tx_queues(vport->netdev, vport->num_txq);
+ return netif_set_real_num_tx_queues(vport->netdev, txq);
}
/**
@@ -1369,8 +1382,9 @@ static void idpf_rx_init_buf_tail(struct idpf_vport *vport)
/**
* idpf_vport_open - Bring up a vport
* @vport: vport to bring up
+ * @rtnl: whether to take RTNL lock
*/
-static int idpf_vport_open(struct idpf_vport *vport)
+static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
struct idpf_adapter *adapter = vport->adapter;
@@ -1380,6 +1394,9 @@ static int idpf_vport_open(struct idpf_vport *vport)
if (np->state != __IDPF_VPORT_DOWN)
return -EBUSY;
+ if (rtnl)
+ rtnl_lock();
+
/* we do not allow interface up just yet */
netif_carrier_off(vport->netdev);
@@ -1387,7 +1404,7 @@ static int idpf_vport_open(struct idpf_vport *vport)
if (err) {
dev_err(&adapter->pdev->dev, "Failed to allocate interrupts for vport %u: %d\n",
vport->vport_id, err);
- return err;
+ goto err_rtnl_unlock;
}
err = idpf_vport_queues_alloc(vport);
@@ -1408,35 +1425,44 @@ static int idpf_vport_open(struct idpf_vport *vport)
goto queues_rel;
}
- err = idpf_rx_bufs_init_all(vport);
+ err = idpf_queue_reg_init(vport);
if (err) {
- dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n",
+ dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n",
vport->vport_id, err);
goto queues_rel;
}
- err = idpf_queue_reg_init(vport);
+ err = idpf_rx_bufs_init_all(vport);
if (err) {
- dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n",
+ dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n",
vport->vport_id, err);
goto queues_rel;
}
idpf_rx_init_buf_tail(vport);
+
+ err = idpf_xdp_rxq_info_init_all(vport);
+ if (err) {
+ netdev_err(vport->netdev,
+ "Failed to initialize XDP RxQ info for vport %u: %pe\n",
+ vport->vport_id, ERR_PTR(err));
+ goto intr_deinit;
+ }
+
idpf_vport_intr_ena(vport);
err = idpf_send_config_queues_msg(vport);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to configure queues for vport %u, %d\n",
vport->vport_id, err);
- goto intr_deinit;
+ goto rxq_deinit;
}
err = idpf_send_map_unmap_queue_vector_msg(vport, true);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to map queue vectors for vport %u: %d\n",
vport->vport_id, err);
- goto intr_deinit;
+ goto rxq_deinit;
}
err = idpf_send_enable_queues_msg(vport);
@@ -1474,6 +1500,9 @@ static int idpf_vport_open(struct idpf_vport *vport)
goto deinit_rss;
}
+ if (rtnl)
+ rtnl_unlock();
+
return 0;
deinit_rss:
@@ -1484,6 +1513,8 @@ disable_queues:
idpf_send_disable_queues_msg(vport);
unmap_queue_vectors:
idpf_send_map_unmap_queue_vector_msg(vport, false);
+rxq_deinit:
+ idpf_xdp_rxq_info_deinit_all(vport);
intr_deinit:
idpf_vport_intr_deinit(vport);
queues_rel:
@@ -1491,6 +1522,10 @@ queues_rel:
intr_rel:
idpf_vport_intr_rel(vport);
+err_rtnl_unlock:
+ if (rtnl)
+ rtnl_unlock();
+
return err;
}
@@ -1547,8 +1582,6 @@ void idpf_init_task(struct work_struct *work)
index = vport->idx;
vport_config = adapter->vport_config[index];
- init_waitqueue_head(&vport->sw_marker_wq);
-
spin_lock_init(&vport_config->mac_filter_list_lock);
INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list);
@@ -1571,7 +1604,7 @@ void idpf_init_task(struct work_struct *work)
np = netdev_priv(vport->netdev);
np->state = __IDPF_VPORT_DOWN;
if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED, vport_config->flags))
- idpf_vport_open(vport);
+ idpf_vport_open(vport, true);
/* Spawn and return 'idpf_init_task' work queue until all the
* default vports are created
@@ -1961,7 +1994,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
idpf_send_delete_queues_msg(vport);
} else {
set_bit(IDPF_VPORT_DEL_QUEUES, vport->flags);
- idpf_vport_stop(vport);
+ idpf_vport_stop(vport, false);
}
idpf_deinit_rss(vport);
@@ -1991,7 +2024,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
goto err_open;
if (current_state == __IDPF_VPORT_UP)
- err = idpf_vport_open(vport);
+ err = idpf_vport_open(vport, false);
goto free_vport;
@@ -2001,7 +2034,7 @@ err_reset:
err_open:
if (current_state == __IDPF_VPORT_UP)
- idpf_vport_open(vport);
+ idpf_vport_open(vport, false);
free_vport:
kfree(new_vport);
@@ -2239,7 +2272,7 @@ static int idpf_open(struct net_device *netdev)
if (err)
goto unlock;
- err = idpf_vport_open(vport);
+ err = idpf_vport_open(vport, false);
unlock:
idpf_vport_ctrl_unlock(netdev);
@@ -2272,6 +2305,92 @@ static int idpf_change_mtu(struct net_device *netdev, int new_mtu)
}
/**
+ * idpf_chk_tso_segment - Check skb is not using too many buffers
+ * @skb: send buffer
+ * @max_bufs: maximum number of buffers
+ *
+ * For TSO we need to count the TSO header and segment payload separately. As
+ * such we need to check cases where we have max_bufs-1 fragments or more as we
+ * can potentially require max_bufs+1 DMA transactions, 1 for the TSO header, 1
+ * for the segment payload in the first descriptor, and another max_bufs-1 for
+ * the fragments.
+ *
+ * Returns true if the packet needs to be software segmented by core stack.
+ */
+static bool idpf_chk_tso_segment(const struct sk_buff *skb,
+ unsigned int max_bufs)
+{
+ const struct skb_shared_info *shinfo = skb_shinfo(skb);
+ const skb_frag_t *frag, *stale;
+ int nr_frags, sum;
+
+ /* no need to check if number of frags is less than max_bufs - 1 */
+ nr_frags = shinfo->nr_frags;
+ if (nr_frags < (max_bufs - 1))
+ return false;
+
+ /* We need to walk through the list and validate that each group
+ * of max_bufs-2 fragments totals at least gso_size.
+ */
+ nr_frags -= max_bufs - 2;
+ frag = &shinfo->frags[0];
+
+	/* Initialize sum to one minus gso_size: the worst case scenario in
+	 * which the frag ahead of us only provides one byte, which is why we
+	 * are limited to max_bufs-2 descriptors for a single transmit, as
+	 * the header and previous fragment are already consuming 2
+	 * descriptors.
+	 */
+ sum = 1 - shinfo->gso_size;
+
+ /* Add size of frags 0 through 4 to create our initial sum */
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+
+ /* Walk through fragments adding latest fragment, testing it, and
+ * then removing stale fragments from the sum.
+ */
+ for (stale = &shinfo->frags[0];; stale++) {
+ int stale_size = skb_frag_size(stale);
+
+ sum += skb_frag_size(frag++);
+
+ /* The stale fragment may present us with a smaller
+ * descriptor than the actual fragment size. To account
+ * for that we need to remove all the data on the front and
+ * figure out what the remainder would be in the last
+ * descriptor associated with the fragment.
+ */
+ if (stale_size > IDPF_TX_MAX_DESC_DATA) {
+ int align_pad = -(skb_frag_off(stale)) &
+ (IDPF_TX_MAX_READ_REQ_SIZE - 1);
+
+ sum -= align_pad;
+ stale_size -= align_pad;
+
+ do {
+ sum -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
+ stale_size -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
+ } while (stale_size > IDPF_TX_MAX_DESC_DATA);
+ }
+
+ /* if sum is negative we failed to make sufficient progress */
+ if (sum < 0)
+ return true;
+
+ if (!nr_frags--)
+ break;
+
+ sum -= stale_size;
+ }
+
+ return false;
+}
+
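As a worked example of the sliding window above (hypothetical numbers): with max_bufs = 8, each window spans max_bufs - 2 = 6 fragments, so for gso_size = 2000 a run of six 256-byte fragments sums to only 1536 bytes, driving sum to 1 - 2000 + 1536 = -463; the function then returns true and the core stack software-segments the skb.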
+/**
* idpf_features_check - Validate packet conforms to limits
* @skb: skb buffer
* @netdev: This port's netdev
@@ -2292,12 +2411,15 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb,
if (skb->ip_summed != CHECKSUM_PARTIAL)
return features;
- /* We cannot support GSO if the MSS is going to be less than
- * 88 bytes. If it is then we need to drop support for GSO.
- */
- if (skb_is_gso(skb) &&
- (skb_shinfo(skb)->gso_size < IDPF_TX_TSO_MIN_MSS))
- features &= ~NETIF_F_GSO_MASK;
+ if (skb_is_gso(skb)) {
+ /* We cannot support GSO if the MSS is going to be less than
+ * 88 bytes. If it is then we need to drop support for GSO.
+ */
+ if (skb_shinfo(skb)->gso_size < IDPF_TX_TSO_MIN_MSS)
+ features &= ~NETIF_F_GSO_MASK;
+ else if (idpf_chk_tso_segment(skb, np->tx_max_bufs))
+ features &= ~NETIF_F_GSO_MASK;
+ }
/* Ensure MACLEN is <= 126 bytes (63 words) and not an odd size */
len = skb_network_offset(skb);
@@ -2344,6 +2466,7 @@ static int idpf_set_mac(struct net_device *netdev, void *p)
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_vport_config *vport_config;
struct sockaddr *addr = p;
+ u8 old_mac_addr[ETH_ALEN];
struct idpf_vport *vport;
int err = 0;
@@ -2367,17 +2490,19 @@ static int idpf_set_mac(struct net_device *netdev, void *p)
if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
goto unlock_mutex;
+ ether_addr_copy(old_mac_addr, vport->default_mac_addr);
+ ether_addr_copy(vport->default_mac_addr, addr->sa_data);
vport_config = vport->adapter->vport_config[vport->idx];
err = idpf_add_mac_filter(vport, np, addr->sa_data, false);
if (err) {
__idpf_del_mac_filter(vport_config, addr->sa_data);
+ ether_addr_copy(vport->default_mac_addr, netdev->dev_addr);
goto unlock_mutex;
}
- if (is_valid_ether_addr(vport->default_mac_addr))
- idpf_del_mac_filter(vport, np, vport->default_mac_addr, false);
+ if (is_valid_ether_addr(old_mac_addr))
+ __idpf_del_mac_filter(vport_config, old_mac_addr);
- ether_addr_copy(vport->default_mac_addr, addr->sa_data);
eth_hw_addr_set(netdev, addr->sa_data);
unlock_mutex:
@@ -2492,4 +2617,7 @@ static const struct net_device_ops idpf_netdev_ops = {
.ndo_tx_timeout = idpf_tx_timeout,
.ndo_hwtstamp_get = idpf_hwtstamp_get,
.ndo_hwtstamp_set = idpf_hwtstamp_set,
+ .ndo_bpf = idpf_xdp,
+ .ndo_xdp_xmit = idpf_xdp_xmit,
+ .ndo_xsk_wakeup = idpf_xsk_wakeup,
};
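The new ndo_bpf/ndo_xdp_xmit/ndo_xsk_wakeup hooks are implemented in xdp.c and xsk.c, which this excerpt does not show. A minimal sketch of what such an idpf_xdp() dispatcher conventionally looks like (both setup helpers are assumptions, not confirmed by this hunk):

int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return idpf_xdp_setup_prog(dev, xdp);	/* assumed helper */
	case XDP_SETUP_XSK_POOL:
		return idpf_xsk_pool_setup(dev, xdp);	/* assumed helper */
	default:
		return -EINVAL;
	}
}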
diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c
index dfe9126f1f4a..8c46481d2e1f 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_main.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_main.c
@@ -9,6 +9,7 @@
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_IMPORT_NS("LIBETH");
+MODULE_IMPORT_NS("LIBETH_XDP");
MODULE_LICENSE("GPL");
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
index ee21f2ff0cad..142823af1f9e 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ptp.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.c
@@ -618,8 +618,13 @@ u64 idpf_ptp_extend_ts(struct idpf_vport *vport, u64 in_tstamp)
discard_time = ptp->cached_phc_jiffies + 2 * HZ;
- if (time_is_before_jiffies(discard_time))
+ if (time_is_before_jiffies(discard_time)) {
+ u64_stats_update_begin(&vport->tstamp_stats.stats_sync);
+ u64_stats_inc(&vport->tstamp_stats.discarded);
+ u64_stats_update_end(&vport->tstamp_stats.stats_sync);
+
return 0;
+ }
return idpf_ptp_tstamp_extend_32b_to_64b(ptp->cached_phc_time,
lower_32_bits(in_tstamp));
@@ -853,10 +858,14 @@ static void idpf_ptp_release_vport_tstamp(struct idpf_vport *vport)
/* Remove list with latches in use */
head = &vport->tx_tstamp_caps->latches_in_use;
+ u64_stats_update_begin(&vport->tstamp_stats.stats_sync);
list_for_each_entry_safe(ptp_tx_tstamp, tmp, head, list_member) {
+ u64_stats_inc(&vport->tstamp_stats.flushed);
+
list_del(&ptp_tx_tstamp->list_member);
kfree(ptp_tx_tstamp);
}
+ u64_stats_update_end(&vport->tstamp_stats.stats_sync);
spin_unlock_bh(&vport->tx_tstamp_caps->latches_lock);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index 555879b1248d..61e613066140 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -1,8 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */
-#include <net/libeth/rx.h>
-#include <net/libeth/tx.h>
+#include <net/libeth/xdp.h>
#include "idpf.h"
@@ -180,6 +179,58 @@ static int idpf_tx_singleq_csum(struct sk_buff *skb,
}
/**
+ * idpf_tx_singleq_dma_map_error - handle TX DMA map errors
+ * @txq: queue to send buffer on
+ * @skb: send buffer
+ * @first: original first buffer info buffer for packet
+ * @idx: starting point on ring to unwind
+ */
+static void idpf_tx_singleq_dma_map_error(struct idpf_tx_queue *txq,
+ struct sk_buff *skb,
+ struct idpf_tx_buf *first, u16 idx)
+{
+ struct libeth_sq_napi_stats ss = { };
+ struct libeth_cq_pp cp = {
+ .dev = txq->dev,
+ .ss = &ss,
+ };
+
+ u64_stats_update_begin(&txq->stats_sync);
+ u64_stats_inc(&txq->q_stats.dma_map_errs);
+ u64_stats_update_end(&txq->stats_sync);
+
+ /* clear dma mappings for failed tx_buf map */
+ for (;;) {
+ struct idpf_tx_buf *tx_buf;
+
+ tx_buf = &txq->tx_buf[idx];
+ libeth_tx_complete(tx_buf, &cp);
+ if (tx_buf == first)
+ break;
+ if (idx == 0)
+ idx = txq->desc_count;
+ idx--;
+ }
+
+ if (skb_is_gso(skb)) {
+ union idpf_tx_flex_desc *tx_desc;
+
+ /* If we failed a DMA mapping for a TSO packet, we will have
+ * used one additional descriptor for a context
+ * descriptor. Reset that here.
+ */
+ tx_desc = &txq->flex_tx[idx];
+ memset(tx_desc, 0, sizeof(*tx_desc));
+ if (idx == 0)
+ idx = txq->desc_count;
+ idx--;
+ }
+
+ /* Update tail in case netdev_xmit_more was previously true */
+ idpf_tx_buf_hw_update(txq, idx, false);
+}
+
+/**
* idpf_tx_singleq_map - Build the Tx base descriptor
* @tx_q: queue to send buffer on
* @first: first buffer info buffer to use
@@ -219,8 +270,9 @@ static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
- if (dma_mapping_error(tx_q->dev, dma))
- return idpf_tx_dma_map_error(tx_q, skb, first, i);
+ if (unlikely(dma_mapping_error(tx_q->dev, dma)))
+ return idpf_tx_singleq_dma_map_error(tx_q, skb,
+ first, i);
/* record length, and DMA address */
dma_unmap_len_set(tx_buf, len, size);
@@ -362,11 +414,11 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
{
struct idpf_tx_offload_params offload = { };
struct idpf_tx_buf *first;
+ u32 count, buf_count = 1;
int csum, tso, needed;
- unsigned int count;
__be16 protocol;
- count = idpf_tx_desc_count_required(tx_q, skb);
+ count = idpf_tx_res_count_required(tx_q, skb, &buf_count);
if (unlikely(!count))
return idpf_tx_drop_skb(tx_q, skb);
@@ -602,7 +654,7 @@ static void idpf_rx_singleq_csum(struct idpf_rx_queue *rxq,
bool ipv4, ipv6;
/* check if Rx checksum is enabled */
- if (!libeth_rx_pt_has_checksum(rxq->netdev, decoded))
+ if (!libeth_rx_pt_has_checksum(rxq->xdp_rxq.dev, decoded))
return;
/* check if HW has decoded the packet and checksum */
@@ -741,7 +793,7 @@ static void idpf_rx_singleq_base_hash(struct idpf_rx_queue *rx_q,
{
u64 mask, qw1;
- if (!libeth_rx_pt_has_hash(rx_q->netdev, decoded))
+ if (!libeth_rx_pt_has_hash(rx_q->xdp_rxq.dev, decoded))
return;
mask = VIRTCHNL2_RX_BASE_DESC_FLTSTAT_RSS_HASH_M;
@@ -769,7 +821,7 @@ static void idpf_rx_singleq_flex_hash(struct idpf_rx_queue *rx_q,
const union virtchnl2_rx_desc *rx_desc,
struct libeth_rx_pt decoded)
{
- if (!libeth_rx_pt_has_hash(rx_q->netdev, decoded))
+ if (!libeth_rx_pt_has_hash(rx_q->xdp_rxq.dev, decoded))
return;
if (FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_RSS_VALID_M,
@@ -781,7 +833,7 @@ static void idpf_rx_singleq_flex_hash(struct idpf_rx_queue *rx_q,
}
/**
- * idpf_rx_singleq_process_skb_fields - Populate skb header fields from Rx
+ * __idpf_rx_singleq_process_skb_fields - Populate skb header fields from Rx
* descriptor
* @rx_q: Rx ring being processed
* @skb: pointer to current skb being populated
@@ -793,17 +845,14 @@ static void idpf_rx_singleq_flex_hash(struct idpf_rx_queue *rx_q,
* other fields within the skb.
*/
static void
-idpf_rx_singleq_process_skb_fields(struct idpf_rx_queue *rx_q,
- struct sk_buff *skb,
- const union virtchnl2_rx_desc *rx_desc,
- u16 ptype)
+__idpf_rx_singleq_process_skb_fields(struct idpf_rx_queue *rx_q,
+ struct sk_buff *skb,
+ const union virtchnl2_rx_desc *rx_desc,
+ u16 ptype)
{
struct libeth_rx_pt decoded = rx_q->rx_ptype_lkup[ptype];
struct libeth_rx_csum csum_bits;
- /* modifies the skb - consumes the enet header */
- skb->protocol = eth_type_trans(skb, rx_q->netdev);
-
/* Check if we're using base mode descriptor IDs */
if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M) {
idpf_rx_singleq_base_hash(rx_q, skb, rx_desc, decoded);
@@ -814,7 +863,6 @@ idpf_rx_singleq_process_skb_fields(struct idpf_rx_queue *rx_q,
}
idpf_rx_singleq_csum(rx_q, skb, csum_bits, decoded);
- skb_record_rx_queue(skb, rx_q->idx);
}
/**
@@ -950,6 +998,32 @@ idpf_rx_singleq_extract_fields(const struct idpf_rx_queue *rx_q,
idpf_rx_singleq_extract_flex_fields(rx_desc, fields);
}
+static bool
+idpf_rx_singleq_process_skb_fields(struct sk_buff *skb,
+ const struct libeth_xdp_buff *xdp,
+ struct libeth_rq_napi_stats *rs)
+{
+ struct libeth_rqe_info fields;
+ struct idpf_rx_queue *rxq;
+
+ rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);
+
+ idpf_rx_singleq_extract_fields(rxq, xdp->desc, &fields);
+ __idpf_rx_singleq_process_skb_fields(rxq, skb, xdp->desc,
+ fields.ptype);
+
+ return true;
+}
+
+static void idpf_xdp_run_pass(struct libeth_xdp_buff *xdp,
+ struct napi_struct *napi,
+ struct libeth_rq_napi_stats *rs,
+ const union virtchnl2_rx_desc *desc)
+{
+ libeth_xdp_run_pass(xdp, NULL, napi, rs, desc, NULL,
+ idpf_rx_singleq_process_skb_fields);
+}
+
/**
* idpf_rx_singleq_clean - Reclaim resources after receive completes
* @rx_q: rx queue to clean
@@ -959,14 +1033,15 @@ idpf_rx_singleq_extract_fields(const struct idpf_rx_queue *rx_q,
*/
static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget)
{
- unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
- struct sk_buff *skb = rx_q->skb;
+ struct libeth_rq_napi_stats rs = { };
u16 ntc = rx_q->next_to_clean;
+ LIBETH_XDP_ONSTACK_BUFF(xdp);
u16 cleaned_count = 0;
- bool failure = false;
+
+ libeth_xdp_init_buff(xdp, &rx_q->xdp, &rx_q->xdp_rxq);
/* Process Rx packets bounded by budget */
- while (likely(total_rx_pkts < (unsigned int)budget)) {
+ while (likely(rs.packets < budget)) {
struct libeth_rqe_info fields = { };
union virtchnl2_rx_desc *rx_desc;
struct idpf_rx_buf *rx_buf;
@@ -993,73 +1068,41 @@ static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget)
idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields);
rx_buf = &rx_q->rx_buf[ntc];
- if (!libeth_rx_sync_for_cpu(rx_buf, fields.len))
- goto skip_data;
-
- if (skb)
- idpf_rx_add_frag(rx_buf, skb, fields.len);
- else
- skb = idpf_rx_build_skb(rx_buf, fields.len);
-
- /* exit if we failed to retrieve a buffer */
- if (!skb)
- break;
-
-skip_data:
+ libeth_xdp_process_buff(xdp, rx_buf, fields.len);
rx_buf->netmem = 0;
IDPF_SINGLEQ_BUMP_RING_IDX(rx_q, ntc);
cleaned_count++;
/* skip if it is non EOP desc */
- if (idpf_rx_singleq_is_non_eop(rx_desc) || unlikely(!skb))
+ if (idpf_rx_singleq_is_non_eop(rx_desc) ||
+ unlikely(!xdp->data))
continue;
#define IDPF_RXD_ERR_S FIELD_PREP(VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_M, \
VIRTCHNL2_RX_BASE_DESC_ERROR_RXE_M)
if (unlikely(idpf_rx_singleq_test_staterr(rx_desc,
IDPF_RXD_ERR_S))) {
- dev_kfree_skb_any(skb);
- skb = NULL;
+ libeth_xdp_return_buff_slow(xdp);
continue;
}
- /* pad skb if needed (to make valid ethernet frame) */
- if (eth_skb_pad(skb)) {
- skb = NULL;
- continue;
- }
-
- /* probably a little skewed due to removing CRC */
- total_rx_bytes += skb->len;
-
- /* protocol */
- idpf_rx_singleq_process_skb_fields(rx_q, skb, rx_desc,
- fields.ptype);
-
- /* send completed skb up the stack */
- napi_gro_receive(rx_q->pp->p.napi, skb);
- skb = NULL;
-
- /* update budget accounting */
- total_rx_pkts++;
+ idpf_xdp_run_pass(xdp, rx_q->pp->p.napi, &rs, rx_desc);
}
- rx_q->skb = skb;
-
rx_q->next_to_clean = ntc;
+ libeth_xdp_save_buff(&rx_q->xdp, xdp);
page_pool_nid_changed(rx_q->pp, numa_mem_id());
if (cleaned_count)
- failure = idpf_rx_singleq_buf_hw_alloc_all(rx_q, cleaned_count);
+ idpf_rx_singleq_buf_hw_alloc_all(rx_q, cleaned_count);
u64_stats_update_begin(&rx_q->stats_sync);
- u64_stats_add(&rx_q->q_stats.packets, total_rx_pkts);
- u64_stats_add(&rx_q->q_stats.bytes, total_rx_bytes);
+ u64_stats_add(&rx_q->q_stats.packets, rs.packets);
+ u64_stats_add(&rx_q->q_stats.bytes, rs.bytes);
u64_stats_update_end(&rx_q->stats_sync);
- /* guarantee a trip back through this routine if there was a failure */
- return failure ? budget : (int)total_rx_pkts;
+ return rs.packets;
}
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 66a1b040639d..828f7c444d30 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -1,52 +1,36 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */
-#include <net/libeth/rx.h>
-#include <net/libeth/tx.h>
-
#include "idpf.h"
#include "idpf_ptp.h"
#include "idpf_virtchnl.h"
+#include "xdp.h"
+#include "xsk.h"
-struct idpf_tx_stash {
- struct hlist_node hlist;
- struct libeth_sqe buf;
-};
-
-#define idpf_tx_buf_compl_tag(buf) (*(u32 *)&(buf)->priv)
+#define idpf_tx_buf_next(buf) (*(u32 *)&(buf)->priv)
LIBETH_SQE_CHECK_PRIV(u32);
-static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
- unsigned int count);
-
/**
- * idpf_buf_lifo_push - push a buffer pointer onto stack
- * @stack: pointer to stack struct
- * @buf: pointer to buf to push
+ * idpf_chk_linearize - Check if skb exceeds max descriptors per packet
+ * @skb: send buffer
+ * @max_bufs: maximum scatter gather buffers for single packet
+ * @count: number of buffers this packet needs
*
- * Returns 0 on success, negative on failure
- **/
-static int idpf_buf_lifo_push(struct idpf_buf_lifo *stack,
- struct idpf_tx_stash *buf)
+ * Make sure we don't exceed maximum scatter gather buffers for a single
+ * packet.
+ * The TSO case has already been handled in idpf_features_check().
+ */
+static bool idpf_chk_linearize(const struct sk_buff *skb,
+ unsigned int max_bufs,
+ unsigned int count)
{
- if (unlikely(stack->top == stack->size))
- return -ENOSPC;
-
- stack->bufs[stack->top++] = buf;
-
- return 0;
-}
+ if (likely(count <= max_bufs))
+ return false;
-/**
- * idpf_buf_lifo_pop - pop a buffer pointer from stack
- * @stack: pointer to stack struct
- **/
-static struct idpf_tx_stash *idpf_buf_lifo_pop(struct idpf_buf_lifo *stack)
-{
- if (unlikely(!stack->top))
- return NULL;
+ if (skb_is_gso(skb))
+ return false;
- return stack->bufs[--stack->top];
+ return true;
}
/**
@@ -70,59 +54,42 @@ void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
}
}
-/**
- * idpf_tx_buf_rel_all - Free any empty Tx buffers
- * @txq: queue to be cleaned
- */
-static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
+static void idpf_tx_buf_clean(struct idpf_tx_queue *txq)
{
struct libeth_sq_napi_stats ss = { };
- struct idpf_buf_lifo *buf_stack;
- struct idpf_tx_stash *stash;
+ struct xdp_frame_bulk bq;
struct libeth_cq_pp cp = {
.dev = txq->dev,
+ .bq = &bq,
.ss = &ss,
};
- struct hlist_node *tmp;
- u32 i, tag;
- /* Buffers already cleared, nothing to do */
- if (!txq->tx_buf)
- return;
+ xdp_frame_bulk_init(&bq);
/* Free all the Tx buffer sk_buffs */
- for (i = 0; i < txq->desc_count; i++)
- libeth_tx_complete(&txq->tx_buf[i], &cp);
-
- kfree(txq->tx_buf);
- txq->tx_buf = NULL;
+ for (u32 i = 0; i < txq->buf_pool_size; i++)
+ libeth_tx_complete_any(&txq->tx_buf[i], &cp);
- if (!idpf_queue_has(FLOW_SCH_EN, txq))
- return;
+ xdp_flush_frame_bulk(&bq);
+}
- buf_stack = &txq->stash->buf_stack;
- if (!buf_stack->bufs)
+/**
+ * idpf_tx_buf_rel_all - Free any empty Tx buffers
+ * @txq: queue to be cleaned
+ */
+static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
+{
+ /* Buffers already cleared, nothing to do */
+ if (!txq->tx_buf)
return;
- /*
- * If a Tx timeout occurred, there are potentially still bufs in the
- * hash table, free them here.
- */
- hash_for_each_safe(txq->stash->sched_buf_hash, tag, tmp, stash,
- hlist) {
- if (!stash)
- continue;
-
- libeth_tx_complete(&stash->buf, &cp);
- hash_del(&stash->hlist);
- idpf_buf_lifo_push(buf_stack, stash);
- }
-
- for (i = 0; i < buf_stack->size; i++)
- kfree(buf_stack->bufs[i]);
+ if (idpf_queue_has(XSK, txq))
+ idpf_xsksq_clean(txq);
+ else
+ idpf_tx_buf_clean(txq);
- kfree(buf_stack->bufs);
- buf_stack->bufs = NULL;
+ kfree(txq->tx_buf);
+ txq->tx_buf = NULL;
}
/**
@@ -133,12 +100,24 @@ static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
*/
static void idpf_tx_desc_rel(struct idpf_tx_queue *txq)
{
+ bool xdp = idpf_queue_has(XDP, txq);
+
+ if (xdp)
+ libeth_xdpsq_deinit_timer(txq->timer);
+
idpf_tx_buf_rel_all(txq);
- netdev_tx_reset_subqueue(txq->netdev, txq->idx);
+
+ if (!xdp)
+ netdev_tx_reset_subqueue(txq->netdev, txq->idx);
+
+ idpf_xsk_clear_queue(txq, VIRTCHNL2_QUEUE_TYPE_TX);
if (!txq->desc_ring)
return;
+ if (!xdp && txq->refillq)
+ kfree(txq->refillq->ring);
+
dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma);
txq->desc_ring = NULL;
txq->next_to_use = 0;
@@ -153,12 +132,14 @@ static void idpf_tx_desc_rel(struct idpf_tx_queue *txq)
*/
static void idpf_compl_desc_rel(struct idpf_compl_queue *complq)
{
+ idpf_xsk_clear_queue(complq, VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
+
if (!complq->comp)
return;
dma_free_coherent(complq->netdev->dev.parent, complq->size,
- complq->comp, complq->dma);
- complq->comp = NULL;
+ complq->desc_ring, complq->dma);
+ complq->desc_ring = NULL;
complq->next_to_use = 0;
complq->next_to_clean = 0;
}
@@ -195,41 +176,18 @@ static void idpf_tx_desc_rel_all(struct idpf_vport *vport)
*/
static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q)
{
- struct idpf_buf_lifo *buf_stack;
- int buf_size;
- int i;
-
	/* Allocate bookkeeping buffers only. Buffers to be supplied to HW
* are allocated by kernel network stack and received as part of skb
*/
- buf_size = sizeof(struct idpf_tx_buf) * tx_q->desc_count;
- tx_q->tx_buf = kzalloc(buf_size, GFP_KERNEL);
+ if (idpf_queue_has(FLOW_SCH_EN, tx_q))
+ tx_q->buf_pool_size = U16_MAX;
+ else
+ tx_q->buf_pool_size = tx_q->desc_count;
+ tx_q->tx_buf = kcalloc(tx_q->buf_pool_size, sizeof(*tx_q->tx_buf),
+ GFP_KERNEL);
if (!tx_q->tx_buf)
return -ENOMEM;
- if (!idpf_queue_has(FLOW_SCH_EN, tx_q))
- return 0;
-
- buf_stack = &tx_q->stash->buf_stack;
-
- /* Initialize tx buf stack for out-of-order completions if
- * flow scheduling offload is enabled
- */
- buf_stack->bufs = kcalloc(tx_q->desc_count, sizeof(*buf_stack->bufs),
- GFP_KERNEL);
- if (!buf_stack->bufs)
- return -ENOMEM;
-
- buf_stack->size = tx_q->desc_count;
- buf_stack->top = tx_q->desc_count;
-
- for (i = 0; i < tx_q->desc_count; i++) {
- buf_stack->bufs[i] = kzalloc(sizeof(*buf_stack->bufs[i]),
- GFP_KERNEL);
- if (!buf_stack->bufs[i])
- return -ENOMEM;
- }
-
return 0;
}
@@ -244,6 +202,7 @@ static int idpf_tx_desc_alloc(const struct idpf_vport *vport,
struct idpf_tx_queue *tx_q)
{
struct device *dev = tx_q->dev;
+ struct idpf_sw_queue *refillq;
int err;
err = idpf_tx_buf_alloc_all(tx_q);
@@ -267,6 +226,33 @@ static int idpf_tx_desc_alloc(const struct idpf_vport *vport,
tx_q->next_to_clean = 0;
idpf_queue_set(GEN_CHK, tx_q);
+ idpf_xsk_setup_queue(vport, tx_q, VIRTCHNL2_QUEUE_TYPE_TX);
+
+ if (!idpf_queue_has(FLOW_SCH_EN, tx_q))
+ return 0;
+
+ refillq = tx_q->refillq;
+ refillq->desc_count = tx_q->buf_pool_size;
+ refillq->ring = kcalloc(refillq->desc_count, sizeof(u32),
+ GFP_KERNEL);
+ if (!refillq->ring) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ for (unsigned int i = 0; i < refillq->desc_count; i++)
+ refillq->ring[i] =
+ FIELD_PREP(IDPF_RFL_BI_BUFID_M, i) |
+ FIELD_PREP(IDPF_RFL_BI_GEN_M,
+ idpf_queue_has(GEN_CHK, refillq));
+
+ /* Go ahead and flip the GEN bit since this counts as filling
+	 * up the ring, i.e. the ring has already wrapped.
+ */
+ idpf_queue_change(GEN_CHK, refillq);
+
+ tx_q->last_re = tx_q->desc_count - IDPF_TX_SPLITQ_RE_MIN_GAP;
+
return 0;
err_alloc:
@@ -285,18 +271,25 @@ err_alloc:
static int idpf_compl_desc_alloc(const struct idpf_vport *vport,
struct idpf_compl_queue *complq)
{
- complq->size = array_size(complq->desc_count, sizeof(*complq->comp));
+ u32 desc_size;
- complq->comp = dma_alloc_coherent(complq->netdev->dev.parent,
- complq->size, &complq->dma,
- GFP_KERNEL);
- if (!complq->comp)
+ desc_size = idpf_queue_has(FLOW_SCH_EN, complq) ?
+ sizeof(*complq->comp) : sizeof(*complq->comp_4b);
+ complq->size = array_size(complq->desc_count, desc_size);
+
+ complq->desc_ring = dma_alloc_coherent(complq->netdev->dev.parent,
+ complq->size, &complq->dma,
+ GFP_KERNEL);
+ if (!complq->desc_ring)
return -ENOMEM;
complq->next_to_use = 0;
complq->next_to_clean = 0;
idpf_queue_set(GEN_CHK, complq);
+ idpf_xsk_setup_queue(vport, complq,
+ VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
+
return 0;
}
@@ -317,8 +310,6 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
for (i = 0; i < vport->num_txq_grp; i++) {
for (j = 0; j < vport->txq_grps[i].num_txq; j++) {
struct idpf_tx_queue *txq = vport->txq_grps[i].txqs[j];
- u8 gen_bits = 0;
- u16 bufidx_mask;
err = idpf_tx_desc_alloc(vport, txq);
if (err) {
@@ -327,34 +318,6 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
i);
goto err_out;
}
-
- if (!idpf_is_queue_model_split(vport->txq_model))
- continue;
-
- txq->compl_tag_cur_gen = 0;
-
- /* Determine the number of bits in the bufid
- * mask and add one to get the start of the
- * generation bits
- */
- bufidx_mask = txq->desc_count - 1;
- while (bufidx_mask >> 1) {
- txq->compl_tag_gen_s++;
- bufidx_mask = bufidx_mask >> 1;
- }
- txq->compl_tag_gen_s++;
-
- gen_bits = IDPF_TX_SPLITQ_COMPL_TAG_WIDTH -
- txq->compl_tag_gen_s;
- txq->compl_tag_gen_max = GETMAXVAL(gen_bits);
-
- /* Set bufid mask based on location of first
- * gen bit; it cannot simply be the descriptor
- * ring size-1 since we can have size values
- * where not all of those bits are set.
- */
- txq->compl_tag_bufid_m =
- GETMAXVAL(txq->compl_tag_gen_s);
}
if (!idpf_is_queue_model_split(vport->txq_model))
@@ -426,6 +389,11 @@ static void idpf_rx_buf_rel_bufq(struct idpf_buf_queue *bufq)
if (!bufq->buf)
return;
+ if (idpf_queue_has(XSK, bufq)) {
+ idpf_xskfq_rel(bufq);
+ return;
+ }
+
/* Free all the bufs allocated and given to hw on Rx queue */
for (u32 i = 0; i < bufq->desc_count; i++)
idpf_rx_page_rel(&bufq->buf[i]);
@@ -474,14 +442,14 @@ static void idpf_rx_desc_rel(struct idpf_rx_queue *rxq, struct device *dev,
if (!rxq)
return;
- if (rxq->skb) {
- dev_kfree_skb_any(rxq->skb);
- rxq->skb = NULL;
- }
+ if (!idpf_queue_has(XSK, rxq))
+ libeth_xdp_return_stash(&rxq->xdp);
if (!idpf_is_queue_model_split(model))
idpf_rx_buf_rel_all(rxq);
+ idpf_xsk_clear_queue(rxq, VIRTCHNL2_QUEUE_TYPE_RX);
+
rxq->next_to_alloc = 0;
rxq->next_to_clean = 0;
rxq->next_to_use = 0;
@@ -504,6 +472,7 @@ static void idpf_rx_desc_rel_bufq(struct idpf_buf_queue *bufq,
return;
idpf_rx_buf_rel_bufq(bufq);
+ idpf_xsk_clear_queue(bufq, VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
bufq->next_to_alloc = 0;
bufq->next_to_clean = 0;
@@ -586,6 +555,7 @@ static int idpf_rx_hdr_buf_alloc_all(struct idpf_buf_queue *bufq)
struct libeth_fq fq = {
.count = bufq->desc_count,
.type = LIBETH_FQE_HDR,
+ .xdp = idpf_xdp_enabled(bufq->q_vector->vport),
.nid = idpf_q_vector_to_mem(bufq->q_vector),
};
int ret;
@@ -603,18 +573,18 @@ static int idpf_rx_hdr_buf_alloc_all(struct idpf_buf_queue *bufq)
}
/**
- * idpf_rx_post_buf_refill - Post buffer id to refill queue
+ * idpf_post_buf_refill - Post buffer id to refill queue
* @refillq: refill queue to post to
* @buf_id: buffer id to post
*/
-static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
+static void idpf_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
{
u32 nta = refillq->next_to_use;
/* store the buffer ID and the SW maintained GEN bit to the refillq */
refillq->ring[nta] =
- FIELD_PREP(IDPF_RX_BI_BUFID_M, buf_id) |
- FIELD_PREP(IDPF_RX_BI_GEN_M,
+ FIELD_PREP(IDPF_RFL_BI_BUFID_M, buf_id) |
+ FIELD_PREP(IDPF_RFL_BI_GEN_M,
idpf_queue_has(GEN_CHK, refillq));
if (unlikely(++nta == refillq->desc_count)) {
@@ -785,10 +755,14 @@ static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq,
.count = bufq->desc_count,
.type = type,
.hsplit = idpf_queue_has(HSPLIT_EN, bufq),
+ .xdp = idpf_xdp_enabled(bufq->q_vector->vport),
.nid = idpf_q_vector_to_mem(bufq->q_vector),
};
int ret;
+ if (idpf_queue_has(XSK, bufq))
+ return idpf_xskfq_init(bufq);
+
ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi);
if (ret)
return ret;
@@ -812,6 +786,8 @@ int idpf_rx_bufs_init_all(struct idpf_vport *vport)
bool split = idpf_is_queue_model_split(vport->rxq_model);
int i, j, err;
+ idpf_xdp_copy_prog_to_rqs(vport, vport->xdp_prog);
+
for (i = 0; i < vport->num_rxq_grp; i++) {
struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
u32 truesize = 0;
@@ -882,6 +858,8 @@ static int idpf_rx_desc_alloc(const struct idpf_vport *vport,
rxq->next_to_use = 0;
idpf_queue_set(GEN_CHK, rxq);
+ idpf_xsk_setup_queue(vport, rxq, VIRTCHNL2_QUEUE_TYPE_RX);
+
return 0;
}
@@ -907,9 +885,10 @@ static int idpf_bufq_desc_alloc(const struct idpf_vport *vport,
bufq->next_to_alloc = 0;
bufq->next_to_clean = 0;
bufq->next_to_use = 0;
-
idpf_queue_set(GEN_CHK, bufq);
+ idpf_xsk_setup_queue(vport, bufq, VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
+
return 0;
}
@@ -975,6 +954,341 @@ err_out:
return err;
}
+static int idpf_init_queue_set(const struct idpf_queue_set *qs)
+{
+ const struct idpf_vport *vport = qs->vport;
+ bool splitq;
+ int err;
+
+ splitq = idpf_is_queue_model_split(vport->rxq_model);
+
+ for (u32 i = 0; i < qs->num; i++) {
+ const struct idpf_queue_ptr *q = &qs->qs[i];
+ struct idpf_buf_queue *bufq;
+
+ switch (q->type) {
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ err = idpf_rx_desc_alloc(vport, q->rxq);
+ if (err)
+ break;
+
+ err = idpf_xdp_rxq_info_init(q->rxq);
+ if (err)
+ break;
+
+ if (!splitq)
+ err = idpf_rx_bufs_init_singleq(q->rxq);
+
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ bufq = q->bufq;
+
+ err = idpf_bufq_desc_alloc(vport, bufq);
+ if (err)
+ break;
+
+ for (u32 j = 0; j < bufq->q_vector->num_bufq; j++) {
+ struct idpf_buf_queue * const *bufqs;
+ enum libeth_fqe_type type;
+ u32 ts;
+
+ bufqs = bufq->q_vector->bufq;
+ if (bufqs[j] != bufq)
+ continue;
+
+ if (j) {
+ type = LIBETH_FQE_SHORT;
+ ts = bufqs[j - 1]->truesize >> 1;
+ } else {
+ type = LIBETH_FQE_MTU;
+ ts = 0;
+ }
+
+ bufq->truesize = ts;
+
+ err = idpf_rx_bufs_init(bufq, type);
+ break;
+ }
+
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ err = idpf_tx_desc_alloc(vport, q->txq);
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+ err = idpf_compl_desc_alloc(vport, q->complq);
+ break;
+ default:
+ continue;
+ }
+
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void idpf_clean_queue_set(const struct idpf_queue_set *qs)
+{
+ const struct idpf_vport *vport = qs->vport;
+ struct device *dev = vport->netdev->dev.parent;
+
+ for (u32 i = 0; i < qs->num; i++) {
+ const struct idpf_queue_ptr *q = &qs->qs[i];
+
+ switch (q->type) {
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ idpf_xdp_rxq_info_deinit(q->rxq, vport->rxq_model);
+ idpf_rx_desc_rel(q->rxq, dev, vport->rxq_model);
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ idpf_rx_desc_rel_bufq(q->bufq, dev);
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ idpf_tx_desc_rel(q->txq);
+
+ if (idpf_queue_has(XDP, q->txq)) {
+ q->txq->pending = 0;
+ q->txq->xdp_tx = 0;
+ } else {
+ q->txq->txq_grp->num_completions_pending = 0;
+ }
+
+ writel(q->txq->next_to_use, q->txq->tail);
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+ idpf_compl_desc_rel(q->complq);
+ q->complq->num_completions = 0;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void idpf_qvec_ena_irq(struct idpf_q_vector *qv)
+{
+ if (qv->num_txq) {
+ u32 itr;
+
+ if (IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode))
+ itr = qv->vport->tx_itr_profile[qv->tx_dim.profile_ix];
+ else
+ itr = qv->tx_itr_value;
+
+ idpf_vport_intr_write_itr(qv, itr, true);
+ }
+
+ if (qv->num_rxq) {
+ u32 itr;
+
+ if (IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode))
+ itr = qv->vport->rx_itr_profile[qv->rx_dim.profile_ix];
+ else
+ itr = qv->rx_itr_value;
+
+ idpf_vport_intr_write_itr(qv, itr, false);
+ }
+
+ if (qv->num_txq || qv->num_rxq)
+ idpf_vport_intr_update_itr_ena_irq(qv);
+}
+
+/**
+ * idpf_vector_to_queue_set - create a queue set associated with the given
+ * queue vector
+ * @qv: queue vector corresponding to the queue pair
+ *
+ * Returns a pointer to a dynamically allocated structure containing
+ * pointers to all the queues associated with a given queue vector (@qv).
+ * Note that the caller is responsible for freeing the memory allocated
+ * by this function with kfree().
+ *
+ * Return: &idpf_queue_set on success, %NULL in case of error.
+ */
+static struct idpf_queue_set *
+idpf_vector_to_queue_set(struct idpf_q_vector *qv)
+{
+ bool xdp = qv->vport->xdp_txq_offset && !qv->num_xsksq;
+ struct idpf_vport *vport = qv->vport;
+ struct idpf_queue_set *qs;
+ u32 num;
+
+ num = qv->num_rxq + qv->num_bufq + qv->num_txq + qv->num_complq;
+ num += xdp ? qv->num_rxq * 2 : qv->num_xsksq * 2;
+ if (!num)
+ return NULL;
+
+ qs = idpf_alloc_queue_set(vport, num);
+ if (!qs)
+ return NULL;
+
+ num = 0;
+
+ for (u32 i = 0; i < qv->num_bufq; i++) {
+ qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ qs->qs[num++].bufq = qv->bufq[i];
+ }
+
+ for (u32 i = 0; i < qv->num_rxq; i++) {
+ qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_RX;
+ qs->qs[num++].rxq = qv->rx[i];
+ }
+
+ for (u32 i = 0; i < qv->num_txq; i++) {
+ qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ qs->qs[num++].txq = qv->tx[i];
+ }
+
+ for (u32 i = 0; i < qv->num_complq; i++) {
+ qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ qs->qs[num++].complq = qv->complq[i];
+ }
+
+ if (!vport->xdp_txq_offset)
+ goto finalize;
+
+ if (xdp) {
+ for (u32 i = 0; i < qv->num_rxq; i++) {
+ u32 idx = vport->xdp_txq_offset + qv->rx[i]->idx;
+
+ qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ qs->qs[num++].txq = vport->txqs[idx];
+
+ qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ qs->qs[num++].complq = vport->txqs[idx]->complq;
+ }
+ } else {
+ for (u32 i = 0; i < qv->num_xsksq; i++) {
+ qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ qs->qs[num++].txq = qv->xsksq[i];
+
+ qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ qs->qs[num++].complq = qv->xsksq[i]->complq;
+ }
+ }
+
+finalize:
+ if (num != qs->num) {
+ kfree(qs);
+ return NULL;
+ }
+
+ return qs;
+}
+
+static int idpf_qp_enable(const struct idpf_queue_set *qs, u32 qid)
+{
+ struct idpf_vport *vport = qs->vport;
+ struct idpf_q_vector *q_vector;
+ int err;
+
+ q_vector = idpf_find_rxq_vec(vport, qid);
+
+ err = idpf_init_queue_set(qs);
+ if (err) {
+ netdev_err(vport->netdev, "Could not initialize queues in pair %u: %pe\n",
+ qid, ERR_PTR(err));
+ return err;
+ }
+
+ if (!vport->xdp_txq_offset)
+ goto config;
+
+ q_vector->xsksq = kcalloc(DIV_ROUND_UP(vport->num_rxq_grp,
+ vport->num_q_vectors),
+ sizeof(*q_vector->xsksq), GFP_KERNEL);
+ if (!q_vector->xsksq)
+ return -ENOMEM;
+
+ for (u32 i = 0; i < qs->num; i++) {
+ const struct idpf_queue_ptr *q = &qs->qs[i];
+
+ if (q->type != VIRTCHNL2_QUEUE_TYPE_TX)
+ continue;
+
+ if (!idpf_queue_has(XSK, q->txq))
+ continue;
+
+ idpf_xsk_init_wakeup(q_vector);
+
+ q->txq->q_vector = q_vector;
+ q_vector->xsksq[q_vector->num_xsksq++] = q->txq;
+ }
+
+config:
+ err = idpf_send_config_queue_set_msg(qs);
+ if (err) {
+ netdev_err(vport->netdev, "Could not configure queues in pair %u: %pe\n",
+ qid, ERR_PTR(err));
+ return err;
+ }
+
+ err = idpf_send_enable_queue_set_msg(qs);
+ if (err) {
+ netdev_err(vport->netdev, "Could not enable queues in pair %u: %pe\n",
+ qid, ERR_PTR(err));
+ return err;
+ }
+
+ napi_enable(&q_vector->napi);
+ idpf_qvec_ena_irq(q_vector);
+
+ netif_start_subqueue(vport->netdev, qid);
+
+ return 0;
+}
+
+static int idpf_qp_disable(const struct idpf_queue_set *qs, u32 qid)
+{
+ struct idpf_vport *vport = qs->vport;
+ struct idpf_q_vector *q_vector;
+ int err;
+
+ q_vector = idpf_find_rxq_vec(vport, qid);
+ netif_stop_subqueue(vport->netdev, qid);
+
+ writel(0, q_vector->intr_reg.dyn_ctl);
+ napi_disable(&q_vector->napi);
+
+ err = idpf_send_disable_queue_set_msg(qs);
+ if (err) {
+ netdev_err(vport->netdev, "Could not disable queues in pair %u: %pe\n",
+ qid, ERR_PTR(err));
+ return err;
+ }
+
+ idpf_clean_queue_set(qs);
+
+ kfree(q_vector->xsksq);
+ q_vector->num_xsksq = 0;
+
+ return 0;
+}
+
+/**
+ * idpf_qp_switch - enable or disable queues associated with queue pair
+ * @vport: vport to switch the pair for
+ * @qid: index of the queue pair to switch
+ * @en: whether to enable or disable the pair
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int idpf_qp_switch(struct idpf_vport *vport, u32 qid, bool en)
+{
+ struct idpf_q_vector *q_vector = idpf_find_rxq_vec(vport, qid);
+ struct idpf_queue_set *qs __free(kfree) = NULL;
+
+ if (idpf_find_txq_vec(vport, qid) != q_vector)
+ return -EINVAL;
+
+ qs = idpf_vector_to_queue_set(q_vector);
+ if (!qs)
+ return -ENOMEM;
+
+ return en ? idpf_qp_enable(qs, qid) : idpf_qp_disable(qs, qid);
+}
+
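idpf_qp_switch() above relies on the scope-based cleanup helper __free(kfree) from <linux/cleanup.h>: kfree() runs automatically when @qs goes out of scope, so neither the error paths nor the success path needs an explicit free. A minimal sketch of the idiom with a hypothetical buffer:

#include <linux/cleanup.h>
#include <linux/slab.h>

/* Sketch only: @buf is kfree()d automatically on every return path
 * once it leaves scope, exactly like @qs in idpf_qp_switch() above.
 */
static int example_with_auto_free(u32 n)
{
	u32 *buf __free(kfree) = kcalloc(n, sizeof(*buf), GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	for (u32 i = 0; i < n; i++)
		buf[i] = i;		/* use the buffer */

	return 0;			/* no explicit kfree() anywhere */
}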
/**
* idpf_txq_group_rel - Release all resources for txq groups
* @vport: vport to release txq groups on
@@ -995,6 +1309,11 @@ static void idpf_txq_group_rel(struct idpf_vport *vport)
struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
for (j = 0; j < txq_grp->num_txq; j++) {
+ if (flow_sch_en) {
+ kfree(txq_grp->txqs[j]->refillq);
+ txq_grp->txqs[j]->refillq = NULL;
+ }
+
kfree(txq_grp->txqs[j]);
txq_grp->txqs[j] = NULL;
}
@@ -1004,9 +1323,6 @@ static void idpf_txq_group_rel(struct idpf_vport *vport)
kfree(txq_grp->complq);
txq_grp->complq = NULL;
-
- if (flow_sch_en)
- kfree(txq_grp->stashes);
}
kfree(vport->txq_grps);
vport->txq_grps = NULL;
@@ -1088,8 +1404,12 @@ static void idpf_vport_queue_grp_rel_all(struct idpf_vport *vport)
*/
void idpf_vport_queues_rel(struct idpf_vport *vport)
{
+ idpf_xdp_copy_prog_to_rqs(vport, NULL);
+
idpf_tx_desc_rel_all(vport);
idpf_rx_desc_rel_all(vport);
+
+ idpf_xdpsqs_put(vport);
idpf_vport_queue_grp_rel_all(vport);
kfree(vport->txqs);
@@ -1163,6 +1483,18 @@ void idpf_vport_init_num_qs(struct idpf_vport *vport,
if (idpf_is_queue_model_split(vport->rxq_model))
vport->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq);
+ vport->xdp_prog = config_data->xdp_prog;
+ if (idpf_xdp_enabled(vport)) {
+ vport->xdp_txq_offset = config_data->num_req_tx_qs;
+ vport->num_xdp_txq = le16_to_cpu(vport_msg->num_tx_q) -
+ vport->xdp_txq_offset;
+ vport->xdpsq_share = libeth_xdpsq_shared(vport->num_xdp_txq);
+ } else {
+ vport->xdp_txq_offset = 0;
+ vport->num_xdp_txq = 0;
+ vport->xdpsq_share = false;
+ }
+
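In other words, the Tx queue array is partitioned: the first num_req_tx_qs entries carry regular skb traffic and the remainder back XDP. A worked example with made-up numbers:

/* Illustrative numbers only: the CP granted 24 Tx queues total and the
 * user requested 16, so queues 16..23 are XDP send queues.
 */
static u32 example_xdp_layout(void)
{
	u32 num_tx_q = 24;		/* le16_to_cpu(vport_msg->num_tx_q) */
	u32 num_req_tx_qs = 16;		/* config_data->num_req_tx_qs */

	u32 xdp_txq_offset = num_req_tx_qs;		/* 16 */
	u32 num_xdp_txq = num_tx_q - xdp_txq_offset;	/* 8  */

	/* the XDP SQ paired with Rx queue @i is txqs[xdp_txq_offset + i],
	 * as used in idpf_vector_to_queue_set() above
	 */
	return num_xdp_txq;
}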
/* Adjust number of buffer queues per Rx queue group. */
if (!idpf_is_queue_model_split(vport->rxq_model)) {
vport->num_bufqs_per_qgrp = 0;
@@ -1234,22 +1566,17 @@ int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx,
int dflt_splitq_txq_grps = 0, dflt_singleq_txqs = 0;
int dflt_splitq_rxq_grps = 0, dflt_singleq_rxqs = 0;
u16 num_req_tx_qs = 0, num_req_rx_qs = 0;
+ struct idpf_vport_user_config_data *user;
struct idpf_vport_config *vport_config;
u16 num_txq_grps, num_rxq_grps;
- u32 num_qs;
+ u32 num_qs, num_xdpsq;
vport_config = adapter->vport_config[vport_idx];
if (vport_config) {
num_req_tx_qs = vport_config->user_config.num_req_tx_qs;
num_req_rx_qs = vport_config->user_config.num_req_rx_qs;
} else {
- int num_cpus;
-
- /* Restrict num of queues to cpus online as a default
- * configuration to give best performance. User can always
- * override to a max number of queues via ethtool.
- */
- num_cpus = num_online_cpus();
+ u32 num_cpus = netif_get_num_default_rss_queues();
dflt_splitq_txq_grps = min_t(int, max_q->max_txq, num_cpus);
dflt_singleq_txqs = min_t(int, max_q->max_txq, num_cpus);
@@ -1284,6 +1611,24 @@ int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx,
vport_msg->num_rx_bufq = 0;
}
+ if (!vport_config)
+ return 0;
+
+ user = &vport_config->user_config;
+ user->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q);
+ user->num_req_tx_qs = le16_to_cpu(vport_msg->num_tx_q);
+
+ if (vport_config->user_config.xdp_prog)
+ num_xdpsq = libeth_xdpsq_num(user->num_req_rx_qs,
+ user->num_req_tx_qs,
+ vport_config->max_q.max_txq);
+ else
+ num_xdpsq = 0;
+
+ vport_msg->num_tx_q = cpu_to_le16(user->num_req_tx_qs + num_xdpsq);
+ if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->txq_model)))
+ vport_msg->num_tx_complq = vport_msg->num_tx_q;
+
return 0;
}
@@ -1333,14 +1678,13 @@ static void idpf_vport_calc_numq_per_grp(struct idpf_vport *vport,
static void idpf_rxq_set_descids(const struct idpf_vport *vport,
struct idpf_rx_queue *q)
{
- if (idpf_is_queue_model_split(vport->rxq_model)) {
- q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
- } else {
- if (vport->base_rxd)
- q->rxdids = VIRTCHNL2_RXDID_1_32B_BASE_M;
- else
- q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
- }
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ return;
+
+ if (vport->base_rxd)
+ q->rxdids = VIRTCHNL2_RXDID_1_32B_BASE_M;
+ else
+ q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
}
/**
@@ -1367,7 +1711,6 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
for (i = 0; i < vport->num_txq_grp; i++) {
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
struct idpf_adapter *adapter = vport->adapter;
- struct idpf_txq_stash *stashes;
int j;
tx_qgrp->vport = vport;
@@ -1380,15 +1723,6 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
goto err_alloc;
}
- if (split && flow_sch_en) {
- stashes = kcalloc(num_txq, sizeof(*stashes),
- GFP_KERNEL);
- if (!stashes)
- goto err_alloc;
-
- tx_qgrp->stashes = stashes;
- }
-
for (j = 0; j < tx_qgrp->num_txq; j++) {
struct idpf_tx_queue *q = tx_qgrp->txqs[j];
@@ -1398,6 +1732,7 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter);
q->netdev = vport->netdev;
q->txq_grp = tx_qgrp;
+ q->rel_q_id = j;
if (!split) {
q->clean_budget = vport->compln_clean_budget;
@@ -1408,12 +1743,14 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
if (!flow_sch_en)
continue;
- if (split) {
- q->stash = &stashes[j];
- hash_init(q->stash->sched_buf_hash);
- }
-
idpf_queue_set(FLOW_SCH_EN, q);
+
+ q->refillq = kzalloc(sizeof(*q->refillq), GFP_KERNEL);
+ if (!q->refillq)
+ goto err_alloc;
+
+ idpf_queue_set(GEN_CHK, q->refillq);
+ idpf_queue_set(RFL_GEN_CHK, q->refillq);
}
if (!split)
@@ -1556,7 +1893,6 @@ skip_splitq_rx_init:
setup_rxq:
q->desc_count = vport->rxq_desc_count;
q->rx_ptype_lkup = vport->rx_ptype_lkup;
- q->netdev = vport->netdev;
q->bufq_sets = rx_qgrp->splitq.bufq_sets;
q->idx = (i * num_rxq) + j;
q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
@@ -1617,15 +1953,19 @@ int idpf_vport_queues_alloc(struct idpf_vport *vport)
if (err)
goto err_out;
- err = idpf_tx_desc_alloc_all(vport);
+ err = idpf_vport_init_fast_path_txqs(vport);
if (err)
goto err_out;
- err = idpf_rx_desc_alloc_all(vport);
+ err = idpf_xdpsqs_get(vport);
if (err)
goto err_out;
- err = idpf_vport_init_fast_path_txqs(vport);
+ err = idpf_tx_desc_alloc_all(vport);
+ if (err)
+ goto err_out;
+
+ err = idpf_rx_desc_alloc_all(vport);
if (err)
goto err_out;
@@ -1638,32 +1978,6 @@ err_out:
}
/**
- * idpf_tx_handle_sw_marker - Handle queue marker packet
- * @tx_q: tx queue to handle software marker
- */
-static void idpf_tx_handle_sw_marker(struct idpf_tx_queue *tx_q)
-{
- struct idpf_netdev_priv *priv = netdev_priv(tx_q->netdev);
- struct idpf_vport *vport = priv->vport;
- int i;
-
- idpf_queue_clear(SW_MARKER, tx_q);
- /* Hardware must write marker packets to all queues associated with
- * completion queues. So check if all queues received marker packets
- */
- for (i = 0; i < vport->num_txq; i++)
- /* If we're still waiting on any other TXQ marker completions,
- * just return now since we cannot wake up the marker_wq yet.
- */
- if (idpf_queue_has(SW_MARKER, vport->txqs[i]))
- return;
-
- /* Drain complete */
- set_bit(IDPF_VPORT_SW_MARKER, vport->flags);
- wake_up(&vport->sw_marker_wq);
-}
-
-/**
* idpf_tx_read_tstamp - schedule a work to read Tx timestamp value
* @txq: queue to read the timestamp from
* @skb: socket buffer to provide Tx timestamp value
@@ -1697,87 +2011,6 @@ static void idpf_tx_read_tstamp(struct idpf_tx_queue *txq, struct sk_buff *skb)
spin_unlock_bh(&tx_tstamp_caps->status_lock);
}
-/**
- * idpf_tx_clean_stashed_bufs - clean bufs that were stored for
- * out of order completions
- * @txq: queue to clean
- * @compl_tag: completion tag of packet to clean (from completion descriptor)
- * @cleaned: pointer to stats struct to track cleaned packets/bytes
- * @budget: Used to determine if we are in netpoll
- */
-static void idpf_tx_clean_stashed_bufs(struct idpf_tx_queue *txq,
- u16 compl_tag,
- struct libeth_sq_napi_stats *cleaned,
- int budget)
-{
- struct idpf_tx_stash *stash;
- struct hlist_node *tmp_buf;
- struct libeth_cq_pp cp = {
- .dev = txq->dev,
- .ss = cleaned,
- .napi = budget,
- };
-
- /* Buffer completion */
- hash_for_each_possible_safe(txq->stash->sched_buf_hash, stash, tmp_buf,
- hlist, compl_tag) {
- if (unlikely(idpf_tx_buf_compl_tag(&stash->buf) != compl_tag))
- continue;
-
- hash_del(&stash->hlist);
-
- if (stash->buf.type == LIBETH_SQE_SKB &&
- (skb_shinfo(stash->buf.skb)->tx_flags & SKBTX_IN_PROGRESS))
- idpf_tx_read_tstamp(txq, stash->buf.skb);
-
- libeth_tx_complete(&stash->buf, &cp);
-
- /* Push shadow buf back onto stack */
- idpf_buf_lifo_push(&txq->stash->buf_stack, stash);
- }
-}
-
-/**
- * idpf_stash_flow_sch_buffers - store buffer parameters info to be freed at a
- * later time (only relevant for flow scheduling mode)
- * @txq: Tx queue to clean
- * @tx_buf: buffer to store
- */
-static int idpf_stash_flow_sch_buffers(struct idpf_tx_queue *txq,
- struct idpf_tx_buf *tx_buf)
-{
- struct idpf_tx_stash *stash;
-
- if (unlikely(tx_buf->type <= LIBETH_SQE_CTX))
- return 0;
-
- stash = idpf_buf_lifo_pop(&txq->stash->buf_stack);
- if (unlikely(!stash)) {
- net_err_ratelimited("%s: No out-of-order TX buffers left!\n",
- netdev_name(txq->netdev));
-
- return -ENOMEM;
- }
-
- /* Store buffer params in shadow buffer */
- stash->buf.skb = tx_buf->skb;
- stash->buf.bytes = tx_buf->bytes;
- stash->buf.packets = tx_buf->packets;
- stash->buf.type = tx_buf->type;
- stash->buf.nr_frags = tx_buf->nr_frags;
- dma_unmap_addr_set(&stash->buf, dma, dma_unmap_addr(tx_buf, dma));
- dma_unmap_len_set(&stash->buf, len, dma_unmap_len(tx_buf, len));
- idpf_tx_buf_compl_tag(&stash->buf) = idpf_tx_buf_compl_tag(tx_buf);
-
- /* Add buffer to buf_hash table to be freed later */
- hash_add(txq->stash->sched_buf_hash, &stash->hlist,
- idpf_tx_buf_compl_tag(&stash->buf));
-
- tx_buf->type = LIBETH_SQE_EMPTY;
-
- return 0;
-}
-
#define idpf_tx_splitq_clean_bump_ntc(txq, ntc, desc, buf) \
do { \
if (unlikely(++(ntc) == (txq)->desc_count)) { \
@@ -1805,14 +2038,8 @@ do { \
* Separate packet completion events will be reported on the completion queue,
* and the buffers will be cleaned separately. The stats are not updated from
* this function when using flow-based scheduling.
- *
- * Furthermore, in flow scheduling mode, check to make sure there are enough
- * reserve buffers to stash the packet. If there are not, return early, which
- * will leave next_to_clean pointing to the packet that failed to be stashed.
- *
- * Return: false in the scenario above, true otherwise.
*/
-static bool idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
+static void idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
int napi_budget,
struct libeth_sq_napi_stats *cleaned,
bool descs_only)
@@ -1826,7 +2053,12 @@ static bool idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
.napi = napi_budget,
};
struct idpf_tx_buf *tx_buf;
- bool clean_complete = true;
+
+ if (descs_only) {
+ /* Bump ring index to mark as cleaned. */
+ tx_q->next_to_clean = end;
+ return;
+ }
tx_desc = &tx_q->flex_tx[ntc];
next_pending_desc = &tx_q->flex_tx[end];
@@ -1846,136 +2078,61 @@ static bool idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
break;
eop_idx = tx_buf->rs_idx;
+ libeth_tx_complete(tx_buf, &cp);
- if (descs_only) {
- if (IDPF_TX_BUF_RSV_UNUSED(tx_q) < tx_buf->nr_frags) {
- clean_complete = false;
- goto tx_splitq_clean_out;
- }
-
- idpf_stash_flow_sch_buffers(tx_q, tx_buf);
+ /* unmap remaining buffers */
+ while (ntc != eop_idx) {
+ idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
+ tx_desc, tx_buf);
- while (ntc != eop_idx) {
- idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
- tx_desc, tx_buf);
- idpf_stash_flow_sch_buffers(tx_q, tx_buf);
- }
- } else {
+ /* unmap any remaining paged data */
libeth_tx_complete(tx_buf, &cp);
-
- /* unmap remaining buffers */
- while (ntc != eop_idx) {
- idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
- tx_desc, tx_buf);
-
- /* unmap any remaining paged data */
- libeth_tx_complete(tx_buf, &cp);
- }
}
fetch_next_txq_desc:
idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, tx_desc, tx_buf);
}
-tx_splitq_clean_out:
tx_q->next_to_clean = ntc;
-
- return clean_complete;
}
-#define idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, buf) \
-do { \
- (buf)++; \
- (ntc)++; \
- if (unlikely((ntc) == (txq)->desc_count)) { \
- buf = (txq)->tx_buf; \
- ntc = 0; \
- } \
-} while (0)
-
/**
- * idpf_tx_clean_buf_ring - clean flow scheduling TX queue buffers
+ * idpf_tx_clean_bufs - clean flow scheduling TX queue buffers
* @txq: queue to clean
- * @compl_tag: completion tag of packet to clean (from completion descriptor)
+ * @buf_id: packet's starting buffer ID, from completion descriptor
* @cleaned: pointer to stats struct to track cleaned packets/bytes
* @budget: Used to determine if we are in netpoll
*
- * Cleans all buffers associated with the input completion tag either from the
- * TX buffer ring or from the hash table if the buffers were previously
- * stashed. Returns the byte/segment count for the cleaned packet associated
- * this completion tag.
+ * Clean all buffers associated with the packet starting at @buf_id. The
+ * byte/segment counts for the cleaned packet are accumulated in @cleaned.
*/
-static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag,
- struct libeth_sq_napi_stats *cleaned,
- int budget)
+static void idpf_tx_clean_bufs(struct idpf_tx_queue *txq, u32 buf_id,
+ struct libeth_sq_napi_stats *cleaned,
+ int budget)
{
- u16 idx = compl_tag & txq->compl_tag_bufid_m;
struct idpf_tx_buf *tx_buf = NULL;
struct libeth_cq_pp cp = {
.dev = txq->dev,
.ss = cleaned,
.napi = budget,
};
- u16 ntc, orig_idx = idx;
-
- tx_buf = &txq->tx_buf[idx];
-
- if (unlikely(tx_buf->type <= LIBETH_SQE_CTX ||
- idpf_tx_buf_compl_tag(tx_buf) != compl_tag))
- return false;
+ tx_buf = &txq->tx_buf[buf_id];
if (tx_buf->type == LIBETH_SQE_SKB) {
if (skb_shinfo(tx_buf->skb)->tx_flags & SKBTX_IN_PROGRESS)
idpf_tx_read_tstamp(txq, tx_buf->skb);
libeth_tx_complete(tx_buf, &cp);
+ idpf_post_buf_refill(txq->refillq, buf_id);
}
- idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
+ while (idpf_tx_buf_next(tx_buf) != IDPF_TXBUF_NULL) {
+ buf_id = idpf_tx_buf_next(tx_buf);
- while (idpf_tx_buf_compl_tag(tx_buf) == compl_tag) {
+ tx_buf = &txq->tx_buf[buf_id];
libeth_tx_complete(tx_buf, &cp);
- idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
+ idpf_post_buf_refill(txq->refillq, buf_id);
}
-
- /*
- * It's possible the packet we just cleaned was an out of order
- * completion, which means we can stash the buffers starting from
- * the original next_to_clean and reuse the descriptors. We need
- * to compare the descriptor ring next_to_clean packet's "first" buffer
- * to the "first" buffer of the packet we just cleaned to determine if
- * this is the case. Howevever, next_to_clean can point to either a
- * reserved buffer that corresponds to a context descriptor used for the
- * next_to_clean packet (TSO packet) or the "first" buffer (single
- * packet). The orig_idx from the packet we just cleaned will always
- * point to the "first" buffer. If next_to_clean points to a reserved
- * buffer, let's bump ntc once and start the comparison from there.
- */
- ntc = txq->next_to_clean;
- tx_buf = &txq->tx_buf[ntc];
-
- if (tx_buf->type == LIBETH_SQE_CTX)
- idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, tx_buf);
-
- /*
- * If ntc still points to a different "first" buffer, clean the
- * descriptor ring and stash all of the buffers for later cleaning. If
- * we cannot stash all of the buffers, next_to_clean will point to the
- * "first" buffer of the packet that could not be stashed and cleaning
- * will start there next time.
- */
- if (unlikely(tx_buf != &txq->tx_buf[orig_idx] &&
- !idpf_tx_splitq_clean(txq, orig_idx, budget, cleaned,
- true)))
- return true;
-
- /*
- * Otherwise, update next_to_clean to reflect the cleaning that was
- * done above.
- */
- txq->next_to_clean = idx;
-
- return true;
}
/**
@@ -1994,22 +2151,17 @@ static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq,
struct libeth_sq_napi_stats *cleaned,
int budget)
{
- u16 compl_tag;
+ /* An RS completion contains the queue head for queue-based scheduling
+ * or the completion tag for flow-based scheduling.
+ */
+ u16 rs_compl_val = le16_to_cpu(desc->common.q_head_compl_tag.q_head);
if (!idpf_queue_has(FLOW_SCH_EN, txq)) {
- u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head);
-
- idpf_tx_splitq_clean(txq, head, budget, cleaned, false);
+ idpf_tx_splitq_clean(txq, rs_compl_val, budget, cleaned, false);
return;
}
- compl_tag = le16_to_cpu(desc->q_head_compl_tag.compl_tag);
-
- /* If we didn't clean anything on the ring, this packet must be
- * in the hash table. Go clean it there.
- */
- if (!idpf_tx_clean_buf_ring(txq, compl_tag, cleaned, budget))
- idpf_tx_clean_stashed_bufs(txq, compl_tag, cleaned, budget);
+ idpf_tx_clean_bufs(txq, rs_compl_val, cleaned, budget);
}
/**
@@ -2037,19 +2189,19 @@ static bool idpf_tx_clean_complq(struct idpf_compl_queue *complq, int budget,
do {
struct libeth_sq_napi_stats cleaned_stats = { };
struct idpf_tx_queue *tx_q;
+ __le16 hw_head;
int rel_tx_qid;
- u16 hw_head;
u8 ctype; /* completion type */
u16 gen;
/* if the descriptor isn't done, no work yet to do */
- gen = le16_get_bits(tx_desc->qid_comptype_gen,
+ gen = le16_get_bits(tx_desc->common.qid_comptype_gen,
IDPF_TXD_COMPLQ_GEN_M);
if (idpf_queue_has(GEN_CHK, complq) != gen)
break;
/* Find necessary info of TX queue to clean buffers */
- rel_tx_qid = le16_get_bits(tx_desc->qid_comptype_gen,
+ rel_tx_qid = le16_get_bits(tx_desc->common.qid_comptype_gen,
IDPF_TXD_COMPLQ_QID_M);
if (rel_tx_qid >= complq->txq_grp->num_txq ||
!complq->txq_grp->txqs[rel_tx_qid]) {
@@ -2059,22 +2211,19 @@ static bool idpf_tx_clean_complq(struct idpf_compl_queue *complq, int budget,
tx_q = complq->txq_grp->txqs[rel_tx_qid];
/* Determine completion type */
- ctype = le16_get_bits(tx_desc->qid_comptype_gen,
+ ctype = le16_get_bits(tx_desc->common.qid_comptype_gen,
IDPF_TXD_COMPLQ_COMPL_TYPE_M);
switch (ctype) {
case IDPF_TXD_COMPLT_RE:
- hw_head = le16_to_cpu(tx_desc->q_head_compl_tag.q_head);
+ hw_head = tx_desc->common.q_head_compl_tag.q_head;
- idpf_tx_splitq_clean(tx_q, hw_head, budget,
- &cleaned_stats, true);
+ idpf_tx_splitq_clean(tx_q, le16_to_cpu(hw_head),
+ budget, &cleaned_stats, true);
break;
case IDPF_TXD_COMPLT_RS:
idpf_tx_handle_rs_completion(tx_q, tx_desc,
&cleaned_stats, budget);
break;
- case IDPF_TXD_COMPLT_SW_MARKER:
- idpf_tx_handle_sw_marker(tx_q);
- break;
default:
netdev_err(tx_q->netdev,
"Unknown TX completion type: %d\n", ctype);
@@ -2126,8 +2275,7 @@ fetch_next_desc:
/* Update BQL */
nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
- dont_wake = !complq_ok || IDPF_TX_BUF_RSV_LOW(tx_q) ||
- np->state != __IDPF_VPORT_UP ||
+ dont_wake = !complq_ok || np->state != __IDPF_VPORT_UP ||
!netif_carrier_ok(tx_q->netdev);
/* Check if the TXQ needs to and can be restarted */
__netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes,
@@ -2148,6 +2296,69 @@ fetch_next_desc:
}
/**
+ * idpf_wait_for_sw_marker_completion - wait for SW marker of disabled Tx queue
+ * @txq: disabled Tx queue
+ *
+ * When a Tx queue is requested to be disabled, the CP sends a special
+ * completion descriptor called an "SW marker", meaning the queue is ready
+ * to be destroyed. If, for some reason, the marker is not received within
+ * 500 ms, stop polling so as not to hang the driver.
+ */
+void idpf_wait_for_sw_marker_completion(const struct idpf_tx_queue *txq)
+{
+ struct idpf_compl_queue *complq;
+ unsigned long timeout;
+ bool flow, gen_flag;
+ u32 ntc;
+
+ if (!idpf_queue_has(SW_MARKER, txq))
+ return;
+
+ complq = idpf_queue_has(XDP, txq) ? txq->complq : txq->txq_grp->complq;
+ ntc = complq->next_to_clean;
+
+ flow = idpf_queue_has(FLOW_SCH_EN, complq);
+ gen_flag = idpf_queue_has(GEN_CHK, complq);
+
+ timeout = jiffies + msecs_to_jiffies(IDPF_WAIT_FOR_MARKER_TIMEO);
+
+ do {
+ struct idpf_splitq_4b_tx_compl_desc *tx_desc;
+ struct idpf_tx_queue *target;
+ u32 ctype_gen, id;
+
+ tx_desc = flow ? &complq->comp[ntc].common :
+ &complq->comp_4b[ntc];
+ ctype_gen = le16_to_cpu(tx_desc->qid_comptype_gen);
+
+ if (!!(ctype_gen & IDPF_TXD_COMPLQ_GEN_M) != gen_flag) {
+ usleep_range(500, 1000);
+ continue;
+ }
+
+ if (FIELD_GET(IDPF_TXD_COMPLQ_COMPL_TYPE_M, ctype_gen) !=
+ IDPF_TXD_COMPLT_SW_MARKER)
+ goto next;
+
+ id = FIELD_GET(IDPF_TXD_COMPLQ_QID_M, ctype_gen);
+ target = complq->txq_grp->txqs[id];
+
+ idpf_queue_clear(SW_MARKER, target);
+ if (target == txq)
+ break;
+
+next:
+ if (unlikely(++ntc == complq->desc_count)) {
+ ntc = 0;
+ gen_flag = !gen_flag;
+ }
+ } while (time_before(jiffies, timeout));
+
+ idpf_queue_assign(GEN_CHK, complq, gen_flag);
+ complq->next_to_clean = ntc;
+}
+
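The function above is an instance of the common bounded-poll idiom: compute a jiffies deadline once, sleep briefly between checks, and give up via time_before() if the condition never fires. A generic sketch, with poll_cond() as a hypothetical stand-in for "SW marker seen":

#include <linux/delay.h>
#include <linux/jiffies.h>

/* Bounded-poll skeleton mirroring idpf_wait_for_sw_marker_completion(). */
static void example_bounded_poll(bool (*poll_cond)(void *ctx), void *ctx)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(500);

	do {
		if (poll_cond(ctx))
			break;

		usleep_range(500, 1000);	/* back off between checks */
	} while (time_before(jiffies, timeout));
}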
+/**
* idpf_tx_splitq_build_ctb - populate command tag and size for queue
* based scheduling descriptors
* @desc: descriptor to populate
@@ -2184,15 +2395,21 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag);
}
-/* Global conditions to tell whether the txq (and related resources)
- * has room to allow the use of "size" descriptors.
+/**
+ * idpf_txq_has_room - check if enough Tx splitq resources are available
+ * @tx_q: the queue to be checked
+ * @descs_needed: number of descriptors required for this packet
+ * @bufs_needed: number of Tx buffers required for this packet
+ *
+ * Return: 0 if no room available, 1 otherwise
*/
-static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 size)
+static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 descs_needed,
+ u32 bufs_needed)
{
- if (IDPF_DESC_UNUSED(tx_q) < size ||
+ if (IDPF_DESC_UNUSED(tx_q) < descs_needed ||
IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq) ||
- IDPF_TX_BUF_RSV_LOW(tx_q))
+ idpf_tx_splitq_get_free_bufs(tx_q->refillq) < bufs_needed)
return 0;
return 1;
}
@@ -2201,14 +2418,21 @@ static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 size)
* idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
* @tx_q: the queue to be checked
* @descs_needed: number of descriptors required for this packet
+ * @bufs_needed: number of buffers needed for this packet
*
- * Returns 0 if stop is not needed
+ * Return: 0 if stop is not needed
*/
static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
- unsigned int descs_needed)
+ u32 descs_needed,
+ u32 bufs_needed)
{
+ /* Since we have multiple resources to check for splitq, the
+ * start/stop thresholds become a boolean check instead of a
+ * count threshold.
+ */
if (netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
- idpf_txq_has_room(tx_q, descs_needed),
+ idpf_txq_has_room(tx_q, descs_needed,
+ bufs_needed),
1, 1))
return 0;
@@ -2250,14 +2474,16 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
}
/**
- * idpf_tx_desc_count_required - calculate number of Tx descriptors needed
+ * idpf_tx_res_count_required - get number of Tx resources needed for this pkt
* @txq: queue to send buffer on
* @skb: send buffer
+ * @bufs_needed: (output) number of buffers needed for this skb.
*
- * Returns number of data descriptors needed for this skb.
+ * Return: number of data descriptors needed for this skb; the buffer count
+ * is returned via @bufs_needed.
*/
-unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
- struct sk_buff *skb)
+unsigned int idpf_tx_res_count_required(struct idpf_tx_queue *txq,
+ struct sk_buff *skb,
+ u32 *bufs_needed)
{
const struct skb_shared_info *shinfo;
unsigned int count = 0, i;
@@ -2268,6 +2494,7 @@ unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
return count;
shinfo = skb_shinfo(skb);
+ *bufs_needed += shinfo->nr_frags;
for (i = 0; i < shinfo->nr_frags; i++) {
unsigned int size;
@@ -2297,71 +2524,89 @@ unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
}
/**
- * idpf_tx_dma_map_error - handle TX DMA map errors
- * @txq: queue to send buffer on
- * @skb: send buffer
- * @first: original first buffer info buffer for packet
- * @idx: starting point on ring to unwind
+ * idpf_tx_splitq_bump_ntu - adjust NTU and generation
+ * @txq: the tx ring to wrap
+ * @ntu: ring index to bump
*/
-void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
- struct idpf_tx_buf *first, u16 idx)
+static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_tx_queue *txq, u16 ntu)
{
- struct libeth_sq_napi_stats ss = { };
- struct libeth_cq_pp cp = {
- .dev = txq->dev,
- .ss = &ss,
- };
+ ntu++;
- u64_stats_update_begin(&txq->stats_sync);
- u64_stats_inc(&txq->q_stats.dma_map_errs);
- u64_stats_update_end(&txq->stats_sync);
+ if (ntu == txq->desc_count)
+ ntu = 0;
- /* clear dma mappings for failed tx_buf map */
- for (;;) {
- struct idpf_tx_buf *tx_buf;
+ return ntu;
+}
- tx_buf = &txq->tx_buf[idx];
- libeth_tx_complete(tx_buf, &cp);
- if (tx_buf == first)
- break;
- if (idx == 0)
- idx = txq->desc_count;
- idx--;
- }
+/**
+ * idpf_tx_get_free_buf_id - get a free buffer ID from the refill queue
+ * @refillq: refill queue to get buffer ID from
+ * @buf_id: return buffer ID
+ *
+ * Return: true if a buffer ID was found, false if not
+ */
+static bool idpf_tx_get_free_buf_id(struct idpf_sw_queue *refillq,
+ u32 *buf_id)
+{
+ u32 ntc = refillq->next_to_clean;
+ u32 refill_desc;
- if (skb_is_gso(skb)) {
- union idpf_tx_flex_desc *tx_desc;
+ refill_desc = refillq->ring[ntc];
- /* If we failed a DMA mapping for a TSO packet, we will have
- * used one additional descriptor for a context
- * descriptor. Reset that here.
- */
- tx_desc = &txq->flex_tx[idx];
- memset(tx_desc, 0, sizeof(*tx_desc));
- if (idx == 0)
- idx = txq->desc_count;
- idx--;
+ if (unlikely(idpf_queue_has(RFL_GEN_CHK, refillq) !=
+ !!(refill_desc & IDPF_RFL_BI_GEN_M)))
+ return false;
+
+ *buf_id = FIELD_GET(IDPF_RFL_BI_BUFID_M, refill_desc);
+
+ if (unlikely(++ntc == refillq->desc_count)) {
+ idpf_queue_change(RFL_GEN_CHK, refillq);
+ ntc = 0;
}
- /* Update tail in case netdev_xmit_more was previously true */
- idpf_tx_buf_hw_update(txq, idx, false);
+ refillq->next_to_clean = ntc;
+
+ return true;
}
/**
- * idpf_tx_splitq_bump_ntu - adjust NTU and generation
- * @txq: the tx ring to wrap
- * @ntu: ring index to bump
+ * idpf_tx_splitq_pkt_err_unmap - Unmap buffers and bump tail in case of error
+ * @txq: Tx queue to unwind
+ * @params: pointer to splitq params struct
+ * @first: starting buffer for packet to unmap
*/
-static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_tx_queue *txq, u16 ntu)
+static void idpf_tx_splitq_pkt_err_unmap(struct idpf_tx_queue *txq,
+ struct idpf_tx_splitq_params *params,
+ struct idpf_tx_buf *first)
{
- ntu++;
+ struct idpf_sw_queue *refillq = txq->refillq;
+ struct libeth_sq_napi_stats ss = { };
+ struct idpf_tx_buf *tx_buf = first;
+ struct libeth_cq_pp cp = {
+ .dev = txq->dev,
+ .ss = &ss,
+ };
- if (ntu == txq->desc_count) {
- ntu = 0;
- txq->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(txq);
+ u64_stats_update_begin(&txq->stats_sync);
+ u64_stats_inc(&txq->q_stats.dma_map_errs);
+ u64_stats_update_end(&txq->stats_sync);
+
+ libeth_tx_complete(tx_buf, &cp);
+ while (idpf_tx_buf_next(tx_buf) != IDPF_TXBUF_NULL) {
+ tx_buf = &txq->tx_buf[idpf_tx_buf_next(tx_buf)];
+ libeth_tx_complete(tx_buf, &cp);
}
- return ntu;
+ /* Update tail in case netdev_xmit_more was previously true. */
+ idpf_tx_buf_hw_update(txq, params->prev_ntu, false);
+
+ if (!refillq)
+ return;
+
+ /* Restore refillq state to avoid leaking tags. */
+ if (params->prev_refill_gen != idpf_queue_has(RFL_GEN_CHK, refillq))
+ idpf_queue_change(RFL_GEN_CHK, refillq);
+ refillq->next_to_clean = params->prev_refill_ntc;
}
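The restore step above undoes any buffer IDs popped from the refill queue for a packet that could not be mapped, so the tags are not leaked. Isolated, the snapshot/rollback pattern looks like this; the queue-flag helpers are the driver's own, while the snapshot struct is illustrative (the real code keeps prev_refill_gen/prev_refill_ntc in struct idpf_tx_splitq_params):

/* Illustrative snapshot of a refill queue's consumer state. */
struct refill_snapshot {
	u32	ntc;
	bool	gen;
};

static void refill_save(const struct idpf_sw_queue *refillq,
			struct refill_snapshot *s)
{
	s->ntc = refillq->next_to_clean;
	s->gen = idpf_queue_has(RFL_GEN_CHK, refillq);
}

static void refill_restore(struct idpf_sw_queue *refillq,
			   const struct refill_snapshot *s)
{
	/* flip GEN back only if a ring wrap toggled it in the meantime */
	if (s->gen != idpf_queue_has(RFL_GEN_CHK, refillq))
		idpf_queue_change(RFL_GEN_CHK, refillq);

	refillq->next_to_clean = s->ntc;
}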
/**
@@ -2385,6 +2630,7 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
struct netdev_queue *nq;
struct sk_buff *skb;
skb_frag_t *frag;
+ u32 next_buf_id;
u16 td_cmd = 0;
dma_addr_t dma;
@@ -2402,17 +2648,16 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
tx_buf = first;
first->nr_frags = 0;
- params->compl_tag =
- (tx_q->compl_tag_cur_gen << tx_q->compl_tag_gen_s) | i;
-
for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
- if (dma_mapping_error(tx_q->dev, dma))
- return idpf_tx_dma_map_error(tx_q, skb, first, i);
+ if (unlikely(dma_mapping_error(tx_q->dev, dma))) {
+ idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL;
+ return idpf_tx_splitq_pkt_err_unmap(tx_q, params,
+ first);
+ }
first->nr_frags++;
- idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag;
tx_buf->type = LIBETH_SQE_FRAG;
/* record length, and DMA address */
@@ -2468,29 +2713,12 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
max_data);
if (unlikely(++i == tx_q->desc_count)) {
- tx_buf = tx_q->tx_buf;
tx_desc = &tx_q->flex_tx[0];
i = 0;
- tx_q->compl_tag_cur_gen =
- IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
} else {
- tx_buf++;
tx_desc++;
}
- /* Since this packet has a buffer that is going to span
- * multiple descriptors, it's going to leave holes in
- * to the TX buffer ring. To ensure these holes do not
- * cause issues in the cleaning routines, we will clear
- * them of any stale data and assign them the same
- * completion tag as the current packet. Then when the
- * packet is being cleaned, the cleaning routines will
- * simply pass over these holes and finish cleaning the
- * rest of the packet.
- */
- tx_buf->type = LIBETH_SQE_EMPTY;
- idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag;
-
/* Adjust the DMA offset and the remaining size of the
* fragment. On the first iteration of this loop,
* max_data will be >= 12K and <= 16K-1. On any
@@ -2515,15 +2743,25 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
if (unlikely(++i == tx_q->desc_count)) {
- tx_buf = tx_q->tx_buf;
tx_desc = &tx_q->flex_tx[0];
i = 0;
- tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
} else {
- tx_buf++;
tx_desc++;
}
+ if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
+ if (unlikely(!idpf_tx_get_free_buf_id(tx_q->refillq,
+ &next_buf_id))) {
+ idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL;
+ return idpf_tx_splitq_pkt_err_unmap(tx_q, params,
+ first);
+ }
+ } else {
+ next_buf_id = i;
+ }
+ idpf_tx_buf_next(tx_buf) = next_buf_id;
+ tx_buf = &tx_q->tx_buf[next_buf_id];
+
size = skb_frag_size(frag);
data_len -= size;
@@ -2538,6 +2776,7 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
/* write last descriptor with RS and EOP bits */
first->rs_idx = i;
+ idpf_tx_buf_next(tx_buf) = IDPF_TXBUF_NULL;
td_cmd |= params->eop_cmd;
idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
i = idpf_tx_splitq_bump_ntu(tx_q, i);
@@ -2627,111 +2866,6 @@ int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off)
return 1;
}
-/**
- * __idpf_chk_linearize - Check skb is not using too many buffers
- * @skb: send buffer
- * @max_bufs: maximum number of buffers
- *
- * For TSO we need to count the TSO header and segment payload separately. As
- * such we need to check cases where we have max_bufs-1 fragments or more as we
- * can potentially require max_bufs+1 DMA transactions, 1 for the TSO header, 1
- * for the segment payload in the first descriptor, and another max_buf-1 for
- * the fragments.
- */
-static bool __idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs)
-{
- const struct skb_shared_info *shinfo = skb_shinfo(skb);
- const skb_frag_t *frag, *stale;
- int nr_frags, sum;
-
- /* no need to check if number of frags is less than max_bufs - 1 */
- nr_frags = shinfo->nr_frags;
- if (nr_frags < (max_bufs - 1))
- return false;
-
- /* We need to walk through the list and validate that each group
- * of max_bufs-2 fragments totals at least gso_size.
- */
- nr_frags -= max_bufs - 2;
- frag = &shinfo->frags[0];
-
- /* Initialize size to the negative value of gso_size minus 1. We use
- * this as the worst case scenario in which the frag ahead of us only
- * provides one byte which is why we are limited to max_bufs-2
- * descriptors for a single transmit as the header and previous
- * fragment are already consuming 2 descriptors.
- */
- sum = 1 - shinfo->gso_size;
-
- /* Add size of frags 0 through 4 to create our initial sum */
- sum += skb_frag_size(frag++);
- sum += skb_frag_size(frag++);
- sum += skb_frag_size(frag++);
- sum += skb_frag_size(frag++);
- sum += skb_frag_size(frag++);
-
- /* Walk through fragments adding latest fragment, testing it, and
- * then removing stale fragments from the sum.
- */
- for (stale = &shinfo->frags[0];; stale++) {
- int stale_size = skb_frag_size(stale);
-
- sum += skb_frag_size(frag++);
-
- /* The stale fragment may present us with a smaller
- * descriptor than the actual fragment size. To account
- * for that we need to remove all the data on the front and
- * figure out what the remainder would be in the last
- * descriptor associated with the fragment.
- */
- if (stale_size > IDPF_TX_MAX_DESC_DATA) {
- int align_pad = -(skb_frag_off(stale)) &
- (IDPF_TX_MAX_READ_REQ_SIZE - 1);
-
- sum -= align_pad;
- stale_size -= align_pad;
-
- do {
- sum -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
- stale_size -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
- } while (stale_size > IDPF_TX_MAX_DESC_DATA);
- }
-
- /* if sum is negative we failed to make sufficient progress */
- if (sum < 0)
- return true;
-
- if (!nr_frags--)
- break;
-
- sum -= stale_size;
- }
-
- return false;
-}
-
-/**
- * idpf_chk_linearize - Check if skb exceeds max descriptors per packet
- * @skb: send buffer
- * @max_bufs: maximum scatter gather buffers for single packet
- * @count: number of buffers this packet needs
- *
- * Make sure we don't exceed maximum scatter gather buffers for a single
- * packet. We have to do some special checking around the boundary (max_bufs-1)
- * if TSO is on since we need count the TSO header and payload separately.
- * E.g.: a packet with 7 fragments can require 9 DMA transactions; 1 for TSO
- * header, 1 for segment payload, and then 7 for the fragments.
- */
-static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
- unsigned int count)
-{
- if (likely(count < max_bufs))
- return false;
- if (skb_is_gso(skb))
- return __idpf_chk_linearize(skb, max_bufs);
-
- return count > max_bufs;
-}
/**
* idpf_tx_splitq_get_ctx_desc - grab next desc and update buffer ring
@@ -2746,8 +2880,6 @@ idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq)
union idpf_flex_tx_ctx_desc *desc;
int i = txq->next_to_use;
- txq->tx_buf[i].type = LIBETH_SQE_CTX;
-
/* grab the next descriptor */
desc = &txq->flex_ctx[i];
txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i);
@@ -2841,6 +2973,21 @@ static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc,
#endif /* CONFIG_PTP_1588_CLOCK */
/**
+ * idpf_tx_splitq_need_re - check whether RE bit needs to be set
+ * @tx_q: pointer to Tx queue
+ *
+ * Return: true if RE bit needs to be set, false otherwise
+ */
+static bool idpf_tx_splitq_need_re(struct idpf_tx_queue *tx_q)
+{
+ int gap = tx_q->next_to_use - tx_q->last_re;
+
+ gap += (gap < 0) ? tx_q->desc_count : 0;
+
+ return gap >= IDPF_TX_SPLITQ_RE_MIN_GAP;
+}
+
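The signed-gap computation handles ring wrap without a modulo: a negative difference simply means next_to_use has wrapped past last_re. Worked example with made-up values (the real IDPF_TX_SPLITQ_RE_MIN_GAP may differ):

/* desc_count = 512, last_re = 500, next_to_use = 20 */
static bool example_need_re(void)
{
	int desc_count = 512, last_re = 500, next_to_use = 20;
	int gap = next_to_use - last_re;	/* -480: we wrapped */

	gap += (gap < 0) ? desc_count : 0;	/* 32 descriptors since RE */

	return gap >= 64;	/* illustrative MIN_GAP: no RE needed yet */
}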
+/**
* idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors
* @skb: send buffer
* @tx_q: queue to send buffer on
@@ -2850,13 +2997,16 @@ static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc,
static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
struct idpf_tx_queue *tx_q)
{
- struct idpf_tx_splitq_params tx_params = { };
+ struct idpf_tx_splitq_params tx_params = {
+ .prev_ntu = tx_q->next_to_use,
+ };
union idpf_flex_tx_ctx_desc *ctx_desc;
struct idpf_tx_buf *first;
- unsigned int count;
+ u32 count, buf_count = 1;
int tso, idx;
+ u32 buf_id;
- count = idpf_tx_desc_count_required(tx_q, skb);
+ count = idpf_tx_res_count_required(tx_q, skb, &buf_count);
if (unlikely(!count))
return idpf_tx_drop_skb(tx_q, skb);
@@ -2866,7 +3016,7 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
/* Check for splitq specific TX resources */
count += (IDPF_TX_DESCS_PER_CACHE_LINE + tso);
- if (idpf_tx_maybe_stop_splitq(tx_q, count)) {
+ if (idpf_tx_maybe_stop_splitq(tx_q, count, buf_count)) {
idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
return NETDEV_TX_BUSY;
@@ -2898,36 +3048,47 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
idpf_tx_set_tstamp_desc(ctx_desc, idx);
}
- /* record the location of the first descriptor for this packet */
- first = &tx_q->tx_buf[tx_q->next_to_use];
- first->skb = skb;
+ if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
+ struct idpf_sw_queue *refillq = tx_q->refillq;
- if (tso) {
- first->packets = tx_params.offload.tso_segs;
- first->bytes = skb->len +
- ((first->packets - 1) * tx_params.offload.tso_hdr_len);
- } else {
- first->packets = 1;
- first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
- }
+ /* Save refillq state in case of a packet rollback. Otherwise,
+ * the tags will be leaked since they will be popped from the
+ * refillq but never reposted during cleaning.
+ */
+ tx_params.prev_refill_gen =
+ idpf_queue_has(RFL_GEN_CHK, refillq);
+ tx_params.prev_refill_ntc = refillq->next_to_clean;
+
+ if (unlikely(!idpf_tx_get_free_buf_id(tx_q->refillq,
+ &buf_id))) {
+ if (tx_params.prev_refill_gen !=
+ idpf_queue_has(RFL_GEN_CHK, refillq))
+ idpf_queue_change(RFL_GEN_CHK, refillq);
+ refillq->next_to_clean = tx_params.prev_refill_ntc;
+
+ tx_q->next_to_use = tx_params.prev_ntu;
+ return idpf_tx_drop_skb(tx_q, skb);
+ }
+ tx_params.compl_tag = buf_id;
- if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE;
tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP;
- /* Set the RE bit to catch any packets that may have not been
- * stashed during RS completion cleaning. MIN_GAP is set to
- * MIN_RING size to ensure it will be set at least once each
- * time around the ring.
+ /* Set the RE bit to periodically "clean" the descriptor ring.
+ * MIN_GAP is set to the MIN_RING size to ensure the bit is set
+ * at least once each time around the ring.
*/
- if (!(tx_q->next_to_use % IDPF_TX_SPLITQ_RE_MIN_GAP)) {
+ if (idpf_tx_splitq_need_re(tx_q)) {
tx_params.eop_cmd |= IDPF_TXD_FLEX_FLOW_CMD_RE;
tx_q->txq_grp->num_completions_pending++;
+ tx_q->last_re = tx_q->next_to_use;
}
if (skb->ip_summed == CHECKSUM_PARTIAL)
tx_params.offload.td_cmd |= IDPF_TXD_FLEX_FLOW_CMD_CS_EN;
} else {
+ buf_id = tx_q->next_to_use;
+
tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2;
tx_params.eop_cmd = IDPF_TXD_LAST_DESC_CMD;
@@ -2935,6 +3096,18 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
tx_params.offload.td_cmd |= IDPF_TX_FLEX_DESC_CMD_CS_EN;
}
+ first = &tx_q->tx_buf[buf_id];
+ first->skb = skb;
+
+ if (tso) {
+ first->packets = tx_params.offload.tso_segs;
+ first->bytes = skb->len +
+ ((first->packets - 1) * tx_params.offload.tso_hdr_len);
+ } else {
+ first->packets = 1;
+ first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
+ }
+
idpf_tx_splitq_map(tx_q, &tx_params, first);
return NETDEV_TX_OK;
@@ -2949,10 +3122,11 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
*/
netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev)
{
- struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
+ const struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
struct idpf_tx_queue *tx_q;
- if (unlikely(skb_get_queue_mapping(skb) >= vport->num_txq)) {
+ if (unlikely(skb_get_queue_mapping(skb) >=
+ vport->num_txq - vport->num_xdp_txq)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -2989,7 +3163,7 @@ idpf_rx_hash(const struct idpf_rx_queue *rxq, struct sk_buff *skb,
{
u32 hash;
- if (!libeth_rx_pt_has_hash(rxq->netdev, decoded))
+ if (!libeth_rx_pt_has_hash(rxq->xdp_rxq.dev, decoded))
return;
hash = le16_to_cpu(rx_desc->hash1) |
@@ -3015,7 +3189,7 @@ static void idpf_rx_csum(struct idpf_rx_queue *rxq, struct sk_buff *skb,
bool ipv4, ipv6;
/* check if Rx checksum is enabled */
- if (!libeth_rx_pt_has_checksum(rxq->netdev, decoded))
+ if (!libeth_rx_pt_has_checksum(rxq->xdp_rxq.dev, decoded))
return;
/* check if HW has decoded the packet and checksum */
@@ -3187,7 +3361,7 @@ idpf_rx_hwtstamp(const struct idpf_rx_queue *rxq,
}
/**
- * idpf_rx_process_skb_fields - Populate skb header fields from Rx descriptor
+ * __idpf_rx_process_skb_fields - Populate skb header fields from Rx descriptor
* @rxq: Rx descriptor ring packet is being transacted on
* @skb: pointer to current skb being populated
* @rx_desc: Receive descriptor
@@ -3197,8 +3371,8 @@ idpf_rx_hwtstamp(const struct idpf_rx_queue *rxq,
* other fields within the skb.
*/
static int
-idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
- const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
+__idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
{
struct libeth_rx_csum csum_bits;
struct libeth_rx_pt decoded;
@@ -3214,9 +3388,6 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
if (idpf_queue_has(PTP, rxq))
idpf_rx_hwtstamp(rxq, rx_desc, skb);
- skb->protocol = eth_type_trans(skb, rxq->netdev);
- skb_record_rx_queue(skb, rxq->idx);
-
if (le16_get_bits(rx_desc->hdrlen_flags,
VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M))
return idpf_rx_rsc(rxq, skb, rx_desc, decoded);
@@ -3227,25 +3398,24 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
return 0;
}
-/**
- * idpf_rx_add_frag - Add contents of Rx buffer to sk_buff as a frag
- * @rx_buf: buffer containing page to add
- * @skb: sk_buff to place the data into
- * @size: packet length from rx_desc
- *
- * This function will add the data contained in rx_buf->page to the skb.
- * It will just attach the page as a frag to the skb.
- * The function will then update the page offset.
- */
-void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
- unsigned int size)
+bool idpf_rx_process_skb_fields(struct sk_buff *skb,
+ const struct libeth_xdp_buff *xdp,
+ struct libeth_rq_napi_stats *rs)
{
- u32 hr = netmem_get_pp(rx_buf->netmem)->p.offset;
+ struct idpf_rx_queue *rxq;
+
+ rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);
- skb_add_rx_frag_netmem(skb, skb_shinfo(skb)->nr_frags, rx_buf->netmem,
- rx_buf->offset + hr, size, rx_buf->truesize);
+ return !__idpf_rx_process_skb_fields(rxq, skb, xdp->desc);
}
+LIBETH_XDP_DEFINE_START();
+LIBETH_XDP_DEFINE_RUN(static idpf_xdp_run_pass, idpf_xdp_run_prog,
+ idpf_xdp_tx_flush_bulk, idpf_rx_process_skb_fields);
+LIBETH_XDP_DEFINE_FINALIZE(static idpf_xdp_finalize_rx, idpf_xdp_tx_flush_bulk,
+ idpf_xdp_tx_finalize);
+LIBETH_XDP_DEFINE_END();
+
/**
* idpf_rx_hsplit_wa - handle header buffer overflows and split errors
* @hdr: Rx buffer for the headers
@@ -3288,36 +3458,6 @@ static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr,
}
/**
- * idpf_rx_build_skb - Allocate skb and populate it from header buffer
- * @buf: Rx buffer to pull data from
- * @size: the length of the packet
- *
- * This function allocates an skb. It then populates it with the page data from
- * the current receive descriptor, taking care to set up the skb correctly.
- */
-struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size)
-{
- struct page *buf_page = __netmem_to_page(buf->netmem);
- u32 hr = pp_page_to_nmdesc(buf_page)->pp->p.offset;
- struct sk_buff *skb;
- void *va;
-
- va = page_address(buf_page) + buf->offset;
- prefetch(va + hr);
-
- skb = napi_build_skb(va, buf->truesize);
- if (unlikely(!skb))
- return NULL;
-
- skb_mark_for_recycle(skb);
-
- skb_reserve(skb, hr);
- __skb_put(skb, size);
-
- return skb;
-}
-
-/**
* idpf_rx_splitq_test_staterr - tests bits in Rx descriptor
* status and error fields
* @stat_err_field: field from descriptor to test bits in
@@ -3358,13 +3498,18 @@ static bool idpf_rx_splitq_is_eop(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_de
*/
static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
{
- int total_rx_bytes = 0, total_rx_pkts = 0;
struct idpf_buf_queue *rx_bufq = NULL;
- struct sk_buff *skb = rxq->skb;
+ struct libeth_rq_napi_stats rs = { };
u16 ntc = rxq->next_to_clean;
+ LIBETH_XDP_ONSTACK_BUFF(xdp);
+ LIBETH_XDP_ONSTACK_BULK(bq);
+
+ libeth_xdp_tx_init_bulk(&bq, rxq->xdp_prog, rxq->xdp_rxq.dev,
+ rxq->xdpsqs, rxq->num_xdp_txq);
+ libeth_xdp_init_buff(xdp, &rxq->xdp, &rxq->xdp_rxq);
/* Process Rx packets bounded by budget */
- while (likely(total_rx_pkts < budget)) {
+ while (likely(rs.packets < budget)) {
struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
struct libeth_fqe *hdr, *rx_buf = NULL;
struct idpf_sw_queue *refillq = NULL;
@@ -3378,18 +3523,14 @@ static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
/* get the Rx desc from Rx queue based on 'next_to_clean' */
rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb;
- /* This memory barrier is needed to keep us from reading
- * any other fields out of the rx_desc
- */
- dma_rmb();
-
/* if the descriptor isn't done, no work yet to do */
gen_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M);
-
if (idpf_queue_has(GEN_CHK, rxq) != gen_id)
break;
+ dma_rmb();
+
rxdid = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M,
rx_desc->rxdid_ucast);
if (rxdid != VIRTCHNL2_RXDID_2_FLEX_SPLITQ) {
@@ -3434,7 +3575,7 @@ static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
hdr = &rx_bufq->hdr_buf[buf_id];
- if (unlikely(!hdr_len && !skb)) {
+ if (unlikely(!hdr_len && !xdp->data)) {
hdr_len = idpf_rx_hsplit_wa(hdr, rx_buf, pkt_len);
/* If failed, drop both buffers by setting len to 0 */
pkt_len -= hdr_len ? : pkt_len;
@@ -3444,75 +3585,37 @@ static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
u64_stats_update_end(&rxq->stats_sync);
}
- if (libeth_rx_sync_for_cpu(hdr, hdr_len)) {
- skb = idpf_rx_build_skb(hdr, hdr_len);
- if (!skb)
- break;
-
- u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_inc(&rxq->q_stats.hsplit_pkts);
- u64_stats_update_end(&rxq->stats_sync);
- }
+ if (libeth_xdp_process_buff(xdp, hdr, hdr_len))
+ rs.hsplit++;
hdr->netmem = 0;
payload:
- if (!libeth_rx_sync_for_cpu(rx_buf, pkt_len))
- goto skip_data;
-
- if (skb)
- idpf_rx_add_frag(rx_buf, skb, pkt_len);
- else
- skb = idpf_rx_build_skb(rx_buf, pkt_len);
-
- /* exit if we failed to retrieve a buffer */
- if (!skb)
- break;
-
-skip_data:
+ libeth_xdp_process_buff(xdp, rx_buf, pkt_len);
rx_buf->netmem = 0;
- idpf_rx_post_buf_refill(refillq, buf_id);
+ idpf_post_buf_refill(refillq, buf_id);
IDPF_RX_BUMP_NTC(rxq, ntc);
/* skip if it is non EOP desc */
- if (!idpf_rx_splitq_is_eop(rx_desc) || unlikely(!skb))
- continue;
-
- /* pad skb if needed (to make valid ethernet frame) */
- if (eth_skb_pad(skb)) {
- skb = NULL;
- continue;
- }
-
- /* probably a little skewed due to removing CRC */
- total_rx_bytes += skb->len;
-
- /* protocol */
- if (unlikely(idpf_rx_process_skb_fields(rxq, skb, rx_desc))) {
- dev_kfree_skb_any(skb);
- skb = NULL;
+ if (!idpf_rx_splitq_is_eop(rx_desc) || unlikely(!xdp->data))
continue;
- }
-
- /* send completed skb up the stack */
- napi_gro_receive(rxq->napi, skb);
- skb = NULL;
- /* update budget accounting */
- total_rx_pkts++;
+ idpf_xdp_run_pass(xdp, &bq, rxq->napi, &rs, rx_desc);
}
+ idpf_xdp_finalize_rx(&bq);
+
rxq->next_to_clean = ntc;
+ libeth_xdp_save_buff(&rxq->xdp, xdp);
- rxq->skb = skb;
u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_add(&rxq->q_stats.packets, total_rx_pkts);
- u64_stats_add(&rxq->q_stats.bytes, total_rx_bytes);
+ u64_stats_add(&rxq->q_stats.packets, rs.packets);
+ u64_stats_add(&rxq->q_stats.bytes, rs.bytes);
+ u64_stats_add(&rxq->q_stats.hsplit_pkts, rs.hsplit);
u64_stats_update_end(&rxq->stats_sync);
- /* guarantee a trip back through this routine if there was a failure */
- return total_rx_pkts;
+ return rs.packets;
}
/**
@@ -3580,10 +3683,10 @@ static void idpf_rx_clean_refillq(struct idpf_buf_queue *bufq,
bool failure;
if (idpf_queue_has(RFL_GEN_CHK, refillq) !=
- !!(refill_desc & IDPF_RX_BI_GEN_M))
+ !!(refill_desc & IDPF_RFL_BI_GEN_M))
break;
- buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc);
+ buf_id = FIELD_GET(IDPF_RFL_BI_BUFID_M, refill_desc);
failure = idpf_rx_update_bufq_desc(bufq, buf_id, buf_desc);
if (failure)
break;
@@ -3655,7 +3758,7 @@ static irqreturn_t idpf_vport_intr_clean_queues(int __always_unused irq,
struct idpf_q_vector *q_vector = (struct idpf_q_vector *)data;
q_vector->total_events++;
- napi_schedule(&q_vector->napi);
+ napi_schedule_irqoff(&q_vector->napi);
return IRQ_HANDLED;
}
@@ -3696,6 +3799,8 @@ void idpf_vport_intr_rel(struct idpf_vport *vport)
for (u32 v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
+ kfree(q_vector->xsksq);
+ q_vector->xsksq = NULL;
kfree(q_vector->complq);
q_vector->complq = NULL;
kfree(q_vector->bufq);
@@ -3710,6 +3815,20 @@ void idpf_vport_intr_rel(struct idpf_vport *vport)
vport->q_vectors = NULL;
}
+static void idpf_q_vector_set_napi(struct idpf_q_vector *q_vector, bool link)
+{
+ struct napi_struct *napi = link ? &q_vector->napi : NULL;
+ struct net_device *dev = q_vector->vport->netdev;
+
+ for (u32 i = 0; i < q_vector->num_rxq; i++)
+ netif_queue_set_napi(dev, q_vector->rx[i]->idx,
+ NETDEV_QUEUE_TYPE_RX, napi);
+
+ for (u32 i = 0; i < q_vector->num_txq; i++)
+ netif_queue_set_napi(dev, q_vector->tx[i]->idx,
+ NETDEV_QUEUE_TYPE_TX, napi);
+}
+
/**
* idpf_vport_intr_rel_irq - Free the IRQ association with the OS
* @vport: main vport structure
@@ -3730,6 +3849,7 @@ static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
vidx = vport->q_vector_idxs[vector];
irq_num = adapter->msix_entries[vidx].vector;
+ idpf_q_vector_set_napi(q_vector, false);
kfree(free_irq(irq_num, q_vector));
}
}
@@ -3743,6 +3863,8 @@ static void idpf_vport_intr_dis_irq_all(struct idpf_vport *vport)
struct idpf_q_vector *q_vector = vport->q_vectors;
int q_idx;
+ writel(0, vport->noirq_dyn_ctl);
+
for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++)
writel(0, q_vector[q_idx].intr_reg.dyn_ctl);
}
@@ -3917,6 +4039,8 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
"Request_irq failed, error: %d\n", err);
goto free_q_irqs;
}
+
+ idpf_q_vector_set_napi(q_vector, true);
}
return 0;
@@ -3984,6 +4108,8 @@ static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport)
if (qv->num_txq || qv->num_rxq)
idpf_vport_intr_update_itr_ena_irq(qv);
}
+
+ writel(vport->noirq_dyn_ctl_ena, vport->noirq_dyn_ctl);
}
/**
@@ -4133,7 +4259,9 @@ static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
struct idpf_rx_queue *rxq = q_vec->rx[i];
int pkts_cleaned_per_q;
- pkts_cleaned_per_q = idpf_rx_splitq_clean(rxq, budget_per_q);
+ pkts_cleaned_per_q = idpf_queue_has(XSK, rxq) ?
+ idpf_xskrq_poll(rxq, budget_per_q) :
+ idpf_rx_splitq_clean(rxq, budget_per_q);
/* if we clean as many as budgeted, we must not be done */
if (pkts_cleaned_per_q >= budget_per_q)
clean_complete = false;
@@ -4143,8 +4271,10 @@ static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
nid = numa_mem_id();
- for (i = 0; i < q_vec->num_bufq; i++)
- idpf_rx_clean_refillq_all(q_vec->bufq[i], nid);
+ for (i = 0; i < q_vec->num_bufq; i++) {
+ if (!idpf_queue_has(XSK, q_vec->bufq[i]))
+ idpf_rx_clean_refillq_all(q_vec->bufq[i], nid);
+ }
return clean_complete;
}
@@ -4158,7 +4288,7 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
{
struct idpf_q_vector *q_vector =
container_of(napi, struct idpf_q_vector, napi);
- bool clean_complete;
+ bool clean_complete = true;
int work_done = 0;
/* Handle case where we are called by netpoll with a budget of 0 */
@@ -4168,8 +4298,13 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
return 0;
}
- clean_complete = idpf_rx_splitq_clean_all(q_vector, budget, &work_done);
- clean_complete &= idpf_tx_splitq_clean_all(q_vector, budget, &work_done);
+ for (u32 i = 0; i < q_vector->num_xsksq; i++)
+ clean_complete &= idpf_xsk_xmit(q_vector->xsksq[i]);
+
+ clean_complete &= idpf_tx_splitq_clean_all(q_vector, budget,
+ &work_done);
+ clean_complete &= idpf_rx_splitq_clean_all(q_vector, budget,
+ &work_done);
/* If work not completed, return budget and polling will return */
if (!clean_complete) {
@@ -4177,20 +4312,12 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
return budget;
}
- /* Switch to poll mode in the tear-down path after sending disable
- * queues virtchnl message, as the interrupts will be disabled after
- * that.
- */
- if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE,
- q_vector->tx[0])))
- return budget;
-
work_done = min_t(int, work_done, budget - 1);
/* Exit the polling mode, but don't re-enable interrupts if stack might
* poll us due to busy-polling
*/
- if (likely(napi_complete_done(napi, work_done)))
+ if (napi_complete_done(napi, work_done))
idpf_vport_intr_update_itr_ena_irq(q_vector);
else
idpf_vport_intr_set_wb_on_itr(q_vector);
@@ -4206,8 +4333,8 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
*/
static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
{
+ u16 num_txq_grp = vport->num_txq_grp - vport->num_xdp_txq;
bool split = idpf_is_queue_model_split(vport->rxq_model);
- u16 num_txq_grp = vport->num_txq_grp;
struct idpf_rxq_group *rx_qgrp;
struct idpf_txq_group *tx_qgrp;
u32 i, qv_idx, q_index;
@@ -4283,6 +4410,21 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
qv_idx++;
}
+
+ for (i = 0; i < vport->num_xdp_txq; i++) {
+ struct idpf_tx_queue *xdpsq;
+ struct idpf_q_vector *qv;
+
+ xdpsq = vport->txqs[vport->xdp_txq_offset + i];
+ if (!idpf_queue_has(XSK, xdpsq))
+ continue;
+
+ qv = idpf_find_rxq_vec(vport, i);
+ idpf_xsk_init_wakeup(qv);
+
+ xdpsq->q_vector = qv;
+ qv->xsksq[qv->num_xsksq++] = xdpsq;
+ }
}
/**
@@ -4303,6 +4445,8 @@ static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
for (i = 0; i < vport->num_q_vectors; i++)
vport->q_vectors[i].v_idx = vport->q_vector_idxs[i];
+ vport->noirq_v_idx = vport->q_vector_idxs[i];
+
return 0;
}
@@ -4316,6 +4460,8 @@ static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
for (i = 0; i < vport->num_q_vectors; i++)
vport->q_vectors[i].v_idx = vecids[vport->q_vector_idxs[i]];
+ vport->noirq_v_idx = vecids[vport->q_vector_idxs[i]];
+
kfree(vecids);
return 0;
@@ -4416,6 +4562,15 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
GFP_KERNEL);
if (!q_vector->complq)
goto error;
+
+ if (!vport->xdp_txq_offset)
+ continue;
+
+ q_vector->xsksq = kcalloc(rxqs_per_vector,
+ sizeof(*q_vector->xsksq),
+ GFP_KERNEL);
+ if (!q_vector->xsksq)
+ goto error;
}
return 0;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 281de655a813..75b977094741 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -7,8 +7,10 @@
#include <linux/dim.h>
#include <net/libeth/cache.h>
-#include <net/tcp.h>
+#include <net/libeth/types.h>
#include <net/netdev_queues.h>
+#include <net/tcp.h>
+#include <net/xdp.h>
#include "idpf_lan_txrx.h"
#include "virtchnl2_lan_desc.h"
@@ -58,6 +60,8 @@
#define IDPF_MBX_Q_VEC 1
#define IDPF_MIN_Q_VEC 1
#define IDPF_MIN_RDMA_VEC 2
+/* Data vector for NOIRQ queues */
+#define IDPF_RESERVED_VECS 1
#define IDPF_DFLT_TX_Q_DESC_COUNT 512
#define IDPF_DFLT_TX_COMPLQ_DESC_COUNT 512
@@ -108,8 +112,8 @@ do { \
*/
#define IDPF_TX_SPLITQ_RE_MIN_GAP 64
-#define IDPF_RX_BI_GEN_M BIT(16)
-#define IDPF_RX_BI_BUFID_M GENMASK(15, 0)
+#define IDPF_RFL_BI_GEN_M BIT(16)
+#define IDPF_RFL_BI_BUFID_M GENMASK(15, 0)
#define IDPF_RXD_EOF_SPLITQ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M
#define IDPF_RXD_EOF_SINGLEQ VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M
@@ -118,10 +122,6 @@ do { \
((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
(txq)->next_to_clean - (txq)->next_to_use - 1)
-#define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->stash->buf_stack.top)
-#define IDPF_TX_BUF_RSV_LOW(txq) (IDPF_TX_BUF_RSV_UNUSED(txq) < \
- (txq)->desc_count >> 2)
-
#define IDPF_TX_COMPLQ_OVERFLOW_THRESH(txcq) ((txcq)->desc_count >> 1)
/* Determine the absolute number of completions pending, i.e. the number of
* completions that are expected to arrive on the TX completion queue.
@@ -131,11 +131,7 @@ do { \
0 : U32_MAX) + \
(txq)->num_completions_pending - (txq)->complq->num_completions)
-#define IDPF_TX_SPLITQ_COMPL_TAG_WIDTH 16
-/* Adjust the generation for the completion tag and wrap if necessary */
-#define IDPF_TX_ADJ_COMPL_TAG_GEN(txq) \
- ((++(txq)->compl_tag_cur_gen) >= (txq)->compl_tag_gen_max ? \
- 0 : (txq)->compl_tag_cur_gen)
+#define IDPF_TXBUF_NULL U32_MAX
#define IDPF_TXD_LAST_DESC_CMD (IDPF_TX_DESC_CMD_EOP | IDPF_TX_DESC_CMD_RS)
@@ -145,6 +141,8 @@ do { \
#define IDPF_TX_FLAGS_TUNNEL BIT(3)
#define IDPF_TX_FLAGS_TSYN BIT(4)
+struct libeth_rq_napi_stats;
+
union idpf_tx_flex_desc {
struct idpf_flex_tx_desc q; /* queue based scheduling */
struct idpf_flex_tx_sched_desc flow; /* flow based scheduling */
@@ -153,18 +151,6 @@ union idpf_tx_flex_desc {
#define idpf_tx_buf libeth_sqe
/**
- * struct idpf_buf_lifo - LIFO for managing OOO completions
- * @top: Used to know how many buffers are left
- * @size: Total size of LIFO
- * @bufs: Backing array
- */
-struct idpf_buf_lifo {
- u16 top;
- u16 size;
- struct idpf_tx_stash **bufs;
-};
-
-/**
* struct idpf_tx_offload_params - Offload parameters for a given packet
* @tx_flags: Feature flags enabled for this packet
* @hdr_offsets: Offset parameter for single queue model
@@ -196,6 +182,9 @@ struct idpf_tx_offload_params {
* @compl_tag: Associated tag for completion
* @td_tag: Descriptor tunneling tag
* @offload: Offload parameters
+ * @prev_ntu: stored TxQ next_to_use in case of rollback
+ * @prev_refill_ntc: stored refillq next_to_clean in case of packet rollback
+ * @prev_refill_gen: stored refillq generation bit in case of packet rollback
*/
struct idpf_tx_splitq_params {
enum idpf_tx_desc_dtype_value dtype;
@@ -206,6 +195,10 @@ struct idpf_tx_splitq_params {
};
struct idpf_tx_offload_params offload;
+
+ u16 prev_ntu;
+ u16 prev_refill_ntc;
+ bool prev_refill_gen;
};
enum idpf_tx_ctx_desc_eipt_offload {
@@ -288,11 +281,13 @@ struct idpf_ptype_state {
* bit and Q_RFL_GEN is the SW bit.
* @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling
* @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions
- * @__IDPF_Q_POLL_MODE: Enable poll mode
* @__IDPF_Q_CRC_EN: enable CRC offload in singleq mode
* @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq)
* @__IDPF_Q_PTP: indicates whether the Rx timestamping is enabled for the
* queue
+ * @__IDPF_Q_NOIRQ: queue is polling-driven and has no interrupt
+ * @__IDPF_Q_XDP: this is an XDP queue
+ * @__IDPF_Q_XSK: the queue has an XSk pool installed
* @__IDPF_Q_FLAGS_NBITS: Must be last
*/
enum idpf_queue_flags_t {
@@ -300,10 +295,12 @@ enum idpf_queue_flags_t {
__IDPF_Q_RFL_GEN_CHK,
__IDPF_Q_FLOW_SCH_EN,
__IDPF_Q_SW_MARKER,
- __IDPF_Q_POLL_MODE,
__IDPF_Q_CRC_EN,
__IDPF_Q_HSPLIT_EN,
__IDPF_Q_PTP,
+ __IDPF_Q_NOIRQ,
+ __IDPF_Q_XDP,
+ __IDPF_Q_XSK,
__IDPF_Q_FLAGS_NBITS,
};
@@ -370,14 +367,17 @@ struct idpf_intr_reg {
* @num_txq: Number of TX queues
* @num_bufq: Number of buffer queues
* @num_complq: number of completion queues
+ * @num_xsksq: number of XSk send queues
* @rx: Array of RX queues to service
* @tx: Array of TX queues to service
* @bufq: Array of buffer queues to service
* @complq: array of completion queues
+ * @xsksq: array of XSk send queues
* @intr_reg: See struct idpf_intr_reg
- * @napi: napi handler
+ * @csd: XSk wakeup CSD
* @total_events: Number of interrupts processed
* @wb_on_itr: whether WB on ITR is enabled
+ * @napi: napi handler
* @tx_dim: Data for TX net_dim algorithm
* @tx_itr_value: TX interrupt throttling rate
* @tx_intr_mode: Dynamic ITR or not
@@ -396,19 +396,24 @@ struct idpf_q_vector {
u16 num_txq;
u16 num_bufq;
u16 num_complq;
+ u16 num_xsksq;
struct idpf_rx_queue **rx;
struct idpf_tx_queue **tx;
struct idpf_buf_queue **bufq;
struct idpf_compl_queue **complq;
+ struct idpf_tx_queue **xsksq;
struct idpf_intr_reg intr_reg;
__cacheline_group_end_aligned(read_mostly);
__cacheline_group_begin_aligned(read_write);
- struct napi_struct napi;
+ call_single_data_t csd;
+
u16 total_events;
bool wb_on_itr;
+ struct napi_struct napi;
+
struct dim tx_dim;
u16 tx_itr_value;
bool tx_intr_mode;
@@ -425,8 +430,8 @@ struct idpf_q_vector {
__cacheline_group_end_aligned(cold);
};
-libeth_cacheline_set_assert(struct idpf_q_vector, 120,
- 24 + sizeof(struct napi_struct) +
+libeth_cacheline_set_assert(struct idpf_q_vector, 136,
+ 56 + sizeof(struct napi_struct) +
2 * sizeof(struct dim),
8);
@@ -468,38 +473,32 @@ struct idpf_tx_queue_stats {
#define IDPF_DIM_DEFAULT_PROFILE_IX 1
/**
- * struct idpf_txq_stash - Tx buffer stash for Flow-based scheduling mode
- * @buf_stack: Stack of empty buffers to store buffer info for out of order
- * buffer completions. See struct idpf_buf_lifo
- * @sched_buf_hash: Hash table to store buffers
- */
-struct idpf_txq_stash {
- struct idpf_buf_lifo buf_stack;
- DECLARE_HASHTABLE(sched_buf_hash, 12);
-} ____cacheline_aligned;
-
-/**
* struct idpf_rx_queue - software structure representing a receive queue
* @rx: universal receive descriptor array
* @single_buf: buffer descriptor array in singleq
* @desc_ring: virtual descriptor ring address
* @bufq_sets: Pointer to the array of buffer queues in splitq mode
* @napi: NAPI instance corresponding to this queue (splitq)
+ * @xdp_prog: attached XDP program
* @rx_buf: See struct &libeth_fqe
* @pp: Page pool pointer in singleq mode
- * @netdev: &net_device corresponding to this queue
* @tail: Tail offset. Used for both queue models single and split.
* @flags: See enum idpf_queue_flags_t
* @idx: For RX queue, it is used to index to total RX queue across groups and
* used for skb reporting.
* @desc_count: Number of descriptors
+ * @num_xdp_txq: total number of XDP Tx queues
+ * @xdpsqs: shortcut for XDP Tx queues array
* @rxdids: Supported RX descriptor ids
+ * @truesize: data buffer truesize in singleq
* @rx_ptype_lkup: LUT of Rx ptypes
+ * @xdp_rxq: XDP queue info
* @next_to_use: Next descriptor to use
* @next_to_clean: Next descriptor to clean
* @next_to_alloc: RX buffer to allocate at
- * @skb: Pointer to the skb
- * @truesize: data buffer truesize in singleq
+ * @xdp: XDP buffer with the current frame
+ * @xsk: current XDP buffer in XSk mode
+ * @pool: XSk pool if installed
* @cached_phc_time: Cached PHC time for the Rx queue
* @stats_sync: See struct u64_stats_sync
* @q_stats: See union idpf_rx_queue_stats
@@ -524,30 +523,44 @@ struct idpf_rx_queue {
struct {
struct idpf_bufq_set *bufq_sets;
struct napi_struct *napi;
+ struct bpf_prog __rcu *xdp_prog;
};
struct {
struct libeth_fqe *rx_buf;
struct page_pool *pp;
+ void __iomem *tail;
};
};
- struct net_device *netdev;
- void __iomem *tail;
DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
u16 idx;
u16 desc_count;
- u32 rxdids;
+ u32 num_xdp_txq;
+ union {
+ struct idpf_tx_queue **xdpsqs;
+ struct {
+ u32 rxdids;
+ u32 truesize;
+ };
+ };
const struct libeth_rx_pt *rx_ptype_lkup;
+
+ struct xdp_rxq_info xdp_rxq;
__cacheline_group_end_aligned(read_mostly);
__cacheline_group_begin_aligned(read_write);
- u16 next_to_use;
- u16 next_to_clean;
- u16 next_to_alloc;
+ u32 next_to_use;
+ u32 next_to_clean;
+ u32 next_to_alloc;
- struct sk_buff *skb;
- u32 truesize;
+ union {
+ struct libeth_xdp_buff_stash xdp;
+ struct {
+ struct libeth_xdp_buff *xsk;
+ struct xsk_buff_pool *pool;
+ };
+ };
u64 cached_phc_time;
struct u64_stats_sync stats_sync;
@@ -567,8 +580,11 @@ struct idpf_rx_queue {
u16 rx_max_pkt_size;
__cacheline_group_end_aligned(cold);
};
-libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
- 88 + sizeof(struct u64_stats_sync),
+libeth_cacheline_set_assert(struct idpf_rx_queue,
+ ALIGN(64, __alignof(struct xdp_rxq_info)) +
+ sizeof(struct xdp_rxq_info),
+ 96 + offsetof(struct idpf_rx_queue, q_stats) -
+ offsetofend(struct idpf_rx_queue, cached_phc_time),
32);
/**
@@ -580,36 +596,21 @@ libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
* @desc_ring: virtual descriptor ring address
* @tx_buf: See struct idpf_tx_buf
* @txq_grp: See struct idpf_txq_group
+ * @complq: corresponding completion queue in XDP mode
* @dev: Device back pointer for DMA mapping
+ * @pool: corresponding XSk pool if installed
* @tail: Tail offset. Used for both queue models single and split
* @flags: See enum idpf_queue_flags_t
* @idx: For TX queue, it is used as index to map between TX queue group and
* hot path TX pointers stored in vport. Used in both singleq/splitq.
* @desc_count: Number of descriptors
* @tx_min_pkt_len: Min supported packet length
- * @compl_tag_gen_s: Completion tag generation bit
- * The format of the completion tag will change based on the TXQ
- * descriptor ring size so that we can maintain roughly the same level
- * of "uniqueness" across all descriptor sizes. For example, if the
- * TXQ descriptor ring size is 64 (the minimum size supported), the
- * completion tag will be formatted as below:
- * 15 6 5 0
- * --------------------------------
- * | GEN=0-1023 |IDX = 0-63|
- * --------------------------------
- *
- * This gives us 64*1024 = 65536 possible unique values. Similarly, if
- * the TXQ descriptor ring size is 8160 (the maximum size supported),
- * the completion tag will be formatted as below:
- * 15 13 12 0
- * --------------------------------
- * |GEN | IDX = 0-8159 |
- * --------------------------------
- *
- * This gives us 8*8160 = 65280 possible unique values.
+ * @thresh: XDP queue cleaning threshold
* @netdev: &net_device corresponding to this queue
* @next_to_use: Next descriptor to use
* @next_to_clean: Next descriptor to clean
+ * @last_re: last descriptor index that RE bit was set
+ * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
* @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on
* the TX completion queue, it can be for any TXQ associated
* with that completion queue. This means we can clean up to
@@ -620,11 +621,11 @@ libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
* only once at the end of the cleaning routine.
* @clean_budget: singleq only, queue cleaning budget
* @cleaned_pkts: Number of packets cleaned for the above said case
- * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
- * @stash: Tx buffer stash for Flow-based scheduling mode
- * @compl_tag_bufid_m: Completion tag buffer id mask
- * @compl_tag_cur_gen: Used to keep track of current completion tag generation
- * @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset
+ * @refillq: Pointer to refill queue
+ * @pending: number of pending descriptors to send in QB
+ * @xdp_tx: number of pending &xdp_buff or &xdp_frame buffers
+ * @timer: timer for XDP Tx queue cleanup
+ * @xdp_lock: lock for XDP Tx queues sharing
* @cached_tstamp_caps: Tx timestamp capabilities negotiated with the CP
* @tstamp_task: Work that handles Tx timestamp read
* @stats_sync: See struct u64_stats_sync
@@ -633,6 +634,8 @@ libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
* @size: Length of descriptor ring in bytes
* @dma: Physical address of ring
* @q_vector: Backreference to associated vector
+ * @buf_pool_size: Total number of idpf_tx_buf
+ * @rel_q_id: relative virtchnl queue index
*/
struct idpf_tx_queue {
__cacheline_group_begin_aligned(read_mostly);
@@ -645,36 +648,53 @@ struct idpf_tx_queue {
void *desc_ring;
};
struct libeth_sqe *tx_buf;
- struct idpf_txq_group *txq_grp;
- struct device *dev;
+ union {
+ struct idpf_txq_group *txq_grp;
+ struct idpf_compl_queue *complq;
+ };
+ union {
+ struct device *dev;
+ struct xsk_buff_pool *pool;
+ };
void __iomem *tail;
DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
u16 idx;
u16 desc_count;
- u16 tx_min_pkt_len;
- u16 compl_tag_gen_s;
+ union {
+ u16 tx_min_pkt_len;
+ u32 thresh;
+ };
struct net_device *netdev;
__cacheline_group_end_aligned(read_mostly);
__cacheline_group_begin_aligned(read_write);
- u16 next_to_use;
- u16 next_to_clean;
+ u32 next_to_use;
+ u32 next_to_clean;
union {
- u32 cleaned_bytes;
- u32 clean_budget;
- };
- u16 cleaned_pkts;
+ struct {
+ u16 last_re;
+ u16 tx_max_bufs;
- u16 tx_max_bufs;
- struct idpf_txq_stash *stash;
+ union {
+ u32 cleaned_bytes;
+ u32 clean_budget;
+ };
+ u16 cleaned_pkts;
- u16 compl_tag_bufid_m;
- u16 compl_tag_cur_gen;
- u16 compl_tag_gen_max;
+ struct idpf_sw_queue *refillq;
+ };
+ struct {
+ u32 pending;
+ u32 xdp_tx;
+
+ struct libeth_xdpsq_timer *timer;
+ struct libeth_xdpsq_lock xdp_lock;
+ };
+ };
struct idpf_ptp_vport_tx_tstamp_caps *cached_tstamp_caps;
struct work_struct *tstamp_task;
@@ -689,25 +709,36 @@ struct idpf_tx_queue {
dma_addr_t dma;
struct idpf_q_vector *q_vector;
+
+ u32 buf_pool_size;
+ u32 rel_q_id;
__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
- 112 + sizeof(struct u64_stats_sync),
- 24);
+ 104 +
+ offsetof(struct idpf_tx_queue, cached_tstamp_caps) -
+ offsetofend(struct idpf_tx_queue, timer) +
+ offsetof(struct idpf_tx_queue, q_stats) -
+ offsetofend(struct idpf_tx_queue, tstamp_task),
+ 32);
/**
* struct idpf_buf_queue - software structure representing a buffer queue
* @split_buf: buffer descriptor array
- * @hdr_buf: &libeth_fqe for header buffers
- * @hdr_pp: &page_pool for header buffers
* @buf: &libeth_fqe for data buffers
* @pp: &page_pool for data buffers
+ * @xsk_buf: &xdp_buff for XSk Rx buffers
+ * @pool: &xsk_buff_pool on XSk queues
+ * @hdr_buf: &libeth_fqe for header buffers
+ * @hdr_pp: &page_pool for header buffers
* @tail: Tail offset
* @flags: See enum idpf_queue_flags_t
* @desc_count: Number of descriptors
+ * @thresh: refill threshold in XSk
* @next_to_use: Next descriptor to use
* @next_to_clean: Next descriptor to clean
* @next_to_alloc: RX buffer to allocate at
+ * @pending: number of buffers to refill (XSk)
* @hdr_truesize: truesize for buffer headers
* @truesize: truesize for data buffers
* @q_id: Queue id
@@ -721,14 +752,24 @@ libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
struct idpf_buf_queue {
__cacheline_group_begin_aligned(read_mostly);
struct virtchnl2_splitq_rx_buf_desc *split_buf;
+ union {
+ struct {
+ struct libeth_fqe *buf;
+ struct page_pool *pp;
+ };
+ struct {
+ struct libeth_xdp_buff **xsk_buf;
+ struct xsk_buff_pool *pool;
+ };
+ };
struct libeth_fqe *hdr_buf;
struct page_pool *hdr_pp;
- struct libeth_fqe *buf;
- struct page_pool *pp;
void __iomem *tail;
DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
u32 desc_count;
+
+ u32 thresh;
__cacheline_group_end_aligned(read_mostly);
__cacheline_group_begin_aligned(read_write);
@@ -736,6 +777,7 @@ struct idpf_buf_queue {
u32 next_to_clean;
u32 next_to_alloc;
+ u32 pending;
u32 hdr_truesize;
u32 truesize;
__cacheline_group_end_aligned(read_write);
@@ -756,7 +798,9 @@ libeth_cacheline_set_assert(struct idpf_buf_queue, 64, 24, 32);
/**
* struct idpf_compl_queue - software structure representing a completion queue
- * @comp: completion descriptor array
+ * @comp: 8-byte completion descriptor array
+ * @comp_4b: 4-byte completion descriptor array
+ * @desc_ring: virtual descriptor ring address
* @txq_grp: See struct idpf_txq_group
* @flags: See enum idpf_queue_flags_t
* @desc_count: Number of descriptors
@@ -776,7 +820,12 @@ libeth_cacheline_set_assert(struct idpf_buf_queue, 64, 24, 32);
*/
struct idpf_compl_queue {
__cacheline_group_begin_aligned(read_mostly);
- struct idpf_splitq_tx_compl_desc *comp;
+ union {
+ struct idpf_splitq_tx_compl_desc *comp;
+ struct idpf_splitq_4b_tx_compl_desc *comp_4b;
+
+ void *desc_ring;
+ };
struct idpf_txq_group *txq_grp;
DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
@@ -903,7 +952,6 @@ struct idpf_rxq_group {
* @vport: Vport back pointer
* @num_txq: Number of TX queues associated
* @txqs: Array of TX queue pointers
- * @stashes: array of OOO stashes for the queues
* @complq: Associated completion queue pointer, split queue only
* @num_completions_pending: Total number of completions pending for the
* completion queue, accumulated for all TX queues
@@ -918,7 +966,6 @@ struct idpf_txq_group {
u16 num_txq;
struct idpf_tx_queue *txqs[IDPF_LARGE_MAX_Q];
- struct idpf_txq_stash *stashes;
struct idpf_compl_queue *complq;
@@ -1011,6 +1058,17 @@ static inline void idpf_vport_intr_set_wb_on_itr(struct idpf_q_vector *q_vector)
reg->dyn_ctl);
}
+/**
+ * idpf_tx_splitq_get_free_bufs - get number of free buf_ids in refillq
+ * @refillq: pointer to refillq containing buf_ids
+ */
+static inline u32 idpf_tx_splitq_get_free_bufs(struct idpf_sw_queue *refillq)
+{
+ return (refillq->next_to_use > refillq->next_to_clean ?
+ 0 : refillq->desc_count) +
+ refillq->next_to_use - refillq->next_to_clean - 1;
+}
+
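+/*
+ * The formula above is the usual "one slot reserved" ring arithmetic: one
+ * entry always stays unused so that next_to_use == next_to_clean means
+ * "empty" rather than "full". Worked sketch with hypothetical values:
+ *
+ *	free(ntu, ntc, count) = (ntu > ntc ? 0 : count) + ntu - ntc - 1
+ *	free(0, 0, 256)    == 255	(empty ring)
+ *	free(10, 200, 256) == 65	(producer wrapped past the end)
+ */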
int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
void idpf_vport_init_num_qs(struct idpf_vport *vport,
struct virtchnl2_create_vport *vport_msg);
@@ -1031,23 +1089,30 @@ int idpf_config_rss(struct idpf_vport *vport);
int idpf_init_rss(struct idpf_vport *vport);
void idpf_deinit_rss(struct idpf_vport *vport);
int idpf_rx_bufs_init_all(struct idpf_vport *vport);
-void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
- unsigned int size);
-struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size);
+
+struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
+ u32 q_num);
+struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport,
+ u32 q_num);
+int idpf_qp_switch(struct idpf_vport *vport, u32 qid, bool en);
+
void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
bool xmit_more);
unsigned int idpf_size_to_txd_count(unsigned int size);
netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb);
-void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
- struct idpf_tx_buf *first, u16 ring_idx);
-unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
- struct sk_buff *skb);
+unsigned int idpf_tx_res_count_required(struct idpf_tx_queue *txq,
+ struct sk_buff *skb, u32 *buf_count);
void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
struct idpf_tx_queue *tx_q);
netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev);
bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq,
u16 cleaned_count);
+bool idpf_rx_process_skb_fields(struct sk_buff *skb,
+ const struct libeth_xdp_buff *xdp,
+ struct libeth_rq_napi_stats *rs);
int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
+void idpf_wait_for_sw_marker_completion(const struct idpf_tx_queue *txq);
+
#endif /* !_IDPF_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
index 259d50fded67..4cc58c83688c 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
@@ -76,7 +76,7 @@ static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
int num_vecs = vport->num_q_vectors;
struct idpf_vec_regs *reg_vals;
int num_regs, i, err = 0;
- u32 rx_itr, tx_itr;
+ u32 rx_itr, tx_itr, val;
u16 total_vecs;
total_vecs = idpf_get_reserved_vecs(vport->adapter);
@@ -120,6 +120,15 @@ static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
intr->tx_itr = idpf_get_reg_addr(adapter, tx_itr);
}
+ /* Data vector for NOIRQ queues */
+
+ val = reg_vals[vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC].dyn_ctl_reg;
+ vport->noirq_dyn_ctl = idpf_get_reg_addr(adapter, val);
+
+ val = VF_INT_DYN_CTLN_WB_ON_ITR_M | VF_INT_DYN_CTLN_INTENA_MSK_M |
+ FIELD_PREP(VF_INT_DYN_CTLN_ITR_INDX_M, IDPF_NO_ITR_UPDATE_IDX);
+ vport->noirq_dyn_ctl_ena = val;
+
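+ /*
+ * FIELD_PREP() from <linux/bitfield.h> shifts a value into the field
+ * position described by the mask, e.g. for a hypothetical 2-bit field
+ * at bits 4:3:
+ *
+ * FIELD_PREP(GENMASK(4, 3), 0x3) == 0x18
+ */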
free_reg_vals:
kfree(reg_vals);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index a028c69f7fdc..cbb5fa30f5a0 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -702,9 +702,9 @@ int idpf_recv_mb_msg(struct idpf_adapter *adapter)
/* If post failed clear the only buffer we supplied */
if (post_err) {
if (dma_mem)
- dmam_free_coherent(&adapter->pdev->dev,
- dma_mem->size, dma_mem->va,
- dma_mem->pa);
+ dma_free_coherent(&adapter->pdev->dev,
+ dma_mem->size, dma_mem->va,
+ dma_mem->pa);
break;
}
@@ -716,34 +716,145 @@ int idpf_recv_mb_msg(struct idpf_adapter *adapter)
return err;
}
+struct idpf_chunked_msg_params {
+ u32 (*prepare_msg)(const struct idpf_vport *vport,
+ void *buf, const void *pos,
+ u32 num);
+
+ const void *chunks;
+ u32 num_chunks;
+
+ u32 chunk_sz;
+ u32 config_sz;
+
+ u32 vc_op;
+};
+
+struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_vport *vport, u32 num)
+{
+ struct idpf_queue_set *qp;
+
+ qp = kzalloc(struct_size(qp, qs, num), GFP_KERNEL);
+ if (!qp)
+ return NULL;
+
+ qp->vport = vport;
+ qp->num = num;
+
+ return qp;
+}
+
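+/*
+ * struct_size() from <linux/overflow.h> used above is the overflow-checked
+ * equivalent of the open-coded flexible-array sizing:
+ *
+ *	sizeof(*qp) + num * sizeof(qp->qs[0])
+ *
+ * saturating to SIZE_MAX on overflow so the allocation fails cleanly.
+ */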
/**
- * idpf_wait_for_marker_event - wait for software marker response
+ * idpf_send_chunked_msg - send VC message consisting of chunks
* @vport: virtual port data structure
+ * @params: message params
*
- * Returns 0 success, negative on failure.
- **/
-static int idpf_wait_for_marker_event(struct idpf_vport *vport)
+ * Helper function that splits the chunk array into one or more virtchnl
+ * messages, each fitting the mailbox buffer, and sends them in order.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_send_chunked_msg(struct idpf_vport *vport,
+ const struct idpf_chunked_msg_params *params)
{
- int event;
- int i;
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = params->vc_op,
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ const void *pos = params->chunks;
+ u32 num_chunks, num_msgs, buf_sz;
+ void *buf __free(kfree) = NULL;
+ u32 totqs = params->num_chunks;
+
+ num_chunks = min(IDPF_NUM_CHUNKS_PER_MSG(params->config_sz,
+ params->chunk_sz), totqs);
+ num_msgs = DIV_ROUND_UP(totqs, num_chunks);
- for (i = 0; i < vport->num_txq; i++)
- idpf_queue_set(SW_MARKER, vport->txqs[i]);
+ buf_sz = params->config_sz + num_chunks * params->chunk_sz;
+ buf = kzalloc(buf_sz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- event = wait_event_timeout(vport->sw_marker_wq,
- test_and_clear_bit(IDPF_VPORT_SW_MARKER,
- vport->flags),
- msecs_to_jiffies(500));
+ xn_params.send_buf.iov_base = buf;
- for (i = 0; i < vport->num_txq; i++)
- idpf_queue_clear(POLL_MODE, vport->txqs[i]);
+ for (u32 i = 0; i < num_msgs; i++) {
+ ssize_t reply_sz;
- if (event)
- return 0;
+ memset(buf, 0, buf_sz);
+ xn_params.send_buf.iov_len = buf_sz;
- dev_warn(&vport->adapter->pdev->dev, "Failed to receive marker packets\n");
+ if (params->prepare_msg(vport, buf, pos, num_chunks) != buf_sz)
+ return -EINVAL;
+
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+
+ pos += num_chunks * params->chunk_sz;
+ totqs -= num_chunks;
+
+ num_chunks = min(num_chunks, totqs);
+ buf_sz = params->config_sz + num_chunks * params->chunk_sz;
+ }
- return -ETIMEDOUT;
+ return 0;
+}
+
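+/*
+ * Worked example of the chunking math above (hypothetical sizes): with a
+ * 4096-byte mailbox buffer, config_sz = 8 and chunk_sz = 56,
+ * IDPF_NUM_CHUNKS_PER_MSG() allows (4096 - 8) / 56 = 73 chunks per message,
+ * so e.g. 100 queues are sent as DIV_ROUND_UP(100, 73) = 2 messages
+ * (73 + 27 chunks).
+ */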
+/**
+ * idpf_wait_for_marker_event_set - wait for software marker response for
+ * selected Tx queues
+ * @qs: set of the Tx queues
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_wait_for_marker_event_set(const struct idpf_queue_set *qs)
+{
+ struct idpf_tx_queue *txq;
+ bool markers_rcvd = true;
+
+ for (u32 i = 0; i < qs->num; i++) {
+ switch (qs->qs[i].type) {
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ txq = qs->qs[i].txq;
+
+ idpf_queue_set(SW_MARKER, txq);
+ idpf_wait_for_sw_marker_completion(txq);
+ markers_rcvd &= !idpf_queue_has(SW_MARKER, txq);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (!markers_rcvd) {
+ netdev_warn(qs->vport->netdev,
+ "Failed to receive marker packets\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/**
+ * idpf_wait_for_marker_event - wait for software marker response
+ * @vport: virtual port data structure
+ *
+ * Return: 0 on success, negative on failure.
+ **/
+static int idpf_wait_for_marker_event(struct idpf_vport *vport)
+{
+ struct idpf_queue_set *qs __free(kfree) = NULL;
+
+ qs = idpf_alloc_queue_set(vport, vport->num_txq);
+ if (!qs)
+ return -ENOMEM;
+
+ for (u32 i = 0; i < qs->num; i++) {
+ qs->qs[i].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ qs->qs[i].txq = vport->txqs[i];
+ }
+
+ return idpf_wait_for_marker_event_set(qs);
}
/**
@@ -1061,21 +1172,35 @@ int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues;
struct virtchnl2_get_capabilities *caps = &adapter->caps;
u16 default_vports = idpf_get_default_vports(adapter);
- int max_rx_q, max_tx_q;
+ u32 max_rx_q, max_tx_q, max_buf_q, max_compl_q;
mutex_lock(&adapter->queue_lock);
+ /* Caps are device-wide. Give each vport an equal piece */
max_rx_q = le16_to_cpu(caps->max_rx_q) / default_vports;
max_tx_q = le16_to_cpu(caps->max_tx_q) / default_vports;
- if (adapter->num_alloc_vports < default_vports) {
- max_q->max_rxq = min_t(u16, max_rx_q, IDPF_MAX_Q);
- max_q->max_txq = min_t(u16, max_tx_q, IDPF_MAX_Q);
- } else {
- max_q->max_rxq = IDPF_MIN_Q;
- max_q->max_txq = IDPF_MIN_Q;
+ max_buf_q = le16_to_cpu(caps->max_rx_bufq) / default_vports;
+ max_compl_q = le16_to_cpu(caps->max_tx_complq) / default_vports;
+
+ if (adapter->num_alloc_vports >= default_vports) {
+ max_rx_q = IDPF_MIN_Q;
+ max_tx_q = IDPF_MIN_Q;
}
- max_q->max_bufq = max_q->max_rxq * IDPF_MAX_BUFQS_PER_RXQ_GRP;
- max_q->max_complq = max_q->max_txq;
+
+ /*
+ * Harmonize the numbers. The current implementation always creates
+ * `IDPF_MAX_BUFQS_PER_RXQ_GRP` buffer queues for each Rx queue and
+ * one completion queue for each Tx queue for best performance.
+ * If fewer buffer or completion queues are available, cap the number
+ * of the corresponding Rx/Tx queues.
+ */
+ max_rx_q = min(max_rx_q, max_buf_q / IDPF_MAX_BUFQS_PER_RXQ_GRP);
+ max_tx_q = min(max_tx_q, max_compl_q);
+
+ max_q->max_rxq = max_rx_q;
+ max_q->max_txq = max_tx_q;
+ max_q->max_bufq = max_rx_q * IDPF_MAX_BUFQS_PER_RXQ_GRP;
+ max_q->max_complq = max_tx_q;
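+ /*
+ * E.g. (hypothetical caps): max_rx_q = 64 but max_buf_q = 100 with
+ * IDPF_MAX_BUFQS_PER_RXQ_GRP == 2 caps Rx to min(64, 100 / 2) = 50,
+ * so every remaining Rx queue keeps its full set of buffer queues.
+ */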
if (avail_queues->avail_rxq < max_q->max_rxq ||
avail_queues->avail_txq < max_q->max_txq ||
@@ -1506,7 +1631,7 @@ int idpf_send_destroy_vport_msg(struct idpf_vport *vport)
xn_params.vc_op = VIRTCHNL2_OP_DESTROY_VPORT;
xn_params.send_buf.iov_base = &v_id;
xn_params.send_buf.iov_len = sizeof(v_id);
- xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
return reply_sz < 0 ? reply_sz : 0;
@@ -1554,236 +1679,368 @@ int idpf_send_disable_vport_msg(struct idpf_vport *vport)
xn_params.vc_op = VIRTCHNL2_OP_DISABLE_VPORT;
xn_params.send_buf.iov_base = &v_id;
xn_params.send_buf.iov_len = sizeof(v_id);
- xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
return reply_sz < 0 ? reply_sz : 0;
}
/**
- * idpf_send_config_tx_queues_msg - Send virtchnl config tx queues message
+ * idpf_fill_txq_config_chunk - fill chunk describing the Tx queue
+ * @vport: virtual port data structure
+ * @q: Tx queue to be inserted into VC chunk
+ * @qi: pointer to the buffer containing the VC chunk
+ */
+static void idpf_fill_txq_config_chunk(const struct idpf_vport *vport,
+ const struct idpf_tx_queue *q,
+ struct virtchnl2_txq_info *qi)
+{
+ u32 val;
+
+ qi->queue_id = cpu_to_le32(q->q_id);
+ qi->model = cpu_to_le16(vport->txq_model);
+ qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
+ qi->ring_len = cpu_to_le16(q->desc_count);
+ qi->dma_ring_addr = cpu_to_le64(q->dma);
+ qi->relative_queue_id = cpu_to_le16(q->rel_q_id);
+
+ if (!idpf_is_queue_model_split(vport->txq_model)) {
+ qi->sched_mode = cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
+ return;
+ }
+
+ if (idpf_queue_has(XDP, q))
+ val = q->complq->q_id;
+ else
+ val = q->txq_grp->complq->q_id;
+
+ qi->tx_compl_queue_id = cpu_to_le16(val);
+
+ if (idpf_queue_has(FLOW_SCH_EN, q))
+ val = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+ else
+ val = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+
+ qi->sched_mode = cpu_to_le16(val);
+}
+
+/**
+ * idpf_fill_complq_config_chunk - fill chunk describing the completion queue
+ * @vport: virtual port data structure
+ * @q: completion queue to be inserted into VC chunk
+ * @qi: pointer to the buffer containing the VC chunk
+ */
+static void idpf_fill_complq_config_chunk(const struct idpf_vport *vport,
+ const struct idpf_compl_queue *q,
+ struct virtchnl2_txq_info *qi)
+{
+ u32 val;
+
+ qi->queue_id = cpu_to_le32(q->q_id);
+ qi->model = cpu_to_le16(vport->txq_model);
+ qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
+ qi->ring_len = cpu_to_le16(q->desc_count);
+ qi->dma_ring_addr = cpu_to_le64(q->dma);
+
+ if (idpf_queue_has(FLOW_SCH_EN, q))
+ val = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+ else
+ val = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+
+ qi->sched_mode = cpu_to_le16(val);
+}
+
+/**
+ * idpf_prepare_cfg_txqs_msg - prepare message to configure selected Tx queues
* @vport: virtual port data structure
+ * @buf: buffer containing the message
+ * @pos: pointer to the first chunk describing the Tx queue
+ * @num_chunks: number of chunks in the message
*
- * Send config tx queues virtchnl message. Returns 0 on success, negative on
- * failure.
+ * Helper function for preparing the message describing configuration of
+ * Tx queues.
+ *
+ * Return: the total size of the prepared message.
*/
-static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
+static u32 idpf_prepare_cfg_txqs_msg(const struct idpf_vport *vport,
+ void *buf, const void *pos,
+ u32 num_chunks)
+{
+ struct virtchnl2_config_tx_queues *ctq = buf;
+
+ ctq->vport_id = cpu_to_le32(vport->vport_id);
+ ctq->num_qinfo = cpu_to_le16(num_chunks);
+ memcpy(ctq->qinfo, pos, num_chunks * sizeof(*ctq->qinfo));
+
+ return struct_size(ctq, qinfo, num_chunks);
+}
+
+/**
+ * idpf_send_config_tx_queue_set_msg - send virtchnl config Tx queues
+ * message for selected queues
+ * @qs: set of the Tx queues to configure
+ *
+ * Send config queues virtchnl message for queues contained in the @qs array.
+ * The @qs array can contain Tx queues (or completion queues) only.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_send_config_tx_queue_set_msg(const struct idpf_queue_set *qs)
{
- struct virtchnl2_config_tx_queues *ctq __free(kfree) = NULL;
struct virtchnl2_txq_info *qi __free(kfree) = NULL;
- struct idpf_vc_xn_params xn_params = {};
- u32 config_sz, chunk_sz, buf_sz;
- int totqs, num_msgs, num_chunks;
- ssize_t reply_sz;
- int i, k = 0;
+ struct idpf_chunked_msg_params params = {
+ .vc_op = VIRTCHNL2_OP_CONFIG_TX_QUEUES,
+ .prepare_msg = idpf_prepare_cfg_txqs_msg,
+ .config_sz = sizeof(struct virtchnl2_config_tx_queues),
+ .chunk_sz = sizeof(*qi),
+ };
- totqs = vport->num_txq + vport->num_complq;
- qi = kcalloc(totqs, sizeof(struct virtchnl2_txq_info), GFP_KERNEL);
+ qi = kcalloc(qs->num, sizeof(*qi), GFP_KERNEL);
if (!qi)
return -ENOMEM;
- /* Populate the queue info buffer with all queue context info */
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
- int j, sched_mode;
-
- for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
- qi[k].queue_id =
- cpu_to_le32(tx_qgrp->txqs[j]->q_id);
- qi[k].model =
- cpu_to_le16(vport->txq_model);
- qi[k].type =
- cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
- qi[k].ring_len =
- cpu_to_le16(tx_qgrp->txqs[j]->desc_count);
- qi[k].dma_ring_addr =
- cpu_to_le64(tx_qgrp->txqs[j]->dma);
- if (idpf_is_queue_model_split(vport->txq_model)) {
- struct idpf_tx_queue *q = tx_qgrp->txqs[j];
-
- qi[k].tx_compl_queue_id =
- cpu_to_le16(tx_qgrp->complq->q_id);
- qi[k].relative_queue_id = cpu_to_le16(j);
-
- if (idpf_queue_has(FLOW_SCH_EN, q))
- qi[k].sched_mode =
- cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_FLOW);
- else
- qi[k].sched_mode =
- cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
- } else {
- qi[k].sched_mode =
- cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
- }
- }
+ params.chunks = qi;
- if (!idpf_is_queue_model_split(vport->txq_model))
- continue;
+ for (u32 i = 0; i < qs->num; i++) {
+ if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_TX)
+ idpf_fill_txq_config_chunk(qs->vport, qs->qs[i].txq,
+ &qi[params.num_chunks++]);
+ else if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION)
+ idpf_fill_complq_config_chunk(qs->vport,
+ qs->qs[i].complq,
+ &qi[params.num_chunks++]);
+ }
- qi[k].queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
- qi[k].model = cpu_to_le16(vport->txq_model);
- qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
- qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count);
- qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma);
+ return idpf_send_chunked_msg(qs->vport, &params);
+}
- if (idpf_queue_has(FLOW_SCH_EN, tx_qgrp->complq))
- sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
- else
- sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
- qi[k].sched_mode = cpu_to_le16(sched_mode);
+/**
+ * idpf_send_config_tx_queues_msg - send virtchnl config Tx queues message
+ * @vport: virtual port data structure
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
+{
+ struct idpf_queue_set *qs __free(kfree) = NULL;
+ u32 totqs = vport->num_txq + vport->num_complq;
+ u32 k = 0;
+
+ qs = idpf_alloc_queue_set(vport, totqs);
+ if (!qs)
+ return -ENOMEM;
- k++;
+ /* Populate the queue info buffer with all queue context info */
+ for (u32 i = 0; i < vport->num_txq_grp; i++) {
+ const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+
+ for (u32 j = 0; j < tx_qgrp->num_txq; j++) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ qs->qs[k++].txq = tx_qgrp->txqs[j];
+ }
+
+ if (idpf_is_queue_model_split(vport->txq_model)) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ qs->qs[k++].complq = tx_qgrp->complq;
+ }
}
/* Make sure accounting agrees */
if (k != totqs)
return -EINVAL;
- /* Chunk up the queue contexts into multiple messages to avoid
- * sending a control queue message buffer that is too large
- */
- config_sz = sizeof(struct virtchnl2_config_tx_queues);
- chunk_sz = sizeof(struct virtchnl2_txq_info);
+ return idpf_send_config_tx_queue_set_msg(qs);
+}
- num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
- totqs);
- num_msgs = DIV_ROUND_UP(totqs, num_chunks);
+/**
+ * idpf_fill_rxq_config_chunk - fill chunk describing the Rx queue
+ * @vport: virtual port data structure
+ * @q: Rx queue to be inserted into VC chunk
+ * @qi: pointer to the buffer containing the VC chunk
+ */
+static void idpf_fill_rxq_config_chunk(const struct idpf_vport *vport,
+ struct idpf_rx_queue *q,
+ struct virtchnl2_rxq_info *qi)
+{
+ const struct idpf_bufq_set *sets;
+
+ qi->queue_id = cpu_to_le32(q->q_id);
+ qi->model = cpu_to_le16(vport->rxq_model);
+ qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
+ qi->ring_len = cpu_to_le16(q->desc_count);
+ qi->dma_ring_addr = cpu_to_le64(q->dma);
+ qi->max_pkt_size = cpu_to_le32(q->rx_max_pkt_size);
+ qi->rx_buffer_low_watermark = cpu_to_le16(q->rx_buffer_low_watermark);
+ qi->qflags = cpu_to_le16(VIRTCHNL2_RX_DESC_SIZE_32BYTE);
+ if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
+ qi->qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
+
+ if (!idpf_is_queue_model_split(vport->rxq_model)) {
+ qi->data_buffer_size = cpu_to_le32(q->rx_buf_size);
+ qi->desc_ids = cpu_to_le64(q->rxdids);
- buf_sz = struct_size(ctq, qinfo, num_chunks);
- ctq = kzalloc(buf_sz, GFP_KERNEL);
- if (!ctq)
- return -ENOMEM;
+ return;
+ }
- xn_params.vc_op = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
- xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ sets = q->bufq_sets;
- for (i = 0, k = 0; i < num_msgs; i++) {
- memset(ctq, 0, buf_sz);
- ctq->vport_id = cpu_to_le32(vport->vport_id);
- ctq->num_qinfo = cpu_to_le16(num_chunks);
- memcpy(ctq->qinfo, &qi[k], chunk_sz * num_chunks);
+ /*
+ * In splitq mode, RxQ buffer size should be set to that of the first
+ * buffer queue associated with this RxQ.
+ */
+ q->rx_buf_size = sets[0].bufq.rx_buf_size;
+ qi->data_buffer_size = cpu_to_le32(q->rx_buf_size);
- xn_params.send_buf.iov_base = ctq;
- xn_params.send_buf.iov_len = buf_sz;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- if (reply_sz < 0)
- return reply_sz;
+ qi->rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id);
+ if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) {
+ qi->bufq2_ena = IDPF_BUFQ2_ENA;
+ qi->rx_bufq2_id = cpu_to_le16(sets[1].bufq.q_id);
+ }
- k += num_chunks;
- totqs -= num_chunks;
- num_chunks = min(num_chunks, totqs);
- /* Recalculate buffer size */
- buf_sz = struct_size(ctq, qinfo, num_chunks);
+ q->rx_hbuf_size = sets[0].bufq.rx_hbuf_size;
+
+ if (idpf_queue_has(HSPLIT_EN, q)) {
+ qi->qflags |= cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT);
+ qi->hdr_buffer_size = cpu_to_le16(q->rx_hbuf_size);
}
- return 0;
+ qi->desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
+}
+
+/**
+ * idpf_fill_bufq_config_chunk - fill chunk describing the buffer queue
+ * @vport: virtual port data structure
+ * @q: buffer queue to be inserted into VC chunk
+ * @qi: pointer to the buffer containing the VC chunk
+ */
+static void idpf_fill_bufq_config_chunk(const struct idpf_vport *vport,
+ const struct idpf_buf_queue *q,
+ struct virtchnl2_rxq_info *qi)
+{
+ qi->queue_id = cpu_to_le32(q->q_id);
+ qi->model = cpu_to_le16(vport->rxq_model);
+ qi->type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
+ qi->ring_len = cpu_to_le16(q->desc_count);
+ qi->dma_ring_addr = cpu_to_le64(q->dma);
+ qi->data_buffer_size = cpu_to_le32(q->rx_buf_size);
+ qi->rx_buffer_low_watermark = cpu_to_le16(q->rx_buffer_low_watermark);
+ qi->desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
+ qi->buffer_notif_stride = IDPF_RX_BUF_STRIDE;
+ if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
+ qi->qflags = cpu_to_le16(VIRTCHNL2_RXQ_RSC);
+
+ if (idpf_queue_has(HSPLIT_EN, q)) {
+ qi->qflags |= cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT);
+ qi->hdr_buffer_size = cpu_to_le16(q->rx_hbuf_size);
+ }
}
/**
- * idpf_send_config_rx_queues_msg - Send virtchnl config rx queues message
+ * idpf_prepare_cfg_rxqs_msg - prepare message to configure selected Rx queues
* @vport: virtual port data structure
+ * @buf: buffer containing the message
+ * @pos: pointer to the first chunk describing the Rx queue
+ * @num_chunks: number of chunks in the message
*
- * Send config rx queues virtchnl message. Returns 0 on success, negative on
- * failure.
+ * Helper function for preparing the message describing configuration of
+ * Rx queues.
+ *
+ * Return: the total size of the prepared message.
*/
-static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
+static u32 idpf_prepare_cfg_rxqs_msg(const struct idpf_vport *vport,
+ void *buf, const void *pos,
+ u32 num_chunks)
+{
+ struct virtchnl2_config_rx_queues *crq = buf;
+
+ crq->vport_id = cpu_to_le32(vport->vport_id);
+ crq->num_qinfo = cpu_to_le16(num_chunks);
+ memcpy(crq->qinfo, pos, num_chunks * sizeof(*crq->qinfo));
+
+ return struct_size(crq, qinfo, num_chunks);
+}
+
+/**
+ * idpf_send_config_rx_queue_set_msg - send virtchnl config Rx queues message
+ * for selected queues.
+ * @qs: set of the Rx queues to configure
+ *
+ * Send config queues virtchnl message for queues contained in the @qs array.
+ * The @qs array can contain Rx queues (or buffer queues) only.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_send_config_rx_queue_set_msg(const struct idpf_queue_set *qs)
{
- struct virtchnl2_config_rx_queues *crq __free(kfree) = NULL;
struct virtchnl2_rxq_info *qi __free(kfree) = NULL;
- struct idpf_vc_xn_params xn_params = {};
- u32 config_sz, chunk_sz, buf_sz;
- int totqs, num_msgs, num_chunks;
- ssize_t reply_sz;
- int i, k = 0;
+ struct idpf_chunked_msg_params params = {
+ .vc_op = VIRTCHNL2_OP_CONFIG_RX_QUEUES,
+ .prepare_msg = idpf_prepare_cfg_rxqs_msg,
+ .config_sz = sizeof(struct virtchnl2_config_rx_queues),
+ .chunk_sz = sizeof(*qi),
+ };
- totqs = vport->num_rxq + vport->num_bufq;
- qi = kcalloc(totqs, sizeof(struct virtchnl2_rxq_info), GFP_KERNEL);
+ qi = kcalloc(qs->num, sizeof(*qi), GFP_KERNEL);
if (!qi)
return -ENOMEM;
- /* Populate the queue info buffer with all queue context info */
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
- u16 num_rxq;
- int j;
-
- if (!idpf_is_queue_model_split(vport->rxq_model))
- goto setup_rxqs;
-
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
- struct idpf_buf_queue *bufq =
- &rx_qgrp->splitq.bufq_sets[j].bufq;
-
- qi[k].queue_id = cpu_to_le32(bufq->q_id);
- qi[k].model = cpu_to_le16(vport->rxq_model);
- qi[k].type =
- cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
- qi[k].desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
- qi[k].ring_len = cpu_to_le16(bufq->desc_count);
- qi[k].dma_ring_addr = cpu_to_le64(bufq->dma);
- qi[k].data_buffer_size = cpu_to_le32(bufq->rx_buf_size);
- qi[k].buffer_notif_stride = IDPF_RX_BUF_STRIDE;
- qi[k].rx_buffer_low_watermark =
- cpu_to_le16(bufq->rx_buffer_low_watermark);
- if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
- qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
- }
+ params.chunks = qi;
-setup_rxqs:
- if (idpf_is_queue_model_split(vport->rxq_model))
- num_rxq = rx_qgrp->splitq.num_rxq_sets;
- else
- num_rxq = rx_qgrp->singleq.num_rxq;
+ for (u32 i = 0; i < qs->num; i++) {
+ if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_RX)
+ idpf_fill_rxq_config_chunk(qs->vport, qs->qs[i].rxq,
+ &qi[params.num_chunks++]);
+ else if (qs->qs[i].type == VIRTCHNL2_QUEUE_TYPE_RX_BUFFER)
+ idpf_fill_bufq_config_chunk(qs->vport, qs->qs[i].bufq,
+ &qi[params.num_chunks++]);
+ }
- for (j = 0; j < num_rxq; j++, k++) {
- const struct idpf_bufq_set *sets;
- struct idpf_rx_queue *rxq;
+ return idpf_send_chunked_msg(qs->vport, &params);
+}
- if (!idpf_is_queue_model_split(vport->rxq_model)) {
- rxq = rx_qgrp->singleq.rxqs[j];
- goto common_qi_fields;
- }
+/**
+ * idpf_send_config_rx_queues_msg - send virtchnl config Rx queues message
+ * @vport: virtual port data structure
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
+{
+ bool splitq = idpf_is_queue_model_split(vport->rxq_model);
+ struct idpf_queue_set *qs __free(kfree) = NULL;
+ u32 totqs = vport->num_rxq + vport->num_bufq;
+ u32 k = 0;
- rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
- sets = rxq->bufq_sets;
+ qs = idpf_alloc_queue_set(vport, totqs);
+ if (!qs)
+ return -ENOMEM;
- /* In splitq mode, RXQ buffer size should be
- * set to that of the first buffer queue
- * associated with this RXQ.
- */
- rxq->rx_buf_size = sets[0].bufq.rx_buf_size;
+ /* Populate the queue info buffer with all queue context info */
+ for (u32 i = 0; i < vport->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u32 num_rxq;
- qi[k].rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id);
- if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) {
- qi[k].bufq2_ena = IDPF_BUFQ2_ENA;
- qi[k].rx_bufq2_id =
- cpu_to_le16(sets[1].bufq.q_id);
- }
- qi[k].rx_buffer_low_watermark =
- cpu_to_le16(rxq->rx_buffer_low_watermark);
- if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
- qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
-
- rxq->rx_hbuf_size = sets[0].bufq.rx_hbuf_size;
-
- if (idpf_queue_has(HSPLIT_EN, rxq)) {
- qi[k].qflags |=
- cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT);
- qi[k].hdr_buffer_size =
- cpu_to_le16(rxq->rx_hbuf_size);
- }
+ if (!splitq) {
+ num_rxq = rx_qgrp->singleq.num_rxq;
+ goto rxq;
+ }
-common_qi_fields:
- qi[k].queue_id = cpu_to_le32(rxq->q_id);
- qi[k].model = cpu_to_le16(vport->rxq_model);
- qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
- qi[k].ring_len = cpu_to_le16(rxq->desc_count);
- qi[k].dma_ring_addr = cpu_to_le64(rxq->dma);
- qi[k].max_pkt_size = cpu_to_le32(rxq->rx_max_pkt_size);
- qi[k].data_buffer_size = cpu_to_le32(rxq->rx_buf_size);
- qi[k].qflags |=
- cpu_to_le16(VIRTCHNL2_RX_DESC_SIZE_32BYTE);
- qi[k].desc_ids = cpu_to_le64(rxq->rxdids);
+ for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ qs->qs[k++].bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
+ }
+
+ num_rxq = rx_qgrp->splitq.num_rxq_sets;
+
+rxq:
+ for (u32 j = 0; j < num_rxq; j++) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX;
+
+ if (splitq)
+ qs->qs[k++].rxq =
+ &rx_qgrp->splitq.rxq_sets[j]->rxq;
+ else
+ qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j];
}
}
@@ -1791,317 +2048,395 @@ common_qi_fields:
if (k != totqs)
return -EINVAL;
- /* Chunk up the queue contexts into multiple messages to avoid
- * sending a control queue message buffer that is too large
- */
- config_sz = sizeof(struct virtchnl2_config_rx_queues);
- chunk_sz = sizeof(struct virtchnl2_rxq_info);
+ return idpf_send_config_rx_queue_set_msg(qs);
+}
- num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
- totqs);
- num_msgs = DIV_ROUND_UP(totqs, num_chunks);
+/**
+ * idpf_prepare_ena_dis_qs_msg - prepare message to enable/disable selected
+ * queues
+ * @vport: virtual port data structure
+ * @buf: buffer containing the message
+ * @pos: pointer to the first chunk describing the queue
+ * @num_chunks: number of chunks in the message
+ *
+ * Helper function for preparing the message describing queues to be enabled
+ * or disabled.
+ *
+ * Return: the total size of the prepared message.
+ */
+static u32 idpf_prepare_ena_dis_qs_msg(const struct idpf_vport *vport,
+ void *buf, const void *pos,
+ u32 num_chunks)
+{
+ struct virtchnl2_del_ena_dis_queues *eq = buf;
+
+ eq->vport_id = cpu_to_le32(vport->vport_id);
+ eq->chunks.num_chunks = cpu_to_le16(num_chunks);
+ memcpy(eq->chunks.chunks, pos,
+ num_chunks * sizeof(*eq->chunks.chunks));
+
+ return struct_size(eq, chunks.chunks, num_chunks);
+}
+
+/**
+ * idpf_send_ena_dis_queue_set_msg - send virtchnl enable or disable queues
+ * message for selected queues
+ * @qs: set of the queues to enable or disable
+ * @en: whether to enable or disable queues
+ *
+ * Send enable or disable queues virtchnl message for queues contained
+ * in the @qs array.
+ * The @qs array can contain pointers to both Rx and Tx queues.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_send_ena_dis_queue_set_msg(const struct idpf_queue_set *qs,
+ bool en)
+{
+ struct virtchnl2_queue_chunk *qc __free(kfree) = NULL;
+ struct idpf_chunked_msg_params params = {
+ .vc_op = en ? VIRTCHNL2_OP_ENABLE_QUEUES :
+ VIRTCHNL2_OP_DISABLE_QUEUES,
+ .prepare_msg = idpf_prepare_ena_dis_qs_msg,
+ .config_sz = sizeof(struct virtchnl2_del_ena_dis_queues),
+ .chunk_sz = sizeof(*qc),
+ .num_chunks = qs->num,
+ };
- buf_sz = struct_size(crq, qinfo, num_chunks);
- crq = kzalloc(buf_sz, GFP_KERNEL);
- if (!crq)
+ qc = kcalloc(qs->num, sizeof(*qc), GFP_KERNEL);
+ if (!qc)
return -ENOMEM;
- xn_params.vc_op = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
- xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ params.chunks = qc;
- for (i = 0, k = 0; i < num_msgs; i++) {
- memset(crq, 0, buf_sz);
- crq->vport_id = cpu_to_le32(vport->vport_id);
- crq->num_qinfo = cpu_to_le16(num_chunks);
- memcpy(crq->qinfo, &qi[k], chunk_sz * num_chunks);
+ for (u32 i = 0; i < qs->num; i++) {
+ const struct idpf_queue_ptr *q = &qs->qs[i];
+ u32 qid;
- xn_params.send_buf.iov_base = crq;
- xn_params.send_buf.iov_len = buf_sz;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- if (reply_sz < 0)
- return reply_sz;
+ qc[i].type = cpu_to_le32(q->type);
+ qc[i].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
- k += num_chunks;
- totqs -= num_chunks;
- num_chunks = min(num_chunks, totqs);
- /* Recalculate buffer size */
- buf_sz = struct_size(crq, qinfo, num_chunks);
+ switch (q->type) {
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ qid = q->rxq->q_id;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ qid = q->txq->q_id;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ qid = q->bufq->q_id;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+ qid = q->complq->q_id;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ qc[i].start_queue_id = cpu_to_le32(qid);
}
- return 0;
+ return idpf_send_chunked_msg(qs->vport, &params);
}
/**
- * idpf_send_ena_dis_queues_msg - Send virtchnl enable or disable
- * queues message
+ * idpf_send_ena_dis_queues_msg - send virtchnl enable or disable queues
+ * message
* @vport: virtual port data structure
- * @ena: if true enable, false disable
+ * @en: whether to enable or disable queues
*
- * Send enable or disable queues virtchnl message. Returns 0 on success,
- * negative on failure.
+ * Return: 0 on success, -errno on failure.
*/
-static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena)
+static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool en)
{
- struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
- struct virtchnl2_queue_chunk *qc __free(kfree) = NULL;
- u32 num_msgs, num_chunks, num_txq, num_rxq, num_q;
- struct idpf_vc_xn_params xn_params = {};
- struct virtchnl2_queue_chunks *qcs;
- u32 config_sz, chunk_sz, buf_sz;
- ssize_t reply_sz;
- int i, j, k = 0;
+ struct idpf_queue_set *qs __free(kfree) = NULL;
+ u32 num_txq, num_q, k = 0;
+ bool split;
num_txq = vport->num_txq + vport->num_complq;
- num_rxq = vport->num_rxq + vport->num_bufq;
- num_q = num_txq + num_rxq;
- buf_sz = sizeof(struct virtchnl2_queue_chunk) * num_q;
- qc = kzalloc(buf_sz, GFP_KERNEL);
- if (!qc)
+ num_q = num_txq + vport->num_rxq + vport->num_bufq;
+
+ qs = idpf_alloc_queue_set(vport, num_q);
+ if (!qs)
return -ENOMEM;
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ split = idpf_is_queue_model_split(vport->txq_model);
- for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
- qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
- qc[k].start_queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
- qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
- }
- }
- if (vport->num_txq != k)
- return -EINVAL;
+ for (u32 i = 0; i < vport->num_txq_grp; i++) {
+ const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
- if (!idpf_is_queue_model_split(vport->txq_model))
- goto setup_rx;
+ for (u32 j = 0; j < tx_qgrp->num_txq; j++) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ qs->qs[k++].txq = tx_qgrp->txqs[j];
+ }
- for (i = 0; i < vport->num_txq_grp; i++, k++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ if (!split)
+ continue;
- qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
- qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
- qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ qs->qs[k++].complq = tx_qgrp->complq;
}
- if (vport->num_complq != (k - vport->num_txq))
+
+ if (k != num_txq)
return -EINVAL;
-setup_rx:
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ split = idpf_is_queue_model_split(vport->rxq_model);
- if (idpf_is_queue_model_split(vport->rxq_model))
+ for (u32 i = 0; i < vport->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u32 num_rxq;
+
+ if (split)
num_rxq = rx_qgrp->splitq.num_rxq_sets;
else
num_rxq = rx_qgrp->singleq.num_rxq;
- for (j = 0; j < num_rxq; j++, k++) {
- if (idpf_is_queue_model_split(vport->rxq_model)) {
- qc[k].start_queue_id =
- cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_id);
- qc[k].type =
- cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
- } else {
- qc[k].start_queue_id =
- cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_id);
- qc[k].type =
- cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
- }
- qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
- }
- }
- if (vport->num_rxq != k - (vport->num_txq + vport->num_complq))
- return -EINVAL;
-
- if (!idpf_is_queue_model_split(vport->rxq_model))
- goto send_msg;
+ for (u32 j = 0; j < num_rxq; j++) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX;
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ if (split)
+ qs->qs[k++].rxq =
+ &rx_qgrp->splitq.rxq_sets[j]->rxq;
+ else
+ qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j];
+ }
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
- const struct idpf_buf_queue *q;
+ if (!split)
+ continue;
- q = &rx_qgrp->splitq.bufq_sets[j].bufq;
- qc[k].type =
- cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
- qc[k].start_queue_id = cpu_to_le32(q->q_id);
- qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
+ for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ qs->qs[k++].bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
}
}
- if (vport->num_bufq != k - (vport->num_txq +
- vport->num_complq +
- vport->num_rxq))
+
+ if (k != num_q)
return -EINVAL;
-send_msg:
- /* Chunk up the queue info into multiple messages */
- config_sz = sizeof(struct virtchnl2_del_ena_dis_queues);
- chunk_sz = sizeof(struct virtchnl2_queue_chunk);
+ return idpf_send_ena_dis_queue_set_msg(qs, en);
+}
- num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
- num_q);
- num_msgs = DIV_ROUND_UP(num_q, num_chunks);
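The rewrite above drops the open-coded chunk building entirely: the function now only records queue pointers in an idpf_queue_set, and the wire format is handled by idpf_send_ena_dis_queue_set_msg(). A minimal sketch of the resulting calling convention, assuming the helpers keep the signatures introduced by this patch (example_enable_txqs is a hypothetical name):

	/* Hypothetical sketch: enable only the Tx queues of a vport through
	 * the new queue-set API.
	 */
	static int example_enable_txqs(struct idpf_vport *vport)
	{
		struct idpf_queue_set *qs __free(kfree) = NULL;

		qs = idpf_alloc_queue_set(vport, vport->num_txq);
		if (!qs)
			return -ENOMEM;

		for (u32 i = 0; i < vport->num_txq; i++) {
			qs->qs[i].type = VIRTCHNL2_QUEUE_TYPE_TX;
			qs->qs[i].txq = vport->txqs[i];
		}

		return idpf_send_ena_dis_queue_set_msg(qs, true);
	}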
+/**
+ * idpf_prep_map_unmap_queue_set_vector_msg - prepare message to map or unmap
+ * a queue set to/from interrupt vectors
+ * @vport: virtual port data structure
+ * @buf: buffer to fill with the prepared message
+ * @pos: pointer to the first chunk describing the vector mapping
+ * @num_chunks: number of chunks in the message
+ *
+ * Helper function for preparing the message describing mapping queues to
+ * q_vectors.
+ *
+ * Return: the total size of the prepared message.
+ */
+static u32
+idpf_prep_map_unmap_queue_set_vector_msg(const struct idpf_vport *vport,
+ void *buf, const void *pos,
+ u32 num_chunks)
+{
+ struct virtchnl2_queue_vector_maps *vqvm = buf;
- buf_sz = struct_size(eq, chunks.chunks, num_chunks);
- eq = kzalloc(buf_sz, GFP_KERNEL);
- if (!eq)
+ vqvm->vport_id = cpu_to_le32(vport->vport_id);
+ vqvm->num_qv_maps = cpu_to_le16(num_chunks);
+ memcpy(vqvm->qv_maps, pos, num_chunks * sizeof(*vqvm->qv_maps));
+
+ return struct_size(vqvm, qv_maps, num_chunks);
+}
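This is a ->prepare_msg() callback for the chunked-message machinery: it is invoked once per virtchnl message with a window of @num_chunks entries starting at @pos and returns the number of bytes written. idpf_send_chunked_msg() itself is not visible in this hunk; a rough sketch of the loop it presumably runs (the buffer-size constant and the elided transaction call are assumptions):

	static int example_send_chunked(struct idpf_vport *vport,
					const struct idpf_chunked_msg_params *p)
	{
		void *buf __free(kfree) = kzalloc(IDPF_CTLQ_MAX_BUF_LEN,
						  GFP_KERNEL);
		u32 maxc = IDPF_NUM_CHUNKS_PER_MSG(p->config_sz, p->chunk_sz);
		const void *pos = p->chunks;
		u32 left = p->num_chunks;

		if (!buf)
			return -ENOMEM;

		while (left) {
			u32 n = min(maxc, left);
			u32 len = p->prepare_msg(vport, buf, pos, n);

			/* ...send len bytes from buf as one virtchnl
			 * transaction here...
			 */

			pos += n * p->chunk_sz;
			left -= n;
		}

		return 0;
	}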
+
+/**
+ * idpf_send_map_unmap_queue_set_vector_msg - send virtchnl map or unmap
+ * queue set vector message
+ * @qs: set of the queues to map or unmap
+ * @map: true for map and false for unmap
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int
+idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs,
+ bool map)
+{
+ struct virtchnl2_queue_vector *vqv __free(kfree) = NULL;
+ struct idpf_chunked_msg_params params = {
+ .vc_op = map ? VIRTCHNL2_OP_MAP_QUEUE_VECTOR :
+ VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR,
+ .prepare_msg = idpf_prep_map_unmap_queue_set_vector_msg,
+ .config_sz = sizeof(struct virtchnl2_queue_vector_maps),
+ .chunk_sz = sizeof(*vqv),
+ .num_chunks = qs->num,
+ };
+ bool split;
+
+ vqv = kcalloc(qs->num, sizeof(*vqv), GFP_KERNEL);
+ if (!vqv)
return -ENOMEM;
- if (ena) {
- xn_params.vc_op = VIRTCHNL2_OP_ENABLE_QUEUES;
- xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
- } else {
- xn_params.vc_op = VIRTCHNL2_OP_DISABLE_QUEUES;
- xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
- }
+ params.chunks = vqv;
- for (i = 0, k = 0; i < num_msgs; i++) {
- memset(eq, 0, buf_sz);
- eq->vport_id = cpu_to_le32(vport->vport_id);
- eq->chunks.num_chunks = cpu_to_le16(num_chunks);
- qcs = &eq->chunks;
- memcpy(qcs->chunks, &qc[k], chunk_sz * num_chunks);
+ split = idpf_is_queue_model_split(qs->vport->txq_model);
- xn_params.send_buf.iov_base = eq;
- xn_params.send_buf.iov_len = buf_sz;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- if (reply_sz < 0)
- return reply_sz;
+ for (u32 i = 0; i < qs->num; i++) {
+ const struct idpf_queue_ptr *q = &qs->qs[i];
+ const struct idpf_q_vector *vec;
+ u32 qid, v_idx, itr_idx;
+
+ vqv[i].queue_type = cpu_to_le32(q->type);
+
+ switch (q->type) {
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ qid = q->rxq->q_id;
+
+ if (idpf_queue_has(NOIRQ, q->rxq))
+ vec = NULL;
+ else
+ vec = q->rxq->q_vector;
+
+ if (vec) {
+ v_idx = vec->v_idx;
+ itr_idx = vec->rx_itr_idx;
+ } else {
+ v_idx = qs->vport->noirq_v_idx;
+ itr_idx = VIRTCHNL2_ITR_IDX_0;
+ }
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ qid = q->txq->q_id;
+
+ if (idpf_queue_has(NOIRQ, q->txq))
+ vec = NULL;
+ else if (idpf_queue_has(XDP, q->txq))
+ vec = q->txq->complq->q_vector;
+ else if (split)
+ vec = q->txq->txq_grp->complq->q_vector;
+ else
+ vec = q->txq->q_vector;
+
+ if (vec) {
+ v_idx = vec->v_idx;
+ itr_idx = vec->tx_itr_idx;
+ } else {
+ v_idx = qs->vport->noirq_v_idx;
+ itr_idx = VIRTCHNL2_ITR_IDX_1;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
- k += num_chunks;
- num_q -= num_chunks;
- num_chunks = min(num_chunks, num_q);
- /* Recalculate buffer size */
- buf_sz = struct_size(eq, chunks.chunks, num_chunks);
+ vqv[i].queue_id = cpu_to_le32(qid);
+ vqv[i].vector_id = cpu_to_le16(v_idx);
+ vqv[i].itr_idx = cpu_to_le32(itr_idx);
}
- return 0;
+ return idpf_send_chunked_msg(qs->vport, &params);
}
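Design note: a queue flagged NOIRQ is deliberately mapped to the vport's reserved vector (noirq_v_idx) rather than to a real q_vector, with ITR index 0 on the Rx side and 1 on the Tx side. XDP/XSk queues therefore never raise a per-queue interrupt and are cleaned from the transmit path or the libeth cleanup timer instead (see xdp.c below).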
/**
- * idpf_send_map_unmap_queue_vector_msg - Send virtchnl map or unmap queue
- * vector message
+ * idpf_send_map_unmap_queue_vector_msg - send virtchnl map or unmap queue
+ * vector message
* @vport: virtual port data structure
* @map: true for map and false for unmap
*
- * Send map or unmap queue vector virtchnl message. Returns 0 on success,
- * negative on failure.
+ * Return: 0 on success, -errno on failure.
*/
int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
{
- struct virtchnl2_queue_vector_maps *vqvm __free(kfree) = NULL;
- struct virtchnl2_queue_vector *vqv __free(kfree) = NULL;
- struct idpf_vc_xn_params xn_params = {};
- u32 config_sz, chunk_sz, buf_sz;
- u32 num_msgs, num_chunks, num_q;
- ssize_t reply_sz;
- int i, j, k = 0;
+ struct idpf_queue_set *qs __free(kfree) = NULL;
+ u32 num_q = vport->num_txq + vport->num_rxq;
+ u32 k = 0;
- num_q = vport->num_txq + vport->num_rxq;
-
- buf_sz = sizeof(struct virtchnl2_queue_vector) * num_q;
- vqv = kzalloc(buf_sz, GFP_KERNEL);
- if (!vqv)
+ qs = idpf_alloc_queue_set(vport, num_q);
+ if (!qs)
return -ENOMEM;
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
-
- for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
- vqv[k].queue_type =
- cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
- vqv[k].queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
+ for (u32 i = 0; i < vport->num_txq_grp; i++) {
+ const struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
- if (idpf_is_queue_model_split(vport->txq_model)) {
- vqv[k].vector_id =
- cpu_to_le16(tx_qgrp->complq->q_vector->v_idx);
- vqv[k].itr_idx =
- cpu_to_le32(tx_qgrp->complq->q_vector->tx_itr_idx);
- } else {
- vqv[k].vector_id =
- cpu_to_le16(tx_qgrp->txqs[j]->q_vector->v_idx);
- vqv[k].itr_idx =
- cpu_to_le32(tx_qgrp->txqs[j]->q_vector->tx_itr_idx);
- }
+ for (u32 j = 0; j < tx_qgrp->num_txq; j++) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ qs->qs[k++].txq = tx_qgrp->txqs[j];
}
}
- if (vport->num_txq != k)
+ if (k != vport->num_txq)
return -EINVAL;
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
- u16 num_rxq;
+ for (u32 i = 0; i < vport->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u32 num_rxq;
if (idpf_is_queue_model_split(vport->rxq_model))
num_rxq = rx_qgrp->splitq.num_rxq_sets;
else
num_rxq = rx_qgrp->singleq.num_rxq;
- for (j = 0; j < num_rxq; j++, k++) {
- struct idpf_rx_queue *rxq;
+ for (u32 j = 0; j < num_rxq; j++) {
+ qs->qs[k].type = VIRTCHNL2_QUEUE_TYPE_RX;
if (idpf_is_queue_model_split(vport->rxq_model))
- rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
+ qs->qs[k++].rxq =
+ &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
- rxq = rx_qgrp->singleq.rxqs[j];
-
- vqv[k].queue_type =
- cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
- vqv[k].queue_id = cpu_to_le32(rxq->q_id);
- vqv[k].vector_id = cpu_to_le16(rxq->q_vector->v_idx);
- vqv[k].itr_idx = cpu_to_le32(rxq->q_vector->rx_itr_idx);
+ qs->qs[k++].rxq = rx_qgrp->singleq.rxqs[j];
}
}
- if (idpf_is_queue_model_split(vport->txq_model)) {
- if (vport->num_rxq != k - vport->num_complq)
- return -EINVAL;
- } else {
- if (vport->num_rxq != k - vport->num_txq)
- return -EINVAL;
- }
+ if (k != num_q)
+ return -EINVAL;
- /* Chunk up the vector info into multiple messages */
- config_sz = sizeof(struct virtchnl2_queue_vector_maps);
- chunk_sz = sizeof(struct virtchnl2_queue_vector);
+ return idpf_send_map_unmap_queue_set_vector_msg(qs, map);
+}
- num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
- num_q);
- num_msgs = DIV_ROUND_UP(num_q, num_chunks);
+/**
+ * idpf_send_enable_queue_set_msg - send enable queues virtchnl message for
+ * selected queues
+ * @qs: set of the queues
+ *
+ * Send enable queues virtchnl message for queues contained in the @qs array.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int idpf_send_enable_queue_set_msg(const struct idpf_queue_set *qs)
+{
+ return idpf_send_ena_dis_queue_set_msg(qs, true);
+}
- buf_sz = struct_size(vqvm, qv_maps, num_chunks);
- vqvm = kzalloc(buf_sz, GFP_KERNEL);
- if (!vqvm)
- return -ENOMEM;
+/**
+ * idpf_send_disable_queue_set_msg - send disable queues virtchnl message for
+ * selected queues
+ * @qs: set of the queues
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int idpf_send_disable_queue_set_msg(const struct idpf_queue_set *qs)
+{
+ int err;
- if (map) {
- xn_params.vc_op = VIRTCHNL2_OP_MAP_QUEUE_VECTOR;
- xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
- } else {
- xn_params.vc_op = VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR;
- xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
- }
+ err = idpf_send_ena_dis_queue_set_msg(qs, false);
+ if (err)
+ return err;
- for (i = 0, k = 0; i < num_msgs; i++) {
- memset(vqvm, 0, buf_sz);
- xn_params.send_buf.iov_base = vqvm;
- xn_params.send_buf.iov_len = buf_sz;
- vqvm->vport_id = cpu_to_le32(vport->vport_id);
- vqvm->num_qv_maps = cpu_to_le16(num_chunks);
- memcpy(vqvm->qv_maps, &vqv[k], chunk_sz * num_chunks);
+ return idpf_wait_for_marker_event_set(qs);
+}
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- if (reply_sz < 0)
- return reply_sz;
+/**
+ * idpf_send_config_queue_set_msg - send virtchnl config queues message for
+ * selected queues
+ * @qs: set of the queues
+ *
+ * Send config queues virtchnl message for queues contained in the @qs array.
+ * The @qs array may contain both Rx and Tx queues.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int idpf_send_config_queue_set_msg(const struct idpf_queue_set *qs)
+{
+ int err;
- k += num_chunks;
- num_q -= num_chunks;
- num_chunks = min(num_chunks, num_q);
- /* Recalculate buffer size */
- buf_sz = struct_size(vqvm, qv_maps, num_chunks);
- }
+ err = idpf_send_config_tx_queue_set_msg(qs);
+ if (err)
+ return err;
- return 0;
+ return idpf_send_config_rx_queue_set_msg(qs);
}
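These per-set exports let a single queue pair be quiesced and restarted, e.g. around XSk pool installation in idpf_xsk_pool_setup() further below, without cycling the whole vport. A hypothetical restart sequence for an already-populated set:

	/* Hypothetical: disable, reconfigure, then re-enable one queue set. */
	static int example_restart_set(const struct idpf_queue_set *qs)
	{
		int err;

		err = idpf_send_disable_queue_set_msg(qs);
		if (err)
			return err;

		/* ...release and rebuild the queue resources here... */

		err = idpf_send_config_queue_set_msg(qs);
		if (err)
			return err;

		return idpf_send_enable_queue_set_msg(qs);
	}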
/**
@@ -2125,24 +2460,12 @@ int idpf_send_enable_queues_msg(struct idpf_vport *vport)
*/
int idpf_send_disable_queues_msg(struct idpf_vport *vport)
{
- int err, i;
+ int err;
err = idpf_send_ena_dis_queues_msg(vport, false);
if (err)
return err;
- /* switch to poll mode as interrupts will be disabled after disable
- * queues virtchnl message is sent
- */
- for (i = 0; i < vport->num_txq; i++)
- idpf_queue_set(POLL_MODE, vport->txqs[i]);
-
- /* schedule the napi to receive all the marker packets */
- local_bh_disable();
- for (i = 0; i < vport->num_q_vectors; i++)
- napi_schedule(&vport->q_vectors[i].napi);
- local_bh_enable();
-
return idpf_wait_for_marker_event(vport);
}
@@ -2207,7 +2530,7 @@ int idpf_send_delete_queues_msg(struct idpf_vport *vport)
num_chunks);
xn_params.vc_op = VIRTCHNL2_OP_DEL_QUEUES;
- xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
xn_params.send_buf.iov_base = eq;
xn_params.send_buf.iov_len = buf_size;
reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
@@ -2371,7 +2694,7 @@ int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter)
xn_params.vc_op = VIRTCHNL2_OP_DEALLOC_VECTORS;
xn_params.send_buf.iov_base = vcs;
xn_params.send_buf.iov_len = buf_size;
- xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
if (reply_sz < 0)
return reply_sz;
@@ -3285,9 +3608,17 @@ int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport)
{
struct idpf_vector_info vec_info;
int num_alloc_vecs;
+ u32 req;
vec_info.num_curr_vecs = vport->num_q_vectors;
- vec_info.num_req_vecs = max(vport->num_txq, vport->num_rxq);
+ if (vec_info.num_curr_vecs)
+ vec_info.num_curr_vecs += IDPF_RESERVED_VECS;
+
+ /* XDPSQs are all bound to the NOIRQ vector from IDPF_RESERVED_VECS */
+ req = max(vport->num_txq - vport->num_xdp_txq, vport->num_rxq) +
+ IDPF_RESERVED_VECS;
+ vec_info.num_req_vecs = req;
+
vec_info.default_vport = vport->default_vport;
vec_info.index = vport->idx;
@@ -3300,7 +3631,7 @@ int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport)
return -EINVAL;
}
- vport->num_q_vectors = num_alloc_vecs;
+ vport->num_q_vectors = num_alloc_vecs - IDPF_RESERVED_VECS;
return 0;
}
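Worked example of the new accounting, assuming IDPF_RESERVED_VECS == 1 (the constant is defined outside this hunk): a vport with 16 Tx queues of which 4 are XDPSQs, plus 12 Rx queues, requests max(16 - 4, 12) + 1 = 13 vectors. If all 13 are granted, 12 remain for regular q_vectors while the reserved one backs every NOIRQ queue.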
@@ -3765,6 +4096,16 @@ u32 idpf_get_vport_id(struct idpf_vport *vport)
return le32_to_cpu(vport_msg->vport_id);
}
+static void idpf_set_mac_type(struct idpf_vport *vport,
+ struct virtchnl2_mac_addr *mac_addr)
+{
+ bool is_primary;
+
+ is_primary = ether_addr_equal(vport->default_mac_addr, mac_addr->addr);
+ mac_addr->type = is_primary ? VIRTCHNL2_MAC_ADDR_PRIMARY :
+ VIRTCHNL2_MAC_ADDR_EXTRA;
+}
+
/**
* idpf_mac_filter_async_handler - Async callback for mac filters
* @adapter: private data struct
@@ -3894,6 +4235,7 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
list) {
if (add && f->add) {
ether_addr_copy(mac_addr[i].addr, f->macaddr);
+ idpf_set_mac_type(vport, &mac_addr[i]);
i++;
f->add = false;
if (i == total_filters)
@@ -3901,6 +4243,7 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
}
if (!add && f->remove) {
ether_addr_copy(mac_addr[i].addr, f->macaddr);
+ idpf_set_mac_type(vport, &mac_addr[i]);
i++;
f->remove = false;
if (i == total_filters)
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
index 86f30f0db07a..eac3d15daa42 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
@@ -4,7 +4,8 @@
#ifndef _IDPF_VIRTCHNL_H_
#define _IDPF_VIRTCHNL_H_
-#define IDPF_VC_XN_MIN_TIMEOUT_MSEC 2000
+#include "virtchnl2.h"
+
#define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC (60 * 1000)
#define IDPF_VC_XN_IDX_M GENMASK(7, 0)
#define IDPF_VC_XN_SALT_M GENMASK(15, 8)
@@ -115,6 +116,33 @@ int idpf_recv_mb_msg(struct idpf_adapter *adapter);
int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
u16 msg_size, u8 *msg, u16 cookie);
+struct idpf_queue_ptr {
+ enum virtchnl2_queue_type type;
+ union {
+ struct idpf_rx_queue *rxq;
+ struct idpf_tx_queue *txq;
+ struct idpf_buf_queue *bufq;
+ struct idpf_compl_queue *complq;
+ };
+};
+
+struct idpf_queue_set {
+ struct idpf_vport *vport;
+
+ u32 num;
+ struct idpf_queue_ptr qs[] __counted_by(num);
+};
+
+struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_vport *vport, u32 num);
+
+int idpf_send_enable_queue_set_msg(const struct idpf_queue_set *qs);
+int idpf_send_disable_queue_set_msg(const struct idpf_queue_set *qs);
+int idpf_send_config_queue_set_msg(const struct idpf_queue_set *qs);
+
+int idpf_send_disable_queues_msg(struct idpf_vport *vport);
+int idpf_send_config_queues_msg(struct idpf_vport *vport);
+int idpf_send_enable_queues_msg(struct idpf_vport *vport);
+
void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q);
u32 idpf_get_vport_id(struct idpf_vport *vport);
int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
@@ -131,9 +159,6 @@ void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
u16 num_complq, u16 num_rx_q, u16 num_rx_bufq);
int idpf_send_delete_queues_msg(struct idpf_vport *vport);
-int idpf_send_enable_queues_msg(struct idpf_vport *vport);
-int idpf_send_disable_queues_msg(struct idpf_vport *vport);
-int idpf_send_config_queues_msg(struct idpf_vport *vport);
int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport);
int idpf_get_vec_ids(struct idpf_adapter *adapter,
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c
index 4f1fb0cefe51..8a2e0f8c5e36 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c
@@ -521,6 +521,10 @@ idpf_ptp_get_tstamp_value(struct idpf_vport *vport,
list_add(&ptp_tx_tstamp->list_member,
&tx_tstamp_caps->latches_free);
+ u64_stats_update_begin(&vport->tstamp_stats.stats_sync);
+ u64_stats_inc(&vport->tstamp_stats.packets);
+ u64_stats_update_end(&vport->tstamp_stats.stats_sync);
+
return 0;
}
diff --git a/drivers/net/ethernet/intel/idpf/xdp.c b/drivers/net/ethernet/intel/idpf/xdp.c
new file mode 100644
index 000000000000..21ce25b0567f
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/xdp.c
@@ -0,0 +1,486 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2025 Intel Corporation */
+
+#include "idpf.h"
+#include "idpf_virtchnl.h"
+#include "xdp.h"
+#include "xsk.h"
+
+static int idpf_rxq_for_each(const struct idpf_vport *vport,
+ int (*fn)(struct idpf_rx_queue *rxq, void *arg),
+ void *arg)
+{
+ bool splitq = idpf_is_queue_model_split(vport->rxq_model);
+
+ if (!vport->rxq_grps)
+ return -ENETDOWN;
+
+ for (u32 i = 0; i < vport->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u32 num_rxq;
+
+ if (splitq)
+ num_rxq = rx_qgrp->splitq.num_rxq_sets;
+ else
+ num_rxq = rx_qgrp->singleq.num_rxq;
+
+ for (u32 j = 0; j < num_rxq; j++) {
+ struct idpf_rx_queue *q;
+ int err;
+
+ if (splitq)
+ q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
+ else
+ q = rx_qgrp->singleq.rxqs[j];
+
+ err = fn(q, arg);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
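idpf_rxq_for_each() factors the splitq/singleq group walk out of every Rx-queue operation in this file; callers supply a per-queue callback plus an opaque argument, as the rxq_info init/deinit helpers below do. A trivial hypothetical use:

	/* Hypothetical: count the vport's active Rx queues. */
	static int example_count_rxq(struct idpf_rx_queue *rxq, void *arg)
	{
		(*(u32 *)arg)++;
		return 0;
	}

	static u32 example_num_rxq(const struct idpf_vport *vport)
	{
		u32 n = 0;

		idpf_rxq_for_each(vport, example_count_rxq, &n);

		return n;
	}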
+
+static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg)
+{
+ const struct idpf_vport *vport = rxq->q_vector->vport;
+ bool split = idpf_is_queue_model_split(vport->rxq_model);
+ int err;
+
+ err = __xdp_rxq_info_reg(&rxq->xdp_rxq, vport->netdev, rxq->idx,
+ rxq->q_vector->napi.napi_id,
+ rxq->rx_buf_size);
+ if (err)
+ return err;
+
+ if (idpf_queue_has(XSK, rxq)) {
+ err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL,
+ rxq->pool);
+ if (err)
+ goto unreg;
+ } else {
+ const struct page_pool *pp;
+
+ pp = split ? rxq->bufq_sets[0].bufq.pp : rxq->pp;
+ xdp_rxq_info_attach_page_pool(&rxq->xdp_rxq, pp);
+ }
+
+ if (!split)
+ return 0;
+
+ rxq->xdpsqs = &vport->txqs[vport->xdp_txq_offset];
+ rxq->num_xdp_txq = vport->num_xdp_txq;
+
+ return 0;
+
+unreg:
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+
+ return err;
+}
+
+int idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq)
+{
+ return __idpf_xdp_rxq_info_init(rxq, NULL);
+}
+
+int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport)
+{
+ return idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_init, NULL);
+}
+
+static int __idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, void *arg)
+{
+ if (idpf_is_queue_model_split((size_t)arg)) {
+ rxq->xdpsqs = NULL;
+ rxq->num_xdp_txq = 0;
+ }
+
+ if (!idpf_queue_has(XSK, rxq))
+ xdp_rxq_info_detach_mem_model(&rxq->xdp_rxq);
+
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+
+ return 0;
+}
+
+void idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, u32 model)
+{
+ __idpf_xdp_rxq_info_deinit(rxq, (void *)(size_t)model);
+}
+
+void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport)
+{
+ idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_deinit,
+ (void *)(size_t)vport->rxq_model);
+}
+
+static int idpf_xdp_rxq_assign_prog(struct idpf_rx_queue *rxq, void *arg)
+{
+ struct bpf_prog *prog = arg;
+ struct bpf_prog *old;
+
+ if (prog)
+ bpf_prog_inc(prog);
+
+ old = rcu_replace_pointer(rxq->xdp_prog, prog, lockdep_rtnl_is_held());
+ if (old)
+ bpf_prog_put(old);
+
+ return 0;
+}
+
+void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport,
+ struct bpf_prog *xdp_prog)
+{
+ idpf_rxq_for_each(vport, idpf_xdp_rxq_assign_prog, xdp_prog);
+}
+
+static void idpf_xdp_tx_timer(struct work_struct *work);
+
+int idpf_xdpsqs_get(const struct idpf_vport *vport)
+{
+ struct libeth_xdpsq_timer **timers __free(kvfree) = NULL;
+ struct net_device *dev;
+ u32 sqs;
+
+ if (!idpf_xdp_enabled(vport))
+ return 0;
+
+ timers = kvcalloc(vport->num_xdp_txq, sizeof(*timers), GFP_KERNEL);
+ if (!timers)
+ return -ENOMEM;
+
+ for (u32 i = 0; i < vport->num_xdp_txq; i++) {
+ timers[i] = kzalloc_node(sizeof(*timers[i]), GFP_KERNEL,
+ cpu_to_mem(i));
+ if (!timers[i]) {
+ for (int j = i - 1; j >= 0; j--)
+ kfree(timers[j]);
+
+ return -ENOMEM;
+ }
+ }
+
+ dev = vport->netdev;
+ sqs = vport->xdp_txq_offset;
+
+ for (u32 i = sqs; i < vport->num_txq; i++) {
+ struct idpf_tx_queue *xdpsq = vport->txqs[i];
+
+ xdpsq->complq = xdpsq->txq_grp->complq;
+ kfree(xdpsq->refillq);
+ xdpsq->refillq = NULL;
+
+ idpf_queue_clear(FLOW_SCH_EN, xdpsq);
+ idpf_queue_clear(FLOW_SCH_EN, xdpsq->complq);
+ idpf_queue_set(NOIRQ, xdpsq);
+ idpf_queue_set(XDP, xdpsq);
+ idpf_queue_set(XDP, xdpsq->complq);
+
+ xdpsq->timer = timers[i - sqs];
+ libeth_xdpsq_get(&xdpsq->xdp_lock, dev, vport->xdpsq_share);
+ libeth_xdpsq_init_timer(xdpsq->timer, xdpsq, &xdpsq->xdp_lock,
+ idpf_xdp_tx_timer);
+
+ xdpsq->pending = 0;
+ xdpsq->xdp_tx = 0;
+ xdpsq->thresh = libeth_xdp_queue_threshold(xdpsq->desc_count);
+ }
+
+ return 0;
+}
+
+void idpf_xdpsqs_put(const struct idpf_vport *vport)
+{
+ struct net_device *dev;
+ u32 sqs;
+
+ if (!idpf_xdp_enabled(vport))
+ return;
+
+ dev = vport->netdev;
+ sqs = vport->xdp_txq_offset;
+
+ for (u32 i = sqs; i < vport->num_txq; i++) {
+ struct idpf_tx_queue *xdpsq = vport->txqs[i];
+
+ if (!idpf_queue_has_clear(XDP, xdpsq))
+ continue;
+
+ libeth_xdpsq_deinit_timer(xdpsq->timer);
+ libeth_xdpsq_put(&xdpsq->xdp_lock, dev);
+
+ kfree(xdpsq->timer);
+ xdpsq->refillq = NULL;
+ idpf_queue_clear(NOIRQ, xdpsq);
+ }
+}
+
+static int idpf_xdp_parse_cqe(const struct idpf_splitq_4b_tx_compl_desc *desc,
+ bool gen)
+{
+ u32 val;
+
+#ifdef __LIBETH_WORD_ACCESS
+ val = *(const u32 *)desc;
+#else
+ val = ((u32)le16_to_cpu(desc->q_head_compl_tag.q_head) << 16) |
+ le16_to_cpu(desc->qid_comptype_gen);
+#endif
+ if (!!(val & IDPF_TXD_COMPLQ_GEN_M) != gen)
+ return -ENODATA;
+
+ if (unlikely((val & GENMASK(IDPF_TXD_COMPLQ_GEN_S - 1, 0)) !=
+ FIELD_PREP(IDPF_TXD_COMPLQ_COMPL_TYPE_M,
+ IDPF_TXD_COMPLT_RS)))
+ return -EINVAL;
+
+ return upper_16_bits(val);
+}
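The return convention lets the poll loop below separate three cases without extra flags: a non-negative value is the new queue head (the upper 16 bits of the little-endian descriptor word), -ENODATA means the generation bit has not flipped so there is no further work, and -EINVAL marks a completion type other than RS, which is simply skipped over.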
+
+u32 idpf_xdpsq_poll(struct idpf_tx_queue *xdpsq, u32 budget)
+{
+ struct idpf_compl_queue *cq = xdpsq->complq;
+ u32 tx_ntc = xdpsq->next_to_clean;
+ u32 tx_cnt = xdpsq->desc_count;
+ u32 ntc = cq->next_to_clean;
+ u32 cnt = cq->desc_count;
+ u32 done_frames;
+ bool gen;
+
+ gen = idpf_queue_has(GEN_CHK, cq);
+
+ for (done_frames = 0; done_frames < budget; ) {
+ int ret;
+
+ ret = idpf_xdp_parse_cqe(&cq->comp_4b[ntc], gen);
+ if (ret >= 0) {
+ done_frames = ret > tx_ntc ? ret - tx_ntc :
+ ret + tx_cnt - tx_ntc;
+ goto next;
+ }
+
+ switch (ret) {
+ case -ENODATA:
+ goto out;
+ case -EINVAL:
+ break;
+ }
+
+next:
+ if (unlikely(++ntc == cnt)) {
+ ntc = 0;
+ gen = !gen;
+ idpf_queue_change(GEN_CHK, cq);
+ }
+ }
+
+out:
+ cq->next_to_clean = ntc;
+
+ return done_frames;
+}
+
+static u32 idpf_xdpsq_complete(void *_xdpsq, u32 budget)
+{
+ struct libeth_xdpsq_napi_stats ss = { };
+ struct idpf_tx_queue *xdpsq = _xdpsq;
+ u32 tx_ntc = xdpsq->next_to_clean;
+ u32 tx_cnt = xdpsq->desc_count;
+ struct xdp_frame_bulk bq;
+ struct libeth_cq_pp cp = {
+ .dev = xdpsq->dev,
+ .bq = &bq,
+ .xss = &ss,
+ .napi = true,
+ };
+ u32 done_frames;
+
+ done_frames = idpf_xdpsq_poll(xdpsq, budget);
+ if (unlikely(!done_frames))
+ return 0;
+
+ xdp_frame_bulk_init(&bq);
+
+ for (u32 i = 0; likely(i < done_frames); i++) {
+ libeth_xdp_complete_tx(&xdpsq->tx_buf[tx_ntc], &cp);
+
+ if (unlikely(++tx_ntc == tx_cnt))
+ tx_ntc = 0;
+ }
+
+ xdp_flush_frame_bulk(&bq);
+
+ xdpsq->next_to_clean = tx_ntc;
+ xdpsq->pending -= done_frames;
+ xdpsq->xdp_tx -= cp.xdp_tx;
+
+ return done_frames;
+}
+
+static u32 idpf_xdp_tx_prep(void *_xdpsq, struct libeth_xdpsq *sq)
+{
+ struct idpf_tx_queue *xdpsq = _xdpsq;
+ u32 free;
+
+ libeth_xdpsq_lock(&xdpsq->xdp_lock);
+
+ free = xdpsq->desc_count - xdpsq->pending;
+ if (free < xdpsq->thresh)
+ free += idpf_xdpsq_complete(xdpsq, xdpsq->thresh);
+
+ *sq = (struct libeth_xdpsq){
+ .sqes = xdpsq->tx_buf,
+ .descs = xdpsq->desc_ring,
+ .count = xdpsq->desc_count,
+ .lock = &xdpsq->xdp_lock,
+ .ntu = &xdpsq->next_to_use,
+ .pending = &xdpsq->pending,
+ .xdp_tx = &xdpsq->xdp_tx,
+ };
+
+ return free;
+}
+
+LIBETH_XDP_DEFINE_START();
+LIBETH_XDP_DEFINE_TIMER(static idpf_xdp_tx_timer, idpf_xdpsq_complete);
+LIBETH_XDP_DEFINE_FLUSH_TX(idpf_xdp_tx_flush_bulk, idpf_xdp_tx_prep,
+ idpf_xdp_tx_xmit);
+LIBETH_XDP_DEFINE_FLUSH_XMIT(static idpf_xdp_xmit_flush_bulk, idpf_xdp_tx_prep,
+ idpf_xdp_tx_xmit);
+LIBETH_XDP_DEFINE_END();
+
+int idpf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
+{
+ const struct idpf_netdev_priv *np = netdev_priv(dev);
+ const struct idpf_vport *vport = np->vport;
+
+ if (unlikely(!netif_carrier_ok(dev) || !vport->link_up))
+ return -ENETDOWN;
+
+ return libeth_xdp_xmit_do_bulk(dev, n, frames, flags,
+ &vport->txqs[vport->xdp_txq_offset],
+ vport->num_xdp_txq,
+ idpf_xdp_xmit_flush_bulk,
+ idpf_xdp_tx_finalize);
+}
+
+static int idpf_xdpmo_rx_hash(const struct xdp_md *ctx, u32 *hash,
+ enum xdp_rss_hash_type *rss_type)
+{
+ const struct libeth_xdp_buff *xdp = (typeof(xdp))ctx;
+ struct idpf_xdp_rx_desc desc __uninitialized;
+ const struct idpf_rx_queue *rxq;
+ struct libeth_rx_pt pt;
+
+ rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);
+
+ idpf_xdp_get_qw0(&desc, xdp->desc);
+
+ pt = rxq->rx_ptype_lkup[idpf_xdp_rx_pt(&desc)];
+ if (!libeth_rx_pt_has_hash(rxq->xdp_rxq.dev, pt))
+ return -ENODATA;
+
+ idpf_xdp_get_qw2(&desc, xdp->desc);
+
+ return libeth_xdpmo_rx_hash(hash, rss_type, idpf_xdp_rx_hash(&desc),
+ pt);
+}
+
+static const struct xdp_metadata_ops idpf_xdpmo = {
+ .xmo_rx_hash = idpf_xdpmo_rx_hash,
+};
+
+void idpf_xdp_set_features(const struct idpf_vport *vport)
+{
+ if (!idpf_is_queue_model_split(vport->rxq_model))
+ return;
+
+ libeth_xdp_set_features_noredir(vport->netdev, &idpf_xdpmo,
+ idpf_get_max_tx_bufs(vport->adapter),
+ libeth_xsktmo);
+}
+
+static int idpf_xdp_setup_prog(struct idpf_vport *vport,
+ const struct netdev_bpf *xdp)
+{
+ const struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ struct bpf_prog *old, *prog = xdp->prog;
+ struct idpf_vport_config *cfg;
+ int ret;
+
+ cfg = vport->adapter->vport_config[vport->idx];
+
+ if (test_bit(IDPF_REMOVE_IN_PROG, vport->adapter->flags) ||
+ !test_bit(IDPF_VPORT_REG_NETDEV, cfg->flags) ||
+ !!vport->xdp_prog == !!prog) {
+ if (np->state == __IDPF_VPORT_UP)
+ idpf_xdp_copy_prog_to_rqs(vport, prog);
+
+ old = xchg(&vport->xdp_prog, prog);
+ if (old)
+ bpf_prog_put(old);
+
+ cfg->user_config.xdp_prog = prog;
+
+ return 0;
+ }
+
+ if (!vport->num_xdp_txq && vport->num_txq == cfg->max_q.max_txq) {
+ NL_SET_ERR_MSG_MOD(xdp->extack,
+ "No Tx queues available for XDP, please decrease the number of regular SQs");
+ return -ENOSPC;
+ }
+
+ old = cfg->user_config.xdp_prog;
+ cfg->user_config.xdp_prog = prog;
+
+ ret = idpf_initiate_soft_reset(vport, IDPF_SR_Q_CHANGE);
+ if (ret) {
+ NL_SET_ERR_MSG_MOD(xdp->extack,
+ "Could not reopen the vport after XDP setup");
+
+ cfg->user_config.xdp_prog = old;
+ old = prog;
+ }
+
+ if (old)
+ bpf_prog_put(old);
+
+ libeth_xdp_set_redirect(vport->netdev, vport->xdp_prog);
+
+ return ret;
+}
+
+int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct idpf_vport *vport;
+ int ret;
+
+ idpf_vport_ctrl_lock(dev);
+ vport = idpf_netdev_to_vport(dev);
+
+ if (!idpf_is_queue_model_split(vport->txq_model))
+ goto notsupp;
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ ret = idpf_xdp_setup_prog(vport, xdp);
+ break;
+ case XDP_SETUP_XSK_POOL:
+ ret = idpf_xsk_pool_setup(vport, xdp);
+ break;
+ default:
+notsupp:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ idpf_vport_ctrl_unlock(dev);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/intel/idpf/xdp.h b/drivers/net/ethernet/intel/idpf/xdp.h
new file mode 100644
index 000000000000..479f5ef3c604
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/xdp.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2025 Intel Corporation */
+
+#ifndef _IDPF_XDP_H_
+#define _IDPF_XDP_H_
+
+#include <net/libeth/xdp.h>
+
+#include "idpf_txrx.h"
+
+int idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq);
+int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport);
+void idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, u32 model);
+void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport);
+void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport,
+ struct bpf_prog *xdp_prog);
+
+int idpf_xdpsqs_get(const struct idpf_vport *vport);
+void idpf_xdpsqs_put(const struct idpf_vport *vport);
+
+u32 idpf_xdpsq_poll(struct idpf_tx_queue *xdpsq, u32 budget);
+bool idpf_xdp_tx_flush_bulk(struct libeth_xdp_tx_bulk *bq, u32 flags);
+
+/**
+ * idpf_xdp_tx_xmit - produce a single HW Tx descriptor out of XDP desc
+ * @desc: XDP descriptor to pull the DMA address and length from
+ * @i: descriptor index on the queue to fill
+ * @sq: XDP queue to produce the HW Tx descriptor on
+ * @priv: &xsk_tx_metadata_ops on XSk xmit or %NULL
+ */
+static inline void idpf_xdp_tx_xmit(struct libeth_xdp_tx_desc desc, u32 i,
+ const struct libeth_xdpsq *sq, u64 priv)
+{
+ struct idpf_flex_tx_desc *tx_desc = sq->descs;
+ u32 cmd;
+
+ cmd = FIELD_PREP(IDPF_FLEX_TXD_QW1_DTYPE_M,
+ IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2);
+ if (desc.flags & LIBETH_XDP_TX_LAST)
+ cmd |= FIELD_PREP(IDPF_FLEX_TXD_QW1_CMD_M,
+ IDPF_TX_DESC_CMD_EOP);
+ if (priv && (desc.flags & LIBETH_XDP_TX_CSUM))
+ cmd |= FIELD_PREP(IDPF_FLEX_TXD_QW1_CMD_M,
+ IDPF_TX_FLEX_DESC_CMD_CS_EN);
+
+ tx_desc = &tx_desc[i];
+ tx_desc->buf_addr = cpu_to_le64(desc.addr);
+#ifdef __LIBETH_WORD_ACCESS
+ *(u64 *)&tx_desc->qw1 = ((u64)desc.len << 48) | cmd;
+#else
+ tx_desc->qw1.buf_size = cpu_to_le16(desc.len);
+ tx_desc->qw1.cmd_dtype = cpu_to_le16(cmd);
+#endif
+}
+
+static inline void idpf_xdpsq_set_rs(const struct idpf_tx_queue *xdpsq)
+{
+ u32 ntu, cmd;
+
+ ntu = xdpsq->next_to_use;
+ if (unlikely(!ntu))
+ ntu = xdpsq->desc_count;
+
+ cmd = FIELD_PREP(IDPF_FLEX_TXD_QW1_CMD_M, IDPF_TX_DESC_CMD_RS);
+#ifdef __LIBETH_WORD_ACCESS
+ *(u64 *)&xdpsq->flex_tx[ntu - 1].q.qw1 |= cmd;
+#else
+ xdpsq->flex_tx[ntu - 1].q.qw1.cmd_dtype |= cpu_to_le16(cmd);
+#endif
+}
+
+static inline void idpf_xdpsq_update_tail(const struct idpf_tx_queue *xdpsq)
+{
+ dma_wmb();
+ writel_relaxed(xdpsq->next_to_use, xdpsq->tail);
+}
+
+/**
+ * idpf_xdp_tx_finalize - finalize sending over XDPSQ
+ * @_xdpsq: XDP Tx queue
+ * @sent: whether any frames were sent
+ * @flush: whether to update RS bit and the tail register
+ *
+ * Set the RS bit ("end of batch"), bump the tail, and queue the cleanup timer.
+ * To be called after a NAPI polling loop, at the end of .ndo_xdp_xmit() etc.
+ */
+static inline void idpf_xdp_tx_finalize(void *_xdpsq, bool sent, bool flush)
+{
+ struct idpf_tx_queue *xdpsq = _xdpsq;
+
+ if ((!flush || unlikely(!sent)) &&
+ likely(xdpsq->desc_count - 1 != xdpsq->pending))
+ return;
+
+ libeth_xdpsq_lock(&xdpsq->xdp_lock);
+
+ idpf_xdpsq_set_rs(xdpsq);
+ idpf_xdpsq_update_tail(xdpsq);
+
+ libeth_xdpsq_queue_timer(xdpsq->timer);
+
+ libeth_xdpsq_unlock(&xdpsq->xdp_lock);
+}
+
+struct idpf_xdp_rx_desc {
+ aligned_u64 qw0;
+#define IDPF_XDP_RX_BUFQ BIT_ULL(47)
+#define IDPF_XDP_RX_GEN BIT_ULL(46)
+#define IDPF_XDP_RX_LEN GENMASK_ULL(45, 32)
+#define IDPF_XDP_RX_PT GENMASK_ULL(25, 16)
+
+ aligned_u64 qw1;
+#define IDPF_XDP_RX_BUF GENMASK_ULL(47, 32)
+#define IDPF_XDP_RX_EOP BIT_ULL(1)
+
+ aligned_u64 qw2;
+#define IDPF_XDP_RX_HASH GENMASK_ULL(31, 0)
+
+ aligned_u64 qw3;
+} __aligned(4 * sizeof(u64));
+static_assert(sizeof(struct idpf_xdp_rx_desc) ==
+ sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3));
+
+#define idpf_xdp_rx_bufq(desc) !!((desc)->qw0 & IDPF_XDP_RX_BUFQ)
+#define idpf_xdp_rx_gen(desc) !!((desc)->qw0 & IDPF_XDP_RX_GEN)
+#define idpf_xdp_rx_len(desc) FIELD_GET(IDPF_XDP_RX_LEN, (desc)->qw0)
+#define idpf_xdp_rx_pt(desc) FIELD_GET(IDPF_XDP_RX_PT, (desc)->qw0)
+#define idpf_xdp_rx_buf(desc) FIELD_GET(IDPF_XDP_RX_BUF, (desc)->qw1)
+#define idpf_xdp_rx_eop(desc) !!((desc)->qw1 & IDPF_XDP_RX_EOP)
+#define idpf_xdp_rx_hash(desc) FIELD_GET(IDPF_XDP_RX_HASH, (desc)->qw2)
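A worked decode of the qw0 accessors on an illustrative (made-up) descriptor value:

	/* Assuming qw0 == 0x00008abc01230000ULL:
	 *   idpf_xdp_rx_bufq() -> 1     (bit 47 set)
	 *   idpf_xdp_rx_gen()  -> 0     (bit 46 clear)
	 *   idpf_xdp_rx_len()  -> 0xabc (bits 45:32, 2748 bytes)
	 *   idpf_xdp_rx_pt()   -> 0x123 (bits 25:16, ptype 291)
	 */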
+
+static inline void
+idpf_xdp_get_qw0(struct idpf_xdp_rx_desc *desc,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
+{
+#ifdef __LIBETH_WORD_ACCESS
+ desc->qw0 = ((const typeof(desc))rxd)->qw0;
+#else
+ desc->qw0 = ((u64)le16_to_cpu(rxd->pktlen_gen_bufq_id) << 32) |
+ ((u64)le16_to_cpu(rxd->ptype_err_fflags0) << 16);
+#endif
+}
+
+static inline void
+idpf_xdp_get_qw1(struct idpf_xdp_rx_desc *desc,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
+{
+#ifdef __LIBETH_WORD_ACCESS
+ desc->qw1 = ((const typeof(desc))rxd)->qw1;
+#else
+ desc->qw1 = ((u64)le16_to_cpu(rxd->buf_id) << 32) |
+ rxd->status_err0_qw1;
+#endif
+}
+
+static inline void
+idpf_xdp_get_qw2(struct idpf_xdp_rx_desc *desc,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
+{
+#ifdef __LIBETH_WORD_ACCESS
+ desc->qw2 = ((const typeof(desc))rxd)->qw2;
+#else
+ desc->qw2 = ((u64)rxd->hash3 << 24) |
+ ((u64)rxd->ff2_mirrid_hash2.hash2 << 16) |
+ le16_to_cpu(rxd->hash1);
+#endif
+}
+
+void idpf_xdp_set_features(const struct idpf_vport *vport);
+
+int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp);
+int idpf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags);
+
+#endif /* _IDPF_XDP_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/xsk.c b/drivers/net/ethernet/intel/idpf/xsk.c
new file mode 100644
index 000000000000..fd2cc43ab43c
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/xsk.c
@@ -0,0 +1,633 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2025 Intel Corporation */
+
+#include <net/libeth/xsk.h>
+
+#include "idpf.h"
+#include "xdp.h"
+#include "xsk.h"
+
+static void idpf_xsk_tx_timer(struct work_struct *work);
+
+static void idpf_xsk_setup_rxq(const struct idpf_vport *vport,
+ struct idpf_rx_queue *rxq)
+{
+ struct xsk_buff_pool *pool;
+
+ pool = xsk_get_pool_from_qid(vport->netdev, rxq->idx);
+ if (!pool || !pool->dev || !xsk_buff_can_alloc(pool, 1))
+ return;
+
+ rxq->pool = pool;
+
+ idpf_queue_set(XSK, rxq);
+}
+
+static void idpf_xsk_setup_bufq(const struct idpf_vport *vport,
+ struct idpf_buf_queue *bufq)
+{
+ struct xsk_buff_pool *pool;
+ u32 qid = U32_MAX;
+
+ for (u32 i = 0; i < vport->num_rxq_grp; i++) {
+ const struct idpf_rxq_group *grp = &vport->rxq_grps[i];
+
+ for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ if (&grp->splitq.bufq_sets[j].bufq == bufq) {
+ qid = grp->splitq.rxq_sets[0]->rxq.idx;
+ goto setup;
+ }
+ }
+ }
+
+setup:
+ pool = xsk_get_pool_from_qid(vport->netdev, qid);
+ if (!pool || !pool->dev || !xsk_buff_can_alloc(pool, 1))
+ return;
+
+ bufq->pool = pool;
+
+ idpf_queue_set(XSK, bufq);
+}
+
+static void idpf_xsk_setup_txq(const struct idpf_vport *vport,
+ struct idpf_tx_queue *txq)
+{
+ struct xsk_buff_pool *pool;
+ u32 qid;
+
+ idpf_queue_clear(XSK, txq);
+
+ if (!idpf_queue_has(XDP, txq))
+ return;
+
+ qid = txq->idx - vport->xdp_txq_offset;
+
+ pool = xsk_get_pool_from_qid(vport->netdev, qid);
+ if (!pool || !pool->dev)
+ return;
+
+ txq->pool = pool;
+ libeth_xdpsq_init_timer(txq->timer, txq, &txq->xdp_lock,
+ idpf_xsk_tx_timer);
+
+ idpf_queue_assign(NOIRQ, txq, xsk_uses_need_wakeup(pool));
+ idpf_queue_set(XSK, txq);
+}
+
+static void idpf_xsk_setup_complq(const struct idpf_vport *vport,
+ struct idpf_compl_queue *complq)
+{
+ const struct xsk_buff_pool *pool;
+ u32 qid;
+
+ idpf_queue_clear(XSK, complq);
+
+ if (!idpf_queue_has(XDP, complq))
+ return;
+
+ qid = complq->txq_grp->txqs[0]->idx - vport->xdp_txq_offset;
+
+ pool = xsk_get_pool_from_qid(vport->netdev, qid);
+ if (!pool || !pool->dev)
+ return;
+
+ idpf_queue_set(XSK, complq);
+}
+
+void idpf_xsk_setup_queue(const struct idpf_vport *vport, void *q,
+ enum virtchnl2_queue_type type)
+{
+ if (!idpf_xdp_enabled(vport))
+ return;
+
+ switch (type) {
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ idpf_xsk_setup_rxq(vport, q);
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ idpf_xsk_setup_bufq(vport, q);
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ idpf_xsk_setup_txq(vport, q);
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+ idpf_xsk_setup_complq(vport, q);
+ break;
+ default:
+ break;
+ }
+}
+
+void idpf_xsk_clear_queue(void *q, enum virtchnl2_queue_type type)
+{
+ struct idpf_compl_queue *complq;
+ struct idpf_buf_queue *bufq;
+ struct idpf_rx_queue *rxq;
+ struct idpf_tx_queue *txq;
+
+ switch (type) {
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ rxq = q;
+ if (!idpf_queue_has_clear(XSK, rxq))
+ return;
+
+ rxq->pool = NULL;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ bufq = q;
+ if (!idpf_queue_has_clear(XSK, bufq))
+ return;
+
+ bufq->pool = NULL;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ txq = q;
+ if (!idpf_queue_has_clear(XSK, txq))
+ return;
+
+ idpf_queue_set(NOIRQ, txq);
+ txq->dev = txq->netdev->dev.parent;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+ complq = q;
+ idpf_queue_clear(XSK, complq);
+ break;
+ default:
+ break;
+ }
+}
+
+void idpf_xsk_init_wakeup(struct idpf_q_vector *qv)
+{
+ libeth_xsk_init_wakeup(&qv->csd, &qv->napi);
+}
+
+void idpf_xsksq_clean(struct idpf_tx_queue *xdpsq)
+{
+ struct libeth_xdpsq_napi_stats ss = { };
+ u32 ntc = xdpsq->next_to_clean;
+ struct xdp_frame_bulk bq;
+ struct libeth_cq_pp cp = {
+ .dev = xdpsq->pool->dev,
+ .bq = &bq,
+ .xss = &ss,
+ };
+ u32 xsk_frames = 0;
+
+ xdp_frame_bulk_init(&bq);
+
+ while (ntc != xdpsq->next_to_use) {
+ struct libeth_sqe *sqe = &xdpsq->tx_buf[ntc];
+
+ if (sqe->type)
+ libeth_xdp_complete_tx(sqe, &cp);
+ else
+ xsk_frames++;
+
+ if (unlikely(++ntc == xdpsq->desc_count))
+ ntc = 0;
+ }
+
+ xdp_flush_frame_bulk(&bq);
+
+ if (xsk_frames)
+ xsk_tx_completed(xdpsq->pool, xsk_frames);
+}
+
+static noinline u32 idpf_xsksq_complete_slow(struct idpf_tx_queue *xdpsq,
+ u32 done)
+{
+ struct libeth_xdpsq_napi_stats ss = { };
+ u32 ntc = xdpsq->next_to_clean;
+ u32 cnt = xdpsq->desc_count;
+ struct xdp_frame_bulk bq;
+ struct libeth_cq_pp cp = {
+ .dev = xdpsq->pool->dev,
+ .bq = &bq,
+ .xss = &ss,
+ .napi = true,
+ };
+ u32 xsk_frames = 0;
+
+ xdp_frame_bulk_init(&bq);
+
+ for (u32 i = 0; likely(i < done); i++) {
+ struct libeth_sqe *sqe = &xdpsq->tx_buf[ntc];
+
+ if (sqe->type)
+ libeth_xdp_complete_tx(sqe, &cp);
+ else
+ xsk_frames++;
+
+ if (unlikely(++ntc == cnt))
+ ntc = 0;
+ }
+
+ xdp_flush_frame_bulk(&bq);
+
+ xdpsq->next_to_clean = ntc;
+ xdpsq->xdp_tx -= cp.xdp_tx;
+
+ return xsk_frames;
+}
+
+static __always_inline u32 idpf_xsksq_complete(void *_xdpsq, u32 budget)
+{
+ struct idpf_tx_queue *xdpsq = _xdpsq;
+ u32 tx_ntc = xdpsq->next_to_clean;
+ u32 tx_cnt = xdpsq->desc_count;
+ u32 done_frames;
+ u32 xsk_frames;
+
+ done_frames = idpf_xdpsq_poll(xdpsq, budget);
+ if (unlikely(!done_frames))
+ return 0;
+
+ if (likely(!xdpsq->xdp_tx)) {
+ tx_ntc += done_frames;
+ if (tx_ntc >= tx_cnt)
+ tx_ntc -= tx_cnt;
+
+ xdpsq->next_to_clean = tx_ntc;
+ xsk_frames = done_frames;
+
+ goto finalize;
+ }
+
+ xsk_frames = idpf_xsksq_complete_slow(xdpsq, done_frames);
+ if (xsk_frames)
+finalize:
+ xsk_tx_completed(xdpsq->pool, xsk_frames);
+
+ xdpsq->pending -= done_frames;
+
+ return done_frames;
+}
+
+static u32 idpf_xsk_tx_prep(void *_xdpsq, struct libeth_xdpsq *sq)
+{
+ struct idpf_tx_queue *xdpsq = _xdpsq;
+ u32 free;
+
+ libeth_xdpsq_lock(&xdpsq->xdp_lock);
+
+ free = xdpsq->desc_count - xdpsq->pending;
+ if (free < xdpsq->thresh)
+ free += idpf_xsksq_complete(xdpsq, xdpsq->thresh);
+
+ *sq = (struct libeth_xdpsq){
+ .pool = xdpsq->pool,
+ .sqes = xdpsq->tx_buf,
+ .descs = xdpsq->desc_ring,
+ .count = xdpsq->desc_count,
+ .lock = &xdpsq->xdp_lock,
+ .ntu = &xdpsq->next_to_use,
+ .pending = &xdpsq->pending,
+ .xdp_tx = &xdpsq->xdp_tx,
+ };
+
+ return free;
+}
+
+static u32 idpf_xsk_xmit_prep(void *_xdpsq, struct libeth_xdpsq *sq)
+{
+ struct idpf_tx_queue *xdpsq = _xdpsq;
+
+ *sq = (struct libeth_xdpsq){
+ .pool = xdpsq->pool,
+ .sqes = xdpsq->tx_buf,
+ .descs = xdpsq->desc_ring,
+ .count = xdpsq->desc_count,
+ .lock = &xdpsq->xdp_lock,
+ .ntu = &xdpsq->next_to_use,
+ .pending = &xdpsq->pending,
+ };
+
+ /* The queue is cleaned, the budget is already known, optimize out
+ * the second min() by passing the type limit.
+ */
+ return U32_MAX;
+}
+
+bool idpf_xsk_xmit(struct idpf_tx_queue *xsksq)
+{
+ u32 free;
+
+ libeth_xdpsq_lock(&xsksq->xdp_lock);
+
+ free = xsksq->desc_count - xsksq->pending;
+ if (free < xsksq->thresh)
+ free += idpf_xsksq_complete(xsksq, xsksq->thresh);
+
+ return libeth_xsk_xmit_do_bulk(xsksq->pool, xsksq,
+ min(free - 1, xsksq->thresh),
+ libeth_xsktmo, idpf_xsk_xmit_prep,
+ idpf_xdp_tx_xmit, idpf_xdp_tx_finalize);
+}
+
+LIBETH_XDP_DEFINE_START();
+LIBETH_XDP_DEFINE_TIMER(static idpf_xsk_tx_timer, idpf_xsksq_complete);
+LIBETH_XSK_DEFINE_FLUSH_TX(static idpf_xsk_tx_flush_bulk, idpf_xsk_tx_prep,
+ idpf_xdp_tx_xmit);
+LIBETH_XSK_DEFINE_RUN(static idpf_xsk_run_pass, idpf_xsk_run_prog,
+ idpf_xsk_tx_flush_bulk, idpf_rx_process_skb_fields);
+LIBETH_XSK_DEFINE_FINALIZE(static idpf_xsk_finalize_rx, idpf_xsk_tx_flush_bulk,
+ idpf_xdp_tx_finalize);
+LIBETH_XDP_DEFINE_END();
+
+static void idpf_xskfqe_init(const struct libeth_xskfq_fp *fq, u32 i)
+{
+ struct virtchnl2_splitq_rx_buf_desc *desc = fq->descs;
+
+ desc = &desc[i];
+#ifdef __LIBETH_WORD_ACCESS
+ *(u64 *)&desc->qword0 = i;
+#else
+ desc->qword0.buf_id = cpu_to_le16(i);
+#endif
+ desc->pkt_addr = cpu_to_le64(libeth_xsk_buff_xdp_get_dma(fq->fqes[i]));
+}
+
+static bool idpf_xskfq_refill_thresh(struct idpf_buf_queue *bufq, u32 count)
+{
+ struct libeth_xskfq_fp fq = {
+ .pool = bufq->pool,
+ .fqes = bufq->xsk_buf,
+ .descs = bufq->split_buf,
+ .ntu = bufq->next_to_use,
+ .count = bufq->desc_count,
+ };
+ u32 done;
+
+ done = libeth_xskfqe_alloc(&fq, count, idpf_xskfqe_init);
+ writel(fq.ntu, bufq->tail);
+
+ bufq->next_to_use = fq.ntu;
+ bufq->pending -= done;
+
+ return done == count;
+}
+
+static bool idpf_xskfq_refill(struct idpf_buf_queue *bufq)
+{
+ u32 count, rx_thresh = bufq->thresh;
+
+ count = ALIGN_DOWN(bufq->pending - 1, rx_thresh);
+
+ for (u32 i = 0; i < count; i += rx_thresh) {
+ if (unlikely(!idpf_xskfq_refill_thresh(bufq, rx_thresh)))
+ return false;
+ }
+
+ return true;
+}
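The ALIGN_DOWN(bufq->pending - 1, rx_thresh) arithmetic posts buffers in threshold-sized batches while always leaving at least one descriptor slot unused, the usual ring invariant that keeps a full ring distinguishable from an empty one. For example, with pending == 513 and thresh == 128, count == ALIGN_DOWN(512, 128) == 512, i.e. four batches of 128.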
+
+int idpf_xskfq_init(struct idpf_buf_queue *bufq)
+{
+ struct libeth_xskfq fq = {
+ .pool = bufq->pool,
+ .count = bufq->desc_count,
+ .nid = idpf_q_vector_to_mem(bufq->q_vector),
+ };
+ int ret;
+
+ ret = libeth_xskfq_create(&fq);
+ if (ret)
+ return ret;
+
+ bufq->xsk_buf = fq.fqes;
+ bufq->pending = fq.pending;
+ bufq->thresh = fq.thresh;
+ bufq->rx_buf_size = fq.buf_len;
+
+ if (!idpf_xskfq_refill(bufq))
+ netdev_err(bufq->pool->netdev,
+ "failed to allocate XSk buffers for qid %d\n",
+ bufq->pool->queue_id);
+
+ bufq->next_to_alloc = bufq->next_to_use;
+
+ idpf_queue_clear(HSPLIT_EN, bufq);
+ bufq->rx_hbuf_size = 0;
+
+ return 0;
+}
+
+void idpf_xskfq_rel(struct idpf_buf_queue *bufq)
+{
+ struct libeth_xskfq fq = {
+ .fqes = bufq->xsk_buf,
+ };
+
+ libeth_xskfq_destroy(&fq);
+
+ bufq->rx_buf_size = fq.buf_len;
+ bufq->thresh = fq.thresh;
+ bufq->pending = fq.pending;
+}
+
+struct idpf_xskfq_refill_set {
+ struct {
+ struct idpf_buf_queue *q;
+ u32 buf_id;
+ u32 pending;
+ } bufqs[IDPF_MAX_BUFQS_PER_RXQ_GRP];
+};
+
+static bool idpf_xskfq_refill_set(const struct idpf_xskfq_refill_set *set)
+{
+ bool ret = true;
+
+ for (u32 i = 0; i < ARRAY_SIZE(set->bufqs); i++) {
+ struct idpf_buf_queue *bufq = set->bufqs[i].q;
+ u32 ntc;
+
+ if (!bufq)
+ continue;
+
+ ntc = set->bufqs[i].buf_id;
+ if (unlikely(++ntc == bufq->desc_count))
+ ntc = 0;
+
+ bufq->next_to_clean = ntc;
+ bufq->pending += set->bufqs[i].pending;
+
+ if (bufq->pending > bufq->thresh)
+ ret &= idpf_xskfq_refill(bufq);
+ }
+
+ return ret;
+}
+
+int idpf_xskrq_poll(struct idpf_rx_queue *rxq, u32 budget)
+{
+ struct idpf_xskfq_refill_set set = { };
+ struct libeth_rq_napi_stats rs = { };
+ bool wake, gen, fail = false;
+ u32 ntc = rxq->next_to_clean;
+ struct libeth_xdp_buff *xdp;
+ LIBETH_XDP_ONSTACK_BULK(bq);
+ u32 cnt = rxq->desc_count;
+
+ wake = xsk_uses_need_wakeup(rxq->pool);
+ if (wake)
+ xsk_clear_rx_need_wakeup(rxq->pool);
+
+ gen = idpf_queue_has(GEN_CHK, rxq);
+
+ libeth_xsk_tx_init_bulk(&bq, rxq->xdp_prog, rxq->xdp_rxq.dev,
+ rxq->xdpsqs, rxq->num_xdp_txq);
+ xdp = rxq->xsk;
+
+ while (likely(rs.packets < budget)) {
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
+ struct idpf_xdp_rx_desc desc __uninitialized;
+ struct idpf_buf_queue *bufq;
+ u32 bufq_id, buf_id;
+
+ rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb;
+
+ idpf_xdp_get_qw0(&desc, rx_desc);
+ if (idpf_xdp_rx_gen(&desc) != gen)
+ break;
+
+ dma_rmb();
+
+ bufq_id = idpf_xdp_rx_bufq(&desc);
+ bufq = set.bufqs[bufq_id].q;
+ if (!bufq) {
+ bufq = &rxq->bufq_sets[bufq_id].bufq;
+ set.bufqs[bufq_id].q = bufq;
+ }
+
+ idpf_xdp_get_qw1(&desc, rx_desc);
+ buf_id = idpf_xdp_rx_buf(&desc);
+
+ set.bufqs[bufq_id].buf_id = buf_id;
+ set.bufqs[bufq_id].pending++;
+
+ xdp = libeth_xsk_process_buff(xdp, bufq->xsk_buf[buf_id],
+ idpf_xdp_rx_len(&desc));
+
+ if (unlikely(++ntc == cnt)) {
+ ntc = 0;
+ gen = !gen;
+ idpf_queue_change(GEN_CHK, rxq);
+ }
+
+ if (!idpf_xdp_rx_eop(&desc) || unlikely(!xdp))
+ continue;
+
+ fail = !idpf_xsk_run_pass(xdp, &bq, rxq->napi, &rs, rx_desc);
+ xdp = NULL;
+
+ if (fail)
+ break;
+ }
+
+ idpf_xsk_finalize_rx(&bq);
+
+ rxq->next_to_clean = ntc;
+ rxq->xsk = xdp;
+
+ fail |= !idpf_xskfq_refill_set(&set);
+
+ u64_stats_update_begin(&rxq->stats_sync);
+ u64_stats_add(&rxq->q_stats.packets, rs.packets);
+ u64_stats_add(&rxq->q_stats.bytes, rs.bytes);
+ u64_stats_update_end(&rxq->stats_sync);
+
+ if (!wake)
+ return unlikely(fail) ? budget : rs.packets;
+
+ if (unlikely(fail))
+ xsk_set_rx_need_wakeup(rxq->pool);
+
+ return rs.packets;
+}
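Note the need_wakeup handshake: the flag is cleared on entry so user space stops kicking the queue while the driver is actively polling, and is set again only on failure (an XDP pass failure or refill exhaustion) to tell the application to wake the queue once more. In the no-wakeup mode, returning the full budget on failure likewise keeps NAPI rescheduled.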
+
+int idpf_xsk_pool_setup(struct idpf_vport *vport, struct netdev_bpf *bpf)
+{
+ struct xsk_buff_pool *pool = bpf->xsk.pool;
+ u32 qid = bpf->xsk.queue_id;
+ bool restart;
+ int ret;
+
+ if (pool && !IS_ALIGNED(xsk_pool_get_rx_frame_size(pool),
+ LIBETH_RX_BUF_STRIDE)) {
+ NL_SET_ERR_MSG_FMT_MOD(bpf->extack,
+ "%s: HW doesn't support frames sizes not aligned to %u (qid %u: %u)",
+ netdev_name(vport->netdev),
+ LIBETH_RX_BUF_STRIDE, qid,
+ xsk_pool_get_rx_frame_size(pool));
+ return -EINVAL;
+ }
+
+ restart = idpf_xdp_enabled(vport) && netif_running(vport->netdev);
+ if (!restart)
+ goto pool;
+
+ ret = idpf_qp_switch(vport, qid, false);
+ if (ret) {
+ NL_SET_ERR_MSG_FMT_MOD(bpf->extack,
+ "%s: failed to disable queue pair %u: %pe",
+ netdev_name(vport->netdev), qid,
+ ERR_PTR(ret));
+ return ret;
+ }
+
+pool:
+ ret = libeth_xsk_setup_pool(vport->netdev, qid, pool);
+ if (ret) {
+ NL_SET_ERR_MSG_FMT_MOD(bpf->extack,
+ "%s: failed to configure XSk pool for pair %u: %pe",
+ netdev_name(vport->netdev), qid,
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ if (!restart)
+ return 0;
+
+ ret = idpf_qp_switch(vport, qid, true);
+ if (ret) {
+ NL_SET_ERR_MSG_FMT_MOD(bpf->extack,
+ "%s: failed to enable queue pair %u: %pe",
+ netdev_name(vport->netdev), qid,
+ ERR_PTR(ret));
+ goto err_dis;
+ }
+
+ return 0;
+
+err_dis:
+ libeth_xsk_setup_pool(vport->netdev, qid, false);
+
+ return ret;
+}
+
+int idpf_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
+{
+ const struct idpf_netdev_priv *np = netdev_priv(dev);
+ const struct idpf_vport *vport = np->vport;
+ struct idpf_q_vector *q_vector;
+
+ if (unlikely(idpf_vport_ctrl_is_locked(dev)))
+ return -EBUSY;
+
+ if (unlikely(!vport->link_up))
+ return -ENETDOWN;
+
+ if (unlikely(!vport->num_xdp_txq))
+ return -ENXIO;
+
+ q_vector = idpf_find_rxq_vec(vport, qid);
+ if (unlikely(!q_vector->xsksq))
+ return -ENXIO;
+
+ libeth_xsk_wakeup(&q_vector->csd, qid);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/intel/idpf/xsk.h b/drivers/net/ethernet/intel/idpf/xsk.h
new file mode 100644
index 000000000000..b622d08c03e8
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/xsk.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2025 Intel Corporation */
+
+#ifndef _IDPF_XSK_H_
+#define _IDPF_XSK_H_
+
+#include <linux/types.h>
+
+enum virtchnl2_queue_type;
+struct idpf_buf_queue;
+struct idpf_q_vector;
+struct idpf_rx_queue;
+struct idpf_tx_queue;
+struct idpf_vport;
+struct net_device;
+struct netdev_bpf;
+
+void idpf_xsk_setup_queue(const struct idpf_vport *vport, void *q,
+ enum virtchnl2_queue_type type);
+void idpf_xsk_clear_queue(void *q, enum virtchnl2_queue_type type);
+void idpf_xsk_init_wakeup(struct idpf_q_vector *qv);
+
+int idpf_xskfq_init(struct idpf_buf_queue *bufq);
+void idpf_xskfq_rel(struct idpf_buf_queue *bufq);
+void idpf_xsksq_clean(struct idpf_tx_queue *xdpq);
+
+int idpf_xskrq_poll(struct idpf_rx_queue *rxq, u32 budget);
+bool idpf_xsk_xmit(struct idpf_tx_queue *xsksq);
+
+int idpf_xsk_pool_setup(struct idpf_vport *vport, struct netdev_bpf *xdp);
+int idpf_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);
+
+#endif /* !_IDPF_XSK_H_ */
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 64dfc362d1dc..44a85ad749a4 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -2372,7 +2372,7 @@ static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
checksum += nvm_data;
}
- if (checksum != (u16) NVM_SUM) {
+ if (checksum != NVM_SUM) {
hw_dbg("NVM Checksum Invalid\n");
ret_val = -E1000_ERR_NVM;
goto out;
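This and the analogous igb/igc hunks below drop a redundant (u16) cast: NVM_SUM is 0xBABA, which already fits in 16 bits, and checksum is a u16, so both the comparison (after the usual integer promotions) and the subtract-and-store behave identically without the cast.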
@@ -2406,7 +2406,7 @@ static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
}
checksum += nvm_data;
}
- checksum = (u16) NVM_SUM - checksum;
+ checksum = NVM_SUM - checksum;
ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
&checksum);
if (ret_val)
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 503b239868e8..9db29b231d6a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -602,7 +602,7 @@ static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
}
checksum += nvm_data;
}
- checksum = (u16) NVM_SUM - checksum;
+ checksum = NVM_SUM - checksum;
ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
&checksum);
if (ret_val) {
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index 2dcd64d6dec3..c8638502c2be 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -636,7 +636,7 @@ s32 igb_validate_nvm_checksum(struct e1000_hw *hw)
checksum += nvm_data;
}
- if (checksum != (u16) NVM_SUM) {
+ if (checksum != NVM_SUM) {
hw_dbg("NVM Checksum Invalid\n");
ret_val = -E1000_ERR_NVM;
goto out;
@@ -668,7 +668,7 @@ s32 igb_update_nvm_checksum(struct e1000_hw *hw)
}
checksum += nvm_data;
}
- checksum = (u16) NVM_SUM - checksum;
+ checksum = NVM_SUM - checksum;
ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
if (ret_val)
hw_dbg("NVM Write Error while updating checksum.\n");
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index c3f4f7cd264e..0fff1df81b7b 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -217,7 +217,7 @@ static inline int igb_skb_pad(void)
#define IGB_MASTER_SLAVE e1000_ms_hw_default
#endif
-#define IGB_MNG_VLAN_NONE -1
+#define IGB_MNG_VLAN_NONE 0xFFFF
enum igb_tx_flags {
/* cmd_type flags */
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 92ef33459aec..f8a208c84f15 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -920,11 +920,11 @@ static int igb_set_ringparam(struct net_device *netdev,
}
if (adapter->num_tx_queues > adapter->num_rx_queues)
- temp_ring = vmalloc(array_size(sizeof(struct igb_ring),
- adapter->num_tx_queues));
+ temp_ring = vmalloc_array(adapter->num_tx_queues,
+ sizeof(struct igb_ring));
else
- temp_ring = vmalloc(array_size(sizeof(struct igb_ring),
- adapter->num_rx_queues));
+ temp_ring = vmalloc_array(adapter->num_rx_queues,
+ sizeof(struct igb_ring));
if (!temp_ring) {
err = -ENOMEM;
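vmalloc_array() is the idiomatic replacement here: it performs the same overflow-checked size computation as vmalloc(array_size(...)), but takes the element count and size as separate arguments in the style of kcalloc(), which is harder to get wrong.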
@@ -2081,11 +2081,8 @@ static void igb_diag_test(struct net_device *netdev,
} else {
dev_info(&adapter->pdev->dev, "online testing starting\n");
- /* PHY is powered down when interface is down */
- if (if_running && igb_link_test(adapter, &data[TEST_LINK]))
+ if (igb_link_test(adapter, &data[TEST_LINK]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- else
- data[TEST_LINK] = 0;
/* Online tests aren't run; pass by default */
data[TEST_REG] = 0;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index a9a7a94ae61e..85f9589cc568 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1531,8 +1531,7 @@ static void igb_update_mng_vlan(struct igb_adapter *adapter)
adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
}
- if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
- (vid != old_vid) &&
+ if (old_vid != IGB_MNG_VLAN_NONE && vid != old_vid &&
!test_bit(old_vid, adapter->active_vlans)) {
/* remove VID from filter table */
igb_vfta_set(hw, vid, pf_id, false, true);
@@ -4453,8 +4452,7 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
- rx_ring->queue_index,
- rx_ring->q_vector->napi.napi_id);
+ rx_ring->queue_index, 0);
if (res < 0) {
dev_err(dev, "Failed to register xdp_rxq index %u\n",
rx_ring->queue_index);
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 773895c663fd..9c08ebfad804 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -30,11 +30,12 @@ static const struct igbvf_stats igbvf_gstrings_stats[] = {
{ "rx_bytes", IGBVF_STAT(stats.gorc, stats.base_gorc) },
{ "tx_bytes", IGBVF_STAT(stats.gotc, stats.base_gotc) },
{ "multicast", IGBVF_STAT(stats.mprc, stats.base_mprc) },
- { "lbrx_bytes", IGBVF_STAT(stats.gorlbc, stats.base_gorlbc) },
{ "lbrx_packets", IGBVF_STAT(stats.gprlbc, stats.base_gprlbc) },
+ { "lbtx_packets", IGBVF_STAT(stats.gptlbc, stats.base_gptlbc) },
+ { "lbrx_bytes", IGBVF_STAT(stats.gorlbc, stats.base_gorlbc) },
+ { "lbtx_bytes", IGBVF_STAT(stats.gotlbc, stats.base_gotlbc) },
{ "tx_restart_queue", IGBVF_STAT(restart_queue, zero_base) },
{ "tx_timeout_count", IGBVF_STAT(tx_timeout_count, zero_base) },
- { "rx_long_byte_count", IGBVF_STAT(stats.gorc, stats.base_gorc) },
{ "rx_csum_offload_good", IGBVF_STAT(hw_csum_good, zero_base) },
{ "rx_csum_offload_errors", IGBVF_STAT(hw_csum_err, zero_base) },
{ "rx_header_split", IGBVF_STAT(rx_hdr_split, zero_base) },
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 266bfcf2a28f..a427f05814c1 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -345,6 +345,7 @@ struct igc_adapter {
/* LEDs */
struct mutex led_mutex;
struct igc_led_classdev *leds;
+ bool leds_available;
};
void igc_up(struct igc_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index ecb35b693ce5..f3e7218ba6f3 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -627,11 +627,11 @@ igc_ethtool_set_ringparam(struct net_device *netdev,
}
if (adapter->num_tx_queues > adapter->num_rx_queues)
- temp_ring = vmalloc(array_size(sizeof(struct igc_ring),
- adapter->num_tx_queues));
+ temp_ring = vmalloc_array(adapter->num_tx_queues,
+ sizeof(struct igc_ring));
else
- temp_ring = vmalloc(array_size(sizeof(struct igc_ring),
- adapter->num_rx_queues));
+ temp_ring = vmalloc_array(adapter->num_rx_queues,
+ sizeof(struct igc_ring));
if (!temp_ring) {
err = -ENOMEM;
diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
index 0dd61719f1ed..5226d10cc95b 100644
--- a/drivers/net/ethernet/intel/igc/igc_i225.c
+++ b/drivers/net/ethernet/intel/igc/igc_i225.c
@@ -435,7 +435,7 @@ static s32 igc_update_nvm_checksum_i225(struct igc_hw *hw)
}
checksum += nvm_data;
}
- checksum = (u16)NVM_SUM - checksum;
+ checksum = NVM_SUM - checksum;
ret_val = igc_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
&checksum);
if (ret_val) {
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 458e5eaa92e5..728d7ca5338b 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -7149,6 +7149,13 @@ static int igc_probe(struct pci_dev *pdev,
adapter->port_num = hw->bus.func;
adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+ /* PCI config space info */
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ hw->revision_id = pdev->revision;
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_device_id = pdev->subsystem_device;
+
/* Disable ASPM L1.2 on I226 devices to avoid packet loss */
if (igc_is_device_id_i226(hw))
pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
@@ -7175,13 +7182,6 @@ static int igc_probe(struct pci_dev *pdev,
netdev->mem_start = pci_resource_start(pdev, 0);
netdev->mem_end = pci_resource_end(pdev, 0);
- /* PCI config space info */
- hw->vendor_id = pdev->vendor;
- hw->device_id = pdev->device;
- hw->revision_id = pdev->revision;
- hw->subsystem_vendor_id = pdev->subsystem_vendor;
- hw->subsystem_device_id = pdev->subsystem_device;
-
/* Copy the default MAC and PHY function pointers */
memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
@@ -7335,8 +7335,14 @@ static int igc_probe(struct pci_dev *pdev,
if (IS_ENABLED(CONFIG_IGC_LEDS)) {
err = igc_led_setup(adapter);
- if (err)
- goto err_register;
+ if (err) {
+ netdev_warn_once(netdev,
+ "LED init failed (%d); continuing without LED support\n",
+ err);
+ adapter->leds_available = false;
+ } else {
+ adapter->leds_available = true;
+ }
}
return 0;
@@ -7392,7 +7398,7 @@ static void igc_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->watchdog_task);
hrtimer_cancel(&adapter->hrtimer);
- if (IS_ENABLED(CONFIG_IGC_LEDS))
+ if (IS_ENABLED(CONFIG_IGC_LEDS) && adapter->leds_available)
igc_led_free(adapter);
/* Release control of h/w to f/w. If f/w is AMT enabled, this
diff --git a/drivers/net/ethernet/intel/igc/igc_nvm.c b/drivers/net/ethernet/intel/igc/igc_nvm.c
index efd121c03967..a47b8d39238c 100644
--- a/drivers/net/ethernet/intel/igc/igc_nvm.c
+++ b/drivers/net/ethernet/intel/igc/igc_nvm.c
@@ -123,7 +123,7 @@ s32 igc_validate_nvm_checksum(struct igc_hw *hw)
checksum += nvm_data;
}
- if (checksum != (u16)NVM_SUM) {
+ if (checksum != NVM_SUM) {
hw_dbg("NVM Checksum Invalid\n");
ret_val = -IGC_ERR_NVM;
goto out;
@@ -155,7 +155,7 @@ s32 igc_update_nvm_checksum(struct igc_hw *hw)
}
checksum += nvm_data;
}
- checksum = (u16)NVM_SUM - checksum;
+ checksum = NVM_SUM - checksum;
ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
if (ret_val)
hw_dbg("NVM Write Error while updating checksum.\n");
diff --git a/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c b/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c
index 54f1b83dfe42..d227f4d2a2d1 100644
--- a/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c
+++ b/drivers/net/ethernet/intel/ixgbe/devlink/devlink.c
@@ -543,6 +543,7 @@ int ixgbe_devlink_register_port(struct ixgbe_adapter *adapter)
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
attrs.phys.port_number = adapter->hw.bus.func;
+ attrs.no_phys_port_name = 1;
ixgbe_devlink_set_switch_id(adapter, &attrs.switch_id);
devlink_port_attrs_set(devlink_port, &attrs);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 4ff19426ab74..3ea6765f9c5d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1739,9 +1739,9 @@ int ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
}
}
- checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+ checksum = IXGBE_EEPROM_SUM - checksum;
- return (int)checksum;
+ return checksum;
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
index d74116441d1c..c2f8189a0738 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
@@ -774,7 +774,7 @@ static void ixgbe_parse_vf_func_caps(struct ixgbe_hw *hw,
* from parsing capabilities and use this to calculate the number of resources
* per PF based on the max value passed in.
*
- * Return: the number of resources per PF or 0, if no PH are available.
+ * Return: the number of resources per PF or 0, if no PFs are available.
*/
static u32 ixgbe_get_num_per_func(struct ixgbe_hw *hw, u32 max)
{
@@ -1953,6 +1953,16 @@ int ixgbe_identify_phy_e610(struct ixgbe_hw *hw)
phy_type_low & IXGBE_PHY_TYPE_LOW_1G_SGMII ||
phy_type_high & IXGBE_PHY_TYPE_HIGH_1G_USXGMII)
hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_T ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_X ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_KX ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_SGMII ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
+ if (phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_T ||
+ phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_KR ||
+ phy_type_high & IXGBE_PHY_TYPE_HIGH_5G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
if (phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_T ||
phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_DA ||
phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_SR ||
@@ -1963,31 +1973,10 @@ int ixgbe_identify_phy_e610(struct ixgbe_hw *hw)
phy_type_high & IXGBE_PHY_TYPE_HIGH_10G_USXGMII)
hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;
- /* 2.5 and 5 Gbps link speeds must be excluded from the
- * auto-negotiation set used during driver initialization due to
- * compatibility issues with certain switches. Those issues do not
- * exist in case of E610 2.5G SKU device (0x57b1).
- */
- if (!hw->phy.autoneg_advertised &&
- hw->device_id != IXGBE_DEV_ID_E610_2_5G_T)
- hw->phy.autoneg_advertised = hw->phy.speeds_supported;
-
- if (phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_T ||
- phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_X ||
- phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_KX ||
- phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_SGMII ||
- phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_USXGMII)
- hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
-
- if (!hw->phy.autoneg_advertised &&
- hw->device_id == IXGBE_DEV_ID_E610_2_5G_T)
+ /* Initialize autoneg speeds */
+ if (!hw->phy.autoneg_advertised)
hw->phy.autoneg_advertised = hw->phy.speeds_supported;
- if (phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_T ||
- phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_KR ||
- phy_type_high & IXGBE_PHY_TYPE_HIGH_5G_USXGMII)
- hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
-
/* Set PHY ID */
memcpy(&hw->phy.id, pcaps.phy_id_oui, sizeof(u32));
@@ -3008,50 +2997,71 @@ static int ixgbe_get_nvm_srev(struct ixgbe_hw *hw,
* Searches through the Option ROM flash contents to locate the CIVD data for
* the image.
*
- * Return: the exit code of the operation.
+ * Return: -ENOMEM when memory cannot be allocated, -EDOM on checksum
+ * violation, -ENODATA when the proper data cannot be found, -EIO on a
+ * faulty read, or 0 on success.
+ *
+ * On success, the collected data is stored in @civd.
*/
static int
ixgbe_get_orom_civd_data(struct ixgbe_hw *hw, enum ixgbe_bank_select bank,
struct ixgbe_orom_civd_info *civd)
{
- struct ixgbe_orom_civd_info tmp;
+ u32 orom_size = hw->flash.banks.orom_size;
+ u8 *orom_data;
u32 offset;
int err;
+ orom_data = kzalloc(orom_size, GFP_KERNEL);
+ if (!orom_data)
+ return -ENOMEM;
+
+ err = ixgbe_read_flash_module(hw, bank,
+ IXGBE_E610_SR_1ST_OROM_BANK_PTR, 0,
+ orom_data, orom_size);
+ if (err) {
+ err = -EIO;
+ goto cleanup;
+ }
+
/* The CIVD section is located in the Option ROM aligned to 512 bytes.
* The first 4 bytes must contain the ASCII characters "$CIV".
* A simple modulo 256 sum of all of the bytes of the structure must
* equal 0.
*/
- for (offset = 0; (offset + SZ_512) <= hw->flash.banks.orom_size;
- offset += SZ_512) {
+ for (offset = 0; offset + SZ_512 <= orom_size; offset += SZ_512) {
+ struct ixgbe_orom_civd_info *tmp;
u8 sum = 0;
u32 i;
- err = ixgbe_read_flash_module(hw, bank,
- IXGBE_E610_SR_1ST_OROM_BANK_PTR,
- offset,
- (u8 *)&tmp, sizeof(tmp));
- if (err)
- return err;
+ BUILD_BUG_ON(sizeof(*tmp) > SZ_512);
+
+ tmp = (struct ixgbe_orom_civd_info *)&orom_data[offset];
/* Skip forward until we find a matching signature */
- if (memcmp(IXGBE_OROM_CIV_SIGNATURE, tmp.signature,
- sizeof(tmp.signature)))
+ if (memcmp(IXGBE_OROM_CIV_SIGNATURE, tmp->signature,
+ sizeof(tmp->signature)))
continue;
/* Verify that the simple checksum is zero */
- for (i = 0; i < sizeof(tmp); i++)
- sum += ((u8 *)&tmp)[i];
+ for (i = 0; i < sizeof(*tmp); i++)
+ sum += ((u8 *)tmp)[i];
+
+ if (sum) {
+ err = -EDOM;
+ goto cleanup;
+ }
- if (sum)
- return -EDOM;
+ *civd = *tmp;
+ err = 0;
- *civd = tmp;
- return 0;
+ goto cleanup;
}
- return -ENODATA;
+ err = -ENODATA;
+cleanup:
+ kfree(orom_data);
+ return err;
}
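
The rewritten loop reads the whole Option ROM once and validates candidates in memory, but the rule is unchanged: a CIVD block sits at a 512-byte boundary, begins with the ASCII signature "$CIV", and all of its bytes must sum to zero modulo 256. A freestanding sketch of that validation, assuming the block has already been read from flash:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    static bool civd_block_valid(const uint8_t *blk, size_t len)
    {
            uint8_t sum = 0;

            if (len < 4 || memcmp(blk, "$CIV", 4))
                    return false;

            for (size_t i = 0; i < len; i++)
                    sum += blk[i];  /* u8 arithmetic is modulo 256 */

            return sum == 0;        /* checksum byte must cancel the rest */
    }
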
/**
@@ -3125,7 +3135,7 @@ static int ixgbe_get_orom_ver_info(struct ixgbe_hw *hw,
if (err)
return err;
- combo_ver = le32_to_cpu(civd.combo_ver);
+ combo_ver = get_unaligned_le32(&civd.combo_ver);
orom->major = (u8)FIELD_GET(IXGBE_OROM_VER_MASK, combo_ver);
orom->patch = (u8)FIELD_GET(IXGBE_OROM_VER_PATCH_MASK, combo_ver);
@@ -3911,6 +3921,38 @@ static int ixgbe_read_pba_string_e610(struct ixgbe_hw *hw, u8 *pba_num,
return err;
}
+static int __fwlog_send_cmd(void *priv, struct libie_aq_desc *desc, void *buf,
+ u16 size)
+{
+ struct ixgbe_hw *hw = priv;
+
+ return ixgbe_aci_send_cmd(hw, desc, buf, size);
+}
+
+int ixgbe_fwlog_init(struct ixgbe_hw *hw)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+ struct libie_fwlog_api api = {
+ .pdev = adapter->pdev,
+ .send_cmd = __fwlog_send_cmd,
+ .debugfs_root = adapter->ixgbe_dbg_adapter,
+ .priv = hw,
+ };
+
+ if (hw->mac.type != ixgbe_mac_e610)
+ return -EOPNOTSUPP;
+
+ return libie_fwlog_init(&hw->fwlog, &api);
+}
+
+void ixgbe_fwlog_deinit(struct ixgbe_hw *hw)
+{
+ if (hw->mac.type != ixgbe_mac_e610)
+ return;
+
+ libie_fwlog_deinit(&hw->fwlog);
+}
+
static const struct ixgbe_mac_operations mac_ops_e610 = {
.init_hw = ixgbe_init_hw_generic,
.start_hw = ixgbe_start_hw_e610,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h
index 782c489b0fa7..11916b979d28 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h
@@ -96,5 +96,7 @@ int ixgbe_aci_update_nvm(struct ixgbe_hw *hw, u16 module_typeid,
bool last_command, u8 command_flags);
int ixgbe_nvm_write_activate(struct ixgbe_hw *hw, u16 cmd_flags,
u8 *response_flags);
+int ixgbe_fwlog_init(struct ixgbe_hw *hw);
+void ixgbe_fwlog_deinit(struct ixgbe_hw *hw);
#endif /* _IXGBE_E610_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 25c3a09ad7f1..2d660e9edb80 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1278,7 +1278,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
/* allocate temporary buffer to store rings in */
i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
adapter->num_rx_queues);
- temp_ring = vmalloc(array_size(i, sizeof(struct ixgbe_ring)));
+ temp_ring = vmalloc_array(i, sizeof(struct ixgbe_ring));
if (!temp_ring) {
err = -ENOMEM;
@@ -3571,13 +3571,13 @@ ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_keee *edata)
for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed)
- linkmode_set_bit(ixgbe_lp_map[i].link_mode,
+ linkmode_set_bit(ixgbe_ls_map[i].link_mode,
edata->supported);
}
for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed)
- linkmode_set_bit(ixgbe_lp_map[i].link_mode,
+ linkmode_set_bit(ixgbe_ls_map[i].link_mode,
edata->advertised);
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 6122a0abb41f..90d4e57b1c93 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -172,6 +172,7 @@ static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+MODULE_IMPORT_NS("LIBIE_FWLOG");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL v2");
@@ -968,10 +969,6 @@ static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++)
clear_bit(__IXGBE_HANG_CHECK_ARMED,
&adapter->tx_ring[i]->state);
-
- for (i = 0; i < adapter->num_xdp_queues; i++)
- clear_bit(__IXGBE_HANG_CHECK_ARMED,
- &adapter->xdp_ring[i]->state);
}
static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
@@ -1214,7 +1211,7 @@ static void ixgbe_pf_handle_tx_hang(struct ixgbe_ring *tx_ring,
struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
struct ixgbe_hw *hw = &adapter->hw;
- e_err(drv, "Detected Tx Unit Hang%s\n"
+ e_err(drv, "Detected Tx Unit Hang\n"
" Tx Queue <%d>\n"
" TDH, TDT <%x>, <%x>\n"
" next_to_use <%x>\n"
@@ -1222,16 +1219,14 @@ static void ixgbe_pf_handle_tx_hang(struct ixgbe_ring *tx_ring,
"tx_buffer_info[next_to_clean]\n"
" time_stamp <%lx>\n"
" jiffies <%lx>\n",
- ring_is_xdp(tx_ring) ? " (XDP)" : "",
tx_ring->queue_index,
IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
tx_ring->next_to_use, next,
tx_ring->tx_buffer_info[next].time_stamp, jiffies);
- if (!ring_is_xdp(tx_ring))
- netif_stop_subqueue(tx_ring->netdev,
- tx_ring->queue_index);
+ netif_stop_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
}
/**
@@ -1451,6 +1446,9 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
total_bytes);
adapter->tx_ipsec += total_ipsec;
+ if (ring_is_xdp(tx_ring))
+ return !!budget;
+
if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
if (adapter->hw.mac.type == ixgbe_mac_e610)
ixgbe_handle_mdd_event(adapter, tx_ring);
@@ -1468,9 +1466,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
return true;
}
- if (ring_is_xdp(tx_ring))
- return !!budget;
-
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
if (!__netif_txq_completed_wake(txq, total_packets, total_bytes,
@@ -3361,6 +3356,10 @@ static void ixgbe_handle_fw_event(struct ixgbe_adapter *adapter)
e_crit(drv, "%s\n", ixgbe_overheat_msg);
ixgbe_down(adapter);
break;
+ case libie_aqc_opc_fw_logs_event:
+ libie_get_fwlog_data(&hw->fwlog, event.msg_buf,
+ le16_to_cpu(event.desc.datalen));
+ break;
default:
e_warn(hw, "unknown FW async event captured\n");
break;
@@ -6979,6 +6978,13 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
break;
}
+ /* Make sure the SWFW semaphore is in a valid state */
+ if (hw->mac.ops.init_swfw_sync)
+ hw->mac.ops.init_swfw_sync(hw);
+
+ if (hw->mac.type == ixgbe_mac_e610)
+ mutex_init(&hw->aci.lock);
+
#ifdef IXGBE_FCOE
/* FCoE support exists, always init the FCoE lock */
spin_lock_init(&adapter->fcoe.lock);
@@ -7974,12 +7980,9 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
return;
/* Force detection of hung controller */
- if (netif_carrier_ok(adapter->netdev)) {
+ if (netif_carrier_ok(adapter->netdev))
for (i = 0; i < adapter->num_tx_queues; i++)
set_check_for_tx_hang(adapter->tx_ring[i]);
- for (i = 0; i < adapter->num_xdp_queues; i++)
- set_check_for_tx_hang(adapter->xdp_ring[i]);
- }
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
/*
@@ -8199,13 +8202,6 @@ static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter)
return true;
}
- for (i = 0; i < adapter->num_xdp_queues; i++) {
- struct ixgbe_ring *ring = adapter->xdp_ring[i];
-
- if (ring->next_to_use != ring->next_to_clean)
- return true;
- }
-
return false;
}
@@ -11005,6 +11001,10 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
return -ENETDOWN;
+ if (!netif_carrier_ok(adapter->netdev) ||
+ !netif_running(adapter->netdev))
+ return -ENETDOWN;
+
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
@@ -11655,10 +11655,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_sw_init;
- /* Make sure the SWFW semaphore is in a valid state */
- if (hw->mac.ops.init_swfw_sync)
- hw->mac.ops.init_swfw_sync(hw);
-
if (ixgbe_check_fw_error(adapter))
return ixgbe_recovery_probe(adapter);
@@ -11862,8 +11858,6 @@ skip_sriov:
ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
ixgbe_mac_set_default_filter(adapter);
- if (hw->mac.type == ixgbe_mac_e610)
- mutex_init(&hw->aci.lock);
timer_setup(&adapter->service_timer, ixgbe_service_timer, 0);
if (ixgbe_removed(hw->hw_addr)) {
@@ -12010,6 +12004,10 @@ skip_sriov:
ixgbe_devlink_init_regions(adapter);
devl_register(adapter->devlink);
devl_unlock(adapter->devlink);
+
+ if (ixgbe_fwlog_init(hw))
+ e_dev_info("Firmware logging not supported\n");
+
return 0;
err_netdev:
@@ -12019,9 +12017,9 @@ err_register:
devl_unlock(adapter->devlink);
ixgbe_release_hw_control(adapter);
ixgbe_clear_interrupt_scheme(adapter);
+err_sw_init:
if (hw->mac.type == ixgbe_mac_e610)
mutex_destroy(&adapter->hw.aci.lock);
-err_sw_init:
ixgbe_disable_sriov(adapter);
adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
iounmap(adapter->io_addr);
@@ -12067,15 +12065,14 @@ static void ixgbe_remove(struct pci_dev *pdev)
devl_lock(adapter->devlink);
devl_unregister(adapter->devlink);
ixgbe_devlink_destroy_regions(adapter);
+ ixgbe_fwlog_deinit(&adapter->hw);
ixgbe_dbg_adapter_exit(adapter);
set_bit(__IXGBE_REMOVING, &adapter->state);
cancel_work_sync(&adapter->service_task);
- if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
ixgbe_disable_link_status_events(adapter);
- mutex_destroy(&adapter->hw.aci.lock);
- }
if (adapter->mii_bus)
mdiobus_unregister(adapter->mii_bus);
@@ -12135,6 +12132,9 @@ static void ixgbe_remove(struct pci_dev *pdev)
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
+ mutex_destroy(&adapter->hw.aci.lock);
+
if (disable_dev)
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 36577091cd9e..b1bfeb21537a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -7,6 +7,7 @@
#include <linux/types.h>
#include <linux/mdio.h>
#include <linux/netdevice.h>
+#include <linux/net/intel/libie/fwlog.h>
#include "ixgbe_type_e610.h"
/* Device IDs */
@@ -3752,6 +3753,7 @@ struct ixgbe_hw {
struct ixgbe_flash_info flash;
struct ixgbe_hw_dev_caps dev_caps;
struct ixgbe_hw_func_caps func_caps;
+ struct libie_fwlog fwlog;
};
struct ixgbe_info {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
index d2f22d8558f8..ff8d640a50b1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h
@@ -932,7 +932,7 @@ struct ixgbe_orom_civd_info {
__le32 combo_ver; /* Combo Image Version number */
u8 combo_name_len; /* Length of the unicode combo image version string, max of 32 */
__le16 combo_name[32]; /* Unicode string representing the Combo Image version */
-};
+} __packed;
/* Function specific capabilities */
struct ixgbe_hw_func_caps {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index c2353aed0120..e67e2feb045b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -373,9 +373,9 @@ static int ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
}
}
- checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+ checksum = IXGBE_EEPROM_SUM - checksum;
- return (int)checksum;
+ return checksum;
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index bfa647086c70..76d2fa3ef518 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1060,9 +1060,9 @@ static int ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
return status;
}
- checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+ checksum = IXGBE_EEPROM_SUM - checksum;
- return (int)checksum;
+ return checksum;
}
/** ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
@@ -1163,7 +1163,7 @@ static int ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
return status;
}
-/** ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
+/** ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif
* @hw: pointer to hardware structure
* @offset: offset of word in the EEPROM to write
* @data: word write to the EEPROM
@@ -2318,7 +2318,7 @@ static int ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
}
/**
- * ixgbe_get_lasi_ext_t_x550em - Determime external Base T PHY interrupt cause
+ * ixgbe_get_lasi_ext_t_x550em - Determine external Base T PHY interrupt cause
* @hw: pointer to hardware structure
* @lsc: pointer to boolean flag which indicates whether external Base T
* PHY interrupt is lsc
@@ -2628,7 +2628,7 @@ static int ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
}
/** ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link
- * @hw: point to hardware structure
+ * @hw: pointer to hardware structure
*
* Configures the link between the integrated KR PHY and the external X557 PHY
* The driver will call this function when it gets a link status change
@@ -2745,7 +2745,7 @@ static int ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
return -EINVAL;
- /* To turn on the LED, set mode to ON. */
+ /* To turn off the LED, set mode to OFF. */
hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
MDIO_MMD_VEND1, &phy_data);
phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK;
@@ -2812,7 +2812,7 @@ int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
return ret_val;
}
-/** ixgbe_get_lcd_x550em - Determine lowest common denominator
+/** ixgbe_get_lcd_t_x550em - Determine lowest common denominator
* @hw: pointer to hardware structure
* @lcd_speed: pointer to lowest common link speed
*
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index ac58964b2f08..7b941505a9d0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -398,7 +398,7 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
dma_addr_t dma;
u32 cmd_type;
- while (budget-- > 0) {
+ while (likely(budget)) {
if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
work_done = false;
break;
@@ -433,6 +433,8 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
xdp_ring->next_to_use++;
if (xdp_ring->next_to_use == xdp_ring->count)
xdp_ring->next_to_use = 0;
+
+ budget--;
}
if (tx_desc) {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 7ac53171b041..bebad564188e 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -276,9 +276,9 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
}
if (new_tx_count != adapter->tx_ring_count) {
- tx_ring = vmalloc(array_size(sizeof(*tx_ring),
- adapter->num_tx_queues +
- adapter->num_xdp_queues));
+ tx_ring = vmalloc_array(adapter->num_tx_queues +
+ adapter->num_xdp_queues,
+ sizeof(*tx_ring));
if (!tx_ring) {
err = -ENOMEM;
goto clear_reset;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 535d0f71f521..28e25641b167 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -4323,7 +4323,7 @@ static int ixgbevf_resume(struct device *dev_d)
struct pci_dev *pdev = to_pci_dev(dev_d);
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- u32 err;
+ int err;
adapter->hw.hw_addr = adapter->io_addr;
smp_mb__before_atomic();
diff --git a/drivers/net/ethernet/intel/libie/Kconfig b/drivers/net/ethernet/intel/libie/Kconfig
index e6072758e3d8..70831c7e336e 100644
--- a/drivers/net/ethernet/intel/libie/Kconfig
+++ b/drivers/net/ethernet/intel/libie/Kconfig
@@ -14,3 +14,12 @@ config LIBIE_ADMINQ
help
Helper functions used by Intel Ethernet drivers for administration
queue command interface (aka adminq).
+
+config LIBIE_FWLOG
+ tristate
+ select LIBIE_ADMINQ
+ help
+	  Library to support firmware logging on devices that have support
+	  for it. Firmware logging uses the admin queue interface to
+	  communicate with the device. Debugfs is the user interface used
+	  to configure logging and dump all collected logs.
diff --git a/drivers/net/ethernet/intel/libie/Makefile b/drivers/net/ethernet/intel/libie/Makefile
index e98f00b865d3..db57fc6780ea 100644
--- a/drivers/net/ethernet/intel/libie/Makefile
+++ b/drivers/net/ethernet/intel/libie/Makefile
@@ -8,3 +8,7 @@ libie-y := rx.o
obj-$(CONFIG_LIBIE_ADMINQ) += libie_adminq.o
libie_adminq-y := adminq.o
+
+obj-$(CONFIG_LIBIE_FWLOG) += libie_fwlog.o
+
+libie_fwlog-y := fwlog.o
diff --git a/drivers/net/ethernet/intel/libie/adminq.c b/drivers/net/ethernet/intel/libie/adminq.c
index 55356548e3f0..7b4ff479e7e5 100644
--- a/drivers/net/ethernet/intel/libie/adminq.c
+++ b/drivers/net/ethernet/intel/libie/adminq.c
@@ -6,7 +6,7 @@
static const char * const libie_aq_str_arr[] = {
#define LIBIE_AQ_STR(x) \
- [LIBIE_AQ_RC_##x] = "LIBIE_AQ_RC" #x
+ [LIBIE_AQ_RC_##x] = "LIBIE_AQ_RC_" #x
LIBIE_AQ_STR(OK),
LIBIE_AQ_STR(EPERM),
LIBIE_AQ_STR(ENOENT),
diff --git a/drivers/net/ethernet/intel/libie/fwlog.c b/drivers/net/ethernet/intel/libie/fwlog.c
new file mode 100644
index 000000000000..f39cc11cb7c5
--- /dev/null
+++ b/drivers/net/ethernet/intel/libie/fwlog.c
@@ -0,0 +1,1115 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022, Intel Corporation. */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/net/intel/libie/fwlog.h>
+#include <linux/pci.h>
+#include <linux/random.h>
+#include <linux/vmalloc.h>
+
+#define DEFAULT_SYMBOL_NAMESPACE "LIBIE_FWLOG"
+
+/* create a define that has an extra module that doesn't really exist. this
+ * is so we can add a module 'all' to easily enable/disable all the modules
+ */
+#define LIBIE_NR_FW_LOG_MODULES (LIBIE_AQC_FW_LOG_ID_MAX + 1)
+
+/* the ordering in this array is important. it matches the ordering of the
+ * values in the FW so the index is the same value as in
+ * libie_aqc_fw_logging_mod
+ */
+static const char * const libie_fwlog_module_string[] = {
+ "general",
+ "ctrl",
+ "link",
+ "link_topo",
+ "dnl",
+ "i2c",
+ "sdp",
+ "mdio",
+ "adminq",
+ "hdma",
+ "lldp",
+ "dcbx",
+ "dcb",
+ "xlr",
+ "nvm",
+ "auth",
+ "vpd",
+ "iosf",
+ "parser",
+ "sw",
+ "scheduler",
+ "txq",
+ "rsvd",
+ "post",
+ "watchdog",
+ "task_dispatch",
+ "mng",
+ "synce",
+ "health",
+ "tsdrv",
+ "pfreg",
+ "mdlver",
+ "all",
+};
+
+/* the ordering in this array is important. it matches the ordering of the
+ * values in the FW so the index is the same value as in libie_fwlog_level
+ */
+static const char * const libie_fwlog_level_string[] = {
+ "none",
+ "error",
+ "warning",
+ "normal",
+ "verbose",
+};
+
+static const char * const libie_fwlog_log_size[] = {
+ "128K",
+ "256K",
+ "512K",
+ "1M",
+ "2M",
+};
+
+static bool libie_fwlog_ring_empty(struct libie_fwlog_ring *rings)
+{
+ return rings->head == rings->tail;
+}
+
+static void libie_fwlog_ring_increment(u16 *item, u16 size)
+{
+ *item = (*item + 1) & (size - 1);
+}
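
The mask-based increment assumes the ring size is a power of two, so the AND acts as a cheap modulo and the index wraps to zero. A tiny userspace illustration with a hypothetical size of 8:

    #include <assert.h>
    #include <stdint.h>

    static void ring_increment(uint16_t *item, uint16_t size)
    {
            *item = (*item + 1) & (size - 1);       /* size must be 2^n */
    }

    int main(void)
    {
            uint16_t idx = 7;

            ring_increment(&idx, 8);        /* (7 + 1) & 7 == 0: wrapped */
            assert(idx == 0);
            return 0;
    }
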
+
+static int libie_fwlog_alloc_ring_buffs(struct libie_fwlog_ring *rings)
+{
+ int i, nr_bytes;
+ u8 *mem;
+
+ nr_bytes = rings->size * LIBIE_AQ_MAX_BUF_LEN;
+ mem = vzalloc(nr_bytes);
+ if (!mem)
+ return -ENOMEM;
+
+ for (i = 0; i < rings->size; i++) {
+ struct libie_fwlog_data *ring = &rings->rings[i];
+
+ ring->data_size = LIBIE_AQ_MAX_BUF_LEN;
+ ring->data = mem;
+ mem += LIBIE_AQ_MAX_BUF_LEN;
+ }
+
+ return 0;
+}
+
+static void libie_fwlog_free_ring_buffs(struct libie_fwlog_ring *rings)
+{
+ int i;
+
+ for (i = 0; i < rings->size; i++) {
+ struct libie_fwlog_data *ring = &rings->rings[i];
+
+ /* the first ring is the base memory for the whole range so
+ * free it
+ */
+ if (!i)
+ vfree(ring->data);
+
+ ring->data = NULL;
+ ring->data_size = 0;
+ }
+}
+
+#define LIBIE_FWLOG_INDEX_TO_BYTES(n) ((128 * 1024) << (n))
+/**
+ * libie_fwlog_realloc_rings - reallocate the FW log rings
+ * @fwlog: pointer to the fwlog structure
+ * @index: the new index to use to allocate memory for the log data
+ */
+static void libie_fwlog_realloc_rings(struct libie_fwlog *fwlog, int index)
+{
+ struct libie_fwlog_ring ring;
+ int status, ring_size;
+
+ /* convert the number of bytes into a number of 4K buffers. externally
+ * the driver presents the interface to the FW log data as a number of
+ * bytes because that's easy for users to understand. internally the
+ * driver uses a ring of buffers because the driver doesn't know where
+ * the beginning and end of any line of log data is so the driver has
+ * to overwrite data as complete blocks. when the data is returned to
+ * the user the driver knows that the data is correct and the FW log
+ * can be correctly parsed by the tools
+ */
+ ring_size = LIBIE_FWLOG_INDEX_TO_BYTES(index) / LIBIE_AQ_MAX_BUF_LEN;
+ if (ring_size == fwlog->ring.size)
+ return;
+
+ /* allocate space for the new rings and buffers then release the
+ * old rings and buffers. that way if we don't have enough
+ * memory then we at least have what we had before
+ */
+ ring.rings = kcalloc(ring_size, sizeof(*ring.rings), GFP_KERNEL);
+ if (!ring.rings)
+ return;
+
+ ring.size = ring_size;
+
+ status = libie_fwlog_alloc_ring_buffs(&ring);
+ if (status) {
+ dev_warn(&fwlog->pdev->dev, "Unable to allocate memory for FW log ring data buffers\n");
+ libie_fwlog_free_ring_buffs(&ring);
+ kfree(ring.rings);
+ return;
+ }
+
+ libie_fwlog_free_ring_buffs(&fwlog->ring);
+ kfree(fwlog->ring.rings);
+
+ fwlog->ring.rings = ring.rings;
+ fwlog->ring.size = ring.size;
+ fwlog->ring.index = index;
+ fwlog->ring.head = 0;
+ fwlog->ring.tail = 0;
+}
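
To make the sizing concrete: log_size index n maps to (128K << n) bytes, carved into LIBIE_AQ_MAX_BUF_LEN buffers; per the "4K buffers" comment above, that buffer length is assumed to be 4096 here. A sketch of the mapping for the five supported sizes:

    #include <stdio.h>

    #define INDEX_TO_BYTES(n)       ((128 * 1024) << (n))
    #define AQ_BUF_LEN              4096    /* assumed LIBIE_AQ_MAX_BUF_LEN */

    int main(void)
    {
            /* indexes 0..4 mirror the "128K".."2M" log_size strings */
            for (int n = 0; n <= 4; n++)
                    printf("index %d -> %d ring buffers\n", n,
                           INDEX_TO_BYTES(n) / AQ_BUF_LEN);
            return 0;       /* prints 32, 64, 128, 256, 512 */
    }
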
+
+/**
+ * libie_fwlog_supported - report the cached FW logging support state
+ * @fwlog: pointer to the fwlog structure
+ *
+ * This always returns false if called before libie_init_hw(), so it must be
+ * called after libie_init_hw().
+ */
+static bool libie_fwlog_supported(struct libie_fwlog *fwlog)
+{
+ return fwlog->supported;
+}
+
+/**
+ * libie_aq_fwlog_set - Set FW logging configuration AQ command (0xFF30)
+ * @fwlog: pointer to the fwlog structure
+ * @entries: entries to configure
+ * @num_entries: number of @entries
+ * @options: options from libie_fwlog_cfg->options structure
+ * @log_resolution: logging resolution
+ */
+static int
+libie_aq_fwlog_set(struct libie_fwlog *fwlog,
+ struct libie_fwlog_module_entry *entries, u16 num_entries,
+ u16 options, u16 log_resolution)
+{
+ struct libie_aqc_fw_log_cfg_resp *fw_modules;
+ struct libie_aq_desc desc = {0};
+ struct libie_aqc_fw_log *cmd;
+ int status;
+ int i;
+
+ fw_modules = kcalloc(num_entries, sizeof(*fw_modules), GFP_KERNEL);
+ if (!fw_modules)
+ return -ENOMEM;
+
+ for (i = 0; i < num_entries; i++) {
+ fw_modules[i].module_identifier =
+ cpu_to_le16(entries[i].module_id);
+ fw_modules[i].log_level = entries[i].log_level;
+ }
+
+ desc.opcode = cpu_to_le16(libie_aqc_opc_fw_logs_config);
+ desc.flags = cpu_to_le16(LIBIE_AQ_FLAG_SI) |
+ cpu_to_le16(LIBIE_AQ_FLAG_RD);
+
+ cmd = libie_aq_raw(&desc);
+
+ cmd->cmd_flags = LIBIE_AQC_FW_LOG_CONF_SET_VALID;
+ cmd->ops.cfg.log_resolution = cpu_to_le16(log_resolution);
+ cmd->ops.cfg.mdl_cnt = cpu_to_le16(num_entries);
+
+ if (options & LIBIE_FWLOG_OPTION_ARQ_ENA)
+ cmd->cmd_flags |= LIBIE_AQC_FW_LOG_CONF_AQ_EN;
+ if (options & LIBIE_FWLOG_OPTION_UART_ENA)
+ cmd->cmd_flags |= LIBIE_AQC_FW_LOG_CONF_UART_EN;
+
+ status = fwlog->send_cmd(fwlog->priv, &desc, fw_modules,
+ sizeof(*fw_modules) * num_entries);
+
+ kfree(fw_modules);
+
+ return status;
+}
+
+/**
+ * libie_fwlog_set - Set the firmware logging settings
+ * @fwlog: pointer to the fwlog structure
+ * @cfg: config used to set firmware logging
+ *
+ * This function should be called whenever the driver needs to set the firmware
+ * logging configuration. It can be called on initialization, reset, or during
+ * runtime.
+ *
+ * If the PF wishes to receive FW logging then it must register via
+ * libie_fwlog_register. Note that libie_fwlog_register does not need to
+ * be called for init.
+ */
+static int libie_fwlog_set(struct libie_fwlog *fwlog,
+ struct libie_fwlog_cfg *cfg)
+{
+ if (!libie_fwlog_supported(fwlog))
+ return -EOPNOTSUPP;
+
+ return libie_aq_fwlog_set(fwlog, cfg->module_entries,
+ LIBIE_AQC_FW_LOG_ID_MAX, cfg->options,
+ cfg->log_resolution);
+}
+
+/**
+ * libie_aq_fwlog_register - Register PF for firmware logging events (0xFF31)
+ * @fwlog: pointer to the fwlog structure
+ * @reg: true to register and false to unregister
+ */
+static int libie_aq_fwlog_register(struct libie_fwlog *fwlog, bool reg)
+{
+ struct libie_aq_desc desc = {0};
+ struct libie_aqc_fw_log *cmd;
+
+ desc.opcode = cpu_to_le16(libie_aqc_opc_fw_logs_register);
+ desc.flags = cpu_to_le16(LIBIE_AQ_FLAG_SI);
+ cmd = libie_aq_raw(&desc);
+
+ if (reg)
+ cmd->cmd_flags = LIBIE_AQC_FW_LOG_AQ_REGISTER;
+
+ return fwlog->send_cmd(fwlog->priv, &desc, NULL, 0);
+}
+
+/**
+ * libie_fwlog_register - Register the PF for firmware logging
+ * @fwlog: pointer to the fwlog structure
+ *
+ * After this call the PF will start to receive firmware logging based on the
+ * configuration set in libie_fwlog_set.
+ */
+static int libie_fwlog_register(struct libie_fwlog *fwlog)
+{
+ int status;
+
+ if (!libie_fwlog_supported(fwlog))
+ return -EOPNOTSUPP;
+
+ status = libie_aq_fwlog_register(fwlog, true);
+ if (status)
+ dev_dbg(&fwlog->pdev->dev, "Failed to register for firmware logging events over ARQ\n");
+ else
+ fwlog->cfg.options |= LIBIE_FWLOG_OPTION_IS_REGISTERED;
+
+ return status;
+}
+
+/**
+ * libie_fwlog_unregister - Unregister the PF from firmware logging
+ * @fwlog: pointer to the fwlog structure
+ */
+static int libie_fwlog_unregister(struct libie_fwlog *fwlog)
+{
+ int status;
+
+ if (!libie_fwlog_supported(fwlog))
+ return -EOPNOTSUPP;
+
+ status = libie_aq_fwlog_register(fwlog, false);
+ if (status)
+ dev_dbg(&fwlog->pdev->dev, "Failed to unregister from firmware logging events over ARQ\n");
+ else
+ fwlog->cfg.options &= ~LIBIE_FWLOG_OPTION_IS_REGISTERED;
+
+ return status;
+}
+
+/**
+ * libie_fwlog_print_module_cfg - print current FW logging module configuration
+ * @cfg: pointer to the fwlog cfg structure
+ * @module: module to print
+ * @s: the seq file to put data into
+ */
+static void
+libie_fwlog_print_module_cfg(struct libie_fwlog_cfg *cfg, int module,
+ struct seq_file *s)
+{
+ struct libie_fwlog_module_entry *entry;
+
+ if (module != LIBIE_AQC_FW_LOG_ID_MAX) {
+ entry = &cfg->module_entries[module];
+
+ seq_printf(s, "\tModule: %s, Log Level: %s\n",
+ libie_fwlog_module_string[entry->module_id],
+ libie_fwlog_level_string[entry->log_level]);
+ } else {
+ int i;
+
+ for (i = 0; i < LIBIE_AQC_FW_LOG_ID_MAX; i++) {
+ entry = &cfg->module_entries[i];
+
+ seq_printf(s, "\tModule: %s, Log Level: %s\n",
+ libie_fwlog_module_string[entry->module_id],
+ libie_fwlog_level_string[entry->log_level]);
+ }
+ }
+}
+
+static int libie_find_module_by_dentry(struct dentry **modules, struct dentry *d)
+{
+ int i, module;
+
+ module = -1;
+ /* find the module based on the dentry */
+ for (i = 0; i < LIBIE_NR_FW_LOG_MODULES; i++) {
+ if (d == modules[i]) {
+ module = i;
+ break;
+ }
+ }
+
+ return module;
+}
+
+/**
+ * libie_debugfs_module_show - read from 'module' file
+ * @s: the opened file
+ * @v: pointer to the offset
+ */
+static int libie_debugfs_module_show(struct seq_file *s, void *v)
+{
+ struct libie_fwlog *fwlog = s->private;
+ const struct file *filp = s->file;
+ struct dentry *dentry;
+ int module;
+
+ dentry = file_dentry(filp);
+
+ module = libie_find_module_by_dentry(fwlog->debugfs_modules, dentry);
+ if (module < 0) {
+ dev_info(&fwlog->pdev->dev, "unknown module\n");
+ return -EINVAL;
+ }
+
+ libie_fwlog_print_module_cfg(&fwlog->cfg, module, s);
+
+ return 0;
+}
+
+static int libie_debugfs_module_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, libie_debugfs_module_show, inode->i_private);
+}
+
+/**
+ * libie_debugfs_module_write - write into 'module' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+libie_debugfs_module_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = file_inode(filp)->i_private;
+ struct dentry *dentry = file_dentry(filp);
+ struct device *dev = &fwlog->pdev->dev;
+ char user_val[16], *cmd_buf;
+ int module, log_level, cnt;
+
+ /* don't allow partial writes or invalid input */
+ if (*ppos != 0 || count > 8)
+ return -EINVAL;
+
+ cmd_buf = memdup_user_nul(buf, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+	module = libie_find_module_by_dentry(fwlog->debugfs_modules, dentry);
+	if (module < 0) {
+		dev_info(dev, "unknown module\n");
+		kfree(cmd_buf);
+		return -EINVAL;
+	}
+
+	cnt = sscanf(cmd_buf, "%s", user_val);
+	kfree(cmd_buf);	/* user_val now holds the parsed token */
+	if (cnt != 1)
+		return -EINVAL;
+
+ log_level = sysfs_match_string(libie_fwlog_level_string, user_val);
+ if (log_level < 0) {
+ dev_info(dev, "unknown log level '%s'\n", user_val);
+ return -EINVAL;
+ }
+
+ if (module != LIBIE_AQC_FW_LOG_ID_MAX) {
+ fwlog->cfg.module_entries[module].log_level = log_level;
+ } else {
+ /* the module 'all' is a shortcut so that we can set
+ * all of the modules to the same level quickly
+ */
+ int i;
+
+ for (i = 0; i < LIBIE_AQC_FW_LOG_ID_MAX; i++)
+ fwlog->cfg.module_entries[i].log_level = log_level;
+ }
+
+ return count;
+}
+
+static const struct file_operations libie_debugfs_module_fops = {
+ .owner = THIS_MODULE,
+ .open = libie_debugfs_module_open,
+ .read = seq_read,
+ .release = single_release,
+ .write = libie_debugfs_module_write,
+};
+
+/**
+ * libie_debugfs_nr_messages_read - read from 'nr_messages' file
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ */
+static ssize_t libie_debugfs_nr_messages_read(struct file *filp,
+ char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = filp->private_data;
+ char buff[32] = {};
+
+ snprintf(buff, sizeof(buff), "%d\n",
+ fwlog->cfg.log_resolution);
+
+ return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
+}
+
+/**
+ * libie_debugfs_nr_messages_write - write into 'nr_messages' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+libie_debugfs_nr_messages_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = filp->private_data;
+ struct device *dev = &fwlog->pdev->dev;
+ char user_val[8], *cmd_buf;
+ s16 nr_messages;
+ ssize_t ret;
+
+ /* don't allow partial writes or invalid input */
+ if (*ppos != 0 || count > 4)
+ return -EINVAL;
+
+ cmd_buf = memdup_user_nul(buf, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+	ret = sscanf(cmd_buf, "%s", user_val);
+	kfree(cmd_buf);	/* user_val now holds the parsed token */
+	if (ret != 1)
+		return -EINVAL;
+
+ ret = kstrtos16(user_val, 0, &nr_messages);
+ if (ret)
+ return ret;
+
+ if (nr_messages < LIBIE_AQC_FW_LOG_MIN_RESOLUTION ||
+ nr_messages > LIBIE_AQC_FW_LOG_MAX_RESOLUTION) {
+ dev_err(dev, "Invalid FW log number of messages %d, value must be between %d - %d\n",
+ nr_messages, LIBIE_AQC_FW_LOG_MIN_RESOLUTION,
+ LIBIE_AQC_FW_LOG_MAX_RESOLUTION);
+ return -EINVAL;
+ }
+
+ fwlog->cfg.log_resolution = nr_messages;
+
+ return count;
+}
+
+static const struct file_operations libie_debugfs_nr_messages_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = libie_debugfs_nr_messages_read,
+ .write = libie_debugfs_nr_messages_write,
+};
+
+/**
+ * libie_debugfs_enable_read - read from 'enable' file
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ */
+static ssize_t libie_debugfs_enable_read(struct file *filp,
+ char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = filp->private_data;
+ char buff[32] = {};
+
+ snprintf(buff, sizeof(buff), "%u\n",
+ (u16)(fwlog->cfg.options &
+ LIBIE_FWLOG_OPTION_IS_REGISTERED) >> 3);
+
+ return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
+}
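
The >> 3 shift in this read handler implies LIBIE_FWLOG_OPTION_IS_REGISTERED is bit 3 of the options word, so the file reads back "1" while registered and "0" otherwise. A minimal sketch under that assumption:

    #define LIBIE_FWLOG_OPTION_IS_REGISTERED        (1u << 3)       /* assumed */

    /* 1 when registered for FW log events over the ARQ, otherwise 0 */
    static unsigned int fwlog_enable_state(unsigned int options)
    {
            return (options & LIBIE_FWLOG_OPTION_IS_REGISTERED) >> 3;
    }
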
+
+/**
+ * libie_debugfs_enable_write - write into 'enable' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+libie_debugfs_enable_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = filp->private_data;
+ char user_val[8], *cmd_buf;
+ bool enable;
+ ssize_t ret;
+
+ /* don't allow partial writes or invalid input */
+ if (*ppos != 0 || count > 2)
+ return -EINVAL;
+
+ cmd_buf = memdup_user_nul(buf, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+	ret = sscanf(cmd_buf, "%s", user_val);
+	kfree(cmd_buf);	/* user_val now holds the parsed token */
+	if (ret != 1)
+		return -EINVAL;
+
+ ret = kstrtobool(user_val, &enable);
+ if (ret)
+ goto enable_write_error;
+
+ if (enable)
+ fwlog->cfg.options |= LIBIE_FWLOG_OPTION_ARQ_ENA;
+ else
+ fwlog->cfg.options &= ~LIBIE_FWLOG_OPTION_ARQ_ENA;
+
+ ret = libie_fwlog_set(fwlog, &fwlog->cfg);
+ if (ret)
+ goto enable_write_error;
+
+ if (enable)
+ ret = libie_fwlog_register(fwlog);
+ else
+ ret = libie_fwlog_unregister(fwlog);
+
+ if (ret)
+ goto enable_write_error;
+
+ /* if we get here, nothing went wrong; return count since we didn't
+ * really write anything
+ */
+ ret = (ssize_t)count;
+
+enable_write_error:
+ /* This function always consumes all of the written input, or produces
+ * an error. Check and enforce this. Otherwise, the write operation
+ * won't complete properly.
+ */
+ if (WARN_ON(ret != (ssize_t)count && ret >= 0))
+ ret = -EIO;
+
+ return ret;
+}
+
+static const struct file_operations libie_debugfs_enable_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = libie_debugfs_enable_read,
+ .write = libie_debugfs_enable_write,
+};
+
+/**
+ * libie_debugfs_log_size_read - read from 'log_size' file
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ */
+static ssize_t libie_debugfs_log_size_read(struct file *filp,
+ char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = filp->private_data;
+ char buff[32] = {};
+ int index;
+
+ index = fwlog->ring.index;
+ snprintf(buff, sizeof(buff), "%s\n", libie_fwlog_log_size[index]);
+
+ return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
+}
+
+/**
+ * libie_debugfs_log_size_write - write into 'log_size' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+libie_debugfs_log_size_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = filp->private_data;
+ struct device *dev = &fwlog->pdev->dev;
+ char user_val[8], *cmd_buf;
+ ssize_t ret;
+ int index;
+
+ /* don't allow partial writes or invalid input */
+ if (*ppos != 0 || count > 5)
+ return -EINVAL;
+
+ cmd_buf = memdup_user_nul(buf, count);
+ if (IS_ERR(cmd_buf))
+ return PTR_ERR(cmd_buf);
+
+	ret = sscanf(cmd_buf, "%s", user_val);
+	kfree(cmd_buf);	/* user_val now holds the parsed token */
+	if (ret != 1)
+		return -EINVAL;
+
+ index = sysfs_match_string(libie_fwlog_log_size, user_val);
+ if (index < 0) {
+ dev_info(dev, "Invalid log size '%s'. The value must be one of 128K, 256K, 512K, 1M, 2M\n",
+ user_val);
+ ret = -EINVAL;
+ goto log_size_write_error;
+ } else if (fwlog->cfg.options & LIBIE_FWLOG_OPTION_IS_REGISTERED) {
+ dev_info(dev, "FW logging is currently running. Please disable FW logging to change log_size\n");
+ ret = -EINVAL;
+ goto log_size_write_error;
+ }
+
+ /* free all the buffers and the tracking info and resize */
+ libie_fwlog_realloc_rings(fwlog, index);
+
+ /* if we get here, nothing went wrong; return count since we didn't
+ * really write anything
+ */
+ ret = (ssize_t)count;
+
+log_size_write_error:
+ /* This function always consumes all of the written input, or produces
+ * an error. Check and enforce this. Otherwise, the write operation
+ * won't complete properly.
+ */
+ if (WARN_ON(ret != (ssize_t)count && ret >= 0))
+ ret = -EIO;
+
+ return ret;
+}
+
+static const struct file_operations libie_debugfs_log_size_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = libie_debugfs_log_size_read,
+ .write = libie_debugfs_log_size_write,
+};
+
+/**
+ * libie_debugfs_data_read - read from 'data' file
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ */
+static ssize_t libie_debugfs_data_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = filp->private_data;
+ int data_copied = 0;
+ bool done = false;
+
+ if (libie_fwlog_ring_empty(&fwlog->ring))
+ return 0;
+
+ while (!libie_fwlog_ring_empty(&fwlog->ring) && !done) {
+ struct libie_fwlog_data *log;
+ u16 cur_buf_len;
+
+ log = &fwlog->ring.rings[fwlog->ring.head];
+ cur_buf_len = log->data_size;
+ if (cur_buf_len >= count) {
+ done = true;
+ continue;
+ }
+
+ if (copy_to_user(buffer, log->data, cur_buf_len)) {
+ /* if there is an error then bail and return whatever
+ * the driver has copied so far
+ */
+ done = true;
+ continue;
+ }
+
+ data_copied += cur_buf_len;
+ buffer += cur_buf_len;
+ count -= cur_buf_len;
+ *ppos += cur_buf_len;
+ libie_fwlog_ring_increment(&fwlog->ring.head, fwlog->ring.size);
+ }
+
+ return data_copied;
+}
+
+/**
+ * libie_debugfs_data_write - write into 'data' file
+ * @filp: the opened file
+ * @buf: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ */
+static ssize_t
+libie_debugfs_data_write(struct file *filp, const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct libie_fwlog *fwlog = filp->private_data;
+ struct device *dev = &fwlog->pdev->dev;
+ ssize_t ret;
+
+ /* don't allow partial writes */
+ if (*ppos != 0)
+ return 0;
+
+ /* any value is allowed to clear the buffer so no need to even look at
+ * what the value is
+ */
+ if (!(fwlog->cfg.options & LIBIE_FWLOG_OPTION_IS_REGISTERED)) {
+ fwlog->ring.head = 0;
+ fwlog->ring.tail = 0;
+ } else {
+ dev_info(dev, "Can't clear FW log data while FW log running\n");
+ ret = -EINVAL;
+ goto nr_buffs_write_error;
+ }
+
+ /* if we get here, nothing went wrong; return count since we didn't
+ * really write anything
+ */
+ ret = (ssize_t)count;
+
+nr_buffs_write_error:
+ /* This function always consumes all of the written input, or produces
+ * an error. Check and enforce this. Otherwise, the write operation
+ * won't complete properly.
+ */
+ if (WARN_ON(ret != (ssize_t)count && ret >= 0))
+ ret = -EIO;
+
+ return ret;
+}
+
+static const struct file_operations libie_debugfs_data_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = libie_debugfs_data_read,
+ .write = libie_debugfs_data_write,
+};
+
+/**
+ * libie_debugfs_fwlog_init - setup the debugfs directory
+ * @fwlog: pointer to the fwlog structure
+ * @root: debugfs root entry on which fwlog director will be registered
+ */
+static void libie_debugfs_fwlog_init(struct libie_fwlog *fwlog,
+ struct dentry *root)
+{
+ struct dentry *fw_modules_dir;
+ struct dentry **fw_modules;
+ int i;
+
+ /* allocate space for this first because if it fails then we don't
+ * need to unwind
+ */
+ fw_modules = kcalloc(LIBIE_NR_FW_LOG_MODULES, sizeof(*fw_modules),
+ GFP_KERNEL);
+ if (!fw_modules)
+ return;
+
+ fwlog->debugfs = debugfs_create_dir("fwlog", root);
+ if (IS_ERR(fwlog->debugfs))
+ goto err_create_module_files;
+
+ fw_modules_dir = debugfs_create_dir("modules", fwlog->debugfs);
+ if (IS_ERR(fw_modules_dir))
+ goto err_create_module_files;
+
+ for (i = 0; i < LIBIE_NR_FW_LOG_MODULES; i++) {
+ fw_modules[i] = debugfs_create_file(libie_fwlog_module_string[i],
+ 0600, fw_modules_dir, fwlog,
+ &libie_debugfs_module_fops);
+ if (IS_ERR(fw_modules[i]))
+ goto err_create_module_files;
+ }
+
+ debugfs_create_file("nr_messages", 0600, fwlog->debugfs, fwlog,
+ &libie_debugfs_nr_messages_fops);
+
+ fwlog->debugfs_modules = fw_modules;
+
+ debugfs_create_file("enable", 0600, fwlog->debugfs, fwlog,
+ &libie_debugfs_enable_fops);
+
+ debugfs_create_file("log_size", 0600, fwlog->debugfs, fwlog,
+ &libie_debugfs_log_size_fops);
+
+ debugfs_create_file("data", 0600, fwlog->debugfs, fwlog,
+ &libie_debugfs_data_fops);
+
+ return;
+
+err_create_module_files:
+ debugfs_remove_recursive(fwlog->debugfs);
+ kfree(fw_modules);
+}
+
+static bool libie_fwlog_ring_full(struct libie_fwlog_ring *rings)
+{
+ u16 head, tail;
+
+ head = rings->head;
+ tail = rings->tail;
+
+ if (head < tail && (tail - head == (rings->size - 1)))
+ return true;
+ else if (head > tail && (tail == (head - 1)))
+ return true;
+
+ return false;
+}
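
This encodes the usual one-slot-gap ring convention: head == tail means empty, so the ring is full when the tail sits one slot behind the head, in either unwrapped or wrapped order. A compact self-check of both branches with a hypothetical size of 4:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool ring_full(uint16_t head, uint16_t tail, uint16_t size)
    {
            if (head < tail && tail - head == size - 1)
                    return true;            /* e.g. head 0, tail 3 */
            if (head > tail && tail == head - 1)
                    return true;            /* e.g. head 2, tail 1 */
            return false;
    }

    int main(void)
    {
            assert(ring_full(0, 3, 4));     /* unwrapped full */
            assert(ring_full(2, 1, 4));     /* wrapped full */
            assert(!ring_full(1, 1, 4));    /* head == tail: empty */
            return 0;
    }
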
+
+/**
+ * libie_aq_fwlog_get - Get the current firmware logging configuration (0xFF32)
+ * @fwlog: pointer to the fwlog structure
+ * @cfg: firmware logging configuration to populate
+ */
+static int libie_aq_fwlog_get(struct libie_fwlog *fwlog,
+ struct libie_fwlog_cfg *cfg)
+{
+ struct libie_aqc_fw_log_cfg_resp *fw_modules;
+ struct libie_aq_desc desc = {0};
+ struct libie_aqc_fw_log *cmd;
+ u16 module_id_cnt;
+ int status;
+ void *buf;
+ int i;
+
+ memset(cfg, 0, sizeof(*cfg));
+
+ buf = kzalloc(LIBIE_AQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ desc.opcode = cpu_to_le16(libie_aqc_opc_fw_logs_query);
+ desc.flags = cpu_to_le16(LIBIE_AQ_FLAG_SI);
+ cmd = libie_aq_raw(&desc);
+
+ cmd->cmd_flags = LIBIE_AQC_FW_LOG_AQ_QUERY;
+
+ status = fwlog->send_cmd(fwlog->priv, &desc, buf, LIBIE_AQ_MAX_BUF_LEN);
+ if (status) {
+ dev_dbg(&fwlog->pdev->dev, "Failed to get FW log configuration\n");
+ goto status_out;
+ }
+
+ module_id_cnt = le16_to_cpu(cmd->ops.cfg.mdl_cnt);
+ if (module_id_cnt < LIBIE_AQC_FW_LOG_ID_MAX) {
+ dev_dbg(&fwlog->pdev->dev, "FW returned less than the expected number of FW log module IDs\n");
+ } else if (module_id_cnt > LIBIE_AQC_FW_LOG_ID_MAX) {
+ dev_dbg(&fwlog->pdev->dev, "FW returned more than expected number of FW log module IDs, setting module_id_cnt to software expected max %u\n",
+ LIBIE_AQC_FW_LOG_ID_MAX);
+ module_id_cnt = LIBIE_AQC_FW_LOG_ID_MAX;
+ }
+
+ cfg->log_resolution = le16_to_cpu(cmd->ops.cfg.log_resolution);
+ if (cmd->cmd_flags & LIBIE_AQC_FW_LOG_CONF_AQ_EN)
+ cfg->options |= LIBIE_FWLOG_OPTION_ARQ_ENA;
+ if (cmd->cmd_flags & LIBIE_AQC_FW_LOG_CONF_UART_EN)
+ cfg->options |= LIBIE_FWLOG_OPTION_UART_ENA;
+ if (cmd->cmd_flags & LIBIE_AQC_FW_LOG_QUERY_REGISTERED)
+ cfg->options |= LIBIE_FWLOG_OPTION_IS_REGISTERED;
+
+ fw_modules = (struct libie_aqc_fw_log_cfg_resp *)buf;
+
+ for (i = 0; i < module_id_cnt; i++) {
+ struct libie_aqc_fw_log_cfg_resp *fw_module = &fw_modules[i];
+
+ cfg->module_entries[i].module_id =
+ le16_to_cpu(fw_module->module_identifier);
+ cfg->module_entries[i].log_level = fw_module->log_level;
+ }
+
+status_out:
+ kfree(buf);
+ return status;
+}
+
+/**
+ * libie_fwlog_set_supported - Set if FW logging is supported by FW
+ * @fwlog: pointer to the fwlog structure
+ *
+ * If FW returns success to the libie_aq_fwlog_get call then it supports FW
+ * logging, else it doesn't. Set the fwlog_supported flag accordingly.
+ *
+ * This function is only meant to be called during driver init to determine
+ * whether the FW supports FW logging.
+ */
+static void libie_fwlog_set_supported(struct libie_fwlog *fwlog)
+{
+ struct libie_fwlog_cfg *cfg;
+ int status;
+
+ fwlog->supported = false;
+
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg)
+ return;
+
+ status = libie_aq_fwlog_get(fwlog, cfg);
+ if (status)
+ dev_dbg(&fwlog->pdev->dev, "libie_aq_fwlog_get failed, FW logging is not supported on this version of FW, status %d\n",
+ status);
+ else
+ fwlog->supported = true;
+
+ kfree(cfg);
+}
+
+/**
+ * libie_fwlog_init - Initialize FW logging configuration
+ * @fwlog: pointer to the fwlog structure
+ * @api: api structure to init fwlog
+ *
+ * This function should be called on driver initialization during
+ * libie_init_hw().
+ */
+int libie_fwlog_init(struct libie_fwlog *fwlog, struct libie_fwlog_api *api)
+{
+ fwlog->api = *api;
+ libie_fwlog_set_supported(fwlog);
+
+ if (libie_fwlog_supported(fwlog)) {
+ int status;
+
+ /* read the current config from the FW and store it */
+ status = libie_aq_fwlog_get(fwlog, &fwlog->cfg);
+ if (status)
+ return status;
+
+ fwlog->ring.rings = kcalloc(LIBIE_FWLOG_RING_SIZE_DFLT,
+ sizeof(*fwlog->ring.rings),
+ GFP_KERNEL);
+ if (!fwlog->ring.rings) {
+ dev_warn(&fwlog->pdev->dev, "Unable to allocate memory for FW log rings\n");
+ return -ENOMEM;
+ }
+
+ fwlog->ring.size = LIBIE_FWLOG_RING_SIZE_DFLT;
+ fwlog->ring.index = LIBIE_FWLOG_RING_SIZE_INDEX_DFLT;
+
+ status = libie_fwlog_alloc_ring_buffs(&fwlog->ring);
+ if (status) {
+ dev_warn(&fwlog->pdev->dev, "Unable to allocate memory for FW log ring data buffers\n");
+ libie_fwlog_free_ring_buffs(&fwlog->ring);
+ kfree(fwlog->ring.rings);
+ return status;
+ }
+
+ libie_debugfs_fwlog_init(fwlog, api->debugfs_root);
+ } else {
+ dev_warn(&fwlog->pdev->dev, "FW logging is not supported in this NVM image. Please update the NVM to get FW log support\n");
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(libie_fwlog_init);
+
+/**
+ * libie_fwlog_deinit - unroll FW logging configuration
+ * @fwlog: pointer to the fwlog structure
+ *
+ * This function should be called in libie_deinit_hw().
+ */
+void libie_fwlog_deinit(struct libie_fwlog *fwlog)
+{
+ int status;
+
+ /* make sure FW logging is disabled to not put the FW in a weird state
+ * for the next driver load
+ */
+ fwlog->cfg.options &= ~LIBIE_FWLOG_OPTION_ARQ_ENA;
+ status = libie_fwlog_set(fwlog, &fwlog->cfg);
+ if (status)
+ dev_warn(&fwlog->pdev->dev, "Unable to turn off FW logging, status: %d\n",
+ status);
+
+ kfree(fwlog->debugfs_modules);
+
+ fwlog->debugfs_modules = NULL;
+
+ status = libie_fwlog_unregister(fwlog);
+ if (status)
+ dev_warn(&fwlog->pdev->dev, "Unable to unregister FW logging, status: %d\n",
+ status);
+
+ if (fwlog->ring.rings) {
+ libie_fwlog_free_ring_buffs(&fwlog->ring);
+ kfree(fwlog->ring.rings);
+ }
+}
+EXPORT_SYMBOL_GPL(libie_fwlog_deinit);
+
+/**
+ * libie_get_fwlog_data - copy the FW log data from ARQ event
+ * @fwlog: fwlog that the FW log event is associated with
+ * @buf: event buffer pointer
+ * @len: length of the event data
+ */
+void libie_get_fwlog_data(struct libie_fwlog *fwlog, u8 *buf, u16 len)
+{
+ struct libie_fwlog_data *log;
+
+ log = &fwlog->ring.rings[fwlog->ring.tail];
+
+ memset(log->data, 0, PAGE_SIZE);
+ log->data_size = len;
+
+ memcpy(log->data, buf, log->data_size);
+ libie_fwlog_ring_increment(&fwlog->ring.tail, fwlog->ring.size);
+
+ if (libie_fwlog_ring_full(&fwlog->ring)) {
+ /* the rings are full so bump the head to create room */
+ libie_fwlog_ring_increment(&fwlog->ring.head, fwlog->ring.size);
+ }
+}
+EXPORT_SYMBOL_GPL(libie_get_fwlog_data);
+
+void libie_fwlog_reregister(struct libie_fwlog *fwlog)
+{
+ if (!(fwlog->cfg.options & LIBIE_FWLOG_OPTION_IS_REGISTERED))
+ return;
+
+ if (libie_fwlog_register(fwlog))
+ fwlog->cfg.options &= ~LIBIE_FWLOG_OPTION_IS_REGISTERED;
+}
+EXPORT_SYMBOL_GPL(libie_fwlog_reregister);
+
+MODULE_DESCRIPTION("Intel(R) Ethernet common library");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 476e73e502fe..89ccb8eb82c7 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2416,10 +2416,9 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
skb->ip_summed = mvneta_rx_csum(pp, desc_status);
if (unlikely(xdp_buff_has_frags(xdp)))
- xdp_update_skb_shared_info(skb, num_frags,
- sinfo->xdp_frags_size,
- num_frags * xdp->frame_sz,
- xdp_buff_is_frag_pfmemalloc(xdp));
+ xdp_update_skb_frags_info(skb, num_frags, sinfo->xdp_frags_size,
+ num_frags * xdp->frame_sz,
+ xdp_buff_get_skb_flags(xdp));
return skb;
}
@@ -2985,6 +2984,13 @@ out:
if (txq->count >= txq->tx_stop_threshold)
netif_tx_stop_queue(nq);
+ /* This is not really the true transmit point, since we batch
+ * up several packets before hitting the hardware, but it is the
+ * best we can do without more complexity to walk the packets in
+ * the pending section of the transmit queue.
+ */
+ skb_tx_timestamp(skb);
+
if (!netdev_xmit_more() || netif_xmit_stopped(nq) ||
txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
mvneta_txq_pend_desc_add(pp, txq, frags);
@@ -5357,6 +5363,7 @@ static const struct ethtool_ops mvneta_eth_tool_ops = {
.set_link_ksettings = mvneta_ethtool_set_link_ksettings,
.get_wol = mvneta_ethtool_get_wol,
.set_wol = mvneta_ethtool_set_wol,
+ .get_ts_info = ethtool_op_get_ts_info,
.get_eee = mvneta_ethtool_get_eee,
.set_eee = mvneta_ethtool_set_eee,
};
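Both Marvell changes above follow the standard recipe for software TX timestamps: call skb_tx_timestamp() at the last point before the hardware owns the skb, and report the software capability via .get_ts_info (ethtool_op_get_ts_info() fills in SOF_TIMESTAMPING_TX_SOFTWARE and friends). A minimal sketch with a hypothetical driver:

	/* skb_tx_timestamp() and ethtool_op_get_ts_info() are core API;
	 * foo_xmit() is illustrative only. */
	static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		/* ... fill and submit TX descriptors ... */
		skb_tx_timestamp(skb);	/* last point before HW owns the skb */
		/* ... ring the doorbell ... */
		return NETDEV_TX_OK;
	}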
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 8ebb985d2573..ab0c99aa9f9a 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -4439,6 +4439,8 @@ out:
txq_pcpu->count += frags;
aggr_txq->count += frags;
+ skb_tx_timestamp(skb);
+
/* Enable transmit */
wmb();
mvpp2_aggr_txq_pend_desc_add(port, frags);
@@ -5252,14 +5254,14 @@ static int mvpp2_ethtool_get_ts_info(struct net_device *dev,
{
struct mvpp2_port *port = netdev_priv(dev);
+ ethtool_op_get_ts_info(dev, info);
if (!port->hwtstamp)
- return -EOPNOTSUPP;
+ return 0;
info->phc_index = mvpp22_tai_ptp_clock_index(port->priv->tai);
- info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
info->tx_types = BIT(HWTSTAMP_TX_OFF) |
BIT(HWTSTAMP_TX_ON);
info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
@@ -6222,6 +6224,12 @@ static struct mvpp2_port *mvpp2_pcs_gmac_to_port(struct phylink_pcs *pcs)
return container_of(pcs, struct mvpp2_port, pcs_gmac);
}
+static unsigned int mvpp2_xlg_pcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface)
+{
+ return LINK_INBAND_DISABLE;
+}
+
static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs,
unsigned int neg_mode,
struct phylink_link_state *state)
@@ -6256,6 +6264,7 @@ static int mvpp2_xlg_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
}
static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = {
+ .pcs_inband_caps = mvpp2_xlg_pcs_inband_caps,
.pcs_get_state = mvpp2_xlg_pcs_get_state,
.pcs_config = mvpp2_xlg_pcs_config,
};
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
index a88c006ea65b..01c6c0a2f283 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
@@ -437,6 +437,15 @@ static int octep_set_link_ksettings(struct net_device *netdev,
return 0;
}
+static void octep_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct octep_device *oct = netdev_priv(dev);
+
+ channel->max_combined = CFG_GET_PORTS_MAX_IO_RINGS(oct->conf);
+ channel->combined_count = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+}
+
static const struct ethtool_ops octep_ethtool_ops = {
.get_drvinfo = octep_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -445,6 +454,7 @@ static const struct ethtool_ops octep_ethtool_ops = {
.get_ethtool_stats = octep_get_ethtool_stats,
.get_link_ksettings = octep_get_link_ksettings,
.set_link_ksettings = octep_set_link_ksettings,
+ .get_channels = octep_get_channels,
};
void octep_set_ethtool_ops(struct net_device *netdev)
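The CFG_GET_* macros are octep's configuration accessors. The general shape of a read-only .get_channels callback, with hypothetical field names, is:

	/* `ethtool -l <iface>` reports these as Combined max / count. */
	static void foo_get_channels(struct net_device *dev,
				     struct ethtool_channels *ch)
	{
		struct foo_priv *priv = netdev_priv(dev);

		ch->max_combined = priv->max_io_rings;		/* HW/FW limit */
		ch->combined_count = priv->active_io_rings;	/* active now */
	}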
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index 24499bb36c00..bcea3fc26a8c 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -1124,11 +1124,24 @@ static int octep_set_features(struct net_device *dev, netdev_features_t features
return err;
}
+static bool octep_is_vf_valid(struct octep_device *oct, int vf)
+{
+ if (vf >= CFG_GET_ACTIVE_VFS(oct->conf)) {
+ netdev_err(oct->netdev, "Invalid VF ID %d\n", vf);
+ return false;
+ }
+
+ return true;
+}
+
static int octep_get_vf_config(struct net_device *dev, int vf,
struct ifla_vf_info *ivi)
{
struct octep_device *oct = netdev_priv(dev);
+ if (!octep_is_vf_valid(oct, vf))
+ return -EINVAL;
+
ivi->vf = vf;
ether_addr_copy(ivi->mac, oct->vf_info[vf].mac_addr);
ivi->spoofchk = true;
@@ -1143,6 +1156,9 @@ static int octep_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
struct octep_device *oct = netdev_priv(dev);
int err;
+ if (!octep_is_vf_valid(oct, vf))
+ return -EINVAL;
+
if (!is_valid_ether_addr(mac)) {
dev_err(&oct->pdev->dev, "Invalid MAC Address %pM\n", mac);
return -EADDRNOTAVAIL;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
index ebecdd29f3bd..0867fab61b19 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
@@ -196,6 +196,7 @@ static void octep_pfvf_get_mac_addr(struct octep_device *oct, u32 vf_id,
vf_id);
return;
}
+ ether_addr_copy(oct->vf_info[vf_id].mac_addr, rsp->s_set_mac.mac_addr);
rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
}
@@ -205,6 +206,8 @@ static void octep_pfvf_dev_remove(struct octep_device *oct, u32 vf_id,
{
int err;
+ /* Reset VF-specific information maintained by the PF */
+ memset(&oct->vf_info[vf_id], 0, sizeof(struct octep_pfvf_info));
err = octep_ctrl_net_dev_remove(oct, vf_id);
if (err) {
rsp->s.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
index d60441928ba9..241a7e7c7ad2 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
@@ -244,6 +244,15 @@ static int octep_vf_get_link_ksettings(struct net_device *netdev,
return 0;
}
+static void octep_vf_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct octep_vf_device *oct = netdev_priv(dev);
+
+ channel->max_combined = CFG_GET_PORTS_MAX_IO_RINGS(oct->conf);
+ channel->combined_count = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+}
+
static const struct ethtool_ops octep_vf_ethtool_ops = {
.get_drvinfo = octep_vf_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -251,6 +260,7 @@ static const struct ethtool_ops octep_vf_ethtool_ops = {
.get_sset_count = octep_vf_get_sset_count,
.get_ethtool_stats = octep_vf_get_ethtool_stats,
.get_link_ksettings = octep_vf_get_link_ksettings,
+ .get_channels = octep_vf_get_channels,
};
void octep_vf_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 4ff19a04b23e..d374a4454836 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -21,8 +21,7 @@
#include "rvu.h"
#include "lmac_common.h"
-#define DRV_NAME "Marvell-CGX/RPM"
-#define DRV_STRING "Marvell CGX/RPM Driver"
+#define DRV_NAME "Marvell-CGX-RPM"
#define CGX_RX_STAT_GLOBAL_INDEX 9
@@ -1978,6 +1977,13 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_release_regions;
}
+ if (!is_cn20k(pdev) &&
+ !is_cgx_mapped_to_nix(pdev->subsystem_device, cgx->cgx_id)) {
+ dev_notice(dev, "CGX %d not mapped to NIX, skipping probe\n",
+ cgx->cgx_id);
+ goto err_release_regions;
+ }
+
cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
if (!cgx->lmac_count) {
dev_notice(dev, "CGX %d LMAC count is zero, skipping probe\n", cgx->cgx_id);
@@ -1998,7 +2004,7 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* init wq for processing linkup requests */
INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
- cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
+ cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", WQ_PERCPU, 0);
if (!cgx->cgx_cmd_workq) {
dev_err(dev, "alloc workqueue failed for cgx cmd");
err = -ENOMEM;
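The WQ_PERCPU additions repeated through these drivers are part of the tree-wide effort to make alloc_workqueue() callers state their pooling explicitly ahead of the default changing to unbound. The recurring one-liner, in isolation:

	/* Explicitly per-CPU; an unbound queue would pass WQ_UNBOUND. */
	struct workqueue_struct *wq = alloc_workqueue("example_wq", WQ_PERCPU, 0);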
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 950231e7ea71..92ccf343dfe0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -161,10 +161,6 @@ int cgx_get_link_info(void *cgxd, int lmac_id,
struct cgx_link_user_info *linfo);
int cgx_lmac_linkup_start(void *cgxd);
int cgx_get_fwdata_base(u64 *base);
-int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
- u8 *tx_pause, u8 *rx_pause);
-int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
- u8 tx_pause, u8 rx_pause);
void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable);
u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id);
int cgx_set_fec(u64 fec, int cgx_id, int lmac_id);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
index d7030dfa5dad..a80c8e7c94f2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
@@ -913,7 +913,7 @@ int rvu_mcs_init(struct rvu *rvu)
/* Initialize the wq for handling mcs interrupts */
INIT_LIST_HEAD(&rvu->mcs_intrq_head);
INIT_WORK(&rvu->mcs_intr_work, mcs_intr_handler_task);
- rvu->mcs_intr_wq = alloc_workqueue("mcs_intr_wq", 0, 0);
+ rvu->mcs_intr_wq = alloc_workqueue("mcs_intr_wq", WQ_PERCPU, 0);
if (!rvu->mcs_intr_wq) {
dev_err(rvu->dev, "mcs alloc workqueue failed\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index c6bb3aaa8e0d..2d78e08f985f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -1164,6 +1164,9 @@ cpt:
rvu_program_channels(rvu);
cgx_start_linkup(rvu);
+ rvu_block_bcast_xon(rvu, BLKADDR_NIX0);
+ rvu_block_bcast_xon(rvu, BLKADDR_NIX1);
+
err = rvu_mcs_init(rvu);
if (err) {
dev_err(rvu->dev, "%s: Failed to initialize mcs\n", __func__);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 7ee1fdeb5295..b58283341923 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -783,6 +783,20 @@ static inline bool is_cn10kb(struct rvu *rvu)
return false;
}
+static inline bool is_cgx_mapped_to_nix(unsigned short id, u8 cgx_id)
+{
+ /* On CNF10KA and CNF10KB silicons only the first two CGX blocks
+ * (CGX0 and CGX1) are connected to NIX.
+ */
+ if (id == PCI_SUBSYS_DEVID_CNF10K_A || id == PCI_SUBSYS_DEVID_CNF10K_B)
+ return cgx_id <= 1;
+
+ return !(cgx_id && !(id == PCI_SUBSYS_DEVID_96XX ||
+ id == PCI_SUBSYS_DEVID_98XX ||
+ id == PCI_SUBSYS_DEVID_CN10K_A ||
+ id == PCI_SUBSYS_DEVID_CN10K_B));
+}
+
static inline bool is_rvu_npc_hash_extract_en(struct rvu *rvu)
{
u64 npc_const3;
@@ -1017,6 +1031,7 @@ int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc,
void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc);
int rvu_alloc_cint_qint_mem(struct rvu *rvu, struct rvu_pfvf *pfvf,
int blkaddr, int nixlf);
+void rvu_block_bcast_xon(struct rvu *rvu, int blkaddr);
/* NPC APIs */
void rvu_npc_freemem(struct rvu *rvu);
int rvu_npc_get_pkind(struct rvu *rvu, u16 pf);
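The negated compound condition in is_cgx_mapped_to_nix() is easier to audit after applying de Morgan; the following is logically equivalent to the body above (same identifiers, sketch only):

	if (id == PCI_SUBSYS_DEVID_CNF10K_A || id == PCI_SUBSYS_DEVID_CNF10K_B)
		return cgx_id <= 1;	/* exactly CGX0 and CGX1 */
	if (cgx_id == 0)
		return true;		/* CGX0 is always NIX-mapped */
	return id == PCI_SUBSYS_DEVID_96XX ||
	       id == PCI_SUBSYS_DEVID_98XX ||
	       id == PCI_SUBSYS_DEVID_CN10K_A ||
	       id == PCI_SUBSYS_DEVID_CN10K_B;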
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 3303c475414a..3abd750a4bd7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -315,7 +315,7 @@ static int cgx_lmac_event_handler_init(struct rvu *rvu)
spin_lock_init(&rvu->cgx_evq_lock);
INIT_LIST_HEAD(&rvu->cgx_evq_head);
INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
- rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
+ rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", WQ_PERCPU, 0);
if (!rvu->cgx_evh_wq) {
dev_err(rvu->dev, "alloc workqueue failed");
return -ENOMEM;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 27c3a2daaaa9..3735372539bd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -505,7 +505,9 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
rvu_reporters->nix_event_ctx = nix_event_context;
rvu_reporters->rvu_hw_nix_intr_reporter =
- devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_intr_reporter_ops, 0, rvu);
+ devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_hw_nix_intr_reporter_ops,
+ rvu);
if (IS_ERR(rvu_reporters->rvu_hw_nix_intr_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_nix_intr reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter));
@@ -513,7 +515,9 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
}
rvu_reporters->rvu_hw_nix_gen_reporter =
- devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_gen_reporter_ops, 0, rvu);
+ devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_hw_nix_gen_reporter_ops,
+ rvu);
if (IS_ERR(rvu_reporters->rvu_hw_nix_gen_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_nix_gen reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter));
@@ -521,7 +525,9 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
}
rvu_reporters->rvu_hw_nix_err_reporter =
- devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_err_reporter_ops, 0, rvu);
+ devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_hw_nix_err_reporter_ops,
+ rvu);
if (IS_ERR(rvu_reporters->rvu_hw_nix_err_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_nix_err reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter));
@@ -529,7 +535,9 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
}
rvu_reporters->rvu_hw_nix_ras_reporter =
- devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_ras_reporter_ops, 0, rvu);
+ devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_hw_nix_ras_reporter_ops,
+ rvu);
if (IS_ERR(rvu_reporters->rvu_hw_nix_ras_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_nix_ras reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter));
@@ -1051,7 +1059,9 @@ static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
rvu_reporters->npa_event_ctx = npa_event_context;
rvu_reporters->rvu_hw_npa_intr_reporter =
- devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_intr_reporter_ops, 0, rvu);
+ devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_hw_npa_intr_reporter_ops,
+ rvu);
if (IS_ERR(rvu_reporters->rvu_hw_npa_intr_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_npa_intr reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter));
@@ -1059,7 +1069,9 @@ static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
}
rvu_reporters->rvu_hw_npa_gen_reporter =
- devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_gen_reporter_ops, 0, rvu);
+ devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_hw_npa_gen_reporter_ops,
+ rvu);
if (IS_ERR(rvu_reporters->rvu_hw_npa_gen_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_npa_gen reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter));
@@ -1067,7 +1079,9 @@ static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
}
rvu_reporters->rvu_hw_npa_err_reporter =
- devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_err_reporter_ops, 0, rvu);
+ devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_hw_npa_err_reporter_ops,
+ rvu);
if (IS_ERR(rvu_reporters->rvu_hw_npa_err_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_npa_err reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter));
@@ -1075,7 +1089,9 @@ static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
}
rvu_reporters->rvu_hw_npa_ras_reporter =
- devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_ras_reporter_ops, 0, rvu);
+ devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_hw_npa_ras_reporter_ops,
+ rvu);
if (IS_ERR(rvu_reporters->rvu_hw_npa_ras_reporter)) {
dev_warn(rvu->dev, "Failed to create hw_npa_ras reporter, err=%ld\n",
PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter));
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 60db1f616cc8..828316211b24 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -6616,3 +6616,19 @@ unlock_grp:
return ret;
}
+
+/* On CN10k and older series of silicons, hardware may incorrectly
+ * assert XOFF on certain channels. Issue a write on NIX_AF_RX_CHANX_CFG
+ * to broadcast XON on those channels.
+ */
+void rvu_block_bcast_xon(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_block *block = &rvu->hw->block[blkaddr];
+ u64 cfg;
+
+ if (!block->implemented || is_cn20k(rvu->pdev))
+ return;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(0));
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(0), cfg);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 1b765045aa63..b56395ac5a74 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -606,8 +606,8 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
if (!npc_check_field(rvu, blkaddr, NPC_LB, intf))
*features &= ~BIT_ULL(NPC_OUTER_VID);
- /* Set SPI flag only if AH/ESP and IPSEC_SPI are in the key */
- if (npc_check_field(rvu, blkaddr, NPC_IPSEC_SPI, intf) &&
+ /* Allow extracting the SPI field from AH and ESP headers at the same offset */
+ if (npc_is_field_present(rvu, NPC_IPSEC_SPI, intf) &&
(*features & (BIT_ULL(NPC_IPPROTO_ESP) | BIT_ULL(NPC_IPPROTO_AH))))
*features |= BIT_ULL(NPC_IPSEC_SPI);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
index 03099bc570bd..4415d0ce9aef 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
@@ -376,7 +376,7 @@ int rvu_rep_install_mcam_rules(struct rvu *rvu)
spin_lock_init(&rvu->rep_evtq_lock);
INIT_LIST_HEAD(&rvu->rep_evtq_head);
INIT_WORK(&rvu->rep_evt_work, rvu_rep_wq_handler);
- rvu->rep_evt_wq = alloc_workqueue("rep_evt_wq", 0, 0);
+ rvu->rep_evt_wq = alloc_workqueue("rep_evt_wq", WQ_PERCPU, 0);
if (!rvu->rep_evt_wq) {
dev_err(rvu->dev, "REP workqueue allocation failed\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
index c691f0722154..77543d472345 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
@@ -798,7 +798,8 @@ int cn10k_ipsec_init(struct net_device *netdev)
pf->ipsec.sa_size = sa_size;
INIT_WORK(&pf->ipsec.sa_work, cn10k_ipsec_sa_wq_handler);
- pf->ipsec.sa_workq = alloc_workqueue("cn10k_ipsec_sa_workq", 0, 0);
+ pf->ipsec.sa_workq = alloc_workqueue("cn10k_ipsec_sa_workq",
+ WQ_PERCPU, 0);
if (!pf->ipsec.sa_workq) {
netdev_err(pf->netdev, "SA alloc workqueue failed\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index f674729124e6..aff17c37ddde 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -124,7 +124,9 @@ void otx2_get_dev_stats(struct otx2_nic *pfvf)
dev_stats->rx_ucast_frames;
dev_stats->tx_bytes = OTX2_GET_TX_STATS(TX_OCTS);
- dev_stats->tx_drops = OTX2_GET_TX_STATS(TX_DROP);
+ dev_stats->tx_drops = OTX2_GET_TX_STATS(TX_DROP) +
+ (unsigned long)atomic_long_read(&dev_stats->tx_discards);
+
dev_stats->tx_bcast_frames = OTX2_GET_TX_STATS(TX_BCAST);
dev_stats->tx_mcast_frames = OTX2_GET_TX_STATS(TX_MCAST);
dev_stats->tx_ucast_frames = OTX2_GET_TX_STATS(TX_UCAST);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index e3765b73c434..1c8a3c078a64 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -153,6 +153,7 @@ struct otx2_dev_stats {
u64 tx_bcast_frames;
u64 tx_mcast_frames;
u64 tx_drops;
+ atomic_long_t tx_discards;
};
/* Driver counted stats */
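The tx_discards counter added here (and to the VF and rep paths below) follows a common pattern: drops detected outside the DMA path are counted in an atomic_long_t from any context via atomic_long_inc(), then folded into the u64 totals at read time. A sketch of the read side:

	/* Fold driver-counted discards into the HW drop total. */
	static u64 foo_total_tx_drops(struct otx2_dev_stats *s, u64 hw_drops)
	{
		return hw_drops +
		       (unsigned long)atomic_long_read(&s->tx_discards);
	}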
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 998c734ff839..b90e23dc49de 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -1283,7 +1283,8 @@ end:
}
static void otx2_get_fec_stats(struct net_device *netdev,
- struct ethtool_fec_stats *fec_stats)
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
struct cgx_fw_data *rsp;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index b23585c5e5c2..e808995703cf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -2220,6 +2220,7 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct otx2_nic *pf = netdev_priv(netdev);
int qidx = skb_get_queue_mapping(skb);
+ struct otx2_dev_stats *dev_stats;
struct otx2_snd_queue *sq;
struct netdev_queue *txq;
int sq_idx;
@@ -2232,6 +2233,8 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Check for minimum and maximum packet length */
if (skb->len <= ETH_HLEN ||
(!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) {
+ dev_stats = &pf->hw.dev_stats;
+ atomic_long_inc(&dev_stats->tx_discards);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -3539,6 +3542,7 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_disable_mbox_intr(pf);
otx2_pfaf_mbox_destroy(pf);
pci_free_irq_vectors(pf->pdev);
+ bitmap_free(pf->af_xdp_zc_qidx);
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
index e52cc6b1a26c..dedd586ed310 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -491,7 +491,7 @@ void otx2_ptp_destroy(struct otx2_nic *pfvf)
if (!ptp)
return;
- cancel_delayed_work(&pfvf->ptp->synctstamp_work);
+ cancel_delayed_work_sync(&pfvf->ptp->synctstamp_work);
ptp_clock_unregister(ptp->ptp_clock);
kfree(ptp);
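Plain cancel_delayed_work() only removes a pending work item; it does not wait for a callback that is already executing, so the kfree(ptp) below it could race with the handler. The _sync variant sleeps until any in-flight execution finishes, which is safe in this teardown context. The ordering the fix guarantees:

	cancel_delayed_work_sync(&ptp->synctstamp_work);	/* may sleep */
	ptp_clock_unregister(ptp->ptp_clock);
	kfree(ptp);	/* no callback can still be running */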
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 5f80b23c5335..26a08d2cfbb1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -1326,7 +1326,6 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
free_leaf:
otx2_tc_del_from_flow_list(flow_cfg, new_node);
- kfree_rcu(new_node, rcu);
if (new_node->is_act_police) {
mutex_lock(&nic->mbox.lock);
@@ -1346,6 +1345,7 @@ free_leaf:
mutex_unlock(&nic->mbox.lock);
}
+ kfree_rcu(new_node, rcu);
return rc;
}
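The otx2_tc change is a use-after-free fix: new_node->is_act_police was read after kfree_rcu() had already queued the node for freeing. The general rule it restores, with a hypothetical object:

	/* kfree_rcu() must be the last touch of the object. */
	list_del_rcu(&obj->list);
	if (obj->needs_cleanup)		/* still valid here */
		do_cleanup(obj);
	kfree_rcu(obj, rcu);		/* no dereference after this */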
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 5589fccd370b..25381f079b97 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -417,9 +417,19 @@ static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct otx2_nic *vf = netdev_priv(netdev);
int qidx = skb_get_queue_mapping(skb);
+ struct otx2_dev_stats *dev_stats;
struct otx2_snd_queue *sq;
struct netdev_queue *txq;
+ /* Check for minimum and maximum packet length */
+ if (skb->len <= ETH_HLEN ||
+ (!skb_shinfo(skb)->gso_size && skb->len > vf->tx_max_pktlen)) {
+ dev_stats = &vf->hw.dev_stats;
+ atomic_long_inc(&dev_stats->tx_discards);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
sq = &vf->qset.sq[qidx];
txq = netdev_get_tx_queue(netdev, qidx);
@@ -844,6 +854,7 @@ static void otx2vf_remove(struct pci_dev *pdev)
qmem_free(vf->dev, vf->dync_lmt);
otx2vf_vfaf_mbox_destroy(vf);
pci_free_irq_vectors(vf->pdev);
+ bitmap_free(vf->af_xdp_zc_qidx);
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
index 25af98034e2e..b476733a0234 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c
@@ -371,7 +371,8 @@ static void rvu_rep_get_stats(struct work_struct *work)
stats->rx_mcast_frames = rsp->rx.mcast;
stats->tx_bytes = rsp->tx.octs;
stats->tx_frames = rsp->tx.ucast + rsp->tx.bcast + rsp->tx.mcast;
- stats->tx_drops = rsp->tx.drop;
+ stats->tx_drops = rsp->tx.drop +
+ (unsigned long)atomic_long_read(&stats->tx_discards);
exit:
mutex_unlock(&priv->mbox.lock);
}
@@ -418,6 +419,16 @@ static netdev_tx_t rvu_rep_xmit(struct sk_buff *skb, struct net_device *dev)
struct otx2_nic *pf = rep->mdev;
struct otx2_snd_queue *sq;
struct netdev_queue *txq;
+ struct rep_stats *stats;
+
+ /* Check for minimum and maximum packet length */
+ if (skb->len <= ETH_HLEN ||
+ (!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) {
+ stats = &rep->stats;
+ atomic_long_inc(&stats->tx_discards);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
sq = &pf->qset.sq[rep->rep_id];
txq = netdev_get_tx_queue(dev, 0);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.h b/drivers/net/ethernet/marvell/octeontx2/nic/rep.h
index 38446b3e4f13..5bc9e2c7d800 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/rep.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.h
@@ -27,6 +27,7 @@ struct rep_stats {
u64 tx_bytes;
u64 tx_frames;
u64 tx_drops;
+ atomic_long_t tx_discards;
};
struct rep_dev {
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 71ffb55d1fc4..65e7ef033bde 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -1500,7 +1500,7 @@ EXPORT_SYMBOL(prestera_device_unregister);
static int __init prestera_module_init(void)
{
- prestera_wq = alloc_workqueue("prestera", 0, 0);
+ prestera_wq = alloc_workqueue("prestera", WQ_PERCPU, 0);
if (!prestera_wq)
return -ENOMEM;
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
index c45d108b2f6d..3e13322470da 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
@@ -898,7 +898,7 @@ static int prestera_pci_probe(struct pci_dev *pdev,
dev_info(fw->dev.dev, "Prestera FW is ready\n");
- fw->wq = alloc_workqueue("prestera_fw_wq", WQ_HIGHPRI, 1);
+ fw->wq = alloc_workqueue("prestera_fw_wq", WQ_HIGHPRI | WQ_PERCPU, 1);
if (!fw->wq) {
err = -ENOMEM;
goto err_wq_alloc;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 5a5fcde76dc0..e68997a29191 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1761,6 +1761,13 @@ static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
bool gso = false;
int tx_num;
+ if (skb_vlan_tag_present(skb) &&
+ !eth_proto_is_802_3(eth_hdr(skb)->h_proto)) {
+ skb = __vlan_hwaccel_push_inside(skb);
+ if (!skb)
+ goto dropped;
+ }
+
/* normally we can rely on the stack not calling this more than once,
* however we have 2 queues running on the same ring so we need to lock
* the ring access
@@ -1806,8 +1813,9 @@ static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
drop:
spin_unlock(&eth->page_lock);
- stats->tx_dropped++;
dev_kfree_skb_any(skb);
+dropped:
+ stats->tx_dropped++;
return NETDEV_TX_OK;
}
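__vlan_hwaccel_push_inside() materializes an offloaded VLAN tag into the packet payload; on allocation failure it frees the original skb and returns NULL. That is why the new `dropped` label skips dev_kfree_skb_any() and only bumps the counter. The helper's contract, in isolation:

	/* Core API: skb is consumed on failure, so only count the drop. */
	if (skb_vlan_tag_present(skb)) {
		skb = __vlan_hwaccel_push_inside(skb);
		if (!skb)
			return NETDEV_TX_OK;	/* already freed */
	}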
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index c855fb799ce1..e9bd32741983 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -101,7 +101,9 @@ mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_i
if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
return -1;
+ rcu_read_lock();
err = dev_fill_forward_path(dev, addr, &stack);
+ rcu_read_unlock();
if (err)
return err;
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index 73c26fcfd85e..3dbb113b792c 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -59,7 +59,9 @@ struct mtk_wed_flow_block_priv {
static const struct mtk_wed_soc_data mt7622_data = {
.regmap = {
.tx_bm_tkid = 0x088,
- .wpdma_rx_ring0 = 0x770,
+ .wpdma_rx_ring = {
+ 0x770,
+ },
.reset_idx_tx_mask = GENMASK(3, 0),
.reset_idx_rx_mask = GENMASK(17, 16),
},
@@ -70,7 +72,9 @@ static const struct mtk_wed_soc_data mt7622_data = {
static const struct mtk_wed_soc_data mt7986_data = {
.regmap = {
.tx_bm_tkid = 0x0c8,
- .wpdma_rx_ring0 = 0x770,
+ .wpdma_rx_ring = {
+ 0x770,
+ },
.reset_idx_tx_mask = GENMASK(1, 0),
.reset_idx_rx_mask = GENMASK(7, 6),
},
@@ -81,7 +85,10 @@ static const struct mtk_wed_soc_data mt7986_data = {
static const struct mtk_wed_soc_data mt7988_data = {
.regmap = {
.tx_bm_tkid = 0x0c8,
- .wpdma_rx_ring0 = 0x7d0,
+ .wpdma_rx_ring = {
+ 0x7d0,
+ 0x7d8,
+ },
.reset_idx_tx_mask = GENMASK(1, 0),
.reset_idx_rx_mask = GENMASK(7, 6),
},
@@ -621,8 +628,8 @@ mtk_wed_amsdu_init(struct mtk_wed_device *dev)
return ret;
}
- /* eagle E1 PCIE1 tx ring 22 flow control issue */
- if (dev->wlan.id == 0x7991)
+ /* Kite and Eagle E1 PCIE1 tx ring 22 flow control issue */
+ if (dev->wlan.id == 0x7991 || dev->wlan.id == 0x7992)
wed_clr(dev, MTK_WED_AMSDU_FIFO, MTK_WED_AMSDU_IS_PRIOR0_RING);
wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN);
@@ -1239,7 +1246,11 @@ mtk_wed_set_wpdma(struct mtk_wed_device *dev)
return;
wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
- wed_w32(dev, dev->hw->soc->regmap.wpdma_rx_ring0, dev->wlan.wpdma_rx);
+ wed_w32(dev, dev->hw->soc->regmap.wpdma_rx_ring[0],
+ dev->wlan.wpdma_rx[0]);
+ if (mtk_wed_is_v3_or_greater(dev->hw))
+ wed_w32(dev, dev->hw->soc->regmap.wpdma_rx_ring[1],
+ dev->wlan.wpdma_rx[1]);
if (!dev->wlan.hw_rro)
return;
@@ -2323,6 +2334,16 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
if (!dev->rx_wdma[i].desc)
mtk_wed_wdma_rx_ring_setup(dev, i, 16, false);
+ if (dev->wlan.hw_rro) {
+ for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) {
+ u32 addr = MTK_WED_RRO_MSDU_PG_CTRL0(i) +
+ MTK_WED_RING_OFS_COUNT;
+
+ if (!wed_r32(dev, addr))
+ wed_w32(dev, addr, 1);
+ }
+ }
+
mtk_wed_hw_init(dev);
mtk_wed_configure_irq(dev, irq_mask);
@@ -2782,7 +2803,6 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
if (!pdev)
goto err_of_node_put;
- get_device(&pdev->dev);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
goto err_put_device;
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
index c1f0479d7a71..b49aee9a8b65 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
@@ -17,7 +17,7 @@ struct mtk_wed_wo;
struct mtk_wed_soc_data {
struct {
u32 tx_bm_tkid;
- u32 wpdma_rx_ring0;
+ u32 wpdma_rx_ring[MTK_WED_RX_QUEUES];
u32 reset_idx_tx_mask;
u32 reset_idx_rx_mask;
} regmap;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index d2071aff7b8f..308b4458e0d4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1180,9 +1180,9 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
mlx4_unregister_mac(mdev->dev, priv->port, mac);
hlist_del_rcu(&entry->hlist);
- kfree_rcu(entry, rcu);
en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
entry->mac, priv->port);
+ kfree_rcu(entry, rcu);
++removed;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 92a16ddb7d86..13666d50b90f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -267,8 +267,10 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
pp.dma_dir = priv->dma_dir;
ring->pp = page_pool_create(&pp);
- if (!ring->pp)
+ if (IS_ERR(ring->pp)) {
+ err = PTR_ERR(ring->pp);
goto err_ring;
+ }
if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index, 0) < 0)
goto err_pp;
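page_pool_create() returns an ERR_PTR() on failure, never NULL, so the old NULL test could not fire and err was left unset on that path. The required idiom, as the fix applies it:

	ring->pp = page_pool_create(&pp);
	if (IS_ERR(ring->pp)) {
		err = PTR_ERR(ring->pp);	/* e.g. -ENOMEM, -EINVAL */
		goto err_ring;
	}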
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 6ec7d6e0181d..3c3e84100d5a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -8,7 +8,6 @@ config MLX5_CORE
depends on PCI
select AUXILIARY_BUS
select NET_DEVLINK
- depends on VXLAN || !VXLAN
depends on MLXFW || !MLXFW
depends on PTP_1588_CLOCK_OPTIONAL
depends on PCI_HYPERV_INTERFACE || !PCI_HYPERV_INTERFACE
@@ -208,3 +207,14 @@ config MLX5_DPLL
help
DPLL support in Mellanox Technologies ConnectX NICs.
+config MLX5_EN_PSP
+ bool "Mellanox Technologies support for PSP cryptography-offload acceleration"
+ depends on INET_PSP
+ depends on MLX5_CORE_EN
+ default y
+ help
+ mlx5 device support for the Google PSP Security Protocol offload.
+ Adds PSP encryption offload, plus SPI and key generation
+ interfaces to the PSP stack.
+
+ If unsure, say Y.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index a253c73db9e5..8ffa286a18f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -17,7 +17,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
fs_counters.o fs_ft_pool.o rl.o lag/debugfs.o lag/lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \
diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o diag/reporter_vnic.o \
- fw_reset.o qos.o lib/tout.o lib/aso.o wc.o fs_pool.o
+ fw_reset.o qos.o lib/tout.o lib/aso.o wc.o fs_pool.o lib/nv_param.o
#
# Netdev basic
@@ -69,7 +69,7 @@ mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o
# Core extra
#
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
- ecpf.o rdma.o esw/legacy.o \
+ ecpf.o rdma.o esw/legacy.o esw/adj_vport.o \
esw/devlink_port.o esw/vporttbl.o esw/qos.o esw/ipsec.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \
@@ -85,7 +85,9 @@ mlx5_core-$(CONFIG_MLX5_BRIDGE) += esw/bridge.o esw/bridge_mcast.o esw/bridge
mlx5_core-$(CONFIG_HWMON) += hwmon.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
-mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
+ifneq ($(CONFIG_VXLAN),)
+ mlx5_core-y += lib/vxlan.o
+endif
mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += lib/hv.o lib/hv_vhca.o
@@ -110,6 +112,8 @@ mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/ktls_stats.o \
en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \
en_accel/ktls_tx.o en_accel/ktls_rx.o
+mlx5_core-$(CONFIG_MLX5_EN_PSP) += en_accel/psp.o en_accel/psp_rxtx.o
+
#
# SW Steering
#
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index e395ef5f356e..722282cebce9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -294,6 +294,10 @@ static void poll_timeout(struct mlx5_cmd_work_ent *ent)
return;
}
cond_resched();
+ if (mlx5_cmd_is_down(dev)) {
+ ent->ret = -ENXIO;
+ return;
+ }
} while (time_before(jiffies, poll_end));
ent->ret = -ETIMEDOUT;
@@ -1070,7 +1074,7 @@ static void cmd_work_handler(struct work_struct *work)
poll_timeout(ent);
/* make sure we read the descriptor after ownership is SW */
rmb();
- mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, (ent->ret == -ETIMEDOUT));
+ mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, !!ent->ret);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 1fd403713baf..e9f319a9bdd6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -145,7 +145,6 @@ int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
mlx5_core_dbg(dev, "failed adding CP 0x%x to debug file system\n",
cq->cqn);
- cq->uar = dev->priv.uar;
cq->irqn = eq->core.irqn;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 3ffa3fbacd16..fceea83abbd7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -10,6 +10,7 @@
#include "esw/qos.h"
#include "sf/dev/dev.h"
#include "sf/sf.h"
+#include "lib/nv_param.h"
static int mlx5_devlink_flash_update(struct devlink *devlink,
struct devlink_flash_update_params *params,
@@ -160,7 +161,7 @@ static int mlx5_devlink_reload_fw_activate(struct devlink *devlink, struct netli
if (err)
return err;
- mlx5_unload_one_devl_locked(dev, true);
+ mlx5_sync_reset_unload_flow(dev, true);
err = mlx5_health_wait_pci_up(dev);
if (err)
NL_SET_ERR_MSG_MOD(extack, "FW activate aborted, PCI reads fail after reset");
@@ -203,11 +204,6 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
return 0;
}
- if (mlx5_lag_is_active(dev)) {
- NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in Lag mode");
- return -EOPNOTSUPP;
- }
-
if (mlx5_core_is_mp_slave(dev)) {
NL_SET_ERR_MSG_MOD(extack, "reload is unsupported for multi port slave");
return -EOPNOTSUPP;
@@ -534,6 +530,25 @@ mlx5_devlink_hairpin_queue_size_validate(struct devlink *devlink, u32 id,
return 0;
}
+static int mlx5_devlink_num_doorbells_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *mdev = devlink_priv(devlink);
+ u32 val32 = val.vu32;
+ u32 max_num_channels;
+
+ max_num_channels = mlx5e_get_max_num_channels(mdev);
+ if (val32 > max_num_channels) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Requested num_doorbells (%u) exceeds maximum number of channels (%u)",
+ val32, max_num_channels);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static void mlx5_devlink_hairpin_params_init_values(struct devlink *devlink)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
@@ -613,6 +628,9 @@ static const struct devlink_param mlx5_devlink_eth_params[] = {
"hairpin_queue_size", DEVLINK_PARAM_TYPE_U32,
BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
mlx5_devlink_hairpin_queue_size_validate),
+ DEVLINK_PARAM_GENERIC(NUM_DOORBELLS,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
+ mlx5_devlink_num_doorbells_validate),
};
static int mlx5_devlink_eth_params_register(struct devlink *devlink)
@@ -636,6 +654,10 @@ static int mlx5_devlink_eth_params_register(struct devlink *devlink)
mlx5_devlink_hairpin_params_init_values(devlink);
+ value.vu32 = MLX5_DEFAULT_NUM_DOORBELLS;
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_NUM_DOORBELLS,
+ value);
return 0;
}
@@ -650,6 +672,105 @@ static void mlx5_devlink_eth_params_unregister(struct devlink *devlink)
ARRAY_SIZE(mlx5_devlink_eth_params));
}
+#define MLX5_PCIE_CONG_THRESH_MAX 10000
+#define MLX5_PCIE_CONG_THRESH_DEF_LOW 7500
+#define MLX5_PCIE_CONG_THRESH_DEF_HIGH 9000
+
+static int
+mlx5_devlink_pcie_cong_thresh_validate(struct devlink *devl, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ if (val.vu16 > MLX5_PCIE_CONG_THRESH_MAX) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Value %u > max supported (%u)",
+ val.vu16, MLX5_PCIE_CONG_THRESH_MAX);
+
+ return -EINVAL;
+ }
+
+ switch (id) {
+ case MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_LOW:
+ case MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_HIGH:
+ case MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_LOW:
+ case MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_HIGH:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void mlx5_devlink_pcie_cong_init_values(struct devlink *devlink)
+{
+ union devlink_param_value value;
+ u32 id;
+
+ value.vu16 = MLX5_PCIE_CONG_THRESH_DEF_LOW;
+ id = MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_LOW;
+ devl_param_driverinit_value_set(devlink, id, value);
+
+ value.vu16 = MLX5_PCIE_CONG_THRESH_DEF_HIGH;
+ id = MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_HIGH;
+ devl_param_driverinit_value_set(devlink, id, value);
+
+ value.vu16 = MLX5_PCIE_CONG_THRESH_DEF_LOW;
+ id = MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_LOW;
+ devl_param_driverinit_value_set(devlink, id, value);
+
+ value.vu16 = MLX5_PCIE_CONG_THRESH_DEF_HIGH;
+ id = MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_HIGH;
+ devl_param_driverinit_value_set(devlink, id, value);
+}
+
+static const struct devlink_param mlx5_devlink_pcie_cong_params[] = {
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_LOW,
+ "pcie_cong_inbound_low", DEVLINK_PARAM_TYPE_U16,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
+ mlx5_devlink_pcie_cong_thresh_validate),
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_HIGH,
+ "pcie_cong_inbound_high", DEVLINK_PARAM_TYPE_U16,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
+ mlx5_devlink_pcie_cong_thresh_validate),
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_LOW,
+ "pcie_cong_outbound_low", DEVLINK_PARAM_TYPE_U16,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
+ mlx5_devlink_pcie_cong_thresh_validate),
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_HIGH,
+ "pcie_cong_outbound_high", DEVLINK_PARAM_TYPE_U16,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
+ mlx5_devlink_pcie_cong_thresh_validate),
+};
+
+static int mlx5_devlink_pcie_cong_params_register(struct devlink *devlink)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ int err;
+
+ if (!mlx5_pcie_cong_event_supported(dev))
+ return 0;
+
+ err = devl_params_register(devlink, mlx5_devlink_pcie_cong_params,
+ ARRAY_SIZE(mlx5_devlink_pcie_cong_params));
+ if (err)
+ return err;
+
+ mlx5_devlink_pcie_cong_init_values(devlink);
+
+ return 0;
+}
+
+static void mlx5_devlink_pcie_cong_params_unregister(struct devlink *devlink)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ if (!mlx5_pcie_cong_event_supported(dev))
+ return;
+
+ devl_params_unregister(devlink, mlx5_devlink_pcie_cong_params,
+ ARRAY_SIZE(mlx5_devlink_pcie_cong_params));
+}
+
static int mlx5_devlink_enable_rdma_validate(struct devlink *devlink, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack)
@@ -895,8 +1016,20 @@ int mlx5_devlink_params_register(struct devlink *devlink)
if (err)
goto max_uc_list_err;
+ err = mlx5_devlink_pcie_cong_params_register(devlink);
+ if (err)
+ goto pcie_cong_err;
+
+ err = mlx5_nv_param_register_dl_params(devlink);
+ if (err)
+ goto nv_param_err;
+
return 0;
+nv_param_err:
+ mlx5_devlink_pcie_cong_params_unregister(devlink);
+pcie_cong_err:
+ mlx5_devlink_max_uc_list_params_unregister(devlink);
max_uc_list_err:
mlx5_devlink_auxdev_params_unregister(devlink);
auxdev_reg_err:
@@ -907,6 +1040,8 @@ auxdev_reg_err:
void mlx5_devlink_params_unregister(struct devlink *devlink)
{
+ mlx5_nv_param_unregister_dl_params(devlink);
+ mlx5_devlink_pcie_cong_params_unregister(devlink);
mlx5_devlink_max_uc_list_params_unregister(devlink);
mlx5_devlink_auxdev_params_unregister(devlink);
devl_params_unregister(devlink, mlx5_devlink_params,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
index 961f75da6227..c9555119a661 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
@@ -22,6 +22,11 @@ enum mlx5_devlink_param_id {
MLX5_DEVLINK_PARAM_ID_ESW_MULTIPORT,
MLX5_DEVLINK_PARAM_ID_HAIRPIN_NUM_QUEUES,
MLX5_DEVLINK_PARAM_ID_HAIRPIN_QUEUE_SIZE,
+ MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_LOW,
+ MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_HIGH,
+ MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_LOW,
+ MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_HIGH,
+ MLX5_DEVLINK_PARAM_ID_CQE_COMPRESSION_TYPE
};
struct mlx5_trap_ctx {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
index 86253a89c24c..7cae0c6e5e8a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. */
+#include <linux/mlx5/vport.h>
+
#include "reporter_vnic.h"
#include "en_stats.h"
#include "devlink.h"
@@ -105,6 +107,15 @@ void mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev,
}
if (MLX5_CAP_GEN(dev, nic_cap_reg))
mlx5_reporter_vnic_diagnose_counter_icm(dev, fmsg, vport_num, other_vport);
+ if (MLX5_CAP_GEN(dev, vnic_env_cnt_bar_uar_access))
+ devlink_fmsg_u32_pair_put(fmsg, "bar_uar_access",
+ VNIC_ENV_GET(&vnic, bar_uar_access));
+ if (MLX5_CAP_GEN(dev, vnic_env_cnt_odp_page_fault)) {
+ devlink_fmsg_u32_pair_put(fmsg, "odp_local_triggered_page_fault",
+ VNIC_ENV_GET(&vnic, odp_local_triggered_page_fault));
+ devlink_fmsg_u32_pair_put(fmsg, "odp_remote_triggered_page_fault",
+ VNIC_ENV_GET(&vnic, odp_remote_triggered_page_fault));
+ }
devlink_fmsg_obj_nest_end(fmsg);
devlink_fmsg_pair_nest_end(fmsg);
@@ -133,11 +144,11 @@ void mlx5_reporter_vnic_create(struct mlx5_core_dev *dev)
health->vnic_reporter =
devlink_health_reporter_create(devlink,
&mlx5_reporter_vnic_ops,
- 0, dev);
+ dev);
if (IS_ERR(health->vnic_reporter))
mlx5_core_warn(dev,
- "Failed to create vnic reporter, err = %ld\n",
- PTR_ERR(health->vnic_reporter));
+ "Failed to create vnic reporter, err = %pe\n",
+ health->vnic_reporter);
}
void mlx5_reporter_vnic_destroy(struct mlx5_core_dev *dev)
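Two cleanups meet in this hunk: devlink_health_reporter_create() loses its graceful-period argument (all call sites in this series are updated the same way), and the warning switches to %pe, which prints a symbolic errno straight from an ERR_PTR without the PTR_ERR()+%ld detour. In isolation, with hypothetical variables:

	/* %pe renders an ERR_PTR as e.g. "-ENOMEM" (printk core). */
	if (IS_ERR(reporter))
		dev_warn(dev, "Failed to create reporter, err = %pe\n",
			 reporter);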
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 0dd3bc0f4caa..14e3207b14e7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -47,6 +47,7 @@
#include <linux/rhashtable.h>
#include <net/udp_tunnel.h>
#include <net/switchdev.h>
+#include <net/psp/types.h>
#include <net/xdp.h>
#include <linux/dim.h>
#include <linux/bits.h>
@@ -68,7 +69,7 @@ struct page_pool;
#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
#define MLX5E_METADATA_ETHER_LEN 8
-#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
+#define MLX5E_ETH_HARD_MTU (ETH_HLEN + PSP_ENCAP_HLEN + PSP_TRL_SIZE + VLAN_HLEN + ETH_FCS_LEN)
#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))
@@ -344,6 +345,7 @@ struct mlx5e_cq {
/* data path - accessed per napi poll */
u16 event_ctr;
struct napi_struct *napi;
+ struct mlx5_uars_page *uar;
struct mlx5_core_cq mcq;
struct mlx5e_ch_stats *ch_stats;
@@ -788,6 +790,7 @@ struct mlx5e_channel {
int vec_ix;
int sd_ix;
int cpu;
+ struct mlx5_sq_bfreg *bfreg;
/* Sync between icosq recovery and XSK enable/disable. */
struct mutex icosq_recovery_lock;
@@ -936,6 +939,9 @@ struct mlx5e_priv {
#ifdef CONFIG_MLX5_EN_IPSEC
struct mlx5e_ipsec *ipsec;
#endif
+#ifdef CONFIG_MLX5_EN_PSP
+ struct mlx5e_psp *psp;
+#endif
#ifdef CONFIG_MLX5_EN_TLS
struct mlx5e_tls *tls;
#endif
@@ -950,6 +956,7 @@ struct mlx5e_priv {
struct mlx5e_mqprio_rl *mqprio_rl;
struct dentry *dfs_root;
struct mlx5_devcom_comp_dev *devcom;
+ struct ethtool_fec_hist_range *fec_ranges;
};
struct mlx5e_dev {
@@ -1060,6 +1067,7 @@ struct mlx5e_create_cq_param {
struct mlx5e_ch_stats *ch_stats;
int node;
int ix;
+ struct mlx5_uars_page *uar;
};
struct mlx5e_cq_param;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
index b59aee75de94..2c98a5299df3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
@@ -26,7 +26,6 @@ struct mlx5e_dcbx {
u8 cap;
/* Buffer configuration */
- bool manual_buffer;
u32 cable_len;
u32 xoff;
u16 port_buff_cell_sz;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 9560fcba643f..c3408b3f7010 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -57,7 +57,7 @@ struct mlx5e_l2_table {
bool promisc_enabled;
};
-#define MLX5E_NUM_INDIR_TIRS (MLX5_NUM_TT - 1)
+#define MLX5E_NUM_INDIR_TIRS (MLX5_NUM_INDIR_TIRS)
#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
MLX5_HASH_FIELD_SEL_DST_IP)
@@ -88,10 +88,11 @@ enum {
#ifdef CONFIG_MLX5_EN_ARFS
MLX5E_ARFS_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
#endif
-#ifdef CONFIG_MLX5_EN_IPSEC
+#if defined(CONFIG_MLX5_EN_IPSEC) || defined(CONFIG_MLX5_EN_PSP)
MLX5E_ACCEL_FS_ESP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
MLX5E_ACCEL_FS_POL_FT_LEVEL,
+ MLX5E_ACCEL_FS_POL_MISS_FT_LEVEL,
MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
#endif
};
@@ -131,7 +132,8 @@ struct mlx5e_ptp_fs;
void mlx5e_set_ttc_params(struct mlx5e_flow_steering *fs,
struct mlx5e_rx_res *rx_res,
- struct ttc_params *ttc_params, bool tunnel);
+ struct ttc_params *ttc_params, bool tunnel,
+ bool ipsec_rss);
void mlx5e_destroy_ttc_table(struct mlx5e_flow_steering *fs);
int mlx5e_create_ttc_table(struct mlx5e_flow_steering *fs,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
index b4f3bd7d346e..195863b2c013 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
@@ -138,8 +138,8 @@ void mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv)
if (IS_ERR_OR_NULL(agent)) {
if (IS_ERR(agent))
netdev_warn(priv->netdev,
- "Failed to create hv vhca stats agent, err = %ld\n",
- PTR_ERR(agent));
+ "Failed to create hv vhca stats agent, err = %pe\n",
+ agent);
kvfree(priv->stats_agent.buf);
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 3cca06a74cf9..3692298e10f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -6,6 +6,7 @@
#include "en/port.h"
#include "en_accel/en_accel.h"
#include "en_accel/ipsec.h"
+#include "en_accel/psp.h"
#include <linux/dim.h>
#include <net/page_pool/types.h>
#include <net/xdp_sock_drv.h>
@@ -611,6 +612,7 @@ void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e
.ch_stats = c->stats,
.node = cpu_to_node(c->cpu),
.ix = c->vec_ix,
+ .uar = c->bfreg->up,
};
}
@@ -810,7 +812,7 @@ static void mlx5e_build_common_cq_param(struct mlx5_core_dev *mdev,
{
void *cqc = param->cqc;
- MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.bfreg.up->index);
if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
}
@@ -1003,7 +1005,8 @@ void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
bool allow_swp;
allow_swp = mlx5_geneve_tx_allowed(mdev) ||
- (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO);
+ (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO) ||
+ mlx5_is_psp_device(mdev);
mlx5e_build_sq_param_common(mdev, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
MLX5_SET(sqc, sqc, allow_swp, allow_swp);
@@ -1229,7 +1232,6 @@ static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
struct mlx5e_sq_param *param)
{
void *sqc = param->sqc;
@@ -1256,7 +1258,7 @@ int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev);
mlx5e_build_sq_param(mdev, params, &cparam->txq_sq);
- mlx5e_build_xdpsq_param(mdev, params, NULL, &cparam->xdp_sq);
+ mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
mlx5e_build_icosq_param(mdev, icosq_log_wq_sz, &cparam->icosq);
mlx5e_build_async_icosq_param(mdev, async_icosq_log_wq_sz, &cparam->async_icosq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 488ccdbc1e2c..00617c65fe3c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -51,6 +51,7 @@ struct mlx5e_create_sq_param {
u32 tisn;
u8 tis_lst_sz;
u8 min_inline_mode;
+ u32 uar_page;
};
/* Striding RQ dynamic parameters */
@@ -132,7 +133,6 @@ void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
struct mlx5e_cq_param *param);
void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk,
struct mlx5e_sq_param *param);
int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c b/drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c
index 0ed017569a19..2eb666a46f39 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES.
+#include "../devlink.h"
#include "en.h"
#include "pcie_cong_event.h"
@@ -23,6 +24,7 @@ struct mlx5e_pcie_cong_stats {
u32 pci_bw_inbound_low;
u32 pci_bw_outbound_high;
u32 pci_bw_outbound_low;
+ u32 pci_bw_stale_event;
};
struct mlx5e_pcie_cong_event {
@@ -41,13 +43,6 @@ struct mlx5e_pcie_cong_event {
struct mlx5e_pcie_cong_stats stats;
};
-/* In units of 0.01 % */
-static const struct mlx5e_pcie_cong_thresh default_thresh_config = {
- .inbound_high = 9000,
- .inbound_low = 7500,
- .outbound_high = 9000,
- .outbound_low = 7500,
-};
static const struct counter_desc mlx5e_pcie_cong_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_pcie_cong_stats,
@@ -58,6 +53,8 @@ static const struct counter_desc mlx5e_pcie_cong_stats_desc[] = {
pci_bw_outbound_high) },
{ MLX5E_DECLARE_STAT(struct mlx5e_pcie_cong_stats,
pci_bw_outbound_low) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_pcie_cong_stats,
+ pci_bw_stale_event) },
};
#define NUM_PCIE_CONG_COUNTERS ARRAY_SIZE(mlx5e_pcie_cong_stats_desc)
@@ -218,8 +215,10 @@ static void mlx5e_pcie_cong_event_work(struct work_struct *work)
}
changes = cong_event->state ^ new_cong_state;
- if (!changes)
+ if (!changes) {
+ cong_event->stats.pci_bw_stale_event++;
return;
+ }
cong_event->state = new_cong_state;
@@ -249,8 +248,60 @@ static int mlx5e_pcie_cong_event_handler(struct notifier_block *nb,
return NOTIFY_OK;
}
+static int
+mlx5e_pcie_cong_get_thresh_config(struct mlx5_core_dev *dev,
+ struct mlx5e_pcie_cong_thresh *config)
+{
+ u32 ids[4] = {
+ MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_LOW,
+ MLX5_DEVLINK_PARAM_ID_PCIE_CONG_IN_HIGH,
+ MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_LOW,
+ MLX5_DEVLINK_PARAM_ID_PCIE_CONG_OUT_HIGH,
+ };
+ struct devlink *devlink = priv_to_devlink(dev);
+ union devlink_param_value val[4];
+
+ for (int i = 0; i < 4; i++) {
+ u32 id = ids[i];
+ int err;
+
+ err = devl_param_driverinit_value_get(devlink, id, &val[i]);
+ if (err)
+ return err;
+ }
+
+ config->inbound_low = val[0].vu16;
+ config->inbound_high = val[1].vu16;
+ config->outbound_low = val[2].vu16;
+ config->outbound_high = val[3].vu16;
+
+ return 0;
+}
+
+static int
+mlx5e_thresh_config_validate(struct mlx5_core_dev *mdev,
+ const struct mlx5e_pcie_cong_thresh *config)
+{
+ int err = 0;
+
+ if (config->inbound_low >= config->inbound_high) {
+ err = -EINVAL;
+ mlx5_core_err(mdev, "PCIe inbound congestion threshold configuration invalid: low (%u) >= high (%u).\n",
+ config->inbound_low, config->inbound_high);
+ }
+
+ if (config->outbound_low >= config->outbound_high) {
+ err = -EINVAL;
+ mlx5_core_err(mdev, "PCIe outbound congestion threshold configuration invalid: low (%u) >= high (%u).\n",
+ config->outbound_low, config->outbound_high);
+ }
+
+ return err;
+}
+
int mlx5e_pcie_cong_event_init(struct mlx5e_priv *priv)
{
+ struct mlx5e_pcie_cong_thresh thresh_config = {};
struct mlx5e_pcie_cong_event *cong_event;
struct mlx5_core_dev *mdev = priv->mdev;
int err;
@@ -258,6 +309,16 @@ int mlx5e_pcie_cong_event_init(struct mlx5e_priv *priv)
if (!mlx5_pcie_cong_event_supported(mdev))
return 0;
+ err = mlx5e_pcie_cong_get_thresh_config(mdev, &thresh_config);
+ if (WARN_ON(err))
+ return err;
+
+ err = mlx5e_thresh_config_validate(mdev, &thresh_config);
+ if (err) {
+ mlx5_core_err(mdev, "PCIe congestion event feature disabled\n");
+ return err;
+ }
+
cong_event = kvzalloc_node(sizeof(*cong_event), GFP_KERNEL,
mdev->priv.numa_node);
if (!cong_event)
@@ -269,7 +330,7 @@ int mlx5e_pcie_cong_event_init(struct mlx5e_priv *priv)
cong_event->priv = priv;
- err = mlx5_cmd_pcie_cong_event_set(mdev, &default_thresh_config,
+ err = mlx5_cmd_pcie_cong_event_set(mdev, &thresh_config,
&cong_event->obj_id);
if (err) {
mlx5_core_warn(mdev, "Error creating a PCIe congestion event object\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index 5ae787656a7c..4720523813b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -272,8 +272,8 @@ static int port_update_shared_buffer(struct mlx5_core_dev *mdev,
/* Total shared buffer size is split in a ratio of 3:1 between
* lossy and lossless pools respectively.
*/
- lossy_epool_size = (shared_buffer_size / 4) * 3;
lossless_ipool_size = shared_buffer_size / 4;
+ lossy_epool_size = shared_buffer_size - lossless_ipool_size;
mlx5e_port_set_sbpr(mdev, 0, MLX5_EGRESS_DIR, MLX5_LOSSY_POOL, 0,
lossy_epool_size);
@@ -288,14 +288,12 @@ static int port_set_buffer(struct mlx5e_priv *priv,
u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
struct mlx5_core_dev *mdev = priv->mdev;
int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
- u32 new_headroom_size = 0;
- u32 current_headroom_size;
+ u32 current_headroom_cells = 0;
+ u32 new_headroom_cells = 0;
void *in;
int err;
int i;
- current_headroom_size = port_buffer->headroom_size;
-
in = kzalloc(sz, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -306,12 +304,14 @@ static int port_set_buffer(struct mlx5e_priv *priv,
for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
void *buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]);
+ current_headroom_cells += MLX5_GET(bufferx_reg, buffer, size);
+
u64 size = port_buffer->buffer[i].size;
u64 xoff = port_buffer->buffer[i].xoff;
u64 xon = port_buffer->buffer[i].xon;
- new_headroom_size += size;
do_div(size, port_buff_cell_sz);
+ new_headroom_cells += size;
do_div(xoff, port_buff_cell_sz);
do_div(xon, port_buff_cell_sz);
MLX5_SET(bufferx_reg, buffer, size, size);
@@ -320,10 +320,8 @@ static int port_set_buffer(struct mlx5e_priv *priv,
MLX5_SET(bufferx_reg, buffer, xon_threshold, xon);
}
- new_headroom_size /= port_buff_cell_sz;
- current_headroom_size /= port_buff_cell_sz;
- err = port_update_shared_buffer(priv->mdev, current_headroom_size,
- new_headroom_size);
+ err = port_update_shared_buffer(priv->mdev, current_headroom_cells,
+ new_headroom_cells);
if (err)
goto out;
@@ -577,7 +575,6 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (err)
return err;
}
- priv->dcbx.xoff = xoff;
/* Apply the settings */
if (update_buffer) {
@@ -586,6 +583,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
return err;
}
+ priv->dcbx.xoff = xoff;
+
if (update_prio2buffer)
err = mlx5e_port_set_priority2buffer(priv->mdev, prio2buffer);
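The reordered pool split in port_update_shared_buffer() fixes an integer-rounding leak: computing both pools by division could leave cells unassigned, while deriving the lossy pool as the remainder accounts for every cell. A worked example with a hypothetical shared_buffer_size of 1023 cells:

        /*
         * old: lossy    = (1023 / 4) * 3 = 765
         *      lossless =  1023 / 4      = 255  -> 765 + 255 = 1020, 3 cells lost
         * new: lossless =  1023 / 4      = 255
         *      lossy    =  1023 - 255    = 768  -> all 1023 cells assigned
         */
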
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 391b4e9c9dc4..c93ee969ea64 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -334,7 +334,7 @@ static int mlx5e_ptp_alloc_txqsq(struct mlx5e_ptp *c, int txq_ix,
sq->mdev = mdev;
sq->ch_ix = MLX5E_PTP_CHANNEL_IX;
sq->txq_ix = txq_ix;
- sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+ sq->uar_map = c->bfreg->map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
sq->stats = &c->priv->ptp_stats.sq[tc];
@@ -486,6 +486,7 @@ static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
csp.wq_ctrl = &txqsq->wq_ctrl;
csp.min_inline_mode = txqsq->min_inline_mode;
csp.ts_cqe_to_dest_cqn = ptpsq->ts_cq.mcq.cqn;
+ csp.uar_page = c->bfreg->index;
err = mlx5e_create_sq_rdy(c->mdev, sqp, &csp, 0, &txqsq->sqn);
if (err)
@@ -577,6 +578,7 @@ static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c,
ccp.ch_stats = c->stats;
ccp.napi = &c->napi;
ccp.ix = MLX5E_PTP_CHANNEL_IX;
+ ccp.uar = c->bfreg->up;
cq_param = &cparams->txq_sq_param.cqp;
@@ -626,6 +628,7 @@ static int mlx5e_ptp_open_rx_cq(struct mlx5e_ptp *c,
ccp.ch_stats = c->stats;
ccp.napi = &c->napi;
ccp.ix = MLX5E_PTP_CHANNEL_IX;
+ ccp.uar = c->bfreg->up;
cq_param = &cparams->rq_param.cqp;
@@ -900,6 +903,7 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
c->num_tc = mlx5e_get_dcb_num_tc(params);
c->stats = &priv->ptp_stats.ch;
c->lag_port = lag_port;
+ c->bfreg = &mdev->priv.bfreg;
err = mlx5e_ptp_set_state(c, params);
if (err)
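The PTP hunks above and below switch the channel from the global mlx5e_res doorbell region to a cached per-channel bfreg pointer. All of the plumbing is visible in the hunks themselves; collected in one place:

        c->bfreg = &mdev->priv.bfreg;   /* channel caches its doorbell region  */
        sq->uar_map = c->bfreg->map;    /* SQ rings doorbells via that mapping */
        csp.uar_page = c->bfreg->index; /* SQ HW object is created on its UAR  */
        ccp.uar = c->bfreg->up;         /* CQs are armed on the same UAR page  */
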
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
index 883c044852f1..1b3c9648220b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
@@ -66,6 +66,7 @@ struct mlx5e_ptp {
struct mlx5_core_dev *mdev;
struct hwtstamp_config *tstamp;
DECLARE_BITMAP(state, MLX5E_PTP_STATE_NUM_STATES);
+ struct mlx5_sq_bfreg *bfreg;
};
static inline bool mlx5e_use_ptpsq(struct sk_buff *skb)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index 0f5d7ea8956f..9d1c677814e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -488,8 +488,8 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
fdb_info,
br_offloads);
if (IS_ERR(work)) {
- WARN_ONCE(1, "Failed to init switchdev work, err=%ld",
- PTR_ERR(work));
+ WARN_ONCE(1, "Failed to init switchdev work, err=%pe",
+ work);
return notifier_from_errno(PTR_ERR(work));
}
@@ -527,7 +527,8 @@ void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
br_offloads = mlx5_esw_bridge_init(esw);
rtnl_unlock();
if (IS_ERR(br_offloads)) {
- esw_warn(mdev, "Failed to init esw bridge (err=%ld)\n", PTR_ERR(br_offloads));
+ esw_warn(mdev, "Failed to init esw bridge (err=%pe)\n",
+ br_offloads);
return;
}
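This file, like several others in the series, converts error prints from "%ld" plus PTR_ERR() to the "%pe" format, which decodes an ERR_PTR-encoded pointer into its symbolic errno. A small sketch with a hypothetical rule pointer:

        struct mlx5_flow_handle *rule = ERR_PTR(-ENOMEM); /* hypothetical */

        if (IS_ERR(rule))
                pr_warn("failed to add rule, err=%pe\n", rule); /* prints -ENOMEM */
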
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index 16c44d628eda..b1415992ffa2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -651,25 +651,29 @@ void mlx5e_reporter_icosq_resume_recovery(struct mlx5e_channel *c)
mutex_unlock(&c->icosq_recovery_lock);
}
+#define MLX5E_REPORTER_RX_GRACEFUL_PERIOD 500
+#define MLX5E_REPORTER_RX_BURST_PERIOD 500
+
static const struct devlink_health_reporter_ops mlx5_rx_reporter_ops = {
.name = "rx",
.recover = mlx5e_rx_reporter_recover,
.diagnose = mlx5e_rx_reporter_diagnose,
.dump = mlx5e_rx_reporter_dump,
+ .default_graceful_period = MLX5E_REPORTER_RX_GRACEFUL_PERIOD,
+ .default_burst_period = MLX5E_REPORTER_RX_BURST_PERIOD,
};
-#define MLX5E_REPORTER_RX_GRACEFUL_PERIOD 500
-
void mlx5e_reporter_rx_create(struct mlx5e_priv *priv)
{
+ struct devlink_port *port = priv->netdev->devlink_port;
struct devlink_health_reporter *reporter;
- reporter = devlink_port_health_reporter_create(priv->netdev->devlink_port,
+ reporter = devlink_port_health_reporter_create(port,
&mlx5_rx_reporter_ops,
- MLX5E_REPORTER_RX_GRACEFUL_PERIOD, priv);
+ priv);
if (IS_ERR(reporter)) {
- netdev_warn(priv->netdev, "Failed to create rx reporter, err = %ld\n",
- PTR_ERR(reporter));
+ netdev_warn(priv->netdev, "Failed to create rx reporter, err = %pe\n",
+ reporter);
return;
}
priv->rx_reporter = reporter;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 85d5cb39b107..9e2cf191ed30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -539,26 +539,30 @@ void mlx5e_reporter_tx_ptpsq_unhealthy(struct mlx5e_ptpsq *ptpsq)
mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
}
+#define MLX5E_REPORTER_TX_GRACEFUL_PERIOD 500
+#define MLX5E_REPORTER_TX_BURST_PERIOD 500
+
static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = {
.name = "tx",
.recover = mlx5e_tx_reporter_recover,
.diagnose = mlx5e_tx_reporter_diagnose,
.dump = mlx5e_tx_reporter_dump,
+ .default_graceful_period = MLX5E_REPORTER_TX_GRACEFUL_PERIOD,
+ .default_burst_period = MLX5E_REPORTER_TX_BURST_PERIOD,
};
-#define MLX5_REPORTER_TX_GRACEFUL_PERIOD 500
-
void mlx5e_reporter_tx_create(struct mlx5e_priv *priv)
{
+ struct devlink_port *port = priv->netdev->devlink_port;
struct devlink_health_reporter *reporter;
- reporter = devlink_port_health_reporter_create(priv->netdev->devlink_port,
+ reporter = devlink_port_health_reporter_create(port,
&mlx5_tx_reporter_ops,
- MLX5_REPORTER_TX_GRACEFUL_PERIOD, priv);
+ priv);
if (IS_ERR(reporter)) {
netdev_warn(priv->netdev,
- "Failed to create tx reporter, err = %ld\n",
- PTR_ERR(reporter));
+ "Failed to create tx reporter, err = %pe\n",
+ reporter);
return;
}
priv->tx_reporter = reporter;
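The rx/tx reporter rework above moves the graceful and burst periods out of the create() call and into the ops as defaults. A sketch of the resulting registration shape, using a hypothetical "example" reporter:

        static const struct devlink_health_reporter_ops example_ops = {
                .name = "example",
                .default_graceful_period = 500, /* ms, as in the rx/tx reporters */
                .default_burst_period = 500,
        };

        /* create() now takes only the port, the ops and the priv cookie */
        reporter = devlink_port_health_reporter_create(port, &example_ops, priv);
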
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
index c68ba0e58fa6..c96cbc4b0dbf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
@@ -75,15 +75,14 @@ struct mlx5e_rss {
struct mlx5e_tir *inner_tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_rqt rqt;
struct mlx5_core_dev *mdev; /* primary */
- u32 drop_rqn;
- bool inner_ft_support;
+ struct mlx5e_rss_params params;
bool enabled;
refcount_t refcnt;
};
bool mlx5e_rss_get_inner_ft_support(struct mlx5e_rss *rss)
{
- return rss->inner_ft_support;
+ return rss->params.inner_ft_support;
}
void mlx5e_rss_params_indir_modify_actual_size(struct mlx5e_rss *rss, u32 num_channels)
@@ -91,7 +90,7 @@ void mlx5e_rss_params_indir_modify_actual_size(struct mlx5e_rss *rss, u32 num_ch
rss->indir.actual_table_size = mlx5e_rqt_size(rss->mdev, num_channels);
}
-int mlx5e_rss_params_indir_init(struct mlx5e_rss_params_indir *indir, struct mlx5_core_dev *mdev,
+int mlx5e_rss_params_indir_init(struct mlx5e_rss_params_indir *indir,
u32 actual_table_size, u32 max_table_size)
{
indir->table = kvmalloc_array(max_table_size, sizeof(*indir->table), GFP_KERNEL);
@@ -139,7 +138,8 @@ static struct mlx5e_rss *mlx5e_rss_init_copy(const struct mlx5e_rss *from)
if (!rss)
return ERR_PTR(-ENOMEM);
- err = mlx5e_rss_params_indir_init(&rss->indir, from->mdev, from->indir.actual_table_size,
+ err = mlx5e_rss_params_indir_init(&rss->indir,
+ from->indir.actual_table_size,
from->indir.max_table_size);
if (err)
goto err_free_rss;
@@ -192,11 +192,12 @@ mlx5e_rss_get_tt_config(struct mlx5e_rss *rss, enum mlx5_traffic_types tt)
return rss_tt;
}
-static int mlx5e_rss_create_tir(struct mlx5e_rss *rss,
- enum mlx5_traffic_types tt,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
- bool inner)
+static int
+mlx5e_rss_create_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
+ const struct mlx5e_packet_merge_param *pkt_merge_param,
+ bool inner)
{
+ bool rss_inner = rss->params.inner_ft_support;
struct mlx5e_rss_params_traffic_type rss_tt;
struct mlx5e_tir_builder *builder;
struct mlx5e_tir **tir_p;
@@ -204,7 +205,7 @@ static int mlx5e_rss_create_tir(struct mlx5e_rss *rss,
u32 rqtn;
int err;
- if (inner && !rss->inner_ft_support) {
+ if (inner && !rss_inner) {
mlx5e_rss_warn(rss->mdev,
"Cannot create inner indirect TIR[%d], RSS inner FT is not supported.\n",
tt);
@@ -227,8 +228,8 @@ static int mlx5e_rss_create_tir(struct mlx5e_rss *rss,
rqtn = mlx5e_rqt_get_rqtn(&rss->rqt);
mlx5e_tir_builder_build_rqt(builder, rss->mdev->mlx5e_res.hw_objs.td.tdn,
- rqtn, rss->inner_ft_support);
- mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param);
+ rqtn, rss_inner);
+ mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param);
rss_tt = mlx5e_rss_get_tt_config(rss, tt);
mlx5e_tir_builder_build_rss(builder, &rss->hash, &rss_tt, inner);
@@ -264,15 +265,16 @@ static void mlx5e_rss_destroy_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types
*tir_p = NULL;
}
-static int mlx5e_rss_create_tirs(struct mlx5e_rss *rss,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
- bool inner)
+static int
+mlx5e_rss_create_tirs(struct mlx5e_rss *rss,
+ const struct mlx5e_packet_merge_param *pkt_merge_param,
+ bool inner)
{
enum mlx5_traffic_types tt, max_tt;
int err;
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
- err = mlx5e_rss_create_tir(rss, tt, init_pkt_merge_param, inner);
+ err = mlx5e_rss_create_tir(rss, tt, pkt_merge_param, inner);
if (err)
goto err_destroy_tirs;
}
@@ -335,7 +337,7 @@ static int mlx5e_rss_update_tirs(struct mlx5e_rss *rss)
tt, err);
}
- if (!rss->inner_ft_support)
+ if (!rss->params.inner_ft_support)
continue;
err = mlx5e_rss_update_tir(rss, tt, true);
@@ -355,14 +357,16 @@ static int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss)
refcount_set(&rss->refcnt, 1);
return mlx5e_rqt_init_direct(&rss->rqt, rss->mdev, true,
- rss->drop_rqn, rss->indir.max_table_size);
+ rss->params.drop_rqn,
+ rss->indir.max_table_size);
}
-struct mlx5e_rss *mlx5e_rss_init(struct mlx5_core_dev *mdev, bool inner_ft_support, u32 drop_rqn,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
- enum mlx5e_rss_init_type type, unsigned int nch,
- unsigned int max_nch)
+struct mlx5e_rss *
+mlx5e_rss_init(struct mlx5_core_dev *mdev,
+ const struct mlx5e_rss_params *params,
+ const struct mlx5e_rss_init_params *init_params)
{
+ u32 rqt_max_size, rqt_size;
struct mlx5e_rss *rss;
int err;
@@ -370,29 +374,31 @@ struct mlx5e_rss *mlx5e_rss_init(struct mlx5_core_dev *mdev, bool inner_ft_suppo
if (!rss)
return ERR_PTR(-ENOMEM);
- err = mlx5e_rss_params_indir_init(&rss->indir, mdev,
- mlx5e_rqt_size(mdev, nch),
- mlx5e_rqt_size(mdev, max_nch));
+ rqt_size = mlx5e_rqt_size(mdev, init_params->nch);
+ rqt_max_size = mlx5e_rqt_size(mdev, init_params->max_nch);
+ err = mlx5e_rss_params_indir_init(&rss->indir, rqt_size, rqt_max_size);
if (err)
goto err_free_rss;
rss->mdev = mdev;
- rss->inner_ft_support = inner_ft_support;
- rss->drop_rqn = drop_rqn;
+ rss->params = *params;
err = mlx5e_rss_init_no_tirs(rss);
if (err)
goto err_free_indir;
- if (type == MLX5E_RSS_INIT_NO_TIRS)
+ if (init_params->type == MLX5E_RSS_INIT_NO_TIRS)
goto out;
- err = mlx5e_rss_create_tirs(rss, init_pkt_merge_param, false);
+ err = mlx5e_rss_create_tirs(rss, init_params->pkt_merge_param,
+ false);
if (err)
goto err_destroy_rqt;
- if (inner_ft_support) {
- err = mlx5e_rss_create_tirs(rss, init_pkt_merge_param, true);
+ if (params->inner_ft_support) {
+ err = mlx5e_rss_create_tirs(rss,
+ init_params->pkt_merge_param,
+ true);
if (err)
goto err_destroy_tirs;
}
@@ -418,7 +424,7 @@ int mlx5e_rss_cleanup(struct mlx5e_rss *rss)
mlx5e_rss_destroy_tirs(rss, false);
- if (rss->inner_ft_support)
+ if (rss->params.inner_ft_support)
mlx5e_rss_destroy_tirs(rss, true);
mlx5e_rqt_destroy(&rss->rqt);
@@ -448,7 +454,7 @@ u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
{
struct mlx5e_tir *tir;
- WARN_ON(inner && !rss->inner_ft_support);
+ WARN_ON(inner && !rss->params.inner_ft_support);
tir = rss_get_tir(rss, tt, inner);
WARN_ON(!tir);
@@ -468,10 +474,10 @@ bool mlx5e_rss_valid_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, bool
/* Fill the "tirn" output parameter.
* Create the requested TIR if it's its first usage.
*/
-int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
- enum mlx5_traffic_types tt,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
- bool inner, u32 *tirn)
+int
+mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
+ const struct mlx5e_packet_merge_param *pkt_merge_param,
+ bool inner, u32 *tirn)
{
struct mlx5e_tir *tir;
@@ -479,7 +485,7 @@ int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
if (!tir) { /* TIR doesn't exist, create one */
int err;
- err = mlx5e_rss_create_tir(rss, tt, init_pkt_merge_param, inner);
+ err = mlx5e_rss_create_tir(rss, tt, pkt_merge_param, inner);
if (err)
return err;
tir = rss_get_tir(rss, tt, inner);
@@ -512,10 +518,11 @@ void mlx5e_rss_disable(struct mlx5e_rss *rss)
int err;
rss->enabled = false;
- err = mlx5e_rqt_redirect_direct(&rss->rqt, rss->drop_rqn, NULL);
+ err = mlx5e_rqt_redirect_direct(&rss->rqt, rss->params.drop_rqn, NULL);
if (err)
mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to drop RQ %#x: err = %d\n",
- mlx5e_rqt_get_rqtn(&rss->rqt), rss->drop_rqn, err);
+ mlx5e_rqt_get_rqtn(&rss->rqt),
+ rss->params.drop_rqn, err);
}
int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
@@ -548,7 +555,7 @@ int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
}
inner_tir:
- if (!rss->inner_ft_support)
+ if (!rss->params.inner_ft_support)
continue;
tir = rss_get_tir(rss, tt, true);
@@ -681,7 +688,7 @@ int mlx5e_rss_set_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
return err;
}
- if (!(rss->inner_ft_support))
+ if (!(rss->params.inner_ft_support))
return 0;
err = mlx5e_rss_update_tir(rss, tt, true);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
index c6c1b2847cf5..5fb03cd0a411 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
@@ -13,19 +13,31 @@ enum mlx5e_rss_init_type {
MLX5E_RSS_INIT_TIRS
};
+struct mlx5e_rss_init_params {
+ enum mlx5e_rss_init_type type;
+ const struct mlx5e_packet_merge_param *pkt_merge_param;
+ unsigned int nch;
+ unsigned int max_nch;
+};
+
+struct mlx5e_rss_params {
+ bool inner_ft_support;
+ u32 drop_rqn;
+};
+
struct mlx5e_rss_params_traffic_type
mlx5e_rss_get_default_tt_config(enum mlx5_traffic_types tt);
struct mlx5e_rss;
-int mlx5e_rss_params_indir_init(struct mlx5e_rss_params_indir *indir, struct mlx5_core_dev *mdev,
+int mlx5e_rss_params_indir_init(struct mlx5e_rss_params_indir *indir,
u32 actual_table_size, u32 max_table_size);
void mlx5e_rss_params_indir_cleanup(struct mlx5e_rss_params_indir *indir);
void mlx5e_rss_params_indir_modify_actual_size(struct mlx5e_rss *rss, u32 num_channels);
-struct mlx5e_rss *mlx5e_rss_init(struct mlx5_core_dev *mdev, bool inner_ft_support, u32 drop_rqn,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
- enum mlx5e_rss_init_type type, unsigned int nch,
- unsigned int max_nch);
+struct mlx5e_rss *
+mlx5e_rss_init(struct mlx5_core_dev *mdev,
+ const struct mlx5e_rss_params *params,
+ const struct mlx5e_rss_init_params *init_params);
int mlx5e_rss_cleanup(struct mlx5e_rss *rss);
void mlx5e_rss_refcnt_inc(struct mlx5e_rss *rss);
@@ -37,10 +49,10 @@ u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
bool inner);
bool mlx5e_rss_valid_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, bool inner);
u32 mlx5e_rss_get_rqtn(struct mlx5e_rss *rss);
-int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
- enum mlx5_traffic_types tt,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
- bool inner, u32 *tirn);
+int
+mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
+ const struct mlx5e_packet_merge_param *pkt_merge_param,
+ bool inner, u32 *tirn);
void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, u32 *vhca_ids, unsigned int num_rqns);
void mlx5e_rss_disable(struct mlx5e_rss *rss);
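The rss.h hunk folds mlx5e_rss_init()'s seven positional arguments into two small parameter structs. Call sites (see the rx_res.c hunks below) fill them with designated initializers; a condensed sketch with hypothetical values:

        struct mlx5e_rss_params rss_params = {
                .inner_ft_support = true,       /* hypothetical */
                .drop_rqn = drop_rqn,
        };
        struct mlx5e_rss_init_params init_params = {
                .type = MLX5E_RSS_INIT_TIRS,
                .pkt_merge_param = &pkt_merge_param,
                .nch = nch,
                .max_nch = max_nch,
        };
        struct mlx5e_rss *rss = mlx5e_rss_init(mdev, &rss_params, &init_params);
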
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
index a2acbfee2b77..ac26a32845d0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
@@ -54,17 +54,30 @@ static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
unsigned int init_nch)
{
bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
+ struct mlx5e_rss_init_params init_params;
+ struct mlx5e_rss_params rss_params;
struct mlx5e_rss *rss;
if (WARN_ON(res->rss[0]))
return -EINVAL;
- rss = mlx5e_rss_init(res->mdev, inner_ft_support, res->drop_rqn,
- &res->pkt_merge_param, MLX5E_RSS_INIT_TIRS, init_nch, res->max_nch);
+ init_params = (struct mlx5e_rss_init_params) {
+ .type = MLX5E_RSS_INIT_TIRS,
+ .pkt_merge_param = &res->pkt_merge_param,
+ .nch = init_nch,
+ .max_nch = res->max_nch,
+ };
+
+ rss_params = (struct mlx5e_rss_params) {
+ .inner_ft_support = inner_ft_support,
+ .drop_rqn = res->drop_rqn,
+ };
+
+ rss = mlx5e_rss_init(res->mdev, &rss_params, &init_params);
if (IS_ERR(rss))
return PTR_ERR(rss);
- mlx5e_rss_set_indir_uniform(rss, init_nch);
+ mlx5e_rss_set_indir_uniform(rss, init_params.nch);
res->rss[0] = rss;
@@ -74,18 +87,30 @@ static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 rss_idx, unsigned int init_nch)
{
bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
+ struct mlx5e_rss_init_params init_params;
+ struct mlx5e_rss_params rss_params;
struct mlx5e_rss *rss;
if (WARN_ON_ONCE(res->rss[rss_idx]))
return -ENOSPC;
- rss = mlx5e_rss_init(res->mdev, inner_ft_support, res->drop_rqn,
- &res->pkt_merge_param, MLX5E_RSS_INIT_NO_TIRS, init_nch,
- res->max_nch);
+ init_params = (struct mlx5e_rss_init_params) {
+ .type = MLX5E_RSS_INIT_NO_TIRS,
+ .pkt_merge_param = &res->pkt_merge_param,
+ .nch = init_nch,
+ .max_nch = res->max_nch,
+ };
+
+ rss_params = (struct mlx5e_rss_params) {
+ .inner_ft_support = inner_ft_support,
+ .drop_rqn = res->drop_rqn,
+ };
+
+ rss = mlx5e_rss_init(res->mdev, &rss_params, &init_params);
if (IS_ERR(rss))
return PTR_ERR(rss);
- mlx5e_rss_set_indir_uniform(rss, init_nch);
+ mlx5e_rss_set_indir_uniform(rss, init_params.nch);
if (res->rss_active) {
u32 *vhca_ids = get_vhca_ids(res, 0);
@@ -438,7 +463,7 @@ static void mlx5e_rx_res_ptp_destroy(struct mlx5e_rx_res *res)
struct mlx5e_rx_res *
mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features features,
unsigned int max_nch, u32 drop_rqn,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
+ const struct mlx5e_packet_merge_param *pkt_merge_param,
unsigned int init_nch)
{
bool multi_vhca = features & MLX5E_RX_RES_FEATURE_MULTI_VHCA;
@@ -454,7 +479,7 @@ mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features featu
res->max_nch = max_nch;
res->drop_rqn = drop_rqn;
- res->pkt_merge_param = *init_pkt_merge_param;
+ res->pkt_merge_param = *pkt_merge_param;
init_rwsem(&res->pkt_merge_param_sem);
err = mlx5e_rx_res_rss_init_def(res, init_nch);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
index 1d049e2aa264..65a857c215e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
@@ -27,7 +27,7 @@ enum mlx5e_rx_res_features {
struct mlx5e_rx_res *
mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features features,
unsigned int max_nch, u32 drop_rqn,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param,
+ const struct mlx5e_packet_merge_param *pkt_merge_param,
unsigned int init_nch);
void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c
index a4263137fef5..d3db6146fcad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c
@@ -136,8 +136,8 @@ mlx5_ct_fs_hmfs_matcher_get(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
hws_bwc_matcher = mlx5_ct_fs_hmfs_matcher_create(fs, tbl, spec, ipv4, tcp, gre);
if (IS_ERR(hws_bwc_matcher)) {
netdev_warn(fs->netdev,
- "ct_fs_hmfs: failed to create bwc matcher (nat %d, ipv4 %d, tcp %d, gre %d), err: %ld\n",
- nat, ipv4, tcp, gre, PTR_ERR(hws_bwc_matcher));
+ "ct_fs_hmfs: failed to create bwc matcher (nat %d, ipv4 %d, tcp %d, gre %d), err: %pe\n",
+ nat, ipv4, tcp, gre, hws_bwc_matcher);
hmfs_matcher = ERR_CAST(hws_bwc_matcher);
goto out_unlock;
@@ -173,6 +173,8 @@ static void mlx5_ct_fs_hmfs_fill_rule_actions(struct mlx5_ct_fs_hmfs *fs_hmfs,
memset(rule_actions, 0, NUM_CT_HMFS_RULES * sizeof(*rule_actions));
rule_actions[0].action = mlx5_fc_get_hws_action(fs_hmfs->ctx, attr->counter);
+ rule_actions[0].counter.offset =
+ attr->counter->id - attr->counter->bulk->base_id;
/* Modify header is special, it may require extra arguments outside the action itself. */
if (mh_action->mh_data) {
rule_actions[1].modify_header.offset = mh_action->mh_data->offset;
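The added counter offset above reflects that the HWS counter action is shared per counter bulk, so each rule must identify its own counter by its position inside that bulk:

        /* offset of this counter within its allocation bulk */
        rule_actions[0].counter.offset =
                attr->counter->id - attr->counter->bulk->base_id;
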
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
index 0c97c5899904..4d6924b644c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c
@@ -148,8 +148,8 @@ mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp
dr_matcher = mlx5_ct_fs_smfs_matcher_create(fs, tbl, ipv4, tcp, gre, prio);
if (IS_ERR(dr_matcher)) {
netdev_warn(fs->netdev,
- "ct_fs_smfs: failed to create matcher (nat %d, ipv4 %d, tcp %d, gre %d), err: %ld\n",
- nat, ipv4, tcp, gre, PTR_ERR(dr_matcher));
+ "ct_fs_smfs: failed to create matcher (nat %d, ipv4 %d, tcp %d, gre %d), err: %pe\n",
+ nat, ipv4, tcp, gre, dr_matcher);
smfs_matcher = ERR_CAST(dr_matcher);
goto out_unlock;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c
index 8afcec0c5d3c..896f718483c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c
@@ -93,8 +93,8 @@ mlx5e_int_port_create_rx_rule(struct mlx5_eswitch *esw,
flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
&flow_act, dest, 1);
if (IS_ERR(flow_rule))
- mlx5_core_warn(esw->dev, "ft offloads: Failed to add internal vport rx rule err %ld\n",
- PTR_ERR(flow_rule));
+ mlx5_core_warn(esw->dev, "ft offloads: Failed to add internal vport rx rule err %pe\n",
+ flow_rule);
kvfree(spec);
@@ -322,8 +322,8 @@ mlx5e_tc_int_port_init(struct mlx5e_priv *priv)
sizeof(u32) * 2,
(1 << ESW_VPORT_BITS) - 1, true);
if (IS_ERR(int_port_priv->metadata_mapping)) {
- mlx5_core_warn(priv->mdev, "Can't allocate metadata mapping of int port offload, err=%ld\n",
- PTR_ERR(int_port_priv->metadata_mapping));
+ mlx5_core_warn(priv->mdev, "Can't allocate metadata mapping of int port offload, err=%pe\n",
+ int_port_priv->metadata_mapping);
goto err_mapping;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 2162d776fe35..a14f216048cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2018 Mellanox Technologies. */
-#include <net/inet_ecn.h>
+#include <net/flow.h>
+#include <net/inet_dscp.h>
#include <net/vxlan.h>
#include <net/gre.h>
#include <net/geneve.h>
@@ -233,7 +234,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
int err;
/* add the IP fields */
- attr.fl.fl4.flowi4_tos = tun_key->tos & ~INET_ECN_MASK;
+ attr.fl.fl4.flowi4_dscp = inet_dsfield_to_dscp(tun_key->tos);
attr.fl.fl4.daddr = tun_key->u.ipv4.dst;
attr.fl.fl4.saddr = tun_key->u.ipv4.src;
attr.ttl = tun_key->ttl;
@@ -349,7 +350,7 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
int err;
/* add the IP fields */
- attr.fl.fl4.flowi4_tos = tun_key->tos & ~INET_ECN_MASK;
+ attr.fl.fl4.flowi4_dscp = inet_dsfield_to_dscp(tun_key->tos);
attr.fl.fl4.daddr = tun_key->u.ipv4.dst;
attr.fl.fl4.saddr = tun_key->u.ipv4.src;
attr.ttl = tun_key->ttl;
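The two routing hunks above move from the raw TOS field to the typed dscp_t API. inet_dsfield_to_dscp() masks the two ECN bits off the DS field, so the old explicit "& ~INET_ECN_MASK" becomes implicit and the type system keeps ECN bits out of route lookups:

        /* equivalent views of the same conversion */
        dscp_t dscp = inet_dsfield_to_dscp(tos); /* (tos & 0xfc), typed */
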
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index a0fc76a1bc08..0735d10f2bac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -172,8 +172,8 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
&reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {
- mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %lu\n",
- PTR_ERR(e->pkt_reformat));
+ mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %pe\n",
+ e->pkt_reformat);
return;
}
e->flags |= MLX5_ENCAP_ENTRY_VALID;
@@ -1845,8 +1845,8 @@ static int mlx5e_tc_tun_fib_event(struct notifier_block *nb, unsigned long event
queue_work(priv->wq, &fib_work->work);
} else if (IS_ERR(fib_work)) {
NL_SET_ERR_MSG_MOD(info->extack, "Failed to init fib work");
- mlx5_core_warn(priv->mdev, "Failed to init fib work, %ld\n",
- PTR_ERR(fib_work));
+ mlx5_core_warn(priv->mdev, "Failed to init fib work, %pe\n",
+ fib_work);
}
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index b5c19396e096..996fcdb5a29d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -76,6 +76,7 @@ static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct mlx5e_trap *t)
ccp.ch_stats = t->stats;
ccp.napi = &t->napi;
ccp.ix = 0;
+ ccp.uar = mdev->priv.bfreg.up;
err = mlx5e_open_cq(priv->mdev, trap_moder, &rq_param->cqp, &ccp, &rq->cq);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 5dc04bbfc71b..6760bb0336df 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -309,10 +309,7 @@ mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
- struct mlx5_core_cq *mcq;
-
- mcq = &cq->mcq;
- mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
+ mlx5_cq_arm(&cq->mcq, MLX5_CQ_DB_REQ_NOT, cq->uar->map, cq->wq.cc);
}
static inline struct mlx5e_sq_dma *
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index d743e823362a..dbd88eb5c082 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -54,7 +54,7 @@ static void mlx5e_build_xsk_cparam(struct mlx5_core_dev *mdev,
struct mlx5e_channel_param *cparam)
{
mlx5e_build_rq_param(mdev, params, xsk, &cparam->rq);
- mlx5e_build_xdpsq_param(mdev, params, xsk, &cparam->xdp_sq);
+ mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
}
static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 33e32584b07f..8bef99e8367e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -42,6 +42,8 @@
#include <en_accel/macsec.h>
#include "en.h"
#include "en/txrx.h"
+#include "en_accel/psp.h"
+#include "en_accel/psp_rxtx.h"
#if IS_ENABLED(CONFIG_GENEVE)
#include <net/geneve.h>
@@ -119,6 +121,9 @@ struct mlx5e_accel_tx_state {
#ifdef CONFIG_MLX5_EN_IPSEC
struct mlx5e_accel_tx_ipsec_state ipsec;
#endif
+#ifdef CONFIG_MLX5_EN_PSP
+ struct mlx5e_accel_tx_psp_state psp_st;
+#endif
};
static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
@@ -137,6 +142,13 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
return false;
#endif
+#ifdef CONFIG_MLX5_EN_PSP
+ if (mlx5e_psp_is_offload(skb, dev)) {
+ if (unlikely(!mlx5e_psp_handle_tx_skb(dev, skb, &state->psp_st)))
+ return false;
+ }
+#endif
+
#ifdef CONFIG_MLX5_EN_IPSEC
if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) && xfrm_offload(skb)) {
if (unlikely(!mlx5e_ipsec_handle_tx_skb(dev, skb, &state->ipsec)))
@@ -157,8 +169,14 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
}
static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
struct mlx5e_accel_tx_state *state)
{
+#ifdef CONFIG_MLX5_EN_PSP
+ if (mlx5e_psp_is_offload_state(&state->psp_st))
+ return mlx5e_psp_tx_ids_len(&state->psp_st);
+#endif
+
#ifdef CONFIG_MLX5_EN_IPSEC
if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state))
return mlx5e_ipsec_tx_ids_len(&state->ipsec);
@@ -172,8 +190,14 @@ static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
static inline void mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
struct sk_buff *skb,
+ struct mlx5e_accel_tx_state *accel,
struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
+#ifdef CONFIG_MLX5_EN_PSP
+ if (mlx5e_psp_is_offload_state(&accel->psp_st))
+ mlx5e_psp_tx_build_eseg(priv, skb, &accel->psp_st, eseg);
+#endif
+
#ifdef CONFIG_MLX5_EN_IPSEC
if (xfrm_offload(skb))
mlx5e_ipsec_tx_build_eseg(priv, skb, eseg);
@@ -199,6 +223,11 @@ static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
mlx5e_ktls_handle_tx_wqe(&wqe->ctrl, &state->tls);
#endif
+#ifdef CONFIG_MLX5_EN_PSP
+ if (mlx5e_psp_is_offload_state(&state->psp_st))
+ mlx5e_psp_handle_tx_wqe(wqe, &state->psp_st, inlseg);
+#endif
+
#ifdef CONFIG_MLX5_EN_IPSEC
if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) &&
state->ipsec.xo && state->ipsec.tailen)
@@ -208,21 +237,40 @@ static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
static inline int mlx5e_accel_init_rx(struct mlx5e_priv *priv)
{
- return mlx5e_ktls_init_rx(priv);
+ int err;
+
+ err = mlx5_accel_psp_fs_init_rx_tables(priv);
+ if (err)
+ goto out;
+
+ err = mlx5e_ktls_init_rx(priv);
+ if (err)
+ mlx5_accel_psp_fs_cleanup_rx_tables(priv);
+
+out:
+ return err;
}
static inline void mlx5e_accel_cleanup_rx(struct mlx5e_priv *priv)
{
mlx5e_ktls_cleanup_rx(priv);
+ mlx5_accel_psp_fs_cleanup_rx_tables(priv);
}
static inline int mlx5e_accel_init_tx(struct mlx5e_priv *priv)
{
+ int err;
+
+ err = mlx5_accel_psp_fs_init_tx_tables(priv);
+ if (err)
+ return err;
+
return mlx5e_ktls_init_tx(priv);
}
static inline void mlx5e_accel_cleanup_tx(struct mlx5e_priv *priv)
{
mlx5e_ktls_cleanup_tx(priv);
+ mlx5_accel_psp_fs_cleanup_tx_tables(priv);
}
#endif /* __MLX5E_EN_ACCEL_H__ */
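The accel init/cleanup hunks above stack the new PSP tables under the existing kTLS setup: initialize in order, unwind the earlier stage if the later one fails, and tear down in reverse. A minimal sketch of the pattern, with init_a/init_b as hypothetical helpers:

        static int example_init(struct mlx5e_priv *priv)
        {
                int err;

                err = init_a(priv);     /* e.g. PSP tables */
                if (err)
                        return err;

                err = init_b(priv);     /* e.g. kTLS */
                if (err)
                        cleanup_a(priv); /* unwind the earlier stage */

                return err;
        }
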
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
index 4f83e3172767..1febdc5b81f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
@@ -138,7 +138,7 @@ struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
flow = mlx5_add_flow_rules(ft->t, spec, &flow_act, &dest, 1);
if (IS_ERR(flow))
- fs_err(fs, "mlx5_add_flow_rules() failed, flow is %ld\n", PTR_ERR(flow));
+ fs_err(fs, "mlx5_add_flow_rules() failed, flow is %pe\n", flow);
out:
kvfree(spec);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 00e77c71e201..0a4fb8c92268 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -772,6 +772,7 @@ static int mlx5e_xfrm_add_state(struct net_device *dev,
struct netlink_ext_ack *extack)
{
struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
+ bool allow_tunnel_mode = false;
struct mlx5e_ipsec *ipsec;
struct mlx5e_priv *priv;
gfp_t gfp;
@@ -803,6 +804,20 @@ static int mlx5e_xfrm_add_state(struct net_device *dev,
goto err_xfrm;
}
+ if (mlx5_eswitch_block_mode(priv->mdev))
+ goto unblock_ipsec;
+
+ if (x->props.mode == XFRM_MODE_TUNNEL &&
+ x->xso.type == XFRM_DEV_OFFLOAD_PACKET) {
+ allow_tunnel_mode = mlx5e_ipsec_fs_tunnel_allowed(sa_entry);
+ if (!allow_tunnel_mode) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Packet offload tunnel mode is disabled due to encap settings");
+ err = -EINVAL;
+ goto unblock_mode;
+ }
+ }
+
/* check esn */
if (x->props.flags & XFRM_STATE_ESN)
mlx5e_ipsec_update_esn_state(sa_entry);
@@ -817,7 +832,7 @@ static int mlx5e_xfrm_add_state(struct net_device *dev,
err = mlx5_ipsec_create_work(sa_entry);
if (err)
- goto unblock_ipsec;
+ goto unblock_encap;
err = mlx5e_ipsec_create_dwork(sa_entry);
if (err)
@@ -832,14 +847,6 @@ static int mlx5e_xfrm_add_state(struct net_device *dev,
if (err)
goto err_hw_ctx;
- if (x->props.mode == XFRM_MODE_TUNNEL &&
- x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
- !mlx5e_ipsec_fs_tunnel_enabled(sa_entry)) {
- NL_SET_ERR_MSG_MOD(extack, "Packet offload tunnel mode is disabled due to encap settings");
- err = -EINVAL;
- goto err_add_rule;
- }
-
/* We use *_bh() variant because xfrm_timer_handler(), which runs
* in softirq context, can reach our state delete logic and we need
* xa_erase_bh() there.
@@ -855,8 +862,7 @@ static int mlx5e_xfrm_add_state(struct net_device *dev,
queue_delayed_work(ipsec->wq, &sa_entry->dwork->dwork,
MLX5_IPSEC_RESCHED);
- if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
- x->props.mode == XFRM_MODE_TUNNEL) {
+ if (allow_tunnel_mode) {
xa_lock_bh(&ipsec->sadb);
__xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id,
MLX5E_IPSEC_TUNNEL_SA);
@@ -865,6 +871,11 @@ static int mlx5e_xfrm_add_state(struct net_device *dev,
out:
x->xso.offload_handle = (unsigned long)sa_entry;
+ if (allow_tunnel_mode)
+ mlx5_eswitch_unblock_encap(priv->mdev);
+
+ mlx5_eswitch_unblock_mode(priv->mdev);
+
return 0;
err_add_rule:
@@ -877,6 +888,11 @@ release_work:
if (sa_entry->work)
kfree(sa_entry->work->data);
kfree(sa_entry->work);
+unblock_encap:
+ if (allow_tunnel_mode)
+ mlx5_eswitch_unblock_encap(priv->mdev);
+unblock_mode:
+ mlx5_eswitch_unblock_mode(priv->mdev);
unblock_ipsec:
mlx5_eswitch_unblock_ipsec(priv->mdev);
err_xfrm:
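In the mlx5e_xfrm_add_state() rework above, the eswitch mode/encap blocking moves ahead of HW object creation so invalid tunnel-mode offloads are rejected early, and the goto ladder gains labels that mirror acquisition order in reverse:

        /*
         * acquire: block ipsec -> block mode -> (maybe) take encap block
         * unwind:  unblock_encap -> unblock_mode -> unblock_ipsec
         */
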
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index ffcd0cdeb775..5d7c15abfcaf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -185,6 +185,7 @@ struct mlx5e_ipsec_rx_create_attr {
u32 family;
int prio;
int pol_level;
+ int pol_miss_level;
int sa_level;
int status_level;
enum mlx5_flow_namespace_type chains_ns;
@@ -318,7 +319,7 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry);
-bool mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry);
+bool mlx5e_ipsec_fs_tunnel_allowed(struct mlx5e_ipsec_sa_entry *sa_entry);
int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 98b6a3a623f9..bf1d2769d4f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -61,6 +61,7 @@ struct mlx5e_ipsec_rx {
struct mlx5_flow_table *pol_miss_ft;
struct mlx5_flow_handle *pol_miss_rule;
u8 allow_tunnel_mode : 1;
+ u8 ttc_rules_added : 1;
};
/* IPsec RX flow steering */
@@ -585,6 +586,20 @@ out:
return err;
}
+static struct mlx5_flow_destination
+ipsec_rx_decrypted_pkt_def_dest(struct mlx5_ttc_table *ttc, u32 family)
+{
+ struct mlx5_flow_destination dest;
+
+ if (!mlx5_ttc_has_esp_flow_group(ttc))
+ return mlx5_ttc_get_default_dest(ttc, family2tt(family));
+
+ dest.ft = mlx5_get_ttc_flow_table(ttc);
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+
+ return dest;
+}
+
static void ipsec_rx_update_default_dest(struct mlx5e_ipsec_rx *rx,
struct mlx5_flow_destination *old_dest,
struct mlx5_flow_destination *new_dest)
@@ -598,10 +613,10 @@ static void handle_ipsec_rx_bringup(struct mlx5e_ipsec *ipsec, u32 family)
{
struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET);
struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(ipsec->fs, false);
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
struct mlx5_flow_destination old_dest, new_dest;
- old_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false),
- family2tt(family));
+ old_dest = ipsec_rx_decrypted_pkt_def_dest(ttc, family);
mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, ns, &old_dest, family,
MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL, MLX5E_NIC_PRIO);
@@ -614,12 +629,12 @@ static void handle_ipsec_rx_bringup(struct mlx5e_ipsec *ipsec, u32 family)
static void handle_ipsec_rx_cleanup(struct mlx5e_ipsec *ipsec, u32 family)
{
struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET);
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
struct mlx5_flow_destination old_dest, new_dest;
old_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family);
old_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- new_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false),
- family2tt(family));
+ new_dest = ipsec_rx_decrypted_pkt_def_dest(ttc, family);
ipsec_rx_update_default_dest(rx, &old_dest, &new_dest);
mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, ipsec->mdev);
@@ -669,10 +684,13 @@ static void ipsec_mpv_work_handler(struct work_struct *_work)
complete(&work->master_priv->ipsec->comp);
}
-static void ipsec_rx_ft_disconnect(struct mlx5e_ipsec *ipsec, u32 family)
+static void ipsec_rx_ft_disconnect(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx, u32 family)
{
struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
+ if (rx->ttc_rules_added)
+ mlx5_ttc_destroy_ipsec_rules(ttc);
mlx5_ttc_fwd_default_dest(ttc, family2tt(family));
}
@@ -707,7 +725,7 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
{
/* disconnect */
if (rx != ipsec->rx_esw)
- ipsec_rx_ft_disconnect(ipsec, family);
+ ipsec_rx_ft_disconnect(ipsec, rx, family);
mlx5_del_flow_rules(rx->sa.rule);
mlx5_destroy_flow_group(rx->sa.group);
@@ -747,6 +765,7 @@ static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
attr->family = family;
attr->prio = MLX5E_NIC_PRIO;
attr->pol_level = MLX5E_ACCEL_FS_POL_FT_LEVEL;
+ attr->pol_miss_level = MLX5E_ACCEL_FS_POL_MISS_FT_LEVEL;
attr->sa_level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
attr->status_level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
attr->chains_ns = MLX5_FLOW_NAMESPACE_KERNEL;
@@ -763,7 +782,7 @@ static int ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
if (rx == ipsec->rx_esw)
return mlx5_esw_ipsec_rx_status_pass_dest_get(ipsec, dest);
- *dest = mlx5_ttc_get_default_dest(attr->ttc, family2tt(attr->family));
+ *dest = ipsec_rx_decrypted_pkt_def_dest(attr->ttc, attr->family);
err = mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, attr->ns, dest,
attr->family, MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
attr->prio);
@@ -806,10 +825,16 @@ static void ipsec_rx_ft_connect(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx_create_attr *attr)
{
struct mlx5_flow_destination dest = {};
+ struct mlx5_ttc_table *ttc, *inner_ttc;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = rx->ft.sa;
- mlx5_ttc_fwd_dest(attr->ttc, family2tt(attr->family), &dest);
+ if (mlx5_ttc_fwd_dest(attr->ttc, family2tt(attr->family), &dest))
+ return;
+
+ ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
+ inner_ttc = mlx5e_fs_get_ttc(ipsec->fs, true);
+ rx->ttc_rules_added = !mlx5_ttc_create_ipsec_rules(ttc, inner_ttc);
}
static int ipsec_rx_chains_create_miss(struct mlx5e_ipsec *ipsec,
@@ -833,7 +858,7 @@ static int ipsec_rx_chains_create_miss(struct mlx5e_ipsec *ipsec,
ft_attr.max_fte = 1;
ft_attr.autogroup.max_num_groups = 1;
- ft_attr.level = attr->pol_level;
+ ft_attr.level = attr->pol_miss_level;
ft_attr.prio = attr->prio;
ft = mlx5_create_auto_grouped_flow_table(attr->ns, &ft_attr);
@@ -1044,7 +1069,9 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
/* Create FT */
if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
- rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
+ rx->allow_tunnel_mode =
+ mlx5_eswitch_block_encap(mdev, rx == ipsec->rx_esw);
+
if (rx->allow_tunnel_mode)
flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 1, 2, flags);
@@ -1285,7 +1312,9 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
goto err_status_rule;
if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
- tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
+ tx->allow_tunnel_mode =
+ mlx5_eswitch_block_encap(mdev, tx == ipsec->tx_esw);
+
if (tx->allow_tunnel_mode)
flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 1, 4, flags);
@@ -1704,8 +1733,8 @@ static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8
modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, num_of_actions, action);
if (IS_ERR(modify_hdr)) {
- mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n",
- PTR_ERR(modify_hdr));
+ mlx5_core_err(mdev, "Failed to allocate modify_header %pe\n",
+ modify_hdr);
return PTR_ERR(modify_hdr);
}
@@ -2821,18 +2850,24 @@ void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry)
memcpy(sa_entry, &sa_entry_shadow, sizeof(*sa_entry));
}
-bool mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry)
+bool mlx5e_ipsec_fs_tunnel_allowed(struct mlx5e_ipsec_sa_entry *sa_entry)
{
- struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
- struct mlx5e_ipsec_rx *rx;
- struct mlx5e_ipsec_tx *tx;
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ struct xfrm_state *x = sa_entry->x;
+ bool from_fdb;
- rx = ipsec_rx(sa_entry->ipsec, attrs->addrs.family, attrs->type);
- tx = ipsec_tx(sa_entry->ipsec, attrs->type);
- if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
- return tx->allow_tunnel_mode;
+ if (x->xso.dir == XFRM_DEV_OFFLOAD_OUT) {
+ struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, x->xso.type);
+
+ from_fdb = (tx == ipsec->tx_esw);
+ } else {
+ struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, x->props.family,
+ x->xso.type);
+
+ from_fdb = (rx == ipsec->rx_esw);
+ }
- return rx->allow_tunnel_mode;
+ return mlx5_eswitch_block_encap(ipsec->mdev, from_fdb);
}
void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 3cc640669247..45b0d19e735c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -40,7 +40,7 @@
#include "en/txrx.h"
/* Bit31: IPsec marker, Bit30: reserved, Bit29-24: IPsec syndrome, Bit23-0: IPsec obj id */
-#define MLX5_IPSEC_METADATA_MARKER(metadata) (((metadata) >> 31) & 0x1)
+#define MLX5_IPSEC_METADATA_MARKER(metadata) ((((metadata) >> 30) & 0x3) == 0x2)
#define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(5, 0))
#define MLX5_IPSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(23, 0))
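The marker test widens from one bit to a two-bit compare because PSP, added later in this series, claims metadata bits [31:30] = 0b11 (see MLX5E_PSP_MARKER_BIT in psp.c below), while IPsec uses 0b10; matching on bit 31 alone would have misclassified PSP completions as IPsec:

        /* metadata 0x80000000: bits[31:30] = 0b10 -> IPsec marker matches  */
        /* metadata 0xc0000000: bits[31:30] = 0b11 -> PSP, IPsec test fails */
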
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index 65ccb33edafb..d7a11ff9bbdb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -498,9 +498,9 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
depth += sizeof(struct iphdr);
th = (void *)iph + sizeof(struct iphdr);
- sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
- iph->saddr, th->source, iph->daddr,
- th->dest, netdev->ifindex);
+ sk = inet_lookup_established(net, iph->saddr, th->source,
+ iph->daddr, th->dest,
+ netdev->ifindex);
#if IS_ENABLED(CONFIG_IPV6)
} else {
struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph;
@@ -508,8 +508,7 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
depth += sizeof(struct ipv6hdr);
th = (void *)ipv6h + sizeof(struct ipv6hdr);
- sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
- &ipv6h->saddr, th->source,
+ sk = __inet6_lookup_established(net, &ipv6h->saddr, th->source,
&ipv6h->daddr, ntohs(th->dest),
netdev->ifindex, 0);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index 6ab02f3fc291..528b04d4de41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -1676,7 +1676,7 @@ void mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec *macsec,
if (!fs_id)
return;
- eseg->flow_table_metadata = cpu_to_be32(MLX5_ETH_WQE_FT_META_MACSEC | fs_id << 2);
+ eseg->flow_table_metadata = cpu_to_be32(MLX5_MACSEC_TX_METADATA(fs_id));
}
void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
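Only the call site is visible in this hunk, so the macro body is an assumption: MLX5_MACSEC_TX_METADATA() presumably wraps the open-coded expression it replaces, along the lines of

        /* assumed shape, inferred from the removed line above */
        #define MLX5_MACSEC_TX_METADATA(fs_id) \
                (MLX5_ETH_WQE_FT_META_MACSEC | (fs_id) << 2)
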
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c
new file mode 100644
index 000000000000..8565cfe8d7dc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c
@@ -0,0 +1,952 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+#include <linux/mlx5/device.h>
+#include <net/psp.h>
+#include <linux/psp.h>
+#include "mlx5_core.h"
+#include "psp.h"
+#include "lib/crypto.h"
+#include "en_accel/psp.h"
+#include "fs_core.h"
+
+enum accel_fs_psp_type {
+ ACCEL_FS_PSP4,
+ ACCEL_FS_PSP6,
+ ACCEL_FS_PSP_NUM_TYPES,
+};
+
+enum accel_psp_syndrome {
+ PSP_OK = 0,
+ PSP_ICV_FAIL,
+ PSP_BAD_TRAILER,
+};
+
+struct mlx5e_psp_tx {
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ struct mlx5_flow_handle *rule;
+ struct mutex mutex; /* Protect PSP TX steering */
+ u32 refcnt;
+};
+
+struct mlx5e_psp_rx_err {
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_handle *drop_rule;
+ struct mlx5_modify_hdr *copy_modify_hdr;
+};
+
+struct mlx5e_accel_fs_psp_prot {
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *miss_group;
+ struct mlx5_flow_handle *miss_rule;
+ struct mlx5_flow_destination default_dest;
+ struct mlx5e_psp_rx_err rx_err;
+ u32 refcnt;
+ struct mutex prot_mutex; /* protect ESP4/ESP6 protocol */
+ struct mlx5_flow_handle *def_rule;
+};
+
+struct mlx5e_accel_fs_psp {
+ struct mlx5e_accel_fs_psp_prot fs_prot[ACCEL_FS_PSP_NUM_TYPES];
+};
+
+struct mlx5e_psp_fs {
+ struct mlx5_core_dev *mdev;
+ struct mlx5e_psp_tx *tx_fs;
+ /* Rx manage */
+ struct mlx5e_flow_steering *fs;
+ struct mlx5e_accel_fs_psp *rx_fs;
+};
+
+/* PSP RX flow steering */
+static enum mlx5_traffic_types fs_psp2tt(enum accel_fs_psp_type i)
+{
+ if (i == ACCEL_FS_PSP4)
+ return MLX5_TT_IPV4_UDP;
+
+ return MLX5_TT_IPV6_UDP;
+}
+
+static void accel_psp_fs_rx_err_del_rules(struct mlx5e_psp_fs *fs,
+ struct mlx5e_psp_rx_err *rx_err)
+{
+ if (rx_err->drop_rule) {
+ mlx5_del_flow_rules(rx_err->drop_rule);
+ rx_err->drop_rule = NULL;
+ }
+
+ if (rx_err->rule) {
+ mlx5_del_flow_rules(rx_err->rule);
+ rx_err->rule = NULL;
+ }
+
+ if (rx_err->copy_modify_hdr) {
+ mlx5_modify_header_dealloc(fs->mdev, rx_err->copy_modify_hdr);
+ rx_err->copy_modify_hdr = NULL;
+ }
+}
+
+static void accel_psp_fs_rx_err_destroy_ft(struct mlx5e_psp_fs *fs,
+ struct mlx5e_psp_rx_err *rx_err)
+{
+ accel_psp_fs_rx_err_del_rules(fs, rx_err);
+
+ if (rx_err->ft) {
+ mlx5_destroy_flow_table(rx_err->ft);
+ rx_err->ft = NULL;
+ }
+}
+
+static void accel_psp_setup_syndrome_match(struct mlx5_flow_spec *spec,
+ enum accel_psp_syndrome syndrome)
+{
+ void *misc_params_2;
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+ misc_params_2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
+ MLX5_SET_TO_ONES(fte_match_set_misc2, misc_params_2, psp_syndrome);
+ misc_params_2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
+ MLX5_SET(fte_match_set_misc2, misc_params_2, psp_syndrome, syndrome);
+}
+
+static int accel_psp_fs_rx_err_add_rule(struct mlx5e_psp_fs *fs,
+ struct mlx5e_accel_fs_psp_prot *fs_prot,
+ struct mlx5e_psp_rx_err *rx_err)
+{
+ u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ struct mlx5_core_dev *mdev = fs->mdev;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_modify_hdr *modify_hdr;
+ struct mlx5_flow_handle *fte;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ /* Action to copy 7 bit psp_syndrome to regB[23:29] */
+ MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
+ MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_PSP_SYNDROME);
+ MLX5_SET(copy_action_in, action, src_offset, 0);
+ MLX5_SET(copy_action_in, action, length, 7);
+ MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+ MLX5_SET(copy_action_in, action, dst_offset, 23);
+
+ modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
+ 1, action);
+ if (IS_ERR(modify_hdr)) {
+ err = PTR_ERR(modify_hdr);
+ mlx5_core_err(mdev,
+ "fail to alloc psp copy modify_header_id err=%d\n", err);
+ goto out_spec;
+ }
+
+ accel_psp_setup_syndrome_match(spec, PSP_OK);
+ /* create fte */
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ flow_act.modify_hdr = modify_hdr;
+ fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act,
+ &fs_prot->default_dest, 1);
+ if (IS_ERR(fte)) {
+ err = PTR_ERR(fte);
+ mlx5_core_err(mdev, "fail to add psp rx err copy rule err=%d\n", err);
+ goto out;
+ }
+ rx_err->rule = fte;
+
+ /* add default drop rule */
+ memset(spec, 0, sizeof(*spec));
+ memset(&flow_act, 0, sizeof(flow_act));
+ /* create fte */
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+ fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act, NULL, 0);
+ if (IS_ERR(fte)) {
+ err = PTR_ERR(fte);
+ mlx5_core_err(mdev, "fail to add psp rx err drop rule err=%d\n", err);
+ goto out_drop_rule;
+ }
+ rx_err->drop_rule = fte;
+ rx_err->copy_modify_hdr = modify_hdr;
+
+ goto out_spec;
+
+out_drop_rule:
+ mlx5_del_flow_rules(rx_err->rule);
+ rx_err->rule = NULL;
+out:
+ mlx5_modify_header_dealloc(mdev, modify_hdr);
+out_spec:
+ kfree(spec);
+ return err;
+}
+
+static int accel_psp_fs_rx_err_create_ft(struct mlx5e_psp_fs *fs,
+ struct mlx5e_accel_fs_psp_prot *fs_prot,
+ struct mlx5e_psp_rx_err *rx_err)
+{
+ struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs->fs, false);
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_table *ft;
+ int err;
+
+ ft_attr.max_fte = 2;
+ ft_attr.autogroup.max_num_groups = 2;
+ ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL; /* MLX5E_ACCEL_FS_TCP_FT_LEVEL */
+ ft_attr.prio = MLX5E_NIC_PRIO;
+ ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(fs->mdev, "fail to create psp rx inline ft err=%d\n", err);
+ return err;
+ }
+
+ rx_err->ft = ft;
+ err = accel_psp_fs_rx_err_add_rule(fs, fs_prot, rx_err);
+ if (err)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ mlx5_destroy_flow_table(ft);
+ rx_err->ft = NULL;
+ return err;
+}
+
+static void accel_psp_fs_rx_fs_destroy(struct mlx5e_accel_fs_psp_prot *fs_prot)
+{
+ if (fs_prot->def_rule) {
+ mlx5_del_flow_rules(fs_prot->def_rule);
+ fs_prot->def_rule = NULL;
+ }
+
+ if (fs_prot->miss_rule) {
+ mlx5_del_flow_rules(fs_prot->miss_rule);
+ fs_prot->miss_rule = NULL;
+ }
+
+ if (fs_prot->miss_group) {
+ mlx5_destroy_flow_group(fs_prot->miss_group);
+ fs_prot->miss_group = NULL;
+ }
+
+ if (fs_prot->ft) {
+ mlx5_destroy_flow_table(fs_prot->ft);
+ fs_prot->ft = NULL;
+ }
+}
+
+static void setup_fte_udp_psp(struct mlx5_flow_spec *spec, u16 udp_port)
+{
+ spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria, udp_dport, 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, udp_dport, udp_port);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, spec->match_criteria, ip_protocol);
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, ip_protocol, IPPROTO_UDP);
+}
+
+static int accel_psp_fs_rx_create_ft(struct mlx5e_psp_fs *fs,
+ struct mlx5e_accel_fs_psp_prot *fs_prot)
+{
+ struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs->fs, false);
+ u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_modify_hdr *modify_hdr = NULL;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_core_dev *mdev = fs->mdev;
+ struct mlx5_flow_group *miss_group;
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ struct mlx5_flow_table *ft;
+ u32 *flow_group_in;
+ int err = 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!flow_group_in || !spec) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* Create FT */
+ ft_attr.max_fte = 2;
+ ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
+ ft_attr.prio = MLX5E_NIC_PRIO;
+ ft_attr.autogroup.num_reserved_entries = 1;
+ ft_attr.autogroup.max_num_groups = 1;
+ ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "fail to create psp rx ft err=%d\n", err);
+ goto out_err;
+ }
+ fs_prot->ft = ft;
+
+ /* Create miss_group */
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
+ miss_group = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(miss_group)) {
+ err = PTR_ERR(miss_group);
+ mlx5_core_err(mdev, "fail to create psp rx miss_group err=%d\n", err);
+ goto out_err;
+ }
+ fs_prot->miss_group = miss_group;
+
+ /* Create miss rule */
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &fs_prot->default_dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "fail to create psp rx miss_rule err=%d\n", err);
+ goto out_err;
+ }
+ fs_prot->miss_rule = rule;
+
+ /* Add default Rx psp rule */
+ setup_fte_udp_psp(spec, PSP_DEFAULT_UDP_PORT);
+ flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_PSP;
+ /* Set bits [31:30] of metadata REG_B as the PSP marker; bits [29:23]
+ * (psp_syndrome) are filled in by the error FT.
+ */
+#define MLX5E_PSP_MARKER_BIT (BIT(30) | BIT(31))
+ MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+ MLX5_SET(set_action_in, action, data, MLX5E_PSP_MARKER_BIT);
+ MLX5_SET(set_action_in, action, offset, 0);
+ MLX5_SET(set_action_in, action, length, 32);
+
+ modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL, 1, action);
+ if (IS_ERR(modify_hdr)) {
+ err = PTR_ERR(modify_hdr);
+ mlx5_core_err(mdev, "fail to alloc psp set modify_header_id err=%d\n", err);
+ modify_hdr = NULL;
+ goto out_err;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ flow_act.modify_hdr = modify_hdr;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = fs_prot->rx_err.ft;
+ rule = mlx5_add_flow_rules(fs_prot->ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "fail to add psp rule Rx decryption, err=%d, flow_act.action = %#04X\n",
+ err, flow_act.action);
+ goto out_err;
+ }
+
+ fs_prot->def_rule = rule;
+ goto out;
+
+out_err:
+ accel_psp_fs_rx_fs_destroy(fs_prot);
+out:
+ kvfree(flow_group_in);
+ kvfree(spec);
+ return err;
+}
+
+static int accel_psp_fs_rx_destroy(struct mlx5e_psp_fs *fs, enum accel_fs_psp_type type)
+{
+ struct mlx5e_accel_fs_psp_prot *fs_prot;
+ struct mlx5e_accel_fs_psp *accel_psp;
+
+ accel_psp = fs->rx_fs;
+
+ /* The netdev unreg already happened, so all offloaded rules are already removed */
+ fs_prot = &accel_psp->fs_prot[type];
+
+ accel_psp_fs_rx_fs_destroy(fs_prot);
+
+ accel_psp_fs_rx_err_destroy_ft(fs, &fs_prot->rx_err);
+
+ return 0;
+}
+
+static int accel_psp_fs_rx_create(struct mlx5e_psp_fs *fs, enum accel_fs_psp_type type)
+{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs->fs, false);
+ struct mlx5e_accel_fs_psp_prot *fs_prot;
+ struct mlx5e_accel_fs_psp *accel_psp;
+ int err;
+
+ accel_psp = fs->rx_fs;
+ fs_prot = &accel_psp->fs_prot[type];
+
+ fs_prot->default_dest = mlx5_ttc_get_default_dest(ttc, fs_psp2tt(type));
+
+ err = accel_psp_fs_rx_err_create_ft(fs, fs_prot, &fs_prot->rx_err);
+ if (err)
+ return err;
+
+ err = accel_psp_fs_rx_create_ft(fs, fs_prot);
+ if (err)
+ accel_psp_fs_rx_err_destroy_ft(fs, &fs_prot->rx_err);
+
+ return err;
+}
+
+static int accel_psp_fs_rx_ft_get(struct mlx5e_psp_fs *fs, enum accel_fs_psp_type type)
+{
+ struct mlx5e_accel_fs_psp_prot *fs_prot;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5e_accel_fs_psp *accel_psp;
+ struct mlx5_ttc_table *ttc;
+ int err = 0;
+
+ if (!fs || !fs->rx_fs)
+ return -EINVAL;
+
+ ttc = mlx5e_fs_get_ttc(fs->fs, false);
+ accel_psp = fs->rx_fs;
+ fs_prot = &accel_psp->fs_prot[type];
+ mutex_lock(&fs_prot->prot_mutex);
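+ /* Only the first reference creates the tables and redirects TTC */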
+ if (fs_prot->refcnt++)
+ goto out;
+
+ /* create FT */
+ err = accel_psp_fs_rx_create(fs, type);
+ if (err) {
+ fs_prot->refcnt--;
+ goto out;
+ }
+
+ /* connect */
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = fs_prot->ft;
+ mlx5_ttc_fwd_dest(ttc, fs_psp2tt(type), &dest);
+
+out:
+ mutex_unlock(&fs_prot->prot_mutex);
+ return err;
+}
+
+static void accel_psp_fs_rx_ft_put(struct mlx5e_psp_fs *fs, enum accel_fs_psp_type type)
+{
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs->fs, false);
+ struct mlx5e_accel_fs_psp_prot *fs_prot;
+ struct mlx5e_accel_fs_psp *accel_psp;
+
+ accel_psp = fs->rx_fs;
+ fs_prot = &accel_psp->fs_prot[type];
+ mutex_lock(&fs_prot->prot_mutex);
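+ /* Tear down and restore the TTC default only on the last reference */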
+ if (--fs_prot->refcnt)
+ goto out;
+
+ /* disconnect */
+ mlx5_ttc_fwd_default_dest(ttc, fs_psp2tt(type));
+
+ /* remove FT */
+ accel_psp_fs_rx_destroy(fs, type);
+
+out:
+ mutex_unlock(&fs_prot->prot_mutex);
+}
+
+static void accel_psp_fs_cleanup_rx(struct mlx5e_psp_fs *fs)
+{
+ struct mlx5e_accel_fs_psp_prot *fs_prot;
+ struct mlx5e_accel_fs_psp *accel_psp;
+ enum accel_fs_psp_type i;
+
+ if (!fs->rx_fs)
+ return;
+
+ accel_psp = fs->rx_fs;
+ for (i = 0; i < ACCEL_FS_PSP_NUM_TYPES; i++) {
+ fs_prot = &accel_psp->fs_prot[i];
+ mutex_destroy(&fs_prot->prot_mutex);
+ WARN_ON(fs_prot->refcnt);
+ }
+ kfree(fs->rx_fs);
+ fs->rx_fs = NULL;
+}
+
+static int accel_psp_fs_init_rx(struct mlx5e_psp_fs *fs)
+{
+ struct mlx5e_accel_fs_psp_prot *fs_prot;
+ struct mlx5e_accel_fs_psp *accel_psp;
+ enum accel_fs_psp_type i;
+
+ accel_psp = kzalloc(sizeof(*accel_psp), GFP_KERNEL);
+ if (!accel_psp)
+ return -ENOMEM;
+
+ for (i = 0; i < ACCEL_FS_PSP_NUM_TYPES; i++) {
+ fs_prot = &accel_psp->fs_prot[i];
+ mutex_init(&fs_prot->prot_mutex);
+ }
+
+ fs->rx_fs = accel_psp;
+
+ return 0;
+}
+
+void mlx5_accel_psp_fs_cleanup_rx_tables(struct mlx5e_priv *priv)
+{
+ int i;
+
+ if (!priv->psp)
+ return;
+
+ for (i = 0; i < ACCEL_FS_PSP_NUM_TYPES; i++)
+ accel_psp_fs_rx_ft_put(priv->psp->fs, i);
+}
+
+int mlx5_accel_psp_fs_init_rx_tables(struct mlx5e_priv *priv)
+{
+ struct mlx5e_psp_fs *fs;
+ int err, i;
+
+ if (!priv->psp)
+ return 0;
+
+ fs = priv->psp->fs;
+ for (i = 0; i < ACCEL_FS_PSP_NUM_TYPES; i++) {
+ err = accel_psp_fs_rx_ft_get(fs, i);
+ if (err)
+ goto out_err;
+ }
+
+ return 0;
+
+out_err:
+ while (--i >= 0)
+ accel_psp_fs_rx_ft_put(fs, i);
+
+ return err;
+}
+
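+/* The TX FT holds a single rule: PSP UDP traffic is encrypted and allowed. */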
+static int accel_psp_fs_tx_create_ft_table(struct mlx5e_psp_fs *fs)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_core_dev *mdev = fs->mdev;
+ struct mlx5_flow_act flow_act = {};
+ u32 *in, *mc, *outer_headers_c;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ struct mlx5e_psp_tx *tx_fs;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!spec || !in) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ ft_attr.max_fte = 1;
+#define MLX5E_PSP_PRIO 0
+ ft_attr.prio = MLX5E_PSP_PRIO;
+#define MLX5E_PSP_LEVEL 0
+ ft_attr.level = MLX5E_PSP_LEVEL;
+ ft_attr.autogroup.max_num_groups = 1;
+
+ tx_fs = fs->tx_fs;
+ ft = mlx5_create_flow_table(tx_fs->ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "PSP: fail to add psp tx flow table, err = %d\n", err);
+ goto out;
+ }
+
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ fg = mlx5_create_flow_group(ft, in);
+ if (IS_ERR(fg)) {
+ err = PTR_ERR(fg);
+ mlx5_core_err(mdev, "PSP: fail to add psp tx flow group, err = %d\n", err);
+ goto err_create_fg;
+ }
+
+ setup_fte_udp_psp(spec, PSP_DEFAULT_UDP_PORT);
+ flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_PSP;
+ flow_act.flags |= FLOW_ACT_NO_APPEND;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT;
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, NULL, 0);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "PSP: fail to add psp tx flow rule, err = %d\n", err);
+ goto err_add_flow_rule;
+ }
+
+ tx_fs->ft = ft;
+ tx_fs->fg = fg;
+ tx_fs->rule = rule;
+ goto out;
+
+err_add_flow_rule:
+ mlx5_destroy_flow_group(fg);
+err_create_fg:
+ mlx5_destroy_flow_table(ft);
+out:
+ kvfree(in);
+ kvfree(spec);
+ return err;
+}
+
+static void accel_psp_fs_tx_destroy(struct mlx5e_psp_tx *tx_fs)
+{
+ if (!tx_fs->ft)
+ return;
+
+ mlx5_del_flow_rules(tx_fs->rule);
+ mlx5_destroy_flow_group(tx_fs->fg);
+ mlx5_destroy_flow_table(tx_fs->ft);
+}
+
+static int accel_psp_fs_tx_ft_get(struct mlx5e_psp_fs *fs)
+{
+ struct mlx5e_psp_tx *tx_fs = fs->tx_fs;
+ int err = 0;
+
+ mutex_lock(&tx_fs->mutex);
+ if (tx_fs->refcnt++)
+ goto out;
+
+ err = accel_psp_fs_tx_create_ft_table(fs);
+ if (err)
+ tx_fs->refcnt--;
+out:
+ mutex_unlock(&tx_fs->mutex);
+ return err;
+}
+
+static void accel_psp_fs_tx_ft_put(struct mlx5e_psp_fs *fs)
+{
+ struct mlx5e_psp_tx *tx_fs = fs->tx_fs;
+
+ mutex_lock(&tx_fs->mutex);
+ if (--tx_fs->refcnt)
+ goto out;
+
+ accel_psp_fs_tx_destroy(tx_fs);
+out:
+ mutex_unlock(&tx_fs->mutex);
+}
+
+static void accel_psp_fs_cleanup_tx(struct mlx5e_psp_fs *fs)
+{
+ struct mlx5e_psp_tx *tx_fs = fs->tx_fs;
+
+ if (!tx_fs)
+ return;
+
+ mutex_destroy(&tx_fs->mutex);
+ WARN_ON(tx_fs->refcnt);
+ kfree(tx_fs);
+ fs->tx_fs = NULL;
+}
+
+static int accel_psp_fs_init_tx(struct mlx5e_psp_fs *fs)
+{
+ struct mlx5_flow_namespace *ns;
+ struct mlx5e_psp_tx *tx_fs;
+
+ ns = mlx5_get_flow_namespace(fs->mdev, MLX5_FLOW_NAMESPACE_EGRESS_IPSEC);
+ if (!ns)
+ return -EOPNOTSUPP;
+
+ tx_fs = kzalloc(sizeof(*tx_fs), GFP_KERNEL);
+ if (!tx_fs)
+ return -ENOMEM;
+
+ mutex_init(&tx_fs->mutex);
+ tx_fs->ns = ns;
+ fs->tx_fs = tx_fs;
+ return 0;
+}
+
+void mlx5_accel_psp_fs_cleanup_tx_tables(struct mlx5e_priv *priv)
+{
+ if (!priv->psp)
+ return;
+
+ accel_psp_fs_tx_ft_put(priv->psp->fs);
+}
+
+int mlx5_accel_psp_fs_init_tx_tables(struct mlx5e_priv *priv)
+{
+ if (!priv->psp)
+ return 0;
+
+ return accel_psp_fs_tx_ft_get(priv->psp->fs);
+}
+
+static void mlx5e_accel_psp_fs_cleanup(struct mlx5e_psp_fs *fs)
+{
+ accel_psp_fs_cleanup_rx(fs);
+ accel_psp_fs_cleanup_tx(fs);
+ kfree(fs);
+}
+
+static struct mlx5e_psp_fs *mlx5e_accel_psp_fs_init(struct mlx5e_priv *priv)
+{
+ struct mlx5e_psp_fs *fs;
+ int err = 0;
+
+ fs = kzalloc(sizeof(*fs), GFP_KERNEL);
+ if (!fs)
+ return ERR_PTR(-ENOMEM);
+
+ fs->mdev = priv->mdev;
+ err = accel_psp_fs_init_tx(fs);
+ if (err)
+ goto err_tx;
+
+ fs->fs = priv->fs;
+ err = accel_psp_fs_init_rx(fs);
+ if (err)
+ goto err_rx;
+
+ return fs;
+
+err_rx:
+ accel_psp_fs_cleanup_tx(fs);
+err_tx:
+ kfree(fs);
+ return ERR_PTR(err);
+}
+
+static int
+mlx5e_psp_set_config(struct psp_dev *psd, struct psp_dev_config *conf,
+ struct netlink_ext_ack *extack)
+{
+ return 0; /* TODO: this should actually do things to the device */
+}
+
+static int
+mlx5e_psp_generate_key_spi(struct mlx5_core_dev *mdev,
+ enum mlx5_psp_gen_spi_in_key_size keysz,
+ unsigned int keysz_bytes,
+ struct psp_key_parsed *key)
+{
+ u32 out[MLX5_ST_SZ_DW(psp_gen_spi_out) + MLX5_ST_SZ_DW(key_spi)] = {};
+ u32 in[MLX5_ST_SZ_DW(psp_gen_spi_in)] = {};
+ void *outkey;
+ int err;
+
+ WARN_ON_ONCE(keysz_bytes > PSP_MAX_KEY);
+
+ MLX5_SET(psp_gen_spi_in, in, opcode, MLX5_CMD_OP_PSP_GEN_SPI);
+ MLX5_SET(psp_gen_spi_in, in, key_size, keysz);
+ MLX5_SET(psp_gen_spi_in, in, num_of_spi, 1);
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ outkey = MLX5_ADDR_OF(psp_gen_spi_out, out, key_spi);
+ key->spi = cpu_to_be32(MLX5_GET(key_spi, outkey, spi));
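+ /* The key occupies the trailing keysz_bytes of the 32-byte key field */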
+ memcpy(key->key, MLX5_ADDR_OF(key_spi, outkey, key) + 32 - keysz_bytes,
+ keysz_bytes);
+
+ return 0;
+}
+
+static int
+mlx5e_psp_rx_spi_alloc(struct psp_dev *psd, u32 version,
+ struct psp_key_parsed *assoc,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = netdev_priv(psd->main_netdev);
+ enum mlx5_psp_gen_spi_in_key_size keysz;
+ u8 keysz_bytes;
+
+ switch (version) {
+ case PSP_VERSION_HDR0_AES_GCM_128:
+ keysz = MLX5_PSP_GEN_SPI_IN_KEY_SIZE_128;
+ keysz_bytes = 16;
+ break;
+ case PSP_VERSION_HDR0_AES_GCM_256:
+ keysz = MLX5_PSP_GEN_SPI_IN_KEY_SIZE_256;
+ keysz_bytes = 32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return mlx5e_psp_generate_key_spi(priv->mdev, keysz, keysz_bytes, assoc);
+}
+
+struct psp_key {
+ u32 id;
+};
+
+static int mlx5e_psp_assoc_add(struct psp_dev *psd, struct psp_assoc *pas,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_priv *priv = netdev_priv(psd->main_netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct psp_key_parsed *tx = &pas->tx;
+ struct mlx5e_psp *psp = priv->psp;
+ struct psp_key *nkey;
+ int err;
+
+ nkey = (struct psp_key *)pas->drv_data;
+
+ err = mlx5_create_encryption_key(mdev, tx->key,
+ psp_key_size(pas->version),
+ MLX5_ACCEL_OBJ_PSP_KEY,
+ &nkey->id);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to create encryption key (err = %d)\n", err);
+ return err;
+ }
+
+ atomic_inc(&psp->tx_key_cnt);
+ return 0;
+}
+
+static void mlx5e_psp_assoc_del(struct psp_dev *psd, struct psp_assoc *pas)
+{
+ struct mlx5e_priv *priv = netdev_priv(psd->main_netdev);
+ struct mlx5e_psp *psp = priv->psp;
+ struct psp_key *nkey;
+
+ nkey = (struct psp_key *)pas->drv_data;
+ mlx5_destroy_encryption_key(priv->mdev, nkey->id);
+ atomic_dec(&psp->tx_key_cnt);
+}
+
+static int mlx5e_psp_rotate_key(struct mlx5_core_dev *mdev)
+{
+ u32 in[MLX5_ST_SZ_DW(psp_rotate_key_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(psp_rotate_key_out)];
+
+ MLX5_SET(psp_rotate_key_in, in, opcode,
+ MLX5_CMD_OP_PSP_ROTATE_KEY);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static int
+mlx5e_psp_key_rotate(struct psp_dev *psd, struct netlink_ext_ack *exack)
+{
+ struct mlx5e_priv *priv = netdev_priv(psd->main_netdev);
+
+ /* no support for protecting against external rotations */
+ psd->generation = 0;
+
+ return mlx5e_psp_rotate_key(priv->mdev);
+}
+
+static struct psp_dev_ops mlx5_psp_ops = {
+ .set_config = mlx5e_psp_set_config,
+ .rx_spi_alloc = mlx5e_psp_rx_spi_alloc,
+ .tx_key_add = mlx5e_psp_assoc_add,
+ .tx_key_del = mlx5e_psp_assoc_del,
+ .key_rotate = mlx5e_psp_key_rotate,
+};
+
+void mlx5e_psp_unregister(struct mlx5e_priv *priv)
+{
+ if (!priv->psp || !priv->psp->psp)
+ return;
+
+ psp_dev_unregister(priv->psp->psp);
+}
+
+void mlx5e_psp_register(struct mlx5e_priv *priv)
+{
+ /* priv->psp is NULL when the FW caps are missing */
+ if (!priv->psp)
+ return;
+
+ priv->psp->caps.assoc_drv_spc = sizeof(u32);
+ priv->psp->caps.versions = 1 << PSP_VERSION_HDR0_AES_GCM_128;
+ if (MLX5_CAP_PSP(priv->mdev, psp_crypto_esp_aes_gcm_256_encrypt) &&
+ MLX5_CAP_PSP(priv->mdev, psp_crypto_esp_aes_gcm_256_decrypt))
+ priv->psp->caps.versions |= 1 << PSP_VERSION_HDR0_AES_GCM_256;
+
+ priv->psp->psp = psp_dev_create(priv->netdev, &mlx5_psp_ops,
+ &priv->psp->caps, NULL);
+ if (IS_ERR(priv->psp->psp))
+ mlx5_core_err(priv->mdev, "PSP failed to register due to %pe\n",
+ priv->psp->psp);
+}
+
+int mlx5e_psp_init(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_psp_fs *fs;
+ struct mlx5e_psp *psp;
+ int err;
+
+ if (!mlx5_is_psp_device(mdev)) {
+ mlx5_core_dbg(mdev, "PSP offload not supported\n");
+ return 0;
+ }
+
+ if (!MLX5_CAP_ETH(mdev, swp)) {
+ mlx5_core_dbg(mdev, "SWP not supported\n");
+ return 0;
+ }
+
+ if (!MLX5_CAP_ETH(mdev, swp_csum)) {
+ mlx5_core_dbg(mdev, "SWP checksum not supported\n");
+ return 0;
+ }
+
+ if (!MLX5_CAP_ETH(mdev, swp_csum_l4_partial)) {
+ mlx5_core_dbg(mdev, "SWP L4 partial checksum not supported\n");
+ return 0;
+ }
+
+ if (!MLX5_CAP_ETH(mdev, swp_lso)) {
+ mlx5_core_dbg(mdev, "PSP LSO not supported\n");
+ return 0;
+ }
+
+ psp = kzalloc(sizeof(*psp), GFP_KERNEL);
+ if (!psp)
+ return -ENOMEM;
+
+ priv->psp = psp;
+ fs = mlx5e_accel_psp_fs_init(priv);
+ if (IS_ERR(fs)) {
+ err = PTR_ERR(fs);
+ goto out_err;
+ }
+
+ psp->fs = fs;
+
+ mlx5_core_dbg(priv->mdev, "PSP attached to netdevice\n");
+ return 0;
+
+out_err:
+ priv->psp = NULL;
+ kfree(psp);
+ return err;
+}
+
+void mlx5e_psp_cleanup(struct mlx5e_priv *priv)
+{
+ struct mlx5e_psp *psp = priv->psp;
+
+ if (!psp)
+ return;
+
+ WARN_ON(atomic_read(&psp->tx_key_cnt));
+ mlx5e_accel_psp_fs_cleanup(psp->fs);
+ priv->psp = NULL;
+ kfree(psp);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.h
new file mode 100644
index 000000000000..42bb671fb2cb
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5E_ACCEL_PSP_H__
+#define __MLX5E_ACCEL_PSP_H__
+#if IS_ENABLED(CONFIG_MLX5_EN_PSP)
+#include <net/psp/types.h>
+#include "en.h"
+
+struct mlx5e_psp {
+ struct psp_dev *psp;
+ struct psp_dev_caps caps;
+ struct mlx5e_psp_fs *fs;
+ atomic_t tx_key_cnt;
+};
+
+static inline bool mlx5_is_psp_device(struct mlx5_core_dev *mdev)
+{
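+ /* PSP offload requires the PSP general cap plus AES-GCM-128 crypto
+ * offload in both directions.
+ */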
+ if (!MLX5_CAP_GEN(mdev, psp))
+ return false;
+
+ if (!MLX5_CAP_PSP(mdev, psp_crypto_offload) ||
+ !MLX5_CAP_PSP(mdev, psp_crypto_esp_aes_gcm_128_encrypt) ||
+ !MLX5_CAP_PSP(mdev, psp_crypto_esp_aes_gcm_128_decrypt))
+ return false;
+
+ return true;
+}
+
+int mlx5_accel_psp_fs_init_rx_tables(struct mlx5e_priv *priv);
+void mlx5_accel_psp_fs_cleanup_rx_tables(struct mlx5e_priv *priv);
+int mlx5_accel_psp_fs_init_tx_tables(struct mlx5e_priv *priv);
+void mlx5_accel_psp_fs_cleanup_tx_tables(struct mlx5e_priv *priv);
+void mlx5e_psp_register(struct mlx5e_priv *priv);
+void mlx5e_psp_unregister(struct mlx5e_priv *priv);
+int mlx5e_psp_init(struct mlx5e_priv *priv);
+void mlx5e_psp_cleanup(struct mlx5e_priv *priv);
+#else
+static inline int mlx5_accel_psp_fs_init_rx_tables(struct mlx5e_priv *priv)
+{
+ return 0;
+}
+
+static inline void mlx5_accel_psp_fs_cleanup_rx_tables(struct mlx5e_priv *priv) { }
+static inline int mlx5_accel_psp_fs_init_tx_tables(struct mlx5e_priv *priv)
+{
+ return 0;
+}
+
+static inline void mlx5_accel_psp_fs_cleanup_tx_tables(struct mlx5e_priv *priv) { }
+static inline bool mlx5_is_psp_device(struct mlx5_core_dev *mdev)
+{
+ return false;
+}
+
+static inline void mlx5e_psp_register(struct mlx5e_priv *priv) { }
+static inline void mlx5e_psp_unregister(struct mlx5e_priv *priv) { }
+static inline int mlx5e_psp_init(struct mlx5e_priv *priv) { return 0; }
+static inline void mlx5e_psp_cleanup(struct mlx5e_priv *priv) { }
+#endif /* CONFIG_MLX5_EN_PSP */
+#endif /* __MLX5E_ACCEL_PSP_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c
new file mode 100644
index 000000000000..828bff1137af
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <net/protocol.h>
+#include <net/udp.h>
+#include <net/ip6_checksum.h>
+#include <net/psp/types.h>
+
+#include "en.h"
+#include "psp.h"
+#include "en_accel/psp_rxtx.h"
+#include "en_accel/psp.h"
+
+enum {
+ MLX5E_PSP_OFFLOAD_RX_SYNDROME_DECRYPTED,
+ MLX5E_PSP_OFFLOAD_RX_SYNDROME_AUTH_FAILED,
+ MLX5E_PSP_OFFLOAD_RX_SYNDROME_BAD_TRAILER,
+};
+
+static void mlx5e_psp_set_swp(struct sk_buff *skb,
+ struct mlx5e_accel_tx_psp_state *psp_st,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ /* Tunnel Mode:
+ * SWP: OutL3 InL3 InL4
+ * Pkt: MAC IP ESP IP L4
+ *
+ * Transport Mode:
+ * SWP: OutL3 OutL4
+ * Pkt: MAC IP ESP L4
+ *
+ * Tunnel (VXLAN TCP/UDP) over Transport Mode
+ * SWP: OutL3 InL3 InL4
+ * Pkt: MAC IP ESP UDP VXLAN IP L4
+ */
+ u8 inner_ipproto = 0;
+ struct ethhdr *eth;
+
+ /* Shared settings */
+ eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
+ if (skb->protocol == htons(ETH_P_IPV6))
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
+
+ if (skb->inner_protocol_type == ENCAP_TYPE_IPPROTO) {
+ inner_ipproto = skb->inner_ipproto;
+ /* Set SWP additional flags for packet of type IP|UDP|PSP|[ TCP | UDP ] */
+ switch (inner_ipproto) {
+ case IPPROTO_UDP:
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+ fallthrough;
+ case IPPROTO_TCP:
+ eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
+ break;
+ default:
+ break;
+ }
+ } else {
+ /* Ethernet-encapsulated tunneling, e.g. VXLAN */
+ if (skb->inner_protocol_type != ENCAP_TYPE_ETHER)
+ return;
+
+ eth = (struct ethhdr *)skb_inner_mac_header(skb);
+ switch (ntohs(eth->h_proto)) {
+ case ETH_P_IP:
+ inner_ipproto = ((struct iphdr *)((char *)skb->data +
+ skb_inner_network_offset(skb)))->protocol;
+ break;
+ case ETH_P_IPV6:
+ inner_ipproto = ((struct ipv6hdr *)((char *)skb->data +
+ skb_inner_network_offset(skb)))->nexthdr;
+ break;
+ default:
+ break;
+ }
+
+ /* Tunnel (VXLAN TCP/UDP) over transport-mode PSP, i.e. the PSP payload is a VXLAN tunnel */
+ switch (inner_ipproto) {
+ case IPPROTO_UDP:
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+ fallthrough;
+ case IPPROTO_TCP:
+ eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+ eseg->swp_inner_l4_offset =
+ (skb->csum_start + skb->head - skb->data) / 2;
+ if (skb->protocol == htons(ETH_P_IPV6))
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+ break;
+ default:
+ break;
+ }
+
+ psp_st->inner_ipproto = inner_ipproto;
+ }
+}
+
+static bool mlx5e_psp_set_state(struct mlx5e_priv *priv,
+ struct sk_buff *skb,
+ struct mlx5e_accel_tx_psp_state *psp_st)
+{
+ struct psp_assoc *pas;
+ bool ret = false;
+
+ rcu_read_lock();
+ pas = psp_skb_get_assoc_rcu(skb);
+ if (!pas)
+ goto out;
+
+ ret = true;
+ psp_st->tailen = PSP_TRL_SIZE;
+ psp_st->spi = pas->tx.spi;
+ psp_st->ver = pas->version;
+ psp_st->keyid = *(u32 *)pas->drv_data;
+
+out:
+ rcu_read_unlock();
+ return ret;
+}
+
+bool mlx5e_psp_offload_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe)
+{
+ u32 psp_meta_data = be32_to_cpu(cqe->ft_metadata);
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ u16 dev_id = priv->psp->psp->id;
+ bool strip_icv = true;
+ u8 generation = 0;
+
+ /* TBD: report errors as SW counters to ethtool, any further handling? */
+ if (MLX5_PSP_METADATA_SYNDROME(psp_meta_data) != MLX5E_PSP_OFFLOAD_RX_SYNDROME_DECRYPTED)
+ goto drop;
+
+ if (psp_dev_rcv(skb, dev_id, generation, strip_icv))
+ goto drop;
+
+ skb->decrypted = 1;
+ return false;
+
+drop:
+ kfree_skb(skb);
+ return true;
+}
+
+void mlx5e_psp_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5e_accel_tx_psp_state *psp_st,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ if (!mlx5_is_psp_device(priv->mdev))
+ return;
+
+ if (unlikely(skb->protocol != htons(ETH_P_IP) &&
+ skb->protocol != htons(ETH_P_IPV6)))
+ return;
+
+ mlx5e_psp_set_swp(skb, psp_st, eseg);
+ /* Workaround for PSP LSO on ConnectX-7: clear the outer/inner L3 offsets */
+ eseg->swp_outer_l3_offset = 0;
+ eseg->swp_inner_l3_offset = 0;
+
+ eseg->flow_table_metadata |= cpu_to_be32(psp_st->keyid);
+ eseg->trailer |= cpu_to_be32(MLX5_ETH_WQE_INSERT_TRAILER) |
+ cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC);
+}
+
+void mlx5e_psp_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
+ struct mlx5e_accel_tx_psp_state *psp_st,
+ struct mlx5_wqe_inline_seg *inlseg)
+{
+ inlseg->byte_count = cpu_to_be32(psp_st->tailen | MLX5_INLINE_SEG);
+}
+
+bool mlx5e_psp_handle_tx_skb(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct mlx5e_accel_tx_psp_state *psp_st)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct net *net = sock_net(skb->sk);
+ const struct ipv6hdr *ip6;
+ struct tcphdr *th;
+
+ if (!mlx5e_psp_set_state(priv, skb, psp_st))
+ return true;
+
+ /* psp_encap of the packet */
+ if (!psp_dev_encapsulate(net, skb, psp_st->spi, psp_st->ver, 0)) {
+ kfree_skb_reason(skb, SKB_DROP_REASON_PSP_OUTPUT);
+ return false;
+ }
+ if (skb_is_gso(skb)) {
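+ /* For LSO, recompute the inner TCP pseudo-header checksum against
+ * the IPv6 header of the now PSP-encapsulated packet.
+ */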
+ ip6 = ipv6_hdr(skb);
+ th = inner_tcp_hdr(skb);
+
+ th->check = ~tcp_v6_check(skb_shinfo(skb)->gso_size + inner_tcp_hdrlen(skb), &ip6->saddr,
+ &ip6->daddr, 0);
+ }
+
+ return true;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.h
new file mode 100644
index 000000000000..70289c921bd6
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5E_PSP_RXTX_H__
+#define __MLX5E_PSP_RXTX_H__
+
+#include <linux/skbuff.h>
+#include <net/xfrm.h>
+#include <net/psp.h>
+#include "en.h"
+#include "en/txrx.h"
+
+/* Bits 31:30: PSP marker, bits 29:23: PSP syndrome, bits 22:0: PSP obj id */
+#define MLX5_PSP_METADATA_MARKER(metadata) ((((metadata) >> 30) & 0x3) == 0x3)
+#define MLX5_PSP_METADATA_SYNDROME(metadata) (((metadata) >> 23) & GENMASK(6, 0))
+#define MLX5_PSP_METADATA_HANDLE(metadata) ((metadata) & GENMASK(22, 0))
+
+struct mlx5e_accel_tx_psp_state {
+ u32 tailen;
+ u32 keyid;
+ __be32 spi;
+ u8 inner_ipproto;
+ u8 ver;
+};
+
+#ifdef CONFIG_MLX5_EN_PSP
+static inline bool mlx5e_psp_is_offload_state(struct mlx5e_accel_tx_psp_state *psp_state)
+{
+ return (psp_state->tailen != 0);
+}
+
+static inline bool mlx5e_psp_is_offload(struct sk_buff *skb, struct net_device *netdev)
+{
+ bool ret;
+
+ rcu_read_lock();
+ ret = !!psp_skb_get_assoc_rcu(skb);
+ rcu_read_unlock();
+ return ret;
+}
+
+bool mlx5e_psp_handle_tx_skb(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct mlx5e_accel_tx_psp_state *psp_st);
+
+void mlx5e_psp_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5e_accel_tx_psp_state *psp_st,
+ struct mlx5_wqe_eth_seg *eseg);
+
+void mlx5e_psp_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
+ struct mlx5e_accel_tx_psp_state *psp_st,
+ struct mlx5_wqe_inline_seg *inlseg);
+
+static inline bool mlx5e_psp_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_accel_tx_psp_state *psp_st,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ u8 inner_ipproto;
+
+ if (!mlx5e_psp_is_offload_state(psp_st))
+ return false;
+
+ inner_ipproto = psp_st->inner_ipproto;
+ eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
+ if (inner_ipproto) {
+ eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
+ if (inner_ipproto == IPPROTO_TCP || inner_ipproto == IPPROTO_UDP)
+ eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
+ sq->stats->csum_partial_inner++;
+ } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
+ sq->stats->csum_partial_inner++;
+ }
+
+ return true;
+}
+
+static inline unsigned int mlx5e_psp_tx_ids_len(struct mlx5e_accel_tx_psp_state *psp_st)
+{
+ return psp_st->tailen;
+}
+
+static inline bool mlx5e_psp_is_rx_flow(struct mlx5_cqe64 *cqe)
+{
+ return MLX5_PSP_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata));
+}
+
+bool mlx5e_psp_offload_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe);
+#else
+static inline bool mlx5e_psp_is_offload_state(struct mlx5e_accel_tx_psp_state *psp_state)
+{
+ return false;
+}
+
+static inline bool mlx5e_psp_is_offload(struct sk_buff *skb, struct net_device *netdev)
+{
+ return false;
+}
+
+static inline bool mlx5e_psp_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_accel_tx_psp_state *psp_st,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ return false;
+}
+
+static inline bool mlx5e_psp_is_rx_flow(struct mlx5_cqe64 *cqe)
+{
+ return false;
+}
+
+static inline bool mlx5e_psp_offload_handle_rx_skb(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe)
+{
+ return false;
+}
+#endif /* CONFIG_MLX5_EN_PSP */
+#endif /* __MLX5E_PSP_RXTX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 6ed3a32b7e22..30424ccad584 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -30,6 +30,7 @@
* SOFTWARE.
*/
+#include "devlink.h"
#include "en.h"
#include "lib/crypto.h"
@@ -140,9 +141,22 @@ err_close_tises:
return err;
}
+static unsigned int
+mlx5e_get_devlink_param_num_doorbells(struct mlx5_core_dev *dev)
+{
+ const u32 param_id = DEVLINK_PARAM_GENERIC_ID_NUM_DOORBELLS;
+ struct devlink *devlink = priv_to_devlink(dev);
+ union devlink_param_value val;
+ int err;
+
+ err = devl_param_driverinit_value_get(devlink, param_id, &val);
+ return err ? MLX5_DEFAULT_NUM_DOORBELLS : val.vu32;
+}
+
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises)
{
struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;
+ unsigned int num_doorbells, i;
int err;
err = mlx5_core_alloc_pd(mdev, &res->pdn);
@@ -163,17 +177,30 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises)
goto err_dealloc_transport_domain;
}
- err = mlx5_alloc_bfreg(mdev, &res->bfreg, false, false);
- if (err) {
- mlx5_core_err(mdev, "alloc bfreg failed, %d\n", err);
+ num_doorbells = min(mlx5e_get_devlink_param_num_doorbells(mdev),
+ mlx5e_get_max_num_channels(mdev));
+ res->bfregs = kcalloc(num_doorbells, sizeof(*res->bfregs), GFP_KERNEL);
+ if (!res->bfregs) {
+ err = -ENOMEM;
goto err_destroy_mkey;
}
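+ /* Allocate up to num_doorbells doorbells; partial allocation is tolerated */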
+ for (i = 0; i < num_doorbells; i++) {
+ err = mlx5_alloc_bfreg(mdev, res->bfregs + i, false, false);
+ if (err) {
+ mlx5_core_warn(mdev,
+ "could only allocate %d/%d doorbells, err %d.\n",
+ i, num_doorbells, err);
+ break;
+ }
+ }
+ res->num_bfregs = i;
+
if (create_tises) {
err = mlx5e_create_tises(mdev, res->tisn);
if (err) {
mlx5_core_err(mdev, "alloc tises failed, %d\n", err);
- goto err_destroy_bfreg;
+ goto err_destroy_bfregs;
}
res->tisn_valid = true;
}
@@ -183,15 +210,17 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises)
mdev->mlx5e_res.dek_priv = mlx5_crypto_dek_init(mdev);
if (IS_ERR(mdev->mlx5e_res.dek_priv)) {
- mlx5_core_err(mdev, "crypto dek init failed, %ld\n",
- PTR_ERR(mdev->mlx5e_res.dek_priv));
+ mlx5_core_err(mdev, "crypto dek init failed, %pe\n",
+ mdev->mlx5e_res.dek_priv);
mdev->mlx5e_res.dek_priv = NULL;
}
return 0;
-err_destroy_bfreg:
- mlx5_free_bfreg(mdev, &res->bfreg);
+err_destroy_bfregs:
+ for (i = 0; i < res->num_bfregs; i++)
+ mlx5_free_bfreg(mdev, res->bfregs + i);
+ kfree(res->bfregs);
err_destroy_mkey:
mlx5_core_destroy_mkey(mdev, res->mkey);
err_dealloc_transport_domain:
@@ -209,7 +238,9 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
mdev->mlx5e_res.dek_priv = NULL;
if (res->tisn_valid)
mlx5e_destroy_tises(mdev, res->tisn);
- mlx5_free_bfreg(mdev, &res->bfreg);
+ for (unsigned int i = 0; i < res->num_bfregs; i++)
+ mlx5_free_bfreg(mdev, res->bfregs + i);
+ kfree(res->bfregs);
mlx5_core_destroy_mkey(mdev, res->mkey);
mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
mlx5_core_dealloc_pd(mdev, res->pdn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 5fe016e477b3..d166c0d5189e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -362,6 +362,7 @@ static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev,
static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
+ u8 buffer_ownership = MLX5_BUF_OWNERSHIP_UNKNOWN;
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
u32 old_cable_len = priv->dcbx.cable_len;
@@ -389,7 +390,14 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
if (MLX5_BUFFER_SUPPORTED(mdev)) {
pfc_new.pfc_en = (changed & MLX5E_PORT_BUFFER_PFC) ? pfc->pfc_en : curr_pfc_en;
- if (priv->dcbx.manual_buffer)
+ ret = mlx5_query_port_buffer_ownership(mdev,
+ &buffer_ownership);
+ if (ret)
+ netdev_err(dev,
+ "%s, Failed to get buffer ownership: %d\n",
+ __func__, ret);
+
+ if (buffer_ownership == MLX5_BUF_OWNERSHIP_SW_OWNED)
ret = mlx5e_port_manual_buffer_config(priv, changed,
dev->mtu, &pfc_new,
NULL, NULL);
@@ -982,7 +990,6 @@ static int mlx5e_dcbnl_setbuffer(struct net_device *dev,
if (!changed)
return 0;
- priv->dcbx.manual_buffer = true;
err = mlx5e_port_manual_buffer_config(priv, changed, dev->mtu, NULL,
buffer_size, prio2buffer);
return err;
@@ -1252,7 +1259,6 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
priv->dcbx.port_buff_cell_sz = mlx5e_query_port_buffers_cell_size(priv);
- priv->dcbx.manual_buffer = false;
priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN;
mlx5e_ets_init(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index d507366d773e..53e5ae252eac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1494,7 +1494,8 @@ static int mlx5e_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *
}
static int mlx5e_rxfh_hfunc_check(struct mlx5e_priv *priv,
- const struct ethtool_rxfh_param *rxfh)
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
unsigned int count;
@@ -1504,8 +1505,10 @@ static int mlx5e_rxfh_hfunc_check(struct mlx5e_priv *priv,
unsigned int xor8_max_channels = mlx5e_rqt_max_num_channels_allowed_for_xor8();
if (count > xor8_max_channels) {
- netdev_err(priv->netdev, "%s: Cannot set RSS hash function to XOR, current number of channels (%d) exceeds the maximum allowed for XOR8 RSS hfunc (%d)\n",
- __func__, count, xor8_max_channels);
+ NL_SET_ERR_MSG_FMT_MOD(
+ extack,
+ "Number of channels (%u) exceeds the max for XOR8 RSS (%u)",
+ count, xor8_max_channels);
return -EINVAL;
}
}
@@ -1524,7 +1527,7 @@ static int mlx5e_set_rxfh(struct net_device *dev,
mutex_lock(&priv->state_lock);
- err = mlx5e_rxfh_hfunc_check(priv, rxfh);
+ err = mlx5e_rxfh_hfunc_check(priv, rxfh, extack);
if (err)
goto unlock;
@@ -1550,7 +1553,7 @@ static int mlx5e_create_rxfh_context(struct net_device *dev,
mutex_lock(&priv->state_lock);
- err = mlx5e_rxfh_hfunc_check(priv, rxfh);
+ err = mlx5e_rxfh_hfunc_check(priv, rxfh, extack);
if (err)
goto unlock;
@@ -1590,7 +1593,7 @@ static int mlx5e_modify_rxfh_context(struct net_device *dev,
mutex_lock(&priv->state_lock);
- err = mlx5e_rxfh_hfunc_check(priv, rxfh);
+ err = mlx5e_rxfh_hfunc_check(priv, rxfh, extack);
if (err)
goto unlock;
@@ -1927,11 +1930,12 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
}
static void mlx5e_get_fec_stats(struct net_device *netdev,
- struct ethtool_fec_stats *fec_stats)
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- mlx5e_stats_fec_get(priv, fec_stats);
+ mlx5e_stats_fec_get(priv, fec_stats, hist);
}
static int mlx5e_get_fecparam(struct net_device *netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 265c4ca85f7d..8928d2dcd43f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -905,6 +905,9 @@ static void mlx5e_set_inner_ttc_params(struct mlx5e_flow_steering *fs,
ft_attr->prio = MLX5E_NIC_PRIO;
for (tt = 0; tt < MLX5_NUM_TT; tt++) {
+ if (mlx5_ttc_is_decrypted_esp_tt(tt))
+ continue;
+
ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
ttc_params->dests[tt].tir_num =
tt == MLX5_TT_ANY ?
@@ -914,9 +917,17 @@ static void mlx5e_set_inner_ttc_params(struct mlx5e_flow_steering *fs,
}
}
+static bool mlx5e_ipsec_rss_supported(struct mlx5_core_dev *mdev)
+{
+ return MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(mdev, ipsec_next_header) &&
+ MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(mdev, outer_l4_type_ext) &&
+ MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(mdev, inner_l4_type_ext);
+}
+
void mlx5e_set_ttc_params(struct mlx5e_flow_steering *fs,
struct mlx5e_rx_res *rx_res,
- struct ttc_params *ttc_params, bool tunnel)
+ struct ttc_params *ttc_params, bool tunnel,
+ bool ipsec_rss)
{
struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
@@ -927,7 +938,13 @@ void mlx5e_set_ttc_params(struct mlx5e_flow_steering *fs,
ft_attr->level = MLX5E_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_NIC_PRIO;
+ ttc_params->ipsec_rss = ipsec_rss &&
+ mlx5e_ipsec_rss_supported(fs->mdev);
+
for (tt = 0; tt < MLX5_NUM_TT; tt++) {
+ if (mlx5_ttc_is_decrypted_esp_tt(tt))
+ continue;
+
ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
ttc_params->dests[tt].tir_num =
tt == MLX5_TT_ANY ?
@@ -1293,7 +1310,7 @@ int mlx5e_create_ttc_table(struct mlx5e_flow_steering *fs,
{
struct ttc_params ttc_params = {};
- mlx5e_set_ttc_params(fs, rx_res, &ttc_params, true);
+ mlx5e_set_ttc_params(fs, rx_res, &ttc_params, true, true);
fs->ttc = mlx5_create_ttc_table(fs->mdev, &ttc_params);
return PTR_ERR_OR_ZERO(fs->ttc);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 21bb88c5d3dc..a56825921c23 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -52,6 +52,7 @@
#include "en_tc.h"
#include "en_rep.h"
#include "en_accel/ipsec.h"
+#include "en_accel/psp.h"
#include "en_accel/macsec.h"
#include "en_accel/en_accel.h"
#include "en_accel/ktls.h"
@@ -232,9 +233,13 @@ static int mlx5e_devcom_event_mpv(int event, void *my_data, void *event_data)
static int mlx5e_devcom_init_mpv(struct mlx5e_priv *priv, u64 *data)
{
+ struct mlx5_devcom_match_attr attr = {
+ .key.val = *data,
+ };
+
priv->devcom = mlx5_devcom_register_component(priv->mdev->priv.devc,
MLX5_DEVCOM_MPV,
- *data,
+ &attr,
mlx5e_devcom_event_mpv,
priv);
if (IS_ERR(priv->devcom))
@@ -777,13 +782,6 @@ static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
bitmap_free(rq->mpwqe.shampo->bitmap);
}
-static bool mlx5_rq_needs_separate_hd_pool(struct mlx5e_rq *rq)
-{
- struct netdev_rx_queue *rxq = __netif_get_rx_queue(rq->netdev, rq->ix);
-
- return !!rxq->mp_params.mp_ops;
-}
-
static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rqp,
@@ -822,7 +820,7 @@ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
hd_pool_size = (rq->mpwqe.shampo->hd_per_wqe * wq_size) /
MLX5E_SHAMPO_WQ_HEADER_PER_PAGE;
- if (mlx5_rq_needs_separate_hd_pool(rq)) {
+ if (netif_rxq_has_unreadable_mp(rq->netdev, rq->ix)) {
/* Separate page pool for shampo headers */
struct page_pool_params pp_params = { };
@@ -1536,7 +1534,7 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
sq->pdev = c->pdev;
sq->mkey_be = c->mkey_be;
sq->channel = c;
- sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+ sq->uar_map = c->bfreg->map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu) - ETH_FCS_LEN;
sq->xsk_pool = xsk_pool;
@@ -1621,7 +1619,7 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
int err;
sq->channel = c;
- sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+ sq->uar_map = c->bfreg->map;
sq->reserved_room = param->stop_room;
param->wq.db_numa_node = cpu_to_node(c->cpu);
@@ -1706,7 +1704,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->priv = c->priv;
sq->ch_ix = c->ix;
sq->txq_ix = txq_ix;
- sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+ sq->uar_map = c->bfreg->map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
sq->max_sq_mpw_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
@@ -1782,7 +1780,7 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
MLX5_SET(sqc, sqc, flush_in_error_en, 1);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
- MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
+ MLX5_SET(wq, wq, uar_page, csp->uar_page);
MLX5_SET(wq, wq, log_wq_pg_sz, csp->wq_ctrl->buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, csp->wq_ctrl->db.dma);
@@ -1886,6 +1884,7 @@ int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
+ csp.uar_page = c->bfreg->index;
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, qos_queue_group_id, &sq->sqn);
if (err)
goto err_free_txqsq;
@@ -2056,6 +2055,7 @@ static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = params->tx_min_inline_mode;
+ csp.uar_page = c->bfreg->index;
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
if (err)
goto err_free_icosq;
@@ -2116,6 +2116,7 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
+ csp.uar_page = c->bfreg->index;
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
@@ -2186,6 +2187,7 @@ static void mlx5e_close_xdpredirect_sq(struct mlx5e_xdpsq *xdpsq)
static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
struct net_device *netdev,
struct workqueue_struct *workqueue,
+ struct mlx5_uars_page *uar,
struct mlx5e_cq_param *param,
struct mlx5e_cq *cq)
{
@@ -2217,6 +2219,7 @@ static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
cq->mdev = mdev;
cq->netdev = netdev;
cq->workqueue = workqueue;
+ cq->uar = uar;
return 0;
}
@@ -2232,7 +2235,8 @@ static int mlx5e_alloc_cq(struct mlx5_core_dev *mdev,
param->wq.db_numa_node = ccp->node;
param->eq_ix = ccp->ix;
- err = mlx5e_alloc_cq_common(mdev, ccp->netdev, ccp->wq, param, cq);
+ err = mlx5e_alloc_cq_common(mdev, ccp->netdev, ccp->wq,
+ ccp->uar, param, cq);
cq->napi = ccp->napi;
cq->ch_stats = ccp->ch_stats;
@@ -2277,7 +2281,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
MLX5_SET(cqc, cqc, cq_period_mode, mlx5e_cq_period_mode(param->cq_period_mode));
MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
- MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, uar_page, cq->uar->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
@@ -2744,6 +2748,20 @@ void mlx5e_trigger_napi_sched(struct napi_struct *napi)
local_bh_enable();
}
+static void mlx5e_channel_pick_doorbell(struct mlx5e_channel *c)
+{
+ struct mlx5e_hw_objs *hw_objs = &c->mdev->mlx5e_res.hw_objs;
+
+ /* No dedicated Ethernet doorbells, use the global one. */
+ if (hw_objs->num_bfregs == 0) {
+ c->bfreg = &c->mdev->priv.bfreg;
+ return;
+ }
+
+ /* Round-robin between doorbells. */
+ c->bfreg = hw_objs->bfregs + c->vec_ix % hw_objs->num_bfregs;
+}
+
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_params *params,
struct xsk_buff_pool *xsk_pool,
@@ -2798,6 +2816,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->aff_mask = irq_get_effective_affinity_mask(irq);
c->lag_port = mlx5e_enumerate_lag_port(mdev, ix);
+ mlx5e_channel_pick_doorbell(c);
+
netif_napi_add_config_locked(netdev, &c->napi, mlx5e_napi_poll, ix);
netif_napi_set_irq_locked(&c->napi, irq);
@@ -3569,7 +3589,8 @@ static int mlx5e_alloc_drop_cq(struct mlx5e_priv *priv,
param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
param->wq.db_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
- return mlx5e_alloc_cq_common(priv->mdev, priv->netdev, priv->wq, param, cq);
+ return mlx5e_alloc_cq_common(priv->mdev, priv->netdev, priv->wq,
+ mdev->priv.bfreg.up, param, cq);
}
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
@@ -5625,12 +5646,36 @@ static int mlx5e_queue_start(struct net_device *dev, void *newq,
return 0;
}
+static struct device *mlx5e_queue_get_dma_dev(struct net_device *dev,
+ int queue_index)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_channels *channels;
+ struct device *pdev = NULL;
+ struct mlx5e_channel *ch;
+
+ channels = &priv->channels;
+
+ mutex_lock(&priv->state_lock);
+
+ if (queue_index >= channels->num)
+ goto out;
+
+ ch = channels->c[queue_index];
+ pdev = ch->pdev;
+out:
+ mutex_unlock(&priv->state_lock);
+
+ return pdev;
+}
+
static const struct netdev_queue_mgmt_ops mlx5e_queue_mgmt_ops = {
.ndo_queue_mem_size = sizeof(struct mlx5_qmgmt_data),
.ndo_queue_mem_alloc = mlx5e_queue_mem_alloc,
.ndo_queue_mem_free = mlx5e_queue_mem_free,
.ndo_queue_start = mlx5e_queue_start,
.ndo_queue_stop = mlx5e_queue_stop,
+ .ndo_queue_get_dma_dev = mlx5e_queue_get_dma_dev,
};
static void mlx5e_build_nic_netdev(struct net_device *netdev)
@@ -5858,6 +5903,10 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
}
priv->fs = fs;
+ err = mlx5e_psp_init(priv);
+ if (err)
+ mlx5_core_err(mdev, "PSP initialization failed, %d\n", err);
+
err = mlx5e_ktls_init(priv);
if (err)
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
@@ -5870,6 +5919,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
if (take_rtnl)
rtnl_lock();
+ mlx5e_psp_register(priv);
/* update XDP supported features */
mlx5e_set_xdp_feature(netdev);
@@ -5882,7 +5932,9 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
mlx5e_health_destroy_reporters(priv);
+ mlx5e_psp_unregister(priv);
mlx5e_ktls_cleanup(priv);
+ mlx5e_psp_cleanup(priv);
mlx5e_fs_cleanup(priv->fs);
debugfs_remove_recursive(priv->dfs_root);
priv->fs = NULL;
@@ -6212,8 +6264,15 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
if (!priv->channel_stats)
goto err_free_tx_rates;
+ priv->fec_ranges = kcalloc(ETHTOOL_FEC_HIST_MAX,
+ sizeof(*priv->fec_ranges), GFP_KERNEL);
+ if (!priv->fec_ranges)
+ goto err_free_channel_stats;
+
return 0;
+err_free_channel_stats:
+ kfree(priv->channel_stats);
err_free_tx_rates:
kfree(priv->tx_rates);
err_free_txq2sq_stats:
@@ -6237,6 +6296,7 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
if (!priv->mdev)
return;
+ kfree(priv->fec_ranges);
for (i = 0; i < priv->stats_nch; i++)
kvfree(priv->channel_stats[i]);
kfree(priv->channel_stats);
@@ -6730,6 +6790,7 @@ static void _mlx5e_remove(struct auxiliary_device *adev)
* is already unregistered before changing to NIC profile.
*/
if (priv->netdev->reg_state == NETREG_REGISTERED) {
+ mlx5e_psp_unregister(priv);
unregister_netdev(priv->netdev);
_mlx5e_suspend(adev, false);
} else {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 63a7a788fb0d..0335ca8277ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -974,7 +974,7 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
MLX5_FLOW_NAMESPACE_KERNEL), false);
/* The inner_ttc in the ttc params is intentionally not set */
- mlx5e_set_ttc_params(priv->fs, priv->rx_res, &ttc_params, false);
+ mlx5e_set_ttc_params(priv->fs, priv->rx_res, &ttc_params, false, false);
if (rep->vport != MLX5_VPORT_UPLINK)
/* To give uplik rep TTC a lower level for chaining from root ft */
@@ -1447,11 +1447,11 @@ static void mlx5e_rep_vnic_reporter_create(struct mlx5e_priv *priv,
reporter = devl_port_health_reporter_create(dl_port,
&mlx5_rep_vnic_reporter_ops,
- 0, rpriv);
+ rpriv);
if (IS_ERR(reporter)) {
mlx5_core_err(priv->mdev,
- "Failed to create representor vnic reporter, err = %ld\n",
- PTR_ERR(reporter));
+ "Failed to create representor vnic reporter, err = %pe\n",
+ reporter);
return;
}
@@ -1506,12 +1506,21 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
static int
mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
- struct mlx5e_priv *priv = netdev_priv(mlx5_uplink_netdev_get(dev));
struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+ struct net_device *netdev;
+ struct mlx5e_priv *priv;
+ int err;
+
+ netdev = mlx5_uplink_netdev_get(dev);
+ if (!netdev)
+ return 0;
+ priv = netdev_priv(netdev);
rpriv->netdev = priv->netdev;
- return mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
- rpriv);
+ err = mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
+ rpriv);
+ mlx5_uplink_netdev_put(dev, netdev);
+ return err;
}
static void
@@ -1638,8 +1647,16 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
struct net_device *netdev = rpriv->netdev;
- struct mlx5e_priv *priv = netdev_priv(netdev);
- void *ppriv = priv->ppriv;
+ struct mlx5e_priv *priv;
+ void *ppriv;
+
+ if (!netdev) {
+ ppriv = rpriv;
+ goto free_ppriv;
+ }
+
+ priv = netdev_priv(netdev);
+ ppriv = priv->ppriv;
if (rep->vport == MLX5_VPORT_UPLINK) {
mlx5e_vport_uplink_rep_unload(rpriv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 218b1a09534c..263d5628ee44 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -51,6 +51,7 @@
#include "ipoib/ipoib.h"
#include "en_accel/ipsec.h"
#include "en_accel/macsec.h"
+#include "en_accel/psp_rxtx.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ktls_txrx.h"
#include "en/xdp.h"
@@ -1289,8 +1290,12 @@ static void mlx5e_shampo_update_ipv4_tcp_hdr(struct mlx5e_rq *rq, struct iphdr *
tcp->check = ~tcp_v4_check(skb->len - tcp_off, ipv4->saddr,
ipv4->daddr, 0);
skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
- if (ntohs(ipv4->id) == rq->hw_gro_data->second_ip_id)
- skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
+ if (ntohs(ipv4->id) == rq->hw_gro_data->second_ip_id) {
+ bool encap = rq->hw_gro_data->fk.control.flags & FLOW_DIS_ENCAPSULATION;
+
+ skb_shinfo(skb)->gso_type |= encap ? SKB_GSO_TCP_FIXEDID_INNER :
+ SKB_GSO_TCP_FIXEDID;
+ }
skb->csum_start = (unsigned char *)tcp - skb->head;
skb->csum_offset = offsetof(struct tcphdr, check);
@@ -1521,6 +1526,11 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+ if (unlikely(mlx5e_psp_is_rx_flow(cqe))) {
+ /* TBD: PSP csum complete corrections for now chose csum_unnecessary path */
+ goto csum_unnecessary;
+ }
+
if (test_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state))
return; /* CQE csum covers all received bytes */
@@ -1549,7 +1559,7 @@ csum_none:
#define MLX5E_CE_BIT_MASK 0x80
-static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
+static inline bool mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
u32 cqe_bcnt,
struct mlx5e_rq *rq,
struct sk_buff *skb)
@@ -1563,6 +1573,11 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
if (unlikely(get_cqe_tls_offload(cqe)))
mlx5e_ktls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
+ if (unlikely(mlx5e_psp_is_rx_flow(cqe))) {
+ if (mlx5e_psp_offload_handle_rx_skb(netdev, skb, cqe))
+ return true;
+ }
+
if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
mlx5e_ipsec_offload_handle_rx_skb(netdev, skb,
be32_to_cpu(cqe->ft_metadata));
@@ -1574,6 +1589,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
unsigned int hdrlen = mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt - hdrlen, lro_num_seg);
+ skb_shinfo(skb)->gso_segs = lro_num_seg;
/* Subtract one since we already counted this as one
* "regular" packet in mlx5e_complete_rx_cqe()
*/
@@ -1607,9 +1623,11 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
if (unlikely(mlx5e_skb_is_multicast(skb)))
stats->mcast_packets++;
+
+ return false;
}
-static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
+static bool mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe,
u32 cqe_bcnt,
struct sk_buff *skb)
@@ -1619,16 +1637,20 @@ static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
stats->packets++;
stats->bytes += cqe_bcnt;
if (NAPI_GRO_CB(skb)->count != 1)
- return;
- mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
+ return false;
+
+ if (mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb))
+ return true;
+
skb_reset_network_header(skb);
if (!skb_flow_dissect_flow_keys(skb, &rq->hw_gro_data->fk, 0)) {
napi_gro_receive(rq->cq.napi, skb);
rq->hw_gro_data->skb = NULL;
}
+ return false;
}
-static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
+static inline bool mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe,
u32 cqe_bcnt,
struct sk_buff *skb)
@@ -1637,7 +1659,7 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
stats->packets++;
stats->bytes += cqe_bcnt;
- mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
+ return mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
}
static inline
@@ -1795,10 +1817,9 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
if (xdp_buff_has_frags(&mxbuf->xdp)) {
/* sinfo->nr_frags is reset by build_skb, calculate again. */
- xdp_update_skb_shared_info(skb, wi - head_wi - 1,
- sinfo->xdp_frags_size, truesize,
- xdp_buff_is_frag_pfmemalloc(
- &mxbuf->xdp));
+ xdp_update_skb_frags_info(skb, wi - head_wi - 1,
+ sinfo->xdp_frags_size, truesize,
+ xdp_buff_get_skb_flags(&mxbuf->xdp));
for (struct mlx5e_wqe_frag_info *pwi = head_wi + 1; pwi < wi; pwi++)
pwi->frag_page->frags++;
@@ -1854,7 +1875,8 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
goto wq_cyc_pop;
}
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
+ goto wq_cyc_pop;
if (mlx5e_cqe_regb_chain(cqe))
if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
@@ -1901,7 +1923,8 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
goto wq_cyc_pop;
}
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
+ goto wq_cyc_pop;
if (rep->vlan && skb_vlan_tag_present(skb))
skb_vlan_pop(skb);
@@ -1950,7 +1973,8 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
if (!skb)
goto mpwrq_cqe_out;
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
+ goto mpwrq_cqe_out;
mlx5e_rep_tc_receive(cqe, rq, skb);
@@ -2104,10 +2128,10 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
struct mlx5e_frag_page *pagep;
/* sinfo->nr_frags is reset by build_skb, calculate again. */
- xdp_update_skb_shared_info(skb, frag_page - head_page,
- sinfo->xdp_frags_size, truesize,
- xdp_buff_is_frag_pfmemalloc(
- &mxbuf->xdp));
+ xdp_update_skb_frags_info(skb, frag_page - head_page,
+ sinfo->xdp_frags_size,
+ truesize,
+ xdp_buff_get_skb_flags(&mxbuf->xdp));
pagep = head_page;
do
@@ -2121,10 +2145,10 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
if (xdp_buff_has_frags(&mxbuf->xdp)) {
struct mlx5e_frag_page *pagep;
- xdp_update_skb_shared_info(skb, sinfo->nr_frags,
- sinfo->xdp_frags_size, truesize,
- xdp_buff_is_frag_pfmemalloc(
- &mxbuf->xdp));
+ xdp_update_skb_frags_info(skb, sinfo->nr_frags,
+ sinfo->xdp_frags_size,
+ truesize,
+ xdp_buff_get_skb_flags(&mxbuf->xdp));
pagep = frag_page - sinfo->nr_frags;
do
@@ -2387,7 +2411,10 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
stats->hds_nosplit_bytes += data_bcnt;
}
- mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
+ if (mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb)) {
+ *skb = NULL;
+ goto free_hd_entry;
+ }
if (flush && rq->hw_gro_data->skb)
mlx5e_shampo_flush_skb(rq, cqe, match);
free_hd_entry:
@@ -2445,7 +2472,8 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
if (!skb)
goto mpwrq_cqe_out;
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
+ goto mpwrq_cqe_out;
if (mlx5e_cqe_regb_chain(cqe))
if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
@@ -2778,7 +2806,8 @@ static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe
if (!skb)
goto wq_cyc_pop;
- mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
+ goto wq_cyc_pop;
skb_push(skb, ETH_HLEN);
mlx5_devlink_trap_report(rq->mdev, trap_id, skb,
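
Every en_rx.c hunk above follows the same shape: mlx5e_complete_rx_cqe() now returns true when the completion path has already dropped the skb, and each caller jumps straight to its WQE-release label instead of continuing. A minimal sketch of the resulting caller, assuming a simplified handler tail (napi_gro_receive() stands in for the real delivery path):

static void example_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
				  u32 cqe_bcnt, struct sk_buff *skb)
{
	/* A true return means the completion path consumed the skb;
	 * the handler must only release the WQE slot and must not
	 * touch skb again.
	 */
	if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
		goto wq_cyc_pop;

	napi_gro_receive(rq->cq.napi, skb);
wq_cyc_pop:
	mlx5_wq_cyc_pop(&rq->wqe.wq);
}
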
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 87536f158d07..7c029a7d0fd7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -1446,16 +1446,13 @@ static void fec_set_rs_stats(struct ethtool_fec_stats *fec_stats, u32 *ppcnt)
}
static void fec_set_block_stats(struct mlx5e_priv *priv,
+ int mode,
struct ethtool_fec_stats *fec_stats)
{
struct mlx5_core_dev *mdev = priv->mdev;
u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
- int mode = fec_active_mode(mdev);
-
- if (mode == MLX5E_FEC_NOFEC)
- return;
MLX5_SET(ppcnt_reg, in, local_port, 1);
MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
@@ -1466,6 +1463,7 @@ static void fec_set_block_stats(struct mlx5e_priv *priv,
case MLX5E_FEC_RS_528_514:
case MLX5E_FEC_RS_544_514:
case MLX5E_FEC_LLRS_272_257_1:
+ case MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD:
fec_set_rs_stats(fec_stats, out);
return;
case MLX5E_FEC_FIRECODE:
@@ -1493,14 +1491,130 @@ static void fec_set_corrected_bits_total(struct mlx5e_priv *priv,
phy_corrected_bits);
}
+#define MLX5_RS_HISTOGRAM_ENTRIES \
+ (MLX5_FLD_SZ_BYTES(rs_histogram_cntrs, hist) / \
+ MLX5_FLD_SZ_BYTES(rs_histogram_cntrs, hist[0]))
+
+enum {
+ MLX5E_HISTOGRAM_FEC_RS_544_514 = 1,
+ MLX5E_HISTOGRAM_FEC_LLRS = 2,
+ MLX5E_HISTOGRAM_FEC_RS_528_514 = 3,
+};
+
+static bool fec_rs_validate_hist_type(int mode, int hist_type)
+{
+ switch (mode) {
+ case MLX5E_FEC_RS_528_514:
+ return hist_type == MLX5E_HISTOGRAM_FEC_RS_528_514;
+ case MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD:
+ case MLX5E_FEC_RS_544_514:
+ return hist_type == MLX5E_HISTOGRAM_FEC_RS_544_514;
+ case MLX5E_FEC_LLRS_272_257_1:
+ return hist_type == MLX5E_HISTOGRAM_FEC_LLRS;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static u8
+fec_rs_histogram_fill_ranges(struct mlx5e_priv *priv, int mode,
+ const struct ethtool_fec_hist_range **ranges)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 out[MLX5_ST_SZ_DW(pphcr_reg)] = {0};
+ u32 in[MLX5_ST_SZ_DW(pphcr_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(pphcr_reg);
+ u8 hist_type, num_of_bins;
+
+ memset(priv->fec_ranges, 0,
+ ETHTOOL_FEC_HIST_MAX * sizeof(*priv->fec_ranges));
+ MLX5_SET(pphcr_reg, in, local_port, 1);
+ if (mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPHCR, 0, 0))
+ return 0;
+
+ hist_type = MLX5_GET(pphcr_reg, out, active_hist_type);
+ if (!fec_rs_validate_hist_type(mode, hist_type))
+ return 0;
+
+ num_of_bins = MLX5_GET(pphcr_reg, out, num_of_bins);
+ if (WARN_ON_ONCE(num_of_bins > MLX5_RS_HISTOGRAM_ENTRIES))
+ return 0;
+
+ for (int i = 0; i < num_of_bins; i++) {
+ void *bin_range = MLX5_ADDR_OF(pphcr_reg, out, bin_range[i]);
+
+ priv->fec_ranges[i].high = MLX5_GET(bin_range_layout, bin_range,
+ high_val);
+ priv->fec_ranges[i].low = MLX5_GET(bin_range_layout, bin_range,
+ low_val);
+ }
+ *ranges = priv->fec_ranges;
+
+ return num_of_bins;
+}
+
+static void fec_rs_histogram_fill_stats(struct mlx5e_priv *priv,
+ u8 num_of_bins,
+ struct ethtool_fec_hist *hist)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ void *rs_histogram_cntrs;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_RS_FEC_HISTOGRAM_GROUP);
+ if (mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0))
+ return;
+
+ rs_histogram_cntrs = MLX5_ADDR_OF(ppcnt_reg, out,
+ counter_set.rs_histogram_cntrs);
+ /* fec_rs_histogram_fill_ranges() guarantees that num_of_bins does
+ * not exceed MLX5_RS_HISTOGRAM_ENTRIES.
+ */
+ for (int i = 0; i < num_of_bins; i++)
+ hist->values[i].sum = MLX5_GET64(rs_histogram_cntrs,
+ rs_histogram_cntrs,
+ hist[i]);
+}
+
+static void fec_set_histograms_stats(struct mlx5e_priv *priv, int mode,
+ struct ethtool_fec_hist *hist)
+{
+ u8 num_of_bins;
+
+ switch (mode) {
+ case MLX5E_FEC_RS_528_514:
+ case MLX5E_FEC_RS_544_514:
+ case MLX5E_FEC_LLRS_272_257_1:
+ case MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD:
+ num_of_bins =
+ fec_rs_histogram_fill_ranges(priv, mode, &hist->ranges);
+ if (num_of_bins)
+ return fec_rs_histogram_fill_stats(priv, num_of_bins,
+ hist);
+ break;
+ default:
+ return;
+ }
+}
+
void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
- struct ethtool_fec_stats *fec_stats)
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
- if (!MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group))
+ int mode = fec_active_mode(priv->mdev);
+
+ if (mode == MLX5E_FEC_NOFEC ||
+ !MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group))
return;
fec_set_corrected_bits_total(priv, fec_stats);
- fec_set_block_stats(priv, fec_stats);
+ fec_set_block_stats(priv, mode, fec_stats);
+ fec_set_histograms_stats(priv, mode, hist);
}
#define PPORT_ETH_EXT_OFF(c) \
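
Both new histogram helpers are built around the same access-register round trip: zeroed in/out buffers sized from the register layout, a single field set in the request, and fields picked out of the response. A condensed sketch, assuming a caller that only needs the active histogram type back (MLX5_ST_SZ_DW, MLX5_SET and MLX5_GET are the standard mlx5 register-layout accessors):

static int example_query_pphcr_hist_type(struct mlx5_core_dev *mdev, u8 *type)
{
	u32 out[MLX5_ST_SZ_DW(pphcr_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(pphcr_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(pphcr_reg);
	int err;

	MLX5_SET(pphcr_reg, in, local_port, 1);
	err = mlx5_core_access_reg(mdev, in, sz, out, sz,
				   MLX5_REG_PPHCR, 0, 0);
	if (err)
		return err;

	*type = MLX5_GET(pphcr_reg, out, active_hist_type);
	return 0;
}
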
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 72dbcc1928ef..09f155acb461 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -117,7 +117,8 @@ void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv);
void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
struct ethtool_pause_stats *pause_stats);
void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
- struct ethtool_fec_stats *fec_stats);
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist);
void mlx5e_stats_eth_phy_get(struct mlx5e_priv *priv,
struct ethtool_eth_phy_stats *phy_stats);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 32c07a8b03d1..00c2763e57ca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -66,6 +66,7 @@
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
+#include "lib/mlx5.h"
#include "diag/en_tc_tracepoint.h"
#include <asm/div64.h>
#include "lag/lag.h"
@@ -757,11 +758,11 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
struct mlx5e_priv *priv = hp->func_priv;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_rss_params_indir indir;
+ u32 rqt_size;
int err;
- err = mlx5e_rss_params_indir_init(&indir, mdev,
- mlx5e_rqt_size(mdev, hp->num_channels),
- mlx5e_rqt_size(mdev, hp->num_channels));
+ rqt_size = mlx5e_rqt_size(mdev, hp->num_channels);
+ err = mlx5e_rss_params_indir_init(&indir, rqt_size, rqt_size);
if (err)
return err;
@@ -837,6 +838,9 @@ static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
ttc_params->ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
for (tt = 0; tt < MLX5_NUM_TT; tt++) {
+ if (mlx5_ttc_is_decrypted_esp_tt(tt))
+ continue;
+
ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
ttc_params->dests[tt].tir_num =
tt == MLX5_TT_ANY ?
@@ -5387,12 +5391,13 @@ void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht)
int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
{
const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
+ struct mlx5_devcom_match_attr attr = {};
struct netdev_phys_item_id ppid;
struct mlx5e_rep_priv *rpriv;
struct mapping_ctx *mapping;
struct mlx5_eswitch *esw;
struct mlx5e_priv *priv;
- u64 mapping_id, key;
+ u64 mapping_id;
int err = 0;
rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
@@ -5448,8 +5453,10 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
err = netif_get_port_parent_id(priv->netdev, &ppid, false);
if (!err) {
- memcpy(&key, &ppid.id, sizeof(key));
- mlx5_esw_offloads_devcom_init(esw, key);
+ memcpy(&attr.key.val, &ppid.id, sizeof(attr.key.val));
+ attr.flags = MLX5_DEVCOM_MATCH_FLAGS_NS;
+ attr.net = mlx5_core_net(esw->dev);
+ mlx5_esw_offloads_devcom_init(esw, &attr);
}
return 0;
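
The devcom change replaces the raw u64 key with a match-attr struct: the key is still derived from the PCI port parent id, but MLX5_DEVCOM_MATCH_FLAGS_NS additionally scopes peer matching to the eswitch's network namespace. A self-contained sketch condensed from the hunk above:

static void example_devcom_init(struct mlx5_eswitch *esw,
				struct net_device *netdev)
{
	struct mlx5_devcom_match_attr attr = {};
	struct netdev_phys_item_id ppid;

	if (netif_get_port_parent_id(netdev, &ppid, false))
		return;

	memcpy(&attr.key.val, &ppid.id, sizeof(attr.key.val));
	attr.flags = MLX5_DEVCOM_MATCH_FLAGS_NS; /* match within one netns */
	attr.net = mlx5_core_net(esw->dev);
	mlx5_esw_offloads_devcom_init(esw, &attr);
}
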
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 319061d31602..b7227afcb51d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -39,6 +39,7 @@
#include "ipoib/ipoib.h"
#include "en_accel/en_accel.h"
#include "en_accel/ipsec_rxtx.h"
+#include "en_accel/psp_rxtx.h"
#include "en_accel/macsec.h"
#include "en/ptp.h"
#include <net/ipv6.h>
@@ -120,6 +121,11 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_accel_tx_state *accel,
struct mlx5_wqe_eth_seg *eseg)
{
+#ifdef CONFIG_MLX5_EN_PSP
+ if (unlikely(mlx5e_psp_txwqe_build_eseg_csum(sq, skb, &accel->psp_st, eseg)))
+ return;
+#endif
+
if (unlikely(mlx5e_ipsec_txwqe_build_eseg_csum(sq, skb, eseg)))
return;
@@ -297,7 +303,7 @@ static void mlx5e_sq_xmit_prepare(struct mlx5e_txqsq *sq, struct sk_buff *skb,
stats->packets++;
}
- attr->insz = mlx5e_accel_tx_ids_len(sq, accel);
+ attr->insz = mlx5e_accel_tx_ids_len(sq, skb, accel);
stats->bytes += attr->num_bytes;
}
@@ -653,7 +659,7 @@ static void mlx5e_cqe_ts_id_eseg(struct mlx5e_ptpsq *ptpsq, struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg)
{
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
- eseg->flow_table_metadata =
+ eseg->flow_table_metadata |=
cpu_to_be32(mlx5e_ptp_metadata_fifo_peek(&ptpsq->metadata_freelist));
}
@@ -661,7 +667,7 @@ static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *
struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
- mlx5e_accel_tx_eseg(priv, skb, eseg, ihs);
+ mlx5e_accel_tx_eseg(priv, skb, accel, eseg, ihs);
mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
if (unlikely(sq->ptpsq))
mlx5e_cqe_ts_id_eseg(sq->ptpsq, skb, eseg);
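
Two small but easy-to-miss changes land in en_tx.c: the PSP checksum handler is tried before the IPsec one (the first offload to claim the packet wins), and the PTP timestamp-id write becomes |= so it ORs into flow_table_metadata instead of clobbering bits set by earlier eseg stages. A sketch of why the OR matters, with a made-up bit position:

static __be32 example_metadata_merge(__be32 meta)
{
	/* OR preserves whatever earlier eseg stages already wrote into
	 * the metadata word; plain assignment would discard it.
	 * BIT(23) is illustrative, not the real ts-id field.
	 */
	return meta | cpu_to_be32(BIT(23));
}
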
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 1ab77159409d..25499da177bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -32,9 +32,7 @@ enum {
MLX5_EQ_STATE_ALWAYS_ARMED = 0xb,
};
-enum {
- MLX5_EQ_DOORBEL_OFFSET = 0x40,
-};
+#define MLX5_EQ_DOORBELL_OFFSET 0x40
/* budget must be smaller than MLX5_NUM_SPARE_EQE to guarantee that we update
* the ci before we polled all the entries in the EQ. MLX5_NUM_SPARE_EQE is
@@ -309,7 +307,7 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
MLX5_SET(eqc, eqc, log_eq_size, eq->fbc.log_sz);
- MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
+ MLX5_SET(eqc, eqc, uar_page, priv->bfreg.up->index);
MLX5_SET(eqc, eqc, intr, vecidx);
MLX5_SET(eqc, eqc, log_page_size,
eq->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
@@ -322,7 +320,7 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
eq->irqn = pci_irq_vector(dev->pdev, vecidx);
eq->dev = dev;
- eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;
+ eq->doorbell = priv->bfreg.up->map + MLX5_EQ_DOORBELL_OFFSET;
err = mlx5_debug_eq_add(dev, eq);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
index 7dd1dc3f77c7..c9a1654d83a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
@@ -87,8 +87,8 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
drop_counter = mlx5_fc_create(esw->dev, false);
if (IS_ERR(drop_counter)) {
esw_warn(esw->dev,
- "vport[%d] configure egress drop rule counter err(%ld)\n",
- vport->vport, PTR_ERR(drop_counter));
+ "vport[%d] configure egress drop rule counter err(%pe)\n",
+ vport->vport, drop_counter);
drop_counter = NULL;
}
vport->egress.legacy.drop_counter = drop_counter;
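
This hunk is the first of several in the series that switch error logging from err(%ld) plus PTR_ERR() to %pe, which prints an ERR_PTR symbolically (e.g. -ENOMEM) and takes the error pointer directly. A minimal sketch of the idiom:

static void example_log_err_ptr(struct mlx5_core_dev *dev)
{
	struct mlx5_fc *counter;

	counter = mlx5_fc_create(dev, false);
	if (IS_ERR(counter))
		mlx5_core_warn(dev, "counter creation failed (err=%pe)\n",
			       counter);
}
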
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/adj_vport.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/adj_vport.c
new file mode 100644
index 000000000000..0091ba697bae
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/adj_vport.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "fs_core.h"
+#include "eswitch.h"
+
+enum {
+ MLX5_ADJ_VPORT_DISCONNECT = 0x0,
+ MLX5_ADJ_VPORT_CONNECT = 0x1,
+};
+
+static int mlx5_esw_adj_vport_modify(struct mlx5_core_dev *dev,
+ u16 vport, bool connect)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {};
+
+ MLX5_SET(modify_vport_state_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_VPORT_STATE);
+ MLX5_SET(modify_vport_state_in, in, op_mod,
+ MLX5_VPORT_STATE_OP_MOD_ESW_VPORT);
+ MLX5_SET(modify_vport_state_in, in, other_vport, 1);
+ MLX5_SET(modify_vport_state_in, in, vport_number, vport);
+ MLX5_SET(modify_vport_state_in, in, ingress_connect_valid, 1);
+ MLX5_SET(modify_vport_state_in, in, egress_connect_valid, 1);
+ MLX5_SET(modify_vport_state_in, in, ingress_connect, connect);
+ MLX5_SET(modify_vport_state_in, in, egress_connect, connect);
+
+ return mlx5_cmd_exec_in(dev, modify_vport_state, in);
+}
+
+static void mlx5_esw_destroy_esw_vport(struct mlx5_core_dev *dev, u16 vport)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_esw_vport_in)] = {};
+
+ MLX5_SET(destroy_esw_vport_in, in, opcode,
+ MLX5_CMD_OPCODE_DESTROY_ESW_VPORT);
+ MLX5_SET(destroy_esw_vport_in, in, vport_num, vport);
+
+ mlx5_cmd_exec_in(dev, destroy_esw_vport, in);
+}
+
+static int mlx5_esw_create_esw_vport(struct mlx5_core_dev *dev, u16 vhca_id,
+ u16 *vport_num)
+{
+ u32 out[MLX5_ST_SZ_DW(create_esw_vport_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(create_esw_vport_in)] = {};
+ int err;
+
+ MLX5_SET(create_esw_vport_in, in, opcode,
+ MLX5_CMD_OPCODE_CREATE_ESW_VPORT);
+ MLX5_SET(create_esw_vport_in, in, managed_vhca_id, vhca_id);
+
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *vport_num = MLX5_GET(create_esw_vport_out, out, vport_num);
+
+ return err;
+}
+
+static int mlx5_esw_adj_vport_create(struct mlx5_eswitch *esw, u16 vhca_id,
+ const void *rid_info_reg)
+{
+ struct mlx5_vport *vport;
+ u16 vport_num;
+ int err;
+
+ err = mlx5_esw_create_esw_vport(esw->dev, vhca_id, &vport_num);
+ if (err) {
+ esw_warn(esw->dev,
+ "Failed to create adjacent vport for vhca_id %d, err %d\n",
+ vhca_id, err);
+ return err;
+ }
+
+ esw_debug(esw->dev, "Created adjacent vport[%d] %d for vhca_id 0x%x\n",
+ esw->last_vport_idx, vport_num, vhca_id);
+
+ err = mlx5_esw_vport_alloc(esw, esw->last_vport_idx++, vport_num);
+ if (err)
+ goto destroy_esw_vport;
+
+ xa_set_mark(&esw->vports, vport_num, MLX5_ESW_VPT_VF);
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ vport->adjacent = true;
+ vport->vhca_id = vhca_id;
+
+ vport->adj_info.parent_pci_devfn =
+ MLX5_GET(function_vhca_rid_info_reg, rid_info_reg,
+ parent_pci_device_function);
+ vport->adj_info.function_id =
+ MLX5_GET(function_vhca_rid_info_reg, rid_info_reg, function_id);
+
+ mlx5_fs_vport_egress_acl_ns_add(esw->dev->priv.steering, vport->index);
+ mlx5_fs_vport_ingress_acl_ns_add(esw->dev->priv.steering, vport->index);
+ err = mlx5_esw_offloads_rep_add(esw, vport);
+ if (err)
+ goto acl_ns_remove;
+
+ mlx5_esw_adj_vport_modify(esw->dev, vport_num, MLX5_ADJ_VPORT_CONNECT);
+ return 0;
+
+acl_ns_remove:
+ mlx5_fs_vport_ingress_acl_ns_remove(esw->dev->priv.steering,
+ vport->index);
+ mlx5_fs_vport_egress_acl_ns_remove(esw->dev->priv.steering,
+ vport->index);
+ mlx5_esw_vport_free(esw, vport);
+destroy_esw_vport:
+ mlx5_esw_destroy_esw_vport(esw->dev, vport_num);
+ return err;
+}
+
+static void mlx5_esw_adj_vport_destroy(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ u16 vport_num = vport->vport;
+
+ esw_debug(esw->dev, "Destroying adjacent vport %d for vhca_id 0x%x\n",
+ vport_num, vport->vhca_id);
+ mlx5_esw_adj_vport_modify(esw->dev, vport_num,
+ MLX5_ADJ_VPORT_DISCONNECT);
+ mlx5_esw_offloads_rep_remove(esw, vport);
+ mlx5_fs_vport_egress_acl_ns_remove(esw->dev->priv.steering,
+ vport->index);
+ mlx5_fs_vport_ingress_acl_ns_remove(esw->dev->priv.steering,
+ vport->index);
+ mlx5_esw_vport_free(esw, vport);
+ /* Reset the vport index so the next adjacent vport can reuse it.
+ * If vport counts ever become able to change incrementally, this
+ * will need to be reworked.
+ */
+ esw->last_vport_idx--;
+ mlx5_esw_destroy_esw_vport(esw->dev, vport_num);
+}
+
+void mlx5_esw_adjacent_vhcas_cleanup(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ unsigned long i;
+
+ if (!MLX5_CAP_GEN_2(esw->dev, delegated_vhca_max))
+ return;
+
+ mlx5_esw_for_each_vf_vport(esw, i, vport, U16_MAX) {
+ if (!vport->adjacent)
+ continue;
+ mlx5_esw_adj_vport_destroy(esw, vport);
+ }
+}
+
+void mlx5_esw_adjacent_vhcas_setup(struct mlx5_eswitch *esw)
+{
+ u32 delegated_vhca_max = MLX5_CAP_GEN_2(esw->dev, delegated_vhca_max);
+ u32 in[MLX5_ST_SZ_DW(query_delegated_vhca_in)] = {};
+ int outlen, err, i = 0;
+ u8 *out;
+ u32 count;
+
+ if (!delegated_vhca_max)
+ return;
+
+ outlen = MLX5_ST_SZ_BYTES(query_delegated_vhca_out) +
+ delegated_vhca_max *
+ MLX5_ST_SZ_BYTES(delegated_function_vhca_rid_info);
+
+ esw_debug(esw->dev, "delegated_vhca_max=%d\n", delegated_vhca_max);
+
+ out = kvzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return;
+
+ MLX5_SET(query_delegated_vhca_in, in, opcode,
+ MLX5_CMD_OPCODE_QUERY_DELEGATED_VHCA);
+
+ err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen);
+ if (err) {
+ kvfree(out);
+ esw_warn(esw->dev, "Failed to query delegated vhca, err %d\n",
+ err);
+ return;
+ }
+
+ count = MLX5_GET(query_delegated_vhca_out, out, functions_count);
+ esw_debug(esw->dev, "Delegated vhca functions count %d\n", count);
+
+ for (i = 0; i < count; i++) {
+ const void *rid_info, *rid_info_reg;
+ u16 vhca_id;
+
+ rid_info = MLX5_ADDR_OF(query_delegated_vhca_out, out,
+ delegated_function_vhca_rid_info[i]);
+
+ rid_info_reg = MLX5_ADDR_OF(delegated_function_vhca_rid_info,
+ rid_info, function_vhca_rid_info);
+
+ vhca_id = MLX5_GET(function_vhca_rid_info_reg, rid_info_reg,
+ vhca_id);
+ esw_debug(esw->dev, "Delegating vhca_id 0x%x\n", vhca_id);
+
+ err = mlx5_esw_adj_vport_create(esw, vhca_id, rid_info_reg);
+ if (err) {
+ esw_warn(esw->dev,
+ "Failed to init adjacent vhca 0x%x, err %d\n",
+ vhca_id, err);
+ break;
+ }
+ }
+
+ kvfree(out);
+}
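
mlx5_esw_adj_vport_create() above unwinds in strict reverse order of setup, mirrored step for step by mlx5_esw_adj_vport_destroy(). A condensed sketch of that LIFO idiom, with hypothetical step_*/undo_* helpers standing in for the real create/ACL/rep operations:

int step_a(void);
int step_b(void);
int step_c(void);
void undo_step_a(void);
void undo_step_b(void);

int example_setup(void)
{
	int err;

	err = step_a();
	if (err)
		return err;

	err = step_b();
	if (err)
		goto undo_a;

	err = step_c();
	if (err)
		goto undo_b;

	return 0;

undo_b:
	undo_step_b();
undo_a:
	undo_step_a();
	return err;
}
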
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index 76e35c827da0..60e10047770f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -81,7 +81,8 @@ mlx5_esw_bridge_table_create(int max_fte, u32 level, struct mlx5_eswitch *esw)
ft_attr.prio = FDB_BR_OFFLOAD;
fdb = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(fdb))
- esw_warn(dev, "Failed to create bridge FDB Table (err=%ld)\n", PTR_ERR(fdb));
+ esw_warn(dev, "Failed to create bridge FDB Table (err=%pe)\n",
+ fdb);
return fdb;
}
@@ -121,8 +122,8 @@ mlx5_esw_bridge_ingress_vlan_proto_fg_create(unsigned int from, unsigned int to,
kvfree(in);
if (IS_ERR(fg))
esw_warn(esw->dev,
- "Failed to create VLAN(proto=%x) flow group for bridge ingress table (err=%ld)\n",
- vlan_proto, PTR_ERR(fg));
+ "Failed to create VLAN(proto=%x) flow group for bridge ingress table (err=%pe)\n",
+ vlan_proto, fg);
return fg;
}
@@ -180,8 +181,8 @@ mlx5_esw_bridge_ingress_vlan_proto_filter_fg_create(unsigned int from, unsigned
fg = mlx5_create_flow_group(ingress_ft, in);
if (IS_ERR(fg))
esw_warn(esw->dev,
- "Failed to create bridge ingress table VLAN filter flow group (err=%ld)\n",
- PTR_ERR(fg));
+ "Failed to create bridge ingress table VLAN filter flow group (err=%pe)\n",
+ fg);
kvfree(in);
return fg;
}
@@ -237,8 +238,8 @@ mlx5_esw_bridge_ingress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow
fg = mlx5_create_flow_group(ingress_ft, in);
if (IS_ERR(fg))
esw_warn(esw->dev,
- "Failed to create MAC flow group for bridge ingress table (err=%ld)\n",
- PTR_ERR(fg));
+ "Failed to create MAC flow group for bridge ingress table (err=%pe)\n",
+ fg);
kvfree(in);
return fg;
@@ -274,8 +275,8 @@ mlx5_esw_bridge_egress_vlan_proto_fg_create(unsigned int from, unsigned int to,
fg = mlx5_create_flow_group(egress_ft, in);
if (IS_ERR(fg))
esw_warn(esw->dev,
- "Failed to create VLAN flow group for bridge egress table (err=%ld)\n",
- PTR_ERR(fg));
+ "Failed to create VLAN flow group for bridge egress table (err=%pe)\n",
+ fg);
kvfree(in);
return fg;
}
@@ -324,8 +325,8 @@ mlx5_esw_bridge_egress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_
fg = mlx5_create_flow_group(egress_ft, in);
if (IS_ERR(fg))
esw_warn(esw->dev,
- "Failed to create bridge egress table MAC flow group (err=%ld)\n",
- PTR_ERR(fg));
+ "Failed to create bridge egress table MAC flow group (err=%pe)\n",
+ fg);
kvfree(in);
return fg;
}
@@ -354,8 +355,8 @@ mlx5_esw_bridge_egress_miss_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow
fg = mlx5_create_flow_group(egress_ft, in);
if (IS_ERR(fg))
esw_warn(esw->dev,
- "Failed to create bridge egress table miss flow group (err=%ld)\n",
- PTR_ERR(fg));
+ "Failed to create bridge egress table miss flow group (err=%pe)\n",
+ fg);
kvfree(in);
return fg;
}
@@ -501,8 +502,8 @@ mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
if (mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(esw)) {
miss_fg = mlx5_esw_bridge_egress_miss_fg_create(esw, egress_ft);
if (IS_ERR(miss_fg)) {
- esw_warn(esw->dev, "Failed to create miss flow group (err=%ld)\n",
- PTR_ERR(miss_fg));
+ esw_warn(esw->dev, "Failed to create miss flow group (err=%pe)\n",
+ miss_fg);
miss_fg = NULL;
goto skip_miss_flow;
}
@@ -510,8 +511,8 @@ mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
miss_pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw);
if (IS_ERR(miss_pkt_reformat)) {
esw_warn(esw->dev,
- "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
- PTR_ERR(miss_pkt_reformat));
+ "Failed to alloc packet reformat REMOVE_HEADER (err=%pe)\n",
+ miss_pkt_reformat);
miss_pkt_reformat = NULL;
mlx5_destroy_flow_group(miss_fg);
miss_fg = NULL;
@@ -522,8 +523,8 @@ mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
br_offloads->skip_ft,
miss_pkt_reformat);
if (IS_ERR(miss_handle)) {
- esw_warn(esw->dev, "Failed to create miss flow (err=%ld)\n",
- PTR_ERR(miss_handle));
+ esw_warn(esw->dev, "Failed to create miss flow (err=%pe)\n",
+ miss_handle);
miss_handle = NULL;
mlx5_packet_reformat_dealloc(esw->dev, miss_pkt_reformat);
miss_pkt_reformat = NULL;
@@ -1048,8 +1049,8 @@ mlx5_esw_bridge_vlan_push_create(u16 vlan_proto, struct mlx5_esw_bridge_vlan *vl
&reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(pkt_reformat)) {
- esw_warn(esw->dev, "Failed to alloc packet reformat INSERT_HEADER (err=%ld)\n",
- PTR_ERR(pkt_reformat));
+ esw_warn(esw->dev, "Failed to alloc packet reformat INSERT_HEADER (err=%pe)\n",
+ pkt_reformat);
return PTR_ERR(pkt_reformat);
}
@@ -1076,8 +1077,8 @@ mlx5_esw_bridge_vlan_pop_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_e
pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw);
if (IS_ERR(pkt_reformat)) {
- esw_warn(esw->dev, "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
- PTR_ERR(pkt_reformat));
+ esw_warn(esw->dev, "Failed to alloc packet reformat REMOVE_HEADER (err=%pe)\n",
+ pkt_reformat);
return PTR_ERR(pkt_reformat);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index b7102e14d23d..cf88a106d80d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -27,6 +27,7 @@ static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch *
{
struct mlx5_core_dev *dev = esw->dev;
struct netdev_phys_item_id ppid = {};
+ struct mlx5_vport *vport;
u32 controller_num = 0;
bool external;
u16 pfnum;
@@ -42,15 +43,25 @@ static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch *
dl_port->attrs.switch_id.id_len = ppid.id_len;
devlink_port_attrs_pci_pf_set(dl_port, controller_num, pfnum, external);
} else if (mlx5_eswitch_is_vf_vport(esw, vport_num)) {
+ u16 func_id = vport_num - 1;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
dl_port->attrs.switch_id.id_len = ppid.id_len;
+ if (vport->adjacent) {
+ func_id = vport->adj_info.function_id;
+ pfnum = vport->adj_info.parent_pci_devfn;
+ }
+
devlink_port_attrs_pci_vf_set(dl_port, controller_num, pfnum,
- vport_num - 1, external);
+ func_id, external);
} else if (mlx5_core_is_ec_vf_vport(esw->dev, vport_num)) {
+ u16 base_vport = mlx5_core_ec_vf_vport_base(dev);
+
memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
dl_port->attrs.switch_id.id_len = ppid.id_len;
devlink_port_attrs_pci_vf_set(dl_port, 0, pfnum,
- vport_num - 1, false);
+ vport_num - base_vport, false);
}
}
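
Two numbering fixes land in devlink_port.c: adjacent-vport VFs report the parent PCI devfn and delegated function id instead of values derived from the local vport number, and EC VFs are numbered relative to their base vport rather than vport_num - 1. A sketch of the EC VF case, with made-up numbers:

static u16 example_ec_vf_index(struct mlx5_core_dev *dev, u16 vport_num)
{
	/* Illustrative only: with a base of 0x800 and vport_num 0x802,
	 * the devlink port now shows VF index 2 rather than 0x801.
	 */
	return vport_num - mlx5_core_ec_vf_vport_base(dev);
}
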
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 91d863c8c152..56e6f54b1e2e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -102,6 +102,8 @@ struct mlx5_esw_sched_node {
u8 level;
/* Valid only when this node represents a traffic class. */
u8 tc;
+ /* Valid only for a TC arbiter node or a vport acting as a TC arbiter. */
+ u32 tc_bw[DEVLINK_RATE_TCS_MAX];
};
static void esw_qos_node_attach_to_parent(struct mlx5_esw_sched_node *node)
@@ -462,6 +464,7 @@ static int
esw_qos_vport_create_sched_element(struct mlx5_esw_sched_node *vport_node,
struct netlink_ext_ack *extack)
{
+ struct mlx5_esw_sched_node *parent = vport_node->parent;
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
struct mlx5_core_dev *dev = vport_node->esw->dev;
void *attr;
@@ -477,7 +480,7 @@ esw_qos_vport_create_sched_element(struct mlx5_esw_sched_node *vport_node,
attr = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes);
MLX5_SET(vport_element, attr, vport_number, vport_node->vport->vport);
MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
- vport_node->parent->ix);
+ parent ? parent->ix : vport_node->esw->qos.root_tsar_ix);
MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
vport_node->max_rate);
@@ -608,10 +611,7 @@ static void
esw_qos_tc_arbiter_get_bw_shares(struct mlx5_esw_sched_node *tc_arbiter_node,
u32 *tc_bw)
{
- struct mlx5_esw_sched_node *vports_tc_node;
-
- list_for_each_entry(vports_tc_node, &tc_arbiter_node->children, entry)
- tc_bw[vports_tc_node->tc] = vports_tc_node->bw_share;
+ memcpy(tc_bw, tc_arbiter_node->tc_bw, sizeof(tc_arbiter_node->tc_bw));
}
static void
@@ -628,6 +628,7 @@ esw_qos_set_tc_arbiter_bw_shares(struct mlx5_esw_sched_node *tc_arbiter_node,
u8 tc = vports_tc_node->tc;
u32 bw_share;
+ tc_arbiter_node->tc_bw[tc] = tc_bw[tc];
bw_share = tc_bw[tc] * fw_max_bw_share;
bw_share = esw_qos_calc_bw_share(bw_share, divider,
fw_max_bw_share);
@@ -786,48 +787,15 @@ static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta
return err;
}
- if (MLX5_CAP_QOS(dev, log_esw_max_sched_depth)) {
- esw->qos.node0 = __esw_qos_create_vports_sched_node(esw, NULL, extack);
- } else {
- /* The eswitch doesn't support scheduling nodes.
- * Create a software-only node0 using the root TSAR to attach vport QoS to.
- */
- if (!__esw_qos_alloc_node(esw,
- esw->qos.root_tsar_ix,
- SCHED_NODE_TYPE_VPORTS_TSAR,
- NULL))
- esw->qos.node0 = ERR_PTR(-ENOMEM);
- else
- list_add_tail(&esw->qos.node0->entry,
- &esw->qos.domain->nodes);
- }
- if (IS_ERR(esw->qos.node0)) {
- err = PTR_ERR(esw->qos.node0);
- esw_warn(dev, "E-Switch create rate node 0 failed (%d)\n", err);
- goto err_node0;
- }
refcount_set(&esw->qos.refcnt, 1);
return 0;
-
-err_node0:
- if (mlx5_destroy_scheduling_element_cmd(esw->dev, SCHEDULING_HIERARCHY_E_SWITCH,
- esw->qos.root_tsar_ix))
- esw_warn(esw->dev, "E-Switch destroy root TSAR failed.\n");
-
- return err;
}
static void esw_qos_destroy(struct mlx5_eswitch *esw)
{
int err;
- if (esw->qos.node0->ix != esw->qos.root_tsar_ix)
- __esw_qos_destroy_node(esw->qos.node0, NULL);
- else
- __esw_qos_free_node(esw->qos.node0);
- esw->qos.node0 = NULL;
-
err = mlx5_destroy_scheduling_element_cmd(esw->dev,
SCHEDULING_HIERARCHY_E_SWITCH,
esw->qos.root_tsar_ix);
@@ -990,18 +958,22 @@ esw_qos_vport_tc_enable(struct mlx5_vport *vport, enum sched_node_type type,
struct netlink_ext_ack *extack)
{
struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
- int err, new_level, max_level;
+ struct mlx5_esw_sched_node *parent = vport_node->parent;
+ int err;
if (type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) {
+ int new_level, max_level;
+
/* Increase the parent's level by 2 to account for both the
* TC arbiter and the vports TC scheduling element.
*/
- new_level = vport_node->parent->level + 2;
+ new_level = (parent ? parent->level : 2) + 2;
max_level = 1 << MLX5_CAP_QOS(vport_node->esw->dev,
log_esw_max_sched_depth);
if (new_level > max_level) {
- NL_SET_ERR_MSG_MOD(extack,
- "TC arbitration on leafs is not supported beyond max scheduling depth");
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "TC arbitration on leafs is not supported beyond max depth %d",
+ max_level);
return -EOPNOTSUPP;
}
}
@@ -1033,9 +1005,7 @@ esw_qos_vport_tc_enable(struct mlx5_vport *vport, enum sched_node_type type,
err_sched_nodes:
if (type == SCHED_NODE_TYPE_RATE_LIMITER) {
esw_qos_node_destroy_sched_element(vport_node, NULL);
- list_add_tail(&vport_node->entry,
- &vport_node->parent->children);
- vport_node->level = vport_node->parent->level + 1;
+ esw_qos_node_attach_to_parent(vport_node);
} else {
esw_qos_tc_arbiter_scheduling_teardown(vport_node, NULL);
}
@@ -1083,7 +1053,6 @@ err_out:
static void esw_qos_vport_disable(struct mlx5_vport *vport, struct netlink_ext_ack *extack)
{
struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
- struct mlx5_esw_sched_node *parent = vport_node->parent;
enum sched_node_type curr_type = vport_node->type;
if (curr_type == SCHED_NODE_TYPE_VPORT)
@@ -1092,8 +1061,9 @@ static void esw_qos_vport_disable(struct mlx5_vport *vport, struct netlink_ext_a
esw_qos_vport_tc_disable(vport, extack);
vport_node->bw_share = 0;
+ memset(vport_node->tc_bw, 0, sizeof(vport_node->tc_bw));
list_del_init(&vport_node->entry);
- esw_qos_normalize_min_rate(parent->esw, parent, extack);
+ esw_qos_normalize_min_rate(vport_node->esw, vport_node->parent, extack);
trace_mlx5_esw_vport_qos_destroy(vport_node->esw->dev, vport);
}
@@ -1103,25 +1073,23 @@ static int esw_qos_vport_enable(struct mlx5_vport *vport,
struct mlx5_esw_sched_node *parent,
struct netlink_ext_ack *extack)
{
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
int err;
esw_assert_qos_lock_held(vport->dev->priv.eswitch);
- esw_qos_node_set_parent(vport->qos.sched_node, parent);
- if (type == SCHED_NODE_TYPE_VPORT) {
- err = esw_qos_vport_create_sched_element(vport->qos.sched_node,
- extack);
- } else {
+ esw_qos_node_set_parent(vport_node, parent);
+ if (type == SCHED_NODE_TYPE_VPORT)
+ err = esw_qos_vport_create_sched_element(vport_node, extack);
+ else
err = esw_qos_vport_tc_enable(vport, type, extack);
- }
if (err)
return err;
- vport->qos.sched_node->type = type;
- esw_qos_normalize_min_rate(parent->esw, parent, extack);
- trace_mlx5_esw_vport_qos_create(vport->dev, vport,
- vport->qos.sched_node->max_rate,
- vport->qos.sched_node->bw_share);
+ vport_node->type = type;
+ esw_qos_normalize_min_rate(vport_node->esw, parent, extack);
+ trace_mlx5_esw_vport_qos_create(vport->dev, vport, vport_node->max_rate,
+ vport_node->bw_share);
return 0;
}
@@ -1132,6 +1100,7 @@ static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_t
{
struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
struct mlx5_esw_sched_node *sched_node;
+ struct mlx5_eswitch *parent_esw;
int err;
esw_assert_qos_lock_held(esw);
@@ -1139,10 +1108,14 @@ static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_t
if (err)
return err;
- parent = parent ?: esw->qos.node0;
- sched_node = __esw_qos_alloc_node(parent->esw, 0, type, parent);
- if (!sched_node)
+ parent_esw = parent ? parent->esw : esw;
+ sched_node = __esw_qos_alloc_node(parent_esw, 0, type, parent);
+ if (!sched_node) {
+ esw_qos_put(esw);
return -ENOMEM;
+ }
+ if (!parent)
+ list_add_tail(&sched_node->entry, &esw->qos.domain->nodes);
sched_node->max_rate = max_rate;
sched_node->min_rate = min_rate;
@@ -1150,6 +1123,7 @@ static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_t
vport->qos.sched_node = sched_node;
err = esw_qos_vport_enable(vport, type, parent, extack);
if (err) {
+ __esw_qos_free_node(sched_node);
esw_qos_put(esw);
vport->qos.sched_node = NULL;
}
@@ -1157,6 +1131,19 @@ static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_t
return err;
}
+static void mlx5_esw_qos_vport_disable_locked(struct mlx5_vport *vport)
+{
+ struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
+
+ esw_assert_qos_lock_held(esw);
+ if (!vport->qos.sched_node)
+ return;
+
+ esw_qos_vport_disable(vport, NULL);
+ mlx5_esw_qos_vport_qos_free(vport);
+ esw_qos_put(esw);
+}
+
void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport)
{
struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
@@ -1168,11 +1155,9 @@ void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport)
goto unlock;
parent = vport->qos.sched_node->parent;
- WARN(parent != esw->qos.node0, "Disabling QoS on port before detaching it from node");
+ WARN(parent, "Disabling QoS on port before detaching it from node");
- esw_qos_vport_disable(vport, NULL);
- mlx5_esw_qos_vport_qos_free(vport);
- esw_qos_put(esw);
+ mlx5_esw_qos_vport_disable_locked(vport);
unlock:
esw_qos_unlock(esw);
}
@@ -1262,13 +1247,13 @@ static int esw_qos_vport_update(struct mlx5_vport *vport,
struct mlx5_esw_sched_node *parent,
struct netlink_ext_ack *extack)
{
- struct mlx5_esw_sched_node *curr_parent = vport->qos.sched_node->parent;
- enum sched_node_type curr_type = vport->qos.sched_node->type;
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
+ struct mlx5_esw_sched_node *curr_parent = vport_node->parent;
+ enum sched_node_type curr_type = vport_node->type;
u32 curr_tc_bw[DEVLINK_RATE_TCS_MAX] = {0};
int err;
esw_assert_qos_lock_held(vport->dev->priv.eswitch);
- parent = parent ?: curr_parent;
if (curr_type == type && curr_parent == parent)
return 0;
@@ -1276,10 +1261,8 @@ static int esw_qos_vport_update(struct mlx5_vport *vport,
if (err)
return err;
- if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR && curr_type == type) {
- esw_qos_tc_arbiter_get_bw_shares(vport->qos.sched_node,
- curr_tc_bw);
- }
+ if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR && curr_type == type)
+ esw_qos_tc_arbiter_get_bw_shares(vport_node, curr_tc_bw);
esw_qos_vport_disable(vport, extack);
@@ -1290,8 +1273,8 @@ static int esw_qos_vport_update(struct mlx5_vport *vport,
}
if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR && curr_type == type) {
- esw_qos_set_tc_arbiter_bw_shares(vport->qos.sched_node,
- curr_tc_bw, extack);
+ esw_qos_set_tc_arbiter_bw_shares(vport_node, curr_tc_bw,
+ extack);
}
return err;
@@ -1306,16 +1289,16 @@ static int esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw
esw_assert_qos_lock_held(esw);
curr_parent = vport->qos.sched_node->parent;
- parent = parent ?: esw->qos.node0;
if (curr_parent == parent)
return 0;
/* Set vport QoS type based on parent node type if different from
* default QoS; otherwise, use the vport's current QoS type.
*/
- if (parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
+ if (parent && parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
type = SCHED_NODE_TYPE_RATE_LIMITER;
- else if (curr_parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
+ else if (curr_parent &&
+ curr_parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
type = SCHED_NODE_TYPE_VPORT;
else
type = vport->qos.sched_node->type;
@@ -1462,8 +1445,9 @@ static int esw_qos_node_enable_tc_arbitration(struct mlx5_esw_sched_node *node,
new_level = node->level + 1;
max_level = 1 << MLX5_CAP_QOS(node->esw->dev, log_esw_max_sched_depth);
if (new_level > max_level) {
- NL_SET_ERR_MSG_MOD(extack,
- "TC arbitration on nodes is not supported beyond max scheduling depth");
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "TC arbitration on nodes is not supported beyond max depth %d",
+ max_level);
return -EOPNOTSUPP;
}
@@ -1533,6 +1517,7 @@ static u32 mlx5_esw_qos_lag_link_speed_get_locked(struct mlx5_core_dev *mdev)
speed = lksettings.base.speed;
out:
+ mlx5_uplink_netdev_put(mdev, slave);
return speed;
}
@@ -1654,9 +1639,10 @@ static bool esw_qos_validate_unsupported_tc_bw(struct mlx5_eswitch *esw,
static bool esw_qos_vport_validate_unsupported_tc_bw(struct mlx5_vport *vport,
u32 *tc_bw)
{
- struct mlx5_eswitch *esw = vport->qos.sched_node ?
- vport->qos.sched_node->parent->esw :
- vport->dev->priv.eswitch;
+ struct mlx5_esw_sched_node *node = vport->qos.sched_node;
+ struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
+
+ esw = (node && node->parent) ? node->parent->esw : esw;
return esw_qos_validate_unsupported_tc_bw(esw, tc_bw);
}
@@ -1673,6 +1659,21 @@ static bool esw_qos_tc_bw_disabled(u32 *tc_bw)
return true;
}
+static void esw_vport_qos_prune_empty(struct mlx5_vport *vport)
+{
+ struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
+
+ esw_assert_qos_lock_held(vport->dev->priv.eswitch);
+ if (!vport_node)
+ return;
+
+ if (vport_node->parent || vport_node->max_rate ||
+ vport_node->min_rate || !esw_qos_tc_bw_disabled(vport_node->tc_bw))
+ return;
+
+ mlx5_esw_qos_vport_disable_locked(vport);
+}
+
int mlx5_esw_qos_init(struct mlx5_eswitch *esw)
{
if (esw->qos.domain)
@@ -1706,6 +1707,10 @@ int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void
esw_qos_lock(esw);
err = mlx5_esw_qos_set_vport_min_rate(vport, tx_share, extack);
+ if (err)
+ goto out;
+ esw_vport_qos_prune_empty(vport);
+out:
esw_qos_unlock(esw);
return err;
}
@@ -1727,6 +1732,10 @@ int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *
esw_qos_lock(esw);
err = mlx5_esw_qos_set_vport_max_rate(vport, tx_max, extack);
+ if (err)
+ goto out;
+ esw_vport_qos_prune_empty(vport);
+out:
esw_qos_unlock(esw);
return err;
}
@@ -1763,7 +1772,8 @@ int mlx5_esw_devlink_rate_leaf_tc_bw_set(struct devlink_rate *rate_leaf,
if (disable) {
if (vport_node->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
err = esw_qos_vport_update(vport, SCHED_NODE_TYPE_VPORT,
- NULL, extack);
+ vport_node->parent, extack);
+ esw_vport_qos_prune_empty(vport);
goto unlock;
}
@@ -1775,7 +1785,7 @@ int mlx5_esw_devlink_rate_leaf_tc_bw_set(struct devlink_rate *rate_leaf,
} else {
err = esw_qos_vport_update(vport,
SCHED_NODE_TYPE_TC_ARBITER_TSAR,
- NULL, extack);
+ vport_node->parent, extack);
}
if (!err)
esw_qos_set_tc_arbiter_bw_shares(vport_node, tc_bw, extack);
@@ -1924,14 +1934,20 @@ int mlx5_esw_devlink_rate_leaf_parent_set(struct devlink_rate *devlink_rate,
void *priv, void *parent_priv,
struct netlink_ext_ack *extack)
{
- struct mlx5_esw_sched_node *node;
+ struct mlx5_esw_sched_node *node = parent ? parent_priv : NULL;
struct mlx5_vport *vport = priv;
+ int err;
- if (!parent)
- return mlx5_esw_qos_vport_update_parent(vport, NULL, extack);
+ err = mlx5_esw_qos_vport_update_parent(vport, node, extack);
+ if (!err) {
+ struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
- node = parent_priv;
- return mlx5_esw_qos_vport_update_parent(vport, node, extack);
+ esw_qos_lock(esw);
+ esw_vport_qos_prune_empty(vport);
+ esw_qos_unlock(esw);
+ }
+
+ return err;
}
static bool esw_qos_is_node_empty(struct mlx5_esw_sched_node *node)
@@ -1983,8 +1999,9 @@ mlx5_esw_qos_node_validate_set_parent(struct mlx5_esw_sched_node *node,
max_level = 1 << MLX5_CAP_QOS(node->esw->dev, log_esw_max_sched_depth);
if (new_level > max_level) {
- NL_SET_ERR_MSG_MOD(extack,
- "Node hierarchy depth exceeds the maximum supported level");
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Node hierarchy depth %d exceeds the maximum supported level %d",
+ new_level, max_level);
return -EOPNOTSUPP;
}
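
The extack messages in qos.c move from NL_SET_ERR_MSG_MOD to the formatted NL_SET_ERR_MSG_FMT_MOD so the computed limits actually reach userspace. Sketch of the before/after, assuming new_level and max_level in scope:

/* Before: static string only. */
NL_SET_ERR_MSG_MOD(extack, "depth exceeds the maximum supported level");

/* After: printf-style, so the offending value and the limit are reported. */
NL_SET_ERR_MSG_FMT_MOD(extack, "depth %d exceeds max level %d",
		       new_level, max_level);
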
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c
index 749c3957a128..407062096a82 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c
@@ -45,8 +45,8 @@ esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns,
ft_attr.flags = vport_ns->flags;
fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(fdb)) {
- esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n",
- PTR_ERR(fdb));
+ esw_warn(esw->dev, "Failed to create per vport FDB Table err %pe\n",
+ fdb);
}
return fdb;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 4917d185d0c3..e2ffb87b94cb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -257,8 +257,8 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u16 vport, bool rx_rule,
&flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
esw_warn(esw->dev,
- "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
- dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
+ "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%pe)\n",
+ dmac_v, dmac_c, vport, flow_rule);
flow_rule = NULL;
}
@@ -820,6 +820,7 @@ static int mlx5_esw_vport_caps_get(struct mlx5_eswitch *esw, struct mlx5_vport *
hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
vport->info.roce_enabled = MLX5_GET(cmd_hca_cap, hca_caps, roce);
+ vport->vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);
if (!MLX5_CAP_GEN_MAX(esw->dev, hca_cap_2))
goto out_free;
@@ -839,6 +840,18 @@ out_free:
return err;
}
+bool mlx5_esw_vport_vhca_id(struct mlx5_eswitch *esw, u16 vportn, u16 *vhca_id)
+{
+ struct mlx5_vport *vport;
+
+ vport = mlx5_eswitch_get_vport(esw, vportn);
+ if (IS_ERR(vport) || MLX5_VPORT_INVAL_VHCA_ID(vport))
+ return false;
+
+ *vhca_id = vport->vhca_id;
+ return true;
+}
+
static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
bool vst_mode_steering = esw_vst_mode_is_steering(esw);
@@ -929,7 +942,7 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
if (!mlx5_esw_is_manager_vport(esw, vport_num) &&
MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
- ret = mlx5_esw_vport_vhca_id_set(esw, vport_num);
+ ret = mlx5_esw_vport_vhca_id_map(esw, vport);
if (ret)
goto err_vhca_mapping;
}
@@ -973,7 +986,7 @@ void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
if (!mlx5_esw_is_manager_vport(esw, vport_num) &&
MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
- mlx5_esw_vport_vhca_id_clear(esw, vport_num);
+ mlx5_esw_vport_vhca_id_unmap(esw, vport);
if (vport->vport != MLX5_VPORT_PF &&
(vport->info.ipsec_crypto_enabled || vport->info.ipsec_packet_enabled))
@@ -1038,6 +1051,25 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
return ERR_PTR(err);
}
+static int mlx5_esw_host_functions_enabled_query(struct mlx5_eswitch *esw)
+{
+ const u32 *query_host_out;
+
+ if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
+ return 0;
+
+ query_host_out = mlx5_esw_query_functions(esw->dev);
+ if (IS_ERR(query_host_out))
+ return PTR_ERR(query_host_out);
+
+ esw->esw_funcs.host_funcs_disabled =
+ MLX5_GET(query_esw_functions_out, query_host_out,
+ host_params_context.host_pf_not_exist);
+
+ kvfree(query_host_out);
+ return 0;
+}
+
static void mlx5_eswitch_event_handler_register(struct mlx5_eswitch *esw)
{
if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) {
@@ -1185,7 +1217,8 @@ void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
unsigned long i;
mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
- if (!vport->enabled)
+ /* Adjacent VFs are unloaded separately */
+ if (!vport->enabled || vport->adjacent)
continue;
mlx5_eswitch_unload_pf_vf_vport(esw, vport->vport);
}
@@ -1204,6 +1237,42 @@ static void mlx5_eswitch_unload_ec_vf_vports(struct mlx5_eswitch *esw,
}
}
+static void mlx5_eswitch_unload_adj_vf_vports(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ unsigned long i;
+
+ mlx5_esw_for_each_vf_vport(esw, i, vport, U16_MAX) {
+ if (!vport->enabled || !vport->adjacent)
+ continue;
+ mlx5_eswitch_unload_pf_vf_vport(esw, vport->vport);
+ }
+}
+
+static int
+mlx5_eswitch_load_adj_vf_vports(struct mlx5_eswitch *esw,
+ enum mlx5_eswitch_vport_event enabled_events)
+{
+ struct mlx5_vport *vport;
+ unsigned long i;
+ int err;
+
+ mlx5_esw_for_each_vf_vport(esw, i, vport, U16_MAX) {
+ if (!vport->adjacent)
+ continue;
+ err = mlx5_eswitch_load_pf_vf_vport(esw, vport->vport,
+ enabled_events);
+ if (err)
+ goto unload_adj_vf_vport;
+ }
+
+ return 0;
+
+unload_adj_vf_vport:
+ mlx5_eswitch_unload_adj_vf_vports(esw);
+ return err;
+}
+
int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
enum mlx5_eswitch_vport_event enabled_events)
{
@@ -1278,17 +1347,19 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
esw->mode == MLX5_ESWITCH_LEGACY;
/* Enable PF vport */
- if (pf_needed) {
+ if (pf_needed && mlx5_esw_host_functions_enabled(esw->dev)) {
ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_PF,
enabled_events);
if (ret)
return ret;
}
- /* Enable external host PF HCA */
- ret = host_pf_enable_hca(esw->dev);
- if (ret)
- goto pf_hca_err;
+ if (mlx5_esw_host_functions_enabled(esw->dev)) {
+ /* Enable external host PF HCA */
+ ret = host_pf_enable_hca(esw->dev);
+ if (ret)
+ goto pf_hca_err;
+ }
/* Enable ECPF vport */
if (mlx5_ecpf_vport_exists(esw->dev)) {
@@ -1311,8 +1382,16 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
enabled_events);
if (ret)
goto vf_err;
+
+ /* Enable adjacent VF vports */
+ ret = mlx5_eswitch_load_adj_vf_vports(esw, enabled_events);
+ if (ret)
+ goto unload_vf_vports;
+
return 0;
+unload_vf_vports:
+ mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
vf_err:
if (mlx5_core_ec_sriov_enabled(esw->dev))
mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs);
@@ -1320,9 +1399,10 @@ ec_vf_err:
if (mlx5_ecpf_vport_exists(esw->dev))
mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
ecpf_err:
- host_pf_disable_hca(esw->dev);
+ if (mlx5_esw_host_functions_enabled(esw->dev))
+ host_pf_disable_hca(esw->dev);
pf_hca_err:
- if (pf_needed)
+ if (pf_needed && mlx5_esw_host_functions_enabled(esw->dev))
mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
return ret;
}
@@ -1332,6 +1412,8 @@ pf_hca_err:
*/
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
{
+ mlx5_eswitch_unload_adj_vf_vports(esw);
+
mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
if (mlx5_core_ec_sriov_enabled(esw->dev))
@@ -1342,10 +1424,12 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
}
- host_pf_disable_hca(esw->dev);
+ if (mlx5_esw_host_functions_enabled(esw->dev))
+ host_pf_disable_hca(esw->dev);
- if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
- esw->mode == MLX5_ESWITCH_LEGACY)
+ if ((mlx5_core_is_ecpf_esw_manager(esw->dev) ||
+ esw->mode == MLX5_ESWITCH_LEGACY) &&
+ mlx5_esw_host_functions_enabled(esw->dev))
mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
}
@@ -1402,19 +1486,76 @@ static void mlx5_esw_mode_change_notify(struct mlx5_eswitch *esw, u16 mode)
blocking_notifier_call_chain(&esw->n_head, 0, &info);
}
+static int mlx5_esw_egress_acls_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int total_vports = mlx5_eswitch_get_total_vports(dev);
+ int err;
+ int i;
+
+ for (i = 0; i < total_vports; i++) {
+ err = mlx5_fs_vport_egress_acl_ns_add(steering, i);
+ if (err)
+ goto acl_ns_remove;
+ }
+ return 0;
+
+acl_ns_remove:
+ while (i--)
+ mlx5_fs_vport_egress_acl_ns_remove(steering, i);
+ return err;
+}
+
+static void mlx5_esw_egress_acls_cleanup(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int total_vports = mlx5_eswitch_get_total_vports(dev);
+ int i;
+
+ for (i = total_vports - 1; i >= 0; i--)
+ mlx5_fs_vport_egress_acl_ns_remove(steering, i);
+}
+
+static int mlx5_esw_ingress_acls_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int total_vports = mlx5_eswitch_get_total_vports(dev);
+ int err;
+ int i;
+
+ for (i = 0; i < total_vports; i++) {
+ err = mlx5_fs_vport_ingress_acl_ns_add(steering, i);
+ if (err)
+ goto acl_ns_remove;
+ }
+ return 0;
+
+acl_ns_remove:
+ while (i--)
+ mlx5_fs_vport_ingress_acl_ns_remove(steering, i);
+ return err;
+}
+
+static void mlx5_esw_ingress_acls_cleanup(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int total_vports = mlx5_eswitch_get_total_vports(dev);
+ int i;
+
+ for (i = total_vports - 1; i >= 0; i--)
+ mlx5_fs_vport_ingress_acl_ns_remove(steering, i);
+}
+
static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
{
struct mlx5_core_dev *dev = esw->dev;
- int total_vports;
int err;
if (esw->flags & MLX5_ESWITCH_VPORT_ACL_NS_CREATED)
return 0;
- total_vports = mlx5_eswitch_get_total_vports(dev);
-
if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
- err = mlx5_fs_egress_acls_init(dev, total_vports);
+ err = mlx5_esw_egress_acls_init(dev);
if (err)
return err;
} else {
@@ -1422,7 +1563,7 @@ static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
}
if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
- err = mlx5_fs_ingress_acls_init(dev, total_vports);
+ err = mlx5_esw_ingress_acls_init(dev);
if (err)
goto err;
} else {
@@ -1433,7 +1574,7 @@ static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
err:
if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
- mlx5_fs_egress_acls_cleanup(dev);
+ mlx5_esw_egress_acls_cleanup(dev);
return err;
}
@@ -1443,9 +1584,9 @@ static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw)
esw->flags &= ~MLX5_ESWITCH_VPORT_ACL_NS_CREATED;
if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
- mlx5_fs_ingress_acls_cleanup(dev);
+ mlx5_esw_ingress_acls_cleanup(dev);
if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
- mlx5_fs_egress_acls_cleanup(dev);
+ mlx5_esw_egress_acls_cleanup(dev);
}
/**
@@ -1674,7 +1815,8 @@ int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *
void *hca_caps;
int err;
- if (!mlx5_core_is_ecpf(dev)) {
+ if (!mlx5_core_is_ecpf(dev) ||
+ !mlx5_esw_host_functions_enabled(dev)) {
*max_sfs = 0;
return 0;
}
@@ -1696,8 +1838,7 @@ out_free:
return err;
}
-static int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw,
- int index, u16 vport_num)
+int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw, int index, u16 vport_num)
{
struct mlx5_vport *vport;
int err;
@@ -1710,6 +1851,7 @@ static int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw,
vport->vport = vport_num;
vport->index = index;
vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
+ vport->vhca_id = MLX5_VHCA_ID_INVALID;
INIT_WORK(&vport->vport_change_handler, esw_vport_change_handler);
err = xa_insert(&esw->vports, vport_num, vport, GFP_KERNEL);
if (err)
@@ -1723,8 +1865,9 @@ insert_err:
return err;
}
-static void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
+ esw->total_vports--;
xa_erase(&esw->vports, vport->vport);
kfree(vport);
}
@@ -1750,21 +1893,23 @@ static int mlx5_esw_vports_init(struct mlx5_eswitch *esw)
xa_init(&esw->vports);
- err = mlx5_esw_vport_alloc(esw, idx, MLX5_VPORT_PF);
- if (err)
- goto err;
- if (esw->first_host_vport == MLX5_VPORT_PF)
- xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
- idx++;
-
- for (i = 0; i < mlx5_core_max_vfs(dev); i++) {
- err = mlx5_esw_vport_alloc(esw, idx, idx);
+ if (mlx5_esw_host_functions_enabled(dev)) {
+ err = mlx5_esw_vport_alloc(esw, idx, MLX5_VPORT_PF);
if (err)
goto err;
- xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_VF);
- xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
+ if (esw->first_host_vport == MLX5_VPORT_PF)
+ xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
idx++;
+ for (i = 0; i < mlx5_core_max_vfs(dev); i++) {
+ err = mlx5_esw_vport_alloc(esw, idx, idx);
+ if (err)
+ goto err;
+ xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_VF);
+ xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
+ idx++;
+ }
}
+
base_sf_num = mlx5_sf_start_function_id(dev);
for (i = 0; i < mlx5_sf_max_functions(dev); i++) {
err = mlx5_esw_vport_alloc(esw, idx, base_sf_num + i);
@@ -1806,6 +1951,9 @@ static int mlx5_esw_vports_init(struct mlx5_eswitch *esw)
err = mlx5_esw_vport_alloc(esw, idx, MLX5_VPORT_UPLINK);
if (err)
goto err;
+
+ /* Adjacent vports and other dynamically created vports use this */
+ esw->last_vport_idx = ++idx;
return 0;
err:
@@ -1864,6 +2012,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
goto free_esw;
esw->dev = dev;
+ dev->priv.eswitch = esw;
esw->manager_vport = mlx5_eswitch_manager_vport(dev);
esw->first_host_vport = mlx5_eswitch_first_host_vport_num(dev);
@@ -1874,11 +2023,14 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
goto abort;
}
+ err = mlx5_esw_host_functions_enabled_query(esw);
+ if (err)
+ goto abort;
+
err = mlx5_esw_vports_init(esw);
if (err)
goto abort;
- dev->priv.eswitch = esw;
err = esw_offloads_init(esw);
if (err)
goto reps_err;
@@ -2410,3 +2562,11 @@ void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev)
dev->num_ipsec_offloads--;
mutex_unlock(&esw->state_lock);
}
+
+bool mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev)
+{
+ if (!dev->priv.eswitch)
+ return true;
+
+ return !dev->priv.eswitch->esw_funcs.host_funcs_disabled;
+}
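
Note that mlx5_esw_host_functions_enabled() deliberately defaults to true when no eswitch has been set up yet, preserving the legacy assumption that a host PF exists; only an ECPF that saw host_pf_not_exist in QUERY_ESW_FUNCTIONS flips it. A sketch of the guard pattern callers use, condensed from the enable path above:

static int example_enable_host_pf(struct mlx5_eswitch *esw)
{
	if (!mlx5_esw_host_functions_enabled(esw->dev))
		return 0; /* no external host PF to enable */

	return host_pf_enable_hca(esw->dev);
}
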
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index b0b8ef3ec3c4..16eb99aba2a7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -197,6 +197,11 @@ static inline struct mlx5_vport *mlx5_devlink_port_vport_get(struct devlink_port
return mlx5_devlink_port_get(dl_port)->vport;
}
+#define MLX5_VHCA_ID_INVALID (-1)
+
+#define MLX5_VPORT_INVAL_VHCA_ID(vport) \
+ ((vport)->vhca_id == MLX5_VHCA_ID_INVALID)
+
struct mlx5_vport {
struct mlx5_core_dev *dev;
struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
@@ -209,6 +214,13 @@ struct mlx5_vport {
struct vport_egress egress;
u32 default_metadata;
u32 metadata;
+ int vhca_id;
+
+ bool adjacent; /* delegated vhca from adjacent function */
+ struct {
+ u16 parent_pci_devfn; /* Adjacent parent PCI device function */
+ u16 function_id; /* Function ID of the delegated VPort */
+ } adj_info;
struct mlx5_vport_info info;
@@ -323,6 +335,7 @@ struct mlx5_host_work {
struct mlx5_esw_functions {
struct mlx5_nb nb;
+ bool host_funcs_disabled;
u16 num_vfs;
u16 num_ec_vfs;
};
@@ -373,15 +386,11 @@ struct mlx5_eswitch {
refcount_t refcnt;
u32 root_tsar_ix;
struct mlx5_qos_domain *domain;
- /* Contains all vports with QoS enabled but no explicit node.
- * Cannot be NULL if QoS is enabled, but may be a fake node
- * referencing the root TSAR if the esw doesn't support nodes.
- */
- struct mlx5_esw_sched_node *node0;
} qos;
struct mlx5_esw_bridge_offloads *br_offloads;
struct mlx5_esw_offload offloads;
+ u32 last_vport_idx;
int mode;
u16 manager_vport;
u16 first_host_vport;
@@ -415,6 +424,8 @@ int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
+int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw, int index, u16 vport_num);
+void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs);
@@ -422,7 +433,8 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf);
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
-void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key);
+void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw,
+ const struct mlx5_devcom_match_attr *attr);
void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw);
bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
@@ -620,6 +632,9 @@ bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);
+void mlx5_esw_adjacent_vhcas_setup(struct mlx5_eswitch *esw);
+void mlx5_esw_adjacent_vhcas_cleanup(struct mlx5_eswitch *esw);
+
#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
#define esw_info(__dev, format, ...) \
@@ -822,9 +837,17 @@ struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u1
int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id);
-int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num);
-void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num);
+int mlx5_esw_vport_vhca_id_map(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
+void mlx5_esw_vport_vhca_id_unmap(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num);
+bool mlx5_esw_vport_vhca_id(struct mlx5_eswitch *esw, u16 vportn, u16 *vhca_id);
+
+void mlx5_esw_offloads_rep_remove(struct mlx5_eswitch *esw,
+ const struct mlx5_vport *vport);
+int mlx5_esw_offloads_rep_add(struct mlx5_eswitch *esw,
+ const struct mlx5_vport *vport);
/**
* struct mlx5_esw_event_info - Indicates eswitch mode changed/changing.
@@ -856,7 +879,7 @@ void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
struct mlx5_eswitch *slave_esw);
int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw);
-bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev);
+bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev, bool from_fdb);
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev);
int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev);
@@ -898,6 +921,7 @@ int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_v
bool enable);
int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
u16 vport_num);
+bool mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev);
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
@@ -905,7 +929,9 @@ static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
-static inline void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key) {}
+static inline void
+mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw,
+ const struct mlx5_devcom_match_attr *attr) {}
static inline void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) {}
static inline bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw) { return false; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
@@ -948,7 +974,8 @@ mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw)
return 0;
}
-static inline bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
+static inline bool
+mlx5_eswitch_block_encap(struct mlx5_core_dev *dev, bool from_fdb)
{
return true;
}
@@ -965,6 +992,19 @@ static inline bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev)
}
static inline void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev) {}
+
+static inline bool
+mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev)
+{
+ return true;
+}
+
+static inline bool
+mlx5_esw_vport_vhca_id(struct mlx5_eswitch *esw, u16 vportn, u16 *vhca_id)
+{
+ return false;
+}
+
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESWITCH_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index bee906661282..4cf995be127d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1016,8 +1016,8 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(on_esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule))
- esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %ld\n",
- PTR_ERR(flow_rule));
+ esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %pe\n",
+ flow_rule);
out:
kvfree(spec);
return flow_rule;
@@ -1065,8 +1065,8 @@ mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num
flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule))
- esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule vport %d, err %ld\n",
- vport_num, PTR_ERR(flow_rule));
+ esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule vport %d, err %pe\n",
+ vport_num, flow_rule);
kvfree(spec);
return flow_rule;
@@ -1213,7 +1213,8 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
- if (mlx5_core_is_ecpf_esw_manager(peer_dev)) {
+ if (mlx5_core_is_ecpf_esw_manager(peer_dev) &&
+ mlx5_esw_host_functions_enabled(peer_dev)) {
peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
esw_set_peer_miss_rule_source_port(esw, peer_esw, spec,
MLX5_VPORT_PF);
@@ -1239,19 +1240,21 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
flows[peer_vport->index] = flow;
}
- mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
- mlx5_core_max_vfs(peer_dev)) {
- esw_set_peer_miss_rule_source_port(esw,
- peer_esw,
- spec, peer_vport->vport);
+ if (mlx5_esw_host_functions_enabled(esw->dev)) {
+ mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
+ mlx5_core_max_vfs(peer_dev)) {
+ esw_set_peer_miss_rule_source_port(esw, peer_esw,
+ spec,
+ peer_vport->vport);
- flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
- spec, &flow_act, &dest, 1);
- if (IS_ERR(flow)) {
- err = PTR_ERR(flow);
- goto add_vf_flow_err;
+ flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
+ spec, &flow_act, &dest, 1);
+ if (IS_ERR(flow)) {
+ err = PTR_ERR(flow);
+ goto add_vf_flow_err;
+ }
+ flows[peer_vport->index] = flow;
}
- flows[peer_vport->index] = flow;
}
if (mlx5_core_ec_sriov_enabled(peer_dev)) {
@@ -1301,7 +1304,9 @@ add_vf_flow_err:
mlx5_del_flow_rules(flows[peer_vport->index]);
}
add_ecpf_flow_err:
- if (mlx5_core_is_ecpf_esw_manager(peer_dev)) {
+
+ if (mlx5_core_is_ecpf_esw_manager(peer_dev) &&
+ mlx5_esw_host_functions_enabled(peer_dev)) {
peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
mlx5_del_flow_rules(flows[peer_vport->index]);
}
@@ -2154,7 +2159,9 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
&flow_act, dest, 1);
if (IS_ERR(flow_rule)) {
- esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
+ esw_warn(esw->dev,
+ "fs offloads: Failed to add vport rx rule err %pe\n",
+ flow_rule);
goto out;
}
@@ -2173,8 +2180,8 @@ static int esw_create_vport_rx_drop_rule(struct mlx5_eswitch *esw)
&flow_act, NULL, 0);
if (IS_ERR(flow_rule)) {
esw_warn(esw->dev,
- "fs offloads: Failed to add vport rx drop rule err %ld\n",
- PTR_ERR(flow_rule));
+ "fs offloads: Failed to add vport rx drop rule err %pe\n",
+ flow_rule);
return PTR_ERR(flow_rule);
}
@@ -2373,7 +2380,20 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
return 0;
}
-static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx5_vport *vport)
+void mlx5_esw_offloads_rep_remove(struct mlx5_eswitch *esw,
+ const struct mlx5_vport *vport)
+{
+ struct mlx5_eswitch_rep *rep = xa_load(&esw->offloads.vport_reps,
+ vport->vport);
+
+ if (!rep)
+ return;
+ xa_erase(&esw->offloads.vport_reps, vport->vport);
+ kfree(rep);
+}
+
+int mlx5_esw_offloads_rep_add(struct mlx5_eswitch *esw,
+ const struct mlx5_vport *vport)
{
struct mlx5_eswitch_rep *rep;
int rep_type;
@@ -2385,9 +2405,19 @@ static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx
rep->vport = vport->vport;
rep->vport_index = vport->index;
- for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
- atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
-
+ for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
+ if (!esw->offloads.rep_ops[rep_type]) {
+ atomic_set(&rep->rep_data[rep_type].state,
+ REP_UNREGISTERED);
+ continue;
+ }
+ /* Dynamic/delegated vports add their representors after
+ * mlx5_eswitch_register_vport_reps(), so mark them as registered
+ * here so they get loaded later along with the others.
+ */
+ rep->esw = esw;
+ atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
+ }
err = xa_insert(&esw->offloads.vport_reps, rep->vport, rep, GFP_KERNEL);
if (err)
goto insert_err;
@@ -2425,7 +2455,7 @@ static int esw_offloads_init_reps(struct mlx5_eswitch *esw)
xa_init(&esw->offloads.vport_reps);
mlx5_esw_for_each_vport(esw, i, vport) {
- err = mlx5_esw_offloads_rep_init(esw, vport);
+ err = mlx5_esw_offloads_rep_add(esw, vport);
if (err)
goto err;
}
@@ -3076,7 +3106,8 @@ err_out:
return err;
}
-void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key)
+void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw,
+ const struct mlx5_devcom_match_attr *attr)
{
int i;
@@ -3095,7 +3126,7 @@ void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key)
esw->num_peers = 0;
esw->devcom = mlx5_devcom_register_component(esw->dev->priv.devc,
MLX5_DEVCOM_ESW_OFFLOADS,
- key,
+ attr,
mlx5_esw_offloads_devcom_event,
esw);
if (IS_ERR(esw->devcom))
@@ -3533,6 +3564,8 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
int err;
mutex_init(&esw->offloads.termtbl_mutex);
+ mlx5_esw_adjacent_vhcas_setup(esw);
+
err = mlx5_rdma_enable_roce(esw->dev);
if (err)
goto err_roce;
@@ -3597,6 +3630,7 @@ err_vport_metadata:
err_metadata:
mlx5_rdma_disable_roce(esw->dev);
err_roce:
+ mlx5_esw_adjacent_vhcas_cleanup(esw);
mutex_destroy(&esw->offloads.termtbl_mutex);
return err;
}
@@ -3630,6 +3664,7 @@ void esw_offloads_disable(struct mlx5_eswitch *esw)
mapping_destroy(esw->offloads.reg_c0_obj_pool);
esw_offloads_metadata_uninit(esw);
mlx5_rdma_disable_roce(esw->dev);
+ mlx5_esw_adjacent_vhcas_cleanup(esw);
mutex_destroy(&esw->offloads.termtbl_mutex);
}
@@ -3739,6 +3774,29 @@ void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev)
up_write(&esw->mode_lock);
}
+/* Returns false only when an uplink netdev exists and its netns differs from
+ * the devlink's netns. Returns true otherwise, so entering switchdev mode is
+ * allowed.
+ */
+static bool mlx5_devlink_netdev_netns_immutable_set(struct devlink *devlink,
+ bool immutable)
+{
+ struct mlx5_core_dev *mdev = devlink_priv(devlink);
+ struct net_device *netdev;
+ bool ret;
+
+ netdev = mlx5_uplink_netdev_get(mdev);
+ if (!netdev)
+ return true;
+
+ rtnl_lock();
+ netdev->netns_immutable = immutable;
+ ret = net_eq(dev_net(netdev), devlink_net(devlink));
+ rtnl_unlock();
+
+ mlx5_uplink_netdev_put(mdev, netdev);
+ return ret;
+}
+
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
@@ -3781,6 +3839,14 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
esw->eswitch_operation_in_progress = true;
up_write(&esw->mode_lock);
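+ /* Pin the uplink netdev to its current netns before entering switchdev. */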
+ if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV &&
+ !mlx5_devlink_netdev_netns_immutable_set(devlink, true)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's.");
+ err = -EINVAL;
+ goto skip;
+ }
+
if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
esw->dev->priv.flags |= MLX5_PRIV_FLAGS_SWITCH_LEGACY;
mlx5_eswitch_disable_locked(esw);
@@ -3799,6 +3865,8 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
}
skip:
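+ /* Roll back the netns pin if entering switchdev mode failed. */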
+ if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && err)
+ mlx5_devlink_netdev_netns_immutable_set(devlink, false);
down_write(&esw->mode_lock);
esw->eswitch_operation_in_progress = false;
unlock:
@@ -3938,23 +4006,25 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}
-bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
+bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev, bool from_fdb)
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
+ enum devlink_eswitch_encap_mode encap;
+ bool allow_tunnel = false;
if (!mlx5_esw_allowed(esw))
return true;
down_write(&esw->mode_lock);
- if (esw->mode != MLX5_ESWITCH_LEGACY &&
- esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
- up_write(&esw->mode_lock);
- return false;
+ encap = esw->offloads.encap;
+ if (esw->mode == MLX5_ESWITCH_LEGACY ||
+ (encap == DEVLINK_ESWITCH_ENCAP_MODE_NONE && !from_fdb)) {
+ allow_tunnel = true;
+ esw->offloads.num_block_encap++;
}
-
- esw->offloads.num_block_encap++;
up_write(&esw->mode_lock);
- return true;
+
+ return allow_tunnel;
}
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
@@ -4059,7 +4129,8 @@ mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
{
/* Currently, only ECPF based device has representor for host PF. */
if (vport_num == MLX5_VPORT_PF &&
- !mlx5_core_is_ecpf_esw_manager(esw->dev))
+ (!mlx5_core_is_ecpf_esw_manager(esw->dev) ||
+ !mlx5_esw_host_functions_enabled(esw->dev)))
return false;
if (vport_num == MLX5_VPORT_ECPF &&
@@ -4161,23 +4232,28 @@ u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
-int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
+int mlx5_esw_vport_vhca_id_map(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
u16 *old_entry, *vhca_map_entry, vhca_id;
- int err;
- err = mlx5_vport_get_vhca_id(esw->dev, vport_num, &vhca_id);
- if (err) {
- esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
- vport_num, err);
- return err;
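+ /* The vhca_id is normally cached at vport init; query it as a fallback. */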
+ if (WARN_ONCE(MLX5_VPORT_INVAL_VHCA_ID(vport),
+ "vport %d vhca_id is not set", vport->vport)) {
+ int err;
+
+ err = mlx5_vport_get_vhca_id(vport->dev, vport->vport,
+ &vhca_id);
+ if (err)
+ return err;
+ vport->vhca_id = vhca_id;
}
+ vhca_id = vport->vhca_id;
vhca_map_entry = kmalloc(sizeof(*vhca_map_entry), GFP_KERNEL);
if (!vhca_map_entry)
return -ENOMEM;
- *vhca_map_entry = vport_num;
+ *vhca_map_entry = vport->vport;
old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, GFP_KERNEL);
if (xa_is_err(old_entry)) {
kfree(vhca_map_entry);
@@ -4187,17 +4263,12 @@ int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
return 0;
}
-void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num)
+void mlx5_esw_vport_vhca_id_unmap(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- u16 *vhca_map_entry, vhca_id;
- int err;
-
- err = mlx5_vport_get_vhca_id(esw->dev, vport_num, &vhca_id);
- if (err)
- esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%hu,err=%d)\n",
- vport_num, err);
+ u16 *vhca_map_entry;
- vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vhca_id);
+ vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vport->vhca_id);
kfree(vhca_map_entry);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index c4de6bf8d1b6..cb1319974f83 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -475,7 +475,6 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
*conn->cq.mcq.arm_db = 0;
conn->cq.mcq.vector = 0;
conn->cq.mcq.comp = mlx5_fpga_conn_cq_complete;
- conn->cq.mcq.uar = fdev->conn_res.uar;
tasklet_setup(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet);
mlx5_fpga_dbg(fdev, "Created CQ #0x%x\n", conn->cq.mcq.cqn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index d87392360dbd..2db3ffb0a2b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -114,9 +114,9 @@
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy,
- * {IPsec RoCE MPV,Alias table},IPsec RoCE policy
+ * IPsec policy miss, {IPsec RoCE MPV, Alias table}, IPsec RoCE policy
*/
-#define KERNEL_NIC_PRIO_NUM_LEVELS 10
+#define KERNEL_NIC_PRIO_NUM_LEVELS 11
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc, and one more for promisc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 2)
@@ -663,7 +663,7 @@ static void del_sw_hw_rule(struct fs_node *node)
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
- mlx5_fc_local_destroy(rule->dest_attr.counter);
+ mlx5_fc_local_put(rule->dest_attr.counter);
goto out;
}
@@ -2793,30 +2793,32 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL(mlx5_get_flow_namespace);
+struct mlx5_vport_acl_root_ns {
+ u16 vport_idx;
+ struct mlx5_flow_root_namespace *root_ns;
+};
+
struct mlx5_flow_namespace *
mlx5_get_flow_vport_namespace(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type type, int vport_idx)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
+ struct mlx5_vport_acl_root_ns *vport_ns;
if (!steering)
return NULL;
switch (type) {
case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
- if (vport_idx >= steering->esw_egress_acl_vports)
- return NULL;
- if (steering->esw_egress_root_ns &&
- steering->esw_egress_root_ns[vport_idx])
- return &steering->esw_egress_root_ns[vport_idx]->ns;
+ vport_ns = xa_load(&steering->esw_egress_root_ns, vport_idx);
+ if (vport_ns)
+ return &vport_ns->root_ns->ns;
else
return NULL;
case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
- if (vport_idx >= steering->esw_ingress_acl_vports)
- return NULL;
- if (steering->esw_ingress_root_ns &&
- steering->esw_ingress_root_ns[vport_idx])
- return &steering->esw_ingress_root_ns[vport_idx]->ns;
+ vport_ns = xa_load(&steering->esw_ingress_root_ns, vport_idx);
+ if (vport_ns)
+ return &vport_ns->root_ns->ns;
else
return NULL;
case MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX:
@@ -3575,118 +3577,102 @@ out_err:
return err;
}
-static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
+static void
+mlx5_fs_remove_vport_acl_root_ns(struct xarray *esw_acl_root_ns, u16 vport_idx)
{
- struct fs_prio *prio;
-
- steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
- if (!steering->esw_egress_root_ns[vport])
- return -ENOMEM;
+ struct mlx5_vport_acl_root_ns *vport_ns;
- /* create 1 prio*/
- prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
- return PTR_ERR_OR_ZERO(prio);
+ vport_ns = xa_erase(esw_acl_root_ns, vport_idx);
+ if (vport_ns) {
+ cleanup_root_ns(vport_ns->root_ns);
+ kfree(vport_ns);
+ }
}
-static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
+static int
+mlx5_fs_add_vport_acl_root_ns(struct mlx5_flow_steering *steering,
+ struct xarray *esw_acl_root_ns,
+ enum fs_flow_table_type table_type,
+ u16 vport_idx)
{
+ struct mlx5_vport_acl_root_ns *vport_ns;
struct fs_prio *prio;
+ int err;
- steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
- if (!steering->esw_ingress_root_ns[vport])
- return -ENOMEM;
+ /* Sanity check: only the intended xarrays may be passed in. */
+ if (WARN_ON(esw_acl_root_ns != &steering->esw_egress_root_ns &&
+ esw_acl_root_ns != &steering->esw_ingress_root_ns))
+ return -EINVAL;
- /* create 1 prio*/
- prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
- return PTR_ERR_OR_ZERO(prio);
-}
+ if (table_type != FS_FT_ESW_EGRESS_ACL &&
+ table_type != FS_FT_ESW_INGRESS_ACL) {
+ mlx5_core_err(steering->dev,
+ "Invalid table type %d for egress/ingress ACLs\n",
+ table_type);
+ return -EINVAL;
+ }
-int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports)
-{
- struct mlx5_flow_steering *steering = dev->priv.steering;
- int err;
- int i;
+ if (xa_load(esw_acl_root_ns, vport_idx))
+ return -EEXIST;
- steering->esw_egress_root_ns =
- kcalloc(total_vports,
- sizeof(*steering->esw_egress_root_ns),
- GFP_KERNEL);
- if (!steering->esw_egress_root_ns)
+ vport_ns = kzalloc(sizeof(*vport_ns), GFP_KERNEL);
+ if (!vport_ns)
return -ENOMEM;
- for (i = 0; i < total_vports; i++) {
- err = init_egress_acl_root_ns(steering, i);
- if (err)
- goto cleanup_root_ns;
+ vport_ns->root_ns = create_root_ns(steering, table_type);
+ if (!vport_ns->root_ns) {
+ err = -ENOMEM;
+ goto kfree_vport_ns;
}
- steering->esw_egress_acl_vports = total_vports;
+
+ /* create 1 prio */
+ prio = fs_create_prio(&vport_ns->root_ns->ns, 0, 1);
+ if (IS_ERR(prio)) {
+ err = PTR_ERR(prio);
+ goto cleanup_root_ns;
+ }
+
+ vport_ns->vport_idx = vport_idx;
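+ /* Publish the namespace; xa_insert() fails if the index is already used. */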
+ err = xa_insert(esw_acl_root_ns, vport_idx, vport_ns, GFP_KERNEL);
+ if (err)
+ goto cleanup_root_ns;
return 0;
cleanup_root_ns:
- for (i--; i >= 0; i--)
- cleanup_root_ns(steering->esw_egress_root_ns[i]);
- kfree(steering->esw_egress_root_ns);
- steering->esw_egress_root_ns = NULL;
+ cleanup_root_ns(vport_ns->root_ns);
+kfree_vport_ns:
+ kfree(vport_ns);
return err;
}
-void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev)
+int mlx5_fs_vport_egress_acl_ns_add(struct mlx5_flow_steering *steering,
+ u16 vport_idx)
{
- struct mlx5_flow_steering *steering = dev->priv.steering;
- int i;
-
- if (!steering->esw_egress_root_ns)
- return;
-
- for (i = 0; i < steering->esw_egress_acl_vports; i++)
- cleanup_root_ns(steering->esw_egress_root_ns[i]);
-
- kfree(steering->esw_egress_root_ns);
- steering->esw_egress_root_ns = NULL;
+ return mlx5_fs_add_vport_acl_root_ns(steering,
+ &steering->esw_egress_root_ns,
+ FS_FT_ESW_EGRESS_ACL, vport_idx);
}
-int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports)
+int mlx5_fs_vport_ingress_acl_ns_add(struct mlx5_flow_steering *steering,
+ u16 vport_idx)
{
- struct mlx5_flow_steering *steering = dev->priv.steering;
- int err;
- int i;
-
- steering->esw_ingress_root_ns =
- kcalloc(total_vports,
- sizeof(*steering->esw_ingress_root_ns),
- GFP_KERNEL);
- if (!steering->esw_ingress_root_ns)
- return -ENOMEM;
-
- for (i = 0; i < total_vports; i++) {
- err = init_ingress_acl_root_ns(steering, i);
- if (err)
- goto cleanup_root_ns;
- }
- steering->esw_ingress_acl_vports = total_vports;
- return 0;
-
-cleanup_root_ns:
- for (i--; i >= 0; i--)
- cleanup_root_ns(steering->esw_ingress_root_ns[i]);
- kfree(steering->esw_ingress_root_ns);
- steering->esw_ingress_root_ns = NULL;
- return err;
+ return mlx5_fs_add_vport_acl_root_ns(steering,
+ &steering->esw_ingress_root_ns,
+ FS_FT_ESW_INGRESS_ACL, vport_idx);
}
-void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev)
+void mlx5_fs_vport_egress_acl_ns_remove(struct mlx5_flow_steering *steering,
+ int vport_idx)
{
- struct mlx5_flow_steering *steering = dev->priv.steering;
- int i;
-
- if (!steering->esw_ingress_root_ns)
- return;
-
- for (i = 0; i < steering->esw_ingress_acl_vports; i++)
- cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+ mlx5_fs_remove_vport_acl_root_ns(&steering->esw_egress_root_ns,
+ vport_idx);
+}
- kfree(steering->esw_ingress_root_ns);
- steering->esw_ingress_root_ns = NULL;
+void mlx5_fs_vport_ingress_acl_ns_remove(struct mlx5_flow_steering *steering,
+ int vport_idx)
+{
+ mlx5_fs_remove_vport_acl_root_ns(&steering->esw_ingress_root_ns,
+ vport_idx);
}
u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type)
@@ -3734,6 +3720,13 @@ static int mlx5_fs_mode_validate(struct devlink *devlink, u32 id,
char *value = val.vstr;
u8 eswitch_mode;
+ eswitch_mode = mlx5_eswitch_mode(dev);
+ if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Changing fs mode is not supported when eswitch offloads enabled.");
+ return -EOPNOTSUPP;
+ }
+
if (!strcmp(value, "dmfs"))
return 0;
@@ -3759,14 +3752,6 @@ static int mlx5_fs_mode_validate(struct devlink *devlink, u32 id,
return -EINVAL;
}
- eswitch_mode = mlx5_eswitch_mode(dev);
- if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) {
- NL_SET_ERR_MSG_FMT_MOD(extack,
- "Moving to %s is not supported when eswitch offloads enabled.",
- value);
- return -EOPNOTSUPP;
- }
-
return 0;
}
@@ -3819,6 +3804,11 @@ void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
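+ /* All per-vport ACL namespaces must have been removed by now. */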
+ WARN_ON(!xa_empty(&steering->esw_egress_root_ns));
+ WARN_ON(!xa_empty(&steering->esw_ingress_root_ns));
+ xa_destroy(&steering->esw_egress_root_ns);
+ xa_destroy(&steering->esw_ingress_root_ns);
+
cleanup_root_ns(steering->root_ns);
cleanup_fdb_root_ns(steering);
cleanup_root_ns(steering->port_sel_root_ns);
@@ -3909,6 +3899,8 @@ int mlx5_fs_core_init(struct mlx5_core_dev *dev)
goto err;
}
+ xa_init(&steering->esw_egress_root_ns);
+ xa_init(&steering->esw_ingress_root_ns);
return 0;
err:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 500826229b0b..8458ce203dac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -151,16 +151,14 @@ struct mlx5_flow_steering {
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_root_namespace *fdb_root_ns;
struct mlx5_flow_namespace **fdb_sub_ns;
- struct mlx5_flow_root_namespace **esw_egress_root_ns;
- struct mlx5_flow_root_namespace **esw_ingress_root_ns;
+ struct xarray esw_egress_root_ns;
+ struct xarray esw_ingress_root_ns;
struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
struct mlx5_flow_root_namespace *sniffer_rx_root_ns;
struct mlx5_flow_root_namespace *rdma_rx_root_ns;
struct mlx5_flow_root_namespace *rdma_tx_root_ns;
struct mlx5_flow_root_namespace *egress_root_ns;
struct mlx5_flow_root_namespace *port_sel_root_ns;
- int esw_egress_acl_vports;
- int esw_ingress_acl_vports;
struct mlx5_flow_root_namespace **rdma_transport_rx_root_ns;
struct mlx5_flow_root_namespace **rdma_transport_tx_root_ns;
int rdma_transport_rx_vports;
@@ -343,6 +341,7 @@ struct mlx5_fc {
enum mlx5_fc_type type;
struct mlx5_fc_bulk *bulk;
struct mlx5_fc_cache cache;
+ refcount_t fc_local_refcount;
/* last{packets,bytes} are used for calculating deltas since last reading. */
u64 lastpackets;
u64 lastbytes;
@@ -378,10 +377,14 @@ void mlx5_fs_core_free(struct mlx5_core_dev *dev);
int mlx5_fs_core_init(struct mlx5_core_dev *dev);
void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev);
-int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports);
-void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev);
-int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports);
-void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev);
+int mlx5_fs_vport_egress_acl_ns_add(struct mlx5_flow_steering *steering,
+ u16 vport_idx);
+int mlx5_fs_vport_ingress_acl_ns_add(struct mlx5_flow_steering *steering,
+ u16 vport_idx);
+void mlx5_fs_vport_egress_acl_ns_remove(struct mlx5_flow_steering *steering,
+ int vport_idx);
+void mlx5_fs_vport_ingress_acl_ns_remove(struct mlx5_flow_steering *steering,
+ int vport_idx);
u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 492775d3d193..83001eda3884 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -562,17 +562,36 @@ mlx5_fc_local_create(u32 counter_id, u32 offset, u32 bulk_size)
counter->id = counter_id;
fc_bulk->base_id = counter_id - offset;
fc_bulk->fs_bulk.bulk_len = bulk_size;
+ refcount_set(&fc_bulk->hws_data.hws_action_refcount, 0);
+ mutex_init(&fc_bulk->hws_data.lock);
counter->bulk = fc_bulk;
+ refcount_set(&counter->fc_local_refcount, 1);
return counter;
}
EXPORT_SYMBOL(mlx5_fc_local_create);
void mlx5_fc_local_destroy(struct mlx5_fc *counter)
{
- if (!counter || counter->type != MLX5_FC_TYPE_LOCAL)
- return;
-
kfree(counter->bulk);
kfree(counter);
}
EXPORT_SYMBOL(mlx5_fc_local_destroy);
+
+void mlx5_fc_local_get(struct mlx5_fc *counter)
+{
+ if (!counter || counter->type != MLX5_FC_TYPE_LOCAL)
+ return;
+
+ refcount_inc(&counter->fc_local_refcount);
+}
+
+void mlx5_fc_local_put(struct mlx5_fc *counter)
+{
+ if (!counter || counter->type != MLX5_FC_TYPE_LOCAL)
+ return;
+
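+ /* Free the counter only when the last reference is dropped. */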
+ if (!refcount_dec_and_test(&counter->fc_local_refcount))
+ return;
+
+ mlx5_fc_local_destroy(counter);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 57476487e31f..eeb4437975f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -294,6 +294,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
+ if (MLX5_CAP_GEN(dev, psp)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_PSP);
+ if (err)
+ return err;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 69933addd921..89e399606877 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -6,13 +6,15 @@
#include "fw_reset.h"
#include "diag/fw_tracer.h"
#include "lib/tout.h"
+#include "sf/sf.h"
enum {
MLX5_FW_RESET_FLAGS_RESET_REQUESTED,
MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST,
MLX5_FW_RESET_FLAGS_PENDING_COMP,
MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS,
- MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED
+ MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED,
+ MLX5_FW_RESET_FLAGS_UNLOAD_EVENT,
};
struct mlx5_fw_reset {
@@ -25,6 +27,7 @@ struct mlx5_fw_reset {
struct work_struct reset_reload_work;
struct work_struct reset_now_work;
struct work_struct reset_abort_work;
+ struct delayed_work reset_timeout_work;
unsigned long reset_flags;
u8 reset_method;
struct timer_list timer;
@@ -219,7 +222,7 @@ int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev)
return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL0, 0, 0, false);
}
-static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unloaded)
+static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
struct devlink *devlink = priv_to_devlink(dev);
@@ -228,8 +231,7 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unload
if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
complete(&fw_reset->done);
} else {
- if (!unloaded)
- mlx5_unload_one(dev, false);
+ mlx5_sync_reset_unload_flow(dev, false);
if (mlx5_health_wait_pci_up(dev))
mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
else
@@ -258,6 +260,8 @@ static int mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool
return -EALREADY;
}
+ if (current_work() != &fw_reset->reset_timeout_work.work)
+ cancel_delayed_work(&fw_reset->reset_timeout_work);
mlx5_stop_sync_reset_poll(dev);
if (poll_health)
mlx5_start_health_poll(dev);
@@ -272,7 +276,7 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work)
mlx5_sync_reset_clear_reset_requested(dev, false);
mlx5_enter_error_state(dev, true);
- mlx5_fw_reset_complete_reload(dev, false);
+ mlx5_fw_reset_complete_reload(dev);
}
#define MLX5_RESET_POLL_INTERVAL (HZ / 10)
@@ -329,6 +333,11 @@ static int mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev)
}
mlx5_stop_health_poll(dev, true);
mlx5_start_sync_reset_poll(dev);
+
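+ /* Arm a watchdog in case the expected reset event never arrives. */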
+ if (!test_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS,
+ &fw_reset->reset_flags))
+ schedule_delayed_work(&fw_reset->reset_timeout_work,
+ msecs_to_jiffies(mlx5_tout_ms(dev, PCI_SYNC_UPDATE)));
return 0;
}
@@ -428,6 +437,11 @@ static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev,
return false;
}
+ if (!mlx5_core_is_ecpf(dev) && !mlx5_sf_table_empty(dev)) {
+ mlx5_core_warn(dev, "SFs should be removed before reset\n");
+ return false;
+ }
+
#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
if (reset_method != MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET) {
err = mlx5_check_hotplug_interrupt(dev, bridge);
@@ -586,6 +600,65 @@ static int mlx5_sync_pci_reset(struct mlx5_core_dev *dev, u8 reset_method)
return err;
}
+void mlx5_sync_reset_unload_flow(struct mlx5_core_dev *dev, bool locked)
+{
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ unsigned long timeout;
+ int poll_freq = 20;
+ bool reset_action;
+ u8 rst_state;
+ int err;
+
+ if (locked)
+ mlx5_unload_one_devl_locked(dev, false);
+ else
+ mlx5_unload_one(dev, false);
+
+ if (!test_bit(MLX5_FW_RESET_FLAGS_UNLOAD_EVENT, &fw_reset->reset_flags))
+ return;
+
+ mlx5_set_fw_rst_ack(dev);
+ mlx5_core_warn(dev, "Sync Reset Unload done, device reset expected\n");
+
+ reset_action = false;
+ timeout = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, RESET_UNLOAD));
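+ /* Poll until firmware requests a reset action or the timeout expires. */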
+ do {
+ rst_state = mlx5_get_fw_rst_state(dev);
+ if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ ||
+ rst_state == MLX5_FW_RST_STATE_IDLE) {
+ reset_action = true;
+ break;
+ }
+ if (rst_state == MLX5_FW_RST_STATE_DROP_MODE) {
+ mlx5_core_info(dev, "Sync Reset Drop mode ack\n");
+ mlx5_set_fw_rst_ack(dev);
+ poll_freq = 1000;
+ }
+ msleep(poll_freq);
+ } while (!time_after(jiffies, timeout));
+
+ if (!reset_action) {
+ mlx5_core_err(dev, "Got timeout waiting for sync reset action, state = %u\n",
+ rst_state);
+ fw_reset->ret = -ETIMEDOUT;
+ goto done;
+ }
+
+ mlx5_core_warn(dev, "Sync Reset, got reset action. rst_state = %u\n",
+ rst_state);
+ if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ) {
+ err = mlx5_sync_pci_reset(dev, fw_reset->reset_method);
+ if (err) {
+ mlx5_core_warn(dev, "mlx5_sync_pci_reset failed, err %d\n",
+ err);
+ fw_reset->ret = err;
+ }
+ }
+
+done:
+ clear_bit(MLX5_FW_RESET_FLAGS_UNLOAD_EVENT, &fw_reset->reset_flags);
+}
+
static void mlx5_sync_reset_now_event(struct work_struct *work)
{
struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
@@ -613,17 +686,13 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)
mlx5_enter_error_state(dev, true);
done:
fw_reset->ret = err;
- mlx5_fw_reset_complete_reload(dev, false);
+ mlx5_fw_reset_complete_reload(dev);
}
static void mlx5_sync_reset_unload_event(struct work_struct *work)
{
struct mlx5_fw_reset *fw_reset;
struct mlx5_core_dev *dev;
- unsigned long timeout;
- int poll_freq = 20;
- bool reset_action;
- u8 rst_state;
int err;
fw_reset = container_of(work, struct mlx5_fw_reset, reset_unload_work);
@@ -632,6 +701,7 @@ static void mlx5_sync_reset_unload_event(struct work_struct *work)
if (mlx5_sync_reset_clear_reset_requested(dev, false))
return;
+ set_bit(MLX5_FW_RESET_FLAGS_UNLOAD_EVENT, &fw_reset->reset_flags);
mlx5_core_warn(dev, "Sync Reset Unload. Function is forced down.\n");
err = mlx5_cmd_fast_teardown_hca(dev);
@@ -640,49 +710,7 @@ static void mlx5_sync_reset_unload_event(struct work_struct *work)
else
mlx5_enter_error_state(dev, true);
- if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags))
- mlx5_unload_one_devl_locked(dev, false);
- else
- mlx5_unload_one(dev, false);
-
- mlx5_set_fw_rst_ack(dev);
- mlx5_core_warn(dev, "Sync Reset Unload done, device reset expected\n");
-
- reset_action = false;
- timeout = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, RESET_UNLOAD));
- do {
- rst_state = mlx5_get_fw_rst_state(dev);
- if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ ||
- rst_state == MLX5_FW_RST_STATE_IDLE) {
- reset_action = true;
- break;
- }
- if (rst_state == MLX5_FW_RST_STATE_DROP_MODE) {
- mlx5_core_info(dev, "Sync Reset Drop mode ack\n");
- mlx5_set_fw_rst_ack(dev);
- poll_freq = 1000;
- }
- msleep(poll_freq);
- } while (!time_after(jiffies, timeout));
-
- if (!reset_action) {
- mlx5_core_err(dev, "Got timeout waiting for sync reset action, state = %u\n",
- rst_state);
- fw_reset->ret = -ETIMEDOUT;
- goto done;
- }
-
- mlx5_core_warn(dev, "Sync Reset, got reset action. rst_state = %u\n", rst_state);
- if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ) {
- err = mlx5_sync_pci_reset(dev, fw_reset->reset_method);
- if (err) {
- mlx5_core_warn(dev, "mlx5_sync_pci_reset failed, err %d\n", err);
- fw_reset->ret = err;
- }
- }
-
-done:
- mlx5_fw_reset_complete_reload(dev, true);
+ mlx5_fw_reset_complete_reload(dev);
}
static void mlx5_sync_reset_abort_event(struct work_struct *work)
@@ -719,6 +747,19 @@ static void mlx5_sync_reset_events_handle(struct mlx5_fw_reset *fw_reset, struct
}
}
+static void mlx5_sync_reset_timeout_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = container_of(work, struct delayed_work,
+ work);
+ struct mlx5_fw_reset *fw_reset =
+ container_of(dwork, struct mlx5_fw_reset, reset_timeout_work);
+ struct mlx5_core_dev *dev = fw_reset->dev;
+
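+ /* Bail out if the reset request was already handled elsewhere. */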
+ if (mlx5_sync_reset_clear_reset_requested(dev, true))
+ return;
+ mlx5_core_warn(dev, "PCI Sync FW Update Reset Timeout.\n");
+}
+
static int fw_reset_event_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
struct mlx5_fw_reset *fw_reset = mlx5_nb_cof(nb, struct mlx5_fw_reset, nb);
@@ -802,6 +843,7 @@ void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
cancel_work_sync(&fw_reset->reset_reload_work);
cancel_work_sync(&fw_reset->reset_now_work);
cancel_work_sync(&fw_reset->reset_abort_work);
+ cancel_delayed_work(&fw_reset->reset_timeout_work);
}
static const struct devlink_param mlx5_fw_reset_devlink_params[] = {
@@ -845,6 +887,8 @@ int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
INIT_WORK(&fw_reset->reset_reload_work, mlx5_sync_reset_reload_work);
INIT_WORK(&fw_reset->reset_now_work, mlx5_sync_reset_now_event);
INIT_WORK(&fw_reset->reset_abort_work, mlx5_sync_reset_abort_event);
+ INIT_DELAYED_WORK(&fw_reset->reset_timeout_work,
+ mlx5_sync_reset_timeout_work);
init_completion(&fw_reset->done);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
index ea527d06a85f..d5b28525c960 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
@@ -12,6 +12,7 @@ int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel,
int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev);
int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev);
+void mlx5_sync_reset_unload_flow(struct mlx5_core_dev *dev, bool locked);
int mlx5_fw_reset_verify_fw_complete(struct mlx5_core_dev *dev,
struct netlink_ext_ack *extack);
void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index cf7a1edd0530..aeeb136f5ebc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -669,57 +669,64 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
}
}
+#define MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD 180000
+#define MLX5_FW_REPORTER_PF_GRACEFUL_PERIOD 60000
+#define MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD 30000
+#define MLX5_FW_REPORTER_DEFAULT_GRACEFUL_PERIOD \
+ MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD
+
+static
+const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ecpf_ops = {
+ .name = "fw_fatal",
+ .recover = mlx5_fw_fatal_reporter_recover,
+ .dump = mlx5_fw_fatal_reporter_dump,
+ .default_graceful_period =
+ MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD,
+};
+
static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_pf_ops = {
.name = "fw_fatal",
.recover = mlx5_fw_fatal_reporter_recover,
.dump = mlx5_fw_fatal_reporter_dump,
+ .default_graceful_period = MLX5_FW_REPORTER_PF_GRACEFUL_PERIOD,
};
static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
.name = "fw_fatal",
.recover = mlx5_fw_fatal_reporter_recover,
+ .default_graceful_period =
+ MLX5_FW_REPORTER_DEFAULT_GRACEFUL_PERIOD,
};
-#define MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD 180000
-#define MLX5_FW_REPORTER_PF_GRACEFUL_PERIOD 60000
-#define MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD 30000
-#define MLX5_FW_REPORTER_DEFAULT_GRACEFUL_PERIOD MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD
-
void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
{
const struct devlink_health_reporter_ops *fw_fatal_ops;
struct mlx5_core_health *health = &dev->priv.health;
const struct devlink_health_reporter_ops *fw_ops;
struct devlink *devlink = priv_to_devlink(dev);
- u64 grace_period;
- fw_fatal_ops = &mlx5_fw_fatal_reporter_pf_ops;
fw_ops = &mlx5_fw_reporter_pf_ops;
if (mlx5_core_is_ecpf(dev)) {
- grace_period = MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD;
+ fw_fatal_ops = &mlx5_fw_fatal_reporter_ecpf_ops;
} else if (mlx5_core_is_pf(dev)) {
- grace_period = MLX5_FW_REPORTER_PF_GRACEFUL_PERIOD;
+ fw_fatal_ops = &mlx5_fw_fatal_reporter_pf_ops;
} else {
/* VF or SF */
- grace_period = MLX5_FW_REPORTER_DEFAULT_GRACEFUL_PERIOD;
fw_fatal_ops = &mlx5_fw_fatal_reporter_ops;
fw_ops = &mlx5_fw_reporter_ops;
}
- health->fw_reporter =
- devl_health_reporter_create(devlink, fw_ops, 0, dev);
+ health->fw_reporter = devl_health_reporter_create(devlink, fw_ops, dev);
if (IS_ERR(health->fw_reporter))
- mlx5_core_warn(dev, "Failed to create fw reporter, err = %ld\n",
- PTR_ERR(health->fw_reporter));
-
- health->fw_fatal_reporter =
- devl_health_reporter_create(devlink,
- fw_fatal_ops,
- grace_period,
- dev);
+ mlx5_core_warn(dev, "Failed to create fw reporter, err = %pe\n",
+ health->fw_reporter);
+
+ health->fw_fatal_reporter = devl_health_reporter_create(devlink,
+ fw_fatal_ops,
+ dev);
if (IS_ERR(health->fw_fatal_reporter))
- mlx5_core_warn(dev, "Failed to create fw fatal reporter, err = %ld\n",
- PTR_ERR(health->fw_fatal_reporter));
+ mlx5_core_warn(dev, "Failed to create fw fatal reporter, err = %pe\n",
+ health->fw_fatal_reporter);
}
static void mlx5_fw_reporters_destroy(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
index 82d3c2568244..14d339eceb92 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
@@ -150,8 +150,8 @@ mlx5_irq_affinity_request(struct mlx5_core_dev *dev, struct mlx5_irq_pool *pool,
if (IS_ERR(new_irq)) {
if (!least_loaded_irq) {
/* We failed to create an IRQ and we didn't find an IRQ */
- mlx5_core_err(pool->dev, "Didn't find a matching IRQ. err = %ld\n",
- PTR_ERR(new_irq));
+ mlx5_core_err(pool->dev, "Didn't find a matching IRQ. err = %pe\n",
+ new_irq);
mutex_unlock(&pool->lock);
return new_irq;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index d058cbb4a00c..59c00c911275 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -35,6 +35,7 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
+#include "lib/mlx5.h"
#include "lib/devcom.h"
#include "mlx5_core.h"
#include "eswitch.h"
@@ -231,9 +232,13 @@ static void mlx5_do_bond_work(struct work_struct *work);
static void mlx5_ldev_free(struct kref *ref)
{
struct mlx5_lag *ldev = container_of(ref, struct mlx5_lag, ref);
+ struct net *net;
+
+ if (ldev->nb.notifier_call) {
+ net = read_pnet(&ldev->net);
+ unregister_netdevice_notifier_net(net, &ldev->nb);
+ }
- if (ldev->nb.notifier_call)
- unregister_netdevice_notifier_net(&init_net, &ldev->nb);
mlx5_lag_mp_cleanup(ldev);
cancel_delayed_work_sync(&ldev->bond_work);
destroy_workqueue(ldev->wq);
@@ -271,7 +276,8 @@ static struct mlx5_lag *mlx5_lag_dev_alloc(struct mlx5_core_dev *dev)
INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);
ldev->nb.notifier_call = mlx5_lag_netdev_event;
- if (register_netdevice_notifier_net(&init_net, &ldev->nb)) {
+ write_pnet(&ldev->net, mlx5_core_net(dev));
+ if (register_netdevice_notifier_net(read_pnet(&ldev->net), &ldev->nb)) {
ldev->nb.notifier_call = NULL;
mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
}
@@ -1404,6 +1410,36 @@ static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
return 0;
}
+static void mlx5_lag_unregister_hca_devcom_comp(struct mlx5_core_dev *dev)
+{
+ mlx5_devcom_unregister_component(dev->priv.hca_devcom_comp);
+}
+
+static int mlx5_lag_register_hca_devcom_comp(struct mlx5_core_dev *dev)
+{
+ struct mlx5_devcom_match_attr attr = {
+ .key.val = mlx5_query_nic_system_image_guid(dev),
+ .flags = MLX5_DEVCOM_MATCH_FLAGS_NS,
+ .net = mlx5_core_net(dev),
+ };
+
+ /* This component is used to sync adding core_dev to lag_dev and to sync
+ * changes of mlx5_adev_devices between LAG layer and other layers.
+ */
+ dev->priv.hca_devcom_comp =
+ mlx5_devcom_register_component(dev->priv.devc,
+ MLX5_DEVCOM_HCA_PORTS,
+ &attr, NULL, dev);
+ if (IS_ERR(dev->priv.hca_devcom_comp)) {
+ mlx5_core_err(dev,
+ "Failed to register devcom HCA component, err: %ld\n",
+ PTR_ERR(dev->priv.hca_devcom_comp));
+ return PTR_ERR(dev->priv.hca_devcom_comp);
+ }
+
+ return 0;
+}
+
void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
@@ -1425,6 +1461,7 @@ recheck:
}
mlx5_ldev_remove_mdev(ldev, dev);
mutex_unlock(&ldev->lock);
+ mlx5_lag_unregister_hca_devcom_comp(dev);
mlx5_ldev_put(ldev);
}
@@ -1435,7 +1472,7 @@ void mlx5_lag_add_mdev(struct mlx5_core_dev *dev)
if (!mlx5_lag_is_supported(dev))
return;
- if (IS_ERR_OR_NULL(dev->priv.hca_devcom_comp))
+ if (mlx5_lag_register_hca_devcom_comp(dev))
return;
recheck:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index c2f256bb2bc2..4918eee2b3da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -67,6 +67,7 @@ struct mlx5_lag {
struct workqueue_struct *wq;
struct delayed_work bond_work;
struct notifier_block nb;
+ possible_net_t net;
struct lag_mp lag_mp;
struct mlx5_lag_port_sel port_sel;
/* Protect lag fields/state changes */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
index 58bd749b5e4d..129725159a93 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
@@ -100,7 +100,7 @@ static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
- MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.bfreg.up->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
@@ -129,7 +129,7 @@ static int mlx5_aso_create_cq(struct mlx5_core_dev *mdev, int numa_node,
return -ENOMEM;
MLX5_SET(cqc, cqc_data, log_cq_size, 1);
- MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.bfreg.up->index);
if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
MLX5_SET(cqc, cqc_data, cqe_sz, CQE_STRIDE_128_PAD);
@@ -163,7 +163,7 @@ static int mlx5_aso_alloc_sq(struct mlx5_core_dev *mdev, int numa_node,
struct mlx5_wq_param param;
int err;
- sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+ sq->uar_map = mdev->priv.bfreg.map;
param.db_numa_node = numa_node;
param.buf_numa_node = numa_node;
@@ -203,7 +203,7 @@ static int create_aso_sq(struct mlx5_core_dev *mdev, int pdn,
MLX5_SET(sqc, sqc, ts_format, ts_format);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
- MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
+ MLX5_SET(wq, wq, uar_page, mdev->priv.bfreg.index);
MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 214d732d18e9..d0ba83d77cd1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -247,27 +247,24 @@ static bool mlx5_is_ptm_source_time_available(struct mlx5_core_dev *dev)
return !!MLX5_GET(mtptm_reg, out, psta);
}
-static int mlx5_mtctr_syncdevicetime(ktime_t *device_time,
- struct system_counterval_t *sys_counterval,
- void *ctx)
+static int mlx5_mtctr_read(struct mlx5_core_dev *mdev,
+ bool real_time_mode,
+ struct system_counterval_t *sys_counterval,
+ u64 *device)
{
u32 out[MLX5_ST_SZ_DW(mtctr_reg)] = {0};
u32 in[MLX5_ST_SZ_DW(mtctr_reg)] = {0};
- struct mlx5_core_dev *mdev = ctx;
- bool real_time_mode;
- u64 host, device;
+ u64 host;
int err;
- real_time_mode = mlx5_real_time_mode(mdev);
-
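+ /* MTCTR samples the host PTM clock and the device clock in one access. */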
MLX5_SET(mtctr_reg, in, first_clock_timestamp_request,
MLX5_MTCTR_REQUEST_PTM_ROOT_CLOCK);
MLX5_SET(mtctr_reg, in, second_clock_timestamp_request,
real_time_mode ? MLX5_MTCTR_REQUEST_REAL_TIME_CLOCK :
- MLX5_MTCTR_REQUEST_FREE_RUNNING_COUNTER);
+ MLX5_MTCTR_REQUEST_FREE_RUNNING_COUNTER);
- err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), MLX5_REG_MTCTR,
- 0, 0);
+ err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_MTCTR, 0, 0);
if (err)
return err;
@@ -281,8 +278,26 @@ static int mlx5_mtctr_syncdevicetime(ktime_t *device_time,
.cs_id = CSID_X86_ART,
.use_nsecs = true,
};
+ *device = MLX5_GET64(mtctr_reg, out, second_clock_timestamp);
+
+ return 0;
+}
+
+static int mlx5_mtctr_syncdevicetime(ktime_t *device_time,
+ struct system_counterval_t *sys_counterval,
+ void *ctx)
+{
+ struct mlx5_core_dev *mdev = ctx;
+ bool real_time_mode;
+ u64 device;
+ int err;
+
+ real_time_mode = mlx5_real_time_mode(mdev);
+
+ err = mlx5_mtctr_read(mdev, real_time_mode, sys_counterval, &device);
+ if (err)
+ return err;
- device = MLX5_GET64(mtctr_reg, out, second_clock_timestamp);
if (real_time_mode)
*device_time = ns_to_ktime(REAL_TIME_TO_NS(device >> 32, device & U32_MAX));
else
@@ -291,6 +306,23 @@ static int mlx5_mtctr_syncdevicetime(ktime_t *device_time,
return 0;
}
+static int
+mlx5_mtctr_syncdevicecyclestime(ktime_t *device_time,
+ struct system_counterval_t *sys_counterval,
+ void *ctx)
+{
+ struct mlx5_core_dev *mdev = ctx;
+ u64 device;
+ int err;
+
+ err = mlx5_mtctr_read(mdev, false, sys_counterval, &device);
+ if (err)
+ return err;
+ *device_time = ns_to_ktime(device);
+
+ return 0;
+}
+
static int mlx5_ptp_getcrosststamp(struct ptp_clock_info *ptp,
struct system_device_crosststamp *cts)
{
@@ -315,6 +347,32 @@ unlock:
mlx5_clock_unlock(clock);
return err;
}
+
+static int mlx5_ptp_getcrosscycles(struct ptp_clock_info *ptp,
+ struct system_device_crosststamp *cts)
+{
+ struct mlx5_clock *clock =
+ container_of(ptp, struct mlx5_clock, ptp_info);
+ struct system_time_snapshot history_begin = {0};
+ struct mlx5_core_dev *mdev;
+ int err;
+
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+
+ if (!mlx5_is_ptm_source_time_available(mdev)) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ ktime_get_snapshot(&history_begin);
+
+ err = get_device_system_crosststamp(mlx5_mtctr_syncdevicecyclestime,
+ mdev, &history_begin, cts);
+unlock:
+ mlx5_clock_unlock(clock);
+ return err;
+}
#endif /* CONFIG_X86 */
static u64 mlx5_read_time(struct mlx5_core_dev *dev,
@@ -513,6 +571,24 @@ out:
return 0;
}
+static int mlx5_ptp_getcyclesx(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
+ ptp_info);
+ struct mlx5_core_dev *mdev;
+ u64 cycles;
+
+ mlx5_clock_lock(clock);
+ mdev = mlx5_clock_mdev_get(clock);
+
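+ /* Read the free-running cycle counter rather than the real-time clock. */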
+ cycles = mlx5_read_time(mdev, sts, false);
+ *ts = ns_to_timespec64(cycles);
+ mlx5_clock_unlock(clock);
+ return 0;
+}
+
static int mlx5_ptp_adjtime_real_time(struct mlx5_core_dev *mdev, s64 delta)
{
u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
@@ -1229,6 +1305,7 @@ static void mlx5_init_timer_max_freq_adjustment(struct mlx5_core_dev *mdev)
static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
{
struct mlx5_clock *clock = mdev->clock;
+ bool expose_cycles;
/* Configure the PHC */
clock->ptp_info = mlx5_ptp_clock_info;
@@ -1236,12 +1313,22 @@ static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
if (MLX5_CAP_MCAM_REG(mdev, mtutc))
mlx5_init_timer_max_freq_adjustment(mdev);
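+ /* Expose raw cycles unless the counter is disciplined and in real-time mode. */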
+ expose_cycles = !MLX5_CAP_GEN(mdev, disciplined_fr_counter) ||
+ !mlx5_real_time_mode(mdev);
+
#ifdef CONFIG_X86
if (MLX5_CAP_MCAM_REG3(mdev, mtptm) &&
- MLX5_CAP_MCAM_REG3(mdev, mtctr) && boot_cpu_has(X86_FEATURE_ART))
+ MLX5_CAP_MCAM_REG3(mdev, mtctr) && boot_cpu_has(X86_FEATURE_ART)) {
clock->ptp_info.getcrosststamp = mlx5_ptp_getcrosststamp;
+ if (expose_cycles)
+ clock->ptp_info.getcrosscycles =
+ mlx5_ptp_getcrosscycles;
+ }
#endif /* CONFIG_X86 */
+ if (expose_cycles)
+ clock->ptp_info.getcyclesx64 = mlx5_ptp_getcyclesx;
+
mlx5_timecounter_init(mdev);
mlx5_init_clock_info(mdev);
mlx5_init_overflow_period(mdev);
@@ -1278,9 +1365,9 @@ static void mlx5_init_clock_dev(struct mlx5_core_dev *mdev)
clock->ptp = ptp_clock_register(&clock->ptp_info,
clock->shared ? NULL : &mdev->pdev->dev);
if (IS_ERR(clock->ptp)) {
- mlx5_core_warn(mdev, "%sptp_clock_register failed %ld\n",
+ mlx5_core_warn(mdev, "%sptp_clock_register failed %pe\n",
clock->shared ? "shared clock " : "",
- PTR_ERR(clock->ptp));
+ clock->ptp);
clock->ptp = NULL;
}
@@ -1348,14 +1435,20 @@ static int mlx5_clock_alloc(struct mlx5_core_dev *mdev, bool shared)
static void mlx5_shared_clock_register(struct mlx5_core_dev *mdev, u64 key)
{
struct mlx5_core_dev *peer_dev, *next = NULL;
+ struct mlx5_devcom_match_attr attr = {
+ .key.val = key,
+ };
+ struct mlx5_devcom_comp_dev *compd;
struct mlx5_devcom_comp_dev *pos;
- mdev->clock_state->compdev = mlx5_devcom_register_component(mdev->priv.devc,
- MLX5_DEVCOM_SHARED_CLOCK,
- key, NULL, mdev);
- if (IS_ERR(mdev->clock_state->compdev))
+ compd = mlx5_devcom_register_component(mdev->priv.devc,
+ MLX5_DEVCOM_SHARED_CLOCK,
+ &attr, NULL, mdev);
+ if (IS_ERR(compd))
return;
+ mdev->clock_state->compdev = compd;
+
mlx5_devcom_comp_lock(mdev->clock_state->compdev);
mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) {
if (peer_dev->clock) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h
index c819c047bb9c..4821163a547f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h
@@ -8,6 +8,7 @@ enum {
MLX5_ACCEL_OBJ_TLS_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_TLS,
MLX5_ACCEL_OBJ_IPSEC_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_IPSEC,
MLX5_ACCEL_OBJ_MACSEC_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_MACSEC,
+ MLX5_ACCEL_OBJ_PSP_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_PSP,
MLX5_ACCEL_OBJ_TYPE_KEY_NUM,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
index 7b0766c89f4c..faa2833602c8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
@@ -4,6 +4,7 @@
#include <linux/mlx5/vport.h>
#include <linux/list.h>
#include "lib/devcom.h"
+#include "lib/mlx5.h"
#include "mlx5_core.h"
static LIST_HEAD(devcom_dev_list);
@@ -22,11 +23,17 @@ struct mlx5_devcom_dev {
struct kref ref;
};
+struct mlx5_devcom_key {
+ u32 flags;
+ union mlx5_devcom_match_key key;
+ possible_net_t net;
+};
+
struct mlx5_devcom_comp {
struct list_head comp_list;
enum mlx5_devcom_component id;
- u64 key;
struct list_head comp_dev_list_head;
+ struct mlx5_devcom_key key;
mlx5_devcom_event_handler_t handler;
struct kref ref;
bool ready;
@@ -108,7 +115,8 @@ void mlx5_devcom_unregister_device(struct mlx5_devcom_dev *devc)
}
static struct mlx5_devcom_comp *
-mlx5_devcom_comp_alloc(u64 id, u64 key, mlx5_devcom_event_handler_t handler)
+mlx5_devcom_comp_alloc(u64 id, const struct mlx5_devcom_match_attr *attr,
+ mlx5_devcom_event_handler_t handler)
{
struct mlx5_devcom_comp *comp;
@@ -117,7 +125,10 @@ mlx5_devcom_comp_alloc(u64 id, u64 key, mlx5_devcom_event_handler_t handler)
return ERR_PTR(-ENOMEM);
comp->id = id;
- comp->key = key;
+ comp->key.key = attr->key;
+ comp->key.flags = attr->flags;
+ if (attr->flags & MLX5_DEVCOM_MATCH_FLAGS_NS)
+ write_pnet(&comp->key.net, attr->net);
comp->handler = handler;
init_rwsem(&comp->sem);
lockdep_register_key(&comp->lock_key);
@@ -180,21 +191,34 @@ devcom_free_comp_dev(struct mlx5_devcom_comp_dev *devcom)
static bool
devcom_component_equal(struct mlx5_devcom_comp *devcom,
enum mlx5_devcom_component id,
- u64 key)
+ const struct mlx5_devcom_match_attr *attr)
{
- return devcom->id == id && devcom->key == key;
+ if (devcom->id != id)
+ return false;
+
+ if (devcom->key.flags != attr->flags)
+ return false;
+
+ if (memcmp(&devcom->key.key, &attr->key, sizeof(devcom->key.key)))
+ return false;
+
+ if (devcom->key.flags & MLX5_DEVCOM_MATCH_FLAGS_NS &&
+ !net_eq(read_pnet(&devcom->key.net), attr->net))
+ return false;
+
+ return true;
}
static struct mlx5_devcom_comp *
devcom_component_get(struct mlx5_devcom_dev *devc,
enum mlx5_devcom_component id,
- u64 key,
+ const struct mlx5_devcom_match_attr *attr,
mlx5_devcom_event_handler_t handler)
{
struct mlx5_devcom_comp *comp;
devcom_for_each_component(comp) {
- if (devcom_component_equal(comp, id, key)) {
+ if (devcom_component_equal(comp, id, attr)) {
if (handler == comp->handler) {
kref_get(&comp->ref);
return comp;
@@ -212,7 +236,7 @@ devcom_component_get(struct mlx5_devcom_dev *devc,
struct mlx5_devcom_comp_dev *
mlx5_devcom_register_component(struct mlx5_devcom_dev *devc,
enum mlx5_devcom_component id,
- u64 key,
+ const struct mlx5_devcom_match_attr *attr,
mlx5_devcom_event_handler_t handler,
void *data)
{
@@ -223,14 +247,14 @@ mlx5_devcom_register_component(struct mlx5_devcom_dev *devc,
return ERR_PTR(-EINVAL);
mutex_lock(&comp_list_lock);
- comp = devcom_component_get(devc, id, key, handler);
+ comp = devcom_component_get(devc, id, attr, handler);
if (IS_ERR(comp)) {
devcom = ERR_PTR(-EINVAL);
goto out_unlock;
}
if (!comp) {
- comp = mlx5_devcom_comp_alloc(id, key, handler);
+ comp = mlx5_devcom_comp_alloc(id, attr, handler);
if (IS_ERR(comp)) {
devcom = ERR_CAST(comp);
goto out_unlock;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
index c79699b94a02..609c85f47917 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
@@ -6,6 +6,20 @@
#include <linux/mlx5/driver.h>
+enum mlx5_devcom_match_flags {
+ MLX5_DEVCOM_MATCH_FLAGS_NS = BIT(0),
+};
+
+union mlx5_devcom_match_key {
+ u64 val;
+};
+
+struct mlx5_devcom_match_attr {
+ u32 flags;
+ union mlx5_devcom_match_key key;
+ struct net *net;
+};
+
enum mlx5_devcom_component {
MLX5_DEVCOM_ESW_OFFLOADS,
MLX5_DEVCOM_MPV,
@@ -25,7 +39,7 @@ void mlx5_devcom_unregister_device(struct mlx5_devcom_dev *devc);
struct mlx5_devcom_comp_dev *
mlx5_devcom_register_component(struct mlx5_devcom_dev *devc,
enum mlx5_devcom_component id,
- u64 key,
+ const struct mlx5_devcom_match_attr *attr,
mlx5_devcom_event_handler_t handler,
void *data);
void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom);
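To restrict pairing to a single network namespace, a caller sets the NS flag and supplies the netns, as the sd.c hunk later in this series does; a minimal sketch (group_id is a placeholder key):

	struct mlx5_devcom_match_attr attr = {
		.flags = MLX5_DEVCOM_MATCH_FLAGS_NS,
		.key.val = group_id,
		.net = mlx5_core_net(dev),
	};

	devcom = mlx5_devcom_register_component(dev->priv.devc,
						MLX5_DEVCOM_SD_GROUP,
						&attr, NULL, dev);

Per devcom_component_equal() above, two registrations pair only when the flags match, the keys are bytewise equal, and, if MLX5_DEVCOM_MATCH_FLAGS_NS is set, net_eq() holds as well.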
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
index ca9ecec358b2..7adad784ad46 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
@@ -9,7 +9,7 @@
#include "mlx5_core.h"
#include "lib/fs_ttc.h"
-#define MLX5_TTC_MAX_NUM_GROUPS 4
+#define MLX5_TTC_MAX_NUM_GROUPS 7
#define MLX5_TTC_GROUP_TCPUDP_SIZE (MLX5_TT_IPV6_UDP + 1)
struct mlx5_fs_ttc_groups {
@@ -31,10 +31,14 @@ static int mlx5_fs_ttc_table_size(const struct mlx5_fs_ttc_groups *groups)
/* L3/L4 traffic type classifier */
struct mlx5_ttc_table {
int num_groups;
+ const struct mlx5_fs_ttc_groups *groups;
+ struct mlx5_core_dev *mdev;
struct mlx5_flow_table *t;
struct mlx5_flow_group **g;
struct mlx5_ttc_rule rules[MLX5_NUM_TT];
struct mlx5_flow_handle *tunnel_rules[MLX5_NUM_TUNNEL_TT];
+ u32 refcnt;
+ struct mutex mutex; /* Protect adding rules for ipsec crypto offload */
};
struct mlx5_flow_table *mlx5_get_ttc_flow_table(struct mlx5_ttc_table *ttc)
@@ -163,6 +167,8 @@ static struct mlx5_etype_proto ttc_tunnel_rules[] = {
enum TTC_GROUP_TYPE {
TTC_GROUPS_DEFAULT = 0,
TTC_GROUPS_USE_L4_TYPE = 1,
+ TTC_GROUPS_DEFAULT_ESP = 2,
+ TTC_GROUPS_USE_L4_TYPE_ESP = 3,
};
static const struct mlx5_fs_ttc_groups ttc_groups[] = {
@@ -184,6 +190,31 @@ static const struct mlx5_fs_ttc_groups ttc_groups[] = {
BIT(0),
},
},
+ [TTC_GROUPS_DEFAULT_ESP] = {
+ .num_groups = 6,
+ .group_size = {
+ MLX5_TTC_GROUP_TCPUDP_SIZE + BIT(1) +
+ MLX5_NUM_TUNNEL_TT,
+ BIT(2), /* decrypted outer L4 */
+ BIT(2), /* decrypted inner L4 */
+ BIT(1), /* ESP */
+ BIT(1),
+ BIT(0),
+ },
+ },
+ [TTC_GROUPS_USE_L4_TYPE_ESP] = {
+ .use_l4_type = true,
+ .num_groups = 7,
+ .group_size = {
+ MLX5_TTC_GROUP_TCPUDP_SIZE,
+ BIT(1) + MLX5_NUM_TUNNEL_TT,
+ BIT(2), /* decrypted outer L4 */
+ BIT(2), /* decrypted inner L4 */
+ BIT(1), /* ESP */
+ BIT(1),
+ BIT(0),
+ },
+ },
};
static const struct mlx5_fs_ttc_groups inner_ttc_groups[] = {
@@ -207,6 +238,23 @@ static const struct mlx5_fs_ttc_groups inner_ttc_groups[] = {
},
};
+static const struct mlx5_fs_ttc_groups *
+mlx5_ttc_get_fs_groups(bool use_l4_type, bool ipsec_rss)
+{
+ if (!ipsec_rss)
+ return use_l4_type ? &ttc_groups[TTC_GROUPS_USE_L4_TYPE] :
+ &ttc_groups[TTC_GROUPS_DEFAULT];
+
+ return use_l4_type ? &ttc_groups[TTC_GROUPS_USE_L4_TYPE_ESP] :
+ &ttc_groups[TTC_GROUPS_DEFAULT_ESP];
+}
+
+bool mlx5_ttc_has_esp_flow_group(struct mlx5_ttc_table *ttc)
+{
+ return ttc->groups == &ttc_groups[TTC_GROUPS_DEFAULT_ESP] ||
+ ttc->groups == &ttc_groups[TTC_GROUPS_USE_L4_TYPE_ESP];
+}
+
u8 mlx5_get_proto_by_tunnel_type(enum mlx5_tunnel_types tt)
{
return ttc_tunnel_rules[tt].proto;
@@ -257,6 +305,31 @@ static u8 mlx5_etype_to_ipv(u16 ethertype)
return 0;
}
+static void mlx5_fs_ttc_set_match_ipv_outer(struct mlx5_core_dev *mdev,
+ struct mlx5_flow_spec *spec,
+ u16 etype)
+{
+ int match_ipv_outer =
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.outer_ip_version);
+ u8 ipv;
+
+ ipv = mlx5_etype_to_ipv(etype);
+ if (match_ipv_outer && ipv) {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.ip_version);
+ MLX5_SET(fte_match_param, spec->match_value,
+ outer_headers.ip_version, ipv);
+ } else {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.ethertype);
+ MLX5_SET(fte_match_param, spec->match_value,
+ outer_headers.ethertype, etype);
+ }
+
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+}
+
static void mlx5_fs_ttc_set_match_proto(void *headers_c, void *headers_v,
u8 proto, bool use_l4_type)
{
@@ -279,16 +352,12 @@ static void mlx5_fs_ttc_set_match_proto(void *headers_c, void *headers_v,
static struct mlx5_flow_handle *
mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
struct mlx5_flow_destination *dest, u16 etype, u8 proto,
- bool use_l4_type)
+ bool use_l4_type, bool ipsec_rss)
{
- int match_ipv_outer =
- MLX5_CAP_FLOWTABLE_NIC_RX(dev,
- ft_field_support.outer_ip_version);
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
int err = 0;
- u8 ipv;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
@@ -305,15 +374,15 @@ mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
proto, use_l4_type);
}
- ipv = mlx5_etype_to_ipv(etype);
- if (match_ipv_outer && ipv) {
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ipv);
- } else if (etype) {
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
+ if (etype)
+ mlx5_fs_ttc_set_match_ipv_outer(dev, spec, etype);
+
+ if (ipsec_rss && proto == IPPROTO_ESP) {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.ipsec_next_header);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.ipsec_next_header, 0);
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}
rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
@@ -342,12 +411,16 @@ static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
for (tt = 0; tt < MLX5_NUM_TT; tt++) {
struct mlx5_ttc_rule *rule = &rules[tt];
+ if (mlx5_ttc_is_decrypted_esp_tt(tt))
+ continue;
+
if (test_bit(tt, params->ignore_dests))
continue;
rule->rule = mlx5_generate_ttc_rule(dev, ft, &params->dests[tt],
ttc_rules[tt].etype,
ttc_rules[tt].proto,
- use_l4_type);
+ use_l4_type,
+ params->ipsec_rss);
if (IS_ERR(rule->rule)) {
err = PTR_ERR(rule->rule);
rule->rule = NULL;
@@ -370,7 +443,7 @@ static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
&params->tunnel_dests[tt],
ttc_tunnel_rules[tt].etype,
ttc_tunnel_rules[tt].proto,
- use_l4_type);
+ use_l4_type, false);
if (IS_ERR(trules[tt])) {
err = PTR_ERR(trules[tt]);
trules[tt] = NULL;
@@ -385,10 +458,78 @@ del_rules:
return err;
}
+static int mlx5_create_ttc_table_ipsec_groups(struct mlx5_ttc_table *ttc,
+ bool use_ipv,
+ u32 *in, int *next_ix)
+{
+ u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ const struct mlx5_fs_ttc_groups *groups = ttc->groups;
+ int ix = *next_ix;
+
+ MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
+
+ /* decrypted ESP outer group */
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.l4_type_ext);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += groups->group_size[ttc->num_groups];
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
+ if (IS_ERR(ttc->g[ttc->num_groups]))
+ goto err;
+ ttc->num_groups++;
+
+ MLX5_SET(fte_match_param, mc, outer_headers.l4_type_ext, 0);
+
+ /* decrypted ESP inner group */
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
+ if (use_ipv)
+ MLX5_SET(fte_match_param, mc, outer_headers.ip_version, 0);
+ else
+ MLX5_SET(fte_match_param, mc, outer_headers.ethertype, 0);
+ MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version);
+ MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.l4_type_ext);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += groups->group_size[ttc->num_groups];
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
+ if (IS_ERR(ttc->g[ttc->num_groups]))
+ goto err;
+ ttc->num_groups++;
+
+ MLX5_SET(fte_match_param, mc, inner_headers.ip_version, 0);
+ MLX5_SET(fte_match_param, mc, inner_headers.l4_type_ext, 0);
+
+ /* undecrypted ESP group */
+ MLX5_SET_CFG(in, match_criteria_enable,
+ MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
+ if (use_ipv)
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version);
+ else
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
+ MLX5_SET_TO_ONES(fte_match_param, mc,
+ misc_parameters_2.ipsec_next_header);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += groups->group_size[ttc->num_groups];
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
+ if (IS_ERR(ttc->g[ttc->num_groups]))
+ goto err;
+ ttc->num_groups++;
+
+ *next_ix = ix;
+
+ return 0;
+
+err:
+ return PTR_ERR(ttc->g[ttc->num_groups]);
+}
+
static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc,
- bool use_ipv,
- const struct mlx5_fs_ttc_groups *groups)
+ bool use_ipv)
{
+ const struct mlx5_fs_ttc_groups *groups = ttc->groups;
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
int ix = 0;
u32 *in;
@@ -436,8 +577,18 @@ static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc,
goto err;
ttc->num_groups++;
+ if (mlx5_ttc_has_esp_flow_group(ttc)) {
+ err = mlx5_create_ttc_table_ipsec_groups(ttc, use_ipv, in, &ix);
+ if (err)
+ goto err;
+
+ MLX5_SET(fte_match_param, mc,
+ misc_parameters_2.ipsec_next_header, 0);
+ }
+
/* L3 Group */
MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += groups->group_size[ttc->num_groups];
MLX5_SET_CFG(in, end_flow_index, ix - 1);
@@ -527,6 +678,9 @@ static int mlx5_generate_inner_ttc_table_rules(struct mlx5_core_dev *dev,
for (tt = 0; tt < MLX5_NUM_TT; tt++) {
struct mlx5_ttc_rule *rule = &rules[tt];
+ if (mlx5_ttc_is_decrypted_esp_tt(tt))
+ continue;
+
if (test_bit(tt, params->ignore_dests))
continue;
rule->rule = mlx5_generate_inner_ttc_rule(dev, ft,
@@ -700,6 +854,7 @@ void mlx5_destroy_ttc_table(struct mlx5_ttc_table *ttc)
kfree(ttc->g);
mlx5_destroy_flow_table(ttc->t);
+ mutex_destroy(&ttc->mutex);
kvfree(ttc);
}
@@ -709,7 +864,6 @@ struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
bool match_ipv_outer =
MLX5_CAP_FLOWTABLE_NIC_RX(dev,
ft_field_support.outer_ip_version);
- const struct mlx5_fs_ttc_groups *groups;
struct mlx5_flow_namespace *ns;
struct mlx5_ttc_table *ttc;
bool use_l4_type;
@@ -738,11 +892,10 @@ struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
return ERR_PTR(-EOPNOTSUPP);
}
- groups = use_l4_type ? &ttc_groups[TTC_GROUPS_USE_L4_TYPE] :
- &ttc_groups[TTC_GROUPS_DEFAULT];
+ ttc->groups = mlx5_ttc_get_fs_groups(use_l4_type, params->ipsec_rss);
WARN_ON_ONCE(params->ft_attr.max_fte);
- params->ft_attr.max_fte = mlx5_fs_ttc_table_size(groups);
+ params->ft_attr.max_fte = mlx5_fs_ttc_table_size(ttc->groups);
ttc->t = mlx5_create_flow_table(ns, &params->ft_attr);
if (IS_ERR(ttc->t)) {
err = PTR_ERR(ttc->t);
@@ -750,7 +903,7 @@ struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
return ERR_PTR(err);
}
- err = mlx5_create_ttc_table_groups(ttc, match_ipv_outer, groups);
+ err = mlx5_create_ttc_table_groups(ttc, match_ipv_outer);
if (err)
goto destroy_ft;
@@ -758,6 +911,9 @@ struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
if (err)
goto destroy_ft;
+ ttc->mdev = dev;
+ mutex_init(&ttc->mutex);
+
return ttc;
destroy_ft:
@@ -791,3 +947,194 @@ int mlx5_ttc_fwd_default_dest(struct mlx5_ttc_table *ttc,
return mlx5_ttc_fwd_dest(ttc, type, &dest);
}
+
+static void _mlx5_ttc_destroy_ipsec_rules(struct mlx5_ttc_table *ttc)
+{
+ enum mlx5_traffic_types i;
+
+ for (i = MLX5_TT_DECRYPTED_ESP_OUTER_IPV4_TCP;
+ i <= MLX5_TT_DECRYPTED_ESP_INNER_IPV6_UDP; i++) {
+ if (!ttc->rules[i].rule)
+ continue;
+
+ mlx5_del_flow_rules(ttc->rules[i].rule);
+ ttc->rules[i].rule = NULL;
+ }
+}
+
+void mlx5_ttc_destroy_ipsec_rules(struct mlx5_ttc_table *ttc)
+{
+ if (!mlx5_ttc_has_esp_flow_group(ttc))
+ return;
+
+ mutex_lock(&ttc->mutex);
+ if (--ttc->refcnt)
+ goto unlock;
+
+ _mlx5_ttc_destroy_ipsec_rules(ttc);
+unlock:
+ mutex_unlock(&ttc->mutex);
+}
+
+static int mlx5_ttc_get_tt_attrs(enum mlx5_traffic_types type,
+ u16 *etype, int *l4_type_ext,
+ enum mlx5_traffic_types *tir_tt)
+{
+ switch (type) {
+ case MLX5_TT_DECRYPTED_ESP_OUTER_IPV4_TCP:
+ case MLX5_TT_DECRYPTED_ESP_INNER_IPV4_TCP:
+ *etype = ETH_P_IP;
+ *l4_type_ext = MLX5_PACKET_L4_TYPE_EXT_TCP;
+ *tir_tt = MLX5_TT_IPV4_TCP;
+ break;
+ case MLX5_TT_DECRYPTED_ESP_OUTER_IPV6_TCP:
+ case MLX5_TT_DECRYPTED_ESP_INNER_IPV6_TCP:
+ *etype = ETH_P_IPV6;
+ *l4_type_ext = MLX5_PACKET_L4_TYPE_EXT_TCP;
+ *tir_tt = MLX5_TT_IPV6_TCP;
+ break;
+ case MLX5_TT_DECRYPTED_ESP_OUTER_IPV4_UDP:
+ case MLX5_TT_DECRYPTED_ESP_INNER_IPV4_UDP:
+ *etype = ETH_P_IP;
+ *l4_type_ext = MLX5_PACKET_L4_TYPE_EXT_UDP;
+ *tir_tt = MLX5_TT_IPV4_UDP;
+ break;
+ case MLX5_TT_DECRYPTED_ESP_OUTER_IPV6_UDP:
+ case MLX5_TT_DECRYPTED_ESP_INNER_IPV6_UDP:
+ *etype = ETH_P_IPV6;
+ *l4_type_ext = MLX5_PACKET_L4_TYPE_EXT_UDP;
+ *tir_tt = MLX5_TT_IPV6_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct mlx5_flow_handle *
+mlx5_ttc_create_ipsec_outer_rule(struct mlx5_ttc_table *ttc,
+ enum mlx5_traffic_types type)
+{
+ struct mlx5_flow_destination dest;
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ enum mlx5_traffic_types tir_tt;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int l4_type_ext;
+ u16 etype;
+ int err;
+
+ err = mlx5_ttc_get_tt_attrs(type, &etype, &l4_type_ext, &tir_tt);
+ if (err)
+ return ERR_PTR(err);
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return ERR_PTR(-ENOMEM);
+
+ mlx5_fs_ttc_set_match_ipv_outer(ttc->mdev, spec, etype);
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.l4_type_ext);
+ MLX5_SET(fte_match_param, spec->match_value,
+ outer_headers.l4_type_ext, l4_type_ext);
+
+ dest = mlx5_ttc_get_default_dest(ttc, tir_tt);
+
+ rule = mlx5_add_flow_rules(ttc->t, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(ttc->mdev, "%s: add rule failed\n", __func__);
+ }
+
+ kvfree(spec);
+ return err ? ERR_PTR(err) : rule;
+}
+
+static struct mlx5_flow_handle *
+mlx5_ttc_create_ipsec_inner_rule(struct mlx5_ttc_table *ttc,
+ struct mlx5_ttc_table *inner_ttc,
+ enum mlx5_traffic_types type)
+{
+ struct mlx5_flow_destination dest;
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ enum mlx5_traffic_types tir_tt;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int l4_type_ext;
+ u16 etype;
+ int err;
+
+ err = mlx5_ttc_get_tt_attrs(type, &etype, &l4_type_ext, &tir_tt);
+ if (err)
+ return ERR_PTR(err);
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return ERR_PTR(-ENOMEM);
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ inner_headers.ip_version);
+ MLX5_SET(fte_match_param, spec->match_value,
+ inner_headers.ip_version, mlx5_etype_to_ipv(etype));
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ inner_headers.l4_type_ext);
+ MLX5_SET(fte_match_param, spec->match_value,
+ inner_headers.l4_type_ext, l4_type_ext);
+
+ dest = mlx5_ttc_get_default_dest(inner_ttc, tir_tt);
+
+ spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
+
+ rule = mlx5_add_flow_rules(ttc->t, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(ttc->mdev, "%s: add rule failed\n", __func__);
+ }
+
+ kvfree(spec);
+ return err ? ERR_PTR(err) : rule;
+}
+
+int mlx5_ttc_create_ipsec_rules(struct mlx5_ttc_table *ttc,
+ struct mlx5_ttc_table *inner_ttc)
+{
+ struct mlx5_flow_handle *rule;
+ enum mlx5_traffic_types i;
+
+ if (!mlx5_ttc_has_esp_flow_group(ttc))
+ return 0;
+
+ mutex_lock(&ttc->mutex);
+ if (ttc->refcnt)
+ goto skip;
+
+ for (i = MLX5_TT_DECRYPTED_ESP_OUTER_IPV4_TCP;
+ i <= MLX5_TT_DECRYPTED_ESP_OUTER_IPV6_UDP; i++) {
+ rule = mlx5_ttc_create_ipsec_outer_rule(ttc, i);
+ if (IS_ERR(rule))
+ goto err_out;
+
+ ttc->rules[i].rule = rule;
+ }
+
+ for (i = MLX5_TT_DECRYPTED_ESP_INNER_IPV4_TCP;
+ i <= MLX5_TT_DECRYPTED_ESP_INNER_IPV6_UDP; i++) {
+ rule = mlx5_ttc_create_ipsec_inner_rule(ttc, inner_ttc, i);
+ if (IS_ERR(rule))
+ goto err_out;
+
+ ttc->rules[i].rule = rule;
+ }
+
+skip:
+ ttc->refcnt++;
+ mutex_unlock(&ttc->mutex);
+ return 0;
+
+err_out:
+ _mlx5_ttc_destroy_ipsec_rules(ttc);
+ mutex_unlock(&ttc->mutex);
+ return PTR_ERR(rule);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
index ab9434fe3ae6..95f6e56724a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
@@ -18,6 +18,14 @@ enum mlx5_traffic_types {
MLX5_TT_IPV4,
MLX5_TT_IPV6,
MLX5_TT_ANY,
+ MLX5_TT_DECRYPTED_ESP_OUTER_IPV4_TCP,
+ MLX5_TT_DECRYPTED_ESP_OUTER_IPV6_TCP,
+ MLX5_TT_DECRYPTED_ESP_OUTER_IPV4_UDP,
+ MLX5_TT_DECRYPTED_ESP_OUTER_IPV6_UDP,
+ MLX5_TT_DECRYPTED_ESP_INNER_IPV4_TCP,
+ MLX5_TT_DECRYPTED_ESP_INNER_IPV6_TCP,
+ MLX5_TT_DECRYPTED_ESP_INNER_IPV4_UDP,
+ MLX5_TT_DECRYPTED_ESP_INNER_IPV6_UDP,
MLX5_NUM_TT,
MLX5_NUM_INDIR_TIRS = MLX5_TT_ANY,
};
@@ -47,6 +55,7 @@ struct ttc_params {
bool inner_ttc;
DECLARE_BITMAP(ignore_tunnel_dests, MLX5_NUM_TUNNEL_TT);
struct mlx5_flow_destination tunnel_dests[MLX5_NUM_TUNNEL_TT];
+ bool ipsec_rss;
};
const char *mlx5_ttc_get_name(enum mlx5_traffic_types tt);
@@ -70,4 +79,14 @@ int mlx5_ttc_fwd_default_dest(struct mlx5_ttc_table *ttc,
bool mlx5_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev);
u8 mlx5_get_proto_by_tunnel_type(enum mlx5_tunnel_types tt);
+bool mlx5_ttc_has_esp_flow_group(struct mlx5_ttc_table *ttc);
+int mlx5_ttc_create_ipsec_rules(struct mlx5_ttc_table *ttc,
+ struct mlx5_ttc_table *inner_ttc);
+void mlx5_ttc_destroy_ipsec_rules(struct mlx5_ttc_table *ttc);
+static inline bool mlx5_ttc_is_decrypted_esp_tt(enum mlx5_traffic_types tt)
+{
+ return tt >= MLX5_TT_DECRYPTED_ESP_OUTER_IPV4_TCP &&
+ tt <= MLX5_TT_DECRYPTED_ESP_INNER_IPV6_UDP;
+}
+
#endif /* __MLX5_FS_TTC_H__ */
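The ESP rule helpers declared above are reference counted against a single TTC table; a sketch of the expected pairing (caller context is illustrative):

	int err;

	/* Enable path: the first caller installs the decrypted-ESP rules;
	 * later callers only bump the refcount. Tables created without
	 * params->ipsec_rss have no ESP groups, and create returns 0
	 * without touching them.
	 */
	err = mlx5_ttc_create_ipsec_rules(ttc, inner_ttc);
	if (err)
		return err;

	/* Disable path: the last caller actually deletes the rules. */
	mlx5_ttc_destroy_ipsec_rules(ttc);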
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c
index b7d4b1a2baf2..d524f0220513 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c
@@ -164,6 +164,8 @@ ipsec_fs_roce_rx_rule_setup(struct mlx5_core_dev *mdev,
roce->rule = rule;
memset(spec, 0, sizeof(*spec));
+ if (default_dst->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
+ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, default_dst, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -178,6 +180,8 @@ ipsec_fs_roce_rx_rule_setup(struct mlx5_core_dev *mdev,
goto out;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ if (default_dst->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
+ flow_act.flags &= ~FLOW_ACT_IGNORE_FLOW_LEVEL;
dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
dst.ft = roce->ft_rdma;
rule = mlx5_add_flow_rules(roce->nic_master_ft, NULL, &flow_act, &dst,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
index 762d55ba9e51..e6be2f01daf4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c
@@ -45,11 +45,7 @@
#define MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI 0x8
#define MLX5_SECTAG_HEADER_SIZE_WITH_SCI (MLX5_SECTAG_HEADER_SIZE_WITHOUT_SCI + MACSEC_SCI_LEN)
-/* MACsec RX flow steering */
-#define MLX5_ETH_WQE_FT_META_MACSEC_MASK 0x3E
-
/* MACsec fs_id handling for steering */
-#define macsec_fs_set_tx_fs_id(fs_id) (MLX5_ETH_WQE_FT_META_MACSEC | (fs_id) << 2)
#define macsec_fs_set_rx_fs_id(fs_id) ((fs_id) | BIT(30))
struct mlx5_sectag_header {
@@ -597,7 +593,7 @@ static int macsec_fs_tx_setup_fte(struct mlx5_macsec_fs *macsec_fs,
MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_a,
MLX5_ETH_WQE_FT_META_MACSEC_MASK);
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_a,
- macsec_fs_set_tx_fs_id(id));
+ MLX5_MACSEC_TX_METADATA(id));
*fs_id = id;
flow_act->crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC;
@@ -2219,9 +2215,11 @@ static int mlx5_macsec_fs_add_roce_rule_tx(struct mlx5_macsec_fs *macsec_fs, u32
MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_A);
- MLX5_SET(set_action_in, action, data, macsec_fs_set_tx_fs_id(fs_id));
- MLX5_SET(set_action_in, action, offset, 0);
- MLX5_SET(set_action_in, action, length, 32);
+ MLX5_SET(set_action_in, action, data,
+ mlx5_macsec_fs_set_tx_fs_id(fs_id));
+ MLX5_SET(set_action_in, action, offset,
+ MLX5_ETH_WQE_FT_META_MACSEC_SHIFT);
+ MLX5_SET(set_action_in, action, length, 8);
modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC,
1, action);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h
index 34b80c3ef6a5..15acaff43641 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h
@@ -12,6 +12,21 @@
#define MLX5_MACSEC_METADATA_MARKER(metadata) ((((metadata) >> 30) & 0x3) == 0x1)
#define MLX5_MACSEC_RX_METADAT_HANDLE(metadata) ((metadata) & MLX5_MACSEC_RX_FS_ID_MASK)
+/* MACsec TX flow steering */
+#define MLX5_ETH_WQE_FT_META_MACSEC_MASK \
+ (MLX5_ETH_WQE_FT_META_MACSEC | MLX5_ETH_WQE_FT_META_MACSEC_FS_ID_MASK)
+#define MLX5_ETH_WQE_FT_META_MACSEC_SHIFT MLX5_ETH_WQE_FT_META_SHIFT
+
+/* MACsec fs_id handling for steering */
+#define mlx5_macsec_fs_set_tx_fs_id(fs_id) \
+ (((MLX5_ETH_WQE_FT_META_MACSEC) >> MLX5_ETH_WQE_FT_META_MACSEC_SHIFT) \
+ | ((fs_id) << 2))
+
+#define MLX5_MACSEC_TX_METADATA(fs_id) \
+ (mlx5_macsec_fs_set_tx_fs_id(fs_id) << \
+ MLX5_ETH_WQE_FT_META_MACSEC_SHIFT)
+
+/* MACsec fs_id uses 4 bits, supports up to 16 interfaces */
#define MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES 16
struct mlx5_macsec_fs;
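The TX macros above pair up with the macsec_fs.c hunk earlier in this patch: the modify-header action writes the narrow fs_id field at the MACsec shift with length 8, and the FTE match then compares against the same value shifted back into register position. Side by side (both fragments taken from the hunks in this series):

	/* Modify-header side: tag the packet with the fs_id field. */
	MLX5_SET(set_action_in, action, data,
		 mlx5_macsec_fs_set_tx_fs_id(fs_id));
	MLX5_SET(set_action_in, action, offset,
		 MLX5_ETH_WQE_FT_META_MACSEC_SHIFT);
	MLX5_SET(set_action_in, action, length, 8);

	/* Match side: compare the full shifted register value. */
	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_a,
		 MLX5_ETH_WQE_FT_META_MACSEC_MASK);
	MLX5_SET(fte_match_param, spec->match_value,
		 misc_parameters_2.metadata_reg_a,
		 MLX5_MACSEC_TX_METADATA(fs_id));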
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
index b111ccd03b02..74ea5da58b7e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -47,7 +47,20 @@ int mlx5_crdump_collect(struct mlx5_core_dev *dev, u32 *cr_data);
static inline struct net_device *mlx5_uplink_netdev_get(struct mlx5_core_dev *mdev)
{
- return mdev->mlx5e_res.uplink_netdev;
+ struct mlx5e_resources *mlx5e_res = &mdev->mlx5e_res;
+ struct net_device *netdev;
+
+ mutex_lock(&mlx5e_res->uplink_netdev_lock);
+ netdev = mlx5e_res->uplink_netdev;
+ netdev_hold(netdev, &mlx5e_res->tracker, GFP_KERNEL);
+ mutex_unlock(&mlx5e_res->uplink_netdev_lock);
+ return netdev;
+}
+
+static inline void mlx5_uplink_netdev_put(struct mlx5_core_dev *mdev,
+ struct net_device *netdev)
+{
+ netdev_put(netdev, &mdev->mlx5e_res.tracker);
}
struct mlx5_sd;
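mlx5_uplink_netdev_get() now takes a tracked reference under the uplink lock, so every caller must balance it with mlx5_uplink_netdev_put(); a minimal sketch (the uplink may be unbound, in which case NULL is returned and both helpers tolerate it):

	struct net_device *netdev;

	netdev = mlx5_uplink_netdev_get(mdev);
	if (netdev) {
		/* ... use netdev while holding the tracked reference ... */
		mlx5_uplink_netdev_put(mdev, netdev);
	}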
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c
new file mode 100644
index 000000000000..459a0b4d08e6
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c
@@ -0,0 +1,567 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include "nv_param.h"
+#include "mlx5_core.h"
+
+enum {
+ MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CONF = 0x80,
+ MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CAP = 0x81,
+ MLX5_CLASS_0_CTRL_ID_NV_SW_OFFLOAD_CONFIG = 0x10a,
+
+ MLX5_CLASS_3_CTRL_ID_NV_PF_PCI_CONF = 0x80,
+};
+
+struct mlx5_ifc_configuration_item_type_class_global_bits {
+ u8 type_class[0x8];
+ u8 parameter_index[0x18];
+};
+
+struct mlx5_ifc_configuration_item_type_class_per_host_pf_bits {
+ u8 type_class[0x8];
+ u8 pf_index[0x6];
+ u8 pci_bus_index[0x8];
+ u8 parameter_index[0xa];
+};
+
+union mlx5_ifc_config_item_type_auto_bits {
+ struct mlx5_ifc_configuration_item_type_class_global_bits
+ configuration_item_type_class_global;
+ struct mlx5_ifc_configuration_item_type_class_per_host_pf_bits
+ configuration_item_type_class_per_host_pf;
+ u8 reserved_at_0[0x20];
+};
+
+struct mlx5_ifc_config_item_bits {
+ u8 valid[0x2];
+ u8 priority[0x2];
+ u8 header_type[0x2];
+ u8 ovr_en[0x1];
+ u8 rd_en[0x1];
+ u8 access_mode[0x2];
+ u8 reserved_at_a[0x1];
+ u8 writer_id[0x5];
+ u8 version[0x4];
+ u8 reserved_at_14[0x2];
+ u8 host_id_valid[0x1];
+ u8 length[0x9];
+
+ union mlx5_ifc_config_item_type_auto_bits type;
+
+ u8 reserved_at_40[0x10];
+ u8 crc16[0x10];
+};
+
+struct mlx5_ifc_mnvda_reg_bits {
+ struct mlx5_ifc_config_item_bits configuration_item_header;
+
+ u8 configuration_item_data[64][0x20];
+};
+
+struct mlx5_ifc_nv_global_pci_conf_bits {
+ u8 sriov_valid[0x1];
+ u8 reserved_at_1[0x10];
+ u8 per_pf_total_vf[0x1];
+ u8 reserved_at_12[0xe];
+
+ u8 sriov_en[0x1];
+ u8 reserved_at_21[0xf];
+ u8 total_vfs[0x10];
+
+ u8 reserved_at_40[0x20];
+};
+
+struct mlx5_ifc_nv_global_pci_cap_bits {
+ u8 max_vfs_per_pf_valid[0x1];
+ u8 reserved_at_1[0x13];
+ u8 per_pf_total_vf_supported[0x1];
+ u8 reserved_at_15[0xb];
+
+ u8 sriov_support[0x1];
+ u8 reserved_at_21[0xf];
+ u8 max_vfs_per_pf[0x10];
+
+ u8 reserved_at_40[0x60];
+};
+
+struct mlx5_ifc_nv_pf_pci_conf_bits {
+ u8 reserved_at_0[0x9];
+ u8 pf_total_vf_en[0x1];
+ u8 reserved_at_a[0x16];
+
+ u8 reserved_at_20[0x20];
+
+ u8 reserved_at_40[0x10];
+ u8 total_vf[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_nv_sw_offload_conf_bits {
+ u8 ip_over_vxlan_port[0x10];
+ u8 tunnel_ecn_copy_offload_disable[0x1];
+ u8 pci_atomic_mode[0x3];
+ u8 sr_enable[0x1];
+ u8 ptp_cyc2realtime[0x1];
+ u8 vector_calc_disable[0x1];
+ u8 uctx_en[0x1];
+ u8 prio_tag_required_en[0x1];
+ u8 esw_fdb_ipv4_ttl_modify_enable[0x1];
+ u8 mkey_by_name[0x1];
+ u8 ip_over_vxlan_en[0x1];
+ u8 one_qp_per_recovery[0x1];
+ u8 cqe_compression[0x3];
+ u8 tunnel_udp_entropy_proto_disable[0x1];
+ u8 reserved_at_21[0x1];
+ u8 ar_enable[0x1];
+ u8 log_max_outstanding_wqe[0x5];
+ u8 vf_migration[0x2];
+ u8 log_tx_psn_win[0x6];
+ u8 lro_log_timeout3[0x4];
+ u8 lro_log_timeout2[0x4];
+ u8 lro_log_timeout1[0x4];
+ u8 lro_log_timeout0[0x4];
+};
+
+#define MNVDA_HDR_SZ \
+ (MLX5_ST_SZ_BYTES(mnvda_reg) - \
+ MLX5_BYTE_OFF(mnvda_reg, configuration_item_data))
+
+#define MLX5_SET_CFG_ITEM_TYPE(_cls_name, _mnvda_ptr, _field, _val) \
+ MLX5_SET(mnvda_reg, _mnvda_ptr, \
+ configuration_item_header.type.configuration_item_type_class_##_cls_name._field, \
+ _val)
+
+#define MLX5_SET_CFG_HDR_LEN(_mnvda_ptr, _cls_name) \
+ MLX5_SET(mnvda_reg, _mnvda_ptr, configuration_item_header.length, \
+ MLX5_ST_SZ_BYTES(_cls_name))
+
+#define MLX5_GET_CFG_HDR_LEN(_mnvda_ptr) \
+ MLX5_GET(mnvda_reg, _mnvda_ptr, configuration_item_header.length)
+
+static int mlx5_nv_param_read(struct mlx5_core_dev *dev, void *mnvda,
+ size_t len)
+{
+ u32 param_idx, type_class;
+ u32 header_len;
+ void *cls_ptr;
+ int err;
+
+ if (WARN_ON(len > MLX5_ST_SZ_BYTES(mnvda_reg)) || len < MNVDA_HDR_SZ)
+ return -EINVAL; /* A caller bug */
+
+ err = mlx5_core_access_reg(dev, mnvda, len, mnvda, len, MLX5_REG_MNVDA,
+ 0, 0);
+ if (!err)
+ return 0;
+
+ cls_ptr = MLX5_ADDR_OF(mnvda_reg, mnvda,
+ configuration_item_header.type.configuration_item_type_class_global);
+
+ type_class = MLX5_GET(configuration_item_type_class_global, cls_ptr,
+ type_class);
+ param_idx = MLX5_GET(configuration_item_type_class_global, cls_ptr,
+ parameter_index);
+ header_len = MLX5_GET_CFG_HDR_LEN(mnvda);
+
+ mlx5_core_warn(dev, "Failed to read mnvda reg: type_class 0x%x, param_idx 0x%x, header_len %u, err %d\n",
+ type_class, param_idx, header_len, err);
+
+ return -EOPNOTSUPP;
+}
+
+static int mlx5_nv_param_write(struct mlx5_core_dev *dev, void *mnvda,
+ size_t len)
+{
+ if (WARN_ON(len > MLX5_ST_SZ_BYTES(mnvda_reg)) || len < MNVDA_HDR_SZ)
+ return -EINVAL;
+
+ if (WARN_ON(MLX5_GET_CFG_HDR_LEN(mnvda) == 0))
+ return -EINVAL;
+
+ return mlx5_core_access_reg(dev, mnvda, len, mnvda, len, MLX5_REG_MNVDA,
+ 0, 1);
+}
+
+static int
+mlx5_nv_param_read_sw_offload_conf(struct mlx5_core_dev *dev, void *mnvda,
+ size_t len)
+{
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, type_class, 0);
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, parameter_index,
+ MLX5_CLASS_0_CTRL_ID_NV_SW_OFFLOAD_CONFIG);
+ MLX5_SET_CFG_HDR_LEN(mnvda, nv_sw_offload_conf);
+
+ return mlx5_nv_param_read(dev, mnvda, len);
+}
+
+static const char *const cqe_compress_str[] = { "balanced", "aggressive" };
+
+static int
+mlx5_nv_param_devlink_cqe_compress_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ u8 value = U8_MAX;
+ void *data;
+ int err;
+
+ err = mlx5_nv_param_read_sw_offload_conf(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ value = MLX5_GET(nv_sw_offload_conf, data, cqe_compression);
+
+ if (value >= ARRAY_SIZE(cqe_compress_str))
+ return -EOPNOTSUPP;
+
+ strscpy(ctx->val.vstr, cqe_compress_str[value], sizeof(ctx->val.vstr));
+ return 0;
+}
+
+static int
+mlx5_nv_param_devlink_cqe_compress_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cqe_compress_str); i++) {
+ if (!strcmp(val.vstr, cqe_compress_str[i]))
+ return 0;
+ }
+
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid value, supported values are balanced/aggressive");
+ return -EOPNOTSUPP;
+}
+
+static int
+mlx5_nv_param_devlink_cqe_compress_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ int err = 0;
+ void *data;
+ u8 value;
+
+ if (!strcmp(ctx->val.vstr, "aggressive"))
+ value = 1;
+	else /* balanced: validate() already rejected anything else */
+ value = 0;
+
+ err = mlx5_nv_param_read_sw_offload_conf(dev, mnvda, sizeof(mnvda));
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to read sw_offload_conf mnvda reg");
+ return err;
+ }
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ MLX5_SET(nv_sw_offload_conf, data, cqe_compression, value);
+
+ return mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
+}
+
+static int mlx5_nv_param_read_global_pci_conf(struct mlx5_core_dev *dev,
+ void *mnvda, size_t len)
+{
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, type_class, 0);
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, parameter_index,
+ MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CONF);
+ MLX5_SET_CFG_HDR_LEN(mnvda, nv_global_pci_conf);
+
+ return mlx5_nv_param_read(dev, mnvda, len);
+}
+
+static int mlx5_nv_param_read_global_pci_cap(struct mlx5_core_dev *dev,
+ void *mnvda, size_t len)
+{
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, type_class, 0);
+ MLX5_SET_CFG_ITEM_TYPE(global, mnvda, parameter_index,
+ MLX5_CLASS_0_CTRL_ID_NV_GLOBAL_PCI_CAP);
+ MLX5_SET_CFG_HDR_LEN(mnvda, nv_global_pci_cap);
+
+ return mlx5_nv_param_read(dev, mnvda, len);
+}
+
+static int mlx5_nv_param_read_per_host_pf_conf(struct mlx5_core_dev *dev,
+ void *mnvda, size_t len)
+{
+ MLX5_SET_CFG_ITEM_TYPE(per_host_pf, mnvda, type_class, 3);
+ MLX5_SET_CFG_ITEM_TYPE(per_host_pf, mnvda, parameter_index,
+ MLX5_CLASS_3_CTRL_ID_NV_PF_PCI_CONF);
+ MLX5_SET_CFG_HDR_LEN(mnvda, nv_pf_pci_conf);
+
+ return mlx5_nv_param_read(dev, mnvda, len);
+}
+
+static int mlx5_devlink_enable_sriov_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ bool sriov_en = false;
+ void *data;
+ int err;
+
+ err = mlx5_nv_param_read_global_pci_cap(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ if (!MLX5_GET(nv_global_pci_cap, data, sriov_support)) {
+ ctx->val.vbool = false;
+ return 0;
+ }
+
+ memset(mnvda, 0, sizeof(mnvda));
+ err = mlx5_nv_param_read_global_pci_conf(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ sriov_en = MLX5_GET(nv_global_pci_conf, data, sriov_en);
+ if (!MLX5_GET(nv_global_pci_conf, data, per_pf_total_vf)) {
+ ctx->val.vbool = sriov_en;
+ return 0;
+ }
+
+ /* SRIOV is per PF */
+ memset(mnvda, 0, sizeof(mnvda));
+ err = mlx5_nv_param_read_per_host_pf_conf(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ ctx->val.vbool = sriov_en &&
+ MLX5_GET(nv_pf_pci_conf, data, pf_total_vf_en);
+ return 0;
+}
+
+static int mlx5_devlink_enable_sriov_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ bool per_pf_support;
+ void *cap, *data;
+ int err;
+
+ err = mlx5_nv_param_read_global_pci_cap(dev, mnvda, sizeof(mnvda));
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to read global PCI capability");
+ return err;
+ }
+
+ cap = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ per_pf_support = MLX5_GET(nv_global_pci_cap, cap,
+ per_pf_total_vf_supported);
+
+ if (!MLX5_GET(nv_global_pci_cap, cap, sriov_support)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "SRIOV is not supported on this device");
+ return -EOPNOTSUPP;
+ }
+
+ if (!per_pf_support) {
+ /* We don't allow global SRIOV setting on per PF devlink */
+ NL_SET_ERR_MSG_MOD(extack,
+ "SRIOV is not per PF on this device");
+ return -EOPNOTSUPP;
+ }
+
+ memset(mnvda, 0, sizeof(mnvda));
+ err = mlx5_nv_param_read_global_pci_conf(dev, mnvda, sizeof(mnvda));
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unable to read global PCI configuration");
+ return err;
+ }
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+
+ /* setup per PF sriov mode */
+ MLX5_SET(nv_global_pci_conf, data, sriov_valid, 1);
+ MLX5_SET(nv_global_pci_conf, data, sriov_en, 1);
+ MLX5_SET(nv_global_pci_conf, data, per_pf_total_vf, 1);
+
+ err = mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unable to write global PCI configuration");
+ return err;
+ }
+
+ /* enable/disable sriov on this PF */
+ memset(mnvda, 0, sizeof(mnvda));
+ err = mlx5_nv_param_read_per_host_pf_conf(dev, mnvda, sizeof(mnvda));
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unable to read per host PF configuration");
+ return err;
+ }
+ MLX5_SET(nv_pf_pci_conf, data, pf_total_vf_en, ctx->val.vbool);
+ return mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
+}
+
+static int mlx5_devlink_total_vfs_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
+ void *data;
+ int err;
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+
+ err = mlx5_nv_param_read_global_pci_cap(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ if (!MLX5_GET(nv_global_pci_cap, data, sriov_support)) {
+ ctx->val.vu32 = 0;
+ return 0;
+ }
+
+ memset(mnvda, 0, sizeof(mnvda));
+ err = mlx5_nv_param_read_global_pci_conf(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ if (!MLX5_GET(nv_global_pci_conf, data, per_pf_total_vf)) {
+ ctx->val.vu32 = MLX5_GET(nv_global_pci_conf, data, total_vfs);
+ return 0;
+ }
+
+ /* SRIOV is per PF */
+ memset(mnvda, 0, sizeof(mnvda));
+ err = mlx5_nv_param_read_per_host_pf_conf(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ ctx->val.vu32 = MLX5_GET(nv_pf_pci_conf, data, total_vf);
+
+ return 0;
+}
+
+static int mlx5_devlink_total_vfs_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)];
+ void *data;
+ int err;
+
+ err = mlx5_nv_param_read_global_pci_cap(dev, mnvda, sizeof(mnvda));
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to read global pci cap");
+ return err;
+ }
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ if (!MLX5_GET(nv_global_pci_cap, data, sriov_support)) {
+ NL_SET_ERR_MSG_MOD(extack, "Not configurable on this device");
+ return -EOPNOTSUPP;
+ }
+
+ if (!MLX5_GET(nv_global_pci_cap, data, per_pf_total_vf_supported)) {
+ /* We don't allow global SRIOV setting on per PF devlink */
+ NL_SET_ERR_MSG_MOD(extack,
+ "SRIOV is not per PF on this device");
+ return -EOPNOTSUPP;
+ }
+
+ memset(mnvda, 0, sizeof(mnvda));
+ err = mlx5_nv_param_read_global_pci_conf(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ MLX5_SET(nv_global_pci_conf, data, sriov_valid, 1);
+ MLX5_SET(nv_global_pci_conf, data, per_pf_total_vf, 1);
+
+ err = mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ memset(mnvda, 0, sizeof(mnvda));
+ err = mlx5_nv_param_read_per_host_pf_conf(dev, mnvda, sizeof(mnvda));
+ if (err)
+ return err;
+
+ data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
+ MLX5_SET(nv_pf_pci_conf, data, total_vf, ctx->val.vu32);
+ return mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));
+}
+
+static int mlx5_devlink_total_vfs_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 cap[MLX5_ST_SZ_DW(mnvda_reg)];
+ void *data;
+ u16 max;
+ int err;
+
+ data = MLX5_ADDR_OF(mnvda_reg, cap, configuration_item_data);
+
+ err = mlx5_nv_param_read_global_pci_cap(dev, cap, sizeof(cap));
+ if (err)
+ return err;
+
+ if (!MLX5_GET(nv_global_pci_cap, data, max_vfs_per_pf_valid))
+ return 0; /* optimistic, but set might fail later */
+
+ max = MLX5_GET(nv_global_pci_cap, data, max_vfs_per_pf);
+ if (val.vu16 > max) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Max allowed by device is %u", max);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct devlink_param mlx5_nv_param_devlink_params[] = {
+ DEVLINK_PARAM_GENERIC(ENABLE_SRIOV, BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ mlx5_devlink_enable_sriov_get,
+ mlx5_devlink_enable_sriov_set, NULL),
+ DEVLINK_PARAM_GENERIC(TOTAL_VFS, BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ mlx5_devlink_total_vfs_get,
+ mlx5_devlink_total_vfs_set,
+ mlx5_devlink_total_vfs_validate),
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_CQE_COMPRESSION_TYPE,
+ "cqe_compress_type", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ mlx5_nv_param_devlink_cqe_compress_get,
+ mlx5_nv_param_devlink_cqe_compress_set,
+ mlx5_nv_param_devlink_cqe_compress_validate),
+};
+
+int mlx5_nv_param_register_dl_params(struct devlink *devlink)
+{
+ if (!mlx5_core_is_pf(devlink_priv(devlink)))
+ return 0;
+
+ return devl_params_register(devlink, mlx5_nv_param_devlink_params,
+ ARRAY_SIZE(mlx5_nv_param_devlink_params));
+}
+
+void mlx5_nv_param_unregister_dl_params(struct devlink *devlink)
+{
+ if (!mlx5_core_is_pf(devlink_priv(devlink)))
+ return;
+
+ devl_params_unregister(devlink, mlx5_nv_param_devlink_params,
+ ARRAY_SIZE(mlx5_nv_param_devlink_params));
+}
+
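Every parameter accessor in this new file follows the same MNVDA read-modify-write shape; abstracted into a sketch (using the sw_offload item as the example; PERMANENT cmode values generally apply only after the next firmware reset):

	u32 mnvda[MLX5_ST_SZ_DW(mnvda_reg)] = {};
	void *data;
	int err;

	/* 1. Address the configuration item and read it back. */
	err = mlx5_nv_param_read_sw_offload_conf(dev, mnvda, sizeof(mnvda));
	if (err)
		return err;

	/* 2. Modify only the field of interest inside the item data. */
	data = MLX5_ADDR_OF(mnvda_reg, mnvda, configuration_item_data);
	MLX5_SET(nv_sw_offload_conf, data, cqe_compression, 1);

	/* 3. Write the whole item back. */
	return mlx5_nv_param_write(dev, mnvda, sizeof(mnvda));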
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.h
new file mode 100644
index 000000000000..9f4922ff7745
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_NV_PARAM_H
+#define __MLX5_NV_PARAM_H
+
+#include <linux/mlx5/driver.h>
+#include "devlink.h"
+
+int mlx5_nv_param_register_dl_params(struct devlink *devlink);
+void mlx5_nv_param_unregister_dl_params(struct devlink *devlink);
+
+#endif
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
index eeb0b7ea05f1..f5c2701f6e87 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
@@ -210,13 +210,17 @@ static void sd_cleanup(struct mlx5_core_dev *dev)
static int sd_register(struct mlx5_core_dev *dev)
{
struct mlx5_devcom_comp_dev *devcom, *pos;
+ struct mlx5_devcom_match_attr attr = {};
struct mlx5_core_dev *peer, *primary;
struct mlx5_sd *sd, *primary_sd;
int err, i;
sd = mlx5_get_sd(dev);
+ attr.key.val = sd->group_id;
+ attr.flags = MLX5_DEVCOM_MATCH_FLAGS_NS;
+ attr.net = mlx5_core_net(dev);
devcom = mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_SD_GROUP,
- sd->group_id, NULL, dev);
+ &attr, NULL, dev);
if (IS_ERR(devcom))
return PTR_ERR(devcom);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 8517d4e5d5ef..df93625c9dfa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -973,36 +973,14 @@ static void mlx5_pci_close(struct mlx5_core_dev *dev)
mlx5_pci_disable_device(dev);
}
-static void mlx5_register_hca_devcom_comp(struct mlx5_core_dev *dev)
-{
- /* This component is use to sync adding core_dev to lag_dev and to sync
- * changes of mlx5_adev_devices between LAG layer and other layers.
- */
- if (!mlx5_lag_is_supported(dev))
- return;
-
- dev->priv.hca_devcom_comp =
- mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_HCA_PORTS,
- mlx5_query_nic_system_image_guid(dev),
- NULL, dev);
- if (IS_ERR(dev->priv.hca_devcom_comp))
- mlx5_core_err(dev, "Failed to register devcom HCA component\n");
-}
-
-static void mlx5_unregister_hca_devcom_comp(struct mlx5_core_dev *dev)
-{
- mlx5_devcom_unregister_component(dev->priv.hca_devcom_comp);
-}
-
static int mlx5_init_once(struct mlx5_core_dev *dev)
{
int err;
dev->priv.devc = mlx5_devcom_register_device(dev);
if (IS_ERR(dev->priv.devc))
- mlx5_core_warn(dev, "failed to register devcom device %ld\n",
- PTR_ERR(dev->priv.devc));
- mlx5_register_hca_devcom_comp(dev);
+ mlx5_core_warn(dev, "failed to register devcom device %pe\n",
+ dev->priv.devc);
err = mlx5_query_board_id(dev);
if (err) {
@@ -1140,7 +1118,6 @@ err_eq_cleanup:
err_irq_cleanup:
mlx5_irq_table_cleanup(dev);
err_devcom:
- mlx5_unregister_hca_devcom_comp(dev);
mlx5_devcom_unregister_device(dev->priv.devc);
return err;
@@ -1171,7 +1148,6 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_events_cleanup(dev);
mlx5_eq_table_cleanup(dev);
mlx5_irq_table_cleanup(dev);
- mlx5_unregister_hca_devcom_comp(dev);
mlx5_devcom_unregister_device(dev->priv.devc);
}
@@ -1340,10 +1316,9 @@ static int mlx5_load(struct mlx5_core_dev *dev)
{
int err;
- dev->priv.uar = mlx5_get_uars_page(dev);
- if (IS_ERR(dev->priv.uar)) {
- mlx5_core_err(dev, "Failed allocating uar, aborting\n");
- err = PTR_ERR(dev->priv.uar);
+ err = mlx5_alloc_bfreg(dev, &dev->priv.bfreg, false, false);
+ if (err) {
+ mlx5_core_err(dev, "Failed allocating bfreg, %d\n", err);
return err;
}
@@ -1454,7 +1429,7 @@ err_eq_table:
err_irq_table:
mlx5_pagealloc_stop(dev);
mlx5_events_stop(dev);
- mlx5_put_uars_page(dev, dev->priv.uar);
+ mlx5_free_bfreg(dev, &dev->priv.bfreg);
return err;
}
@@ -1479,7 +1454,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
mlx5_irq_table_destroy(dev);
mlx5_pagealloc_stop(dev);
mlx5_events_stop(dev);
- mlx5_put_uars_page(dev, dev->priv.uar);
+ mlx5_free_bfreg(dev, &dev->priv.bfreg);
}
int mlx5_init_one_devl_locked(struct mlx5_core_dev *dev)
@@ -1798,6 +1773,7 @@ static const int types[] = {
MLX5_CAP_VDPA_EMULATION,
MLX5_CAP_IPSEC,
MLX5_CAP_PORT_SELECTION,
+ MLX5_CAP_PSP,
MLX5_CAP_MACSEC,
MLX5_CAP_ADV_VIRTUALIZATION,
MLX5_CAP_CRYPTO,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index b6d53db27cd5..082259b56816 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -367,6 +367,8 @@ int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state);
int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state);
+int mlx5_query_port_buffer_ownership(struct mlx5_core_dev *mdev,
+ u8 *buffer_ownership);
int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio);
int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio);
@@ -447,8 +449,6 @@ int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap
#define mlx5_vport_get_other_func_general_cap(dev, vport, out) \
mlx5_vport_get_other_func_cap(dev, vport, out, MLX5_CAP_GENERAL)
-int mlx5_vport_get_vhca_id(struct mlx5_core_dev *dev, u16 vport, u16 *vhca_id);
-
static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 9bc9bd83c232..cd68c4b2c0bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -489,9 +489,12 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
u32 func_id;
u32 npages;
u32 i = 0;
+ int err;
- if (!mlx5_cmd_is_down(dev))
- return mlx5_cmd_do(dev, in, in_size, out, out_size);
+ err = mlx5_cmd_do(dev, in, in_size, out, out_size);
+ /* If FW is gone (-ENXIO), proceed to forceful reclaim */
+ if (err != -ENXIO)
+ return err;
/* No hard feelings, we want our pages back! */
npages = MLX5_GET(manage_pages_in, in, input_num_entries);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 692ef9c2f729..e18a850c615c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -54,7 +54,7 @@ static int mlx5_core_func_to_vport(const struct mlx5_core_dev *dev,
/**
* mlx5_get_default_msix_vec_count - Get the default number of MSI-X vectors
- * to be ssigned to each VF.
+ * to be assigned to each VF.
* @dev: PF to work on
* @num_vfs: Number of enabled VFs
*/
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 549f1066d2a5..aa9f2b0a77d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -968,6 +968,26 @@ int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state)
return err;
}
+int mlx5_query_port_buffer_ownership(struct mlx5_core_dev *mdev,
+ u8 *buffer_ownership)
+{
+ u32 out[MLX5_ST_SZ_DW(pfcc_reg)] = {};
+ int err;
+
+ if (!MLX5_CAP_PCAM_FEATURE(mdev, buffer_ownership)) {
+ *buffer_ownership = MLX5_BUF_OWNERSHIP_UNKNOWN;
+ return 0;
+ }
+
+ err = mlx5_query_pfcc_reg(mdev, out, sizeof(out));
+ if (err)
+ return err;
+
+ *buffer_ownership = MLX5_GET(pfcc_reg, out, buf_ownership);
+
+ return 0;
+}
+
int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio)
{
int sz = MLX5_ST_SZ_BYTES(qpdpm_reg);
@@ -1150,7 +1170,11 @@ const struct mlx5_link_info *mlx5_port_ptys2info(struct mlx5_core_dev *mdev,
mlx5e_port_get_link_mode_info_arr(mdev, &table, &max_size,
force_legacy);
i = find_first_bit(&temp, max_size);
- if (i < max_size)
+
+	/* mlx5e_link_info has holes: a zero speed marks such a
+	 * hole, so skip the entry in that case.
+	 */
+ if (i < max_size && table[i].speed)
return &table[i];
return NULL;
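A sketch of how a consumer might use the new query; only MLX5_BUF_OWNERSHIP_UNKNOWN appears in this hunk, so the fallback branch is the grounded part and any further ownership handling is left hypothetical:

	u8 buf_ownership;
	int err;

	err = mlx5_query_port_buffer_ownership(mdev, &buf_ownership);
	if (err)
		return err;

	/* Devices without the PCAM bit report UNKNOWN instead of failing. */
	if (buf_ownership == MLX5_BUF_OWNERSHIP_UNKNOWN)
		return 0;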
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h
index 0537de86f981..9b0f44253f33 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h
@@ -28,7 +28,7 @@ DECLARE_EVENT_CLASS(mlx5_sf_dev_template,
__entry->hw_fn_id = sfdev->fn_id;
__entry->sfnum = sfdev->sfnum;
),
- TP_printk("(%s) sfdev=%pK aux_id=%d hw_id=0x%x sfnum=%u\n",
+ TP_printk("(%s) sfdev=%p aux_id=%d hw_id=0x%x sfnum=%u\n",
__get_str(devname), __entry->sfdev,
__entry->aux_id, __entry->hw_fn_id,
__entry->sfnum)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index 0864ba625c07..3304f25cc805 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -518,3 +518,13 @@ void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
WARN_ON(!xa_empty(&table->function_ids));
kfree(table);
}
+
+bool mlx5_sf_table_empty(const struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_table *table = dev->priv.sf_table;
+
+ if (!table)
+ return true;
+
+ return xa_empty(&table->function_ids);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
index 860f9ddb7107..89559a37997a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
@@ -17,6 +17,7 @@ void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev);
int mlx5_sf_table_init(struct mlx5_core_dev *dev);
void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev);
+bool mlx5_sf_table_empty(const struct mlx5_core_dev *dev);
int mlx5_devlink_sf_port_new(struct devlink *devlink,
const struct devlink_port_new_attrs *add_attr,
@@ -61,6 +62,11 @@ static inline void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
{
}
+static inline bool mlx5_sf_table_empty(const struct mlx5_core_dev *dev)
+{
+ return true;
+}
+
#endif
#endif
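mlx5_sf_table_empty() gives callers a cheap guard, and the stub above keeps it compilable when SF support is out; a hypothetical use:

	/* Hypothetical: refuse a mode change while SF ports still exist. */
	if (!mlx5_sf_table_empty(dev))
		return -EBUSY;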
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
index 396804369b00..fe56b59e24c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
@@ -117,7 +117,7 @@ static int hws_action_get_shared_stc_nic(struct mlx5hws_context *ctx,
mlx5hws_err(ctx, "No such stc_type: %d\n", stc_type);
pr_warn("HWS: Invalid stc_type: %d\n", stc_type);
ret = -EINVAL;
- goto unlock_and_out;
+ goto free_shared_stc;
}
ret = mlx5hws_action_alloc_single_stc(ctx, &stc_attr, tbl_type,
@@ -1360,7 +1360,7 @@ free_action:
struct mlx5hws_action *
mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx, size_t num_dest,
struct mlx5hws_action_dest_attr *dests,
- bool ignore_flow_level, u32 flags)
+ u32 flags)
{
struct mlx5hws_cmd_set_fte_dest *dest_list = NULL;
struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
@@ -1397,7 +1397,7 @@ mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx, size_t num_dest,
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest_list[i].destination_id = dests[i].dest->dest_obj.obj_id;
fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- fte_attr.ignore_flow_level = ignore_flow_level;
+ fte_attr.ignore_flow_level = 1;
if (dests[i].is_wire_ft)
last_dest_idx = i;
break;
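With ignore_flow_level now forced on for flow-table destinations, callers simply drop the argument; the new call shape (arguments illustrative, and the NULL-on-error convention is assumed from the surrounding hws action constructors):

	struct mlx5hws_action *action;

	action = mlx5hws_action_create_dest_array(ctx, num_dest, dests, flags);
	if (!action)
		return -EINVAL;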
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
index 92de4b761a83..6ef0c4be27e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c
@@ -51,9 +51,6 @@ static void hws_bwc_matcher_init_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
u8 size_log_rx, u8 size_log_tx,
struct mlx5hws_matcher_attr *attr)
{
- struct mlx5hws_bwc_matcher *first_matcher =
- bwc_matcher->complex_first_bwc_matcher;
-
memset(attr, 0, sizeof(*attr));
attr->priority = priority;
@@ -66,17 +63,14 @@ static void hws_bwc_matcher_init_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
attr->size[MLX5HWS_MATCHER_SIZE_TYPE_TX].rule.num_log = size_log_tx;
attr->resizable = true;
attr->max_num_of_at_attach = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM;
-
- attr->isolated_matcher_end_ft_id =
- first_matcher ? first_matcher->matcher->end_ft_id : 0;
}
static int
hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
{
- bool move_error = false, poll_error = false, drain_error = false;
struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
struct mlx5hws_matcher *matcher = bwc_matcher->matcher;
+ int drain_error = 0, move_error = 0, poll_error = 0;
u16 bwc_queues = mlx5hws_bwc_queues(ctx);
struct mlx5hws_rule_attr rule_attr;
struct mlx5hws_bwc_rule *bwc_rule;
@@ -84,6 +78,7 @@ hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
struct list_head *rules_list;
u32 pending_rules;
int i, ret = 0;
+ bool drain;
mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr);
@@ -99,23 +94,37 @@ hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
ret = mlx5hws_matcher_resize_rule_move(matcher,
bwc_rule->rule,
&rule_attr);
- if (unlikely(ret && !move_error)) {
- mlx5hws_err(ctx,
- "Moving BWC rule: move failed (%d), attempting to move rest of the rules\n",
- ret);
- move_error = true;
+ if (unlikely(ret)) {
+ if (!move_error) {
+ mlx5hws_err(ctx,
+ "Moving BWC rule: move failed (%d), attempting to move rest of the rules\n",
+ ret);
+ move_error = ret;
+ }
+ /* Rule wasn't queued, no need to poll */
+ continue;
}
pending_rules++;
+ drain = pending_rules >=
+ hws_bwc_get_burst_th(ctx, rule_attr.queue_id);
ret = mlx5hws_bwc_queue_poll(ctx,
rule_attr.queue_id,
&pending_rules,
- false);
- if (unlikely(ret && !poll_error)) {
- mlx5hws_err(ctx,
- "Moving BWC rule: poll failed (%d), attempting to move rest of the rules\n",
- ret);
- poll_error = true;
+ drain);
+ if (unlikely(ret)) {
+ if (ret == -ETIMEDOUT) {
+ mlx5hws_err(ctx,
+ "Moving BWC rule: timeout polling for completions (%d), aborting rehash\n",
+ ret);
+ return ret;
+ }
+ if (!poll_error) {
+ mlx5hws_err(ctx,
+ "Moving BWC rule: polling for completions failed (%d), attempting to move rest of the rules\n",
+ ret);
+ poll_error = ret;
+ }
}
}
@@ -126,27 +135,46 @@ hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
rule_attr.queue_id,
&pending_rules,
true);
- if (unlikely(ret && !drain_error)) {
- mlx5hws_err(ctx,
- "Moving BWC rule: drain failed (%d), attempting to move rest of the rules\n",
- ret);
- drain_error = true;
+ if (unlikely(ret)) {
+ if (ret == -ETIMEDOUT) {
+ mlx5hws_err(ctx,
+ "Moving bwc rule: timeout draining completions (%d), aborting rehash\n",
+ ret);
+ return ret;
+ }
+ if (!drain_error) {
+ mlx5hws_err(ctx,
+ "Moving bwc rule: drain failed (%d), attempting to move rest of the rules\n",
+ ret);
+ drain_error = ret;
+ }
}
}
}
- if (move_error || poll_error || drain_error)
- ret = -EINVAL;
+ /* Return the first error that happened */
+ if (unlikely(move_error))
+ return move_error;
+ if (unlikely(poll_error))
+ return poll_error;
+ if (unlikely(drain_error))
+ return drain_error;
return ret;
}
static int hws_bwc_matcher_move_all(struct mlx5hws_bwc_matcher *bwc_matcher)
{
- if (!bwc_matcher->complex)
+ switch (bwc_matcher->matcher_type) {
+ case MLX5HWS_BWC_MATCHER_SIMPLE:
return hws_bwc_matcher_move_all_simple(bwc_matcher);
-
- return mlx5hws_bwc_matcher_move_all_complex(bwc_matcher);
+ case MLX5HWS_BWC_MATCHER_COMPLEX_FIRST:
+ return mlx5hws_bwc_matcher_complex_move_first(bwc_matcher);
+ case MLX5HWS_BWC_MATCHER_COMPLEX_SUBMATCHER:
+ return mlx5hws_bwc_matcher_complex_move(bwc_matcher);
+ default:
+ return -EINVAL;
+ }
}
static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher)
@@ -221,6 +249,7 @@ int mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
bwc_matcher->tx_size.size_log,
&attr);
+ bwc_matcher->matcher_type = MLX5HWS_BWC_MATCHER_SIMPLE;
bwc_matcher->priority = priority;
bwc_matcher->size_of_at_array = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM;
@@ -365,7 +394,7 @@ int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
"BWC matcher destroy: matcher still has %u RX and %u TX rules\n",
rx_rules, tx_rules);
- if (bwc_matcher->complex)
+ if (bwc_matcher->matcher_type == MLX5HWS_BWC_MATCHER_COMPLEX_FIRST)
mlx5hws_bwc_matcher_destroy_complex(bwc_matcher);
else
mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
@@ -623,7 +652,8 @@ int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule)
int mlx5hws_bwc_rule_destroy(struct mlx5hws_bwc_rule *bwc_rule)
{
- bool is_complex = !!bwc_rule->bwc_matcher->complex;
+ bool is_complex = bwc_rule->bwc_matcher->matcher_type ==
+ MLX5HWS_BWC_MATCHER_COMPLEX_FIRST;
int ret = 0;
if (is_complex)
@@ -1035,6 +1065,21 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
return 0; /* rule inserted successfully */
}
+ /* Rule insertion could fail due to queue being full, timeout, or
+ * matcher in resize. In such cases, no point in trying to rehash.
+ */
+ if (ret == -EBUSY || ret == -ETIMEDOUT || ret == -EAGAIN) {
+ mutex_unlock(queue_lock);
+ mlx5hws_err(ctx,
+ "BWC rule insertion failed - %s (%d)\n",
+ ret == -EBUSY ? "queue is full" :
+ ret == -ETIMEDOUT ? "timeout" :
+ ret == -EAGAIN ? "matcher in resize" : "N/A",
+ ret);
+ hws_bwc_rule_cnt_dec(bwc_rule);
+ return ret;
+ }
+
/* At this point the rule wasn't added.
* It could be because there was collision, or some other problem.
* Try rehash by size and insert rule again - last chance.
@@ -1104,7 +1149,7 @@ mlx5hws_bwc_rule_create(struct mlx5hws_bwc_matcher *bwc_matcher,
bwc_queue_idx = hws_bwc_gen_queue_idx(ctx);
- if (bwc_matcher->complex)
+ if (bwc_matcher->matcher_type == MLX5HWS_BWC_MATCHER_COMPLEX_FIRST)
ret = mlx5hws_bwc_rule_create_complex(bwc_rule,
params,
flow_source,
@@ -1173,10 +1218,9 @@ int mlx5hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
return -EINVAL;
}
- /* For complex rule, the update should happen on the second matcher */
- if (bwc_rule->isolated_bwc_rule)
- return hws_bwc_rule_action_update(bwc_rule->isolated_bwc_rule,
- rule_actions);
- else
- return hws_bwc_rule_action_update(bwc_rule, rule_actions);
+ /* For complex rules, the update should happen on the last subrule. */
+ while (bwc_rule->next_subrule)
+ bwc_rule = bwc_rule->next_subrule;
+
+ return hws_bwc_rule_action_update(bwc_rule, rule_actions);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
index af391d70c14f..b905511f5c53 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h
@@ -18,6 +18,21 @@
#define MLX5HWS_BWC_POLLING_TIMEOUT 60
+enum mlx5hws_bwc_matcher_type {
+ /* Standalone bwc matcher. */
+ MLX5HWS_BWC_MATCHER_SIMPLE,
+ /* The first matcher of a complex matcher. When rules are inserted into
+ * a matcher of this type, they are split into subrules and inserted
+ * into their corresponding submatchers.
+ */
+ MLX5HWS_BWC_MATCHER_COMPLEX_FIRST,
+ /* A submatcher that is part of a complex matcher. For most purposes
+ * these are treated as simple matchers, except when it comes to moving
+ * rules during resize.
+ */
+ MLX5HWS_BWC_MATCHER_COMPLEX_SUBMATCHER,
+};
+
struct mlx5hws_bwc_matcher_complex_data;
struct mlx5hws_bwc_matcher_size {
@@ -31,9 +46,9 @@ struct mlx5hws_bwc_matcher {
struct mlx5hws_match_template *mt;
struct mlx5hws_action_template **at;
struct mlx5hws_bwc_matcher_complex_data *complex;
- struct mlx5hws_bwc_matcher *complex_first_bwc_matcher;
u8 num_of_at;
u8 size_of_at_array;
+ enum mlx5hws_bwc_matcher_type matcher_type;
u32 priority;
struct mlx5hws_bwc_matcher_size rx_size;
struct mlx5hws_bwc_matcher_size tx_size;
@@ -43,8 +58,8 @@ struct mlx5hws_bwc_matcher {
struct mlx5hws_bwc_rule {
struct mlx5hws_bwc_matcher *bwc_matcher;
struct mlx5hws_rule *rule;
- struct mlx5hws_bwc_rule *isolated_bwc_rule;
- struct mlx5hws_bwc_complex_rule_hash_node *complex_hash_node;
+ struct mlx5hws_bwc_rule *next_subrule;
+ struct mlx5hws_bwc_complex_subrule_data *subrule_data;
u32 flow_source;
u16 bwc_queue_idx;
bool skip_rx;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
index ca7501c57468..660630f18ce9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
@@ -3,25 +3,27 @@
#include "internal.h"
-#define HWS_CLEAR_MATCH_PARAM(mask, field) \
- MLX5_SET(fte_match_param, (mask)->match_buf, field, 0)
-
-#define HWS_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4)
-
-static const struct rhashtable_params hws_refcount_hash = {
- .key_len = sizeof_field(struct mlx5hws_bwc_complex_rule_hash_node,
- match_buf),
- .key_offset = offsetof(struct mlx5hws_bwc_complex_rule_hash_node,
- match_buf),
- .head_offset = offsetof(struct mlx5hws_bwc_complex_rule_hash_node,
- hash_node),
- .automatic_shrinking = true,
- .min_size = 1,
+/* We chain submatchers by applying three actions on a subrule: modify header
+ * (to set register C6), jump to table (to the next submatcher) and the
+ * mandatory last action.
+ */
+#define HWS_NUM_CHAIN_ACTIONS 3
+
+static const struct rhashtable_params hws_rules_hash_params = {
+ .key_len = sizeof_field(struct mlx5hws_bwc_complex_subrule_data,
+ match_tag),
+ .key_offset =
+ offsetof(struct mlx5hws_bwc_complex_subrule_data, match_tag),
+ .head_offset =
+ offsetof(struct mlx5hws_bwc_complex_subrule_data, hash_node),
+ .automatic_shrinking = true,
+ .min_size = 1,
};
-bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
- u8 match_criteria_enable,
- struct mlx5hws_match_parameters *mask)
+static bool
+hws_match_params_exceeds_definer(struct mlx5hws_context *ctx,
+ u8 match_criteria_enable,
+ struct mlx5hws_match_parameters *mask,
+ bool allow_jumbo)
{
struct mlx5hws_definer match_layout = {0};
struct mlx5hws_match_template *mt;
@@ -36,11 +38,11 @@ bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
mask->match_sz,
match_criteria_enable);
if (!mt) {
- mlx5hws_err(ctx, "BWC: failed creating match template\n");
+ mlx5hws_err(ctx, "Complex matcher: failed creating match template\n");
return false;
}
- ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout);
+ ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout, allow_jumbo);
if (ret) {
/* The only case that we're interested in is E2BIG,
* which means that the match parameters need to be
@@ -64,825 +66,481 @@ bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
return is_complex;
}
-static void
-hws_bwc_matcher_complex_params_clear_fld(struct mlx5hws_context *ctx,
- enum mlx5hws_definer_fname fname,
+bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
+ u8 match_criteria_enable,
struct mlx5hws_match_parameters *mask)
{
- struct mlx5hws_cmd_query_caps *caps = ctx->caps;
-
- switch (fname) {
- case MLX5HWS_DEFINER_FNAME_ETH_TYPE_O:
- case MLX5HWS_DEFINER_FNAME_ETH_TYPE_I:
- case MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_O:
- case MLX5HWS_DEFINER_FNAME_ETH_L3_TYPE_I:
- case MLX5HWS_DEFINER_FNAME_IP_VERSION_O:
- case MLX5HWS_DEFINER_FNAME_IP_VERSION_I:
- /* Because of the strict requirements for IP address matching
- * that require ethtype/ip_version matching as well, don't clear
- * these fields - have them in both parts of the complex matcher
- */
- break;
- case MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.smac_47_16);
- break;
- case MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_I:
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.smac_47_16);
- break;
- case MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.smac_15_0);
- break;
- case MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_I:
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.smac_15_0);
- break;
- case MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.dmac_47_16);
- break;
- case MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_I:
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.dmac_47_16);
- break;
- case MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.dmac_15_0);
- break;
- case MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_I:
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.dmac_15_0);
- break;
- case MLX5HWS_DEFINER_FNAME_VLAN_TYPE_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.cvlan_tag);
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.svlan_tag);
- break;
- case MLX5HWS_DEFINER_FNAME_VLAN_TYPE_I:
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.cvlan_tag);
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.svlan_tag);
- break;
- case MLX5HWS_DEFINER_FNAME_VLAN_FIRST_PRIO_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.first_prio);
- break;
- case MLX5HWS_DEFINER_FNAME_VLAN_FIRST_PRIO_I:
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.first_prio);
- break;
- case MLX5HWS_DEFINER_FNAME_VLAN_CFI_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.first_cfi);
- break;
- case MLX5HWS_DEFINER_FNAME_VLAN_CFI_I:
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.first_cfi);
- break;
- case MLX5HWS_DEFINER_FNAME_VLAN_ID_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.first_vid);
- break;
- case MLX5HWS_DEFINER_FNAME_VLAN_ID_I:
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.first_vid);
- break;
- case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_O:
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters.outer_second_cvlan_tag);
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters.outer_second_svlan_tag);
- break;
- case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_TYPE_I:
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters.inner_second_cvlan_tag);
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters.inner_second_svlan_tag);
- break;
- case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_PRIO_O:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.outer_second_prio);
- break;
- case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_PRIO_I:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.inner_second_prio);
- break;
- case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_CFI_O:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.outer_second_cfi);
- break;
- case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_CFI_I:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.inner_second_cfi);
- break;
- case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_ID_O:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.outer_second_vid);
- break;
- case MLX5HWS_DEFINER_FNAME_VLAN_SECOND_ID_I:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.inner_second_vid);
- break;
- case MLX5HWS_DEFINER_FNAME_IPV4_IHL_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ipv4_ihl);
- break;
- case MLX5HWS_DEFINER_FNAME_IPV4_IHL_I:
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ipv4_ihl);
- break;
- case MLX5HWS_DEFINER_FNAME_IP_DSCP_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ip_dscp);
- break;
- case MLX5HWS_DEFINER_FNAME_IP_DSCP_I:
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ip_dscp);
- break;
- case MLX5HWS_DEFINER_FNAME_IP_ECN_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ip_ecn);
- break;
- case MLX5HWS_DEFINER_FNAME_IP_ECN_I:
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ip_ecn);
- break;
- case MLX5HWS_DEFINER_FNAME_IP_TTL_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ttl_hoplimit);
- break;
- case MLX5HWS_DEFINER_FNAME_IP_TTL_I:
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ttl_hoplimit);
- break;
- case MLX5HWS_DEFINER_FNAME_IPV4_DST_O:
- HWS_CLEAR_MATCH_PARAM(mask,
- outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0);
- break;
- case MLX5HWS_DEFINER_FNAME_IPV4_SRC_O:
- HWS_CLEAR_MATCH_PARAM(mask,
- outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0);
- break;
- case MLX5HWS_DEFINER_FNAME_IPV4_DST_I:
- HWS_CLEAR_MATCH_PARAM(mask,
- inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0);
- break;
- case MLX5HWS_DEFINER_FNAME_IPV4_SRC_I:
- HWS_CLEAR_MATCH_PARAM(mask,
- inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0);
- break;
- case MLX5HWS_DEFINER_FNAME_IP_FRAG_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.frag);
- break;
- case MLX5HWS_DEFINER_FNAME_IP_FRAG_I:
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.frag);
- break;
- case MLX5HWS_DEFINER_FNAME_IPV6_FLOW_LABEL_O:
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters.outer_ipv6_flow_label);
- break;
- case MLX5HWS_DEFINER_FNAME_IPV6_FLOW_LABEL_I:
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters.inner_ipv6_flow_label);
- break;
- case MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_O:
- case MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_O:
- case MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_O:
- case MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_O:
- HWS_CLEAR_MATCH_PARAM(mask,
- outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96);
- HWS_CLEAR_MATCH_PARAM(mask,
- outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64);
- HWS_CLEAR_MATCH_PARAM(mask,
- outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32);
- HWS_CLEAR_MATCH_PARAM(mask,
- outer_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0);
- break;
- case MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_O:
- case MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_O:
- case MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_O:
- case MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_O:
- HWS_CLEAR_MATCH_PARAM(mask,
- outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96);
- HWS_CLEAR_MATCH_PARAM(mask,
- outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64);
- HWS_CLEAR_MATCH_PARAM(mask,
- outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32);
- HWS_CLEAR_MATCH_PARAM(mask,
- outer_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0);
- break;
- case MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_I:
- case MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_I:
- case MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_I:
- case MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_I:
- HWS_CLEAR_MATCH_PARAM(mask,
- inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_127_96);
- HWS_CLEAR_MATCH_PARAM(mask,
- inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_95_64);
- HWS_CLEAR_MATCH_PARAM(mask,
- inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_63_32);
- HWS_CLEAR_MATCH_PARAM(mask,
- inner_headers.dst_ipv4_dst_ipv6.ipv6_simple_layout.ipv6_31_0);
- break;
- case MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_I:
- case MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_I:
- case MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_I:
- case MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_I:
- HWS_CLEAR_MATCH_PARAM(mask,
- inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_127_96);
- HWS_CLEAR_MATCH_PARAM(mask,
- inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_95_64);
- HWS_CLEAR_MATCH_PARAM(mask,
- inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_63_32);
- HWS_CLEAR_MATCH_PARAM(mask,
- inner_headers.src_ipv4_src_ipv6.ipv6_simple_layout.ipv6_31_0);
- break;
- case MLX5HWS_DEFINER_FNAME_IP_PROTOCOL_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.ip_protocol);
- break;
- case MLX5HWS_DEFINER_FNAME_IP_PROTOCOL_I:
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.ip_protocol);
- break;
- case MLX5HWS_DEFINER_FNAME_L4_SPORT_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.tcp_sport);
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.udp_sport);
- break;
- case MLX5HWS_DEFINER_FNAME_L4_SPORT_I:
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.tcp_dport);
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.udp_dport);
- break;
- case MLX5HWS_DEFINER_FNAME_L4_DPORT_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.tcp_dport);
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.udp_dport);
- break;
- case MLX5HWS_DEFINER_FNAME_L4_DPORT_I:
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.tcp_dport);
- HWS_CLEAR_MATCH_PARAM(mask, inner_headers.udp_dport);
- break;
- case MLX5HWS_DEFINER_FNAME_TCP_FLAGS_O:
- HWS_CLEAR_MATCH_PARAM(mask, outer_headers.tcp_flags);
- break;
- case MLX5HWS_DEFINER_FNAME_TCP_ACK_NUM:
- case MLX5HWS_DEFINER_FNAME_TCP_SEQ_NUM:
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_3.outer_tcp_seq_num);
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_3.outer_tcp_ack_num);
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_3.inner_tcp_seq_num);
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_3.inner_tcp_ack_num);
- break;
- case MLX5HWS_DEFINER_FNAME_GTP_TEID:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_teid);
- break;
- case MLX5HWS_DEFINER_FNAME_GTP_MSG_TYPE:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_msg_type);
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_msg_flags);
- break;
- case MLX5HWS_DEFINER_FNAME_GTPU_FIRST_EXT_DW0:
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_3.gtpu_first_ext_dw_0);
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_dw_0);
- break;
- case MLX5HWS_DEFINER_FNAME_GTPU_DW2:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.gtpu_dw_2);
- break;
- case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0:
- case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1:
- case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2:
- case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3:
- case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4:
- case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5:
- case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6:
- case MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7:
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_2.outer_first_mpls_over_gre);
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_2.outer_first_mpls_over_udp);
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_3.geneve_tlv_option_0_data);
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_4.prog_sample_field_id_0);
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_4.prog_sample_field_value_0);
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_4.prog_sample_field_value_1);
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_4.prog_sample_field_id_2);
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_4.prog_sample_field_value_2);
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_4.prog_sample_field_id_3);
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_4.prog_sample_field_value_3);
- break;
- case MLX5HWS_DEFINER_FNAME_VXLAN_VNI:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.vxlan_vni);
- break;
- case MLX5HWS_DEFINER_FNAME_VXLAN_GPE_FLAGS:
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_3.outer_vxlan_gpe_flags);
- break;
- case MLX5HWS_DEFINER_FNAME_VXLAN_GPE_RSVD0:
- break;
- case MLX5HWS_DEFINER_FNAME_VXLAN_GPE_PROTO:
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_3.outer_vxlan_gpe_next_protocol);
- break;
- case MLX5HWS_DEFINER_FNAME_VXLAN_GPE_VNI:
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_3.outer_vxlan_gpe_vni);
- break;
- case MLX5HWS_DEFINER_FNAME_GENEVE_OPT_LEN:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.geneve_opt_len);
- break;
- case MLX5HWS_DEFINER_FNAME_GENEVE_OAM:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.geneve_oam);
- break;
- case MLX5HWS_DEFINER_FNAME_GENEVE_PROTO:
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters.geneve_protocol_type);
- break;
- case MLX5HWS_DEFINER_FNAME_GENEVE_VNI:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.geneve_vni);
- break;
- case MLX5HWS_DEFINER_FNAME_SOURCE_QP:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.source_sqn);
- break;
- case MLX5HWS_DEFINER_FNAME_SOURCE_GVMI:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.source_port);
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters.source_eswitch_owner_vhca_id);
- break;
- case MLX5HWS_DEFINER_FNAME_REG_0:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_0);
- break;
- case MLX5HWS_DEFINER_FNAME_REG_1:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_1);
- break;
- case MLX5HWS_DEFINER_FNAME_REG_2:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_2);
- break;
- case MLX5HWS_DEFINER_FNAME_REG_3:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_3);
- break;
- case MLX5HWS_DEFINER_FNAME_REG_4:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_4);
- break;
- case MLX5HWS_DEFINER_FNAME_REG_5:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_5);
- break;
- case MLX5HWS_DEFINER_FNAME_REG_7:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_c_7);
- break;
- case MLX5HWS_DEFINER_FNAME_REG_A:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.metadata_reg_a);
- break;
- case MLX5HWS_DEFINER_FNAME_GRE_C:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_c_present);
- break;
- case MLX5HWS_DEFINER_FNAME_GRE_K:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_k_present);
- break;
- case MLX5HWS_DEFINER_FNAME_GRE_S:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_s_present);
- break;
- case MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_protocol);
- break;
- case MLX5HWS_DEFINER_FNAME_GRE_OPT_KEY:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters.gre_key.key);
- break;
- case MLX5HWS_DEFINER_FNAME_ICMP_DW1:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmp_header_data);
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmp_type);
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmp_code);
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters_3.icmpv6_header_data);
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmpv6_type);
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_3.icmpv6_code);
- break;
- case MLX5HWS_DEFINER_FNAME_MPLS0_O:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.outer_first_mpls);
- break;
- case MLX5HWS_DEFINER_FNAME_MPLS0_I:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_2.inner_first_mpls);
- break;
- case MLX5HWS_DEFINER_FNAME_TNL_HDR_0:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_5.tunnel_header_0);
- break;
- case MLX5HWS_DEFINER_FNAME_TNL_HDR_1:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_5.tunnel_header_1);
- break;
- case MLX5HWS_DEFINER_FNAME_TNL_HDR_2:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_5.tunnel_header_2);
- break;
- case MLX5HWS_DEFINER_FNAME_TNL_HDR_3:
- HWS_CLEAR_MATCH_PARAM(mask, misc_parameters_5.tunnel_header_3);
- break;
- case MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK:
- case MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK:
- case MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK:
- case MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK:
- case MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK:
- case MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK:
- case MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK:
- case MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK:
- /* assuming this is flex parser for geneve option */
- if ((fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER0_OK &&
- ctx->caps->flex_parser_id_geneve_tlv_option_0 != 0) ||
- (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER1_OK &&
- ctx->caps->flex_parser_id_geneve_tlv_option_0 != 1) ||
- (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER2_OK &&
- ctx->caps->flex_parser_id_geneve_tlv_option_0 != 2) ||
- (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER3_OK &&
- ctx->caps->flex_parser_id_geneve_tlv_option_0 != 3) ||
- (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER4_OK &&
- ctx->caps->flex_parser_id_geneve_tlv_option_0 != 4) ||
- (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER5_OK &&
- ctx->caps->flex_parser_id_geneve_tlv_option_0 != 5) ||
- (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER6_OK &&
- ctx->caps->flex_parser_id_geneve_tlv_option_0 != 6) ||
- (fname == MLX5HWS_DEFINER_FNAME_FLEX_PARSER7_OK &&
- ctx->caps->flex_parser_id_geneve_tlv_option_0 != 7)) {
- mlx5hws_err(ctx,
- "Complex params: unsupported field %s (%d), flex parser ID for geneve is %d\n",
- mlx5hws_definer_fname_to_str(fname), fname,
- caps->flex_parser_id_geneve_tlv_option_0);
- break;
- }
- HWS_CLEAR_MATCH_PARAM(mask,
- misc_parameters.geneve_tlv_option_0_exist);
- break;
- case MLX5HWS_DEFINER_FNAME_REG_6:
- default:
- mlx5hws_err(ctx, "Complex params: unsupported field %s (%d)\n",
- mlx5hws_definer_fname_to_str(fname), fname);
- break;
- }
+ return hws_match_params_exceeds_definer(ctx, match_criteria_enable,
+ mask, true);
}
-static bool
-hws_bwc_matcher_complex_params_comb_is_valid(struct mlx5hws_definer_fc *fc,
- int fc_sz,
- u32 combination_num)
+static int
+hws_get_last_set_dword_idx(const struct mlx5hws_match_parameters *mask)
{
- bool m1[MLX5HWS_DEFINER_FNAME_MAX] = {0};
- bool m2[MLX5HWS_DEFINER_FNAME_MAX] = {0};
- bool is_first_matcher;
int i;
- for (i = 0; i < fc_sz; i++) {
- is_first_matcher = !(combination_num & BIT(i));
- if (is_first_matcher)
- m1[fc[i].fname] = true;
- else
- m2[fc[i].fname] = true;
- }
-
- /* Not all the fields can be split into separate matchers.
- * Some should be together on the same matcher.
- * For example, IPv6 parts - the whole IPv6 address should be on the
- * same matcher in order for us to deduce if it's IPv6 or IPv4 address.
- */
- if (m1[MLX5HWS_DEFINER_FNAME_IP_FRAG_O] &&
- (m2[MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_O] ||
- m2[MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_O] ||
- m2[MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_O] ||
- m2[MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_O]))
- return false;
-
- if (m2[MLX5HWS_DEFINER_FNAME_IP_FRAG_O] &&
- (m1[MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_O] ||
- m1[MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_O] ||
- m1[MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_O] ||
- m1[MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_O]))
- return false;
+ for (i = mask->match_sz / 4 - 1; i >= 0; i--)
+ if (mask->match_buf[i])
+ return i;
- if (m1[MLX5HWS_DEFINER_FNAME_IP_FRAG_I] &&
- (m2[MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_I] ||
- m2[MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_I] ||
- m2[MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_I] ||
- m2[MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_I]))
- return false;
+ return -1;
+}
- if (m2[MLX5HWS_DEFINER_FNAME_IP_FRAG_I] &&
- (m1[MLX5HWS_DEFINER_FNAME_ETH_SMAC_47_16_I] ||
- m1[MLX5HWS_DEFINER_FNAME_ETH_SMAC_15_0_I] ||
- m1[MLX5HWS_DEFINER_FNAME_ETH_DMAC_47_16_I] ||
- m1[MLX5HWS_DEFINER_FNAME_ETH_DMAC_15_0_I]))
- return false;
+static bool hws_match_mask_is_empty(const struct mlx5hws_match_parameters *mask)
+{
+ return hws_get_last_set_dword_idx(mask) == -1;
+}
- /* Don't split outer IPv6 dest address. */
- if ((m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_O] ||
- m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_O] ||
- m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_O] ||
- m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_O]) &&
- (m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_O] ||
- m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_O] ||
- m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_O] ||
- m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_O]))
- return false;
+static bool hws_dword_is_inner_ipaddr_off(int dword_off)
+{
+ /* IPv4 and IPv6 addresses share the same entry via a union, and the
+ * source and dest addresses are contiguous in the fte_match_param. So
+ * we need to check 8 dwords.
+ */
+ static const int inner_ip_dword_off =
+ __mlx5_dw_off(fte_match_param, inner_headers.src_ipv4_src_ipv6);
- /* Don't split outer IPv6 source address. */
- if ((m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_O] ||
- m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_O] ||
- m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_O] ||
- m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_O]) &&
- (m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_O] ||
- m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_O] ||
- m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_O] ||
- m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_O]))
- return false;
+ return dword_off >= inner_ip_dword_off &&
+ dword_off < inner_ip_dword_off + 8;
+}
- /* Don't split inner IPv6 dest address. */
- if ((m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_I] ||
- m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_I] ||
- m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_I] ||
- m1[MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_I]) &&
- (m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_127_96_I] ||
- m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_95_64_I] ||
- m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_63_32_I] ||
- m2[MLX5HWS_DEFINER_FNAME_IPV6_DST_31_0_I]))
- return false;
+static bool hws_dword_is_outer_ipaddr_off(int dword_off)
+{
+ static const int outer_ip_dword_off =
+ __mlx5_dw_off(fte_match_param, outer_headers.src_ipv4_src_ipv6);
- /* Don't split inner IPv6 source address. */
- if ((m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_I] ||
- m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_I] ||
- m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_I] ||
- m1[MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_I]) &&
- (m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_127_96_I] ||
- m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_95_64_I] ||
- m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_63_32_I] ||
- m2[MLX5HWS_DEFINER_FNAME_IPV6_SRC_31_0_I]))
- return false;
+ return dword_off >= outer_ip_dword_off &&
+ dword_off < outer_ip_dword_off + 8;
+}
- /* Don't split GRE parameters. */
- if ((m1[MLX5HWS_DEFINER_FNAME_GRE_C] ||
- m1[MLX5HWS_DEFINER_FNAME_GRE_K] ||
- m1[MLX5HWS_DEFINER_FNAME_GRE_S] ||
- m1[MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL]) &&
- (m2[MLX5HWS_DEFINER_FNAME_GRE_C] ||
- m2[MLX5HWS_DEFINER_FNAME_GRE_K] ||
- m2[MLX5HWS_DEFINER_FNAME_GRE_S] ||
- m2[MLX5HWS_DEFINER_FNAME_GRE_PROTOCOL]))
- return false;
+static void hws_add_dword_to_mask(struct mlx5hws_match_parameters *mask,
+ const struct mlx5hws_match_parameters *orig,
+ int dword_idx, bool *added_inner_ipv,
+ bool *added_outer_ipv)
+{
+ mask->match_buf[dword_idx] |= orig->match_buf[dword_idx];
- /* Don't split TCP ack/seq numbers. */
- if ((m1[MLX5HWS_DEFINER_FNAME_TCP_ACK_NUM] ||
- m1[MLX5HWS_DEFINER_FNAME_TCP_SEQ_NUM]) &&
- (m2[MLX5HWS_DEFINER_FNAME_TCP_ACK_NUM] ||
- m2[MLX5HWS_DEFINER_FNAME_TCP_SEQ_NUM]))
- return false;
+ *added_inner_ipv = false;
+ *added_outer_ipv = false;
- /* Don't split flex parser. */
- if ((m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0] ||
- m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1] ||
- m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2] ||
- m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3] ||
- m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4] ||
- m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5] ||
- m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6] ||
- m1[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7]) &&
- (m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_0] ||
- m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_1] ||
- m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_2] ||
- m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_3] ||
- m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_4] ||
- m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_5] ||
- m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_6] ||
- m2[MLX5HWS_DEFINER_FNAME_FLEX_PARSER_7]))
- return false;
+ /* Any IP address fragment must be accompanied by a match on IP version.
+ * Use the `added_ipv` variables to keep track of whether we added IP versions
+ * specifically for this dword, so that we can roll them back if the
+ * match params become too large to fit into a definer.
+ */
+ if (hws_dword_is_inner_ipaddr_off(dword_idx) &&
+ !MLX5_GET(fte_match_param, mask->match_buf,
+ inner_headers.ip_version)) {
+ MLX5_SET(fte_match_param, mask->match_buf,
+ inner_headers.ip_version, 0xf);
+ *added_inner_ipv = true;
+ }
+ if (hws_dword_is_outer_ipaddr_off(dword_idx) &&
+ !MLX5_GET(fte_match_param, mask->match_buf,
+ outer_headers.ip_version)) {
+ MLX5_SET(fte_match_param, mask->match_buf,
+ outer_headers.ip_version, 0xf);
+ *added_outer_ipv = true;
+ }
+}
- return true;
+static void hws_remove_dword_from_mask(struct mlx5hws_match_parameters *mask,
+ int dword_idx, bool added_inner_ipv,
+ bool added_outer_ipv)
+{
+ mask->match_buf[dword_idx] = 0;
+ if (added_inner_ipv)
+ MLX5_SET(fte_match_param, mask->match_buf,
+ inner_headers.ip_version, 0);
+ if (added_outer_ipv)
+ MLX5_SET(fte_match_param, mask->match_buf,
+ outer_headers.ip_version, 0);
}
-static void
-hws_bwc_matcher_complex_params_comb_create(struct mlx5hws_context *ctx,
- struct mlx5hws_match_parameters *m,
- struct mlx5hws_match_parameters *m1,
- struct mlx5hws_match_parameters *m2,
- struct mlx5hws_definer_fc *fc,
- int fc_sz,
- u32 combination_num)
+/* Avoid leaving a single lower dword in `mask` if there are others present in
+ * `orig`. Splitting IPv6 addresses like this causes them to be interpreted as
+ * IPv4.
+ */
+static void hws_avoid_ipv6_split_of(struct mlx5hws_match_parameters *orig,
+ struct mlx5hws_match_parameters *mask,
+ int off)
{
- bool is_first_matcher;
- int i;
+ /* Masks are allocated to a full fte_match_param, but it can't hurt to
+ * double check.
+ */
+ if (orig->match_sz <= off + 3 || mask->match_sz <= off + 3)
+ return;
- memcpy(m1->match_buf, m->match_buf, m->match_sz);
- memcpy(m2->match_buf, m->match_buf, m->match_sz);
+ /* Lower dword is not set, nothing to do. */
+ if (!mask->match_buf[off + 3])
+ return;
- for (i = 0; i < fc_sz; i++) {
- is_first_matcher = !(combination_num & BIT(i));
- hws_bwc_matcher_complex_params_clear_fld(ctx,
- fc[i].fname,
- is_first_matcher ?
- m2 : m1);
- }
+ /* Higher dwords also present in `mask`, no ambiguity. */
+ if (mask->match_buf[off] || mask->match_buf[off + 1] ||
+ mask->match_buf[off + 2])
+ return;
- MLX5_SET(fte_match_param, m2->match_buf,
- misc_parameters_2.metadata_reg_c_6, -1);
+ /* There are no higher dwords in `orig`, i.e. we match on IPv4. */
+ if (!orig->match_buf[off] && !orig->match_buf[off + 1] &&
+ !orig->match_buf[off + 2])
+ return;
+
+ /* Put the lower dword back in `orig`. It is always safe to do this; the
+ * dword will just be picked up in the next submask.
+ */
+ orig->match_buf[off + 3] = mask->match_buf[off + 3];
+ mask->match_buf[off + 3] = 0;
}
-static void
-hws_bwc_matcher_complex_params_destroy(struct mlx5hws_match_parameters *mask_1,
- struct mlx5hws_match_parameters *mask_2)
+static void hws_avoid_ipv6_split(struct mlx5hws_match_parameters *orig,
+ struct mlx5hws_match_parameters *mask)
{
- kfree(mask_1->match_buf);
- kfree(mask_2->match_buf);
+ hws_avoid_ipv6_split_of(orig, mask,
+ __mlx5_dw_off(fte_match_param,
+ outer_headers.src_ipv4_src_ipv6));
+ hws_avoid_ipv6_split_of(orig, mask,
+ __mlx5_dw_off(fte_match_param,
+ outer_headers.dst_ipv4_dst_ipv6));
+ hws_avoid_ipv6_split_of(orig, mask,
+ __mlx5_dw_off(fte_match_param,
+ inner_headers.src_ipv4_src_ipv6));
+ hws_avoid_ipv6_split_of(orig, mask,
+ __mlx5_dw_off(fte_match_param,
+ inner_headers.dst_ipv4_dst_ipv6));
}
-static int
-hws_bwc_matcher_complex_params_create(struct mlx5hws_context *ctx,
- u8 match_criteria,
- struct mlx5hws_match_parameters *mask,
- struct mlx5hws_match_parameters *mask_1,
- struct mlx5hws_match_parameters *mask_2)
+/* Build a subset of the `orig` match parameters into `mask`. This subset is
+ * guaranteed to fit in a single definer and as such is a candidate for being a
+ * part of a complex matcher. Upon successful execution, the match params that
+ * go into `mask` are cleared from `orig`.
+ */
+static int hws_get_simple_params(struct mlx5hws_context *ctx, u8 match_criteria,
+ struct mlx5hws_match_parameters *orig,
+ struct mlx5hws_match_parameters *mask)
{
- struct mlx5hws_definer_fc *fc;
- u32 num_of_combinations;
- int fc_sz = 0;
- int res = 0;
- u32 i;
-
- if (MLX5_GET(fte_match_param, mask->match_buf,
- misc_parameters_2.metadata_reg_c_6)) {
- mlx5hws_err(ctx, "Complex matcher: REG_C_6 matching is reserved\n");
- res = -EINVAL;
- goto out;
- }
+ bool added_inner_ipv, added_outer_ipv;
+ int dword_idx;
+ u32 *backup;
+ int ret;
- mask_1->match_buf = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param),
- GFP_KERNEL);
- mask_2->match_buf = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param),
- GFP_KERNEL);
- if (!mask_1->match_buf || !mask_2->match_buf) {
- mlx5hws_err(ctx, "Complex matcher: failed to allocate match_param\n");
- res = -ENOMEM;
- goto free_params;
- }
+ dword_idx = hws_get_last_set_dword_idx(orig);
+ /* Nothing to do, we consumed all of the match params before. */
+ if (dword_idx == -1)
+ return 0;
- mask_1->match_sz = mask->match_sz;
- mask_2->match_sz = mask->match_sz;
+ backup = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ if (!backup)
+ return -ENOMEM;
- fc = mlx5hws_definer_conv_match_params_to_compressed_fc(ctx,
- match_criteria,
- mask->match_buf,
- &fc_sz);
- if (!fc) {
- res = -ENOMEM;
- goto free_params;
- }
+ while (1) {
+ dword_idx = hws_get_last_set_dword_idx(orig);
+ /* Nothing to do, we consumed all of the original match params
+ * into this subset, which still fits into a single matcher.
+ */
+ if (dword_idx == -1) {
+ ret = 0;
+ goto free_backup;
+ }
- if (fc_sz >= sizeof(num_of_combinations) * BITS_PER_BYTE) {
- mlx5hws_err(ctx,
- "Complex matcher: too many match parameters (%d)\n",
- fc_sz);
- res = -EINVAL;
- goto free_fc;
+ memcpy(backup, mask->match_buf, mask->match_sz);
+
+ /* Try to add this dword to the current subset. */
+ hws_add_dword_to_mask(mask, orig, dword_idx, &added_inner_ipv,
+ &added_outer_ipv);
+
+ if (hws_match_params_exceeds_definer(ctx, match_criteria, mask,
+ false)) {
+ /* We just added a match param that makes the definer
+ * too large. Revert and return what we had before.
+ * Note that we can't just zero out the affected fields,
+ * because it's possible that the dword we're looking at
+ * wasn't zero before (e.g. it included auto-added
+ * matches on IP version). This is why we employ the
+ * rather cumbersome memcpy for backing up.
+ */
+ memcpy(mask->match_buf, backup, mask->match_sz);
+ /* Possible future improvement: We can't add any more
+ * dwords, but it may be possible to squeeze in
+ * individual bytes, as definers have special slots for
+ * those.
+ *
+ * For now, keep the code simple. This results in an
+ * extra submatcher in some cases, but it's good enough.
+ */
+ ret = 0;
+ break;
+ }
+
+ /* The current subset of match params still fits in a single
+ * definer. Remove the dword from the original mask.
+ *
+ * Also remove any explicit match on IP version if we just
+ * included one here. We will still automatically add it to
+ * accompany any IP address fragment, but do not need to
+ * consider it by itself.
+ */
+ hws_remove_dword_from_mask(orig, dword_idx, added_inner_ipv,
+ added_outer_ipv);
}
- /* We have list of all the match fields from the match parameter.
- * Now try all the possibilities of splitting them into two match
- * buffers and look for the supported combination.
+ /* Make sure we have not picked up a single lower dword of an IPv6
+ * address, as the firmware will erroneously treat it as an IPv4
+ * address.
*/
- num_of_combinations = 1 << fc_sz;
+ hws_avoid_ipv6_split(orig, mask);
- /* Start from combination at index 1 - we know that 0 is unsupported */
- for (i = 1; i < num_of_combinations; i++) {
- if (!hws_bwc_matcher_complex_params_comb_is_valid(fc, fc_sz, i))
- continue;
+free_backup:
+ kfree(backup);
- hws_bwc_matcher_complex_params_comb_create(ctx,
- mask, mask_1, mask_2,
- fc, fc_sz, i);
- /* We now have two separate sets of match params.
- * Check if each of them can be used in its own matcher.
+ return ret;
+}
+
+static int
+hws_bwc_matcher_split_mask(struct mlx5hws_context *ctx, u8 match_criteria,
+ const struct mlx5hws_match_parameters *mask,
+ struct mlx5hws_match_parameters *submasks,
+ int *num_submasks)
+{
+ struct mlx5hws_match_parameters mask_copy;
+ int ret, i = 0;
+
+ mask_copy.match_sz = MLX5_ST_SZ_BYTES(fte_match_param);
+ mask_copy.match_buf = kzalloc(mask_copy.match_sz, GFP_KERNEL);
+ if (!mask_copy.match_buf)
+ return -ENOMEM;
+
+ memcpy(mask_copy.match_buf, mask->match_buf, mask->match_sz);
+
+ while (!hws_match_mask_is_empty(&mask_copy)) {
+ if (i >= MLX5HWS_BWC_COMPLEX_MAX_SUBMATCHERS) {
+ mlx5hws_err(ctx,
+ "Complex matcher: mask too large for %d matchers\n",
+ MLX5HWS_BWC_COMPLEX_MAX_SUBMATCHERS);
+ ret = -E2BIG;
+ goto free_copy;
+ }
+ /* All but the first matcher need to match on register C6 to
+ * connect pieces of the complex rule together.
*/
- if (!mlx5hws_bwc_match_params_is_complex(ctx,
- match_criteria,
- mask_1) &&
- !mlx5hws_bwc_match_params_is_complex(ctx,
- match_criteria,
- mask_2))
- break;
+ if (i > 0) {
+ MLX5_SET(fte_match_param, submasks[i].match_buf,
+ misc_parameters_2.metadata_reg_c_6, -1);
+ match_criteria |= MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2;
+ }
+ ret = hws_get_simple_params(ctx, match_criteria, &mask_copy,
+ &submasks[i]);
+ if (ret < 0)
+ goto free_copy;
+ i++;
}
- if (i == num_of_combinations) {
- /* We've scanned all the combinations, but to no avail */
- mlx5hws_err(ctx, "Complex matcher: couldn't find match params combination\n");
- res = -EINVAL;
- goto free_fc;
- }
+ *num_submasks = i;
+ ret = 0;
- kfree(fc);
- return 0;
+free_copy:
+ kfree(mask_copy.match_buf);
-free_fc:
- kfree(fc);
-free_params:
- hws_bwc_matcher_complex_params_destroy(mask_1, mask_2);
-out:
- return res;
+ return ret;
}
-static int
-hws_bwc_isolated_table_create(struct mlx5hws_bwc_matcher *bwc_matcher,
- struct mlx5hws_table *table)
+static struct mlx5hws_table *
+hws_isolated_table_create(const struct mlx5hws_bwc_matcher *cmatcher)
{
+ struct mlx5hws_bwc_complex_submatcher *first_subm;
struct mlx5hws_cmd_ft_modify_attr ft_attr = {0};
- struct mlx5hws_context *ctx = table->ctx;
struct mlx5hws_table_attr tbl_attr = {0};
- struct mlx5hws_table *isolated_tbl;
- int ret = 0;
+ struct mlx5hws_table *orig_tbl;
+ struct mlx5hws_context *ctx;
+ struct mlx5hws_table *tbl;
+ int ret;
- tbl_attr.type = table->type;
- tbl_attr.level = table->level;
+ first_subm = &cmatcher->complex->submatchers[0];
+ orig_tbl = first_subm->tbl;
+ ctx = orig_tbl->ctx;
- bwc_matcher->complex->isolated_tbl =
- mlx5hws_table_create(ctx, &tbl_attr);
- isolated_tbl = bwc_matcher->complex->isolated_tbl;
- if (!isolated_tbl)
- return -EINVAL;
+ tbl_attr.type = orig_tbl->type;
+ tbl_attr.level = orig_tbl->level;
+ tbl = mlx5hws_table_create(ctx, &tbl_attr);
+ if (!tbl)
+ return ERR_PTR(-EINVAL);
- /* Set the default miss of the isolated table to
- * point to the end anchor of the original matcher.
+ /* Set the default miss of the isolated table to point
+ * to the end anchor of the original matcher.
*/
- mlx5hws_cmd_set_attr_connect_miss_tbl(ctx,
- isolated_tbl->fw_ft_type,
- isolated_tbl->type,
- &ft_attr);
- ft_attr.table_miss_id = bwc_matcher->matcher->end_ft_id;
-
- ret = mlx5hws_cmd_flow_table_modify(ctx->mdev,
- &ft_attr,
- isolated_tbl->ft_id);
+ mlx5hws_cmd_set_attr_connect_miss_tbl(ctx, tbl->fw_ft_type,
+ tbl->type, &ft_attr);
+ ft_attr.table_miss_id = first_subm->bwc_matcher->matcher->end_ft_id;
+
+ ret = mlx5hws_cmd_flow_table_modify(ctx->mdev, &ft_attr, tbl->ft_id);
if (ret) {
- mlx5hws_err(ctx, "Failed setting isolated tbl default miss\n");
+ mlx5hws_err(ctx, "Complex matcher: failed to set isolated tbl default miss\n");
goto destroy_tbl;
}
- return 0;
+ return tbl;
destroy_tbl:
- mlx5hws_table_destroy(isolated_tbl);
- return ret;
+ mlx5hws_table_destroy(tbl);
+
+ return ERR_PTR(ret);
}
-static void hws_bwc_isolated_table_destroy(struct mlx5hws_table *isolated_tbl)
+static int hws_submatcher_init_first(struct mlx5hws_bwc_matcher *cmatcher,
+ struct mlx5hws_table *table, u32 priority,
+ u8 match_criteria,
+ struct mlx5hws_match_parameters *mask)
{
- /* This table is isolated - no table is pointing to it, no need to
- * disconnect it from anywhere, it won't affect any other table's miss.
+ enum mlx5hws_action_type action_types[HWS_NUM_CHAIN_ACTIONS];
+ struct mlx5hws_bwc_complex_submatcher *subm;
+ int ret;
+
+ subm = &cmatcher->complex->submatchers[0];
+
+ /* The first submatcher lives in the original table and does not have an
+ * associated jump to table action. It also points to the outer complex
+ * matcher.
*/
- mlx5hws_table_destroy(isolated_tbl);
+ subm->tbl = table;
+ subm->action_tbl = NULL;
+ subm->bwc_matcher = cmatcher;
+
+ action_types[0] = MLX5HWS_ACTION_TYP_MODIFY_HDR;
+ action_types[1] = MLX5HWS_ACTION_TYP_TBL;
+ action_types[2] = MLX5HWS_ACTION_TYP_LAST;
+
+ ret = mlx5hws_bwc_matcher_create_simple(subm->bwc_matcher, subm->tbl,
+ priority, match_criteria, mask,
+ action_types);
+ if (ret)
+ return ret;
+
+ subm->bwc_matcher->matcher_type = MLX5HWS_BWC_MATCHER_COMPLEX_FIRST;
+
+ ret = rhashtable_init(&subm->rules_hash, &hws_rules_hash_params);
+ if (ret)
+ goto destroy_matcher;
+ mutex_init(&subm->hash_lock);
+ ida_init(&subm->chain_ida);
+
+ return 0;
+
+destroy_matcher:
+ mlx5hws_bwc_matcher_destroy_simple(subm->bwc_matcher);
+
+ return ret;
}
-static int
-hws_bwc_isolated_matcher_create(struct mlx5hws_bwc_matcher *bwc_matcher,
- struct mlx5hws_table *table,
- u8 match_criteria_enable,
- struct mlx5hws_match_parameters *mask)
+static int hws_submatcher_init(struct mlx5hws_bwc_matcher *cmatcher, int idx,
+ struct mlx5hws_table *table, u32 priority,
+ u8 match_criteria,
+ struct mlx5hws_match_parameters *mask)
{
- struct mlx5hws_table *isolated_tbl = bwc_matcher->complex->isolated_tbl;
- struct mlx5hws_bwc_matcher *isolated_bwc_matcher;
- struct mlx5hws_context *ctx = table->ctx;
+ enum mlx5hws_action_type action_types[HWS_NUM_CHAIN_ACTIONS];
+ struct mlx5hws_bwc_complex_submatcher *subm;
+ bool is_last;
int ret;
- isolated_bwc_matcher = kzalloc(sizeof(*bwc_matcher), GFP_KERNEL);
- if (!isolated_bwc_matcher)
- return -ENOMEM;
+ if (!idx)
+ return hws_submatcher_init_first(cmatcher, table, priority,
+ match_criteria, mask);
+
+ subm = &cmatcher->complex->submatchers[idx];
+ is_last = idx == cmatcher->complex->num_submatchers - 1;
+
+ subm->tbl = hws_isolated_table_create(cmatcher);
+ if (IS_ERR(subm->tbl))
+ return PTR_ERR(subm->tbl);
+
+ subm->action_tbl =
+ mlx5hws_action_create_dest_table(subm->tbl->ctx, subm->tbl,
+ MLX5HWS_ACTION_FLAG_HWS_FDB);
+ if (!subm->action_tbl) {
+ ret = -EINVAL;
+ goto destroy_tbl;
+ }
+
+ subm->bwc_matcher = kzalloc(sizeof(*subm->bwc_matcher), GFP_KERNEL);
+ if (!subm->bwc_matcher) {
+ ret = -ENOMEM;
+ goto destroy_action;
+ }
- bwc_matcher->complex->isolated_bwc_matcher = isolated_bwc_matcher;
+ /* Every matcher other than the first also matches on register C6 to
+ * bind subrules together in the complex rule using the chain ids.
+ */
+ match_criteria |= MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2;
- /* Isolated BWC matcher needs access to the first BWC matcher */
- isolated_bwc_matcher->complex_first_bwc_matcher = bwc_matcher;
+ action_types[0] = MLX5HWS_ACTION_TYP_MODIFY_HDR;
+ action_types[1] = MLX5HWS_ACTION_TYP_TBL;
+ action_types[2] = MLX5HWS_ACTION_TYP_LAST;
- /* Isolated matcher needs to match on REG_C_6,
- * so make sure its criteria bit is on.
+ /* Every matcher other than the last sets register C6 and jumps to the
+ * next submatcher's table. The final submatcher will use the
+ * user-supplied actions and will attach an action template at rule
+ * insertion time.
*/
- match_criteria_enable |= MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2;
-
- ret = mlx5hws_bwc_matcher_create_simple(isolated_bwc_matcher,
- isolated_tbl,
- 0,
- match_criteria_enable,
- mask,
- NULL);
- if (ret) {
- mlx5hws_err(ctx, "Complex matcher: failed creating isolated BWC matcher\n");
+ ret = mlx5hws_bwc_matcher_create_simple(subm->bwc_matcher, subm->tbl,
+ priority, match_criteria, mask,
+ is_last ? NULL : action_types);
+ if (ret)
goto free_matcher;
- }
+
+ subm->bwc_matcher->matcher_type =
+ MLX5HWS_BWC_MATCHER_COMPLEX_SUBMATCHER;
+
+ ret = rhashtable_init(&subm->rules_hash, &hws_rules_hash_params);
+ if (ret)
+ goto destroy_matcher;
+ mutex_init(&subm->hash_lock);
+ ida_init(&subm->chain_ida);
return 0;
+destroy_matcher:
+ mlx5hws_bwc_matcher_destroy_simple(subm->bwc_matcher);
free_matcher:
- kfree(bwc_matcher->complex->isolated_bwc_matcher);
+ kfree(subm->bwc_matcher);
+destroy_action:
+ mlx5hws_action_destroy(subm->action_tbl);
+destroy_tbl:
+ mlx5hws_table_destroy(subm->tbl);
+
return ret;
}
-static void
-hws_bwc_isolated_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
+static void hws_submatcher_destroy(struct mlx5hws_bwc_matcher *cmatcher,
+ int idx)
{
- mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
- kfree(bwc_matcher);
+ struct mlx5hws_bwc_complex_submatcher *subm;
+
+ subm = &cmatcher->complex->submatchers[idx];
+
+ ida_destroy(&subm->chain_ida);
+ mutex_destroy(&subm->hash_lock);
+ rhashtable_destroy(&subm->rules_hash);
+
+ if (subm->bwc_matcher) {
+ mlx5hws_bwc_matcher_destroy_simple(subm->bwc_matcher);
+ if (idx)
+ kfree(subm->bwc_matcher);
+ }
+
+ /* We own all of the isolated tables, but not the original one. */
+ if (idx) {
+ mlx5hws_action_destroy(subm->action_tbl);
+ mlx5hws_table_destroy(subm->tbl);
+ }
}
static int
-hws_bwc_isolated_actions_create(struct mlx5hws_bwc_matcher *bwc_matcher,
- struct mlx5hws_table *table)
+hws_complex_data_actions_init(struct mlx5hws_bwc_matcher_complex_data *cdata)
{
- struct mlx5hws_table *isolated_tbl = bwc_matcher->complex->isolated_tbl;
+ struct mlx5hws_context *ctx = cdata->submatchers[0].tbl->ctx;
u8 modify_hdr_action[MLX5_ST_SZ_BYTES(set_action_in)] = {0};
- struct mlx5hws_context *ctx = table->ctx;
struct mlx5hws_action_mh_pattern ptrn;
int ret = 0;
- /* Create action to jump to isolated table */
-
- bwc_matcher->complex->action_go_to_tbl =
- mlx5hws_action_create_dest_table(ctx,
- isolated_tbl,
- MLX5HWS_ACTION_FLAG_HWS_FDB);
- if (!bwc_matcher->complex->action_go_to_tbl) {
- mlx5hws_err(ctx, "Complex matcher: failed to create go-to-tbl action\n");
- return -EINVAL;
- }
-
/* Create modify header action to set REG_C_6 */
-
MLX5_SET(set_action_in, modify_hdr_action,
action_type, MLX5_MODIFICATION_TYPE_SET);
MLX5_SET(set_action_in, modify_hdr_action,
@@ -895,19 +553,18 @@ hws_bwc_isolated_actions_create(struct mlx5hws_bwc_matcher *bwc_matcher,
ptrn.data = (void *)modify_hdr_action;
ptrn.sz = MLX5HWS_ACTION_DOUBLE_SIZE;
- bwc_matcher->complex->action_metadata =
+ cdata->action_metadata =
mlx5hws_action_create_modify_header(ctx, 1, &ptrn, 0,
MLX5HWS_ACTION_FLAG_HWS_FDB);
- if (!bwc_matcher->complex->action_metadata) {
- ret = -EINVAL;
- goto destroy_action_go_to_tbl;
+ if (!cdata->action_metadata) {
+ mlx5hws_err(ctx, "Complex matcher: failed to create set reg C6 action\n");
+ return -EINVAL;
}
/* Create last action */
-
- bwc_matcher->complex->action_last =
+ cdata->action_last =
mlx5hws_action_create_last(ctx, MLX5HWS_ACTION_FLAG_HWS_FDB);
- if (!bwc_matcher->complex->action_last) {
+ if (!cdata->action_last) {
mlx5hws_err(ctx, "Complex matcher: failed to create last action\n");
ret = -EINVAL;
goto destroy_action_metadata;
@@ -916,196 +573,130 @@ hws_bwc_isolated_actions_create(struct mlx5hws_bwc_matcher *bwc_matcher,
return 0;
destroy_action_metadata:
- mlx5hws_action_destroy(bwc_matcher->complex->action_metadata);
-destroy_action_go_to_tbl:
- mlx5hws_action_destroy(bwc_matcher->complex->action_go_to_tbl);
+ mlx5hws_action_destroy(cdata->action_metadata);
+
return ret;
}
static void
-hws_bwc_isolated_actions_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
+hws_complex_data_actions_destroy(struct mlx5hws_bwc_matcher_complex_data *cdata)
{
- mlx5hws_action_destroy(bwc_matcher->complex->action_last);
- mlx5hws_action_destroy(bwc_matcher->complex->action_metadata);
- mlx5hws_action_destroy(bwc_matcher->complex->action_go_to_tbl);
+ mlx5hws_action_destroy(cdata->action_last);
+ mlx5hws_action_destroy(cdata->action_metadata);
}
int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher,
struct mlx5hws_table *table,
- u32 priority,
- u8 match_criteria_enable,
+ u32 priority, u8 match_criteria_enable,
struct mlx5hws_match_parameters *mask)
{
- enum mlx5hws_action_type complex_init_action_types[3];
- struct mlx5hws_bwc_matcher *isolated_bwc_matcher;
- struct mlx5hws_match_parameters mask_1 = {0};
- struct mlx5hws_match_parameters mask_2 = {0};
+ struct mlx5hws_match_parameters
+ submasks[MLX5HWS_BWC_COMPLEX_MAX_SUBMATCHERS] = {0};
+ struct mlx5hws_bwc_matcher_complex_data *cdata;
struct mlx5hws_context *ctx = table->ctx;
- int ret;
-
- ret = hws_bwc_matcher_complex_params_create(table->ctx,
- match_criteria_enable,
- mask, &mask_1, &mask_2);
- if (ret)
- goto err;
-
- bwc_matcher->complex =
- kzalloc(sizeof(*bwc_matcher->complex), GFP_KERNEL);
- if (!bwc_matcher->complex) {
- ret = -ENOMEM;
- goto free_masks;
- }
+ int num_submatchers;
+ int i, ret;
- ret = rhashtable_init(&bwc_matcher->complex->refcount_hash,
- &hws_refcount_hash);
- if (ret) {
- mlx5hws_err(ctx, "Complex matcher: failed to initialize rhashtable\n");
- goto free_complex;
+ for (i = 0; i < ARRAY_SIZE(submasks); i++) {
+ submasks[i].match_sz = MLX5_ST_SZ_BYTES(fte_match_param);
+ submasks[i].match_buf = kzalloc(submasks[i].match_sz,
+ GFP_KERNEL);
+ if (!submasks[i].match_buf) {
+ ret = -ENOMEM;
+ goto free_submasks;
+ }
}
- mutex_init(&bwc_matcher->complex->hash_lock);
- ida_init(&bwc_matcher->complex->metadata_ida);
-
- /* Create initial action template for the first matcher.
- * Usually the initial AT is just dummy, but in case of complex
- * matcher we know exactly which actions should it have.
- */
-
- complex_init_action_types[0] = MLX5HWS_ACTION_TYP_MODIFY_HDR;
- complex_init_action_types[1] = MLX5HWS_ACTION_TYP_TBL;
- complex_init_action_types[2] = MLX5HWS_ACTION_TYP_LAST;
-
- /* Create the first matcher */
-
- ret = mlx5hws_bwc_matcher_create_simple(bwc_matcher,
- table,
- priority,
- match_criteria_enable,
- &mask_1,
- complex_init_action_types);
+ ret = hws_bwc_matcher_split_mask(ctx, match_criteria_enable, mask,
+ submasks, &num_submatchers);
if (ret)
- goto destroy_ida;
+ goto free_submasks;
- /* Create isolated table to hold the second isolated matcher */
-
- ret = hws_bwc_isolated_table_create(bwc_matcher, table);
- if (ret) {
- mlx5hws_err(ctx, "Complex matcher: failed creating isolated table\n");
- goto destroy_first_matcher;
+ cdata = kzalloc(sizeof(*cdata), GFP_KERNEL);
+ if (!cdata) {
+ ret = -ENOMEM;
+ goto free_submasks;
}
- /* Now create the second BWC matcher - the isolated one */
+ bwc_matcher->complex = cdata;
+ cdata->num_submatchers = num_submatchers;
- ret = hws_bwc_isolated_matcher_create(bwc_matcher, table,
- match_criteria_enable, &mask_2);
- if (ret) {
- mlx5hws_err(ctx, "Complex matcher: failed creating isolated matcher\n");
- goto destroy_isolated_tbl;
+ for (i = 0; i < num_submatchers; i++) {
+ ret = hws_submatcher_init(bwc_matcher, i, table, priority,
+ match_criteria_enable, &submasks[i]);
+ if (ret)
+ goto destroy_submatchers;
}
- /* Create action for isolated matcher's rules */
-
- ret = hws_bwc_isolated_actions_create(bwc_matcher, table);
- if (ret) {
- mlx5hws_err(ctx, "Complex matcher: failed creating isolated actions\n");
- goto destroy_isolated_matcher;
- }
+ ret = hws_complex_data_actions_init(cdata);
+ if (ret)
+ goto destroy_submatchers;
- hws_bwc_matcher_complex_params_destroy(&mask_1, &mask_2);
- return 0;
+ ret = 0;
+ goto free_submasks;
-destroy_isolated_matcher:
- isolated_bwc_matcher = bwc_matcher->complex->isolated_bwc_matcher;
- hws_bwc_isolated_matcher_destroy(isolated_bwc_matcher);
-destroy_isolated_tbl:
- hws_bwc_isolated_table_destroy(bwc_matcher->complex->isolated_tbl);
-destroy_first_matcher:
- mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
-destroy_ida:
- ida_destroy(&bwc_matcher->complex->metadata_ida);
- mutex_destroy(&bwc_matcher->complex->hash_lock);
- rhashtable_destroy(&bwc_matcher->complex->refcount_hash);
-free_complex:
- kfree(bwc_matcher->complex);
+destroy_submatchers:
+ while (i--)
+ hws_submatcher_destroy(bwc_matcher, i);
+ kfree(cdata);
bwc_matcher->complex = NULL;
-free_masks:
- hws_bwc_matcher_complex_params_destroy(&mask_1, &mask_2);
-err:
+
+free_submasks:
+ for (i = 0; i < ARRAY_SIZE(submasks); i++)
+ kfree(submasks[i].match_buf);
+
return ret;
}
void
mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
{
- struct mlx5hws_bwc_matcher *isolated_bwc_matcher =
- bwc_matcher->complex->isolated_bwc_matcher;
-
- hws_bwc_isolated_actions_destroy(bwc_matcher);
- hws_bwc_isolated_matcher_destroy(isolated_bwc_matcher);
- hws_bwc_isolated_table_destroy(bwc_matcher->complex->isolated_tbl);
- mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
- ida_destroy(&bwc_matcher->complex->metadata_ida);
- mutex_destroy(&bwc_matcher->complex->hash_lock);
- rhashtable_destroy(&bwc_matcher->complex->refcount_hash);
+ int i;
+
+ hws_complex_data_actions_destroy(bwc_matcher->complex);
+ for (i = 0; i < bwc_matcher->complex->num_submatchers; i++)
+ hws_submatcher_destroy(bwc_matcher, i);
kfree(bwc_matcher->complex);
bwc_matcher->complex = NULL;
}
-static void
-hws_bwc_matcher_complex_hash_lock(struct mlx5hws_bwc_matcher *bwc_matcher)
-{
- mutex_lock(&bwc_matcher->complex->hash_lock);
-}
-
-static void
-hws_bwc_matcher_complex_hash_unlock(struct mlx5hws_bwc_matcher *bwc_matcher)
-{
- mutex_unlock(&bwc_matcher->complex->hash_lock);
-}
-
static int
-hws_bwc_rule_complex_hash_node_get(struct mlx5hws_bwc_rule *bwc_rule,
- struct mlx5hws_match_parameters *params)
+hws_complex_get_subrule_data(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_bwc_complex_submatcher *subm,
+ u32 *match_params)
+__must_hold(&subm->hash_lock)
{
- struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
- struct mlx5hws_bwc_complex_rule_hash_node *node, *old_node;
- struct rhashtable *refcount_hash;
- int ret, i;
-
- bwc_rule->complex_hash_node = NULL;
+ struct mlx5hws_bwc_matcher *bwc_matcher = subm->bwc_matcher;
+ struct mlx5hws_bwc_complex_subrule_data *sr_data, *old_data;
+ struct mlx5hws_match_template *mt;
+ int ret;
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (unlikely(!node))
+ sr_data = kzalloc(sizeof(*sr_data), GFP_KERNEL);
+ if (!sr_data)
return -ENOMEM;
- ret = ida_alloc(&bwc_matcher->complex->metadata_ida, GFP_KERNEL);
+ ret = ida_alloc(&subm->chain_ida, GFP_KERNEL);
if (ret < 0)
- goto err_free_node;
- node->tag = ret;
+ goto free_sr_data;
+ sr_data->chain_id = ret;
- refcount_set(&node->refcount, 1);
+ refcount_set(&sr_data->refcount, 1);
- /* Clear match buffer - turn off all the unrelated fields
- * in accordance with the match params mask for the first
- * matcher out of the two parts of the complex matcher.
- * The resulting mask is the key for the hash.
- */
- for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
- node->match_buf[i] = params->match_buf[i] &
- bwc_matcher->mt->match_param[i];
-
- refcount_hash = &bwc_matcher->complex->refcount_hash;
- old_node = rhashtable_lookup_get_insert_fast(refcount_hash,
- &node->hash_node,
- hws_refcount_hash);
- if (IS_ERR(old_node)) {
- ret = PTR_ERR(old_node);
- goto err_free_ida;
+ mt = bwc_matcher->matcher->mt;
+ mlx5hws_definer_create_tag(match_params, mt->fc, mt->fc_sz,
+ (u8 *)&sr_data->match_tag);
+
+ old_data = rhashtable_lookup_get_insert_fast(&subm->rules_hash,
+ &sr_data->hash_node,
+ hws_rules_hash_params);
+ if (IS_ERR(old_data)) {
+ ret = PTR_ERR(old_data);
+ goto free_ida;
}
- if (old_node) {
+ if (old_data) {
/* Rule with the same tag already exists - update refcount */
- refcount_inc(&old_node->refcount);
+ refcount_inc(&old_data->refcount);
/* Let the new rule use the same tag as the existing rule.
* Note that we don't have any indication for the rule creation
* process that a rule with similar matching params already
@@ -1114,247 +705,281 @@ hws_bwc_rule_complex_hash_node_get(struct mlx5hws_bwc_rule *bwc_rule,
* There's some performance advantage in skipping such cases,
* so this is left for future optimizations.
*/
- ida_free(&bwc_matcher->complex->metadata_ida, node->tag);
- kfree(node);
- node = old_node;
+ bwc_rule->subrule_data = old_data;
+ ret = 0;
+ goto free_ida;
}
- bwc_rule->complex_hash_node = node;
+ bwc_rule->subrule_data = sr_data;
return 0;
-err_free_ida:
- ida_free(&bwc_matcher->complex->metadata_ida, node->tag);
-err_free_node:
- kfree(node);
+free_ida:
+ ida_free(&subm->chain_ida, sr_data->chain_id);
+free_sr_data:
+ kfree(sr_data);
+
return ret;
}
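
The get path above is a lookup-or-insert with refcounting: build the subrule's match tag, allocate a candidate chain id, and try to insert into the hash; on a hit, reuse the existing entry (and its chain id) and only bump its refcount. A minimal user-space sketch of that scheme, with a flat array standing in for the rhashtable and a counter standing in for the IDA (all names here are hypothetical stand-ins, not the driver's API):

	#include <stdio.h>
	#include <stdlib.h>

	#define MAX_ENTRIES 8

	struct subrule_data {
		unsigned int tag;	/* stand-in for the match tag */
		unsigned int chain_id;	/* written to / matched on reg C6 */
		int refcount;
	};

	static struct subrule_data *entries[MAX_ENTRIES];
	static unsigned int next_chain_id = 1;	/* IDA stand-in */

	/* Lookup-or-insert: a duplicate tag reuses the existing entry
	 * (and its chain_id) and only bumps the refcount.
	 */
	static struct subrule_data *get_subrule_data(unsigned int tag)
	{
		int i, free_slot = -1;

		for (i = 0; i < MAX_ENTRIES; i++) {
			if (!entries[i]) {
				if (free_slot < 0)
					free_slot = i;
			} else if (entries[i]->tag == tag) {
				entries[i]->refcount++;
				return entries[i];
			}
		}
		if (free_slot < 0)
			return NULL;
		entries[free_slot] = calloc(1, sizeof(struct subrule_data));
		if (!entries[free_slot])
			return NULL;
		entries[free_slot]->tag = tag;
		entries[free_slot]->chain_id = next_chain_id++;
		entries[free_slot]->refcount = 1;
		return entries[free_slot];
	}

	/* Put path: returns 1 only for the last user, which is when the
	 * HW rule may really be deleted (skip_delete = !is_last_rule).
	 */
	static int put_subrule_data(int slot)
	{
		if (--entries[slot]->refcount)
			return 0;
		free(entries[slot]);
		entries[slot] = NULL;
		return 1;
	}

	int main(void)
	{
		struct subrule_data *a = get_subrule_data(42);
		struct subrule_data *b = get_subrule_data(42); /* duplicate */

		if (!a || !b)
			return 1;
		printf("shared=%d refcount=%d chain_id=%u\n",
		       a == b, a->refcount, a->chain_id);
		printf("last=%d last=%d\n",
		       put_subrule_data(0), put_subrule_data(0));
		return 0;
	}

The put side, modeled by put_subrule_data() here, is what drives the skip_delete logic in hws_complex_put_subrule_data() below.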
static void
-hws_bwc_rule_complex_hash_node_put(struct mlx5hws_bwc_rule *bwc_rule,
- bool *is_last_rule)
+hws_complex_put_subrule_data(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_bwc_complex_submatcher *subm,
+ bool *is_last_rule)
+__must_hold(&subm->hash_lock)
{
- struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
- struct mlx5hws_bwc_complex_rule_hash_node *node;
+ struct mlx5hws_bwc_complex_subrule_data *sr_data;
if (is_last_rule)
*is_last_rule = false;
- node = bwc_rule->complex_hash_node;
- if (refcount_dec_and_test(&node->refcount)) {
- rhashtable_remove_fast(&bwc_matcher->complex->refcount_hash,
- &node->hash_node,
- hws_refcount_hash);
- ida_free(&bwc_matcher->complex->metadata_ida, node->tag);
- kfree(node);
+ sr_data = bwc_rule->subrule_data;
+ if (refcount_dec_and_test(&sr_data->refcount)) {
+ rhashtable_remove_fast(&subm->rules_hash,
+ &sr_data->hash_node,
+ hws_rules_hash_params);
+ ida_free(&subm->chain_ida, sr_data->chain_id);
+ kfree(sr_data);
if (is_last_rule)
*is_last_rule = true;
}
- bwc_rule->complex_hash_node = NULL;
+ bwc_rule->subrule_data = NULL;
}
-int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
- struct mlx5hws_match_parameters *params,
- u32 flow_source,
- struct mlx5hws_rule_action rule_actions[],
- u16 bwc_queue_idx)
+static int hws_complex_subrule_create(struct mlx5hws_bwc_matcher *cmatcher,
+ struct mlx5hws_bwc_rule *subrule,
+ u32 *match_params, u32 flow_source,
+ int bwc_queue_idx, int subm_idx,
+ struct mlx5hws_rule_action *actions,
+ u32 *chain_id)
{
- struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
- struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_rule_action chain_actions[HWS_NUM_CHAIN_ACTIONS] = {0};
u8 modify_hdr_action[MLX5_ST_SZ_BYTES(set_action_in)] = {0};
- struct mlx5hws_rule_action rule_actions_1[3] = {0};
- struct mlx5hws_bwc_matcher *isolated_bwc_matcher;
- u32 *match_buf_2;
- u32 metadata_val;
- int ret = 0;
+ struct mlx5hws_bwc_matcher_complex_data *cdata;
+ struct mlx5hws_bwc_complex_submatcher *subm;
+ int ret;
- isolated_bwc_matcher = bwc_matcher->complex->isolated_bwc_matcher;
- bwc_rule->isolated_bwc_rule =
- mlx5hws_bwc_rule_alloc(isolated_bwc_matcher);
- if (unlikely(!bwc_rule->isolated_bwc_rule))
- return -ENOMEM;
+ cdata = cmatcher->complex;
+ subm = &cdata->submatchers[subm_idx];
- hws_bwc_matcher_complex_hash_lock(bwc_matcher);
+ mutex_lock(&subm->hash_lock);
- /* Get a new hash node for this complex rule.
- * If this is a unique set of match params for the first matcher,
- * we will get a new hash node with newly allocated IDA.
- * Otherwise we will get an existing node with IDA and updated refcount.
- */
- ret = hws_bwc_rule_complex_hash_node_get(bwc_rule, params);
- if (unlikely(ret)) {
- mlx5hws_err(ctx, "Complex rule: failed getting RHT node for this rule\n");
- goto free_isolated_rule;
+ ret = hws_complex_get_subrule_data(subrule, subm, match_params);
+ if (ret)
+ goto unlock;
+
+ *chain_id = subrule->subrule_data->chain_id;
+
+ if (!actions) {
+ MLX5_SET(set_action_in, modify_hdr_action, data, *chain_id);
+ chain_actions[0].action = cdata->action_metadata;
+ chain_actions[0].modify_header.data = modify_hdr_action;
+ chain_actions[1].action =
+ cdata->submatchers[subm_idx + 1].action_tbl;
+ chain_actions[2].action = cdata->action_last;
+ actions = chain_actions;
}
- /* No need to clear match buffer's fields in accordance to what
- * will actually be matched on first and second matchers.
- * Both matchers were created with the appropriate masks
- * and each of them holds the appropriate field copy array,
- * so rule creation will use only the fields that will be copied
- * in accordance with setters in field copy array.
- * We do, however, need to temporary allocate match buffer
- * for the second (isolated) rule in order to not modify
- * user's match params buffer.
- */
-
- match_buf_2 = kmemdup(params->match_buf,
- MLX5_ST_SZ_BYTES(fte_match_param),
- GFP_KERNEL);
- if (unlikely(!match_buf_2)) {
- mlx5hws_err(ctx, "Complex rule: failed allocating match_buf\n");
- ret = -ENOMEM;
- goto hash_node_put;
- }
+ ret = mlx5hws_bwc_rule_create_simple(subrule, match_params, actions,
+ flow_source, bwc_queue_idx);
+ if (ret)
+ goto put_subrule_data;
- /* On 2nd matcher, use unique 32-bit ID as a matching tag */
- metadata_val = bwc_rule->complex_hash_node->tag;
- MLX5_SET(fte_match_param, match_buf_2,
- misc_parameters_2.metadata_reg_c_6, metadata_val);
-
- /* Isolated rule's rule_actions contain all the original actions */
- ret = mlx5hws_bwc_rule_create_simple(bwc_rule->isolated_bwc_rule,
- match_buf_2,
- rule_actions,
- flow_source,
- bwc_queue_idx);
- kfree(match_buf_2);
- if (unlikely(ret)) {
- mlx5hws_err(ctx,
- "Complex rule: failed creating isolated BWC rule (%d)\n",
- ret);
- goto hash_node_put;
- }
+ ret = 0;
+ goto unlock;
- /* First rule's rule_actions contain setting metadata and
- * jump to isolated table that contains the second matcher.
- * Set metadata value to a unique value for this rule.
- */
+put_subrule_data:
+ hws_complex_put_subrule_data(subrule, subm, NULL);
+unlock:
+ mutex_unlock(&subm->hash_lock);
- MLX5_SET(set_action_in, modify_hdr_action,
- action_type, MLX5_MODIFICATION_TYPE_SET);
- MLX5_SET(set_action_in, modify_hdr_action,
- field, MLX5_MODI_META_REG_C_6);
- MLX5_SET(set_action_in, modify_hdr_action,
- length, 0); /* zero means length of 32 */
- MLX5_SET(set_action_in, modify_hdr_action,
- offset, 0);
- MLX5_SET(set_action_in, modify_hdr_action,
- data, metadata_val);
+ return ret;
+}
- rule_actions_1[0].action = bwc_matcher->complex->action_metadata;
- rule_actions_1[0].modify_header.offset = 0;
- rule_actions_1[0].modify_header.data = modify_hdr_action;
+static int hws_complex_subrule_destroy(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_bwc_matcher *cmatcher,
+ int subm_idx)
+{
+ struct mlx5hws_bwc_matcher_complex_data *cdata;
+ struct mlx5hws_bwc_complex_submatcher *subm;
+ struct mlx5hws_context *ctx;
+ bool is_last_rule;
+ int ret = 0;
- rule_actions_1[1].action = bwc_matcher->complex->action_go_to_tbl;
- rule_actions_1[2].action = bwc_matcher->complex->action_last;
+ cdata = cmatcher->complex;
+ subm = &cdata->submatchers[subm_idx];
+ ctx = subm->tbl->ctx;
- ret = mlx5hws_bwc_rule_create_simple(bwc_rule,
- params->match_buf,
- rule_actions_1,
- flow_source,
- bwc_queue_idx);
+ mutex_lock(&subm->hash_lock);
- if (unlikely(ret)) {
+ hws_complex_put_subrule_data(bwc_rule, subm, &is_last_rule);
+ bwc_rule->rule->skip_delete = !is_last_rule;
+ ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule);
+ if (unlikely(ret))
mlx5hws_err(ctx,
- "Complex rule: failed creating first BWC rule (%d)\n",
- ret);
- goto destroy_isolated_rule;
- }
+ "Complex rule: failed to delete subrule %d (%d)\n",
+ subm_idx, ret);
- hws_bwc_matcher_complex_hash_unlock(bwc_matcher);
+ if (subm_idx)
+ mlx5hws_bwc_rule_free(bwc_rule);
- return 0;
+ mutex_unlock(&subm->hash_lock);
-destroy_isolated_rule:
- mlx5hws_bwc_rule_destroy_simple(bwc_rule->isolated_bwc_rule);
-hash_node_put:
- hws_bwc_rule_complex_hash_node_put(bwc_rule, NULL);
-free_isolated_rule:
- hws_bwc_matcher_complex_hash_unlock(bwc_matcher);
- mlx5hws_bwc_rule_free(bwc_rule->isolated_bwc_rule);
return ret;
}
-int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule)
+int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
+ struct mlx5hws_match_parameters *params,
+ u32 flow_source,
+ struct mlx5hws_rule_action rule_actions[],
+ u16 bwc_queue_idx)
{
- struct mlx5hws_context *ctx = bwc_rule->bwc_matcher->matcher->tbl->ctx;
- struct mlx5hws_bwc_rule *isolated_bwc_rule;
- int ret_isolated, ret;
- bool is_last_rule;
+ struct mlx5hws_bwc_rule
+ *subrules[MLX5HWS_BWC_COMPLEX_MAX_SUBMATCHERS] = {0};
+ struct mlx5hws_bwc_matcher *cmatcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_bwc_matcher_complex_data *cdata;
+ struct mlx5hws_rule_action *subrule_actions;
+ struct mlx5hws_bwc_complex_submatcher *subm;
+ struct mlx5hws_bwc_rule *subrule;
+ u32 *match_params;
+ u32 chain_id;
+ int i, ret;
- hws_bwc_matcher_complex_hash_lock(bwc_rule->bwc_matcher);
+ cdata = cmatcher->complex;
+ if (!cdata)
+ return -EINVAL;
- hws_bwc_rule_complex_hash_node_put(bwc_rule, &is_last_rule);
- bwc_rule->rule->skip_delete = !is_last_rule;
+ /* Duplicate user data because we will modify it to set register C6
+ * values. For the same reason, make sure that we allocate a full
+ * match_param even if the user gave us fewer bytes. We need to ensure
+ * there is space for the match on C6.
+ */
+ match_params = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ if (!match_params)
+ return -ENOMEM;
- ret = mlx5hws_bwc_rule_destroy_simple(bwc_rule);
- if (unlikely(ret))
- mlx5hws_err(ctx, "BWC complex rule: failed destroying first rule\n");
+ memcpy(match_params, params->match_buf, params->match_sz);
+
+ ret = hws_complex_subrule_create(cmatcher, bwc_rule, match_params,
+ flow_source, bwc_queue_idx, 0,
+ NULL, &chain_id);
+ if (ret)
+ goto free_match_params;
+ subrules[0] = bwc_rule;
+
+ for (i = 1; i < cdata->num_submatchers; i++) {
+ subm = &cdata->submatchers[i];
+ subrule = mlx5hws_bwc_rule_alloc(subm->bwc_matcher);
+ if (!subrule) {
+ ret = -ENOMEM;
+ goto destroy_subrules;
+ }
+
+ /* Match on the previous subrule's chain_id. This is how
+ * subrules are connected in steering.
+ */
+ MLX5_SET(fte_match_param, match_params,
+ misc_parameters_2.metadata_reg_c_6, chain_id);
- isolated_bwc_rule = bwc_rule->isolated_bwc_rule;
- ret_isolated = mlx5hws_bwc_rule_destroy_simple(isolated_bwc_rule);
- if (unlikely(ret_isolated))
- mlx5hws_err(ctx, "BWC complex rule: failed destroying second (isolated) rule\n");
+ /* The last subrule uses the complex rule's user-specified
+ * actions. Everything else uses the chaining rules based on the
+ * next table and chain_id.
+ */
+ subrule_actions =
+ i == cdata->num_submatchers - 1 ? rule_actions : NULL;
+
+ ret = hws_complex_subrule_create(cmatcher, subrule,
+ match_params, flow_source,
+ bwc_queue_idx, i,
+ subrule_actions, &chain_id);
+ if (ret) {
+ mlx5hws_bwc_rule_free(subrule);
+ goto destroy_subrules;
+ }
+
+ subrules[i] = subrule;
+ }
+
+ for (i = 0; i < cdata->num_submatchers - 1; i++)
+ subrules[i]->next_subrule = subrules[i + 1];
- hws_bwc_matcher_complex_hash_unlock(bwc_rule->bwc_matcher);
+ kfree(match_params);
- mlx5hws_bwc_rule_free(isolated_bwc_rule);
+ return 0;
+
+destroy_subrules:
+ while (i--)
+ hws_complex_subrule_destroy(subrules[i], cmatcher, i);
+free_match_params:
+ kfree(match_params);
- return ret || ret_isolated;
+ return ret;
}
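
Conceptually, the function above turns one complex rule into a chain of subrules: subrule 0 matches the first submask; each intermediate subrule also matches register C6 against the previous subrule's chain_id, writes its own, and jumps to the next submatcher's table; only the last subrule runs the user's actions. A standalone sketch of that wiring (the stage struct is illustrative; the real driver expresses this through chain_actions and metadata_reg_c_6 as shown above):

	#include <stdio.h>

	/* One stage of the chain: what it must match on entry, what it does. */
	struct stage {
		unsigned int match_c6;	/* required reg C6 value (0: none) */
		unsigned int set_c6;	/* reg C6 value written (0: none) */
		int runs_user_actions;	/* only the last stage does */
	};

	/* Stage i matches the chain id set by stage i-1 and forwards its
	 * own, except the last stage, which runs the user's actions.
	 */
	static void build_chain(struct stage *st, int n,
				const unsigned int *ids)
	{
		int i;

		for (i = 0; i < n; i++) {
			st[i].match_c6 = i ? ids[i - 1] : 0;
			st[i].set_c6 = i < n - 1 ? ids[i] : 0;
			st[i].runs_user_actions = i == n - 1;
		}
	}

	int main(void)
	{
		unsigned int ids[3] = { 11, 22, 33 };
		struct stage st[3];
		int i;

		build_chain(st, 3, ids);
		for (i = 0; i < 3; i++)
			printf("stage %d: match C6=%u set C6=%u user=%d\n",
			       i, st[i].match_c6, st[i].set_c6,
			       st[i].runs_user_actions);
		return 0;
	}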
-static void
-hws_bwc_matcher_clear_hash_rtcs(struct mlx5hws_bwc_matcher *bwc_matcher)
+int mlx5hws_bwc_rule_destroy_complex(struct mlx5hws_bwc_rule *bwc_rule)
{
- struct mlx5hws_bwc_complex_rule_hash_node *node;
- struct rhashtable_iter iter;
+ struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
+ struct mlx5hws_bwc_rule
+ *subrules[MLX5HWS_BWC_COMPLEX_MAX_SUBMATCHERS] = {0};
+ struct mlx5hws_bwc_matcher_complex_data *cdata;
+ int i, err, ret_val;
+
+ cdata = bwc_matcher->complex;
+
+ /* Construct a list of all the subrules we need to destroy. */
+ subrules[0] = bwc_rule;
+ for (i = 1; i < cdata->num_submatchers; i++)
+ subrules[i] = subrules[i - 1]->next_subrule;
+
+ ret_val = 0;
+ for (i = 0; i < cdata->num_submatchers; i++) {
+ err = hws_complex_subrule_destroy(subrules[i], bwc_matcher, i);
+ /* If something goes wrong, plow along to destroy all of the
+ * subrules but return an error upstack.
+ */
+ if (unlikely(err))
+ ret_val = err;
+ }
+
+ return ret_val;
+}
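
The destroy loop deliberately keeps going when a subrule fails to delete, remembering only the first error. That "finish the walk, report the first failure" pattern looks like this in isolation (hypothetical callback, user-space model):

	#include <stdio.h>

	/* Destroy every element even if some fail; report the first error. */
	static int destroy_all(int (*destroy)(int idx), int n)
	{
		int i, err, ret = 0;

		for (i = 0; i < n; i++) {
			err = destroy(i);
			if (err && !ret)
				ret = err;	/* remember first failure */
		}
		return ret;
	}

	static int fake_destroy(int idx)
	{
		return idx == 1 ? -5 : 0;	/* pretend element 1 fails */
	}

	int main(void)
	{
		printf("ret = %d\n", destroy_all(fake_destroy, 3));
		return 0;
	}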
- rhashtable_walk_enter(&bwc_matcher->complex->refcount_hash, &iter);
- rhashtable_walk_start(&iter);
+static void
+hws_bwc_matcher_init_move(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+ struct mlx5hws_bwc_rule *bwc_rule;
+ struct list_head *rules_list;
+ int i;
- while ((node = rhashtable_walk_next(&iter)) != NULL) {
- if (IS_ERR(node))
+ for (i = 0; i < bwc_queues; i++) {
+ rules_list = &bwc_matcher->rules[i];
+ if (list_empty(rules_list))
continue;
- node->rtc_valid = false;
- }
- rhashtable_walk_stop(&iter);
- rhashtable_walk_exit(&iter);
+ list_for_each_entry(bwc_rule, rules_list, list_node) {
+ if (!bwc_rule->subrule_data)
+ continue;
+ bwc_rule->subrule_data->was_moved = false;
+ }
+ }
}
-int
-mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
+int mlx5hws_bwc_matcher_complex_move(struct mlx5hws_bwc_matcher *bwc_matcher)
{
struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
struct mlx5hws_matcher *matcher = bwc_matcher->matcher;
- bool move_error = false, poll_error = false;
u16 bwc_queues = mlx5hws_bwc_queues(ctx);
struct mlx5hws_bwc_rule *tmp_bwc_rule;
struct mlx5hws_rule_attr rule_attr;
- struct mlx5hws_table *isolated_tbl;
+ int move_error = 0, poll_error = 0;
struct mlx5hws_rule *tmp_rule;
struct list_head *rules_list;
u32 expected_completions = 1;
- u32 end_ft_id;
- int i, ret;
-
- /* We are rehashing the matcher that is the first part of the complex
- * matcher. Need to update the isolated matcher to point to the end_ft
- * of this new matcher. This needs to be done before moving any rules
- * to prevent possible steering loops.
- */
- isolated_tbl = bwc_matcher->complex->isolated_tbl;
- end_ft_id = bwc_matcher->matcher->resize_dst->end_ft_id;
- ret = mlx5hws_matcher_update_end_ft_isolated(isolated_tbl, end_ft_id);
- if (ret) {
- mlx5hws_err(ctx,
- "Failed updating end_ft of isolated matcher (%d)\n",
- ret);
- return ret;
- }
+ int i, ret = 0;
- hws_bwc_matcher_clear_hash_rtcs(bwc_matcher);
+ hws_bwc_matcher_init_move(bwc_matcher);
mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr);
@@ -1369,15 +994,15 @@ mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
/* Check if a rule with similar tag has already
* been moved.
*/
- if (tmp_bwc_rule->complex_hash_node->rtc_valid) {
- /* This rule is a duplicate of rule with similar
- * tag that has already been moved earlier.
- * Just update this rule's RTCs.
+ if (tmp_bwc_rule->subrule_data->was_moved) {
+ /* This rule is a duplicate of rule with
+ * identical tag that has already been moved
+ * earlier. Just update this rule's RTCs.
*/
tmp_bwc_rule->rule->rtc_0 =
- tmp_bwc_rule->complex_hash_node->rtc_0;
+ tmp_bwc_rule->subrule_data->rtc_0;
tmp_bwc_rule->rule->rtc_1 =
- tmp_bwc_rule->complex_hash_node->rtc_1;
+ tmp_bwc_rule->subrule_data->rtc_1;
tmp_bwc_rule->rule->matcher =
tmp_bwc_rule->rule->matcher->resize_dst;
continue;
@@ -1391,11 +1016,15 @@ mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
ret = mlx5hws_matcher_resize_rule_move(matcher,
tmp_rule,
&rule_attr);
- if (unlikely(ret && !move_error)) {
- mlx5hws_err(ctx,
- "Moving complex BWC rule failed (%d), attempting to move rest of the rules\n",
- ret);
- move_error = true;
+ if (unlikely(ret)) {
+ if (!move_error) {
+ mlx5hws_err(ctx,
+ "Moving complex BWC rule: move failed (%d), attempting to move rest of the rules\n",
+ ret);
+ move_error = ret;
+ }
+ /* Rule wasn't queued, no need to poll */
+ continue;
}
expected_completions = 1;
@@ -1403,27 +1032,70 @@ mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
rule_attr.queue_id,
&expected_completions,
true);
- if (unlikely(ret && !poll_error)) {
- mlx5hws_err(ctx,
- "Moving complex BWC rule: poll failed (%d), attempting to move rest of the rules\n",
- ret);
- poll_error = true;
+ if (unlikely(ret)) {
+ if (ret == -ETIMEDOUT) {
+ mlx5hws_err(ctx,
+ "Moving complex BWC rule: timeout polling for completions (%d), aborting rehash\n",
+ ret);
+ return ret;
+ }
+ if (!poll_error) {
+ mlx5hws_err(ctx,
+ "Moving complex BWC rule: polling for completions failed (%d), attempting to move rest of the rules\n",
+ ret);
+ poll_error = ret;
+ }
}
/* Done moving the rule to the new matcher,
* now update RTCs for all the duplicated rules.
*/
- tmp_bwc_rule->complex_hash_node->rtc_0 =
+ tmp_bwc_rule->subrule_data->rtc_0 =
tmp_bwc_rule->rule->rtc_0;
- tmp_bwc_rule->complex_hash_node->rtc_1 =
+ tmp_bwc_rule->subrule_data->rtc_1 =
tmp_bwc_rule->rule->rtc_1;
- tmp_bwc_rule->complex_hash_node->rtc_valid = true;
+ tmp_bwc_rule->subrule_data->was_moved = true;
}
}
- if (move_error || poll_error)
- ret = -EINVAL;
+ /* Return the first error that happened */
+ if (unlikely(move_error))
+ return move_error;
+ if (unlikely(poll_error))
+ return poll_error;
return ret;
}
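
Because several BWC rules can share one physical rule, the move loop above relocates each physical rule exactly once, marks it was_moved, and lets subsequent duplicates merely copy the new RTC ids. A compact user-space model of that dedup (the structures are illustrative stand-ins):

	#include <stdbool.h>
	#include <stdio.h>

	struct shared_rule {
		bool was_moved;
		int rtc;		/* stand-in for rtc_0/rtc_1 */
	};

	struct bwc_rule {
		struct shared_rule *shared;
		int rtc;		/* per-rule copy of the RTC id */
	};

	/* Move each physical rule once; duplicates just copy the new RTC. */
	static void move_all(struct bwc_rule *rules, int n, int new_rtc_base)
	{
		int i;

		for (i = 0; i < n; i++) {
			struct shared_rule *s = rules[i].shared;

			if (s->was_moved) {
				rules[i].rtc = s->rtc;	/* duplicate: reuse */
				continue;
			}
			s->rtc = new_rtc_base + i;	/* "move" the rule */
			s->was_moved = true;
			rules[i].rtc = s->rtc;
		}
	}

	int main(void)
	{
		struct shared_rule shared = { .was_moved = false, .rtc = 0 };
		struct bwc_rule rules[2] = { { &shared, 0 }, { &shared, 0 } };

		move_all(rules, 2, 100);
		printf("rtc0=%d rtc1=%d (moved once)\n",
		       rules[0].rtc, rules[1].rtc);
		return 0;
	}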
+
+int
+mlx5hws_bwc_matcher_complex_move_first(struct mlx5hws_bwc_matcher *bwc_matcher)
+{
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_bwc_matcher_complex_data *cdata;
+ struct mlx5hws_table *isolated_tbl;
+ u32 end_ft_id;
+ int i, ret;
+
+ cdata = bwc_matcher->complex;
+
+ /* We are rehashing the first submatcher. We need to update the
+ * subsequent submatchers to point to the end_ft of this new matcher.
+ * This needs to be done before moving any rules to prevent possible
+ * steering loops.
+ */
+ end_ft_id = bwc_matcher->matcher->resize_dst->end_ft_id;
+ for (i = 1; i < cdata->num_submatchers; i++) {
+ isolated_tbl = cdata->submatchers[i].tbl;
+ ret = mlx5hws_matcher_update_end_ft_isolated(isolated_tbl,
+ end_ft_id);
+ if (ret) {
+ mlx5hws_err(ctx,
+ "Complex matcher: failed updating end_ft of isolated matcher (%d)\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return mlx5hws_bwc_matcher_complex_move(bwc_matcher);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h
index a6887c7e39d5..d07de631ce9f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h
@@ -4,25 +4,60 @@
#ifndef HWS_BWC_COMPLEX_H_
#define HWS_BWC_COMPLEX_H_
-struct mlx5hws_bwc_complex_rule_hash_node {
- u32 match_buf[MLX5_ST_SZ_DW_MATCH_PARAM];
- u32 tag;
+#define MLX5HWS_BWC_COMPLEX_MAX_SUBMATCHERS 4
+
+/* A matcher can't contain two rules with the same match tag, but it is possible
+ * that two different complex rules' subrules have the same match tag. In that
+ * case, those subrules correspond to a single rule, and we need to refcount.
+ */
+struct mlx5hws_bwc_complex_subrule_data {
+ struct mlx5hws_rule_match_tag match_tag;
refcount_t refcount;
- bool rtc_valid;
+ /* The chain_id is what glues individual subrules into larger complex
+ * rules. It is the value that this subrule writes to register C6, and
+ * that the next subrule matches against.
+ */
+ u32 chain_id;
u32 rtc_0;
u32 rtc_1;
+ /* During rehash, we iterate through all the subrules to move them. But
+ * two or more subrules can share the same physical rule in the
+ * submatcher, so we use `was_moved` to keep track of whether a given
+ * rule was already moved.
+ */
+ bool was_moved;
struct rhash_head hash_node;
};

+struct mlx5hws_bwc_complex_submatcher {
+ /* Isolated table that the matcher lives in. Not set for the first
+ * matcher, which lives in the original table.
+ */
+ struct mlx5hws_table *tbl;
+ /* A rule that uses this action jumps to `tbl`. This is set in all
+ * submatchers but the first.
+ */
+ struct mlx5hws_action *action_tbl;
+ /* This submatcher's simple matcher. The first submatcher points to the
+ * outer (complex) matcher.
+ */
+ struct mlx5hws_bwc_matcher *bwc_matcher;
+ struct rhashtable rules_hash;
+ struct ida chain_ida;
+ struct mutex hash_lock; /* Protect the hash and ida. */
+};
+
struct mlx5hws_bwc_matcher_complex_data {
- struct mlx5hws_table *isolated_tbl;
- struct mlx5hws_bwc_matcher *isolated_bwc_matcher;
+ struct mlx5hws_bwc_complex_submatcher
+ submatchers[MLX5HWS_BWC_COMPLEX_MAX_SUBMATCHERS];
+ int num_submatchers;
+ /* Actions used by all but the last submatcher to point to the next
+ * submatcher in the chain. The last submatcher uses the action template
+ * from the complex matcher, to perform the actions that the user
+ * originally requested.
+ */
struct mlx5hws_action *action_metadata;
- struct mlx5hws_action *action_go_to_tbl;
struct mlx5hws_action *action_last;
- struct rhashtable refcount_hash;
- struct mutex hash_lock; /* Protect the refcount rhashtable */
- struct ida metadata_ida;
};
bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
@@ -37,7 +72,10 @@ int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher,
void mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher);
-int mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher);
+int mlx5hws_bwc_matcher_complex_move(struct mlx5hws_bwc_matcher *bwc_matcher);
+
+int
+mlx5hws_bwc_matcher_complex_move_first(struct mlx5hws_bwc_matcher *bwc_matcher);
int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
struct mlx5hws_match_parameters *params,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
index 9c83753e4592..f22eaf506d28 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
@@ -55,6 +55,7 @@ int mlx5hws_cmd_flow_table_create(struct mlx5_core_dev *mdev,
MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
MLX5_SET(create_flow_table_in, in, table_type, ft_attr->type);
+ MLX5_SET(create_flow_table_in, in, uid, ft_attr->uid);
ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
MLX5_SET(flow_table_context, ft_ctx, level, ft_attr->level);
@@ -1199,34 +1200,20 @@ out:
int mlx5hws_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_function,
u16 vport_number, u16 *gvmi)
{
- bool ec_vf_func = other_function ? mlx5_core_is_ec_vf_vport(mdev, vport_number) : false;
- u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
- int out_size;
- void *out;
int err;
- out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- out = kzalloc(out_size, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
-
- MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
- MLX5_SET(query_hca_cap_in, in, other_function, other_function);
- MLX5_SET(query_hca_cap_in, in, function_id,
- mlx5_vport_to_func_id(mdev, vport_number, ec_vf_func));
- MLX5_SET(query_hca_cap_in, in, ec_vf_function, ec_vf_func);
- MLX5_SET(query_hca_cap_in, in, op_mod,
- MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 | HCA_CAP_OPMOD_GET_CUR);
+ if (!other_function) {
+ /* self vhca_id */
+ *gvmi = MLX5_CAP_GEN(mdev, vhca_id);
+ return 0;
+ }
- err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
+ err = mlx5_vport_get_vhca_id(mdev, vport_number, gvmi);
if (err) {
- kfree(out);
+ mlx5_core_err(mdev, "Failed to get vport vhca id for vport %d\n",
+ vport_number);
return err;
}
- *gvmi = MLX5_GET(query_hca_cap_out, out, capability.cmd_hca_cap.vhca_id);
-
- kfree(out);
-
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
index fa6bff210266..122ccc671628 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
@@ -36,6 +36,7 @@ struct mlx5hws_cmd_set_fte_attr {
struct mlx5hws_cmd_ft_create_attr {
u8 type;
u8 level;
+ u16 uid;
bool rtc_valid;
bool decap_en;
bool reformat_en;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
index c6436c3a7a83..82fd122d4284 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
@@ -1280,7 +1280,7 @@ hws_definer_conv_misc2(struct mlx5hws_definer_conv_data *cd,
struct mlx5hws_definer_fc *fc = cd->fc;
struct mlx5hws_definer_fc *curr_fc;
- if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1a0, 0x8) ||
+ if (HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.psp_syndrome, 0x8) ||
HWS_IS_FLD_SET_SZ(match_param,
misc_parameters_2.ipsec_next_header, 0x8) ||
HWS_IS_FLD_SET_SZ(match_param, misc_parameters_2.reserved_at_1c0, 0x40) ||
@@ -1831,80 +1831,6 @@ err_free_fc:
return ret;
}
-struct mlx5hws_definer_fc *
-mlx5hws_definer_conv_match_params_to_compressed_fc(struct mlx5hws_context *ctx,
- u8 match_criteria_enable,
- u32 *match_param,
- int *fc_sz)
-{
- struct mlx5hws_definer_fc *compressed_fc = NULL;
- struct mlx5hws_definer_conv_data cd = {0};
- struct mlx5hws_definer_fc *fc;
- int ret;
-
- fc = hws_definer_alloc_fc(ctx, MLX5HWS_DEFINER_FNAME_MAX);
- if (!fc)
- return NULL;
-
- cd.fc = fc;
- cd.ctx = ctx;
-
- if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER) {
- ret = hws_definer_conv_outer(&cd, match_param);
- if (ret)
- goto err_free_fc;
- }
-
- if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_INNER) {
- ret = hws_definer_conv_inner(&cd, match_param);
- if (ret)
- goto err_free_fc;
- }
-
- if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC) {
- ret = hws_definer_conv_misc(&cd, match_param);
- if (ret)
- goto err_free_fc;
- }
-
- if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2) {
- ret = hws_definer_conv_misc2(&cd, match_param);
- if (ret)
- goto err_free_fc;
- }
-
- if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3) {
- ret = hws_definer_conv_misc3(&cd, match_param);
- if (ret)
- goto err_free_fc;
- }
-
- if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4) {
- ret = hws_definer_conv_misc4(&cd, match_param);
- if (ret)
- goto err_free_fc;
- }
-
- if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5) {
- ret = hws_definer_conv_misc5(&cd, match_param);
- if (ret)
- goto err_free_fc;
- }
-
- /* Allocate fc array on mt */
- compressed_fc = hws_definer_alloc_compressed_fc(fc);
- if (!compressed_fc) {
- mlx5hws_err(ctx,
- "Convert to compressed fc: failed to set field copy to match template\n");
- goto err_free_fc;
- }
- *fc_sz = hws_definer_get_fc_size(fc);
-
-err_free_fc:
- kfree(fc);
- return compressed_fc;
-}
-
static int
hws_definer_find_byte_in_tag(struct mlx5hws_definer *definer,
u32 hl_byte_off,
@@ -2067,7 +1993,7 @@ hws_definer_copy_sel_ctrl(struct mlx5hws_definer_sel_ctrl *ctrl,
static int
hws_definer_find_best_match_fit(struct mlx5hws_context *ctx,
struct mlx5hws_definer *definer,
- u8 *hl)
+ u8 *hl, bool allow_jumbo)
{
struct mlx5hws_definer_sel_ctrl ctrl = {0};
bool found;
@@ -2084,6 +2010,9 @@ hws_definer_find_best_match_fit(struct mlx5hws_context *ctx,
return 0;
}
+ if (!allow_jumbo)
+ return -E2BIG;
+
/* Try to create a full/limited jumbo definer */
ctrl.allowed_full_dw = ctx->caps->full_dw_jumbo_support ? DW_SELECTORS :
DW_SELECTORS_MATCH;
@@ -2160,7 +2089,8 @@ int mlx5hws_definer_compare(struct mlx5hws_definer *definer_a,
int
mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
struct mlx5hws_match_template *mt,
- struct mlx5hws_definer *match_definer)
+ struct mlx5hws_definer *match_definer,
+ bool allow_jumbo)
{
u8 *match_hl;
int ret;
@@ -2182,7 +2112,8 @@ mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
}
/* Find the match definer layout for header layout match union */
- ret = hws_definer_find_best_match_fit(ctx, match_definer, match_hl);
+ ret = hws_definer_find_best_match_fit(ctx, match_definer, match_hl,
+ allow_jumbo);
if (ret) {
if (ret == -E2BIG)
mlx5hws_dbg(ctx,
@@ -2370,7 +2301,7 @@ int mlx5hws_definer_mt_init(struct mlx5hws_context *ctx,
struct mlx5hws_definer match_layout = {0};
int ret;
- ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout);
+ ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout, true);
if (ret) {
mlx5hws_err(ctx, "Failed to calculate matcher definer layout\n");
return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
index 62da55389331..141f3eb2e307 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h
@@ -823,13 +823,8 @@ void mlx5hws_definer_free(struct mlx5hws_context *ctx,
int mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
struct mlx5hws_match_template *mt,
- struct mlx5hws_definer *match_definer);
-
-struct mlx5hws_definer_fc *
-mlx5hws_definer_conv_match_params_to_compressed_fc(struct mlx5hws_context *ctx,
- u8 match_criteria_enable,
- u32 *match_param,
- int *fc_sz);
+ struct mlx5hws_definer *match_definer,
+ bool allow_jumbo);
const char *mlx5hws_definer_fname_to_str(enum mlx5hws_definer_fname fname);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
index 57592b92e24b..6a4c4cccd643 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
@@ -267,6 +267,7 @@ static int mlx5_cmd_hws_create_flow_table(struct mlx5_flow_root_namespace *ns,
tbl_attr.type = MLX5HWS_TABLE_TYPE_FDB;
tbl_attr.level = ft_attr->level;
+ tbl_attr.uid = ft_attr->uid;
tbl = mlx5hws_table_create(ctx, &tbl_attr);
if (!tbl) {
mlx5_core_err(ns->dev, "Failed creating hws flow_table\n");
@@ -571,12 +572,12 @@ static void mlx5_fs_put_dest_action_sampler(struct mlx5_fs_hws_context *fs_ctx,
static struct mlx5hws_action *
mlx5_fs_create_action_dest_array(struct mlx5hws_context *ctx,
struct mlx5hws_action_dest_attr *dests,
- u32 num_of_dests, bool ignore_flow_level)
+ u32 num_of_dests)
{
u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
return mlx5hws_action_create_dest_array(ctx, num_of_dests, dests,
- ignore_flow_level, flags);
+ flags);
}
static struct mlx5hws_action *
@@ -1013,19 +1014,14 @@ static int mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace *ns,
}
(*ractions)[num_actions++].action = dest_actions->dest;
} else if (num_dest_actions > 1) {
- bool ignore_flow_level;
-
if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
err = -EOPNOTSUPP;
goto free_actions;
}
- ignore_flow_level =
- !!(fte_action->flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
tmp_action =
mlx5_fs_create_action_dest_array(ctx, dest_actions,
- num_dest_actions,
- ignore_flow_level);
+ num_dest_actions);
if (!tmp_action) {
err = -EOPNOTSUPP;
goto free_actions;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
index f1ecdba74e1f..839d71bd4216 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
@@ -407,15 +407,21 @@ struct mlx5hws_action *mlx5_fc_get_hws_action(struct mlx5hws_context *ctx,
{
struct mlx5_fs_hws_create_action_ctx create_ctx;
struct mlx5_fc_bulk *fc_bulk = counter->bulk;
+ struct mlx5hws_action *hws_action;
create_ctx.hws_ctx = ctx;
create_ctx.id = fc_bulk->base_id;
create_ctx.actions_type = MLX5HWS_ACTION_TYP_CTR;
- return mlx5_fs_get_hws_action(&fc_bulk->hws_data, &create_ctx);
+ mlx5_fc_local_get(counter);
+ hws_action = mlx5_fs_get_hws_action(&fc_bulk->hws_data, &create_ctx);
+ if (!hws_action)
+ mlx5_fc_local_put(counter);
+ return hws_action;
}
void mlx5_fc_put_hws_action(struct mlx5_fc *counter)
{
mlx5_fs_put_hws_action(&counter->bulk->hws_data);
+ mlx5_fc_local_put(counter);
}
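
The counter reference handling follows the usual pairing: take the local reference before publishing the action, drop it on the failure path, and drop it again in the matching put. In isolation (hypothetical refcount helpers, not the mlx5 API):

	#include <stdio.h>

	static int counter_refs;

	static void counter_get(void) { counter_refs++; }
	static void counter_put(void) { counter_refs--; }

	/* Returns a nonzero handle on success, 0 on failure. */
	static int create_action(int ok) { return ok ? 42 : 0; }

	/* Take the counter reference first; drop it if action creation
	 * fails, so success leaves exactly one reference for the later put.
	 */
	static int get_counter_action(int ok)
	{
		int action;

		counter_get();
		action = create_action(ok);
		if (!action)
			counter_put();
		return action;
	}

	int main(void)
	{
		get_counter_action(0);
		printf("after failure: refs=%d\n", counter_refs);	/* 0 */
		get_counter_action(1);
		printf("after success: refs=%d\n", counter_refs);	/* 1 */
		counter_put();	/* matching put on destroy */
		return 0;
	}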
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
index f3ea09caba2b..32f87fdf3213 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
@@ -85,6 +85,7 @@ static int hws_matcher_create_end_ft_isolated(struct mlx5hws_matcher *matcher)
ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev,
tbl,
+ 0,
&matcher->end_ft_id);
if (ret) {
mlx5hws_err(tbl->ctx, "Isolated matcher: failed to create end flow table\n");
@@ -112,7 +113,9 @@ static int hws_matcher_create_end_ft(struct mlx5hws_matcher *matcher)
if (mlx5hws_matcher_is_isolated(matcher))
ret = hws_matcher_create_end_ft_isolated(matcher);
else
- ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl,
+ ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev,
+ tbl,
+ 0,
&matcher->end_ft_id);
if (ret) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
index 59c14745ed0c..1ad7a50d938b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
@@ -75,6 +75,7 @@ struct mlx5hws_context_attr {
struct mlx5hws_table_attr {
enum mlx5hws_table_type type;
u32 level;
+ u16 uid;
};
enum mlx5hws_matcher_flow_src {
@@ -734,7 +735,6 @@ mlx5hws_action_create_push_vlan(struct mlx5hws_context *ctx, u32 flags);
* @num_dest: The number of dests attributes.
* @dests: The destination array. Each contains a destination action and can
* have additional actions.
- * @ignore_flow_level: Whether to turn on 'ignore_flow_level' for this dest.
* @flags: Action creation flags (enum mlx5hws_action_flags).
*
* Return: pointer to mlx5hws_action on success NULL otherwise.
@@ -742,7 +742,7 @@ mlx5hws_action_create_push_vlan(struct mlx5hws_context *ctx, u32 flags);
struct mlx5hws_action *
mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx, size_t num_dest,
struct mlx5hws_action_dest_attr *dests,
- bool ignore_flow_level, u32 flags);
+ u32 flags);
/**
* mlx5hws_action_create_insert_header - Create insert header action.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
index 51e4c551e0ef..d56271a9e4f0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c
@@ -279,7 +279,7 @@ int mlx5hws_pat_get_pattern(struct mlx5hws_context *ctx,
return ret;
clean_pattern:
- mlx5hws_cmd_header_modify_pattern_destroy(ctx->mdev, *pattern_id);
+ mlx5hws_cmd_header_modify_pattern_destroy(ctx->mdev, ptrn_id);
out_unlock:
mutex_unlock(&ctx->pattern_cache->lock);
return ret;
@@ -527,7 +527,6 @@ int mlx5hws_pat_calc_nop(__be64 *pattern, size_t num_actions,
u32 *nop_locations, __be64 *new_pat)
{
u16 prev_src_field = INVALID_FIELD, prev_dst_field = INVALID_FIELD;
- u16 src_field, dst_field;
u8 action_type;
bool dependent;
size_t i, j;
@@ -539,6 +538,9 @@ int mlx5hws_pat_calc_nop(__be64 *pattern, size_t num_actions,
return 0;
for (i = 0, j = 0; i < num_actions; i++, j++) {
+ u16 src_field = INVALID_FIELD;
+ u16 dst_field = INVALID_FIELD;
+
if (j >= max_actions)
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
index 7e37d6e9eb83..7b5071c3df36 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pool.c
@@ -124,6 +124,7 @@ static int hws_pool_buddy_init(struct mlx5hws_pool *pool)
mlx5hws_err(pool->ctx, "Failed to create resource type: %d size %zu\n",
pool->type, pool->alloc_log_sz);
mlx5hws_buddy_cleanup(buddy);
+ kfree(buddy);
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
index c4b22be19a9b..24ef7d66fa8a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
@@ -690,7 +690,7 @@ static int hws_send_ring_alloc_sq(struct mlx5_core_dev *mdev,
size_t buf_sz;
int err;
- sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
+ sq->uar_map = mdev->priv.bfreg.map;
sq->mdev = mdev;
param.db_numa_node = numa_node;
@@ -764,7 +764,7 @@ static int hws_send_ring_create_sq(struct mlx5_core_dev *mdev, u32 pdn,
MLX5_SET(sqc, sqc, ts_format, ts_format);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
- MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
+ MLX5_SET(wq, wq, uar_page, mdev->priv.bfreg.index);
MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
@@ -940,7 +940,7 @@ static int hws_send_ring_create_cq(struct mlx5_core_dev *mdev,
(__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
- MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.bfreg.up->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
@@ -963,8 +963,7 @@ static int hws_send_ring_open_cq(struct mlx5_core_dev *mdev,
if (!cqc_data)
return -ENOMEM;
- MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index);
- MLX5_SET(cqc, cqc_data, cqe_sz, queue->num_entries);
+ MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.bfreg.up->index);
MLX5_SET(cqc, cqc_data, log_cq_size, ilog2(queue->num_entries));
err = hws_send_ring_alloc_cq(mdev, numa_node, queue, cqc_data, cq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
index 568f691733f3..6113383ae47b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
@@ -9,6 +9,7 @@ u32 mlx5hws_table_get_id(struct mlx5hws_table *tbl)
}
static void hws_table_init_next_ft_attr(struct mlx5hws_table *tbl,
+ u16 uid,
struct mlx5hws_cmd_ft_create_attr *ft_attr)
{
ft_attr->type = tbl->fw_ft_type;
@@ -16,7 +17,9 @@ static void hws_table_init_next_ft_attr(struct mlx5hws_table *tbl,
ft_attr->level = tbl->ctx->caps->fdb_ft.max_level - 1;
else
ft_attr->level = tbl->ctx->caps->nic_ft.max_level - 1;
+
ft_attr->rtc_valid = true;
+ ft_attr->uid = uid;
}
static void hws_table_set_cap_attr(struct mlx5hws_table *tbl,
@@ -119,12 +122,12 @@ static int hws_table_connect_to_default_miss_tbl(struct mlx5hws_table *tbl, u32
int mlx5hws_table_create_default_ft(struct mlx5_core_dev *mdev,
struct mlx5hws_table *tbl,
- u32 *ft_id)
+ u16 uid, u32 *ft_id)
{
struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
int ret;
- hws_table_init_next_ft_attr(tbl, &ft_attr);
+ hws_table_init_next_ft_attr(tbl, uid, &ft_attr);
hws_table_set_cap_attr(tbl, &ft_attr);
ret = mlx5hws_cmd_flow_table_create(mdev, &ft_attr, ft_id);
@@ -189,7 +192,10 @@ static int hws_table_init(struct mlx5hws_table *tbl)
}
mutex_lock(&ctx->ctrl_lock);
- ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl, &tbl->ft_id);
+ ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev,
+ tbl,
+ tbl->uid,
+ &tbl->ft_id);
if (ret) {
mlx5hws_err(tbl->ctx, "Failed to create flow table object\n");
mutex_unlock(&ctx->ctrl_lock);
@@ -239,6 +245,7 @@ struct mlx5hws_table *mlx5hws_table_create(struct mlx5hws_context *ctx,
tbl->ctx = ctx;
tbl->type = attr->type;
tbl->level = attr->level;
+ tbl->uid = attr->uid;
ret = hws_table_init(tbl);
if (ret) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h
index 0400cce0c317..1246f9bd8422 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h
@@ -18,6 +18,7 @@ struct mlx5hws_table {
enum mlx5hws_table_type type;
u32 fw_ft_type;
u32 level;
+ u16 uid;
struct list_head matchers_list;
struct list_head tbl_list_node;
struct mlx5hws_default_miss default_miss;
@@ -47,7 +48,7 @@ u32 mlx5hws_table_get_res_fw_ft_type(enum mlx5hws_table_type tbl_type,
int mlx5hws_table_create_default_ft(struct mlx5_core_dev *mdev,
struct mlx5hws_table *tbl,
- u32 *ft_id);
+ u16 uid, u32 *ft_id);
void mlx5hws_table_destroy_default_ft(struct mlx5hws_table *tbl,
u32 ft_id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_cmd.c
index baefb9a3fa05..1ebb2b15c080 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_cmd.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019 Mellanox Technologies. */
#include "dr_types.h"
+#include "eswitch.h"
int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
bool other_vport,
@@ -34,34 +35,21 @@ int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_vport,
u16 vport_number, u16 *gvmi)
{
- bool ec_vf_func = other_vport ? mlx5_core_is_ec_vf_vport(mdev, vport_number) : false;
- u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
- int out_size;
- void *out;
int err;
- out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- out = kzalloc(out_size, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
-
- MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
- MLX5_SET(query_hca_cap_in, in, other_function, other_vport);
- MLX5_SET(query_hca_cap_in, in, function_id, mlx5_vport_to_func_id(mdev, vport_number, ec_vf_func));
- MLX5_SET(query_hca_cap_in, in, ec_vf_function, ec_vf_func);
- MLX5_SET(query_hca_cap_in, in, op_mod,
- MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 |
- HCA_CAP_OPMOD_GET_CUR);
+ if (!other_vport) {
+ /* self vhca_id */
+ *gvmi = MLX5_CAP_GEN(mdev, vhca_id);
+ return 0;
+ }
- err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
+ err = mlx5_vport_get_vhca_id(mdev, vport_number, gvmi);
if (err) {
- kfree(out);
+ mlx5_core_err(mdev, "Failed to get vport vhca id for vport %d\n",
+ vport_number);
return err;
}
- *gvmi = MLX5_GET(query_hca_cap_out, out, capability.cmd_hca_cap.vhca_id);
-
- kfree(out);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
index 4fd4e8483382..077a77fde670 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
@@ -1131,7 +1131,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
*cq->mcq.arm_db = cpu_to_be32(2 << 28);
cq->mcq.vector = 0;
- cq->mcq.uar = uar;
cq->mdev = mdev;
return cq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index da5c24fc7b30..2ed2e530b07d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -36,6 +36,7 @@
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eswitch.h>
#include "mlx5_core.h"
+#include "eswitch.h"
#include "sf/sf.h"
/* Mutex to hold while enabling or disabling RoCE */
@@ -1189,18 +1190,44 @@ u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
+static bool mlx5_vport_use_vhca_id_as_func_id(struct mlx5_core_dev *dev,
+ u16 vport_num, u16 *vhca_id)
+{
+ if (!MLX5_CAP_GEN_2(dev, function_id_type_vhca_id))
+ return false;
+
+ return mlx5_esw_vport_vhca_id(dev->priv.eswitch, vport_num, vhca_id);
+}
+
int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 vport, void *out,
u16 opmod)
{
- bool ec_vf_func = mlx5_core_is_ec_vf_vport(dev, vport);
u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)] = {};
+ u16 vhca_id = 0, function_id = 0;
+ bool ec_vf_func = false;
+
+ /* If this vport is referring to a vport on the ec PF (embedded CPU),
+ * let the FW know which domain we are querying since vport numbers or
+ * function_ids are not unique across the different PF domains,
+ * unless we use vhca_id as the function_id below.
+ */
+ ec_vf_func = mlx5_core_is_ec_vf_vport(dev, vport);
+ function_id = mlx5_vport_to_func_id(dev, vport, ec_vf_func);
+
+ if (mlx5_vport_use_vhca_id_as_func_id(dev, vport, &vhca_id)) {
+ MLX5_SET(query_hca_cap_in, in, function_id_type, 1);
+ function_id = vhca_id;
+ ec_vf_func = false;
+ mlx5_core_dbg(dev, "%s using vhca_id as function_id for vport %d vhca_id 0x%x\n",
+ __func__, vport, vhca_id);
+ }
opmod = (opmod << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01);
MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
- MLX5_SET(query_hca_cap_in, in, function_id, mlx5_vport_to_func_id(dev, vport, ec_vf_func));
MLX5_SET(query_hca_cap_in, in, other_function, true);
MLX5_SET(query_hca_cap_in, in, ec_vf_function, ec_vf_func);
+ MLX5_SET(query_hca_cap_in, in, function_id, function_id);
return mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
}
EXPORT_SYMBOL_GPL(mlx5_vport_get_other_func_cap);
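
The function_id selection above can be read as a small decision procedure: if firmware supports function_id_type and the eswitch already knows the vport's vhca_id, pass the vhca_id (unique across PF domains) and clear ec_vf_function; otherwise fall back to the classic function_id plus the ec_vf_function flag. A hedged standalone restatement, with stand-ins for MLX5_CAP_GEN_2() and the eswitch lookup (all helpers below are hypothetical):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct func_addr {
		uint16_t function_id;
		bool function_id_type_vhca; /* function_id is a vhca_id */
		bool ec_vf_function;
	};

	/* Hypothetical stand-ins for the capability bit and eswitch cache. */
	static bool cap_function_id_type_vhca_id(void) { return true; }
	static bool esw_vport_vhca_id(uint16_t vport, uint16_t *vhca_id)
	{
		*vhca_id = 0x1000 + vport;	/* pretend it is cached */
		return true;
	}

	static struct func_addr resolve_func_addr(uint16_t vport, bool is_ec_vf)
	{
		struct func_addr a = {
			.function_id = vport,	/* simplified mapping */
			.ec_vf_function = is_ec_vf,
		};
		uint16_t vhca_id;

		if (cap_function_id_type_vhca_id() &&
		    esw_vport_vhca_id(vport, &vhca_id)) {
			/* vhca_ids are unique across PF domains, so the
			 * ec_vf disambiguation is no longer needed.
			 */
			a.function_id = vhca_id;
			a.function_id_type_vhca = true;
			a.ec_vf_function = false;
		}
		return a;
	}

	int main(void)
	{
		struct func_addr a = resolve_func_addr(7, true);

		printf("function_id=0x%x vhca_type=%d ec_vf=%d\n",
		       a.function_id, a.function_id_type_vhca,
		       a.ec_vf_function);
		return 0;
	}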
@@ -1212,7 +1239,9 @@ int mlx5_vport_get_vhca_id(struct mlx5_core_dev *dev, u16 vport, u16 *vhca_id)
void *hca_caps;
int err;
- *vhca_id = 0;
+ /* try get vhca_id via eswitch */
+ if (mlx5_esw_vport_vhca_id(dev->priv.eswitch, vport, vhca_id))
+ return 0;
query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
if (!query_ctx)
@@ -1229,12 +1258,14 @@ out_free:
kfree(query_ctx);
return err;
}
+EXPORT_SYMBOL_GPL(mlx5_vport_get_vhca_id);
int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap,
u16 vport, u16 opmod)
{
- bool ec_vf_func = mlx5_core_is_ec_vf_vport(dev, vport);
int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ u16 vhca_id = 0, function_id = 0;
+ bool ec_vf_func = false;
void *set_hca_cap;
void *set_ctx;
int ret;
@@ -1243,14 +1274,29 @@ int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap
if (!set_ctx)
return -ENOMEM;
+ /* If this vport is referring to a vport on the ec PF (embedded CPU),
+ * let the FW know which domain we are querying since vport numbers or
+ * function_ids are not unique across the different PF domains,
+ * unless we use vhca_id as the function_id below.
+ */
+ ec_vf_func = mlx5_core_is_ec_vf_vport(dev, vport);
+ function_id = mlx5_vport_to_func_id(dev, vport, ec_vf_func);
+
+ if (mlx5_vport_use_vhca_id_as_func_id(dev, vport, &vhca_id)) {
+ MLX5_SET(set_hca_cap_in, set_ctx, function_id_type, 1);
+ function_id = vhca_id;
+ ec_vf_func = false;
+ mlx5_core_dbg(dev, "%s using vhca_id as function_id for vport %d vhca_id 0x%x\n",
+ __func__, vport, vhca_id);
+ }
+
MLX5_SET(set_hca_cap_in, set_ctx, opcode, MLX5_CMD_OP_SET_HCA_CAP);
MLX5_SET(set_hca_cap_in, set_ctx, op_mod, opmod << 1);
set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
memcpy(set_hca_cap, hca_cap, MLX5_ST_SZ_BYTES(cmd_hca_cap));
- MLX5_SET(set_hca_cap_in, set_ctx, function_id,
- mlx5_vport_to_func_id(dev, vport, ec_vf_func));
MLX5_SET(set_hca_cap_in, set_ctx, other_function, true);
MLX5_SET(set_hca_cap_in, set_ctx, ec_vf_function, ec_vf_func);
+ MLX5_SET(set_hca_cap_in, set_ctx, function_id, function_id);
ret = mlx5_cmd_exec_in(dev, set_hca_cap, set_ctx);
kfree(set_ctx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wc.c b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
index 2f0316616fa4..05e5fd777d4f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
@@ -7,6 +7,10 @@
#include "mlx5_core.h"
#include "wq.h"
+#if IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && IS_ENABLED(CONFIG_ARM64)
+#include <asm/neon.h>
+#endif
+
#define TEST_WC_NUM_WQES 255
#define TEST_WC_LOG_CQ_SZ (order_base_2(TEST_WC_NUM_WQES))
#define TEST_WC_SQ_LOG_WQ_SZ TEST_WC_LOG_CQ_SZ
@@ -94,7 +98,7 @@ static int create_wc_cq(struct mlx5_wc_cq *cq, void *cqc_data)
MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
- MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.bfreg.up->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
@@ -116,7 +120,7 @@ static int mlx5_wc_create_cq(struct mlx5_core_dev *mdev, struct mlx5_wc_cq *cq)
return -ENOMEM;
MLX5_SET(cqc, cqc, log_cq_size, TEST_WC_LOG_CQ_SZ);
- MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.bfreg.up->index);
if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
@@ -255,7 +259,29 @@ static void mlx5_wc_destroy_sq(struct mlx5_wc_sq *sq)
mlx5_wq_destroy(&sq->wq_ctrl);
}
-static void mlx5_wc_post_nop(struct mlx5_wc_sq *sq, bool signaled)
+static void mlx5_iowrite64_copy(struct mlx5_wc_sq *sq, __be32 mmio_wqe[16],
+ size_t mmio_wqe_size, unsigned int offset)
+{
+#if IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && IS_ENABLED(CONFIG_ARM64)
+ if (cpu_has_neon()) {
+ kernel_neon_begin();
+ asm volatile
+ (".arch_extension simd\n\t"
+ "ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [%0]\n\t"
+ "st1 {v0.16b, v1.16b, v2.16b, v3.16b}, [%1]"
+ :
+ : "r"(mmio_wqe), "r"(sq->bfreg.map + offset)
+ : "memory", "v0", "v1", "v2", "v3");
+ kernel_neon_end();
+ return;
+ }
+#endif
+ __iowrite64_copy(sq->bfreg.map + offset, mmio_wqe,
+ mmio_wqe_size / 8);
+}
+
+static void mlx5_wc_post_nop(struct mlx5_wc_sq *sq, unsigned int *offset,
+ bool signaled)
{
int buf_size = (1 << MLX5_CAP_GEN(sq->cq.mdev, log_bf_reg_size)) / 2;
struct mlx5_wqe_ctrl_seg *ctrl;
@@ -288,10 +314,9 @@ static void mlx5_wc_post_nop(struct mlx5_wc_sq *sq, bool signaled)
*/
wmb();
- __iowrite64_copy(sq->bfreg.map + sq->bfreg.offset, mmio_wqe,
- sizeof(mmio_wqe) / 8);
+ mlx5_iowrite64_copy(sq, mmio_wqe, sizeof(mmio_wqe), *offset);
- sq->bfreg.offset ^= buf_size;
+ *offset ^= buf_size;
}
static int mlx5_wc_poll_cq(struct mlx5_wc_sq *sq)
@@ -332,6 +357,7 @@ static int mlx5_wc_poll_cq(struct mlx5_wc_sq *sq)
static void mlx5_core_test_wc(struct mlx5_core_dev *mdev)
{
+ unsigned int offset = 0;
unsigned long expires;
struct mlx5_wc_sq *sq;
int i, err;
@@ -358,9 +384,9 @@ static void mlx5_core_test_wc(struct mlx5_core_dev *mdev)
goto err_create_sq;
for (i = 0; i < TEST_WC_NUM_WQES - 1; i++)
- mlx5_wc_post_nop(sq, false);
+ mlx5_wc_post_nop(sq, &offset, false);
- mlx5_wc_post_nop(sq, true);
+ mlx5_wc_post_nop(sq, &offset, true);
expires = jiffies + TEST_WC_POLLING_MAX_TIME_JIFFIES;
do {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 2bb2b77351bd..83c7cf3bbea3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -886,7 +886,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
return 0;
- emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0);
+ emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_PERCPU, 0);
if (!emad_wq)
return -ENOMEM;
mlxsw_core->emad_wq = emad_wq;
@@ -2043,7 +2043,7 @@ static int mlxsw_core_health_init(struct mlxsw_core *mlxsw_core)
return 0;
fw_fatal = devl_health_reporter_create(devlink, &mlxsw_core_health_fw_fatal_ops,
- 0, mlxsw_core);
+ mlxsw_core);
if (IS_ERR(fw_fatal)) {
dev_err(mlxsw_core->bus_info->dev, "Failed to create fw fatal reporter");
return PTR_ERR(fw_fatal);
@@ -3381,7 +3381,7 @@ static int __init mlxsw_core_module_init(void)
if (err)
return err;
- mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0);
+ mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_PERCPU, 0);
if (!mlxsw_wq) {
err = -ENOMEM;
goto err_alloc_workqueue;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 618957d65663..9a2d64a0a858 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -2375,6 +2375,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
ROUTER_EXP, false),
MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
ROUTER_EXP, false),
+ MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_LINK_LOCAL, FORWARD,
+ ROUTER_EXP, false),
/* Multicast Router Traps */
MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
index 50e591420bd9..b1094aaffa5f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
@@ -170,8 +170,7 @@ void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp)
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
mlxsw_sp_counter_sub_pools_fini(mlxsw_sp);
- WARN_ON(find_first_bit(pool->usage, pool->pool_size) !=
- pool->pool_size);
+ WARN_ON(!bitmap_empty(pool->usage, pool->pool_size));
WARN_ON(atomic_read(&pool->active_entries_count));
bitmap_free(pool->usage);
devl_resource_occ_get_unregister(devlink,
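
The mlxsw change above is behavior-preserving: find_first_bit() returning the bitmap size means no bit is set, which is exactly what bitmap_empty() reports, just more legibly. A small user-space model of the equivalence (not the kernel bitmap API):

	#include <stdbool.h>
	#include <stdio.h>

	#define BITS_PER_WORD 64

	/* find_first_bit() model: returns nbits when no bit is set. */
	static unsigned int find_first_bit(const unsigned long long *map,
					   unsigned int nbits)
	{
		unsigned int i;

		for (i = 0; i < nbits; i++)
			if (map[i / BITS_PER_WORD] &
			    (1ULL << (i % BITS_PER_WORD)))
				return i;
		return nbits;
	}

	static bool bitmap_empty(const unsigned long long *map,
				 unsigned int nbits)
	{
		return find_first_bit(map, nbits) == nbits;
	}

	int main(void)
	{
		unsigned long long usage[2] = { 0, 0 };

		printf("empty: %d\n", bitmap_empty(usage, 128));
		usage[1] = 1ULL << 3;	/* set bit 67 */
		printf("empty: %d\n", bitmap_empty(usage, 128));
		return 0;
	}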
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 80ee5c4825dc..9962dc157901 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -94,6 +94,7 @@ enum {
MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_SIP_BC = 0x16A,
MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_DIP_LOCAL_NET = 0x16B,
MLXSW_TRAP_ID_DISCARD_ING_ROUTER_DIP_LINK_LOCAL = 0x16C,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_LINK_LOCAL = 0x16D,
MLXSW_TRAP_ID_DISCARD_ROUTER_IRIF_EN = 0x178,
MLXSW_TRAP_ID_DISCARD_ROUTER_ERIF_EN = 0x179,
MLXSW_TRAP_ID_DISCARD_ROUTER_LPM4 = 0x17B,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h
index c376e06880c9..b03e5a3d5144 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic.h
@@ -27,6 +27,8 @@ struct fbnic_dev {
struct net_device *netdev;
struct dentry *dbg_fbd;
struct device *hwmon;
+ struct devlink_health_reporter *fw_reporter;
+ struct devlink_health_reporter *otp_reporter;
u32 __iomem *uc_addr0;
u32 __iomem *uc_addr4;
@@ -84,8 +86,9 @@ struct fbnic_dev {
/* Local copy of hardware statistics */
struct fbnic_hw_stats hw_stats;
- /* Lock protecting access to hw_stats */
- spinlock_t hw_stats_lock;
+ /* Firmware time since boot in milliseconds */
+ u64 firmware_time;
+ u64 prev_firmware_time;
struct fbnic_fw_log fw_log;
};
@@ -158,8 +161,13 @@ extern char fbnic_driver_name[];
void fbnic_devlink_free(struct fbnic_dev *fbd);
struct fbnic_dev *fbnic_devlink_alloc(struct pci_dev *pdev);
+int fbnic_devlink_health_create(struct fbnic_dev *fbd);
+void fbnic_devlink_health_destroy(struct fbnic_dev *fbd);
void fbnic_devlink_register(struct fbnic_dev *fbd);
void fbnic_devlink_unregister(struct fbnic_dev *fbd);
+void __printf(2, 3)
+fbnic_devlink_fw_report(struct fbnic_dev *fbd, const char *format, ...);
+void fbnic_devlink_otp_check(struct fbnic_dev *fbd, const char *msg);
int fbnic_fw_request_mbx(struct fbnic_dev *fbd);
void fbnic_fw_free_mbx(struct fbnic_dev *fbd);
@@ -190,6 +198,8 @@ void fbnic_dbg_fbd_exit(struct fbnic_dev *fbd);
void fbnic_dbg_init(void);
void fbnic_dbg_exit(void);
+void fbnic_rpc_reset_valid_entries(struct fbnic_dev *fbd);
+
void fbnic_csr_get_regs(struct fbnic_dev *fbd, u32 *data, u32 *regs_version);
int fbnic_csr_regs_len(struct fbnic_dev *fbd);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
index a81db842aa53..d3a7ad921f18 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
@@ -790,6 +790,21 @@ enum {
#define FBNIC_CSR_END_PCS 0x10668 /* CSR section delimiter */
#define FBNIC_CSR_START_RSFEC 0x10800 /* CSR section delimiter */
+
+/* We have 4 RSFEC engines present in our part, but only one is in use.
+ * As such only CCW(0) and NCCW(0) will ever be non-zero and the other
+ * registers can be ignored.
+ */
+#define FBNIC_RSFEC_CCW_LO(n) (0x10802 + 8 * (n)) /* 0x42008 + 32*n */
+#define FBNIC_RSFEC_CCW_HI(n) (0x10803 + 8 * (n)) /* 0x4200c + 32*n */
+#define FBNIC_RSFEC_NCCW_LO(n) (0x10804 + 8 * (n)) /* 0x42010 + 32*n */
+#define FBNIC_RSFEC_NCCW_HI(n) (0x10805 + 8 * (n)) /* 0x42014 + 32*n */
+
+#define FBNIC_PCS_MAX_LANES 4
+#define FBNIC_PCS_SYMBLERR_LO(n) \
+ (0x10880 + 2 * (n)) /* 0x42200 + 8*n */
+#define FBNIC_PCS_SYMBLERR_HI(n) \
+ (0x10881 + 2 * (n)) /* 0x42204 + 8*n */
#define FBNIC_CSR_END_RSFEC 0x108c8 /* CSR section delimiter */
/* MAC MAC registers (ASIC only) */
@@ -829,6 +844,10 @@ enum {
#define FBNIC_CSR_END_SIG 0x1184e /* CSR section delimiter */
#define FBNIC_CSR_START_MAC_STAT 0x11a00
+#define FBNIC_MAC_STAT_RX_XOFF_STB_L 0x11a00 /* 0x46800 */
+#define FBNIC_MAC_STAT_RX_XOFF_STB_H 0x11a01 /* 0x46804 */
+#define FBNIC_MAC_STAT_TX_XOFF_STB_L 0x11a04 /* 0x46810 */
+#define FBNIC_MAC_STAT_TX_XOFF_STB_H 0x11a05 /* 0x46814 */
#define FBNIC_MAC_STAT_RX_BYTE_COUNT_L 0x11a08 /* 0x46820 */
#define FBNIC_MAC_STAT_RX_BYTE_COUNT_H 0x11a09 /* 0x46824 */
#define FBNIC_MAC_STAT_RX_ALIGN_ERROR_L 0x11a0a /* 0x46828 */
@@ -1159,4 +1178,22 @@ enum {
#define FBNIC_IPC_MBX_DESC_FW_CMPL DESC_BIT(1)
#define FBNIC_IPC_MBX_DESC_HOST_CMPL DESC_BIT(0)
+/* OTP Registers
+ * These registers are accessible via a BAR4 offset and are written by CMRT
+ * on boot. The write status register is split in half, with the OTP
+ * Write Data Status occupying the top 16 bits and the ECC status occupying the
+ * bottom 16 bits.
+ */
+#define FBNIC_NS_OTP_STATUS 0x0021d
+#define FBNIC_NS_OTP_WRITE_STATUS 0x0021e
+
+#define FBNIC_NS_OTP_WRITE_DATA_STATUS_MASK CSR_GENMASK(31, 16)
+#define FBNIC_NS_OTP_WRITE_ECC_STATUS_MASK CSR_GENMASK(15, 0)
+
+#define FBNIC_REGS_VERSION CSR_GENMASK(31, 16)
+#define FBNIC_REGS_HW_TYPE CSR_GENMASK(15, 8)
+enum {
+ FBNIC_CSR_VERSION_V1_0_ASIC = 1,
+};
+
#endif /* _FBNIC_CSR_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
index c5f81f139e7e..b62b1d5b1453 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
@@ -8,6 +8,7 @@
#include <net/devlink.h>
#include "fbnic.h"
+#include "fbnic_fw.h"
#include "fbnic_tlv.h"
#define FBNIC_SN_STR_LEN 24
@@ -369,6 +370,254 @@ static const struct devlink_ops fbnic_devlink_ops = {
.flash_update = fbnic_devlink_flash_update,
};
+static int fbnic_fw_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_dev *fbd = devlink_health_reporter_priv(reporter);
+ u32 offset, index, index_count, length, size;
+ struct fbnic_fw_completion *fw_cmpl;
+ u8 *dump_data, **data;
+ int err;
+
+ fw_cmpl = fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_COREDUMP_GET_INFO_RESP);
+ if (!fw_cmpl)
+ return -ENOMEM;
+
+ err = fbnic_fw_xmit_coredump_info_msg(fbd, fw_cmpl, true);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to transmit core dump info msg");
+ goto cmpl_free;
+ }
+ if (!wait_for_completion_timeout(&fw_cmpl->done, 2 * HZ)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Timed out waiting on core dump info");
+ err = -ETIMEDOUT;
+ goto cmpl_cleanup;
+ }
+
+ size = fw_cmpl->u.coredump_info.size;
+ err = fw_cmpl->result;
+
+ fbnic_mbx_clear_cmpl(fbd, fw_cmpl);
+ fbnic_fw_put_cmpl(fw_cmpl);
+
+ /* Handle error returned by firmware */
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware core dump returned error");
+ return err;
+ }
+ if (!size) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Firmware core dump returned size 0");
+ return -EIO;
+ }
+
+ /* Read the dump; we can only transfer TLV_MAX_DATA bytes at a time */
+ index_count = DIV_ROUND_UP(size, TLV_MAX_DATA);
+
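+ /* Allocate room for the per-chunk pointer table followed by the dump data */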
+ fw_cmpl = __fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_COREDUMP_READ_RESP,
+ sizeof(void *) * index_count + size);
+ if (!fw_cmpl)
+ return -ENOMEM;
+
+ /* Populate pointer table with per-chunk offsets into the dump buffer */
+ dump_data = (void *)&fw_cmpl->u.coredump.data[index_count];
+ data = fw_cmpl->u.coredump.data;
+ fw_cmpl->u.coredump.size = size;
+ fw_cmpl->u.coredump.stride = TLV_MAX_DATA;
+
+ for (index = 0; index < index_count; index++) {
+ /* First iteration installs completion */
+ struct fbnic_fw_completion *cmpl_arg = index ? NULL : fw_cmpl;
+
+ offset = index * TLV_MAX_DATA;
+ length = min(size - offset, TLV_MAX_DATA);
+
+ data[index] = dump_data + offset;
+ err = fbnic_fw_xmit_coredump_read_msg(fbd, cmpl_arg,
+ offset, length);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to transmit core dump msg");
+ if (cmpl_arg)
+ goto cmpl_free;
+ else
+ goto cmpl_cleanup;
+ }
+
+ if (wait_for_completion_timeout(&fw_cmpl->done, 2 * HZ)) {
+ reinit_completion(&fw_cmpl->done);
+ } else {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Timed out waiting on core dump (%d/%d)",
+ index + 1, index_count);
+ err = -ETIMEDOUT;
+ goto cmpl_cleanup;
+ }
+
+ /* If we didn't see the reply, record the chunk as incomplete */
+ if (fw_cmpl->u.coredump.data[index]) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "No data for core dump chunk (%d/%d)",
+ index + 1, index_count);
+ err = -EIO;
+ goto cmpl_cleanup;
+ }
+ }
+
+ devlink_fmsg_binary_pair_nest_start(fmsg, "FW coredump");
+
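+ /* Walk the buffer in TLV_MAX_DATA sized chunks */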
+ for (offset = 0; offset < size; offset += length) {
+ length = min_t(u32, size - offset, TLV_MAX_DATA);
+
+ devlink_fmsg_binary_put(fmsg, dump_data + offset, length);
+ }
+
+ devlink_fmsg_binary_pair_nest_end(fmsg);
+
+cmpl_cleanup:
+ fbnic_mbx_clear_cmpl(fbd, fw_cmpl);
+cmpl_free:
+ fbnic_fw_put_cmpl(fw_cmpl);
+
+ return err;
+}
+
+static int
+fbnic_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_dev *fbd = devlink_health_reporter_priv(reporter);
+ u32 sec, msec;
+
+ /* Device is most likely down; we're not exchanging heartbeats */
+ if (!fbd->prev_firmware_time)
+ return 0;
+
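+ /* Convert the FW-reported uptime in milliseconds to sec and msec */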
+ sec = div_u64_rem(fbd->firmware_time, MSEC_PER_SEC, &msec);
+
+ devlink_fmsg_pair_nest_start(fmsg, "last_heartbeat");
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_pair_nest_start(fmsg, "fw_uptime");
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_u32_pair_put(fmsg, "sec", sec);
+ devlink_fmsg_u32_pair_put(fmsg, "msec", msec);
+ devlink_fmsg_obj_nest_end(fmsg);
+ devlink_fmsg_pair_nest_end(fmsg);
+ devlink_fmsg_obj_nest_end(fmsg);
+ devlink_fmsg_pair_nest_end(fmsg);
+
+ return 0;
+}
+
+void __printf(2, 3)
+fbnic_devlink_fw_report(struct fbnic_dev *fbd, const char *format, ...)
+{
+ char msg[FBNIC_FW_LOG_MAX_SIZE];
+ va_list args;
+
+ va_start(args, format);
+ vsnprintf(msg, FBNIC_FW_LOG_MAX_SIZE, format, args);
+ va_end(args);
+
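+ /* Raise a devlink health event and mirror the message to the FW log */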
+ devlink_health_report(fbd->fw_reporter, msg, fbd);
+ if (fbnic_fw_log_ready(fbd))
+ fbnic_fw_log_write(fbd, 0, fbd->firmware_time, msg);
+}
+
+static const struct devlink_health_reporter_ops fbnic_fw_ops = {
+ .name = "fw",
+ .dump = fbnic_fw_reporter_dump,
+ .diagnose = fbnic_fw_reporter_diagnose,
+};
+
+static u32 fbnic_read_otp_status(struct fbnic_dev *fbd)
+{
+ return fbnic_fw_rd32(fbd, FBNIC_NS_OTP_STATUS);
+}
+
+static int
+fbnic_otp_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_dev *fbd = devlink_health_reporter_priv(reporter);
+ u32 otp_status, otp_write_status, m;
+
+ otp_status = fbnic_read_otp_status(fbd);
+ otp_write_status = fbnic_fw_rd32(fbd, FBNIC_NS_OTP_WRITE_STATUS);
+
+ /* Dump OTP status */
+ devlink_fmsg_pair_nest_start(fmsg, "OTP");
+ devlink_fmsg_obj_nest_start(fmsg);
+
+ devlink_fmsg_u32_pair_put(fmsg, "Status", otp_status);
+
+ /* Extract OTP Write Data status */
+ m = FBNIC_NS_OTP_WRITE_DATA_STATUS_MASK;
+ devlink_fmsg_u32_pair_put(fmsg, "Data",
+ FIELD_GET(m, otp_write_status));
+
+ /* Extract OTP Write ECC status */
+ m = FBNIC_NS_OTP_WRITE_ECC_STATUS_MASK;
+ devlink_fmsg_u32_pair_put(fmsg, "ECC",
+ FIELD_GET(m, otp_write_status));
+
+ devlink_fmsg_obj_nest_end(fmsg);
+ devlink_fmsg_pair_nest_end(fmsg);
+
+ return 0;
+}
+
+void fbnic_devlink_otp_check(struct fbnic_dev *fbd, const char *msg)
+{
+ /* Check if there is anything to report */
+ if (!fbnic_read_otp_status(fbd))
+ return;
+
+ devlink_health_report(fbd->otp_reporter, msg, fbd);
+ if (fbnic_fw_log_ready(fbd))
+ fbnic_fw_log_write(fbd, 0, fbd->firmware_time, msg);
+}
+
+static const struct devlink_health_reporter_ops fbnic_otp_ops = {
+ .name = "otp",
+ .dump = fbnic_otp_reporter_dump,
+};
+
+int fbnic_devlink_health_create(struct fbnic_dev *fbd)
+{
+ fbd->fw_reporter = devlink_health_reporter_create(priv_to_devlink(fbd),
+ &fbnic_fw_ops, fbd);
+ if (IS_ERR(fbd->fw_reporter)) {
+ dev_warn(fbd->dev,
+ "Failed to create FW fault reporter: %pe\n",
+ fbd->fw_reporter);
+ return PTR_ERR(fbd->fw_reporter);
+ }
+
+ fbd->otp_reporter = devlink_health_reporter_create(priv_to_devlink(fbd),
+ &fbnic_otp_ops, fbd);
+ if (IS_ERR(fbd->otp_reporter)) {
+ devlink_health_reporter_destroy(fbd->fw_reporter);
+ dev_warn(fbd->dev,
+ "Failed to create OTP fault reporter: %pe\n",
+ fbd->otp_reporter);
+ return PTR_ERR(fbd->otp_reporter);
+ }
+
+ return 0;
+}
+
+void fbnic_devlink_health_destroy(struct fbnic_dev *fbd)
+{
+ devlink_health_reporter_destroy(fbd->otp_reporter);
+ devlink_health_reporter_destroy(fbd->fw_reporter);
+}
+
void fbnic_devlink_free(struct fbnic_dev *fbd)
{
struct devlink *devlink = priv_to_devlink(fbd);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
index dc7ba8d5fc43..95fac020eb93 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c
@@ -2,6 +2,7 @@
/* Copyright (c) Meta Platforms, Inc. and affiliates. */
#include <linux/ethtool.h>
+#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <net/ipv6.h>
@@ -111,6 +112,20 @@ static const struct fbnic_stat fbnic_gstrings_hw_q_stats[] = {
FBNIC_HW_RXB_DEQUEUE_STATS_LEN * FBNIC_RXB_DEQUEUE_INDICES + \
FBNIC_HW_Q_STATS_LEN * FBNIC_MAX_QUEUES)
+#define FBNIC_QUEUE_STAT(name, stat) \
+ FBNIC_STAT_FIELDS(fbnic_ring, name, stat)
+
+static const struct fbnic_stat fbnic_gstrings_xdp_stats[] = {
+ FBNIC_QUEUE_STAT("xdp_tx_queue_%u_packets", stats.packets),
+ FBNIC_QUEUE_STAT("xdp_tx_queue_%u_bytes", stats.bytes),
+ FBNIC_QUEUE_STAT("xdp_tx_queue_%u_dropped", stats.dropped),
+};
+
+#define FBNIC_XDP_STATS_LEN ARRAY_SIZE(fbnic_gstrings_xdp_stats)
+
+#define FBNIC_STATS_LEN \
+ (FBNIC_HW_STATS_LEN + FBNIC_XDP_STATS_LEN * FBNIC_MAX_XDPQS)
+
static void
fbnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
@@ -160,6 +175,7 @@ static void fbnic_clone_swap_cfg(struct fbnic_net *orig,
swap(clone->num_rx_queues, orig->num_rx_queues);
swap(clone->num_tx_queues, orig->num_tx_queues);
swap(clone->num_napi, orig->num_napi);
+ swap(clone->hds_thresh, orig->hds_thresh);
}
static void fbnic_aggregate_vector_counters(struct fbnic_net *fbn,
@@ -169,13 +185,13 @@ static void fbnic_aggregate_vector_counters(struct fbnic_net *fbn,
for (i = 0; i < nv->txt_count; i++) {
fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].sub0);
- fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].sub1);
+ fbnic_aggregate_ring_xdp_counters(fbn, &nv->qt[i].sub1);
fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].cmpl);
}
for (j = 0; j < nv->rxt_count; j++, i++) {
- fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].sub0);
- fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].sub1);
+ fbnic_aggregate_ring_bdq_counters(fbn, &nv->qt[i].sub0);
+ fbnic_aggregate_ring_bdq_counters(fbn, &nv->qt[i].sub1);
fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].cmpl);
}
}
@@ -277,15 +293,21 @@ fbnic_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
ring->rx_mini_pending = fbn->hpq_size;
ring->rx_jumbo_pending = fbn->ppq_size;
ring->tx_pending = fbn->txq_size;
+
+ kernel_ring->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
+ kernel_ring->hds_thresh_max = FBNIC_HDS_THRESH_MAX;
+ kernel_ring->hds_thresh = fbn->hds_thresh;
}
static void fbnic_set_rings(struct fbnic_net *fbn,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring)
{
fbn->rcq_size = ring->rx_pending;
fbn->hpq_size = ring->rx_mini_pending;
fbn->ppq_size = ring->rx_jumbo_pending;
fbn->txq_size = ring->tx_pending;
+ fbn->hds_thresh = kernel_ring->hds_thresh;
}
static int
@@ -316,8 +338,24 @@ fbnic_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
return -EINVAL;
}
+ if (kernel_ring->tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_DISABLED) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot disable TCP data split");
+ return -EINVAL;
+ }
+
+ /* If an XDP program is attached, we should check for potential frame
+ * splitting. If the new HDS threshold can cause splitting, we should
+ * only allow it if the attached XDP program can handle frags.
+ */
+ if (fbnic_check_split_frames(fbn->xdp_prog, netdev->mtu,
+ kernel_ring->hds_thresh)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Use higher HDS threshold or multi-buf capable program");
+ return -EINVAL;
+ }
+
if (!netif_running(netdev)) {
- fbnic_set_rings(fbn, ring);
+ fbnic_set_rings(fbn, ring, kernel_ring);
return 0;
}
@@ -325,7 +363,7 @@ fbnic_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
if (!clone)
return -ENOMEM;
- fbnic_set_rings(clone, ring);
+ fbnic_set_rings(clone, ring, kernel_ring);
err = fbnic_alloc_napi_vectors(clone);
if (err)
@@ -398,6 +436,16 @@ static void fbnic_get_rxb_dequeue_strings(u8 **data, unsigned int idx)
ethtool_sprintf(data, stat->string, idx);
}
+static void fbnic_get_xdp_queue_strings(u8 **data, unsigned int idx)
+{
+ const struct fbnic_stat *stat;
+ int i;
+
+ stat = fbnic_gstrings_xdp_stats;
+ for (i = 0; i < FBNIC_XDP_STATS_LEN; i++, stat++)
+ ethtool_sprintf(data, stat->string, idx);
+}
+
static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data)
{
const struct fbnic_stat *stat;
@@ -423,6 +471,9 @@ static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data)
for (i = 0; i < FBNIC_HW_Q_STATS_LEN; i++, stat++)
ethtool_sprintf(&data, stat->string, idx);
}
+
+ for (i = 0; i < FBNIC_MAX_XDPQS; i++)
+ fbnic_get_xdp_queue_strings(&data, i);
break;
}
}
@@ -440,6 +491,24 @@ static void fbnic_report_hw_stats(const struct fbnic_stat *stat,
}
}
+static void fbnic_get_xdp_queue_stats(struct fbnic_ring *ring, u64 **data)
+{
+ const struct fbnic_stat *stat;
+ int i;
+
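+ /* No ring allocated; skip its slots to stay aligned with the string table */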
+ if (!ring) {
+ *data += FBNIC_XDP_STATS_LEN;
+ return;
+ }
+
+ stat = fbnic_gstrings_xdp_stats;
+ for (i = 0; i < FBNIC_XDP_STATS_LEN; i++, stat++, (*data)++) {
+ u8 *p = (u8 *)ring + stat->offset;
+
+ **data = *(u64 *)p;
+ }
+}
+
static void fbnic_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
@@ -449,7 +518,7 @@ static void fbnic_get_ethtool_stats(struct net_device *dev,
fbnic_get_hw_stats(fbn->fbd);
- spin_lock(&fbd->hw_stats_lock);
+ spin_lock(&fbd->hw_stats.lock);
fbnic_report_hw_stats(fbnic_gstrings_hw_stats, &fbd->hw_stats,
FBNIC_HW_FIXED_STATS_LEN, &data);
@@ -486,14 +555,17 @@ static void fbnic_get_ethtool_stats(struct net_device *dev,
fbnic_report_hw_stats(fbnic_gstrings_hw_q_stats, hw_q,
FBNIC_HW_Q_STATS_LEN, &data);
}
- spin_unlock(&fbd->hw_stats_lock);
+ spin_unlock(&fbd->hw_stats.lock);
+
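+ /* XDP queues are stored after the Tx queues in the tx ring array */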
+ for (i = 0; i < FBNIC_MAX_XDPQS; i++)
+ fbnic_get_xdp_queue_stats(fbn->tx[i + FBNIC_MAX_TXQS], &data);
}
static int fbnic_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
- return FBNIC_HW_STATS_LEN;
+ return FBNIC_STATS_LEN;
default:
return -EOPNOTSUPP;
}
@@ -1310,7 +1382,7 @@ fbnic_get_rss_hash_opts(struct net_device *netdev,
#define FBNIC_L2_HASH_OPTIONS \
(RXH_L2DA | RXH_DISCARD)
#define FBNIC_L3_HASH_OPTIONS \
- (FBNIC_L2_HASH_OPTIONS | RXH_IP_SRC | RXH_IP_DST)
+ (FBNIC_L2_HASH_OPTIONS | RXH_IP_SRC | RXH_IP_DST | RXH_IP6_FL)
#define FBNIC_L4_HASH_OPTIONS \
(FBNIC_L3_HASH_OPTIONS | RXH_L4_B_0_1 | RXH_L4_B_2_3)
@@ -1563,6 +1635,65 @@ static void fbnic_get_ts_stats(struct net_device *netdev,
}
}
+static int
+fbnic_get_module_eeprom_by_page(struct net_device *netdev,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_fw_completion *fw_cmpl;
+ struct fbnic_dev *fbd = fbn->fbd;
+ int err;
+
+ if (page_data->i2c_address != 0x50) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid i2c address. Only 0x50 is supported");
+ return -EINVAL;
+ }
+
+ fw_cmpl = __fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_QSFP_READ_RESP,
+ page_data->length);
+ if (!fw_cmpl)
+ return -ENOMEM;
+
+ /* Initialize completion and queue it for FW to process */
+ fw_cmpl->u.qsfp.length = page_data->length;
+ fw_cmpl->u.qsfp.offset = page_data->offset;
+ fw_cmpl->u.qsfp.page = page_data->page;
+ fw_cmpl->u.qsfp.bank = page_data->bank;
+
+ err = fbnic_fw_xmit_qsfp_read_msg(fbd, fw_cmpl, page_data->page,
+ page_data->bank, page_data->offset,
+ page_data->length);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to transmit EEPROM read request");
+ goto exit_free;
+ }
+
+ if (!wait_for_completion_timeout(&fw_cmpl->done, 2 * HZ)) {
+ err = -ETIMEDOUT;
+ NL_SET_ERR_MSG_MOD(extack,
+ "Timed out waiting for firmware response");
+ goto exit_cleanup;
+ }
+
+ if (fw_cmpl->result) {
+ err = fw_cmpl->result;
+ NL_SET_ERR_MSG_MOD(extack, "Failed to read EEPROM");
+ goto exit_cleanup;
+ }
+
+ memcpy(page_data->data, fw_cmpl->u.qsfp.data, page_data->length);
+
+exit_cleanup:
+ fbnic_mbx_clear_cmpl(fbd, fw_cmpl);
+exit_free:
+ fbnic_fw_put_cmpl(fw_cmpl);
+
+ return err ? : page_data->length;
+}
+
static void fbnic_set_counter(u64 *stat, struct fbnic_stat_counter *counter)
{
if (counter->reported)
@@ -1570,6 +1701,63 @@ static void fbnic_set_counter(u64 *stat, struct fbnic_stat_counter *counter)
}
static void
+fbnic_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_mac_stats *mac_stats;
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ mac_stats = &fbd->hw_stats.mac;
+
+ fbd->mac->get_pause_stats(fbd, false, &mac_stats->pause);
+
+ pause_stats->tx_pause_frames = mac_stats->pause.tx_pause_frames.value;
+ pause_stats->rx_pause_frames = mac_stats->pause.rx_pause_frames.value;
+}
+
+static void
+fbnic_get_fec_stats(struct net_device *netdev,
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_phy_stats *phy_stats;
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ fbnic_get_hw_stats32(fbd);
+ phy_stats = &fbd->hw_stats.phy;
+
+ spin_lock(&fbd->hw_stats.lock);
+ fec_stats->corrected_blocks.total =
+ phy_stats->fec.corrected_blocks.value;
+ fec_stats->uncorrectable_blocks.total =
+ phy_stats->fec.uncorrectable_blocks.value;
+ spin_unlock(&fbd->hw_stats.lock);
+}
+
+static void
+fbnic_get_eth_phy_stats(struct net_device *netdev,
+ struct ethtool_eth_phy_stats *eth_phy_stats)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_phy_stats *phy_stats;
+ struct fbnic_dev *fbd = fbn->fbd;
+ u64 total = 0;
+ int i;
+
+ fbnic_get_hw_stats32(fbd);
+ phy_stats = &fbd->hw_stats.phy;
+
+ spin_lock(&fbd->hw_stats.lock);
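+ /* Sum symbol errors across all PCS lanes */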
+ for (i = 0; i < FBNIC_PCS_MAX_LANES; i++)
+ total += phy_stats->pcs.SymbolErrorDuringCarrier.lanes[i].value;
+
+ eth_phy_stats->SymbolErrorDuringCarrier = total;
+ spin_unlock(&fbd->hw_stats.lock);
+}
+
+static void
fbnic_get_eth_mac_stats(struct net_device *netdev,
struct ethtool_eth_mac_stats *eth_mac_stats)
{
@@ -1676,8 +1864,11 @@ fbnic_get_rmon_stats(struct net_device *netdev,
}
static const struct ethtool_ops fbnic_ethtool_ops = {
+ .cap_link_lanes_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_RX_MAX_FRAMES,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT |
+ ETHTOOL_RING_USE_HDS_THRS,
.rxfh_max_num_contexts = FBNIC_RPC_RSS_TBL_COUNT,
.get_drvinfo = fbnic_get_drvinfo,
.get_regs_len = fbnic_get_regs_len,
@@ -1687,6 +1878,7 @@ static const struct ethtool_ops fbnic_ethtool_ops = {
.set_coalesce = fbnic_set_coalesce,
.get_ringparam = fbnic_get_ringparam,
.set_ringparam = fbnic_set_ringparam,
+ .get_pause_stats = fbnic_get_pause_stats,
.get_pauseparam = fbnic_phylink_get_pauseparam,
.set_pauseparam = fbnic_phylink_set_pauseparam,
.get_strings = fbnic_get_strings,
@@ -1708,7 +1900,10 @@ static const struct ethtool_ops fbnic_ethtool_ops = {
.get_ts_info = fbnic_get_ts_info,
.get_ts_stats = fbnic_get_ts_stats,
.get_link_ksettings = fbnic_phylink_ethtool_ksettings_get,
+ .get_fec_stats = fbnic_get_fec_stats,
.get_fecparam = fbnic_phylink_get_fecparam,
+ .get_module_eeprom_by_page = fbnic_get_module_eeprom_by_page,
+ .get_eth_phy_stats = fbnic_get_eth_phy_stats,
.get_eth_mac_stats = fbnic_get_eth_mac_stats,
.get_eth_ctrl_stats = fbnic_get_eth_ctrl_stats,
.get_rmon_stats = fbnic_get_rmon_stats,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
index 0c55be7d2547..c87cb9ed09e7 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
@@ -495,6 +495,11 @@ int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership)
fbd->last_heartbeat_request = req_time;
+ /* Set prev_firmware_time to 0 to avoid triggering firmware crash
+ * detection until we receive the second uptime in a heartbeat resp.
+ */
+ fbd->prev_firmware_time = 0;
+
/* Set heartbeat detection based on if we are taking ownership */
fbd->fw_heartbeat_enabled = take_ownership;
@@ -653,10 +658,14 @@ static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
fbd->fw_cap.anti_rollback_version =
fta_get_uint(results, FBNIC_FW_CAP_RESP_ANTI_ROLLBACK_VERSION);
+ /* Always assume we need a BMC reinit */
+ fbd->fw_cap.need_bmc_tcam_reinit = true;
+
return 0;
}
static const struct fbnic_tlv_index fbnic_ownership_resp_index[] = {
+ FBNIC_TLV_ATTR_U64(FBNIC_FW_OWNERSHIP_TIME),
FBNIC_TLV_ATTR_LAST
};
@@ -668,10 +677,14 @@ static int fbnic_fw_parse_ownership_resp(void *opaque,
/* Count the ownership response as a heartbeat reply */
fbd->last_heartbeat_response = jiffies;
+ /* Capture firmware time for logging and firmware crash check */
+ fbd->firmware_time = fta_get_uint(results, FBNIC_FW_OWNERSHIP_TIME);
+
return 0;
}
static const struct fbnic_tlv_index fbnic_heartbeat_resp_index[] = {
+ FBNIC_TLV_ATTR_U64(FBNIC_FW_HEARTBEAT_UPTIME),
FBNIC_TLV_ATTR_LAST
};
@@ -682,6 +695,9 @@ static int fbnic_fw_parse_heartbeat_resp(void *opaque,
fbd->last_heartbeat_response = jiffies;
+ /* Capture firmware time for logging and firmware crash check */
+ fbd->firmware_time = fta_get_uint(results, FBNIC_FW_HEARTBEAT_UPTIME);
+
return 0;
}
@@ -703,6 +719,7 @@ static int fbnic_fw_xmit_heartbeat_message(struct fbnic_dev *fbd)
goto free_message;
fbd->last_heartbeat_request = req_time;
+ fbd->prev_firmware_time = fbd->firmware_time;
return err;
@@ -763,7 +780,8 @@ void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd)
return;
/* Was the last heartbeat response long time ago? */
- if (!fbnic_fw_heartbeat_current(fbd)) {
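+ /* A backwards jump in FW uptime indicates the firmware restarted */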
+ if (!fbnic_fw_heartbeat_current(fbd) ||
+ fbd->firmware_time < fbd->prev_firmware_time) {
dev_warn(fbd->dev,
"Firmware did not respond to heartbeat message\n");
fbd->fw_heartbeat_enabled = false;
@@ -775,6 +793,215 @@ void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd)
dev_warn(fbd->dev, "Failed to send heartbeat message\n");
}
+/**
+ * fbnic_fw_xmit_coredump_info_msg - Create and transmit a coredump info message
+ * @fbd: FBNIC device structure
+ * @cmpl_data: Structure to store info in
+ * @force: Force coredump event if one hasn't already occurred
+ *
+ * Return: zero on success, negative errno on failure
+ *
+ * Asks the FW for info related to the coredump. If a coredump doesn't
+ * exist, one can optionally be forced by setting @force to true.
+ */
+int fbnic_fw_xmit_coredump_info_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ bool force)
+{
+ struct fbnic_tlv_msg *msg;
+ int err = 0;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_COREDUMP_GET_INFO_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ if (force) {
+ err = fbnic_tlv_attr_put_flag(msg, FBNIC_FW_COREDUMP_REQ_INFO_CREATE);
+ if (err)
+ goto free_msg;
+ }
+
+ err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
+ if (err)
+ goto free_msg;
+
+ return 0;
+
+free_msg:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static const struct fbnic_tlv_index fbnic_coredump_info_resp_index[] = {
+ FBNIC_TLV_ATTR_FLAG(FBNIC_FW_COREDUMP_INFO_AVAILABLE),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_COREDUMP_INFO_SIZE),
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_COREDUMP_INFO_ERROR),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int
+fbnic_fw_parse_coredump_info_resp(void *opaque, struct fbnic_tlv_msg **results)
+{
+ struct fbnic_fw_completion *cmpl_data;
+ struct fbnic_dev *fbd = opaque;
+ u32 msg_type;
+ s32 err;
+
+ /* Verify we have a completion pointer to provide with data */
+ msg_type = FBNIC_TLV_MSG_ID_COREDUMP_GET_INFO_RESP;
+ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
+ if (!cmpl_data)
+ return -ENOSPC;
+
+ err = fta_get_sint(results, FBNIC_FW_COREDUMP_INFO_ERROR);
+ if (err)
+ goto msg_err;
+
+ if (!results[FBNIC_FW_COREDUMP_INFO_AVAILABLE]) {
+ err = -ENOENT;
+ goto msg_err;
+ }
+
+ cmpl_data->u.coredump_info.size =
+ fta_get_uint(results, FBNIC_FW_COREDUMP_INFO_SIZE);
+
+msg_err:
+ cmpl_data->result = err;
+ complete(&cmpl_data->done);
+ fbnic_fw_put_cmpl(cmpl_data);
+
+ return err;
+}
+
+/**
+ * fbnic_fw_xmit_coredump_read_msg - Create and transmit a coredump read request
+ * @fbd: FBNIC device structure
+ * @cmpl_data: Completion struct to store coredump
+ * @offset: Offset into coredump requested
+ * @length: Length of section of coredump to fetch
+ *
+ * Return: zero on success, negative errno on failure
+ *
+ * Asks the firmware to provide a section of the coredump back in a message.
+ * The response will have an offset and size matching the values provided.
+ */
+int fbnic_fw_xmit_coredump_read_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ u32 offset, u32 length)
+{
+ struct fbnic_tlv_msg *msg;
+ int err = 0;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_COREDUMP_READ_REQ);
+ if (!msg)
+ return -ENOMEM;
+
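+ /* Zero offset/length are the implied defaults, so skip encoding them */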
+ if (offset) {
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_COREDUMP_READ_OFFSET,
+ offset);
+ if (err)
+ goto free_message;
+ }
+
+ if (length) {
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_COREDUMP_READ_LENGTH,
+ length);
+ if (err)
+ goto free_message;
+ }
+
+ err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
+ if (err)
+ goto free_message;
+
+ return 0;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static const struct fbnic_tlv_index fbnic_coredump_resp_index[] = {
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_COREDUMP_READ_OFFSET),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_COREDUMP_READ_LENGTH),
+ FBNIC_TLV_ATTR_RAW_DATA(FBNIC_FW_COREDUMP_READ_DATA),
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_COREDUMP_READ_ERROR),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_coredump_resp(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_fw_completion *cmpl_data;
+ u32 index, last_offset, last_length;
+ struct fbnic_dev *fbd = opaque;
+ struct fbnic_tlv_msg *data_hdr;
+ u32 length, offset;
+ u32 msg_type;
+ s32 err;
+
+ /* Verify we have a completion pointer to provide with data */
+ msg_type = FBNIC_TLV_MSG_ID_COREDUMP_READ_RESP;
+ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, msg_type);
+ if (!cmpl_data)
+ return -ENOSPC;
+
+ err = fta_get_sint(results, FBNIC_FW_COREDUMP_READ_ERROR);
+ if (err)
+ goto msg_err;
+
+ data_hdr = results[FBNIC_FW_COREDUMP_READ_DATA];
+ if (!data_hdr) {
+ err = -ENODATA;
+ goto msg_err;
+ }
+
+ offset = fta_get_uint(results, FBNIC_FW_COREDUMP_READ_OFFSET);
+ length = fta_get_uint(results, FBNIC_FW_COREDUMP_READ_LENGTH);
+
+ if (length > le16_to_cpu(data_hdr->hdr.len) - sizeof(u32)) {
+ dev_err(fbd->dev, "length greater than size of message\n");
+ err = -EINVAL;
+ goto msg_err;
+ }
+
+ /* Only the last offset can have a length != stride */
+ last_length =
+ (cmpl_data->u.coredump.size % cmpl_data->u.coredump.stride) ? :
+ cmpl_data->u.coredump.stride;
+ last_offset = cmpl_data->u.coredump.size - last_length;
+
+ /* Verify offset and length */
+ if (offset % cmpl_data->u.coredump.stride || offset > last_offset) {
+ dev_err(fbd->dev, "offset %d out of range\n", offset);
+ err = -EINVAL;
+ } else if (length != ((offset == last_offset) ?
+ last_length : cmpl_data->u.coredump.stride)) {
+ dev_err(fbd->dev, "length %d out of range for offset %d\n",
+ length, offset);
+ err = -EINVAL;
+ }
+ if (err)
+ goto msg_err;
+
+ /* If the data pointer is NULL the chunk is already filled; skip the copy */
+ index = offset / cmpl_data->u.coredump.stride;
+ if (!cmpl_data->u.coredump.data[index])
+ goto msg_err;
+
+ /* Copy data and mark index filled by setting pointer to NULL */
+ memcpy(cmpl_data->u.coredump.data[index],
+ fbnic_tlv_attr_get_value_ptr(data_hdr), length);
+ cmpl_data->u.coredump.data[index] = NULL;
+
+msg_err:
+ cmpl_data->result = err;
+ complete(&cmpl_data->done);
+ fbnic_fw_put_cmpl(cmpl_data);
+
+ return err;
+}
+
int fbnic_fw_xmit_fw_start_upgrade(struct fbnic_dev *fbd,
struct fbnic_fw_completion *cmpl_data,
unsigned int id, unsigned int len)
@@ -958,6 +1185,138 @@ static int fbnic_fw_parse_fw_finish_upgrade_req(void *opaque,
}
/**
+ * fbnic_fw_xmit_qsfp_read_msg - Transmit a QSFP read request
+ * @fbd: FBNIC device structure
+ * @cmpl_data: Structure to store EEPROM response in
+ * @page: Refers to page number on page enabled QSFP modules
+ * @bank: Refers to a collection of pages
+ * @offset: Offset into QSFP EEPROM requested
+ * @length: Length of section of QSFP EEPROM to fetch
+ *
+ * Return: zero on success, negative value on failure
+ *
+ * Asks the firmware to provide a section of the QSFP EEPROM back in a
+ * message. The response will have an offset and size matching the values
+ * provided.
+ */
+int fbnic_fw_xmit_qsfp_read_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ u32 page, u32 bank, u32 offset, u32 length)
+{
+ struct fbnic_tlv_msg *msg;
+ int err = 0;
+
+ if (!length || length > TLV_MAX_DATA)
+ return -EINVAL;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_QSFP_READ_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_QSFP_BANK, bank);
+ if (err)
+ goto free_message;
+
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_QSFP_PAGE, page);
+ if (err)
+ goto free_message;
+
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_QSFP_OFFSET, offset);
+ if (err)
+ goto free_message;
+
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_QSFP_LENGTH, length);
+ if (err)
+ goto free_message;
+
+ err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
+ if (err)
+ goto free_message;
+
+ return 0;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static const struct fbnic_tlv_index fbnic_qsfp_read_resp_index[] = {
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_QSFP_BANK),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_QSFP_PAGE),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_QSFP_OFFSET),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_QSFP_LENGTH),
+ FBNIC_TLV_ATTR_RAW_DATA(FBNIC_FW_QSFP_DATA),
+ FBNIC_TLV_ATTR_S32(FBNIC_FW_QSFP_ERROR),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_qsfp_read_resp(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_fw_completion *cmpl_data;
+ struct fbnic_dev *fbd = opaque;
+ struct fbnic_tlv_msg *data_hdr;
+ u32 length, offset, page, bank;
+ u8 *data;
+ s32 err;
+
+ /* Verify we have a completion pointer to provide with data */
+ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd,
+ FBNIC_TLV_MSG_ID_QSFP_READ_RESP);
+ if (!cmpl_data)
+ return -ENOSPC;
+
+ bank = fta_get_uint(results, FBNIC_FW_QSFP_BANK);
+ if (bank != cmpl_data->u.qsfp.bank) {
+ dev_warn(fbd->dev, "bank not equal to bank requested: %d vs %d\n",
+ bank, cmpl_data->u.qsfp.bank);
+ err = -EINVAL;
+ goto msg_err;
+ }
+
+ page = fta_get_uint(results, FBNIC_FW_QSFP_PAGE);
+ if (page != cmpl_data->u.qsfp.page) {
+ dev_warn(fbd->dev, "page not equal to page requested: %d vs %d\n",
+ page, cmpl_data->u.qsfp.page);
+ err = -EINVAL;
+ goto msg_err;
+ }
+
+ offset = fta_get_uint(results, FBNIC_FW_QSFP_OFFSET);
+ length = fta_get_uint(results, FBNIC_FW_QSFP_LENGTH);
+
+ if (length != cmpl_data->u.qsfp.length ||
+ offset != cmpl_data->u.qsfp.offset) {
+ dev_warn(fbd->dev,
+ "offset/length not equal to size requested: %d/%d vs %d/%d\n",
+ offset, length,
+ cmpl_data->u.qsfp.offset, cmpl_data->u.qsfp.length);
+ err = -EINVAL;
+ goto msg_err;
+ }
+
+ err = fta_get_sint(results, FBNIC_FW_QSFP_ERROR);
+ if (err)
+ goto msg_err;
+
+ data_hdr = results[FBNIC_FW_QSFP_DATA];
+ if (!data_hdr) {
+ err = -ENODATA;
+ goto msg_err;
+ }
+
+ /* Copy data */
+ data = fbnic_tlv_attr_get_value_ptr(data_hdr);
+ memcpy(cmpl_data->u.qsfp.data, data, length);
+
+msg_err:
+ cmpl_data->result = err;
+ complete(&cmpl_data->done);
+ fbnic_fw_put_cmpl(cmpl_data);
+
+ return err;
+}
+
+/**
* fbnic_fw_xmit_tsene_read_msg - Create and transmit a sensor read request
* @fbd: FBNIC device structure
* @cmpl_data: Completion data structure to store sensor response
@@ -1204,6 +1563,11 @@ static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
fbnic_fw_parse_ownership_resp),
FBNIC_TLV_PARSER(HEARTBEAT_RESP, fbnic_heartbeat_resp_index,
fbnic_fw_parse_heartbeat_resp),
+ FBNIC_TLV_PARSER(COREDUMP_GET_INFO_RESP,
+ fbnic_coredump_info_resp_index,
+ fbnic_fw_parse_coredump_info_resp),
+ FBNIC_TLV_PARSER(COREDUMP_READ_RESP, fbnic_coredump_resp_index,
+ fbnic_fw_parse_coredump_resp),
FBNIC_TLV_PARSER(FW_START_UPGRADE_RESP,
fbnic_fw_start_upgrade_resp_index,
fbnic_fw_parse_fw_start_upgrade_resp),
@@ -1213,6 +1577,9 @@ static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
FBNIC_TLV_PARSER(FW_FINISH_UPGRADE_REQ,
fbnic_fw_finish_upgrade_req_index,
fbnic_fw_parse_fw_finish_upgrade_req),
+ FBNIC_TLV_PARSER(QSFP_READ_RESP,
+ fbnic_qsfp_read_resp_index,
+ fbnic_fw_parse_qsfp_read_resp),
FBNIC_TLV_PARSER(TSENE_READ_RESP,
fbnic_tsene_read_resp_index,
fbnic_fw_parse_tsene_read_resp),
@@ -1410,6 +1777,109 @@ void fbnic_mbx_flush_tx(struct fbnic_dev *fbd)
} while (time_is_after_jiffies(timeout));
}
+int fbnic_fw_xmit_rpc_macda_sync(struct fbnic_dev *fbd)
+{
+ struct fbnic_tlv_msg *mac_array;
+ int i, addr_count = 0, err;
+ struct fbnic_tlv_msg *msg;
+ u32 rx_flags = 0;
+
+ /* Nothing to do if there is no FW to sync with */
+ if (!fbd->mbx[FBNIC_IPC_MBX_TX_IDX].ready)
+ return 0;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_RPC_MAC_SYNC_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ mac_array = fbnic_tlv_attr_nest_start(msg,
+ FBNIC_FW_RPC_MAC_SYNC_UC_ARRAY);
+ if (!mac_array)
+ goto free_message_nospc;
+
+ /* Populate the unicast MAC addrs and capture PROMISC/ALLMULTI flags */
+ for (addr_count = 0, i = FBNIC_RPC_TCAM_MACDA_PROMISC_IDX;
+ i >= fbd->mac_addr_boundary; i--) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ if (mac_addr->state != FBNIC_TCAM_S_VALID)
+ continue;
+ if (test_bit(FBNIC_MAC_ADDR_T_ALLMULTI, mac_addr->act_tcam))
+ rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_ALLMULTI;
+ if (test_bit(FBNIC_MAC_ADDR_T_PROMISC, mac_addr->act_tcam))
+ rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_PROMISC;
+ if (!test_bit(FBNIC_MAC_ADDR_T_UNICAST, mac_addr->act_tcam))
+ continue;
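+ /* UC array full; fall back to promiscuous for the remainder */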
+ if (addr_count == FW_RPC_MAC_SYNC_UC_ARRAY_SIZE) {
+ rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_PROMISC;
+ continue;
+ }
+
+ err = fbnic_tlv_attr_put_value(mac_array,
+ FBNIC_FW_RPC_MAC_SYNC_MAC_ADDR,
+ mac_addr->value.addr8,
+ ETH_ALEN);
+ if (err)
+ goto free_message;
+ addr_count++;
+ }
+
+ /* Close array */
+ fbnic_tlv_attr_nest_stop(msg);
+
+ mac_array = fbnic_tlv_attr_nest_start(msg,
+ FBNIC_FW_RPC_MAC_SYNC_MC_ARRAY);
+ if (!mac_array)
+ goto free_message_nospc;
+
+ /* Repeat for multicast addrs and record BROADCAST/ALLMULTI flags */
+ for (addr_count = 0, i = FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX;
+ i < fbd->mac_addr_boundary; i++) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ if (mac_addr->state != FBNIC_TCAM_S_VALID)
+ continue;
+ if (test_bit(FBNIC_MAC_ADDR_T_BROADCAST, mac_addr->act_tcam))
+ rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_BROADCAST;
+ if (test_bit(FBNIC_MAC_ADDR_T_ALLMULTI, mac_addr->act_tcam))
+ rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_ALLMULTI;
+ if (!test_bit(FBNIC_MAC_ADDR_T_MULTICAST, mac_addr->act_tcam))
+ continue;
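+ /* MC array full; fall back to all-multi for the remainder */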
+ if (addr_count == FW_RPC_MAC_SYNC_MC_ARRAY_SIZE) {
+ rx_flags |= FW_RPC_MAC_SYNC_RX_FLAGS_ALLMULTI;
+ continue;
+ }
+
+ err = fbnic_tlv_attr_put_value(mac_array,
+ FBNIC_FW_RPC_MAC_SYNC_MAC_ADDR,
+ mac_addr->value.addr8,
+ ETH_ALEN);
+ if (err)
+ goto free_message;
+ addr_count++;
+ }
+
+ /* Close array */
+ fbnic_tlv_attr_nest_stop(msg);
+
+ /* Report flags at end of list */
+ err = fbnic_tlv_attr_put_int(msg, FBNIC_FW_RPC_MAC_SYNC_RX_FLAGS,
+ rx_flags);
+ if (err)
+ goto free_message;
+
+ /* Send message off to FW notifying it of the current RPC config */
+ err = fbnic_mbx_map_tlv_msg(fbd, msg);
+ if (err)
+ goto free_message;
+ return 0;
+
+free_message_nospc:
+ err = -ENOSPC;
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version,
const size_t str_sz)
{
@@ -1423,11 +1893,12 @@ void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version,
fw_version, str_sz);
}
-struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type)
+struct fbnic_fw_completion *__fbnic_fw_alloc_cmpl(u32 msg_type,
+ size_t priv_size)
{
struct fbnic_fw_completion *cmpl;
- cmpl = kzalloc(sizeof(*cmpl), GFP_KERNEL);
+ cmpl = kzalloc(sizeof(*cmpl) + priv_size, GFP_KERNEL);
if (!cmpl)
return NULL;
@@ -1438,6 +1909,11 @@ struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type)
return cmpl;
}
+struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type)
+{
+ return __fbnic_fw_alloc_cmpl(msg_type, 0);
+}
+
void fbnic_fw_put_cmpl(struct fbnic_fw_completion *fw_cmpl)
{
kref_put(&fw_cmpl->ref_count, fbnic_fw_release_cmpl_data);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
index fde331696fdd..1ecd777aaada 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
@@ -51,8 +51,10 @@ struct fbnic_fw_cap {
} stored;
u8 active_slot;
u8 bmc_mac_addr[4][ETH_ALEN];
- u8 bmc_present : 1;
- u8 all_multi : 1;
+ u8 bmc_present : 1;
+ u8 need_bmc_tcam_reinit : 1;
+ u8 need_bmc_macda_sync : 1;
+ u8 all_multi : 1;
u8 link_speed;
u8 link_fec;
u32 anti_rollback_version;
@@ -65,10 +67,25 @@ struct fbnic_fw_completion {
int result;
union {
struct {
+ u32 size;
+ } coredump_info;
+ struct {
+ u32 size;
+ u16 stride;
+ u8 *data[];
+ } coredump;
+ struct {
u32 offset;
u32 length;
} fw_update;
struct {
+ u16 length;
+ u8 offset;
+ u8 page;
+ u8 bank;
+ u8 data[] __aligned(sizeof(u32)) __counted_by(length);
+ } qsfp;
+ struct {
s32 millivolts;
s32 millidegrees;
} tsene;
@@ -87,16 +104,28 @@ void fbnic_mbx_flush_tx(struct fbnic_dev *fbd);
int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership);
int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll);
void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd);
+int fbnic_fw_xmit_coredump_info_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ bool force);
+int fbnic_fw_xmit_coredump_read_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ u32 offset, u32 length);
int fbnic_fw_xmit_fw_start_upgrade(struct fbnic_dev *fbd,
struct fbnic_fw_completion *cmpl_data,
unsigned int id, unsigned int len);
int fbnic_fw_xmit_fw_write_chunk(struct fbnic_dev *fbd,
const u8 *data, u32 offset, u16 length,
int cancel_error);
+int fbnic_fw_xmit_qsfp_read_msg(struct fbnic_dev *fbd,
+ struct fbnic_fw_completion *cmpl_data,
+ u32 page, u32 bank, u32 offset, u32 length);
int fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev *fbd,
struct fbnic_fw_completion *cmpl_data);
int fbnic_fw_xmit_send_logs(struct fbnic_dev *fbd, bool enable,
bool send_log_history);
+int fbnic_fw_xmit_rpc_macda_sync(struct fbnic_dev *fbd);
+struct fbnic_fw_completion *__fbnic_fw_alloc_cmpl(u32 msg_type,
+ size_t priv_size);
struct fbnic_fw_completion *fbnic_fw_alloc_cmpl(u32 msg_type);
void fbnic_fw_put_cmpl(struct fbnic_fw_completion *cmpl_data);
@@ -132,17 +161,24 @@ enum {
FBNIC_TLV_MSG_ID_OWNERSHIP_RESP = 0x13,
FBNIC_TLV_MSG_ID_HEARTBEAT_REQ = 0x14,
FBNIC_TLV_MSG_ID_HEARTBEAT_RESP = 0x15,
+ FBNIC_TLV_MSG_ID_COREDUMP_GET_INFO_REQ = 0x18,
+ FBNIC_TLV_MSG_ID_COREDUMP_GET_INFO_RESP = 0x19,
+ FBNIC_TLV_MSG_ID_COREDUMP_READ_REQ = 0x20,
+ FBNIC_TLV_MSG_ID_COREDUMP_READ_RESP = 0x21,
FBNIC_TLV_MSG_ID_FW_START_UPGRADE_REQ = 0x22,
FBNIC_TLV_MSG_ID_FW_START_UPGRADE_RESP = 0x23,
FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_REQ = 0x24,
FBNIC_TLV_MSG_ID_FW_WRITE_CHUNK_RESP = 0x25,
FBNIC_TLV_MSG_ID_FW_FINISH_UPGRADE_REQ = 0x28,
FBNIC_TLV_MSG_ID_FW_FINISH_UPGRADE_RESP = 0x29,
+ FBNIC_TLV_MSG_ID_QSFP_READ_REQ = 0x38,
+ FBNIC_TLV_MSG_ID_QSFP_READ_RESP = 0x39,
FBNIC_TLV_MSG_ID_TSENE_READ_REQ = 0x3C,
FBNIC_TLV_MSG_ID_TSENE_READ_RESP = 0x3D,
FBNIC_TLV_MSG_ID_LOG_SEND_LOGS_REQ = 0x43,
FBNIC_TLV_MSG_ID_LOG_MSG_REQ = 0x44,
FBNIC_TLV_MSG_ID_LOG_MSG_RESP = 0x45,
+ FBNIC_TLV_MSG_ID_RPC_MAC_SYNC_REQ = 0x46,
};
#define FBNIC_FW_CAP_RESP_VERSION_MAJOR CSR_GENMASK(31, 24)
@@ -186,6 +222,16 @@ enum {
};
enum {
+ FBNIC_FW_QSFP_BANK = 0x0,
+ FBNIC_FW_QSFP_PAGE = 0x1,
+ FBNIC_FW_QSFP_OFFSET = 0x2,
+ FBNIC_FW_QSFP_LENGTH = 0x3,
+ FBNIC_FW_QSFP_ERROR = 0x4,
+ FBNIC_FW_QSFP_DATA = 0x5,
+ FBNIC_FW_QSFP_MSG_MAX
+};
+
+enum {
FBNIC_FW_TSENE_THERM = 0x0,
FBNIC_FW_TSENE_VOLT = 0x1,
FBNIC_FW_TSENE_ERROR = 0x2,
@@ -194,10 +240,37 @@ enum {
enum {
FBNIC_FW_OWNERSHIP_FLAG = 0x0,
+ FBNIC_FW_OWNERSHIP_TIME = 0x1,
FBNIC_FW_OWNERSHIP_MSG_MAX
};
enum {
+ FBNIC_FW_HEARTBEAT_UPTIME = 0x0,
+ FBNIC_FW_HEARTBEAT_NUMBER_OF_MESSAGES = 0x1,
+ FBNIC_FW_HEARTBEAT_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_COREDUMP_REQ_INFO_CREATE = 0x0,
+ FBNIC_FW_COREDUMP_REQ_INFO_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_COREDUMP_INFO_AVAILABLE = 0x0,
+ FBNIC_FW_COREDUMP_INFO_SIZE = 0x1,
+ FBNIC_FW_COREDUMP_INFO_ERROR = 0x2,
+ FBNIC_FW_COREDUMP_INFO_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_COREDUMP_READ_OFFSET = 0x0,
+ FBNIC_FW_COREDUMP_READ_LENGTH = 0x1,
+ FBNIC_FW_COREDUMP_READ_DATA = 0x2,
+ FBNIC_FW_COREDUMP_READ_ERROR = 0x3,
+ FBNIC_FW_COREDUMP_READ_MSG_MAX
+};
+
+enum {
FBNIC_FW_START_UPGRADE_ERROR = 0x0,
FBNIC_FW_START_UPGRADE_SECTION = 0x1,
FBNIC_FW_START_UPGRADE_IMAGE_LENGTH = 0x2,
@@ -235,4 +308,19 @@ enum {
FBNIC_FW_LOG_MSG_MAX
};
+enum {
+ FBNIC_FW_RPC_MAC_SYNC_RX_FLAGS = 0x0,
+ FBNIC_FW_RPC_MAC_SYNC_UC_ARRAY = 0x1,
+ FBNIC_FW_RPC_MAC_SYNC_MC_ARRAY = 0x2,
+ FBNIC_FW_RPC_MAC_SYNC_MAC_ADDR = 0x3,
+ FBNIC_FW_RPC_MAC_SYNC_MSG_MAX
+};
+
+#define FW_RPC_MAC_SYNC_RX_FLAGS_PROMISC 1
+#define FW_RPC_MAC_SYNC_RX_FLAGS_ALLMULTI 2
+#define FW_RPC_MAC_SYNC_RX_FLAGS_BROADCAST 4
+
+#define FW_RPC_MAC_SYNC_UC_ARRAY_SIZE 8
+#define FW_RPC_MAC_SYNC_MC_ARRAY_SIZE 8
+
#endif /* _FBNIC_FW_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.c
index c1663f042245..85a883dba385 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.c
@@ -72,7 +72,7 @@ void fbnic_fw_log_free(struct fbnic_dev *fbd)
}
int fbnic_fw_log_write(struct fbnic_dev *fbd, u64 index, u32 timestamp,
- char *msg)
+ const char *msg)
{
struct fbnic_fw_log_entry *entry, *head, *tail, *next;
struct fbnic_fw_log *log = &fbd->fw_log;
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.h
index cb6555f40a24..50ec79003108 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw_log.h
@@ -41,5 +41,5 @@ void fbnic_fw_log_disable(struct fbnic_dev *fbd);
int fbnic_fw_log_init(struct fbnic_dev *fbd);
void fbnic_fw_log_free(struct fbnic_dev *fbd);
int fbnic_fw_log_write(struct fbnic_dev *fbd, u64 index, u32 timestamp,
- char *msg);
+ const char *msg);
#endif /* _FBNIC_FW_LOG_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c
index 4223d8100e64..8b9b2076beec 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+#include <linux/rtnetlink.h>
+
#include "fbnic.h"
static void fbnic_hw_stat_rst32(struct fbnic_dev *fbd, u32 reg,
@@ -421,9 +423,9 @@ static void fbnic_get_hw_rxq_stats32(struct fbnic_dev *fbd,
void fbnic_get_hw_q_stats(struct fbnic_dev *fbd,
struct fbnic_hw_q_stats *hw_q)
{
- spin_lock(&fbd->hw_stats_lock);
+ spin_lock(&fbd->hw_stats.lock);
fbnic_get_hw_rxq_stats32(fbd, hw_q);
- spin_unlock(&fbd->hw_stats_lock);
+ spin_unlock(&fbd->hw_stats.lock);
}
static void fbnic_reset_pcie_stats_asic(struct fbnic_dev *fbd,
@@ -510,20 +512,68 @@ static void fbnic_get_pcie_stats_asic64(struct fbnic_dev *fbd,
&pcie->ob_rd_no_np_cred);
}
+static void fbnic_reset_phy_stats(struct fbnic_dev *fbd,
+ struct fbnic_phy_stats *phy_stats)
+{
+ const struct fbnic_mac *mac = fbd->mac;
+
+ mac->get_fec_stats(fbd, true, &phy_stats->fec);
+ mac->get_pcs_stats(fbd, true, &phy_stats->pcs);
+}
+
+static void fbnic_get_phy_stats32(struct fbnic_dev *fbd,
+ struct fbnic_phy_stats *phy_stats)
+{
+ const struct fbnic_mac *mac = fbd->mac;
+
+ mac->get_fec_stats(fbd, false, &phy_stats->fec);
+ mac->get_pcs_stats(fbd, false, &phy_stats->pcs);
+}
+
+static void fbnic_reset_hw_mac_stats(struct fbnic_dev *fbd,
+ struct fbnic_mac_stats *mac_stats)
+{
+ const struct fbnic_mac *mac = fbd->mac;
+
+ mac->get_eth_mac_stats(fbd, true, &mac_stats->eth_mac);
+ mac->get_pause_stats(fbd, true, &mac_stats->pause);
+ mac->get_eth_ctrl_stats(fbd, true, &mac_stats->eth_ctrl);
+ mac->get_rmon_stats(fbd, true, &mac_stats->rmon);
+}
+
void fbnic_reset_hw_stats(struct fbnic_dev *fbd)
{
- spin_lock(&fbd->hw_stats_lock);
+ spin_lock(&fbd->hw_stats.lock);
+ fbnic_reset_phy_stats(fbd, &fbd->hw_stats.phy);
fbnic_reset_tmi_stats(fbd, &fbd->hw_stats.tmi);
fbnic_reset_tti_stats(fbd, &fbd->hw_stats.tti);
fbnic_reset_rpc_stats(fbd, &fbd->hw_stats.rpc);
fbnic_reset_rxb_stats(fbd, &fbd->hw_stats.rxb);
fbnic_reset_hw_rxq_stats(fbd, fbd->hw_stats.hw_q);
fbnic_reset_pcie_stats_asic(fbd, &fbd->hw_stats.pcie);
- spin_unlock(&fbd->hw_stats_lock);
+ spin_unlock(&fbd->hw_stats.lock);
+
+ /* Once registered, the only other access to MAC stats is via the
+ * ethtool API which is protected by the rtnl_lock. The call to
+ * fbnic_reset_hw_stats() during PCI recovery is also protected
+ * by the rtnl_lock, hence we don't need the spinlock to access
+ * the MAC stats.
+ */
+ if (fbd->netdev)
+ ASSERT_RTNL();
+ fbnic_reset_hw_mac_stats(fbd, &fbd->hw_stats.mac);
+}
+
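+/* Initialize the stats lock, then take the initial baseline snapshot */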
+void fbnic_init_hw_stats(struct fbnic_dev *fbd)
+{
+ spin_lock_init(&fbd->hw_stats.lock);
+
+ fbnic_reset_hw_stats(fbd);
}
static void __fbnic_get_hw_stats32(struct fbnic_dev *fbd)
{
+ fbnic_get_phy_stats32(fbd, &fbd->hw_stats.phy);
fbnic_get_tmi_stats32(fbd, &fbd->hw_stats.tmi);
fbnic_get_tti_stats32(fbd, &fbd->hw_stats.tti);
fbnic_get_rpc_stats32(fbd, &fbd->hw_stats.rpc);
@@ -533,19 +583,19 @@ static void __fbnic_get_hw_stats32(struct fbnic_dev *fbd)
void fbnic_get_hw_stats32(struct fbnic_dev *fbd)
{
- spin_lock(&fbd->hw_stats_lock);
+ spin_lock(&fbd->hw_stats.lock);
__fbnic_get_hw_stats32(fbd);
- spin_unlock(&fbd->hw_stats_lock);
+ spin_unlock(&fbd->hw_stats.lock);
}
void fbnic_get_hw_stats(struct fbnic_dev *fbd)
{
- spin_lock(&fbd->hw_stats_lock);
+ spin_lock(&fbd->hw_stats.lock);
__fbnic_get_hw_stats32(fbd);
fbnic_get_tmi_stats(fbd, &fbd->hw_stats.tmi);
fbnic_get_tti_stats(fbd, &fbd->hw_stats.tti);
fbnic_get_rxb_stats(fbd, &fbd->hw_stats.rxb);
fbnic_get_pcie_stats_asic64(fbd, &fbd->hw_stats.pcie);
- spin_unlock(&fbd->hw_stats_lock);
+ spin_unlock(&fbd->hw_stats.lock);
}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h
index 4fe239717497..aa3f429a9aed 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h
@@ -5,6 +5,7 @@
#define _FBNIC_HW_STATS_H_
#include <linux/ethtool.h>
+#include <linux/spinlock.h>
#include "fbnic_csr.h"
@@ -22,6 +23,16 @@ struct fbnic_hw_stat {
struct fbnic_stat_counter bytes;
};
+struct fbnic_fec_stats {
+ struct fbnic_stat_counter corrected_blocks, uncorrectable_blocks;
+};
+
+struct fbnic_pcs_stats {
+ struct {
+ struct fbnic_stat_counter lanes[FBNIC_PCS_MAX_LANES];
+ } SymbolErrorDuringCarrier;
+};
+
/* Note: not updated by fbnic_get_hw_stats() */
struct fbnic_eth_ctrl_stats {
struct fbnic_stat_counter MACControlFramesTransmitted;
@@ -39,6 +50,12 @@ struct fbnic_rmon_stats {
struct fbnic_stat_counter hist_tx[ETHTOOL_RMON_HIST_MAX];
};
+/* Note: not updated by fbnic_get_hw_stats() */
+struct fbnic_pause_stats {
+ struct fbnic_stat_counter tx_pause_frames;
+ struct fbnic_stat_counter rx_pause_frames;
+};
+
struct fbnic_eth_mac_stats {
struct fbnic_stat_counter FramesTransmittedOK;
struct fbnic_stat_counter FramesReceivedOK;
@@ -55,8 +72,14 @@ struct fbnic_eth_mac_stats {
struct fbnic_stat_counter FrameTooLongErrors;
};
+struct fbnic_phy_stats {
+ struct fbnic_fec_stats fec;
+ struct fbnic_pcs_stats pcs;
+};
+
struct fbnic_mac_stats {
struct fbnic_eth_mac_stats eth_mac;
+ struct fbnic_pause_stats pause;
struct fbnic_eth_ctrl_stats eth_ctrl;
struct fbnic_rmon_stats rmon;
};
@@ -115,6 +138,7 @@ struct fbnic_pcie_stats {
};
struct fbnic_hw_stats {
+ struct fbnic_phy_stats phy;
struct fbnic_mac_stats mac;
struct fbnic_tmi_stats tmi;
struct fbnic_tti_stats tti;
@@ -122,11 +146,15 @@ struct fbnic_hw_stats {
struct fbnic_rxb_stats rxb;
struct fbnic_hw_q_stats hw_q[FBNIC_MAX_QUEUES];
struct fbnic_pcie_stats pcie;
+
+ /* Lock protecting the access to hw stats */
+ spinlock_t lock;
};
u64 fbnic_stat_rd64(struct fbnic_dev *fbd, u32 reg, u32 offset);
void fbnic_reset_hw_stats(struct fbnic_dev *fbd);
+void fbnic_init_hw_stats(struct fbnic_dev *fbd);
void fbnic_get_hw_q_stats(struct fbnic_dev *fbd,
struct fbnic_hw_q_stats *hw_q);
void fbnic_get_hw_stats32(struct fbnic_dev *fbd);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
index fd8d67f9048e..2a84bd1d7e26 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
@@ -83,8 +83,16 @@ static void fbnic_mac_init_axi(struct fbnic_dev *fbd)
static void fbnic_mac_init_qm(struct fbnic_dev *fbd)
{
+ u64 default_meta = FIELD_PREP(FBNIC_TWD_L2_HLEN_MASK, ETH_HLEN) |
+ FBNIC_TWD_FLAG_REQ_COMPLETION;
u32 clock_freq;
+ /* Configure default TWQ Metadata descriptor */
+ wr32(fbd, FBNIC_QM_TWQ_DEFAULT_META_L,
+ lower_32_bits(default_meta));
+ wr32(fbd, FBNIC_QM_TWQ_DEFAULT_META_H,
+ upper_32_bits(default_meta));
+
/* Configure TSO behavior */
wr32(fbd, FBNIC_QM_TQS_CTL0,
FIELD_PREP(FBNIC_QM_TQS_CTL0_LSO_TS_MASK,
@@ -632,6 +640,50 @@ static void fbnic_mac_link_up_asic(struct fbnic_dev *fbd,
}
static void
+fbnic_pcs_rsfec_stat_rd32(struct fbnic_dev *fbd, u32 reg, bool reset,
+ struct fbnic_stat_counter *stat)
+{
+ u32 pcs_rsfec_stat;
+
+ /* The PCS/RSFEC counters are only 16b wide each, spread across two
+ * consecutive 32b registers. Read the lower half, then shift the
+ * upper half into the top 16 bits to form a full 32b stat.
+ *
+ * Read ordering must be lower reg followed by upper reg.
+ */
+ pcs_rsfec_stat = rd32(fbd, reg) & 0xffff;
+ pcs_rsfec_stat |= rd32(fbd, reg + 1) << 16;
+
+ /* RSFEC registers clear themselves upon being read so there is no
+ * need to store the old_reg_value.
+ */
+ if (!reset)
+ stat->value += pcs_rsfec_stat;
+}
+
+static void
+fbnic_mac_get_fec_stats(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_fec_stats *s)
+{
+ fbnic_pcs_rsfec_stat_rd32(fbd, FBNIC_RSFEC_CCW_LO(0), reset,
+ &s->corrected_blocks);
+ fbnic_pcs_rsfec_stat_rd32(fbd, FBNIC_RSFEC_NCCW_LO(0), reset,
+ &s->uncorrectable_blocks);
+}
+
+static void
+fbnic_mac_get_pcs_stats(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_pcs_stats *s)
+{
+ int i;
+
+ for (i = 0; i < FBNIC_PCS_MAX_LANES; i++)
+ fbnic_pcs_rsfec_stat_rd32(fbd, FBNIC_PCS_SYMBLERR_LO(i), reset,
+ &s->SymbolErrorDuringCarrier.lanes[i]);
+}
+
+static void
fbnic_mac_get_eth_mac_stats(struct fbnic_dev *fbd, bool reset,
struct fbnic_eth_mac_stats *mac_stats)
{
@@ -666,6 +718,16 @@ fbnic_mac_get_eth_mac_stats(struct fbnic_dev *fbd, bool reset,
}
static void
+fbnic_mac_get_pause_stats(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_pause_stats *pause_stats)
+{
+ fbnic_mac_stat_rd64(fbd, reset, pause_stats->tx_pause_frames,
+ MAC_STAT_TX_XOFF_STB);
+ fbnic_mac_stat_rd64(fbd, reset, pause_stats->rx_pause_frames,
+ MAC_STAT_RX_XOFF_STB);
+}
+
+static void
fbnic_mac_get_eth_ctrl_stats(struct fbnic_dev *fbd, bool reset,
struct fbnic_eth_ctrl_stats *ctrl_stats)
{
@@ -809,7 +871,10 @@ static const struct fbnic_mac fbnic_mac_asic = {
.pcs_disable = fbnic_pcs_disable_asic,
.pcs_get_link = fbnic_pcs_get_link_asic,
.pcs_get_link_event = fbnic_pcs_get_link_event_asic,
+ .get_fec_stats = fbnic_mac_get_fec_stats,
+ .get_pcs_stats = fbnic_mac_get_pcs_stats,
.get_eth_mac_stats = fbnic_mac_get_eth_mac_stats,
+ .get_pause_stats = fbnic_mac_get_pause_stats,
.get_eth_ctrl_stats = fbnic_mac_get_eth_ctrl_stats,
.get_rmon_stats = fbnic_mac_get_rmon_stats,
.link_down = fbnic_mac_link_down_asic,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
index 86fa06da2b3e..ede5ff0dae22 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
@@ -79,8 +79,14 @@ struct fbnic_mac {
bool (*pcs_get_link)(struct fbnic_dev *fbd);
int (*pcs_get_link_event)(struct fbnic_dev *fbd);
+ void (*get_fec_stats)(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_fec_stats *fec_stats);
+ void (*get_pcs_stats)(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_pcs_stats *pcs_stats);
void (*get_eth_mac_stats)(struct fbnic_dev *fbd, bool reset,
struct fbnic_eth_mac_stats *mac_stats);
+ void (*get_pause_stats)(struct fbnic_dev *fbd, bool reset,
+ struct fbnic_pause_stats *pause_stats);
void (*get_eth_ctrl_stats)(struct fbnic_dev *fbd, bool reset,
struct fbnic_eth_ctrl_stats *ctrl_stats);
void (*get_rmon_stats)(struct fbnic_dev *fbd, bool reset,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
index 7bd7812d9c06..e95be0e7bd9e 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -33,7 +33,7 @@ int __fbnic_open(struct fbnic_net *fbn)
dev_warn(fbd->dev,
"Error %d sending host ownership message to the firmware\n",
err);
- goto free_resources;
+ goto err_reset_queues;
}
err = fbnic_time_start(fbn);
@@ -52,11 +52,15 @@ int __fbnic_open(struct fbnic_net *fbn)
fbnic_bmc_rpc_init(fbd);
fbnic_rss_reinit(fbd, fbn);
+ phylink_resume(fbn->phylink);
+
return 0;
time_stop:
fbnic_time_stop(fbn);
release_ownership:
fbnic_fw_xmit_ownership_msg(fbn->fbd, false);
+err_reset_queues:
+ fbnic_reset_netif_queues(fbn);
free_resources:
fbnic_free_resources(fbn);
free_napi_vectors:
@@ -82,6 +86,8 @@ static int fbnic_stop(struct net_device *netdev)
{
struct fbnic_net *fbn = netdev_priv(netdev);
+ phylink_suspend(fbn->phylink, fbnic_bmc_present(fbn->fbd));
+
fbnic_down(fbn);
fbnic_pcs_free_irq(fbn->fbd);
@@ -177,11 +183,10 @@ static int fbnic_mc_unsync(struct net_device *netdev, const unsigned char *addr)
return ret;
}
-void __fbnic_set_rx_mode(struct net_device *netdev)
+void __fbnic_set_rx_mode(struct fbnic_dev *fbd)
{
- struct fbnic_net *fbn = netdev_priv(netdev);
bool uc_promisc = false, mc_promisc = false;
- struct fbnic_dev *fbd = fbn->fbd;
+ struct net_device *netdev = fbd->netdev;
struct fbnic_mac_addr *mac_addr;
int err;
@@ -218,49 +223,8 @@ void __fbnic_set_rx_mode(struct net_device *netdev)
uc_promisc |= !!(netdev->flags & IFF_PROMISC);
mc_promisc |= !!(netdev->flags & IFF_ALLMULTI) || uc_promisc;
- /* Populate last TCAM entry with promiscuous entry and 0/1 bit mask */
- mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_PROMISC_IDX];
- if (uc_promisc) {
- if (!is_zero_ether_addr(mac_addr->value.addr8) ||
- mac_addr->state != FBNIC_TCAM_S_VALID) {
- eth_zero_addr(mac_addr->value.addr8);
- eth_broadcast_addr(mac_addr->mask.addr8);
- clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
- mac_addr->act_tcam);
- set_bit(FBNIC_MAC_ADDR_T_PROMISC,
- mac_addr->act_tcam);
- mac_addr->state = FBNIC_TCAM_S_ADD;
- }
- } else if (mc_promisc &&
- (!fbnic_bmc_present(fbd) || !fbd->fw_cap.all_multi)) {
- /* We have to add a special handler for multicast as the
- * BMC may have an all-multi rule already in place. As such
- * adding a rule ourselves won't do any good so we will have
- * to modify the rules for the ALL MULTI below if the BMC
- * already has the rule in place.
- */
- if (!is_multicast_ether_addr(mac_addr->value.addr8) ||
- mac_addr->state != FBNIC_TCAM_S_VALID) {
- eth_zero_addr(mac_addr->value.addr8);
- eth_broadcast_addr(mac_addr->mask.addr8);
- mac_addr->value.addr8[0] ^= 1;
- mac_addr->mask.addr8[0] ^= 1;
- set_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
- mac_addr->act_tcam);
- clear_bit(FBNIC_MAC_ADDR_T_PROMISC,
- mac_addr->act_tcam);
- mac_addr->state = FBNIC_TCAM_S_ADD;
- }
- } else if (mac_addr->state == FBNIC_TCAM_S_VALID) {
- if (test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam)) {
- clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
- mac_addr->act_tcam);
- clear_bit(FBNIC_MAC_ADDR_T_PROMISC,
- mac_addr->act_tcam);
- } else {
- mac_addr->state = FBNIC_TCAM_S_DELETE;
- }
- }
+ /* Update the promiscuous rules */
+ fbnic_promisc_sync(fbd, uc_promisc, mc_promisc);
/* Add rules for BMC all multicast if it is enabled */
fbnic_bmc_rpc_all_multi_config(fbd, mc_promisc);
@@ -276,9 +240,12 @@ void __fbnic_set_rx_mode(struct net_device *netdev)
static void fbnic_set_rx_mode(struct net_device *netdev)
{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+
/* No need to update the hardware if we are not running */
if (netif_running(netdev))
- __fbnic_set_rx_mode(netdev);
+ __fbnic_set_rx_mode(fbd);
}
static int fbnic_set_mac(struct net_device *netdev, void *p)
@@ -295,10 +262,9 @@ static int fbnic_set_mac(struct net_device *netdev, void *p)
return 0;
}
-void fbnic_clear_rx_mode(struct net_device *netdev)
+void fbnic_clear_rx_mode(struct fbnic_dev *fbd)
{
- struct fbnic_net *fbn = netdev_priv(netdev);
- struct fbnic_dev *fbd = fbn->fbd;
+ struct net_device *netdev = fbd->netdev;
int idx;
for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
@@ -405,11 +371,12 @@ static void fbnic_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats64)
{
u64 rx_bytes, rx_packets, rx_dropped = 0, rx_errors = 0;
+ u64 rx_over = 0, rx_missed = 0, rx_length = 0;
u64 tx_bytes, tx_packets, tx_dropped = 0;
struct fbnic_net *fbn = netdev_priv(dev);
struct fbnic_dev *fbd = fbn->fbd;
struct fbnic_queue_stats *stats;
- u64 rx_over = 0, rx_missed = 0;
unsigned int start, i;
fbnic_get_hw_stats(fbd);
@@ -420,15 +387,17 @@ static void fbnic_get_stats64(struct net_device *dev,
tx_packets = stats->packets;
tx_dropped = stats->dropped;
- stats64->tx_bytes = tx_bytes;
- stats64->tx_packets = tx_packets;
- stats64->tx_dropped = tx_dropped;
-
/* Record drops from Tx HW Datapath */
+ spin_lock(&fbd->hw_stats.lock);
tx_dropped += fbd->hw_stats.tmi.drop.frames.value +
fbd->hw_stats.tti.cm_drop.frames.value +
fbd->hw_stats.tti.frame_drop.frames.value +
fbd->hw_stats.tti.tbi_drop.frames.value;
+ spin_unlock(&fbd->hw_stats.lock);
+
+ stats64->tx_bytes = tx_bytes;
+ stats64->tx_packets = tx_packets;
+ stats64->tx_dropped = tx_dropped;
for (i = 0; i < fbn->num_tx_queues; i++) {
struct fbnic_ring *txr = fbn->tx[i];
@@ -455,7 +424,7 @@ static void fbnic_get_stats64(struct net_device *dev,
rx_packets = stats->packets;
rx_dropped = stats->dropped;
- spin_lock(&fbd->hw_stats_lock);
+ spin_lock(&fbd->hw_stats.lock);
/* Record drops for the host FIFOs.
* 4: network to Host, 6: BMC to Host
* Exclude the BMC and MC FIFOs as those stats may contain drops
@@ -475,7 +444,7 @@ static void fbnic_get_stats64(struct net_device *dev,
/* Report packets with errors */
rx_errors += fbd->hw_stats.hw_q[i].rde_pkt_err.value;
}
- spin_unlock(&fbd->hw_stats_lock);
+ spin_unlock(&fbd->hw_stats.lock);
stats64->rx_bytes = rx_bytes;
stats64->rx_packets = rx_packets;
@@ -485,6 +454,7 @@ static void fbnic_get_stats64(struct net_device *dev,
stats64->rx_missed_errors = rx_missed;
for (i = 0; i < fbn->num_rx_queues; i++) {
+ struct fbnic_ring *xdpr = fbn->tx[FBNIC_MAX_TXQS + i];
struct fbnic_ring *rxr = fbn->rx[i];
if (!rxr)
@@ -496,12 +466,64 @@ static void fbnic_get_stats64(struct net_device *dev,
rx_bytes = stats->bytes;
rx_packets = stats->packets;
rx_dropped = stats->dropped;
+ rx_length = stats->rx.length_errors;
} while (u64_stats_fetch_retry(&stats->syncp, start));
stats64->rx_bytes += rx_bytes;
stats64->rx_packets += rx_packets;
stats64->rx_dropped += rx_dropped;
+ stats64->rx_errors += rx_length;
+ stats64->rx_length_errors += rx_length;
+
+ if (!xdpr)
+ continue;
+
+ stats = &xdpr->stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ tx_bytes = stats->bytes;
+ tx_packets = stats->packets;
+ tx_dropped = stats->dropped;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ stats64->tx_bytes += tx_bytes;
+ stats64->tx_packets += tx_packets;
+ stats64->tx_dropped += tx_dropped;
+ }
+}
+
+bool fbnic_check_split_frames(struct bpf_prog *prog, unsigned int mtu,
+ u32 hds_thresh)
+{
+ if (!prog)
+ return false;
+
+ if (prog->aux->xdp_has_frags)
+ return false;
+
+ return mtu + ETH_HLEN > hds_thresh;
+}
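
/* Worked example (illustrative numbers): with mtu = 1500 and
 * hds_thresh = 1536, a program without xdp_has_frags is allowed since
 * 1500 + ETH_HLEN (14) = 1514 <= 1536; raising the MTU to 3000 makes
 * this return true and fbnic_bpf() below rejects the attach.
 */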
+
+static int fbnic_bpf(struct net_device *netdev, struct netdev_bpf *bpf)
+{
+ struct bpf_prog *prog = bpf->prog, *prev_prog;
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ if (bpf->command != XDP_SETUP_PROG)
+ return -EINVAL;
+
+ if (fbnic_check_split_frames(prog, netdev->mtu,
+ fbn->hds_thresh)) {
+ NL_SET_ERR_MSG_MOD(bpf->extack,
+ "MTU too high, or HDS threshold is too low for single buffer XDP");
+ return -EOPNOTSUPP;
}
+
+ prev_prog = xchg(&fbn->xdp_prog, prog);
+ if (prev_prog)
+ bpf_prog_put(prev_prog);
+
+ return 0;
}
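
/* Minimal user-space sketch (not part of this patch; "prog.o" and the
 * helper name are hypothetical, libbpf >= 0.8 assumed) of the request
 * that arrives here as XDP_SETUP_PROG:
 */
#include <net/if.h>
#include <linux/if_link.h>
#include <bpf/libbpf.h>

static int attach_xdp_native(const char *ifname)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int ifindex = if_nametoindex(ifname);

	if (!ifindex)
		return -1;

	obj = bpf_object__open_file("prog.o", NULL);
	if (!obj || bpf_object__load(obj))
		return -1;

	prog = bpf_object__next_program(obj, NULL);
	if (!prog)
		return -1;

	/* XDP_FLAGS_DRV_MODE requests the native driver path (ndo_bpf) */
	return bpf_xdp_attach(ifindex, bpf_program__fd(prog),
			      XDP_FLAGS_DRV_MODE, NULL);
}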
static const struct net_device_ops fbnic_netdev_ops = {
@@ -513,6 +535,7 @@ static const struct net_device_ops fbnic_netdev_ops = {
.ndo_set_mac_address = fbnic_set_mac,
.ndo_set_rx_mode = fbnic_set_rx_mode,
.ndo_get_stats64 = fbnic_get_stats64,
+ .ndo_bpf = fbnic_bpf,
.ndo_hwtstamp_get = fbnic_hwtstamp_get,
.ndo_hwtstamp_set = fbnic_hwtstamp_set,
};
@@ -520,17 +543,21 @@ static const struct net_device_ops fbnic_netdev_ops = {
static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
struct netdev_queue_stats_rx *rx)
{
+ u64 bytes, packets, alloc_fail, alloc_fail_bdq;
struct fbnic_net *fbn = netdev_priv(dev);
struct fbnic_ring *rxr = fbn->rx[idx];
struct fbnic_dev *fbd = fbn->fbd;
struct fbnic_queue_stats *stats;
- u64 bytes, packets, alloc_fail;
u64 csum_complete, csum_none;
+ struct fbnic_q_triad *qt;
unsigned int start;
if (!rxr)
return;
+ /* fbn->rx points to completion queues */
+ qt = container_of(rxr, struct fbnic_q_triad, cmpl);
+
stats = &rxr->stats;
do {
start = u64_stats_fetch_begin(&stats->syncp);
@@ -541,6 +568,20 @@ static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
csum_none = stats->rx.csum_none;
} while (u64_stats_fetch_retry(&stats->syncp, start));
+ stats = &qt->sub0.stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ alloc_fail_bdq = stats->bdq.alloc_failed;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+ alloc_fail += alloc_fail_bdq;
+
+ stats = &qt->sub1.stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ alloc_fail_bdq = stats->bdq.alloc_failed;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+ alloc_fail += alloc_fail_bdq;
+
rx->bytes = bytes;
rx->packets = packets;
rx->alloc_fail = alloc_fail;
@@ -549,12 +590,12 @@ static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
fbnic_get_hw_q_stats(fbd, fbd->hw_stats.hw_q);
- spin_lock(&fbd->hw_stats_lock);
+ spin_lock(&fbd->hw_stats.lock);
rx->hw_drop_overruns = fbd->hw_stats.hw_q[idx].rde_pkt_cq_drop.value +
fbd->hw_stats.hw_q[idx].rde_pkt_bdq_drop.value;
rx->hw_drops = fbd->hw_stats.hw_q[idx].rde_pkt_err.value +
rx->hw_drop_overruns;
- spin_unlock(&fbd->hw_stats_lock);
+ spin_unlock(&fbd->hw_stats.lock);
}
static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
@@ -564,6 +605,7 @@ static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
struct fbnic_ring *txr = fbn->tx[idx];
struct fbnic_queue_stats *stats;
u64 stop, wake, csum, lso;
+ struct fbnic_ring *xdpr;
unsigned int start;
u64 bytes, packets;
@@ -587,6 +629,19 @@ static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
tx->hw_gso_wire_packets = lso;
tx->stop = stop;
tx->wake = wake;
+
+ xdpr = fbn->tx[FBNIC_MAX_TXQS + idx];
+ if (xdpr) {
+ stats = &xdpr->stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ bytes = stats->bytes;
+ packets = stats->packets;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ tx->bytes += bytes;
+ tx->packets += packets;
+ }
}
static void fbnic_get_base_stats(struct net_device *dev,
@@ -604,7 +659,8 @@ static void fbnic_get_base_stats(struct net_device *dev,
rx->bytes = fbn->rx_stats.bytes;
rx->packets = fbn->rx_stats.packets;
- rx->alloc_fail = fbn->rx_stats.rx.alloc_failed;
+ rx->alloc_fail = fbn->rx_stats.rx.alloc_failed +
+ fbn->bdq_stats.bdq.alloc_failed;
rx->csum_complete = fbn->rx_stats.rx.csum_complete;
rx->csum_none = fbn->rx_stats.rx.csum_none;
}
@@ -674,6 +730,8 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
netdev->netdev_ops = &fbnic_netdev_ops;
netdev->stat_ops = &fbnic_stat_ops;
+ netdev->queue_mgmt_ops = &fbnic_queue_mgmt_ops;
+ netdev->netmem_tx = true;
fbnic_set_ethtool_ops(netdev);
@@ -691,6 +749,10 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
fbn->rx_usecs = FBNIC_RX_USECS_DEFAULT;
fbn->rx_max_frames = FBNIC_RX_FRAMES_DEFAULT;
+ /* Initialize the hds_thresh */
+ netdev->cfg->hds_thresh = FBNIC_HDS_THRESH_DEFAULT;
+ fbn->hds_thresh = FBNIC_HDS_THRESH_DEFAULT;
+
default_queues = netif_get_num_default_rss_queues();
if (default_queues > fbd->max_num_queues)
default_queues = fbd->max_num_queues;
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
index 86576ae04262..b0a87c57910f 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
@@ -18,7 +18,9 @@
#define FBNIC_TUN_GSO_FEATURES NETIF_F_GSO_IPXIP6
struct fbnic_net {
- struct fbnic_ring *tx[FBNIC_MAX_TXQS];
+ struct bpf_prog *xdp_prog;
+
+ struct fbnic_ring *tx[FBNIC_MAX_TXQS + FBNIC_MAX_XDPQS];
struct fbnic_ring *rx[FBNIC_MAX_RXQS];
struct fbnic_napi_vector *napi[FBNIC_MAX_NAPI_VECTORS];
@@ -31,6 +33,8 @@ struct fbnic_net {
u32 ppq_size;
u32 rcq_size;
+ u32 hds_thresh;
+
u16 rx_usecs;
u16 tx_usecs;
@@ -64,6 +68,7 @@ struct fbnic_net {
/* Storage for stats after ring destruction */
struct fbnic_queue_stats tx_stats;
struct fbnic_queue_stats rx_stats;
+ struct fbnic_queue_stats bdq_stats;
u64 link_down_events;
/* Time stamping filter config */
@@ -90,8 +95,8 @@ void fbnic_time_init(struct fbnic_net *fbn);
int fbnic_time_start(struct fbnic_net *fbn);
void fbnic_time_stop(struct fbnic_net *fbn);
-void __fbnic_set_rx_mode(struct net_device *netdev);
-void fbnic_clear_rx_mode(struct net_device *netdev);
+void __fbnic_set_rx_mode(struct fbnic_dev *fbd);
+void fbnic_clear_rx_mode(struct fbnic_dev *fbd);
void fbnic_phylink_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause);
@@ -102,4 +107,7 @@ int fbnic_phylink_ethtool_ksettings_get(struct net_device *netdev,
int fbnic_phylink_get_fecparam(struct net_device *netdev,
struct ethtool_fecparam *fecparam);
int fbnic_phylink_init(struct net_device *netdev);
+
+bool fbnic_check_split_frames(struct bpf_prog *prog,
+ unsigned int mtu, u32 hds_threshold);
#endif /* _FBNIC_NETDEV_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
index b70e4cadb37b..a7a6b4db8016 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
@@ -118,14 +118,12 @@ static void fbnic_service_task_start(struct fbnic_net *fbn)
struct fbnic_dev *fbd = fbn->fbd;
schedule_delayed_work(&fbd->service_task, HZ);
- phylink_resume(fbn->phylink);
}
static void fbnic_service_task_stop(struct fbnic_net *fbn)
{
struct fbnic_dev *fbd = fbn->fbd;
- phylink_suspend(fbn->phylink, fbnic_bmc_present(fbd));
cancel_delayed_work(&fbd->service_task);
}
@@ -137,7 +135,7 @@ void fbnic_up(struct fbnic_net *fbn)
fbnic_rss_reinit_hw(fbn->fbd, fbn);
- __fbnic_set_rx_mode(fbn->netdev);
+ __fbnic_set_rx_mode(fbn->fbd);
/* Enable Tx/Rx processing */
fbnic_napi_enable(fbn);
@@ -154,7 +152,7 @@ void fbnic_down_noidle(struct fbnic_net *fbn)
fbnic_napi_disable(fbn);
netif_tx_disable(fbn->netdev);
- fbnic_clear_rx_mode(fbn->netdev);
+ fbnic_clear_rx_mode(fbn->fbd);
fbnic_clear_rules(fbn->fbd);
fbnic_rss_disable_hw(fbn->fbd);
fbnic_disable(fbn);
@@ -169,6 +167,20 @@ void fbnic_down(struct fbnic_net *fbn)
fbnic_flush(fbn);
}
+static int fbnic_fw_config_after_crash(struct fbnic_dev *fbd)
+{
+ if (fbnic_fw_xmit_ownership_msg(fbd, true)) {
+ dev_err(fbd->dev, "NIC failed to take ownership\n");
+
+ return -1;
+ }
+
+ fbnic_rpc_reset_valid_entries(fbd);
+ __fbnic_set_rx_mode(fbd);
+
+ return 0;
+}
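
/* Ordering note: ownership is reclaimed first so the rebooted firmware
 * will accept configuration again; fbnic_rpc_reset_valid_entries()
 * (added in fbnic_rpc.c below) then downgrades every FBNIC_TCAM_S_VALID
 * entry to FBNIC_TCAM_S_UPDATE so __fbnic_set_rx_mode() rewrites them
 * into hardware that lost its state across the crash.
 */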
+
static void fbnic_health_check(struct fbnic_dev *fbd)
{
struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
@@ -184,13 +196,11 @@ static void fbnic_health_check(struct fbnic_dev *fbd)
if (tx_mbx->head != tx_mbx->tail)
return;
- /* TBD: Need to add a more thorough recovery here.
- * Specifically I need to verify what all the firmware will have
- * changed since we had setup and it rebooted. May just need to
- * perform a down/up. For now we will just reclaim ownership so
- * the heartbeat can catch the next fault.
- */
- fbnic_fw_xmit_ownership_msg(fbd, true);
+ fbnic_devlink_fw_report(fbd, "Firmware crash detected!");
+ fbnic_devlink_otp_check(fbd, "error detected after firmware recovery");
+
+ if (fbnic_fw_config_after_crash(fbd))
+ dev_err(fbd->dev, "Firmware recovery failed after crash\n");
}
static void fbnic_service_task(struct work_struct *work)
@@ -206,8 +216,13 @@ static void fbnic_service_task(struct work_struct *work)
fbnic_health_check(fbd);
- if (netif_carrier_ok(fbd->netdev))
+ fbnic_bmc_rpc_check(fbd);
+
+ if (netif_carrier_ok(fbd->netdev)) {
+ netdev_lock(fbd->netdev);
fbnic_napi_depletion_check(fbd->netdev);
+ netdev_unlock(fbd->netdev);
+ }
if (netif_running(fbd->netdev))
schedule_delayed_work(&fbd->service_task, HZ);
@@ -266,6 +281,10 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
}
+ err = fbnic_devlink_health_create(fbd);
+ if (err)
+ goto free_fbd;
+
/* Populate driver with hardware-specific info and handlers */
fbd->max_num_queues = info->max_num_queues;
@@ -276,7 +295,7 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = fbnic_alloc_irqs(fbd);
if (err)
- goto free_fbd;
+ goto err_destroy_health;
err = fbnic_mac_init(fbd);
if (err) {
@@ -303,11 +322,11 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err);
fbnic_devlink_register(fbd);
+ fbnic_devlink_otp_check(fbd, "error detected during probe");
fbnic_dbg_fbd_init(fbd);
- spin_lock_init(&fbd->hw_stats_lock);
/* Capture snapshot of hardware stats so netdev can calculate delta */
- fbnic_reset_hw_stats(fbd);
+ fbnic_init_hw_stats(fbd);
fbnic_hwmon_register(fbd);
@@ -346,6 +365,8 @@ init_failure_mode:
return 0;
free_irqs:
fbnic_free_irqs(fbd);
+err_destroy_health:
+ fbnic_devlink_health_destroy(fbd);
free_fbd:
fbnic_devlink_free(fbd);
@@ -380,6 +401,7 @@ static void fbnic_remove(struct pci_dev *pdev)
fbnic_fw_free_mbx(fbd);
fbnic_free_irqs(fbd);
+ fbnic_devlink_health_destroy(fbd);
fbnic_devlink_free(fbd);
}
@@ -392,12 +414,14 @@ static int fbnic_pm_suspend(struct device *dev)
goto null_uc_addr;
rtnl_lock();
+ netdev_lock(netdev);
netif_device_detach(netdev);
if (netif_running(netdev))
netdev->netdev_ops->ndo_stop(netdev);
+ netdev_unlock(netdev);
rtnl_unlock();
null_uc_addr:
@@ -443,16 +467,18 @@ static int __fbnic_pm_resume(struct device *dev)
/* Re-enable mailbox */
err = fbnic_fw_request_mbx(fbd);
+ devl_unlock(priv_to_devlink(fbd));
if (err)
goto err_free_irqs;
- devl_unlock(priv_to_devlink(fbd));
-
/* Only send log history if log buffer is empty to prevent duplicate
* log entries.
*/
fbnic_fw_log_enable(fbd, list_empty(&fbd->fw_log.entries));
+ /* Since the FW should be up, check if it reported OTP errors */
+ fbnic_devlink_otp_check(fbd, "error detected after PM resume");
+
/* No netdev means there isn't a network interface to bring up */
if (fbnic_init_failure(fbd))
return 0;
@@ -463,21 +489,23 @@ static int __fbnic_pm_resume(struct device *dev)
fbnic_reset_queues(fbn, fbn->num_tx_queues, fbn->num_rx_queues);
rtnl_lock();
+ netdev_lock(netdev);
- if (netif_running(netdev)) {
+ if (netif_running(netdev))
err = __fbnic_open(fbn);
- if (err)
- goto err_free_mbx;
- }
+ netdev_unlock(netdev);
rtnl_unlock();
+ if (err)
+ goto err_free_mbx;
return 0;
err_free_mbx:
fbnic_fw_log_disable(fbd);
- rtnl_unlock();
+ devl_lock(priv_to_devlink(fbd));
fbnic_fw_free_mbx(fbd);
+ devl_unlock(priv_to_devlink(fbd));
err_free_irqs:
fbnic_free_irqs(fbd);
err_invalidate_uc_addr:
@@ -492,6 +520,10 @@ static void __fbnic_pm_attach(struct device *dev)
struct net_device *netdev = fbd->netdev;
struct fbnic_net *fbn;
+ rtnl_lock();
+ fbnic_reset_hw_stats(fbd);
+ rtnl_unlock();
+
if (fbnic_init_failure(fbd))
return;
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
index 8ff07b5562e3..7f31e890031c 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
@@ -6,6 +6,7 @@
#include <net/ipv6.h>
#include "fbnic.h"
+#include "fbnic_fw.h"
#include "fbnic_netdev.h"
#include "fbnic_rpc.h"
@@ -71,6 +72,8 @@ u16 fbnic_flow_hash_2_rss_en_mask(struct fbnic_net *fbn, int flow_type)
rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(IP_DST, IP_DST, flow_hash);
rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(L4_B_0_1, L4_SRC, flow_hash);
rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(L4_B_2_3, L4_DST, flow_hash);
+ rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(IP6_FL, OV6_FL_LBL, flow_hash);
+ rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(IP6_FL, IV6_FL_LBL, flow_hash);
return rss_en_mask;
}
@@ -129,12 +132,9 @@ void fbnic_bmc_rpc_all_multi_config(struct fbnic_dev *fbd,
else
clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
mac_addr->act_tcam);
- } else if (!test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam) &&
- !is_zero_ether_addr(mac_addr->mask.addr8) &&
- mac_addr->state == FBNIC_TCAM_S_VALID) {
- clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI, mac_addr->act_tcam);
- clear_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam);
- mac_addr->state = FBNIC_TCAM_S_DELETE;
+ } else {
+ __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_BMC);
+ __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_ALLMULTI);
}
/* We have to add a special handler for multicast as the
@@ -236,8 +236,25 @@ void fbnic_bmc_rpc_init(struct fbnic_dev *fbd)
act_tcam->mask.tcam[j] = 0xffff;
act_tcam->state = FBNIC_TCAM_S_UPDATE;
+}
+
+void fbnic_bmc_rpc_check(struct fbnic_dev *fbd)
+{
+ int err;
+
+ if (fbd->fw_cap.need_bmc_tcam_reinit) {
+ fbnic_bmc_rpc_init(fbd);
+ __fbnic_set_rx_mode(fbd);
+ fbd->fw_cap.need_bmc_tcam_reinit = false;
+ }
- fbnic_bmc_rpc_all_multi_config(fbd, false);
+ if (fbd->fw_cap.need_bmc_macda_sync) {
+ err = fbnic_fw_xmit_rpc_macda_sync(fbd);
+ if (err)
+ dev_warn(fbd->dev,
+ "Writing MACDA table to FW failed, err: %d\n", err);
+ fbd->fw_cap.need_bmc_macda_sync = false;
+ }
}
#define FBNIC_ACT1_INIT(_l4, _udp, _ip, _v6) \
@@ -452,6 +469,50 @@ int __fbnic_xc_unsync(struct fbnic_mac_addr *mac_addr, unsigned int tcam_idx)
return 0;
}
+void fbnic_promisc_sync(struct fbnic_dev *fbd,
+ bool uc_promisc, bool mc_promisc)
+{
+ struct fbnic_mac_addr *mac_addr;
+
+ /* Populate last TCAM entry with promiscuous entry and 0/1 bit mask */
+ mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_PROMISC_IDX];
+ if (uc_promisc) {
+ if (!is_zero_ether_addr(mac_addr->value.addr8) ||
+ mac_addr->state != FBNIC_TCAM_S_VALID) {
+ eth_zero_addr(mac_addr->value.addr8);
+ eth_broadcast_addr(mac_addr->mask.addr8);
+ clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
+ mac_addr->act_tcam);
+ set_bit(FBNIC_MAC_ADDR_T_PROMISC,
+ mac_addr->act_tcam);
+ mac_addr->state = FBNIC_TCAM_S_ADD;
+ }
+ } else if (mc_promisc &&
+ (!fbnic_bmc_present(fbd) || !fbd->fw_cap.all_multi)) {
+ /* We have to add a special handler for multicast as the
+ * BMC may have an all-multi rule already in place. As such
+ * adding a rule ourselves won't do any good so we will have
+ * to modify the rules for the ALL MULTI below if the BMC
+ * already has the rule in place.
+ */
+ if (!is_multicast_ether_addr(mac_addr->value.addr8) ||
+ mac_addr->state != FBNIC_TCAM_S_VALID) {
+ eth_zero_addr(mac_addr->value.addr8);
+ eth_broadcast_addr(mac_addr->mask.addr8);
+ mac_addr->value.addr8[0] ^= 1;
+ mac_addr->mask.addr8[0] ^= 1;
+ set_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
+ mac_addr->act_tcam);
+ clear_bit(FBNIC_MAC_ADDR_T_PROMISC,
+ mac_addr->act_tcam);
+ mac_addr->state = FBNIC_TCAM_S_ADD;
+ }
+ } else if (mac_addr->state == FBNIC_TCAM_S_VALID) {
+ __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_ALLMULTI);
+ __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_PROMISC);
+ }
+}
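
/* The XOR trick above builds value 01:00:00:00:00:00 with mask
 * fe:ff:ff:ff:ff:ff: every mask bit except the multicast bit is a
 * wildcard, so the entry matches exactly those destination addresses
 * whose multicast bit is set.
 */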
+
void fbnic_sift_macda(struct fbnic_dev *fbd)
{
int dest, src;
@@ -535,6 +596,21 @@ static void fbnic_clear_macda(struct fbnic_dev *fbd)
}
}
+static void fbnic_clear_valid_macda(struct fbnic_dev *fbd)
+{
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx];
+
+ if (mac_addr->state == FBNIC_TCAM_S_VALID) {
+ fbnic_clear_macda_entry(fbd, idx);
+
+ mac_addr->state = FBNIC_TCAM_S_UPDATE;
+ }
+ }
+}
+
static void fbnic_write_macda_entry(struct fbnic_dev *fbd, unsigned int idx,
struct fbnic_mac_addr *mac_addr)
{
@@ -556,7 +632,7 @@ static void fbnic_write_macda_entry(struct fbnic_dev *fbd, unsigned int idx,
void fbnic_write_macda(struct fbnic_dev *fbd)
{
- int idx;
+ int idx, updates = 0;
for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx];
@@ -565,6 +641,9 @@ void fbnic_write_macda(struct fbnic_dev *fbd)
if (!(mac_addr->state & FBNIC_TCAM_S_UPDATE))
continue;
+ /* Record update count */
+ updates++;
+
/* Clear by writing 0s. */
if (mac_addr->state == FBNIC_TCAM_S_DELETE) {
/* Invalidate entry and clear addr state info */
@@ -578,6 +657,14 @@ void fbnic_write_macda(struct fbnic_dev *fbd)
mac_addr->state = FBNIC_TCAM_S_VALID;
}
+
+ /* Reinitializing the BMC TCAM counts as an initial update */
+ if (fbd->fw_cap.need_bmc_tcam_reinit)
+ updates++;
+
+ /* If needed notify firmware of changes to MACDA TCAM */
+ if (updates != 0 && fbnic_bmc_present(fbd))
+ fbd->fw_cap.need_bmc_macda_sync = true;
}
static void fbnic_clear_act_tcam(struct fbnic_dev *fbd, unsigned int idx)
@@ -1052,13 +1139,25 @@ void fbnic_write_ip_addr(struct fbnic_dev *fbd)
}
}
-void fbnic_clear_rules(struct fbnic_dev *fbd)
+static void fbnic_clear_valid_act_tcam(struct fbnic_dev *fbd)
{
- u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
- FBNIC_RPC_ACT_TBL0_DEST_BMC);
int i = FBNIC_RPC_TCAM_ACT_NUM_ENTRIES - 1;
struct fbnic_act_tcam *act_tcam;
+ /* Work from the bottom up deleting all other rules from hardware */
+ do {
+ act_tcam = &fbd->act_tcam[i];
+
+ if (act_tcam->state != FBNIC_TCAM_S_VALID)
+ continue;
+
+ fbnic_clear_act_tcam(fbd, i);
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+ } while (i--);
+}
+
+void fbnic_clear_rules(struct fbnic_dev *fbd)
+{
/* Clear MAC rules */
fbnic_clear_macda(fbd);
@@ -1073,6 +1172,11 @@ void fbnic_clear_rules(struct fbnic_dev *fbd)
* the interface back up.
*/
if (fbnic_bmc_present(fbd)) {
+ u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
+ FBNIC_RPC_ACT_TBL0_DEST_BMC);
+ int i = FBNIC_RPC_TCAM_ACT_NUM_ENTRIES - 1;
+ struct fbnic_act_tcam *act_tcam;
+
act_tcam = &fbd->act_tcam[i];
if (act_tcam->state == FBNIC_TCAM_S_VALID &&
@@ -1081,21 +1185,10 @@ void fbnic_clear_rules(struct fbnic_dev *fbd)
wr32(fbd, FBNIC_RPC_ACT_TBL1(i), 0);
act_tcam->state = FBNIC_TCAM_S_UPDATE;
-
- i--;
}
}
- /* Work from the bottom up deleting all other rules from hardware */
- do {
- act_tcam = &fbd->act_tcam[i];
-
- if (act_tcam->state != FBNIC_TCAM_S_VALID)
- continue;
-
- fbnic_clear_act_tcam(fbd, i);
- act_tcam->state = FBNIC_TCAM_S_UPDATE;
- } while (i--);
+ fbnic_clear_valid_act_tcam(fbd);
}
static void fbnic_delete_act_tcam(struct fbnic_dev *fbd, unsigned int idx)
@@ -1145,3 +1238,9 @@ void fbnic_write_rules(struct fbnic_dev *fbd)
fbnic_update_act_tcam(fbd, i);
}
}
+
+void fbnic_rpc_reset_valid_entries(struct fbnic_dev *fbd)
+{
+ fbnic_clear_valid_act_tcam(fbd);
+ fbnic_clear_valid_macda(fbd);
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h
index 6892414195c3..3d4925b2ac75 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h
@@ -184,6 +184,7 @@ struct fbnic_net;
void fbnic_bmc_rpc_init(struct fbnic_dev *fbd);
void fbnic_bmc_rpc_all_multi_config(struct fbnic_dev *fbd, bool enable_host);
+void fbnic_bmc_rpc_check(struct fbnic_dev *fbd);
void fbnic_reset_indir_tbl(struct fbnic_net *fbn);
void fbnic_rss_key_fill(u32 *buffer);
@@ -201,6 +202,9 @@ struct fbnic_mac_addr *__fbnic_mc_sync(struct fbnic_dev *fbd,
void fbnic_sift_macda(struct fbnic_dev *fbd);
void fbnic_write_macda(struct fbnic_dev *fbd);
+void fbnic_promisc_sync(struct fbnic_dev *fbd,
+ bool uc_promisc, bool mc_promisc);
+
struct fbnic_ip_addr *__fbnic_ip4_sync(struct fbnic_dev *fbd,
struct fbnic_ip_addr *ip_addr,
const struct in_addr *addr,
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
index ac11389a764c..b1e8ce89870f 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
@@ -2,11 +2,14 @@
/* Copyright (c) Meta Platforms, Inc. and affiliates. */
#include <linux/bitfield.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include <linux/iopoll.h>
#include <linux/pci.h>
#include <net/netdev_queues.h>
#include <net/page_pool/helpers.h>
#include <net/tcp.h>
+#include <net/xdp.h>
#include "fbnic.h"
#include "fbnic_csr.h"
@@ -14,6 +17,13 @@
#include "fbnic_txrx.h"
enum {
+ FBNIC_XDP_PASS = 0,
+ FBNIC_XDP_CONSUME,
+ FBNIC_XDP_TX,
+ FBNIC_XDP_LEN_ERR,
+};
+
+enum {
FBNIC_XMIT_CB_TS = 0x01,
};
@@ -27,6 +37,8 @@ struct fbnic_xmit_cb {
#define FBNIC_XMIT_CB(__skb) ((struct fbnic_xmit_cb *)((__skb)->cb))
+#define FBNIC_XMIT_NOUNMAP ((void *)1)
+
static u32 __iomem *fbnic_ring_csr_base(const struct fbnic_ring *ring)
{
unsigned long csr_base = (unsigned long)ring->doorbell;
@@ -305,6 +317,7 @@ fbnic_tx_map(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
unsigned int tail = ring->tail, first;
unsigned int size, data_len;
skb_frag_t *frag;
+ bool is_net_iov;
dma_addr_t dma;
__le64 *twd;
@@ -320,6 +333,7 @@ fbnic_tx_map(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
if (size > FIELD_MAX(FBNIC_TWD_LEN_MASK))
goto dma_error;
+ is_net_iov = false;
dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
@@ -332,6 +346,8 @@ fbnic_tx_map(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
FIELD_PREP(FBNIC_TWD_LEN_MASK, size) |
FIELD_PREP(FBNIC_TWD_TYPE_MASK,
FBNIC_TWD_TYPE_AL));
+ if (is_net_iov)
+ ring->tx_buf[tail] = FBNIC_XMIT_NOUNMAP;
tail++;
tail &= ring->size_mask;
@@ -345,6 +361,7 @@ fbnic_tx_map(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
if (size > FIELD_MAX(FBNIC_TWD_LEN_MASK))
goto dma_error;
+ is_net_iov = skb_frag_is_net_iov(frag);
dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
}
@@ -380,6 +397,8 @@ dma_error:
twd = &ring->desc[tail];
if (tail == first)
fbnic_unmap_single_twd(dev, twd);
+ else if (ring->tx_buf[tail] == FBNIC_XMIT_NOUNMAP)
+ ring->tx_buf[tail] = NULL;
else
fbnic_unmap_page_twd(dev, twd);
}
@@ -564,7 +583,11 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
desc_cnt--;
while (desc_cnt--) {
- fbnic_unmap_page_twd(nv->dev, &ring->desc[head]);
+ if (ring->tx_buf[head] != FBNIC_XMIT_NOUNMAP)
+ fbnic_unmap_page_twd(nv->dev,
+ &ring->desc[head]);
+ else
+ ring->tx_buf[head] = NULL;
head++;
head &= ring->size_mask;
}
@@ -606,6 +629,54 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
}
}
+static void fbnic_clean_twq1(struct fbnic_napi_vector *nv, bool pp_allow_direct,
+ struct fbnic_ring *ring, bool discard,
+ unsigned int hw_head)
+{
+ u64 total_bytes = 0, total_packets = 0;
+ unsigned int head = ring->head;
+
+ while (hw_head != head) {
+ struct page *page;
+ u64 twd;
+
+ if (unlikely(!(ring->desc[head] & FBNIC_TWD_TYPE(AL))))
+ goto next_desc;
+
+ twd = le64_to_cpu(ring->desc[head]);
+ page = ring->tx_buf[head];
+
+ /* TYPE_AL is 2, TYPE_LAST_AL is 3. So this trick gives
+ * us one increment per packet, with no branches.
+ */
+ total_packets += FIELD_GET(FBNIC_TWD_TYPE_MASK, twd) -
+ FBNIC_TWD_TYPE_AL;
+ total_bytes += FIELD_GET(FBNIC_TWD_LEN_MASK, twd);
+
+ page_pool_put_page(page->pp, page, -1, pp_allow_direct);
+next_desc:
+ head++;
+ head &= ring->size_mask;
+ }
+
+ if (!total_bytes)
+ return;
+
+ ring->head = head;
+
+ if (discard) {
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.dropped += total_packets;
+ u64_stats_update_end(&ring->stats.syncp);
+ return;
+ }
+
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.bytes += total_bytes;
+ ring->stats.packets += total_packets;
+ u64_stats_update_end(&ring->stats.syncp);
+}
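
/* Worked example of the counting trick: a three-descriptor frame posted
 * as AL, AL, LAST_AL contributes (2 - 2) + (2 - 2) + (3 - 2) = 1 to
 * total_packets, while total_bytes accumulates FBNIC_TWD_LEN_MASK from
 * every descriptor.
 */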
+
static void fbnic_clean_tsq(struct fbnic_napi_vector *nv,
struct fbnic_ring *ring,
u64 tcd, int *ts_head, int *head0)
@@ -657,44 +728,65 @@ static void fbnic_clean_tsq(struct fbnic_napi_vector *nv,
}
static void fbnic_page_pool_init(struct fbnic_ring *ring, unsigned int idx,
- struct page *page)
+ netmem_ref netmem)
{
struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
- page_pool_fragment_page(page, PAGECNT_BIAS_MAX);
- rx_buf->pagecnt_bias = PAGECNT_BIAS_MAX;
- rx_buf->page = page;
+ page_pool_fragment_netmem(netmem, FBNIC_PAGECNT_BIAS_MAX);
+ rx_buf->pagecnt_bias = FBNIC_PAGECNT_BIAS_MAX;
+ rx_buf->netmem = netmem;
}
-static struct page *fbnic_page_pool_get(struct fbnic_ring *ring,
- unsigned int idx)
+static struct page *
+fbnic_page_pool_get_head(struct fbnic_q_triad *qt, unsigned int idx)
{
- struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
+ struct fbnic_rx_buf *rx_buf = &qt->sub0.rx_buf[idx];
rx_buf->pagecnt_bias--;
- return rx_buf->page;
+ /* sub0 is always fed system pages, from the NAPI-level page_pool */
+ return netmem_to_page(rx_buf->netmem);
+}
+
+static netmem_ref
+fbnic_page_pool_get_data(struct fbnic_q_triad *qt, unsigned int idx)
+{
+ struct fbnic_rx_buf *rx_buf = &qt->sub1.rx_buf[idx];
+
+ rx_buf->pagecnt_bias--;
+
+ return rx_buf->netmem;
}
static void fbnic_page_pool_drain(struct fbnic_ring *ring, unsigned int idx,
- struct fbnic_napi_vector *nv, int budget)
+ int budget)
{
struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
- struct page *page = rx_buf->page;
+ netmem_ref netmem = rx_buf->netmem;
- if (!page_pool_unref_page(page, rx_buf->pagecnt_bias))
- page_pool_put_unrefed_page(nv->page_pool, page, -1, !!budget);
+ if (!page_pool_unref_netmem(netmem, rx_buf->pagecnt_bias))
+ page_pool_put_unrefed_netmem(ring->page_pool, netmem, -1,
+ !!budget);
- rx_buf->page = NULL;
+ rx_buf->netmem = 0;
}
static void fbnic_clean_twq(struct fbnic_napi_vector *nv, int napi_budget,
- struct fbnic_q_triad *qt, s32 ts_head, s32 head0)
+ struct fbnic_q_triad *qt, s32 ts_head, s32 head0,
+ s32 head1)
{
if (head0 >= 0)
fbnic_clean_twq0(nv, napi_budget, &qt->sub0, false, head0);
else if (ts_head >= 0)
fbnic_clean_twq0(nv, napi_budget, &qt->sub0, false, ts_head);
+
+ if (head1 >= 0) {
+ qt->cmpl.deferred_head = -1;
+ if (napi_budget)
+ fbnic_clean_twq1(nv, true, &qt->sub1, false, head1);
+ else
+ qt->cmpl.deferred_head = head1;
+ }
}
static void
@@ -702,6 +794,7 @@ fbnic_clean_tcq(struct fbnic_napi_vector *nv, struct fbnic_q_triad *qt,
int napi_budget)
{
struct fbnic_ring *cmpl = &qt->cmpl;
+ s32 head1 = cmpl->deferred_head;
s32 head0 = -1, ts_head = -1;
__le64 *raw_tcd, done;
u32 head = cmpl->head;
@@ -719,7 +812,10 @@ fbnic_clean_tcq(struct fbnic_napi_vector *nv, struct fbnic_q_triad *qt,
switch (FIELD_GET(FBNIC_TCD_TYPE_MASK, tcd)) {
case FBNIC_TCD_TYPE_0:
- if (!(tcd & FBNIC_TCD_TWQ1))
+ if (tcd & FBNIC_TCD_TWQ1)
+ head1 = FIELD_GET(FBNIC_TCD_TYPE0_HEAD1_MASK,
+ tcd);
+ else
head0 = FIELD_GET(FBNIC_TCD_TYPE0_HEAD0_MASK,
tcd);
/* Currently all err status bits are related to
@@ -752,11 +848,11 @@ fbnic_clean_tcq(struct fbnic_napi_vector *nv, struct fbnic_q_triad *qt,
}
/* Unmap and free processed buffers */
- fbnic_clean_twq(nv, napi_budget, qt, ts_head, head0);
+ fbnic_clean_twq(nv, napi_budget, qt, ts_head, head0, head1);
}
-static void fbnic_clean_bdq(struct fbnic_napi_vector *nv, int napi_budget,
- struct fbnic_ring *ring, unsigned int hw_head)
+static void fbnic_clean_bdq(struct fbnic_ring *ring, unsigned int hw_head,
+ int napi_budget)
{
unsigned int head = ring->head;
@@ -764,7 +860,7 @@ static void fbnic_clean_bdq(struct fbnic_napi_vector *nv, int napi_budget,
return;
do {
- fbnic_page_pool_drain(ring, head, nv, napi_budget);
+ fbnic_page_pool_drain(ring, head, napi_budget);
head++;
head &= ring->size_mask;
@@ -773,10 +869,10 @@ static void fbnic_clean_bdq(struct fbnic_napi_vector *nv, int napi_budget,
ring->head = head;
}
-static void fbnic_bd_prep(struct fbnic_ring *bdq, u16 id, struct page *page)
+static void fbnic_bd_prep(struct fbnic_ring *bdq, u16 id, netmem_ref netmem)
{
__le64 *bdq_desc = &bdq->desc[id * FBNIC_BD_FRAG_COUNT];
- dma_addr_t dma = page_pool_get_dma_addr(page);
+ dma_addr_t dma = page_pool_get_dma_addr_netmem(netmem);
u64 bd, i = FBNIC_BD_FRAG_COUNT;
bd = (FBNIC_BD_PAGE_ADDR_MASK & dma) |
@@ -794,7 +890,7 @@ static void fbnic_bd_prep(struct fbnic_ring *bdq, u16 id, struct page *page)
} while (--i);
}
-static void fbnic_fill_bdq(struct fbnic_napi_vector *nv, struct fbnic_ring *bdq)
+static void fbnic_fill_bdq(struct fbnic_ring *bdq)
{
unsigned int count = fbnic_desc_unused(bdq);
unsigned int i = bdq->tail;
@@ -803,19 +899,19 @@ static void fbnic_fill_bdq(struct fbnic_napi_vector *nv, struct fbnic_ring *bdq)
return;
do {
- struct page *page;
+ netmem_ref netmem;
- page = page_pool_dev_alloc_pages(nv->page_pool);
- if (!page) {
+ netmem = page_pool_dev_alloc_netmems(bdq->page_pool);
+ if (!netmem) {
u64_stats_update_begin(&bdq->stats.syncp);
- bdq->stats.rx.alloc_failed++;
+ bdq->stats.bdq.alloc_failed++;
u64_stats_update_end(&bdq->stats.syncp);
break;
}
- fbnic_page_pool_init(bdq, i, page);
- fbnic_bd_prep(bdq, i, page);
+ fbnic_page_pool_init(bdq, i, netmem);
+ fbnic_bd_prep(bdq, i, netmem);
i++;
i &= bdq->size_mask;
@@ -862,7 +958,7 @@ static void fbnic_pkt_prepare(struct fbnic_napi_vector *nv, u64 rcd,
{
unsigned int hdr_pg_idx = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd);
unsigned int hdr_pg_off = FIELD_GET(FBNIC_RCD_AL_BUFF_OFF_MASK, rcd);
- struct page *page = fbnic_page_pool_get(&qt->sub0, hdr_pg_idx);
+ struct page *page = fbnic_page_pool_get_head(qt, hdr_pg_idx);
unsigned int len = FIELD_GET(FBNIC_RCD_AL_BUFF_LEN_MASK, rcd);
unsigned int frame_sz, hdr_pg_start, hdr_pg_end, headroom;
unsigned char *hdr_start;
@@ -877,7 +973,7 @@ static void fbnic_pkt_prepare(struct fbnic_napi_vector *nv, u64 rcd,
headroom = hdr_pg_off - hdr_pg_start + FBNIC_RX_PAD;
frame_sz = hdr_pg_end - hdr_pg_start;
- xdp_init_buff(&pkt->buff, frame_sz, NULL);
+ xdp_init_buff(&pkt->buff, frame_sz, &qt->xdp_rxq);
hdr_pg_start += (FBNIC_RCD_AL_BUFF_FRAG_MASK & rcd) *
FBNIC_BD_FRAG_SIZE;
@@ -888,13 +984,12 @@ static void fbnic_pkt_prepare(struct fbnic_napi_vector *nv, u64 rcd,
/* Build frame around buffer */
hdr_start = page_address(page) + hdr_pg_start;
-
+ net_prefetch(pkt->buff.data);
xdp_prepare_buff(&pkt->buff, hdr_start, headroom,
len - FBNIC_RX_PAD, true);
- pkt->data_truesize = 0;
- pkt->data_len = 0;
- pkt->nr_frags = 0;
+ pkt->hwtstamp = 0;
+ pkt->add_frag_failed = false;
}
static void fbnic_add_rx_frag(struct fbnic_napi_vector *nv, u64 rcd,
@@ -904,9 +999,9 @@ static void fbnic_add_rx_frag(struct fbnic_napi_vector *nv, u64 rcd,
unsigned int pg_idx = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd);
unsigned int pg_off = FIELD_GET(FBNIC_RCD_AL_BUFF_OFF_MASK, rcd);
unsigned int len = FIELD_GET(FBNIC_RCD_AL_BUFF_LEN_MASK, rcd);
- struct page *page = fbnic_page_pool_get(&qt->sub1, pg_idx);
- struct skb_shared_info *shinfo;
+ netmem_ref netmem = fbnic_page_pool_get_data(qt, pg_idx);
unsigned int truesize;
+ bool added;
truesize = FIELD_GET(FBNIC_RCD_AL_PAGE_FIN, rcd) ?
FBNIC_BD_FRAG_SIZE - pg_off : ALIGN(len, 128);
@@ -915,88 +1010,171 @@ static void fbnic_add_rx_frag(struct fbnic_napi_vector *nv, u64 rcd,
FBNIC_BD_FRAG_SIZE;
/* Sync DMA buffer */
- dma_sync_single_range_for_cpu(nv->dev, page_pool_get_dma_addr(page),
- pg_off, truesize, DMA_BIDIRECTIONAL);
-
- /* Add page to xdp shared info */
- shinfo = xdp_get_shared_info_from_buff(&pkt->buff);
-
- /* We use gso_segs to store truesize */
- pkt->data_truesize += truesize;
-
- __skb_fill_page_desc_noacc(shinfo, pkt->nr_frags++, page, pg_off, len);
-
- /* Store data_len in gso_size */
- pkt->data_len += len;
+ page_pool_dma_sync_netmem_for_cpu(qt->sub1.page_pool, netmem,
+ pg_off, truesize);
+
+ added = xdp_buff_add_frag(&pkt->buff, netmem, pg_off, len, truesize);
+ if (unlikely(!added)) {
+ pkt->add_frag_failed = true;
+ netdev_err_once(nv->napi.dev,
+ "Failed to add fragment to xdp_buff\n");
+ }
}
-static void fbnic_put_pkt_buff(struct fbnic_napi_vector *nv,
+static void fbnic_put_pkt_buff(struct fbnic_q_triad *qt,
struct fbnic_pkt_buff *pkt, int budget)
{
- struct skb_shared_info *shinfo;
struct page *page;
- int nr_frags;
if (!pkt->buff.data_hard_start)
return;
- shinfo = xdp_get_shared_info_from_buff(&pkt->buff);
- nr_frags = pkt->nr_frags;
+ if (xdp_buff_has_frags(&pkt->buff)) {
+ struct skb_shared_info *shinfo;
+ netmem_ref netmem;
+ int nr_frags;
- while (nr_frags--) {
- page = skb_frag_page(&shinfo->frags[nr_frags]);
- page_pool_put_full_page(nv->page_pool, page, !!budget);
+ shinfo = xdp_get_shared_info_from_buff(&pkt->buff);
+ nr_frags = shinfo->nr_frags;
+
+ while (nr_frags--) {
+ netmem = skb_frag_netmem(&shinfo->frags[nr_frags]);
+ page_pool_put_full_netmem(qt->sub1.page_pool, netmem,
+ !!budget);
+ }
}
page = virt_to_page(pkt->buff.data_hard_start);
- page_pool_put_full_page(nv->page_pool, page, !!budget);
+ page_pool_put_full_page(qt->sub0.page_pool, page, !!budget);
}
static struct sk_buff *fbnic_build_skb(struct fbnic_napi_vector *nv,
struct fbnic_pkt_buff *pkt)
{
- unsigned int nr_frags = pkt->nr_frags;
- struct skb_shared_info *shinfo;
- unsigned int truesize;
struct sk_buff *skb;
- truesize = xdp_data_hard_end(&pkt->buff) + FBNIC_RX_TROOM -
- pkt->buff.data_hard_start;
-
- /* Build frame around buffer */
- skb = napi_build_skb(pkt->buff.data_hard_start, truesize);
- if (unlikely(!skb))
+ skb = xdp_build_skb_from_buff(&pkt->buff);
+ if (!skb)
return NULL;
- /* Push data pointer to start of data, put tail to end of data */
- skb_reserve(skb, pkt->buff.data - pkt->buff.data_hard_start);
- __skb_put(skb, pkt->buff.data_end - pkt->buff.data);
+ /* Add timestamp if present */
+ if (pkt->hwtstamp)
+ skb_hwtstamps(skb)->hwtstamp = pkt->hwtstamp;
- /* Add tracking for metadata at the start of the frame */
- skb_metadata_set(skb, pkt->buff.data - pkt->buff.data_meta);
+ return skb;
+}
- /* Add Rx frags */
- if (nr_frags) {
- /* Verify that shared info didn't move */
+static long fbnic_pkt_tx(struct fbnic_napi_vector *nv,
+ struct fbnic_pkt_buff *pkt)
+{
+ struct fbnic_ring *ring = &nv->qt[0].sub1;
+ int size, offset, nsegs = 1, data_len = 0;
+ unsigned int tail = ring->tail;
+ struct skb_shared_info *shinfo;
+ skb_frag_t *frag = NULL;
+ struct page *page;
+ dma_addr_t dma;
+ __le64 *twd;
+
+ if (unlikely(xdp_buff_has_frags(&pkt->buff))) {
shinfo = xdp_get_shared_info_from_buff(&pkt->buff);
- WARN_ON(skb_shinfo(skb) != shinfo);
+ nsegs += shinfo->nr_frags;
+ data_len = shinfo->xdp_frags_size;
+ frag = &shinfo->frags[0];
+ }
- skb->truesize += pkt->data_truesize;
- skb->data_len += pkt->data_len;
- shinfo->nr_frags = nr_frags;
- skb->len += pkt->data_len;
+ if (fbnic_desc_unused(ring) < nsegs) {
+ u64_stats_update_begin(&ring->stats.syncp);
+ ring->stats.dropped++;
+ u64_stats_update_end(&ring->stats.syncp);
+ return -FBNIC_XDP_CONSUME;
}
- skb_mark_for_recycle(skb);
+ page = virt_to_page(pkt->buff.data_hard_start);
+ offset = offset_in_page(pkt->buff.data);
+ dma = page_pool_get_dma_addr(page);
- /* Set MAC header specific fields */
- skb->protocol = eth_type_trans(skb, nv->napi.dev);
+ size = pkt->buff.data_end - pkt->buff.data;
- /* Add timestamp if present */
- if (pkt->hwtstamp)
- skb_hwtstamps(skb)->hwtstamp = pkt->hwtstamp;
+ while (nsegs--) {
+ dma_sync_single_range_for_device(nv->dev, dma, offset, size,
+ DMA_BIDIRECTIONAL);
+ dma += offset;
- return skb;
+ ring->tx_buf[tail] = page;
+
+ twd = &ring->desc[tail];
+ *twd = cpu_to_le64(FIELD_PREP(FBNIC_TWD_ADDR_MASK, dma) |
+ FIELD_PREP(FBNIC_TWD_LEN_MASK, size) |
+ FIELD_PREP(FBNIC_TWD_TYPE_MASK,
+ FBNIC_TWD_TYPE_AL));
+
+ tail++;
+ tail &= ring->size_mask;
+
+ if (!data_len)
+ break;
+
+ offset = skb_frag_off(frag);
+ page = skb_frag_page(frag);
+ dma = page_pool_get_dma_addr(page);
+
+ size = skb_frag_size(frag);
+ data_len -= size;
+ frag++;
+ }
+
+ *twd |= FBNIC_TWD_TYPE(LAST_AL);
+
+ ring->tail = tail;
+
+ return -FBNIC_XDP_TX;
+}
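
/* Return convention: the FBNIC_XDP_* codes ride back through ERR_PTR(),
 * so fbnic_clean_rcq() sees either a real skb (XDP_PASS),
 * ERR_PTR(-FBNIC_XDP_TX) when the frame was queued on TWQ1, or
 * ERR_PTR(-FBNIC_XDP_CONSUME) when the buffer should be released;
 * FBNIC_XDP_PASS is 0 and is never returned through ERR_PTR().
 */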
+
+static void fbnic_pkt_commit_tail(struct fbnic_napi_vector *nv,
+ unsigned int pkt_tail)
+{
+ struct fbnic_ring *ring = &nv->qt[0].sub1;
+
+ /* Force DMA writes to flush before writing to tail */
+ dma_wmb();
+
+ writel(pkt_tail, ring->doorbell);
+}
+
+static struct sk_buff *fbnic_run_xdp(struct fbnic_napi_vector *nv,
+ struct fbnic_pkt_buff *pkt)
+{
+ struct fbnic_net *fbn = netdev_priv(nv->napi.dev);
+ struct bpf_prog *xdp_prog;
+ int act;
+
+ xdp_prog = READ_ONCE(fbn->xdp_prog);
+ if (!xdp_prog)
+ goto xdp_pass;
+
+ /* Should never happen, config paths enforce HDS threshold > MTU */
+ if (xdp_buff_has_frags(&pkt->buff) && !xdp_prog->aux->xdp_has_frags)
+ return ERR_PTR(-FBNIC_XDP_LEN_ERR);
+
+ act = bpf_prog_run_xdp(xdp_prog, &pkt->buff);
+ switch (act) {
+ case XDP_PASS:
+xdp_pass:
+ return fbnic_build_skb(nv, pkt);
+ case XDP_TX:
+ return ERR_PTR(fbnic_pkt_tx(nv, pkt));
+ default:
+ bpf_warn_invalid_xdp_action(nv->napi.dev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(nv->napi.dev, xdp_prog, act);
+ fallthrough;
+ case XDP_DROP:
+ break;
+ }
+
+ return ERR_PTR(-FBNIC_XDP_CONSUME);
}
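
/* For completeness, the sort of program bpf_prog_run_xdp() executes
 * above; a minimal sketch (not part of this patch), built separately
 * with clang -O2 -target bpf:
 */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("xdp")
int drop_ipv4(struct xdp_md *ctx)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data = (void *)(long)ctx->data;
	struct ethhdr *eth = data;

	/* Bounds check required by the verifier */
	if ((void *)(eth + 1) > data_end)
		return XDP_PASS;

	return eth->h_proto == bpf_htons(ETH_P_IP) ? XDP_DROP : XDP_PASS;
}

char _license[] SEC("license") = "GPL";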
static enum pkt_hash_types fbnic_skb_hash_type(u64 rcd)
@@ -1050,10 +1228,10 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
struct fbnic_q_triad *qt, int budget)
{
unsigned int packets = 0, bytes = 0, dropped = 0, alloc_failed = 0;
- u64 csum_complete = 0, csum_none = 0;
+ u64 csum_complete = 0, csum_none = 0, length_errors = 0;
+ s32 head0 = -1, head1 = -1, pkt_tail = -1;
struct fbnic_ring *rcq = &qt->cmpl;
struct fbnic_pkt_buff *pkt;
- s32 head0 = -1, head1 = -1;
__le64 *raw_rcd, done;
u32 head = rcq->head;
@@ -1064,6 +1242,7 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
/* Walk the completion queue collecting the heads reported by NIC */
while (likely(packets < budget)) {
struct sk_buff *skb = ERR_PTR(-EINVAL);
+ u32 pkt_bytes;
u64 rcd;
if ((*raw_rcd & cpu_to_le64(FBNIC_RCD_DONE)) == done)
@@ -1094,30 +1273,38 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
/* We currently ignore the action table index */
break;
case FBNIC_RCD_TYPE_META:
- if (likely(!fbnic_rcd_metadata_err(rcd)))
- skb = fbnic_build_skb(nv, pkt);
+ if (likely(!fbnic_rcd_metadata_err(rcd) &&
+ !pkt->add_frag_failed)) {
+ pkt_bytes = xdp_get_buff_len(&pkt->buff);
+ skb = fbnic_run_xdp(nv, pkt);
+ }
/* Populate skb and invalidate XDP */
if (!IS_ERR_OR_NULL(skb)) {
fbnic_populate_skb_fields(nv, rcd, skb, qt,
&csum_complete,
&csum_none);
-
- packets++;
- bytes += skb->len;
-
napi_gro_receive(&nv->napi, skb);
+ } else if (skb == ERR_PTR(-FBNIC_XDP_TX)) {
+ pkt_tail = nv->qt[0].sub1.tail;
+ } else if (PTR_ERR(skb) == -FBNIC_XDP_CONSUME) {
+ fbnic_put_pkt_buff(qt, pkt, 1);
} else {
- if (!skb) {
+ if (!skb)
alloc_failed++;
+
+ if (skb == ERR_PTR(-FBNIC_XDP_LEN_ERR))
+ length_errors++;
+ else
dropped++;
- } else {
- dropped++;
- }
- fbnic_put_pkt_buff(nv, pkt, 1);
+ fbnic_put_pkt_buff(qt, pkt, 1);
+ goto next_dont_count;
}
+ packets++;
+ bytes += pkt_bytes;
+next_dont_count:
pkt->buff.data_hard_start = NULL;
break;
@@ -1134,22 +1321,24 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
u64_stats_update_begin(&rcq->stats.syncp);
rcq->stats.packets += packets;
rcq->stats.bytes += bytes;
- /* Re-add ethernet header length (removed in fbnic_build_skb) */
- rcq->stats.bytes += ETH_HLEN * packets;
rcq->stats.dropped += dropped;
rcq->stats.rx.alloc_failed += alloc_failed;
rcq->stats.rx.csum_complete += csum_complete;
rcq->stats.rx.csum_none += csum_none;
+ rcq->stats.rx.length_errors += length_errors;
u64_stats_update_end(&rcq->stats.syncp);
+ if (pkt_tail >= 0)
+ fbnic_pkt_commit_tail(nv, pkt_tail);
+
/* Unmap and free processed buffers */
if (head0 >= 0)
- fbnic_clean_bdq(nv, budget, &qt->sub0, head0);
- fbnic_fill_bdq(nv, &qt->sub0);
+ fbnic_clean_bdq(&qt->sub0, head0, budget);
+ fbnic_fill_bdq(&qt->sub0);
if (head1 >= 0)
- fbnic_clean_bdq(nv, budget, &qt->sub1, head1);
- fbnic_fill_bdq(nv, &qt->sub1);
+ fbnic_clean_bdq(&qt->sub1, head1, budget);
+ fbnic_fill_bdq(&qt->sub1);
/* Record the current head/tail of the queue */
if (rcq->head != head) {
@@ -1220,8 +1409,20 @@ void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
fbn->rx_stats.rx.alloc_failed += stats->rx.alloc_failed;
fbn->rx_stats.rx.csum_complete += stats->rx.csum_complete;
fbn->rx_stats.rx.csum_none += stats->rx.csum_none;
+ fbn->rx_stats.rx.length_errors += stats->rx.length_errors;
/* Remember to add new stats here */
- BUILD_BUG_ON(sizeof(fbn->rx_stats.rx) / 8 != 3);
+ BUILD_BUG_ON(sizeof(fbn->rx_stats.rx) / 8 != 4);
+}
+
+void fbnic_aggregate_ring_bdq_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *bdq)
+{
+ struct fbnic_queue_stats *stats = &bdq->stats;
+
+ /* Capture stats from queues before disassociating them */
+ fbn->bdq_stats.bdq.alloc_failed += stats->bdq.alloc_failed;
+ /* Remember to add new stats here */
+ BUILD_BUG_ON(sizeof(fbn->bdq_stats.bdq) / 8 != 1);
}
void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
@@ -1243,6 +1444,20 @@ void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
BUILD_BUG_ON(sizeof(fbn->tx_stats.twq) / 8 != 6);
}
+void fbnic_aggregate_ring_xdp_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *xdpr)
+{
+ struct fbnic_queue_stats *stats = &xdpr->stats;
+
+ if (!(xdpr->flags & FBNIC_RING_F_STATS))
+ return;
+
+ /* Capture stats from queues before disassociating them */
+ fbn->tx_stats.dropped += stats->dropped;
+ fbn->tx_stats.bytes += stats->bytes;
+ fbn->tx_stats.packets += stats->packets;
+}
+
static void fbnic_remove_tx_ring(struct fbnic_net *fbn,
struct fbnic_ring *txr)
{
@@ -1256,6 +1471,19 @@ static void fbnic_remove_tx_ring(struct fbnic_net *fbn,
fbn->tx[txr->q_idx] = NULL;
}
+static void fbnic_remove_xdp_ring(struct fbnic_net *fbn,
+ struct fbnic_ring *xdpr)
+{
+ if (!(xdpr->flags & FBNIC_RING_F_STATS))
+ return;
+
+ fbnic_aggregate_ring_xdp_counters(fbn, xdpr);
+
+ /* Remove pointer to the Tx ring */
+ WARN_ON(fbn->tx[xdpr->q_idx] && fbn->tx[xdpr->q_idx] != xdpr);
+ fbn->tx[xdpr->q_idx] = NULL;
+}
+
static void fbnic_remove_rx_ring(struct fbnic_net *fbn,
struct fbnic_ring *rxr)
{
@@ -1269,6 +1497,21 @@ static void fbnic_remove_rx_ring(struct fbnic_net *fbn,
fbn->rx[rxr->q_idx] = NULL;
}
+static void fbnic_remove_bdq_ring(struct fbnic_net *fbn,
+ struct fbnic_ring *bdq)
+{
+ if (!(bdq->flags & FBNIC_RING_F_STATS))
+ return;
+
+ fbnic_aggregate_ring_bdq_counters(fbn, bdq);
+}
+
+static void fbnic_free_qt_page_pools(struct fbnic_q_triad *qt)
+{
+ page_pool_destroy(qt->sub0.page_pool);
+ page_pool_destroy(qt->sub1.page_pool);
+}
+
static void fbnic_free_napi_vector(struct fbnic_net *fbn,
struct fbnic_napi_vector *nv)
{
@@ -1277,18 +1520,18 @@ static void fbnic_free_napi_vector(struct fbnic_net *fbn,
for (i = 0; i < nv->txt_count; i++) {
fbnic_remove_tx_ring(fbn, &nv->qt[i].sub0);
+ fbnic_remove_xdp_ring(fbn, &nv->qt[i].sub1);
fbnic_remove_tx_ring(fbn, &nv->qt[i].cmpl);
}
for (j = 0; j < nv->rxt_count; j++, i++) {
- fbnic_remove_rx_ring(fbn, &nv->qt[i].sub0);
- fbnic_remove_rx_ring(fbn, &nv->qt[i].sub1);
+ fbnic_remove_bdq_ring(fbn, &nv->qt[i].sub0);
+ fbnic_remove_bdq_ring(fbn, &nv->qt[i].sub1);
fbnic_remove_rx_ring(fbn, &nv->qt[i].cmpl);
}
fbnic_napi_free_irq(fbd, nv);
- page_pool_destroy(nv->page_pool);
- netif_napi_del(&nv->napi);
+ netif_napi_del_locked(&nv->napi);
fbn->napi[fbnic_napi_idx(nv)] = NULL;
kfree(nv);
}
@@ -1302,23 +1545,22 @@ void fbnic_free_napi_vectors(struct fbnic_net *fbn)
fbnic_free_napi_vector(fbn, fbn->napi[i]);
}
-#define FBNIC_PAGE_POOL_FLAGS \
- (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV)
-
-static int fbnic_alloc_nv_page_pool(struct fbnic_net *fbn,
- struct fbnic_napi_vector *nv)
+static int
+fbnic_alloc_qt_page_pools(struct fbnic_net *fbn, struct fbnic_q_triad *qt,
+ unsigned int rxq_idx)
{
struct page_pool_params pp_params = {
.order = 0,
- .flags = FBNIC_PAGE_POOL_FLAGS,
- .pool_size = (fbn->hpq_size + fbn->ppq_size) * nv->rxt_count,
+ .flags = PP_FLAG_DMA_MAP |
+ PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = fbn->hpq_size + fbn->ppq_size,
.nid = NUMA_NO_NODE,
- .dev = nv->dev,
+ .dev = fbn->netdev->dev.parent,
.dma_dir = DMA_BIDIRECTIONAL,
.offset = 0,
.max_len = PAGE_SIZE,
- .napi = &nv->napi,
.netdev = fbn->netdev,
+ .queue_idx = rxq_idx,
};
struct page_pool *pp;
@@ -1338,9 +1580,24 @@ static int fbnic_alloc_nv_page_pool(struct fbnic_net *fbn,
if (IS_ERR(pp))
return PTR_ERR(pp);
- nv->page_pool = pp;
+ qt->sub0.page_pool = pp;
+ if (netif_rxq_has_unreadable_mp(fbn->netdev, rxq_idx)) {
+ pp_params.flags |= PP_FLAG_ALLOW_UNREADABLE_NETMEM;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+
+ pp = page_pool_create(&pp_params);
+ if (IS_ERR(pp))
+ goto err_destroy_sub0;
+ } else {
+ page_pool_get(pp);
+ }
+ qt->sub1.page_pool = pp;
return 0;
+
+err_destroy_sub0:
+ page_pool_destroy(pp);
+ return PTR_ERR(pp);
}
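
/* Resulting pairing: sub0 (header BDQ) always gets a host-memory pool,
 * which is also what the queue's xdp_rxq mem model is registered
 * against below; sub1 (payload BDQ) either shares that pool via
 * page_pool_get() or, when an unreadable memory provider is bound to
 * the queue (e.g. devmem), gets its own pool flagged
 * PP_FLAG_ALLOW_UNREADABLE_NETMEM with DMA_FROM_DEVICE, as the CPU is
 * not expected to touch those payloads.
 */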
static void fbnic_ring_init(struct fbnic_ring *ring, u32 __iomem *doorbell,
@@ -1350,6 +1607,7 @@ static void fbnic_ring_init(struct fbnic_ring *ring, u32 __iomem *doorbell,
ring->doorbell = doorbell;
ring->q_idx = q_idx;
ring->flags = flags;
+ ring->deferred_head = -1;
}
static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
@@ -1359,11 +1617,18 @@ static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
{
int txt_count = txq_count, rxt_count = rxq_count;
u32 __iomem *uc_addr = fbd->uc_addr0;
+ int xdp_count = 0, qt_count, err;
struct fbnic_napi_vector *nv;
struct fbnic_q_triad *qt;
- int qt_count, err;
u32 __iomem *db;
+ /* We need to reserve at least one Tx Queue Triad for an XDP ring */
+ if (rxq_count) {
+ xdp_count = 1;
+ if (!txt_count)
+ txt_count = 1;
+ }
+
qt_count = txt_count + rxq_count;
if (!qt_count)
return -EINVAL;
@@ -1387,37 +1652,33 @@ static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
/* Tie napi to netdev */
fbn->napi[fbnic_napi_idx(nv)] = nv;
- netif_napi_add(fbn->netdev, &nv->napi, fbnic_poll);
+ netif_napi_add_config_locked(fbn->netdev, &nv->napi, fbnic_poll,
+ fbnic_napi_idx(nv));
/* Record IRQ to NAPI struct */
- netif_napi_set_irq(&nv->napi,
- pci_irq_vector(to_pci_dev(fbd->dev), nv->v_idx));
+ netif_napi_set_irq_locked(&nv->napi,
+ pci_irq_vector(to_pci_dev(fbd->dev),
+ nv->v_idx));
/* Tie nv back to PCIe dev */
nv->dev = fbd->dev;
- /* Allocate page pool */
- if (rxq_count) {
- err = fbnic_alloc_nv_page_pool(fbn, nv);
- if (err)
- goto napi_del;
- }
-
/* Request the IRQ for napi vector */
err = fbnic_napi_request_irq(fbd, nv);
if (err)
- goto pp_destroy;
+ goto napi_del;
/* Initialize queue triads */
qt = nv->qt;
while (txt_count) {
+ u8 flags = FBNIC_RING_F_CTX | FBNIC_RING_F_STATS;
+
/* Configure Tx queue */
db = &uc_addr[FBNIC_QUEUE(txq_idx) + FBNIC_QUEUE_TWQ0_TAIL];
/* Assign Tx queue to netdev if applicable */
if (txq_count > 0) {
- u8 flags = FBNIC_RING_F_CTX | FBNIC_RING_F_STATS;
fbnic_ring_init(&qt->sub0, db, txq_idx, flags);
fbn->tx[txq_idx] = &qt->sub0;
@@ -1427,6 +1688,28 @@ static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
FBNIC_RING_F_DISABLED);
}
+ /* Configure XDP queue */
+ db = &uc_addr[FBNIC_QUEUE(txq_idx) + FBNIC_QUEUE_TWQ1_TAIL];
+
+ /* Assign XDP queue to netdev if applicable
+ *
+ * The setup for this is a bit different:
+ * 1. We only need one XDP Tx queue per NAPI vector.
+ * 2. We associate it with the first Rx queue index.
+ * 3. The hardware side is associated based on the Tx queue.
+ * 4. The netdev queue is offset by FBNIC_MAX_TXQS.
+ */
+ if (xdp_count > 0) {
+ unsigned int xdp_idx = FBNIC_MAX_TXQS + rxq_idx;
+
+ fbnic_ring_init(&qt->sub1, db, xdp_idx, flags);
+ fbn->tx[xdp_idx] = &qt->sub1;
+ xdp_count--;
+ } else {
+ fbnic_ring_init(&qt->sub1, db, 0,
+ FBNIC_RING_F_DISABLED);
+ }
+
/* Configure Tx completion queue */
db = &uc_addr[FBNIC_QUEUE(txq_idx) + FBNIC_QUEUE_TCQ_HEAD];
fbnic_ring_init(&qt->cmpl, db, 0, 0);
@@ -1442,11 +1725,13 @@ static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
while (rxt_count) {
/* Configure header queue */
db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_BDQ_HPQ_TAIL];
- fbnic_ring_init(&qt->sub0, db, 0, FBNIC_RING_F_CTX);
+ fbnic_ring_init(&qt->sub0, db, 0,
+ FBNIC_RING_F_CTX | FBNIC_RING_F_STATS);
/* Configure payload queue */
db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_BDQ_PPQ_TAIL];
- fbnic_ring_init(&qt->sub1, db, 0, FBNIC_RING_F_CTX);
+ fbnic_ring_init(&qt->sub1, db, 0,
+ FBNIC_RING_F_CTX | FBNIC_RING_F_STATS);
/* Configure Rx completion queue */
db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_RCQ_HEAD];
@@ -1463,10 +1748,8 @@ static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
return 0;
-pp_destroy:
- page_pool_destroy(nv->page_pool);
napi_del:
- netif_napi_del(&nv->napi);
+ netif_napi_del_locked(&nv->napi);
fbn->napi[fbnic_napi_idx(nv)] = NULL;
kfree(nv);
return err;
@@ -1680,6 +1963,12 @@ static void fbnic_free_qt_resources(struct fbnic_net *fbn,
fbnic_free_ring_resources(dev, &qt->cmpl);
fbnic_free_ring_resources(dev, &qt->sub1);
fbnic_free_ring_resources(dev, &qt->sub0);
+
+ if (xdp_rxq_info_is_reg(&qt->xdp_rxq)) {
+ xdp_rxq_info_unreg_mem_model(&qt->xdp_rxq);
+ xdp_rxq_info_unreg(&qt->xdp_rxq);
+ fbnic_free_qt_page_pools(qt);
+ }
}
static int fbnic_alloc_tx_qt_resources(struct fbnic_net *fbn,
@@ -1692,6 +1981,10 @@ static int fbnic_alloc_tx_qt_resources(struct fbnic_net *fbn,
if (err)
return err;
+ err = fbnic_alloc_tx_ring_resources(fbn, &qt->sub1);
+ if (err)
+ goto free_sub0;
+
err = fbnic_alloc_tx_ring_resources(fbn, &qt->cmpl);
if (err)
goto free_sub1;
@@ -1699,20 +1992,37 @@ static int fbnic_alloc_tx_qt_resources(struct fbnic_net *fbn,
return 0;
free_sub1:
+ fbnic_free_ring_resources(dev, &qt->sub1);
+free_sub0:
fbnic_free_ring_resources(dev, &qt->sub0);
return err;
}
static int fbnic_alloc_rx_qt_resources(struct fbnic_net *fbn,
+ struct fbnic_napi_vector *nv,
struct fbnic_q_triad *qt)
{
struct device *dev = fbn->netdev->dev.parent;
int err;
- err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub0);
+ err = fbnic_alloc_qt_page_pools(fbn, qt, qt->cmpl.q_idx);
if (err)
return err;
+ err = xdp_rxq_info_reg(&qt->xdp_rxq, fbn->netdev, qt->sub0.q_idx,
+ nv->napi.napi_id);
+ if (err)
+ goto free_page_pools;
+
+ err = xdp_rxq_info_reg_mem_model(&qt->xdp_rxq, MEM_TYPE_PAGE_POOL,
+ qt->sub0.page_pool);
+ if (err)
+ goto unreg_rxq;
+
+ err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub0);
+ if (err)
+ goto unreg_mm;
+
err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub1);
if (err)
goto free_sub0;
@@ -1727,19 +2037,21 @@ free_sub1:
fbnic_free_ring_resources(dev, &qt->sub1);
free_sub0:
fbnic_free_ring_resources(dev, &qt->sub0);
+unreg_mm:
+ xdp_rxq_info_unreg_mem_model(&qt->xdp_rxq);
+unreg_rxq:
+ xdp_rxq_info_unreg(&qt->xdp_rxq);
+free_page_pools:
+ fbnic_free_qt_page_pools(qt);
return err;
}
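For reference, the registration order used in fbnic_alloc_rx_qt_resources() above mirrors the common kernel XDP pattern (a hedged outline, not additional driver code):

/* 1. create the page pool(s) backing the Rx buffers
 * 2. xdp_rxq_info_reg(&qt->xdp_rxq, netdev, q_idx, napi_id)
 * 3. xdp_rxq_info_reg_mem_model(&qt->xdp_rxq, MEM_TYPE_PAGE_POOL, pool)
 *
 * Teardown runs in reverse (unreg_mem_model, unreg, destroy pools),
 * as in fbnic_free_qt_resources() earlier in this patch.
 */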
static void fbnic_free_nv_resources(struct fbnic_net *fbn,
struct fbnic_napi_vector *nv)
{
- int i, j;
-
- /* Free Tx Resources */
- for (i = 0; i < nv->txt_count; i++)
- fbnic_free_qt_resources(fbn, &nv->qt[i]);
+ int i;
- for (j = 0; j < nv->rxt_count; j++, i++)
+ for (i = 0; i < nv->txt_count + nv->rxt_count; i++)
fbnic_free_qt_resources(fbn, &nv->qt[i]);
}
@@ -1752,19 +2064,19 @@ static int fbnic_alloc_nv_resources(struct fbnic_net *fbn,
for (i = 0; i < nv->txt_count; i++) {
err = fbnic_alloc_tx_qt_resources(fbn, &nv->qt[i]);
if (err)
- goto free_resources;
+ goto free_qt_resources;
}
/* Allocate Rx Resources */
for (j = 0; j < nv->rxt_count; j++, i++) {
- err = fbnic_alloc_rx_qt_resources(fbn, &nv->qt[i]);
+ err = fbnic_alloc_rx_qt_resources(fbn, nv, &nv->qt[i]);
if (err)
- goto free_resources;
+ goto free_qt_resources;
}
return 0;
-free_resources:
+free_qt_resources:
while (i--)
fbnic_free_qt_resources(fbn, &nv->qt[i]);
return err;
@@ -1871,6 +2183,15 @@ static void fbnic_disable_twq0(struct fbnic_ring *txr)
fbnic_ring_wr32(txr, FBNIC_QUEUE_TWQ0_CTL, twq_ctl);
}
+static void fbnic_disable_twq1(struct fbnic_ring *txr)
+{
+ u32 twq_ctl = fbnic_ring_rd32(txr, FBNIC_QUEUE_TWQ1_CTL);
+
+ twq_ctl &= ~FBNIC_QUEUE_TWQ_CTL_ENABLE;
+
+ fbnic_ring_wr32(txr, FBNIC_QUEUE_TWQ1_CTL, twq_ctl);
+}
+
static void fbnic_disable_tcq(struct fbnic_ring *txr)
{
fbnic_ring_wr32(txr, FBNIC_QUEUE_TCQ_CTL, 0);
@@ -1897,36 +2218,48 @@ void fbnic_napi_disable(struct fbnic_net *fbn)
int i;
for (i = 0; i < fbn->num_napi; i++) {
- napi_disable(&fbn->napi[i]->napi);
+ napi_disable_locked(&fbn->napi[i]->napi);
fbnic_nv_irq_disable(fbn->napi[i]);
}
}
-void fbnic_disable(struct fbnic_net *fbn)
+static void __fbnic_nv_disable(struct fbnic_napi_vector *nv)
{
- struct fbnic_dev *fbd = fbn->fbd;
- int i, j, t;
-
- for (i = 0; i < fbn->num_napi; i++) {
- struct fbnic_napi_vector *nv = fbn->napi[i];
+ int i, t;
- /* Disable Tx queue triads */
- for (t = 0; t < nv->txt_count; t++) {
- struct fbnic_q_triad *qt = &nv->qt[t];
+ /* Disable Tx queue triads */
+ for (t = 0; t < nv->txt_count; t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
- fbnic_disable_twq0(&qt->sub0);
- fbnic_disable_tcq(&qt->cmpl);
- }
+ fbnic_disable_twq0(&qt->sub0);
+ fbnic_disable_twq1(&qt->sub1);
+ fbnic_disable_tcq(&qt->cmpl);
+ }
- /* Disable Rx queue triads */
- for (j = 0; j < nv->rxt_count; j++, t++) {
- struct fbnic_q_triad *qt = &nv->qt[t];
+ /* Disable Rx queue triads */
+ for (i = 0; i < nv->rxt_count; i++, t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
- fbnic_disable_bdq(&qt->sub0, &qt->sub1);
- fbnic_disable_rcq(&qt->cmpl);
- }
+ fbnic_disable_bdq(&qt->sub0, &qt->sub1);
+ fbnic_disable_rcq(&qt->cmpl);
}
+}
+
+static void
+fbnic_nv_disable(struct fbnic_net *fbn, struct fbnic_napi_vector *nv)
+{
+ __fbnic_nv_disable(nv);
+ fbnic_wrfl(fbn->fbd);
+}
+
+void fbnic_disable(struct fbnic_net *fbn)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++)
+ __fbnic_nv_disable(fbn->napi[i]);
fbnic_wrfl(fbd);
}
@@ -2015,73 +2348,119 @@ int fbnic_wait_all_queues_idle(struct fbnic_dev *fbd, bool may_fail)
return err;
}
-void fbnic_flush(struct fbnic_net *fbn)
+static int
+fbnic_wait_queue_idle(struct fbnic_net *fbn, bool rx, unsigned int idx)
{
- int i;
+ static const unsigned int tx_regs[] = {
+ FBNIC_QM_TWQ_IDLE(0), FBNIC_QM_TQS_IDLE(0),
+ FBNIC_QM_TDE_IDLE(0), FBNIC_QM_TCQ_IDLE(0),
+ }, rx_regs[] = {
+ FBNIC_QM_HPQ_IDLE(0), FBNIC_QM_PPQ_IDLE(0),
+ FBNIC_QM_RCQ_IDLE(0),
+ };
+ struct fbnic_dev *fbd = fbn->fbd;
+ unsigned int val, mask, off;
+ const unsigned int *regs;
+ unsigned int reg_cnt;
+ int i, err;
- for (i = 0; i < fbn->num_napi; i++) {
- struct fbnic_napi_vector *nv = fbn->napi[i];
- int j, t;
+ regs = rx ? rx_regs : tx_regs;
+ reg_cnt = rx ? ARRAY_SIZE(rx_regs) : ARRAY_SIZE(tx_regs);
- /* Flush any processed Tx Queue Triads and drop the rest */
- for (t = 0; t < nv->txt_count; t++) {
- struct fbnic_q_triad *qt = &nv->qt[t];
- struct netdev_queue *tx_queue;
+ off = idx / 32;
+ mask = BIT(idx % 32);
- /* Clean the work queues of unprocessed work */
- fbnic_clean_twq0(nv, 0, &qt->sub0, true, qt->sub0.tail);
+ for (i = 0; i < reg_cnt; i++) {
+ err = read_poll_timeout_atomic(fbnic_rd32, val, val & mask,
+ 2, 500000, false,
+ fbd, regs[i] + off);
+ if (err) {
+ netdev_err(fbd->netdev,
+ "wait for queue %s%d idle failed 0x%04x(%d): %08x (mask: %08x)\n",
+ rx ? "Rx" : "Tx", idx, regs[i] + off, i,
+ val, mask);
+ return err;
+ }
+ }
- /* Reset completion queue descriptor ring */
- memset(qt->cmpl.desc, 0, qt->cmpl.size);
+ return 0;
+}
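The off/mask computation above in isolation (a hedged sketch; the helper is illustrative only):

static void fbnic_idle_reg_pos(unsigned int idx, unsigned int *off,
			       unsigned int *mask)
{
	/* Each QM idle bank packs one idle bit per queue, 32 queues per
	 * 32-bit word, so queue idx 35 maps to word 1, bit 3: the loop
	 * above then polls regs[i] + 1 waiting for BIT(3) to be set.
	 */
	*off = idx / 32;	/* word offset within the register bank */
	*mask = BIT(idx % 32);	/* bit within that word */
}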
- /* Nothing else to do if Tx queue is disabled */
- if (qt->sub0.flags & FBNIC_RING_F_DISABLED)
- continue;
+static void fbnic_nv_flush(struct fbnic_napi_vector *nv)
+{
+ int j, t;
- /* Reset BQL associated with Tx queue */
- tx_queue = netdev_get_tx_queue(nv->napi.dev,
- qt->sub0.q_idx);
- netdev_tx_reset_queue(tx_queue);
- }
+ /* Flush any processed Tx Queue Triads and drop the rest */
+ for (t = 0; t < nv->txt_count; t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
+ struct netdev_queue *tx_queue;
- /* Flush any processed Rx Queue Triads and drop the rest */
- for (j = 0; j < nv->rxt_count; j++, t++) {
- struct fbnic_q_triad *qt = &nv->qt[t];
+ /* Clean the work queues of unprocessed work */
+ fbnic_clean_twq0(nv, 0, &qt->sub0, true, qt->sub0.tail);
+ fbnic_clean_twq1(nv, false, &qt->sub1, true,
+ qt->sub1.tail);
- /* Clean the work queues of unprocessed work */
- fbnic_clean_bdq(nv, 0, &qt->sub0, qt->sub0.tail);
- fbnic_clean_bdq(nv, 0, &qt->sub1, qt->sub1.tail);
+ /* Reset completion queue descriptor ring */
+ memset(qt->cmpl.desc, 0, qt->cmpl.size);
- /* Reset completion queue descriptor ring */
- memset(qt->cmpl.desc, 0, qt->cmpl.size);
+ /* Nothing else to do if Tx queue is disabled */
+ if (qt->sub0.flags & FBNIC_RING_F_DISABLED)
+ continue;
- fbnic_put_pkt_buff(nv, qt->cmpl.pkt, 0);
- qt->cmpl.pkt->buff.data_hard_start = NULL;
- }
+ /* Reset BQL associated with Tx queue */
+ tx_queue = netdev_get_tx_queue(nv->napi.dev,
+ qt->sub0.q_idx);
+ netdev_tx_reset_queue(tx_queue);
+ }
+
+ /* Flush any processed Rx Queue Triads and drop the rest */
+ for (j = 0; j < nv->rxt_count; j++, t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
+
+ /* Clean the work queues of unprocessed work */
+ fbnic_clean_bdq(&qt->sub0, qt->sub0.tail, 0);
+ fbnic_clean_bdq(&qt->sub1, qt->sub1.tail, 0);
+
+ /* Reset completion queue descriptor ring */
+ memset(qt->cmpl.desc, 0, qt->cmpl.size);
+
+ fbnic_put_pkt_buff(qt, qt->cmpl.pkt, 0);
+ memset(qt->cmpl.pkt, 0, sizeof(struct fbnic_pkt_buff));
}
}
-void fbnic_fill(struct fbnic_net *fbn)
+void fbnic_flush(struct fbnic_net *fbn)
{
int i;
- for (i = 0; i < fbn->num_napi; i++) {
- struct fbnic_napi_vector *nv = fbn->napi[i];
- int j, t;
+ for (i = 0; i < fbn->num_napi; i++)
+ fbnic_nv_flush(fbn->napi[i]);
+}
- /* Configure NAPI mapping and populate pages
- * in the BDQ rings to use for Rx
- */
- for (j = 0, t = nv->txt_count; j < nv->rxt_count; j++, t++) {
- struct fbnic_q_triad *qt = &nv->qt[t];
+static void fbnic_nv_fill(struct fbnic_napi_vector *nv)
+{
+ int j, t;
- /* Populate the header and payload BDQs */
- fbnic_fill_bdq(nv, &qt->sub0);
- fbnic_fill_bdq(nv, &qt->sub1);
- }
+ /* Configure NAPI mapping and populate pages
+ * in the BDQ rings to use for Rx
+ */
+ for (j = 0, t = nv->txt_count; j < nv->rxt_count; j++, t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
+
+ /* Populate the header and payload BDQs */
+ fbnic_fill_bdq(&qt->sub0);
+ fbnic_fill_bdq(&qt->sub1);
}
}
+void fbnic_fill(struct fbnic_net *fbn)
+{
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++)
+ fbnic_nv_fill(fbn->napi[i]);
+}
+
static void fbnic_enable_twq0(struct fbnic_ring *twq)
{
u32 log_size = fls(twq->size_mask);
@@ -2104,6 +2483,28 @@ static void fbnic_enable_twq0(struct fbnic_ring *twq)
fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_CTL, FBNIC_QUEUE_TWQ_CTL_ENABLE);
}
+static void fbnic_enable_twq1(struct fbnic_ring *twq)
+{
+ u32 log_size = fls(twq->size_mask);
+
+ if (!twq->size_mask)
+ return;
+
+ /* Reset head/tail */
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ1_CTL, FBNIC_QUEUE_TWQ_CTL_RESET);
+ twq->tail = 0;
+ twq->head = 0;
+
+ /* Store descriptor ring address and size */
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ1_BAL, lower_32_bits(twq->dma));
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ1_BAH, upper_32_bits(twq->dma));
+
+ /* Write lower 4 bits of log size as 64K ring size is 0 */
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ1_SIZE, log_size & 0xf);
+
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ1_CTL, FBNIC_QUEUE_TWQ_CTL_ENABLE);
+}
+
static void fbnic_enable_tcq(struct fbnic_napi_vector *nv,
struct fbnic_ring *tcq)
{
@@ -2232,13 +2633,22 @@ static void fbnic_enable_rcq(struct fbnic_napi_vector *nv,
{
struct fbnic_net *fbn = netdev_priv(nv->napi.dev);
u32 log_size = fls(rcq->size_mask);
- u32 rcq_ctl;
+ u32 hds_thresh = fbn->hds_thresh;
+ u32 rcq_ctl = 0;
fbnic_config_drop_mode_rcq(nv, rcq);
- rcq_ctl = FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PADLEN_MASK, FBNIC_RX_PAD) |
- FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_MAX_HDR_MASK,
- FBNIC_RX_MAX_HDR) |
+ /* Force a lower bound on MAX_HEADER_BYTES. Below this threshold all
+ * frames should be split at L4; smaller values would also result in
+ * frames being split at L2/L3 depending on the frame size.
+ */
+ if (fbn->hds_thresh < FBNIC_HDR_BYTES_MIN) {
+ rcq_ctl = FBNIC_QUEUE_RDE_CTL0_EN_HDR_SPLIT;
+ hds_thresh = FBNIC_HDR_BYTES_MIN;
+ }
+
+ rcq_ctl |= FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PADLEN_MASK, FBNIC_RX_PAD) |
+ FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_MAX_HDR_MASK, hds_thresh) |
FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PAYLD_OFF_MASK,
FBNIC_RX_PAYLD_OFFSET) |
FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PAYLD_PG_CL_MASK,
@@ -2266,32 +2676,47 @@ static void fbnic_enable_rcq(struct fbnic_napi_vector *nv,
fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_CTL, FBNIC_QUEUE_RCQ_CTL_ENABLE);
}
-void fbnic_enable(struct fbnic_net *fbn)
+static void __fbnic_nv_enable(struct fbnic_napi_vector *nv)
{
- struct fbnic_dev *fbd = fbn->fbd;
- int i;
+ int j, t;
- for (i = 0; i < fbn->num_napi; i++) {
- struct fbnic_napi_vector *nv = fbn->napi[i];
- int j, t;
+ /* Setup Tx Queue Triads */
+ for (t = 0; t < nv->txt_count; t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
- /* Setup Tx Queue Triads */
- for (t = 0; t < nv->txt_count; t++) {
- struct fbnic_q_triad *qt = &nv->qt[t];
+ fbnic_enable_twq0(&qt->sub0);
+ fbnic_enable_twq1(&qt->sub1);
+ fbnic_enable_tcq(nv, &qt->cmpl);
+ }
- fbnic_enable_twq0(&qt->sub0);
- fbnic_enable_tcq(nv, &qt->cmpl);
- }
+ /* Setup Rx Queue Triads */
+ for (j = 0; j < nv->rxt_count; j++, t++) {
+ struct fbnic_q_triad *qt = &nv->qt[t];
- /* Setup Rx Queue Triads */
- for (j = 0; j < nv->rxt_count; j++, t++) {
- struct fbnic_q_triad *qt = &nv->qt[t];
+ page_pool_enable_direct_recycling(qt->sub0.page_pool,
+ &nv->napi);
+ page_pool_enable_direct_recycling(qt->sub1.page_pool,
+ &nv->napi);
- fbnic_enable_bdq(&qt->sub0, &qt->sub1);
- fbnic_config_drop_mode_rcq(nv, &qt->cmpl);
- fbnic_enable_rcq(nv, &qt->cmpl);
- }
+ fbnic_enable_bdq(&qt->sub0, &qt->sub1);
+ fbnic_config_drop_mode_rcq(nv, &qt->cmpl);
+ fbnic_enable_rcq(nv, &qt->cmpl);
}
+}
+
+static void fbnic_nv_enable(struct fbnic_net *fbn, struct fbnic_napi_vector *nv)
+{
+ __fbnic_nv_enable(nv);
+ fbnic_wrfl(fbn->fbd);
+}
+
+void fbnic_enable(struct fbnic_net *fbn)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i;
+
+ for (i = 0; i < fbn->num_napi; i++)
+ __fbnic_nv_enable(fbn->napi[i]);
fbnic_wrfl(fbd);
}
@@ -2310,7 +2735,7 @@ void fbnic_napi_enable(struct fbnic_net *fbn)
for (i = 0; i < fbn->num_napi; i++) {
struct fbnic_napi_vector *nv = fbn->napi[i];
- napi_enable(&nv->napi);
+ napi_enable_locked(&nv->napi);
fbnic_nv_irq_enable(nv);
@@ -2363,3 +2788,123 @@ void fbnic_napi_depletion_check(struct net_device *netdev)
fbnic_wrfl(fbd);
}
+
+static int fbnic_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ const struct fbnic_q_triad *real;
+ struct fbnic_q_triad *qt = qmem;
+ struct fbnic_napi_vector *nv;
+
+ if (!netif_running(dev))
+ return fbnic_alloc_qt_page_pools(fbn, qt, idx);
+
+ real = container_of(fbn->rx[idx], struct fbnic_q_triad, cmpl);
+ nv = fbn->napi[idx % fbn->num_napi];
+
+ fbnic_ring_init(&qt->sub0, real->sub0.doorbell, real->sub0.q_idx,
+ real->sub0.flags);
+ fbnic_ring_init(&qt->sub1, real->sub1.doorbell, real->sub1.q_idx,
+ real->sub1.flags);
+ fbnic_ring_init(&qt->cmpl, real->cmpl.doorbell, real->cmpl.q_idx,
+ real->cmpl.flags);
+
+ return fbnic_alloc_rx_qt_resources(fbn, nv, qt);
+}
+
+static void fbnic_queue_mem_free(struct net_device *dev, void *qmem)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ struct fbnic_q_triad *qt = qmem;
+
+ if (!netif_running(dev))
+ fbnic_free_qt_page_pools(qt);
+ else
+ fbnic_free_qt_resources(fbn, qt);
+}
+
+static void __fbnic_nv_restart(struct fbnic_net *fbn,
+ struct fbnic_napi_vector *nv)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i;
+
+ fbnic_nv_enable(fbn, nv);
+ fbnic_nv_fill(nv);
+
+ napi_enable_locked(&nv->napi);
+ fbnic_nv_irq_enable(nv);
+ fbnic_wr32(fbd, FBNIC_INTR_SET(nv->v_idx / 32), BIT(nv->v_idx % 32));
+ fbnic_wrfl(fbd);
+
+ for (i = 0; i < nv->txt_count; i++)
+ netif_wake_subqueue(fbn->netdev, nv->qt[i].sub0.q_idx);
+}
+
+static int fbnic_queue_start(struct net_device *dev, void *qmem, int idx)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ struct fbnic_napi_vector *nv;
+ struct fbnic_q_triad *real;
+
+ real = container_of(fbn->rx[idx], struct fbnic_q_triad, cmpl);
+ nv = fbn->napi[idx % fbn->num_napi];
+
+ fbnic_aggregate_ring_bdq_counters(fbn, &real->sub0);
+ fbnic_aggregate_ring_bdq_counters(fbn, &real->sub1);
+ fbnic_aggregate_ring_rx_counters(fbn, &real->cmpl);
+
+ memcpy(real, qmem, sizeof(*real));
+
+ __fbnic_nv_restart(fbn, nv);
+
+ return 0;
+}
+
+static int fbnic_queue_stop(struct net_device *dev, void *qmem, int idx)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ const struct fbnic_q_triad *real;
+ struct fbnic_napi_vector *nv;
+ int i, t;
+ int err;
+
+ real = container_of(fbn->rx[idx], struct fbnic_q_triad, cmpl);
+ nv = fbn->napi[idx % fbn->num_napi];
+
+ napi_disable_locked(&nv->napi);
+ fbnic_nv_irq_disable(nv);
+
+ for (i = 0; i < nv->txt_count; i++)
+ netif_stop_subqueue(dev, nv->qt[i].sub0.q_idx);
+ fbnic_nv_disable(fbn, nv);
+
+ for (t = 0; t < nv->txt_count + nv->rxt_count; t++) {
+ err = fbnic_wait_queue_idle(fbn, t >= nv->txt_count,
+ nv->qt[t].sub0.q_idx);
+ if (err)
+ goto err_restart;
+ }
+
+ fbnic_synchronize_irq(fbn->fbd, nv->v_idx);
+ fbnic_nv_flush(nv);
+
+ page_pool_disable_direct_recycling(real->sub0.page_pool);
+ page_pool_disable_direct_recycling(real->sub1.page_pool);
+
+ memcpy(qmem, real, sizeof(*real));
+
+ return 0;
+
+err_restart:
+ __fbnic_nv_restart(fbn, nv);
+ return err;
+}
+
+const struct netdev_queue_mgmt_ops fbnic_queue_mgmt_ops = {
+ .ndo_queue_mem_size = sizeof(struct fbnic_q_triad),
+ .ndo_queue_mem_alloc = fbnic_queue_mem_alloc,
+ .ndo_queue_mem_free = fbnic_queue_mem_free,
+ .ndo_queue_start = fbnic_queue_start,
+ .ndo_queue_stop = fbnic_queue_stop,
+};
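A hedged, simplified sketch of how the core drives these callbacks when restarting one Rx queue, modeled on netdev_rx_queue_restart(); the real core also restarts the old queue when starting the new one fails, which is elided here:

static int fbnic_queue_restart_sketch(struct net_device *dev, int idx)
{
	const struct netdev_queue_mgmt_ops *ops = dev->queue_mgmt_ops;
	void *new_mem, *old_mem;
	int err = -ENOMEM;

	new_mem = kzalloc(ops->ndo_queue_mem_size, GFP_KERNEL);
	old_mem = kzalloc(ops->ndo_queue_mem_size, GFP_KERNEL);
	if (!new_mem || !old_mem)
		goto out;

	err = ops->ndo_queue_mem_alloc(dev, new_mem, idx);
	if (err)
		goto out;

	/* Quiesce the old queue, then bring up its replacement */
	err = ops->ndo_queue_stop(dev, old_mem, idx);
	if (err) {
		ops->ndo_queue_mem_free(dev, new_mem);
		goto out;
	}

	err = ops->ndo_queue_start(dev, new_mem, idx);
	if (err)	/* the real core would restart the old queue here */
		ops->ndo_queue_mem_free(dev, new_mem);
	else
		ops->ndo_queue_mem_free(dev, old_mem);
out:
	kfree(new_mem);
	kfree(old_mem);
	return err;
}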
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
index 2e361d6f03ff..ca37da5a0b17 100644
--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
@@ -35,6 +35,7 @@ struct fbnic_net;
#define FBNIC_MAX_TXQS 128u
#define FBNIC_MAX_RXQS 128u
+#define FBNIC_MAX_XDPQS 128u
/* These apply to TWQs, TCQ, RCQ */
#define FBNIC_QUEUE_SIZE_MIN 16u
@@ -50,10 +51,10 @@ struct fbnic_net;
#define FBNIC_RX_TROOM \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+#define FBNIC_RX_HROOM_PAD 128
#define FBNIC_RX_HROOM \
- (ALIGN(FBNIC_RX_TROOM + NET_SKB_PAD, 128) - FBNIC_RX_TROOM)
+ (ALIGN(FBNIC_RX_TROOM + FBNIC_RX_HROOM_PAD, 128) - FBNIC_RX_TROOM)
#define FBNIC_RX_PAD 0
-#define FBNIC_RX_MAX_HDR (1536 - FBNIC_RX_PAD)
#define FBNIC_RX_PAYLD_OFFSET 0
#define FBNIC_RX_PAYLD_PG_CL 0
@@ -61,12 +62,16 @@ struct fbnic_net;
#define FBNIC_RING_F_CTX BIT(1)
#define FBNIC_RING_F_STATS BIT(2) /* Ring's stats may be used */
+#define FBNIC_HDS_THRESH_MAX \
+ (4096 - FBNIC_RX_HROOM - FBNIC_RX_TROOM - FBNIC_RX_PAD)
+#define FBNIC_HDS_THRESH_DEFAULT \
+ (1536 - FBNIC_RX_PAD)
+#define FBNIC_HDR_BYTES_MIN 128
+
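Worked arithmetic for the bounds above, assuming 64-byte cache lines so that FBNIC_RX_TROOM = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) is 320 (typical x86_64 values; the exact numbers are config-dependent):

/*   FBNIC_RX_HROOM           = ALIGN(320 + 128, 128) - 320 = 192
 *   FBNIC_HDS_THRESH_MAX     = 4096 - 192 - 320 - 0        = 3584
 *   FBNIC_HDS_THRESH_DEFAULT = 1536 - 0                    = 1536
 */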
struct fbnic_pkt_buff {
struct xdp_buff buff;
ktime_t hwtstamp;
- u32 data_truesize;
- u16 data_len;
- u16 nr_frags;
+ bool add_frag_failed;
};
struct fbnic_queue_stats {
@@ -85,18 +90,20 @@ struct fbnic_queue_stats {
u64 alloc_failed;
u64 csum_complete;
u64 csum_none;
+ u64 length_errors;
} rx;
+ struct {
+ u64 alloc_failed;
+ } bdq;
};
u64 dropped;
struct u64_stats_sync syncp;
};
-/* Pagecnt bias is long max to reserve the last bit to catch overflow
- * cases where if we overcharge the bias it will flip over to be negative.
- */
-#define PAGECNT_BIAS_MAX LONG_MAX
+#define FBNIC_PAGECNT_BIAS_MAX PAGE_SIZE
+
struct fbnic_rx_buf {
- struct page *page;
+ netmem_ref netmem;
long pagecnt_bias;
};
@@ -117,6 +124,17 @@ struct fbnic_ring {
u32 head, tail; /* Head/Tail of ring */
+ union {
+ /* Rx BDQs only */
+ struct page_pool *page_pool;
+
+ /* deferred_head caches the head for TWQ1 when an attempt is
+ * made to clean TWQ1 with zero napi_budget. It is not used
+ * for any other ring.
+ */
+ s32 deferred_head;
+ };
+
struct fbnic_queue_stats stats;
/* Slow path fields follow */
@@ -126,12 +144,12 @@ struct fbnic_ring {
struct fbnic_q_triad {
struct fbnic_ring sub0, sub1, cmpl;
+ struct xdp_rxq_info xdp_rxq;
};
struct fbnic_napi_vector {
struct napi_struct napi;
struct device *dev; /* Device for DMA unmapping */
- struct page_pool *page_pool;
struct fbnic_dev *fbd;
u16 v_idx;
@@ -141,6 +159,8 @@ struct fbnic_napi_vector {
struct fbnic_q_triad qt[];
};
+extern const struct netdev_queue_mgmt_ops fbnic_queue_mgmt_ops;
+
netdev_tx_t fbnic_xmit_frame(struct sk_buff *skb, struct net_device *dev);
netdev_features_t
fbnic_features_check(struct sk_buff *skb, struct net_device *dev,
@@ -148,8 +168,12 @@ fbnic_features_check(struct sk_buff *skb, struct net_device *dev,
void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
struct fbnic_ring *rxr);
+void fbnic_aggregate_ring_bdq_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *rxr);
void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
struct fbnic_ring *txr);
+void fbnic_aggregate_ring_xdp_counters(struct fbnic_net *fbn,
+ struct fbnic_ring *xdpr);
int fbnic_alloc_napi_vectors(struct fbnic_net *fbn);
void fbnic_free_napi_vectors(struct fbnic_net *fbn);
diff --git a/drivers/net/ethernet/microchip/lan865x/lan865x.c b/drivers/net/ethernet/microchip/lan865x/lan865x.c
index dd436bdff0f8..0277d9737369 100644
--- a/drivers/net/ethernet/microchip/lan865x/lan865x.c
+++ b/drivers/net/ethernet/microchip/lan865x/lan865x.c
@@ -32,6 +32,10 @@
/* MAC Specific Addr 1 Top Reg */
#define LAN865X_REG_MAC_H_SADDR1 0x00010023
+/* MAC TSU Timer Increment Register */
+#define LAN865X_REG_MAC_TSU_TIMER_INCR 0x00010077
+#define MAC_TSU_TIMER_INCR_COUNT_NANOSECONDS 0x0028
+
struct lan865x_priv {
struct work_struct multicast_work;
struct net_device *netdev;
@@ -311,6 +315,8 @@ static int lan865x_net_open(struct net_device *netdev)
phy_start(netdev->phydev);
+ netif_start_queue(netdev);
+
return 0;
}
@@ -320,6 +326,8 @@ static const struct net_device_ops lan865x_netdev_ops = {
.ndo_start_xmit = lan865x_send_packet,
.ndo_set_rx_mode = lan865x_set_multicast_list,
.ndo_set_mac_address = lan865x_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
};
static int lan865x_probe(struct spi_device *spi)
@@ -344,6 +352,21 @@ static int lan865x_probe(struct spi_device *spi)
goto free_netdev;
}
+ /* LAN865x Rev.B0/B1 configuration parameters from AN1760
+ * As per Configuration Application Note AN1760, Revision F
+ * (DS60001760G - June 2024), available at
+ * https://www.microchip.com/en-us/application-notes/an1760,
+ * configure the MAC to time stamp at the end of the Start of Frame
+ * Delimiter (SFD) and set the Timer Increment register to 40 ns for
+ * use as a 25 MHz internal clock.
+ */
+ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_TSU_TIMER_INCR,
+ MAC_TSU_TIMER_INCR_COUNT_NANOSECONDS);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to config TSU Timer Incr reg: %d\n",
+ ret);
+ goto oa_tc6_exit;
+ }
+
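Worked math for the register value above (illustrative check only):

/*   MAC_TSU_TIMER_INCR_COUNT_NANOSECONDS = 0x0028 = 40 decimal,
 *   interpreted as nanoseconds per timer tick, so the resulting
 *   clock rate is 1 / 40 ns = 25,000,000 Hz = 25 MHz.
 */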
/* As per point s3 in the errata below, SPI receive Ethernet frame
* transfer may halt when starting the next frame in the same data block
* (chunk) as the end of a previous frame. The RFA field should be
@@ -402,13 +425,16 @@ static void lan865x_remove(struct spi_device *spi)
free_netdev(priv->netdev);
}
-static const struct spi_device_id spidev_spi_ids[] = {
+static const struct spi_device_id lan865x_ids[] = {
{ .name = "lan8650" },
+ { .name = "lan8651" },
{},
};
+MODULE_DEVICE_TABLE(spi, lan865x_ids);
static const struct of_device_id lan865x_dt_ids[] = {
{ .compatible = "microchip,lan8650" },
+ { .compatible = "microchip,lan8651" },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, lan865x_dt_ids);
@@ -420,7 +446,7 @@ static struct spi_driver lan865x_driver = {
},
.probe = lan865x_probe,
.remove = lan865x_remove,
- .id_table = spidev_spi_ids,
+ .id_table = lan865x_ids,
};
module_spi_driver(lan865x_driver);
diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig
index 35e1c0cf345e..a4d6706590d2 100644
--- a/drivers/net/ethernet/microchip/sparx5/Kconfig
+++ b/drivers/net/ethernet/microchip/sparx5/Kconfig
@@ -3,7 +3,7 @@ config SPARX5_SWITCH
depends on NET_SWITCHDEV
depends on HAS_IOMEM
depends on OF
- depends on ARCH_SPARX5 || COMPILE_TEST
+ depends on ARCH_SPARX5 || ARCH_LAN969X || COMPILE_TEST
depends on PTP_1588_CLOCK_OPTIONAL
depends on BRIDGE || BRIDGE=n
select PHYLINK
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
index 832f4ae57c83..049541eeaae0 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
@@ -1212,6 +1212,22 @@ static int sparx5_get_ts_info(struct net_device *dev,
return 0;
}
+static void sparx5_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+
+ phylink_ethtool_get_pauseparam(port->phylink, pause);
+}
+
+static int sparx5_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct sparx5_port *port = netdev_priv(dev);
+
+ return phylink_ethtool_set_pauseparam(port->phylink, pause);
+}
+
const struct ethtool_ops sparx5_ethtool_ops = {
.get_sset_count = sparx5_get_sset_count,
.get_strings = sparx5_get_sset_strings,
@@ -1224,6 +1240,8 @@ const struct ethtool_ops sparx5_ethtool_ops = {
.get_eth_ctrl_stats = sparx5_get_eth_mac_ctrl_stats,
.get_rmon_stats = sparx5_get_eth_rmon_stats,
.get_ts_info = sparx5_get_ts_info,
+ .get_pauseparam = sparx5_get_pauseparam,
+ .set_pauseparam = sparx5_set_pauseparam,
};
int sparx_stats_init(struct sparx5 *sparx5)
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index 74ad1d73b465..40b1bfc600a7 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -708,6 +708,11 @@ static int sparx5_start(struct sparx5 *sparx5)
/* Init masks */
sparx5_update_fwd(sparx5);
+ /* Init flood masks */
+ for (int pgid = sparx5_get_pgid(sparx5, PGID_UC_FLOOD);
+ pgid <= sparx5_get_pgid(sparx5, PGID_BCAST); pgid++)
+ sparx5_pgid_clear(sparx5, pgid);
+
/* CPU copy CPU pgids */
spx5_wr(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1), sparx5,
ANA_AC_PGID_MISC_CFG(sparx5_get_pgid(sparx5, PGID_CPU)));
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
index bc9ecb9392cd..0a71abbd3da5 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
@@ -176,6 +176,7 @@ static int sparx5_port_bridge_join(struct sparx5_port *port,
struct net_device *bridge,
struct netlink_ext_ack *extack)
{
+ struct switchdev_brport_flags flags = {0};
struct sparx5 *sparx5 = port->sparx5;
struct net_device *ndev = port->ndev;
int err;
@@ -205,6 +206,11 @@ static int sparx5_port_bridge_join(struct sparx5_port *port,
*/
__dev_mc_unsync(ndev, sparx5_mc_unsync);
+ /* Enable uc/mc/bc flooding */
+ flags.mask = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
+ flags.val = flags.mask;
+ sparx5_port_attr_bridge_flags(port, flags);
+
return 0;
err_switchdev_offload:
@@ -215,6 +221,7 @@ err_switchdev_offload:
static void sparx5_port_bridge_leave(struct sparx5_port *port,
struct net_device *bridge)
{
+ struct switchdev_brport_flags flags = {0};
struct sparx5 *sparx5 = port->sparx5;
switchdev_bridge_port_unoffload(port->ndev, NULL, NULL, NULL);
@@ -234,6 +241,11 @@ static void sparx5_port_bridge_leave(struct sparx5_port *port,
/* Port enters host mode, therefore restore the mc list */
__dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync);
+
+ /* Disable uc/mc/bc flooding */
+ flags.mask = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
+ flags.val = 0;
+ sparx5_port_attr_bridge_flags(port, flags);
}
static int sparx5_port_changeupper(struct net_device *dev,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
index d42097aa60a0..494782871903 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
@@ -167,16 +167,6 @@ void sparx5_update_fwd(struct sparx5 *sparx5)
/* Divide up fwd mask in 32 bit words */
bitmap_to_arr32(mask, sparx5->bridge_fwd_mask, SPX5_PORTS);
- /* Update flood masks */
- for (port = sparx5_get_pgid(sparx5, PGID_UC_FLOOD);
- port <= sparx5_get_pgid(sparx5, PGID_BCAST); port++) {
- spx5_wr(mask[0], sparx5, ANA_AC_PGID_CFG(port));
- if (is_sparx5(sparx5)) {
- spx5_wr(mask[1], sparx5, ANA_AC_PGID_CFG1(port));
- spx5_wr(mask[2], sparx5, ANA_AC_PGID_CFG2(port));
- }
- }
-
/* Update SRC masks */
for (port = 0; port < sparx5->data->consts->n_ports; port++) {
if (test_bit(port, sparx5->bridge_fwd_mask)) {
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index ef072e24c46d..ada6c78a2bef 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -881,7 +881,12 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
if (!wait_for_completion_timeout(&ctx->comp_event,
(msecs_to_jiffies(hwc->hwc_timeout)))) {
if (hwc->hwc_timeout != 0)
- dev_err(hwc->dev, "HWC: Request timed out!\n");
+ dev_err(hwc->dev, "HWC: Request timed out: %u ms\n",
+ hwc->hwc_timeout);
+
+ /* Reduce further waiting if the HWC is not responding */
+ if (hwc->hwc_timeout > 1)
+ hwc->hwc_timeout = 1;
err = -ETIMEDOUT;
goto out;
diff --git a/drivers/net/ethernet/microsoft/mana/mana_bpf.c b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
index d30721d4516f..7697c9b52ed3 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_bpf.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
@@ -174,6 +174,7 @@ static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog,
struct mana_port_context *apc = netdev_priv(ndev);
struct bpf_prog *old_prog;
struct gdma_context *gc;
+ int err;
gc = apc->ac->gdma_dev->gdma_context;
@@ -195,11 +196,45 @@ static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog,
*/
apc->bpf_prog = prog;
- if (old_prog)
- bpf_prog_put(old_prog);
+ if (apc->port_is_up) {
+ /* Re-create the rxq's after an xdp prog is loaded or unloaded.
+ * Ex: re-create the rxq's to switch from full pages to smaller
+ * page fragments when the xdp prog is unloaded, and
+ * vice versa.
+ */
+
+ /* Pre-allocate buffers to prevent failure in mana_attach */
+ err = mana_pre_alloc_rxbufs(apc, ndev->mtu, apc->num_queues);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "XDP: Insufficient memory for tx/rx re-config");
+ return err;
+ }
+
+ err = mana_detach(ndev, false);
+ if (err) {
+ netdev_err(ndev,
+ "mana_detach failed at xdp set: %d\n", err);
+ NL_SET_ERR_MSG_MOD(extack,
+ "XDP: Re-config failed at detach");
+ goto err_dealloc_rxbuffs;
+ }
+
+ err = mana_attach(ndev);
+ if (err) {
+ netdev_err(ndev,
+ "mana_attach failed at xdp set: %d\n", err);
+ NL_SET_ERR_MSG_MOD(extack,
+ "XDP: Re-config failed at attach");
+ goto err_dealloc_rxbuffs;
+ }
- if (apc->port_is_up)
mana_chn_setxdp(apc, prog);
+ mana_pre_dealloc_rxbufs(apc);
+ }
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
if (prog)
ndev->max_mtu = MANA_XDP_MTU_MAX;
@@ -207,6 +242,11 @@ static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog,
ndev->max_mtu = gc->adapter_mtu - ETH_HLEN;
return 0;
+
+err_dealloc_rxbuffs:
+ apc->bpf_prog = old_prog;
+ mana_pre_dealloc_rxbufs(apc);
+ return err;
}
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
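A hedged outline of the re-config sequence added in mana_xdp_set() above (helper names are from this driver; flow simplified, locking elided):

/*   mana_pre_alloc_rxbufs(apc, mtu, nqueues);  // reserve new Rx buffers
 *   mana_detach(ndev, false);                  // tear down old queues
 *   mana_attach(ndev);                         // rebuild with new layout
 *   mana_chn_setxdp(apc, prog);                // install prog on channels
 *   mana_pre_dealloc_rxbufs(apc);              // drop unused reserves
 *
 * On any failure after the pre-alloc, the error path restores old_prog
 * and frees the pre-allocated buffers (err_dealloc_rxbuffs).
 */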
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 550843e2164b..0142fd98392c 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -57,6 +57,15 @@ static bool mana_en_need_log(struct mana_port_context *apc, int err)
return true;
}
+static void mana_put_rx_page(struct mana_rxq *rxq, struct page *page,
+ bool from_pool)
+{
+ if (from_pool)
+ page_pool_put_full_page(rxq->page_pool, page, false);
+ else
+ put_page(page);
+}
+
/* Microsoft Azure Network Adapter (MANA) functions */
static int mana_open(struct net_device *ndev)
@@ -630,21 +639,40 @@ static void *mana_get_rxbuf_pre(struct mana_rxq *rxq, dma_addr_t *da)
}
/* Get RX buffer's data size, alloc size, XDP headroom based on MTU */
-static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size,
- u32 *headroom)
+static void mana_get_rxbuf_cfg(struct mana_port_context *apc,
+ int mtu, u32 *datasize, u32 *alloc_size,
+ u32 *headroom, u32 *frag_count)
{
- if (mtu > MANA_XDP_MTU_MAX)
- *headroom = 0; /* no support for XDP */
- else
- *headroom = XDP_PACKET_HEADROOM;
+ u32 len, buf_size;
- *alloc_size = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD + *headroom);
+ /* Calculate datasize first (consistent across all cases) */
+ *datasize = mtu + ETH_HLEN;
- /* Using page pool in this case, so alloc_size is PAGE_SIZE */
- if (*alloc_size < PAGE_SIZE)
- *alloc_size = PAGE_SIZE;
+ /* For xdp and jumbo frames make sure only one packet fits per page */
+ if (mtu + MANA_RXBUF_PAD > PAGE_SIZE / 2 || mana_xdp_get(apc)) {
+ if (mana_xdp_get(apc)) {
+ *headroom = XDP_PACKET_HEADROOM;
+ *alloc_size = PAGE_SIZE;
+ } else {
+ *headroom = 0; /* no support for XDP */
+ *alloc_size = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD +
+ *headroom);
+ }
- *datasize = mtu + ETH_HLEN;
+ *frag_count = 1;
+ return;
+ }
+
+ /* Standard MTU case - optimize for multiple packets per page */
+ *headroom = 0;
+
+ /* Calculate base buffer size needed */
+ len = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD + *headroom);
+ buf_size = ALIGN(len, MANA_RX_FRAG_ALIGNMENT);
+
+ /* Calculate how many packets can fit in a page */
+ *frag_count = PAGE_SIZE / buf_size;
+ *alloc_size = buf_size;
}
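A hedged example of the math above for a 1500-byte MTU on 4 KiB pages, assuming MANA_RXBUF_PAD = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + ETH_HLEN = 334 and MANA_RX_FRAG_ALIGNMENT = 64 (typical x86_64 values):

/*   len        = SKB_DATA_ALIGN(1500 + 334) = 1856
 *   buf_size   = ALIGN(1856, 64)            = 1856
 *   frag_count = 4096 / 1856                = 2 buffers per page
 */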
int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_queues)
@@ -656,8 +684,9 @@ int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_qu
void *va;
int i;
- mana_get_rxbuf_cfg(new_mtu, &mpc->rxbpre_datasize,
- &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom);
+ mana_get_rxbuf_cfg(mpc, new_mtu, &mpc->rxbpre_datasize,
+ &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom,
+ &mpc->rxbpre_frag_count);
dev = mpc->ac->gdma_dev->gdma_context->dev;
@@ -1842,8 +1871,11 @@ drop_xdp:
drop:
if (from_pool) {
- page_pool_recycle_direct(rxq->page_pool,
- virt_to_head_page(buf_va));
+ if (rxq->frag_count == 1)
+ page_pool_recycle_direct(rxq->page_pool,
+ virt_to_head_page(buf_va));
+ else
+ page_pool_free_va(rxq->page_pool, buf_va, true);
} else {
WARN_ON_ONCE(rxq->xdp_save_va);
/* Save for reuse */
@@ -1859,33 +1891,46 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
dma_addr_t *da, bool *from_pool)
{
struct page *page;
+ u32 offset;
void *va;
-
*from_pool = false;
- /* Reuse XDP dropped page if available */
- if (rxq->xdp_save_va) {
- va = rxq->xdp_save_va;
- rxq->xdp_save_va = NULL;
- } else {
- page = page_pool_dev_alloc_pages(rxq->page_pool);
- if (!page)
+ /* Don't use fragments for jumbo frames or XDP, where a single
+ * fragment occupies the whole page.
+ */
+ if (rxq->frag_count == 1) {
+ /* Reuse XDP dropped page if available */
+ if (rxq->xdp_save_va) {
+ va = rxq->xdp_save_va;
+ page = virt_to_head_page(va);
+ rxq->xdp_save_va = NULL;
+ } else {
+ page = page_pool_dev_alloc_pages(rxq->page_pool);
+ if (!page)
+ return NULL;
+
+ *from_pool = true;
+ va = page_to_virt(page);
+ }
+
+ *da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, *da)) {
+ mana_put_rx_page(rxq, page, *from_pool);
return NULL;
+ }
- *from_pool = true;
- va = page_to_virt(page);
+ return va;
}
- *da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, *da)) {
- if (*from_pool)
- page_pool_put_full_page(rxq->page_pool, page, false);
- else
- put_page(virt_to_head_page(va));
-
+ page = page_pool_dev_alloc_frag(rxq->page_pool, &offset,
+ rxq->alloc_size);
+ if (!page)
return NULL;
- }
+
+ va = page_to_virt(page) + offset;
+ *da = page_pool_get_dma_addr(page) + offset + rxq->headroom;
+ *from_pool = true;
return va;
}
@@ -1902,9 +1947,9 @@ static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
if (!va)
return;
-
- dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize,
- DMA_FROM_DEVICE);
+ if (!rxoob->from_pool || rxq->frag_count == 1)
+ dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize,
+ DMA_FROM_DEVICE);
*old_buf = rxoob->buf_va;
*old_fp = rxoob->from_pool;
@@ -2100,10 +2145,8 @@ static void mana_destroy_txq(struct mana_port_context *apc)
napi = &apc->tx_qp[i].tx_cq.napi;
if (apc->tx_qp[i].txq.napi_initialized) {
napi_synchronize(napi);
- netdev_lock_ops_to_full(napi->dev);
napi_disable_locked(napi);
netif_napi_del_locked(napi);
- netdev_unlock_full_to_ops(napi->dev);
apc->tx_qp[i].txq.napi_initialized = false;
}
mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
@@ -2256,10 +2299,8 @@ static int mana_create_txq(struct mana_port_context *apc,
mana_create_txq_debugfs(apc, i);
set_bit(NAPI_STATE_NO_BUSY_POLL, &cq->napi.state);
- netdev_lock_ops_to_full(net);
netif_napi_add_locked(net, &cq->napi, mana_poll);
napi_enable_locked(&cq->napi);
- netdev_unlock_full_to_ops(net);
txq->napi_initialized = true;
mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
@@ -2295,10 +2336,8 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
if (napi_initialized) {
napi_synchronize(napi);
- netdev_lock_ops_to_full(napi->dev);
napi_disable_locked(napi);
netif_napi_del_locked(napi);
- netdev_unlock_full_to_ops(napi->dev);
}
xdp_rxq_info_unreg(&rxq->xdp_rxq);
@@ -2315,15 +2354,15 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
if (!rx_oob->buf_va)
continue;
- dma_unmap_single(dev, rx_oob->sgl[0].address,
- rx_oob->sgl[0].size, DMA_FROM_DEVICE);
-
page = virt_to_head_page(rx_oob->buf_va);
- if (rx_oob->from_pool)
- page_pool_put_full_page(rxq->page_pool, page, false);
- else
- put_page(page);
+ if (rxq->frag_count == 1 || !rx_oob->from_pool) {
+ dma_unmap_single(dev, rx_oob->sgl[0].address,
+ rx_oob->sgl[0].size, DMA_FROM_DEVICE);
+ mana_put_rx_page(rxq, page, rx_oob->from_pool);
+ } else {
+ page_pool_free_va(rxq->page_pool, rx_oob->buf_va, true);
+ }
rx_oob->buf_va = NULL;
}
@@ -2429,11 +2468,22 @@ static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
struct page_pool_params pprm = {};
int ret;
- pprm.pool_size = mpc->rx_queue_size;
+ pprm.pool_size = mpc->rx_queue_size / rxq->frag_count + 1;
pprm.nid = gc->numa_node;
pprm.napi = &rxq->rx_cq.napi;
pprm.netdev = rxq->ndev;
pprm.order = get_order(rxq->alloc_size);
+ pprm.queue_idx = rxq->rxq_idx;
+ pprm.dev = gc->dev;
+
+ /* Let the page pool do the DMA mapping when page sharing with
+ * multiple fragments is enabled for the Rx buffers.
+ */
+ if (rxq->frag_count > 1) {
+ pprm.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pprm.max_len = PAGE_SIZE;
+ pprm.dma_dir = DMA_FROM_DEVICE;
+ }
rxq->page_pool = page_pool_create(&pprm);
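A minimal sketch (illustrative values, not driver code) of a page pool set up for sub-page fragment allocation as above; with PP_FLAG_DMA_MAP and PP_FLAG_DMA_SYNC_DEV the pool owns the mapping and device sync, so the driver no longer calls dma_map_single()/dma_unmap_single() itself:

struct page_pool_params pprm = {
	.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
	.pool_size	= 256,		/* e.g. rx_queue_size / frag_count + 1 */
	.nid		= NUMA_NO_NODE,
	.dev		= dma_dev,	/* assumed DMA-capable device */
	.dma_dir	= DMA_FROM_DEVICE,
	.max_len	= PAGE_SIZE,
	.order		= 0,
};
struct page_pool *pool = page_pool_create(&pprm);
unsigned int offset;
/* Each call returns a page plus an offset for one Rx buffer fragment */
struct page *page = page_pool_dev_alloc_frag(pool, &offset, 1856);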
@@ -2472,9 +2522,8 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
rxq->rxq_idx = rxq_idx;
rxq->rxobj = INVALID_MANA_HANDLE;
- mana_get_rxbuf_cfg(ndev->mtu, &rxq->datasize, &rxq->alloc_size,
- &rxq->headroom);
-
+ mana_get_rxbuf_cfg(apc, ndev->mtu, &rxq->datasize, &rxq->alloc_size,
+ &rxq->headroom, &rxq->frag_count);
/* Create page pool for RX queue */
err = mana_create_page_pool(rxq, gc);
if (err) {
@@ -2549,18 +2598,14 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
gc->cq_table[cq->gdma_id] = cq->gdma_cq;
- netdev_lock_ops_to_full(ndev);
netif_napi_add_weight_locked(ndev, &cq->napi, mana_poll, 1);
- netdev_unlock_full_to_ops(ndev);
WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
cq->napi.napi_id));
WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
rxq->page_pool));
- netdev_lock_ops_to_full(ndev);
napi_enable_locked(&cq->napi);
- netdev_unlock_full_to_ops(ndev);
mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
out:
diff --git a/drivers/net/ethernet/mscc/ocelot_stats.c b/drivers/net/ethernet/mscc/ocelot_stats.c
index 545710dadcf5..d2be1be37716 100644
--- a/drivers/net/ethernet/mscc/ocelot_stats.c
+++ b/drivers/net/ethernet/mscc/ocelot_stats.c
@@ -1021,6 +1021,6 @@ int ocelot_stats_init(struct ocelot *ocelot)
void ocelot_stats_deinit(struct ocelot *ocelot)
{
- cancel_delayed_work(&ocelot->stats_work);
+ disable_delayed_work_sync(&ocelot->stats_work);
destroy_workqueue(ocelot->stats_queue);
}
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 56d5464222d9..cdbf82affa7b 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -820,7 +820,7 @@ static void rx_irq(struct net_device *ndev)
struct ns83820 *dev = PRIV(ndev);
struct rx_info *info = &dev->rx_info;
unsigned next_rx;
- int rx_rc, len;
+ int len;
u32 cmdsts;
__le32 *desc;
unsigned long flags;
@@ -881,8 +881,10 @@ static void rx_irq(struct net_device *ndev)
if (likely(CMDSTS_OK & cmdsts)) {
#endif
skb_put(skb, len);
- if (unlikely(!skb))
+ if (unlikely(!skb)) {
+ ndev->stats.rx_dropped++;
goto netdev_mangle_me_harder_failed;
+ }
if (cmdsts & CMDSTS_DEST_MULTI)
ndev->stats.multicast++;
ndev->stats.rx_packets++;
@@ -901,15 +903,12 @@ static void rx_irq(struct net_device *ndev)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_IPV6), tag);
}
#endif
- rx_rc = netif_rx(skb);
- if (NET_RX_DROP == rx_rc) {
-netdev_mangle_me_harder_failed:
- ndev->stats.rx_dropped++;
- }
+ netif_rx(skb);
} else {
dev_kfree_skb_irq(skb);
}
+netdev_mangle_me_harder_failed:
nr++;
next_rx = info->next_rx;
desc = info->descs + (DESC_SIZE * next_rx);
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/tls.c b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
index f80f1a6953fa..f252ecdcd2cd 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/tls.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
@@ -495,14 +495,13 @@ int nfp_net_tls_rx_resync_req(struct net_device *netdev,
switch (ipv6h->version) {
case 4:
- sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
- iph->saddr, th->source, iph->daddr,
- th->dest, netdev->ifindex);
+ sk = inet_lookup_established(net, iph->saddr, th->source,
+ iph->daddr, th->dest,
+ netdev->ifindex);
break;
#if IS_ENABLED(CONFIG_IPV6)
case 6:
- sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
- &ipv6h->saddr, th->source,
+ sk = __inet6_lookup_established(net, &ipv6h->saddr, th->source,
&ipv6h->daddr, ntohs(th->dest),
netdev->ifindex, 0);
break;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 80e4675582bf..dde60c4572fa 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -564,8 +564,8 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
/* Init ring buffer and unallocated stats_ids. */
priv->stats_ids.free_list.buf =
- vmalloc(array_size(NFP_FL_STATS_ELEM_RS,
- priv->stats_ring_size));
+ vmalloc_array(priv->stats_ring_size,
+ NFP_FL_STATS_ELEM_RS);
if (!priv->stats_ids.free_list.buf)
goto err_free_last_used;
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
index 08086eb76996..91a227929a5f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
@@ -1169,14 +1169,10 @@ int nfp_nfd3_poll(struct napi_struct *napi, int budget)
if (r_vec->nfp_net->rx_coalesce_adapt_on && r_vec->rx_ring) {
struct dim_sample dim_sample = {};
- unsigned int start;
u64 pkts, bytes;
- do {
- start = u64_stats_fetch_begin(&r_vec->rx_sync);
- pkts = r_vec->rx_pkts;
- bytes = r_vec->rx_bytes;
- } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
+ pkts = r_vec->rx_pkts;
+ bytes = r_vec->rx_bytes;
dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
net_dim(&r_vec->rx_dim, &dim_sample);
@@ -1184,14 +1180,10 @@ int nfp_nfd3_poll(struct napi_struct *napi, int budget)
if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) {
struct dim_sample dim_sample = {};
- unsigned int start;
u64 pkts, bytes;
- do {
- start = u64_stats_fetch_begin(&r_vec->tx_sync);
- pkts = r_vec->tx_pkts;
- bytes = r_vec->tx_bytes;
- } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
+ pkts = r_vec->tx_pkts;
+ bytes = r_vec->tx_bytes;
dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
net_dim(&r_vec->tx_dim, &dim_sample);
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
index ab3cd06ed63e..ee0db3d5fd66 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
@@ -1279,14 +1279,10 @@ int nfp_nfdk_poll(struct napi_struct *napi, int budget)
if (r_vec->nfp_net->rx_coalesce_adapt_on && r_vec->rx_ring) {
struct dim_sample dim_sample = {};
- unsigned int start;
u64 pkts, bytes;
- do {
- start = u64_stats_fetch_begin(&r_vec->rx_sync);
- pkts = r_vec->rx_pkts;
- bytes = r_vec->rx_bytes;
- } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
+ pkts = r_vec->rx_pkts;
+ bytes = r_vec->rx_bytes;
dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
net_dim(&r_vec->rx_dim, &dim_sample);
@@ -1294,14 +1290,10 @@ int nfp_nfdk_poll(struct napi_struct *napi, int budget)
if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) {
struct dim_sample dim_sample = {};
- unsigned int start;
u64 pkts, bytes;
- do {
- start = u64_stats_fetch_begin(&r_vec->tx_sync);
- pkts = r_vec->tx_pkts;
- bytes = r_vec->tx_bytes;
- } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
+ pkts = r_vec->tx_pkts;
+ bytes = r_vec->tx_bytes;
dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
net_dim(&r_vec->tx_dim, &dim_sample);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 71301dbd8fb5..48390b2fd44d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -797,7 +797,7 @@ static int nfp_pci_probe(struct pci_dev *pdev,
pf->pdev = pdev;
pf->dev_info = dev_info;
- pf->wq = alloc_workqueue("nfp-%s", 0, 2, pci_name(pdev));
+ pf->wq = alloc_workqueue("nfp-%s", WQ_PERCPU, 2, pci_name(pdev));
if (!pf->wq) {
err = -ENOMEM;
goto err_pci_priv_unset;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index a36215195923..16c828dd5c1a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -1788,7 +1788,7 @@ static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev)
struct nfp_net *nn = netdev_priv(netdev);
if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
- return -EOPNOTSUPP;
+ return 0;
return nfp_net_rss_key_sz(nn);
}
diff --git a/drivers/net/ethernet/oa_tc6.c b/drivers/net/ethernet/oa_tc6.c
index db200e4ec284..91a906a7918a 100644
--- a/drivers/net/ethernet/oa_tc6.c
+++ b/drivers/net/ethernet/oa_tc6.c
@@ -1249,7 +1249,8 @@ struct oa_tc6 *oa_tc6_init(struct spi_device *spi, struct net_device *netdev)
/* Set the SPI controller to pump at realtime priority */
tc6->spi->rt = true;
- spi_setup(tc6->spi);
+ if (spi_setup(tc6->spi) < 0)
+ return NULL;
tc6->spi_ctrl_tx_buf = devm_kzalloc(&tc6->spi->dev,
OA_TC6_CTRL_SPI_BUF_SIZE,
diff --git a/drivers/net/ethernet/pensando/Kconfig b/drivers/net/ethernet/pensando/Kconfig
index 01fe76786f77..c99758adf3ad 100644
--- a/drivers/net/ethernet/pensando/Kconfig
+++ b/drivers/net/ethernet/pensando/Kconfig
@@ -24,6 +24,7 @@ config IONIC
select NET_DEVLINK
select DIMLIB
select PAGE_POOL
+ select AUXILIARY_BUS
help
This enables the support for the Pensando family of Ethernet
adapters. More specific information on this driver can be
diff --git a/drivers/net/ethernet/pensando/ionic/Makefile b/drivers/net/ethernet/pensando/ionic/Makefile
index 4e7642a2d25f..a598972fef41 100644
--- a/drivers/net/ethernet/pensando/ionic/Makefile
+++ b/drivers/net/ethernet/pensando/ionic/Makefile
@@ -5,5 +5,5 @@ obj-$(CONFIG_IONIC) := ionic.o
ionic-y := ionic_main.o ionic_bus_pci.o ionic_devlink.o ionic_dev.o \
ionic_debugfs.o ionic_lif.o ionic_rx_filter.o ionic_ethtool.o \
- ionic_txrx.o ionic_stats.o ionic_fw.o
+ ionic_txrx.o ionic_stats.o ionic_fw.o ionic_aux.o
ionic-$(CONFIG_PTP_1588_CLOCK) += ionic_phc.o
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index 04f00ea94230..85198e6a806e 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -65,16 +65,9 @@ struct ionic {
int watchdog_period;
};
-struct ionic_admin_ctx {
- struct completion work;
- union ionic_adminq_cmd cmd;
- union ionic_adminq_comp comp;
-};
-
int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx,
const int err, const bool do_msg);
-int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode,
u8 status, int err);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_api.h b/drivers/net/ethernet/pensando/ionic/ionic_api.h
new file mode 100644
index 000000000000..bd88666836b8
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_api.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#ifndef _IONIC_API_H_
+#define _IONIC_API_H_
+
+#include <linux/auxiliary_bus.h>
+#include "ionic_if.h"
+#include "ionic_regs.h"
+
+/**
+ * struct ionic_aux_dev - Auxiliary device information
+ * @lif: Logical interface
+ * @idx: Index identifier
+ * @adev: Auxiliary device
+ */
+struct ionic_aux_dev {
+ struct ionic_lif *lif;
+ int idx;
+ struct auxiliary_device adev;
+};
+
+/**
+ * struct ionic_admin_ctx - Admin command context
+ * @work: Work completion wait queue element
+ * @cmd: Admin command (64B) to be copied to the queue
+ * @comp: Admin completion (16B) copied from the queue
+ */
+struct ionic_admin_ctx {
+ struct completion work;
+ union ionic_adminq_cmd cmd;
+ union ionic_adminq_comp comp;
+};
+
+#define IONIC_INTR_INDEX_NOT_ASSIGNED -1
+#define IONIC_INTR_NAME_MAX_SZ 32
+
+/**
+ * struct ionic_intr_info - Interrupt information
+ * @name: Name identifier
+ * @rearm_count: Interrupt rearm count
+ * @index: Interrupt index position
+ * @vector: Interrupt number
+ * @dim_coal_hw: Interrupt coalesce value in hardware units
+ * @affinity_mask: CPU affinity mask
+ * @aff_notify: context for notification of IRQ affinity changes
+ */
+struct ionic_intr_info {
+ char name[IONIC_INTR_NAME_MAX_SZ];
+ u64 rearm_count;
+ unsigned int index;
+ unsigned int vector;
+ u32 dim_coal_hw;
+ cpumask_var_t *affinity_mask;
+ struct irq_affinity_notify aff_notify;
+};
+
+/**
+ * ionic_adminq_post_wait - Post an admin command and wait for response
+ * @lif: Logical interface
+ * @ctx: API admin command context
+ *
+ * Post the command to an admin queue in the ethernet driver. A
+ * successful return means only that the command was posted, not that
+ * it has completed; on success the completion callback will
+ * eventually be called.
+ *
+ * Return: zero or negative error status
+ */
+int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
+
+/**
+ * ionic_error_to_errno - Transform ionic_if errors to os errno
+ * @code: Ionic error number
+ *
+ * Return: Negative OS error number or zero
+ */
+int ionic_error_to_errno(enum ionic_status_code code);
+
+/**
+ * ionic_request_rdma_reset - request a reset or disable of the device or lif
+ * @lif: Logical interface
+ *
+ * The reset is requested via a device command; this call waits until
+ * the request completes or times out.
+ */
+void ionic_request_rdma_reset(struct ionic_lif *lif);
+
+/**
+ * ionic_intr_alloc - Reserve a device interrupt
+ * @lif: Logical interface
+ * @intr: Reserved ionic interrupt structure
+ *
+ * Reserve an interrupt index and get irq number for that index.
+ *
+ * Return: zero or negative error status
+ */
+int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr);
+
+/**
+ * ionic_intr_free - Release a device interrupt index
+ * @lif: Logical interface
+ * @intr: Interrupt index
+ *
+ * Mark the interrupt index unused so that it can be reserved again.
+ */
+void ionic_intr_free(struct ionic_lif *lif, int intr);
+
+/**
+ * ionic_get_cmb - Reserve cmb pages
+ * @lif: Logical interface
+ * @pgid: First page index
+ * @pgaddr: First page bus addr (contiguous)
+ * @order: Log base two number of pages (PAGE_SIZE)
+ * @stride_log2: Size of stride to determine CMB pool
+ * @expdb: Will be set to true if this CMB region has expdb enabled
+ *
+ * Return: zero or negative error status
+ */
+int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr,
+ int order, u8 stride_log2, bool *expdb);
+
+/**
+ * ionic_put_cmb - Release cmb pages
+ * @lif: Logical interface
+ * @pgid: First page index
+ * @order: Log base two number of pages (PAGE_SIZE)
+ */
+void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order);
+
+#endif /* _IONIC_API_H_ */
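A hedged sketch of how an auxiliary (e.g. RDMA) consumer might use this API; ionic_fill_devinfo_cmd() is a made-up helper standing in for whatever encodes the union ionic_adminq_cmd:

static int ionic_aux_dev_identify(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
	};

	ionic_fill_devinfo_cmd(&ctx.cmd);	/* hypothetical encoder */

	/* Posts to the Ethernet driver's adminq and sleeps on ctx.work */
	return ionic_adminq_post_wait(lif, &ctx);
}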
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_aux.c b/drivers/net/ethernet/pensando/ionic/ionic_aux.c
new file mode 100644
index 000000000000..a2be338eb3e5
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_aux.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/kernel.h>
+#include "ionic.h"
+#include "ionic_lif.h"
+#include "ionic_aux.h"
+
+static DEFINE_IDA(aux_ida);
+
+static void ionic_auxbus_release(struct device *dev)
+{
+ struct ionic_aux_dev *ionic_adev;
+
+ ionic_adev = container_of(dev, struct ionic_aux_dev, adev.dev);
+ ida_free(&aux_ida, ionic_adev->adev.id);
+ kfree(ionic_adev);
+}
+
+int ionic_auxbus_register(struct ionic_lif *lif)
+{
+ struct ionic_aux_dev *ionic_adev;
+ struct auxiliary_device *aux_dev;
+ int err, id;
+
+ if (!(le64_to_cpu(lif->ionic->ident.lif.capabilities) & IONIC_LIF_CAP_RDMA))
+ return 0;
+
+ ionic_adev = kzalloc(sizeof(*ionic_adev), GFP_KERNEL);
+ if (!ionic_adev)
+ return -ENOMEM;
+
+ aux_dev = &ionic_adev->adev;
+
+ id = ida_alloc(&aux_ida, GFP_KERNEL);
+ if (id < 0) {
+ dev_err(lif->ionic->dev, "Failed to allocate aux id: %d\n", id);
+ kfree(ionic_adev);
+ return id;
+ }
+
+ aux_dev->id = id;
+ aux_dev->name = "rdma";
+ aux_dev->dev.parent = &lif->ionic->pdev->dev;
+ aux_dev->dev.release = ionic_auxbus_release;
+ ionic_adev->lif = lif;
+ err = auxiliary_device_init(aux_dev);
+ if (err) {
+ dev_err(lif->ionic->dev, "Failed to initialize %s aux device: %d\n",
+ aux_dev->name, err);
+ ida_free(&aux_ida, id);
+ kfree(ionic_adev);
+ return err;
+ }
+
+ err = auxiliary_device_add(aux_dev);
+ if (err) {
+ dev_err(lif->ionic->dev, "Failed to add %s aux device: %d\n",
+ aux_dev->name, err);
+ auxiliary_device_uninit(aux_dev);
+ return err;
+ }
+
+ lif->ionic_adev = ionic_adev;
+ return 0;
+}
+
+void ionic_auxbus_unregister(struct ionic_lif *lif)
+{
+ mutex_lock(&lif->adev_lock);
+ if (!lif->ionic_adev)
+ goto out;
+
+ auxiliary_device_delete(&lif->ionic_adev->adev);
+ auxiliary_device_uninit(&lif->ionic_adev->adev);
+
+ lif->ionic_adev = NULL;
+out:
+ mutex_unlock(&lif->adev_lock);
+}
+
+void ionic_request_rdma_reset(struct ionic_lif *lif)
+{
+ struct ionic *ionic = lif->ionic;
+ int err;
+
+ union ionic_dev_cmd cmd = {
+ .cmd.opcode = IONIC_CMD_RDMA_RESET_LIF,
+ .cmd.lif_index = cpu_to_le16(lif->index),
+ };
+
+ mutex_lock(&ionic->dev_cmd_lock);
+
+ ionic_dev_cmd_go(&ionic->idev, &cmd);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+
+ mutex_unlock(&ionic->dev_cmd_lock);
+
+ if (err)
+ pr_warn("%s request_reset: error %d\n", __func__, err);
+}
+EXPORT_SYMBOL_NS(ionic_request_rdma_reset, "NET_IONIC");
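A hedged sketch of the consumer side: an auxiliary driver binding to the "rdma" device registered above. Auxiliary bus matching uses the registering module's KBUILD_MODNAME plus the device name, assumed here to yield "ionic.rdma"; the probe/remove callbacks are hypothetical placeholders:

static const struct auxiliary_device_id ionic_rdma_id_table[] = {
	{ .name = "ionic.rdma" },
	{},
};
MODULE_DEVICE_TABLE(auxiliary, ionic_rdma_id_table);

static struct auxiliary_driver ionic_rdma_driver = {
	.probe		= ionic_rdma_probe,	/* hypothetical */
	.remove		= ionic_rdma_remove,	/* hypothetical */
	.id_table	= ionic_rdma_id_table,
};
module_auxiliary_driver(ionic_rdma_driver);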
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_aux.h b/drivers/net/ethernet/pensando/ionic/ionic_aux.h
new file mode 100644
index 000000000000..f5528a9f187d
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_aux.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#ifndef _IONIC_AUX_H_
+#define _IONIC_AUX_H_
+
+int ionic_auxbus_register(struct ionic_lif *lif);
+void ionic_auxbus_unregister(struct ionic_lif *lif);
+
+#endif /* _IONIC_AUX_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index 136bfa3516d0..70d86c5f52fb 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -9,6 +9,7 @@
#include "ionic.h"
#include "ionic_bus.h"
#include "ionic_lif.h"
+#include "ionic_aux.h"
#include "ionic_debugfs.h"
/* Supported devices */
@@ -271,6 +272,8 @@ static int ionic_setup_one(struct ionic *ionic)
}
ionic_debugfs_add_ident(ionic);
+ ionic_map_cmb(ionic);
+
err = ionic_init(ionic);
if (err) {
dev_err(dev, "Cannot init device: %d, aborting\n", err);
@@ -375,6 +378,8 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_deregister_devlink;
}
+ ionic_auxbus_register(ionic->lif);
+
mod_timer(&ionic->watchdog_timer,
round_jiffies(jiffies + ionic->watchdog_period));
ionic_queue_doorbell_check(ionic, IONIC_NAPI_DEADLINE);
@@ -416,6 +421,7 @@ static void ionic_remove(struct pci_dev *pdev)
if (ionic->lif->doorbell_wa)
cancel_delayed_work_sync(&ionic->doorbell_check_dwork);
+ ionic_auxbus_unregister(ionic->lif);
ionic_lif_unregister(ionic->lif);
ionic_devlink_unregister(ionic);
ionic_lif_deinit(ionic->lif);
@@ -445,6 +451,7 @@ static void ionic_reset_prepare(struct pci_dev *pdev)
timer_delete_sync(&ionic->watchdog_timer);
cancel_work_sync(&lif->deferred.work);
+ ionic_auxbus_unregister(ionic->lif);
mutex_lock(&lif->queue_lock);
ionic_stop_queues_reconfig(lif);
ionic_txrx_free(lif);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 093c5358b6e8..ab27e9225c1e 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -199,13 +199,201 @@ void ionic_init_devinfo(struct ionic *ionic)
dev_dbg(ionic->dev, "fw_version %s\n", idev->dev_info.fw_version);
}
+static void ionic_map_disc_cmb(struct ionic *ionic)
+{
+ struct ionic_identity *ident = &ionic->ident;
+ u32 length_reg0, length, offset, num_regions;
+ struct ionic_dev_bar *bar = ionic->bars;
+ struct ionic_dev *idev = &ionic->idev;
+ struct device *dev = ionic->dev;
+ int err, sz, i;
+ u64 end;
+
+ mutex_lock(&ionic->dev_cmd_lock);
+
+ ionic_dev_cmd_discover_cmb(idev);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ if (!err) {
+ sz = min(sizeof(ident->cmb_layout),
+ sizeof(idev->dev_cmd_regs->data));
+ memcpy_fromio(&ident->cmb_layout,
+ &idev->dev_cmd_regs->data, sz);
+ }
+ mutex_unlock(&ionic->dev_cmd_lock);
+
+ if (err) {
+ dev_warn(dev, "Cannot discover CMB layout, disabling CMB\n");
+ return;
+ }
+
+ bar += 2;
+
+ num_regions = le32_to_cpu(ident->cmb_layout.num_regions);
+ if (!num_regions || num_regions > IONIC_MAX_CMB_REGIONS) {
+ dev_warn(dev, "Invalid number of CMB entries (%d)\n",
+ num_regions);
+ return;
+ }
+
+ dev_dbg(dev, "ionic_cmb_layout_identity num_regions %d flags %x:\n",
+ num_regions, ident->cmb_layout.flags);
+
+ for (i = 0; i < num_regions; i++) {
+ offset = le32_to_cpu(ident->cmb_layout.region[i].offset);
+ length = le32_to_cpu(ident->cmb_layout.region[i].length);
+ end = offset + length;
+
+ dev_dbg(dev, "CMB entry %d: bar_num %u cmb_type %u offset %x length %u\n",
+ i, ident->cmb_layout.region[i].bar_num,
+ ident->cmb_layout.region[i].cmb_type,
+ offset, length);
+
+ if (end > (bar->len >> IONIC_CMB_SHIFT_64K)) {
+ dev_warn(dev, "Out of bounds CMB region %d offset %x length %u\n",
+ i, offset, length);
+ return;
+ }
+ }
+
+ /* if first entry matches PCI config, expdb is not supported */
+ if (ident->cmb_layout.region[0].bar_num == bar->res_index &&
+ le32_to_cpu(ident->cmb_layout.region[0].length) == bar->len &&
+ !ident->cmb_layout.region[0].offset) {
+ dev_warn(dev, "No CMB mapping discovered\n");
+ return;
+ }
+
+ /* process first entry for regular mapping */
+ length_reg0 = le32_to_cpu(ident->cmb_layout.region[0].length);
+ if (!length_reg0) {
+ dev_warn(dev, "region len = 0. No CMB mapping discovered\n");
+ return;
+ }
+
+ /* Verify first entry size matches expected 8MB size (in 64KB pages) */
+ if (length_reg0 != IONIC_BAR2_CMB_ENTRY_SIZE >> IONIC_CMB_SHIFT_64K) {
+ dev_warn(dev, "Unexpected CMB size in entry 0: %u pages\n",
+ length_reg0);
+ return;
+ }
+
+ sz = BITS_TO_LONGS((length_reg0 << IONIC_CMB_SHIFT_64K) /
+ PAGE_SIZE) * sizeof(long);
+ idev->cmb_inuse = kzalloc(sz, GFP_KERNEL);
+ if (!idev->cmb_inuse) {
+ dev_warn(dev, "No memory for CMB, disabling\n");
+ idev->phy_cmb_pages = 0;
+ idev->phy_cmb_expdb64_pages = 0;
+ idev->phy_cmb_expdb128_pages = 0;
+ idev->phy_cmb_expdb256_pages = 0;
+ idev->phy_cmb_expdb512_pages = 0;
+ idev->cmb_npages = 0;
+ return;
+ }
+
+ for (i = 0; i < num_regions; i++) {
+ /* Check that this region's length matches the first region's,
+ * to simplify the implementation
+ */
+ if (le32_to_cpu(ident->cmb_layout.region[i].length) !=
+ length_reg0)
+ continue;
+
+ offset = le32_to_cpu(ident->cmb_layout.region[i].offset);
+
+ switch (ident->cmb_layout.region[i].cmb_type) {
+ case IONIC_CMB_TYPE_DEVMEM:
+ idev->phy_cmb_pages = bar->bus_addr + offset;
+ idev->cmb_npages =
+ (length_reg0 << IONIC_CMB_SHIFT_64K) / PAGE_SIZE;
+ dev_dbg(dev, "regular cmb mapping: bar->bus_addr %pa region[%d].length %u\n",
+ &bar->bus_addr, i, length);
+ dev_dbg(dev, "idev->phy_cmb_pages %pad, idev->cmb_npages %u\n",
+ &idev->phy_cmb_pages, idev->cmb_npages);
+ break;
+
+ case IONIC_CMB_TYPE_EXPDB64:
+ idev->phy_cmb_expdb64_pages =
+ bar->bus_addr + (offset << IONIC_CMB_SHIFT_64K);
+ dev_dbg(dev, "idev->phy_cmb_expdb64_pages %pad\n",
+ &idev->phy_cmb_expdb64_pages);
+ break;
+
+ case IONIC_CMB_TYPE_EXPDB128:
+ idev->phy_cmb_expdb128_pages =
+ bar->bus_addr + (offset << IONIC_CMB_SHIFT_64K);
+ dev_dbg(dev, "idev->phy_cmb_expdb128_pages %pad\n",
+ &idev->phy_cmb_expdb128_pages);
+ break;
+
+ case IONIC_CMB_TYPE_EXPDB256:
+ idev->phy_cmb_expdb256_pages =
+ bar->bus_addr + (offset << IONIC_CMB_SHIFT_64K);
+ dev_dbg(dev, "idev->phy_cmb_expdb256_pages %pad\n",
+ &idev->phy_cmb_expdb256_pages);
+ break;
+
+ case IONIC_CMB_TYPE_EXPDB512:
+ idev->phy_cmb_expdb512_pages =
+ bar->bus_addr + (offset << IONIC_CMB_SHIFT_64K);
+ dev_dbg(dev, "idev->phy_cmb_expdb512_pages %pad\n",
+ &idev->phy_cmb_expdb512_pages);
+ break;
+
+ default:
+ dev_warn(dev, "[%d] Invalid cmb_type (%d)\n",
+ i, ident->cmb_layout.region[i].cmb_type);
+ break;
+ }
+ }
+}
+
+static void ionic_map_classic_cmb(struct ionic *ionic)
+{
+ struct ionic_dev_bar *bar = ionic->bars;
+ struct ionic_dev *idev = &ionic->idev;
+ struct device *dev = ionic->dev;
+ int sz;
+
+ bar += 2;
+ /* classic CMB mapping */
+ idev->phy_cmb_pages = bar->bus_addr;
+ idev->cmb_npages = bar->len / PAGE_SIZE;
+ dev_dbg(dev, "classic cmb mapping: bar->bus_addr %pa bar->len %lu\n",
+ &bar->bus_addr, bar->len);
+ dev_dbg(dev, "idev->phy_cmb_pages %pad, idev->cmb_npages %u\n",
+ &idev->phy_cmb_pages, idev->cmb_npages);
+
+ sz = BITS_TO_LONGS(idev->cmb_npages) * sizeof(long);
+ idev->cmb_inuse = kzalloc(sz, GFP_KERNEL);
+ if (!idev->cmb_inuse) {
+ idev->phy_cmb_pages = 0;
+ idev->cmb_npages = 0;
+ }
+}
+
+void ionic_map_cmb(struct ionic *ionic)
+{
+ struct pci_dev *pdev = ionic->pdev;
+ struct device *dev = ionic->dev;
+
+ if (!(pci_resource_flags(pdev, 4) & IORESOURCE_MEM)) {
+ dev_dbg(dev, "No CMB, disabling\n");
+ return;
+ }
+
+ if (ionic->ident.dev.capabilities & cpu_to_le64(IONIC_DEV_CAP_DISC_CMB))
+ ionic_map_disc_cmb(ionic);
+ else
+ ionic_map_classic_cmb(ionic);
+}
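ionic_map_cmb() gates discovery on the IONIC_DEV_CAP_DISC_CMB capability bit. Testing the
little-endian capabilities word against cpu_to_le64(BIT) keeps the byte swap on the constant
side, where the compiler folds it away. A sketch of the two equivalent forms, with
use_discovery as an illustrative local:

	/* swap the constant at compile time ... */
	if (ident->dev.capabilities & cpu_to_le64(IONIC_DEV_CAP_DISC_CMB))
		use_discovery = true;

	/* ... or swap the value at run time; both test the same bit */
	if (le64_to_cpu(ident->dev.capabilities) & IONIC_DEV_CAP_DISC_CMB)
		use_discovery = true;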
+
int ionic_dev_setup(struct ionic *ionic)
{
struct ionic_dev_bar *bar = ionic->bars;
unsigned int num_bars = ionic->num_bars;
struct ionic_dev *idev = &ionic->idev;
struct device *dev = ionic->dev;
- int size;
u32 sig;
int err;
@@ -255,16 +443,11 @@ int ionic_dev_setup(struct ionic *ionic)
mutex_init(&idev->cmb_inuse_lock);
if (num_bars < 3 || !ionic->bars[IONIC_PCI_BAR_CMB].len) {
idev->cmb_inuse = NULL;
+ idev->phy_cmb_pages = 0;
+ idev->cmb_npages = 0;
return 0;
}
- idev->phy_cmb_pages = bar->bus_addr;
- idev->cmb_npages = bar->len / PAGE_SIZE;
- size = BITS_TO_LONGS(idev->cmb_npages) * sizeof(long);
- idev->cmb_inuse = kzalloc(size, GFP_KERNEL);
- if (!idev->cmb_inuse)
- dev_warn(dev, "No memory for CMB, disabling\n");
-
return 0;
}
@@ -277,6 +460,11 @@ void ionic_dev_teardown(struct ionic *ionic)
idev->phy_cmb_pages = 0;
idev->cmb_npages = 0;
+ idev->phy_cmb_expdb64_pages = 0;
+ idev->phy_cmb_expdb128_pages = 0;
+ idev->phy_cmb_expdb256_pages = 0;
+ idev->phy_cmb_expdb512_pages = 0;
+
if (ionic->wq) {
destroy_workqueue(ionic->wq);
ionic->wq = NULL;
@@ -698,28 +886,79 @@ void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq,
ionic_dev_cmd_go(idev, &cmd);
}
+void ionic_dev_cmd_discover_cmb(struct ionic_dev *idev)
+{
+ union ionic_dev_cmd cmd = {
+ .discover_cmb.opcode = IONIC_CMD_DISCOVER_CMB,
+ };
+
+ ionic_dev_cmd_go(idev, &cmd);
+}
+
int ionic_db_page_num(struct ionic_lif *lif, int pid)
{
return (lif->hw_index * lif->dbid_count) + pid;
}
-int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr, int order)
+int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr,
+ int order, u8 stride_log2, bool *expdb)
{
struct ionic_dev *idev = &lif->ionic->idev;
- int ret;
+ void __iomem *nonexpdb_pgptr;
+ phys_addr_t nonexpdb_pgaddr;
+ int i, idx;
mutex_lock(&idev->cmb_inuse_lock);
- ret = bitmap_find_free_region(idev->cmb_inuse, idev->cmb_npages, order);
+ idx = bitmap_find_free_region(idev->cmb_inuse, idev->cmb_npages, order);
mutex_unlock(&idev->cmb_inuse_lock);
- if (ret < 0)
- return ret;
+ if (idx < 0)
+ return idx;
+
+ *pgid = (u32)idx;
+
+ if (idev->phy_cmb_expdb64_pages &&
+ stride_log2 == IONIC_EXPDB_64B_WQE_LG2) {
+ *pgaddr = idev->phy_cmb_expdb64_pages + idx * PAGE_SIZE;
+ if (expdb)
+ *expdb = true;
+ } else if (idev->phy_cmb_expdb128_pages &&
+ stride_log2 == IONIC_EXPDB_128B_WQE_LG2) {
+ *pgaddr = idev->phy_cmb_expdb128_pages + idx * PAGE_SIZE;
+ if (expdb)
+ *expdb = true;
+ } else if (idev->phy_cmb_expdb256_pages &&
+ stride_log2 == IONIC_EXPDB_256B_WQE_LG2) {
+ *pgaddr = idev->phy_cmb_expdb256_pages + idx * PAGE_SIZE;
+ if (expdb)
+ *expdb = true;
+ } else if (idev->phy_cmb_expdb512_pages &&
+ stride_log2 == IONIC_EXPDB_512B_WQE_LG2) {
+ *pgaddr = idev->phy_cmb_expdb512_pages + idx * PAGE_SIZE;
+ if (expdb)
+ *expdb = true;
+ } else {
+ *pgaddr = idev->phy_cmb_pages + idx * PAGE_SIZE;
+ if (expdb)
+ *expdb = false;
+ }
- *pgid = ret;
- *pgaddr = idev->phy_cmb_pages + ret * PAGE_SIZE;
+ /* clear the requested CMB region, 1 PAGE_SIZE ioremap at a time */
+ nonexpdb_pgaddr = idev->phy_cmb_pages + idx * PAGE_SIZE;
+ for (i = 0; i < (1 << order); i++) {
+ nonexpdb_pgptr =
+ ioremap_wc(nonexpdb_pgaddr + i * PAGE_SIZE, PAGE_SIZE);
+ if (!nonexpdb_pgptr) {
+ ionic_put_cmb(lif, *pgid, order);
+ return -ENOMEM;
+ }
+ memset_io(nonexpdb_pgptr, 0, PAGE_SIZE);
+ iounmap(nonexpdb_pgptr);
+ }
return 0;
}
+EXPORT_SYMBOL_NS(ionic_get_cmb, "NET_IONIC");
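For reference, a minimal caller-side sketch of the extended ionic_get_cmb() signature (the
locals are illustrative): requesting order 0 (one page) with a 64B WQE stride prefers an
express-doorbell mapping when one was discovered, and *expdb reports whether it was granted.
Passing stride_log2 = 0 and a NULL expdb, as the Ethernet queue code does, keeps the
pre-existing behavior:

	phys_addr_t pgaddr;
	bool expdb;
	u32 pgid;
	int err;

	err = ionic_get_cmb(lif, &pgid, &pgaddr, 0,
			    IONIC_EXPDB_64B_WQE_LG2, &expdb);
	if (err)
		return err;

	/* ... use the CMB page at pgaddr; expdb tells the caller whether
	 * it is an express-doorbell mapping ...
	 */

	ionic_put_cmb(lif, pgid, 0);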
void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order)
{
@@ -729,6 +968,7 @@ void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order)
bitmap_release_region(idev->cmb_inuse, pgid, order);
mutex_unlock(&idev->cmb_inuse_lock);
}
+EXPORT_SYMBOL_NS(ionic_put_cmb, "NET_IONIC");
int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
struct ionic_intr_info *intr,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index c8c710cfe70c..35566f97eaea 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -12,6 +12,7 @@
#include "ionic_if.h"
#include "ionic_regs.h"
+#include "ionic_api.h"
#define IONIC_MAX_TX_DESC 8192
#define IONIC_MAX_RX_DESC 16384
@@ -34,6 +35,11 @@
#define IONIC_RX_MIN_DOORBELL_DEADLINE (HZ / 100) /* 10ms */
#define IONIC_RX_MAX_DOORBELL_DEADLINE (HZ * 4) /* 4s */
+#define IONIC_EXPDB_64B_WQE_LG2 6
+#define IONIC_EXPDB_128B_WQE_LG2 7
+#define IONIC_EXPDB_256B_WQE_LG2 8
+#define IONIC_EXPDB_512B_WQE_LG2 9
+
struct ionic_dev_bar {
void __iomem *vaddr;
phys_addr_t bus_addr;
@@ -170,6 +176,11 @@ struct ionic_dev {
dma_addr_t phy_cmb_pages;
u32 cmb_npages;
+ dma_addr_t phy_cmb_expdb64_pages;
+ dma_addr_t phy_cmb_expdb128_pages;
+ dma_addr_t phy_cmb_expdb256_pages;
+ dma_addr_t phy_cmb_expdb512_pages;
+
u32 port_info_sz;
struct ionic_port_info *port_info;
dma_addr_t port_info_pa;
@@ -273,19 +284,6 @@ struct ionic_queue {
char name[IONIC_QUEUE_NAME_MAX_SZ];
} ____cacheline_aligned_in_smp;
-#define IONIC_INTR_INDEX_NOT_ASSIGNED -1
-#define IONIC_INTR_NAME_MAX_SZ 32
-
-struct ionic_intr_info {
- char name[IONIC_INTR_NAME_MAX_SZ];
- u64 rearm_count;
- unsigned int index;
- unsigned int vector;
- u32 dim_coal_hw;
- cpumask_var_t *affinity_mask;
- struct irq_affinity_notify aff_notify;
-};
-
struct ionic_cq {
struct ionic_lif *lif;
struct ionic_queue *bound_q;
@@ -363,8 +361,8 @@ void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq,
int ionic_db_page_num(struct ionic_lif *lif, int pid);
-int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr, int order);
-void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order);
+void ionic_dev_cmd_discover_cmb(struct ionic_dev *idev);
+void ionic_map_cmb(struct ionic *ionic);
int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
struct ionic_intr_info *intr,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 92f30ff2d631..2d9efadb5d2a 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -978,7 +978,7 @@ static int ionic_get_module_eeprom_by_page(struct net_device *netdev,
{
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic_dev *idev = &lif->ionic->idev;
- u32 err = -EINVAL;
+ int err;
u8 *src;
if (!page_data->length)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index 9886cd66ce68..47559c909c8b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -56,6 +56,9 @@ enum ionic_cmd_opcode {
IONIC_CMD_VF_SETATTR = 61,
IONIC_CMD_VF_CTRL = 62,
+ /* CMB command */
+ IONIC_CMD_DISCOVER_CMB = 80,
+
/* QoS commands */
IONIC_CMD_QOS_CLASS_IDENTIFY = 240,
IONIC_CMD_QOS_CLASS_INIT = 241,
@@ -269,9 +272,11 @@ union ionic_drv_identity {
/**
* enum ionic_dev_capability - Device capabilities
* @IONIC_DEV_CAP_VF_CTRL: Device supports VF ctrl operations
+ * @IONIC_DEV_CAP_DISC_CMB: Device supports CMB discovery operations
*/
enum ionic_dev_capability {
IONIC_DEV_CAP_VF_CTRL = BIT(0),
+ IONIC_DEV_CAP_DISC_CMB = BIT(1),
};
/**
@@ -395,6 +400,7 @@ enum ionic_logical_qtype {
* @IONIC_Q_F_4X_DESC: Quadruple main descriptor size
* @IONIC_Q_F_4X_CQ_DESC: Quadruple cq descriptor size
* @IONIC_Q_F_4X_SG_DESC: Quadruple sg descriptor size
+ * @IONIC_QIDENT_F_EXPDB: Queue supports express doorbell
*/
enum ionic_q_feature {
IONIC_QIDENT_F_CQ = BIT_ULL(0),
@@ -407,6 +413,7 @@ enum ionic_q_feature {
IONIC_Q_F_4X_DESC = BIT_ULL(7),
IONIC_Q_F_4X_CQ_DESC = BIT_ULL(8),
IONIC_Q_F_4X_SG_DESC = BIT_ULL(9),
+ IONIC_QIDENT_F_EXPDB = BIT_ULL(10),
};
/**
@@ -495,6 +502,16 @@ union ionic_lif_config {
};
/**
+ * enum ionic_lif_rdma_cap_stats - LIF stat type
+ * @IONIC_LIF_RDMA_STAT_GLOBAL: Global stats
+ * @IONIC_LIF_RDMA_STAT_QP: Queue pair stats
+ */
+enum ionic_lif_rdma_cap_stats {
+ IONIC_LIF_RDMA_STAT_GLOBAL = BIT(0),
+ IONIC_LIF_RDMA_STAT_QP = BIT(1),
+};
+
+/**
* struct ionic_lif_identity - LIF identity information (type-specific)
*
* @capabilities: LIF capabilities
@@ -513,10 +530,10 @@ union ionic_lif_config {
* @eth.config: LIF config struct with features, mtu, mac, q counts
*
* @rdma: RDMA identify structure
- * @rdma.version: RDMA version of opcodes and queue descriptors
+ * @rdma.version: RDMA capability version
* @rdma.qp_opcodes: Number of RDMA queue pair opcodes supported
* @rdma.admin_opcodes: Number of RDMA admin opcodes supported
- * @rdma.rsvd: reserved byte(s)
+ * @rdma.minor_version: RDMA capability minor version
* @rdma.npts_per_lif: Page table size per LIF
* @rdma.nmrs_per_lif: Number of memory regions per LIF
* @rdma.nahs_per_lif: Number of address handles per LIF
@@ -526,12 +543,17 @@ union ionic_lif_config {
* @rdma.rrq_stride: Remote RQ work request stride
* @rdma.rsq_stride: Remote SQ work request stride
* @rdma.dcqcn_profiles: Number of DCQCN profiles
- * @rdma.rsvd_dimensions: reserved byte(s)
+ * @rdma.udma_shift: Log2 number of queues per queue group
+ * @rdma.rsvd_dimensions: Reserved byte
+ * @rdma.page_size_cap: Supported page sizes
* @rdma.aq_qtype: RDMA Admin Qtype
* @rdma.sq_qtype: RDMA Send Qtype
* @rdma.rq_qtype: RDMA Receive Qtype
* @rdma.cq_qtype: RDMA Completion Qtype
* @rdma.eq_qtype: RDMA Event Qtype
+ * @rdma.stats_type: Supported statistics type
+ * (enum ionic_lif_rdma_cap_stats)
+ * @rdma.rsvd1: Reserved byte(s)
* @words: word access to struct contents
*/
union ionic_lif_identity {
@@ -557,7 +579,7 @@ union ionic_lif_identity {
u8 version;
u8 qp_opcodes;
u8 admin_opcodes;
- u8 rsvd;
+ u8 minor_version;
__le32 npts_per_lif;
__le32 nmrs_per_lif;
__le32 nahs_per_lif;
@@ -567,12 +589,16 @@ union ionic_lif_identity {
u8 rrq_stride;
u8 rsq_stride;
u8 dcqcn_profiles;
- u8 rsvd_dimensions[10];
+ u8 udma_shift;
+ u8 rsvd_dimensions;
+ __le64 page_size_cap;
struct ionic_lif_logical_qtype aq_qtype;
struct ionic_lif_logical_qtype sq_qtype;
struct ionic_lif_logical_qtype rq_qtype;
struct ionic_lif_logical_qtype cq_qtype;
struct ionic_lif_logical_qtype eq_qtype;
+ __le16 stats_type;
+ u8 rsvd1[162];
} __packed rdma;
} __packed;
__le32 words[478];
@@ -2195,6 +2221,80 @@ struct ionic_vf_ctrl_comp {
};
/**
+ * struct ionic_discover_cmb_cmd - CMB discovery command
+ * @opcode: Opcode for the command
+ * @rsvd: Reserved bytes
+ */
+struct ionic_discover_cmb_cmd {
+ u8 opcode;
+ u8 rsvd[63];
+};
+
+/**
+ * struct ionic_discover_cmb_comp - CMB discovery command completion.
+ * @status: Status of the command (enum ionic_status_code)
+ * @rsvd: Reserved bytes
+ */
+struct ionic_discover_cmb_comp {
+ u8 status;
+ u8 rsvd[15];
+};
+
+#define IONIC_MAX_CMB_REGIONS 16
+#define IONIC_CMB_SHIFT_64K 16
+
+enum ionic_cmb_type {
+ IONIC_CMB_TYPE_DEVMEM = 0,
+ IONIC_CMB_TYPE_EXPDB64 = 1,
+ IONIC_CMB_TYPE_EXPDB128 = 2,
+ IONIC_CMB_TYPE_EXPDB256 = 3,
+ IONIC_CMB_TYPE_EXPDB512 = 4,
+};
+
+/**
+ * union ionic_cmb_region - Configuration for CMB region
+ * @bar_num: CMB mapping number from FW
+ * @cmb_type: Type of CMB this region describes (enum ionic_cmb_type)
+ * @rsvd: Reserved
+ * @offset: Offset within BAR in 64KB pages
+ * @length: Length of the CMB region
+ * @words: 32-bit words for direct access to the entire region
+ */
+union ionic_cmb_region {
+ struct {
+ u8 bar_num;
+ u8 cmb_type;
+ u8 rsvd[6];
+ __le32 offset;
+ __le32 length;
+ } __packed;
+ __le32 words[4];
+};
+
+/**
+ * union ionic_discover_cmb_identity - CMB layout identity structure
+ * @num_regions: Number of CMB regions, up to 16
+ * @flags: Feature and capability bits (bit 0 for express
+ * doorbell, bit 1 for 4K alignment indicator,
+ * bits 31-24 for version information)
+ * @region: CMB mapping regions: entry 0 for the regular
+ * mapping, entries 1-7 for WQE sizes 64,
+ * 128, 256, 512, 1024, 2048 and 4096 bytes
+ * @words: Full union buffer size
+ */
+union ionic_discover_cmb_identity {
+ struct {
+ __le32 num_regions;
+#define IONIC_CMB_FLAG_EXPDB BIT(0)
+#define IONIC_CMB_FLAG_4KALIGN BIT(1)
+#define IONIC_CMB_FLAG_VERSION 0xff000000
+ __le32 flags;
+ union ionic_cmb_region region[IONIC_MAX_CMB_REGIONS];
+ };
+ __le32 words[478];
+};
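The flags word packs a version byte above two feature bits, and the per-region offset and
length fields are expressed in 64KB pages (hence IONIC_CMB_SHIFT_64K). A decode sketch,
assuming <linux/bitfield.h> for FIELD_GET() and with ident and region standing in for a
fetched identity and one of its entries:

	u32 flags = le32_to_cpu(ident->cmb_layout.flags);
	u8 version = FIELD_GET(IONIC_CMB_FLAG_VERSION, flags);
	bool expdb_capable = flags & IONIC_CMB_FLAG_EXPDB;

	/* offsets and lengths are counted in 64KB pages */
	u64 region_bytes = (u64)le32_to_cpu(region->length)
				<< IONIC_CMB_SHIFT_64K;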
+
+/**
* struct ionic_qos_identify_cmd - QoS identify command
* @opcode: opcode
* @ver: Highest version of identify supported by driver
@@ -3054,6 +3154,8 @@ union ionic_dev_cmd {
struct ionic_vf_getattr_cmd vf_getattr;
struct ionic_vf_ctrl_cmd vf_ctrl;
+ struct ionic_discover_cmb_cmd discover_cmb;
+
struct ionic_lif_identify_cmd lif_identify;
struct ionic_lif_init_cmd lif_init;
struct ionic_lif_reset_cmd lif_reset;
@@ -3093,6 +3195,8 @@ union ionic_dev_cmd_comp {
struct ionic_vf_getattr_comp vf_getattr;
struct ionic_vf_ctrl_comp vf_ctrl;
+ struct ionic_discover_cmb_comp discover_cmb;
+
struct ionic_lif_identify_comp lif_identify;
struct ionic_lif_init_comp lif_init;
ionic_lif_reset_comp lif_reset;
@@ -3234,6 +3338,9 @@ union ionic_adminq_comp {
#define IONIC_BAR0_DEV_CMD_DATA_REGS_OFFSET 0x0c00
#define IONIC_BAR0_INTR_STATUS_OFFSET 0x1000
#define IONIC_BAR0_INTR_CTRL_OFFSET 0x2000
+
+/* BAR2 */
+#define IONIC_BAR2_CMB_ENTRY_SIZE 0x800000
#define IONIC_DEV_CMD_DONE 0x00000001
#define IONIC_ASIC_TYPE_NONE 0
@@ -3287,6 +3394,7 @@ struct ionic_identity {
union ionic_port_identity port;
union ionic_qos_identity qos;
union ionic_q_identity txq;
+ union ionic_discover_cmb_identity cmb_layout;
};
#endif /* _IONIC_IF_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 48cb5d30b5f6..b28966ae50c2 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -19,6 +19,7 @@
#include "ionic_bus.h"
#include "ionic_dev.h"
#include "ionic_lif.h"
+#include "ionic_aux.h"
#include "ionic_txrx.h"
#include "ionic_ethtool.h"
#include "ionic_debugfs.h"
@@ -243,29 +244,36 @@ static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
0, intr->name, &qcq->napi);
}
-static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
+int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
{
struct ionic *ionic = lif->ionic;
- int index;
+ int index, err;
index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
- if (index == ionic->nintrs) {
- netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
- __func__, index, ionic->nintrs);
+ if (index == ionic->nintrs)
return -ENOSPC;
- }
set_bit(index, ionic->intrs);
ionic_intr_init(&ionic->idev, intr, index);
+ err = ionic_bus_get_irq(ionic, intr->index);
+ if (err < 0) {
+ clear_bit(index, ionic->intrs);
+ return err;
+ }
+
+ intr->vector = err;
+
return 0;
}
+EXPORT_SYMBOL_NS(ionic_intr_alloc, "NET_IONIC");
-static void ionic_intr_free(struct ionic *ionic, int index)
+void ionic_intr_free(struct ionic_lif *lif, int index)
{
- if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
- clear_bit(index, ionic->intrs);
+ if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < lif->ionic->nintrs)
+ clear_bit(index, lif->ionic->intrs);
}
+EXPORT_SYMBOL_NS(ionic_intr_free, "NET_IONIC");
static void ionic_irq_aff_notify(struct irq_affinity_notify *notify,
const cpumask_t *mask)
@@ -400,7 +408,7 @@ static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
irq_set_affinity_hint(qcq->intr.vector, NULL);
devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi);
qcq->intr.vector = 0;
- ionic_intr_free(lif->ionic, qcq->intr.index);
+ ionic_intr_free(lif, qcq->intr.index);
qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
}
@@ -510,13 +518,6 @@ static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qc
goto err_out;
}
- err = ionic_bus_get_irq(lif->ionic, qcq->intr.index);
- if (err < 0) {
- netdev_warn(lif->netdev, "no vector for %s: %d\n",
- qcq->q.name, err);
- goto err_out_free_intr;
- }
- qcq->intr.vector = err;
ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_SET);
@@ -545,7 +546,7 @@ static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qc
return 0;
err_out_free_intr:
- ionic_intr_free(lif->ionic, qcq->intr.index);
+ ionic_intr_free(lif, qcq->intr.index);
err_out:
return err;
}
@@ -672,7 +673,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
new->cmb_order = order_base_2(new->cmb_q_size / PAGE_SIZE);
err = ionic_get_cmb(lif, &new->cmb_pgid, &new->cmb_q_base_pa,
- new->cmb_order);
+ new->cmb_order, 0, NULL);
if (err) {
netdev_err(lif->netdev,
"Cannot allocate queue order %d from cmb: err %d\n",
@@ -740,7 +741,7 @@ err_out_free_q:
err_out_free_irq:
if (flags & IONIC_QCQ_F_INTR) {
devm_free_irq(dev, new->intr.vector, &new->napi);
- ionic_intr_free(lif->ionic, new->intr.index);
+ ionic_intr_free(lif, new->intr.index);
}
err_out_free_page_pool:
page_pool_destroy(new->q.page_pool);
@@ -3293,6 +3294,7 @@ int ionic_lif_alloc(struct ionic *ionic)
mutex_init(&lif->queue_lock);
mutex_init(&lif->config_lock);
+ mutex_init(&lif->adev_lock);
spin_lock_init(&lif->adminq_lock);
@@ -3349,6 +3351,7 @@ err_out_free_lif_info:
lif->info = NULL;
lif->info_pa = 0;
err_out_free_mutex:
+ mutex_destroy(&lif->adev_lock);
mutex_destroy(&lif->config_lock);
mutex_destroy(&lif->queue_lock);
err_out_free_netdev:
@@ -3384,6 +3387,7 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
netif_device_detach(lif->netdev);
+ ionic_auxbus_unregister(ionic->lif);
mutex_lock(&lif->queue_lock);
if (test_bit(IONIC_LIF_F_UP, lif->state)) {
dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
@@ -3446,6 +3450,8 @@ int ionic_restart_lif(struct ionic_lif *lif)
netif_device_attach(lif->netdev);
ionic_queue_doorbell_check(ionic, IONIC_NAPI_DEADLINE);
+ ionic_auxbus_register(ionic->lif);
+
return 0;
err_txrx_free:
@@ -3528,6 +3534,7 @@ void ionic_lif_free(struct ionic_lif *lif)
mutex_destroy(&lif->config_lock);
mutex_destroy(&lif->queue_lock);
+ mutex_destroy(&lif->adev_lock);
/* free netdev & lif */
ionic_debugfs_del_lif(lif);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index e01756fb7fdd..43bdd0fb8733 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -10,6 +10,7 @@
#include <linux/dim.h>
#include <linux/pci.h>
#include "ionic_rx_filter.h"
+#include "ionic_api.h"
#define IONIC_ADMINQ_LENGTH 16 /* must be a power of two */
#define IONIC_NOTIFYQ_LENGTH 64 /* must be a power of two */
@@ -225,6 +226,8 @@ struct ionic_lif {
dma_addr_t info_pa;
u32 info_sz;
struct ionic_qtype_info qtype_info[IONIC_QTYPE_MAX];
+ struct ionic_aux_dev *ionic_adev;
+ struct mutex adev_lock; /* lock for aux_dev actions */
u8 rss_hash_key[IONIC_RSS_HASH_KEY_SIZE];
u8 *rss_ind_tbl;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 0e60a6bef99a..14dc055be3e9 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -72,7 +72,7 @@ static const char *ionic_error_to_str(enum ionic_status_code code)
}
}
-static int ionic_error_to_errno(enum ionic_status_code code)
+int ionic_error_to_errno(enum ionic_status_code code)
{
switch (code) {
case IONIC_RC_SUCCESS:
@@ -114,6 +114,7 @@ static int ionic_error_to_errno(enum ionic_status_code code)
return -EIO;
}
}
+EXPORT_SYMBOL_NS(ionic_error_to_errno, "NET_IONIC");
static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode)
{
@@ -480,6 +481,7 @@ int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
{
return __ionic_adminq_post_wait(lif, ctx, true);
}
+EXPORT_SYMBOL_NS(ionic_adminq_post_wait, "NET_IONIC");
int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 9c3d3dd2f847..1f0cea3cae92 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -4462,10 +4462,11 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
goto out;
}
- /* Add override window info to buffer */
+ /* Add override window info to buffer, preventing buffer overflow */
override_window_dwords =
- qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
- PROTECTION_OVERRIDE_ELEMENT_DWORDS;
+ min(qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
+ PROTECTION_OVERRIDE_ELEMENT_DWORDS,
+ PROTECTION_OVERRIDE_DEPTH_DWORDS);
if (override_window_dwords) {
addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
offset += qed_grc_dump_addr_range(p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.c b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
index 1adc7fbb3f2f..94c5689b5abd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_devlink.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
@@ -87,20 +87,21 @@ qed_fw_fatal_reporter_recover(struct devlink_health_reporter *reporter,
return 0;
}
+#define QED_REPORTER_FW_GRACEFUL_PERIOD 0
+
static const struct devlink_health_reporter_ops qed_fw_fatal_reporter_ops = {
.name = "fw_fatal",
.recover = qed_fw_fatal_reporter_recover,
.dump = qed_fw_fatal_reporter_dump,
+ .default_graceful_period = QED_REPORTER_FW_GRACEFUL_PERIOD,
};
-#define QED_REPORTER_FW_GRACEFUL_PERIOD 0
-
void qed_fw_reporters_create(struct devlink *devlink)
{
struct qed_devlink *dl = devlink_priv(devlink);
- dl->fw_reporter = devlink_health_reporter_create(devlink, &qed_fw_fatal_reporter_ops,
- QED_REPORTER_FW_GRACEFUL_PERIOD, dl);
+ dl->fw_reporter = devlink_health_reporter_create(devlink,
+ &qed_fw_fatal_reporter_ops, dl);
if (IS_ERR(dl->fw_reporter)) {
DP_NOTICE(dl->cdev, "Failed to create fw reporter, err = %ld\n",
PTR_ERR(dl->fw_reporter));
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 886061d7351a..d4685ad4b169 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1214,7 +1214,8 @@ static int qed_slowpath_wq_start(struct qed_dev *cdev)
hwfn = &cdev->hwfns[i];
hwfn->slowpath_wq = alloc_workqueue("slowpath-%02x:%02x.%02x",
- 0, 0, cdev->pdev->bus->number,
+ WQ_PERCPU, 0,
+ cdev->pdev->bus->number,
PCI_SLOT(cdev->pdev->devfn),
hwfn->abs_pf_id);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
index 5d725f59db24..8be567a6ad44 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
@@ -183,9 +183,6 @@ void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_buffer,
list_entry);
- if (!p_buffer)
- break;
-
list_move_tail(&p_buffer->list_entry,
&p_ooo_info->free_buffers_list);
}
@@ -218,9 +215,6 @@ void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_buffer,
list_entry);
- if (!p_buffer)
- break;
-
list_move_tail(&p_buffer->list_entry,
&p_ooo_info->free_buffers_list);
}
@@ -255,9 +249,6 @@ void qed_ooo_free(struct qed_hwfn *p_hwfn)
p_buffer = list_first_entry(&p_ooo_info->free_buffers_list,
struct qed_ooo_buffer, list_entry);
- if (!p_buffer)
- break;
-
list_del(&p_buffer->list_entry);
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
p_buffer->rx_buffer_size,
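These hunks drop checks that could never fire: list_first_entry() is container_of() on the
list head's next pointer and never returns NULL, and, as the removal implies, the loops these
calls sit in are bounded by list_empty() checks, so the entry is valid by construction. The
idiomatic drain pattern, sketched with kfree() as a placeholder for the per-buffer release:

	while (!list_empty(&p_ooo_info->free_buffers_list)) {
		p_buffer = list_first_entry(&p_ooo_info->free_buffers_list,
					    struct qed_ooo_buffer,
					    list_entry);
		/* non-NULL by construction once the list is non-empty */
		list_del(&p_buffer->list_entry);
		kfree(p_buffer);
	}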
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index d7cdea8f604d..91e7b38143ea 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -4215,7 +4215,6 @@ static pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *pdev)
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
int err = 0;
- pdev->error_state = pci_channel_io_normal;
err = pci_enable_device(pdev);
if (err)
goto disconnect;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 53cdd36c4123..e051d8c7a28d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -3766,8 +3766,6 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
- pdev->error_state = pci_channel_io_normal;
-
err = pci_enable_device(pdev);
if (err)
return err;
diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig
index a4434eb38950..ba7efb108637 100644
--- a/drivers/net/ethernet/qualcomm/Kconfig
+++ b/drivers/net/ethernet/qualcomm/Kconfig
@@ -60,6 +60,21 @@ config QCOM_EMAC
low power, Receive-Side Scaling (RSS), and IEEE 1588-2008
Precision Clock Synchronization Protocol.
+config QCOM_PPE
+ tristate "Qualcomm Technologies, Inc. PPE Ethernet support"
+ depends on COMMON_CLK && HAS_IOMEM && OF
+ depends on ARCH_QCOM || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ This driver supports the Qualcomm Technologies, Inc. packet
+ process engine (PPE) available in IPQ SoCs. The PPE includes
+ the Ethernet MACs, Ethernet DMA (EDMA) and switch core that
+ supports L3 flow offload, L2 switch function, RSS and tunnel
+ offload.
+
+ To compile this driver as a module, choose M here. The module
+ will be called qcom-ppe.
+
source "drivers/net/ethernet/qualcomm/rmnet/Kconfig"
endif # NET_VENDOR_QUALCOMM
diff --git a/drivers/net/ethernet/qualcomm/Makefile b/drivers/net/ethernet/qualcomm/Makefile
index 9250976dd884..166a59aea363 100644
--- a/drivers/net/ethernet/qualcomm/Makefile
+++ b/drivers/net/ethernet/qualcomm/Makefile
@@ -11,4 +11,5 @@ qcauart-objs := qca_uart.o
obj-y += emac/
+obj-$(CONFIG_QCOM_PPE) += ppe/
obj-$(CONFIG_RMNET) += rmnet/
diff --git a/drivers/net/ethernet/qualcomm/ppe/Makefile b/drivers/net/ethernet/qualcomm/ppe/Makefile
new file mode 100644
index 000000000000..9e60b2400c16
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the PPE (Packet Process Engine) device driver in IPQ SoCs
+#
+
+obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
+qcom-ppe-objs := ppe.o ppe_config.o ppe_debugfs.o
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe.c b/drivers/net/ethernet/qualcomm/ppe/ppe.c
new file mode 100644
index 000000000000..be747510d947
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+/* PPE platform device probe, DT parsing and PPE clock initialization. */
+
+#include <linux/clk.h>
+#include <linux/interconnect.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include "ppe.h"
+#include "ppe_config.h"
+#include "ppe_debugfs.h"
+
+#define PPE_PORT_MAX 8
+#define PPE_CLK_RATE 353000000
+
+/* ICC paths for enabling the PPE device. Entries with avg_bw and peak_bw
+ * of 0 are filled in from the PPE clock rate at probe time.
+ */
+static const struct icc_bulk_data ppe_icc_data[] = {
+ {
+ .name = "ppe",
+ .avg_bw = 0,
+ .peak_bw = 0,
+ },
+ {
+ .name = "ppe_cfg",
+ .avg_bw = 0,
+ .peak_bw = 0,
+ },
+ {
+ .name = "qos_gen",
+ .avg_bw = 6000,
+ .peak_bw = 6000,
+ },
+ {
+ .name = "timeout_ref",
+ .avg_bw = 6000,
+ .peak_bw = 6000,
+ },
+ {
+ .name = "nssnoc_memnoc",
+ .avg_bw = 533333,
+ .peak_bw = 533333,
+ },
+ {
+ .name = "memnoc_nssnoc",
+ .avg_bw = 533333,
+ .peak_bw = 533333,
+ },
+ {
+ .name = "memnoc_nssnoc_1",
+ .avg_bw = 533333,
+ .peak_bw = 533333,
+ },
+};
+
+static const struct regmap_range ppe_readable_ranges[] = {
+ regmap_reg_range(0x0, 0x1ff), /* Global */
+ regmap_reg_range(0x400, 0x5ff), /* LPI CSR */
+ regmap_reg_range(0x1000, 0x11ff), /* GMAC0 */
+ regmap_reg_range(0x1200, 0x13ff), /* GMAC1 */
+ regmap_reg_range(0x1400, 0x15ff), /* GMAC2 */
+ regmap_reg_range(0x1600, 0x17ff), /* GMAC3 */
+ regmap_reg_range(0x1800, 0x19ff), /* GMAC4 */
+ regmap_reg_range(0x1a00, 0x1bff), /* GMAC5 */
+ regmap_reg_range(0xb000, 0xefff), /* PRX CSR */
+ regmap_reg_range(0xf000, 0x1efff), /* IPE */
+ regmap_reg_range(0x20000, 0x5ffff), /* PTX CSR */
+ regmap_reg_range(0x60000, 0x9ffff), /* IPE L2 CSR */
+ regmap_reg_range(0xb0000, 0xeffff), /* IPO CSR */
+ regmap_reg_range(0x100000, 0x17ffff), /* IPE PC */
+ regmap_reg_range(0x180000, 0x1bffff), /* PRE IPO CSR */
+ regmap_reg_range(0x1d0000, 0x1dffff), /* Tunnel parser */
+ regmap_reg_range(0x1e0000, 0x1effff), /* Ingress parse */
+ regmap_reg_range(0x200000, 0x2fffff), /* IPE L3 */
+ regmap_reg_range(0x300000, 0x3fffff), /* IPE tunnel */
+ regmap_reg_range(0x400000, 0x4fffff), /* Scheduler */
+ regmap_reg_range(0x500000, 0x503fff), /* XGMAC0 */
+ regmap_reg_range(0x504000, 0x507fff), /* XGMAC1 */
+ regmap_reg_range(0x508000, 0x50bfff), /* XGMAC2 */
+ regmap_reg_range(0x50c000, 0x50ffff), /* XGMAC3 */
+ regmap_reg_range(0x510000, 0x513fff), /* XGMAC4 */
+ regmap_reg_range(0x514000, 0x517fff), /* XGMAC5 */
+ regmap_reg_range(0x600000, 0x6fffff), /* BM */
+ regmap_reg_range(0x800000, 0x9fffff), /* QM */
+ regmap_reg_range(0xb00000, 0xbef800), /* EDMA */
+};
+
+static const struct regmap_access_table ppe_reg_table = {
+ .yes_ranges = ppe_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ppe_readable_ranges),
+};
+
+static const struct regmap_config regmap_config_ipq9574 = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .rd_table = &ppe_reg_table,
+ .wr_table = &ppe_reg_table,
+ .max_register = 0xbef800,
+ .fast_io = true,
+};
+
+static int ppe_clock_init_and_reset(struct ppe_device *ppe_dev)
+{
+ unsigned long ppe_rate = ppe_dev->clk_rate;
+ struct device *dev = ppe_dev->dev;
+ struct reset_control *rstc;
+ struct clk_bulk_data *clks;
+ struct clk *clk;
+ int ret, i;
+
+ for (i = 0; i < ppe_dev->num_icc_paths; i++) {
+ ppe_dev->icc_paths[i].name = ppe_icc_data[i].name;
+ ppe_dev->icc_paths[i].avg_bw = ppe_icc_data[i].avg_bw ? :
+ Bps_to_icc(ppe_rate);
+
+ /* PPE does not have an explicit peak bandwidth requirement,
+ * so set the peak bandwidth to be equal to the average
+ * bandwidth.
+ */
+ ppe_dev->icc_paths[i].peak_bw = ppe_icc_data[i].peak_bw ? :
+ Bps_to_icc(ppe_rate);
+ }
+
+ ret = devm_of_icc_bulk_get(dev, ppe_dev->num_icc_paths,
+ ppe_dev->icc_paths);
+ if (ret)
+ return ret;
+
+ ret = icc_bulk_set_bw(ppe_dev->num_icc_paths, ppe_dev->icc_paths);
+ if (ret)
+ return ret;
+
+ /* The PPE clocks have a common parent clock. Setting the clock
+ * rate of "ppe" configures all PPE clocks to the same rate.
+ */
+ clk = devm_clk_get(dev, "ppe");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = clk_set_rate(clk, ppe_rate);
+ if (ret)
+ return ret;
+
+ ret = devm_clk_bulk_get_all_enabled(dev, &clks);
+ if (ret < 0)
+ return ret;
+
+ /* Reset the PPE. */
+ rstc = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(rstc))
+ return PTR_ERR(rstc);
+
+ ret = reset_control_assert(rstc);
+ if (ret)
+ return ret;
+
+ /* A 10 ms delay after assert is necessary to reset the PPE. */
+ usleep_range(10000, 11000);
+
+ return reset_control_deassert(rstc);
+}
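The elvis-style "x ? : y" in the ICC loop keeps the table value when it is nonzero and
otherwise derives the bandwidth from the PPE clock rate; Bps_to_icc() scales
bytes-per-second down to the interconnect framework's kBps units. A worked instance under
this patch's constants:

	/* avg_bw == 0 for the "ppe" path, so it falls back to the rate:
	 * Bps_to_icc(353000000) == 353000000 / 1000 == 353000 kBps
	 */
	u32 bw = ppe_icc_data[i].avg_bw ? : Bps_to_icc(PPE_CLK_RATE);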
+
+static int qcom_ppe_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ppe_device *ppe_dev;
+ void __iomem *base;
+ int ret, num_icc;
+
+ num_icc = ARRAY_SIZE(ppe_icc_data);
+ ppe_dev = devm_kzalloc(dev, struct_size(ppe_dev, icc_paths, num_icc),
+ GFP_KERNEL);
+ if (!ppe_dev)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return dev_err_probe(dev, PTR_ERR(base), "PPE ioremap failed\n");
+
+ ppe_dev->regmap = devm_regmap_init_mmio(dev, base, &regmap_config_ipq9574);
+ if (IS_ERR(ppe_dev->regmap))
+ return dev_err_probe(dev, PTR_ERR(ppe_dev->regmap),
+ "PPE initialize regmap failed\n");
+ ppe_dev->dev = dev;
+ ppe_dev->clk_rate = PPE_CLK_RATE;
+ ppe_dev->num_ports = PPE_PORT_MAX;
+ ppe_dev->num_icc_paths = num_icc;
+
+ ret = ppe_clock_init_and_reset(ppe_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "PPE clock config failed\n");
+
+ ret = ppe_hw_config(ppe_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "PPE HW config failed\n");
+
+ ppe_debugfs_setup(ppe_dev);
+ platform_set_drvdata(pdev, ppe_dev);
+
+ return 0;
+}
+
+static void qcom_ppe_remove(struct platform_device *pdev)
+{
+ struct ppe_device *ppe_dev;
+
+ ppe_dev = platform_get_drvdata(pdev);
+ ppe_debugfs_teardown(ppe_dev);
+}
+
+static const struct of_device_id qcom_ppe_of_match[] = {
+ { .compatible = "qcom,ipq9574-ppe" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_ppe_of_match);
+
+static struct platform_driver qcom_ppe_driver = {
+ .driver = {
+ .name = "qcom_ppe",
+ .of_match_table = qcom_ppe_of_match,
+ },
+ .probe = qcom_ppe_probe,
+ .remove = qcom_ppe_remove,
+};
+module_platform_driver(qcom_ppe_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. IPQ PPE driver");
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe.h b/drivers/net/ethernet/qualcomm/ppe/ppe.h
new file mode 100644
index 000000000000..27458f0bc206
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef __PPE_H__
+#define __PPE_H__
+
+#include <linux/compiler.h>
+#include <linux/interconnect.h>
+
+struct device;
+struct regmap;
+struct dentry;
+
+/**
+ * struct ppe_device - PPE device private data.
+ * @dev: PPE device structure.
+ * @regmap: PPE register map.
+ * @clk_rate: PPE clock rate.
+ * @num_ports: Number of PPE ports.
+ * @debugfs_root: Debugfs root entry.
+ * @num_icc_paths: Number of interconnect paths.
+ * @icc_paths: Interconnect path array.
+ *
+ * PPE device is the instance of PPE hardware, which is used to
+ * configure PPE packet process modules such as BM (buffer management),
+ * QM (queue management), and scheduler.
+ */
+struct ppe_device {
+ struct device *dev;
+ struct regmap *regmap;
+ unsigned long clk_rate;
+ unsigned int num_ports;
+ struct dentry *debugfs_root;
+ unsigned int num_icc_paths;
+ struct icc_bulk_data icc_paths[] __counted_by(num_icc_paths);
+};
+#endif
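Because icc_paths[] is a flexible array annotated __counted_by(num_icc_paths), callers must
size the allocation with struct_size() and set num_icc_paths before indexing the array, or
FORTIFY/UBSAN bounds checking can trip. A minimal allocation sketch mirroring the probe
path above:

	num_icc = ARRAY_SIZE(ppe_icc_data);
	ppe_dev = devm_kzalloc(dev,
			       struct_size(ppe_dev, icc_paths, num_icc),
			       GFP_KERNEL);
	if (!ppe_dev)
		return -ENOMEM;

	/* set the counter before any icc_paths[i] access */
	ppe_dev->num_icc_paths = num_icc;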
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_config.c b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
new file mode 100644
index 000000000000..e9a0e22907a6
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.c
@@ -0,0 +1,2034 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+/* PPE HW initialization configs such as BM (buffer management),
+ * QM (queue management) and scheduler settings.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+#include "ppe.h"
+#include "ppe_config.h"
+#include "ppe_regs.h"
+
+#define PPE_QUEUE_SCH_PRI_NUM 8
+
+/**
+ * struct ppe_bm_port_config - PPE BM port configuration.
+ * @port_id_start: The first BM port ID to configure.
+ * @port_id_end: The last BM port ID to configure.
+ * @pre_alloc: BM port dedicated buffer number.
+ * @in_fly_buf: Buffer number for receiving packets after a pause frame is sent.
+ * @ceil: Ceil to generate the back pressure.
+ * @weight: Weight value.
+ * @resume_offset: Resume offset from the threshold value.
+ * @resume_ceil: Ceil to resume from the back pressure state.
+ * @dynamic: Dynamic threshold used or not.
+ *
+ * This is for configuring the threshold that impacts the port
+ * flow control.
+ */
+struct ppe_bm_port_config {
+ unsigned int port_id_start;
+ unsigned int port_id_end;
+ unsigned int pre_alloc;
+ unsigned int in_fly_buf;
+ unsigned int ceil;
+ unsigned int weight;
+ unsigned int resume_offset;
+ unsigned int resume_ceil;
+ bool dynamic;
+};
+
+/**
+ * struct ppe_qm_queue_config - PPE queue config.
+ * @queue_start: PPE start of queue ID.
+ * @queue_end: PPE end of queue ID.
+ * @prealloc_buf: Queue dedicated buffer number.
+ * @ceil: Ceil to start drop packet from queue.
+ * @weight: Weight value.
+ * @resume_offset: Resume offset from the threshold.
+ * @dynamic: Threshold value is decided dynamically or statically.
+ *
+ * Queue configuration decides the threshold to drop packet from PPE
+ * hardware queue.
+ */
+struct ppe_qm_queue_config {
+ unsigned int queue_start;
+ unsigned int queue_end;
+ unsigned int prealloc_buf;
+ unsigned int ceil;
+ unsigned int weight;
+ unsigned int resume_offset;
+ bool dynamic;
+};
+
+/**
+ * enum ppe_scheduler_direction - PPE scheduler direction for packet.
+ * @PPE_SCH_INGRESS: Scheduler for the packet on ingress.
+ * @PPE_SCH_EGRESS: Scheduler for the packet on egress.
+ */
+enum ppe_scheduler_direction {
+ PPE_SCH_INGRESS = 0,
+ PPE_SCH_EGRESS = 1,
+};
+
+/**
+ * struct ppe_scheduler_bm_config - PPE arbitration for buffer config.
+ * @valid: Arbitration entry valid or not.
+ * @dir: Arbitration entry for egress or ingress.
+ * @port: Port ID to use arbitration entry.
+ * @backup_port_valid: Backup port valid or not.
+ * @backup_port: Backup port ID to use.
+ *
+ * Configure the scheduler settings for accessing and releasing the PPE buffers.
+ */
+struct ppe_scheduler_bm_config {
+ bool valid;
+ enum ppe_scheduler_direction dir;
+ unsigned int port;
+ bool backup_port_valid;
+ unsigned int backup_port;
+};
+
+/**
+ * struct ppe_scheduler_qm_config - PPE arbitration for scheduler config.
+ * @ensch_port_bmp: Port bit map for enqueue scheduler.
+ * @ensch_port: Port ID to enqueue scheduler.
+ * @desch_port: Port ID to dequeue scheduler.
+ * @desch_backup_port_valid: Dequeue for the backup port valid or not.
+ * @desch_backup_port: Backup port ID to dequeue scheduler.
+ *
+ * Configure the scheduler settings for enqueuing and dequeuing packets on
+ * the PPE port.
+ */
+struct ppe_scheduler_qm_config {
+ unsigned int ensch_port_bmp;
+ unsigned int ensch_port;
+ unsigned int desch_port;
+ bool desch_backup_port_valid;
+ unsigned int desch_backup_port;
+};
+
+/**
+ * struct ppe_scheduler_port_config - PPE port scheduler config.
+ * @port: Port ID to be scheduled.
+ * @flow_level: Scheduler flow level or not.
+ * @node_id: Node ID; for level 0, the queue ID is used.
+ * @loop_num: Loop number of scheduler config.
+ * @pri_max: Max priority configured.
+ * @flow_id: Strict priority ID.
+ * @drr_node_id: Node ID for scheduler.
+ *
+ * PPE port scheduler configuration which decides the priority in the
+ * packet scheduler for the egress port.
+ */
+struct ppe_scheduler_port_config {
+ unsigned int port;
+ bool flow_level;
+ unsigned int node_id;
+ unsigned int loop_num;
+ unsigned int pri_max;
+ unsigned int flow_id;
+ unsigned int drr_node_id;
+};
+
+/**
+ * struct ppe_port_schedule_resource - PPE port scheduler resource.
+ * @ucastq_start: Unicast queue start ID.
+ * @ucastq_end: Unicast queue end ID.
+ * @mcastq_start: Multicast queue start ID.
+ * @mcastq_end: Multicast queue end ID.
+ * @flow_id_start: Flow start ID.
+ * @flow_id_end: Flow end ID.
+ * @l0node_start: Scheduler node start ID for queue level.
+ * @l0node_end: Scheduler node end ID for queue level.
+ * @l1node_start: Scheduler node start ID for flow level.
+ * @l1node_end: Scheduler node end ID for flow level.
+ *
+ * PPE scheduler resource allocated among the PPE ports.
+ */
+struct ppe_port_schedule_resource {
+ unsigned int ucastq_start;
+ unsigned int ucastq_end;
+ unsigned int mcastq_start;
+ unsigned int mcastq_end;
+ unsigned int flow_id_start;
+ unsigned int flow_id_end;
+ unsigned int l0node_start;
+ unsigned int l0node_end;
+ unsigned int l1node_start;
+ unsigned int l1node_end;
+};
+
+/* There are a total of 2048 buffers available in the PPE, of which
+ * some are reserved for specific purposes per PPE port. The
+ * remaining pool of 1550 buffers is assigned to the general 'group0',
+ * which is shared among all ports of the PPE.
+ */
+static const int ipq9574_ppe_bm_group_config = 1550;
+
+/* The buffer configurations per PPE port. There are 15 BM ports and
+ * 4 BM groups supported by PPE. BM port (0-7) is for EDMA port 0,
+ * BM port (8-13) is for PPE physical port 1-6 and BM port 14 is for
+ * EIP port.
+ */
+static const struct ppe_bm_port_config ipq9574_ppe_bm_port_config[] = {
+ {
+ /* Buffer configuration for the BM port ID 0 of EDMA. */
+ .port_id_start = 0,
+ .port_id_end = 0,
+ .pre_alloc = 0,
+ .in_fly_buf = 100,
+ .ceil = 1146,
+ .weight = 7,
+ .resume_offset = 8,
+ .resume_ceil = 0,
+ .dynamic = true,
+ },
+ {
+ /* Buffer configuration for the BM port ID 1-7 of EDMA. */
+ .port_id_start = 1,
+ .port_id_end = 7,
+ .pre_alloc = 0,
+ .in_fly_buf = 100,
+ .ceil = 250,
+ .weight = 4,
+ .resume_offset = 36,
+ .resume_ceil = 0,
+ .dynamic = true,
+ },
+ {
+ /* Buffer configuration for the BM port ID 8-13 of PPE ports. */
+ .port_id_start = 8,
+ .port_id_end = 13,
+ .pre_alloc = 0,
+ .in_fly_buf = 128,
+ .ceil = 250,
+ .weight = 4,
+ .resume_offset = 36,
+ .resume_ceil = 0,
+ .dynamic = true,
+ },
+ {
+ /* Buffer configuration for the BM port ID 14 of EIP. */
+ .port_id_start = 14,
+ .port_id_end = 14,
+ .pre_alloc = 0,
+ .in_fly_buf = 40,
+ .ceil = 250,
+ .weight = 4,
+ .resume_offset = 36,
+ .resume_ceil = 0,
+ .dynamic = true,
+ },
+};
+
+/* QM fetches the packet from PPE buffer management for transmitting the
+ * packet out. The QM group configuration limits the total number of buffers
+ * enqueued by all PPE hardware queues.
+ * There are a total of 2048 buffers available, of which some are
+ * dedicated to hardware exception handlers. The remaining buffers are
+ * assigned to the general 'group0', which is the group assigned to all
+ * queues by default.
+ */
+static const int ipq9574_ppe_qm_group_config = 2000;
+
+/* Default QM settings for unicast and multicast queues for IPQ9574. */
+static const struct ppe_qm_queue_config ipq9574_ppe_qm_queue_config[] = {
+ {
+ /* QM settings for unicast queues 0 to 255. */
+ .queue_start = 0,
+ .queue_end = 255,
+ .prealloc_buf = 0,
+ .ceil = 1200,
+ .weight = 7,
+ .resume_offset = 36,
+ .dynamic = true,
+ },
+ {
+ /* QM settings for multicast queues 256 to 299. */
+ .queue_start = 256,
+ .queue_end = 299,
+ .prealloc_buf = 0,
+ .ceil = 250,
+ .weight = 0,
+ .resume_offset = 36,
+ .dynamic = false,
+ },
+};
+
+/* PPE scheduler configuration for BM includes multiple entries. Each entry
+ * indicates the primary port to be assigned the buffers for the ingress or
+ * to release the buffers for the egress. The backup port ID is used
+ * when the primary port is down.
+ */
+static const struct ppe_scheduler_bm_config ipq9574_ppe_sch_bm_config[] = {
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 1, false, 0},
+ {true, PPE_SCH_EGRESS, 1, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 7, false, 0},
+ {true, PPE_SCH_EGRESS, 7, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 1, false, 0},
+ {true, PPE_SCH_EGRESS, 1, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 2, false, 0},
+ {true, PPE_SCH_EGRESS, 2, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 1, false, 0},
+ {true, PPE_SCH_EGRESS, 1, false, 0},
+ {true, PPE_SCH_INGRESS, 3, false, 0},
+ {true, PPE_SCH_EGRESS, 3, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 7, false, 0},
+ {true, PPE_SCH_EGRESS, 7, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 1, false, 0},
+ {true, PPE_SCH_EGRESS, 1, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 4, false, 0},
+ {true, PPE_SCH_EGRESS, 4, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 1, false, 0},
+ {true, PPE_SCH_EGRESS, 1, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 2, false, 0},
+ {true, PPE_SCH_EGRESS, 2, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 7, false, 0},
+ {true, PPE_SCH_EGRESS, 7, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 1, false, 0},
+ {true, PPE_SCH_EGRESS, 1, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 3, false, 0},
+ {true, PPE_SCH_EGRESS, 3, false, 0},
+ {true, PPE_SCH_INGRESS, 1, false, 0},
+ {true, PPE_SCH_EGRESS, 1, false, 0},
+ {true, PPE_SCH_INGRESS, 0, false, 0},
+ {true, PPE_SCH_EGRESS, 0, false, 0},
+ {true, PPE_SCH_INGRESS, 5, false, 0},
+ {true, PPE_SCH_EGRESS, 5, false, 0},
+ {true, PPE_SCH_INGRESS, 6, false, 0},
+ {true, PPE_SCH_EGRESS, 6, false, 0},
+ {true, PPE_SCH_INGRESS, 4, false, 0},
+ {true, PPE_SCH_EGRESS, 4, false, 0},
+ {true, PPE_SCH_INGRESS, 7, false, 0},
+ {true, PPE_SCH_EGRESS, 7, false, 0},
+};
+
+/* PPE scheduler configuration for QM includes multiple entries. Each entry
+ * contains ports to be dispatched for enqueueing and dequeueing. The backup
+ * port for dequeueing is used when the primary dequeue port is down.
+ */
+static const struct ppe_scheduler_qm_config ipq9574_ppe_sch_qm_config[] = {
+ {0x98, 6, 0, true, 1},
+ {0x94, 5, 6, true, 3},
+ {0x86, 0, 5, true, 4},
+ {0x8C, 1, 6, true, 0},
+ {0x1C, 7, 5, true, 1},
+ {0x98, 2, 6, true, 0},
+ {0x1C, 5, 7, true, 1},
+ {0x34, 3, 6, true, 0},
+ {0x8C, 4, 5, true, 1},
+ {0x98, 2, 6, true, 0},
+ {0x8C, 5, 4, true, 1},
+ {0xA8, 0, 6, true, 2},
+ {0x98, 5, 1, true, 0},
+ {0x98, 6, 5, true, 2},
+ {0x89, 1, 6, true, 4},
+ {0xA4, 3, 0, true, 1},
+ {0x8C, 5, 6, true, 4},
+ {0xA8, 0, 2, true, 1},
+ {0x98, 6, 5, true, 0},
+ {0xC4, 4, 3, true, 1},
+ {0x94, 6, 5, true, 0},
+ {0x1C, 7, 6, true, 1},
+ {0x98, 2, 5, true, 0},
+ {0x1C, 6, 7, true, 1},
+ {0x1C, 5, 6, true, 0},
+ {0x94, 3, 5, true, 1},
+ {0x8C, 4, 6, true, 0},
+ {0x94, 1, 5, true, 3},
+ {0x94, 6, 1, true, 0},
+ {0xD0, 3, 5, true, 2},
+ {0x98, 6, 0, true, 1},
+ {0x94, 5, 6, true, 3},
+ {0x94, 1, 5, true, 0},
+ {0x98, 2, 6, true, 1},
+ {0x8C, 4, 5, true, 0},
+ {0x1C, 7, 6, true, 1},
+ {0x8C, 0, 5, true, 4},
+ {0x89, 1, 6, true, 2},
+ {0x98, 5, 0, true, 1},
+ {0x94, 6, 5, true, 3},
+ {0x92, 0, 6, true, 2},
+ {0x98, 1, 5, true, 0},
+ {0x98, 6, 2, true, 1},
+ {0xD0, 0, 5, true, 3},
+ {0x94, 6, 0, true, 1},
+ {0x8C, 5, 6, true, 4},
+ {0x8C, 1, 5, true, 0},
+ {0x1C, 6, 7, true, 1},
+ {0x1C, 5, 6, true, 0},
+ {0xB0, 2, 3, true, 1},
+ {0xC4, 4, 5, true, 0},
+ {0x8C, 6, 4, true, 1},
+ {0xA4, 3, 6, true, 0},
+ {0x1C, 5, 7, true, 1},
+ {0x4C, 0, 5, true, 4},
+ {0x8C, 6, 0, true, 1},
+ {0x34, 7, 6, true, 3},
+ {0x94, 5, 0, true, 1},
+ {0x98, 6, 5, true, 2},
+};
+
+static const struct ppe_scheduler_port_config ppe_port_sch_config[] = {
+ {
+ .port = 0,
+ .flow_level = true,
+ .node_id = 0,
+ .loop_num = 1,
+ .pri_max = 1,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 0,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 8,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 16,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 24,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 32,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 40,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 48,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 56,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 256,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 0,
+ .flow_level = false,
+ .node_id = 264,
+ .loop_num = 8,
+ .pri_max = 8,
+ .flow_id = 0,
+ .drr_node_id = 0,
+ },
+ {
+ .port = 1,
+ .flow_level = true,
+ .node_id = 36,
+ .loop_num = 2,
+ .pri_max = 0,
+ .flow_id = 1,
+ .drr_node_id = 8,
+ },
+ {
+ .port = 1,
+ .flow_level = false,
+ .node_id = 144,
+ .loop_num = 16,
+ .pri_max = 8,
+ .flow_id = 36,
+ .drr_node_id = 48,
+ },
+ {
+ .port = 1,
+ .flow_level = false,
+ .node_id = 272,
+ .loop_num = 4,
+ .pri_max = 4,
+ .flow_id = 36,
+ .drr_node_id = 48,
+ },
+ {
+ .port = 2,
+ .flow_level = true,
+ .node_id = 40,
+ .loop_num = 2,
+ .pri_max = 0,
+ .flow_id = 2,
+ .drr_node_id = 12,
+ },
+ {
+ .port = 2,
+ .flow_level = false,
+ .node_id = 160,
+ .loop_num = 16,
+ .pri_max = 8,
+ .flow_id = 40,
+ .drr_node_id = 64,
+ },
+ {
+ .port = 2,
+ .flow_level = false,
+ .node_id = 276,
+ .loop_num = 4,
+ .pri_max = 4,
+ .flow_id = 40,
+ .drr_node_id = 64,
+ },
+ {
+ .port = 3,
+ .flow_level = true,
+ .node_id = 44,
+ .loop_num = 2,
+ .pri_max = 0,
+ .flow_id = 3,
+ .drr_node_id = 16,
+ },
+ {
+ .port = 3,
+ .flow_level = false,
+ .node_id = 176,
+ .loop_num = 16,
+ .pri_max = 8,
+ .flow_id = 44,
+ .drr_node_id = 80,
+ },
+ {
+ .port = 3,
+ .flow_level = false,
+ .node_id = 280,
+ .loop_num = 4,
+ .pri_max = 4,
+ .flow_id = 44,
+ .drr_node_id = 80,
+ },
+ {
+ .port = 4,
+ .flow_level = true,
+ .node_id = 48,
+ .loop_num = 2,
+ .pri_max = 0,
+ .flow_id = 4,
+ .drr_node_id = 20,
+ },
+ {
+ .port = 4,
+ .flow_level = false,
+ .node_id = 192,
+ .loop_num = 16,
+ .pri_max = 8,
+ .flow_id = 48,
+ .drr_node_id = 96,
+ },
+ {
+ .port = 4,
+ .flow_level = false,
+ .node_id = 284,
+ .loop_num = 4,
+ .pri_max = 4,
+ .flow_id = 48,
+ .drr_node_id = 96,
+ },
+ {
+ .port = 5,
+ .flow_level = true,
+ .node_id = 52,
+ .loop_num = 2,
+ .pri_max = 0,
+ .flow_id = 5,
+ .drr_node_id = 24,
+ },
+ {
+ .port = 5,
+ .flow_level = false,
+ .node_id = 208,
+ .loop_num = 16,
+ .pri_max = 8,
+ .flow_id = 52,
+ .drr_node_id = 112,
+ },
+ {
+ .port = 5,
+ .flow_level = false,
+ .node_id = 288,
+ .loop_num = 4,
+ .pri_max = 4,
+ .flow_id = 52,
+ .drr_node_id = 112,
+ },
+ {
+ .port = 6,
+ .flow_level = true,
+ .node_id = 56,
+ .loop_num = 2,
+ .pri_max = 0,
+ .flow_id = 6,
+ .drr_node_id = 28,
+ },
+ {
+ .port = 6,
+ .flow_level = false,
+ .node_id = 224,
+ .loop_num = 16,
+ .pri_max = 8,
+ .flow_id = 56,
+ .drr_node_id = 128,
+ },
+ {
+ .port = 6,
+ .flow_level = false,
+ .node_id = 292,
+ .loop_num = 4,
+ .pri_max = 4,
+ .flow_id = 56,
+ .drr_node_id = 128,
+ },
+ {
+ .port = 7,
+ .flow_level = true,
+ .node_id = 60,
+ .loop_num = 2,
+ .pri_max = 0,
+ .flow_id = 7,
+ .drr_node_id = 32,
+ },
+ {
+ .port = 7,
+ .flow_level = false,
+ .node_id = 240,
+ .loop_num = 16,
+ .pri_max = 8,
+ .flow_id = 60,
+ .drr_node_id = 144,
+ },
+ {
+ .port = 7,
+ .flow_level = false,
+ .node_id = 296,
+ .loop_num = 4,
+ .pri_max = 4,
+ .flow_id = 60,
+ .drr_node_id = 144,
+ },
+};
+
+/* The scheduler resource is assigned to each PPE port. The resource
+ * includes the unicast and multicast queues, flow nodes and DRR nodes.
+ */
+static const struct ppe_port_schedule_resource ppe_scheduler_res[] = {
+ { .ucastq_start = 0,
+ .ucastq_end = 63,
+ .mcastq_start = 256,
+ .mcastq_end = 271,
+ .flow_id_start = 0,
+ .flow_id_end = 0,
+ .l0node_start = 0,
+ .l0node_end = 7,
+ .l1node_start = 0,
+ .l1node_end = 0,
+ },
+ { .ucastq_start = 144,
+ .ucastq_end = 159,
+ .mcastq_start = 272,
+ .mcastq_end = 275,
+ .flow_id_start = 36,
+ .flow_id_end = 39,
+ .l0node_start = 48,
+ .l0node_end = 63,
+ .l1node_start = 8,
+ .l1node_end = 11,
+ },
+ { .ucastq_start = 160,
+ .ucastq_end = 175,
+ .mcastq_start = 276,
+ .mcastq_end = 279,
+ .flow_id_start = 40,
+ .flow_id_end = 43,
+ .l0node_start = 64,
+ .l0node_end = 79,
+ .l1node_start = 12,
+ .l1node_end = 15,
+ },
+ { .ucastq_start = 176,
+ .ucastq_end = 191,
+ .mcastq_start = 280,
+ .mcastq_end = 283,
+ .flow_id_start = 44,
+ .flow_id_end = 47,
+ .l0node_start = 80,
+ .l0node_end = 95,
+ .l1node_start = 16,
+ .l1node_end = 19,
+ },
+ { .ucastq_start = 192,
+ .ucastq_end = 207,
+ .mcastq_start = 284,
+ .mcastq_end = 287,
+ .flow_id_start = 48,
+ .flow_id_end = 51,
+ .l0node_start = 96,
+ .l0node_end = 111,
+ .l1node_start = 20,
+ .l1node_end = 23,
+ },
+ { .ucastq_start = 208,
+ .ucastq_end = 223,
+ .mcastq_start = 288,
+ .mcastq_end = 291,
+ .flow_id_start = 52,
+ .flow_id_end = 55,
+ .l0node_start = 112,
+ .l0node_end = 127,
+ .l1node_start = 24,
+ .l1node_end = 27,
+ },
+ { .ucastq_start = 224,
+ .ucastq_end = 239,
+ .mcastq_start = 292,
+ .mcastq_end = 295,
+ .flow_id_start = 56,
+ .flow_id_end = 59,
+ .l0node_start = 128,
+ .l0node_end = 143,
+ .l1node_start = 28,
+ .l1node_end = 31,
+ },
+ { .ucastq_start = 240,
+ .ucastq_end = 255,
+ .mcastq_start = 296,
+ .mcastq_end = 299,
+ .flow_id_start = 60,
+ .flow_id_end = 63,
+ .l0node_start = 144,
+ .l0node_end = 159,
+ .l1node_start = 32,
+ .l1node_end = 35,
+ },
+ { .ucastq_start = 64,
+ .ucastq_end = 143,
+ .mcastq_start = 0,
+ .mcastq_end = 0,
+ .flow_id_start = 1,
+ .flow_id_end = 35,
+ .l0node_start = 8,
+ .l0node_end = 47,
+ .l1node_start = 1,
+ .l1node_end = 7,
+ },
+};
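+
+/* Each entry above is indexed by the PPE port ID; the final entry is
+ * the reserved resource, which may be acquired with the port argument
+ * one past the last port ID (see ppe_port_resource_get() below).
+ */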
+
+/* Set the PPE queue level scheduler configuration. */
+static int ppe_scheduler_l0_queue_map_set(struct ppe_device *ppe_dev,
+ int node_id, int port,
+ struct ppe_scheduler_cfg scheduler_cfg)
+{
+ u32 val, reg;
+ int ret;
+
+ reg = PPE_L0_FLOW_MAP_TBL_ADDR + node_id * PPE_L0_FLOW_MAP_TBL_INC;
+ val = FIELD_PREP(PPE_L0_FLOW_MAP_TBL_FLOW_ID, scheduler_cfg.flow_id);
+ val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_C_PRI, scheduler_cfg.pri);
+ val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_E_PRI, scheduler_cfg.pri);
+ val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_C_NODE_WT, scheduler_cfg.drr_node_wt);
+ val |= FIELD_PREP(PPE_L0_FLOW_MAP_TBL_E_NODE_WT, scheduler_cfg.drr_node_wt);
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ reg = PPE_L0_C_FLOW_CFG_TBL_ADDR +
+ (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
+ PPE_L0_C_FLOW_CFG_TBL_INC;
+ val = FIELD_PREP(PPE_L0_C_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
+ val |= FIELD_PREP(PPE_L0_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.unit_is_packet);
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ reg = PPE_L0_E_FLOW_CFG_TBL_ADDR +
+ (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
+ PPE_L0_E_FLOW_CFG_TBL_INC;
+ val = FIELD_PREP(PPE_L0_E_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
+ val |= FIELD_PREP(PPE_L0_E_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.unit_is_packet);
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ reg = PPE_L0_FLOW_PORT_MAP_TBL_ADDR + node_id * PPE_L0_FLOW_PORT_MAP_TBL_INC;
+ val = FIELD_PREP(PPE_L0_FLOW_PORT_MAP_TBL_PORT_NUM, port);
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ reg = PPE_L0_COMP_CFG_TBL_ADDR + node_id * PPE_L0_COMP_CFG_TBL_INC;
+ val = FIELD_PREP(PPE_L0_COMP_CFG_TBL_NODE_METER_LEN, scheduler_cfg.frame_mode);
+
+ return regmap_update_bits(ppe_dev->regmap, reg,
+ PPE_L0_COMP_CFG_TBL_NODE_METER_LEN,
+ val);
+}
+
+/* Set the PPE flow level scheduler configuration. */
+static int ppe_scheduler_l1_queue_map_set(struct ppe_device *ppe_dev,
+ int node_id, int port,
+ struct ppe_scheduler_cfg scheduler_cfg)
+{
+ u32 val, reg;
+ int ret;
+
+ val = FIELD_PREP(PPE_L1_FLOW_MAP_TBL_FLOW_ID, scheduler_cfg.flow_id);
+ val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_C_PRI, scheduler_cfg.pri);
+ val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_E_PRI, scheduler_cfg.pri);
+ val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_C_NODE_WT, scheduler_cfg.drr_node_wt);
+ val |= FIELD_PREP(PPE_L1_FLOW_MAP_TBL_E_NODE_WT, scheduler_cfg.drr_node_wt);
+ reg = PPE_L1_FLOW_MAP_TBL_ADDR + node_id * PPE_L1_FLOW_MAP_TBL_INC;
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(PPE_L1_C_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
+ val |= FIELD_PREP(PPE_L1_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.unit_is_packet);
+ reg = PPE_L1_C_FLOW_CFG_TBL_ADDR +
+ (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
+ PPE_L1_C_FLOW_CFG_TBL_INC;
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(PPE_L1_E_FLOW_CFG_TBL_NODE_ID, scheduler_cfg.drr_node_id);
+ val |= FIELD_PREP(PPE_L1_E_FLOW_CFG_TBL_NODE_CREDIT_UNIT, scheduler_cfg.unit_is_packet);
+ reg = PPE_L1_E_FLOW_CFG_TBL_ADDR +
+ (scheduler_cfg.flow_id * PPE_QUEUE_SCH_PRI_NUM + scheduler_cfg.pri) *
+ PPE_L1_E_FLOW_CFG_TBL_INC;
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(PPE_L1_FLOW_PORT_MAP_TBL_PORT_NUM, port);
+ reg = PPE_L1_FLOW_PORT_MAP_TBL_ADDR + node_id * PPE_L1_FLOW_PORT_MAP_TBL_INC;
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ reg = PPE_L1_COMP_CFG_TBL_ADDR + node_id * PPE_L1_COMP_CFG_TBL_INC;
+ val = FIELD_PREP(PPE_L1_COMP_CFG_TBL_NODE_METER_LEN, scheduler_cfg.frame_mode);
+
+ return regmap_update_bits(ppe_dev->regmap, reg, PPE_L1_COMP_CFG_TBL_NODE_METER_LEN, val);
+}
+
+/**
+ * ppe_queue_scheduler_set - Configure scheduler for PPE hardware queue
+ * @ppe_dev: PPE device
+ * @node_id: PPE queue ID or flow ID
+ * @flow_level: True for the flow level scheduler, false for the queue
+ * level scheduler
+ * @port: PPE port ID for which the scheduler configuration is set
+ * @scheduler_cfg: PPE scheduler configuration
+ *
+ * PPE scheduler configuration supports queue level and flow level on
+ * the PPE egress port.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
+ int node_id, bool flow_level, int port,
+ struct ppe_scheduler_cfg scheduler_cfg)
+{
+ if (flow_level)
+ return ppe_scheduler_l1_queue_map_set(ppe_dev, node_id,
+ port, scheduler_cfg);
+
+ return ppe_scheduler_l0_queue_map_set(ppe_dev, node_id,
+ port, scheduler_cfg);
+}
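+
+/* A minimal usage sketch (illustrative only, assuming a ppe_dev pointer
+ * in scope): configure the queue level (L0) node 144 of port 1, reusing
+ * the values of the matching ppe_port_sch_config entry above.
+ *
+ *	struct ppe_scheduler_cfg cfg = {
+ *		.flow_id = 36,
+ *		.pri = 0,
+ *		.drr_node_id = 48,
+ *		.drr_node_wt = 1,
+ *		.unit_is_packet = false,
+ *		.frame_mode = PPE_SCH_WITH_FRAME_CRC,
+ *	};
+ *	int err = ppe_queue_scheduler_set(ppe_dev, 144, false, 1, cfg);
+ */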
+
+/**
+ * ppe_queue_ucast_base_set - Set PPE unicast queue base ID and profile ID
+ * @ppe_dev: PPE device
+ * @queue_dst: PPE queue destination configuration
+ * @queue_base: PPE queue base ID
+ * @profile_id: Profile ID
+ *
+ * The PPE unicast queue base ID and profile ID are configured based on
+ * the destination information, which can be the service code, the CPU
+ * code or the destination port.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_queue_ucast_base_set(struct ppe_device *ppe_dev,
+ struct ppe_queue_ucast_dest queue_dst,
+ int queue_base, int profile_id)
+{
+ int index, profile_size;
+ u32 val, reg;
+
+ profile_size = queue_dst.src_profile << 8;
+ if (queue_dst.service_code_en)
+ index = PPE_QUEUE_BASE_SERVICE_CODE + profile_size +
+ queue_dst.service_code;
+ else if (queue_dst.cpu_code_en)
+ index = PPE_QUEUE_BASE_CPU_CODE + profile_size +
+ queue_dst.cpu_code;
+ else
+ index = profile_size + queue_dst.dest_port;
+
+ val = FIELD_PREP(PPE_UCAST_QUEUE_MAP_TBL_PROFILE_ID, profile_id);
+ val |= FIELD_PREP(PPE_UCAST_QUEUE_MAP_TBL_QUEUE_ID, queue_base);
+ reg = PPE_UCAST_QUEUE_MAP_TBL_ADDR + index * PPE_UCAST_QUEUE_MAP_TBL_INC;
+
+ return regmap_write(ppe_dev->regmap, reg, val);
+}
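+
+/* An illustrative sketch (hypothetical values): map packets marked with
+ * CPU code 101 under source profile 0 to queue base 8. With cpu_code_en
+ * set, the table index resolves to PPE_QUEUE_BASE_CPU_CODE + 101.
+ *
+ *	struct ppe_queue_ucast_dest dst = {
+ *		.cpu_code_en = true,
+ *		.cpu_code = 101,
+ *	};
+ *	int err = ppe_queue_ucast_base_set(ppe_dev, dst, 8, 0);
+ */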
+
+/**
+ * ppe_queue_ucast_offset_pri_set - Set PPE unicast queue offset based on priority
+ * @ppe_dev: PPE device
+ * @profile_id: Profile ID
+ * @priority: PPE internal priority to be used to set queue offset
+ * @queue_offset: Queue offset used for calculating the destination queue ID
+ *
+ * The PPE unicast queue offset is configured based on the PPE
+ * internal priority.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_queue_ucast_offset_pri_set(struct ppe_device *ppe_dev,
+ int profile_id,
+ int priority,
+ int queue_offset)
+{
+ u32 val, reg;
+ int index;
+
+ index = (profile_id << 4) + priority;
+ val = FIELD_PREP(PPE_UCAST_PRIORITY_MAP_TBL_CLASS, queue_offset);
+ reg = PPE_UCAST_PRIORITY_MAP_TBL_ADDR + index * PPE_UCAST_PRIORITY_MAP_TBL_INC;
+
+ return regmap_write(ppe_dev->regmap, reg, val);
+}
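+
+/* An illustrative sketch (hypothetical values): for profile 1 and
+ * priority 3, the entry index is (1 << 4) + 3 = 19, and the queue
+ * offset programmed at that entry is 3.
+ *
+ *	int err = ppe_queue_ucast_offset_pri_set(ppe_dev, 1, 3, 3);
+ */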
+
+/**
+ * ppe_queue_ucast_offset_hash_set - Set PPE unicast queue offset based on hash
+ * @ppe_dev: PPE device
+ * @profile_id: Profile ID
+ * @rss_hash: Packet hash value to be used to set queue offset
+ * @queue_offset: Queue offset used for calculating the destination queue ID
+ *
+ * The PPE unicast queue offset is configured based on the RSS hash value.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_queue_ucast_offset_hash_set(struct ppe_device *ppe_dev,
+ int profile_id,
+ int rss_hash,
+ int queue_offset)
+{
+ u32 val, reg;
+ int index;
+
+ index = (profile_id << 8) + rss_hash;
+ val = FIELD_PREP(PPE_UCAST_HASH_MAP_TBL_HASH, queue_offset);
+ reg = PPE_UCAST_HASH_MAP_TBL_ADDR + index * PPE_UCAST_HASH_MAP_TBL_INC;
+
+ return regmap_write(ppe_dev->regmap, reg, val);
+}
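+
+/* An illustrative sketch (hypothetical values): for profile 0 and RSS
+ * hash value 5, the entry index is (0 << 8) + 5 = 5, and the queue
+ * offset programmed at that entry is 2.
+ *
+ *	int err = ppe_queue_ucast_offset_hash_set(ppe_dev, 0, 5, 2);
+ */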
+
+/**
+ * ppe_port_resource_get - Get PPE resource per port
+ * @ppe_dev: PPE device
+ * @port: PPE port
+ * @type: Resource type
+ * @res_start: Resource start ID returned
+ * @res_end: Resource end ID returned
+ *
+ * PPE resources are assigned per PPE port and are acquired for the QoS
+ * scheduler.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_port_resource_get(struct ppe_device *ppe_dev, int port,
+ enum ppe_resource_type type,
+ int *res_start, int *res_end)
+{
+ struct ppe_port_schedule_resource res;
+
+ /* The reserved resource entry at the index one past the maximum
+ * PPE port ID is also allowed to be acquired.
+ */
+ if (port > ppe_dev->num_ports)
+ return -EINVAL;
+
+ res = ppe_scheduler_res[port];
+ switch (type) {
+ case PPE_RES_UCAST:
+ *res_start = res.ucastq_start;
+ *res_end = res.ucastq_end;
+ break;
+ case PPE_RES_MCAST:
+ *res_start = res.mcastq_start;
+ *res_end = res.mcastq_end;
+ break;
+ case PPE_RES_FLOW_ID:
+ *res_start = res.flow_id_start;
+ *res_end = res.flow_id_end;
+ break;
+ case PPE_RES_L0_NODE:
+ *res_start = res.l0node_start;
+ *res_end = res.l0node_end;
+ break;
+ case PPE_RES_L1_NODE:
+ *res_start = res.l1node_start;
+ *res_end = res.l1node_end;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
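+
+/* An illustrative sketch: look up the unicast queue range owned by
+ * port 1 before assigning queue base IDs, as ppe_queue_dest_init()
+ * does below. Per the ppe_scheduler_res table above, this returns the
+ * range 144-159.
+ *
+ *	int start, end;
+ *	int err = ppe_port_resource_get(ppe_dev, 1, PPE_RES_UCAST,
+ *					&start, &end);
+ */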
+
+/**
+ * ppe_sc_config_set - Set PPE service code configuration
+ * @ppe_dev: PPE device
+ * @sc: Service ID, 0-255 supported by PPE
+ * @cfg: Service code configuration
+ *
+ * The PPE service code is used during the packet processing stages of
+ * the PPE to perform or bypass certain selected operations on the packet.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_sc_config_set(struct ppe_device *ppe_dev, int sc, struct ppe_sc_cfg cfg)
+{
+ u32 val, reg, servcode_val[2] = {};
+ unsigned long bitmap_value;
+ int ret;
+
+ val = FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_PORT_ID_VALID, cfg.dest_port_valid);
+ val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_PORT_ID, cfg.dest_port);
+ val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_DIRECTION, cfg.is_src);
+
+ bitmap_value = bitmap_read(cfg.bitmaps.egress, 0, PPE_SC_BYPASS_EGRESS_SIZE);
+ val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_DST_BYPASS_BITMAP, bitmap_value);
+ val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_RX_CNT_EN,
+ test_bit(PPE_SC_BYPASS_COUNTER_RX, cfg.bitmaps.counter));
+ val |= FIELD_PREP(PPE_IN_L2_SERVICE_TBL_TX_CNT_EN,
+ test_bit(PPE_SC_BYPASS_COUNTER_TX, cfg.bitmaps.counter));
+ reg = PPE_IN_L2_SERVICE_TBL_ADDR + PPE_IN_L2_SERVICE_TBL_INC * sc;
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+
+ bitmap_value = bitmap_read(cfg.bitmaps.ingress, 0, PPE_SC_BYPASS_INGRESS_SIZE);
+ PPE_SERVICE_SET_BYPASS_BITMAP(servcode_val, bitmap_value);
+ PPE_SERVICE_SET_RX_CNT_EN(servcode_val,
+ test_bit(PPE_SC_BYPASS_COUNTER_RX_VLAN, cfg.bitmaps.counter));
+ reg = PPE_SERVICE_TBL_ADDR + PPE_SERVICE_TBL_INC * sc;
+
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ servcode_val, ARRAY_SIZE(servcode_val));
+ if (ret)
+ return ret;
+
+ reg = PPE_EG_SERVICE_TBL_ADDR + PPE_EG_SERVICE_TBL_INC * sc;
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ servcode_val, ARRAY_SIZE(servcode_val));
+ if (ret)
+ return ret;
+
+ PPE_EG_SERVICE_SET_NEXT_SERVCODE(servcode_val, cfg.next_service_code);
+ PPE_EG_SERVICE_SET_UPDATE_ACTION(servcode_val, cfg.eip_field_update_bitmap);
+ PPE_EG_SERVICE_SET_HW_SERVICE(servcode_val, cfg.eip_hw_service);
+ PPE_EG_SERVICE_SET_OFFSET_SEL(servcode_val, cfg.eip_offset_sel);
+ PPE_EG_SERVICE_SET_TX_CNT_EN(servcode_val,
+ test_bit(PPE_SC_BYPASS_COUNTER_TX_VLAN, cfg.bitmaps.counter));
+
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ servcode_val, ARRAY_SIZE(servcode_val));
+ if (ret)
+ return ret;
+
+ bitmap_value = bitmap_read(cfg.bitmaps.tunnel, 0, PPE_SC_BYPASS_TUNNEL_SIZE);
+ val = FIELD_PREP(PPE_TL_SERVICE_TBL_BYPASS_BITMAP, bitmap_value);
+ reg = PPE_TL_SERVICE_TBL_ADDR + PPE_TL_SERVICE_TBL_INC * sc;
+
+ return regmap_write(ppe_dev->regmap, reg, val);
+}
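+
+/* An illustrative sketch (hypothetical service code 2): enable only the
+ * RX counter and leave all the bypass bitmaps clear.
+ *
+ *	struct ppe_sc_cfg cfg = {};
+ *
+ *	set_bit(PPE_SC_BYPASS_COUNTER_RX, cfg.bitmaps.counter);
+ *	int err = ppe_sc_config_set(ppe_dev, 2, cfg);
+ */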
+
+/**
+ * ppe_counter_enable_set - Enable PPE port counters
+ * @ppe_dev: PPE device
+ * @port: PPE port ID
+ *
+ * Enable the PPE counters on the given port for the unicast, multicast
+ * and VLAN packets received and transmitted by the PPE.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_counter_enable_set(struct ppe_device *ppe_dev, int port)
+{
+ u32 reg, mru_mtu_val[3];
+ int ret;
+
+ reg = PPE_MRU_MTU_CTRL_TBL_ADDR + PPE_MRU_MTU_CTRL_TBL_INC * port;
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
+ if (ret)
+ return ret;
+
+ PPE_MRU_MTU_CTRL_SET_RX_CNT_EN(mru_mtu_val, true);
+ PPE_MRU_MTU_CTRL_SET_TX_CNT_EN(mru_mtu_val, true);
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
+ if (ret)
+ return ret;
+
+ reg = PPE_MC_MTU_CTRL_TBL_ADDR + PPE_MC_MTU_CTRL_TBL_INC * port;
+ ret = regmap_set_bits(ppe_dev->regmap, reg, PPE_MC_MTU_CTRL_TBL_TX_CNT_EN);
+ if (ret)
+ return ret;
+
+ reg = PPE_PORT_EG_VLAN_TBL_ADDR + PPE_PORT_EG_VLAN_TBL_INC * port;
+
+ return regmap_set_bits(ppe_dev->regmap, reg, PPE_PORT_EG_VLAN_TBL_TX_COUNTING_EN);
+}
+
+static int ppe_rss_hash_ipv4_config(struct ppe_device *ppe_dev, int index,
+ struct ppe_rss_hash_cfg cfg)
+{
+ u32 reg, val;
+
+ switch (index) {
+ case 0:
+ val = cfg.hash_sip_mix[0];
+ break;
+ case 1:
+ val = cfg.hash_dip_mix[0];
+ break;
+ case 2:
+ val = cfg.hash_protocol_mix;
+ break;
+ case 3:
+ val = cfg.hash_dport_mix;
+ break;
+ case 4:
+ val = cfg.hash_sport_mix;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ reg = PPE_RSS_HASH_MIX_IPV4_ADDR + index * PPE_RSS_HASH_MIX_IPV4_INC;
+
+ return regmap_update_bits(ppe_dev->regmap, reg,
+ PPE_RSS_HASH_MIX_IPV4_VAL,
+ FIELD_PREP(PPE_RSS_HASH_MIX_IPV4_VAL, val));
+}
+
+static int ppe_rss_hash_ipv6_config(struct ppe_device *ppe_dev, int index,
+ struct ppe_rss_hash_cfg cfg)
+{
+ u32 reg, val;
+
+ switch (index) {
+ case 0 ... 3:
+ val = cfg.hash_sip_mix[index];
+ break;
+ case 4 ... 7:
+ val = cfg.hash_dip_mix[index - 4];
+ break;
+ case 8:
+ val = cfg.hash_protocol_mix;
+ break;
+ case 9:
+ val = cfg.hash_dport_mix;
+ break;
+ case 10:
+ val = cfg.hash_sport_mix;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ reg = PPE_RSS_HASH_MIX_ADDR + index * PPE_RSS_HASH_MIX_INC;
+
+ return regmap_update_bits(ppe_dev->regmap, reg,
+ PPE_RSS_HASH_MIX_VAL,
+ FIELD_PREP(PPE_RSS_HASH_MIX_VAL, val));
+}
+
+/**
+ * ppe_rss_hash_config_set - Configure the PPE RSS hash settings for received packets
+ * @ppe_dev: PPE device.
+ * @mode: RSS hash mode, for the IPv4 and/or IPv6 packet type.
+ * @cfg: RSS hash configuration.
+ *
+ * The PPE RSS hash settings are configured separately for the IPv4 and
+ * IPv6 packet types.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_rss_hash_config_set(struct ppe_device *ppe_dev, int mode,
+ struct ppe_rss_hash_cfg cfg)
+{
+ u32 val, reg;
+ int i, ret;
+
+ if (mode & PPE_RSS_HASH_MODE_IPV4) {
+ val = FIELD_PREP(PPE_RSS_HASH_MASK_IPV4_HASH_MASK, cfg.hash_mask);
+ val |= FIELD_PREP(PPE_RSS_HASH_MASK_IPV4_FRAGMENT, cfg.hash_fragment_mode);
+ ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_MASK_IPV4_ADDR, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(PPE_RSS_HASH_SEED_IPV4_VAL, cfg.hash_seed);
+ ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_SEED_IPV4_ADDR, val);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < PPE_RSS_HASH_MIX_IPV4_ENTRIES; i++) {
+ ret = ppe_rss_hash_ipv4_config(ppe_dev, i, cfg);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < PPE_RSS_HASH_FIN_IPV4_ENTRIES; i++) {
+ val = FIELD_PREP(PPE_RSS_HASH_FIN_IPV4_INNER, cfg.hash_fin_inner[i]);
+ val |= FIELD_PREP(PPE_RSS_HASH_FIN_IPV4_OUTER, cfg.hash_fin_outer[i]);
+ reg = PPE_RSS_HASH_FIN_IPV4_ADDR + i * PPE_RSS_HASH_FIN_IPV4_INC;
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if (mode & PPE_RSS_HASH_MODE_IPV6) {
+ val = FIELD_PREP(PPE_RSS_HASH_MASK_HASH_MASK, cfg.hash_mask);
+ val |= FIELD_PREP(PPE_RSS_HASH_MASK_FRAGMENT, cfg.hash_fragment_mode);
+ ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_MASK_ADDR, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(PPE_RSS_HASH_SEED_VAL, cfg.hash_seed);
+ ret = regmap_write(ppe_dev->regmap, PPE_RSS_HASH_SEED_ADDR, val);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < PPE_RSS_HASH_MIX_ENTRIES; i++) {
+ ret = ppe_rss_hash_ipv6_config(ppe_dev, i, cfg);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < PPE_RSS_HASH_FIN_ENTRIES; i++) {
+ val = FIELD_PREP(PPE_RSS_HASH_FIN_INNER, cfg.hash_fin_inner[i]);
+ val |= FIELD_PREP(PPE_RSS_HASH_FIN_OUTER, cfg.hash_fin_outer[i]);
+ reg = PPE_RSS_HASH_FIN_ADDR + i * PPE_RSS_HASH_FIN_INC;
+
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ppe_ring_queue_map_set - Set the PPE queue to Ethernet DMA ring mapping
+ * @ppe_dev: PPE device
+ * @ring_id: Ethernet DMA ring ID
+ * @queue_map: Bitmap of the queue IDs mapped to the given Ethernet DMA ring
+ *
+ * Configure the mapping from a set of PPE queues to a given Ethernet DMA ring.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int ppe_ring_queue_map_set(struct ppe_device *ppe_dev, int ring_id, u32 *queue_map)
+{
+ u32 reg, queue_bitmap_val[PPE_RING_TO_QUEUE_BITMAP_WORD_CNT];
+
+ memcpy(queue_bitmap_val, queue_map, sizeof(queue_bitmap_val));
+ reg = PPE_RING_Q_MAP_TBL_ADDR + PPE_RING_Q_MAP_TBL_INC * ring_id;
+
+ return regmap_bulk_write(ppe_dev->regmap, reg,
+ queue_bitmap_val,
+ ARRAY_SIZE(queue_bitmap_val));
+}
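+
+/* An illustrative sketch (hypothetical queue selection): steer queues
+ * 0-15 to Ethernet DMA ring 0; each bit of the bitmap words selects one
+ * of the 300 PPE queues.
+ *
+ *	u32 qmap[PPE_RING_TO_QUEUE_BITMAP_WORD_CNT] = {};
+ *
+ *	qmap[0] = GENMASK(15, 0);
+ *	int err = ppe_ring_queue_map_set(ppe_dev, 0, qmap);
+ */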
+
+static int ppe_config_bm_threshold(struct ppe_device *ppe_dev, int bm_port_id,
+ const struct ppe_bm_port_config port_cfg)
+{
+ u32 reg, val, bm_fc_val[2];
+ int ret;
+
+ reg = PPE_BM_PORT_FC_CFG_TBL_ADDR + PPE_BM_PORT_FC_CFG_TBL_INC * bm_port_id;
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ bm_fc_val, ARRAY_SIZE(bm_fc_val));
+ if (ret)
+ return ret;
+
+ /* Configure BM flow control related threshold. */
+ PPE_BM_PORT_FC_SET_WEIGHT(bm_fc_val, port_cfg.weight);
+ PPE_BM_PORT_FC_SET_RESUME_OFFSET(bm_fc_val, port_cfg.resume_offset);
+ PPE_BM_PORT_FC_SET_RESUME_THRESHOLD(bm_fc_val, port_cfg.resume_ceil);
+ PPE_BM_PORT_FC_SET_DYNAMIC(bm_fc_val, port_cfg.dynamic);
+ PPE_BM_PORT_FC_SET_REACT_LIMIT(bm_fc_val, port_cfg.in_fly_buf);
+ PPE_BM_PORT_FC_SET_PRE_ALLOC(bm_fc_val, port_cfg.pre_alloc);
+
+ /* Configure low/high bits of the ceiling for the BM port. */
+ val = FIELD_GET(GENMASK(2, 0), port_cfg.ceil);
+ PPE_BM_PORT_FC_SET_CEILING_LOW(bm_fc_val, val);
+ val = FIELD_GET(GENMASK(10, 3), port_cfg.ceil);
+ PPE_BM_PORT_FC_SET_CEILING_HIGH(bm_fc_val, val);
+
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ bm_fc_val, ARRAY_SIZE(bm_fc_val));
+ if (ret)
+ return ret;
+
+ /* Assign the default group ID 0 to the BM port. */
+ val = FIELD_PREP(PPE_BM_PORT_GROUP_ID_SHARED_GROUP_ID, 0);
+ reg = PPE_BM_PORT_GROUP_ID_ADDR + PPE_BM_PORT_GROUP_ID_INC * bm_port_id;
+ ret = regmap_update_bits(ppe_dev->regmap, reg,
+ PPE_BM_PORT_GROUP_ID_SHARED_GROUP_ID,
+ val);
+ if (ret)
+ return ret;
+
+ /* Enable BM port flow control. */
+ reg = PPE_BM_PORT_FC_MODE_ADDR + PPE_BM_PORT_FC_MODE_INC * bm_port_id;
+
+ return regmap_set_bits(ppe_dev->regmap, reg, PPE_BM_PORT_FC_MODE_EN);
+}
+
+/* Configure the buffer threshold for the port flow control function. */
+static int ppe_config_bm(struct ppe_device *ppe_dev)
+{
+ const struct ppe_bm_port_config *port_cfg;
+ unsigned int i, bm_port_id, port_cfg_cnt;
+ u32 reg, val;
+ int ret;
+
+ /* Configure the allocated buffer number only for group 0.
+ * The buffer numbers of groups 1-3 are already cleared to 0
+ * by the PPE reset during the probe of the PPE driver.
+ */
+ reg = PPE_BM_SHARED_GROUP_CFG_ADDR;
+ val = FIELD_PREP(PPE_BM_SHARED_GROUP_CFG_SHARED_LIMIT,
+ ipq9574_ppe_bm_group_config);
+ ret = regmap_update_bits(ppe_dev->regmap, reg,
+ PPE_BM_SHARED_GROUP_CFG_SHARED_LIMIT,
+ val);
+ if (ret)
+ goto bm_config_fail;
+
+ /* Configure buffer thresholds for the BM ports. */
+ port_cfg = ipq9574_ppe_bm_port_config;
+ port_cfg_cnt = ARRAY_SIZE(ipq9574_ppe_bm_port_config);
+ for (i = 0; i < port_cfg_cnt; i++) {
+ for (bm_port_id = port_cfg[i].port_id_start;
+ bm_port_id <= port_cfg[i].port_id_end; bm_port_id++) {
+ ret = ppe_config_bm_threshold(ppe_dev, bm_port_id,
+ port_cfg[i]);
+ if (ret)
+ goto bm_config_fail;
+ }
+ }
+
+ return 0;
+
+bm_config_fail:
+ dev_err(ppe_dev->dev, "PPE BM config error %d\n", ret);
+ return ret;
+}
+
+/* Configure the PPE hardware queue depth, which is decided by the
+ * threshold of the queue.
+ */
+static int ppe_config_qm(struct ppe_device *ppe_dev)
+{
+ const struct ppe_qm_queue_config *queue_cfg;
+ int ret, i, queue_id, queue_cfg_count;
+ u32 reg, multicast_queue_cfg[5];
+ u32 unicast_queue_cfg[4];
+ u32 group_cfg[3];
+
+ /* Assign the buffer number to group 0 by default. */
+ reg = PPE_AC_GRP_CFG_TBL_ADDR;
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ group_cfg, ARRAY_SIZE(group_cfg));
+ if (ret)
+ goto qm_config_fail;
+
+ PPE_AC_GRP_SET_BUF_LIMIT(group_cfg, ipq9574_ppe_qm_group_config);
+
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ group_cfg, ARRAY_SIZE(group_cfg));
+ if (ret)
+ goto qm_config_fail;
+
+ queue_cfg = ipq9574_ppe_qm_queue_config;
+ queue_cfg_count = ARRAY_SIZE(ipq9574_ppe_qm_queue_config);
+ for (i = 0; i < queue_cfg_count; i++) {
+ queue_id = queue_cfg[i].queue_start;
+
+ /* Configure threshold for dropping packets separately for
+ * unicast and multicast PPE queues.
+ */
+ while (queue_id <= queue_cfg[i].queue_end) {
+ if (queue_id < PPE_AC_UNICAST_QUEUE_CFG_TBL_ENTRIES) {
+ reg = PPE_AC_UNICAST_QUEUE_CFG_TBL_ADDR +
+ PPE_AC_UNICAST_QUEUE_CFG_TBL_INC * queue_id;
+
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ unicast_queue_cfg,
+ ARRAY_SIZE(unicast_queue_cfg));
+ if (ret)
+ goto qm_config_fail;
+
+ PPE_AC_UNICAST_QUEUE_SET_EN(unicast_queue_cfg, true);
+ PPE_AC_UNICAST_QUEUE_SET_GRP_ID(unicast_queue_cfg, 0);
+ PPE_AC_UNICAST_QUEUE_SET_PRE_LIMIT(unicast_queue_cfg,
+ queue_cfg[i].prealloc_buf);
+ PPE_AC_UNICAST_QUEUE_SET_DYNAMIC(unicast_queue_cfg,
+ queue_cfg[i].dynamic);
+ PPE_AC_UNICAST_QUEUE_SET_WEIGHT(unicast_queue_cfg,
+ queue_cfg[i].weight);
+ PPE_AC_UNICAST_QUEUE_SET_THRESHOLD(unicast_queue_cfg,
+ queue_cfg[i].ceil);
+ PPE_AC_UNICAST_QUEUE_SET_GRN_RESUME(unicast_queue_cfg,
+ queue_cfg[i].resume_offset);
+
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ unicast_queue_cfg,
+ ARRAY_SIZE(unicast_queue_cfg));
+ if (ret)
+ goto qm_config_fail;
+ } else {
+ reg = PPE_AC_MULTICAST_QUEUE_CFG_TBL_ADDR +
+ PPE_AC_MULTICAST_QUEUE_CFG_TBL_INC * queue_id;
+
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ multicast_queue_cfg,
+ ARRAY_SIZE(multicast_queue_cfg));
+ if (ret)
+ goto qm_config_fail;
+
+ PPE_AC_MULTICAST_QUEUE_SET_EN(multicast_queue_cfg, true);
+ PPE_AC_MULTICAST_QUEUE_SET_GRN_GRP_ID(multicast_queue_cfg, 0);
+ PPE_AC_MULTICAST_QUEUE_SET_GRN_PRE_LIMIT(multicast_queue_cfg,
+ queue_cfg[i].prealloc_buf);
+ PPE_AC_MULTICAST_QUEUE_SET_GRN_THRESHOLD(multicast_queue_cfg,
+ queue_cfg[i].ceil);
+ PPE_AC_MULTICAST_QUEUE_SET_GRN_RESUME(multicast_queue_cfg,
+ queue_cfg[i].resume_offset);
+
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ multicast_queue_cfg,
+ ARRAY_SIZE(multicast_queue_cfg));
+ if (ret)
+ goto qm_config_fail;
+ }
+
+ /* Enable enqueue. */
+ reg = PPE_ENQ_OPR_TBL_ADDR + PPE_ENQ_OPR_TBL_INC * queue_id;
+ ret = regmap_clear_bits(ppe_dev->regmap, reg,
+ PPE_ENQ_OPR_TBL_ENQ_DISABLE);
+ if (ret)
+ goto qm_config_fail;
+
+ /* Enable dequeue. */
+ reg = PPE_DEQ_OPR_TBL_ADDR + PPE_DEQ_OPR_TBL_INC * queue_id;
+ ret = regmap_clear_bits(ppe_dev->regmap, reg,
+ PPE_DEQ_OPR_TBL_DEQ_DISABLE);
+ if (ret)
+ goto qm_config_fail;
+
+ queue_id++;
+ }
+ }
+
+ /* Enable queue counter for all PPE hardware queues. */
+ ret = regmap_set_bits(ppe_dev->regmap, PPE_EG_BRIDGE_CONFIG_ADDR,
+ PPE_EG_BRIDGE_CONFIG_QUEUE_CNT_EN);
+ if (ret)
+ goto qm_config_fail;
+
+ return 0;
+
+qm_config_fail:
+ dev_err(ppe_dev->dev, "PPE QM config error %d\n", ret);
+ return ret;
+}
+
+static int ppe_node_scheduler_config(struct ppe_device *ppe_dev,
+ const struct ppe_scheduler_port_config config)
+{
+ struct ppe_scheduler_cfg sch_cfg;
+ int ret, i;
+
+ for (i = 0; i < config.loop_num; i++) {
+ if (!config.pri_max) {
+ /* Round robin scheduler without priority. */
+ sch_cfg.flow_id = config.flow_id;
+ sch_cfg.pri = 0;
+ sch_cfg.drr_node_id = config.drr_node_id;
+ } else {
+ sch_cfg.flow_id = config.flow_id + (i / config.pri_max);
+ sch_cfg.pri = i % config.pri_max;
+ sch_cfg.drr_node_id = config.drr_node_id + i;
+ }
+
+ /* Scheduler weight must be greater than 0. */
+ sch_cfg.drr_node_wt = 1;
+ /* Byte based scheduling. */
+ sch_cfg.unit_is_packet = false;
+ /* Frame plus CRC counted in the scheduled length. */
+ sch_cfg.frame_mode = PPE_SCH_WITH_FRAME_CRC;
+
+ ret = ppe_queue_scheduler_set(ppe_dev, config.node_id + i,
+ config.flow_level,
+ config.port,
+ sch_cfg);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
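+
+/* Worked example: with the port 1 queue level entry above (node_id 144,
+ * loop_num 16, pri_max 8, flow_id 36, drr_node_id 48), iteration i = 9
+ * programs node 144 + 9 = 153 with flow_id 36 + 9 / 8 = 37,
+ * pri 9 % 8 = 1 and drr_node_id 48 + 9 = 57.
+ */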
+
+/* Initialize the scheduler settings for PPE buffer utilization and for
+ * dispatching packets on the PPE queues.
+ */
+static int ppe_config_scheduler(struct ppe_device *ppe_dev)
+{
+ const struct ppe_scheduler_port_config *port_cfg;
+ const struct ppe_scheduler_qm_config *qm_cfg;
+ const struct ppe_scheduler_bm_config *bm_cfg;
+ int ret, i, count;
+ u32 val, reg;
+
+ count = ARRAY_SIZE(ipq9574_ppe_sch_bm_config);
+ bm_cfg = ipq9574_ppe_sch_bm_config;
+
+ /* Configure the depth of BM scheduler entries. */
+ val = FIELD_PREP(PPE_BM_SCH_CTRL_SCH_DEPTH, count);
+ val |= FIELD_PREP(PPE_BM_SCH_CTRL_SCH_OFFSET, 0);
+ val |= FIELD_PREP(PPE_BM_SCH_CTRL_SCH_EN, 1);
+
+ ret = regmap_write(ppe_dev->regmap, PPE_BM_SCH_CTRL_ADDR, val);
+ if (ret)
+ goto sch_config_fail;
+
+ /* Configure each BM scheduler entry with the valid ingress port and
+ * egress port; the second port takes effect when the specified port
+ * is in the inactive state.
+ */
+ for (i = 0; i < count; i++) {
+ val = FIELD_PREP(PPE_BM_SCH_CFG_TBL_VALID, bm_cfg[i].valid);
+ val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_DIR, bm_cfg[i].dir);
+ val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_PORT_NUM, bm_cfg[i].port);
+ val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_SECOND_PORT_VALID,
+ bm_cfg[i].backup_port_valid);
+ val |= FIELD_PREP(PPE_BM_SCH_CFG_TBL_SECOND_PORT,
+ bm_cfg[i].backup_port);
+
+ reg = PPE_BM_SCH_CFG_TBL_ADDR + i * PPE_BM_SCH_CFG_TBL_INC;
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ goto sch_config_fail;
+ }
+
+ count = ARRAY_SIZE(ipq9574_ppe_sch_qm_config);
+ qm_cfg = ipq9574_ppe_sch_qm_config;
+
+ /* Configure the depth of QM scheduler entries. */
+ val = FIELD_PREP(PPE_PSCH_SCH_DEPTH_CFG_SCH_DEPTH, count);
+ ret = regmap_write(ppe_dev->regmap, PPE_PSCH_SCH_DEPTH_CFG_ADDR, val);
+ if (ret)
+ goto sch_config_fail;
+
+ /* Configure each QM scheduler entry with the enqueue port and dequeue
+ * port; the second port takes effect when the specified dequeue
+ * port is inactive.
+ */
+ for (i = 0; i < count; i++) {
+ val = FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_ENS_PORT_BITMAP,
+ qm_cfg[i].ensch_port_bmp);
+ val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_ENS_PORT,
+ qm_cfg[i].ensch_port);
+ val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_DES_PORT,
+ qm_cfg[i].desch_port);
+ val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_DES_SECOND_PORT_EN,
+ qm_cfg[i].desch_backup_port_valid);
+ val |= FIELD_PREP(PPE_PSCH_SCH_CFG_TBL_DES_SECOND_PORT,
+ qm_cfg[i].desch_backup_port);
+
+ reg = PPE_PSCH_SCH_CFG_TBL_ADDR + i * PPE_PSCH_SCH_CFG_TBL_INC;
+ ret = regmap_write(ppe_dev->regmap, reg, val);
+ if (ret)
+ goto sch_config_fail;
+ }
+
+ count = ARRAY_SIZE(ppe_port_sch_config);
+ port_cfg = ppe_port_sch_config;
+
+ /* Configure scheduler per PPE queue or flow. */
+ for (i = 0; i < count; i++) {
+ if (port_cfg[i].port >= ppe_dev->num_ports)
+ break;
+
+ ret = ppe_node_scheduler_config(ppe_dev, port_cfg[i]);
+ if (ret)
+ goto sch_config_fail;
+ }
+
+ return 0;
+
+sch_config_fail:
+ dev_err(ppe_dev->dev, "PPE scheduler arbitration config error %d\n", ret);
+ return ret;
+}
+
+/* Configure PPE queue destination of each PPE port. */
+static int ppe_queue_dest_init(struct ppe_device *ppe_dev)
+{
+ int ret, port_id, index, q_base, q_offset, res_start, res_end, pri_max;
+ struct ppe_queue_ucast_dest queue_dst;
+
+ for (port_id = 0; port_id < ppe_dev->num_ports; port_id++) {
+ memset(&queue_dst, 0, sizeof(queue_dst));
+
+ ret = ppe_port_resource_get(ppe_dev, port_id, PPE_RES_UCAST,
+ &res_start, &res_end);
+ if (ret)
+ return ret;
+
+ q_base = res_start;
+ queue_dst.dest_port = port_id;
+
+ /* Configure the queue base ID and the profile ID, which is the
+ * same as the physical port ID.
+ */
+ ret = ppe_queue_ucast_base_set(ppe_dev, queue_dst,
+ q_base, port_id);
+ if (ret)
+ return ret;
+
+ /* Queue priority range supported by each PPE port. */
+ ret = ppe_port_resource_get(ppe_dev, port_id, PPE_RES_L0_NODE,
+ &res_start, &res_end);
+ if (ret)
+ return ret;
+
+ pri_max = res_end - res_start;
+
+ /* Redirect ARP reply packets with the maximum priority on the CPU
+ * port, which keeps ARP replies directed to the CPU (CPU code 101)
+ * on the highest priority queue of the EDMA.
+ */
+ if (port_id == 0) {
+ memset(&queue_dst, 0, sizeof(queue_dst));
+
+ queue_dst.cpu_code_en = true;
+ queue_dst.cpu_code = 101;
+ ret = ppe_queue_ucast_base_set(ppe_dev, queue_dst,
+ q_base + pri_max,
+ 0);
+ if (ret)
+ return ret;
+ }
+
+ /* Initialize the queue offset of internal priority. */
+ for (index = 0; index < PPE_QUEUE_INTER_PRI_NUM; index++) {
+ q_offset = index > pri_max ? pri_max : index;
+
+ ret = ppe_queue_ucast_offset_pri_set(ppe_dev, port_id,
+ index, q_offset);
+ if (ret)
+ return ret;
+ }
+
+ /* Initialize the queue offset of the RSS hash to 0 to avoid the
+ * random hardware value that would lead to an unexpected
+ * destination queue being generated.
+ */
+ for (index = 0; index < PPE_QUEUE_HASH_NUM; index++) {
+ ret = ppe_queue_ucast_offset_hash_set(ppe_dev, port_id,
+ index, 0);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* Initialize the service code 1 used by the CPU port. */
+static int ppe_servcode_init(struct ppe_device *ppe_dev)
+{
+ struct ppe_sc_cfg sc_cfg = {};
+
+ bitmap_zero(sc_cfg.bitmaps.counter, PPE_SC_BYPASS_COUNTER_SIZE);
+ bitmap_zero(sc_cfg.bitmaps.tunnel, PPE_SC_BYPASS_TUNNEL_SIZE);
+
+ bitmap_fill(sc_cfg.bitmaps.ingress, PPE_SC_BYPASS_INGRESS_SIZE);
+ clear_bit(PPE_SC_BYPASS_INGRESS_FAKE_MAC_HEADER, sc_cfg.bitmaps.ingress);
+ clear_bit(PPE_SC_BYPASS_INGRESS_SERVICE_CODE, sc_cfg.bitmaps.ingress);
+ clear_bit(PPE_SC_BYPASS_INGRESS_FAKE_L2_PROTO, sc_cfg.bitmaps.ingress);
+
+ bitmap_fill(sc_cfg.bitmaps.egress, PPE_SC_BYPASS_EGRESS_SIZE);
+ clear_bit(PPE_SC_BYPASS_EGRESS_ACL_POST_ROUTING_CHECK, sc_cfg.bitmaps.egress);
+
+ return ppe_sc_config_set(ppe_dev, PPE_EDMA_SC_BYPASS_ID, sc_cfg);
+}
+
+/* Initialize PPE port configurations. */
+static int ppe_port_config_init(struct ppe_device *ppe_dev)
+{
+ u32 reg, val, mru_mtu_val[3];
+ int i, ret;
+
+ /* MTU and MRU settings are not required for CPU port 0. */
+ for (i = 1; i < ppe_dev->num_ports; i++) {
+ /* Enable the Ethernet port counters. */
+ ret = ppe_counter_enable_set(ppe_dev, i);
+ if (ret)
+ return ret;
+
+ reg = PPE_MRU_MTU_CTRL_TBL_ADDR + PPE_MRU_MTU_CTRL_TBL_INC * i;
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
+ if (ret)
+ return ret;
+
+ /* Drop the packet when the packet size exceeds the MTU, and
+ * redirect the packet to the CPU port when the received
+ * packet size exceeds the MRU of the physical interface.
+ */
+ PPE_MRU_MTU_CTRL_SET_MRU_CMD(mru_mtu_val, PPE_ACTION_REDIRECT_TO_CPU);
+ PPE_MRU_MTU_CTRL_SET_MTU_CMD(mru_mtu_val, PPE_ACTION_DROP);
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ mru_mtu_val, ARRAY_SIZE(mru_mtu_val));
+ if (ret)
+ return ret;
+
+ reg = PPE_MC_MTU_CTRL_TBL_ADDR + PPE_MC_MTU_CTRL_TBL_INC * i;
+ val = FIELD_PREP(PPE_MC_MTU_CTRL_TBL_MTU_CMD, PPE_ACTION_DROP);
+ ret = regmap_update_bits(ppe_dev->regmap, reg,
+ PPE_MC_MTU_CTRL_TBL_MTU_CMD,
+ val);
+ if (ret)
+ return ret;
+ }
+
+ /* Enable CPU port counters. */
+ return ppe_counter_enable_set(ppe_dev, 0);
+}
+
+/* Initialize the PPE RSS configuration for IPv4 and IPv6 packet receive.
+ * The RSS settings control the RSS hash value calculated for a received
+ * packet. This hash is then used to generate the queue offset that
+ * determines the queue on which the packet is transmitted.
+ */
+static int ppe_rss_hash_init(struct ppe_device *ppe_dev)
+{
+ u16 fins[PPE_RSS_HASH_TUPLES] = { 0x205, 0x264, 0x227, 0x245, 0x201 };
+ u8 ips[PPE_RSS_HASH_IP_LENGTH] = { 0x13, 0xb, 0x13, 0xb };
+ struct ppe_rss_hash_cfg hash_cfg;
+ int i, ret;
+
+ hash_cfg.hash_seed = get_random_u32();
+ hash_cfg.hash_mask = 0xfff;
+
+ /* Use 5 tuple as RSS hash key for the first fragment of TCP, UDP
+ * and UDP-Lite packets.
+ */
+ hash_cfg.hash_fragment_mode = false;
+
+ /* The final common seed configs used to calculate the RSS hash
+ * value, which are available for both IPv4 and IPv6 packets.
+ */
+ for (i = 0; i < ARRAY_SIZE(fins); i++) {
+ hash_cfg.hash_fin_inner[i] = fins[i] & 0x1f;
+ hash_cfg.hash_fin_outer[i] = fins[i] >> 5;
+ }
+
+ /* RSS seeds for IP protocol, L4 destination & source port and
+ * destination & source IP used to calculate the RSS hash value.
+ */
+ hash_cfg.hash_protocol_mix = 0x13;
+ hash_cfg.hash_dport_mix = 0xb;
+ hash_cfg.hash_sport_mix = 0x13;
+ hash_cfg.hash_dip_mix[0] = 0xb;
+ hash_cfg.hash_sip_mix[0] = 0x13;
+
+ /* Configure RSS seed configs for IPv4 packet. */
+ ret = ppe_rss_hash_config_set(ppe_dev, PPE_RSS_HASH_MODE_IPV4, hash_cfg);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(ips); i++) {
+ hash_cfg.hash_sip_mix[i] = ips[i];
+ hash_cfg.hash_dip_mix[i] = ips[i];
+ }
+
+ /* Configure RSS seed configs for IPv6 packet. */
+ return ppe_rss_hash_config_set(ppe_dev, PPE_RSS_HASH_MODE_IPV6, hash_cfg);
+}
+
+/* Initialize the mapping of the PPE queues assigned to CPU port 0
+ * to Ethernet DMA ring 0.
+ */
+static int ppe_queues_to_ring_init(struct ppe_device *ppe_dev)
+{
+ u32 queue_bmap[PPE_RING_TO_QUEUE_BITMAP_WORD_CNT] = {};
+ int ret, queue_id, queue_max;
+
+ ret = ppe_port_resource_get(ppe_dev, 0, PPE_RES_UCAST,
+ &queue_id, &queue_max);
+ if (ret)
+ return ret;
+
+ for (; queue_id <= queue_max; queue_id++)
+ queue_bmap[queue_id / 32] |= BIT_MASK(queue_id % 32);
+
+ return ppe_ring_queue_map_set(ppe_dev, 0, queue_bmap);
+}
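+
+/* Per the ppe_scheduler_res table above, CPU port 0 owns the unicast
+ * queues 0-63, so the loop sets bits 0-63: queue_bmap[0] and
+ * queue_bmap[1] both end up as 0xffffffff.
+ */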
+
+/* Initialize the PPE bridge settings to only enable L2 frame receive and
+ * transmit between the CPU port and the PPE Ethernet ports.
+ */
+static int ppe_bridge_init(struct ppe_device *ppe_dev)
+{
+ u32 reg, mask, port_cfg[4], vsi_cfg[2];
+ int ret, i;
+
+ /* Configure the following settings for CPU port 0:
+ * a.) Enable Bridge TX.
+ * b.) Disable FDB new address learning.
+ * c.) Disable station move address learning.
+ */
+ mask = PPE_PORT_BRIDGE_TXMAC_EN;
+ mask |= PPE_PORT_BRIDGE_NEW_LRN_EN;
+ mask |= PPE_PORT_BRIDGE_STA_MOVE_LRN_EN;
+ ret = regmap_update_bits(ppe_dev->regmap,
+ PPE_PORT_BRIDGE_CTRL_ADDR,
+ mask,
+ PPE_PORT_BRIDGE_TXMAC_EN);
+ if (ret)
+ return ret;
+
+ for (i = 1; i < ppe_dev->num_ports; i++) {
+ /* Enable invalid VSI forwarding for all the physical ports
+ * to CPU port 0, in case no VSI is assigned to the physical
+ * port.
+ */
+ reg = PPE_L2_VP_PORT_TBL_ADDR + PPE_L2_VP_PORT_TBL_INC * i;
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ port_cfg, ARRAY_SIZE(port_cfg));
+ if (ret)
+ return ret;
+
+ PPE_L2_PORT_SET_INVALID_VSI_FWD_EN(port_cfg, true);
+ PPE_L2_PORT_SET_DST_INFO(port_cfg, 0);
+
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ port_cfg, ARRAY_SIZE(port_cfg));
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < PPE_VSI_TBL_ENTRIES; i++) {
+ /* Set the VSI forward membership to include only CPU port 0.
+ * FDB learning and forwarding take effect only after switchdev
+ * support is added later to create the VSI and join the physical
+ * ports to the VSI port member.
+ */
+ reg = PPE_VSI_TBL_ADDR + PPE_VSI_TBL_INC * i;
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ vsi_cfg, ARRAY_SIZE(vsi_cfg));
+ if (ret)
+ return ret;
+
+ PPE_VSI_SET_MEMBER_PORT_BITMAP(vsi_cfg, BIT(0));
+ PPE_VSI_SET_UUC_BITMAP(vsi_cfg, BIT(0));
+ PPE_VSI_SET_UMC_BITMAP(vsi_cfg, BIT(0));
+ PPE_VSI_SET_BC_BITMAP(vsi_cfg, BIT(0));
+ PPE_VSI_SET_NEW_ADDR_LRN_EN(vsi_cfg, true);
+ PPE_VSI_SET_NEW_ADDR_FWD_CMD(vsi_cfg, PPE_ACTION_FORWARD);
+ PPE_VSI_SET_STATION_MOVE_LRN_EN(vsi_cfg, true);
+ PPE_VSI_SET_STATION_MOVE_FWD_CMD(vsi_cfg, PPE_ACTION_FORWARD);
+
+ ret = regmap_bulk_write(ppe_dev->regmap, reg,
+ vsi_cfg, ARRAY_SIZE(vsi_cfg));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int ppe_hw_config(struct ppe_device *ppe_dev)
+{
+ int ret;
+
+ ret = ppe_config_bm(ppe_dev);
+ if (ret)
+ return ret;
+
+ ret = ppe_config_qm(ppe_dev);
+ if (ret)
+ return ret;
+
+ ret = ppe_config_scheduler(ppe_dev);
+ if (ret)
+ return ret;
+
+ ret = ppe_queue_dest_init(ppe_dev);
+ if (ret)
+ return ret;
+
+ ret = ppe_servcode_init(ppe_dev);
+ if (ret)
+ return ret;
+
+ ret = ppe_port_config_init(ppe_dev);
+ if (ret)
+ return ret;
+
+ ret = ppe_rss_hash_init(ppe_dev);
+ if (ret)
+ return ret;
+
+ ret = ppe_queues_to_ring_init(ppe_dev);
+ if (ret)
+ return ret;
+
+ return ppe_bridge_init(ppe_dev);
+}
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_config.h b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
new file mode 100644
index 000000000000..4bb45ca40144
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_config.h
@@ -0,0 +1,317 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef __PPE_CONFIG_H__
+#define __PPE_CONFIG_H__
+
+#include <linux/types.h>
+
+#include "ppe.h"
+
+/* There are different table index ranges for configuring queue base ID of
+ * the destination port, CPU code and service code.
+ */
+#define PPE_QUEUE_BASE_DEST_PORT 0
+#define PPE_QUEUE_BASE_CPU_CODE 1024
+#define PPE_QUEUE_BASE_SERVICE_CODE 2048
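+
+/* A unicast queue map table index is formed by adding the profile
+ * offset (src_profile << 8) and the lookup key (destination port, CPU
+ * code or service code) to one of the bases above; for example, CPU
+ * code 101 under profile 0 resolves to index 1024 + 101 = 1125.
+ */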
+
+#define PPE_QUEUE_INTER_PRI_NUM 16
+#define PPE_QUEUE_HASH_NUM 256
+
+/* The service code is used by the EDMA port to transmit packets to the PPE. */
+#define PPE_EDMA_SC_BYPASS_ID 1
+
+/* The PPE RSS hash is configured separately for IPv4 and IPv6 packets. */
+#define PPE_RSS_HASH_MODE_IPV4 BIT(0)
+#define PPE_RSS_HASH_MODE_IPV6 BIT(1)
+#define PPE_RSS_HASH_IP_LENGTH 4
+#define PPE_RSS_HASH_TUPLES 5
+
+/* PPE supports 300 queues; each bit represents one queue. */
+#define PPE_RING_TO_QUEUE_BITMAP_WORD_CNT 10
+
+/**
+ * enum ppe_scheduler_frame_mode - PPE scheduler frame mode.
+ * @PPE_SCH_WITH_IPG_PREAMBLE_FRAME_CRC: The scheduled frame includes IPG,
+ * preamble, Ethernet packet and CRC.
+ * @PPE_SCH_WITH_FRAME_CRC: The scheduled frame includes Ethernet frame and CRC
+ * excluding IPG and preamble.
+ * @PPE_SCH_WITH_L3_PAYLOAD: The scheduled frame includes layer 3 packet data.
+ */
+enum ppe_scheduler_frame_mode {
+ PPE_SCH_WITH_IPG_PREAMBLE_FRAME_CRC = 0,
+ PPE_SCH_WITH_FRAME_CRC = 1,
+ PPE_SCH_WITH_L3_PAYLOAD = 2,
+};
+
+/**
+ * struct ppe_scheduler_cfg - PPE scheduler configuration.
+ * @flow_id: PPE flow ID.
+ * @pri: Scheduler priority.
+ * @drr_node_id: Node ID for scheduled traffic.
+ * @drr_node_wt: Weight for scheduled traffic.
+ * @unit_is_packet: Packet based or byte based unit for scheduled traffic.
+ * @frame_mode: Packet mode to be scheduled.
+ *
+ * PPE scheduler supports commit rate and exceed rate configurations.
+ */
+struct ppe_scheduler_cfg {
+ int flow_id;
+ int pri;
+ int drr_node_id;
+ int drr_node_wt;
+ bool unit_is_packet;
+ enum ppe_scheduler_frame_mode frame_mode;
+};
+
+/**
+ * enum ppe_resource_type - PPE resource type.
+ * @PPE_RES_UCAST: Unicast queue resource.
+ * @PPE_RES_MCAST: Multicast queue resource.
+ * @PPE_RES_L0_NODE: Level 0 for queue based node resource.
+ * @PPE_RES_L1_NODE: Level 1 for flow based node resource.
+ * @PPE_RES_FLOW_ID: Flow based node resource.
+ */
+enum ppe_resource_type {
+ PPE_RES_UCAST,
+ PPE_RES_MCAST,
+ PPE_RES_L0_NODE,
+ PPE_RES_L1_NODE,
+ PPE_RES_FLOW_ID,
+};
+
+/**
+ * struct ppe_queue_ucast_dest - PPE unicast queue destination.
+ * @src_profile: Source profile.
+ * @service_code_en: Enable service code to map the queue base ID.
+ * @service_code: Service code.
+ * @cpu_code_en: Enable CPU code to map the queue base ID.
+ * @cpu_code: CPU code.
+ * @dest_port: Destination port.
+ *
+ * The PPE egress queue ID is decided by the service code if enabled,
+ * otherwise by the CPU code if enabled, or by the destination port if
+ * both the service code and the CPU code are disabled.
+ */
+struct ppe_queue_ucast_dest {
+ int src_profile;
+ bool service_code_en;
+ int service_code;
+ bool cpu_code_en;
+ int cpu_code;
+ int dest_port;
+};
+
+/* Hardware bitmaps for bypassing features of the ingress packet. */
+enum ppe_sc_ingress_type {
+ PPE_SC_BYPASS_INGRESS_VLAN_TAG_FMT_CHECK = 0,
+ PPE_SC_BYPASS_INGRESS_VLAN_MEMBER_CHECK = 1,
+ PPE_SC_BYPASS_INGRESS_VLAN_TRANSLATE = 2,
+ PPE_SC_BYPASS_INGRESS_MY_MAC_CHECK = 3,
+ PPE_SC_BYPASS_INGRESS_DIP_LOOKUP = 4,
+ PPE_SC_BYPASS_INGRESS_FLOW_LOOKUP = 5,
+ PPE_SC_BYPASS_INGRESS_FLOW_ACTION = 6,
+ PPE_SC_BYPASS_INGRESS_ACL = 7,
+ PPE_SC_BYPASS_INGRESS_FAKE_MAC_HEADER = 8,
+ PPE_SC_BYPASS_INGRESS_SERVICE_CODE = 9,
+ PPE_SC_BYPASS_INGRESS_WRONG_PKT_FMT_L2 = 10,
+ PPE_SC_BYPASS_INGRESS_WRONG_PKT_FMT_L3_IPV4 = 11,
+ PPE_SC_BYPASS_INGRESS_WRONG_PKT_FMT_L3_IPV6 = 12,
+ PPE_SC_BYPASS_INGRESS_WRONG_PKT_FMT_L4 = 13,
+ PPE_SC_BYPASS_INGRESS_FLOW_SERVICE_CODE = 14,
+ PPE_SC_BYPASS_INGRESS_ACL_SERVICE_CODE = 15,
+ PPE_SC_BYPASS_INGRESS_FAKE_L2_PROTO = 16,
+ PPE_SC_BYPASS_INGRESS_PPPOE_TERMINATION = 17,
+ PPE_SC_BYPASS_INGRESS_DEFAULT_VLAN = 18,
+ PPE_SC_BYPASS_INGRESS_DEFAULT_PCP = 19,
+ PPE_SC_BYPASS_INGRESS_VSI_ASSIGN = 20,
+ /* Values 21-23 are not specified by hardware. */
+ PPE_SC_BYPASS_INGRESS_VLAN_ASSIGN_FAIL = 24,
+ PPE_SC_BYPASS_INGRESS_SOURCE_GUARD = 25,
+ PPE_SC_BYPASS_INGRESS_MRU_MTU_CHECK = 26,
+ PPE_SC_BYPASS_INGRESS_FLOW_SRC_CHECK = 27,
+ PPE_SC_BYPASS_INGRESS_FLOW_QOS = 28,
+ /* This must be last as it determines the size of the BITMAP. */
+ PPE_SC_BYPASS_INGRESS_SIZE,
+};
+
+/* Hardware bitmaps for bypassing features of the egress packet. */
+enum ppe_sc_egress_type {
+ PPE_SC_BYPASS_EGRESS_VLAN_MEMBER_CHECK = 0,
+ PPE_SC_BYPASS_EGRESS_VLAN_TRANSLATE = 1,
+ PPE_SC_BYPASS_EGRESS_VLAN_TAG_FMT_CTRL = 2,
+ PPE_SC_BYPASS_EGRESS_FDB_LEARN = 3,
+ PPE_SC_BYPASS_EGRESS_FDB_REFRESH = 4,
+ PPE_SC_BYPASS_EGRESS_L2_SOURCE_SECURITY = 5,
+ PPE_SC_BYPASS_EGRESS_MANAGEMENT_FWD = 6,
+ PPE_SC_BYPASS_EGRESS_BRIDGING_FWD = 7,
+ PPE_SC_BYPASS_EGRESS_IN_STP_FLTR = 8,
+ PPE_SC_BYPASS_EGRESS_EG_STP_FLTR = 9,
+ PPE_SC_BYPASS_EGRESS_SOURCE_FLTR = 10,
+ PPE_SC_BYPASS_EGRESS_POLICER = 11,
+ PPE_SC_BYPASS_EGRESS_L2_PKT_EDIT = 12,
+ PPE_SC_BYPASS_EGRESS_L3_PKT_EDIT = 13,
+ PPE_SC_BYPASS_EGRESS_ACL_POST_ROUTING_CHECK = 14,
+ PPE_SC_BYPASS_EGRESS_PORT_ISOLATION = 15,
+ PPE_SC_BYPASS_EGRESS_PRE_ACL_QOS = 16,
+ PPE_SC_BYPASS_EGRESS_POST_ACL_QOS = 17,
+ PPE_SC_BYPASS_EGRESS_DSCP_QOS = 18,
+ PPE_SC_BYPASS_EGRESS_PCP_QOS = 19,
+ PPE_SC_BYPASS_EGRESS_PREHEADER_QOS = 20,
+ PPE_SC_BYPASS_EGRESS_FAKE_MAC_DROP = 21,
+ PPE_SC_BYPASS_EGRESS_TUNL_CONTEXT = 22,
+ PPE_SC_BYPASS_EGRESS_FLOW_POLICER = 23,
+ /* This must be last as it determines the size of the BITMAP. */
+ PPE_SC_BYPASS_EGRESS_SIZE,
+};
+
+/* Hardware bitmaps for bypassing counter of packet. */
+enum ppe_sc_counter_type {
+ PPE_SC_BYPASS_COUNTER_RX_VLAN = 0,
+ PPE_SC_BYPASS_COUNTER_RX = 1,
+ PPE_SC_BYPASS_COUNTER_TX_VLAN = 2,
+ PPE_SC_BYPASS_COUNTER_TX = 3,
+ /* This must be last as it determines the size of the BITMAP. */
+ PPE_SC_BYPASS_COUNTER_SIZE,
+};
+
+/* Hardware bitmaps for bypassing features of tunnel packet. */
+enum ppe_sc_tunnel_type {
+ PPE_SC_BYPASS_TUNNEL_SERVICE_CODE = 0,
+ PPE_SC_BYPASS_TUNNEL_TUNNEL_HANDLE = 1,
+ PPE_SC_BYPASS_TUNNEL_L3_IF_CHECK = 2,
+ PPE_SC_BYPASS_TUNNEL_VLAN_CHECK = 3,
+ PPE_SC_BYPASS_TUNNEL_DMAC_CHECK = 4,
+ PPE_SC_BYPASS_TUNNEL_UDP_CSUM_0_CHECK = 5,
+ PPE_SC_BYPASS_TUNNEL_TBL_DE_ACCE_CHECK = 6,
+ PPE_SC_BYPASS_TUNNEL_PPPOE_MC_TERM_CHECK = 7,
+ PPE_SC_BYPASS_TUNNEL_TTL_EXCEED_CHECK = 8,
+ PPE_SC_BYPASS_TUNNEL_MAP_SRC_CHECK = 9,
+ PPE_SC_BYPASS_TUNNEL_MAP_DST_CHECK = 10,
+ PPE_SC_BYPASS_TUNNEL_LPM_DST_LOOKUP = 11,
+ PPE_SC_BYPASS_TUNNEL_LPM_LOOKUP = 12,
+ PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_L2 = 13,
+ PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_L3_IPV4 = 14,
+ PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_L3_IPV6 = 15,
+ PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_L4 = 16,
+ PPE_SC_BYPASS_TUNNEL_WRONG_PKT_FMT_TUNNEL = 17,
+ /* Values 18-19 are not specified by hardware. */
+ PPE_SC_BYPASS_TUNNEL_PRE_IPO = 20,
+ /* This must be last as it determines the size of the BITMAP. */
+ PPE_SC_BYPASS_TUNNEL_SIZE,
+};
+
+/**
+ * struct ppe_sc_bypass - PPE service bypass bitmaps
+ * @ingress: Bitmap of features that can be bypassed on the ingress packet.
+ * @egress: Bitmap of features that can be bypassed on the egress packet.
+ * @counter: Bitmap of features that can be bypassed on the counter type.
+ * @tunnel: Bitmap of features that can be bypassed on the tunnel packet.
+ */
+struct ppe_sc_bypass {
+ DECLARE_BITMAP(ingress, PPE_SC_BYPASS_INGRESS_SIZE);
+ DECLARE_BITMAP(egress, PPE_SC_BYPASS_EGRESS_SIZE);
+ DECLARE_BITMAP(counter, PPE_SC_BYPASS_COUNTER_SIZE);
+ DECLARE_BITMAP(tunnel, PPE_SC_BYPASS_TUNNEL_SIZE);
+};
+
+/**
+ * struct ppe_sc_cfg - PPE service code configuration.
+ * @dest_port_valid: Generate destination port or not.
+ * @dest_port: Destination port ID.
+ * @bitmaps: Bitmap of bypass features.
+ * @is_src: Destination port acts as source port, packet sent to CPU.
+ * @next_service_code: New service code generated.
+ * @eip_field_update_bitmap: Fields updated as actions taken for EIP.
+ * @eip_hw_service: Selected hardware functions for EIP.
+ * @eip_offset_sel: Packet offset selection for EIP, using either the
+ * packet's layer 4 offset or its layer 3 offset.
+ *
+ * Service code is generated during the packet passing through PPE.
+ */
+struct ppe_sc_cfg {
+ bool dest_port_valid;
+ int dest_port;
+ struct ppe_sc_bypass bitmaps;
+ bool is_src;
+ int next_service_code;
+ int eip_field_update_bitmap;
+ int eip_hw_service;
+ int eip_offset_sel;
+};
+
+/**
+ * enum ppe_action_type - PPE action of the received packet.
+ * @PPE_ACTION_FORWARD: Packet forwarded per L2/L3 process.
+ * @PPE_ACTION_DROP: Packet dropped by PPE.
+ * @PPE_ACTION_COPY_TO_CPU: Packet copied to CPU port per multicast queue.
+ * @PPE_ACTION_REDIRECT_TO_CPU: Packet redirected to CPU port per unicast queue.
+ */
+enum ppe_action_type {
+ PPE_ACTION_FORWARD = 0,
+ PPE_ACTION_DROP = 1,
+ PPE_ACTION_COPY_TO_CPU = 2,
+ PPE_ACTION_REDIRECT_TO_CPU = 3,
+};
+
+/**
+ * struct ppe_rss_hash_cfg - PPE RSS hash configuration.
+ * @hash_mask: Mask of the generated hash value.
+ * @hash_fragment_mode: Hash generation mode for the first fragment of TCP,
+ * UDP and UDP-Lite packets, to use either 3 tuple or 5 tuple for RSS hash
+ * key computation.
+ * @hash_seed: Seed to generate RSS hash.
+ * @hash_sip_mix: Source IP selection.
+ * @hash_dip_mix: Destination IP selection.
+ * @hash_protocol_mix: Protocol selection.
+ * @hash_sport_mix: Source L4 port selection.
+ * @hash_dport_mix: Destination L4 port selection.
+ * @hash_fin_inner: RSS hash value first selection.
+ * @hash_fin_outer: RSS hash value second selection.
+ *
+ * PPE RSS hash value is generated for the packet based on the RSS hash
+ * configured.
+ */
+struct ppe_rss_hash_cfg {
+ u32 hash_mask;
+ bool hash_fragment_mode;
+ u32 hash_seed;
+ u8 hash_sip_mix[PPE_RSS_HASH_IP_LENGTH];
+ u8 hash_dip_mix[PPE_RSS_HASH_IP_LENGTH];
+ u8 hash_protocol_mix;
+ u8 hash_sport_mix;
+ u8 hash_dport_mix;
+ u8 hash_fin_inner[PPE_RSS_HASH_TUPLES];
+ u8 hash_fin_outer[PPE_RSS_HASH_TUPLES];
+};
+
+int ppe_hw_config(struct ppe_device *ppe_dev);
+int ppe_queue_scheduler_set(struct ppe_device *ppe_dev,
+ int node_id, bool flow_level, int port,
+ struct ppe_scheduler_cfg scheduler_cfg);
+int ppe_queue_ucast_base_set(struct ppe_device *ppe_dev,
+ struct ppe_queue_ucast_dest queue_dst,
+ int queue_base,
+ int profile_id);
+int ppe_queue_ucast_offset_pri_set(struct ppe_device *ppe_dev,
+ int profile_id,
+ int priority,
+ int queue_offset);
+int ppe_queue_ucast_offset_hash_set(struct ppe_device *ppe_dev,
+ int profile_id,
+ int rss_hash,
+ int queue_offset);
+int ppe_port_resource_get(struct ppe_device *ppe_dev, int port,
+ enum ppe_resource_type type,
+ int *res_start, int *res_end);
+int ppe_sc_config_set(struct ppe_device *ppe_dev, int sc,
+ struct ppe_sc_cfg cfg);
+int ppe_counter_enable_set(struct ppe_device *ppe_dev, int port);
+int ppe_rss_hash_config_set(struct ppe_device *ppe_dev, int mode,
+ struct ppe_rss_hash_cfg hash_cfg);
+int ppe_ring_queue_map_set(struct ppe_device *ppe_dev,
+ int ring_id,
+ u32 *queue_map);
+#endif
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c b/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c
new file mode 100644
index 000000000000..fd959a76ff43
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c
@@ -0,0 +1,847 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+/* PPE debugfs routines for displaying the PPE counters useful for debugging. */
+
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
+#include <linux/dev_printk.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/seq_file.h>
+
+#include "ppe.h"
+#include "ppe_config.h"
+#include "ppe_debugfs.h"
+#include "ppe_regs.h"
+
+#define PPE_PKT_CNT_TBL_SIZE 3
+#define PPE_DROP_PKT_CNT_TBL_SIZE 5
+
+#define PPE_W0_PKT_CNT GENMASK(31, 0)
+#define PPE_W2_DROP_PKT_CNT_LOW GENMASK(31, 8)
+#define PPE_W3_DROP_PKT_CNT_HIGH GENMASK(7, 0)
+
+#define PPE_GET_PKT_CNT(tbl_cnt) \
+ FIELD_GET(PPE_W0_PKT_CNT, *(tbl_cnt))
+#define PPE_GET_DROP_PKT_CNT_LOW(tbl_cnt) \
+ FIELD_GET(PPE_W2_DROP_PKT_CNT_LOW, *((tbl_cnt) + 0x2))
+#define PPE_GET_DROP_PKT_CNT_HIGH(tbl_cnt) \
+ FIELD_GET(PPE_W3_DROP_PKT_CNT_HIGH, *((tbl_cnt) + 0x3))
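+
+/* The 32-bit drop counter is split across words 2 and 3 of the 5-word
+ * counter table: bits 31:8 of word 2 hold the low 24 bits and bits 7:0
+ * of word 3 hold the high 8 bits; ppe_pkt_cnt_get() reassembles them
+ * into a single value with FIELD_PREP().
+ */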
+
+/**
+ * enum ppe_cnt_size_type - PPE counter size type
+ * @PPE_PKT_CNT_SIZE_1WORD: Counter size with single register
+ * @PPE_PKT_CNT_SIZE_3WORD: Counter size with table of 3 words
+ * @PPE_PKT_CNT_SIZE_5WORD: Counter size with table of 5 words
+ *
+ * The PPE uses registers of different sizes to record the packet
+ * counters: a single register, or a register table of 3 or 5 words.
+ * The counter with the 5-word table also records the drop counter.
+ * There are also some other counter types occupying sizes of less than
+ * 32 bits, which are not covered by this enumeration type.
+ */
+enum ppe_cnt_size_type {
+ PPE_PKT_CNT_SIZE_1WORD,
+ PPE_PKT_CNT_SIZE_3WORD,
+ PPE_PKT_CNT_SIZE_5WORD,
+};
+
+/**
+ * enum ppe_cnt_type - PPE counter type.
+ * @PPE_CNT_BM: Packet counter processed by BM.
+ * @PPE_CNT_PARSE: Packet counter parsed on ingress.
+ * @PPE_CNT_PORT_RX: Packet counter on the ingress port.
+ * @PPE_CNT_VLAN_RX: VLAN packet counter received.
+ * @PPE_CNT_L2_FWD: Packet counter processed by L2 forwarding.
+ * @PPE_CNT_CPU_CODE: Packet counter marked with various CPU codes.
+ * @PPE_CNT_VLAN_TX: VLAN packet counter transmitted.
+ * @PPE_CNT_PORT_TX: Packet counter on the egress port.
+ * @PPE_CNT_QM: Packet counter processed by QM.
+ */
+enum ppe_cnt_type {
+ PPE_CNT_BM,
+ PPE_CNT_PARSE,
+ PPE_CNT_PORT_RX,
+ PPE_CNT_VLAN_RX,
+ PPE_CNT_L2_FWD,
+ PPE_CNT_CPU_CODE,
+ PPE_CNT_VLAN_TX,
+ PPE_CNT_PORT_TX,
+ PPE_CNT_QM,
+};
+
+/**
+ * struct ppe_debugfs_entry - PPE debugfs entry.
+ * @name: Debugfs file name.
+ * @counter_type: PPE packet counter type.
+ * @ppe: PPE device.
+ *
+ * The PPE debugfs entry is used to create the debugfs file and passed
+ * to debugfs_create_file() as private data.
+ */
+struct ppe_debugfs_entry {
+ const char *name;
+ enum ppe_cnt_type counter_type;
+ struct ppe_device *ppe;
+};
+
+static const struct ppe_debugfs_entry debugfs_files[] = {
+ {
+ .name = "bm",
+ .counter_type = PPE_CNT_BM,
+ },
+ {
+ .name = "parse",
+ .counter_type = PPE_CNT_PARSE,
+ },
+ {
+ .name = "port_rx",
+ .counter_type = PPE_CNT_PORT_RX,
+ },
+ {
+ .name = "vlan_rx",
+ .counter_type = PPE_CNT_VLAN_RX,
+ },
+ {
+ .name = "l2_forward",
+ .counter_type = PPE_CNT_L2_FWD,
+ },
+ {
+ .name = "cpu_code",
+ .counter_type = PPE_CNT_CPU_CODE,
+ },
+ {
+ .name = "vlan_tx",
+ .counter_type = PPE_CNT_VLAN_TX,
+ },
+ {
+ .name = "port_tx",
+ .counter_type = PPE_CNT_PORT_TX,
+ },
+ {
+ .name = "qm",
+ .counter_type = PPE_CNT_QM,
+ },
+};
+
+static int ppe_pkt_cnt_get(struct ppe_device *ppe_dev, u32 reg,
+ enum ppe_cnt_size_type cnt_type,
+ u32 *cnt, u32 *drop_cnt)
+{
+ u32 drop_pkt_cnt[PPE_DROP_PKT_CNT_TBL_SIZE];
+ u32 pkt_cnt[PPE_PKT_CNT_TBL_SIZE];
+ u32 value;
+ int ret;
+
+ switch (cnt_type) {
+ case PPE_PKT_CNT_SIZE_1WORD:
+ ret = regmap_read(ppe_dev->regmap, reg, &value);
+ if (ret)
+ return ret;
+
+ *cnt = value;
+ break;
+ case PPE_PKT_CNT_SIZE_3WORD:
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ pkt_cnt, ARRAY_SIZE(pkt_cnt));
+ if (ret)
+ return ret;
+
+ *cnt = PPE_GET_PKT_CNT(pkt_cnt);
+ break;
+ case PPE_PKT_CNT_SIZE_5WORD:
+ ret = regmap_bulk_read(ppe_dev->regmap, reg,
+ drop_pkt_cnt, ARRAY_SIZE(drop_pkt_cnt));
+ if (ret)
+ return ret;
+
+ *cnt = PPE_GET_PKT_CNT(drop_pkt_cnt);
+
+ /* Drop counter with low 24 bits. */
+ value = PPE_GET_DROP_PKT_CNT_LOW(drop_pkt_cnt);
+ *drop_cnt = FIELD_PREP(GENMASK(23, 0), value);
+
+ /* Drop counter with high 8 bits. */
+ value = PPE_GET_DROP_PKT_CNT_HIGH(drop_pkt_cnt);
+ *drop_cnt |= FIELD_PREP(GENMASK(31, 24), value);
+ break;
+ }
+
+ return 0;
+}
+
+static void ppe_tbl_pkt_cnt_clear(struct ppe_device *ppe_dev, u32 reg,
+ enum ppe_cnt_size_type cnt_type)
+{
+ u32 drop_pkt_cnt[PPE_DROP_PKT_CNT_TBL_SIZE] = {};
+ u32 pkt_cnt[PPE_PKT_CNT_TBL_SIZE] = {};
+
+ switch (cnt_type) {
+ case PPE_PKT_CNT_SIZE_1WORD:
+ regmap_write(ppe_dev->regmap, reg, 0);
+ break;
+ case PPE_PKT_CNT_SIZE_3WORD:
+ regmap_bulk_write(ppe_dev->regmap, reg,
+ pkt_cnt, ARRAY_SIZE(pkt_cnt));
+ break;
+ case PPE_PKT_CNT_SIZE_5WORD:
+ regmap_bulk_write(ppe_dev->regmap, reg,
+ drop_pkt_cnt, ARRAY_SIZE(drop_pkt_cnt));
+ break;
+ }
+}
+
+static int ppe_bm_counter_get(struct ppe_device *ppe_dev, struct seq_file *seq)
+{
+ u32 reg, val, pkt_cnt, pkt_cnt1;
+ int ret, i, tag;
+
+ seq_printf(seq, "%-24s", "BM SILENT_DROP:");
+ tag = 0;
+ for (i = 0; i < PPE_DROP_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_DROP_CNT_TBL_ADDR + i * PPE_DROP_CNT_TBL_INC;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD,
+ &pkt_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u(%s=%04d)", pkt_cnt, "port", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ /* The number of packets dropped because the hardware buffers were
+ * only partially available for the packet.
+ */
+ seq_printf(seq, "%-24s", "BM OVERFLOW_DROP:");
+ tag = 0;
+ for (i = 0; i < PPE_DROP_STAT_TBL_ENTRIES; i++) {
+ reg = PPE_DROP_STAT_TBL_ADDR + PPE_DROP_STAT_TBL_INC * i;
+
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &pkt_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u(%s=%04d)", pkt_cnt, "port", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ /* The number of currently occupied buffers, which can't be flushed. */
+ seq_printf(seq, "%-24s", "BM USED/REACT:");
+ tag = 0;
+ for (i = 0; i < PPE_BM_USED_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_BM_USED_CNT_TBL_ADDR + i * PPE_BM_USED_CNT_TBL_INC;
+ ret = regmap_read(ppe_dev->regmap, reg, &val);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ /* The number of PPE buffers used for caching the received
+ * packets before the pause frame is sent.
+ */
+ pkt_cnt = FIELD_GET(PPE_BM_USED_CNT_VAL, val);
+
+ reg = PPE_BM_REACT_CNT_TBL_ADDR + i * PPE_BM_REACT_CNT_TBL_INC;
+ ret = regmap_read(ppe_dev->regmap, reg, &val);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ /* The number of PPE buffers used for caching the received
+ * packets after the pause frame is sent out.
+ */
+ pkt_cnt1 = FIELD_GET(PPE_BM_REACT_CNT_VAL, val);
+
+ if (pkt_cnt > 0 || pkt_cnt1 > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u/%u(%s=%04d)", pkt_cnt, pkt_cnt1,
+ "port", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* The number of packets processed by the ingress parser module of PPE. */
+static int ppe_parse_pkt_counter_get(struct ppe_device *ppe_dev,
+ struct seq_file *seq)
+{
+ u32 reg, cnt = 0, tunnel_cnt = 0;
+ int i, ret, tag = 0;
+
+ seq_printf(seq, "%-24s", "PARSE TPRX/IPRX:");
+ for (i = 0; i < PPE_IPR_PKT_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_TPR_PKT_CNT_TBL_ADDR + i * PPE_TPR_PKT_CNT_TBL_INC;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD,
+ &tunnel_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ reg = PPE_IPR_PKT_CNT_TBL_ADDR + i * PPE_IPR_PKT_CNT_TBL_INC;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD,
+ &cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (tunnel_cnt > 0 || cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u/%u(%s=%04d)", tunnel_cnt, cnt,
+ "port", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* The number of packets received or dropped on the ingress port. */
+static int ppe_port_rx_counter_get(struct ppe_device *ppe_dev,
+ struct seq_file *seq)
+{
+ u32 reg, pkt_cnt = 0, drop_cnt = 0;
+ int ret, i, tag;
+
+ seq_printf(seq, "%-24s", "PORT RX/RX_DROP:");
+ tag = 0;
+ for (i = 0; i < PPE_PHY_PORT_RX_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_PHY_PORT_RX_CNT_TBL_ADDR + PPE_PHY_PORT_RX_CNT_TBL_INC * i;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD,
+ &pkt_cnt, &drop_cnt);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u/%u(%s=%04d)", pkt_cnt, drop_cnt,
+ "port", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ seq_printf(seq, "%-24s", "VPORT RX/RX_DROP:");
+ tag = 0;
+ for (i = 0; i < PPE_PORT_RX_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_PORT_RX_CNT_TBL_ADDR + PPE_PORT_RX_CNT_TBL_INC * i;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD,
+ &pkt_cnt, &drop_cnt);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u/%u(%s=%04d)", pkt_cnt, drop_cnt,
+ "port", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* The number of packets received or dropped by layer 2 processing. */
+static int ppe_l2_counter_get(struct ppe_device *ppe_dev,
+ struct seq_file *seq)
+{
+ u32 reg, pkt_cnt = 0, drop_cnt = 0;
+ int ret, i, tag = 0;
+
+ seq_printf(seq, "%-24s", "L2 RX/RX_DROP:");
+ for (i = 0; i < PPE_PRE_L2_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_PRE_L2_CNT_TBL_ADDR + PPE_PRE_L2_CNT_TBL_INC * i;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD,
+ &pkt_cnt, &drop_cnt);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u/%u(%s=%04d)", pkt_cnt, drop_cnt,
+ "vsi", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* The number of VLAN packets received by PPE. */
+static int ppe_vlan_rx_counter_get(struct ppe_device *ppe_dev,
+ struct seq_file *seq)
+{
+ u32 reg, pkt_cnt = 0;
+ int ret, i, tag = 0;
+
+ seq_printf(seq, "%-24s", "VLAN RX:");
+ for (i = 0; i < PPE_VLAN_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_VLAN_CNT_TBL_ADDR + PPE_VLAN_CNT_TBL_INC * i;
+
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &pkt_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u(%s=%04d)", pkt_cnt, "vsi", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* The number of packets handed to the CPU by the PPE. */
+static int ppe_cpu_code_counter_get(struct ppe_device *ppe_dev,
+ struct seq_file *seq)
+{
+ u32 reg, pkt_cnt = 0;
+ int ret, i;
+
+ seq_printf(seq, "%-24s", "CPU CODE:");
+ for (i = 0; i < PPE_DROP_CPU_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_DROP_CPU_CNT_TBL_ADDR + PPE_DROP_CPU_CNT_TBL_INC * i;
+
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &pkt_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (!pkt_cnt)
+ continue;
+
+ /* There are 256 CPU codes saved in the first 256 entries
+ * of the register table, followed by 128 drop codes for
+ * each PPE port (0-7), so the table has 256 + 8 * 128
+ * entries in total.
+ */
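+ /* E.g. entry i = 264 is drop code 1 on port 0, since
+ * (264 - 256) % 8 == 0 and (264 - 256) / 8 == 1.
+ */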
+ if (i < 256)
+ seq_printf(seq, "%10u(cpucode:%d)", pkt_cnt, i);
+ else
+ seq_printf(seq, "%10u(port=%04d),dropcode:%d", pkt_cnt,
+ (i - 256) % 8, (i - 256) / 8);
+ seq_putc(seq, '\n');
+ seq_printf(seq, "%-24s", "");
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* The number of packets forwarded by VLAN in the egress direction. */
+static int ppe_vlan_tx_counter_get(struct ppe_device *ppe_dev,
+ struct seq_file *seq)
+{
+ u32 reg, pkt_cnt = 0;
+ int ret, i, tag = 0;
+
+ seq_printf(seq, "%-24s", "VLAN TX:");
+ for (i = 0; i < PPE_EG_VSI_COUNTER_TBL_ENTRIES; i++) {
+ reg = PPE_EG_VSI_COUNTER_TBL_ADDR + PPE_EG_VSI_COUNTER_TBL_INC * i;
+
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &pkt_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u(%s=%04d)", pkt_cnt, "vsi", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* The number of packets transmitted or dropped on the egress port. */
+static int ppe_port_tx_counter_get(struct ppe_device *ppe_dev,
+ struct seq_file *seq)
+{
+ u32 reg, pkt_cnt = 0, drop_cnt = 0;
+ int ret, i, tag;
+
+ seq_printf(seq, "%-24s", "VPORT TX/TX_DROP:");
+ tag = 0;
+ for (i = 0; i < PPE_VPORT_TX_COUNTER_TBL_ENTRIES; i++) {
+ reg = PPE_VPORT_TX_COUNTER_TBL_ADDR + PPE_VPORT_TX_COUNTER_TBL_INC * i;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &pkt_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ reg = PPE_VPORT_TX_DROP_CNT_TBL_ADDR + PPE_VPORT_TX_DROP_CNT_TBL_INC * i;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &drop_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0 || drop_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u/%u(%s=%04d)", pkt_cnt, drop_cnt,
+ "port", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ seq_printf(seq, "%-24s", "PORT TX/TX_DROP:");
+ tag = 0;
+ for (i = 0; i < PPE_PORT_TX_COUNTER_TBL_ENTRIES; i++) {
+ reg = PPE_PORT_TX_COUNTER_TBL_ADDR + PPE_PORT_TX_COUNTER_TBL_INC * i;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &pkt_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ reg = PPE_PORT_TX_DROP_CNT_TBL_ADDR + PPE_PORT_TX_DROP_CNT_TBL_INC * i;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &drop_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (pkt_cnt > 0 || drop_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u/%u(%s=%04d)", pkt_cnt, drop_cnt,
+ "port", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* The number of packets transmitted by, or pending on, the PPE queue. */
+static int ppe_queue_counter_get(struct ppe_device *ppe_dev,
+ struct seq_file *seq)
+{
+ u32 reg, val, pkt_cnt = 0, pend_cnt = 0, drop_cnt = 0;
+ int ret, i, tag = 0;
+
+ seq_printf(seq, "%-24s", "QUEUE TX/PEND/DROP:");
+ for (i = 0; i < PPE_QUEUE_TX_COUNTER_TBL_ENTRIES; i++) {
+ reg = PPE_QUEUE_TX_COUNTER_TBL_ADDR + PPE_QUEUE_TX_COUNTER_TBL_INC * i;
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &pkt_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ if (i < PPE_AC_UNICAST_QUEUE_CFG_TBL_ENTRIES) {
+ reg = PPE_AC_UNICAST_QUEUE_CNT_TBL_ADDR +
+ PPE_AC_UNICAST_QUEUE_CNT_TBL_INC * i;
+ ret = regmap_read(ppe_dev->regmap, reg, &val);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ pend_cnt = FIELD_GET(PPE_AC_UNICAST_QUEUE_CNT_TBL_PEND_CNT, val);
+
+ reg = PPE_UNICAST_DROP_CNT_TBL_ADDR +
+ PPE_AC_UNICAST_QUEUE_CNT_TBL_INC *
+ (i * PPE_UNICAST_DROP_TYPES + PPE_UNICAST_DROP_FORCE_OFFSET);
+
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &drop_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+ } else {
+ int mq_offset = i - PPE_AC_UNICAST_QUEUE_CFG_TBL_ENTRIES;
+
+ reg = PPE_AC_MULTICAST_QUEUE_CNT_TBL_ADDR +
+ PPE_AC_MULTICAST_QUEUE_CNT_TBL_INC * mq_offset;
+ ret = regmap_read(ppe_dev->regmap, reg, &val);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+
+ pend_cnt = FIELD_GET(PPE_AC_MULTICAST_QUEUE_CNT_TBL_PEND_CNT, val);
+
+ if (mq_offset < PPE_P0_MULTICAST_QUEUE_NUM) {
+ reg = PPE_CPU_PORT_MULTICAST_FORCE_DROP_CNT_TBL_ADDR(mq_offset);
+ } else {
+ mq_offset -= PPE_P0_MULTICAST_QUEUE_NUM;
+
+ reg = PPE_P1_MULTICAST_DROP_CNT_TBL_ADDR;
+ reg += (mq_offset / PPE_MULTICAST_QUEUE_NUM) *
+ PPE_MULTICAST_QUEUE_PORT_ADDR_INC;
+ reg += (mq_offset % PPE_MULTICAST_QUEUE_NUM) *
+ PPE_MULTICAST_DROP_CNT_TBL_INC *
+ PPE_MULTICAST_DROP_TYPES;
+ }
+
+ ret = ppe_pkt_cnt_get(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD,
+ &drop_cnt, NULL);
+ if (ret) {
+ dev_err(ppe_dev->dev, "CNT ERROR %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (pkt_cnt > 0 || pend_cnt > 0 || drop_cnt > 0) {
+ if (!((++tag) % 4))
+ seq_printf(seq, "\n%-24s", "");
+
+ seq_printf(seq, "%10u/%u/%u(%s=%04d)",
+ pkt_cnt, pend_cnt, drop_cnt, "queue", i);
+ }
+ }
+
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+/* Display the various packet counters of PPE. */
+static int ppe_packet_counter_show(struct seq_file *seq, void *v)
+{
+ struct ppe_debugfs_entry *entry = seq->private;
+ struct ppe_device *ppe_dev = entry->ppe;
+ int ret;
+
+ switch (entry->counter_type) {
+ case PPE_CNT_BM:
+ ret = ppe_bm_counter_get(ppe_dev, seq);
+ break;
+ case PPE_CNT_PARSE:
+ ret = ppe_parse_pkt_counter_get(ppe_dev, seq);
+ break;
+ case PPE_CNT_PORT_RX:
+ ret = ppe_port_rx_counter_get(ppe_dev, seq);
+ break;
+ case PPE_CNT_VLAN_RX:
+ ret = ppe_vlan_rx_counter_get(ppe_dev, seq);
+ break;
+ case PPE_CNT_L2_FWD:
+ ret = ppe_l2_counter_get(ppe_dev, seq);
+ break;
+ case PPE_CNT_CPU_CODE:
+ ret = ppe_cpu_code_counter_get(ppe_dev, seq);
+ break;
+ case PPE_CNT_VLAN_TX:
+ ret = ppe_vlan_tx_counter_get(ppe_dev, seq);
+ break;
+ case PPE_CNT_PORT_TX:
+ ret = ppe_port_tx_counter_get(ppe_dev, seq);
+ break;
+ case PPE_CNT_QM:
+ ret = ppe_queue_counter_get(ppe_dev, seq);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* Flush the various packet counters of PPE. */
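+/* Writing any data to one of these debugfs files (typically exposed as
+ * /sys/kernel/debug/ppe/<name>) clears the corresponding hardware counters.
+ */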
+static ssize_t ppe_packet_counter_write(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct ppe_debugfs_entry *entry = file_inode(file)->i_private;
+ struct ppe_device *ppe_dev = entry->ppe;
+ u32 reg;
+ int i;
+
+ switch (entry->counter_type) {
+ case PPE_CNT_BM:
+ for (i = 0; i < PPE_DROP_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_DROP_CNT_TBL_ADDR + i * PPE_DROP_CNT_TBL_INC;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD);
+ }
+
+ for (i = 0; i < PPE_DROP_STAT_TBL_ENTRIES; i++) {
+ reg = PPE_DROP_STAT_TBL_ADDR + PPE_DROP_STAT_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+ }
+
+ break;
+ case PPE_CNT_PARSE:
+ for (i = 0; i < PPE_IPR_PKT_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_IPR_PKT_CNT_TBL_ADDR + i * PPE_IPR_PKT_CNT_TBL_INC;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD);
+
+ reg = PPE_TPR_PKT_CNT_TBL_ADDR + i * PPE_TPR_PKT_CNT_TBL_INC;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_1WORD);
+ }
+
+ break;
+ case PPE_CNT_PORT_RX:
+ for (i = 0; i < PPE_PORT_RX_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_PORT_RX_CNT_TBL_ADDR + PPE_PORT_RX_CNT_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD);
+ }
+
+ for (i = 0; i < PPE_PHY_PORT_RX_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_PHY_PORT_RX_CNT_TBL_ADDR + PPE_PHY_PORT_RX_CNT_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD);
+ }
+
+ break;
+ case PPE_CNT_VLAN_RX:
+ for (i = 0; i < PPE_VLAN_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_VLAN_CNT_TBL_ADDR + PPE_VLAN_CNT_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+ }
+
+ break;
+ case PPE_CNT_L2_FWD:
+ for (i = 0; i < PPE_PRE_L2_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_PRE_L2_CNT_TBL_ADDR + PPE_PRE_L2_CNT_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_5WORD);
+ }
+
+ break;
+ case PPE_CNT_CPU_CODE:
+ for (i = 0; i < PPE_DROP_CPU_CNT_TBL_ENTRIES; i++) {
+ reg = PPE_DROP_CPU_CNT_TBL_ADDR + PPE_DROP_CPU_CNT_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+ }
+
+ break;
+ case PPE_CNT_VLAN_TX:
+ for (i = 0; i < PPE_EG_VSI_COUNTER_TBL_ENTRIES; i++) {
+ reg = PPE_EG_VSI_COUNTER_TBL_ADDR + PPE_EG_VSI_COUNTER_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+ }
+
+ break;
+ case PPE_CNT_PORT_TX:
+ for (i = 0; i < PPE_PORT_TX_COUNTER_TBL_ENTRIES; i++) {
+ reg = PPE_PORT_TX_DROP_CNT_TBL_ADDR + PPE_PORT_TX_DROP_CNT_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+
+ reg = PPE_PORT_TX_COUNTER_TBL_ADDR + PPE_PORT_TX_COUNTER_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+ }
+
+ for (i = 0; i < PPE_VPORT_TX_COUNTER_TBL_ENTRIES; i++) {
+ reg = PPE_VPORT_TX_COUNTER_TBL_ADDR + PPE_VPORT_TX_COUNTER_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+
+ reg = PPE_VPORT_TX_DROP_CNT_TBL_ADDR + PPE_VPORT_TX_DROP_CNT_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+ }
+
+ break;
+ case PPE_CNT_QM:
+ for (i = 0; i < PPE_QUEUE_TX_COUNTER_TBL_ENTRIES; i++) {
+ reg = PPE_QUEUE_TX_COUNTER_TBL_ADDR + PPE_QUEUE_TX_COUNTER_TBL_INC * i;
+ ppe_tbl_pkt_cnt_clear(ppe_dev, reg, PPE_PKT_CNT_SIZE_3WORD);
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ return count;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(ppe_packet_counter);
+
+void ppe_debugfs_setup(struct ppe_device *ppe_dev)
+{
+ struct ppe_debugfs_entry *entry;
+ int i;
+
+ ppe_dev->debugfs_root = debugfs_create_dir("ppe", NULL);
+ if (IS_ERR(ppe_dev->debugfs_root))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_files); i++) {
+ entry = devm_kzalloc(ppe_dev->dev, sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return;
+
+ entry->ppe = ppe_dev;
+ entry->counter_type = debugfs_files[i].counter_type;
+
+ debugfs_create_file(debugfs_files[i].name, 0444,
+ ppe_dev->debugfs_root, entry,
+ &ppe_packet_counter_fops);
+ }
+}
+
+void ppe_debugfs_teardown(struct ppe_device *ppe_dev)
+{
+ debugfs_remove_recursive(ppe_dev->debugfs_root);
+ ppe_dev->debugfs_root = NULL;
+}
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.h b/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.h
new file mode 100644
index 000000000000..81f49a709123
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+/* PPE debugfs counters setup. */
+
+#ifndef __PPE_DEBUGFS_H__
+#define __PPE_DEBUGFS_H__
+
+#include "ppe.h"
+
+void ppe_debugfs_setup(struct ppe_device *ppe_dev);
+void ppe_debugfs_teardown(struct ppe_device *ppe_dev);
+
+#endif
diff --git a/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
new file mode 100644
index 000000000000..746dfbb5a682
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_regs.h
@@ -0,0 +1,591 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+/* PPE hardware register and table declarations. */
+#ifndef __PPE_REGS_H__
+#define __PPE_REGS_H__
+
+#include <linux/bitfield.h>
+
+/* PPE scheduler configurations for buffer manager block. */
+#define PPE_BM_SCH_CTRL_ADDR 0xb000
+#define PPE_BM_SCH_CTRL_INC 4
+#define PPE_BM_SCH_CTRL_SCH_DEPTH GENMASK(7, 0)
+#define PPE_BM_SCH_CTRL_SCH_OFFSET GENMASK(14, 8)
+#define PPE_BM_SCH_CTRL_SCH_EN BIT(31)
+
+/* PPE drop counters. */
+#define PPE_DROP_CNT_TBL_ADDR 0xb024
+#define PPE_DROP_CNT_TBL_ENTRIES 8
+#define PPE_DROP_CNT_TBL_INC 4
+
+/* BM port drop counters. */
+#define PPE_DROP_STAT_TBL_ADDR 0xe000
+#define PPE_DROP_STAT_TBL_ENTRIES 30
+#define PPE_DROP_STAT_TBL_INC 0x10
+
+/* Egress VLAN counters. */
+#define PPE_EG_VSI_COUNTER_TBL_ADDR 0x41000
+#define PPE_EG_VSI_COUNTER_TBL_ENTRIES 64
+#define PPE_EG_VSI_COUNTER_TBL_INC 0x10
+
+/* Port TX counters. */
+#define PPE_PORT_TX_COUNTER_TBL_ADDR 0x45000
+#define PPE_PORT_TX_COUNTER_TBL_ENTRIES 8
+#define PPE_PORT_TX_COUNTER_TBL_INC 0x10
+
+/* Virtual port TX counters. */
+#define PPE_VPORT_TX_COUNTER_TBL_ADDR 0x47000
+#define PPE_VPORT_TX_COUNTER_TBL_ENTRIES 256
+#define PPE_VPORT_TX_COUNTER_TBL_INC 0x10
+
+/* Queue counters. */
+#define PPE_QUEUE_TX_COUNTER_TBL_ADDR 0x4a000
+#define PPE_QUEUE_TX_COUNTER_TBL_ENTRIES 300
+#define PPE_QUEUE_TX_COUNTER_TBL_INC 0x10
+
+/* RSS settings control how the RSS hash value is calculated for packets
+ * received towards the ARM cores. This hash is then used to generate the
+ * queue offset, which selects the queue used to transmit the packet to the
+ * ARM cores.
+ */
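+/* The resulting hash value appears to index PPE_UCAST_HASH_MAP_TBL (defined
+ * below) to derive the queue offset.
+ */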
+#define PPE_RSS_HASH_MASK_ADDR 0xb4318
+#define PPE_RSS_HASH_MASK_HASH_MASK GENMASK(20, 0)
+#define PPE_RSS_HASH_MASK_FRAGMENT BIT(28)
+
+#define PPE_RSS_HASH_SEED_ADDR 0xb431c
+#define PPE_RSS_HASH_SEED_VAL GENMASK(31, 0)
+
+#define PPE_RSS_HASH_MIX_ADDR 0xb4320
+#define PPE_RSS_HASH_MIX_ENTRIES 11
+#define PPE_RSS_HASH_MIX_INC 4
+#define PPE_RSS_HASH_MIX_VAL GENMASK(4, 0)
+
+#define PPE_RSS_HASH_FIN_ADDR 0xb4350
+#define PPE_RSS_HASH_FIN_ENTRIES 5
+#define PPE_RSS_HASH_FIN_INC 4
+#define PPE_RSS_HASH_FIN_INNER GENMASK(4, 0)
+#define PPE_RSS_HASH_FIN_OUTER GENMASK(9, 5)
+
+#define PPE_RSS_HASH_MASK_IPV4_ADDR 0xb4380
+#define PPE_RSS_HASH_MASK_IPV4_HASH_MASK GENMASK(20, 0)
+#define PPE_RSS_HASH_MASK_IPV4_FRAGMENT BIT(28)
+
+#define PPE_RSS_HASH_SEED_IPV4_ADDR 0xb4384
+#define PPE_RSS_HASH_SEED_IPV4_VAL GENMASK(31, 0)
+
+#define PPE_RSS_HASH_MIX_IPV4_ADDR 0xb4390
+#define PPE_RSS_HASH_MIX_IPV4_ENTRIES 5
+#define PPE_RSS_HASH_MIX_IPV4_INC 4
+#define PPE_RSS_HASH_MIX_IPV4_VAL GENMASK(4, 0)
+
+#define PPE_RSS_HASH_FIN_IPV4_ADDR 0xb43b0
+#define PPE_RSS_HASH_FIN_IPV4_ENTRIES 5
+#define PPE_RSS_HASH_FIN_IPV4_INC 4
+#define PPE_RSS_HASH_FIN_IPV4_INNER GENMASK(4, 0)
+#define PPE_RSS_HASH_FIN_IPV4_OUTER GENMASK(9, 5)
+
+#define PPE_BM_SCH_CFG_TBL_ADDR 0xc000
+#define PPE_BM_SCH_CFG_TBL_ENTRIES 128
+#define PPE_BM_SCH_CFG_TBL_INC 0x10
+#define PPE_BM_SCH_CFG_TBL_PORT_NUM GENMASK(3, 0)
+#define PPE_BM_SCH_CFG_TBL_DIR BIT(4)
+#define PPE_BM_SCH_CFG_TBL_VALID BIT(5)
+#define PPE_BM_SCH_CFG_TBL_SECOND_PORT_VALID BIT(6)
+#define PPE_BM_SCH_CFG_TBL_SECOND_PORT GENMASK(11, 8)
+
+/* PPE service code configuration for functions in the ingress direction,
+ * including bypass configuration for relevant PPE switch core functions
+ * such as the flow entry lookup.
+ */
+#define PPE_SERVICE_TBL_ADDR 0x15000
+#define PPE_SERVICE_TBL_ENTRIES 256
+#define PPE_SERVICE_TBL_INC 0x10
+#define PPE_SERVICE_W0_BYPASS_BITMAP GENMASK(31, 0)
+#define PPE_SERVICE_W1_RX_CNT_EN BIT(0)
+
+#define PPE_SERVICE_SET_BYPASS_BITMAP(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_SERVICE_W0_BYPASS_BITMAP, tbl_cfg, value)
+#define PPE_SERVICE_SET_RX_CNT_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_SERVICE_W1_RX_CNT_EN, (tbl_cfg) + 0x1, value)
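+/* These setters operate on a u32 array holding one table entry (0x10 bytes,
+ * i.e. four words here) and modify a field of the relevant word in place.
+ * A minimal usage sketch, with illustrative variable names only:
+ *
+ *	u32 tbl_cfg[4] = {};
+ *
+ *	PPE_SERVICE_SET_BYPASS_BITMAP(tbl_cfg, bypass_bitmap);
+ *	PPE_SERVICE_SET_RX_CNT_EN(tbl_cfg, true);
+ */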
+
+/* PPE port egress VLAN configurations. */
+#define PPE_PORT_EG_VLAN_TBL_ADDR 0x20020
+#define PPE_PORT_EG_VLAN_TBL_ENTRIES 8
+#define PPE_PORT_EG_VLAN_TBL_INC 4
+#define PPE_PORT_EG_VLAN_TBL_VLAN_TYPE BIT(0)
+#define PPE_PORT_EG_VLAN_TBL_CTAG_MODE GENMASK(2, 1)
+#define PPE_PORT_EG_VLAN_TBL_STAG_MODE GENMASK(4, 3)
+#define PPE_PORT_EG_VLAN_TBL_VSI_TAG_MODE_EN BIT(5)
+#define PPE_PORT_EG_VLAN_TBL_PCP_PROP_CMD BIT(6)
+#define PPE_PORT_EG_VLAN_TBL_DEI_PROP_CMD BIT(7)
+#define PPE_PORT_EG_VLAN_TBL_TX_COUNTING_EN BIT(8)
+
+/* PPE queue counters enable/disable control. */
+#define PPE_EG_BRIDGE_CONFIG_ADDR 0x20044
+#define PPE_EG_BRIDGE_CONFIG_QUEUE_CNT_EN BIT(2)
+
+/* PPE service code configuration for the egress direction. */
+#define PPE_EG_SERVICE_TBL_ADDR 0x43000
+#define PPE_EG_SERVICE_TBL_ENTRIES 256
+#define PPE_EG_SERVICE_TBL_INC 0x10
+#define PPE_EG_SERVICE_W0_UPDATE_ACTION GENMASK(31, 0)
+#define PPE_EG_SERVICE_W1_NEXT_SERVCODE GENMASK(7, 0)
+#define PPE_EG_SERVICE_W1_HW_SERVICE GENMASK(13, 8)
+#define PPE_EG_SERVICE_W1_OFFSET_SEL BIT(14)
+#define PPE_EG_SERVICE_W1_TX_CNT_EN BIT(15)
+
+#define PPE_EG_SERVICE_SET_UPDATE_ACTION(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_EG_SERVICE_W0_UPDATE_ACTION, tbl_cfg, value)
+#define PPE_EG_SERVICE_SET_NEXT_SERVCODE(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_EG_SERVICE_W1_NEXT_SERVCODE, (tbl_cfg) + 0x1, value)
+#define PPE_EG_SERVICE_SET_HW_SERVICE(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_EG_SERVICE_W1_HW_SERVICE, (tbl_cfg) + 0x1, value)
+#define PPE_EG_SERVICE_SET_OFFSET_SEL(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_EG_SERVICE_W1_OFFSET_SEL, (tbl_cfg) + 0x1, value)
+#define PPE_EG_SERVICE_SET_TX_CNT_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_EG_SERVICE_W1_TX_CNT_EN, (tbl_cfg) + 0x1, value)
+
+/* PPE port bridge configuration. */
+#define PPE_PORT_BRIDGE_CTRL_ADDR 0x60300
+#define PPE_PORT_BRIDGE_CTRL_ENTRIES 8
+#define PPE_PORT_BRIDGE_CTRL_INC 4
+#define PPE_PORT_BRIDGE_NEW_LRN_EN BIT(0)
+#define PPE_PORT_BRIDGE_STA_MOVE_LRN_EN BIT(3)
+#define PPE_PORT_BRIDGE_TXMAC_EN BIT(16)
+
+/* PPE port control configurations for the traffic to the multicast queues. */
+#define PPE_MC_MTU_CTRL_TBL_ADDR 0x60a00
+#define PPE_MC_MTU_CTRL_TBL_ENTRIES 8
+#define PPE_MC_MTU_CTRL_TBL_INC 4
+#define PPE_MC_MTU_CTRL_TBL_MTU GENMASK(13, 0)
+#define PPE_MC_MTU_CTRL_TBL_MTU_CMD GENMASK(15, 14)
+#define PPE_MC_MTU_CTRL_TBL_TX_CNT_EN BIT(16)
+
+/* PPE VSI configurations. */
+#define PPE_VSI_TBL_ADDR 0x63800
+#define PPE_VSI_TBL_ENTRIES 64
+#define PPE_VSI_TBL_INC 0x10
+#define PPE_VSI_W0_MEMBER_PORT_BITMAP GENMASK(7, 0)
+#define PPE_VSI_W0_UUC_BITMAP GENMASK(15, 8)
+#define PPE_VSI_W0_UMC_BITMAP GENMASK(23, 16)
+#define PPE_VSI_W0_BC_BITMAP GENMASK(31, 24)
+#define PPE_VSI_W1_NEW_ADDR_LRN_EN BIT(0)
+#define PPE_VSI_W1_NEW_ADDR_FWD_CMD GENMASK(2, 1)
+#define PPE_VSI_W1_STATION_MOVE_LRN_EN BIT(3)
+#define PPE_VSI_W1_STATION_MOVE_FWD_CMD GENMASK(5, 4)
+
+#define PPE_VSI_SET_MEMBER_PORT_BITMAP(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_VSI_W0_MEMBER_PORT_BITMAP, tbl_cfg, value)
+#define PPE_VSI_SET_UUC_BITMAP(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_VSI_W0_UUC_BITMAP, tbl_cfg, value)
+#define PPE_VSI_SET_UMC_BITMAP(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_VSI_W0_UMC_BITMAP, tbl_cfg, value)
+#define PPE_VSI_SET_BC_BITMAP(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_VSI_W0_BC_BITMAP, tbl_cfg, value)
+#define PPE_VSI_SET_NEW_ADDR_LRN_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_VSI_W1_NEW_ADDR_LRN_EN, (tbl_cfg) + 0x1, value)
+#define PPE_VSI_SET_NEW_ADDR_FWD_CMD(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_VSI_W1_NEW_ADDR_FWD_CMD, (tbl_cfg) + 0x1, value)
+#define PPE_VSI_SET_STATION_MOVE_LRN_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_VSI_W1_STATION_MOVE_LRN_EN, (tbl_cfg) + 0x1, value)
+#define PPE_VSI_SET_STATION_MOVE_FWD_CMD(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_VSI_W1_STATION_MOVE_FWD_CMD, (tbl_cfg) + 0x1, value)
+
+/* PPE port control configurations for the traffic to the unicast queues. */
+#define PPE_MRU_MTU_CTRL_TBL_ADDR 0x65000
+#define PPE_MRU_MTU_CTRL_TBL_ENTRIES 256
+#define PPE_MRU_MTU_CTRL_TBL_INC 0x10
+#define PPE_MRU_MTU_CTRL_W0_MRU GENMASK(13, 0)
+#define PPE_MRU_MTU_CTRL_W0_MRU_CMD GENMASK(15, 14)
+#define PPE_MRU_MTU_CTRL_W0_MTU GENMASK(29, 16)
+#define PPE_MRU_MTU_CTRL_W0_MTU_CMD GENMASK(31, 30)
+#define PPE_MRU_MTU_CTRL_W1_RX_CNT_EN BIT(0)
+#define PPE_MRU_MTU_CTRL_W1_TX_CNT_EN BIT(1)
+#define PPE_MRU_MTU_CTRL_W1_SRC_PROFILE GENMASK(3, 2)
+#define PPE_MRU_MTU_CTRL_W1_INNER_PREC_LOW BIT(31)
+#define PPE_MRU_MTU_CTRL_W2_INNER_PREC_HIGH GENMASK(1, 0)
+
+#define PPE_MRU_MTU_CTRL_SET_MRU(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_MRU_MTU_CTRL_W0_MRU, tbl_cfg, value)
+#define PPE_MRU_MTU_CTRL_SET_MRU_CMD(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_MRU_MTU_CTRL_W0_MRU_CMD, tbl_cfg, value)
+#define PPE_MRU_MTU_CTRL_SET_MTU(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_MRU_MTU_CTRL_W0_MTU, tbl_cfg, value)
+#define PPE_MRU_MTU_CTRL_SET_MTU_CMD(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_MRU_MTU_CTRL_W0_MTU_CMD, tbl_cfg, value)
+#define PPE_MRU_MTU_CTRL_SET_RX_CNT_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_MRU_MTU_CTRL_W1_RX_CNT_EN, (tbl_cfg) + 0x1, value)
+#define PPE_MRU_MTU_CTRL_SET_TX_CNT_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_MRU_MTU_CTRL_W1_TX_CNT_EN, (tbl_cfg) + 0x1, value)
+
+/* PPE service code configuration for destination port and counter. */
+#define PPE_IN_L2_SERVICE_TBL_ADDR 0x66000
+#define PPE_IN_L2_SERVICE_TBL_ENTRIES 256
+#define PPE_IN_L2_SERVICE_TBL_INC 0x10
+#define PPE_IN_L2_SERVICE_TBL_DST_PORT_ID_VALID BIT(0)
+#define PPE_IN_L2_SERVICE_TBL_DST_PORT_ID GENMASK(4, 1)
+#define PPE_IN_L2_SERVICE_TBL_DST_DIRECTION BIT(5)
+#define PPE_IN_L2_SERVICE_TBL_DST_BYPASS_BITMAP GENMASK(29, 6)
+#define PPE_IN_L2_SERVICE_TBL_RX_CNT_EN BIT(30)
+#define PPE_IN_L2_SERVICE_TBL_TX_CNT_EN BIT(31)
+
+/* L2 port configurations. */
+#define PPE_L2_VP_PORT_TBL_ADDR 0x98000
+#define PPE_L2_VP_PORT_TBL_ENTRIES 256
+#define PPE_L2_VP_PORT_TBL_INC 0x10
+#define PPE_L2_VP_PORT_W0_INVALID_VSI_FWD_EN BIT(0)
+#define PPE_L2_VP_PORT_W0_DST_INFO GENMASK(9, 2)
+
+#define PPE_L2_PORT_SET_INVALID_VSI_FWD_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_L2_VP_PORT_W0_INVALID_VSI_FWD_EN, tbl_cfg, value)
+#define PPE_L2_PORT_SET_DST_INFO(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_L2_VP_PORT_W0_DST_INFO, tbl_cfg, value)
+
+/* Port RX and RX drop counters. */
+#define PPE_PORT_RX_CNT_TBL_ADDR 0x150000
+#define PPE_PORT_RX_CNT_TBL_ENTRIES 256
+#define PPE_PORT_RX_CNT_TBL_INC 0x20
+
+/* Physical port RX and RX drop counters. */
+#define PPE_PHY_PORT_RX_CNT_TBL_ADDR 0x156000
+#define PPE_PHY_PORT_RX_CNT_TBL_ENTRIES 8
+#define PPE_PHY_PORT_RX_CNT_TBL_INC 0x20
+
+/* Counters for packets sent to the CPU port. */
+#define PPE_DROP_CPU_CNT_TBL_ADDR 0x160000
+#define PPE_DROP_CPU_CNT_TBL_ENTRIES 1280
+#define PPE_DROP_CPU_CNT_TBL_INC 0x10
+
+/* VLAN counters. */
+#define PPE_VLAN_CNT_TBL_ADDR 0x178000
+#define PPE_VLAN_CNT_TBL_ENTRIES 64
+#define PPE_VLAN_CNT_TBL_INC 0x10
+
+/* PPE L2 counters. */
+#define PPE_PRE_L2_CNT_TBL_ADDR 0x17c000
+#define PPE_PRE_L2_CNT_TBL_ENTRIES 64
+#define PPE_PRE_L2_CNT_TBL_INC 0x20
+
+/* Port TX drop counters. */
+#define PPE_PORT_TX_DROP_CNT_TBL_ADDR 0x17d000
+#define PPE_PORT_TX_DROP_CNT_TBL_ENTRIES 8
+#define PPE_PORT_TX_DROP_CNT_TBL_INC 0x10
+
+/* Virtual port TX drop counters. */
+#define PPE_VPORT_TX_DROP_CNT_TBL_ADDR 0x17e000
+#define PPE_VPORT_TX_DROP_CNT_TBL_ENTRIES 256
+#define PPE_VPORT_TX_DROP_CNT_TBL_INC 0x10
+
+/* Counters for tunnel packets. */
+#define PPE_TPR_PKT_CNT_TBL_ADDR 0x1d0080
+#define PPE_TPR_PKT_CNT_TBL_ENTRIES 8
+#define PPE_TPR_PKT_CNT_TBL_INC 4
+
+/* Counters for all packets received. */
+#define PPE_IPR_PKT_CNT_TBL_ADDR 0x1e0080
+#define PPE_IPR_PKT_CNT_TBL_ENTRIES 8
+#define PPE_IPR_PKT_CNT_TBL_INC 4
+
+/* PPE service code configuration for tunnel packets. */
+#define PPE_TL_SERVICE_TBL_ADDR 0x306000
+#define PPE_TL_SERVICE_TBL_ENTRIES 256
+#define PPE_TL_SERVICE_TBL_INC 4
+#define PPE_TL_SERVICE_TBL_BYPASS_BITMAP GENMASK(31, 0)
+
+/* Port scheduler global config. */
+#define PPE_PSCH_SCH_DEPTH_CFG_ADDR 0x400000
+#define PPE_PSCH_SCH_DEPTH_CFG_INC 4
+#define PPE_PSCH_SCH_DEPTH_CFG_SCH_DEPTH GENMASK(7, 0)
+
+/* PPE queue level scheduler configurations. */
+#define PPE_L0_FLOW_MAP_TBL_ADDR 0x402000
+#define PPE_L0_FLOW_MAP_TBL_ENTRIES 300
+#define PPE_L0_FLOW_MAP_TBL_INC 0x10
+#define PPE_L0_FLOW_MAP_TBL_FLOW_ID GENMASK(5, 0)
+#define PPE_L0_FLOW_MAP_TBL_C_PRI GENMASK(8, 6)
+#define PPE_L0_FLOW_MAP_TBL_E_PRI GENMASK(11, 9)
+#define PPE_L0_FLOW_MAP_TBL_C_NODE_WT GENMASK(21, 12)
+#define PPE_L0_FLOW_MAP_TBL_E_NODE_WT GENMASK(31, 22)
+
+#define PPE_L0_C_FLOW_CFG_TBL_ADDR 0x404000
+#define PPE_L0_C_FLOW_CFG_TBL_ENTRIES 512
+#define PPE_L0_C_FLOW_CFG_TBL_INC 0x10
+#define PPE_L0_C_FLOW_CFG_TBL_NODE_ID GENMASK(7, 0)
+#define PPE_L0_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT BIT(8)
+
+#define PPE_L0_E_FLOW_CFG_TBL_ADDR 0x406000
+#define PPE_L0_E_FLOW_CFG_TBL_ENTRIES 512
+#define PPE_L0_E_FLOW_CFG_TBL_INC 0x10
+#define PPE_L0_E_FLOW_CFG_TBL_NODE_ID GENMASK(7, 0)
+#define PPE_L0_E_FLOW_CFG_TBL_NODE_CREDIT_UNIT BIT(8)
+
+#define PPE_L0_FLOW_PORT_MAP_TBL_ADDR 0x408000
+#define PPE_L0_FLOW_PORT_MAP_TBL_ENTRIES 300
+#define PPE_L0_FLOW_PORT_MAP_TBL_INC 0x10
+#define PPE_L0_FLOW_PORT_MAP_TBL_PORT_NUM GENMASK(3, 0)
+
+#define PPE_L0_COMP_CFG_TBL_ADDR 0x428000
+#define PPE_L0_COMP_CFG_TBL_ENTRIES 300
+#define PPE_L0_COMP_CFG_TBL_INC 0x10
+#define PPE_L0_COMP_CFG_TBL_SHAPER_METER_LEN GENMASK(1, 0)
+#define PPE_L0_COMP_CFG_TBL_NODE_METER_LEN GENMASK(3, 2)
+
+/* PPE queue to Ethernet DMA ring mapping table. */
+#define PPE_RING_Q_MAP_TBL_ADDR 0x42a000
+#define PPE_RING_Q_MAP_TBL_ENTRIES 24
+#define PPE_RING_Q_MAP_TBL_INC 0x40
+
+/* Table addresses for per-queue dequeue setting. */
+#define PPE_DEQ_OPR_TBL_ADDR 0x430000
+#define PPE_DEQ_OPR_TBL_ENTRIES 300
+#define PPE_DEQ_OPR_TBL_INC 0x10
+#define PPE_DEQ_OPR_TBL_DEQ_DISABLE BIT(0)
+
+/* PPE flow level scheduler configurations. */
+#define PPE_L1_FLOW_MAP_TBL_ADDR 0x440000
+#define PPE_L1_FLOW_MAP_TBL_ENTRIES 64
+#define PPE_L1_FLOW_MAP_TBL_INC 0x10
+#define PPE_L1_FLOW_MAP_TBL_FLOW_ID GENMASK(3, 0)
+#define PPE_L1_FLOW_MAP_TBL_C_PRI GENMASK(6, 4)
+#define PPE_L1_FLOW_MAP_TBL_E_PRI GENMASK(9, 7)
+#define PPE_L1_FLOW_MAP_TBL_C_NODE_WT GENMASK(19, 10)
+#define PPE_L1_FLOW_MAP_TBL_E_NODE_WT GENMASK(29, 20)
+
+#define PPE_L1_C_FLOW_CFG_TBL_ADDR 0x442000
+#define PPE_L1_C_FLOW_CFG_TBL_ENTRIES 64
+#define PPE_L1_C_FLOW_CFG_TBL_INC 0x10
+#define PPE_L1_C_FLOW_CFG_TBL_NODE_ID GENMASK(5, 0)
+#define PPE_L1_C_FLOW_CFG_TBL_NODE_CREDIT_UNIT BIT(6)
+
+#define PPE_L1_E_FLOW_CFG_TBL_ADDR 0x444000
+#define PPE_L1_E_FLOW_CFG_TBL_ENTRIES 64
+#define PPE_L1_E_FLOW_CFG_TBL_INC 0x10
+#define PPE_L1_E_FLOW_CFG_TBL_NODE_ID GENMASK(5, 0)
+#define PPE_L1_E_FLOW_CFG_TBL_NODE_CREDIT_UNIT BIT(6)
+
+#define PPE_L1_FLOW_PORT_MAP_TBL_ADDR 0x446000
+#define PPE_L1_FLOW_PORT_MAP_TBL_ENTRIES 64
+#define PPE_L1_FLOW_PORT_MAP_TBL_INC 0x10
+#define PPE_L1_FLOW_PORT_MAP_TBL_PORT_NUM GENMASK(3, 0)
+
+#define PPE_L1_COMP_CFG_TBL_ADDR 0x46a000
+#define PPE_L1_COMP_CFG_TBL_ENTRIES 64
+#define PPE_L1_COMP_CFG_TBL_INC 0x10
+#define PPE_L1_COMP_CFG_TBL_SHAPER_METER_LEN GENMASK(1, 0)
+#define PPE_L1_COMP_CFG_TBL_NODE_METER_LEN GENMASK(3, 2)
+
+/* PPE port scheduler configurations for egress. */
+#define PPE_PSCH_SCH_CFG_TBL_ADDR 0x47a000
+#define PPE_PSCH_SCH_CFG_TBL_ENTRIES 128
+#define PPE_PSCH_SCH_CFG_TBL_INC 0x10
+#define PPE_PSCH_SCH_CFG_TBL_DES_PORT GENMASK(3, 0)
+#define PPE_PSCH_SCH_CFG_TBL_ENS_PORT GENMASK(7, 4)
+#define PPE_PSCH_SCH_CFG_TBL_ENS_PORT_BITMAP GENMASK(15, 8)
+#define PPE_PSCH_SCH_CFG_TBL_DES_SECOND_PORT_EN BIT(16)
+#define PPE_PSCH_SCH_CFG_TBL_DES_SECOND_PORT GENMASK(20, 17)
+
+/* There are 15 BM ports and 4 BM groups supported by PPE.
+ * BM ports 0-7 are for EDMA port 0, BM ports 8-13 are for
+ * PPE physical ports 1-6 and BM port 14 is for the EIP port.
+ */
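+/* For example, under this mapping BM port 10 serves PPE physical port 3. */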
+#define PPE_BM_PORT_FC_MODE_ADDR 0x600100
+#define PPE_BM_PORT_FC_MODE_ENTRIES 15
+#define PPE_BM_PORT_FC_MODE_INC 0x4
+#define PPE_BM_PORT_FC_MODE_EN BIT(0)
+
+#define PPE_BM_PORT_GROUP_ID_ADDR 0x600180
+#define PPE_BM_PORT_GROUP_ID_ENTRIES 15
+#define PPE_BM_PORT_GROUP_ID_INC 0x4
+#define PPE_BM_PORT_GROUP_ID_SHARED_GROUP_ID GENMASK(1, 0)
+
+/* Counters for PPE buffers used to cache packets. */
+#define PPE_BM_USED_CNT_TBL_ADDR 0x6001c0
+#define PPE_BM_USED_CNT_TBL_ENTRIES 15
+#define PPE_BM_USED_CNT_TBL_INC 0x4
+#define PPE_BM_USED_CNT_VAL GENMASK(10, 0)
+
+/* Counters for PPE buffers used for packets received after the pause frame is sent. */
+#define PPE_BM_REACT_CNT_TBL_ADDR 0x600240
+#define PPE_BM_REACT_CNT_TBL_ENTRIES 15
+#define PPE_BM_REACT_CNT_TBL_INC 0x4
+#define PPE_BM_REACT_CNT_VAL GENMASK(8, 0)
+
+#define PPE_BM_SHARED_GROUP_CFG_ADDR 0x600290
+#define PPE_BM_SHARED_GROUP_CFG_ENTRIES 4
+#define PPE_BM_SHARED_GROUP_CFG_INC 0x4
+#define PPE_BM_SHARED_GROUP_CFG_SHARED_LIMIT GENMASK(10, 0)
+
+#define PPE_BM_PORT_FC_CFG_TBL_ADDR 0x601000
+#define PPE_BM_PORT_FC_CFG_TBL_ENTRIES 15
+#define PPE_BM_PORT_FC_CFG_TBL_INC 0x10
+#define PPE_BM_PORT_FC_W0_REACT_LIMIT GENMASK(8, 0)
+#define PPE_BM_PORT_FC_W0_RESUME_THRESHOLD GENMASK(17, 9)
+#define PPE_BM_PORT_FC_W0_RESUME_OFFSET GENMASK(28, 18)
+#define PPE_BM_PORT_FC_W0_CEILING_LOW GENMASK(31, 29)
+#define PPE_BM_PORT_FC_W1_CEILING_HIGH GENMASK(7, 0)
+#define PPE_BM_PORT_FC_W1_WEIGHT GENMASK(10, 8)
+#define PPE_BM_PORT_FC_W1_DYNAMIC BIT(11)
+#define PPE_BM_PORT_FC_W1_PRE_ALLOC GENMASK(22, 12)
+
+#define PPE_BM_PORT_FC_SET_REACT_LIMIT(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_BM_PORT_FC_W0_REACT_LIMIT, tbl_cfg, value)
+#define PPE_BM_PORT_FC_SET_RESUME_THRESHOLD(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_BM_PORT_FC_W0_RESUME_THRESHOLD, tbl_cfg, value)
+#define PPE_BM_PORT_FC_SET_RESUME_OFFSET(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_BM_PORT_FC_W0_RESUME_OFFSET, tbl_cfg, value)
+#define PPE_BM_PORT_FC_SET_CEILING_LOW(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_BM_PORT_FC_W0_CEILING_LOW, tbl_cfg, value)
+#define PPE_BM_PORT_FC_SET_CEILING_HIGH(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_BM_PORT_FC_W1_CEILING_HIGH, (tbl_cfg) + 0x1, value)
+#define PPE_BM_PORT_FC_SET_WEIGHT(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_BM_PORT_FC_W1_WEIGHT, (tbl_cfg) + 0x1, value)
+#define PPE_BM_PORT_FC_SET_DYNAMIC(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_BM_PORT_FC_W1_DYNAMIC, (tbl_cfg) + 0x1, value)
+#define PPE_BM_PORT_FC_SET_PRE_ALLOC(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_BM_PORT_FC_W1_PRE_ALLOC, (tbl_cfg) + 0x1, value)
+
+/* The queue base configurations based on destination port,
+ * service code or CPU code.
+ */
+#define PPE_UCAST_QUEUE_MAP_TBL_ADDR 0x810000
+#define PPE_UCAST_QUEUE_MAP_TBL_ENTRIES 3072
+#define PPE_UCAST_QUEUE_MAP_TBL_INC 0x10
+#define PPE_UCAST_QUEUE_MAP_TBL_PROFILE_ID GENMASK(3, 0)
+#define PPE_UCAST_QUEUE_MAP_TBL_QUEUE_ID GENMASK(11, 4)
+
+/* The queue offset configurations based on RSS hash value. */
+#define PPE_UCAST_HASH_MAP_TBL_ADDR 0x830000
+#define PPE_UCAST_HASH_MAP_TBL_ENTRIES 4096
+#define PPE_UCAST_HASH_MAP_TBL_INC 0x10
+#define PPE_UCAST_HASH_MAP_TBL_HASH GENMASK(7, 0)
+
+/* The queue offset configurations based on PPE internal priority. */
+#define PPE_UCAST_PRIORITY_MAP_TBL_ADDR 0x842000
+#define PPE_UCAST_PRIORITY_MAP_TBL_ENTRIES 256
+#define PPE_UCAST_PRIORITY_MAP_TBL_INC 0x10
+#define PPE_UCAST_PRIORITY_MAP_TBL_CLASS GENMASK(3, 0)
+
+/* PPE unicast queue (0-255) configurations. */
+#define PPE_AC_UNICAST_QUEUE_CFG_TBL_ADDR 0x848000
+#define PPE_AC_UNICAST_QUEUE_CFG_TBL_ENTRIES 256
+#define PPE_AC_UNICAST_QUEUE_CFG_TBL_INC 0x10
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_EN BIT(0)
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_WRED_EN BIT(1)
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_FC_EN BIT(2)
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_CLR_AWARE BIT(3)
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_GRP_ID GENMASK(5, 4)
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_PRE_LIMIT GENMASK(16, 6)
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_DYNAMIC BIT(17)
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_WEIGHT GENMASK(20, 18)
+#define PPE_AC_UNICAST_QUEUE_CFG_W0_THRESHOLD GENMASK(31, 21)
+#define PPE_AC_UNICAST_QUEUE_CFG_W3_GRN_RESUME GENMASK(23, 13)
+
+#define PPE_AC_UNICAST_QUEUE_SET_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_UNICAST_QUEUE_CFG_W0_EN, tbl_cfg, value)
+#define PPE_AC_UNICAST_QUEUE_SET_GRP_ID(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_UNICAST_QUEUE_CFG_W0_GRP_ID, tbl_cfg, value)
+#define PPE_AC_UNICAST_QUEUE_SET_PRE_LIMIT(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_UNICAST_QUEUE_CFG_W0_PRE_LIMIT, tbl_cfg, value)
+#define PPE_AC_UNICAST_QUEUE_SET_DYNAMIC(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_UNICAST_QUEUE_CFG_W0_DYNAMIC, tbl_cfg, value)
+#define PPE_AC_UNICAST_QUEUE_SET_WEIGHT(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_UNICAST_QUEUE_CFG_W0_WEIGHT, tbl_cfg, value)
+#define PPE_AC_UNICAST_QUEUE_SET_THRESHOLD(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_UNICAST_QUEUE_CFG_W0_THRESHOLD, tbl_cfg, value)
+#define PPE_AC_UNICAST_QUEUE_SET_GRN_RESUME(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_UNICAST_QUEUE_CFG_W3_GRN_RESUME, (tbl_cfg) + 0x3, value)
+
+/* PPE multicast queue (256-299) configurations. */
+#define PPE_AC_MULTICAST_QUEUE_CFG_TBL_ADDR 0x84a000
+#define PPE_AC_MULTICAST_QUEUE_CFG_TBL_ENTRIES 44
+#define PPE_AC_MULTICAST_QUEUE_CFG_TBL_INC 0x10
+#define PPE_AC_MULTICAST_QUEUE_CFG_W0_EN BIT(0)
+#define PPE_AC_MULTICAST_QUEUE_CFG_W0_FC_EN BIT(1)
+#define PPE_AC_MULTICAST_QUEUE_CFG_W0_CLR_AWARE BIT(2)
+#define PPE_AC_MULTICAST_QUEUE_CFG_W0_GRP_ID GENMASK(4, 3)
+#define PPE_AC_MULTICAST_QUEUE_CFG_W0_PRE_LIMIT GENMASK(15, 5)
+#define PPE_AC_MULTICAST_QUEUE_CFG_W0_THRESHOLD GENMASK(26, 16)
+#define PPE_AC_MULTICAST_QUEUE_CFG_W2_RESUME GENMASK(17, 7)
+
+#define PPE_AC_MULTICAST_QUEUE_SET_EN(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_MULTICAST_QUEUE_CFG_W0_EN, tbl_cfg, value)
+#define PPE_AC_MULTICAST_QUEUE_SET_GRN_GRP_ID(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_MULTICAST_QUEUE_CFG_W0_GRP_ID, tbl_cfg, value)
+#define PPE_AC_MULTICAST_QUEUE_SET_GRN_PRE_LIMIT(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_MULTICAST_QUEUE_CFG_W0_PRE_LIMIT, tbl_cfg, value)
+#define PPE_AC_MULTICAST_QUEUE_SET_GRN_THRESHOLD(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_MULTICAST_QUEUE_CFG_W0_THRESHOLD, tbl_cfg, value)
+#define PPE_AC_MULTICAST_QUEUE_SET_GRN_RESUME(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_MULTICAST_QUEUE_CFG_W2_RESUME, (tbl_cfg) + 0x2, value)
+
+/* PPE admission control group (0-3) configurations. */
+#define PPE_AC_GRP_CFG_TBL_ADDR 0x84c000
+#define PPE_AC_GRP_CFG_TBL_ENTRIES 0x4
+#define PPE_AC_GRP_CFG_TBL_INC 0x10
+#define PPE_AC_GRP_W0_AC_EN BIT(0)
+#define PPE_AC_GRP_W0_AC_FC_EN BIT(1)
+#define PPE_AC_GRP_W0_CLR_AWARE BIT(2)
+#define PPE_AC_GRP_W0_THRESHOLD_LOW GENMASK(31, 25)
+#define PPE_AC_GRP_W1_THRESHOLD_HIGH GENMASK(3, 0)
+#define PPE_AC_GRP_W1_BUF_LIMIT GENMASK(14, 4)
+#define PPE_AC_GRP_W2_RESUME_GRN GENMASK(15, 5)
+#define PPE_AC_GRP_W2_PRE_ALLOC GENMASK(26, 16)
+
+#define PPE_AC_GRP_SET_BUF_LIMIT(tbl_cfg, value) \
+ FIELD_MODIFY(PPE_AC_GRP_W1_BUF_LIMIT, (tbl_cfg) + 0x1, value)
+
+/* Counters for packets handled by unicast queues (0-255). */
+#define PPE_AC_UNICAST_QUEUE_CNT_TBL_ADDR 0x84e000
+#define PPE_AC_UNICAST_QUEUE_CNT_TBL_ENTRIES 256
+#define PPE_AC_UNICAST_QUEUE_CNT_TBL_INC 0x10
+#define PPE_AC_UNICAST_QUEUE_CNT_TBL_PEND_CNT GENMASK(12, 0)
+
+/* Counters for packets handled by multicast queues (256-299). */
+#define PPE_AC_MULTICAST_QUEUE_CNT_TBL_ADDR 0x852000
+#define PPE_AC_MULTICAST_QUEUE_CNT_TBL_ENTRIES 44
+#define PPE_AC_MULTICAST_QUEUE_CNT_TBL_INC 0x10
+#define PPE_AC_MULTICAST_QUEUE_CNT_TBL_PEND_CNT GENMASK(12, 0)
+
+/* Table addresses for per-queue enqueue setting. */
+#define PPE_ENQ_OPR_TBL_ADDR 0x85c000
+#define PPE_ENQ_OPR_TBL_ENTRIES 300
+#define PPE_ENQ_OPR_TBL_INC 0x10
+#define PPE_ENQ_OPR_TBL_ENQ_DISABLE BIT(0)
+
+/* The unicast drop count includes the possible WRED drops for the green,
+ * yellow and red categories.
+ */
+#define PPE_UNICAST_DROP_CNT_TBL_ADDR 0x9e0000
+#define PPE_UNICAST_DROP_CNT_TBL_ENTRIES 1536
+#define PPE_UNICAST_DROP_CNT_TBL_INC 0x10
+#define PPE_UNICAST_DROP_TYPES 6
+#define PPE_UNICAST_DROP_FORCE_OFFSET 3
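+/* E.g. the forced-drop counter of unicast queue q is table entry
+ * q * PPE_UNICAST_DROP_TYPES + PPE_UNICAST_DROP_FORCE_OFFSET, as used by
+ * ppe_queue_counter_get().
+ */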
+
+/* There are 16 multicast queues dedicated to CPU port 0. The multicast drop
+ * count includes the forced drops for green, yellow and red category packets.
+ */
+#define PPE_P0_MULTICAST_DROP_CNT_TBL_ADDR 0x9f0000
+#define PPE_P0_MULTICAST_DROP_CNT_TBL_ENTRIES 48
+#define PPE_P0_MULTICAST_DROP_CNT_TBL_INC 0x10
+#define PPE_P0_MULTICAST_QUEUE_NUM 16
+
+/* Each PPE physical port has four dedicated multicast queues; with three
+ * drop types per queue this gives a total of 12 drop counter entries per
+ * port. The multicast drop count includes forced drops for green, yellow,
+ * and red category packets.
+ */
+#define PPE_MULTICAST_QUEUE_PORT_ADDR_INC 0x1000
+#define PPE_MULTICAST_DROP_CNT_TBL_INC 0x10
+#define PPE_MULTICAST_DROP_TYPES 3
+#define PPE_MULTICAST_QUEUE_NUM 4
+#define PPE_MULTICAST_DROP_CNT_TBL_ENTRIES 12
+
+#define PPE_CPU_PORT_MULTICAST_FORCE_DROP_CNT_TBL_ADDR(mq_offset) \
+ (PPE_P0_MULTICAST_DROP_CNT_TBL_ADDR + \
+ (mq_offset) * PPE_P0_MULTICAST_DROP_CNT_TBL_INC * \
+ PPE_MULTICAST_DROP_TYPES)
+
+#define PPE_P1_MULTICAST_DROP_CNT_TBL_ADDR \
+ (PPE_P0_MULTICAST_DROP_CNT_TBL_ADDR + PPE_MULTICAST_QUEUE_PORT_ADDR_INC)
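+/* For example, ppe_queue_counter_get() resolves the drop counter of the
+ * second multicast queue on the second non-CPU port to
+ * PPE_P1_MULTICAST_DROP_CNT_TBL_ADDR + PPE_MULTICAST_QUEUE_PORT_ADDR_INC +
+ * PPE_MULTICAST_DROP_CNT_TBL_INC * PPE_MULTICAST_DROP_TYPES.
+ */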
+#endif
diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig
index fe136f61586f..272c83bfdc6c 100644
--- a/drivers/net/ethernet/realtek/Kconfig
+++ b/drivers/net/ethernet/realtek/Kconfig
@@ -58,7 +58,7 @@ config 8139TOO
config 8139TOO_PIO
bool "Use PIO instead of MMIO"
default y
- depends on 8139TOO
+ depends on 8139TOO && !NO_IOPORT_MAP
help
This instructs the driver to use programmed I/O ports (PIO) instead
of PCI shared memory (MMIO). This can possibly solve some problems
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 9c601f271c02..8903ae90afcb 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -3409,7 +3409,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini);
}
- r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070);
+ r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0000);
r8168_mac_ocp_modify(tp, 0xe052, 0x6000, 0x8008);
r8168_mac_ocp_modify(tp, 0xe0d6, 0x01ff, 0x017f);
r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f);
@@ -3514,7 +3514,7 @@ static void rtl_hw_start_8117(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini);
}
- r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070);
+ r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0000);
r8168_mac_ocp_write(tp, 0xea80, 0x0003);
r8168_mac_ocp_modify(tp, 0xe052, 0x0000, 0x0009);
r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f);
@@ -3715,7 +3715,7 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xc0b4, 0x0000, 0x000c);
r8168_mac_ocp_modify(tp, 0xeb6a, 0x00ff, 0x0033);
r8168_mac_ocp_modify(tp, 0xeb50, 0x03e0, 0x0040);
- r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030);
+ r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0000);
r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000);
r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001);
if (tp->mac_version == RTL_GIGA_MAC_VER_70 ||
@@ -5441,10 +5441,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Disable ASPM L1 as that cause random device stop working
* problems as well as full system hangs for some PCIe devices users.
*/
- if (rtl_aspm_is_safe(tp))
+ if (rtl_aspm_is_safe(tp)) {
+ dev_info(&pdev->dev, "System vendor flags ASPM as safe\n");
rc = 0;
- else
+ } else {
rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
+ }
tp->aspm_manageable = !rc;
tp->dash_type = rtl_get_dash_type(tp);
diff --git a/drivers/net/ethernet/realtek/rtase/rtase.h b/drivers/net/ethernet/realtek/rtase/rtase.h
index 20decdeb9fdb..b9209eb6ea73 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase.h
+++ b/drivers/net/ethernet/realtek/rtase/rtase.h
@@ -241,7 +241,7 @@ union rtase_rx_desc {
#define RTASE_RX_RES BIT(20)
#define RTASE_RX_RUNT BIT(19)
#define RTASE_RX_RWT BIT(18)
-#define RTASE_RX_CRC BIT(16)
+#define RTASE_RX_CRC BIT(17)
#define RTASE_RX_V6F BIT(31)
#define RTASE_RX_V4F BIT(30)
#define RTASE_RX_UDPT BIT(29)
diff --git a/drivers/net/ethernet/renesas/Makefile b/drivers/net/ethernet/renesas/Makefile
index f65fc76f8b4d..d63e0c61bb68 100644
--- a/drivers/net/ethernet/renesas/Makefile
+++ b/drivers/net/ethernet/renesas/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_SH_ETH) += sh_eth.o
ravb-objs := ravb_main.o ravb_ptp.o
obj-$(CONFIG_RAVB) += ravb.o
+rswitch-objs := rswitch_main.o rswitch_l2.o
obj-$(CONFIG_RENESAS_ETHER_SWITCH) += rswitch.o
obj-$(CONFIG_RENESAS_GEN4_PTP) += rcar_gen4_ptp.o
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 94b6fb94f8f1..9d3bd65b85ff 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -802,7 +802,6 @@ static int ravb_rx_gbeth(struct net_device *ndev, int budget, int q)
const struct ravb_hw_info *info = priv->info;
struct net_device_stats *stats;
struct ravb_rx_desc *desc;
- struct sk_buff *skb;
int rx_packets = 0;
u8 desc_status;
u16 desc_len;
@@ -815,6 +814,8 @@ static int ravb_rx_gbeth(struct net_device *ndev, int budget, int q)
stats = &priv->stats[q];
for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
+ struct sk_buff *skb = NULL;
+
entry = priv->cur_rx[q] % priv->num_rx_ring[q];
desc = &priv->rx_ring[q].desc[entry];
if (rx_packets == budget || desc->die_dt == DT_FEMPTY)
diff --git a/drivers/net/ethernet/renesas/rcar_gen4_ptp.c b/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
index 4c3e8cc5046f..d0979abd36de 100644
--- a/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
+++ b/drivers/net/ethernet/renesas/rcar_gen4_ptp.c
@@ -12,19 +12,18 @@
#include <linux/slab.h>
#include "rcar_gen4_ptp.h"
-#define ptp_to_priv(ptp) container_of(ptp, struct rcar_gen4_ptp_private, info)
-static const struct rcar_gen4_ptp_reg_offset gen4_offs = {
- .enable = PTPTMEC,
- .disable = PTPTMDC,
- .increment = PTPTIVC0,
- .config_t0 = PTPTOVC00,
- .config_t1 = PTPTOVC10,
- .config_t2 = PTPTOVC20,
- .monitor_t0 = PTPGPTPTM00,
- .monitor_t1 = PTPGPTPTM10,
- .monitor_t2 = PTPGPTPTM20,
-};
+#define PTPTMEC_REG 0x0010
+#define PTPTMDC_REG 0x0014
+#define PTPTIVC0_REG 0x0020
+#define PTPTOVC00_REG 0x0030
+#define PTPTOVC10_REG 0x0034
+#define PTPTOVC20_REG 0x0038
+#define PTPGPTPTM00_REG 0x0050
+#define PTPGPTPTM10_REG 0x0054
+#define PTPGPTPTM20_REG 0x0058
+
+#define ptp_to_priv(ptp) container_of(ptp, struct rcar_gen4_ptp_private, info)
static int rcar_gen4_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
@@ -38,20 +37,21 @@ static int rcar_gen4_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
diff = div_s64(addend * scaled_ppm_to_ppb(scaled_ppm), NSEC_PER_SEC);
addend = neg_adj ? addend - diff : addend + diff;
- iowrite32(addend, ptp_priv->addr + ptp_priv->offs->increment);
+ iowrite32(addend, ptp_priv->addr + PTPTIVC0_REG);
return 0;
}
-/* Caller must hold the lock */
static void _rcar_gen4_ptp_gettime(struct ptp_clock_info *ptp,
struct timespec64 *ts)
{
struct rcar_gen4_ptp_private *ptp_priv = ptp_to_priv(ptp);
- ts->tv_nsec = ioread32(ptp_priv->addr + ptp_priv->offs->monitor_t0);
- ts->tv_sec = ioread32(ptp_priv->addr + ptp_priv->offs->monitor_t1) |
- ((s64)ioread32(ptp_priv->addr + ptp_priv->offs->monitor_t2) << 32);
+ lockdep_assert_held(&ptp_priv->lock);
+
+ ts->tv_nsec = ioread32(ptp_priv->addr + PTPGPTPTM00_REG);
+ ts->tv_sec = ioread32(ptp_priv->addr + PTPGPTPTM10_REG) |
+ ((s64)ioread32(ptp_priv->addr + PTPGPTPTM20_REG) << 32);
}
static int rcar_gen4_ptp_gettime(struct ptp_clock_info *ptp,
@@ -73,14 +73,14 @@ static void _rcar_gen4_ptp_settime(struct ptp_clock_info *ptp,
{
struct rcar_gen4_ptp_private *ptp_priv = ptp_to_priv(ptp);
- iowrite32(1, ptp_priv->addr + ptp_priv->offs->disable);
- iowrite32(0, ptp_priv->addr + ptp_priv->offs->config_t2);
- iowrite32(0, ptp_priv->addr + ptp_priv->offs->config_t1);
- iowrite32(0, ptp_priv->addr + ptp_priv->offs->config_t0);
- iowrite32(1, ptp_priv->addr + ptp_priv->offs->enable);
- iowrite32(ts->tv_sec >> 32, ptp_priv->addr + ptp_priv->offs->config_t2);
- iowrite32(ts->tv_sec, ptp_priv->addr + ptp_priv->offs->config_t1);
- iowrite32(ts->tv_nsec, ptp_priv->addr + ptp_priv->offs->config_t0);
+ iowrite32(1, ptp_priv->addr + PTPTMDC_REG);
+ iowrite32(0, ptp_priv->addr + PTPTOVC20_REG);
+ iowrite32(0, ptp_priv->addr + PTPTOVC10_REG);
+ iowrite32(0, ptp_priv->addr + PTPTOVC00_REG);
+ iowrite32(1, ptp_priv->addr + PTPTMEC_REG);
+ iowrite32(ts->tv_sec >> 32, ptp_priv->addr + PTPTOVC20_REG);
+ iowrite32(ts->tv_sec, ptp_priv->addr + PTPTOVC10_REG);
+ iowrite32(ts->tv_nsec, ptp_priv->addr + PTPTOVC00_REG);
}
static int rcar_gen4_ptp_settime(struct ptp_clock_info *ptp,
@@ -130,17 +130,6 @@ static struct ptp_clock_info rcar_gen4_ptp_info = {
.enable = rcar_gen4_ptp_enable,
};
-static int rcar_gen4_ptp_set_offs(struct rcar_gen4_ptp_private *ptp_priv,
- enum rcar_gen4_ptp_reg_layout layout)
-{
- if (layout != RCAR_GEN4_PTP_REG_LAYOUT)
- return -EINVAL;
-
- ptp_priv->offs = &gen4_offs;
-
- return 0;
-}
-
static s64 rcar_gen4_ptp_rate_to_increment(u32 rate)
{
/* Timer increment in ns.
@@ -151,27 +140,20 @@ static s64 rcar_gen4_ptp_rate_to_increment(u32 rate)
return div_s64(1000000000LL << 27, rate);
}
-int rcar_gen4_ptp_register(struct rcar_gen4_ptp_private *ptp_priv,
- enum rcar_gen4_ptp_reg_layout layout, u32 rate)
+int rcar_gen4_ptp_register(struct rcar_gen4_ptp_private *ptp_priv, u32 rate)
{
- int ret;
-
if (ptp_priv->initialized)
return 0;
spin_lock_init(&ptp_priv->lock);
- ret = rcar_gen4_ptp_set_offs(ptp_priv, layout);
- if (ret)
- return ret;
-
ptp_priv->default_addend = rcar_gen4_ptp_rate_to_increment(rate);
- iowrite32(ptp_priv->default_addend, ptp_priv->addr + ptp_priv->offs->increment);
+ iowrite32(ptp_priv->default_addend, ptp_priv->addr + PTPTIVC0_REG);
ptp_priv->clock = ptp_clock_register(&ptp_priv->info, NULL);
if (IS_ERR(ptp_priv->clock))
return PTR_ERR(ptp_priv->clock);
- iowrite32(0x01, ptp_priv->addr + ptp_priv->offs->enable);
+ iowrite32(0x01, ptp_priv->addr + PTPTMEC_REG);
ptp_priv->initialized = true;
return 0;
@@ -180,7 +162,7 @@ EXPORT_SYMBOL_GPL(rcar_gen4_ptp_register);
int rcar_gen4_ptp_unregister(struct rcar_gen4_ptp_private *ptp_priv)
{
- iowrite32(1, ptp_priv->addr + ptp_priv->offs->disable);
+ iowrite32(1, ptp_priv->addr + PTPTMDC_REG);
return ptp_clock_unregister(ptp_priv->clock);
}
diff --git a/drivers/net/ethernet/renesas/rcar_gen4_ptp.h b/drivers/net/ethernet/renesas/rcar_gen4_ptp.h
index e22da5acd53d..f77e79e47357 100644
--- a/drivers/net/ethernet/renesas/rcar_gen4_ptp.h
+++ b/drivers/net/ethernet/renesas/rcar_gen4_ptp.h
@@ -11,10 +11,6 @@
#define RCAR_GEN4_GPTP_OFFSET_S4 0x00018000
-enum rcar_gen4_ptp_reg_layout {
- RCAR_GEN4_PTP_REG_LAYOUT
-};
-
/* driver's definitions */
#define RCAR_GEN4_RXTSTAMP_ENABLED BIT(0)
#define RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT BIT(1)
@@ -23,37 +19,11 @@ enum rcar_gen4_ptp_reg_layout {
#define RCAR_GEN4_TXTSTAMP_ENABLED BIT(0)
-#define PTPRO 0
-
-enum rcar_gen4_ptp_reg {
- PTPTMEC = PTPRO + 0x0010,
- PTPTMDC = PTPRO + 0x0014,
- PTPTIVC0 = PTPRO + 0x0020,
- PTPTOVC00 = PTPRO + 0x0030,
- PTPTOVC10 = PTPRO + 0x0034,
- PTPTOVC20 = PTPRO + 0x0038,
- PTPGPTPTM00 = PTPRO + 0x0050,
- PTPGPTPTM10 = PTPRO + 0x0054,
- PTPGPTPTM20 = PTPRO + 0x0058,
-};
-
-struct rcar_gen4_ptp_reg_offset {
- u16 enable;
- u16 disable;
- u16 increment;
- u16 config_t0;
- u16 config_t1;
- u16 config_t2;
- u16 monitor_t0;
- u16 monitor_t1;
- u16 monitor_t2;
-};
struct rcar_gen4_ptp_private {
void __iomem *addr;
struct ptp_clock *clock;
struct ptp_clock_info info;
- const struct rcar_gen4_ptp_reg_offset *offs;
spinlock_t lock; /* For multiple registers access */
u32 tstamp_tx_ctrl;
u32 tstamp_rx_ctrl;
@@ -61,8 +31,7 @@ struct rcar_gen4_ptp_private {
bool initialized;
};
-int rcar_gen4_ptp_register(struct rcar_gen4_ptp_private *ptp_priv,
- enum rcar_gen4_ptp_reg_layout layout, u32 rate);
+int rcar_gen4_ptp_register(struct rcar_gen4_ptp_private *ptp_priv, u32 rate);
int rcar_gen4_ptp_unregister(struct rcar_gen4_ptp_private *ptp_priv);
struct rcar_gen4_ptp_private *rcar_gen4_ptp_alloc(struct platform_device *pdev);
diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
index 532192cbca4b..a1d4a877e5bd 100644
--- a/drivers/net/ethernet/renesas/rswitch.h
+++ b/drivers/net/ethernet/renesas/rswitch.h
@@ -1,19 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Renesas Ethernet Switch device driver
*
- * Copyright (C) 2022 Renesas Electronics Corporation
+ * Copyright (C) 2022-2025 Renesas Electronics Corporation
*/
#ifndef __RSWITCH_H__
#define __RSWITCH_H__
#include <linux/platform_device.h>
+#include <linux/phy.h>
+
#include "rcar_gen4_ptp.h"
#define RSWITCH_MAX_NUM_QUEUES 128
#define RSWITCH_NUM_AGENTS 5
#define RSWITCH_NUM_PORTS 3
+
+#define rswitch_for_all_ports(_priv, _rdev) \
+ list_for_each_entry(_rdev, &_priv->port_list, list)
+
#define rswitch_for_each_enabled_port(priv, i) \
for (i = 0; i < RSWITCH_NUM_PORTS; i++) \
if (priv->rdev[i]->disabled) \
@@ -809,7 +815,8 @@ enum rswitch_gwca_mode {
#define FWPC0_IP4EA BIT(10)
#define FWPC0_IPDSA BIT(12)
#define FWPC0_IPHLA BIT(18)
-#define FWPC0_MACSDA BIT(20)
+#define FWPC0_MACDSA BIT(20)
+#define FWPC0_MACSSA BIT(23)
#define FWPC0_MACHLA BIT(26)
#define FWPC0_MACHMA BIT(27)
#define FWPC0_VLANSA BIT(28)
@@ -820,12 +827,30 @@ enum rswitch_gwca_mode {
#define FWPC2(i) (FWPC20 + (i) * 0x10)
#define FWCP2_LTWFW GENMASK(16 + (RSWITCH_NUM_AGENTS - 1), 16)
+#define FWCP2_LTWFW_MASK GENMASK(16 + (RSWITCH_NUM_AGENTS - 1), 16)
#define FWPBFC(i) (FWPBFC0 + (i) * 0x10)
#define FWPBFC_PBDV GENMASK(RSWITCH_NUM_AGENTS - 1, 0)
#define FWPBFCSDC(j, i) (FWPBFCSDC00 + (i) * 0x10 + (j) * 0x04)
+#define FWMACHEC_MACHMUE_MASK GENMASK(26, 16)
+
+#define FWMACTIM_MACTIOG BIT(0)
+#define FWMACTIM_MACTR BIT(1)
+
+#define FWMACAGUSPC_MACAGUSP GENMASK(9, 0)
+#define FWMACAGC_MACAGT GENMASK(15, 0)
+#define FWMACAGC_MACAGE BIT(16)
+#define FWMACAGC_MACAGSL BIT(17)
+#define FWMACAGC_MACAGPM BIT(18)
+#define FWMACAGC_MACDES BIT(24)
+#define FWMACAGC_MACAGOG BIT(28)
+#define FWMACAGC_MACDESOG BIT(29)
+
+#define RSW_AGEING_CLK_PER_US 0x140
+#define RSW_AGEING_TIME 300
+
/* TOP */
#define TPEMIMC7(queue) (TPEMIMC70 + (queue) * 4)
@@ -994,10 +1019,18 @@ struct rswitch_device {
DECLARE_BITMAP(ts_skb_used, TS_TAGS_PER_PORT);
bool disabled;
+ struct list_head list;
+
int port;
struct rswitch_etha *etha;
struct device_node *np_port;
struct phy *serdes;
+
+ struct net_device *brdev; /* master bridge device */
+ unsigned int learning_requested : 1;
+ unsigned int learning_offloaded : 1;
+ unsigned int forwarding_requested : 1;
+ unsigned int forwarding_offloaded : 1;
};
struct rswitch_mfwd_mac_table_entry {
@@ -1022,11 +1055,17 @@ struct rswitch_private {
struct rswitch_etha etha[RSWITCH_NUM_PORTS];
struct rswitch_mfwd mfwd;
+ struct list_head port_list;
+
spinlock_t lock; /* lock interrupt registers' control */
struct clk *clk;
bool etha_no_runtime_change;
bool gwca_halt;
+ struct net_device *offload_brdev;
};
+bool is_rdev(const struct net_device *ndev);
+void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set);
+
#endif /* #ifndef __RSWITCH_H__ */
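
The rswitch_for_all_ports() helper added above is a thin wrapper around list_for_each_entry() over the new port_list. A minimal usage sketch (the helper and its debug body are hypothetical, not part of the driver):

	static void example_walk_ports(struct rswitch_private *priv)
	{
		struct rswitch_device *rdev;

		/* Visits every allocated port, whether or not it is enabled */
		rswitch_for_all_ports(priv, rdev)
			netdev_dbg(rdev->ndev, "port %d on list\n", rdev->port);
	}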
diff --git a/drivers/net/ethernet/renesas/rswitch_l2.c b/drivers/net/ethernet/renesas/rswitch_l2.c
new file mode 100644
index 000000000000..4a69ec77d69c
--- /dev/null
+++ b/drivers/net/ethernet/renesas/rswitch_l2.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Renesas Ethernet Switch device driver
+ *
+ * Copyright (C) 2025 Renesas Electronics Corporation
+ */
+
+#include <linux/err.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/kernel.h>
+#include <net/switchdev.h>
+
+#include "rswitch.h"
+#include "rswitch_l2.h"
+
+static bool rdev_for_l2_offload(struct rswitch_device *rdev)
+{
+ return rdev->priv->offload_brdev &&
+ rdev->brdev == rdev->priv->offload_brdev &&
+ (test_bit(rdev->port, rdev->priv->opened_ports));
+}
+
+static void rswitch_change_l2_hw_offloading(struct rswitch_device *rdev,
+ bool start, bool learning)
+{
+ u32 bits = learning ? FWPC0_MACSSA | FWPC0_MACHLA | FWPC0_MACHMA : FWPC0_MACDSA;
+ u32 clear = start ? 0 : bits;
+ u32 set = start ? bits : 0;
+
+ if ((learning && rdev->learning_offloaded == start) ||
+ (!learning && rdev->forwarding_offloaded == start))
+ return;
+
+ rswitch_modify(rdev->priv->addr, FWPC0(rdev->port), clear, set);
+
+ if (learning)
+ rdev->learning_offloaded = start;
+ else
+ rdev->forwarding_offloaded = start;
+
+ netdev_info(rdev->ndev, "%s hw %s\n", start ? "starting" : "stopping",
+ learning ? "learning" : "forwarding");
+}
+
+static void rswitch_update_l2_hw_learning(struct rswitch_private *priv)
+{
+ struct rswitch_device *rdev;
+ bool learning_needed;
+
+ rswitch_for_all_ports(priv, rdev) {
+ if (rdev_for_l2_offload(rdev))
+ learning_needed = rdev->learning_requested;
+ else
+ learning_needed = false;
+
+ rswitch_change_l2_hw_offloading(rdev, learning_needed, true);
+ }
+}
+
+static void rswitch_update_l2_hw_forwarding(struct rswitch_private *priv)
+{
+ struct rswitch_device *rdev;
+ unsigned int fwd_mask;
+
+ /* calculate fwd_mask with zeroes in bits corresponding to ports that
+ * shall participate in hardware forwarding
+ */
+ fwd_mask = GENMASK(RSWITCH_NUM_AGENTS - 1, 0);
+
+ rswitch_for_all_ports(priv, rdev) {
+ if (rdev_for_l2_offload(rdev) && rdev->forwarding_requested)
+ fwd_mask &= ~BIT(rdev->port);
+ }
+
+ rswitch_for_all_ports(priv, rdev) {
+ if ((rdev_for_l2_offload(rdev) && rdev->forwarding_requested) ||
+ rdev->forwarding_offloaded) {
+ /* Update allowed offload destinations even for ports
+ * with L2 offload enabled earlier.
+ *
+ * Do not allow L2 forwarding to self for hw port.
+ */
+ iowrite32(FIELD_PREP(FWCP2_LTWFW_MASK, fwd_mask | BIT(rdev->port)),
+ priv->addr + FWPC2(rdev->port));
+ }
+
+ if (rdev_for_l2_offload(rdev) &&
+ rdev->forwarding_requested &&
+ !rdev->forwarding_offloaded) {
+ rswitch_change_l2_hw_offloading(rdev, true, false);
+ } else if (rdev->forwarding_offloaded) {
+ rswitch_change_l2_hw_offloading(rdev, false, false);
+ }
+ }
+}
+
+void rswitch_update_l2_offload(struct rswitch_private *priv)
+{
+ rswitch_update_l2_hw_learning(priv);
+ rswitch_update_l2_hw_forwarding(priv);
+}
+
+static void rswitch_update_offload_brdev(struct rswitch_private *priv)
+{
+ struct net_device *offload_brdev = NULL;
+ struct rswitch_device *rdev, *rdev2;
+
+ rswitch_for_all_ports(priv, rdev) {
+ if (!rdev->brdev)
+ continue;
+ rswitch_for_all_ports(priv, rdev2) {
+ if (rdev2 == rdev)
+ break;
+ if (rdev2->brdev == rdev->brdev) {
+ offload_brdev = rdev->brdev;
+ break;
+ }
+ }
+ if (offload_brdev)
+ break;
+ }
+
+	if (offload_brdev != priv->offload_brdev) {
+		if (offload_brdev && priv->offload_brdev)
+			dev_dbg(&priv->pdev->dev,
+				"changing l2 offload from %s to %s\n",
+				netdev_name(priv->offload_brdev),
+				netdev_name(offload_brdev));
+		else if (offload_brdev)
+			dev_dbg(&priv->pdev->dev,
+				"starting l2 offload for %s\n",
+				netdev_name(offload_brdev));
+		else
+			dev_dbg(&priv->pdev->dev,
+				"stopping l2 offload for %s\n",
+				netdev_name(priv->offload_brdev));
+	}
+
+ priv->offload_brdev = offload_brdev;
+
+ rswitch_update_l2_offload(priv);
+}
+
+static bool rswitch_port_check(const struct net_device *ndev)
+{
+ return is_rdev(ndev);
+}
+
+static void rswitch_port_update_brdev(struct net_device *ndev,
+ struct net_device *brdev)
+{
+ struct rswitch_device *rdev;
+
+ if (!is_rdev(ndev))
+ return;
+
+ rdev = netdev_priv(ndev);
+ rdev->brdev = brdev;
+ rswitch_update_offload_brdev(rdev->priv);
+}
+
+static int rswitch_port_update_stp_state(struct net_device *ndev, u8 stp_state)
+{
+ struct rswitch_device *rdev;
+
+ if (!is_rdev(ndev))
+ return -ENODEV;
+
+ rdev = netdev_priv(ndev);
+ rdev->learning_requested = (stp_state == BR_STATE_LEARNING ||
+ stp_state == BR_STATE_FORWARDING);
+ rdev->forwarding_requested = (stp_state == BR_STATE_FORWARDING);
+ rswitch_update_l2_offload(rdev->priv);
+
+ return 0;
+}
+
+static int rswitch_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info;
+ struct net_device *brdev;
+
+ if (!rswitch_port_check(ndev))
+ return NOTIFY_DONE;
+ if (event != NETDEV_CHANGEUPPER)
+ return NOTIFY_DONE;
+
+ info = ptr;
+
+ if (netif_is_bridge_master(info->upper_dev)) {
+ brdev = info->linking ? info->upper_dev : NULL;
+ rswitch_port_update_brdev(ndev, brdev);
+ }
+
+ return NOTIFY_OK;
+}
+
+static int rswitch_update_ageing_time(struct net_device *ndev, clock_t time)
+{
+ struct rswitch_device *rdev = netdev_priv(ndev);
+ u32 reg_val;
+
+ if (!is_rdev(ndev))
+ return -ENODEV;
+
+ if (!FIELD_FIT(FWMACAGC_MACAGT, time))
+ return -EINVAL;
+
+ reg_val = FIELD_PREP(FWMACAGC_MACAGT, time);
+ reg_val |= FWMACAGC_MACAGE | FWMACAGC_MACAGSL;
+ iowrite32(reg_val, rdev->priv->addr + FWMACAGC);
+
+ return 0;
+}
+
+static int rswitch_port_attr_set(struct net_device *ndev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ return rswitch_port_update_stp_state(ndev, attr->u.stp_state);
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ return rswitch_update_ageing_time(ndev, attr->u.ageing_time);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int rswitch_switchdev_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = switchdev_notifier_info_to_dev(ptr);
+ int ret;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ ret = switchdev_handle_port_attr_set(ndev, ptr,
+ rswitch_port_check,
+ rswitch_port_attr_set);
+ return notifier_from_errno(ret);
+ }
+
+ if (!rswitch_port_check(ndev))
+ return NOTIFY_DONE;
+
+ return notifier_from_errno(-EOPNOTSUPP);
+}
+
+static int rswitch_switchdev_blocking_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = switchdev_notifier_info_to_dev(ptr);
+ int ret;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ return -EOPNOTSUPP;
+ case SWITCHDEV_PORT_OBJ_DEL:
+ return -EOPNOTSUPP;
+ case SWITCHDEV_PORT_ATTR_SET:
+ ret = switchdev_handle_port_attr_set(ndev, ptr,
+ rswitch_port_check,
+ rswitch_port_attr_set);
+ break;
+ default:
+ if (!rswitch_port_check(ndev))
+ return NOTIFY_DONE;
+ ret = -EOPNOTSUPP;
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static struct notifier_block rswitch_netdevice_nb = {
+ .notifier_call = rswitch_netdevice_event,
+};
+
+static struct notifier_block rswitch_switchdev_nb = {
+ .notifier_call = rswitch_switchdev_event,
+};
+
+static struct notifier_block rswitch_switchdev_blocking_nb = {
+ .notifier_call = rswitch_switchdev_blocking_event,
+};
+
+int rswitch_register_notifiers(void)
+{
+ int ret;
+
+ ret = register_netdevice_notifier(&rswitch_netdevice_nb);
+ if (ret)
+ goto register_netdevice_notifier_failed;
+
+ ret = register_switchdev_notifier(&rswitch_switchdev_nb);
+ if (ret)
+ goto register_switchdev_notifier_failed;
+
+ ret = register_switchdev_blocking_notifier(&rswitch_switchdev_blocking_nb);
+ if (ret)
+ goto register_switchdev_blocking_notifier_failed;
+
+ return 0;
+
+register_switchdev_blocking_notifier_failed:
+ unregister_switchdev_notifier(&rswitch_switchdev_nb);
+register_switchdev_notifier_failed:
+ unregister_netdevice_notifier(&rswitch_netdevice_nb);
+register_netdevice_notifier_failed:
+
+ return ret;
+}
+
+void rswitch_unregister_notifiers(void)
+{
+ unregister_switchdev_blocking_notifier(&rswitch_switchdev_blocking_nb);
+ unregister_switchdev_notifier(&rswitch_switchdev_nb);
+ unregister_netdevice_notifier(&rswitch_netdevice_nb);
+}
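
To make the FWPC2 programming in rswitch_update_l2_hw_forwarding() above concrete, here is a worked example of the mask arithmetic (port numbers chosen purely for illustration):

	/* RSWITCH_NUM_AGENTS == 5, so start with all destinations blocked */
	unsigned int fwd_mask = GENMASK(4, 0);		/* 0b11111 */

	/* Suppose ports 0 and 2 are offloaded members of the bridge */
	fwd_mask &= ~BIT(0);				/* 0b11110 */
	fwd_mask &= ~BIT(2);				/* 0b11010 */

	/*
	 * Port 0 then programs fwd_mask | BIT(0) == 0b11011 into its FWPC2
	 * LTWFW field: forwarding is permitted only towards port 2, and
	 * never back to port 0 itself.
	 */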
diff --git a/drivers/net/ethernet/renesas/rswitch_l2.h b/drivers/net/ethernet/renesas/rswitch_l2.h
new file mode 100644
index 000000000000..57050ede8f31
--- /dev/null
+++ b/drivers/net/ethernet/renesas/rswitch_l2.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Renesas Ethernet Switch device driver
+ *
+ * Copyright (C) 2025 Renesas Electronics Corporation
+ */
+
+#ifndef __RSWITCH_L2_H__
+#define __RSWITCH_L2_H__
+
+void rswitch_update_l2_offload(struct rswitch_private *priv);
+
+int rswitch_register_notifiers(void);
+void rswitch_unregister_notifiers(void);
+
+#endif /* #ifndef __RSWITCH_L2_H__ */
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch_main.c
index aba772e14555..8d8acc2124b8 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch_main.c
@@ -1,15 +1,18 @@
// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet Switch device driver
*
- * Copyright (C) 2022 Renesas Electronics Corporation
+ * Copyright (C) 2022-2025 Renesas Electronics Corporation
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/ip.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
@@ -25,6 +28,7 @@
#include <linux/sys_soc.h>
#include "rswitch.h"
+#include "rswitch_l2.h"
static int rswitch_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected)
{
@@ -34,7 +38,7 @@ static int rswitch_reg_wait(void __iomem *addr, u32 offs, u32 mask, u32 expected
1, RSWITCH_TIMEOUT_US);
}
-static void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set)
+void rswitch_modify(void __iomem *addr, enum rswitch_reg reg, u32 clear, u32 set)
{
iowrite32((ioread32(addr + reg) & ~clear) | set, addr + reg);
}
@@ -109,10 +113,11 @@ static void rswitch_top_init(struct rswitch_private *priv)
}
/* Forwarding engine block (MFWD) */
-static void rswitch_fwd_init(struct rswitch_private *priv)
+static int rswitch_fwd_init(struct rswitch_private *priv)
{
u32 all_ports_mask = GENMASK(RSWITCH_NUM_AGENTS - 1, 0);
unsigned int i;
+ u32 reg_val;
/* Start with empty configuration */
for (i = 0; i < RSWITCH_NUM_AGENTS; i++) {
@@ -128,6 +133,14 @@ static void rswitch_fwd_init(struct rswitch_private *priv)
iowrite32(0, priv->addr + FWPBFC(i));
}
+ /* Configure MAC table aging */
+ rswitch_modify(priv->addr, FWMACAGUSPC, FWMACAGUSPC_MACAGUSP,
+ FIELD_PREP(FWMACAGUSPC_MACAGUSP, RSW_AGEING_CLK_PER_US));
+
+ reg_val = FIELD_PREP(FWMACAGC_MACAGT, RSW_AGEING_TIME);
+ reg_val |= FWMACAGC_MACAGE | FWMACAGC_MACAGSL;
+ iowrite32(reg_val, priv->addr + FWMACAGC);
+
/* For enabled ETHA ports, setup port based forwarding */
rswitch_for_each_enabled_port(priv, i) {
/* Port based forwarding from port i to GWCA port */
@@ -140,6 +153,16 @@ static void rswitch_fwd_init(struct rswitch_private *priv)
/* For GWCA port, allow direct descriptor forwarding */
rswitch_modify(priv->addr, FWPC1(priv->gwca.index), FWPC1_DDE, FWPC1_DDE);
+
+ /* Initialize hardware L2 forwarding table */
+
+ /* Allow entire table to be used for "unsecure" entries */
+ rswitch_modify(priv->addr, FWMACHEC, 0, FWMACHEC_MACHMUE_MASK);
+
+ /* Initialize MAC hash table */
+ iowrite32(FWMACTIM_MACTIOG, priv->addr + FWMACTIM);
+
+ return rswitch_reg_wait(priv->addr, FWMACTIM, FWMACTIM_MACTIOG, 0);
}
/* Gateway CPU agent block (GWCA) */
@@ -1602,8 +1625,11 @@ static int rswitch_open(struct net_device *ndev)
netif_start_queue(ndev);
+ if (rdev->brdev)
+ rswitch_update_l2_offload(rdev->priv);
+
return 0;
-};
+}
static int rswitch_stop(struct net_device *ndev)
{
@@ -1624,12 +1650,13 @@ static int rswitch_stop(struct net_device *ndev)
napi_disable(&rdev->napi);
+ if (rdev->brdev)
+ rswitch_update_l2_offload(rdev->priv);
+
if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);
- for (tag = find_first_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT);
- tag < TS_TAGS_PER_PORT;
- tag = find_next_bit(rdev->ts_skb_used, TS_TAGS_PER_PORT, tag + 1)) {
+ for_each_set_bit(tag, rdev->ts_skb_used, TS_TAGS_PER_PORT) {
ts_skb = xchg(&rdev->ts_skb[tag], NULL);
clear_bit(tag, rdev->ts_skb_used);
if (ts_skb)
@@ -1637,7 +1664,7 @@ static int rswitch_stop(struct net_device *ndev)
}
return 0;
-};
+}
static bool rswitch_ext_desc_set_info1(struct rswitch_device *rdev,
struct sk_buff *skb,
@@ -1850,16 +1877,46 @@ static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd
}
}
+static int rswitch_get_port_parent_id(struct net_device *ndev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct rswitch_device *rdev = netdev_priv(ndev);
+ const char *name;
+
+ name = dev_name(&rdev->priv->pdev->dev);
+ ppid->id_len = min_t(size_t, strlen(name), sizeof(ppid->id));
+ memcpy(ppid->id, name, ppid->id_len);
+
+ return 0;
+}
+
+static int rswitch_get_phys_port_name(struct net_device *ndev,
+ char *name, size_t len)
+{
+ struct rswitch_device *rdev = netdev_priv(ndev);
+
+ snprintf(name, len, "tsn%d", rdev->port);
+
+ return 0;
+}
+
static const struct net_device_ops rswitch_netdev_ops = {
.ndo_open = rswitch_open,
.ndo_stop = rswitch_stop,
.ndo_start_xmit = rswitch_start_xmit,
.ndo_get_stats = rswitch_get_stats,
.ndo_eth_ioctl = rswitch_eth_ioctl,
+ .ndo_get_port_parent_id = rswitch_get_port_parent_id,
+ .ndo_get_phys_port_name = rswitch_get_phys_port_name,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
};
+bool is_rdev(const struct net_device *ndev)
+{
+ return (ndev->netdev_ops == &rswitch_netdev_ops);
+}
+
static int rswitch_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info)
{
struct rswitch_device *rdev = netdev_priv(ndev);
@@ -1959,6 +2016,8 @@ static int rswitch_device_alloc(struct rswitch_private *priv, unsigned int index
if (err < 0)
goto out_txdmac;
+ list_add_tail(&rdev->list, &priv->port_list);
+
return 0;
out_txdmac:
@@ -1978,6 +2037,7 @@ static void rswitch_device_free(struct rswitch_private *priv, unsigned int index
struct rswitch_device *rdev = priv->rdev[index];
struct net_device *ndev = rdev->ndev;
+ list_del(&rdev->list);
rswitch_txdmac_free(ndev);
rswitch_rxdmac_free(ndev);
of_node_put(rdev->np_port);
@@ -2024,10 +2084,11 @@ static int rswitch_init(struct rswitch_private *priv)
}
}
- rswitch_fwd_init(priv);
+ err = rswitch_fwd_init(priv);
+ if (err < 0)
+ goto err_fwd_init;
- err = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT,
- clk_get_rate(priv->clk));
+ err = rcar_gen4_ptp_register(priv->ptp_priv, clk_get_rate(priv->clk));
if (err < 0)
goto err_ptp_register;
@@ -2073,6 +2134,7 @@ err_gwca_ts_request_irq:
err_gwca_request_irq:
rcar_gen4_ptp_unregister(priv->ptp_priv);
+err_fwd_init:
err_ptp_register:
for (i = 0; i < RSWITCH_NUM_PORTS; i++)
rswitch_device_free(priv, i);
@@ -2107,6 +2169,7 @@ static int renesas_eth_sw_probe(struct platform_device *pdev)
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+
spin_lock_init(&priv->lock);
priv->clk = devm_clk_get(&pdev->dev, NULL);
@@ -2144,6 +2207,8 @@ static int renesas_eth_sw_probe(struct platform_device *pdev)
if (!priv->gwca.queues)
return -ENOMEM;
+ INIT_LIST_HEAD(&priv->port_list);
+
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
@@ -2154,6 +2219,15 @@ static int renesas_eth_sw_probe(struct platform_device *pdev)
return ret;
}
+ if (list_empty(&priv->port_list))
+ dev_warn(&pdev->dev, "could not initialize any ports\n");
+
+ ret = rswitch_register_notifiers();
+ if (ret) {
+ dev_err(&pdev->dev, "could not register notifiers\n");
+ return ret;
+ }
+
device_set_wakeup_capable(&pdev->dev, 1);
return ret;
@@ -2187,6 +2261,7 @@ static void renesas_eth_sw_remove(struct platform_device *pdev)
{
struct rswitch_private *priv = platform_get_drvdata(pdev);
+ rswitch_unregister_notifiers();
rswitch_deinit(priv);
pm_runtime_put(&pdev->dev);
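
The for_each_set_bit() conversion in rswitch_stop() above is behavior-preserving; the macro expands to the same find_first_bit()/find_next_bit() walk that was open-coded before. A minimal illustration:

	DECLARE_BITMAP(used, TS_TAGS_PER_PORT);
	unsigned int tag;

	bitmap_zero(used, TS_TAGS_PER_PORT);
	set_bit(1, used);
	set_bit(5, used);

	for_each_set_bit(tag, used, TS_TAGS_PER_PORT)
		pr_info("tag %u\n", tag);	/* visits 1, then 5 */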
diff --git a/drivers/net/ethernet/renesas/rtsn.c b/drivers/net/ethernet/renesas/rtsn.c
index 05c4b6c8c9c3..15a043e85431 100644
--- a/drivers/net/ethernet/renesas/rtsn.c
+++ b/drivers/net/ethernet/renesas/rtsn.c
@@ -1330,8 +1330,7 @@ static int rtsn_probe(struct platform_device *pdev)
device_set_wakeup_capable(&pdev->dev, 1);
- ret = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT,
- clk_get_rate(priv->clk));
+ ret = rcar_gen4_ptp_register(priv->ptp_priv, clk_get_rate(priv->clk));
if (ret)
goto error_pm;
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 5fc8027c92c7..6fb0ffc1c844 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2233,7 +2233,7 @@ static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
pm_runtime_get_sync(&mdp->pdev->dev);
__sh_eth_get_regs(ndev, buf);
- pm_runtime_put_sync(&mdp->pdev->dev);
+ pm_runtime_put(&mdp->pdev->dev);
}
static u32 sh_eth_get_msglevel(struct net_device *ndev)
@@ -2360,6 +2360,7 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static void sh_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -2386,6 +2387,7 @@ static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
return 0;
}
+#endif
static const struct ethtool_ops sh_eth_ethtool_ops = {
.get_regs_len = sh_eth_get_regs_len,
@@ -2401,8 +2403,10 @@ static const struct ethtool_ops sh_eth_ethtool_ops = {
.set_ringparam = sh_eth_set_ringparam,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+#ifdef CONFIG_PM_SLEEP
.get_wol = sh_eth_get_wol,
.set_wol = sh_eth_set_wol,
+#endif
};
/* network device open function */
@@ -2447,7 +2451,7 @@ out_free_irq:
free_irq(ndev->irq, ndev);
out_napi_off:
napi_disable(&mdp->napi);
- pm_runtime_put_sync(&mdp->pdev->dev);
+ pm_runtime_put(&mdp->pdev->dev);
return ret;
}
@@ -3443,8 +3447,6 @@ static void sh_eth_drv_remove(struct platform_device *pdev)
free_netdev(ndev);
}
-#ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
static int sh_eth_wol_setup(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -3527,28 +3529,8 @@ static int sh_eth_resume(struct device *dev)
return ret;
}
-#endif
-
-static int sh_eth_runtime_nop(struct device *dev)
-{
- /* Runtime PM callback shared between ->runtime_suspend()
- * and ->runtime_resume(). Simply returns success.
- *
- * This driver re-initializes all registers after
- * pm_runtime_get_sync() anyway so there is no need
- * to save and restore registers here.
- */
- return 0;
-}
-static const struct dev_pm_ops sh_eth_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(sh_eth_suspend, sh_eth_resume)
- SET_RUNTIME_PM_OPS(sh_eth_runtime_nop, sh_eth_runtime_nop, NULL)
-};
-#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
-#else
-#define SH_ETH_PM_OPS NULL
-#endif
+static DEFINE_SIMPLE_DEV_PM_OPS(sh_eth_dev_pm_ops, sh_eth_suspend, sh_eth_resume);
static const struct platform_device_id sh_eth_id_table[] = {
{ "sh7619-ether", (kernel_ulong_t)&sh7619_data },
@@ -3568,7 +3550,7 @@ static struct platform_driver sh_eth_driver = {
.id_table = sh_eth_id_table,
.driver = {
.name = CARDNAME,
- .pm = SH_ETH_PM_OPS,
+ .pm = pm_sleep_ptr(&sh_eth_dev_pm_ops),
.of_match_table = of_match_ptr(sh_eth_match_table),
},
};
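
The sh_eth conversion above follows the generic pattern for sleep-only PM ops; a minimal sketch for a hypothetical platform driver, assuming only system suspend/resume callbacks are needed:

	static int foo_suspend(struct device *dev)
	{
		/* quiesce the hardware */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		/* reprogram the hardware */
		return 0;
	}

	static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

	static struct platform_driver foo_driver = {
		.driver = {
			.name	= "foo",
			/* Evaluates to NULL when CONFIG_PM_SLEEP is disabled */
			.pm	= pm_sleep_ptr(&foo_pm_ops),
		},
	};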
diff --git a/drivers/net/ethernet/sfc/ef100_tx.c b/drivers/net/ethernet/sfc/ef100_tx.c
index e6b6be549581..03005757c060 100644
--- a/drivers/net/ethernet/sfc/ef100_tx.c
+++ b/drivers/net/ethernet/sfc/ef100_tx.c
@@ -189,6 +189,7 @@ static void ef100_make_tso_desc(struct efx_nic *efx,
{
bool gso_partial = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
unsigned int len, ip_offset, tcp_offset, payload_segs;
+ u32 mangleid_outer = ESE_GZ_TX_DESC_IP4_ID_INC_MOD16;
u32 mangleid = ESE_GZ_TX_DESC_IP4_ID_INC_MOD16;
unsigned int outer_ip_offset, outer_l4_offset;
u16 vlan_tci = skb_vlan_tag_get(skb);
@@ -200,8 +201,17 @@ static void ef100_make_tso_desc(struct efx_nic *efx,
bool outer_csum;
u32 paylen;
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID)
- mangleid = ESE_GZ_TX_DESC_IP4_ID_NO_OP;
+ if (encap) {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID_INNER)
+ mangleid = ESE_GZ_TX_DESC_IP4_ID_NO_OP;
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID)
+ mangleid_outer = ESE_GZ_TX_DESC_IP4_ID_NO_OP;
+ } else {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID)
+ mangleid = ESE_GZ_TX_DESC_IP4_ID_NO_OP;
+ mangleid_outer = ESE_GZ_TX_DESC_IP4_ID_NO_OP;
+ }
+
if (efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_TX)
vlan_enable = skb_vlan_tag_present(skb);
@@ -245,8 +255,7 @@ static void ef100_make_tso_desc(struct efx_nic *efx,
ESF_GZ_TX_TSO_OUTER_L4_OFF_W, outer_l4_offset >> 1,
ESF_GZ_TX_TSO_ED_OUTER_UDP_LEN, udp_encap && !gso_partial,
ESF_GZ_TX_TSO_ED_OUTER_IP_LEN, encap && !gso_partial,
- ESF_GZ_TX_TSO_ED_OUTER_IP4_ID, encap ? mangleid :
- ESE_GZ_TX_DESC_IP4_ID_NO_OP,
+ ESF_GZ_TX_TSO_ED_OUTER_IP4_ID, mangleid_outer,
ESF_GZ_TX_TSO_VLAN_INSERT_EN, vlan_enable,
ESF_GZ_TX_TSO_VLAN_INSERT_TCI, vlan_tci
);
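
For reference, the inner/outer IP ID handling now selected by ef100_make_tso_desc() above resolves as follows (NO_OP leaves the IP ID untouched, INC_MOD16 mangles it):

	encap	FIXEDID	FIXEDID_INNER	inner		outer
	yes	no	no		INC_MOD16	INC_MOD16
	yes	yes	no		INC_MOD16	NO_OP
	yes	no	yes		NO_OP		INC_MOD16
	no	no	-		INC_MOD16	NO_OP
	no	yes	-		NO_OP		NO_OP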
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index 06b4f52713ef..ed3a96ebc7f3 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -216,8 +216,8 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
if (efx_separate_tx_channels) {
efx->n_tx_channels =
- min(max(n_channels / 2, 1U),
- efx->max_tx_channels);
+ clamp(n_channels / 2, 1U,
+ efx->max_tx_channels);
efx->tx_channel_offset =
n_channels - efx->n_tx_channels;
efx->n_rx_channels =
@@ -1281,7 +1281,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
time = jiffies - channel->rfs_last_expiry;
/* Would our quota be >= 20? */
if (channel->rfs_filter_count * time >= 600 * HZ)
- mod_delayed_work(system_wq, &channel->filter_work, 0);
+ mod_delayed_work(system_percpu_wq, &channel->filter_work, 0);
#endif
/* There is no race here; although napi_disable() will
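
The clamp() conversions in this series (here and in the falcon and siena copies below) are mechanical: clamp(val, lo, hi) computes min(max(val, lo), hi), e.g.:

	/* These two assignments produce the same value */
	n = min(max(n_channels / 2, 1U), efx->max_tx_channels);
	n = clamp(n_channels / 2, 1U, efx->max_tx_channels);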
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index 5a14d94163b1..e8fdbb62d872 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -1258,9 +1258,6 @@ out:
/* For simplicity and reliability, we always require a slot reset and try to
* reset the hardware when a pci error affecting the device is detected.
- * We leave both the link_reset and mmio_enabled callback unimplemented:
- * with our request for slot reset the mmio_enabled callback will never be
- * called, and the link_reset callback is not used by AER or EEH mechanisms.
*/
const struct pci_error_handlers efx_err_handlers = {
.error_detected = efx_io_error_detected,
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 23c6a7df78d0..18fe5850a978 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -217,7 +217,8 @@ static int efx_ethtool_set_wol(struct net_device *net_dev,
}
static void efx_ethtool_get_fec_stats(struct net_device *net_dev,
- struct ethtool_fec_stats *fec_stats)
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index b07f7e4e2877..6ea41f6c9ef5 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -1394,9 +1394,8 @@ static int ef4_probe_interrupts(struct ef4_nic *efx)
if (n_channels > extra_channels)
n_channels -= extra_channels;
if (ef4_separate_tx_channels) {
- efx->n_tx_channels = min(max(n_channels / 2,
- 1U),
- efx->max_tx_channels);
+ efx->n_tx_channels = clamp(n_channels / 2, 1U,
+ efx->max_tx_channels);
efx->n_rx_channels = max(n_channels -
efx->n_tx_channels,
1U);
@@ -3128,9 +3127,6 @@ out:
/* For simplicity and reliability, we always require a slot reset and try to
* reset the hardware when a pci error affecting the device is detected.
- * We leave both the link_reset and mmio_enabled callback unimplemented:
- * with our request for slot reset the mmio_enabled callback will never be
- * called, and the link_reset callback is not used by AER or EEH mechanisms.
*/
static const struct pci_error_handlers ef4_err_handlers = {
.error_detected = ef4_io_error_detected,
diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c
index d120b3c83ac0..fc075ab6b7b5 100644
--- a/drivers/net/ethernet/sfc/siena/efx_channels.c
+++ b/drivers/net/ethernet/sfc/siena/efx_channels.c
@@ -217,8 +217,8 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
if (efx_siena_separate_tx_channels) {
efx->n_tx_channels =
- min(max(n_channels / 2, 1U),
- efx->max_tx_channels);
+ clamp(n_channels / 2, 1U,
+ efx->max_tx_channels);
efx->tx_channel_offset =
n_channels - efx->n_tx_channels;
efx->n_rx_channels =
@@ -1300,7 +1300,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
time = jiffies - channel->rfs_last_expiry;
/* Would our quota be >= 20? */
if (channel->rfs_filter_count * time >= 600 * HZ)
- mod_delayed_work(system_wq, &channel->filter_work, 0);
+ mod_delayed_work(system_percpu_wq, &channel->filter_work, 0);
#endif
/* There is no race here; although napi_disable() will
diff --git a/drivers/net/ethernet/sfc/siena/efx_common.c b/drivers/net/ethernet/sfc/siena/efx_common.c
index a0966f879664..35036cc902fe 100644
--- a/drivers/net/ethernet/sfc/siena/efx_common.c
+++ b/drivers/net/ethernet/sfc/siena/efx_common.c
@@ -1285,9 +1285,6 @@ out:
/* For simplicity and reliability, we always require a slot reset and try to
* reset the hardware when a pci error affecting the device is detected.
- * We leave both the link_reset and mmio_enabled callback unimplemented:
- * with our request for slot reset the mmio_enabled callback will never be
- * called, and the link_reset callback is not used by AER or EEH mechanisms.
*/
const struct pci_error_handlers efx_siena_err_handlers = {
.error_detected = efx_io_error_detected,
diff --git a/drivers/net/ethernet/sfc/siena/ethtool.c b/drivers/net/ethernet/sfc/siena/ethtool.c
index 994909789bfe..8c3ebd0617fb 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool.c
+++ b/drivers/net/ethernet/sfc/siena/ethtool.c
@@ -217,7 +217,8 @@ static int efx_ethtool_set_wol(struct net_device *net_dev,
}
static void efx_ethtool_get_fec_stats(struct net_device *net_dev,
- struct ethtool_fec_stats *fec_stats)
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
struct efx_nic *efx = netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/sfc/tc_encap_actions.c b/drivers/net/ethernet/sfc/tc_encap_actions.c
index 2258f854e5be..eef06e48185d 100644
--- a/drivers/net/ethernet/sfc/tc_encap_actions.c
+++ b/drivers/net/ethernet/sfc/tc_encap_actions.c
@@ -11,6 +11,8 @@
#include "tc_encap_actions.h"
#include "tc.h"
#include "mae.h"
+#include <net/flow.h>
+#include <net/inet_dscp.h>
#include <net/vxlan.h>
#include <net/geneve.h>
#include <net/netevent.h>
@@ -99,7 +101,7 @@ static int efx_bind_neigh(struct efx_nic *efx,
case EFX_ENCAP_TYPE_GENEVE:
flow4.flowi4_proto = IPPROTO_UDP;
flow4.fl4_dport = encap->key.tp_dst;
- flow4.flowi4_tos = encap->key.tos;
+ flow4.flowi4_dscp = inet_dsfield_to_dscp(encap->key.tos);
flow4.daddr = encap->key.u.ipv4.dst;
flow4.saddr = encap->key.u.ipv4.src;
break;
@@ -442,7 +444,7 @@ static void efx_tc_update_encap(struct efx_nic *efx,
rule = container_of(acts, struct efx_tc_flow_rule, acts);
if (rule->fallback)
fallback = rule->fallback;
- else /* fallback: deliver to PF */
+ else /* fallback of the fallback: deliver to PF */
fallback = &efx->tc->facts.pf;
rc = efx_mae_update_rule(efx, fallback->fw_id,
rule->fw_id);
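
The flowi4_dscp conversion above drops the ECN bits that the old flowi4_tos field carried along with the DSCP. A minimal sketch of what inet_dsfield_to_dscp() does, assuming the usual INET_DSCP_MASK of 0xfc:

	#include <net/inet_dscp.h>

	u8 dsfield = (46 << 2) | 0x1;	/* DSCP "EF" (0xb8) plus a stray ECN bit */
	dscp_t dscp = inet_dsfield_to_dscp(dsfield);	/* keeps 0xb8, drops ECN */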
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 6ca290f7c0df..3ebd0664c697 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2162,10 +2162,20 @@ static const struct net_device_ops smsc911x_netdev_ops = {
static void smsc911x_read_mac_address(struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
- u32 mac_high16 = smsc911x_mac_read(pdata, ADDRH);
- u32 mac_low32 = smsc911x_mac_read(pdata, ADDRL);
+ u32 mac_high16, mac_low32;
u8 addr[ETH_ALEN];
+ mac_high16 = smsc911x_mac_read(pdata, ADDRH);
+ mac_low32 = smsc911x_mac_read(pdata, ADDRL);
+
+ /* The first mac_read in some setups can incorrectly read 0. Re-read it
+ * to get the full MAC if this is observed.
+ */
+ if (mac_high16 == 0) {
+ SMSC_TRACE(pdata, probe, "Re-read MAC ADDRH\n");
+ mac_high16 = smsc911x_mac_read(pdata, ADDRH);
+ }
+
addr[0] = (u8)(mac_low32);
addr[1] = (u8)(mac_low32 >> 8);
addr[2] = (u8)(mac_low32 >> 16);
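
For clarity, the ADDRH/ADDRL pair decoded above packs the MAC address little-endian per register; assuming the usual layout where ADDRH holds the top two bytes:

	/* ADDRL = 0x33221100, ADDRH = 0x00005544  =>  MAC 00:11:22:33:44:55 */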
diff --git a/drivers/net/ethernet/spacemit/Kconfig b/drivers/net/ethernet/spacemit/Kconfig
new file mode 100644
index 000000000000..85ef61a9b4ef
--- /dev/null
+++ b/drivers/net/ethernet/spacemit/Kconfig
@@ -0,0 +1,29 @@
+config NET_VENDOR_SPACEMIT
+ bool "SpacemiT devices"
+ default y
+ depends on ARCH_SPACEMIT || COMPILE_TEST
+ help
+ If you have a network (Ethernet) device belonging to this class,
+ say Y.
+
+ Note that the answer to this question does not directly affect
+ the kernel: saying N will just cause the configurator to skip all
+ the questions regarding SpacemiT devices. If you say Y, you will
+ be asked for your specific chipset/driver in the following questions.
+
+if NET_VENDOR_SPACEMIT
+
+config SPACEMIT_K1_EMAC
+ tristate "SpacemiT K1 Ethernet MAC driver"
+ depends on ARCH_SPACEMIT || COMPILE_TEST
+ depends on MFD_SYSCON
+ depends on OF
+ default m if ARCH_SPACEMIT
+ select PHYLIB
+ help
+ This driver supports the Ethernet MAC in the SpacemiT K1 SoC.
+
+ To compile this driver as a module, choose M here: the module
+ will be called k1_emac.
+
+endif # NET_VENDOR_SPACEMIT
diff --git a/drivers/net/ethernet/spacemit/Makefile b/drivers/net/ethernet/spacemit/Makefile
new file mode 100644
index 000000000000..d29efd997a4f
--- /dev/null
+++ b/drivers/net/ethernet/spacemit/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the SpacemiT network device drivers.
+#
+
+obj-$(CONFIG_SPACEMIT_K1_EMAC) += k1_emac.o
diff --git a/drivers/net/ethernet/spacemit/k1_emac.c b/drivers/net/ethernet/spacemit/k1_emac.c
new file mode 100644
index 000000000000..e1c5faff3b71
--- /dev/null
+++ b/drivers/net/ethernet/spacemit/k1_emac.c
@@ -0,0 +1,2159 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SpacemiT K1 Ethernet driver
+ *
+ * Copyright (C) 2023-2025 SpacemiT (Hangzhou) Technology Co. Ltd
+ * Copyright (C) 2025 Vivian Wang <wangruikang@iscas.ac.cn>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/rtnetlink.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+
+#include "k1_emac.h"
+
+#define DRIVER_NAME "k1_emac"
+
+#define EMAC_DEFAULT_BUFSIZE 1536
+#define EMAC_RX_BUF_2K 2048
+#define EMAC_RX_BUF_4K 4096
+
+/* Tuning parameters from SpacemiT */
+#define EMAC_TX_FRAMES 64
+#define EMAC_TX_COAL_TIMEOUT 40000
+#define EMAC_RX_FRAMES 64
+#define EMAC_RX_COAL_TIMEOUT (600 * 312)
+
+#define DEFAULT_FC_PAUSE_TIME 0xffff
+#define DEFAULT_FC_FIFO_HIGH 1600
+#define DEFAULT_TX_ALMOST_FULL 0x1f8
+#define DEFAULT_TX_THRESHOLD 1518
+#define DEFAULT_RX_THRESHOLD 12
+#define DEFAULT_TX_RING_NUM 1024
+#define DEFAULT_RX_RING_NUM 1024
+#define DEFAULT_DMA_BURST MREGBIT_BURST_16WORD
+#define HASH_TABLE_SIZE 64
+
+struct desc_buf {
+ u64 dma_addr;
+ void *buff_addr;
+ u16 dma_len;
+ u8 map_as_page;
+};
+
+struct emac_tx_desc_buffer {
+ struct sk_buff *skb;
+ struct desc_buf buf[2];
+};
+
+struct emac_rx_desc_buffer {
+ struct sk_buff *skb;
+ u64 dma_addr;
+ void *buff_addr;
+ u16 dma_len;
+ u8 map_as_page;
+};
+
+/**
+ * struct emac_desc_ring - Software-side information for one descriptor ring
+ * @desc_addr: Virtual address to the descriptor ring memory
+ * @desc_dma_addr: DMA address of the descriptor ring
+ * @total_size: Size of ring in bytes
+ * @total_cnt: Number of descriptors
+ * @head: Next descriptor to associate a buffer with
+ * @tail: Next descriptor to check status bit
+ * @rx_desc_buf: Array of descriptors for RX
+ * @tx_desc_buf: Array of descriptors for TX, with max of two buffers each
+ *
+ * The same structure is used for both RX and TX rings.
+ */
+struct emac_desc_ring {
+ void *desc_addr;
+ dma_addr_t desc_dma_addr;
+ u32 total_size;
+ u32 total_cnt;
+ u32 head;
+ u32 tail;
+ union {
+ struct emac_rx_desc_buffer *rx_desc_buf;
+ struct emac_tx_desc_buffer *tx_desc_buf;
+ };
+};
+
+struct emac_priv {
+ void __iomem *iobase;
+ u32 dma_buf_sz;
+ struct emac_desc_ring tx_ring;
+ struct emac_desc_ring rx_ring;
+
+ struct net_device *ndev;
+ struct napi_struct napi;
+ struct platform_device *pdev;
+ struct clk *bus_clk;
+ struct clk *ref_clk;
+ struct regmap *regmap_apmu;
+ u32 regmap_apmu_offset;
+ int irq;
+
+ phy_interface_t phy_interface;
+
+ union emac_hw_tx_stats tx_stats, tx_stats_off;
+ union emac_hw_rx_stats rx_stats, rx_stats_off;
+
+ u32 tx_count_frames;
+ u32 tx_coal_frames;
+ u32 tx_coal_timeout;
+ struct work_struct tx_timeout_task;
+
+ struct timer_list txtimer;
+ struct timer_list stats_timer;
+
+ u32 tx_delay;
+ u32 rx_delay;
+
+ bool flow_control_autoneg;
+ u8 flow_control;
+
+ /* Softirq-safe, hold while touching hardware statistics */
+ spinlock_t stats_lock;
+};
+
+static void emac_wr(struct emac_priv *priv, u32 reg, u32 val)
+{
+ writel(val, priv->iobase + reg);
+}
+
+static u32 emac_rd(struct emac_priv *priv, u32 reg)
+{
+ return readl(priv->iobase + reg);
+}
+
+static int emac_phy_interface_config(struct emac_priv *priv)
+{
+ u32 val = 0, mask = REF_CLK_SEL | RGMII_TX_CLK_SEL | PHY_INTF_RGMII;
+
+ if (phy_interface_mode_is_rgmii(priv->phy_interface))
+ val |= PHY_INTF_RGMII;
+
+ regmap_update_bits(priv->regmap_apmu,
+ priv->regmap_apmu_offset + APMU_EMAC_CTRL_REG,
+ mask, val);
+
+ return 0;
+}
+
+/*
+ * Where the hardware expects a MAC address, it is laid out in this high, med,
+ * low order in three consecutive registers and in this format.
+ */
+
+static void emac_set_mac_addr_reg(struct emac_priv *priv,
+ const unsigned char *addr,
+ u32 reg)
+{
+ emac_wr(priv, reg + sizeof(u32) * 0, addr[1] << 8 | addr[0]);
+ emac_wr(priv, reg + sizeof(u32) * 1, addr[3] << 8 | addr[2]);
+ emac_wr(priv, reg + sizeof(u32) * 2, addr[5] << 8 | addr[4]);
+}
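+
+/*
+ * Example: for MAC 00:11:22:33:44:55 the three writes above end up as
+ * reg + 0x0 = 0x1100, reg + 0x4 = 0x3322 and reg + 0x8 = 0x5544.
+ */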
+
+static void emac_set_mac_addr(struct emac_priv *priv, const unsigned char *addr)
+{
+ /* We use only one address, so set the same for flow control as well */
+ emac_set_mac_addr_reg(priv, addr, MAC_ADDRESS1_HIGH);
+ emac_set_mac_addr_reg(priv, addr, MAC_FC_SOURCE_ADDRESS_HIGH);
+}
+
+static void emac_reset_hw(struct emac_priv *priv)
+{
+ /* Disable all interrupts */
+ emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);
+ emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0);
+
+ /* Disable transmit and receive units */
+ emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0);
+ emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0);
+
+ /* Disable DMA */
+ emac_wr(priv, DMA_CONTROL, 0x0);
+}
+
+static void emac_init_hw(struct emac_priv *priv)
+{
+ /* Destination address for 802.3x Ethernet flow control */
+ u8 fc_dest_addr[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x01 };
+
+ u32 rxirq = 0, dma = 0;
+
+ regmap_set_bits(priv->regmap_apmu,
+ priv->regmap_apmu_offset + APMU_EMAC_CTRL_REG,
+ AXI_SINGLE_ID);
+
+ /* Disable transmit and receive units */
+ emac_wr(priv, MAC_RECEIVE_CONTROL, 0x0);
+ emac_wr(priv, MAC_TRANSMIT_CONTROL, 0x0);
+
+ /* Enable MAC address 1 filtering */
+ emac_wr(priv, MAC_ADDRESS_CONTROL, MREGBIT_MAC_ADDRESS1_ENABLE);
+
+ /* Zero initialize the multicast hash table */
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0);
+
+ /* Configure thresholds */
+ emac_wr(priv, MAC_TRANSMIT_FIFO_ALMOST_FULL, DEFAULT_TX_ALMOST_FULL);
+ emac_wr(priv, MAC_TRANSMIT_PACKET_START_THRESHOLD,
+ DEFAULT_TX_THRESHOLD);
+ emac_wr(priv, MAC_RECEIVE_PACKET_START_THRESHOLD, DEFAULT_RX_THRESHOLD);
+
+ /* Configure flow control (enabled in emac_adjust_link() later) */
+ emac_set_mac_addr_reg(priv, fc_dest_addr, MAC_FC_SOURCE_ADDRESS_HIGH);
+ emac_wr(priv, MAC_FC_PAUSE_HIGH_THRESHOLD, DEFAULT_FC_FIFO_HIGH);
+ emac_wr(priv, MAC_FC_HIGH_PAUSE_TIME, DEFAULT_FC_PAUSE_TIME);
+ emac_wr(priv, MAC_FC_PAUSE_LOW_THRESHOLD, 0);
+
+ /* RX IRQ mitigation */
+ rxirq = FIELD_PREP(MREGBIT_RECEIVE_IRQ_FRAME_COUNTER_MASK,
+ EMAC_RX_FRAMES);
+ rxirq |= FIELD_PREP(MREGBIT_RECEIVE_IRQ_TIMEOUT_COUNTER_MASK,
+ EMAC_RX_COAL_TIMEOUT);
+ rxirq |= MREGBIT_RECEIVE_IRQ_MITIGATION_ENABLE;
+ emac_wr(priv, DMA_RECEIVE_IRQ_MITIGATION_CTRL, rxirq);
+
+ /* Disable and set DMA config */
+ emac_wr(priv, DMA_CONTROL, 0x0);
+
+ emac_wr(priv, DMA_CONFIGURATION, MREGBIT_SOFTWARE_RESET);
+ usleep_range(9000, 10000);
+ emac_wr(priv, DMA_CONFIGURATION, 0x0);
+ usleep_range(9000, 10000);
+
+ dma |= MREGBIT_STRICT_BURST;
+ dma |= MREGBIT_DMA_64BIT_MODE;
+ dma |= DEFAULT_DMA_BURST;
+
+ emac_wr(priv, DMA_CONFIGURATION, dma);
+}
+
+static void emac_dma_start_transmit(struct emac_priv *priv)
+{
+ /* The actual value written does not matter */
+ emac_wr(priv, DMA_TRANSMIT_POLL_DEMAND, 1);
+}
+
+static void emac_enable_interrupt(struct emac_priv *priv)
+{
+ u32 val;
+
+ val = emac_rd(priv, DMA_INTERRUPT_ENABLE);
+ val |= MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
+ val |= MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE;
+ emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
+}
+
+static void emac_disable_interrupt(struct emac_priv *priv)
+{
+ u32 val;
+
+ val = emac_rd(priv, DMA_INTERRUPT_ENABLE);
+ val &= ~MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE;
+ val &= ~MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE;
+ emac_wr(priv, DMA_INTERRUPT_ENABLE, val);
+}
+
+static u32 emac_tx_avail(struct emac_priv *priv)
+{
+ struct emac_desc_ring *tx_ring = &priv->tx_ring;
+ u32 avail;
+
+ if (tx_ring->tail > tx_ring->head)
+ avail = tx_ring->tail - tx_ring->head - 1;
+ else
+ avail = tx_ring->total_cnt - tx_ring->head + tx_ring->tail - 1;
+
+ return avail;
+}
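+
+/*
+ * One slot is always kept unused so that a full ring is distinguishable
+ * from an empty one: with total_cnt == 1024, head == tail yields 1023
+ * free descriptors, while head == 10 and tail == 7 yields
+ * 1024 - 10 + 7 - 1 == 1020.
+ */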
+
+static void emac_tx_coal_timer_resched(struct emac_priv *priv)
+{
+ mod_timer(&priv->txtimer,
+ jiffies + usecs_to_jiffies(priv->tx_coal_timeout));
+}
+
+static void emac_tx_coal_timer(struct timer_list *t)
+{
+ struct emac_priv *priv = timer_container_of(priv, t, txtimer);
+
+ napi_schedule(&priv->napi);
+}
+
+static bool emac_tx_should_interrupt(struct emac_priv *priv, u32 pkt_num)
+{
+ priv->tx_count_frames += pkt_num;
+ if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
+ emac_tx_coal_timer_resched(priv);
+ return false;
+ }
+
+ priv->tx_count_frames = 0;
+ return true;
+}
+
+static void emac_free_tx_buf(struct emac_priv *priv, int i)
+{
+ struct emac_tx_desc_buffer *tx_buf;
+ struct emac_desc_ring *tx_ring;
+ struct desc_buf *buf;
+ int j;
+
+ tx_ring = &priv->tx_ring;
+ tx_buf = &tx_ring->tx_desc_buf[i];
+
+ for (j = 0; j < 2; j++) {
+ buf = &tx_buf->buf[j];
+ if (!buf->dma_addr)
+ continue;
+
+ if (buf->map_as_page)
+ dma_unmap_page(&priv->pdev->dev, buf->dma_addr,
+ buf->dma_len, DMA_TO_DEVICE);
+ else
+ dma_unmap_single(&priv->pdev->dev,
+ buf->dma_addr, buf->dma_len,
+ DMA_TO_DEVICE);
+
+ buf->dma_addr = 0;
+ buf->map_as_page = false;
+ buf->buff_addr = NULL;
+ }
+
+ if (tx_buf->skb) {
+ dev_kfree_skb_any(tx_buf->skb);
+ tx_buf->skb = NULL;
+ }
+}
+
+static void emac_clean_tx_desc_ring(struct emac_priv *priv)
+{
+ struct emac_desc_ring *tx_ring = &priv->tx_ring;
+ u32 i;
+
+ for (i = 0; i < tx_ring->total_cnt; i++)
+ emac_free_tx_buf(priv, i);
+
+ tx_ring->head = 0;
+ tx_ring->tail = 0;
+}
+
+static void emac_clean_rx_desc_ring(struct emac_priv *priv)
+{
+ struct emac_rx_desc_buffer *rx_buf;
+ struct emac_desc_ring *rx_ring;
+ u32 i;
+
+ rx_ring = &priv->rx_ring;
+
+ for (i = 0; i < rx_ring->total_cnt; i++) {
+ rx_buf = &rx_ring->rx_desc_buf[i];
+
+ if (!rx_buf->skb)
+ continue;
+
+ dma_unmap_single(&priv->pdev->dev, rx_buf->dma_addr,
+ rx_buf->dma_len, DMA_FROM_DEVICE);
+
+ dev_kfree_skb(rx_buf->skb);
+ rx_buf->skb = NULL;
+ }
+
+ rx_ring->tail = 0;
+ rx_ring->head = 0;
+}
+
+static int emac_alloc_tx_resources(struct emac_priv *priv)
+{
+ struct emac_desc_ring *tx_ring = &priv->tx_ring;
+ struct platform_device *pdev = priv->pdev;
+
+ tx_ring->tx_desc_buf = kcalloc(tx_ring->total_cnt,
+ sizeof(*tx_ring->tx_desc_buf),
+ GFP_KERNEL);
+
+ if (!tx_ring->tx_desc_buf)
+ return -ENOMEM;
+
+ tx_ring->total_size = tx_ring->total_cnt * sizeof(struct emac_desc);
+ tx_ring->total_size = ALIGN(tx_ring->total_size, PAGE_SIZE);
+
+ tx_ring->desc_addr = dma_alloc_coherent(&pdev->dev, tx_ring->total_size,
+ &tx_ring->desc_dma_addr,
+ GFP_KERNEL);
+ if (!tx_ring->desc_addr) {
+ kfree(tx_ring->tx_desc_buf);
+ return -ENOMEM;
+ }
+
+ tx_ring->head = 0;
+ tx_ring->tail = 0;
+
+ return 0;
+}
+
+static int emac_alloc_rx_resources(struct emac_priv *priv)
+{
+ struct emac_desc_ring *rx_ring = &priv->rx_ring;
+ struct platform_device *pdev = priv->pdev;
+
+ rx_ring->rx_desc_buf = kcalloc(rx_ring->total_cnt,
+ sizeof(*rx_ring->rx_desc_buf),
+ GFP_KERNEL);
+ if (!rx_ring->rx_desc_buf)
+ return -ENOMEM;
+
+ rx_ring->total_size = rx_ring->total_cnt * sizeof(struct emac_desc);
+
+ rx_ring->total_size = ALIGN(rx_ring->total_size, PAGE_SIZE);
+
+ rx_ring->desc_addr = dma_alloc_coherent(&pdev->dev, rx_ring->total_size,
+ &rx_ring->desc_dma_addr,
+ GFP_KERNEL);
+ if (!rx_ring->desc_addr) {
+ kfree(rx_ring->rx_desc_buf);
+ return -ENOMEM;
+ }
+
+ rx_ring->head = 0;
+ rx_ring->tail = 0;
+
+ return 0;
+}
+
+static void emac_free_tx_resources(struct emac_priv *priv)
+{
+ struct emac_desc_ring *tr = &priv->tx_ring;
+ struct device *dev = &priv->pdev->dev;
+
+ emac_clean_tx_desc_ring(priv);
+
+ kfree(tr->tx_desc_buf);
+ tr->tx_desc_buf = NULL;
+
+ dma_free_coherent(dev, tr->total_size, tr->desc_addr,
+ tr->desc_dma_addr);
+ tr->desc_addr = NULL;
+}
+
+static void emac_free_rx_resources(struct emac_priv *priv)
+{
+ struct emac_desc_ring *rr = &priv->rx_ring;
+ struct device *dev = &priv->pdev->dev;
+
+ emac_clean_rx_desc_ring(priv);
+
+ kfree(rr->rx_desc_buf);
+ rr->rx_desc_buf = NULL;
+
+ dma_free_coherent(dev, rr->total_size, rr->desc_addr,
+ rr->desc_dma_addr);
+ rr->desc_addr = NULL;
+}
+
+static int emac_tx_clean_desc(struct emac_priv *priv)
+{
+ struct net_device *ndev = priv->ndev;
+ struct emac_desc_ring *tx_ring;
+ struct emac_desc *tx_desc;
+ u32 i;
+
+ netif_tx_lock(ndev);
+
+ tx_ring = &priv->tx_ring;
+
+ i = tx_ring->tail;
+
+ while (i != tx_ring->head) {
+ tx_desc = &((struct emac_desc *)tx_ring->desc_addr)[i];
+
+ /* Stop checking if desc still own by DMA */
+ if (READ_ONCE(tx_desc->desc0) & TX_DESC_0_OWN)
+ break;
+
+ emac_free_tx_buf(priv, i);
+ memset(tx_desc, 0, sizeof(struct emac_desc));
+
+ if (++i == tx_ring->total_cnt)
+ i = 0;
+ }
+
+ tx_ring->tail = i;
+
+ if (unlikely(netif_queue_stopped(ndev) &&
+ emac_tx_avail(priv) > tx_ring->total_cnt / 4))
+ netif_wake_queue(ndev);
+
+ netif_tx_unlock(ndev);
+
+ return 0;
+}
+
+static bool emac_rx_frame_good(struct emac_priv *priv, struct emac_desc *desc)
+{
+ const char *msg;
+ u32 len;
+
+ len = FIELD_GET(RX_DESC_0_FRAME_PACKET_LENGTH_MASK, desc->desc0);
+
+ if (WARN_ON_ONCE(!(desc->desc0 & RX_DESC_0_LAST_DESCRIPTOR)))
+ msg = "Not last descriptor"; /* This would be a bug */
+ else if (desc->desc0 & RX_DESC_0_FRAME_RUNT)
+ msg = "Runt frame";
+ else if (desc->desc0 & RX_DESC_0_FRAME_CRC_ERR)
+ msg = "Frame CRC error";
+ else if (desc->desc0 & RX_DESC_0_FRAME_MAX_LEN_ERR)
+ msg = "Frame exceeds max length";
+ else if (desc->desc0 & RX_DESC_0_FRAME_JABBER_ERR)
+ msg = "Frame jabber error";
+ else if (desc->desc0 & RX_DESC_0_FRAME_LENGTH_ERR)
+ msg = "Frame length error";
+ else if (len <= ETH_FCS_LEN || len > priv->dma_buf_sz)
+ msg = "Frame length unacceptable";
+ else
+ return true; /* All good */
+
+ dev_dbg_ratelimited(&priv->ndev->dev, "RX error: %s", msg);
+
+ return false;
+}
+
+static void emac_alloc_rx_desc_buffers(struct emac_priv *priv)
+{
+ struct emac_desc_ring *rx_ring = &priv->rx_ring;
+ struct emac_desc rx_desc, *rx_desc_addr;
+ struct net_device *ndev = priv->ndev;
+ struct emac_rx_desc_buffer *rx_buf;
+ struct sk_buff *skb;
+ u32 i;
+
+ i = rx_ring->head;
+ rx_buf = &rx_ring->rx_desc_buf[i];
+
+ while (!rx_buf->skb) {
+ skb = netdev_alloc_skb_ip_align(ndev, priv->dma_buf_sz);
+ if (!skb)
+ break;
+
+ skb->dev = ndev;
+
+ rx_buf->skb = skb;
+ rx_buf->dma_len = priv->dma_buf_sz;
+ rx_buf->dma_addr = dma_map_single(&priv->pdev->dev, skb->data,
+ priv->dma_buf_sz,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&priv->pdev->dev, rx_buf->dma_addr)) {
+ dev_err_ratelimited(&ndev->dev, "Mapping skb failed\n");
+ goto err_free_skb;
+ }
+
+ rx_desc_addr = &((struct emac_desc *)rx_ring->desc_addr)[i];
+
+ memset(&rx_desc, 0, sizeof(rx_desc));
+
+ rx_desc.buffer_addr_1 = rx_buf->dma_addr;
+ rx_desc.desc1 = FIELD_PREP(RX_DESC_1_BUFFER_SIZE_1_MASK,
+ rx_buf->dma_len);
+
+ if (++i == rx_ring->total_cnt) {
+ rx_desc.desc1 |= RX_DESC_1_END_RING;
+ i = 0;
+ }
+
+ *rx_desc_addr = rx_desc;
+ dma_wmb();
+ WRITE_ONCE(rx_desc_addr->desc0, rx_desc.desc0 | RX_DESC_0_OWN);
+
+ rx_buf = &rx_ring->rx_desc_buf[i];
+ }
+
+ rx_ring->head = i;
+ return;
+
+err_free_skb:
+ dev_kfree_skb_any(skb);
+ rx_buf->skb = NULL;
+}
+
+/* Returns number of packets received */
+static int emac_rx_clean_desc(struct emac_priv *priv, int budget)
+{
+ struct net_device *ndev = priv->ndev;
+ struct emac_rx_desc_buffer *rx_buf;
+ struct emac_desc_ring *rx_ring;
+ struct sk_buff *skb = NULL;
+ struct emac_desc *rx_desc;
+ u32 got = 0, skb_len, i;
+
+ rx_ring = &priv->rx_ring;
+
+ i = rx_ring->tail;
+
+ while (budget--) {
+ rx_desc = &((struct emac_desc *)rx_ring->desc_addr)[i];
+
+ /* Stop checking if rx_desc still owned by DMA */
+ if (READ_ONCE(rx_desc->desc0) & RX_DESC_0_OWN)
+ break;
+
+ dma_rmb();
+
+ rx_buf = &rx_ring->rx_desc_buf[i];
+
+ if (!rx_buf->skb)
+ break;
+
+ got++;
+
+ dma_unmap_single(&priv->pdev->dev, rx_buf->dma_addr,
+ rx_buf->dma_len, DMA_FROM_DEVICE);
+
+ if (likely(emac_rx_frame_good(priv, rx_desc))) {
+ skb = rx_buf->skb;
+
+ skb_len = FIELD_GET(RX_DESC_0_FRAME_PACKET_LENGTH_MASK,
+ rx_desc->desc0);
+ skb_len -= ETH_FCS_LEN;
+
+ skb_put(skb, skb_len);
+ skb->dev = ndev;
+ ndev->hard_header_len = ETH_HLEN;
+
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ napi_gro_receive(&priv->napi, skb);
+
+ memset(rx_desc, 0, sizeof(struct emac_desc));
+ rx_buf->skb = NULL;
+ } else {
+ dev_kfree_skb_irq(rx_buf->skb);
+ rx_buf->skb = NULL;
+ }
+
+ if (++i == rx_ring->total_cnt)
+ i = 0;
+ }
+
+ rx_ring->tail = i;
+
+ emac_alloc_rx_desc_buffers(priv);
+
+ return got;
+}
+
+static int emac_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct emac_priv *priv = container_of(napi, struct emac_priv, napi);
+ int work_done;
+
+ emac_tx_clean_desc(priv);
+
+ work_done = emac_rx_clean_desc(priv, budget);
+ if (work_done < budget && napi_complete_done(napi, work_done))
+ emac_enable_interrupt(priv);
+
+ return work_done;
+}
+
+/*
+ * For convenience, skb->data is fragment 0, frags[0] is fragment 1, etc.
+ *
+ * Each descriptor can hold up to two fragments, called buffer 1 and 2. For each
+ * fragment f, if f % 2 == 0, it uses buffer 1, otherwise it uses buffer 2.
+ */
+
+static int emac_tx_map_frag(struct device *dev, struct emac_desc *tx_desc,
+ struct emac_tx_desc_buffer *tx_buf,
+ struct sk_buff *skb, u32 frag_idx)
+{
+ bool map_as_page, buf_idx;
+ const skb_frag_t *frag;
+ phys_addr_t addr;
+ u32 len;
+ int ret;
+
+ buf_idx = frag_idx % 2;
+
+ if (frag_idx == 0) {
+ /* Non-fragmented part */
+ len = skb_headlen(skb);
+ addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
+ map_as_page = false;
+ } else {
+ /* Fragment */
+ frag = &skb_shinfo(skb)->frags[frag_idx - 1];
+ len = skb_frag_size(frag);
+ addr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
+ map_as_page = true;
+ }
+
+ ret = dma_mapping_error(dev, addr);
+ if (ret)
+ return ret;
+
+ tx_buf->buf[buf_idx].dma_addr = addr;
+ tx_buf->buf[buf_idx].dma_len = len;
+ tx_buf->buf[buf_idx].map_as_page = map_as_page;
+
+ if (buf_idx == 0) {
+ tx_desc->buffer_addr_1 = addr;
+ tx_desc->desc1 |= FIELD_PREP(TX_DESC_1_BUFFER_SIZE_1_MASK, len);
+ } else {
+ tx_desc->buffer_addr_2 = addr;
+ tx_desc->desc1 |= FIELD_PREP(TX_DESC_1_BUFFER_SIZE_2_MASK, len);
+ }
+
+ return 0;
+}
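+
+/*
+ * Example: an skb with a linear part and three page fragments maps as
+ * four fragments f = 0..3, packed two per descriptor:
+ *   desc 0: buffer 1 = skb->data (f = 0), buffer 2 = frags[0] (f = 1)
+ *   desc 1: buffer 1 = frags[1] (f = 2), buffer 2 = frags[2] (f = 3)
+ */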
+
+static void emac_tx_mem_map(struct emac_priv *priv, struct sk_buff *skb)
+{
+ struct emac_desc_ring *tx_ring = &priv->tx_ring;
+ struct emac_desc tx_desc, *tx_desc_addr;
+ struct device *dev = &priv->pdev->dev;
+ struct emac_tx_desc_buffer *tx_buf;
+ u32 head, old_head, frag_num, f;
+ bool buf_idx;
+
+ frag_num = skb_shinfo(skb)->nr_frags;
+ head = tx_ring->head;
+ old_head = head;
+
+ for (f = 0; f < frag_num + 1; f++) {
+ buf_idx = f % 2;
+
+ /*
+ * If using buffer 1, initialize a new desc. Otherwise, use
+ * buffer 2 of previous fragment's desc.
+ */
+ if (!buf_idx) {
+ tx_buf = &tx_ring->tx_desc_buf[head];
+ tx_desc_addr =
+ &((struct emac_desc *)tx_ring->desc_addr)[head];
+ memset(&tx_desc, 0, sizeof(tx_desc));
+
+ /*
+ * Give ownership for all but first desc initially. For
+ * first desc, give at the end so DMA cannot start
+ * reading uninitialized descs.
+ */
+ if (head != old_head)
+ tx_desc.desc0 |= TX_DESC_0_OWN;
+
+ if (++head == tx_ring->total_cnt) {
+ /* Just used last desc in ring */
+ tx_desc.desc1 |= TX_DESC_1_END_RING;
+ head = 0;
+ }
+ }
+
+ if (emac_tx_map_frag(dev, &tx_desc, tx_buf, skb, f)) {
+ dev_err_ratelimited(&priv->ndev->dev,
+ "Map TX frag %d failed\n", f);
+ goto err_free_skb;
+ }
+
+ if (f == 0)
+ tx_desc.desc1 |= TX_DESC_1_FIRST_SEGMENT;
+
+ if (f == frag_num) {
+ tx_desc.desc1 |= TX_DESC_1_LAST_SEGMENT;
+ tx_buf->skb = skb;
+ if (emac_tx_should_interrupt(priv, frag_num + 1))
+ tx_desc.desc1 |=
+ TX_DESC_1_INTERRUPT_ON_COMPLETION;
+ }
+
+ *tx_desc_addr = tx_desc;
+ }
+
+ /* All descriptors are ready, give ownership for first desc */
+ tx_desc_addr = &((struct emac_desc *)tx_ring->desc_addr)[old_head];
+ dma_wmb();
+ WRITE_ONCE(tx_desc_addr->desc0, tx_desc_addr->desc0 | TX_DESC_0_OWN);
+
+ emac_dma_start_transmit(priv);
+
+ tx_ring->head = head;
+
+ return;
+
+err_free_skb:
+ dev_dstats_tx_dropped(priv->ndev);
+ dev_kfree_skb_any(skb);
+}
+
+static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ int nfrags = skb_shinfo(skb)->nr_frags;
+ struct device *dev = &priv->pdev->dev;
+
+ if (unlikely(emac_tx_avail(priv) < nfrags + 1)) {
+ if (!netif_queue_stopped(ndev)) {
+ netif_stop_queue(ndev);
+ dev_err_ratelimited(dev, "TX ring full, stop TX queue\n");
+ }
+ return NETDEV_TX_BUSY;
+ }
+
+ emac_tx_mem_map(priv, skb);
+
+ /* Make sure there is space in the ring for the next TX. */
+ if (unlikely(emac_tx_avail(priv) <= MAX_SKB_FRAGS + 2))
+ netif_stop_queue(ndev);
+
+ return NETDEV_TX_OK;
+}
+
+static int emac_set_mac_address(struct net_device *ndev, void *addr)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ int ret = eth_mac_addr(ndev, addr);
+
+ if (ret)
+ return ret;
+
+ /* If running, set now; if not running it will be set in emac_up. */
+ if (netif_running(ndev))
+ emac_set_mac_addr(priv, ndev->dev_addr);
+
+ return 0;
+}
+
+static void emac_mac_multicast_filter_clear(struct emac_priv *priv)
+{
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0x0);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0x0);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0x0);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0x0);
+}
+
+/*
+ * The upper 6 bits of the Ethernet CRC of the MAC address is used as the hash
+ * when matching multicast addresses.
+ */
+static u32 emac_ether_addr_hash(u8 addr[ETH_ALEN])
+{
+ u32 crc32 = ether_crc(ETH_ALEN, addr);
+
+ return crc32 >> 26;
+}
+
+/* Configure Multicast and Promiscuous modes */
+static void emac_set_rx_mode(struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct netdev_hw_addr *ha;
+ u32 mc_filter[4] = { 0 };
+ u32 hash, reg, bit, val;
+
+ val = emac_rd(priv, MAC_ADDRESS_CONTROL);
+
+ val &= ~MREGBIT_PROMISCUOUS_MODE;
+
+ if (ndev->flags & IFF_PROMISC) {
+ /* Enable promisc mode */
+ val |= MREGBIT_PROMISCUOUS_MODE;
+ } else if ((ndev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(ndev) > HASH_TABLE_SIZE)) {
+ /* Accept all multicast frames by setting every bit */
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, 0xffff);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, 0xffff);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, 0xffff);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, 0xffff);
+ } else if (!netdev_mc_empty(ndev)) {
+ emac_mac_multicast_filter_clear(priv);
+ netdev_for_each_mc_addr(ha, ndev) {
+ /*
+ * The hash table is an array of 4 16-bit registers. It
+ * is treated like an array of 64 bits (bits[hash]).
+ */
+ hash = emac_ether_addr_hash(ha->addr);
+ reg = hash / 16;
+ bit = hash % 16;
+ mc_filter[reg] |= BIT(bit);
+ }
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE1, mc_filter[0]);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE2, mc_filter[1]);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE3, mc_filter[2]);
+ emac_wr(priv, MAC_MULTICAST_HASH_TABLE4, mc_filter[3]);
+ }
+
+ emac_wr(priv, MAC_ADDRESS_CONTROL, val);
+}
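+
+/*
+ * Example: a multicast address hashing to 37 selects reg = 37 / 16 = 2
+ * and bit = 37 % 16 = 5, i.e. bit 5 of MAC_MULTICAST_HASH_TABLE3.
+ */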
+
+static int emac_change_mtu(struct net_device *ndev, int mtu)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ u32 frame_len;
+
+ if (netif_running(ndev)) {
+ netdev_err(ndev, "must be stopped to change MTU\n");
+ return -EBUSY;
+ }
+
+ frame_len = mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ if (frame_len <= EMAC_DEFAULT_BUFSIZE)
+ priv->dma_buf_sz = EMAC_DEFAULT_BUFSIZE;
+ else if (frame_len <= EMAC_RX_BUF_2K)
+ priv->dma_buf_sz = EMAC_RX_BUF_2K;
+ else
+ priv->dma_buf_sz = EMAC_RX_BUF_4K;
+
+ ndev->mtu = mtu;
+
+ return 0;
+}
+
+static void emac_tx_timeout(struct net_device *ndev, unsigned int txqueue)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ schedule_work(&priv->tx_timeout_task);
+}
+
+static int emac_mii_read(struct mii_bus *bus, int phy_addr, int regnum)
+{
+ struct emac_priv *priv = bus->priv;
+ u32 cmd = 0, val;
+ int ret;
+
+ cmd |= FIELD_PREP(MREGBIT_PHY_ADDRESS, phy_addr);
+ cmd |= FIELD_PREP(MREGBIT_REGISTER_ADDRESS, regnum);
+ cmd |= MREGBIT_START_MDIO_TRANS | MREGBIT_MDIO_READ_WRITE;
+
+ emac_wr(priv, MAC_MDIO_DATA, 0x0);
+ emac_wr(priv, MAC_MDIO_CONTROL, cmd);
+
+ ret = readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
+ !(val & MREGBIT_START_MDIO_TRANS), 100, 10000);
+
+ if (ret)
+ return ret;
+
+ val = emac_rd(priv, MAC_MDIO_DATA);
+ return FIELD_GET(MREGBIT_MDIO_DATA, val);
+}
+
+static int emac_mii_write(struct mii_bus *bus, int phy_addr, int regnum,
+ u16 value)
+{
+ struct emac_priv *priv = bus->priv;
+ u32 cmd = 0, val;
+ int ret;
+
+ emac_wr(priv, MAC_MDIO_DATA, value);
+
+ cmd |= FIELD_PREP(MREGBIT_PHY_ADDRESS, phy_addr);
+ cmd |= FIELD_PREP(MREGBIT_REGISTER_ADDRESS, regnum);
+ cmd |= MREGBIT_START_MDIO_TRANS;
+
+ emac_wr(priv, MAC_MDIO_CONTROL, cmd);
+
+ ret = readl_poll_timeout(priv->iobase + MAC_MDIO_CONTROL, val,
+ !(val & MREGBIT_START_MDIO_TRANS), 100, 10000);
+
+ return ret;
+}
+
+static int emac_mdio_init(struct emac_priv *priv)
+{
+ struct device *dev = &priv->pdev->dev;
+ struct device_node *mii_np;
+ struct mii_bus *mii;
+ int ret;
+
+ mii = devm_mdiobus_alloc(dev);
+ if (!mii)
+ return -ENOMEM;
+
+ mii->priv = priv;
+ mii->name = "k1_emac_mii";
+ mii->read = emac_mii_read;
+ mii->write = emac_mii_write;
+ mii->parent = dev;
+ mii->phy_mask = ~0;
+ snprintf(mii->id, MII_BUS_ID_SIZE, "%s", priv->pdev->name);
+
+ mii_np = of_get_available_child_by_name(dev->of_node, "mdio-bus");
+
+ ret = devm_of_mdiobus_register(dev, mii, mii_np);
+ if (ret)
+ dev_err_probe(dev, ret, "Failed to register mdio bus\n");
+
+ of_node_put(mii_np);
+ return ret;
+}
+
+static void emac_set_tx_fc(struct emac_priv *priv, bool enable)
+{
+ u32 val;
+
+ val = emac_rd(priv, MAC_FC_CONTROL);
+
+ FIELD_MODIFY(MREGBIT_FC_GENERATION_ENABLE, &val, enable);
+ FIELD_MODIFY(MREGBIT_AUTO_FC_GENERATION_ENABLE, &val, enable);
+
+ emac_wr(priv, MAC_FC_CONTROL, val);
+}
+
+static void emac_set_rx_fc(struct emac_priv *priv, bool enable)
+{
+ u32 val = emac_rd(priv, MAC_FC_CONTROL);
+
+ FIELD_MODIFY(MREGBIT_FC_DECODE_ENABLE, &val, enable);
+
+ emac_wr(priv, MAC_FC_CONTROL, val);
+}
+
+static void emac_set_fc(struct emac_priv *priv, u8 fc)
+{
+ emac_set_tx_fc(priv, fc & FLOW_CTRL_TX);
+ emac_set_rx_fc(priv, fc & FLOW_CTRL_RX);
+ priv->flow_control = fc;
+}
+
+static void emac_set_fc_autoneg(struct emac_priv *priv)
+{
+ struct phy_device *phydev = priv->ndev->phydev;
+ u32 local_adv, remote_adv;
+ u8 fc;
+
+ local_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
+
+ remote_adv = 0;
+
+ if (phydev->pause)
+ remote_adv |= LPA_PAUSE_CAP;
+
+ if (phydev->asym_pause)
+ remote_adv |= LPA_PAUSE_ASYM;
+
+ fc = mii_resolve_flowctrl_fdx(local_adv, remote_adv);
+
+ priv->flow_control_autoneg = true;
+
+ emac_set_fc(priv, fc);
+}
+
+/*
+ * Even though this MAC supports gigabit operation, it only provides 32-bit
+ * statistics counters. The most overflow-prone counters are the "bytes" ones,
+ * which, at gigabit speeds, overflow about twice a minute.
+ *
+ * Therefore, we maintain the high 32 bits of each counter ourselves,
+ * incrementing whenever a counter appears to go backwards. We also update
+ * periodically, to catch overflows when we are not otherwise checking the
+ * statistics often enough.
+ */
+
+#define EMAC_STATS_TIMER_PERIOD 20
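+
+/*
+ * At 1 Gbit/s (~125 MB/s), a 32-bit byte counter wraps roughly every
+ * 2^32 / 125000000 ~= 34 seconds, so a 20 second refresh comfortably
+ * catches every wrap.
+ */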
+
+static int emac_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res,
+ u32 control_reg, u32 high_reg, u32 low_reg)
+{
+ u32 val, high, low;
+ int ret;
+
+ /* The "read" bit is the same for TX and RX */
+
+ val = MREGBIT_START_TX_COUNTER_READ | cnt;
+ emac_wr(priv, control_reg, val);
+ val = emac_rd(priv, control_reg);
+
+ ret = readl_poll_timeout_atomic(priv->iobase + control_reg, val,
+ !(val & MREGBIT_START_TX_COUNTER_READ),
+ 100, 10000);
+
+ if (ret) {
+ netdev_err(priv->ndev, "Read stat timeout\n");
+ return ret;
+ }
+
+ high = emac_rd(priv, high_reg);
+ low = emac_rd(priv, low_reg);
+ *res = high << 16 | lower_16_bits(low);
+
+ return 0;
+}
+
+static int emac_tx_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res)
+{
+ return emac_read_stat_cnt(priv, cnt, res, MAC_TX_STATCTR_CONTROL,
+ MAC_TX_STATCTR_DATA_HIGH,
+ MAC_TX_STATCTR_DATA_LOW);
+}
+
+static int emac_rx_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res)
+{
+ return emac_read_stat_cnt(priv, cnt, res, MAC_RX_STATCTR_CONTROL,
+ MAC_RX_STATCTR_DATA_HIGH,
+ MAC_RX_STATCTR_DATA_LOW);
+}
+
+static void emac_update_counter(u64 *counter, u32 new_low)
+{
+ u32 old_low = lower_32_bits(*counter);
+ u64 high = upper_32_bits(*counter);
+
+ if (old_low > new_low) {
+ /* Overflowed, increment high 32 bits */
+ high++;
+ }
+
+ *counter = (high << 32) | new_low;
+}
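+
+/*
+ * A worked example (illustrative only): if *counter was 0x1fffffff0 and
+ * the hardware now reports new_low = 0x10, then old_low (0xfffffff0) is
+ * greater than new_low, high is bumped from 1 to 2, and *counter
+ * becomes 0x200000010.
+ */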
+
+static void emac_stats_update(struct emac_priv *priv)
+{
+ u64 *tx_stats_off = priv->tx_stats_off.array;
+ u64 *rx_stats_off = priv->rx_stats_off.array;
+ u64 *tx_stats = priv->tx_stats.array;
+ u64 *rx_stats = priv->rx_stats.array;
+ u32 i, res, offset;
+
+ assert_spin_locked(&priv->stats_lock);
+
+ if (!netif_running(priv->ndev) || !netif_device_present(priv->ndev)) {
+ /* Not up, don't try to update */
+ return;
+ }
+
+ for (i = 0; i < sizeof(priv->tx_stats) / sizeof(*tx_stats); i++) {
+ /*
+ * If reading stats times out, everything is broken and there's
+ * nothing we can do. Since this function cannot return an error
+ * either, just return without updating and without
+ * rescheduling.
+ */
+ if (emac_tx_read_stat_cnt(priv, i, &res))
+ return;
+
+ /*
+ * Re-initializing while bringing the interface up resets counters
+ * to zero, so to provide continuity, we add the values saved
+ * last time we did emac_down() to the new hardware-provided
+ * value.
+ */
+ offset = lower_32_bits(tx_stats_off[i]);
+ emac_update_counter(&tx_stats[i], res + offset);
+ }
+
+ /* The same remarks as for the TX stats apply here */
+ for (i = 0; i < sizeof(priv->rx_stats) / sizeof(*rx_stats); i++) {
+ if (emac_rx_read_stat_cnt(priv, i, &res))
+ return;
+ offset = lower_32_bits(rx_stats_off[i]);
+ emac_update_counter(&rx_stats[i], res + offset);
+ }
+
+ mod_timer(&priv->stats_timer, jiffies + EMAC_STATS_TIMER_PERIOD * HZ);
+}
+
+static void emac_stats_timer(struct timer_list *t)
+{
+ struct emac_priv *priv = timer_container_of(priv, t, stats_timer);
+
+ spin_lock(&priv->stats_lock);
+
+ emac_stats_update(priv);
+
+ spin_unlock(&priv->stats_lock);
+}
+
+static const struct ethtool_rmon_hist_range emac_rmon_hist_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 4096 },
+ { /* sentinel */ },
+};
+
+/* Like dev_fetch_dstats(), but we only use tx_drops */
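+/*
+ * The u64_stats_fetch_begin()/u64_stats_fetch_retry() pair rereads
+ * tx_drops whenever a writer updated the per-CPU counter mid-read,
+ * keeping 64-bit reads tear-free on 32-bit machines.
+ */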
+static u64 emac_get_stat_tx_drops(struct emac_priv *priv)
+{
+ const struct pcpu_dstats *stats;
+ u64 tx_drops, total = 0;
+ unsigned int start;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ stats = per_cpu_ptr(priv->ndev->dstats, cpu);
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ tx_drops = u64_stats_read(&stats->tx_drops);
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ total += tx_drops;
+ }
+
+ return total;
+}
+
+static void emac_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ union emac_hw_tx_stats *tx_stats;
+ union emac_hw_rx_stats *rx_stats;
+
+ tx_stats = &priv->tx_stats;
+ rx_stats = &priv->rx_stats;
+
+ /* This is the only software counter */
+ storage->tx_dropped = emac_get_stat_tx_drops(priv);
+
+ spin_lock_bh(&priv->stats_lock);
+
+ emac_stats_update(priv);
+
+ storage->tx_packets = tx_stats->stats.tx_ok_pkts;
+ storage->tx_bytes = tx_stats->stats.tx_ok_bytes;
+ storage->tx_errors = tx_stats->stats.tx_err_pkts;
+
+ storage->rx_packets = rx_stats->stats.rx_ok_pkts;
+ storage->rx_bytes = rx_stats->stats.rx_ok_bytes;
+ storage->rx_errors = rx_stats->stats.rx_err_total_pkts;
+ storage->rx_crc_errors = rx_stats->stats.rx_crc_err_pkts;
+ storage->rx_frame_errors = rx_stats->stats.rx_align_err_pkts;
+ storage->rx_length_errors = rx_stats->stats.rx_len_err_pkts;
+
+ storage->collisions = tx_stats->stats.tx_singleclsn_pkts;
+ storage->collisions += tx_stats->stats.tx_multiclsn_pkts;
+ storage->collisions += tx_stats->stats.tx_excessclsn_pkts;
+
+ storage->rx_missed_errors = rx_stats->stats.rx_drp_fifo_full_pkts;
+ storage->rx_missed_errors += rx_stats->stats.rx_truncate_fifo_full_pkts;
+
+ spin_unlock_bh(&priv->stats_lock);
+}
+
+static void emac_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ union emac_hw_rx_stats *rx_stats;
+
+ rx_stats = &priv->rx_stats;
+
+ *ranges = emac_rmon_hist_ranges;
+
+ spin_lock_bh(&priv->stats_lock);
+
+ emac_stats_update(priv);
+
+ rmon_stats->undersize_pkts = rx_stats->stats.rx_len_undersize_pkts;
+ rmon_stats->oversize_pkts = rx_stats->stats.rx_len_oversize_pkts;
+ rmon_stats->fragments = rx_stats->stats.rx_len_fragment_pkts;
+ rmon_stats->jabbers = rx_stats->stats.rx_len_jabber_pkts;
+
+ /* Only RX has histogram stats */
+
+ rmon_stats->hist[0] = rx_stats->stats.rx_64_pkts;
+ rmon_stats->hist[1] = rx_stats->stats.rx_65_127_pkts;
+ rmon_stats->hist[2] = rx_stats->stats.rx_128_255_pkts;
+ rmon_stats->hist[3] = rx_stats->stats.rx_256_511_pkts;
+ rmon_stats->hist[4] = rx_stats->stats.rx_512_1023_pkts;
+ rmon_stats->hist[5] = rx_stats->stats.rx_1024_1518_pkts;
+ rmon_stats->hist[6] = rx_stats->stats.rx_1519_plus_pkts;
+
+ spin_unlock_bh(&priv->stats_lock);
+}
+
+static void emac_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ union emac_hw_tx_stats *tx_stats;
+ union emac_hw_rx_stats *rx_stats;
+
+ tx_stats = &priv->tx_stats;
+ rx_stats = &priv->rx_stats;
+
+ spin_lock_bh(&priv->stats_lock);
+
+ emac_stats_update(priv);
+
+ mac_stats->MulticastFramesXmittedOK = tx_stats->stats.tx_multicast_pkts;
+ mac_stats->BroadcastFramesXmittedOK = tx_stats->stats.tx_broadcast_pkts;
+
+ mac_stats->MulticastFramesReceivedOK =
+ rx_stats->stats.rx_multicast_pkts;
+ mac_stats->BroadcastFramesReceivedOK =
+ rx_stats->stats.rx_broadcast_pkts;
+
+ mac_stats->SingleCollisionFrames = tx_stats->stats.tx_singleclsn_pkts;
+ mac_stats->MultipleCollisionFrames = tx_stats->stats.tx_multiclsn_pkts;
+ mac_stats->LateCollisions = tx_stats->stats.tx_lateclsn_pkts;
+ mac_stats->FramesAbortedDueToXSColls =
+ tx_stats->stats.tx_excessclsn_pkts;
+
+ spin_unlock_bh(&priv->stats_lock);
+}
+
+static void emac_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ union emac_hw_tx_stats *tx_stats;
+ union emac_hw_rx_stats *rx_stats;
+
+ tx_stats = &priv->tx_stats;
+ rx_stats = &priv->rx_stats;
+
+ spin_lock_bh(&priv->stats_lock);
+
+ emac_stats_update(priv);
+
+ pause_stats->tx_pause_frames = tx_stats->stats.tx_pause_pkts;
+ pause_stats->rx_pause_frames = rx_stats->stats.rx_pause_pkts;
+
+ spin_unlock_bh(&priv->stats_lock);
+}
+
+/* Other statistics that are not derivable from standard statistics */
+
+#define EMAC_ETHTOOL_STAT(type, name) \
+ { offsetof(type, stats.name) / sizeof(u64), #name }
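+
+/*
+ * For example (illustrative), rx_drp_fifo_full_pkts is the 24th u64 in
+ * union emac_hw_rx_stats, so the macro expands to
+ * { 23, "rx_drp_fifo_full_pkts" }.
+ */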
+
+static const struct emac_ethtool_stats {
+ size_t offset;
+ char str[ETH_GSTRING_LEN];
+} emac_ethtool_rx_stats[] = {
+ EMAC_ETHTOOL_STAT(union emac_hw_rx_stats, rx_drp_fifo_full_pkts),
+ EMAC_ETHTOOL_STAT(union emac_hw_rx_stats, rx_truncate_fifo_full_pkts),
+};
+
+static int emac_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(emac_ethtool_rx_stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void emac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(emac_ethtool_rx_stats); i++) {
+ memcpy(data, emac_ethtool_rx_stats[i].str,
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static void emac_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ u64 *rx_stats = (u64 *)&priv->rx_stats;
+ int i;
+
+ spin_lock_bh(&priv->stats_lock);
+
+ emac_stats_update(priv);
+
+ for (i = 0; i < ARRAY_SIZE(emac_ethtool_rx_stats); i++)
+ data[i] = rx_stats[emac_ethtool_rx_stats[i].offset];
+
+ spin_unlock_bh(&priv->stats_lock);
+}
+
+static int emac_ethtool_get_regs_len(struct net_device *dev)
+{
+ return (EMAC_DMA_REG_CNT + EMAC_MAC_REG_CNT) * sizeof(u32);
+}
+
+static void emac_ethtool_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *space)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ u32 *reg_space = space;
+ int i;
+
+ regs->version = 1;
+
+ for (i = 0; i < EMAC_DMA_REG_CNT; i++)
+ reg_space[i] = emac_rd(priv, DMA_CONFIGURATION + i * 4);
+
+ for (i = 0; i < EMAC_MAC_REG_CNT; i++)
+ reg_space[i + EMAC_DMA_REG_CNT] =
+ emac_rd(priv, MAC_GLOBAL_CONTROL + i * 4);
+}
+
+static void emac_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+
+ pause->autoneg = priv->flow_control_autoneg;
+ pause->tx_pause = !!(priv->flow_control & FLOW_CTRL_TX);
+ pause->rx_pause = !!(priv->flow_control & FLOW_CTRL_RX);
+}
+
+static int emac_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ u8 fc = 0;
+
+ priv->flow_control_autoneg = pause->autoneg;
+
+ if (pause->autoneg) {
+ emac_set_fc_autoneg(priv);
+ } else {
+ if (pause->tx_pause)
+ fc |= FLOW_CTRL_TX;
+
+ if (pause->rx_pause)
+ fc |= FLOW_CTRL_RX;
+
+ emac_set_fc(priv, fc);
+ }
+
+ return 0;
+}
+
+static void emac_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ info->n_stats = ARRAY_SIZE(emac_ethtool_rx_stats);
+}
+
+static void emac_tx_timeout_task(struct work_struct *work)
+{
+ struct net_device *ndev;
+ struct emac_priv *priv;
+
+ priv = container_of(work, struct emac_priv, tx_timeout_task);
+ ndev = priv->ndev;
+
+ rtnl_lock();
+
+ /* No need to reset if already down */
+ if (!netif_running(ndev)) {
+ rtnl_unlock();
+ return;
+ }
+
+ netdev_err(ndev, "MAC reset due to TX timeout\n");
+
+ netif_trans_update(ndev); /* prevent tx timeout */
+ dev_close(ndev);
+ dev_open(ndev, NULL);
+
+ rtnl_unlock();
+}
+
+static void emac_sw_init(struct emac_priv *priv)
+{
+ priv->dma_buf_sz = EMAC_DEFAULT_BUFSIZE;
+
+ priv->tx_ring.total_cnt = DEFAULT_TX_RING_NUM;
+ priv->rx_ring.total_cnt = DEFAULT_RX_RING_NUM;
+
+ spin_lock_init(&priv->stats_lock);
+
+ INIT_WORK(&priv->tx_timeout_task, emac_tx_timeout_task);
+
+ priv->tx_coal_frames = EMAC_TX_FRAMES;
+ priv->tx_coal_timeout = EMAC_TX_COAL_TIMEOUT;
+
+ timer_setup(&priv->txtimer, emac_tx_coal_timer, 0);
+ timer_setup(&priv->stats_timer, emac_stats_timer, 0);
+}
+
+static irqreturn_t emac_interrupt_handler(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct emac_priv *priv = netdev_priv(ndev);
+ bool should_schedule = false;
+ u32 clr = 0;
+ u32 status;
+
+ status = emac_rd(priv, DMA_STATUS_IRQ);
+
+ if (status & MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ) {
+ clr |= MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ;
+ should_schedule = true;
+ }
+
+ if (status & MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ)
+ clr |= MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ;
+
+ if (status & MREGBIT_TRANSMIT_DMA_STOPPED_IRQ)
+ clr |= MREGBIT_TRANSMIT_DMA_STOPPED_IRQ;
+
+ if (status & MREGBIT_RECEIVE_TRANSFER_DONE_IRQ) {
+ clr |= MREGBIT_RECEIVE_TRANSFER_DONE_IRQ;
+ should_schedule = true;
+ }
+
+ if (status & MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ)
+ clr |= MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ;
+
+ if (status & MREGBIT_RECEIVE_DMA_STOPPED_IRQ)
+ clr |= MREGBIT_RECEIVE_DMA_STOPPED_IRQ;
+
+ if (status & MREGBIT_RECEIVE_MISSED_FRAME_IRQ)
+ clr |= MREGBIT_RECEIVE_MISSED_FRAME_IRQ;
+
+ if (should_schedule) {
+ if (napi_schedule_prep(&priv->napi)) {
+ emac_disable_interrupt(priv);
+ __napi_schedule_irqoff(&priv->napi);
+ }
+ }
+
+ emac_wr(priv, DMA_STATUS_IRQ, clr);
+
+ return IRQ_HANDLED;
+}
+
+static void emac_configure_tx(struct emac_priv *priv)
+{
+ u32 val;
+
+ /* Set base address */
+ val = (u32)priv->tx_ring.desc_dma_addr;
+ emac_wr(priv, DMA_TRANSMIT_BASE_ADDRESS, val);
+
+ /* Set TX inter-frame gap value, enable transmit */
+ val = emac_rd(priv, MAC_TRANSMIT_CONTROL);
+ val &= ~MREGBIT_IFG_LEN;
+ val |= MREGBIT_TRANSMIT_ENABLE;
+ val |= MREGBIT_TRANSMIT_AUTO_RETRY;
+ emac_wr(priv, MAC_TRANSMIT_CONTROL, val);
+
+ emac_wr(priv, DMA_TRANSMIT_AUTO_POLL_COUNTER, 0x0);
+
+ /* Start TX DMA */
+ val = emac_rd(priv, DMA_CONTROL);
+ val |= MREGBIT_START_STOP_TRANSMIT_DMA;
+ emac_wr(priv, DMA_CONTROL, val);
+}
+
+static void emac_configure_rx(struct emac_priv *priv)
+{
+ u32 val;
+
+ /* Set base address */
+ val = (u32)priv->rx_ring.desc_dma_addr;
+ emac_wr(priv, DMA_RECEIVE_BASE_ADDRESS, val);
+
+ /* Enable receive */
+ val = emac_rd(priv, MAC_RECEIVE_CONTROL);
+ val |= MREGBIT_RECEIVE_ENABLE;
+ val |= MREGBIT_STORE_FORWARD;
+ emac_wr(priv, MAC_RECEIVE_CONTROL, val);
+
+ /* Start RX DMA */
+ val = emac_rd(priv, DMA_CONTROL);
+ val |= MREGBIT_START_STOP_RECEIVE_DMA;
+ emac_wr(priv, DMA_CONTROL, val);
+}
+
+static void emac_adjust_link(struct net_device *dev)
+{
+ struct emac_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
+ u32 ctrl;
+
+ if (phydev->link) {
+ ctrl = emac_rd(priv, MAC_GLOBAL_CONTROL);
+
+ /* Update duplex and speed from PHY */
+
+ FIELD_MODIFY(MREGBIT_FULL_DUPLEX_MODE, &ctrl,
+ phydev->duplex == DUPLEX_FULL);
+
+ ctrl &= ~MREGBIT_SPEED;
+
+ switch (phydev->speed) {
+ case SPEED_1000:
+ ctrl |= MREGBIT_SPEED_1000M;
+ break;
+ case SPEED_100:
+ ctrl |= MREGBIT_SPEED_100M;
+ break;
+ case SPEED_10:
+ ctrl |= MREGBIT_SPEED_10M;
+ break;
+ default:
+ netdev_err(dev, "Unknown speed: %d\n", phydev->speed);
+ phydev->speed = SPEED_UNKNOWN;
+ break;
+ }
+
+ emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl);
+
+ emac_set_fc_autoneg(priv);
+ }
+
+ phy_print_status(phydev);
+}
+
+static void emac_update_delay_line(struct emac_priv *priv)
+{
+ u32 mask = 0, val = 0;
+
+ mask |= EMAC_RX_DLINE_EN;
+ mask |= EMAC_RX_DLINE_STEP_MASK | EMAC_RX_DLINE_CODE_MASK;
+ mask |= EMAC_TX_DLINE_EN;
+ mask |= EMAC_TX_DLINE_STEP_MASK | EMAC_TX_DLINE_CODE_MASK;
+
+ if (phy_interface_mode_is_rgmii(priv->phy_interface)) {
+ val |= EMAC_RX_DLINE_EN;
+ val |= FIELD_PREP(EMAC_RX_DLINE_STEP_MASK,
+ EMAC_DLINE_STEP_15P6);
+ val |= FIELD_PREP(EMAC_RX_DLINE_CODE_MASK, priv->rx_delay);
+
+ val |= EMAC_TX_DLINE_EN;
+ val |= FIELD_PREP(EMAC_TX_DLINE_STEP_MASK,
+ EMAC_DLINE_STEP_15P6);
+ val |= FIELD_PREP(EMAC_TX_DLINE_CODE_MASK, priv->tx_delay);
+ }
+
+ regmap_update_bits(priv->regmap_apmu,
+ priv->regmap_apmu_offset + APMU_EMAC_DLINE_REG,
+ mask, val);
+}
+
+static int emac_phy_connect(struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct device *dev = &priv->pdev->dev;
+ struct phy_device *phydev;
+ struct device_node *np;
+ int ret;
+
+ ret = of_get_phy_mode(dev->of_node, &priv->phy_interface);
+ if (ret) {
+ netdev_err(ndev, "No phy-mode found");
+ return ret;
+ }
+
+ switch (priv->phy_interface) {
+ case PHY_INTERFACE_MODE_RMII:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ break;
+ default:
+ netdev_err(ndev, "Unsupported PHY interface %s",
+ phy_modes(priv->phy_interface));
+ return -EINVAL;
+ }
+
+ np = of_parse_phandle(dev->of_node, "phy-handle", 0);
+ if (!np && of_phy_is_fixed_link(dev->of_node))
+ np = of_node_get(dev->of_node);
+
+ if (!np) {
+ netdev_err(ndev, "No PHY specified");
+ return -ENODEV;
+ }
+
+ ret = emac_phy_interface_config(priv);
+ if (ret)
+ goto err_node_put;
+
+ phydev = of_phy_connect(ndev, np, &emac_adjust_link, 0,
+ priv->phy_interface);
+ if (!phydev) {
+ netdev_err(ndev, "Could not attach to PHY\n");
+ ret = -ENODEV;
+ goto err_node_put;
+ }
+
+ phy_support_asym_pause(phydev);
+
+ phydev->mac_managed_pm = true;
+
+ emac_update_delay_line(priv);
+
+err_node_put:
+ of_node_put(np);
+ return ret;
+}
+
+static int emac_up(struct emac_priv *priv)
+{
+ struct platform_device *pdev = priv->pdev;
+ struct net_device *ndev = priv->ndev;
+ int ret;
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ ret = emac_phy_connect(ndev);
+ if (ret) {
+ dev_err(&pdev->dev, "emac_phy_connect failed\n");
+ goto err_pm_put;
+ }
+
+ emac_init_hw(priv);
+
+ emac_set_mac_addr(priv, ndev->dev_addr);
+ emac_configure_tx(priv);
+ emac_configure_rx(priv);
+
+ emac_alloc_rx_desc_buffers(priv);
+
+ phy_start(ndev->phydev);
+
+ ret = request_irq(priv->irq, emac_interrupt_handler, IRQF_SHARED,
+ ndev->name, ndev);
+ if (ret) {
+ dev_err(&pdev->dev, "request_irq failed\n");
+ goto err_reset_disconnect_phy;
+ }
+
+ /* Don't enable MAC interrupts */
+ emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);
+
+ /* Enable DMA interrupts */
+ emac_wr(priv, DMA_INTERRUPT_ENABLE,
+ MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE |
+ MREGBIT_TRANSMIT_DMA_STOPPED_INTR_ENABLE |
+ MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE |
+ MREGBIT_RECEIVE_DMA_STOPPED_INTR_ENABLE |
+ MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE);
+
+ napi_enable(&priv->napi);
+
+ netif_start_queue(ndev);
+
+ mod_timer(&priv->stats_timer, jiffies);
+
+ return 0;
+
+err_reset_disconnect_phy:
+ emac_reset_hw(priv);
+ phy_disconnect(ndev->phydev);
+
+err_pm_put:
+ pm_runtime_put_sync(&pdev->dev);
+ return ret;
+}
+
+static int emac_down(struct emac_priv *priv)
+{
+ struct platform_device *pdev = priv->pdev;
+ struct net_device *ndev = priv->ndev;
+
+ netif_stop_queue(ndev);
+
+ phy_disconnect(ndev->phydev);
+
+ emac_wr(priv, MAC_INTERRUPT_ENABLE, 0x0);
+ emac_wr(priv, DMA_INTERRUPT_ENABLE, 0x0);
+
+ free_irq(priv->irq, ndev);
+
+ napi_disable(&priv->napi);
+
+ timer_delete_sync(&priv->txtimer);
+ cancel_work_sync(&priv->tx_timeout_task);
+
+ timer_delete_sync(&priv->stats_timer);
+
+ emac_reset_hw(priv);
+
+ /* Update and save current stats, see emac_stats_update() for usage */
+
+ spin_lock_bh(&priv->stats_lock);
+
+ emac_stats_update(priv);
+
+ priv->tx_stats_off = priv->tx_stats;
+ priv->rx_stats_off = priv->rx_stats;
+
+ spin_unlock_bh(&priv->stats_lock);
+
+ pm_runtime_put_sync(&pdev->dev);
+ return 0;
+}
+
+/* Called when net interface is brought up. */
+static int emac_open(struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct device *dev = &priv->pdev->dev;
+ int ret;
+
+ ret = emac_alloc_tx_resources(priv);
+ if (ret) {
+ dev_err(dev, "Cannot allocate TX resources\n");
+ return ret;
+ }
+
+ ret = emac_alloc_rx_resources(priv);
+ if (ret) {
+ dev_err(dev, "Cannot allocate RX resources\n");
+ goto err_free_tx;
+ }
+
+ ret = emac_up(priv);
+ if (ret) {
+ dev_err(dev, "Error when bringing interface up\n");
+ goto err_free_rx;
+ }
+ return 0;
+
+err_free_rx:
+ emac_free_rx_resources(priv);
+err_free_tx:
+ emac_free_tx_resources(priv);
+
+ return ret;
+}
+
+/* Called when interface is brought down. */
+static int emac_stop(struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ emac_down(priv);
+ emac_free_tx_resources(priv);
+ emac_free_rx_resources(priv);
+
+ return 0;
+}
+
+static const struct ethtool_ops emac_ethtool_ops = {
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_drvinfo = emac_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+
+ .get_regs = emac_ethtool_get_regs,
+ .get_regs_len = emac_ethtool_get_regs_len,
+
+ .get_rmon_stats = emac_get_rmon_stats,
+ .get_pause_stats = emac_get_pause_stats,
+ .get_eth_mac_stats = emac_get_eth_mac_stats,
+
+ .get_sset_count = emac_get_sset_count,
+ .get_strings = emac_get_strings,
+ .get_ethtool_stats = emac_get_ethtool_stats,
+
+ .get_pauseparam = emac_get_pauseparam,
+ .set_pauseparam = emac_set_pauseparam,
+};
+
+static const struct net_device_ops emac_netdev_ops = {
+ .ndo_open = emac_open,
+ .ndo_stop = emac_stop,
+ .ndo_start_xmit = emac_start_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = emac_set_mac_address,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
+ .ndo_change_mtu = emac_change_mtu,
+ .ndo_tx_timeout = emac_tx_timeout,
+ .ndo_set_rx_mode = emac_set_rx_mode,
+ .ndo_get_stats64 = emac_get_stats64,
+};
+
+/* Currently we always use 15.6 ps/step for the delay line */
+
+static u32 delay_ps_to_unit(u32 ps)
+{
+ return DIV_ROUND_CLOSEST(ps * 10, 156);
+}
+
+static u32 delay_unit_to_ps(u32 unit)
+{
+ return DIV_ROUND_CLOSEST(unit * 156, 10);
+}
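+
+/*
+ * A worked example (illustrative): delay_ps_to_unit(1000) =
+ * DIV_ROUND_CLOSEST(10000, 156) = 64 steps, and delay_unit_to_ps(64) =
+ * DIV_ROUND_CLOSEST(9984, 10) = 998 ps, so a round trip loses at most
+ * about half a step (~8 ps).
+ */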
+
+#define EMAC_MAX_DELAY_UNIT FIELD_MAX(EMAC_TX_DLINE_CODE_MASK)
+
+/* Minus one just to be safe from rounding errors */
+#define EMAC_MAX_DELAY_PS (delay_unit_to_ps(EMAC_MAX_DELAY_UNIT - 1))
+
+static int emac_config_dt(struct platform_device *pdev, struct emac_priv *priv)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ u8 mac_addr[ETH_ALEN] = { 0 };
+ int ret;
+
+ priv->iobase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->iobase))
+ return dev_err_probe(dev, PTR_ERR(priv->iobase),
+ "ioremap failed\n");
+
+ priv->regmap_apmu =
+ syscon_regmap_lookup_by_phandle_args(np, "spacemit,apmu", 1,
+ &priv->regmap_apmu_offset);
+
+ if (IS_ERR(priv->regmap_apmu))
+ return dev_err_probe(dev, PTR_ERR(priv->regmap_apmu),
+ "failed to get syscon\n");
+
+ priv->irq = platform_get_irq(pdev, 0);
+ if (priv->irq < 0)
+ return priv->irq;
+
+ ret = of_get_mac_address(np, mac_addr);
+ if (ret) {
+ if (ret == -EPROBE_DEFER)
+ return dev_err_probe(dev, ret,
+ "Can't get MAC address\n");
+
+ dev_info(&pdev->dev, "Using random MAC address\n");
+ eth_hw_addr_random(priv->ndev);
+ } else {
+ eth_hw_addr_set(priv->ndev, mac_addr);
+ }
+
+ priv->tx_delay = 0;
+ priv->rx_delay = 0;
+
+ of_property_read_u32(np, "tx-internal-delay-ps", &priv->tx_delay);
+ of_property_read_u32(np, "rx-internal-delay-ps", &priv->rx_delay);
+
+ if (priv->tx_delay > EMAC_MAX_DELAY_PS) {
+ dev_err(&pdev->dev,
+ "tx-internal-delay-ps too large: max %d, got %d",
+ EMAC_MAX_DELAY_PS, priv->tx_delay);
+ return -EINVAL;
+ }
+
+ if (priv->rx_delay > EMAC_MAX_DELAY_PS) {
+ dev_err(&pdev->dev,
+ "rx-internal-delay-ps too large: max %d, got %d",
+ EMAC_MAX_DELAY_PS, priv->rx_delay);
+ return -EINVAL;
+ }
+
+ priv->tx_delay = delay_ps_to_unit(priv->tx_delay);
+ priv->rx_delay = delay_ps_to_unit(priv->rx_delay);
+
+ return 0;
+}
+
+static void emac_phy_deregister_fixed_link(void *data)
+{
+ struct device_node *of_node = data;
+
+ of_phy_deregister_fixed_link(of_node);
+}
+
+static int emac_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct reset_control *reset;
+ struct net_device *ndev;
+ struct emac_priv *priv;
+ int ret;
+
+ ndev = devm_alloc_etherdev(dev, sizeof(struct emac_priv));
+ if (!ndev)
+ return -ENOMEM;
+
+ ndev->hw_features = NETIF_F_SG;
+ ndev->features |= ndev->hw_features;
+
+ ndev->max_mtu = EMAC_RX_BUF_4K - (ETH_HLEN + ETH_FCS_LEN);
+ ndev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
+
+ priv = netdev_priv(ndev);
+ priv->ndev = ndev;
+ priv->pdev = pdev;
+ platform_set_drvdata(pdev, priv);
+
+ ret = emac_config_dt(pdev, priv);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Configuration failed\n");
+
+ ndev->watchdog_timeo = 5 * HZ;
+ ndev->base_addr = (unsigned long)priv->iobase;
+ ndev->irq = priv->irq;
+
+ ndev->ethtool_ops = &emac_ethtool_ops;
+ ndev->netdev_ops = &emac_netdev_ops;
+
+ devm_pm_runtime_enable(&pdev->dev);
+
+ priv->bus_clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(priv->bus_clk))
+ return dev_err_probe(dev, PTR_ERR(priv->bus_clk),
+ "Failed to get clock\n");
+
+ reset = devm_reset_control_get_optional_exclusive_deasserted(&pdev->dev,
+ NULL);
+ if (IS_ERR(reset))
+ return dev_err_probe(dev, PTR_ERR(reset),
+ "Failed to get reset\n");
+
+ if (of_phy_is_fixed_link(dev->of_node)) {
+ ret = of_phy_register_fixed_link(dev->of_node);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register fixed-link\n");
+
+ ret = devm_add_action_or_reset(dev,
+ emac_phy_deregister_fixed_link,
+ dev->of_node);
+
+ if (ret) {
+ dev_err(dev, "devm_add_action_or_reset failed\n");
+ return ret;
+ }
+ }
+
+ emac_sw_init(priv);
+
+ ret = emac_mdio_init(priv);
+ if (ret)
+ goto err_timer_delete;
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ ret = devm_register_netdev(dev, ndev);
+ if (ret) {
+ dev_err(dev, "devm_register_netdev failed\n");
+ goto err_timer_delete;
+ }
+
+ netif_napi_add(ndev, &priv->napi, emac_rx_poll);
+ netif_carrier_off(ndev);
+
+ return 0;
+
+err_timer_delete:
+ timer_delete_sync(&priv->txtimer);
+ timer_delete_sync(&priv->stats_timer);
+
+ return ret;
+}
+
+static void emac_remove(struct platform_device *pdev)
+{
+ struct emac_priv *priv = platform_get_drvdata(pdev);
+
+ timer_shutdown_sync(&priv->txtimer);
+ cancel_work_sync(&priv->tx_timeout_task);
+
+ timer_shutdown_sync(&priv->stats_timer);
+
+ emac_reset_hw(priv);
+}
+
+static int emac_resume(struct device *dev)
+{
+ struct emac_priv *priv = dev_get_drvdata(dev);
+ struct net_device *ndev = priv->ndev;
+ int ret;
+
+ ret = clk_prepare_enable(priv->bus_clk);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable bus clock: %d\n", ret);
+ return ret;
+ }
+
+ if (!netif_running(ndev))
+ return 0;
+
+ ret = emac_open(ndev);
+ if (ret) {
+ clk_disable_unprepare(priv->bus_clk);
+ return ret;
+ }
+
+ netif_device_attach(ndev);
+
+ mod_timer(&priv->stats_timer, jiffies);
+
+ return 0;
+}
+
+static int emac_suspend(struct device *dev)
+{
+ struct emac_priv *priv = dev_get_drvdata(dev);
+ struct net_device *ndev = priv->ndev;
+
+ if (!ndev || !netif_running(ndev)) {
+ clk_disable_unprepare(priv->bus_clk);
+ return 0;
+ }
+
+ emac_stop(ndev);
+
+ clk_disable_unprepare(priv->bus_clk);
+ netif_device_detach(ndev);
+ return 0;
+}
+
+static const struct dev_pm_ops emac_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(emac_suspend, emac_resume)
+};
+
+static const struct of_device_id emac_of_match[] = {
+ { .compatible = "spacemit,k1-emac" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, emac_of_match);
+
+static struct platform_driver emac_driver = {
+ .probe = emac_probe,
+ .remove = emac_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(emac_of_match),
+ .pm = &emac_pm_ops,
+ },
+};
+module_platform_driver(emac_driver);
+
+MODULE_DESCRIPTION("SpacemiT K1 Ethernet driver");
+MODULE_AUTHOR("Vivian Wang <wangruikang@iscas.ac.cn>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/spacemit/k1_emac.h b/drivers/net/ethernet/spacemit/k1_emac.h
new file mode 100644
index 000000000000..5a09e946a276
--- /dev/null
+++ b/drivers/net/ethernet/spacemit/k1_emac.h
@@ -0,0 +1,416 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * SpacemiT K1 Ethernet hardware definitions
+ *
+ * Copyright (C) 2023-2025 SpacemiT (Hangzhou) Technology Co. Ltd
+ * Copyright (C) 2025 Vivian Wang <wangruikang@iscas.ac.cn>
+ */
+
+#ifndef _K1_EMAC_H_
+#define _K1_EMAC_H_
+
+#include <linux/stddef.h>
+
+/* APMU syscon registers */
+
+#define APMU_EMAC_CTRL_REG 0x0
+
+#define PHY_INTF_RGMII BIT(2)
+
+/*
+ * Only valid for RMII mode
+ * 0: Ref clock from External PHY
+ * 1: Ref clock from SoC
+ */
+#define REF_CLK_SEL BIT(3)
+
+/*
+ * Function clock select
+ * 0: 208 MHz
+ * 1: 312 MHz
+ */
+#define FUNC_CLK_SEL BIT(4)
+
+/* Only valid for RMII, invert TX clk */
+#define RMII_TX_CLK_SEL BIT(6)
+
+/* Only valid for RMII, invert RX clk */
+#define RMII_RX_CLK_SEL BIT(7)
+
+/*
+ * Only valid for RGMII
+ * 0: TX clk from RX clk
+ * 1: TX clk from SoC
+ */
+#define RGMII_TX_CLK_SEL BIT(8)
+
+#define PHY_IRQ_EN BIT(12)
+#define AXI_SINGLE_ID BIT(13)
+
+#define APMU_EMAC_DLINE_REG 0x4
+
+#define EMAC_RX_DLINE_EN BIT(0)
+#define EMAC_RX_DLINE_STEP_MASK GENMASK(5, 4)
+#define EMAC_RX_DLINE_CODE_MASK GENMASK(15, 8)
+
+#define EMAC_TX_DLINE_EN BIT(16)
+#define EMAC_TX_DLINE_STEP_MASK GENMASK(21, 20)
+#define EMAC_TX_DLINE_CODE_MASK GENMASK(31, 24)
+
+#define EMAC_DLINE_STEP_15P6 0 /* 15.6 ps/step */
+#define EMAC_DLINE_STEP_24P4 1 /* 24.4 ps/step */
+#define EMAC_DLINE_STEP_29P7 2 /* 29.7 ps/step */
+#define EMAC_DLINE_STEP_35P1 3 /* 35.1 ps/step */
+
+/* DMA register set */
+#define DMA_CONFIGURATION 0x0000
+#define DMA_CONTROL 0x0004
+#define DMA_STATUS_IRQ 0x0008
+#define DMA_INTERRUPT_ENABLE 0x000c
+
+#define DMA_TRANSMIT_AUTO_POLL_COUNTER 0x0010
+#define DMA_TRANSMIT_POLL_DEMAND 0x0014
+#define DMA_RECEIVE_POLL_DEMAND 0x0018
+
+#define DMA_TRANSMIT_BASE_ADDRESS 0x001c
+#define DMA_RECEIVE_BASE_ADDRESS 0x0020
+#define DMA_MISSED_FRAME_COUNTER 0x0024
+#define DMA_STOP_FLUSH_COUNTER 0x0028
+
+#define DMA_RECEIVE_IRQ_MITIGATION_CTRL 0x002c
+
+#define DMA_CURRENT_TRANSMIT_DESCRIPTOR_POINTER 0x0030
+#define DMA_CURRENT_TRANSMIT_BUFFER_POINTER 0x0034
+#define DMA_CURRENT_RECEIVE_DESCRIPTOR_POINTER 0x0038
+#define DMA_CURRENT_RECEIVE_BUFFER_POINTER 0x003c
+
+/* MAC Register set */
+#define MAC_GLOBAL_CONTROL 0x0100
+#define MAC_TRANSMIT_CONTROL 0x0104
+#define MAC_RECEIVE_CONTROL 0x0108
+#define MAC_MAXIMUM_FRAME_SIZE 0x010c
+#define MAC_TRANSMIT_JABBER_SIZE 0x0110
+#define MAC_RECEIVE_JABBER_SIZE 0x0114
+#define MAC_ADDRESS_CONTROL 0x0118
+#define MAC_MDIO_CLK_DIV 0x011c
+#define MAC_ADDRESS1_HIGH 0x0120
+#define MAC_ADDRESS1_MED 0x0124
+#define MAC_ADDRESS1_LOW 0x0128
+#define MAC_ADDRESS2_HIGH 0x012c
+#define MAC_ADDRESS2_MED 0x0130
+#define MAC_ADDRESS2_LOW 0x0134
+#define MAC_ADDRESS3_HIGH 0x0138
+#define MAC_ADDRESS3_MED 0x013c
+#define MAC_ADDRESS3_LOW 0x0140
+#define MAC_ADDRESS4_HIGH 0x0144
+#define MAC_ADDRESS4_MED 0x0148
+#define MAC_ADDRESS4_LOW 0x014c
+#define MAC_MULTICAST_HASH_TABLE1 0x0150
+#define MAC_MULTICAST_HASH_TABLE2 0x0154
+#define MAC_MULTICAST_HASH_TABLE3 0x0158
+#define MAC_MULTICAST_HASH_TABLE4 0x015c
+#define MAC_FC_CONTROL 0x0160
+#define MAC_FC_PAUSE_FRAME_GENERATE 0x0164
+#define MAC_FC_SOURCE_ADDRESS_HIGH 0x0168
+#define MAC_FC_SOURCE_ADDRESS_MED 0x016c
+#define MAC_FC_SOURCE_ADDRESS_LOW 0x0170
+#define MAC_FC_DESTINATION_ADDRESS_HIGH 0x0174
+#define MAC_FC_DESTINATION_ADDRESS_MED 0x0178
+#define MAC_FC_DESTINATION_ADDRESS_LOW 0x017c
+#define MAC_FC_PAUSE_TIME_VALUE 0x0180
+#define MAC_FC_HIGH_PAUSE_TIME 0x0184
+#define MAC_FC_LOW_PAUSE_TIME 0x0188
+#define MAC_FC_PAUSE_HIGH_THRESHOLD 0x018c
+#define MAC_FC_PAUSE_LOW_THRESHOLD 0x0190
+#define MAC_MDIO_CONTROL 0x01a0
+#define MAC_MDIO_DATA 0x01a4
+#define MAC_RX_STATCTR_CONTROL 0x01a8
+#define MAC_RX_STATCTR_DATA_HIGH 0x01ac
+#define MAC_RX_STATCTR_DATA_LOW 0x01b0
+#define MAC_TX_STATCTR_CONTROL 0x01b4
+#define MAC_TX_STATCTR_DATA_HIGH 0x01b8
+#define MAC_TX_STATCTR_DATA_LOW 0x01bc
+#define MAC_TRANSMIT_FIFO_ALMOST_FULL 0x01c0
+#define MAC_TRANSMIT_PACKET_START_THRESHOLD 0x01c4
+#define MAC_RECEIVE_PACKET_START_THRESHOLD 0x01c8
+#define MAC_STATUS_IRQ 0x01e0
+#define MAC_INTERRUPT_ENABLE 0x01e4
+
+/* Used for register dump */
+#define EMAC_DMA_REG_CNT 16
+#define EMAC_MAC_REG_CNT 124
+
+/* DMA_CONFIGURATION (0x0000) */
+
+/*
+ * 0: DMA controller in normal operation mode
+ * 1: DMA controller reset to default state,
+ * clearing all internal state information
+ */
+#define MREGBIT_SOFTWARE_RESET BIT(0)
+
+#define MREGBIT_BURST_1WORD BIT(1)
+#define MREGBIT_BURST_2WORD BIT(2)
+#define MREGBIT_BURST_4WORD BIT(3)
+#define MREGBIT_BURST_8WORD BIT(4)
+#define MREGBIT_BURST_16WORD BIT(5)
+#define MREGBIT_BURST_32WORD BIT(6)
+#define MREGBIT_BURST_64WORD BIT(7)
+#define MREGBIT_BURST_LENGTH GENMASK(7, 1)
+#define MREGBIT_DESCRIPTOR_SKIP_LENGTH GENMASK(12, 8)
+
+/* Receive and transmit DMA use big-endian byte ordering for descriptors */
+#define MREGBIT_DESCRIPTOR_BYTE_ORDERING BIT(13)
+
+#define MREGBIT_BIG_LITLE_ENDIAN BIT(14)
+#define MREGBIT_TX_RX_ARBITRATION BIT(15)
+#define MREGBIT_WAIT_FOR_DONE BIT(16)
+#define MREGBIT_STRICT_BURST BIT(17)
+#define MREGBIT_DMA_64BIT_MODE BIT(18)
+
+/* DMA_CONTROL (0x0004) */
+#define MREGBIT_START_STOP_TRANSMIT_DMA BIT(0)
+#define MREGBIT_START_STOP_RECEIVE_DMA BIT(1)
+
+/* DMA_STATUS_IRQ (0x0008) */
+#define MREGBIT_TRANSMIT_TRANSFER_DONE_IRQ BIT(0)
+#define MREGBIT_TRANSMIT_DES_UNAVAILABLE_IRQ BIT(1)
+#define MREGBIT_TRANSMIT_DMA_STOPPED_IRQ BIT(2)
+#define MREGBIT_RECEIVE_TRANSFER_DONE_IRQ BIT(4)
+#define MREGBIT_RECEIVE_DES_UNAVAILABLE_IRQ BIT(5)
+#define MREGBIT_RECEIVE_DMA_STOPPED_IRQ BIT(6)
+#define MREGBIT_RECEIVE_MISSED_FRAME_IRQ BIT(7)
+#define MREGBIT_MAC_IRQ BIT(8)
+#define MREGBIT_TRANSMIT_DMA_STATE GENMASK(18, 16)
+#define MREGBIT_RECEIVE_DMA_STATE GENMASK(23, 20)
+
+/* DMA_INTERRUPT_ENABLE (0x000c) */
+#define MREGBIT_TRANSMIT_TRANSFER_DONE_INTR_ENABLE BIT(0)
+#define MREGBIT_TRANSMIT_DES_UNAVAILABLE_INTR_ENABLE BIT(1)
+#define MREGBIT_TRANSMIT_DMA_STOPPED_INTR_ENABLE BIT(2)
+#define MREGBIT_RECEIVE_TRANSFER_DONE_INTR_ENABLE BIT(4)
+#define MREGBIT_RECEIVE_DES_UNAVAILABLE_INTR_ENABLE BIT(5)
+#define MREGBIT_RECEIVE_DMA_STOPPED_INTR_ENABLE BIT(6)
+#define MREGBIT_RECEIVE_MISSED_FRAME_INTR_ENABLE BIT(7)
+#define MREGBIT_MAC_INTR_ENABLE BIT(8)
+
+/* DMA_RECEIVE_IRQ_MITIGATION_CTRL (0x002c) */
+#define MREGBIT_RECEIVE_IRQ_FRAME_COUNTER_MASK GENMASK(7, 0)
+#define MREGBIT_RECEIVE_IRQ_TIMEOUT_COUNTER_MASK GENMASK(27, 8)
+#define MREGBIT_RECEIVE_IRQ_FRAME_COUNTER_MODE BIT(30)
+#define MREGBIT_RECEIVE_IRQ_MITIGATION_ENABLE BIT(31)
+
+/* MAC_GLOBAL_CONTROL (0x0100) */
+#define MREGBIT_SPEED GENMASK(1, 0)
+#define MREGBIT_SPEED_10M 0x0
+#define MREGBIT_SPEED_100M BIT(0)
+#define MREGBIT_SPEED_1000M BIT(1)
+#define MREGBIT_FULL_DUPLEX_MODE BIT(2)
+#define MREGBIT_RESET_RX_STAT_COUNTERS BIT(3)
+#define MREGBIT_RESET_TX_STAT_COUNTERS BIT(4)
+#define MREGBIT_UNICAST_WAKEUP_MODE BIT(8)
+#define MREGBIT_MAGIC_PACKET_WAKEUP_MODE BIT(9)
+
+/* MAC_TRANSMIT_CONTROL (0x0104) */
+#define MREGBIT_TRANSMIT_ENABLE BIT(0)
+#define MREGBIT_INVERT_FCS BIT(1)
+#define MREGBIT_DISABLE_FCS_INSERT BIT(2)
+#define MREGBIT_TRANSMIT_AUTO_RETRY BIT(3)
+#define MREGBIT_IFG_LEN GENMASK(6, 4)
+#define MREGBIT_PREAMBLE_LENGTH GENMASK(9, 7)
+
+/* MAC_RECEIVE_CONTROL (0x0108) */
+#define MREGBIT_RECEIVE_ENABLE BIT(0)
+#define MREGBIT_DISABLE_FCS_CHECK BIT(1)
+#define MREGBIT_STRIP_FCS BIT(2)
+#define MREGBIT_STORE_FORWARD BIT(3)
+#define MREGBIT_STATUS_FIRST BIT(4)
+#define MREGBIT_PASS_BAD_FRAMES BIT(5)
+#define MREGBIT_ACOOUNT_VLAN BIT(6)
+
+/* MAC_MAXIMUM_FRAME_SIZE (0x010c) */
+#define MREGBIT_MAX_FRAME_SIZE GENMASK(13, 0)
+
+/* MAC_TRANSMIT_JABBER_SIZE (0x0110) */
+#define MREGBIT_TRANSMIT_JABBER_SIZE GENMASK(15, 0)
+
+/* MAC_RECEIVE_JABBER_SIZE (0x0114) */
+#define MREGBIT_RECEIVE_JABBER_SIZE GENMASK(15, 0)
+
+/* MAC_ADDRESS_CONTROL (0x0118) */
+#define MREGBIT_MAC_ADDRESS1_ENABLE BIT(0)
+#define MREGBIT_MAC_ADDRESS2_ENABLE BIT(1)
+#define MREGBIT_MAC_ADDRESS3_ENABLE BIT(2)
+#define MREGBIT_MAC_ADDRESS4_ENABLE BIT(3)
+#define MREGBIT_INVERSE_MAC_ADDRESS1_ENABLE BIT(4)
+#define MREGBIT_INVERSE_MAC_ADDRESS2_ENABLE BIT(5)
+#define MREGBIT_INVERSE_MAC_ADDRESS3_ENABLE BIT(6)
+#define MREGBIT_INVERSE_MAC_ADDRESS4_ENABLE BIT(7)
+#define MREGBIT_PROMISCUOUS_MODE BIT(8)
+
+/* MAC_FC_CONTROL (0x0160) */
+#define MREGBIT_FC_DECODE_ENABLE BIT(0)
+#define MREGBIT_FC_GENERATION_ENABLE BIT(1)
+#define MREGBIT_AUTO_FC_GENERATION_ENABLE BIT(2)
+#define MREGBIT_MULTICAST_MODE BIT(3)
+#define MREGBIT_BLOCK_PAUSE_FRAMES BIT(4)
+
+/* MAC_FC_PAUSE_FRAME_GENERATE (0x0164) */
+#define MREGBIT_GENERATE_PAUSE_FRAME BIT(0)
+
+/* MAC_FC_PAUSE_TIME_VALUE (0x0180) */
+#define MREGBIT_MAC_FC_PAUSE_TIME GENMASK(15, 0)
+
+/* MAC_MDIO_CONTROL (0x01a0) */
+#define MREGBIT_PHY_ADDRESS GENMASK(4, 0)
+#define MREGBIT_REGISTER_ADDRESS GENMASK(9, 5)
+#define MREGBIT_MDIO_READ_WRITE BIT(10)
+#define MREGBIT_START_MDIO_TRANS BIT(15)
+
+/* MAC_MDIO_DATA (0x01a4) */
+#define MREGBIT_MDIO_DATA GENMASK(15, 0)
+
+/* MAC_RX_STATCTR_CONTROL (0x01a8) */
+#define MREGBIT_RX_COUNTER_NUMBER GENMASK(4, 0)
+#define MREGBIT_START_RX_COUNTER_READ BIT(15)
+
+/* MAC_RX_STATCTR_DATA_HIGH (0x01ac) */
+#define MREGBIT_RX_STATCTR_DATA_HIGH GENMASK(15, 0)
+/* MAC_RX_STATCTR_DATA_LOW (0x01b0) */
+#define MREGBIT_RX_STATCTR_DATA_LOW GENMASK(15, 0)
+
+/* MAC_TX_STATCTR_CONTROL (0x01b4) */
+#define MREGBIT_TX_COUNTER_NUMBER GENMASK(4, 0)
+#define MREGBIT_START_TX_COUNTER_READ BIT(15)
+
+/* MAC_TX_STATCTR_DATA_HIGH (0x01b8) */
+#define MREGBIT_TX_STATCTR_DATA_HIGH GENMASK(15, 0)
+/* MAC_TX_STATCTR_DATA_LOW (0x01bc) */
+#define MREGBIT_TX_STATCTR_DATA_LOW GENMASK(15, 0)
+
+/* MAC_TRANSMIT_FIFO_ALMOST_FULL (0x01c0) */
+#define MREGBIT_TX_FIFO_AF GENMASK(13, 0)
+
+/* MAC_TRANSMIT_PACKET_START_THRESHOLD (0x01c4) */
+#define MREGBIT_TX_PACKET_START_THRESHOLD GENMASK(13, 0)
+
+/* MAC_RECEIVE_PACKET_START_THRESHOLD (0x01c8) */
+#define MREGBIT_RX_PACKET_START_THRESHOLD GENMASK(13, 0)
+
+/* MAC_STATUS_IRQ (0x01e0) */
+#define MREGBIT_MAC_UNDERRUN_IRQ BIT(0)
+#define MREGBIT_MAC_JABBER_IRQ BIT(1)
+
+/* MAC_INTERRUPT_ENABLE (0x01e4) */
+#define MREGBIT_MAC_UNDERRUN_INTERRUPT_ENABLE BIT(0)
+#define MREGBIT_JABBER_INTERRUPT_ENABLE BIT(1)
+
+/* RX DMA descriptor */
+
+#define RX_DESC_0_FRAME_PACKET_LENGTH_MASK GENMASK(13, 0)
+#define RX_DESC_0_FRAME_ALIGN_ERR BIT(14)
+#define RX_DESC_0_FRAME_RUNT BIT(15)
+#define RX_DESC_0_FRAME_ETHERNET_TYPE BIT(16)
+#define RX_DESC_0_FRAME_VLAN BIT(17)
+#define RX_DESC_0_FRAME_MULTICAST BIT(18)
+#define RX_DESC_0_FRAME_BROADCAST BIT(19)
+#define RX_DESC_0_FRAME_CRC_ERR BIT(20)
+#define RX_DESC_0_FRAME_MAX_LEN_ERR BIT(21)
+#define RX_DESC_0_FRAME_JABBER_ERR BIT(22)
+#define RX_DESC_0_FRAME_LENGTH_ERR BIT(23)
+#define RX_DESC_0_FRAME_MAC_ADDR1_MATCH BIT(24)
+#define RX_DESC_0_FRAME_MAC_ADDR2_MATCH BIT(25)
+#define RX_DESC_0_FRAME_MAC_ADDR3_MATCH BIT(26)
+#define RX_DESC_0_FRAME_MAC_ADDR4_MATCH BIT(27)
+#define RX_DESC_0_FRAME_PAUSE_CTRL BIT(28)
+#define RX_DESC_0_LAST_DESCRIPTOR BIT(29)
+#define RX_DESC_0_FIRST_DESCRIPTOR BIT(30)
+#define RX_DESC_0_OWN BIT(31)
+
+#define RX_DESC_1_BUFFER_SIZE_1_MASK GENMASK(11, 0)
+#define RX_DESC_1_BUFFER_SIZE_2_MASK GENMASK(23, 12)
+ /* [24] reserved */
+#define RX_DESC_1_SECOND_ADDRESS_CHAINED BIT(25)
+#define RX_DESC_1_END_RING BIT(26)
+ /* [29:27] reserved */
+#define RX_DESC_1_RX_TIMESTAMP BIT(30)
+#define RX_DESC_1_PTP_PKT BIT(31)
+
+/* TX DMA descriptor */
+
+ /* [29:0] unused */
+#define TX_DESC_0_TX_TIMESTAMP BIT(30)
+#define TX_DESC_0_OWN BIT(31)
+
+#define TX_DESC_1_BUFFER_SIZE_1_MASK GENMASK(11, 0)
+#define TX_DESC_1_BUFFER_SIZE_2_MASK GENMASK(23, 12)
+#define TX_DESC_1_FORCE_EOP_ERROR BIT(24)
+#define TX_DESC_1_SECOND_ADDRESS_CHAINED BIT(25)
+#define TX_DESC_1_END_RING BIT(26)
+#define TX_DESC_1_DISABLE_PADDING BIT(27)
+#define TX_DESC_1_ADD_CRC_DISABLE BIT(28)
+#define TX_DESC_1_FIRST_SEGMENT BIT(29)
+#define TX_DESC_1_LAST_SEGMENT BIT(30)
+#define TX_DESC_1_INTERRUPT_ON_COMPLETION BIT(31)
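+
+/*
+ * An illustrative sketch (not driver code): a single-buffer frame that
+ * should interrupt on completion might set
+ *
+ * desc1 = FIELD_PREP(TX_DESC_1_BUFFER_SIZE_1_MASK, len) |
+ * TX_DESC_1_FIRST_SEGMENT | TX_DESC_1_LAST_SEGMENT |
+ * TX_DESC_1_INTERRUPT_ON_COMPLETION;
+ *
+ * handing ownership to hardware last by setting TX_DESC_0_OWN in desc0.
+ */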
+
+struct emac_desc {
+ u32 desc0;
+ u32 desc1;
+ u32 buffer_addr_1;
+ u32 buffer_addr_2;
+};
+
+/* Keep stats in this order, index used for accessing hardware */
+
+union emac_hw_tx_stats {
+ struct {
+ u64 tx_ok_pkts;
+ u64 tx_total_pkts;
+ u64 tx_ok_bytes;
+ u64 tx_err_pkts;
+ u64 tx_singleclsn_pkts;
+ u64 tx_multiclsn_pkts;
+ u64 tx_lateclsn_pkts;
+ u64 tx_excessclsn_pkts;
+ u64 tx_unicast_pkts;
+ u64 tx_multicast_pkts;
+ u64 tx_broadcast_pkts;
+ u64 tx_pause_pkts;
+ } stats;
+
+ DECLARE_FLEX_ARRAY(u64, array);
+};
+
+union emac_hw_rx_stats {
+ struct {
+ u64 rx_ok_pkts;
+ u64 rx_total_pkts;
+ u64 rx_crc_err_pkts;
+ u64 rx_align_err_pkts;
+ u64 rx_err_total_pkts;
+ u64 rx_ok_bytes;
+ u64 rx_total_bytes;
+ u64 rx_unicast_pkts;
+ u64 rx_multicast_pkts;
+ u64 rx_broadcast_pkts;
+ u64 rx_pause_pkts;
+ u64 rx_len_err_pkts;
+ u64 rx_len_undersize_pkts;
+ u64 rx_len_oversize_pkts;
+ u64 rx_len_fragment_pkts;
+ u64 rx_len_jabber_pkts;
+ u64 rx_64_pkts;
+ u64 rx_65_127_pkts;
+ u64 rx_128_255_pkts;
+ u64 rx_256_511_pkts;
+ u64 rx_512_1023_pkts;
+ u64 rx_1024_1518_pkts;
+ u64 rx_1519_plus_pkts;
+ u64 rx_drp_fifo_full_pkts;
+ u64 rx_truncate_fifo_full_pkts;
+ } stats;
+
+ DECLARE_FLEX_ARRAY(u64, array);
+};
+
+#endif /* _K1_EMAC_H_ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 67fa879b1e52..9507131875b2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -133,15 +133,17 @@ config DWMAC_QCOM_ETHQOS
stmmac device driver.
config DWMAC_RENESAS_GBETH
- tristate "Renesas RZ/V2H(P) GBETH support"
+ tristate "Renesas RZ/V2H(P) GBETH and RZ/T2H, RZ/N2H GMAC support"
default ARCH_RENESAS
depends on OF && (ARCH_RENESAS || COMPILE_TEST)
+ select PCS_RZN1_MIIC
help
- Support for Gigabit Ethernet Interface (GBETH) on Renesas
- RZ/V2H(P) SoCs.
+ Support for Gigabit Ethernet Interface (GBETH) / Ethernet MAC (GMAC)
+ on Renesas SoCs.
- This selects the Renesas RZ/V2H(P) Soc specific glue layer support
- for the stmmac device driver.
+ This selects Renesas SoC glue layer support for the stmmac device
+ driver. This driver is used for the RZ/V2H(P) family, RZ/T2H and
+ RZ/N2H SoCs.
config DWMAC_ROCKCHIP
tristate "Rockchip dwmac support"
@@ -263,6 +265,18 @@ config DWMAC_SUN8I
stmmac device driver. This driver is used for H3/A83T/A64
EMAC ethernet controller.
+config DWMAC_SUN55I
+ tristate "Allwinner sun55i GMAC200 support"
+ default ARCH_SUNXI
+ depends on OF && (ARCH_SUNXI || COMPILE_TEST)
+ select MDIO_BUS_MUX
+ help
+ Support for Allwinner A523/T527 GMAC200 ethernet controllers.
+
+ This selects Allwinner SoC glue layer support for the
+ stmmac device driver. This driver is used for A523/T527
+ GMAC200 ethernet controller.
+
config DWMAC_THEAD
tristate "T-HEAD dwmac support"
depends on OF && (ARCH_THEAD || COMPILE_TEST)
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index b591d93f8503..51e068e26ce4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
obj-$(CONFIG_DWMAC_STM32) += dwmac-stm32.o
obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
obj-$(CONFIG_DWMAC_SUN8I) += dwmac-sun8i.o
+obj-$(CONFIG_DWMAC_SUN55I) += dwmac-sun55i.o
obj-$(CONFIG_DWMAC_THEAD) += dwmac-thead.o
obj-$(CONFIG_DWMAC_DWC_QOS_ETH) += dwmac-dwc-qos-eth.o
obj-$(CONFIG_DWMAC_INTEL_PLAT) += dwmac-intel-plat.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index cbffccb3b9af..8f34c9ad457f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -228,6 +228,7 @@ struct stmmac_extra_stats {
unsigned long mtl_est_btrlm;
unsigned long max_sdu_txq_drop[MTL_MAX_TX_QUEUES];
unsigned long mtl_est_txq_hlbf[MTL_MAX_TX_QUEUES];
+ unsigned long mtl_est_txq_hlbs[MTL_MAX_TX_QUEUES];
/* per queue statistics */
struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES];
struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES];
@@ -602,7 +603,6 @@ struct mac_device_info {
unsigned int mcast_bits_log2;
unsigned int rx_csum;
unsigned int pcs;
- unsigned int pmt;
unsigned int ps;
unsigned int xlgmac;
unsigned int num_vlan;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index 09ae16e026eb..e8539cad4602 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -261,7 +261,8 @@ bypass_clk_reset_gpio:
plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
plat_dat->bsp_priv = eqos;
plat_dat->flags |= STMMAC_FLAG_SPH_DISABLE |
- STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP;
+ STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP |
+ STMMAC_FLAG_USE_PHY_WOL;
return 0;
@@ -330,15 +331,11 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
- ret = devm_clk_bulk_get_all(&pdev->dev, &plat_dat->clks);
+ ret = devm_clk_bulk_get_all_enabled(&pdev->dev, &plat_dat->clks);
if (ret < 0)
- return dev_err_probe(&pdev->dev, ret, "Failed to retrieve all required clocks\n");
+ return dev_err_probe(&pdev->dev, ret, "Failed to retrieve and enable all required clocks\n");
plat_dat->num_clks = ret;
- ret = clk_bulk_prepare_enable(plat_dat->num_clks, plat_dat->clks);
- if (ret)
- return dev_err_probe(&pdev->dev, ret, "Failed to enable clocks\n");
-
plat_dat->stmmac_clk = stmmac_pltfr_find_clk(plat_dat,
data->stmmac_clk_name);
@@ -346,7 +343,6 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
ret = data->probe(pdev, plat_dat, &stmmac_res);
if (ret < 0) {
dev_err_probe(&pdev->dev, ret, "failed to probe subdriver\n");
- clk_bulk_disable_unprepare(plat_dat->num_clks, plat_dat->clks);
return ret;
}
@@ -370,15 +366,11 @@ remove:
static void dwc_eth_dwmac_remove(struct platform_device *pdev)
{
const struct dwc_eth_dwmac_data *data = device_get_match_data(&pdev->dev);
- struct plat_stmmacenet_data *plat_dat = dev_get_platdata(&pdev->dev);
stmmac_dvr_remove(&pdev->dev);
if (data->remove)
data->remove(pdev);
-
- if (plat_dat)
- clk_bulk_disable_unprepare(plat_dat->num_clks, plat_dat->clks);
}
static const struct of_device_id dwc_eth_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index 889e2bb6f7f5..4268b9987237 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -49,7 +49,7 @@ struct imx_dwmac_ops {
u32 flags;
bool mac_rgmii_txclk_auto_adj;
- int (*fix_soc_reset)(void *priv, void __iomem *ioaddr);
+ int (*fix_soc_reset)(struct stmmac_priv *priv, void __iomem *ioaddr);
int (*set_intf_mode)(struct plat_stmmacenet_data *plat_dat);
void (*fix_mac_speed)(void *priv, int speed, unsigned int mode);
};
@@ -72,7 +72,7 @@ static int imx8mp_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
struct imx_priv_data *dwmac = plat_dat->bsp_priv;
int val;
- switch (plat_dat->mac_interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
val = GPR_ENET_QOS_INTF_SEL_MII;
break;
@@ -88,8 +88,8 @@ static int imx8mp_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
GPR_ENET_QOS_RGMII_EN;
break;
default:
- pr_debug("imx dwmac doesn't support %d interface\n",
- plat_dat->mac_interface);
+ pr_debug("imx dwmac doesn't support %s interface\n",
+ phy_modes(plat_dat->phy_interface));
return -EINVAL;
}
@@ -112,7 +112,7 @@ static int imx93_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
struct imx_priv_data *dwmac = plat_dat->bsp_priv;
int val, ret;
- switch (plat_dat->mac_interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
val = MX93_GPR_ENET_QOS_INTF_SEL_MII;
break;
@@ -134,8 +134,8 @@ static int imx93_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
val = MX93_GPR_ENET_QOS_INTF_SEL_RGMII;
break;
default:
- dev_dbg(dwmac->dev, "imx dwmac doesn't support %d interface\n",
- plat_dat->mac_interface);
+ dev_dbg(dwmac->dev, "imx dwmac doesn't support %s interface\n",
+ phy_modes(plat_dat->phy_interface));
return -EINVAL;
}
@@ -197,7 +197,7 @@ static int imx_dwmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
{
struct imx_priv_data *dwmac = bsp_priv;
- interface = dwmac->plat_dat->mac_interface;
+ interface = dwmac->plat_dat->phy_interface;
if (interface == PHY_INTERFACE_MODE_RMII ||
interface == PHY_INTERFACE_MODE_MII)
return 0;
@@ -215,8 +215,8 @@ static void imx_dwmac_fix_speed(void *priv, int speed, unsigned int mode)
plat_dat = dwmac->plat_dat;
if (dwmac->ops->mac_rgmii_txclk_auto_adj ||
- (plat_dat->mac_interface == PHY_INTERFACE_MODE_RMII) ||
- (plat_dat->mac_interface == PHY_INTERFACE_MODE_MII))
+ (plat_dat->phy_interface == PHY_INTERFACE_MODE_RMII) ||
+ (plat_dat->phy_interface == PHY_INTERFACE_MODE_MII))
return;
rate = rgmii_clock(speed);
@@ -265,16 +265,16 @@ static void imx93_dwmac_fix_speed(void *priv, int speed, unsigned int mode)
writel(old_ctrl, dwmac->base_addr + MAC_CTRL_REG);
}
-static int imx_dwmac_mx93_reset(void *priv, void __iomem *ioaddr)
+static int imx_dwmac_mx93_reset(struct stmmac_priv *priv, void __iomem *ioaddr)
{
- struct plat_stmmacenet_data *plat_dat = priv;
+ struct plat_stmmacenet_data *plat_dat = priv->plat;
u32 value = readl(ioaddr + DMA_BUS_MODE);
/* DMA SW reset */
value |= DMA_BUS_MODE_SFT_RESET;
writel(value, ioaddr + DMA_BUS_MODE);
- if (plat_dat->mac_interface == PHY_INTERFACE_MODE_RMII) {
+ if (plat_dat->phy_interface == PHY_INTERFACE_MODE_RMII) {
usleep_range(100, 200);
writel(RMII_RESET_SPEED, ioaddr + MAC_CTRL_REG);
}
@@ -301,6 +301,7 @@ imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev)
dwmac->clk_mem = NULL;
if (of_machine_is_compatible("fsl,imx8dxl") ||
+ of_machine_is_compatible("fsl,imx91") ||
of_machine_is_compatible("fsl,imx93")) {
dwmac->clk_mem = devm_clk_get(dev, "mem");
if (IS_ERR(dwmac->clk_mem)) {
@@ -310,9 +311,10 @@ imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev)
}
if (of_machine_is_compatible("fsl,imx8mp") ||
+ of_machine_is_compatible("fsl,imx91") ||
of_machine_is_compatible("fsl,imx93")) {
 /* Binding doc describes the property:
- * is required by i.MX8MP, i.MX93.
+ * is required by i.MX8MP, i.MX91, i.MX93.
 * is optional for i.MX8DXL.
*/
dwmac->intf_regmap =
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
index 15abe214131f..c1670f6bae14 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
@@ -90,7 +90,7 @@ static int jz4775_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
struct ingenic_mac *mac = plat_dat->bsp_priv;
unsigned int val;
- switch (plat_dat->mac_interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) |
FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_MII);
@@ -119,7 +119,8 @@ static int jz4775_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
break;
default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
+ dev_err(mac->dev, "Unsupported interface %s\n",
+ phy_modes(plat_dat->phy_interface));
return -EINVAL;
}
@@ -131,13 +132,14 @@ static int x1000_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
{
struct ingenic_mac *mac = plat_dat->bsp_priv;
- switch (plat_dat->mac_interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_RMII:
dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
break;
default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
+ dev_err(mac->dev, "Unsupported interface %s\n",
+ phy_modes(plat_dat->phy_interface));
return -EINVAL;
}
@@ -150,14 +152,15 @@ static int x1600_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
struct ingenic_mac *mac = plat_dat->bsp_priv;
unsigned int val;
- switch (plat_dat->mac_interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_RMII:
val = FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
break;
default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
+ dev_err(mac->dev, "Unsupported interface %s\n",
+ phy_modes(plat_dat->phy_interface));
return -EINVAL;
}
@@ -170,7 +173,7 @@ static int x1830_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
struct ingenic_mac *mac = plat_dat->bsp_priv;
unsigned int val;
- switch (plat_dat->mac_interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_RMII:
val = FIELD_PREP(MACPHYC_MODE_SEL_MASK, MACPHYC_MODE_SEL_RMII) |
FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
@@ -178,7 +181,8 @@ static int x1830_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
break;
default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
+ dev_err(mac->dev, "Unsupported interface %s\n",
+ phy_modes(plat_dat->phy_interface));
return -EINVAL;
}
@@ -191,7 +195,7 @@ static int x2000_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
struct ingenic_mac *mac = plat_dat->bsp_priv;
unsigned int val;
- switch (plat_dat->mac_interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_RMII:
val = FIELD_PREP(MACPHYC_TX_SEL_MASK, MACPHYC_TX_SEL_ORIGIN) |
FIELD_PREP(MACPHYC_RX_SEL_MASK, MACPHYC_RX_SEL_ORIGIN) |
@@ -221,7 +225,8 @@ static int x2000_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
break;
default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
+ dev_err(mac->dev, "Unsupported interface %s\n",
+ phy_modes(plat_dat->phy_interface));
return -EINVAL;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index ea33ae39be6b..e74d00984b88 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -371,9 +371,6 @@ static int intel_crosststamp(ktime_t *device,
u32 acr_value;
int i;
- if (!boot_cpu_has(X86_FEATURE_ART))
- return -EOPNOTSUPP;
-
intel_priv = priv->plat->bsp_priv;
/* Both internal crosstimestamping and external triggered event
@@ -566,7 +563,8 @@ static int intel_mac_finish(struct net_device *ndev,
static void common_default_data(struct plat_stmmacenet_data *plat)
{
- plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+ /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+ plat->clk_csr = STMMAC_CSR_20_35M;
plat->has_gmac = 1;
plat->force_sf_dma_mode = 1;
@@ -613,7 +611,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->pdev = pdev;
plat->phy_addr = -1;
- plat->clk_csr = 5;
+ plat->clk_csr = STMMAC_CSR_250_300M;
plat->has_gmac = 0;
plat->has_gmac4 = 1;
plat->force_sf_dma_mode = 0;
@@ -755,7 +753,9 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->int_snapshot_num = AUX_SNAPSHOT1;
- plat->crosststamp = intel_crosststamp;
+ if (boot_cpu_has(X86_FEATURE_ART))
+ plat->crosststamp = intel_crosststamp;
+
plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
/* Setup MSI vector offset specific to Intel mGbE controller */
@@ -1231,6 +1231,37 @@ static int stmmac_config_multi_msi(struct pci_dev *pdev,
return 0;
}
+static int intel_eth_pci_suspend(struct device *dev, void *bsp_priv)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ ret = pci_save_state(pdev);
+ if (ret)
+ return ret;
+
+ pci_wake_from_d3(pdev, true);
+ pci_set_power_state(pdev, PCI_D3hot);
+ return 0;
+}
+
+static int intel_eth_pci_resume(struct device *dev, void *bsp_priv)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ pci_restore_state(pdev);
+ pci_set_power_state(pdev, PCI_D0);
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ return 0;
+}
+
/**
* intel_eth_pci_probe
*
@@ -1292,6 +1323,9 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
pci_set_master(pdev);
plat->bsp_priv = intel_priv;
+ plat->suspend = intel_eth_pci_suspend;
+ plat->resume = intel_eth_pci_resume;
+
intel_priv->mdio_adhoc_addr = INTEL_MGBE_ADHOC_ADDR;
intel_priv->crossts_adj = 1;
@@ -1355,44 +1389,6 @@ static void intel_eth_pci_remove(struct pci_dev *pdev)
clk_unregister_fixed_rate(priv->plat->stmmac_clk);
}
-static int __maybe_unused intel_eth_pci_suspend(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- ret = stmmac_suspend(dev);
- if (ret)
- return ret;
-
- ret = pci_save_state(pdev);
- if (ret)
- return ret;
-
- pci_wake_from_d3(pdev, true);
- pci_set_power_state(pdev, PCI_D3hot);
- return 0;
-}
-
-static int __maybe_unused intel_eth_pci_resume(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- pci_restore_state(pdev);
- pci_set_power_state(pdev, PCI_D0);
-
- ret = pcim_enable_device(pdev);
- if (ret)
- return ret;
-
- pci_set_master(pdev);
-
- return stmmac_resume(dev);
-}
-
-static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
- intel_eth_pci_resume);
-
#define PCI_DEVICE_ID_INTEL_QUARK 0x0937
#define PCI_DEVICE_ID_INTEL_EHL_RGMII1G 0x4b30
#define PCI_DEVICE_ID_INTEL_EHL_SGMII1G 0x4b31
@@ -1442,7 +1438,7 @@ static struct pci_driver intel_eth_pci_driver = {
.probe = intel_eth_pci_probe,
.remove = intel_eth_pci_remove,
.driver = {
- .pm = &intel_eth_pm_ops,
+ .pm = &stmmac_simple_pm_ops,
},
};
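
Both PCI glue drivers in this series drop their private dev_pm_ops and route the bus-specific leg of suspend/resume through the new plat->suspend/plat->resume callbacks, leaving the ordering against the core to the shared stmmac_simple_pm_ops. A minimal sketch of how such a shared suspend handler could chain the two; the name stmmac_simple_suspend and the exact ordering are assumptions here, since the real implementation lives in the stmmac core and is not part of this hunk:

static int stmmac_simple_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret;

	/* Core suspend first, exactly as the deleted per-driver code did */
	ret = stmmac_suspend(dev);
	if (ret)
		return ret;

	/* Then the bus-specific leg, now supplied by the glue driver */
	if (priv->plat->suspend)
		return priv->plat->suspend(dev, priv->plat->bsp_priv);

	return 0;
}

Resume would run the callbacks in the opposite order: the glue callback first, to power the device back up, then stmmac_resume().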
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index e1591e6217d4..592aa9d636e5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -90,15 +90,14 @@ static void loongson_default_data(struct pci_dev *pdev,
/* Get bus_id, this can be overwritten later */
plat->bus_id = pci_dev_id(pdev);
- plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+ /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+ plat->clk_csr = STMMAC_CSR_20_35M;
plat->has_gmac = 1;
plat->force_sf_dma_mode = 1;
/* Set default value for multicast hash bins */
plat->multicast_filter_bins = 256;
- plat->mac_interface = PHY_INTERFACE_MODE_NA;
-
/* Set default value for unicast filter entries */
plat->unicast_filter_entries = 1;
@@ -509,10 +508,15 @@ static int loongson_dwmac_acpi_config(struct pci_dev *pdev,
}
/* Loongson's DWMAC device may take nearly two seconds to complete DMA reset */
-static int loongson_dwmac_fix_reset(void *priv, void __iomem *ioaddr)
+static int loongson_dwmac_fix_reset(struct stmmac_priv *priv, void __iomem *ioaddr)
{
u32 value = readl(ioaddr + DMA_BUS_MODE);
+ if (value & DMA_BUS_MODE_SFT_RESET) {
+ netdev_err(priv->dev, "the PHY clock is missing\n");
+ return -EINVAL;
+ }
+
value |= DMA_BUS_MODE_SFT_RESET;
writel(value, ioaddr + DMA_BUS_MODE);
@@ -521,6 +525,37 @@ static int loongson_dwmac_fix_reset(void *priv, void __iomem *ioaddr)
10000, 2000000);
}
+static int loongson_dwmac_suspend(struct device *dev, void *bsp_priv)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ ret = pci_save_state(pdev);
+ if (ret)
+ return ret;
+
+ pci_disable_device(pdev);
+ pci_wake_from_d3(pdev, true);
+ return 0;
+}
+
+static int loongson_dwmac_resume(struct device *dev, void *bsp_priv)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ pci_restore_state(pdev);
+ pci_set_power_state(pdev, PCI_D0);
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ return 0;
+}
+
static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct plat_stmmacenet_data *plat;
@@ -565,6 +600,8 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
plat->bsp_priv = ld;
plat->setup = loongson_dwmac_setup;
plat->fix_soc_reset = loongson_dwmac_fix_reset;
+ plat->suspend = loongson_dwmac_suspend;
+ plat->resume = loongson_dwmac_resume;
ld->dev = &pdev->dev;
ld->loongson_id = readl(res.addr + GMAC_VERSION) & 0xff;
@@ -621,44 +658,6 @@ static void loongson_dwmac_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static int __maybe_unused loongson_dwmac_suspend(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- ret = stmmac_suspend(dev);
- if (ret)
- return ret;
-
- ret = pci_save_state(pdev);
- if (ret)
- return ret;
-
- pci_disable_device(pdev);
- pci_wake_from_d3(pdev, true);
- return 0;
-}
-
-static int __maybe_unused loongson_dwmac_resume(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- pci_restore_state(pdev);
- pci_set_power_state(pdev, PCI_D0);
-
- ret = pci_enable_device(pdev);
- if (ret)
- return ret;
-
- pci_set_master(pdev);
-
- return stmmac_resume(dev);
-}
-
-static SIMPLE_DEV_PM_OPS(loongson_dwmac_pm_ops, loongson_dwmac_suspend,
- loongson_dwmac_resume);
-
static const struct pci_device_id loongson_dwmac_id_table[] = {
{ PCI_DEVICE_DATA(LOONGSON, GMAC1, &loongson_gmac_pci_info) },
{ PCI_DEVICE_DATA(LOONGSON, GMAC2, &loongson_gmac_pci_info) },
@@ -673,7 +672,7 @@ static struct pci_driver loongson_dwmac_driver = {
.probe = loongson_dwmac_probe,
.remove = loongson_dwmac_remove,
.driver = {
- .pm = &loongson_dwmac_pm_ops,
+ .pm = &stmmac_simple_pm_ops,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
index c0c44916f849..2562a6d036a2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
@@ -41,7 +41,6 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev)
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
- plat_dat->mac_interface = PHY_INTERFACE_MODE_NA;
plat_dat->has_gmac = true;
reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index 39421d6a34e4..f1b36f0a401d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -523,7 +523,7 @@ static int mediatek_dwmac_clk_init(struct mediatek_dwmac_plat_data *plat)
return ret;
}
-static int mediatek_dwmac_init(struct platform_device *pdev, void *priv)
+static int mediatek_dwmac_init(struct device *dev, void *priv)
{
struct mediatek_dwmac_plat_data *plat = priv;
const struct mediatek_dwmac_variant *variant = plat->variant;
@@ -532,7 +532,7 @@ static int mediatek_dwmac_init(struct platform_device *pdev, void *priv)
if (variant->dwmac_set_phy_interface) {
ret = variant->dwmac_set_phy_interface(plat);
if (ret) {
- dev_err(plat->dev, "failed to set phy interface, err = %d\n", ret);
+ dev_err(dev, "failed to set phy interface, err = %d\n", ret);
return ret;
}
}
@@ -540,7 +540,7 @@ static int mediatek_dwmac_init(struct platform_device *pdev, void *priv)
if (variant->dwmac_set_delay) {
ret = variant->dwmac_set_delay(plat);
if (ret) {
- dev_err(plat->dev, "failed to set delay value, err = %d\n", ret);
+ dev_err(dev, "failed to set delay value, err = %d\n", ret);
return ret;
}
}
@@ -589,7 +589,7 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev,
plat->maxmtu = ETH_DATA_LEN;
plat->host_dma_width = priv_plat->variant->dma_bit_mask;
plat->bsp_priv = priv_plat;
- plat->init = mediatek_dwmac_init;
+ plat->resume = mediatek_dwmac_init;
plat->clks_config = mediatek_dwmac_clks_config;
plat->safety_feat_cfg = devm_kzalloc(&pdev->dev,
@@ -654,7 +654,7 @@ static int mediatek_dwmac_probe(struct platform_device *pdev)
return PTR_ERR(plat_dat);
mediatek_dwmac_common_data(pdev, plat_dat, priv_plat);
- mediatek_dwmac_init(pdev, priv_plat);
+ mediatek_dwmac_init(&pdev->dev, priv_plat);
ret = mediatek_dwmac_clks_config(priv_plat, true);
if (ret)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
index df4ca897a60c..bc7bb975803c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c
@@ -16,12 +16,37 @@
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pcs-rzn1-miic.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
+#include <linux/types.h>
#include "stmmac_platform.h"
+/**
+ * struct renesas_gbeth_of_data - OF data for Renesas GBETH
+ *
+ * @clks: Array of clock names
+ * @num_clks: Number of clocks
+ * @stmmac_flags: Flags for the stmmac driver
+ * @handle_reset: Flag to indicate whether reset control is
+ * handled by the glue driver or the core driver.
+ * @set_clk_tx_rate: Flag to indicate whether the Tx clock is fixed or
+ * set_clk_tx_rate is needed.
+ * @has_pcs: Flag to indicate if the MAC has a PCS
+ */
+struct renesas_gbeth_of_data {
+ const char * const *clks;
+ u8 num_clks;
+ u32 stmmac_flags;
+ bool handle_reset;
+ bool set_clk_tx_rate;
+ bool has_pcs;
+};
+
struct renesas_gbeth {
+ const struct renesas_gbeth_of_data *of_data;
struct plat_stmmacenet_data *plat_dat;
struct reset_control *rstc;
struct device *dev;
@@ -31,6 +56,41 @@ static const char *const renesas_gbeth_clks[] = {
"tx", "tx-180", "rx", "rx-180",
};
+static const char *const renesas_gmac_clks[] = {
+ "tx",
+};
+
+static int renesas_gmac_pcs_init(struct stmmac_priv *priv)
+{
+ struct device_node *np = priv->device->of_node;
+ struct device_node *pcs_node;
+ struct phylink_pcs *pcs;
+
+ pcs_node = of_parse_phandle(np, "pcs-handle", 0);
+ if (pcs_node) {
+ pcs = miic_create(priv->device, pcs_node);
+ of_node_put(pcs_node);
+ if (IS_ERR(pcs))
+ return PTR_ERR(pcs);
+
+ priv->hw->phylink_pcs = pcs;
+ }
+
+ return 0;
+}
+
+static void renesas_gmac_pcs_exit(struct stmmac_priv *priv)
+{
+ if (priv->hw->phylink_pcs)
+ miic_destroy(priv->hw->phylink_pcs);
+}
+
+static struct phylink_pcs *renesas_gmac_select_pcs(struct stmmac_priv *priv,
+ phy_interface_t interface)
+{
+ return priv->hw->phylink_pcs;
+}
+
static int renesas_gbeth_init(struct platform_device *pdev, void *priv)
{
struct plat_stmmacenet_data *plat_dat;
@@ -70,6 +130,7 @@ static void renesas_gbeth_exit(struct platform_device *pdev, void *priv)
static int renesas_gbeth_probe(struct platform_device *pdev)
{
+ const struct renesas_gbeth_of_data *of_data;
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
struct device *dev = &pdev->dev;
@@ -91,14 +152,17 @@ static int renesas_gbeth_probe(struct platform_device *pdev)
if (!gbeth)
return -ENOMEM;
- plat_dat->num_clks = ARRAY_SIZE(renesas_gbeth_clks);
+ of_data = of_device_get_match_data(&pdev->dev);
+ gbeth->of_data = of_data;
+
+ plat_dat->num_clks = of_data->num_clks;
plat_dat->clks = devm_kcalloc(dev, plat_dat->num_clks,
sizeof(*plat_dat->clks), GFP_KERNEL);
if (!plat_dat->clks)
return -ENOMEM;
for (i = 0; i < plat_dat->num_clks; i++)
- plat_dat->clks[i].id = renesas_gbeth_clks[i];
+ plat_dat->clks[i].id = of_data->clks[i];
err = devm_clk_bulk_get(dev, plat_dat->num_clks, plat_dat->clks);
if (err < 0)
@@ -109,25 +173,49 @@ static int renesas_gbeth_probe(struct platform_device *pdev)
return dev_err_probe(dev, -EINVAL,
"error finding tx clock\n");
- gbeth->rstc = devm_reset_control_get_exclusive(dev, NULL);
- if (IS_ERR(gbeth->rstc))
- return PTR_ERR(gbeth->rstc);
+ if (of_data->handle_reset) {
+ gbeth->rstc = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(gbeth->rstc))
+ return PTR_ERR(gbeth->rstc);
+ }
gbeth->dev = dev;
gbeth->plat_dat = plat_dat;
plat_dat->bsp_priv = gbeth;
- plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
+ if (of_data->set_clk_tx_rate)
+ plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
plat_dat->init = renesas_gbeth_init;
plat_dat->exit = renesas_gbeth_exit;
- plat_dat->flags |= STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY |
- STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP |
- STMMAC_FLAG_SPH_DISABLE;
+ plat_dat->flags |= STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP |
+ gbeth->of_data->stmmac_flags;
+ if (of_data->has_pcs) {
+ plat_dat->pcs_init = renesas_gmac_pcs_init;
+ plat_dat->pcs_exit = renesas_gmac_pcs_exit;
+ plat_dat->select_pcs = renesas_gmac_select_pcs;
+ }
return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
+static const struct renesas_gbeth_of_data renesas_gbeth_of_data = {
+ .clks = renesas_gbeth_clks,
+ .num_clks = ARRAY_SIZE(renesas_gbeth_clks),
+ .handle_reset = true,
+ .set_clk_tx_rate = true,
+ .stmmac_flags = STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY |
+ STMMAC_FLAG_SPH_DISABLE,
+};
+
+static const struct renesas_gbeth_of_data renesas_gmac_of_data = {
+ .clks = renesas_gmac_clks,
+ .num_clks = ARRAY_SIZE(renesas_gmac_clks),
+ .stmmac_flags = STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY,
+ .has_pcs = true,
+};
+
static const struct of_device_id renesas_gbeth_match[] = {
- { .compatible = "renesas,rzv2h-gbeth", },
+ { .compatible = "renesas,r9a09g077-gbeth", .data = &renesas_gmac_of_data },
+ { .compatible = "renesas,rzv2h-gbeth", .data = &renesas_gbeth_of_data },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, renesas_gbeth_match);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 79b92130a03f..51ea0caf16c1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -8,6 +8,7 @@
*/
#include <linux/stmmac.h>
+#include <linux/hw_bitfield.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/phy.h>
@@ -71,7 +72,6 @@ struct rk_priv_data {
phy_interface_t phy_iface;
int id;
struct regulator *regulator;
- bool suspended;
const struct rk_gmac_ops *ops;
bool clk_enabled;
@@ -150,7 +150,7 @@ static int rk_set_clk_mac_speed(struct rk_priv_data *bsp_priv,
}
#define HIWORD_UPDATE(val, mask, shift) \
- ((val) << (shift) | (mask) << ((shift) + 16))
+ (FIELD_PREP_WM16((mask) << (shift), (val)))
#define GRF_BIT(nr) (BIT(nr) | BIT(nr+16))
#define GRF_CLR_BIT(nr) (BIT(nr+16))
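
Rockchip GRF registers take a write mask in their upper 16 bits: a low bit only changes when its mirror bit 16 positions up is set in the same write, so no read-modify-write is needed. A worked example of what the rewritten macro produces (operands chosen purely for illustration):

/*
 * HIWORD_UPDATE(0x1, 0x3, 2) == FIELD_PREP_WM16(0x3 << 2, 0x1)
 *	value half-word: 0x1 << 2         == 0x00000004
 *	mask half-word:  (0x3 << 2) << 16 == 0x000C0000
 *	word written:    0x000C0004  (only bits [3:2] are updated)
 */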
@@ -557,9 +557,7 @@ static const struct rk_gmac_ops rk3308_ops = {
#define RK3328_GMAC_RMII_MODE GRF_BIT(9)
#define RK3328_GMAC_RMII_MODE_CLR GRF_CLR_BIT(9)
#define RK3328_GMAC_TXCLK_DLY_ENABLE GRF_BIT(0)
-#define RK3328_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(0)
#define RK3328_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1)
-#define RK3328_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(0)
/* RK3328_GRF_MACPHY_CON1 */
#define RK3328_MACPHY_RMII_MODE GRF_BIT(9)
@@ -1706,6 +1704,28 @@ static int rk_set_clk_tx_rate(void *bsp_priv_, struct clk *clk_tx_i,
return -EINVAL;
}
+static int rk_gmac_suspend(struct device *dev, void *bsp_priv_)
+{
+ struct rk_priv_data *bsp_priv = bsp_priv_;
+
+ /* Keep the PHY up if we use Wake-on-LAN. */
+ if (!device_may_wakeup(dev))
+ rk_gmac_powerdown(bsp_priv);
+
+ return 0;
+}
+
+static int rk_gmac_resume(struct device *dev, void *bsp_priv_)
+{
+ struct rk_priv_data *bsp_priv = bsp_priv_;
+
+ /* The PHY was kept up for Wake-on-LAN. */
+ if (!device_may_wakeup(dev))
+ rk_gmac_powerup(bsp_priv);
+
+ return 0;
+}
+
static int rk_gmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
@@ -1738,6 +1758,8 @@ static int rk_gmac_probe(struct platform_device *pdev)
plat_dat->get_interfaces = rk_get_interfaces;
plat_dat->set_clk_tx_rate = rk_set_clk_tx_rate;
+ plat_dat->suspend = rk_gmac_suspend;
+ plat_dat->resume = rk_gmac_resume;
plat_dat->bsp_priv = rk_gmac_setup(pdev, plat_dat, data);
if (IS_ERR(plat_dat->bsp_priv))
@@ -1765,44 +1787,17 @@ err_gmac_powerdown:
static void rk_gmac_remove(struct platform_device *pdev)
{
- struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(&pdev->dev);
+ struct stmmac_priv *priv = netdev_priv(platform_get_drvdata(pdev));
+ struct rk_priv_data *bsp_priv = priv->plat->bsp_priv;
stmmac_dvr_remove(&pdev->dev);
rk_gmac_powerdown(bsp_priv);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int rk_gmac_suspend(struct device *dev)
-{
- struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(dev);
- int ret = stmmac_suspend(dev);
- /* Keep the PHY up if we use Wake-on-Lan. */
- if (!device_may_wakeup(dev)) {
- rk_gmac_powerdown(bsp_priv);
- bsp_priv->suspended = true;
- }
-
- return ret;
+ if (priv->plat->phy_node && bsp_priv->integrated_phy)
+ clk_put(bsp_priv->clk_phy);
}
-static int rk_gmac_resume(struct device *dev)
-{
- struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(dev);
-
- /* The PHY was up for Wake-on-Lan. */
- if (bsp_priv->suspended) {
- rk_gmac_powerup(bsp_priv);
- bsp_priv->suspended = false;
- }
-
- return stmmac_resume(dev);
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume);
-
static const struct of_device_id rk_gmac_dwmac_match[] = {
{ .compatible = "rockchip,px30-gmac", .data = &px30_ops },
{ .compatible = "rockchip,rk3128-gmac", .data = &rk3128_ops },
@@ -1828,7 +1823,7 @@ static struct platform_driver rk_gmac_dwmac_driver = {
.remove = rk_gmac_remove,
.driver = {
.name = "rk_gmac-dwmac",
- .pm = &rk_gmac_pm_ops,
+ .pm = &stmmac_simple_pm_ops,
.of_match_table = rk_gmac_dwmac_match,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 01dd0cf0923c..354f01184e6c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -234,7 +234,7 @@ err_node_put:
static int socfpga_get_plat_phymode(struct socfpga_dwmac *dwmac)
{
- return dwmac->plat_dat->mac_interface;
+ return dwmac->plat_dat->phy_interface;
}
static void socfpga_sgmii_config(struct socfpga_dwmac *dwmac, bool enable)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
index 2013d7477eb7..6938dd2a79b7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
@@ -38,7 +38,7 @@ static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat)
unsigned int mode;
int err;
- switch (plat_dat->mac_interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_RMII:
mode = STARFIVE_DWMAC_PHY_INFT_RMII;
break;
@@ -51,8 +51,8 @@ static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat)
break;
default:
- dev_err(dwmac->dev, "unsupported interface %d\n",
- plat_dat->mac_interface);
+ dev_err(dwmac->dev, "unsupported interface %s\n",
+ phy_modes(plat_dat->phy_interface));
return -EINVAL;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index 1eb16eec9c0d..6c179911ef3f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -171,7 +171,7 @@ static int stm32mp1_select_ethck_external(struct plat_stmmacenet_data *plat_dat)
{
struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
- switch (plat_dat->mac_interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
dwmac->enable_eth_ck = dwmac->ext_phyclk;
return 0;
@@ -193,7 +193,7 @@ static int stm32mp1_select_ethck_external(struct plat_stmmacenet_data *plat_dat)
default:
dwmac->enable_eth_ck = false;
dev_err(dwmac->dev, "Mode %s not supported",
- phy_modes(plat_dat->mac_interface));
+ phy_modes(plat_dat->phy_interface));
return -EINVAL;
}
}
@@ -206,7 +206,7 @@ static int stm32mp1_validate_ethck_rate(struct plat_stmmacenet_data *plat_dat)
if (!dwmac->enable_eth_ck)
return 0;
- switch (plat_dat->mac_interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
case PHY_INTERFACE_MODE_GMII:
if (clk_rate == ETH_CK_F_25M)
@@ -228,7 +228,7 @@ static int stm32mp1_validate_ethck_rate(struct plat_stmmacenet_data *plat_dat)
}
dev_err(dwmac->dev, "Mode %s does not match eth-ck frequency %d Hz",
- phy_modes(plat_dat->mac_interface), clk_rate);
+ phy_modes(plat_dat->phy_interface), clk_rate);
return -EINVAL;
}
@@ -238,7 +238,7 @@ static int stm32mp1_configure_pmcr(struct plat_stmmacenet_data *plat_dat)
u32 reg = dwmac->mode_reg;
int val = 0;
- switch (plat_dat->mac_interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
/*
* STM32MP15xx supports both MII and GMII, STM32MP13xx MII only.
@@ -269,12 +269,12 @@ static int stm32mp1_configure_pmcr(struct plat_stmmacenet_data *plat_dat)
break;
default:
dev_err(dwmac->dev, "Mode %s not supported",
- phy_modes(plat_dat->mac_interface));
+ phy_modes(plat_dat->phy_interface));
/* Do not manage other interfaces */
return -EINVAL;
}
- dev_dbg(dwmac->dev, "Mode %s", phy_modes(plat_dat->mac_interface));
+ dev_dbg(dwmac->dev, "Mode %s", phy_modes(plat_dat->phy_interface));
/* Shift value at correct ethernet MAC offset in SYSCFG_PMCSETR */
val <<= ffs(dwmac->mode_mask) - ffs(SYSCFG_MP1_ETH_MASK);
@@ -294,7 +294,7 @@ static int stm32mp2_configure_syscfg(struct plat_stmmacenet_data *plat_dat)
u32 reg = dwmac->mode_reg;
int val = 0;
- switch (plat_dat->mac_interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
/* ETH_REF_CLK_SEL bit in SYSCFG register is not applicable in MII mode */
break;
@@ -319,12 +319,12 @@ static int stm32mp2_configure_syscfg(struct plat_stmmacenet_data *plat_dat)
break;
default:
dev_err(dwmac->dev, "Mode %s not supported",
- phy_modes(plat_dat->mac_interface));
+ phy_modes(plat_dat->phy_interface));
/* Do not manage other interfaces */
return -EINVAL;
}
- dev_dbg(dwmac->dev, "Mode %s", phy_modes(plat_dat->mac_interface));
+ dev_dbg(dwmac->dev, "Mode %s", phy_modes(plat_dat->phy_interface));
/* Select PTP (IEEE1588) clock selection from RCC (ck_ker_ethxptp) */
val |= SYSCFG_ETHCR_ETH_PTP_CLK_SEL;
@@ -359,7 +359,7 @@ static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat)
u32 reg = dwmac->mode_reg;
int val;
- switch (plat_dat->mac_interface) {
+ switch (plat_dat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
val = SYSCFG_MCU_ETH_SEL_MII;
break;
@@ -368,12 +368,12 @@ static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat)
break;
default:
dev_err(dwmac->dev, "Mode %s not supported",
- phy_modes(plat_dat->mac_interface));
+ phy_modes(plat_dat->phy_interface));
/* Do not manage other interfaces */
return -EINVAL;
}
- dev_dbg(dwmac->dev, "Mode %s", phy_modes(plat_dat->mac_interface));
+ dev_dbg(dwmac->dev, "Mode %s", phy_modes(plat_dat->phy_interface));
return regmap_update_bits(dwmac->regmap, reg,
SYSCFG_MCU_ETH_MASK, val << 23);
@@ -498,6 +498,26 @@ static int stm32mp1_parse_data(struct stm32_dwmac *dwmac,
return err;
}
+static int stm32_dwmac_suspend(struct device *dev, void *bsp_priv)
+{
+ struct stm32_dwmac *dwmac = bsp_priv;
+
+ stm32_dwmac_clk_disable(dwmac);
+
+ return dwmac->ops->suspend ? dwmac->ops->suspend(dwmac) : 0;
+}
+
+static int stm32_dwmac_resume(struct device *dev, void *bsp_priv)
+{
+ struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(dev));
+ struct stm32_dwmac *dwmac = bsp_priv;
+
+ if (dwmac->ops->resume)
+ dwmac->ops->resume(dwmac);
+
+ return stm32_dwmac_init(priv->plat);
+}
+
static int stm32_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
@@ -535,6 +555,8 @@ static int stm32_dwmac_probe(struct platform_device *pdev)
plat_dat->flags |= STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP;
plat_dat->bsp_priv = dwmac;
+ plat_dat->suspend = stm32_dwmac_suspend;
+ plat_dat->resume = stm32_dwmac_resume;
ret = stm32_dwmac_init(plat_dat);
if (ret)
@@ -600,50 +622,6 @@ static void stm32mp1_resume(struct stm32_dwmac *dwmac)
clk_disable_unprepare(dwmac->clk_ethstp);
}
-#ifdef CONFIG_PM_SLEEP
-static int stm32_dwmac_suspend(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- struct stm32_dwmac *dwmac = priv->plat->bsp_priv;
-
- int ret;
-
- ret = stmmac_suspend(dev);
- if (ret)
- return ret;
-
- stm32_dwmac_clk_disable(dwmac);
-
- if (dwmac->ops->suspend)
- ret = dwmac->ops->suspend(dwmac);
-
- return ret;
-}
-
-static int stm32_dwmac_resume(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- struct stm32_dwmac *dwmac = priv->plat->bsp_priv;
- int ret;
-
- if (dwmac->ops->resume)
- dwmac->ops->resume(dwmac);
-
- ret = stm32_dwmac_init(priv->plat);
- if (ret)
- return ret;
-
- ret = stmmac_resume(dev);
-
- return ret;
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(stm32_dwmac_pm_ops,
- stm32_dwmac_suspend, stm32_dwmac_resume);
-
static struct stm32_ops stm32mcu_dwmac_data = {
.set_mode = stm32mcu_set_mode
};
@@ -691,7 +669,7 @@ static struct platform_driver stm32_dwmac_driver = {
.remove = stm32_dwmac_remove,
.driver = {
.name = "stm32-dwmac",
- .pm = &stm32_dwmac_pm_ops,
+ .pm = &stmmac_simple_pm_ops,
.of_match_table = stm32_dwmac_match,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun55i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun55i.c
new file mode 100644
index 000000000000..862df173d963
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun55i.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * dwmac-sun55i.c - Allwinner sun55i GMAC200 specific glue layer
+ *
+ * Copyright (C) 2025 Chen-Yu Tsai <wens@csie.org>
+ *
+ * syscon parts taken from dwmac-sun8i.c, which is
+ *
+ * Copyright (C) 2017 Corentin Labbe <clabbe.montjoie@gmail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/stmmac.h>
+
+#include "stmmac.h"
+#include "stmmac_platform.h"
+
+#define SYSCON_REG 0x34
+
+/* RMII specific bits */
+#define SYSCON_RMII_EN BIT(13) /* 1: enable RMII (overrides EPIT) */
+/* Generic system control EMAC_CLK bits */
+#define SYSCON_ETXDC_MASK GENMASK(12, 10)
+#define SYSCON_ERXDC_MASK GENMASK(9, 5)
+/* EMAC PHY Interface Type */
+#define SYSCON_EPIT BIT(2) /* 1: RGMII, 0: MII */
+#define SYSCON_ETCS_MASK GENMASK(1, 0)
+#define SYSCON_ETCS_MII 0x0
+#define SYSCON_ETCS_EXT_GMII 0x1
+#define SYSCON_ETCS_INT_GMII 0x2
+
+static int sun55i_gmac200_set_syscon(struct device *dev,
+ struct plat_stmmacenet_data *plat)
+{
+ struct device_node *node = dev->of_node;
+ struct regmap *regmap;
+ u32 val, reg = 0;
+ int ret;
+
+ regmap = syscon_regmap_lookup_by_phandle(node, "syscon");
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap), "Unable to map syscon\n");
+
+ if (!of_property_read_u32(node, "tx-internal-delay-ps", &val)) {
+ if (val % 100)
+ return dev_err_probe(dev, -EINVAL,
+ "tx-delay must be a multiple of 100ps\n");
+ val /= 100;
+ dev_dbg(dev, "set tx-delay to %x\n", val);
+ if (!FIELD_FIT(SYSCON_ETXDC_MASK, val))
+ return dev_err_probe(dev, -EINVAL,
+ "TX clock delay exceeds maximum (%u00ps > %lu00ps)\n",
+ val, FIELD_MAX(SYSCON_ETXDC_MASK));
+
+ reg |= FIELD_PREP(SYSCON_ETXDC_MASK, val);
+ }
+
+ if (!of_property_read_u32(node, "rx-internal-delay-ps", &val)) {
+ if (val % 100)
+ return dev_err_probe(dev, -EINVAL,
+ "rx-delay must be a multiple of 100ps\n");
+ val /= 100;
+ dev_dbg(dev, "set rx-delay to %x\n", val);
+ if (!FIELD_FIT(SYSCON_ERXDC_MASK, val))
+ return dev_err_probe(dev, -EINVAL,
+ "RX clock delay exceeds maximum (%u00ps > %lu00ps)\n",
+ val, FIELD_MAX(SYSCON_ERXDC_MASK));
+
+ reg |= FIELD_PREP(SYSCON_ERXDC_MASK, val);
+ }
+
+ switch (plat->phy_interface) {
+ case PHY_INTERFACE_MODE_MII:
+ /* default */
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ reg |= SYSCON_EPIT | SYSCON_ETCS_INT_GMII;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ reg |= SYSCON_RMII_EN;
+ break;
+ default:
+ return dev_err_probe(dev, -EINVAL, "Unsupported interface mode: %s",
+ phy_modes(plat->phy_interface));
+ }
+
+ ret = regmap_write(regmap, SYSCON_REG, reg);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to write to syscon\n");
+
+ return 0;
+}
+
+static int sun55i_gmac200_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct device *dev = &pdev->dev;
+ struct clk *clk;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ /* The vendor BSP disables Split Header, so do the same */
+ plat_dat->flags |= STMMAC_FLAG_SPH_DISABLE;
+ plat_dat->host_dma_width = 32;
+
+ ret = sun55i_gmac200_set_syscon(dev, plat_dat);
+ if (ret)
+ return ret;
+
+ clk = devm_clk_get_enabled(dev, "mbus");
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk),
+ "Failed to get or enable MBUS clock\n");
+
+ ret = devm_regulator_get_enable_optional(dev, "phy");
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get or enable PHY supply\n");
+
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
+}
+
+static const struct of_device_id sun55i_gmac200_match[] = {
+ { .compatible = "allwinner,sun55i-a523-gmac200" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sun55i_gmac200_match);
+
+static struct platform_driver sun55i_gmac200_driver = {
+ .probe = sun55i_gmac200_probe,
+ .driver = {
+ .name = "dwmac-sun55i",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = sun55i_gmac200_match,
+ },
+};
+module_platform_driver(sun55i_gmac200_driver);
+
+MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
+MODULE_DESCRIPTION("Allwinner sun55i GMAC200 specific glue layer");
+MODULE_LICENSE("GPL");
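
A quick worked example for the delay parsing in sun55i_gmac200_set_syscon() above (the device-tree value is assumed for illustration):

/*
 * tx-internal-delay-ps = <700>:
 *	700 % 100 == 0, so the value is accepted
 *	val = 700 / 100 = 7, and FIELD_FIT(SYSCON_ETXDC_MASK, 7) holds
 *	(SYSCON_ETXDC_MASK is GENMASK(12, 10), a 3-bit field, max 7)
 *	reg |= FIELD_PREP(SYSCON_ETXDC_MASK, 7)
 * tx-internal-delay-ps = <800> would be rejected: 8 exceeds the field.
 */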
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 2796dc426943..5d871b2cd111 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -31,10 +31,6 @@
*/
/* struct emac_variant - Describe dwmac-sun8i hardware variant
- * @default_syscon_value: The default value of the EMAC register in syscon
- * This value is used for disabling properly EMAC
- * and used as a good starting value in case of the
- * boot process(uboot) leave some stuff.
* @syscon_field reg_field for the syscon's gmac register
* @soc_has_internal_phy: Does the MAC embed an internal PHY
* @support_mii: Does the MAC handle MII
@@ -48,7 +44,6 @@
* value of zero indicates this is not supported.
*/
struct emac_variant {
- u32 default_syscon_value;
const struct reg_field *syscon_field;
bool soc_has_internal_phy;
bool support_mii;
@@ -94,7 +89,6 @@ static const struct reg_field sun8i_ccu_reg_field = {
};
static const struct emac_variant emac_variant_h3 = {
- .default_syscon_value = 0x58000,
.syscon_field = &sun8i_syscon_reg_field,
.soc_has_internal_phy = true,
.support_mii = true,
@@ -105,14 +99,12 @@ static const struct emac_variant emac_variant_h3 = {
};
static const struct emac_variant emac_variant_v3s = {
- .default_syscon_value = 0x38000,
.syscon_field = &sun8i_syscon_reg_field,
.soc_has_internal_phy = true,
.support_mii = true
};
static const struct emac_variant emac_variant_a83t = {
- .default_syscon_value = 0,
.syscon_field = &sun8i_syscon_reg_field,
.soc_has_internal_phy = false,
.support_mii = true,
@@ -122,7 +114,6 @@ static const struct emac_variant emac_variant_a83t = {
};
static const struct emac_variant emac_variant_r40 = {
- .default_syscon_value = 0,
.syscon_field = &sun8i_ccu_reg_field,
.support_mii = true,
.support_rgmii = true,
@@ -130,7 +121,6 @@ static const struct emac_variant emac_variant_r40 = {
};
static const struct emac_variant emac_variant_a64 = {
- .default_syscon_value = 0,
.syscon_field = &sun8i_syscon_reg_field,
.soc_has_internal_phy = false,
.support_mii = true,
@@ -141,7 +131,6 @@ static const struct emac_variant emac_variant_a64 = {
};
static const struct emac_variant emac_variant_h6 = {
- .default_syscon_value = 0x50000,
.syscon_field = &sun8i_syscon_reg_field,
/* The "Internal PHY" of H6 is not on the die. It's on the
* co-packaged AC200 chip instead.
@@ -933,25 +922,11 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
struct sunxi_priv_data *gmac = plat->bsp_priv;
struct device_node *node = dev->of_node;
int ret;
- u32 reg, val;
-
- ret = regmap_field_read(gmac->regmap_field, &val);
- if (ret) {
- dev_err(dev, "Fail to read from regmap field.\n");
- return ret;
- }
-
- reg = gmac->variant->default_syscon_value;
- if (reg != val)
- dev_warn(dev,
- "Current syscon value is not the default %x (expect %x)\n",
- val, reg);
+ u32 reg = 0, val;
if (gmac->variant->soc_has_internal_phy) {
if (of_property_read_bool(node, "allwinner,leds-active-low"))
reg |= H3_EPHY_LED_POL;
- else
- reg &= ~H3_EPHY_LED_POL;
/* Force EPHY xtal frequency to 24MHz. */
reg |= H3_EPHY_CLK_SEL;
@@ -965,11 +940,6 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
* address. No need to mask it again.
*/
reg |= ret << H3_EPHY_ADDR_SHIFT;
- } else {
- /* For SoCs without internal PHY the PHY selection bit should be
- * set to 0 (external PHY).
- */
- reg &= ~H3_EPHY_SELECT;
}
if (!of_property_read_u32(node, "allwinner,tx-delay-ps", &val)) {
@@ -980,8 +950,6 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
val /= 100;
dev_dbg(dev, "set tx-delay to %x\n", val);
if (val <= gmac->variant->tx_delay_max) {
- reg &= ~(gmac->variant->tx_delay_max <<
- SYSCON_ETXDC_SHIFT);
reg |= (val << SYSCON_ETXDC_SHIFT);
} else {
dev_err(dev, "Invalid TX clock delay: %d\n",
@@ -998,8 +966,6 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
val /= 100;
dev_dbg(dev, "set rx-delay to %x\n", val);
if (val <= gmac->variant->rx_delay_max) {
- reg &= ~(gmac->variant->rx_delay_max <<
- SYSCON_ERXDC_SHIFT);
reg |= (val << SYSCON_ERXDC_SHIFT);
} else {
dev_err(dev, "Invalid RX clock delay: %d\n",
@@ -1008,12 +974,7 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
}
}
- /* Clear interface mode bits */
- reg &= ~(SYSCON_ETCS_MASK | SYSCON_EPIT);
- if (gmac->variant->support_rmii)
- reg &= ~SYSCON_RMII_EN;
-
- switch (plat->mac_interface) {
+ switch (plat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
/* default */
break;
@@ -1028,7 +989,7 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
break;
default:
dev_err(dev, "Unsupported interface mode: %s",
- phy_modes(plat->mac_interface));
+ phy_modes(plat->phy_interface));
return -EINVAL;
}
@@ -1039,9 +1000,9 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
static void sun8i_dwmac_unset_syscon(struct sunxi_priv_data *gmac)
{
- u32 reg = gmac->variant->default_syscon_value;
-
- regmap_field_write(gmac->regmap_field, reg);
+ if (gmac->variant->soc_has_internal_phy)
+ regmap_field_write(gmac->regmap_field,
+ (H3_EPHY_SHUTDOWN | H3_EPHY_SELECT));
}
static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv)
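
The syscon rework above changes sun8i_dwmac_set_syscon() from patching the variant's default register value to composing the register from zero. The effect, sketched:

/*
 * Before: reg = variant->default_syscon_value;
 *         reg &= ~FIELD;           (clear each field explicitly)
 *         reg |= new_value;
 * After:  reg = 0;
 *         reg |= new_value;        (every relevant bit set from scratch)
 *
 * Since every bit the driver cares about is now written explicitly,
 * stale bootloader state in the syscon register cannot leak through,
 * and the per-variant default values become redundant.
 */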
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
index c72ee759aae5..a3378046b061 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
@@ -56,7 +56,7 @@ static int thead_dwmac_set_phy_if(struct plat_stmmacenet_data *plat)
struct thead_dwmac *dwmac = plat->bsp_priv;
u32 phyif;
- switch (plat->mac_interface) {
+ switch (plat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
phyif = PHY_INTF_MII_GMII;
break;
@@ -67,8 +67,8 @@ static int thead_dwmac_set_phy_if(struct plat_stmmacenet_data *plat)
phyif = PHY_INTF_RGMII;
break;
default:
- dev_err(dwmac->dev, "unsupported phy interface %d\n",
- plat->mac_interface);
+ dev_err(dwmac->dev, "unsupported phy interface %s\n",
+ phy_modes(plat->phy_interface));
return -EINVAL;
}
@@ -81,7 +81,7 @@ static int thead_dwmac_set_txclk_dir(struct plat_stmmacenet_data *plat)
struct thead_dwmac *dwmac = plat->bsp_priv;
u32 txclk_dir;
- switch (plat->mac_interface) {
+ switch (plat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
txclk_dir = TXCLK_DIR_INPUT;
break;
@@ -92,8 +92,8 @@ static int thead_dwmac_set_txclk_dir(struct plat_stmmacenet_data *plat)
txclk_dir = TXCLK_DIR_OUTPUT;
break;
default:
- dev_err(dwmac->dev, "unsupported phy interface %d\n",
- plat->mac_interface);
+ dev_err(dwmac->dev, "unsupported phy interface %s\n",
+ phy_modes(plat->phy_interface));
return -EINVAL;
}
@@ -112,7 +112,7 @@ static int thead_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
plat = dwmac->plat;
- switch (plat->mac_interface) {
+ switch (plat->phy_interface) {
/* For MII, rxc/txc is provided by phy */
case PHY_INTERFACE_MODE_MII:
return 0;
@@ -143,8 +143,8 @@ static int thead_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
return 0;
default:
- dev_err(dwmac->dev, "unsupported phy interface %d\n",
- plat->mac_interface);
+ dev_err(dwmac->dev, "unsupported phy interface %s\n",
+ phy_modes(plat->phy_interface));
return -EINVAL;
}
}
@@ -152,9 +152,9 @@ static int thead_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
static int thead_dwmac_enable_clk(struct plat_stmmacenet_data *plat)
{
struct thead_dwmac *dwmac = plat->bsp_priv;
- u32 reg;
+ u32 reg, div;
- switch (plat->mac_interface) {
+ switch (plat->phy_interface) {
case PHY_INTERFACE_MODE_MII:
reg = GMAC_RX_CLK_EN | GMAC_TX_CLK_EN;
break;
@@ -164,14 +164,21 @@ static int thead_dwmac_enable_clk(struct plat_stmmacenet_data *plat)
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
/* use pll */
+ div = clk_get_rate(plat->stmmac_clk) / rgmii_clock(SPEED_1000);
+ reg = FIELD_PREP(GMAC_PLLCLK_DIV_EN, 1) |
+ FIELD_PREP(GMAC_PLLCLK_DIV_NUM, div);
+
+ writel(0, dwmac->apb_base + GMAC_PLLCLK_DIV);
+ writel(reg, dwmac->apb_base + GMAC_PLLCLK_DIV);
+
writel(GMAC_GTXCLK_SEL_PLL, dwmac->apb_base + GMAC_GTXCLK_SEL);
reg = GMAC_TX_CLK_EN | GMAC_TX_CLK_N_EN | GMAC_TX_CLK_OUT_EN |
GMAC_RX_CLK_EN | GMAC_RX_CLK_N_EN;
break;
default:
- dev_err(dwmac->dev, "unsupported phy interface %d\n",
- plat->mac_interface);
+ dev_err(dwmac->dev, "unsupported phy interface %s\n",
+ phy_modes(plat->phy_interface));
return -EINVAL;
}
@@ -211,6 +218,7 @@ static int thead_dwmac_probe(struct platform_device *pdev)
struct stmmac_resources stmmac_res;
struct plat_stmmacenet_data *plat;
struct thead_dwmac *dwmac;
+ struct clk *apb_clk;
void __iomem *apb;
int ret;
@@ -224,6 +232,19 @@ static int thead_dwmac_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(plat),
"dt configuration failed\n");
+ /*
+ * The APB clock is essential for accessing glue registers. However,
+ * old devicetrees don't describe it correctly. We continue to probe
+ * and emit a warning if it isn't present.
+ */
+ apb_clk = devm_clk_get_enabled(&pdev->dev, "apb");
+ if (PTR_ERR(apb_clk) == -ENOENT)
+ dev_warn(&pdev->dev,
+ "cannot get apb clock, link may break after speed changes\n");
+ else if (IS_ERR(apb_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(apb_clk),
+ "failed to get apb clock\n");
+
dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
if (!dwmac)
return -ENOMEM;
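
For the PLL divider computed in thead_dwmac_enable_clk() above, a worked example with an assumed clock rate (the 500 MHz figure is illustrative, not taken from the patch):

/*
 * With clk_get_rate(plat->stmmac_clk) == 500000000 and
 * rgmii_clock(SPEED_1000) == 125000000:
 *	div = 500000000 / 125000000 = 4
 * The divider register is first cleared and then rewritten with
 * DIV_EN and the new ratio, bringing the GTX clock up at 125 MHz.
 */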
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index f4694fd576f5..3dec1a264cf6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -341,6 +341,7 @@ static inline u32 mtl_chanx_base_addr(const struct dwmac4_addrs *addrs,
#define MTL_OP_MODE_RFA_SHIFT 8
#define MTL_OP_MODE_EHFC BIT(7)
+#define MTL_OP_MODE_DIS_TCP_EF BIT(6)
#define MTL_OP_MODE_RTC_MASK GENMASK(1, 0)
#define MTL_OP_MODE_RTC_SHIFT 0
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index a5fb31eb0192..aac68dc28dc1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -110,16 +110,20 @@ static int dwmac4_wrback_get_rx_status(struct stmmac_extra_stats *x,
message_type = (rdes1 & ERDES4_MSG_TYPE_MASK) >> 8;
- if (rdes1 & RDES1_IP_HDR_ERROR)
+ if (rdes1 & RDES1_IP_HDR_ERROR) {
x->ip_hdr_err++;
+ ret |= csum_none;
+ }
if (rdes1 & RDES1_IP_CSUM_BYPASSED)
x->ip_csum_bypassed++;
if (rdes1 & RDES1_IPV4_HEADER)
x->ipv4_pkt_rcvd++;
if (rdes1 & RDES1_IPV6_HEADER)
x->ipv6_pkt_rcvd++;
- if (rdes1 & RDES1_IP_PAYLOAD_ERROR)
+ if (rdes1 & RDES1_IP_PAYLOAD_ERROR) {
x->ip_payload_err++;
+ ret |= csum_none;
+ }
if (message_type == RDES_EXT_NO_PTP)
x->no_ptp_rx_msg_type_ext++;
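
The two hunks above change the reported checksum status, not just the statistics. A note on the likely effect, based on how the stmmac core consumes the return value (hedged; the core logic itself is outside this patch):

/*
 * rx_status now returns a value with csum_none set whenever the
 * descriptor flags an IP header or payload error, so the core leaves
 * skb->ip_summed as CHECKSUM_NONE and the network stack re-verifies
 * the checksum in software instead of trusting the MAC's result.
 */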
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index 0cb84a0041a4..d87a8b595e6a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -268,6 +268,8 @@ static void dwmac4_dma_rx_chan_op_mode(struct stmmac_priv *priv,
mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(dwmac4_addrs, channel));
+ mtl_rx_op |= MTL_OP_MODE_DIS_TCP_EF;
+
if (mode == SF_DMA_MODE) {
pr_debug("GMAC: enable RX store and forward mode\n");
mtl_rx_op |= MTL_OP_MODE_RSF;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 4846bf49c576..467f1a05747e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -251,7 +251,7 @@ void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr)
void stmmac_set_mac_addr(void __iomem *ioaddr, const u8 addr[6],
unsigned int high, unsigned int low)
{
- unsigned long data;
+ u32 data;
data = (addr[5] << 8) | addr[4];
/* For MAC Addr registers we have to set the Address Enable (AE)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 6cadf8de4fdf..00e929bf280b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -49,6 +49,14 @@ static void dwxgmac2_core_init(struct mac_device_info *hw,
writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
}
+static void dwxgmac2_update_caps(struct stmmac_priv *priv)
+{
+ if (!priv->dma_cap.mbps_10_100)
+ priv->hw->link.caps &= ~(MAC_10 | MAC_100);
+ else if (!priv->dma_cap.half_duplex)
+ priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD);
+}
+
static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
{
u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
@@ -1424,6 +1432,7 @@ static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
const struct stmmac_ops dwxgmac210_ops = {
.core_init = dwxgmac2_core_init,
+ .update_caps = dwxgmac2_update_caps,
.set_mac = dwxgmac2_set_mac,
.rx_ipc = dwxgmac2_rx_ipc,
.rx_queue_enable = dwxgmac2_rx_queue_enable,
@@ -1532,8 +1541,8 @@ int dwxgmac2_setup(struct stmmac_priv *priv)
mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
- MAC_1000FD | MAC_2500FD | MAC_5000FD |
- MAC_10000FD;
+ MAC_10 | MAC_100 | MAC_1000FD |
+ MAC_2500FD | MAC_5000FD | MAC_10000FD;
mac->link.duplex = 0;
mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 5dcc95bc0ad2..4d6bb995d8d8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -203,10 +203,6 @@ static void dwxgmac2_dma_rx_mode(struct stmmac_priv *priv, void __iomem *ioaddr,
}
writel(value, ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
-
- /* Enable MTL RX overflow */
- value = readl(ioaddr + XGMAC_MTL_QINTEN(channel));
- writel(value | XGMAC_RXOIE, ioaddr + XGMAC_MTL_QINTEN(channel));
}
static void dwxgmac2_dma_tx_mode(struct stmmac_priv *priv, void __iomem *ioaddr,
@@ -386,8 +382,11 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
struct dma_features *dma_cap)
{
+ struct stmmac_priv *priv;
u32 hw_cap;
+ priv = container_of(dma_cap, struct stmmac_priv, dma_cap);
+
/* MAC HW feature 0 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0);
dma_cap->edma = (hw_cap & XGMAC_HWFEAT_EDMA) >> 31;
@@ -410,6 +409,8 @@ static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
dma_cap->vlhash = (hw_cap & XGMAC_HWFEAT_VLHASH) >> 4;
dma_cap->half_duplex = (hw_cap & XGMAC_HWFEAT_HDSEL) >> 3;
dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1;
+ if (dma_cap->mbps_1000 && priv->synopsys_id >= DWXGMAC_CORE_2_20)
+ dma_cap->mbps_10_100 = 1;
/* MAC HW feature 1 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1);
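
dwxgmac2_get_hw_feature() only receives the dma_cap pointer, but because struct dma_features is embedded by value in struct stmmac_priv, container_of() recovers the enclosing private structure without changing the callback prototype. The relationship, sketched:

/*
 * For any priv where dma_cap == &priv->dma_cap:
 *	container_of(dma_cap, struct stmmac_priv, dma_cap)
 *	  == (struct stmmac_priv *)((void *)dma_cap -
 *				    offsetof(struct stmmac_priv, dma_cap))
 * which is priv itself. This is only valid because every caller passes
 * a dma_cap that really lives inside a stmmac_priv.
 */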
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index 99635b37044a..3f7c765dcb79 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -100,7 +100,7 @@ int stmmac_reset(struct stmmac_priv *priv, void __iomem *ioaddr)
return -EINVAL;
if (plat && plat->fix_soc_reset)
- return plat->fix_soc_reset(plat, ioaddr);
+ return plat->fix_soc_reset(priv, ioaddr);
return stmmac_do_callback(priv, dma, reset, ioaddr);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index cda09cf5dcca..7ca5477be390 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -289,8 +289,7 @@ struct stmmac_priv {
u32 msg_enable;
int wolopts;
int wol_irq;
- bool wol_irq_disabled;
- int clk_csr;
+ u32 gmii_address_bus_config;
struct timer_list eee_ctrl_timer;
int lpi_irq;
u32 tx_lpi_timer;
@@ -374,6 +373,18 @@ enum stmmac_state {
STMMAC_SERVICE_SCHED,
};
+extern const struct dev_pm_ops stmmac_simple_pm_ops;
+
+static inline bool stmmac_wol_enabled_mac(struct stmmac_priv *priv)
+{
+ return priv->plat->pmt && device_may_wakeup(priv->device);
+}
+
+static inline bool stmmac_wol_enabled_phy(struct stmmac_priv *priv)
+{
+ return !priv->plat->pmt && device_may_wakeup(priv->device);
+}
+
int stmmac_mdio_unregister(struct net_device *ndev);
int stmmac_mdio_register(struct net_device *ndev);
int stmmac_mdio_reset(struct mii_bus *mii);
@@ -381,7 +392,6 @@ int stmmac_pcs_setup(struct net_device *ndev);
void stmmac_pcs_clean(struct net_device *ndev);
void stmmac_set_ethtool_ops(struct net_device *netdev);
-int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags);
void stmmac_ptp_register(struct stmmac_priv *priv);
void stmmac_ptp_unregister(struct stmmac_priv *priv);
int stmmac_xdp_open(struct net_device *dev);
@@ -394,7 +404,6 @@ int stmmac_dvr_probe(struct device *device,
struct stmmac_resources *res);
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt);
int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size);
-int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled);
int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
phy_interface_t interface, int speed);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
index ac6f2e3a3fcd..4b513d27a988 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
@@ -63,7 +63,7 @@ static int est_configure(struct stmmac_priv *priv, struct stmmac_est *cfg,
EST_GMAC5_PTOV_SHIFT;
}
if (cfg->enable)
- ctrl |= EST_EEST | EST_SSWL;
+ ctrl |= EST_EEST | EST_SSWL | EST_DFBS;
else
ctrl &= ~EST_EEST;
@@ -109,6 +109,10 @@ static void est_irq_status(struct stmmac_priv *priv, struct net_device *dev,
x->mtl_est_hlbs++;
+ for (i = 0; i < txqcnt; i++)
+ if (value & BIT(i))
+ x->mtl_est_txq_hlbs[i]++;
+
/* Clear Interrupt */
writel(value, est_addr + EST_SCH_ERR);
@@ -131,10 +135,9 @@ static void est_irq_status(struct stmmac_priv *priv, struct net_device *dev,
x->mtl_est_hlbf++;
- for (i = 0; i < txqcnt; i++) {
+ for (i = 0; i < txqcnt; i++)
if (feqn & BIT(i))
x->mtl_est_txq_hlbf[i]++;
- }
/* Clear Interrupt */
writel(feqn, est_addr + EST_FRM_SZ_ERR);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h
index d247fa383a6e..f70221c9c84a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.h
@@ -16,6 +16,7 @@
#define EST_XGMAC_PTOV_MUL 9
#define EST_SSWL BIT(1)
#define EST_EEST BIT(0)
+#define EST_DFBS BIT(5)
#define EST_STATUS 0x00000008
#define EST_GMAC5_BTRL GENMASK(11, 8)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 77758a7299b4..39fa1ec92f82 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -803,7 +803,6 @@ static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct stmmac_priv *priv = netdev_priv(dev);
- u32 support = WAKE_MAGIC | WAKE_UCAST;
if (!device_can_wakeup(priv->device))
return -EOPNOTSUPP;
@@ -816,29 +815,7 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
return ret;
}
- /* By default almost all GMAC devices support the WoL via
- * magic frame but we can disable it if the HW capability
- * register shows no support for pmt_magic_frame. */
- if ((priv->hw_cap_support) && (!priv->dma_cap.pmt_magic_frame))
- wol->wolopts &= ~WAKE_MAGIC;
-
- if (wol->wolopts & ~support)
- return -EINVAL;
-
- if (wol->wolopts) {
- pr_info("stmmac: wakeup enable\n");
- device_set_wakeup_enable(priv->device, 1);
- /* Avoid unbalanced enable_irq_wake calls */
- if (priv->wol_irq_disabled)
- enable_irq_wake(priv->wol_irq);
- priv->wol_irq_disabled = false;
- } else {
- device_set_wakeup_enable(priv->device, 0);
- /* Avoid unbalanced disable_irq_wake calls */
- if (!priv->wol_irq_disabled)
- disable_irq_wake(priv->wol_irq);
- priv->wol_irq_disabled = true;
- }
+ device_set_wakeup_enable(priv->device, !!wol->wolopts);
mutex_lock(&priv->lock);
priv->wolopts = wol->wolopts;
@@ -852,9 +829,6 @@ static int stmmac_ethtool_op_get_eee(struct net_device *dev,
{
struct stmmac_priv *priv = netdev_priv(dev);
- if (!priv->dma_cap.eee)
- return -EOPNOTSUPP;
-
return phylink_ethtool_get_eee(priv->phylink, edata);
}
@@ -863,9 +837,6 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
{
struct stmmac_priv *priv = netdev_priv(dev);
- if (!priv->dma_cap.eee)
- return -EOPNOTSUPP;
-
return phylink_ethtool_set_eee(priv->phylink, edata);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index e2840fa241f2..bb110124f21e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -135,7 +135,6 @@ static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
static int config_addend(void __iomem *ioaddr, u32 addend)
{
u32 value;
- int limit;
writel(addend, ioaddr + PTP_TAR);
/* issue command to update the addend value */
@@ -144,23 +143,15 @@ static int config_addend(void __iomem *ioaddr, u32 addend)
writel(value, ioaddr + PTP_TCR);
/* wait for present addend update to complete */
- limit = 10;
- while (limit--) {
- if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSADDREG))
- break;
- mdelay(10);
- }
- if (limit < 0)
- return -EBUSY;
-
- return 0;
+ return readl_poll_timeout_atomic(ioaddr + PTP_TCR, value,
+ !(value & PTP_TCR_TSADDREG),
+ 10, 100000);
}
static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
int add_sub, int gmac4)
{
u32 value;
- int limit;
if (add_sub) {
/* If the new sec value needs to be subtracted with
@@ -187,16 +178,9 @@ static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
writel(value, ioaddr + PTP_TCR);
/* wait for present system time adjust/update to complete */
- limit = 10;
- while (limit--) {
- if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSUPDT))
- break;
- mdelay(10);
- }
- if (limit < 0)
- return -EBUSY;
-
- return 0;
+ return readl_poll_timeout_atomic(ioaddr + PTP_TCR, value,
+ !(value & PTP_TCR_TSUPDT),
+ 10, 100000);
}
static void get_systime(void __iomem *ioaddr, u64 *systime)
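
Each converted loop above preserves the old budget: ten mdelay(10) iterations are roughly 100 ms, matching the 100000 us timeout, while the poll interval tightens from 10 ms to 10 us. As a sketch, readl_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) behaves approximately like:

/*
 *	remaining = timeout_us;
 *	for (;;) {
 *		val = readl(addr);
 *		if (cond)
 *			return 0;
 *		if (remaining <= 0)
 *			return -ETIMEDOUT;
 *		udelay(delay_us);
 *		remaining -= delay_us;
 *	}
 *
 * (The real macro in <linux/iopoll.h> also performs one final read
 * after the timeout before giving up.)
 */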
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index f1abf4242cd2..650d75b73e0b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -29,6 +29,7 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
@@ -146,38 +147,6 @@ static void stmmac_exit_fs(struct net_device *dev);
#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
-int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
-{
- int ret = 0;
-
- if (enabled) {
- ret = clk_prepare_enable(priv->plat->stmmac_clk);
- if (ret)
- return ret;
- ret = clk_prepare_enable(priv->plat->pclk);
- if (ret) {
- clk_disable_unprepare(priv->plat->stmmac_clk);
- return ret;
- }
- if (priv->plat->clks_config) {
- ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
- if (ret) {
- clk_disable_unprepare(priv->plat->stmmac_clk);
- clk_disable_unprepare(priv->plat->pclk);
- return ret;
- }
- }
- } else {
- clk_disable_unprepare(priv->plat->stmmac_clk);
- clk_disable_unprepare(priv->plat->pclk);
- if (priv->plat->clks_config)
- priv->plat->clks_config(priv->plat->bsp_priv, enabled);
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
-
/**
* stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock
* @bsp_priv: BSP private data structure (unused)
@@ -312,77 +281,6 @@ static void stmmac_global_err(struct stmmac_priv *priv)
stmmac_service_event_schedule(priv);
}
-/**
- * stmmac_clk_csr_set - dynamically set the MDC clock
- * @priv: driver private structure
- * Description: this is to dynamically set the MDC clock according to the csr
- * clock input.
- * Note:
- * If a specific clk_csr value is passed from the platform
- * this means that the CSR Clock Range selection cannot be
- * changed at run-time and it is fixed (as reported in the driver
- * documentation). Viceversa the driver will try to set the MDC
- * clock dynamically according to the actual clock input.
- */
-static void stmmac_clk_csr_set(struct stmmac_priv *priv)
-{
- unsigned long clk_rate;
-
- clk_rate = clk_get_rate(priv->plat->stmmac_clk);
-
- /* Platform provided default clk_csr would be assumed valid
- * for all other cases except for the below mentioned ones.
- * For values higher than the IEEE 802.3 specified frequency
- * we can not estimate the proper divider as it is not known
- * the frequency of clk_csr_i. So we do not change the default
- * divider.
- */
- if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
- if (clk_rate < CSR_F_35M)
- priv->clk_csr = STMMAC_CSR_20_35M;
- else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
- priv->clk_csr = STMMAC_CSR_35_60M;
- else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
- priv->clk_csr = STMMAC_CSR_60_100M;
- else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
- priv->clk_csr = STMMAC_CSR_100_150M;
- else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
- priv->clk_csr = STMMAC_CSR_150_250M;
- else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
- priv->clk_csr = STMMAC_CSR_250_300M;
- else if ((clk_rate >= CSR_F_300M) && (clk_rate < CSR_F_500M))
- priv->clk_csr = STMMAC_CSR_300_500M;
- else if ((clk_rate >= CSR_F_500M) && (clk_rate < CSR_F_800M))
- priv->clk_csr = STMMAC_CSR_500_800M;
- }
-
- if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
- if (clk_rate > 160000000)
- priv->clk_csr = 0x03;
- else if (clk_rate > 80000000)
- priv->clk_csr = 0x02;
- else if (clk_rate > 40000000)
- priv->clk_csr = 0x01;
- else
- priv->clk_csr = 0;
- }
-
- if (priv->plat->has_xgmac) {
- if (clk_rate > 400000000)
- priv->clk_csr = 0x5;
- else if (clk_rate > 350000000)
- priv->clk_csr = 0x4;
- else if (clk_rate > 300000000)
- priv->clk_csr = 0x3;
- else if (clk_rate > 250000000)
- priv->clk_csr = 0x2;
- else if (clk_rate > 150000000)
- priv->clk_csr = 0x1;
- else
- priv->clk_csr = 0x0;
- }
-}
-
static void print_pkt(unsigned char *buf, int len)
{
pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
@@ -795,16 +693,14 @@ static int stmmac_hwtstamp_get(struct net_device *dev,
* Will be rerun after resuming from suspend, case in which the timestamping
* flags updated by stmmac_hwtstamp_set() also need to be restored.
*/
-int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
+static int stmmac_init_tstamp_counter(struct stmmac_priv *priv,
+ u32 systime_flags)
{
bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
struct timespec64 now;
u32 sec_inc = 0;
u64 temp = 0;
- if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
- return -EOPNOTSUPP;
-
if (!priv->plat->clk_ptp_rate) {
netdev_err(priv->dev, "Invalid PTP clock rate");
return -EINVAL;
@@ -839,16 +735,15 @@ int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
return 0;
}
-EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
/**
- * stmmac_init_ptp - init PTP
+ * stmmac_init_timestamping - initialise timestamping
* @priv: driver private structure
* Description: this is to verify if the HW supports the PTPv1 or PTPv2.
* This is done by looking at the HW cap. register.
* This function also registers the ptp driver.
*/
-static int stmmac_init_ptp(struct stmmac_priv *priv)
+static int stmmac_init_timestamping(struct stmmac_priv *priv)
{
bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
int ret;
@@ -856,9 +751,16 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
if (priv->plat->ptp_clk_freq_config)
priv->plat->ptp_clk_freq_config(priv);
+ if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) {
+ netdev_info(priv->dev, "PTP not supported by HW\n");
+ return -EOPNOTSUPP;
+ }
+
ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
- if (ret)
+ if (ret) {
+ netdev_warn(priv->dev, "PTP init failed\n");
return ret;
+ }
priv->adv_ts = 0;
/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
@@ -884,10 +786,24 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
return 0;
}
+static void stmmac_setup_ptp(struct stmmac_priv *priv)
+{
+ int ret;
+
+ ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
+ if (ret < 0)
+ netdev_warn(priv->dev,
+ "failed to enable PTP reference clock: %pe\n",
+ ERR_PTR(ret));
+
+ if (stmmac_init_timestamping(priv) == 0)
+ stmmac_ptp_register(priv);
+}
+
static void stmmac_release_ptp(struct stmmac_priv *priv)
{
- clk_disable_unprepare(priv->plat->clk_ptp_ref);
stmmac_ptp_unregister(priv);
+ clk_disable_unprepare(priv->plat->clk_ptp_ref);
}
/**
@@ -1169,7 +1085,7 @@ static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
*/
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
- int interface = priv->plat->mac_interface;
+ int interface = priv->plat->phy_interface;
if (priv->dma_cap.pcs) {
if ((interface == PHY_INTERFACE_MODE_RGMII) ||
@@ -1196,13 +1112,19 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
static int stmmac_init_phy(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ int mode = priv->plat->phy_interface;
struct fwnode_handle *phy_fwnode;
struct fwnode_handle *fwnode;
+ struct ethtool_keee eee;
int ret;
if (!phylink_expects_phy(priv->phylink))
return 0;
+ if (priv->hw->xpcs &&
+ xpcs_get_an_mode(priv->hw->xpcs, mode) == DW_AN_C73)
+ return 0;
+
fwnode = priv->plat->port_node;
if (!fwnode)
fwnode = dev_fwnode(priv->device);
@@ -1236,19 +1158,20 @@ static int stmmac_init_phy(struct net_device *dev)
ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
}
- if (ret == 0) {
- struct ethtool_keee eee;
+ if (ret) {
+ netdev_err(priv->dev, "cannot attach to PHY (error: %pe)\n",
+ ERR_PTR(ret));
+ return ret;
+ }
- /* Configure phylib's copy of the LPI timer. Normally,
- * phylink_config.lpi_timer_default would do this, but there is
- * a chance that userspace could change the eee_timer setting
- * via sysfs before the first open. Thus, preserve existing
- * behaviour.
- */
- if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
- eee.tx_lpi_timer = priv->tx_lpi_timer;
- phylink_ethtool_set_eee(priv->phylink, &eee);
- }
+ /* Configure phylib's copy of the LPI timer. Normally,
+ * phylink_config.lpi_timer_default would do this, but there is a
+ * chance that userspace could change the eee_timer setting via sysfs
+ * before the first open. Thus, preserve existing behaviour.
+ */
+ if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
+ eee.tx_lpi_timer = priv->tx_lpi_timer;
+ phylink_ethtool_set_eee(priv->phylink, &eee);
}
if (!priv->plat->pmt) {
@@ -1259,7 +1182,7 @@ static int stmmac_init_phy(struct net_device *dev)
device_set_wakeup_enable(priv->device, !!wol.wolopts);
}
- return ret;
+ return 0;
}
static int stmmac_phy_setup(struct stmmac_priv *priv)
@@ -2584,6 +2507,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
+ bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
struct xsk_buff_pool *pool = tx_q->xsk_pool;
unsigned int entry = tx_q->cur_tx;
struct dma_desc *tx_desc = NULL;
@@ -2671,7 +2595,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
}
stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
- true, priv->mode, true, true,
+ csum, priv->mode, true, true,
xdp_desc.len);
stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
@@ -3476,7 +3400,6 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
/**
* stmmac_hw_setup - setup mac in a usable state.
* @dev : pointer to the device structure.
- * @ptp_register: register PTP if set
* Description:
* this is the main function to setup the HW in a usable state because the
* dma engine is reset, the core registers are configured (e.g. AXI,
@@ -3486,7 +3409,7 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
*/
-static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
+static int stmmac_hw_setup(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 rx_cnt = priv->plat->rx_queues_to_use;
@@ -3557,22 +3480,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
stmmac_mmc_setup(priv);
- if (ptp_register) {
- ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
- if (ret < 0)
- netdev_warn(priv->dev,
- "failed to enable PTP reference clock: %pe\n",
- ERR_PTR(ret));
- }
-
- ret = stmmac_init_ptp(priv);
- if (ret == -EOPNOTSUPP)
- netdev_info(priv->dev, "PTP not supported by HW\n");
- else if (ret)
- netdev_warn(priv->dev, "PTP init failed\n");
- else if (ptp_register)
- stmmac_ptp_register(priv);
-
if (priv->use_riwt) {
u32 queue;
@@ -3636,13 +3543,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
return 0;
}
-static void stmmac_hw_teardown(struct net_device *dev)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
-
- clk_disable_unprepare(priv->plat->clk_ptp_ref);
-}
-
static void stmmac_free_irq(struct net_device *dev,
enum request_irq_err irq_err, int irq_idx)
{
@@ -3724,7 +3624,6 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
/* Request the Wake IRQ in case of another line
* is used for WoL
*/
- priv->wol_irq_disabled = true;
if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
int_name = priv->int_name_wol;
sprintf(int_name, "%s:%s", dev->name, "wol");
@@ -3885,7 +3784,6 @@ static int stmmac_request_irq_single(struct net_device *dev)
/* Request the Wake IRQ in case of another line
* is used for WoL
*/
- priv->wol_irq_disabled = true;
if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
ret = request_irq(priv->wol_irq, stmmac_interrupt,
IRQF_SHARED, dev->name, dev);
@@ -4034,29 +3932,9 @@ static int __stmmac_open(struct net_device *dev,
struct stmmac_dma_conf *dma_conf)
{
struct stmmac_priv *priv = netdev_priv(dev);
- int mode = priv->plat->phy_interface;
u32 chan;
int ret;
- /* Initialise the tx lpi timer, converting from msec to usec */
- if (!priv->tx_lpi_timer)
- priv->tx_lpi_timer = eee_timer * 1000;
-
- ret = pm_runtime_resume_and_get(priv->device);
- if (ret < 0)
- return ret;
-
- if ((!priv->hw->xpcs ||
- xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
- ret = stmmac_init_phy(dev);
- if (ret) {
- netdev_err(priv->dev,
- "%s: Cannot attach to PHY (error: %d)\n",
- __func__, ret);
- goto init_phy_error;
- }
- }
-
for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
@@ -4074,12 +3952,14 @@ static int __stmmac_open(struct net_device *dev,
}
}
- ret = stmmac_hw_setup(dev, true);
+ ret = stmmac_hw_setup(dev);
if (ret < 0) {
netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
goto init_error;
}
+ stmmac_setup_ptp(priv);
+
stmmac_init_coalesce(priv);
phylink_start(priv->phylink);
@@ -4102,11 +3982,8 @@ irq_error:
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
- stmmac_hw_teardown(dev);
+ stmmac_release_ptp(priv);
init_error:
- phylink_disconnect_phy(priv->phylink);
-init_phy_error:
- pm_runtime_put(priv->device);
return ret;
}
@@ -4116,34 +3993,54 @@ static int stmmac_open(struct net_device *dev)
struct stmmac_dma_conf *dma_conf;
int ret;
+ /* Initialise the tx lpi timer, converting from msec to usec */
+ if (!priv->tx_lpi_timer)
+ priv->tx_lpi_timer = eee_timer * 1000;
+
dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
if (IS_ERR(dma_conf))
return PTR_ERR(dma_conf);
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
+ goto err_dma_resources;
+
+ ret = stmmac_init_phy(dev);
+ if (ret)
+ goto err_runtime_pm;
+
ret = __stmmac_open(dev, dma_conf);
if (ret)
- free_dma_desc_resources(priv, dma_conf);
+ goto err_disconnect_phy;
kfree(dma_conf);
+
+ return ret;
+
+err_disconnect_phy:
+ phylink_disconnect_phy(priv->phylink);
+err_runtime_pm:
+ pm_runtime_put(priv->device);
+err_dma_resources:
+ free_dma_desc_resources(priv, dma_conf);
+ kfree(dma_conf);
return ret;
}
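
The reworked stmmac_open() above uses the kernel's conventional unwind ladder: each acquisition gets a matching error label, and a failure releases everything acquired so far in reverse order. A schematic sketch of the pattern (acquire_*/release_* are placeholders, not driver functions):

	ret = acquire_a();		/* e.g. DMA descriptor resources */
	if (ret)
		return ret;

	ret = acquire_b();		/* e.g. runtime PM reference */
	if (ret)
		goto err_a;

	ret = acquire_c();		/* e.g. PHY attach */
	if (ret)
		goto err_b;

	return 0;

err_b:
	release_b();
err_a:
	release_a();
	return ret;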
-/**
- * stmmac_release - close entry point of the driver
- * @dev : device pointer.
- * Description:
- * This is the stop entry point of the driver.
- */
-static int stmmac_release(struct net_device *dev)
+static void __stmmac_release(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 chan;
+ /* If the PHY or MAC has WoL enabled, then the PHY will not be
+ * suspended when phylink_stop() is called below. Set the PHY
+ * to its slowest speed to save power.
+ */
if (device_may_wakeup(priv->device))
phylink_speed_down(priv->phylink, false);
+
/* Stop and disconnect the PHY */
phylink_stop(priv->phylink);
- phylink_disconnect_phy(priv->phylink);
stmmac_disable_all_queues(priv);
@@ -4169,7 +4066,21 @@ static int stmmac_release(struct net_device *dev)
if (stmmac_fpe_supported(priv))
ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
+}
+
+/**
+ * stmmac_release - close entry point of the driver
+ * @dev : device pointer.
+ * Description:
+ * This is the stop entry point of the driver.
+ */
+static int stmmac_release(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ __stmmac_release(dev);
+ phylink_disconnect_phy(priv->phylink);
pm_runtime_put(priv->device);
return 0;
@@ -4983,6 +4894,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
{
struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
unsigned int entry = tx_q->cur_tx;
struct dma_desc *tx_desc;
dma_addr_t dma_addr;
@@ -5034,7 +4946,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
stmmac_set_desc_addr(priv, tx_desc, dma_addr);
stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
- true, priv->mode, true, true,
+ csum, priv->mode, true, true,
xdpf->len);
tx_q->tx_count_frames++;
@@ -5733,7 +5645,8 @@ drain_data:
skb->protocol = eth_type_trans(skb, priv->dev);
- if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
+ if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb) ||
+ (status & csum_none))
skb_checksum_none_assert(skb);
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -5965,7 +5878,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
return PTR_ERR(dma_conf);
}
- stmmac_release(dev);
+ __stmmac_release(dev);
ret = __stmmac_open(dev, dma_conf);
if (ret) {
@@ -7055,7 +6968,6 @@ irq_error:
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
- stmmac_hw_teardown(dev);
init_error:
free_dma_desc_resources(priv, &priv->dma_conf);
dma_desc_error:
@@ -7240,7 +7152,6 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
priv->plat->enh_desc = priv->dma_cap.enh_desc;
priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
- priv->hw->pmt = priv->plat->pmt;
if (priv->dma_cap.hash_tb_sz) {
priv->hw->multicast_filter_bins =
(BIT(priv->dma_cap.hash_tb_sz) << 5);
@@ -7278,6 +7189,7 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
if (priv->plat->pmt) {
dev_info(priv->device, "Wake-Up On Lan supported\n");
device_set_wakeup_capable(priv->device, 1);
+ devm_pm_set_wake_irq(priv->device, priv->wol_irq);
}
if (priv->dma_cap.tsoen)
@@ -7710,17 +7622,6 @@ int stmmac_dvr_probe(struct device *device,
stmmac_fpe_init(priv);
- /* If a specific clk_csr value is passed from the platform
- * this means that the CSR Clock Range selection cannot be
- * changed at run-time and it is fixed. Viceversa the driver'll try to
- * set the MDC clock dynamically according to the csr actual
- * clock input.
- */
- if (priv->plat->clk_csr >= 0)
- priv->clk_csr = priv->plat->clk_csr;
- else
- stmmac_clk_csr_set(priv);
-
stmmac_check_pcs_mode(priv);
pm_runtime_get_noresume(device);
@@ -7858,7 +7759,7 @@ int stmmac_suspend(struct device *dev)
priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
/* Enable Power down mode by programming the PMT regs */
- if (device_may_wakeup(priv->device) && priv->plat->pmt) {
+ if (stmmac_wol_enabled_mac(priv)) {
stmmac_pmt(priv, priv->hw, priv->wolopts);
priv->irq_wake = 1;
} else {
@@ -7869,16 +7770,18 @@ int stmmac_suspend(struct device *dev)
mutex_unlock(&priv->lock);
rtnl_lock();
- if (device_may_wakeup(priv->device) && !priv->plat->pmt)
+ if (stmmac_wol_enabled_phy(priv))
phylink_speed_down(priv->phylink, false);
- phylink_suspend(priv->phylink,
- device_may_wakeup(priv->device) && priv->plat->pmt);
+ phylink_suspend(priv->phylink, stmmac_wol_enabled_mac(priv));
rtnl_unlock();
if (stmmac_fpe_supported(priv))
ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
+ if (priv->plat->suspend)
+ return priv->plat->suspend(dev, priv->plat->bsp_priv);
+
return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);
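
stmmac_wol_enabled_mac() and stmmac_wol_enabled_phy() are introduced elsewhere in this series; their bodies are not visible in these hunks. Judging purely by the expressions they replace here, a sketch of the assumed semantics (an inference, not the actual definitions):

	/* Assumed semantics, inferred from the conditions replaced above:
	 * MAC-based WoL needs wakeup permission plus MAC PMT support ...
	 */
	static bool stmmac_wol_enabled_mac(struct stmmac_priv *priv)
	{
		return device_may_wakeup(priv->device) && priv->plat->pmt;
	}

	/* ... while PHY-based WoL covers the wakeup-without-PMT case. */
	static bool stmmac_wol_enabled_phy(struct stmmac_priv *priv)
	{
		return device_may_wakeup(priv->device) && !priv->plat->pmt;
	}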
@@ -7931,6 +7834,12 @@ int stmmac_resume(struct device *dev)
struct stmmac_priv *priv = netdev_priv(ndev);
int ret;
+ if (priv->plat->resume) {
+ ret = priv->plat->resume(dev, priv->plat->bsp_priv);
+ if (ret)
+ return ret;
+ }
+
if (!netif_running(ndev))
return 0;
@@ -7940,7 +7849,7 @@ int stmmac_resume(struct device *dev)
* this bit because it can generate problems while resuming
* from another devices (e.g. serial console).
*/
- if (device_may_wakeup(priv->device) && priv->plat->pmt) {
+ if (stmmac_wol_enabled_mac(priv)) {
mutex_lock(&priv->lock);
stmmac_pmt(priv, priv->hw, 0);
mutex_unlock(&priv->lock);
@@ -7975,7 +7884,16 @@ int stmmac_resume(struct device *dev)
stmmac_free_tx_skbufs(priv);
stmmac_clear_descriptors(priv, &priv->dma_conf);
- stmmac_hw_setup(ndev, false);
+ ret = stmmac_hw_setup(ndev);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
+ mutex_unlock(&priv->lock);
+ rtnl_unlock();
+ return ret;
+ }
+
+ stmmac_init_timestamping(priv);
+
stmmac_init_coalesce(priv);
phylink_rx_clk_stop_block(priv->phylink);
stmmac_set_rx_mode(ndev);
@@ -7993,7 +7911,7 @@ int stmmac_resume(struct device *dev)
* workqueue thread, which will race with initialisation.
*/
phylink_resume(priv->phylink);
- if (device_may_wakeup(priv->device) && !priv->plat->pmt)
+ if (stmmac_wol_enabled_phy(priv))
phylink_speed_up(priv->phylink);
rtnl_unlock();
@@ -8004,6 +7922,10 @@ int stmmac_resume(struct device *dev)
}
EXPORT_SYMBOL_GPL(stmmac_resume);
+/* This is not the same as EXPORT_GPL_SIMPLE_DEV_PM_OPS() when CONFIG_PM=n */
+DEFINE_SIMPLE_DEV_PM_OPS(stmmac_simple_pm_ops, stmmac_suspend, stmmac_resume);
+EXPORT_SYMBOL_GPL(stmmac_simple_pm_ops);
+
#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 836f2848dfeb..f408737f6fc7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -23,9 +23,9 @@
#include "dwxgmac2.h"
#include "stmmac.h"
-#define MII_BUSY 0x00000001
-#define MII_WRITE 0x00000002
-#define MII_DATA_MASK GENMASK(15, 0)
+#define MII_ADDR_GBUSY BIT(0)
+#define MII_ADDR_GWRITE BIT(1)
+#define MII_DATA_GD_MASK GENMASK(15, 0)
/* GMAC4 defines */
#define MII_GMAC4_GOC_SHIFT 2
@@ -45,6 +45,16 @@
#define MII_XGMAC_PA_SHIFT 16
#define MII_XGMAC_DA_SHIFT 21
+static int stmmac_mdio_wait(void __iomem *reg, u32 mask)
+{
+ u32 v;
+
+ if (readl_poll_timeout(reg, v, !(v & mask), 100, 10000))
+ return -EBUSY;
+
+ return 0;
+}
+
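The new stmmac_mdio_wait() helper folds four open-coded readl_poll_timeout() calls into one place. For reference, the poll it performs is roughly equivalent to the following open-coded loop (a simplified sketch; the real readl_poll_timeout() is ktime-based and handles the sleep/timeout interplay more carefully):

	/* Simplified equivalent: poll every ~100us for the busy bit to
	 * clear, giving up after ~10ms.
	 */
	static int stmmac_mdio_wait_open_coded(void __iomem *reg, u32 mask)
	{
		int remaining_us = 10000;

		while (readl(reg) & mask) {
			if (remaining_us <= 0)
				return -EBUSY;
			usleep_range(100, 200);
			remaining_us -= 100;
		}

		return 0;
	}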
static void stmmac_xgmac2_c45_format(struct stmmac_priv *priv, int phyaddr,
int devad, int phyreg, u32 *hw_addr)
{
@@ -83,7 +93,6 @@ static int stmmac_xgmac2_mdio_read(struct stmmac_priv *priv, u32 addr,
{
unsigned int mii_address = priv->hw->mii.addr;
unsigned int mii_data = priv->hw->mii.data;
- u32 tmp;
int ret;
ret = pm_runtime_resume_and_get(priv->device);
@@ -91,33 +100,25 @@ static int stmmac_xgmac2_mdio_read(struct stmmac_priv *priv, u32 addr,
return ret;
/* Wait until any existing MII operation is complete */
- if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
- !(tmp & MII_XGMAC_BUSY), 100, 10000)) {
- ret = -EBUSY;
+ ret = stmmac_mdio_wait(priv->ioaddr + mii_data, MII_XGMAC_BUSY);
+ if (ret)
goto err_disable_clks;
- }
- value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
- & priv->hw->mii.clk_csr_mask;
- value |= MII_XGMAC_READ;
+ value |= priv->gmii_address_bus_config | MII_XGMAC_READ;
/* Wait until any existing MII operation is complete */
- if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
- !(tmp & MII_XGMAC_BUSY), 100, 10000)) {
- ret = -EBUSY;
+ ret = stmmac_mdio_wait(priv->ioaddr + mii_data, MII_XGMAC_BUSY);
+ if (ret)
goto err_disable_clks;
- }
/* Set the MII address register to read */
writel(addr, priv->ioaddr + mii_address);
writel(value, priv->ioaddr + mii_data);
/* Wait until any existing MII operation is complete */
- if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
- !(tmp & MII_XGMAC_BUSY), 100, 10000)) {
- ret = -EBUSY;
+ ret = stmmac_mdio_wait(priv->ioaddr + mii_data, MII_XGMAC_BUSY);
+ if (ret)
goto err_disable_clks;
- }
/* Read the data from the MII data register */
ret = (int)readl(priv->ioaddr + mii_data) & GENMASK(15, 0);
@@ -131,12 +132,9 @@ err_disable_clks:
static int stmmac_xgmac2_mdio_read_c22(struct mii_bus *bus, int phyaddr,
int phyreg)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv;
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
u32 addr;
- priv = netdev_priv(ndev);
-
/* Until ver 2.20 XGMAC does not support C22 addr >= 4 */
if (priv->synopsys_id < DWXGMAC_CORE_2_20 &&
phyaddr > MII_XGMAC_MAX_C22ADDR)
@@ -150,12 +148,9 @@ static int stmmac_xgmac2_mdio_read_c22(struct mii_bus *bus, int phyaddr,
static int stmmac_xgmac2_mdio_read_c45(struct mii_bus *bus, int phyaddr,
int devad, int phyreg)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv;
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
u32 addr;
- priv = netdev_priv(ndev);
-
stmmac_xgmac2_c45_format(priv, phyaddr, devad, phyreg, &addr);
return stmmac_xgmac2_mdio_read(priv, addr, MII_XGMAC_BUSY);
@@ -166,7 +161,6 @@ static int stmmac_xgmac2_mdio_write(struct stmmac_priv *priv, u32 addr,
{
unsigned int mii_address = priv->hw->mii.addr;
unsigned int mii_data = priv->hw->mii.data;
- u32 tmp;
int ret;
ret = pm_runtime_resume_and_get(priv->device);
@@ -174,31 +168,23 @@ static int stmmac_xgmac2_mdio_write(struct stmmac_priv *priv, u32 addr,
return ret;
/* Wait until any existing MII operation is complete */
- if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
- !(tmp & MII_XGMAC_BUSY), 100, 10000)) {
- ret = -EBUSY;
+ ret = stmmac_mdio_wait(priv->ioaddr + mii_data, MII_XGMAC_BUSY);
+ if (ret)
goto err_disable_clks;
- }
- value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
- & priv->hw->mii.clk_csr_mask;
- value |= phydata;
- value |= MII_XGMAC_WRITE;
+ value |= priv->gmii_address_bus_config | phydata | MII_XGMAC_WRITE;
/* Wait until any existing MII operation is complete */
- if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
- !(tmp & MII_XGMAC_BUSY), 100, 10000)) {
- ret = -EBUSY;
+ ret = stmmac_mdio_wait(priv->ioaddr + mii_data, MII_XGMAC_BUSY);
+ if (ret)
goto err_disable_clks;
- }
/* Set the MII address register to write */
writel(addr, priv->ioaddr + mii_address);
writel(value, priv->ioaddr + mii_data);
/* Wait until any existing MII operation is complete */
- ret = readl_poll_timeout(priv->ioaddr + mii_data, tmp,
- !(tmp & MII_XGMAC_BUSY), 100, 10000);
+ ret = stmmac_mdio_wait(priv->ioaddr + mii_data, MII_XGMAC_BUSY);
err_disable_clks:
pm_runtime_put(priv->device);
@@ -209,12 +195,9 @@ err_disable_clks:
static int stmmac_xgmac2_mdio_write_c22(struct mii_bus *bus, int phyaddr,
int phyreg, u16 phydata)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv;
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
u32 addr;
- priv = netdev_priv(ndev);
-
/* Until ver 2.20 XGMAC does not support C22 addr >= 4 */
if (priv->synopsys_id < DWXGMAC_CORE_2_20 &&
phyaddr > MII_XGMAC_MAX_C22ADDR)
@@ -229,37 +212,78 @@ static int stmmac_xgmac2_mdio_write_c22(struct mii_bus *bus, int phyaddr,
static int stmmac_xgmac2_mdio_write_c45(struct mii_bus *bus, int phyaddr,
int devad, int phyreg, u16 phydata)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv;
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
u32 addr;
- priv = netdev_priv(ndev);
-
stmmac_xgmac2_c45_format(priv, phyaddr, devad, phyreg, &addr);
return stmmac_xgmac2_mdio_write(priv, addr, MII_XGMAC_BUSY,
phydata);
}
-static int stmmac_mdio_read(struct stmmac_priv *priv, int data, u32 value)
+/**
+ * stmmac_mdio_format_addr() - format the address register
+ * @priv: struct stmmac_priv pointer
+ * @pa: 5-bit MDIO package address
+ * @gr: 5-bit MDIO register address (C22) or MDIO device address (C45)
+ *
+ * Return: formatted address register
+ */
+static u32 stmmac_mdio_format_addr(struct stmmac_priv *priv,
+ unsigned int pa, unsigned int gr)
{
- unsigned int mii_address = priv->hw->mii.addr;
- unsigned int mii_data = priv->hw->mii.data;
- u32 v;
+ const struct mii_regs *mii_regs = &priv->hw->mii;
- if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
- 100, 10000))
- return -EBUSY;
+ return ((pa << mii_regs->addr_shift) & mii_regs->addr_mask) |
+ ((gr << mii_regs->reg_shift) & mii_regs->reg_mask) |
+ priv->gmii_address_bus_config |
+ MII_ADDR_GBUSY;
+}
- writel(data, priv->ioaddr + mii_data);
- writel(value, priv->ioaddr + mii_address);
+static int stmmac_mdio_access(struct stmmac_priv *priv, unsigned int pa,
+ unsigned int gr, u32 cmd, u32 data, bool read)
+{
+ void __iomem *mii_address = priv->ioaddr + priv->hw->mii.addr;
+ void __iomem *mii_data = priv->ioaddr + priv->hw->mii.data;
+ u32 addr;
+ int ret;
- if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
- 100, 10000))
- return -EBUSY;
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
+ return ret;
- /* Read the data from the MII data register */
- return readl(priv->ioaddr + mii_data) & MII_DATA_MASK;
+ ret = stmmac_mdio_wait(mii_address, MII_ADDR_GBUSY);
+ if (ret)
+ goto out;
+
+ addr = stmmac_mdio_format_addr(priv, pa, gr) | cmd;
+
+ writel(data, mii_data);
+ writel(addr, mii_address);
+
+ ret = stmmac_mdio_wait(mii_address, MII_ADDR_GBUSY);
+ if (ret)
+ goto out;
+
+ /* Read the data from the MII data register if in read mode */
+ ret = read ? readl(mii_data) & MII_DATA_GD_MASK : 0;
+
+out:
+ pm_runtime_put(priv->device);
+
+ return ret;
+}
+
+static int stmmac_mdio_read(struct stmmac_priv *priv, unsigned int pa,
+ unsigned int gr, u32 cmd, int data)
+{
+ return stmmac_mdio_access(priv, pa, gr, cmd, data, true);
+}
+
+static int stmmac_mdio_write(struct stmmac_priv *priv, unsigned int pa,
+ unsigned int gr, u32 cmd, int data)
+{
+ return stmmac_mdio_access(priv, pa, gr, cmd, data, false);
}
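
To make the packing in stmmac_mdio_format_addr() concrete, here is a worked sketch using hypothetical GMAC-style field positions (PA in bits 15:11, GR in bits 10:6 — illustrative values only; the real shifts and masks come from priv->hw->mii and differ per core):

	#define EX_ADDR_SHIFT	11
	#define EX_ADDR_MASK	GENMASK(15, 11)
	#define EX_REG_SHIFT	6
	#define EX_REG_MASK	GENMASK(10, 6)

	/* e.g. pa = 0x01, gr = 0x0d:
	 * (0x01 << 11) | (0x0d << 6) = 0x0800 | 0x0340 = 0x0b40,
	 * then OR in the cached bus config and MII_ADDR_GBUSY.
	 */
	static u32 example_format_addr(unsigned int pa, unsigned int gr,
				       u32 bus_config)
	{
		return ((pa << EX_ADDR_SHIFT) & EX_ADDR_MASK) |
		       ((gr << EX_REG_SHIFT) & EX_REG_MASK) |
		       bus_config | MII_ADDR_GBUSY;
	}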
/**
@@ -274,28 +298,15 @@ static int stmmac_mdio_read(struct stmmac_priv *priv, int data, u32 value)
*/
static int stmmac_mdio_read_c22(struct mii_bus *bus, int phyaddr, int phyreg)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv = netdev_priv(ndev);
- u32 value = MII_BUSY;
- int data = 0;
-
- data = pm_runtime_resume_and_get(priv->device);
- if (data < 0)
- return data;
-
- value |= (phyaddr << priv->hw->mii.addr_shift)
- & priv->hw->mii.addr_mask;
- value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
- value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
- & priv->hw->mii.clk_csr_mask;
- if (priv->plat->has_gmac4)
- value |= MII_GMAC4_READ;
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
+ u32 cmd;
- data = stmmac_mdio_read(priv, data, value);
-
- pm_runtime_put(priv->device);
+ if (priv->plat->has_gmac4)
+ cmd = MII_GMAC4_READ;
+ else
+ cmd = 0;
- return data;
+ return stmmac_mdio_read(priv, phyaddr, phyreg, cmd, 0);
}
/**
@@ -312,54 +323,11 @@ static int stmmac_mdio_read_c22(struct mii_bus *bus, int phyaddr, int phyreg)
static int stmmac_mdio_read_c45(struct mii_bus *bus, int phyaddr, int devad,
int phyreg)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv = netdev_priv(ndev);
- u32 value = MII_BUSY;
- int data = 0;
-
- data = pm_runtime_get_sync(priv->device);
- if (data < 0) {
- pm_runtime_put_noidle(priv->device);
- return data;
- }
-
- value |= (phyaddr << priv->hw->mii.addr_shift)
- & priv->hw->mii.addr_mask;
- value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
- value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
- & priv->hw->mii.clk_csr_mask;
- value |= MII_GMAC4_READ;
- value |= MII_GMAC4_C45E;
- value &= ~priv->hw->mii.reg_mask;
- value |= (devad << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
+ int data = phyreg << MII_GMAC4_REG_ADDR_SHIFT;
+ u32 cmd = MII_GMAC4_READ | MII_GMAC4_C45E;
- data |= phyreg << MII_GMAC4_REG_ADDR_SHIFT;
-
- data = stmmac_mdio_read(priv, data, value);
-
- pm_runtime_put(priv->device);
-
- return data;
-}
-
-static int stmmac_mdio_write(struct stmmac_priv *priv, int data, u32 value)
-{
- unsigned int mii_address = priv->hw->mii.addr;
- unsigned int mii_data = priv->hw->mii.data;
- u32 v;
-
- /* Wait until any existing MII operation is complete */
- if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
- 100, 10000))
- return -EBUSY;
-
- /* Set the MII address register to write */
- writel(data, priv->ioaddr + mii_data);
- writel(value, priv->ioaddr + mii_address);
-
- /* Wait until any existing MII operation is complete */
- return readl_poll_timeout(priv->ioaddr + mii_address, v,
- !(v & MII_BUSY), 100, 10000);
+ return stmmac_mdio_read(priv, phyaddr, devad, cmd, data);
}
/**
@@ -373,31 +341,15 @@ static int stmmac_mdio_write(struct stmmac_priv *priv, int data, u32 value)
static int stmmac_mdio_write_c22(struct mii_bus *bus, int phyaddr, int phyreg,
u16 phydata)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv = netdev_priv(ndev);
- int ret, data = phydata;
- u32 value = MII_BUSY;
-
- ret = pm_runtime_resume_and_get(priv->device);
- if (ret < 0)
- return ret;
-
- value |= (phyaddr << priv->hw->mii.addr_shift)
- & priv->hw->mii.addr_mask;
- value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
+ u32 cmd;
- value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
- & priv->hw->mii.clk_csr_mask;
if (priv->plat->has_gmac4)
- value |= MII_GMAC4_WRITE;
+ cmd = MII_GMAC4_WRITE;
else
- value |= MII_WRITE;
+ cmd = MII_ADDR_GWRITE;
- ret = stmmac_mdio_write(priv, data, value);
-
- pm_runtime_put(priv->device);
-
- return ret;
+ return stmmac_mdio_write(priv, phyaddr, phyreg, cmd, phydata);
}
/**
@@ -412,36 +364,13 @@ static int stmmac_mdio_write_c22(struct mii_bus *bus, int phyaddr, int phyreg,
static int stmmac_mdio_write_c45(struct mii_bus *bus, int phyaddr,
int devad, int phyreg, u16 phydata)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv = netdev_priv(ndev);
- int ret, data = phydata;
- u32 value = MII_BUSY;
-
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
- return ret;
- }
-
- value |= (phyaddr << priv->hw->mii.addr_shift)
- & priv->hw->mii.addr_mask;
- value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
-
- value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
- & priv->hw->mii.clk_csr_mask;
-
- value |= MII_GMAC4_WRITE;
- value |= MII_GMAC4_C45E;
- value &= ~priv->hw->mii.reg_mask;
- value |= (devad << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
+ u32 cmd = MII_GMAC4_WRITE | MII_GMAC4_C45E;
+ int data = phydata;
data |= phyreg << MII_GMAC4_REG_ADDR_SHIFT;
- ret = stmmac_mdio_write(priv, data, value);
-
- pm_runtime_put(priv->device);
-
- return ret;
+ return stmmac_mdio_write(priv, phyaddr, devad, cmd, data);
}
/**
@@ -452,8 +381,7 @@ static int stmmac_mdio_write_c45(struct mii_bus *bus, int phyaddr,
int stmmac_mdio_reset(struct mii_bus *bus)
{
#if IS_ENABLED(CONFIG_STMMAC_PLATFORM)
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv = netdev_priv(ndev);
+ struct stmmac_priv *priv = netdev_priv(bus->priv);
unsigned int mii_address = priv->hw->mii.addr;
#ifdef CONFIG_OF
@@ -497,12 +425,11 @@ int stmmac_mdio_reset(struct mii_bus *bus)
int stmmac_pcs_setup(struct net_device *ndev)
{
+ struct stmmac_priv *priv = netdev_priv(ndev);
struct fwnode_handle *devnode, *pcsnode;
struct dw_xpcs *xpcs = NULL;
- struct stmmac_priv *priv;
int addr, ret;
- priv = netdev_priv(ndev);
devnode = priv->plat->port_node;
if (priv->plat->pcs_init) {
@@ -547,6 +474,102 @@ void stmmac_pcs_clean(struct net_device *ndev)
}
/**
+ * stmmac_clk_csr_set - dynamically set the MDC clock
+ * @priv: driver private structure
+ * Description: dynamically select the MDC clock according to the CSR
+ * clock input.
+ * Return: MII register CR field value
+ * Note:
+ * If a specific clk_csr value is passed from the platform, the CSR
+ * Clock Range selection cannot be changed at run-time and is fixed
+ * (as reported in the driver documentation). Otherwise the driver
+ * will try to set the MDC clock dynamically according to the actual
+ * clock input.
+ */
+static u32 stmmac_clk_csr_set(struct stmmac_priv *priv)
+{
+ unsigned long clk_rate;
+ u32 value = ~0;
+
+ clk_rate = clk_get_rate(priv->plat->stmmac_clk);
+
+	/* The platform-provided default clk_csr is assumed valid for all
+	 * cases except the ones mentioned below. For values higher than
+	 * the IEEE 802.3 specified frequency we cannot estimate the
+	 * proper divider, as the frequency of clk_csr_i is not known, so
+	 * the default divider is left unchanged.
+	 */
+ if (clk_rate < CSR_F_35M)
+ value = STMMAC_CSR_20_35M;
+ else if (clk_rate < CSR_F_60M)
+ value = STMMAC_CSR_35_60M;
+ else if (clk_rate < CSR_F_100M)
+ value = STMMAC_CSR_60_100M;
+ else if (clk_rate < CSR_F_150M)
+ value = STMMAC_CSR_100_150M;
+ else if (clk_rate < CSR_F_250M)
+ value = STMMAC_CSR_150_250M;
+ else if (clk_rate <= CSR_F_300M)
+ value = STMMAC_CSR_250_300M;
+ else if (clk_rate < CSR_F_500M)
+ value = STMMAC_CSR_300_500M;
+ else if (clk_rate < CSR_F_800M)
+ value = STMMAC_CSR_500_800M;
+
+ if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
+ if (clk_rate > 160000000)
+ value = 0x03;
+ else if (clk_rate > 80000000)
+ value = 0x02;
+ else if (clk_rate > 40000000)
+ value = 0x01;
+ else
+ value = 0;
+ }
+
+ if (priv->plat->has_xgmac) {
+ if (clk_rate > 400000000)
+ value = 0x5;
+ else if (clk_rate > 350000000)
+ value = 0x4;
+ else if (clk_rate > 300000000)
+ value = 0x3;
+ else if (clk_rate > 250000000)
+ value = 0x2;
+ else if (clk_rate > 150000000)
+ value = 0x1;
+ else
+ value = 0x0;
+ }
+
+ return value;
+}
+
+static void stmmac_mdio_bus_config(struct stmmac_priv *priv)
+{
+ u32 value;
+
+	/* If a specific clk_csr value is passed from the platform, the
+	 * CSR Clock Range value should not be computed from the CSR
+	 * clock.
+	 */
+ if (priv->plat->clk_csr >= 0)
+ value = priv->plat->clk_csr;
+ else
+ value = stmmac_clk_csr_set(priv);
+
+ value <<= priv->hw->mii.clk_csr_shift;
+
+ if (value & ~priv->hw->mii.clk_csr_mask)
+ dev_warn(priv->device,
+ "clk_csr value out of range (0x%08x exceeds mask 0x%08x), truncating\n",
+ value, priv->hw->mii.clk_csr_mask);
+
+ priv->gmii_address_bus_config = value & priv->hw->mii.clk_csr_mask;
+}
+
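As a worked example of the new flow: with no platform-fixed clk_csr and a 50MHz CSR clock, stmmac_clk_csr_set() returns STMMAC_CSR_35_60M, which stmmac_mdio_bus_config() shifts into the CR field position once, at MDIO registration time. Assuming GMAC-style field placement (CR in bits 5:2, i.e. clk_csr_shift = 2, clk_csr_mask = GENMASK(5, 2)) purely for illustration:

	/* clk_get_rate() = 50000000 falls in the 35-60MHz range */
	u32 value = STMMAC_CSR_35_60M;

	value <<= 2;			/* priv->hw->mii.clk_csr_shift */
	priv->gmii_address_bus_config = value & GENMASK(5, 2);

	/* Every subsequent MDIO access simply ORs in
	 * priv->gmii_address_bus_config instead of redoing the
	 * shift-and-mask on each read/write.
	 */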
+/**
* stmmac_mdio_register
* @ndev: net device structure
* Description: it registers the MII bus
@@ -566,6 +589,8 @@ int stmmac_mdio_register(struct net_device *ndev)
if (!mdio_bus_data)
return 0;
+ stmmac_mdio_bus_config(priv);
+
new_bus = mdiobus_alloc();
if (!new_bus)
return -ENOMEM;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 9c1b54b701f7..4e3aa611fda8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -21,7 +21,8 @@ struct stmmac_pci_info {
static void common_default_data(struct plat_stmmacenet_data *plat)
{
- plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+ /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+ plat->clk_csr = STMMAC_CSR_20_35M;
plat->has_gmac = 1;
plat->force_sf_dma_mode = 1;
@@ -74,7 +75,7 @@ static int snps_gmac5_default_data(struct pci_dev *pdev,
{
int i;
- plat->clk_csr = 5;
+ plat->clk_csr = STMMAC_CSR_250_300M;
plat->has_gmac4 = 1;
plat->force_sf_dma_mode = 1;
plat->flags |= STMMAC_FLAG_TSO_EN;
@@ -138,6 +139,37 @@ static const struct stmmac_pci_info snps_gmac5_pci_info = {
.setup = snps_gmac5_default_data,
};
+static int stmmac_pci_suspend(struct device *dev, void *bsp_priv)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ ret = pci_save_state(pdev);
+ if (ret)
+ return ret;
+
+ pci_disable_device(pdev);
+ pci_wake_from_d3(pdev, true);
+ return 0;
+}
+
+static int stmmac_pci_resume(struct device *dev, void *bsp_priv)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ pci_restore_state(pdev);
+ pci_set_power_state(pdev, PCI_D0);
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ return 0;
+}
+
/**
* stmmac_pci_probe
*
@@ -217,6 +249,9 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
plat->safety_feat_cfg->prtyen = 1;
plat->safety_feat_cfg->tmouten = 1;
+ plat->suspend = stmmac_pci_suspend;
+ plat->resume = stmmac_pci_resume;
+
return stmmac_dvr_probe(&pdev->dev, plat, &res);
}
@@ -231,43 +266,6 @@ static void stmmac_pci_remove(struct pci_dev *pdev)
stmmac_dvr_remove(&pdev->dev);
}
-static int __maybe_unused stmmac_pci_suspend(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- ret = stmmac_suspend(dev);
- if (ret)
- return ret;
-
- ret = pci_save_state(pdev);
- if (ret)
- return ret;
-
- pci_disable_device(pdev);
- pci_wake_from_d3(pdev, true);
- return 0;
-}
-
-static int __maybe_unused stmmac_pci_resume(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
-
- pci_restore_state(pdev);
- pci_set_power_state(pdev, PCI_D0);
-
- ret = pci_enable_device(pdev);
- if (ret)
- return ret;
-
- pci_set_master(pdev);
-
- return stmmac_resume(dev);
-}
-
-static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume);
-
/* synthetic ID, no official vendor */
#define PCI_VENDOR_ID_STMMAC 0x0700
@@ -289,7 +287,7 @@ static struct pci_driver stmmac_pci_driver = {
.probe = stmmac_pci_probe,
.remove = stmmac_pci_remove,
.driver = {
- .pm = &stmmac_pm_ops,
+ .pm = &stmmac_simple_pm_ops,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 030fcf1b5993..27bcaae07a7f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -453,8 +453,12 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
return ERR_PTR(phy_mode);
plat->phy_interface = phy_mode;
+
rc = stmmac_of_get_mac_mode(np);
- plat->mac_interface = rc < 0 ? plat->phy_interface : rc;
+ if (rc >= 0 && rc != phy_mode)
+ dev_warn(&pdev->dev,
+                      "\"mac-mode\" property used for %s but differs from \"phy-mode\" of %s, and will be ignored. Please report.\n",
+ phy_modes(rc), phy_modes(phy_mode));
/* Some wrapper drivers still rely on phy_node. Let's save it while
* they are not converted to phylink. */
@@ -811,6 +815,22 @@ static void stmmac_pltfr_exit(struct platform_device *pdev,
plat->exit(pdev, plat->bsp_priv);
}
+static int stmmac_plat_suspend(struct device *dev, void *bsp_priv)
+{
+ struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(dev));
+
+ stmmac_pltfr_exit(to_platform_device(dev), priv->plat);
+
+ return 0;
+}
+
+static int stmmac_plat_resume(struct device *dev, void *bsp_priv)
+{
+ struct stmmac_priv *priv = netdev_priv(dev_get_drvdata(dev));
+
+ return stmmac_pltfr_init(to_platform_device(dev), priv->plat);
+}
+
/**
* stmmac_pltfr_probe
* @pdev: platform device pointer
@@ -825,6 +845,11 @@ int stmmac_pltfr_probe(struct platform_device *pdev,
{
int ret;
+ if (!plat->suspend && plat->exit)
+ plat->suspend = stmmac_plat_suspend;
+ if (!plat->resume && plat->init)
+ plat->resume = stmmac_plat_resume;
+
ret = stmmac_pltfr_init(pdev, plat);
if (ret)
return ret;
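
With the new hooks, a wrapper driver that needs more than the default exit/init behaviour can supply its own plat->suspend/plat->resume before calling stmmac_pltfr_probe(). A hypothetical sketch (all foo_* names are invented for illustration):

	static int foo_suspend(struct device *dev, void *bsp_priv)
	{
		struct foo_bsp *bsp = bsp_priv;

		/* board-specific quiesce, e.g. gate an external clock */
		clk_disable_unprepare(bsp->extra_clk);
		return 0;
	}

	static int foo_resume(struct device *dev, void *bsp_priv)
	{
		struct foo_bsp *bsp = bsp_priv;

		return clk_prepare_enable(bsp->extra_clk);
	}

	/* in the wrapper's probe, before stmmac_pltfr_probe(): */
	plat_dat->bsp_priv = bsp;
	plat_dat->suspend = foo_suspend;
	plat_dat->resume = foo_resume;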
@@ -886,45 +911,36 @@ void stmmac_pltfr_remove(struct platform_device *pdev)
}
EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);
-/**
- * stmmac_pltfr_suspend
- * @dev: device pointer
- * Description: this function is invoked when suspend the driver and it direcly
- * call the main suspend function and then, if required, on some platform, it
- * can call an exit helper.
- */
-static int __maybe_unused stmmac_pltfr_suspend(struct device *dev)
-{
- int ret;
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- struct platform_device *pdev = to_platform_device(dev);
-
- ret = stmmac_suspend(dev);
- stmmac_pltfr_exit(pdev, priv->plat);
-
- return ret;
-}
-
-/**
- * stmmac_pltfr_resume
- * @dev: device pointer
- * Description: this function is invoked when resume the driver before calling
- * the main resume function, on some platforms, it can call own init helper
- * if required.
- */
-static int __maybe_unused stmmac_pltfr_resume(struct device *dev)
+static int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- struct platform_device *pdev = to_platform_device(dev);
+ struct plat_stmmacenet_data *plat_dat = priv->plat;
int ret;
- ret = stmmac_pltfr_init(pdev, priv->plat);
- if (ret)
- return ret;
+ if (enabled) {
+ ret = clk_prepare_enable(plat_dat->stmmac_clk);
+ if (ret)
+ return ret;
+ ret = clk_prepare_enable(plat_dat->pclk);
+ if (ret) {
+ clk_disable_unprepare(plat_dat->stmmac_clk);
+ return ret;
+ }
+ if (plat_dat->clks_config) {
+ ret = plat_dat->clks_config(plat_dat->bsp_priv, enabled);
+ if (ret) {
+ clk_disable_unprepare(plat_dat->stmmac_clk);
+ clk_disable_unprepare(plat_dat->pclk);
+ return ret;
+ }
+ }
+ } else {
+ clk_disable_unprepare(plat_dat->stmmac_clk);
+ clk_disable_unprepare(plat_dat->pclk);
+ if (plat_dat->clks_config)
+ plat_dat->clks_config(plat_dat->bsp_priv, enabled);
+ }
- return stmmac_resume(dev);
+ return 0;
}
static int __maybe_unused stmmac_runtime_suspend(struct device *dev)
@@ -954,7 +970,7 @@ static int __maybe_unused stmmac_pltfr_noirq_suspend(struct device *dev)
if (!netif_running(ndev))
return 0;
- if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
+ if (!stmmac_wol_enabled_mac(priv)) {
/* Disable clock in case of PWM is off */
clk_disable_unprepare(priv->plat->clk_ptp_ref);
@@ -975,7 +991,7 @@ static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev)
if (!netif_running(ndev))
return 0;
- if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
+ if (!stmmac_wol_enabled_mac(priv)) {
/* enable the clk previously disabled */
ret = pm_runtime_force_resume(dev);
if (ret)
@@ -994,7 +1010,7 @@ static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev)
}
const struct dev_pm_ops stmmac_pltfr_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_suspend, stmmac_pltfr_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(stmmac_suspend, stmmac_resume)
SET_RUNTIME_PM_OPS(stmmac_runtime_suspend, stmmac_runtime_resume, NULL)
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_noirq_suspend, stmmac_pltfr_noirq_resume)
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 3767ba495e78..993ff4e87e55 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -10,6 +10,8 @@
#include "stmmac.h"
#include "stmmac_ptp.h"
+#define PTP_SAFE_TIME_OFFSET_NS 500000
+
/**
* stmmac_adjust_freq
*
@@ -171,7 +173,11 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
u32 acr_value;
switch (rq->type) {
- case PTP_CLK_REQ_PEROUT:
+ case PTP_CLK_REQ_PEROUT: {
+ struct timespec64 curr_time;
+ u64 target_ns = 0;
+ u64 ns = 0;
+
/* Reject requests with unsupported flags */
if (rq->perout.flags)
return -EOPNOTSUPP;
@@ -180,6 +186,31 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
cfg->start.tv_sec = rq->perout.start.sec;
cfg->start.tv_nsec = rq->perout.start.nsec;
+
+		/* A start time in the past won't trigger flexible PPS
+		 * generation on the GMAC5 (although, for some reason, it
+		 * does on the GMAC4); either way a time in the past should
+		 * be handled. Therefore, any value set in the past is
+		 * treated as an offset relative to the current MAC system
+		 * time. Be aware that too small an offset may still fail to
+		 * trigger flexible PPS generation if the time spent in this
+		 * configuration leaves the target time outdated, so add a
+		 * safe time offset.
+		 */
+ if (!cfg->start.tv_sec && cfg->start.tv_nsec < PTP_SAFE_TIME_OFFSET_NS)
+ cfg->start.tv_nsec += PTP_SAFE_TIME_OFFSET_NS;
+
+ target_ns = cfg->start.tv_nsec + ((u64)cfg->start.tv_sec * NSEC_PER_SEC);
+
+ stmmac_get_systime(priv, priv->ptpaddr, &ns);
+ if (ns > TIME64_MAX - PTP_SAFE_TIME_OFFSET_NS)
+ return -EINVAL;
+
+ curr_time = ns_to_timespec64(ns);
+ if (target_ns < ns + PTP_SAFE_TIME_OFFSET_NS) {
+ cfg->start = timespec64_add_safe(cfg->start, curr_time);
+ if (cfg->start.tv_sec == TIME64_MAX)
+ return -EINVAL;
+ }
+
cfg->period.tv_sec = rq->perout.period.sec;
cfg->period.tv_nsec = rq->perout.period.nsec;
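
A worked numeric example of the re-basing above: suppose the MAC system time is 100s and userspace requests a perout start of 0s/100000ns, i.e. a time in the past that is also below the 500us safety margin:

	/* Illustrative values only */
	struct timespec64 start = { .tv_sec = 0, .tv_nsec = 100000 };
	struct timespec64 curr = { .tv_sec = 100, .tv_nsec = 0 };

	/* below PTP_SAFE_TIME_OFFSET_NS: bump to 600000ns first */
	start.tv_nsec += PTP_SAFE_TIME_OFFSET_NS;

	/* the target (600000ns) is still behind now + margin, so treat
	 * the request as an offset: final start = 100s + 600000ns, which
	 * is safely in the future when the generator is armed.
	 */
	start = timespec64_add_safe(start, curr);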
@@ -190,6 +221,7 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
priv->systime_flags);
write_unlock_irqrestore(&priv->ptp_lock, flags);
break;
+ }
case PTP_CLK_REQ_EXTTS: {
u8 channel;
@@ -247,10 +279,7 @@ static int stmmac_get_syncdevicetime(ktime_t *device,
{
struct stmmac_priv *priv = (struct stmmac_priv *)ctx;
- if (priv->plat->crosststamp)
- return priv->plat->crosststamp(device, system, ctx);
- else
- return -EOPNOTSUPP;
+ return priv->plat->crosststamp(device, system, ctx);
}
static int stmmac_getcrosststamp(struct ptp_clock_info *ptp,
@@ -278,7 +307,6 @@ const struct ptp_clock_info stmmac_ptp_clock_ops = {
.gettime64 = stmmac_get_time,
.settime64 = stmmac_set_time,
.enable = stmmac_enable,
- .getcrosststamp = stmmac_getcrosststamp,
};
/* structure describing a PTP hardware clock */
@@ -296,7 +324,6 @@ const struct ptp_clock_info dwmac1000_ptp_clock_ops = {
.gettime64 = stmmac_get_time,
.settime64 = stmmac_set_time,
.enable = dwmac1000_ptp_enable,
- .getcrosststamp = stmmac_getcrosststamp,
};
/**
@@ -332,6 +359,9 @@ void stmmac_ptp_register(struct stmmac_priv *priv)
if (priv->plat->ptp_max_adj)
priv->ptp_clock_ops.max_adj = priv->plat->ptp_max_adj;
+ if (priv->plat->crosststamp)
+ priv->ptp_clock_ops.getcrosststamp = stmmac_getcrosststamp;
+
rwlock_init(&priv->ptp_lock);
mutex_init(&priv->aux_ts_lock);
@@ -340,8 +370,12 @@ void stmmac_ptp_register(struct stmmac_priv *priv)
if (IS_ERR(priv->ptp_clock)) {
netdev_err(priv->dev, "ptp_clock_register failed\n");
priv->ptp_clock = NULL;
- } else if (priv->ptp_clock)
+ }
+
+ if (priv->ptp_clock)
netdev_info(priv->dev, "registered PTP clock\n");
+ else
+ mutex_destroy(&priv->aux_ts_lock);
}
/**
@@ -357,7 +391,7 @@ void stmmac_ptp_unregister(struct stmmac_priv *priv)
priv->ptp_clock = NULL;
pr_debug("Removed PTP HW clock successfully on %s\n",
priv->dev->name);
- }
- mutex_destroy(&priv->aux_ts_lock);
+ mutex_destroy(&priv->aux_ts_lock);
+ }
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 694d6ee14381..97e89a604abd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -1080,6 +1080,7 @@ disable:
for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
priv->xstats.max_sdu_txq_drop[i] = 0;
priv->xstats.mtl_est_txq_hlbf[i] = 0;
+ priv->xstats.mtl_est_txq_hlbs[i] = 0;
}
mutex_unlock(&priv->est_lock);
}
@@ -1097,7 +1098,8 @@ static void tc_taprio_stats(struct stmmac_priv *priv,
for (i = 0; i < priv->plat->tx_queues_to_use; i++)
window_drops += priv->xstats.max_sdu_txq_drop[i] +
- priv->xstats.mtl_est_txq_hlbf[i];
+ priv->xstats.mtl_est_txq_hlbf[i] +
+ priv->xstats.mtl_est_txq_hlbs[i];
qopt->stats.window_drops = window_drops;
/* Transmission overrun doesn't happen for stmmac, hence always 0 */
@@ -1111,7 +1113,8 @@ static void tc_taprio_queue_stats(struct stmmac_priv *priv,
int queue = qopt->queue_stats.queue;
q_stats->stats.window_drops = priv->xstats.max_sdu_txq_drop[queue] +
- priv->xstats.mtl_est_txq_hlbf[queue];
+ priv->xstats.mtl_est_txq_hlbf[queue] +
+ priv->xstats.mtl_est_txq_hlbs[queue];
/* Transmission overrun doesn't happen for stmmac, hence always 0 */
q_stats->stats.tx_overruns = 0;
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index a07c910c497a..a54d71155263 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -229,4 +229,16 @@ config TI_ICSS_IEP
To compile this driver as a module, choose M here. The module
will be called icss_iep.
+config TI_PRUETH
+ tristate "TI PRU Ethernet EMAC driver"
+ depends on PRU_REMOTEPROC
+ depends on NET_SWITCHDEV
+ select TI_ICSS_IEP
+ imply PTP_1588_CLOCK
+ help
+	  Some TI SoCs have Programmable Real-Time Unit (PRU) cores which
+	  can support single or dual Ethernet ports with the help of
+	  firmware running on the PRU cores. This driver uses
+	  remoteproc-based communication with the PRU firmware to expose
+	  an Ethernet interface to Linux.
+
endif # NET_VENDOR_TI
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index cbcf44806924..93c0a4d0e33a 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -3,6 +3,9 @@
# Makefile for the TI network device drivers.
#
+obj-$(CONFIG_TI_PRUETH) += icssm-prueth.o
+icssm-prueth-y := icssm/icssm_prueth.o
+
obj-$(CONFIG_TI_CPSW) += cpsw-common.o
obj-$(CONFIG_TI_DAVINCI_EMAC) += cpsw-common.o
obj-$(CONFIG_TI_CPSW_SWITCHDEV) += cpsw-common.o
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index 9032444435e9..c57497074ae6 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -694,17 +694,20 @@ static int am65_cpsw_get_ethtool_ts_info(struct net_device *ndev,
struct kernel_ethtool_ts_info *info)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
- unsigned int ptp_v2_filter;
-
- ptp_v2_filter = BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
- BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
- BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
- BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
- BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
- BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
- BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
- BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
- BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+ unsigned int ptp_filter;
+
+ ptp_filter = BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ);
if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
return ethtool_op_get_ts_info(ndev, info);
@@ -716,7 +719,7 @@ static int am65_cpsw_get_ethtool_ts_info(struct net_device *ndev,
SOF_TIMESTAMPING_RAW_HARDWARE;
info->phc_index = am65_cpts_phc_index(common->cpts);
info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
- info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | ptp_v2_filter;
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | ptp_filter;
return 0;
}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index ecd6ecac87bb..110eb2da8dbc 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -1522,7 +1522,7 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
}
}
- if (single_port) {
+ if (single_port && num_tx) {
netif_txq = netdev_get_tx_queue(ndev, chn);
netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
@@ -1813,6 +1813,9 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
case HWTSTAMP_FILTER_NONE:
port->rx_ts_enabled = false;
break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
@@ -1823,7 +1826,7 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
port->rx_ts_enabled = true;
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT | HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
@@ -1884,8 +1887,8 @@ static int am65_cpsw_nuss_hwtstamp_get(struct net_device *ndev,
cfg.flags = 0;
cfg.tx_type = port->tx_ts_enabled ?
HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- cfg.rx_filter = port->rx_ts_enabled ?
- HWTSTAMP_FILTER_PTP_V2_EVENT : HWTSTAMP_FILTER_NONE;
+ cfg.rx_filter = port->rx_ts_enabled ? HWTSTAMP_FILTER_PTP_V2_EVENT |
+ HWTSTAMP_FILTER_PTP_V1_L4_EVENT : HWTSTAMP_FILTER_NONE;
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
index 2a1c43316f46..ec085897edf0 100644
--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
+++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
@@ -621,7 +621,8 @@ exit:
static int icss_iep_extts_enable(struct icss_iep *iep, u32 index, int on)
{
- u32 val, cap, ret = 0;
+ u32 val, cap;
+ int ret = 0;
mutex_lock(&iep->ptp_clk_mutex);
@@ -685,11 +686,17 @@ struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx)
struct platform_device *pdev;
struct device_node *iep_np;
struct icss_iep *iep;
+ int ret;
iep_np = of_parse_phandle(np, "ti,iep", idx);
- if (!iep_np || !of_device_is_available(iep_np))
+ if (!iep_np)
return ERR_PTR(-ENODEV);
+ if (!of_device_is_available(iep_np)) {
+ of_node_put(iep_np);
+ return ERR_PTR(-ENODEV);
+ }
+
pdev = of_find_device_by_node(iep_np);
of_node_put(iep_np);
@@ -698,21 +705,28 @@ struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx)
return ERR_PTR(-EPROBE_DEFER);
iep = platform_get_drvdata(pdev);
- if (!iep)
- return ERR_PTR(-EPROBE_DEFER);
+ if (!iep) {
+ ret = -EPROBE_DEFER;
+ goto err_put_pdev;
+ }
device_lock(iep->dev);
if (iep->client_np) {
device_unlock(iep->dev);
dev_err(iep->dev, "IEP is already acquired by %s",
iep->client_np->name);
- return ERR_PTR(-EBUSY);
+ ret = -EBUSY;
+ goto err_put_pdev;
}
iep->client_np = np;
device_unlock(iep->dev);
- get_device(iep->dev);
return iep;
+
+err_put_pdev:
+ put_device(&pdev->dev);
+
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(icss_iep_get_idx);
@@ -968,11 +982,112 @@ static const struct icss_iep_plat_data am654_icss_iep_plat_data = {
.config = &am654_icss_iep_regmap_config,
};
+static const struct icss_iep_plat_data am57xx_icss_iep_plat_data = {
+ .flags = ICSS_IEP_64BIT_COUNTER_SUPPORT |
+ ICSS_IEP_SLOW_COMPEN_REG_SUPPORT,
+ .reg_offs = {
+ [ICSS_IEP_GLOBAL_CFG_REG] = 0x00,
+ [ICSS_IEP_COMPEN_REG] = 0x08,
+ [ICSS_IEP_SLOW_COMPEN_REG] = 0x0c,
+ [ICSS_IEP_COUNT_REG0] = 0x10,
+ [ICSS_IEP_COUNT_REG1] = 0x14,
+ [ICSS_IEP_CAPTURE_CFG_REG] = 0x18,
+ [ICSS_IEP_CAPTURE_STAT_REG] = 0x1c,
+
+ [ICSS_IEP_CAP6_RISE_REG0] = 0x50,
+ [ICSS_IEP_CAP6_RISE_REG1] = 0x54,
+
+ [ICSS_IEP_CAP7_RISE_REG0] = 0x60,
+ [ICSS_IEP_CAP7_RISE_REG1] = 0x64,
+
+ [ICSS_IEP_CMP_CFG_REG] = 0x70,
+ [ICSS_IEP_CMP_STAT_REG] = 0x74,
+ [ICSS_IEP_CMP0_REG0] = 0x78,
+ [ICSS_IEP_CMP0_REG1] = 0x7c,
+ [ICSS_IEP_CMP1_REG0] = 0x80,
+ [ICSS_IEP_CMP1_REG1] = 0x84,
+
+ [ICSS_IEP_CMP8_REG0] = 0xc0,
+ [ICSS_IEP_CMP8_REG1] = 0xc4,
+ [ICSS_IEP_SYNC_CTRL_REG] = 0x180,
+ [ICSS_IEP_SYNC0_STAT_REG] = 0x188,
+ [ICSS_IEP_SYNC1_STAT_REG] = 0x18c,
+ [ICSS_IEP_SYNC_PWIDTH_REG] = 0x190,
+ [ICSS_IEP_SYNC0_PERIOD_REG] = 0x194,
+ [ICSS_IEP_SYNC1_DELAY_REG] = 0x198,
+ [ICSS_IEP_SYNC_START_REG] = 0x19c,
+ },
+ .config = &am654_icss_iep_regmap_config,
+};
+
+static bool am335x_icss_iep_valid_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ICSS_IEP_GLOBAL_CFG_REG ... ICSS_IEP_CAPTURE_STAT_REG:
+ case ICSS_IEP_CAP6_RISE_REG0:
+ case ICSS_IEP_CMP_CFG_REG ... ICSS_IEP_CMP0_REG0:
+ case ICSS_IEP_CMP8_REG0 ... ICSS_IEP_SYNC_START_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config am335x_icss_iep_regmap_config = {
+ .name = "icss iep",
+ .reg_stride = 1,
+ .reg_write = icss_iep_regmap_write,
+ .reg_read = icss_iep_regmap_read,
+ .writeable_reg = am335x_icss_iep_valid_reg,
+ .readable_reg = am335x_icss_iep_valid_reg,
+};
+
+static const struct icss_iep_plat_data am335x_icss_iep_plat_data = {
+ .flags = 0,
+ .reg_offs = {
+ [ICSS_IEP_GLOBAL_CFG_REG] = 0x00,
+ [ICSS_IEP_COMPEN_REG] = 0x08,
+ [ICSS_IEP_COUNT_REG0] = 0x0c,
+ [ICSS_IEP_CAPTURE_CFG_REG] = 0x10,
+ [ICSS_IEP_CAPTURE_STAT_REG] = 0x14,
+
+ [ICSS_IEP_CAP6_RISE_REG0] = 0x30,
+
+ [ICSS_IEP_CAP7_RISE_REG0] = 0x38,
+
+ [ICSS_IEP_CMP_CFG_REG] = 0x40,
+ [ICSS_IEP_CMP_STAT_REG] = 0x44,
+ [ICSS_IEP_CMP0_REG0] = 0x48,
+
+ [ICSS_IEP_CMP8_REG0] = 0x88,
+ [ICSS_IEP_SYNC_CTRL_REG] = 0x100,
+ [ICSS_IEP_SYNC0_STAT_REG] = 0x108,
+ [ICSS_IEP_SYNC1_STAT_REG] = 0x10c,
+ [ICSS_IEP_SYNC_PWIDTH_REG] = 0x110,
+ [ICSS_IEP_SYNC0_PERIOD_REG] = 0x114,
+ [ICSS_IEP_SYNC1_DELAY_REG] = 0x118,
+ [ICSS_IEP_SYNC_START_REG] = 0x11c,
+ },
+ .config = &am335x_icss_iep_regmap_config,
+};
+
static const struct of_device_id icss_iep_of_match[] = {
{
.compatible = "ti,am654-icss-iep",
.data = &am654_icss_iep_plat_data,
},
+ {
+ .compatible = "ti,am5728-icss-iep",
+ .data = &am57xx_icss_iep_plat_data,
+ },
+ {
+ .compatible = "ti,am4376-icss-iep",
+ .data = &am335x_icss_iep_plat_data,
+ },
+ {
+ .compatible = "ti,am3356-icss-iep",
+ .data = &am335x_icss_iep_plat_data,
+ },
{},
};
MODULE_DEVICE_TABLE(of, icss_iep_of_match);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
index 12f25cec6255..57e5f1c88f50 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_common.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -706,9 +706,9 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
struct page_pool *pool;
struct sk_buff *skb;
struct xdp_buff xdp;
+ int headroom, ret;
u32 *psdata;
void *pa;
- int ret;
*xdp_state = 0;
pool = rx_chn->pg_pool;
@@ -757,22 +757,23 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
xdp_prepare_buff(&xdp, pa, PRUETH_HEADROOM, pkt_len, false);
*xdp_state = emac_run_xdp(emac, &xdp, page, &pkt_len);
- if (*xdp_state == ICSSG_XDP_PASS)
- skb = xdp_build_skb_from_buff(&xdp);
- else
+ if (*xdp_state != ICSSG_XDP_PASS)
goto requeue;
+ headroom = xdp.data - xdp.data_hard_start;
+ pkt_len = xdp.data_end - xdp.data;
} else {
- /* prepare skb and send to n/w stack */
- skb = napi_build_skb(pa, PAGE_SIZE);
+ headroom = PRUETH_HEADROOM;
}
+ /* prepare skb and send to n/w stack */
+ skb = napi_build_skb(pa, PAGE_SIZE);
if (!skb) {
ndev->stats.rx_dropped++;
page_pool_recycle_direct(pool, page);
goto requeue;
}
- skb_reserve(skb, PRUETH_HEADROOM);
+ skb_reserve(skb, headroom);
skb_put(skb, pkt_len);
skb->dev = ndev;
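
The rework above matters because an XDP program may move xdp.data (e.g. via bpf_xdp_adjust_head()), so the skb built afterwards must account for the post-XDP headroom and length rather than the fixed PRUETH_HEADROOM. A condensed sketch of the accounting:

	/* After the XDP program ran, recompute both values from the
	 * xdp_buff before building an skb over the whole page.
	 */
	headroom = xdp.data - xdp.data_hard_start;
	pkt_len = xdp.data_end - xdp.data;

	skb = napi_build_skb(pa, PAGE_SIZE);	/* pa is the page VA */
	if (skb) {
		skb_reserve(skb, headroom);	/* skip adjusted headroom */
		skb_put(skb, pkt_len);		/* expose adjusted payload */
	}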
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index 2b973d6e2341..e42d0fdefee1 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -50,6 +50,8 @@
/* CTRLMMR_ICSSG_RGMII_CTRL register bits */
#define ICSSG_CTRL_RGMII_ID_MODE BIT(24)
+static void emac_adjust_link(struct net_device *ndev);
+
static int emac_get_tx_ts(struct prueth_emac *emac,
struct emac_tx_ts_response *rsp)
{
@@ -201,6 +203,44 @@ static void prueth_emac_stop(struct prueth *prueth)
}
}
+static void icssg_enable_fw_offload(struct prueth *prueth)
+{
+ struct prueth_emac *emac;
+ int mac;
+
+ for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
+ emac = prueth->emac[mac];
+ if (prueth->is_hsr_offload_mode) {
+ if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM)
+ icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE);
+ else
+ icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE);
+ }
+
+ if (prueth->is_switch_mode || prueth->is_hsr_offload_mode) {
+ if (netif_running(emac->ndev)) {
+ icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan,
+ ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_BLOCK,
+ true);
+ icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID,
+ BIT(emac->port_id) | DEFAULT_PORT_MASK,
+ BIT(emac->port_id) | DEFAULT_UNTAG_MASK,
+ true);
+ if (prueth->is_hsr_offload_mode)
+ icssg_vtbl_modify(emac, DEFAULT_VID,
+ DEFAULT_PORT_MASK,
+ DEFAULT_UNTAG_MASK, true);
+ icssg_set_pvid(prueth, emac->port_vlan, emac->port_id);
+ if (prueth->is_switch_mode)
+ icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);
+ }
+ }
+ }
+}
+
static int prueth_emac_common_start(struct prueth *prueth)
{
struct prueth_emac *emac;
@@ -229,6 +269,10 @@ static int prueth_emac_common_start(struct prueth *prueth)
ret = icssg_config(prueth, emac, slice);
if (ret)
goto disable_class;
+
+ mutex_lock(&emac->ndev->phydev->lock);
+ emac_adjust_link(emac->ndev);
+ mutex_unlock(&emac->ndev->phydev->lock);
}
ret = prueth_emac_start(prueth);
@@ -610,7 +654,7 @@ static void icssg_prueth_hsr_fdb_add_del(struct prueth_emac *emac,
static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
{
- struct net_device *real_dev;
+ struct net_device *real_dev, *port_dev;
struct prueth_emac *emac;
u8 vlan_id, i;
@@ -619,11 +663,15 @@ static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
if (is_hsr_master(real_dev)) {
for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
- emac = netdev_priv(hsr_get_port_ndev(real_dev, i));
- if (!emac)
+ port_dev = hsr_get_port_ndev(real_dev, i);
+ emac = netdev_priv(port_dev);
+ if (!emac) {
+ dev_put(port_dev);
return -EINVAL;
+ }
icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
true);
+ dev_put(port_dev);
}
} else {
emac = netdev_priv(real_dev);
@@ -635,7 +683,7 @@ static int icssg_prueth_hsr_add_mcast(struct net_device *ndev, const u8 *addr)
static int icssg_prueth_hsr_del_mcast(struct net_device *ndev, const u8 *addr)
{
- struct net_device *real_dev;
+ struct net_device *real_dev, *port_dev;
struct prueth_emac *emac;
u8 vlan_id, i;
@@ -644,11 +692,15 @@ static int icssg_prueth_hsr_del_mcast(struct net_device *ndev, const u8 *addr)
if (is_hsr_master(real_dev)) {
for (i = HSR_PT_SLAVE_A; i < HSR_PT_INTERLINK; i++) {
- emac = netdev_priv(hsr_get_port_ndev(real_dev, i));
- if (!emac)
+ port_dev = hsr_get_port_ndev(real_dev, i);
+ emac = netdev_priv(port_dev);
+ if (!emac) {
+ dev_put(port_dev);
return -EINVAL;
+ }
icssg_prueth_hsr_fdb_add_del(emac, addr, vlan_id,
false);
+ dev_put(port_dev);
}
} else {
emac = netdev_priv(real_dev);
@@ -747,6 +799,7 @@ static int emac_ndo_open(struct net_device *ndev)
ret = prueth_emac_common_start(prueth);
if (ret)
goto free_rx_irq;
+ icssg_enable_fw_offload(prueth);
}
flow_cfg = emac->dram.va + ICSSG_CONFIG_OFFSET + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET;
@@ -1354,8 +1407,7 @@ static int prueth_emac_restart(struct prueth *prueth)
static void icssg_change_mode(struct prueth *prueth)
{
- struct prueth_emac *emac;
- int mac, ret;
+ int ret;
ret = prueth_emac_restart(prueth);
if (ret) {
@@ -1363,35 +1415,7 @@ static void icssg_change_mode(struct prueth *prueth)
return;
}
- for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
- emac = prueth->emac[mac];
- if (prueth->is_hsr_offload_mode) {
- if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM)
- icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE);
- else
- icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE);
- }
-
- if (netif_running(emac->ndev)) {
- icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan,
- ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
- ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
- ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
- ICSSG_FDB_ENTRY_BLOCK,
- true);
- icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID,
- BIT(emac->port_id) | DEFAULT_PORT_MASK,
- BIT(emac->port_id) | DEFAULT_UNTAG_MASK,
- true);
- if (prueth->is_hsr_offload_mode)
- icssg_vtbl_modify(emac, DEFAULT_VID,
- DEFAULT_PORT_MASK,
- DEFAULT_UNTAG_MASK, true);
- icssg_set_pvid(prueth, emac->port_vlan, emac->port_id);
- if (prueth->is_switch_mode)
- icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);
- }
- }
+ icssg_enable_fw_offload(prueth);
}
static int prueth_netdevice_port_link(struct net_device *ndev,
diff --git a/drivers/net/ethernet/ti/icssm/icssm_prueth.c b/drivers/net/ethernet/ti/icssm/icssm_prueth.c
new file mode 100644
index 000000000000..293b7af04263
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssm/icssm_prueth.c
@@ -0,0 +1,1746 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Texas Instruments ICSSM Ethernet Driver
+ *
+ * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/genalloc.h>
+#include <linux/if_bridge.h>
+#include <linux/if_hsr.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/remoteproc/pruss.h>
+#include <linux/ptp_classify.h>
+#include <linux/regmap.h>
+#include <linux/remoteproc.h>
+#include <net/pkt_cls.h>
+
+#include "icssm_prueth.h"
+#include "../icssg/icssg_mii_rt.h"
+#include "../icssg/icss_iep.h"
+
+#define OCMC_RAM_SIZE (SZ_64K)
+
+#define TX_START_DELAY 0x40
+#define TX_CLK_DELAY_100M 0x6
+#define HR_TIMER_TX_DELAY_US 100
+
+static void icssm_prueth_write_reg(struct prueth *prueth,
+ enum prueth_mem region,
+ unsigned int reg, u32 val)
+{
+ writel_relaxed(val, prueth->mem[region].va + reg);
+}
+
+/* The macro below is for 1528-byte frame support, i.e. it leaves room
+ * for the redundancy tag
+ */
+#define PRUSS_MII_RT_RX_FRMS_MAX_SUPPORT_EMAC (VLAN_ETH_FRAME_LEN + \
+ ETH_FCS_LEN + \
+ ICSSM_LRE_TAG_SIZE)
+
+/* ensure that the order of PRUSS mem regions is the same as enum prueth_mem */
+static enum pruss_mem pruss_mem_ids[] = { PRUSS_MEM_DRAM0, PRUSS_MEM_DRAM1,
+ PRUSS_MEM_SHRD_RAM2 };
+
+static const struct prueth_queue_info queue_infos[][NUM_QUEUES] = {
+ [PRUETH_PORT_QUEUE_HOST] = {
+ [PRUETH_QUEUE1] = {
+ P0_Q1_BUFFER_OFFSET,
+ HOST_QUEUE_DESC_OFFSET,
+ P0_Q1_BD_OFFSET,
+ P0_Q1_BD_OFFSET + ((HOST_QUEUE_1_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE2] = {
+ P0_Q2_BUFFER_OFFSET,
+ HOST_QUEUE_DESC_OFFSET + 8,
+ P0_Q2_BD_OFFSET,
+ P0_Q2_BD_OFFSET + ((HOST_QUEUE_2_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE3] = {
+ P0_Q3_BUFFER_OFFSET,
+ HOST_QUEUE_DESC_OFFSET + 16,
+ P0_Q3_BD_OFFSET,
+ P0_Q3_BD_OFFSET + ((HOST_QUEUE_3_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE4] = {
+ P0_Q4_BUFFER_OFFSET,
+ HOST_QUEUE_DESC_OFFSET + 24,
+ P0_Q4_BD_OFFSET,
+ P0_Q4_BD_OFFSET + ((HOST_QUEUE_4_SIZE - 1) * BD_SIZE),
+ },
+ },
+ [PRUETH_PORT_QUEUE_MII0] = {
+ [PRUETH_QUEUE1] = {
+ P1_Q1_BUFFER_OFFSET,
+ P1_Q1_BUFFER_OFFSET + ((QUEUE_1_SIZE - 1) *
+ ICSS_BLOCK_SIZE),
+ P1_Q1_BD_OFFSET,
+ P1_Q1_BD_OFFSET + ((QUEUE_1_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE2] = {
+ P1_Q2_BUFFER_OFFSET,
+ P1_Q2_BUFFER_OFFSET + ((QUEUE_2_SIZE - 1) *
+ ICSS_BLOCK_SIZE),
+ P1_Q2_BD_OFFSET,
+ P1_Q2_BD_OFFSET + ((QUEUE_2_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE3] = {
+ P1_Q3_BUFFER_OFFSET,
+ P1_Q3_BUFFER_OFFSET + ((QUEUE_3_SIZE - 1) *
+ ICSS_BLOCK_SIZE),
+ P1_Q3_BD_OFFSET,
+ P1_Q3_BD_OFFSET + ((QUEUE_3_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE4] = {
+ P1_Q4_BUFFER_OFFSET,
+ P1_Q4_BUFFER_OFFSET + ((QUEUE_4_SIZE - 1) *
+ ICSS_BLOCK_SIZE),
+ P1_Q4_BD_OFFSET,
+ P1_Q4_BD_OFFSET + ((QUEUE_4_SIZE - 1) * BD_SIZE),
+ },
+ },
+ [PRUETH_PORT_QUEUE_MII1] = {
+ [PRUETH_QUEUE1] = {
+ P2_Q1_BUFFER_OFFSET,
+ P2_Q1_BUFFER_OFFSET + ((QUEUE_1_SIZE - 1) *
+ ICSS_BLOCK_SIZE),
+ P2_Q1_BD_OFFSET,
+ P2_Q1_BD_OFFSET + ((QUEUE_1_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE2] = {
+ P2_Q2_BUFFER_OFFSET,
+ P2_Q2_BUFFER_OFFSET + ((QUEUE_2_SIZE - 1) *
+ ICSS_BLOCK_SIZE),
+ P2_Q2_BD_OFFSET,
+ P2_Q2_BD_OFFSET + ((QUEUE_2_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE3] = {
+ P2_Q3_BUFFER_OFFSET,
+ P2_Q3_BUFFER_OFFSET + ((QUEUE_3_SIZE - 1) *
+ ICSS_BLOCK_SIZE),
+ P2_Q3_BD_OFFSET,
+ P2_Q3_BD_OFFSET + ((QUEUE_3_SIZE - 1) * BD_SIZE),
+ },
+ [PRUETH_QUEUE4] = {
+ P2_Q4_BUFFER_OFFSET,
+ P2_Q4_BUFFER_OFFSET + ((QUEUE_4_SIZE - 1) *
+ ICSS_BLOCK_SIZE),
+ P2_Q4_BD_OFFSET,
+ P2_Q4_BD_OFFSET + ((QUEUE_4_SIZE - 1) * BD_SIZE),
+ },
+ },
+};
+
+static const struct prueth_queue_desc queue_descs[][NUM_QUEUES] = {
+ [PRUETH_PORT_QUEUE_HOST] = {
+ { .rd_ptr = P0_Q1_BD_OFFSET, .wr_ptr = P0_Q1_BD_OFFSET, },
+ { .rd_ptr = P0_Q2_BD_OFFSET, .wr_ptr = P0_Q2_BD_OFFSET, },
+ { .rd_ptr = P0_Q3_BD_OFFSET, .wr_ptr = P0_Q3_BD_OFFSET, },
+ { .rd_ptr = P0_Q4_BD_OFFSET, .wr_ptr = P0_Q4_BD_OFFSET, },
+ },
+ [PRUETH_PORT_QUEUE_MII0] = {
+ { .rd_ptr = P1_Q1_BD_OFFSET, .wr_ptr = P1_Q1_BD_OFFSET, },
+ { .rd_ptr = P1_Q2_BD_OFFSET, .wr_ptr = P1_Q2_BD_OFFSET, },
+ { .rd_ptr = P1_Q3_BD_OFFSET, .wr_ptr = P1_Q3_BD_OFFSET, },
+ { .rd_ptr = P1_Q4_BD_OFFSET, .wr_ptr = P1_Q4_BD_OFFSET, },
+ },
+ [PRUETH_PORT_QUEUE_MII1] = {
+ { .rd_ptr = P2_Q1_BD_OFFSET, .wr_ptr = P2_Q1_BD_OFFSET, },
+ { .rd_ptr = P2_Q2_BD_OFFSET, .wr_ptr = P2_Q2_BD_OFFSET, },
+ { .rd_ptr = P2_Q3_BD_OFFSET, .wr_ptr = P2_Q3_BD_OFFSET, },
+ { .rd_ptr = P2_Q4_BD_OFFSET, .wr_ptr = P2_Q4_BD_OFFSET, },
+ }
+};
+
+static void icssm_prueth_hostconfig(struct prueth *prueth)
+{
+ void __iomem *sram_base = prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+ void __iomem *sram;
+
+ /* queue size lookup table */
+ sram = sram_base + HOST_QUEUE_SIZE_ADDR;
+ writew(HOST_QUEUE_1_SIZE, sram);
+ writew(HOST_QUEUE_2_SIZE, sram + 2);
+ writew(HOST_QUEUE_3_SIZE, sram + 4);
+ writew(HOST_QUEUE_4_SIZE, sram + 6);
+
+ /* queue information table */
+ sram = sram_base + HOST_Q1_RX_CONTEXT_OFFSET;
+ memcpy_toio(sram, queue_infos[PRUETH_PORT_QUEUE_HOST],
+ sizeof(queue_infos[PRUETH_PORT_QUEUE_HOST]));
+
+ /* buffer offset table */
+ sram = sram_base + HOST_QUEUE_OFFSET_ADDR;
+ writew(P0_Q1_BUFFER_OFFSET, sram);
+ writew(P0_Q2_BUFFER_OFFSET, sram + 2);
+ writew(P0_Q3_BUFFER_OFFSET, sram + 4);
+ writew(P0_Q4_BUFFER_OFFSET, sram + 6);
+
+ /* buffer descriptor offset table */
+ sram = sram_base + HOST_QUEUE_DESCRIPTOR_OFFSET_ADDR;
+ writew(P0_Q1_BD_OFFSET, sram);
+ writew(P0_Q2_BD_OFFSET, sram + 2);
+ writew(P0_Q3_BD_OFFSET, sram + 4);
+ writew(P0_Q4_BD_OFFSET, sram + 6);
+
+ /* queue table */
+ sram = sram_base + HOST_QUEUE_DESC_OFFSET;
+ memcpy_toio(sram, queue_descs[PRUETH_PORT_QUEUE_HOST],
+ sizeof(queue_descs[PRUETH_PORT_QUEUE_HOST]));
+}
+
+static void icssm_prueth_mii_init(struct prueth *prueth)
+{
+ struct regmap *mii_rt;
+ u32 rxcfg_reg, rxcfg;
+ u32 txcfg_reg, txcfg;
+
+ mii_rt = prueth->mii_rt;
+
+ rxcfg = PRUSS_MII_RT_RXCFG_RX_ENABLE |
+ PRUSS_MII_RT_RXCFG_RX_DATA_RDY_MODE_DIS |
+ PRUSS_MII_RT_RXCFG_RX_L2_EN |
+ PRUSS_MII_RT_RXCFG_RX_CUT_PREAMBLE |
+ PRUSS_MII_RT_RXCFG_RX_L2_EOF_SCLR_DIS;
+
+ /* Configuration of Port 0 Rx */
+ rxcfg_reg = PRUSS_MII_RT_RXCFG0;
+
+ regmap_write(mii_rt, rxcfg_reg, rxcfg);
+
+ /* Configuration of Port 1 Rx */
+ rxcfg_reg = PRUSS_MII_RT_RXCFG1;
+
+ rxcfg |= PRUSS_MII_RT_RXCFG_RX_MUX_SEL;
+
+ regmap_write(mii_rt, rxcfg_reg, rxcfg);
+
+ txcfg = PRUSS_MII_RT_TXCFG_TX_ENABLE |
+ PRUSS_MII_RT_TXCFG_TX_AUTO_PREAMBLE |
+ PRUSS_MII_RT_TXCFG_TX_32_MODE_EN |
+ (TX_START_DELAY << PRUSS_MII_RT_TXCFG_TX_START_DELAY_SHIFT) |
+ (TX_CLK_DELAY_100M << PRUSS_MII_RT_TXCFG_TX_CLK_DELAY_SHIFT);
+
+ /* Configuration of Port 0 Tx */
+ txcfg_reg = PRUSS_MII_RT_TXCFG0;
+
+ regmap_write(mii_rt, txcfg_reg, txcfg);
+
+ txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
+
+ /* Configuration of Port 1 Tx */
+ txcfg_reg = PRUSS_MII_RT_TXCFG1;
+
+ regmap_write(mii_rt, txcfg_reg, txcfg);
+
+ txcfg_reg = PRUSS_MII_RT_RX_FRMS0;
+
+ /* Min frame length should be set to 64 to allow reception of standard
+ * Ethernet frames such as PTP, LLDP that will not have the tag/rct.
+ * The actual size written to the register is size - 1 per the TRM.
+ * This also includes the CRC/FCS.
+ */
+ txcfg = FIELD_PREP(PRUSS_MII_RT_RX_FRMS_MIN_FRM_MASK,
+ (PRUSS_MII_RT_RX_FRMS_MIN_FRM - 1));
+
+ /* For EMAC, set the max frame size to 1528, i.e. the size with VLAN.
+ * The actual size written to the register is size - 1 as per the TRM.
+ * Since the driver supports run-time change of protocol, it
+ * must overwrite the values based on the Ethernet type.
+ */
+ txcfg |= FIELD_PREP(PRUSS_MII_RT_RX_FRMS_MAX_FRM_MASK,
+ (PRUSS_MII_RT_RX_FRMS_MAX_SUPPORT_EMAC - 1));
+
+ regmap_write(mii_rt, txcfg_reg, txcfg);
+
+ txcfg_reg = PRUSS_MII_RT_RX_FRMS1;
+
+ regmap_write(mii_rt, txcfg_reg, txcfg);
+}
+
+static void icssm_prueth_clearmem(struct prueth *prueth, enum prueth_mem region)
+{
+ memset_io(prueth->mem[region].va, 0, prueth->mem[region].size);
+}
+
+static void icssm_prueth_hostinit(struct prueth *prueth)
+{
+ /* Clear shared RAM */
+ icssm_prueth_clearmem(prueth, PRUETH_MEM_SHARED_RAM);
+
+ /* Clear OCMC RAM */
+ icssm_prueth_clearmem(prueth, PRUETH_MEM_OCMC);
+
+ /* Clear data RAMs */
+ if (prueth->eth_node[PRUETH_MAC0])
+ icssm_prueth_clearmem(prueth, PRUETH_MEM_DRAM0);
+ if (prueth->eth_node[PRUETH_MAC1])
+ icssm_prueth_clearmem(prueth, PRUETH_MEM_DRAM1);
+
+ /* Initialize host queues in shared RAM */
+ icssm_prueth_hostconfig(prueth);
+
+ /* Configure MII_RT */
+ icssm_prueth_mii_init(prueth);
+}
+
+/* This function initializes the driver in EMAC mode,
+ * based on eth_type
+ */
+static void icssm_prueth_init_ethernet_mode(struct prueth *prueth)
+{
+ icssm_prueth_hostinit(prueth);
+}
+
+static void icssm_prueth_port_enable(struct prueth_emac *emac, bool enable)
+{
+ struct prueth *prueth = emac->prueth;
+ void __iomem *port_ctrl;
+ void __iomem *ram;
+
+ ram = prueth->mem[emac->dram].va;
+ port_ctrl = ram + PORT_CONTROL_ADDR;
+ writeb(!!enable, port_ctrl);
+}
+
+static int icssm_prueth_emac_config(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ u32 sharedramaddr, ocmcaddr;
+ void __iomem *dram_base;
+ void __iomem *mac_addr;
+ void __iomem *dram;
+ void __iomem *sram;
+
+ /* PRU needs local shared RAM address for C28 */
+ sharedramaddr = ICSS_LOCAL_SHARED_RAM;
+ /* PRU needs real global OCMC address for C30 */
+ ocmcaddr = (u32)prueth->mem[PRUETH_MEM_OCMC].pa;
+ sram = prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+
+ /* Clear data RAM */
+ icssm_prueth_clearmem(prueth, emac->dram);
+
+ dram_base = prueth->mem[emac->dram].va;
+
+ /* setup mac address */
+ mac_addr = dram_base + PORT_MAC_ADDR;
+ memcpy_toio(mac_addr, emac->mac_addr, 6);
+
+ /* queue information table */
+ dram = dram_base + TX_CONTEXT_Q1_OFFSET_ADDR;
+ memcpy_toio(dram, queue_infos[emac->port_id],
+ sizeof(queue_infos[emac->port_id]));
+
+ /* queue table */
+ dram = dram_base + PORT_QUEUE_DESC_OFFSET;
+ memcpy_toio(dram, queue_descs[emac->port_id],
+ sizeof(queue_descs[emac->port_id]));
+
+ emac->rx_queue_descs = sram + HOST_QUEUE_DESC_OFFSET;
+ emac->tx_queue_descs = dram;
+
+ /* Point constant table C28 of the PRU to ICSS shared memory */
+ pru_rproc_set_ctable(emac->pru, PRU_C28, sharedramaddr);
+
+ /* Point constant table C30 of the PRU to OCMC memory */
+ pru_rproc_set_ctable(emac->pru, PRU_C30, ocmcaddr);
+
+ return 0;
+}
+
+/* Called back by the PHY layer when the link state of the hw port changes */
+static void icssm_emac_adjust_link(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct phy_device *phydev = emac->phydev;
+ struct prueth *prueth = emac->prueth;
+ bool new_state = false;
+ enum prueth_mem region;
+ unsigned long flags;
+ u32 port_status = 0;
+ u32 txcfg, mask;
+ u32 delay;
+
+ spin_lock_irqsave(&emac->lock, flags);
+
+ if (phydev->link) {
+ /* check the mode of operation */
+ if (phydev->duplex != emac->duplex) {
+ new_state = true;
+ emac->duplex = phydev->duplex;
+ }
+ if (phydev->speed != emac->speed) {
+ new_state = true;
+ emac->speed = phydev->speed;
+ }
+ if (!emac->link) {
+ new_state = true;
+ emac->link = 1;
+ }
+ } else if (emac->link) {
+ new_state = true;
+ emac->link = 0;
+ }
+
+ if (new_state) {
+ phy_print_status(phydev);
+ region = emac->dram;
+
+ /* update phy/port status information based on PHY values */
+ if (emac->link) {
+ port_status |= PORT_LINK_MASK;
+
+ icssm_prueth_write_reg(prueth, region, PHY_SPEED_OFFSET,
+ emac->speed);
+
+ delay = TX_CLK_DELAY_100M;
+ delay = delay << PRUSS_MII_RT_TXCFG_TX_CLK_DELAY_SHIFT;
+ mask = PRUSS_MII_RT_TXCFG_TX_CLK_DELAY_MASK;
+
+ if (emac->port_id)
+ txcfg = PRUSS_MII_RT_TXCFG1;
+ else
+ txcfg = PRUSS_MII_RT_TXCFG0;
+
+ regmap_update_bits(prueth->mii_rt, txcfg, mask, delay);
+ }
+
+ writeb(port_status, prueth->mem[region].va +
+ PORT_STATUS_OFFSET);
+ }
+
+ if (emac->link) {
+ /* reactivate the transmit queue if it is stopped */
+ if (netif_running(ndev) && netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+ } else {
+ if (!netif_queue_stopped(ndev))
+ netif_stop_queue(ndev);
+ }
+
+ spin_unlock_irqrestore(&emac->lock, flags);
+}
+
+static unsigned int
+icssm_get_buff_desc_count(const struct prueth_queue_info *queue)
+{
+ unsigned int buffer_desc_count;
+
+ buffer_desc_count = queue->buffer_desc_end -
+ queue->buffer_desc_offset;
+ buffer_desc_count /= BD_SIZE;
+ buffer_desc_count++;
+
+ return buffer_desc_count;
+}
+
+static void icssm_get_block(struct prueth_queue_desc __iomem *queue_desc,
+ const struct prueth_queue_info *queue,
+ int *write_block, int *read_block)
+{
+ *write_block = (readw(&queue_desc->wr_ptr) -
+ queue->buffer_desc_offset) / BD_SIZE;
+ *read_block = (readw(&queue_desc->rd_ptr) -
+ queue->buffer_desc_offset) / BD_SIZE;
+}
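
These two helpers convert the firmware's byte pointers into block indexes: the descriptor end pointer is inclusive, hence the +1 in the count. A toy calculation with invented offsets:

	#include <stdio.h>

	#define BD_SIZE 4	/* one buffer descriptor is 4 bytes */

	int main(void)
	{
		/* invented queue layout */
		unsigned short desc_offset = 0x400;	/* first BD */
		unsigned short desc_end = 0x43c;	/* last BD (inclusive) */
		unsigned short wr_ptr = 0x410;		/* firmware write pointer */

		unsigned int bd_count = (desc_end - desc_offset) / BD_SIZE + 1;
		unsigned int write_block = (wr_ptr - desc_offset) / BD_SIZE;

		printf("bd_count=%u write_block=%u\n", bd_count, write_block);
		/* prints: bd_count=16 write_block=4 */
		return 0;
	}
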
+
+/**
+ * icssm_emac_rx_irq - EMAC Rx interrupt handler
+ * @irq: interrupt number
+ * @dev_id: pointer to net_device
+ *
+ * EMAC interrupt handler - we only schedule NAPI here and do not
+ * process any packets.
+ *
+ * Return: IRQ_HANDLED if the interrupt was handled
+ */
+static irqreturn_t icssm_emac_rx_irq(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ if (likely(netif_running(ndev))) {
+ /* disable Rx system event */
+ disable_irq_nosync(emac->rx_irq);
+ napi_schedule(&emac->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * icssm_prueth_tx_enqueue - queue a packet to firmware for transmission
+ *
+ * @emac: EMAC data structure
+ * @skb: packet data buffer
+ * @queue_id: priority queue id
+ *
+ * Return: 0 on success, a negative error code otherwise
+ */
+static int icssm_prueth_tx_enqueue(struct prueth_emac *emac,
+ struct sk_buff *skb,
+ enum prueth_queue_id queue_id)
+{
+ struct prueth_queue_desc __iomem *queue_desc;
+ const struct prueth_queue_info *txqueue;
+ struct net_device *ndev = emac->ndev;
+ unsigned int buffer_desc_count;
+ int free_blocks, update_block;
+ bool buffer_wrapped = false;
+ int write_block, read_block;
+ void *src_addr, *dst_addr;
+ int pkt_block_size;
+ void __iomem *dram;
+ int txport, pktlen;
+ u16 update_wr_ptr;
+ u32 wr_buf_desc;
+ void *ocmc_ram;
+
+ dram = emac->prueth->mem[emac->dram].va;
+ if (eth_skb_pad(skb)) {
+ if (netif_msg_tx_err(emac) && net_ratelimit())
+ netdev_err(ndev, "packet pad failed\n");
+ return -ENOMEM;
+ }
+
+ /* which port to tx: MII0 or MII1 */
+ txport = emac->tx_port_queue;
+ src_addr = skb->data;
+ pktlen = skb->len;
+ /* Get the tx queue */
+ queue_desc = emac->tx_queue_descs + queue_id;
+ txqueue = &queue_infos[txport][queue_id];
+
+ buffer_desc_count = icssm_get_buff_desc_count(txqueue);
+
+ /* The PRU firmware deals mostly in pointers already
+ * offset into RAM; for code simplicity we would rather
+ * work with indexes within the queue at hand, so
+ * calculate those here
+ */
+ icssm_get_block(queue_desc, txqueue, &write_block, &read_block);
+
+ if (write_block > read_block) {
+ free_blocks = buffer_desc_count - write_block;
+ free_blocks += read_block;
+ } else if (write_block < read_block) {
+ free_blocks = read_block - write_block;
+ } else { /* they are all free */
+ free_blocks = buffer_desc_count;
+ }
+
+ pkt_block_size = DIV_ROUND_UP(pktlen, ICSS_BLOCK_SIZE);
+ if (pkt_block_size > free_blocks) /* out of queue space */
+ return -ENOBUFS;
+
+ /* calculate end BD address post write */
+ update_block = write_block + pkt_block_size;
+
+ /* Check for wrap around */
+ if (update_block >= buffer_desc_count) {
+ update_block %= buffer_desc_count;
+ buffer_wrapped = true;
+ }
+
+ /* OCMC RAM is not cached and write order is not important */
+ ocmc_ram = (__force void *)emac->prueth->mem[PRUETH_MEM_OCMC].va;
+ dst_addr = ocmc_ram + txqueue->buffer_offset +
+ (write_block * ICSS_BLOCK_SIZE);
+
+ /* Copy the data from socket buffer(DRAM) to PRU buffers(OCMC) */
+ if (buffer_wrapped) { /* wrapped around buffer */
+ int bytes = (buffer_desc_count - write_block) * ICSS_BLOCK_SIZE;
+ int remaining;
+
+ /* bytes is an integral multiple of ICSS_BLOCK_SIZE, but the
+ * entire packet may have fit within the last BD if the
+ * packet length is not an integral multiple of
+ * ICSS_BLOCK_SIZE
+ */
+ if (pktlen < bytes)
+ bytes = pktlen;
+
+ /* copy non-wrapped part */
+ memcpy(dst_addr, src_addr, bytes);
+
+ /* copy wrapped part */
+ src_addr += bytes;
+ remaining = pktlen - bytes;
+ dst_addr = ocmc_ram + txqueue->buffer_offset;
+ memcpy(dst_addr, src_addr, remaining);
+ } else {
+ memcpy(dst_addr, src_addr, pktlen);
+ }
+
+ /* update first buffer descriptor */
+ wr_buf_desc = (pktlen << PRUETH_BD_LENGTH_SHIFT) &
+ PRUETH_BD_LENGTH_MASK;
+ writel(wr_buf_desc, dram + readw(&queue_desc->wr_ptr));
+
+ /* update the write pointer in this queue descriptor, the firmware
+ * polls for this change so this will signal the start of transmission
+ */
+ update_wr_ptr = txqueue->buffer_desc_offset + (update_block * BD_SIZE);
+ writew(update_wr_ptr, &queue_desc->wr_ptr);
+
+ return 0;
+}
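
To make the ring accounting concrete: free space is counted in whole blocks between the write and read indexes, equal indexes mean an empty queue, and the copy into the buffer wraps at the end exactly once. A standalone model under those assumptions (block size and count are invented):

	#include <stdio.h>
	#include <string.h>

	/* invented geometry; only the arithmetic mirrors the driver */
	#define BLOCK_SIZE	32
	#define BLOCK_COUNT	16

	static unsigned char ring[BLOCK_COUNT * BLOCK_SIZE];

	static int free_blocks(int write_block, int read_block)
	{
		if (write_block > read_block)
			return (BLOCK_COUNT - write_block) + read_block;
		if (write_block < read_block)
			return read_block - write_block;
		return BLOCK_COUNT;	/* equal pointers: queue is empty */
	}

	/* copy pkt at write_block, wrapping once at the end of the ring */
	static void copy_wrapped(int write_block, const unsigned char *pkt, int len)
	{
		int bytes = (BLOCK_COUNT - write_block) * BLOCK_SIZE;

		if (len <= bytes) {
			memcpy(ring + write_block * BLOCK_SIZE, pkt, len);
			return;
		}
		memcpy(ring + write_block * BLOCK_SIZE, pkt, bytes);
		memcpy(ring, pkt + bytes, len - bytes);	/* wrapped tail */
	}

	int main(void)
	{
		unsigned char pkt[100] = { 0 };

		printf("free=%d\n", free_blocks(14, 3));	/* prints: free=5 */
		copy_wrapped(14, pkt, sizeof(pkt));	/* 64 bytes, then 36 */
		return 0;
	}
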
+
+void icssm_parse_packet_info(struct prueth *prueth, u32 buffer_descriptor,
+ struct prueth_packet_info *pkt_info)
+{
+ pkt_info->shadow = !!(buffer_descriptor & PRUETH_BD_SHADOW_MASK);
+ pkt_info->port = (buffer_descriptor & PRUETH_BD_PORT_MASK) >>
+ PRUETH_BD_PORT_SHIFT;
+ pkt_info->length = (buffer_descriptor & PRUETH_BD_LENGTH_MASK) >>
+ PRUETH_BD_LENGTH_SHIFT;
+ pkt_info->broadcast = !!(buffer_descriptor & PRUETH_BD_BROADCAST_MASK);
+ pkt_info->error = !!(buffer_descriptor & PRUETH_BD_ERROR_MASK);
+ pkt_info->lookup_success = !!(buffer_descriptor &
+ PRUETH_BD_LOOKUP_SUCCESS_MASK);
+ pkt_info->flood = !!(buffer_descriptor & PRUETH_BD_SW_FLOOD_MASK);
+ pkt_info->timestamp = !!(buffer_descriptor & PRUETH_BD_TIMESTAMP_MASK);
+}
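
Each 32-bit buffer descriptor packs the length and several single-bit flags; the helper above just masks and shifts them out. A self-contained decode with invented mask and shift values (the real PRUETH_BD_* layout is defined by the firmware ABI headers, not shown here):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* invented mask/shift values, not the firmware ABI */
	#define BD_LENGTH_MASK	0x1ffc0000u
	#define BD_LENGTH_SHIFT	18
	#define BD_ERROR_MASK	0x00000004u

	int main(void)
	{
		uint32_t bd = 1514u << BD_LENGTH_SHIFT;	/* a 1514-byte frame */

		unsigned int length = (bd & BD_LENGTH_MASK) >> BD_LENGTH_SHIFT;
		bool error = !!(bd & BD_ERROR_MASK);

		printf("len=%u error=%d\n", length, error); /* len=1514 error=0 */
		return 0;
	}
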
+
+/**
+ * icssm_emac_rx_packet - EMAC Receive function
+ *
+ * @emac: EMAC data structure
+ * @bd_rd_ptr: Buffer descriptor read pointer
+ * @pkt_info: packet information structure
+ * @rxqueue: Receive queue information structure
+ *
+ * Get a packet from receive queue
+ *
+ * Return: 0 on success, -ENOMEM if the skb allocation fails
+ */
+int icssm_emac_rx_packet(struct prueth_emac *emac, u16 *bd_rd_ptr,
+ struct prueth_packet_info *pkt_info,
+ const struct prueth_queue_info *rxqueue)
+{
+ struct net_device *ndev = emac->ndev;
+ unsigned int buffer_desc_count;
+ int read_block, update_block;
+ unsigned int actual_pkt_len;
+ bool buffer_wrapped = false;
+ void *src_addr, *dst_addr;
+ struct sk_buff *skb;
+ int pkt_block_size;
+ void *ocmc_ram;
+
+ /* The PRU firmware deals mostly in pointers already
+ * offset into RAM; for code simplicity we would rather
+ * work with indexes within the queue at hand, so
+ * calculate those here
+ */
+ buffer_desc_count = icssm_get_buff_desc_count(rxqueue);
+ read_block = (*bd_rd_ptr - rxqueue->buffer_desc_offset) / BD_SIZE;
+ pkt_block_size = DIV_ROUND_UP(pkt_info->length, ICSS_BLOCK_SIZE);
+
+ /* calculate end BD address post read */
+ update_block = read_block + pkt_block_size;
+
+ /* Check for wrap around */
+ if (update_block >= buffer_desc_count) {
+ update_block %= buffer_desc_count;
+ if (update_block)
+ buffer_wrapped = true;
+ }
+
+ /* calculate new pointer in ram */
+ *bd_rd_ptr = rxqueue->buffer_desc_offset + (update_block * BD_SIZE);
+
+ actual_pkt_len = pkt_info->length;
+
+ /* Allocate a socket buffer for this packet */
+ skb = netdev_alloc_skb_ip_align(ndev, actual_pkt_len);
+ if (!skb) {
+ if (netif_msg_rx_err(emac) && net_ratelimit())
+ netdev_err(ndev, "failed rx buffer alloc\n");
+ return -ENOMEM;
+ }
+
+ dst_addr = skb->data;
+
+ /* OCMC RAM is not cached and read order is not important */
+ ocmc_ram = (__force void *)emac->prueth->mem[PRUETH_MEM_OCMC].va;
+
+ /* Get the start address of the first buffer from
+ * the read buffer description
+ */
+ src_addr = ocmc_ram + rxqueue->buffer_offset +
+ (read_block * ICSS_BLOCK_SIZE);
+
+ /* Copy the data from PRU buffers(OCMC) to socket buffer(DRAM) */
+ if (buffer_wrapped) { /* wrapped around buffer */
+ int bytes = (buffer_desc_count - read_block) * ICSS_BLOCK_SIZE;
+ int remaining;
+ /* bytes is an integral multiple of ICSS_BLOCK_SIZE, but the
+ * entire packet may have fit within the last BD if
+ * pkt_info->length is not an integral multiple of
+ * ICSS_BLOCK_SIZE
+ */
+ if (pkt_info->length < bytes)
+ bytes = pkt_info->length;
+
+ /* copy non-wrapped part */
+ memcpy(dst_addr, src_addr, bytes);
+
+ /* copy wrapped part */
+ dst_addr += bytes;
+ remaining = actual_pkt_len - bytes;
+
+ src_addr = ocmc_ram + rxqueue->buffer_offset;
+ memcpy(dst_addr, src_addr, remaining);
+ src_addr += remaining;
+ } else {
+ memcpy(dst_addr, src_addr, actual_pkt_len);
+ src_addr += actual_pkt_len;
+ }
+
+ skb_put(skb, actual_pkt_len);
+
+ /* send packet up the stack */
+ skb->protocol = eth_type_trans(skb, ndev);
+ netif_receive_skb(skb);
+
+ /* update stats */
+ emac->stats.rx_bytes += actual_pkt_len;
+ emac->stats.rx_packets++;
+
+ return 0;
+}
+
+static int icssm_emac_rx_packets(struct prueth_emac *emac, int budget)
+{
+ struct prueth_queue_desc __iomem *queue_desc;
+ const struct prueth_queue_info *rxqueue;
+ struct prueth *prueth = emac->prueth;
+ struct prueth_packet_info pkt_info;
+ int start_queue, end_queue;
+ void __iomem *shared_ram;
+ u16 bd_rd_ptr, bd_wr_ptr;
+ u16 update_rd_ptr;
+ u8 overflow_cnt;
+ u32 rd_buf_desc;
+ int used = 0;
+ int i, ret;
+
+ shared_ram = emac->prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+
+ start_queue = emac->rx_queue_start;
+ end_queue = emac->rx_queue_end;
+
+ /* skip Rx if budget is 0 */
+ if (!budget)
+ return 0;
+
+ /* search host queues for packets */
+ for (i = start_queue; i <= end_queue; i++) {
+ queue_desc = emac->rx_queue_descs + i;
+ rxqueue = &queue_infos[PRUETH_PORT_HOST][i];
+
+ overflow_cnt = readb(&queue_desc->overflow_cnt);
+ if (overflow_cnt > 0) {
+ emac->stats.rx_over_errors += overflow_cnt;
+ /* reset to zero */
+ writeb(0, &queue_desc->overflow_cnt);
+ }
+
+ bd_rd_ptr = readw(&queue_desc->rd_ptr);
+ bd_wr_ptr = readw(&queue_desc->wr_ptr);
+
+ /* while packets are available in this queue */
+ while (bd_rd_ptr != bd_wr_ptr) {
+ /* get packet info from the read buffer descriptor */
+ rd_buf_desc = readl(shared_ram + bd_rd_ptr);
+ icssm_parse_packet_info(prueth, rd_buf_desc, &pkt_info);
+
+ if (pkt_info.length <= 0) {
+ /* a packet length of zero will cause us to
+ * never move the read pointer ahead, locking
+ * the driver, so we manually have to move it
+ * to the write pointer, discarding all
+ * remaining packets in this queue. This should
+ * never happen.
+ */
+ update_rd_ptr = bd_wr_ptr;
+ emac->stats.rx_length_errors++;
+ } else if (pkt_info.length > EMAC_MAX_FRM_SUPPORT) {
+ /* if the packet is too large we skip it but we
+ * still need to move the read pointer ahead
+ * and assume something is wrong with the read
+ * pointer as the firmware should be filtering
+ * these packets
+ */
+ update_rd_ptr = bd_wr_ptr;
+ emac->stats.rx_length_errors++;
+ } else {
+ update_rd_ptr = bd_rd_ptr;
+ ret = icssm_emac_rx_packet(emac, &update_rd_ptr,
+ &pkt_info, rxqueue);
+ if (ret)
+ return used;
+ used++;
+ }
+
+ /* after reading the buffer descriptor we clear it
+ * to prevent improperly moved read pointer errors
+ * from simply looking like old packets.
+ */
+ writel(0, shared_ram + bd_rd_ptr);
+
+ /* update read pointer in queue descriptor */
+ writew(update_rd_ptr, &queue_desc->rd_ptr);
+ bd_rd_ptr = update_rd_ptr;
+
+ /* all we have room for? */
+ if (used >= budget)
+ return used;
+ }
+ }
+
+ return used;
+}
+
+static int icssm_emac_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct prueth_emac *emac = container_of(napi, struct prueth_emac, napi);
+ int num_rx;
+
+ num_rx = icssm_emac_rx_packets(emac, budget);
+
+ if (num_rx < budget && napi_complete_done(napi, num_rx))
+ enable_irq(emac->rx_irq);
+
+ return num_rx;
+}
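
This is the standard NAPI contract: the hard IRQ masks itself and schedules the poller, and the poll re-enables the interrupt only when it consumed less than its budget and napi_complete_done() agreed to leave polling mode. A toy model of that decision (the kernel-side veto is simplified to the budget test):

	#include <stdbool.h>
	#include <stdio.h>

	/* simplified: the real napi_complete_done() may also refuse */
	static bool napi_complete_done_model(int used, int budget)
	{
		return used < budget;
	}

	int main(void)
	{
		int budget = 64;
		int rounds[] = { 64, 64, 10 };	/* packets consumed per poll */

		for (int i = 0; i < 3; i++) {
			if (napi_complete_done_model(rounds[i], budget))
				printf("poll %d: used=%d -> enable_irq\n",
				       i, rounds[i]);
			else
				printf("poll %d: used=%d -> poll again\n",
				       i, rounds[i]);
		}
		return 0;
	}
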
+
+static int icssm_emac_set_boot_pru(struct prueth_emac *emac,
+ struct net_device *ndev)
+{
+ const struct prueth_firmware *pru_firmwares;
+ struct prueth *prueth = emac->prueth;
+ const char *fw_name;
+ int ret;
+
+ pru_firmwares = &prueth->fw_data->fw_pru[emac->port_id - 1];
+ fw_name = pru_firmwares->fw_name[prueth->eth_type];
+ if (!fw_name) {
+ netdev_err(ndev, "eth_type %d not supported\n",
+ prueth->eth_type);
+ return -ENODEV;
+ }
+
+ ret = rproc_set_firmware(emac->pru, fw_name);
+ if (ret) {
+ netdev_err(ndev, "failed to set %s firmware: %d\n",
+ fw_name, ret);
+ return ret;
+ }
+
+ ret = rproc_boot(emac->pru);
+ if (ret) {
+ netdev_err(ndev, "failed to boot %s firmware: %d\n",
+ fw_name, ret);
+ return ret;
+ }
+ return ret;
+}
+
+static int icssm_emac_request_irqs(struct prueth_emac *emac)
+{
+ struct net_device *ndev = emac->ndev;
+ int ret;
+
+ ret = request_irq(emac->rx_irq, icssm_emac_rx_irq,
+ IRQF_TRIGGER_HIGH,
+ ndev->name, ndev);
+ if (ret) {
+ netdev_err(ndev, "unable to request RX IRQ\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static void icssm_ptp_dram_init(struct prueth_emac *emac)
+{
+ void __iomem *sram = emac->prueth->mem[PRUETH_MEM_SHARED_RAM].va;
+ u64 temp64;
+
+ writew(0, sram + MII_RX_CORRECTION_OFFSET);
+ writew(0, sram + MII_TX_CORRECTION_OFFSET);
+
+ /* Initialize RCF to 1 (Linux N/A) */
+ writel(1 * 1024, sram + TIMESYNC_TC_RCF_OFFSET);
+
+ /* This flag will be set and cleared by firmware */
+ /* Write Sync0 pulse width for sync signal generation in PTP
+ * memory in shared RAM
+ */
+ writel(200000000 / 50, sram + TIMESYNC_SYNC0_WIDTH_OFFSET);
+
+ /* Write CMP1 compare value for sync signal generation in PTP
+ * memory in shared RAM
+ */
+ temp64 = 1000000;
+ memcpy_toio(sram + TIMESYNC_CMP1_CMP_OFFSET, &temp64, sizeof(temp64));
+
+ /* Write CMP1 period for sync signal generation in PTP
+ * memory in shared RAM
+ */
+ writel(1000000, sram + TIMESYNC_CMP1_PERIOD_OFFSET);
+
+ /* Configures domainNumber list. Firmware supports 2 domains */
+ writeb(0, sram + TIMESYNC_DOMAIN_NUMBER_LIST);
+ writeb(0, sram + TIMESYNC_DOMAIN_NUMBER_LIST + 1);
+
+ /* Configure 1-step/2-step */
+ writeb(1, sram + DISABLE_SWITCH_SYNC_RELAY_OFFSET);
+
+ /* Configure link-local frames as not carrying an HSR tag */
+ writeb(0, sram + LINK_LOCAL_FRAME_HAS_HSR_TAG);
+
+ /* Enable E2E/UDP PTP message timestamping */
+ writeb(1, sram + PTP_IPV4_UDP_E2E_ENABLE);
+}
+
+/**
+ * icssm_emac_ndo_open - EMAC device open
+ * @ndev: network adapter device
+ *
+ * Called when system wants to start the interface.
+ *
+ * Return: 0 for a successful open, or appropriate error code
+ */
+static int icssm_emac_ndo_open(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ int ret;
+
+ /* set h/w MAC as user might have re-configured */
+ ether_addr_copy(emac->mac_addr, ndev->dev_addr);
+
+ if (!prueth->emac_configured)
+ icssm_prueth_init_ethernet_mode(prueth);
+
+ icssm_prueth_emac_config(emac);
+
+ if (!prueth->emac_configured) {
+ icssm_ptp_dram_init(emac);
+ ret = icss_iep_init(prueth->iep, NULL, NULL, 0);
+ if (ret) {
+ netdev_err(ndev, "Failed to initialize iep: %d\n", ret);
+ goto iep_exit;
+ }
+ }
+
+ ret = icssm_emac_set_boot_pru(emac, ndev);
+ if (ret)
+ goto iep_exit;
+
+ ret = icssm_emac_request_irqs(emac);
+ if (ret)
+ goto rproc_shutdown;
+
+ napi_enable(&emac->napi);
+
+ /* start PHY */
+ phy_start(emac->phydev);
+
+ /* enable the port and vlan */
+ icssm_prueth_port_enable(emac, true);
+
+ prueth->emac_configured |= BIT(emac->port_id);
+
+ if (netif_msg_drv(emac))
+ dev_notice(&ndev->dev, "started\n");
+
+ return 0;
+
+rproc_shutdown:
+ rproc_shutdown(emac->pru);
+
+iep_exit:
+ if (!prueth->emac_configured)
+ icss_iep_exit(prueth->iep);
+
+ return ret;
+}
+
+/**
+ * icssm_emac_ndo_stop - EMAC device stop
+ * @ndev: network adapter device
+ *
+ * Called when system wants to stop or down the interface.
+ *
+ * Return: Always 0 (Success)
+ */
+static int icssm_emac_ndo_stop(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+
+ prueth->emac_configured &= ~BIT(emac->port_id);
+
+ /* disable the mac port */
+ icssm_prueth_port_enable(emac, false);
+
+ /* stop PHY */
+ phy_stop(emac->phydev);
+
+ napi_disable(&emac->napi);
+ hrtimer_cancel(&emac->tx_hrtimer);
+
+ /* stop the PRU */
+ rproc_shutdown(emac->pru);
+
+ /* free rx interrupts */
+ free_irq(emac->rx_irq, ndev);
+
+ if (netif_msg_drv(emac))
+ dev_notice(&ndev->dev, "stopped\n");
+
+ return 0;
+}
+
+/* VLAN-tag PCP to priority queue map for EMAC/Switch/HSR/PRP used by driver
+ * Index is PCP val / 2.
+ * low - pcp 0..3 maps to Q4 for Host
+ * high - pcp 4..7 maps to Q3 for Host
+ * low - pcp 0..3 maps to Q2 (FWD Queue) for PRU-x
+ * where x = 1 for PRUETH_PORT_MII0
+ * 0 for PRUETH_PORT_MII1
+ * high - pcp 4..7 maps to Q1 (FWD Queue) for PRU-x
+ */
+static const unsigned short emac_pcp_tx_priority_queue_map[] = {
+ PRUETH_QUEUE4, PRUETH_QUEUE4,
+ PRUETH_QUEUE3, PRUETH_QUEUE3,
+ PRUETH_QUEUE2, PRUETH_QUEUE2,
+ PRUETH_QUEUE1, PRUETH_QUEUE1,
+};
+
+static u16 icssm_prueth_get_tx_queue_id(struct prueth *prueth,
+ struct sk_buff *skb)
+{
+ u16 vlan_tci, pcp;
+ int err;
+
+ err = vlan_get_tag(skb, &vlan_tci);
+ if (likely(err))
+ pcp = 0;
+ else
+ pcp = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+
+ /* The code below (pcp >>= 1) is common to all
+ * protocols (i.e., EMAC, RSTP, HSR and PRP):
+ * pcp value 0,1 becomes index 0, mapped to QUEUE4
+ * pcp value 2,3 becomes index 1, mapped to QUEUE4
+ * pcp value 4,5 becomes index 2, mapped to QUEUE3
+ * pcp value 6,7 becomes index 3, mapped to QUEUE3
+ */
+ pcp >>= 1;
+
+ return emac_pcp_tx_priority_queue_map[pcp];
+}
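
As a quick sanity check of the folding: indexing the 8-entry map with pcp >> 1 uses only its first four entries, so every PCP value lands on QUEUE4 or QUEUE3 for the host, matching the comment. A standalone demo (enum values mirror prueth_queue_id):

	#include <stdio.h>

	/* values mirror enum prueth_queue_id */
	enum { QUEUE1 = 0, QUEUE2, QUEUE3, QUEUE4 };

	static const int map[] = { QUEUE4, QUEUE4, QUEUE3, QUEUE3,
				   QUEUE2, QUEUE2, QUEUE1, QUEUE1 };

	int main(void)
	{
		for (int pcp = 0; pcp < 8; pcp++)
			printf("pcp %d -> queue id %d\n", pcp, map[pcp >> 1]);
		/* pcp 0..3 -> 3 (QUEUE4), pcp 4..7 -> 2 (QUEUE3) */
		return 0;
	}
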
+
+/**
+ * icssm_emac_ndo_start_xmit - EMAC Transmit function
+ * @skb: SKB pointer
+ * @ndev: EMAC network adapter
+ *
+ * Called by the system to transmit a packet - we queue the packet in
+ * the EMAC hardware transmit queue
+ *
+ * Return: enum netdev_tx
+ */
+static enum netdev_tx icssm_emac_ndo_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int ret;
+ u16 qid;
+
+ qid = icssm_prueth_get_tx_queue_id(emac->prueth, skb);
+ ret = icssm_prueth_tx_enqueue(emac, skb, qid);
+ if (ret) {
+ if (ret != -ENOBUFS && netif_msg_tx_err(emac) &&
+ net_ratelimit())
+ netdev_err(ndev, "packet queue failed: %d\n", ret);
+ goto fail_tx;
+ }
+
+ emac->stats.tx_packets++;
+ emac->stats.tx_bytes += skb->len;
+ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
+
+fail_tx:
+ if (ret == -ENOBUFS) {
+ netif_stop_queue(ndev);
+ hrtimer_start(&emac->tx_hrtimer,
+ us_to_ktime(HR_TIMER_TX_DELAY_US),
+ HRTIMER_MODE_REL_PINNED);
+ ret = NETDEV_TX_BUSY;
+ } else {
+ /* error */
+ emac->stats.tx_dropped++;
+ ret = NET_XMIT_DROP;
+ }
+
+ return ret;
+}
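
On -ENOBUFS the transmit path does not drop the frame: it parks the queue, returns NETDEV_TX_BUSY so the core requeues the skb, and relies on the hrtimer to wake the queue roughly HR_TIMER_TX_DELAY_US later. A toy model of that control flow (timing and locking elided):

	#include <stdio.h>

	enum { TX_OK, TX_BUSY };
	#define MODEL_ENOBUFS (-105)	/* ENOBUFS, for illustration */

	static int queue_stopped;

	static int xmit(int enqueue_ret)
	{
		if (enqueue_ret == 0)
			return TX_OK;
		queue_stopped = 1;	/* netif_stop_queue() */
		/* the driver arms the hrtimer for HR_TIMER_TX_DELAY_US here */
		return TX_BUSY;		/* core keeps and requeues the skb */
	}

	static void tx_timer_callback(void)
	{
		if (queue_stopped) {
			queue_stopped = 0;	/* netif_wake_queue() */
			printf("queue woken, core retries the skb\n");
		}
	}

	int main(void)
	{
		if (xmit(MODEL_ENOBUFS) == TX_BUSY)
			tx_timer_callback();
		return 0;
	}
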
+
+/**
+ * icssm_emac_ndo_get_stats64 - EMAC get statistics function
+ * @ndev: The EMAC network adapter
+ * @stats: rtnl_link_stats64 structure
+ *
+ * Called when system wants to get statistics from the device.
+ *
+ */
+static void icssm_emac_ndo_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ stats->rx_packets = emac->stats.rx_packets;
+ stats->rx_bytes = emac->stats.rx_bytes;
+ stats->tx_packets = emac->stats.tx_packets;
+ stats->tx_bytes = emac->stats.tx_bytes;
+ stats->tx_dropped = emac->stats.tx_dropped;
+ stats->rx_over_errors = emac->stats.rx_over_errors;
+ stats->rx_length_errors = emac->stats.rx_length_errors;
+}
+
+static const struct net_device_ops emac_netdev_ops = {
+ .ndo_open = icssm_emac_ndo_open,
+ .ndo_stop = icssm_emac_ndo_stop,
+ .ndo_start_xmit = icssm_emac_ndo_start_xmit,
+ .ndo_get_stats64 = icssm_emac_ndo_get_stats64,
+};
+
+/* get emac_port corresponding to eth_node name */
+static int icssm_prueth_node_port(struct device_node *eth_node)
+{
+ u32 port_id;
+ int ret;
+
+ ret = of_property_read_u32(eth_node, "reg", &port_id);
+ if (ret)
+ return ret;
+
+ if (port_id == 0)
+ return PRUETH_PORT_MII0;
+ else if (port_id == 1)
+ return PRUETH_PORT_MII1;
+ else
+ return PRUETH_PORT_INVALID;
+}
+
+/* get MAC instance corresponding to eth_node name */
+static int icssm_prueth_node_mac(struct device_node *eth_node)
+{
+ u32 port_id;
+ int ret;
+
+ ret = of_property_read_u32(eth_node, "reg", &port_id);
+ if (ret)
+ return ret;
+
+ if (port_id == 0)
+ return PRUETH_MAC0;
+ else if (port_id == 1)
+ return PRUETH_MAC1;
+ else
+ return PRUETH_MAC_INVALID;
+}
+
+static enum hrtimer_restart icssm_emac_tx_timer_callback(struct hrtimer *timer)
+{
+ struct prueth_emac *emac =
+ container_of(timer, struct prueth_emac, tx_hrtimer);
+
+ if (netif_queue_stopped(emac->ndev))
+ netif_wake_queue(emac->ndev);
+
+ return HRTIMER_NORESTART;
+}
+
+static int icssm_prueth_netdev_init(struct prueth *prueth,
+ struct device_node *eth_node)
+{
+ struct prueth_emac *emac;
+ struct net_device *ndev;
+ enum prueth_port port;
+ enum prueth_mac mac;
+ int ret;
+
+ port = icssm_prueth_node_port(eth_node);
+ if (port == PRUETH_PORT_INVALID)
+ return -EINVAL;
+
+ mac = icssm_prueth_node_mac(eth_node);
+ if (mac == PRUETH_MAC_INVALID)
+ return -EINVAL;
+
+ ndev = devm_alloc_etherdev(prueth->dev, sizeof(*emac));
+ if (!ndev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(ndev, prueth->dev);
+ emac = netdev_priv(ndev);
+ prueth->emac[mac] = emac;
+ emac->prueth = prueth;
+ emac->ndev = ndev;
+ emac->port_id = port;
+
+ /* by default eth_type is EMAC */
+ switch (port) {
+ case PRUETH_PORT_MII0:
+ emac->tx_port_queue = PRUETH_PORT_QUEUE_MII0;
+
+ /* packets from MII0 are on queues 1 through 2 */
+ emac->rx_queue_start = PRUETH_QUEUE1;
+ emac->rx_queue_end = PRUETH_QUEUE2;
+
+ emac->dram = PRUETH_MEM_DRAM0;
+ emac->pru = prueth->pru0;
+ break;
+ case PRUETH_PORT_MII1:
+ emac->tx_port_queue = PRUETH_PORT_QUEUE_MII1;
+
+ /* packets from MII1 are on queues 3 through 4 */
+ emac->rx_queue_start = PRUETH_QUEUE3;
+ emac->rx_queue_end = PRUETH_QUEUE4;
+
+ emac->dram = PRUETH_MEM_DRAM1;
+ emac->pru = prueth->pru1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ emac->rx_irq = of_irq_get_byname(eth_node, "rx");
+ if (emac->rx_irq < 0) {
+ ret = emac->rx_irq;
+ if (ret != -EPROBE_DEFER)
+ dev_err(prueth->dev, "could not get rx irq\n");
+ goto free;
+ }
+
+ /* get mac address from DT and set private and netdev addr */
+ ret = of_get_ethdev_address(eth_node, ndev);
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ eth_hw_addr_random(ndev);
+ dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n",
+ port, ndev->dev_addr);
+ }
+ ether_addr_copy(emac->mac_addr, ndev->dev_addr);
+
+ /* connect PHY */
+ emac->phydev = of_phy_get_and_connect(ndev, eth_node,
+ icssm_emac_adjust_link);
+ if (!emac->phydev) {
+ dev_dbg(prueth->dev, "PHY connection failed\n");
+ ret = -ENODEV;
+ goto free;
+ }
+
+ /* remove unsupported modes */
+ phy_remove_link_mode(emac->phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
+
+ phy_remove_link_mode(emac->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(emac->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+
+ phy_remove_link_mode(emac->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
+ phy_remove_link_mode(emac->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
+
+ ndev->dev.of_node = eth_node;
+ ndev->netdev_ops = &emac_netdev_ops;
+
+ netif_napi_add(ndev, &emac->napi, icssm_emac_napi_poll);
+
+ hrtimer_setup(&emac->tx_hrtimer, &icssm_emac_tx_timer_callback,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
+
+ return 0;
+free:
+ emac->ndev = NULL;
+ prueth->emac[mac] = NULL;
+
+ return ret;
+}
+
+static void icssm_prueth_netdev_exit(struct prueth *prueth,
+ struct device_node *eth_node)
+{
+ struct prueth_emac *emac;
+ enum prueth_mac mac;
+
+ mac = icssm_prueth_node_mac(eth_node);
+ if (mac == PRUETH_MAC_INVALID)
+ return;
+
+ emac = prueth->emac[mac];
+ if (!emac)
+ return;
+
+ phy_disconnect(emac->phydev);
+
+ netif_napi_del(&emac->napi);
+ prueth->emac[mac] = NULL;
+}
+
+static int icssm_prueth_probe(struct platform_device *pdev)
+{
+ struct device_node *eth0_node = NULL, *eth1_node = NULL;
+ struct device_node *eth_node, *eth_ports_node;
+ enum pruss_pru_id pruss_id0, pruss_id1;
+ struct device *dev = &pdev->dev;
+ struct device_node *np;
+ struct prueth *prueth;
+ struct pruss *pruss;
+ int i, ret;
+
+ np = dev->of_node;
+ if (!np)
+ return -ENODEV; /* we don't support non DT */
+
+ prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
+ if (!prueth)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, prueth);
+ prueth->dev = dev;
+ prueth->fw_data = device_get_match_data(dev);
+
+ eth_ports_node = of_get_child_by_name(np, "ethernet-ports");
+ if (!eth_ports_node)
+ return -ENOENT;
+
+ for_each_child_of_node(eth_ports_node, eth_node) {
+ u32 reg;
+
+ if (strcmp(eth_node->name, "ethernet-port"))
+ continue;
+ ret = of_property_read_u32(eth_node, "reg", &reg);
+ if (ret < 0) {
+ dev_err(dev, "%pOF error reading port_id %d\n",
+ eth_node, ret);
+ of_node_put(eth_node);
+ return ret;
+ }
+
+ of_node_get(eth_node);
+
+ if (reg == 0 && !eth0_node) {
+ eth0_node = eth_node;
+ if (!of_device_is_available(eth0_node)) {
+ of_node_put(eth0_node);
+ eth0_node = NULL;
+ }
+ } else if (reg == 1 && !eth1_node) {
+ eth1_node = eth_node;
+ if (!of_device_is_available(eth1_node)) {
+ of_node_put(eth1_node);
+ eth1_node = NULL;
+ }
+ } else {
+ if (reg == 0 || reg == 1)
+ dev_err(dev, "duplicate port reg value: %d\n",
+ reg);
+ else
+ dev_err(dev, "invalid port reg value: %d\n",
+ reg);
+
+ of_node_put(eth_node);
+ }
+ }
+
+ of_node_put(eth_ports_node);
+
+ /* At least one node must be present and available else we fail */
+ if (!eth0_node && !eth1_node) {
+ dev_err(dev, "neither port0 nor port1 node available\n");
+ return -ENODEV;
+ }
+
+ prueth->eth_node[PRUETH_MAC0] = eth0_node;
+ prueth->eth_node[PRUETH_MAC1] = eth1_node;
+
+ prueth->mii_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-rt");
+ if (IS_ERR(prueth->mii_rt)) {
+ dev_err(dev, "couldn't get mii-rt syscon regmap\n");
+ ret = PTR_ERR(prueth->mii_rt);
+ goto put_eth;
+ }
+
+ if (eth0_node) {
+ prueth->pru0 = pru_rproc_get(np, 0, &pruss_id0);
+ if (IS_ERR(prueth->pru0)) {
+ ret = PTR_ERR(prueth->pru0);
+ dev_err_probe(dev, ret, "unable to get PRU0");
+ goto put_eth;
+ }
+ }
+
+ if (eth1_node) {
+ prueth->pru1 = pru_rproc_get(np, 1, &pruss_id1);
+ if (IS_ERR(prueth->pru1)) {
+ ret = PTR_ERR(prueth->pru1);
+ dev_err_probe(dev, ret, "unable to get PRU1");
+ goto put_pru0;
+ }
+ }
+
+ pruss = pruss_get(prueth->pru0 ? prueth->pru0 : prueth->pru1);
+ if (IS_ERR(pruss)) {
+ ret = PTR_ERR(pruss);
+ dev_err(dev, "unable to get pruss handle\n");
+ goto put_pru1;
+ }
+ prueth->pruss = pruss;
+
+ /* Configure PRUSS */
+ if (eth0_node)
+ pruss_cfg_gpimode(pruss, pruss_id0, PRUSS_GPI_MODE_MII);
+ if (eth1_node)
+ pruss_cfg_gpimode(pruss, pruss_id1, PRUSS_GPI_MODE_MII);
+ pruss_cfg_miirt_enable(pruss, true);
+ pruss_cfg_xfr_enable(pruss, PRU_TYPE_PRU, true);
+
+ /* Get PRUSS mem resources */
+ /* OCMC is system resource which we get separately */
+ for (i = 0; i < ARRAY_SIZE(pruss_mem_ids); i++) {
+ /* skip appropriate DRAM if not required */
+ if (!eth0_node && i == PRUETH_MEM_DRAM0)
+ continue;
+
+ if (!eth1_node && i == PRUETH_MEM_DRAM1)
+ continue;
+
+ ret = pruss_request_mem_region(pruss, pruss_mem_ids[i],
+ &prueth->mem[i]);
+ if (ret) {
+ dev_err(dev, "unable to get PRUSS resource %d: %d\n",
+ i, ret);
+ goto put_mem;
+ }
+ }
+
+ prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
+ if (!prueth->sram_pool) {
+ dev_err(dev, "unable to get SRAM pool\n");
+ ret = -ENODEV;
+ goto put_mem;
+ }
+
+ prueth->ocmc_ram_size = OCMC_RAM_SIZE;
+ /* Decreased by 8KB to account for the reserved region on AM33xx */
+ if (prueth->fw_data->driver_data == PRUSS_AM33XX)
+ prueth->ocmc_ram_size = (SZ_64K - SZ_8K);
+
+ prueth->mem[PRUETH_MEM_OCMC].va =
+ (void __iomem *)gen_pool_alloc(prueth->sram_pool,
+ prueth->ocmc_ram_size);
+ if (!prueth->mem[PRUETH_MEM_OCMC].va) {
+ dev_err(dev, "unable to allocate OCMC resource\n");
+ ret = -ENOMEM;
+ goto put_mem;
+ }
+ prueth->mem[PRUETH_MEM_OCMC].pa = gen_pool_virt_to_phys
+ (prueth->sram_pool, (unsigned long)
+ prueth->mem[PRUETH_MEM_OCMC].va);
+ prueth->mem[PRUETH_MEM_OCMC].size = prueth->ocmc_ram_size;
+ dev_dbg(dev, "ocmc: pa %pa va %p size %#zx\n",
+ &prueth->mem[PRUETH_MEM_OCMC].pa,
+ prueth->mem[PRUETH_MEM_OCMC].va,
+ prueth->mem[PRUETH_MEM_OCMC].size);
+
+ /* setup netdev interfaces */
+ if (eth0_node) {
+ ret = icssm_prueth_netdev_init(prueth, eth0_node);
+ if (ret) {
+ if (ret != -EPROBE_DEFER) {
+ dev_err(dev, "netdev init %s failed: %d\n",
+ eth0_node->name, ret);
+ }
+ goto free_pool;
+ }
+ }
+
+ if (eth1_node) {
+ ret = icssm_prueth_netdev_init(prueth, eth1_node);
+ if (ret) {
+ if (ret != -EPROBE_DEFER) {
+ dev_err(dev, "netdev init %s failed: %d\n",
+ eth1_node->name, ret);
+ }
+ goto netdev_exit;
+ }
+ }
+
+ prueth->iep = icss_iep_get(np);
+ if (IS_ERR(prueth->iep)) {
+ ret = PTR_ERR(prueth->iep);
+ dev_err(dev, "unable to get IEP\n");
+ goto netdev_exit;
+ }
+
+ /* register the network devices */
+ if (eth0_node) {
+ ret = register_netdev(prueth->emac[PRUETH_MAC0]->ndev);
+ if (ret) {
+ dev_err(dev, "can't register netdev for port MII0");
+ goto iep_put;
+ }
+
+ prueth->registered_netdevs[PRUETH_MAC0] =
+ prueth->emac[PRUETH_MAC0]->ndev;
+ }
+
+ if (eth1_node) {
+ ret = register_netdev(prueth->emac[PRUETH_MAC1]->ndev);
+ if (ret) {
+ dev_err(dev, "can't register netdev for port MII1");
+ goto netdev_unregister;
+ }
+
+ prueth->registered_netdevs[PRUETH_MAC1] =
+ prueth->emac[PRUETH_MAC1]->ndev;
+ }
+
+ dev_info(dev, "TI PRU ethernet driver initialized: %s EMAC mode\n",
+ (!eth0_node || !eth1_node) ? "single" : "dual");
+
+ if (eth1_node)
+ of_node_put(eth1_node);
+ if (eth0_node)
+ of_node_put(eth0_node);
+ return 0;
+
+netdev_unregister:
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ if (!prueth->registered_netdevs[i])
+ continue;
+ unregister_netdev(prueth->registered_netdevs[i]);
+ }
+
+iep_put:
+ icss_iep_put(prueth->iep);
+ prueth->iep = NULL;
+
+netdev_exit:
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ eth_node = prueth->eth_node[i];
+ if (!eth_node)
+ continue;
+
+ icssm_prueth_netdev_exit(prueth, eth_node);
+ }
+
+free_pool:
+ gen_pool_free(prueth->sram_pool,
+ (unsigned long)prueth->mem[PRUETH_MEM_OCMC].va,
+ prueth->ocmc_ram_size);
+
+put_mem:
+ for (i = PRUETH_MEM_DRAM0; i < PRUETH_MEM_OCMC; i++) {
+ if (prueth->mem[i].va)
+ pruss_release_mem_region(pruss, &prueth->mem[i]);
+ }
+ pruss_put(prueth->pruss);
+
+put_pru1:
+ if (eth1_node)
+ pru_rproc_put(prueth->pru1);
+put_pru0:
+ if (eth0_node)
+ pru_rproc_put(prueth->pru0);
+put_eth:
+ of_node_put(eth1_node);
+ of_node_put(eth0_node);
+
+ return ret;
+}
+
+static void icssm_prueth_remove(struct platform_device *pdev)
+{
+ struct prueth *prueth = platform_get_drvdata(pdev);
+ struct device_node *eth_node;
+ int i;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ if (!prueth->registered_netdevs[i])
+ continue;
+ unregister_netdev(prueth->registered_netdevs[i]);
+ }
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ eth_node = prueth->eth_node[i];
+ if (!eth_node)
+ continue;
+
+ icssm_prueth_netdev_exit(prueth, eth_node);
+ of_node_put(eth_node);
+ }
+
+ gen_pool_free(prueth->sram_pool,
+ (unsigned long)prueth->mem[PRUETH_MEM_OCMC].va,
+ prueth->ocmc_ram_size);
+
+ for (i = PRUETH_MEM_DRAM0; i < PRUETH_MEM_OCMC; i++) {
+ if (prueth->mem[i].va)
+ pruss_release_mem_region(prueth->pruss,
+ &prueth->mem[i]);
+ }
+
+ icss_iep_put(prueth->iep);
+ prueth->iep = NULL;
+
+ pruss_put(prueth->pruss);
+
+ if (prueth->eth_node[PRUETH_MAC0])
+ pru_rproc_put(prueth->pru0);
+ if (prueth->eth_node[PRUETH_MAC1])
+ pru_rproc_put(prueth->pru1);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int icssm_prueth_suspend(struct device *dev)
+{
+ struct prueth *prueth = dev_get_drvdata(dev);
+ struct net_device *ndev;
+ int i, ret;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ ndev = prueth->registered_netdevs[i];
+
+ if (!ndev)
+ continue;
+
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ ret = icssm_emac_ndo_stop(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to stop: %d", ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int icssm_prueth_resume(struct device *dev)
+{
+ struct prueth *prueth = dev_get_drvdata(dev);
+ struct net_device *ndev;
+ int i, ret;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ ndev = prueth->registered_netdevs[i];
+
+ if (!ndev)
+ continue;
+
+ if (netif_running(ndev)) {
+ ret = icssm_emac_ndo_open(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to start: %d", ret);
+ return ret;
+ }
+ netif_device_attach(ndev);
+ }
+ }
+
+ return 0;
+}
+
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops prueth_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(icssm_prueth_suspend, icssm_prueth_resume)
+};
+
+/* AM335x SoC-specific firmware data */
+static struct prueth_private_data am335x_prueth_pdata = {
+ .driver_data = PRUSS_AM33XX,
+ .fw_pru[PRUSS_PRU0] = {
+ .fw_name[PRUSS_ETHTYPE_EMAC] =
+ "ti-pruss/am335x-pru0-prueth-fw.elf",
+ },
+ .fw_pru[PRUSS_PRU1] = {
+ .fw_name[PRUSS_ETHTYPE_EMAC] =
+ "ti-pruss/am335x-pru1-prueth-fw.elf",
+ },
+};
+
+/* AM437x SoC-specific firmware data */
+static struct prueth_private_data am437x_prueth_pdata = {
+ .driver_data = PRUSS_AM43XX,
+ .fw_pru[PRUSS_PRU0] = {
+ .fw_name[PRUSS_ETHTYPE_EMAC] =
+ "ti-pruss/am437x-pru0-prueth-fw.elf",
+ },
+ .fw_pru[PRUSS_PRU1] = {
+ .fw_name[PRUSS_ETHTYPE_EMAC] =
+ "ti-pruss/am437x-pru1-prueth-fw.elf",
+ },
+};
+
+/* AM57xx SoC-specific firmware data */
+static struct prueth_private_data am57xx_prueth_pdata = {
+ .driver_data = PRUSS_AM57XX,
+ .fw_pru[PRUSS_PRU0] = {
+ .fw_name[PRUSS_ETHTYPE_EMAC] =
+ "ti-pruss/am57xx-pru0-prueth-fw.elf",
+ },
+ .fw_pru[PRUSS_PRU1] = {
+ .fw_name[PRUSS_ETHTYPE_EMAC] =
+ "ti-pruss/am57xx-pru1-prueth-fw.elf",
+ },
+};
+
+static const struct of_device_id prueth_dt_match[] = {
+ { .compatible = "ti,am57-prueth", .data = &am57xx_prueth_pdata, },
+ { .compatible = "ti,am4376-prueth", .data = &am437x_prueth_pdata, },
+ { .compatible = "ti,am3359-prueth", .data = &am335x_prueth_pdata, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, prueth_dt_match);
+
+static struct platform_driver prueth_driver = {
+ .probe = icssm_prueth_probe,
+ .remove = icssm_prueth_remove,
+ .driver = {
+ .name = "prueth",
+ .of_match_table = prueth_dt_match,
+ .pm = &prueth_dev_pm_ops,
+ },
+};
+module_platform_driver(prueth_driver);
+
+MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("PRUSS ICSSM Ethernet Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/ti/icssm/icssm_prueth.h b/drivers/net/ethernet/ti/icssm/icssm_prueth.h
new file mode 100644
index 000000000000..8e7e0af08144
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssm/icssm_prueth.h
@@ -0,0 +1,262 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Texas Instruments ICSSM Ethernet driver
+ *
+ * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+#ifndef __NET_TI_PRUETH_H
+#define __NET_TI_PRUETH_H
+
+#include <linux/phy.h>
+#include <linux/types.h>
+#include <linux/pruss_driver.h>
+#include <linux/remoteproc/pruss.h>
+
+#include "icssm_switch.h"
+#include "icssm_prueth_ptp.h"
+
+/* ICSSM size of redundancy tag */
+#define ICSSM_LRE_TAG_SIZE 6
+
+/* PRUSS local memory map */
+#define ICSS_LOCAL_SHARED_RAM 0x00010000
+#define EMAC_MAX_PKTLEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
+/* The macro below is for 1528-byte frame support, i.e. it leaves room
+ * for the redundancy tag
+ */
+#define EMAC_MAX_FRM_SUPPORT (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN + \
+ ICSSM_LRE_TAG_SIZE)
+
+/* PRU Ethernet Type - Ethernet functionality (protocol
+ * implemented) provided by the PRU firmware being loaded.
+ */
+enum pruss_ethtype {
+ PRUSS_ETHTYPE_EMAC = 0,
+ PRUSS_ETHTYPE_HSR,
+ PRUSS_ETHTYPE_PRP,
+ PRUSS_ETHTYPE_SWITCH,
+ PRUSS_ETHTYPE_MAX,
+};
+
+#define PRUETH_IS_EMAC(p) ((p)->eth_type == PRUSS_ETHTYPE_EMAC)
+#define PRUETH_IS_SWITCH(p) ((p)->eth_type == PRUSS_ETHTYPE_SWITCH)
+
+/**
+ * struct prueth_queue_desc - Queue descriptor
+ * @rd_ptr: Read pointer, points to a buffer descriptor in Shared PRU RAM.
+ * @wr_ptr: Write pointer, points to a buffer descriptor in Shared PRU RAM.
+ * @busy_s: Slave queue busy flag, set by the slave (us) to request access
+ * from the master (PRU).
+ * @status: Bit field status register, Bits:
+ * 0: Master queue busy flag.
+ * 1: Packet has been placed in collision queue.
+ * 2: Packet has been discarded due to overflow.
+ * @max_fill_level: Maximum queue usage seen.
+ * @overflow_cnt: Count of queue overflows.
+ *
+ * Each port has up to 4 queues with variable length. The queue is processed
+ * as ring buffer with read and write pointers. Both pointers are address
+ * pointers and increment by 4 for each buffer descriptor position. Queue has
+ * a length defined in constants and a status.
+ */
+struct prueth_queue_desc {
+ u16 rd_ptr;
+ u16 wr_ptr;
+ u8 busy_s;
+ u8 status;
+ u8 max_fill_level;
+ u8 overflow_cnt;
+};
+
+/**
+ * struct prueth_queue_info - Information about a queue in memory
+ * @buffer_offset: buffer offset in OCMC RAM
+ * @queue_desc_offset: queue descriptor offset in Shared RAM
+ * @buffer_desc_offset: buffer descriptors offset in Shared RAM
+ * @buffer_desc_end: end address of buffer descriptors in Shared RAM
+ */
+struct prueth_queue_info {
+ u16 buffer_offset;
+ u16 queue_desc_offset;
+ u16 buffer_desc_offset;
+ u16 buffer_desc_end;
+};
+
+/**
+ * struct prueth_packet_info - Info about a packet in buffer
+ * @shadow: this packet is stored in the collision queue
+ * @port: port packet is on
+ * @length: length of packet
+ * @broadcast: this packet is a broadcast packet
+ * @error: this packet has an error
+ * @lookup_success: src mac found in FDB
+ * @flood: packet is to be flooded
+ * @timestamp: Specifies if timestamp is appended to the packet
+ */
+struct prueth_packet_info {
+ bool shadow;
+ unsigned int port;
+ unsigned int length;
+ bool broadcast;
+ bool error;
+ bool lookup_success;
+ bool flood;
+ bool timestamp;
+};
+
+/* In switch mode there are 3 real ports, i.e. 3 MAC addrs;
+ * however, Linux sees only the host-side port. The other 2 ports
+ * are the switch ports.
+ * In EMAC mode there are 2 real ports, i.e. 2 MAC addrs,
+ * and Linux sees both ports.
+ */
+enum prueth_port {
+ PRUETH_PORT_HOST = 0, /* host side port */
+ PRUETH_PORT_MII0, /* physical port MII 0 */
+ PRUETH_PORT_MII1, /* physical port MII 1 */
+ PRUETH_PORT_INVALID, /* Invalid prueth port */
+};
+
+enum prueth_mac {
+ PRUETH_MAC0 = 0,
+ PRUETH_MAC1,
+ PRUETH_NUM_MACS,
+ PRUETH_MAC_INVALID,
+};
+
+/* In both switch & emac modes there are 3 port queues
+ * EMAC mode:
+ * RX packets for both MII0 & MII1 ports come on
+ * QUEUE_HOST.
+ * TX packets for MII0 go on QUEUE_MII0, TX packets
+ * for MII1 go on QUEUE_MII1.
+ * Switch mode:
+ * Host port RX packets come on QUEUE_HOST
+ * TX packets might have to go on MII0 or MII1 or both.
+ * MII0 TX queue is QUEUE_MII0 and MII1 TX queue is
+ * QUEUE_MII1.
+ */
+enum prueth_port_queue_id {
+ PRUETH_PORT_QUEUE_HOST = 0,
+ PRUETH_PORT_QUEUE_MII0,
+ PRUETH_PORT_QUEUE_MII1,
+ PRUETH_PORT_QUEUE_MAX,
+};
+
+/* Each port queue has 4 queues and 1 collision queue */
+enum prueth_queue_id {
+ PRUETH_QUEUE1 = 0,
+ PRUETH_QUEUE2,
+ PRUETH_QUEUE3,
+ PRUETH_QUEUE4,
+ PRUETH_COLQUEUE, /* collision queue */
+};
+
+/**
+ * struct prueth_firmware - PRU Ethernet FW data
+ * @fw_name: firmware names of firmware to run on PRU
+ */
+struct prueth_firmware {
+ const char *fw_name[PRUSS_ETHTYPE_MAX];
+};
+
+/* PRUeth memory range identifiers */
+enum prueth_mem {
+ PRUETH_MEM_DRAM0 = 0,
+ PRUETH_MEM_DRAM1,
+ PRUETH_MEM_SHARED_RAM,
+ PRUETH_MEM_OCMC,
+ PRUETH_MEM_MAX,
+};
+
+enum pruss_device {
+ PRUSS_AM57XX = 0,
+ PRUSS_AM43XX,
+ PRUSS_AM33XX,
+ PRUSS_K2G
+};
+
+/**
+ * struct prueth_private_data - PRU Ethernet private data
+ * @driver_data: PRUSS device type this data applies to
+ * @fw_pru: firmware names to be used for PRUSS Ethernet use cases
+ */
+struct prueth_private_data {
+ enum pruss_device driver_data;
+ const struct prueth_firmware fw_pru[PRUSS_NUM_PRUS];
+};
+
+struct prueth_emac_stats {
+ u64 tx_packets;
+ u64 tx_dropped;
+ u64 tx_bytes;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_length_errors;
+ u64 rx_over_errors;
+};
+
+/* data for each emac port */
+struct prueth_emac {
+ struct prueth *prueth;
+ struct net_device *ndev;
+ struct napi_struct napi;
+
+ struct rproc *pru;
+ struct phy_device *phydev;
+ struct prueth_queue_desc __iomem *rx_queue_descs;
+ struct prueth_queue_desc __iomem *tx_queue_descs;
+
+ int link;
+ int speed;
+ int duplex;
+ int rx_irq;
+
+ enum prueth_port_queue_id tx_port_queue;
+ enum prueth_queue_id rx_queue_start;
+ enum prueth_queue_id rx_queue_end;
+ enum prueth_port port_id;
+ enum prueth_mem dram;
+ const char *phy_id;
+ u32 msg_enable;
+ u8 mac_addr[6];
+ phy_interface_t phy_if;
+
+	/* spinlock used to protect link configuration */
+ spinlock_t lock;
+
+ struct hrtimer tx_hrtimer;
+ struct prueth_emac_stats stats;
+};
+
+struct prueth {
+ struct device *dev;
+ struct pruss *pruss;
+ struct rproc *pru0, *pru1;
+ struct pruss_mem_region mem[PRUETH_MEM_MAX];
+ struct gen_pool *sram_pool;
+ struct regmap *mii_rt;
+ struct icss_iep *iep;
+
+ const struct prueth_private_data *fw_data;
+ struct prueth_fw_offsets *fw_offsets;
+
+ struct device_node *eth_node[PRUETH_NUM_MACS];
+ struct prueth_emac *emac[PRUETH_NUM_MACS];
+ struct net_device *registered_netdevs[PRUETH_NUM_MACS];
+
+ unsigned int eth_type;
+ size_t ocmc_ram_size;
+ u8 emac_configured;
+};
+
+void icssm_parse_packet_info(struct prueth *prueth, u32 buffer_descriptor,
+ struct prueth_packet_info *pkt_info);
+int icssm_emac_rx_packet(struct prueth_emac *emac, u16 *bd_rd_ptr,
+ struct prueth_packet_info *pkt_info,
+ const struct prueth_queue_info *rxqueue);
+
+#endif /* __NET_TI_PRUETH_H */
diff --git a/drivers/net/ethernet/ti/icssm/icssm_prueth_ptp.h b/drivers/net/ethernet/ti/icssm/icssm_prueth_ptp.h
new file mode 100644
index 000000000000..e0bf692beda1
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssm/icssm_prueth_ptp.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2021 Texas Instruments Incorporated - https://www.ti.com
+ */
+#ifndef PRUETH_PTP_H
+#define PRUETH_PTP_H
+
+#define RX_SYNC_TIMESTAMP_OFFSET_P1 0x8 /* 8 bytes */
+#define RX_PDELAY_REQ_TIMESTAMP_OFFSET_P1 0x14 /* 12 bytes */
+
+#define DISABLE_PTP_FRAME_FORWARDING_CTRL_OFFSET 0x14 /* 1 byte */
+
+#define RX_PDELAY_RESP_TIMESTAMP_OFFSET_P1 0x20 /* 12 bytes */
+#define RX_SYNC_TIMESTAMP_OFFSET_P2 0x2c /* 12 bytes */
+#define RX_PDELAY_REQ_TIMESTAMP_OFFSET_P2 0x38 /* 12 bytes */
+#define RX_PDELAY_RESP_TIMESTAMP_OFFSET_P2 0x44 /* 12 bytes */
+#define TIMESYNC_DOMAIN_NUMBER_LIST 0x50 /* 2 bytes */
+#define P1_SMA_LINE_DELAY_OFFSET 0x52 /* 4 bytes */
+#define P2_SMA_LINE_DELAY_OFFSET 0x56 /* 4 bytes */
+#define TIMESYNC_SECONDS_COUNT_OFFSET 0x5a /* 6 bytes */
+#define TIMESYNC_TC_RCF_OFFSET 0x60 /* 4 bytes */
+#define DUT_IS_MASTER_OFFSET 0x64 /* 1 byte */
+#define MASTER_PORT_NUM_OFFSET 0x65 /* 1 byte */
+#define SYNC_MASTER_MAC_OFFSET 0x66 /* 6 bytes */
+#define TX_TS_NOTIFICATION_OFFSET_SYNC_P1 0x6c /* 1 byte */
+#define TX_TS_NOTIFICATION_OFFSET_PDEL_REQ_P1 0x6d /* 1 byte */
+#define TX_TS_NOTIFICATION_OFFSET_PDEL_RES_P1 0x6e /* 1 byte */
+#define TX_TS_NOTIFICATION_OFFSET_SYNC_P2 0x6f /* 1 byte */
+#define TX_TS_NOTIFICATION_OFFSET_PDEL_REQ_P2 0x70 /* 1 byte */
+#define TX_TS_NOTIFICATION_OFFSET_PDEL_RES_P2 0x71 /* 1 byte */
+#define TX_SYNC_TIMESTAMP_OFFSET_P1 0x72 /* 12 bytes */
+#define TX_PDELAY_REQ_TIMESTAMP_OFFSET_P1 0x7e /* 12 bytes */
+#define TX_PDELAY_RESP_TIMESTAMP_OFFSET_P1 0x8a /* 12 bytes */
+#define TX_SYNC_TIMESTAMP_OFFSET_P2 0x96 /* 12 bytes */
+#define TX_PDELAY_REQ_TIMESTAMP_OFFSET_P2 0xa2 /* 12 bytes */
+#define TX_PDELAY_RESP_TIMESTAMP_OFFSET_P2 0xae /* 12 bytes */
+#define TIMESYNC_CTRL_VAR_OFFSET 0xba /* 1 byte */
+#define DISABLE_SWITCH_SYNC_RELAY_OFFSET 0xbb /* 1 byte */
+#define MII_RX_CORRECTION_OFFSET 0xbc /* 2 bytes */
+#define MII_TX_CORRECTION_OFFSET 0xbe /* 2 bytes */
+#define TIMESYNC_CMP1_CMP_OFFSET 0xc0 /* 8 bytes */
+#define TIMESYNC_SYNC0_CMP_OFFSET 0xc8 /* 8 bytes */
+#define TIMESYNC_CMP1_PERIOD_OFFSET 0xd0 /* 4 bytes */
+#define TIMESYNC_SYNC0_WIDTH_OFFSET 0xd4 /* 4 bytes */
+#define SINGLE_STEP_IEP_OFFSET_P1 0xd8 /* 8 bytes */
+#define SINGLE_STEP_SECONDS_OFFSET_P1 0xe0 /* 8 bytes */
+#define SINGLE_STEP_IEP_OFFSET_P2 0xe8 /* 8 bytes */
+#define SINGLE_STEP_SECONDS_OFFSET_P2 0xf0 /* 8 bytes */
+#define LINK_LOCAL_FRAME_HAS_HSR_TAG 0xf8 /* 1 byte */
+#define PTP_PREV_TX_TIMESTAMP_P1 0xf9 /* 8 bytes */
+#define PTP_PREV_TX_TIMESTAMP_P2 0x101 /* 8 bytes */
+#define PTP_CLK_IDENTITY_OFFSET 0x109 /* 8 bytes */
+#define PTP_SCRATCH_MEM 0x111 /* 16 bytes */
+#define PTP_IPV4_UDP_E2E_ENABLE 0x121 /* 1 byte */
+
+enum {
+ PRUETH_PTP_SYNC,
+ PRUETH_PTP_DLY_REQ,
+ PRUETH_PTP_DLY_RESP,
+ PRUETH_PTP_TS_EVENTS,
+};
+
+#define PRUETH_PTP_TS_SIZE 12
+#define PRUETH_PTP_TS_NOTIFY_SIZE 1
+#define PRUETH_PTP_TS_NOTIFY_MASK 0xff
+
+/* Bit definitions for TIMESYNC_CTRL */
+#define TIMESYNC_CTRL_BG_ENABLE BIT(0)
+#define TIMESYNC_CTRL_FORCED_2STEP BIT(1)
+
+static inline u32 icssm_prueth_tx_ts_offs_get(u8 port, u8 event)
+{
+ return TX_SYNC_TIMESTAMP_OFFSET_P1 + port *
+ PRUETH_PTP_TS_EVENTS * PRUETH_PTP_TS_SIZE +
+ event * PRUETH_PTP_TS_SIZE;
+}
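+
+/* For example, port 1 (P2) and event PRUETH_PTP_DLY_REQ yield
+ * 0x72 + 1 * 3 * 12 + 1 * 12 = 0xa2, matching
+ * TX_PDELAY_REQ_TIMESTAMP_OFFSET_P2 above.
+ */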
+
+static inline u32 icssm_prueth_tx_ts_notify_offs_get(u8 port, u8 event)
+{
+ return TX_TS_NOTIFICATION_OFFSET_SYNC_P1 +
+ PRUETH_PTP_TS_EVENTS * PRUETH_PTP_TS_NOTIFY_SIZE * port +
+ event * PRUETH_PTP_TS_NOTIFY_SIZE;
+}
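+
+/* Likewise, port 1 and event PRUETH_PTP_DLY_RESP yield
+ * 0x6c + 3 * 1 * 1 + 2 * 1 = 0x71, matching
+ * TX_TS_NOTIFICATION_OFFSET_PDEL_RES_P2 above.
+ */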
+
+#endif /* PRUETH_PTP_H */
diff --git a/drivers/net/ethernet/ti/icssm/icssm_switch.h b/drivers/net/ethernet/ti/icssm/icssm_switch.h
new file mode 100644
index 000000000000..8b494ffdcde7
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssm/icssm_switch.h
@@ -0,0 +1,257 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (C) 2015-2021 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#ifndef __ICSS_SWITCH_H
+#define __ICSS_SWITCH_H
+
+/* Basic Switch Parameters
+ * Used to auto-compute offset addresses in L3 OCMC RAM. Do not modify
+ * these without changing the firmware accordingly.
+ */
+#define SWITCH_BUFFER_SIZE (64 * 1024) /* L3 buffer */
+#define ICSS_BLOCK_SIZE 32 /* data bytes per BD */
+#define BD_SIZE 4 /* bytes per buffer descriptor */
+#define NUM_QUEUES 4 /* Queues on Port 0/1/2 */
+
+#define PORT_LINK_MASK 0x1
+#define PORT_IS_HD_MASK 0x2
+
+/* Physical Port queue size (number of BDs). Same for both ports */
+#define QUEUE_1_SIZE 97 /* Network Management high */
+#define QUEUE_2_SIZE 97 /* Network Management low */
+#define QUEUE_3_SIZE 97 /* Protocol specific */
+#define QUEUE_4_SIZE 97 /* NRT (IP,ARP, ICMP) */
+
+/* Host queue size (number of BDs). Each BD points to a data buffer of
+ * 32 bytes. Host port queues can buffer up to 4 full-sized frames per
+ * queue (194 BDs x 32 bytes = 6208 bytes, about four 1500-byte frames).
+ */
+#define HOST_QUEUE_1_SIZE 194 /* Protocol and VLAN priority 7 & 6 */
+#define HOST_QUEUE_2_SIZE 194 /* Protocol mid */
+#define HOST_QUEUE_3_SIZE 194 /* Protocol low */
+#define HOST_QUEUE_4_SIZE 194 /* NRT (IP, ARP, ICMP) */
+
+#define COL_QUEUE_SIZE 0
+
+/* NRT buffer descriptor definition
+ * Each buffer descriptor points to a block of at most 32 bytes and is
+ * 32 bits in size so it can be updated atomically.
+ * The PRU can address memory bytewise.
+ * The 32-bit descriptor is defined as follows:
+ *
+ * Bits Name Meaning
+ * =============================================================================
+ * 0..7 Index points to an index in the buffer queue; at most
+ * 256 x 32-byte blocks can be addressed
+ * 6 LookupSuccess For switch, FDB lookup was successful (source
+ * MAC address found in FDB).
+ * For RED, NodeTable lookup was successful.
+ * 7 Flood Packet should be flooded (destination MAC
+ * address not found in FDB). For switch only.
+ * 8..12 Block_length number of valid bytes in this specific block.
+ * Will be <=32 bytes on last block of packet
+ * 13 More "More" bit indicating that there are more blocks
+ * 14 Shadow indicates that "index" is pointing into shadow
+ * buffer
+ * 15 TimeStamp indicates that this packet has a timestamp in
+ * a separate buffer - only needed if PTP runs on
+ * the host
+ * 16..17 Port different meaning for ingress and egress,
+ * Ingress: Port = 0 indicates phy port 1 and
+ * Port = 1 indicates phy port 2.
+ * Egress: 0 sends on phy port 1 and 1 sends on
+ * phy port 2. Port = 2 goes over MAC table
+ * look-up
+ * 18..28 Length 11 bits of total packet length, put into the
+ * first BD only so that the host accesses only
+ * one BD
+ * 29 VlanTag indicates that the packet has a Length/Type
+ * field of 0x8100 with a VLAN tag in the
+ * following bytes
+ * 30 Broadcast indicates that the packet goes out on both
+ * physical ports; there will be two BDs but only
+ * one buffer
+ * 31 Error indicates there was an error in the packet
+ */
+#define PRUETH_BD_START_FLAG_MASK BIT(0)
+#define PRUETH_BD_START_FLAG_SHIFT 0
+
+#define PRUETH_BD_HSR_FRAME_MASK BIT(4)
+#define PRUETH_BD_HSR_FRAME_SHIFT 4
+
+#define PRUETH_BD_SUP_HSR_FRAME_MASK BIT(5)
+#define PRUETH_BD_SUP_HSR_FRAME_SHIFT 5
+
+#define PRUETH_BD_LOOKUP_SUCCESS_MASK BIT(6)
+#define PRUETH_BD_LOOKUP_SUCCESS_SHIFT 6
+
+#define PRUETH_BD_SW_FLOOD_MASK BIT(7)
+#define PRUETH_BD_SW_FLOOD_SHIFT 7
+
+#define PRUETH_BD_SHADOW_MASK BIT(14)
+#define PRUETH_BD_SHADOW_SHIFT 14
+
+#define PRUETH_BD_TIMESTAMP_MASK BIT(15)
+#define PRUETH_BD_TIMESTAMP_SHIFT 15
+
+#define PRUETH_BD_PORT_MASK GENMASK(17, 16)
+#define PRUETH_BD_PORT_SHIFT 16
+
+#define PRUETH_BD_LENGTH_MASK GENMASK(28, 18)
+#define PRUETH_BD_LENGTH_SHIFT 18
+
+#define PRUETH_BD_BROADCAST_MASK BIT(30)
+#define PRUETH_BD_BROADCAST_SHIFT 30
+
+#define PRUETH_BD_ERROR_MASK BIT(31)
+#define PRUETH_BD_ERROR_SHIFT 31
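+
+/* Illustrative sketch (not part of this header): how the masks above map
+ * a raw 32-bit buffer descriptor onto struct prueth_packet_info from
+ * icssm_prueth.h. The driver's real decoder is icssm_parse_packet_info();
+ * the helper name here is hypothetical.
+ *
+ * static void icssm_bd_decode(u32 bd, struct prueth_packet_info *info)
+ * {
+ *	info->shadow = !!(bd & PRUETH_BD_SHADOW_MASK);
+ *	info->port = (bd & PRUETH_BD_PORT_MASK) >> PRUETH_BD_PORT_SHIFT;
+ *	info->length = (bd & PRUETH_BD_LENGTH_MASK) >>
+ *		       PRUETH_BD_LENGTH_SHIFT;
+ *	info->broadcast = !!(bd & PRUETH_BD_BROADCAST_MASK);
+ *	info->error = !!(bd & PRUETH_BD_ERROR_MASK);
+ *	info->lookup_success = !!(bd & PRUETH_BD_LOOKUP_SUCCESS_MASK);
+ *	info->flood = !!(bd & PRUETH_BD_SW_FLOOD_MASK);
+ *	info->timestamp = !!(bd & PRUETH_BD_TIMESTAMP_MASK);
+ * }
+ */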
+
+/* The following offsets indicate which sections of the memory are used
+ * for EMAC internal tasks
+ */
+#define DRAM_START_OFFSET 0x1E98
+#define SRAM_START_OFFSET 0x400
+
+/* General Purpose Statistics
+ * These are present on both PRU0 and PRU1 DRAM
+ */
+/* base statistics offset */
+#define STATISTICS_OFFSET 0x1F00
+#define STAT_SIZE 0x98
+
+/* Offset for storing
+ * 1. Storm Prevention Params
+ * 2. PHY Speed Offset
+ * 3. Port Status Offset
+ * These are present on both PRU0 and PRU1
+ */
+/* 4 bytes */
+#define STORM_PREVENTION_OFFSET_BC (STATISTICS_OFFSET + STAT_SIZE)
+/* 4 bytes */
+#define PHY_SPEED_OFFSET (STATISTICS_OFFSET + STAT_SIZE + 4)
+/* 1 byte */
+#define PORT_STATUS_OFFSET (STATISTICS_OFFSET + STAT_SIZE + 8)
+/* 1 byte */
+#define COLLISION_COUNTER (STATISTICS_OFFSET + STAT_SIZE + 9)
+/* 4 bytes */
+#define RX_PKT_SIZE_OFFSET (STATISTICS_OFFSET + STAT_SIZE + 10)
+/* 4 bytes */
+#define PORT_CONTROL_ADDR (STATISTICS_OFFSET + STAT_SIZE + 14)
+/* 6 bytes */
+#define PORT_MAC_ADDR (STATISTICS_OFFSET + STAT_SIZE + 18)
+/* 1 byte */
+#define RX_INT_STATUS_OFFSET (STATISTICS_OFFSET + STAT_SIZE + 24)
+/* 4 bytes */
+#define STORM_PREVENTION_OFFSET_MC (STATISTICS_OFFSET + STAT_SIZE + 25)
+/* 4 bytes */
+#define STORM_PREVENTION_OFFSET_UC (STATISTICS_OFFSET + STAT_SIZE + 29)
+/* 4 bytes ? */
+#define STP_INVALID_STATE_OFFSET (STATISTICS_OFFSET + STAT_SIZE + 33)
+
+/* DRAM Offsets for EMAC
+ * Present on Both DRAM0 and DRAM1
+ */
+
+/* 4 queue descriptors for port tx = 32 bytes */
+#define TX_CONTEXT_Q1_OFFSET_ADDR (PORT_QUEUE_DESC_OFFSET + 32)
+#define PORT_QUEUE_DESC_OFFSET (ICSS_EMAC_TTS_CYC_TX_SOF + 8)
+
+/* EMAC Time Triggered Send Offsets */
+#define ICSS_EMAC_TTS_CYC_TX_SOF (ICSS_EMAC_TTS_PREV_TX_SOF + 8)
+#define ICSS_EMAC_TTS_PREV_TX_SOF \
+ (ICSS_EMAC_TTS_MISSED_CYCLE_CNT_OFFSET + 4)
+#define ICSS_EMAC_TTS_MISSED_CYCLE_CNT_OFFSET (ICSS_EMAC_TTS_STATUS_OFFSET \
+ + 4)
+#define ICSS_EMAC_TTS_STATUS_OFFSET (ICSS_EMAC_TTS_CFG_TIME_OFFSET + 4)
+#define ICSS_EMAC_TTS_CFG_TIME_OFFSET (ICSS_EMAC_TTS_CYCLE_PERIOD_OFFSET + 4)
+#define ICSS_EMAC_TTS_CYCLE_PERIOD_OFFSET \
+ (ICSS_EMAC_TTS_CYCLE_START_OFFSET + 8)
+#define ICSS_EMAC_TTS_CYCLE_START_OFFSET ICSS_EMAC_TTS_BASE_OFFSET
+#define ICSS_EMAC_TTS_BASE_OFFSET DRAM_START_OFFSET
+
+/* Shared RAM offsets for EMAC */
+
+/* Queue Descriptors */
+
+/* 4 queue descriptors for port 0 (host receive). 32 bytes */
+#define HOST_QUEUE_DESC_OFFSET (HOST_QUEUE_SIZE_ADDR + 16)
+
+/* table offset for queue size:
+ * 3 ports * 4 Queues * 1 byte offset = 12 bytes
+ */
+#define HOST_QUEUE_SIZE_ADDR (HOST_QUEUE_OFFSET_ADDR + 8)
+/* table offset for queue:
+ * 4 Queues * 2 byte offset = 8 bytes
+ */
+#define HOST_QUEUE_OFFSET_ADDR (HOST_QUEUE_DESCRIPTOR_OFFSET_ADDR + 8)
+/* table offset for Host queue descriptors:
+ * 1 ports * 4 Queues * 2 byte offset = 8 bytes
+ */
+#define HOST_QUEUE_DESCRIPTOR_OFFSET_ADDR (HOST_Q4_RX_CONTEXT_OFFSET + 8)
+
+/* Host Port Rx Context */
+#define HOST_Q4_RX_CONTEXT_OFFSET (HOST_Q3_RX_CONTEXT_OFFSET + 8)
+#define HOST_Q3_RX_CONTEXT_OFFSET (HOST_Q2_RX_CONTEXT_OFFSET + 8)
+#define HOST_Q2_RX_CONTEXT_OFFSET (HOST_Q1_RX_CONTEXT_OFFSET + 8)
+#define HOST_Q1_RX_CONTEXT_OFFSET (EMAC_PROMISCUOUS_MODE_OFFSET + 4)
+
+/* Promiscuous mode control */
+#define EMAC_P1_PROMISCUOUS_BIT BIT(0)
+#define EMAC_P2_PROMISCUOUS_BIT BIT(1)
+#define EMAC_PROMISCUOUS_MODE_OFFSET (EMAC_RESERVED + 4)
+#define EMAC_RESERVED EOF_48K_BUFFER_BD
+
+/* allow for a max 48 KB buffer, whose descriptors span up to 0x1800 (6 KB) */
+#define EOF_48K_BUFFER_BD (P0_BUFFER_DESC_OFFSET + HOST_BD_SIZE + \
+ PORT_BD_SIZE)
+
+#define HOST_BD_SIZE ((HOST_QUEUE_1_SIZE + \
+ HOST_QUEUE_2_SIZE + HOST_QUEUE_3_SIZE + \
+ HOST_QUEUE_4_SIZE) * BD_SIZE)
+#define PORT_BD_SIZE ((QUEUE_1_SIZE + QUEUE_2_SIZE + \
+ QUEUE_3_SIZE + QUEUE_4_SIZE) * 2 * BD_SIZE)
+
+#define END_OF_BD_POOL (P2_Q4_BD_OFFSET + QUEUE_4_SIZE * BD_SIZE)
+#define P2_Q4_BD_OFFSET (P2_Q3_BD_OFFSET + QUEUE_3_SIZE * BD_SIZE)
+#define P2_Q3_BD_OFFSET (P2_Q2_BD_OFFSET + QUEUE_2_SIZE * BD_SIZE)
+#define P2_Q2_BD_OFFSET (P2_Q1_BD_OFFSET + QUEUE_1_SIZE * BD_SIZE)
+#define P2_Q1_BD_OFFSET (P1_Q4_BD_OFFSET + QUEUE_4_SIZE * BD_SIZE)
+#define P1_Q4_BD_OFFSET (P1_Q3_BD_OFFSET + QUEUE_3_SIZE * BD_SIZE)
+#define P1_Q3_BD_OFFSET (P1_Q2_BD_OFFSET + QUEUE_2_SIZE * BD_SIZE)
+#define P1_Q2_BD_OFFSET (P1_Q1_BD_OFFSET + QUEUE_1_SIZE * BD_SIZE)
+#define P1_Q1_BD_OFFSET (P0_Q4_BD_OFFSET + HOST_QUEUE_4_SIZE * BD_SIZE)
+#define P0_Q4_BD_OFFSET (P0_Q3_BD_OFFSET + HOST_QUEUE_3_SIZE * BD_SIZE)
+#define P0_Q3_BD_OFFSET (P0_Q2_BD_OFFSET + HOST_QUEUE_2_SIZE * BD_SIZE)
+#define P0_Q2_BD_OFFSET (P0_Q1_BD_OFFSET + HOST_QUEUE_1_SIZE * BD_SIZE)
+#define P0_Q1_BD_OFFSET P0_BUFFER_DESC_OFFSET
+#define P0_BUFFER_DESC_OFFSET SRAM_START_OFFSET
+
+/* Memory Usage of L3 OCMC RAM */
+
+/* L3 64KB Memory - mainly buffer Pool */
+#define END_OF_BUFFER_POOL (P2_Q4_BUFFER_OFFSET + QUEUE_4_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P2_Q4_BUFFER_OFFSET (P2_Q3_BUFFER_OFFSET + QUEUE_3_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P2_Q3_BUFFER_OFFSET (P2_Q2_BUFFER_OFFSET + QUEUE_2_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P2_Q2_BUFFER_OFFSET (P2_Q1_BUFFER_OFFSET + QUEUE_1_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P2_Q1_BUFFER_OFFSET (P1_Q4_BUFFER_OFFSET + QUEUE_4_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P1_Q4_BUFFER_OFFSET (P1_Q3_BUFFER_OFFSET + QUEUE_3_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P1_Q3_BUFFER_OFFSET (P1_Q2_BUFFER_OFFSET + QUEUE_2_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P1_Q2_BUFFER_OFFSET (P1_Q1_BUFFER_OFFSET + QUEUE_1_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P1_Q1_BUFFER_OFFSET (P0_Q4_BUFFER_OFFSET + HOST_QUEUE_4_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P0_Q4_BUFFER_OFFSET (P0_Q3_BUFFER_OFFSET + HOST_QUEUE_3_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P0_Q3_BUFFER_OFFSET (P0_Q2_BUFFER_OFFSET + HOST_QUEUE_2_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P0_Q2_BUFFER_OFFSET (P0_Q1_BUFFER_OFFSET + HOST_QUEUE_1_SIZE * \
+ ICSS_BLOCK_SIZE)
+#define P0_COL_BUFFER_OFFSET 0xEE00
+#define P0_Q1_BUFFER_OFFSET 0x0000
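+
+/* Illustrative sanity check (not part of this header): the buffer pool
+ * computed above must fit in the 64 KB L3 OCMC RAM, e.g.:
+ *
+ * static_assert(END_OF_BUFFER_POOL <= SWITCH_BUFFER_SIZE,
+ *		 "buffer pool overflows L3 OCMC RAM");
+ *
+ * With the sizes above, END_OF_BUFFER_POOL is (4 * 194 + 8 * 97) * 32 =
+ * 49664 bytes, well within the 65536-byte pool.
+ */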
+
+#endif /* __ICSS_SWITCH_H */
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
index 424ec3212128..d138dea7d208 100644
--- a/drivers/net/ethernet/wangxun/Kconfig
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -20,6 +20,7 @@ config LIBWX
tristate
depends on PTP_1588_CLOCK_OPTIONAL
select PAGE_POOL
+ select DIMLIB
help
Common library for Wangxun(R) Ethernet drivers.
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
index c12a4cb951f6..06f401bd975c 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
@@ -303,6 +303,11 @@ int wx_get_coalesce(struct net_device *netdev,
else
ec->rx_coalesce_usecs = wx->rx_itr_setting >> 2;
+ if (wx->adaptive_itr) {
+ ec->use_adaptive_rx_coalesce = 1;
+ ec->use_adaptive_tx_coalesce = 1;
+ }
+
/* if in mixed tx/rx queues per vector mode, report only rx settings */
if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count)
return 0;
@@ -334,19 +339,28 @@ int wx_set_coalesce(struct net_device *netdev,
return -EOPNOTSUPP;
}
- if (ec->tx_max_coalesced_frames_irq)
- wx->tx_work_limit = ec->tx_max_coalesced_frames_irq;
+ if (ec->tx_max_coalesced_frames_irq > U16_MAX ||
+ !ec->tx_max_coalesced_frames_irq)
+ return -EINVAL;
+
+ wx->tx_work_limit = ec->tx_max_coalesced_frames_irq;
switch (wx->mac.type) {
case wx_mac_sp:
max_eitr = WX_SP_MAX_EITR;
+ rx_itr_param = WX_20K_ITR;
+ tx_itr_param = WX_12K_ITR;
break;
case wx_mac_aml:
case wx_mac_aml40:
max_eitr = WX_AML_MAX_EITR;
+ rx_itr_param = WX_20K_ITR;
+ tx_itr_param = WX_12K_ITR;
break;
default:
max_eitr = WX_EM_MAX_EITR;
+ rx_itr_param = WX_7K_ITR;
+ tx_itr_param = WX_7K_ITR;
break;
}
@@ -354,36 +368,37 @@ int wx_set_coalesce(struct net_device *netdev,
(ec->tx_coalesce_usecs > (max_eitr >> 2)))
return -EINVAL;
+ if (ec->use_adaptive_rx_coalesce) {
+ wx->adaptive_itr = true;
+ wx->rx_itr_setting = 1;
+ wx->tx_itr_setting = 1;
+ return 0;
+ }
+
if (ec->rx_coalesce_usecs > 1)
wx->rx_itr_setting = ec->rx_coalesce_usecs << 2;
else
wx->rx_itr_setting = ec->rx_coalesce_usecs;
- if (wx->rx_itr_setting == 1)
- rx_itr_param = WX_20K_ITR;
- else
- rx_itr_param = wx->rx_itr_setting;
-
if (ec->tx_coalesce_usecs > 1)
wx->tx_itr_setting = ec->tx_coalesce_usecs << 2;
else
wx->tx_itr_setting = ec->tx_coalesce_usecs;
- if (wx->tx_itr_setting == 1) {
- switch (wx->mac.type) {
- case wx_mac_sp:
- case wx_mac_aml:
- case wx_mac_aml40:
- tx_itr_param = WX_12K_ITR;
- break;
- default:
- tx_itr_param = WX_20K_ITR;
- break;
- }
- } else {
- tx_itr_param = wx->tx_itr_setting;
+ if (wx->adaptive_itr) {
+ wx->adaptive_itr = false;
+ wx->rx_itr_setting = rx_itr_param;
+ wx->tx_itr_setting = tx_itr_param;
+ } else if (wx->rx_itr_setting == 1 || wx->tx_itr_setting == 1) {
+ wx->adaptive_itr = true;
}
+ if (wx->rx_itr_setting != 1)
+ rx_itr_param = wx->rx_itr_setting;
+
+ if (wx->tx_itr_setting != 1)
+ tx_itr_param = wx->tx_itr_setting;
+
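+	/* Usage sketch: with ETHTOOL_COALESCE_USE_ADAPTIVE advertised,
+	 * userspace drives this path with standard ethtool, e.g.
+	 * "ethtool -C eth0 adaptive-rx on" to enable DIM, or
+	 * "ethtool -C eth0 adaptive-rx off rx-usecs 100" for a fixed
+	 * rate (interface name illustrative).
+	 */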
/* mixed Rx/Tx */
if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count)
wx->tx_itr_setting = wx->rx_itr_setting;
@@ -466,6 +481,142 @@ int wx_set_channels(struct net_device *dev,
}
EXPORT_SYMBOL(wx_set_channels);
+u32 wx_rss_indir_size(struct net_device *netdev)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ return wx_rss_indir_tbl_entries(wx);
+}
+EXPORT_SYMBOL(wx_rss_indir_size);
+
+u32 wx_get_rxfh_key_size(struct net_device *netdev)
+{
+ return WX_RSS_KEY_SIZE;
+}
+EXPORT_SYMBOL(wx_get_rxfh_key_size);
+
+static void wx_get_reta(struct wx *wx, u32 *indir)
+{
+ u32 reta_size = wx_rss_indir_tbl_entries(wx);
+ u16 rss_m = wx->ring_feature[RING_F_RSS].mask;
+
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
+ rss_m = wx->ring_feature[RING_F_RSS].indices - 1;
+
+ for (u32 i = 0; i < reta_size; i++)
+ indir[i] = wx->rss_indir_tbl[i] & rss_m;
+}
+
+int wx_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ rxfh->hfunc = ETH_RSS_HASH_TOP;
+
+ if (rxfh->indir)
+ wx_get_reta(wx, rxfh->indir);
+
+ if (rxfh->key)
+ memcpy(rxfh->key, wx->rss_key, WX_RSS_KEY_SIZE);
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_get_rxfh);
+
+int wx_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct wx *wx = netdev_priv(netdev);
+ u32 reta_entries, i;
+
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ reta_entries = wx_rss_indir_tbl_entries(wx);
+ /* Fill out the redirection table */
+ if (rxfh->indir) {
+ for (i = 0; i < reta_entries; i++)
+ wx->rss_indir_tbl[i] = rxfh->indir[i];
+
+ wx_store_reta(wx);
+ }
+
+ /* Fill out the rss hash key */
+ if (rxfh->key) {
+ memcpy(wx->rss_key, rxfh->key, WX_RSS_KEY_SIZE);
+ wx_store_rsskey(wx);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_set_rxfh);
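+
+/* Usage sketch: these hooks back the standard RXFH interface, e.g.
+ *   ethtool -x eth0            # show RSS key and indirection table
+ *   ethtool -X eth0 equal 4    # spread flows over the first 4 rings
+ *   ethtool -X eth0 default    # reset, required around SR-IOV changes
+ * (interface name illustrative).
+ */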
+
+static const struct wx_rss_flow_map rss_flow_table[] = {
+ { TCP_V4_FLOW, RXH_L4_B_0_1 | RXH_L4_B_2_3, WX_RSS_FIELD_IPV4_TCP },
+ { TCP_V6_FLOW, RXH_L4_B_0_1 | RXH_L4_B_2_3, WX_RSS_FIELD_IPV6_TCP },
+ { UDP_V4_FLOW, RXH_L4_B_0_1 | RXH_L4_B_2_3, WX_RSS_FIELD_IPV4_UDP },
+ { UDP_V6_FLOW, RXH_L4_B_0_1 | RXH_L4_B_2_3, WX_RSS_FIELD_IPV6_UDP },
+ { SCTP_V4_FLOW, RXH_L4_B_0_1 | RXH_L4_B_2_3, WX_RSS_FIELD_IPV4_SCTP },
+ { SCTP_V6_FLOW, RXH_L4_B_0_1 | RXH_L4_B_2_3, WX_RSS_FIELD_IPV6_SCTP },
+};
+
+int wx_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *nfc)
+{
+ struct wx *wx = netdev_priv(dev);
+
+ nfc->data = RXH_IP_SRC | RXH_IP_DST;
+
+ for (u32 i = 0; i < ARRAY_SIZE(rss_flow_table); i++) {
+ const struct wx_rss_flow_map *entry = &rss_flow_table[i];
+
+ if (entry->flow_type == nfc->flow_type) {
+ if (wx->rss_flags & entry->flag)
+ nfc->data |= entry->data;
+ break;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_get_rxfh_fields);
+
+int wx_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
+{
+ struct wx *wx = netdev_priv(dev);
+ u8 flags = wx->rss_flags;
+
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+
+ for (u32 i = 0; i < ARRAY_SIZE(rss_flow_table); i++) {
+ const struct wx_rss_flow_map *entry = &rss_flow_table[i];
+
+ if (entry->flow_type == nfc->flow_type) {
+ if (nfc->data & entry->data)
+ flags |= entry->flag;
+ else
+ flags &= ~entry->flag;
+
+ if (flags != wx->rss_flags) {
+ wx->rss_flags = flags;
+ wx_config_rss_field(wx);
+ }
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(wx_set_rxfh_fields);
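+
+/* Usage sketch: rss_flow_table above backs the standard rx-flow-hash
+ * interface, e.g.
+ *   ethtool -n eth0 rx-flow-hash tcp4      # query TCP/IPv4 hash fields
+ *   ethtool -N eth0 rx-flow-hash udp4 sdfn
+ * where "sdfn" selects src/dst IP plus both L4 port halves, i.e.
+ * RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3
+ * (interface name illustrative).
+ */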
+
u32 wx_get_msglevel(struct net_device *netdev)
{
struct wx *wx = netdev_priv(netdev);
@@ -531,3 +682,36 @@ void wx_get_ptp_stats(struct net_device *dev,
}
}
EXPORT_SYMBOL(wx_get_ptp_stats);
+
+static int wx_get_link_ksettings_vf(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ cmd->base.port = PORT_NONE;
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.speed = wx->speed;
+
+ return 0;
+}
+
+static const struct ethtool_ops wx_ethtool_ops_vf = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
+ .get_drvinfo = wx_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = wx_get_ringparam,
+ .get_msglevel = wx_get_msglevel,
+ .get_coalesce = wx_get_coalesce,
+ .get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = wx_get_link_ksettings_vf,
+};
+
+void wx_set_ethtool_ops_vf(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &wx_ethtool_ops_vf;
+}
+EXPORT_SYMBOL(wx_set_ethtool_ops_vf);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
index 9e002e699eca..727093970462 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
@@ -38,10 +38,23 @@ void wx_get_channels(struct net_device *dev,
struct ethtool_channels *ch);
int wx_set_channels(struct net_device *dev,
struct ethtool_channels *ch);
+u32 wx_rss_indir_size(struct net_device *netdev);
+u32 wx_get_rxfh_key_size(struct net_device *netdev);
+int wx_get_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh);
+int wx_set_rxfh(struct net_device *netdev,
+ struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack);
+int wx_get_rxfh_fields(struct net_device *dev,
+ struct ethtool_rxfh_fields *cmd);
+int wx_set_rxfh_fields(struct net_device *dev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack);
u32 wx_get_msglevel(struct net_device *netdev);
void wx_set_msglevel(struct net_device *netdev, u32 data);
int wx_get_ts_info(struct net_device *dev,
struct kernel_ethtool_ts_info *info);
void wx_get_ptp_stats(struct net_device *dev,
struct ethtool_ts_stats *ts_stats);
+void wx_set_ethtool_ops_vf(struct net_device *netdev);
#endif /* _WX_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index bcd07a715752..1e2713f0c921 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -1998,8 +1998,17 @@ static void wx_restore_vlan(struct wx *wx)
wx_vlan_rx_add_vid(wx->netdev, htons(ETH_P_8021Q), vid);
}
-static void wx_store_reta(struct wx *wx)
+u32 wx_rss_indir_tbl_entries(struct wx *wx)
{
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags))
+ return 64;
+ else
+ return 128;
+}
+
+void wx_store_reta(struct wx *wx)
+{
+ u32 reta_entries = wx_rss_indir_tbl_entries(wx);
u8 *indir_tbl = wx->rss_indir_tbl;
u32 reta = 0;
u32 i;
@@ -2007,45 +2016,110 @@ static void wx_store_reta(struct wx *wx)
/* Fill out the redirection table as follows:
* - 8 bit wide entries containing 4 bit RSS index
*/
- for (i = 0; i < WX_MAX_RETA_ENTRIES; i++) {
+ for (i = 0; i < reta_entries; i++) {
reta |= indir_tbl[i] << (i & 0x3) * 8;
if ((i & 3) == 3) {
- wr32(wx, WX_RDB_RSSTBL(i >> 2), reta);
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags) &&
+ test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
+ wr32(wx, WX_RDB_VMRSSTBL(i >> 2, wx->num_vfs), reta);
+ else
+ wr32(wx, WX_RDB_RSSTBL(i >> 2), reta);
reta = 0;
}
}
}
-static void wx_setup_reta(struct wx *wx)
+void wx_store_rsskey(struct wx *wx)
{
- u16 rss_i = wx->ring_feature[RING_F_RSS].indices;
- u32 random_key_size = WX_RSS_KEY_SIZE / 4;
- u32 i, j;
+ u32 key_size = WX_RSS_KEY_SIZE / 4;
+ u32 i;
- if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags)) {
- if (wx->mac.type == wx_mac_em)
- rss_i = 1;
- else
- rss_i = rss_i < 4 ? 4 : rss_i;
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags) &&
+ test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ for (i = 0; i < key_size; i++)
+ wr32(wx, WX_RDB_VMRSSRK(i, wx->num_vfs),
+ wx->rss_key[i]);
+ } else {
+ for (i = 0; i < key_size; i++)
+ wr32(wx, WX_RDB_RSSRK(i), wx->rss_key[i]);
}
+}
+static void wx_setup_reta(struct wx *wx)
+{
/* Fill out hash function seeds */
- for (i = 0; i < random_key_size; i++)
- wr32(wx, WX_RDB_RSSRK(i), wx->rss_key[i]);
+ wx_store_rsskey(wx);
/* Fill out redirection table */
- memset(wx->rss_indir_tbl, 0, sizeof(wx->rss_indir_tbl));
+ if (!netif_is_rxfh_configured(wx->netdev)) {
+ u16 rss_i = wx->ring_feature[RING_F_RSS].indices;
+ u32 reta_entries = wx_rss_indir_tbl_entries(wx);
+ u32 i, j;
+
+ memset(wx->rss_indir_tbl, 0, sizeof(wx->rss_indir_tbl));
+
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags)) {
+ if (test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags))
+ rss_i = rss_i < 2 ? 2 : rss_i;
+ else
+ rss_i = 1;
+ }
- for (i = 0, j = 0; i < WX_MAX_RETA_ENTRIES; i++, j++) {
- if (j == rss_i)
- j = 0;
+ for (i = 0, j = 0; i < reta_entries; i++, j++) {
+ if (j == rss_i)
+ j = 0;
- wx->rss_indir_tbl[i] = j;
+ wx->rss_indir_tbl[i] = j;
+ }
}
wx_store_reta(wx);
}
+void wx_config_rss_field(struct wx *wx)
+{
+ u32 rss_field;
+
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags) &&
+ test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ rss_field = rd32(wx, WX_RDB_PL_CFG(wx->num_vfs));
+ rss_field &= ~WX_RDB_PL_CFG_RSS_MASK;
+ rss_field |= FIELD_PREP(WX_RDB_PL_CFG_RSS_MASK, wx->rss_flags);
+ wr32(wx, WX_RDB_PL_CFG(wx->num_vfs), rss_field);
+
+ /* Enable global RSS and multiple RSS to make the RSS
+ * field of each pool take effect.
+ */
+ wr32m(wx, WX_RDB_RA_CTL,
+ WX_RDB_RA_CTL_MULTI_RSS | WX_RDB_RA_CTL_RSS_EN,
+ WX_RDB_RA_CTL_MULTI_RSS | WX_RDB_RA_CTL_RSS_EN);
+ } else {
+ rss_field = rd32(wx, WX_RDB_RA_CTL);
+ rss_field &= ~WX_RDB_RA_CTL_RSS_MASK;
+ rss_field |= FIELD_PREP(WX_RDB_RA_CTL_RSS_MASK, wx->rss_flags);
+ wr32(wx, WX_RDB_RA_CTL, rss_field);
+ }
+}
+
+void wx_enable_rss(struct wx *wx, bool enable)
+{
+ if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags) &&
+ test_bit(WX_FLAG_MULTI_64_FUNC, wx->flags)) {
+ if (enable)
+ wr32m(wx, WX_RDB_PL_CFG(wx->num_vfs),
+ WX_RDB_PL_CFG_RSS_EN, WX_RDB_PL_CFG_RSS_EN);
+ else
+ wr32m(wx, WX_RDB_PL_CFG(wx->num_vfs),
+ WX_RDB_PL_CFG_RSS_EN, 0);
+ } else {
+ if (enable)
+ wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN,
+ WX_RDB_RA_CTL_RSS_EN);
+ else
+ wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN, 0);
+ }
+}
+
#define WX_RDB_RSS_PL_2 FIELD_PREP(GENMASK(31, 29), 1)
#define WX_RDB_RSS_PL_4 FIELD_PREP(GENMASK(31, 29), 2)
static void wx_setup_psrtype(struct wx *wx)
@@ -2076,31 +2150,12 @@ static void wx_setup_psrtype(struct wx *wx)
static void wx_setup_mrqc(struct wx *wx)
{
- u32 rss_field = 0;
-
- /* VT, and RSS do not coexist at the same time */
- if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags))
- return;
-
/* Disable indicating checksum in descriptor, enables RSS hash */
wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_PCSD, WX_PSR_CTL_PCSD);
- /* Perform hash on these packet types */
- rss_field = WX_RDB_RA_CTL_RSS_IPV4 |
- WX_RDB_RA_CTL_RSS_IPV4_TCP |
- WX_RDB_RA_CTL_RSS_IPV4_UDP |
- WX_RDB_RA_CTL_RSS_IPV6 |
- WX_RDB_RA_CTL_RSS_IPV6_TCP |
- WX_RDB_RA_CTL_RSS_IPV6_UDP;
-
- netdev_rss_key_fill(wx->rss_key, sizeof(wx->rss_key));
-
+ wx_config_rss_field(wx);
+ wx_enable_rss(wx, wx->rss_enabled);
wx_setup_reta(wx);
-
- if (wx->rss_enabled)
- rss_field |= WX_RDB_RA_CTL_RSS_EN;
-
- wr32(wx, WX_RDB_RA_CTL, rss_field);
}
/**
@@ -2393,6 +2448,8 @@ int wx_sw_init(struct wx *wx)
wx_err(wx, "rss key allocation failed\n");
return err;
}
+ wx->rss_flags = WX_RSS_FIELD_IPV4 | WX_RSS_FIELD_IPV4_TCP |
+ WX_RSS_FIELD_IPV6 | WX_RSS_FIELD_IPV6_TCP;
wx->mac_table = kcalloc(wx->mac.num_rar_entries,
sizeof(struct wx_mac_addr),
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
index 2393a743b564..13857376bbad 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
@@ -39,6 +39,11 @@ void wx_set_rx_mode(struct net_device *netdev);
int wx_change_mtu(struct net_device *netdev, int new_mtu);
void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring);
void wx_enable_rx_queue(struct wx *wx, struct wx_ring *ring);
+u32 wx_rss_indir_tbl_entries(struct wx *wx);
+void wx_store_reta(struct wx *wx);
+void wx_store_rsskey(struct wx *wx);
+void wx_config_rss_field(struct wx *wx);
+void wx_enable_rss(struct wx *wx, bool enable);
void wx_configure_rx(struct wx *wx);
void wx_configure(struct wx *wx);
void wx_start_hw(struct wx *wx);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 723785ef87bb..3adf7048320a 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -16,6 +16,7 @@
#include "wx_lib.h"
#include "wx_ptp.h"
#include "wx_hw.h"
+#include "wx_vf_lib.h"
/* Lookup table mapping the HW PTYPE to the bit field for decoding */
static struct wx_dec_ptype wx_ptype_lookup[256] = {
@@ -832,6 +833,36 @@ static bool wx_clean_tx_irq(struct wx_q_vector *q_vector,
return !!budget;
}
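+
+/* Each sample feeds the generic net_dim library with the vector's
+ * interrupt count and per-container packet/byte totals; net_dim()
+ * schedules dim->work when the moderation profile should change.
+ */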
+static void wx_update_rx_dim_sample(struct wx_q_vector *q_vector)
+{
+ struct dim_sample sample = {};
+
+ dim_update_sample(q_vector->total_events,
+ q_vector->rx.total_packets,
+ q_vector->rx.total_bytes,
+ &sample);
+
+ net_dim(&q_vector->rx.dim, &sample);
+}
+
+static void wx_update_tx_dim_sample(struct wx_q_vector *q_vector)
+{
+ struct dim_sample sample = {};
+
+ dim_update_sample(q_vector->total_events,
+ q_vector->tx.total_packets,
+ q_vector->tx.total_bytes,
+ &sample);
+
+ net_dim(&q_vector->tx.dim, &sample);
+}
+
+static void wx_update_dim_sample(struct wx_q_vector *q_vector)
+{
+ wx_update_rx_dim_sample(q_vector);
+ wx_update_tx_dim_sample(q_vector);
+}
+
/**
* wx_poll - NAPI polling RX/TX cleanup routine
* @napi: napi struct with our devices info in it
@@ -878,6 +909,8 @@ static int wx_poll(struct napi_struct *napi, int budget)
/* all work done, exit the polling mode */
if (likely(napi_complete_done(napi, work_done))) {
+ if (wx->adaptive_itr)
+ wx_update_dim_sample(q_vector);
if (netif_running(wx->netdev))
wx_intr_enable(wx, WX_INTR_Q(q_vector->v_idx));
}
@@ -1591,6 +1624,65 @@ netdev_tx_t wx_xmit_frame(struct sk_buff *skb,
}
EXPORT_SYMBOL(wx_xmit_frame);
+static void wx_set_itr(struct wx_q_vector *q_vector)
+{
+ struct wx *wx = q_vector->wx;
+ u32 new_itr;
+
+ if (!wx->adaptive_itr)
+ return;
+
+ /* use the smallest value of new ITR delay calculations */
+ new_itr = min(q_vector->rx.itr, q_vector->tx.itr);
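+	/* ITR settings are stored as usecs << 2, the same encoding
+	 * wx_get_coalesce()/wx_set_coalesce() use
+	 */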
+ new_itr <<= 2;
+
+ if (new_itr != q_vector->itr) {
+ /* save the algorithm value here */
+ q_vector->itr = new_itr;
+
+ if (wx->pdev->is_virtfn)
+ wx_write_eitr_vf(q_vector);
+ else
+ wx_write_eitr(q_vector);
+ }
+}
+
+static void wx_rx_dim_work(struct work_struct *work)
+{
+ struct dim *dim = container_of(work, struct dim, work);
+ struct dim_cq_moder rx_moder;
+ struct wx_ring_container *rx;
+ struct wx_q_vector *q_vector;
+
+ rx = container_of(dim, struct wx_ring_container, dim);
+
+ rx_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ rx->itr = rx_moder.usec;
+
+ q_vector = container_of(rx, struct wx_q_vector, rx);
+ wx_set_itr(q_vector);
+
+ dim->state = DIM_START_MEASURE;
+}
+
+static void wx_tx_dim_work(struct work_struct *work)
+{
+ struct dim *dim = container_of(work, struct dim, work);
+ struct dim_cq_moder tx_moder;
+ struct wx_ring_container *tx;
+ struct wx_q_vector *q_vector;
+
+ tx = container_of(dim, struct wx_ring_container, dim);
+
+ tx_moder = net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
+ tx->itr = tx_moder.usec;
+
+ q_vector = container_of(tx, struct wx_q_vector, tx);
+ wx_set_itr(q_vector);
+
+ dim->state = DIM_START_MEASURE;
+}
+
void wx_napi_enable_all(struct wx *wx)
{
struct wx_q_vector *q_vector;
@@ -1598,6 +1690,11 @@ void wx_napi_enable_all(struct wx *wx)
for (q_idx = 0; q_idx < wx->num_q_vectors; q_idx++) {
q_vector = wx->q_vector[q_idx];
+
+ INIT_WORK(&q_vector->rx.dim.work, wx_rx_dim_work);
+ INIT_WORK(&q_vector->tx.dim.work, wx_tx_dim_work);
+ q_vector->rx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
+ q_vector->tx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
napi_enable(&q_vector->napi);
}
}
@@ -1611,6 +1708,8 @@ void wx_napi_disable_all(struct wx *wx)
for (q_idx = 0; q_idx < wx->num_q_vectors; q_idx++) {
q_vector = wx->q_vector[q_idx];
napi_disable(&q_vector->napi);
+ disable_work_sync(&q_vector->rx.dim.work);
+ disable_work_sync(&q_vector->tx.dim.work);
}
}
EXPORT_SYMBOL(wx_napi_disable_all);
@@ -2197,8 +2296,10 @@ irqreturn_t wx_msix_clean_rings(int __always_unused irq, void *data)
struct wx_q_vector *q_vector = data;
/* EIAM disabled interrupts (on this vector) for us */
- if (q_vector->rx.ring || q_vector->tx.ring)
+ if (q_vector->rx.ring || q_vector->tx.ring) {
napi_schedule_irqoff(&q_vector->napi);
+ q_vector->total_events++;
+ }
return IRQ_HANDLED;
}
@@ -2915,14 +3016,8 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
struct wx *wx = netdev_priv(netdev);
bool need_reset = false;
- if (features & NETIF_F_RXHASH) {
- wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN,
- WX_RDB_RA_CTL_RSS_EN);
- wx->rss_enabled = true;
- } else {
- wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN, 0);
- wx->rss_enabled = false;
- }
+ wx->rss_enabled = !!(features & NETIF_F_RXHASH);
+ wx_enable_rss(wx, wx->rss_enabled);
netdev->features = features;
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
index c82ae137756c..c6d158cd70da 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
@@ -150,6 +150,12 @@ static int wx_pci_sriov_enable(struct pci_dev *dev,
struct wx *wx = pci_get_drvdata(dev);
int err = 0, i;
+ if (netif_is_rxfh_configured(wx->netdev)) {
+ wx_err(wx, "Cannot enable SR-IOV while RXFH is configured\n");
+ wx_err(wx, "Run 'ethtool -X <if> default' to reset RSS table\n");
+ return -EBUSY;
+ }
+
err = __wx_enable_sriov(wx, num_vfs);
if (err)
return err;
@@ -173,12 +179,20 @@ err_out:
return err;
}
-static void wx_pci_sriov_disable(struct pci_dev *dev)
+static int wx_pci_sriov_disable(struct pci_dev *dev)
{
struct wx *wx = pci_get_drvdata(dev);
+ if (netif_is_rxfh_configured(wx->netdev)) {
+ wx_err(wx, "Cannot disable SR-IOV while RXFH is configured\n");
+ wx_err(wx, "Run 'ethtool -X <if> default' to reset RSS table\n");
+ return -EBUSY;
+ }
+
wx_disable_sriov(wx);
wx_sriov_reinit(wx);
+
+ return 0;
}
int wx_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
@@ -187,10 +201,8 @@ int wx_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
int err;
if (!num_vfs) {
- if (!pci_vfs_assigned(pdev)) {
- wx_pci_sriov_disable(pdev);
- return 0;
- }
+ if (!pci_vfs_assigned(pdev))
+ return wx_pci_sriov_disable(pdev);
wx_err(wx, "can't free VFs because some are assigned to VMs.\n");
return -EBUSY;
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index 9d5d10f9e410..d89b9b8a0a2c 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -10,6 +10,7 @@
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/phylink.h>
+#include <linux/dim.h>
#include <net/ip.h>
#define WX_NCSI_SUP 0x8000
@@ -167,9 +168,12 @@
#define WX_RDB_PL_CFG_L2HDR BIT(3)
#define WX_RDB_PL_CFG_TUN_TUNHDR BIT(4)
#define WX_RDB_PL_CFG_TUN_OUTL2HDR BIT(5)
+#define WX_RDB_PL_CFG_RSS_EN BIT(24)
+#define WX_RDB_PL_CFG_RSS_MASK GENMASK(23, 16)
#define WX_RDB_RSSTBL(_i) (0x19400 + ((_i) * 4))
#define WX_RDB_RSSRK(_i) (0x19480 + ((_i) * 4))
#define WX_RDB_RA_CTL 0x194F4
+#define WX_RDB_RA_CTL_MULTI_RSS BIT(0)
#define WX_RDB_RA_CTL_RSS_EN BIT(2) /* RSS Enable */
#define WX_RDB_RA_CTL_RSS_IPV4_TCP BIT(16)
#define WX_RDB_RA_CTL_RSS_IPV4 BIT(17)
@@ -177,8 +181,12 @@
#define WX_RDB_RA_CTL_RSS_IPV6_TCP BIT(21)
#define WX_RDB_RA_CTL_RSS_IPV4_UDP BIT(22)
#define WX_RDB_RA_CTL_RSS_IPV6_UDP BIT(23)
+#define WX_RDB_RA_CTL_RSS_MASK GENMASK(23, 16)
#define WX_RDB_FDIR_MATCH 0x19558
#define WX_RDB_FDIR_MISS 0x1955C
+/* VM RSS */
+#define WX_RDB_VMRSSRK(_i, _p) (0x1A000 + ((_i) * 4) + ((_p) * 0x40))
+#define WX_RDB_VMRSSTBL(_i, _p) (0x1B000 + ((_i) * 4) + ((_p) * 0x40))
/******************************* PSR Registers *******************************/
/* psr control */
@@ -1033,6 +1041,7 @@ struct wx_ring_container {
unsigned int total_packets; /* total packets processed this int */
u8 count; /* total number of rings in vector */
u8 itr; /* current ITR setting for ring */
+ struct dim dim; /* data for net_dim algorithm */
};
struct wx_ring {
struct wx_ring *next; /* pointer to next ring in q_vector */
@@ -1089,6 +1098,8 @@ struct wx_q_vector {
struct napi_struct napi;
struct rcu_head rcu; /* to avoid race with update stats on free */
+ u16 total_events; /* number of interrupts processed */
+
char name[IFNAMSIZ + 17];
/* for dynamic allocation of rings associated with this q_vector */
@@ -1188,6 +1199,21 @@ struct vf_macvlans {
u8 vf_macvlan[ETH_ALEN];
};
+#define WX_RSS_FIELD_IPV4_TCP BIT(0)
+#define WX_RSS_FIELD_IPV4 BIT(1)
+#define WX_RSS_FIELD_IPV4_SCTP BIT(2)
+#define WX_RSS_FIELD_IPV6_SCTP BIT(3)
+#define WX_RSS_FIELD_IPV6_TCP BIT(4)
+#define WX_RSS_FIELD_IPV6 BIT(5)
+#define WX_RSS_FIELD_IPV4_UDP BIT(6)
+#define WX_RSS_FIELD_IPV6_UDP BIT(7)
+
+struct wx_rss_flow_map {
+ u8 flow_type;
+ u32 data;
+ u8 flag;
+};
+
enum wx_pf_flags {
WX_FLAG_MULTI_64_FUNC,
WX_FLAG_SWFW_RING,
@@ -1268,6 +1294,7 @@ struct wx {
int num_rx_queues;
u16 rx_itr_setting;
u16 rx_work_limit;
+ bool adaptive_itr;
int num_q_vectors; /* current number of q_vectors for device */
int max_q_vectors; /* upper limit of q_vectors for device */
@@ -1297,6 +1324,7 @@ struct wx {
#define WX_MAX_RETA_ENTRIES 128
#define WX_RSS_INDIR_TBL_MAX 64
u8 rss_indir_tbl[WX_MAX_RETA_ENTRIES];
+ u8 rss_flags;
bool rss_enabled;
#define WX_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
u32 *rss_key;
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_vf.h b/drivers/net/ethernet/wangxun/libwx/wx_vf.h
index fec1126703e3..3f16de0fa427 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_vf.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_vf.h
@@ -4,6 +4,7 @@
#ifndef _WX_VF_H_
#define _WX_VF_H_
+/* Control registers */
#define WX_VF_MAX_RING_NUMS 8
#define WX_VX_PF_BME 0x4B8
#define WX_VF_BME_ENABLE BIT(0)
@@ -12,16 +13,32 @@
#define WX_VXCTRL_RST BIT(0)
#define WX_VXMRQC 0x78
+#define WX_VXMRQC_PSR_L4HDR BIT(0)
+#define WX_VXMRQC_PSR_L3HDR BIT(1)
+#define WX_VXMRQC_PSR_L2HDR BIT(2)
+#define WX_VXMRQC_PSR_TUNHDR BIT(3)
+#define WX_VXMRQC_PSR_TUNMAC BIT(4)
+#define WX_VXMRQC_PSR_MASK GENMASK(5, 1)
+#define WX_VXMRQC_PSR(f) FIELD_PREP(GENMASK(5, 1), f)
+#define WX_VXMRQC_RSS_HASH(f) FIELD_PREP(GENMASK(15, 13), f)
+#define WX_VXMRQC_RSS_MASK GENMASK(31, 16)
+#define WX_VXMRQC_RSS(f) FIELD_PREP(GENMASK(31, 16), f)
+#define WX_VXMRQC_RSS_ALG_IPV4_TCP BIT(0)
+#define WX_VXMRQC_RSS_ALG_IPV4 BIT(1)
+#define WX_VXMRQC_RSS_ALG_IPV6 BIT(4)
+#define WX_VXMRQC_RSS_ALG_IPV6_TCP BIT(5)
+#define WX_VXMRQC_RSS_EN BIT(8)
+
+#define WX_VXRSSRK(i) (0x80 + ((i) * 4)) /* i=[0,9] */
+#define WX_VXRETA(i) (0xC0 + ((i) * 4)) /* i=[0,15] */
+
+/* Interrupt registers */
#define WX_VXICR 0x100
#define WX_VXIMS 0x108
#define WX_VXIMC 0x10C
#define WX_VF_IRQ_CLEAR_MASK 7
#define WX_VF_MAX_TX_QUEUES 4
#define WX_VF_MAX_RX_QUEUES 4
-#define WX_VXTXDCTL(r) (0x3010 + (0x40 * (r)))
-#define WX_VXRXDCTL(r) (0x1010 + (0x40 * (r)))
-#define WX_VXRXDCTL_ENABLE BIT(0)
-#define WX_VXTXDCTL_FLUSH BIT(26)
#define WX_VXITR(i) (0x200 + (4 * (i))) /* i=[0,1] */
#define WX_VXITR_MASK GENMASK(8, 0)
@@ -29,16 +46,6 @@
#define WX_VXIVAR_MISC 0x260
#define WX_VXIVAR(i) (0x240 + (4 * (i))) /* i=[0,3] */
-#define WX_VXRXDCTL_RSCMAX(f) FIELD_PREP(GENMASK(24, 23), f)
-#define WX_VXRXDCTL_BUFLEN(f) FIELD_PREP(GENMASK(6, 1), f)
-#define WX_VXRXDCTL_BUFSZ(f) FIELD_PREP(GENMASK(11, 8), f)
-#define WX_VXRXDCTL_HDRSZ(f) FIELD_PREP(GENMASK(15, 12), f)
-
-#define WX_VXRXDCTL_RSCMAX_MASK GENMASK(24, 23)
-#define WX_VXRXDCTL_BUFLEN_MASK GENMASK(6, 1)
-#define WX_VXRXDCTL_BUFSZ_MASK GENMASK(11, 8)
-#define WX_VXRXDCTL_HDRSZ_MASK GENMASK(15, 12)
-
#define wx_conf_size(v, mwidth, uwidth) ({ \
typeof(v) _v = (v); \
(_v == 2 << (mwidth) ? 0 : _v >> (uwidth)); \
@@ -59,44 +66,35 @@
#define WX_VXRDBAH(r) (0x1004 + (0x40 * (r)))
#define WX_VXRDT(r) (0x1008 + (0x40 * (r)))
#define WX_VXRDH(r) (0x100C + (0x40 * (r)))
-
+#define WX_VXRXDCTL(r) (0x1010 + (0x40 * (r)))
+#define WX_VXRXDCTL_ENABLE BIT(0)
+#define WX_VXRXDCTL_BUFLEN_MASK GENMASK(6, 1)
+#define WX_VXRXDCTL_BUFLEN(f) FIELD_PREP(GENMASK(6, 1), f)
+#define WX_VXRXDCTL_BUFSZ_MASK GENMASK(11, 8)
+#define WX_VXRXDCTL_BUFSZ(f) FIELD_PREP(GENMASK(11, 8), f)
+#define WX_VXRXDCTL_HDRSZ_MASK GENMASK(15, 12)
+#define WX_VXRXDCTL_HDRSZ(f) FIELD_PREP(GENMASK(15, 12), f)
+#define WX_VXRXDCTL_RSCMAX_MASK GENMASK(24, 23)
+#define WX_VXRXDCTL_RSCMAX(f) FIELD_PREP(GENMASK(24, 23), f)
#define WX_VXRXDCTL_RSCEN BIT(29)
#define WX_VXRXDCTL_DROP BIT(30)
#define WX_VXRXDCTL_VLAN BIT(31)
+/* Transmit Path */
#define WX_VXTDBAL(r) (0x3000 + (0x40 * (r)))
#define WX_VXTDBAH(r) (0x3004 + (0x40 * (r)))
#define WX_VXTDT(r) (0x3008 + (0x40 * (r)))
#define WX_VXTDH(r) (0x300C + (0x40 * (r)))
-
+#define WX_VXTXDCTL(r) (0x3010 + (0x40 * (r)))
#define WX_VXTXDCTL_ENABLE BIT(0)
#define WX_VXTXDCTL_BUFLEN(f) FIELD_PREP(GENMASK(6, 1), f)
#define WX_VXTXDCTL_PTHRESH(f) FIELD_PREP(GENMASK(11, 8), f)
#define WX_VXTXDCTL_WTHRESH(f) FIELD_PREP(GENMASK(22, 16), f)
-
-#define WX_VXMRQC_PSR(f) FIELD_PREP(GENMASK(5, 1), f)
-#define WX_VXMRQC_PSR_MASK GENMASK(5, 1)
-#define WX_VXMRQC_PSR_L4HDR BIT(0)
-#define WX_VXMRQC_PSR_L3HDR BIT(1)
-#define WX_VXMRQC_PSR_L2HDR BIT(2)
-#define WX_VXMRQC_PSR_TUNHDR BIT(3)
-#define WX_VXMRQC_PSR_TUNMAC BIT(4)
-
-#define WX_VXRSSRK(i) (0x80 + ((i) * 4)) /* i=[0,9] */
-#define WX_VXRETA(i) (0xC0 + ((i) * 4)) /* i=[0,15] */
-
-#define WX_VXMRQC_RSS(f) FIELD_PREP(GENMASK(31, 16), f)
-#define WX_VXMRQC_RSS_MASK GENMASK(31, 16)
-#define WX_VXMRQC_RSS_ALG_IPV4_TCP BIT(0)
-#define WX_VXMRQC_RSS_ALG_IPV4 BIT(1)
-#define WX_VXMRQC_RSS_ALG_IPV6 BIT(4)
-#define WX_VXMRQC_RSS_ALG_IPV6_TCP BIT(5)
-#define WX_VXMRQC_RSS_EN BIT(8)
-#define WX_VXMRQC_RSS_HASH(f) FIELD_PREP(GENMASK(15, 13), f)
+#define WX_VXTXDCTL_FLUSH BIT(26)
#define WX_PFLINK_STATUS(g) FIELD_GET(BIT(0), g)
#define WX_PFLINK_SPEED(g) FIELD_GET(GENMASK(31, 1), g)
-#define WX_VXSTATUS_SPEED(g) FIELD_GET(GENMASK(4, 1), g)
+#define WX_VXSTATUS_SPEED(g) FIELD_GET(GENMASK(4, 1), g)
struct wx_link_reg_fields {
u32 mac_type;
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c
index 5d48df7a849f..a87887b9f8ee 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c
@@ -10,7 +10,7 @@
#include "wx_vf.h"
#include "wx_vf_lib.h"
-static void wx_write_eitr_vf(struct wx_q_vector *q_vector)
+void wx_write_eitr_vf(struct wx_q_vector *q_vector)
{
struct wx *wx = q_vector->wx;
int v_idx = q_vector->v_idx;
@@ -192,7 +192,7 @@ void wx_setup_vfmrqc_vf(struct wx *wx)
u8 i, j;
/* Fill out hash function seeds */
- netdev_rss_key_fill(wx->rss_key, sizeof(wx->rss_key));
+ netdev_rss_key_fill(wx->rss_key, WX_RSS_KEY_SIZE);
for (i = 0; i < WX_RSS_KEY_SIZE / 4; i++)
wr32(wx, WX_VXRSSRK(i), wx->rss_key[i]);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.h
index 43ea126b79eb..a4bd23c92800 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_vf_lib.h
@@ -4,6 +4,7 @@
#ifndef _WX_VF_LIB_H_
#define _WX_VF_LIB_H_
+void wx_write_eitr_vf(struct wx_q_vector *q_vector);
void wx_configure_msix_vf(struct wx *wx);
int wx_write_uc_addr_list_vf(struct net_device *netdev);
void wx_setup_psrtype_vf(struct wx *wx);
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
index 7e2d9ec38a30..662f28bdde8a 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
@@ -115,7 +115,8 @@ static int ngbe_set_channels(struct net_device *dev,
static const struct ethtool_ops ngbe_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
- ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ,
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_drvinfo = wx_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ksettings = wx_get_link_ksettings,
@@ -136,6 +137,12 @@ static const struct ethtool_ops ngbe_ethtool_ops = {
.set_coalesce = wx_set_coalesce,
.get_channels = wx_get_channels,
.set_channels = ngbe_set_channels,
+ .get_rxfh_fields = wx_get_rxfh_fields,
+ .set_rxfh_fields = wx_set_rxfh_fields,
+ .get_rxfh_indir_size = wx_rss_indir_size,
+ .get_rxfh_key_size = wx_get_rxfh_key_size,
+ .get_rxfh = wx_get_rxfh,
+ .set_rxfh = wx_set_rxfh,
.get_msglevel = wx_get_msglevel,
.set_msglevel = wx_set_msglevel,
.get_ts_info = wx_get_ts_info,
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
index e0fc897b0a58..58488e138beb 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -119,9 +119,9 @@ static int ngbe_sw_init(struct wx *wx)
num_online_cpus());
wx->rss_enabled = true;
- /* enable itr by default in dynamic mode */
- wx->rx_itr_setting = 1;
- wx->tx_itr_setting = 1;
+ wx->adaptive_itr = false;
+ wx->rx_itr_setting = WX_7K_ITR;
+ wx->tx_itr_setting = WX_7K_ITR;
/* set default ring sizes */
wx->tx_ring_count = NGBE_DEFAULT_TXD;
diff --git a/drivers/net/ethernet/wangxun/ngbevf/ngbevf_main.c b/drivers/net/ethernet/wangxun/ngbevf/ngbevf_main.c
index c1246ab5239c..6ef43adcc425 100644
--- a/drivers/net/ethernet/wangxun/ngbevf/ngbevf_main.c
+++ b/drivers/net/ethernet/wangxun/ngbevf/ngbevf_main.c
@@ -14,6 +14,7 @@
#include "../libwx/wx_mbx.h"
#include "../libwx/wx_vf.h"
#include "../libwx/wx_vf_common.h"
+#include "../libwx/wx_ethtool.h"
#include "ngbevf_type.h"
/* ngbevf_pci_tbl - PCI Device ID Table
@@ -100,6 +101,7 @@ static int ngbevf_sw_init(struct wx *wx)
wx->mac.max_tx_queues = NGBEVF_MAX_TX_QUEUES;
wx->mac.max_rx_queues = NGBEVF_MAX_RX_QUEUES;
/* Enable dynamic interrupt throttling rates */
+ wx->adaptive_itr = true;
wx->rx_itr_setting = 1;
wx->tx_itr_setting = 1;
/* set default ring sizes */
@@ -185,6 +187,8 @@ static int ngbevf_probe(struct pci_dev *pdev,
goto err_pci_release_regions;
}
+ wx->driver_name = KBUILD_MODNAME;
+ wx_set_ethtool_ops_vf(netdev);
netdev->netdev_ops = &ngbevf_netdev_ops;
/* setup the private structure */
@@ -202,6 +206,7 @@ static int ngbevf_probe(struct pci_dev *pdev,
if (err)
goto err_free_sw_init;
+ wx_get_fw_version_vf(wx);
err = register_netdev(netdev);
if (err)
goto err_register;
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
index a4753402660e..e285b088c7b2 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
@@ -538,7 +538,8 @@ static int txgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
static const struct ethtool_ops txgbe_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
- ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ,
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_drvinfo = wx_get_drvinfo,
.nway_reset = wx_nway_reset,
.get_link = ethtool_op_get_link,
@@ -559,6 +560,12 @@ static const struct ethtool_ops txgbe_ethtool_ops = {
.set_channels = txgbe_set_channels,
.get_rxnfc = txgbe_get_rxnfc,
.set_rxnfc = txgbe_set_rxnfc,
+ .get_rxfh_fields = wx_get_rxfh_fields,
+ .set_rxfh_fields = wx_set_rxfh_fields,
+ .get_rxfh_indir_size = wx_rss_indir_size,
+ .get_rxfh_key_size = wx_get_rxfh_key_size,
+ .get_rxfh = wx_get_rxfh,
+ .set_rxfh = wx_set_rxfh,
.get_msglevel = wx_get_msglevel,
.set_msglevel = wx_set_msglevel,
.get_ts_info = wx_get_ts_info,
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index a5867f3c93fc..c4c4d70d8466 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -401,6 +401,7 @@ static int txgbe_sw_init(struct wx *wx)
set_bit(WX_FLAG_MULTI_64_FUNC, wx->flags);
/* enable itr by default in dynamic mode */
+ wx->adaptive_itr = true;
wx->rx_itr_setting = 1;
wx->tx_itr_setting = 1;
diff --git a/drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c b/drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c
index ebfce3cf753e..72663e3c4205 100644
--- a/drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c
+++ b/drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c
@@ -14,6 +14,7 @@
#include "../libwx/wx_mbx.h"
#include "../libwx/wx_vf.h"
#include "../libwx/wx_vf_common.h"
+#include "../libwx/wx_ethtool.h"
#include "txgbevf_type.h"
/* txgbevf_pci_tbl - PCI Device ID Table
@@ -144,6 +145,7 @@ static int txgbevf_sw_init(struct wx *wx)
wx->mac.max_tx_queues = TXGBEVF_MAX_TX_QUEUES;
wx->mac.max_rx_queues = TXGBEVF_MAX_RX_QUEUES;
/* Enable dynamic interrupt throttling rates */
+ wx->adaptive_itr = true;
wx->rx_itr_setting = 1;
wx->tx_itr_setting = 1;
/* set default ring sizes */
@@ -238,6 +240,8 @@ static int txgbevf_probe(struct pci_dev *pdev,
goto err_pci_release_regions;
}
+ wx->driver_name = KBUILD_MODNAME;
+ wx_set_ethtool_ops_vf(netdev);
netdev->netdev_ops = &txgbevf_netdev_ops;
/* setup the private structure */
@@ -255,6 +259,7 @@ static int txgbevf_probe(struct pci_dev *pdev,
if (err)
goto err_free_sw_init;
+ wx_get_fw_version_vf(wx);
err = register_netdev(netdev);
if (err)
goto err_register;
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index b77f096eaf99..c5424d882135 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -1142,7 +1142,7 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops,
if (err < 0)
goto err_register;
- priv->xfer_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
+ priv->xfer_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_PERCPU, 0,
netdev_name(ndev));
if (!priv->xfer_wq) {
err = -ENOMEM;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 6011d7eae0c7..284031fb2e2c 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -238,6 +238,8 @@ static u64 axienet_dma_rate(struct axienet_local *lp)
*
* Calculate a control register value based on the coalescing settings. The
* run/stop bit is not set.
+ *
+ * Return: Control register value with coalescing settings configured.
*/
static u32 axienet_calc_cr(struct axienet_local *lp, u32 count, u32 usec)
{
@@ -702,7 +704,8 @@ static void axienet_dma_stop(struct axienet_local *lp)
* are connected to Axi Ethernet reset lines, this in turn resets the Axi
* Ethernet core. No separate hardware reset is done for the Axi Ethernet
* core.
- * Returns 0 on success or a negative error number otherwise.
+ *
+ * Return: 0 on success or a negative error number otherwise.
*/
static int axienet_device_reset(struct net_device *ndev)
{
@@ -773,7 +776,8 @@ static int axienet_device_reset(struct net_device *ndev)
*
* Would either be called after a successful transmit operation, or after
* there was an error when setting up the chain.
- * Returns the number of packets handled.
+ *
+ * Return: The number of packets handled.
*/
static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
int nr_bds, bool force, u32 *sizep, int budget)
@@ -1160,6 +1164,7 @@ static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
struct axienet_local *lp = data;
struct sk_buff *skb;
u32 *app_metadata;
+ int i;
skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
skb = skbuf_dma->skb;
@@ -1167,6 +1172,15 @@ static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
&meta_max_len);
dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size,
DMA_FROM_DEVICE);
+
+ if (IS_ERR(app_metadata)) {
+ if (net_ratelimit())
+ netdev_err(lp->ndev, "Failed to get RX metadata pointer\n");
+ dev_kfree_skb_any(skb);
+ lp->ndev->stats.rx_dropped++;
+ goto rx_submit;
+ }
+
/* TODO: Derive app word index programmatically */
rx_len = (app_metadata[LEN_APP] & 0xFFFF);
skb_put(skb, rx_len);
@@ -1178,7 +1192,11 @@ static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
u64_stats_add(&lp->rx_packets, 1);
u64_stats_add(&lp->rx_bytes, rx_len);
u64_stats_update_end(&lp->rx_stat_sync);
- axienet_rx_submit_desc(lp->ndev);
+
+rx_submit:
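+	/* Refill every free slot, not just one, so that a dropped buffer
+	 * (e.g. the metadata error above) does not shrink the RX ring.
+	 */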
+ for (i = 0; i < CIRC_SPACE(lp->rx_ring_head, lp->rx_ring_tail,
+ RX_BUF_NUM_DEFAULT); i++)
+ axienet_rx_submit_desc(lp->ndev);
dma_async_issue_pending(lp->rx_chan);
}
@@ -1457,7 +1475,6 @@ static void axienet_rx_submit_desc(struct net_device *ndev)
if (!skbuf_dma)
return;
- lp->rx_ring_head++;
skb = netdev_alloc_skb(ndev, lp->max_frm_size);
if (!skb)
return;
@@ -1482,6 +1499,7 @@ static void axienet_rx_submit_desc(struct net_device *ndev)
skbuf_dma->desc = dma_rx_desc;
dma_rx_desc->callback_param = lp;
dma_rx_desc->callback_result = axienet_dma_rx_cb;
+ lp->rx_ring_head++;
dmaengine_submit(dma_rx_desc);
return;
@@ -2098,6 +2116,8 @@ static void axienet_update_coalesce_rx(struct axienet_local *lp, u32 cr,
/**
* axienet_dim_coalesce_count_rx() - RX coalesce count for DIM
* @lp: Device private data
+ *
+ * Return: RX coalescing frame count value for DIM.
*/
static u32 axienet_dim_coalesce_count_rx(struct axienet_local *lp)
{
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index a31d5d5e6593..97e88886253f 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1576,7 +1576,7 @@ do_reset(struct net_device *dev, int full)
msleep(40); /* wait 40 msec to let it complete */
}
if (full_duplex)
- PutByte(XIRCREG1_ECR, GetByte(XIRCREG1_ECR | FullDuplex));
+ PutByte(XIRCREG1_ECR, GetByte(XIRCREG1_ECR) | FullDuplex);
} else { /* No MII */
SelectPage(0);
value = GetByte(XIRCREG_ESR); /* read the ESR */
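
The one-character xirc2ps fix above moves a parenthesis: the OR must apply to the value read back, not to the register address passed to GetByte(). With the bug, GetByte(XIRCREG1_ECR | FullDuplex) read from a different register entirely, so the duplex bit was never reliably set. A read-modify-write helper makes the intended shape explicit (sketch only, using this driver's GetByte()/PutByte() accessors):

	static inline void reg_set_bits(unsigned int reg, unsigned int bits)
	{
		/* read current value, OR in the new bits, write it back */
		PutByte(reg, GetByte(reg) | bits);
	}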
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index 4a4ed2ccf72f..b63965d9a1ba 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -1364,14 +1364,15 @@ static int fjes_probe(struct platform_device *plat_dev)
adapter->force_reset = false;
adapter->open_guard = false;
- adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
+ adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (unlikely(!adapter->txrx_wq)) {
err = -ENOMEM;
goto err_free_netdev;
}
adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
- WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (unlikely(!adapter->control_wq)) {
err = -ENOMEM;
goto err_free_txrx_wq;
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 54384f9b3872..77b0c3d52041 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -53,7 +53,6 @@ struct geneve_dev_node {
};
struct geneve_config {
- struct ip_tunnel_info info;
bool collect_md;
bool use_udp6_rx_checksums;
bool ttl_inherit;
@@ -61,6 +60,9 @@ struct geneve_config {
bool inner_proto_inherit;
u16 port_min;
u16 port_max;
+
+ /* Must be last -- ends in a flexible-array member. */
+ struct ip_tunnel_info info;
};
/* Pseudo network device */
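
The geneve reorder is needed because struct ip_tunnel_info ends in a flexible-array member (its options area), and a flexible array must terminate the enclosing object; embedding it anywhere but last makes later fields alias the trailing data. A minimal sketch of the rule, with struct inner standing in for ip_tunnel_info (GCC accepts the bad layout as an extension but warns):

	struct inner {
		unsigned int len;
		unsigned char data[];	/* flexible array member */
	};

	struct outer_bad {
		struct inner i;		/* -Wflex-array-member-not-at-end */
		int after;		/* overlaps i.data[] at runtime */
	};

	struct outer_good {
		int before;
		struct inner i;		/* must be last */
	};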
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 4b668ebaa0f7..5cb59d72bc82 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -21,9 +21,10 @@
#include <linux/file.h>
#include <linux/gtp.h>
+#include <net/flow.h>
+#include <net/inet_dscp.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
-#include <net/inet_dscp.h>
#include <net/inet_sock.h>
#include <net/ip.h>
#include <net/ipv6.h>
@@ -352,7 +353,7 @@ static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
fl4->flowi4_oif = sk->sk_bound_dev_if;
fl4->daddr = daddr;
fl4->saddr = saddr;
- fl4->flowi4_tos = inet_dscp_to_dsfield(inet_sk_dscp(inet_sk(sk)));
+ fl4->flowi4_dscp = inet_sk_dscp(inet_sk(sk));
fl4->flowi4_scope = ip_sock_rt_scope(sk);
fl4->flowi4_proto = sk->sk_protocol;
@@ -2401,7 +2402,7 @@ static int gtp_genl_send_echo_req(struct sk_buff *skb, struct genl_info *info)
udp_tunnel_xmit_skb(rt, sk, skb_to_send,
fl4.saddr, fl4.daddr,
- fl4.flowi4_tos,
+ inet_dscp_to_dsfield(fl4.flowi4_dscp),
ip4_dst_hoplimit(&rt->dst),
0,
port, port,
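
The gtp conversion reflects the flowi4 API change: the flow key now carries a typed dscp_t in flowi4_dscp rather than a raw TOS byte in flowi4_tos, and callers convert back with inet_dscp_to_dsfield() only at boundaries that still take a dsfield. A sketch of the two directions (field names per <net/flow.h> and <net/inet_dscp.h>):

	#include <net/flow.h>
	#include <net/inet_dscp.h>

	static void fill_flow_dscp(struct flowi4 *fl4, dscp_t dscp)
	{
		fl4->flowi4_dscp = dscp;	/* typed, not a raw TOS byte */
	}

	static u8 flow_dsfield(const struct flowi4 *fl4)
	{
		/* legacy consumers (e.g. tunnel xmit) still want the u8 */
		return inet_dscp_to_dsfield(fl4->flowi4_dscp);
	}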
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index c5e5423e1863..885992951e8a 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -115,8 +115,6 @@ struct sixpack {
struct timer_list tx_t;
struct timer_list resync_t;
- refcount_t refcnt;
- struct completion dead;
spinlock_t lock;
};
@@ -354,41 +352,12 @@ out_mem:
/* ----------------------------------------------------------------------- */
/*
- * We have a potential race on dereferencing tty->disc_data, because the tty
- * layer provides no locking at all - thus one cpu could be running
- * sixpack_receive_buf while another calls sixpack_close, which zeroes
- * tty->disc_data and frees the memory that sixpack_receive_buf is using. The
- * best way to fix this is to use a rwlock in the tty struct, but for now we
- * use a single global rwlock for all ttys in ppp line discipline.
- */
-static DEFINE_RWLOCK(disc_data_lock);
-
-static struct sixpack *sp_get(struct tty_struct *tty)
-{
- struct sixpack *sp;
-
- read_lock(&disc_data_lock);
- sp = tty->disc_data;
- if (sp)
- refcount_inc(&sp->refcnt);
- read_unlock(&disc_data_lock);
-
- return sp;
-}
-
-static void sp_put(struct sixpack *sp)
-{
- if (refcount_dec_and_test(&sp->refcnt))
- complete(&sp->dead);
-}
-
-/*
* Called by the TTY driver when there's room for more data. If we have
* more packets to send, we send them here.
*/
static void sixpack_write_wakeup(struct tty_struct *tty)
{
- struct sixpack *sp = sp_get(tty);
+ struct sixpack *sp = tty->disc_data;
int actual;
if (!sp)
@@ -400,7 +369,7 @@ static void sixpack_write_wakeup(struct tty_struct *tty)
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
sp->tx_enable = 0;
netif_wake_queue(sp->dev);
- goto out;
+ return;
}
if (sp->tx_enable) {
@@ -408,9 +377,6 @@ static void sixpack_write_wakeup(struct tty_struct *tty)
sp->xleft -= actual;
sp->xhead += actual;
}
-
-out:
- sp_put(sp);
}
/* ----------------------------------------------------------------------- */
@@ -430,7 +396,7 @@ static void sixpack_receive_buf(struct tty_struct *tty, const u8 *cp,
if (!count)
return;
- sp = sp_get(tty);
+ sp = tty->disc_data;
if (!sp)
return;
@@ -446,7 +412,6 @@ static void sixpack_receive_buf(struct tty_struct *tty, const u8 *cp,
}
sixpack_decode(sp, cp, count1);
- sp_put(sp);
tty_unthrottle(tty);
}
@@ -561,8 +526,6 @@ static int sixpack_open(struct tty_struct *tty)
spin_lock_init(&sp->lock);
spin_lock_init(&sp->rxlock);
- refcount_set(&sp->refcnt, 1);
- init_completion(&sp->dead);
/* !!! length of the buffers. MTU is IP MTU, not PACLEN! */
@@ -638,19 +601,11 @@ static void sixpack_close(struct tty_struct *tty)
{
struct sixpack *sp;
- write_lock_irq(&disc_data_lock);
sp = tty->disc_data;
- tty->disc_data = NULL;
- write_unlock_irq(&disc_data_lock);
if (!sp)
return;
- /*
- * We have now ensured that nobody can start using ap from now on, but
- * we have to wait for all existing users to finish.
- */
- if (!refcount_dec_and_test(&sp->refcnt))
- wait_for_completion(&sp->dead);
+ tty->disc_data = NULL;
/* We must stop the queue to avoid potentially scribbling
* on the free buffers. The sp->dead completion is not sufficient
@@ -673,7 +628,7 @@ static void sixpack_close(struct tty_struct *tty)
static int sixpack_ioctl(struct tty_struct *tty, unsigned int cmd,
unsigned long arg)
{
- struct sixpack *sp = sp_get(tty);
+ struct sixpack *sp = tty->disc_data;
struct net_device *dev;
unsigned int tmp, err;
@@ -725,8 +680,6 @@ static int sixpack_ioctl(struct tty_struct *tty, unsigned int cmd,
err = tty_mode_ioctl(tty, cmd, arg);
}
- sp_put(sp);
-
return err;
}
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 0e0fe32d2da4..045c5177262e 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -138,7 +138,7 @@ static inline struct net_device *bpq_get_ax25_dev(struct net_device *dev)
static inline int dev_is_ethdev(struct net_device *dev)
{
- return dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5);
+ return dev->type == ARPHRD_ETHER && !netdev_need_ops_lock(dev);
}
/* ------------------------------------------------------------------------ */
diff --git a/drivers/net/hyperv/Kconfig b/drivers/net/hyperv/Kconfig
index c8cbd85adcf9..982964c1a9fb 100644
--- a/drivers/net/hyperv/Kconfig
+++ b/drivers/net/hyperv/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config HYPERV_NET
tristate "Microsoft Hyper-V virtual network driver"
- depends on HYPERV
+ depends on HYPERV_VMBUS
select UCS2_STRING
select NLS
help
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index cb6f5482d203..7397c693f984 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -1061,6 +1061,7 @@ struct net_device_context {
struct net_device __rcu *vf_netdev;
struct netvsc_vf_pcpu_stats __percpu *vf_stats;
struct delayed_work vf_takeover;
+ struct delayed_work vfns_work;
/* 1: allocated, serial number is valid. 0: not allocated */
u32 vf_alloc;
@@ -1075,6 +1076,8 @@ struct net_device_context {
struct netvsc_device_info *saved_netvsc_dev_info;
};
+void netvsc_vfns_work(struct work_struct *w);
+
/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
* packets. We can use ethtool to change UDP hash level when necessary.
*/
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 720104661d7f..60a4629fe6ba 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1812,6 +1812,11 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
/* Enable NAPI handler before init callbacks */
netif_napi_add(ndev, &net_device->chan_table[0].napi, netvsc_poll);
+ napi_enable(&net_device->chan_table[0].napi);
+ netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX,
+ &net_device->chan_table[0].napi);
+ netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX,
+ &net_device->chan_table[0].napi);
/* Open the channel */
device->channel->next_request_id_callback = vmbus_next_request_id;
@@ -1831,12 +1836,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
/* Channel is opened */
netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");
- napi_enable(&net_device->chan_table[0].napi);
- netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX,
- &net_device->chan_table[0].napi);
- netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX,
- &net_device->chan_table[0].napi);
-
/* Connect with the NetVsp */
ret = netvsc_connect_vsp(device, net_device, device_info);
if (ret != 0) {
@@ -1854,14 +1853,14 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
close:
RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
- netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX, NULL);
- netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX, NULL);
- napi_disable(&net_device->chan_table[0].napi);
/* Now, we can close the channel safely */
vmbus_close(device->channel);
cleanup:
+ netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_TX, NULL);
+ netif_queue_set_napi(ndev, 0, NETDEV_QUEUE_TYPE_RX, NULL);
+ napi_disable(&net_device->chan_table[0].napi);
netif_napi_del(&net_device->chan_table[0].napi);
cleanup2:
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index f44753756358..39c892e46cb0 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -2522,6 +2522,7 @@ static int netvsc_probe(struct hv_device *dev,
spin_lock_init(&net_device_ctx->lock);
INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);
+ INIT_DELAYED_WORK(&net_device_ctx->vfns_work, netvsc_vfns_work);
net_device_ctx->vf_stats
= netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
@@ -2666,6 +2667,8 @@ static void netvsc_remove(struct hv_device *dev)
cancel_delayed_work_sync(&ndev_ctx->dwork);
rtnl_lock();
+ cancel_delayed_work_sync(&ndev_ctx->vfns_work);
+
nvdev = rtnl_dereference(ndev_ctx->nvdev);
if (nvdev) {
cancel_work_sync(&nvdev->subchan_work);
@@ -2707,6 +2710,7 @@ static int netvsc_suspend(struct hv_device *dev)
cancel_delayed_work_sync(&ndev_ctx->dwork);
rtnl_lock();
+ cancel_delayed_work_sync(&ndev_ctx->vfns_work);
nvdev = rtnl_dereference(ndev_ctx->nvdev);
if (nvdev == NULL) {
@@ -2800,6 +2804,27 @@ static void netvsc_event_set_vf_ns(struct net_device *ndev)
}
}
+void netvsc_vfns_work(struct work_struct *w)
+{
+ struct net_device_context *ndev_ctx =
+ container_of(w, struct net_device_context, vfns_work.work);
+ struct net_device *ndev;
+
+ if (!rtnl_trylock()) {
+ schedule_delayed_work(&ndev_ctx->vfns_work, 1);
+ return;
+ }
+
+ ndev = hv_get_drvdata(ndev_ctx->device_ctx);
+ if (!ndev)
+ goto out;
+
+ netvsc_event_set_vf_ns(ndev);
+
+out:
+ rtnl_unlock();
+}
+
/*
* On Hyper-V, every VF interface is matched with a corresponding
* synthetic interface. The synthetic interface is presented first
@@ -2810,10 +2835,12 @@ static int netvsc_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
+ struct net_device_context *ndev_ctx;
int ret = 0;
if (event_dev->netdev_ops == &device_ops && event == NETDEV_REGISTER) {
- netvsc_event_set_vf_ns(event_dev);
+ ndev_ctx = netdev_priv(event_dev);
+ schedule_delayed_work(&ndev_ctx->vfns_work, 0);
return NOTIFY_DONE;
}
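
netvsc_vfns_work() above shows the standard escape from notifier-context locking: the NETDEV_REGISTER notifier cannot simply take the RTNL (its caller may already hold it), so the work is deferred and the worker uses rtnl_trylock(), rescheduling itself while the lock is contended. Reduced to its skeleton (my_ctx and do_rtnl_work() are hypothetical):

	static void my_deferred_work(struct work_struct *w)
	{
		struct my_ctx *ctx = container_of(w, struct my_ctx, work.work);

		if (!rtnl_trylock()) {
			/* RTNL held elsewhere: retry one jiffy later */
			schedule_delayed_work(&ctx->work, 1);
			return;
		}

		do_rtnl_work(ctx);	/* runs under RTNL */
		rtnl_unlock();
	}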
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 9e73959e61ee..c35f9685b6bf 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1252,17 +1252,26 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
new_sc->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
new_sc->max_pkt_size = NETVSC_MAX_PKT_SIZE;
+ /* Enable napi before opening the vmbus channel to avoid races:
+ * data the host places on the host->guest ring could otherwise
+ * be missed if napi is not yet enabled.
+ */
+ napi_enable(&nvchan->napi);
+ netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_RX,
+ &nvchan->napi);
+ netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_TX,
+ &nvchan->napi);
+
ret = vmbus_open(new_sc, netvsc_ring_bytes,
netvsc_ring_bytes, NULL, 0,
netvsc_channel_cb, nvchan);
- if (ret == 0) {
- napi_enable(&nvchan->napi);
- netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_RX,
- &nvchan->napi);
- netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_TX,
- &nvchan->napi);
- } else {
+ if (ret != 0) {
netdev_notice(ndev, "sub channel open failed: %d\n", ret);
+ netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_TX,
+ NULL);
+ netif_queue_set_napi(ndev, chn_index, NETDEV_QUEUE_TYPE_RX,
+ NULL);
+ napi_disable(&nvchan->napi);
}
if (atomic_inc_return(&nvscdev->open_chn) == nvscdev->num_chn)
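
Both hyperv hunks apply the same ordering rule: the NAPI consumer must be live before vmbus_open() lets the host start posting to the ring, and failure teardown runs in exact reverse. In outline (my_chan and open_channel() hypothetical, the latter standing in for vmbus_open()):

	static int my_channel_open(struct my_chan *ch)
	{
		int ret;

		napi_enable(&ch->napi);		/* consumer ready first */
		ret = open_channel(ch);		/* host may post at once */
		if (ret)
			napi_disable(&ch->napi); /* reverse order on error */
		return ret;
	}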
diff --git a/drivers/net/ipa/Kconfig b/drivers/net/ipa/Kconfig
index 6782c2cbf542..01d219d3760c 100644
--- a/drivers/net/ipa/Kconfig
+++ b/drivers/net/ipa/Kconfig
@@ -5,7 +5,7 @@ config QCOM_IPA
depends on INTERCONNECT
depends on QCOM_RPROC_COMMON || (QCOM_RPROC_COMMON=n && COMPILE_TEST)
depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
- select QCOM_MDT_LOADER if ARCH_QCOM
+ select QCOM_MDT_LOADER
select QCOM_SCM
select QCOM_QMI_HELPERS
help
diff --git a/drivers/net/ipa/ipa_sysfs.c b/drivers/net/ipa/ipa_sysfs.c
index a59bd215494c..a53e9e6f6cdf 100644
--- a/drivers/net/ipa/ipa_sysfs.c
+++ b/drivers/net/ipa/ipa_sysfs.c
@@ -37,8 +37,12 @@ static const char *ipa_version_string(struct ipa *ipa)
return "4.11";
case IPA_VERSION_5_0:
return "5.0";
+ case IPA_VERSION_5_1:
+ return "5.1";
+ case IPA_VERSION_5_5:
+ return "5.5";
default:
- return "0.0"; /* Won't happen (checked at probe time) */
+ return "0.0"; /* Should not happen */
}
}
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index e3e65772c599..d7e3ddbcab6f 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -2,7 +2,7 @@
/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
*/
-#include <net/inet_dscp.h>
+#include <net/flow.h>
#include <net/ip.h>
#include "ipvlan.h"
@@ -433,7 +433,7 @@ static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
ip4h = ip_hdr(skb);
fl4.daddr = ip4h->daddr;
fl4.saddr = ip4h->saddr;
- fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h));
+ fl4.flowi4_dscp = ip4h_dscp(ip4h);
rt = ip_route_output_flow(net, &fl4, NULL);
if (IS_ERR(rt))
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 4c75d1fea552..5200fd5a10e5 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -1583,9 +1583,6 @@ static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
if (IS_ERR(dev))
return ERR_CAST(dev);
- if (*assoc_num >= MACSEC_NUM_AN)
- return ERR_PTR(-EINVAL);
-
secy = &macsec_priv(dev)->secy;
tx_sc = &secy->tx_sc;
@@ -1646,8 +1643,6 @@ static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
return ERR_PTR(-EINVAL);
*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
- if (*assoc_num >= MACSEC_NUM_AN)
- return ERR_PTR(-EINVAL);
rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
if (IS_ERR(rx_sc))
@@ -1670,24 +1665,21 @@ static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
- [MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
+ [MACSEC_RXSC_ATTR_ACTIVE] = NLA_POLICY_MAX(NLA_U8, 1),
};
static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
- [MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
- [MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
- [MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN_LEN(4),
- [MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
- .len = MACSEC_KEYID_LEN, },
- [MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
- .len = MACSEC_MAX_KEY_LEN, },
+ [MACSEC_SA_ATTR_AN] = NLA_POLICY_MAX(NLA_U8, MACSEC_NUM_AN - 1),
+ [MACSEC_SA_ATTR_ACTIVE] = NLA_POLICY_MAX(NLA_U8, 1),
+ [MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN(NLA_UINT, 1),
+ [MACSEC_SA_ATTR_KEYID] = NLA_POLICY_EXACT_LEN(MACSEC_KEYID_LEN),
+ [MACSEC_SA_ATTR_KEY] = NLA_POLICY_MAX_LEN(MACSEC_MAX_KEY_LEN),
[MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 },
- [MACSEC_SA_ATTR_SALT] = { .type = NLA_BINARY,
- .len = MACSEC_SALT_LEN, },
+ [MACSEC_SA_ATTR_SALT] = NLA_POLICY_EXACT_LEN(MACSEC_SALT_LEN),
};
static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
- [MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
+ [MACSEC_OFFLOAD_ATTR_TYPE] = NLA_POLICY_MAX(NLA_U8, MACSEC_OFFLOAD_MAX),
};
/* Offloads an operation to a device driver */
@@ -1739,21 +1731,6 @@ static bool validate_add_rxsa(struct nlattr **attrs)
!attrs[MACSEC_SA_ATTR_KEYID])
return false;
- if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
- return false;
-
- if (attrs[MACSEC_SA_ATTR_PN] &&
- nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
- return false;
-
- if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
- if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
- return false;
- }
-
- if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
- return false;
-
return true;
}
@@ -1812,14 +1789,6 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
rtnl_unlock();
return -EINVAL;
}
-
- if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
- pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
- nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
- MACSEC_SALT_LEN);
- rtnl_unlock();
- return -EINVAL;
- }
}
rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
@@ -1844,7 +1813,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
if (tb_sa[MACSEC_SA_ATTR_PN]) {
spin_lock_bh(&rx_sa->lock);
- rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
+ rx_sa->next_pn = nla_get_uint(tb_sa[MACSEC_SA_ATTR_PN]);
spin_unlock_bh(&rx_sa->lock);
}
@@ -1895,19 +1864,6 @@ cleanup:
return err;
}
-static bool validate_add_rxsc(struct nlattr **attrs)
-{
- if (!attrs[MACSEC_RXSC_ATTR_SCI])
- return false;
-
- if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
- if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
- return false;
- }
-
- return true;
-}
-
static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
struct net_device *dev;
@@ -1925,7 +1881,7 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
if (parse_rxsc_config(attrs, tb_rxsc))
return -EINVAL;
- if (!validate_add_rxsc(tb_rxsc))
+ if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
return -EINVAL;
rtnl_lock();
@@ -1984,20 +1940,6 @@ static bool validate_add_txsa(struct nlattr **attrs)
!attrs[MACSEC_SA_ATTR_KEYID])
return false;
- if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
- return false;
-
- if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
- return false;
-
- if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
- if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
- return false;
- }
-
- if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
- return false;
-
return true;
}
@@ -2055,14 +1997,6 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
rtnl_unlock();
return -EINVAL;
}
-
- if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
- pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
- nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
- MACSEC_SALT_LEN);
- rtnl_unlock();
- return -EINVAL;
- }
}
tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
@@ -2086,7 +2020,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
}
spin_lock_bh(&tx_sa->lock);
- tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
+ tx_sa->next_pn = nla_get_uint(tb_sa[MACSEC_SA_ATTR_PN]);
spin_unlock_bh(&tx_sa->lock);
if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
@@ -2339,17 +2273,6 @@ static bool validate_upd_sa(struct nlattr **attrs)
attrs[MACSEC_SA_ATTR_SALT])
return false;
- if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
- return false;
-
- if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
- return false;
-
- if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
- if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
- return false;
- }
-
return true;
}
@@ -2398,7 +2321,7 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
spin_lock_bh(&tx_sa->lock);
prev_pn = tx_sa->next_pn_halves;
- tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
+ tx_sa->next_pn = nla_get_uint(tb_sa[MACSEC_SA_ATTR_PN]);
spin_unlock_bh(&tx_sa->lock);
}
@@ -2496,7 +2419,7 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
spin_lock_bh(&rx_sa->lock);
prev_pn = rx_sa->next_pn_halves;
- rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
+ rx_sa->next_pn = nla_get_uint(tb_sa[MACSEC_SA_ATTR_PN]);
spin_unlock_bh(&rx_sa->lock);
}
@@ -2556,7 +2479,7 @@ static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
if (parse_rxsc_config(attrs, tb_rxsc))
return -EINVAL;
- if (!validate_add_rxsc(tb_rxsc))
+ if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
return -EINVAL;
rtnl_lock();
@@ -3834,21 +3757,23 @@ static const struct device_type macsec_type = {
.name = "macsec",
};
+static int validate_cipher_suite(const struct nlattr *attr,
+ struct netlink_ext_ack *extack);
static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
[IFLA_MACSEC_PORT] = { .type = NLA_U16 },
- [IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
- [IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
+ [IFLA_MACSEC_ICV_LEN] = NLA_POLICY_RANGE(NLA_U8, MACSEC_MIN_ICV_LEN, MACSEC_STD_ICV_LEN),
+ [IFLA_MACSEC_CIPHER_SUITE] = NLA_POLICY_VALIDATE_FN(NLA_U64, validate_cipher_suite),
[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
- [IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
- [IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
- [IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
- [IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
- [IFLA_MACSEC_ES] = { .type = NLA_U8 },
- [IFLA_MACSEC_SCB] = { .type = NLA_U8 },
- [IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
- [IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
- [IFLA_MACSEC_OFFLOAD] = { .type = NLA_U8 },
+ [IFLA_MACSEC_ENCODING_SA] = NLA_POLICY_MAX(NLA_U8, MACSEC_NUM_AN - 1),
+ [IFLA_MACSEC_ENCRYPT] = NLA_POLICY_MAX(NLA_U8, 1),
+ [IFLA_MACSEC_PROTECT] = NLA_POLICY_MAX(NLA_U8, 1),
+ [IFLA_MACSEC_INC_SCI] = NLA_POLICY_MAX(NLA_U8, 1),
+ [IFLA_MACSEC_ES] = NLA_POLICY_MAX(NLA_U8, 1),
+ [IFLA_MACSEC_SCB] = NLA_POLICY_MAX(NLA_U8, 1),
+ [IFLA_MACSEC_REPLAY_PROTECT] = NLA_POLICY_MAX(NLA_U8, 1),
+ [IFLA_MACSEC_VALIDATION] = NLA_POLICY_MAX(NLA_U8, MACSEC_VALIDATE_MAX),
+ [IFLA_MACSEC_OFFLOAD] = NLA_POLICY_MAX(NLA_U8, MACSEC_OFFLOAD_MAX),
};
static void macsec_free_netdev(struct net_device *dev)
@@ -4286,6 +4211,7 @@ static int macsec_newlink(struct net_device *dev,
if (err < 0)
goto del_dev;
+ netdev_update_features(dev);
netif_stacked_transfer_operstate(real_dev, dev);
linkwatch_fire_event(dev);
@@ -4302,20 +4228,30 @@ unregister:
return err;
}
+static int validate_cipher_suite(const struct nlattr *attr,
+ struct netlink_ext_ack *extack)
+{
+ switch (nla_get_u64(attr)) {
+ case MACSEC_CIPHER_ID_GCM_AES_128:
+ case MACSEC_CIPHER_ID_GCM_AES_256:
+ case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
+ case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
+ case MACSEC_DEFAULT_CIPHER_ID:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
- u64 csid = MACSEC_DEFAULT_CIPHER_ID;
u8 icv_len = MACSEC_DEFAULT_ICV_LEN;
- int flag;
bool es, scb, sci;
if (!data)
return 0;
- if (data[IFLA_MACSEC_CIPHER_SUITE])
- csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);
-
if (data[IFLA_MACSEC_ICV_LEN]) {
icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
if (icv_len != MACSEC_DEFAULT_ICV_LEN) {
@@ -4331,34 +4267,6 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
}
}
- switch (csid) {
- case MACSEC_CIPHER_ID_GCM_AES_128:
- case MACSEC_CIPHER_ID_GCM_AES_256:
- case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
- case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
- case MACSEC_DEFAULT_CIPHER_ID:
- if (icv_len < MACSEC_MIN_ICV_LEN ||
- icv_len > MACSEC_STD_ICV_LEN)
- return -EINVAL;
- break;
- default:
- return -EINVAL;
- }
-
- if (data[IFLA_MACSEC_ENCODING_SA]) {
- if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
- return -EINVAL;
- }
-
- for (flag = IFLA_MACSEC_ENCODING_SA + 1;
- flag < IFLA_MACSEC_VALIDATION;
- flag++) {
- if (data[flag]) {
- if (nla_get_u8(data[flag]) > 1)
- return -EINVAL;
- }
- }
-
es = nla_get_u8_default(data[IFLA_MACSEC_ES], false);
sci = nla_get_u8_default(data[IFLA_MACSEC_INC_SCI], false);
scb = nla_get_u8_default(data[IFLA_MACSEC_SCB], false);
@@ -4366,10 +4274,6 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
if ((sci && (scb || es)) || (scb && es))
return -EINVAL;
- if (data[IFLA_MACSEC_VALIDATION] &&
- nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
- return -EINVAL;
-
if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
!data[IFLA_MACSEC_WINDOW])
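
The macsec rework replaces open-coded checks scattered through the handlers with declarative netlink policies, so malformed attributes are rejected generically (with extack) before any handler runs. The building blocks, shown in a hypothetical policy table:

	static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
		[MY_ATTR_ACTIVE] = NLA_POLICY_MAX(NLA_U8, 1),	/* boolean */
		[MY_ATTR_AN]	 = NLA_POLICY_MAX(NLA_U8, MACSEC_NUM_AN - 1),
		[MY_ATTR_PN]	 = NLA_POLICY_MIN(NLA_UINT, 1),	/* nonzero */
		[MY_ATTR_KEYID]	 = NLA_POLICY_EXACT_LEN(MACSEC_KEYID_LEN),
		[MY_ATTR_KEY]	 = NLA_POLICY_MAX_LEN(MACSEC_MAX_KEY_LEN),
	};

NLA_POLICY_VALIDATE_FN(), used above for the cipher suite, hooks in a custom callback for values that form an enumeration rather than a contiguous range.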
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 4df991e494bd..7966545512cf 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -369,7 +369,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
}
spin_unlock(&port->bc_queue.lock);
- queue_work(system_unbound_wq, &port->bc_work);
+ queue_work(system_dfl_wq, &port->bc_work);
if (err)
goto free_nskb;
diff --git a/drivers/net/mctp/mctp-usb.c b/drivers/net/mctp/mctp-usb.c
index 775a386d0aca..36ccc53b1797 100644
--- a/drivers/net/mctp/mctp-usb.c
+++ b/drivers/net/mctp/mctp-usb.c
@@ -183,6 +183,7 @@ static void mctp_usb_in_complete(struct urb *urb)
struct mctp_usb_hdr *hdr;
u8 pkt_len; /* length of MCTP packet, no USB header */
+ skb_reset_mac_header(skb);
hdr = skb_pull_data(skb, sizeof(*hdr));
if (!hdr)
break;
diff --git a/drivers/net/mdio/Kconfig b/drivers/net/mdio/Kconfig
index e1e32b687068..44380378911b 100644
--- a/drivers/net/mdio/Kconfig
+++ b/drivers/net/mdio/Kconfig
@@ -3,11 +3,6 @@
# MDIO Layer Configuration
#
-config MDIO_BUS
- tristate "MDIO bus consumer layer"
- help
- MDIO bus consumer layer
-
if PHYLIB
config FWNODE_MDIO
diff --git a/drivers/net/mdio/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c
index b6e30bdf5325..37e35f282d9a 100644
--- a/drivers/net/mdio/mdio-bcm-unimac.c
+++ b/drivers/net/mdio/mdio-bcm-unimac.c
@@ -209,14 +209,15 @@ static int unimac_mdio_clk_set(struct unimac_mdio_priv *priv)
if (ret)
return ret;
- if (!priv->clk)
+ rate = clk_get_rate(priv->clk);
+ if (!rate)
rate = 250000000;
- else
- rate = clk_get_rate(priv->clk);
div = (rate / (2 * priv->clk_freq)) - 1;
if (div & ~MDIO_CLK_DIV_MASK) {
- pr_warn("Incorrect MDIO clock frequency, ignoring\n");
+ dev_warn(priv->mii_bus->parent,
+ "Ignoring MDIO clock frequency request: %d vs. rate: %ld\n",
+ priv->clk_freq, rate);
ret = 0;
goto out;
}
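
The unimac change leans on a clk API convention: clk_get_rate() returns 0 both for a NULL (optional, absent) clock and for a clock whose rate cannot be determined, so a single zero test covers the fallback to the 250 MHz reference the driver already assumed. Sketch:

	static unsigned long mdio_ref_rate(struct clk *clk)
	{
		unsigned long rate = clk_get_rate(clk); /* 0 if clk is NULL */

		return rate ?: 250000000;	/* driver's assumed default */
	}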
diff --git a/drivers/net/mdio/mdio-i2c.c b/drivers/net/mdio/mdio-i2c.c
index 53e96bfab542..ed20352a589a 100644
--- a/drivers/net/mdio/mdio-i2c.c
+++ b/drivers/net/mdio/mdio-i2c.c
@@ -116,17 +116,23 @@ static int smbus_byte_mii_read_default_c22(struct mii_bus *bus, int phy_id,
if (!i2c_mii_valid_phy_id(phy_id))
return 0;
- ret = i2c_smbus_xfer(i2c, i2c_mii_phy_addr(phy_id), 0,
- I2C_SMBUS_READ, reg,
- I2C_SMBUS_BYTE_DATA, &smbus_data);
+ i2c_lock_bus(i2c, I2C_LOCK_SEGMENT);
+
+ ret = __i2c_smbus_xfer(i2c, i2c_mii_phy_addr(phy_id), 0,
+ I2C_SMBUS_READ, reg,
+ I2C_SMBUS_BYTE_DATA, &smbus_data);
if (ret < 0)
- return ret;
+ goto unlock;
val = (smbus_data.byte & 0xff) << 8;
- ret = i2c_smbus_xfer(i2c, i2c_mii_phy_addr(phy_id), 0,
- I2C_SMBUS_READ, reg,
- I2C_SMBUS_BYTE_DATA, &smbus_data);
+ ret = __i2c_smbus_xfer(i2c, i2c_mii_phy_addr(phy_id), 0,
+ I2C_SMBUS_READ, reg,
+ I2C_SMBUS_BYTE_DATA, &smbus_data);
+
+unlock:
+ i2c_unlock_bus(i2c, I2C_LOCK_SEGMENT);
+
if (ret < 0)
return ret;
@@ -147,17 +153,22 @@ static int smbus_byte_mii_write_default_c22(struct mii_bus *bus, int phy_id,
smbus_data.byte = (val & 0xff00) >> 8;
- ret = i2c_smbus_xfer(i2c, i2c_mii_phy_addr(phy_id), 0,
- I2C_SMBUS_WRITE, reg,
- I2C_SMBUS_BYTE_DATA, &smbus_data);
+ i2c_lock_bus(i2c, I2C_LOCK_SEGMENT);
+
+ ret = __i2c_smbus_xfer(i2c, i2c_mii_phy_addr(phy_id), 0,
+ I2C_SMBUS_WRITE, reg,
+ I2C_SMBUS_BYTE_DATA, &smbus_data);
if (ret < 0)
- return ret;
+ goto unlock;
smbus_data.byte = val & 0xff;
- ret = i2c_smbus_xfer(i2c, i2c_mii_phy_addr(phy_id), 0,
- I2C_SMBUS_WRITE, reg,
- I2C_SMBUS_BYTE_DATA, &smbus_data);
+ ret = __i2c_smbus_xfer(i2c, i2c_mii_phy_addr(phy_id), 0,
+ I2C_SMBUS_WRITE, reg,
+ I2C_SMBUS_BYTE_DATA, &smbus_data);
+
+unlock:
+ i2c_unlock_bus(i2c, I2C_LOCK_SEGMENT);
return ret < 0 ? ret : 0;
}
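
The mdio-i2c conversion matters because each 16-bit MII register is emulated by two 8-bit SMBus transfers; taking the bus lock once and issuing the unlocked __i2c_smbus_xfer() variants makes the pair atomic against other users of the segment. The pattern, reduced to a sketch (addr and reg hypothetical):

	static int mii_read_pair(struct i2c_adapter *i2c, u16 addr, u8 reg,
				 union i2c_smbus_data *hi,
				 union i2c_smbus_data *lo)
	{
		int ret;

		i2c_lock_bus(i2c, I2C_LOCK_SEGMENT);
		ret = __i2c_smbus_xfer(i2c, addr, 0, I2C_SMBUS_READ, reg,
				       I2C_SMBUS_BYTE_DATA, hi);
		if (ret >= 0)
			ret = __i2c_smbus_xfer(i2c, addr, 0, I2C_SMBUS_READ,
					       reg, I2C_SMBUS_BYTE_DATA, lo);
		i2c_unlock_bus(i2c, I2C_LOCK_SEGMENT);	/* always released */

		return ret;
	}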
diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
index 98f667b121f7..1357348e01d5 100644
--- a/drivers/net/mdio/of_mdio.c
+++ b/drivers/net/mdio/of_mdio.c
@@ -447,6 +447,8 @@ int of_phy_register_fixed_link(struct device_node *np)
/* Old binding */
if (of_property_read_u32_array(np, "fixed-link", fixed_link_prop,
ARRAY_SIZE(fixed_link_prop)) == 0) {
+ pr_warn_once("%pOF uses deprecated array-style fixed-link binding!\n",
+ np);
status.link = 1;
status.duplex = fixed_link_prop[1];
status.speed = fixed_link_prop[2];
@@ -473,6 +475,5 @@ void of_phy_deregister_fixed_link(struct device_node *np)
fixed_phy_unregister(phydev);
put_device(&phydev->mdio.dev); /* of_phy_find_device() */
- phy_device_free(phydev); /* fixed_phy_register() */
}
EXPORT_SYMBOL(of_phy_deregister_fixed_link);
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index e3722de08ea9..194570443493 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -300,6 +300,33 @@ static void netconsole_print_banner(struct netpoll *np)
np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
+/* Parse the string and populate the `inet_addr` union. Return 0 if IPv4 is
+ * populated, 1 if IPv6 is populated, and -1 upon failure.
+ */
+static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
+{
+ const char *end = NULL;
+ int len;
+
+ len = strlen(str);
+ if (!len)
+ return -1;
+
+ if (str[len - 1] == '\n')
+ len -= 1;
+
+ if (in4_pton(str, len, (void *)addr, -1, &end) > 0 &&
+ (!end || *end == 0 || *end == '\n'))
+ return 0;
+
+ if (IS_ENABLED(CONFIG_IPV6) &&
+ in6_pton(str, len, (void *)addr, -1, &end) > 0 &&
+ (!end || *end == 0 || *end == '\n'))
+ return 1;
+
+ return -1;
+}
+
#ifdef CONFIG_NETCONSOLE_DYNAMIC
/*
@@ -730,6 +757,7 @@ static ssize_t local_ip_store(struct config_item *item, const char *buf,
{
struct netconsole_target *nt = to_target(item);
ssize_t ret = -EINVAL;
+ int ipv6;
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
@@ -738,23 +766,10 @@ static ssize_t local_ip_store(struct config_item *item, const char *buf,
goto out_unlock;
}
- if (strnchr(buf, count, ':')) {
- const char *end;
-
- if (in6_pton(buf, count, nt->np.local_ip.in6.s6_addr, -1, &end) > 0) {
- if (*end && *end != '\n') {
- pr_err("invalid IPv6 address at: <%c>\n", *end);
- goto out_unlock;
- }
- nt->np.ipv6 = true;
- } else
- goto out_unlock;
- } else {
- if (!nt->np.ipv6)
- nt->np.local_ip.ip = in_aton(buf);
- else
- goto out_unlock;
- }
+ ipv6 = netpoll_parse_ip_addr(buf, &nt->np.local_ip);
+ if (ipv6 == -1)
+ goto out_unlock;
+ nt->np.ipv6 = !!ipv6;
ret = strnlen(buf, count);
out_unlock:
@@ -767,6 +782,7 @@ static ssize_t remote_ip_store(struct config_item *item, const char *buf,
{
struct netconsole_target *nt = to_target(item);
ssize_t ret = -EINVAL;
+ int ipv6;
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
@@ -775,23 +791,10 @@ static ssize_t remote_ip_store(struct config_item *item, const char *buf,
goto out_unlock;
}
- if (strnchr(buf, count, ':')) {
- const char *end;
-
- if (in6_pton(buf, count, nt->np.remote_ip.in6.s6_addr, -1, &end) > 0) {
- if (*end && *end != '\n') {
- pr_err("invalid IPv6 address at: <%c>\n", *end);
- goto out_unlock;
- }
- nt->np.ipv6 = true;
- } else
- goto out_unlock;
- } else {
- if (!nt->np.ipv6)
- nt->np.remote_ip.ip = in_aton(buf);
- else
- goto out_unlock;
- }
+ ipv6 = netpoll_parse_ip_addr(buf, &nt->np.remote_ip);
+ if (ipv6 == -1)
+ goto out_unlock;
+ nt->np.ipv6 = !!ipv6;
ret = strnlen(buf, count);
out_unlock:
@@ -1742,26 +1745,6 @@ static void write_msg(struct console *con, const char *msg, unsigned int len)
spin_unlock_irqrestore(&target_list_lock, flags);
}
-static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
-{
- const char *end;
-
- if (!strchr(str, ':') &&
- in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
- if (!*end)
- return 0;
- }
- if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
-#if IS_ENABLED(CONFIG_IPV6)
- if (!*end)
- return 1;
-#else
- return -1;
-#endif
- }
- return -1;
-}
-
static int netconsole_parser_cmdline(struct netpoll *np, char *opt)
{
bool ipversion_set = false;
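
The new netpoll_parse_ip_addr() above builds on in4_pton()/in6_pton(), which take an explicit length (-1 for NUL-terminated), a delimiter (-1 for none), and report where parsing stopped through their last argument; that end pointer is what allows the trailing-newline tolerance, so 'echo 10.0.0.1 > remote_ip' works unmodified. A self-contained sketch of the end-pointer check:

	#include <linux/inet.h>

	static bool is_plain_ipv4(const char *str)
	{
		struct in_addr a;
		const char *end = NULL;

		/* -1 length: NUL-terminated; -1 delim: none expected */
		if (in4_pton(str, -1, (u8 *)&a, -1, &end) <= 0)
			return false;

		return !end || *end == '\0';	/* nothing left over */
	}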
diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile
index f8de93bc5f5b..14a553e000ec 100644
--- a/drivers/net/netdevsim/Makefile
+++ b/drivers/net/netdevsim/Makefile
@@ -18,6 +18,10 @@ ifneq ($(CONFIG_PSAMPLE),)
netdevsim-objs += psample.o
endif
+ifneq ($(CONFIG_INET_PSP),)
+netdevsim-objs += psp.o
+endif
+
ifneq ($(CONFIG_MACSEC),)
netdevsim-objs += macsec.o
endif
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 2672d071b325..95f66c1f59db 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -851,7 +851,7 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
nsim_dev = nsim_trap_data->nsim_dev;
if (!devl_trylock(priv_to_devlink(nsim_dev))) {
- queue_delayed_work(system_unbound_wq,
+ queue_delayed_work(system_dfl_wq,
&nsim_dev->trap_data->trap_report_dw, 1);
return;
}
@@ -867,7 +867,7 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
cond_resched();
}
devl_unlock(priv_to_devlink(nsim_dev));
- queue_delayed_work(system_unbound_wq,
+ queue_delayed_work(system_dfl_wq,
&nsim_dev->trap_data->trap_report_dw,
msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
}
@@ -924,7 +924,7 @@ static int nsim_dev_traps_init(struct devlink *devlink)
INIT_DELAYED_WORK(&nsim_dev->trap_data->trap_report_dw,
nsim_dev_trap_report_work);
- queue_delayed_work(system_unbound_wq,
+ queue_delayed_work(system_dfl_wq,
&nsim_dev->trap_data->trap_report_dw,
msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c
index f631d90c428a..36a201533aae 100644
--- a/drivers/net/netdevsim/ethtool.c
+++ b/drivers/net/netdevsim/ethtool.c
@@ -165,11 +165,34 @@ nsim_set_fecparam(struct net_device *dev, struct ethtool_fecparam *fecparam)
return 0;
}
+static const struct ethtool_fec_hist_range netdevsim_fec_ranges[] = {
+ { 0, 0},
+ { 1, 3},
+ { 4, 7},
+ { 0, 0}
+};
+
static void
-nsim_get_fec_stats(struct net_device *dev, struct ethtool_fec_stats *fec_stats)
+nsim_get_fec_stats(struct net_device *dev, struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
+ struct ethtool_fec_hist_value *values = hist->values;
+
+ hist->ranges = netdevsim_fec_ranges;
+
fec_stats->corrected_blocks.total = 123;
fec_stats->uncorrectable_blocks.total = 4;
+
+ values[0].per_lane[0] = 125;
+ values[0].per_lane[1] = 120;
+ values[0].per_lane[2] = 100;
+ values[0].per_lane[3] = 100;
+ values[1].sum = 12;
+ values[2].sum = 2;
+ values[2].per_lane[0] = 2;
+ values[2].per_lane[1] = 0;
+ values[2].per_lane[2] = 0;
+ values[2].per_lane[3] = 0;
}
static int nsim_get_ts_info(struct net_device *dev,
diff --git a/drivers/net/netdevsim/health.c b/drivers/net/netdevsim/health.c
index 688f05316b5e..3bd0e7a489c3 100644
--- a/drivers/net/netdevsim/health.c
+++ b/drivers/net/netdevsim/health.c
@@ -183,14 +183,14 @@ int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink)
health->empty_reporter =
devl_health_reporter_create(devlink,
&nsim_dev_empty_reporter_ops,
- 0, health);
+ health);
if (IS_ERR(health->empty_reporter))
return PTR_ERR(health->empty_reporter);
health->dummy_reporter =
devl_health_reporter_create(devlink,
&nsim_dev_dummy_reporter_ops,
- 0, health);
+ health);
if (IS_ERR(health->dummy_reporter)) {
err = PTR_ERR(health->dummy_reporter);
goto err_empty_reporter_destroy;
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 39fe28af48b9..ebc3833e95b4 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -103,28 +103,42 @@ static int nsim_napi_rx(struct net_device *tx_dev, struct net_device *rx_dev,
static int nsim_forward_skb(struct net_device *tx_dev,
struct net_device *rx_dev,
struct sk_buff *skb,
- struct nsim_rq *rq)
+ struct nsim_rq *rq,
+ struct skb_ext *psp_ext)
{
- return __dev_forward_skb(rx_dev, skb) ?:
- nsim_napi_rx(tx_dev, rx_dev, rq, skb);
+ int ret;
+
+ ret = __dev_forward_skb(rx_dev, skb);
+ if (ret)
+ return ret;
+
+ nsim_psp_handle_ext(skb, psp_ext);
+
+ return nsim_napi_rx(tx_dev, rx_dev, rq, skb);
}
static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct netdevsim *ns = netdev_priv(dev);
+ struct skb_ext *psp_ext = NULL;
struct net_device *peer_dev;
unsigned int len = skb->len;
struct netdevsim *peer_ns;
struct netdev_config *cfg;
struct nsim_rq *rq;
int rxq;
+ int dr;
rcu_read_lock();
if (!nsim_ipsec_tx(ns, skb))
- goto out_drop_free;
+ goto out_drop_any;
peer_ns = rcu_dereference(ns->peer);
if (!peer_ns)
+ goto out_drop_any;
+
+ dr = nsim_do_psp(skb, ns, peer_ns, &psp_ext);
+ if (dr)
goto out_drop_free;
peer_dev = peer_ns->netdev;
@@ -141,7 +155,8 @@ static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_linearize(skb);
skb_tx_timestamp(skb);
- if (unlikely(nsim_forward_skb(dev, peer_dev, skb, rq) == NET_RX_DROP))
+ if (unlikely(nsim_forward_skb(dev, peer_dev,
+ skb, rq, psp_ext) == NET_RX_DROP))
goto out_drop_cnt;
if (!hrtimer_active(&rq->napi_timer))
@@ -151,8 +166,10 @@ static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev_dstats_tx_add(dev, len);
return NETDEV_TX_OK;
+out_drop_any:
+ dr = SKB_DROP_REASON_NOT_SPECIFIED;
out_drop_free:
- dev_kfree_skb(skb);
+ kfree_skb_reason(skb, dr);
out_drop_cnt:
rcu_read_unlock();
dev_dstats_tx_dropped(dev);
@@ -710,9 +727,13 @@ static struct nsim_rq *nsim_queue_alloc(void)
static void nsim_queue_free(struct net_device *dev, struct nsim_rq *rq)
{
hrtimer_cancel(&rq->napi_timer);
- local_bh_disable();
- dev_dstats_rx_dropped_add(dev, rq->skb_queue.qlen);
- local_bh_enable();
+
+ if (rq->skb_queue.qlen) {
+ local_bh_disable();
+ dev_dstats_rx_dropped_add(dev, rq->skb_queue.qlen);
+ local_bh_enable();
+ }
+
skb_queue_purge_reason(&rq->skb_queue, SKB_DROP_REASON_QUEUE_PURGE);
kfree(rq);
}
@@ -998,6 +1019,7 @@ static void nsim_queue_uninit(struct netdevsim *ns)
static int nsim_init_netdevsim(struct netdevsim *ns)
{
+ struct netdevsim *peer;
struct mock_phc *phc;
int err;
@@ -1032,6 +1054,10 @@ static int nsim_init_netdevsim(struct netdevsim *ns)
goto err_ipsec_teardown;
rtnl_unlock();
+ err = nsim_psp_init(ns);
+ if (err)
+ goto err_unregister_netdev;
+
if (IS_ENABLED(CONFIG_DEBUG_NET)) {
ns->nb.notifier_call = netdev_debug_event;
if (register_netdevice_notifier_dev_net(ns->netdev, &ns->nb,
@@ -1041,6 +1067,13 @@ static int nsim_init_netdevsim(struct netdevsim *ns)
return 0;
+err_unregister_netdev:
+ rtnl_lock();
+ peer = rtnl_dereference(ns->peer);
+ if (peer)
+ RCU_INIT_POINTER(peer->peer, NULL);
+ RCU_INIT_POINTER(ns->peer, NULL);
+ unregister_netdevice(ns->netdev);
err_ipsec_teardown:
nsim_ipsec_teardown(ns);
nsim_macsec_teardown(ns);
@@ -1128,6 +1161,8 @@ void nsim_destroy(struct netdevsim *ns)
unregister_netdevice_notifier_dev_net(ns->netdev, &ns->nb,
&ns->nn);
+ nsim_psp_uninit(ns);
+
rtnl_lock();
peer = rtnl_dereference(ns->peer);
if (peer)
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index bddd24c1389d..02c1c97b7008 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -108,6 +108,12 @@ struct netdevsim {
int rq_reset_mode;
+ struct {
+ struct psp_dev *dev;
+ u32 spi;
+ u32 assoc_cnt;
+ } psp;
+
struct nsim_bus_dev *nsim_bus_dev;
struct bpf_prog *bpf_offloaded;
@@ -421,6 +427,27 @@ static inline void nsim_macsec_teardown(struct netdevsim *ns)
}
#endif
+#if IS_ENABLED(CONFIG_INET_PSP)
+int nsim_psp_init(struct netdevsim *ns);
+void nsim_psp_uninit(struct netdevsim *ns);
+void nsim_psp_handle_ext(struct sk_buff *skb, struct skb_ext *psp_ext);
+enum skb_drop_reason
+nsim_do_psp(struct sk_buff *skb, struct netdevsim *ns,
+ struct netdevsim *peer_ns, struct skb_ext **psp_ext);
+#else
+static inline int nsim_psp_init(struct netdevsim *ns) { return 0; }
+static inline void nsim_psp_uninit(struct netdevsim *ns) {}
+static inline enum skb_drop_reason
+nsim_do_psp(struct sk_buff *skb, struct netdevsim *ns,
+ struct netdevsim *peer_ns, struct skb_ext **psp_ext)
+{
+ return 0;
+}
+
+static inline void
+nsim_psp_handle_ext(struct sk_buff *skb, struct skb_ext *psp_ext) {}
+#endif
+
struct nsim_bus_dev {
struct device dev;
struct list_head list;
diff --git a/drivers/net/netdevsim/psp.c b/drivers/net/netdevsim/psp.c
new file mode 100644
index 000000000000..332b5b744f01
--- /dev/null
+++ b/drivers/net/netdevsim/psp.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <net/ip6_checksum.h>
+#include <net/psp.h>
+#include <net/sock.h>
+
+#include "netdevsim.h"
+
+void nsim_psp_handle_ext(struct sk_buff *skb, struct skb_ext *psp_ext)
+{
+ if (psp_ext)
+ __skb_ext_set(skb, SKB_EXT_PSP, psp_ext);
+}
+
+enum skb_drop_reason
+nsim_do_psp(struct sk_buff *skb, struct netdevsim *ns,
+ struct netdevsim *peer_ns, struct skb_ext **psp_ext)
+{
+ enum skb_drop_reason rc = 0;
+ struct psp_assoc *pas;
+ struct net *net;
+ void **ptr;
+
+ rcu_read_lock();
+ pas = psp_skb_get_assoc_rcu(skb);
+ if (!pas) {
+ rc = SKB_NOT_DROPPED_YET;
+ goto out_unlock;
+ }
+
+ if (!skb_transport_header_was_set(skb)) {
+ rc = SKB_DROP_REASON_PSP_OUTPUT;
+ goto out_unlock;
+ }
+
+ ptr = psp_assoc_drv_data(pas);
+ if (*ptr != ns) {
+ rc = SKB_DROP_REASON_PSP_OUTPUT;
+ goto out_unlock;
+ }
+
+ net = sock_net(skb->sk);
+ if (!psp_dev_encapsulate(net, skb, pas->tx.spi, pas->version, 0)) {
+ rc = SKB_DROP_REASON_PSP_OUTPUT;
+ goto out_unlock;
+ }
+
+ /* Now pretend we just received this frame */
+ if (peer_ns->psp.dev->config.versions & (1 << pas->version)) {
+ bool strip_icv = false;
+ u8 generation;
+
+ /* We cheat a bit and put the generation in the key.
+ * In real life, if the generation were too old, decryption would
+ * fail. Here we simply make a bad key imply a bad generation,
+ * so psp_sk_rx_policy_check() fails.
+ */
+ generation = pas->tx.key[0];
+
+ skb_ext_reset(skb);
+ skb->mac_len = ETH_HLEN;
+ if (psp_dev_rcv(skb, peer_ns->psp.dev->id, generation,
+ strip_icv)) {
+ rc = SKB_DROP_REASON_PSP_OUTPUT;
+ goto out_unlock;
+ }
+
+ *psp_ext = skb->extensions;
+ refcount_inc(&(*psp_ext)->refcnt);
+ skb->decrypted = 1;
+ } else {
+ struct ipv6hdr *ip6h __maybe_unused;
+ struct iphdr *iph;
+ struct udphdr *uh;
+ __wsum csum;
+
+ /* Do not decapsulate. Receive the skb with the udp and psp
+ * headers still there as if this is a normal udp packet.
+ * psp_dev_encapsulate() sets udp checksum to 0, so we need to
+ * provide a valid checksum here, so the skb isn't dropped.
+ */
+ uh = udp_hdr(skb);
+ csum = skb_checksum(skb, skb_transport_offset(skb),
+ ntohs(uh->len), 0);
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ iph = ip_hdr(skb);
+ uh->check = udp_v4_check(ntohs(uh->len), iph->saddr,
+ iph->daddr, csum);
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case htons(ETH_P_IPV6):
+ ip6h = ipv6_hdr(skb);
+ uh->check = udp_v6_check(ntohs(uh->len), &ip6h->saddr,
+ &ip6h->daddr, csum);
+ break;
+#endif
+ }
+
+ uh->check = uh->check ?: CSUM_MANGLED_0;
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+
+out_unlock:
+ rcu_read_unlock();
+ return rc;
+}
+
+static int
+nsim_psp_set_config(struct psp_dev *psd, struct psp_dev_config *conf,
+ struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+
+static int
+nsim_rx_spi_alloc(struct psp_dev *psd, u32 version,
+ struct psp_key_parsed *assoc,
+ struct netlink_ext_ack *extack)
+{
+ struct netdevsim *ns = psd->drv_priv;
+ unsigned int new;
+ int i;
+
+ new = ++ns->psp.spi & PSP_SPI_KEY_ID;
+ if (psd->generation & 1)
+ new |= PSP_SPI_KEY_PHASE;
+
+ assoc->spi = cpu_to_be32(new);
+ assoc->key[0] = psd->generation;
+ for (i = 1; i < PSP_MAX_KEY; i++)
+ assoc->key[i] = ns->psp.spi + i;
+
+ return 0;
+}
+
+static int nsim_assoc_add(struct psp_dev *psd, struct psp_assoc *pas,
+ struct netlink_ext_ack *extack)
+{
+ struct netdevsim *ns = psd->drv_priv;
+ void **ptr = psp_assoc_drv_data(pas);
+
+ /* Copy drv_priv from psd to assoc */
+ *ptr = psd->drv_priv;
+ ns->psp.assoc_cnt++;
+
+ return 0;
+}
+
+static int nsim_key_rotate(struct psp_dev *psd, struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+
+static void nsim_assoc_del(struct psp_dev *psd, struct psp_assoc *pas)
+{
+ struct netdevsim *ns = psd->drv_priv;
+ void **ptr = psp_assoc_drv_data(pas);
+
+ *ptr = NULL;
+ ns->psp.assoc_cnt--;
+}
+
+static struct psp_dev_ops nsim_psp_ops = {
+ .set_config = nsim_psp_set_config,
+ .rx_spi_alloc = nsim_rx_spi_alloc,
+ .tx_key_add = nsim_assoc_add,
+ .tx_key_del = nsim_assoc_del,
+ .key_rotate = nsim_key_rotate,
+};
+
+static struct psp_dev_caps nsim_psp_caps = {
+ .versions = 1 << PSP_VERSION_HDR0_AES_GCM_128 |
+ 1 << PSP_VERSION_HDR0_AES_GMAC_128 |
+ 1 << PSP_VERSION_HDR0_AES_GCM_256 |
+ 1 << PSP_VERSION_HDR0_AES_GMAC_256,
+ .assoc_drv_spc = sizeof(void *),
+};
+
+void nsim_psp_uninit(struct netdevsim *ns)
+{
+ if (!IS_ERR(ns->psp.dev))
+ psp_dev_unregister(ns->psp.dev);
+ WARN_ON(ns->psp.assoc_cnt);
+}
+
+static ssize_t
+nsim_psp_rereg_write(struct file *file, const char __user *data, size_t count,
+ loff_t *ppos)
+{
+ struct netdevsim *ns = file->private_data;
+ int err;
+
+ nsim_psp_uninit(ns);
+
+ ns->psp.dev = psp_dev_create(ns->netdev, &nsim_psp_ops,
+ &nsim_psp_caps, ns);
+ err = PTR_ERR_OR_ZERO(ns->psp.dev);
+ return err ?: count;
+}
+
+static const struct file_operations nsim_psp_rereg_fops = {
+ .open = simple_open,
+ .write = nsim_psp_rereg_write,
+ .llseek = generic_file_llseek,
+ .owner = THIS_MODULE,
+};
+
+int nsim_psp_init(struct netdevsim *ns)
+{
+ struct dentry *ddir = ns->nsim_dev_port->ddir;
+ int err;
+
+ ns->psp.dev = psp_dev_create(ns->netdev, &nsim_psp_ops,
+ &nsim_psp_caps, ns);
+ err = PTR_ERR_OR_ZERO(ns->psp.dev);
+ if (err)
+ return err;
+
+ debugfs_create_file("psp_rereg", 0200, ddir, ns, &nsim_psp_rereg_fops);
+ return 0;
+}
diff --git a/drivers/net/pcs/Kconfig b/drivers/net/pcs/Kconfig
index f6aa437473de..ecbc3530e780 100644
--- a/drivers/net/pcs/Kconfig
+++ b/drivers/net/pcs/Kconfig
@@ -26,11 +26,12 @@ config PCS_MTK_LYNXI
which is part of MediaTek's SoC and Ethernet switch ICs.
config PCS_RZN1_MIIC
- tristate "Renesas RZ/N1 MII converter"
- depends on OF && (ARCH_RZN1 || COMPILE_TEST)
+ tristate "Renesas RZ/N1, RZ/N2H, RZ/T2H MII converter"
+ depends on OF
+ depends on ARCH_RENESAS || COMPILE_TEST
help
- This module provides a driver for the MII converter that is available
- on RZ/N1 SoCs. This PCS converts MII to RMII/RGMII or can be set in
- pass-through mode for MII.
+ This module provides a driver for the MII converter available on
+ Renesas RZ/N1, RZ/N2H, and RZ/T2H SoCs. This PCS converts MII to
+ RMII/RGMII, or can be set in pass-through mode for MII.
endmenu
diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c
index 23b40e9eacbb..677f92883976 100644
--- a/drivers/net/pcs/pcs-lynx.c
+++ b/drivers/net/pcs/pcs-lynx.c
@@ -49,6 +49,7 @@ static unsigned int lynx_pcs_inband_caps(struct phylink_pcs *pcs,
return LINK_INBAND_DISABLE;
case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_10G_QXGMII:
return LINK_INBAND_ENABLE;
default:
@@ -115,6 +116,7 @@ static void lynx_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
lynx_pcs_get_state_2500basex(lynx->mdio, state);
break;
case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_10G_QXGMII:
lynx_pcs_get_state_usxgmii(lynx->mdio, state);
break;
case PHY_INTERFACE_MODE_10GBASER:
@@ -170,6 +172,7 @@ static int lynx_pcs_config_giga(struct mdio_device *pcs,
}
static int lynx_pcs_config_usxgmii(struct mdio_device *pcs,
+ phy_interface_t interface,
const unsigned long *advertising,
unsigned int neg_mode)
{
@@ -177,7 +180,8 @@ static int lynx_pcs_config_usxgmii(struct mdio_device *pcs,
int addr = pcs->addr;
if (neg_mode != PHYLINK_PCS_NEG_INBAND_ENABLED) {
- dev_err(&pcs->dev, "USXGMII only supports in-band AN for now\n");
+ dev_err(&pcs->dev, "%s only supports in-band AN for now\n",
+ phy_modes(interface));
return -EOPNOTSUPP;
}
@@ -208,7 +212,8 @@ static int lynx_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
}
break;
case PHY_INTERFACE_MODE_USXGMII:
- return lynx_pcs_config_usxgmii(lynx->mdio, advertising,
+ case PHY_INTERFACE_MODE_10G_QXGMII:
+ return lynx_pcs_config_usxgmii(lynx->mdio, ifmode, advertising,
neg_mode);
case PHY_INTERFACE_MODE_10GBASER:
/* Nothing to do here for 10GBASER */
@@ -317,6 +322,7 @@ static void lynx_pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
lynx_pcs_link_up_2500basex(lynx->mdio, neg_mode, speed, duplex);
break;
case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_10G_QXGMII:
/* At the moment, only in-band AN is supported for USXGMII
* so nothing to do in link_up
*/
@@ -341,6 +347,7 @@ static const phy_interface_t lynx_interfaces[] = {
PHY_INTERFACE_MODE_2500BASEX,
PHY_INTERFACE_MODE_10GBASER,
PHY_INTERFACE_MODE_USXGMII,
+ PHY_INTERFACE_MODE_10G_QXGMII,
};
static struct phylink_pcs *lynx_pcs_create(struct mdio_device *mdio)
diff --git a/drivers/net/pcs/pcs-rzn1-miic.c b/drivers/net/pcs/pcs-rzn1-miic.c
index d79bb9b06cd2..885f17c32643 100644
--- a/drivers/net/pcs/pcs-rzn1-miic.c
+++ b/drivers/net/pcs/pcs-rzn1-miic.c
@@ -5,8 +5,12 @@
* Clément Léger <clement.leger@bootlin.com>
*/
+#include <linux/array_size.h>
+#include <linux/bits.h>
+#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/device.h>
+#include <linux/device/devres.h>
#include <linux/mdio.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -14,13 +18,15 @@
#include <linux/phylink.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
#include <dt-bindings/net/pcs-rzn1-miic.h>
+#include <dt-bindings/net/renesas,r9a09g077-pcs-miic.h>
#define MIIC_PRCMD 0x0
#define MIIC_ESID_CODE 0x4
-#define MIIC_MODCTRL 0x20
-#define MIIC_MODCTRL_SW_MODE GENMASK(4, 0)
+#define MIIC_MODCTRL 0x8
#define MIIC_CONVCTRL(port) (0x100 + (port) * 4)
@@ -46,21 +52,23 @@
#define MIIC_SWCTRL 0x304
#define MIIC_SWDUPC 0x308
-#define MIIC_MAX_NR_PORTS 5
-
-#define MIIC_MODCTRL_CONF_CONV_NUM 6
+#define MIIC_MODCTRL_CONF_CONV_MAX 6
#define MIIC_MODCTRL_CONF_NONE -1
+#define MIIC_MAX_NUM_RSTS 2
+
/**
* struct modctrl_match - Matching table entry for convctrl configuration
* See section 8.2.1 of manual.
* @mode_cfg: Configuration value for convctrl
* @conv: Configuration of ethernet port muxes. First index is SWITCH_PORTIN,
- * then index 1 - 5 are CONV1 - CONV5.
+ * then index 1 - 5 are CONV1 - CONV5 for RZ/N1 SoCs. In case
+ * of RZ/T2H and RZ/N2H SoCs, the first index is SWITCH_PORTIN then
+ * index 0 - 3 are CONV0 - CONV3.
*/
struct modctrl_match {
u32 mode_cfg;
- u8 conv[MIIC_MODCTRL_CONF_CONV_NUM];
+ u8 conv[MIIC_MODCTRL_CONF_CONV_MAX];
};
static struct modctrl_match modctrl_match_table[] = {
@@ -109,7 +117,7 @@ static const char * const conf_to_string[] = {
[MIIC_HSR_PORTB] = "HSR_PORTB",
};
-static const char *index_to_string[MIIC_MODCTRL_CONF_CONV_NUM] = {
+static const char * const index_to_string[] = {
"SWITCH_PORTIN",
"CONV1",
"CONV2",
@@ -118,16 +126,106 @@ static const char *index_to_string[MIIC_MODCTRL_CONF_CONV_NUM] = {
"CONV5",
};
+static struct modctrl_match rzt2h_modctrl_match_table[] = {
+ {0x0, {ETHSS_GMAC0_PORT, ETHSS_ETHSW_PORT0, ETHSS_ETHSW_PORT1,
+ ETHSS_ETHSW_PORT2, ETHSS_GMAC1_PORT}},
+
+ {0x1, {MIIC_MODCTRL_CONF_NONE, ETHSS_ESC_PORT0, ETHSS_ESC_PORT1,
+ ETHSS_GMAC2_PORT, ETHSS_GMAC1_PORT}},
+
+ {0x2, {ETHSS_GMAC0_PORT, ETHSS_ESC_PORT0, ETHSS_ESC_PORT1,
+ ETHSS_ETHSW_PORT2, ETHSS_GMAC1_PORT}},
+
+ {0x3, {MIIC_MODCTRL_CONF_NONE, ETHSS_ESC_PORT0, ETHSS_ESC_PORT1,
+ ETHSS_ESC_PORT2, ETHSS_GMAC1_PORT}},
+
+ {0x4, {ETHSS_GMAC0_PORT, ETHSS_ETHSW_PORT0, ETHSS_ESC_PORT1,
+ ETHSS_ESC_PORT2, ETHSS_GMAC1_PORT}},
+
+ {0x5, {ETHSS_GMAC0_PORT, ETHSS_ETHSW_PORT0, ETHSS_ESC_PORT1,
+ ETHSS_ETHSW_PORT2, ETHSS_GMAC1_PORT}},
+
+ {0x6, {ETHSS_GMAC0_PORT, ETHSS_ETHSW_PORT0, ETHSS_ETHSW_PORT1,
+ ETHSS_GMAC2_PORT, ETHSS_GMAC1_PORT}},
+
+ {0x7, {MIIC_MODCTRL_CONF_NONE, ETHSS_GMAC0_PORT, ETHSS_GMAC1_PORT,
+ ETHSS_GMAC2_PORT, MIIC_MODCTRL_CONF_NONE}}
+};
+
+static const char * const rzt2h_conf_to_string[] = {
+ [ETHSS_GMAC0_PORT] = "GMAC0_PORT",
+ [ETHSS_GMAC1_PORT] = "GMAC1_PORT",
+ [ETHSS_GMAC2_PORT] = "GMAC2_PORT",
+ [ETHSS_ESC_PORT0] = "ETHERCAT_PORT0",
+ [ETHSS_ESC_PORT1] = "ETHERCAT_PORT1",
+ [ETHSS_ESC_PORT2] = "ETHERCAT_PORT2",
+ [ETHSS_ETHSW_PORT0] = "SWITCH_PORT0",
+ [ETHSS_ETHSW_PORT1] = "SWITCH_PORT1",
+ [ETHSS_ETHSW_PORT2] = "SWITCH_PORT2",
+};
+
+static const char * const rzt2h_index_to_string[] = {
+ "SWITCH_PORTIN",
+ "CONV0",
+ "CONV1",
+ "CONV2",
+ "CONV3",
+};
+
+static const char * const rzt2h_reset_ids[] = {
+ "rst",
+ "crst",
+};
+
/**
* struct miic - MII converter structure
* @base: base address of the MII converter
* @dev: Device associated to the MII converter
* @lock: Lock used for read-modify-write access
+ * @rsts: Reset controls for the MII converter
+ * @of_data: Pointer to OF data
*/
struct miic {
void __iomem *base;
struct device *dev;
spinlock_t lock;
+ struct reset_control_bulk_data rsts[MIIC_MAX_NUM_RSTS];
+ const struct miic_of_data *of_data;
+};
+
+/**
+ * struct miic_of_data - OF data for MII converter
+ * @match_table: Matching table for convctrl configuration
+ * @match_table_count: Number of entries in the matching table
+ * @conf_conv_count: Number of entries in the conf_conv array
+ * @conf_to_string: String representations of the configuration values
+ * @conf_to_string_count: Number of entries in the conf_to_string array
+ * @index_to_string: String representations of the index values
+ * @index_to_string_count: Number of entries in the index_to_string array
+ * @miic_port_start: MIIC port start number
+ * @miic_port_max: Maximum MIIC supported
+ * @sw_mode_mask: Switch mode mask
+ * @reset_ids: Reset names array
+ * @reset_count: Number of entries in the reset_ids array
+ * @init_unlock_lock_regs: Flag to indicate if registers need to be unlocked
+ * before access.
+ * @miic_write: Function pointer to write a value to a MIIC register
+ */
+struct miic_of_data {
+ struct modctrl_match *match_table;
+ u8 match_table_count;
+ u8 conf_conv_count;
+ const char * const *conf_to_string;
+ u8 conf_to_string_count;
+ const char * const *index_to_string;
+ u8 index_to_string_count;
+ u8 miic_port_start;
+ u8 miic_port_max;
+ u8 sw_mode_mask;
+ const char * const *reset_ids;
+ u8 reset_count;
+ bool init_unlock_lock_regs;
+ void (*miic_write)(struct miic *miic, int offset, u32 value);
};
/**
@@ -149,9 +247,36 @@ static struct miic_port *phylink_pcs_to_miic_port(struct phylink_pcs *pcs)
return container_of(pcs, struct miic_port, pcs);
}
-static void miic_reg_writel(struct miic *miic, int offset, u32 value)
+static void miic_unlock_regs(struct miic *miic)
+{
+ /* Unprotect register writes */
+ writel(0x00A5, miic->base + MIIC_PRCMD);
+ writel(0x0001, miic->base + MIIC_PRCMD);
+ writel(0xFFFE, miic->base + MIIC_PRCMD);
+ writel(0x0001, miic->base + MIIC_PRCMD);
+}
+
+static void miic_lock_regs(struct miic *miic)
+{
+ /* Protect register writes */
+ writel(0x0000, miic->base + MIIC_PRCMD);
+}
+
+static void miic_reg_writel_unlocked(struct miic *miic, int offset, u32 value)
+{
+ writel(value, miic->base + offset);
+}
+
+static void miic_reg_writel_locked(struct miic *miic, int offset, u32 value)
{
+ miic_unlock_regs(miic);
writel(value, miic->base + offset);
+ miic_lock_regs(miic);
+}
+
+static void miic_reg_writel(struct miic *miic, int offset, u32 value)
+{
+ miic->of_data->miic_write(miic, offset, value);
}
static u32 miic_reg_readl(struct miic *miic, int offset)
@@ -303,6 +428,7 @@ static const struct phylink_pcs_ops miic_phylink_ops = {
struct phylink_pcs *miic_create(struct device *dev, struct device_node *np)
{
+ const struct miic_of_data *of_data;
struct platform_device *pdev;
struct miic_port *miic_port;
struct device_node *pcs_np;
@@ -315,9 +441,6 @@ struct phylink_pcs *miic_create(struct device *dev, struct device_node *np)
if (of_property_read_u32(np, "reg", &port))
return ERR_PTR(-EINVAL);
- if (port > MIIC_MAX_NR_PORTS || port < 1)
- return ERR_PTR(-EINVAL);
-
/* The PCS pdev is attached to the parent node */
pcs_np = of_get_parent(np);
if (!pcs_np)
@@ -336,18 +459,24 @@ struct phylink_pcs *miic_create(struct device *dev, struct device_node *np)
return ERR_PTR(-EPROBE_DEFER);
}
+ miic = platform_get_drvdata(pdev);
+ of_data = miic->of_data;
+ if (port > of_data->miic_port_max || port < of_data->miic_port_start) {
+ put_device(&pdev->dev);
+ return ERR_PTR(-EINVAL);
+ }
+
miic_port = kzalloc(sizeof(*miic_port), GFP_KERNEL);
if (!miic_port) {
put_device(&pdev->dev);
return ERR_PTR(-ENOMEM);
}
- miic = platform_get_drvdata(pdev);
device_link_add(dev, miic->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
put_device(&pdev->dev);
miic_port->miic = miic;
- miic_port->port = port - 1;
+ miic_port->port = port - of_data->miic_port_start;
miic_port->pcs.ops = &miic_phylink_ops;
phy_interface_set_rgmii(miic_port->pcs.supported_interfaces);
@@ -369,21 +498,23 @@ EXPORT_SYMBOL(miic_destroy);
static int miic_init_hw(struct miic *miic, u32 cfg_mode)
{
+ u8 sw_mode_mask = miic->of_data->sw_mode_mask;
int port;
/* Unlock write access to accessory registers (cf datasheet). If this
* is going to be used in conjunction with the Cortex-M3, this sequence
* will have to be moved into the register write path
*/
- miic_reg_writel(miic, MIIC_PRCMD, 0x00A5);
- miic_reg_writel(miic, MIIC_PRCMD, 0x0001);
- miic_reg_writel(miic, MIIC_PRCMD, 0xFFFE);
- miic_reg_writel(miic, MIIC_PRCMD, 0x0001);
+ if (miic->of_data->init_unlock_lock_regs)
+ miic_unlock_regs(miic);
+ /* TODO: Replace with FIELD_PREP() once it accepts masks that are not
+ * compile-time constants. For the masks used here, __ffs(sw_mode_mask)
+ * is 0, so the shift below is effectively a no-op.
+ */
miic_reg_writel(miic, MIIC_MODCTRL,
- FIELD_PREP(MIIC_MODCTRL_SW_MODE, cfg_mode));
+ ((cfg_mode << __ffs(sw_mode_mask)) & sw_mode_mask));
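
A worked instance of the open-coded field insertion above (editor's example; the mask values come from the of_data tables later in this patch): with sw_mode_mask = GENMASK(2, 0) = 0x7 and cfg_mode = 0x5,

    u32 v = (0x5 << __ffs(0x7)) & 0x7;  /* __ffs(0x7) == 0, so v == 0x5 */

which is the same result FIELD_PREP(GENMASK(2, 0), 0x5) would produce if the mask were a compile-time constant.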
- for (port = 0; port < MIIC_MAX_NR_PORTS; port++) {
+ for (port = 0; port < miic->of_data->miic_port_max; port++) {
miic_converter_enable(miic, port, 0);
/* Disable speed/duplex control from these registers, datasheet
* says switch registers should be used to setup switch port
@@ -396,12 +527,11 @@ static int miic_init_hw(struct miic *miic, u32 cfg_mode)
return 0;
}
-static bool miic_modctrl_match(s8 table_val[MIIC_MODCTRL_CONF_CONV_NUM],
- s8 dt_val[MIIC_MODCTRL_CONF_CONV_NUM])
+static bool miic_modctrl_match(s8 *table_val, s8 *dt_val, u8 count)
{
int i;
- for (i = 0; i < MIIC_MODCTRL_CONF_CONV_NUM; i++) {
+ for (i = 0; i < count; i++) {
if (dt_val[i] == MIIC_MODCTRL_CONF_NONE)
continue;
@@ -412,53 +542,59 @@ static bool miic_modctrl_match(s8 table_val[MIIC_MODCTRL_CONF_CONV_NUM],
return true;
}
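
A concrete instance of the wildcard semantics above (editor's example, not part of the patch): MIIC_MODCTRL_CONF_NONE entries in dt_val act as don't-cares, so a device tree that only pins one converter matches any table row agreeing on that single position:

    /* dt_val = { NONE, NONE, ETHSS_GMAC1_PORT, NONE, NONE } matches any
     * row whose conv[2] == ETHSS_GMAC1_PORT, regardless of other slots.
     */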
-static void miic_dump_conf(struct device *dev,
- s8 conf[MIIC_MODCTRL_CONF_CONV_NUM])
+static void miic_dump_conf(struct miic *miic, s8 *conf)
{
+ const struct miic_of_data *of_data = miic->of_data;
const char *conf_name;
int i;
- for (i = 0; i < MIIC_MODCTRL_CONF_CONV_NUM; i++) {
+ for (i = 0; i < of_data->conf_conv_count; i++) {
if (conf[i] != MIIC_MODCTRL_CONF_NONE)
- conf_name = conf_to_string[conf[i]];
+ conf_name = of_data->conf_to_string[conf[i]];
else
conf_name = "NONE";
- dev_err(dev, "%s: %s\n", index_to_string[i], conf_name);
+ dev_err(miic->dev, "%s: %s\n",
+ of_data->index_to_string[i], conf_name);
}
}
-static int miic_match_dt_conf(struct device *dev,
- s8 dt_val[MIIC_MODCTRL_CONF_CONV_NUM],
- u32 *mode_cfg)
+static int miic_match_dt_conf(struct miic *miic, s8 *dt_val, u32 *mode_cfg)
{
+ const struct miic_of_data *of_data = miic->of_data;
struct modctrl_match *table_entry;
int i;
- for (i = 0; i < ARRAY_SIZE(modctrl_match_table); i++) {
- table_entry = &modctrl_match_table[i];
+ for (i = 0; i < of_data->match_table_count; i++) {
+ table_entry = &of_data->match_table[i];
- if (miic_modctrl_match(table_entry->conv, dt_val)) {
+ if (miic_modctrl_match(table_entry->conv, dt_val,
+ miic->of_data->conf_conv_count)) {
*mode_cfg = table_entry->mode_cfg;
return 0;
}
}
- dev_err(dev, "Failed to apply requested configuration\n");
- miic_dump_conf(dev, dt_val);
+ dev_err(miic->dev, "Failed to apply requested configuration\n");
+ miic_dump_conf(miic, dt_val);
return -EINVAL;
}
-static int miic_parse_dt(struct device *dev, u32 *mode_cfg)
+static int miic_parse_dt(struct miic *miic, u32 *mode_cfg)
{
- s8 dt_val[MIIC_MODCTRL_CONF_CONV_NUM];
- struct device_node *np = dev->of_node;
+ struct device_node *np = miic->dev->of_node;
struct device_node *conv;
+ int port, ret;
+ s8 *dt_val;
u32 conf;
- int port;
- memset(dt_val, MIIC_MODCTRL_CONF_NONE, sizeof(dt_val));
+ dt_val = kmalloc_array(miic->of_data->conf_conv_count,
+ sizeof(*dt_val), GFP_KERNEL);
+ if (!dt_val)
+ return -ENOMEM;
+
+ memset(dt_val, MIIC_MODCTRL_CONF_NONE,
+        miic->of_data->conf_conv_count * sizeof(*dt_val));
if (of_property_read_u32(np, "renesas,miic-switch-portin", &conf) == 0)
dt_val[0] = conf;
@@ -467,11 +603,58 @@ static int miic_parse_dt(struct device *dev, u32 *mode_cfg)
if (of_property_read_u32(conv, "reg", &port))
continue;
+ /* dt_val[0] is SWITCH_PORTIN, so converter entries start at index 1;
+ * offset 0-based DT port numbers (miic_port_start == 0) by one.
+ */
+ port += !miic->of_data->miic_port_start;
if (of_property_read_u32(conv, "renesas,miic-input", &conf) == 0)
dt_val[port] = conf;
}
- return miic_match_dt_conf(dev, dt_val, mode_cfg);
+ ret = miic_match_dt_conf(miic, dt_val, mode_cfg);
+ kfree(dt_val);
+
+ return ret;
+}
+
+static void miic_reset_control_bulk_assert(void *data)
+{
+ struct miic *miic = data;
+ int ret;
+
+ ret = reset_control_bulk_assert(miic->of_data->reset_count, miic->rsts);
+ if (ret)
+ dev_err(miic->dev, "failed to assert reset lines\n");
+}
+
+static int miic_reset_control_init(struct miic *miic)
+{
+ const struct miic_of_data *of_data = miic->of_data;
+ struct device *dev = miic->dev;
+ int ret;
+ u8 i;
+
+ if (!of_data->reset_count)
+ return 0;
+
+ for (i = 0; i < of_data->reset_count; i++)
+ miic->rsts[i].id = of_data->reset_ids[i];
+
+ ret = devm_reset_control_bulk_get_exclusive(dev, of_data->reset_count,
+ miic->rsts);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get bulk reset lines\n");
+
+ ret = reset_control_bulk_deassert(of_data->reset_count, miic->rsts);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to deassert reset lines\n");
+
+ ret = devm_add_action_or_reset(dev, miic_reset_control_bulk_assert,
+ miic);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add reset action\n");
+
+ return 0;
}
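
Assuming standard devres semantics (actions run last-registered-first on unbind or probe failure), the teardown ordering for the hunk above is:

    /* 1. miic_reset_control_bulk_assert() re-asserts "rst"/"crst"
     * 2. the bulk reset controls acquired by
     *    devm_reset_control_bulk_get_exclusive() are released
     */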
static int miic_probe(struct platform_device *pdev)
@@ -481,20 +664,26 @@ static int miic_probe(struct platform_device *pdev)
u32 mode_cfg;
int ret;
- ret = miic_parse_dt(dev, &mode_cfg);
- if (ret < 0)
- return ret;
-
miic = devm_kzalloc(dev, sizeof(*miic), GFP_KERNEL);
if (!miic)
return -ENOMEM;
- spin_lock_init(&miic->lock);
+ miic->of_data = of_device_get_match_data(dev);
miic->dev = dev;
+
+ ret = miic_parse_dt(miic, &mode_cfg);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_init(&miic->lock);
miic->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(miic->base))
return PTR_ERR(miic->base);
+ ret = miic_reset_control_init(miic);
+ if (ret)
+ return ret;
+
ret = devm_pm_runtime_enable(dev);
if (ret < 0)
return ret;
@@ -527,9 +716,41 @@ static void miic_remove(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
}
+static struct miic_of_data rzn1_miic_of_data = {
+ .match_table = modctrl_match_table,
+ .match_table_count = ARRAY_SIZE(modctrl_match_table),
+ .conf_conv_count = MIIC_MODCTRL_CONF_CONV_MAX,
+ .conf_to_string = conf_to_string,
+ .conf_to_string_count = ARRAY_SIZE(conf_to_string),
+ .index_to_string = index_to_string,
+ .index_to_string_count = ARRAY_SIZE(index_to_string),
+ .miic_port_start = 1,
+ .miic_port_max = 5,
+ .sw_mode_mask = GENMASK(4, 0),
+ .init_unlock_lock_regs = true,
+ .miic_write = miic_reg_writel_unlocked,
+};
+
+static struct miic_of_data rzt2h_miic_of_data = {
+ .match_table = rzt2h_modctrl_match_table,
+ .match_table_count = ARRAY_SIZE(rzt2h_modctrl_match_table),
+ .conf_conv_count = 5,
+ .conf_to_string = rzt2h_conf_to_string,
+ .conf_to_string_count = ARRAY_SIZE(rzt2h_conf_to_string),
+ .index_to_string = rzt2h_index_to_string,
+ .index_to_string_count = ARRAY_SIZE(rzt2h_index_to_string),
+ .miic_port_start = 0,
+ .miic_port_max = 4,
+ .sw_mode_mask = GENMASK(2, 0),
+ .reset_ids = rzt2h_reset_ids,
+ .reset_count = ARRAY_SIZE(rzt2h_reset_ids),
+ .miic_write = miic_reg_writel_locked,
+};
+
static const struct of_device_id miic_of_mtable[] = {
- { .compatible = "renesas,rzn1-miic" },
- { /* sentinel */ },
+ { .compatible = "renesas,r9a09g077-miic", .data = &rzt2h_miic_of_data },
+ { .compatible = "renesas,rzn1-miic", .data = &rzn1_miic_of_data },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, miic_of_mtable);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 28acc6392cfc..98700d069191 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -3,6 +3,11 @@
# PHY Layer Configuration
#
+config MDIO_BUS
+ tristate "MDIO bus consumer layer"
+ help
+ MDIO bus consumer layer
+
config PHYLINK
tristate
select PHYLIB
@@ -298,7 +303,7 @@ config MICREL_PHY
depends on PTP_1588_CLOCK_OPTIONAL
select PHY_PACKAGE
help
- Supports the KSZ9021, VSC8201, KS8001 PHYs.
+ Supports the KSZ8xxx, KSZ9xxx, and LAN88xx families of Micrel/Microchip PHYs.
config MICROCHIP_T1S_PHY
tristate "Microchip 10BASE-T1S Ethernet PHYs"
@@ -361,7 +366,7 @@ config NXP_TJA11XX_PHY
tristate "NXP TJA11xx PHYs support"
depends on HWMON
help
- Currently supports the NXP TJA1100 and TJA1101 PHY.
+ Currently supports the NXP TJA1100, TJA1101 and TJA1102 PHYs.
config NCN26000_PHY
tristate "Onsemi 10BASE-T1S Ethernet PHY"
@@ -465,7 +470,3 @@ config XILINX_GMII2RGMII
Ethernet physical media devices and the Gigabit Ethernet controller.
endif # PHYLIB
-
-config MICREL_KS8995MA
- tristate "Micrel KS8995MA 5-ports 10/100 managed Ethernet switch"
- depends on SPI
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index b4795aaf9c1c..76e0db40f879 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -8,7 +8,7 @@ mdio-bus-y += mdio_bus.o mdio_device.o
ifdef CONFIG_PHYLIB
# built-in whenever PHYLIB is built-in or module
-obj-y += stubs.o mdio-boardinfo.o
+obj-y += stubs.o
endif
libphy-$(CONFIG_SWPHY) += swphy.o
@@ -72,7 +72,6 @@ obj-$(CONFIG_MAXLINEAR_GPHY) += mxl-gpy.o
obj-$(CONFIG_MAXLINEAR_86110_PHY) += mxl-86110.o
obj-y += mediatek/
obj-$(CONFIG_MESON_GXL_PHY) += meson-gxl.o
-obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
obj-$(CONFIG_MICREL_PHY) += micrel.o
obj-$(CONFIG_MICROCHIP_PHY) += microchip.o
obj-$(CONFIG_MICROCHIP_PHY_RDS_PTP) += microchip_rds_ptp.o
diff --git a/drivers/net/phy/aquantia/aquantia.h b/drivers/net/phy/aquantia/aquantia.h
index 0c78bfabace5..31427ee343e3 100644
--- a/drivers/net/phy/aquantia/aquantia.h
+++ b/drivers/net/phy/aquantia/aquantia.h
@@ -55,6 +55,7 @@
#define VEND1_GLOBAL_CFG_SERDES_MODE_SGMII 3
#define VEND1_GLOBAL_CFG_SERDES_MODE_OCSGMII 4
#define VEND1_GLOBAL_CFG_SERDES_MODE_XFI5G 6
+#define VEND1_GLOBAL_CFG_AUTONEG_ENA BIT(3)
#define VEND1_GLOBAL_CFG_RATE_ADAPT GENMASK(8, 7)
#define VEND1_GLOBAL_CFG_RATE_ADAPT_NONE 0
#define VEND1_GLOBAL_CFG_RATE_ADAPT_USX 1
@@ -152,6 +153,28 @@
#define AQR_MAX_LEDS 3
+/* Custom driver definitions for constructing a single variable out of
+ * aggregate firmware build information. These do not represent hardware
+ * fields.
+ */
+#define AQR_FW_FINGERPRINT_MAJOR GENMASK_ULL(63, 56)
+#define AQR_FW_FINGERPRINT_MINOR GENMASK_ULL(55, 48)
+#define AQR_FW_FINGERPRINT_BUILD_ID GENMASK_ULL(47, 40)
+#define AQR_FW_FINGERPRINT_PROV_ID GENMASK_ULL(39, 32)
+#define AQR_FW_FINGERPRINT_MISC_ID GENMASK_ULL(31, 16)
+#define AQR_FW_FINGERPRINT_MISC_VER GENMASK_ULL(15, 0)
+#define AQR_FW_FINGERPRINT(major, minor, build_id, prov_id, misc_id, misc_ver) \
+ (FIELD_PREP(AQR_FW_FINGERPRINT_MAJOR, major) | \
+ FIELD_PREP(AQR_FW_FINGERPRINT_MINOR, minor) | \
+ FIELD_PREP(AQR_FW_FINGERPRINT_BUILD_ID, build_id) | \
+ FIELD_PREP(AQR_FW_FINGERPRINT_PROV_ID, prov_id) | \
+ FIELD_PREP(AQR_FW_FINGERPRINT_MISC_ID, misc_id) | \
+ FIELD_PREP(AQR_FW_FINGERPRINT_MISC_VER, misc_ver))
+
+/* 10G-QXGMII firmware for NXP SPF-30841 riser board (AQR412C) */
+#define AQR_G3_V4_3_C_AQR_NXP_SPF_30841_MUSX_ID40019_VER1198 \
+ AQR_FW_FINGERPRINT(4, 3, 0xc, 1, 40019, 1198)
+
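
As a sanity check on the packing above (editor's sketch; FIELD_GET() is the standard <linux/bitfield.h> accessor and works with the GENMASK_ULL() masks defined here), the fingerprint decomposes back into its parts:

    u64 fp = AQR_G3_V4_3_C_AQR_NXP_SPF_30841_MUSX_ID40019_VER1198;

    FIELD_GET(AQR_FW_FINGERPRINT_MAJOR, fp);    /* 4 */
    FIELD_GET(AQR_FW_FINGERPRINT_MINOR, fp);    /* 3 */
    FIELD_GET(AQR_FW_FINGERPRINT_MISC_VER, fp); /* 1198 */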
struct aqr107_hw_stat {
const char *name;
int reg;
@@ -174,10 +197,39 @@ static const struct aqr107_hw_stat aqr107_hw_stats[] = {
#define AQR107_SGMII_STAT_SZ ARRAY_SIZE(aqr107_hw_stats)
+static const struct {
+ int speed;
+ u16 reg;
+} aqr_global_cfg_regs[] = {
+ { SPEED_10, VEND1_GLOBAL_CFG_10M, },
+ { SPEED_100, VEND1_GLOBAL_CFG_100M, },
+ { SPEED_1000, VEND1_GLOBAL_CFG_1G, },
+ { SPEED_2500, VEND1_GLOBAL_CFG_2_5G, },
+ { SPEED_5000, VEND1_GLOBAL_CFG_5G, },
+ { SPEED_10000, VEND1_GLOBAL_CFG_10G, },
+};
+
+#define AQR_NUM_GLOBAL_CFG ARRAY_SIZE(aqr_global_cfg_regs)
+
+enum aqr_rate_adaptation {
+ AQR_RATE_ADAPT_NONE,
+ AQR_RATE_ADAPT_USX,
+ AQR_RATE_ADAPT_PAUSE,
+};
+
+struct aqr_global_syscfg {
+ int speed;
+ phy_interface_t interface;
+ enum aqr_rate_adaptation rate_adapt;
+};
+
struct aqr107_priv {
u64 sgmii_stats[AQR107_SGMII_STAT_SZ];
+ u64 fingerprint;
unsigned long leds_active_low;
unsigned long leds_active_high;
+ bool wait_on_global_cfg;
+ struct aqr_global_syscfg global_cfg[AQR_NUM_GLOBAL_CFG];
};
#if IS_REACHABLE(CONFIG_HWMON)
diff --git a/drivers/net/phy/aquantia/aquantia_main.c b/drivers/net/phy/aquantia/aquantia_main.c
index 77a48635d7bf..41f3676c7f1e 100644
--- a/drivers/net/phy/aquantia/aquantia_main.c
+++ b/drivers/net/phy/aquantia/aquantia_main.c
@@ -26,13 +26,18 @@
#define PHY_ID_AQR111 0x03a1b610
#define PHY_ID_AQR111B0 0x03a1b612
#define PHY_ID_AQR112 0x03a1b662
-#define PHY_ID_AQR412 0x03a1b712
+#define PHY_ID_AQR412 0x03a1b6f2
+#define PHY_ID_AQR412C 0x03a1b712
#define PHY_ID_AQR113 0x31c31c40
#define PHY_ID_AQR113C 0x31c31c12
#define PHY_ID_AQR114C 0x31c31c22
+#define PHY_ID_AQR115 0x31c31c63
#define PHY_ID_AQR115C 0x31c31c33
#define PHY_ID_AQR813 0x31c31cb2
+#define MDIO_PHYXS_VEND_PROV2 0xc441
+#define MDIO_PHYXS_VEND_PROV2_USX_AN BIT(3)
+
#define MDIO_PHYXS_VEND_IF_STATUS 0xe812
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK GENMASK(7, 3)
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR 0
@@ -83,6 +88,9 @@
#define MDIO_AN_TX_VEND_INT_MASK2 0xd401
#define MDIO_AN_TX_VEND_INT_MASK2_LINK BIT(0)
+#define PMAPMD_FW_MISC_ID 0xc41d
+#define PMAPMD_FW_MISC_VER 0xc41e
+
#define PMAPMD_RSVD_VEND_PROV 0xe400
#define PMAPMD_RSVD_VEND_PROV_MDI_CONF GENMASK(1, 0)
#define PMAPMD_RSVD_VEND_PROV_MDI_REVERSE BIT(0)
@@ -465,7 +473,7 @@ static int aqr105_config_aneg(struct phy_device *phydev)
return genphy_c45_check_and_restart_aneg(phydev, changed);
}
-static int aqr105_read_rate(struct phy_device *phydev)
+static int aqr_gen1_read_rate(struct phy_device *phydev)
{
int val;
@@ -504,8 +512,31 @@ static int aqr105_read_rate(struct phy_device *phydev)
return 0;
}
-static int aqr105_read_status(struct phy_device *phydev)
+/* Quad port PHYs like AQR412(C) have 4 system interfaces, but they can also be
+ * used with a single system interface over which all 4 ports are multiplexed
+ * (10G-QXGMII). To the MDIO registers, this mode is indistinguishable from
+ * USXGMII (which implies a single 10G port).
+ *
+ * To not rely solely on the device tree, we allow the regular system interface
+ * detection to work as usual, but we replace USXGMII with 10G-QXGMII based on
+ * the specific fingerprint of firmware images that are known to be for MUSX.
+ */
+static phy_interface_t aqr_translate_interface(struct phy_device *phydev,
+ phy_interface_t interface)
{
+ struct aqr107_priv *priv = phydev->priv;
+
+ if (phy_id_compare(phydev->drv->phy_id, PHY_ID_AQR412C, phydev->drv->phy_id_mask) &&
+ priv->fingerprint == AQR_G3_V4_3_C_AQR_NXP_SPF_30841_MUSX_ID40019_VER1198 &&
+ interface == PHY_INTERFACE_MODE_USXGMII)
+ return PHY_INTERFACE_MODE_10G_QXGMII;
+
+ return interface;
+}
+
+static int aqr_gen1_read_status(struct phy_device *phydev)
+{
+ phy_interface_t interface;
int ret;
int val;
@@ -531,155 +562,65 @@ static int aqr105_read_status(struct phy_device *phydev)
switch (FIELD_GET(MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK, val)) {
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR:
- phydev->interface = PHY_INTERFACE_MODE_10GKR;
+ interface = PHY_INTERFACE_MODE_10GKR;
break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KX:
- phydev->interface = PHY_INTERFACE_MODE_1000BASEKX;
+ interface = PHY_INTERFACE_MODE_1000BASEKX;
break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI:
- phydev->interface = PHY_INTERFACE_MODE_10GBASER;
+ interface = PHY_INTERFACE_MODE_10GBASER;
break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_USXGMII:
- phydev->interface = PHY_INTERFACE_MODE_USXGMII;
+ interface = PHY_INTERFACE_MODE_USXGMII;
break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XAUI:
- phydev->interface = PHY_INTERFACE_MODE_XAUI;
+ interface = PHY_INTERFACE_MODE_XAUI;
break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII:
- phydev->interface = PHY_INTERFACE_MODE_SGMII;
+ interface = PHY_INTERFACE_MODE_SGMII;
break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_RXAUI:
- phydev->interface = PHY_INTERFACE_MODE_RXAUI;
+ interface = PHY_INTERFACE_MODE_RXAUI;
break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII:
- phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+ interface = PHY_INTERFACE_MODE_2500BASEX;
break;
case MDIO_PHYXS_VEND_IF_STATUS_TYPE_OFF:
default:
phydev->link = false;
- phydev->interface = PHY_INTERFACE_MODE_NA;
+ interface = PHY_INTERFACE_MODE_NA;
break;
}
- /* Read rate from vendor register */
- return aqr105_read_rate(phydev);
-}
-
-static int aqr107_read_rate(struct phy_device *phydev)
-{
- u32 config_reg;
- int val;
+ phydev->interface = aqr_translate_interface(phydev, interface);
- val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_STATUS1);
- if (val < 0)
- return val;
-
- if (val & MDIO_AN_TX_VEND_STATUS1_FULL_DUPLEX)
- phydev->duplex = DUPLEX_FULL;
- else
- phydev->duplex = DUPLEX_HALF;
-
- switch (FIELD_GET(MDIO_AN_TX_VEND_STATUS1_RATE_MASK, val)) {
- case MDIO_AN_TX_VEND_STATUS1_10BASET:
- phydev->speed = SPEED_10;
- config_reg = VEND1_GLOBAL_CFG_10M;
- break;
- case MDIO_AN_TX_VEND_STATUS1_100BASETX:
- phydev->speed = SPEED_100;
- config_reg = VEND1_GLOBAL_CFG_100M;
- break;
- case MDIO_AN_TX_VEND_STATUS1_1000BASET:
- phydev->speed = SPEED_1000;
- config_reg = VEND1_GLOBAL_CFG_1G;
- break;
- case MDIO_AN_TX_VEND_STATUS1_2500BASET:
- phydev->speed = SPEED_2500;
- config_reg = VEND1_GLOBAL_CFG_2_5G;
- break;
- case MDIO_AN_TX_VEND_STATUS1_5000BASET:
- phydev->speed = SPEED_5000;
- config_reg = VEND1_GLOBAL_CFG_5G;
- break;
- case MDIO_AN_TX_VEND_STATUS1_10GBASET:
- phydev->speed = SPEED_10000;
- config_reg = VEND1_GLOBAL_CFG_10G;
- break;
- default:
- phydev->speed = SPEED_UNKNOWN;
- return 0;
- }
-
- val = phy_read_mmd(phydev, MDIO_MMD_VEND1, config_reg);
- if (val < 0)
- return val;
-
- if (FIELD_GET(VEND1_GLOBAL_CFG_RATE_ADAPT, val) ==
- VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE)
- phydev->rate_matching = RATE_MATCH_PAUSE;
- else
- phydev->rate_matching = RATE_MATCH_NONE;
-
- return 0;
+ /* Read rate from vendor register */
+ return aqr_gen1_read_rate(phydev);
}
-static int aqr107_read_status(struct phy_device *phydev)
+static int aqr_gen2_read_status(struct phy_device *phydev)
{
- int val, ret;
+ struct aqr107_priv *priv = phydev->priv;
+ int i, ret;
- ret = aqr_read_status(phydev);
+ ret = aqr_gen1_read_status(phydev);
if (ret)
return ret;
- if (!phydev->link || phydev->autoneg == AUTONEG_DISABLE)
- return 0;
+ for (i = 0; i < AQR_NUM_GLOBAL_CFG; i++) {
+ struct aqr_global_syscfg *syscfg = &priv->global_cfg[i];
- /* The status register is not immediately correct on line side link up.
- * Poll periodically until it reflects the correct ON state.
- * Only return fail for read error, timeout defaults to OFF state.
- */
- ret = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_PHYXS,
- MDIO_PHYXS_VEND_IF_STATUS, val,
- (FIELD_GET(MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK, val) !=
- MDIO_PHYXS_VEND_IF_STATUS_TYPE_OFF),
- AQR107_OP_IN_PROG_SLEEP,
- AQR107_OP_IN_PROG_TIMEOUT, false);
- if (ret && ret != -ETIMEDOUT)
- return ret;
+ if (syscfg->speed != phydev->speed)
+ continue;
- switch (FIELD_GET(MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK, val)) {
- case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR:
- phydev->interface = PHY_INTERFACE_MODE_10GKR;
- break;
- case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KX:
- phydev->interface = PHY_INTERFACE_MODE_1000BASEKX;
- break;
- case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI:
- phydev->interface = PHY_INTERFACE_MODE_10GBASER;
- break;
- case MDIO_PHYXS_VEND_IF_STATUS_TYPE_USXGMII:
- phydev->interface = PHY_INTERFACE_MODE_USXGMII;
- break;
- case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XAUI:
- phydev->interface = PHY_INTERFACE_MODE_XAUI;
- break;
- case MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII:
- phydev->interface = PHY_INTERFACE_MODE_SGMII;
- break;
- case MDIO_PHYXS_VEND_IF_STATUS_TYPE_RXAUI:
- phydev->interface = PHY_INTERFACE_MODE_RXAUI;
- break;
- case MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII:
- phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
- break;
- case MDIO_PHYXS_VEND_IF_STATUS_TYPE_OFF:
- default:
- phydev->link = false;
- phydev->interface = PHY_INTERFACE_MODE_NA;
+ if (syscfg->rate_adapt == AQR_RATE_ADAPT_PAUSE)
+ phydev->rate_matching = RATE_MATCH_PAUSE;
+ else
+ phydev->rate_matching = RATE_MATCH_NONE;
break;
}
- /* Read possibly downshifted rate from vendor register */
- return aqr107_read_rate(phydev);
+ return 0;
}
static int aqr107_get_downshift(struct phy_device *phydev, u8 *data)
@@ -764,27 +705,46 @@ int aqr_wait_reset_complete(struct phy_device *phydev)
return ret;
}
-static void aqr107_chip_info(struct phy_device *phydev)
+static int aqr_build_fingerprint(struct phy_device *phydev)
{
u8 fw_major, fw_minor, build_id, prov_id;
+ struct aqr107_priv *priv = phydev->priv;
+ u16 misc_id, misc_ver;
int val;
val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_FW_ID);
if (val < 0)
- return;
+ return val;
fw_major = FIELD_GET(VEND1_GLOBAL_FW_ID_MAJOR, val);
fw_minor = FIELD_GET(VEND1_GLOBAL_FW_ID_MINOR, val);
val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_RSVD_STAT1);
if (val < 0)
- return;
+ return val;
build_id = FIELD_GET(VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID, val);
prov_id = FIELD_GET(VEND1_GLOBAL_RSVD_STAT1_PROV_ID, val);
- phydev_dbg(phydev, "FW %u.%u, Build %u, Provisioning %u\n",
- fw_major, fw_minor, build_id, prov_id);
+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_FW_MISC_ID);
+ if (val < 0)
+ return val;
+
+ misc_id = val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, PMAPMD_FW_MISC_VER);
+ if (val < 0)
+ return val;
+
+ misc_ver = val;
+
+ priv->fingerprint = AQR_FW_FINGERPRINT(fw_major, fw_minor, build_id,
+ prov_id, misc_id, misc_ver);
+
+ phydev_dbg(phydev, "FW %u.%u, Build %u, Provisioning %u, Misc ID %u, Version %u\n",
+ fw_major, fw_minor, build_id, prov_id, misc_id, misc_ver);
+
+ return 0;
}
static int aqr107_config_mdi(struct phy_device *phydev)
@@ -810,7 +770,7 @@ static int aqr107_config_mdi(struct phy_device *phydev)
mdi_conf | PMAPMD_RSVD_VEND_PROV_MDI_FORCE);
}
-static int aqr107_config_init(struct phy_device *phydev)
+static int aqr_gen1_config_init(struct phy_device *phydev)
{
struct aqr107_priv *priv = phydev->priv;
u32 led_idx;
@@ -822,6 +782,7 @@ static int aqr107_config_init(struct phy_device *phydev)
phydev->interface != PHY_INTERFACE_MODE_2500BASEX &&
phydev->interface != PHY_INTERFACE_MODE_XGMII &&
phydev->interface != PHY_INTERFACE_MODE_USXGMII &&
+ phydev->interface != PHY_INTERFACE_MODE_10G_QXGMII &&
phydev->interface != PHY_INTERFACE_MODE_10GKR &&
phydev->interface != PHY_INTERFACE_MODE_10GBASER &&
phydev->interface != PHY_INTERFACE_MODE_XAUI &&
@@ -832,8 +793,14 @@ static int aqr107_config_init(struct phy_device *phydev)
"Your devicetree is out of date, please update it. The AQR107 family doesn't support XGMII, maybe you mean USXGMII.\n");
ret = aqr_wait_reset_complete(phydev);
- if (!ret)
- aqr107_chip_info(phydev);
+ if (!ret) {
+ /* The PHY might work without a firmware image, so only build a
+ * fingerprint if the firmware was initialized.
+ */
+ ret = aqr_build_fingerprint(phydev);
+ if (ret)
+ return ret;
+ }
ret = aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
if (ret)
@@ -859,20 +826,145 @@ static int aqr107_config_init(struct phy_device *phydev)
return 0;
}
-static int aqcs109_config_init(struct phy_device *phydev)
+/* Walk the media-speed configuration registers to determine which
+ * host-side serdes modes may be used by the PHY depending on the
+ * negotiated media speed.
+ */
+static int aqr_gen2_read_global_syscfg(struct phy_device *phydev)
+{
+ struct aqr107_priv *priv = phydev->priv;
+ unsigned int serdes_mode, rate_adapt;
+ phy_interface_t interface;
+ int i, val;
+
+ for (i = 0; i < AQR_NUM_GLOBAL_CFG; i++) {
+ struct aqr_global_syscfg *syscfg = &priv->global_cfg[i];
+
+ syscfg->speed = aqr_global_cfg_regs[i].speed;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ aqr_global_cfg_regs[i].reg);
+ if (val < 0)
+ return val;
+
+ serdes_mode = FIELD_GET(VEND1_GLOBAL_CFG_SERDES_MODE, val);
+ rate_adapt = FIELD_GET(VEND1_GLOBAL_CFG_RATE_ADAPT, val);
+
+ switch (serdes_mode) {
+ case VEND1_GLOBAL_CFG_SERDES_MODE_XFI:
+ if (rate_adapt == VEND1_GLOBAL_CFG_RATE_ADAPT_USX)
+ interface = PHY_INTERFACE_MODE_USXGMII;
+ else
+ interface = PHY_INTERFACE_MODE_10GBASER;
+ break;
+
+ case VEND1_GLOBAL_CFG_SERDES_MODE_XFI5G:
+ interface = PHY_INTERFACE_MODE_5GBASER;
+ break;
+
+ case VEND1_GLOBAL_CFG_SERDES_MODE_OCSGMII:
+ interface = PHY_INTERFACE_MODE_2500BASEX;
+ break;
+
+ case VEND1_GLOBAL_CFG_SERDES_MODE_SGMII:
+ interface = PHY_INTERFACE_MODE_SGMII;
+ break;
+
+ default:
+ phydev_warn(phydev, "unrecognised serdes mode %u\n",
+ serdes_mode);
+ interface = PHY_INTERFACE_MODE_NA;
+ break;
+ }
+
+ syscfg->interface = aqr_translate_interface(phydev, interface);
+
+ switch (rate_adapt) {
+ case VEND1_GLOBAL_CFG_RATE_ADAPT_NONE:
+ syscfg->rate_adapt = AQR_RATE_ADAPT_NONE;
+ break;
+ case VEND1_GLOBAL_CFG_RATE_ADAPT_USX:
+ syscfg->rate_adapt = AQR_RATE_ADAPT_USX;
+ break;
+ case VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE:
+ syscfg->rate_adapt = AQR_RATE_ADAPT_PAUSE;
+ break;
+ default:
+ phydev_warn(phydev, "unrecognized rate adapt mode %u\n",
+ rate_adapt);
+ break;
+ }
+
+ phydev_dbg(phydev,
+ "Media speed %d uses host interface %s with %s\n",
+ syscfg->speed, phy_modes(syscfg->interface),
+ syscfg->rate_adapt == AQR_RATE_ADAPT_NONE ? "no rate adaptation" :
+ syscfg->rate_adapt == AQR_RATE_ADAPT_PAUSE ? "rate adaptation through flow control" :
+ syscfg->rate_adapt == AQR_RATE_ADAPT_USX ? "rate adaptation through symbol replication" :
+ "unrecognized rate adaptation type");
+ }
+
+ return 0;
+}
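
To summarize the serdes decode above (editor's summary of the switch logic, no new behaviour):

    /* XFI + RATE_ADAPT_USX  -> USXGMII (10G-QXGMII after fingerprint translation)
     * XFI (other rate adapt) -> 10GBASE-R
     * XFI5G                  -> 5GBASE-R
     * OCSGMII                -> 2500BASE-X
     * SGMII                  -> SGMII
     * anything else          -> PHY_INTERFACE_MODE_NA (warned)
     */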
+
+static int aqr_gen2_fill_interface_modes(struct phy_device *phydev)
+{
+ unsigned long *possible = phydev->possible_interfaces;
+ struct aqr107_priv *priv = phydev->priv;
+ phy_interface_t interface;
+ int i, val, ret;
+
+ /* It's been observed on some models that, when coming out of suspend,
+ * the FW signals that the PHY is ready but the GLOBAL_CFG registers
+ * keep returning zeroes for some time. Poll the 100M register until it
+ * returns a real value, as both 113c and 115c support this mode.
+ */
+ if (priv->wait_on_global_cfg) {
+ ret = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
+ VEND1_GLOBAL_CFG_100M, val,
+ val != 0, 1000, 100000, false);
+ if (ret)
+ return ret;
+ }
+
+ ret = aqr_gen2_read_global_syscfg(phydev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < AQR_NUM_GLOBAL_CFG; i++) {
+ interface = priv->global_cfg[i].interface;
+ if (interface != PHY_INTERFACE_MODE_NA)
+ __set_bit(interface, possible);
+ }
+
+ return 0;
+}
+
+static int aqr_gen2_config_init(struct phy_device *phydev)
{
int ret;
+ ret = aqr_gen1_config_init(phydev);
+ if (ret)
+ return ret;
+
+ return aqr_gen2_fill_interface_modes(phydev);
+}
+
+static int aqr_gen3_config_init(struct phy_device *phydev)
+{
+ return aqr_gen2_config_init(phydev);
+}
+
+static int aqcs109_config_init(struct phy_device *phydev)
+{
/* Check that the PHY interface type is compatible */
if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
phydev->interface != PHY_INTERFACE_MODE_2500BASEX)
return -ENODEV;
- ret = aqr_wait_reset_complete(phydev);
- if (!ret)
- aqr107_chip_info(phydev);
-
- return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
+ return aqr_gen2_config_init(phydev);
}
static void aqr107_link_change_notify(struct phy_device *phydev)
@@ -920,7 +1012,7 @@ static void aqr107_link_change_notify(struct phy_device *phydev)
phydev_info(phydev, "Aquantia 1000Base-T2 mode active\n");
}
-static int aqr107_wait_processor_intensive_op(struct phy_device *phydev)
+static int aqr_gen1_wait_processor_intensive_op(struct phy_device *phydev)
{
int val, err;
@@ -944,17 +1036,16 @@ static int aqr107_wait_processor_intensive_op(struct phy_device *phydev)
return 0;
}
-static int aqr107_get_rate_matching(struct phy_device *phydev,
- phy_interface_t iface)
+static int aqr_gen2_get_rate_matching(struct phy_device *phydev,
+ phy_interface_t iface)
{
if (iface == PHY_INTERFACE_MODE_10GBASER ||
- iface == PHY_INTERFACE_MODE_2500BASEX ||
- iface == PHY_INTERFACE_MODE_NA)
+ iface == PHY_INTERFACE_MODE_2500BASEX)
return RATE_MATCH_PAUSE;
return RATE_MATCH_NONE;
}
-static int aqr107_suspend(struct phy_device *phydev)
+static int aqr_gen1_suspend(struct phy_device *phydev)
{
int err;
@@ -963,10 +1054,10 @@ static int aqr107_suspend(struct phy_device *phydev)
if (err)
return err;
- return aqr107_wait_processor_intensive_op(phydev);
+ return aqr_gen1_wait_processor_intensive_op(phydev);
}
-static int aqr107_resume(struct phy_device *phydev)
+static int aqr_gen1_resume(struct phy_device *phydev)
{
int err;
@@ -975,89 +1066,7 @@ static int aqr107_resume(struct phy_device *phydev)
if (err)
return err;
- return aqr107_wait_processor_intensive_op(phydev);
-}
-
-static const u16 aqr_global_cfg_regs[] = {
- VEND1_GLOBAL_CFG_10M,
- VEND1_GLOBAL_CFG_100M,
- VEND1_GLOBAL_CFG_1G,
- VEND1_GLOBAL_CFG_2_5G,
- VEND1_GLOBAL_CFG_5G,
- VEND1_GLOBAL_CFG_10G
-};
-
-static int aqr107_fill_interface_modes(struct phy_device *phydev)
-{
- unsigned long *possible = phydev->possible_interfaces;
- unsigned int serdes_mode, rate_adapt;
- phy_interface_t interface;
- int i, val;
-
- /* Walk the media-speed configuration registers to determine which
- * host-side serdes modes may be used by the PHY depending on the
- * negotiated media speed.
- */
- for (i = 0; i < ARRAY_SIZE(aqr_global_cfg_regs); i++) {
- val = phy_read_mmd(phydev, MDIO_MMD_VEND1,
- aqr_global_cfg_regs[i]);
- if (val < 0)
- return val;
-
- serdes_mode = FIELD_GET(VEND1_GLOBAL_CFG_SERDES_MODE, val);
- rate_adapt = FIELD_GET(VEND1_GLOBAL_CFG_RATE_ADAPT, val);
-
- switch (serdes_mode) {
- case VEND1_GLOBAL_CFG_SERDES_MODE_XFI:
- if (rate_adapt == VEND1_GLOBAL_CFG_RATE_ADAPT_USX)
- interface = PHY_INTERFACE_MODE_USXGMII;
- else
- interface = PHY_INTERFACE_MODE_10GBASER;
- break;
-
- case VEND1_GLOBAL_CFG_SERDES_MODE_XFI5G:
- interface = PHY_INTERFACE_MODE_5GBASER;
- break;
-
- case VEND1_GLOBAL_CFG_SERDES_MODE_OCSGMII:
- interface = PHY_INTERFACE_MODE_2500BASEX;
- break;
-
- case VEND1_GLOBAL_CFG_SERDES_MODE_SGMII:
- interface = PHY_INTERFACE_MODE_SGMII;
- break;
-
- default:
- phydev_warn(phydev, "unrecognised serdes mode %u\n",
- serdes_mode);
- interface = PHY_INTERFACE_MODE_NA;
- break;
- }
-
- if (interface != PHY_INTERFACE_MODE_NA)
- __set_bit(interface, possible);
- }
-
- return 0;
-}
-
-static int aqr113c_fill_interface_modes(struct phy_device *phydev)
-{
- int val, ret;
-
- /* It's been observed on some models that - when coming out of suspend
- * - the FW signals that the PHY is ready but the GLOBAL_CFG registers
- * continue on returning zeroes for some time. Let's poll the 100M
- * register until it returns a real value as both 113c and 115c support
- * this mode.
- */
- ret = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
- VEND1_GLOBAL_CFG_100M, val, val != 0,
- 1000, 100000, false);
- if (ret)
- return ret;
-
- return aqr107_fill_interface_modes(phydev);
+ return aqr_gen1_wait_processor_intensive_op(phydev);
}
static int aqr115c_get_features(struct phy_device *phydev)
@@ -1085,11 +1094,14 @@ static int aqr111_get_features(struct phy_device *phydev)
return 0;
}
-static int aqr113c_config_init(struct phy_device *phydev)
+static int aqr_gen4_config_init(struct phy_device *phydev)
{
+ struct aqr107_priv *priv = phydev->priv;
int ret;
- ret = aqr107_config_init(phydev);
+ priv->wait_on_global_cfg = true;
+
+ ret = aqr_gen3_config_init(phydev);
if (ret < 0)
return ret;
@@ -1098,11 +1110,55 @@ static int aqr113c_config_init(struct phy_device *phydev)
if (ret)
return ret;
- ret = aqr107_wait_processor_intensive_op(phydev);
- if (ret)
- return ret;
+ return aqr_gen1_wait_processor_intensive_op(phydev);
+}
+
+static unsigned int aqr_gen2_inband_caps(struct phy_device *phydev,
+ phy_interface_t interface)
+{
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ interface == PHY_INTERFACE_MODE_USXGMII ||
+ interface == PHY_INTERFACE_MODE_10G_QXGMII)
+ return LINK_INBAND_ENABLE | LINK_INBAND_DISABLE;
+
+ return 0;
+}
+
+static int aqr_gen2_config_inband(struct phy_device *phydev, unsigned int modes)
+{
+ struct aqr107_priv *priv = phydev->priv;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_USXGMII ||
+ phydev->interface == PHY_INTERFACE_MODE_10G_QXGMII) {
+ u16 set = 0;
+
+ if (modes == LINK_INBAND_ENABLE)
+ set = MDIO_PHYXS_VEND_PROV2_USX_AN;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_PHYXS,
+ MDIO_PHYXS_VEND_PROV2,
+ MDIO_PHYXS_VEND_PROV2_USX_AN, set);
+ }
+
+ for (int i = 0; i < AQR_NUM_GLOBAL_CFG; i++) {
+ struct aqr_global_syscfg *syscfg = &priv->global_cfg[i];
+ u16 set = 0;
+ int err;
+
+ if (syscfg->interface != phydev->interface)
+ continue;
+
+ if (modes == LINK_INBAND_ENABLE)
+ set = VEND1_GLOBAL_CFG_AUTONEG_ENA;
+
+ err = phy_modify_mmd(phydev, MDIO_MMD_VEND1,
+ aqr_global_cfg_regs[i].reg,
+ VEND1_GLOBAL_CFG_AUTONEG_ENA, set);
+ if (err)
+ return err;
+ }
- return aqr113c_fill_interface_modes(phydev);
+ return 0;
}
static int aqr107_probe(struct phy_device *phydev)
@@ -1144,13 +1200,13 @@ static struct phy_driver aqr_driver[] = {
.name = "Aquantia AQR105",
.get_features = aqr105_get_features,
.probe = aqr107_probe,
- .config_init = aqr107_config_init,
+ .config_init = aqr_gen1_config_init,
.config_aneg = aqr105_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
- .read_status = aqr105_read_status,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
+ .read_status = aqr_gen1_read_status,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR106),
@@ -1164,16 +1220,16 @@ static struct phy_driver aqr_driver[] = {
PHY_ID_MATCH_MODEL(PHY_ID_AQR107),
.name = "Aquantia AQR107",
.probe = aqr107_probe,
- .get_rate_matching = aqr107_get_rate_matching,
- .config_init = aqr107_config_init,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
+ .config_init = aqr_gen2_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
- .read_status = aqr107_read_status,
+ .read_status = aqr_gen2_read_status,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
@@ -1183,21 +1239,23 @@ static struct phy_driver aqr_driver[] = {
.led_hw_control_set = aqr_phy_led_hw_control_set,
.led_hw_control_get = aqr_phy_led_hw_control_get,
.led_polarity_set = aqr_phy_led_polarity_set,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQCS109),
.name = "Aquantia AQCS109",
.probe = aqr107_probe,
- .get_rate_matching = aqr107_get_rate_matching,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
.config_init = aqcs109_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
- .read_status = aqr107_read_status,
+ .read_status = aqr_gen2_read_status,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
@@ -1208,21 +1266,23 @@ static struct phy_driver aqr_driver[] = {
.led_hw_control_set = aqr_phy_led_hw_control_set,
.led_hw_control_get = aqr_phy_led_hw_control_get,
.led_polarity_set = aqr_phy_led_polarity_set,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR111),
.name = "Aquantia AQR111",
.probe = aqr107_probe,
- .get_rate_matching = aqr107_get_rate_matching,
- .config_init = aqr107_config_init,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
+ .config_init = aqr_gen3_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
- .read_status = aqr107_read_status,
+ .read_status = aqr_gen2_read_status,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
@@ -1233,21 +1293,23 @@ static struct phy_driver aqr_driver[] = {
.led_hw_control_set = aqr_phy_led_hw_control_set,
.led_hw_control_get = aqr_phy_led_hw_control_get,
.led_polarity_set = aqr_phy_led_polarity_set,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR111B0),
.name = "Aquantia AQR111B0",
.probe = aqr107_probe,
- .get_rate_matching = aqr107_get_rate_matching,
- .config_init = aqr107_config_init,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
+ .config_init = aqr_gen3_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
- .read_status = aqr107_read_status,
+ .read_status = aqr_gen2_read_status,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
@@ -1258,6 +1320,8 @@ static struct phy_driver aqr_driver[] = {
.led_hw_control_set = aqr_phy_led_hw_control_set,
.led_hw_control_get = aqr_phy_led_hw_control_get,
.led_polarity_set = aqr_phy_led_polarity_set,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR405),
@@ -1266,20 +1330,23 @@ static struct phy_driver aqr_driver[] = {
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
.read_status = aqr_read_status,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR112),
.name = "Aquantia AQR112",
.probe = aqr107_probe,
+ .config_init = aqr_gen3_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
- .read_status = aqr107_read_status,
- .get_rate_matching = aqr107_get_rate_matching,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
+ .read_status = aqr_gen2_read_status,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
@@ -1289,39 +1356,65 @@ static struct phy_driver aqr_driver[] = {
.led_hw_control_set = aqr_phy_led_hw_control_set,
.led_hw_control_get = aqr_phy_led_hw_control_get,
.led_polarity_set = aqr_phy_led_polarity_set,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR412),
.name = "Aquantia AQR412",
.probe = aqr107_probe,
+ .config_init = aqr_gen3_config_init,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .handle_interrupt = aqr_handle_interrupt,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
+ .read_status = aqr_gen2_read_status,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
+},
+{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR412C),
+ .name = "Aquantia AQR412C",
+ .probe = aqr107_probe,
+ .config_init = aqr_gen3_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
- .read_status = aqr107_read_status,
- .get_rate_matching = aqr107_get_rate_matching,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
+ .read_status = aqr_gen2_read_status,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
.link_change_notify = aqr107_link_change_notify,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR113),
.name = "Aquantia AQR113",
.probe = aqr107_probe,
- .get_rate_matching = aqr107_get_rate_matching,
- .config_init = aqr113c_config_init,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
+ .config_init = aqr_gen4_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
- .read_status = aqr107_read_status,
+ .read_status = aqr_gen2_read_status,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
@@ -1331,21 +1424,23 @@ static struct phy_driver aqr_driver[] = {
.led_hw_control_set = aqr_phy_led_hw_control_set,
.led_hw_control_get = aqr_phy_led_hw_control_get,
.led_polarity_set = aqr_phy_led_polarity_set,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR113C),
.name = "Aquantia AQR113C",
.probe = aqr107_probe,
- .get_rate_matching = aqr107_get_rate_matching,
- .config_init = aqr113c_config_init,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
+ .config_init = aqr_gen4_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
- .read_status = aqr107_read_status,
+ .read_status = aqr_gen2_read_status,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
@@ -1355,21 +1450,23 @@ static struct phy_driver aqr_driver[] = {
.led_hw_control_set = aqr_phy_led_hw_control_set,
.led_hw_control_get = aqr_phy_led_hw_control_get,
.led_polarity_set = aqr_phy_led_polarity_set,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR114C),
.name = "Aquantia AQR114C",
.probe = aqr107_probe,
- .get_rate_matching = aqr107_get_rate_matching,
- .config_init = aqr107_config_init,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
+ .config_init = aqr_gen4_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
- .read_status = aqr107_read_status,
+ .read_status = aqr_gen2_read_status,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
@@ -1380,21 +1477,50 @@ static struct phy_driver aqr_driver[] = {
.led_hw_control_set = aqr_phy_led_hw_control_set,
.led_hw_control_get = aqr_phy_led_hw_control_get,
.led_polarity_set = aqr_phy_led_polarity_set,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
+},
+{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR115),
+ .name = "Aquantia AQR115",
+ .probe = aqr107_probe,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
+ .config_init = aqr_gen4_config_init,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .handle_interrupt = aqr_handle_interrupt,
+ .read_status = aqr_gen2_read_status,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .get_features = aqr115c_get_features,
+ .link_change_notify = aqr107_link_change_notify,
+ .led_brightness_set = aqr_phy_led_brightness_set,
+ .led_hw_is_supported = aqr_phy_led_hw_is_supported,
+ .led_hw_control_set = aqr_phy_led_hw_control_set,
+ .led_hw_control_get = aqr_phy_led_hw_control_get,
+ .led_polarity_set = aqr_phy_led_polarity_set,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR115C),
.name = "Aquantia AQR115C",
.probe = aqr107_probe,
- .get_rate_matching = aqr107_get_rate_matching,
- .config_init = aqr113c_config_init,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
+ .config_init = aqr_gen4_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
- .read_status = aqr107_read_status,
+ .read_status = aqr_gen2_read_status,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
@@ -1405,21 +1531,23 @@ static struct phy_driver aqr_driver[] = {
.led_hw_control_set = aqr_phy_led_hw_control_set,
.led_hw_control_get = aqr_phy_led_hw_control_get,
.led_polarity_set = aqr_phy_led_polarity_set,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR813),
.name = "Aquantia AQR813",
.probe = aqr107_probe,
- .get_rate_matching = aqr107_get_rate_matching,
- .config_init = aqr107_config_init,
+ .get_rate_matching = aqr_gen2_get_rate_matching,
+ .config_init = aqr_gen4_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.handle_interrupt = aqr_handle_interrupt,
- .read_status = aqr107_read_status,
+ .read_status = aqr_gen2_read_status,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
- .suspend = aqr107_suspend,
- .resume = aqr107_resume,
+ .suspend = aqr_gen1_suspend,
+ .resume = aqr_gen1_resume,
.get_sset_count = aqr107_get_sset_count,
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
@@ -1429,6 +1557,8 @@ static struct phy_driver aqr_driver[] = {
.led_hw_control_set = aqr_phy_led_hw_control_set,
.led_hw_control_get = aqr_phy_led_hw_control_get,
.led_polarity_set = aqr_phy_led_polarity_set,
+ .inband_caps = aqr_gen2_inband_caps,
+ .config_inband = aqr_gen2_config_inband,
},
};
@@ -1446,9 +1576,11 @@ static const struct mdio_device_id __maybe_unused aqr_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR111B0) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR112) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR412) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR412C) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR113) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR113C) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR114C) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR115) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR115C) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR813) },
{ }
diff --git a/drivers/net/phy/as21xxx.c b/drivers/net/phy/as21xxx.c
index 92697f43087d..005277360656 100644
--- a/drivers/net/phy/as21xxx.c
+++ b/drivers/net/phy/as21xxx.c
@@ -884,11 +884,12 @@ static int as21xxx_match_phy_device(struct phy_device *phydev,
u32 phy_id;
int ret;
- /* Skip PHY that are not AS21xxx or already have firmware loaded */
- if (phydev->c45_ids.device_ids[MDIO_MMD_PCS] != PHY_ID_AS21XXX)
+ /* Skip PHYs that are not AS21xxx */
+ if (!phy_id_compare_vendor(phydev->c45_ids.device_ids[MDIO_MMD_PCS],
+ PHY_VENDOR_AEONSEMI))
return genphy_match_phy_device(phydev, phydrv);
- /* Read PHY ID to handle firmware just loaded */
+ /* Read the PHY ID to handle freshly loaded firmware or a HW reset */
ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MII_PHYSID1);
if (ret < 0)
return ret;
diff --git a/drivers/net/phy/ax88796b.c b/drivers/net/phy/ax88796b.c
index 694df1401aa2..f20ddf649149 100644
--- a/drivers/net/phy/ax88796b.c
+++ b/drivers/net/phy/ax88796b.c
@@ -112,9 +112,8 @@ static struct phy_driver asix_driver[] = {
.resume = genphy_resume,
.soft_reset = asix_soft_reset,
}, {
- .phy_id = PHY_ID_ASIX_AX88796B,
+ PHY_ID_MATCH_MODEL(PHY_ID_ASIX_AX88796B),
.name = "Asix Electronics AX88796B",
- .phy_id_mask = 0xfffffff0,
/* PHY_BASIC_FEATURES */
.soft_reset = asix_soft_reset,
} };
@@ -124,7 +123,7 @@ module_phy_driver(asix_driver);
static const struct mdio_device_id __maybe_unused asix_tbl[] = {
{ PHY_ID_MATCH_EXACT(PHY_ID_ASIX_AX88772A) },
{ PHY_ID_MATCH_EXACT(PHY_ID_ASIX_AX88772C) },
- { PHY_ID_ASIX_AX88796B, 0xfffffff0 },
+ { PHY_ID_MATCH_MODEL(PHY_ID_ASIX_AX88796B) },
{ }
};
diff --git a/drivers/net/phy/bcm-phy-ptp.c b/drivers/net/phy/bcm-phy-ptp.c
index eba8b5fb1365..d3501f8487d9 100644
--- a/drivers/net/phy/bcm-phy-ptp.c
+++ b/drivers/net/phy/bcm-phy-ptp.c
@@ -597,10 +597,6 @@ static int bcm_ptp_perout_locked(struct bcm_ptp_private *priv,
period = BCM_MAX_PERIOD_8NS; /* write nonzero value */
- /* Reject unsupported flags */
- if (req->flags & ~PTP_PEROUT_DUTY_CYCLE)
- return -EOPNOTSUPP;
-
if (req->flags & PTP_PEROUT_DUTY_CYCLE)
pulse = ktime_to_ns(ktime_set(req->on.sec, req->on.nsec));
else
@@ -741,6 +737,8 @@ static const struct ptp_clock_info bcm_ptp_clock_info = {
.n_pins = 1,
.n_per_out = 1,
.n_ext_ts = 1,
+ .supported_perout_flags = PTP_PEROUT_DUTY_CYCLE,
+ .supported_extts_flags = PTP_STRICT_FLAGS | PTP_RISING_EDGE,
};
static void bcm_ptp_txtstamp(struct mii_timestamper *mii_ts,
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index a60e58ef90c4..3459a0e9d8b9 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -23,9 +23,6 @@
#include <linux/irq.h>
#include <linux/gpio/consumer.h>
-#define BRCM_PHY_MODEL(phydev) \
- ((phydev)->drv->phy_id & (phydev)->drv->phy_id_mask)
-
#define BRCM_PHY_REV(phydev) \
((phydev)->drv->phy_id & ~((phydev)->drv->phy_id_mask))
@@ -249,8 +246,8 @@ static int bcm54xx_phydsp_config(struct phy_device *phydev)
if (err < 0)
return err;
- if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610 ||
- BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610M) {
+ if (phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM50610) ||
+ phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM50610M)) {
/* Clear bit 9 to fix a phy interop issue. */
err = bcm_phy_write_exp(phydev, MII_BCM54XX_EXP_EXP08,
MII_BCM54XX_EXP_EXP08_RJCT_2MHZ);
@@ -264,7 +261,7 @@ static int bcm54xx_phydsp_config(struct phy_device *phydev)
}
}
- if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM57780) {
+ if (phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM57780)) {
int val;
val = bcm_phy_read_exp(phydev, MII_BCM54XX_EXP_EXP75);
@@ -292,12 +289,12 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
bool clk125en = true;
/* Abort if we are using an untested phy. */
- if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM57780 &&
- BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610 &&
- BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610M &&
- BRCM_PHY_MODEL(phydev) != PHY_ID_BCM54210E &&
- BRCM_PHY_MODEL(phydev) != PHY_ID_BCM54810 &&
- BRCM_PHY_MODEL(phydev) != PHY_ID_BCM54811)
+ if (!(phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM57780) ||
+ phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM50610) ||
+ phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM50610M) ||
+ phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM54210E) ||
+ phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM54810) ||
+ phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM54811)))
return;
val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_SCR3);
@@ -306,8 +303,8 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
orig = val;
- if ((BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610 ||
- BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610M) &&
+ if ((phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM50610) ||
+ phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM50610M)) &&
BRCM_PHY_REV(phydev) >= 0x3) {
/*
* Here, bit 0 _disables_ CLK125 when set.
@@ -316,7 +313,8 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
clk125en = false;
} else {
if (phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED) {
- if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM54811) {
+ if (!phy_id_compare_model(phydev->drv->phy_id,
+ PHY_ID_BCM54811)) {
/* Here, bit 0 _enables_ CLK125 when set */
val &= ~BCM54XX_SHD_SCR3_DEF_CLK125;
}
@@ -330,9 +328,9 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
val |= BCM54XX_SHD_SCR3_DLLAPD_DIS;
if (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY) {
- if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54210E ||
- BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810 ||
- BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54811)
+ if (phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM54210E) ||
+ phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM54810) ||
+ phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM54811))
val |= BCM54XX_SHD_SCR3_RXCTXC_DIS;
else
val |= BCM54XX_SHD_SCR3_TRDDAPD;
@@ -461,14 +459,14 @@ static int bcm54xx_config_init(struct phy_device *phydev)
if (err < 0)
return err;
- if ((BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610 ||
- BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610M) &&
+ if ((phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM50610) ||
+ phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM50610M)) &&
(phydev->dev_flags & PHY_BRCM_CLEAR_RGMII_MODE))
bcm_phy_write_shadow(phydev, BCM54XX_SHD_RGMII_MODE, 0);
bcm54xx_adjust_rxrefclk(phydev);
- switch (BRCM_PHY_MODEL(phydev)) {
+ switch (phydev->drv->phy_id & PHY_ID_MATCH_MODEL_MASK) {
case PHY_ID_BCM50610:
case PHY_ID_BCM50610M:
err = bcm54xx_config_clock_delay(phydev);
@@ -693,7 +691,7 @@ static int bcm5481x_read_abilities(struct phy_device *phydev)
* So we must read the bcm54811 as unable to auto-negotiate
* in BroadR-Reach mode.
*/
- if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54811)
+ if (phy_id_compare_model(phydev->drv->phy_id, PHY_ID_BCM54811))
aneg = 0;
else
aneg = val & LRESR_LDSABILITY;
@@ -1438,8 +1436,7 @@ static int bcm54811_read_status(struct phy_device *phydev)
static struct phy_driver broadcom_drivers[] = {
{
- .phy_id = PHY_ID_BCM5411,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM5411),
.name = "Broadcom BCM5411",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1451,8 +1448,7 @@ static struct phy_driver broadcom_drivers[] = {
.handle_interrupt = bcm_phy_handle_interrupt,
.link_change_notify = bcm54xx_link_change_notify,
}, {
- .phy_id = PHY_ID_BCM5421,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM5421),
.name = "Broadcom BCM5421",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1464,8 +1460,7 @@ static struct phy_driver broadcom_drivers[] = {
.handle_interrupt = bcm_phy_handle_interrupt,
.link_change_notify = bcm54xx_link_change_notify,
}, {
- .phy_id = PHY_ID_BCM54210E,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM54210E),
.name = "Broadcom BCM54210E",
/* PHY_GBIT_FEATURES */
.flags = PHY_ALWAYS_CALL_SUSPEND,
@@ -1483,8 +1478,7 @@ static struct phy_driver broadcom_drivers[] = {
.set_wol = bcm54xx_phy_set_wol,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM5461,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM5461),
.name = "Broadcom BCM5461",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1497,8 +1491,7 @@ static struct phy_driver broadcom_drivers[] = {
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM54612E,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM54612E),
.name = "Broadcom BCM54612E",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1513,8 +1506,7 @@ static struct phy_driver broadcom_drivers[] = {
.suspend = bcm54xx_suspend,
.resume = bcm54xx_resume,
}, {
- .phy_id = PHY_ID_BCM54616S,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM54616S),
.name = "Broadcom BCM54616S",
/* PHY_GBIT_FEATURES */
.soft_reset = genphy_soft_reset,
@@ -1527,8 +1519,7 @@ static struct phy_driver broadcom_drivers[] = {
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM5464,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM5464),
.name = "Broadcom BCM5464",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1543,8 +1534,7 @@ static struct phy_driver broadcom_drivers[] = {
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM5481,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM5481),
.name = "Broadcom BCM5481",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1558,8 +1548,7 @@ static struct phy_driver broadcom_drivers[] = {
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM54810,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM54810),
.name = "Broadcom BCM54810",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1577,8 +1566,7 @@ static struct phy_driver broadcom_drivers[] = {
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM54811,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM54811),
.name = "Broadcom BCM54811",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1596,8 +1584,7 @@ static struct phy_driver broadcom_drivers[] = {
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM5482,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM5482),
.name = "Broadcom BCM5482",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1610,8 +1597,7 @@ static struct phy_driver broadcom_drivers[] = {
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM50610,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM50610),
.name = "Broadcom BCM50610",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1626,8 +1612,7 @@ static struct phy_driver broadcom_drivers[] = {
.resume = bcm54xx_resume,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM50610M,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM50610M),
.name = "Broadcom BCM50610M",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1642,8 +1627,7 @@ static struct phy_driver broadcom_drivers[] = {
.resume = bcm54xx_resume,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM57780,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM57780),
.name = "Broadcom BCM57780",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1656,8 +1640,7 @@ static struct phy_driver broadcom_drivers[] = {
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCMAC131,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCMAC131),
.name = "Broadcom BCMAC131",
/* PHY_BASIC_FEATURES */
.config_init = brcm_fet_config_init,
@@ -1666,8 +1649,7 @@ static struct phy_driver broadcom_drivers[] = {
.suspend = brcm_fet_suspend,
.resume = brcm_fet_config_init,
}, {
- .phy_id = PHY_ID_BCM5241,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM5241),
.name = "Broadcom BCM5241",
/* PHY_BASIC_FEATURES */
.config_init = brcm_fet_config_init,
@@ -1676,8 +1658,7 @@ static struct phy_driver broadcom_drivers[] = {
.suspend = brcm_fet_suspend,
.resume = brcm_fet_config_init,
}, {
- .phy_id = PHY_ID_BCM5221,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM5221),
.name = "Broadcom BCM5221",
/* PHY_BASIC_FEATURES */
.config_init = brcm_fet_config_init,
@@ -1688,8 +1669,7 @@ static struct phy_driver broadcom_drivers[] = {
.config_aneg = bcm5221_config_aneg,
.read_status = bcm5221_read_status,
}, {
- .phy_id = PHY_ID_BCM5395,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM5395),
.name = "Broadcom BCM5395",
.flags = PHY_IS_INTERNAL,
/* PHY_GBIT_FEATURES */
@@ -1700,8 +1680,7 @@ static struct phy_driver broadcom_drivers[] = {
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM53125,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM53125),
.name = "Broadcom BCM53125",
.flags = PHY_IS_INTERNAL,
/* PHY_GBIT_FEATURES */
@@ -1715,8 +1694,7 @@ static struct phy_driver broadcom_drivers[] = {
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM53128,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM53128),
.name = "Broadcom BCM53128",
.flags = PHY_IS_INTERNAL,
/* PHY_GBIT_FEATURES */
@@ -1730,8 +1708,7 @@ static struct phy_driver broadcom_drivers[] = {
.link_change_notify = bcm54xx_link_change_notify,
.led_brightness_set = bcm_phy_led_brightness_set,
}, {
- .phy_id = PHY_ID_BCM89610,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_MODEL(PHY_ID_BCM89610),
.name = "Broadcom BCM89610",
/* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
@@ -1747,27 +1724,27 @@ static struct phy_driver broadcom_drivers[] = {
module_phy_driver(broadcom_drivers);
static const struct mdio_device_id __maybe_unused broadcom_tbl[] = {
- { PHY_ID_BCM5411, 0xfffffff0 },
- { PHY_ID_BCM5421, 0xfffffff0 },
- { PHY_ID_BCM54210E, 0xfffffff0 },
- { PHY_ID_BCM5461, 0xfffffff0 },
- { PHY_ID_BCM54612E, 0xfffffff0 },
- { PHY_ID_BCM54616S, 0xfffffff0 },
- { PHY_ID_BCM5464, 0xfffffff0 },
- { PHY_ID_BCM5481, 0xfffffff0 },
- { PHY_ID_BCM54810, 0xfffffff0 },
- { PHY_ID_BCM54811, 0xfffffff0 },
- { PHY_ID_BCM5482, 0xfffffff0 },
- { PHY_ID_BCM50610, 0xfffffff0 },
- { PHY_ID_BCM50610M, 0xfffffff0 },
- { PHY_ID_BCM57780, 0xfffffff0 },
- { PHY_ID_BCMAC131, 0xfffffff0 },
- { PHY_ID_BCM5221, 0xfffffff0 },
- { PHY_ID_BCM5241, 0xfffffff0 },
- { PHY_ID_BCM5395, 0xfffffff0 },
- { PHY_ID_BCM53125, 0xfffffff0 },
- { PHY_ID_BCM53128, 0xfffffff0 },
- { PHY_ID_BCM89610, 0xfffffff0 },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM5411) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM5421) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM54210E) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM5461) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM54612E) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM54616S) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM5464) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM5481) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM54810) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM54811) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM5482) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM50610) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM50610M) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM57780) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCMAC131) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM5221) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM5241) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM5395) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM53125) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM53128) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_BCM89610) },
{ }
};
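
For reference, the mask swapped in throughout this file is the same 0xfffffff0 the driver used to spell out: bits 3:0 of a PHY ID carry the silicon revision, so a model match ignores only that nibble. Below is a self-contained sketch of the comparison; the demo ID values are illustrative, and the kernel's PHY_ID_MATCH_MODEL()/phy_id_compare_model() in include/linux/phy.h may differ in detail.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed equivalent of PHY_ID_MATCH_MODEL_MASK: bits 3:0 hold the
 * silicon revision, so a "model" match ignores only that nibble. */
#define MODEL_MASK 0xfffffff0u

static bool compare_model(uint32_t id1, uint32_t id2)
{
	return !((id1 ^ id2) & MODEL_MASK);
}

int main(void)
{
	uint32_t rev0 = 0x0143bd60;	/* illustrative PHY ID, revision 0 */
	uint32_t rev3 = 0x0143bd63;	/* same model, revision 3 */

	/* prints match=1: the IDs differ only in the revision nibble */
	printf("match=%d\n", compare_model(rev0, rev3));
	return 0;
}
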
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index daab555721df..74396453f5bb 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -953,30 +953,6 @@ static void decode_status_frame(struct dp83640_private *dp83640,
}
}
-static void dp83640_free_clocks(void)
-{
- struct dp83640_clock *clock;
- struct list_head *this, *next;
-
- mutex_lock(&phyter_clocks_lock);
-
- list_for_each_safe(this, next, &phyter_clocks) {
- clock = list_entry(this, struct dp83640_clock, list);
- if (!list_empty(&clock->phylist)) {
- pr_warn("phy list non-empty while unloading\n");
- BUG();
- }
- list_del(&clock->list);
- mutex_destroy(&clock->extreg_lock);
- mutex_destroy(&clock->clock_lock);
- put_device(&clock->bus->dev);
- kfree(clock->caps.pin_config);
- kfree(clock);
- }
-
- mutex_unlock(&phyter_clocks_lock);
-}
-
static void dp83640_clock_init(struct dp83640_clock *clock, struct mii_bus *bus)
{
INIT_LIST_HEAD(&clock->list);
@@ -1479,6 +1455,7 @@ static void dp83640_remove(struct phy_device *phydev)
struct dp83640_clock *clock;
struct list_head *this, *next;
struct dp83640_private *tmp, *dp83640 = phydev->priv;
+ bool remove_clock = false;
if (phydev->mdio.addr == BROADCAST_ADDR)
return;
@@ -1506,11 +1483,27 @@ static void dp83640_remove(struct phy_device *phydev)
}
}
+ if (!clock->chosen && list_empty(&clock->phylist))
+ remove_clock = true;
+
dp83640_clock_put(clock);
kfree(dp83640);
+
+ if (remove_clock) {
+ mutex_lock(&phyter_clocks_lock);
+ list_del(&clock->list);
+ mutex_unlock(&phyter_clocks_lock);
+
+ mutex_destroy(&clock->extreg_lock);
+ mutex_destroy(&clock->clock_lock);
+ put_device(&clock->bus->dev);
+ kfree(clock->caps.pin_config);
+ kfree(clock);
+ }
}
-static struct phy_driver dp83640_driver = {
+static struct phy_driver dp83640_driver[] = {
+{
.phy_id = DP83640_PHY_ID,
.phy_id_mask = 0xfffffff0,
.name = "NatSemi DP83640",
@@ -1521,26 +1514,15 @@ static struct phy_driver dp83640_driver = {
.config_init = dp83640_config_init,
.config_intr = dp83640_config_intr,
.handle_interrupt = dp83640_handle_interrupt,
+},
};
-static int __init dp83640_init(void)
-{
- return phy_driver_register(&dp83640_driver, THIS_MODULE);
-}
-
-static void __exit dp83640_exit(void)
-{
- dp83640_free_clocks();
- phy_driver_unregister(&dp83640_driver);
-}
+module_phy_driver(dp83640_driver);
MODULE_DESCRIPTION("National Semiconductor DP83640 PHY driver");
MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
MODULE_LICENSE("GPL");
-module_init(dp83640_init);
-module_exit(dp83640_exit);
-
static const struct mdio_device_id __maybe_unused dp83640_tbl[] = {
{ DP83640_PHY_ID, 0xfffffff0 },
{ }
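
With the conversion to module_phy_driver() there is no module-exit hook left to sweep all clocks, so dp83640_remove() now decides, before dropping its reference, whether the departing PHY was the clock's last user, and frees the clock afterwards. A userspace sketch of that pattern, with pthread mutexes standing in for the kernel ones and illustrative names throughout:

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct clockobj {
	pthread_mutex_t lock;
	int users;		/* stands in for clock->chosen + phylist */
};

static pthread_mutex_t clocks_lock = PTHREAD_MUTEX_INITIALIZER;

static void phy_remove(struct clockobj *clk)
{
	bool remove_clock = false;

	/* Decide while the state is still stable: nobody chose the
	 * clock and no PHY remains on its list. */
	pthread_mutex_lock(&clk->lock);
	if (--clk->users == 0)
		remove_clock = true;
	pthread_mutex_unlock(&clk->lock);

	if (remove_clock) {
		pthread_mutex_lock(&clocks_lock);
		/* unlink clk from the global clock list here */
		pthread_mutex_unlock(&clocks_lock);
		pthread_mutex_destroy(&clk->lock);
		free(clk);
	}
}

int main(void)
{
	struct clockobj *clk = calloc(1, sizeof(*clk));

	pthread_mutex_init(&clk->lock, NULL);
	clk->users = 1;
	phy_remove(clk);	/* last user frees the shared object */
	return 0;
}
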
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 033656d574b8..0e1b28f06f18 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/device/faux.h>
#include <linux/list.h>
#include <linux/mii.h>
#include <linux/phy.h>
@@ -18,83 +17,65 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/gpio/consumer.h>
#include <linux/idr.h>
#include <linux/netdevice.h>
#include <linux/linkmode.h>
#include "swphy.h"
-struct fixed_mdio_bus {
- struct mii_bus *mii_bus;
- struct list_head phys;
-};
-
struct fixed_phy {
int addr;
struct phy_device *phydev;
struct fixed_phy_status status;
- bool no_carrier;
int (*link_update)(struct net_device *, struct fixed_phy_status *);
struct list_head node;
- struct gpio_desc *link_gpiod;
};
-static struct faux_device *fdev;
-static struct fixed_mdio_bus platform_fmb = {
- .phys = LIST_HEAD_INIT(platform_fmb.phys),
-};
+static struct mii_bus *fmb_mii_bus;
+static LIST_HEAD(fmb_phys);
+
+static struct fixed_phy *fixed_phy_find(int addr)
+{
+ struct fixed_phy *fp;
+
+ list_for_each_entry(fp, &fmb_phys, node) {
+ if (fp->addr == addr)
+ return fp;
+ }
+
+ return NULL;
+}
int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier)
{
- struct fixed_mdio_bus *fmb = &platform_fmb;
struct phy_device *phydev = dev->phydev;
struct fixed_phy *fp;
if (!phydev || !phydev->mdio.bus)
return -EINVAL;
- list_for_each_entry(fp, &fmb->phys, node) {
- if (fp->addr == phydev->mdio.addr) {
- fp->no_carrier = !new_carrier;
- return 0;
- }
- }
- return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(fixed_phy_change_carrier);
+ fp = fixed_phy_find(phydev->mdio.addr);
+ if (!fp)
+ return -EINVAL;
-static void fixed_phy_update(struct fixed_phy *fp)
-{
- if (!fp->no_carrier && fp->link_gpiod)
- fp->status.link = !!gpiod_get_value_cansleep(fp->link_gpiod);
+ fp->status.link = new_carrier;
+
+ return 0;
}
+EXPORT_SYMBOL_GPL(fixed_phy_change_carrier);
static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num)
{
- struct fixed_mdio_bus *fmb = bus->priv;
struct fixed_phy *fp;
- list_for_each_entry(fp, &fmb->phys, node) {
- if (fp->addr == phy_addr) {
- struct fixed_phy_status state;
-
- fp->status.link = !fp->no_carrier;
-
- /* Issue callback if user registered it. */
- if (fp->link_update)
- fp->link_update(fp->phydev->attached_dev,
- &fp->status);
-
- /* Check the GPIO for change in status */
- fixed_phy_update(fp);
- state = fp->status;
+ fp = fixed_phy_find(phy_addr);
+ if (!fp)
+ return 0xffff;
- return swphy_read_reg(reg_num, &state);
- }
- }
+ if (fp->link_update)
+ fp->link_update(fp->phydev->attached_dev, &fp->status);
- return 0xFFFF;
+ return swphy_read_reg(reg_num, &fp->status);
}
static int fixed_mdio_write(struct mii_bus *bus, int phy_addr, int reg_num,
@@ -112,31 +93,27 @@ int fixed_phy_set_link_update(struct phy_device *phydev,
int (*link_update)(struct net_device *,
struct fixed_phy_status *))
{
- struct fixed_mdio_bus *fmb = &platform_fmb;
struct fixed_phy *fp;
if (!phydev || !phydev->mdio.bus)
return -EINVAL;
- list_for_each_entry(fp, &fmb->phys, node) {
- if (fp->addr == phydev->mdio.addr) {
- fp->link_update = link_update;
- fp->phydev = phydev;
- return 0;
- }
- }
+ fp = fixed_phy_find(phydev->mdio.addr);
+ if (!fp)
+ return -ENOENT;
- return -ENOENT;
+ fp->link_update = link_update;
+ fp->phydev = phydev;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(fixed_phy_set_link_update);
-static int fixed_phy_add_gpiod(unsigned int irq, int phy_addr,
- const struct fixed_phy_status *status,
- struct gpio_desc *gpiod)
+static int __fixed_phy_add(int phy_addr,
+ const struct fixed_phy_status *status)
{
- int ret;
- struct fixed_mdio_bus *fmb = &platform_fmb;
struct fixed_phy *fp;
+ int ret;
ret = swphy_validate_state(status);
if (ret < 0)
@@ -146,23 +123,17 @@ static int fixed_phy_add_gpiod(unsigned int irq, int phy_addr,
if (!fp)
return -ENOMEM;
- if (irq != PHY_POLL)
- fmb->mii_bus->irq[phy_addr] = irq;
-
fp->addr = phy_addr;
fp->status = *status;
- fp->link_gpiod = gpiod;
-
- fixed_phy_update(fp);
- list_add_tail(&fp->node, &fmb->phys);
+ list_add_tail(&fp->node, &fmb_phys);
return 0;
}
-int fixed_phy_add(int phy_addr, const struct fixed_phy_status *status)
+void fixed_phy_add(const struct fixed_phy_status *status)
{
- return fixed_phy_add_gpiod(PHY_POLL, phy_addr, status, NULL);
+ __fixed_phy_add(0, status);
}
EXPORT_SYMBOL_GPL(fixed_phy_add);
@@ -170,87 +141,39 @@ static DEFINE_IDA(phy_fixed_ida);
static void fixed_phy_del(int phy_addr)
{
- struct fixed_mdio_bus *fmb = &platform_fmb;
- struct fixed_phy *fp, *tmp;
-
- list_for_each_entry_safe(fp, tmp, &fmb->phys, node) {
- if (fp->addr == phy_addr) {
- list_del(&fp->node);
- if (fp->link_gpiod)
- gpiod_put(fp->link_gpiod);
- kfree(fp);
- ida_free(&phy_fixed_ida, phy_addr);
- return;
- }
- }
-}
+ struct fixed_phy *fp;
-#ifdef CONFIG_OF_GPIO
-static struct gpio_desc *fixed_phy_get_gpiod(struct device_node *np)
-{
- struct device_node *fixed_link_node;
- struct gpio_desc *gpiod;
-
- if (!np)
- return NULL;
-
- fixed_link_node = of_get_child_by_name(np, "fixed-link");
- if (!fixed_link_node)
- return NULL;
-
- /*
- * As the fixed link is just a device tree node without any
- * Linux device associated with it, we simply have obtain
- * the GPIO descriptor from the device tree like this.
- */
- gpiod = fwnode_gpiod_get_index(of_fwnode_handle(fixed_link_node),
- "link", 0, GPIOD_IN, "mdio");
- if (IS_ERR(gpiod) && PTR_ERR(gpiod) != -EPROBE_DEFER) {
- if (PTR_ERR(gpiod) != -ENOENT)
- pr_err("error getting GPIO for fixed link %pOF, proceed without\n",
- fixed_link_node);
- gpiod = NULL;
- }
- of_node_put(fixed_link_node);
+ fp = fixed_phy_find(phy_addr);
+ if (!fp)
+ return;
- return gpiod;
-}
-#else
-static struct gpio_desc *fixed_phy_get_gpiod(struct device_node *np)
-{
- return NULL;
+ list_del(&fp->node);
+ kfree(fp);
+ ida_free(&phy_fixed_ida, phy_addr);
}
-#endif
struct phy_device *fixed_phy_register(const struct fixed_phy_status *status,
struct device_node *np)
{
- struct fixed_mdio_bus *fmb = &platform_fmb;
- struct gpio_desc *gpiod;
struct phy_device *phy;
int phy_addr;
int ret;
- if (!fmb->mii_bus || fmb->mii_bus->state != MDIOBUS_REGISTERED)
+ if (!fmb_mii_bus || fmb_mii_bus->state != MDIOBUS_REGISTERED)
return ERR_PTR(-EPROBE_DEFER);
- /* Check if we have a GPIO associated with this fixed phy */
- gpiod = fixed_phy_get_gpiod(np);
- if (IS_ERR(gpiod))
- return ERR_CAST(gpiod);
-
/* Get the next available PHY address, up to PHY_MAX_ADDR */
phy_addr = ida_alloc_max(&phy_fixed_ida, PHY_MAX_ADDR - 1, GFP_KERNEL);
if (phy_addr < 0)
return ERR_PTR(phy_addr);
- ret = fixed_phy_add_gpiod(PHY_POLL, phy_addr, status, gpiod);
+ ret = __fixed_phy_add(phy_addr, status);
if (ret < 0) {
ida_free(&phy_fixed_ida, phy_addr);
return ERR_PTR(ret);
}
- phy = get_phy_device(fmb->mii_bus, phy_addr, false);
+ phy = get_phy_device(fmb_mii_bus, phy_addr, false);
if (IS_ERR(phy)) {
fixed_phy_del(phy_addr);
return ERR_PTR(-EINVAL);
@@ -309,56 +232,44 @@ void fixed_phy_unregister(struct phy_device *phy)
phy_device_remove(phy);
of_node_put(phy->mdio.dev.of_node);
fixed_phy_del(phy->mdio.addr);
+ phy_device_free(phy);
}
EXPORT_SYMBOL_GPL(fixed_phy_unregister);
static int __init fixed_mdio_bus_init(void)
{
- struct fixed_mdio_bus *fmb = &platform_fmb;
int ret;
- fdev = faux_device_create("Fixed MDIO bus", NULL, NULL);
- if (!fdev)
- return -ENODEV;
-
- fmb->mii_bus = mdiobus_alloc();
- if (fmb->mii_bus == NULL) {
- ret = -ENOMEM;
- goto err_mdiobus_reg;
- }
+ fmb_mii_bus = mdiobus_alloc();
+ if (!fmb_mii_bus)
+ return -ENOMEM;
- snprintf(fmb->mii_bus->id, MII_BUS_ID_SIZE, "fixed-0");
- fmb->mii_bus->name = "Fixed MDIO Bus";
- fmb->mii_bus->priv = fmb;
- fmb->mii_bus->parent = &fdev->dev;
- fmb->mii_bus->read = &fixed_mdio_read;
- fmb->mii_bus->write = &fixed_mdio_write;
- fmb->mii_bus->phy_mask = ~0;
+ snprintf(fmb_mii_bus->id, MII_BUS_ID_SIZE, "fixed-0");
+ fmb_mii_bus->name = "Fixed MDIO Bus";
+ fmb_mii_bus->read = &fixed_mdio_read;
+ fmb_mii_bus->write = &fixed_mdio_write;
+ fmb_mii_bus->phy_mask = ~0;
- ret = mdiobus_register(fmb->mii_bus);
+ ret = mdiobus_register(fmb_mii_bus);
if (ret)
goto err_mdiobus_alloc;
return 0;
err_mdiobus_alloc:
- mdiobus_free(fmb->mii_bus);
-err_mdiobus_reg:
- faux_device_destroy(fdev);
+ mdiobus_free(fmb_mii_bus);
return ret;
}
module_init(fixed_mdio_bus_init);
static void __exit fixed_mdio_bus_exit(void)
{
- struct fixed_mdio_bus *fmb = &platform_fmb;
struct fixed_phy *fp, *tmp;
- mdiobus_unregister(fmb->mii_bus);
- mdiobus_free(fmb->mii_bus);
- faux_device_destroy(fdev);
+ mdiobus_unregister(fmb_mii_bus);
+ mdiobus_free(fmb_mii_bus);
- list_for_each_entry_safe(fp, tmp, &fmb->phys, node) {
+ list_for_each_entry_safe(fp, tmp, &fmb_phys, node) {
list_del(&fp->node);
kfree(fp);
}
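
The fixed_phy rework collapses three open-coded scans of the bus's PHY list into a single fixed_phy_find() helper that callers test once. A self-contained sketch of the shape (the kernel version walks a struct list_head; the pointer chain here is only illustrative):

#include <stddef.h>
#include <stdio.h>

struct fixed_phy {
	int addr;
	struct fixed_phy *next;
};

static struct fixed_phy *fmb_phys;	/* replaces platform_fmb.phys */

static struct fixed_phy *fixed_phy_find(int addr)
{
	for (struct fixed_phy *fp = fmb_phys; fp; fp = fp->next)
		if (fp->addr == addr)
			return fp;
	return NULL;
}

int main(void)
{
	struct fixed_phy a = { .addr = 3, .next = NULL };

	fmb_phys = &a;
	/* callers now act on the result or bail out with one test */
	printf("%s\n", fixed_phy_find(3) ? "found" : "absent");
	return 0;
}
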
diff --git a/drivers/net/phy/marvell-88x2222.c b/drivers/net/phy/marvell-88x2222.c
index fad2f54c1eac..894bcee61e65 100644
--- a/drivers/net/phy/marvell-88x2222.c
+++ b/drivers/net/phy/marvell-88x2222.c
@@ -475,21 +475,20 @@ static int mv2222_config_init(struct phy_device *phydev)
static int mv2222_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
{
- DECLARE_PHY_INTERFACE_MASK(interfaces);
struct phy_device *phydev = upstream;
+ const struct sfp_module_caps *caps;
phy_interface_t sfp_interface;
struct mv2222_data *priv;
struct device *dev;
int ret;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_supported) = { 0, };
-
priv = phydev->priv;
dev = &phydev->mdio.dev;
- sfp_parse_support(phydev->sfp_bus, id, sfp_supported, interfaces);
- phydev->port = sfp_parse_port(phydev->sfp_bus, id, sfp_supported);
- sfp_interface = sfp_select_interface(phydev->sfp_bus, sfp_supported);
+ caps = sfp_get_module_caps(phydev->sfp_bus);
+
+ phydev->port = caps->port;
+ sfp_interface = sfp_select_interface(phydev->sfp_bus, caps->link_modes);
dev_info(dev, "%s SFP module inserted\n", phy_modes(sfp_interface));
@@ -502,7 +501,7 @@ static int mv2222_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
}
priv->line_interface = sfp_interface;
- linkmode_and(priv->supported, phydev->supported, sfp_supported);
+ linkmode_and(priv->supported, phydev->supported, caps->link_modes);
ret = mv2222_config_line(phydev);
if (ret < 0)
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 623292948fa7..c248c90510ae 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1902,6 +1902,43 @@ error:
return err;
}
+/* m88e1510_resume
+ *
+ * The 88e1510 PHY has an erratum where the PHY downshift counter is not cleared
+ * after the PHY has been suspended (BMCR_PDOWN set) and later resumed (BMCR_PDOWN
+ * cleared). This can cause the link to intermittently downshift to a lower speed.
+ *
+ * Disabling and re-enabling the downshift feature clears the counter, allowing
+ * the PHY to retry gigabit link negotiation up to the programmed retry count
+ * before downshifting. This behavior has been observed on copper links.
+ */
+static int m88e1510_resume(struct phy_device *phydev)
+{
+ int err;
+ u8 cnt = 0;
+
+ err = marvell_resume(phydev);
+ if (err < 0)
+ return err;
+
+ /* read downshift counter value */
+ err = m88e1011_get_downshift(phydev, &cnt);
+ if (err < 0)
+ return err;
+
+ if (cnt) {
+ /* disable downshift to clear the stale counter */
+ err = m88e1011_set_downshift(phydev, 0);
+ if (err < 0)
+ return err;
+
+ /* re-enable downshift with the previous counter value */
+ err = m88e1011_set_downshift(phydev, cnt);
+ }
+
+ return err;
+}
+
static int marvell_aneg_done(struct phy_device *phydev)
{
int retval = phy_read(phydev, MII_M1011_PHY_STATUS);
@@ -3563,20 +3600,18 @@ static int marvell_probe(struct phy_device *phydev)
static int m88e1510_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
{
- DECLARE_PHY_INTERFACE_MASK(interfaces);
struct phy_device *phydev = upstream;
+ const struct sfp_module_caps *caps;
phy_interface_t interface;
struct device *dev;
int oldpage;
int ret = 0;
u16 mode;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
-
dev = &phydev->mdio.dev;
- sfp_parse_support(phydev->sfp_bus, id, supported, interfaces);
- interface = sfp_select_interface(phydev->sfp_bus, supported);
+ caps = sfp_get_module_caps(phydev->sfp_bus);
+ interface = sfp_select_interface(phydev->sfp_bus, caps->link_modes);
dev_info(dev, "%s SFP module inserted\n", phy_modes(interface));
@@ -3923,7 +3958,7 @@ static struct phy_driver marvell_drivers[] = {
.handle_interrupt = marvell_handle_interrupt,
.get_wol = m88e1318_get_wol,
.set_wol = m88e1318_set_wol,
- .resume = marvell_resume,
+ .resume = m88e1510_resume,
.suspend = marvell_suspend,
.read_page = marvell_read_page,
.write_page = marvell_write_page,
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 13e81dff42c1..8fd42131cdbf 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -466,12 +466,11 @@ static int mv3310_set_edpd(struct phy_device *phydev, u16 edpd)
static int mv3310_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
{
struct phy_device *phydev = upstream;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
- DECLARE_PHY_INTERFACE_MASK(interfaces);
+ const struct sfp_module_caps *caps;
phy_interface_t iface;
- sfp_parse_support(phydev->sfp_bus, id, support, interfaces);
- iface = sfp_select_interface(phydev->sfp_bus, support);
+ caps = sfp_get_module_caps(phydev->sfp_bus);
+ iface = sfp_select_interface(phydev->sfp_bus, caps->link_modes);
if (iface != PHY_INTERFACE_MODE_10GBASER) {
dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
diff --git a/drivers/net/phy/mdio-boardinfo.c b/drivers/net/phy/mdio-boardinfo.c
deleted file mode 100644
index d3184e8f12ec..000000000000
--- a/drivers/net/phy/mdio-boardinfo.c
+++ /dev/null
@@ -1,79 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * mdio-boardinfo - Collect pre-declarations for MDIO devices
- */
-
-#include <linux/export.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/phy.h>
-#include <linux/slab.h>
-
-#include "mdio-boardinfo.h"
-
-static LIST_HEAD(mdio_board_list);
-static DEFINE_MUTEX(mdio_board_lock);
-
-struct mdio_board_entry {
- struct list_head list;
- struct mdio_board_info board_info;
-};
-
-/**
- * mdiobus_setup_mdiodev_from_board_info - create and setup MDIO devices
- * from pre-collected board specific MDIO information
- * @bus: Bus the board_info belongs to
- * @cb: Callback to create device on bus
- * Context: can sleep
- */
-void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus,
- int (*cb)
- (struct mii_bus *bus,
- struct mdio_board_info *bi))
-{
- struct mdio_board_entry *be, *tmp;
-
- mutex_lock(&mdio_board_lock);
- list_for_each_entry_safe(be, tmp, &mdio_board_list, list) {
- struct mdio_board_info *bi = &be->board_info;
-
- if (strcmp(bus->id, bi->bus_id))
- continue;
-
- mutex_unlock(&mdio_board_lock);
- cb(bus, bi);
- mutex_lock(&mdio_board_lock);
- }
- mutex_unlock(&mdio_board_lock);
-}
-EXPORT_SYMBOL(mdiobus_setup_mdiodev_from_board_info);
-
-/**
- * mdiobus_register_board_info - register MDIO devices for a given board
- * @info: array of devices descriptors
- * @n: number of descriptors provided
- * Context: can sleep
- *
- * The board info passed can be marked with __initdata but be pointers
- * such as platform_data etc. are copied as-is
- */
-int mdiobus_register_board_info(const struct mdio_board_info *info,
- unsigned int n)
-{
- struct mdio_board_entry *be;
-
- be = kcalloc(n, sizeof(*be), GFP_KERNEL);
- if (!be)
- return -ENOMEM;
-
- for (int i = 0; i < n; i++, be++) {
- be->board_info = info[i];
- mutex_lock(&mdio_board_lock);
- list_add_tail(&be->list, &mdio_board_list);
- mutex_unlock(&mdio_board_lock);
- }
-
- return 0;
-}
-EXPORT_SYMBOL(mdiobus_register_board_info);
diff --git a/drivers/net/phy/mdio-boardinfo.h b/drivers/net/phy/mdio-boardinfo.h
deleted file mode 100644
index 0878b77878d4..000000000000
--- a/drivers/net/phy/mdio-boardinfo.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * mdio-boardinfo.h - board info interface internal to the mdio_bus
- * component
- */
-
-#ifndef __MDIO_BOARD_INFO_H
-#define __MDIO_BOARD_INFO_H
-
-struct mii_bus;
-struct mdio_board_info;
-
-void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus,
- int (*cb)
- (struct mii_bus *bus,
- struct mdio_board_info *bi));
-
-#endif /* __MDIO_BOARD_INFO_H */
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index fda2e27c1810..cad6ed3aa10b 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -91,6 +91,7 @@ int mdiobus_unregister_device(struct mdio_device *mdiodev)
if (mdiodev->bus->mdio_map[mdiodev->addr] != mdiodev)
return -EINVAL;
+ gpiod_put(mdiodev->reset_gpio);
reset_control_put(mdiodev->reset_ctrl);
mdiodev->bus->mdio_map[mdiodev->addr] = NULL;
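
This hunk and the mdio_bus_provider.c one below move the reset-GPIO release from the bus-wide unregister loop into per-device teardown, next to reset_control_put(), so removing a single device releases its GPIO too. The unconditional call is safe because gpiod_put(), like reset_control_put(), tolerates a NULL argument. A userspace mock of the symmetry, with free() standing in for the NULL-safe put helpers:

#include <stdlib.h>

struct mdio_device {
	void *reset_gpio;	/* may never have been claimed */
	void *reset_ctrl;
};

static void mdiobus_unregister_device(struct mdio_device *d)
{
	/* free(NULL) is a no-op, mirroring gpiod_put()/reset_control_put() */
	free(d->reset_gpio);
	free(d->reset_ctrl);
	d->reset_gpio = d->reset_ctrl = NULL;
}

int main(void)
{
	struct mdio_device d = { 0 };

	mdiobus_unregister_device(&d);	/* safe with nothing claimed */
	return 0;
}
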
diff --git a/drivers/net/phy/mdio_bus_provider.c b/drivers/net/phy/mdio_bus_provider.c
index 48dc4bf85125..a2391d4b7e5c 100644
--- a/drivers/net/phy/mdio_bus_provider.c
+++ b/drivers/net/phy/mdio_bus_provider.c
@@ -29,8 +29,6 @@
#include <linux/uaccess.h>
#include <linux/unistd.h>
-#include "mdio-boardinfo.h"
-
/**
* mdiobus_alloc_size - allocate a mii_bus structure
* @size: extra amount of memory to allocate for private storage.
@@ -132,35 +130,6 @@ static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
}
#endif
-/**
- * mdiobus_create_device - create a full MDIO device given
- * a mdio_board_info structure
- * @bus: MDIO bus to create the devices on
- * @bi: mdio_board_info structure describing the devices
- *
- * Returns 0 on success or < 0 on error.
- */
-static int mdiobus_create_device(struct mii_bus *bus,
- struct mdio_board_info *bi)
-{
- struct mdio_device *mdiodev;
- int ret = 0;
-
- mdiodev = mdio_device_create(bus, bi->mdio_addr);
- if (IS_ERR(mdiodev))
- return -ENODEV;
-
- strscpy(mdiodev->modalias, bi->modalias,
- sizeof(mdiodev->modalias));
- mdiodev->dev.platform_data = (void *)bi->platform_data;
-
- ret = mdio_device_register(mdiodev);
- if (ret)
- mdio_device_free(mdiodev);
-
- return ret;
-}
-
static struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr, bool c45)
{
struct phy_device *phydev = ERR_PTR(-ENODEV);
@@ -404,8 +373,6 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
goto error;
}
- mdiobus_setup_mdiodev_from_board_info(bus, mdiobus_create_device);
-
bus->state = MDIOBUS_REGISTERED;
dev_dbg(&bus->dev, "probed\n");
return 0;
@@ -443,9 +410,6 @@ void mdiobus_unregister(struct mii_bus *bus)
if (!mdiodev)
continue;
- if (mdiodev->reset_gpio)
- gpiod_put(mdiodev->reset_gpio);
-
mdiodev->device_remove(mdiodev);
mdiodev->device_free(mdiodev);
}
diff --git a/drivers/net/phy/mediatek/mtk-2p5ge.c b/drivers/net/phy/mediatek/mtk-2p5ge.c
index e147eab523ef..de8a41a1841d 100644
--- a/drivers/net/phy/mediatek/mtk-2p5ge.c
+++ b/drivers/net/phy/mediatek/mtk-2p5ge.c
@@ -249,8 +249,80 @@ static int mt798x_2p5ge_phy_get_rate_matching(struct phy_device *phydev,
return RATE_MATCH_PAUSE;
}
+static const unsigned long supported_triggers =
+ BIT(TRIGGER_NETDEV_FULL_DUPLEX) |
+ BIT(TRIGGER_NETDEV_LINK) |
+ BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_LINK_2500) |
+ BIT(TRIGGER_NETDEV_RX) |
+ BIT(TRIGGER_NETDEV_TX);
+
+static int mt798x_2p5ge_phy_led_blink_set(struct phy_device *phydev, u8 index,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ bool blinking = false;
+ int err = 0;
+
+ err = mtk_phy_led_num_dly_cfg(index, delay_on, delay_off, &blinking);
+ if (err < 0)
+ return err;
+
+ err = mtk_phy_hw_led_blink_set(phydev, index, blinking);
+ if (err)
+ return err;
+
+ if (blinking)
+ mtk_phy_hw_led_on_set(phydev, index, MTK_2P5GPHY_LED_ON_MASK,
+ false);
+
+ return 0;
+}
+
+static int mt798x_2p5ge_phy_led_brightness_set(struct phy_device *phydev,
+ u8 index,
+ enum led_brightness value)
+{
+ int err;
+
+ err = mtk_phy_hw_led_blink_set(phydev, index, false);
+ if (err)
+ return err;
+
+ return mtk_phy_hw_led_on_set(phydev, index, MTK_2P5GPHY_LED_ON_MASK,
+ (value != LED_OFF));
+}
+
+static int mt798x_2p5ge_phy_led_hw_is_supported(struct phy_device *phydev,
+ u8 index, unsigned long rules)
+{
+ return mtk_phy_led_hw_is_supported(phydev, index, rules,
+ supported_triggers);
+}
+
+static int mt798x_2p5ge_phy_led_hw_control_get(struct phy_device *phydev,
+ u8 index, unsigned long *rules)
+{
+ return mtk_phy_led_hw_ctrl_get(phydev, index, rules,
+ MTK_2P5GPHY_LED_ON_SET,
+ MTK_2P5GPHY_LED_RX_BLINK_SET,
+ MTK_2P5GPHY_LED_TX_BLINK_SET);
+}
+
+static int mt798x_2p5ge_phy_led_hw_control_set(struct phy_device *phydev,
+ u8 index, unsigned long rules)
+{
+ return mtk_phy_led_hw_ctrl_set(phydev, index, rules,
+ MTK_2P5GPHY_LED_ON_SET,
+ MTK_2P5GPHY_LED_RX_BLINK_SET,
+ MTK_2P5GPHY_LED_TX_BLINK_SET);
+}
+
static int mt798x_2p5ge_phy_probe(struct phy_device *phydev)
{
+ struct mtk_socphy_priv *priv;
struct pinctrl *pinctrl;
int ret;
@@ -273,19 +345,34 @@ static int mt798x_2p5ge_phy_probe(struct phy_device *phydev)
if (ret < 0)
return ret;
- /* Setup LED */
+ /* Setup LED. By default, LED0 turns on/off as the link comes up/down,
+ * while LED1 blinks on tx/rx activity.
+ */
phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MTK_PHY_LED0_ON_CTRL,
- MTK_PHY_LED_ON_POLARITY | MTK_PHY_LED_ON_LINK10 |
- MTK_PHY_LED_ON_LINK100 | MTK_PHY_LED_ON_LINK1000 |
- MTK_PHY_LED_ON_LINK2500);
- phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MTK_PHY_LED1_ON_CTRL,
- MTK_PHY_LED_ON_FDX | MTK_PHY_LED_ON_HDX);
+ MTK_PHY_LED_ON_POLARITY | MTK_2P5GPHY_LED_ON_SET);
+ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MTK_PHY_LED0_BLINK_CTRL,
+ MTK_2P5GPHY_LED_TX_BLINK_SET |
+ MTK_2P5GPHY_LED_RX_BLINK_SET);
+ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MTK_PHY_LED1_ON_CTRL,
+ MTK_PHY_LED_ON_FDX | MTK_PHY_LED_ON_HDX |
+ MTK_2P5GPHY_LED_ON_SET);
+ phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MTK_PHY_LED1_BLINK_CTRL,
+ MTK_2P5GPHY_LED_TX_BLINK_SET |
+ MTK_2P5GPHY_LED_RX_BLINK_SET);
/* Switch pinctrl after setting polarity to avoid bogus blinking */
pinctrl = devm_pinctrl_get_select(&phydev->mdio.dev, "i2p5gbe-led");
if (IS_ERR(pinctrl))
dev_err(&phydev->mdio.dev, "Fail to set LED pins!\n");
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(struct mtk_socphy_priv),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ phydev->priv = priv;
+
+ mtk_phy_leds_state_init(phydev);
+
return 0;
}
@@ -303,6 +390,11 @@ static struct phy_driver mtk_2p5gephy_driver[] = {
.resume = genphy_resume,
.read_page = mtk_phy_read_page,
.write_page = mtk_phy_write_page,
+ .led_blink_set = mt798x_2p5ge_phy_led_blink_set,
+ .led_brightness_set = mt798x_2p5ge_phy_led_brightness_set,
+ .led_hw_is_supported = mt798x_2p5ge_phy_led_hw_is_supported,
+ .led_hw_control_get = mt798x_2p5ge_phy_led_hw_control_get,
+ .led_hw_control_set = mt798x_2p5ge_phy_led_hw_control_set,
},
};
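
The new LED hooks follow the usual phylib offload contract: a netdev trigger rule set can be handled in hardware only if every requested bit falls within supported_triggers. A minimal sketch of that subset check (the real mtk_phy_led_hw_is_supported() helper may differ in detail):

#include <errno.h>
#include <stdio.h>

#define TRIGGER_LINK	(1UL << 0)	/* illustrative trigger bits */
#define TRIGGER_RX	(1UL << 1)
#define TRIGGER_TX	(1UL << 2)

static int led_hw_is_supported(unsigned long rules, unsigned long supported)
{
	/* any requested trigger outside the supported set vetoes offload */
	if (rules & ~supported)
		return -EOPNOTSUPP;
	return 0;
}

int main(void)
{
	unsigned long supported = TRIGGER_LINK | TRIGGER_RX | TRIGGER_TX;

	printf("%d\n", led_hw_is_supported(TRIGGER_LINK | TRIGGER_TX,
					   supported));	/* 0: offloadable */
	return 0;
}
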
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 605b0315b4cb..79ce3eb6752b 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -107,6 +107,7 @@
#define LAN8814_INTC 0x18
#define LAN8814_INTS 0x1B
+#define LAN8814_INT_FLF BIT(15)
#define LAN8814_INT_LINK_DOWN BIT(2)
#define LAN8814_INT_LINK_UP BIT(0)
#define LAN8814_INT_LINK (LAN8814_INT_LINK_UP |\
@@ -266,6 +267,8 @@
#define LAN8814_LED_CTRL_1 0x0
#define LAN8814_LED_CTRL_1_KSZ9031_LED_MODE_ BIT(6)
+#define LAN8814_LED_CTRL_2 0x1
+#define LAN8814_LED_CTRL_2_LED1_COM_DIS BIT(8)
/* PHY Control 1 */
#define MII_KSZPHY_CTRL_1 0x1e
@@ -362,6 +365,8 @@
/* Delay used to get the second part from the LTC */
#define LAN8841_GET_SEC_LTC_DELAY (500 * NSEC_PER_MSEC)
+#define LAN8842_REV_8832 0x8832
+
struct kszphy_hw_stat {
const char *string;
u8 reg;
@@ -448,6 +453,19 @@ struct kszphy_priv {
struct kszphy_phy_stats phy_stats;
};
+struct lan8842_phy_stats {
+ u64 rx_packets;
+ u64 rx_errors;
+ u64 tx_packets;
+ u64 tx_errors;
+};
+
+struct lan8842_priv {
+ struct lan8842_phy_stats phy_stats;
+ struct kszphy_ptp_priv ptp_priv;
+ u16 rev;
+};
+
static const struct kszphy_type lan8814_type = {
.led_mode_reg = ~LAN8814_LED_CTRL_1,
.cable_diag_reg = LAN8814_CABLE_DIAG,
@@ -2790,6 +2808,60 @@ static int ksz886x_cable_test_get_status(struct phy_device *phydev,
return ret;
}
+/**
+ * LAN8814_PAGE_PCS - Selects Extended Page 0.
+ *
+ * This page contains timers used for auto-negotiation, debug registers, and
+ * a register to configure fast link failure.
+ */
+#define LAN8814_PAGE_PCS 0
+
+/**
+ * LAN8814_PAGE_AFE_PMA - Selects Extended Page 1.
+ *
+ * This page appears to control the Analog Front-End (AFE) and Physical
+ * Medium Attachment (PMA) layers. It is used to access registers like
+ * LAN8814_PD_CONTROLS and LAN8814_LINK_QUALITY.
+ */
+#define LAN8814_PAGE_AFE_PMA 1
+
+/**
+ * LAN8814_PAGE_PCS_DIGITAL - Selects Extended Page 2.
+ *
+ * This page seems dedicated to the Physical Coding Sublayer (PCS) and other
+ * digital logic. It is used for MDI-X alignment (LAN8814_ALIGN_SWAP) and EEE
+ * state (LAN8814_EEE_STATE) in the LAN8814, and is repurposed for statistics
+ * and self-test counters in the LAN8842.
+ */
+#define LAN8814_PAGE_PCS_DIGITAL 2
+
+/**
+ * LAN8814_PAGE_COMMON_REGS - Selects Extended Page 4.
+ *
+ * This page contains device-common registers that affect the entire chip.
+ * It includes controls for chip-level resets, strap status, GPIO,
+ * QSGMII, the shared 1588 PTP block, and the PVT monitor.
+ */
+#define LAN8814_PAGE_COMMON_REGS 4
+
+/**
+ * LAN8814_PAGE_PORT_REGS - Selects Extended Page 5.
+ *
+ * This page contains port-specific registers that must be accessed
+ * on a per-port basis. It includes controls for port LEDs, QSGMII PCS,
+ * rate adaptation FIFOs, and the per-port 1588 TSU block.
+ */
+#define LAN8814_PAGE_PORT_REGS 5
+
+/**
+ * LAN8814_PAGE_SYSTEM_CTRL - Selects Extended Page 31.
+ *
+ * This page appears to hold fundamental system or global controls. In the
+ * driver, it is used by the related LAN8804 to access the
+ * LAN8814_CLOCK_MANAGEMENT register.
+ */
+#define LAN8814_PAGE_SYSTEM_CTRL 31
+
#define LAN_EXT_PAGE_ACCESS_CONTROL 0x16
#define LAN_EXT_PAGE_ACCESS_ADDRESS_DATA 0x17
#define LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC 0x4000
@@ -2840,6 +2912,27 @@ static int lanphy_write_page_reg(struct phy_device *phydev, int page, u16 addr,
return val;
}
+static int lanphy_modify_page_reg(struct phy_device *phydev, int page, u16 addr,
+ u16 mask, u16 set)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+ __phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page);
+ __phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr);
+ __phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL,
+ (page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC));
+ ret = __phy_modify_changed(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA,
+ mask, set);
+ phy_unlock_mdio_bus(phydev);
+
+ if (ret < 0)
+ phydev_err(phydev, "__phy_modify_changed() failed: %pe\n",
+ ERR_PTR(ret));
+
+ return ret;
+}
+
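/* A userspace sketch (not the kernel code) of what the helper above adds:
 * page select plus read-modify-write kept in one critical section, so the
 * many lanphy_modify_page_reg() call sites later in this patch cannot race
 * other MDIO users between the read and the write.  Names illustrative. */
#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t mdio_lock = PTHREAD_MUTEX_INITIALIZER;
static uint16_t fake_reg;		/* stands in for the paged register */

static int modify_reg(uint16_t mask, uint16_t set)
{
	pthread_mutex_lock(&mdio_lock);	/* like phy_lock_mdio_bus() */
	uint16_t new_val = (fake_reg & ~mask) | set;
	int changed = new_val != fake_reg;

	fake_reg = new_val;
	pthread_mutex_unlock(&mdio_lock);
	return changed;			/* __phy_modify_changed() style */
}

int main(void)
{
	return modify_reg(0x0001, 0x0001);	/* returns 1: bit 0 changed */
}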
static int lan8814_config_ts_intr(struct phy_device *phydev, bool enable)
{
u16 val = 0;
@@ -2850,35 +2943,46 @@ static int lan8814_config_ts_intr(struct phy_device *phydev, bool enable)
PTP_TSU_INT_EN_PTP_RX_TS_EN_ |
PTP_TSU_INT_EN_PTP_RX_TS_OVRFL_EN_;
- return lanphy_write_page_reg(phydev, 5, PTP_TSU_INT_EN, val);
+ return lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TSU_INT_EN, val);
}
static void lan8814_ptp_rx_ts_get(struct phy_device *phydev,
u32 *seconds, u32 *nano_seconds, u16 *seq_id)
{
- *seconds = lanphy_read_page_reg(phydev, 5, PTP_RX_INGRESS_SEC_HI);
+ *seconds = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_RX_INGRESS_SEC_HI);
*seconds = (*seconds << 16) |
- lanphy_read_page_reg(phydev, 5, PTP_RX_INGRESS_SEC_LO);
+ lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_RX_INGRESS_SEC_LO);
- *nano_seconds = lanphy_read_page_reg(phydev, 5, PTP_RX_INGRESS_NS_HI);
+ *nano_seconds = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_RX_INGRESS_NS_HI);
*nano_seconds = ((*nano_seconds & 0x3fff) << 16) |
- lanphy_read_page_reg(phydev, 5, PTP_RX_INGRESS_NS_LO);
+ lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_RX_INGRESS_NS_LO);
- *seq_id = lanphy_read_page_reg(phydev, 5, PTP_RX_MSG_HEADER2);
+ *seq_id = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_RX_MSG_HEADER2);
}
static void lan8814_ptp_tx_ts_get(struct phy_device *phydev,
u32 *seconds, u32 *nano_seconds, u16 *seq_id)
{
- *seconds = lanphy_read_page_reg(phydev, 5, PTP_TX_EGRESS_SEC_HI);
+ *seconds = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_EGRESS_SEC_HI);
*seconds = *seconds << 16 |
- lanphy_read_page_reg(phydev, 5, PTP_TX_EGRESS_SEC_LO);
+ lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_EGRESS_SEC_LO);
- *nano_seconds = lanphy_read_page_reg(phydev, 5, PTP_TX_EGRESS_NS_HI);
+ *nano_seconds = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_EGRESS_NS_HI);
*nano_seconds = ((*nano_seconds & 0x3fff) << 16) |
- lanphy_read_page_reg(phydev, 5, PTP_TX_EGRESS_NS_LO);
+ lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_EGRESS_NS_LO);
- *seq_id = lanphy_read_page_reg(phydev, 5, PTP_TX_MSG_HEADER2);
+ *seq_id = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_MSG_HEADER2);
}
static int lan8814_ts_info(struct mii_timestamper *mii_ts, struct kernel_ethtool_ts_info *info)
@@ -2912,11 +3016,11 @@ static void lan8814_flush_fifo(struct phy_device *phydev, bool egress)
int i;
for (i = 0; i < FIFO_SIZE; ++i)
- lanphy_read_page_reg(phydev, 5,
+ lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
egress ? PTP_TX_MSG_HEADER2 : PTP_RX_MSG_HEADER2);
/* Read to clear overflow status bit */
- lanphy_read_page_reg(phydev, 5, PTP_TSU_INT_STS);
+ lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS, PTP_TSU_INT_STS);
}
static int lan8814_hwtstamp(struct mii_timestamper *mii_ts,
@@ -2928,7 +3032,6 @@ static int lan8814_hwtstamp(struct mii_timestamper *mii_ts,
struct lan8814_ptp_rx_ts *rx_ts, *tmp;
int txcfg = 0, rxcfg = 0;
int pkt_ts_enable;
- int tx_mod;
ptp_priv->hwts_tx_type = config->tx_type;
ptp_priv->rx_filter = config->rx_filter;
@@ -2967,21 +3070,28 @@ static int lan8814_hwtstamp(struct mii_timestamper *mii_ts,
rxcfg |= PTP_RX_PARSE_CONFIG_IPV4_EN_ | PTP_RX_PARSE_CONFIG_IPV6_EN_;
txcfg |= PTP_TX_PARSE_CONFIG_IPV4_EN_ | PTP_TX_PARSE_CONFIG_IPV6_EN_;
}
- lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_RX_PARSE_CONFIG, rxcfg);
- lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_PARSE_CONFIG, txcfg);
+ lanphy_write_page_reg(ptp_priv->phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_RX_PARSE_CONFIG, rxcfg);
+ lanphy_write_page_reg(ptp_priv->phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_PARSE_CONFIG, txcfg);
pkt_ts_enable = PTP_TIMESTAMP_EN_SYNC_ | PTP_TIMESTAMP_EN_DREQ_ |
PTP_TIMESTAMP_EN_PDREQ_ | PTP_TIMESTAMP_EN_PDRES_;
- lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_RX_TIMESTAMP_EN, pkt_ts_enable);
- lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_TIMESTAMP_EN, pkt_ts_enable);
+ lanphy_write_page_reg(ptp_priv->phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_RX_TIMESTAMP_EN, pkt_ts_enable);
+ lanphy_write_page_reg(ptp_priv->phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_TIMESTAMP_EN, pkt_ts_enable);
- tx_mod = lanphy_read_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD);
if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC) {
- lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD,
- tx_mod | PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
+ lanphy_modify_page_reg(ptp_priv->phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_MOD,
+ PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_,
+ PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
} else if (ptp_priv->hwts_tx_type == HWTSTAMP_TX_ON) {
- lanphy_write_page_reg(ptp_priv->phydev, 5, PTP_TX_MOD,
- tx_mod & ~PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_);
+ lanphy_modify_page_reg(ptp_priv->phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_MOD,
+ PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_,
+ 0);
}
if (config->rx_filter != HWTSTAMP_FILTER_NONE)
@@ -3103,29 +3213,41 @@ static bool lan8814_rxtstamp(struct mii_timestamper *mii_ts, struct sk_buff *skb
static void lan8814_ptp_clock_set(struct phy_device *phydev,
time64_t sec, u32 nsec)
{
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_LO, lower_16_bits(sec));
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_MID, upper_16_bits(sec));
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_HI, upper_32_bits(sec));
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_NS_LO, lower_16_bits(nsec));
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_NS_HI, upper_16_bits(nsec));
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_CLOCK_SET_SEC_LO, lower_16_bits(sec));
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_CLOCK_SET_SEC_MID, upper_16_bits(sec));
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_CLOCK_SET_SEC_HI, upper_32_bits(sec));
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_CLOCK_SET_NS_LO, lower_16_bits(nsec));
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_CLOCK_SET_NS_HI, upper_16_bits(nsec));
- lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_LOAD_);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_CMD_CTL,
+ PTP_CMD_CTL_PTP_CLOCK_LOAD_);
}
static void lan8814_ptp_clock_get(struct phy_device *phydev,
time64_t *sec, u32 *nsec)
{
- lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_READ_);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_CMD_CTL,
+ PTP_CMD_CTL_PTP_CLOCK_READ_);
- *sec = lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_HI);
+ *sec = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_CLOCK_READ_SEC_HI);
*sec <<= 16;
- *sec |= lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_MID);
+ *sec |= lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_CLOCK_READ_SEC_MID);
*sec <<= 16;
- *sec |= lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_LO);
+ *sec |= lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_CLOCK_READ_SEC_LO);
- *nsec = lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_NS_HI);
+ *nsec = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_CLOCK_READ_NS_HI);
*nsec <<= 16;
- *nsec |= lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_NS_LO);
+ *nsec |= lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_CLOCK_READ_NS_LO);
}
static int lan8814_ptpci_gettime64(struct ptp_clock_info *ptpci,
@@ -3164,14 +3286,18 @@ static void lan8814_ptp_set_target(struct phy_device *phydev, int event,
s64 start_sec, u32 start_nsec)
{
/* Set the start time */
- lanphy_write_page_reg(phydev, 4, LAN8814_PTP_CLOCK_TARGET_SEC_LO(event),
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_PTP_CLOCK_TARGET_SEC_LO(event),
lower_16_bits(start_sec));
- lanphy_write_page_reg(phydev, 4, LAN8814_PTP_CLOCK_TARGET_SEC_HI(event),
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_PTP_CLOCK_TARGET_SEC_HI(event),
upper_16_bits(start_sec));
- lanphy_write_page_reg(phydev, 4, LAN8814_PTP_CLOCK_TARGET_NS_LO(event),
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_PTP_CLOCK_TARGET_NS_LO(event),
lower_16_bits(start_nsec));
- lanphy_write_page_reg(phydev, 4, LAN8814_PTP_CLOCK_TARGET_NS_HI(event),
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_PTP_CLOCK_TARGET_NS_HI(event),
upper_16_bits(start_nsec) & 0x3fff);
}
@@ -3269,9 +3395,11 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev,
adjustment_value_lo = adjustment_value & 0xffff;
adjustment_value_hi = (adjustment_value >> 16) & 0x3fff;
- lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_LO,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_LTC_STEP_ADJ_LO,
adjustment_value_lo);
- lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_HI,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_LTC_STEP_ADJ_HI,
PTP_LTC_STEP_ADJ_DIR_ |
adjustment_value_hi);
seconds -= ((s32)adjustment_value);
@@ -3289,9 +3417,11 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev,
adjustment_value_lo = adjustment_value & 0xffff;
adjustment_value_hi = (adjustment_value >> 16) & 0x3fff;
- lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_LO,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_LTC_STEP_ADJ_LO,
adjustment_value_lo);
- lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_HI,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_LTC_STEP_ADJ_HI,
adjustment_value_hi);
seconds += ((s32)adjustment_value);
@@ -3299,8 +3429,8 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev,
set_seconds += adjustment_value;
lan8814_ptp_update_target(phydev, set_seconds);
}
- lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL,
- PTP_CMD_CTL_PTP_LTC_STEP_SEC_);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_CMD_CTL, PTP_CMD_CTL_PTP_LTC_STEP_SEC_);
}
if (nano_seconds) {
u16 nano_seconds_lo;
@@ -3309,12 +3439,14 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev,
nano_seconds_lo = nano_seconds & 0xffff;
nano_seconds_hi = (nano_seconds >> 16) & 0x3fff;
- lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_LO,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_LTC_STEP_ADJ_LO,
nano_seconds_lo);
- lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_HI,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_LTC_STEP_ADJ_HI,
PTP_LTC_STEP_ADJ_DIR_ |
nano_seconds_hi);
- lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_CMD_CTL,
PTP_CMD_CTL_PTP_LTC_STEP_NSEC_);
}
}
@@ -3356,8 +3488,10 @@ static int lan8814_ptpci_adjfine(struct ptp_clock_info *ptpci, long scaled_ppm)
kszphy_rate_adj_hi |= PTP_CLOCK_RATE_ADJ_DIR_;
mutex_lock(&shared->shared_lock);
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_RATE_ADJ_HI, kszphy_rate_adj_hi);
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_RATE_ADJ_LO, kszphy_rate_adj_lo);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_CLOCK_RATE_ADJ_HI,
+ kszphy_rate_adj_hi);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_CLOCK_RATE_ADJ_LO,
+ kszphy_rate_adj_lo);
mutex_unlock(&shared->shared_lock);
return 0;
@@ -3366,17 +3500,17 @@ static int lan8814_ptpci_adjfine(struct ptp_clock_info *ptpci, long scaled_ppm)
static void lan8814_ptp_set_reload(struct phy_device *phydev, int event,
s64 period_sec, u32 period_nsec)
{
- lanphy_write_page_reg(phydev, 4,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
LAN8814_PTP_CLOCK_TARGET_RELOAD_SEC_LO(event),
lower_16_bits(period_sec));
- lanphy_write_page_reg(phydev, 4,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
LAN8814_PTP_CLOCK_TARGET_RELOAD_SEC_HI(event),
upper_16_bits(period_sec));
- lanphy_write_page_reg(phydev, 4,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
LAN8814_PTP_CLOCK_TARGET_RELOAD_NS_LO(event),
lower_16_bits(period_nsec));
- lanphy_write_page_reg(phydev, 4,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
LAN8814_PTP_CLOCK_TARGET_RELOAD_NS_HI(event),
upper_16_bits(period_nsec) & 0x3fff);
}
@@ -3384,73 +3518,72 @@ static void lan8814_ptp_set_reload(struct phy_device *phydev, int event,
static void lan8814_ptp_enable_event(struct phy_device *phydev, int event,
int pulse_width)
{
- u16 val;
-
- val = lanphy_read_page_reg(phydev, 4, LAN8814_PTP_GENERAL_CONFIG);
- /* Set the pulse width of the event */
- val &= ~(LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_MASK(event));
- /* Make sure that the target clock will be incremented each time when
+ /* Set the pulse width of the event,
+ * make sure that the target clock is incremented each time the
+ * local time reaches or passes it,
+ * and set the polarity high
*/
- val |= LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_SET(event, pulse_width);
- val &= ~(LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event));
- /* Set the polarity high */
- val |= LAN8814_PTP_GENERAL_CONFIG_POLARITY_X(event);
- lanphy_write_page_reg(phydev, 4, LAN8814_PTP_GENERAL_CONFIG, val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, LAN8814_PTP_GENERAL_CONFIG,
+ LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_MASK(event) |
+ LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_SET(event, pulse_width) |
+ LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event) |
+ LAN8814_PTP_GENERAL_CONFIG_POLARITY_X(event),
+ LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_SET(event, pulse_width) |
+ LAN8814_PTP_GENERAL_CONFIG_POLARITY_X(event));
}
static void lan8814_ptp_disable_event(struct phy_device *phydev, int event)
{
- u16 val;
-
/* Set target to too far in the future, effectively disabling it */
lan8814_ptp_set_target(phydev, event, 0xFFFFFFFF, 0);
/* And then reload once it reaches the target */
- val = lanphy_read_page_reg(phydev, 4, LAN8814_PTP_GENERAL_CONFIG);
- val |= LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event);
- lanphy_write_page_reg(phydev, 4, LAN8814_PTP_GENERAL_CONFIG, val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, LAN8814_PTP_GENERAL_CONFIG,
+ LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event),
+ LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event));
}
static void lan8814_ptp_perout_off(struct phy_device *phydev, int pin)
{
- u16 val;
-
/* Disable gpio alternate function,
* 1: select as gpio,
* 0: select alt func
*/
- val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin));
- val |= LAN8814_GPIO_EN_BIT(pin);
- lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin), val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_GPIO_EN_ADDR(pin),
+ LAN8814_GPIO_EN_BIT(pin),
+ LAN8814_GPIO_EN_BIT(pin));
- val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin));
- val &= ~LAN8814_GPIO_DIR_BIT(pin);
- lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin), val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_GPIO_DIR_ADDR(pin),
+ LAN8814_GPIO_DIR_BIT(pin),
+ 0);
- val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_BUF_ADDR(pin));
- val &= ~LAN8814_GPIO_BUF_BIT(pin);
- lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_BUF_ADDR(pin), val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_GPIO_BUF_ADDR(pin),
+ LAN8814_GPIO_BUF_BIT(pin),
+ 0);
}
static void lan8814_ptp_perout_on(struct phy_device *phydev, int pin)
{
- int val;
-
/* Set as gpio output */
- val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin));
- val |= LAN8814_GPIO_DIR_BIT(pin);
- lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin), val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_GPIO_DIR_ADDR(pin),
+ LAN8814_GPIO_DIR_BIT(pin),
+ LAN8814_GPIO_DIR_BIT(pin));
/* Enable gpio 0:for alternate function, 1:gpio */
- val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin));
- val &= ~LAN8814_GPIO_EN_BIT(pin);
- lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin), val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_GPIO_EN_ADDR(pin),
+ LAN8814_GPIO_EN_BIT(pin),
+ 0);
/* Set buffer type to push pull */
- val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_BUF_ADDR(pin));
- val |= LAN8814_GPIO_BUF_BIT(pin);
- lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_BUF_ADDR(pin), val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_GPIO_BUF_ADDR(pin),
+ LAN8814_GPIO_BUF_BIT(pin),
+ LAN8814_GPIO_BUF_BIT(pin));
}
static int lan8814_ptp_perout(struct ptp_clock_info *ptpci,
@@ -3565,61 +3698,64 @@ static int lan8814_ptp_perout(struct ptp_clock_info *ptpci,
static void lan8814_ptp_extts_on(struct phy_device *phydev, int pin, u32 flags)
{
- u16 tmp;
-
/* Set as gpio input */
- tmp = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin));
- tmp &= ~LAN8814_GPIO_DIR_BIT(pin);
- lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin), tmp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_GPIO_DIR_ADDR(pin),
+ LAN8814_GPIO_DIR_BIT(pin),
+ 0);
/* Map the pin to ltc pin 0 of the capture map registers */
- tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_MAP_LO);
- tmp |= pin;
- lanphy_write_page_reg(phydev, 4, PTP_GPIO_CAP_MAP_LO, tmp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_CAP_MAP_LO, pin, pin);
/* Enable capture on the edges of the ltc pin */
- tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_EN);
if (flags & PTP_RISING_EDGE)
- tmp |= PTP_GPIO_CAP_EN_GPIO_RE_CAPTURE_ENABLE(0);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_CAP_EN,
+ PTP_GPIO_CAP_EN_GPIO_RE_CAPTURE_ENABLE(0),
+ PTP_GPIO_CAP_EN_GPIO_RE_CAPTURE_ENABLE(0));
if (flags & PTP_FALLING_EDGE)
- tmp |= PTP_GPIO_CAP_EN_GPIO_FE_CAPTURE_ENABLE(0);
- lanphy_write_page_reg(phydev, 4, PTP_GPIO_CAP_EN, tmp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_CAP_EN,
+ PTP_GPIO_CAP_EN_GPIO_FE_CAPTURE_ENABLE(0),
+ PTP_GPIO_CAP_EN_GPIO_FE_CAPTURE_ENABLE(0));
/* Enable interrupt top interrupt */
- tmp = lanphy_read_page_reg(phydev, 4, PTP_COMMON_INT_ENA);
- tmp |= PTP_COMMON_INT_ENA_GPIO_CAP_EN;
- lanphy_write_page_reg(phydev, 4, PTP_COMMON_INT_ENA, tmp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_COMMON_INT_ENA,
+ PTP_COMMON_INT_ENA_GPIO_CAP_EN,
+ PTP_COMMON_INT_ENA_GPIO_CAP_EN);
}
static void lan8814_ptp_extts_off(struct phy_device *phydev, int pin)
{
- u16 tmp;
-
/* Set as gpio out */
- tmp = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin));
- tmp |= LAN8814_GPIO_DIR_BIT(pin);
- lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin), tmp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_GPIO_DIR_ADDR(pin),
+ LAN8814_GPIO_DIR_BIT(pin),
+ LAN8814_GPIO_DIR_BIT(pin));
/* Enable alternate, 0:for alternate function, 1:gpio */
- tmp = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin));
- tmp &= ~LAN8814_GPIO_EN_BIT(pin);
- lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin), tmp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_GPIO_EN_ADDR(pin),
+ LAN8814_GPIO_EN_BIT(pin),
+ 0);
/* Clear the mapping of pin to registers 0 of the capture registers */
- tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_MAP_LO);
- tmp &= ~GENMASK(3, 0);
- lanphy_write_page_reg(phydev, 4, PTP_GPIO_CAP_MAP_LO, tmp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_CAP_MAP_LO,
+ GENMASK(3, 0),
+ 0);
/* Disable capture on both of the edges */
- tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_EN);
- tmp &= ~PTP_GPIO_CAP_EN_GPIO_RE_CAPTURE_ENABLE(pin);
- tmp &= ~PTP_GPIO_CAP_EN_GPIO_FE_CAPTURE_ENABLE(pin);
- lanphy_write_page_reg(phydev, 4, PTP_GPIO_CAP_EN, tmp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_GPIO_CAP_EN,
+ PTP_GPIO_CAP_EN_GPIO_RE_CAPTURE_ENABLE(pin) |
+ PTP_GPIO_CAP_EN_GPIO_FE_CAPTURE_ENABLE(pin),
+ 0);
/* Disable interrupt top interrupt */
- tmp = lanphy_read_page_reg(phydev, 4, PTP_COMMON_INT_ENA);
- tmp &= ~PTP_COMMON_INT_ENA_GPIO_CAP_EN;
- lanphy_write_page_reg(phydev, 4, PTP_COMMON_INT_ENA, tmp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_COMMON_INT_ENA,
+ PTP_COMMON_INT_ENA_GPIO_CAP_EN,
+ 0);
}
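The pattern throughout this conversion is the same: an open-coded read/modify/write triplet collapses into one lanphy_modify_page_reg(phydev, page, reg, mask, set) call. A minimal userspace sketch of the presumed (old & ~mask) | set semantics, with a fake register file standing in for the paged MDIO access (all names here are illustrative, not the driver's API):

/* Sketch of the read-modify-write semantics lanphy_modify_page_reg()
 * presumably wraps: new = (old & ~mask) | set.
 */
#include <assert.h>
#include <stdint.h>

static uint16_t regs[32];			/* fake register file */

static uint16_t read_reg(unsigned int addr)
{
	return regs[addr];
}

static void write_reg(unsigned int addr, uint16_t val)
{
	regs[addr] = val;
}

static void modify_reg(unsigned int addr, uint16_t mask, uint16_t set)
{
	write_reg(addr, (read_reg(addr) & ~mask) | set);
}

int main(void)
{
	regs[4] = 0x00f0;
	modify_reg(4, 0x00ff, 0x000a);	/* clear low byte, set 0x0a */
	assert(regs[4] == 0x000a);
	modify_reg(4, 0x0008, 0);	/* mask == bit, set == 0: clear bit */
	assert(regs[4] == 0x0002);
	return 0;
}

Passing the same value as mask and set asserts the bits; passing set == 0 clears them, which is exactly how the extts_on()/extts_off() conversions above use it.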
static int lan8814_ptp_extts(struct ptp_clock_info *ptpci,
@@ -3749,7 +3885,8 @@ static void lan8814_get_tx_ts(struct kszphy_ptp_priv *ptp_priv)
/* If other timestamps are available in the FIFO,
* process them.
*/
- reg = lanphy_read_page_reg(phydev, 5, PTP_CAP_INFO);
+ reg = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_CAP_INFO);
} while (PTP_CAP_INFO_TX_TS_CNT_GET_(reg) > 0);
}
@@ -3822,7 +3959,8 @@ static void lan8814_get_rx_ts(struct kszphy_ptp_priv *ptp_priv)
/* If other timestamps are available in the FIFO,
* process them.
*/
- reg = lanphy_read_page_reg(phydev, 5, PTP_CAP_INFO);
+ reg = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_CAP_INFO);
} while (PTP_CAP_INFO_RX_TS_CNT_GET_(reg) > 0);
}
@@ -3859,31 +3997,40 @@ static int lan8814_gpio_process_cap(struct lan8814_shared_priv *shared)
/* This is 0 because whichever pin was the input, it was mapped to
 * LTC GPIO pin 0
 */
- tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_SEL);
- tmp |= PTP_GPIO_SEL_GPIO_SEL(0);
- lanphy_write_page_reg(phydev, 4, PTP_GPIO_SEL, tmp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_GPIO_SEL,
+ PTP_GPIO_SEL_GPIO_SEL(0),
+ PTP_GPIO_SEL_GPIO_SEL(0));
- tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_STS);
+ tmp = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_CAP_STS);
if (!(tmp & PTP_GPIO_CAP_STS_PTP_GPIO_RE_STS(0)) &&
!(tmp & PTP_GPIO_CAP_STS_PTP_GPIO_FE_STS(0)))
return -1;
if (tmp & BIT(0)) {
- sec = lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_SEC_HI_CAP);
+ sec = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_RE_LTC_SEC_HI_CAP);
sec <<= 16;
- sec |= lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_SEC_LO_CAP);
+ sec |= lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_RE_LTC_SEC_LO_CAP);
- nsec = lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_NS_HI_CAP) & 0x3fff;
+ nsec = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_RE_LTC_NS_HI_CAP) & 0x3fff;
nsec <<= 16;
- nsec |= lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_NS_LO_CAP);
+ nsec |= lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_RE_LTC_NS_LO_CAP);
} else {
- sec = lanphy_read_page_reg(phydev, 4, PTP_GPIO_FE_LTC_SEC_HI_CAP);
+ sec = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_FE_LTC_SEC_HI_CAP);
sec <<= 16;
- sec |= lanphy_read_page_reg(phydev, 4, PTP_GPIO_FE_LTC_SEC_LO_CAP);
+ sec |= lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_FE_LTC_SEC_LO_CAP);
- nsec = lanphy_read_page_reg(phydev, 4, PTP_GPIO_FE_LTC_NS_HI_CAP) & 0x3fff;
+ nsec = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_FE_LTC_NS_HI_CAP) & 0x3fff;
nsec <<= 16;
- nsec |= lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_NS_LO_CAP);
+ nsec |= lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ PTP_GPIO_FE_LTC_NS_LO_CAP);
}
ptp_event.index = 0;
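The captured timestamp above is assembled from 16-bit register halves, high half first; the nanoseconds high half carries only 14 valid bits, hence the 0x3fff mask. A userspace sketch of that assembly, with invented register values:

/* Fold 16-bit hi/lo register halves into one value, as the capture
 * readout above does; hi_mask strips invalid bits of the high half.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t combine_hi_lo(uint16_t hi, uint16_t lo, uint16_t hi_mask)
{
	uint32_t val = hi & hi_mask;

	val <<= 16;
	val |= lo;
	return val;
}

int main(void)
{
	/* sec: full 32 bits; nsec: 30 bits (14 + 16) */
	uint32_t sec = combine_hi_lo(0x0001, 0x86a0, 0xffff);
	uint32_t nsec = combine_hi_lo(0xc005, 0x1234, 0x3fff);

	assert(sec == 0x000186a0);	/* 100000 seconds */
	assert(nsec == 0x00051234);	/* top 2 bits dropped */
	return 0;
}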
@@ -3908,19 +4055,17 @@ static int lan8814_handle_gpio_interrupt(struct phy_device *phydev, u16 status)
static int lan8804_config_init(struct phy_device *phydev)
{
- int val;
-
/* MDI-X setting for swap A,B transmit */
- val = lanphy_read_page_reg(phydev, 2, LAN8804_ALIGN_SWAP);
- val &= ~LAN8804_ALIGN_TX_A_B_SWAP_MASK;
- val |= LAN8804_ALIGN_TX_A_B_SWAP;
- lanphy_write_page_reg(phydev, 2, LAN8804_ALIGN_SWAP, val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_PCS_DIGITAL, LAN8804_ALIGN_SWAP,
+ LAN8804_ALIGN_TX_A_B_SWAP_MASK,
+ LAN8804_ALIGN_TX_A_B_SWAP);
/* Make sure that the PHY will not stop generating the clock when the
* link partner goes down
*/
- lanphy_write_page_reg(phydev, 31, LAN8814_CLOCK_MANAGEMENT, 0x27e);
- lanphy_read_page_reg(phydev, 1, LAN8814_LINK_QUALITY);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_SYSTEM_CTRL,
+ LAN8814_CLOCK_MANAGEMENT, 0x27e);
+ lanphy_read_page_reg(phydev, LAN8814_PAGE_AFE_PMA, LAN8814_LINK_QUALITY);
return 0;
}
@@ -4002,7 +4147,8 @@ static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
}
while (true) {
- irq_status = lanphy_read_page_reg(phydev, 5, PTP_TSU_INT_STS);
+ irq_status = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TSU_INT_STS);
if (!irq_status)
break;
@@ -4030,7 +4176,7 @@ static int lan8814_config_intr(struct phy_device *phydev)
{
int err;
- lanphy_write_page_reg(phydev, 4, LAN8814_INTR_CTRL_REG,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, LAN8814_INTR_CTRL_REG,
LAN8814_INTR_CTRL_REG_POLARITY |
LAN8814_INTR_CTRL_REG_INTR_ENABLE);
@@ -4056,35 +4202,41 @@ static void lan8814_ptp_init(struct phy_device *phydev)
{
struct kszphy_priv *priv = phydev->priv;
struct kszphy_ptp_priv *ptp_priv = &priv->ptp_priv;
- u32 temp;
if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK) ||
!IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING))
return;
- lanphy_write_page_reg(phydev, 5, TSU_HARD_RESET, TSU_HARD_RESET_);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ TSU_HARD_RESET, TSU_HARD_RESET_);
- temp = lanphy_read_page_reg(phydev, 5, PTP_TX_MOD);
- temp |= PTP_TX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_;
- lanphy_write_page_reg(phydev, 5, PTP_TX_MOD, temp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_PORT_REGS, PTP_TX_MOD,
+ PTP_TX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_,
+ PTP_TX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_);
- temp = lanphy_read_page_reg(phydev, 5, PTP_RX_MOD);
- temp |= PTP_RX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_;
- lanphy_write_page_reg(phydev, 5, PTP_RX_MOD, temp);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_PORT_REGS, PTP_RX_MOD,
+ PTP_RX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_,
+ PTP_RX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_);
- lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_CONFIG, 0);
- lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_CONFIG, 0);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_RX_PARSE_CONFIG, 0);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_PARSE_CONFIG, 0);
/* Remove the default register configs related to L2 and IP */
- lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_L2_ADDR_EN, 0);
- lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_L2_ADDR_EN, 0);
- lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_IP_ADDR_EN, 0);
- lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_IP_ADDR_EN, 0);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_PARSE_L2_ADDR_EN, 0);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_RX_PARSE_L2_ADDR_EN, 0);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TX_PARSE_IP_ADDR_EN, 0);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_RX_PARSE_IP_ADDR_EN, 0);
/* Disable checking for minorVersionPTP field */
- lanphy_write_page_reg(phydev, 5, PTP_RX_VERSION,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS, PTP_RX_VERSION,
PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0));
- lanphy_write_page_reg(phydev, 5, PTP_TX_VERSION,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS, PTP_TX_VERSION,
PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0));
skb_queue_head_init(&ptp_priv->tx_queue);
@@ -4105,7 +4257,8 @@ static void lan8814_ptp_init(struct phy_device *phydev)
phydev->default_timestamp = true;
}
-static int lan8814_ptp_probe_once(struct phy_device *phydev)
+static int __lan8814_ptp_probe_once(struct phy_device *phydev, char *pin_name,
+ int gpios)
{
struct lan8814_shared_priv *shared = phy_package_get_priv(phydev);
@@ -4113,18 +4266,18 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev)
mutex_init(&shared->shared_lock);
shared->pin_config = devm_kmalloc_array(&phydev->mdio.dev,
- LAN8814_PTP_GPIO_NUM,
+ gpios,
sizeof(*shared->pin_config),
GFP_KERNEL);
if (!shared->pin_config)
return -ENOMEM;
- for (int i = 0; i < LAN8814_PTP_GPIO_NUM; i++) {
+ for (int i = 0; i < gpios; i++) {
struct ptp_pin_desc *ptp_pin = &shared->pin_config[i];
memset(ptp_pin, 0, sizeof(*ptp_pin));
snprintf(ptp_pin->name,
- sizeof(ptp_pin->name), "lan8814_ptp_pin_%02d", i);
+ sizeof(ptp_pin->name), "%s_%02d", pin_name, i);
ptp_pin->index = i;
ptp_pin->func = PTP_PF_NONE;
}
@@ -4134,7 +4287,7 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev)
shared->ptp_clock_info.max_adj = 31249999;
shared->ptp_clock_info.n_alarm = 0;
shared->ptp_clock_info.n_ext_ts = LAN8814_PTP_EXTTS_NUM;
- shared->ptp_clock_info.n_pins = LAN8814_PTP_GPIO_NUM;
+ shared->ptp_clock_info.n_pins = gpios;
shared->ptp_clock_info.pps = 0;
shared->ptp_clock_info.supported_extts_flags = PTP_RISING_EDGE |
PTP_FALLING_EDGE |
@@ -4153,8 +4306,8 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev)
shared->ptp_clock = ptp_clock_register(&shared->ptp_clock_info,
&phydev->mdio.dev);
if (IS_ERR(shared->ptp_clock)) {
- phydev_err(phydev, "ptp_clock_register failed %lu\n",
- PTR_ERR(shared->ptp_clock));
+ phydev_err(phydev, "ptp_clock_register failed %pe\n",
+ shared->ptp_clock);
return -EINVAL;
}
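Switching the format from %lu with PTR_ERR() to %pe with the raw pointer makes the log print a symbolic name such as -ENOMEM instead of a bare number. This relies on the kernel's ERR_PTR convention, sketched below for userspace under the assumption that errors occupy the top 4095 pointer values; the helpers mirror, but are not, the kernel's ERR_PTR()/IS_ERR()/PTR_ERR():

/* Userspace sketch of the ERR_PTR encoding: one return slot carries
 * either a valid pointer or a -errno value. The kernel's %pe printk
 * extension then prints the symbolic error name.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO	4095

static void *err_ptr(long error)
{
	return (void *)error;
}

static int is_err(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static long ptr_err(const void *ptr)
{
	return (long)ptr;
}

int main(void)
{
	void *clock = err_ptr(-ENOMEM);	/* pretend registration failed */

	if (is_err(clock))
		printf("ptp_clock_register failed: %ld\n", ptr_err(clock));
	return 0;
}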
@@ -4169,50 +4322,60 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev)
/* EP.4 is shared between all the PHYs in the package and can be
 * accessed by any of them
 */
- lanphy_write_page_reg(phydev, 4, LTC_HARD_RESET, LTC_HARD_RESET_);
- lanphy_write_page_reg(phydev, 4, PTP_OPERATING_MODE,
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LTC_HARD_RESET, LTC_HARD_RESET_);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_OPERATING_MODE,
PTP_OPERATING_MODE_STANDALONE_);
/* Enable ptp to run LTC clock for ptp and gpio 1PPS operation */
- lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, PTP_CMD_CTL_PTP_ENABLE_);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, PTP_CMD_CTL,
+ PTP_CMD_CTL_PTP_ENABLE_);
return 0;
}
+static int lan8814_ptp_probe_once(struct phy_device *phydev)
+{
+ return __lan8814_ptp_probe_once(phydev, "lan8814_ptp_pin",
+ LAN8814_PTP_GPIO_NUM);
+}
+
static void lan8814_setup_led(struct phy_device *phydev, int val)
{
int temp;
- temp = lanphy_read_page_reg(phydev, 5, LAN8814_LED_CTRL_1);
+ temp = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ LAN8814_LED_CTRL_1);
if (val)
temp |= LAN8814_LED_CTRL_1_KSZ9031_LED_MODE_;
else
temp &= ~LAN8814_LED_CTRL_1_KSZ9031_LED_MODE_;
- lanphy_write_page_reg(phydev, 5, LAN8814_LED_CTRL_1, temp);
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ LAN8814_LED_CTRL_1, temp);
}
static int lan8814_config_init(struct phy_device *phydev)
{
struct kszphy_priv *lan8814 = phydev->priv;
- int val;
/* Reset the PHY */
- val = lanphy_read_page_reg(phydev, 4, LAN8814_QSGMII_SOFT_RESET);
- val |= LAN8814_QSGMII_SOFT_RESET_BIT;
- lanphy_write_page_reg(phydev, 4, LAN8814_QSGMII_SOFT_RESET, val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_QSGMII_SOFT_RESET,
+ LAN8814_QSGMII_SOFT_RESET_BIT,
+ LAN8814_QSGMII_SOFT_RESET_BIT);
/* Disable ANEG with QSGMII PCS Host side */
- val = lanphy_read_page_reg(phydev, 5, LAN8814_QSGMII_PCS1G_ANEG_CONFIG);
- val &= ~LAN8814_QSGMII_PCS1G_ANEG_CONFIG_ANEG_ENA;
- lanphy_write_page_reg(phydev, 5, LAN8814_QSGMII_PCS1G_ANEG_CONFIG, val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ LAN8814_QSGMII_PCS1G_ANEG_CONFIG,
+ LAN8814_QSGMII_PCS1G_ANEG_CONFIG_ANEG_ENA,
+ 0);
/* MDI-X setting for swap A,B transmit */
- val = lanphy_read_page_reg(phydev, 2, LAN8814_ALIGN_SWAP);
- val &= ~LAN8814_ALIGN_TX_A_B_SWAP_MASK;
- val |= LAN8814_ALIGN_TX_A_B_SWAP;
- lanphy_write_page_reg(phydev, 2, LAN8814_ALIGN_SWAP, val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_PCS_DIGITAL, LAN8814_ALIGN_SWAP,
+ LAN8814_ALIGN_TX_A_B_SWAP_MASK,
+ LAN8814_ALIGN_TX_A_B_SWAP);
if (lan8814->led_mode >= 0)
lan8814_setup_led(phydev, lan8814->led_mode);
@@ -4243,29 +4406,24 @@ static int lan8814_release_coma_mode(struct phy_device *phydev)
static void lan8814_clear_2psp_bit(struct phy_device *phydev)
{
- u16 val;
-
/* It was noticed that when traffic is passing through the PHY and the
 * cable is removed, the LED stayed on even though there is no
 * link
 */
- val = lanphy_read_page_reg(phydev, 2, LAN8814_EEE_STATE);
- val &= ~LAN8814_EEE_STATE_MASK2P5P;
- lanphy_write_page_reg(phydev, 2, LAN8814_EEE_STATE, val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_PCS_DIGITAL, LAN8814_EEE_STATE,
+ LAN8814_EEE_STATE_MASK2P5P,
+ 0);
}
static void lan8814_update_meas_time(struct phy_device *phydev)
{
- u16 val;
-
/* By setting the measure time to a value of 0xb this will allow cables
* longer than 100m to be used. This configuration can be used
* regardless of the mode of operation of the PHY
*/
- val = lanphy_read_page_reg(phydev, 1, LAN8814_PD_CONTROLS);
- val &= ~LAN8814_PD_CONTROLS_PD_MEAS_TIME_MASK;
- val |= LAN8814_PD_CONTROLS_PD_MEAS_TIME_VAL;
- lanphy_write_page_reg(phydev, 1, LAN8814_PD_CONTROLS, val);
+ lanphy_modify_page_reg(phydev, LAN8814_PAGE_AFE_PMA, LAN8814_PD_CONTROLS,
+ LAN8814_PD_CONTROLS_PD_MEAS_TIME_MASK,
+ LAN8814_PD_CONTROLS_PD_MEAS_TIME_VAL);
}
static int lan8814_probe(struct phy_device *phydev)
@@ -4288,7 +4446,7 @@ static int lan8814_probe(struct phy_device *phydev)
/* Strap-in value for PHY address, below register read gives starting
* phy address value
*/
- addr = lanphy_read_page_reg(phydev, 4, 0) & 0x1F;
+ addr = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS, 0) & 0x1F;
devm_phy_package_join(&phydev->mdio.dev, phydev,
addr, sizeof(struct lan8814_shared_priv));
@@ -5582,8 +5740,8 @@ static int lan8841_probe(struct phy_device *phydev)
ptp_priv->ptp_clock = ptp_clock_register(&ptp_priv->ptp_clock_info,
&phydev->mdio.dev);
if (IS_ERR(ptp_priv->ptp_clock)) {
- phydev_err(phydev, "ptp_clock_register failed: %lu\n",
- PTR_ERR(ptp_priv->ptp_clock));
+ phydev_err(phydev, "ptp_clock_register failed: %pe\n",
+ ptp_priv->ptp_clock);
return -EINVAL;
}
@@ -5643,10 +5801,367 @@ static int ksz9131_resume(struct phy_device *phydev)
return kszphy_resume(phydev);
}
+#define LAN8842_PTP_GPIO_NUM 16
+
+static int lan8842_ptp_probe_once(struct phy_device *phydev)
+{
+ return __lan8814_ptp_probe_once(phydev, "lan8842_ptp_pin",
+ LAN8842_PTP_GPIO_NUM);
+}
+
+#define LAN8842_STRAP_REG 0 /* 0x0 */
+#define LAN8842_STRAP_REG_PHYADDR_MASK GENMASK(4, 0)
+#define LAN8842_SKU_REG 11 /* 0x0b */
+#define LAN8842_SELF_TEST 14 /* 0x0e */
+#define LAN8842_SELF_TEST_RX_CNT_ENA BIT(8)
+#define LAN8842_SELF_TEST_TX_CNT_ENA BIT(4)
+
+static int lan8842_probe(struct phy_device *phydev)
+{
+ struct lan8842_priv *priv;
+ int addr;
+ int ret;
+
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ /* Similar to the lan8814, this PHY has a pin which needs to be pulled
+ * down before it passes any traffic. Therefore reuse the same
+ * function as the lan8814
+ */
+ ret = lan8814_release_coma_mode(phydev);
+ if (ret)
+ return ret;
+
+ /* Enable to count the RX and TX packets */
+ ret = lanphy_write_page_reg(phydev, LAN8814_PAGE_PCS_DIGITAL,
+ LAN8842_SELF_TEST,
+ LAN8842_SELF_TEST_RX_CNT_ENA |
+ LAN8842_SELF_TEST_TX_CNT_ENA);
+ if (ret < 0)
+ return ret;
+
+ /* The lan8832 revision doesn't support PTP, therefore don't register
+ * any PTP clock
+ */
+ ret = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8842_SKU_REG);
+ if (ret < 0)
+ return ret;
+
+ priv->rev = ret;
+ if (priv->rev == LAN8842_REV_8832)
+ return 0;
+
+ /* The lan8814 and lan8842 share the same IP for the PTP block; the
+ * only difference is the number of GPIOs. Make sure the lan8842 also
+ * initializes the shared data pointer, as it is used by all the
+ * lan8814 PTP functions. The lan8842 doesn't have multiple PHYs in
+ * the same package.
+ */
+ addr = lanphy_read_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8842_STRAP_REG);
+ if (addr < 0)
+ return addr;
+ addr &= LAN8842_STRAP_REG_PHYADDR_MASK;
+
+ ret = devm_phy_package_join(&phydev->mdio.dev, phydev, addr,
+ sizeof(struct lan8814_shared_priv));
+ if (ret)
+ return ret;
+
+ if (phy_package_init_once(phydev)) {
+ ret = lan8842_ptp_probe_once(phydev);
+ if (ret)
+ return ret;
+ }
+
+ lan8814_ptp_init(phydev);
+
+ return 0;
+}
+
+static int lan8842_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Reset the PHY */
+ ret = lanphy_modify_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_QSGMII_SOFT_RESET,
+ LAN8814_QSGMII_SOFT_RESET_BIT,
+ LAN8814_QSGMII_SOFT_RESET_BIT);
+ if (ret < 0)
+ return ret;
+
+ /* Even when the GPIOs are set to control the LEDs, their behaviour is
+ * wrong: they do not blink when there is traffic. To fix this, the
+ * extended LED mode needs to be selected
+ */
+ ret = lanphy_modify_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ LAN8814_LED_CTRL_1,
+ LAN8814_LED_CTRL_1_KSZ9031_LED_MODE_, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = lanphy_modify_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ LAN8814_LED_CTRL_2,
+ LAN8814_LED_CTRL_2_LED1_COM_DIS,
+ LAN8814_LED_CTRL_2_LED1_COM_DIS);
+ if (ret < 0)
+ return ret;
+
+ /* To allow the PHY to control the LEDs, its GPIO pins must be in
+ * function mode and not GPIO mode. Apparently the default is GPIO
+ * mode, even though the datasheet says it is function mode. Therefore
+ * set the mode explicitly.
+ */
+ return lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8814_GPIO_EN2, 0);
+}
+
+#define LAN8842_INTR_CTRL_REG 52 /* 0x34 */
+
+static int lan8842_config_intr(struct phy_device *phydev)
+{
+ int err;
+
+ lanphy_write_page_reg(phydev, LAN8814_PAGE_COMMON_REGS,
+ LAN8842_INTR_CTRL_REG,
+ LAN8814_INTR_CTRL_REG_INTR_ENABLE);
+
+ /* enable / disable interrupts */
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = lan8814_ack_interrupt(phydev);
+ if (err)
+ return err;
+
+ err = phy_write(phydev, LAN8814_INTC,
+ LAN8814_INT_LINK | LAN8814_INT_FLF);
+ } else {
+ err = phy_write(phydev, LAN8814_INTC, 0);
+ if (err)
+ return err;
+
+ err = lan8814_ack_interrupt(phydev);
+ }
+
+ return err;
+}
+
+static unsigned int lan8842_inband_caps(struct phy_device *phydev,
+ phy_interface_t interface)
+{
+ /* Inband configuration can be enabled or disabled using the
+ * PCS1G_ANEG_CONFIG register.
+ */
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+}
+
+static int lan8842_config_inband(struct phy_device *phydev, unsigned int modes)
+{
+ bool enable;
+
+ if (modes == LINK_INBAND_DISABLE)
+ enable = false;
+ else
+ enable = true;
+
+ /* Disable or enable in-band autoneg with PCS Host side
+ * It has the same address as lan8814
+ */
+ return lanphy_modify_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ LAN8814_QSGMII_PCS1G_ANEG_CONFIG,
+ LAN8814_QSGMII_PCS1G_ANEG_CONFIG_ANEG_ENA,
+ enable ? LAN8814_QSGMII_PCS1G_ANEG_CONFIG_ANEG_ENA : 0);
+}
+
+static void lan8842_handle_ptp_interrupt(struct phy_device *phydev, u16 status)
+{
+ struct kszphy_ptp_priv *ptp_priv;
+ struct lan8842_priv *priv;
+
+ priv = phydev->priv;
+ ptp_priv = &priv->ptp_priv;
+
+ if (status & PTP_TSU_INT_STS_PTP_TX_TS_EN_)
+ lan8814_get_tx_ts(ptp_priv);
+
+ if (status & PTP_TSU_INT_STS_PTP_RX_TS_EN_)
+ lan8814_get_rx_ts(ptp_priv);
+
+ if (status & PTP_TSU_INT_STS_PTP_TX_TS_OVRFL_INT_) {
+ lan8814_flush_fifo(phydev, true);
+ skb_queue_purge(&ptp_priv->tx_queue);
+ }
+
+ if (status & PTP_TSU_INT_STS_PTP_RX_TS_OVRFL_INT_) {
+ lan8814_flush_fifo(phydev, false);
+ skb_queue_purge(&ptp_priv->rx_queue);
+ }
+}
+
+static irqreturn_t lan8842_handle_interrupt(struct phy_device *phydev)
+{
+ struct lan8842_priv *priv = phydev->priv;
+ int ret = IRQ_NONE;
+ int irq_status;
+
+ irq_status = phy_read(phydev, LAN8814_INTS);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (irq_status & (LAN8814_INT_LINK | LAN8814_INT_FLF)) {
+ phy_trigger_machine(phydev);
+ ret = IRQ_HANDLED;
+ }
+
+ /* The lan8832 revision doesn't support PTP, therefore there is
+ * no need to check the PTP and GPIO interrupts
+ */
+ if (priv->rev == LAN8842_REV_8832)
+ goto out;
+
+ while (true) {
+ irq_status = lanphy_read_page_reg(phydev, LAN8814_PAGE_PORT_REGS,
+ PTP_TSU_INT_STS);
+ if (!irq_status)
+ break;
+
+ lan8842_handle_ptp_interrupt(phydev, irq_status);
+ ret = IRQ_HANDLED;
+ }
+
+ if (!lan8814_handle_gpio_interrupt(phydev, irq_status))
+ ret = IRQ_HANDLED;
+
+out:
+ return ret;
+}
+
+static u64 lan8842_get_stat(struct phy_device *phydev, int count, int *regs)
+{
+ u64 ret = 0;
+ int val;
+
+ for (int j = 0; j < count; ++j) {
+ val = lanphy_read_page_reg(phydev, LAN8814_PAGE_PCS_DIGITAL,
+ regs[j]);
+ if (val < 0)
+ return U64_MAX;
+
+ ret <<= 16;
+ ret += val;
+ }
+ return ret;
+}
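Counters wider than 16 bits are spread across several registers and folded most-significant word first by shift-and-add, with U64_MAX doubling as the error marker. A userspace sketch with invented register contents:

/* Fold several 16-bit counter registers into one 64-bit value, most
 * significant register first, as lan8842_get_stat() does above. A
 * negative "read" result maps to UINT64_MAX as an error marker.
 */
#include <assert.h>
#include <stdint.h>

static int read_reg16(const int *regs, int idx)
{
	return regs[idx];	/* < 0 would signal a read error */
}

static uint64_t get_stat(const int *regs, int count)
{
	uint64_t ret = 0;

	for (int j = 0; j < count; j++) {
		int val = read_reg16(regs, j);

		if (val < 0)
			return UINT64_MAX;
		ret <<= 16;
		ret += (uint16_t)val;
	}
	return ret;
}

int main(void)
{
	int rx_packets[] = { 0x0001, 0x0002, 0x0003 };	/* hi, mid, lo */

	assert(get_stat(rx_packets, 3) == 0x000100020003ULL);
	return 0;
}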
+
+static int lan8842_update_stats(struct phy_device *phydev)
+{
+ struct lan8842_priv *priv = phydev->priv;
+ int rx_packets_regs[] = {88, 61, 60};
+ int rx_errors_regs[] = {63, 62};
+ int tx_packets_regs[] = {89, 85, 84};
+ int tx_errors_regs[] = {87, 86};
+
+ priv->phy_stats.rx_packets = lan8842_get_stat(phydev,
+ ARRAY_SIZE(rx_packets_regs),
+ rx_packets_regs);
+ priv->phy_stats.rx_errors = lan8842_get_stat(phydev,
+ ARRAY_SIZE(rx_errors_regs),
+ rx_errors_regs);
+ priv->phy_stats.tx_packets = lan8842_get_stat(phydev,
+ ARRAY_SIZE(tx_packets_regs),
+ tx_packets_regs);
+ priv->phy_stats.tx_errors = lan8842_get_stat(phydev,
+ ARRAY_SIZE(tx_errors_regs),
+ tx_errors_regs);
+
+ return 0;
+}
+
+#define LAN8842_FLF 15 /* 0x0f */
+#define LAN8842_FLF_ENA BIT(1)
+#define LAN8842_FLF_ENA_LINK_DOWN BIT(0)
+
+static int lan8842_get_fast_down(struct phy_device *phydev, u8 *msecs)
+{
+ int ret;
+
+ ret = lanphy_read_page_reg(phydev, LAN8814_PAGE_PCS, LAN8842_FLF);
+ if (ret < 0)
+ return ret;
+
+ if (ret & LAN8842_FLF_ENA)
+ *msecs = ETHTOOL_PHY_FAST_LINK_DOWN_ON;
+ else
+ *msecs = ETHTOOL_PHY_FAST_LINK_DOWN_OFF;
+
+ return 0;
+}
+
+static int lan8842_set_fast_down(struct phy_device *phydev, const u8 *msecs)
+{
+ u16 flf;
+
+ switch (*msecs) {
+ case ETHTOOL_PHY_FAST_LINK_DOWN_OFF:
+ flf = 0;
+ break;
+ case ETHTOOL_PHY_FAST_LINK_DOWN_ON:
+ flf = LAN8842_FLF_ENA | LAN8842_FLF_ENA_LINK_DOWN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return lanphy_modify_page_reg(phydev, LAN8814_PAGE_PCS,
+ LAN8842_FLF,
+ LAN8842_FLF_ENA |
+ LAN8842_FLF_ENA_LINK_DOWN, flf);
+}
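The tunable maps the ethtool fast-link-down selector onto the two FLF bits: off clears both, on sets both, anything else is rejected. A userspace sketch of that mapping (the selector values, 0 for on and 0xff for off, follow current ethtool headers but should be treated as assumptions here):

/* Map the fast-link-down selector to FLF register bits, mirroring
 * lan8842_set_fast_down() above. Constants are illustrative.
 */
#include <assert.h>
#include <stdint.h>

#define FLF_ENA			(1u << 1)
#define FLF_ENA_LINK_DOWN	(1u << 0)
#define FAST_DOWN_ON		0	/* assumed ethtool "on" selector */
#define FAST_DOWN_OFF		0xff	/* assumed ethtool "off" selector */

static int fast_down_to_flf(uint8_t msecs, uint16_t *flf)
{
	switch (msecs) {
	case FAST_DOWN_OFF:
		*flf = 0;
		return 0;
	case FAST_DOWN_ON:
		*flf = FLF_ENA | FLF_ENA_LINK_DOWN;
		return 0;
	default:
		return -1;	/* -EINVAL in the driver */
	}
}

int main(void)
{
	uint16_t flf;

	assert(fast_down_to_flf(FAST_DOWN_ON, &flf) == 0 && flf == 0x3);
	assert(fast_down_to_flf(FAST_DOWN_OFF, &flf) == 0 && flf == 0);
	assert(fast_down_to_flf(5, &flf) < 0);
	return 0;
}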
+
+static int lan8842_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_FAST_LINK_DOWN:
+ return lan8842_get_fast_down(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int lan8842_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_FAST_LINK_DOWN:
+ return lan8842_set_fast_down(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void lan8842_get_phy_stats(struct phy_device *phydev,
+ struct ethtool_eth_phy_stats *eth_stats,
+ struct ethtool_phy_stats *stats)
+{
+ struct lan8842_priv *priv = phydev->priv;
+
+ stats->rx_packets = priv->phy_stats.rx_packets;
+ stats->rx_errors = priv->phy_stats.rx_errors;
+ stats->tx_packets = priv->phy_stats.tx_packets;
+ stats->tx_errors = priv->phy_stats.tx_errors;
+}
+
static struct phy_driver ksphy_driver[] = {
{
- .phy_id = PHY_ID_KS8737,
- .phy_id_mask = MICREL_PHY_ID_MASK,
+ PHY_ID_MATCH_MODEL(PHY_ID_KS8737),
.name = "Micrel KS8737",
/* PHY_BASIC_FEATURES */
.driver_data = &ks8737_type,
@@ -5687,8 +6202,7 @@ static struct phy_driver ksphy_driver[] = {
.suspend = kszphy_suspend,
.resume = kszphy_resume,
}, {
- .phy_id = PHY_ID_KSZ8041,
- .phy_id_mask = MICREL_PHY_ID_MASK,
+ PHY_ID_MATCH_MODEL(PHY_ID_KSZ8041),
.name = "Micrel KSZ8041",
/* PHY_BASIC_FEATURES */
.driver_data = &ksz8041_type,
@@ -5703,8 +6217,7 @@ static struct phy_driver ksphy_driver[] = {
.suspend = ksz8041_suspend,
.resume = ksz8041_resume,
}, {
- .phy_id = PHY_ID_KSZ8041RNLI,
- .phy_id_mask = MICREL_PHY_ID_MASK,
+ PHY_ID_MATCH_MODEL(PHY_ID_KSZ8041RNLI),
.name = "Micrel KSZ8041RNLI",
/* PHY_BASIC_FEATURES */
.driver_data = &ksz8041_type,
@@ -5747,9 +6260,8 @@ static struct phy_driver ksphy_driver[] = {
.suspend = kszphy_suspend,
.resume = kszphy_resume,
}, {
- .phy_id = PHY_ID_KSZ8081,
+ PHY_ID_MATCH_MODEL(PHY_ID_KSZ8081),
.name = "Micrel KSZ8081 or KSZ8091",
- .phy_id_mask = MICREL_PHY_ID_MASK,
.flags = PHY_POLL_CABLE_TEST,
/* PHY_BASIC_FEATURES */
.driver_data = &ksz8081_type,
@@ -5768,9 +6280,8 @@ static struct phy_driver ksphy_driver[] = {
.cable_test_start = ksz886x_cable_test_start,
.cable_test_get_status = ksz886x_cable_test_get_status,
}, {
- .phy_id = PHY_ID_KSZ8061,
+ PHY_ID_MATCH_MODEL(PHY_ID_KSZ8061),
.name = "Micrel KSZ8061",
- .phy_id_mask = MICREL_PHY_ID_MASK,
/* PHY_BASIC_FEATURES */
.probe = kszphy_probe,
.config_init = ksz8061_config_init,
@@ -5798,8 +6309,7 @@ static struct phy_driver ksphy_driver[] = {
.read_mmd = genphy_read_mmd_unsupported,
.write_mmd = genphy_write_mmd_unsupported,
}, {
- .phy_id = PHY_ID_KSZ9031,
- .phy_id_mask = MICREL_PHY_ID_MASK,
+ PHY_ID_MATCH_MODEL(PHY_ID_KSZ9031),
.name = "Micrel KSZ9031 Gigabit PHY",
.flags = PHY_POLL_CABLE_TEST,
.driver_data = &ksz9021_type,
@@ -5819,8 +6329,7 @@ static struct phy_driver ksphy_driver[] = {
.cable_test_get_status = ksz9x31_cable_test_get_status,
.set_loopback = ksz9031_set_loopback,
}, {
- .phy_id = PHY_ID_LAN8814,
- .phy_id_mask = MICREL_PHY_ID_MASK,
+ PHY_ID_MATCH_MODEL(PHY_ID_LAN8814),
.name = "Microchip INDY Gigabit Quad PHY",
.flags = PHY_POLL_CABLE_TEST,
.config_init = lan8814_config_init,
@@ -5838,8 +6347,7 @@ static struct phy_driver ksphy_driver[] = {
.cable_test_start = lan8814_cable_test_start,
.cable_test_get_status = ksz886x_cable_test_get_status,
}, {
- .phy_id = PHY_ID_LAN8804,
- .phy_id_mask = MICREL_PHY_ID_MASK,
+ PHY_ID_MATCH_MODEL(PHY_ID_LAN8804),
.name = "Microchip LAN966X Gigabit PHY",
.config_init = lan8804_config_init,
.driver_data = &ksz9021_type,
@@ -5854,8 +6362,7 @@ static struct phy_driver ksphy_driver[] = {
.config_intr = lan8804_config_intr,
.handle_interrupt = lan8804_handle_interrupt,
}, {
- .phy_id = PHY_ID_LAN8841,
- .phy_id_mask = MICREL_PHY_ID_MASK,
+ PHY_ID_MATCH_MODEL(PHY_ID_LAN8841),
.name = "Microchip LAN8841 Gigabit PHY",
.flags = PHY_POLL_CABLE_TEST,
.driver_data = &lan8841_type,
@@ -5872,8 +6379,24 @@ static struct phy_driver ksphy_driver[] = {
.cable_test_start = lan8814_cable_test_start,
.cable_test_get_status = ksz886x_cable_test_get_status,
}, {
- .phy_id = PHY_ID_KSZ9131,
- .phy_id_mask = MICREL_PHY_ID_MASK,
+ PHY_ID_MATCH_MODEL(PHY_ID_LAN8842),
+ .name = "Microchip LAN8842 Gigabit PHY",
+ .flags = PHY_POLL_CABLE_TEST,
+ .driver_data = &lan8814_type,
+ .probe = lan8842_probe,
+ .config_init = lan8842_config_init,
+ .config_intr = lan8842_config_intr,
+ .inband_caps = lan8842_inband_caps,
+ .config_inband = lan8842_config_inband,
+ .handle_interrupt = lan8842_handle_interrupt,
+ .get_phy_stats = lan8842_get_phy_stats,
+ .update_stats = lan8842_update_stats,
+ .get_tunable = lan8842_get_tunable,
+ .set_tunable = lan8842_set_tunable,
+ .cable_test_start = lan8814_cable_test_start,
+ .cable_test_get_status = ksz886x_cable_test_get_status,
+}, {
+ PHY_ID_MATCH_MODEL(PHY_ID_KSZ9131),
.name = "Microchip KSZ9131 Gigabit PHY",
/* PHY_GBIT_FEATURES */
.flags = PHY_POLL_CABLE_TEST,
@@ -5894,8 +6417,7 @@ static struct phy_driver ksphy_driver[] = {
.cable_test_get_status = ksz9x31_cable_test_get_status,
.get_features = ksz9477_get_features,
}, {
- .phy_id = PHY_ID_KSZ8873MLL,
- .phy_id_mask = MICREL_PHY_ID_MASK,
+ PHY_ID_MATCH_MODEL(PHY_ID_KSZ8873MLL),
.name = "Micrel KSZ8873MLL Switch",
/* PHY_BASIC_FEATURES */
.config_init = kszphy_config_init,
@@ -5904,8 +6426,7 @@ static struct phy_driver ksphy_driver[] = {
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
- .phy_id = PHY_ID_KSZ886X,
- .phy_id_mask = MICREL_PHY_ID_MASK,
+ PHY_ID_MATCH_MODEL(PHY_ID_KSZ886X),
.name = "Micrel KSZ8851 Ethernet MAC or KSZ886X Switch",
.driver_data = &ksz886x_type,
/* PHY_BASIC_FEATURES */
@@ -5925,8 +6446,7 @@ static struct phy_driver ksphy_driver[] = {
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
- .phy_id = PHY_ID_KSZ9477,
- .phy_id_mask = MICREL_PHY_ID_MASK,
+ PHY_ID_MATCH_MODEL(PHY_ID_KSZ9477),
.name = "Microchip KSZ9477",
.probe = kszphy_probe,
/* PHY_GBIT_FEATURES */
@@ -5953,22 +6473,24 @@ MODULE_LICENSE("GPL");
static const struct mdio_device_id __maybe_unused micrel_tbl[] = {
{ PHY_ID_KSZ9021, 0x000ffffe },
- { PHY_ID_KSZ9031, MICREL_PHY_ID_MASK },
- { PHY_ID_KSZ9131, MICREL_PHY_ID_MASK },
+ { PHY_ID_MATCH_MODEL(PHY_ID_KSZ9031) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_KSZ9131) },
{ PHY_ID_KSZ8001, 0x00fffffc },
- { PHY_ID_KS8737, MICREL_PHY_ID_MASK },
+ { PHY_ID_MATCH_MODEL(PHY_ID_KS8737) },
{ PHY_ID_KSZ8021, 0x00ffffff },
{ PHY_ID_KSZ8031, 0x00ffffff },
- { PHY_ID_KSZ8041, MICREL_PHY_ID_MASK },
- { PHY_ID_KSZ8051, MICREL_PHY_ID_MASK },
- { PHY_ID_KSZ8061, MICREL_PHY_ID_MASK },
- { PHY_ID_KSZ8081, MICREL_PHY_ID_MASK },
- { PHY_ID_KSZ8873MLL, MICREL_PHY_ID_MASK },
- { PHY_ID_KSZ886X, MICREL_PHY_ID_MASK },
- { PHY_ID_KSZ9477, MICREL_PHY_ID_MASK },
- { PHY_ID_LAN8814, MICREL_PHY_ID_MASK },
- { PHY_ID_LAN8804, MICREL_PHY_ID_MASK },
- { PHY_ID_LAN8841, MICREL_PHY_ID_MASK },
+ { PHY_ID_MATCH_MODEL(PHY_ID_KSZ8041) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_KSZ8041RNLI) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_KSZ8051) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_KSZ8061) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_KSZ8081) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_KSZ8873MLL) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_KSZ886X) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_KSZ9477) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_LAN8814) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_LAN8804) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_LAN8841) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_LAN8842) },
{ }
};
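Replacing explicit .phy_id/.phy_id_mask pairs with PHY_ID_MATCH_MODEL() standardizes on a model-level match that ignores the low revision bits of the PHY ID, so new silicon revisions still bind to the same entry. A userspace sketch of the comparison, assuming a 4-bit revision field in bits 3:0 (the mask name and IDs below are invented):

/* Model-level PHY ID match: compare everything except the revision. */
#include <assert.h>
#include <stdint.h>

#define PHY_ID_MODEL_MASK	0xfffffff0u	/* ignore revision bits 3:0 */

static int phy_id_match_model(uint32_t id, uint32_t model)
{
	return (id & PHY_ID_MODEL_MASK) == (model & PHY_ID_MODEL_MASK);
}

int main(void)
{
	uint32_t model = 0x00221620;	/* made-up model ID */

	assert(phy_id_match_model(0x00221621, model));	/* rev 1 matches */
	assert(!phy_id_match_model(0x00221630, model));	/* other model */
	return 0;
}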
diff --git a/drivers/net/phy/motorcomm.c b/drivers/net/phy/motorcomm.c
index 0e91f5d1a4fd..a3593e663059 100644
--- a/drivers/net/phy/motorcomm.c
+++ b/drivers/net/phy/motorcomm.c
@@ -213,6 +213,20 @@
#define YT8521_RC1R_RGMII_2_100_NS 14
#define YT8521_RC1R_RGMII_2_250_NS 15
+/* LED CONFIG */
+#define YT8521_MAX_LEDS 3
+#define YT8521_LED0_CFG_REG 0xA00C
+#define YT8521_LED1_CFG_REG 0xA00D
+#define YT8521_LED2_CFG_REG 0xA00E
+#define YT8521_LED_ACT_BLK_IND BIT(13)
+#define YT8521_LED_FDX_ON_EN BIT(12)
+#define YT8521_LED_HDX_ON_EN BIT(11)
+#define YT8521_LED_TXACT_BLK_EN BIT(10)
+#define YT8521_LED_RXACT_BLK_EN BIT(9)
+#define YT8521_LED_1000_ON_EN BIT(6)
+#define YT8521_LED_100_ON_EN BIT(5)
+#define YT8521_LED_10_ON_EN BIT(4)
+
#define YTPHY_MISC_CONFIG_REG 0xA006
#define YTPHY_MCR_FIBER_SPEED_MASK BIT(0)
#define YTPHY_MCR_FIBER_1000BX (0x1 << 0)
@@ -1681,6 +1695,106 @@ err_restore_page:
return phy_restore_page(phydev, old_page, ret);
}
+static const unsigned long supported_trgs = (BIT(TRIGGER_NETDEV_FULL_DUPLEX) |
+ BIT(TRIGGER_NETDEV_HALF_DUPLEX) |
+ BIT(TRIGGER_NETDEV_LINK) |
+ BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_RX) |
+ BIT(TRIGGER_NETDEV_TX));
+
+static int yt8521_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ if (index >= YT8521_MAX_LEDS)
+ return -EINVAL;
+
+ /* All combinations of the supported triggers are allowed */
+ if (rules & ~supported_trgs)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
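The supported-rules check is a single mask test: if any requested trigger falls outside supported_trgs, the whole set is rejected. A userspace sketch with arbitrary trigger bit positions:

/* Reject a rule set if any bit lies outside the supported mask. */
#include <assert.h>

#define TRG_LINK	(1UL << 0)
#define TRG_RX		(1UL << 1)
#define TRG_TX		(1UL << 2)
#define TRG_UNSUPPORTED	(1UL << 7)

static const unsigned long supported = TRG_LINK | TRG_RX | TRG_TX;

static int rules_supported(unsigned long rules)
{
	return (rules & ~supported) == 0;
}

int main(void)
{
	assert(rules_supported(TRG_LINK | TRG_RX));
	assert(!rules_supported(TRG_LINK | TRG_UNSUPPORTED));
	return 0;
}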
+
+static int yt8521_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ u16 val = 0;
+
+ if (index >= YT8521_MAX_LEDS)
+ return -EINVAL;
+
+ if (test_bit(TRIGGER_NETDEV_LINK, &rules)) {
+ val |= YT8521_LED_10_ON_EN;
+ val |= YT8521_LED_100_ON_EN;
+ val |= YT8521_LED_1000_ON_EN;
+ }
+
+ if (test_bit(TRIGGER_NETDEV_LINK_10, &rules))
+ val |= YT8521_LED_10_ON_EN;
+
+ if (test_bit(TRIGGER_NETDEV_LINK_100, &rules))
+ val |= YT8521_LED_100_ON_EN;
+
+ if (test_bit(TRIGGER_NETDEV_LINK_1000, &rules))
+ val |= YT8521_LED_1000_ON_EN;
+
+ if (test_bit(TRIGGER_NETDEV_FULL_DUPLEX, &rules))
+ val |= YT8521_LED_FDX_ON_EN;
+
+ if (test_bit(TRIGGER_NETDEV_HALF_DUPLEX, &rules))
+ val |= YT8521_LED_HDX_ON_EN;
+
+ if (test_bit(TRIGGER_NETDEV_TX, &rules) ||
+ test_bit(TRIGGER_NETDEV_RX, &rules))
+ val |= YT8521_LED_ACT_BLK_IND;
+
+ if (test_bit(TRIGGER_NETDEV_TX, &rules))
+ val |= YT8521_LED_TXACT_BLK_EN;
+
+ if (test_bit(TRIGGER_NETDEV_RX, &rules))
+ val |= YT8521_LED_RXACT_BLK_EN;
+
+ return ytphy_write_ext(phydev, YT8521_LED0_CFG_REG + index, val);
+}
+
+static int yt8521_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ int val;
+
+ if (index >= YT8521_MAX_LEDS)
+ return -EINVAL;
+
+ val = ytphy_read_ext(phydev, YT8521_LED0_CFG_REG + index);
+ if (val < 0)
+ return val;
+
+ if (val & YT8521_LED_TXACT_BLK_EN || val & YT8521_LED_ACT_BLK_IND)
+ __set_bit(TRIGGER_NETDEV_TX, rules);
+
+ if (val & YT8521_LED_RXACT_BLK_EN || val & YT8521_LED_ACT_BLK_IND)
+ __set_bit(TRIGGER_NETDEV_RX, rules);
+
+ if (val & YT8521_LED_FDX_ON_EN)
+ __set_bit(TRIGGER_NETDEV_FULL_DUPLEX, rules);
+
+ if (val & YT8521_LED_HDX_ON_EN)
+ __set_bit(TRIGGER_NETDEV_HALF_DUPLEX, rules);
+
+ if (val & YT8521_LED_1000_ON_EN)
+ __set_bit(TRIGGER_NETDEV_LINK_1000, rules);
+
+ if (val & YT8521_LED_100_ON_EN)
+ __set_bit(TRIGGER_NETDEV_LINK_100, rules);
+
+ if (val & YT8521_LED_10_ON_EN)
+ __set_bit(TRIGGER_NETDEV_LINK_10, rules);
+
+ return 0;
+}
+
static int yt8531_config_init(struct phy_device *phydev)
{
struct device_node *node = phydev->mdio.dev.of_node;
@@ -2920,6 +3034,9 @@ static struct phy_driver motorcomm_phy_drvs[] = {
.soft_reset = yt8521_soft_reset,
.suspend = yt8521_suspend,
.resume = yt8521_resume,
+ .led_hw_is_supported = yt8521_led_hw_is_supported,
+ .led_hw_control_set = yt8521_led_hw_control_set,
+ .led_hw_control_get = yt8521_led_hw_control_get,
},
{
PHY_ID_MATCH_EXACT(PHY_ID_YT8531),
diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h
index 6a3d8a754eb8..2d8eca54c40a 100644
--- a/drivers/net/phy/mscc/mscc.h
+++ b/drivers/net/phy/mscc/mscc.h
@@ -196,6 +196,9 @@ enum rgmii_clock_delay {
#define MSCC_PHY_EXTENDED_INT_MS_EGR BIT(9)
/* Extended Page 3 Registers */
+#define MSCC_PHY_SERDES_PCS_CTRL 16
+#define MSCC_PHY_SERDES_ANEG BIT(7)
+
#define MSCC_PHY_SERDES_TX_VALID_CNT 21
#define MSCC_PHY_SERDES_TX_CRC_ERR_CNT 22
#define MSCC_PHY_SERDES_RX_VALID_CNT 28
@@ -362,6 +365,13 @@ struct vsc85xx_hw_stat {
u16 mask;
};
+struct vsc8531_skb_cb {
+ u32 ns;
+};
+
+#define VSC8531_SKB_CB(skb) \
+ ((struct vsc8531_skb_cb *)((skb)->cb))
+
struct vsc8531_private {
int rate_magic;
u16 supp_led_modes;
@@ -410,6 +420,11 @@ struct vsc8531_private {
*/
struct mutex ts_lock;
struct mutex phc_lock;
+
+ /* list of skbs that were received and need timestamp information but
+ * haven't received it yet
+ */
+ struct sk_buff_head rx_skbs_list;
};
/* Shared structure between the PHYs of the same package.
@@ -469,6 +484,7 @@ static inline void vsc8584_config_macsec_intr(struct phy_device *phydev)
void vsc85xx_link_change_notify(struct phy_device *phydev);
void vsc8584_config_ts_intr(struct phy_device *phydev);
int vsc8584_ptp_init(struct phy_device *phydev);
+void vsc8584_ptp_deinit(struct phy_device *phydev);
int vsc8584_ptp_probe_once(struct phy_device *phydev);
int vsc8584_ptp_probe(struct phy_device *phydev);
irqreturn_t vsc8584_handle_ts_interrupt(struct phy_device *phydev);
@@ -483,6 +499,9 @@ static inline int vsc8584_ptp_init(struct phy_device *phydev)
{
return 0;
}
+static inline void vsc8584_ptp_deinit(struct phy_device *phydev)
+{
+}
static inline int vsc8584_ptp_probe_once(struct phy_device *phydev)
{
return 0;
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index 7ed6522fb0ef..ef0ef1570d39 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -2202,6 +2202,28 @@ static int vsc85xx_read_status(struct phy_device *phydev)
return genphy_read_status(phydev);
}
+static unsigned int vsc85xx_inband_caps(struct phy_device *phydev,
+ phy_interface_t interface)
+{
+ if (interface != PHY_INTERFACE_MODE_SGMII &&
+ interface != PHY_INTERFACE_MODE_QSGMII)
+ return 0;
+
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+}
+
+static int vsc85xx_config_inband(struct phy_device *phydev, unsigned int modes)
+{
+ u16 reg_val = 0;
+
+ if (modes == LINK_INBAND_ENABLE)
+ reg_val = MSCC_PHY_SERDES_ANEG;
+
+ return phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_3,
+ MSCC_PHY_SERDES_PCS_CTRL, MSCC_PHY_SERDES_ANEG,
+ reg_val);
+}
+
static int vsc8514_probe(struct phy_device *phydev)
{
struct vsc8531_private *vsc8531;
@@ -2335,6 +2357,11 @@ static int vsc85xx_probe(struct phy_device *phydev)
return vsc85xx_dt_led_modes_get(phydev, default_mode);
}
+static void vsc85xx_remove(struct phy_device *phydev)
+{
+ vsc8584_ptp_deinit(phydev);
+}
+
/* Microsemi VSC85xx PHYs */
static struct phy_driver vsc85xx_driver[] = {
{
@@ -2409,6 +2436,8 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .inband_caps = vsc85xx_inband_caps,
+ .config_inband = vsc85xx_config_inband,
},
{
.phy_id = PHY_ID_VSC8514,
@@ -2432,6 +2461,8 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .inband_caps = vsc85xx_inband_caps,
+ .config_inband = vsc85xx_config_inband,
},
{
.phy_id = PHY_ID_VSC8530,
@@ -2552,6 +2583,8 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .inband_caps = vsc85xx_inband_caps,
+ .config_inband = vsc85xx_config_inband,
},
{
.phy_id = PHY_ID_VSC856X,
@@ -2574,6 +2607,8 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .inband_caps = vsc85xx_inband_caps,
+ .config_inband = vsc85xx_config_inband,
},
{
.phy_id = PHY_ID_VSC8572,
@@ -2589,6 +2624,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
+ .remove = &vsc85xx_remove,
.probe = &vsc8574_probe,
.set_wol = &vsc85xx_wol_set,
.get_wol = &vsc85xx_wol_get,
@@ -2599,6 +2635,8 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .inband_caps = vsc85xx_inband_caps,
+ .config_inband = vsc85xx_config_inband,
},
{
.phy_id = PHY_ID_VSC8574,
@@ -2614,6 +2652,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
+ .remove = &vsc85xx_remove,
.probe = &vsc8574_probe,
.set_wol = &vsc85xx_wol_set,
.get_wol = &vsc85xx_wol_get,
@@ -2624,6 +2663,8 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .inband_caps = vsc85xx_inband_caps,
+ .config_inband = vsc85xx_config_inband,
},
{
.phy_id = PHY_ID_VSC8575,
@@ -2639,6 +2680,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
+ .remove = &vsc85xx_remove,
.probe = &vsc8584_probe,
.get_tunable = &vsc85xx_get_tunable,
.set_tunable = &vsc85xx_set_tunable,
@@ -2647,6 +2689,8 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .inband_caps = vsc85xx_inband_caps,
+ .config_inband = vsc85xx_config_inband,
},
{
.phy_id = PHY_ID_VSC8582,
@@ -2662,6 +2706,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
+ .remove = &vsc85xx_remove,
.probe = &vsc8584_probe,
.get_tunable = &vsc85xx_get_tunable,
.set_tunable = &vsc85xx_set_tunable,
@@ -2670,6 +2715,8 @@ static struct phy_driver vsc85xx_driver[] = {
.get_sset_count = &vsc85xx_get_sset_count,
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
+ .inband_caps = vsc85xx_inband_caps,
+ .config_inband = vsc85xx_config_inband,
},
{
.phy_id = PHY_ID_VSC8584,
@@ -2685,6 +2732,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
+ .remove = &vsc85xx_remove,
.probe = &vsc8584_probe,
.get_tunable = &vsc85xx_get_tunable,
.set_tunable = &vsc85xx_set_tunable,
@@ -2694,6 +2742,8 @@ static struct phy_driver vsc85xx_driver[] = {
.get_strings = &vsc85xx_get_strings,
.get_stats = &vsc85xx_get_stats,
.link_change_notify = &vsc85xx_link_change_notify,
+ .inband_caps = vsc85xx_inband_caps,
+ .config_inband = vsc85xx_config_inband,
}
};
diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
index 6b800081eed5..d692df7d975c 100644
--- a/drivers/net/phy/mscc/mscc_ptp.c
+++ b/drivers/net/phy/mscc/mscc_ptp.c
@@ -456,12 +456,12 @@ static void vsc85xx_dequeue_skb(struct vsc85xx_ptp *ptp)
*p++ = (reg >> 24) & 0xff;
}
- len = skb_queue_len(&ptp->tx_queue);
+ len = skb_queue_len_lockless(&ptp->tx_queue);
if (len < 1)
return;
while (len--) {
- skb = __skb_dequeue(&ptp->tx_queue);
+ skb = skb_dequeue(&ptp->tx_queue);
if (!skb)
return;
@@ -486,7 +486,7 @@ static void vsc85xx_dequeue_skb(struct vsc85xx_ptp *ptp)
* packet in the FIFO right now, reschedule it for later
* packets.
*/
- __skb_queue_tail(&ptp->tx_queue, skb);
+ skb_queue_tail(&ptp->tx_queue, skb);
}
}
@@ -900,6 +900,7 @@ static int vsc85xx_eth1_conf(struct phy_device *phydev, enum ts_blk blk,
get_unaligned_be32(ptp_multicast));
} else {
val |= ANA_ETH1_FLOW_ADDR_MATCH2_ANY_MULTICAST;
+ val |= ANA_ETH1_FLOW_ADDR_MATCH2_ANY_UNICAST;
vsc85xx_ts_write_csr(phydev, blk,
MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), val);
vsc85xx_ts_write_csr(phydev, blk,
@@ -1067,6 +1068,7 @@ static int vsc85xx_hwtstamp(struct mii_timestamper *mii_ts,
case HWTSTAMP_TX_ON:
break;
case HWTSTAMP_TX_OFF:
+ skb_queue_purge(&vsc8531->ptp->tx_queue);
break;
default:
return -ERANGE;
@@ -1091,9 +1093,6 @@ static int vsc85xx_hwtstamp(struct mii_timestamper *mii_ts,
mutex_lock(&vsc8531->ts_lock);
- __skb_queue_purge(&vsc8531->ptp->tx_queue);
- __skb_queue_head_init(&vsc8531->ptp->tx_queue);
-
/* Disable predictor while configuring the 1588 block */
val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
MSCC_PHY_PTP_INGR_PREDICTOR);
@@ -1179,9 +1178,7 @@ static void vsc85xx_txtstamp(struct mii_timestamper *mii_ts,
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- mutex_lock(&vsc8531->ts_lock);
- __skb_queue_tail(&vsc8531->ptp->tx_queue, skb);
- mutex_unlock(&vsc8531->ts_lock);
+ skb_queue_tail(&vsc8531->ptp->tx_queue, skb);
return;
out:
@@ -1193,9 +1190,7 @@ static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
{
struct vsc8531_private *vsc8531 =
container_of(mii_ts, struct vsc8531_private, mii_ts);
- struct skb_shared_hwtstamps *shhwtstamps = NULL;
struct vsc85xx_ptphdr *ptphdr;
- struct timespec64 ts;
unsigned long ns;
if (!vsc8531->ptp->configured)
@@ -1205,27 +1200,52 @@ static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
type == PTP_CLASS_NONE)
return false;
- vsc85xx_gettime(&vsc8531->ptp->caps, &ts);
-
ptphdr = get_ptp_header_rx(skb, vsc8531->ptp->rx_filter);
if (!ptphdr)
return false;
- shhwtstamps = skb_hwtstamps(skb);
- memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
-
ns = ntohl(ptphdr->rsrvd2);
- /* nsec is in reserved field */
- if (ts.tv_nsec < ns)
- ts.tv_sec--;
+ VSC8531_SKB_CB(skb)->ns = ns;
+ skb_queue_tail(&vsc8531->rx_skbs_list, skb);
- shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ns);
- netif_rx(skb);
+ ptp_schedule_worker(vsc8531->ptp->ptp_clock, 0);
return true;
}
+static long vsc85xx_do_aux_work(struct ptp_clock_info *info)
+{
+ struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
+ struct skb_shared_hwtstamps *shhwtstamps = NULL;
+ struct phy_device *phydev = ptp->phydev;
+ struct vsc8531_private *priv = phydev->priv;
+ struct sk_buff_head received;
+ struct sk_buff *rx_skb;
+ struct timespec64 ts;
+ unsigned long flags;
+
+ __skb_queue_head_init(&received);
+ spin_lock_irqsave(&priv->rx_skbs_list.lock, flags);
+ skb_queue_splice_tail_init(&priv->rx_skbs_list, &received);
+ spin_unlock_irqrestore(&priv->rx_skbs_list.lock, flags);
+
+ vsc85xx_gettime(info, &ts);
+ while ((rx_skb = __skb_dequeue(&received)) != NULL) {
+ shhwtstamps = skb_hwtstamps(rx_skb);
+ memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+
+ if (ts.tv_nsec < VSC8531_SKB_CB(rx_skb)->ns)
+ ts.tv_sec--;
+
+ shhwtstamps->hwtstamp = ktime_set(ts.tv_sec,
+ VSC8531_SKB_CB(rx_skb)->ns);
+ netif_rx(rx_skb);
+ }
+
+ return -1;
+}
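The RX path now defers timestamp completion: vsc85xx_rxtstamp() only queues the skb and kicks the PTP aux worker, which splices the whole pending queue out under the lock and then stamps and delivers each packet without holding it. A userspace sketch of that splice-then-process pattern using a plain linked list and a pthread mutex (node and helper names are illustrative):

/* Splice-then-process: take the lock once, steal the whole pending
 * list, drop the lock, then work on the private copy lock-free.
 */
#include <assert.h>
#include <pthread.h>
#include <stddef.h>

struct node {
	struct node *next;
	unsigned long ns;	/* stand-in for the cb-stored timestamp */
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pending;

static void enqueue(struct node *n)	/* rxtstamp() side */
{
	pthread_mutex_lock(&lock);
	n->next = pending;
	pending = n;
	pthread_mutex_unlock(&lock);
}

static int process_all(void)		/* aux worker side */
{
	struct node *list;
	int count = 0;

	pthread_mutex_lock(&lock);
	list = pending;			/* splice out the whole list */
	pending = NULL;
	pthread_mutex_unlock(&lock);

	for (; list; list = list->next)
		count++;		/* stamp + deliver would go here */
	return count;
}

int main(void)
{
	struct node a = { .ns = 1 }, b = { .ns = 2 };

	enqueue(&a);
	enqueue(&b);
	assert(process_all() == 2);
	assert(process_all() == 0);
	return 0;
}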
+
static const struct ptp_clock_info vsc85xx_clk_caps = {
.owner = THIS_MODULE,
.name = "VSC85xx timer",
@@ -1239,6 +1259,7 @@ static const struct ptp_clock_info vsc85xx_clk_caps = {
.adjfine = &vsc85xx_adjfine,
.gettime64 = &vsc85xx_gettime,
.settime64 = &vsc85xx_settime,
+ .do_aux_work = &vsc85xx_do_aux_work,
};
static struct vsc8531_private *vsc8584_base_priv(struct phy_device *phydev)
@@ -1273,7 +1294,6 @@ static void vsc8584_set_input_clk_configured(struct phy_device *phydev)
static int __vsc8584_init_ptp(struct phy_device *phydev)
{
- struct vsc8531_private *vsc8531 = phydev->priv;
static const u32 ltc_seq_e[] = { 0, 400000, 0, 0, 0 };
static const u8 ltc_seq_a[] = { 8, 6, 5, 4, 2 };
u32 val;
@@ -1490,17 +1510,7 @@ static int __vsc8584_init_ptp(struct phy_device *phydev)
vsc85xx_ts_eth_cmp1_sig(phydev);
- vsc8531->mii_ts.rxtstamp = vsc85xx_rxtstamp;
- vsc8531->mii_ts.txtstamp = vsc85xx_txtstamp;
- vsc8531->mii_ts.hwtstamp = vsc85xx_hwtstamp;
- vsc8531->mii_ts.ts_info = vsc85xx_ts_info;
- phydev->mii_ts = &vsc8531->mii_ts;
-
- memcpy(&vsc8531->ptp->caps, &vsc85xx_clk_caps, sizeof(vsc85xx_clk_caps));
-
- vsc8531->ptp->ptp_clock = ptp_clock_register(&vsc8531->ptp->caps,
- &phydev->mdio.dev);
- return PTR_ERR_OR_ZERO(vsc8531->ptp->ptp_clock);
+ return 0;
}
void vsc8584_config_ts_intr(struct phy_device *phydev)
@@ -1527,6 +1537,17 @@ int vsc8584_ptp_init(struct phy_device *phydev)
return 0;
}
+void vsc8584_ptp_deinit(struct phy_device *phydev)
+{
+ struct vsc8531_private *vsc8531 = phydev->priv;
+
+ if (vsc8531->ptp->ptp_clock) {
+ ptp_clock_unregister(vsc8531->ptp->ptp_clock);
+ skb_queue_purge(&vsc8531->rx_skbs_list);
+ skb_queue_purge(&vsc8531->ptp->tx_queue);
+ }
+}
+
irqreturn_t vsc8584_handle_ts_interrupt(struct phy_device *phydev)
{
struct vsc8531_private *priv = phydev->priv;
@@ -1547,7 +1568,7 @@ irqreturn_t vsc8584_handle_ts_interrupt(struct phy_device *phydev)
if (rc & VSC85XX_1588_INT_FIFO_ADD) {
vsc85xx_get_tx_ts(priv->ptp);
} else if (rc & VSC85XX_1588_INT_FIFO_OVERFLOW) {
- __skb_queue_purge(&priv->ptp->tx_queue);
+ skb_queue_purge(&priv->ptp->tx_queue);
vsc85xx_ts_reset_fifo(phydev);
}
@@ -1566,6 +1587,8 @@ int vsc8584_ptp_probe(struct phy_device *phydev)
mutex_init(&vsc8531->phc_lock);
mutex_init(&vsc8531->ts_lock);
+ skb_queue_head_init(&vsc8531->rx_skbs_list);
+ skb_queue_head_init(&vsc8531->ptp->tx_queue);
/* Retrieve the shared load/save GPIO. Request it as non exclusive as
* the same GPIO can be requested by all the PHYs of the same package.
@@ -1586,7 +1609,16 @@ int vsc8584_ptp_probe(struct phy_device *phydev)
vsc8531->ptp->phydev = phydev;
- return 0;
+ vsc8531->mii_ts.rxtstamp = vsc85xx_rxtstamp;
+ vsc8531->mii_ts.txtstamp = vsc85xx_txtstamp;
+ vsc8531->mii_ts.hwtstamp = vsc85xx_hwtstamp;
+ vsc8531->mii_ts.ts_info = vsc85xx_ts_info;
+ phydev->mii_ts = &vsc8531->mii_ts;
+
+ memcpy(&vsc8531->ptp->caps, &vsc85xx_clk_caps, sizeof(vsc85xx_clk_caps));
+ vsc8531->ptp->ptp_clock = ptp_clock_register(&vsc8531->ptp->caps,
+ &phydev->mdio.dev);
+ return PTR_ERR_OR_ZERO(vsc8531->ptp->ptp_clock);
}
int vsc8584_ptp_probe_once(struct phy_device *phydev)
diff --git a/drivers/net/phy/mscc/mscc_ptp.h b/drivers/net/phy/mscc/mscc_ptp.h
index da3465360e90..ae9ad925bfa8 100644
--- a/drivers/net/phy/mscc/mscc_ptp.h
+++ b/drivers/net/phy/mscc/mscc_ptp.h
@@ -98,6 +98,7 @@
#define MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(x) (MSCC_ANA_ETH1_FLOW_ENA(x) + 3)
#define ANA_ETH1_FLOW_ADDR_MATCH2_MASK_MASK GENMASK(22, 20)
#define ANA_ETH1_FLOW_ADDR_MATCH2_ANY_MULTICAST 0x400000
+#define ANA_ETH1_FLOW_ADDR_MATCH2_ANY_UNICAST 0x200000
#define ANA_ETH1_FLOW_ADDR_MATCH2_FULL_ADDR 0x100000
#define ANA_ETH1_FLOW_ADDR_MATCH2_SRC_DEST_MASK GENMASK(17, 16)
#define ANA_ETH1_FLOW_ADDR_MATCH2_SRC_DEST 0x020000
diff --git a/drivers/net/phy/mxl-86110.c b/drivers/net/phy/mxl-86110.c
index ff2a3a22bd5b..e5d137a37a1d 100644
--- a/drivers/net/phy/mxl-86110.c
+++ b/drivers/net/phy/mxl-86110.c
@@ -15,6 +15,7 @@
/* PHY ID */
#define PHY_ID_MXL86110 0xc1335580
+#define PHY_ID_MXL86111 0xc1335588
/* required to access extended registers */
#define MXL86110_EXTD_REG_ADDR_OFFSET 0x1E
@@ -22,7 +23,15 @@
#define PHY_IRQ_ENABLE_REG 0x12
#define PHY_IRQ_ENABLE_REG_WOL BIT(6)
-/* SyncE Configuration Register - COM_EXT SYNCE_CFG */
+/* Different pages for extended register (EXTD) access on the MXL86111 */
+/* SerDes/PHY Control Access Register - COM_EXT_SMI_SDS_PHY */
+#define MXL86111_EXT_SMI_SDS_PHY_REG 0xA000
+#define MXL86111_EXT_SMI_SDS_PHYSPACE_MASK BIT(1)
+#define MXL86111_EXT_SMI_SDS_PHYFIBER_SPACE (0x1 << 1)
+#define MXL86111_EXT_SMI_SDS_PHYUTP_SPACE (0x0 << 1)
+#define MXL86111_EXT_SMI_SDS_PHY_AUTO 0xff
+
+/* SyncE Configuration Register - COM_EXT_SYNCE_CFG */
#define MXL86110_EXT_SYNCE_CFG_REG 0xA012
#define MXL86110_EXT_SYNCE_CFG_CLK_FRE_SEL BIT(4)
#define MXL86110_EXT_SYNCE_CFG_EN_SYNC_E_DURING_LNKDN BIT(5)
@@ -71,6 +80,11 @@
#define MXL86110_MAX_LEDS 3
/* LED registers and defines */
+#define MXL86110_COM_EXT_LED_GEN_CFG 0xA00B
+# define MXL86110_COM_EXT_LED_GEN_CFG_LFM(x) ((BIT(0) | BIT(1)) << (3 * (x)))
+# define MXL86110_COM_EXT_LED_GEN_CFG_LFME(x) (BIT(0) << (3 * (x)))
+# define MXL86110_COM_EXT_LED_GEN_CFG_LFE(x) (BIT(2) << (3 * (x)))
+
#define MXL86110_LED0_CFG_REG 0xA00C
#define MXL86110_LED1_CFG_REG 0xA00D
#define MXL86110_LED2_CFG_REG 0xA00E
@@ -110,9 +124,67 @@
/* Chip Configuration Register - COM_EXT_CHIP_CFG */
#define MXL86110_EXT_CHIP_CFG_REG 0xA001
+#define MXL86111_EXT_CHIP_CFG_MODE_SEL_MASK GENMASK(2, 0)
+#define MXL86111_EXT_CHIP_CFG_MODE_UTP_TO_RGMII 0
+#define MXL86111_EXT_CHIP_CFG_MODE_FIBER_TO_RGMII 1
+#define MXL86111_EXT_CHIP_CFG_MODE_UTP_FIBER_TO_RGMII 2
+#define MXL86111_EXT_CHIP_CFG_MODE_UTP_TO_SGMII 3
+#define MXL86111_EXT_CHIP_CFG_MODE_SGPHY_TO_RGMAC 4
+#define MXL86111_EXT_CHIP_CFG_MODE_SGMAC_TO_RGPHY 5
+#define MXL86111_EXT_CHIP_CFG_MODE_UTP_TO_FIBER_AUTO 6
+#define MXL86111_EXT_CHIP_CFG_MODE_UTP_TO_FIBER_FORCE 7
+
+#define MXL86111_EXT_CHIP_CFG_CLDO_MASK GENMASK(5, 4)
+#define MXL86111_EXT_CHIP_CFG_CLDO_3V3 0
+#define MXL86111_EXT_CHIP_CFG_CLDO_2V5 1
+#define MXL86111_EXT_CHIP_CFG_CLDO_1V8_2 2
+#define MXL86111_EXT_CHIP_CFG_CLDO_1V8_3 3
+#define MXL86111_EXT_CHIP_CFG_CLDO_SHIFT 4
+#define MXL86111_EXT_CHIP_CFG_ELDO BIT(6)
#define MXL86110_EXT_CHIP_CFG_RXDLY_ENABLE BIT(8)
#define MXL86110_EXT_CHIP_CFG_SW_RST_N_MODE BIT(15)
+/* Specific Status Register - PHY_STAT */
+#define MXL86111_PHY_STAT_REG 0x11
+#define MXL86111_PHY_STAT_SPEED_MASK GENMASK(15, 14)
+#define MXL86111_PHY_STAT_SPEED_OFFSET 14
+#define MXL86111_PHY_STAT_SPEED_10M 0x0
+#define MXL86111_PHY_STAT_SPEED_100M 0x1
+#define MXL86111_PHY_STAT_SPEED_1000M 0x2
+#define MXL86111_PHY_STAT_DPX_OFFSET 13
+#define MXL86111_PHY_STAT_DPX BIT(13)
+#define MXL86111_PHY_STAT_LSRT BIT(10)
+
+/* 3 PHY reg page modes; auto mode combines UTP and fiber modes */
+#define MXL86111_MODE_FIBER 0x1
+#define MXL86111_MODE_UTP 0x2
+#define MXL86111_MODE_AUTO 0x3
+
+/* FIBER Auto-Negotiation link partner ability - SDS_AN_LPA */
+#define MXL86111_SDS_AN_LPA_PAUSE (0x3 << 7)
+#define MXL86111_SDS_AN_LPA_ASYM_PAUSE (0x2 << 7)
+
+/* Miscellaneous Control Register - COM_EXT_MISC_CFG */
+#define MXL86111_EXT_MISC_CONFIG_REG 0xa006
+#define MXL86111_EXT_MISC_CONFIG_FIB_SPEED_SEL BIT(0)
+#define MXL86111_EXT_MISC_CONFIG_FIB_SPEED_SEL_1000BX (0x1 << 0)
+#define MXL86111_EXT_MISC_CONFIG_FIB_SPEED_SEL_100BX (0x0 << 0)
+
+/* Phy fiber Link timer cfg2 Register - EXT_SDS_LINK_TIMER_CFG2 */
+#define MXL86111_EXT_SDS_LINK_TIMER_CFG2_REG 0xA5
+#define MXL86111_EXT_SDS_LINK_TIMER_CFG2_EN_AUTOSEN BIT(15)
+
+/* default values of PHY registers, required for Dual Media mode */
+#define MII_BMSR_DEFAULT_VAL 0x7949
+#define MII_ESTATUS_DEFAULT_VAL 0x2000
+
+/* Timeout in ms for PHY SW reset check in STD_CTRL/SDS_CTRL */
+#define BMCR_RESET_TIMEOUT 500
+
+/* PL P1 requires optimized RGMII timing for 1.8V RGMII voltage */
+#define MXL86111_PL_P1 0x500
+
/**
* __mxl86110_write_extended_reg() - write to a PHY's extended register
* @phydev: pointer to the PHY device structure
@@ -236,6 +308,29 @@ static int mxl86110_read_extended_reg(struct phy_device *phydev, u16 regnum)
}
/**
+ * mxl86110_modify_extended_reg() - modify bits of a PHY's extended register
+ * @phydev: pointer to the PHY device structure
+ * @regnum: register number to write
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ *
+ * Note: register value = (old register value & ~mask) | set.
+ *
+ * Return: 0 or negative error code
+ */
+static int mxl86110_modify_extended_reg(struct phy_device *phydev,
+ u16 regnum, u16 mask, u16 set)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+ ret = __mxl86110_modify_extended_reg(phydev, regnum, mask, set);
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
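The driver keeps the double-underscore primitive separate from a locking wrapper: callers that already hold the MDIO bus lock (for multi-register sequences) use __mxl86110_modify_extended_reg(), everyone else uses the wrapper. A userspace sketch of the split, with a pthread mutex standing in for the bus lock:

/* "__helper does the work, wrapper takes the lock": lock-holding
 * callers use the double-underscore variant directly.
 */
#include <assert.h>
#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t mdio_lock = PTHREAD_MUTEX_INITIALIZER;
static uint16_t ext_reg;

static int __modify_ext(uint16_t mask, uint16_t set)
{
	ext_reg = (ext_reg & ~mask) | set;	/* lock must be held */
	return 0;
}

static int modify_ext(uint16_t mask, uint16_t set)
{
	int ret;

	pthread_mutex_lock(&mdio_lock);
	ret = __modify_ext(mask, set);
	pthread_mutex_unlock(&mdio_lock);
	return ret;
}

int main(void)
{
	ext_reg = 0xffff;
	assert(modify_ext(0x00f0, 0x0020) == 0);
	assert(ext_reg == 0xff2f);
	return 0;
}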
+
+/**
* mxl86110_get_wol() - report if wake-on-lan is enabled
* @phydev: pointer to the phy_device
* @wol: a pointer to a &struct ethtool_wolinfo
@@ -394,6 +489,7 @@ static int mxl86110_led_hw_control_set(struct phy_device *phydev, u8 index,
unsigned long rules)
{
u16 val = 0;
+ int ret;
if (index >= MXL86110_MAX_LEDS)
return -EINVAL;
@@ -423,8 +519,43 @@ static int mxl86110_led_hw_control_set(struct phy_device *phydev, u8 index,
rules & BIT(TRIGGER_NETDEV_RX))
val |= MXL86110_LEDX_CFG_BLINK;
- return mxl86110_write_extended_reg(phydev,
+ ret = mxl86110_write_extended_reg(phydev,
MXL86110_LED0_CFG_REG + index, val);
+ if (ret)
+ return ret;
+
+ /* clear manual control bit */
+ ret = mxl86110_modify_extended_reg(phydev,
+ MXL86110_COM_EXT_LED_GEN_CFG,
+ MXL86110_COM_EXT_LED_GEN_CFG_LFE(index),
+ 0);
+
+ return ret;
+}
+
+static int mxl86110_led_brightness_set(struct phy_device *phydev,
+ u8 index, enum led_brightness value)
+{
+ u16 mask, set;
+ int ret;
+
+ if (index >= MXL86110_MAX_LEDS)
+ return -EINVAL;
+
+ /* force manual control */
+ set = MXL86110_COM_EXT_LED_GEN_CFG_LFE(index);
+ /* clear previous force mode */
+ mask = MXL86110_COM_EXT_LED_GEN_CFG_LFM(index);
+
+ /* force LED to be permanently on */
+ if (value != LED_OFF)
+ set |= MXL86110_COM_EXT_LED_GEN_CFG_LFME(index);
+
+ ret = mxl86110_modify_extended_reg(phydev,
+ MXL86110_COM_EXT_LED_GEN_CFG,
+ mask, set);
+
+ return ret;
}
/**
@@ -521,22 +652,15 @@ static int mxl86110_enable_led_activity_blink(struct phy_device *phydev)
}
/**
- * mxl86110_config_init() - initialize the PHY
+ * mxl86110_config_rgmii_delay() - configure RGMII delays
* @phydev: pointer to the phy_device
*
* Return: 0 or negative errno code
*/
-static int mxl86110_config_init(struct phy_device *phydev)
+static int mxl86110_config_rgmii_delay(struct phy_device *phydev)
{
- u16 val = 0;
int ret;
-
- phy_lock_mdio_bus(phydev);
-
- /* configure syncE / clk output */
- ret = mxl86110_synce_clk_cfg(phydev);
- if (ret < 0)
- goto out;
+ u16 val;
switch (phydev->interface) {
case PHY_INTERFACE_MODE_RGMII:
@@ -578,6 +702,31 @@ static int mxl86110_config_init(struct phy_device *phydev)
if (ret < 0)
goto out;
+out:
+ return ret;
+}
+
+/**
+ * mxl86110_config_init() - initialize the MXL86110 PHY
+ * @phydev: pointer to the phy_device
+ *
+ * Return: 0 or negative errno code
+ */
+static int mxl86110_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+
+ /* configure syncE / clk output */
+ ret = mxl86110_synce_clk_cfg(phydev);
+ if (ret < 0)
+ goto out;
+
+ ret = mxl86110_config_rgmii_delay(phydev);
+ if (ret < 0)
+ goto out;
+
ret = mxl86110_enable_led_activity_blink(phydev);
if (ret < 0)
goto out;
@@ -589,6 +738,201 @@ out:
return ret;
}
+/**
+ * mxl86111_probe() - validate bootstrap chip config and set UTP page
+ * @phydev: pointer to the phy_device
+ *
+ * Return: 0 or negative errno code
+ */
+static int mxl86111_probe(struct phy_device *phydev)
+{
+ int chip_config;
+ u16 reg_page;
+ int ret;
+
+ chip_config = mxl86110_read_extended_reg(phydev, MXL86110_EXT_CHIP_CFG_REG);
+ if (chip_config < 0)
+ return chip_config;
+
+ switch (chip_config & MXL86111_EXT_CHIP_CFG_MODE_SEL_MASK) {
+ case MXL86111_EXT_CHIP_CFG_MODE_UTP_TO_SGMII:
+ case MXL86111_EXT_CHIP_CFG_MODE_UTP_TO_RGMII:
+ phydev->port = PORT_TP;
+ reg_page = MXL86111_EXT_SMI_SDS_PHYUTP_SPACE;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ret = mxl86110_write_extended_reg(phydev,
+ MXL86111_EXT_SMI_SDS_PHY_REG,
+ reg_page);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * mxl86111_config_init() - initialize the MXL86111 PHY
+ * @phydev: pointer to the phy_device
+ *
+ * Return: 0 or negative errno code
+ */
+static int mxl86111_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+
+ /* configure syncE / clk output */
+ ret = mxl86110_synce_clk_cfg(phydev);
+ if (ret < 0)
+ goto out;
+
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_100BASEX:
+ ret = __mxl86110_modify_extended_reg(phydev,
+ MXL86111_EXT_MISC_CONFIG_REG,
+ MXL86111_EXT_MISC_CONFIG_FIB_SPEED_SEL,
+ MXL86111_EXT_MISC_CONFIG_FIB_SPEED_SEL_100BX);
+ if (ret < 0)
+ goto out;
+ break;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ ret = __mxl86110_modify_extended_reg(phydev,
+ MXL86111_EXT_MISC_CONFIG_REG,
+ MXL86111_EXT_MISC_CONFIG_FIB_SPEED_SEL,
+ MXL86111_EXT_MISC_CONFIG_FIB_SPEED_SEL_1000BX);
+ if (ret < 0)
+ goto out;
+ break;
+ default:
+ /* RGMII modes */
+ ret = mxl86110_config_rgmii_delay(phydev);
+ if (ret < 0)
+ goto out;
+ ret = __mxl86110_modify_extended_reg(phydev, MXL86110_EXT_RGMII_CFG1_REG,
+ MXL86110_EXT_RGMII_CFG1_FULL_MASK, ret);
+
+ /* PL P1 requires optimized RGMII timing for 1.8V RGMII voltage */
+ ret = __mxl86110_read_extended_reg(phydev, 0xf);
+ if (ret < 0)
+ goto out;
+
+ if (ret == MXL86111_PL_P1) {
+ ret = __mxl86110_read_extended_reg(phydev, MXL86110_EXT_CHIP_CFG_REG);
+ if (ret < 0)
+ goto out;
+
+ /* check if LDO is in 1.8V mode */
+ switch (FIELD_GET(MXL86111_EXT_CHIP_CFG_CLDO_MASK, ret)) {
+ case MXL86111_EXT_CHIP_CFG_CLDO_1V8_3:
+ case MXL86111_EXT_CHIP_CFG_CLDO_1V8_2:
+ ret = __mxl86110_write_extended_reg(phydev, 0xa010, 0xabff);
+ if (ret < 0)
+ goto out;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+ }
+
+ ret = mxl86110_enable_led_activity_blink(phydev);
+ if (ret < 0)
+ goto out;
+
+ ret = mxl86110_broadcast_cfg(phydev);
+out:
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+
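
The LDO check in mxl86111_config_init() extracts the CLDO field from the
chip-config register with FIELD_GET() and applies the 1.8V RGMII tuning
only for the two 1.8V codes. A small userspace sketch of that
extract-and-switch pattern, with an illustrative mask and field codes
rather than the chip's documented values:

#include <stdio.h>

#define CLDO_MASK	0x00f0	/* assumed field position */
#define CLDO_1V8_2	0x2	/* hypothetical code */
#define CLDO_1V8_3	0x3	/* hypothetical code */

/* open-coded FIELD_GET(): shift the masked value down by the mask's LSB */
static unsigned int field_get(unsigned int mask, unsigned int reg)
{
	return (reg & mask) >> __builtin_ctz(mask);
}

int main(void)
{
	unsigned int chip_cfg = 0x0030;	/* CLDO field holds 0x3 */

	switch (field_get(CLDO_MASK, chip_cfg)) {
	case CLDO_1V8_2:
	case CLDO_1V8_3:
		puts("LDO at 1.8V: apply tuned RGMII timing");
		break;
	default:
		puts("LDO not at 1.8V: keep defaults");
	}
	return 0;
}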
+/**
+ * mxl86111_read_page() - read reg page
+ * @phydev: pointer to the phy_device
+ *
+ * Return: current reg space of mxl86111 or negative errno code
+ */
+static int mxl86111_read_page(struct phy_device *phydev)
+{
+ int page;
+
+ page = __mxl86110_read_extended_reg(phydev, MXL86111_EXT_SMI_SDS_PHY_REG);
+ if (page < 0)
+ return page;
+
+ return page & MXL86111_EXT_SMI_SDS_PHYSPACE_MASK;
+}
+
+/**
+ * mxl86111_write_page() - Set reg page
+ * @phydev: pointer to the phy_device
+ * @page: The reg page to set
+ *
+ * Return: 0 or negative errno code
+ */
+static int mxl86111_write_page(struct phy_device *phydev, int page)
+{
+ return __mxl86110_modify_extended_reg(phydev, MXL86111_EXT_SMI_SDS_PHY_REG,
+ MXL86111_EXT_SMI_SDS_PHYSPACE_MASK, page);
+}
+
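
mxl86111_read_page() and mxl86111_write_page() plug the UTP/fibre
register spaces into phylib's generic paging, so helpers such as
phy_modify_paged() can save the current space, switch, operate, and
restore. A toy userspace model of that save/switch/restore flow, with
the register access mocked:

#include <stdio.h>

static int current_page;			/* mock of the page register */

static int read_page(void) { return current_page; }
static void write_page(int page) { current_page = page; }

/* shape of phy_modify_paged(): save, switch, act, restore */
static void modify_paged(int page, const char *what)
{
	int old = read_page();

	write_page(page);
	printf("page %d: %s\n", current_page, what);
	write_page(old);
}

int main(void)
{
	current_page = 0;	/* say, the UTP space */
	modify_paged(2, "touch fibre-side BMCR");
	printf("restored page: %d\n", current_page);
	return 0;
}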
+static int mxl86111_config_inband(struct phy_device *phydev, unsigned int modes)
+{
+ int ret;
+
+ ret = phy_modify_paged(phydev, MXL86111_EXT_SMI_SDS_PHYFIBER_SPACE,
+ MII_BMCR, BMCR_ANENABLE,
+ (modes == LINK_INBAND_DISABLE) ? 0 : BMCR_ANENABLE);
+ if (ret < 0)
+ goto out;
+
+ phy_lock_mdio_bus(phydev);
+
+ ret = __mxl86110_modify_extended_reg(phydev, MXL86111_EXT_SDS_LINK_TIMER_CFG2_REG,
+ MXL86111_EXT_SDS_LINK_TIMER_CFG2_EN_AUTOSEN,
+ (modes == LINK_INBAND_DISABLE) ? 0 :
+ MXL86111_EXT_SDS_LINK_TIMER_CFG2_EN_AUTOSEN);
+ if (ret < 0)
+ goto out;
+
+ ret = __mxl86110_modify_extended_reg(phydev, MXL86110_EXT_CHIP_CFG_REG,
+ MXL86110_EXT_CHIP_CFG_SW_RST_N_MODE, 0);
+ if (ret < 0)
+ goto out;
+
+ /* For fiber forced mode, power down/up to re-aneg */
+ if (modes != LINK_INBAND_DISABLE) {
+ __phy_modify(phydev, MII_BMCR, 0, BMCR_PDOWN);
+ usleep_range(1000, 1050);
+ __phy_modify(phydev, MII_BMCR, BMCR_PDOWN, 0);
+ }
+
+out:
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+
+static unsigned int mxl86111_inband_caps(struct phy_device *phydev,
+ phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_100BASEX:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
+ default:
+ return 0;
+ }
+}
+
static struct phy_driver mxl_phy_drvs[] = {
{
PHY_ID_MATCH_EXACT(PHY_ID_MXL86110),
@@ -596,9 +940,26 @@ static struct phy_driver mxl_phy_drvs[] = {
.config_init = mxl86110_config_init,
.get_wol = mxl86110_get_wol,
.set_wol = mxl86110_set_wol,
+ .led_brightness_set = mxl86110_led_brightness_set,
+ .led_hw_is_supported = mxl86110_led_hw_is_supported,
+ .led_hw_control_get = mxl86110_led_hw_control_get,
+ .led_hw_control_set = mxl86110_led_hw_control_set,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_MXL86111),
+ .name = "MXL86111 Gigabit Ethernet",
+ .probe = mxl86111_probe,
+ .config_init = mxl86111_config_init,
+ .get_wol = mxl86110_get_wol,
+ .set_wol = mxl86110_set_wol,
+ .inband_caps = mxl86111_inband_caps,
+ .config_inband = mxl86111_config_inband,
+ .read_page = mxl86111_read_page,
+ .write_page = mxl86111_write_page,
+ .led_brightness_set = mxl86110_led_brightness_set,
.led_hw_is_supported = mxl86110_led_hw_is_supported,
- .led_hw_control_get = mxl86110_led_hw_control_get,
- .led_hw_control_set = mxl86110_led_hw_control_set,
+ .led_hw_control_get = mxl86110_led_hw_control_get,
+ .led_hw_control_set = mxl86110_led_hw_control_set,
},
};
@@ -606,11 +967,12 @@ module_phy_driver(mxl_phy_drvs);
static const struct mdio_device_id __maybe_unused mxl_tbl[] = {
{ PHY_ID_MATCH_EXACT(PHY_ID_MXL86110) },
+ { PHY_ID_MATCH_EXACT(PHY_ID_MXL86111) },
{ }
};
MODULE_DEVICE_TABLE(mdio, mxl_tbl);
-MODULE_DESCRIPTION("MaxLinear MXL86110 PHY driver");
+MODULE_DESCRIPTION("MaxLinear MXL86110/MXL86111 PHY driver");
MODULE_AUTHOR("Stefano Radaelli");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/nxp-c45-tja11xx-macsec.c b/drivers/net/phy/nxp-c45-tja11xx-macsec.c
index 550ef08970f4..fc897ba79b03 100644
--- a/drivers/net/phy/nxp-c45-tja11xx-macsec.c
+++ b/drivers/net/phy/nxp-c45-tja11xx-macsec.c
@@ -926,7 +926,6 @@ static int nxp_c45_mdo_dev_open(struct macsec_context *ctx)
struct phy_device *phydev = ctx->phydev;
struct nxp_c45_phy *priv = phydev->priv;
struct nxp_c45_secy *phy_secy;
- int any_bit_set;
phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
if (IS_ERR(phy_secy))
@@ -939,8 +938,7 @@ static int nxp_c45_mdo_dev_open(struct macsec_context *ctx)
if (phy_secy->rx_sc)
nxp_c45_rx_sc_en(phydev, phy_secy->rx_sc, true);
- any_bit_set = find_first_bit(priv->macsec->secy_bitmap, TX_SC_MAX);
- if (any_bit_set == TX_SC_MAX)
+ if (bitmap_empty(priv->macsec->secy_bitmap, TX_SC_MAX))
nxp_c45_macsec_en(phydev, true);
set_bit(phy_secy->secy_id, priv->macsec->secy_bitmap);
@@ -953,7 +951,6 @@ static int nxp_c45_mdo_dev_stop(struct macsec_context *ctx)
struct phy_device *phydev = ctx->phydev;
struct nxp_c45_phy *priv = phydev->priv;
struct nxp_c45_secy *phy_secy;
- int any_bit_set;
phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
if (IS_ERR(phy_secy))
@@ -967,8 +964,7 @@ static int nxp_c45_mdo_dev_stop(struct macsec_context *ctx)
nxp_c45_set_rx_sc0_impl(phydev, false);
clear_bit(phy_secy->secy_id, priv->macsec->secy_bitmap);
- any_bit_set = find_first_bit(priv->macsec->secy_bitmap, TX_SC_MAX);
- if (any_bit_set == TX_SC_MAX)
+ if (bitmap_empty(priv->macsec->secy_bitmap, TX_SC_MAX))
nxp_c45_macsec_en(phydev, false);
return 0;
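
Both hunks above replace the "find_first_bit(bm, N) == N" idiom with
bitmap_empty(bm, N); the semantics are identical, the intent just reads
directly. A self-contained illustration, with a single unsigned long
standing in for the kernel's bitmap helpers:

#include <stdio.h>

#define TX_SC_MAX 16

/* returns the index of the first set bit, or size if none is set */
static int find_first_bit(unsigned long bm, int size)
{
	for (int i = 0; i < size; i++)
		if (bm & (1UL << i))
			return i;
	return size;
}

static int bitmap_empty(unsigned long bm, int size)
{
	return find_first_bit(bm, size) == size;
}

int main(void)
{
	unsigned long secy_bitmap = 0;

	printf("empty: %d\n", bitmap_empty(secy_bitmap, TX_SC_MAX)); /* 1 */
	secy_bitmap |= 1UL << 3;
	printf("empty: %d\n", bitmap_empty(secy_bitmap, TX_SC_MAX)); /* 0 */
	return 0;
}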
diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
index 4c6d905f0a9f..87adb6508017 100644
--- a/drivers/net/phy/nxp-c45-tja11xx.c
+++ b/drivers/net/phy/nxp-c45-tja11xx.c
@@ -1965,24 +1965,27 @@ static int nxp_c45_macsec_ability(struct phy_device *phydev)
return macsec_ability;
}
+static bool tja11xx_phy_id_compare(struct phy_device *phydev,
+ const struct phy_driver *phydrv)
+{
+ u32 id = phydev->is_c45 ? phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] :
+ phydev->phy_id;
+
+ return phy_id_compare(id, phydrv->phy_id, phydrv->phy_id_mask);
+}
+
static int tja11xx_no_macsec_match_phy_device(struct phy_device *phydev,
const struct phy_driver *phydrv)
{
- if (!phy_id_compare(phydev->phy_id, phydrv->phy_id,
- phydrv->phy_id_mask))
- return 0;
-
- return !nxp_c45_macsec_ability(phydev);
+ return tja11xx_phy_id_compare(phydev, phydrv) &&
+ !nxp_c45_macsec_ability(phydev);
}
static int tja11xx_macsec_match_phy_device(struct phy_device *phydev,
const struct phy_driver *phydrv)
{
- if (!phy_id_compare(phydev->phy_id, phydrv->phy_id,
- phydrv->phy_id_mask))
- return 0;
-
- return nxp_c45_macsec_ability(phydev);
+ return tja11xx_phy_id_compare(phydev, phydrv) &&
+ nxp_c45_macsec_ability(phydev);
}
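
tja11xx_phy_id_compare() picks the PMA/PMD device ID when the PHY was
probed over clause 45 and the clause-22 ID otherwise, and both match
callbacks now reduce to "masked ID matches AND MACsec ability matches".
A simplified stand-alone model of that selection and combination, with
plain structs standing in for struct phy_device / struct phy_driver:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_phy {
	bool is_c45;
	uint32_t c45_pma_id;	/* device_ids[MDIO_MMD_PMAPMD] analogue */
	uint32_t phy_id;
	bool macsec_able;
};

static bool id_compare(uint32_t id, uint32_t drv_id, uint32_t mask)
{
	return (id & mask) == (drv_id & mask);
}

static bool macsec_match(const struct fake_phy *phy,
			 uint32_t drv_id, uint32_t drv_mask)
{
	uint32_t id = phy->is_c45 ? phy->c45_pma_id : phy->phy_id;

	return id_compare(id, drv_id, drv_mask) && phy->macsec_able;
}

int main(void)
{
	struct fake_phy phy = {
		.is_c45 = true,
		.c45_pma_id = 0x001bb013,	/* made-up ID for the demo */
		.macsec_able = true,
	};

	printf("match: %d\n", macsec_match(&phy, 0x001bb013, 0xffffffff));
	return 0;
}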
static const struct nxp_c45_regmap tja1120_regmap = {
diff --git a/drivers/net/phy/phy-caps.h b/drivers/net/phy/phy-caps.h
index 157759966650..b7f0c6a3037a 100644
--- a/drivers/net/phy/phy-caps.h
+++ b/drivers/net/phy/phy-caps.h
@@ -41,7 +41,7 @@ struct link_capabilities {
__ETHTOOL_DECLARE_LINK_MODE_MASK(linkmodes);
};
-int phy_caps_init(void);
+int __init phy_caps_init(void);
size_t phy_caps_speeds(unsigned int *speeds, size_t size,
unsigned long *linkmodes);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 13df28445f02..02da4a203ddd 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -1065,23 +1065,19 @@ EXPORT_SYMBOL_GPL(phy_inband_caps);
*/
int phy_config_inband(struct phy_device *phydev, unsigned int modes)
{
- int err;
+ lockdep_assert_held(&phydev->lock);
if (!!(modes & LINK_INBAND_DISABLE) +
!!(modes & LINK_INBAND_ENABLE) +
!!(modes & LINK_INBAND_BYPASS) != 1)
return -EINVAL;
- mutex_lock(&phydev->lock);
if (!phydev->drv)
- err = -EIO;
+ return -EIO;
else if (!phydev->drv->config_inband)
- err = -EOPNOTSUPP;
- else
- err = phydev->drv->config_inband(phydev, modes);
- mutex_unlock(&phydev->lock);
+ return -EOPNOTSUPP;
- return err;
+ return phydev->drv->config_inband(phydev, modes);
}
EXPORT_SYMBOL(phy_config_inband);
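
Two things change in phy_config_inband(): the caller must now hold
phydev->lock, enforced with lockdep_assert_held() rather than taking the
mutex internally, and the entry check still requires exactly one of the
three inband flags. The sum-of-booleans trick is compact enough to be
worth spelling out; the flag values below follow the same BIT(n) pattern
but are local stand-ins for the LINK_INBAND_* constants:

#include <stdio.h>

#define INBAND_DISABLE	(1u << 0)
#define INBAND_ENABLE	(1u << 1)
#define INBAND_BYPASS	(1u << 2)

/* each !! is 0 or 1, so the sum counts how many flags are selected */
static int validate_modes(unsigned int modes)
{
	if (!!(modes & INBAND_DISABLE) +
	    !!(modes & INBAND_ENABLE) +
	    !!(modes & INBAND_BYPASS) != 1)
		return -1;	/* -EINVAL in the kernel */
	return 0;
}

int main(void)
{
	printf("%d\n", validate_modes(INBAND_ENABLE));			/* 0 */
	printf("%d\n", validate_modes(INBAND_ENABLE | INBAND_BYPASS));	/* -1 */
	printf("%d\n", validate_modes(0));				/* -1 */
	return 0;
}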
@@ -1552,9 +1548,24 @@ static enum phy_state_work _phy_state_machine(struct phy_device *phydev)
}
break;
case PHY_HALTED:
+ if (phydev->link) {
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ }
+ if (phydev->master_slave_state !=
+ MASTER_SLAVE_STATE_UNSUPPORTED)
+ phydev->master_slave_state =
+ MASTER_SLAVE_STATE_UNKNOWN;
+ phydev->mdix = ETH_TP_MDI_INVALID;
+ linkmode_zero(phydev->lp_advertising);
+ }
+ fallthrough;
case PHY_ERROR:
if (phydev->link) {
phydev->link = 0;
+ phydev->eee_active = false;
+ phydev->enable_tx_lpi = false;
phy_link_down(phydev);
}
state_work = PHY_STATE_WORK_SUSPEND;
diff --git a/drivers/net/phy/phy_caps.c b/drivers/net/phy/phy_caps.c
index 2cc9ee97e867..23c808b59b6f 100644
--- a/drivers/net/phy/phy_caps.c
+++ b/drivers/net/phy/phy_caps.c
@@ -70,7 +70,7 @@ static int speed_duplex_to_capa(int speed, unsigned int duplex)
* unexpected linkmode setting that requires LINK_CAPS update.
*
*/
-int phy_caps_init(void)
+int __init phy_caps_init(void)
{
const struct link_mode_info *linkmode;
int i, capa;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 7556aa3dd7ee..7a67c900e79a 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -91,7 +91,7 @@ const int phy_basic_ports_array[3] = {
};
EXPORT_SYMBOL_GPL(phy_basic_ports_array);
-static const int phy_all_ports_features_array[7] = {
+static const int phy_all_ports_features_array[7] __initconst = {
ETHTOOL_LINK_MODE_Autoneg_BIT,
ETHTOOL_LINK_MODE_TP_BIT,
ETHTOOL_LINK_MODE_MII_BIT,
@@ -101,30 +101,30 @@ static const int phy_all_ports_features_array[7] = {
ETHTOOL_LINK_MODE_Backplane_BIT,
};
-static const int phy_10_100_features_array[4] = {
+static const int phy_10_100_features_array[4] __initconst = {
ETHTOOL_LINK_MODE_10baseT_Half_BIT,
ETHTOOL_LINK_MODE_10baseT_Full_BIT,
ETHTOOL_LINK_MODE_100baseT_Half_BIT,
ETHTOOL_LINK_MODE_100baseT_Full_BIT,
};
-static const int phy_basic_t1_features_array[3] = {
+static const int phy_basic_t1_features_array[3] __initconst = {
ETHTOOL_LINK_MODE_TP_BIT,
ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
ETHTOOL_LINK_MODE_100baseT1_Full_BIT,
};
-static const int phy_basic_t1s_p2mp_features_array[2] = {
+static const int phy_basic_t1s_p2mp_features_array[2] __initconst = {
ETHTOOL_LINK_MODE_TP_BIT,
ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT,
};
-static const int phy_gbit_features_array[2] = {
+static const int phy_gbit_features_array[2] __initconst = {
ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
};
-static const int phy_eee_cap1_features_array[] = {
+static const int phy_eee_cap1_features_array[] __initconst = {
ETHTOOL_LINK_MODE_100baseT_Full_BIT,
ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
@@ -136,7 +136,7 @@ static const int phy_eee_cap1_features_array[] = {
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap1_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_eee_cap1_features);
-static const int phy_eee_cap2_features_array[] = {
+static const int phy_eee_cap2_features_array[] __initconst = {
ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
};
@@ -144,7 +144,7 @@ static const int phy_eee_cap2_features_array[] = {
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap2_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_eee_cap2_features);
-static void features_init(void)
+static void __init features_init(void)
{
/* 10/100 half/full*/
linkmode_set_bit_array(phy_basic_ports_array,
@@ -287,8 +287,7 @@ static bool phy_uses_state_machine(struct phy_device *phydev)
if (phydev->phy_link_change == phy_link_change)
return phydev->attached_dev && phydev->adjust_link;
- /* phydev->phy_link_change is implicitly phylink_phy_change() */
- return true;
+ return !!phydev->phy_link_change;
}
static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
@@ -1864,6 +1863,8 @@ void phy_detach(struct phy_device *phydev)
phydev->attached_dev = NULL;
phy_link_topo_del_phy(dev, phydev);
}
+
+ phydev->phy_link_change = NULL;
phydev->phylink = NULL;
if (!phydev->is_on_sfp_module)
@@ -3543,7 +3544,8 @@ static int phy_remove(struct device *dev)
* @new_driver: new phy_driver to register
* @owner: module owning this PHY
*/
-int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
+static int phy_driver_register(struct phy_driver *new_driver,
+ struct module *owner)
{
int retval;
@@ -3586,7 +3588,11 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
return 0;
}
-EXPORT_SYMBOL(phy_driver_register);
+
+static void phy_driver_unregister(struct phy_driver *drv)
+{
+ driver_unregister(&drv->mdiodrv.driver);
+}
int phy_drivers_register(struct phy_driver *new_driver, int n,
struct module *owner)
@@ -3605,12 +3611,6 @@ int phy_drivers_register(struct phy_driver *new_driver, int n,
}
EXPORT_SYMBOL(phy_drivers_register);
-void phy_driver_unregister(struct phy_driver *drv)
-{
- driver_unregister(&drv->mdiodrv.driver);
-}
-EXPORT_SYMBOL(phy_driver_unregister);
-
void phy_drivers_unregister(struct phy_driver *drv, int n)
{
int i;
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index c7f867b361dd..9d7799ea1c17 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -67,6 +67,8 @@ struct phylink {
struct timer_list link_poll;
struct mutex state_mutex;
+ /* Serialize updates to pl->phydev with phylink_resolve() */
+ struct mutex phydev_mutex;
struct phylink_link_state phy_state;
unsigned int phy_ib_mode;
struct work_struct resolve;
@@ -700,6 +702,9 @@ static int phylink_parse_fixedlink(struct phylink *pl,
return -EINVAL;
}
+ phylink_warn(pl, "%pfw uses deprecated array-style fixed-link binding!\n",
+ fwnode);
+
ret = fwnode_property_read_u32_array(fwnode, "fixed-link",
prop, ARRAY_SIZE(prop));
if (!ret) {
@@ -1016,6 +1021,42 @@ static void phylink_pcs_an_restart(struct phylink *pl)
pl->pcs->ops->pcs_an_restart(pl->pcs);
}
+enum inband_type {
+ INBAND_NONE,
+ INBAND_CISCO_SGMII,
+ INBAND_BASEX,
+};
+
+static enum inband_type phylink_get_inband_type(phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_QUSGMII:
+ case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_10G_QXGMII:
+ /* These protocols are designed for use with a PHY which
+ * communicates its negotiation result back to the MAC via
+ * inband communication. Note: there exist PHYs that run
+ * with SGMII but do not send the inband data.
+ */
+ return INBAND_CISCO_SGMII;
+
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ /* 1000base-X is designed for media-side use on fibre
+ * connections, and thus the Autoneg bit needs to be
+ * taken into account. We do the same for 2500base-X,
+ * but drivers may not support this and may need to
+ * override it.
+ */
+ return INBAND_BASEX;
+
+ default:
+ return INBAND_NONE;
+ }
+}
+
/**
* phylink_pcs_neg_mode() - helper to determine PCS inband mode
* @pl: a pointer to a &struct phylink returned from phylink_create()
@@ -1043,46 +1084,19 @@ static void phylink_pcs_neg_mode(struct phylink *pl, struct phylink_pcs *pcs,
unsigned int pcs_ib_caps = 0;
unsigned int phy_ib_caps = 0;
unsigned int neg_mode, mode;
- enum {
- INBAND_CISCO_SGMII,
- INBAND_BASEX,
- } type;
-
- mode = pl->req_link_an_mode;
-
- pl->phy_ib_mode = 0;
-
- switch (interface) {
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- case PHY_INTERFACE_MODE_QUSGMII:
- case PHY_INTERFACE_MODE_USXGMII:
- case PHY_INTERFACE_MODE_10G_QXGMII:
- /* These protocols are designed for use with a PHY which
- * communicates its negotiation result back to the MAC via
- * inband communication. Note: there exist PHYs that run
- * with SGMII but do not send the inband data.
- */
- type = INBAND_CISCO_SGMII;
- break;
-
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_2500BASEX:
- /* 1000base-X is designed for use media-side for Fibre
- * connections, and thus the Autoneg bit needs to be
- * taken into account. We also do this for 2500base-X
- * as well, but drivers may not support this, so may
- * need to override this.
- */
- type = INBAND_BASEX;
- break;
+ enum inband_type type;
- default:
+ type = phylink_get_inband_type(interface);
+ if (type == INBAND_NONE) {
pl->pcs_neg_mode = PHYLINK_PCS_NEG_NONE;
- pl->act_link_an_mode = mode;
+ pl->act_link_an_mode = pl->req_link_an_mode;
return;
}
+ mode = pl->req_link_an_mode;
+
+ pl->phy_ib_mode = 0;
+
if (pcs)
pcs_ib_caps = phylink_pcs_inband_caps(pcs, interface);
@@ -1423,6 +1437,7 @@ static void phylink_get_fixed_state(struct phylink *pl,
static void phylink_mac_initial_config(struct phylink *pl, bool force_restart)
{
struct phylink_link_state link_state;
+ struct phy_device *phy = pl->phydev;
switch (pl->req_link_an_mode) {
case MLO_AN_PHY:
@@ -1446,7 +1461,11 @@ static void phylink_mac_initial_config(struct phylink *pl, bool force_restart)
link_state.link = false;
phylink_apply_manual_flow(pl, &link_state);
+ if (phy)
+ mutex_lock(&phy->lock);
phylink_major_config(pl, force_restart, &link_state);
+ if (phy)
+ mutex_unlock(&phy->lock);
}
static const char *phylink_pause_to_str(int pause)
@@ -1582,8 +1601,13 @@ static void phylink_resolve(struct work_struct *w)
struct phylink_link_state link_state;
bool mac_config = false;
bool retrigger = false;
+ struct phy_device *phy;
bool cur_link_state;
+ mutex_lock(&pl->phydev_mutex);
+ phy = pl->phydev;
+ if (phy)
+ mutex_lock(&phy->lock);
mutex_lock(&pl->state_mutex);
cur_link_state = phylink_link_is_up(pl);
@@ -1617,11 +1641,11 @@ static void phylink_resolve(struct work_struct *w)
/* If we have a phy, the "up" state is the union of both the
* PHY and the MAC
*/
- if (pl->phydev)
+ if (phy)
link_state.link &= pl->phy_state.link;
/* Only update if the PHY link is up */
- if (pl->phydev && pl->phy_state.link) {
+ if (phy && pl->phy_state.link) {
/* If the interface has changed, force a link down
* event if the link isn't already down, and re-resolve.
*/
@@ -1685,6 +1709,9 @@ static void phylink_resolve(struct work_struct *w)
queue_work(system_power_efficient_wq, &pl->resolve);
}
mutex_unlock(&pl->state_mutex);
+ if (phy)
+ mutex_unlock(&phy->lock);
+ mutex_unlock(&pl->phydev_mutex);
}
static void phylink_run_resolve(struct phylink *pl)
@@ -1820,6 +1847,7 @@ struct phylink *phylink_create(struct phylink_config *config,
if (!pl)
return ERR_PTR(-ENOMEM);
+ mutex_init(&pl->phydev_mutex);
mutex_init(&pl->state_mutex);
INIT_WORK(&pl->resolve, phylink_resolve);
@@ -2080,6 +2108,7 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
dev_name(&phy->mdio.dev), phy->drv->name, irq_str);
kfree(irq_str);
+ mutex_lock(&pl->phydev_mutex);
mutex_lock(&phy->lock);
mutex_lock(&pl->state_mutex);
pl->phydev = phy;
@@ -2125,6 +2154,7 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
mutex_unlock(&pl->state_mutex);
mutex_unlock(&phy->lock);
+ mutex_unlock(&pl->phydev_mutex);
phylink_dbg(pl,
"phy: %s setting supported %*pb advertising %*pb\n",
@@ -2132,9 +2162,6 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
__ETHTOOL_LINK_MODE_MASK_NBITS, pl->supported,
__ETHTOOL_LINK_MODE_MASK_NBITS, phy->advertising);
- if (phy_interrupt_is_valid(phy))
- phy_request_interrupt(phy);
-
if (pl->config->mac_managed_pm)
phy->mac_managed_pm = true;
@@ -2151,6 +2178,9 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
ret = 0;
}
+ if (ret == 0 && phy_interrupt_is_valid(phy))
+ phy_request_interrupt(phy);
+
return ret;
}
@@ -2303,6 +2333,7 @@ void phylink_disconnect_phy(struct phylink *pl)
ASSERT_RTNL();
+ mutex_lock(&pl->phydev_mutex);
phy = pl->phydev;
if (phy) {
mutex_lock(&phy->lock);
@@ -2312,8 +2343,11 @@ void phylink_disconnect_phy(struct phylink *pl)
pl->mac_tx_clk_stop = false;
mutex_unlock(&pl->state_mutex);
mutex_unlock(&phy->lock);
- flush_work(&pl->resolve);
+ }
+ mutex_unlock(&pl->phydev_mutex);
+ if (phy) {
+ flush_work(&pl->resolve);
phy_disconnect(phy);
}
}
@@ -3625,6 +3659,7 @@ static int phylink_sfp_config_optical(struct phylink *pl)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(support);
struct phylink_link_state config;
+ enum inband_type inband_type;
phy_interface_t interface;
int ret;
@@ -3671,6 +3706,23 @@ static int phylink_sfp_config_optical(struct phylink *pl)
phylink_dbg(pl, "optical SFP: chosen %s interface\n",
phy_modes(interface));
+ inband_type = phylink_get_inband_type(interface);
+ if (inband_type == INBAND_NONE) {
+ /* If this is the sole interface, and there is no inband
+ * support, clear the advertising mask and Autoneg bit in
+ * the support mask. Otherwise, just clear the Autoneg bit
+ * in the advertising mask.
+ */
+ if (phy_interface_weight(pl->sfp_interfaces) == 1) {
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ pl->sfp_support);
+ linkmode_zero(config.advertising);
+ } else {
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ config.advertising);
+ }
+ }
+
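
The new block handles optical modules whose chosen interface has no
inband signalling: Autoneg always leaves the advertising mask, and when
the module offers only that single interface it leaves the support mask
too. A userspace sketch of the decision, with __builtin_popcountl()
standing in for phy_interface_weight() over a single-word mask:

#include <stdio.h>

int main(void)
{
	unsigned long sfp_interfaces = 1UL << 4; /* exactly one interface */
	int autoneg_supported = 1, autoneg_advertised = 1;

	if (__builtin_popcountl(sfp_interfaces) == 1) {
		/* sole interface, no inband: drop Autoneg entirely */
		autoneg_supported = 0;
		autoneg_advertised = 0;
	} else {
		/* other interfaces remain: just stop advertising it */
		autoneg_advertised = 0;
	}

	printf("supported=%d advertised=%d\n",
	       autoneg_supported, autoneg_advertised);
	return 0;
}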
if (!phylink_validate_pcs_inband_autoneg(pl, interface,
config.advertising)) {
phylink_err(pl, "autoneg setting not compatible with PCS");
@@ -3698,17 +3750,18 @@ static int phylink_sfp_config_optical(struct phylink *pl)
static int phylink_sfp_module_insert(void *upstream,
const struct sfp_eeprom_id *id)
{
+ const struct sfp_module_caps *caps;
struct phylink *pl = upstream;
ASSERT_RTNL();
- linkmode_zero(pl->sfp_support);
- phy_interface_zero(pl->sfp_interfaces);
- sfp_parse_support(pl->sfp_bus, id, pl->sfp_support, pl->sfp_interfaces);
- pl->sfp_port = sfp_parse_port(pl->sfp_bus, id, pl->sfp_support);
+ caps = sfp_get_module_caps(pl->sfp_bus);
+ phy_interface_copy(pl->sfp_interfaces, caps->interfaces);
+ linkmode_copy(pl->sfp_support, caps->link_modes);
+ pl->sfp_may_have_phy = caps->may_have_phy;
+ pl->sfp_port = caps->port;
/* If this module may have a PHY connecting later, defer until later */
- pl->sfp_may_have_phy = sfp_may_have_phy(pl->sfp_bus, id);
if (pl->sfp_may_have_phy)
return 0;
diff --git a/drivers/net/phy/qcom/at803x.c b/drivers/net/phy/qcom/at803x.c
index 51a132242462..338acd11a9b6 100644
--- a/drivers/net/phy/qcom/at803x.c
+++ b/drivers/net/phy/qcom/at803x.c
@@ -771,10 +771,10 @@ static int at8031_register_regulators(struct phy_device *phydev)
static int at8031_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
{
- struct phy_device *phydev = upstream;
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_support);
__ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
- DECLARE_PHY_INTERFACE_MASK(interfaces);
+ struct phy_device *phydev = upstream;
+ const struct sfp_module_caps *caps;
phy_interface_t iface;
linkmode_zero(phy_support);
@@ -784,12 +784,11 @@ static int at8031_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
phylink_set(phy_support, Pause);
phylink_set(phy_support, Asym_Pause);
- linkmode_zero(sfp_support);
- sfp_parse_support(phydev->sfp_bus, id, sfp_support, interfaces);
+ caps = sfp_get_module_caps(phydev->sfp_bus);
/* Some modules support 10G modes as well as others we support.
* Mask out non-supported modes so the correct interface is picked.
*/
- linkmode_and(sfp_support, phy_support, sfp_support);
+ linkmode_and(sfp_support, phy_support, caps->link_modes);
if (linkmode_empty(sfp_support)) {
dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
diff --git a/drivers/net/phy/qcom/qca807x.c b/drivers/net/phy/qcom/qca807x.c
index 04e84ebb646c..1be8295a95cb 100644
--- a/drivers/net/phy/qcom/qca807x.c
+++ b/drivers/net/phy/qcom/qca807x.c
@@ -427,7 +427,7 @@ static int qca807x_gpio(struct phy_device *phydev)
gc->get_direction = qca807x_gpio_get_direction;
gc->direction_output = qca807x_gpio_dir_out;
gc->get = qca807x_gpio_get;
- gc->set_rv = qca807x_gpio_set;
+ gc->set = qca807x_gpio_set;
return devm_gpiochip_add_data(dev, gc, priv);
}
@@ -646,13 +646,12 @@ exit:
static int qca807x_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
{
struct phy_device *phydev = upstream;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
+ const struct sfp_module_caps *caps;
phy_interface_t iface;
int ret;
- DECLARE_PHY_INTERFACE_MASK(interfaces);
- sfp_parse_support(phydev->sfp_bus, id, support, interfaces);
- iface = sfp_select_interface(phydev->sfp_bus, support);
+ caps = sfp_get_module_caps(phydev->sfp_bus);
+ iface = sfp_select_interface(phydev->sfp_bus, caps->link_modes);
dev_info(&phydev->mdio.dev, "%s SFP module inserted\n", phy_modes(iface));
diff --git a/drivers/net/phy/realtek/realtek_main.c b/drivers/net/phy/realtek/realtek_main.c
index dd0d675149ad..82d8e1335215 100644
--- a/drivers/net/phy/realtek/realtek_main.c
+++ b/drivers/net/phy/realtek/realtek_main.c
@@ -10,6 +10,7 @@
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/phy.h>
+#include <linux/pm_wakeirq.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/delay.h>
@@ -31,6 +32,7 @@
#define RTL821x_INER 0x12
#define RTL8211B_INER_INIT 0x6400
#define RTL8211E_INER_LINK_STATUS BIT(10)
+#define RTL8211F_INER_PME BIT(7)
#define RTL8211F_INER_LINK_STATUS BIT(4)
#define RTL821x_INSR 0x13
@@ -96,17 +98,13 @@
#define RTL8211F_RXCR 0x15
#define RTL8211F_RX_DELAY BIT(3)
-/* RTL8211F WOL interrupt configuration */
-#define RTL8211F_INTBCR_PAGE 0xd40
-#define RTL8211F_INTBCR 0x16
-#define RTL8211F_INTBCR_INTB_PMEB BIT(5)
-
/* RTL8211F WOL settings */
-#define RTL8211F_WOL_SETTINGS_PAGE 0xd8a
+#define RTL8211F_WOL_PAGE 0xd8a
#define RTL8211F_WOL_SETTINGS_EVENTS 16
#define RTL8211F_WOL_EVENT_MAGIC BIT(12)
-#define RTL8211F_WOL_SETTINGS_STATUS 17
-#define RTL8211F_WOL_STATUS_RESET (BIT(15) | 0x1fff)
+#define RTL8211F_WOL_RST_RMSQ 17
+#define RTL8211F_WOL_RG_RSTB BIT(15)
+#define RTL8211F_WOL_RMSQ 0x1fff
/* RTL8211F Unique physical and multicast address (WOL) */
#define RTL8211F_PHYSICAL_ADDR_PAGE 0xd8c
@@ -172,7 +170,8 @@ struct rtl821x_priv {
u16 phycr2;
bool has_phycr2;
struct clk *clk;
- u32 saved_wolopts;
+ /* rtl8211f */
+ u16 iner;
};
static int rtl821x_read_page(struct phy_device *phydev)
@@ -255,6 +254,34 @@ static int rtl821x_probe(struct phy_device *phydev)
return 0;
}
+static int rtl8211f_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ int ret;
+
+ ret = rtl821x_probe(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Disable all PME events */
+ ret = phy_write_paged(phydev, RTL8211F_WOL_PAGE,
+ RTL8211F_WOL_SETTINGS_EVENTS, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Mark this PHY as wakeup capable and register the interrupt as a
+ * wakeup IRQ if the PHY is marked as a wakeup source in firmware,
+ * and the interrupt is valid.
+ */
+ if (device_property_read_bool(dev, "wakeup-source") &&
+ phy_interrupt_is_valid(phydev)) {
+ device_set_wakeup_capable(dev, true);
+ devm_pm_set_wake_irq(dev, phydev->irq);
+ }
+
+ return ret;
+}
+
static int rtl8201_ack_interrupt(struct phy_device *phydev)
{
int err;
@@ -352,6 +379,7 @@ static int rtl8211e_config_intr(struct phy_device *phydev)
static int rtl8211f_config_intr(struct phy_device *phydev)
{
+ struct rtl821x_priv *priv = phydev->priv;
u16 val;
int err;
@@ -362,8 +390,10 @@ static int rtl8211f_config_intr(struct phy_device *phydev)
val = RTL8211F_INER_LINK_STATUS;
err = phy_write_paged(phydev, 0xa42, RTL821x_INER, val);
+ if (err == 0)
+ priv->iner = val;
} else {
- val = 0;
+ priv->iner = val = 0;
err = phy_write_paged(phydev, 0xa42, RTL821x_INER, val);
if (err)
return err;
@@ -426,21 +456,34 @@ static irqreturn_t rtl8211f_handle_interrupt(struct phy_device *phydev)
return IRQ_NONE;
}
- if (!(irq_status & RTL8211F_INER_LINK_STATUS))
- return IRQ_NONE;
+ if (irq_status & RTL8211F_INER_LINK_STATUS) {
+ phy_trigger_machine(phydev);
+ return IRQ_HANDLED;
+ }
- phy_trigger_machine(phydev);
+ if (irq_status & RTL8211F_INER_PME) {
+ pm_wakeup_event(&phydev->mdio.dev, 0);
+ return IRQ_HANDLED;
+ }
- return IRQ_HANDLED;
+ return IRQ_NONE;
}
static void rtl8211f_get_wol(struct phy_device *dev, struct ethtool_wolinfo *wol)
{
int wol_events;
+ /* If the PHY is not capable of waking the system, then WoL cannot
+ * be supported.
+ */
+ if (!device_can_wakeup(&dev->mdio.dev)) {
+ wol->supported = 0;
+ return;
+ }
+
wol->supported = WAKE_MAGIC;
- wol_events = phy_read_paged(dev, RTL8211F_WOL_SETTINGS_PAGE, RTL8211F_WOL_SETTINGS_EVENTS);
+ wol_events = phy_read_paged(dev, RTL8211F_WOL_PAGE, RTL8211F_WOL_SETTINGS_EVENTS);
if (wol_events < 0)
return;
@@ -453,6 +496,9 @@ static int rtl8211f_set_wol(struct phy_device *dev, struct ethtool_wolinfo *wol)
const u8 *mac_addr = dev->attached_dev->dev_addr;
int oldpage;
+ if (!device_can_wakeup(&dev->mdio.dev))
+ return -EOPNOTSUPP;
+
oldpage = phy_save_page(dev);
if (oldpage < 0)
goto err;
@@ -464,25 +510,23 @@ static int rtl8211f_set_wol(struct phy_device *dev, struct ethtool_wolinfo *wol)
__phy_write(dev, RTL8211F_PHYSICAL_ADDR_WORD1, mac_addr[3] << 8 | (mac_addr[2]));
__phy_write(dev, RTL8211F_PHYSICAL_ADDR_WORD2, mac_addr[5] << 8 | (mac_addr[4]));
- /* Enable magic packet matching and reset WOL status */
- rtl821x_write_page(dev, RTL8211F_WOL_SETTINGS_PAGE);
+ /* Enable magic packet matching */
+ rtl821x_write_page(dev, RTL8211F_WOL_PAGE);
__phy_write(dev, RTL8211F_WOL_SETTINGS_EVENTS, RTL8211F_WOL_EVENT_MAGIC);
- __phy_write(dev, RTL8211F_WOL_SETTINGS_STATUS, RTL8211F_WOL_STATUS_RESET);
-
- /* Enable the WOL interrupt */
- rtl821x_write_page(dev, RTL8211F_INTBCR_PAGE);
- __phy_set_bits(dev, RTL8211F_INTBCR, RTL8211F_INTBCR_INTB_PMEB);
+ /* Set the maximum packet size, and assert WoL reset */
+ __phy_write(dev, RTL8211F_WOL_RST_RMSQ, RTL8211F_WOL_RMSQ);
} else {
- /* Disable the WOL interrupt */
- rtl821x_write_page(dev, RTL8211F_INTBCR_PAGE);
- __phy_clear_bits(dev, RTL8211F_INTBCR, RTL8211F_INTBCR_INTB_PMEB);
-
- /* Disable magic packet matching and reset WOL status */
- rtl821x_write_page(dev, RTL8211F_WOL_SETTINGS_PAGE);
+ /* Disable magic packet matching */
+ rtl821x_write_page(dev, RTL8211F_WOL_PAGE);
__phy_write(dev, RTL8211F_WOL_SETTINGS_EVENTS, 0);
- __phy_write(dev, RTL8211F_WOL_SETTINGS_STATUS, RTL8211F_WOL_STATUS_RESET);
+
+ /* Place WoL in reset */
+ __phy_clear_bits(dev, RTL8211F_WOL_RST_RMSQ,
+ RTL8211F_WOL_RG_RSTB);
}
+ device_set_wakeup_enable(&dev->mdio.dev, !!(wol->wolopts & WAKE_MAGIC));
+
err:
return phy_restore_page(dev, oldpage, 0);
}
@@ -628,6 +672,52 @@ static int rtl821x_suspend(struct phy_device *phydev)
return ret;
}
+static int rtl8211f_suspend(struct phy_device *phydev)
+{
+ u16 wol_rst;
+ int ret;
+
+ ret = rtl821x_suspend(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* If a PME event is enabled, then configure the interrupt for
+ * PME events only, disabling the link interrupt. We avoid switching
+ * to PMEB mode as we don't have a status bit for that.
+ */
+ if (device_may_wakeup(&phydev->mdio.dev)) {
+ ret = phy_write_paged(phydev, 0xa42, RTL821x_INER,
+ RTL8211F_INER_PME);
+ if (ret < 0)
+ goto err;
+
+ /* Read the INSR to clear any pending interrupt */
+ phy_read_paged(phydev, RTL8211F_INSR_PAGE, RTL8211F_INSR);
+
+ /* Reset the WoL to ensure that an event is picked up.
+ * Unless we do this, even if we receive another packet,
+ * we may not have a PME interrupt raised.
+ */
+ ret = phy_read_paged(phydev, RTL8211F_WOL_PAGE,
+ RTL8211F_WOL_RST_RMSQ);
+ if (ret < 0)
+ goto err;
+
+ wol_rst = ret & ~RTL8211F_WOL_RG_RSTB;
+ ret = phy_write_paged(phydev, RTL8211F_WOL_PAGE,
+ RTL8211F_WOL_RST_RMSQ, wol_rst);
+ if (ret < 0)
+ goto err;
+
+ wol_rst |= RTL8211F_WOL_RG_RSTB;
+ ret = phy_write_paged(phydev, RTL8211F_WOL_PAGE,
+ RTL8211F_WOL_RST_RMSQ, wol_rst);
+ }
+
+err:
+ return ret;
+}
+
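
The suspend path re-arms the WoL engine by pulsing RG_RSTB: read the
reset/max-packet-size register, write it back with bit 15 clear, then
again with bit 15 set, so a magic packet arriving during suspend still
raises PME. A minimal mock of that toggle; only bit 15 corresponds to
the real RTL8211F_WOL_RG_RSTB, the rest of the register image is
simulated:

#include <stdint.h>
#include <stdio.h>

#define WOL_RG_RSTB	(1u << 15)

static uint16_t wol_reg = WOL_RG_RSTB | 0x1fff; /* armed, max packet size */

int main(void)
{
	uint16_t val = wol_reg;		/* read */

	val &= ~WOL_RG_RSTB;		/* assert WoL reset */
	wol_reg = val;
	printf("in reset: 0x%04x\n", wol_reg);

	val |= WOL_RG_RSTB;		/* release reset: WoL re-armed */
	wol_reg = val;
	printf("re-armed: 0x%04x\n", wol_reg);
	return 0;
}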
static int rtl821x_resume(struct phy_device *phydev)
{
struct rtl821x_priv *priv = phydev->priv;
@@ -645,10 +735,29 @@ static int rtl821x_resume(struct phy_device *phydev)
return 0;
}
+static int rtl8211f_resume(struct phy_device *phydev)
+{
+ struct rtl821x_priv *priv = phydev->priv;
+ int ret;
+
+ ret = rtl821x_resume(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* If the device was programmed for a PME event, restore the interrupt
+ * enable so phylib can receive link state interrupts.
+ */
+ if (device_may_wakeup(&phydev->mdio.dev))
+ ret = phy_write_paged(phydev, 0xa42, RTL821x_INER, priv->iner);
+
+ return ret;
+}
+
static int rtl8211x_led_hw_is_supported(struct phy_device *phydev, u8 index,
unsigned long rules)
{
- const unsigned long mask = BIT(TRIGGER_NETDEV_LINK_10) |
+ const unsigned long mask = BIT(TRIGGER_NETDEV_LINK) |
+ BIT(TRIGGER_NETDEV_LINK_10) |
BIT(TRIGGER_NETDEV_LINK_100) |
BIT(TRIGGER_NETDEV_LINK_1000) |
BIT(TRIGGER_NETDEV_RX) |
@@ -706,6 +815,12 @@ static int rtl8211f_led_hw_control_get(struct phy_device *phydev, u8 index,
if (val & RTL8211F_LEDCR_LINK_1000)
__set_bit(TRIGGER_NETDEV_LINK_1000, rules);
+ if ((val & RTL8211F_LEDCR_LINK_10) &&
+ (val & RTL8211F_LEDCR_LINK_100) &&
+ (val & RTL8211F_LEDCR_LINK_1000)) {
+ __set_bit(TRIGGER_NETDEV_LINK, rules);
+ }
+
if (val & RTL8211F_LEDCR_ACT_TXRX) {
__set_bit(TRIGGER_NETDEV_RX, rules);
__set_bit(TRIGGER_NETDEV_TX, rules);
@@ -723,14 +838,20 @@ static int rtl8211f_led_hw_control_set(struct phy_device *phydev, u8 index,
if (index >= RTL8211x_LED_COUNT)
return -EINVAL;
- if (test_bit(TRIGGER_NETDEV_LINK_10, &rules))
+ if (test_bit(TRIGGER_NETDEV_LINK, &rules) ||
+ test_bit(TRIGGER_NETDEV_LINK_10, &rules)) {
reg |= RTL8211F_LEDCR_LINK_10;
+ }
- if (test_bit(TRIGGER_NETDEV_LINK_100, &rules))
+ if (test_bit(TRIGGER_NETDEV_LINK, &rules) ||
+ test_bit(TRIGGER_NETDEV_LINK_100, &rules)) {
reg |= RTL8211F_LEDCR_LINK_100;
+ }
- if (test_bit(TRIGGER_NETDEV_LINK_1000, &rules))
+ if (test_bit(TRIGGER_NETDEV_LINK, &rules) ||
+ test_bit(TRIGGER_NETDEV_LINK_1000, &rules)) {
reg |= RTL8211F_LEDCR_LINK_1000;
+ }
if (test_bit(TRIGGER_NETDEV_RX, &rules) ||
test_bit(TRIGGER_NETDEV_TX, &rules)) {
@@ -778,6 +899,12 @@ static int rtl8211e_led_hw_control_get(struct phy_device *phydev, u8 index,
if (cr2 & RTL8211E_LEDCR2_LINK_1000)
__set_bit(TRIGGER_NETDEV_LINK_1000, rules);
+ if ((cr2 & RTL8211E_LEDCR2_LINK_10) &&
+ (cr2 & RTL8211E_LEDCR2_LINK_100) &&
+ (cr2 & RTL8211E_LEDCR2_LINK_1000)) {
+ __set_bit(TRIGGER_NETDEV_LINK, rules);
+ }
+
return ret;
}
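
The LED hunks in this file give TRIGGER_NETDEV_LINK a consistent
meaning: on read it is reported only when all three per-speed link bits
are set, and on write it expands into all three. A compact stand-alone
model of that get/set mapping, with illustrative bit values:

#include <stdio.h>

#define LED_LINK_10	(1u << 0)
#define LED_LINK_100	(1u << 1)
#define LED_LINK_1000	(1u << 2)
#define LED_LINK_ALL	(LED_LINK_10 | LED_LINK_100 | LED_LINK_1000)

int main(void)
{
	unsigned int reg = 0;
	int want_link = 1;	/* TRIGGER_NETDEV_LINK requested */

	if (want_link)
		reg |= LED_LINK_ALL;	/* set: program every speed bit */

	/* get: LINK is the conjunction of the three speed bits */
	int got_link = (reg & LED_LINK_ALL) == LED_LINK_ALL;

	printf("reg=0x%x link=%d\n", reg, got_link);
	return 0;
}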
@@ -805,14 +932,20 @@ static int rtl8211e_led_hw_control_set(struct phy_device *phydev, u8 index,
if (ret < 0)
return ret;
- if (test_bit(TRIGGER_NETDEV_LINK_10, &rules))
+ if (test_bit(TRIGGER_NETDEV_LINK, &rules) ||
+ test_bit(TRIGGER_NETDEV_LINK_10, &rules)) {
cr2 |= RTL8211E_LEDCR2_LINK_10;
+ }
- if (test_bit(TRIGGER_NETDEV_LINK_100, &rules))
+ if (test_bit(TRIGGER_NETDEV_LINK, &rules) ||
+ test_bit(TRIGGER_NETDEV_LINK_100, &rules)) {
cr2 |= RTL8211E_LEDCR2_LINK_100;
+ }
- if (test_bit(TRIGGER_NETDEV_LINK_1000, &rules))
+ if (test_bit(TRIGGER_NETDEV_LINK, &rules) ||
+ test_bit(TRIGGER_NETDEV_LINK_1000, &rules)) {
cr2 |= RTL8211E_LEDCR2_LINK_1000;
+ }
cr2 <<= RTL8211E_LEDCR2_SHIFT * index;
ret = rtl821x_modify_ext_page(phydev, RTL8211E_LEDCR_EXT_PAGE,
@@ -1038,7 +1171,7 @@ static int rtl822x_probe(struct phy_device *phydev)
return 0;
}
-static int rtl822xb_config_init(struct phy_device *phydev)
+static int rtl822x_set_serdes_option_mode(struct phy_device *phydev, bool gen1)
{
bool has_2500, has_sgmii;
u16 mode;
@@ -1073,15 +1206,18 @@ static int rtl822xb_config_init(struct phy_device *phydev)
/* the following sequence with magic numbers sets up the SerDes
* option mode
*/
- ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x75f3, 0);
- if (ret < 0)
- return ret;
+
+ if (!gen1) {
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x75f3, 0);
+ if (ret < 0)
+ return ret;
+ }
ret = phy_modify_mmd_changed(phydev, MDIO_MMD_VEND1,
RTL822X_VND1_SERDES_OPTION,
RTL822X_VND1_SERDES_OPTION_MODE_MASK,
mode);
- if (ret < 0)
+ if (gen1 || ret < 0)
return ret;
ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x6a04, 0x0503);
@@ -1095,6 +1231,16 @@ static int rtl822xb_config_init(struct phy_device *phydev)
return phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x6f11, 0x8020);
}
+static int rtl822x_config_init(struct phy_device *phydev)
+{
+ return rtl822x_set_serdes_option_mode(phydev, true);
+}
+
+static int rtl822xb_config_init(struct phy_device *phydev)
+{
+ return rtl822x_set_serdes_option_mode(phydev, false);
+}
+
static int rtl822xb_get_rate_matching(struct phy_device *phydev,
phy_interface_t iface)
{
@@ -1280,6 +1426,21 @@ static int rtl822x_c45_read_status(struct phy_device *phydev)
return 0;
}
+static int rtl822x_c45_soft_reset(struct phy_device *phydev)
+{
+ int ret, val;
+
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_CTRL1_RESET, MDIO_CTRL1_RESET);
+ if (ret < 0)
+ return ret;
+
+ return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_PMAPMD,
+ MDIO_CTRL1, val,
+ !(val & MDIO_CTRL1_RESET),
+ 5000, 100000, true);
+}
+
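
rtl822x_c45_soft_reset() sets the self-clearing MDIO_CTRL1_RESET bit in
the PMA/PMD MMD, then lets phy_read_mmd_poll_timeout() poll it back down
with 5 ms steps inside a 100 ms budget. The control flow, with the
hardware mocked as a countdown that clears after a few reads:

#include <stdio.h>

#define CTRL1_RESET	(1u << 15)

static int countdown = 3;

/* mock register read: reset bit stays set for the first three reads */
static unsigned int read_ctrl1(void)
{
	return countdown-- > 0 ? CTRL1_RESET : 0;
}

int main(void)
{
	int tries = 100000 / 5000;	/* timeout_us / sleep_us, as above */

	while (tries--) {
		if (!(read_ctrl1() & CTRL1_RESET)) {
			puts("reset complete");
			return 0;
		}
		/* the kernel would usleep_range(5000, ...) here */
	}
	puts("timed out");		/* -ETIMEDOUT in the kernel */
	return 1;
}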
static int rtl822xb_c45_read_status(struct phy_device *phydev)
{
int ret;
@@ -1612,15 +1773,15 @@ static struct phy_driver realtek_drvs[] = {
}, {
PHY_ID_MATCH_EXACT(0x001cc916),
.name = "RTL8211F Gigabit Ethernet",
- .probe = rtl821x_probe,
+ .probe = rtl8211f_probe,
.config_init = &rtl8211f_config_init,
.read_status = rtlgen_read_status,
.config_intr = &rtl8211f_config_intr,
.handle_interrupt = rtl8211f_handle_interrupt,
.set_wol = rtl8211f_set_wol,
.get_wol = rtl8211f_get_wol,
- .suspend = rtl821x_suspend,
- .resume = rtl821x_resume,
+ .suspend = rtl8211f_suspend,
+ .resume = rtl8211f_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
.flags = PHY_ALWAYS_CALL_SUSPEND,
@@ -1675,13 +1836,13 @@ static struct phy_driver realtek_drvs[] = {
}, {
PHY_ID_MATCH_EXACT(0x001cc838),
.name = "RTL8226-CG 2.5Gbps PHY",
- .get_features = rtl822x_get_features,
- .config_aneg = rtl822x_config_aneg,
- .read_status = rtl822x_read_status,
- .suspend = genphy_suspend,
- .resume = rtlgen_resume,
- .read_page = rtl821x_read_page,
- .write_page = rtl821x_write_page,
+ .soft_reset = rtl822x_c45_soft_reset,
+ .get_features = rtl822x_c45_get_features,
+ .config_aneg = rtl822x_c45_config_aneg,
+ .config_init = rtl822x_config_init,
+ .read_status = rtl822xb_c45_read_status,
+ .suspend = genphy_c45_pma_suspend,
+ .resume = rtlgen_c45_resume,
}, {
PHY_ID_MATCH_EXACT(0x001cc848),
.name = "RTL8226B-CG_RTL8221B-CG 2.5Gbps PHY",
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index f13c00b5b449..b945d75966d5 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -22,7 +22,6 @@ struct sfp_bus {
const struct sfp_socket_ops *socket_ops;
struct device *sfp_dev;
struct sfp *sfp;
- const struct sfp_quirk *sfp_quirk;
const struct sfp_upstream_ops *upstream_ops;
void *upstream;
@@ -30,24 +29,18 @@ struct sfp_bus {
bool registered;
bool started;
+
+ struct sfp_module_caps caps;
};
-/**
- * sfp_parse_port() - Parse the EEPROM base ID, setting the port type
- * @bus: a pointer to the &struct sfp_bus structure for the sfp module
- * @id: a pointer to the module's &struct sfp_eeprom_id
- * @support: optional pointer to an array of unsigned long for the
- * ethtool support mask
- *
- * Parse the EEPROM identification given in @id, and return one of
- * %PORT_TP, %PORT_FIBRE or %PORT_OTHER. If @support is non-%NULL,
- * also set the ethtool %ETHTOOL_LINK_MODE_xxx_BIT corresponding with
- * the connector type.
- *
- * If the port type is not known, returns %PORT_OTHER.
- */
-int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
- unsigned long *support)
+const struct sfp_module_caps *sfp_get_module_caps(struct sfp_bus *bus)
+{
+ return &bus->caps;
+}
+EXPORT_SYMBOL_GPL(sfp_get_module_caps);
+
+static void sfp_module_parse_port(struct sfp_bus *bus,
+ const struct sfp_eeprom_id *id)
{
int port;
@@ -91,34 +84,26 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
break;
}
- if (support) {
- switch (port) {
- case PORT_FIBRE:
- phylink_set(support, FIBRE);
- break;
+ switch (port) {
+ case PORT_FIBRE:
+ phylink_set(bus->caps.link_modes, FIBRE);
+ break;
- case PORT_TP:
- phylink_set(support, TP);
- break;
- }
+ case PORT_TP:
+ phylink_set(bus->caps.link_modes, TP);
+ break;
}
- return port;
+ bus->caps.port = port;
}
-EXPORT_SYMBOL_GPL(sfp_parse_port);
-/**
- * sfp_may_have_phy() - indicate whether the module may have a PHY
- * @bus: a pointer to the &struct sfp_bus structure for the sfp module
- * @id: a pointer to the module's &struct sfp_eeprom_id
- *
- * Parse the EEPROM identification given in @id, and return whether
- * this module may have a PHY.
- */
-bool sfp_may_have_phy(struct sfp_bus *bus, const struct sfp_eeprom_id *id)
+static void sfp_module_parse_may_have_phy(struct sfp_bus *bus,
+ const struct sfp_eeprom_id *id)
{
- if (id->base.e1000_base_t)
- return true;
+ if (id->base.e1000_base_t) {
+ bus->caps.may_have_phy = true;
+ return;
+ }
if (id->base.phys_id != SFF8024_ID_DWDM_SFP) {
switch (id->base.extended_cc) {
@@ -126,30 +111,20 @@ bool sfp_may_have_phy(struct sfp_bus *bus, const struct sfp_eeprom_id *id)
case SFF8024_ECC_10GBASE_T_SR:
case SFF8024_ECC_5GBASE_T:
case SFF8024_ECC_2_5GBASE_T:
- return true;
+ bus->caps.may_have_phy = true;
+ return;
}
}
- return false;
+ bus->caps.may_have_phy = false;
}
-EXPORT_SYMBOL_GPL(sfp_may_have_phy);
-/**
- * sfp_parse_support() - Parse the eeprom id for supported link modes
- * @bus: a pointer to the &struct sfp_bus structure for the sfp module
- * @id: a pointer to the module's &struct sfp_eeprom_id
- * @support: pointer to an array of unsigned long for the ethtool support mask
- * @interfaces: pointer to an array of unsigned long for phy interface modes
- * mask
- *
- * Parse the EEPROM identification information and derive the supported
- * ethtool link modes for the module.
- */
-void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
- unsigned long *support, unsigned long *interfaces)
+static void sfp_module_parse_support(struct sfp_bus *bus,
+ const struct sfp_eeprom_id *id)
{
+ unsigned long *interfaces = bus->caps.interfaces;
+ unsigned long *modes = bus->caps.link_modes;
unsigned int br_min, br_nom, br_max;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = { 0, };
/* Decode the bitrate information to MBd */
br_min = br_nom = br_max = 0;
@@ -338,13 +313,21 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
phylink_set(modes, Autoneg);
phylink_set(modes, Pause);
phylink_set(modes, Asym_Pause);
+}
- if (bus->sfp_quirk && bus->sfp_quirk->modes)
- bus->sfp_quirk->modes(id, modes, interfaces);
+static void sfp_init_module(struct sfp_bus *bus,
+ const struct sfp_eeprom_id *id,
+ const struct sfp_quirk *quirk)
+{
+ memset(&bus->caps, 0, sizeof(bus->caps));
+
+ sfp_module_parse_support(bus, id);
+ sfp_module_parse_port(bus, id);
+ sfp_module_parse_may_have_phy(bus, id);
- linkmode_or(support, support, modes);
+ if (quirk && quirk->support)
+ quirk->support(id, &bus->caps);
}
-EXPORT_SYMBOL_GPL(sfp_parse_support);
/**
* sfp_select_interface() - Select appropriate phy_interface_t mode
@@ -794,7 +777,7 @@ int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus);
int ret = 0;
- bus->sfp_quirk = quirk;
+ sfp_init_module(bus, id, quirk);
if (ops && ops->module_insert)
ret = ops->module_insert(bus->upstream, id);
@@ -809,8 +792,6 @@ void sfp_module_remove(struct sfp_bus *bus)
if (ops && ops->module_remove)
ops->module_remove(bus->upstream);
-
- bus->sfp_quirk = NULL;
}
EXPORT_SYMBOL_GPL(sfp_module_remove);
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 5347c95d1e77..0401fa6b24d2 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -17,7 +17,6 @@
#include <linux/workqueue.h>
#include "sfp.h"
-#include "swphy.h"
enum {
GPIO_MODDEF0,
@@ -221,6 +220,8 @@ static const enum gpiod_flags gpio_flags[] = {
*/
#define SFP_EEPROM_BLOCK_SIZE 16
+#define SFP_POLL_INTERVAL msecs_to_jiffies(100)
+
struct sff_data {
unsigned int gpios;
bool (*module_supported)(const struct sfp_eeprom_id *id);
@@ -299,6 +300,11 @@ struct sfp {
#endif
};
+static void sfp_schedule_poll(struct sfp *sfp)
+{
+ mod_delayed_work(system_percpu_wq, &sfp->poll, SFP_POLL_INTERVAL);
+}
+
static bool sff_module_supported(const struct sfp_eeprom_id *id)
{
return id->base.phys_id == SFF8024_ID_SFF_8472 &&
@@ -440,45 +446,44 @@ static void sfp_fixup_rollball_cc(struct sfp *sfp)
}
static void sfp_quirk_2500basex(const struct sfp_eeprom_id *id,
- unsigned long *modes,
- unsigned long *interfaces)
+ struct sfp_module_caps *caps)
{
- linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, modes);
- __set_bit(PHY_INTERFACE_MODE_2500BASEX, interfaces);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
+ caps->link_modes);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, caps->interfaces);
}
static void sfp_quirk_disable_autoneg(const struct sfp_eeprom_id *id,
- unsigned long *modes,
- unsigned long *interfaces)
+ struct sfp_module_caps *caps)
{
- linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, modes);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, caps->link_modes);
}
static void sfp_quirk_oem_2_5g(const struct sfp_eeprom_id *id,
- unsigned long *modes,
- unsigned long *interfaces)
+ struct sfp_module_caps *caps)
{
/* Copper 2.5G SFP */
- linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, modes);
- __set_bit(PHY_INTERFACE_MODE_2500BASEX, interfaces);
- sfp_quirk_disable_autoneg(id, modes, interfaces);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ caps->link_modes);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, caps->interfaces);
+ sfp_quirk_disable_autoneg(id, caps);
}
static void sfp_quirk_ubnt_uf_instant(const struct sfp_eeprom_id *id,
- unsigned long *modes,
- unsigned long *interfaces)
+ struct sfp_module_caps *caps)
{
/* Ubiquiti U-Fiber Instant module claims to support all transceiver
* types, including 10G Ethernet, which is not true. So clear all claimed
* modes and set the only mode the module actually supports: 1000baseX_Full.
*/
- linkmode_zero(modes);
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, modes);
+ linkmode_zero(caps->link_modes);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ caps->link_modes);
}
-#define SFP_QUIRK(_v, _p, _m, _f) \
- { .vendor = _v, .part = _p, .modes = _m, .fixup = _f, }
-#define SFP_QUIRK_M(_v, _p, _m) SFP_QUIRK(_v, _p, _m, NULL)
+#define SFP_QUIRK(_v, _p, _s, _f) \
+ { .vendor = _v, .part = _p, .support = _s, .fixup = _f, }
+#define SFP_QUIRK_S(_v, _p, _s) SFP_QUIRK(_v, _p, _s, NULL)
#define SFP_QUIRK_F(_v, _p, _f) SFP_QUIRK(_v, _p, NULL, _f)
static const struct sfp_quirk sfp_quirks[] = {
@@ -492,6 +497,9 @@ static const struct sfp_quirk sfp_quirks[] = {
SFP_QUIRK("ALCATELLUCENT", "3FE46541AA", sfp_quirk_2500basex,
sfp_fixup_nokia),
+ // FLYPRO SFP-10GT-CS-30M uses the Rollball protocol to talk to the PHY.
+ SFP_QUIRK_F("FLYPRO", "SFP-10GT-CS-30M", sfp_fixup_rollball),
+
// Fiberstore SFP-10G-T doesn't identify as copper, uses the Rollball
// protocol to talk to the PHY and needs 4 sec wait before probing the
// PHY.
@@ -511,7 +519,7 @@ static const struct sfp_quirk sfp_quirks[] = {
// HG MXPD-483II-F 2.5G supports 2500Base-X, but incorrectly reports
// 2600MBd in its EEPROM
- SFP_QUIRK_M("HG GENUINE", "MXPD-483II", sfp_quirk_2500basex),
+ SFP_QUIRK_S("HG GENUINE", "MXPD-483II", sfp_quirk_2500basex),
// Huawei MA5671A can operate at 2500base-X, but reports 1.2GBd NRZ in
// its EEPROM
@@ -520,9 +528,9 @@ static const struct sfp_quirk sfp_quirks[] = {
// Lantech 8330-262D-E can operate at 2500base-X, but incorrectly reports
// 2500MBd NRZ in its EEPROM
- SFP_QUIRK_M("Lantech", "8330-262D-E", sfp_quirk_2500basex),
+ SFP_QUIRK_S("Lantech", "8330-262D-E", sfp_quirk_2500basex),
- SFP_QUIRK_M("UBNT", "UF-INSTANT", sfp_quirk_ubnt_uf_instant),
+ SFP_QUIRK_S("UBNT", "UF-INSTANT", sfp_quirk_ubnt_uf_instant),
// Walsun HXSX-ATR[CI]-1 don't identify as copper, and use the
// Rollball protocol to talk to the PHY.
@@ -535,9 +543,9 @@ static const struct sfp_quirk sfp_quirks[] = {
SFP_QUIRK_F("OEM", "SFP-GE-T", sfp_fixup_ignore_tx_fault),
SFP_QUIRK_F("OEM", "SFP-10G-T", sfp_fixup_rollball_cc),
- SFP_QUIRK_M("OEM", "SFP-2.5G-T", sfp_quirk_oem_2_5g),
- SFP_QUIRK_M("OEM", "SFP-2.5G-BX10-D", sfp_quirk_2500basex),
- SFP_QUIRK_M("OEM", "SFP-2.5G-BX10-U", sfp_quirk_2500basex),
+ SFP_QUIRK_S("OEM", "SFP-2.5G-T", sfp_quirk_oem_2_5g),
+ SFP_QUIRK_S("OEM", "SFP-2.5G-BX10-D", sfp_quirk_2500basex),
+ SFP_QUIRK_S("OEM", "SFP-2.5G-BX10-U", sfp_quirk_2500basex),
SFP_QUIRK_F("OEM", "RTSFP-10", sfp_fixup_rollball_cc),
SFP_QUIRK_F("OEM", "RTSFP-10G", sfp_fixup_rollball_cc),
SFP_QUIRK_F("Turris", "RTSFP-2.5G", sfp_fixup_rollball),
@@ -585,8 +593,6 @@ static const struct sfp_quirk *sfp_lookup_quirk(const struct sfp_eeprom_id *id)
return NULL;
}
-static unsigned long poll_jiffies;
-
static unsigned int sfp_gpio_get_state(struct sfp *sfp)
{
unsigned int i, state, v;
@@ -909,7 +915,7 @@ static void sfp_soft_start_poll(struct sfp *sfp)
if (sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT) &&
!sfp->need_poll)
- mod_delayed_work(system_wq, &sfp->poll, poll_jiffies);
+ sfp_schedule_poll(sfp);
mutex_unlock(&sfp->st_mutex);
}
@@ -1680,7 +1686,7 @@ static void sfp_hwmon_probe(struct work_struct *work)
err = sfp_read(sfp, true, 0, &sfp->diag, sizeof(sfp->diag));
if (err < 0) {
if (sfp->hwmon_tries--) {
- mod_delayed_work(system_wq, &sfp->hwmon_probe,
+ mod_delayed_work(system_percpu_wq, &sfp->hwmon_probe,
T_PROBE_RETRY_SLOW);
} else {
dev_warn(sfp->dev, "hwmon probe failed: %pe\n",
@@ -1707,7 +1713,7 @@ static void sfp_hwmon_probe(struct work_struct *work)
static int sfp_hwmon_insert(struct sfp *sfp)
{
if (sfp->have_a2 && sfp->id.ext.diagmon & SFP_DIAGMON_DDM) {
- mod_delayed_work(system_wq, &sfp->hwmon_probe, 1);
+ mod_delayed_work(system_percpu_wq, &sfp->hwmon_probe, 1);
sfp->hwmon_tries = R_PROBE_RETRY_SLOW;
}
@@ -2561,7 +2567,7 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event)
/* Force a poll to re-read the hardware signal state after
* sfp_sm_mod_probe() changed state_hw_mask.
*/
- mod_delayed_work(system_wq, &sfp->poll, 1);
+ mod_delayed_work(system_percpu_wq, &sfp->poll, 1);
err = sfp_hwmon_insert(sfp);
if (err)
@@ -3006,7 +3012,7 @@ static void sfp_poll(struct work_struct *work)
// it's unimportant if we race while reading this.
if (sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT) ||
sfp->need_poll)
- mod_delayed_work(system_wq, &sfp->poll, poll_jiffies);
+ sfp_schedule_poll(sfp);
}
static struct sfp *sfp_alloc(struct device *dev)
@@ -3176,7 +3182,7 @@ static int sfp_probe(struct platform_device *pdev)
}
if (sfp->need_poll)
- mod_delayed_work(system_wq, &sfp->poll, poll_jiffies);
+ sfp_schedule_poll(sfp);
/* We could have an issue in cases where no Tx disable pin is available
* or wired, as modules using a laser as their light source will continue to
@@ -3243,19 +3249,7 @@ static struct platform_driver sfp_driver = {
},
};
-static int sfp_init(void)
-{
- poll_jiffies = msecs_to_jiffies(100);
-
- return platform_driver_register(&sfp_driver);
-}
-module_init(sfp_init);
-
-static void sfp_exit(void)
-{
- platform_driver_unregister(&sfp_driver);
-}
-module_exit(sfp_exit);
+module_platform_driver(sfp_driver);
MODULE_ALIAS("platform:sfp");
MODULE_AUTHOR("Russell King");
diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h
index 1fd097dccb9f..879dff7afe6a 100644
--- a/drivers/net/phy/sfp.h
+++ b/drivers/net/phy/sfp.h
@@ -9,8 +9,8 @@ struct sfp;
struct sfp_quirk {
const char *vendor;
const char *part;
- void (*modes)(const struct sfp_eeprom_id *id, unsigned long *modes,
- unsigned long *interfaces);
+ void (*support)(const struct sfp_eeprom_id *id,
+ struct sfp_module_caps *caps);
void (*fixup)(struct sfp *sfp);
};
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index b6489da5cfcd..48487149c225 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -785,6 +785,7 @@ static struct phy_driver smsc_phy_driver[] = {
/* PHY_BASIC_FEATURES */
+ .flags = PHY_RST_AFTER_CLK_EN,
.probe = smsc_phy_probe,
/* basic functions */
diff --git a/drivers/net/ppp/Kconfig b/drivers/net/ppp/Kconfig
index 8c9ed1889d1a..a1806b4b84be 100644
--- a/drivers/net/ppp/Kconfig
+++ b/drivers/net/ppp/Kconfig
@@ -85,9 +85,8 @@ config PPP_FILTER
config PPP_MPPE
tristate "PPP MPPE compression (encryption)"
depends on PPP
- select CRYPTO
- select CRYPTO_SHA1
select CRYPTO_LIB_ARC4
+ select CRYPTO_LIB_SHA1
help
Support for the MPPE Encryption protocol, as employed by the
Microsoft Point-to-Point Tunneling Protocol.
diff --git a/drivers/net/ppp/bsd_comp.c b/drivers/net/ppp/bsd_comp.c
index 55954594e157..f385b759d5cf 100644
--- a/drivers/net/ppp/bsd_comp.c
+++ b/drivers/net/ppp/bsd_comp.c
@@ -406,7 +406,7 @@ static void *bsd_alloc (unsigned char *options, int opt_len, int decomp)
* Allocate space for the dictionary. This may be more than one page in
* length.
*/
- db->dict = vmalloc(array_size(hsize, sizeof(struct bsd_dict)));
+ db->dict = vmalloc_array(hsize, sizeof(struct bsd_dict));
if (!db->dict)
{
bsd_free (db);
@@ -425,7 +425,7 @@ static void *bsd_alloc (unsigned char *options, int opt_len, int decomp)
*/
else
{
- db->lens = vmalloc(array_size(sizeof(db->lens[0]), (maxmaxcode + 1)));
+ db->lens = vmalloc_array(maxmaxcode + 1, sizeof(db->lens[0]));
if (!db->lens)
{
bsd_free (db);
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 8c98cbd4b06d..f9f0f16c41d1 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -33,6 +33,7 @@
#include <linux/ppp_channel.h>
#include <linux/ppp-comp.h>
#include <linux/skbuff.h>
+#include <linux/rculist.h>
#include <linux/rtnetlink.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
@@ -178,11 +179,11 @@ struct channel {
struct ppp_channel *chan; /* public channel data structure */
struct rw_semaphore chan_sem; /* protects `chan' during chan ioctl */
spinlock_t downl; /* protects `chan', file.xq dequeue */
- struct ppp *ppp; /* ppp unit we're connected to */
+ struct ppp __rcu *ppp; /* ppp unit we're connected to */
struct net *chan_net; /* the net channel belongs to */
netns_tracker ns_tracker;
struct list_head clist; /* link in list of channels per unit */
- rwlock_t upl; /* protects `ppp' and 'bridge' */
+ spinlock_t upl; /* protects `ppp' and 'bridge' */
struct channel __rcu *bridge; /* "bridged" ppp channel */
#ifdef CONFIG_PPP_MULTILINK
u8 avail; /* flag used in multilink stuff */
@@ -644,34 +645,34 @@ static struct bpf_prog *compat_ppp_get_filter(struct sock_fprog32 __user *p)
*/
static int ppp_bridge_channels(struct channel *pch, struct channel *pchb)
{
- write_lock_bh(&pch->upl);
- if (pch->ppp ||
+ spin_lock(&pch->upl);
+ if (rcu_dereference_protected(pch->ppp, lockdep_is_held(&pch->upl)) ||
rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl))) {
- write_unlock_bh(&pch->upl);
+ spin_unlock(&pch->upl);
return -EALREADY;
}
refcount_inc(&pchb->file.refcnt);
rcu_assign_pointer(pch->bridge, pchb);
- write_unlock_bh(&pch->upl);
+ spin_unlock(&pch->upl);
- write_lock_bh(&pchb->upl);
- if (pchb->ppp ||
+ spin_lock(&pchb->upl);
+ if (rcu_dereference_protected(pchb->ppp, lockdep_is_held(&pchb->upl)) ||
rcu_dereference_protected(pchb->bridge, lockdep_is_held(&pchb->upl))) {
- write_unlock_bh(&pchb->upl);
+ spin_unlock(&pchb->upl);
goto err_unset;
}
refcount_inc(&pch->file.refcnt);
rcu_assign_pointer(pchb->bridge, pch);
- write_unlock_bh(&pchb->upl);
+ spin_unlock(&pchb->upl);
return 0;
err_unset:
- write_lock_bh(&pch->upl);
+ spin_lock(&pch->upl);
/* Re-read pch->bridge with upl held in case it was modified concurrently */
pchb = rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl));
RCU_INIT_POINTER(pch->bridge, NULL);
- write_unlock_bh(&pch->upl);
+ spin_unlock(&pch->upl);
synchronize_rcu();
if (pchb)
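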
@@ -685,25 +686,25 @@ static int ppp_unbridge_channels(struct channel *pch)
{
struct channel *pchb, *pchbb;
- write_lock_bh(&pch->upl);
+ spin_lock(&pch->upl);
pchb = rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl));
if (!pchb) {
- write_unlock_bh(&pch->upl);
+ spin_unlock(&pch->upl);
return -EINVAL;
}
RCU_INIT_POINTER(pch->bridge, NULL);
- write_unlock_bh(&pch->upl);
+ spin_unlock(&pch->upl);
/* Only modify pchb if pchb->bridge points back to pch.
* If not, it implies that there has been a race unbridging (and possibly
* even rebridging) pchb. We should leave pchb alone to avoid either a
* refcount underflow, or breaking another established bridge instance.
*/
- write_lock_bh(&pchb->upl);
+ spin_lock(&pchb->upl);
pchbb = rcu_dereference_protected(pchb->bridge, lockdep_is_held(&pchb->upl));
if (pchbb == pch)
RCU_INIT_POINTER(pchb->bridge, NULL);
- write_unlock_bh(&pchb->upl);
+ spin_unlock(&pchb->upl);
synchronize_rcu();
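
Both bridge hunks follow the canonical RCU publish/retract pattern that the upl conversion enables: writers serialize on the (now plain) spinlock and publish with rcu_assign_pointer() or retract with RCU_INIT_POINTER(), readers never take upl at all, and memory is only released after a grace period. A minimal sketch of that pattern with illustrative names, not the patch's exact code:

	#include <linux/rcupdate.h>
	#include <linux/spinlock.h>

	struct obj { int id; };

	struct holder {
		struct obj __rcu *ptr;
		spinlock_t lock;		/* serializes writers only */
	};

	static void publish(struct holder *h, struct obj *o)
	{
		spin_lock(&h->lock);
		rcu_assign_pointer(h->ptr, o);	/* pairs with rcu_dereference() */
		spin_unlock(&h->lock);
	}

	static int read_id(struct holder *h)
	{
		struct obj *o;
		int id = -1;

		rcu_read_lock();
		o = rcu_dereference(h->ptr);
		if (o)
			id = o->id;	/* use o only inside the RCU section */
		rcu_read_unlock();
		return id;
	}
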
@@ -1598,11 +1599,14 @@ static int ppp_fill_forward_path(struct net_device_path_ctx *ctx,
if (ppp->flags & SC_MULTILINK)
return -EOPNOTSUPP;
- if (list_empty(&ppp->channels))
+ pch = list_first_or_null_rcu(&ppp->channels, struct channel, clist);
+ if (!pch)
+ return -ENODEV;
+
+ chan = READ_ONCE(pch->chan);
+ if (!chan)
return -ENODEV;
- pch = list_first_entry(&ppp->channels, struct channel, clist);
- chan = pch->chan;
if (!chan->ops->fill_forward_path)
return -EOPNOTSUPP;
@@ -1740,7 +1744,6 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
*/
if (net_ratelimit())
netdev_err(ppp->dev, "ppp: compressor dropped pkt\n");
- kfree_skb(skb);
consume_skb(new_skb);
new_skb = NULL;
}
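
The dropped kfree_skb() is the point of this hunk: pad_compress_skb() could previously free the skb it was handed while the caller also freed it on the NULL return, a double free. After the change the function never consumes its input; the caller keeps ownership and, as the next hunk shows, frees the original exactly once via its drop path. The resulting contract, reduced to a skeleton rather than the exact code:

	/* pad_compress_skb() returns a new skb on success, NULL on failure,
	 * and never frees the skb passed in; the caller owns it either way.
	 */
	new_skb = pad_compress_skb(ppp, skb);
	if (!new_skb)
		goto drop;	/* the one place that frees the original skb */
	skb = new_skb;
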
@@ -1841,9 +1844,10 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
"down - pkt dropped.\n");
goto drop;
}
- skb = pad_compress_skb(ppp, skb);
- if (!skb)
+ new_skb = pad_compress_skb(ppp, skb);
+ if (!new_skb)
goto drop;
+ skb = new_skb;
}
/*
@@ -2154,10 +2158,9 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
#endif /* CONFIG_PPP_MULTILINK */
/* Try to send data out on a channel */
-static void __ppp_channel_push(struct channel *pch)
+static void __ppp_channel_push(struct channel *pch, struct ppp *ppp)
{
struct sk_buff *skb;
- struct ppp *ppp;
spin_lock(&pch->downl);
if (pch->chan) {
@@ -2176,7 +2179,6 @@ static void __ppp_channel_push(struct channel *pch)
spin_unlock(&pch->downl);
/* see if there is anything from the attached unit to be sent */
if (skb_queue_empty(&pch->file.xq)) {
- ppp = pch->ppp;
if (ppp)
__ppp_xmit_process(ppp, NULL);
}
@@ -2185,19 +2187,21 @@ static void __ppp_channel_push(struct channel *pch)
static void ppp_channel_push(struct channel *pch)
{
struct ppp_xmit_recursion *xmit_recursion;
+ struct ppp *ppp;
- read_lock_bh(&pch->upl);
- if (pch->ppp) {
- xmit_recursion = this_cpu_ptr(pch->ppp->xmit_recursion);
- local_lock_nested_bh(&pch->ppp->xmit_recursion->bh_lock);
+ rcu_read_lock_bh();
+ ppp = rcu_dereference_bh(pch->ppp);
+ if (ppp) {
+ xmit_recursion = this_cpu_ptr(ppp->xmit_recursion);
+ local_lock_nested_bh(&ppp->xmit_recursion->bh_lock);
xmit_recursion->owner = current;
- __ppp_channel_push(pch);
+ __ppp_channel_push(pch, ppp);
xmit_recursion->owner = NULL;
- local_unlock_nested_bh(&pch->ppp->xmit_recursion->bh_lock);
+ local_unlock_nested_bh(&ppp->xmit_recursion->bh_lock);
} else {
- __ppp_channel_push(pch);
+ __ppp_channel_push(pch, NULL);
}
- read_unlock_bh(&pch->upl);
+ rcu_read_unlock_bh();
}
/*
@@ -2299,6 +2303,7 @@ void
ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
{
struct channel *pch = chan->ppp;
+ struct ppp *ppp;
int proto;
if (!pch) {
@@ -2310,18 +2315,19 @@ ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
if (ppp_channel_bridge_input(pch, skb))
return;
- read_lock_bh(&pch->upl);
+ rcu_read_lock_bh();
+ ppp = rcu_dereference_bh(pch->ppp);
if (!ppp_decompress_proto(skb)) {
kfree_skb(skb);
- if (pch->ppp) {
- ++pch->ppp->dev->stats.rx_length_errors;
- ppp_receive_error(pch->ppp);
+ if (ppp) {
+ ++ppp->dev->stats.rx_length_errors;
+ ppp_receive_error(ppp);
}
goto done;
}
proto = PPP_PROTO(skb);
- if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) {
+ if (!ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) {
/* put it on the channel queue */
skb_queue_tail(&pch->file.rq, skb);
/* drop old frames if queue too long */
@@ -2330,11 +2336,11 @@ ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
kfree_skb(skb);
wake_up_interruptible(&pch->file.rwait);
} else {
- ppp_do_recv(pch->ppp, skb, pch);
+ ppp_do_recv(ppp, skb, pch);
}
done:
- read_unlock_bh(&pch->upl);
+ rcu_read_unlock_bh();
}
/* Put a 0-length skb in the receive queue as an error indication */
@@ -2343,20 +2349,22 @@ ppp_input_error(struct ppp_channel *chan, int code)
{
struct channel *pch = chan->ppp;
struct sk_buff *skb;
+ struct ppp *ppp;
if (!pch)
return;
- read_lock_bh(&pch->upl);
- if (pch->ppp) {
+ rcu_read_lock_bh();
+ ppp = rcu_dereference_bh(pch->ppp);
+ if (ppp) {
skb = alloc_skb(0, GFP_ATOMIC);
if (skb) {
skb->len = 0; /* probably unnecessary */
skb->cb[0] = code;
- ppp_do_recv(pch->ppp, skb, pch);
+ ppp_do_recv(ppp, skb, pch);
}
}
- read_unlock_bh(&pch->upl);
+ rcu_read_unlock_bh();
}
/*
@@ -2904,7 +2912,6 @@ int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
pn = ppp_pernet(net);
- pch->ppp = NULL;
pch->chan = chan;
pch->chan_net = get_net_track(net, &pch->ns_tracker, GFP_KERNEL);
chan->ppp = pch;
@@ -2915,7 +2922,7 @@ int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
#endif /* CONFIG_PPP_MULTILINK */
init_rwsem(&pch->chan_sem);
spin_lock_init(&pch->downl);
- rwlock_init(&pch->upl);
+ spin_lock_init(&pch->upl);
spin_lock_bh(&pn->all_channels_lock);
pch->file.index = ++pn->last_channel_index;
@@ -2944,13 +2951,15 @@ int ppp_channel_index(struct ppp_channel *chan)
int ppp_unit_number(struct ppp_channel *chan)
{
struct channel *pch = chan->ppp;
+ struct ppp *ppp;
int unit = -1;
if (pch) {
- read_lock_bh(&pch->upl);
- if (pch->ppp)
- unit = pch->ppp->file.index;
- read_unlock_bh(&pch->upl);
+ rcu_read_lock();
+ ppp = rcu_dereference(pch->ppp);
+ if (ppp)
+ unit = ppp->file.index;
+ rcu_read_unlock();
}
return unit;
}
@@ -2962,12 +2971,14 @@ char *ppp_dev_name(struct ppp_channel *chan)
{
struct channel *pch = chan->ppp;
char *name = NULL;
+ struct ppp *ppp;
if (pch) {
- read_lock_bh(&pch->upl);
- if (pch->ppp && pch->ppp->dev)
- name = pch->ppp->dev->name;
- read_unlock_bh(&pch->upl);
+ rcu_read_lock();
+ ppp = rcu_dereference(pch->ppp);
+ if (ppp && ppp->dev)
+ name = ppp->dev->name;
+ rcu_read_unlock();
}
return name;
}
@@ -2994,7 +3005,7 @@ ppp_unregister_channel(struct ppp_channel *chan)
*/
down_write(&pch->chan_sem);
spin_lock_bh(&pch->downl);
- pch->chan = NULL;
+ WRITE_ONCE(pch->chan, NULL);
spin_unlock_bh(&pch->downl);
up_write(&pch->chan_sem);
ppp_disconnect_channel(pch);
@@ -3490,9 +3501,9 @@ ppp_connect_channel(struct channel *pch, int unit)
ppp = ppp_find_unit(pn, unit);
if (!ppp)
goto out;
- write_lock_bh(&pch->upl);
+ spin_lock(&pch->upl);
ret = -EINVAL;
- if (pch->ppp ||
+ if (rcu_dereference_protected(pch->ppp, lockdep_is_held(&pch->upl)) ||
rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl)))
goto outl;
@@ -3515,15 +3526,15 @@ ppp_connect_channel(struct channel *pch, int unit)
hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */
if (hdrlen > ppp->dev->hard_header_len)
ppp->dev->hard_header_len = hdrlen;
- list_add_tail(&pch->clist, &ppp->channels);
+ list_add_tail_rcu(&pch->clist, &ppp->channels);
++ppp->n_channels;
- pch->ppp = ppp;
+ rcu_assign_pointer(pch->ppp, ppp);
refcount_inc(&ppp->file.refcnt);
ppp_unlock(ppp);
ret = 0;
outl:
- write_unlock_bh(&pch->upl);
+ spin_unlock(&pch->upl);
out:
mutex_unlock(&pn->all_ppp_mutex);
return ret;
@@ -3538,17 +3549,17 @@ ppp_disconnect_channel(struct channel *pch)
struct ppp *ppp;
int err = -EINVAL;
- write_lock_bh(&pch->upl);
- ppp = pch->ppp;
- pch->ppp = NULL;
- write_unlock_bh(&pch->upl);
+ spin_lock(&pch->upl);
+ ppp = rcu_replace_pointer(pch->ppp, NULL, lockdep_is_held(&pch->upl));
+ spin_unlock(&pch->upl);
if (ppp) {
/* remove it from the ppp unit's list */
ppp_lock(ppp);
- list_del(&pch->clist);
+ list_del_rcu(&pch->clist);
if (--ppp->n_channels == 0)
wake_up_interruptible(&ppp->file.rwait);
ppp_unlock(ppp);
+ synchronize_net();
if (refcount_dec_and_test(&ppp->file.refcnt))
ppp_destroy_interface(ppp);
err = 0;
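
The teardown side mirrors the publish side: clear the RCU pointer under the lock with rcu_replace_pointer(), unlink with list_del_rcu(), then wait out a grace period with synchronize_net() before dropping the reference that may free the unit, so no rcu_read_lock() reader can still hold a pointer into freed memory. A condensed sketch of that ordering, where put_ref() stands in for the final refcount drop:

	spin_lock(&pch->upl);
	ppp = rcu_replace_pointer(pch->ppp, NULL, lockdep_is_held(&pch->upl));
	spin_unlock(&pch->upl);

	if (ppp) {
		list_del_rcu(&pch->clist);	/* readers may still be walking */
		synchronize_net();		/* wait for them to finish */
		put_ref(ppp);			/* only now may the unit go away */
	}
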
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index bcc1eaedf58f..630cbf71c147 100644
--- a/drivers/net/ppp/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -43,7 +43,7 @@
*/
#include <crypto/arc4.h>
-#include <crypto/hash.h>
+#include <crypto/sha1.h>
#include <linux/err.h>
#include <linux/fips.h>
#include <linux/module.h>
@@ -55,7 +55,6 @@
#include <linux/mm.h>
#include <linux/ppp_defs.h>
#include <linux/ppp-comp.h>
-#include <linux/scatterlist.h>
#include <linux/unaligned.h>
#include "ppp_mppe.h"
@@ -67,31 +66,15 @@ MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE));
MODULE_VERSION("1.0.2");
#define SHA1_PAD_SIZE 40
-
-/*
- * kernel crypto API needs its arguments to be in kmalloc'd memory, not in the module
- * static data area. That means sha_pad needs to be kmalloc'd.
- */
-
-struct sha_pad {
- unsigned char sha_pad1[SHA1_PAD_SIZE];
- unsigned char sha_pad2[SHA1_PAD_SIZE];
-};
-static struct sha_pad *sha_pad;
-
-static inline void sha_pad_init(struct sha_pad *shapad)
-{
- memset(shapad->sha_pad1, 0x00, sizeof(shapad->sha_pad1));
- memset(shapad->sha_pad2, 0xF2, sizeof(shapad->sha_pad2));
-}
+static const u8 sha_pad1[SHA1_PAD_SIZE] = { 0 };
+static const u8 sha_pad2[SHA1_PAD_SIZE] = { [0 ... SHA1_PAD_SIZE - 1] = 0xF2 };
/*
* State for an MPPE (de)compressor.
*/
struct ppp_mppe_state {
struct arc4_ctx arc4;
- struct shash_desc *sha1;
- unsigned char *sha1_digest;
+ unsigned char sha1_digest[SHA1_DIGEST_SIZE];
unsigned char master_key[MPPE_MAX_KEY_LEN];
unsigned char session_key[MPPE_MAX_KEY_LEN];
unsigned keylen; /* key length in bytes */
@@ -130,16 +113,14 @@ struct ppp_mppe_state {
*/
static void get_new_key_from_sha(struct ppp_mppe_state * state)
{
- crypto_shash_init(state->sha1);
- crypto_shash_update(state->sha1, state->master_key,
- state->keylen);
- crypto_shash_update(state->sha1, sha_pad->sha_pad1,
- sizeof(sha_pad->sha_pad1));
- crypto_shash_update(state->sha1, state->session_key,
- state->keylen);
- crypto_shash_update(state->sha1, sha_pad->sha_pad2,
- sizeof(sha_pad->sha_pad2));
- crypto_shash_final(state->sha1, state->sha1_digest);
+ struct sha1_ctx ctx;
+
+ sha1_init(&ctx);
+ sha1_update(&ctx, state->master_key, state->keylen);
+ sha1_update(&ctx, sha_pad1, sizeof(sha_pad1));
+ sha1_update(&ctx, state->session_key, state->keylen);
+ sha1_update(&ctx, sha_pad2, sizeof(sha_pad2));
+ sha1_final(&ctx, state->sha1_digest);
}
/*
@@ -171,39 +152,15 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
static void *mppe_alloc(unsigned char *options, int optlen)
{
struct ppp_mppe_state *state;
- struct crypto_shash *shash;
- unsigned int digestsize;
if (optlen != CILEN_MPPE + sizeof(state->master_key) ||
options[0] != CI_MPPE || options[1] != CILEN_MPPE ||
fips_enabled)
- goto out;
+ return NULL;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state == NULL)
- goto out;
-
-
- shash = crypto_alloc_shash("sha1", 0, 0);
- if (IS_ERR(shash))
- goto out_free;
-
- state->sha1 = kmalloc(sizeof(*state->sha1) +
- crypto_shash_descsize(shash),
- GFP_KERNEL);
- if (!state->sha1) {
- crypto_free_shash(shash);
- goto out_free;
- }
- state->sha1->tfm = shash;
-
- digestsize = crypto_shash_digestsize(shash);
- if (digestsize < MPPE_MAX_KEY_LEN)
- goto out_free;
-
- state->sha1_digest = kmalloc(digestsize, GFP_KERNEL);
- if (!state->sha1_digest)
- goto out_free;
+ return NULL;
/* Save keys. */
memcpy(state->master_key, &options[CILEN_MPPE],
@@ -217,16 +174,6 @@ static void *mppe_alloc(unsigned char *options, int optlen)
*/
return (void *)state;
-
-out_free:
- kfree(state->sha1_digest);
- if (state->sha1) {
- crypto_free_shash(state->sha1->tfm);
- kfree_sensitive(state->sha1);
- }
- kfree(state);
-out:
- return NULL;
}
/*
@@ -235,12 +182,8 @@ out:
static void mppe_free(void *arg)
{
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
- if (state) {
- kfree(state->sha1_digest);
- crypto_free_shash(state->sha1->tfm);
- kfree_sensitive(state->sha1);
- kfree_sensitive(state);
- }
+
+ kfree_sensitive(state);
}
/*
@@ -649,31 +592,17 @@ static struct compressor ppp_mppe = {
.comp_extra = MPPE_PAD,
};
-/*
- * ppp_mppe_init()
- *
- * Prior to allowing load, try to load the arc4 and sha1 crypto
- * libraries. The actual use will be allocated later, but
- * this way the module will fail to insmod if they aren't available.
- */
-
static int __init ppp_mppe_init(void)
{
int answer;
- if (fips_enabled || !crypto_has_ahash("sha1", 0, CRYPTO_ALG_ASYNC))
- return -ENODEV;
- sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL);
- if (!sha_pad)
- return -ENOMEM;
- sha_pad_init(sha_pad);
+ if (fips_enabled)
+ return -ENODEV;
answer = ppp_register_compressor(&ppp_mppe);
if (answer == 0)
printk(KERN_INFO "PPP MPPE Compression module registered\n");
- else
- kfree(sha_pad);
return answer;
}
@@ -681,7 +610,6 @@ static int __init ppp_mppe_init(void)
static void __exit ppp_mppe_cleanup(void)
{
ppp_unregister_compressor(&ppp_mppe);
- kfree(sha_pad);
}
module_init(ppp_mppe_init);
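
The MPPE rewrite is a straight move from the crypto_shash API (heap-allocated tfm plus descriptor, arguments required to live in kmalloc'd memory, runtime failure paths) to the SHA-1 library interface from <crypto/sha1.h>, which works on a stack context and cannot fail, so all of the allocation and error-path code disappears. A minimal sketch of the library API as used above:

	#include <crypto/sha1.h>
	#include <linux/types.h>

	static void sha1_two_parts(const u8 *a, size_t a_len,
				   const u8 *b, size_t b_len,
				   u8 digest[SHA1_DIGEST_SIZE])
	{
		struct sha1_ctx ctx;	/* plain stack object, no tfm needed */

		sha1_init(&ctx);
		sha1_update(&ctx, a, a_len);
		sha1_update(&ctx, b, b_len);
		sha1_final(&ctx, digest);
	}
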
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 410effa42ade..4ac6afce267b 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -100,8 +100,8 @@ struct pppoe_net {
* as well; moreover, on SMP there is less lock
* contention here
*/
- struct pppox_sock *hash_table[PPPOE_HASH_SIZE];
- rwlock_t hash_lock;
+ struct pppox_sock __rcu *hash_table[PPPOE_HASH_SIZE];
+ spinlock_t hash_lock;
};
/*
@@ -162,13 +162,13 @@ static struct pppox_sock *__get_item(struct pppoe_net *pn, __be16 sid,
int hash = hash_item(sid, addr);
struct pppox_sock *ret;
- ret = pn->hash_table[hash];
+ ret = rcu_dereference(pn->hash_table[hash]);
while (ret) {
if (cmp_addr(&ret->pppoe_pa, sid, addr) &&
ret->pppoe_ifindex == ifindex)
return ret;
- ret = ret->next;
+ ret = rcu_dereference(ret->next);
}
return NULL;
@@ -177,19 +177,20 @@ static struct pppox_sock *__get_item(struct pppoe_net *pn, __be16 sid,
static int __set_item(struct pppoe_net *pn, struct pppox_sock *po)
{
int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote);
- struct pppox_sock *ret;
+ struct pppox_sock *ret, *first;
- ret = pn->hash_table[hash];
+ first = rcu_dereference_protected(pn->hash_table[hash], lockdep_is_held(&pn->hash_lock));
+ ret = first;
while (ret) {
if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa) &&
ret->pppoe_ifindex == po->pppoe_ifindex)
return -EALREADY;
- ret = ret->next;
+ ret = rcu_dereference_protected(ret->next, lockdep_is_held(&pn->hash_lock));
}
- po->next = pn->hash_table[hash];
- pn->hash_table[hash] = po;
+ RCU_INIT_POINTER(po->next, first);
+ rcu_assign_pointer(pn->hash_table[hash], po);
return 0;
}
@@ -198,20 +199,24 @@ static void __delete_item(struct pppoe_net *pn, __be16 sid,
char *addr, int ifindex)
{
int hash = hash_item(sid, addr);
- struct pppox_sock *ret, **src;
+ struct pppox_sock *ret, __rcu **src;
- ret = pn->hash_table[hash];
+ ret = rcu_dereference_protected(pn->hash_table[hash], lockdep_is_held(&pn->hash_lock));
src = &pn->hash_table[hash];
while (ret) {
if (cmp_addr(&ret->pppoe_pa, sid, addr) &&
ret->pppoe_ifindex == ifindex) {
- *src = ret->next;
+ struct pppox_sock *next;
+
+ next = rcu_dereference_protected(ret->next,
+ lockdep_is_held(&pn->hash_lock));
+ rcu_assign_pointer(*src, next);
break;
}
src = &ret->next;
- ret = ret->next;
+ ret = rcu_dereference_protected(ret->next, lockdep_is_held(&pn->hash_lock));
}
}
@@ -225,17 +230,15 @@ static inline struct pppox_sock *get_item(struct pppoe_net *pn, __be16 sid,
{
struct pppox_sock *po;
- read_lock_bh(&pn->hash_lock);
po = __get_item(pn, sid, addr, ifindex);
- if (po)
- sock_hold(sk_pppox(po));
- read_unlock_bh(&pn->hash_lock);
+ if (po && !refcount_inc_not_zero(&sk_pppox(po)->sk_refcnt))
+ po = NULL;
return po;
}
-static inline struct pppox_sock *get_item_by_addr(struct net *net,
- struct sockaddr_pppox *sp)
+static inline struct pppox_sock *__get_item_by_addr(struct net *net,
+ struct sockaddr_pppox *sp)
{
struct net_device *dev;
struct pppoe_net *pn;
@@ -243,24 +246,22 @@ static inline struct pppox_sock *get_item_by_addr(struct net *net,
int ifindex;
- rcu_read_lock();
dev = dev_get_by_name_rcu(net, sp->sa_addr.pppoe.dev);
if (dev) {
ifindex = dev->ifindex;
pn = pppoe_pernet(net);
- pppox_sock = get_item(pn, sp->sa_addr.pppoe.sid,
- sp->sa_addr.pppoe.remote, ifindex);
+ pppox_sock = __get_item(pn, sp->sa_addr.pppoe.sid,
+ sp->sa_addr.pppoe.remote, ifindex);
}
- rcu_read_unlock();
return pppox_sock;
}
static inline void delete_item(struct pppoe_net *pn, __be16 sid,
char *addr, int ifindex)
{
- write_lock_bh(&pn->hash_lock);
+ spin_lock(&pn->hash_lock);
__delete_item(pn, sid, addr, ifindex);
- write_unlock_bh(&pn->hash_lock);
+ spin_unlock(&pn->hash_lock);
}
/***************************************************************************
@@ -276,14 +277,16 @@ static void pppoe_flush_dev(struct net_device *dev)
int i;
pn = pppoe_pernet(dev_net(dev));
- write_lock_bh(&pn->hash_lock);
+ spin_lock(&pn->hash_lock);
for (i = 0; i < PPPOE_HASH_SIZE; i++) {
- struct pppox_sock *po = pn->hash_table[i];
+ struct pppox_sock *po = rcu_dereference_protected(pn->hash_table[i],
+ lockdep_is_held(&pn->hash_lock));
struct sock *sk;
while (po) {
while (po && po->pppoe_dev != dev) {
- po = po->next;
+ po = rcu_dereference_protected(po->next,
+ lockdep_is_held(&pn->hash_lock));
}
if (!po)
@@ -300,7 +303,7 @@ static void pppoe_flush_dev(struct net_device *dev)
*/
sock_hold(sk);
- write_unlock_bh(&pn->hash_lock);
+ spin_unlock(&pn->hash_lock);
lock_sock(sk);
if (po->pppoe_dev == dev &&
@@ -320,11 +323,12 @@ static void pppoe_flush_dev(struct net_device *dev)
*/
BUG_ON(pppoe_pernet(dev_net(dev)) == NULL);
- write_lock_bh(&pn->hash_lock);
- po = pn->hash_table[i];
+ spin_lock(&pn->hash_lock);
+ po = rcu_dereference_protected(pn->hash_table[i],
+ lockdep_is_held(&pn->hash_lock));
}
}
- write_unlock_bh(&pn->hash_lock);
+ spin_unlock(&pn->hash_lock);
}
static int pppoe_device_event(struct notifier_block *this,
@@ -375,18 +379,16 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
if (sk->sk_state & PPPOX_BOUND) {
ppp_input(&po->chan, skb);
} else if (sk->sk_state & PPPOX_RELAY) {
- relay_po = get_item_by_addr(sock_net(sk),
- &po->pppoe_relay);
+ relay_po = __get_item_by_addr(sock_net(sk),
+ &po->pppoe_relay);
if (relay_po == NULL)
goto abort_kfree;
if ((sk_pppox(relay_po)->sk_state & PPPOX_CONNECTED) == 0)
- goto abort_put;
+ goto abort_kfree;
if (!__pppoe_xmit(sk_pppox(relay_po), skb))
- goto abort_put;
-
- sock_put(sk_pppox(relay_po));
+ goto abort_kfree;
} else {
if (sock_queue_rcv_skb(sk, skb))
goto abort_kfree;
@@ -394,9 +396,6 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
return NET_RX_SUCCESS;
-abort_put:
- sock_put(sk_pppox(relay_po));
-
abort_kfree:
kfree_skb(skb);
return NET_RX_DROP;
@@ -441,14 +440,11 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
ph = pppoe_hdr(skb);
pn = pppoe_pernet(dev_net(dev));
- /* Note that get_item does a sock_hold(), so sk_pppox(po)
- * is known to be safe.
- */
- po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
+ po = __get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
if (!po)
goto drop;
- return sk_receive_skb(sk_pppox(po), skb, 0);
+ return __sk_receive_skb(sk_pppox(po), skb, 0, 1, false);
drop:
kfree_skb(skb);
@@ -528,6 +524,11 @@ static struct proto pppoe_sk_proto __read_mostly = {
.obj_size = sizeof(struct pppox_sock),
};
+static void pppoe_destruct(struct sock *sk)
+{
+ skb_queue_purge(&sk->sk_receive_queue);
+}
+
/***********************************************************************
*
* Initialize a new struct sock.
@@ -542,11 +543,13 @@ static int pppoe_create(struct net *net, struct socket *sock, int kern)
return -ENOMEM;
sock_init_data(sock, sk);
+ sock_set_flag(sk, SOCK_RCU_FREE);
sock->state = SS_UNCONNECTED;
sock->ops = &pppoe_ops;
sk->sk_backlog_rcv = pppoe_rcv_core;
+ sk->sk_destruct = pppoe_destruct;
sk->sk_state = PPPOX_NONE;
sk->sk_type = SOCK_STREAM;
sk->sk_family = PF_PPPOX;
@@ -599,7 +602,6 @@ static int pppoe_release(struct socket *sock)
sock_orphan(sk);
sock->sk = NULL;
- skb_queue_purge(&sk->sk_receive_queue);
release_sock(sk);
sock_put(sk);
@@ -681,9 +683,9 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
&sp->sa_addr.pppoe,
sizeof(struct pppoe_addr));
- write_lock_bh(&pn->hash_lock);
+ spin_lock(&pn->hash_lock);
error = __set_item(pn, po);
- write_unlock_bh(&pn->hash_lock);
+ spin_unlock(&pn->hash_lock);
if (error < 0)
goto err_put;
@@ -808,11 +810,12 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
/* Check that the socket referenced by the address
actually exists. */
- relay_po = get_item_by_addr(sock_net(sk), &po->pppoe_relay);
+ rcu_read_lock();
+ relay_po = __get_item_by_addr(sock_net(sk), &po->pppoe_relay);
+ rcu_read_unlock();
if (!relay_po)
break;
- sock_put(sk_pppox(relay_po));
sk->sk_state |= PPPOX_RELAY;
err = 0;
break;
@@ -1052,11 +1055,11 @@ static inline struct pppox_sock *pppoe_get_idx(struct pppoe_net *pn, loff_t pos)
int i;
for (i = 0; i < PPPOE_HASH_SIZE; i++) {
- po = pn->hash_table[i];
+ po = rcu_dereference(pn->hash_table[i]);
while (po) {
if (!pos--)
goto out;
- po = po->next;
+ po = rcu_dereference(po->next);
}
}
@@ -1065,19 +1068,19 @@ out:
}
static void *pppoe_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(pn->hash_lock)
+ __acquires(RCU)
{
struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq));
loff_t l = *pos;
- read_lock_bh(&pn->hash_lock);
+ rcu_read_lock();
return l ? pppoe_get_idx(pn, --l) : SEQ_START_TOKEN;
}
static void *pppoe_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq));
- struct pppox_sock *po;
+ struct pppox_sock *po, *next;
++*pos;
if (v == SEQ_START_TOKEN) {
@@ -1085,14 +1088,15 @@ static void *pppoe_seq_next(struct seq_file *seq, void *v, loff_t *pos)
goto out;
}
po = v;
- if (po->next)
- po = po->next;
+ next = rcu_dereference(po->next);
+ if (next)
+ po = next;
else {
int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote);
po = NULL;
while (++hash < PPPOE_HASH_SIZE) {
- po = pn->hash_table[hash];
+ po = rcu_dereference(pn->hash_table[hash]);
if (po)
break;
}
@@ -1103,10 +1107,9 @@ out:
}
static void pppoe_seq_stop(struct seq_file *seq, void *v)
- __releases(pn->hash_lock)
+ __releases(RCU)
{
- struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq));
- read_unlock_bh(&pn->hash_lock);
+ rcu_read_unlock();
}
static const struct seq_operations pppoe_seq_ops = {
@@ -1149,7 +1152,7 @@ static __net_init int pppoe_init_net(struct net *net)
struct pppoe_net *pn = pppoe_pernet(net);
struct proc_dir_entry *pde;
- rwlock_init(&pn->hash_lock);
+ spin_lock_init(&pn->hash_lock);
pde = proc_create_net("pppoe", 0444, net->proc_net,
&pppoe_seq_ops, sizeof(struct seq_net_private));
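
The pppoe hash table now relies on two cooperating mechanisms: lookups walk the chains under RCU, and because a socket found there may already be on its way to destruction, a reference is taken with refcount_inc_not_zero(), which fails rather than resurrecting a zero refcount; SOCK_RCU_FREE (set in pppoe_create above) guarantees the socket memory itself outlives the read-side critical section. A sketch of the lookup pattern, assuming an RCU-safe __get_item()-style chain walk:

	struct pppox_sock *po;

	rcu_read_lock();
	po = __get_item(pn, sid, addr, ifindex);	/* RCU list walk */
	if (po && !refcount_inc_not_zero(&sk_pppox(po)->sk_refcnt))
		po = NULL;	/* lost the race: socket is being freed */
	rcu_read_unlock();
	/* po, if non-NULL, is now pinned by the extra reference */
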
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 5feaa70b5f47..90737cb71892 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -159,19 +159,17 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
int len;
unsigned char *data;
__u32 seq_recv;
-
-
struct rtable *rt;
struct net_device *tdev;
struct iphdr *iph;
int max_headroom;
if (sk_pppox(po)->sk_state & PPPOX_DEAD)
- goto tx_error;
+ goto tx_drop;
rt = pptp_route_output(po, &fl4);
if (IS_ERR(rt))
- goto tx_error;
+ goto tx_drop;
tdev = rt->dst.dev;
@@ -179,16 +177,20 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
- if (!new_skb) {
- ip_rt_put(rt);
+
+ if (!new_skb)
goto tx_error;
- }
+
if (skb->sk)
skb_set_owner_w(new_skb, skb->sk);
consume_skb(skb);
skb = new_skb;
}
+ /* Ensure we can safely access protocol field and LCP code */
+ if (!pskb_may_pull(skb, 3))
+ goto tx_error;
+
data = skb->data;
islcp = ((data[0] << 8) + data[1]) == PPP_LCP && 1 <= data[2] && data[2] <= 7;
@@ -262,6 +264,8 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
return 1;
tx_error:
+ ip_rt_put(rt);
+tx_drop:
kfree_skb(skb);
return 1;
}
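
The two labels encode how far pptp_xmit() got: tx_drop handles failures before a route reference exists (dead socket, failed route lookup), while tx_error additionally releases the route with ip_rt_put(); falling through from tx_error into tx_drop keeps the skb free in one place. The shape, reduced to its skeleton:

	rt = pptp_route_output(po, &fl4);
	if (IS_ERR(rt))
		goto tx_drop;			/* no route held yet */

	if (!pskb_may_pull(skb, 3))		/* proto bytes + LCP code */
		goto tx_error;			/* route held: must release */
	/* ... */
tx_error:
	ip_rt_put(rt);
tx_drop:
	kfree_skb(skb);
	return 1;
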
diff --git a/drivers/net/pse-pd/Kconfig b/drivers/net/pse-pd/Kconfig
index 7fab916a7f46..7ef29657ee5d 100644
--- a/drivers/net/pse-pd/Kconfig
+++ b/drivers/net/pse-pd/Kconfig
@@ -32,6 +32,17 @@ config PSE_PD692X0
To compile this driver as a module, choose M here: the
module will be called pd692x0.
+config PSE_SI3474
+ tristate "Si3474 PSE controller"
+ depends on I2C
+ help
+	  This module provides support for the Skyworks Si3474 Ethernet
+	  Power Sourcing Equipment controller.
+ Only 4-pair PSE configurations are supported.
+
+ To compile this driver as a module, choose M here: the
+ module will be called si3474.
+
config PSE_TPS23881
tristate "TPS23881 PSE controller"
depends on I2C
diff --git a/drivers/net/pse-pd/Makefile b/drivers/net/pse-pd/Makefile
index 9d2898b36737..cc78f7ea7f5f 100644
--- a/drivers/net/pse-pd/Makefile
+++ b/drivers/net/pse-pd/Makefile
@@ -5,4 +5,5 @@ obj-$(CONFIG_PSE_CONTROLLER) += pse_core.o
obj-$(CONFIG_PSE_REGULATOR) += pse_regulator.o
obj-$(CONFIG_PSE_PD692X0) += pd692x0.o
+obj-$(CONFIG_PSE_SI3474) += si3474.o
obj-$(CONFIG_PSE_TPS23881) += tps23881.o
diff --git a/drivers/net/pse-pd/pd692x0.c b/drivers/net/pse-pd/pd692x0.c
index 399ce9febda4..f4e91ba64a66 100644
--- a/drivers/net/pse-pd/pd692x0.c
+++ b/drivers/net/pse-pd/pd692x0.c
@@ -1041,6 +1041,10 @@ pd692x0_configure_managers(struct pd692x0_priv *priv, int nmanagers)
int pw_budget;
pw_budget = regulator_get_unclaimed_power_budget(supply);
+ if (!pw_budget)
+ /* Do nothing if no power budget */
+ continue;
+
/* Max power budget per manager */
if (pw_budget > 6000000)
pw_budget = 6000000;
@@ -1162,12 +1166,44 @@ pd692x0_write_ports_matrix(struct pd692x0_priv *priv,
return 0;
}
+static void pd692x0_of_put_managers(struct pd692x0_priv *priv,
+ struct pd692x0_manager *manager,
+ int nmanagers)
+{
+ int i, j;
+
+ for (i = 0; i < nmanagers; i++) {
+ for (j = 0; j < manager[i].nports; j++)
+ of_node_put(manager[i].port_node[j]);
+ of_node_put(manager[i].node);
+ }
+}
+
+static void pd692x0_managers_free_pw_budget(struct pd692x0_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < PD692X0_MAX_MANAGERS; i++) {
+ struct regulator *supply;
+
+ if (!priv->manager_reg[i] || !priv->manager_pw_budget[i])
+ continue;
+
+ supply = priv->manager_reg[i]->supply;
+ if (!supply)
+ continue;
+
+ regulator_free_power_budget(supply,
+ priv->manager_pw_budget[i]);
+ }
+}
+
static int pd692x0_setup_pi_matrix(struct pse_controller_dev *pcdev)
{
struct pd692x0_manager *manager __free(kfree) = NULL;
struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
struct pd692x0_matrix port_matrix[PD692X0_MAX_PIS];
- int ret, i, j, nmanagers;
+ int ret, nmanagers;
/* Should we flash the port matrix */
if (priv->fw_state != PD692X0_FW_OK &&
@@ -1185,31 +1221,27 @@ static int pd692x0_setup_pi_matrix(struct pse_controller_dev *pcdev)
nmanagers = ret;
ret = pd692x0_register_managers_regulator(priv, manager, nmanagers);
if (ret)
- goto out;
+ goto err_of_managers;
ret = pd692x0_configure_managers(priv, nmanagers);
if (ret)
- goto out;
+ goto err_of_managers;
ret = pd692x0_set_ports_matrix(priv, manager, nmanagers, port_matrix);
if (ret)
- goto out;
+ goto err_managers_req_pw;
ret = pd692x0_write_ports_matrix(priv, port_matrix);
if (ret)
- goto out;
-
-out:
- for (i = 0; i < nmanagers; i++) {
- struct regulator *supply = priv->manager_reg[i]->supply;
+ goto err_managers_req_pw;
- regulator_free_power_budget(supply,
- priv->manager_pw_budget[i]);
+ pd692x0_of_put_managers(priv, manager, nmanagers);
+ return 0;
- for (j = 0; j < manager[i].nports; j++)
- of_node_put(manager[i].port_node[j]);
- of_node_put(manager[i].node);
- }
+err_managers_req_pw:
+ pd692x0_managers_free_pw_budget(priv);
+err_of_managers:
+ pd692x0_of_put_managers(priv, manager, nmanagers);
return ret;
}
@@ -1748,6 +1780,7 @@ static void pd692x0_i2c_remove(struct i2c_client *client)
{
struct pd692x0_priv *priv = i2c_get_clientdata(client);
+ pd692x0_managers_free_pw_budget(priv);
firmware_upload_unregister(priv->fwl);
}
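
The restructuring replaces one catch-all out: label, which released power budgets and OF node references even on paths that had never acquired them, with the usual kernel goto ladder where each label unwinds exactly the resources acquired before the failing step. The general shape, with hypothetical acquire/release pairs:

	static int setup_all(struct ctx *c)	/* all names illustrative */
	{
		int ret;

		ret = acquire_a(c);
		if (ret)
			return ret;	/* nothing to unwind yet */

		ret = acquire_b(c);
		if (ret)
			goto err_a;

		ret = acquire_c(c);
		if (ret)
			goto err_b;

		return 0;

	err_b:
		release_b(c);
	err_a:
		release_a(c);
		return ret;
	}
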
diff --git a/drivers/net/pse-pd/si3474.c b/drivers/net/pse-pd/si3474.c
new file mode 100644
index 000000000000..aa07ffbce54d
--- /dev/null
+++ b/drivers/net/pse-pd/si3474.c
@@ -0,0 +1,578 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for the Skyworks Si3474 PoE PSE Controller
+ *
+ * Chip Architecture & Terminology:
+ *
+ * The Si3474 is a single-chip PoE PSE controller managing 8 physical power
+ * delivery channels. Internally, it's structured into two logical "Quads".
+ *
+ * Quad 0: Manages physical channels ('ports' in datasheet) 0, 1, 2, 3
+ * Quad 1: Manages physical channels ('ports' in datasheet) 4, 5, 6, 7
+ *
+ * Each Quad is accessed via a separate I2C address. The base address range is
+ * set by hardware pins A1-A4, and the specific address selects Quad 0 (usually
+ * the lower/even address) or Quad 1 (usually the higher/odd address).
+ * See datasheet Table 2.2 for the address mapping.
+ *
+ * While the Quads manage channel-specific operations, the Si3474 package has
+ * several resources shared across the entire chip:
+ * - Single RESETb input pin.
+ * - Single INTb output pin (signals interrupts from *either* Quad).
+ * - Single OSS input pin (Emergency Shutdown).
+ * - Global I2C Address (0x7F) used for firmware updates.
+ * - Global status monitoring (Temperature, VDD/VPWR Undervoltage Lockout).
+ *
+ * Driver Architecture:
+ *
+ * To handle the mix of per-Quad access and shared resources correctly, this
+ * driver treats the entire Si3474 package as one logical device. The driver
+ * instance associated with the primary I2C address (Quad 0) takes ownership.
+ * It discovers and manages the I2C client for the secondary address (Quad 1).
+ * This primary instance handles shared resources like IRQ management and
+ * registers a single PSE controller device representing all logical PIs.
+ * Internal functions route I2C commands to the appropriate Quad's i2c_client
+ * based on the target channel or PI.
+ *
+ * Terminology Mapping:
+ *
+ * - "PI" (Power Interface): Refers to the logical PSE port as defined by
+ * IEEE 802.3 (typically corresponds to an RJ45 connector). This is the
+ * `id` (0-7) used in the pse_controller_ops.
+ * - "Channel": Refers to one of the 8 physical power control paths within
+ * the Si3474 chip itself (hardware channels 0-7). This terminology is
+ * used internally within the driver to avoid confusion with 'ports'.
+ * - "Quad": One of the two internal 4-channel management units within the
+ * Si3474, each accessed via its own I2C address.
+ *
+ * Relationship:
+ * - A 2-Pair PoE PI uses 1 Channel.
+ * - A 4-Pair PoE PI uses 2 Channels.
+ *
+ * ASCII Schematic:
+ *
+ * +-----------------------------------------------------+
+ * | Si3474 Chip |
+ * | |
+ * | +---------------------+ +---------------------+ |
+ * | | Quad 0 | | Quad 1 | |
+ * | | Channels 0, 1, 2, 3 | | Channels 4, 5, 6, 7 | |
+ * | +----------^----------+ +-------^-------------+ |
+ * | I2C Addr 0 | | I2C Addr 1 |
+ * | +------------------------+ |
+ * | (Primary Driver Instance) (Managed by Primary) |
+ * | |
+ * | Shared Resources (affect whole chip): |
+ * | - Single INTb Output -> Handled by Primary |
+ * | - Single RESETb Input |
+ * | - Single OSS Input -> Handled by Primary |
+ * | - Global I2C Addr (0x7F) for Firmware Update |
+ * | - Global Status (Temp, VDD/VPWR UVLO) |
+ * +-----------------------------------------------------+
+ * | | | | | | | |
+ * Ch0 Ch1 Ch2 Ch3 Ch4 Ch5 Ch6 Ch7 (Physical Channels)
+ *
+ * Example Mapping (Logical PI to Physical Channel(s)):
+ * * 2-Pair Mode (8 PIs):
+ * PI 0 -> Ch 0
+ * PI 1 -> Ch 1
+ * ...
+ * PI 7 -> Ch 7
+ * * 4-Pair Mode (4 PIs):
+ * PI 0 -> Ch 0 + Ch 1 (Managed via Quad 0 Addr)
+ * PI 1 -> Ch 2 + Ch 3 (Managed via Quad 0 Addr)
+ * PI 2 -> Ch 4 + Ch 5 (Managed via Quad 1 Addr)
+ * PI 3 -> Ch 6 + Ch 7 (Managed via Quad 1 Addr)
+ * (Note: Actual mapping depends on Device Tree and PORT_REMAP config)
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pse-pd/pse.h>
+
+#define SI3474_MAX_CHANS 8
+
+#define MANUFACTURER_ID 0x08
+#define IC_ID 0x05
+#define SI3474_DEVICE_ID (MANUFACTURER_ID << 3 | IC_ID)
+
+/* Misc registers */
+#define VENDOR_IC_ID_REG 0x1B
+#define TEMPERATURE_REG 0x2C
+#define FIRMWARE_REVISION_REG 0x41
+#define CHIP_REVISION_REG 0x43
+
+/* Main status registers */
+#define POWER_STATUS_REG 0x10
+#define PORT_MODE_REG 0x12
+#define DETECT_CLASS_ENABLE_REG 0x14
+
+/* PORTn Current */
+#define PORT1_CURRENT_LSB_REG 0x30
+
+/* PORTn current: the datasheet value in mA is */
+/* 1000 * ((PORTn_CURRENT_MSB << 8) + PORTn_CURRENT_LSB) / 16384; */
+/* the driver returns the same reading scaled to nA per LSB: */
+#define SI3474_NA_STEP (1000 * 1000 * 1000 / 16384)
+
+/* VPWR Voltage */
+#define VPWR_LSB_REG 0x2E
+#define VPWR_MSB_REG 0x2F
+
+/* PORTn Voltage */
+#define PORT1_VOLTAGE_LSB_REG 0x32
+
+/* VPWR voltage: the datasheet value in V is */
+/* 60 * ((VPWR_MSB << 8) + VPWR_LSB) / 16384; */
+/* the driver returns the same reading scaled to uV per LSB: */
+#define SI3474_UV_STEP (1000 * 1000 * 60 / 16384)
+
+/* Helper macros */
+#define CHAN_IDX(chan) ((chan) % 4)
+#define CHAN_BIT(chan) BIT(CHAN_IDX(chan))
+#define CHAN_UPPER_BIT(chan) BIT(CHAN_IDX(chan) + 4)
+
+#define CHAN_MASK(chan) (0x03U << (2 * CHAN_IDX(chan)))
+#define CHAN_REG(base, chan) ((base) + (CHAN_IDX(chan) * 4))
+
+struct si3474_pi_desc {
+ u8 chan[2];
+ bool is_4p;
+};
+
+struct si3474_priv {
+ struct i2c_client *client[2];
+ struct pse_controller_dev pcdev;
+ struct device_node *np;
+ struct si3474_pi_desc pi[SI3474_MAX_CHANS];
+};
+
+static struct si3474_priv *to_si3474_priv(struct pse_controller_dev *pcdev)
+{
+ return container_of(pcdev, struct si3474_priv, pcdev);
+}
+
+static void si3474_get_channels(struct si3474_priv *priv, int id,
+ u8 *chan0, u8 *chan1)
+{
+ *chan0 = priv->pi[id].chan[0];
+ *chan1 = priv->pi[id].chan[1];
+}
+
+static struct i2c_client *si3474_get_chan_client(struct si3474_priv *priv,
+ u8 chan)
+{
+ return (chan < 4) ? priv->client[0] : priv->client[1];
+}
+
+static int si3474_pi_get_admin_state(struct pse_controller_dev *pcdev, int id,
+ struct pse_admin_state *admin_state)
+{
+ struct si3474_priv *priv = to_si3474_priv(pcdev);
+ struct i2c_client *client;
+ bool is_enabled;
+ u8 chan0, chan1;
+ s32 ret;
+
+ si3474_get_channels(priv, id, &chan0, &chan1);
+ client = si3474_get_chan_client(priv, chan0);
+
+ ret = i2c_smbus_read_byte_data(client, PORT_MODE_REG);
+ if (ret < 0) {
+ admin_state->c33_admin_state =
+ ETHTOOL_C33_PSE_ADMIN_STATE_UNKNOWN;
+ return ret;
+ }
+
+ is_enabled = ret & (CHAN_MASK(chan0) | CHAN_MASK(chan1));
+
+ if (is_enabled)
+ admin_state->c33_admin_state =
+ ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
+ else
+ admin_state->c33_admin_state =
+ ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
+
+ return 0;
+}
+
+static int si3474_pi_get_pw_status(struct pse_controller_dev *pcdev, int id,
+ struct pse_pw_status *pw_status)
+{
+ struct si3474_priv *priv = to_si3474_priv(pcdev);
+ struct i2c_client *client;
+ bool delivering;
+ u8 chan0, chan1;
+ s32 ret;
+
+ si3474_get_channels(priv, id, &chan0, &chan1);
+ client = si3474_get_chan_client(priv, chan0);
+
+ ret = i2c_smbus_read_byte_data(client, POWER_STATUS_REG);
+ if (ret < 0) {
+ pw_status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_UNKNOWN;
+ return ret;
+ }
+
+ delivering = ret & (CHAN_UPPER_BIT(chan0) | CHAN_UPPER_BIT(chan1));
+
+ if (delivering)
+ pw_status->c33_pw_status =
+ ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING;
+ else
+ pw_status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED;
+
+ return 0;
+}
+
+static int si3474_get_of_channels(struct si3474_priv *priv)
+{
+ struct pse_pi *pi;
+ u32 chan_id;
+ u8 pi_no;
+ s32 ret;
+
+ for (pi_no = 0; pi_no < SI3474_MAX_CHANS; pi_no++) {
+ pi = &priv->pcdev.pi[pi_no];
+ bool pairset_found = false;
+ u8 pairset_no;
+
+ for (pairset_no = 0; pairset_no < 2; pairset_no++) {
+ if (!pi->pairset[pairset_no].np)
+ continue;
+
+ pairset_found = true;
+
+ ret = of_property_read_u32(pi->pairset[pairset_no].np,
+ "reg", &chan_id);
+ if (ret) {
+ dev_err(&priv->client[0]->dev,
+ "Failed to read channel reg property\n");
+ return ret;
+ }
+			if (chan_id >= SI3474_MAX_CHANS) {
+				dev_err(&priv->client[0]->dev,
+					"Incorrect channel number: %u\n", chan_id);
+ return -EINVAL;
+ }
+
+ priv->pi[pi_no].chan[pairset_no] = chan_id;
+ /* Mark as 4-pair if second pairset is present */
+ priv->pi[pi_no].is_4p = (pairset_no == 1);
+ }
+
+ if (pairset_found && !priv->pi[pi_no].is_4p) {
+ dev_err(&priv->client[0]->dev,
+ "Second pairset is missing for PI %pOF, only 4p configs are supported\n",
+ pi->np);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int si3474_setup_pi_matrix(struct pse_controller_dev *pcdev)
+{
+ struct si3474_priv *priv = to_si3474_priv(pcdev);
+ s32 ret;
+
+ ret = si3474_get_of_channels(priv);
+ if (ret < 0)
+ dev_warn(&priv->client[0]->dev,
+ "Unable to parse DT PSE power interface matrix\n");
+
+ return ret;
+}
+
+static int si3474_pi_enable(struct pse_controller_dev *pcdev, int id)
+{
+ struct si3474_priv *priv = to_si3474_priv(pcdev);
+ struct i2c_client *client;
+ u8 chan0, chan1;
+ s32 ret;
+ u8 val;
+
+ si3474_get_channels(priv, id, &chan0, &chan1);
+ client = si3474_get_chan_client(priv, chan0);
+
+ /* Release PI from shutdown */
+ ret = i2c_smbus_read_byte_data(client, PORT_MODE_REG);
+ if (ret < 0)
+ return ret;
+
+ val = (u8)ret;
+ val |= CHAN_MASK(chan0);
+ val |= CHAN_MASK(chan1);
+
+ ret = i2c_smbus_write_byte_data(client, PORT_MODE_REG, val);
+ if (ret)
+ return ret;
+
+ /* DETECT_CLASS_ENABLE must be set when using AUTO mode,
+	 * otherwise the PI does not power up - datasheet section 2.10.2
+ */
+ val = CHAN_BIT(chan0) | CHAN_UPPER_BIT(chan0) |
+ CHAN_BIT(chan1) | CHAN_UPPER_BIT(chan1);
+
+ ret = i2c_smbus_write_byte_data(client, DETECT_CLASS_ENABLE_REG, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int si3474_pi_disable(struct pse_controller_dev *pcdev, int id)
+{
+ struct si3474_priv *priv = to_si3474_priv(pcdev);
+ struct i2c_client *client;
+ u8 chan0, chan1;
+ s32 ret;
+ u8 val;
+
+ si3474_get_channels(priv, id, &chan0, &chan1);
+ client = si3474_get_chan_client(priv, chan0);
+
+ /* Set PI in shutdown mode */
+ ret = i2c_smbus_read_byte_data(client, PORT_MODE_REG);
+ if (ret < 0)
+ return ret;
+
+ val = (u8)ret;
+ val &= ~CHAN_MASK(chan0);
+ val &= ~CHAN_MASK(chan1);
+
+ ret = i2c_smbus_write_byte_data(client, PORT_MODE_REG, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int si3474_pi_get_chan_current(struct si3474_priv *priv, u8 chan)
+{
+ struct i2c_client *client;
+ u64 tmp_64;
+ s32 ret;
+ u8 reg;
+
+ client = si3474_get_chan_client(priv, chan);
+
+ /* Registers 0x30 to 0x3d */
+ reg = CHAN_REG(PORT1_CURRENT_LSB_REG, chan);
+
+ ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0)
+ return ret;
+
+ tmp_64 = ret * SI3474_NA_STEP;
+
+ /* uA = nA / 1000 */
+ tmp_64 = DIV_ROUND_CLOSEST_ULL(tmp_64, 1000);
+ return (int)tmp_64;
+}
+
+static int si3474_pi_get_chan_voltage(struct si3474_priv *priv, u8 chan)
+{
+ struct i2c_client *client;
+ s32 ret;
+ u32 val;
+ u8 reg;
+
+ client = si3474_get_chan_client(priv, chan);
+
+ /* Registers 0x32 to 0x3f */
+ reg = CHAN_REG(PORT1_VOLTAGE_LSB_REG, chan);
+
+ ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0)
+ return ret;
+
+ val = ret * SI3474_UV_STEP;
+
+ return (int)val;
+}
+
+static int si3474_pi_get_voltage(struct pse_controller_dev *pcdev, int id)
+{
+ struct si3474_priv *priv = to_si3474_priv(pcdev);
+ struct i2c_client *client;
+ u8 chan0, chan1;
+ s32 ret;
+
+ si3474_get_channels(priv, id, &chan0, &chan1);
+ client = si3474_get_chan_client(priv, chan0);
+
+	/* Check which channels are enabled */
+ ret = i2c_smbus_read_byte_data(client, POWER_STATUS_REG);
+ if (ret < 0)
+ return ret;
+
+ /* Take voltage from the first enabled channel */
+ if (ret & CHAN_BIT(chan0))
+ ret = si3474_pi_get_chan_voltage(priv, chan0);
+ else if (ret & CHAN_BIT(chan1))
+ ret = si3474_pi_get_chan_voltage(priv, chan1);
+ else
+		/* Neither channel is enabled, so there should be no voltage */
+ return 0;
+
+ return ret;
+}
+
+static int si3474_pi_get_actual_pw(struct pse_controller_dev *pcdev, int id)
+{
+ struct si3474_priv *priv = to_si3474_priv(pcdev);
+ u8 chan0, chan1;
+ u32 uV, uA;
+ u64 tmp_64;
+ s32 ret;
+
+ ret = si3474_pi_get_voltage(&priv->pcdev, id);
+
+ /* Do not read currents if voltage is 0 */
+ if (ret <= 0)
+ return ret;
+ uV = ret;
+
+ si3474_get_channels(priv, id, &chan0, &chan1);
+
+ ret = si3474_pi_get_chan_current(priv, chan0);
+ if (ret < 0)
+ return ret;
+ uA = ret;
+
+ ret = si3474_pi_get_chan_current(priv, chan1);
+ if (ret < 0)
+ return ret;
+ uA += ret;
+
+ tmp_64 = uV;
+ tmp_64 *= uA;
+ /* mW = uV * uA / 1000000000 */
+ return DIV_ROUND_CLOSEST_ULL(tmp_64, 1000000000);
+}
+
+static const struct pse_controller_ops si3474_ops = {
+ .setup_pi_matrix = si3474_setup_pi_matrix,
+ .pi_enable = si3474_pi_enable,
+ .pi_disable = si3474_pi_disable,
+ .pi_get_actual_pw = si3474_pi_get_actual_pw,
+ .pi_get_voltage = si3474_pi_get_voltage,
+ .pi_get_admin_state = si3474_pi_get_admin_state,
+ .pi_get_pw_status = si3474_pi_get_pw_status,
+};
+
+static void si3474_ancillary_i2c_remove(void *data)
+{
+ struct i2c_client *client = data;
+
+ i2c_unregister_device(client);
+}
+
+static int si3474_i2c_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct si3474_priv *priv;
+ u8 fw_version;
+ s32 ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(dev, "i2c check functionality failed\n");
+ return -ENXIO;
+ }
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ret = i2c_smbus_read_byte_data(client, VENDOR_IC_ID_REG);
+ if (ret < 0)
+ return ret;
+
+ if (ret != SI3474_DEVICE_ID) {
+ dev_err(dev, "Wrong device ID: 0x%x\n", ret);
+ return -ENXIO;
+ }
+
+ ret = i2c_smbus_read_byte_data(client, FIRMWARE_REVISION_REG);
+ if (ret < 0)
+ return ret;
+ fw_version = ret;
+
+ ret = i2c_smbus_read_byte_data(client, CHIP_REVISION_REG);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(dev, "Chip revision: 0x%x, firmware version: 0x%x\n",
+ ret, fw_version);
+
+ priv->client[0] = client;
+ i2c_set_clientdata(client, priv);
+
+ priv->client[1] = i2c_new_ancillary_device(priv->client[0], "secondary",
+ priv->client[0]->addr + 1);
+ if (IS_ERR(priv->client[1]))
+ return PTR_ERR(priv->client[1]);
+
+ ret = devm_add_action_or_reset(dev, si3474_ancillary_i2c_remove, priv->client[1]);
+ if (ret < 0) {
+ dev_err(&priv->client[1]->dev, "Cannot register remove callback\n");
+ return ret;
+ }
+
+ ret = i2c_smbus_read_byte_data(priv->client[1], VENDOR_IC_ID_REG);
+ if (ret < 0) {
+ dev_err(&priv->client[1]->dev, "Cannot access secondary PSE controller\n");
+ return ret;
+ }
+
+ if (ret != SI3474_DEVICE_ID) {
+ dev_err(&priv->client[1]->dev,
+ "Wrong device ID for secondary PSE controller: 0x%x\n", ret);
+ return -ENXIO;
+ }
+
+ priv->np = dev->of_node;
+ priv->pcdev.owner = THIS_MODULE;
+ priv->pcdev.ops = &si3474_ops;
+ priv->pcdev.dev = dev;
+ priv->pcdev.types = ETHTOOL_PSE_C33;
+ priv->pcdev.nr_lines = SI3474_MAX_CHANS;
+
+ ret = devm_pse_controller_register(dev, &priv->pcdev);
+ if (ret) {
+ dev_err(dev, "Failed to register PSE controller: 0x%x\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id si3474_id[] = {
+ { "si3474" },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, si3474_id);
+
+static const struct of_device_id si3474_of_match[] = {
+ {
+ .compatible = "skyworks,si3474",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, si3474_of_match);
+
+static struct i2c_driver si3474_driver = {
+ .probe = si3474_i2c_probe,
+ .id_table = si3474_id,
+ .driver = {
+ .name = "si3474",
+ .of_match_table = si3474_of_match,
+ },
+};
+module_i2c_driver(si3474_driver);
+
+MODULE_AUTHOR("Piotr Kubik <piotr.kubik@adtran.com>");
+MODULE_DESCRIPTION("Skyworks Si3474 PoE PSE Controller driver");
+MODULE_LICENSE("GPL");
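
The *_STEP constants implement the datasheet scaling in pure integer math: one current LSB is 1 A / 16384, i.e. about 61035 nA after the truncating 1000 * 1000 * 1000 / 16384 division, and one voltage LSB is 60 V / 16384, about 3662 uV. A worked example of the conversion done by si3474_pi_get_chan_current(), runnable as plain userspace C:

	#include <stdint.h>
	#include <stdio.h>

	#define SI3474_NA_STEP (1000 * 1000 * 1000 / 16384)	/* 61035 nA per LSB */

	int main(void)
	{
		uint16_t raw = 2048;	/* example reading: 1/8 of full scale */
		uint64_t na = (uint64_t)raw * SI3474_NA_STEP;
		uint64_t ua = (na + 500) / 1000;	/* DIV_ROUND_CLOSEST_ULL(na, 1000) */

		/* 2048 * 61035 nA = 124999680 nA -> 125000 uA = 125 mA */
		printf("raw %u -> %llu nA -> %llu uA\n", raw,
		       (unsigned long long)na, (unsigned long long)ua);
		return 0;
	}
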
diff --git a/drivers/net/pse-pd/tps23881.c b/drivers/net/pse-pd/tps23881.c
index 63f8f43062bc..b724b222ab44 100644
--- a/drivers/net/pse-pd/tps23881.c
+++ b/drivers/net/pse-pd/tps23881.c
@@ -62,7 +62,7 @@
#define TPS23881_REG_SRAM_DATA 0x61
#define TPS23881_UV_STEP 3662
-#define TPS23881_NA_STEP 70190
+#define TPS23881_NA_STEP 89500
#define TPS23881_MW_STEP 500
#define TPS23881_MIN_PI_PW_LIMIT_MW 2000
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index cc6c50180663..8192740357a0 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1875,6 +1875,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
local_bh_enable();
goto unlock_frags;
}
+
+ if (frags && skb != tfile->napi.skb)
+ tfile->napi.skb = skb;
}
rcu_read_unlock();
local_bh_enable();
@@ -2823,13 +2826,13 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (netif_running(tun->dev))
netif_tx_wake_all_queues(tun->dev);
- strcpy(ifr->ifr_name, tun->dev->name);
+ strscpy(ifr->ifr_name, tun->dev->name);
return 0;
}
static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
{
- strcpy(ifr->ifr_name, tun->dev->name);
+ strscpy(ifr->ifr_name, tun->dev->name);
ifr->ifr_flags = tun_flags(tun);
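
strscpy() is the safe replacement for strcpy() into a fixed-size field: it cannot overrun the destination and always NUL-terminates, truncating if needed, and the two-argument form used in this patch derives the bound from the destination array's type. A minimal sketch with a hypothetical IFNAMSIZ-style buffer:

	#include <linux/string.h>

	struct name_holder {
		char name[16];		/* hypothetical fixed-size field */
	};

	static void set_name(struct name_holder *h, const char *src)
	{
		/* sizeof(h->name) is inferred by the two-argument form;
		 * the result is NUL-terminated, truncated if src is long.
		 */
		strscpy(h->name, src);
	}
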
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 0a678e31cfaa..856e648d804e 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -116,6 +116,7 @@ config USB_LAN78XX
select PHYLINK
select MICROCHIP_PHY
select CRC32
+ imply NET_SELFTESTS
help
This option adds support for Microchip LAN78XX based USB 2
& USB 3 10/100/1000 Ethernet adapters.
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 9b0318fb50b5..85bd5d845409 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -625,6 +625,21 @@ static void ax88772_suspend(struct usbnet *dev)
asix_read_medium_status(dev, 1));
}
+/* Notes on PM callbacks and locking context:
+ *
+ * - asix_suspend()/asix_resume() are invoked for both runtime PM and
+ * system-wide suspend/resume. For struct usb_driver the ->resume()
+ * callback does not receive pm_message_t, so the resume type cannot
+ * be distinguished here.
+ *
+ * - The MAC driver must hold RTNL when calling phylink interfaces such as
+ * phylink_suspend()/resume(). Those calls will also perform MDIO I/O.
+ *
+ * - Taking RTNL and doing MDIO from a runtime-PM resume callback (while
+ * the USB PM lock is held) is fragile. Since autosuspend brings no
+ * measurable power saving here, we block it by holding a PM usage
+ * reference in ax88772_bind().
+ */
static int asix_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usbnet *dev = usb_get_intfdata(intf);
@@ -676,6 +691,7 @@ static int ax88772_init_mdio(struct usbnet *dev)
priv->mdio->read = &asix_mdio_bus_read;
priv->mdio->write = &asix_mdio_bus_write;
priv->mdio->name = "Asix MDIO Bus";
+ priv->mdio->phy_mask = ~(BIT(priv->phy_addr & 0x1f) | BIT(AX_EMBD_PHY_ADDR));
/* mii bus name is usb-<usb bus number>-<usb device number> */
snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
dev->udev->bus->busnum, dev->udev->devnum);
@@ -918,6 +934,13 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
if (ret)
goto initphy_err;
+ /* Keep this interface runtime-PM active by taking a usage ref.
+ * Prevents runtime suspend while bound and avoids resume paths
+ * that could deadlock (autoresume under RTNL while USB PM lock
+ * is held, phylink/MDIO wants RTNL).
+ */
+ pm_runtime_get_noresume(&intf->dev);
+
return 0;
initphy_err:
@@ -947,6 +970,8 @@ static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
phylink_destroy(priv->phylink);
ax88772_mdio_unregister(priv);
asix_rx_fixup_common_free(dev->driver_priv);
+ /* Drop the PM usage ref taken in bind() */
+ pm_runtime_put(&intf->dev);
}
static void ax88178_unbind(struct usbnet *dev, struct usb_interface *intf)
@@ -1599,6 +1624,11 @@ static struct usb_driver asix_driver = {
.resume = asix_resume,
.reset_resume = asix_resume,
.disconnect = usbnet_disconnect,
+ /* usbnet enables autosuspend by default (supports_autosuspend=1).
+ * We keep runtime-PM active for AX88772* by taking a PM usage
+ * reference in ax88772_bind() (pm_runtime_get_noresume()) and
+ * dropping it in unbind(), which effectively blocks autosuspend.
+ */
.supports_autosuspend = 1,
.disable_hub_initiated_lpm = 1,
};
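
The fix leans on plain runtime-PM reference counting: pm_runtime_get_noresume() raises the usage count without touching the device (no resume, no I/O), which is enough to keep the core from ever runtime-suspending it, and pm_runtime_put() in unbind rebalances. A minimal sketch of the pairing, with hypothetical bind/unbind hooks:

	#include <linux/pm_runtime.h>

	static int my_bind(struct device *dev)
	{
		/* Hold a usage reference for the whole binding; _noresume
		 * is enough because no device access happens here.
		 */
		pm_runtime_get_noresume(dev);
		return 0;
	}

	static void my_unbind(struct device *dev)
	{
		pm_runtime_put(dev);	/* drop the reference from bind */
	}
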
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index ea0e5e276cd6..5d123df0a866 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -2087,6 +2087,13 @@ static const struct usb_device_id cdc_devs[] = {
.driver_info = (unsigned long)&wwan_info,
},
+ /* Intel modem (label from OEM reads Fibocom L850-GL) */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x8087, 0x095a,
+ USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_info,
+ },
+
/* DisplayLink docking stations */
{ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_VENDOR,
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 1ff25f57329a..42d35cc6b421 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -20,6 +20,7 @@
#include <linux/mdio.h>
#include <linux/phy.h>
#include <net/ip6_checksum.h>
+#include <net/selftests.h>
#include <net/vxlan.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
@@ -1079,10 +1080,13 @@ static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
}
read_raw_eeprom_done:
- if (dev->chipid == ID_REV_CHIP_ID_7800_)
- return lan78xx_write_reg(dev, HW_CFG, saved);
-
- return 0;
+ if (dev->chipid == ID_REV_CHIP_ID_7800_) {
+ int rc = lan78xx_write_reg(dev, HW_CFG, saved);
+ /* If USB fails, there is nothing to do */
+		/* If the USB write fails, nothing can be done but to return the error */
+ return rc;
+ }
+ return ret;
}
static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
@@ -1702,12 +1706,16 @@ static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
{
if (stringset == ETH_SS_STATS)
memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
+ else if (stringset == ETH_SS_TEST)
+ net_selftest_get_strings(data);
}
static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
{
if (sset == ETH_SS_STATS)
return ARRAY_SIZE(lan78xx_gstrings);
+ else if (sset == ETH_SS_TEST)
+ return net_selftest_get_count();
else
return -EOPNOTSUPP;
}
@@ -1894,6 +1902,7 @@ static const struct ethtool_ops lan78xx_ethtool_ops = {
.set_eeprom = lan78xx_ethtool_set_eeprom,
.get_ethtool_stats = lan78xx_get_stats,
.get_sset_count = lan78xx_get_sset_count,
+ .self_test = net_selftest,
.get_strings = lan78xx_get_strings,
.get_wol = lan78xx_get_wol,
.set_wol = lan78xx_set_wol,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index f5647ee0adde..11352d85475a 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1355,12 +1355,16 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1034, 2)}, /* Telit LE910C4-WWX */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1037, 4)}, /* Telit LE910C4-WWX */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1038, 3)}, /* Telit LE910C4-WWX */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x103a, 0)}, /* Telit LE910C4-WWX */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1057, 2)}, /* Telit FN980 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990A */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1077, 2)}, /* Telit FN990A w/audio */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990A */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a0, 0)}, /* Telit FN920C04 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x10a4, 0)}, /* Telit FN920C04 */
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index ddff6f19ff98..92add3daadbb 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -664,7 +664,6 @@ static void rtl8150_set_multicast(struct net_device *netdev)
rtl8150_t *dev = netdev_priv(netdev);
u16 rx_creg = 0x9e;
- netif_stop_queue(netdev);
if (netdev->flags & IFF_PROMISC) {
rx_creg |= 0x0001;
dev_info(&netdev->dev, "%s: promiscuous mode\n", netdev->name);
@@ -678,7 +677,6 @@ static void rtl8150_set_multicast(struct net_device *netdev)
rx_creg &= 0x00fc;
}
async_set_registers(dev, RCR, sizeof(rx_creg), rx_creg);
- netif_wake_queue(netdev);
}
static netdev_tx_t rtl8150_start_xmit(struct sk_buff *skb,
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d14e6d602273..a757cbcab87f 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -962,7 +962,7 @@ static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
if (dma->need_sync && len) {
offset = buf - (head + sizeof(*dma));
- virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr,
+ virtqueue_map_sync_single_range_for_cpu(rq->vq, dma->addr,
offset, len,
DMA_FROM_DEVICE);
}
@@ -970,8 +970,8 @@ static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
if (dma->ref)
return;
- virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len,
- DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ virtqueue_unmap_single_attrs(rq->vq, dma->addr, dma->len,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
put_page(page);
}
@@ -1038,13 +1038,13 @@ static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
dma->len = alloc_frag->size - sizeof(*dma);
- addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
- dma->len, DMA_FROM_DEVICE, 0);
- if (virtqueue_dma_mapping_error(rq->vq, addr))
+ addr = virtqueue_map_single_attrs(rq->vq, dma + 1,
+ dma->len, DMA_FROM_DEVICE, 0);
+ if (virtqueue_map_mapping_error(rq->vq, addr))
return NULL;
dma->addr = addr;
- dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
+ dma->need_sync = virtqueue_map_need_sync(rq->vq, addr);
/* Add a reference to dma to prevent the entire dma from
* being released during error handling. This reference
@@ -2185,10 +2185,9 @@ static struct sk_buff *build_skb_from_xdp_buff(struct net_device *dev,
skb_metadata_set(skb, metasize);
if (unlikely(xdp_buff_has_frags(xdp)))
- xdp_update_skb_shared_info(skb, nr_frags,
- sinfo->xdp_frags_size,
- xdp_frags_truesz,
- xdp_buff_is_frag_pfmemalloc(xdp));
+ xdp_update_skb_frags_info(skb, nr_frags, sinfo->xdp_frags_size,
+ xdp_frags_truesz,
+ xdp_buff_get_skb_flags(xdp));
return skb;
}
@@ -5610,20 +5609,11 @@ static int virtnet_set_rxfh(struct net_device *dev,
return 0;
}
-static int virtnet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
+static u32 virtnet_get_rx_ring_count(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
- int rc = 0;
- switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = vi->curr_queue_pairs;
- break;
- default:
- rc = -EOPNOTSUPP;
- }
-
- return rc;
+ return vi->curr_queue_pairs;
}
static const struct ethtool_ops virtnet_ethtool_ops = {
@@ -5651,7 +5641,7 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
.set_rxfh = virtnet_set_rxfh,
.get_rxfh_fields = virtnet_get_hashflow,
.set_rxfh_fields = virtnet_set_hashflow,
- .get_rxnfc = virtnet_get_rxnfc,
+ .get_rx_ring_count = virtnet_get_rx_ring_count,
};
static void virtnet_get_queue_stats_rx(struct net_device *dev, int i,
@@ -5758,14 +5748,15 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
disable_rx_mode_work(vi);
flush_work(&vi->rx_mode_work);
- netif_tx_lock_bh(vi->dev);
- netif_device_detach(vi->dev);
- netif_tx_unlock_bh(vi->dev);
if (netif_running(vi->dev)) {
rtnl_lock();
virtnet_close(vi->dev);
rtnl_unlock();
}
+
+ netif_tx_lock_bh(vi->dev);
+ netif_device_detach(vi->dev);
+ netif_tx_unlock_bh(vi->dev);
}
static int init_vqs(struct virtnet_info *vi);
@@ -5951,9 +5942,9 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
if (!rq->xsk_buffs)
return -ENOMEM;
- hdr_dma = virtqueue_dma_map_single_attrs(sq->vq, &xsk_hdr, vi->hdr_len,
- DMA_TO_DEVICE, 0);
- if (virtqueue_dma_mapping_error(sq->vq, hdr_dma)) {
+ hdr_dma = virtqueue_map_single_attrs(sq->vq, &xsk_hdr, vi->hdr_len,
+ DMA_TO_DEVICE, 0);
+ if (virtqueue_map_mapping_error(sq->vq, hdr_dma)) {
err = -ENOMEM;
goto err_free_buffs;
}
@@ -5982,8 +5973,8 @@ err_sq:
err_rq:
xsk_pool_dma_unmap(pool, 0);
err_xsk_map:
- virtqueue_dma_unmap_single_attrs(rq->vq, hdr_dma, vi->hdr_len,
- DMA_TO_DEVICE, 0);
+ virtqueue_unmap_single_attrs(rq->vq, hdr_dma, vi->hdr_len,
+ DMA_TO_DEVICE, 0);
err_free_buffs:
kvfree(rq->xsk_buffs);
return err;
@@ -6010,8 +6001,8 @@ static int virtnet_xsk_pool_disable(struct net_device *dev, u16 qid)
xsk_pool_dma_unmap(pool, 0);
- virtqueue_dma_unmap_single_attrs(sq->vq, sq->xsk_hdr_dma_addr,
- vi->hdr_len, DMA_TO_DEVICE, 0);
+ virtqueue_unmap_single_attrs(sq->vq, sq->xsk_hdr_dma_addr,
+ vi->hdr_len, DMA_TO_DEVICE, 0);
kvfree(rq->xsk_buffs);
return err;
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 3ccd649913b5..571847a7f86d 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -26,6 +26,7 @@
#include <linux/inetdevice.h>
#include <net/arp.h>
+#include <net/flow.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
@@ -38,7 +39,6 @@
#include <net/sch_generic.h>
#include <net/netns/generic.h>
#include <net/netfilter/nf_conntrack.h>
-#include <net/inet_dscp.h>
#define DRV_NAME "vrf"
#define DRV_VERSION "1.1"
@@ -505,7 +505,7 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
/* needed to match OIF rule */
fl4.flowi4_l3mdev = vrf_dev->ifindex;
fl4.flowi4_iif = LOOPBACK_IFINDEX;
- fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h));
+ fl4.flowi4_dscp = ip4h_dscp(ip4h);
fl4.flowi4_flags = FLOWI_FLAG_ANYSRC;
fl4.flowi4_proto = ip4h->protocol;
fl4.daddr = ip4h->daddr;
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index f32be2e301f2..a5c55e7e4d79 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -446,7 +446,7 @@ int vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
{
struct vxlan_dev *vxlan = netdev_priv(dev);
u8 eth_addr[ETH_ALEN + 2] = { 0 };
- struct vxlan_rdst *rdst;
+ struct vxlan_rdst *rdst = NULL;
struct vxlan_fdb *f;
int rc = 0;
@@ -459,12 +459,13 @@ int vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
rcu_read_lock();
f = vxlan_find_mac_rcu(vxlan, eth_addr, vni);
- if (!f) {
+ if (f)
+ rdst = first_remote_rcu(f);
+ if (!rdst) {
rc = -ENOENT;
goto out;
}
- rdst = first_remote_rcu(f);
vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, NULL, fdb_info);
out:
@@ -1445,6 +1446,10 @@ static enum skb_drop_reason vxlan_snoop(struct net_device *dev,
if (READ_ONCE(f->updated) != now)
WRITE_ONCE(f->updated, now);
+ /* Don't override an fdb with nexthop with a learnt entry */
+ if (rcu_access_pointer(f->nh))
+ return SKB_DROP_REASON_VXLAN_ENTRY_EXISTS;
+
if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip) &&
rdst->remote_ifindex == ifindex))
return SKB_NOT_DROPPED_YET;
@@ -1453,10 +1458,6 @@ static enum skb_drop_reason vxlan_snoop(struct net_device *dev,
if (f->state & (NUD_PERMANENT | NUD_NOARP))
return SKB_DROP_REASON_VXLAN_ENTRY_EXISTS;
- /* Don't override an fdb with nexthop with a learnt entry */
- if (rcu_access_pointer(f->nh))
- return SKB_DROP_REASON_VXLAN_ENTRY_EXISTS;
-
if (net_ratelimit())
netdev_info(dev,
"%pM migrated from %pIS to %pIS\n",
@@ -1877,6 +1878,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
n = neigh_lookup(&arp_tbl, &tip, dev);
if (n) {
+ struct vxlan_rdst *rdst = NULL;
struct vxlan_fdb *f;
struct sk_buff *reply;
@@ -1887,7 +1889,9 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
rcu_read_lock();
f = vxlan_find_mac_tx(vxlan, n->ha, vni);
- if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
+ if (f)
+ rdst = first_remote_rcu(f);
+ if (rdst && vxlan_addr_any(&rdst->remote_ip)) {
/* bridge-local neighbor */
neigh_release(n);
rcu_read_unlock();
@@ -2044,6 +2048,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);
if (n) {
+ struct vxlan_rdst *rdst = NULL;
struct vxlan_fdb *f;
struct sk_buff *reply;
@@ -2053,7 +2058,9 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
}
f = vxlan_find_mac_tx(vxlan, n->ha, vni);
- if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
+ if (f)
+ rdst = first_remote_rcu(f);
+ if (rdst && vxlan_addr_any(&rdst->remote_ip)) {
/* bridge-local neighbor */
neigh_release(n);
goto out;
diff --git a/drivers/net/vxlan/vxlan_private.h b/drivers/net/vxlan/vxlan_private.h
index 6c625fb29c6c..99fe772ad679 100644
--- a/drivers/net/vxlan/vxlan_private.h
+++ b/drivers/net/vxlan/vxlan_private.h
@@ -61,9 +61,7 @@ static inline struct hlist_head *vs_head(struct net *net, __be16 port)
return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
}
-/* First remote destination for a forwarding entry.
- * Guaranteed to be non-NULL because remotes are never deleted.
- */
+/* First remote destination for a forwarding entry. */
static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
{
if (rcu_access_pointer(fdb->nh))
diff --git a/drivers/net/wan/framer/pef2256/pef2256.c b/drivers/net/wan/framer/pef2256/pef2256.c
index 1e4c8e85d598..c5501826db1e 100644
--- a/drivers/net/wan/framer/pef2256/pef2256.c
+++ b/drivers/net/wan/framer/pef2256/pef2256.c
@@ -37,6 +37,7 @@ struct pef2256 {
struct device *dev;
struct regmap *regmap;
enum pef2256_version version;
+ const char *version_txt;
struct clk *mclk;
struct clk *sclkr;
struct clk *sclkx;
@@ -114,6 +115,16 @@ enum pef2256_version pef2256_get_version(struct pef2256 *pef2256)
}
EXPORT_SYMBOL_GPL(pef2256_get_version);
+static ssize_t version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pef2256 *pef2256 = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", pef2256->version_txt);
+}
+
+static DEVICE_ATTR_RO(version);
+
enum pef2256_gcm_config_item {
PEF2256_GCM_CONFIG_1544000 = 0,
PEF2256_GCM_CONFIG_2048000,
@@ -697,7 +708,6 @@ static int pef2256_probe(struct platform_device *pdev)
unsigned long sclkr_rate, sclkx_rate;
struct framer_provider *framer_provider;
struct pef2256 *pef2256;
- const char *version_txt;
void __iomem *iomem;
int ret;
int irq;
@@ -719,8 +729,8 @@ static int pef2256_probe(struct platform_device *pdev)
pef2256->regmap = devm_regmap_init_mmio(&pdev->dev, iomem,
&pef2256_regmap_config);
if (IS_ERR(pef2256->regmap)) {
- dev_err(&pdev->dev, "Failed to initialise Regmap (%ld)\n",
- PTR_ERR(pef2256->regmap));
+ dev_err(&pdev->dev, "Failed to initialise Regmap (%pe)\n",
+ pef2256->regmap);
return PTR_ERR(pef2256->regmap);
}
@@ -763,18 +773,18 @@ static int pef2256_probe(struct platform_device *pdev)
pef2256->version = pef2256_get_version(pef2256);
switch (pef2256->version) {
case PEF2256_VERSION_1_2:
- version_txt = "1.2";
+ pef2256->version_txt = "1.2";
break;
case PEF2256_VERSION_2_1:
- version_txt = "2.1";
+ pef2256->version_txt = "2.1";
break;
case PEF2256_VERSION_2_2:
- version_txt = "2.2";
+ pef2256->version_txt = "2.2";
break;
default:
return -ENODEV;
}
- dev_info(pef2256->dev, "Version %s detected\n", version_txt);
+ dev_info(pef2256->dev, "Version %s detected\n", pef2256->version_txt);
ret = pef2556_of_parse(pef2256, np);
if (ret)
@@ -835,6 +845,8 @@ static int pef2256_probe(struct platform_device *pdev)
return ret;
}
+ device_create_file(pef2256->dev, &dev_attr_version);
+
return 0;
}
@@ -849,6 +861,8 @@ static void pef2256_remove(struct platform_device *pdev)
pef2256_write8(pef2256, PEF2256_IMR3, 0xff);
pef2256_write8(pef2256, PEF2256_IMR4, 0xff);
pef2256_write8(pef2256, PEF2256_IMR5, 0xff);
+
+ device_remove_file(pef2256->dev, &dev_attr_version);
}
static const struct of_device_id pef2256_id_table[] = {
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 995a7207bdf8..f357a7ac70ac 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -81,7 +81,7 @@ static struct lapbethdev *lapbeth_get_x25_dev(struct net_device *dev)
static __inline__ int dev_is_ethdev(struct net_device *dev)
{
- return dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5);
+ return dev->type == ARPHRD_ETHER && !netdev_need_ops_lock(dev);
}
/* ------------------------------------------------------------------------ */
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index 813bd10d3dc7..46a71ec36af8 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -333,7 +333,8 @@ static int wg_newlink(struct net_device *dev,
goto err_free_peer_hashtable;
wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s",
- WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name);
+ WQ_CPU_INTENSIVE | WQ_FREEZABLE | WQ_PERCPU, 0,
+ dev->name);
if (!wg->handshake_receive_wq)
goto err_free_index_hashtable;
@@ -343,7 +344,8 @@ static int wg_newlink(struct net_device *dev,
goto err_destroy_handshake_receive;
wg->packet_crypt_wq = alloc_workqueue("wg-crypt-%s",
- WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 0, dev->name);
+ WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_PERCPU, 0,
+ dev->name);
if (!wg->packet_crypt_wq)
goto err_destroy_handshake_send;
diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
index 7eb76724b3ed..79b6d70de236 100644
--- a/drivers/net/wireguard/queueing.h
+++ b/drivers/net/wireguard/queueing.h
@@ -104,16 +104,11 @@ static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating)
static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
{
- unsigned int cpu = *stored_cpu, cpu_index, i;
+ unsigned int cpu = *stored_cpu;
+
+ while (unlikely(cpu >= nr_cpu_ids || !cpu_online(cpu)))
+ cpu = *stored_cpu = cpumask_nth(id % num_online_cpus(), cpu_online_mask);
- if (unlikely(cpu >= nr_cpu_ids ||
- !cpumask_test_cpu(cpu, cpu_online_mask))) {
- cpu_index = id % cpumask_weight(cpu_online_mask);
- cpu = cpumask_first(cpu_online_mask);
- for (i = 0; i < cpu_index; ++i)
- cpu = cpumask_next(cpu, cpu_online_mask);
- *stored_cpu = cpu;
- }
return cpu;
}
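The rewrite above leans on cpumask_nth(), which returns the n-th set bit of a mask and so replaces the old cpumask_first()/cpumask_next() walk with one call. A sketch of the equivalence outside the driver:

/* Pick the n-th online CPU for a given id; equivalent to the removed
 * loop that started at cpumask_first() and advanced cpumask_next()
 * (id % num_online_cpus()) times.
 */
static unsigned int pick_nth_online_cpu(unsigned int id)
{
	return cpumask_nth(id % num_online_cpus(), cpu_online_mask);
}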
diff --git a/drivers/net/wireless/ath/ath10k/leds.c b/drivers/net/wireless/ath/ath10k/leds.c
index 9b1d04eb4265..3a6c8111e7c6 100644
--- a/drivers/net/wireless/ath/ath10k/leds.c
+++ b/drivers/net/wireless/ath/ath10k/leds.c
@@ -27,7 +27,7 @@ static int ath10k_leds_set_brightness_blocking(struct led_classdev *led_cdev,
goto out;
ar->leds.gpio_state_pin = (brightness != LED_OFF) ^ led->active_low;
- ath10k_wmi_gpio_output(ar, led->gpio, ar->leds.gpio_state_pin);
+ ath10k_wmi_gpio_output(ar, ar->hw_params.led_pin, ar->leds.gpio_state_pin);
out:
mutex_unlock(&ar->conf_mutex);
@@ -64,7 +64,6 @@ int ath10k_leds_register(struct ath10k *ar)
snprintf(ar->leds.label, sizeof(ar->leds.label), "ath10k-%s",
wiphy_name(ar->hw->wiphy));
ar->leds.wifi_led.active_low = 1;
- ar->leds.wifi_led.gpio = ar->hw_params.led_pin;
ar->leds.wifi_led.name = ar->leds.label;
ar->leds.wifi_led.default_state = LEDS_GPIO_DEFSTATE_KEEP;
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 24dd794e31ea..154ac7a70982 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -16,6 +16,7 @@
#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/bitfield.h>
+#include <linux/random.h>
#include "hif.h"
#include "core.h"
@@ -290,8 +291,15 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
if (cmd == DISABLE_KEY) {
- arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_NONE];
- arg.key_data = NULL;
+ if (flags & WMI_KEY_GROUP) {
+ /* Not all hardware handles the group-key deletion operation
+ * correctly. Replace the key with a junk value to invalidate it.
+ */
+ get_random_bytes(key->key, key->keylen);
+ } else {
+ arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_NONE];
+ arg.key_data = NULL;
+ }
}
return ath10k_wmi_vdev_install_key(arvif->ar, &arg);
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index f0713bd36173..b3f6424c17d3 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -13,7 +13,7 @@
#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc/qcom_rproc.h>
-#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
#include <linux/iommu.h>
#include "ce.h"
@@ -1559,19 +1559,11 @@ static void ath10k_modem_deinit(struct ath10k *ar)
static int ath10k_setup_msa_resources(struct ath10k *ar, u32 msa_size)
{
struct device *dev = ar->dev;
- struct device_node *node;
struct resource r;
int ret;
- node = of_parse_phandle(dev->of_node, "memory-region", 0);
- if (node) {
- ret = of_address_to_resource(node, 0, &r);
- of_node_put(node);
- if (ret) {
- dev_err(dev, "failed to resolve msa fixed region\n");
- return ret;
- }
-
+ ret = of_reserved_mem_region_to_resource(dev->of_node, 0, &r);
+ if (!ret) {
ar->msa.paddr = r.start;
ar->msa.mem_size = resource_size(&r);
ar->msa.vaddr = devm_memremap(dev, ar->msa.paddr,
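of_reserved_mem_region_to_resource() folds the phandle lookup, the address translation, and the node refcount drop into one call. A sketch of what the helper replaces, matching the open-coded pattern deleted above:

/* Old open-coded pattern (as removed above). */
static int msa_region_to_resource_old(struct device *dev, struct resource *r)
{
	struct device_node *node;
	int ret;

	node = of_parse_phandle(dev->of_node, "memory-region", 0);
	if (!node)
		return -ENOENT;

	ret = of_address_to_resource(node, 0, r);
	of_node_put(node);
	return ret;
}

/* New: resolve index 0 of "memory-region" in a single call. */
static int msa_region_to_resource_new(struct device *dev, struct resource *r)
{
	return of_reserved_mem_region_to_resource(dev->of_node, 0, r);
}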
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index cb8ae751eb31..e595b0979a56 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1764,33 +1764,32 @@ void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch,
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
{
+ unsigned long timeout = jiffies + WMI_SERVICE_READY_TIMEOUT_HZ;
unsigned long time_left, i;
- time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
- WMI_SERVICE_READY_TIMEOUT_HZ);
- if (!time_left) {
- /* Sometimes the PCI HIF doesn't receive interrupt
- * for the service ready message even if the buffer
- * was completed. PCIe sniffer shows that it's
- * because the corresponding CE ring doesn't fires
- * it. Workaround here by polling CE rings once.
- */
- ath10k_warn(ar, "failed to receive service ready completion, polling..\n");
-
+ /* Sometimes the PCI HIF doesn't receive an interrupt
+ * for the service ready message even if the buffer
+ * was completed. A PCIe sniffer shows that it's
+ * because the corresponding CE ring doesn't fire
+ * it. Work around this by polling the CE rings. Since
+ * the message could arrive at any time, continue
+ * polling until timeout.
+ */
+ do {
for (i = 0; i < CE_COUNT; i++)
ath10k_hif_send_complete_check(ar, i, 1);
+ /* The 100 ms granularity is a tradeoff between scheduler
+ * overhead and response latency.
+ */
time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
- WMI_SERVICE_READY_TIMEOUT_HZ);
- if (!time_left) {
- ath10k_warn(ar, "polling timed out\n");
- return -ETIMEDOUT;
- }
-
- ath10k_warn(ar, "service ready completion received, continuing normally\n");
- }
+ msecs_to_jiffies(100));
+ if (time_left)
+ return 0;
+ } while (time_before(jiffies, timeout));
- return 0;
+ ath10k_warn(ar, "failed to receive service ready completion\n");
+ return -ETIMEDOUT;
}
int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
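The control flow above is a general missed-interrupt workaround: poll the hardware, wait a short interval for the completion, and repeat until an overall deadline. Its generic shape, with the WMI-specific pieces abstracted away (poll_hw and the other names here are illustrative):

static int wait_ready_polled(struct completion *ready, void (*poll_hw)(void),
			     unsigned long timeout)
{
	unsigned long deadline = jiffies + timeout;

	do {
		poll_hw();
		/* 100 ms granularity: a missed interrupt costs at most
		 * one polling period of extra latency.
		 */
		if (wait_for_completion_timeout(ready, msecs_to_jiffies(100)))
			return 0;
	} while (time_before(jiffies, deadline));

	return -ETIMEDOUT;
}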
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index 50809cc1dad4..8dfe9b40c126 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -9,8 +9,8 @@
#include <linux/property.h>
#include <linux/of_device.h>
#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
#include <linux/dma-mapping.h>
-#include <linux/of_address.h>
#include <linux/iommu.h>
#include "ahb.h"
#include "debug.h"
@@ -919,16 +919,10 @@ static int ath11k_ahb_setup_msa_resources(struct ath11k_base *ab)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
struct device *dev = ab->dev;
- struct device_node *node;
struct resource r;
int ret;
- node = of_parse_phandle(dev->of_node, "memory-region", 0);
- if (!node)
- return -ENOENT;
-
- ret = of_address_to_resource(node, 0, &r);
- of_node_put(node);
+ ret = of_reserved_mem_region_to_resource(dev->of_node, 0, &r);
if (ret) {
dev_err(dev, "failed to resolve msa fixed region\n");
return ret;
@@ -937,12 +931,7 @@ static int ath11k_ahb_setup_msa_resources(struct ath11k_base *ab)
ab_ahb->fw.msa_paddr = r.start;
ab_ahb->fw.msa_size = resource_size(&r);
- node = of_parse_phandle(dev->of_node, "memory-region", 1);
- if (!node)
- return -ENOENT;
-
- ret = of_address_to_resource(node, 0, &r);
- of_node_put(node);
+ ret = of_reserved_mem_region_to_resource(dev->of_node, 1, &r);
if (ret) {
dev_err(dev, "failed to resolve ce fixed region\n");
return ret;
diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c
index c65fc9fb539e..a7a163621b21 100644
--- a/drivers/net/wireless/ath/ath11k/ce.c
+++ b/drivers/net/wireless/ath/ath11k/ce.c
@@ -354,7 +354,8 @@ static int ath11k_ce_rx_post_pipe(struct ath11k_ce_pipe *pipe)
ret = ath11k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);
if (ret) {
- ath11k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
+ ath11k_dbg(ab, ATH11K_DBG_CE, "failed to enqueue rx buf: %d\n",
+ ret);
dma_unmap_single(ab->dev, paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index d49353b6b2e7..2810752260f2 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -2215,14 +2215,10 @@ static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab)
mutex_unlock(&ab->core_lock);
ath11k_dp_free(ab);
- ath11k_hal_srng_deinit(ab);
+ ath11k_hal_srng_clear(ab);
ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS(ab))) - 1;
- ret = ath11k_hal_srng_init(ab);
- if (ret)
- return ret;
-
clear_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags);
ret = ath11k_core_qmi_firmware_ready(ab);
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 220d69a7a429..e8780b05ce11 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -411,6 +411,8 @@ struct ath11k_vif {
bool do_not_send_tmpl;
struct ath11k_arp_ns_offload arp_ns_offload;
struct ath11k_rekey_data rekey_data;
+ u32 num_stations;
+ bool reinstall_group_keys;
struct ath11k_reg_tpc_power_info reg_tpc_info;
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index ffc7482c77b6..b9e976ddcbbf 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -4615,7 +4615,6 @@ static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar,
msdu_details[i].buf_addr_info.info0) == 0) {
msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
msdu_desc_info->info0 |= last;
- ;
break;
}
msdu_desc_info = &msdu_details[i].rx_msdu_info;
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
index 0c3ce7509ab8..0c797b8d0a27 100644
--- a/drivers/net/wireless/ath/ath11k/hal.c
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -1386,6 +1386,22 @@ void ath11k_hal_srng_deinit(struct ath11k_base *ab)
}
EXPORT_SYMBOL(ath11k_hal_srng_deinit);
+void ath11k_hal_srng_clear(struct ath11k_base *ab)
+{
+ /* No need to memset rdp and wrp memory since each individual
+ * segment would get cleared in ath11k_hal_srng_src_hw_init()
+ * and ath11k_hal_srng_dst_hw_init().
+ */
+ memset(ab->hal.srng_list, 0,
+ sizeof(ab->hal.srng_list));
+ memset(ab->hal.shadow_reg_addr, 0,
+ sizeof(ab->hal.shadow_reg_addr));
+ ab->hal.avail_blk_resource = 0;
+ ab->hal.current_blk_index = 0;
+ ab->hal.num_shadow_reg_configured = 0;
+}
+EXPORT_SYMBOL(ath11k_hal_srng_clear);
+
void ath11k_hal_dump_srng_stats(struct ath11k_base *ab)
{
struct hal_srng *srng;
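ath11k_hal_srng_clear() exists so the crash-reconfigure path in core.c (see the hunk above) can reset HAL bookkeeping without freeing and reallocating ring memory, removing an error path from recovery. The resulting ordering, sketched from that hunk:

/* Crash recovery sketch: ring memory survives the reset, so no
 * re-init (and no -ENOMEM path) is needed.
 */
static void reconfigure_srng_on_crash(struct ath11k_base *ab)
{
	ath11k_dp_free(ab);
	ath11k_hal_srng_clear(ab);	/* was: srng_deinit() + srng_init() */
}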
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
index 601542410c75..839095af9267 100644
--- a/drivers/net/wireless/ath/ath11k/hal.h
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -965,6 +965,7 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
struct hal_srng_params *params);
int ath11k_hal_srng_init(struct ath11k_base *ath11k);
void ath11k_hal_srng_deinit(struct ath11k_base *ath11k);
+void ath11k_hal_srng_clear(struct ath11k_base *ab);
void ath11k_hal_dump_srng_stats(struct ath11k_base *ab);
void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab,
u32 **cfg, u32 *len);
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 1fadf5faafb8..106e2530b64e 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -4317,6 +4317,40 @@ static int ath11k_clear_peer_keys(struct ath11k_vif *arvif,
return first_errno;
}
+static int ath11k_set_group_keys(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_base *ab = ar->ab;
+ const u8 *addr = arvif->bssid;
+ int i, ret, first_errno = 0;
+ struct ath11k_peer *peer;
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find(ab, arvif->vdev_id, addr);
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!peer)
+ return -ENOENT;
+
+ for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+ struct ieee80211_key_conf *key = peer->keys[i];
+
+ if (!key || (key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ continue;
+
+ ret = ath11k_install_key(arvif, key, SET_KEY, addr,
+ WMI_KEY_GROUP);
+ if (ret < 0 && first_errno == 0)
+ first_errno = ret;
+
+ if (ret < 0)
+ ath11k_warn(ab, "failed to set group key of idx %d for vdev %d: %d\n",
+ i, arvif->vdev_id, ret);
+ }
+
+ return first_errno;
+}
+
static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
@@ -4326,6 +4360,7 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ath11k_peer *peer;
struct ath11k_sta *arsta;
+ bool is_ap_with_no_sta;
const u8 *peer_addr;
int ret = 0;
u32 flags = 0;
@@ -4386,16 +4421,57 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
else
flags |= WMI_KEY_GROUP;
- ret = ath11k_install_key(arvif, key, cmd, peer_addr, flags);
- if (ret) {
- ath11k_warn(ab, "ath11k_install_key failed (%d)\n", ret);
- goto exit;
- }
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "%s for peer %pM on vdev %d flags 0x%X, type = %d, num_sta %d\n",
+ cmd == SET_KEY ? "SET_KEY" : "DEL_KEY", peer_addr, arvif->vdev_id,
+ flags, arvif->vdev_type, arvif->num_stations);
+
+ /* Allow group key clearing only in AP mode when no stations are
+ * associated. There is a known race condition in firmware where
+ * group-addressed packets may be dropped if the key is cleared
+ * and immediately set again during rekey.
+ *
+ * During GTK rekey, mac80211 issues a clear-key operation (if the
+ * old key exists) followed by an install-key operation for the
+ * same key index. This causes ath11k to send two WMI commands in
+ * quick succession: one to clear the old key and another to
+ * install the new key in the same slot.
+ *
+ * Under certain conditions, especially under high load or in
+ * time-sensitive scenarios, firmware may process these commands
+ * asynchronously in a way that leaves firmware assuming the key
+ * is cleared while hardware still holds a valid key. This
+ * inconsistency between hardware and firmware leads to
+ * group-addressed packet drops after rekey.
+ * Only setting the same key again can restore a valid key in
+ * firmware and allow packets to be transmitted.
+ *
+ * There is also a use case where an AP transitions from secure
+ * mode to open mode without a vdev restart by deleting all
+ * associated peers and clearing the key, so allow key clearing
+ * for that case alone. Mark arvif->reinstall_group_keys in such
+ * cases and reinstall the same key when the first peer is added,
+ * allowing firmware to recover from the race if it occurred.
+ */
- ret = ath11k_dp_peer_rx_pn_replay_config(arvif, peer_addr, cmd, key);
- if (ret) {
- ath11k_warn(ab, "failed to offload PN replay detection %d\n", ret);
- goto exit;
+ is_ap_with_no_sta = (vif->type == NL80211_IFTYPE_AP &&
+ !arvif->num_stations);
+ if ((flags & WMI_KEY_PAIRWISE) || cmd == SET_KEY || is_ap_with_no_sta) {
+ ret = ath11k_install_key(arvif, key, cmd, peer_addr, flags);
+ if (ret) {
+ ath11k_warn(ab, "ath11k_install_key failed (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = ath11k_dp_peer_rx_pn_replay_config(arvif, peer_addr, cmd, key);
+ if (ret) {
+ ath11k_warn(ab, "failed to offload PN replay detection %d\n",
+ ret);
+ goto exit;
+ }
+
+ if ((flags & WMI_KEY_GROUP) && cmd == SET_KEY && is_ap_with_no_sta)
+ arvif->reinstall_group_keys = true;
}
spin_lock_bh(&ab->base_lock);
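The condition introduced above gates which key operations reach firmware: pairwise keys and SET_KEY always go through, while a group-key DEL_KEY is allowed only on an AP vdev with no associated stations. Factored into a predicate for readability (a sketch, not part of the patch):

static bool ath11k_key_op_allowed(u32 flags, enum set_key_cmd cmd,
				  bool is_ap_with_no_sta)
{
	/* A group-key DEL_KEY while stations are associated is the one
	 * case withheld from firmware; see the race described above.
	 */
	return (flags & WMI_KEY_PAIRWISE) || cmd == SET_KEY ||
	       is_ap_with_no_sta;
}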
@@ -4994,6 +5070,7 @@ static int ath11k_mac_inc_num_stations(struct ath11k_vif *arvif,
return -ENOBUFS;
ar->num_stations++;
+ arvif->num_stations++;
return 0;
}
@@ -5009,6 +5086,7 @@ static void ath11k_mac_dec_num_stations(struct ath11k_vif *arvif,
return;
ar->num_stations--;
+ arvif->num_stations--;
}
static u32 ath11k_mac_ieee80211_sta_bw_to_wmi(struct ath11k *ar,
@@ -9540,6 +9618,21 @@ static int ath11k_mac_station_add(struct ath11k *ar,
goto exit;
}
+ /* The driver allows the DEL KEY followed by SET KEY sequence for
+ * group keys only when no clients are associated. If firmware
+ * entered the race during that window, reinstalling the same key
+ * when the first station connects allows firmware to recover
+ * from the race.
+ */
+ if (arvif->num_stations == 1 && arvif->reinstall_group_keys) {
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "set group keys on 1st station add for vdev %d\n",
+ arvif->vdev_id);
+ ret = ath11k_set_group_keys(arvif);
+ if (ret)
+ goto dec_num_station;
+ arvif->reinstall_group_keys = false;
+ }
+
arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL);
if (!arsta->rx_stats) {
ret = -ENOMEM;
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index 378ac96b861b..aea56c38bf8f 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -13,7 +13,7 @@
#include "debug.h"
#include "hif.h"
#include <linux/of.h>
-#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
#include <linux/ioport.h>
#include <linux/firmware.h>
#include <linux/of_irq.h>
@@ -2040,23 +2040,14 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
{
struct device *dev = ab->dev;
- struct device_node *hremote_node = NULL;
- struct resource res;
+ struct resource res = {};
u32 host_ddr_sz;
int i, idx, ret;
for (i = 0, idx = 0; i < ab->qmi.mem_seg_count; i++) {
switch (ab->qmi.target_mem[i].type) {
case HOST_DDR_REGION_TYPE:
- hremote_node = of_parse_phandle(dev->of_node, "memory-region", 0);
- if (!hremote_node) {
- ath11k_dbg(ab, ATH11K_DBG_QMI,
- "fail to get hremote_node\n");
- return -ENODEV;
- }
-
- ret = of_address_to_resource(hremote_node, 0, &res);
- of_node_put(hremote_node);
+ ret = of_reserved_mem_region_to_resource(dev->of_node, 0, &res);
if (ret) {
ath11k_dbg(ab, ATH11K_DBG_QMI,
"fail to get reg from hremote\n");
@@ -2095,7 +2086,7 @@ static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
}
if (ath11k_core_coldboot_cal_support(ab)) {
- if (hremote_node) {
+ if (resource_size(&res)) {
ab->qmi.target_mem[idx].paddr =
res.start + host_ddr_sz;
ab->qmi.target_mem[idx].iaddr =
@@ -2557,7 +2548,7 @@ static int ath11k_qmi_m3_load(struct ath11k_base *ab)
GFP_KERNEL);
if (!m3_mem->vaddr) {
ath11k_err(ab, "failed to allocate memory for M3 with size %zu\n",
- fw->size);
+ m3_len);
ret = -ENOMEM;
goto out;
}
diff --git a/drivers/net/wireless/ath/ath12k/ahb.c b/drivers/net/wireless/ath/ath12k/ahb.c
index 3b983f4e3268..b30527c402f6 100644
--- a/drivers/net/wireless/ath/ath12k/ahb.c
+++ b/drivers/net/wireless/ath/ath12k/ahb.c
@@ -414,7 +414,7 @@ static int ath12k_ahb_power_up(struct ath12k_base *ab)
goto err_fw2;
}
- ret = qcom_mdt_load_no_init(dev, fw2, fw2_name, pasid, mem_region, mem_phys,
+ ret = qcom_mdt_load_no_init(dev, fw2, fw2_name, mem_region, mem_phys,
mem_size, &mem_phys);
if (ret) {
ath12k_err(ab, "Failed to load MDT segments: %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath12k/ce.c b/drivers/net/wireless/ath/ath12k/ce.c
index f93a419abf65..9a63608838ac 100644
--- a/drivers/net/wireless/ath/ath12k/ce.c
+++ b/drivers/net/wireless/ath/ath12k/ce.c
@@ -392,7 +392,8 @@ static int ath12k_ce_rx_post_pipe(struct ath12k_ce_pipe *pipe)
ret = ath12k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);
if (ret) {
- ath12k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
+ ath12k_dbg(ab, ATH12K_DBG_CE, "failed to enqueue rx buf: %d\n",
+ ret);
dma_unmap_single(ab->dev, paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
@@ -478,7 +479,7 @@ static void ath12k_ce_recv_process_cb(struct ath12k_ce_pipe *pipe)
}
while ((skb = __skb_dequeue(&list))) {
- ath12k_dbg(ab, ATH12K_DBG_AHB, "rx ce pipe %d len %d\n",
+ ath12k_dbg(ab, ATH12K_DBG_CE, "rx ce pipe %d len %d\n",
pipe->pipe_num, skb->len);
pipe->recv_cb(ab, skb);
}
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
index 519f826f56c8..3d1956966a48 100644
--- a/drivers/net/wireless/ath/ath12k/core.h
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef ATH12K_CORE_H
@@ -72,6 +72,9 @@
#define ATH12K_MAX_MLO_PEERS 256
#define ATH12K_MLO_PEER_ID_INVALID 0xFFFF
+#define ATH12K_INVALID_RSSI_FULL -1
+#define ATH12K_INVALID_RSSI_EMPTY -128
+
enum ath12k_bdf_search {
ATH12K_BDF_SEARCH_DEFAULT,
ATH12K_BDF_SEARCH_BUS_AND_BOARD,
@@ -560,6 +563,7 @@ struct ath12k_link_sta {
u32 bw_prev;
u32 peer_nss;
s8 rssi_beacon;
+ s8 chain_signal[IEEE80211_MAX_CHAINS];
/* For now the assoc link will be considered primary */
bool is_assoc_link;
@@ -730,6 +734,7 @@ struct ath12k {
u32 txpower_scale;
u32 power_scale;
u32 chan_tx_pwr;
+ u32 rts_threshold;
u32 num_stations;
u32 max_num_stations;
diff --git a/drivers/net/wireless/ath/ath12k/debug.h b/drivers/net/wireless/ath/ath12k/debug.h
index 48916e4e1f60..bf254e43a68d 100644
--- a/drivers/net/wireless/ath/ath12k/debug.h
+++ b/drivers/net/wireless/ath/ath12k/debug.h
@@ -26,6 +26,7 @@ enum ath12k_debug_mask {
ATH12K_DBG_DP_TX = 0x00002000,
ATH12K_DBG_DP_RX = 0x00004000,
ATH12K_DBG_WOW = 0x00008000,
+ ATH12K_DBG_CE = 0x00010000,
ATH12K_DBG_ANY = 0xffffffff,
};
diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
index f893fce6d9bd..4a54b8c35311 100644
--- a/drivers/net/wireless/ath/ath12k/dp.c
+++ b/drivers/net/wireless/ath/ath12k/dp.c
@@ -1745,7 +1745,9 @@ int ath12k_dp_alloc(struct ath12k_base *ab)
INIT_LIST_HEAD(&dp->reo_cmd_list);
INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
+ INIT_LIST_HEAD(&dp->reo_cmd_update_rx_queue_list);
spin_lock_init(&dp->reo_cmd_lock);
+ spin_lock_init(&dp->reo_rxq_flush_lock);
dp->reo_cmd_cache_flush_count = 0;
dp->idle_link_rbm = ath12k_dp_get_idle_link_rbm(ab);
diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h
index 7baa48b86f7a..4ffec6ad7d8d 100644
--- a/drivers/net/wireless/ath/ath12k/dp.h
+++ b/drivers/net/wireless/ath/ath12k/dp.h
@@ -184,7 +184,7 @@ struct ath12k_pdev_dp {
#define DP_REO_REINJECT_RING_SIZE 32
#define DP_RX_RELEASE_RING_SIZE 1024
#define DP_REO_EXCEPTION_RING_SIZE 128
-#define DP_REO_CMD_RING_SIZE 128
+#define DP_REO_CMD_RING_SIZE 256
#define DP_REO_STATUS_RING_SIZE 2048
#define DP_RXDMA_BUF_RING_SIZE 4096
#define DP_RX_MAC_BUF_RING_SIZE 2048
@@ -389,15 +389,19 @@ struct ath12k_dp {
struct dp_srng reo_dst_ring[DP_REO_DST_RING_MAX];
struct dp_tx_ring tx_ring[DP_TCL_NUM_RING_MAX];
struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX];
- struct list_head reo_cmd_list;
+ struct list_head reo_cmd_update_rx_queue_list;
struct list_head reo_cmd_cache_flush_list;
u32 reo_cmd_cache_flush_count;
-
/* protects access to below fields,
- * - reo_cmd_list
+ * - reo_cmd_update_rx_queue_list
* - reo_cmd_cache_flush_list
* - reo_cmd_cache_flush_count
*/
+ spinlock_t reo_rxq_flush_lock;
+ struct list_head reo_cmd_list;
+ /* protects access to below fields,
+ * - reo_cmd_list
+ */
spinlock_t reo_cmd_lock;
struct ath12k_hp_update_timer reo_cmd_timer;
struct ath12k_hp_update_timer tx_ring_timer[DP_TCL_NUM_RING_MAX];
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c
index 8189e52ed007..009c49502148 100644
--- a/drivers/net/wireless/ath/ath12k/dp_mon.c
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.c
@@ -1441,6 +1441,34 @@ static void ath12k_dp_mon_parse_rx_msdu_end_err(u32 info, u32 *errmap)
}
static void
+ath12k_parse_cmn_usr_info(const struct hal_phyrx_common_user_info *cmn_usr_info,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht;
+ u32 known, data, cp_setting, ltf_size;
+
+ known = __le32_to_cpu(eht->known);
+ known |= IEEE80211_RADIOTAP_EHT_KNOWN_GI |
+ IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF;
+ eht->known = cpu_to_le32(known);
+
+ cp_setting = le32_get_bits(cmn_usr_info->info0,
+ HAL_RX_CMN_USR_INFO0_CP_SETTING);
+ ltf_size = le32_get_bits(cmn_usr_info->info0,
+ HAL_RX_CMN_USR_INFO0_LTF_SIZE);
+
+ data = __le32_to_cpu(eht->data[0]);
+ data |= u32_encode_bits(cp_setting, IEEE80211_RADIOTAP_EHT_DATA0_GI);
+ data |= u32_encode_bits(ltf_size, IEEE80211_RADIOTAP_EHT_DATA0_LTF);
+ eht->data[0] = cpu_to_le32(data);
+
+ if (!ppdu_info->ltf_size)
+ ppdu_info->ltf_size = ltf_size;
+ if (!ppdu_info->gi)
+ ppdu_info->gi = cp_setting;
+}
+
+static void
ath12k_dp_mon_parse_status_msdu_end(struct ath12k_mon_data *pmon,
const struct hal_rx_msdu_end *msdu_end)
{
@@ -1627,25 +1655,22 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k *ar,
const struct hal_rx_phyrx_rssi_legacy_info *rssi = tlv_data;
info[0] = __le32_to_cpu(rssi->info0);
- info[1] = __le32_to_cpu(rssi->info1);
+ info[2] = __le32_to_cpu(rssi->info2);
/* TODO: Please note that the combined rssi will not be accurate
* in MU case. Rssi in MU needs to be retrieved from
* PHYRX_OTHER_RECEIVE_INFO TLV.
*/
ppdu_info->rssi_comb =
- u32_get_bits(info[1],
- HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB);
+ u32_get_bits(info[2],
+ HAL_RX_RSSI_LEGACY_INFO_INFO2_RSSI_COMB_PPDU);
ppdu_info->bw = u32_get_bits(info[0],
- HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RX_BW);
+ HAL_RX_RSSI_LEGACY_INFO_INFO0_RX_BW);
break;
}
- case HAL_PHYRX_OTHER_RECEIVE_INFO: {
- const struct hal_phyrx_common_user_info *cmn_usr_info = tlv_data;
-
- ppdu_info->gi = le32_get_bits(cmn_usr_info->info0,
- HAL_RX_PHY_CMN_USER_INFO0_GI);
+ case HAL_PHYRX_COMMON_USER_INFO: {
+ ath12k_parse_cmn_usr_info(tlv_data, ppdu_info);
break;
}
case HAL_RX_PPDU_START_USER_INFO:
@@ -2154,8 +2179,12 @@ static void ath12k_dp_mon_update_radiotap(struct ath12k *ar,
spin_unlock_bh(&ar->data_lock);
rxs->flag |= RX_FLAG_MACTIME_START;
- rxs->signal = ppduinfo->rssi_comb + noise_floor;
rxs->nss = ppduinfo->nss + 1;
+ if (test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+ ar->ab->wmi_ab.svc_map))
+ rxs->signal = ppduinfo->rssi_comb;
+ else
+ rxs->signal = ppduinfo->rssi_comb + noise_floor;
if (ppduinfo->userstats[ppduinfo->userid].ampdu_present) {
rxs->flag |= RX_FLAG_AMPDU_DETAILS;
@@ -2244,6 +2273,7 @@ static void ath12k_dp_mon_update_radiotap(struct ath12k *ar,
static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi,
struct sk_buff *msdu,
+ const struct hal_rx_mon_ppdu_info *ppduinfo,
struct ieee80211_rx_status *status,
u8 decap)
{
@@ -2257,7 +2287,6 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
struct ieee80211_sta *pubsta = NULL;
struct ath12k_peer *peer;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
- struct ath12k_dp_rx_info rx_info;
bool is_mcbc = rxcb->is_mcbc;
bool is_eapol_tkip = rxcb->is_eapol;
@@ -2271,8 +2300,7 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
}
spin_lock_bh(&ar->ab->base_lock);
- rx_info.addr2_present = false;
- peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu, &rx_info);
+ peer = ath12k_peer_find_by_id(ar->ab, ppduinfo->peer_id);
if (peer && peer->sta) {
pubsta = peer->sta;
if (pubsta->valid_links) {
@@ -2365,7 +2393,7 @@ static int ath12k_dp_mon_rx_deliver(struct ath12k *ar,
decap = mon_mpdu->decap_format;
ath12k_dp_mon_update_radiotap(ar, ppduinfo, mon_skb, rxs);
- ath12k_dp_mon_rx_deliver_msdu(ar, napi, mon_skb, rxs, decap);
+ ath12k_dp_mon_rx_deliver_msdu(ar, napi, mon_skb, ppduinfo, rxs, decap);
mon_skb = skb_next;
} while (mon_skb);
rxs->flag = 0;
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
index 8ab91273592c..5e5c14a70316 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
@@ -21,6 +21,9 @@
#define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
+static int ath12k_dp_rx_tid_delete_handler(struct ath12k_base *ab,
+ struct ath12k_dp_rx_tid_rxq *rx_tid);
+
static enum hal_encrypt_type ath12k_dp_rx_h_enctype(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
@@ -581,49 +584,71 @@ static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar)
return 0;
}
+static void ath12k_dp_init_rx_tid_rxq(struct ath12k_dp_rx_tid_rxq *rx_tid_rxq,
+ struct ath12k_dp_rx_tid *rx_tid)
+{
+ rx_tid_rxq->tid = rx_tid->tid;
+ rx_tid_rxq->active = rx_tid->active;
+ rx_tid_rxq->qbuf = rx_tid->qbuf;
+}
+
+static void ath12k_dp_rx_tid_cleanup(struct ath12k_base *ab,
+ struct ath12k_reoq_buf *tid_qbuf)
+{
+ if (tid_qbuf->vaddr) {
+ dma_unmap_single(ab->dev, tid_qbuf->paddr_aligned,
+ tid_qbuf->size, DMA_BIDIRECTIONAL);
+ kfree(tid_qbuf->vaddr);
+ tid_qbuf->vaddr = NULL;
+ }
+}
+
void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
struct ath12k_dp_rx_reo_cache_flush_elem *cmd_cache, *tmp_cache;
+ struct dp_reo_update_rx_queue_elem *cmd_queue, *tmp_queue;
- spin_lock_bh(&dp->reo_cmd_lock);
- list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
- list_del(&cmd->list);
- dma_unmap_single(ab->dev, cmd->data.qbuf.paddr_aligned,
- cmd->data.qbuf.size, DMA_BIDIRECTIONAL);
- kfree(cmd->data.qbuf.vaddr);
- kfree(cmd);
+ spin_lock_bh(&dp->reo_rxq_flush_lock);
+ list_for_each_entry_safe(cmd_queue, tmp_queue, &dp->reo_cmd_update_rx_queue_list,
+ list) {
+ list_del(&cmd_queue->list);
+ ath12k_dp_rx_tid_cleanup(ab, &cmd_queue->rx_tid.qbuf);
+ kfree(cmd_queue);
}
-
list_for_each_entry_safe(cmd_cache, tmp_cache,
&dp->reo_cmd_cache_flush_list, list) {
list_del(&cmd_cache->list);
dp->reo_cmd_cache_flush_count--;
- dma_unmap_single(ab->dev, cmd_cache->data.qbuf.paddr_aligned,
- cmd_cache->data.qbuf.size, DMA_BIDIRECTIONAL);
- kfree(cmd_cache->data.qbuf.vaddr);
+ ath12k_dp_rx_tid_cleanup(ab, &cmd_cache->data.qbuf);
kfree(cmd_cache);
}
+ spin_unlock_bh(&dp->reo_rxq_flush_lock);
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
+ list_del(&cmd->list);
+ ath12k_dp_rx_tid_cleanup(ab, &cmd->data.qbuf);
+ kfree(cmd);
+ }
spin_unlock_bh(&dp->reo_cmd_lock);
}
static void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx,
enum hal_reo_cmd_status status)
{
- struct ath12k_dp_rx_tid *rx_tid = ctx;
+ struct ath12k_dp_rx_tid_rxq *rx_tid = ctx;
if (status != HAL_REO_CMD_SUCCESS)
ath12k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
rx_tid->tid, status);
- dma_unmap_single(dp->ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size,
- DMA_BIDIRECTIONAL);
- kfree(rx_tid->qbuf.vaddr);
- rx_tid->qbuf.vaddr = NULL;
+ ath12k_dp_rx_tid_cleanup(dp->ab, &rx_tid->qbuf);
}
-static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab, struct ath12k_dp_rx_tid *rx_tid,
+static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab,
+ struct ath12k_dp_rx_tid_rxq *rx_tid,
enum hal_reo_cmd_type type,
struct ath12k_hal_reo_cmd *cmd,
void (*cb)(struct ath12k_dp *dp, void *ctx,
@@ -668,51 +693,95 @@ static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab, struct ath12k_dp_rx_ti
return 0;
}
-static void ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
- struct ath12k_dp_rx_tid *rx_tid)
+static int ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
+ struct ath12k_dp_rx_tid_rxq *rx_tid)
{
struct ath12k_hal_reo_cmd cmd = {};
- unsigned long tot_desc_sz, desc_sz;
int ret;
- tot_desc_sz = rx_tid->qbuf.size;
- desc_sz = ath12k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);
-
- while (tot_desc_sz > desc_sz) {
- tot_desc_sz -= desc_sz;
- cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned + tot_desc_sz);
- cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
- ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
- HAL_REO_CMD_FLUSH_CACHE, &cmd,
- NULL);
- if (ret)
- ath12k_warn(ab,
- "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
- rx_tid->tid, ret);
- }
-
- memset(&cmd, 0, sizeof(cmd));
cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
- cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ /* HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS - all pending MPDUs
+ * in the bitmap will be forwarded/flushed to the REO output rings
+ */
+ cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS |
+ HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS;
+
+ /* For all QoS TIDs (except NON_QOS), the driver allocates a maximum
+ * window size of 1024. In such cases, the driver can issue a single
+ * 1KB descriptor flush command instead of sending multiple 128-byte
+ * flush commands for each QoS TID, improving efficiency.
+ */
+
+ if (rx_tid->tid != HAL_DESC_REO_NON_QOS_TID)
+ cmd.flag |= HAL_REO_CMD_FLG_FLUSH_QUEUE_1K_DESC;
+
ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
HAL_REO_CMD_FLUSH_CACHE,
&cmd, ath12k_dp_reo_cmd_free);
- if (ret) {
- ath12k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
- rx_tid->tid, ret);
- dma_unmap_single(ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size,
- DMA_BIDIRECTIONAL);
- kfree(rx_tid->qbuf.vaddr);
- rx_tid->qbuf.vaddr = NULL;
+ return ret;
+}
+
+static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid)
+{
+ struct ath12k_reo_queue_ref *qref;
+ struct ath12k_dp *dp = &ab->dp;
+ bool ml_peer = false;
+
+ if (!ab->hw_params->reoq_lut_support)
+ return;
+
+ if (peer_id & ATH12K_PEER_ML_ID_VALID) {
+ peer_id &= ~ATH12K_PEER_ML_ID_VALID;
+ ml_peer = true;
+ }
+
+ if (ml_peer)
+ qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+ else
+ qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+
+ qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR);
+ qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) |
+ u32_encode_bits(tid, DP_REO_QREF_NUM);
+}
+
+static void ath12k_dp_rx_process_reo_cmd_update_rx_queue_list(struct ath12k_dp *dp)
+{
+ struct ath12k_base *ab = dp->ab;
+ struct dp_reo_update_rx_queue_elem *elem, *tmp;
+
+ spin_lock_bh(&dp->reo_rxq_flush_lock);
+
+ list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_update_rx_queue_list, list) {
+ if (elem->rx_tid.active)
+ continue;
+
+ if (ath12k_dp_rx_tid_delete_handler(ab, &elem->rx_tid))
+ break;
+
+ ath12k_peer_rx_tid_qref_reset(ab,
+ elem->is_ml_peer ? elem->ml_peer_id :
+ elem->peer_id,
+ elem->rx_tid.tid);
+
+ if (ab->hw_params->reoq_lut_support)
+ ath12k_hal_reo_shared_qaddr_cache_clear(ab);
+
+ list_del(&elem->list);
+ kfree(elem);
}
+
+ spin_unlock_bh(&dp->reo_rxq_flush_lock);
}
static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
enum hal_reo_cmd_status status)
{
struct ath12k_base *ab = dp->ab;
- struct ath12k_dp_rx_tid *rx_tid = ctx;
+ struct ath12k_dp_rx_tid_rxq *rx_tid = ctx;
struct ath12k_dp_rx_reo_cache_flush_elem *elem, *tmp;
if (status == HAL_REO_CMD_DRAIN) {
@@ -724,6 +793,13 @@ static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
return;
}
+ /* Retry the HAL_REO_CMD_UPDATE_RX_QUEUE command for entries
+ * in the pending queue list whose TID is marked inactive
+ */
+ spin_lock_bh(&dp->ab->base_lock);
+ ath12k_dp_rx_process_reo_cmd_update_rx_queue_list(dp);
+ spin_unlock_bh(&dp->ab->base_lock);
+
elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
if (!elem)
goto free_desc;
@@ -731,7 +807,7 @@ static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
elem->ts = jiffies;
memcpy(&elem->data, rx_tid, sizeof(*rx_tid));
- spin_lock_bh(&dp->reo_cmd_lock);
+ spin_lock_bh(&dp->reo_rxq_flush_lock);
list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
dp->reo_cmd_cache_flush_count++;
@@ -741,32 +817,44 @@ static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
if (dp->reo_cmd_cache_flush_count > ATH12K_DP_RX_REO_DESC_FREE_THRES ||
time_after(jiffies, elem->ts +
msecs_to_jiffies(ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS))) {
- list_del(&elem->list);
- dp->reo_cmd_cache_flush_count--;
-
- /* Unlock the reo_cmd_lock before using ath12k_dp_reo_cmd_send()
- * within ath12k_dp_reo_cache_flush. The reo_cmd_cache_flush_list
- * is used in only two contexts, one is in this function called
- * from napi and the other in ath12k_dp_free during core destroy.
- * Before dp_free, the irqs would be disabled and would wait to
- * synchronize. Hence there wouldn’t be any race against add or
- * delete to this list. Hence unlock-lock is safe here.
+ /* The reo_cmd_cache_flush_list is used in only two contexts:
+ * one is this function, called from napi, and the other is
+ * ath12k_dp_free during core destroy.
+ * If the cache flush command succeeds, delete the element from
+ * the cache list; ath12k_dp_rx_reo_cmd_list_cleanup will be
+ * called during core destroy.
*/
- spin_unlock_bh(&dp->reo_cmd_lock);
- ath12k_dp_reo_cache_flush(ab, &elem->data);
+ if (ath12k_dp_reo_cache_flush(ab, &elem->data))
+ break;
+
+ list_del(&elem->list);
+ dp->reo_cmd_cache_flush_count--;
kfree(elem);
- spin_lock_bh(&dp->reo_cmd_lock);
}
}
- spin_unlock_bh(&dp->reo_cmd_lock);
+ spin_unlock_bh(&dp->reo_rxq_flush_lock);
return;
free_desc:
- dma_unmap_single(ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size,
- DMA_BIDIRECTIONAL);
- kfree(rx_tid->qbuf.vaddr);
- rx_tid->qbuf.vaddr = NULL;
+ ath12k_dp_rx_tid_cleanup(ab, &rx_tid->qbuf);
+}
+
+static int ath12k_dp_rx_tid_delete_handler(struct ath12k_base *ab,
+ struct ath12k_dp_rx_tid_rxq *rx_tid)
+{
+ struct ath12k_hal_reo_cmd cmd = {};
+
+ cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
+ cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
+ cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
+ /* Flush cache failures have been observed; to avoid them, set the vld bit during delete */
+ cmd.upd1 |= HAL_REO_CMD_UPD1_VLD;
+
+ return ath12k_dp_reo_cmd_send(ab, rx_tid,
+ HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
+ ath12k_dp_rx_tid_del_func);
}
static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid,
@@ -799,64 +887,38 @@ static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u
ath12k_hal_reo_shared_qaddr_cache_clear(ab);
}
-static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid)
+static void ath12k_dp_mark_tid_as_inactive(struct ath12k_dp *dp, int peer_id, u8 tid)
{
- struct ath12k_reo_queue_ref *qref;
- struct ath12k_dp *dp = &ab->dp;
- bool ml_peer = false;
-
- if (!ab->hw_params->reoq_lut_support)
- return;
+ struct dp_reo_update_rx_queue_elem *elem;
+ struct ath12k_dp_rx_tid_rxq *rx_tid;
- if (peer_id & ATH12K_PEER_ML_ID_VALID) {
- peer_id &= ~ATH12K_PEER_ML_ID_VALID;
- ml_peer = true;
+ spin_lock_bh(&dp->reo_rxq_flush_lock);
+ list_for_each_entry(elem, &dp->reo_cmd_update_rx_queue_list, list) {
+ if (elem->peer_id == peer_id) {
+ rx_tid = &elem->rx_tid;
+ if (rx_tid->tid == tid) {
+ rx_tid->active = false;
+ break;
+ }
+ }
}
-
- if (ml_peer)
- qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
- (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
- else
- qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
- (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
-
- qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR);
- qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) |
- u32_encode_bits(tid, DP_REO_QREF_NUM);
+ spin_unlock_bh(&dp->reo_rxq_flush_lock);
}
void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
struct ath12k_peer *peer, u8 tid)
{
- struct ath12k_hal_reo_cmd cmd = {};
struct ath12k_dp_rx_tid *rx_tid = &peer->rx_tid[tid];
- int ret;
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_dp *dp = &ab->dp;
if (!rx_tid->active)
return;
- cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
- cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
- cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
- cmd.upd0 = HAL_REO_CMD_UPD0_VLD;
- ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
- HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
- ath12k_dp_rx_tid_del_func);
- if (ret) {
- ath12k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
- tid, ret);
- dma_unmap_single(ar->ab->dev, rx_tid->qbuf.paddr_aligned,
- rx_tid->qbuf.size, DMA_BIDIRECTIONAL);
- kfree(rx_tid->qbuf.vaddr);
- rx_tid->qbuf.vaddr = NULL;
- }
-
- if (peer->mlo)
- ath12k_peer_rx_tid_qref_reset(ar->ab, peer->ml_id, tid);
- else
- ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);
-
rx_tid->active = false;
+
+ ath12k_dp_mark_tid_as_inactive(dp, peer->peer_id, tid);
+ ath12k_dp_rx_process_reo_cmd_update_rx_queue_list(dp);
}
int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
@@ -941,9 +1003,12 @@ static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar,
{
struct ath12k_hal_reo_cmd cmd = {};
int ret;
+ struct ath12k_dp_rx_tid_rxq rx_tid_rxq;
- cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
- cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
+ ath12k_dp_init_rx_tid_rxq(&rx_tid_rxq, rx_tid);
+
+ cmd.addr_lo = lower_32_bits(rx_tid_rxq.qbuf.paddr_aligned);
+ cmd.addr_hi = upper_32_bits(rx_tid_rxq.qbuf.paddr_aligned);
cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
cmd.ba_window_size = ba_win_sz;
@@ -953,12 +1018,12 @@ static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar,
cmd.upd2 = u32_encode_bits(ssn, HAL_REO_CMD_UPD2_SSN);
}
- ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
+ ret = ath12k_dp_reo_cmd_send(ar->ab, &rx_tid_rxq,
HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
NULL);
if (ret) {
ath12k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
- rx_tid->tid, ret);
+ rx_tid_rxq.tid, ret);
return ret;
}
@@ -1018,6 +1083,29 @@ static int ath12k_dp_rx_assign_reoq(struct ath12k_base *ab,
return 0;
}
+static int ath12k_dp_prepare_reo_update_elem(struct ath12k_dp *dp,
+ struct ath12k_peer *peer,
+ struct ath12k_dp_rx_tid *rx_tid)
+{
+ struct dp_reo_update_rx_queue_elem *elem;
+
+ elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
+ if (!elem)
+ return -ENOMEM;
+
+ elem->peer_id = peer->peer_id;
+ elem->is_ml_peer = peer->mlo;
+ elem->ml_peer_id = peer->ml_id;
+
+ ath12k_dp_init_rx_tid_rxq(&elem->rx_tid, rx_tid);
+
+ spin_lock_bh(&dp->reo_rxq_flush_lock);
+ list_add_tail(&elem->list, &dp->reo_cmd_update_rx_queue_list);
+ spin_unlock_bh(&dp->reo_rxq_flush_lock);
+
+ return 0;
+}
+
int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id,
u8 tid, u32 ba_win_sz, u16 ssn,
enum hal_pn_type pn_type)
@@ -1098,6 +1186,19 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
return ret;
}
+ /* Pre-allocate the update_rxq_list entry for the corresponding tid.
+ * It is used during tid delete. The reason for not allocating at
+ * delete time is that an allocation failure then would prevent
+ * freeing the tid vaddr/paddr and could lead to a memory leak.
+ */
+ ret = ath12k_dp_prepare_reo_update_elem(dp, peer, rx_tid);
+ if (ret) {
+ ath12k_warn(ab, "failed to alloc update_rxq_list for rx tid %u\n", tid);
+ ath12k_dp_rx_tid_cleanup(ab, &rx_tid->qbuf);
+ spin_unlock_bh(&ab->base_lock);
+ return ret;
+ }
+
paddr_aligned = rx_tid->qbuf.paddr_aligned;
if (ab->hw_params->reoq_lut_support) {
/* Update the REO queue LUT at the corresponding peer id
@@ -1207,6 +1308,7 @@ int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
struct ath12k_hal_reo_cmd cmd = {};
struct ath12k_peer *peer;
struct ath12k_dp_rx_tid *rx_tid;
+ struct ath12k_dp_rx_tid_rxq rx_tid_rxq;
u8 tid;
int ret = 0;
@@ -1253,9 +1355,11 @@ int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
rx_tid = &peer->rx_tid[tid];
if (!rx_tid->active)
continue;
- cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
- cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
- ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
+
+ ath12k_dp_init_rx_tid_rxq(&rx_tid_rxq, rx_tid);
+ cmd.addr_lo = lower_32_bits(rx_tid_rxq.qbuf.paddr_aligned);
+ cmd.addr_hi = upper_32_bits(rx_tid_rxq.qbuf.paddr_aligned);
+ ret = ath12k_dp_reo_cmd_send(ab, &rx_tid_rxq,
HAL_REO_CMD_UPDATE_RX_QUEUE,
&cmd, NULL);
if (ret) {
@@ -2533,6 +2637,8 @@ void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info)
channel_num = meta_data;
center_freq = meta_data >> 16;
+ rx_status->band = NUM_NL80211_BANDS;
+
if (center_freq >= ATH12K_MIN_6GHZ_FREQ &&
center_freq <= ATH12K_MAX_6GHZ_FREQ) {
rx_status->band = NL80211_BAND_6GHZ;
@@ -2541,21 +2647,33 @@ void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info)
rx_status->band = NL80211_BAND_2GHZ;
} else if (channel_num >= 36 && channel_num <= 173) {
rx_status->band = NL80211_BAND_5GHZ;
- } else {
+ }
+
+ if (unlikely(rx_status->band == NUM_NL80211_BANDS ||
+ !ath12k_ar_to_hw(ar)->wiphy->bands[rx_status->band])) {
+ ath12k_warn(ar->ab, "sband is NULL for status band %d channel_num %d center_freq %d pdev_id %d\n",
+ rx_status->band, channel_num, center_freq, ar->pdev_idx);
+
spin_lock_bh(&ar->data_lock);
channel = ar->rx_channel;
if (channel) {
rx_status->band = channel->band;
channel_num =
ieee80211_frequency_to_channel(channel->center_freq);
+ rx_status->freq = ieee80211_channel_to_frequency(channel_num,
+ rx_status->band);
+ } else {
+ ath12k_err(ar->ab, "unable to determine channel and band for rx packet");
}
spin_unlock_bh(&ar->data_lock);
+ goto h_rate;
}
if (rx_status->band != NL80211_BAND_6GHZ)
rx_status->freq = ieee80211_channel_to_frequency(channel_num,
rx_status->band);
+h_rate:
ath12k_dp_rx_h_rate(ar, rx_info);
}
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.h b/drivers/net/wireless/ath/ath12k/dp_rx.h
index e971a314bd2d..69d0a36a91d8 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.h
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.h
@@ -31,15 +31,29 @@ struct ath12k_dp_rx_tid {
struct ath12k_base *ab;
};
+struct ath12k_dp_rx_tid_rxq {
+ u8 tid;
+ bool active;
+ struct ath12k_reoq_buf qbuf;
+};
+
struct ath12k_dp_rx_reo_cache_flush_elem {
struct list_head list;
- struct ath12k_dp_rx_tid data;
+ struct ath12k_dp_rx_tid_rxq data;
unsigned long ts;
};
+struct dp_reo_update_rx_queue_elem {
+ struct list_head list;
+ struct ath12k_dp_rx_tid_rxq rx_tid;
+ int peer_id;
+ bool is_ml_peer;
+ u16 ml_peer_id;
+};
+
struct ath12k_dp_rx_reo_cmd {
struct list_head list;
- struct ath12k_dp_rx_tid data;
+ struct ath12k_dp_rx_tid_rxq data;
int cmd_num;
void (*handler)(struct ath12k_dp *dp, void *ctx,
enum hal_reo_cmd_status status);
diff --git a/drivers/net/wireless/ath/ath12k/hal.h b/drivers/net/wireless/ath/ath12k/hal.h
index c1750b5dc03c..efe00e167998 100644
--- a/drivers/net/wireless/ath/ath12k/hal.h
+++ b/drivers/net/wireless/ath/ath12k/hal.h
@@ -832,6 +832,7 @@ enum hal_rx_buf_return_buf_manager {
#define HAL_REO_CMD_FLG_FLUSH_ALL BIT(6)
#define HAL_REO_CMD_FLG_UNBLK_RESOURCE BIT(7)
#define HAL_REO_CMD_FLG_UNBLK_CACHE BIT(8)
+#define HAL_REO_CMD_FLG_FLUSH_QUEUE_1K_DESC BIT(9)
/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* fields */
#define HAL_REO_CMD_UPD0_RX_QUEUE_NUM BIT(8)
diff --git a/drivers/net/wireless/ath/ath12k/hal_desc.h b/drivers/net/wireless/ath/ath12k/hal_desc.h
index 0173f731bfef..13ddac4a9412 100644
--- a/drivers/net/wireless/ath/ath12k/hal_desc.h
+++ b/drivers/net/wireless/ath/ath12k/hal_desc.h
@@ -1225,6 +1225,7 @@ struct hal_reo_flush_queue {
#define HAL_REO_FLUSH_CACHE_INFO0_FLUSH_WO_INVALIDATE BIT(12)
#define HAL_REO_FLUSH_CACHE_INFO0_BLOCK_CACHE_USAGE BIT(13)
#define HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL BIT(14)
+#define HAL_REO_FLUSH_CACHE_INFO0_FLUSH_QUEUE_1K_DESC BIT(15)
struct hal_reo_flush_cache {
struct hal_reo_cmd_hdr cmd;
diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.c b/drivers/net/wireless/ath/ath12k/hal_rx.c
index 48aa48c48606..669096278fdd 100644
--- a/drivers/net/wireless/ath/ath12k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath12k/hal_rx.c
@@ -89,6 +89,9 @@ static int ath12k_hal_reo_cmd_flush_cache(struct ath12k_hal *hal,
if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_ALL)
desc->info0 |= cpu_to_le32(HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL);
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_QUEUE_1K_DESC)
+ desc->info0 |= cpu_to_le32(HAL_REO_FLUSH_CACHE_INFO0_FLUSH_QUEUE_1K_DESC);
+
return le32_get_bits(desc->cmd.info0, HAL_REO_CMD_HDR_INFO0_CMD_NUMBER);
}
diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.h b/drivers/net/wireless/ath/ath12k/hal_rx.h
index a3ab588aae19..d1ad7747b82c 100644
--- a/drivers/net/wireless/ath/ath12k/hal_rx.h
+++ b/drivers/net/wireless/ath/ath12k/hal_rx.h
@@ -483,15 +483,16 @@ enum hal_rx_ul_reception_type {
HAL_RECEPTION_TYPE_FRAMELESS
};
-#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RECEPTION GENMASK(3, 0)
-#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RX_BW GENMASK(7, 5)
-#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB GENMASK(15, 8)
+#define HAL_RX_RSSI_LEGACY_INFO_INFO0_RECEPTION GENMASK(3, 0)
+#define HAL_RX_RSSI_LEGACY_INFO_INFO0_RX_BW GENMASK(7, 5)
+#define HAL_RX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB GENMASK(15, 8)
+#define HAL_RX_RSSI_LEGACY_INFO_INFO2_RSSI_COMB_PPDU GENMASK(7, 0)
struct hal_rx_phyrx_rssi_legacy_info {
__le32 info0;
__le32 rsvd0[39];
__le32 info1;
- __le32 rsvd1;
+ __le32 info2;
} __packed;
#define HAL_RX_MPDU_START_INFO0_PPDU_ID GENMASK(31, 16)
@@ -695,7 +696,8 @@ struct hal_rx_resp_req_info {
#define HAL_RX_MPDU_ERR_MPDU_LEN BIT(6)
#define HAL_RX_MPDU_ERR_UNENCRYPTED_FRAME BIT(7)
-#define HAL_RX_PHY_CMN_USER_INFO0_GI GENMASK(17, 16)
+#define HAL_RX_CMN_USR_INFO0_CP_SETTING GENMASK(17, 16)
+#define HAL_RX_CMN_USR_INFO0_LTF_SIZE GENMASK(19, 18)
struct hal_phyrx_common_user_info {
__le32 rsvd[2];
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index bd1ec3b2c084..1d7b60aa5cb0 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <net/mac80211.h>
@@ -1822,22 +1822,16 @@ void ath12k_mac_handle_beacon(struct ath12k *ar, struct sk_buff *skb)
skb);
}
-static void ath12k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
- struct ieee80211_vif *vif)
+void ath12k_mac_handle_beacon_miss(struct ath12k *ar,
+ struct ath12k_link_vif *arvif)
{
- u32 *vdev_id = data;
- struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
- struct ath12k_link_vif *arvif = &ahvif->deflink;
- struct ieee80211_hw *hw;
-
- if (!arvif->is_created || arvif->vdev_id != *vdev_id)
- return;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
+ struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
- if (!arvif->is_up)
+ if (!(arvif->is_created && arvif->is_up))
return;
ieee80211_beacon_loss(vif);
- hw = ath12k_ar_to_hw(arvif->ar);
/* Firmware doesn't report beacon loss events repeatedly. If AP probe
* (done by mac80211) succeeds but beacons do not resume then it
@@ -1848,14 +1842,6 @@ static void ath12k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
ATH12K_CONNECTION_LOSS_HZ);
}
-void ath12k_mac_handle_beacon_miss(struct ath12k *ar, u32 vdev_id)
-{
- ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar),
- IEEE80211_IFACE_ITER_NORMAL,
- ath12k_mac_handle_beacon_miss_iter,
- &vdev_id);
-}
-
static void ath12k_mac_vif_sta_connection_loss_work(struct work_struct *work)
{
struct ath12k_link_vif *arvif = container_of(work, struct ath12k_link_vif,
@@ -4078,12 +4064,68 @@ static int ath12k_mac_fils_discovery(struct ath12k_link_vif *arvif,
return ret;
}
+static void ath12k_mac_vif_setup_ps(struct ath12k_link_vif *arvif)
+{
+ struct ath12k *ar = arvif->ar;
+ struct ieee80211_vif *vif = arvif->ahvif->vif;
+ struct ieee80211_conf *conf = &ath12k_ar_to_hw(ar)->conf;
+ enum wmi_sta_powersave_param param;
+ struct ieee80211_bss_conf *info;
+ enum wmi_sta_ps_mode psmode;
+ int ret;
+ int timeout;
+ bool enable_ps;
+
+ lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ enable_ps = arvif->ahvif->ps;
+ if (enable_ps) {
+ psmode = WMI_STA_PS_MODE_ENABLED;
+ param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
+
+ timeout = conf->dynamic_ps_timeout;
+ if (timeout == 0) {
+ info = ath12k_mac_get_link_bss_conf(arvif);
+ if (!info) {
+ ath12k_warn(ar->ab, "unable to access bss link conf in setup ps for vif %pM link %u\n",
+ vif->addr, arvif->link_id);
+ return;
+ }
+
+ /* firmware doesn't like 0 */
+ timeout = ieee80211_tu_to_usec(info->beacon_int) / 1000;
+ }
+
+ ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
+ timeout);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set inactivity time for vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+ } else {
+ psmode = WMI_STA_PS_MODE_DISABLED;
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d psmode %s\n",
+ arvif->vdev_id, psmode ? "enable" : "disable");
+
+ ret = ath12k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, psmode);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to set sta power save mode %d for vdev %d: %d\n",
+ psmode, arvif->vdev_id, ret);
+}
+
static void ath12k_mac_op_vif_cfg_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u64 changed)
{
struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
unsigned long links = ahvif->links_map;
+ struct ieee80211_vif_cfg *vif_cfg;
struct ieee80211_bss_conf *info;
struct ath12k_link_vif *arvif;
struct ieee80211_sta *sta;
@@ -4147,61 +4189,24 @@ static void ath12k_mac_op_vif_cfg_changed(struct ieee80211_hw *hw,
}
}
}
-}
-static void ath12k_mac_vif_setup_ps(struct ath12k_link_vif *arvif)
-{
- struct ath12k *ar = arvif->ar;
- struct ieee80211_vif *vif = arvif->ahvif->vif;
- struct ieee80211_conf *conf = &ath12k_ar_to_hw(ar)->conf;
- enum wmi_sta_powersave_param param;
- struct ieee80211_bss_conf *info;
- enum wmi_sta_ps_mode psmode;
- int ret;
- int timeout;
- bool enable_ps;
-
- lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+ if (changed & BSS_CHANGED_PS) {
+ links = ahvif->links_map;
+ vif_cfg = &vif->cfg;
- if (vif->type != NL80211_IFTYPE_STATION)
- return;
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+ if (!arvif || !arvif->ar)
+ continue;
- enable_ps = arvif->ahvif->ps;
- if (enable_ps) {
- psmode = WMI_STA_PS_MODE_ENABLED;
- param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
+ ar = arvif->ar;
- timeout = conf->dynamic_ps_timeout;
- if (timeout == 0) {
- info = ath12k_mac_get_link_bss_conf(arvif);
- if (!info) {
- ath12k_warn(ar->ab, "unable to access bss link conf in setup ps for vif %pM link %u\n",
- vif->addr, arvif->link_id);
- return;
+ if (ar->ab->hw_params->supports_sta_ps) {
+ ahvif->ps = vif_cfg->ps;
+ ath12k_mac_vif_setup_ps(arvif);
}
-
- /* firmware doesn't like 0 */
- timeout = ieee80211_tu_to_usec(info->beacon_int) / 1000;
- }
-
- ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
- timeout);
- if (ret) {
- ath12k_warn(ar->ab, "failed to set inactivity time for vdev %d: %i\n",
- arvif->vdev_id, ret);
- return;
}
- } else {
- psmode = WMI_STA_PS_MODE_DISABLED;
}
-
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d psmode %s\n",
- arvif->vdev_id, psmode ? "enable" : "disable");
-
- ret = ath12k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, psmode);
- if (ret)
- ath12k_warn(ar->ab, "failed to set sta power save mode %d for vdev %d: %d\n",
- psmode, arvif->vdev_id, ret);
}
static bool ath12k_mac_supports_tpc(struct ath12k *ar, struct ath12k_vif *ahvif,
@@ -4223,7 +4228,6 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
{
struct ath12k_vif *ahvif = arvif->ahvif;
struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
- struct ieee80211_vif_cfg *vif_cfg = &vif->cfg;
struct cfg80211_chan_def def;
u32 param_id, param_value;
enum nl80211_band band;
@@ -4510,12 +4514,6 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
}
ath12k_mac_fils_discovery(arvif, info);
-
- if (changed & BSS_CHANGED_PS &&
- ar->ab->hw_params->supports_sta_ps) {
- ahvif->ps = vif_cfg->ps;
- ath12k_mac_vif_setup_ps(arvif);
- }
}
static struct ath12k_vif_cache *ath12k_ahvif_get_link_cache(struct ath12k_vif *ahvif,
@@ -9848,6 +9846,7 @@ int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif)
param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
param_value = hw->wiphy->rts_threshold;
+ ar->rts_threshold = param_value;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
param_id, param_value);
if (ret) {
@@ -11228,8 +11227,8 @@ void ath12k_mac_fill_reg_tpc_info(struct ath12k *ar,
struct ieee80211_channel *chan, *temp_chan;
u8 pwr_lvl_idx, num_pwr_levels, pwr_reduction;
bool is_psd_power = false, is_tpe_present = false;
- s8 max_tx_power[ATH12K_NUM_PWR_LEVELS],
- psd_power, tx_power, eirp_power;
+ s8 max_tx_power[ATH12K_NUM_PWR_LEVELS], psd_power, tx_power;
+ s8 eirp_power = 0;
struct ath12k_vif *ahvif = arvif->ahvif;
u16 start_freq, center_freq;
u8 reg_6ghz_power_mode;
@@ -11435,8 +11434,10 @@ static void ath12k_mac_parse_tx_pwr_env(struct ath12k *ar,
tpc_info->num_pwr_levels = max(local_psd->count,
reg_psd->count);
- if (tpc_info->num_pwr_levels > ATH12K_NUM_PWR_LEVELS)
- tpc_info->num_pwr_levels = ATH12K_NUM_PWR_LEVELS;
+ tpc_info->num_pwr_levels =
+ min3(tpc_info->num_pwr_levels,
+ IEEE80211_TPE_PSD_ENTRIES_320MHZ,
+ ATH12K_NUM_PWR_LEVELS);
for (i = 0; i < tpc_info->num_pwr_levels; i++) {
tpc_info->tpe[i] = min(local_psd->power[i],
@@ -11451,8 +11452,10 @@ static void ath12k_mac_parse_tx_pwr_env(struct ath12k *ar,
tpc_info->num_pwr_levels = max(local_non_psd->count,
reg_non_psd->count);
- if (tpc_info->num_pwr_levels > ATH12K_NUM_PWR_LEVELS)
- tpc_info->num_pwr_levels = ATH12K_NUM_PWR_LEVELS;
+ tpc_info->num_pwr_levels =
+ min3(tpc_info->num_pwr_levels,
+ IEEE80211_TPE_EIRP_ENTRIES_320MHZ,
+ ATH12K_NUM_PWR_LEVELS);
for (i = 0; i < tpc_info->num_pwr_levels; i++) {
tpc_info->tpe[i] = min(local_non_psd->power[i],
@@ -11675,16 +11678,32 @@ static int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw,
int radio_idx, u32 value)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct wiphy *wiphy = hw->wiphy;
struct ath12k *ar;
- int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD, ret = 0, i;
+ int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
+ int ret = 0, ret_err, i;
lockdep_assert_wiphy(hw->wiphy);
- /* Currently we set the rts threshold value to all the vifs across
- * all radios of the single wiphy.
- * TODO Once support for vif specific RTS threshold in mac80211 is
- * available, ath12k can make use of it.
- */
+ if (radio_idx >= wiphy->n_radio || radio_idx < -1)
+ return -EINVAL;
+
+ if (radio_idx != -1) {
+ /* Update RTS threshold in specified radio */
+ ar = ath12k_ah_to_ar(ah, radio_idx);
+ ret = ath12k_set_vdev_param_to_all_vifs(ar, param_id, value);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to set RTS config for all vdevs of pdev %d",
+ ar->pdev->pdev_id);
+ return ret;
+ }
+
+ ar->rts_threshold = value;
+ return 0;
+ }
+
+ /* radio_idx passed is -1, so set the RTS threshold for all radios. */
for_each_ar(ah, ar, i) {
ret = ath12k_set_vdev_param_to_all_vifs(ar, param_id, value);
if (ret) {
@@ -11693,6 +11712,25 @@ static int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw,
break;
}
}
+ if (!ret) {
+ /* Setting the new RTS threshold succeeded for the vdevs of all
+ * radios, so update the cached RTS threshold value for every radio
+ */
+ for_each_ar(ah, ar, i)
+ ar->rts_threshold = value;
+ return 0;
+ }
+
+ /* RTS threshold config failed; revert to the previous RTS threshold */
+ for (i = i - 1; i >= 0; i--) {
+ ar = ath12k_ah_to_ar(ah, i);
+ ret_err = ath12k_set_vdev_param_to_all_vifs(ar, param_id,
+ ar->rts_threshold);
+ if (ret_err)
+ ath12k_warn(ar->ab,
+ "failed to restore RTS threshold for all vdevs of pdev %d",
+ ar->pdev->pdev_id);
+ }
return ret;
}
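The hunk above commits the new RTS threshold only after every radio accepts it, and otherwise rolls the already-updated radios back to their cached values. A minimal userspace C sketch of that set-then-rollback pattern (set_param(), radio_param[] and NUM_RADIOS are illustrative stand-ins, not driver symbols):

#include <stdio.h>

#define NUM_RADIOS 3

static int radio_param[NUM_RADIOS];	/* cached last-known-good values */

/* Hypothetical setter: fails for radio 2 to demonstrate the rollback. */
static int set_param(int radio, int value)
{
	return radio == 2 ? -1 : 0;
}

static int set_param_all_radios(int value)
{
	int i, ret = 0;

	for (i = 0; i < NUM_RADIOS; i++) {
		ret = set_param(i, value);
		if (ret)
			break;			/* stop at the first failure */
	}

	if (!ret) {				/* all succeeded: commit the cache */
		for (i = 0; i < NUM_RADIOS; i++)
			radio_param[i] = value;
		return 0;
	}

	/* Revert the radios already updated to their cached values. */
	for (i = i - 1; i >= 0; i--)
		set_param(i, radio_param[i]);

	return ret;
}

int main(void)
{
	printf("result: %d\n", set_param_all_radios(2347));
	return 0;
}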
@@ -12598,6 +12636,27 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
return 0;
}
+static void ath12k_mac_put_chain_rssi(struct station_info *sinfo,
+ struct ath12k_link_sta *arsta)
+{
+ s8 rssi;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) {
+ sinfo->chains &= ~BIT(i);
+ rssi = arsta->chain_signal[i];
+
+ if (rssi != ATH12K_DEFAULT_NOISE_FLOOR &&
+ rssi != ATH12K_INVALID_RSSI_FULL &&
+ rssi != ATH12K_INVALID_RSSI_EMPTY &&
+ rssi != 0) {
+ sinfo->chain_signal[i] = rssi;
+ sinfo->chains |= BIT(i);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
+ }
+ }
+}
+
static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -12655,6 +12714,12 @@ static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
!(ath12k_mac_get_fw_stats(ar, &params)))
signal = arsta->rssi_beacon;
+ params.stats_id = WMI_REQUEST_RSSI_PER_CHAIN_STAT;
+ if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL)) &&
+ ahsta->ahvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ !(ath12k_mac_get_fw_stats(ar, &params)))
+ ath12k_mac_put_chain_rssi(sinfo, arsta);
+
spin_lock_bh(&ar->data_lock);
noise_floor = ath12k_pdev_get_noise_floor(ar);
spin_unlock_bh(&ar->data_lock);
diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h
index 18c79d4002cb..c05af40bd7a2 100644
--- a/drivers/net/wireless/ath/ath12k/mac.h
+++ b/drivers/net/wireless/ath/ath12k/mac.h
@@ -168,7 +168,8 @@ int ath12k_mac_rfkill_enable_radio(struct ath12k *ar, bool enable);
int ath12k_mac_rfkill_config(struct ath12k *ar);
int ath12k_mac_wait_tx_complete(struct ath12k *ar);
void ath12k_mac_handle_beacon(struct ath12k *ar, struct sk_buff *skb);
-void ath12k_mac_handle_beacon_miss(struct ath12k *ar, u32 vdev_id);
+void ath12k_mac_handle_beacon_miss(struct ath12k *ar,
+ struct ath12k_link_vif *arvif);
int ath12k_mac_vif_set_keepalive(struct ath12k_link_vif *arvif,
enum wmi_sta_keepalive_method method,
u32 interval);
diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c
index 7c611a1fd6d0..36325e62aa24 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.c
+++ b/drivers/net/wireless/ath/ath12k/qmi.c
@@ -3307,20 +3307,28 @@ static int ath12k_qmi_wlanfw_wlan_cfg_send(struct ath12k_base *ab)
/* This is the number of CE configs */
req->tgt_cfg_len = ab->qmi.ce_cfg.tgt_ce_len;
for (pipe_num = 0; pipe_num < req->tgt_cfg_len ; pipe_num++) {
- req->tgt_cfg[pipe_num].pipe_num = ce_cfg[pipe_num].pipenum;
- req->tgt_cfg[pipe_num].pipe_dir = ce_cfg[pipe_num].pipedir;
- req->tgt_cfg[pipe_num].nentries = ce_cfg[pipe_num].nentries;
- req->tgt_cfg[pipe_num].nbytes_max = ce_cfg[pipe_num].nbytes_max;
- req->tgt_cfg[pipe_num].flags = ce_cfg[pipe_num].flags;
+ req->tgt_cfg[pipe_num].pipe_num =
+ __le32_to_cpu(ce_cfg[pipe_num].pipenum);
+ req->tgt_cfg[pipe_num].pipe_dir =
+ __le32_to_cpu(ce_cfg[pipe_num].pipedir);
+ req->tgt_cfg[pipe_num].nentries =
+ __le32_to_cpu(ce_cfg[pipe_num].nentries);
+ req->tgt_cfg[pipe_num].nbytes_max =
+ __le32_to_cpu(ce_cfg[pipe_num].nbytes_max);
+ req->tgt_cfg[pipe_num].flags =
+ __le32_to_cpu(ce_cfg[pipe_num].flags);
}
req->svc_cfg_valid = 1;
/* This is the number of Service/CE configs */
req->svc_cfg_len = ab->qmi.ce_cfg.svc_to_ce_map_len;
for (pipe_num = 0; pipe_num < req->svc_cfg_len; pipe_num++) {
- req->svc_cfg[pipe_num].service_id = svc_cfg[pipe_num].service_id;
- req->svc_cfg[pipe_num].pipe_dir = svc_cfg[pipe_num].pipedir;
- req->svc_cfg[pipe_num].pipe_num = svc_cfg[pipe_num].pipenum;
+ req->svc_cfg[pipe_num].service_id =
+ __le32_to_cpu(svc_cfg[pipe_num].service_id);
+ req->svc_cfg[pipe_num].pipe_dir =
+ __le32_to_cpu(svc_cfg[pipe_num].pipedir);
+ req->svc_cfg[pipe_num].pipe_num =
+ __le32_to_cpu(svc_cfg[pipe_num].pipenum);
}
/* set shadow v3 configuration */
diff --git a/drivers/net/wireless/ath/ath12k/qmi.h b/drivers/net/wireless/ath/ath12k/qmi.h
index abdaade3b542..4767d9a2e309 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.h
+++ b/drivers/net/wireless/ath/ath12k/qmi.h
@@ -392,17 +392,17 @@ enum qmi_wlanfw_pipedir_enum_v01 {
};
struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01 {
- __le32 pipe_num;
- __le32 pipe_dir;
- __le32 nentries;
- __le32 nbytes_max;
- __le32 flags;
+ u32 pipe_num;
+ u32 pipe_dir;
+ u32 nentries;
+ u32 nbytes_max;
+ u32 flags;
};
struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01 {
- __le32 service_id;
- __le32 pipe_dir;
- __le32 pipe_num;
+ u32 service_id;
+ u32 pipe_dir;
+ u32 pipe_num;
};
struct qmi_wlanfw_shadow_reg_cfg_s_v01 {
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index da85c28ec355..ff6b3d4ea820 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -30,6 +30,9 @@ struct ath12k_wmi_svc_ready_parse {
struct wmi_tlv_fw_stats_parse {
const struct wmi_stats_event *ev;
struct ath12k_fw_stats *stats;
+ const struct wmi_per_chain_rssi_stat_params *rssi;
+ int rssi_num;
+ bool chain_rssi_done;
};
struct ath12k_wmi_dma_ring_caps_parse {
@@ -185,6 +188,8 @@ static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
.min_len = sizeof(struct wmi_p2p_noa_event) },
[WMI_TAG_11D_NEW_COUNTRY_EVENT] = {
.min_len = sizeof(struct wmi_11d_new_cc_event) },
+ [WMI_TAG_PER_CHAIN_RSSI_STATS] = {
+ .min_len = sizeof(struct wmi_per_chain_rssi_stat_params) },
};
__le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
@@ -843,7 +848,7 @@ int ath12k_wmi_mgmt_send(struct ath12k_link_vif *arvif, u32 buf_id,
cmd->tx_params_valid = 0;
frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
- frame_tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, buf_len);
+ frame_tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, buf_len_aligned);
memcpy(frame_tlv->value, frame->data, buf_len);
@@ -2423,6 +2428,7 @@ int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
eml_cap = arg->ml.eml_cap;
if (u16_get_bits(eml_cap, IEEE80211_EML_CAP_EMLSR_SUPP)) {
+ ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_EMLSR_SUPPORT);
/* Padding delay */
eml_pad_delay = ieee80211_emlsr_pad_delay_in_us(eml_cap);
ml_params->emlsr_padding_delay_us = cpu_to_le32(eml_pad_delay);
@@ -6461,6 +6467,8 @@ static int ath12k_pull_peer_sta_kickout_ev(struct ath12k_base *ab, struct sk_buf
}
arg->mac_addr = ev->peer_macaddr.addr;
+ arg->reason = le32_to_cpu(ev->reason);
+ arg->rssi = le32_to_cpu(ev->rssi);
kfree(tb);
return 0;
@@ -7297,8 +7305,10 @@ static void ath12k_scan_event(struct ath12k_base *ab, struct sk_buff *skb)
static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff *skb)
{
struct wmi_peer_sta_kickout_arg arg = {};
+ struct ath12k_link_vif *arvif;
struct ieee80211_sta *sta;
struct ath12k_peer *peer;
+ unsigned int link_id;
struct ath12k *ar;
if (ath12k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
@@ -7318,25 +7328,49 @@ static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff
goto exit;
}
- ar = ath12k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);
- if (!ar) {
+ arvif = ath12k_mac_get_arvif_by_vdev_id(ab, peer->vdev_id);
+ if (!arvif) {
ath12k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
peer->vdev_id);
goto exit;
}
- sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
- arg.mac_addr, NULL);
+ ar = arvif->ar;
+
+ if (peer->mlo) {
+ sta = ieee80211_find_sta_by_link_addrs(ath12k_ar_to_hw(ar),
+ arg.mac_addr,
+ NULL, &link_id);
+ if (peer->link_id != link_id) {
+ ath12k_warn(ab,
+ "Spurious quick kickout for MLO STA %pM with invalid link_id, peer: %d, sta: %d\n",
+ arg.mac_addr, peer->link_id, link_id);
+ goto exit;
+ }
+ } else {
+ sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
+ arg.mac_addr, NULL);
+ }
if (!sta) {
- ath12k_warn(ab, "Spurious quick kickout for STA %pM\n",
- arg.mac_addr);
+ ath12k_warn(ab, "Spurious quick kickout for %sSTA %pM\n",
+ peer->mlo ? "MLO " : "", arg.mac_addr);
goto exit;
}
- ath12k_dbg(ab, ATH12K_DBG_WMI, "peer sta kickout event %pM",
- arg.mac_addr);
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "peer sta kickout event %pM reason: %d rssi: %d\n",
+ arg.mac_addr, arg.reason, arg.rssi);
- ieee80211_report_low_ack(sta, 10);
+ switch (arg.reason) {
+ case WMI_PEER_STA_KICKOUT_REASON_INACTIVITY:
+ if (arvif->ahvif->vif->type == NL80211_IFTYPE_STATION) {
+ ath12k_mac_handle_beacon_miss(ar, arvif);
+ break;
+ }
+ fallthrough;
+ default:
+ ieee80211_report_low_ack(sta, 10);
+ }
exit:
spin_unlock_bh(&ab->base_lock);
@@ -7345,6 +7379,7 @@ exit:
static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb)
{
+ struct ath12k_link_vif *arvif;
struct wmi_roam_event roam_ev = {};
struct ath12k *ar;
u32 vdev_id;
@@ -7363,21 +7398,22 @@ static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb)
"wmi roam event vdev %u reason %d rssi %d\n",
vdev_id, roam_reason, roam_ev.rssi);
- rcu_read_lock();
- ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
- if (!ar) {
+ guard(rcu)();
+ arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_id);
+ if (!arvif) {
ath12k_warn(ab, "invalid vdev id in roam ev %d", vdev_id);
- rcu_read_unlock();
return;
}
+ ar = arvif->ar;
+
if (roam_reason >= WMI_ROAM_REASON_MAX)
ath12k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
roam_reason, vdev_id);
switch (roam_reason) {
case WMI_ROAM_REASON_BEACON_MISS:
- ath12k_mac_handle_beacon_miss(ar, vdev_id);
+ ath12k_mac_handle_beacon_miss(ar, arvif);
break;
case WMI_ROAM_REASON_BETTER_AP:
case WMI_ROAM_REASON_LOW_RSSI:
@@ -7387,8 +7423,6 @@ static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb)
roam_reason, vdev_id);
break;
}
-
- rcu_read_unlock();
}
static void ath12k_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
@@ -8218,6 +8252,77 @@ exit:
return ret;
}
+static int ath12k_wmi_tlv_rssi_chain_parse(struct ath12k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ const struct wmi_rssi_stat_params *stats_rssi = ptr;
+ struct wmi_tlv_fw_stats_parse *parse = data;
+ const struct wmi_stats_event *ev = parse->ev;
+ struct ath12k_fw_stats *stats = parse->stats;
+ struct ath12k_link_vif *arvif;
+ struct ath12k_link_sta *arsta;
+ struct ieee80211_sta *sta;
+ struct ath12k_sta *ahsta;
+ struct ath12k *ar;
+ int vdev_id;
+ int j;
+
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch update stats ev");
+ return -EPROTO;
+ }
+
+ if (tag != WMI_TAG_RSSI_STATS)
+ return -EPROTO;
+
+ if (!stats)
+ return -EINVAL;
+
+ stats->pdev_id = le32_to_cpu(ev->pdev_id);
+ vdev_id = le32_to_cpu(stats_rssi->vdev_id);
+ guard(rcu)();
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, stats->pdev_id);
+ if (!ar) {
+ ath12k_warn(ab, "invalid pdev id %d in rssi chain parse\n",
+ stats->pdev_id);
+ return -EPROTO;
+ }
+
+ arvif = ath12k_mac_get_arvif(ar, vdev_id);
+ if (!arvif) {
+ ath12k_warn(ab, "not found vif for vdev id %d\n", vdev_id);
+ return -EPROTO;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "stats bssid %pM vif %p\n",
+ arvif->bssid, arvif->ahvif->vif);
+
+ sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
+ arvif->bssid,
+ NULL);
+ if (!sta) {
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "not found station of bssid %pM for rssi chain\n",
+ arvif->bssid);
+ return -EPROTO;
+ }
+
+ ahsta = ath12k_sta_to_ahsta(sta);
+ arsta = &ahsta->deflink;
+
+ BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >
+ ARRAY_SIZE(stats_rssi->rssi_avg_beacon));
+
+ for (j = 0; j < ARRAY_SIZE(arsta->chain_signal); j++)
+ arsta->chain_signal[j] = le32_to_cpu(stats_rssi->rssi_avg_beacon[j]);
+
+ stats->stats_id = WMI_REQUEST_RSSI_PER_CHAIN_STAT;
+
+ return 0;
+}
+
static int ath12k_wmi_tlv_fw_stats_parse(struct ath12k_base *ab,
u16 tag, u16 len,
const void *ptr, void *data)
@@ -8232,6 +8337,22 @@ static int ath12k_wmi_tlv_fw_stats_parse(struct ath12k_base *ab,
case WMI_TAG_ARRAY_BYTE:
ret = ath12k_wmi_tlv_fw_stats_data_parse(ab, parse, ptr, len);
break;
+ case WMI_TAG_PER_CHAIN_RSSI_STATS:
+ parse->rssi = ptr;
+ if (le32_to_cpu(parse->ev->stats_id) & WMI_REQUEST_RSSI_PER_CHAIN_STAT)
+ parse->rssi_num = le32_to_cpu(parse->rssi->num_per_chain_rssi);
+ break;
+ case WMI_TAG_ARRAY_STRUCT:
+ if (parse->rssi_num && !parse->chain_rssi_done) {
+ ret = ath12k_wmi_tlv_iter(ab, ptr, len,
+ ath12k_wmi_tlv_rssi_chain_parse,
+ parse);
+ if (ret)
+ return ret;
+
+ parse->chain_rssi_done = true;
+ }
+ break;
default:
break;
}
@@ -8345,6 +8466,12 @@ static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *sk
goto complete;
}
+ /* Handle WMI_REQUEST_RSSI_PER_CHAIN_STAT status update */
+ if (stats.stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) {
+ complete(&ar->fw_stats_done);
+ goto complete;
+ }
+
/* Handle WMI_REQUEST_VDEV_STAT and WMI_REQUEST_BCN_STAT updates. */
ath12k_wmi_fw_stats_process(ar, &stats);
diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
index f3b0a6f57ec2..a8c3190e8ad9 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.h
+++ b/drivers/net/wireless/ath/ath12k/wmi.h
@@ -4548,12 +4548,27 @@ struct wmi_scan_event {
__le32 tsf_timestamp;
} __packed;
+enum wmi_peer_sta_kickout_reason {
+ WMI_PEER_STA_KICKOUT_REASON_UNSPECIFIED = 0,
+ WMI_PEER_STA_KICKOUT_REASON_XRETRY = 1,
+ WMI_PEER_STA_KICKOUT_REASON_INACTIVITY = 2,
+ WMI_PEER_STA_KICKOUT_REASON_IBSS_DISCONNECT = 3,
+ WMI_PEER_STA_KICKOUT_REASON_TDLS_DISCONNECT = 4,
+ WMI_PEER_STA_KICKOUT_REASON_SA_QUERY_TIMEOUT = 5,
+ WMI_PEER_STA_KICKOUT_REASON_ROAMING_EVENT = 6,
+ WMI_PEER_STA_KICKOUT_REASON_PMF_ERROR = 7,
+};
+
struct wmi_peer_sta_kickout_arg {
const u8 *mac_addr;
+ enum wmi_peer_sta_kickout_reason reason;
+ u32 rssi;
};
struct wmi_peer_sta_kickout_event {
struct ath12k_wmi_mac_addr_params peer_macaddr;
+ __le32 reason;
+ __le32 rssi;
} __packed;
#define WMI_ROAM_REASON_MASK GENMASK(3, 0)
@@ -5875,9 +5890,10 @@ struct wmi_stats_event {
} __packed;
enum wmi_stats_id {
- WMI_REQUEST_PDEV_STAT = BIT(2),
- WMI_REQUEST_VDEV_STAT = BIT(3),
- WMI_REQUEST_BCN_STAT = BIT(11),
+ WMI_REQUEST_PDEV_STAT = BIT(2),
+ WMI_REQUEST_VDEV_STAT = BIT(3),
+ WMI_REQUEST_RSSI_PER_CHAIN_STAT = BIT(8),
+ WMI_REQUEST_BCN_STAT = BIT(11),
};
struct wmi_request_stats_cmd {
@@ -5888,6 +5904,17 @@ struct wmi_request_stats_cmd {
__le32 pdev_id;
} __packed;
+struct wmi_rssi_stat_params {
+ __le32 vdev_id;
+ __le32 rssi_avg_beacon[WMI_MAX_CHAINS];
+ __le32 rssi_avg_data[WMI_MAX_CHAINS];
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+} __packed;
+
+struct wmi_per_chain_rssi_stat_params {
+ __le32 num_per_chain_rssi;
+} __packed;
+
#define WLAN_MAX_AC 4
#define MAX_TX_RATE_VALUES 10
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index 908c4c8b7f82..6833430130f4 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -555,7 +555,7 @@ static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
/* Check whether the PHY can be turned off again. */
/* 1. What about buffered unicast traffic for our AID? */
- cam = ieee80211_check_tim(tim_ie, tim_len, ar->common.curaid);
+ cam = ieee80211_check_tim(tim_ie, tim_len, ar->common.curaid, false);
/* 2. Maybe the AP wants to send multicast/broadcast data? */
cam |= !!(tim_ie->bitmap_ctrl & 0x01);
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 7703a0933a14..7218fe70f3bc 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -2708,6 +2708,7 @@ static void wil_wiphy_init(struct wiphy *wiphy)
wiphy->n_cipher_suites = ARRAY_SIZE(wil_cipher_suites);
wiphy->mgmt_stypes = wil_mgmt_stypes;
wiphy->features |= NL80211_FEATURE_SK_TX_STATUS;
+ wiphy->bss_param_support = WIPHY_BSS_PARAM_AP_ISOLATE;
wiphy->n_vendor_commands = ARRAY_SIZE(wil_nl80211_vendor_commands);
wiphy->vendor_commands = wil_nl80211_vendor_commands;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 8ab7d1e34a6e..6a3f187320fc 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -997,9 +997,9 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4356, WCC),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4359, WCC),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43751, WCC),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43752, WCC),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373, CYW),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43012, CYW),
- BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752, CYW),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_89359, CYW),
CYW_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43439, CYW),
{ /* end: all zeroes */ }
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
index 69ef8cf203d2..67c0c5a92f99 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
@@ -393,10 +393,8 @@ void brcmf_btcoex_detach(struct brcmf_cfg80211_info *cfg)
if (!cfg->btcoex)
return;
- if (cfg->btcoex->timer_on) {
- cfg->btcoex->timer_on = false;
- timer_shutdown_sync(&cfg->btcoex->timer);
- }
+ timer_shutdown_sync(&cfg->btcoex->timer);
+ cfg->btcoex->timer_on = false;
cancel_work_sync(&cfg->btcoex->work);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 8af402555b5e..8afaffe31031 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -5958,6 +5958,26 @@ static int brcmf_cfg80211_del_pmk(struct wiphy *wiphy, struct net_device *dev,
return brcmf_set_pmk(ifp, NULL, 0);
}
+static int brcmf_cfg80211_change_bss(struct wiphy *wiphy, struct net_device *dev,
+ struct bss_parameters *params)
+{
+ struct brcmf_if *ifp = netdev_priv(dev);
+ int ret = 0;
+
+ /* In AP mode, the "ap_isolate" value represents
+ * 0 = allow low-level bridging of frames between associated stations
+ * 1 = restrict low-level bridging of frames to isolate associated stations
+ * -1 = do not change existing setting
+ */
+ if (params->ap_isolate >= 0) {
+ ret = brcmf_fil_iovar_int_set(ifp, "ap_isolate", params->ap_isolate);
+ if (ret < 0)
+ brcmf_err("ap_isolate iovar failed: ret=%d\n", ret);
+ }
+
+ return ret;
+}
+
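The ap_isolate parameter is tri-state: -1 leaves the existing firmware setting untouched, while 0 and 1 are applied explicitly. A small standalone C sketch of that convention (apply_setting() and change_bss() here are hypothetical stand-ins for the iovar call and the cfg80211 callback):

#include <stdio.h>

static int current_ap_isolate;

/* Hypothetical stand-in for the firmware iovar set call. */
static int apply_setting(int value)
{
	current_ap_isolate = value;
	return 0;
}

static int change_bss(int ap_isolate)
{
	int ret = 0;

	if (ap_isolate >= 0)		/* -1: do not touch the existing value */
		ret = apply_setting(ap_isolate);

	return ret;
}

int main(void)
{
	change_bss(1);			/* isolate associated stations */
	change_bss(-1);			/* no change */
	printf("ap_isolate = %d\n", current_ap_isolate);
	return 0;
}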
static struct cfg80211_ops brcmf_cfg80211_ops = {
.add_virtual_intf = brcmf_cfg80211_add_iface,
.del_virtual_intf = brcmf_cfg80211_del_iface,
@@ -6005,6 +6025,7 @@ static struct cfg80211_ops brcmf_cfg80211_ops = {
.update_connect_params = brcmf_cfg80211_update_conn_params,
.set_pmk = brcmf_cfg80211_set_pmk,
.del_pmk = brcmf_cfg80211_del_pmk,
+ .change_bss = brcmf_cfg80211_change_bss,
};
struct cfg80211_ops *brcmf_cfg80211_get_ops(struct brcmf_mp_device *settings)
@@ -7659,6 +7680,8 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
BIT(NL80211_BSS_SELECT_ATTR_BAND_PREF) |
BIT(NL80211_BSS_SELECT_ATTR_RSSI_ADJUST);
+ wiphy->bss_param_support = WIPHY_BSS_PARAM_AP_ISOLATE;
+
wiphy->flags |= WIPHY_FLAG_NETNS_OK |
WIPHY_FLAG_PS_ON_BY_DEFAULT |
WIPHY_FLAG_HAVE_AP_SME |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index 9074ab49e806..4239f2b21e54 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -738,8 +738,8 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
case BRCM_CC_4364_CHIP_ID:
case CY_CC_4373_CHIP_ID:
return 0x160000;
- case CY_CC_43752_CHIP_ID:
case BRCM_CC_43751_CHIP_ID:
+ case BRCM_CC_43752_CHIP_ID:
case BRCM_CC_4377_CHIP_ID:
return 0x170000;
case BRCM_CC_4378_CHIP_ID:
@@ -1452,7 +1452,7 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
return (reg & CC_SR_CTL0_ENABLE_MASK) != 0;
case BRCM_CC_4359_CHIP_ID:
case BRCM_CC_43751_CHIP_ID:
- case CY_CC_43752_CHIP_ID:
+ case BRCM_CC_43752_CHIP_ID:
case CY_CC_43012_CHIP_ID:
addr = CORE_CC_REG(pmu->base, retention_ctl);
reg = chip->ops->read32(chip->ctx, addr);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index 83f8ed7d00f9..ef79924fd8f4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -554,12 +554,16 @@ static int brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
data = (u8 *)fw->data;
data_len = fw->size;
} else {
- if ((data = bcm47xx_nvram_get_contents(&data_len)))
+ data = bcm47xx_nvram_get_contents(&data_len);
+ if (data) {
free_bcm47xx_nvram = true;
- else if ((data = brcmf_fw_nvram_from_efi(&data_len)))
- kfree_nvram = true;
- else if (!(cur->flags & BRCMF_FW_REQF_OPTIONAL))
- goto fail;
+ } else {
+ data = brcmf_fw_nvram_from_efi(&data_len);
+ if (data)
+ kfree_nvram = true;
+ else if (!(cur->flags & BRCMF_FW_REQF_OPTIONAL))
+ goto fail;
+ }
}
if (data)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 8a0bad5119a0..8cf9d7e7c3f7 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -655,10 +655,10 @@ static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
BRCMF_FW_ENTRY(BRCM_CC_43751_CHIP_ID, 0xFFFFFFFF, 43752),
+ BRCMF_FW_ENTRY(BRCM_CC_43752_CHIP_ID, 0xFFFFFFFF, 43752),
BRCMF_FW_ENTRY(CY_CC_4373_CHIP_ID, 0xFFFFFFFF, 4373),
BRCMF_FW_ENTRY(CY_CC_43012_CHIP_ID, 0xFFFFFFFF, 43012),
BRCMF_FW_ENTRY(CY_CC_43439_CHIP_ID, 0xFFFFFFFF, 43439),
- BRCMF_FW_ENTRY(CY_CC_43752_CHIP_ID, 0xFFFFFFFF, 43752)
};
#define TXCTL_CREDITS 2
@@ -3426,8 +3426,8 @@ err:
static bool brcmf_sdio_aos_no_decode(struct brcmf_sdio *bus)
{
if (bus->ci->chip == BRCM_CC_43751_CHIP_ID ||
- bus->ci->chip == CY_CC_43012_CHIP_ID ||
- bus->ci->chip == CY_CC_43752_CHIP_ID)
+ bus->ci->chip == BRCM_CC_43752_CHIP_ID ||
+ bus->ci->chip == CY_CC_43012_CHIP_ID)
return true;
else
return false;
@@ -4278,8 +4278,8 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
switch (sdiod->func1->device) {
case SDIO_DEVICE_ID_BROADCOM_43751:
+ case SDIO_DEVICE_ID_BROADCOM_43752:
case SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373:
- case SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752:
brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n",
CY_4373_F2_WATERMARK);
brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
index b39c5c1ee18b..df3b67ba4db2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
@@ -60,7 +60,6 @@
#define CY_CC_4373_CHIP_ID 0x4373
#define CY_CC_43012_CHIP_ID 43012
#define CY_CC_43439_CHIP_ID 43439
-#define CY_CC_43752_CHIP_ID 43752
/* USB Device IDs */
#define BRCM_USB_43143_DEVICE_ID 0xbd1e
diff --git a/drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h b/drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h
index 1e8ab704dbfb..f00eb878b94b 100644
--- a/drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h
+++ b/drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h
@@ -50,28 +50,4 @@ struct ieee80211_measurement_params {
__le16 duration;
} __packed;
-struct ieee80211_info_element {
- u8 id;
- u8 len;
- u8 data[];
-} __packed;
-
-struct ieee80211_measurement_request {
- struct ieee80211_info_element ie;
- u8 token;
- u8 mode;
- u8 type;
- struct ieee80211_measurement_params params[];
-} __packed;
-
-struct ieee80211_measurement_report {
- struct ieee80211_info_element ie;
- u8 token;
- u8 mode;
- u8 type;
- union {
- struct ieee80211_basic_report basic[0];
- } u;
-} __packed;
-
#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
index 9f543946b285..3e6206e739f6 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
@@ -9,11 +9,11 @@
#include "iwl-prph.h"
#include "fw/api/txq.h"
-/* Highest firmware API version supported */
-#define IWL_BZ_UCODE_API_MAX 102
+/* Highest firmware core release supported */
+#define IWL_BZ_UCODE_CORE_MAX 99
/* Lowest firmware API version supported */
-#define IWL_BZ_UCODE_API_MIN 98
+#define IWL_BZ_UCODE_API_MIN 100
/* Memory offsets and lengths */
#define IWL_BZ_SMEM_OFFSET 0x400000
@@ -75,7 +75,7 @@ static const struct iwl_family_base_params iwl_bz_base = {
},
},
.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
- .ucode_api_max = IWL_BZ_UCODE_API_MAX,
+ .ucode_api_max = ENCODE_CORE_AS_API(IWL_BZ_UCODE_CORE_MAX),
.ucode_api_min = IWL_BZ_UCODE_API_MIN,
};
@@ -101,8 +101,8 @@ const struct iwl_mac_cfg iwl_gl_mac_cfg = {
.low_latency_xtal = true,
};
-IWL_FW_AND_PNVM(IWL_BZ_A_FM_B_FW_PRE, IWL_BZ_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_BZ_A_FM_C_FW_PRE, IWL_BZ_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_BZ_A_FM4_B_FW_PRE, IWL_BZ_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_GL_B_FM_B_FW_PRE, IWL_BZ_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_GL_C_FM_C_FW_PRE, IWL_BZ_UCODE_API_MAX);
+IWL_CORE_FW(IWL_BZ_A_FM_B_FW_PRE, IWL_BZ_UCODE_CORE_MAX);
+IWL_CORE_FW(IWL_BZ_A_FM_C_FW_PRE, IWL_BZ_UCODE_CORE_MAX);
+IWL_CORE_FW(IWL_BZ_A_FM4_B_FW_PRE, IWL_BZ_UCODE_CORE_MAX);
+IWL_CORE_FW(IWL_GL_B_FM_B_FW_PRE, IWL_BZ_UCODE_CORE_MAX);
+IWL_CORE_FW(IWL_GL_C_FM_C_FW_PRE, IWL_BZ_UCODE_CORE_MAX);
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/dr.c b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
index 807f4e29d55a..e53a785686c8 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c
@@ -8,11 +8,11 @@
#include "iwl-prph.h"
#include "fw/api/txq.h"
-/* Highest firmware API version supported */
-#define IWL_DR_UCODE_API_MAX 102
+/* Highest firmware core release supported */
+#define IWL_DR_UCODE_CORE_MAX 99
/* Lowest firmware API version supported */
-#define IWL_DR_UCODE_API_MIN 98
+#define IWL_DR_UCODE_API_MIN 100
/* Memory offsets and lengths */
#define IWL_DR_SMEM_OFFSET 0x400000
@@ -20,9 +20,6 @@
#define IWL_DR_A_PE_A_FW_PRE "iwlwifi-dr-a0-pe-a0"
-#define IWL_DR_A_PE_A_FW_MODULE_FIRMWARE(api) \
- IWL_DR_A_PE_A_FW_PRE "-" __stringify(api) ".ucode"
-
static const struct iwl_family_base_params iwl_dr_base = {
.num_of_queues = 512,
.max_tfd_queue_size = 65536,
@@ -73,7 +70,7 @@ static const struct iwl_family_base_params iwl_dr_base = {
},
},
.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
- .ucode_api_max = IWL_DR_UCODE_API_MAX,
+ .ucode_api_max = ENCODE_CORE_AS_API(IWL_DR_UCODE_CORE_MAX),
.ucode_api_min = IWL_DR_UCODE_API_MIN,
};
@@ -89,5 +86,5 @@ const struct iwl_mac_cfg iwl_dr_mac_cfg = {
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
-MODULE_FIRMWARE(IWL_DR_A_PE_A_FW_MODULE_FIRMWARE(IWL_DR_UCODE_API_MAX));
+IWL_CORE_FW(IWL_DR_A_PE_A_FW_PRE, IWL_DR_UCODE_CORE_MAX);
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c b/drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c
index 7ff5170faaa9..c16cda087a7c 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c
@@ -9,7 +9,7 @@
#define IWL_GF_UCODE_API_MAX 100
/* Lowest firmware API version supported */
-#define IWL_GF_UCODE_API_MIN 98
+#define IWL_GF_UCODE_API_MIN 100
#define IWL_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0"
#define IWL_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0"
@@ -23,6 +23,18 @@
#define IWL_SC_A_GF_A_FW_PRE "iwlwifi-sc-a0-gf-a0"
#define IWL_SC_A_GF4_A_FW_PRE "iwlwifi-sc-a0-gf4-a0"
+#define IWL_BZ_A_GF_A_MODULE_FIRMWARE(api) \
+ IWL_BZ_A_GF_A_FW_PRE "-" __stringify(api) ".ucode"
+
+#define IWL_BZ_A_GF4_A_MODULE_FIRMWARE(api) \
+ IWL_BZ_A_GF4_A_FW_PRE "-" __stringify(api) ".ucode"
+
+#define IWL_SC_A_GF_A_MODULE_FIRMWARE(api) \
+ IWL_SC_A_GF_A_FW_PRE "-" __stringify(api) ".ucode"
+
+#define IWL_SC_A_GF4_A_MODULE_FIRMWARE(api) \
+ IWL_SC_A_GF4_A_FW_PRE "-" __stringify(api) ".ucode"
+
/* NVM versions */
#define IWL_GF_NVM_VERSION 0x0a1d
@@ -67,7 +79,7 @@ IWL_FW_AND_PNVM(IWL_MA_A_GF_A_FW_PRE, IWL_GF_UCODE_API_MAX);
IWL_FW_AND_PNVM(IWL_MA_B_GF_A_FW_PRE, IWL_GF_UCODE_API_MAX);
IWL_FW_AND_PNVM(IWL_MA_A_GF4_A_FW_PRE, IWL_GF_UCODE_API_MAX);
IWL_FW_AND_PNVM(IWL_MA_B_GF4_A_FW_PRE, IWL_GF_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_BZ_A_GF_A_FW_PRE, IWL_GF_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_BZ_A_GF4_A_FW_PRE, IWL_GF_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_SC_A_GF_A_FW_PRE, IWL_GF_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_SC_A_GF4_A_FW_PRE, IWL_GF_UCODE_API_MAX);
+MODULE_FIRMWARE(IWL_BZ_A_GF_A_MODULE_FIRMWARE(IWL_GF_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_BZ_A_GF4_A_MODULE_FIRMWARE(IWL_GF_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SC_A_GF_A_MODULE_FIRMWARE(IWL_GF_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SC_A_GF4_A_MODULE_FIRMWARE(IWL_GF_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c b/drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c
index 9f408d276ce9..6cf187d92dbf 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c
@@ -9,7 +9,7 @@
#define IWL_HR_UCODE_API_MAX 100
/* Lowest firmware API version supported */
-#define IWL_HR_UCODE_API_MIN 98
+#define IWL_HR_UCODE_API_MIN 100
#define IWL_QU_B_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0"
#define IWL_QU_C_HR_B_FW_PRE "iwlwifi-Qu-c0-hr-b0"
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
index 6d4a3bce49b9..e9449b59114a 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
@@ -9,11 +9,11 @@
#include "iwl-prph.h"
#include "fw/api/txq.h"
-/* Highest firmware API version supported */
-#define IWL_SC_UCODE_API_MAX 102
+/* Highest firmware core release supported */
+#define IWL_SC_UCODE_CORE_MAX 99
/* Lowest firmware API version supported */
-#define IWL_SC_UCODE_API_MIN 98
+#define IWL_SC_UCODE_API_MIN 100
/* NVM versions */
#define IWL_SC_NVM_VERSION 0x0a1d
@@ -78,7 +78,7 @@ static const struct iwl_family_base_params iwl_sc_base = {
},
},
.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
- .ucode_api_max = IWL_SC_UCODE_API_MAX,
+ .ucode_api_max = ENCODE_CORE_AS_API(IWL_SC_UCODE_CORE_MAX),
.ucode_api_min = IWL_SC_UCODE_API_MIN,
};
@@ -94,8 +94,8 @@ const struct iwl_mac_cfg iwl_sc_mac_cfg = {
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
-IWL_FW_AND_PNVM(IWL_SC_A_FM_B_FW_PRE, IWL_SC_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_SC_A_FM_C_FW_PRE, IWL_SC_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_SC_A_WH_A_FW_PRE, IWL_SC_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_SC2_A_FM_C_FW_PRE, IWL_SC_UCODE_API_MAX);
-IWL_FW_AND_PNVM(IWL_SC2_A_WH_A_FW_PRE, IWL_SC_UCODE_API_MAX);
+IWL_CORE_FW(IWL_SC_A_FM_B_FW_PRE, IWL_SC_UCODE_CORE_MAX);
+IWL_CORE_FW(IWL_SC_A_FM_C_FW_PRE, IWL_SC_UCODE_CORE_MAX);
+IWL_CORE_FW(IWL_SC_A_WH_A_FW_PRE, IWL_SC_UCODE_CORE_MAX);
+IWL_CORE_FW(IWL_SC2_A_FM_C_FW_PRE, IWL_SC_UCODE_CORE_MAX);
+IWL_CORE_FW(IWL_SC2_A_WH_A_FW_PRE, IWL_SC_UCODE_CORE_MAX);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c b/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c
index 9f8cdb027839..d337ab543eb0 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c
@@ -766,7 +766,7 @@ static int iwl_init_otp_access(struct iwl_trans *trans)
{
int ret;
- ret = iwl_finish_nic_init(trans);
+ ret = iwl_trans_activate_nic(trans);
if (ret)
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
index 0771a46bd552..a0a26ef482a5 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -378,7 +378,7 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET,
CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
- iwl_trans_d3_suspend(priv->trans, false, true);
+ iwl_trans_d3_suspend(priv->trans, true);
goto out;
@@ -422,7 +422,6 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
struct ieee80211_vif *vif;
u32 base;
int ret;
- enum iwl_d3_status d3_status;
struct error_table_start {
/* cf. struct iwl_error_event_table */
u32 valid;
@@ -451,15 +450,10 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
/* we'll clear ctx->vif during iwlagn_prepare_restart() */
vif = ctx->vif;
- ret = iwl_trans_d3_resume(priv->trans, &d3_status, false, true);
+ ret = iwl_trans_d3_resume(priv->trans, true);
if (ret)
goto out_unlock;
- if (d3_status != IWL_D3_STATUS_ALIVE) {
- IWL_INFO(priv, "Device was reset during suspend\n");
- goto out_unlock;
- }
-
/* uCode is no longer operating by itself */
iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/power.c b/drivers/net/wireless/intel/iwlwifi/dvm/power.c
index 6b42d6e5f30f..e7dbba7134f7 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/power.c
@@ -368,7 +368,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
/* initialize to default */
void iwl_power_initialize(struct iwl_priv *priv)
{
- priv->power_data.bus_pm = priv->trans->pm_support;
+ priv->power_data.bus_pm = iwl_trans_is_pm_supported(priv->trans);
priv->power_data.debug_sleep_level_override = -1;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index bee7d92293b8..52edc19d8cdd 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -9,9 +9,9 @@
#include "acpi.h"
#include "fw/runtime.h"
-const guid_t iwl_guid = GUID_INIT(0xF21202BF, 0x8F78, 0x4DC6,
- 0xA5, 0xB3, 0x1F, 0x73,
- 0x8E, 0x28, 0x5A, 0xDE);
+static const guid_t iwl_guid = GUID_INIT(0xF21202BF, 0x8F78, 0x4DC6,
+ 0xA5, 0xB3, 0x1F, 0x73,
+ 0x8E, 0x28, 0x5A, 0xDE);
static const size_t acpi_dsm_size[DSM_FUNC_NUM_FUNCS] = {
[DSM_FUNC_QUERY] = sizeof(u32),
@@ -169,7 +169,7 @@ int iwl_acpi_get_dsm(struct iwl_fw_runtime *fwrt,
BUILD_BUG_ON(ARRAY_SIZE(acpi_dsm_size) != DSM_FUNC_NUM_FUNCS);
- if (WARN_ON(func >= ARRAY_SIZE(acpi_dsm_size)))
+ if (WARN_ON(func >= ARRAY_SIZE(acpi_dsm_size) || !func))
return -EINVAL;
expected_size = acpi_dsm_size[func];
@@ -178,6 +178,29 @@ int iwl_acpi_get_dsm(struct iwl_fw_runtime *fwrt,
if (expected_size != sizeof(u8) && expected_size != sizeof(u32))
return -EOPNOTSUPP;
+ if (!fwrt->acpi_dsm_funcs_valid) {
+ ret = iwl_acpi_get_dsm_integer(fwrt->dev, ACPI_DSM_REV,
+ DSM_FUNC_QUERY,
+ &iwl_guid, &tmp,
+ acpi_dsm_size[DSM_FUNC_QUERY]);
+ if (ret) {
+ /* always indicate BIT(0) to avoid re-reading */
+ fwrt->acpi_dsm_funcs_valid = BIT(0);
+ return ret;
+ }
+
+ IWL_DEBUG_RADIO(fwrt, "ACPI DSM validity bitmap 0x%x\n",
+ (u32)tmp);
+ /* always indicate BIT(0) to avoid re-reading */
+ fwrt->acpi_dsm_funcs_valid = tmp | BIT(0);
+ }
+
+ if (!(fwrt->acpi_dsm_funcs_valid & BIT(func))) {
+ IWL_DEBUG_RADIO(fwrt, "ACPI DSM %d not indicated as valid\n",
+ func);
+ return -ENODATA;
+ }
+
ret = iwl_acpi_get_dsm_integer(fwrt->dev, ACPI_DSM_REV, func,
&iwl_guid, &tmp, expected_size);
if (ret)
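The hunk above queries the DSM function-validity bitmap once, caches it with BIT(0) always set so that even a failed query is never retried, and then gates every later lookup on the cached bitmap. A standalone C sketch of that cache-with-sentinel pattern (query_bitmap() and func_supported() are hypothetical stand-ins, not the driver's symbols):

#include <stdio.h>

#define BIT(n) (1u << (n))

static unsigned int funcs_valid;	/* 0 = not queried yet */

/* Hypothetical query returning a bitmap of supported functions. */
static int query_bitmap(unsigned int *out)
{
	*out = BIT(3) | BIT(5);
	return 0;
}

static int func_supported(unsigned int func)
{
	unsigned int tmp;

	if (!funcs_valid) {
		if (query_bitmap(&tmp)) {
			/* Cache BIT(0) alone so a failed query is not retried. */
			funcs_valid = BIT(0);
			return 0;
		}
		/* BIT(0) marks the cache itself as populated. */
		funcs_valid = tmp | BIT(0);
	}

	return !!(funcs_valid & BIT(func));
}

int main(void)
{
	printf("func 3 supported: %d\n", func_supported(3));
	printf("func 4 supported: %d\n", func_supported(4));
	return 0;
}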
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index 68d8fb5f6357..20bc6671f4eb 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -140,8 +140,6 @@ struct iwl_dsm_internal_product_reset_cmd {
struct iwl_fw_runtime;
-extern const guid_t iwl_guid;
-
union acpi_object *iwl_acpi_get_dsm_object(struct device *dev, int rev,
int func, union acpi_object *args,
const guid_t *guid);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
index 53445087e9cb..d3bed0216df4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
@@ -367,6 +367,7 @@ enum iwl_wowlan_flags {
ENABLE_NBNS_FILTERING = BIT(2),
ENABLE_DHCP_FILTERING = BIT(3),
ENABLE_STORE_BEACON = BIT(4),
+ HAS_BEACON_PROTECTION = BIT(5),
};
/**
@@ -631,10 +632,65 @@ struct iwl_wowlan_gtk_status_v3 {
struct iwl_wowlan_all_rsc_tsc_v5 sc;
} __packed; /* WOWLAN_GTK_MATERIAL_VER_3 */
+/**
+ * enum iwl_wowlan_key_status - Status of security keys in WoWLAN notifications
+ * @IWL_WOWLAN_NOTIF_NO_KEY: No key is present; this entry should be ignored.
+ * @IWL_WOWLAN_STATUS_OLD_KEY: An old key exists; no rekey occurred, and only
+ * metadata is available.
+ * @IWL_WOWLAN_STATUS_NEW_KEY: A new key was created after a rekey; new key
+ * material is available.
+ */
+enum iwl_wowlan_key_status {
+ IWL_WOWLAN_NOTIF_NO_KEY = 0,
+ IWL_WOWLAN_STATUS_OLD_KEY = 1,
+ IWL_WOWLAN_STATUS_NEW_KEY = 2
+};
+
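A consumer of this notification is expected to branch on key_status as documented above: skip NO_KEY entries, keep the existing key for OLD_KEY, and install fresh material for NEW_KEY. A minimal standalone C sketch of that dispatch (the enum names and handle_key() are illustrative, not the driver's symbols):

#include <stdio.h>

enum key_status { NO_KEY = 0, OLD_KEY = 1, NEW_KEY = 2 };

/* Sketch of how a resume path could branch on key_status. */
static void handle_key(enum key_status status)
{
	switch (status) {
	case NO_KEY:
		return;			/* entry carries no key: ignore it */
	case OLD_KEY:
		printf("reuse existing key, metadata only\n");
		break;
	case NEW_KEY:
		printf("install rekeyed material\n");
		break;
	}
}

int main(void)
{
	handle_key(NEW_KEY);
	return 0;
}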
+/**
+ * struct iwl_wowlan_gtk_status - GTK status
+ * @key: GTK material
+ * @key_len: GTK length, if set to 0, the key is not available
+ * @key_flags: information about the key:
+ * bits[0:1]: key index assigned by the AP
+ * bits[2:6]: GTK index of the key in the internal DB
+ * bit[7]: Set iff this is the currently used GTK
+ * @key_status: key status, see &enum iwl_wowlan_key_status
+ * @reserved: padding
+ * @tkip_mic_key: TKIP RX MIC key
+ * @sc: RSC/TSC counters
+ */
+struct iwl_wowlan_gtk_status {
+ u8 key[WOWLAN_KEY_MAX_SIZE];
+ u8 key_len;
+ u8 key_flags;
+ u8 key_status;
+ u8 reserved;
+ u8 tkip_mic_key[IWL_MIC_KEY_SIZE];
+ struct iwl_wowlan_all_rsc_tsc_v5 sc;
+} __packed; /* WOWLAN_GTK_MATERIAL_VER_4 */
+
#define IWL_WOWLAN_GTK_IDX_MASK (BIT(0) | BIT(1))
#define IWL_WOWLAN_IGTK_BIGTK_IDX_MASK (BIT(0))
/**
+ * struct iwl_wowlan_igtk_status_v1 - IGTK status
+ * @key: IGTK material
+ * @ipn: the IGTK packet number (replay counter)
+ * @key_len: IGTK length, if set to 0, the key is not available
+ * @key_flags: information about the key:
+ * bits[0]: key index assigned by the AP (0: index 4, 1: index 5)
+ * (0: index 6, 1: index 7 with bigtk)
+ * bits[1:5]: IGTK index of the key in the internal DB
+ * bit[6]: Set iff this is the currently used IGTK
+ */
+struct iwl_wowlan_igtk_status_v1 {
+ u8 key[WOWLAN_KEY_MAX_SIZE];
+ u8 ipn[6];
+ u8 key_len;
+ u8 key_flags;
+} __packed; /* WOWLAN_IGTK_MATERIAL_VER_1 */
+
+/**
* struct iwl_wowlan_igtk_status - IGTK status
* @key: IGTK material
* @ipn: the IGTK packet number (replay counter)
@@ -644,13 +700,17 @@ struct iwl_wowlan_gtk_status_v3 {
* (0: index 6, 1: index 7 with bigtk)
* bits[1:5]: IGTK index of the key in the internal DB
* bit[6]: Set iff this is the currently used IGTK
+ * @key_status: key status, see &enum iwl_wowlan_key_status
+ * @reserved: padding
*/
struct iwl_wowlan_igtk_status {
u8 key[WOWLAN_KEY_MAX_SIZE];
u8 ipn[6];
u8 key_len;
u8 key_flags;
-} __packed; /* WOWLAN_IGTK_MATERIAL_VER_1 */
+ u8 key_status;
+ u8 reserved[3];
+} __packed; /* WOWLAN_IGTK_MATERIAL_VER_2 */
/**
* struct iwl_wowlan_status_v6 - WoWLAN status
@@ -700,7 +760,7 @@ struct iwl_wowlan_status_v6 {
*/
struct iwl_wowlan_status_v7 {
struct iwl_wowlan_gtk_status_v2 gtk[WOWLAN_GTK_KEYS_NUM];
- struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
+ struct iwl_wowlan_igtk_status_v1 igtk[WOWLAN_IGTK_KEYS_NUM];
__le64 replay_ctr;
__le16 pattern_number;
__le16 non_qos_seq_ctr;
@@ -735,7 +795,7 @@ struct iwl_wowlan_status_v7 {
*/
struct iwl_wowlan_info_notif_v1 {
struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM];
- struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
+ struct iwl_wowlan_igtk_status_v1 igtk[WOWLAN_IGTK_KEYS_NUM];
__le64 replay_ctr;
__le16 pattern_number;
__le16 reserved1;
@@ -817,8 +877,8 @@ struct iwl_wowlan_mlo_gtk {
*/
struct iwl_wowlan_info_notif_v3 {
struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM];
- struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
- struct iwl_wowlan_igtk_status bigtk[WOWLAN_BIGTK_KEYS_NUM];
+ struct iwl_wowlan_igtk_status_v1 igtk[WOWLAN_IGTK_KEYS_NUM];
+ struct iwl_wowlan_igtk_status_v1 bigtk[WOWLAN_BIGTK_KEYS_NUM];
__le64 replay_ctr;
__le16 pattern_number;
__le16 reserved1;
@@ -833,6 +893,45 @@ struct iwl_wowlan_info_notif_v3 {
} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_3 */
/**
+ * struct iwl_wowlan_info_notif_v5 - WoWLAN information notification
+ * @gtk: GTK data
+ * @igtk: IGTK data
+ * @bigtk: BIGTK data
+ * @replay_ctr: GTK rekey replay counter
+ * @pattern_number: number of the matched patterns
+ * @qos_seq_ctr: QoS sequence counters to use next
+ * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
+ * @num_of_gtk_rekeys: number of GTK rekeys
+ * @transmitted_ndps: number of transmitted neighbor discovery packets
+ * @received_beacons: number of received beacons
+ * @tid_tear_down: bit mask of tids whose BA sessions were closed
+ * in suspend state
+ * @station_id: station id
+ * @num_mlo_link_keys: number of &struct iwl_wowlan_mlo_gtk structs
+ * following this notif
+ * @tid_offloaded_tx: tid used by the firmware to transmit data packets
+ * while in wowlan
+ * @mlo_gtks: array of GTKs of size num_mlo_link_keys
+ */
+struct iwl_wowlan_info_notif_v5 {
+ struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM];
+ struct iwl_wowlan_igtk_status_v1 igtk[WOWLAN_IGTK_KEYS_NUM];
+ struct iwl_wowlan_igtk_status_v1 bigtk[WOWLAN_BIGTK_KEYS_NUM];
+ __le64 replay_ctr;
+ __le16 pattern_number;
+ __le16 qos_seq_ctr;
+ __le32 wakeup_reasons;
+ __le32 num_of_gtk_rekeys;
+ __le32 transmitted_ndps;
+ __le32 received_beacons;
+ u8 tid_tear_down;
+ u8 station_id;
+ u8 num_mlo_link_keys;
+ u8 tid_offloaded_tx;
+ struct iwl_wowlan_mlo_gtk mlo_gtks[];
+} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_5 */
+
+/**
* struct iwl_wowlan_info_notif - WoWLAN information notification
* @gtk: GTK data
* @igtk: IGTK data
@@ -854,7 +953,7 @@ struct iwl_wowlan_info_notif_v3 {
* @mlo_gtks: array of GTKs of size num_mlo_link_keys
*/
struct iwl_wowlan_info_notif {
- struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM];
+ struct iwl_wowlan_gtk_status gtk[WOWLAN_GTK_KEYS_NUM];
struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
struct iwl_wowlan_igtk_status bigtk[WOWLAN_BIGTK_KEYS_NUM];
__le64 replay_ctr;
@@ -869,7 +968,7 @@ struct iwl_wowlan_info_notif {
u8 num_mlo_link_keys;
u8 tid_offloaded_tx;
struct iwl_wowlan_mlo_gtk mlo_gtks[];
-} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_5 */
+} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_6 */
/**
* struct iwl_wowlan_wake_pkt_notif - WoWLAN wake packet notification
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
index b9f559dac39f..f76cea6e9ec8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -420,6 +420,8 @@ struct iwl_mac_config_cmd {
* eht_support set to true. No longer used since _VER_3 of this command.
* @LINK_CONTEXT_MODIFY_BANDWIDTH: Covers iwl_link_ctx_cfg_cmd::modify_bandwidth.
* Request RX OMI to the AP to modify bandwidth of this link.
+ * @LINK_CONTEXT_MODIFY_UHR_PARAMS: covers iwl_link_ctx_cfg_cmd::npca_params and
+ * iwl_link_ctx_cfg_cmd::prio_edca_params. Since _VER_7.
* @LINK_CONTEXT_MODIFY_ALL: set all above flags
*/
enum iwl_link_ctx_modify_flags {
@@ -432,6 +434,7 @@ enum iwl_link_ctx_modify_flags {
LINK_CONTEXT_MODIFY_BSS_COLOR_DISABLE = BIT(6),
LINK_CONTEXT_MODIFY_EHT_PARAMS = BIT(7),
LINK_CONTEXT_MODIFY_BANDWIDTH = BIT(8),
+ LINK_CONTEXT_MODIFY_UHR_PARAMS = BIT(9),
LINK_CONTEXT_MODIFY_ALL = 0xff,
}; /* LINK_CONTEXT_MODIFY_MASK_E_VER_1 */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
index 2a1c2b0f19e4..bb801650a565 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
@@ -20,7 +20,7 @@ enum iwl_prot_offload_subcmd_ids {
/**
* @WOWLAN_INFO_NOTIFICATION: Notification in
* &struct iwl_wowlan_info_notif_v1, iwl_wowlan_info_notif_v3,
- * or &struct iwl_wowlan_info_notif
+ * &struct iwl_wowlan_info_notif_v5 or &struct iwl_wowlan_info_notif
*/
WOWLAN_INFO_NOTIFICATION = 0xFD,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
index 786b3bf4b448..5eb8d10678fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -571,17 +571,16 @@ enum iwl_ppag_flags {
/**
* union iwl_ppag_table_cmd - union for all versions of PPAG command
* @v1: command version 1 structure.
- * @v2: command version from 2 to 6 are same structure as v2.
- * but has a different format of the flags bitmap
- * @v3: command version 7 structure.
+ * @v5: command version 5 structure.
+ * @v7: command version 7 structure.
* @v1.flags: values from &enum iwl_ppag_flags
* @v1.gain: table of antenna gain values per chain and sub-band
* @v1.reserved: reserved
- * @v2.flags: values from &enum iwl_ppag_flags
- * @v2.gain: table of antenna gain values per chain and sub-band
- * @v3.ppag_config_info: see @struct bios_value_u32
- * @v3.gain: table of antenna gain values per chain and sub-band
- * @v3.reserved: reserved
+ * @v5.flags: values from &enum iwl_ppag_flags
+ * @v5.gain: table of antenna gain values per chain and sub-band
+ * @v7.ppag_config_info: see @struct bios_value_u32
+ * @v7.gain: table of antenna gain values per chain and sub-band
+ * @v7.reserved: reserved
*/
union iwl_ppag_table_cmd {
struct {
@@ -593,30 +592,19 @@ union iwl_ppag_table_cmd {
__le32 flags;
s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
s8 reserved[2];
- } __packed v2; /* PER_PLAT_ANTENNA_GAIN_CMD_API_S_VER_2, VER3, VER4,
- * VER5, VER6
- */
+ } __packed v5; /* PER_PLAT_ANTENNA_GAIN_CMD_API_S_VER_5 */
struct {
struct bios_value_u32 ppag_config_info;
s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
s8 reserved[2];
- } __packed v3; /* PER_PLAT_ANTENNA_GAIN_CMD_API_S_VER_7 */
+ } __packed v7; /* PER_PLAT_ANTENNA_GAIN_CMD_API_S_VER_7 */
} __packed;
-#define IWL_PPAG_CMD_V4_MASK (IWL_PPAG_ETSI_MASK | IWL_PPAG_CHINA_MASK)
-#define IWL_PPAG_CMD_V5_MASK (IWL_PPAG_CMD_V4_MASK | \
+#define IWL_PPAG_CMD_V1_MASK (IWL_PPAG_ETSI_MASK | IWL_PPAG_CHINA_MASK)
+#define IWL_PPAG_CMD_V5_MASK (IWL_PPAG_CMD_V1_MASK | \
IWL_PPAG_ETSI_LPI_UHB_MASK | \
IWL_PPAG_USA_LPI_UHB_MASK)
-#define IWL_PPAG_CMD_V6_MASK (IWL_PPAG_CMD_V5_MASK | \
- IWL_PPAG_ETSI_VLP_UHB_MASK | \
- IWL_PPAG_ETSI_SP_UHB_MASK | \
- IWL_PPAG_USA_VLP_UHB_MASK | \
- IWL_PPAG_USA_SP_UHB_MASK | \
- IWL_PPAG_CANADA_LPI_UHB_MASK | \
- IWL_PPAG_CANADA_VLP_UHB_MASK | \
- IWL_PPAG_CANADA_SP_UHB_MASK)
-
#define MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE 26
#define MCC_TO_SAR_OFFSET_TABLE_COL_SIZE 13
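
The power.h hunk above collapses the PPAG union to the three command layouts that are still produced (v1, v5 and v7) and drops the v6 mask. As a reading aid, here is a minimal sketch of the resulting version-to-mask dispatch; the helper is illustrative only, reuses the mask names from this hunk, and assumes kernel-style types.

/* Illustrative only: which flags mask applies for a given PPAG
 * command version after this cleanup. v7 carries its flags inside
 * ppag_config_info instead, so no bitmap mask is applied there.
 */
static u32 ppag_flags_mask(int cmd_ver)
{
	switch (cmd_ver) {
	case 1:
		return IWL_PPAG_CMD_V1_MASK;
	case 5:
		return IWL_PPAG_CMD_V5_MASK;
	default:
		return 0;
	}
}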
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index 3222cbcbe1ab..9c464e7aba10 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -24,6 +24,8 @@
* for BPSK (MCS 0) with 2 spatial
* streams
* @IWL_TLC_MNG_CFG_FLAGS_EHT_EXTRA_LTF_MSK: enable support for EHT extra LTF
+ * @IWL_TLC_MNG_CFG_FLAGS_UHR_ELR_1_5_MBPS_MSK: support ELR 1.5 Mbps
+ * @IWL_TLC_MNG_CFG_FLAGS_UHR_ELR_3_MBPS_MSK: support ELR 3 Mbps
*/
enum iwl_tlc_mng_cfg_flags {
IWL_TLC_MNG_CFG_FLAGS_STBC_MSK = BIT(0),
@@ -32,6 +34,8 @@ enum iwl_tlc_mng_cfg_flags {
IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK = BIT(3),
IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK = BIT(4),
IWL_TLC_MNG_CFG_FLAGS_EHT_EXTRA_LTF_MSK = BIT(6),
+ IWL_TLC_MNG_CFG_FLAGS_UHR_ELR_1_5_MBPS_MSK = BIT(7),
+ IWL_TLC_MNG_CFG_FLAGS_UHR_ELR_3_MBPS_MSK = BIT(8),
};
/**
@@ -201,6 +205,37 @@ struct iwl_tlc_config_cmd_v4 {
} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_4 */
/**
+ * struct iwl_tlc_config_cmd - TLC configuration
+ * @sta_id: station id
+ * @reserved1: reserved
+ * @max_ch_width: max supported channel width from &enum iwl_tlc_mng_cfg_cw
+ * @mode: &enum iwl_tlc_mng_cfg_mode
+ * @chains: bitmask of &enum iwl_tlc_mng_cfg_chains
+ * @sgi_ch_width_supp: bitmap of SGI support per channel width
+ * use BIT(&enum iwl_tlc_mng_cfg_cw)
+ * @flags: bitmask of &enum iwl_tlc_mng_cfg_flags
+ * @non_ht_rates: bitmap of supported legacy rates
+ * @ht_rates: bitmap of &enum iwl_tlc_mng_ht_rates, per <nss, channel-width>
+ * pair (0 - 80 MHz width and below, 1 - 160 MHz, 2 - 320 MHz).
+ * @max_mpdu_len: max MPDU length, in bytes
+ * @max_tx_op: max TXOP in usecs for all ACs (BK, BE, VO, VI),
+ * set zero for no limit.
+ */
+struct iwl_tlc_config_cmd {
+ u8 sta_id;
+ u8 reserved1[3];
+ u8 max_ch_width;
+ u8 mode;
+ u8 chains;
+ u8 sgi_ch_width_supp;
+ __le16 flags;
+ __le16 non_ht_rates;
+ __le32 ht_rates[IWL_TLC_NSS_MAX][IWL_TLC_MCS_PER_BW_NUM_V4];
+ __le16 max_mpdu_len;
+ __le16 max_tx_op;
+} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_5 */
+
+/**
* enum iwl_tlc_update_flags - updated fields
* @IWL_TLC_NOTIF_FLAG_RATE: last initial rate update
* @IWL_TLC_NOTIF_FLAG_AMSDU: AMSDU parameters update
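
To illustrate the new TLC_MNG_CONFIG_CMD_API_S_VER_5 layout and the UHR ELR flag bits added above, here is a hedged sketch of filling the command; the helper is hypothetical (not part of the driver) and only uses names introduced in this hunk.

/* Hypothetical helper: populate the minimal fields of the VER_5 TLC
 * config command with the new ELR rate flags enabled.
 */
static void tlc_cmd_init(struct iwl_tlc_config_cmd *cmd, u8 sta_id)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->sta_id = sta_id;
	/* both new UHR ELR bits (7 and 8) fit in the __le16 flags field */
	cmd->flags = cpu_to_le16(IWL_TLC_MNG_CFG_FLAGS_UHR_ELR_1_5_MBPS_MSK |
				 IWL_TLC_MNG_CFG_FLAGS_UHR_ELR_3_MBPS_MSK);
	cmd->max_tx_op = cpu_to_le16(0); /* 0 == no TXOP limit */
}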
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 2879be4b8fcb..2ce55859641c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -830,7 +830,7 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
}
/* reading RXF/TXF sizes */
- if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
+ if (iwl_trans_is_fw_error(fwrt->trans)) {
fifo_len = iwl_fw_rxf_len(fwrt, mem_cfg);
fifo_len += iwl_fw_txf_len(fwrt, mem_cfg);
@@ -2393,7 +2393,7 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_dump_cfg_name *cfg_name;
u32 size = sizeof(*tlv) + sizeof(*dump);
u32 num_of_cfg_names = 0;
- u32 hw_type, is_cdb, is_jacket;
+ u32 hw_type, is_cdb;
list_for_each_entry(node, &fwrt->trans->dbg.debug_info_tlv_list, list) {
size += sizeof(*cfg_name);
@@ -2426,11 +2426,7 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
hw_type = CSR_HW_REV_TYPE(fwrt->trans->info.hw_rev);
is_cdb = CSR_HW_RFID_IS_CDB(fwrt->trans->info.hw_rf_id);
- is_jacket = !!(iwl_read_umac_prph(fwrt->trans, WFPM_OTP_CFG1_ADDR) &
- WFPM_OTP_CFG1_IS_JACKET_BIT);
-
- /* Use bits 12 and 13 to indicate jacket/CDB, respectively */
- hw_type |= (is_jacket | (is_cdb << 1)) << IWL_JACKET_CDB_SHIFT;
+ hw_type |= IWL_CDB_MASK(is_cdb);
dump->hw_type = cpu_to_le32(hw_type);
@@ -2478,36 +2474,6 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
return entry->size;
}
-static u32 iwl_dump_ini_file_name_info(struct iwl_fw_runtime *fwrt,
- struct list_head *list)
-{
- struct iwl_fw_ini_dump_entry *entry;
- struct iwl_dump_file_name_info *tlv;
- u32 len = strnlen(fwrt->trans->dbg.dump_file_name_ext,
- IWL_FW_INI_MAX_NAME);
-
- if (!fwrt->trans->dbg.dump_file_name_ext_valid)
- return 0;
-
- entry = vzalloc(sizeof(*entry) + sizeof(*tlv) + len);
- if (!entry)
- return 0;
-
- entry->size = sizeof(*tlv) + len;
-
- tlv = (void *)entry->data;
- tlv->type = cpu_to_le32(IWL_INI_DUMP_NAME_TYPE);
- tlv->len = cpu_to_le32(len);
- memcpy(tlv->data, fwrt->trans->dbg.dump_file_name_ext, len);
-
- /* add the dump file name extension tlv to the list */
- list_add_tail(&entry->list, list);
-
- fwrt->trans->dbg.dump_file_name_ext_valid = false;
-
- return entry->size;
-}
-
static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
[IWL_FW_INI_REGION_INVALID] = {},
[IWL_FW_INI_REGION_INTERNAL_BUFFER] = {
@@ -2764,7 +2730,6 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
&iwl_dump_ini_region_ops[IWL_FW_INI_REGION_DRAM_IMR]);
if (size) {
- size += iwl_dump_ini_file_name_info(fwrt, list);
size += iwl_dump_ini_info(fwrt, trigger, list);
}
@@ -3151,7 +3116,7 @@ static void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt,
.len[0] = sizeof(hcmd_data),
};
- if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
+ if (iwl_trans_is_fw_error(fwrt->trans))
return;
if (fw_has_capa(&fwrt->fw->ucode_capa,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dump.c b/drivers/net/wireless/intel/iwlwifi/fw/dump.c
index f633124979ab..ddd714cff2f4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dump.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dump.c
@@ -14,13 +14,6 @@
#include "iwl-csr.h"
#include "pnvm.h"
-#define FW_ASSERT_LMAC_FATAL 0x70
-#define FW_ASSERT_LMAC2_FATAL 0x72
-#define FW_ASSERT_UMAC_FATAL 0x71
-#define UMAC_RT_NMI_LMAC2_FATAL 0x72
-#define RT_NMI_INTERRUPT_OTHER_LMAC_FATAL 0x73
-#define FW_ASSERT_NMI_UNKNOWN 0x84
-
/*
* Note: This structure is read from the device with IO accesses,
* and the reading already does the endian conversion. As it is
@@ -103,17 +96,6 @@ struct iwl_umac_error_event_table {
#define ERROR_START_OFFSET (1 * sizeof(u32))
#define ERROR_ELEM_SIZE (7 * sizeof(u32))
-static bool iwl_fwrt_if_errorid_other_cpu(u32 err_id)
-{
- err_id &= 0xFF;
-
- if ((err_id >= FW_ASSERT_LMAC_FATAL &&
- err_id <= RT_NMI_INTERRUPT_OTHER_LMAC_FATAL) ||
- err_id == FW_ASSERT_NMI_UNKNOWN)
- return true;
- return false;
-}
-
static void iwl_fwrt_dump_umac_error_log(struct iwl_fw_runtime *fwrt)
{
struct iwl_trans *trans = fwrt->trans;
@@ -131,13 +113,6 @@ static void iwl_fwrt_dump_umac_error_log(struct iwl_fw_runtime *fwrt)
if (table.valid)
fwrt->dump.umac_err_id = table.error_id;
- if (!iwl_fwrt_if_errorid_other_cpu(fwrt->dump.umac_err_id) &&
- !fwrt->trans->dbg.dump_file_name_ext_valid) {
- fwrt->trans->dbg.dump_file_name_ext_valid = true;
- snprintf(fwrt->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME,
- "0x%x", fwrt->dump.umac_err_id);
- }
-
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
IWL_ERR(trans, "Start IWL Error Log Dump:\n");
IWL_ERR(trans, "Transport status: 0x%08lX, valid: %d\n",
@@ -203,7 +178,7 @@ static void iwl_fwrt_dump_lmac_error_log(struct iwl_fw_runtime *fwrt, u8 lmac_nu
if (err)
return;
- err = iwl_finish_nic_init(trans);
+ err = iwl_trans_activate_nic(trans);
if (err)
return;
}
@@ -213,13 +188,6 @@ static void iwl_fwrt_dump_lmac_error_log(struct iwl_fw_runtime *fwrt, u8 lmac_nu
if (table.valid)
fwrt->dump.lmac_err_id[lmac_num] = table.error_id;
- if (!iwl_fwrt_if_errorid_other_cpu(fwrt->dump.lmac_err_id[lmac_num]) &&
- !fwrt->trans->dbg.dump_file_name_ext_valid) {
- fwrt->trans->dbg.dump_file_name_ext_valid = true;
- snprintf(fwrt->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME,
- "0x%x", fwrt->dump.lmac_err_id[lmac_num]);
- }
-
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
IWL_ERR(trans, "Start IWL Error Log Dump:\n");
IWL_ERR(trans, "Transport status: 0x%08lX, valid: %d\n",
@@ -305,16 +273,6 @@ static void iwl_fwrt_dump_tcm_error_log(struct iwl_fw_runtime *fwrt, int idx)
iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
- if (table.valid)
- fwrt->dump.tcm_err_id[idx] = table.error_id;
-
- if (!iwl_fwrt_if_errorid_other_cpu(fwrt->dump.tcm_err_id[idx]) &&
- !fwrt->trans->dbg.dump_file_name_ext_valid) {
- fwrt->trans->dbg.dump_file_name_ext_valid = true;
- snprintf(fwrt->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME,
- "0x%x", fwrt->dump.tcm_err_id[idx]);
- }
-
IWL_ERR(fwrt, "TCM%d status:\n", idx + 1);
IWL_ERR(fwrt, "0x%08X | error ID\n", table.error_id);
IWL_ERR(fwrt, "0x%08X | tcm branchlink2\n", table.blink2);
@@ -378,16 +336,6 @@ static void iwl_fwrt_dump_rcm_error_log(struct iwl_fw_runtime *fwrt, int idx)
iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
- if (table.valid)
- fwrt->dump.rcm_err_id[idx] = table.error_id;
-
- if (!iwl_fwrt_if_errorid_other_cpu(fwrt->dump.rcm_err_id[idx]) &&
- !fwrt->trans->dbg.dump_file_name_ext_valid) {
- fwrt->trans->dbg.dump_file_name_ext_valid = true;
- snprintf(fwrt->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME,
- "0x%x", fwrt->dump.rcm_err_id[idx]);
- }
-
IWL_ERR(fwrt, "RCM%d status:\n", idx + 1);
IWL_ERR(fwrt, "0x%08X | error ID\n", table.error_id);
IWL_ERR(fwrt, "0x%08X | rcm branchlink2\n", table.blink2);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index cf41021d59ad..c2a73cc85eff 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -372,7 +372,8 @@ struct iwl_fw_ini_dump_cfg_name {
u8 cfg_name[IWL_FW_INI_MAX_CFG_NAME];
} __packed;
-#define IWL_JACKET_CDB_SHIFT 12
+#define IWL_CDB_MASK(val) ((val) << 13)
+
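With the jacket indication gone, only the CDB bit survives, and it stays at bit 13 (the old code placed (is_cdb << 1) at IWL_JACKET_CDB_SHIFT == 12). A small illustrative sketch of the resulting composition, for reference:

/* Sketch: how dump->hw_type is composed after this change. */
static u32 dump_hw_type(u32 hw_rev_type, u32 is_cdb)
{
	/* bit 13 carries the CDB indication; jacket (bit 12) is dropped */
	return hw_rev_type | (is_cdb << 13);
}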
/* struct iwl_fw_ini_dump_info - ini dump information
* @version: dump version
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
index 4d91ae065c8d..f297e82d63d2 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
@@ -237,11 +237,12 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
return -ENOENT;
}
-static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len)
+static u8 *iwl_pnvm_get_from_fs(struct iwl_trans *trans, size_t *len)
{
const struct firmware *pnvm;
char pnvm_name[MAX_PNVM_NAME];
size_t new_len;
+ u8 *data;
int ret;
iwl_pnvm_get_fs_name(trans, pnvm_name, sizeof(pnvm_name));
@@ -250,29 +251,73 @@ static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len)
if (ret) {
IWL_DEBUG_FW(trans, "PNVM file %s not found %d\n",
pnvm_name, ret);
- return ret;
+ return NULL;
}
new_len = pnvm->size;
- *data = kvmemdup(pnvm->data, pnvm->size, GFP_KERNEL);
+ data = kvmemdup(pnvm->data, pnvm->size, GFP_KERNEL);
release_firmware(pnvm);
- if (!*data)
- return -ENOMEM;
+ if (!data)
+ return NULL;
*len = new_len;
- return 0;
+ return data;
+}
+
+/**
+ * enum iwl_pnvm_source - different PNVM possible sources
+ *
+ * @IWL_PNVM_SOURCE_NONE: No PNVM.
+ * @IWL_PNVM_SOURCE_BIOS: PNVM should be read from BIOS.
+ * @IWL_PNVM_SOURCE_EXTERNAL: read .pnvm external file
+ * @IWL_PNVM_SOURCE_EMBEDDED: PNVM is embedded in the .ucode file.
+ */
+enum iwl_pnvm_source {
+ IWL_PNVM_SOURCE_NONE,
+ IWL_PNVM_SOURCE_BIOS,
+ IWL_PNVM_SOURCE_EXTERNAL,
+ IWL_PNVM_SOURCE_EMBEDDED
+};
+
+static enum iwl_pnvm_source iwl_select_pnvm_source(struct iwl_trans *trans,
+ bool intel_sku)
+{
+ /* Get PNVM from BIOS for non-Intel SKU */
+ if (!intel_sku)
+ return IWL_PNVM_SOURCE_BIOS;
+
+ /* Before those devices, PNVM didn't exist at all */
+ if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ return IWL_PNVM_SOURCE_NONE;
+
+ /* After those devices, we moved to embedded PNVM */
+ if (trans->mac_cfg->device_family > IWL_DEVICE_FAMILY_AX210)
+ return IWL_PNVM_SOURCE_EMBEDDED;
+
+ /* For IWL_DEVICE_FAMILY_AX210, depends on the CRF */
+ if (CSR_HW_RFID_TYPE(trans->info.hw_rf_id) == IWL_CFG_RF_TYPE_GF)
+ return IWL_PNVM_SOURCE_EXTERNAL;
+
+ return IWL_PNVM_SOURCE_NONE;
}
static const u8 *iwl_get_pnvm_image(struct iwl_trans *trans_p, size_t *len,
__le32 sku_id[3], const struct iwl_fw *fw)
{
struct pnvm_sku_package *package;
+ enum iwl_pnvm_source pnvm_src =
+ iwl_select_pnvm_source(trans_p, sku_id[2] == 0);
u8 *image = NULL;
- /* Get PNVM from BIOS for non-Intel SKU */
- if (sku_id[2]) {
+ IWL_DEBUG_FW(trans_p, "PNVM source %d\n", pnvm_src);
+
+ if (pnvm_src == IWL_PNVM_SOURCE_NONE)
+ return NULL;
+
+ if (pnvm_src == IWL_PNVM_SOURCE_BIOS) {
package = iwl_uefi_get_pnvm(trans_p, len);
if (!IS_ERR_OR_NULL(package)) {
if (*len >= sizeof(*package)) {
@@ -289,18 +334,26 @@ static const u8 *iwl_get_pnvm_image(struct iwl_trans *trans_p, size_t *len,
if (image)
return image;
}
+
+ /* PNVM doesn't exist in BIOS. Find the fallback source */
+ pnvm_src = iwl_select_pnvm_source(trans_p, true);
+ IWL_DEBUG_FW(trans_p, "PNVM in BIOS doesn't exist, try %d\n",
+ pnvm_src);
}
- if (fw->pnvm_data) {
- *len = fw->pnvm_size;
+ if (pnvm_src == IWL_PNVM_SOURCE_EXTERNAL) {
+ image = iwl_pnvm_get_from_fs(trans_p, len);
+ if (image)
+ return image;
+ }
+ if (pnvm_src == IWL_PNVM_SOURCE_EMBEDDED && fw->pnvm_data) {
+ *len = fw->pnvm_size;
return fw->pnvm_data;
}
- /* If it's not available, or for Intel SKU, try from the filesystem */
- if (iwl_pnvm_get_from_fs(trans_p, &image, len))
- return NULL;
- return image;
+ IWL_ERR(trans_p, "Couldn't get PNVM from required source: %d\n", pnvm_src);
+ return NULL;
}
static void
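
The new enum makes the PNVM lookup order explicit: BIOS for non-Intel SKUs (with a fallback re-selection if the BIOS table is absent), nothing before AX210, embedded PNVM after AX210, and an external .pnvm file for AX210 with a GF RF. A self-contained restatement, with the driver's config fields replaced by plain parameters:

enum pnvm_source { SRC_NONE, SRC_BIOS, SRC_EXTERNAL, SRC_EMBEDDED };

/* Illustrative restatement of iwl_select_pnvm_source(); the family
 * comparison and the GF RF check stand in for the config fields.
 */
static enum pnvm_source select_pnvm_source(int family, int ax210,
					   int intel_sku, int rf_is_gf)
{
	if (!intel_sku)
		return SRC_BIOS;
	if (family < ax210)
		return SRC_NONE;	/* PNVM predates these devices */
	if (family > ax210)
		return SRC_EMBEDDED;	/* embedded in the .ucode file */
	return rf_is_gf ? SRC_EXTERNAL : SRC_NONE; /* AX210: depends on CRF */
}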
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
index 3d6d1a85bb51..e1f28b053253 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
@@ -59,11 +59,16 @@ static const struct dmi_system_id dmi_ppag_approved_list[] = {
DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
},
},
- { .ident = "ASUS",
+ { .ident = "ASUSTEK",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
},
},
+ { .ident = "ASUS",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUS"),
+ },
+ },
{ .ident = "GOOGLE-HP",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Google"),
@@ -141,11 +146,16 @@ static const struct dmi_system_id dmi_tas_approved_list[] = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
},
},
- { .ident = "ASUS",
+ { .ident = "ASUSTEK",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
},
},
+ { .ident = "ASUS",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUS"),
+ },
+ },
{ .ident = "GOOGLE-HP",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Google"),
@@ -305,6 +315,7 @@ static bool iwl_ppag_value_valid(struct iwl_fw_runtime *fwrt, int chain,
return true;
}
+/* Utility function for iwlmvm and iwlxvt */
int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
union iwl_ppag_table_cmd *cmd, int *cmd_size)
{
@@ -344,18 +355,18 @@ int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
num_sub_bands = IWL_NUM_SUB_BANDS_V1;
gain = cmd->v1.gain[0];
*cmd_size = sizeof(cmd->v1);
- cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags);
+ cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags & IWL_PPAG_CMD_V1_MASK);
if (fwrt->ppag_bios_rev >= 1) {
/* in this case FW supports revision 0 */
IWL_DEBUG_RADIO(fwrt,
"PPAG table rev is %d, send truncated table\n",
fwrt->ppag_bios_rev);
}
- } else if (cmd_ver >= 2 && cmd_ver <= 6) {
+ } else if (cmd_ver == 5) {
num_sub_bands = IWL_NUM_SUB_BANDS_V2;
- gain = cmd->v2.gain[0];
- *cmd_size = sizeof(cmd->v2);
- cmd->v2.flags = cpu_to_le32(fwrt->ppag_flags);
+ gain = cmd->v5.gain[0];
+ *cmd_size = sizeof(cmd->v5);
+ cmd->v5.flags = cpu_to_le32(fwrt->ppag_flags & IWL_PPAG_CMD_V5_MASK);
if (fwrt->ppag_bios_rev == 0) {
/* in this case FW supports revisions 1,2 or 3 */
IWL_DEBUG_RADIO(fwrt,
@@ -363,11 +374,11 @@ int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
}
} else if (cmd_ver == 7) {
num_sub_bands = IWL_NUM_SUB_BANDS_V2;
- gain = cmd->v3.gain[0];
- *cmd_size = sizeof(cmd->v3);
- cmd->v3.ppag_config_info.table_source = fwrt->ppag_bios_source;
- cmd->v3.ppag_config_info.table_revision = fwrt->ppag_bios_rev;
- cmd->v3.ppag_config_info.value = cpu_to_le32(fwrt->ppag_flags);
+ gain = cmd->v7.gain[0];
+ *cmd_size = sizeof(cmd->v7);
+ cmd->v7.ppag_config_info.table_source = fwrt->ppag_bios_source;
+ cmd->v7.ppag_config_info.table_revision = fwrt->ppag_bios_rev;
+ cmd->v7.ppag_config_info.value = cpu_to_le32(fwrt->ppag_flags);
} else {
IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n");
return -EINVAL;
@@ -378,30 +389,22 @@ int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
"PPAG MODE bits were read from bios: %d\n",
fwrt->ppag_flags);
- if (cmd_ver == 6)
- cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V6_MASK);
- else if (cmd_ver == 5)
- cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V5_MASK);
- else if (cmd_ver < 5)
- cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V4_MASK);
-
- if ((cmd_ver == 1 &&
- !fw_has_capa(&fwrt->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT)) ||
- (cmd_ver == 2 && fwrt->ppag_bios_rev >= 2)) {
+ if (cmd_ver == 1 &&
+ !fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT)) {
cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
IWL_DEBUG_RADIO(fwrt, "masking ppag China bit\n");
} else {
IWL_DEBUG_RADIO(fwrt, "isn't masking ppag China bit\n");
}
- /* The 'flags' field is the same in v1 and v2 so we can just
+ /* The 'flags' field is the same in v1 and v5 so we can just
* use v1 to access it.
*/
IWL_DEBUG_RADIO(fwrt,
"PPAG MODE bits going to be sent: %d\n",
(cmd_ver < 7) ? le32_to_cpu(cmd->v1.flags) :
- le32_to_cpu(cmd->v3.ppag_config_info.value));
+ le32_to_cpu(cmd->v7.ppag_config_info.value));
for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
for (j = 0; j < num_sub_bands; j++) {
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
index a07c512b6ed4..735482e7adf5 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
@@ -12,7 +12,6 @@
#include "fw/api/phy.h"
#include "fw/api/config.h"
#include "fw/api/nvm-reg.h"
-#include "fw/img.h"
#include "iwl-trans.h"
#define BIOS_SAR_MAX_PROFILE_NUM 4
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index 0444a736c2b2..806f9bcdf4f5 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -113,6 +113,10 @@ struct iwl_txf_iter_data {
* @phy_filters: specific phy filters as read from WPFC BIOS table
* @ppag_bios_rev: PPAG BIOS revision
* @ppag_bios_source: see &enum bios_source
+ * @acpi_dsm_funcs_valid: bitmap indicating which DSM values are valid;
+ * zero (the default initialization) means the bitmap hasn't been read
+ * yet, and BIT(0) is set once it has been, since function 0 (which
+ * returns this bitmap) is always supported
*/
struct iwl_fw_runtime {
struct iwl_trans *trans;
@@ -146,8 +150,6 @@ struct iwl_fw_runtime {
unsigned long non_collect_ts_start[IWL_FW_INI_TIME_POINT_NUM];
u32 *d3_debug_data;
u32 lmac_err_id[MAX_NUM_LMAC];
- u32 tcm_err_id[MAX_NUM_TCM];
- u32 rcm_err_id[MAX_NUM_RCM];
u32 umac_err_id;
struct iwl_txf_iter_data txf_iter_data;
@@ -189,6 +191,10 @@ struct iwl_fw_runtime {
bool uats_valid;
u8 uefi_tables_lock_status;
struct iwl_phy_specific_cfg phy_filters;
+
+#ifdef CONFIG_ACPI
+ u32 acpi_dsm_funcs_valid;
+#endif
};
void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
index 48126ec6b94b..4ae4d215e633 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
@@ -727,6 +727,8 @@ int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
struct uefi_cnv_var_general_cfg *data;
int ret = -EINVAL;
+ BUILD_BUG_ON(ARRAY_SIZE(data->functions) < DSM_FUNC_NUM_FUNCS);
+
/* Not supported function index */
if (func >= DSM_FUNC_NUM_FUNCS || func == 5)
return -EOPNOTSUPP;
@@ -742,8 +744,9 @@ int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
goto out;
}
- if (ARRAY_SIZE(data->functions) != UEFI_MAX_DSM_FUNCS) {
- IWL_DEBUG_RADIO(fwrt, "Invalid size of DSM functions array\n");
+ if (!(data->functions[DSM_FUNC_QUERY] & BIT(func))) {
+ IWL_DEBUG_RADIO(fwrt, "DSM func %d not in 0x%x\n",
+ func, data->functions[DSM_FUNC_QUERY]);
goto out;
}
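
The rewritten check relies on DSM function 0 (DSM_FUNC_QUERY) returning a bitmap of the supported functions, instead of validating the array size. A minimal sketch of the predicate, with BIT() expanded for portability:

/* Sketch of the new validity test: a DSM function is usable only if
 * its bit is set in the bitmap returned by DSM_FUNC_QUERY (func 0).
 */
static int dsm_func_supported(u32 query_bitmap, unsigned int func)
{
	return !!(query_bitmap & (1u << func));
}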
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 30e5f5a5cd89..a607e7ab914b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -11,6 +11,7 @@
#include <linux/netdevice.h>
#include <linux/ieee80211.h>
#include <linux/nl80211.h>
+#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include "iwl-csr.h"
#include "iwl-drv.h"
@@ -107,6 +108,9 @@ enum iwl_nvm_type {
MODULE_FIRMWARE(pfx "-" __stringify(api) ".ucode"); \
MODULE_FIRMWARE(pfx ".pnvm")
+#define IWL_CORE_FW(pfx, core) \
+ MODULE_FIRMWARE(pfx "-c" __stringify(core) ".ucode")
+
static inline u8 num_of_ant(u8 mask)
{
return !!((mask) & ANT_A) +
@@ -192,8 +196,8 @@ struct iwl_family_base_params {
u8 max_ll_items;
u8 led_compensation;
- u8 ucode_api_max;
- u8 ucode_api_min;
+ u16 ucode_api_max;
+ u16 ucode_api_min;
u32 mac_addr_from_csr:10;
u8 nvm_hw_section_num;
netdev_features_t features;
@@ -211,6 +215,34 @@ struct iwl_family_base_params {
};
/*
+ * FW is released as "core N release", and we used to have a
+ * gap of 3 between the API version and core number. Now the
+ * reported API version will be 1000 + core and we encode it
+ * in the filename as "c<core>".
+ */
+#define API_IS_CORE_START 1000
+#define API_TO_CORE_OFFS 3
+#define ENCODE_CORE_AS_API(core) (API_IS_CORE_START + (core))
+
+static inline bool iwl_api_is_core_number(int api)
+{
+ return api >= API_IS_CORE_START;
+}
+
+static inline int iwl_api_to_core(int api)
+{
+ if (iwl_api_is_core_number(api))
+ return api - API_IS_CORE_START;
+
+ return api - API_TO_CORE_OFFS;
+}
+
+#define FW_API_FMT "%s%d"
+#define FW_API_ARG(n) \
+ iwl_api_is_core_number(n) ? "c" : "", \
+ iwl_api_is_core_number(n) ? (n) - API_IS_CORE_START : (n)
+
+/*
* @stbc: support Tx STBC and 1*SS Rx STBC
* @ldpc: support Tx/Rx with LDPC
* @use_rts_for_aggregation: use rts/cts protection for HT traffic
@@ -422,8 +454,8 @@ struct iwl_rf_cfg {
u8 valid_tx_ant;
u8 valid_rx_ant;
u8 non_shared_ant;
- u8 ucode_api_max;
- u8 ucode_api_min;
+ u16 ucode_api_max;
+ u16 ucode_api_min;
u16 num_rbds;
};
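
The core-number encoding above is easy to get wrong, so here is a minimal, self-contained sketch (plain C, outside the kernel tree) of how an API number maps to a core number and to the firmware filename suffix; the constants mirror API_IS_CORE_START and API_TO_CORE_OFFS from this hunk.

#include <stdio.h>

#define API_IS_CORE_START 1000
#define API_TO_CORE_OFFS  3

static int api_is_core_number(int api) { return api >= API_IS_CORE_START; }

static int api_to_core(int api)
{
	return api_is_core_number(api) ? api - API_IS_CORE_START
				       : api - API_TO_CORE_OFFS;
}

int main(void)
{
	int apis[] = { 101, 1099 }; /* old-scheme API 101, "core 99" encoded */

	for (int i = 0; i < 2; i++) {
		int n = apis[i];

		/* mirrors FW_API_FMT/FW_API_ARG: "c<core>" vs "<api>" */
		printf("iwlwifi-XX-%s%d.ucode -> core %d\n",
		       api_is_core_number(n) ? "c" : "",
		       api_is_core_number(n) ? n - API_IS_CORE_START : n,
		       api_to_core(n));
	}
	return 0;
}

Running this prints "iwlwifi-XX-101.ucode -> core 98" and "iwlwifi-XX-c99.ucode -> core 99", which matches the stated gap of 3 between the old API numbering and the core numbering.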
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 28aad975434b..607fcea6f4ef 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -337,10 +337,18 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
return -EINVAL;
}
+ if (CSR_HW_RFID_TYPE(drv->trans->info.hw_rf_id) == IWL_CFG_RF_TYPE_WH &&
+ CSR_HW_RFID_STEP(drv->trans->info.hw_rf_id) == SILICON_A_STEP) {
+ IWL_ERR(drv, "WH A step is not supported\n");
+ return -EINVAL;
+ }
+
fw_name_pre = iwl_drv_get_fwname_pre(drv->trans, _fw_name_pre);
if (first)
drv->fw_index = ucode_api_max;
+ else if (drv->fw_index == ENCODE_CORE_AS_API(99))
+ drv->fw_index = 101; /* last API-scheme number below core 99 */
else
drv->fw_index--;
@@ -348,13 +356,15 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
IWL_ERR(drv, "no suitable firmware found!\n");
if (ucode_api_min == ucode_api_max) {
- IWL_ERR(drv, "%s-%d is required\n", fw_name_pre,
- ucode_api_max);
+ IWL_ERR(drv, "%s-" FW_API_FMT " is required\n",
+ fw_name_pre, FW_API_ARG(ucode_api_max));
} else {
- IWL_ERR(drv, "minimum version required: %s-%d\n",
- fw_name_pre, ucode_api_min);
- IWL_ERR(drv, "maximum version supported: %s-%d\n",
- fw_name_pre, ucode_api_max);
+ IWL_ERR(drv,
+ "minimum version required: %s-" FW_API_FMT "\n",
+ fw_name_pre, FW_API_ARG(ucode_api_min));
+ IWL_ERR(drv,
+ "maximum version supported: %s-" FW_API_FMT "\n",
+ fw_name_pre, FW_API_ARG(ucode_api_max));
}
IWL_ERR(drv,
@@ -362,8 +372,9 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
return -ENOENT;
}
- snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s-%d.ucode",
- fw_name_pre, drv->fw_index);
+ snprintf(drv->firmware_name, sizeof(drv->firmware_name),
+ "%s-" FW_API_FMT ".ucode",
+ fw_name_pre, FW_API_ARG(drv->fw_index));
IWL_DEBUG_FW_INFO(drv, "attempting to load firmware '%s'\n",
drv->firmware_name);
@@ -1588,6 +1599,7 @@ static void _iwl_op_mode_stop(struct iwl_drv *drv)
*/
static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
{
+ unsigned int min_core, max_core, loaded_core;
struct iwl_drv *drv = context;
struct iwl_fw *fw = &drv->fw;
const struct iwl_ucode_header *ucode;
@@ -1650,11 +1662,24 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
* firmware filename ... but we don't check for that and only rely
* on the API version read from firmware header from here on forward
*/
- if (api_ver < api_min || api_ver > api_max) {
+
+ /*
+ * If a -cN.ucode file was loaded, core version == file version;
+ * otherwise, core version == file version (API version) - 3.
+ */
+ if (iwl_api_is_core_number(drv->fw_index))
+ loaded_core = api_ver;
+ else
+ loaded_core = api_ver - API_TO_CORE_OFFS;
+
+ min_core = iwl_api_to_core(api_min);
+ max_core = iwl_api_to_core(api_max);
+
+ if (loaded_core < min_core || loaded_core > max_core) {
IWL_ERR(drv,
"Driver unable to support your firmware API. "
- "Driver supports v%u, firmware is v%u.\n",
- api_max, api_ver);
+ "Driver supports FW core %u..%u, firmware is %u.\n",
+ min_core, max_core, loaded_core);
goto try_again;
}
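
The driver now converts both the loaded file's version and its supported range to core numbers before comparing. A self-contained sketch of that window check (the 1000 threshold and offset of 3 mirror the config.h constants above):

static int api_to_core(int api)
{
	return api >= 1000 ? api - 1000 : api - 3;
}

/* Illustrative version-window check mirroring the hunk above: a
 * -cN.ucode file (index >= 1000) reports its core directly, an
 * old-scheme file reports API == core + 3.
 */
static int fw_core_in_range(int fw_index, int api_ver,
			    int api_min, int api_max)
{
	int loaded_core = fw_index >= 1000 ? api_ver : api_ver - 3;

	return loaded_core >= api_to_core(api_min) &&
	       loaded_core <= api_to_core(api_max);
}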
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
index 5e483a55a4ba..b1944584c693 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -3,7 +3,6 @@
* Copyright (C) 2003-2014, 2018-2022, 2024-2025 Intel Corporation
* Copyright (C) 2015-2016 Intel Deutschland GmbH
*/
-#include <linux/delay.h>
#include <linux/device.h>
#include <linux/export.h>
@@ -13,6 +12,7 @@
#include "iwl-debug.h"
#include "iwl-prph.h"
#include "iwl-fh.h"
+#include "pcie/gen1_2/internal.h"
void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
@@ -160,7 +160,7 @@ int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
do {
if ((iwl_read_prph(trans, addr) & mask) == (bits & mask))
- return t;
+ return 0;
udelay(IWL_POLL_INTERVAL);
t += IWL_POLL_INTERVAL;
} while (t < timeout);
@@ -396,96 +396,11 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf)
return 0;
}
-#define IWL_HOST_MON_BLOCK_PEMON 0x00
-#define IWL_HOST_MON_BLOCK_HIPM 0x22
-
-#define IWL_HOST_MON_BLOCK_PEMON_VEC0 0x00
-#define IWL_HOST_MON_BLOCK_PEMON_VEC1 0x01
-#define IWL_HOST_MON_BLOCK_PEMON_WFPM 0x06
-
-static void iwl_dump_host_monitor_block(struct iwl_trans *trans,
- u32 block, u32 vec, u32 iter)
-{
- int i;
-
- IWL_ERR(trans, "Host monitor block 0x%x vector 0x%x\n", block, vec);
- iwl_write32(trans, CSR_MONITOR_CFG_REG, (block << 8) | vec);
- for (i = 0; i < iter; i++)
- IWL_ERR(trans, " value [iter %d]: 0x%08x\n",
- i, iwl_read32(trans, CSR_MONITOR_STATUS_REG));
-}
-
-static void iwl_dump_host_monitor(struct iwl_trans *trans)
-{
- switch (trans->mac_cfg->device_family) {
- case IWL_DEVICE_FAMILY_22000:
- case IWL_DEVICE_FAMILY_AX210:
- IWL_ERR(trans, "CSR_RESET = 0x%x\n",
- iwl_read32(trans, CSR_RESET));
- iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_PEMON,
- IWL_HOST_MON_BLOCK_PEMON_VEC0, 15);
- iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_PEMON,
- IWL_HOST_MON_BLOCK_PEMON_VEC1, 15);
- iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_PEMON,
- IWL_HOST_MON_BLOCK_PEMON_WFPM, 15);
- iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_HIPM,
- IWL_HOST_MON_BLOCK_PEMON_VEC0, 1);
- break;
- default:
- /* not supported yet */
- return;
- }
-}
-
-int iwl_finish_nic_init(struct iwl_trans *trans)
+int iwl_trans_activate_nic(struct iwl_trans *trans)
{
- const struct iwl_mac_cfg *mac_cfg = trans->mac_cfg;
- u32 poll_ready;
- int err;
-
- if (mac_cfg->bisr_workaround) {
- /* ensure the TOP FSM isn't still in previous reset */
- mdelay(2);
- }
-
- /*
- * Set "initialization complete" bit to move adapter from
- * D0U* --> D0A* (powered-up active) state.
- */
- if (mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
- iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ |
- CSR_GP_CNTRL_REG_FLAG_MAC_INIT);
- poll_ready = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
- } else {
- iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
- poll_ready = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY;
- }
-
- if (mac_cfg->device_family == IWL_DEVICE_FAMILY_8000)
- udelay(2);
-
- /*
- * Wait for clock stabilization; once stabilized, access to
- * device-internal resources is supported, e.g. iwl_write_prph()
- * and accesses to uCode SRAM.
- */
- err = iwl_poll_bits(trans, CSR_GP_CNTRL, poll_ready, 25000);
- if (err < 0) {
- IWL_DEBUG_INFO(trans, "Failed to wake NIC\n");
-
- iwl_dump_host_monitor(trans);
- }
-
- if (mac_cfg->bisr_workaround) {
- /* ensure BISR shift has finished */
- udelay(200);
- }
-
- return err < 0 ? err : 0;
+ return iwl_pcie_gen1_2_activate_nic(trans);
}
-IWL_EXPORT_SYMBOL(iwl_finish_nic_init);
+IWL_EXPORT_SYMBOL(iwl_trans_activate_nic);
void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
u32 sw_err_bit)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
index 731cda1a4e66..5bcec239ffc4 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
@@ -57,7 +57,7 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
void iwl_force_nmi(struct iwl_trans *trans);
-int iwl_finish_nic_init(struct iwl_trans *trans);
+int iwl_trans_activate_nic(struct iwl_trans *trans);
/* Error handling */
int iwl_dump_fh(struct iwl_trans *trans, char **buf);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index a67b9572aac3..23465e4c4b39 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -140,50 +140,6 @@ static struct ieee80211_rate iwl_cfg80211_rates[] = {
#define N_RATES_52 (N_RATES_24 - RATES_52_OFFS)
/**
- * enum iwl_nvm_channel_flags - channel flags in NVM
- * @NVM_CHANNEL_VALID: channel is usable for this SKU/geo
- * @NVM_CHANNEL_IBSS: usable as an IBSS channel and deprecated
- * when %IWL_NVM_SBANDS_FLAGS_LAR enabled.
- * @NVM_CHANNEL_ALLOW_20MHZ_ACTIVITY: active scanning allowed and
- * AP allowed only in 20 MHz. Valid only
- * when %IWL_NVM_SBANDS_FLAGS_LAR enabled.
- * @NVM_CHANNEL_ACTIVE: active scanning allowed and allows IBSS
- * when %IWL_NVM_SBANDS_FLAGS_LAR enabled.
- * @NVM_CHANNEL_RADAR: radar detection required
- * @NVM_CHANNEL_INDOOR_ONLY: only indoor use is allowed
- * @NVM_CHANNEL_GO_CONCURRENT: GO operation is allowed when connected to BSS
- * on same channel on 2.4 or same UNII band on 5.2
- * @NVM_CHANNEL_UNIFORM: uniform spreading required
- * @NVM_CHANNEL_20MHZ: 20 MHz channel okay
- * @NVM_CHANNEL_40MHZ: 40 MHz channel okay
- * @NVM_CHANNEL_80MHZ: 80 MHz channel okay
- * @NVM_CHANNEL_160MHZ: 160 MHz channel okay
- * @NVM_CHANNEL_DC_HIGH: DC HIGH required/allowed (?)
- * @NVM_CHANNEL_VLP: client support connection to UHB VLP AP
- * @NVM_CHANNEL_AFC: client support connection to UHB AFC AP
- * @NVM_CHANNEL_VLP_AP_NOT_ALLOWED: UHB VLP AP not allowed,
- * Valid only when %NVM_CHANNEL_VLP is enabled.
- */
-enum iwl_nvm_channel_flags {
- NVM_CHANNEL_VALID = BIT(0),
- NVM_CHANNEL_IBSS = BIT(1),
- NVM_CHANNEL_ALLOW_20MHZ_ACTIVITY = BIT(2),
- NVM_CHANNEL_ACTIVE = BIT(3),
- NVM_CHANNEL_RADAR = BIT(4),
- NVM_CHANNEL_INDOOR_ONLY = BIT(5),
- NVM_CHANNEL_GO_CONCURRENT = BIT(6),
- NVM_CHANNEL_UNIFORM = BIT(7),
- NVM_CHANNEL_20MHZ = BIT(8),
- NVM_CHANNEL_40MHZ = BIT(9),
- NVM_CHANNEL_80MHZ = BIT(10),
- NVM_CHANNEL_160MHZ = BIT(11),
- NVM_CHANNEL_DC_HIGH = BIT(12),
- NVM_CHANNEL_VLP = BIT(13),
- NVM_CHANNEL_AFC = BIT(14),
- NVM_CHANNEL_VLP_AP_NOT_ALLOWED = BIT(15),
-};
-
-/**
* enum iwl_reg_capa_flags_v1 - global flags applied for the whole regulatory
* domain.
* @REG_CAPA_V1_BF_CCD_LOW_BAND: Beam-forming or Cyclic Delay Diversity in the
@@ -282,30 +238,6 @@ enum iwl_reg_capa_flags_v4 {
*/
#define REG_CAPA_V4_RESP_VER 8
-/**
- * struct iwl_reg_capa - struct for global regulatory capabilities, Used for
- * handling the different APIs of reg_capa_flags.
- *
- * @allow_40mhz: 11n channel with a width of 40Mhz is allowed
- * for this regulatory domain.
- * @allow_80mhz: 11ac channel with a width of 80Mhz is allowed
- * for this regulatory domain (valid only in 5 and 6 Ghz).
- * @allow_160mhz: 11ac channel with a width of 160Mhz is allowed
- * for this regulatory domain (valid only in 5 and 6 Ghz).
- * @allow_320mhz: 11be channel with a width of 320Mhz is allowed
- * for this regulatory domain (valid only in 6 Ghz).
- * @disable_11ax: 11ax is forbidden for this regulatory domain.
- * @disable_11be: 11be is forbidden for this regulatory domain.
- */
-struct iwl_reg_capa {
- bool allow_40mhz;
- bool allow_80mhz;
- bool allow_160mhz;
- bool allow_320mhz;
- bool disable_11ax;
- bool disable_11be;
-};
-
static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
int chan, u32 flags)
{
@@ -1042,10 +974,6 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
iftype_data->he_cap.he_cap_elem.phy_cap_info[2] |=
IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO;
- if (fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_BROADCAST_TWT))
- iftype_data->he_cap.he_cap_elem.mac_cap_info[2] |=
- IEEE80211_HE_MAC_CAP2_BCAST_TWT;
-
if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
!is_ap) {
iftype_data->vendor_elems.data = iwl_vendor_caps;
@@ -1600,9 +1528,10 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg,
}
IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
-static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
- int ch_idx, u16 nvm_flags,
- struct iwl_reg_capa reg_capa)
+VISIBLE_IF_IWLWIFI_KUNIT
+u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
+ int ch_idx, u16 nvm_flags,
+ struct iwl_reg_capa reg_capa)
{
u32 flags = NL80211_RRF_NO_HT40;
@@ -1692,6 +1621,7 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
return flags;
}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_nvm_get_regdom_bw_flags);
static struct iwl_reg_capa iwl_get_reg_capa(u32 flags, u8 resp_ver)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
index 9ce9fa4e78fd..cbc92abf9f87 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
@@ -21,6 +21,80 @@ enum iwl_nvm_sbands_flags {
IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ = BIT(1),
};
+/**
+ * struct iwl_reg_capa - struct for global regulatory capabilities, used for
+ * handling the different APIs of reg_capa_flags.
+ *
+ * @allow_40mhz: 11n channel with a width of 40 MHz is allowed
+ * for this regulatory domain.
+ * @allow_80mhz: 11ac channel with a width of 80 MHz is allowed
+ * for this regulatory domain (valid only in 5 and 6 GHz).
+ * @allow_160mhz: 11ac channel with a width of 160 MHz is allowed
+ * for this regulatory domain (valid only in 5 and 6 GHz).
+ * @allow_320mhz: 11be channel with a width of 320 MHz is allowed
+ * for this regulatory domain (valid only in 6 GHz).
+ * @disable_11ax: 11ax is forbidden for this regulatory domain.
+ * @disable_11be: 11be is forbidden for this regulatory domain.
+ */
+struct iwl_reg_capa {
+ bool allow_40mhz;
+ bool allow_80mhz;
+ bool allow_160mhz;
+ bool allow_320mhz;
+ bool disable_11ax;
+ bool disable_11be;
+};
+
+/**
+ * enum iwl_nvm_channel_flags - channel flags in NVM
+ * @NVM_CHANNEL_VALID: channel is usable for this SKU/geo
+ * @NVM_CHANNEL_IBSS: usable as an IBSS channel and deprecated
+ * when %IWL_NVM_SBANDS_FLAGS_LAR enabled.
+ * @NVM_CHANNEL_ALLOW_20MHZ_ACTIVITY: active scanning allowed and
+ * AP allowed only in 20 MHz. Valid only
+ * when %IWL_NVM_SBANDS_FLAGS_LAR enabled.
+ * @NVM_CHANNEL_ACTIVE: active scanning allowed and allows IBSS
+ * when %IWL_NVM_SBANDS_FLAGS_LAR enabled.
+ * @NVM_CHANNEL_RADAR: radar detection required
+ * @NVM_CHANNEL_INDOOR_ONLY: only indoor use is allowed
+ * @NVM_CHANNEL_GO_CONCURRENT: GO operation is allowed when connected to BSS
+ * on same channel on 2.4 or same UNII band on 5.2
+ * @NVM_CHANNEL_UNIFORM: uniform spreading required
+ * @NVM_CHANNEL_20MHZ: 20 MHz channel okay
+ * @NVM_CHANNEL_40MHZ: 40 MHz channel okay
+ * @NVM_CHANNEL_80MHZ: 80 MHz channel okay
+ * @NVM_CHANNEL_160MHZ: 160 MHz channel okay
+ * @NVM_CHANNEL_DC_HIGH: DC HIGH required/allowed (?)
+ * @NVM_CHANNEL_VLP: client support connection to UHB VLP AP
+ * @NVM_CHANNEL_AFC: client support connection to UHB AFC AP
+ * @NVM_CHANNEL_VLP_AP_NOT_ALLOWED: UHB VLP AP not allowed,
+ * Valid only when %NVM_CHANNEL_VLP is enabled.
+ */
+enum iwl_nvm_channel_flags {
+ NVM_CHANNEL_VALID = BIT(0),
+ NVM_CHANNEL_IBSS = BIT(1),
+ NVM_CHANNEL_ALLOW_20MHZ_ACTIVITY = BIT(2),
+ NVM_CHANNEL_ACTIVE = BIT(3),
+ NVM_CHANNEL_RADAR = BIT(4),
+ NVM_CHANNEL_INDOOR_ONLY = BIT(5),
+ NVM_CHANNEL_GO_CONCURRENT = BIT(6),
+ NVM_CHANNEL_UNIFORM = BIT(7),
+ NVM_CHANNEL_20MHZ = BIT(8),
+ NVM_CHANNEL_40MHZ = BIT(9),
+ NVM_CHANNEL_80MHZ = BIT(10),
+ NVM_CHANNEL_160MHZ = BIT(11),
+ NVM_CHANNEL_DC_HIGH = BIT(12),
+ NVM_CHANNEL_VLP = BIT(13),
+ NVM_CHANNEL_AFC = BIT(14),
+ NVM_CHANNEL_VLP_AP_NOT_ALLOWED = BIT(15),
+};
+
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
+ int ch_idx, u16 nvm_flags,
+ struct iwl_reg_capa reg_capa);
+#endif
+
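Moving iwl_reg_capa and the NVM channel flags into this header, together with the VISIBLE_IF_IWLWIFI_KUNIT export above, is what lets a KUnit test call iwl_nvm_get_regdom_bw_flags() directly. A hedged sketch of such a test follows; the test itself is hypothetical (not taken from the tree), and the expectation assumes that without NVM_CHANNEL_40MHZ the function's initial NL80211_RRF_NO_HT40 restriction stays set.

/* Hypothetical KUnit test enabled by this export. */
static void regdom_bw_flags_test(struct kunit *test)
{
	const u16 nvm_chan[] = { 1 };
	struct iwl_reg_capa capa = {}; /* allow_40mhz == false */
	u32 flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, 0,
						NVM_CHANNEL_VALID, capa);

	/* 40 MHz is neither allowed by the NVM nor by reg_capa */
	KUNIT_EXPECT_NE(test, flags & NL80211_RRF_NO_HT40, 0u);
}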
/*
* iwl_parse_nvm_data - parse NVM data and return values
*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index 3694b41d6621..5232f66c2d52 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -268,9 +268,7 @@ static void iwl_trans_restart_wk(struct work_struct *wk)
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
- const struct iwl_mac_cfg *mac_cfg,
- unsigned int txcmd_size,
- unsigned int txcmd_align)
+ const struct iwl_mac_cfg *mac_cfg)
{
struct iwl_trans *trans;
#ifdef CONFIG_LOCKDEP
@@ -292,22 +290,12 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
INIT_DELAYED_WORK(&trans->restart.wk, iwl_trans_restart_wk);
- snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
- "iwl_cmd_pool:%s", dev_name(trans->dev));
- trans->dev_cmd_pool =
- kmem_cache_create(trans->dev_cmd_pool_name,
- txcmd_size, txcmd_align,
- SLAB_HWCACHE_ALIGN, NULL);
- if (!trans->dev_cmd_pool)
- return NULL;
-
return trans;
}
void iwl_trans_free(struct iwl_trans *trans)
{
cancel_delayed_work_sync(&trans->restart.wk);
- kmem_cache_destroy(trans->dev_cmd_pool);
}
int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
@@ -318,9 +306,6 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
test_bit(STATUS_RFKILL_OPMODE, &trans->status)))
return -ERFKILL;
- if (unlikely(test_bit(STATUS_SUSPENDED, &trans->status)))
- return -EHOSTDOWN;
-
if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
return -EIO;
@@ -348,6 +333,19 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
}
IWL_EXPORT_SYMBOL(iwl_trans_send_cmd);
+struct iwl_device_tx_cmd *iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
+{
+ return iwl_pcie_gen1_2_alloc_tx_cmd(trans);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_alloc_tx_cmd);
+
+void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
+ struct iwl_device_tx_cmd *dev_cmd)
+{
+ iwl_pcie_gen1_2_free_tx_cmd(trans, dev_cmd);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_free_tx_cmd);
+
/* Comparator for struct iwl_hcmd_names.
* Used in the binary search over a list of host commands.
*
@@ -399,7 +397,7 @@ void iwl_trans_op_mode_enter(struct iwl_trans *trans,
WARN_ON_ONCE(!trans->conf.rx_mpdu_cmd);
- iwl_trans_pcie_op_mode_enter(trans);
+ iwl_pcie_gen1_2_op_mode_enter(trans);
}
IWL_EXPORT_SYMBOL(iwl_trans_op_mode_enter);
@@ -408,8 +406,6 @@ int iwl_trans_start_hw(struct iwl_trans *trans)
might_sleep();
clear_bit(STATUS_TRANS_RESET_IN_PROGRESS, &trans->status);
- /* opmode may not resume if it detects errors */
- clear_bit(STATUS_SUSPENDED, &trans->status);
return iwl_trans_pcie_start_hw(trans);
}
@@ -507,33 +503,19 @@ iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
sanitize_ops, sanitize_ctx);
}
-int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, bool reset)
+int iwl_trans_d3_suspend(struct iwl_trans *trans, bool reset)
{
- int err;
-
might_sleep();
- err = iwl_trans_pcie_d3_suspend(trans, test, reset);
-
- if (!err)
- set_bit(STATUS_SUSPENDED, &trans->status);
-
- return err;
+ return iwl_trans_pcie_d3_suspend(trans, reset);
}
IWL_EXPORT_SYMBOL(iwl_trans_d3_suspend);
-int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status,
- bool test, bool reset)
+int iwl_trans_d3_resume(struct iwl_trans *trans, bool reset)
{
- int err;
-
might_sleep();
- err = iwl_trans_pcie_d3_resume(trans, status, test, reset);
-
- clear_bit(STATUS_SUSPENDED, &trans->status);
-
- return err;
+ return iwl_trans_pcie_d3_resume(trans, reset);
}
IWL_EXPORT_SYMBOL(iwl_trans_d3_resume);
@@ -825,3 +807,18 @@ void iwl_trans_set_reduce_power(struct iwl_trans *trans,
{
iwl_trans_pcie_ctx_info_v2_set_reduce_power(trans, capa);
}
+
+bool iwl_trans_is_pm_supported(struct iwl_trans *trans)
+{
+ if (WARN_ON(trans->mac_cfg->gen2))
+ return false;
+
+ return iwl_pcie_gen1_is_pm_supported(trans);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_is_pm_supported);
+
+bool iwl_trans_is_ltr_enabled(struct iwl_trans *trans)
+{
+ return iwl_pcie_gen1_2_is_ltr_enabled(trans);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_is_ltr_enabled);
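
With the kmem_cache pool gone from the transport core, iwl_trans_alloc_tx_cmd()/iwl_trans_free_tx_cmd() become exported functions backed by the PCIe layer, but the op-mode usage pattern is unchanged. A sketch, assuming the usual 0-on-success convention for iwl_trans_tx() and condensed error handling:

/* Sketch of op-mode usage after this change. */
static int send_frame(struct iwl_trans *trans, struct sk_buff *skb, int queue)
{
	struct iwl_device_tx_cmd *dev_cmd = iwl_trans_alloc_tx_cmd(trans);

	if (!dev_cmd)
		return -ENOMEM;

	if (iwl_trans_tx(trans, skb, dev_cmd, queue)) {
		/* TX was not queued: the caller still owns dev_cmd */
		iwl_trans_free_tx_cmd(trans, dev_cmd);
		return -EIO;
	}

	/* on success, dev_cmd is freed on TX completion */
	return 0;
}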
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index d0e658801c2e..a0cc5d7745e8 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -275,16 +275,6 @@ static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
#define IWL_9000_MAX_RX_HW_QUEUES 1
/**
- * enum iwl_d3_status - WoWLAN image/device status
- * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
- * @IWL_D3_STATUS_RESET: device was reset while suspended
- */
-enum iwl_d3_status {
- IWL_D3_STATUS_ALIVE,
- IWL_D3_STATUS_RESET,
-};
-
-/**
* enum iwl_trans_status: transport status flags
* @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
* @STATUS_DEVICE_ENABLED: APM is enabled
@@ -294,16 +284,12 @@ enum iwl_d3_status {
* @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
* @STATUS_FW_ERROR: the fw is in error state
* @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
- * @STATUS_SUPPRESS_CMD_ERROR_ONCE: suppress "FW error in SYNC CMD" once,
- * e.g. for testing
* @STATUS_IN_SW_RESET: device is undergoing reset, cleared by opmode
* via iwl_trans_finish_sw_reset()
* @STATUS_RESET_PENDING: reset worker was scheduled, but didn't dump
* the firmware state yet
* @STATUS_TRANS_RESET_IN_PROGRESS: reset is still in progress, don't
* attempt another reset yet
- * @STATUS_SUSPENDED: device is suspended, don't send commands that
- * aren't marked accordingly
*/
enum iwl_trans_status {
STATUS_SYNC_HCMD_ACTIVE,
@@ -314,11 +300,9 @@ enum iwl_trans_status {
STATUS_RFKILL_OPMODE,
STATUS_FW_ERROR,
STATUS_TRANS_DEAD,
- STATUS_SUPPRESS_CMD_ERROR_ONCE,
STATUS_IN_SW_RESET,
STATUS_RESET_PENDING,
STATUS_TRANS_RESET_IN_PROGRESS,
- STATUS_SUSPENDED,
};
static inline int
@@ -658,8 +642,6 @@ struct iwl_pc_data {
* @restart_required: indicates debug restart is required
* @last_tp_resetfw: last handling of reset during debug timepoint
* @imr_data: IMR debug data allocation
- * @dump_file_name_ext: dump file name extension
- * @dump_file_name_ext_valid: dump file name extension if valid or not
* @num_pc: number of program counter for cpu
* @pc_data: details of the program counter
* @yoyo_bin_loaded: tells if a yoyo debug file has been loaded
@@ -698,8 +680,6 @@ struct iwl_trans_debug {
bool restart_required;
u32 last_tp_resetfw;
struct iwl_imr_data imr_data;
- u8 dump_file_name_ext[IWL_FW_INI_MAX_NAME];
- bool dump_file_name_ext_valid;
u32 num_pc;
struct iwl_pc_data *pc_data;
bool yoyo_bin_loaded;
@@ -830,7 +810,6 @@ struct iwl_txq {
* @hw_rf_id: the device RF ID
* @hw_cnv_id: the device CNV ID
* @hw_crf_id: the device CRF ID
- * @hw_wfpm_id: the device wfpm ID
* @hw_id: the ID of the device / sub-device
* Bits 0:15 represent the sub-device ID
* Bits 16:31 represent the device ID.
@@ -846,7 +825,6 @@ struct iwl_trans_info {
u32 hw_rf_id;
u32 hw_crf_id;
u32 hw_cnv_id;
- u32 hw_wfpm_id;
u32 hw_id;
u8 pcie_link_speed;
u8 num_rxqs;
@@ -866,14 +844,11 @@ struct iwl_trans_info {
* @dev: pointer to struct device * that represents the device
* @info: device information for use by other layers
* @pnvm_loaded: indicates PNVM was loaded
- * @pm_support: set to true in start_hw if link pm is supported
- * @ltr_enabled: set to true if the LTR is enabled
+ * @suppress_cmd_error_once: suppress "FW error in SYNC CMD" once,
+ * e.g. for testing
* @fail_to_parse_pnvm_image: set to true if pnvm parsing failed
* @reduce_power_loaded: indicates reduced power section was loaded
* @failed_to_load_reduce_power_image: set to true if pnvm loading failed
- * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
- * The user should use iwl_trans_{alloc,free}_tx_cmd.
- * @dev_cmd_pool_name: name for the TX command allocation pool
* @dbgfs_dir: iwlwifi debugfs base dir for this device
* @sync_cmd_lockdep_map: lockdep map for checking sync commands
* @dbg: additional debug data, see &struct iwl_trans_debug
@@ -905,18 +880,13 @@ struct iwl_trans {
const struct iwl_trans_info info;
bool reduced_cap_sku;
bool step_urm;
+ bool suppress_cmd_error_once;
- bool pm_support;
- bool ltr_enabled;
u8 pnvm_loaded:1;
u8 fail_to_parse_pnvm_image:1;
u8 reduce_power_loaded:1;
u8 failed_to_load_reduce_power_image:1;
- /* The following fields are internal only */
- struct kmem_cache *dev_cmd_pool;
- char dev_cmd_pool_name[50];
-
struct dentry *dbgfs_dir;
#ifdef CONFIG_LOCKDEP
@@ -956,29 +926,21 @@ int iwl_trans_start_fw(struct iwl_trans *trans, const struct iwl_fw *fw,
void iwl_trans_stop_device(struct iwl_trans *trans);
-int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, bool reset);
+int iwl_trans_d3_suspend(struct iwl_trans *trans, bool reset);
-int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status,
- bool test, bool reset);
+int iwl_trans_d3_resume(struct iwl_trans *trans, bool reset);
struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
const struct iwl_dump_sanitize_ops *sanitize_ops,
void *sanitize_ctx);
-static inline struct iwl_device_tx_cmd *
-iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
-{
- return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
-}
+struct iwl_device_tx_cmd *iwl_trans_alloc_tx_cmd(struct iwl_trans *trans);
int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
-static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
- struct iwl_device_tx_cmd *dev_cmd)
-{
- kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
-}
+void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
+ struct iwl_device_tx_cmd *dev_cmd);
int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd *dev_cmd, int queue);
@@ -1205,9 +1167,7 @@ static inline void iwl_trans_finish_sw_reset(struct iwl_trans *trans)
*****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
- const struct iwl_mac_cfg *mac_cfg,
- unsigned int txcmd_size,
- unsigned int txcmd_align);
+ const struct iwl_mac_cfg *mac_cfg);
void iwl_trans_free(struct iwl_trans *trans);
static inline bool iwl_trans_is_hw_error_value(u32 val)
@@ -1230,11 +1190,6 @@ static inline u16 iwl_trans_get_num_rbds(struct iwl_trans *trans)
return result;
}
-static inline void iwl_trans_suppress_cmd_error_once(struct iwl_trans *trans)
-{
- set_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE, &trans->status);
-}
-
static inline bool iwl_trans_device_enabled(struct iwl_trans *trans)
{
return test_bit(STATUS_DEVICE_ENABLED, &trans->status);
@@ -1245,6 +1200,20 @@ static inline bool iwl_trans_is_dead(struct iwl_trans *trans)
return test_bit(STATUS_TRANS_DEAD, &trans->status);
}
+static inline bool iwl_trans_is_fw_error(struct iwl_trans *trans)
+{
+ return test_bit(STATUS_FW_ERROR, &trans->status);
+}
+
+/*
+ * This function notifies the transport layer of a firmware error; the
+ * recovery will be handled by the op mode.
+ */
+static inline void iwl_trans_notify_fw_error(struct iwl_trans *trans)
+{
+ trans->state = IWL_TRANS_NO_FW;
+ set_bit(STATUS_FW_ERROR, &trans->status);
+}
/*****************************************************
* PCIe handling
*****************************************************/
@@ -1289,4 +1258,8 @@ static inline u16 iwl_trans_get_device_id(struct iwl_trans *trans)
return u32_get_bits(trans->info.hw_id, GENMASK(31, 16));
}
+bool iwl_trans_is_pm_supported(struct iwl_trans *trans);
+
+bool iwl_trans_is_ltr_enabled(struct iwl_trans *trans);
+
#endif /* __iwl_trans_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/sap.h b/drivers/net/wireless/intel/iwlwifi/mei/sap.h
index ba1f75f739c2..f985ab90d41c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/sap.h
+++ b/drivers/net/wireless/intel/iwlwifi/mei/sap.h
@@ -300,13 +300,11 @@ enum iwl_sap_msg {
* @type: See &enum iwl_sap_msg.
* @len: The length of the message (header not included).
* @seq_num: For debug.
- * @payload: The payload of the message.
*/
struct iwl_sap_hdr {
__le16 type;
__le16 len;
__le32 seq_num;
- u8 payload[];
};
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/d3.c b/drivers/net/wireless/intel/iwlwifi/mld/d3.c
index ed0a0f76f1c5..1d4282a21f09 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/d3.c
@@ -11,6 +11,7 @@
#include "mcc.h"
#include "sta.h"
#include "mlo.h"
+#include "key.h"
#include "fw/api/d3.h"
#include "fw/api/offload.h"
@@ -40,8 +41,6 @@ enum iwl_mld_d3_notif {
struct iwl_mld_resume_key_iter_data {
struct iwl_mld *mld;
struct iwl_mld_wowlan_status *wowlan_status;
- u32 num_keys, gtk_cipher, igtk_cipher, bigtk_cipher;
- bool unhandled_cipher;
};
struct iwl_mld_suspend_key_iter_data {
@@ -71,6 +70,12 @@ struct iwl_mld_mcast_key_data {
};
+struct iwl_mld_wowlan_mlo_key {
+ u8 key[WOWLAN_KEY_MAX_SIZE];
+ u8 idx, type, link_id;
+ u8 pn[6];
+};
+
/**
* struct iwl_mld_wowlan_status - contains wowlan status data from
* all wowlan notifications
@@ -89,6 +94,8 @@ struct iwl_mld_mcast_key_data {
* @bigtk: data of the last two used gtk's by the FW upon resume
* @ptk: last seq numbers per tid passed by the FW,
* holds both in tkip and aes formats
+ * @num_mlo_keys: number of &struct iwl_mld_wowlan_mlo_key structs
+ * @mlo_keys: array of MLO keys
*/
struct iwl_mld_wowlan_status {
u32 wakeup_reasons;
@@ -108,6 +115,9 @@ struct iwl_mld_wowlan_status {
struct ieee80211_key_seq tkip_seq[IWL_MAX_TID_COUNT];
} ptk;
+
+ int num_mlo_keys;
+ struct iwl_mld_wowlan_mlo_key mlo_keys[WOWLAN_MAX_MLO_KEYS];
};
#define NETDETECT_QUERY_BUF_LEN \
@@ -271,7 +281,7 @@ iwl_mld_convert_gtk_resume_seq(struct iwl_mld_mcast_key_data *gtk_data,
static void
iwl_mld_convert_gtk_resume_data(struct iwl_mld *mld,
struct iwl_mld_wowlan_status *wowlan_status,
- const struct iwl_wowlan_gtk_status_v3 *gtk_data,
+ const struct iwl_wowlan_gtk_status *gtk_data,
const struct iwl_wowlan_all_rsc_tsc_v5 *sc)
{
int status_idx = 0;
@@ -283,8 +293,9 @@ iwl_mld_convert_gtk_resume_data(struct iwl_mld *mld,
for (int notif_idx = 0; notif_idx < ARRAY_SIZE(wowlan_status->gtk);
notif_idx++) {
int rsc_idx;
+ u8 key_status = gtk_data[notif_idx].key_status;
- if (!(gtk_data[notif_idx].key_len))
+ if (!key_status)
continue;
wowlan_status->gtk[status_idx].len =
@@ -294,10 +305,6 @@ iwl_mld_convert_gtk_resume_data(struct iwl_mld *mld,
wowlan_status->gtk[status_idx].id =
wowlan_status->gtk[status_idx].flags &
IWL_WOWLAN_GTK_IDX_MASK;
- memcpy(wowlan_status->gtk[status_idx].key,
- gtk_data[notif_idx].key,
- sizeof(gtk_data[notif_idx].key));
-
/* The rsc for both gtk keys are stored in gtk[0]->sc->mcast_rsc
* The gtk ids can be any two numbers between 0 and 3,
* the id_map maps between the key id and the index in sc->mcast
@@ -307,13 +314,27 @@ iwl_mld_convert_gtk_resume_data(struct iwl_mld *mld,
iwl_mld_convert_gtk_resume_seq(&wowlan_status->gtk[status_idx],
sc, rsc_idx);
- /* if it's as long as the TKIP encryption key, copy MIC key */
- if (wowlan_status->gtk[status_idx].len ==
- NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY)
- memcpy(wowlan_status->gtk[status_idx].key +
- NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
- gtk_data[notif_idx].tkip_mic_key,
- sizeof(gtk_data[notif_idx].tkip_mic_key));
+ if (key_status == IWL_WOWLAN_STATUS_NEW_KEY) {
+ memcpy(wowlan_status->gtk[status_idx].key,
+ gtk_data[notif_idx].key,
+ sizeof(gtk_data[notif_idx].key));
+
+ /* if it's as long as the TKIP encryption key,
+ * copy MIC key
+ */
+ if (wowlan_status->gtk[status_idx].len ==
+ NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY)
+ memcpy(wowlan_status->gtk[status_idx].key +
+ NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
+ gtk_data[notif_idx].tkip_mic_key,
+ sizeof(gtk_data[notif_idx].tkip_mic_key));
+ } else {
+ /* If the key status is WOWLAN_STATUS_OLD_KEY, it
+ * indicates that no key material is present. Set the
+ * key length to 0 as an indication.
+ */
+ wowlan_status->gtk[status_idx].len = 0;
+ }
status_idx++;
}
}
@@ -360,11 +381,11 @@ static void
iwl_mld_convert_igtk_resume_data(struct iwl_mld_wowlan_status *wowlan_status,
const struct iwl_wowlan_igtk_status *igtk)
{
- BUILD_BUG_ON(sizeof(wowlan_status->igtk.key) < sizeof(igtk->key));
-
- if (!igtk->key_len)
+ if (!igtk->key_status)
return;
+ BUILD_BUG_ON(sizeof(wowlan_status->igtk.key) < sizeof(igtk->key));
+
wowlan_status->igtk.len = igtk->key_len;
wowlan_status->igtk.flags = igtk->key_flags;
wowlan_status->igtk.id =
@@ -372,7 +393,15 @@ iwl_mld_convert_igtk_resume_data(struct iwl_mld_wowlan_status *wowlan_status,
IWL_WOWLAN_IGTK_BIGTK_IDX_MASK) +
WOWLAN_IGTK_MIN_INDEX;
- memcpy(wowlan_status->igtk.key, igtk->key, sizeof(igtk->key));
+ if (igtk->key_status == IWL_WOWLAN_STATUS_NEW_KEY)
+ memcpy(wowlan_status->igtk.key, igtk->key, sizeof(igtk->key));
+ else
+ /* If the key status is WOWLAN_STATUS_OLD_KEY, it indicates
+ * that no key material is present. Set the key length to 0
+ * as an indication.
+ */
+ wowlan_status->igtk.len = 0;
+
iwl_mld_convert_mcast_ipn(&wowlan_status->igtk, igtk);
}
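
The conversions below all follow the same key_status contract introduced in this series; a one-line restatement as code (names from this patch, the helper itself is illustrative):

/* IWL_WOWLAN_STATUS_NEW_KEY: the notification carries fresh key
 * material to copy. Any other status (e.g. OLD_KEY) means the key the
 * driver installed is still current, so no material is present and
 * len == 0 signals "keep the existing key".
 */
static bool resume_key_has_material(u8 key_status)
{
	return key_status == IWL_WOWLAN_STATUS_NEW_KEY;
}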
@@ -386,7 +415,7 @@ iwl_mld_convert_bigtk_resume_data(struct iwl_mld_wowlan_status *wowlan_status,
for (int notif_idx = 0; notif_idx < WOWLAN_BIGTK_KEYS_NUM;
notif_idx++) {
- if (!bigtk[notif_idx].key_len)
+ if (!bigtk[notif_idx].key_status)
continue;
wowlan_status->bigtk[status_idx].len = bigtk[notif_idx].key_len;
@@ -399,32 +428,218 @@ iwl_mld_convert_bigtk_resume_data(struct iwl_mld_wowlan_status *wowlan_status,
BUILD_BUG_ON(sizeof(wowlan_status->bigtk[status_idx].key) <
sizeof(bigtk[notif_idx].key));
- memcpy(wowlan_status->bigtk[status_idx].key,
- bigtk[notif_idx].key, sizeof(bigtk[notif_idx].key));
+ if (bigtk[notif_idx].key_status == IWL_WOWLAN_STATUS_NEW_KEY)
+ memcpy(wowlan_status->bigtk[status_idx].key,
+ bigtk[notif_idx].key,
+ sizeof(bigtk[notif_idx].key));
+ else
+ /* If the key status is WOWLAN_STATUS_OLD_KEY, it
+ * indicates that no key material is present. Set the
+ * key length to 0 as an indication.
+ */
+ wowlan_status->bigtk[status_idx].len = 0;
+
iwl_mld_convert_mcast_ipn(&wowlan_status->bigtk[status_idx],
&bigtk[notif_idx]);
status_idx++;
}
}
+static void
+iwl_mld_convert_mlo_keys(struct iwl_mld *mld,
+ const struct iwl_wowlan_info_notif *notif,
+ struct iwl_mld_wowlan_status *wowlan_status)
+{
+ if (!notif->num_mlo_link_keys)
+ return;
+
+ wowlan_status->num_mlo_keys = notif->num_mlo_link_keys;
+
+ if (IWL_FW_CHECK(mld, wowlan_status->num_mlo_keys > WOWLAN_MAX_MLO_KEYS,
+ "Too many MLO keys: %d, max %d\n",
+ wowlan_status->num_mlo_keys, WOWLAN_MAX_MLO_KEYS))
+ wowlan_status->num_mlo_keys = WOWLAN_MAX_MLO_KEYS;
+
+ for (int i = 0; i < wowlan_status->num_mlo_keys; i++) {
+ const struct iwl_wowlan_mlo_gtk *fw_mlo_key = &notif->mlo_gtks[i];
+ struct iwl_mld_wowlan_mlo_key *driver_mlo_key =
+ &wowlan_status->mlo_keys[i];
+ u16 flags = le16_to_cpu(fw_mlo_key->flags);
+
+ driver_mlo_key->link_id =
+ u16_get_bits(flags, WOWLAN_MLO_GTK_FLAG_LINK_ID_MSK);
+ driver_mlo_key->type =
+ u16_get_bits(flags, WOWLAN_MLO_GTK_FLAG_KEY_TYPE_MSK);
+ driver_mlo_key->idx =
+ u16_get_bits(flags, WOWLAN_MLO_GTK_FLAG_KEY_ID_MSK);
+
+ BUILD_BUG_ON(sizeof(driver_mlo_key->key) != sizeof(fw_mlo_key->key));
+ BUILD_BUG_ON(sizeof(driver_mlo_key->pn) != sizeof(fw_mlo_key->pn));
+
+ memcpy(driver_mlo_key->key, fw_mlo_key->key, sizeof(fw_mlo_key->key));
+ memcpy(driver_mlo_key->pn, fw_mlo_key->pn, sizeof(fw_mlo_key->pn));
+ }
+}
+
+static void
+iwl_mld_convert_wowlan_notif_v5(const struct iwl_wowlan_info_notif_v5 *notif_v5,
+ struct iwl_wowlan_info_notif *notif)
+{
+ /* Convert GTK from v3 to the new format */
+ BUILD_BUG_ON(ARRAY_SIZE(notif->gtk) != ARRAY_SIZE(notif_v5->gtk));
+
+ for (int i = 0; i < ARRAY_SIZE(notif_v5->gtk); i++) {
+ const struct iwl_wowlan_gtk_status_v3 *gtk_v3 = &notif_v5->gtk[i];
+ struct iwl_wowlan_gtk_status *gtk = &notif->gtk[i];
+
+ /* Copy key material and metadata */
+ BUILD_BUG_ON(sizeof(gtk->key) != sizeof(gtk_v3->key));
+ BUILD_BUG_ON(sizeof(gtk->tkip_mic_key) != sizeof(gtk_v3->tkip_mic_key));
+
+ memcpy(gtk->key, gtk_v3->key, sizeof(gtk_v3->key));
+
+ gtk->key_len = gtk_v3->key_len;
+ gtk->key_flags = gtk_v3->key_flags;
+
+ memcpy(gtk->tkip_mic_key, gtk_v3->tkip_mic_key,
+ sizeof(gtk_v3->tkip_mic_key));
+ gtk->sc = gtk_v3->sc;
+
+ /* Set key_status based on whether key material is present.
+ * In v5, a key is either invalid (should be skipped) or has
+ * both metadata and the key itself.
+ */
+ if (gtk_v3->key_len)
+ gtk->key_status = IWL_WOWLAN_STATUS_NEW_KEY;
+ }
+
+ /* Convert IGTK from v1 to the new format; only one IGTK is passed by the FW */
+ BUILD_BUG_ON(offsetof(struct iwl_wowlan_igtk_status, key_status) !=
+ sizeof(struct iwl_wowlan_igtk_status_v1));
+
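+ /* the structs are identical up to key_status, so copying the
+ * v1-sized prefix below fills all the common fields
+ */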
+ memcpy(&notif->igtk[0], &notif_v5->igtk[0],
+ offsetof(struct iwl_wowlan_igtk_status, key_status));
+
+ /* Set key_status based on whether key material is present.
+ * In v5, a key is either invalid (should be skipped) or has
+ * both metadata and the key itself.
+ */
+ if (notif_v5->igtk[0].key_len)
+ notif->igtk[0].key_status = IWL_WOWLAN_STATUS_NEW_KEY;
+
+ /* Convert BIGTK from v1 to the new format */
+ BUILD_BUG_ON(ARRAY_SIZE(notif->bigtk) != ARRAY_SIZE(notif_v5->bigtk));
+
+ for (int i = 0; i < ARRAY_SIZE(notif_v5->bigtk); i++) {
+ /* Copy everything until key_status */
+ memcpy(&notif->bigtk[i], &notif_v5->bigtk[i],
+ offsetof(struct iwl_wowlan_igtk_status, key_status));
+
+ /* Set key_status based on whether key material is present.
+ * In v5, a key is either invalid (should be skipped) or has
+ * both metadata and the key itself.
+ */
+ if (notif_v5->bigtk[i].key_len)
+ notif->bigtk[i].key_status = IWL_WOWLAN_STATUS_NEW_KEY;
+ }
+
+ notif->replay_ctr = notif_v5->replay_ctr;
+ notif->pattern_number = notif_v5->pattern_number;
+ notif->qos_seq_ctr = notif_v5->qos_seq_ctr;
+ notif->wakeup_reasons = notif_v5->wakeup_reasons;
+ notif->num_of_gtk_rekeys = notif_v5->num_of_gtk_rekeys;
+ notif->transmitted_ndps = notif_v5->transmitted_ndps;
+ notif->received_beacons = notif_v5->received_beacons;
+ notif->tid_tear_down = notif_v5->tid_tear_down;
+ notif->station_id = notif_v5->station_id;
+ notif->num_mlo_link_keys = notif_v5->num_mlo_link_keys;
+ notif->tid_offloaded_tx = notif_v5->tid_offloaded_tx;
+
+ /* Copy MLO GTK keys */
+ if (notif_v5->num_mlo_link_keys) {
+ memcpy(notif->mlo_gtks, notif_v5->mlo_gtks,
+ notif_v5->num_mlo_link_keys * sizeof(struct iwl_wowlan_mlo_gtk));
+ }
+}
+
+static bool iwl_mld_validate_wowlan_notif_size(struct iwl_mld *mld,
+ u32 len,
+ u32 expected_len,
+ u8 num_mlo_keys,
+ int version)
+{
+ u32 len_with_mlo_keys;
+
+ if (IWL_FW_CHECK(mld, len < expected_len,
+ "Invalid wowlan_info_notif v%d (expected=%u got=%u)\n",
+ version, expected_len, len))
+ return false;
+
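+ /* the fixed part is followed by num_mlo_keys variable-length
+ * MLO GTK entries
+ */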
+ len_with_mlo_keys = expected_len +
+ (num_mlo_keys * sizeof(struct iwl_wowlan_mlo_gtk));
+
+ if (IWL_FW_CHECK(mld, len < len_with_mlo_keys,
+ "Invalid wowlan_info_notif v%d with MLO keys (expected=%u got=%u)\n",
+ version, len_with_mlo_keys, len))
+ return false;
+
+ return true;
+}
+
static bool
iwl_mld_handle_wowlan_info_notif(struct iwl_mld *mld,
struct iwl_mld_wowlan_status *wowlan_status,
struct iwl_rx_packet *pkt)
{
- const struct iwl_wowlan_info_notif *notif = (void *)pkt->data;
- u32 expected_len, len = iwl_rx_packet_payload_len(pkt);
-
- expected_len = sizeof(*notif);
+ const struct iwl_wowlan_info_notif *notif;
+ struct iwl_wowlan_info_notif *converted_notif __free(kfree) = NULL;
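+ /* __free(kfree): freed automatically when it goes out of scope */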
+ u32 len = iwl_rx_packet_payload_len(pkt);
+ int wowlan_info_ver = iwl_fw_lookup_notif_ver(mld->fw,
+ PROT_OFFLOAD_GROUP,
+ WOWLAN_INFO_NOTIFICATION,
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ if (wowlan_info_ver == 5) {
+ /* v5 format - validate before conversion */
+ const struct iwl_wowlan_info_notif_v5 *notif_v5 = (void *)pkt->data;
+
+ if (!iwl_mld_validate_wowlan_notif_size(mld, len,
+ sizeof(*notif_v5),
+ notif_v5->num_mlo_link_keys,
+ 5))
+ return true;
+
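+ /* allocate the fixed part plus the trailing MLO GTK entries */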
+ converted_notif = kzalloc(struct_size(converted_notif,
+ mlo_gtks,
+ notif_v5->num_mlo_link_keys),
+ GFP_ATOMIC);
+ if (!converted_notif) {
+ IWL_ERR(mld,
+ "Failed to allocate memory for converted wowlan_info_notif\n");
+ return true;
+ }
- if (IWL_FW_CHECK(mld, len < expected_len,
- "Invalid wowlan_info_notif (expected=%ud got=%ud)\n",
- expected_len, len))
+ iwl_mld_convert_wowlan_notif_v5(notif_v5,
+ converted_notif);
+ notif = converted_notif;
+ } else if (wowlan_info_ver == 6) {
+ notif = (void *)pkt->data;
+ if (!iwl_mld_validate_wowlan_notif_size(mld, len,
+ sizeof(*notif),
+ notif->num_mlo_link_keys,
+ 6))
+ return true;
+ } else {
+ /* smaller versions are not supported */
+ IWL_WARN(mld,
+ "Unsupported wowlan_info_notif version %d\n",
+ wowlan_info_ver);
return true;
+ }
if (IWL_FW_CHECK(mld, notif->tid_offloaded_tx != IWL_WOWLAN_OFFLOAD_TID,
"Invalid tid_offloaded_tx %d\n",
- wowlan_status->tid_offloaded_tx))
+ notif->tid_offloaded_tx))
return true;
iwl_mld_convert_gtk_resume_data(mld, wowlan_status, notif->gtk,
@@ -442,8 +657,10 @@ iwl_mld_handle_wowlan_info_notif(struct iwl_mld *mld,
wowlan_status->num_of_gtk_rekeys =
le32_to_cpu(notif->num_of_gtk_rekeys);
wowlan_status->wakeup_reasons = le32_to_cpu(notif->wakeup_reasons);
+
+ iwl_mld_convert_mlo_keys(mld, notif, wowlan_status);
+
return false;
- /* TODO: mlo_links (task=MLO)*/
}
static bool
@@ -619,8 +836,8 @@ iwl_mld_set_key_rx_seq_tids(struct ieee80211_key_conf *key,
}
static void
-iwl_mld_set_key_rx_seq(struct ieee80211_key_conf *key,
- struct iwl_mld_mcast_key_data *key_data)
+iwl_mld_update_mcast_rx_seq(struct ieee80211_key_conf *key,
+ struct iwl_mld_mcast_key_data *key_data)
{
switch (key->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
@@ -687,132 +904,53 @@ iwl_mld_resume_keys_iter(struct ieee80211_hw *hw,
struct iwl_mld_wowlan_status *wowlan_status = data->wowlan_status;
u8 status_idx;
- /* TODO: check key link id (task=MLO) */
- if (data->unhandled_cipher)
- return;
-
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- /* ignore WEP completely, nothing to do */
- return;
- case WLAN_CIPHER_SUITE_CCMP:
- case WLAN_CIPHER_SUITE_GCMP:
- case WLAN_CIPHER_SUITE_GCMP_256:
- case WLAN_CIPHER_SUITE_TKIP:
+ if (key->keyidx >= 0 && key->keyidx <= 3) {
+ /* PTK */
if (sta) {
iwl_mld_update_ptk_rx_seq(data->mld, wowlan_status,
sta, key,
key->cipher ==
WLAN_CIPHER_SUITE_TKIP);
- return;
+ /* GTK */
+ } else {
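+ /* the comparison yields 1 when this keyidx matches gtk[1],
+ * and 0 otherwise
+ */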
+ status_idx = key->keyidx == wowlan_status->gtk[1].id;
+ iwl_mld_update_mcast_rx_seq(key,
+ &wowlan_status->gtk[status_idx]);
}
+ }
- if (WARN_ON(data->gtk_cipher &&
- data->gtk_cipher != key->cipher))
- return;
+ /* IGTK */
+ if (key->keyidx == 4 || key->keyidx == 5) {
+ if (key->keyidx == wowlan_status->igtk.id)
+ iwl_mld_update_mcast_rx_seq(key, &wowlan_status->igtk);
+ }
- data->gtk_cipher = key->cipher;
- status_idx = key->keyidx == wowlan_status->gtk[1].id;
- iwl_mld_set_key_rx_seq(key, &wowlan_status->gtk[status_idx]);
- break;
- case WLAN_CIPHER_SUITE_BIP_GMAC_128:
- case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- case WLAN_CIPHER_SUITE_BIP_CMAC_256:
- case WLAN_CIPHER_SUITE_AES_CMAC:
- if (key->keyidx == 4 || key->keyidx == 5) {
- if (WARN_ON(data->igtk_cipher &&
- data->igtk_cipher != key->cipher))
- return;
-
- data->igtk_cipher = key->cipher;
- if (key->keyidx == wowlan_status->igtk.id)
- iwl_mld_set_key_rx_seq(key, &wowlan_status->igtk);
- }
- if (key->keyidx == 6 || key->keyidx == 7) {
- if (WARN_ON(data->bigtk_cipher &&
- data->bigtk_cipher != key->cipher))
- return;
-
- data->bigtk_cipher = key->cipher;
- status_idx = key->keyidx == wowlan_status->bigtk[1].id;
- iwl_mld_set_key_rx_seq(key, &wowlan_status->bigtk[status_idx]);
- }
- break;
- default:
- data->unhandled_cipher = true;
- return;
+ /* BIGTK */
+ if (key->keyidx == 6 || key->keyidx == 7) {
+ status_idx = key->keyidx == wowlan_status->bigtk[1].id;
+ iwl_mld_update_mcast_rx_seq(key,
+ &wowlan_status->bigtk[status_idx]);
}
- data->num_keys++;
}
static void
iwl_mld_add_mcast_rekey(struct ieee80211_vif *vif,
struct iwl_mld *mld,
struct iwl_mld_mcast_key_data *key_data,
- struct ieee80211_bss_conf *link_conf,
- u32 cipher)
+ struct ieee80211_bss_conf *link_conf)
{
struct ieee80211_key_conf *key_config;
- struct {
- struct ieee80211_key_conf conf;
- u8 key[WOWLAN_KEY_MAX_SIZE];
- } conf = {
- .conf.cipher = cipher,
- .conf.keyidx = key_data->id,
- };
int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
- u8 key[WOWLAN_KEY_MAX_SIZE];
-
- BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP);
- BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_CCMP);
- BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_GCMP_256);
- BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_TKIP);
- BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_BIP_GMAC_128);
- BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_BIP_GMAC_256);
- BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_AES_CMAC);
- BUILD_BUG_ON(sizeof(conf.key) < sizeof(key_data->key));
if (!key_data->len)
return;
- switch (cipher) {
- case WLAN_CIPHER_SUITE_CCMP:
- case WLAN_CIPHER_SUITE_GCMP:
- conf.conf.keylen = WLAN_KEY_LEN_CCMP;
- break;
- case WLAN_CIPHER_SUITE_GCMP_256:
- conf.conf.keylen = WLAN_KEY_LEN_GCMP_256;
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- conf.conf.keylen = WLAN_KEY_LEN_TKIP;
- break;
- case WLAN_CIPHER_SUITE_BIP_GMAC_128:
- conf.conf.keylen = WLAN_KEY_LEN_BIP_GMAC_128;
- break;
- case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- conf.conf.keylen = WLAN_KEY_LEN_BIP_GMAC_256;
- break;
- case WLAN_CIPHER_SUITE_AES_CMAC:
- conf.conf.keylen = WLAN_KEY_LEN_AES_CMAC;
- break;
- case WLAN_CIPHER_SUITE_BIP_CMAC_256:
- conf.conf.keylen = WLAN_KEY_LEN_BIP_CMAC_256;
- break;
- default:
- WARN_ON(1);
- }
-
- memcpy(conf.conf.key, key_data->key, conf.conf.keylen);
-
- memcpy(key, key_data->key, sizeof(key_data->key));
-
- key_config = ieee80211_gtk_rekey_add(vif, key_data->id, key,
- sizeof(key), link_id);
+ key_config = ieee80211_gtk_rekey_add(vif, key_data->id, key_data->key,
+ sizeof(key_data->key), link_id);
if (IS_ERR(key_config))
return;
- iwl_mld_set_key_rx_seq(key_config, key_data);
+ iwl_mld_update_mcast_rx_seq(key_config, key_data);
/* The FW holds only one igtk so we keep track of the valid one */
if (key_config->keyidx == 4 || key_config->keyidx == 5) {
@@ -831,37 +969,78 @@ iwl_mld_add_mcast_rekey(struct ieee80211_vif *vif,
}
/* Also keep track of the new BIGTK */
- if ((key_config->keyidx == 6 || key_config->keyidx == 7) &&
- vif->type == NL80211_IFTYPE_STATION) {
- struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
-
- rcu_assign_pointer(mld_vif->bigtks[key_config->keyidx - 6], key_config);
- }
+ if (key_config->keyidx == 6 || key_config->keyidx == 7)
+ iwl_mld_track_bigtk(mld, vif, key_config, true);
}
static void
-iwl_mld_add_all_rekeys(struct ieee80211_vif *vif,
+iwl_mld_add_all_rekeys(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
struct iwl_mld_wowlan_status *wowlan_status,
- struct iwl_mld_resume_key_iter_data *key_iter_data,
struct ieee80211_bss_conf *link_conf)
{
int i;
for (i = 0; i < ARRAY_SIZE(wowlan_status->gtk); i++)
- iwl_mld_add_mcast_rekey(vif, key_iter_data->mld,
- &wowlan_status->gtk[i],
- link_conf,
- key_iter_data->gtk_cipher);
+ iwl_mld_add_mcast_rekey(vif, mld, &wowlan_status->gtk[i],
+ link_conf);
- iwl_mld_add_mcast_rekey(vif, key_iter_data->mld,
- &wowlan_status->igtk,
- link_conf, key_iter_data->igtk_cipher);
+ iwl_mld_add_mcast_rekey(vif, mld, &wowlan_status->igtk, link_conf);
for (i = 0; i < ARRAY_SIZE(wowlan_status->bigtk); i++)
- iwl_mld_add_mcast_rekey(vif, key_iter_data->mld,
- &wowlan_status->bigtk[i],
- link_conf,
- key_iter_data->bigtk_cipher);
+ iwl_mld_add_mcast_rekey(vif, mld, &wowlan_status->bigtk[i],
+ link_conf);
+}
+
+static void iwl_mld_mlo_rekey(struct iwl_mld *mld,
+ struct iwl_mld_wowlan_status *wowlan_status,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_old_mlo_keys *old_keys __free(kfree) = NULL;
+
+ IWL_DEBUG_WOWLAN(mld, "Num of MLO Keys: %d\n", wowlan_status->num_mlo_keys);
+
+ if (!wowlan_status->num_mlo_keys)
+ return;
+
+ for (int i = 0; i < wowlan_status->num_mlo_keys; i++) {
+ struct iwl_mld_wowlan_mlo_key *mlo_key = &wowlan_status->mlo_keys[i];
+ struct ieee80211_key_conf *key;
+ struct ieee80211_key_seq seq;
+ u8 link_id = mlo_key->link_id;
+
+ if (IWL_FW_CHECK(mld, mlo_key->link_id >= IEEE80211_MLD_MAX_NUM_LINKS ||
+ mlo_key->idx >= 8 ||
+ mlo_key->type >= WOWLAN_MLO_GTK_KEY_NUM_TYPES,
+ "Invalid MLO key link_id %d, idx %d, type %d\n",
+ mlo_key->link_id, mlo_key->idx, mlo_key->type))
+ continue;
+
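+ /* Only install keys for valid links that are currently
+ * inactive; active links were rekeyed by the flow above.
+ */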
+ if (!(vif->valid_links & BIT(link_id)) ||
+ (vif->active_links & BIT(link_id)))
+ continue;
+
+ IWL_DEBUG_WOWLAN(mld, "Add MLO key id %d, link id %d\n",
+ mlo_key->idx, link_id);
+
+ key = ieee80211_gtk_rekey_add(vif, mlo_key->idx, mlo_key->key,
+ sizeof(mlo_key->key), link_id);
+
+ if (IS_ERR(key))
+ continue;
+
+ /*
+ * mac80211 expects the PN in big-endian.
+ * Also note that seq is a union of all cipher types
+ * (ccmp, gcmp, cmac, gmac), and they all have the same
+ * pn field (of length 6), so just copy it to ccmp.pn.
+ */
+ for (int j = 5; j >= 0; j--)
+ seq.ccmp.pn[5 - j] = mlo_key->pn[j];
+
+ /* group keys are non-QoS and use TID 0 */
+ ieee80211_set_key_rx_seq(key, 0, &seq);
+ }
}
static bool
@@ -884,23 +1063,19 @@ iwl_mld_update_sec_keys(struct iwl_mld *mld,
ieee80211_iter_keys(mld->hw, vif, iwl_mld_resume_keys_iter,
&key_iter_data);
- if (key_iter_data.unhandled_cipher)
- return false;
-
- IWL_DEBUG_WOWLAN(mld,
- "Number of installed keys: %d, Number of rekeys: %d\n",
- key_iter_data.num_keys,
+ IWL_DEBUG_WOWLAN(mld, "Number of rekeys: %d\n",
wowlan_status->num_of_gtk_rekeys);
- if (!key_iter_data.num_keys || !wowlan_status->num_of_gtk_rekeys)
+ if (!wowlan_status->num_of_gtk_rekeys)
return true;
- iwl_mld_add_all_rekeys(vif, wowlan_status, &key_iter_data,
+ iwl_mld_add_all_rekeys(mld, vif, wowlan_status,
link_conf);
+ iwl_mld_mlo_rekey(mld, wowlan_status, vif);
+
ieee80211_gtk_rekey_notify(vif, link_conf->bssid,
(void *)&replay_ctr, GFP_KERNEL);
- /* TODO: MLO rekey (task=MLO) */
return true;
}
@@ -1179,7 +1354,6 @@ static int iwl_mld_wait_d3_notif(struct iwl_mld *mld,
WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION)
};
struct iwl_notification_wait wait_d3_notif;
- enum iwl_d3_status d3_status;
int ret;
if (with_wowlan)
@@ -1195,14 +1369,10 @@ static int iwl_mld_wait_d3_notif(struct iwl_mld *mld,
iwl_mld_handle_d3_notif,
resume_data);
- ret = iwl_trans_d3_resume(mld->trans, &d3_status, false, false);
- if (ret || d3_status != IWL_D3_STATUS_ALIVE) {
- if (d3_status != IWL_D3_STATUS_ALIVE) {
- IWL_INFO(mld, "Device was reset during suspend\n");
- ret = -ENOENT;
- } else {
- IWL_ERR(mld, "Transport resume failed\n");
- }
+ ret = iwl_trans_d3_resume(mld->trans, false);
+ if (ret) {
+ /* Avoid sending commands if the FW is dead */
+ iwl_trans_notify_fw_error(mld->trans);
iwl_remove_notification(&mld->notif_wait, &wait_d3_notif);
return ret;
}
@@ -1236,16 +1406,11 @@ int iwl_mld_no_wowlan_suspend(struct iwl_mld *mld)
iwl_mld_low_latency_stop(mld);
- /* This will happen if iwl_mld_supsend failed with FW error */
- if (mld->trans->state == IWL_TRANS_NO_FW &&
- test_bit(STATUS_FW_ERROR, &mld->trans->status))
- return -ENODEV;
-
ret = iwl_mld_update_device_power(mld, true);
if (ret) {
IWL_ERR(mld,
"d3 suspend: couldn't send power_device %d\n", ret);
- goto out;
+ return ret;
}
ret = iwl_mld_send_cmd_pdu(mld, D3_CONFIG_CMD,
@@ -1253,24 +1418,20 @@ int iwl_mld_no_wowlan_suspend(struct iwl_mld *mld)
if (ret) {
IWL_ERR(mld,
"d3 suspend: couldn't send D3_CONFIG_CMD %d\n", ret);
- goto out;
+ return ret;
}
- ret = iwl_trans_d3_suspend(mld->trans, false, false);
+ ret = iwl_trans_d3_suspend(mld->trans, false);
if (ret) {
IWL_ERR(mld, "d3 suspend: trans_d3_suspend failed %d\n", ret);
+ /* We are going to stop the FW. Avoid sending commands in that flow */
+ iwl_trans_notify_fw_error(mld->trans);
} else {
/* Async notification might send hcmds, which is not allowed in suspend */
iwl_mld_cancel_async_notifications(mld);
mld->fw_status.in_d3 = true;
}
- out:
- if (ret) {
- mld->trans->state = IWL_TRANS_NO_FW;
- set_bit(STATUS_FW_ERROR, &mld->trans->status);
- }
-
return ret;
}
@@ -1290,15 +1451,12 @@ int iwl_mld_no_wowlan_resume(struct iwl_mld *mld)
iwl_fw_dbg_read_d3_debug_data(&mld->fwrt);
ret = iwl_mld_wait_d3_notif(mld, &resume_data, false);
+ if (ret)
+ return ret;
if (!ret && (resume_data.d3_end_flags & IWL_D0I3_RESET_REQUIRE))
return -ENODEV;
- if (ret) {
- mld->trans->state = IWL_TRANS_NO_FW;
- set_bit(STATUS_FW_ERROR, &mld->trans->status);
- return ret;
- }
iwl_mld_low_latency_restart(mld);
return iwl_mld_update_device_power(mld, false);
@@ -1530,7 +1688,8 @@ static void
iwl_mld_set_wowlan_config_cmd(struct iwl_mld *mld,
struct cfg80211_wowlan *wowlan,
struct iwl_wowlan_config_cmd *wowlan_config_cmd,
- struct ieee80211_sta *ap_sta)
+ struct ieee80211_sta *ap_sta,
+ struct ieee80211_bss_conf *link)
{
wowlan_config_cmd->is_11n_connection =
ap_sta->deflink.ht_cap.ht_supported;
@@ -1540,6 +1699,9 @@ iwl_mld_set_wowlan_config_cmd(struct iwl_mld *mld,
if (ap_sta->mfp)
wowlan_config_cmd->flags |= IS_11W_ASSOC;
+ if (iwl_mld_beacon_protection_enabled(mld, link))
+ wowlan_config_cmd->flags |= HAS_BEACON_PROTECTION;
+
if (wowlan->disconnect)
wowlan_config_cmd->wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
@@ -1737,7 +1899,7 @@ iwl_mld_wowlan_config(struct iwl_mld *mld, struct ieee80211_vif *bss_vif,
return ret;
iwl_mld_set_wowlan_config_cmd(mld, wowlan,
- &wowlan_config_cmd, ap_sta);
+ &wowlan_config_cmd, ap_sta, link_conf);
ret = iwl_mld_send_cmd_pdu(mld, WOWLAN_CONFIGURATION,
&wowlan_config_cmd);
if (ret)
@@ -1807,7 +1969,6 @@ int iwl_mld_wowlan_resume(struct iwl_mld *mld)
};
int link_id;
int ret;
- bool fw_err = false;
lockdep_assert_wiphy(mld->wiphy);
@@ -1850,7 +2011,6 @@ int iwl_mld_wowlan_resume(struct iwl_mld *mld)
ret = iwl_mld_wait_d3_notif(mld, &resume_data, true);
if (ret) {
IWL_ERR(mld, "Couldn't get the d3 notifs %d\n", ret);
- fw_err = true;
goto err;
}
@@ -1887,11 +2047,6 @@ int iwl_mld_wowlan_resume(struct iwl_mld *mld)
goto out;
err:
- if (fw_err) {
- mld->trans->state = IWL_TRANS_NO_FW;
- set_bit(STATUS_FW_ERROR, &mld->trans->status);
- }
-
mld->fw_status.in_hw_restart = true;
ret = 1;
out:
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
index cc052b0aa53f..b9c9cd3f44e4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
@@ -86,7 +86,7 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mld *mld, char *buf,
if (count == 6 && !strcmp(buf, "nolog\n")) {
mld->fw_status.do_not_dump_once = true;
- iwl_trans_suppress_cmd_error_once(mld->trans);
+ mld->trans->suppress_cmd_error_once = true;
}
/* take the return value to make compiler happy - it will
@@ -1001,8 +1001,12 @@ void iwl_mld_add_link_debugfs(struct ieee80211_hw *hw,
* If not, this is a per-link dir of an MLO vif; add the iwlmld
* dir in it.
*/
- if (!mld_link_dir)
+ if (!mld_link_dir) {
mld_link_dir = debugfs_create_dir("iwlmld", dir);
+ } else {
+ /* Release the reference from debugfs_lookup */
+ dput(mld_link_dir);
+ }
}
static ssize_t _iwl_dbgfs_fixed_rate_write(struct iwl_mld *mld, char *buf,
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/iface.c b/drivers/net/wireless/intel/iwlwifi/mld/iface.c
index 38993d65c052..ed379825a923 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/iface.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/iface.c
@@ -115,20 +115,12 @@ static bool iwl_mld_is_nic_ack_enabled(struct iwl_mld *mld,
static void iwl_mld_set_he_support(struct iwl_mld *mld,
struct ieee80211_vif *vif,
- struct iwl_mac_config_cmd *cmd,
- int cmd_ver)
+ struct iwl_mac_config_cmd *cmd)
{
- if (vif->type == NL80211_IFTYPE_AP) {
- if (cmd_ver == 2)
- cmd->wifi_gen_v2.he_ap_support = cpu_to_le16(1);
- else
- cmd->wifi_gen.he_ap_support = 1;
- } else {
- if (cmd_ver == 2)
- cmd->wifi_gen_v2.he_support = cpu_to_le16(1);
- else
- cmd->wifi_gen.he_support = 1;
- }
+ if (vif->type == NL80211_IFTYPE_AP)
+ cmd->wifi_gen.he_ap_support = 1;
+ else
+ cmd->wifi_gen.he_support = 1;
}
/* fill the common part for all interface types */
@@ -140,9 +132,6 @@ static void iwl_mld_mac_cmd_fill_common(struct iwl_mld *mld,
struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
struct ieee80211_bss_conf *link_conf;
unsigned int link_id;
- int cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw,
- WIDE_ID(MAC_CONF_GROUP,
- MAC_CONFIG_CMD), 0);
lockdep_assert_wiphy(mld->wiphy);
@@ -169,11 +158,8 @@ static void iwl_mld_mac_cmd_fill_common(struct iwl_mld *mld,
* and enable both when we have MLO.
*/
if (ieee80211_vif_is_mld(vif)) {
- iwl_mld_set_he_support(mld, vif, cmd, cmd_ver);
- if (cmd_ver == 2)
- cmd->wifi_gen_v2.eht_support = cpu_to_le32(1);
- else
- cmd->wifi_gen.eht_support = 1;
+ iwl_mld_set_he_support(mld, vif, cmd);
+ cmd->wifi_gen.eht_support = 1;
return;
}
@@ -181,7 +167,7 @@ static void iwl_mld_mac_cmd_fill_common(struct iwl_mld *mld,
if (!link_conf->he_support)
continue;
- iwl_mld_set_he_support(mld, vif, cmd, cmd_ver);
+ iwl_mld_set_he_support(mld, vif, cmd);
/* EHT, if supported, was already set above */
break;
@@ -451,24 +437,21 @@ int iwl_mld_add_vif(struct iwl_mld *mld, struct ieee80211_vif *vif)
return ret;
}
-int iwl_mld_rm_vif(struct iwl_mld *mld, struct ieee80211_vif *vif)
+void iwl_mld_rm_vif(struct iwl_mld *mld, struct ieee80211_vif *vif)
{
struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
- int ret;
lockdep_assert_wiphy(mld->wiphy);
- ret = iwl_mld_mac_fw_action(mld, vif, FW_CTXT_ACTION_REMOVE);
+ iwl_mld_mac_fw_action(mld, vif, FW_CTXT_ACTION_REMOVE);
if (WARN_ON(mld_vif->fw_id >= ARRAY_SIZE(mld->fw_id_to_vif)))
- return -EINVAL;
+ return;
RCU_INIT_POINTER(mld->fw_id_to_vif[mld_vif->fw_id], NULL);
iwl_mld_cancel_notifications_of_object(mld, IWL_MLD_OBJECT_TYPE_VIF,
mld_vif->fw_id);
-
- return ret;
}
void iwl_mld_set_vif_associated(struct iwl_mld *mld,
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/iface.h b/drivers/net/wireless/intel/iwlwifi/mld/iface.h
index 05dcb63701b1..a3573d20f214 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/iface.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/iface.h
@@ -125,8 +125,6 @@ struct iwl_mld_emlsr {
* @ap_sta: pointer to AP sta, for easier access to it.
* Relevant only for STA vifs.
* @authorized: indicates the AP station was set to authorized
- * @bigtks: BIGTKs of the AP, for beacon protection.
- * Only valid for STA. (FIXME: needs to be per link)
* @num_associated_stas: number of associated STAs. Relevant only for AP mode.
* @ap_ibss_active: whether the AP/IBSS was started
* @cca_40mhz_workaround: When we are connected in 2.4 GHz and 40 MHz, and the
@@ -158,7 +156,6 @@ struct iwl_mld_vif {
struct iwl_mld_session_protect session_protect;
struct ieee80211_sta *ap_sta;
bool authorized;
- struct ieee80211_key_conf __rcu *bigtks[2];
u8 num_associated_stas;
bool ap_ibss_active;
enum iwl_mld_cca_40mhz_wa_status cca_40mhz_workaround;
@@ -227,7 +224,7 @@ void iwl_mld_cleanup_vif(void *data, u8 *mac, struct ieee80211_vif *vif);
int iwl_mld_mac_fw_action(struct iwl_mld *mld, struct ieee80211_vif *vif,
u32 action);
int iwl_mld_add_vif(struct iwl_mld *mld, struct ieee80211_vif *vif);
-int iwl_mld_rm_vif(struct iwl_mld *mld, struct ieee80211_vif *vif);
+void iwl_mld_rm_vif(struct iwl_mld *mld, struct ieee80211_vif *vif);
void iwl_mld_set_vif_associated(struct iwl_mld *mld,
struct ieee80211_vif *vif);
u8 iwl_mld_get_fw_bss_vifs_ids(struct iwl_mld *mld);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/key.c b/drivers/net/wireless/intel/iwlwifi/mld/key.c
index 13462a5ad79a..04192c5f07ff 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/key.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/key.c
@@ -368,3 +368,41 @@ int iwl_mld_update_sta_keys(struct iwl_mld *mld,
&data);
return data.err;
}
+
+void iwl_mld_track_bigtk(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *key, bool add)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_link *link;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (WARN_ON(key->keyidx < 6 || key->keyidx > 7))
+ return;
+
+ if (WARN_ON(key->link_id < 0))
+ return;
+
+ link = iwl_mld_link_dereference_check(mld_vif, key->link_id);
+ if (WARN_ON(!link))
+ return;
+
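+ /* key indices 6/7 map to bigtks[0]/bigtks[1] */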
+ if (add)
+ rcu_assign_pointer(link->bigtks[key->keyidx - 6], key);
+ else
+ RCU_INIT_POINTER(link->bigtks[key->keyidx - 6], NULL);
+}
+
+bool iwl_mld_beacon_protection_enabled(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link)
+{
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+
+ if (WARN_ON(!mld_link))
+ return false;
+
+ return rcu_access_pointer(mld_link->bigtks[0]) ||
+ rcu_access_pointer(mld_link->bigtks[1]);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/key.h b/drivers/net/wireless/intel/iwlwifi/mld/key.h
index a68ea48913be..5a9efdaa3b03 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/key.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/key.h
@@ -36,4 +36,11 @@ iwl_mld_cleanup_keys_iter(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
key->hw_key_idx = STA_KEY_IDX_INVALID;
}
+void iwl_mld_track_bigtk(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *key, bool add);
+
+bool iwl_mld_beacon_protection_enabled(struct iwl_mld *mld,
+ struct ieee80211_bss_conf *link);
+
#endif /* __iwl_mld_key_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/link.c b/drivers/net/wireless/intel/iwlwifi/mld/link.c
index 782fc41aa1c3..738f80fe0c50 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/link.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/link.c
@@ -532,7 +532,8 @@ void iwl_mld_handle_missed_beacon_notif(struct iwl_mld *mld,
le32_to_cpu(notif->consec_missed_beacons_other_link);
struct ieee80211_bss_conf *link_conf =
iwl_mld_fw_id_to_link_conf(mld, fw_link_id);
- u32 bss_param_ch_cnt_link_id;
+ struct ieee80211_bss_conf *other_link;
+ u32 bss_param_ch_cnt_link_id, other_link_fw_id;
struct ieee80211_vif *vif;
u8 link_id;
@@ -550,11 +551,6 @@ void iwl_mld_handle_missed_beacon_notif(struct iwl_mld *mld,
if (WARN_ON(!vif))
return;
- mld->trans->dbg.dump_file_name_ext_valid = true;
- snprintf(mld->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME,
- "LinkId_%d_MacType_%d", fw_link_id,
- iwl_mld_mac80211_iftype_to_fw(vif));
-
iwl_dbg_tlv_time_point(&mld->fwrt,
IWL_FW_INI_TIME_POINT_MISSED_BEACONS, &tp_data);
@@ -572,8 +568,11 @@ void iwl_mld_handle_missed_beacon_notif(struct iwl_mld *mld,
if (missed_bcon_since_rx > IWL_MLD_MISSED_BEACONS_THRESHOLD) {
ieee80211_cqm_beacon_loss_notify(vif, GFP_ATOMIC);
- /* try to switch links, no-op if we don't have MLO */
- iwl_mld_int_mlo_scan(mld, vif);
+ /* Not in EMLSR and we can't hear the link.
+ * Try to switch to a better link. EMLSR case is handled below.
+ */
+ if (!iwl_mld_emlsr_active(vif))
+ iwl_mld_int_mlo_scan(mld, vif);
}
/* no more logic if we're not in EMLSR */
@@ -584,6 +583,17 @@ void iwl_mld_handle_missed_beacon_notif(struct iwl_mld *mld,
if (le32_to_cpu(notif->other_link_id) == FW_CTXT_ID_INVALID)
return;
+ other_link_fw_id = le32_to_cpu(notif->other_link_id);
+ other_link = iwl_mld_fw_id_to_link_conf(mld, other_link_fw_id);
+
+ if (IWL_FW_CHECK(mld, !other_link, "link doesn't exist for: %d\n",
+ other_link_fw_id))
+ return;
+
+ IWL_DEBUG_EHT(mld,
+ "missed bcn on the other link (link_id=%u): %u\n",
+ other_link->link_id, scnd_lnk_bcn_lost);
+
/* Exit EMLSR if we lost more than
* IWL_MLD_MISSED_BEACONS_EXIT_ESR_THRESH beacons on both links
* OR more than IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH on current link.
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/link.h b/drivers/net/wireless/intel/iwlwifi/mld/link.h
index cad2c9426349..9e4da8e4de93 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/link.h
+++ b/drivers/net/wireless/intel/iwlwifi/mld/link.h
@@ -36,6 +36,7 @@ struct iwl_probe_resp_data {
* @he_ru_2mhz_block: 26-tone RU OFDMA transmissions should be blocked.
* @igtk: fw can only have one IGTK at a time, whereas mac80211 can have two.
* This tracks the one IGTK that currently exists in FW.
+ * @bigtks: BIGTKs of the AP. Only valid for STA mode.
* @bcast_sta: station used for broadcast packets. Used in AP, GO and IBSS.
* @mcast_sta: station used for multicast packets. Used in AP, GO and IBSS.
* @mon_sta: station used for TX injection in monitor interface.
@@ -59,6 +60,7 @@ struct iwl_mld_link {
struct ieee80211_chanctx_conf __rcu *chan_ctx;
bool he_ru_2mhz_block;
struct ieee80211_key_conf *igtk;
+ struct ieee80211_key_conf __rcu *bigtks[2];
);
/* And here fields that survive a fw restart */
struct iwl_mld_int_sta bcast_sta;
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
index b0bd01914a91..5725104a53bf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
@@ -626,7 +626,7 @@ int iwl_mld_mac80211_add_interface(struct ieee80211_hw *hw,
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
}
- if (vif->p2p || iwl_fw_lookup_cmd_ver(mld->fw, PHY_CONTEXT_CMD, 0) < 5)
+ if (vif->p2p)
vif->driver_flags |= IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW;
/*
@@ -1966,13 +1966,8 @@ iwl_mld_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
iwl_fw_runtime_suspend(&mld->fwrt);
ret = iwl_mld_wowlan_suspend(mld, wowlan);
- if (ret) {
- if (ret < 0) {
- mld->trans->state = IWL_TRANS_NO_FW;
- set_bit(STATUS_FW_ERROR, &mld->trans->status);
- }
+ if (ret)
return 1;
- }
if (iwl_mld_no_wowlan_suspend(mld))
return 1;
@@ -2065,9 +2060,8 @@ static int iwl_mld_set_key_add(struct iwl_mld *mld,
return -EOPNOTSUPP;
}
- if (vif->type == NL80211_IFTYPE_STATION &&
- (keyidx == 6 || keyidx == 7))
- rcu_assign_pointer(mld_vif->bigtks[keyidx - 6], key);
+ if (keyidx == 6 || keyidx == 7)
+ iwl_mld_track_bigtk(mld, vif, key, true);
/* After exiting from RFKILL, hostapd configures GTK/ITGK before the
* AP is started, but those keys can't be sent to the FW before the
@@ -2116,9 +2110,8 @@ static void iwl_mld_set_key_remove(struct iwl_mld *mld,
sta ? iwl_mld_sta_from_mac80211(sta) : NULL;
int keyidx = key->keyidx;
- if (vif->type == NL80211_IFTYPE_STATION &&
- (keyidx == 6 || keyidx == 7))
- RCU_INIT_POINTER(mld_vif->bigtks[keyidx - 6], NULL);
+ if (keyidx == 6 || keyidx == 7)
+ iwl_mld_track_bigtk(mld, vif, key, false);
if (mld_sta && key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
(key->cipher == WLAN_CIPHER_SUITE_CCMP ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mld.c b/drivers/net/wireless/intel/iwlwifi/mld/mld.c
index 7b46ccc306ab..a6962256bdd1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mld.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mld.c
@@ -147,6 +147,7 @@ iwl_mld_construct_fw_runtime(struct iwl_mld *mld, struct iwl_trans *trans,
*/
static const struct iwl_hcmd_names iwl_mld_legacy_names[] = {
HCMD_NAME(UCODE_ALIVE_NTFY),
+ HCMD_NAME(REPLY_ERROR),
HCMD_NAME(INIT_COMPLETE_NOTIF),
HCMD_NAME(PHY_CONTEXT_CMD),
HCMD_NAME(SCAN_CFG_CMD),
@@ -158,12 +159,14 @@ static const struct iwl_hcmd_names iwl_mld_legacy_names[] = {
HCMD_NAME(LEDS_CMD),
HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION),
HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION),
+ HCMD_NAME(PHY_CONFIGURATION_CMD),
HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
HCMD_NAME(POWER_TABLE_CMD),
HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
HCMD_NAME(BEACON_NOTIFICATION),
HCMD_NAME(BEACON_TEMPLATE_CMD),
HCMD_NAME(TX_ANT_CONFIGURATION_CMD),
+ HCMD_NAME(BT_CONFIG),
HCMD_NAME(REDUCE_TX_POWER_CMD),
HCMD_NAME(MISSED_BEACONS_NOTIFICATION),
HCMD_NAME(MAC_PM_POWER_TABLE),
@@ -251,6 +254,7 @@ static const struct iwl_hcmd_names iwl_mld_data_path_names[] = {
HCMD_NAME(TLC_MNG_CONFIG_CMD),
HCMD_NAME(RX_BAID_ALLOCATION_CONFIG_CMD),
HCMD_NAME(SCD_QUEUE_CONFIG_CMD),
+ HCMD_NAME(SEC_KEY_CMD),
HCMD_NAME(ESR_MODE_NOTIF),
HCMD_NAME(MONITOR_NOTIF),
HCMD_NAME(TLC_MNG_UPDATE_NOTIF),
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mlo.c b/drivers/net/wireless/intel/iwlwifi/mld/mlo.c
index e57f5388fe77..241a6271d13d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/mlo.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mlo.c
@@ -357,38 +357,26 @@ iwl_mld_vif_iter_emlsr_mode_notif(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
const struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
- enum iwl_mvm_fw_esr_recommendation action;
- const struct iwl_esr_mode_notif *notif = NULL;
-
- if (iwl_fw_lookup_notif_ver(mld_vif->mld->fw, DATA_PATH_GROUP,
- ESR_MODE_NOTIF, 0) > 1) {
- notif = (void *)data;
- action = le32_to_cpu(notif->action);
- } else {
- const struct iwl_esr_mode_notif_v1 *notif_v1 = (void *)data;
-
- action = le32_to_cpu(notif_v1->action);
- }
+ const struct iwl_esr_mode_notif *notif = (void *)data;
+ enum iwl_mvm_fw_esr_recommendation action = le32_to_cpu(notif->action);
if (!iwl_mld_vif_has_emlsr_cap(vif))
return;
switch (action) {
case ESR_RECOMMEND_LEAVE:
- if (notif)
- IWL_DEBUG_INFO(mld_vif->mld,
- "FW recommend leave reason = 0x%x\n",
- le32_to_cpu(notif->leave_reason_mask));
+ IWL_DEBUG_INFO(mld_vif->mld,
+ "FW recommend leave reason = 0x%x\n",
+ le32_to_cpu(notif->leave_reason_mask));
iwl_mld_exit_emlsr(mld_vif->mld, vif,
IWL_MLD_EMLSR_EXIT_FW_REQUEST,
iwl_mld_get_primary_link(vif));
break;
case ESR_FORCE_LEAVE:
- if (notif)
- IWL_DEBUG_INFO(mld_vif->mld,
- "FW force leave reason = 0x%x\n",
- le32_to_cpu(notif->leave_reason_mask));
+ IWL_DEBUG_INFO(mld_vif->mld,
+ "FW force leave reason = 0x%x\n",
+ le32_to_cpu(notif->leave_reason_mask));
fallthrough;
case ESR_RECOMMEND_ENTER:
default:
@@ -735,12 +723,6 @@ iwl_mld_set_link_sel_data(struct iwl_mld *mld,
u16 max_grade = 0;
unsigned long link_id;
- /*
- * TODO: don't select links that weren't discovered in the last scan
- * This requires mac80211 (or cfg80211) changes to forward/track when
- * a BSS was last updated. cfg80211 already tracks this information but
- * it is not exposed within the kernel.
- */
for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
struct ieee80211_bss_conf *link_conf =
link_conf_dereference_protected(vif, link_id);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/notif.c b/drivers/net/wireless/intel/iwlwifi/mld/notif.c
index f17aeca4fae6..884973d0b344 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/notif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/notif.c
@@ -333,7 +333,6 @@ CMD_VERSIONS(bt_coex_notif,
CMD_VERSIONS(beacon_notification,
CMD_VER_ENTRY(6, iwl_extended_beacon_notif))
CMD_VERSIONS(emlsr_mode_notif,
- CMD_VER_ENTRY(1, iwl_esr_mode_notif_v1)
CMD_VER_ENTRY(2, iwl_esr_mode_notif))
CMD_VERSIONS(emlsr_trans_fail_notif,
CMD_VER_ENTRY(1, iwl_esr_trans_fail_notif))
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c b/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c
index 75d2f5cb23a7..40571125b3ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c
@@ -163,18 +163,32 @@ int iwl_mld_init_sgom(struct iwl_mld *mld)
static int iwl_mld_ppag_send_cmd(struct iwl_mld *mld)
{
- union iwl_ppag_table_cmd cmd = {};
- int ret, len;
+ struct iwl_fw_runtime *fwrt = &mld->fwrt;
+ union iwl_ppag_table_cmd cmd = {
+ .v7.ppag_config_info.table_source = fwrt->ppag_bios_source,
+ .v7.ppag_config_info.table_revision = fwrt->ppag_bios_rev,
+ .v7.ppag_config_info.value = cpu_to_le32(fwrt->ppag_flags),
+ };
+ int ret;
- ret = iwl_fill_ppag_table(&mld->fwrt, &cmd, &len);
- /* Not supporting PPAG table is a valid scenario */
- if (ret < 0)
- return 0;
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG MODE bits going to be sent: %d\n",
+ fwrt->ppag_flags);
+
+ for (int chain = 0; chain < IWL_NUM_CHAIN_LIMITS; chain++) {
+ for (int subband = 0; subband < IWL_NUM_SUB_BANDS_V2; subband++) {
+ cmd.v7.gain[chain][subband] =
+ fwrt->ppag_chains[chain].subbands[subband];
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table: chain[%d] band[%d]: gain = %d\n",
+ chain, subband, cmd.v7.gain[chain][subband]);
+ }
+ }
IWL_DEBUG_RADIO(mld, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(PHY_OPS_GROUP,
PER_PLATFORM_ANT_GAIN_CMD),
- &cmd, len);
+ &cmd, sizeof(cmd.v7));
if (ret < 0)
IWL_ERR(mld, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n",
ret);
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/roc.c b/drivers/net/wireless/intel/iwlwifi/mld/roc.c
index e85f45bce79a..4136c98030d0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/roc.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/roc.c
@@ -82,9 +82,6 @@ int iwl_mld_start_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct iwl_roc_req cmd = {
.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
};
- u8 ver = iwl_fw_lookup_cmd_ver(mld->fw,
- WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0);
- u16 cmd_len = ver < 6 ? sizeof(struct iwl_roc_req_v5) : sizeof(cmd);
enum iwl_roc_activity activity;
int ret = 0;
@@ -140,7 +137,7 @@ int iwl_mld_start_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
memcpy(cmd.node_addr, vif->addr, ETH_ALEN);
ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
- &cmd, cmd_len);
+ &cmd);
if (ret) {
IWL_ERR(mld, "Couldn't send the ROC_CMD\n");
return ret;
@@ -190,9 +187,6 @@ int iwl_mld_cancel_roc(struct ieee80211_hw *hw,
struct iwl_roc_req cmd = {
.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
};
- u8 ver = iwl_fw_lookup_cmd_ver(mld->fw,
- WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0);
- u16 cmd_len = ver < 6 ? sizeof(struct iwl_roc_req_v5) : sizeof(cmd);
int ret;
lockdep_assert_wiphy(mld->wiphy);
@@ -208,7 +202,7 @@ int iwl_mld_cancel_roc(struct ieee80211_hw *hw,
cmd.activity = cpu_to_le32(mld_vif->roc_activity);
ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
- &cmd, cmd_len);
+ &cmd);
if (ret)
IWL_ERR(mld, "Couldn't send the command to cancel the ROC\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/rx.c b/drivers/net/wireless/intel/iwlwifi/mld/rx.c
index b6dedd1ecd4d..20d866dd92c2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/rx.c
@@ -1611,20 +1611,21 @@ iwl_mld_rx_with_sta(struct iwl_mld *mld, struct ieee80211_hdr *hdr,
return sta;
}
-#define KEY_IDX_LEN 2
-
static int iwl_mld_rx_mgmt_prot(struct ieee80211_sta *sta,
struct ieee80211_hdr *hdr,
struct ieee80211_rx_status *rx_status,
u32 mpdu_status,
u32 mpdu_len)
{
+ struct iwl_mld_link *link;
struct wireless_dev *wdev;
struct iwl_mld_sta *mld_sta;
struct iwl_mld_vif *mld_vif;
u8 keyidx;
struct ieee80211_key_conf *key;
const u8 *frame = (void *)hdr;
+ const u8 *mmie;
+ u8 link_id;
if ((mpdu_status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
IWL_RX_MPDU_STATUS_SEC_NONE)
@@ -1657,21 +1658,30 @@ static int iwl_mld_rx_mgmt_prot(struct ieee80211_sta *sta,
return 0;
}
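+ /* frames without link info default to link 0 */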
+ link_id = rx_status->link_valid ? rx_status->link_id : 0;
+ link = rcu_dereference(mld_vif->link[link_id]);
+ if (WARN_ON_ONCE(!link))
+ return -1;
+
/* both keys will have the same cipher and MIC length, use
* whichever one is available
*/
- key = rcu_dereference(mld_vif->bigtks[0]);
+ key = rcu_dereference(link->bigtks[0]);
if (!key) {
- key = rcu_dereference(mld_vif->bigtks[1]);
+ key = rcu_dereference(link->bigtks[1]);
if (!key)
goto report;
}
- if (mpdu_len < key->icv_len + IEEE80211_GMAC_PN_LEN + KEY_IDX_LEN)
+ /* get the real key ID */
+ if (mpdu_len < key->icv_len)
goto report;
- /* get the real key ID */
- keyidx = frame[mpdu_len - key->icv_len - IEEE80211_GMAC_PN_LEN - KEY_IDX_LEN];
+ mmie = frame + (mpdu_len - key->icv_len);
+
+ /* key_id sits at the same offset in ieee80211_mmie and ieee80211_mmie_16 */
+ keyidx = le16_to_cpu(((const struct ieee80211_mmie *)mmie)->key_id);
+
/* and if that's the other key, look it up */
if (keyidx != key->keyidx) {
/* shouldn't happen since firmware checked, but be safe
@@ -1680,7 +1690,7 @@ static int iwl_mld_rx_mgmt_prot(struct ieee80211_sta *sta,
if (keyidx != 6 && keyidx != 7)
return -1;
- key = rcu_dereference(mld_vif->bigtks[keyidx - 6]);
+ key = rcu_dereference(link->bigtks[keyidx - 6]);
if (!key)
goto report;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/scan.c b/drivers/net/wireless/intel/iwlwifi/mld/scan.c
index 62f97a18a16c..fd1022ddc912 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/scan.c
@@ -504,9 +504,7 @@ iwl_mld_scan_get_cmd_gen_flags2(struct iwl_mld *mld,
*/
if (scan_status == IWL_MLD_SCAN_REGULAR &&
ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_AP &&
- gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE &&
- iwl_fw_lookup_notif_ver(mld->fw, SCAN_GROUP,
- CHANNEL_SURVEY_NOTIF, 0) >= 1)
+ gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE)
flags |= IWL_UMAC_SCAN_GEN_FLAGS2_COLLECT_CHANNEL_STATS;
return flags;
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/sta.c b/drivers/net/wireless/intel/iwlwifi/mld/sta.c
index 8fb51209b4a6..5cdbfa29a202 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/sta.c
@@ -401,11 +401,9 @@ static u32 iwl_mld_get_htc_flags(struct ieee80211_link_sta *link_sta)
static int iwl_mld_send_sta_cmd(struct iwl_mld *mld,
const struct iwl_sta_cfg_cmd *cmd)
{
- u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD);
- int cmd_len = iwl_fw_lookup_cmd_ver(mld->fw, cmd_id, 0) > 1 ?
- sizeof(*cmd) :
- sizeof(struct iwl_sta_cfg_cmd_v1);
- int ret = iwl_mld_send_cmd_pdu(mld, cmd_id, cmd, cmd_len);
+ int ret = iwl_mld_send_cmd_pdu(mld,
+ WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD),
+ cmd);
if (ret)
IWL_ERR(mld, "STA_CONFIG_CMD send failed, ret=0x%x\n", ret);
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/stats.c b/drivers/net/wireless/intel/iwlwifi/mld/stats.c
index cbc64db5eab6..7b8709716324 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/stats.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/stats.c
@@ -379,11 +379,14 @@ static void iwl_mld_update_link_sig(struct ieee80211_vif *vif, int sig,
/* TODO: task=statistics handle CQM notifications */
- if (sig < IWL_MLD_LOW_RSSI_MLO_SCAN_THRESH)
- iwl_mld_int_mlo_scan(mld, vif);
-
- if (!iwl_mld_emlsr_active(vif))
+ if (!iwl_mld_emlsr_active(vif)) {
+ /* We're not in EMLSR and our signal is bad; try to
+ * switch to a better link. The EMLSR case is handled below.
+ */
+ if (sig < IWL_MLD_LOW_RSSI_MLO_SCAN_THRESH)
+ iwl_mld_int_mlo_scan(mld, vif);
return;
+ }
/* We are in EMLSR, check if we need to exit */
exit_emlsr_thresh =
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tlc.c b/drivers/net/wireless/intel/iwlwifi/mld/tlc.c
index a9ca92c0455e..0e172281b0c8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/tlc.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tlc.c
@@ -157,9 +157,9 @@ iwl_mld_get_highest_fw_mcs(const struct ieee80211_sta_vht_cap *vht_cap,
static void
iwl_mld_fill_vht_rates(const struct ieee80211_link_sta *link_sta,
const struct ieee80211_sta_vht_cap *vht_cap,
- struct iwl_tlc_config_cmd_v4 *cmd)
+ struct iwl_tlc_config_cmd *cmd)
{
- u16 supp;
+ u32 supp;
int i, highest_mcs;
u8 max_nss = link_sta->rx_nss;
struct ieee80211_vht_cap ieee_vht_cap = {
@@ -182,7 +182,7 @@ iwl_mld_fill_vht_rates(const struct ieee80211_link_sta *link_sta,
if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9);
- cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(supp);
+ cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] = cpu_to_le32(supp);
/* Check if VHT extended NSS indicates that the bandwidth/NSS
* configuration is supported - only for MCS 0 since we already
* decoded the MCS bits anyway ourselves.
@@ -196,7 +196,7 @@ iwl_mld_fill_vht_rates(const struct ieee80211_link_sta *link_sta,
}
}
-static u16 iwl_mld_he_mac80211_mcs_to_fw_mcs(u16 mcs)
+static u32 iwl_mld_he_mac80211_mcs_to_fw_mcs(u16 mcs)
{
switch (mcs) {
case IEEE80211_HE_MCS_SUPPORT_0_7:
@@ -216,7 +216,7 @@ static u16 iwl_mld_he_mac80211_mcs_to_fw_mcs(u16 mcs)
static void
iwl_mld_fill_he_rates(const struct ieee80211_link_sta *link_sta,
const struct ieee80211_sta_he_cap *own_he_cap,
- struct iwl_tlc_config_cmd_v4 *cmd)
+ struct iwl_tlc_config_cmd *cmd)
{
const struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap;
u16 mcs_160 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
@@ -245,7 +245,7 @@ iwl_mld_fill_he_rates(const struct ieee80211_link_sta *link_sta,
if (_mcs_80 > _tx_mcs_80)
_mcs_80 = _tx_mcs_80;
cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] =
- cpu_to_le16(iwl_mld_he_mac80211_mcs_to_fw_mcs(_mcs_80));
+ cpu_to_le32(iwl_mld_he_mac80211_mcs_to_fw_mcs(_mcs_80));
/* If one side doesn't support - mark both as not supporting */
if (_mcs_160 == IEEE80211_HE_MCS_NOT_SUPPORTED ||
@@ -256,19 +256,19 @@ iwl_mld_fill_he_rates(const struct ieee80211_link_sta *link_sta,
if (_mcs_160 > _tx_mcs_160)
_mcs_160 = _tx_mcs_160;
cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_160] =
- cpu_to_le16(iwl_mld_he_mac80211_mcs_to_fw_mcs(_mcs_160));
+ cpu_to_le32(iwl_mld_he_mac80211_mcs_to_fw_mcs(_mcs_160));
}
}
-static void iwl_mld_set_eht_mcs(__le16 ht_rates[][3],
+static void iwl_mld_set_eht_mcs(__le32 ht_rates[][3],
enum IWL_TLC_MCS_PER_BW bw,
- u8 max_nss, u16 mcs_msk)
+ u8 max_nss, u32 mcs_msk)
{
if (max_nss >= 2)
- ht_rates[IWL_TLC_NSS_2][bw] |= cpu_to_le16(mcs_msk);
+ ht_rates[IWL_TLC_NSS_2][bw] |= cpu_to_le32(mcs_msk);
if (max_nss >= 1)
- ht_rates[IWL_TLC_NSS_1][bw] |= cpu_to_le16(mcs_msk);
+ ht_rates[IWL_TLC_NSS_1][bw] |= cpu_to_le32(mcs_msk);
}
static const
@@ -307,7 +307,7 @@ iwl_mld_fill_eht_rates(struct ieee80211_vif *vif,
const struct ieee80211_link_sta *link_sta,
const struct ieee80211_sta_he_cap *own_he_cap,
const struct ieee80211_sta_eht_cap *own_eht_cap,
- struct iwl_tlc_config_cmd_v4 *cmd)
+ struct iwl_tlc_config_cmd *cmd)
{
/* peer RX mcs capa */
const struct ieee80211_eht_mcs_nss_supp *eht_rx_mcs =
@@ -405,7 +405,7 @@ iwl_mld_fill_supp_rates(struct iwl_mld *mld, struct ieee80211_vif *vif,
struct ieee80211_supported_band *sband,
const struct ieee80211_sta_he_cap *own_he_cap,
const struct ieee80211_sta_eht_cap *own_eht_cap,
- struct iwl_tlc_config_cmd_v4 *cmd)
+ struct iwl_tlc_config_cmd *cmd)
{
int i;
u16 non_ht_rates = 0;
@@ -435,7 +435,7 @@ iwl_mld_fill_supp_rates(struct iwl_mld *mld, struct ieee80211_vif *vif,
} else if (ht_cap->ht_supported) {
cmd->mode = IWL_TLC_MNG_MODE_HT;
cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_MCS_PER_BW_80] =
- cpu_to_le16(ht_cap->mcs.rx_mask[0]);
+ cpu_to_le32(ht_cap->mcs.rx_mask[0]);
/* the station support only a single receive chain */
if (link_sta->smps_mode == IEEE80211_SMPS_STATIC)
@@ -443,10 +443,30 @@ iwl_mld_fill_supp_rates(struct iwl_mld *mld, struct ieee80211_vif *vif,
0;
else
cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] =
- cpu_to_le16(ht_cap->mcs.rx_mask[1]);
+ cpu_to_le32(ht_cap->mcs.rx_mask[1]);
}
}
+static void iwl_mld_convert_tlc_cmd_to_v4(struct iwl_tlc_config_cmd *cmd,
+ struct iwl_tlc_config_cmd_v4 *cmd_v4)
+{
+ /* Copy everything until ht_rates */
+ memcpy(cmd_v4, cmd, offsetof(struct iwl_tlc_config_cmd, ht_rates));
+
+ /* Convert ht_rates from __le32 to __le16 */
+ BUILD_BUG_ON(ARRAY_SIZE(cmd_v4->ht_rates) != ARRAY_SIZE(cmd->ht_rates));
+ BUILD_BUG_ON(ARRAY_SIZE(cmd_v4->ht_rates[0]) != ARRAY_SIZE(cmd->ht_rates[0]));
+
+ for (int nss = 0; nss < ARRAY_SIZE(cmd->ht_rates); nss++)
+ for (int bw = 0; bw < ARRAY_SIZE(cmd->ht_rates[nss]); bw++)
+ cmd_v4->ht_rates[nss][bw] =
+ cpu_to_le16(le32_to_cpu(cmd->ht_rates[nss][bw]));
+
+ /* Copy the rest */
+ cmd_v4->max_mpdu_len = cmd->max_mpdu_len;
+ cmd_v4->max_tx_op = cmd->max_tx_op;
+}
+
static void iwl_mld_send_tlc_cmd(struct iwl_mld *mld,
struct ieee80211_vif *vif,
struct ieee80211_link_sta *link_sta,
@@ -458,7 +478,7 @@ static void iwl_mld_send_tlc_cmd(struct iwl_mld *mld,
ieee80211_get_he_iftype_cap_vif(sband, vif);
const struct ieee80211_sta_eht_cap *own_eht_cap =
ieee80211_get_eht_iftype_cap_vif(sband, vif);
- struct iwl_tlc_config_cmd_v4 cmd = {
+ struct iwl_tlc_config_cmd cmd = {
/* For AP mode, use 20 MHz until the STA is authorized */
.max_ch_width = mld_sta->sta_state > IEEE80211_STA_ASSOC ?
iwl_mld_fw_bw_from_sta_bw(link_sta) :
@@ -470,6 +490,11 @@ static void iwl_mld_send_tlc_cmd(struct iwl_mld *mld,
.max_mpdu_len = cpu_to_le16(link_sta->agg.max_amsdu_len),
};
int fw_sta_id = iwl_mld_fw_sta_id_from_link_sta(mld, link_sta);
+ u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, TLC_MNG_CONFIG_CMD);
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw, cmd_id, 0);
+ struct iwl_tlc_config_cmd_v4 cmd_v4;
+ void *cmd_ptr;
+ u8 cmd_size;
int ret;
if (fw_sta_id < 0)
@@ -481,14 +506,26 @@ static void iwl_mld_send_tlc_cmd(struct iwl_mld *mld,
own_he_cap, own_eht_cap,
&cmd);
+ if (cmd_ver == 5) {
+ cmd_ptr = &cmd;
+ cmd_size = sizeof(cmd);
+ } else if (cmd_ver == 4) {
+ iwl_mld_convert_tlc_cmd_to_v4(&cmd, &cmd_v4);
+ cmd_ptr = &cmd_v4;
+ cmd_size = sizeof(cmd_v4);
+ } else {
+ IWL_ERR(mld, "Unsupported TLC config cmd version %d\n",
+ cmd_ver);
+ return;
+ }
+
IWL_DEBUG_RATE(mld,
"TLC CONFIG CMD, sta_id=%d, max_ch_width=%d, mode=%d\n",
cmd.sta_id, cmd.max_ch_width, cmd.mode);
/* Send async since this can be called within a RCU-read section */
- ret = iwl_mld_send_cmd_with_flags_pdu(mld, WIDE_ID(DATA_PATH_GROUP,
- TLC_MNG_CONFIG_CMD),
- CMD_ASYNC, &cmd);
+ ret = iwl_mld_send_cmd_with_flags_pdu(mld, cmd_id, CMD_ASYNC, cmd_ptr,
+ cmd_size);
if (ret)
IWL_ERR(mld, "Failed to send TLC cmd (%d)\n", ret);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index 13cdc077d8d3..ca63f780140f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -253,93 +253,6 @@ static void iwl_mvm_bt_coex_tcm_based_ci(struct iwl_mvm *mvm,
swap(data->primary, data->secondary);
}
-/*
- * This function receives the LB link id and checks if eSR should be
- * enabled or disabled (due to BT coex)
- */
-bool
-iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- s32 link_rssi,
- bool primary)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- bool have_wifi_loss_rate =
- iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
- BT_PROFILE_NOTIFICATION, 0) > 4 ||
- iwl_fw_lookup_notif_ver(mvm->fw, BT_COEX_GROUP,
- PROFILE_NOTIF, 0) >= 1;
- u8 wifi_loss_mid_high_rssi;
- u8 wifi_loss_low_rssi;
- u8 wifi_loss_rate;
-
- if (iwl_fw_lookup_notif_ver(mvm->fw, BT_COEX_GROUP,
- PROFILE_NOTIF, 0) >= 1) {
- /* For now, we consider 2.4 GHz band / ANT_A only */
- wifi_loss_mid_high_rssi =
- mvm->last_bt_wifi_loss.wifi_loss_mid_high_rssi[PHY_BAND_24][0];
- wifi_loss_low_rssi =
- mvm->last_bt_wifi_loss.wifi_loss_low_rssi[PHY_BAND_24][0];
- } else {
- wifi_loss_mid_high_rssi = mvm->last_bt_notif.wifi_loss_mid_high_rssi;
- wifi_loss_low_rssi = mvm->last_bt_notif.wifi_loss_low_rssi;
- }
-
- if (wifi_loss_low_rssi == BT_OFF)
- return true;
-
- if (primary)
- return false;
-
- /* The feature is not supported */
- if (!have_wifi_loss_rate)
- return true;
-
-
- /*
- * In case we don't know the RSSI - take the lower wifi loss,
- * so we will more likely enter eSR, and if RSSI is low -
- * we will get an update on this and exit eSR.
- */
- if (!link_rssi)
- wifi_loss_rate = wifi_loss_mid_high_rssi;
-
- else if (mvmvif->esr_active)
- /* RSSI needs to get really low to disable eSR... */
- wifi_loss_rate =
- link_rssi <= -IWL_MVM_BT_COEX_DISABLE_ESR_THRESH ?
- wifi_loss_low_rssi :
- wifi_loss_mid_high_rssi;
- else
- /* ...And really high before we enable it back */
- wifi_loss_rate =
- link_rssi <= -IWL_MVM_BT_COEX_ENABLE_ESR_THRESH ?
- wifi_loss_low_rssi :
- wifi_loss_mid_high_rssi;
-
- return wifi_loss_rate <= IWL_MVM_BT_COEX_WIFI_LOSS_THRESH;
-}
-
-void iwl_mvm_bt_coex_update_link_esr(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- int link_id)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm_vif_link_info *link = mvmvif->link[link_id];
-
- if (!ieee80211_vif_is_mld(vif) ||
- !iwl_mvm_vif_from_mac80211(vif)->authorized ||
- WARN_ON(!link))
- return;
-
- if (!iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif,
- (s8)link->beacon_stats.avg_signal,
- link_id == iwl_mvm_get_primary_link(vif)))
- /* In case we decided to exit eSR - stay with the primary */
- iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_COEX,
- iwl_mvm_get_primary_link(vif));
-}
-
static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_bt_iterator_data *data,
@@ -385,8 +298,6 @@ static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm,
return;
}
- iwl_mvm_bt_coex_update_link_esr(mvm, vif, link_id);
-
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2))
min_ag_for_static_smps = BT_VERY_HIGH_TRAFFIC;
else
@@ -525,32 +436,6 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
iwl_mvm_bt_notif_per_link(mvm, vif, data, link_id);
}
-/* must be called under rcu_read_lock */
-static void iwl_mvm_bt_coex_notif_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm *mvm = _data;
- struct ieee80211_bss_conf *link_conf;
- unsigned int link_id;
-
- lockdep_assert_held(&mvm->mutex);
-
- if (vif->type != NL80211_IFTYPE_STATION)
- return;
-
- for_each_vif_active_link(vif, link_conf, link_id) {
- struct ieee80211_chanctx_conf *chanctx_conf =
- rcu_dereference_check(link_conf->chanctx_conf,
- lockdep_is_held(&mvm->mutex));
-
- if ((!chanctx_conf ||
- chanctx_conf->def.chan->band != NL80211_BAND_2GHZ))
- continue;
-
- iwl_mvm_bt_coex_update_link_esr(mvm, vif, link_id);
- }
-}
-
static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
{
struct iwl_bt_iterator_data data = {
@@ -654,22 +539,6 @@ void iwl_mvm_rx_bt_coex_old_notif(struct iwl_mvm *mvm,
iwl_mvm_bt_coex_notif_handle(mvm);
}
-void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb)
-{
- const struct iwl_rx_packet *pkt = rxb_addr(rxb);
- const struct iwl_bt_coex_profile_notif *notif = (const void *)pkt->data;
-
- lockdep_assert_held(&mvm->mutex);
-
- mvm->last_bt_wifi_loss = *notif;
-
- ieee80211_iterate_active_interfaces(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_bt_coex_notif_iterator,
- mvm);
-}
-
void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_rssi_event_data rssi_event)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
index 776600ddaea6..17f94663c941 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
- * Copyright (C) 2013-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2013-2014, 2018-2025 Intel Corporation
* Copyright (C) 2015 Intel Deutschland GmbH
*/
#ifndef __MVM_CONSTANTS_H
@@ -11,15 +11,7 @@
#include "fw-api.h"
#define IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM 20
-#define IWL_MVM_BT_COEX_DISABLE_ESR_THRESH 69
-#define IWL_MVM_BT_COEX_ENABLE_ESR_THRESH 63
-#define IWL_MVM_BT_COEX_WIFI_LOSS_THRESH 0
#define IWL_MVM_TRIGGER_LINK_SEL_TIME_SEC 30
-#define IWL_MVM_TPT_COUNT_WINDOW_SEC 5
-#define IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS 5
-#define IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH 15
-#define IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_BSS_PARAM_CHANGED 11
-#define IWL_MVM_LOW_RSSI_MLO_SCAN_THRESH -72
#define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
#define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
@@ -129,14 +121,4 @@
#define IWL_MVM_MIN_BEACON_INTERVAL_TU 16
#define IWL_MVM_AUTO_EML_ENABLE true
-#define IWL_MVM_HIGH_RSSI_THRESH_20MHZ -67
-#define IWL_MVM_LOW_RSSI_THRESH_20MHZ -71
-#define IWL_MVM_HIGH_RSSI_THRESH_40MHZ -64
-#define IWL_MVM_LOW_RSSI_THRESH_40MHZ -67
-#define IWL_MVM_HIGH_RSSI_THRESH_80MHZ -61
-#define IWL_MVM_LOW_RSSI_THRESH_80MHZ -74
-#define IWL_MVM_HIGH_RSSI_THRESH_160MHZ -58
-#define IWL_MVM_LOW_RSSI_THRESH_160MHZ -61
-
-#define IWL_MVM_ENTER_ESR_TPT_THRESH 400
#endif /* __MVM_CONSTANTS_H */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 029c846a236f..07f1a84c274e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -928,6 +928,10 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
if (ap_sta->mfp)
wowlan_config_cmd->flags |= IS_11W_ASSOC;
+ if (rcu_access_pointer(mvmvif->bcn_prot.keys[0]) ||
+ rcu_access_pointer(mvmvif->bcn_prot.keys[1]))
+ wowlan_config_cmd->flags |= HAS_BEACON_PROTECTION;
+
if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_CONFIGURATION, 0) < 6) {
/* Query the last used seqno and set it */
int ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
@@ -1238,15 +1242,14 @@ static void iwl_mvm_free_nd(struct iwl_mvm *mvm)
}
static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
- struct cfg80211_wowlan *wowlan,
- bool test)
+ struct cfg80211_wowlan *wowlan)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct ieee80211_vif *vif = NULL;
struct iwl_mvm_vif *mvmvif = NULL;
struct ieee80211_sta *ap_sta = NULL;
struct iwl_mvm_vif_link_info *mvm_link;
- struct iwl_d3_manager_config d3_cfg_cmd_data = {
+ struct iwl_d3_manager_config d3_cfg_cmd = {
/*
* Program the minimum sleep time to 10 seconds, as many
* platforms have issues processing a wakeup signal while
@@ -1254,23 +1257,14 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
*/
.min_sleep_time = cpu_to_le32(10 * 1000 * 1000),
};
- struct iwl_host_cmd d3_cfg_cmd = {
- .id = D3_CONFIG_CMD,
- .flags = CMD_WANT_SKB,
- .data[0] = &d3_cfg_cmd_data,
- .len[0] = sizeof(d3_cfg_cmd_data),
- };
int ret;
int len __maybe_unused;
bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
if (!wowlan) {
- /*
- * mac80211 shouldn't get here, but for D3 test
- * it doesn't warrant a warning
- */
- WARN_ON(!test);
+ /* mac80211 shouldn't get here */
+ WARN_ON(1);
return -EINVAL;
}
@@ -1278,10 +1272,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (IS_ERR_OR_NULL(vif))
return 1;
- ret = iwl_mvm_block_esr_sync(mvm, vif, IWL_MVM_ESR_BLOCKED_WOWLAN);
- if (ret)
- return ret;
-
mutex_lock(&mvm->mutex);
set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
@@ -1351,7 +1341,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (mvm->d3_wake_sysassert)
- d3_cfg_cmd_data.wakeup_flags |=
+ d3_cfg_cmd.wakeup_flags |=
cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR);
#endif
@@ -1364,21 +1354,14 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
iwl_fw_dbg_stop_restart_recording(&mvm->fwrt, NULL, true);
/* must be last -- this switches firmware state */
- ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, 0, sizeof(d3_cfg_cmd),
+ &d3_cfg_cmd);
if (ret)
goto out;
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- len = iwl_rx_packet_payload_len(d3_cfg_cmd.resp_pkt);
- if (len >= sizeof(u32)) {
- mvm->d3_test_pme_ptr =
- le32_to_cpup((__le32 *)d3_cfg_cmd.resp_pkt->data);
- }
-#endif
- iwl_free_resp(&d3_cfg_cmd);
clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
- ret = iwl_trans_d3_suspend(mvm->trans, test, !unified_image);
+ ret = iwl_trans_d3_suspend(mvm->trans, !unified_image);
out:
if (ret < 0) {
iwl_mvm_free_nd(mvm);
@@ -1401,7 +1384,7 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
iwl_fw_runtime_suspend(&mvm->fwrt);
mutex_unlock(&mvm->mutex);
- return __iwl_mvm_suspend(hw, wowlan, false);
+ return __iwl_mvm_suspend(hw, wowlan);
}
struct iwl_multicast_key_data {
@@ -1794,63 +1777,8 @@ static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
struct iwl_mvm_d3_gtk_iter_data {
struct iwl_mvm *mvm;
struct iwl_wowlan_status_data *status;
- u32 gtk_cipher, igtk_cipher, bigtk_cipher;
- bool unhandled_cipher, igtk_support, bigtk_support;
- int num_keys;
};
-static void iwl_mvm_d3_find_last_keys(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key,
- void *_data)
-{
- struct iwl_mvm_d3_gtk_iter_data *data = _data;
- int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
-
- if (link_id >= 0 && key->link_id >= 0 && link_id != key->link_id)
- return;
-
- if (data->unhandled_cipher)
- return;
-
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- /* ignore WEP completely, nothing to do */
- return;
- case WLAN_CIPHER_SUITE_CCMP:
- case WLAN_CIPHER_SUITE_GCMP:
- case WLAN_CIPHER_SUITE_GCMP_256:
- case WLAN_CIPHER_SUITE_TKIP:
- /* we support these */
- data->gtk_cipher = key->cipher;
- break;
- case WLAN_CIPHER_SUITE_BIP_GMAC_128:
- case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- case WLAN_CIPHER_SUITE_BIP_CMAC_256:
- case WLAN_CIPHER_SUITE_AES_CMAC:
- /* we support these */
- if (data->igtk_support &&
- (key->keyidx == 4 || key->keyidx == 5)) {
- data->igtk_cipher = key->cipher;
- } else if (data->bigtk_support &&
- (key->keyidx == 6 || key->keyidx == 7)) {
- data->bigtk_cipher = key->cipher;
- } else {
- data->unhandled_cipher = true;
- return;
- }
- break;
- default:
- /* everything else - disconnect from AP */
- data->unhandled_cipher = true;
- return;
- }
-
- data->num_keys++;
-}
-
static void
iwl_mvm_d3_set_igtk_bigtk_ipn(const struct iwl_multicast_key_data *key,
struct ieee80211_key_seq *seq, u32 cipher)
@@ -1896,9 +1824,6 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
if (link_id >= 0 && key->link_id >= 0 && link_id != key->link_id)
return;
- if (data->unhandled_cipher)
- return;
-
switch (key->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
@@ -1947,52 +1872,24 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status,
struct ieee80211_vif *vif,
- struct iwl_mvm *mvm, u32 gtk_cipher)
+ struct iwl_mvm *mvm)
{
int i, j;
struct ieee80211_key_conf *key;
- DEFINE_RAW_FLEX(struct ieee80211_key_conf, conf, key,
- WOWLAN_KEY_MAX_SIZE);
int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
- u8 key_data[WOWLAN_KEY_MAX_SIZE];
-
- conf->cipher = gtk_cipher;
-
- BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP);
- BUILD_BUG_ON(WOWLAN_KEY_MAX_SIZE < WLAN_KEY_LEN_CCMP);
- BUILD_BUG_ON(WOWLAN_KEY_MAX_SIZE < WLAN_KEY_LEN_GCMP_256);
- BUILD_BUG_ON(WOWLAN_KEY_MAX_SIZE < WLAN_KEY_LEN_TKIP);
- BUILD_BUG_ON(WOWLAN_KEY_MAX_SIZE < sizeof(status->gtk[0].key));
-
- switch (gtk_cipher) {
- case WLAN_CIPHER_SUITE_CCMP:
- case WLAN_CIPHER_SUITE_GCMP:
- conf->keylen = WLAN_KEY_LEN_CCMP;
- break;
- case WLAN_CIPHER_SUITE_GCMP_256:
- conf->keylen = WLAN_KEY_LEN_GCMP_256;
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- conf->keylen = WLAN_KEY_LEN_TKIP;
- break;
- default:
- WARN_ON(1);
- }
for (i = 0; i < ARRAY_SIZE(status->gtk); i++) {
if (!status->gtk[i].len)
continue;
- conf->keyidx = status->gtk[i].id;
IWL_DEBUG_WOWLAN(mvm,
- "Received from FW GTK cipher %d, key index %d\n",
- conf->cipher, conf->keyidx);
- memcpy(conf->key, status->gtk[i].key,
- sizeof(status->gtk[i].key));
- memcpy(key_data, status->gtk[i].key, sizeof(status->gtk[i].key));
-
- key = ieee80211_gtk_rekey_add(vif, status->gtk[i].id, key_data,
- sizeof(key_data), link_id);
+ "Received from FW GTK: key index %d\n",
+ status->gtk[i].id);
+
+ key = ieee80211_gtk_rekey_add(vif, status->gtk[i].id,
+ status->gtk[i].key,
+ sizeof(status->gtk[i].key),
+ link_id);
if (IS_ERR(key)) {
/* FW may also send the old keys */
if (PTR_ERR(key) == -EALREADY)
@@ -2015,53 +1912,26 @@ static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status,
static bool
iwl_mvm_d3_igtk_bigtk_rekey_add(struct iwl_wowlan_status_data *status,
- struct ieee80211_vif *vif, u32 cipher,
+ struct ieee80211_vif *vif,
struct iwl_multicast_key_data *key_data)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- DEFINE_RAW_FLEX(struct ieee80211_key_conf, conf, key,
- WOWLAN_KEY_MAX_SIZE);
struct ieee80211_key_conf *key_config;
struct ieee80211_key_seq seq;
int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
- u8 key[WOWLAN_KEY_MAX_SIZE];
s8 keyidx = key_data->id;
- conf->cipher = cipher;
- conf->keyidx = keyidx;
-
if (!key_data->len)
return true;
- iwl_mvm_d3_set_igtk_bigtk_ipn(key_data, &seq, conf->cipher);
-
- switch (cipher) {
- case WLAN_CIPHER_SUITE_BIP_GMAC_128:
- conf->keylen = WLAN_KEY_LEN_BIP_GMAC_128;
- break;
- case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- conf->keylen = WLAN_KEY_LEN_BIP_GMAC_256;
- break;
- case WLAN_CIPHER_SUITE_AES_CMAC:
- conf->keylen = WLAN_KEY_LEN_AES_CMAC;
- break;
- case WLAN_CIPHER_SUITE_BIP_CMAC_256:
- conf->keylen = WLAN_KEY_LEN_BIP_CMAC_256;
- break;
- default:
- WARN_ON(1);
- }
- BUILD_BUG_ON(WOWLAN_KEY_MAX_SIZE < sizeof(key_data->key));
- memcpy(conf->key, key_data->key, conf->keylen);
-
- memcpy(key, key_data->key, sizeof(key_data->key));
-
- key_config = ieee80211_gtk_rekey_add(vif, keyidx, key, sizeof(key),
- link_id);
+ key_config = ieee80211_gtk_rekey_add(vif, keyidx, key_data->key,
+ sizeof(key_data->key), link_id);
if (IS_ERR(key_config)) {
/* FW may also send the old keys */
return PTR_ERR(key_config) == -EALREADY;
}
+
+ iwl_mvm_d3_set_igtk_bigtk_ipn(key_data, &seq, key_config->cipher);
ieee80211_set_key_rx_seq(key_config, 0, &seq);
if (keyidx == 4 || keyidx == 5) {
@@ -2115,27 +1985,6 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
if (!status || !vif->bss_conf.bssid)
return false;
-
- if (iwl_mvm_lookup_wowlan_status_ver(mvm) > 6 ||
- iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
- WOWLAN_INFO_NOTIFICATION,
- 0))
- gtkdata.igtk_support = true;
-
- if (iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
- WOWLAN_INFO_NOTIFICATION,
- 0) >= 3)
- gtkdata.bigtk_support = true;
-
- /* find last GTK that we used initially, if any */
- ieee80211_iter_keys(mvm->hw, vif,
- iwl_mvm_d3_find_last_keys, &gtkdata);
- /* not trying to keep connections with MFP/unhandled ciphers */
- if (gtkdata.unhandled_cipher)
- return false;
- if (!gtkdata.num_keys)
- goto out;
-
/*
* invalidate all other GTKs that might still exist and update
* the one that we used
@@ -2149,17 +1998,15 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
IWL_DEBUG_WOWLAN(mvm, "num of GTK rekeying %d\n",
status->num_of_gtk_rekeys);
- if (!iwl_mvm_gtk_rekey(status, vif, mvm, gtkdata.gtk_cipher))
+ if (!iwl_mvm_gtk_rekey(status, vif, mvm))
return false;
if (!iwl_mvm_d3_igtk_bigtk_rekey_add(status, vif,
- gtkdata.igtk_cipher,
&status->igtk))
return false;
for (i = 0; i < ARRAY_SIZE(status->bigtk); i++) {
if (!iwl_mvm_d3_igtk_bigtk_rekey_add(status, vif,
- gtkdata.bigtk_cipher,
&status->bigtk[i]))
return false;
}
@@ -2168,7 +2015,6 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
(void *)&replay_ctr, GFP_KERNEL);
}
-out:
if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP,
WOWLAN_GET_STATUSES,
IWL_FW_CMD_VER_UNKNOWN) < 10) {
@@ -2236,7 +2082,7 @@ static void iwl_mvm_convert_gtk_v3(struct iwl_wowlan_status_data *status,
}
static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status,
- struct iwl_wowlan_igtk_status *data)
+ struct iwl_wowlan_igtk_status_v1 *data)
{
int i;
@@ -2260,7 +2106,7 @@ static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status,
}
static void iwl_mvm_convert_bigtk(struct iwl_wowlan_status_data *status,
- const struct iwl_wowlan_igtk_status *data)
+ const struct iwl_wowlan_igtk_status_v1 *data)
{
int data_idx, status_idx = 0;
@@ -2291,7 +2137,7 @@ static void iwl_mvm_convert_bigtk(struct iwl_wowlan_status_data *status,
}
static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm,
- struct iwl_wowlan_info_notif *data,
+ struct iwl_wowlan_info_notif_v5 *data,
struct iwl_wowlan_status_data *status,
u32 len)
{
@@ -2727,7 +2573,6 @@ enum iwl_d3_notif {
/* manage d3 resume data */
struct iwl_d3_data {
struct iwl_wowlan_status_data *status;
- bool test;
u32 d3_end_flags;
u32 notif_expected; /* bitmap - see &enum iwl_d3_notif */
u32 notif_received; /* bitmap - see &enum iwl_d3_notif */
@@ -2919,18 +2764,11 @@ iwl_mvm_choose_query_wakeup_reasons(struct iwl_mvm *mvm,
if (mvm->net_detect) {
iwl_mvm_query_netdetect_reasons(mvm, vif, d3_data);
- } else {
- bool keep = iwl_mvm_query_wakeup_reasons(mvm, vif,
- d3_data->status);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (keep)
- mvm->keep_vif = vif;
-#endif
-
- return keep;
+ return false;
}
- return false;
+
+ return iwl_mvm_query_wakeup_reasons(mvm, vif,
+ d3_data->status);
}
#define IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT (IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET | \
@@ -3069,7 +2907,7 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait,
iwl_mvm_parse_wowlan_info_notif_v3(mvm, notif,
d3_data->status, len);
} else if (wowlan_info_ver == 5) {
- struct iwl_wowlan_info_notif *notif =
+ struct iwl_wowlan_info_notif_v5 *notif =
(void *)pkt->data;
iwl_mvm_parse_wowlan_info_notif(mvm, notif,
@@ -3143,10 +2981,9 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait,
return d3_data->notif_received == d3_data->notif_expected;
}
-static int iwl_mvm_resume_firmware(struct iwl_mvm *mvm, bool test)
+static int iwl_mvm_resume_firmware(struct iwl_mvm *mvm)
{
int ret;
- enum iwl_d3_status d3_status;
struct iwl_host_cmd cmd = {
.id = D0I3_END_CMD,
.flags = CMD_WANT_SKB,
@@ -3154,15 +2991,10 @@ static int iwl_mvm_resume_firmware(struct iwl_mvm *mvm, bool test)
bool reset = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
- ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !reset);
+ ret = iwl_trans_d3_resume(mvm->trans, !reset);
if (ret)
return ret;
- if (d3_status != IWL_D3_STATUS_ALIVE) {
- IWL_INFO(mvm, "Device was reset during suspend\n");
- return -ENOENT;
- }
-
/*
* We should trigger resume flow using command only for 22000 family
* AX210 and above don't need the command since they have
@@ -3207,7 +3039,7 @@ static int iwl_mvm_d3_notif_wait(struct iwl_mvm *mvm,
ARRAY_SIZE(d3_resume_notif),
iwl_mvm_wait_d3_notif, d3_data);
- ret = iwl_mvm_resume_firmware(mvm, d3_data->test);
+ ret = iwl_mvm_resume_firmware(mvm);
if (ret) {
iwl_remove_notification(&mvm->notif_wait, &wait_d3_notif);
return ret;
@@ -3227,13 +3059,12 @@ static inline bool iwl_mvm_d3_resume_notif_based(struct iwl_mvm *mvm)
D3_END_NOTIFICATION, 0);
}
-static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
+static int __iwl_mvm_resume(struct iwl_mvm *mvm)
{
struct ieee80211_vif *vif = NULL;
int ret = 1;
struct iwl_mvm_nd_results results = {};
struct iwl_d3_data d3_data = {
- .test = test,
.notif_expected =
IWL_D3_NOTIF_WOWLAN_INFO |
IWL_D3_NOTIF_D3_END_NOTIF,
@@ -3271,7 +3102,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
rt_status = iwl_mvm_check_rt_status(mvm, vif);
if (rt_status != FW_ALIVE) {
- set_bit(STATUS_FW_ERROR, &mvm->trans->status);
+ iwl_trans_notify_fw_error(mvm->trans);
if (rt_status == FW_ERROR) {
IWL_ERR(mvm, "FW Error occurred during suspend. Restarting.\n");
iwl_mvm_dump_nic_error_log(mvm);
@@ -3298,13 +3129,11 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
if (ret)
goto err;
} else {
- ret = iwl_mvm_resume_firmware(mvm, test);
+ ret = iwl_mvm_resume_firmware(mvm);
if (ret < 0)
goto err;
}
- iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_WOWLAN);
-
/* when reset is required we can't send these following commands */
if (d3_data.d3_end_flags & IWL_D0I3_RESET_REQUIRE)
goto query_wakeup_reasons;
@@ -3346,7 +3175,7 @@ out:
kfree(d3_data.status);
iwl_mvm_free_nd(mvm);
- if (!d3_data.test && !mvm->net_detect)
+ if (!mvm->net_detect)
ieee80211_iterate_active_interfaces_mtx(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_d3_disconnect_iter,
@@ -3385,7 +3214,7 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
- ret = __iwl_mvm_resume(mvm, false);
+ ret = __iwl_mvm_resume(mvm);
iwl_mvm_resume_tcm(mvm);
@@ -3420,7 +3249,7 @@ void iwl_mvm_fast_suspend(struct iwl_mvm *mvm)
IWL_ERR(mvm,
"fast suspend: couldn't send D3_CONFIG_CMD %d\n", ret);
- ret = iwl_trans_d3_suspend(mvm->trans, false, false);
+ ret = iwl_trans_d3_suspend(mvm->trans, false);
if (ret)
IWL_ERR(mvm, "fast suspend: trans_d3_suspend failed %d\n", ret);
}
@@ -3443,7 +3272,7 @@ int iwl_mvm_fast_resume(struct iwl_mvm *mvm)
rt_status = iwl_mvm_check_rt_status(mvm, NULL);
if (rt_status != FW_ALIVE) {
- set_bit(STATUS_FW_ERROR, &mvm->trans->status);
+ iwl_trans_notify_fw_error(mvm->trans);
if (rt_status == FW_ERROR) {
IWL_ERR(mvm,
"iwl_mvm_check_rt_status failed, device is gone during suspend\n");
@@ -3455,7 +3284,6 @@ int iwl_mvm_fast_resume(struct iwl_mvm *mvm)
&iwl_dump_desc_assert,
false, 0);
}
- mvm->trans->state = IWL_TRANS_NO_FW;
ret = -ENODEV;
goto out;
@@ -3473,125 +3301,3 @@ out:
return ret;
}
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
-{
- struct iwl_mvm *mvm = inode->i_private;
- int err;
-
- if (mvm->d3_test_active)
- return -EBUSY;
-
- file->private_data = inode->i_private;
-
- iwl_mvm_pause_tcm(mvm, true);
-
- iwl_fw_runtime_suspend(&mvm->fwrt);
-
- /* start pseudo D3 */
- rtnl_lock();
- wiphy_lock(mvm->hw->wiphy);
- err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
- wiphy_unlock(mvm->hw->wiphy);
- rtnl_unlock();
- if (err > 0)
- err = -EINVAL;
- if (err)
- return err;
-
- mvm->d3_test_active = true;
- mvm->keep_vif = NULL;
- return 0;
-}
-
-static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm *mvm = file->private_data;
- unsigned long end = jiffies + 60 * HZ;
- u32 pme_asserted;
-
- while (true) {
- /* read pme_ptr if available */
- if (mvm->d3_test_pme_ptr) {
- pme_asserted = iwl_trans_read_mem32(mvm->trans,
- mvm->d3_test_pme_ptr);
- if (pme_asserted)
- break;
- }
-
- if (msleep_interruptible(100))
- break;
-
- if (time_is_before_jiffies(end)) {
- IWL_ERR(mvm,
- "ending pseudo-D3 with timeout after ~60 seconds\n");
- return -ETIMEDOUT;
- }
- }
-
- return 0;
-}
-
-static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- /* skip the one we keep connection on */
- if (_data == vif)
- return;
-
- if (vif->type == NL80211_IFTYPE_STATION)
- ieee80211_connection_loss(vif);
-}
-
-static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
-{
- struct iwl_mvm *mvm = inode->i_private;
- bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
-
- mvm->d3_test_active = false;
-
- iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
-
- rtnl_lock();
- wiphy_lock(mvm->hw->wiphy);
- __iwl_mvm_resume(mvm, true);
- wiphy_unlock(mvm->hw->wiphy);
- rtnl_unlock();
-
- iwl_mvm_resume_tcm(mvm);
-
- iwl_fw_runtime_resume(&mvm->fwrt);
-
- iwl_abort_notification_waits(&mvm->notif_wait);
- if (!unified_image) {
- int remaining_time = 10;
-
- ieee80211_restart_hw(mvm->hw);
-
- /* wait for restart and disconnect all interfaces */
- while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
- remaining_time > 0) {
- remaining_time--;
- msleep(1000);
- }
-
- if (remaining_time == 0)
- IWL_ERR(mvm, "Timed out waiting for HW restart!\n");
- }
-
- ieee80211_iterate_active_interfaces_atomic(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif);
-
- return 0;
-}
-
-const struct file_operations iwl_dbgfs_d3_test_ops = {
- .open = iwl_mvm_d3_test_open,
- .read = iwl_mvm_d3_test_read,
- .release = iwl_mvm_d3_test_release,
-};
-#endif
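The deleted d3_test debugfs hook was essentially a poll loop: read the PME word every 100 ms and give up after roughly a minute. A standalone sketch of the same loop; read_pme() is a hypothetical stand-in for iwl_trans_read_mem32() on d3_test_pme_ptr:

#include <stdio.h>
#include <time.h>
#include <unistd.h>

extern unsigned int read_pme(void);	/* hypothetical stand-in */

static int wait_for_wakeup(void)
{
	time_t end = time(NULL) + 60;

	while (time(NULL) < end) {
		if (read_pme())
			return 0;	/* PME asserted - woken up */
		usleep(100 * 1000);	/* mirrors msleep_interruptible(100) */
	}
	fprintf(stderr, "ending pseudo-D3 with timeout after ~60 seconds\n");
	return -1;
}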
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index fbe4e4a50852..a56c352a459a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -762,96 +762,6 @@ static ssize_t iwl_dbgfs_max_tx_op_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
-static ssize_t iwl_dbgfs_int_mlo_scan_write(struct ieee80211_vif *vif,
- char *buf, size_t count,
- loff_t *ppos)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- u32 action;
- int ret;
-
- if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif))
- return -EINVAL;
-
- if (kstrtou32(buf, 0, &action))
- return -EINVAL;
-
- mutex_lock(&mvm->mutex);
-
- if (!action) {
- ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_INT_MLO, false);
- } else if (action == 1) {
- ret = iwl_mvm_int_mlo_scan(mvm, vif);
- } else {
- ret = -EINVAL;
- }
-
- mutex_unlock(&mvm->mutex);
-
- return ret ?: count;
-}
-
-static ssize_t iwl_dbgfs_esr_disable_reason_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_vif *vif = file->private_data;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- unsigned long esr_mask;
- char *buf;
- int bufsz, pos, i;
- ssize_t rv;
-
- mutex_lock(&mvm->mutex);
- esr_mask = mvmvif->esr_disable_reason;
- mutex_unlock(&mvm->mutex);
-
- bufsz = hweight32(esr_mask) * 32 + 40;
- buf = kmalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- pos = scnprintf(buf, bufsz, "EMLSR state: '0x%lx'\nreasons:\n",
- esr_mask);
- for_each_set_bit(i, &esr_mask, BITS_PER_LONG)
- pos += scnprintf(buf + pos, bufsz - pos, " - %s\n",
- iwl_get_esr_state_string(BIT(i)));
-
- rv = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return rv;
-}
-
-static ssize_t iwl_dbgfs_esr_disable_reason_write(struct ieee80211_vif *vif,
- char *buf, size_t count,
- loff_t *ppos)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- u32 reason;
- u8 block;
- int ret;
-
- ret = sscanf(buf, "%u %hhu", &reason, &block);
- if (ret < 0)
- return ret;
-
- if (hweight16(reason) != 1 || !(reason & IWL_MVM_BLOCK_ESR_REASONS))
- return -EINVAL;
-
- mutex_lock(&mvm->mutex);
- if (block)
- iwl_mvm_block_esr(mvm, vif, reason,
- iwl_mvm_get_primary_link(vif));
- else
- iwl_mvm_unblock_esr(mvm, vif, reason);
- mutex_unlock(&mvm->mutex);
-
- return count;
-}
-
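The removed esr_disable_reason writer accepted exactly one blocking reason per write: hweight16(reason) != 1 rejects zero and multi-bit masks alike. The equivalent power-of-two test, as a standalone sketch:

#include <stdbool.h>
#include <stdint.h>

/* true iff exactly one bit of the 16-bit mask is set */
static bool one_reason_bit(uint16_t mask)
{
	return mask && !(mask & (mask - 1));
}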
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -884,8 +794,6 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32);
MVM_DEBUGFS_READ_FILE_OPS(os_device_timediff);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(max_tx_op, 10);
-MVM_DEBUGFS_WRITE_FILE_OPS(int_mlo_scan, 32);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(esr_disable_reason, 32);
void iwl_mvm_vif_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
@@ -916,8 +824,6 @@ void iwl_mvm_vif_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
MVM_DEBUGFS_ADD_FILE_VIF(max_tx_op, mvmvif->dbgfs_dir, 0600);
debugfs_create_bool("ftm_unprotected", 0200, mvmvif->dbgfs_dir,
&mvmvif->ftm_unprotected);
- MVM_DEBUGFS_ADD_FILE_VIF(int_mlo_scan, mvmvif->dbgfs_dir, 0200);
- MVM_DEBUGFS_ADD_FILE_VIF(esr_disable_reason, mvmvif->dbgfs_dir, 0600);
if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
mvmvif == mvm->bf_allowed_vif)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index f0e184c8a81a..683c0ba5fb39 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1134,7 +1134,7 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
if (count == 6 && !strcmp(buf, "nolog\n")) {
set_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, &mvm->status);
- iwl_trans_suppress_cmd_error_once(mvm->trans);
+ mvm->trans->suppress_cmd_error_once = true;
}
/* take the return value to make compiler happy - it will fail anyway */
@@ -2159,7 +2159,6 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
MVM_DEBUGFS_ADD_FILE(uapsd_noagg_bssids, mvm->debugfs_dir, S_IRUSR);
#ifdef CONFIG_PM_SLEEP
- MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, 0400);
debugfs_create_bool("d3_wake_sysassert", 0600, mvm->debugfs_dir,
&mvm->d3_wake_sysassert);
debugfs_create_u32("last_netdetect_scans", 0400, mvm->debugfs_dir,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index d931c6eaf12f..865f973f677d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -837,7 +837,7 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
.flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
};
- if (!mvm->trans->ltr_enabled)
+ if (!iwl_trans_is_ltr_enabled(mvm->trans))
return 0;
return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
index 2269acc55c0e..738facceb240 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
@@ -5,50 +5,6 @@
#include "mvm.h"
#include "time-event.h"
-#define HANDLE_ESR_REASONS(HOW) \
- HOW(BLOCKED_PREVENTION) \
- HOW(BLOCKED_WOWLAN) \
- HOW(BLOCKED_TPT) \
- HOW(BLOCKED_FW) \
- HOW(BLOCKED_NON_BSS) \
- HOW(BLOCKED_ROC) \
- HOW(BLOCKED_TMP_NON_BSS) \
- HOW(EXIT_MISSED_BEACON) \
- HOW(EXIT_LOW_RSSI) \
- HOW(EXIT_COEX) \
- HOW(EXIT_BANDWIDTH) \
- HOW(EXIT_CSA) \
- HOW(EXIT_LINK_USAGE)
-
-static const char *const iwl_mvm_esr_states_names[] = {
-#define NAME_ENTRY(x) [ilog2(IWL_MVM_ESR_##x)] = #x,
- HANDLE_ESR_REASONS(NAME_ENTRY)
-};
-
-const char *iwl_get_esr_state_string(enum iwl_mvm_esr_state state)
-{
- int offs = ilog2(state);
-
- if (offs >= ARRAY_SIZE(iwl_mvm_esr_states_names) ||
- !iwl_mvm_esr_states_names[offs])
- return "UNKNOWN";
-
- return iwl_mvm_esr_states_names[offs];
-}
-
-static void iwl_mvm_print_esr_state(struct iwl_mvm *mvm, u32 mask)
-{
-#define NAME_FMT(x) "%s"
-#define NAME_PR(x) (mask & IWL_MVM_ESR_##x) ? "[" #x "]" : "",
- IWL_DEBUG_INFO(mvm,
- "EMLSR state = " HANDLE_ESR_REASONS(NAME_FMT)
- " (0x%x)\n",
- HANDLE_ESR_REASONS(NAME_PR)
- mask);
-#undef NAME_FMT
-#undef NAME_PR
-}
-
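The reason-name table removed here is a classic X-macro: one HANDLE_ESR_REASONS list expanded into both the name array and the debug format string, so the two could never drift apart. A compilable miniature of the same pattern, with the reason list abbreviated:

#include <stdio.h>

#define HANDLE_REASONS(HOW) \
	HOW(BLOCKED_WOWLAN) \
	HOW(EXIT_COEX) \
	HOW(EXIT_LOW_RSSI)

enum reason {
#define ENUM_ENTRY(x) R_##x,
	HANDLE_REASONS(ENUM_ENTRY)
#undef ENUM_ENTRY
};

static const char *const reason_names[] = {
#define NAME_ENTRY(x) [R_##x] = #x,
	HANDLE_REASONS(NAME_ENTRY)
#undef NAME_ENTRY
};

int main(void)
{
	printf("%s\n", reason_names[R_EXIT_COEX]);	/* EXIT_COEX */
	return 0;
}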
static int iwl_mvm_link_cmd_send(struct iwl_mvm *mvm,
struct iwl_link_config_cmd *cmd,
enum iwl_ctxt_action action)
@@ -114,65 +70,6 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return iwl_mvm_link_cmd_send(mvm, &cmd, FW_CTXT_ACTION_ADD);
}
-struct iwl_mvm_esr_iter_data {
- struct ieee80211_vif *vif;
- unsigned int link_id;
- bool lift_block;
-};
-
-static void iwl_mvm_esr_vif_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm_esr_iter_data *data = _data;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- int link_id;
-
- if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION)
- return;
-
- for_each_mvm_vif_valid_link(mvmvif, link_id) {
- struct iwl_mvm_vif_link_info *link_info =
- mvmvif->link[link_id];
- if (vif == data->vif && link_id == data->link_id)
- continue;
- if (link_info->active)
- data->lift_block = false;
- }
-}
-
-int iwl_mvm_esr_non_bss_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- unsigned int link_id, bool active)
-{
- /* An active link of a non-station vif blocks EMLSR. Upon activation
- * block EMLSR on the bss vif. Upon deactivation, check if this link
- * was the last non-station link active, and if so unblock the bss vif
- */
- struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
- struct iwl_mvm_esr_iter_data data = {
- .vif = vif,
- .link_id = link_id,
- .lift_block = true,
- };
-
- if (IS_ERR_OR_NULL(bss_vif))
- return 0;
-
- if (active)
- return iwl_mvm_block_esr_sync(mvm, bss_vif,
- IWL_MVM_ESR_BLOCKED_NON_BSS);
-
- ieee80211_iterate_active_interfaces(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_esr_vif_iterator, &data);
- if (data.lift_block) {
- mutex_lock(&mvm->mutex);
- iwl_mvm_unblock_esr(mvm, bss_vif, IWL_MVM_ESR_BLOCKED_NON_BSS);
- mutex_unlock(&mvm->mutex);
- }
-
- return 0;
-}
-
int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf,
u32 changes, bool active)
@@ -388,452 +285,6 @@ int iwl_mvm_disable_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return ret;
}
-struct iwl_mvm_rssi_to_grade {
- s8 rssi[2];
- u16 grade;
-};
-
-#define RSSI_TO_GRADE_LINE(_lb, _hb_uhb, _grade) \
- { \
- .rssi = {_lb, _hb_uhb}, \
- .grade = _grade \
- }
-
-/*
- * This array must be sorted by increasing RSSI for proper functionality.
- * The grades are actually estimated throughput, represented as fixed-point
- * with a scale factor of 1/10.
- */
-static const struct iwl_mvm_rssi_to_grade rssi_to_grade_map[] = {
- RSSI_TO_GRADE_LINE(-85, -89, 177),
- RSSI_TO_GRADE_LINE(-83, -86, 344),
- RSSI_TO_GRADE_LINE(-82, -85, 516),
- RSSI_TO_GRADE_LINE(-80, -83, 688),
- RSSI_TO_GRADE_LINE(-77, -79, 1032),
- RSSI_TO_GRADE_LINE(-73, -76, 1376),
- RSSI_TO_GRADE_LINE(-70, -74, 1548),
- RSSI_TO_GRADE_LINE(-69, -72, 1750),
- RSSI_TO_GRADE_LINE(-65, -68, 2064),
- RSSI_TO_GRADE_LINE(-61, -66, 2294),
- RSSI_TO_GRADE_LINE(-58, -61, 2580),
- RSSI_TO_GRADE_LINE(-55, -58, 2868),
- RSSI_TO_GRADE_LINE(-46, -55, 3098),
- RSSI_TO_GRADE_LINE(-43, -54, 3442)
-};
-
-#define MAX_GRADE (rssi_to_grade_map[ARRAY_SIZE(rssi_to_grade_map) - 1].grade)
-
-#define DEFAULT_CHAN_LOAD_LB 30
-#define DEFAULT_CHAN_LOAD_HB 15
-#define DEFAULT_CHAN_LOAD_UHB 0
-
-/* Factors calculation is done with fixed-point with a scaling factor of 1/256 */
-#define SCALE_FACTOR 256
-
-/* Convert a percentage from [0,100] to [0,255] */
-#define NORMALIZE_PERCENT_TO_255(percentage) ((percentage) * SCALE_FACTOR / 100)
-
-static unsigned int
-iwl_mvm_get_puncturing_factor(const struct ieee80211_bss_conf *link_conf)
-{
- enum nl80211_chan_width chan_width =
- link_conf->chanreq.oper.width;
- int mhz = nl80211_chan_width_to_mhz(chan_width);
- unsigned int n_subchannels, n_punctured, puncturing_penalty;
-
- if (WARN_ONCE(mhz < 20 || mhz > 320,
- "Invalid channel width : (%d)\n", mhz))
- return SCALE_FACTOR;
-
- /* No puncturing, no penalty */
- if (mhz < 80)
- return SCALE_FACTOR;
-
- /* total number of subchannels */
- n_subchannels = mhz / 20;
- /* how many of these are punctured */
- n_punctured = hweight16(link_conf->chanreq.oper.punctured);
-
- puncturing_penalty = n_punctured * SCALE_FACTOR / n_subchannels;
- return SCALE_FACTOR - puncturing_penalty;
-}
-
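The removed puncturing penalty is plain fixed-point arithmetic in 1/256 units: the fraction of punctured 20 MHz subchannels is subtracted from unity. A runnable sketch, using the GCC popcount builtin in place of hweight16():

#include <stdio.h>

#define SCALE_FACTOR 256	/* same 1/256 fixed point as the removed code */

static unsigned int puncturing_factor(int mhz, unsigned int punctured_map)
{
	unsigned int n_subchannels, penalty;

	if (mhz < 80)		/* no puncturing below 80 MHz, no penalty */
		return SCALE_FACTOR;

	n_subchannels = mhz / 20;
	penalty = (unsigned int)__builtin_popcount(punctured_map) *
		  SCALE_FACTOR / n_subchannels;
	return SCALE_FACTOR - penalty;
}

int main(void)
{
	/* 160 MHz with two subchannels punctured: 256 - 2*256/8 = 192 */
	printf("%u\n", puncturing_factor(160, 0x3));
	return 0;
}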
-static unsigned int
-iwl_mvm_get_chan_load(struct ieee80211_bss_conf *link_conf)
-{
- struct ieee80211_vif *vif = link_conf->vif;
- struct iwl_mvm_vif_link_info *mvm_link =
- iwl_mvm_vif_from_mac80211(link_conf->vif)->link[link_conf->link_id];
- const struct element *bss_load_elem;
- const struct ieee80211_bss_load_elem *bss_load;
- enum nl80211_band band = link_conf->chanreq.oper.chan->band;
- const struct cfg80211_bss_ies *ies;
- unsigned int chan_load;
- u32 chan_load_by_us;
-
- rcu_read_lock();
- if (ieee80211_vif_link_active(vif, link_conf->link_id))
- ies = rcu_dereference(link_conf->bss->beacon_ies);
- else
- ies = rcu_dereference(link_conf->bss->ies);
-
- if (ies)
- bss_load_elem = cfg80211_find_elem(WLAN_EID_QBSS_LOAD,
- ies->data, ies->len);
- else
- bss_load_elem = NULL;
-
- /* If there isn't BSS Load element, take the defaults */
- if (!bss_load_elem ||
- bss_load_elem->datalen != sizeof(*bss_load)) {
- rcu_read_unlock();
- switch (band) {
- case NL80211_BAND_2GHZ:
- chan_load = DEFAULT_CHAN_LOAD_LB;
- break;
- case NL80211_BAND_5GHZ:
- chan_load = DEFAULT_CHAN_LOAD_HB;
- break;
- case NL80211_BAND_6GHZ:
- chan_load = DEFAULT_CHAN_LOAD_UHB;
- break;
- default:
- chan_load = 0;
- break;
- }
- /* The defaults are given in percentage */
- return NORMALIZE_PERCENT_TO_255(chan_load);
- }
-
- bss_load = (const void *)bss_load_elem->data;
- /* Channel util is in range 0-255 */
- chan_load = bss_load->channel_util;
- rcu_read_unlock();
-
- if (!mvm_link || !mvm_link->active)
- return chan_load;
-
- if (WARN_ONCE(!mvm_link->phy_ctxt,
- "Active link (%u) without phy ctxt assigned!\n",
- link_conf->link_id))
- return chan_load;
-
- /* channel load by us is given in percentage */
- chan_load_by_us =
- NORMALIZE_PERCENT_TO_255(mvm_link->phy_ctxt->channel_load_by_us);
-
- /* Use only values that firmware sends that can possibly be valid */
- if (chan_load_by_us <= chan_load)
- chan_load -= chan_load_by_us;
-
- return chan_load;
-}
-
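iwl_mvm_get_chan_load() normalized percentage defaults to the 0-255 scale of the BSS Load element, then discounted the airtime generated by this device itself, but only when that kept the result non-negative. The arithmetic as a standalone sketch:

#include <stdio.h>

#define SCALE_FACTOR 256
#define NORMALIZE_PERCENT_TO_255(p) ((p) * SCALE_FACTOR / 100)

static unsigned int effective_chan_load(unsigned int chan_load_255,
					unsigned int own_load_percent)
{
	unsigned int by_us = NORMALIZE_PERCENT_TO_255(own_load_percent);

	/* use the firmware value only when it can possibly be valid */
	return by_us <= chan_load_255 ? chan_load_255 - by_us : chan_load_255;
}

int main(void)
{
	/* 50% channel load reported (128/255), 10% of it our own traffic */
	printf("%u\n", effective_chan_load(128, 10));	/* 128 - 25 = 103 */
	return 0;
}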
-static unsigned int
-iwl_mvm_get_chan_load_factor(struct ieee80211_bss_conf *link_conf)
-{
- return SCALE_FACTOR - iwl_mvm_get_chan_load(link_conf);
-}
-
-/* This function calculates the grade of a link. Returns 0 in error case */
-VISIBLE_IF_IWLWIFI_KUNIT
-unsigned int iwl_mvm_get_link_grade(struct ieee80211_bss_conf *link_conf)
-{
- enum nl80211_band band;
- int i, rssi_idx;
- s32 link_rssi;
- unsigned int grade = MAX_GRADE;
-
- if (WARN_ON_ONCE(!link_conf))
- return 0;
-
- band = link_conf->chanreq.oper.chan->band;
- if (WARN_ONCE(band != NL80211_BAND_2GHZ &&
- band != NL80211_BAND_5GHZ &&
- band != NL80211_BAND_6GHZ,
- "Invalid band (%u)\n", band))
- return 0;
-
- link_rssi = MBM_TO_DBM(link_conf->bss->signal);
- /*
- * For 6 GHz the RSSI of the beacons is lower than
- * the RSSI of the data.
- */
- if (band == NL80211_BAND_6GHZ)
- link_rssi += 4;
-
- rssi_idx = band == NL80211_BAND_2GHZ ? 0 : 1;
-
- /* No valid RSSI - take the lowest grade */
- if (!link_rssi)
- link_rssi = rssi_to_grade_map[0].rssi[rssi_idx];
-
- /* Get grade based on RSSI */
- for (i = 0; i < ARRAY_SIZE(rssi_to_grade_map); i++) {
- const struct iwl_mvm_rssi_to_grade *line =
- &rssi_to_grade_map[i];
-
- if (link_rssi > line->rssi[rssi_idx])
- continue;
- grade = line->grade;
- break;
- }
-
- /* apply the channel load and puncturing factors */
- grade = grade * iwl_mvm_get_chan_load_factor(link_conf) / SCALE_FACTOR;
- grade = grade * iwl_mvm_get_puncturing_factor(link_conf) / SCALE_FACTOR;
- return grade;
-}
-EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mvm_get_link_grade);
-
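The grade lookup above is a first-match scan of a table sorted by increasing RSSI; anything stronger than the last row gets MAX_GRADE. A runnable sketch with two rows taken from the removed rssi_to_grade_map:

#include <stdio.h>

struct rssi_to_grade {
	int rssi[2];		/* [0]: 2.4 GHz, [1]: 5/6 GHz */
	unsigned int grade;	/* estimated throughput, 1/10 fixed point */
};

static const struct rssi_to_grade map[] = {
	{ { -85, -89 },  177 },
	{ { -43, -54 }, 3442 },
};

#define N_ROWS (sizeof(map) / sizeof(map[0]))
#define MAX_GRADE (map[N_ROWS - 1].grade)

static unsigned int grade_for_rssi(int rssi, int band_idx)
{
	for (unsigned int i = 0; i < N_ROWS; i++)
		if (rssi <= map[i].rssi[band_idx])
			return map[i].grade;

	return MAX_GRADE;	/* stronger than the last row */
}

int main(void)
{
	printf("%u\n", grade_for_rssi(-90, 0));	/* weakest bucket: 177 */
	return 0;
}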
-static
-u8 iwl_mvm_set_link_selection_data(struct ieee80211_vif *vif,
- struct iwl_mvm_link_sel_data *data,
- unsigned long usable_links,
- u8 *best_link_idx)
-{
- u8 n_data = 0;
- u16 max_grade = 0;
- unsigned long link_id;
-
- /* TODO: don't select links that weren't discovered in the last scan */
- for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
- struct ieee80211_bss_conf *link_conf =
- link_conf_dereference_protected(vif, link_id);
-
- if (WARN_ON_ONCE(!link_conf))
- continue;
-
- data[n_data].link_id = link_id;
- data[n_data].chandef = &link_conf->chanreq.oper;
- data[n_data].signal = link_conf->bss->signal / 100;
- data[n_data].grade = iwl_mvm_get_link_grade(link_conf);
-
- if (data[n_data].grade > max_grade) {
- max_grade = data[n_data].grade;
- *best_link_idx = n_data;
- }
- n_data++;
- }
-
- return n_data;
-}
-
-struct iwl_mvm_bw_to_rssi_threshs {
- s8 low;
- s8 high;
-};
-
-#define BW_TO_RSSI_THRESHOLDS(_bw) \
- [IWL_PHY_CHANNEL_MODE ## _bw] = { \
- .low = IWL_MVM_LOW_RSSI_THRESH_##_bw##MHZ, \
- .high = IWL_MVM_HIGH_RSSI_THRESH_##_bw##MHZ \
- }
-
-s8 iwl_mvm_get_esr_rssi_thresh(struct iwl_mvm *mvm,
- const struct cfg80211_chan_def *chandef,
- bool low)
-{
- const struct iwl_mvm_bw_to_rssi_threshs bw_to_rssi_threshs_map[] = {
- BW_TO_RSSI_THRESHOLDS(20),
- BW_TO_RSSI_THRESHOLDS(40),
- BW_TO_RSSI_THRESHOLDS(80),
- BW_TO_RSSI_THRESHOLDS(160)
- /* 320 MHz has the same thresholds as 20 MHz */
- };
- const struct iwl_mvm_bw_to_rssi_threshs *threshs;
- u8 chan_width = iwl_mvm_get_channel_width(chandef);
-
- if (WARN_ON(chandef->chan->band != NL80211_BAND_2GHZ &&
- chandef->chan->band != NL80211_BAND_5GHZ &&
- chandef->chan->band != NL80211_BAND_6GHZ))
- return S8_MAX;
-
- /* 6 GHz will always use 20 MHz thresholds, regardless of the BW */
- if (chan_width == IWL_PHY_CHANNEL_MODE320)
- chan_width = IWL_PHY_CHANNEL_MODE20;
-
- threshs = &bw_to_rssi_threshs_map[chan_width];
-
- return low ? threshs->low : threshs->high;
-}
-
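iwl_mvm_get_esr_rssi_thresh() pairs with the per-bandwidth constants dropped from constants.h earlier in this patch. The high mark is what iwl_mvm_esr_disallowed_with_link() below checks; by the naming, the low mark served the exit path, which is not visible in this hunk, so that half of the sketch is an assumption:

#include <stdbool.h>

#define IWL_MVM_HIGH_RSSI_THRESH_20MHZ -67	/* removed from constants.h */
#define IWL_MVM_LOW_RSSI_THRESH_20MHZ  -71

/* a link must reach the high mark to be picked for EMLSR... */
static bool strong_enough_for_esr(int rssi)
{
	return rssi >= IWL_MVM_HIGH_RSSI_THRESH_20MHZ;
}

/* ...but (assumed) only drops out once it falls below the low mark */
static bool should_exit_esr_low_rssi(int rssi)
{
	return rssi < IWL_MVM_LOW_RSSI_THRESH_20MHZ;
}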
-static u32
-iwl_mvm_esr_disallowed_with_link(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- const struct iwl_mvm_link_sel_data *link,
- bool primary)
-{
- struct wiphy *wiphy = mvm->hw->wiphy;
- struct ieee80211_bss_conf *conf;
- enum iwl_mvm_esr_state ret = 0;
- s8 thresh;
-
- conf = wiphy_dereference(wiphy, vif->link_conf[link->link_id]);
- if (WARN_ON_ONCE(!conf))
- return false;
-
- /* BT Coex affects eSR mode only if one of the links is on LB */
- if (link->chandef->chan->band == NL80211_BAND_2GHZ &&
- (!iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif, link->signal,
- primary)))
- ret |= IWL_MVM_ESR_EXIT_COEX;
-
- thresh = iwl_mvm_get_esr_rssi_thresh(mvm, link->chandef,
- false);
-
- if (link->signal < thresh)
- ret |= IWL_MVM_ESR_EXIT_LOW_RSSI;
-
- if (conf->csa_active)
- ret |= IWL_MVM_ESR_EXIT_CSA;
-
- if (ret) {
- IWL_DEBUG_INFO(mvm,
- "Link %d is not allowed for esr\n",
- link->link_id);
- iwl_mvm_print_esr_state(mvm, ret);
- }
- return ret;
-}
-
-VISIBLE_IF_IWLWIFI_KUNIT
-bool iwl_mvm_mld_valid_link_pair(struct ieee80211_vif *vif,
- const struct iwl_mvm_link_sel_data *a,
- const struct iwl_mvm_link_sel_data *b)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- enum iwl_mvm_esr_state ret = 0;
-
- /* Per-link considerations */
- if (iwl_mvm_esr_disallowed_with_link(mvm, vif, a, true) ||
- iwl_mvm_esr_disallowed_with_link(mvm, vif, b, false))
- return false;
-
- if (a->chandef->chan->band == b->chandef->chan->band ||
- a->chandef->width != b->chandef->width)
- ret |= IWL_MVM_ESR_EXIT_BANDWIDTH;
-
- if (ret) {
- IWL_DEBUG_INFO(mvm,
- "Links %d and %d are not a valid pair for EMLSR\n",
- a->link_id, b->link_id);
- iwl_mvm_print_esr_state(mvm, ret);
- return false;
- }
-
- return true;
-
-}
-EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mvm_mld_valid_link_pair);
-
-/*
- * Returns the combined eSR grade of two given links.
- * Returns 0 if eSR is not allowed with these 2 links.
- */
-static
-unsigned int iwl_mvm_get_esr_grade(struct ieee80211_vif *vif,
- const struct iwl_mvm_link_sel_data *a,
- const struct iwl_mvm_link_sel_data *b,
- u8 *primary_id)
-{
- struct ieee80211_bss_conf *primary_conf;
- struct wiphy *wiphy = ieee80211_vif_to_wdev(vif)->wiphy;
- unsigned int primary_load;
-
- lockdep_assert_wiphy(wiphy);
-
- /* a is always primary, b is always secondary */
- if (b->grade > a->grade)
- swap(a, b);
-
- *primary_id = a->link_id;
-
- if (!iwl_mvm_mld_valid_link_pair(vif, a, b))
- return 0;
-
- primary_conf = wiphy_dereference(wiphy, vif->link_conf[*primary_id]);
-
- if (WARN_ON_ONCE(!primary_conf))
- return 0;
-
- primary_load = iwl_mvm_get_chan_load(primary_conf);
-
- return a->grade +
- ((b->grade * primary_load) / SCALE_FACTOR);
-}
-
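The combined grade removed above weights the secondary link's grade by the primary channel's load (1/256 fixed point): offloading to a second link is worth the most when the primary channel is congested. A runnable sketch:

#include <stdio.h>

#define SCALE_FACTOR 256

static unsigned int esr_grade(unsigned int grade_a, unsigned int grade_b,
			      unsigned int primary_chan_load)
{
	/* a is always primary: swap so the higher grade leads */
	if (grade_b > grade_a) {
		unsigned int tmp = grade_a;

		grade_a = grade_b;
		grade_b = tmp;
	}
	return grade_a + grade_b * primary_chan_load / SCALE_FACTOR;
}

int main(void)
{
	printf("%u\n", esr_grade(2064, 1376, 0));	/* idle primary: 2064 */
	printf("%u\n", esr_grade(2064, 1376, 256));	/* saturated: 3440 */
	return 0;
}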
-void iwl_mvm_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
-{
- struct iwl_mvm_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS];
- struct iwl_mvm_link_sel_data *best_link;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- u32 max_active_links = iwl_mvm_max_active_links(mvm, vif);
- u16 usable_links = ieee80211_vif_usable_links(vif);
- u8 best, primary_link, best_in_pair, n_data;
- u16 max_esr_grade = 0, new_active_links;
-
- lockdep_assert_wiphy(mvm->hw->wiphy);
-
- if (!mvmvif->authorized || !ieee80211_vif_is_mld(vif))
- return;
-
- if (!IWL_MVM_AUTO_EML_ENABLE)
- return;
-
- /* The logic below is a simple version that doesn't suit more than 2
- * links
- */
- WARN_ON_ONCE(max_active_links > 2);
-
- n_data = iwl_mvm_set_link_selection_data(vif, data, usable_links,
- &best);
-
- if (WARN(!n_data, "Couldn't find a valid grade for any link!\n"))
- return;
-
- best_link = &data[best];
- primary_link = best_link->link_id;
- new_active_links = BIT(best_link->link_id);
-
- /* eSR is not supported/blocked, or only one usable link */
- if (max_active_links == 1 || !iwl_mvm_vif_has_esr_cap(mvm, vif) ||
- mvmvif->esr_disable_reason || n_data == 1)
- goto set_active;
-
- for (u8 a = 0; a < n_data; a++)
- for (u8 b = a + 1; b < n_data; b++) {
- u16 esr_grade = iwl_mvm_get_esr_grade(vif, &data[a],
- &data[b],
- &best_in_pair);
-
- if (esr_grade <= max_esr_grade)
- continue;
-
- max_esr_grade = esr_grade;
- primary_link = best_in_pair;
- new_active_links = BIT(data[a].link_id) |
- BIT(data[b].link_id);
- }
-
- /* No valid pair was found, go with the best link */
- if (hweight16(new_active_links) <= 1)
- goto set_active;
-
- /* For equal grade - prefer EMLSR */
- if (best_link->grade > max_esr_grade) {
- primary_link = best_link->link_id;
- new_active_links = BIT(best_link->link_id);
- }
-set_active:
- IWL_DEBUG_INFO(mvm, "Link selection result: 0x%x. Primary = %d\n",
- new_active_links, primary_link);
- ieee80211_set_active_links_async(vif, new_active_links);
- mvmvif->link_selection_res = new_active_links;
- mvmvif->link_selection_primary = primary_link;
-}
-
u8 iwl_mvm_get_primary_link(struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -856,266 +307,6 @@ u8 iwl_mvm_get_primary_link(struct ieee80211_vif *vif)
return __ffs(vif->active_links);
}
-/*
- * For non-MLO/single link, this will return the deflink/single active link,
- * respectively
- */
-u8 iwl_mvm_get_other_link(struct ieee80211_vif *vif, u8 link_id)
-{
- switch (hweight16(vif->active_links)) {
- case 0:
- return 0;
- default:
- WARN_ON(1);
- fallthrough;
- case 1:
- return __ffs(vif->active_links);
- case 2:
- return __ffs(vif->active_links & ~BIT(link_id));
- }
-}
-
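iwl_mvm_get_other_link() reduces to a two-link bit trick: clear our own link bit and take the lowest remaining one. The core, minus the zero- and one-link fallbacks, as a sketch (the GCC ctz builtin plays the role of __ffs()):

/* with exactly two bits set, clearing ours leaves the other link's bit */
static unsigned int other_link(unsigned int active_links, unsigned int link_id)
{
	return (unsigned int)__builtin_ctz(active_links & ~(1u << link_id));
}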
-/* Reasons that can cause esr prevention */
-#define IWL_MVM_ESR_PREVENT_REASONS IWL_MVM_ESR_EXIT_MISSED_BEACON
-#define IWL_MVM_PREVENT_ESR_TIMEOUT (HZ * 400)
-#define IWL_MVM_ESR_PREVENT_SHORT (HZ * 300)
-#define IWL_MVM_ESR_PREVENT_LONG (HZ * 600)
-
-static bool iwl_mvm_check_esr_prevention(struct iwl_mvm *mvm,
- struct iwl_mvm_vif *mvmvif,
- enum iwl_mvm_esr_state reason)
-{
- bool timeout_expired = time_after(jiffies,
- mvmvif->last_esr_exit.ts +
- IWL_MVM_PREVENT_ESR_TIMEOUT);
- unsigned long delay;
-
- lockdep_assert_held(&mvm->mutex);
-
- /* Only handle reasons that can cause prevention */
- if (!(reason & IWL_MVM_ESR_PREVENT_REASONS))
- return false;
-
- /*
- * Reset the counter if more than 400 seconds have passed between one
- * exit and the other, or if we exited due to a different reason.
- * Will also reset the counter after the long prevention is done.
- */
- if (timeout_expired || mvmvif->last_esr_exit.reason != reason) {
- mvmvif->exit_same_reason_count = 1;
- return false;
- }
-
- mvmvif->exit_same_reason_count++;
- if (WARN_ON(mvmvif->exit_same_reason_count < 2 ||
- mvmvif->exit_same_reason_count > 3))
- return false;
-
- mvmvif->esr_disable_reason |= IWL_MVM_ESR_BLOCKED_PREVENTION;
-
- /*
- * For the second exit, use a short prevention, and for the third one,
- * use a long prevention.
- */
- delay = mvmvif->exit_same_reason_count == 2 ?
- IWL_MVM_ESR_PREVENT_SHORT :
- IWL_MVM_ESR_PREVENT_LONG;
-
- IWL_DEBUG_INFO(mvm,
- "Preventing EMLSR for %ld seconds due to %u exits with the reason = %s (0x%x)\n",
- delay / HZ, mvmvif->exit_same_reason_count,
- iwl_get_esr_state_string(reason), reason);
-
- wiphy_delayed_work_queue(mvm->hw->wiphy,
- &mvmvif->prevent_esr_done_wk, delay);
- return true;
-}
-
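The removed prevention logic is an escalating back-off: a second exit for the same reason within the 400 s window blocks EMLSR for 300 s, a third for 600 s, then the counter resets. A standalone sketch of the state machine, with timestamps in plain seconds instead of jiffies; recording ts/reason after the exit is left to the caller, as iwl_mvm_exit_esr() did:

enum {
	PREVENT_WINDOW_SEC = 400,
	PREVENT_SHORT_SEC  = 300,
	PREVENT_LONG_SEC   = 600,
};

struct esr_exit_state {
	long last_ts;		/* seconds, recorded by the caller on exit */
	int last_reason;
	int same_reason_count;
};

/* returns how long to block EMLSR, 0 for no prevention */
static int check_prevention(struct esr_exit_state *s, int reason, long now)
{
	/* fresh window or different reason: restart the count */
	if (now - s->last_ts > PREVENT_WINDOW_SEC ||
	    s->last_reason != reason) {
		s->same_reason_count = 1;
		return 0;
	}

	s->same_reason_count++;
	return s->same_reason_count == 2 ? PREVENT_SHORT_SEC
					 : PREVENT_LONG_SEC;
}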
-#define IWL_MVM_TRIGGER_LINK_SEL_TIME (IWL_MVM_TRIGGER_LINK_SEL_TIME_SEC * HZ)
-
-/* API to exit eSR mode */
-void iwl_mvm_exit_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- enum iwl_mvm_esr_state reason,
- u8 link_to_keep)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- u16 new_active_links;
- bool prevented;
-
- lockdep_assert_held(&mvm->mutex);
-
- if (!IWL_MVM_AUTO_EML_ENABLE)
- return;
-
- /* Nothing to do */
- if (!mvmvif->esr_active)
- return;
-
- if (WARN_ON(!ieee80211_vif_is_mld(vif) || !mvmvif->authorized))
- return;
-
- if (WARN_ON(!(vif->active_links & BIT(link_to_keep))))
- link_to_keep = __ffs(vif->active_links);
-
- new_active_links = BIT(link_to_keep);
- IWL_DEBUG_INFO(mvm,
- "Exiting EMLSR. reason = %s (0x%x). Current active links=0x%x, new active links = 0x%x\n",
- iwl_get_esr_state_string(reason), reason,
- vif->active_links, new_active_links);
-
- ieee80211_set_active_links_async(vif, new_active_links);
-
- /* Prevent EMLSR if needed */
- prevented = iwl_mvm_check_esr_prevention(mvm, mvmvif, reason);
-
- /* Remember why and when we exited EMLSR */
- mvmvif->last_esr_exit.ts = jiffies;
- mvmvif->last_esr_exit.reason = reason;
-
- /*
- * If EMLSR is prevented now - don't try to get back to EMLSR.
- * If we exited due to a blocking event, we will try to get back to
- * EMLSR when the corresponding unblocking event will happen.
- */
- if (prevented || reason & IWL_MVM_BLOCK_ESR_REASONS)
- return;
-
- /* If EMLSR is not blocked - try enabling it again in 30 seconds */
- wiphy_delayed_work_queue(mvm->hw->wiphy,
- &mvmvif->mlo_int_scan_wk,
- round_jiffies_relative(IWL_MVM_TRIGGER_LINK_SEL_TIME));
-}
-
-void iwl_mvm_block_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- enum iwl_mvm_esr_state reason,
- u8 link_to_keep)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- lockdep_assert_held(&mvm->mutex);
-
- if (!IWL_MVM_AUTO_EML_ENABLE)
- return;
-
- /* This should be called only with disable reasons */
- if (WARN_ON(!(reason & IWL_MVM_BLOCK_ESR_REASONS)))
- return;
-
- if (mvmvif->esr_disable_reason & reason)
- return;
-
- IWL_DEBUG_INFO(mvm,
- "Blocking EMLSR mode. reason = %s (0x%x)\n",
- iwl_get_esr_state_string(reason), reason);
-
- mvmvif->esr_disable_reason |= reason;
-
- iwl_mvm_print_esr_state(mvm, mvmvif->esr_disable_reason);
-
- iwl_mvm_exit_esr(mvm, vif, reason, link_to_keep);
-}
-
-int iwl_mvm_block_esr_sync(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- enum iwl_mvm_esr_state reason)
-{
- int primary_link = iwl_mvm_get_primary_link(vif);
- int ret;
-
- if (!IWL_MVM_AUTO_EML_ENABLE || !ieee80211_vif_is_mld(vif))
- return 0;
-
- /* This should be called only with blocking reasons */
- if (WARN_ON(!(reason & IWL_MVM_BLOCK_ESR_REASONS)))
- return 0;
-
- /* leave ESR immediately, not only async with iwl_mvm_block_esr() */
- ret = ieee80211_set_active_links(vif, BIT(primary_link));
- if (ret)
- return ret;
-
- mutex_lock(&mvm->mutex);
- /* only additionally block for consistency and to avoid concurrency */
- iwl_mvm_block_esr(mvm, vif, reason, primary_link);
- mutex_unlock(&mvm->mutex);
-
- return 0;
-}
-
-static void iwl_mvm_esr_unblocked(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- bool need_new_sel = time_after(jiffies, mvmvif->last_esr_exit.ts +
- IWL_MVM_TRIGGER_LINK_SEL_TIME);
-
- lockdep_assert_held(&mvm->mutex);
-
- if (!ieee80211_vif_is_mld(vif) || !mvmvif->authorized ||
- mvmvif->esr_active)
- return;
-
- IWL_DEBUG_INFO(mvm, "EMLSR is unblocked\n");
-
- /* If we exited due to an EXIT reason, and the exit was in less than
- * 30 seconds, then a MLO scan was scheduled already.
- */
- if (!need_new_sel &&
- !(mvmvif->last_esr_exit.reason & IWL_MVM_BLOCK_ESR_REASONS)) {
- IWL_DEBUG_INFO(mvm, "Wait for MLO scan\n");
- return;
- }
-
- /*
- * If EMLSR was blocked for more than 30 seconds, or the last link
- * selection decided to not enter EMLSR, trigger a new scan.
- */
- if (need_new_sel || hweight16(mvmvif->link_selection_res) < 2) {
- IWL_DEBUG_INFO(mvm, "Trigger MLO scan\n");
- wiphy_delayed_work_queue(mvm->hw->wiphy,
- &mvmvif->mlo_int_scan_wk, 0);
- /*
- * If EMLSR was blocked for less than 30 seconds, and the last link
- * selection decided to use EMLSR, activate EMLSR using the previous
- * link selection result.
- */
- } else {
- IWL_DEBUG_INFO(mvm,
- "Use the latest link selection result: 0x%x\n",
- mvmvif->link_selection_res);
- ieee80211_set_active_links_async(vif,
- mvmvif->link_selection_res);
- }
-}
-
-void iwl_mvm_unblock_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- enum iwl_mvm_esr_state reason)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- lockdep_assert_held(&mvm->mutex);
-
- if (!IWL_MVM_AUTO_EML_ENABLE)
- return;
-
- /* This should be called only with disable reasons */
- if (WARN_ON(!(reason & IWL_MVM_BLOCK_ESR_REASONS)))
- return;
-
- /* No Change */
- if (!(mvmvif->esr_disable_reason & reason))
- return;
-
- mvmvif->esr_disable_reason &= ~reason;
-
- IWL_DEBUG_INFO(mvm,
- "Unblocking EMLSR mode. reason = %s (0x%x)\n",
- iwl_get_esr_state_string(reason), reason);
- iwl_mvm_print_esr_state(mvm, mvmvif->esr_disable_reason);
-
- if (!mvmvif->esr_disable_reason)
- iwl_mvm_esr_unblocked(mvm, vif);
-}
-
void iwl_mvm_init_link(struct iwl_mvm_vif_link_info *link)
{
link->bcast_sta.sta_id = IWL_INVALID_STA;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 8805ab344895..9c9e0e1c6e1d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -1586,13 +1586,11 @@ iwl_mvm_handle_missed_beacons_notif(struct iwl_mvm *mvm,
u32 id = le32_to_cpu(mb->link_id);
union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt };
u32 mac_type;
- int link_id;
u8 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
MISSED_BEACONS_NOTIFICATION,
0);
u8 new_notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
MISSED_BEACONS_NOTIF, 0);
- struct ieee80211_bss_conf *bss_conf;
/* If the firmware uses the new notification (from MAC_CONF_GROUP),
* refer to that notification's version.
@@ -1617,16 +1615,10 @@ iwl_mvm_handle_missed_beacons_notif(struct iwl_mvm *mvm,
if (!vif)
return;
- bss_conf = &vif->bss_conf;
- link_id = bss_conf->link_id;
mac_type = iwl_mvm_get_mac_type(vif);
IWL_DEBUG_INFO(mvm, "missed beacon mac_type=%u,\n", mac_type);
- mvm->trans->dbg.dump_file_name_ext_valid = true;
- snprintf(mvm->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME,
- "MacId_%d_MacType_%d", id, mac_type);
-
rx_missed_bcon = le32_to_cpu(mb->consec_missed_beacons);
rx_missed_bcon_since_rx =
le32_to_cpu(mb->consec_missed_beacons_since_last_rx);
@@ -1644,41 +1636,11 @@ iwl_mvm_handle_missed_beacons_notif(struct iwl_mvm *mvm,
"missed_beacons:%d, missed_beacons_since_rx:%d\n",
rx_missed_bcon, rx_missed_bcon_since_rx);
}
- } else if (link_id >= 0 && hweight16(vif->active_links) > 1) {
- u32 bss_param_ch_cnt_link_id =
- bss_conf->bss_param_ch_cnt_link_id;
- u32 scnd_lnk_bcn_lost = 0;
-
- if (notif_ver >= 5 &&
- !IWL_FW_CHECK(mvm,
- le32_to_cpu(mb->other_link_id) == IWL_MVM_FW_LINK_ID_INVALID,
- "No data for other link id but we are in EMLSR active_links: 0x%x\n",
- vif->active_links))
- scnd_lnk_bcn_lost =
- le32_to_cpu(mb->consec_missed_beacons_other_link);
-
- /* Exit EMLSR if we lost more than
- * IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS beacons on both links
- * OR more than IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH on any link.
- * OR more than IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_BSS_PARAM_CHANGED
- * and the link's bss_param_ch_count has changed.
- */
- if ((rx_missed_bcon >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS &&
- scnd_lnk_bcn_lost >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS) ||
- rx_missed_bcon >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH ||
- (bss_param_ch_cnt_link_id != link_id &&
- rx_missed_bcon >= IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_BSS_PARAM_CHANGED))
- iwl_mvm_exit_esr(mvm, vif,
- IWL_MVM_ESR_EXIT_MISSED_BEACON,
- iwl_mvm_get_primary_link(vif));
} else if (rx_missed_bcon_since_rx > IWL_MVM_MISSED_BEACONS_THRESHOLD) {
if (!iwl_mvm_has_new_tx_api(mvm))
ieee80211_beacon_loss(vif);
else
ieee80211_cqm_beacon_loss_notify(vif, GFP_ATOMIC);
-
- /* try to switch links, no-op if we don't have MLO */
- iwl_mvm_int_mlo_scan(mvm, vif);
}
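The EMLSR exit condition removed above combines the three beacon-loss thresholds dropped from constants.h earlier in this patch: 5 missed on both links, 15 on any link, or 11 when the link's bss_param_ch_count changed. As a standalone predicate:

#include <stdbool.h>

#define BCN_LOSS_THRESH_2_LINKS			 5
#define BCN_LOSS_THRESH				15
#define BCN_LOSS_THRESH_BSS_PARAM_CHANGED	11

static bool should_exit_esr(unsigned int missed, unsigned int missed_other,
			    bool bss_param_changed)
{
	return (missed >= BCN_LOSS_THRESH_2_LINKS &&
		missed_other >= BCN_LOSS_THRESH_2_LINKS) ||
	       missed >= BCN_LOSS_THRESH ||
	       (bss_param_changed &&
		missed >= BCN_LOSS_THRESH_BSS_PARAM_CHANGED);
}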
iwl_dbg_tlv_time_point(&mvm->fwrt,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 4ad3d32683d8..44029ceb8f77 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1425,12 +1425,6 @@ void iwl_mvm_mac_stop(struct ieee80211_hw *hw, bool suspend)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- /* Stop internal MLO scan, if running */
- mutex_lock(&mvm->mutex);
- iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_INT_MLO, false);
- mutex_unlock(&mvm->mutex);
-
- wiphy_work_cancel(mvm->hw->wiphy, &mvm->trig_link_selection_wk);
wiphy_work_flush(mvm->hw->wiphy, &mvm->async_handlers_wiphy_wk);
flush_work(&mvm->async_handlers_wk);
flush_work(&mvm->add_stream_wk);
@@ -1715,57 +1709,6 @@ static int iwl_mvm_alloc_bcast_mcast_sta(struct iwl_mvm *mvm,
IWL_STA_MULTICAST);
}
-static void iwl_mvm_prevent_esr_done_wk(struct wiphy *wiphy,
- struct wiphy_work *wk)
-{
- struct iwl_mvm_vif *mvmvif =
- container_of(wk, struct iwl_mvm_vif, prevent_esr_done_wk.work);
- struct iwl_mvm *mvm = mvmvif->mvm;
- struct ieee80211_vif *vif =
- container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
-
- guard(mvm)(mvm);
- iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_PREVENTION);
-}
-
-static void iwl_mvm_mlo_int_scan_wk(struct wiphy *wiphy, struct wiphy_work *wk)
-{
- struct iwl_mvm_vif *mvmvif = container_of(wk, struct iwl_mvm_vif,
- mlo_int_scan_wk.work);
- struct ieee80211_vif *vif =
- container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
-
- guard(mvm)(mvmvif->mvm);
- iwl_mvm_int_mlo_scan(mvmvif->mvm, vif);
-}
-
-static void iwl_mvm_unblock_esr_tpt(struct wiphy *wiphy, struct wiphy_work *wk)
-{
- struct iwl_mvm_vif *mvmvif =
- container_of(wk, struct iwl_mvm_vif, unblock_esr_tpt_wk);
- struct iwl_mvm *mvm = mvmvif->mvm;
- struct ieee80211_vif *vif =
- container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
-
- guard(mvm)(mvm);
- iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_TPT);
-}
-
-static void iwl_mvm_unblock_esr_tmp_non_bss(struct wiphy *wiphy,
- struct wiphy_work *wk)
-{
- struct iwl_mvm_vif *mvmvif =
- container_of(wk, struct iwl_mvm_vif,
- unblock_esr_tmp_non_bss_wk.work);
- struct iwl_mvm *mvm = mvmvif->mvm;
- struct ieee80211_vif *vif =
- container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
-
- mutex_lock(&mvm->mutex);
- iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_TMP_NON_BSS);
- mutex_unlock(&mvm->mutex);
-}
-
void iwl_mvm_mac_init_mvmvif(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif)
{
lockdep_assert_held(&mvm->mutex);
@@ -1777,18 +1720,6 @@ void iwl_mvm_mac_init_mvmvif(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif)
INIT_DELAYED_WORK(&mvmvif->csa_work,
iwl_mvm_channel_switch_disconnect_wk);
-
- wiphy_delayed_work_init(&mvmvif->prevent_esr_done_wk,
- iwl_mvm_prevent_esr_done_wk);
-
- wiphy_delayed_work_init(&mvmvif->mlo_int_scan_wk,
- iwl_mvm_mlo_int_scan_wk);
-
- wiphy_work_init(&mvmvif->unblock_esr_tpt_wk,
- iwl_mvm_unblock_esr_tpt);
-
- wiphy_delayed_work_init(&mvmvif->unblock_esr_tmp_non_bss_wk,
- iwl_mvm_unblock_esr_tmp_non_bss);
}
static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
@@ -1926,16 +1857,6 @@ void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
flush_work(&mvm->roc_done_wk);
}
- wiphy_delayed_work_cancel(mvm->hw->wiphy,
- &mvmvif->prevent_esr_done_wk);
-
- wiphy_delayed_work_cancel(mvm->hw->wiphy,
- &mvmvif->mlo_int_scan_wk);
-
- wiphy_work_cancel(mvm->hw->wiphy, &mvmvif->unblock_esr_tpt_wk);
- wiphy_delayed_work_cancel(mvm->hw->wiphy,
- &mvmvif->unblock_esr_tmp_non_bss_wk);
-
cancel_delayed_work_sync(&mvmvif->csa_work);
}
@@ -4008,21 +3929,6 @@ iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm,
callbacks->mac_ctxt_changed(mvm, vif, false);
iwl_mvm_mei_host_associated(mvm, vif, mvm_sta);
-
- memset(&mvmvif->last_esr_exit, 0,
- sizeof(mvmvif->last_esr_exit));
-
- iwl_mvm_block_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_TPT, 0);
-
- /* Block until the FW notif arrives */
- iwl_mvm_block_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_FW, 0);
-
- /* when client is authorized (AP station marked as such),
- * try to enable the best link(s).
- */
- if (vif->type == NL80211_IFTYPE_STATION &&
- !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
- iwl_mvm_select_links(mvm, vif);
}
mvm_sta->authorized = true;
@@ -4070,16 +3976,6 @@ iwl_mvm_sta_state_authorized_to_assoc(struct iwl_mvm *mvm,
/* disable beacon filtering */
iwl_mvm_disable_beacon_filter(mvm, vif);
-
- wiphy_delayed_work_cancel(mvm->hw->wiphy,
- &mvmvif->prevent_esr_done_wk);
-
- wiphy_delayed_work_cancel(mvm->hw->wiphy,
- &mvmvif->mlo_int_scan_wk);
-
- wiphy_work_cancel(mvm->hw->wiphy, &mvmvif->unblock_esr_tpt_wk);
- wiphy_delayed_work_cancel(mvm->hw->wiphy,
- &mvmvif->unblock_esr_tmp_non_bss_wk);
}
return 0;
@@ -4920,7 +4816,6 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const struct iwl_mvm_roc_ops *ops)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
u32 lmac_id;
int ret;
@@ -4933,13 +4828,6 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
*/
flush_work(&mvm->roc_done_wk);
- if (!IS_ERR_OR_NULL(bss_vif)) {
- ret = iwl_mvm_block_esr_sync(mvm, bss_vif,
- IWL_MVM_ESR_BLOCKED_ROC);
- if (ret)
- return ret;
- }
-
guard(mvm)(mvm);
switch (vif->type) {
@@ -5604,9 +5492,9 @@ static void iwl_mvm_csa_block_txqs(void *data, struct ieee80211_sta *sta)
}
#define IWL_MAX_CSA_BLOCK_TX 1500
-int iwl_mvm_pre_channel_switch(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct ieee80211_channel_switch *chsw)
+static int iwl_mvm_pre_channel_switch(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
{
struct ieee80211_vif *csa_vif;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -5724,9 +5612,9 @@ int iwl_mvm_pre_channel_switch(struct iwl_mvm *mvm,
return ret;
}
-static int iwl_mvm_mac_pre_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_channel_switch *chsw)
+int iwl_mvm_mac_pre_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
index bf24f8cb673e..b1dca76b7141 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
@@ -340,20 +340,6 @@ static int iwl_mvm_mld_assign_vif_chanctx(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- /* update EMLSR mode */
- if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
- int ret;
-
- ret = iwl_mvm_esr_non_bss_link(mvm, vif, link_conf->link_id,
- true);
- /*
- * Don't activate this link if we failed to exit EMLSR in
- * the BSS interface
- */
- if (ret)
- return ret;
- }
-
guard(mvm)(mvm);
return __iwl_mvm_mld_assign_vif_chanctx(mvm, vif, link_conf, ctx, false);
}
@@ -472,10 +458,6 @@ static void iwl_mvm_mld_unassign_vif_chanctx(struct ieee80211_hw *hw,
iwl_mvm_add_link(mvm, vif, link_conf);
}
mutex_unlock(&mvm->mutex);
-
- /* update EMLSR mode */
- if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION)
- iwl_mvm_esr_non_bss_link(mvm, vif, link_conf->link_id, false);
}
static void
@@ -684,25 +666,6 @@ static int iwl_mvm_mld_mac_sta_state(struct ieee80211_hw *hw,
&callbacks);
}
-static bool iwl_mvm_esr_bw_criteria(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *link_conf)
-{
- struct ieee80211_bss_conf *other_link;
- int link_id;
-
- /* Exit EMLSR if links don't have equal bandwidths */
- for_each_vif_active_link(vif, other_link, link_id) {
- if (link_id == link_conf->link_id)
- continue;
- if (link_conf->chanreq.oper.width ==
- other_link->chanreq.oper.width)
- return true;
- }
-
- return false;
-}
-
static void
iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
@@ -737,14 +700,6 @@ iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm,
link_changes |= LINK_CONTEXT_MODIFY_HE_PARAMS;
}
- if ((changes & BSS_CHANGED_BANDWIDTH) &&
- ieee80211_vif_link_active(vif, link_conf->link_id) &&
- mvmvif->esr_active &&
- !iwl_mvm_esr_bw_criteria(mvm, vif, link_conf))
- iwl_mvm_exit_esr(mvm, vif,
- IWL_MVM_ESR_EXIT_BANDWIDTH,
- iwl_mvm_get_primary_link(vif));
-
/* if associated, maybe puncturing changed - we'll check later */
if (vif->cfg.assoc)
link_changes |= LINK_CONTEXT_MODIFY_EHT_PARAMS;
@@ -879,11 +834,6 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm,
if (ret)
IWL_ERR(mvm, "failed to update power mode\n");
}
-
- if (changes & (BSS_CHANGED_MLD_VALID_LINKS | BSS_CHANGED_MLD_TTLM) &&
- ieee80211_vif_is_mld(vif) && mvmvif->authorized)
- wiphy_delayed_work_queue(mvm->hw->wiphy,
- &mvmvif->mlo_int_scan_wk, 0);
}
static void
@@ -1239,91 +1189,6 @@ iwl_mvm_mld_can_neg_ttlm(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return NEG_TTLM_RES_ACCEPT;
}
-static int
-iwl_mvm_mld_mac_pre_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_channel_switch *chsw)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret;
-
- mutex_lock(&mvm->mutex);
- if (mvmvif->esr_active) {
- u8 primary = iwl_mvm_get_primary_link(vif);
- int selected;
-
- /* prefer primary unless quiet CSA on it */
- if (chsw->link_id == primary && chsw->block_tx)
- selected = iwl_mvm_get_other_link(vif, primary);
- else
- selected = primary;
-
- /*
- * Remember to tell the firmware that this link can't tx.
- * Note that this logic seems to be unrelated to esr, but it
- * really is needed only when esr is active. When we have a
- * single link, the firmware will handle all this on its own.
- * In multi-link scenarios, we can learn about the CSA from
- * another link and this logic is too complex for the firmware
- * to track.
- * Since we want to de-activate the link that got a CSA, we
- * need to tell the firmware not to send any frame on that link
- * as the firmware may not be aware that link is under a CSA
- * with mode=1 (no Tx allowed).
- */
- if (chsw->block_tx && mvmvif->link[chsw->link_id])
- mvmvif->link[chsw->link_id]->csa_block_tx = true;
-
- iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_CSA, selected);
- mutex_unlock(&mvm->mutex);
-
- /*
- * If we've not kept the link active that's doing the CSA
- * then we don't need to do anything else, just return.
- */
- if (selected != chsw->link_id)
- return 0;
-
- mutex_lock(&mvm->mutex);
- }
-
- ret = iwl_mvm_pre_channel_switch(mvm, vif, chsw);
- mutex_unlock(&mvm->mutex);
-
- return ret;
-}
-
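The MLD pre-CSA handler deleted above reduces its EMLSR link choice to a small rule: keep the primary link unless the CSA is on the primary itself with Tx blocked (a quiet CSA), in which case keep the other link. A hedged sketch of just that decision; other_link() is a hypothetical stand-in for iwl_mvm_get_other_link():

#include <assert.h>
#include <stdbool.h>

/* hypothetical two-link helper standing in for iwl_mvm_get_other_link() */
static int other_link(int link_id) { return link_id == 0 ? 1 : 0; }

/* Keep the primary link across a CSA unless the switching link is the
 * primary and Tx is blocked (quiet CSA) - then keep the other link. */
static int pick_link_to_keep(int primary, int csa_link, bool block_tx)
{
        if (csa_link == primary && block_tx)
                return other_link(primary);
        return primary;
}

int main(void)
{
        assert(pick_link_to_keep(0, 1, true) == 0);   /* CSA on secondary */
        assert(pick_link_to_keep(0, 0, false) == 0);  /* non-quiet CSA */
        assert(pick_link_to_keep(0, 0, true) == 1);   /* quiet CSA on primary */
        return 0;
}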
-#define IWL_MVM_MLD_UNBLOCK_ESR_NON_BSS_TIMEOUT (5 * HZ)
-
-static void iwl_mvm_mld_prep_add_interface(struct ieee80211_hw *hw,
- enum nl80211_iftype type)
-{
- struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
- struct iwl_mvm_vif *mvmvif;
- int ret;
-
- IWL_DEBUG_MAC80211(mvm, "prep_add_interface: type=%u\n",
- type);
-
- if (IS_ERR_OR_NULL(bss_vif) ||
- !(type == NL80211_IFTYPE_AP ||
- type == NL80211_IFTYPE_P2P_GO ||
- type == NL80211_IFTYPE_P2P_CLIENT))
- return;
-
- mvmvif = iwl_mvm_vif_from_mac80211(bss_vif);
- ret = iwl_mvm_block_esr_sync(mvm, bss_vif,
- IWL_MVM_ESR_BLOCKED_TMP_NON_BSS);
- if (ret)
- return;
-
- wiphy_delayed_work_queue(mvmvif->mvm->hw->wiphy,
- &mvmvif->unblock_esr_tmp_non_bss_wk,
- IWL_MVM_MLD_UNBLOCK_ESR_NON_BSS_TIMEOUT);
-}
-
const struct ieee80211_ops iwl_mvm_mld_hw_ops = {
.tx = iwl_mvm_mac_tx,
.wake_tx_queue = iwl_mvm_mac_wake_tx_queue,
@@ -1377,7 +1242,7 @@ const struct ieee80211_ops iwl_mvm_mld_hw_ops = {
.tx_last_beacon = iwl_mvm_tx_last_beacon,
.channel_switch = iwl_mvm_channel_switch,
- .pre_channel_switch = iwl_mvm_mld_mac_pre_channel_switch,
+ .pre_channel_switch = iwl_mvm_mac_pre_channel_switch,
.post_channel_switch = iwl_mvm_post_channel_switch,
.abort_channel_switch = iwl_mvm_abort_channel_switch,
.channel_switch_rx_beacon = iwl_mvm_channel_switch_rx_beacon,
@@ -1418,5 +1283,4 @@ const struct ieee80211_ops iwl_mvm_mld_hw_ops = {
.change_sta_links = iwl_mvm_mld_change_sta_links,
.can_activate_links = iwl_mvm_mld_can_activate_links,
.can_neg_ttlm = iwl_mvm_mld_can_neg_ttlm,
- .prep_add_interface = iwl_mvm_mld_prep_add_interface,
};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
index e1010521c3ea..d9a2801636cf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
@@ -852,8 +852,6 @@ int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta, link_id);
}
- kfree(mvm_sta->mpdu_counters);
- mvm_sta->mpdu_counters = NULL;
return ret;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index fdaeefa305e1..b515028adc8f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -348,68 +348,6 @@ struct iwl_mvm_vif_link_info {
};
/**
- * enum iwl_mvm_esr_state - defines reasons for which the EMLSR is exited or
- * blocked.
- * The low 16 bits are used for blocking reasons, and the 16 higher bits
- * are used for exit reasons.
- * For the blocking reasons - use iwl_mvm_(un)block_esr(), and for the exit
- * reasons - use iwl_mvm_exit_esr().
- *
- * Note: new reasons shall be added to HANDLE_ESR_REASONS as well (for logs)
- *
- * @IWL_MVM_ESR_BLOCKED_PREVENTION: Prevent EMLSR to avoid entering and exiting
- * in a loop.
- * @IWL_MVM_ESR_BLOCKED_WOWLAN: WOWLAN is preventing the enablement of EMLSR
- * @IWL_MVM_ESR_BLOCKED_TPT: block EMLSR when there is not enough traffic
- * @IWL_MVM_ESR_BLOCKED_FW: FW didn't recommend EMLSR, or forced an exit from it
- * @IWL_MVM_ESR_BLOCKED_NON_BSS: An active non-BSS interface's link is
- * preventing EMLSR
- * @IWL_MVM_ESR_BLOCKED_ROC: remain-on-channel is preventing EMLSR
- * @IWL_MVM_ESR_BLOCKED_TMP_NON_BSS: An expected active non-BSS interface's link
- * is preventing EMLSR. This is a temporary blocking that is set when there
- * is an indication that a non-BSS interface is to be added.
- * @IWL_MVM_ESR_EXIT_MISSED_BEACON: exited EMLSR due to missed beacons
- * @IWL_MVM_ESR_EXIT_LOW_RSSI: link is deactivated/not allowed for EMLSR
- * due to low RSSI.
- * @IWL_MVM_ESR_EXIT_COEX: link is deactivated/not allowed for EMLSR
- * due to BT Coex.
- * @IWL_MVM_ESR_EXIT_BANDWIDTH: Bandwidths of primary and secondary links
- * preventing the enablement of EMLSR
- * @IWL_MVM_ESR_EXIT_CSA: CSA happened, so exit EMLSR
- * @IWL_MVM_ESR_EXIT_LINK_USAGE: Exit EMLSR due to low tpt on secondary link
- */
-enum iwl_mvm_esr_state {
- IWL_MVM_ESR_BLOCKED_PREVENTION = 0x1,
- IWL_MVM_ESR_BLOCKED_WOWLAN = 0x2,
- IWL_MVM_ESR_BLOCKED_TPT = 0x4,
- IWL_MVM_ESR_BLOCKED_FW = 0x8,
- IWL_MVM_ESR_BLOCKED_NON_BSS = 0x10,
- IWL_MVM_ESR_BLOCKED_ROC = 0x20,
- IWL_MVM_ESR_BLOCKED_TMP_NON_BSS = 0x40,
- IWL_MVM_ESR_EXIT_MISSED_BEACON = 0x10000,
- IWL_MVM_ESR_EXIT_LOW_RSSI = 0x20000,
- IWL_MVM_ESR_EXIT_COEX = 0x40000,
- IWL_MVM_ESR_EXIT_BANDWIDTH = 0x80000,
- IWL_MVM_ESR_EXIT_CSA = 0x100000,
- IWL_MVM_ESR_EXIT_LINK_USAGE = 0x200000,
-};
-
-#define IWL_MVM_BLOCK_ESR_REASONS 0xffff
-
-const char *iwl_get_esr_state_string(enum iwl_mvm_esr_state state);
-
-/**
- * struct iwl_mvm_esr_exit - details of the last exit from EMLSR mode.
- * @reason: The reason for the last exit from EMLSR.
- * &iwl_mvm_prevent_esr_reasons. Will be 0 before exiting EMLSR.
- * @ts: the time stamp of the last time we exited EMLSR.
- */
-struct iwl_mvm_esr_exit {
- unsigned long ts;
- enum iwl_mvm_esr_state reason;
-};
-
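The enum and mask removed above pack two kinds of state into one u32: blocking reasons in the low 16 bits, which gate re-entry until explicitly cleared, and exit reasons in the high 16 bits, which record why EMLSR was left. A standalone sketch of that split, using placeholder bit values rather than the driver's:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define BLOCK_REASONS 0xffffu   /* low 16 bits block, high 16 record exits */

static uint32_t disable_reason; /* mirrors the removed esr_disable_reason */

static void block(uint32_t reason)   { disable_reason |= reason; }
static void unblock(uint32_t reason) { disable_reason &= ~reason; }

static bool can_enter_emlsr(void)
{
        /* exit reasons are informational; only blocking bits gate re-entry */
        return !(disable_reason & BLOCK_REASONS);
}

int main(void)
{
        block(0x4);                  /* e.g. a TPT-style blocking bit */
        disable_reason |= 0x10000;   /* an exit reason: does not block */
        assert(!can_enter_emlsr());
        unblock(0x4);
        assert(can_enter_emlsr());
        return 0;
}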
-/**
* struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context
* @mvm: pointer back to the mvm struct
* @id: between 0 and 3
@@ -443,7 +381,6 @@ struct iwl_mvm_esr_exit {
* @deflink: default link data for use in non-MLO
* @link: link data for each link in MLO
* @esr_active: indicates eSR mode is active
- * @esr_disable_reason: a bitmap of &enum iwl_mvm_esr_state
* @pm_enabled: indicates powersave is enabled
* @link_selection_res: bitmap of active links as it was decided in the last
* link selection. Valid only for a MLO vif after assoc. 0 if there wasn't
@@ -451,15 +388,6 @@ struct iwl_mvm_esr_exit {
* @link_selection_primary: primary link selected by link selection
* @primary_link: primary link in eSR. Valid only for an associated MLD vif,
* and in eSR mode. Valid only for a STA.
- * @last_esr_exit: Details of the last exit from EMLSR.
- * @exit_same_reason_count: The number of times we exited due to the specified
- * @last_esr_exit::reason, only counting exits due to
- * &IWL_MVM_ESR_PREVENT_REASONS.
- * @prevent_esr_done_wk: work that should be done when esr prevention ends.
- * @mlo_int_scan_wk: work for the internal MLO scan.
- * @unblock_esr_tpt_wk: work for unblocking EMLSR when tpt is high enough.
- * @unblock_esr_tmp_non_bss_wk: work for removing the
- * IWL_MVM_ESR_BLOCKED_TMP_NON_BSS blocking for EMLSR.
* @roc_activity: currently running ROC activity for this vif (or
* ROC_NUM_ACTIVITIES if no activity is running).
* @session_prot_connection_loss: the connection was lost due to session
@@ -515,7 +443,6 @@ struct iwl_mvm_vif {
u8 authorized:1;
bool ps_disabled;
- u32 esr_disable_reason;
u32 ap_beacon_time;
bool bf_enabled;
bool ba_enabled;
@@ -591,12 +518,6 @@ struct iwl_mvm_vif {
u16 link_selection_res;
u8 link_selection_primary;
u8 primary_link;
- struct iwl_mvm_esr_exit last_esr_exit;
- u8 exit_same_reason_count;
- struct wiphy_delayed_work prevent_esr_done_wk;
- struct wiphy_delayed_work mlo_int_scan_wk;
- struct wiphy_work unblock_esr_tpt_wk;
- struct wiphy_delayed_work unblock_esr_tmp_non_bss_wk;
struct iwl_mvm_vif_link_info deflink;
struct iwl_mvm_vif_link_info *link[IEEE80211_MLD_MAX_NUM_LINKS];
@@ -622,7 +543,6 @@ enum iwl_scan_status {
IWL_MVM_SCAN_REGULAR = BIT(0),
IWL_MVM_SCAN_SCHED = BIT(1),
IWL_MVM_SCAN_NETDETECT = BIT(2),
- IWL_MVM_SCAN_INT_MLO = BIT(3),
IWL_MVM_SCAN_STOPPING_REGULAR = BIT(8),
IWL_MVM_SCAN_STOPPING_SCHED = BIT(9),
@@ -635,8 +555,6 @@ enum iwl_scan_status {
IWL_MVM_SCAN_STOPPING_SCHED,
IWL_MVM_SCAN_NETDETECT_MASK = IWL_MVM_SCAN_NETDETECT |
IWL_MVM_SCAN_STOPPING_NETDETECT,
- IWL_MVM_SCAN_INT_MLO_MASK = IWL_MVM_SCAN_INT_MLO |
- IWL_MVM_SCAN_STOPPING_INT_MLO,
IWL_MVM_SCAN_STOPPING_MASK = 0xff << IWL_MVM_SCAN_STOPPING_SHIFT,
IWL_MVM_SCAN_MASK = 0xff,
@@ -1017,8 +935,6 @@ struct iwl_mvm {
/* For async rx handlers that require the wiphy lock */
struct wiphy_work async_handlers_wiphy_wk;
- struct wiphy_work trig_link_selection_wk;
-
struct work_struct roc_done_wk;
unsigned long init_status;
@@ -1203,20 +1119,13 @@ struct iwl_mvm {
u8 offload_tid;
#ifdef CONFIG_IWLWIFI_DEBUGFS
bool d3_wake_sysassert;
- bool d3_test_active;
- u32 d3_test_pme_ptr;
- struct ieee80211_vif *keep_vif;
u32 last_netdetect_scans; /* no. of scans in the last net-detect wake */
#endif
#endif
wait_queue_head_t rx_sync_waitq;
- /* BT-Coex - only one of those will be used */
- union {
- struct iwl_bt_coex_prof_old_notif last_bt_notif;
- struct iwl_bt_coex_profile_notif last_bt_wifi_loss;
- };
+ struct iwl_bt_coex_prof_old_notif last_bt_notif;
struct iwl_bt_coex_ci_cmd last_bt_ci_cmd;
u8 bt_tx_prio;
@@ -2099,9 +2008,7 @@ int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int iwl_mvm_disable_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf);
-void iwl_mvm_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
u8 iwl_mvm_get_primary_link(struct ieee80211_vif *vif);
-u8 iwl_mvm_get_other_link(struct ieee80211_vif *vif, u8 link_id);
struct iwl_mvm_link_sel_data {
u8 link_id;
@@ -2111,13 +2018,6 @@ struct iwl_mvm_link_sel_data {
};
#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
-unsigned int iwl_mvm_get_link_grade(struct ieee80211_bss_conf *link_conf);
-bool iwl_mvm_mld_valid_link_pair(struct ieee80211_vif *vif,
- const struct iwl_mvm_link_sel_data *a,
- const struct iwl_mvm_link_sel_data *b);
-
-s8 iwl_mvm_average_dbm_values(const struct iwl_umac_scan_channel_survey_notif *notif);
-
extern const struct iwl_hcmd_arr iwl_mvm_groups[];
extern const unsigned int iwl_mvm_groups_size;
#endif
@@ -2201,7 +2101,6 @@ int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify);
int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
void iwl_mvm_scan_timeout_wk(struct work_struct *work);
-int iwl_mvm_int_mlo_scan(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_rx_channel_survey_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
@@ -2327,8 +2226,6 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm);
void iwl_mvm_rx_bt_coex_old_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
-void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_rssi_event_data);
void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm);
@@ -2929,9 +2826,10 @@ void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw);
void iwl_mvm_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_channel_switch *chsw);
-int iwl_mvm_pre_channel_switch(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct ieee80211_channel_switch *chsw);
+int iwl_mvm_mac_pre_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw);
+
void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf);
@@ -2988,30 +2886,6 @@ int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
/* EMLSR */
bool iwl_mvm_vif_has_esr_cap(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
-void iwl_mvm_block_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- enum iwl_mvm_esr_state reason,
- u8 link_to_keep);
-int iwl_mvm_block_esr_sync(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- enum iwl_mvm_esr_state reason);
-void iwl_mvm_unblock_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- enum iwl_mvm_esr_state reason);
-void iwl_mvm_exit_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- enum iwl_mvm_esr_state reason,
- u8 link_to_keep);
-s8 iwl_mvm_get_esr_rssi_thresh(struct iwl_mvm *mvm,
- const struct cfg80211_chan_def *chandef,
- bool low);
-void iwl_mvm_bt_coex_update_link_esr(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- int link_id);
-bool
-iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- s32 link_rssi,
- bool primary);
-int iwl_mvm_esr_non_bss_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- unsigned int link_id, bool active);
-
void
iwl_mvm_send_ap_tx_power_constraint_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index c7f08cde1f72..5ebd046371f5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -143,24 +143,6 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
-static void iwl_mvm_rx_esr_mode_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_esr_mode_notif *notif = (void *)pkt->data;
- struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm);
-
- /* FW recommendations are only for entering EMLSR */
- if (IS_ERR_OR_NULL(vif) || iwl_mvm_vif_from_mac80211(vif)->esr_active)
- return;
-
- if (le32_to_cpu(notif->action) == ESR_RECOMMEND_ENTER)
- iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_FW);
- else
- iwl_mvm_block_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_FW,
- iwl_mvm_get_primary_link(vif));
-}
-
static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
@@ -345,9 +327,6 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_old_notif,
RX_HANDLER_ASYNC_LOCKED_WIPHY,
struct iwl_bt_coex_prof_old_notif),
- RX_HANDLER_GRP(BT_COEX_GROUP, PROFILE_NOTIF, iwl_mvm_rx_bt_coex_notif,
- RX_HANDLER_ASYNC_LOCKED_WIPHY,
- struct iwl_bt_coex_profile_notif),
RX_HANDLER_NO_SIZE(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER_NO_SIZE(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
@@ -457,11 +436,6 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER_ASYNC_UNLOCKED,
struct iwl_channel_switch_error_notif),
- RX_HANDLER_GRP(DATA_PATH_GROUP, ESR_MODE_NOTIF,
- iwl_mvm_rx_esr_mode_notif,
- RX_HANDLER_ASYNC_LOCKED_WIPHY,
- struct iwl_esr_mode_notif),
-
RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF,
iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED,
struct iwl_datapath_monitor_notif),
@@ -661,7 +635,6 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD),
HCMD_NAME(SCD_QUEUE_CONFIG_CMD),
HCMD_NAME(SEC_KEY_CMD),
- HCMD_NAME(ESR_MODE_NOTIF),
HCMD_NAME(MONITOR_NOTIF),
HCMD_NAME(THERMAL_DUAL_CHAIN_REQUEST),
HCMD_NAME(BEACON_FILTER_IN_NOTIF),
@@ -1220,29 +1193,6 @@ static const struct iwl_mei_ops mei_ops = {
.nic_stolen = iwl_mvm_mei_nic_stolen,
};
-static void iwl_mvm_find_link_selection_vif(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- if (ieee80211_vif_is_mld(vif) && mvmvif->authorized)
- iwl_mvm_select_links(mvmvif->mvm, vif);
-}
-
-static void iwl_mvm_trig_link_selection(struct wiphy *wiphy,
- struct wiphy_work *wk)
-{
- struct iwl_mvm *mvm =
- container_of(wk, struct iwl_mvm, trig_link_selection_wk);
-
- mutex_lock(&mvm->mutex);
- ieee80211_iterate_active_interfaces(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_find_link_selection_vif,
- NULL);
- mutex_unlock(&mvm->mutex);
-}
-
static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg,
const struct iwl_fw *fw, struct dentry *dbgfs_dir)
@@ -1411,9 +1361,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_rf_cfg *cfg,
wiphy_work_init(&mvm->async_handlers_wiphy_wk,
iwl_mvm_async_handlers_wiphy_wk);
- wiphy_work_init(&mvm->trig_link_selection_wk,
- iwl_mvm_trig_link_selection);
-
init_waitqueue_head(&mvm->rx_sync_waitq);
mvm->queue_sync_state = 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 8fae0d41b119..8c1bb3a7ffca 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -563,7 +563,6 @@ static void iwl_mvm_update_link_sig(struct ieee80211_vif *vif, int sig,
int thold = bss_conf->cqm_rssi_thold;
int hyst = bss_conf->cqm_rssi_hyst;
int last_event;
- s8 exit_esr_thresh;
if (sig == 0) {
IWL_DEBUG_RX(mvm, "RSSI is 0 - skip signal based decision\n");
@@ -619,27 +618,6 @@ static void iwl_mvm_update_link_sig(struct ieee80211_vif *vif, int sig,
sig,
GFP_KERNEL);
}
-
- /* ESR recalculation */
- if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif))
- return;
-
- /* We're not in EMLSR and our signal is bad, try to switch link maybe */
- if (sig < IWL_MVM_LOW_RSSI_MLO_SCAN_THRESH && !mvmvif->esr_active) {
- iwl_mvm_int_mlo_scan(mvm, vif);
- return;
- }
-
- /* We are in EMLSR, check if we need to exit */
- exit_esr_thresh =
- iwl_mvm_get_esr_rssi_thresh(mvm,
- &bss_conf->chanreq.oper,
- true);
-
- if (sig < exit_esr_thresh)
- iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_LOW_RSSI,
- iwl_mvm_get_other_link(vif,
- bss_conf->link_id));
}
static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
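The deleted tail of iwl_mvm_update_link_sig() made two ordered decisions: outside EMLSR, a signal below the MLO-scan threshold kicks the internal MLO scan to look for a better link; inside EMLSR, the signal is compared against a bandwidth-dependent exit threshold obtained from iwl_mvm_get_esr_rssi_thresh(). A sketch of that decision order; the numeric thresholds here are placeholders, not the driver's constants:

#include <stdbool.h>
#include <stdio.h>

enum action { ACT_NONE, ACT_TRIGGER_MLO_SCAN, ACT_EXIT_EMLSR };

/* placeholder; the driver uses the IWL_MVM_LOW_RSSI_MLO_SCAN_THRESH tunable */
#define LOW_RSSI_MLO_SCAN_THRESH (-72)

static enum action esr_rssi_decision(int sig, bool esr_active,
                                     int exit_esr_thresh)
{
        if (sig < LOW_RSSI_MLO_SCAN_THRESH && !esr_active)
                return ACT_TRIGGER_MLO_SCAN;   /* look for a better link */
        if (esr_active && sig < exit_esr_thresh)
                return ACT_EXIT_EMLSR;         /* fall back to one link */
        return ACT_NONE;
}

int main(void)
{
        printf("%d\n", esr_rssi_decision(-80, false, -65));  /* scan */
        printf("%d\n", esr_rssi_decision(-70, true,  -65));  /* exit */
        printf("%d\n", esr_rssi_decision(-50, true,  -65));  /* none */
        return 0;
}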
@@ -914,10 +892,6 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm,
link_info->beacon_stats.avg_signal =
-le32_to_cpu(link_stats->beacon_average_energy);
- if (link_info->phy_ctxt &&
- link_info->phy_ctxt->channel->band == NL80211_BAND_2GHZ)
- iwl_mvm_bt_coex_update_link_esr(mvm, vif, link_id);
-
/* make sure that beacon statistics don't go backwards with TCM
* request to clear statistics
*/
@@ -956,111 +930,6 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm,
}
}
-#define SEC_LINK_MIN_PERC 10
-#define SEC_LINK_MIN_TX 3000
-#define SEC_LINK_MIN_RX 400
-
-/* Accept a ~20% shorter window to avoid issues due to jitter */
-#define IWL_MVM_TPT_MIN_COUNT_WINDOW (IWL_MVM_TPT_COUNT_WINDOW_SEC * HZ * 4 / 5)
-
-static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm)
-{
- struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
- struct iwl_mvm_vif *mvmvif;
- struct iwl_mvm_sta *mvmsta;
- unsigned long total_tx = 0, total_rx = 0;
- unsigned long sec_link_tx = 0, sec_link_rx = 0;
- u8 sec_link_tx_perc, sec_link_rx_perc;
- u8 sec_link;
- bool skip = false;
-
- lockdep_assert_held(&mvm->mutex);
-
- if (IS_ERR_OR_NULL(bss_vif))
- return;
-
- mvmvif = iwl_mvm_vif_from_mac80211(bss_vif);
-
- if (!mvmvif->esr_active || !mvmvif->ap_sta)
- return;
-
- mvmsta = iwl_mvm_sta_from_mac80211(mvmvif->ap_sta);
- /* We only count for the AP sta in a MLO connection */
- if (!mvmsta->mpdu_counters)
- return;
-
- /* Get the FW ID of the secondary link */
- sec_link = iwl_mvm_get_other_link(bss_vif,
- iwl_mvm_get_primary_link(bss_vif));
- if (WARN_ON(!mvmvif->link[sec_link]))
- return;
- sec_link = mvmvif->link[sec_link]->fw_link_id;
-
- /* Sum up RX and TX MPDUs from the different queues/links */
- for (int q = 0; q < mvm->trans->info.num_rxqs; q++) {
- spin_lock_bh(&mvmsta->mpdu_counters[q].lock);
-
- /* The link IDs that don't exist will contain 0 */
- for (int link = 0; link < IWL_FW_MAX_LINK_ID; link++) {
- total_tx += mvmsta->mpdu_counters[q].per_link[link].tx;
- total_rx += mvmsta->mpdu_counters[q].per_link[link].rx;
- }
-
- sec_link_tx += mvmsta->mpdu_counters[q].per_link[sec_link].tx;
- sec_link_rx += mvmsta->mpdu_counters[q].per_link[sec_link].rx;
-
- /*
- * In EMLSR we have statistics every 5 seconds, so we can reset
- * the counters upon every statistics notification.
- * The FW sends the notification regularly, but it will be
- * misaligned at the start. Skipping the measurement if it is
- * short will synchronize us.
- */
- if (jiffies - mvmsta->mpdu_counters[q].window_start <
- IWL_MVM_TPT_MIN_COUNT_WINDOW)
- skip = true;
- mvmsta->mpdu_counters[q].window_start = jiffies;
- memset(mvmsta->mpdu_counters[q].per_link, 0,
- sizeof(mvmsta->mpdu_counters[q].per_link));
-
- spin_unlock_bh(&mvmsta->mpdu_counters[q].lock);
- }
-
- if (skip) {
- IWL_DEBUG_INFO(mvm, "MPDU statistics window was short\n");
- return;
- }
-
- IWL_DEBUG_INFO(mvm, "total Tx MPDUs: %ld. total Rx MPDUs: %ld\n",
- total_tx, total_rx);
-
- /* If we don't have enough MPDUs - exit EMLSR */
- if (total_tx < IWL_MVM_ENTER_ESR_TPT_THRESH &&
- total_rx < IWL_MVM_ENTER_ESR_TPT_THRESH) {
- iwl_mvm_block_esr(mvm, bss_vif, IWL_MVM_ESR_BLOCKED_TPT,
- iwl_mvm_get_primary_link(bss_vif));
- return;
- }
-
- IWL_DEBUG_INFO(mvm, "Secondary Link %d: Tx MPDUs: %ld. Rx MPDUs: %ld\n",
- sec_link, sec_link_tx, sec_link_rx);
-
- /* Calculate the percentage of the secondary link TX/RX */
- sec_link_tx_perc = total_tx ? sec_link_tx * 100 / total_tx : 0;
- sec_link_rx_perc = total_rx ? sec_link_rx * 100 / total_rx : 0;
-
- /*
- * The TX/RX percentage is checked only if it exceeds the required
- * minimum. In addition, RX is checked only if the TX check failed.
- */
- if ((total_tx > SEC_LINK_MIN_TX &&
- sec_link_tx_perc < SEC_LINK_MIN_PERC) ||
- (total_rx > SEC_LINK_MIN_RX &&
- sec_link_rx_perc < SEC_LINK_MIN_PERC))
- iwl_mvm_exit_esr(mvm, bss_vif, IWL_MVM_ESR_EXIT_LINK_USAGE,
- iwl_mvm_get_primary_link(bss_vif));
-}
-
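The decision rule in the function removed above: leave EMLSR when the secondary link carries under SEC_LINK_MIN_PERC (10%) of the traffic, but only once the totals exceed SEC_LINK_MIN_TX/SEC_LINK_MIN_RX so the percentage is meaningful; the RX condition only matters when the TX one does not already fire. A standalone sketch using the same constants:

#include <assert.h>
#include <stdbool.h>

#define SEC_LINK_MIN_PERC 10
#define SEC_LINK_MIN_TX 3000
#define SEC_LINK_MIN_RX 400

/* true when the secondary link is too idle to justify staying in EMLSR */
static bool sec_link_underused(unsigned long total_tx, unsigned long sec_tx,
                               unsigned long total_rx, unsigned long sec_rx)
{
        unsigned long tx_perc = total_tx ? sec_tx * 100 / total_tx : 0;
        unsigned long rx_perc = total_rx ? sec_rx * 100 / total_rx : 0;

        return (total_tx > SEC_LINK_MIN_TX && tx_perc < SEC_LINK_MIN_PERC) ||
               (total_rx > SEC_LINK_MIN_RX && rx_perc < SEC_LINK_MIN_PERC);
}

int main(void)
{
        assert(sec_link_underused(4000, 100, 0, 0));    /* 2.5% of Tx */
        assert(!sec_link_underused(4000, 1000, 0, 0));  /* 25% of Tx */
        assert(!sec_link_underused(100, 1, 100, 1));    /* totals too small */
        return 0;
}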
void iwl_mvm_handle_rx_system_oper_stats(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
@@ -1088,8 +957,6 @@ void iwl_mvm_handle_rx_system_oper_stats(struct iwl_mvm *mvm,
ieee80211_iterate_stations_atomic(mvm->hw, iwl_mvm_stats_energy_iter,
average_energy);
iwl_mvm_handle_per_phy_stats(mvm, stats->per_phy);
-
- iwl_mvm_update_esr_mode_tpt(mvm);
}
void iwl_mvm_handle_rx_system_oper_part1_stats(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 62e76a79f621..d35c63a673b6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -332,6 +332,7 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
struct ieee80211_key_conf *key;
u32 len = le16_to_cpu(desc->mpdu_len);
const u8 *frame = (void *)hdr;
+ const u8 *mmie;
if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) == IWL_RX_MPDU_STATUS_SEC_NONE)
return 0;
@@ -375,11 +376,15 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
goto report;
}
- if (len < key->icv_len + IEEE80211_GMAC_PN_LEN + 2)
+ if (len < key->icv_len)
goto report;
/* get the real key ID */
- keyid = frame[len - key->icv_len - IEEE80211_GMAC_PN_LEN - 2];
+ mmie = frame + (len - key->icv_len);
+
+ /* the position of the key_id in ieee80211_mmie_16 is the same */
+ keyid = le16_to_cpu(((const struct ieee80211_mmie *) mmie)->key_id);
+
/* and if that's the other key, look it up */
if (keyid != key->keyidx) {
/*
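The replacement code above pulls the key ID straight out of the management MIC IE at the tail of the frame: struct ieee80211_mmie and struct ieee80211_mmie_16 share the same element_id/length/key_id prefix and differ only in MIC size, so casting through the shorter struct is valid for both, and key->icv_len supplies the correct tail length. A simplified, self-contained layout sketch (packed structs, little-endian host assumed):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified copies of the two MMIE layouts: the key_id field sits at
 * the same offset in both; only the MIC length differs. */
struct mmie_8 {
        uint8_t element_id, length;
        uint16_t key_id;                /* little-endian on the air */
        uint8_t ipn[6];
        uint8_t mic[8];
} __attribute__((packed));

struct mmie_16 {
        uint8_t element_id, length;
        uint16_t key_id;
        uint8_t ipn[6];
        uint8_t mic[16];
} __attribute__((packed));

int main(void)
{
        uint8_t frame[64] = { 0 };
        size_t len = sizeof(frame), icv_len = sizeof(struct mmie_16);
        struct mmie_16 mmie = { .element_id = 76, .key_id = 5 };

        memcpy(frame + len - icv_len, &mmie, icv_len);

        /* same extraction as the patch: the MMIE starts icv_len from the end */
        const struct mmie_8 *m = (const void *)(frame + len - icv_len);
        printf("key_id=%u\n", (unsigned)m->key_id); /* prints 5 on LE hosts */
        return 0;
}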
@@ -2099,7 +2104,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct ieee80211_sta *sta = NULL;
struct sk_buff *skb;
u8 crypt_len = 0;
- u8 sta_id = le32_get_bits(desc->status, IWL_RX_MPDU_STATUS_STA_ID);
size_t desc_size;
struct iwl_mvm_rx_phy_data phy_data = {};
u32 format;
@@ -2246,6 +2250,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rcu_read_lock();
if (desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
+ u8 sta_id = le32_get_bits(desc->status,
+ IWL_RX_MPDU_STATUS_STA_ID);
+
if (!WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations)) {
struct ieee80211_link_sta *link_sta;
@@ -2373,16 +2380,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
iwl_mvm_agg_rx_received(mvm, reorder_data, baid);
}
-
- if (ieee80211_is_data(hdr->frame_control)) {
- u8 sub_frame_idx = desc->amsdu_info &
- IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
-
- /* 0 means not an A-MSDU, and 1 means a new A-MSDU */
- if (!sub_frame_idx || sub_frame_idx == 1)
- iwl_mvm_count_mpdu(mvmsta, sta_id, 1, false,
- queue);
- }
}
/* management stuff on default queue */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 9ce1ce0dab34..b588f1dcf20d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1392,8 +1392,6 @@ static u32 iwl_mvm_scan_umac_ooc_priority(int type)
{
if (type == IWL_MVM_SCAN_REGULAR)
return IWL_SCAN_PRIORITY_EXT_6;
- if (type == IWL_MVM_SCAN_INT_MLO)
- return IWL_SCAN_PRIORITY_EXT_4;
return IWL_SCAN_PRIORITY_EXT_2;
}
@@ -3220,7 +3218,6 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
struct iwl_umac_scan_complete *notif = (void *)pkt->data;
u32 uid = __le32_to_cpu(notif->uid);
bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
- bool select_links = false;
mvm->mei_scan_filter.is_mei_limited_scan = false;
@@ -3267,13 +3264,6 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
} else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
ieee80211_sched_scan_stopped(mvm->hw);
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
- } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_INT_MLO) {
- IWL_DEBUG_SCAN(mvm, "Internal MLO scan completed\n");
- /*
- * Other scan types won't necessarily scan for the MLD links channels.
- * Therefore, only select links after successful internal scan.
- */
- select_links = notif->status == IWL_SCAN_OFFLOAD_COMPLETED;
}
mvm->scan_status &= ~mvm->scan_uid_status[uid];
@@ -3286,9 +3276,6 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
mvm->last_ebs_successful = false;
mvm->scan_uid_status[uid] = 0;
-
- if (select_links)
- wiphy_work_queue(mvm->hw->wiphy, &mvm->trig_link_selection_wk);
}
void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
@@ -3483,11 +3470,6 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
mvm->scan_uid_status[uid] = 0;
}
- uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_INT_MLO);
- if (uid >= 0) {
- IWL_DEBUG_SCAN(mvm, "Internal MLO scan aborted\n");
- mvm->scan_uid_status[uid] = 0;
- }
uid = iwl_mvm_scan_uid_by_status(mvm,
IWL_MVM_SCAN_STOPPING_REGULAR);
@@ -3583,89 +3565,6 @@ out:
return ret;
}
-static int iwl_mvm_int_mlo_scan_start(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct ieee80211_channel **channels,
- size_t n_channels)
-{
- struct cfg80211_scan_request *req = NULL;
- struct ieee80211_scan_ies ies = {};
- size_t size, i;
- int ret;
-
- lockdep_assert_held(&mvm->mutex);
-
- IWL_DEBUG_SCAN(mvm, "Starting Internal MLO scan: n_channels=%zu\n",
- n_channels);
-
- if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif) ||
- hweight16(vif->valid_links) == 1)
- return -EINVAL;
-
- size = struct_size(req, channels, n_channels);
- req = kzalloc(size, GFP_KERNEL);
- if (!req)
- return -ENOMEM;
-
- /* set the requested channels */
- for (i = 0; i < n_channels; i++)
- req->channels[i] = channels[i];
-
- req->n_channels = n_channels;
-
- /* set the rates */
- for (i = 0; i < NUM_NL80211_BANDS; i++)
- if (mvm->hw->wiphy->bands[i])
- req->rates[i] =
- (1 << mvm->hw->wiphy->bands[i]->n_bitrates) - 1;
-
- req->wdev = ieee80211_vif_to_wdev(vif);
- req->wiphy = mvm->hw->wiphy;
- req->scan_start = jiffies;
- req->tsf_report_link_id = -1;
-
- ret = _iwl_mvm_single_scan_start(mvm, vif, req, &ies,
- IWL_MVM_SCAN_INT_MLO);
- kfree(req);
-
- IWL_DEBUG_SCAN(mvm, "Internal MLO scan: ret=%d\n", ret);
- return ret;
-}
-
-int iwl_mvm_int_mlo_scan(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
-{
- struct ieee80211_channel *channels[IEEE80211_MLD_MAX_NUM_LINKS];
- unsigned long usable_links = ieee80211_vif_usable_links(vif);
- size_t n_channels = 0;
- u8 link_id;
-
- lockdep_assert_held(&mvm->mutex);
-
- if (mvm->scan_status & IWL_MVM_SCAN_INT_MLO) {
- IWL_DEBUG_SCAN(mvm, "Internal MLO scan is already running\n");
- return -EBUSY;
- }
-
- rcu_read_lock();
-
- for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
- struct ieee80211_bss_conf *link_conf =
- rcu_dereference(vif->link_conf[link_id]);
-
- if (WARN_ON_ONCE(!link_conf))
- continue;
-
- channels[n_channels++] = link_conf->chanreq.oper.chan;
- }
-
- rcu_read_unlock();
-
- if (!n_channels)
- return -EINVAL;
-
- return iwl_mvm_int_mlo_scan_start(mvm, vif, channels, n_channels);
-}
-
static int iwl_mvm_chanidx_from_phy(struct iwl_mvm *mvm,
enum nl80211_band band,
u16 phy_chan_num)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 11c6b86db4ec..363232bb74fa 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1835,18 +1835,6 @@ int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);
- /* MPDUs are counted only when EMLSR is possible */
- if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
- !sta->tdls && ieee80211_vif_is_mld(vif)) {
- mvm_sta->mpdu_counters =
- kcalloc(mvm->trans->info.num_rxqs,
- sizeof(*mvm_sta->mpdu_counters),
- GFP_KERNEL);
- if (mvm_sta->mpdu_counters)
- for (int q = 0; q < mvm->trans->info.num_rxqs; q++)
- spin_lock_init(&mvm_sta->mpdu_counters[q].lock);
- }
-
return 0;
}
@@ -4328,80 +4316,3 @@ void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
if (ret)
IWL_ERR(mvm, "Failed to cancel the channel switch\n");
}
-
-static int iwl_mvm_fw_sta_id_to_fw_link_id(struct iwl_mvm_vif *mvmvif,
- u8 fw_sta_id)
-{
- struct ieee80211_link_sta *link_sta =
- rcu_dereference(mvmvif->mvm->fw_id_to_link_sta[fw_sta_id]);
- struct iwl_mvm_vif_link_info *link;
-
- if (WARN_ON_ONCE(!link_sta))
- return -EINVAL;
-
- link = mvmvif->link[link_sta->link_id];
-
- if (WARN_ON_ONCE(!link))
- return -EINVAL;
-
- return link->fw_link_id;
-}
-
-#define IWL_MVM_TPT_COUNT_WINDOW (IWL_MVM_TPT_COUNT_WINDOW_SEC * HZ)
-
-void iwl_mvm_count_mpdu(struct iwl_mvm_sta *mvm_sta, u8 fw_sta_id, u32 count,
- bool tx, int queue)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvm_sta->vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- struct iwl_mvm_tpt_counter *queue_counter;
- struct iwl_mvm_mpdu_counter *link_counter;
- u32 total_mpdus = 0;
- int fw_link_id;
-
- /* Count only for a BSS sta, and only when EMLSR is possible */
- if (!mvm_sta->mpdu_counters)
- return;
-
- /* Map sta id to link id */
- fw_link_id = iwl_mvm_fw_sta_id_to_fw_link_id(mvmvif, fw_sta_id);
- if (fw_link_id < 0)
- return;
-
- queue_counter = &mvm_sta->mpdu_counters[queue];
- link_counter = &queue_counter->per_link[fw_link_id];
-
- spin_lock_bh(&queue_counter->lock);
-
- if (tx)
- link_counter->tx += count;
- else
- link_counter->rx += count;
-
- /*
- * When not in EMLSR, the window and the decision to enter EMLSR are
- * handled during counting, when in EMLSR - in the statistics flow
- */
- if (mvmvif->esr_active)
- goto out;
-
- if (time_is_before_jiffies(queue_counter->window_start +
- IWL_MVM_TPT_COUNT_WINDOW)) {
- memset(queue_counter->per_link, 0,
- sizeof(queue_counter->per_link));
- queue_counter->window_start = jiffies;
-
- IWL_DEBUG_INFO(mvm, "MPDU counters are cleared\n");
- }
-
- for (int i = 0; i < IWL_FW_MAX_LINK_ID; i++)
- total_mpdus += tx ? queue_counter->per_link[i].tx :
- queue_counter->per_link[i].rx;
-
- if (total_mpdus > IWL_MVM_ENTER_ESR_TPT_THRESH)
- wiphy_work_queue(mvmvif->mvm->hw->wiphy,
- &mvmvif->unblock_esr_tpt_wk);
-
-out:
- spin_unlock_bh(&queue_counter->lock);
-}
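The removed iwl_mvm_count_mpdu() kept one spinlock-protected counter per RX queue, restarted the counting window once it went stale, and queued the unblock work when the per-window total crossed the enter-EMLSR throughput threshold. A userspace sketch of the windowed-counter core, with a placeholder threshold and wall-clock time in place of jiffies:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define WINDOW_SEC 5
#define ENTER_TPT_THRESH 400   /* placeholder; the driver uses a tunable */

struct tpt_counter {
        unsigned long tx, rx;
        time_t window_start;   /* the driver stamps jiffies instead */
};

/* Count MPDUs and report whether the window total crosses the threshold.
 * The driver keeps one such counter per RX queue, under a spinlock. */
static bool count_mpdu(struct tpt_counter *c, unsigned int n, bool tx)
{
        time_t now = time(NULL);

        if (now - c->window_start > WINDOW_SEC) {
                c->tx = c->rx = 0;  /* stale window: restart counting */
                c->window_start = now;
        }

        if (tx)
                c->tx += n;
        else
                c->rx += n;

        return c->tx + c->rx > ENTER_TPT_THRESH;
}

int main(void)
{
        struct tpt_counter c = { .window_start = time(NULL) };

        for (int i = 1; i <= 500; i++) {
                if (count_mpdu(&c, 1, true)) {
                        printf("threshold crossed at MPDU %d\n", i);
                        break;
                }
        }
        return 0;
}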
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index f6906061510b..c25edc7c1813 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -344,24 +344,6 @@ struct iwl_mvm_link_sta {
u8 avg_energy;
};
-struct iwl_mvm_mpdu_counter {
- u32 tx;
- u32 rx;
-};
-
-/**
- * struct iwl_mvm_tpt_counter - per-queue MPDU counter
- *
- * @lock: Needed to protect the counters when modified from statistics.
- * @per_link: per-link counters.
- * @window_start: timestamp of the counting-window start
- */
-struct iwl_mvm_tpt_counter {
- spinlock_t lock;
- struct iwl_mvm_mpdu_counter per_link[IWL_FW_MAX_LINK_ID];
- unsigned long window_start;
-} ____cacheline_aligned_in_smp;
-
/**
* struct iwl_mvm_sta - representation of a station in the driver
* @vif: the interface the station belongs to
@@ -409,7 +391,6 @@ struct iwl_mvm_tpt_counter {
* @link: per link sta entries. For non-MLO only link[0] holds data. For MLO,
* link[0] points to deflink and link[link_id] is allocated when new link
* sta is added.
- * @mpdu_counters: RX/TX MPDUs counters for each queue.
*
* When mac80211 creates a station it reserves some space (hw->sta_data_size)
* in the structure for use by driver. This structure is placed in that
@@ -449,8 +430,6 @@ struct iwl_mvm_sta {
struct iwl_mvm_link_sta deflink;
struct iwl_mvm_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
-
- struct iwl_mvm_tpt_counter *mpdu_counters;
};
u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data);
@@ -533,9 +512,6 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
-void iwl_mvm_count_mpdu(struct iwl_mvm_sta *mvm_sta, u8 fw_sta_id, u32 count,
- bool tx, int queue);
-
/* AMPDU */
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start, u16 buf_size, u16 timeout);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile
index bb33f4a06f1c..2267be4cfb44 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile
@@ -1,3 +1,3 @@
-iwlmvm-tests-y += module.o links.o hcmd.o
+iwlmvm-tests-y += module.o hcmd.o
obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += iwlmvm-tests.o
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c b/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c
deleted file mode 100644
index d692f1813d44..000000000000
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c
+++ /dev/null
@@ -1,433 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * KUnit tests for channel helper functions
- *
- * Copyright (C) 2024 Intel Corporation
- */
-#include <net/mac80211.h>
-#include "../mvm.h"
-#include <kunit/test.h>
-
-MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
-
-static struct wiphy wiphy = {
- .mtx = __MUTEX_INITIALIZER(wiphy.mtx),
-};
-
-static struct ieee80211_hw hw = {
- .wiphy = &wiphy,
-};
-
-static struct ieee80211_channel chan_5ghz = {
- .band = NL80211_BAND_5GHZ,
-};
-
-static struct ieee80211_channel chan_6ghz = {
- .band = NL80211_BAND_6GHZ,
-};
-
-static struct ieee80211_channel chan_2ghz = {
- .band = NL80211_BAND_2GHZ,
-};
-
-static struct cfg80211_chan_def chandef_a = {};
-
-static struct cfg80211_chan_def chandef_b = {};
-
-static struct iwl_mvm_phy_ctxt ctx = {};
-
-static struct iwl_mvm_vif_link_info mvm_link = {
- .phy_ctxt = &ctx,
- .active = true
-};
-
-static struct cfg80211_bss bss = {};
-
-static struct ieee80211_bss_conf link_conf = {.bss = &bss};
-
-static const struct iwl_fw_cmd_version entry = {
- .group = LEGACY_GROUP,
- .cmd = BT_PROFILE_NOTIFICATION,
- .notif_ver = 4
-};
-
-static struct iwl_fw fw = {
- .ucode_capa = {
- .n_cmd_versions = 1,
- .cmd_versions = &entry,
- },
-};
-
-static struct iwl_mvm mvm = {
- .hw = &hw,
- .fw = &fw,
-};
-
-static const struct link_grading_case {
- const char *desc;
- const struct cfg80211_chan_def chandef;
- s32 signal;
- s16 channel_util;
- int chan_load_by_us;
- unsigned int grade;
-} link_grading_cases[] = {
- {
- .desc = "UHB, RSSI below range, no factors",
- .chandef = {
- .chan = &chan_6ghz,
- .width = NL80211_CHAN_WIDTH_20,
- },
- .signal = -100,
- .grade = 177,
- },
- {
- .desc = "LB, RSSI in range, no factors",
- .chandef = {
- .chan = &chan_2ghz,
- .width = NL80211_CHAN_WIDTH_20,
- },
- .signal = -84,
- .grade = 344,
- },
- {
- .desc = "HB, RSSI above range, no factors",
- .chandef = {
- .chan = &chan_5ghz,
- .width = NL80211_CHAN_WIDTH_20,
- },
- .signal = -50,
- .grade = 3442,
- },
- {
- .desc = "HB, BSS Load IE (20 percent), inactive link, no puncturing factor",
- .chandef = {
- .chan = &chan_5ghz,
- .width = NL80211_CHAN_WIDTH_20,
- },
- .signal = -66,
- .channel_util = 51,
- .grade = 1836,
- },
- {
- .desc = "LB, BSS Load IE (20 percent), active link, chan_load_by_us=10 percent. No puncturing factor",
- .chandef = {
- .chan = &chan_2ghz,
- .width = NL80211_CHAN_WIDTH_20,
- },
- .signal = -61,
- .channel_util = 51,
- .chan_load_by_us = 10,
- .grade = 2061,
- },
- {
- .desc = "UHB, BSS Load IE (40 percent), active link, chan_load_by_us=50 (invalid) percent. No puncturing factor",
- .chandef = {
- .chan = &chan_6ghz,
- .width = NL80211_CHAN_WIDTH_20,
- },
- .signal = -66,
- .channel_util = 102,
- .chan_load_by_us = 50,
- .grade = 1552,
- },
- { .desc = "HB, 80 MHz, no channel load factor, punctured percentage 0",
- .chandef = {
- .chan = &chan_5ghz,
- .width = NL80211_CHAN_WIDTH_80,
- .punctured = 0x0000
- },
- .signal = -72,
- .grade = 1750,
- },
- { .desc = "HB, 160 MHz, no channel load factor, punctured percentage 25",
- .chandef = {
- .chan = &chan_5ghz,
- .width = NL80211_CHAN_WIDTH_160,
- .punctured = 0x3
- },
- .signal = -72,
- .grade = 1312,
- },
- { .desc = "UHB, 320 MHz, no channel load factor, punctured percentage 12.5 (2/16)",
- .chandef = {
- .chan = &chan_6ghz,
- .width = NL80211_CHAN_WIDTH_320,
- .punctured = 0x3
- },
- .signal = -72,
- .grade = 1806,
- },
- { .desc = "HB, 160 MHz, channel load 20, channel load by us 10, punctured percentage 25",
- .chandef = {
- .chan = &chan_5ghz,
- .width = NL80211_CHAN_WIDTH_160,
- .punctured = 0x3
- },
- .channel_util = 51,
- .chan_load_by_us = 10,
- .signal = -72,
- .grade = 1179,
- },
-};
-
-KUNIT_ARRAY_PARAM_DESC(link_grading, link_grading_cases, desc)
-
-static void setup_link_conf(struct kunit *test)
-{
- const struct link_grading_case *params = test->param_value;
- size_t vif_size = sizeof(struct ieee80211_vif) +
- sizeof(struct iwl_mvm_vif);
- struct ieee80211_vif *vif = kunit_kzalloc(test, vif_size, GFP_KERNEL);
- struct ieee80211_bss_load_elem *bss_load;
- struct element *element;
- size_t ies_size = sizeof(struct cfg80211_bss_ies) + sizeof(*bss_load) + sizeof(element);
- struct cfg80211_bss_ies *ies;
- struct iwl_mvm_vif *mvmvif;
-
- KUNIT_ASSERT_NOT_NULL(test, vif);
-
- mvmvif = iwl_mvm_vif_from_mac80211(vif);
- if (params->chan_load_by_us > 0) {
- ctx.channel_load_by_us = params->chan_load_by_us;
- mvmvif->link[0] = &mvm_link;
- }
-
- link_conf.vif = vif;
- link_conf.chanreq.oper = params->chandef;
- bss.signal = DBM_TO_MBM(params->signal);
-
- ies = kunit_kzalloc(test, ies_size, GFP_KERNEL);
- KUNIT_ASSERT_NOT_NULL(test, ies);
- ies->len = sizeof(*bss_load) + sizeof(struct element);
-
- element = (void *)ies->data;
- element->datalen = sizeof(*bss_load);
- element->id = 11;
-
- bss_load = (void *)element->data;
- bss_load->channel_util = params->channel_util;
-
- rcu_assign_pointer(bss.ies, ies);
- rcu_assign_pointer(bss.beacon_ies, ies);
-}
-
-static void test_link_grading(struct kunit *test)
-{
- const struct link_grading_case *params = test->param_value;
- unsigned int ret;
-
- setup_link_conf(test);
-
- rcu_read_lock();
- ret = iwl_mvm_get_link_grade(&link_conf);
- rcu_read_unlock();
-
- KUNIT_EXPECT_EQ(test, ret, params->grade);
-
- kunit_kfree(test, link_conf.vif);
- RCU_INIT_POINTER(bss.ies, NULL);
-}
-
-static struct kunit_case link_grading_test_cases[] = {
- KUNIT_CASE_PARAM(test_link_grading, link_grading_gen_params),
- {}
-};
-
-static struct kunit_suite link_grading = {
- .name = "iwlmvm-link-grading",
- .test_cases = link_grading_test_cases,
-};
-
-kunit_test_suite(link_grading);
-
-static const struct valid_link_pair_case {
- const char *desc;
- bool bt;
- struct ieee80211_channel *chan_a;
- struct ieee80211_channel *chan_b;
- enum nl80211_chan_width cw_a;
- enum nl80211_chan_width cw_b;
- s32 sig_a;
- s32 sig_b;
- bool csa_a;
- bool valid;
-} valid_link_pair_cases[] = {
- {
- .desc = "HB + UHB, valid.",
- .chan_a = &chan_6ghz,
- .chan_b = &chan_5ghz,
- .valid = true,
- },
- {
- .desc = "LB + HB, no BT.",
- .chan_a = &chan_2ghz,
- .chan_b = &chan_5ghz,
- .valid = true,
- },
- {
- .desc = "LB + HB, with BT.",
- .bt = true,
- .chan_a = &chan_2ghz,
- .chan_b = &chan_5ghz,
- .valid = false,
- },
- {
- .desc = "Same band",
- .chan_a = &chan_2ghz,
- .chan_b = &chan_2ghz,
- .valid = false,
- },
- {
- .desc = "RSSI: LB, 20 MHz, low",
- .chan_a = &chan_2ghz,
- .cw_a = NL80211_CHAN_WIDTH_20,
- .sig_a = -68,
- .chan_b = &chan_5ghz,
- .valid = false,
- },
- {
- .desc = "RSSI: UHB, 20 MHz, high",
- .chan_a = &chan_6ghz,
- .cw_a = NL80211_CHAN_WIDTH_20,
- .sig_a = -66,
- .chan_b = &chan_5ghz,
- .cw_b = NL80211_CHAN_WIDTH_20,
- .valid = true,
- },
- {
- .desc = "RSSI: UHB, 40 MHz, low",
- .chan_a = &chan_6ghz,
- .cw_a = NL80211_CHAN_WIDTH_40,
- .sig_a = -65,
- .chan_b = &chan_5ghz,
- .cw_b = NL80211_CHAN_WIDTH_40,
- .valid = false,
- },
- {
- .desc = "RSSI: UHB, 40 MHz, high",
- .chan_a = &chan_6ghz,
- .cw_a = NL80211_CHAN_WIDTH_40,
- .sig_a = -63,
- .chan_b = &chan_5ghz,
- .cw_b = NL80211_CHAN_WIDTH_40,
- .valid = true,
- },
- {
- .desc = "RSSI: UHB, 80 MHz, low",
- .chan_a = &chan_6ghz,
- .cw_a = NL80211_CHAN_WIDTH_80,
- .sig_a = -62,
- .chan_b = &chan_5ghz,
- .cw_b = NL80211_CHAN_WIDTH_80,
- .valid = false,
- },
- {
- .desc = "RSSI: UHB, 80 MHz, high",
- .chan_a = &chan_6ghz,
- .cw_a = NL80211_CHAN_WIDTH_80,
- .sig_a = -60,
- .chan_b = &chan_5ghz,
- .cw_b = NL80211_CHAN_WIDTH_80,
- .valid = true,
- },
- {
- .desc = "RSSI: UHB, 160 MHz, low",
- .chan_a = &chan_6ghz,
- .cw_a = NL80211_CHAN_WIDTH_160,
- .sig_a = -59,
- .chan_b = &chan_5ghz,
- .cw_b = NL80211_CHAN_WIDTH_160,
- .valid = false,
- },
- {
- .desc = "RSSI: HB, 160 MHz, high",
- .chan_a = &chan_6ghz,
- .cw_a = NL80211_CHAN_WIDTH_160,
- .sig_a = -5,
- .chan_b = &chan_5ghz,
- .cw_b = NL80211_CHAN_WIDTH_160,
- .valid = true,
- },
- {
- .desc = "CSA active",
- .chan_a = &chan_6ghz,
- .cw_a = NL80211_CHAN_WIDTH_160,
- .sig_a = -5,
- .chan_b = &chan_5ghz,
- .cw_b = NL80211_CHAN_WIDTH_160,
- .valid = false,
- /* same as previous entry with valid=true except for CSA */
- .csa_a = true,
- },
-};
-
-KUNIT_ARRAY_PARAM_DESC(valid_link_pair, valid_link_pair_cases, desc)
-
-static void test_valid_link_pair(struct kunit *test)
-{
- const struct valid_link_pair_case *params = test->param_value;
- size_t vif_size = sizeof(struct ieee80211_vif) +
- sizeof(struct iwl_mvm_vif);
- struct ieee80211_vif *vif = kunit_kzalloc(test, vif_size, GFP_KERNEL);
- struct iwl_trans *trans = kunit_kzalloc(test, sizeof(struct iwl_trans),
- GFP_KERNEL);
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm_link_sel_data link_a = {
- .chandef = &chandef_a,
- .link_id = 1,
- .signal = params->sig_a,
- };
- struct iwl_mvm_link_sel_data link_b = {
- .chandef = &chandef_b,
- .link_id = 5,
- .signal = params->sig_b,
- };
- struct ieee80211_bss_conf *conf;
- bool result;
-
- KUNIT_ASSERT_NOT_NULL(test, vif);
- KUNIT_ASSERT_NOT_NULL(test, trans);
-
- chandef_a.chan = params->chan_a;
- chandef_b.chan = params->chan_b;
-
- chandef_a.width = params->cw_a ?: NL80211_CHAN_WIDTH_20;
- chandef_b.width = params->cw_b ?: NL80211_CHAN_WIDTH_20;
-
- mvm.trans = trans;
-
- mvm.last_bt_notif.wifi_loss_low_rssi = params->bt;
- mvmvif->mvm = &mvm;
-
- conf = kunit_kzalloc(test, sizeof(*vif->link_conf[0]), GFP_KERNEL);
- KUNIT_ASSERT_NOT_NULL(test, conf);
- conf->chanreq.oper = chandef_a;
- conf->csa_active = params->csa_a;
- vif->link_conf[link_a.link_id] = (void __rcu *)conf;
-
- conf = kunit_kzalloc(test, sizeof(*vif->link_conf[0]), GFP_KERNEL);
- KUNIT_ASSERT_NOT_NULL(test, conf);
- conf->chanreq.oper = chandef_b;
- vif->link_conf[link_b.link_id] = (void __rcu *)conf;
-
- wiphy_lock(&wiphy);
- result = iwl_mvm_mld_valid_link_pair(vif, &link_a, &link_b);
- wiphy_unlock(&wiphy);
-
- KUNIT_EXPECT_EQ(test, result, params->valid);
-
- kunit_kfree(test, vif);
- kunit_kfree(test, trans);
-}
-
-static struct kunit_case valid_link_pair_test_cases[] = {
- KUNIT_CASE_PARAM(test_valid_link_pair, valid_link_pair_gen_params),
- {},
-};
-
-static struct kunit_suite valid_link_pair = {
- .name = "iwlmvm-valid-link-pair",
- .test_cases = valid_link_pair_test_cases,
-};
-
-kunit_test_suite(valid_link_pair);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index aa653782d6d7..0c9c2492d8a7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -47,7 +47,6 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
{
- struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
struct ieee80211_vif *vif = mvm->p2p_device_vif;
lockdep_assert_held(&mvm->mutex);
@@ -125,8 +124,6 @@ static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
iwl_mvm_rm_aux_sta(mvm);
}
- if (!IS_ERR_OR_NULL(bss_vif))
- iwl_mvm_unblock_esr(mvm, bss_vif, IWL_MVM_ESR_BLOCKED_ROC);
mutex_unlock(&mvm->mutex);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 25d1a882a6a0..bb97837baeda 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1769,9 +1769,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
IWL_DEBUG_TX_REPLY(mvm,
"Next reclaimed packet:%d\n",
next_reclaimed);
- if (tid < IWL_MAX_TID_COUNT)
- iwl_mvm_count_mpdu(mvmsta, sta_id, 1,
- true, 0);
} else {
IWL_DEBUG_TX_REPLY(mvm,
"NDP - don't update next_reclaimed\n");
@@ -2150,13 +2147,10 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
ba_res->tx_rate, false);
}
- if (mvmsta) {
+ if (mvmsta)
iwl_mvm_tx_airtime(mvm, mvmsta,
le32_to_cpu(ba_res->wireless_time));
- iwl_mvm_count_mpdu(mvmsta, sta_id,
- le16_to_cpu(ba_res->txed), true, 0);
- }
rcu_read_unlock();
return;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 62da0132f383..22602c32faa5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -22,11 +22,6 @@ int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
{
int ret;
-#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
- if (WARN_ON(mvm->d3_test_active))
- return -EIO;
-#endif
-
/*
* Synchronous commands from this op-mode must hold
* the mutex, this ensures we don't try to send two
@@ -79,11 +74,6 @@ int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
lockdep_assert_held(&mvm->mutex);
-#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
- if (WARN_ON(mvm->d3_test_active))
- return -EIO;
-#endif
-
/*
* Only synchronous commands can wait for status,
* we use WANT_SKB so the caller can't.
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index b7add05f7a85..b21a4d8eb105 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -124,13 +124,13 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_mac_cfg)},/* low 5GHz active */
{IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_mac_cfg)},/* high 5GHz active */
-/* 6x30 Series */
- {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1000_mac_cfg)},
- {IWL_PCI_DEVICE(0x008A, 0x5307, iwl1000_mac_cfg)},
- {IWL_PCI_DEVICE(0x008A, 0x5325, iwl1000_mac_cfg)},
- {IWL_PCI_DEVICE(0x008A, 0x5327, iwl1000_mac_cfg)},
- {IWL_PCI_DEVICE(0x008B, 0x5315, iwl1000_mac_cfg)},
- {IWL_PCI_DEVICE(0x008B, 0x5317, iwl1000_mac_cfg)},
+/* 1030/6x30 Series */
+ {IWL_PCI_DEVICE(0x008A, 0x5305, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x008A, 0x5307, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x008A, 0x5325, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x008A, 0x5327, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x008B, 0x5315, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x008B, 0x5317, iwl6030_mac_cfg)},
{IWL_PCI_DEVICE(0x0090, 0x5211, iwl6030_mac_cfg)},
{IWL_PCI_DEVICE(0x0090, 0x5215, iwl6030_mac_cfg)},
{IWL_PCI_DEVICE(0x0090, 0x5216, iwl6030_mac_cfg)},
@@ -181,12 +181,12 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x08AE, 0x1027, iwl1000_mac_cfg)},
/* 130 Series WiFi */
- {IWL_PCI_DEVICE(0x0896, 0x5005, iwl1000_mac_cfg)},
- {IWL_PCI_DEVICE(0x0896, 0x5007, iwl1000_mac_cfg)},
- {IWL_PCI_DEVICE(0x0897, 0x5015, iwl1000_mac_cfg)},
- {IWL_PCI_DEVICE(0x0897, 0x5017, iwl1000_mac_cfg)},
- {IWL_PCI_DEVICE(0x0896, 0x5025, iwl1000_mac_cfg)},
- {IWL_PCI_DEVICE(0x0896, 0x5027, iwl1000_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0896, 0x5005, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0896, 0x5007, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0897, 0x5015, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0897, 0x5017, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0896, 0x5025, iwl6030_mac_cfg)},
+ {IWL_PCI_DEVICE(0x0896, 0x5027, iwl6030_mac_cfg)},
/* 2x00 Series */
{IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_mac_cfg)},
@@ -673,6 +673,8 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_sff_name,
DEVICE(0x0082), SUBDEV_MASKED(0xC000, 0xF000)),
+ IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_sff_name,
+ DEVICE(0x0085), SUBDEV_MASKED(0xC000, 0xF000)),
IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_d_name,
DEVICE(0x0082), SUBDEV(0x4820)),
IWL_DEV_INFO(iwl6005_n_cfg, iwl6005_2agn_mow1_name,
@@ -729,10 +731,10 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
DEVICE(0x0083), SUBDEV_MASKED(0x5, 0xF)),
IWL_DEV_INFO(iwl1000_bg_cfg, iwl1000_bg_name,
DEVICE(0x0083), SUBDEV_MASKED(0x6, 0xF)),
+ IWL_DEV_INFO(iwl1000_bgn_cfg, iwl1000_bgn_name,
+ DEVICE(0x0084), SUBDEV_MASKED(0x5, 0xF)),
IWL_DEV_INFO(iwl1000_bg_cfg, iwl1000_bg_name,
- DEVICE(0x0084), SUBDEV(0x1216)),
- IWL_DEV_INFO(iwl1000_bg_cfg, iwl1000_bg_name,
- DEVICE(0x0084), SUBDEV(0x1316)),
+ DEVICE(0x0084), SUBDEV_MASKED(0x6, 0xF)),
/* 100 Series WiFi */
IWL_DEV_INFO(iwl100_bgn_cfg, iwl100_bgn_name,
@@ -964,6 +966,12 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
DEVICE(0x24F3), SUBDEV(0x0004)),
IWL_DEV_INFO(iwl8260_cfg, iwl8260_2n_name,
DEVICE(0x24F3), SUBDEV(0x0044)),
+ IWL_DEV_INFO(iwl8260_cfg, iwl8260_2ac_name,
+ DEVICE(0x24F4)),
+ IWL_DEV_INFO(iwl8260_cfg, iwl4165_2ac_name,
+ DEVICE(0x24F5)),
+ IWL_DEV_INFO(iwl8260_cfg, iwl4165_2ac_name,
+ DEVICE(0x24F6)),
IWL_DEV_INFO(iwl8265_cfg, iwl8265_2ac_name,
DEVICE(0x24FD)),
IWL_DEV_INFO(iwl8265_cfg, iwl8275_2ac_name,
@@ -1171,16 +1179,11 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
static void iwl_pci_remove(struct pci_dev *pdev)
{
struct iwl_trans *trans = pci_get_drvdata(pdev);
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
if (!trans)
return;
- cancel_delayed_work_sync(&trans_pcie->me_recheck_wk);
-
- iwl_drv_stop(trans->drv);
-
- iwl_trans_pcie_free(trans);
+ iwl_pcie_gen1_2_remove(trans);
}
#ifdef CONFIG_PM_SLEEP
@@ -1222,11 +1225,15 @@ static int _iwl_pci_resume(struct device *device, bool restore)
* Note: MAC (bits 0:7) will be cleared upon suspend even with wowlan,
* but not bits [15:8]. So if we have bits set in lower word, assume
* the device is alive.
+ * Alternatively, if the scratch value is 0xFFFFFFFF, then we no longer
+ * have access to the device and consider it powered off.
* For older devices, just try silently to grab the NIC.
*/
if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
- if (!(iwl_read32(trans, CSR_FUNC_SCRATCH) &
- CSR_FUNC_SCRATCH_POWER_OFF_MASK))
+ u32 scratch = iwl_read32(trans, CSR_FUNC_SCRATCH);
+
+ if (!(scratch & CSR_FUNC_SCRATCH_POWER_OFF_MASK) ||
+ scratch == ~0U)
device_was_powered_off = true;
} else {
/*
@@ -1248,7 +1255,7 @@ static int _iwl_pci_resume(struct device *device, bool restore)
* won't really know how to recover.
*/
iwl_pcie_prepare_card_hw(trans);
- iwl_finish_nic_init(trans);
+ iwl_trans_activate_nic(trans);
iwl_op_mode_device_powered_off(trans->op_mode);
}
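The resume logic above distinguishes three outcomes on BZ-and-later hardware: firmware context retained (low scratch bits still set), power lost during suspend (those bits cleared), and the device having dropped off the bus entirely, where a PCI read of any register returns all-ones. A minimal userspace model of that predicate follows; the mask value is a stand-in, not the real CSR definition:

#include <stdbool.h>
#include <stdint.h>

#define FUNC_SCRATCH_POWER_OFF_MASK 0x00ffu	/* illustrative stand-in */

/* Mirrors the check in _iwl_pci_resume(): powered off if the low
 * scratch bits were cleared, or unreachable if the read came back
 * as all-ones (typical for a dead or removed PCI function). */
static bool device_was_powered_off(uint32_t scratch)
{
	return !(scratch & FUNC_SCRATCH_POWER_OFF_MASK) ||
	       scratch == UINT32_MAX;
}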
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h
index f48aeebb151c..207c56e338dd 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h
@@ -400,6 +400,11 @@ struct iwl_pcie_txqs {
* @me_recheck_wk: worker to recheck WiAMT/CSME presence
* @invalid_tx_cmd: invalid TX command buffer
* @wait_command_queue: wait queue for sync commands
+ * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
+ * The user should use iwl_trans_{alloc,free}_tx_cmd.
+ * @dev_cmd_pool_name: name for the TX command allocation pool
+ * @pm_support: set to true in start_hw if link pm is supported
+ * @ltr_enabled: set to true if the LTR is enabled
*/
struct iwl_trans_pcie {
struct iwl_rxq *rxq;
@@ -506,6 +511,12 @@ struct iwl_trans_pcie {
struct iwl_dma_ptr invalid_tx_cmd;
wait_queue_head_t wait_command_queue;
+
+ struct kmem_cache *dev_cmd_pool;
+ char dev_cmd_pool_name[50];
+
+ bool pm_support;
+ bool ltr_enabled;
};
static inline struct iwl_trans_pcie *
@@ -783,6 +794,23 @@ static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
return le16_to_cpu(tb->hi_n_len) >> 4;
}
+static inline struct iwl_device_tx_cmd *
+iwl_pcie_gen1_2_alloc_tx_cmd(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ return kmem_cache_zalloc(trans_pcie->dev_cmd_pool, GFP_ATOMIC);
+}
+
+static inline void
+iwl_pcie_gen1_2_free_tx_cmd(struct iwl_trans *trans,
+ struct iwl_device_tx_cmd *dev_cmd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ kmem_cache_free(trans_pcie->dev_cmd_pool, dev_cmd);
+}
+
void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs, bool is_flush);
void iwl_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
@@ -818,6 +846,8 @@ static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
trans_pcie->fh_init_mask);
iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
trans_pcie->hw_init_mask);
+ trans_pcie->fh_mask = 0;
+ trans_pcie->hw_mask = 0;
}
IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}
@@ -1000,6 +1030,7 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
} else {
iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
trans_pcie->fh_init_mask);
+ trans_pcie->fh_mask = 0;
iwl_enable_hw_int_msk_msix(trans,
MSIX_HW_INT_CAUSES_REG_RF_KILL);
}
@@ -1047,7 +1078,7 @@ static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
void iwl_pcie_rx_allocator_work(struct work_struct *data);
/* common trans ops for all generations transports */
-void iwl_trans_pcie_op_mode_enter(struct iwl_trans *trans);
+void iwl_pcie_gen1_2_op_mode_enter(struct iwl_trans *trans);
int _iwl_trans_pcie_start_hw(struct iwl_trans *trans);
int iwl_trans_pcie_start_hw(struct iwl_trans *trans);
void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans);
@@ -1064,9 +1095,8 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
const struct iwl_dump_sanitize_ops *sanitize_ops,
void *sanitize_ctx);
int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
- enum iwl_d3_status *status,
- bool test, bool reset);
-int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset);
+ bool reset);
+int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool reset);
void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable);
void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);
void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
@@ -1081,6 +1111,7 @@ int iwl_pci_gen1_2_probe(struct pci_dev *pdev,
const struct pci_device_id *ent,
const struct iwl_mac_cfg *mac_cfg,
u8 __iomem *hw_base, u32 hw_rev);
+void iwl_pcie_gen1_2_remove(struct iwl_trans *trans);
/* transport gen 1 exported functions */
void iwl_trans_pcie_fw_alive(struct iwl_trans *trans);
@@ -1105,6 +1136,7 @@ int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);
+int iwl_pcie_gen1_2_activate_nic(struct iwl_trans *trans);
/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
@@ -1124,4 +1156,17 @@ int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
struct iwl_trans_rxq_dma_data *data);
+static inline bool iwl_pcie_gen1_is_pm_supported(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ return trans_pcie->pm_support;
+}
+
+static inline bool iwl_pcie_gen1_2_is_ltr_enabled(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ return trans_pcie->ltr_enabled;
+}
#endif /* __iwl_trans_int_pcie_h__ */
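The new dev_cmd_pool, pm_support and ltr_enabled members move transport-private state out of struct iwl_trans into struct iwl_trans_pcie, with inline accessors so other layers never touch the private struct directly. The pool itself follows the standard slab-cache lifecycle; a hedged sketch of that pattern (names here are illustrative, not the driver's):

#include <linux/slab.h>

/* Sketch of the kmem_cache lifecycle backing per-packet TX command
 * buffers: create once at transport allocation, zalloc/free per
 * packet on the hot path, destroy at teardown. */
static int example_cmd_pool_cycle(unsigned int size, unsigned int align)
{
	struct kmem_cache *pool;
	void *obj;

	pool = kmem_cache_create("example_cmd_pool", size, align,
				 SLAB_HWCACHE_ALIGN, NULL);
	if (!pool)
		return -ENOMEM;

	obj = kmem_cache_zalloc(pool, GFP_ATOMIC);	/* hot path */
	if (obj)
		kmem_cache_free(pool, obj);

	kmem_cache_destroy(pool);			/* teardown */
	return 0;
}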
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans-gen2.c
index 1951be3a30b7..b15c5d486527 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans-gen2.c
@@ -47,7 +47,7 @@ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
iwl_pcie_apm_config(trans);
- ret = iwl_finish_nic_init(trans);
+ ret = iwl_trans_activate_nic(trans);
if (ret)
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
index 327366bf87de..59307b5df441 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
@@ -25,13 +25,52 @@
#include "fw/dbg.h"
#include "fw/api/tx.h"
#include "fw/acpi.h"
-#include "fw/api/tx.h"
#include "mei/iwl-mei.h"
#include "internal.h"
#include "iwl-fh.h"
#include "pcie/iwl-context-info-v2.h"
#include "pcie/utils.h"
+#define IWL_HOST_MON_BLOCK_PEMON 0x00
+#define IWL_HOST_MON_BLOCK_HIPM 0x22
+
+#define IWL_HOST_MON_BLOCK_PEMON_VEC0 0x00
+#define IWL_HOST_MON_BLOCK_PEMON_VEC1 0x01
+#define IWL_HOST_MON_BLOCK_PEMON_WFPM 0x06
+
+static void iwl_dump_host_monitor_block(struct iwl_trans *trans,
+ u32 block, u32 vec, u32 iter)
+{
+ int i;
+
+ IWL_ERR(trans, "Host monitor block 0x%x vector 0x%x\n", block, vec);
+ iwl_write32(trans, CSR_MONITOR_CFG_REG, (block << 8) | vec);
+ for (i = 0; i < iter; i++)
+ IWL_ERR(trans, " value [iter %d]: 0x%08x\n",
+ i, iwl_read32(trans, CSR_MONITOR_STATUS_REG));
+}
+
+static void iwl_pcie_dump_host_monitor(struct iwl_trans *trans)
+{
+ switch (trans->mac_cfg->device_family) {
+ case IWL_DEVICE_FAMILY_22000:
+ case IWL_DEVICE_FAMILY_AX210:
+ IWL_ERR(trans, "CSR_RESET = 0x%x\n",
+ iwl_read32(trans, CSR_RESET));
+ iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_PEMON,
+ IWL_HOST_MON_BLOCK_PEMON_VEC0, 15);
+ iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_PEMON,
+ IWL_HOST_MON_BLOCK_PEMON_VEC1, 15);
+ iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_PEMON,
+ IWL_HOST_MON_BLOCK_PEMON_WFPM, 15);
+ iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_HIPM,
+ IWL_HOST_MON_BLOCK_PEMON_VEC0, 1);
+ break;
+ default:
+ return;
+ }
+}
+
/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START 0x40000
#define IWL_FW_MEM_EXTENDED_END 0x57FFF
@@ -175,13 +214,13 @@ void iwl_pcie_apm_config(struct iwl_trans *trans)
iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);
pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
- trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
+ trans_pcie->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
- trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
+ trans_pcie->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n",
(lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
- trans->ltr_enabled ? "En" : "Dis");
+ trans_pcie->ltr_enabled ? "En" : "Dis");
}
/*
@@ -228,7 +267,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
if (trans->mac_cfg->base->pll_cfg)
iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
- ret = iwl_finish_nic_init(trans);
+ ret = iwl_trans_activate_nic(trans);
if (ret)
return ret;
@@ -301,7 +340,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
ret = iwl_trans_pcie_sw_reset(trans, true);
if (!ret)
- ret = iwl_finish_nic_init(trans);
+ ret = iwl_trans_activate_nic(trans);
if (WARN_ON(ret)) {
/* Release XTAL ON request */
@@ -1397,17 +1436,10 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq)
}
static void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
- bool test, bool reset)
+ bool reset)
{
iwl_disable_interrupts(trans);
- /*
- * in testing mode, the host stays awake and the
- * hardware won't be reset (not even partially)
- */
- if (test)
- return;
-
iwl_pcie_disable_ict(trans);
iwl_pcie_synchronize_irqs(trans);
@@ -1478,7 +1510,7 @@ static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend)
return ret;
}
-int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset)
+int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool reset)
{
int ret;
@@ -1491,26 +1523,18 @@ int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset)
if (ret)
return ret;
- iwl_pcie_d3_complete_suspend(trans, test, reset);
+ iwl_pcie_d3_complete_suspend(trans, reset);
return 0;
}
int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
- enum iwl_d3_status *status,
- bool test, bool reset)
+ bool reset)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 val;
int ret;
- if (test) {
- iwl_enable_interrupts(trans);
- *status = IWL_D3_STATUS_ALIVE;
- ret = 0;
- goto out;
- }
-
if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
@@ -1518,9 +1542,12 @@ int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- ret = iwl_finish_nic_init(trans);
- if (ret)
+ ret = iwl_trans_activate_nic(trans);
+ if (ret) {
+ IWL_ERR(trans, "Failed to initialize NIC upon resume, err = %d\n",
+ ret);
return ret;
+ }
/*
* Reconfigure IVAR table in case of MSIX or reset ict table in
@@ -1554,18 +1581,13 @@ int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
iwl_read_umac_prph(trans, WFPM_GP2));
val = iwl_read32(trans, CSR_RESET);
- if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
- *status = IWL_D3_STATUS_RESET;
- else
- *status = IWL_D3_STATUS_ALIVE;
-
-out:
- if (*status == IWL_D3_STATUS_ALIVE)
- ret = iwl_pcie_d3_handshake(trans, false);
- else
+ if (val & CSR_RESET_REG_FLAG_NEVO_RESET) {
+ IWL_INFO(trans, "Device was reset during suspend\n");
trans->state = IWL_TRANS_NO_FW;
+ return -ENOENT;
+ }
- return ret;
+ return iwl_pcie_d3_handshake(trans, false);
}
static void
@@ -1744,7 +1766,7 @@ static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
{
int ret;
- ret = iwl_finish_nic_init(trans);
+ ret = iwl_trans_activate_nic(trans);
if (ret < 0)
return ret;
@@ -1882,7 +1904,7 @@ void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val)
iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}
-void iwl_trans_pcie_op_mode_enter(struct iwl_trans *trans)
+void iwl_pcie_gen1_2_op_mode_enter(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -2000,6 +2022,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
free_percpu(trans_pcie->txqs.tso_hdr_page);
}
+ kmem_cache_destroy(trans_pcie->dev_cmd_pool);
iwl_trans_free(trans);
}
@@ -3516,7 +3539,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
struct iwl_trans_dump_data *dump_data;
u32 len, num_rbs = 0, monitor_len = 0;
int i, ptr;
- bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
+ bool dump_rbs = iwl_trans_is_fw_error(trans) &&
!trans->mac_cfg->mq_rx_supported &&
dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
@@ -3684,28 +3707,40 @@ void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit);
}
-static int iwl_trans_pcie_set_txcmd_info(const struct iwl_mac_cfg *mac_cfg,
- unsigned int *txcmd_size,
- unsigned int *txcmd_align)
+static int iwl_trans_pcie_alloc_txcmd_pool(struct iwl_trans *trans)
{
- if (!mac_cfg->gen2) {
- *txcmd_size = sizeof(struct iwl_tx_cmd_v6);
- *txcmd_align = sizeof(void *);
- } else if (mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
- *txcmd_size = sizeof(struct iwl_tx_cmd_v9);
- *txcmd_align = 64;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ unsigned int txcmd_size, txcmd_align;
+
+ if (!trans->mac_cfg->gen2) {
+ txcmd_size = sizeof(struct iwl_tx_cmd_v6);
+ txcmd_align = sizeof(void *);
+ } else if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
+ txcmd_size = sizeof(struct iwl_tx_cmd_v9);
+ txcmd_align = 64;
} else {
- *txcmd_size = sizeof(struct iwl_tx_cmd);
- *txcmd_align = 128;
+ txcmd_size = sizeof(struct iwl_tx_cmd);
+ txcmd_align = 128;
}
- *txcmd_size += sizeof(struct iwl_cmd_header);
- *txcmd_size += 36; /* biggest possible 802.11 header */
+ txcmd_size += sizeof(struct iwl_cmd_header);
+ txcmd_size += 36; /* biggest possible 802.11 header */
/* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
- if (WARN_ON((mac_cfg->gen2 && *txcmd_size >= *txcmd_align)))
+ if (WARN_ON((trans->mac_cfg->gen2 && txcmd_size >= txcmd_align)))
return -EINVAL;
+ snprintf(trans_pcie->dev_cmd_pool_name,
+ sizeof(trans_pcie->dev_cmd_pool_name),
+ "iwl_cmd_pool:%s", dev_name(trans->dev));
+
+ trans_pcie->dev_cmd_pool =
+ kmem_cache_create(trans_pcie->dev_cmd_pool_name,
+ txcmd_size, txcmd_align,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!trans_pcie->dev_cmd_pool)
+ return -ENOMEM;
+
return 0;
}
@@ -3715,18 +3750,12 @@ iwl_trans_pcie_alloc(struct pci_dev *pdev,
struct iwl_trans_info *info, u8 __iomem *hw_base)
{
struct iwl_trans_pcie *trans_pcie, **priv;
- unsigned int txcmd_size, txcmd_align;
struct iwl_trans *trans;
unsigned int bc_tbl_n_entries;
int ret, addr_size;
- ret = iwl_trans_pcie_set_txcmd_info(mac_cfg, &txcmd_size,
- &txcmd_align);
- if (ret)
- return ERR_PTR(ret);
-
trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev,
- mac_cfg, txcmd_size, txcmd_align);
+ mac_cfg);
if (!trans)
return ERR_PTR(-ENOMEM);
@@ -3737,6 +3766,10 @@ iwl_trans_pcie_alloc(struct pci_dev *pdev,
/* Initialize the wait queue for commands */
init_waitqueue_head(&trans_pcie->wait_command_queue);
+ ret = iwl_trans_pcie_alloc_txcmd_pool(trans);
+ if (ret)
+ goto out_free_trans;
+
if (trans->mac_cfg->gen2) {
trans_pcie->txqs.tfd.addr_size = 64;
trans_pcie->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
@@ -3756,7 +3789,7 @@ iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
if (!trans_pcie->txqs.tso_hdr_page) {
ret = -ENOMEM;
- goto out_free_trans;
+ goto out_free_txcmd_pool;
}
if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
@@ -3906,6 +3939,8 @@ out_free_ndev:
free_netdev(trans_pcie->napi_dev);
out_free_tso:
free_percpu(trans_pcie->txqs.tso_hdr_page);
+out_free_txcmd_pool:
+ kmem_cache_destroy(trans_pcie->dev_cmd_pool);
out_free_trans:
iwl_trans_free(trans);
return ERR_PTR(ret);
@@ -3999,6 +4034,7 @@ static void get_crf_id(struct iwl_trans *iwl_trans,
/* Read cdb info (also contains the jacket info if needed in the future) */
hw_wfpm_id = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_OTP_CFG1_ADDR);
+
IWL_INFO(iwl_trans, "Detected crf-id 0x%x, cnv-id 0x%x wfpm id 0x%x\n",
info->hw_crf_id, info->hw_cnv_id, hw_wfpm_id);
}
@@ -4014,10 +4050,8 @@ static int map_crf_id(struct iwl_trans *iwl_trans,
u32 val = info->hw_crf_id;
u32 step_id = REG_CRF_ID_STEP(val);
u32 slave_id = REG_CRF_ID_SLAVE(val);
- u32 jacket_id_cnv = REG_CRF_ID_SLAVE(info->hw_cnv_id);
u32 hw_wfpm_id = iwl_read_umac_prph_no_grab(iwl_trans,
WFPM_OTP_CFG1_ADDR);
- u32 jacket_id_wfpm = WFPM_OTP_CFG1_IS_JACKET(hw_wfpm_id);
u32 cdb_id_wfpm = WFPM_OTP_CFG1_IS_CDB(hw_wfpm_id);
/* Map between crf id to rf id */
@@ -4066,21 +4100,12 @@ static int map_crf_id(struct iwl_trans *iwl_trans,
IWL_INFO(iwl_trans, "Adding cdb to rf id\n");
}
- /* Set Jacket capabilities */
- if (jacket_id_wfpm || jacket_id_cnv) {
- info->hw_rf_id += BIT(29);
- IWL_INFO(iwl_trans, "Adding jacket to rf id\n");
- }
-
IWL_INFO(iwl_trans,
"Detected rf-type 0x%x step-id 0x%x slave-id 0x%x from crf id 0x%x\n",
REG_CRF_ID_TYPE(val), step_id, slave_id, info->hw_rf_id);
IWL_INFO(iwl_trans,
- "Detected cdb-id 0x%x jacket-id 0x%x from wfpm id 0x%x\n",
- cdb_id_wfpm, jacket_id_wfpm, hw_wfpm_id);
- IWL_INFO(iwl_trans, "Detected jacket-id 0x%x from cnvi id 0x%x\n",
- jacket_id_cnv, info->hw_cnv_id);
-
+ "Detected cdb-id 0x%x from wfpm id 0x%x\n",
+ cdb_id_wfpm, hw_wfpm_id);
out:
return ret;
}
@@ -4163,7 +4188,7 @@ int iwl_pci_gen1_2_probe(struct pci_dev *pdev,
*/
ret = iwl_pcie_prepare_card_hw(iwl_trans);
if (!ret) {
- ret = iwl_finish_nic_init(iwl_trans);
+ ret = iwl_trans_activate_nic(iwl_trans);
if (ret)
goto out_free_trans;
if (iwl_trans_grab_nic_access(iwl_trans)) {
@@ -4271,3 +4296,63 @@ out_free_trans:
iwl_trans_pcie_free(iwl_trans);
return ret;
}
+
+void iwl_pcie_gen1_2_remove(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ cancel_delayed_work_sync(&trans_pcie->me_recheck_wk);
+
+ iwl_drv_stop(trans->drv);
+
+ iwl_trans_pcie_free(trans);
+}
+
+int iwl_pcie_gen1_2_activate_nic(struct iwl_trans *trans)
+{
+ const struct iwl_mac_cfg *mac_cfg = trans->mac_cfg;
+ u32 poll_ready;
+ int err;
+
+ if (mac_cfg->bisr_workaround) {
+ /* ensure the TOP FSM isn't still in previous reset */
+ mdelay(2);
+ }
+
+ /*
+ * Set "initialization complete" bit to move adapter from
+ * D0U* --> D0A* (powered-up active) state.
+ */
+ if (mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ |
+ CSR_GP_CNTRL_REG_FLAG_MAC_INIT);
+ poll_ready = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
+ } else {
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ poll_ready = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY;
+ }
+
+ if (mac_cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ udelay(2);
+
+ /*
+ * Wait for clock stabilization; once stabilized, access to
+ * device-internal resources is supported, e.g. iwl_write_prph()
+ * and accesses to uCode SRAM.
+ */
+ err = iwl_poll_bits(trans, CSR_GP_CNTRL, poll_ready, 25000);
+ if (err < 0) {
+ IWL_DEBUG_INFO(trans, "Failed to wake NIC\n");
+
+ iwl_pcie_dump_host_monitor(trans);
+ }
+
+ if (mac_cfg->bisr_workaround) {
+ /* ensure BISR shift has finished */
+ udelay(200);
+ }
+
+ return err;
+}
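iwl_pcie_gen1_2_activate_nic() is the classic set-then-poll doorbell: request MAC init via CSR_GP_CNTRL, then poll the family-specific ready bit until clock stabilization, dumping the host monitors on timeout. A generic userspace model of the poll step, with read_reg() and udelay() as stand-ins for the MMIO read and the kernel delay:

#include <errno.h>
#include <stdint.h>

extern uint32_t read_reg(void);		/* stand-in for iwl_read32() */
extern void udelay(unsigned long us);	/* stand-in for the kernel delay */

/* Poll until all bits in @mask are set or @timeout_us expires,
 * modeling what iwl_poll_bits() does for the driver. */
static int poll_bits(uint32_t mask, unsigned int timeout_us)
{
	unsigned int waited = 0;

	while ((read_reg() & mask) != mask) {
		if (waited >= timeout_us)
			return -ETIMEDOUT;
		udelay(10);
		waited += 10;
	}
	return 0;
}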
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
index 84a05cc1c27a..6e85aa519e1b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
@@ -2092,7 +2092,8 @@ static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
break;
}
- if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_7000 &&
+ trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
len = DIV_ROUND_UP(len, 4);
if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
@@ -2601,8 +2602,9 @@ static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans,
}
if (test_bit(STATUS_FW_ERROR, &trans->status)) {
- if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE,
- &trans->status)) {
+ if (trans->suppress_cmd_error_once) {
+ trans->suppress_cmd_error_once = false;
+ } else {
IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
dump_stack();
}
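The hunk above swaps the atomic STATUS_SUPPRESS_CMD_ERROR_ONCE bit for a plain bool on the transport. test_and_clear_bit() reads and clears in a single atomic step, so the bool form is only equivalent if this sync-command error path cannot race with itself, which presumably holds since the flag is consumed under the serialized command flow. The two idioms, with C11 atomics as a userspace analogue:

#include <stdatomic.h>
#include <stdbool.h>

/* Atomic consume: read-and-clear in one indivisible step. */
static bool consume_flag_atomic(atomic_bool *flag)
{
	return atomic_exchange(flag, false);
}

/* Plain consume, as in the patch: safe only when a single thread
 * can reach this path at a time. */
static bool consume_flag_plain(bool *flag)
{
	bool was_set = *flag;

	*flag = false;
	return was_set;
}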
diff --git a/drivers/net/wireless/intel/iwlwifi/tests/Makefile b/drivers/net/wireless/intel/iwlwifi/tests/Makefile
index 1b49241c578f..b996c45d43e7 100644
--- a/drivers/net/wireless/intel/iwlwifi/tests/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/tests/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
-iwlwifi-tests-y += module.o devinfo.o utils.o
+iwlwifi-tests-y += module.o devinfo.o utils.o nvm_parse.o
ccflags-y += -I$(src)/../
diff --git a/drivers/net/wireless/intel/iwlwifi/tests/nvm_parse.c b/drivers/net/wireless/intel/iwlwifi/tests/nvm_parse.c
new file mode 100644
index 000000000000..853911900bfd
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/tests/nvm_parse.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KUnit tests for NVM parse
+ *
+ * Copyright (C) 2025 Intel Corporation
+ */
+#include <kunit/static_stub.h>
+#include <kunit/test.h>
+#include <iwl-nvm-parse.h>
+
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
+
+static const struct nvm_flag_case {
+ const char *desc;
+ u16 nvm_flags;
+ u32 reg_rule_flags;
+ u32 set_reg_rule_flags;
+ u32 clear_reg_rule_flags;
+} nvm_flag_cases[] = {
+ {
+ .desc = "Restricting VLP client and AP access",
+ .nvm_flags = 0,
+ .set_reg_rule_flags = NL80211_RRF_NO_6GHZ_VLP_CLIENT,
+ .clear_reg_rule_flags = NL80211_RRF_ALLOW_6GHZ_VLP_AP,
+ },
+ {
+ .desc = "Allow VLP client and AP access",
+ .nvm_flags = NVM_CHANNEL_VLP,
+ .set_reg_rule_flags = NL80211_RRF_ALLOW_6GHZ_VLP_AP,
+ .clear_reg_rule_flags = NL80211_RRF_NO_6GHZ_VLP_CLIENT,
+ },
+ {
+ .desc = "Allow VLP client access, while restricting AP access",
+ .nvm_flags = NVM_CHANNEL_VLP | NVM_CHANNEL_VLP_AP_NOT_ALLOWED,
+ .set_reg_rule_flags = 0,
+ .clear_reg_rule_flags = NL80211_RRF_ALLOW_6GHZ_VLP_AP |
+ NL80211_RRF_NO_6GHZ_VLP_CLIENT,
+ },
+};
+
+KUNIT_ARRAY_PARAM_DESC(nvm_flag, nvm_flag_cases, desc)
+
+static void test_nvm_flags(struct kunit *test)
+{
+ const struct nvm_flag_case *params = test->param_value;
+ struct iwl_reg_capa reg_capa = {};
+ u32 flags = 0;
+
+ flags = iwl_nvm_get_regdom_bw_flags(NULL, 0, params->nvm_flags,
+ reg_capa);
+
+ if ((params->set_reg_rule_flags & flags) != params->set_reg_rule_flags)
+ KUNIT_FAIL(test, "Expected set bits:0x%08x flags:0x%08x\n",
+ params->set_reg_rule_flags, flags);
+
+ if (params->clear_reg_rule_flags & flags)
+ KUNIT_FAIL(test, "Expected clear bits:0x%08x flags:0x%08x\n",
+ params->clear_reg_rule_flags, flags);
+}
+
+static struct kunit_case nvm_flags_test_cases[] = {
+ KUNIT_CASE_PARAM(test_nvm_flags,
+ nvm_flag_gen_params),
+ {},
+};
+
+static struct kunit_suite nvm_flags_suite = {
+ .name = "iwlwifi-nvm_flags",
+ .test_cases = nvm_flags_test_cases,
+};
+
+kunit_test_suite(nvm_flags_suite);
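KUNIT_ARRAY_PARAM_DESC() turns nvm_flag_cases[] into a parameter generator (nvm_flag_gen_params), so each table entry runs as a separately named subtest. Every case asserts two independent conditions on the computed regulatory flags; the pass criterion restated as a plain predicate:

#include <stdbool.h>
#include <stdint.h>

/* A result passes when every required bit is present and no
 * forbidden bit leaked in, the same two checks the KUnit case
 * applies to the return value of iwl_nvm_get_regdom_bw_flags(). */
static bool reg_rule_flags_ok(uint32_t flags, uint32_t must_set,
			      uint32_t must_clear)
{
	return (flags & must_set) == must_set && !(flags & must_clear);
}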
diff --git a/drivers/net/wireless/intersil/p54/txrx.c b/drivers/net/wireless/intersil/p54/txrx.c
index 2deb1bb54f24..1294a1d6528e 100644
--- a/drivers/net/wireless/intersil/p54/txrx.c
+++ b/drivers/net/wireless/intersil/p54/txrx.c
@@ -317,7 +317,7 @@ static void p54_pspoll_workaround(struct p54_common *priv, struct sk_buff *skb)
tim_len = tim[1];
tim_ie = (struct ieee80211_tim_ie *) &tim[2];
- new_psm = ieee80211_check_tim(tim_ie, tim_len, priv->aid);
+ new_psm = ieee80211_check_tim(tim_ie, tim_len, priv->aid, false);
if (new_psm != priv->powersave_override) {
priv->powersave_override = new_psm;
p54_set_ps(priv);
diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
index 94dd488becaf..caba7491cd5a 100644
--- a/drivers/net/wireless/marvell/libertas/cfg.c
+++ b/drivers/net/wireless/marvell/libertas/cfg.c
@@ -1151,10 +1151,13 @@ static int lbs_associate(struct lbs_private *priv,
/* add SSID TLV */
rcu_read_lock();
ssid_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
- if (ssid_eid)
- pos += lbs_add_ssid_tlv(pos, ssid_eid + 2, ssid_eid[1]);
- else
+ if (ssid_eid) {
+ u32 ssid_len = min(ssid_eid[1], IEEE80211_MAX_SSID_LEN);
+
+ pos += lbs_add_ssid_tlv(pos, ssid_eid + 2, ssid_len);
+ } else {
lbs_deb_assoc("no SSID\n");
+ }
rcu_read_unlock();
/* add DS param TLV */
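The libertas change above clamps the SSID element length before building the TLV: the IE length octet can claim up to 255 bytes, while a legal SSID is at most IEEE80211_MAX_SSID_LEN (32), so copying the advertised length unchecked could overrun the command buffer. The defensive pattern in a self-contained sketch:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define IEEE80211_MAX_SSID_LEN 32

/* ie[0] = element ID, ie[1] = claimed length, ie[2..] = data.
 * Clamp the peer-controlled length octet before copying. */
static size_t copy_ssid_clamped(uint8_t dst[IEEE80211_MAX_SSID_LEN],
				const uint8_t *ie)
{
	size_t len = ie[1] < IEEE80211_MAX_SSID_LEN ?
		     ie[1] : IEEE80211_MAX_SSID_LEN;

	memcpy(dst, &ie[2], len);
	return len;
}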
diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
index 524034699972..fc5318035822 100644
--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
+++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
@@ -1181,7 +1181,8 @@ static int if_sdio_probe(struct sdio_func *func,
spin_lock_init(&card->lock);
INIT_LIST_HEAD(&card->packets);
- card->workqueue = alloc_workqueue("libertas_sdio", WQ_MEM_RECLAIM, 0);
+ card->workqueue = alloc_workqueue("libertas_sdio",
+ WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
if (unlikely(!card->workqueue)) {
ret = -ENOMEM;
goto err_queue;
diff --git a/drivers/net/wireless/marvell/libertas/if_spi.c b/drivers/net/wireless/marvell/libertas/if_spi.c
index b722a6587fd3..8a2504a62840 100644
--- a/drivers/net/wireless/marvell/libertas/if_spi.c
+++ b/drivers/net/wireless/marvell/libertas/if_spi.c
@@ -1153,7 +1153,8 @@ static int if_spi_probe(struct spi_device *spi)
priv->fw_ready = 1;
/* Initialize interrupt handling stuff. */
- card->workqueue = alloc_workqueue("libertas_spi", WQ_MEM_RECLAIM, 0);
+ card->workqueue = alloc_workqueue("libertas_spi",
+ WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
if (!card->workqueue) {
err = -ENOMEM;
goto remove_card;
diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c
index d1067874428f..fb20fe31cd36 100644
--- a/drivers/net/wireless/marvell/libertas_tf/main.c
+++ b/drivers/net/wireless/marvell/libertas_tf/main.c
@@ -708,7 +708,7 @@ EXPORT_SYMBOL_GPL(lbtf_bcn_sent);
static int __init lbtf_init_module(void)
{
lbtf_deb_enter(LBTF_DEB_MAIN);
- lbtf_wq = alloc_workqueue("libertastf", WQ_MEM_RECLAIM, 0);
+ lbtf_wq = alloc_workqueue("libertastf", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
if (lbtf_wq == NULL) {
printk(KERN_ERR "libertastf: couldn't create workqueue\n");
return -ENOMEM;
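All three libertas variants add WQ_UNBOUND on top of WQ_MEM_RECLAIM: the queue keeps its rescuer thread for forward progress under memory pressure, while its work items are no longer pinned to the submitting CPU and may migrate to idle cores. A hedged kernel-style sketch of the allocation (the queue name is illustrative); a NULL return is what the callers above turn into -ENOMEM:

#include <linux/workqueue.h>

/* WQ_MEM_RECLAIM reserves a rescuer thread so the queue can make
 * progress during memory reclaim; WQ_UNBOUND lets items run on any
 * CPU. The trailing 0 requests the default max_active limit. */
static struct workqueue_struct *example_wq_create(void)
{
	return alloc_workqueue("example_wq",
			       WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
}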
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 3498743d5ec0..be23a29e7de0 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -686,10 +686,9 @@ static void mwifiex_reg_notifier(struct wiphy *wiphy,
return;
}
- /* Don't send world or same regdom info to firmware */
- if (strncmp(request->alpha2, "00", 2) &&
- strncmp(request->alpha2, adapter->country_code,
- sizeof(request->alpha2))) {
+ /* Don't send same regdom info to firmware */
+ if (strncmp(request->alpha2, adapter->country_code,
+ sizeof(request->alpha2)) != 0) {
memcpy(adapter->country_code, request->alpha2,
sizeof(request->alpha2));
mwifiex_send_domain_info_cmd_fw(wiphy);
@@ -4673,8 +4672,9 @@ int mwifiex_init_channel_scan_gap(struct mwifiex_adapter *adapter)
* additional active scan request for hidden SSIDs on passive channels.
*/
adapter->num_in_chan_stats = 2 * (n_channels_bg + n_channels_a);
- adapter->chan_stats = vmalloc(array_size(sizeof(*adapter->chan_stats),
- adapter->num_in_chan_stats));
+ adapter->chan_stats = kcalloc(adapter->num_in_chan_stats,
+ sizeof(*adapter->chan_stats),
+ GFP_KERNEL);
if (!adapter->chan_stats)
return -ENOMEM;
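Switching chan_stats from vmalloc(array_size(...)) to kcalloc() keeps the multiply-overflow protection (now internal to kcalloc()) while returning zeroed, physically contiguous memory; the matching frees in main.c below change from vfree() to kfree(), since the two allocator families must never be mixed. A sketch of the pairing, with an illustrative struct rather than mwifiex's:

#include <linux/slab.h>
#include <linux/types.h>

struct example_chan_stat {
	u32 chan_num;
	u32 cca_busy_dur;
};

/* kcalloc(n, size, gfp) zeroes the buffer and returns NULL if
 * n * size would overflow; release it with kfree(), never vfree(). */
static struct example_chan_stat *example_stats_alloc(size_t n)
{
	return kcalloc(n, sizeof(struct example_chan_stat), GFP_KERNEL);
}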
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 7b50a88a18e5..ff177b06f42d 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -494,6 +494,11 @@ static void mwifiex_free_adapter(struct mwifiex_adapter *adapter)
return;
}
+ if (adapter->rgpower_data) {
+ release_firmware(adapter->rgpower_data);
+ adapter->rgpower_data = NULL;
+ }
+
mwifiex_unregister(adapter);
pr_debug("info: %s: free adapter\n", __func__);
}
@@ -642,7 +647,7 @@ static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context)
goto done;
err_add_intf:
- vfree(adapter->chan_stats);
+ kfree(adapter->chan_stats);
err_init_chan_scan:
wiphy_unregister(adapter->wiphy);
wiphy_free(adapter->wiphy);
@@ -1485,7 +1490,7 @@ static void mwifiex_uninit_sw(struct mwifiex_adapter *adapter)
wiphy_free(adapter->wiphy);
adapter->wiphy = NULL;
- vfree(adapter->chan_stats);
+ kfree(adapter->chan_stats);
mwifiex_free_cmd_buffers(adapter);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 9ac36bef980e..27559e2ddc31 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -982,6 +982,7 @@ struct mwifiex_adapter {
u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
u16 max_mgmt_ie_index;
const struct firmware *cal_data;
+ const struct firmware *rgpower_data;
struct device_node *dt_node;
/* 11AC */
@@ -1579,6 +1580,8 @@ int mwifiex_11h_handle_event_chanswann(struct mwifiex_private *priv);
int mwifiex_dnld_dt_cfgdata(struct mwifiex_private *priv,
struct device_node *node, const char *prefix);
void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv);
+int mwifiex_send_rgpower_table(struct mwifiex_private *priv, const u8 *data,
+ const size_t size);
extern const struct ethtool_ops mwifiex_ethtool_ops;
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index c93281f5a47c..dcca71158fc6 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -1483,6 +1483,119 @@ int mwifiex_dnld_dt_cfgdata(struct mwifiex_private *priv,
return 0;
}
+static int mwifiex_rgpower_table_advance_to_content(u8 **pos, const u8 *data,
+ const size_t size)
+{
+ while (*pos - data < size) {
+ /* skip spaces, tabs and empty lines */
+ if (**pos == '\r' || **pos == '\n' || **pos == '\0' ||
+ isspace(**pos)) {
+ (*pos)++;
+ continue;
+ }
+ /* skip line comments */
+ if (**pos == '#') {
+ *pos = strchr(*pos, '\n');
+ if (!*pos)
+ return -EINVAL;
+ (*pos)++;
+ continue;
+ }
+ return 0;
+ }
+ return 0;
+}
+
+int mwifiex_send_rgpower_table(struct mwifiex_private *priv, const u8 *data,
+ const size_t size)
+{
+ int ret = 0;
+ bool start_raw = false;
+ u8 *ptr, *token, *pos = NULL;
+ u8 *_data __free(kfree) = NULL;
+ struct mwifiex_adapter *adapter = priv->adapter;
+ struct mwifiex_ds_misc_cmd *hostcmd __free(kfree) = NULL;
+
+ hostcmd = kzalloc(sizeof(*hostcmd), GFP_KERNEL);
+ if (!hostcmd)
+ return -ENOMEM;
+
+ _data = kmemdup(data, size, GFP_KERNEL);
+ if (!_data)
+ return -ENOMEM;
+
+ pos = _data;
+ ptr = hostcmd->cmd;
+ while ((pos - _data) < size) {
+ ret = mwifiex_rgpower_table_advance_to_content(&pos, _data, size);
+ if (ret) {
+ mwifiex_dbg(
+ adapter, ERROR,
+ "%s: failed to advance to content in rgpower table\n",
+ __func__);
+ return ret;
+ }
+
+ if (*pos == '}' && start_raw) {
+ hostcmd->len = get_unaligned_le16(&hostcmd->cmd[2]);
+ ret = mwifiex_send_cmd(priv, 0, 0, 0, hostcmd, false);
+ if (ret) {
+ mwifiex_dbg(adapter, ERROR,
+ "%s: failed to send hostcmd %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ memset(hostcmd->cmd, 0, MWIFIEX_SIZE_OF_CMD_BUFFER);
+ ptr = hostcmd->cmd;
+ start_raw = false;
+ pos++;
+ continue;
+ }
+
+ if (!start_raw) {
+ pos = strchr(pos, '=');
+ if (pos) {
+ pos = strchr(pos, '{');
+ if (pos) {
+ start_raw = true;
+ pos++;
+ continue;
+ }
+ }
+ mwifiex_dbg(adapter, ERROR,
+ "%s: syntax error in hostcmd\n", __func__);
+ return -EINVAL;
+ }
+
+ if (start_raw) {
+ while ((*pos != '\r' && *pos != '\n') &&
+ (token = strsep((char **)&pos, " "))) {
+ if (ptr - hostcmd->cmd >=
+ MWIFIEX_SIZE_OF_CMD_BUFFER) {
+ mwifiex_dbg(
+ adapter, ERROR,
+ "%s: hostcmd is larger than %d, aborting\n",
+ __func__, MWIFIEX_SIZE_OF_CMD_BUFFER);
+ return -ENOMEM;
+ }
+
+ ret = kstrtou8(token, 16, ptr);
+ if (ret < 0) {
+ mwifiex_dbg(
+ adapter, ERROR,
+ "%s: failed to parse hostcmd %d token: %s\n",
+ __func__, ret, token);
+ return ret;
+ }
+ ptr++;
+ }
+ }
+ }
+
+ return ret;
+}
+
/* This function prepares command of set_cfg_data. */
static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd, void *data_buf)
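The parser above consumes a small line-oriented text format: '#' begins a comment line, a block opens with 'name={' and closes with '}' on its own line, and the body is space-separated hex bytes packed sequentially into hostcmd->cmd, with bytes 2..3 interpreted as the little-endian total command length when the block closes. An illustrative input, with the layout inferred from the parser rather than taken from a real NXP table (kstrtou8() tolerates the trailing newline on the last byte of each line):

# example regulatory power command block
rgpower_data={
01 00 08 00 00 00 00 00
}

Here bytes 2..3 (08 00) give a command length of 8, matching the eight bytes supplied, so the block is dispatched to the firmware as a single hostcmd.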
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index f79589cafe57..ef6722ffdc74 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -180,7 +180,7 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
return mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc);
}
-void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv)
+static void mwifiex_dnld_dt_txpwr_table(struct mwifiex_private *priv)
{
if (priv->adapter->dt_node) {
char txpwr[] = {"marvell,00_txpwrlimit"};
@@ -190,6 +190,62 @@ void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv)
}
}
+static int mwifiex_request_rgpower_table(struct mwifiex_private *priv)
+{
+ struct mwifiex_802_11d_domain_reg *domain_info = &priv->adapter->domain_reg;
+ struct mwifiex_adapter *adapter = priv->adapter;
+ char rgpower_table_name[30];
+ char country_code[3];
+
+ strscpy(country_code, domain_info->country_code, sizeof(country_code));
+
+ /* World regulatory domain "00" has WW as country code */
+ if (strncmp(country_code, "00", 2) == 0)
+ strscpy(country_code, "WW", sizeof(country_code));
+
+ snprintf(rgpower_table_name, sizeof(rgpower_table_name),
+ "nxp/rgpower_%s.bin", country_code);
+
+ mwifiex_dbg(adapter, INFO, "info: %s: requesting regulatory power table %s\n",
+ __func__, rgpower_table_name);
+
+ if (adapter->rgpower_data) {
+ release_firmware(adapter->rgpower_data);
+ adapter->rgpower_data = NULL;
+ }
+
+ if ((request_firmware(&adapter->rgpower_data, rgpower_table_name,
+ adapter->dev))) {
+ mwifiex_dbg(
+ adapter, INFO,
+ "info: %s: failed to request regulatory power table\n",
+ __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mwifiex_dnld_rgpower_table(struct mwifiex_private *priv)
+{
+ int ret;
+
+ ret = mwifiex_request_rgpower_table(priv);
+ if (ret)
+ return ret;
+
+ return mwifiex_send_rgpower_table(priv, priv->adapter->rgpower_data->data,
+ priv->adapter->rgpower_data->size);
+}
+
+void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv)
+{
+ if (mwifiex_dnld_rgpower_table(priv) == 0)
+ return;
+
+ mwifiex_dnld_dt_txpwr_table(priv);
+}
+
static int mwifiex_process_country_ie(struct mwifiex_private *priv,
struct cfg80211_bss *bss)
{
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
index 07c386c7b4d0..936ab1ca9246 100644
--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
+++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -173,6 +173,8 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
if (ackp == IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
return;
+ if (wcid->def_wcid)
+ wcid = wcid->def_wcid;
tid = rcu_dereference(wcid->aggr[tidno]);
if (!tid)
return;
diff --git a/drivers/net/wireless/mediatek/mt76/channel.c b/drivers/net/wireless/mediatek/mt76/channel.c
index 77b75792eb48..130af1b254db 100644
--- a/drivers/net/wireless/mediatek/mt76/channel.c
+++ b/drivers/net/wireless/mediatek/mt76/channel.c
@@ -314,21 +314,24 @@ void mt76_put_vif_phy_link(struct mt76_phy *phy, struct ieee80211_vif *vif,
kfree(mlink);
}
-static void mt76_roc_complete(struct mt76_phy *phy)
+void mt76_roc_complete(struct mt76_phy *phy)
{
struct mt76_vif_link *mlink = phy->roc_link;
+ struct mt76_dev *dev = phy->dev;
if (!phy->roc_vif)
return;
if (mlink)
mlink->mvif->roc_phy = NULL;
- if (phy->main_chandef.chan)
+ if (phy->main_chandef.chan &&
+ !test_bit(MT76_MCU_RESET, &dev->phy.state))
mt76_set_channel(phy, &phy->main_chandef, false);
mt76_put_vif_phy_link(phy, phy->roc_vif, phy->roc_link);
phy->roc_vif = NULL;
phy->roc_link = NULL;
- ieee80211_remain_on_channel_expired(phy->hw);
+ if (!test_bit(MT76_MCU_RESET, &dev->phy.state))
+ ieee80211_remain_on_channel_expired(phy->hw);
}
void mt76_roc_complete_work(struct work_struct *work)
@@ -351,6 +354,7 @@ void mt76_abort_roc(struct mt76_phy *phy)
mt76_roc_complete(phy);
mutex_unlock(&dev->mutex);
}
+EXPORT_SYMBOL_GPL(mt76_abort_roc);
int mt76_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_channel *chan, int duration,
@@ -368,7 +372,8 @@ int mt76_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mutex_lock(&dev->mutex);
- if (phy->roc_vif || dev->scan.phy == phy) {
+ if (phy->roc_vif || dev->scan.phy == phy ||
+ test_bit(MT76_MCU_RESET, &dev->phy.state)) {
ret = -EBUSY;
goto out;
}
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 87f531297f85..1fa7de1d2c45 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -186,6 +186,37 @@ mt76_free_pending_rxwi(struct mt76_dev *dev)
EXPORT_SYMBOL_GPL(mt76_free_pending_rxwi);
static void
+mt76_dma_queue_magic_cnt_init(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ if (!mt76_queue_is_wed_rro(q))
+ return;
+
+ q->magic_cnt = 0;
+ if (mt76_queue_is_wed_rro_ind(q)) {
+ struct mt76_wed_rro_desc *rro_desc;
+ u32 data1 = FIELD_PREP(RRO_IND_DATA1_MAGIC_CNT_MASK,
+ MT_DMA_WED_IND_CMD_CNT - 1);
+ int i;
+
+ rro_desc = (struct mt76_wed_rro_desc *)q->desc;
+ for (i = 0; i < q->ndesc; i++) {
+ struct mt76_wed_rro_ind *cmd;
+
+ cmd = (struct mt76_wed_rro_ind *)&rro_desc[i];
+ cmd->data1 = cpu_to_le32(data1);
+ }
+ } else if (mt76_queue_is_wed_rro_rxdmad_c(q)) {
+ struct mt76_rro_rxdmad_c *dmad = (void *)q->desc;
+ u32 data3 = FIELD_PREP(RRO_RXDMAD_DATA3_MAGIC_CNT_MASK,
+ MT_DMA_MAGIC_CNT - 1);
+ int i;
+
+ for (i = 0; i < q->ndesc; i++)
+ dmad[i].data3 = cpu_to_le32(data3);
+ }
+}
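The WED RRO rings carry no DMA-done flag, so each descriptor instead embeds a small generation ("magic") counter: the consumer only trusts a descriptor whose counter matches the ring's expected generation, and the expected value advances each time the ring index wraps. Pre-initializing every descriptor to generation N-1, as above, makes the whole ring read as stale until hardware actually writes it. A userspace model of the freshness check:

#include <stdbool.h>
#include <stdint.h>

#define MAGIC_CNT 16	/* mirrors MT_DMA_MAGIC_CNT */

/* A descriptor is fresh only if its embedded generation matches the
 * ring's expected generation for the current pass. */
static bool desc_is_fresh(uint8_t desc_magic, uint8_t ring_magic)
{
	return desc_magic == ring_magic;
}

/* Called when the ring index wraps back to slot 0. */
static uint8_t advance_magic(uint8_t ring_magic)
{
	return (ring_magic + 1) % MAGIC_CNT;
}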
+
+static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
Q_WRITE(q, desc_base, q->desc_dma);
@@ -197,13 +228,14 @@ mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
q->tail = q->head;
}
-void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
- bool reset_idx)
+void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
+ bool reset_idx)
{
if (!q || !q->ndesc)
return;
- if (!mt76_queue_is_wed_rro_ind(q)) {
+ if (!mt76_queue_is_wed_rro_ind(q) &&
+ !mt76_queue_is_wed_rro_rxdmad_c(q)) {
int i;
/* clear descriptors */
@@ -211,27 +243,26 @@ void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
}
+ mt76_dma_queue_magic_cnt_init(dev, q);
if (reset_idx) {
- Q_WRITE(q, cpu_idx, 0);
+ if (mt76_queue_is_emi(q))
+ *q->emi_cpu_idx = 0;
+ else
+ Q_WRITE(q, cpu_idx, 0);
Q_WRITE(q, dma_idx, 0);
}
mt76_dma_sync_idx(dev, q);
}
-void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
-{
- __mt76_dma_queue_reset(dev, q, true);
-}
-
static int
mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76_queue_buf *buf, void *data)
{
struct mt76_queue_entry *entry = &q->entry[q->head];
struct mt76_txwi_cache *txwi = NULL;
+ u32 buf1 = 0, ctrl, info = 0;
struct mt76_desc *desc;
int idx = q->head;
- u32 buf1 = 0, ctrl;
int rx_token;
if (mt76_queue_is_wed_rro_ind(q)) {
@@ -240,6 +271,9 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
rro_desc = (struct mt76_wed_rro_desc *)q->desc;
data = &rro_desc[q->head];
goto done;
+ } else if (mt76_queue_is_wed_rro_rxdmad_c(q)) {
+ data = &q->desc[q->head];
+ goto done;
}
desc = &q->desc[q->head];
@@ -248,7 +282,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
buf1 = FIELD_PREP(MT_DMA_CTL_SDP0_H, buf->addr >> 32);
#endif
- if (mt76_queue_is_wed_rx(q)) {
+ if (mt76_queue_is_wed_rx(q) || mt76_queue_is_wed_rro_data(q)) {
txwi = mt76_get_rxwi(dev);
if (!txwi)
return -ENOMEM;
@@ -261,12 +295,26 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
ctrl |= MT_DMA_CTL_TO_HOST;
+
+ txwi->qid = q - dev->q_rx;
+ }
+
+ if (mt76_queue_is_wed_rro_msdu_pg(q) &&
+ dev->drv->rx_rro_add_msdu_page) {
+ if (dev->drv->rx_rro_add_msdu_page(dev, q, buf->addr, data))
+ return -ENOMEM;
+ }
+
+ if (q->flags & MT_QFLAG_WED_RRO_EN) {
+ info |= FIELD_PREP(MT_DMA_MAGIC_MASK, q->magic_cnt);
+ if ((q->head + 1) == q->ndesc)
+ q->magic_cnt = (q->magic_cnt + 1) % MT_DMA_MAGIC_CNT;
}
WRITE_ONCE(desc->buf0, cpu_to_le32(buf->addr));
WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
- WRITE_ONCE(desc->info, 0);
+ WRITE_ONCE(desc->info, cpu_to_le32(info));
done:
entry->dma_addr[0] = buf->addr;
@@ -375,7 +423,10 @@ static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
wmb();
- Q_WRITE(q, cpu_idx, q->head);
+ if (mt76_queue_is_emi(q))
+ *q->emi_cpu_idx = cpu_to_le16(q->head);
+ else
+ Q_WRITE(q, cpu_idx, q->head);
}
static void
@@ -419,15 +470,61 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
}
static void *
+mt76_dma_get_rxdmad_c_buf(struct mt76_dev *dev, struct mt76_queue *q,
+ int idx, int *len, bool *more)
+{
+ struct mt76_queue_entry *e = &q->entry[idx];
+ struct mt76_rro_rxdmad_c *dmad = e->buf;
+ u32 data1 = le32_to_cpu(dmad->data1);
+ u32 data2 = le32_to_cpu(dmad->data2);
+ struct mt76_txwi_cache *t;
+ u16 rx_token_id;
+ u8 ind_reason;
+ void *buf;
+
+ rx_token_id = FIELD_GET(RRO_RXDMAD_DATA2_RX_TOKEN_ID_MASK, data2);
+ t = mt76_rx_token_release(dev, rx_token_id);
+ if (!t)
+ return ERR_PTR(-EAGAIN);
+
+ q = &dev->q_rx[t->qid];
+ dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
+ SKB_WITH_OVERHEAD(q->buf_size),
+ page_pool_get_dma_dir(q->page_pool));
+
+ if (len)
+ *len = FIELD_GET(RRO_RXDMAD_DATA1_SDL0_MASK, data1);
+ if (more)
+ *more = !FIELD_GET(RRO_RXDMAD_DATA1_LS_MASK, data1);
+
+ buf = t->ptr;
+ ind_reason = FIELD_GET(RRO_RXDMAD_DATA2_IND_REASON_MASK, data2);
+ if (ind_reason == MT_DMA_WED_IND_REASON_REPEAT ||
+ ind_reason == MT_DMA_WED_IND_REASON_OLDPKT) {
+ mt76_put_page_pool_buf(buf, false);
+ buf = ERR_PTR(-EAGAIN);
+ }
+ t->ptr = NULL;
+ t->dma_addr = 0;
+
+ mt76_put_rxwi(dev, t);
+
+ return buf;
+}
+
+static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
- int *len, u32 *info, bool *more, bool *drop)
+ int *len, u32 *info, bool *more, bool *drop, bool flush)
{
struct mt76_queue_entry *e = &q->entry[idx];
struct mt76_desc *desc = &q->desc[idx];
u32 ctrl, desc_info, buf1;
void *buf = e->buf;
- if (mt76_queue_is_wed_rro_ind(q))
+ if (mt76_queue_is_wed_rro_rxdmad_c(q) && !flush)
+ buf = mt76_dma_get_rxdmad_c_buf(dev, q, idx, len, more);
+
+ if (mt76_queue_is_wed_rro(q))
goto done;
ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
@@ -482,20 +579,50 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
if (!q->queued)
return NULL;
- if (mt76_queue_is_wed_rro_data(q))
- return NULL;
+ if (mt76_queue_is_wed_rro_data(q) || mt76_queue_is_wed_rro_msdu_pg(q))
+ goto done;
+
+ if (mt76_queue_is_wed_rro_ind(q)) {
+ struct mt76_wed_rro_ind *cmd;
+ u8 magic_cnt;
+
+ if (flush)
+ goto done;
+
+ cmd = q->entry[idx].buf;
+ magic_cnt = FIELD_GET(RRO_IND_DATA1_MAGIC_CNT_MASK,
+ le32_to_cpu(cmd->data1));
+ if (magic_cnt != q->magic_cnt)
+ return NULL;
+
+ if (q->tail == q->ndesc - 1)
+ q->magic_cnt = (q->magic_cnt + 1) % MT_DMA_WED_IND_CMD_CNT;
+ } else if (mt76_queue_is_wed_rro_rxdmad_c(q)) {
+ struct mt76_rro_rxdmad_c *dmad;
+ u16 magic_cnt;
- if (!mt76_queue_is_wed_rro_ind(q)) {
+ if (flush)
+ goto done;
+
+ dmad = q->entry[idx].buf;
+ magic_cnt = FIELD_GET(RRO_RXDMAD_DATA3_MAGIC_CNT_MASK,
+ le32_to_cpu(dmad->data3));
+ if (magic_cnt != q->magic_cnt)
+ return NULL;
+
+ if (q->tail == q->ndesc - 1)
+ q->magic_cnt = (q->magic_cnt + 1) % MT_DMA_MAGIC_CNT;
+ } else {
if (flush)
q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
return NULL;
}
-
+done:
q->tail = (q->tail + 1) % q->ndesc;
q->queued--;
- return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
+ return mt76_dma_get_buf(dev, q, idx, len, info, more, drop, flush);
}
static int
@@ -646,7 +773,8 @@ mt76_dma_rx_fill_buf(struct mt76_dev *dev, struct mt76_queue *q,
void *buf = NULL;
int offset;
- if (mt76_queue_is_wed_rro_ind(q))
+ if (mt76_queue_is_wed_rro_ind(q) ||
+ mt76_queue_is_wed_rro_rxdmad_c(q))
goto done;
buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
@@ -676,9 +804,6 @@ int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
{
int frames;
- if (!q->ndesc)
- return 0;
-
spin_lock_bh(&q->lock);
frames = mt76_dma_rx_fill_buf(dev, q, allow_direct);
spin_unlock_bh(&q->lock);
@@ -708,19 +833,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
if (!q->desc)
return -ENOMEM;
- if (mt76_queue_is_wed_rro_ind(q)) {
- struct mt76_wed_rro_desc *rro_desc;
- int i;
-
- rro_desc = (struct mt76_wed_rro_desc *)q->desc;
- for (i = 0; i < q->ndesc; i++) {
- struct mt76_wed_rro_ind *cmd;
-
- cmd = (struct mt76_wed_rro_ind *)&rro_desc[i];
- cmd->magic_cnt = MT_DMA_WED_IND_CMD_CNT - 1;
- }
- }
-
+ mt76_dma_queue_magic_cnt_init(dev, q);
size = q->ndesc * sizeof(*q->entry);
q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
if (!q->entry)
@@ -740,7 +853,10 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
return 0;
}
- mt76_dma_queue_reset(dev, q);
+ /* The HW-specific driver is expected to reset brand-new EMI queues
+ * itself, since it must first set the CPU index pointer.
+ */
+ mt76_dma_queue_reset(dev, q, !mt76_queue_is_emi(q));
return 0;
}
@@ -783,7 +899,8 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
if (!q->ndesc)
return;
- if (!mt76_queue_is_wed_rro_ind(q)) {
+ if (!mt76_queue_is_wed_rro_ind(q) &&
+ !mt76_queue_is_wed_rro_rxdmad_c(q)) {
int i;
for (i = 0; i < q->ndesc; i++)
@@ -843,8 +960,9 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
bool allow_direct = !mt76_queue_is_wed_rx(q);
bool more;
- if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
- mt76_queue_is_wed_tx_free(q)) {
+ if ((q->flags & MT_QFLAG_WED_RRO_EN) ||
+ (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
+ mt76_queue_is_wed_tx_free(q))) {
dma_idx = Q_READ(q, dma_idx);
check_ddone = true;
}
@@ -866,6 +984,20 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
if (!data)
break;
+ if (PTR_ERR(data) == -EAGAIN) {
+ done++;
+ continue;
+ }
+
+ if (mt76_queue_is_wed_rro_ind(q) && dev->drv->rx_rro_ind_process)
+ dev->drv->rx_rro_ind_process(dev, data);
+
+ if (mt76_queue_is_wed_rro(q) &&
+ !mt76_queue_is_wed_rro_rxdmad_c(q)) {
+ done++;
+ continue;
+ }
+
if (drop)
goto free_frag;
@@ -943,6 +1075,15 @@ int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
}
EXPORT_SYMBOL_GPL(mt76_dma_rx_poll);
+static void
+mt76_dma_rx_queue_init(struct mt76_dev *dev, enum mt76_rxq_id qid,
+ int (*poll)(struct napi_struct *napi, int budget))
+{
+ netif_napi_add(dev->napi_dev, &dev->napi[qid], poll);
+ mt76_dma_rx_fill_buf(dev, &dev->q_rx[qid], false);
+ napi_enable(&dev->napi[qid]);
+}
+
static int
mt76_dma_init(struct mt76_dev *dev,
int (*poll)(struct napi_struct *napi, int budget))
@@ -975,9 +1116,10 @@ mt76_dma_init(struct mt76_dev *dev,
init_completion(&dev->mmio.wed_reset_complete);
mt76_for_each_q_rx(dev, i) {
- netif_napi_add(dev->napi_dev, &dev->napi[i], poll);
- mt76_dma_rx_fill_buf(dev, &dev->q_rx[i], false);
- napi_enable(&dev->napi[i]);
+ if (mt76_queue_is_wed_rro(&dev->q_rx[i]))
+ continue;
+
+ mt76_dma_rx_queue_init(dev, i, poll);
}
return 0;
@@ -990,6 +1132,7 @@ static const struct mt76_queue_ops mt76_dma_ops = {
.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
.tx_queue_skb = mt76_dma_tx_queue_skb,
.tx_cleanup = mt76_dma_tx_cleanup,
+ .rx_queue_init = mt76_dma_rx_queue_init,
.rx_cleanup = mt76_dma_rx_cleanup,
.rx_reset = mt76_dma_rx_reset,
.kick = mt76_dma_kick_queue,
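For EMI rings the CPU index lives in DRAM shared with the WED hardware rather than behind an MMIO doorbell, so the kick path stores a little-endian index through q->emi_cpu_idx after the wmb(). The ordering requirement is identical either way: descriptor contents must be globally visible before the index advances. A hedged sketch of that publish step, with illustrative types:

#include <linux/io.h>
#include <linux/types.h>

struct example_queue {
	u16 head;
	__le16 *emi_cpu_idx;	/* shared-memory index, if EMI */
	void __iomem *doorbell;	/* MMIO doorbell otherwise */
};

static void example_kick(struct example_queue *q)
{
	wmb();	/* descriptors first, index second */
	if (q->emi_cpu_idx)
		*q->emi_cpu_idx = cpu_to_le16(q->head);
	else
		writel(q->head, q->doorbell);
}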
diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h
index e3ddc7a83757..17a80e1757fc 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.h
+++ b/drivers/net/wireless/mediatek/mt76/dma.h
@@ -31,7 +31,12 @@
#define MT_DMA_CTL_PN_CHK_FAIL BIT(13)
#define MT_DMA_CTL_VER_MASK BIT(7)
-#define MT_DMA_RRO_EN BIT(13)
+#define MT_DMA_SDP0 GENMASK(15, 0)
+#define MT_DMA_TOKEN_ID GENMASK(31, 16)
+#define MT_DMA_MAGIC_MASK GENMASK(31, 28)
+#define MT_DMA_RRO_EN BIT(13)
+
+#define MT_DMA_MAGIC_CNT 16
#define MT_DMA_WED_IND_CMD_CNT 8
#define MT_DMA_WED_IND_REASON GENMASK(15, 12)
@@ -53,6 +58,21 @@ struct mt76_wed_rro_desc {
__le32 buf1;
} __packed __aligned(4);
+/* data1 */
+#define RRO_RXDMAD_DATA1_LS_MASK BIT(30)
+#define RRO_RXDMAD_DATA1_SDL0_MASK GENMASK(29, 16)
+/* data2 */
+#define RRO_RXDMAD_DATA2_RX_TOKEN_ID_MASK GENMASK(31, 16)
+#define RRO_RXDMAD_DATA2_IND_REASON_MASK GENMASK(15, 12)
+/* data3 */
+#define RRO_RXDMAD_DATA3_MAGIC_CNT_MASK GENMASK(31, 28)
+struct mt76_rro_rxdmad_c {
+ __le32 data0;
+ __le32 data1;
+ __le32 data2;
+ __le32 data3;
+};
+
enum mt76_qsel {
MT_QSEL_MGMT,
MT_QSEL_HCCA,
@@ -81,14 +101,13 @@ void mt76_dma_attach(struct mt76_dev *dev);
void mt76_dma_cleanup(struct mt76_dev *dev);
int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
bool allow_direct);
-void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
- bool reset_idx);
-void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q);
+void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
+ bool reset_idx);
static inline void
mt76_dma_reset_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
- dev->queue_ops->reset_q(dev, q);
+ dev->queue_ops->reset_q(dev, q, true);
if (mtk_wed_device_active(&dev->mmio.wed))
mt76_wed_dma_setup(dev, q, true);
}
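The new rxdmad_c descriptor is declared as four __le32 words with GENMASK()-based field masks instead of C bitfields: bitfield ordering is implementation-defined and breaks on big-endian hosts, while explicit masks over a byte-swapped word are portable. Field extraction then goes through FIELD_GET(); for example, pulling the SDL0 length (bits 29..16 of data1) out in a sketch that mirrors the mask defined above:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Portable extraction of a field from a little-endian DMA word:
 * byte-swap first, then apply the constant mask. */
static unsigned int example_rxdmad_len(__le32 data1)
{
	return FIELD_GET(GENMASK(29, 16), le32_to_cpu(data1));
}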
diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
index 443517d06c9f..a987c5e4eff6 100644
--- a/drivers/net/wireless/mediatek/mt76/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
@@ -163,13 +163,16 @@ static int mt76_get_of_eeprom(struct mt76_dev *dev, void *eep, int len)
return mt76_get_of_data_from_nvmem(dev, eep, "eeprom", len);
}
-void
+int
mt76_eeprom_override(struct mt76_phy *phy)
{
struct mt76_dev *dev = phy->dev;
struct device_node *np = dev->dev->of_node;
+ int err;
- of_get_mac_address(np, phy->macaddr);
+ err = of_get_mac_address(np, phy->macaddr);
+ if (err == -EPROBE_DEFER)
+ return err;
if (!is_valid_ether_addr(phy->macaddr)) {
eth_random_addr(phy->macaddr);
@@ -177,6 +180,8 @@ mt76_eeprom_override(struct mt76_phy *phy)
"Invalid MAC address, using random address %pM\n",
phy->macaddr);
}
+
+ return 0;
}
EXPORT_SYMBOL_GPL(mt76_eeprom_override);
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 3afe4c4cd7bb..5ceaf78c9ea0 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -818,6 +818,48 @@ void mt76_free_device(struct mt76_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76_free_device);
+static void mt76_reset_phy(struct mt76_phy *phy)
+{
+ if (!phy)
+ return;
+
+ INIT_LIST_HEAD(&phy->tx_list);
+ phy->num_sta = 0;
+ phy->chanctx = NULL;
+ mt76_roc_complete(phy);
+}
+
+void mt76_reset_device(struct mt76_dev *dev)
+{
+ int i;
+
+ rcu_read_lock();
+ for (i = 0; i < ARRAY_SIZE(dev->wcid); i++) {
+ struct mt76_wcid *wcid;
+
+ wcid = rcu_dereference(dev->wcid[i]);
+ if (!wcid)
+ continue;
+
+ wcid->sta = 0;
+ mt76_wcid_cleanup(dev, wcid);
+ rcu_assign_pointer(dev->wcid[i], NULL);
+ }
+ rcu_read_unlock();
+
+ mt76_abort_scan(dev);
+
+ INIT_LIST_HEAD(&dev->wcid_list);
+ INIT_LIST_HEAD(&dev->sta_poll_list);
+ dev->vif_mask = 0;
+ memset(dev->wcid_mask, 0, sizeof(dev->wcid_mask));
+
+ mt76_reset_phy(&dev->phy);
+ for (i = 0; i < ARRAY_SIZE(dev->phys); i++)
+ mt76_reset_phy(dev->phys[i]);
+}
+EXPORT_SYMBOL_GPL(mt76_reset_device);
+
struct mt76_phy *mt76_vif_phy(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -1198,6 +1240,8 @@ mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
mstat = *((struct mt76_rx_status *)skb->cb);
memset(status, 0, sizeof(*status));
+ skb->priority = mstat.qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
+
status->flag = mstat.flag;
status->freq = mstat.freq;
status->enc_flags = mstat.enc_flags;
@@ -1679,6 +1723,10 @@ void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
skb_queue_splice_tail_init(&wcid->tx_pending, &list);
spin_unlock(&wcid->tx_pending.lock);
+ spin_lock(&wcid->tx_offchannel.lock);
+ skb_queue_splice_tail_init(&wcid->tx_offchannel, &list);
+ spin_unlock(&wcid->tx_offchannel.lock);
+
spin_unlock_bh(&phy->tx_lock);
while ((skb = __skb_dequeue(&list)) != NULL) {
@@ -1690,7 +1738,7 @@ EXPORT_SYMBOL_GPL(mt76_wcid_cleanup);
void mt76_wcid_add_poll(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
- if (test_bit(MT76_MCU_RESET, &dev->phy.state))
+ if (test_bit(MT76_MCU_RESET, &dev->phy.state) || !wcid->sta)
return;
spin_lock_bh(&dev->sta_poll_lock);
@@ -2019,3 +2067,55 @@ void mt76_vif_cleanup(struct mt76_dev *dev, struct ieee80211_vif *vif)
mt76_abort_roc(mvif->roc_phy);
}
EXPORT_SYMBOL_GPL(mt76_vif_cleanup);
+
+u16 mt76_select_links(struct ieee80211_vif *vif, int max_active_links)
+{
+ unsigned long usable_links = ieee80211_vif_usable_links(vif);
+ struct {
+ u8 link_id;
+ enum nl80211_band band;
+ } data[IEEE80211_MLD_MAX_NUM_LINKS];
+ unsigned int link_id;
+ int i, n_data = 0;
+ u16 sel_links = 0;
+
+ if (!ieee80211_vif_is_mld(vif))
+ return 0;
+
+ if (vif->active_links == usable_links)
+ return vif->active_links;
+
+ rcu_read_lock();
+ for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf;
+
+ link_conf = rcu_dereference(vif->link_conf[link_id]);
+ if (WARN_ON_ONCE(!link_conf))
+ continue;
+
+ data[n_data].link_id = link_id;
+ data[n_data].band = link_conf->chanreq.oper.chan->band;
+ n_data++;
+ }
+ rcu_read_unlock();
+
+ for (i = 0; i < n_data; i++) {
+ int j;
+
+ if (!(BIT(data[i].link_id) & vif->active_links))
+ continue;
+
+ sel_links = BIT(data[i].link_id);
+ for (j = 0; j < n_data; j++) {
+ if (data[i].band != data[j].band) {
+ sel_links |= BIT(data[j].link_id);
+ if (hweight16(sel_links) == max_active_links)
+ break;
+ }
+ }
+ break;
+ }
+
+ return sel_links;
+}
+EXPORT_SYMBOL_GPL(mt76_select_links);
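mt76_select_links() implements a simple band-diversity policy for MLO: seed the selection with the currently active link, then add further usable links whose band differs from the seed's, stopping at max_active_links. Note that only the seed's band is compared, so two links that share some other band can both be picked. A compact userspace restatement of the loop:

#include <stdint.h>

/* bands[i] is the band of usable link i (n links total); active_id
 * is the currently active link. Returns a bitmap of selected links. */
static uint16_t select_links(const int *bands, int n, int active_id,
			     int max_active)
{
	uint16_t sel = (uint16_t)(1u << active_id);
	int picked = 1;

	for (int i = 0; i < n && picked < max_active; i++) {
		if (i == active_id || bands[i] == bands[active_id])
			continue;
		sel |= (uint16_t)(1u << i);
		picked++;
	}
	return sel;
}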
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 8dd5c29fb75b..e0d50b58cd01 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -33,6 +33,7 @@
#define MT_QFLAG_WED BIT(5)
#define MT_QFLAG_WED_RRO BIT(6)
#define MT_QFLAG_WED_RRO_EN BIT(7)
+#define MT_QFLAG_EMI_EN BIT(8)
#define __MT_WED_Q(_type, _n) (MT_QFLAG_WED | \
FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \
@@ -45,6 +46,7 @@
#define MT_WED_RRO_Q_DATA(_n) __MT_WED_RRO_Q(MT76_WED_RRO_Q_DATA, _n)
#define MT_WED_RRO_Q_MSDU_PG(_n) __MT_WED_RRO_Q(MT76_WED_RRO_Q_MSDU_PG, _n)
#define MT_WED_RRO_Q_IND __MT_WED_RRO_Q(MT76_WED_RRO_Q_IND, 0)
+#define MT_WED_RRO_Q_RXDMAD_C __MT_WED_RRO_Q(MT76_WED_RRO_Q_RXDMAD_C, 0)
struct mt76_dev;
struct mt76_phy;
@@ -71,6 +73,13 @@ enum mt76_wed_type {
MT76_WED_RRO_Q_DATA,
MT76_WED_RRO_Q_MSDU_PG,
MT76_WED_RRO_Q_IND,
+ MT76_WED_RRO_Q_RXDMAD_C,
+};
+
+enum mt76_hwrro_mode {
+ MT76_HWRRO_OFF,
+ MT76_HWRRO_V3,
+ MT76_HWRRO_V3_1,
};
struct mt76_bus_ops {
@@ -129,6 +138,7 @@ enum mt76_rxq_id {
MT_RXQ_TXFREE_BAND1,
MT_RXQ_TXFREE_BAND2,
MT_RXQ_RRO_IND,
+ MT_RXQ_RRO_RXDMAD_C,
__MT_RXQ_MAX
};
@@ -232,6 +242,9 @@ struct mt76_queue {
u8 buf_offset;
u16 flags;
+ u8 magic_cnt;
+
+ __le16 *emi_cpu_idx;
struct mtk_wed_device *wed;
u32 wed_regs;
@@ -286,11 +299,15 @@ struct mt76_queue_ops {
void (*tx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q,
bool flush);
+ void (*rx_queue_init)(struct mt76_dev *dev, enum mt76_rxq_id qid,
+ int (*poll)(struct napi_struct *napi, int budget));
+
void (*rx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q);
void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
- void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q);
+ void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q,
+ bool reset_idx);
};
enum mt76_phy_type {
@@ -398,15 +415,16 @@ struct mt76_txq {
bool aggr;
};
+/* data0 */
+#define RRO_IND_DATA0_IND_REASON_MASK GENMASK(31, 28)
+#define RRO_IND_DATA0_START_SEQ_MASK GENMASK(27, 16)
+#define RRO_IND_DATA0_SEQ_ID_MASK GENMASK(11, 0)
+/* data1 */
+#define RRO_IND_DATA1_MAGIC_CNT_MASK GENMASK(31, 29)
+#define RRO_IND_DATA1_IND_COUNT_MASK GENMASK(12, 0)
struct mt76_wed_rro_ind {
- u32 se_id : 12;
- u32 rsv : 4;
- u32 start_sn : 12;
- u32 ind_reason : 4;
- u32 ind_cnt : 13;
- u32 win_sz : 3;
- u32 rsv2 : 13;
- u32 magic_cnt : 3;
+ __le32 data0;
+ __le32 data1;
};
struct mt76_txwi_cache {
@@ -417,6 +435,8 @@ struct mt76_txwi_cache {
struct sk_buff *skb;
void *ptr;
};
+
+ u8 qid;
};
struct mt76_rx_tid {
@@ -533,6 +553,10 @@ struct mt76_driver_ops {
void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);
+ void (*rx_rro_ind_process)(struct mt76_dev *dev, void *data);
+ int (*rx_rro_add_msdu_page)(struct mt76_dev *dev, struct mt76_queue *q,
+ dma_addr_t p, void *data);
+
void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
bool ps);
@@ -910,6 +934,7 @@ struct mt76_dev {
struct mt76_queue q_rx[__MT_RXQ_MAX];
const struct mt76_queue_ops *queue_ops;
int tx_dma_idx[4];
+ enum mt76_hwrro_mode hwrro_mode;
struct mt76_worker tx_worker;
struct napi_struct tx_napi;
@@ -1214,6 +1239,7 @@ static inline int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q,
#define mt76_tx_queue_skb(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mphy), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_rx_init(dev, ...) (dev)->mt76.queue_ops->rx_queue_init(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_cleanup(dev, ...) (dev)->mt76.queue_ops->rx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...) (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_reset(dev, ...) (dev)->mt76.queue_ops->reset_q(&((dev)->mt76), __VA_ARGS__)
@@ -1243,6 +1269,7 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);
void mt76_free_device(struct mt76_dev *dev);
+void mt76_reset_device(struct mt76_dev *dev);
void mt76_unregister_phy(struct mt76_phy *phy);
struct mt76_phy *mt76_alloc_radio_phy(struct mt76_dev *dev, unsigned int size,
@@ -1267,7 +1294,7 @@ void mt76_seq_puts_array(struct seq_file *file, const char *str,
s8 *val, int len);
int mt76_eeprom_init(struct mt76_dev *dev, int len);
-void mt76_eeprom_override(struct mt76_phy *phy);
+int mt76_eeprom_override(struct mt76_phy *phy);
int mt76_get_of_data_from_mtd(struct mt76_dev *dev, void *eep, int offset, int len);
int mt76_get_of_data_from_nvmem(struct mt76_dev *dev, void *eep,
const char *cell_name, int len);
@@ -1616,6 +1643,7 @@ int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
void mt76_scan_work(struct work_struct *work);
void mt76_abort_scan(struct mt76_dev *dev);
void mt76_roc_complete_work(struct work_struct *work);
+void mt76_roc_complete(struct mt76_phy *phy);
void mt76_abort_roc(struct mt76_phy *phy);
struct mt76_vif_link *mt76_get_vif_phy_link(struct mt76_phy *phy,
struct ieee80211_vif *vif);
@@ -1781,21 +1809,34 @@ static inline bool mt76_queue_is_wed_rro_ind(struct mt76_queue *q)
FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_IND;
}
+static inline bool mt76_queue_is_wed_rro_rxdmad_c(struct mt76_queue *q)
+{
+ return mt76_queue_is_wed_rro(q) &&
+ FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_RXDMAD_C;
+}
+
static inline bool mt76_queue_is_wed_rro_data(struct mt76_queue *q)
{
return mt76_queue_is_wed_rro(q) &&
- (FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_DATA ||
- FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_MSDU_PG);
+ FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_DATA;
}
-static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q)
+static inline bool mt76_queue_is_wed_rro_msdu_pg(struct mt76_queue *q)
{
- if (!(q->flags & MT_QFLAG_WED))
- return false;
+ return mt76_queue_is_wed_rro(q) &&
+ FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) ==
+ MT76_WED_RRO_Q_MSDU_PG;
+}
- return FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX ||
- mt76_queue_is_wed_rro_ind(q) || mt76_queue_is_wed_rro_data(q);
+static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q)
+{
+ return (q->flags & MT_QFLAG_WED) &&
+ FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX;
+}
+
+static inline bool mt76_queue_is_emi(struct mt76_queue *q)
+{
+ return q->flags & MT_QFLAG_EMI_EN;
}
struct mt76_txwi_cache *
@@ -1871,6 +1912,7 @@ mt76_vif_init(struct ieee80211_vif *vif, struct mt76_vif_data *mvif)
}
void mt76_vif_cleanup(struct mt76_dev *dev, struct ieee80211_vif *vif);
+u16 mt76_select_links(struct ieee80211_vif *vif, int max_active_links);
static inline struct mt76_vif_link *
mt76_vif_link(struct mt76_dev *dev, struct ieee80211_vif *vif, int link_id)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
index f5a6b03bc61d..88382b537a33 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
@@ -182,7 +182,6 @@ int mt7603_eeprom_init(struct mt7603_dev *dev)
dev->mphy.antenna_mask = 1;
dev->mphy.chainmask = dev->mphy.antenna_mask;
- mt76_eeprom_override(&dev->mphy);
- return 0;
+ return mt76_eeprom_override(&dev->mphy);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
index 08590aa68356..1dd372372048 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
@@ -48,7 +48,7 @@ mt76_wmac_probe(struct platform_device *pdev)
return 0;
error:
- ieee80211_free_hw(mt76_hw(dev));
+ mt76_free_device(mdev);
return ret;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
index ccedea7e8a50..d4bc7e11e772 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
@@ -351,8 +351,6 @@ int mt7615_eeprom_init(struct mt7615_dev *dev, u32 addr)
memcpy(dev->mphy.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
ETH_ALEN);
- mt76_eeprom_override(&dev->mphy);
-
- return 0;
+ return mt76_eeprom_override(&dev->mphy);
}
EXPORT_SYMBOL_GPL(mt7615_eeprom_init);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
index aae80005a3c1..3e7af3e58736 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -570,7 +570,10 @@ int mt7615_register_ext_phy(struct mt7615_dev *dev)
ETH_ALEN);
mphy->macaddr[0] |= 2;
mphy->macaddr[0] ^= BIT(7);
- mt76_eeprom_override(mphy);
+
+ ret = mt76_eeprom_override(mphy);
+ if (ret)
+ return ret;
/* second phy can only handle 5 GHz */
mphy->cap.has_5ghz = true;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
index 1013cad57a7f..c5eaedca11e0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
@@ -294,6 +294,13 @@ enum tx_frag_idx {
#define MT_TXP_BUF_LEN GENMASK(11, 0)
#define MT_TXP_DMA_ADDR_H GENMASK(15, 12)
+#define MT_TXP0_TOKEN_ID0 GENMASK(14, 0)
+#define MT_TXP0_TOKEN_ID0_VALID_MASK BIT(15)
+
+#define MT_TXP1_TID_ADDBA GENMASK(14, 12)
+#define MT_TXP3_ML0_MASK BIT(15)
+#define MT_TXP3_DMA_ADDR_H GENMASK(13, 12)
+
#define MT_TX_RATE_STBC BIT(14)
#define MT_TX_RATE_NSS GENMASK(13, 10)
#define MT_TX_RATE_MODE GENMASK(9, 6)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index 16db0f2082d1..fc3e6728fcfb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -1662,6 +1662,31 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
return err;
}
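+	/* Advertise the 11v MBSSID parameters (max BSSID indicator and
+	 * BSSID index) to the fw once the BSS is enabled.
+	 */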
+ if (enable && vif->bss_conf.bssid_indicator) {
+ struct {
+ struct {
+ u8 bss_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct bss_info_uni_mbssid mbssid;
+ } mbssid_req = {
+ .hdr = {
+ .bss_idx = mvif->idx,
+ },
+ .mbssid = {
+ .tag = cpu_to_le16(UNI_BSS_INFO_11V_MBSSID),
+ .len = cpu_to_le16(sizeof(struct bss_info_uni_mbssid)),
+ .max_indicator = vif->bss_conf.bssid_indicator,
+ .mbss_idx = vif->bss_conf.bssid_index,
+ },
+ };
+
+ err = mt76_mcu_send_msg(mdev, MCU_UNI_CMD(BSS_INFO_UPDATE),
+ &mbssid_req, sizeof(mbssid_req), true);
+ if (err < 0)
+ return err;
+ }
+
return mt76_connac_mcu_uni_set_chctx(phy, mvif, ctx);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_uni_add_bss);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
index 4de45a56812d..d4506b8b46fa 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -332,7 +332,11 @@ int mt76x0_eeprom_init(struct mt76x02_dev *dev)
memcpy(dev->mphy.macaddr, (u8 *)dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
ETH_ALEN);
- mt76_eeprom_override(&dev->mphy);
+
+ err = mt76_eeprom_override(&dev->mphy);
+ if (err)
+ return err;
+
mt76x02_mac_setaddr(dev, dev->mphy.macaddr);
mt76x0_set_chip_cap(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
index 156b16c17b2b..221805deb42f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
@@ -499,7 +499,9 @@ int mt76x2_eeprom_init(struct mt76x02_dev *dev)
mt76x02_eeprom_parse_hw_cap(dev);
mt76x2_eeprom_get_macaddr(dev);
- mt76_eeprom_override(&dev->mphy);
+ ret = mt76_eeprom_override(&dev->mphy);
+ if (ret)
+ return ret;
dev->mphy.macaddr[0] &= ~BIT(1);
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
index 0c62272fe7d0..009ef713f437 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
@@ -624,13 +624,13 @@ int mt7915_dma_reset(struct mt7915_dev *dev, bool force)
}
for (i = 0; i < __MT_MCUQ_MAX; i++)
- mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
+ mt76_queue_reset(dev, dev->mt76.q_mcu[i], true);
mt76_for_each_q_rx(&dev->mt76, i) {
if (mt76_queue_is_wed_tx_free(&dev->mt76.q_rx[i]))
continue;
- mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
+ mt76_queue_reset(dev, &dev->mt76.q_rx[i], true);
}
mt76_tx_status_check(&dev->mt76, true);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
index c0f3402d30bb..38dfd5de365c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
@@ -284,9 +284,7 @@ int mt7915_eeprom_init(struct mt7915_dev *dev)
memcpy(dev->mphy.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
ETH_ALEN);
- mt76_eeprom_override(&dev->mphy);
-
- return 0;
+ return mt76_eeprom_override(&dev->mphy);
}
int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
index 31aec0f40232..73611c9d26e1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
@@ -50,9 +50,9 @@ enum mt7915_eeprom_field {
#define MT_EE_CAL_GROUP_SIZE_7975 (54 * MT_EE_CAL_UNIT + 16)
#define MT_EE_CAL_GROUP_SIZE_7976 (94 * MT_EE_CAL_UNIT + 16)
#define MT_EE_CAL_GROUP_SIZE_7916_6G (94 * MT_EE_CAL_UNIT + 16)
+#define MT_EE_CAL_GROUP_SIZE_7981 (144 * MT_EE_CAL_UNIT + 16)
#define MT_EE_CAL_DPD_SIZE_V1 (54 * MT_EE_CAL_UNIT)
#define MT_EE_CAL_DPD_SIZE_V2 (300 * MT_EE_CAL_UNIT)
-#define MT_EE_CAL_DPD_SIZE_V2_7981 (102 * MT_EE_CAL_UNIT) /* no 6g dpd data */
#define MT_EE_WIFI_CONF0_TX_PATH GENMASK(2, 0)
#define MT_EE_WIFI_CONF0_RX_PATH GENMASK(5, 3)
@@ -180,6 +180,8 @@ mt7915_get_cal_group_size(struct mt7915_dev *dev)
val = FIELD_GET(MT_EE_WIFI_CONF0_BAND_SEL, val);
return (val == MT_EE_V2_BAND_SEL_6GHZ) ? MT_EE_CAL_GROUP_SIZE_7916_6G :
MT_EE_CAL_GROUP_SIZE_7916;
+ } else if (is_mt7981(&dev->mt76)) {
+ return MT_EE_CAL_GROUP_SIZE_7981;
} else if (mt7915_check_adie(dev, false)) {
return MT_EE_CAL_GROUP_SIZE_7976;
} else {
@@ -192,8 +194,6 @@ mt7915_get_cal_dpd_size(struct mt7915_dev *dev)
{
if (is_mt7915(&dev->mt76))
return MT_EE_CAL_DPD_SIZE_V1;
- else if (is_mt7981(&dev->mt76))
- return MT_EE_CAL_DPD_SIZE_V2_7981;
else
return MT_EE_CAL_DPD_SIZE_V2;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index 3e30ca5155d2..5ea8b46e092e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -702,7 +702,9 @@ mt7915_register_ext_phy(struct mt7915_dev *dev, struct mt7915_phy *phy)
mphy->macaddr[0] |= 2;
mphy->macaddr[0] ^= BIT(7);
}
- mt76_eeprom_override(mphy);
+ ret = mt76_eeprom_override(mphy);
+ if (ret)
+ return ret;
/* init wiphy according to mphy and phy */
mt7915_init_wiphy(phy);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index 6639976afcee..1c0d310146d6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -1460,17 +1460,15 @@ mt7915_mac_full_reset(struct mt7915_dev *dev)
if (i == 10)
dev_err(dev->mt76.dev, "chip full reset failed\n");
- spin_lock_bh(&dev->mt76.sta_poll_lock);
- while (!list_empty(&dev->mt76.sta_poll_list))
- list_del_init(dev->mt76.sta_poll_list.next);
- spin_unlock_bh(&dev->mt76.sta_poll_lock);
-
- memset(dev->mt76.wcid_mask, 0, sizeof(dev->mt76.wcid_mask));
- dev->mt76.vif_mask = 0;
dev->phy.omac_mask = 0;
if (phy2)
phy2->omac_mask = 0;
+ mt76_reset_device(&dev->mt76);
+
+ INIT_LIST_HEAD(&dev->sta_rc_list);
+ INIT_LIST_HEAD(&dev->twt_list);
+
i = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA);
dev->mt76.global_wcid.idx = i;
dev->recovery.hw_full_reset = false;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index 2928e75b2397..c1fdd3c4f1ba 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -3052,30 +3052,15 @@ static int mt7915_dpd_freq_idx(struct mt7915_dev *dev, u16 freq, u8 bw)
/* 5G BW160 */
5250, 5570, 5815
};
- static const u16 freq_list_v2_7981[] = {
- /* 5G BW20 */
- 5180, 5200, 5220, 5240,
- 5260, 5280, 5300, 5320,
- 5500, 5520, 5540, 5560,
- 5580, 5600, 5620, 5640,
- 5660, 5680, 5700, 5720,
- 5745, 5765, 5785, 5805,
- 5825, 5845, 5865, 5885,
- /* 5G BW160 */
- 5250, 5570, 5815
- };
- const u16 *freq_list = freq_list_v1;
- int n_freqs = ARRAY_SIZE(freq_list_v1);
- int idx;
+ const u16 *freq_list;
+ int idx, n_freqs;
if (!is_mt7915(&dev->mt76)) {
- if (is_mt7981(&dev->mt76)) {
- freq_list = freq_list_v2_7981;
- n_freqs = ARRAY_SIZE(freq_list_v2_7981);
- } else {
- freq_list = freq_list_v2;
- n_freqs = ARRAY_SIZE(freq_list_v2);
- }
+ freq_list = freq_list_v2;
+ n_freqs = ARRAY_SIZE(freq_list_v2);
+ } else {
+ freq_list = freq_list_v1;
+ n_freqs = ARRAY_SIZE(freq_list_v1);
}
if (freq < 4000) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
index 4a82f8e4c118..36488aa6cc20 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
@@ -664,8 +664,8 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
MT_RXQ_WED_RING_BASE;
wed->wlan.wpdma_rx_glo = pci_resource_start(pci_dev, 0) +
MT_WPDMA_GLO_CFG;
- wed->wlan.wpdma_rx = pci_resource_start(pci_dev, 0) +
- MT_RXQ_WED_DATA_RING_BASE;
+ wed->wlan.wpdma_rx[0] = pci_resource_start(pci_dev, 0) +
+ MT_RXQ_WED_DATA_RING_BASE;
} else {
struct platform_device *plat_dev = pdev_ptr;
struct resource *res;
@@ -687,7 +687,7 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
wed->wlan.wpdma_tx = res->start + MT_TXQ_WED_RING_BASE;
wed->wlan.wpdma_txfree = res->start + MT_RXQ_WED_RING_BASE;
wed->wlan.wpdma_rx_glo = res->start + MT_WPDMA_GLO_CFG;
- wed->wlan.wpdma_rx = res->start + MT_RXQ_WED_DATA_RING_BASE;
+ wed->wlan.wpdma_rx[0] = res->start + MT_RXQ_WED_DATA_RING_BASE;
}
wed->wlan.nbuf = MT7915_HW_TOKEN_SIZE;
wed->wlan.tx_tbit[0] = is_mt7915(&dev->mt76) ? 4 : 30;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
index 14e17dc90256..b9098a7331b1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
@@ -189,7 +189,9 @@ static int __mt7921_init_hardware(struct mt792x_dev *dev)
if (ret)
goto out;
- mt76_eeprom_override(&dev->mphy);
+ ret = mt76_eeprom_override(&dev->mphy);
+ if (ret)
+ goto out;
ret = mt7921_mcu_set_eeprom(dev);
if (ret)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 40954e64c7fc..67383c41a319 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -135,6 +135,8 @@ mt7921_init_he_caps(struct mt792x_phy *phy, enum nl80211_band band,
if (is_mt7922(phy->mt76->dev)) {
he_cap_elem->phy_cap_info[0] |=
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+ he_cap_elem->phy_cap_info[4] |=
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
he_cap_elem->phy_cap_info[8] |=
IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
@@ -1459,11 +1461,8 @@ static int mt7921_pre_channel_switch(struct ieee80211_hw *hw,
if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc)
return -EOPNOTSUPP;
- /* Avoid beacon loss due to the CAC(Channel Availability Check) time
- * of the AP.
- */
if (!cfg80211_chandef_usable(hw->wiphy, &chsw->chandef,
- IEEE80211_CHAN_RADAR))
+ IEEE80211_CHAN_DISABLED))
return -EOPNOTSUPP;
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
index fe9751851ff7..100bdba32ba5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
@@ -21,6 +21,9 @@ static const struct usb_device_id mt7921u_device_table[] = {
/* Netgear, Inc. [A8000,AXE3000] */
{ USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9060, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM },
+ /* Netgear, Inc. A7500 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9065, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM },
/* TP-Link TXE50UH */
{ USB_DEVICE_AND_INTERFACE_INFO(0x35bc, 0x0107, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM },
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/init.c b/drivers/net/wireless/mediatek/mt76/mt7925/init.c
index 4249bad83c93..d7d5afe365ed 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/init.c
@@ -249,7 +249,9 @@ static int __mt7925_init_hardware(struct mt792x_dev *dev)
if (ret)
goto out;
- mt76_eeprom_override(&dev->mphy);
+ ret = mt76_eeprom_override(&dev->mphy);
+ if (ret)
+ goto out;
ret = mt7925_mcu_set_eeprom(dev);
if (ret)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
index 75823c9fd3a1..1e44e96f034e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
@@ -1300,7 +1300,6 @@ void mt7925_mac_reset_work(struct work_struct *work)
cancel_delayed_work_sync(&dev->mphy.mac_work);
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
- dev->sar_inited = false;
for (i = 0; i < 10; i++) {
mutex_lock(&dev->mt76.mutex);
@@ -1329,6 +1328,10 @@ void mt7925_mac_reset_work(struct work_struct *work)
IEEE80211_IFACE_ITER_RESUME_ALL,
mt7925_vif_connect_iter, NULL);
mt76_connac_power_save_sched(&dev->mt76.phy, pm);
+
+ mt792x_mutex_acquire(dev);
+ mt7925_mcu_set_clc(dev, "00", ENVIRON_INDOOR);
+ mt792x_mutex_release(dev);
}
void mt7925_coredump_work(struct work_struct *work)
@@ -1449,7 +1452,7 @@ void mt7925_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
sta = wcid_to_sta(wcid);
if (sta && likely(e->skb->protocol != cpu_to_be16(ETH_P_PAE)))
- mt76_connac2_tx_check_aggr(sta, txwi);
+ mt7925_tx_check_aggr(sta, e->skb, wcid);
skb_pull(e->skb, headroom);
mt76_tx_complete_skb(mdev, e->wcid, e->skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
index a8d25b7d47d0..ac3d485a2f78 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
@@ -240,6 +240,7 @@ int mt7925_init_mlo_caps(struct mt792x_phy *phy)
{
struct wiphy *wiphy = phy->mt76->hw->wiphy;
static const u8 ext_capa_sta[] = {
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
};
static struct wiphy_iftype_ext_capab ext_capab[] = {
@@ -310,7 +311,6 @@ void mt7925_set_stream_he_eht_caps(struct mt792x_phy *phy)
int __mt7925_start(struct mt792x_phy *phy)
{
struct mt76_phy *mphy = phy->mt76;
- struct mt792x_dev *dev = phy->dev;
int err;
err = mt7925_mcu_set_channel_domain(mphy);
@@ -321,13 +321,6 @@ int __mt7925_start(struct mt792x_phy *phy)
if (err)
return err;
- if (!dev->sar_inited) {
- err = mt7925_set_tx_sar_pwr(mphy->hw, NULL);
- if (err)
- return err;
- dev->sar_inited = true;
- }
-
mt792x_mac_reset_counters(phy);
set_bit(MT76_STATE_RUNNING, &mphy->state);
@@ -987,56 +980,6 @@ int mt7925_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(mt7925_mac_sta_add);
-static u16
-mt7925_mac_select_links(struct mt76_dev *mdev, struct ieee80211_vif *vif)
-{
- unsigned long usable_links = ieee80211_vif_usable_links(vif);
- struct {
- u8 link_id;
- enum nl80211_band band;
- } data[IEEE80211_MLD_MAX_NUM_LINKS];
- u8 link_id, i, j, n_data = 0;
- u16 sel_links = 0;
-
- if (!ieee80211_vif_is_mld(vif))
- return 0;
-
- if (vif->active_links == usable_links)
- return vif->active_links;
-
- rcu_read_lock();
- for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
- struct ieee80211_bss_conf *link_conf =
- rcu_dereference(vif->link_conf[link_id]);
-
- if (WARN_ON_ONCE(!link_conf))
- continue;
-
- data[n_data].link_id = link_id;
- data[n_data].band = link_conf->chanreq.oper.chan->band;
- n_data++;
- }
- rcu_read_unlock();
-
- for (i = 0; i < n_data; i++) {
- if (!(BIT(data[i].link_id) & vif->active_links))
- continue;
-
- sel_links = BIT(data[i].link_id);
-
- for (j = 0; j < n_data; j++) {
- if (data[i].band != data[j].band) {
- sel_links |= BIT(data[j].link_id);
- break;
- }
- }
-
- break;
- }
-
- return sel_links;
-}
-
static void
mt7925_mac_set_links(struct mt76_dev *mdev, struct ieee80211_vif *vif)
{
@@ -1047,7 +990,7 @@ mt7925_mac_set_links(struct mt76_dev *mdev, struct ieee80211_vif *vif)
struct cfg80211_chan_def *chandef = &link_conf->chanreq.oper;
enum nl80211_band band = chandef->chan->band, secondary_band;
- u16 sel_links = mt7925_mac_select_links(mdev, vif);
+ u16 sel_links = mt76_select_links(vif, 2);
u8 secondary_link_id = __ffs(~BIT(mvif->deflink_id) & sel_links);
if (!ieee80211_vif_is_mld(vif) || hweight16(sel_links) < 2)
@@ -1191,6 +1134,9 @@ mt7925_mac_sta_remove_links(struct mt792x_dev *dev, struct ieee80211_vif *vif,
struct mt792x_bss_conf *mconf;
struct mt792x_link_sta *mlink;
+ if (vif->type == NL80211_IFTYPE_AP)
+ break;
+
link_sta = mt792x_sta_to_link_sta(vif, sta, link_id);
if (!link_sta)
continue;
@@ -1728,13 +1674,7 @@ static int mt7925_set_sar_specs(struct ieee80211_hw *hw,
int err;
mt792x_mutex_acquire(dev);
- err = mt7925_mcu_set_clc(dev, dev->mt76.alpha2,
- dev->country_ie_env);
- if (err < 0)
- goto out;
-
err = mt7925_set_tx_sar_pwr(hw, sar);
-out:
mt792x_mutex_release(dev);
return err;
@@ -2069,8 +2009,10 @@ mt7925_change_vif_links(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
GFP_KERNEL);
mlink = devm_kzalloc(dev->mt76.dev, sizeof(*mlink),
GFP_KERNEL);
- if (!mconf || !mlink)
+ if (!mconf || !mlink) {
+ mt792x_mutex_release(dev);
return -ENOMEM;
+ }
}
mconfs[link_id] = mconf;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
index 300c863f0e3e..8eda407e4135 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
@@ -759,7 +759,6 @@ static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name)
}
}
- ret = mt7925_mcu_set_clc(dev, "00", ENVIRON_INDOOR);
out:
release_firmware(fw);
@@ -1834,13 +1833,13 @@ mt7925_mcu_sta_eht_mld_tlv(struct sk_buff *skb,
struct tlv *tlv;
u16 eml_cap;
+ if (!ieee80211_vif_is_mld(vif))
+ return;
+
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_EHT_MLD, sizeof(*eht_mld));
eht_mld = (struct sta_rec_eht_mld *)tlv;
eht_mld->mld_type = 0xff;
- if (!ieee80211_vif_is_mld(vif))
- return;
-
ext_capa = cfg80211_get_iftype_ext_capa(wiphy,
ieee80211_vif_type_p2p(vif));
if (!ext_capa)
@@ -1912,6 +1911,7 @@ mt7925_mcu_sta_cmd(struct mt76_phy *phy,
struct mt76_dev *dev = phy->dev;
struct mt792x_bss_conf *mconf;
struct sk_buff *skb;
+ int conn_state;
mconf = mt792x_vif_to_link(mvif, info->wcid->link_id);
@@ -1920,10 +1920,13 @@ mt7925_mcu_sta_cmd(struct mt76_phy *phy,
if (IS_ERR(skb))
return PTR_ERR(skb);
+ conn_state = info->enable ? CONN_STATE_PORT_SECURE :
+ CONN_STATE_DISCONNECT;
+
if (info->enable && info->link_sta) {
mt76_connac_mcu_sta_basic_tlv(dev, skb, info->link_conf,
info->link_sta,
- info->enable, info->newly);
+ conn_state, info->newly);
mt7925_mcu_sta_phy_tlv(skb, info->vif, info->link_sta);
mt7925_mcu_sta_ht_tlv(skb, info->link_sta);
mt7925_mcu_sta_vht_tlv(skb, info->link_sta);
@@ -2618,6 +2621,25 @@ mt7925_mcu_bss_qos_tlv(struct sk_buff *skb, struct ieee80211_bss_conf *link_conf
}
static void
+mt7925_mcu_bss_mbssid_tlv(struct sk_buff *skb, struct ieee80211_bss_conf *link_conf,
+ bool enable)
+{
+ struct bss_info_uni_mbssid *mbssid;
+ struct tlv *tlv;
+
+ if (!enable && !link_conf->bssid_indicator)
+ return;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_11V_MBSSID,
+ sizeof(*mbssid));
+
+ mbssid = (struct bss_info_uni_mbssid *)tlv;
+ mbssid->max_indicator = link_conf->bssid_indicator;
+ mbssid->mbss_idx = link_conf->bssid_index;
+ mbssid->tx_bss_omac_idx = 0;
+}
+
+static void
mt7925_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_bss_conf *link_conf,
struct mt792x_phy *phy)
{
@@ -2783,8 +2805,10 @@ int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
mt7925_mcu_bss_color_tlv(skb, link_conf, enable);
}
- if (enable)
+ if (enable) {
mt7925_mcu_bss_rlm_tlv(skb, phy->mt76, link_conf, ctx);
+ mt7925_mcu_bss_mbssid_tlv(skb, link_conf, enable);
+ }
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_UNI_CMD(BSS_INFO_UPDATE), true);
@@ -3699,6 +3723,8 @@ out:
int mt7925_mcu_set_rate_txpower(struct mt76_phy *phy)
{
+ struct mt76_dev *mdev = phy->dev;
+ struct mt792x_dev *dev = mt792x_hw_dev(mdev->hw);
int err;
if (phy->cap.has_2ghz) {
@@ -3715,7 +3741,7 @@ int mt7925_mcu_set_rate_txpower(struct mt76_phy *phy)
return err;
}
- if (phy->cap.has_6ghz) {
+ if (phy->cap.has_6ghz && dev->phy.clc_chan_conf) {
err = mt7925_mcu_rate_txpower_band(phy,
NL80211_BAND_6GHZ);
if (err < 0)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
index 89dc30f7c6b7..8eb1fe1082d1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
@@ -529,7 +529,7 @@ restore_suspend:
return err;
}
-static int mt7925_pci_resume(struct device *device)
+static int _mt7925_pci_resume(struct device *device, bool restore)
{
struct pci_dev *pdev = to_pci_dev(device);
struct mt76_dev *mdev = pci_get_drvdata(pdev);
@@ -569,6 +569,9 @@ static int mt7925_pci_resume(struct device *device)
napi_schedule(&mdev->tx_napi);
local_bh_enable();
+ if (restore)
+ goto failed;
+
mt76_connac_mcu_set_hif_suspend(mdev, false, false);
ret = wait_event_timeout(dev->wait,
dev->hif_resumed, 3 * HZ);
@@ -585,7 +588,7 @@ static int mt7925_pci_resume(struct device *device)
failed:
pm->suspended = false;
- if (err < 0)
+ if (err < 0 || restore)
mt792x_reset(&dev->mt76);
return err;
@@ -596,7 +599,24 @@ static void mt7925_pci_shutdown(struct pci_dev *pdev)
mt7925_pci_remove(pdev);
}
-static DEFINE_SIMPLE_DEV_PM_OPS(mt7925_pm_ops, mt7925_pci_suspend, mt7925_pci_resume);
+static int mt7925_pci_resume(struct device *device)
+{
+ return _mt7925_pci_resume(device, false);
+}
+
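+/* Hibernation restore path: _mt7925_pci_resume(dev, true) skips the hif
+ * resume handshake and forces a full mt792x_reset() instead.
+ */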
+static int mt7925_pci_restore(struct device *device)
+{
+ return _mt7925_pci_resume(device, true);
+}
+
+static const struct dev_pm_ops mt7925_pm_ops = {
+ .suspend = pm_sleep_ptr(mt7925_pci_suspend),
+ .resume = pm_sleep_ptr(mt7925_pci_resume),
+ .freeze = pm_sleep_ptr(mt7925_pci_suspend),
+ .thaw = pm_sleep_ptr(mt7925_pci_resume),
+ .poweroff = pm_sleep_ptr(mt7925_pci_suspend),
+ .restore = pm_sleep_ptr(mt7925_pci_restore),
+};
static struct pci_driver mt7925_pci_driver = {
.name = KBUILD_MODNAME,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
index 4dfbc1b6cfdd..bf040f34e4b9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
@@ -12,6 +12,9 @@
static const struct usb_device_id mt7925u_device_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7925, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)MT7925_FIRMWARE_WM },
+ /* Netgear, Inc. A9000 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9072, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)MT7925_FIRMWARE_WM },
{ },
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x.h b/drivers/net/wireless/mediatek/mt76/mt792x.h
index 443d397d9961..f2c8b9e4aa0f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x.h
+++ b/drivers/net/wireless/mediatek/mt76/mt792x.h
@@ -234,7 +234,6 @@ struct mt792x_dev {
bool aspm_supported:1;
bool hif_idle:1;
bool hif_resumed:1;
- bool sar_inited:1;
bool regd_change:1;
wait_queue_head_t wait;
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_core.c b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
index e3a703398b30..c0e56541a954 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
@@ -689,8 +689,12 @@ int mt792x_init_wiphy(struct ieee80211_hw *hw)
ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
ieee80211_hw_set(hw, CONNECTION_MONITOR);
ieee80211_hw_set(hw, NO_VIRTUAL_MONITOR);
+	ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
+	ieee80211_hw_set(hw, SUPPORTS_ONLY_HE_MULTI_BSSID);
+
 	if (is_mt7921(&dev->mt76))
 		ieee80211_hw_set(hw, CHANCTX_STA_CSA);
if (dev->pm.enable)
ieee80211_hw_set(hw, CONNECTION_MONITOR);
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_dma.c b/drivers/net/wireless/mediatek/mt76/mt792x_dma.c
index 6f9db782338e..69217ce91130 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_dma.c
@@ -181,13 +181,13 @@ mt792x_dma_reset(struct mt792x_dev *dev, bool force)
/* reset hw queues */
for (i = 0; i < __MT_TXQ_MAX; i++)
- mt76_queue_reset(dev, dev->mphy.q_tx[i]);
+ mt76_queue_reset(dev, dev->mphy.q_tx[i], true);
for (i = 0; i < __MT_MCUQ_MAX; i++)
- mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
+ mt76_queue_reset(dev, dev->mt76.q_mcu[i], true);
mt76_for_each_q_rx(&dev->mt76, i)
- mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
+ mt76_queue_reset(dev, &dev->mt76.q_rx[i], true);
mt76_tx_status_check(&dev->mt76, true);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
index c8bef0b2a144..659015f93d32 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
@@ -17,7 +17,7 @@ int mt7996_init_tx_queues(struct mt7996_phy *phy, int idx, int n_desc,
ring_base += MT_TXQ_ID(0) * MT_RING_SIZE;
idx -= MT_TXQ_ID(0);
- if (phy->mt76->band_idx == MT_BAND2)
+ if (wed == &dev->mt76.mmio.wed_hif2)
flags = MT_WED_Q_TX(0);
else
flags = MT_WED_Q_TX(idx);
@@ -83,36 +83,74 @@ static void mt7996_dma_config(struct mt7996_dev *dev)
break;
}
- if (dev->has_rro) {
+ if (mt7996_has_hwrro(dev)) {
/* band0 */
RXQ_CONFIG(MT_RXQ_RRO_BAND0, WFDMA0, MT_INT_RX_DONE_RRO_BAND0,
MT7996_RXQ_RRO_BAND0);
- RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND0, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND0,
- MT7996_RXQ_MSDU_PG_BAND0);
- RXQ_CONFIG(MT_RXQ_TXFREE_BAND0, WFDMA0, MT_INT_RX_TXFREE_MAIN,
- MT7996_RXQ_TXFREE0);
- /* band1 */
- RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND1, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND1,
- MT7996_RXQ_MSDU_PG_BAND1);
- /* band2 */
- RXQ_CONFIG(MT_RXQ_RRO_BAND2, WFDMA0, MT_INT_RX_DONE_RRO_BAND2,
- MT7996_RXQ_RRO_BAND2);
- RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND2, WFDMA0, MT_INT_RX_DONE_MSDU_PG_BAND2,
- MT7996_RXQ_MSDU_PG_BAND2);
- RXQ_CONFIG(MT_RXQ_TXFREE_BAND2, WFDMA0, MT_INT_RX_TXFREE_TRI,
- MT7996_RXQ_TXFREE2);
-
- RXQ_CONFIG(MT_RXQ_RRO_IND, WFDMA0, MT_INT_RX_DONE_RRO_IND,
- MT7996_RXQ_RRO_IND);
+ if (dev->mt76.hwrro_mode == MT76_HWRRO_V3)
+ RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND0, WFDMA0,
+ MT_INT_RX_DONE_MSDU_PG_BAND0,
+ MT7996_RXQ_MSDU_PG_BAND0);
+ if (is_mt7996(&dev->mt76)) {
+ RXQ_CONFIG(MT_RXQ_TXFREE_BAND0, WFDMA0,
+ MT_INT_RX_TXFREE_MAIN, MT7996_RXQ_TXFREE0);
+ /* band1 */
+ RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND1, WFDMA0,
+ MT_INT_RX_DONE_MSDU_PG_BAND1,
+ MT7996_RXQ_MSDU_PG_BAND1);
+ /* band2 */
+ RXQ_CONFIG(MT_RXQ_RRO_BAND2, WFDMA0,
+ MT_INT_RX_DONE_RRO_BAND2,
+ MT7996_RXQ_RRO_BAND2);
+ RXQ_CONFIG(MT_RXQ_MSDU_PAGE_BAND2, WFDMA0,
+ MT_INT_RX_DONE_MSDU_PG_BAND2,
+ MT7996_RXQ_MSDU_PG_BAND2);
+ RXQ_CONFIG(MT_RXQ_TXFREE_BAND2, WFDMA0,
+ MT_INT_RX_TXFREE_TRI, MT7996_RXQ_TXFREE2);
+ } else {
+ RXQ_CONFIG(MT_RXQ_RRO_BAND1, WFDMA0,
+ MT_INT_RX_DONE_RRO_BAND1,
+ MT7996_RXQ_RRO_BAND1);
+ }
+
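+		/* hw rro V3 posts ind cmd events, while V3.1 reports
+		 * completions on the rxdmad_c ring
+		 */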
+ if (dev->mt76.hwrro_mode == MT76_HWRRO_V3)
+ RXQ_CONFIG(MT_RXQ_RRO_IND, WFDMA0,
+ MT_INT_RX_DONE_RRO_IND,
+ MT7996_RXQ_RRO_IND);
+ else
+ RXQ_CONFIG(MT_RXQ_RRO_RXDMAD_C, WFDMA0,
+ MT_INT_RX_DONE_RRO_RXDMAD_C,
+ MT7996_RXQ_RRO_RXDMAD_C);
}
/* data tx queue */
- TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7996_TXQ_BAND0);
if (is_mt7996(&dev->mt76)) {
- TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
- TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND2, MT7996_TXQ_BAND2);
+ TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7996_TXQ_BAND0);
+ if (dev->hif2) {
+ /* default bn1:ring19 bn2:ring21 */
+ TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1,
+ MT7996_TXQ_BAND1);
+ TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND2,
+ MT7996_TXQ_BAND2);
+ } else {
+ /* single pcie bn0/1:ring18 bn2:ring19 */
+ TXQ_CONFIG(2, WFDMA0, MT_INT_TX_DONE_BAND1,
+ MT7996_TXQ_BAND1);
+ }
} else {
- TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7996_TXQ_BAND1);
+ if (dev->hif2) {
+ /* bn0:ring18 bn1:ring21 */
+ TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0,
+ MT7996_TXQ_BAND0);
+ TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND2,
+ MT7996_TXQ_BAND2);
+ } else {
+ /* single pcie bn0:ring18 bn1:ring19 */
+ TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0,
+ MT7996_TXQ_BAND0);
+ TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1,
+ MT7996_TXQ_BAND1);
+ }
}
/* mcu tx queue */
@@ -166,11 +204,12 @@ static void __mt7996_dma_prefetch(struct mt7996_dev *dev, u32 ofs)
/* Rx TxFreeDone From MAC Rings */
val = is_mt7996(&dev->mt76) ? 4 : 8;
- if (is_mt7990(&dev->mt76) || (is_mt7996(&dev->mt76) && dev->has_rro))
+ if ((is_mt7996(&dev->mt76) && mt7996_has_hwrro(dev)) ||
+ is_mt7990(&dev->mt76))
mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND0) + ofs, PREFETCH(val));
if (is_mt7990(&dev->mt76) && dev->hif2)
mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND1) + ofs, PREFETCH(val));
- else if (is_mt7996(&dev->mt76) && dev->has_rro)
+ else if (is_mt7996(&dev->mt76) && mt7996_has_hwrro(dev))
mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_TXFREE_BAND2) + ofs, PREFETCH(val));
/* Rx Data Rings */
@@ -179,7 +218,7 @@ static void __mt7996_dma_prefetch(struct mt7996_dev *dev, u32 ofs)
mt76_wr(dev, MT_RXQ_EXT_CTRL(queue) + ofs, PREFETCH(0x10));
/* Rx RRO Rings */
- if (dev->has_rro) {
+ if (mt7996_has_hwrro(dev)) {
mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_RRO_BAND0) + ofs, PREFETCH(0x10));
queue = is_mt7996(&dev->mt76) ? MT_RXQ_RRO_BAND2 : MT_RXQ_RRO_BAND1;
mt76_wr(dev, MT_RXQ_EXT_CTRL(queue) + ofs, PREFETCH(0x10));
@@ -288,11 +327,14 @@ void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset)
if (mt7996_band_valid(dev, MT_BAND0))
irq_mask |= MT_INT_BAND0_RX_DONE;
- if (mt7996_band_valid(dev, MT_BAND1))
+ if (mt7996_band_valid(dev, MT_BAND1)) {
irq_mask |= MT_INT_BAND1_RX_DONE;
+ if (is_mt7992(&dev->mt76) && dev->hif2)
+ irq_mask |= MT_INT_RX_TXFREE_BAND1_EXT;
+ }
if (mt7996_band_valid(dev, MT_BAND2))
- irq_mask |= MT_INT_BAND2_RX_DONE;
+ irq_mask |= MT_INT_BAND2_RX_DONE | MT_INT_TX_RX_DONE_EXT;
if (mtk_wed_device_active(wed) && wed_reset) {
u32 wed_irq_mask = irq_mask;
@@ -378,13 +420,48 @@ static void mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
WF_WFDMA0_GLO_CFG_EXT1_TX_FCTRL_MODE);
mt76_set(dev, MT_WFDMA_HOST_CONFIG,
- MT_WFDMA_HOST_CONFIG_PDMA_BAND |
- MT_WFDMA_HOST_CONFIG_BAND2_PCIE1);
+ MT_WFDMA_HOST_CONFIG_PDMA_BAND);
+
+ mt76_clear(dev, MT_WFDMA_HOST_CONFIG,
+ MT_WFDMA_HOST_CONFIG_BAND0_PCIE1 |
+ MT_WFDMA_HOST_CONFIG_BAND1_PCIE1 |
+ MT_WFDMA_HOST_CONFIG_BAND2_PCIE1);
+
+ if (is_mt7996(&dev->mt76))
+ mt76_set(dev, MT_WFDMA_HOST_CONFIG,
+ MT_WFDMA_HOST_CONFIG_BAND2_PCIE1);
+ else
+ mt76_set(dev, MT_WFDMA_HOST_CONFIG,
+ MT_WFDMA_HOST_CONFIG_BAND1_PCIE1);
/* AXI read outstanding number */
mt76_rmw(dev, MT_WFDMA_AXI_R2A_CTRL,
MT_WFDMA_AXI_R2A_CTRL_OUTSTAND_MASK, 0x14);
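+	/* Throttle the outstanding number further on a slow hif2 PCIe
+	 * link: 0x1 below Gen2 x2, 0x2 below Gen3 x2.
+	 */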
+ if (dev->hif2->speed < PCIE_SPEED_5_0GT ||
+ (dev->hif2->speed == PCIE_SPEED_5_0GT &&
+ dev->hif2->width < PCIE_LNK_X2)) {
+ mt76_rmw(dev, WF_WFDMA0_GLO_CFG_EXT0 + hif1_ofs,
+ WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK,
+ FIELD_PREP(WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK,
+ 0x1));
+ mt76_rmw(dev, MT_WFDMA_AXI_R2A_CTRL2,
+ MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK,
+ FIELD_PREP(MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK,
+ 0x1));
+ } else if (dev->hif2->speed < PCIE_SPEED_8_0GT ||
+ (dev->hif2->speed == PCIE_SPEED_8_0GT &&
+ dev->hif2->width < PCIE_LNK_X2)) {
+ mt76_rmw(dev, WF_WFDMA0_GLO_CFG_EXT0 + hif1_ofs,
+ WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK,
+ FIELD_PREP(WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK,
+ 0x2));
+ mt76_rmw(dev, MT_WFDMA_AXI_R2A_CTRL2,
+ MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK,
+ FIELD_PREP(MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK,
+ 0x2));
+ }
+
/* WFDMA rx threshold */
mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_45_TH + hif1_ofs, 0xc000c);
mt76_wr(dev, MT_WFDMA0_PAUSE_RX_Q_67_TH + hif1_ofs, 0x10008);
@@ -397,27 +474,58 @@ static void mt7996_dma_enable(struct mt7996_dev *dev, bool reset)
* so, redirect pcie0 rx ring3 interrupt to pcie1
*/
if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
- dev->has_rro)
+ mt7996_has_hwrro(dev)) {
+		u32 intr = is_mt7996(&dev->mt76) ?
+			   MT_WFDMA0_RX_INT_SEL_RING6 :
+			   (MT_WFDMA0_RX_INT_SEL_RING9 |
+			    MT_WFDMA0_RX_INT_SEL_RING5);
+
mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL + hif1_ofs,
- MT_WFDMA0_RX_INT_SEL_RING6);
- else
+ intr);
+ } else {
mt76_set(dev, MT_WFDMA0_RX_INT_PCIE_SEL,
MT_WFDMA0_RX_INT_SEL_RING3);
+ }
}
mt7996_dma_start(dev, reset, true);
}
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
int mt7996_dma_rro_init(struct mt7996_dev *dev)
{
struct mt76_dev *mdev = &dev->mt76;
u32 irq_mask;
int ret;
+ if (dev->mt76.hwrro_mode == MT76_HWRRO_V3_1) {
+ /* rxdmad_c */
+ mdev->q_rx[MT_RXQ_RRO_RXDMAD_C].flags = MT_WED_RRO_Q_RXDMAD_C;
+ if (mtk_wed_device_active(&mdev->mmio.wed))
+ mdev->q_rx[MT_RXQ_RRO_RXDMAD_C].wed = &mdev->mmio.wed;
+ else
+ mdev->q_rx[MT_RXQ_RRO_RXDMAD_C].flags |= MT_QFLAG_EMI_EN;
+ ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_RRO_RXDMAD_C],
+ MT_RXQ_ID(MT_RXQ_RRO_RXDMAD_C),
+ MT7996_RX_RING_SIZE,
+ MT7996_RX_BUF_SIZE,
+ MT_RXQ_RRO_AP_RING_BASE);
+ if (ret)
+ return ret;
+
+ /* We need to set cpu idx pointer before resetting the EMI
+ * queues.
+ */
+ mdev->q_rx[MT_RXQ_RRO_RXDMAD_C].emi_cpu_idx =
+ &dev->wed_rro.emi_rings_cpu.ptr->ring[0].idx;
+ mt76_queue_reset(dev, &mdev->q_rx[MT_RXQ_RRO_RXDMAD_C], true);
+ goto start_hw_rro;
+ }
+
/* ind cmd */
mdev->q_rx[MT_RXQ_RRO_IND].flags = MT_WED_RRO_Q_IND;
- mdev->q_rx[MT_RXQ_RRO_IND].wed = &mdev->mmio.wed;
+ if (mtk_wed_device_active(&mdev->mmio.wed) &&
+ mtk_wed_get_rx_capa(&mdev->mmio.wed))
+ mdev->q_rx[MT_RXQ_RRO_IND].wed = &mdev->mmio.wed;
ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_RRO_IND],
MT_RXQ_ID(MT_RXQ_RRO_IND),
MT7996_RX_RING_SIZE,
@@ -428,7 +536,9 @@ int mt7996_dma_rro_init(struct mt7996_dev *dev)
/* rx msdu page queue for band0 */
mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].flags =
MT_WED_RRO_Q_MSDU_PG(0) | MT_QFLAG_WED_RRO_EN;
- mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].wed = &mdev->mmio.wed;
+ if (mtk_wed_device_active(&mdev->mmio.wed) &&
+ mtk_wed_get_rx_capa(&mdev->mmio.wed))
+ mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0].wed = &mdev->mmio.wed;
ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND0],
MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND0),
MT7996_RX_RING_SIZE,
@@ -437,11 +547,13 @@ int mt7996_dma_rro_init(struct mt7996_dev *dev)
if (ret)
return ret;
- if (mt7996_band_valid(dev, MT_BAND1)) {
+ if (mt7996_band_valid(dev, MT_BAND1) && is_mt7996(&dev->mt76)) {
/* rx msdu page queue for band1 */
mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].flags =
MT_WED_RRO_Q_MSDU_PG(1) | MT_QFLAG_WED_RRO_EN;
- mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].wed = &mdev->mmio.wed;
+ if (mtk_wed_device_active(&mdev->mmio.wed) &&
+ mtk_wed_get_rx_capa(&mdev->mmio.wed))
+ mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1].wed = &mdev->mmio.wed;
ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND1],
MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND1),
MT7996_RX_RING_SIZE,
@@ -455,7 +567,9 @@ int mt7996_dma_rro_init(struct mt7996_dev *dev)
/* rx msdu page queue for band2 */
mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].flags =
MT_WED_RRO_Q_MSDU_PG(2) | MT_QFLAG_WED_RRO_EN;
- mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].wed = &mdev->mmio.wed;
+ if (mtk_wed_device_active(&mdev->mmio.wed) &&
+ mtk_wed_get_rx_capa(&mdev->mmio.wed))
+ mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2].wed = &mdev->mmio.wed;
ret = mt76_queue_alloc(dev, &mdev->q_rx[MT_RXQ_MSDU_PAGE_BAND2],
MT_RXQ_ID(MT_RXQ_MSDU_PAGE_BAND2),
MT7996_RX_RING_SIZE,
@@ -465,15 +579,42 @@ int mt7996_dma_rro_init(struct mt7996_dev *dev)
return ret;
}
- irq_mask = mdev->mmio.irqmask | MT_INT_RRO_RX_DONE |
- MT_INT_TX_DONE_BAND2;
- mt76_wr(dev, MT_INT_MASK_CSR, irq_mask);
- mtk_wed_device_start_hw_rro(&mdev->mmio.wed, irq_mask, false);
- mt7996_irq_enable(dev, irq_mask);
+start_hw_rro:
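+	/* With WED active the rro rings are handed over to the WED hw;
+	 * otherwise they are serviced by the host NAPI poll loop.
+	 */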
+ if (mtk_wed_device_active(&mdev->mmio.wed)) {
+ irq_mask = mdev->mmio.irqmask |
+ MT_INT_TX_DONE_BAND2;
+
+ mt76_wr(dev, MT_INT_MASK_CSR, irq_mask);
+ mtk_wed_device_start_hw_rro(&mdev->mmio.wed, irq_mask, false);
+ mt7996_irq_enable(dev, irq_mask);
+ } else {
+ if (is_mt7996(&dev->mt76)) {
+ mt76_queue_rx_init(dev, MT_RXQ_MSDU_PAGE_BAND1,
+ mt76_dma_rx_poll);
+ mt76_queue_rx_init(dev, MT_RXQ_MSDU_PAGE_BAND2,
+ mt76_dma_rx_poll);
+ mt76_queue_rx_init(dev, MT_RXQ_RRO_BAND2,
+ mt76_dma_rx_poll);
+ } else {
+ mt76_queue_rx_init(dev, MT_RXQ_RRO_BAND1,
+ mt76_dma_rx_poll);
+ }
+
+ mt76_queue_rx_init(dev, MT_RXQ_RRO_BAND0, mt76_dma_rx_poll);
+ if (dev->mt76.hwrro_mode == MT76_HWRRO_V3_1) {
+ mt76_queue_rx_init(dev, MT_RXQ_RRO_RXDMAD_C,
+ mt76_dma_rx_poll);
+ } else {
+ mt76_queue_rx_init(dev, MT_RXQ_RRO_IND,
+ mt76_dma_rx_poll);
+ mt76_queue_rx_init(dev, MT_RXQ_MSDU_PAGE_BAND0,
+ mt76_dma_rx_poll);
+ }
+ mt7996_irq_enable(dev, MT_INT_RRO_RX_DONE);
+ }
return 0;
}
-#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
int mt7996_dma_init(struct mt7996_dev *dev)
{
@@ -560,7 +701,9 @@ int mt7996_dma_init(struct mt7996_dev *dev)
return ret;
/* tx free notify event from WA for band0 */
- if (mtk_wed_device_active(wed) && !dev->has_rro) {
+ if (mtk_wed_device_active(wed) &&
+ ((is_mt7996(&dev->mt76) && !mt7996_has_hwrro(dev)) ||
+ (is_mt7992(&dev->mt76)))) {
dev->mt76.q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE;
dev->mt76.q_rx[MT_RXQ_MAIN_WA].wed = wed;
}
@@ -615,7 +758,7 @@ int mt7996_dma_init(struct mt7996_dev *dev)
/* tx free notify event from WA for mt7996 band2
* use pcie0's rx ring3, but, redirect pcie0 rx ring3 interrupt to pcie1
*/
- if (mtk_wed_device_active(wed_hif2) && !dev->has_rro) {
+ if (mtk_wed_device_active(wed_hif2) && !mt7996_has_hwrro(dev)) {
dev->mt76.q_rx[MT_RXQ_BAND2_WA].flags = MT_WED_Q_TXFREE;
dev->mt76.q_rx[MT_RXQ_BAND2_WA].wed = wed_hif2;
}
@@ -630,6 +773,11 @@ int mt7996_dma_init(struct mt7996_dev *dev)
} else if (mt7996_band_valid(dev, MT_BAND1)) {
/* rx data queue for mt7992 band1 */
rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1) + hif1_ofs;
+ if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed)) {
+ dev->mt76.q_rx[MT_RXQ_BAND1].flags = MT_WED_Q_RX(1);
+ dev->mt76.q_rx[MT_RXQ_BAND1].wed = wed;
+ }
+
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1],
MT_RXQ_ID(MT_RXQ_BAND1),
MT7996_RX_RING_SIZE,
@@ -641,6 +789,12 @@ int mt7996_dma_init(struct mt7996_dev *dev)
/* tx free notify event from WA for mt7992 band1 */
if (mt7996_has_wa(dev)) {
rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1_WA) + hif1_ofs;
+ if (mtk_wed_device_active(wed_hif2)) {
+ dev->mt76.q_rx[MT_RXQ_BAND1_WA].flags =
+ MT_WED_Q_TXFREE;
+ dev->mt76.q_rx[MT_RXQ_BAND1_WA].wed = wed_hif2;
+ }
+
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1_WA],
MT_RXQ_ID(MT_RXQ_BAND1_WA),
MT7996_RX_MCU_RING_SIZE,
@@ -651,12 +805,12 @@ int mt7996_dma_init(struct mt7996_dev *dev)
}
}
- if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed) &&
- dev->has_rro) {
+ if (mt7996_has_hwrro(dev)) {
/* rx rro data queue for band0 */
dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags =
MT_WED_RRO_Q_DATA(0) | MT_QFLAG_WED_RRO_EN;
- dev->mt76.q_rx[MT_RXQ_RRO_BAND0].wed = wed;
+ if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed))
+ dev->mt76.q_rx[MT_RXQ_RRO_BAND0].wed = wed;
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND0],
MT_RXQ_ID(MT_RXQ_RRO_BAND0),
MT7996_RX_RING_SIZE,
@@ -665,23 +819,44 @@ int mt7996_dma_init(struct mt7996_dev *dev)
if (ret)
return ret;
- /* tx free notify event from WA for band0 */
- dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
- dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;
+ if (is_mt7992(&dev->mt76)) {
+ dev->mt76.q_rx[MT_RXQ_RRO_BAND1].flags =
+ MT_WED_RRO_Q_DATA(1) | MT_QFLAG_WED_RRO_EN;
+ if (mtk_wed_device_active(wed) &&
+ mtk_wed_get_rx_capa(wed))
+ dev->mt76.q_rx[MT_RXQ_RRO_BAND1].wed = wed;
+ ret = mt76_queue_alloc(dev,
+ &dev->mt76.q_rx[MT_RXQ_RRO_BAND1],
+ MT_RXQ_ID(MT_RXQ_RRO_BAND1),
+ MT7996_RX_RING_SIZE,
+ MT7996_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND1) + hif1_ofs);
+ if (ret)
+ return ret;
+ } else {
+ if (mtk_wed_device_active(wed)) {
+ /* tx free notify event from WA for band0 */
+ dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
+ dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;
+ }
- ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
- MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
- MT7996_RX_MCU_RING_SIZE,
- MT7996_RX_BUF_SIZE,
- MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND0));
- if (ret)
- return ret;
+ ret = mt76_queue_alloc(dev,
+ &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
+ MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
+ MT7996_RX_MCU_RING_SIZE,
+ MT7996_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND0));
+ if (ret)
+ return ret;
+ }
if (mt7996_band_valid(dev, MT_BAND2)) {
/* rx rro data queue for band2 */
dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags =
MT_WED_RRO_Q_DATA(1) | MT_QFLAG_WED_RRO_EN;
- dev->mt76.q_rx[MT_RXQ_RRO_BAND2].wed = wed;
+ if (mtk_wed_device_active(wed) &&
+ mtk_wed_get_rx_capa(wed))
+ dev->mt76.q_rx[MT_RXQ_RRO_BAND2].wed = wed;
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND2],
MT_RXQ_ID(MT_RXQ_RRO_BAND2),
MT7996_RX_RING_SIZE,
@@ -752,6 +927,10 @@ void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
mt76_tx_status_check(&dev->mt76, true);
+ if (mt7996_has_hwrro(dev) &&
+ !mtk_wed_device_active(&dev->mt76.mmio.wed))
+ mt7996_rro_msdu_page_map_free(dev);
+
/* reset wfsys */
if (force)
mt7996_wfsys_reset(dev);
@@ -775,21 +954,32 @@ void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
}
for (i = 0; i < __MT_MCUQ_MAX; i++)
- mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
+ mt76_queue_reset(dev, dev->mt76.q_mcu[i], true);
mt76_for_each_q_rx(&dev->mt76, i) {
- if (mtk_wed_device_active(&dev->mt76.mmio.wed))
- if (mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]) ||
- mt76_queue_is_wed_tx_free(&dev->mt76.q_rx[i]))
- continue;
+ struct mt76_queue *q = &dev->mt76.q_rx[i];
- mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
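+
+		/* WED-owned rro/txfree rings keep their state across the
+		 * reset; on a forced reset the rro data rings are cleaned
+		 * without touching the ring indices.
+		 */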
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
+ if (mt76_queue_is_wed_rro(q) ||
+ mt76_queue_is_wed_tx_free(q)) {
+ if (force && mt76_queue_is_wed_rro_data(q))
+ mt76_queue_reset(dev, q, false);
+ continue;
+ }
+ }
+ mt76_queue_reset(dev, q, true);
}
mt76_tx_status_check(&dev->mt76, true);
- mt76_for_each_q_rx(&dev->mt76, i)
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed) && force &&
+ (mt76_queue_is_wed_rro_ind(&dev->mt76.q_rx[i]) ||
+ mt76_queue_is_wed_rro_msdu_pg(&dev->mt76.q_rx[i])))
+ continue;
+
mt76_queue_rx_reset(dev, i);
+ }
mt7996_dma_enable(dev, !force);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c
index 87c6192b6384..da3231c9aa11 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c
@@ -334,9 +334,8 @@ int mt7996_eeprom_init(struct mt7996_dev *dev)
return ret;
memcpy(dev->mphy.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR, ETH_ALEN);
- mt76_eeprom_override(&dev->mphy);
- return 0;
+ return mt76_eeprom_override(&dev->mphy);
}
int mt7996_eeprom_get_target_power(struct mt7996_dev *dev,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
index a9599c286328..5e95a36b42d1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
@@ -63,6 +63,33 @@ static const struct ieee80211_iface_combination if_comb = {
.beacon_int_min_gcd = 100,
};
+static const u8 if_types_ext_capa_ap[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
+};
+
+static const struct wiphy_iftype_ext_capab iftypes_ext_capa[] = {
+ {
+ .iftype = NL80211_IFTYPE_AP,
+ .extended_capabilities = if_types_ext_capa_ap,
+ .extended_capabilities_mask = if_types_ext_capa_ap,
+ .extended_capabilities_len = sizeof(if_types_ext_capa_ap),
+ .eml_capabilities = IEEE80211_EML_CAP_EMLSR_SUPP,
+ .mld_capa_and_ops =
+ FIELD_PREP_CONST(IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS,
+ MT7996_MAX_RADIOS - 1),
+ }, {
+ .iftype = NL80211_IFTYPE_STATION,
+ .extended_capabilities = if_types_ext_capa_ap,
+ .extended_capabilities_mask = if_types_ext_capa_ap,
+ .extended_capabilities_len = sizeof(if_types_ext_capa_ap),
+ .mld_capa_and_ops =
+ FIELD_PREP_CONST(IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS,
+ MT7996_MAX_RADIOS - 1),
+ },
+};
+
static ssize_t mt7996_thermal_temp_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -383,6 +410,7 @@ mt7996_init_wiphy_band(struct ieee80211_hw *hw, struct mt7996_phy *phy)
phy->slottime = 9;
phy->beacon_rate = -1;
+ phy->rxfilter = MT_WF_RFCR_DROP_OTHER_UC;
if (phy->mt76->cap.has_2ghz) {
phy->mt76->sband_2g.sband.ht_cap.cap |=
@@ -463,8 +491,11 @@ mt7996_init_wiphy(struct ieee80211_hw *hw, struct mtk_wed_device *wed)
wiphy->radio = dev->radios;
wiphy->reg_notifier = mt7996_regd_notifier;
- wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
+ WIPHY_FLAG_SUPPORTS_MLO;
wiphy->mbssid_max_interfaces = 16;
+ wiphy->iftype_ext_capab = iftypes_ext_capa;
+ wiphy->num_iftype_ext_capab = ARRAY_SIZE(iftypes_ext_capa);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BSS_COLOR);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
@@ -575,19 +606,21 @@ void mt7996_mac_init(struct mt7996_dev *dev)
}
/* rro module init */
- if (is_mt7996(&dev->mt76))
- mt7996_mcu_set_rro(dev, UNI_RRO_SET_PLATFORM_TYPE, 2);
- else
+ if (dev->hif2)
mt7996_mcu_set_rro(dev, UNI_RRO_SET_PLATFORM_TYPE,
- dev->hif2 ? 7 : 0);
+ is_mt7996(&dev->mt76) ? 2 : 7);
+ else
+ mt7996_mcu_set_rro(dev, UNI_RRO_SET_PLATFORM_TYPE, 0);
- if (dev->has_rro) {
+ if (mt7996_has_hwrro(dev)) {
u16 timeout;
timeout = mt76_rr(dev, MT_HW_REV) == MT_HW_REV1 ? 512 : 128;
mt7996_mcu_set_rro(dev, UNI_RRO_SET_FLUSH_TIMEOUT, timeout);
- mt7996_mcu_set_rro(dev, UNI_RRO_SET_BYPASS_MODE, 1);
- mt7996_mcu_set_rro(dev, UNI_RRO_SET_TXFREE_PATH, 0);
+ mt7996_mcu_set_rro(dev, UNI_RRO_SET_BYPASS_MODE,
+ is_mt7996(&dev->mt76) ? 1 : 2);
+ mt7996_mcu_set_rro(dev, UNI_RRO_SET_TXFREE_PATH,
+ !is_mt7996(&dev->mt76));
} else {
mt7996_mcu_set_rro(dev, UNI_RRO_SET_BYPASS_MODE, 3);
mt7996_mcu_set_rro(dev, UNI_RRO_SET_TXFREE_PATH, 1);
@@ -634,7 +667,9 @@ static int mt7996_register_phy(struct mt7996_dev *dev, enum mt76_band_id band)
if (!mt7996_band_valid(dev, band))
return 0;
- if (is_mt7996(&dev->mt76) && band == MT_BAND2 && dev->hif2) {
+ if (dev->hif2 &&
+ ((is_mt7996(&dev->mt76) && band == MT_BAND2) ||
+ (is_mt7992(&dev->mt76) && band == MT_BAND1))) {
hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
wed = &dev->mt76.mmio.wed_hif2;
}
@@ -667,17 +702,26 @@ static int mt7996_register_phy(struct mt7996_dev *dev, enum mt76_band_id band)
if (band == MT_BAND2)
mphy->macaddr[0] ^= BIT(6);
}
- mt76_eeprom_override(mphy);
+ ret = mt76_eeprom_override(mphy);
+ if (ret)
+ goto error;
/* init wiphy according to mphy and phy */
mt7996_init_wiphy_band(mphy->hw, phy);
- ret = mt7996_init_tx_queues(mphy->priv,
- MT_TXQ_ID(band),
- MT7996_TX_RING_SIZE,
- MT_TXQ_RING_BASE(band) + hif1_ofs,
- wed);
- if (ret)
- goto error;
+
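+	/* Single-PCIe mt7996: band1 has no tx ring of its own and shares
+	 * band0's data queue.
+	 */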
+ if (is_mt7996(&dev->mt76) && !dev->hif2 && band == MT_BAND1) {
+ int i;
+
+ for (i = 0; i <= MT_TXQ_PSD; i++)
+ mphy->q_tx[i] = dev->mt76.phys[MT_BAND0]->q_tx[0];
+ } else {
+ ret = mt7996_init_tx_queues(mphy->priv, MT_TXQ_ID(band),
+ MT7996_TX_RING_SIZE,
+ MT_TXQ_RING_BASE(band) + hif1_ofs,
+ wed);
+ if (ret)
+ goto error;
+ }
ret = mt76_register_phy(mphy, true, mt76_rates,
ARRAY_SIZE(mt76_rates));
@@ -685,10 +729,9 @@ static int mt7996_register_phy(struct mt7996_dev *dev, enum mt76_band_id band)
goto error;
if (wed == &dev->mt76.mmio.wed_hif2 && mtk_wed_device_active(wed)) {
- u32 irq_mask = dev->mt76.mmio.irqmask | MT_INT_TX_DONE_BAND2;
-
- mt76_wr(dev, MT_INT1_MASK_CSR, irq_mask);
- mtk_wed_device_start(&dev->mt76.mmio.wed_hif2, irq_mask);
+ mt76_wr(dev, MT_INT_PCIE1_MASK_CSR, MT_INT_TX_RX_DONE_EXT);
+ mtk_wed_device_start(&dev->mt76.mmio.wed_hif2,
+ MT_INT_TX_RX_DONE_EXT);
}
return 0;
@@ -724,30 +767,151 @@ void mt7996_wfsys_reset(struct mt7996_dev *dev)
msleep(20);
}
-static int mt7996_wed_rro_init(struct mt7996_dev *dev)
+static void mt7996_rro_hw_init_v3(struct mt7996_dev *dev)
{
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+ u32 session_id;
+
+ if (dev->mt76.hwrro_mode == MT76_HWRRO_V3_1)
+ return;
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
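+	/* ind_cmd.win_size encodes log2(window length) - 5, hence
+	 * ffs(MT7996_RRO_WINDOW_MAX_LEN) - 6 for a power-of-two length
+	 */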
+ if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed)) {
+ wed->wlan.ind_cmd.win_size =
+ ffs(MT7996_RRO_WINDOW_MAX_LEN) - 6;
+ if (is_mt7996(&dev->mt76))
+ wed->wlan.ind_cmd.particular_sid =
+ MT7996_RRO_MAX_SESSION;
+ else
+ wed->wlan.ind_cmd.particular_sid = 1;
+ wed->wlan.ind_cmd.particular_se_phys =
+ dev->wed_rro.session.phy_addr;
+ wed->wlan.ind_cmd.se_group_nums = MT7996_RRO_ADDR_ELEM_LEN;
+ wed->wlan.ind_cmd.ack_sn_addr = MT_RRO_ACK_SN_CTRL;
+ }
+#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
+
+ if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed)) {
+ mt76_wr(dev, MT_RRO_IND_CMD_SIGNATURE_BASE0, 0x15010e00);
+ mt76_set(dev, MT_RRO_IND_CMD_SIGNATURE_BASE1,
+ MT_RRO_IND_CMD_SIGNATURE_BASE1_EN);
+ } else {
+ mt76_wr(dev, MT_RRO_IND_CMD_SIGNATURE_BASE0, 0);
+ mt76_wr(dev, MT_RRO_IND_CMD_SIGNATURE_BASE1, 0);
+ }
+
+	/* particular session configuration: use max session idx + 1 as
+	 * the particular session id
+	 */
+ mt76_wr(dev, MT_RRO_PARTICULAR_CFG0, dev->wed_rro.session.phy_addr);
+
+ session_id = is_mt7996(&dev->mt76) ? MT7996_RRO_MAX_SESSION : 1;
+ mt76_wr(dev, MT_RRO_PARTICULAR_CFG1,
+ MT_RRO_PARTICULAR_CONFG_EN |
+ FIELD_PREP(MT_RRO_PARTICULAR_SID, session_id));
+}
+
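+/* Program the RRO engine tables: BA bitmap and address elements on
+ * mt7996, MSDU page arrays on the newer chips, plus the EMI ring index
+ * addresses for hw rro V3.1, then enable the RRO done interrupt.
+ */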
+void mt7996_rro_hw_init(struct mt7996_dev *dev)
+{
u32 reg = MT_RRO_ADDR_ELEM_SEG_ADDR0;
+ int i;
+
+ if (!mt7996_has_hwrro(dev))
+ return;
+
+ INIT_LIST_HEAD(&dev->wed_rro.page_cache);
+ for (i = 0; i < ARRAY_SIZE(dev->wed_rro.page_map); i++)
+ INIT_LIST_HEAD(&dev->wed_rro.page_map[i]);
+
+ if (!is_mt7996(&dev->mt76)) {
+ reg = MT_RRO_MSDU_PG_SEG_ADDR0;
+
+ if (dev->mt76.hwrro_mode == MT76_HWRRO_V3_1) {
+ mt76_clear(dev, MT_RRO_3_0_EMU_CONF,
+ MT_RRO_3_0_EMU_CONF_EN_MASK);
+ mt76_set(dev, MT_RRO_3_1_GLOBAL_CONFIG,
+ MT_RRO_3_1_GLOBAL_CONFIG_RXDMAD_SEL);
+ if (!mtk_wed_device_active(&dev->mt76.mmio.wed)) {
+ mt76_set(dev, MT_RRO_3_1_GLOBAL_CONFIG,
+ MT_RRO_3_1_GLOBAL_CONFIG_RX_DIDX_WR_EN |
+ MT_RRO_3_1_GLOBAL_CONFIG_RX_CIDX_RD_EN);
+ mt76_wr(dev, MT_RRO_RX_RING_AP_CIDX_ADDR,
+ dev->wed_rro.emi_rings_cpu.phy_addr >> 4);
+ mt76_wr(dev, MT_RRO_RX_RING_AP_DIDX_ADDR,
+ dev->wed_rro.emi_rings_dma.phy_addr >> 4);
+ }
+ } else {
+ /* enable the RRO 3.0 emulation function */
+ mt76_wr(dev, MT_RRO_3_0_EMU_CONF,
+ MT_RRO_3_0_EMU_CONF_EN_MASK);
+
+ mt76_wr(dev, MT_RRO_ADDR_ARRAY_BASE0,
+ dev->wed_rro.addr_elem[0].phy_addr);
+ }
+
+ mt76_set(dev, MT_RRO_3_1_GLOBAL_CONFIG,
+ MT_RRO_3_1_GLOBAL_CONFIG_INTERLEAVE_EN);
+
+ /* setup MSDU page addresses */
+ for (i = 0; i < ARRAY_SIZE(dev->wed_rro.msdu_pg); i++) {
+ mt76_wr(dev, reg,
+ dev->wed_rro.msdu_pg[i].phy_addr >> 4);
+ reg += 4;
+ }
+ } else {
+ /* TODO: remove this line once WM has set it */
+ mt76_clear(dev, WF_RRO_AXI_MST_CFG,
+ WF_RRO_AXI_MST_CFG_DIDX_OK);
+
+ /* setup BA bitmap cache address */
+ mt76_wr(dev, MT_RRO_BA_BITMAP_BASE0,
+ dev->wed_rro.ba_bitmap[0].phy_addr);
+ mt76_wr(dev, MT_RRO_BA_BITMAP_BASE1, 0);
+ mt76_wr(dev, MT_RRO_BA_BITMAP_BASE_EXT0,
+ dev->wed_rro.ba_bitmap[1].phy_addr);
+ mt76_wr(dev, MT_RRO_BA_BITMAP_BASE_EXT1, 0);
+
+ /* setup Address element address */
+ for (i = 0; i < ARRAY_SIZE(dev->wed_rro.addr_elem); i++) {
+ mt76_wr(dev, reg,
+ dev->wed_rro.addr_elem[i].phy_addr >> 4);
+ reg += 4;
+ }
+
+ /* setup Address element address - separate address segment
+ * mode.
+ */
+ mt76_wr(dev, MT_RRO_ADDR_ARRAY_BASE1,
+ MT_RRO_ADDR_ARRAY_ELEM_ADDR_SEG_MODE);
+ }
+
+ mt7996_rro_hw_init_v3(dev);
+
+ /* interrupt enable */
+ mt76_wr(dev, MT_RRO_HOST_INT_ENA,
+ MT_RRO_HOST_INT_ENA_HOST_RRO_DONE_ENA);
+}
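+/* Summary of the branches above (descriptive only): MT7996 (hwrro V3)
+ * programs the BA bitmap and address element bases in separate segment
+ * mode; later chips program the MSDU page segment bases and either the
+ * RRO 3.0 emulation plus a single address array base (V3) or, for V3.1,
+ * RXDMAD_SEL and, when WED is inactive, the EMI ring CPU/DMA index
+ * addresses.
+ */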
+
+static int mt7996_wed_rro_init(struct mt7996_dev *dev)
+{
+ u32 val = FIELD_PREP(WED_RRO_ADDR_SIGNATURE_MASK, 0xff);
struct mt7996_wed_rro_addr *addr;
void *ptr;
int i;
- if (!dev->has_rro)
+ if (!mt7996_has_hwrro(dev))
return 0;
- if (!mtk_wed_device_active(wed))
- return 0;
+ if (dev->mt76.hwrro_mode == MT76_HWRRO_V3) {
+ for (i = 0; i < ARRAY_SIZE(dev->wed_rro.ba_bitmap); i++) {
+ ptr = dmam_alloc_coherent(dev->mt76.dma_dev,
+ MT7996_RRO_BA_BITMAP_CR_SIZE,
+ &dev->wed_rro.ba_bitmap[i].phy_addr,
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(dev->wed_rro.ba_bitmap); i++) {
- ptr = dmam_alloc_coherent(dev->mt76.dma_dev,
- MT7996_RRO_BA_BITMAP_CR_SIZE,
- &dev->wed_rro.ba_bitmap[i].phy_addr,
- GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
-
- dev->wed_rro.ba_bitmap[i].ptr = ptr;
+ dev->wed_rro.ba_bitmap[i].ptr = ptr;
+ }
}
for (i = 0; i < ARRAY_SIZE(dev->wed_rro.addr_elem); i++) {
@@ -766,12 +930,53 @@ static int mt7996_wed_rro_init(struct mt7996_dev *dev)
addr = dev->wed_rro.addr_elem[i].ptr;
for (j = 0; j < MT7996_RRO_WINDOW_MAX_SIZE; j++) {
- addr->signature = 0xff;
+ addr->data = cpu_to_le32(val);
addr++;
}
- wed->wlan.ind_cmd.addr_elem_phys[i] =
- dev->wed_rro.addr_elem[i].phy_addr;
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
+ mtk_wed_get_rx_capa(&dev->mt76.mmio.wed)) {
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+
+ wed->wlan.ind_cmd.addr_elem_phys[i] =
+ dev->wed_rro.addr_elem[i].phy_addr;
+ }
+#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dev->wed_rro.msdu_pg); i++) {
+ ptr = dmam_alloc_coherent(dev->mt76.dma_dev,
+ MT7996_RRO_MSDU_PG_SIZE_PER_CR,
+ &dev->wed_rro.msdu_pg[i].phy_addr,
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ dev->wed_rro.msdu_pg[i].ptr = ptr;
+
+ memset(dev->wed_rro.msdu_pg[i].ptr, 0,
+ MT7996_RRO_MSDU_PG_SIZE_PER_CR);
+ }
+
+ if (dev->mt76.hwrro_mode == MT76_HWRRO_V3_1) {
+ ptr = dmam_alloc_coherent(dev->mt76.dma_dev,
+ sizeof(dev->wed_rro.emi_rings_cpu.ptr),
+ &dev->wed_rro.emi_rings_cpu.phy_addr,
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ dev->wed_rro.emi_rings_cpu.ptr = ptr;
+
+ ptr = dmam_alloc_coherent(dev->mt76.dma_dev,
+ sizeof(dev->wed_rro.emi_rings_dma.ptr),
+ &dev->wed_rro.emi_rings_dma.phy_addr,
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ dev->wed_rro.emi_rings_dma.ptr = ptr;
}
ptr = dmam_alloc_coherent(dev->mt76.dma_dev,
@@ -784,69 +989,20 @@ static int mt7996_wed_rro_init(struct mt7996_dev *dev)
dev->wed_rro.session.ptr = ptr;
addr = dev->wed_rro.session.ptr;
for (i = 0; i < MT7996_RRO_WINDOW_MAX_LEN; i++) {
- addr->signature = 0xff;
+ addr->data = cpu_to_le32(val);
addr++;
}
- /* rro hw init */
- /* TODO: remove line after WM has set */
- mt76_clear(dev, WF_RRO_AXI_MST_CFG, WF_RRO_AXI_MST_CFG_DIDX_OK);
+ mt7996_rro_hw_init(dev);
- /* setup BA bitmap cache address */
- mt76_wr(dev, MT_RRO_BA_BITMAP_BASE0,
- dev->wed_rro.ba_bitmap[0].phy_addr);
- mt76_wr(dev, MT_RRO_BA_BITMAP_BASE1, 0);
- mt76_wr(dev, MT_RRO_BA_BITMAP_BASE_EXT0,
- dev->wed_rro.ba_bitmap[1].phy_addr);
- mt76_wr(dev, MT_RRO_BA_BITMAP_BASE_EXT1, 0);
-
- /* setup Address element address */
- for (i = 0; i < ARRAY_SIZE(dev->wed_rro.addr_elem); i++) {
- mt76_wr(dev, reg, dev->wed_rro.addr_elem[i].phy_addr >> 4);
- reg += 4;
- }
-
- /* setup Address element address - separate address segment mode */
- mt76_wr(dev, MT_RRO_ADDR_ARRAY_BASE1,
- MT_RRO_ADDR_ARRAY_ELEM_ADDR_SEG_MODE);
-
- wed->wlan.ind_cmd.win_size = ffs(MT7996_RRO_WINDOW_MAX_LEN) - 6;
- wed->wlan.ind_cmd.particular_sid = MT7996_RRO_MAX_SESSION;
- wed->wlan.ind_cmd.particular_se_phys = dev->wed_rro.session.phy_addr;
- wed->wlan.ind_cmd.se_group_nums = MT7996_RRO_ADDR_ELEM_LEN;
- wed->wlan.ind_cmd.ack_sn_addr = MT_RRO_ACK_SN_CTRL;
-
- mt76_wr(dev, MT_RRO_IND_CMD_SIGNATURE_BASE0, 0x15010e00);
- mt76_set(dev, MT_RRO_IND_CMD_SIGNATURE_BASE1,
- MT_RRO_IND_CMD_SIGNATURE_BASE1_EN);
-
- /* particular session configure */
- /* use max session idx + 1 as particular session id */
- mt76_wr(dev, MT_RRO_PARTICULAR_CFG0, dev->wed_rro.session.phy_addr);
- mt76_wr(dev, MT_RRO_PARTICULAR_CFG1,
- MT_RRO_PARTICULAR_CONFG_EN |
- FIELD_PREP(MT_RRO_PARTICULAR_SID, MT7996_RRO_MAX_SESSION));
-
- /* interrupt enable */
- mt76_wr(dev, MT_RRO_HOST_INT_ENA,
- MT_RRO_HOST_INT_ENA_HOST_RRO_DONE_ENA);
-
- /* rro ind cmd queue init */
return mt7996_dma_rro_init(dev);
-#else
- return 0;
-#endif
}
static void mt7996_wed_rro_free(struct mt7996_dev *dev)
{
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
int i;
- if (!dev->has_rro)
- return;
-
- if (!mtk_wed_device_active(&dev->mt76.mmio.wed))
+ if (!mt7996_has_hwrro(dev))
return;
for (i = 0; i < ARRAY_SIZE(dev->wed_rro.ba_bitmap); i++) {
@@ -870,6 +1026,16 @@ static void mt7996_wed_rro_free(struct mt7996_dev *dev)
dev->wed_rro.addr_elem[i].phy_addr);
}
+ for (i = 0; i < ARRAY_SIZE(dev->wed_rro.msdu_pg); i++) {
+ if (!dev->wed_rro.msdu_pg[i].ptr)
+ continue;
+
+ dmam_free_coherent(dev->mt76.dma_dev,
+ MT7996_RRO_MSDU_PG_SIZE_PER_CR,
+ dev->wed_rro.msdu_pg[i].ptr,
+ dev->wed_rro.msdu_pg[i].phy_addr);
+ }
+
if (!dev->wed_rro.session.ptr)
return;
@@ -878,12 +1044,11 @@ static void mt7996_wed_rro_free(struct mt7996_dev *dev)
sizeof(struct mt7996_wed_rro_addr),
dev->wed_rro.session.ptr,
dev->wed_rro.session.phy_addr);
-#endif
}
static void mt7996_wed_rro_work(struct work_struct *work)
{
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ u32 val = FIELD_PREP(WED_RRO_ADDR_SIGNATURE_MASK, 0xff);
struct mt7996_dev *dev;
LIST_HEAD(list);
@@ -920,13 +1085,12 @@ static void mt7996_wed_rro_work(struct work_struct *work)
MT7996_RRO_WINDOW_MAX_LEN;
reset:
elem = ptr + elem_id * sizeof(*elem);
- elem->signature = 0xff;
+ elem->data |= cpu_to_le32(val);
}
mt7996_mcu_wed_rro_reset_sessions(dev, e->id);
out:
kfree(e);
}
-#endif
}
static int mt7996_variant_type_init(struct mt7996_dev *dev)
@@ -1321,7 +1485,6 @@ mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
eht_cap->has_eht = true;
eht_cap_elem->mac_cap_info[0] =
- IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
u8_encode_bits(IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454,
IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK);
@@ -1551,6 +1714,9 @@ void mt7996_unregister_device(struct mt7996_dev *dev)
mt7996_mcu_exit(dev);
mt7996_tx_token_put(dev);
mt7996_dma_cleanup(dev);
+ if (mt7996_has_hwrro(dev) &&
+ !mtk_wed_device_active(&dev->mt76.mmio.wed))
+ mt7996_rro_msdu_page_map_free(dev);
tasklet_disable(&dev->mt76.irq_tasklet);
mt76_free_device(&dev->mt76);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
index 226534490792..9501def3e0e3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
@@ -62,7 +62,7 @@ static struct mt76_wcid *mt7996_rx_get_wcid(struct mt7996_dev *dev,
int i;
wcid = mt76_wcid_ptr(dev, idx);
- if (!wcid)
+ if (!wcid || !wcid->sta)
return NULL;
if (!mt7996_band_valid(dev, band_idx))
@@ -229,7 +229,9 @@ static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
- struct mt7996_sta *msta = (struct mt7996_sta *)status->wcid;
+ struct mt7996_sta_link *msta_link = (void *)status->wcid;
+ struct mt7996_sta *msta = msta_link->sta;
+ struct ieee80211_bss_conf *link_conf;
__le32 *rxd = (__le32 *)skb->data;
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
@@ -246,8 +248,11 @@ static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
if (!msta || !msta->vif)
return -EINVAL;
- sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
+ sta = wcid_to_sta(status->wcid);
vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
+ link_conf = rcu_dereference(vif->link_conf[msta_link->wcid.link_id]);
+ if (!link_conf)
+ return -EINVAL;
/* store the info from RXD and ethhdr to avoid being overridden */
frame_control = le32_get_bits(rxd[8], MT_RXD8_FRAME_CONTROL);
@@ -260,7 +265,7 @@ static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
switch (frame_control & (IEEE80211_FCTL_TODS |
IEEE80211_FCTL_FROMDS)) {
case 0:
- ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
+ ether_addr_copy(hdr.addr3, link_conf->bssid);
break;
case IEEE80211_FCTL_FROMDS:
ether_addr_copy(hdr.addr3, eth_hdr->h_source);
@@ -797,6 +802,9 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
if (is_mt7990(&dev->mt76))
txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TID_ADDBA, tid));
+ else
+ txwi[7] |= cpu_to_le32(MT_TXD7_MAC_TXD);
+
tid = MT_TX_ADDBA;
} else if (ieee80211_is_mgmt(hdr->frame_control)) {
tid = MT_TX_NORMAL;
@@ -903,8 +911,12 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
IEEE80211_TX_CTRL_MLO_LINK);
mvif = vif ? (struct mt7996_vif *)vif->drv_priv : NULL;
- if (mvif)
- mlink = rcu_dereference(mvif->mt76.link[link_id]);
+ if (mvif) {
+ if (wcid->offchannel)
+ mlink = rcu_dereference(mvif->mt76.offchannel_link);
+ if (!mlink)
+ mlink = rcu_dereference(mvif->mt76.link[link_id]);
+ }
if (mlink) {
omac_idx = mlink->omac_idx;
@@ -956,8 +968,9 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
val |= MT_TXD5_TX_STATUS_HOST;
txwi[5] = cpu_to_le32(val);
- val = MT_TXD6_DAS;
- if (q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0)
+ val = MT_TXD6_DAS | MT_TXD6_VTA;
+ if ((q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0) ||
+ skb->protocol == cpu_to_be16(ETH_P_PAE))
val |= MT_TXD6_DIS_MAT;
if (is_mt7996(&dev->mt76))
@@ -1025,10 +1038,10 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
struct ieee80211_key_conf *key = info->control.hw_key;
struct ieee80211_vif *vif = info->control.vif;
- struct mt76_connac_txp_common *txp;
struct mt76_txwi_cache *t;
int id, i, pid, nbuf = tx_info->nbuf - 1;
bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
+ __le32 *ptr = (__le32 *)txwi_ptr;
u8 *txwi = (u8 *)txwi_ptr;
if (unlikely(tx_info->skb->len <= ETH_HLEN))
@@ -1044,6 +1057,41 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (id < 0)
return id;
+ /* Since the rules of HW MLD address translation are not fully
+ * compatible with 802.11 EAPOL frames, do the translation in
+ * software
+ */
+ if (tx_info->skb->protocol == cpu_to_be16(ETH_P_PAE) && sta->mlo) {
+ struct ieee80211_hdr *hdr = (void *)tx_info->skb->data;
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_link_sta *link_sta;
+
+ link_conf = rcu_dereference(vif->link_conf[wcid->link_id]);
+ if (!link_conf)
+ return -EINVAL;
+
+ link_sta = rcu_dereference(sta->link[wcid->link_id]);
+ if (!link_sta)
+ return -EINVAL;
+
+ dma_sync_single_for_cpu(mdev->dma_dev, tx_info->buf[1].addr,
+ tx_info->buf[1].len, DMA_TO_DEVICE);
+
+ memcpy(hdr->addr1, link_sta->addr, ETH_ALEN);
+ memcpy(hdr->addr2, link_conf->addr, ETH_ALEN);
+ if (ieee80211_has_a4(hdr->frame_control)) {
+ memcpy(hdr->addr3, sta->addr, ETH_ALEN);
+ memcpy(hdr->addr4, vif->addr, ETH_ALEN);
+ } else if (ieee80211_has_tods(hdr->frame_control)) {
+ memcpy(hdr->addr3, sta->addr, ETH_ALEN);
+ } else if (ieee80211_has_fromds(hdr->frame_control)) {
+ memcpy(hdr->addr3, vif->addr, ETH_ALEN);
+ }
+
+ dma_sync_single_for_device(mdev->dma_dev, tx_info->buf[1].addr,
+ tx_info->buf[1].len, DMA_TO_DEVICE);
+ }
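+ /* Address layout produced by the copies above (a restatement, not an
+ * extra step): addr1/addr2 carry the per-link STA/BSS addresses, while
+ * addr3 (and addr4 for 4-address frames) keep the MLD-level sta->addr
+ * / vif->addr according to the To/FromDS bits.
+ */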
+
pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
memset(txwi_ptr, 0, MT_TXD_SIZE);
/* Transmit non-QoS data with an 802.11 header; the host needs to fill the txd */
@@ -1051,46 +1099,76 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
pid, qid, 0);
- txp = (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE);
- for (i = 0; i < nbuf; i++) {
- u16 len;
+ /* MT7996 and MT7992 require the driver to provide the MAC TXP for
+ * AddBA requests
+ */
+ if (le32_to_cpu(ptr[7]) & MT_TXD7_MAC_TXD) {
+ u32 val;
- len = FIELD_PREP(MT_TXP_BUF_LEN, tx_info->buf[i + 1].len);
+ ptr = (__le32 *)(txwi + MT_TXD_SIZE);
+ memset((void *)ptr, 0, sizeof(struct mt76_connac_fw_txp));
+
+ val = FIELD_PREP(MT_TXP0_TOKEN_ID0, id) |
+ MT_TXP0_TOKEN_ID0_VALID_MASK;
+ ptr[0] = cpu_to_le32(val);
+
+ val = FIELD_PREP(MT_TXP1_TID_ADDBA,
+ tx_info->skb->priority &
+ IEEE80211_QOS_CTL_TID_MASK);
+ ptr[1] = cpu_to_le32(val);
+ ptr[2] = cpu_to_le32(tx_info->buf[1].addr & 0xFFFFFFFF);
+
+ val = FIELD_PREP(MT_TXP_BUF_LEN, tx_info->buf[1].len) |
+ MT_TXP3_ML0_MASK;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- len |= FIELD_PREP(MT_TXP_DMA_ADDR_H,
- tx_info->buf[i + 1].addr >> 32);
+ val |= FIELD_PREP(MT_TXP3_DMA_ADDR_H,
+ tx_info->buf[1].addr >> 32);
#endif
+ ptr[3] = cpu_to_le32(val);
+ } else {
+ struct mt76_connac_txp_common *txp;
- txp->fw.buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
- txp->fw.len[i] = cpu_to_le16(len);
- }
- txp->fw.nbuf = nbuf;
+ txp = (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE);
+ for (i = 0; i < nbuf; i++) {
+ u16 len;
- txp->fw.flags = cpu_to_le16(MT_CT_INFO_FROM_HOST);
+ len = FIELD_PREP(MT_TXP_BUF_LEN, tx_info->buf[i + 1].len);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ len |= FIELD_PREP(MT_TXP_DMA_ADDR_H,
+ tx_info->buf[i + 1].addr >> 32);
+#endif
- if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
- txp->fw.flags |= cpu_to_le16(MT_CT_INFO_APPLY_TXD);
+ txp->fw.buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
+ txp->fw.len[i] = cpu_to_le16(len);
+ }
+ txp->fw.nbuf = nbuf;
- if (!key)
- txp->fw.flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
+ txp->fw.flags = cpu_to_le16(MT_CT_INFO_FROM_HOST);
- if (!is_8023 && mt7996_tx_use_mgmt(dev, tx_info->skb))
- txp->fw.flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
+ if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
+ txp->fw.flags |= cpu_to_le16(MT_CT_INFO_APPLY_TXD);
- if (vif) {
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt76_vif_link *mlink = NULL;
+ if (!key)
+ txp->fw.flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
- if (wcid->offchannel)
- mlink = rcu_dereference(mvif->mt76.offchannel_link);
- if (!mlink)
- mlink = rcu_dereference(mvif->mt76.link[wcid->link_id]);
+ if (!is_8023 && mt7996_tx_use_mgmt(dev, tx_info->skb))
+ txp->fw.flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
- txp->fw.bss_idx = mlink ? mlink->idx : mvif->deflink.mt76.idx;
- }
+ if (vif) {
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt76_vif_link *mlink = NULL;
- txp->fw.token = cpu_to_le16(id);
- txp->fw.rept_wds_wcid = cpu_to_le16(sta ? wcid->idx : 0xfff);
+ if (wcid->offchannel)
+ mlink = rcu_dereference(mvif->mt76.offchannel_link);
+ if (!mlink)
+ mlink = rcu_dereference(mvif->mt76.link[wcid->link_id]);
+
+ txp->fw.bss_idx = mlink ? mlink->idx : mvif->deflink.mt76.idx;
+ }
+
+ txp->fw.token = cpu_to_le16(id);
+ txp->fw.rept_wds_wcid = cpu_to_le16(sta ? wcid->idx : 0xfff);
+ }
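+ /* Layout sketch of the MAC TXP built in the branch above, per the
+ * MT_TXP* masks: word 0 holds the token id and valid bit, word 1 the
+ * AddBA TID, word 2 the low 32 bits of the buffer DMA address, and
+ * word 3 the buffer length, the ML0 flag and (on 64-bit DMA) the high
+ * address bits.
+ */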
tx_info->skb = NULL;
@@ -1179,8 +1257,14 @@ mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t,
txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
if (link_sta) {
wcid_idx = wcid->idx;
- if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
- mt7996_tx_check_aggr(link_sta, wcid, t->skb);
+ if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE))) {
+ struct mt7996_sta *msta;
+
+ /* AMPDU state is stored in the primary link */
+ msta = (void *)link_sta->sta->drv_priv;
+ mt7996_tx_check_aggr(link_sta, &msta->deflink.wcid,
+ t->skb);
+ }
} else {
wcid_idx = le32_get_bits(txwi[9], MT_TXD9_WLAN_IDX);
}
@@ -1238,19 +1322,38 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
info = le32_to_cpu(*cur_info);
if (info & MT_TXFREE_INFO_PAIR) {
struct ieee80211_sta *sta;
+ unsigned long valid_links;
+ struct mt7996_sta *msta;
+ unsigned int id;
u16 idx;
idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info);
wcid = mt76_wcid_ptr(dev, idx);
sta = wcid_to_sta(wcid);
- if (!sta)
+ if (!sta) {
+ link_sta = NULL;
goto next;
+ }
link_sta = rcu_dereference(sta->link[wcid->link_id]);
if (!link_sta)
goto next;
- mt76_wcid_add_poll(&dev->mt76, wcid);
+ msta = (struct mt7996_sta *)sta->drv_priv;
+ valid_links = sta->valid_links ?: BIT(0);
+
+ /* For an MLD STA, add every link's wcid to the sta_poll_list */
+ for_each_set_bit(id, &valid_links,
+ IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct mt7996_sta_link *msta_link;
+
+ msta_link = rcu_dereference(msta->link[id]);
+ if (!msta_link)
+ continue;
+
+ mt76_wcid_add_poll(&dev->mt76,
+ &msta_link->wcid);
+ }
next:
/* ver 7 has a new DW with pair = 1, skip it */
if (ver == 7 && ((void *)(cur_info + 1) < end) &&
@@ -1568,6 +1671,363 @@ void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
}
}
+static struct mt7996_msdu_page *
+mt7996_msdu_page_get_from_cache(struct mt7996_dev *dev)
+{
+ struct mt7996_msdu_page *p = NULL;
+
+ spin_lock(&dev->wed_rro.lock);
+
+ if (!list_empty(&dev->wed_rro.page_cache)) {
+ p = list_first_entry(&dev->wed_rro.page_cache,
+ struct mt7996_msdu_page, list);
+ if (p)
+ list_del(&p->list);
+ }
+
+ spin_unlock(&dev->wed_rro.lock);
+
+ return p;
+}
+
+static struct mt7996_msdu_page *mt7996_msdu_page_get(struct mt7996_dev *dev)
+{
+ struct mt7996_msdu_page *p;
+
+ p = mt7996_msdu_page_get_from_cache(dev);
+ if (!p) {
+ p = kzalloc(L1_CACHE_ALIGN(sizeof(*p)), GFP_ATOMIC);
+ if (p)
+ INIT_LIST_HEAD(&p->list);
+ }
+
+ return p;
+}
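+/* Minimal usage sketch (assuming an atomic RX-path caller, hence the
+ * GFP_ATOMIC above):
+ *
+ *	p = mt7996_msdu_page_get(dev);
+ *	if (p) {
+ *		p->buf = buf;
+ *		p->dma_addr = dma_addr;
+ *		...
+ *		mt7996_msdu_page_put_to_cache(dev, p);
+ *	}
+ */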
+
+static void mt7996_msdu_page_put_to_cache(struct mt7996_dev *dev,
+ struct mt7996_msdu_page *p)
+{
+ if (p->buf) {
+ mt76_put_page_pool_buf(p->buf, false);
+ p->buf = NULL;
+ }
+
+ spin_lock(&dev->wed_rro.lock);
+ list_add(&p->list, &dev->wed_rro.page_cache);
+ spin_unlock(&dev->wed_rro.lock);
+}
+
+static void mt7996_msdu_page_free_cache(struct mt7996_dev *dev)
+{
+ while (true) {
+ struct mt7996_msdu_page *p;
+
+ p = mt7996_msdu_page_get_from_cache(dev);
+ if (!p)
+ break;
+
+ if (p->buf)
+ mt76_put_page_pool_buf(p->buf, false);
+
+ kfree(p);
+ }
+}
+
+static u32 mt7996_msdu_page_hash_from_addr(dma_addr_t dma_addr)
+{
+ u32 val = 0;
+ int i = 0;
+
+ while (dma_addr) {
+ val += (u32)((dma_addr & 0xff) + i) % MT7996_RRO_MSDU_PG_HASH_SIZE;
+ dma_addr >>= 8;
+ i += 13;
+ }
+
+ return val % MT7996_RRO_MSDU_PG_HASH_SIZE;
+}
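+/* Worked example (MT7996_RRO_MSDU_PG_HASH_SIZE is a driver constant; 32 is
+ * assumed here purely for illustration): dma_addr = 0x0102 yields
+ * (0x02 + 0) % 32 = 2 for the first byte and (0x01 + 13) % 32 = 14 for the
+ * second, so val = 16 and the bucket is 16 % 32 = 16.
+ */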
+
+static struct mt7996_msdu_page *
+mt7996_rro_msdu_page_get(struct mt7996_dev *dev, dma_addr_t dma_addr)
+{
+ u32 hash = mt7996_msdu_page_hash_from_addr(dma_addr);
+ struct mt7996_msdu_page *p, *tmp, *addr = NULL;
+
+ spin_lock(&dev->wed_rro.lock);
+
+ list_for_each_entry_safe(p, tmp, &dev->wed_rro.page_map[hash],
+ list) {
+ if (p->dma_addr == dma_addr) {
+ list_del(&p->list);
+ addr = p;
+ break;
+ }
+ }
+
+ spin_unlock(&dev->wed_rro.lock);
+
+ return addr;
+}
+
+static void mt7996_rx_token_put(struct mt7996_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->mt76.rx_token_size; i++) {
+ struct mt76_txwi_cache *t;
+
+ t = mt76_rx_token_release(&dev->mt76, i);
+ if (!t || !t->ptr)
+ continue;
+
+ mt76_put_page_pool_buf(t->ptr, false);
+ t->dma_addr = 0;
+ t->ptr = NULL;
+
+ mt76_put_rxwi(&dev->mt76, t);
+ }
+}
+
+void mt7996_rro_msdu_page_map_free(struct mt7996_dev *dev)
+{
+ struct mt7996_msdu_page *p, *tmp;
+ int i;
+
+ local_bh_disable();
+
+ for (i = 0; i < ARRAY_SIZE(dev->wed_rro.page_map); i++) {
+ list_for_each_entry_safe(p, tmp, &dev->wed_rro.page_map[i],
+ list) {
+ list_del_init(&p->list);
+ if (p->buf)
+ mt76_put_page_pool_buf(p->buf, false);
+ kfree(p);
+ }
+ }
+ mt7996_msdu_page_free_cache(dev);
+
+ local_bh_enable();
+
+ mt7996_rx_token_put(dev);
+}
+
+int mt7996_rro_msdu_page_add(struct mt76_dev *mdev, struct mt76_queue *q,
+ dma_addr_t dma_addr, void *data)
+{
+ struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+ struct mt7996_msdu_page_info *pinfo = data;
+ struct mt7996_msdu_page *p;
+ u32 hash;
+
+ pinfo->data |= cpu_to_le32(FIELD_PREP(MSDU_PAGE_INFO_OWNER_MASK, 1));
+ p = mt7996_msdu_page_get(dev);
+ if (!p)
+ return -ENOMEM;
+
+ p->buf = data;
+ p->dma_addr = dma_addr;
+ p->q = q;
+
+ hash = mt7996_msdu_page_hash_from_addr(dma_addr);
+
+ spin_lock(&dev->wed_rro.lock);
+ list_add_tail(&p->list, &dev->wed_rro.page_map[hash]);
+ spin_unlock(&dev->wed_rro.lock);
+
+ return 0;
+}
+
+static struct mt7996_wed_rro_addr *
+mt7996_rro_addr_elem_get(struct mt7996_dev *dev, u16 session_id, u16 seq_num)
+{
+ u32 idx = 0;
+ void *addr;
+
+ if (session_id == MT7996_RRO_MAX_SESSION) {
+ addr = dev->wed_rro.session.ptr;
+ } else {
+ idx = session_id / MT7996_RRO_BA_BITMAP_SESSION_SIZE;
+ addr = dev->wed_rro.addr_elem[idx].ptr;
+
+ idx = session_id % MT7996_RRO_BA_BITMAP_SESSION_SIZE;
+ idx = idx * MT7996_RRO_WINDOW_MAX_LEN;
+ }
+ idx += seq_num % MT7996_RRO_WINDOW_MAX_LEN;
+
+ return addr + idx * sizeof(struct mt7996_wed_rro_addr);
+}
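+/* Index sketch for the lookup above: a non-particular session_id resolves to
+ * addr_elem[session_id / MT7996_RRO_BA_BITMAP_SESSION_SIZE], at element
+ * (session_id % MT7996_RRO_BA_BITMAP_SESSION_SIZE) * MT7996_RRO_WINDOW_MAX_LEN
+ * + (seq_num % MT7996_RRO_WINDOW_MAX_LEN); the particular session
+ * (MT7996_RRO_MAX_SESSION) uses the dedicated session buffer instead.
+ */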
+
+#define MT996_RRO_SN_MASK GENMASK(11, 0)
+
+void mt7996_rro_rx_process(struct mt76_dev *mdev, void *data)
+{
+ struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+ struct mt76_wed_rro_ind *cmd = (struct mt76_wed_rro_ind *)data;
+ u32 cmd_data0 = le32_to_cpu(cmd->data0);
+ u32 cmd_data1 = le32_to_cpu(cmd->data1);
+ u8 ind_reason = FIELD_GET(RRO_IND_DATA0_IND_REASON_MASK, cmd_data0);
+ u16 start_seq = FIELD_GET(RRO_IND_DATA0_START_SEQ_MASK, cmd_data0);
+ u16 seq_id = FIELD_GET(RRO_IND_DATA0_SEQ_ID_MASK, cmd_data0);
+ u16 ind_count = FIELD_GET(RRO_IND_DATA1_IND_COUNT_MASK, cmd_data1);
+ struct mt7996_msdu_page_info *pinfo = NULL;
+ struct mt7996_msdu_page *p = NULL;
+ int i, seq_num = 0;
+
+ for (i = 0; i < ind_count; i++) {
+ struct mt7996_wed_rro_addr *e;
+ struct mt76_rx_status *status;
+ struct mt7996_rro_hif *rxd;
+ int j, len, qid, data_len;
+ struct mt76_txwi_cache *t;
+ dma_addr_t dma_addr = 0;
+ u16 rx_token_id, count;
+ struct mt76_queue *q;
+ struct sk_buff *skb;
+ u32 info = 0, data;
+ u8 signature;
+ void *buf;
+ bool ls;
+
+ seq_num = FIELD_GET(MT996_RRO_SN_MASK, start_seq + i);
+ e = mt7996_rro_addr_elem_get(dev, seq_id, seq_num);
+ data = le32_to_cpu(e->data);
+ signature = FIELD_GET(WED_RRO_ADDR_SIGNATURE_MASK, data);
+ if (signature != (seq_num / MT7996_RRO_WINDOW_MAX_LEN)) {
+ u32 val = FIELD_PREP(WED_RRO_ADDR_SIGNATURE_MASK,
+ 0xff);
+
+ e->data |= cpu_to_le32(val);
+ goto update_ack_seq_num;
+ }
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ dma_addr = FIELD_GET(WED_RRO_ADDR_HEAD_HIGH_MASK, data);
+ dma_addr <<= 32;
+#endif
+ dma_addr |= le32_to_cpu(e->head_low);
+
+ count = FIELD_GET(WED_RRO_ADDR_COUNT_MASK, data);
+ for (j = 0; j < count; j++) {
+ if (!p) {
+ p = mt7996_rro_msdu_page_get(dev, dma_addr);
+ if (!p)
+ continue;
+
+ dma_sync_single_for_cpu(mdev->dma_dev, p->dma_addr,
+ SKB_WITH_OVERHEAD(p->q->buf_size),
+ page_pool_get_dma_dir(p->q->page_pool));
+ pinfo = (struct mt7996_msdu_page_info *)p->buf;
+ }
+
+ rxd = &pinfo->rxd[j % MT7996_MAX_HIF_RXD_IN_PG];
+ len = FIELD_GET(RRO_HIF_DATA1_SDL_MASK,
+ le32_to_cpu(rxd->data1));
+
+ rx_token_id = FIELD_GET(RRO_HIF_DATA4_RX_TOKEN_ID_MASK,
+ le32_to_cpu(rxd->data4));
+ t = mt76_rx_token_release(mdev, rx_token_id);
+ if (!t)
+ goto next_page;
+
+ qid = t->qid;
+ buf = t->ptr;
+ q = &mdev->q_rx[qid];
+ dma_sync_single_for_cpu(mdev->dma_dev, t->dma_addr,
+ SKB_WITH_OVERHEAD(q->buf_size),
+ page_pool_get_dma_dir(q->page_pool));
+
+ t->dma_addr = 0;
+ t->ptr = NULL;
+ mt76_put_rxwi(mdev, t);
+ if (!buf)
+ goto next_page;
+
+ if (q->rx_head)
+ data_len = q->buf_size;
+ else
+ data_len = SKB_WITH_OVERHEAD(q->buf_size);
+
+ if (data_len < len + q->buf_offset) {
+ dev_kfree_skb(q->rx_head);
+ mt76_put_page_pool_buf(buf, false);
+ q->rx_head = NULL;
+ goto next_page;
+ }
+
+ ls = FIELD_GET(RRO_HIF_DATA1_LS_MASK,
+ le32_to_cpu(rxd->data1));
+ if (q->rx_head) {
+ /* TODO: Take into account non-linear skb. */
+ mt76_put_page_pool_buf(buf, false);
+ if (ls) {
+ dev_kfree_skb(q->rx_head);
+ q->rx_head = NULL;
+ }
+ goto next_page;
+ }
+
+ if (ls && !mt7996_rx_check(mdev, buf, len))
+ goto next_page;
+
+ skb = build_skb(buf, q->buf_size);
+ if (!skb)
+ goto next_page;
+
+ skb_reserve(skb, q->buf_offset);
+ skb_mark_for_recycle(skb);
+ __skb_put(skb, len);
+
+ if (ind_reason == 1 || ind_reason == 2) {
+ dev_kfree_skb(skb);
+ goto next_page;
+ }
+
+ if (!ls) {
+ q->rx_head = skb;
+ goto next_page;
+ }
+
+ status = (struct mt76_rx_status *)skb->cb;
+ if (seq_id != MT7996_RRO_MAX_SESSION)
+ status->aggr = true;
+
+ mt7996_queue_rx_skb(mdev, qid, skb, &info);
+next_page:
+ if ((j + 1) % MT7996_MAX_HIF_RXD_IN_PG == 0) {
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ dma_addr =
+ FIELD_GET(MSDU_PAGE_INFO_PG_HIGH_MASK,
+ le32_to_cpu(pinfo->data));
+ dma_addr <<= 32;
+ dma_addr |= le32_to_cpu(pinfo->pg_low);
+#else
+ dma_addr = le32_to_cpu(pinfo->pg_low);
+#endif
+ mt7996_msdu_page_put_to_cache(dev, p);
+ p = NULL;
+ }
+ }
+
+update_ack_seq_num:
+ if ((i + 1) % 4 == 0)
+ mt76_wr(dev, MT_RRO_ACK_SN_CTRL,
+ FIELD_PREP(MT_RRO_ACK_SN_CTRL_SESSION_MASK,
+ seq_id) |
+ FIELD_PREP(MT_RRO_ACK_SN_CTRL_SN_MASK,
+ seq_num));
+ if (p) {
+ mt7996_msdu_page_put_to_cache(dev, p);
+ p = NULL;
+ }
+ }
+
+ /* Update ack_seq_num for the remaining addr_elem entries */
+ if (i % 4)
+ mt76_wr(dev, MT_RRO_ACK_SN_CTRL,
+ FIELD_PREP(MT_RRO_ACK_SN_CTRL_SESSION_MASK, seq_id) |
+ FIELD_PREP(MT_RRO_ACK_SN_CTRL_SN_MASK, seq_num));
+}
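+/* The ACK SN write-back in the loop above is batched: one mt76_wr() per four
+ * processed indications plus a final write for any remainder, so e.g.
+ * ind_count = 10 issues writes after elements 4, 8 and 10 (the last from the
+ * remainder branch).
+ */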
+
void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy)
{
struct mt7996_dev *dev = phy->dev;
@@ -1694,17 +2154,38 @@ mt7996_wait_reset_state(struct mt7996_dev *dev, u32 state)
static void
mt7996_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
- struct ieee80211_hw *hw = priv;
+ struct ieee80211_bss_conf *link_conf;
+ struct mt7996_phy *phy = priv;
+ struct mt7996_dev *dev = phy->dev;
+ unsigned int link_id;
+
switch (vif->type) {
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_AP:
- mt7996_mcu_add_beacon(hw, vif, &vif->bss_conf);
break;
default:
- break;
+ return;
}
+
+ for_each_vif_active_link(vif, link_conf, link_id) {
+ struct mt7996_vif_link *link;
+
+ link = mt7996_vif_link(dev, vif, link_id);
+ if (!link || link->phy != phy)
+ continue;
+
+ mt7996_mcu_add_beacon(dev->mt76.hw, vif, link_conf,
+ link_conf->enable_beacon);
+ }
+}
+
+void mt7996_mac_update_beacons(struct mt7996_phy *phy)
+{
+ ieee80211_iterate_active_interfaces(phy->mt76->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7996_update_vif_beacon, phy);
}
static void
@@ -1712,25 +2193,15 @@ mt7996_update_beacons(struct mt7996_dev *dev)
{
struct mt76_phy *phy2, *phy3;
- ieee80211_iterate_active_interfaces(dev->mt76.hw,
- IEEE80211_IFACE_ITER_RESUME_ALL,
- mt7996_update_vif_beacon, dev->mt76.hw);
+ mt7996_mac_update_beacons(&dev->phy);
phy2 = dev->mt76.phys[MT_BAND1];
- if (!phy2)
- return;
-
- ieee80211_iterate_active_interfaces(phy2->hw,
- IEEE80211_IFACE_ITER_RESUME_ALL,
- mt7996_update_vif_beacon, phy2->hw);
+ if (phy2)
+ mt7996_mac_update_beacons(phy2->priv);
phy3 = dev->mt76.phys[MT_BAND2];
- if (!phy3)
- return;
-
- ieee80211_iterate_active_interfaces(phy3->hw,
- IEEE80211_IFACE_ITER_RESUME_ALL,
- mt7996_update_vif_beacon, phy3->hw);
+ if (phy3)
+ mt7996_mac_update_beacons(phy3->priv);
}
void mt7996_tx_token_put(struct mt7996_dev *dev)
@@ -1750,13 +2221,10 @@ void mt7996_tx_token_put(struct mt7996_dev *dev)
static int
mt7996_mac_restart(struct mt7996_dev *dev)
{
- struct mt7996_phy *phy2, *phy3;
struct mt76_dev *mdev = &dev->mt76;
+ struct mt7996_phy *phy;
int i, ret;
- phy2 = mt7996_phy2(dev);
- phy3 = mt7996_phy3(dev);
-
if (dev->hif2) {
mt76_wr(dev, MT_INT1_MASK_CSR, 0x0);
mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
@@ -1768,20 +2236,14 @@ mt7996_mac_restart(struct mt7996_dev *dev)
mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0);
}
- set_bit(MT76_RESET, &dev->mphy.state);
set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ mt7996_for_each_phy(dev, phy)
+ set_bit(MT76_RESET, &phy->mt76->state);
wake_up(&dev->mt76.mcu.wait);
- if (phy2)
- set_bit(MT76_RESET, &phy2->mt76->state);
- if (phy3)
- set_bit(MT76_RESET, &phy3->mt76->state);
/* lock/unlock all queues to ensure that no tx is pending */
- mt76_txq_schedule_all(&dev->mphy);
- if (phy2)
- mt76_txq_schedule_all(phy2->mt76);
- if (phy3)
- mt76_txq_schedule_all(phy3->mt76);
+ mt7996_for_each_phy(dev, phy)
+ mt76_txq_schedule_all(phy->mt76);
/* disable all tx/rx napi */
mt76_worker_disable(&dev->mt76.tx_worker);
@@ -1833,42 +2295,57 @@ mt7996_mac_restart(struct mt7996_dev *dev)
if (ret)
goto out;
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
+ mt7996_has_hwrro(dev)) {
+ u32 wed_irq_mask = dev->mt76.mmio.irqmask |
+ MT_INT_TX_DONE_BAND2;
+
+ mt7996_rro_hw_init(dev);
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ if (mt76_queue_is_wed_rro_ind(&dev->mt76.q_rx[i]) ||
+ mt76_queue_is_wed_rro_msdu_pg(&dev->mt76.q_rx[i]))
+ mt76_queue_rx_reset(dev, i);
+ }
+
+ mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
+ mtk_wed_device_start_hw_rro(&dev->mt76.mmio.wed, wed_irq_mask,
+ false);
+ mt7996_irq_enable(dev, wed_irq_mask);
+ mt7996_irq_disable(dev, 0);
+ }
+
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2)) {
+ mt76_wr(dev, MT_INT_PCIE1_MASK_CSR,
+ MT_INT_TX_RX_DONE_EXT);
+ mtk_wed_device_start(&dev->mt76.mmio.wed_hif2,
+ MT_INT_TX_RX_DONE_EXT);
+ }
+
/* set the necessary init items */
ret = mt7996_mcu_set_eeprom(dev);
if (ret)
goto out;
mt7996_mac_init(dev);
- mt7996_init_txpower(&dev->phy);
- mt7996_init_txpower(phy2);
- mt7996_init_txpower(phy3);
+ mt7996_for_each_phy(dev, phy)
+ mt7996_init_txpower(phy);
ret = mt7996_txbf_init(dev);
+ if (ret)
+ goto out;
- if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
- ret = mt7996_run(&dev->phy);
- if (ret)
- goto out;
- }
-
- if (phy2 && test_bit(MT76_STATE_RUNNING, &phy2->mt76->state)) {
- ret = mt7996_run(phy2);
- if (ret)
- goto out;
- }
+ mt7996_for_each_phy(dev, phy) {
+ if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
+ continue;
- if (phy3 && test_bit(MT76_STATE_RUNNING, &phy3->mt76->state)) {
- ret = mt7996_run(phy3);
+ ret = mt7996_run(&dev->phy);
if (ret)
goto out;
}
out:
/* reset done */
- clear_bit(MT76_RESET, &dev->mphy.state);
- if (phy2)
- clear_bit(MT76_RESET, &phy2->mt76->state);
- if (phy3)
- clear_bit(MT76_RESET, &phy3->mt76->state);
+ mt7996_for_each_phy(dev, phy)
+ clear_bit(MT76_RESET, &phy->mt76->state);
napi_enable(&dev->mt76.tx_napi);
local_bh_disable();
@@ -1880,74 +2357,121 @@ out:
}
static void
+mt7996_mac_reset_sta_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_dev *dev = data;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(msta->link); i++) {
+ struct mt7996_sta_link *msta_link = NULL;
+
+ msta_link = rcu_replace_pointer(msta->link[i], msta_link,
+ lockdep_is_held(&dev->mt76.mutex));
+ if (!msta_link)
+ continue;
+
+ mt7996_mac_sta_deinit_link(dev, msta_link);
+
+ if (msta->deflink_id == i) {
+ msta->deflink_id = IEEE80211_LINK_UNSPECIFIED;
+ continue;
+ }
+
+ kfree_rcu(msta_link, rcu_head);
+ }
+}
+
+static void
+mt7996_mac_reset_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
+ struct mt76_vif_data *mvif = mlink->mvif;
+ struct mt7996_dev *dev = data;
+ int i;
+
+ rcu_read_lock();
+ for (i = 0; i < ARRAY_SIZE(mvif->link); i++) {
+ mlink = mt76_dereference(mvif->link[i], &dev->mt76);
+ if (!mlink || mlink == (struct mt76_vif_link *)vif->drv_priv)
+ continue;
+
+ rcu_assign_pointer(mvif->link[i], NULL);
+ kfree_rcu(mlink, rcu_head);
+ }
+ rcu_read_unlock();
+}
+
+static void
mt7996_mac_full_reset(struct mt7996_dev *dev)
{
- struct mt7996_phy *phy2, *phy3;
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct mt7996_phy *phy;
+ LIST_HEAD(list);
int i;
- phy2 = mt7996_phy2(dev);
- phy3 = mt7996_phy3(dev);
dev->recovery.hw_full_reset = true;
wake_up(&dev->mt76.mcu.wait);
- ieee80211_stop_queues(mt76_hw(dev));
- if (phy2)
- ieee80211_stop_queues(phy2->mt76->hw);
- if (phy3)
- ieee80211_stop_queues(phy3->mt76->hw);
+ ieee80211_stop_queues(hw);
cancel_work_sync(&dev->wed_rro.work);
- cancel_delayed_work_sync(&dev->mphy.mac_work);
- if (phy2)
- cancel_delayed_work_sync(&phy2->mt76->mac_work);
- if (phy3)
- cancel_delayed_work_sync(&phy3->mt76->mac_work);
+ mt7996_for_each_phy(dev, phy)
+ cancel_delayed_work_sync(&phy->mt76->mac_work);
mutex_lock(&dev->mt76.mutex);
for (i = 0; i < 10; i++) {
if (!mt7996_mac_restart(dev))
break;
}
- mutex_unlock(&dev->mt76.mutex);
if (i == 10)
dev_err(dev->mt76.dev, "chip full reset failed\n");
- ieee80211_restart_hw(mt76_hw(dev));
- if (phy2)
- ieee80211_restart_hw(phy2->mt76->hw);
- if (phy3)
- ieee80211_restart_hw(phy3->mt76->hw);
+ mt7996_for_each_phy(dev, phy)
+ phy->omac_mask = 0;
- ieee80211_wake_queues(mt76_hw(dev));
- if (phy2)
- ieee80211_wake_queues(phy2->mt76->hw);
- if (phy3)
- ieee80211_wake_queues(phy3->mt76->hw);
+ ieee80211_iterate_stations_atomic(hw, mt7996_mac_reset_sta_iter, dev);
+ ieee80211_iterate_active_interfaces_atomic(hw,
+ IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER,
+ mt7996_mac_reset_vif_iter, dev);
+ mt76_reset_device(&dev->mt76);
+
+ INIT_LIST_HEAD(&dev->sta_rc_list);
+ INIT_LIST_HEAD(&dev->twt_list);
+ spin_lock_bh(&dev->wed_rro.lock);
+ list_splice_init(&dev->wed_rro.poll_list, &list);
+ spin_unlock_bh(&dev->wed_rro.lock);
+
+ while (!list_empty(&list)) {
+ struct mt7996_wed_rro_session_id *e;
+
+ e = list_first_entry(&list, struct mt7996_wed_rro_session_id,
+ list);
+ list_del_init(&e->list);
+ kfree(e);
+ }
+
+ i = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7996_WTBL_STA);
+ dev->mt76.global_wcid.idx = i;
dev->recovery.hw_full_reset = false;
- ieee80211_queue_delayed_work(mt76_hw(dev),
- &dev->mphy.mac_work,
- MT7996_WATCHDOG_TIME);
- if (phy2)
- ieee80211_queue_delayed_work(phy2->mt76->hw,
- &phy2->mt76->mac_work,
- MT7996_WATCHDOG_TIME);
- if (phy3)
- ieee80211_queue_delayed_work(phy3->mt76->hw,
- &phy3->mt76->mac_work,
- MT7996_WATCHDOG_TIME);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ ieee80211_restart_hw(mt76_hw(dev));
}
void mt7996_mac_reset_work(struct work_struct *work)
{
- struct mt7996_phy *phy2, *phy3;
+ struct ieee80211_hw *hw;
struct mt7996_dev *dev;
+ struct mt7996_phy *phy;
int i;
dev = container_of(work, struct mt7996_dev, reset_work);
- phy2 = mt7996_phy2(dev);
- phy3 = mt7996_phy3(dev);
+ hw = mt76_hw(dev);
/* chip full reset */
if (dev->recovery.restart) {
@@ -1978,7 +2502,7 @@ void mt7996_mac_reset_work(struct work_struct *work)
return;
dev_info(dev->mt76.dev,"\n%s L1 SER recovery start.",
- wiphy_name(dev->mt76.hw->wiphy));
+ wiphy_name(hw->wiphy));
if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2))
mtk_wed_device_stop(&dev->mt76.mmio.wed_hif2);
@@ -1987,25 +2511,19 @@ void mt7996_mac_reset_work(struct work_struct *work)
mtk_wed_device_stop(&dev->mt76.mmio.wed);
ieee80211_stop_queues(mt76_hw(dev));
- if (phy2)
- ieee80211_stop_queues(phy2->mt76->hw);
- if (phy3)
- ieee80211_stop_queues(phy3->mt76->hw);
set_bit(MT76_RESET, &dev->mphy.state);
set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ mt76_abort_scan(&dev->mt76);
wake_up(&dev->mt76.mcu.wait);
cancel_work_sync(&dev->wed_rro.work);
- cancel_delayed_work_sync(&dev->mphy.mac_work);
- if (phy2) {
- set_bit(MT76_RESET, &phy2->mt76->state);
- cancel_delayed_work_sync(&phy2->mt76->mac_work);
- }
- if (phy3) {
- set_bit(MT76_RESET, &phy3->mt76->state);
- cancel_delayed_work_sync(&phy3->mt76->mac_work);
+ mt7996_for_each_phy(dev, phy) {
+ mt76_abort_roc(phy->mt76);
+ set_bit(MT76_RESET, &phy->mt76->state);
+ cancel_delayed_work_sync(&phy->mt76->mac_work);
}
+
mt76_worker_disable(&dev->mt76.tx_worker);
mt76_for_each_q_rx(&dev->mt76, i) {
if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
@@ -2036,15 +2554,14 @@ void mt7996_mac_reset_work(struct work_struct *work)
/* enable DMA Tx/Rx and interrupt */
mt7996_dma_start(dev, false, false);
+ if (!is_mt7996(&dev->mt76) && dev->mt76.hwrro_mode == MT76_HWRRO_V3)
+ mt76_wr(dev, MT_RRO_3_0_EMU_CONF, MT_RRO_3_0_EMU_CONF_EN_MASK);
+
if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
- u32 wed_irq_mask = MT_INT_RRO_RX_DONE | MT_INT_TX_DONE_BAND2 |
+ u32 wed_irq_mask = MT_INT_TX_DONE_BAND2 |
dev->mt76.mmio.irqmask;
- if (mtk_wed_get_rx_capa(&dev->mt76.mmio.wed))
- wed_irq_mask &= ~MT_INT_RX_DONE_RRO_IND;
-
mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
-
mtk_wed_device_start_hw_rro(&dev->mt76.mmio.wed, wed_irq_mask,
true);
mt7996_irq_enable(dev, wed_irq_mask);
@@ -2058,11 +2575,8 @@ void mt7996_mac_reset_work(struct work_struct *work)
}
clear_bit(MT76_MCU_RESET, &dev->mphy.state);
- clear_bit(MT76_RESET, &dev->mphy.state);
- if (phy2)
- clear_bit(MT76_RESET, &phy2->mt76->state);
- if (phy3)
- clear_bit(MT76_RESET, &phy3->mt76->state);
+ mt7996_for_each_phy(dev, phy)
+ clear_bit(MT76_RESET, &phy->mt76->state);
mt76_for_each_q_rx(&dev->mt76, i) {
if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
@@ -2084,25 +2598,14 @@ void mt7996_mac_reset_work(struct work_struct *work)
napi_schedule(&dev->mt76.tx_napi);
local_bh_enable();
- ieee80211_wake_queues(mt76_hw(dev));
- if (phy2)
- ieee80211_wake_queues(phy2->mt76->hw);
- if (phy3)
- ieee80211_wake_queues(phy3->mt76->hw);
+ ieee80211_wake_queues(hw);
mutex_unlock(&dev->mt76.mutex);
mt7996_update_beacons(dev);
- ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
- MT7996_WATCHDOG_TIME);
- if (phy2)
- ieee80211_queue_delayed_work(phy2->mt76->hw,
- &phy2->mt76->mac_work,
- MT7996_WATCHDOG_TIME);
- if (phy3)
- ieee80211_queue_delayed_work(phy3->mt76->hw,
- &phy3->mt76->mac_work,
+ mt7996_for_each_phy(dev, phy)
+ ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
MT7996_WATCHDOG_TIME);
dev_info(dev->mt76.dev,"\n%s L1 SER recovery completed.",
wiphy_name(dev->mt76.hw->wiphy));
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
index 92b57bcce749..581314368c5b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
@@ -138,6 +138,28 @@ static int get_omac_idx(enum nl80211_iftype type, u64 mask)
return -1;
}
+static int get_own_mld_idx(u64 mask, bool group_mld)
+{
+ u8 start = group_mld ? 0 : 16;
+ u8 end = group_mld ? 15 : 63;
+ int idx;
+
+ idx = get_free_idx(mask, start, end);
+ if (idx)
+ return idx - 1;
+
+ /* If the 16-63 range is not available, perform another lookup in the
+ * range 0-15
+ */
+ if (!group_mld) {
+ idx = get_free_idx(mask, 0, 15);
+ if (idx)
+ return idx - 1;
+ }
+
+ return -EINVAL;
+}
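+/* Allocation sketch (mask value hypothetical): group MLD ids come from bits
+ * 0-15 and own MLD ids from bits 16-63, so with mask = GENMASK_ULL(63, 16)
+ * fully set a non-group lookup falls back to the 0-15 range, and only when
+ * that is also exhausted does -EINVAL propagate.
+ */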
+
static void
mt7996_init_bitrate_mask(struct ieee80211_vif *vif, struct mt7996_vif_link *mlink)
{
@@ -160,112 +182,105 @@ mt7996_init_bitrate_mask(struct ieee80211_vif *vif, struct mt7996_vif_link *mlin
static int
mt7996_set_hw_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
+ unsigned int link_id, struct ieee80211_key_conf *key)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct ieee80211_bss_conf *link_conf;
+ struct mt7996_sta_link *msta_link;
+ struct mt7996_vif_link *link;
int idx = key->keyidx;
- unsigned int link_id;
- unsigned long links;
+ u8 *wcid_keyidx;
+ bool is_bigtk;
+ int err;
- if (key->link_id >= 0)
- links = BIT(key->link_id);
- else if (sta && sta->valid_links)
- links = sta->valid_links;
- else if (vif->valid_links)
- links = vif->valid_links;
- else
- links = BIT(0);
+ link = mt7996_vif_link(dev, vif, link_id);
+ if (!link)
+ return 0;
- for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
- struct mt7996_sta_link *msta_link;
- struct mt7996_vif_link *link;
- u8 *wcid_keyidx;
- int err;
+ if (!mt7996_vif_link_phy(link))
+ return 0;
- link = mt7996_vif_link(dev, vif, link_id);
- if (!link)
- continue;
+ if (sta) {
+ struct mt7996_sta *msta;
- if (sta) {
- struct mt7996_sta *msta;
+ msta = (struct mt7996_sta *)sta->drv_priv;
+ msta_link = mt76_dereference(msta->link[link_id],
+ &dev->mt76);
+ if (!msta_link)
+ return 0;
- msta = (struct mt7996_sta *)sta->drv_priv;
- msta_link = mt76_dereference(msta->link[link_id],
- &dev->mt76);
- if (!msta_link)
- continue;
+ if (!msta_link->wcid.sta)
+ return -EOPNOTSUPP;
+ } else {
+ msta_link = &link->msta_link;
+ }
+ wcid_keyidx = &msta_link->wcid.hw_key_idx;
- if (!msta_link->wcid.sta)
- return -EOPNOTSUPP;
- } else {
- msta_link = &link->msta_link;
- }
- wcid_keyidx = &msta_link->wcid.hw_key_idx;
-
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_AES_CMAC:
- case WLAN_CIPHER_SUITE_BIP_CMAC_256:
- case WLAN_CIPHER_SUITE_BIP_GMAC_128:
- case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- if (key->keyidx == 6 || key->keyidx == 7) {
- wcid_keyidx = &msta_link->wcid.hw_key_idx2;
- key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
- }
- break;
- default:
- break;
+ is_bigtk = key->keyidx == 6 || key->keyidx == 7;
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ if (is_bigtk) {
+ wcid_keyidx = &msta_link->wcid.hw_key_idx2;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
}
+ break;
+ default:
+ break;
+ }
- if (cmd == SET_KEY && !sta && !link->mt76.cipher) {
- struct ieee80211_bss_conf *link_conf;
+ link_conf = link_conf_dereference_protected(vif, link_id);
+ if (!link_conf)
+ link_conf = &vif->bss_conf;
- link_conf = link_conf_dereference_protected(vif,
- link_id);
- if (!link_conf)
- link_conf = &vif->bss_conf;
+ if (cmd == SET_KEY && !sta && !link->mt76.cipher) {
+ link->mt76.cipher =
+ mt76_connac_mcu_get_cipher(key->cipher);
+ mt7996_mcu_add_bss_info(link->phy, vif, link_conf,
+ &link->mt76, msta_link, true);
+ }
- link->mt76.cipher =
- mt76_connac_mcu_get_cipher(key->cipher);
- mt7996_mcu_add_bss_info(link->phy, vif, link_conf,
- &link->mt76, msta_link, true);
- }
+ if (cmd == SET_KEY)
+ *wcid_keyidx = idx;
+ else if (idx == *wcid_keyidx)
+ *wcid_keyidx = -1;
- if (cmd == SET_KEY) {
- *wcid_keyidx = idx;
- } else {
- if (idx == *wcid_keyidx)
- *wcid_keyidx = -1;
- continue;
- }
+ if (cmd != SET_KEY && sta)
+ return 0;
- mt76_wcid_key_setup(&dev->mt76, &msta_link->wcid, key);
+ mt76_wcid_key_setup(&dev->mt76, &msta_link->wcid, key);
- if (key->keyidx == 6 || key->keyidx == 7) {
- err = mt7996_mcu_bcn_prot_enable(dev, link,
- msta_link, key);
- if (err)
- return err;
- }
+ err = mt7996_mcu_add_key(&dev->mt76, vif, key,
+ MCU_WMWA_UNI_CMD(STA_REC_UPDATE),
+ &msta_link->wcid, cmd);
- err = mt7996_mcu_add_key(&dev->mt76, vif, key,
- MCU_WMWA_UNI_CMD(STA_REC_UPDATE),
- &msta_link->wcid, cmd);
- if (err)
- return err;
+ /* remove and add beacon in order to enable beacon protection */
+ if (cmd == SET_KEY && is_bigtk && link_conf->enable_beacon) {
+ mt7996_mcu_add_beacon(hw, vif, link_conf, false);
+ mt7996_mcu_add_beacon(hw, vif, link_conf, true);
}
- return 0;
+ return err;
}
+struct mt7996_key_iter_data {
+ enum set_key_cmd cmd;
+ unsigned int link_id;
+};
+
static void
mt7996_key_iter(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct ieee80211_key_conf *key,
void *data)
{
+ struct mt7996_key_iter_data *it = data;
+
if (sta)
return;
- WARN_ON(mt7996_set_hw_key(hw, SET_KEY, vif, NULL, key));
+ WARN_ON(mt7996_set_hw_key(hw, it->cmd, vif, NULL, it->link_id, key));
}
int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
@@ -278,8 +293,12 @@ int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
struct mt7996_phy *phy = mphy->priv;
struct mt7996_dev *dev = phy->dev;
u8 band_idx = phy->mt76->band_idx;
+ struct mt7996_key_iter_data it = {
+ .cmd = SET_KEY,
+ .link_id = link_conf->link_id,
+ };
struct mt76_txq *mtxq;
- int idx, ret;
+ int mld_idx, idx, ret;
mlink->idx = __ffs64(~dev->mt76.vif_mask);
if (mlink->idx >= mt7996_max_interface_num(dev))
@@ -289,6 +308,17 @@ int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
if (idx < 0)
return -ENOSPC;
+ if (!dev->mld_idx_mask) { /* first link in the group */
+ mvif->mld_group_idx = get_own_mld_idx(dev->mld_idx_mask, true);
+ mvif->mld_remap_idx = get_free_idx(dev->mld_remap_idx_mask,
+ 0, 15);
+ }
+
+ mld_idx = get_own_mld_idx(dev->mld_idx_mask, false);
+ if (mld_idx < 0)
+ return -ENOSPC;
+
+ link->mld_idx = mld_idx;
link->phy = phy;
mlink->omac_idx = idx;
mlink->band_idx = band_idx;
@@ -301,6 +331,11 @@ int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
return ret;
dev->mt76.vif_mask |= BIT_ULL(mlink->idx);
+ if (!dev->mld_idx_mask) {
+ dev->mld_idx_mask |= BIT_ULL(mvif->mld_group_idx);
+ dev->mld_remap_idx_mask |= BIT_ULL(mvif->mld_remap_idx);
+ }
+ dev->mld_idx_mask |= BIT_ULL(link->mld_idx);
phy->omac_mask |= BIT_ULL(mlink->omac_idx);
idx = MT7996_WTBL_RESERVED - mlink->idx;
@@ -339,7 +374,7 @@ int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
CONN_STATE_PORT_SECURE, true);
rcu_assign_pointer(dev->mt76.wcid[idx], &msta_link->wcid);
- ieee80211_iter_keys(mphy->hw, vif, mt7996_key_iter, NULL);
+ ieee80211_iter_keys(mphy->hw, vif, mt7996_key_iter, &it);
if (mvif->mt76.deflink_id == IEEE80211_LINK_UNSPECIFIED)
mvif->mt76.deflink_id = link_conf->link_id;
@@ -356,8 +391,14 @@ void mt7996_vif_link_remove(struct mt76_phy *mphy, struct ieee80211_vif *vif,
struct mt7996_sta_link *msta_link = &link->msta_link;
struct mt7996_phy *phy = mphy->priv;
struct mt7996_dev *dev = phy->dev;
+ struct mt7996_key_iter_data it = {
+ .cmd = SET_KEY,
+ .link_id = link_conf->link_id,
+ };
int idx = msta_link->wcid.idx;
+ ieee80211_iter_keys(mphy->hw, vif, mt7996_key_iter, &it);
+
mt7996_mcu_add_sta(dev, link_conf, NULL, link, NULL,
CONN_STATE_DISCONNECT, false);
mt7996_mcu_add_bss_info(phy, vif, link_conf, mlink, msta_link, false);
@@ -380,7 +421,13 @@ void mt7996_vif_link_remove(struct mt76_phy *mphy, struct ieee80211_vif *vif,
}
dev->mt76.vif_mask &= ~BIT_ULL(mlink->idx);
+ dev->mld_idx_mask &= ~BIT_ULL(link->mld_idx);
phy->omac_mask &= ~BIT_ULL(mlink->omac_idx);
+ if (!(dev->mld_idx_mask & ~BIT_ULL(mvif->mld_group_idx))) {
+ /* last link */
+ dev->mld_idx_mask &= ~BIT_ULL(mvif->mld_group_idx);
+ dev->mld_remap_idx_mask &= ~BIT_ULL(mvif->mld_remap_idx);
+ }
spin_lock_bh(&dev->mt76.sta_poll_lock);
if (!list_empty(&msta_link->wcid.poll_list))
@@ -516,6 +563,9 @@ int mt7996_set_channel(struct mt76_phy *mphy)
struct mt7996_phy *phy = mphy->priv;
int ret;
+ if (mphy->offchannel)
+ mt7996_mac_update_beacons(phy);
+
ret = mt7996_mcu_set_chan_info(phy, UNI_CHANNEL_SWITCH);
if (ret)
goto out;
@@ -533,6 +583,8 @@ int mt7996_set_channel(struct mt76_phy *mphy)
mt7996_mac_reset_counters(phy);
phy->noise = 0;
+ if (!mphy->offchannel)
+ mt7996_mac_update_beacons(phy);
out:
ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
@@ -546,8 +598,9 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- int err;
+ unsigned int link_id;
+ unsigned long links;
+ int err = 0;
/* The hardware does not support per-STA RX GTK, fallback
* to software mode for these.
@@ -581,11 +634,22 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -EOPNOTSUPP;
}
- if (!mt7996_vif_link_phy(&mvif->deflink))
- return 0; /* defer until after link add */
-
mutex_lock(&dev->mt76.mutex);
- err = mt7996_set_hw_key(hw, cmd, vif, sta, key);
+
+ if (key->link_id >= 0)
+ links = BIT(key->link_id);
+ else if (sta && sta->valid_links)
+ links = sta->valid_links;
+ else if (vif->valid_links)
+ links = vif->valid_links;
+ else
+ links = BIT(0);
+
+ for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ err = mt7996_set_hw_key(hw, cmd, vif, sta, link_id, key);
+ if (err)
+ break;
+ }
mutex_unlock(&dev->mt76.mutex);
return err;
@@ -845,7 +909,7 @@ mt7996_link_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
link->mt76.beacon_rates_idx =
mt7996_get_rates_table(phy, info, true, false);
- mt7996_mcu_add_beacon(hw, vif, info);
+ mt7996_mcu_add_beacon(hw, vif, info, info->enable_beacon);
}
if (changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
@@ -873,7 +937,7 @@ mt7996_channel_switch_beacon(struct ieee80211_hw *hw,
struct mt7996_dev *dev = mt7996_hw_dev(hw);
mutex_lock(&dev->mt76.mutex);
- mt7996_mcu_add_beacon(hw, vif, &vif->bss_conf);
+ mt7996_mcu_add_beacon(hw, vif, &vif->bss_conf, vif->bss_conf.enable_beacon);
mutex_unlock(&dev->mt76.mutex);
}
@@ -920,6 +984,7 @@ mt7996_mac_sta_init_link(struct mt7996_dev *dev,
msta_link->wcid.sta = 1;
msta_link->wcid.idx = idx;
msta_link->wcid.link_id = link_id;
+ msta_link->wcid.def_wcid = &msta->deflink.wcid;
ewma_avg_signal_init(&msta_link->avg_ack_signal);
ewma_signal_init(&msta_link->wcid.rssi);
@@ -936,18 +1001,9 @@ mt7996_mac_sta_init_link(struct mt7996_dev *dev,
return 0;
}
-static void
-mt7996_mac_sta_deinit_link(struct mt7996_dev *dev,
- struct mt7996_sta_link *msta_link)
+void mt7996_mac_sta_deinit_link(struct mt7996_dev *dev,
+ struct mt7996_sta_link *msta_link)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(msta_link->wcid.aggr); i++)
- mt76_rx_aggr_stop(&dev->mt76, &msta_link->wcid, i);
-
- mt7996_mac_wtbl_update(dev, msta_link->wcid.idx,
- MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
-
spin_lock_bh(&dev->mt76.sta_poll_lock);
if (!list_empty(&msta_link->wcid.poll_list))
list_del_init(&msta_link->wcid.poll_list);
@@ -977,6 +1033,9 @@ mt7996_mac_sta_remove_links(struct mt7996_dev *dev, struct ieee80211_vif *vif,
if (!msta_link)
continue;
+ mt7996_mac_wtbl_update(dev, msta_link->wcid.idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+
mt7996_mac_sta_deinit_link(dev, msta_link);
link = mt7996_vif_link(dev, vif, link_id);
if (!link)
@@ -1031,16 +1090,17 @@ mt7996_mac_sta_add_links(struct mt7996_dev *dev, struct ieee80211_vif *vif,
goto error_unlink;
}
- err = mt7996_mac_sta_init_link(dev, link_conf, link_sta, link,
- link_id);
- if (err)
- goto error_unlink;
-
mphy = mt76_vif_link_phy(&link->mt76);
if (!mphy) {
err = -EINVAL;
goto error_unlink;
}
+
+ err = mt7996_mac_sta_init_link(dev, link_conf, link_sta, link,
+ link_id);
+ if (err)
+ goto error_unlink;
+
mphy->num_sta++;
}
@@ -1174,6 +1234,24 @@ mt7996_mac_sta_remove(struct mt7996_dev *dev, struct ieee80211_vif *vif,
mutex_unlock(&dev->mt76.mutex);
}
+static void
+mt7996_set_active_links(struct ieee80211_vif *vif)
+{
+ u16 active_links;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (!ieee80211_vif_is_mld(vif))
+ return;
+
+ active_links = mt76_select_links(vif, MT7996_MAX_RADIOS);
+ if (hweight16(active_links) < 2)
+ return;
+
+ ieee80211_set_active_links_async(vif, active_links);
+}
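+/* Behaviour sketch of the helper above: for an MLO station interface it asks
+ * mt76_select_links() for up to MT7996_MAX_RADIOS links and requests them as
+ * active only when at least two were selected; non-station or non-MLD
+ * interfaces are left untouched.
+ */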
+
static int
mt7996_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, enum ieee80211_sta_state old_state,
@@ -1191,16 +1269,18 @@ mt7996_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt7996_mac_sta_remove(dev, vif, sta);
if (old_state == IEEE80211_STA_AUTH &&
- new_state == IEEE80211_STA_ASSOC)
+ new_state == IEEE80211_STA_ASSOC) {
+ mt7996_set_active_links(vif);
ev = MT76_STA_EVENT_ASSOC;
- else if (old_state == IEEE80211_STA_ASSOC &&
- new_state == IEEE80211_STA_AUTHORIZED)
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTHORIZED) {
ev = MT76_STA_EVENT_AUTHORIZE;
- else if (old_state == IEEE80211_STA_ASSOC &&
- new_state == IEEE80211_STA_AUTH)
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH) {
ev = MT76_STA_EVENT_DISASSOC;
- else
+ } else {
return 0;
+ }
return mt7996_mac_sta_event(dev, vif, sta, ev);
}
@@ -1209,28 +1289,61 @@ static void mt7996_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct ieee80211_sta *sta = control->sta;
+ struct mt7996_sta *msta = sta ? (void *)sta->drv_priv : NULL;
struct mt76_phy *mphy = hw->priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
+ struct mt7996_vif *mvif = vif ? (void *)vif->drv_priv : NULL;
struct mt76_wcid *wcid = &dev->mt76.global_wcid;
u8 link_id = u32_get_bits(info->control.flags,
IEEE80211_TX_CTRL_MLO_LINK);
rcu_read_lock();
- if (vif) {
- struct mt7996_vif *mvif = (void *)vif->drv_priv;
+ /* Use primary link_id if the value from mac80211 is set to
+ * IEEE80211_LINK_UNSPECIFIED.
+ */
+ if (link_id == IEEE80211_LINK_UNSPECIFIED) {
+ if (msta)
+ link_id = msta->deflink_id;
+ else if (mvif)
+ link_id = mvif->mt76.deflink_id;
+ }
+
+ if (vif && ieee80211_vif_is_mld(vif)) {
+ struct ieee80211_bss_conf *link_conf;
+
+ if (msta) {
+ struct ieee80211_link_sta *link_sta;
+
+ link_sta = rcu_dereference(sta->link[link_id]);
+ if (!link_sta)
+ link_sta = rcu_dereference(sta->link[msta->deflink_id]);
+
+ if (link_sta) {
+ memcpy(hdr->addr1, link_sta->addr, ETH_ALEN);
+ if (ether_addr_equal(sta->addr, hdr->addr3))
+ memcpy(hdr->addr3, link_sta->addr, ETH_ALEN);
+ }
+ }
+
+ link_conf = rcu_dereference(vif->link_conf[link_id]);
+ if (link_conf) {
+ memcpy(hdr->addr2, link_conf->addr, ETH_ALEN);
+ if (ether_addr_equal(vif->addr, hdr->addr3))
+ memcpy(hdr->addr3, link_conf->addr, ETH_ALEN);
+ }
+ }
+
+ if (mvif) {
struct mt76_vif_link *mlink = &mvif->deflink.mt76;
if (link_id < IEEE80211_LINK_UNSPECIFIED)
mlink = rcu_dereference(mvif->mt76.link[link_id]);
- if (!mlink) {
- ieee80211_free_txskb(hw, skb);
- goto unlock;
- }
-
if (mlink->wcid)
wcid = mlink->wcid;
@@ -1249,8 +1362,7 @@ static void mt7996_tx(struct ieee80211_hw *hw,
goto unlock;
}
- if (control->sta && link_id < IEEE80211_LINK_UNSPECIFIED) {
- struct mt7996_sta *msta = (void *)control->sta->drv_priv;
+ if (msta && link_id < IEEE80211_LINK_UNSPECIFIED) {
struct mt7996_sta_link *msta_link;
msta_link = rcu_dereference(msta->link[link_id]);
@@ -1287,16 +1399,13 @@ static int
mt7996_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params)
{
- enum ieee80211_ampdu_mlme_action action = params->action;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct ieee80211_sta *sta = params->sta;
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
struct ieee80211_txq *txq = sta->txq[params->tid];
- struct ieee80211_link_sta *link_sta;
u16 tid = params->tid;
u16 ssn = params->ssn;
struct mt76_txq *mtxq;
- unsigned int link_id;
int ret = 0;
if (!txq)
@@ -1306,59 +1415,42 @@ mt7996_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mutex_lock(&dev->mt76.mutex);
- for_each_sta_active_link(vif, sta, link_sta, link_id) {
- struct mt7996_sta_link *msta_link;
- struct mt7996_vif_link *link;
-
- msta_link = mt76_dereference(msta->link[link_id], &dev->mt76);
- if (!msta_link)
- continue;
-
- link = mt7996_vif_link(dev, vif, link_id);
- if (!link)
- continue;
-
- switch (action) {
- case IEEE80211_AMPDU_RX_START:
- mt76_rx_aggr_start(&dev->mt76, &msta_link->wcid, tid,
- ssn, params->buf_size);
- ret = mt7996_mcu_add_rx_ba(dev, params, link, true);
- break;
- case IEEE80211_AMPDU_RX_STOP:
- mt76_rx_aggr_stop(&dev->mt76, &msta_link->wcid, tid);
- ret = mt7996_mcu_add_rx_ba(dev, params, link, false);
- break;
- case IEEE80211_AMPDU_TX_OPERATIONAL:
- mtxq->aggr = true;
- mtxq->send_bar = false;
- ret = mt7996_mcu_add_tx_ba(dev, params, link,
- msta_link, true);
- break;
- case IEEE80211_AMPDU_TX_STOP_FLUSH:
- case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
- mtxq->aggr = false;
- clear_bit(tid, &msta_link->wcid.ampdu_state);
- ret = mt7996_mcu_add_tx_ba(dev, params, link,
- msta_link, false);
- break;
- case IEEE80211_AMPDU_TX_START:
- set_bit(tid, &msta_link->wcid.ampdu_state);
- ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
- break;
- case IEEE80211_AMPDU_TX_STOP_CONT:
- mtxq->aggr = false;
- clear_bit(tid, &msta_link->wcid.ampdu_state);
- ret = mt7996_mcu_add_tx_ba(dev, params, link,
- msta_link, false);
- break;
- }
-
- if (ret)
- break;
- }
-
- if (action == IEEE80211_AMPDU_TX_STOP_CONT)
+ switch (params->action) {
+ case IEEE80211_AMPDU_RX_START:
+ /* Since packets belonging to the same TID can be split over
+ * multiple links, store the AMPDU state for reordering in the
+ * primary link.
+ */
+ mt76_rx_aggr_start(&dev->mt76, &msta->deflink.wcid, tid,
+ ssn, params->buf_size);
+ ret = mt7996_mcu_add_rx_ba(dev, params, vif, true);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ mt76_rx_aggr_stop(&dev->mt76, &msta->deflink.wcid, tid);
+ ret = mt7996_mcu_add_rx_ba(dev, params, vif, false);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ mtxq->aggr = true;
+ mtxq->send_bar = false;
+ ret = mt7996_mcu_add_tx_ba(dev, params, vif, true);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ mtxq->aggr = false;
+ clear_bit(tid, &msta->deflink.wcid.ampdu_state);
+ ret = mt7996_mcu_add_tx_ba(dev, params, vif, false);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ set_bit(tid, &msta->deflink.wcid.ampdu_state);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ mtxq->aggr = false;
+ clear_bit(tid, &msta->deflink.wcid.ampdu_state);
+ ret = mt7996_mcu_add_tx_ba(dev, params, vif, false);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ }
mutex_unlock(&dev->mt76.mutex);
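Note: after this rework the RX reorder state for a TID lives only on the station's primary link (deflink), while programming the BA session into the firmware for each active link is delegated to mt7996_mcu_add_tx_ba()/mt7996_mcu_add_rx_ba(); see the mcu.c hunks further down.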
@@ -1612,19 +1704,13 @@ static void mt7996_sta_statistics(struct ieee80211_hw *hw,
}
}
-static void mt7996_link_rate_ctrl_update(void *data, struct ieee80211_sta *sta)
+static void mt7996_link_rate_ctrl_update(void *data,
+ struct mt7996_sta_link *msta_link)
{
- struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_sta *msta = msta_link->sta;
struct mt7996_dev *dev = msta->vif->deflink.phy->dev;
- struct mt7996_sta_link *msta_link;
u32 *changed = data;
- rcu_read_lock();
-
- msta_link = rcu_dereference(msta->link[msta->deflink_id]);
- if (!msta_link)
- goto out;
-
spin_lock_bh(&dev->mt76.sta_poll_lock);
msta_link->changed |= *changed;
@@ -1632,8 +1718,6 @@ static void mt7996_link_rate_ctrl_update(void *data, struct ieee80211_sta *sta)
list_add_tail(&msta_link->rc_list, &dev->sta_rc_list);
spin_unlock_bh(&dev->mt76.sta_poll_lock);
-out:
- rcu_read_unlock();
}
static void mt7996_link_sta_rc_update(struct ieee80211_hw *hw,
@@ -1641,11 +1725,32 @@ static void mt7996_link_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_link_sta *link_sta,
u32 changed)
{
- struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct ieee80211_sta *sta = link_sta->sta;
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_sta_link *msta_link;
- mt7996_link_rate_ctrl_update(&changed, sta);
- ieee80211_queue_work(hw, &dev->rc_work);
+ rcu_read_lock();
+
+ msta_link = rcu_dereference(msta->link[link_sta->link_id]);
+ if (msta_link) {
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+
+ mt7996_link_rate_ctrl_update(&changed, msta_link);
+ ieee80211_queue_work(hw, &dev->rc_work);
+ }
+
+ rcu_read_unlock();
+}
+
+static void mt7996_sta_rate_ctrl_update(void *data, struct ieee80211_sta *sta)
+{
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct mt7996_sta_link *msta_link;
+ u32 *changed = data;
+
+ msta_link = rcu_dereference(msta->link[msta->deflink_id]);
+ if (msta_link)
+ mt7996_link_rate_ctrl_update(&changed, msta_link);
}
static int
@@ -1666,7 +1771,7 @@ mt7996_set_bitrate_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
* - multiple rates: if it's not in range format, i.e. 0-{7,8,9} for VHT,
* then multiple MCS settings (MCS 4,5,6) are not supported.
*/
- ieee80211_iterate_stations_atomic(hw, mt7996_link_rate_ctrl_update,
+ ieee80211_iterate_stations_atomic(hw, mt7996_sta_rate_ctrl_update,
&changed);
ieee80211_queue_work(hw, &dev->rc_work);
@@ -2067,9 +2172,7 @@ mt7996_net_fill_forward_path(struct ieee80211_hw *hw,
struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
struct mt7996_sta_link *msta_link;
- struct mt7996_vif_link *link;
struct mt76_vif_link *mlink;
- struct mt7996_phy *phy;
mlink = rcu_dereference(mvif->mt76.link[msta->deflink_id]);
if (!mlink)
@@ -2082,12 +2185,9 @@ mt7996_net_fill_forward_path(struct ieee80211_hw *hw,
if (!msta_link->wcid.sta || msta_link->wcid.idx > MT7996_WTBL_STA)
return -EIO;
- link = (struct mt7996_vif_link *)mlink;
- phy = mt7996_vif_link_phy(link);
- if (!phy)
- return -ENODEV;
-
- if (phy != &dev->phy && phy->mt76->band_idx == MT_BAND2)
+ if (dev->hif2 &&
+ ((is_mt7996(&dev->mt76) && msta_link->wcid.phy_idx == MT_BAND2) ||
+ (is_mt7992(&dev->mt76) && msta_link->wcid.phy_idx == MT_BAND1)))
wed = &dev->mt76.mmio.wed_hif2;
if (!mtk_wed_device_active(wed))
@@ -2100,7 +2200,11 @@ mt7996_net_fill_forward_path(struct ieee80211_hw *hw,
path->mtk_wdma.queue = 0;
path->mtk_wdma.wcid = msta_link->wcid.idx;
- path->mtk_wdma.amsdu = mtk_wed_is_amsdu_supported(wed);
+ if (ieee80211_hw_check(hw, SUPPORTS_AMSDU_IN_AMPDU) &&
+ mtk_wed_is_amsdu_supported(wed))
+ path->mtk_wdma.amsdu = msta_link->wcid.amsdu;
+ else
+ path->mtk_wdma.amsdu = 0;
ctx->dev = NULL;
return 0;
@@ -2116,6 +2220,19 @@ mt7996_change_vif_links(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return 0;
}
+static void
+mt7996_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct mt7996_dev *dev = mt7996_hw_dev(hw);
+ struct mt7996_phy *phy;
+
+ ieee80211_wake_queues(hw);
+ mt7996_for_each_phy(dev, phy)
+ ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
+ MT7996_WATCHDOG_TIME);
+}
+
const struct ieee80211_ops mt7996_ops = {
.add_chanctx = mt76_add_chanctx,
.remove_chanctx = mt76_remove_chanctx,
@@ -2174,4 +2291,5 @@ const struct ieee80211_ops mt7996_ops = {
#endif
.change_vif_links = mt7996_change_vif_links,
.change_sta_links = mt7996_mac_sta_change_links,
+ .reconfig_complete = mt7996_reconfig_complete,
};
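The new .reconfig_complete hook gives the mac80211 restart path a clean hand-off point: TX queues are woken and the per-phy watchdog (mac_work) is re-armed only once the stack has finished reconfiguring the device.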
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
index 3593fd40c51b..0347ee0c2dd7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
@@ -242,6 +242,30 @@ mt7996_mcu_parse_response(struct mt76_dev *mdev, int cmd,
return ret;
}
+static void
+mt7996_mcu_set_timeout(struct mt76_dev *mdev, int cmd)
+{
+ mdev->mcu.timeout = 5 * HZ;
+
+ if (!(cmd & __MCU_CMD_FIELD_UNI))
+ return;
+
+ switch (FIELD_GET(__MCU_CMD_FIELD_ID, cmd)) {
+ case MCU_UNI_CMD_THERMAL:
+ case MCU_UNI_CMD_TWT:
+ case MCU_UNI_CMD_GET_MIB_INFO:
+ case MCU_UNI_CMD_STA_REC_UPDATE:
+ case MCU_UNI_CMD_BSS_INFO_UPDATE:
+ mdev->mcu.timeout = 2 * HZ;
+ return;
+ case MCU_UNI_CMD_EFUSE_CTRL:
+ mdev->mcu.timeout = 20 * HZ;
+ return;
+ default:
+ break;
+ }
+}
+
static int
mt7996_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
int cmd, int *wait_seq)
@@ -255,7 +279,7 @@ mt7996_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
u32 val;
u8 seq;
- mdev->mcu.timeout = 20 * HZ;
+ mt7996_mcu_set_timeout(mdev, cmd);
seq = ++dev->mt76.mcu.msg_seq & 0xf;
if (!seq)
@@ -660,7 +684,7 @@ mt7996_mcu_wed_rro_event(struct mt7996_dev *dev, struct sk_buff *skb)
{
struct mt7996_mcu_wed_rro_event *event = (void *)skb->data;
- if (!dev->has_rro)
+ if (!mt7996_has_hwrro(dev))
return;
skb_pull(skb, sizeof(struct mt7996_mcu_rxd) + 4);
@@ -899,17 +923,28 @@ mt7996_mcu_bss_txcmd_tlv(struct sk_buff *skb, bool en)
}
static void
-mt7996_mcu_bss_mld_tlv(struct sk_buff *skb, struct mt76_vif_link *mlink)
+mt7996_mcu_bss_mld_tlv(struct sk_buff *skb,
+ struct ieee80211_bss_conf *link_conf,
+ struct mt7996_vif_link *link)
{
+ struct ieee80211_vif *vif = link_conf->vif;
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct bss_mld_tlv *mld;
struct tlv *tlv;
tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_MLD, sizeof(*mld));
-
mld = (struct bss_mld_tlv *)tlv;
- mld->group_mld_id = 0xff;
- mld->own_mld_id = mlink->idx;
- mld->remap_idx = 0xff;
+ mld->own_mld_id = link->mld_idx;
+ mld->link_id = link_conf->link_id;
+
+ if (ieee80211_vif_is_mld(vif)) {
+ mld->group_mld_id = mvif->mld_group_idx;
+ mld->remap_idx = mvif->mld_remap_idx;
+ memcpy(mld->mac_addr, vif->addr, ETH_ALEN);
+ } else {
+ mld->group_mld_id = 0xff;
+ mld->remap_idx = 0xff;
+ }
}
static void
@@ -1108,6 +1143,8 @@ int mt7996_mcu_add_bss_info(struct mt7996_phy *phy, struct ieee80211_vif *vif,
goto out;
if (enable) {
+ struct mt7996_vif_link *link;
+
mt7996_mcu_bss_rfch_tlv(skb, phy);
mt7996_mcu_bss_bmc_tlv(skb, mlink, phy);
mt7996_mcu_bss_ra_tlv(skb, phy);
@@ -1118,7 +1155,8 @@ int mt7996_mcu_add_bss_info(struct mt7996_phy *phy, struct ieee80211_vif *vif,
mt7996_mcu_bss_he_tlv(skb, vif, link_conf, phy);
/* this tag is necessary no matter if the vif is MLD */
- mt7996_mcu_bss_mld_tlv(skb, mlink);
+ link = container_of(mlink, struct mt7996_vif_link, mt76);
+ mt7996_mcu_bss_mld_tlv(skb, link_conf, link);
}
mt7996_mcu_bss_mbssid_tlv(skb, link_conf, enable);
@@ -1149,9 +1187,8 @@ int mt7996_mcu_set_timing(struct mt7996_phy *phy, struct ieee80211_vif *vif,
static int
mt7996_mcu_sta_ba(struct mt7996_dev *dev, struct mt76_vif_link *mvif,
struct ieee80211_ampdu_params *params,
- bool enable, bool tx)
+ struct mt76_wcid *wcid, bool enable, bool tx)
{
- struct mt76_wcid *wcid = (struct mt76_wcid *)params->sta->drv_priv;
struct sta_rec_ba_uni *ba;
struct sk_buff *skb;
struct tlv *tlv;
@@ -1170,7 +1207,7 @@ mt7996_mcu_sta_ba(struct mt7996_dev *dev, struct mt76_vif_link *mvif,
ba->ba_en = enable << params->tid;
ba->amsdu = params->amsdu;
ba->tid = params->tid;
- ba->ba_rdd_rro = !tx && enable && dev->has_rro;
+ ba->ba_rdd_rro = !tx && enable && mt7996_has_hwrro(dev);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true);
@@ -1179,20 +1216,67 @@ mt7996_mcu_sta_ba(struct mt7996_dev *dev, struct mt76_vif_link *mvif,
/** starec & wtbl **/
int mt7996_mcu_add_tx_ba(struct mt7996_dev *dev,
struct ieee80211_ampdu_params *params,
- struct mt7996_vif_link *link,
- struct mt7996_sta_link *msta_link, bool enable)
+ struct ieee80211_vif *vif, bool enable)
{
- if (enable && !params->amsdu)
- msta_link->wcid.amsdu = false;
+ struct ieee80211_sta *sta = params->sta;
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct ieee80211_link_sta *link_sta;
+ unsigned int link_id;
+ int ret = 0;
+
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ struct mt7996_sta_link *msta_link;
+ struct mt7996_vif_link *link;
+
+ msta_link = mt76_dereference(msta->link[link_id], &dev->mt76);
+ if (!msta_link)
+ continue;
+
+ link = mt7996_vif_link(dev, vif, link_id);
+ if (!link)
+ continue;
+
+ if (enable && !params->amsdu)
+ msta_link->wcid.amsdu = false;
+
+ ret = mt7996_mcu_sta_ba(dev, &link->mt76, params,
+ &msta_link->wcid, enable, true);
+ if (ret)
+ break;
+ }
- return mt7996_mcu_sta_ba(dev, &link->mt76, params, enable, true);
+ return ret;
}
int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev,
struct ieee80211_ampdu_params *params,
- struct mt7996_vif_link *link, bool enable)
+ struct ieee80211_vif *vif, bool enable)
{
- return mt7996_mcu_sta_ba(dev, &link->mt76, params, enable, false);
+ struct ieee80211_sta *sta = params->sta;
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct ieee80211_link_sta *link_sta;
+ unsigned int link_id;
+ int ret = 0;
+
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ struct mt7996_sta_link *msta_link;
+ struct mt7996_vif_link *link;
+
+ msta_link = mt76_dereference(msta->link[link_id], &dev->mt76);
+ if (!msta_link)
+ continue;
+
+ link = mt7996_vif_link(dev, vif, link_id);
+ if (!link)
+ continue;
+
+ ret = mt7996_mcu_sta_ba(dev, &link->mt76, params,
+ &msta_link->wcid, enable, false);
+ if (ret)
+ break;
+ }
+
+ return ret;
}
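mt7996_mcu_add_tx_ba() and mt7996_mcu_add_rx_ba() now duplicate the same per-link walk. Purely as an illustrative sketch (not part of the patch), the two could share a common helper built from the calls visible above:

static int
mt7996_mcu_sta_ba_links(struct mt7996_dev *dev,
			struct ieee80211_ampdu_params *params,
			struct ieee80211_vif *vif, bool enable, bool tx)
{
	struct ieee80211_sta *sta = params->sta;
	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
	struct ieee80211_link_sta *link_sta;
	unsigned int link_id;
	int ret = 0;

	for_each_sta_active_link(vif, sta, link_sta, link_id) {
		struct mt7996_sta_link *msta_link;
		struct mt7996_vif_link *link;

		/* skip links without allocated driver state */
		msta_link = mt76_dereference(msta->link[link_id], &dev->mt76);
		if (!msta_link)
			continue;

		link = mt7996_vif_link(dev, vif, link_id);
		if (!link)
			continue;

		/* tx-only quirk kept from mt7996_mcu_add_tx_ba() */
		if (tx && enable && !params->amsdu)
			msta_link->wcid.amsdu = false;

		ret = mt7996_mcu_sta_ba(dev, &link->mt76, params,
					&msta_link->wcid, enable, tx);
		if (ret)
			break;
	}

	return ret;
}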
static void
@@ -1753,19 +1837,6 @@ mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
bf->mem_20m = bf->nrow < BF_MAT_ORDER ?
matrix[bf->nrow][bf->ncol] : 0;
}
-
- switch (link_sta->bandwidth) {
- case IEEE80211_STA_RX_BW_160:
- case IEEE80211_STA_RX_BW_80:
- bf->mem_total = bf->mem_20m * 2;
- break;
- case IEEE80211_STA_RX_BW_40:
- bf->mem_total = bf->mem_20m;
- break;
- case IEEE80211_STA_RX_BW_20:
- default:
- break;
- }
}
static void
@@ -1879,8 +1950,8 @@ mt7996_mcu_get_mmps_mode(enum ieee80211_smps_mode smps)
int mt7996_mcu_set_fixed_rate_ctrl(struct mt7996_dev *dev,
void *data, u16 version)
{
+ struct uni_header hdr = {};
struct ra_fixed_rate *req;
- struct uni_header hdr;
struct sk_buff *skb;
struct tlv *tlv;
int len;
@@ -2265,13 +2336,10 @@ error_unlock:
}
static int
-mt7996_mcu_add_group(struct mt7996_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+mt7996_mcu_add_group(struct mt7996_dev *dev, struct mt7996_vif_link *link,
+ struct mt76_wcid *wcid)
{
#define MT_STA_BSS_GROUP 1
- struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- struct mt7996_sta_link *msta_link;
- struct mt7996_sta *msta;
struct {
u8 __rsv1[4];
@@ -2286,13 +2354,10 @@ mt7996_mcu_add_group(struct mt7996_dev *dev, struct ieee80211_vif *vif,
.tag = cpu_to_le16(UNI_VOW_DRR_CTRL),
.len = cpu_to_le16(sizeof(req) - 4),
.action = cpu_to_le32(MT_STA_BSS_GROUP),
- .val = cpu_to_le32(mvif->deflink.mt76.idx % 16),
+ .val = cpu_to_le32(link->mt76.idx % 16),
+ .wlan_idx = cpu_to_le16(wcid->idx),
};
- msta = sta ? (struct mt7996_sta *)sta->drv_priv : NULL;
- msta_link = msta ? &msta->deflink : &mvif->deflink.msta_link;
- req.wlan_idx = cpu_to_le16(msta_link->wcid.idx);
-
return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(VOW), &req,
sizeof(req), true);
}
@@ -2432,7 +2497,7 @@ int mt7996_mcu_add_sta(struct mt7996_dev *dev,
}
}
- ret = mt7996_mcu_add_group(dev, link_conf->vif, sta);
+ ret = mt7996_mcu_add_group(dev, link, wcid);
if (ret) {
dev_kfree_skb(skb);
return ret;
@@ -2467,41 +2532,58 @@ mt7996_mcu_sta_key_tlv(struct mt76_wcid *wcid,
enum set_key_cmd cmd)
{
struct sta_rec_sec_uni *sec;
+ struct sec_key_uni *sec_key;
struct tlv *tlv;
+ u8 cipher;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_KEY_V2, sizeof(*sec));
sec = (struct sta_rec_sec_uni *)tlv;
- sec->add = cmd;
-
- if (cmd == SET_KEY) {
- struct sec_key_uni *sec_key;
- u8 cipher;
-
- cipher = mt76_connac_mcu_get_cipher(key->cipher);
- if (cipher == MCU_CIPHER_NONE)
- return -EOPNOTSUPP;
-
- sec_key = &sec->key[0];
- sec_key->wlan_idx = cpu_to_le16(wcid->idx);
- sec_key->mgmt_prot = 0;
- sec_key->cipher_id = cipher;
- sec_key->cipher_len = sizeof(*sec_key);
- sec_key->key_id = key->keyidx;
- sec_key->key_len = key->keylen;
- sec_key->need_resp = 0;
- memcpy(sec_key->key, key->key, key->keylen);
-
- if (cipher == MCU_CIPHER_TKIP) {
- /* Rx/Tx MIC keys are swapped */
- memcpy(sec_key->key + 16, key->key + 24, 8);
- memcpy(sec_key->key + 24, key->key + 16, 8);
- }
+ sec->add = 0;
+ sec->n_cipher = 1;
+ sec_key = &sec->key[0];
+ sec_key->wlan_idx = cpu_to_le16(wcid->idx);
+ sec_key->key_id = key->keyidx;
- sec->n_cipher = 1;
- } else {
- sec->n_cipher = 0;
+ if (cmd != SET_KEY)
+ return 0;
+
+ cipher = mt76_connac_mcu_get_cipher(key->cipher);
+ if (cipher == MCU_CIPHER_NONE)
+ return -EOPNOTSUPP;
+
+ sec_key->mgmt_prot = 0;
+ sec_key->cipher_id = cipher;
+ sec_key->cipher_len = sizeof(*sec_key);
+ sec_key->key_len = key->keylen;
+ sec_key->need_resp = 0;
+ memcpy(sec_key->key, key->key, key->keylen);
+
+ if (cipher == MCU_CIPHER_TKIP) {
+ /* Rx/Tx MIC keys are swapped */
+ memcpy(sec_key->key + 16, key->key + 24, 8);
+ memcpy(sec_key->key + 24, key->key + 16, 8);
+ return 0;
+ }
+
+ if (sec_key->key_id != 6 && sec_key->key_id != 7)
+ return 0;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ sec_key->cipher_id = MCU_CIPHER_BCN_PROT_CMAC_128;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ sec_key->cipher_id = MCU_CIPHER_BCN_PROT_GMAC_128;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ sec_key->cipher_id = MCU_CIPHER_BCN_PROT_GMAC_256;
+ break;
+ default:
+ return -EOPNOTSUPP;
}
+ sec_key->bcn_mode = BP_SW_MODE;
+
return 0;
}
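Key indices 6 and 7 are the BIGTK slots defined for beacon protection, so those keys are remapped to the BCN_PROT cipher IDs and flagged BP_SW_MODE here. With the key TLV now having room for the PN and beacon-protection mode (see the sec_key_uni change in the mcu.h hunk below), the separate PN query and the UNI_BSS_INFO_BCN_PROT TLV become redundant and are deleted further down.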
@@ -2519,99 +2601,12 @@ int mt7996_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
return PTR_ERR(skb);
ret = mt7996_mcu_sta_key_tlv(wcid, skb, key, cmd);
- if (ret)
- return ret;
-
- return mt76_mcu_skb_send_msg(dev, skb, mcu_cmd, true);
-}
-
-static int mt7996_mcu_get_pn(struct mt7996_dev *dev,
- struct mt7996_vif_link *link,
- struct mt7996_sta_link *msta_link, u8 *pn)
-{
-#define TSC_TYPE_BIGTK_PN 2
- struct sta_rec_pn_info *pn_info;
- struct sk_buff *skb, *rskb;
- struct tlv *tlv;
- int ret;
-
- skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &link->mt76,
- &msta_link->wcid);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_PN_INFO, sizeof(*pn_info));
- pn_info = (struct sta_rec_pn_info *)tlv;
-
- pn_info->tsc_type = TSC_TYPE_BIGTK_PN;
- ret = mt76_mcu_skb_send_and_get_msg(&dev->mt76, skb,
- MCU_WM_UNI_CMD_QUERY(STA_REC_UPDATE),
- true, &rskb);
- if (ret)
- return ret;
-
- skb_pull(rskb, 4);
-
- pn_info = (struct sta_rec_pn_info *)rskb->data;
- if (le16_to_cpu(pn_info->tag) == STA_REC_PN_INFO)
- memcpy(pn, pn_info->pn, 6);
-
- dev_kfree_skb(rskb);
- return 0;
-}
-
-int mt7996_mcu_bcn_prot_enable(struct mt7996_dev *dev,
- struct mt7996_vif_link *link,
- struct mt7996_sta_link *msta_link,
- struct ieee80211_key_conf *key)
-{
- struct mt7996_mcu_bcn_prot_tlv *bcn_prot;
- struct sk_buff *skb;
- struct tlv *tlv;
- u8 pn[6] = {};
- int len = sizeof(struct bss_req_hdr) +
- sizeof(struct mt7996_mcu_bcn_prot_tlv);
- int ret;
-
- skb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &link->mt76, len);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_BCN_PROT, sizeof(*bcn_prot));
-
- bcn_prot = (struct mt7996_mcu_bcn_prot_tlv *)tlv;
-
- ret = mt7996_mcu_get_pn(dev, link, msta_link, pn);
if (ret) {
dev_kfree_skb(skb);
return ret;
}
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_AES_CMAC:
- bcn_prot->cipher_id = MCU_CIPHER_BCN_PROT_CMAC_128;
- break;
- case WLAN_CIPHER_SUITE_BIP_GMAC_128:
- bcn_prot->cipher_id = MCU_CIPHER_BCN_PROT_GMAC_128;
- break;
- case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- bcn_prot->cipher_id = MCU_CIPHER_BCN_PROT_GMAC_256;
- break;
- case WLAN_CIPHER_SUITE_BIP_CMAC_256:
- default:
- dev_err(dev->mt76.dev, "Not supported Bigtk Cipher\n");
- dev_kfree_skb(skb);
- return -EOPNOTSUPP;
- }
-
- pn[0]++;
- memcpy(bcn_prot->pn, pn, 6);
- bcn_prot->enable = BP_SW_MODE;
- memcpy(bcn_prot->key, key->key, WLAN_MAX_KEY_LEN);
- bcn_prot->key_id = key->keyidx;
-
- return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_WMWA_UNI_CMD(BSS_INFO_UPDATE), true);
+ return mt76_mcu_skb_send_msg(dev, skb, mcu_cmd, true);
}
int mt7996_mcu_add_dev_info(struct mt7996_phy *phy, struct ieee80211_vif *vif,
@@ -2752,10 +2747,11 @@ mt7996_mcu_beacon_cont(struct mt7996_dev *dev,
}
int mt7996_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *link_conf)
+ struct ieee80211_bss_conf *link_conf, bool enabled)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
- struct mt76_vif_link *mlink = mt76_vif_conf_link(&dev->mt76, vif, link_conf);
+ struct mt7996_vif_link *link = mt7996_vif_conf_link(dev, vif, link_conf);
+ struct mt76_vif_link *mlink = link ? &link->mt76 : NULL;
struct ieee80211_mutable_offsets offs;
struct ieee80211_tx_info *info;
struct sk_buff *skb, *rskb;
@@ -2769,13 +2765,16 @@ int mt7996_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (!mlink)
return -EINVAL;
+ if (link->phy && link->phy->mt76->offchannel)
+ enabled = false;
+
rskb = __mt7996_mcu_alloc_bss_req(&dev->mt76, mlink,
MT7996_MAX_BSS_OFFLOAD_SIZE);
if (IS_ERR(rskb))
return PTR_ERR(rskb);
skb = ieee80211_beacon_get_template(hw, vif, &offs, link_conf->link_id);
- if (link_conf->enable_beacon && !skb) {
+ if (enabled && !skb) {
dev_kfree_skb(rskb);
return -EINVAL;
}
@@ -2794,7 +2793,7 @@ int mt7996_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
len = ALIGN(sizeof(*bcn) + MT_TXD_SIZE + extra_len, 4);
tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_BCN_CONTENT, len);
bcn = (struct bss_bcn_content_tlv *)tlv;
- bcn->enable = link_conf->enable_beacon;
+ bcn->enable = enabled;
if (!bcn->enable)
goto out;
@@ -3372,7 +3371,7 @@ int mt7996_mcu_set_hdr_trans(struct mt7996_dev *dev, bool hdr_trans)
{
struct {
u8 __rsv[4];
- } __packed hdr;
+ } __packed hdr = {};
struct hdr_trans_blacklist *req_blacklist;
struct hdr_trans_en *req_en;
struct sk_buff *skb;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
index 130ea95626d5..c841da1c60e5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
@@ -351,17 +351,6 @@ enum {
BP_HW_MODE,
};
-struct mt7996_mcu_bcn_prot_tlv {
- __le16 tag;
- __le16 len;
- u8 pn[6];
- u8 enable;
- u8 cipher_id;
- u8 key[WLAN_MAX_KEY_LEN];
- u8 key_id;
- u8 __rsv[3];
-} __packed;
-
struct bss_ra_tlv {
__le16 tag;
__le16 len;
@@ -481,7 +470,8 @@ struct bss_mld_tlv {
u8 own_mld_id;
u8 mac_addr[ETH_ALEN];
u8 remap_idx;
- u8 __rsv[3];
+ u8 link_id;
+ u8 __rsv[2];
} __packed;
struct sta_rec_ht_uni {
@@ -530,6 +520,9 @@ struct sec_key_uni {
u8 key_len;
u8 need_resp;
u8 key[32];
+ u8 pn[6];
+ u8 bcn_mode;
+ u8 _rsv;
} __packed;
struct sta_rec_sec_uni {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
index 30b40f4a91be..d14b626ee511 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
@@ -459,14 +459,15 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
struct pci_dev *pci_dev = pdev_ptr;
- u32 hif1_ofs = 0;
+ u32 hif1_ofs;
if (!wed_enable)
return 0;
- dev->has_rro = true;
+ dev->mt76.hwrro_mode = is_mt7996(&dev->mt76) ? MT76_HWRRO_V3
+ : MT76_HWRRO_V3_1;
- hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+ hif1_ofs = dev->hif2 ? MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0) : 0;
if (hif2)
wed = &dev->mt76.mmio.wed_hif2;
@@ -490,11 +491,18 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
wed->wlan.wpdma_tx = wed->wlan.phy_base + hif1_ofs +
MT_TXQ_RING_BASE(0) +
MT7996_TXQ_BAND2 * MT_RING_SIZE;
- if (dev->has_rro) {
- wed->wlan.wpdma_txfree = wed->wlan.phy_base + hif1_ofs +
- MT_RXQ_RING_BASE(0) +
- MT7996_RXQ_TXFREE2 * MT_RING_SIZE;
- wed->wlan.txfree_tbit = ffs(MT_INT_RX_TXFREE_EXT) - 1;
+ if (mt7996_has_hwrro(dev)) {
+ if (is_mt7996(&dev->mt76)) {
+ wed->wlan.txfree_tbit = ffs(MT_INT_RX_TXFREE_EXT) - 1;
+ wed->wlan.wpdma_txfree = wed->wlan.phy_base + hif1_ofs +
+ MT_RXQ_RING_BASE(0) +
+ MT7996_RXQ_TXFREE2 * MT_RING_SIZE;
+ } else {
+ wed->wlan.txfree_tbit = ffs(MT_INT_RX_TXFREE_BAND1_EXT) - 1;
+ wed->wlan.wpdma_txfree = wed->wlan.phy_base + hif1_ofs +
+ MT_RXQ_RING_BASE(0) +
+ MT7996_RXQ_MCU_WA_EXT * MT_RING_SIZE;
+ }
} else {
wed->wlan.wpdma_txfree = wed->wlan.phy_base + hif1_ofs +
MT_RXQ_RING_BASE(0) +
@@ -503,14 +511,14 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
}
wed->wlan.wpdma_rx_glo = wed->wlan.phy_base + hif1_ofs + MT_WFDMA0_GLO_CFG;
- wed->wlan.wpdma_rx = wed->wlan.phy_base + hif1_ofs +
- MT_RXQ_RING_BASE(MT7996_RXQ_BAND0) +
- MT7996_RXQ_BAND0 * MT_RING_SIZE;
+ wed->wlan.wpdma_rx[0] = wed->wlan.phy_base + hif1_ofs +
+ MT_RXQ_RING_BASE(MT7996_RXQ_BAND2) +
+ MT7996_RXQ_BAND2 * MT_RING_SIZE;
wed->wlan.id = MT7996_DEVICE_ID_2;
wed->wlan.tx_tbit[0] = ffs(MT_INT_TX_DONE_BAND2) - 1;
} else {
- wed->wlan.hw_rro = dev->has_rro; /* default on */
+ wed->wlan.hw_rro = mt7996_has_hwrro(dev);
wed->wlan.wpdma_int = wed->wlan.phy_base + MT_INT_SOURCE_CSR;
wed->wlan.wpdma_mask = wed->wlan.phy_base + MT_INT_MASK_CSR;
wed->wlan.wpdma_tx = wed->wlan.phy_base + MT_TXQ_RING_BASE(0) +
@@ -518,16 +526,26 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
wed->wlan.wpdma_rx_glo = wed->wlan.phy_base + MT_WFDMA0_GLO_CFG;
- wed->wlan.wpdma_rx = wed->wlan.phy_base +
- MT_RXQ_RING_BASE(MT7996_RXQ_BAND0) +
- MT7996_RXQ_BAND0 * MT_RING_SIZE;
+ wed->wlan.wpdma_rx[0] = wed->wlan.phy_base +
+ MT_RXQ_RING_BASE(MT7996_RXQ_BAND0) +
+ MT7996_RXQ_BAND0 * MT_RING_SIZE;
wed->wlan.wpdma_rx_rro[0] = wed->wlan.phy_base +
MT_RXQ_RING_BASE(MT7996_RXQ_RRO_BAND0) +
MT7996_RXQ_RRO_BAND0 * MT_RING_SIZE;
- wed->wlan.wpdma_rx_rro[1] = wed->wlan.phy_base + hif1_ofs +
- MT_RXQ_RING_BASE(MT7996_RXQ_RRO_BAND2) +
- MT7996_RXQ_RRO_BAND2 * MT_RING_SIZE;
+ if (is_mt7996(&dev->mt76)) {
+ wed->wlan.wpdma_rx_rro[1] = wed->wlan.phy_base + hif1_ofs +
+ MT_RXQ_RING_BASE(MT7996_RXQ_RRO_BAND2) +
+ MT7996_RXQ_RRO_BAND2 * MT_RING_SIZE;
+ } else {
+ wed->wlan.wpdma_rx_rro[1] = wed->wlan.phy_base + hif1_ofs +
+ MT_RXQ_RING_BASE(MT7996_RXQ_RRO_BAND1) +
+ MT7996_RXQ_RRO_BAND1 * MT_RING_SIZE;
+ wed->wlan.wpdma_rx[1] = wed->wlan.phy_base + hif1_ofs +
+ MT_RXQ_RING_BASE(MT7996_RXQ_BAND1) +
+ MT7996_RXQ_BAND1 * MT_RING_SIZE;
+ }
+
wed->wlan.wpdma_rx_pg = wed->wlan.phy_base +
MT_RXQ_RING_BASE(MT7996_RXQ_MSDU_PG_BAND0) +
MT7996_RXQ_MSDU_PG_BAND0 * MT_RING_SIZE;
@@ -537,10 +555,14 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
wed->wlan.rx_size = SKB_WITH_OVERHEAD(MT_RX_BUF_SIZE);
wed->wlan.rx_tbit[0] = ffs(MT_INT_RX_DONE_BAND0) - 1;
- wed->wlan.rx_tbit[1] = ffs(MT_INT_RX_DONE_BAND2) - 1;
-
wed->wlan.rro_rx_tbit[0] = ffs(MT_INT_RX_DONE_RRO_BAND0) - 1;
- wed->wlan.rro_rx_tbit[1] = ffs(MT_INT_RX_DONE_RRO_BAND2) - 1;
+ if (is_mt7996(&dev->mt76)) {
+ wed->wlan.rx_tbit[1] = ffs(MT_INT_RX_DONE_BAND2) - 1;
+ wed->wlan.rro_rx_tbit[1] = ffs(MT_INT_RX_DONE_RRO_BAND2) - 1;
+ } else {
+ wed->wlan.rx_tbit[1] = ffs(MT_INT_RX_DONE_BAND1) - 1;
+ wed->wlan.rro_rx_tbit[1] = ffs(MT_INT_RX_DONE_RRO_BAND1) - 1;
+ }
wed->wlan.rx_pg_tbit[0] = ffs(MT_INT_RX_DONE_MSDU_PG_BAND0) - 1;
wed->wlan.rx_pg_tbit[1] = ffs(MT_INT_RX_DONE_MSDU_PG_BAND1) - 1;
@@ -548,16 +570,27 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
wed->wlan.tx_tbit[0] = ffs(MT_INT_TX_DONE_BAND0) - 1;
wed->wlan.tx_tbit[1] = ffs(MT_INT_TX_DONE_BAND1) - 1;
- if (dev->has_rro) {
- wed->wlan.wpdma_txfree = wed->wlan.phy_base + MT_RXQ_RING_BASE(0) +
- MT7996_RXQ_TXFREE0 * MT_RING_SIZE;
- wed->wlan.txfree_tbit = ffs(MT_INT_RX_TXFREE_MAIN) - 1;
+ if (is_mt7996(&dev->mt76)) {
+ if (mt7996_has_hwrro(dev)) {
+ wed->wlan.wpdma_txfree = wed->wlan.phy_base +
+ MT_RXQ_RING_BASE(0) +
+ MT7996_RXQ_TXFREE0 * MT_RING_SIZE;
+ wed->wlan.txfree_tbit = ffs(MT_INT_RX_TXFREE_MAIN) - 1;
+ } else {
+ wed->wlan.wpdma_txfree = wed->wlan.phy_base +
+ MT_RXQ_RING_BASE(0) +
+ MT7996_RXQ_MCU_WA_MAIN * MT_RING_SIZE;
+ wed->wlan.txfree_tbit = ffs(MT_INT_RX_DONE_WA_MAIN) - 1;
+ }
} else {
wed->wlan.txfree_tbit = ffs(MT_INT_RX_DONE_WA_MAIN) - 1;
wed->wlan.wpdma_txfree = wed->wlan.phy_base + MT_RXQ_RING_BASE(0) +
MT7996_RXQ_MCU_WA_MAIN * MT_RING_SIZE;
}
dev->mt76.rx_token_size = MT7996_TOKEN_SIZE + wed->wlan.rx_npkt;
+
+ if (dev->hif2 && is_mt7992(&dev->mt76))
+ wed->wlan.id = 0x7992;
}
wed->wlan.nbuf = MT7996_HW_TOKEN_SIZE;
@@ -576,8 +609,10 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
wed->wlan.reset_complete = mt76_wed_reset_complete;
}
- if (mtk_wed_device_attach(wed))
+ if (mtk_wed_device_attach(wed)) {
+ dev->mt76.hwrro_mode = MT76_HWRRO_OFF;
return 0;
+ }
*irq = wed->irq;
dev->mt76.dma_dev = wed->dev;
@@ -690,12 +725,18 @@ static void mt7996_irq_tasklet(struct tasklet_struct *t)
dev->mt76.mmio.irqmask);
if (intr1 & MT_INT_RX_TXFREE_EXT)
napi_schedule(&dev->mt76.napi[MT_RXQ_TXFREE_BAND2]);
+
+ if (intr1 & MT_INT_RX_DONE_BAND2_EXT)
+ napi_schedule(&dev->mt76.napi[MT_RXQ_BAND2]);
+
+ if (intr1 & MT_INT_RX_TXFREE_BAND1_EXT)
+ napi_schedule(&dev->mt76.napi[MT_RXQ_BAND1_WA]);
}
if (mtk_wed_device_active(wed)) {
mtk_wed_device_irq_set_mask(wed, 0);
intr = mtk_wed_device_irq_get(wed, dev->mt76.mmio.irqmask);
- intr |= (intr1 & ~MT_INT_RX_TXFREE_EXT);
+ intr |= (intr1 & ~MT_INT_TX_RX_DONE_EXT);
} else {
mt76_wr(dev, MT_INT_MASK_CSR, 0);
if (dev->hif2)
@@ -781,6 +822,8 @@ struct mt7996_dev *mt7996_mmio_probe(struct device *pdev,
.rx_skb = mt7996_queue_rx_skb,
.rx_check = mt7996_rx_check,
.rx_poll_complete = mt7996_rx_poll_complete,
+ .rx_rro_ind_process = mt7996_rro_rx_process,
+ .rx_rro_add_msdu_page = mt7996_rro_msdu_page_add,
.update_survey = mt7996_update_channel,
.set_channel = mt7996_set_channel,
.vif_link_add = mt7996_vif_link_add,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
index 33ac16b64ef1..8ec2acdb3319 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
@@ -112,6 +112,8 @@
#define MT7996_CRIT_TEMP 110
#define MT7996_MAX_TEMP 120
+#define MT7996_MAX_HIF_RXD_IN_PG 5
+#define MT7996_RRO_MSDU_PG_HASH_SIZE 127
#define MT7996_RRO_MAX_SESSION 1024
#define MT7996_RRO_WINDOW_MAX_LEN 1024
#define MT7996_RRO_ADDR_ELEM_LEN 128
@@ -128,6 +130,10 @@
#define MT7996_RX_MSDU_PAGE_SIZE (128 + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+/* RRO 3.1 */
+#define MT7996_RRO_MSDU_PG_CR_CNT 8
+#define MT7996_RRO_MSDU_PG_SIZE_PER_CR 0x10000
+
struct mt7996_vif;
struct mt7996_sta;
struct mt7996_dfs_pulse;
@@ -178,7 +184,7 @@ enum mt7996_rxq_id {
MT7996_RXQ_BAND1 = 5, /* for mt7992 */
MT7996_RXQ_BAND2 = 5,
MT7996_RXQ_RRO_BAND0 = 8,
- MT7996_RXQ_RRO_BAND1 = 8,/* unused */
+ MT7996_RXQ_RRO_BAND1 = 9,
MT7996_RXQ_RRO_BAND2 = 6,
MT7996_RXQ_MSDU_PG_BAND0 = 10,
MT7996_RXQ_MSDU_PG_BAND1 = 11,
@@ -187,6 +193,7 @@ enum mt7996_rxq_id {
MT7996_RXQ_TXFREE1 = 9,
MT7996_RXQ_TXFREE2 = 7,
MT7996_RXQ_RRO_IND = 0,
+ MT7996_RXQ_RRO_RXDMAD_C = 0,
MT7990_RXQ_TXFREE0 = 6,
MT7990_RXQ_TXFREE1 = 7,
};
@@ -248,11 +255,16 @@ struct mt7996_vif_link {
struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
struct cfg80211_bitrate_mask bitrate_mask;
+
+ u8 mld_idx;
};
struct mt7996_vif {
struct mt7996_vif_link deflink; /* must be first */
struct mt76_vif_data mt76;
+
+ u8 mld_group_idx;
+ u8 mld_remap_idx;
};
/* crash-dump */
@@ -270,15 +282,17 @@ struct mt7996_hif {
struct device *dev;
void __iomem *regs;
int irq;
+
+ enum pci_bus_speed speed;
+ enum pcie_link_width width;
};
+#define WED_RRO_ADDR_SIGNATURE_MASK GENMASK(31, 24)
+#define WED_RRO_ADDR_COUNT_MASK GENMASK(14, 4)
+#define WED_RRO_ADDR_HEAD_HIGH_MASK GENMASK(3, 0)
struct mt7996_wed_rro_addr {
- u32 head_low;
- u32 head_high : 4;
- u32 count: 11;
- u32 oor: 1;
- u32 rsv : 8;
- u32 signature : 8;
+ __le32 head_low;
+ __le32 data;
};
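Dropping the C bitfields in favour of an explicit __le32 word plus GENMASK()-based field masks makes the shared descriptor layout endian-safe. A minimal sketch of how the repacked word might be accessed (the helper names are hypothetical; le32_get_bits() and le32p_replace_bits() are the stock <linux/bitfield.h> accessors):

static inline u32 wed_rro_addr_count(const struct mt7996_wed_rro_addr *e)
{
	/* eleven-bit MSDU count, bits 14..4 of the packed word */
	return le32_get_bits(e->data, WED_RRO_ADDR_COUNT_MASK);
}

static inline void wed_rro_addr_set_signature(struct mt7996_wed_rro_addr *e,
					      u8 sig)
{
	/* signature, bits 31..24 of the packed word */
	le32p_replace_bits(&e->data, sig, WED_RRO_ADDR_SIGNATURE_MASK);
}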
struct mt7996_wed_rro_session_id {
@@ -286,6 +300,44 @@ struct mt7996_wed_rro_session_id {
u16 id;
};
+struct mt7996_msdu_page {
+ struct list_head list;
+
+ struct mt76_queue *q;
+ dma_addr_t dma_addr;
+ void *buf;
+};
+
+/* data1 */
+#define RRO_HIF_DATA1_LS_MASK BIT(30)
+#define RRO_HIF_DATA1_SDL_MASK GENMASK(29, 16)
+/* data4 */
+#define RRO_HIF_DATA4_RX_TOKEN_ID_MASK GENMASK(15, 0)
+struct mt7996_rro_hif {
+ __le32 data0;
+ __le32 data1;
+ __le32 data2;
+ __le32 data3;
+ __le32 data4;
+ __le32 data5;
+};
+
+#define MSDU_PAGE_INFO_OWNER_MASK BIT(31)
+#define MSDU_PAGE_INFO_PG_HIGH_MASK GENMASK(3, 0)
+struct mt7996_msdu_page_info {
+ struct mt7996_rro_hif rxd[MT7996_MAX_HIF_RXD_IN_PG];
+ __le32 pg_low;
+ __le32 data;
+};
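The same packed-word convention applies to the mt7996_rro_hif RXD words above; these one-line helpers are illustrative only:

static inline bool rro_hif_is_last(const struct mt7996_rro_hif *rxd)
{
	/* LS: last segment of the current MSDU */
	return le32_get_bits(rxd->data1, RRO_HIF_DATA1_LS_MASK);
}

static inline u16 rro_hif_rx_token(const struct mt7996_rro_hif *rxd)
{
	return le32_get_bits(rxd->data4, RRO_HIF_DATA4_RX_TOKEN_ID_MASK);
}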
+
+#define MT7996_MAX_RRO_RRS_RING 4
+struct mt7996_rro_queue_regs_emi {
+ struct {
+ __le16 idx;
+ __le16 rsv;
+ } ring[MT7996_MAX_RRO_RRS_RING];
+};
+
struct mt7996_phy {
struct mt76_phy *mt76;
struct mt7996_dev *dev;
@@ -337,6 +389,9 @@ struct mt7996_dev {
u32 q_int_mask[MT7996_MAX_QUEUE];
u32 q_wfdma_mask;
+ u64 mld_idx_mask;
+ u64 mld_remap_idx_mask;
+
const struct mt76_bus_ops *bus_ops;
struct mt7996_phy phy;
@@ -377,7 +432,6 @@ struct mt7996_dev {
bool flash_mode:1;
bool has_eht:1;
- bool has_rro:1;
struct {
struct {
@@ -392,10 +446,25 @@ struct mt7996_dev {
void *ptr;
dma_addr_t phy_addr;
} session;
+ struct {
+ void *ptr;
+ dma_addr_t phy_addr;
+ } msdu_pg[MT7996_RRO_MSDU_PG_CR_CNT];
+ struct {
+ struct mt7996_rro_queue_regs_emi *ptr;
+ dma_addr_t phy_addr;
+ } emi_rings_cpu;
+ struct {
+ struct mt7996_rro_queue_regs_emi *ptr;
+ dma_addr_t phy_addr;
+ } emi_rings_dma;
struct work_struct work;
struct list_head poll_list;
spinlock_t lock;
+
+ struct list_head page_cache;
+ struct list_head page_map[MT7996_RRO_MSDU_PG_HASH_SIZE];
} wed_rro;
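page_map is a chained hash of in-flight MSDU pages. The hash function is not visible in this hunk, so the lookup below is only a sketch that assumes the page's DMA address, reduced modulo the table size, is the key:

static struct mt7996_msdu_page *
mt7996_msdu_page_find(struct mt7996_dev *dev, dma_addr_t dma_addr)
{
	u32 hash = (u32)dma_addr % MT7996_RRO_MSDU_PG_HASH_SIZE;
	struct mt7996_msdu_page *page;

	list_for_each_entry(page, &dev->wed_rro.page_map[hash], list)
		if (page->dma_addr == dma_addr)
			return page;

	return NULL;
}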
bool ibf;
@@ -552,6 +621,7 @@ extern struct pci_driver mt7996_hif_driver;
struct mt7996_dev *mt7996_mmio_probe(struct device *pdev,
void __iomem *mem_base, u32 device_id);
+void mt7996_rro_hw_init(struct mt7996_dev *dev);
void mt7996_wfsys_reset(struct mt7996_dev *dev);
irqreturn_t mt7996_irq_handler(int irq, void *dev_instance);
u64 __mt7996_get_tsf(struct ieee80211_hw *hw, struct mt7996_vif_link *link);
@@ -604,16 +674,15 @@ int mt7996_mcu_teardown_mld_sta(struct mt7996_dev *dev,
struct mt7996_sta_link *msta_link);
int mt7996_mcu_add_tx_ba(struct mt7996_dev *dev,
struct ieee80211_ampdu_params *params,
- struct mt7996_vif_link *link,
- struct mt7996_sta_link *msta_link, bool enable);
+ struct ieee80211_vif *vif, bool enable);
int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev,
struct ieee80211_ampdu_params *params,
- struct mt7996_vif_link *link, bool enable);
+ struct ieee80211_vif *vif, bool enable);
int mt7996_mcu_update_bss_color(struct mt7996_dev *dev,
struct mt76_vif_link *mlink,
struct cfg80211_he_bss_color *he_bss_color);
int mt7996_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *link_conf);
+ struct ieee80211_bss_conf *link_conf, bool enabled);
int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
struct ieee80211_bss_conf *link_conf,
struct mt7996_vif_link *link, u32 changed);
@@ -669,6 +738,11 @@ int mt7996_mcu_get_all_sta_info(struct mt7996_phy *phy, u16 tag);
int mt7996_mcu_wed_rro_reset_sessions(struct mt7996_dev *dev, u16 id);
int mt7996_mcu_set_sniffer_mode(struct mt7996_phy *phy, bool enabled);
+static inline bool mt7996_has_hwrro(struct mt7996_dev *dev)
+{
+ return dev->mt76.hwrro_mode != MT76_HWRRO_OFF;
+}
+
static inline u8 mt7996_max_interface_num(struct mt7996_dev *dev)
{
return min(MT7996_MAX_INTERFACES * (1 + mt7996_band_valid(dev, MT_BAND1) +
@@ -732,6 +806,7 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_key_conf *key, int pid,
enum mt76_txq_id qid, u32 changed);
+void mt7996_mac_update_beacons(struct mt7996_phy *phy);
void mt7996_mac_set_coverage_class(struct mt7996_phy *phy);
void mt7996_mac_work(struct work_struct *work);
void mt7996_mac_reset_work(struct work_struct *work);
@@ -742,6 +817,8 @@ void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
struct mt7996_vif_link *link,
struct mt7996_sta_link *msta_link,
u8 flowid);
+void mt7996_mac_sta_deinit_link(struct mt7996_dev *dev,
+ struct mt7996_sta_link *msta_link);
void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
struct ieee80211_twt_setup *twt);
@@ -752,6 +829,10 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
void mt7996_tx_token_put(struct mt7996_dev *dev);
void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb, u32 *info);
+void mt7996_rro_msdu_page_map_free(struct mt7996_dev *dev);
+int mt7996_rro_msdu_page_add(struct mt76_dev *mdev, struct mt76_queue *q,
+ dma_addr_t dma_addr, void *data);
+void mt7996_rro_rx_process(struct mt76_dev *mdev, void *data);
bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len);
void mt7996_stats_work(struct work_struct *work);
int mt76_dfs_start_rdd(struct mt7996_dev *dev, bool force);
@@ -786,8 +867,6 @@ u32 mt7996_wed_init_buf(void *ptr, dma_addr_t phys, int token_id);
int mt7996_mtk_init_debugfs(struct mt7996_phy *phy, struct dentry *dir);
#endif
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
int mt7996_dma_rro_init(struct mt7996_dev *dev);
-#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/pci.c b/drivers/net/wireless/mediatek/mt76/mt7996/pci.c
index 19e99bc1c6c4..3f49bbbba3b9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/pci.c
@@ -87,6 +87,7 @@ static int mt7996_pci_hif2_probe(struct pci_dev *pdev)
hif->dev = &pdev->dev;
hif->regs = pcim_iomap_table(pdev)[0];
hif->irq = pdev->irq;
+ pcie_bandwidth_available(pdev, NULL, &hif->speed, &hif->width);
spin_lock_bh(&hif_lock);
list_add(&hif->list, &hif_list);
spin_unlock_bh(&hif_lock);
@@ -137,6 +138,7 @@ static int mt7996_pci_probe(struct pci_dev *pdev,
mdev = &dev->mt76;
mt7996_wfsys_reset(dev);
hif2 = mt7996_pci_init_hif2(pdev);
+ dev->hif2 = hif2;
ret = mt7996_mmio_wed_init(dev, pdev, false, &irq);
if (ret < 0)
@@ -161,7 +163,6 @@ static int mt7996_pci_probe(struct pci_dev *pdev,
if (hif2) {
hif2_dev = container_of(hif2->dev, struct pci_dev, dev);
- dev->hif2 = hif2;
ret = mt7996_mmio_wed_init(dev, hif2_dev, true, &hif2_irq);
if (ret < 0)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/regs.h b/drivers/net/wireless/mediatek/mt76/mt7996/regs.h
index e942c0058731..0fa325f87fcd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/regs.h
@@ -88,6 +88,8 @@ enum offs_rev {
#define MT_RRO_BA_BITMAP_BASE1 MT_RRO_TOP(0xC)
#define WF_RRO_AXI_MST_CFG MT_RRO_TOP(0xB8)
#define WF_RRO_AXI_MST_CFG_DIDX_OK BIT(12)
+
+#define MT_RRO_ADDR_ARRAY_BASE0 MT_RRO_TOP(0x30)
#define MT_RRO_ADDR_ARRAY_BASE1 MT_RRO_TOP(0x34)
#define MT_RRO_ADDR_ARRAY_ELEM_ADDR_SEG_MODE BIT(31)
@@ -108,6 +110,19 @@ enum offs_rev {
#define MT_RRO_ADDR_ELEM_SEG_ADDR0 MT_RRO_TOP(0x400)
+#define MT_RRO_3_0_EMU_CONF MT_RRO_TOP(0x600)
+#define MT_RRO_3_0_EMU_CONF_EN_MASK BIT(11)
+
+#define MT_RRO_3_1_GLOBAL_CONFIG MT_RRO_TOP(0x604)
+#define MT_RRO_3_1_GLOBAL_CONFIG_RXDMAD_SEL BIT(6)
+#define MT_RRO_3_1_GLOBAL_CONFIG_RX_CIDX_RD_EN BIT(3)
+#define MT_RRO_3_1_GLOBAL_CONFIG_RX_DIDX_WR_EN BIT(2)
+#define MT_RRO_3_1_GLOBAL_CONFIG_INTERLEAVE_EN BIT(0)
+
+#define MT_RRO_MSDU_PG_SEG_ADDR0 MT_RRO_TOP(0x620)
+#define MT_RRO_RX_RING_AP_CIDX_ADDR MT_RRO_TOP(0x6f0)
+#define MT_RRO_RX_RING_AP_DIDX_ADDR MT_RRO_TOP(0x6f4)
+
#define MT_RRO_ACK_SN_CTRL MT_RRO_TOP(0x50)
#define MT_RRO_ACK_SN_CTRL_SN_MASK GENMASK(27, 16)
#define MT_RRO_ACK_SN_CTRL_SESSION_MASK GENMASK(11, 0)
@@ -412,7 +427,9 @@ enum offs_rev {
#define MT_WFDMA0_RX_INT_PCIE_SEL MT_WFDMA0(0x154)
#define MT_WFDMA0_RX_INT_SEL_RING3 BIT(3)
+#define MT_WFDMA0_RX_INT_SEL_RING5 BIT(5)
#define MT_WFDMA0_RX_INT_SEL_RING6 BIT(6)
+#define MT_WFDMA0_RX_INT_SEL_RING9 BIT(9)
#define MT_WFDMA0_MCU_HOST_INT_ENA MT_WFDMA0(0x1f4)
@@ -430,6 +447,7 @@ enum offs_rev {
#define MT_WFDMA0_PAUSE_RX_Q_RRO_TH MT_WFDMA0(0x27c)
#define WF_WFDMA0_GLO_CFG_EXT0 MT_WFDMA0(0x2b0)
+#define WF_WFDMA0_GLO_CFG_EXT0_OUTSTAND_MASK GENMASK(27, 24)
#define WF_WFDMA0_GLO_CFG_EXT0_RX_WB_RXD BIT(18)
#define WF_WFDMA0_GLO_CFG_EXT0_WED_MERGE_MODE BIT(14)
@@ -451,6 +469,8 @@ enum offs_rev {
#define MT_WFDMA_HOST_CONFIG MT_WFDMA_EXT_CSR(0x30)
#define MT_WFDMA_HOST_CONFIG_PDMA_BAND BIT(0)
+#define MT_WFDMA_HOST_CONFIG_BAND0_PCIE1 BIT(20)
+#define MT_WFDMA_HOST_CONFIG_BAND1_PCIE1 BIT(21)
#define MT_WFDMA_HOST_CONFIG_BAND2_PCIE1 BIT(22)
#define MT_WFDMA_EXT_CSR_HIF_MISC MT_WFDMA_EXT_CSR(0x44)
@@ -459,6 +479,9 @@ enum offs_rev {
#define MT_WFDMA_AXI_R2A_CTRL MT_WFDMA_EXT_CSR(0x500)
#define MT_WFDMA_AXI_R2A_CTRL_OUTSTAND_MASK GENMASK(4, 0)
+#define MT_WFDMA_AXI_R2A_CTRL2 MT_WFDMA_EXT_CSR(0x508)
+#define MT_WFDMA_AXI_R2A_CTRL2_OUTSTAND_MASK GENMASK(31, 28)
+
#define MT_PCIE_RECOG_ID 0xd7090
#define MT_PCIE_RECOG_ID_MASK GENMASK(30, 0)
#define MT_PCIE_RECOG_ID_SEM BIT(31)
@@ -492,6 +515,8 @@ enum offs_rev {
#define MT_RXQ_RING_BASE(q) (MT_Q_BASE(__RXQ(q)) + 0x500)
#define MT_RXQ_RRO_IND_RING_BASE MT_RRO_TOP(0x40)
+#define MT_RXQ_RRO_AP_RING_BASE MT_RRO_TOP(0x650)
+
#define MT_MCUQ_EXT_CTRL(q) (MT_Q_BASE(q) + 0x600 + \
MT_MCUQ_ID(q) * 0x4)
#define MT_RXQ_EXT_CTRL(q) (MT_Q_BASE(__RXQ(q)) + 0x680 + \
@@ -514,7 +539,9 @@ enum offs_rev {
#define MT_INT_RX_DONE_WA_EXT BIT(3) /* for mt7992 */
#define MT_INT_RX_DONE_WA_TRI BIT(3)
#define MT_INT_RX_TXFREE_MAIN BIT(17)
+#define MT_INT_RX_TXFREE_BAND1 BIT(15)
#define MT_INT_RX_TXFREE_TRI BIT(15)
+#define MT_INT_RX_TXFREE_BAND1_EXT BIT(19) /* for mt7992 with two PCIe */
#define MT_INT_RX_TXFREE_BAND0_MT7990 BIT(14)
#define MT_INT_RX_TXFREE_BAND1_MT7990 BIT(15)
#define MT_INT_RX_DONE_BAND2_EXT BIT(23)
@@ -522,9 +549,10 @@ enum offs_rev {
#define MT_INT_MCU_CMD BIT(29)
#define MT_INT_RX_DONE_RRO_BAND0 BIT(16)
-#define MT_INT_RX_DONE_RRO_BAND1 BIT(16)
+#define MT_INT_RX_DONE_RRO_BAND1 BIT(17)
#define MT_INT_RX_DONE_RRO_BAND2 BIT(14)
#define MT_INT_RX_DONE_RRO_IND BIT(11)
+#define MT_INT_RX_DONE_RRO_RXDMAD_C BIT(11)
#define MT_INT_RX_DONE_MSDU_PG_BAND0 BIT(18)
#define MT_INT_RX_DONE_MSDU_PG_BAND1 BIT(19)
#define MT_INT_RX_DONE_MSDU_PG_BAND2 BIT(23)
@@ -552,6 +580,8 @@ enum offs_rev {
#define MT_INT_RRO_RX_DONE (MT_INT_RX(MT_RXQ_RRO_BAND0) | \
MT_INT_RX(MT_RXQ_RRO_BAND1) | \
MT_INT_RX(MT_RXQ_RRO_BAND2) | \
+ MT_INT_RX(MT_RXQ_RRO_IND) | \
+ MT_INT_RX(MT_RXQ_RRO_RXDMAD_C) | \
MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND0) | \
MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND1) | \
MT_INT_RX(MT_RXQ_MSDU_PAGE_BAND2))
diff --git a/drivers/net/wireless/mediatek/mt76/scan.c b/drivers/net/wireless/mediatek/mt76/scan.c
index 9b20ccbeb8cf..5a875aac410f 100644
--- a/drivers/net/wireless/mediatek/mt76/scan.c
+++ b/drivers/net/wireless/mediatek/mt76/scan.c
@@ -16,11 +16,13 @@ static void mt76_scan_complete(struct mt76_dev *dev, bool abort)
clear_bit(MT76_SCANNING, &phy->state);
- if (dev->scan.chan && phy->main_chandef.chan)
+ if (dev->scan.chan && phy->main_chandef.chan &&
+ !test_bit(MT76_MCU_RESET, &dev->phy.state))
mt76_set_channel(phy, &phy->main_chandef, false);
mt76_put_vif_phy_link(phy, dev->scan.vif, dev->scan.mlink);
memset(&dev->scan, 0, sizeof(dev->scan));
- ieee80211_scan_completed(phy->hw, &info);
+ if (!test_bit(MT76_MCU_RESET, &dev->phy.state))
+ ieee80211_scan_completed(phy->hw, &info);
}
void mt76_abort_scan(struct mt76_dev *dev)
@@ -28,6 +30,7 @@ void mt76_abort_scan(struct mt76_dev *dev)
cancel_delayed_work_sync(&dev->scan_work);
mt76_scan_complete(dev, true);
}
+EXPORT_SYMBOL_GPL(mt76_abort_scan);
static void
mt76_scan_send_probe(struct mt76_dev *dev, struct cfg80211_ssid *ssid)
@@ -112,9 +115,6 @@ void mt76_scan_work(struct work_struct *work)
local_bh_enable();
out:
- if (!duration)
- return;
-
if (dev->scan.chan)
duration = max_t(int, duration,
msecs_to_jiffies(req->duration +
@@ -139,7 +139,8 @@ int mt76_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mutex_lock(&dev->mutex);
- if (dev->scan.req || phy->roc_vif) {
+ if (dev->scan.req || phy->roc_vif ||
+ test_bit(MT76_MCU_RESET, &dev->phy.state)) {
ret = -EBUSY;
goto out;
}
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index e6cf16706667..b78ae6a34b65 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -332,6 +332,7 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
struct mt76_wcid *wcid, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
struct sk_buff_head *head;
if (mt76_testmode_enabled(phy)) {
@@ -349,7 +350,8 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);
if ((info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
- (info->control.flags & IEEE80211_TX_CTRL_DONT_USE_RATE_MASK))
+ ((info->control.flags & IEEE80211_TX_CTRL_DONT_USE_RATE_MASK) &&
+ ieee80211_is_probe_req(hdr->frame_control)))
head = &wcid->tx_offchannel;
else
head = &wcid->tx_pending;
@@ -616,7 +618,8 @@ mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid,
!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
!ieee80211_is_data(hdr->frame_control) &&
(!ieee80211_is_bufferable_mmpdu(skb) ||
- ieee80211_is_deauth(hdr->frame_control)))
+ ieee80211_is_deauth(hdr->frame_control) ||
+ head == &wcid->tx_offchannel))
qid = MT_TXQ_PSD;
q = phy->q_tx[qid];
@@ -644,6 +647,7 @@ mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid,
static void mt76_txq_schedule_pending(struct mt76_phy *phy)
{
LIST_HEAD(tx_list);
+ int ret = 0;
if (list_empty(&phy->tx_list))
return;
@@ -655,13 +659,13 @@ static void mt76_txq_schedule_pending(struct mt76_phy *phy)
list_splice_init(&phy->tx_list, &tx_list);
while (!list_empty(&tx_list)) {
struct mt76_wcid *wcid;
- int ret;
wcid = list_first_entry(&tx_list, struct mt76_wcid, tx_list);
list_del_init(&wcid->tx_list);
spin_unlock(&phy->tx_lock);
- ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_offchannel);
+ if (ret >= 0)
+ ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_offchannel);
if (ret >= 0 && !phy->offchannel)
ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_pending);
spin_lock(&phy->tx_lock);
@@ -670,9 +674,6 @@ static void mt76_txq_schedule_pending(struct mt76_phy *phy)
!skb_queue_empty(&wcid->tx_offchannel) &&
list_empty(&wcid->tx_list))
list_add_tail(&wcid->tx_list, &phy->tx_list);
-
- if (ret < 0)
- break;
}
spin_unlock(&phy->tx_lock);
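Moving ret out of the loop changes the error path: the old break left any remaining entries stranded on the local tx_list, detached from phy->tx_list. Now the loop keeps walking, skips further scheduling once an error has been seen, and still re-queues every wcid that has pending frames.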
diff --git a/drivers/net/wireless/mediatek/mt76/wed.c b/drivers/net/wireless/mediatek/mt76/wed.c
index 63f69e152b1c..907a8e43e72a 100644
--- a/drivers/net/wireless/mediatek/mt76/wed.c
+++ b/drivers/net/wireless/mediatek/mt76/wed.c
@@ -118,7 +118,7 @@ int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
case MT76_WED_Q_TXFREE:
/* WED txfree queue needs ring to be initialized before setup */
q->flags = 0;
- mt76_dma_queue_reset(dev, q);
+ mt76_dma_queue_reset(dev, q, true);
mt76_dma_rx_fill(dev, q, false);
ret = mtk_wed_device_txfree_ring_setup(q->wed, q->regs);
@@ -133,21 +133,21 @@ int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
break;
case MT76_WED_RRO_Q_DATA:
q->flags &= ~MT_QFLAG_WED;
- __mt76_dma_queue_reset(dev, q, false);
+ mt76_dma_queue_reset(dev, q, false);
mtk_wed_device_rro_rx_ring_setup(q->wed, ring, q->regs);
q->head = q->ndesc - 1;
q->queued = q->head;
break;
case MT76_WED_RRO_Q_MSDU_PG:
q->flags &= ~MT_QFLAG_WED;
- __mt76_dma_queue_reset(dev, q, false);
+ mt76_dma_queue_reset(dev, q, false);
mtk_wed_device_msdu_pg_rx_ring_setup(q->wed, ring, q->regs);
q->head = q->ndesc - 1;
q->queued = q->head;
break;
case MT76_WED_RRO_Q_IND:
q->flags &= ~MT_QFLAG_WED;
- mt76_dma_queue_reset(dev, q);
+ mt76_dma_queue_reset(dev, q, true);
mt76_dma_rx_fill(dev, q, false);
mtk_wed_device_ind_rx_ring_setup(q->wed, q->regs);
break;
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
index a395829ebadf..c39e7f313ea1 100644
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
@@ -794,12 +794,6 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
-static int change_bss(struct wiphy *wiphy, struct net_device *dev,
- struct bss_parameters *params)
-{
- return 0;
-}
-
static int set_wiphy_params(struct wiphy *wiphy, int radio_idx, u32 changed)
{
int ret = -EINVAL;
@@ -1709,7 +1703,6 @@ static const struct cfg80211_ops wilc_cfg80211_ops = {
.change_station = change_station,
.get_station = get_station,
.dump_station = dump_station,
- .change_bss = change_bss,
.set_wiphy_params = set_wiphy_params,
.external_auth = external_auth,
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
index 131388886acb..cfabd5aebb54 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
@@ -41,10 +41,10 @@ static const struct wilc_cfg_word g_cfg_word[] = {
};
static const struct wilc_cfg_str g_cfg_str[] = {
- {WID_FIRMWARE_VERSION, NULL},
- {WID_MAC_ADDR, NULL},
- {WID_ASSOC_RES_INFO, NULL},
- {WID_NIL, NULL}
+ {WID_FIRMWARE_VERSION, 0, NULL},
+ {WID_MAC_ADDR, 0, NULL},
+ {WID_ASSOC_RES_INFO, 0, NULL},
+ {WID_NIL, 0, NULL}
};
#define WILC_RESP_MSG_TYPE_CONFIG_REPLY 'R'
@@ -147,44 +147,58 @@ static void wilc_wlan_parse_response_frame(struct wilc *wl, u8 *info, int size)
switch (FIELD_GET(WILC_WID_TYPE, wid)) {
case WID_CHAR:
+ len = 3;
+ if (len + 2 > size)
+ return;
+
while (cfg->b[i].id != WID_NIL && cfg->b[i].id != wid)
i++;
if (cfg->b[i].id == wid)
cfg->b[i].val = info[4];
- len = 3;
break;
case WID_SHORT:
+ len = 4;
+ if (len + 2 > size)
+ return;
+
while (cfg->hw[i].id != WID_NIL && cfg->hw[i].id != wid)
i++;
if (cfg->hw[i].id == wid)
cfg->hw[i].val = get_unaligned_le16(&info[4]);
- len = 4;
break;
case WID_INT:
+ len = 6;
+ if (len + 2 > size)
+ return;
+
while (cfg->w[i].id != WID_NIL && cfg->w[i].id != wid)
i++;
if (cfg->w[i].id == wid)
cfg->w[i].val = get_unaligned_le32(&info[4]);
- len = 6;
break;
case WID_STR:
+ len = 2 + get_unaligned_le16(&info[2]);
+
while (cfg->s[i].id != WID_NIL && cfg->s[i].id != wid)
i++;
- if (cfg->s[i].id == wid)
+ if (cfg->s[i].id == wid) {
+ if (len > cfg->s[i].len || (len + 2 > size))
+ return;
+
memcpy(cfg->s[i].str, &info[2],
- get_unaligned_le16(&info[2]) + 2);
+ len);
+ }
- len = 2 + get_unaligned_le16(&info[2]);
break;
default:
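The new checks make the parser robust against truncated or malformed responses: len + 2 > size accounts for the 2-byte WID header in front of each value, and the WID_STR case additionally caps the copy at the destination capacity now recorded in cfg->s[i].len (see the wilc_cfg_str change below), so a response can no longer overflow the fixed-size string buffers.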
@@ -384,12 +398,15 @@ int wilc_wlan_cfg_init(struct wilc *wl)
/* store the string cfg parameters */
wl->cfg.s[i].id = WID_FIRMWARE_VERSION;
wl->cfg.s[i].str = str_vals->firmware_version;
+ wl->cfg.s[i].len = sizeof(str_vals->firmware_version);
i++;
wl->cfg.s[i].id = WID_MAC_ADDR;
wl->cfg.s[i].str = str_vals->mac_address;
+ wl->cfg.s[i].len = sizeof(str_vals->mac_address);
i++;
wl->cfg.s[i].id = WID_ASSOC_RES_INFO;
wl->cfg.s[i].str = str_vals->assoc_rsp;
+ wl->cfg.s[i].len = sizeof(str_vals->assoc_rsp);
i++;
wl->cfg.s[i].id = WID_NIL;
wl->cfg.s[i].str = NULL;
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h
index 7038b74f8e8f..5ae74bced7d7 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h
+++ b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h
@@ -24,12 +24,13 @@ struct wilc_cfg_word {
struct wilc_cfg_str {
u16 id;
+ u16 len;
u8 *str;
};
struct wilc_cfg_str_vals {
- u8 mac_address[7];
- u8 firmware_version[129];
+ u8 mac_address[8];
+ u8 firmware_version[130];
u8 assoc_rsp[WILC_MAX_ASSOC_RESP_FRAME_SIZE];
};
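The one-byte growth of mac_address and firmware_version matches the parser above: it copies the 2-byte little-endian length prefix along with the payload, so each buffer needs the payload size plus two bytes (6 + 2 = 8 for the MAC address, 128 + 2 = 130 for the version string).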
diff --git a/drivers/net/wireless/ralink/rt2x00/Kconfig b/drivers/net/wireless/ralink/rt2x00/Kconfig
index 4d98b7723c56..17f063fc0b57 100644
--- a/drivers/net/wireless/ralink/rt2x00/Kconfig
+++ b/drivers/net/wireless/ralink/rt2x00/Kconfig
@@ -66,7 +66,6 @@ config RT2800PCI
select RT2X00_LIB_PCI
select RT2X00_LIB_FIRMWARE
select RT2X00_LIB_CRYPTO
- select CRC_CCITT
select EEPROM_93CX6
help
This adds support for rt27xx/rt28xx/rt30xx wireless chipset family.
@@ -142,7 +141,6 @@ config RT2800USB
select RT2X00_LIB_USB
select RT2X00_LIB_FIRMWARE
select RT2X00_LIB_CRYPTO
- select CRC_CCITT
help
This adds support for rt27xx/rt28xx/rt30xx wireless chipset family.
Supported chips: RT2770, RT2870 & RT3070, RT3071 & RT3072
@@ -217,6 +215,7 @@ config RT2800SOC
config RT2800_LIB
tristate
+ select CRC_CCITT
config RT2800_LIB_MMIO
tristate
@@ -225,6 +224,7 @@ config RT2800_LIB_MMIO
config RT2X00_LIB_MMIO
tristate
+ select RT2X00_LIB
config RT2X00_LIB_PCI
tristate
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index 7db29e90eb4f..f8a6f9c968a1 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -679,7 +679,7 @@ static void rt2x00lib_rxdone_check_ps(struct rt2x00_dev *rt2x00dev,
/* Check whenever the PHY can be turned off again. */
/* 1. What about buffered unicast traffic for our AID? */
- cam = ieee80211_check_tim(tim_ie, tim_len, rt2x00dev->aid);
+ cam = ieee80211_check_tim(tim_ie, tim_len, rt2x00dev->aid, false);
/* 2. Maybe the AP wants to send multicast/broadcast data? */
cam |= (tim_ie->bitmap_ctrl & 0x01);
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/core.c b/drivers/net/wireless/realtek/rtl8xxxu/core.c
index 831b5025c634..3ded5952729f 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/core.c
@@ -1901,6 +1901,27 @@ static void rtl8xxxu_dump_efuse(struct rtl8xxxu_priv *priv)
priv->efuse_wifi.raw, EFUSE_MAP_LEN, true);
}
+static ssize_t read_file_efuse(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct rtl8xxxu_priv *priv = file_inode(file)->i_private;
+
+ return simple_read_from_buffer(user_buf, count, ppos,
+ priv->efuse_wifi.raw, EFUSE_MAP_LEN);
+}
+
+static const struct debugfs_short_fops fops_efuse = {
+ .read = read_file_efuse,
+};
+
+static void rtl8xxxu_debugfs_init(struct rtl8xxxu_priv *priv)
+{
+ struct dentry *phydir;
+
+ phydir = debugfs_create_dir("rtl8xxxu", priv->hw->wiphy->debugfsdir);
+ debugfs_create_file("efuse", 0400, phydir, priv, &fops_efuse);
+}
+
void rtl8xxxu_reset_8051(struct rtl8xxxu_priv *priv)
{
u8 val8;
@@ -7815,7 +7836,8 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
untested = 0;
break;
case 0x2357:
- if (id->idProduct == 0x0109 || id->idProduct == 0x0135)
+ if (id->idProduct == 0x0109 || id->idProduct == 0x010c ||
+ id->idProduct == 0x0135)
untested = 0;
break;
case 0x0b05:
@@ -7974,6 +7996,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
}
rtl8xxxu_init_led(priv);
+ rtl8xxxu_debugfs_init(priv);
return 0;
@@ -8172,8 +8195,6 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe033, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
-{USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8188, 0xff, 0xff, 0xff),
- .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8189, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9041, 0xff, 0xff, 0xff),
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index 6241e4fed4f6..bcab12c3b4c1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -519,7 +519,7 @@ void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len)
/* 1. What about buffered unicast traffic for our AID? */
u_buffed = ieee80211_check_tim(tim_ie, tim_len,
- rtlpriv->mac80211.assoc_id);
+ rtlpriv->mac80211.assoc_id, false);
/* 2. Maybe the AP wants to send multicast/broadcast data? */
m_buffed = tim_ie->bitmap_ctrl & 0x01;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
index 00a6778df704..9480823af838 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
@@ -291,7 +291,6 @@ static const struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x050d, 0x1102, rtl92cu_hal_cfg)}, /*Belkin - Edimax*/
{RTL_USB_DEVICE(0x050d, 0x11f2, rtl92cu_hal_cfg)}, /*Belkin - ISY*/
{RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/
- {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
{RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
{RTL_USB_DEVICE(0x0846, 0x9043, rtl92cu_hal_cfg)}, /*NG WNA1000Mv2*/
diff --git a/drivers/net/wireless/realtek/rtw88/led.c b/drivers/net/wireless/realtek/rtw88/led.c
index 25aa6cbaa728..4cc62e49d167 100644
--- a/drivers/net/wireless/realtek/rtw88/led.c
+++ b/drivers/net/wireless/realtek/rtw88/led.c
@@ -6,13 +6,17 @@
#include "debug.h"
#include "led.h"
-static int rtw_led_set_blocking(struct led_classdev *led,
- enum led_brightness brightness)
+static int rtw_led_set(struct led_classdev *led,
+ enum led_brightness brightness)
{
struct rtw_dev *rtwdev = container_of(led, struct rtw_dev, led_cdev);
+ mutex_lock(&rtwdev->mutex);
+
rtwdev->chip->ops->led_set(led, brightness);
+ mutex_unlock(&rtwdev->mutex);
+
return 0;
}
@@ -36,10 +40,7 @@ void rtw_led_init(struct rtw_dev *rtwdev)
if (!rtwdev->chip->ops->led_set)
return;
- if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE)
- led->brightness_set = rtwdev->chip->ops->led_set;
- else
- led->brightness_set_blocking = rtw_led_set_blocking;
+ led->brightness_set_blocking = rtw_led_set;
snprintf(rtwdev->led_name, sizeof(rtwdev->led_name),
"rtw88-%s", dev_name(rtwdev->dev));
diff --git a/drivers/net/wireless/realtek/rtw88/sdio.c b/drivers/net/wireless/realtek/rtw88/sdio.c
index cc2d4fef3587..99d7c629eac6 100644
--- a/drivers/net/wireless/realtek/rtw88/sdio.c
+++ b/drivers/net/wireless/realtek/rtw88/sdio.c
@@ -144,6 +144,10 @@ static u32 rtw_sdio_to_io_address(struct rtw_dev *rtwdev, u32 addr,
static bool rtw_sdio_use_direct_io(struct rtw_dev *rtwdev, u32 addr)
{
+ if (!test_bit(RTW_FLAG_POWERON, rtwdev->flags) &&
+ !rtw_sdio_is_bus_addr(addr))
+ return false;
+
return !rtw_sdio_is_sdio30_supported(rtwdev) ||
rtw_sdio_is_bus_addr(addr);
}
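Reduced to a standalone predicate, the decision the hunk above arrives at (hypothetical names): bus-local registers may always be accessed directly, other addresses must not be touched directly while the chip is powered off, and on SDIO 3.0 parts they always take the indirect window:

#include <stdbool.h>

static bool use_direct_io(bool powered_on, bool bus_addr, bool sdio30)
{
        if (!powered_on && !bus_addr)
                return false;           /* MAC is off: indirect only */

        return !sdio30 || bus_addr;
}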
diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c
index f7d1c5d3b92e..86f1b39a967f 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.c
+++ b/drivers/net/wireless/realtek/rtw89/chan.c
@@ -281,6 +281,7 @@ void rtw89_entity_init(struct rtw89_dev *rtwdev)
{
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_entity_mgnt *mgnt = &hal->entity_mgnt;
+ int i, j;
hal->entity_pause = false;
bitmap_zero(hal->entity_map, NUM_OF_RTW89_CHANCTX);
@@ -289,6 +290,11 @@ void rtw89_entity_init(struct rtw89_dev *rtwdev)
INIT_LIST_HEAD(&mgnt->active_list);
+ for (i = 0; i < RTW89_MAX_INTERFACE_NUM; i++) {
+ for (j = 0; j < __RTW89_MLD_MAX_LINK_NUM; j++)
+ mgnt->chanctx_tbl[i][j] = RTW89_CHANCTX_IDLE;
+ }
+
rtw89_config_default_chandef(rtwdev);
}
@@ -353,7 +359,7 @@ static void rtw89_normalize_link_chanctx(struct rtw89_dev *rtwdev,
const struct rtw89_chan *__rtw89_mgnt_chan_get(struct rtw89_dev *rtwdev,
const char *caller_message,
- u8 link_index)
+ u8 link_index, bool nullchk)
{
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_entity_mgnt *mgnt = &hal->entity_mgnt;
@@ -400,6 +406,9 @@ const struct rtw89_chan *__rtw89_mgnt_chan_get(struct rtw89_dev *rtwdev,
return rtw89_chan_get(rtwdev, chanctx_idx);
dflt:
+ if (unlikely(nullchk))
+ return NULL;
+
rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"%s (%s): prefetch NULL on link index %u\n",
__func__, caller_message ?: "", link_index);
diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h
index b1175419f92b..5b22764d5329 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.h
+++ b/drivers/net/wireless/realtek/rtw89/chan.h
@@ -180,10 +180,16 @@ void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev,
const struct rtw89_chan *__rtw89_mgnt_chan_get(struct rtw89_dev *rtwdev,
const char *caller_message,
- u8 link_index);
+ u8 link_index, bool nullchk);
#define rtw89_mgnt_chan_get(rtwdev, link_index) \
- __rtw89_mgnt_chan_get(rtwdev, __func__, link_index)
+ __rtw89_mgnt_chan_get(rtwdev, __func__, link_index, false)
+
+static inline const struct rtw89_chan *
+rtw89_mgnt_chan_get_or_null(struct rtw89_dev *rtwdev, u8 link_index)
+{
+ return __rtw89_mgnt_chan_get(rtwdev, NULL, link_index, true);
+}
struct rtw89_mcc_links_info {
struct rtw89_vif_link *links[NUM_OF_RTW89_MCC_ROLES];
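The new accessor lets a caller tell "nothing prefetched on this link yet" apart from a normal channel, without falling back to the default chandef and emitting the RTW89_DBG_CHAN message. A hypothetical caller, sketched:

static void demo_peek_link0(struct rtw89_dev *rtwdev)
{
        const struct rtw89_chan *chan;

        chan = rtw89_mgnt_chan_get_or_null(rtwdev, 0);
        if (!chan)
                return;         /* link 0 not set up yet: bail out quietly */

        /* ... *chan is safe to read here ... */
}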
diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
index e4e6daf51a1b..0f7ae572ef91 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.c
+++ b/drivers/net/wireless/realtek/rtw89/coex.c
@@ -93,7 +93,7 @@ static const struct rtw89_btc_fbtc_slot s_def[] = {
[CXST_E2G] = __DEF_FBTC_SLOT(5, 0xea5a5a5a, SLOT_MIX),
[CXST_E5G] = __DEF_FBTC_SLOT(5, 0xffffffff, SLOT_ISO),
[CXST_EBT] = __DEF_FBTC_SLOT(5, 0xe5555555, SLOT_MIX),
- [CXST_ENULL] = __DEF_FBTC_SLOT(5, 0xaaaaaaaa, SLOT_ISO),
+ [CXST_ENULL] = __DEF_FBTC_SLOT(5, 0x55555555, SLOT_MIX),
[CXST_WLK] = __DEF_FBTC_SLOT(250, 0xea5a5a5a, SLOT_MIX),
[CXST_W1FDD] = __DEF_FBTC_SLOT(50, 0xffffffff, SLOT_ISO),
[CXST_B1FDD] = __DEF_FBTC_SLOT(50, 0xffffdfff, SLOT_ISO),
@@ -4153,6 +4153,7 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
s_def[CXST_EBT].cxtbl, s_def[CXST_EBT].cxtype);
_slot_set_le(btc, CXST_ENULL, s_def[CXST_ENULL].dur,
s_def[CXST_ENULL].cxtbl, s_def[CXST_ENULL].cxtype);
+ _slot_set_dur(btc, CXST_EBT, dur_2);
break;
case BTC_CXP_OFFE_DEF2:
_slot_set(btc, CXST_E2G, 20, cxtbl[1], SLOT_ISO);
@@ -4162,6 +4163,7 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
s_def[CXST_EBT].cxtbl, s_def[CXST_EBT].cxtype);
_slot_set_le(btc, CXST_ENULL, s_def[CXST_ENULL].dur,
s_def[CXST_ENULL].cxtbl, s_def[CXST_ENULL].cxtype);
+ _slot_set_dur(btc, CXST_EBT, dur_2);
break;
case BTC_CXP_OFFE_2GBWMIXB:
if (a2dp->exist)
@@ -4170,6 +4172,7 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
_slot_set(btc, CXST_E2G, 5, tbl_w1, SLOT_MIX);
_slot_set_le(btc, CXST_EBT, cpu_to_le16(40),
s_def[CXST_EBT].cxtbl, s_def[CXST_EBT].cxtype);
+ _slot_set_dur(btc, CXST_EBT, dur_2);
break;
case BTC_CXP_OFFE_WL: /* for 4-way */
_slot_set(btc, CXST_E2G, 5, cxtbl[1], SLOT_MIX);
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index 57590f5577a3..917b2adede61 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2019-2020 Realtek Corporation
*/
#include <linux/ip.h>
+#include <linux/sort.h>
#include <linux/udp.h>
#include "cam.h"
@@ -272,17 +273,18 @@ rtw89_get_6ghz_span(struct rtw89_dev *rtwdev, u32 center_freq)
return NULL;
}
-bool rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate, u16 *bitrate)
+bool rtw89_legacy_rate_to_bitrate(struct rtw89_dev *rtwdev, u8 legacy_rate, u16 *bitrate)
{
- struct ieee80211_rate rate;
+ const struct ieee80211_rate *rate;
- if (unlikely(rpt_rate >= ARRAY_SIZE(rtw89_bitrates))) {
- rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "invalid rpt rate %d\n", rpt_rate);
+ if (unlikely(legacy_rate >= ARRAY_SIZE(rtw89_bitrates))) {
+ rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
+ "invalid legacy rate %d\n", legacy_rate);
return false;
}
- rate = rtw89_bitrates[rpt_rate];
- *bitrate = rate.bitrate;
+ rate = &rtw89_bitrates[legacy_rate];
+ *bitrate = rate->bitrate;
return true;
}
@@ -697,6 +699,66 @@ static void rtw89_core_tx_update_llc_hdr(struct rtw89_dev *rtwdev,
desc_info->hdr_llc_len >>= 1; /* in unit of 2 bytes */
}
+u8 rtw89_core_get_ch_dma(struct rtw89_dev *rtwdev, u8 qsel)
+{
+ switch (qsel) {
+ default:
+ rtw89_warn(rtwdev, "Cannot map qsel to dma: %d\n", qsel);
+ fallthrough;
+ case RTW89_TX_QSEL_BE_0:
+ case RTW89_TX_QSEL_BE_1:
+ case RTW89_TX_QSEL_BE_2:
+ case RTW89_TX_QSEL_BE_3:
+ return RTW89_TXCH_ACH0;
+ case RTW89_TX_QSEL_BK_0:
+ case RTW89_TX_QSEL_BK_1:
+ case RTW89_TX_QSEL_BK_2:
+ case RTW89_TX_QSEL_BK_3:
+ return RTW89_TXCH_ACH1;
+ case RTW89_TX_QSEL_VI_0:
+ case RTW89_TX_QSEL_VI_1:
+ case RTW89_TX_QSEL_VI_2:
+ case RTW89_TX_QSEL_VI_3:
+ return RTW89_TXCH_ACH2;
+ case RTW89_TX_QSEL_VO_0:
+ case RTW89_TX_QSEL_VO_1:
+ case RTW89_TX_QSEL_VO_2:
+ case RTW89_TX_QSEL_VO_3:
+ return RTW89_TXCH_ACH3;
+ case RTW89_TX_QSEL_B0_MGMT:
+ return RTW89_TXCH_CH8;
+ case RTW89_TX_QSEL_B0_HI:
+ return RTW89_TXCH_CH9;
+ case RTW89_TX_QSEL_B1_MGMT:
+ return RTW89_TXCH_CH10;
+ case RTW89_TX_QSEL_B1_HI:
+ return RTW89_TXCH_CH11;
+ }
+}
+EXPORT_SYMBOL(rtw89_core_get_ch_dma);
+
+u8 rtw89_core_get_ch_dma_v1(struct rtw89_dev *rtwdev, u8 qsel)
+{
+ switch (qsel) {
+ default:
+ rtw89_warn(rtwdev, "Cannot map qsel to dma v1: %d\n", qsel);
+ fallthrough;
+ case RTW89_TX_QSEL_BE_0:
+ case RTW89_TX_QSEL_BK_0:
+ return RTW89_TXCH_ACH0;
+ case RTW89_TX_QSEL_VI_0:
+ case RTW89_TX_QSEL_VO_0:
+ return RTW89_TXCH_ACH2;
+ case RTW89_TX_QSEL_B0_MGMT:
+ case RTW89_TX_QSEL_B0_HI:
+ return RTW89_TXCH_CH8;
+ case RTW89_TX_QSEL_B1_MGMT:
+ case RTW89_TX_QSEL_B1_HI:
+ return RTW89_TXCH_CH10;
+ }
+}
+EXPORT_SYMBOL(rtw89_core_get_ch_dma_v1);
+
static void
rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
@@ -710,7 +772,7 @@ rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
u8 qsel, ch_dma;
qsel = rtw89_core_get_qsel_mgmt(rtwdev, tx_req);
- ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
+ ch_dma = rtw89_chip_get_ch_dma(rtwdev, qsel);
desc_info->qsel = qsel;
desc_info->ch_dma = ch_dma;
@@ -927,7 +989,7 @@ rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev,
tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
tid_indicate = rtw89_core_get_tid_indicate(rtwdev, tid);
qsel = desc_info->hiq ? RTW89_TX_QSEL_B0_HI : rtw89_core_get_qsel(rtwdev, tid);
- ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
+ ch_dma = rtw89_chip_get_ch_dma(rtwdev, qsel);
desc_info->ch_dma = ch_dma;
desc_info->tid_indicate = tid_indicate;
@@ -1073,42 +1135,46 @@ rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev,
}
}
+static void rtw89_tx_wait_work(struct wiphy *wiphy, struct wiphy_work *work)
+{
+ struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
+ tx_wait_work.work);
+
+ rtw89_tx_wait_list_clear(rtwdev);
+}
+
void rtw89_core_tx_kick_off(struct rtw89_dev *rtwdev, u8 qsel)
{
u8 ch_dma;
- ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
+ ch_dma = rtw89_chip_get_ch_dma(rtwdev, qsel);
rtw89_hci_tx_kick_off(rtwdev, ch_dma);
}
int rtw89_core_tx_kick_off_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
- int qsel, unsigned int timeout)
+ struct rtw89_tx_wait_info *wait, int qsel,
+ unsigned int timeout)
{
- struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
- struct rtw89_tx_wait_info *wait;
unsigned long time_left;
int ret = 0;
- wait = kzalloc(sizeof(*wait), GFP_KERNEL);
- if (!wait) {
- rtw89_core_tx_kick_off(rtwdev, qsel);
- return 0;
- }
-
- init_completion(&wait->completion);
- rcu_assign_pointer(skb_data->wait, wait);
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
rtw89_core_tx_kick_off(rtwdev, qsel);
time_left = wait_for_completion_timeout(&wait->completion,
msecs_to_jiffies(timeout));
- if (time_left == 0)
- ret = -ETIMEDOUT;
- else if (!wait->tx_done)
- ret = -EAGAIN;
- rcu_assign_pointer(skb_data->wait, NULL);
- kfree_rcu(wait, rcu_head);
+ if (time_left == 0) {
+ ret = -ETIMEDOUT;
+ list_add_tail(&wait->list, &rtwdev->tx_waits);
+ wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->tx_wait_work,
+ RTW89_TX_WAIT_WORK_TIMEOUT);
+ } else {
+ if (!wait->tx_done)
+ ret = -EAGAIN;
+ rtw89_tx_wait_release(wait);
+ }
return ret;
}
@@ -1157,10 +1223,12 @@ int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
static int rtw89_core_tx_write_link(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link,
- struct sk_buff *skb, int *qsel, bool sw_mld)
+ struct sk_buff *skb, int *qsel, bool sw_mld,
+ struct rtw89_tx_wait_info *wait)
{
struct ieee80211_sta *sta = rtwsta_link_to_sta_safe(rtwsta_link);
struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
+ struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
struct rtw89_core_tx_request tx_req = {};
int ret;
@@ -1177,6 +1245,8 @@ static int rtw89_core_tx_write_link(struct rtw89_dev *rtwdev,
rtw89_core_tx_update_desc_info(rtwdev, &tx_req);
rtw89_core_tx_wake(rtwdev, &tx_req);
+ rcu_assign_pointer(skb_data->wait, wait);
+
ret = rtw89_hci_tx_write(rtwdev, &tx_req);
if (ret) {
rtw89_err(rtwdev, "failed to transmit skb to HCI\n");
@@ -1213,7 +1283,8 @@ int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
}
}
- return rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, qsel, false);
+ return rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, qsel, false,
+ NULL);
}
static __le32 rtw89_build_txwd_body0(struct rtw89_tx_desc_info *desc_info)
@@ -1835,6 +1906,10 @@ static void rtw89_core_parse_phy_status_ie00(struct rtw89_dev *rtwdev,
tmp_rpl = le32_get_bits(ie->w0, RTW89_PHY_STS_IE00_W0_RPL);
phy_ppdu->rpl_avg = tmp_rpl >> 1;
+
+ if (!phy_ppdu->hdr_2_en)
+ phy_ppdu->rx_path_en =
+ le32_get_bits(ie->w3, RTW89_PHY_STS_IE00_W3_RX_PATH_EN);
}
static void rtw89_core_parse_phy_status_ie00_v2(struct rtw89_dev *rtwdev,
@@ -2221,6 +2296,435 @@ static void rtw89_vif_sync_bcn_tsf(struct rtw89_vif_link *rtwvif_link,
WRITE_ONCE(rtwvif_link->sync_bcn_tsf, le64_to_cpu(mgmt->u.beacon.timestamp));
}
+static u32 rtw89_bcn_calc_min_tbtt(struct rtw89_dev *rtwdev, u32 tbtt1, u32 tbtt2)
+{
+ struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
+ u32 close_bcn_intvl_th = bcn_track->close_bcn_intvl_th;
+ u32 tbtt_diff_th = bcn_track->tbtt_diff_th;
+
+ if (tbtt2 > tbtt1)
+ swap(tbtt1, tbtt2);
+
+ if (tbtt1 - tbtt2 > tbtt_diff_th)
+ return tbtt1;
+ else if (tbtt2 > close_bcn_intvl_th)
+ return tbtt2;
+ else if (tbtt1 > close_bcn_intvl_th)
+ return tbtt1;
+ else
+ return tbtt2;
+}
+
+static void rtw89_bcn_cfg_tbtt_offset(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link)
+{
+ struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ u32 offset = bcn_track->tbtt_offset;
+
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ const struct rtw89_port_reg *p = mac->port_base;
+ u32 bcnspc, val;
+
+ bcnspc = rtw89_read32_port_mask(rtwdev, rtwvif_link,
+ p->bcn_space, B_AX_BCN_SPACE_MASK);
+ val = bcnspc - (offset / 1024);
+ val = u32_encode_bits(val, B_AX_TBTT_SHIFT_OFST_MAG) |
+ B_AX_TBTT_SHIFT_OFST_SIGN;
+
+ rtw89_write16_port_mask(rtwdev, rtwvif_link, p->tbtt_shift,
+ B_AX_TBTT_SHIFT_OFST_MASK, val);
+
+ return;
+ }
+
+ rtw89_fw_h2c_tbtt_tuning(rtwdev, rtwvif_link, offset);
+}
+
+static void rtw89_bcn_update_tbtt_offset(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link)
+{
+ struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat;
+ struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
+ u32 *tbtt_us = bcn_stat->tbtt_us;
+ u32 offset = tbtt_us[0];
+ u8 i;
+
+ for (i = 1; i < RTW89_BCN_TRACK_STAT_NR; i++)
+ offset = rtw89_bcn_calc_min_tbtt(rtwdev, tbtt_us[i], offset);
+
+ if (bcn_track->tbtt_offset == offset)
+ return;
+
+ bcn_track->tbtt_offset = offset;
+ rtw89_bcn_cfg_tbtt_offset(rtwdev, rtwvif_link);
+}
+
+static int cmp_u16(const void *a, const void *b)
+{
+ return *(const u16 *)a - *(const u16 *)b;
+}
+
+static u16 _rtw89_bcn_calc_drift(u16 tbtt, u16 offset, u16 beacon_int)
+{
+ if (tbtt < offset)
+ return beacon_int - offset + tbtt;
+
+ return tbtt - offset;
+}
+
+static void rtw89_bcn_calc_drift(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat;
+ struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
+ u16 offset_tu = bcn_track->tbtt_offset / 1024;
+ u16 *tbtt_tu = bcn_stat->tbtt_tu;
+ u16 *drift = bcn_stat->drift;
+ u8 i;
+
+ bcn_stat->tbtt_tu_min = U16_MAX;
+ bcn_stat->tbtt_tu_max = 0;
+ for (i = 0; i < RTW89_BCN_TRACK_STAT_NR; i++) {
+ drift[i] = _rtw89_bcn_calc_drift(tbtt_tu[i], offset_tu,
+ bcn_track->beacon_int);
+
+ bcn_stat->tbtt_tu_min = min(bcn_stat->tbtt_tu_min, tbtt_tu[i]);
+ bcn_stat->tbtt_tu_max = max(bcn_stat->tbtt_tu_max, tbtt_tu[i]);
+ }
+
+ sort(drift, RTW89_BCN_TRACK_STAT_NR, sizeof(*drift), cmp_u16, NULL);
+}
+
+static void rtw89_bcn_calc_distribution(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat;
+ struct rtw89_beacon_dist *bcn_dist = &bcn_stat->bcn_dist;
+ u16 lower_bound, upper_bound, outlier_count = 0;
+ u16 *drift = bcn_stat->drift;
+ u16 *bins = bcn_dist->bins;
+ u16 q1, q3, iqr, tmp;
+ u8 i;
+
+ BUILD_BUG_ON(RTW89_BCN_TRACK_STAT_NR % 4 != 0);
+
+ memset(bcn_dist, 0, sizeof(*bcn_dist));
+
+ bcn_dist->min = drift[0];
+ bcn_dist->max = drift[RTW89_BCN_TRACK_STAT_NR - 1];
+
+ tmp = RTW89_BCN_TRACK_STAT_NR / 4;
+ q1 = ((drift[tmp] + drift[tmp - 1]) * RTW89_BCN_TRACK_SCALE_FACTOR) / 2;
+
+ tmp = (RTW89_BCN_TRACK_STAT_NR * 3) / 4;
+ q3 = ((drift[tmp] + drift[tmp - 1]) * RTW89_BCN_TRACK_SCALE_FACTOR) / 2;
+
+ iqr = q3 - q1;
+ tmp = (3 * iqr) / 2;
+
+ if (bcn_dist->min <= 5)
+ lower_bound = bcn_dist->min;
+ else if (q1 > tmp)
+ lower_bound = (q1 - tmp) / RTW89_BCN_TRACK_SCALE_FACTOR;
+ else
+ lower_bound = 0;
+
+ upper_bound = (q3 + tmp) / RTW89_BCN_TRACK_SCALE_FACTOR;
+
+ for (i = 0; i < RTW89_BCN_TRACK_STAT_NR; i++) {
+ u16 tbtt = bcn_stat->tbtt_tu[i];
+ u16 min = bcn_stat->tbtt_tu_min;
+ u8 bin_idx;
+
+ /* histogram */
+ bin_idx = min((tbtt - min) / RTW89_BCN_TRACK_BIN_WIDTH,
+ RTW89_BCN_TRACK_MAX_BIN_NUM - 1);
+ bins[bin_idx]++;
+
+ /* boxplot outlier */
+ if (drift[i] < lower_bound || drift[i] > upper_bound)
+ outlier_count++;
+ }
+
+ bcn_dist->outlier_count = outlier_count;
+ bcn_dist->lower_bound = lower_bound;
+ bcn_dist->upper_bound = upper_bound;
+}
+
+static u8 rtw89_bcn_get_coverage(struct rtw89_dev *rtwdev, u16 threshold)
+{
+ struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat;
+ int l = 0, r = RTW89_BCN_TRACK_STAT_NR - 1, m;
+ u16 *drift = bcn_stat->drift;
+ int index = -1;
+ u8 count = 0;
+
+ while (l <= r) {
+ m = l + (r - l) / 2;
+
+ if (drift[m] <= threshold) {
+ index = m;
+ l = m + 1;
+ } else {
+ r = m - 1;
+ }
+ }
+
+ count = (index == -1) ? 0 : (index + 1);
+
+ return (count * PERCENT) / RTW89_BCN_TRACK_STAT_NR;
+}
+
+static u16 rtw89_bcn_get_histogram_bound(struct rtw89_dev *rtwdev, u8 target)
+{
+ struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat;
+ struct rtw89_beacon_dist *bcn_dist = &bcn_stat->bcn_dist;
+ u16 tbtt_tu_max = bcn_stat->tbtt_tu_max;
+ u16 upper, lower = bcn_stat->tbtt_tu_min;
+ u8 i, count = 0;
+
+ for (i = 0; i < RTW89_BCN_TRACK_MAX_BIN_NUM; i++) {
+ upper = lower + RTW89_BCN_TRACK_BIN_WIDTH - 1;
+ if (i == RTW89_BCN_TRACK_MAX_BIN_NUM - 1)
+ upper = max(upper, tbtt_tu_max);
+
+ count += bcn_dist->bins[i];
+ if (count > target)
+ break;
+
+ lower = upper + 1;
+ }
+
+ return upper;
+}
+
+static u16 rtw89_bcn_get_rx_time(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan)
+{
+#define RTW89_SYMBOL_TIME_2GHZ 192
+#define RTW89_SYMBOL_TIME_5GHZ 20
+#define RTW89_SYMBOL_TIME_6GHZ 20
+ struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat;
+ u16 bitrate, val;
+
+ if (!rtw89_legacy_rate_to_bitrate(rtwdev, pkt_stat->beacon_rate, &bitrate))
+ return 0;
+
+ val = (pkt_stat->beacon_len * 8 * RTW89_BCN_TRACK_SCALE_FACTOR) / bitrate;
+
+ switch (chan->band_type) {
+ default:
+ case RTW89_BAND_2G:
+ val += RTW89_SYMBOL_TIME_2GHZ;
+ break;
+ case RTW89_BAND_5G:
+ val += RTW89_SYMBOL_TIME_5GHZ;
+ break;
+ case RTW89_BAND_6G:
+ val += RTW89_SYMBOL_TIME_6GHZ;
+ break;
+ }
+
+ /* convert to milliseconds */
+ return DIV_ROUND_UP(val, 1000);
+}
+
+static void rtw89_bcn_calc_timeout(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link)
+{
+#define RTW89_BCN_TRACK_EXTEND_TIMEOUT 5
+#define RTW89_BCN_TRACK_COVERAGE_TH 0 /* unit: TU */
+#define RTW89_BCN_TRACK_STRONG_RSSI 80
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
+ struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat;
+ struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat;
+ struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
+ struct rtw89_beacon_dist *bcn_dist = &bcn_stat->bcn_dist;
+ u16 outlier_high_bcn_th = bcn_track->outlier_high_bcn_th;
+ u16 outlier_low_bcn_th = bcn_track->outlier_low_bcn_th;
+ u8 rssi = ewma_rssi_read(&rtwdev->phystat.bcn_rssi);
+ u16 target_bcn_th = bcn_track->target_bcn_th;
+ u16 low_bcn_th = bcn_track->low_bcn_th;
+ u16 med_bcn_th = bcn_track->med_bcn_th;
+ u16 beacon_int = bcn_track->beacon_int;
+ u16 bcn_timeout;
+
+ if (pkt_stat->beacon_nr < low_bcn_th) {
+ bcn_timeout = (RTW89_BCN_TRACK_TARGET_BCN * beacon_int) / PERCENT;
+ goto out;
+ }
+
+ if (bcn_dist->outlier_count >= outlier_high_bcn_th) {
+ bcn_timeout = bcn_dist->max;
+ goto out;
+ }
+
+ if (pkt_stat->beacon_nr < med_bcn_th) {
+ if (bcn_dist->outlier_count > outlier_low_bcn_th)
+ bcn_timeout = (bcn_dist->max + bcn_dist->upper_bound) / 2;
+ else
+ bcn_timeout = bcn_dist->upper_bound +
+ RTW89_BCN_TRACK_EXTEND_TIMEOUT;
+
+ goto out;
+ }
+
+ if (rssi >= RTW89_BCN_TRACK_STRONG_RSSI) {
+ if (rtw89_bcn_get_coverage(rtwdev, RTW89_BCN_TRACK_COVERAGE_TH) >= 90) {
+ /* ideal case */
+ bcn_timeout = 0;
+ } else {
+ u16 offset_tu = bcn_track->tbtt_offset / 1024;
+ u16 upper_bound;
+
+ upper_bound =
+ rtw89_bcn_get_histogram_bound(rtwdev, target_bcn_th);
+ bcn_timeout =
+ _rtw89_bcn_calc_drift(upper_bound, offset_tu, beacon_int);
+ }
+
+ goto out;
+ }
+
+ bcn_timeout = bcn_stat->drift[target_bcn_th];
+
+out:
+ bcn_track->bcn_timeout = bcn_timeout + rtw89_bcn_get_rx_time(rtwdev, chan);
+}
+
+static void rtw89_bcn_update_timeout(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link)
+{
+ rtw89_bcn_calc_drift(rtwdev);
+ rtw89_bcn_calc_distribution(rtwdev);
+ rtw89_bcn_calc_timeout(rtwdev, rtwvif_link);
+}
+
+static void rtw89_core_bcn_track(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
+ struct rtw89_vif_link *rtwvif_link;
+ struct rtw89_vif *rtwvif;
+ unsigned int link_id;
+
+ if (!RTW89_CHK_FW_FEATURE(BEACON_TRACKING, &rtwdev->fw))
+ return;
+
+ if (!rtwdev->lps_enabled)
+ return;
+
+ if (!bcn_track->is_data_ready)
+ return;
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+ rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
+ if (!(rtwvif_link->wifi_role == RTW89_WIFI_ROLE_STATION ||
+ rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT))
+ continue;
+
+ rtw89_bcn_update_tbtt_offset(rtwdev, rtwvif_link);
+ rtw89_bcn_update_timeout(rtwdev, rtwvif_link);
+ }
+ }
+}
+
+static bool rtw89_core_bcn_track_can_lps(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
+
+ if (!RTW89_CHK_FW_FEATURE(BEACON_TRACKING, &rtwdev->fw))
+ return true;
+
+ return bcn_track->is_data_ready;
+}
+
+static void rtw89_core_bcn_track_assoc(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link)
+{
+#define RTW89_BCN_TRACK_MED_BCN 70
+#define RTW89_BCN_TRACK_LOW_BCN 30
+#define RTW89_BCN_TRACK_OUTLIER_HIGH_BCN 30
+#define RTW89_BCN_TRACK_OUTLIER_LOW_BCN 20
+ struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
+ u32 period = jiffies_to_msecs(RTW89_TRACK_WORK_PERIOD);
+ struct ieee80211_bss_conf *bss_conf;
+ u32 beacons_in_period;
+ u32 bcn_intvl_us;
+ u16 beacon_int;
+ u8 dtim;
+
+ rcu_read_lock();
+ bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
+ beacon_int = bss_conf->beacon_int;
+ dtim = bss_conf->dtim_period;
+ rcu_read_unlock();
+
+ beacons_in_period = period / beacon_int / dtim;
+ bcn_intvl_us = ieee80211_tu_to_usec(beacon_int);
+
+ bcn_track->low_bcn_th =
+ (beacons_in_period * RTW89_BCN_TRACK_LOW_BCN) / PERCENT;
+ bcn_track->med_bcn_th =
+ (beacons_in_period * RTW89_BCN_TRACK_MED_BCN) / PERCENT;
+ bcn_track->outlier_low_bcn_th =
+ (RTW89_BCN_TRACK_STAT_NR * RTW89_BCN_TRACK_OUTLIER_LOW_BCN) / PERCENT;
+ bcn_track->outlier_high_bcn_th =
+ (RTW89_BCN_TRACK_STAT_NR * RTW89_BCN_TRACK_OUTLIER_HIGH_BCN) / PERCENT;
+ bcn_track->target_bcn_th =
+ (RTW89_BCN_TRACK_STAT_NR * RTW89_BCN_TRACK_TARGET_BCN) / PERCENT;
+
+ bcn_track->close_bcn_intvl_th = ieee80211_tu_to_usec(beacon_int - 3);
+ bcn_track->tbtt_diff_th = (bcn_intvl_us * 85) / PERCENT;
+ bcn_track->beacon_int = beacon_int;
+ bcn_track->dtim = dtim;
+}
+
+static void rtw89_core_bcn_track_reset(struct rtw89_dev *rtwdev)
+{
+ memset(&rtwdev->phystat.bcn_stat, 0, sizeof(rtwdev->phystat.bcn_stat));
+ memset(&rtwdev->bcn_track, 0, sizeof(rtwdev->bcn_track));
+}
+
+static void rtw89_vif_rx_bcn_stat(struct rtw89_dev *rtwdev,
+ struct ieee80211_bss_conf *bss_conf,
+ struct sk_buff *skb)
+{
+#define RTW89_APPEND_TSF_2GHZ 384
+#define RTW89_APPEND_TSF_5GHZ 52
+#define RTW89_APPEND_TSF_6GHZ 52
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+ struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat;
+ struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
+ u32 bcn_intvl_us = ieee80211_tu_to_usec(bss_conf->beacon_int);
+ u64 tsf = le64_to_cpu(mgmt->u.beacon.timestamp);
+ u8 wp, num = bcn_stat->num;
+ u16 append;
+
+ if (!RTW89_CHK_FW_FEATURE(BEACON_TRACKING, &rtwdev->fw))
+ return;
+
+ switch (rx_status->band) {
+ default:
+ case NL80211_BAND_2GHZ:
+ append = RTW89_APPEND_TSF_2GHZ;
+ break;
+ case NL80211_BAND_5GHZ:
+ append = RTW89_APPEND_TSF_5GHZ;
+ break;
+ case NL80211_BAND_6GHZ:
+ append = RTW89_APPEND_TSF_6GHZ;
+ break;
+ }
+
+ wp = bcn_stat->wp;
+ div_u64_rem(tsf - append, bcn_intvl_us, &bcn_stat->tbtt_us[wp]);
+ bcn_stat->tbtt_tu[wp] = bcn_stat->tbtt_us[wp] / 1024;
+ bcn_stat->wp = (wp + 1) % RTW89_BCN_TRACK_STAT_NR;
+ bcn_stat->num = umin(num + 1, RTW89_BCN_TRACK_STAT_NR);
+ bcn_track->is_data_ready = bcn_stat->num == RTW89_BCN_TRACK_STAT_NR;
+}
+
static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
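rtw89_bcn_calc_distribution() in the hunk above is a textbook boxplot over the 32 sorted drift samples (drift being (tbtt - offset) wrapped modulo the beacon interval): quartiles kept in x10 fixed point, whiskers at 1.5 * IQR, anything outside counted as an outlier. The arithmetic as a standalone program with dummy data, leaving out the driver's extra min <= 5 special case:

#include <stdio.h>

#define NR      32      /* RTW89_BCN_TRACK_STAT_NR */
#define SCALE   10      /* RTW89_BCN_TRACK_SCALE_FACTOR */

int main(void)
{
        unsigned short drift[NR];       /* sorted drift samples, in TU */
        unsigned short q1, q3, iqr, whisker, lower, upper;
        int i;

        for (i = 0; i < NR; i++)
                drift[i] = i;           /* dummy: 0..31 TU, pre-sorted */

        /* each quartile is the mean of the two samples around the cut, x10 */
        q1 = (drift[NR / 4] + drift[NR / 4 - 1]) * SCALE / 2;
        q3 = (drift[NR * 3 / 4] + drift[NR * 3 / 4 - 1]) * SCALE / 2;
        iqr = q3 - q1;
        whisker = 3 * iqr / 2;          /* 1.5 * IQR, still x10 */

        lower = q1 > whisker ? (q1 - whisker) / SCALE : 0;
        upper = (q3 + whisker) / SCALE;

        /* prints: Q1=7.5 Q3=23.5 keep drift within [0, 47] TU */
        printf("Q1=%u.%u Q3=%u.%u keep drift within [%u, %u] TU\n",
               q1 / SCALE, q1 % SCALE, q3 / SCALE, q3 % SCALE, lower, upper);
        return 0;
}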
@@ -2237,6 +2741,7 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
struct ieee80211_bss_conf *bss_conf;
struct rtw89_vif_link *rtwvif_link;
const u8 *bssid = iter_data->bssid;
+ const u8 *target_bssid;
if (rtwdev->scanning &&
(ieee80211_is_beacon(hdr->frame_control) ||
@@ -2258,7 +2763,10 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
goto out;
}
- if (!ether_addr_equal(bss_conf->bssid, bssid))
+ target_bssid = ieee80211_is_beacon(hdr->frame_control) &&
+ bss_conf->nontransmitted ?
+ bss_conf->transmitter_bssid : bss_conf->bssid;
+ if (!ether_addr_equal(target_bssid, bssid))
goto out;
if (is_mld) {
@@ -2272,7 +2780,6 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
rtw89_vif_sync_bcn_tsf(rtwvif_link, hdr, skb->len);
rtw89_fw_h2c_rssi_offload(rtwdev, phy_ppdu);
}
- pkt_stat->beacon_nr++;
if (phy_ppdu) {
ewma_rssi_add(&rtwdev->phystat.bcn_rssi, phy_ppdu->rssi_avg);
@@ -2280,7 +2787,11 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
rtwvif_link->bcn_bw_idx = phy_ppdu->bw_idx;
}
+ pkt_stat->beacon_nr++;
pkt_stat->beacon_rate = desc_info->data_rate;
+ pkt_stat->beacon_len = skb->len;
+
+ rtw89_vif_rx_bcn_stat(rtwdev, bss_conf, skb);
}
if (!ether_addr_equal(bss_conf->addr, hdr->addr1))
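rtw89_bcn_get_coverage() in the beacon-tracking hunk above reports what share of the sorted drift samples sit at or below a threshold, via a binary search for the rightmost qualifying index. The same search, standalone:

#include <stdio.h>

static unsigned int coverage_percent(const unsigned short *sorted, int nr,
                                     unsigned short threshold)
{
        int l = 0, r = nr - 1, index = -1;

        while (l <= r) {
                int m = l + (r - l) / 2;

                if (sorted[m] <= threshold) {
                        index = m;      /* candidate; keep looking right */
                        l = m + 1;
                } else {
                        r = m - 1;
                }
        }

        return ((index + 1) * 100) / nr;
}

int main(void)
{
        unsigned short drift[8] = { 0, 1, 1, 2, 3, 5, 8, 13 };

        /* 6 of 8 samples are <= 5 TU, so this prints 75% */
        printf("%u%%\n", coverage_percent(drift, 8, 5));
        return 0;
}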
@@ -3226,7 +3737,7 @@ static u32 rtw89_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, u8 tid)
u8 qsel, ch_dma;
qsel = rtw89_core_get_qsel(rtwdev, tid);
- ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
+ ch_dma = rtw89_chip_get_ch_dma(rtwdev, qsel);
return rtw89_hci_check_and_reclaim_tx_resource(rtwdev, ch_dma);
}
@@ -3411,6 +3922,7 @@ int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rt
struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
int link_id = ieee80211_vif_is_mld(vif) ? rtwvif_link->link_id : -1;
struct rtw89_sta_link *rtwsta_link;
+ struct rtw89_tx_wait_info *wait;
struct ieee80211_sta *sta;
struct ieee80211_hdr *hdr;
struct rtw89_sta *rtwsta;
@@ -3420,6 +3932,12 @@ int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rt
if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc)
return 0;
+ wait = kzalloc(sizeof(*wait), GFP_KERNEL);
+ if (!wait)
+ return -ENOMEM;
+
+ init_completion(&wait->completion);
+
rcu_read_lock();
sta = ieee80211_find_sta(vif, vif->cfg.ap_addr);
if (!sta) {
@@ -3434,6 +3952,8 @@ int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rt
goto out;
}
+ wait->skb = skb;
+
hdr = (struct ieee80211_hdr *)skb->data;
if (ps)
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
@@ -3441,10 +3961,12 @@ int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rt
rtwsta_link = rtwsta->links[rtwvif_link->link_id];
if (unlikely(!rtwsta_link)) {
ret = -ENOLINK;
+ dev_kfree_skb_any(skb);
goto out;
}
- ret = rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, &qsel, true);
+ ret = rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, &qsel, true,
+ wait);
if (ret) {
rtw89_warn(rtwdev, "nullfunc transmit failed: %d\n", ret);
dev_kfree_skb_any(skb);
@@ -3453,10 +3975,11 @@ int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rt
rcu_read_unlock();
- return rtw89_core_tx_kick_off_and_wait(rtwdev, skb, qsel,
+ return rtw89_core_tx_kick_off_and_wait(rtwdev, skb, wait, qsel,
timeout);
out:
rcu_read_unlock();
+ kfree(wait);
return ret;
}
@@ -3697,6 +4220,9 @@ static void rtw89_enter_lps_track(struct rtw89_dev *rtwdev)
vif->type == NL80211_IFTYPE_P2P_CLIENT))
continue;
+ if (!rtw89_core_bcn_track_can_lps(rtwdev))
+ continue;
+
rtw89_enter_lps(rtwdev, rtwvif, true);
}
}
@@ -3883,6 +4409,7 @@ static void rtw89_track_work(struct wiphy *wiphy, struct wiphy_work *work)
rtw89_btc_ntfy_wl_sta(rtwdev);
}
rtw89_mac_bf_monitor_track(rtwdev);
+ rtw89_core_bcn_track(rtwdev);
rtw89_phy_stat_track(rtwdev);
rtw89_phy_env_monitor_track(rtwdev);
rtw89_phy_dig(rtwdev);
@@ -4129,8 +4656,10 @@ int rtw89_core_sta_link_disassoc(struct rtw89_dev *rtwdev,
rtw89_assoc_link_clr(rtwsta_link);
- if (vif->type == NL80211_IFTYPE_STATION)
+ if (vif->type == NL80211_IFTYPE_STATION) {
rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, false);
+ rtw89_core_bcn_track_reset(rtwdev);
+ }
if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT)
rtw89_p2p_noa_once_deinit(rtwvif_link);
@@ -4271,6 +4800,7 @@ int rtw89_core_sta_link_assoc(struct rtw89_dev *rtwdev,
BTC_ROLE_MSTS_STA_CONN_END);
rtw89_core_get_no_ul_ofdma_htc(rtwdev, &rtwsta_link->htc_template, chan);
rtw89_phy_ul_tb_assoc(rtwdev, rtwvif_link);
+ rtw89_core_bcn_track_assoc(rtwdev, rtwvif_link);
ret = rtw89_fw_h2c_general_pkt(rtwdev, rtwvif_link, rtwsta_link->mac_id);
if (ret) {
@@ -4829,39 +5359,96 @@ void rtw89_core_csa_beacon_work(struct wiphy *wiphy, struct wiphy_work *work)
}
}
-int rtw89_wait_for_cond(struct rtw89_wait_info *wait, unsigned int cond)
+struct rtw89_wait_response *
+rtw89_wait_for_cond_prep(struct rtw89_wait_info *wait, unsigned int cond)
{
- struct completion *cmpl = &wait->completion;
- unsigned long time_left;
+ struct rtw89_wait_response *prep;
unsigned int cur;
+ /* use -EPERM if and only if telling the eval side not to make any changes */
+
cur = atomic_cmpxchg(&wait->cond, RTW89_WAIT_COND_IDLE, cond);
if (cur != RTW89_WAIT_COND_IDLE)
- return -EBUSY;
+ return ERR_PTR(-EPERM);
+
+ prep = kzalloc(sizeof(*prep), GFP_KERNEL);
+ if (!prep)
+ return ERR_PTR(-ENOMEM);
+
+ init_completion(&prep->completion);
+
+ rcu_assign_pointer(wait->resp, prep);
+
+ return prep;
+}
+
+int rtw89_wait_for_cond_eval(struct rtw89_wait_info *wait,
+ struct rtw89_wait_response *prep, int err)
+{
+ unsigned long time_left;
+
+ if (IS_ERR(prep)) {
+ err = err ?: PTR_ERR(prep);
+
+ /* special error case: no permission to reset anything */
+ if (PTR_ERR(prep) == -EPERM)
+ return err;
- time_left = wait_for_completion_timeout(cmpl, RTW89_WAIT_FOR_COND_TIMEOUT);
+ goto reset;
+ }
+
+ if (err)
+ goto cleanup;
+
+ time_left = wait_for_completion_timeout(&prep->completion,
+ RTW89_WAIT_FOR_COND_TIMEOUT);
if (time_left == 0) {
- atomic_set(&wait->cond, RTW89_WAIT_COND_IDLE);
- return -ETIMEDOUT;
+ err = -ETIMEDOUT;
+ goto cleanup;
}
+ wait->data = prep->data;
+
+cleanup:
+ rcu_assign_pointer(wait->resp, NULL);
+ kfree_rcu(prep, rcu_head);
+
+reset:
+ atomic_set(&wait->cond, RTW89_WAIT_COND_IDLE);
+
+ if (err)
+ return err;
+
if (wait->data.err)
return -EFAULT;
return 0;
}
+static void rtw89_complete_cond_resp(struct rtw89_wait_response *resp,
+ const struct rtw89_completion_data *data)
+{
+ resp->data = *data;
+ complete(&resp->completion);
+}
+
void rtw89_complete_cond(struct rtw89_wait_info *wait, unsigned int cond,
const struct rtw89_completion_data *data)
{
+ struct rtw89_wait_response *resp;
unsigned int cur;
+ guard(rcu)();
+
+ resp = rcu_dereference(wait->resp);
+ if (!resp)
+ return;
+
cur = atomic_cmpxchg(&wait->cond, cond, RTW89_WAIT_COND_IDLE);
if (cur != cond)
return;
- wait->data = *data;
- complete(&wait->completion);
+ rtw89_complete_cond_resp(resp, data);
}
void rtw89_core_ntfy_btc_event(struct rtw89_dev *rtwdev, enum rtw89_btc_hmsg event)
@@ -4908,6 +5495,8 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
{
int ret;
+ rtw89_phy_init_bb_afe(rtwdev);
+
ret = rtw89_mac_init(rtwdev);
if (ret) {
rtw89_err(rtwdev, "mac init fail, ret:%d\n", ret);
@@ -4952,6 +5541,7 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_ON);
rtw89_fw_h2c_fw_log(rtwdev, rtwdev->fw.log.enable);
rtw89_fw_h2c_init_ba_cam(rtwdev);
+ rtw89_tas_fw_timer_enable(rtwdev, true);
return 0;
}
@@ -4967,6 +5557,7 @@ void rtw89_core_stop(struct rtw89_dev *rtwdev)
if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
return;
+ rtw89_tas_fw_timer_enable(rtwdev, false);
rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_OFF);
clear_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
@@ -4978,6 +5569,7 @@ void rtw89_core_stop(struct rtw89_dev *rtwdev)
wiphy_work_cancel(wiphy, &btc->dhcp_notify_work);
wiphy_work_cancel(wiphy, &btc->icmp_notify_work);
cancel_delayed_work_sync(&rtwdev->txq_reinvoke_work);
+ wiphy_delayed_work_cancel(wiphy, &rtwdev->tx_wait_work);
wiphy_delayed_work_cancel(wiphy, &rtwdev->track_work);
wiphy_delayed_work_cancel(wiphy, &rtwdev->track_ps_work);
wiphy_delayed_work_cancel(wiphy, &rtwdev->chanctx_work);
@@ -5203,6 +5795,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
INIT_LIST_HEAD(&rtwdev->scan_info.pkt_list[band]);
}
INIT_LIST_HEAD(&rtwdev->scan_info.chan_list);
+ INIT_LIST_HEAD(&rtwdev->tx_waits);
INIT_WORK(&rtwdev->ba_work, rtw89_core_ba_work);
INIT_WORK(&rtwdev->txq_work, rtw89_core_txq_work);
INIT_DELAYED_WORK(&rtwdev->txq_reinvoke_work, rtw89_core_txq_reinvoke_work);
@@ -5214,6 +5807,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
wiphy_delayed_work_init(&rtwdev->coex_rfk_chk_work, rtw89_coex_rfk_chk_work);
wiphy_delayed_work_init(&rtwdev->cfo_track_work, rtw89_phy_cfo_track_work);
wiphy_delayed_work_init(&rtwdev->mcc_prepare_done_work, rtw89_mcc_prepare_done_work);
+ wiphy_delayed_work_init(&rtwdev->tx_wait_work, rtw89_tx_wait_work);
INIT_DELAYED_WORK(&rtwdev->forbid_ba_work, rtw89_forbid_ba_work);
wiphy_delayed_work_init(&rtwdev->antdiv_work, rtw89_phy_antdiv_work);
rtwdev->txq_wq = alloc_workqueue("rtw89_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
@@ -5813,6 +6407,7 @@ int rtw89_core_register(struct rtw89_dev *rtwdev)
return ret;
}
+ rtw89_phy_dm_init_data(rtwdev);
rtw89_debugfs_init(rtwdev);
return 0;
@@ -5863,6 +6458,9 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
ops->cancel_remain_on_channel = NULL;
}
+ if (!chip->support_noise)
+ ops->get_survey = NULL;
+
driver_data_size = sizeof(struct rtw89_dev) + bus_data_size;
hw = ieee80211_alloc_hw(driver_data_size, ops);
if (!hw)
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index 43e10278e14d..928c8c84c964 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -1011,6 +1011,7 @@ struct rtw89_port_reg {
u32 ptcl_dbg;
u32 ptcl_dbg_info;
u32 bcn_drop_all;
+ u32 bcn_psr_rpt;
u32 hiq_win[RTW89_PORT_NUM];
};
@@ -3506,9 +3507,12 @@ struct rtw89_phy_rate_pattern {
bool enable;
};
+#define RTW89_TX_WAIT_WORK_TIMEOUT msecs_to_jiffies(500)
struct rtw89_tx_wait_info {
struct rcu_head rcu_head;
+ struct list_head list;
struct completion completion;
+ struct sk_buff *skb;
bool tx_done;
};
@@ -3759,6 +3763,7 @@ struct rtw89_chip_ops {
void (*fill_txdesc_fwcmd)(struct rtw89_dev *rtwdev,
struct rtw89_tx_desc_info *desc_info,
void *txdesc);
+ u8 (*get_ch_dma)(struct rtw89_dev *rtwdev, u8 qsel);
int (*cfg_ctrl_path)(struct rtw89_dev *rtwdev, bool wl);
int (*mac_cfg_gnt)(struct rtw89_dev *rtwdev,
const struct rtw89_mac_ax_coex_gnt *gnt_cfg);
@@ -4363,6 +4368,9 @@ struct rtw89_chanctx_listener {
(struct rtw89_dev *rtwdev, enum rtw89_chanctx_state state);
};
+#define RTW89_NHM_TH_NUM 11
+#define RTW89_NHM_RPT_NUM 12
+
struct rtw89_chip_info {
enum rtw89_core_chip_id chip_id;
enum rtw89_chip_gen chip_gen;
@@ -4397,6 +4405,7 @@ struct rtw89_chip_info {
bool support_ant_gain;
bool support_tas;
bool support_sar_by_ant;
+ bool support_noise;
bool ul_tb_waveform_ctrl;
bool ul_tb_pwr_diff;
bool rx_freq_frome_ie;
@@ -4481,6 +4490,8 @@ struct rtw89_chip_info {
bool cfo_hw_comp;
const struct rtw89_reg_def *dcfo_comp;
u8 dcfo_comp_sft;
+ const struct rtw89_reg_def (*nhm_report)[RTW89_NHM_RPT_NUM];
+ const struct rtw89_reg_def (*nhm_th)[RTW89_NHM_TH_NUM];
const struct rtw89_imr_info *imr_info;
const struct rtw89_imr_table *imr_dmac_table;
const struct rtw89_imr_table *imr_cmac_table;
@@ -4542,17 +4553,23 @@ struct rtw89_completion_data {
u8 buf[RTW89_COMPLETION_BUF_SIZE];
};
+struct rtw89_wait_response {
+ struct rcu_head rcu_head;
+ struct completion completion;
+ struct rtw89_completion_data data;
+};
+
struct rtw89_wait_info {
atomic_t cond;
- struct completion completion;
struct rtw89_completion_data data;
+ struct rtw89_wait_response __rcu *resp;
};
#define RTW89_WAIT_FOR_COND_TIMEOUT msecs_to_jiffies(100)
static inline void rtw89_init_wait(struct rtw89_wait_info *wait)
{
- init_completion(&wait->completion);
+ rcu_assign_pointer(wait->resp, NULL);
atomic_set(&wait->cond, RTW89_WAIT_COND_IDLE);
}
@@ -4622,6 +4639,7 @@ enum rtw89_fw_feature {
RTW89_FW_FEATURE_SCAN_OFFLOAD_EXTRA_OP,
RTW89_FW_FEATURE_RFK_NTFY_MCC_V0,
RTW89_FW_FEATURE_LPS_DACK_BY_C2H_REG,
+ RTW89_FW_FEATURE_BEACON_TRACKING,
};
struct rtw89_fw_suit {
@@ -4681,6 +4699,7 @@ struct rtw89_fw_elm_info {
struct rtw89_fw_txpwr_track_cfg *txpwr_trk;
struct rtw89_phy_rfk_log_fmt *rfk_log_fmt;
const struct rtw89_regd_data *regd;
+ const struct rtw89_fw_element_hdr *afe;
};
enum rtw89_fw_mss_dev_type {
@@ -5074,9 +5093,36 @@ struct rtw89_pkt_drop_params {
struct rtw89_pkt_stat {
u16 beacon_nr;
u8 beacon_rate;
+ u32 beacon_len;
u32 rx_rate_cnt[RTW89_HW_RATE_NR];
};
+#define RTW89_BCN_TRACK_STAT_NR 32
+#define RTW89_BCN_TRACK_SCALE_FACTOR 10
+#define RTW89_BCN_TRACK_MAX_BIN_NUM 6
+#define RTW89_BCN_TRACK_BIN_WIDTH 5
+#define RTW89_BCN_TRACK_TARGET_BCN 80
+
+struct rtw89_beacon_dist {
+ u16 min;
+ u16 max;
+ u16 outlier_count;
+ u16 lower_bound;
+ u16 upper_bound;
+ u16 bins[RTW89_BCN_TRACK_MAX_BIN_NUM];
+};
+
+struct rtw89_beacon_stat {
+ u8 num;
+ u8 wp;
+ u16 tbtt_tu_min;
+ u16 tbtt_tu_max;
+ u16 drift[RTW89_BCN_TRACK_STAT_NR];
+ u32 tbtt_us[RTW89_BCN_TRACK_STAT_NR];
+ u16 tbtt_tu[RTW89_BCN_TRACK_STAT_NR];
+ struct rtw89_beacon_dist bcn_dist;
+};
+
DECLARE_EWMA(thermal, 4, 4);
struct rtw89_phy_stat {
@@ -5085,6 +5131,7 @@ struct rtw89_phy_stat {
struct ewma_rssi bcn_rssi;
struct rtw89_pkt_stat cur_pkt_stat;
struct rtw89_pkt_stat last_pkt_stat;
+ struct rtw89_beacon_stat bcn_stat;
};
enum rtw89_rfk_report_state {
@@ -5434,6 +5481,7 @@ enum rtw89_env_racing_lv {
struct rtw89_ccx_para_info {
enum rtw89_env_racing_lv rac_lv;
u16 mntr_time;
+ bool nhm_incld_cca;
u8 nhm_manual_th_ofst;
u8 nhm_manual_th0;
enum rtw89_ifs_clm_application ifs_clm_app;
@@ -5467,9 +5515,13 @@ enum rtw89_ccx_edcca_opt_bw_idx {
RTW89_CCX_EDCCA_BW20_7 = 7
};
-#define RTW89_NHM_TH_NUM 11
+struct rtw89_nhm_report {
+ struct list_head list;
+ struct ieee80211_channel *channel;
+ u8 noise;
+};
+
#define RTW89_FAHM_TH_NUM 11
-#define RTW89_NHM_RPT_NUM 12
#define RTW89_FAHM_RPT_NUM 12
#define RTW89_IFS_CLM_NUM 4
struct rtw89_env_monitor_info {
@@ -5503,6 +5555,13 @@ struct rtw89_env_monitor_info {
u16 ifs_clm_ofdm_fa_permil;
u32 ifs_clm_ifs_avg[RTW89_IFS_CLM_NUM];
u32 ifs_clm_cca_avg[RTW89_IFS_CLM_NUM];
+ bool nhm_include_cca;
+ u32 nhm_sum;
+ u32 nhm_mntr_time;
+ u16 nhm_result[RTW89_NHM_RPT_NUM];
+ u8 nhm_th[RTW89_NHM_RPT_NUM];
+ struct rtw89_nhm_report *nhm_his[RTW89_BAND_NUM];
+ struct list_head nhm_rpt_list;
};
enum rtw89_ser_rcvy_step {
@@ -5715,8 +5774,8 @@ struct rtw89_wow_gtk_info {
u8 kck[32];
u8 kek[32];
u8 tk1[16];
- u8 txmickey[8];
u8 rxmickey[8];
+ u8 txmickey[8];
__le32 igtk_keyid;
__le64 ipn;
u8 igtk[2][32];
@@ -5882,6 +5941,24 @@ struct rtw89_mlo_info {
struct rtw89_wait_info wait;
};
+struct rtw89_beacon_track_info {
+ bool is_data_ready;
+ u32 tbtt_offset; /* in unit of microsecond */
+ u16 bcn_timeout; /* in unit of millisecond */
+
+ /* The following are constant and set at association. */
+ u8 dtim;
+ u16 beacon_int;
+ u16 low_bcn_th;
+ u16 med_bcn_th;
+ u16 high_bcn_th;
+ u16 target_bcn_th;
+ u16 outlier_low_bcn_th;
+ u16 outlier_high_bcn_th;
+ u32 close_bcn_intvl_th;
+ u32 tbtt_diff_th;
+};
+
struct rtw89_dev {
struct ieee80211_hw *hw;
struct device *dev;
@@ -5896,6 +5973,7 @@ struct rtw89_dev {
const struct rtw89_pci_info *pci_info;
const struct rtw89_rfe_parms *rfe_parms;
struct rtw89_hal hal;
+ struct rtw89_beacon_track_info bcn_track;
struct rtw89_mcc_info mcc;
struct rtw89_mlo_info mlo;
struct rtw89_mac_info mac;
@@ -5925,6 +6003,9 @@ struct rtw89_dev {
/* used to protect rpwm */
spinlock_t rpwm_lock;
+ struct list_head tx_waits;
+ struct wiphy_delayed_work tx_wait_work;
+
struct rtw89_cam_info cam_info;
struct sk_buff_head c2h_queue;
@@ -6181,6 +6262,26 @@ rtw89_assoc_link_rcu_dereference(struct rtw89_dev *rtwdev, u8 macid)
list_first_entry_or_null(&p->dlink_pool, typeof(*p->links_inst), dlink_schd); \
})
+static inline void rtw89_tx_wait_release(struct rtw89_tx_wait_info *wait)
+{
+ dev_kfree_skb_any(wait->skb);
+ kfree_rcu(wait, rcu_head);
+}
+
+static inline void rtw89_tx_wait_list_clear(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_tx_wait_info *wait, *tmp;
+
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
+ list_for_each_entry_safe(wait, tmp, &rtwdev->tx_waits, list) {
+ if (!completion_done(&wait->completion))
+ continue;
+ list_del(&wait->list);
+ rtw89_tx_wait_release(wait);
+ }
+}
+
static inline int rtw89_hci_tx_write(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
@@ -6190,6 +6291,7 @@ static inline int rtw89_hci_tx_write(struct rtw89_dev *rtwdev,
static inline void rtw89_hci_reset(struct rtw89_dev *rtwdev)
{
rtwdev->hci.ops->reset(rtwdev);
+ rtw89_tx_wait_list_clear(rtwdev);
}
static inline int rtw89_hci_start(struct rtw89_dev *rtwdev)
@@ -6322,9 +6424,13 @@ static inline void rtw89_hci_clear(struct rtw89_dev *rtwdev, struct pci_dev *pde
static inline
struct rtw89_tx_skb_data *RTW89_TX_SKB_CB(struct sk_buff *skb)
{
+ /*
+ * Meant to be used by rtw89_hci_tx_write() or later, but before
+ * ieee80211_tx_info_clear_status(), which may overwrite it.
+ */
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- return (struct rtw89_tx_skb_data *)info->status.status_driver_data;
+ return (struct rtw89_tx_skb_data *)info->driver_data;
}
static inline u8 rtw89_read8(struct rtw89_dev *rtwdev, u32 addr)
@@ -7131,6 +7237,14 @@ void rtw89_chip_fill_txdesc_fwcmd(struct rtw89_dev *rtwdev,
}
static inline
+u8 rtw89_chip_get_ch_dma(struct rtw89_dev *rtwdev, u8 qsel)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return chip->ops->get_ch_dma(rtwdev, qsel);
+}
+
+static inline
void rtw89_chip_mac_cfg_gnt(struct rtw89_dev *rtwdev,
const struct rtw89_mac_ax_coex_gnt *gnt_cfg)
{
@@ -7258,11 +7372,12 @@ static inline struct sk_buff *rtw89_alloc_skb_for_rx(struct rtw89_dev *rtwdev,
return dev_alloc_skb(length);
}
-static inline void rtw89_core_tx_wait_complete(struct rtw89_dev *rtwdev,
+static inline bool rtw89_core_tx_wait_complete(struct rtw89_dev *rtwdev,
struct rtw89_tx_skb_data *skb_data,
bool tx_done)
{
struct rtw89_tx_wait_info *wait;
+ bool ret = false;
rcu_read_lock();
@@ -7270,11 +7385,14 @@ static inline void rtw89_core_tx_wait_complete(struct rtw89_dev *rtwdev,
if (!wait)
goto out;
+ ret = true;
wait->tx_done = tx_done;
- complete(&wait->completion);
+ /* Don't access skb anymore after completion */
+ complete_all(&wait->completion);
out:
rcu_read_unlock();
+ return ret;
}
static inline bool rtw89_is_mlo_1_1(struct rtw89_dev *rtwdev)
@@ -7358,7 +7476,8 @@ int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
struct sk_buff *skb, bool fwdl);
void rtw89_core_tx_kick_off(struct rtw89_dev *rtwdev, u8 qsel);
int rtw89_core_tx_kick_off_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
- int qsel, unsigned int timeout);
+ struct rtw89_tx_wait_info *wait, int qsel,
+ unsigned int timeout);
void rtw89_core_fill_txdesc(struct rtw89_dev *rtwdev,
struct rtw89_tx_desc_info *desc_info,
void *txdesc);
@@ -7374,6 +7493,8 @@ void rtw89_core_fill_txdesc_fwcmd_v1(struct rtw89_dev *rtwdev,
void rtw89_core_fill_txdesc_fwcmd_v2(struct rtw89_dev *rtwdev,
struct rtw89_tx_desc_info *desc_info,
void *txdesc);
+u8 rtw89_core_get_ch_dma(struct rtw89_dev *rtwdev, u8 qsel);
+u8 rtw89_core_get_ch_dma_v1(struct rtw89_dev *rtwdev, u8 qsel);
void rtw89_core_rx(struct rtw89_dev *rtwdev,
struct rtw89_rx_desc_info *desc_info,
struct sk_buff *skb);
@@ -7454,13 +7575,18 @@ void rtw89_vif_type_mapping(struct rtw89_vif_link *rtwvif_link, bool assoc);
int rtw89_chip_info_setup(struct rtw89_dev *rtwdev);
void rtw89_chip_cfg_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link);
-bool rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate, u16 *bitrate);
+bool rtw89_legacy_rate_to_bitrate(struct rtw89_dev *rtwdev, u8 legacy_rate, u16 *bitrate);
int rtw89_regd_setup(struct rtw89_dev *rtwdev);
int rtw89_regd_init_hint(struct rtw89_dev *rtwdev);
const char *rtw89_regd_get_string(enum rtw89_regulation_type regd);
void rtw89_traffic_stats_init(struct rtw89_dev *rtwdev,
struct rtw89_traffic_stats *stats);
-int rtw89_wait_for_cond(struct rtw89_wait_info *wait, unsigned int cond);
+struct rtw89_wait_response *
+rtw89_wait_for_cond_prep(struct rtw89_wait_info *wait, unsigned int cond)
+__acquires(rtw89_wait);
+int rtw89_wait_for_cond_eval(struct rtw89_wait_info *wait,
+ struct rtw89_wait_response *prep, int err)
+__releases(rtw89_wait);
void rtw89_complete_cond(struct rtw89_wait_info *wait, unsigned int cond,
const struct rtw89_completion_data *data);
int rtw89_core_start(struct rtw89_dev *rtwdev);
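The tx-wait rework running through core.h above changes who frees the wait object on timeout: the waiter parks it on rtwdev->tx_waits instead of freeing it under a possible race, and a delayed work reaps entries once their completion has fired, so a late completer never touches freed memory. The shape of that pattern in a minimal kernel-style sketch (illustrative names; the real code frees via kfree_rcu and serializes the list with the wiphy lock):

#include <linux/completion.h>
#include <linux/list.h>
#include <linux/slab.h>

struct demo_wait {
        struct list_head list;
        struct completion done;
};

static LIST_HEAD(demo_waits);           /* protected by an outer lock */

/* waiter: on timeout, defer the free instead of racing the completer */
static int demo_wait_for(struct demo_wait *w, unsigned long timeout)
{
        if (wait_for_completion_timeout(&w->done, timeout)) {
                kfree(w);
                return 0;
        }

        list_add_tail(&w->list, &demo_waits);
        return -ETIMEDOUT;
}

/* reaper (e.g. a delayed work): free only what the completer finished */
static void demo_reap(void)
{
        struct demo_wait *w, *tmp;

        list_for_each_entry_safe(w, tmp, &demo_waits, list) {
                if (!completion_done(&w->done))
                        continue;
                list_del(&w->list);
                kfree(w);
        }
}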
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index afdfb9647fc1..3dc7981c510f 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -86,6 +86,7 @@ struct rtw89_debugfs {
struct rtw89_debugfs_priv stations;
struct rtw89_debugfs_priv disable_dm;
struct rtw89_debugfs_priv mlo_mode;
+ struct rtw89_debugfs_priv beacon_info;
};
struct rtw89_debugfs_iter_data {
@@ -3562,6 +3563,58 @@ static int rtw89_dbg_trigger_ctrl_error(struct rtw89_dev *rtwdev)
return 0;
}
+static int rtw89_dbg_trigger_mac_error_ax(struct rtw89_dev *rtwdev)
+{
+ u16 val16;
+ u8 val8;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ val8 = rtw89_read8(rtwdev, R_AX_CMAC_FUNC_EN);
+ rtw89_write8(rtwdev, R_AX_CMAC_FUNC_EN, val8 & ~B_AX_TMAC_EN);
+ mdelay(1);
+ rtw89_write8(rtwdev, R_AX_CMAC_FUNC_EN, val8);
+
+ val16 = rtw89_read16(rtwdev, R_AX_PTCL_IMR0);
+ rtw89_write16(rtwdev, R_AX_PTCL_IMR0, val16 | B_AX_F2PCMD_EMPTY_ERR_INT_EN);
+ rtw89_write16(rtwdev, R_AX_PTCL_IMR0, val16);
+
+ return 0;
+}
+
+static int rtw89_dbg_trigger_mac_error_be(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ rtw89_write32_set(rtwdev, R_BE_CMAC_FW_TRIGGER_IDCT_ISR,
+ B_BE_CMAC_FW_TRIG_IDCT | B_BE_CMAC_FW_ERR_IDCT_IMR);
+
+ return 0;
+}
+
+static int rtw89_dbg_trigger_mac_error(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ rtw89_leave_ps_mode(rtwdev);
+
+ switch (chip->chip_gen) {
+ case RTW89_CHIP_AX:
+ return rtw89_dbg_trigger_mac_error_ax(rtwdev);
+ case RTW89_CHIP_BE:
+ return rtw89_dbg_trigger_mac_error_be(rtwdev);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static ssize_t
rtw89_debug_priv_fw_crash_get(struct rtw89_dev *rtwdev,
struct rtw89_debugfs_priv *debugfs_priv,
@@ -3577,6 +3630,7 @@ rtw89_debug_priv_fw_crash_get(struct rtw89_dev *rtwdev,
enum rtw89_dbg_crash_simulation_type {
RTW89_DBG_SIM_CPU_EXCEPTION = 1,
RTW89_DBG_SIM_CTRL_ERROR = 2,
+ RTW89_DBG_SIM_MAC_ERROR = 3,
};
static ssize_t
@@ -3585,6 +3639,7 @@ rtw89_debug_priv_fw_crash_set(struct rtw89_dev *rtwdev,
const char *buf, size_t count)
{
int (*sim)(struct rtw89_dev *rtwdev);
+ bool announce = true;
u8 crash_type;
int ret;
@@ -3603,11 +3658,19 @@ rtw89_debug_priv_fw_crash_set(struct rtw89_dev *rtwdev,
case RTW89_DBG_SIM_CTRL_ERROR:
sim = rtw89_dbg_trigger_ctrl_error;
break;
+ case RTW89_DBG_SIM_MAC_ERROR:
+ sim = rtw89_dbg_trigger_mac_error;
+
+ /* Driver SER flow won't get involved; only FW will. */
+ announce = false;
+ break;
default:
return -EINVAL;
}
- set_bit(RTW89_FLAG_CRASH_SIMULATING, rtwdev->flags);
+ if (announce)
+ set_bit(RTW89_FLAG_CRASH_SIMULATING, rtwdev->flags);
+
ret = sim(rtwdev);
if (ret)
@@ -4298,6 +4361,64 @@ rtw89_debug_priv_mlo_mode_set(struct rtw89_dev *rtwdev,
return count;
}
+static ssize_t
+rtw89_debug_priv_beacon_info_get(struct rtw89_dev *rtwdev,
+ struct rtw89_debugfs_priv *debugfs_priv,
+ char *buf, size_t bufsz)
+{
+ struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.last_pkt_stat;
+ struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
+ struct rtw89_beacon_stat *bcn_stat = &rtwdev->phystat.bcn_stat;
+ struct rtw89_beacon_dist *bcn_dist = &bcn_stat->bcn_dist;
+ u16 upper, lower = bcn_stat->tbtt_tu_min;
+ char *p = buf, *end = buf + bufsz;
+ u16 *drift = bcn_stat->drift;
+ u8 bcn_num = bcn_stat->num;
+ u8 count;
+ u8 i;
+
+ p += scnprintf(p, end - p, "[Beacon info]\n");
+ p += scnprintf(p, end - p, "count: %u\n", pkt_stat->beacon_nr);
+ p += scnprintf(p, end - p, "interval: %u\n", bcn_track->beacon_int);
+ p += scnprintf(p, end - p, "dtim: %u\n", bcn_track->dtim);
+ p += scnprintf(p, end - p, "raw rssi: %lu\n",
+ ewma_rssi_read(&rtwdev->phystat.bcn_rssi));
+ p += scnprintf(p, end - p, "hw rate: %u\n", pkt_stat->beacon_rate);
+ p += scnprintf(p, end - p, "length: %u\n", pkt_stat->beacon_len);
+
+ p += scnprintf(p, end - p, "\n[Distribution]\n");
+ p += scnprintf(p, end - p, "tbtt\n");
+ for (i = 0; i < RTW89_BCN_TRACK_MAX_BIN_NUM; i++) {
+ upper = lower + RTW89_BCN_TRACK_BIN_WIDTH - 1;
+ if (i == RTW89_BCN_TRACK_MAX_BIN_NUM - 1)
+ upper = max(upper, bcn_stat->tbtt_tu_max);
+
+ p += scnprintf(p, end - p, "%02u - %02u: %u\n",
+ lower, upper, bcn_dist->bins[i]);
+
+ lower = upper + 1;
+ }
+
+ p += scnprintf(p, end - p, "\ndrift\n");
+
+ for (i = 0; i < bcn_num; i += count) {
+ count = 1;
+ while (i + count < bcn_num && drift[i] == drift[i + count])
+ count++;
+
+ p += scnprintf(p, end - p, "%u: %u\n", drift[i], count);
+ }
+ p += scnprintf(p, end - p, "\nlower bound: %u\n", bcn_dist->lower_bound);
+ p += scnprintf(p, end - p, "upper bound: %u\n", bcn_dist->upper_bound);
+ p += scnprintf(p, end - p, "outlier count: %u\n", bcn_dist->outlier_count);
+
+ p += scnprintf(p, end - p, "\n[Tracking]\n");
+ p += scnprintf(p, end - p, "tbtt offset: %u\n", bcn_track->tbtt_offset);
+ p += scnprintf(p, end - p, "bcn timeout: %u\n", bcn_track->bcn_timeout);
+
+ return p - buf;
+}
+
#define rtw89_debug_priv_get(name, opts...) \
{ \
.cb_read = rtw89_debug_priv_ ##name## _get, \
@@ -4356,6 +4477,7 @@ static const struct rtw89_debugfs rtw89_debugfs_templ = {
.stations = rtw89_debug_priv_get(stations, RLOCK),
.disable_dm = rtw89_debug_priv_set_and_get(disable_dm, RWLOCK),
.mlo_mode = rtw89_debug_priv_set_and_get(mlo_mode, RWLOCK),
+ .beacon_info = rtw89_debug_priv_get(beacon_info),
};
#define rtw89_debugfs_add(name, mode, fopname, parent) \
@@ -4401,6 +4523,7 @@ void rtw89_debugfs_add_sec1(struct rtw89_dev *rtwdev, struct dentry *debugfs_top
rtw89_debugfs_add_r(stations);
rtw89_debugfs_add_rw(disable_dm);
rtw89_debugfs_add_rw(mlo_mode);
+ rtw89_debugfs_add_r(beacon_info);
}
void rtw89_debugfs_init(struct rtw89_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw89/debug.h b/drivers/net/wireless/realtek/rtw89/debug.h
index fc690f7c55dc..a364e7adb079 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.h
+++ b/drivers/net/wireless/realtek/rtw89/debug.h
@@ -56,6 +56,7 @@ static inline void rtw89_debugfs_deinit(struct rtw89_dev *rtwdev) {}
#endif
#define rtw89_info(rtwdev, a...) dev_info((rtwdev)->dev, ##a)
+#define rtw89_info_once(rtwdev, a...) dev_info_once((rtwdev)->dev, ##a)
#define rtw89_warn(rtwdev, a...) dev_warn((rtwdev)->dev, ##a)
#define rtw89_err(rtwdev, a...) dev_err((rtwdev)->dev, ##a)
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index 16e59a4a486e..ab904a7def1b 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -830,11 +830,13 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 127, 0, LPS_DACK_BY_C2H_REG),
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 128, 0, CRASH_TRIGGER_TYPE_1),
__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 128, 0, SCAN_OFFLOAD_EXTRA_OP),
+ __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 128, 0, BEACON_TRACKING),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, NO_LPS_PG),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, TX_WAKE),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 90, 0, CRASH_TRIGGER_TYPE_0),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 91, 0, SCAN_OFFLOAD),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 110, 0, BEACON_FILTER),
+ __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 122, 0, BEACON_TRACKING),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, SCAN_OFFLOAD_EXTRA_OP),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, LPS_DACK_BY_C2H_REG),
__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, CRASH_TRIGGER_TYPE_1),
@@ -846,6 +848,9 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 80, 0, WOW_REASON_V1),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 128, 0, BEACON_LOSS_COUNT_V1),
+ __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 128, 0, LPS_DACK_BY_C2H_REG),
+ __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 128, 0, CRASH_TRIGGER_TYPE_1),
+ __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 129, 1, BEACON_TRACKING),
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER_TYPE_0),
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP),
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD),
@@ -864,6 +869,7 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 71, 0, BEACON_LOSS_COUNT_V1),
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 76, 0, LPS_DACK_BY_C2H_REG),
__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 79, 0, CRASH_TRIGGER_TYPE_1),
+ __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 80, 0, BEACON_TRACKING),
};
static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
@@ -1280,6 +1286,18 @@ int rtw89_recognize_regd_from_elm(struct rtw89_dev *rtwdev,
return 0;
}
+static
+int rtw89_build_afe_pwr_seq_from_elm(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_element_hdr *elm,
+ const union rtw89_fw_element_arg arg)
+{
+ struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
+
+ elm_info->afe = elm;
+
+ return 0;
+}
+
static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
[RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm,
{ .fw_type = RTW89_FW_BBMCU0 }, NULL},
@@ -1365,6 +1383,9 @@ static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
[RTW89_FW_ELEMENT_ID_REGD] = {
rtw89_recognize_regd_from_elm, {}, "REGD",
},
+ [RTW89_FW_ELEMENT_ID_AFE_PWR_SEQ] = {
+ rtw89_build_afe_pwr_seq_from_elm, {}, "AFE",
+ },
};
int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev)
@@ -1537,7 +1558,7 @@ static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
struct rtw89_fw_hdr *fw_hdr;
struct sk_buff *skb;
u32 truncated;
- u32 ret = 0;
+ int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
@@ -3990,6 +4011,93 @@ fail:
}
EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon_be);
+int rtw89_fw_h2c_tbtt_tuning(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link, u32 offset)
+{
+ struct rtw89_h2c_tbtt_tuning *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c tbtt tuning\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_tbtt_tuning *)skb->data;
+
+ h2c->w0 = le32_encode_bits(rtwvif_link->phy_idx, RTW89_H2C_TBTT_TUNING_W0_BAND) |
+ le32_encode_bits(rtwvif_link->port, RTW89_H2C_TBTT_TUNING_W0_PORT);
+ h2c->w1 = le32_encode_bits(offset, RTW89_H2C_TBTT_TUNING_W1_SHIFT);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_PS,
+ H2C_FUNC_TBTT_TUNING, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int rtw89_fw_h2c_pwr_lvl(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
+{
+#define RTW89_BCN_TO_VAL_MIN 4
+#define RTW89_BCN_TO_VAL_MAX 64
+#define RTW89_DTIM_TO_VAL_MIN 7
+#define RTW89_DTIM_TO_VAL_MAX 15
+ struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
+ struct rtw89_h2c_pwr_lvl *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ u8 bcn_to_val;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c pwr lvl\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_pwr_lvl *)skb->data;
+
+ bcn_to_val = clamp_t(u8, bcn_track->bcn_timeout,
+ RTW89_BCN_TO_VAL_MIN, RTW89_BCN_TO_VAL_MAX);
+
+ h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_PWR_LVL_W0_MACID) |
+ le32_encode_bits(bcn_to_val, RTW89_H2C_PWR_LVL_W0_BCN_TO_VAL) |
+ le32_encode_bits(0, RTW89_H2C_PWR_LVL_W0_PS_LVL) |
+ le32_encode_bits(0, RTW89_H2C_PWR_LVL_W0_TRX_LVL) |
+ le32_encode_bits(RTW89_DTIM_TO_VAL_MIN,
+ RTW89_H2C_PWR_LVL_W0_DTIM_TO_VAL);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_PS,
+ H2C_FUNC_PS_POWER_LEVEL, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link,
struct rtw89_sta_link *rtwsta_link,
@@ -6580,6 +6688,40 @@ fail:
return ret;
}
+int rtw89_fw_h2c_rf_tas_trigger(struct rtw89_dev *rtwdev, bool enable)
+{
+ struct rtw89_h2c_rf_tas *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c RF TAS\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_rf_tas *)skb->data;
+
+ h2c->enable = cpu_to_le32(enable);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
+ H2C_FUNC_RFK_TAS_OFFLOAD, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
bool rack, bool dack)
@@ -6826,8 +6968,9 @@ static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_fw_info *fw_info = &rtwdev->fw;
const u32 *c2h_reg = chip->c2h_regs;
- u32 ret, timeout;
+ u32 timeout;
u8 i, val;
+ int ret;
info->id = RTW89_FWCMD_C2HREG_FUNC_NULL;
@@ -6865,7 +7008,7 @@ int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
struct rtw89_mac_h2c_info *h2c_info,
struct rtw89_mac_c2h_info *c2h_info)
{
- u32 ret;
+ int ret;
if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE)
lockdep_assert_wiphy(rtwdev->hw->wiphy);
@@ -7123,7 +7266,6 @@ static void rtw89_pno_scan_add_chan_ax(struct rtw89_dev *rtwdev,
struct rtw89_pktofld_info *info;
u8 probe_count = 0;
- ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
ch_info->bw = RTW89_SCAN_WIDTH;
ch_info->tx_pkt = true;
@@ -7264,7 +7406,6 @@ static void rtw89_pno_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type,
struct rtw89_pktofld_info *info;
u8 probe_count = 0, i;
- ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
ch_info->bw = RTW89_SCAN_WIDTH;
ch_info->tx_null = false;
@@ -8585,9 +8726,10 @@ int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev,
goto fail;
}
- /* not support TKIP yet */
h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GTK_OFLD_W0_EN) |
- le32_encode_bits(0, RTW89_H2C_WOW_GTK_OFLD_W0_TKIP_EN) |
+ le32_encode_bits(!!memchr_inv(gtk_info->txmickey, 0,
+ sizeof(gtk_info->txmickey)),
+ RTW89_H2C_WOW_GTK_OFLD_W0_TKIP_EN) |
le32_encode_bits(gtk_info->igtk_keyid ? 1 : 0,
RTW89_H2C_WOW_GTK_OFLD_W0_IEEE80211W_EN) |
le32_encode_bits(macid, RTW89_H2C_WOW_GTK_OFLD_W0_MAC_ID) |
@@ -8679,19 +8821,30 @@ int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev)
static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
struct rtw89_wait_info *wait, unsigned int cond)
{
- int ret;
+ struct rtw89_wait_response *prep;
+ int ret = 0;
+
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
+ prep = rtw89_wait_for_cond_prep(wait, cond);
+ if (IS_ERR(prep))
+ goto out;
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
dev_kfree_skb_any(skb);
- return -EBUSY;
+ ret = -EBUSY;
+ goto out;
}
- if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags))
- return 1;
+ if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) {
+ ret = 1;
+ goto out;
+ }
- return rtw89_wait_for_cond(wait, cond);
+out:
+ return rtw89_wait_for_cond_eval(wait, prep, ret);
}
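
The rework above is about ordering, not style: the waiter must be registered before the H2C leaves the host, otherwise a completion arriving between rtw89_h2c_tx() and the old rtw89_wait_for_cond() call could be lost. A condensed sketch of the resulting pattern, without the SER special case (the helper names come from this patch; everything else is illustrative):

	static int example_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
				       struct rtw89_wait_info *wait, unsigned int cond)
	{
		struct rtw89_wait_response *prep;
		int ret = 0;

		prep = rtw89_wait_for_cond_prep(wait, cond);	/* register waiter first */
		if (IS_ERR(prep))
			goto out;	/* eval also copes with an ERR_PTR prep */

		ret = rtw89_h2c_tx(rtwdev, skb, false);	/* completion may race in at once */
	out:
		return rtw89_wait_for_cond_eval(wait, prep, ret); /* wait, or unwind with ret */
	}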
#define H2C_ADD_MCC_LEN 16
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index 3de29137b113..ddebf7972068 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -1602,6 +1602,28 @@ struct rtw89_h2c_bcn_upd_be {
#define RTW89_H2C_BCN_UPD_BE_W7_ECSA_OFST GENMASK(30, 16)
#define RTW89_H2C_BCN_UPD_BE_W7_PROTECTION_KEY_ID BIT(31)
+struct rtw89_h2c_tbtt_tuning {
+ __le32 w0;
+ __le32 w1;
+} __packed;
+
+#define RTW89_H2C_TBTT_TUNING_W0_BAND GENMASK(3, 0)
+#define RTW89_H2C_TBTT_TUNING_W0_PORT GENMASK(7, 4)
+#define RTW89_H2C_TBTT_TUNING_W1_SHIFT GENMASK(31, 0)
+
+struct rtw89_h2c_pwr_lvl {
+ __le32 w0;
+ __le32 w1;
+} __packed;
+
+#define RTW89_H2C_PWR_LVL_W0_MACID GENMASK(7, 0)
+#define RTW89_H2C_PWR_LVL_W0_BCN_TO_VAL GENMASK(15, 8)
+#define RTW89_H2C_PWR_LVL_W0_PS_LVL GENMASK(19, 16)
+#define RTW89_H2C_PWR_LVL_W0_TRX_LVL GENMASK(23, 20)
+#define RTW89_H2C_PWR_LVL_W0_BCN_TO_LVL GENMASK(27, 24)
+#define RTW89_H2C_PWR_LVL_W0_DTIM_TO_VAL GENMASK(31, 28)
+#define RTW89_H2C_PWR_LVL_W1_MACID_EXT GENMASK(7, 0)
+
struct rtw89_h2c_role_maintain {
__le32 w0;
};
@@ -3962,6 +3984,7 @@ enum rtw89_fw_element_id {
RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_2GHZ = 24,
RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_5GHZ = 25,
RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_6GHZ = 26,
+ RTW89_FW_ELEMENT_ID_AFE_PWR_SEQ = 27,
RTW89_FW_ELEMENT_ID_NUM,
};
@@ -4067,6 +4090,30 @@ struct rtw89_fw_txpwr_track_cfg {
BIT(RTW89_FW_TXPWR_TRK_TYPE_2G_CCK_A_N) | \
BIT(RTW89_FW_TXPWR_TRK_TYPE_2G_CCK_A_P))
+enum rtw89_fw_afe_action {
+ RTW89_FW_AFE_ACTION_WRITE = 0,
+ RTW89_FW_AFE_ACTION_DELAY = 1,
+ RTW89_FW_AFE_ACTION_POLL = 2,
+};
+
+enum rtw89_fw_afe_cat {
+ RTW89_FW_AFE_CAT_BB = 0,
+ RTW89_FW_AFE_CAT_BB1 = 1,
+ RTW89_FW_AFE_CAT_MAC = 2,
+ RTW89_FW_AFE_CAT_MAC1 = 3,
+ RTW89_FW_AFE_CAT_AFEDIG = 4,
+ RTW89_FW_AFE_CAT_AFEDIG1 = 5,
+};
+
+enum rtw89_fw_afe_class {
+ RTW89_FW_AFE_CLASS_P0 = 0,
+ RTW89_FW_AFE_CLASS_P1 = 1,
+ RTW89_FW_AFE_CLASS_P2 = 2,
+ RTW89_FW_AFE_CLASS_P3 = 3,
+ RTW89_FW_AFE_CLASS_P4 = 4,
+ RTW89_FW_AFE_CLASS_CMN = 5,
+};
+
struct rtw89_fw_element_hdr {
__le32 id; /* enum rtw89_fw_element_id */
__le32 size; /* exclude header size */
@@ -4104,6 +4151,17 @@ struct rtw89_fw_element_hdr {
u8 rsvd1[3];
__le16 offset[];
} __packed rfk_log_fmt;
+ struct {
+ u8 rsvd[8];
+ struct rtw89_phy_afe_info {
+ __le32 action; /* enum rtw89_fw_afe_action */
+ __le32 cat; /* enum rtw89_fw_afe_cat */
+ __le32 class; /* enum rtw89_fw_afe_class */
+ __le32 addr;
+ __le32 mask;
+ __le32 val;
+ } __packed infos[];
+ } __packed afe;
struct __rtw89_fw_txpwr_element txpwr;
struct __rtw89_fw_regd_element regd;
} __packed u;
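
The AFE element is a table of register actions rather than opaque data: each rtw89_phy_afe_info entry names an action (write/delay/poll), a register block (cat/class), and addr/mask/val. A minimal sketch of how a consumer could walk the element stored by rtw89_build_afe_pwr_seq_from_elm() — the entry-count math and the per-action behavior are assumptions for illustration; only the structures and enums come from this patch:

	static void example_apply_afe_pwr_seq(struct rtw89_dev *rtwdev)
	{
		const struct rtw89_fw_element_hdr *elm = rtwdev->fw.elm_info.afe;
		const struct rtw89_phy_afe_info *info;
		u32 n, i;

		if (!elm)
			return;

		/* elm->size excludes the header; skip the rsvd[8] prefix too */
		n = (le32_to_cpu(elm->size) - sizeof(elm->u.afe.rsvd)) /
		    sizeof(*elm->u.afe.infos);

		for (i = 0; i < n; i++) {
			info = &elm->u.afe.infos[i];

			switch (le32_to_cpu(info->action)) {
			case RTW89_FW_AFE_ACTION_WRITE:
				/* apply val under mask at addr, within cat/class */
				break;
			case RTW89_FW_AFE_ACTION_DELAY:
				/* val presumably carries the delay */
				break;
			case RTW89_FW_AFE_ACTION_POLL:
				/* poll addr until (reg & mask) == val */
				break;
			}
		}
	}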
@@ -4201,6 +4259,8 @@ enum rtw89_ps_h2c_func {
H2C_FUNC_MAC_LPS_PARM = 0x0,
H2C_FUNC_P2P_ACT = 0x1,
H2C_FUNC_IPS_CFG = 0x3,
+ H2C_FUNC_PS_POWER_LEVEL = 0x7,
+ H2C_FUNC_TBTT_TUNING = 0xA,
NUM_OF_RTW89_PS_H2C_FUNC,
};
@@ -4370,6 +4430,7 @@ enum rtw89_rfk_offload_h2c_func {
H2C_FUNC_RFK_DACK_OFFLOAD = 0x5,
H2C_FUNC_RFK_RXDCK_OFFLOAD = 0x6,
H2C_FUNC_RFK_PRE_NOTIFY = 0x8,
+ H2C_FUNC_RFK_TAS_OFFLOAD = 0x9,
};
struct rtw89_fw_h2c_rf_get_mccch {
@@ -4551,6 +4612,10 @@ struct rtw89_h2c_rf_rxdck_v0 {
u8 rxdck_dbg_en;
} __packed;
+struct rtw89_h2c_rf_tas {
+ __le32 enable;
+} __packed;
+
struct rtw89_h2c_rf_rxdck {
struct rtw89_h2c_rf_rxdck_v0 v0;
u8 is_chl_k;
@@ -4683,12 +4748,16 @@ struct rtw89_c2h_rfk_report {
u8 version;
} __packed;
-struct rtw89_c2h_rf_tas_info {
- struct rtw89_c2h_hdr hdr;
+struct rtw89_c2h_rf_tas_rpt_log {
__le32 cur_idx;
__le16 txpwr_history[20];
} __packed;
+struct rtw89_c2h_rf_tas_info {
+ struct rtw89_c2h_hdr hdr;
+ struct rtw89_c2h_rf_tas_rpt_log content;
+} __packed;
+
#define RTW89_FW_RSVD_PLE_SIZE 0x800
#define RTW89_FW_BACKTRACE_INFO_SIZE 8
@@ -4750,6 +4819,9 @@ int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link);
int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link);
+int rtw89_fw_h2c_tbtt_tuning(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link, u32 offset);
+int rtw89_fw_h2c_pwr_lvl(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link);
int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif_link *vif,
struct rtw89_sta_link *rtwsta_link, const u8 *scan_mac_addr);
int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
@@ -4826,6 +4898,7 @@ int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
const struct rtw89_chan *chan);
int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
const struct rtw89_chan *chan, bool is_chl_k);
+int rtw89_fw_h2c_rf_tas_trigger(struct rtw89_dev *rtwdev, bool enable);
int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
bool rack, bool dack);
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index 5a5da9d9c0c5..fd11b8fb3c89 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -9,6 +9,7 @@
#include "fw.h"
#include "mac.h"
#include "pci.h"
+#include "phy.h"
#include "ps.h"
#include "reg.h"
#include "util.h"
@@ -177,7 +178,7 @@ int rtw89_mac_dle_dfi_qempty_cfg(struct rtw89_dev *rtwdev,
struct rtw89_mac_dle_dfi_qempty *qempty)
{
struct rtw89_mac_dle_dfi_ctrl ctrl;
- u32 ret;
+ int ret;
ctrl.type = qempty->dle_type;
ctrl.target = DLE_DFI_TYPE_QEMPTY;
@@ -985,7 +986,7 @@ static int hfc_upd_ch_info(struct rtw89_dev *rtwdev, u8 ch)
struct rtw89_hfc_ch_info *info = param->ch_info;
const struct rtw89_hfc_ch_cfg *cfg = param->ch_cfg;
u32 val;
- u32 ret;
+ int ret;
ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
if (ret)
@@ -1176,8 +1177,8 @@ int rtw89_mac_hfc_init(struct rtw89_dev *rtwdev, bool reset, bool en, bool h2c_e
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
const struct rtw89_chip_info *chip = rtwdev->chip;
u32 dma_ch_mask = chip->dma_ch_mask;
+ int ret = 0;
u8 ch;
- u32 ret = 0;
if (reset)
ret = hfc_reset_param(rtwdev);
@@ -1193,7 +1194,7 @@ int rtw89_mac_hfc_init(struct rtw89_dev *rtwdev, bool reset, bool en, bool h2c_e
if (!en && h2c_en) {
mac->hfc_h2c_cfg(rtwdev);
mac->hfc_func_en(rtwdev, en, h2c_en);
- return ret;
+ return 0;
}
for (ch = RTW89_DMA_ACH0; ch < RTW89_DMA_H2C; ch++) {
@@ -2413,7 +2414,7 @@ static int addr_cam_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
static int scheduler_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
{
- u32 ret;
+ int ret;
u32 reg;
u32 val;
@@ -2954,7 +2955,7 @@ static int rtw89_mac_read_phycap(struct rtw89_dev *rtwdev,
struct rtw89_mac_h2c_info h2c_info = {};
enum rtw89_mac_c2h_type c2h_type;
u8 content_len;
- u32 ret;
+ int ret;
if (chip->chip_gen == RTW89_CHIP_AX)
content_len = 0;
@@ -3105,10 +3106,10 @@ int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev)
static int rtw89_hw_sch_tx_en_h2c(struct rtw89_dev *rtwdev, u8 band,
u16 tx_en_u16, u16 mask_u16)
{
- u32 ret;
struct rtw89_mac_c2h_info c2h_info = {0};
struct rtw89_mac_h2c_info h2c_info = {0};
struct rtw89_h2creg_sch_tx_en *sch_tx_en = &h2c_info.u.sch_tx_en;
+ int ret;
h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_SCH_TX_EN;
h2c_info.content_len = sizeof(*sch_tx_en) - RTW89_H2CREG_HDR_LEN;
@@ -4197,6 +4198,7 @@ static const struct rtw89_port_reg rtw89_port_base_ax = {
.ptcl_dbg = R_AX_PTCL_DBG,
.ptcl_dbg_info = R_AX_PTCL_DBG_INFO,
.bcn_drop_all = R_AX_BCN_DROP_ALL0,
+ .bcn_psr_rpt = R_AX_BCN_PSR_RPT_P0,
.hiq_win = {R_AX_P0MB_HGQ_WINDOW_CFG_0, R_AX_PORT_HGQ_WINDOW_CFG,
R_AX_PORT_HGQ_WINDOW_CFG + 1, R_AX_PORT_HGQ_WINDOW_CFG + 2,
R_AX_PORT_HGQ_WINDOW_CFG + 3},
@@ -4649,25 +4651,28 @@ static void rtw89_mac_port_cfg_bcn_early(struct rtw89_dev *rtwdev,
BCN_ERLY_DEF);
}
-static void rtw89_mac_port_cfg_tbtt_shift(struct rtw89_dev *rtwdev,
- struct rtw89_vif_link *rtwvif_link)
+static void rtw89_mac_port_cfg_bcn_psr_rpt(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
const struct rtw89_port_reg *p = mac->port_base;
- u16 val;
+ struct ieee80211_bss_conf *bss_conf;
+ u8 bssid_index;
+ u32 reg;
- if (rtwdev->chip->chip_id != RTL8852C)
- return;
+ rcu_read_lock();
- if (rtwvif_link->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT &&
- rtwvif_link->wifi_role != RTW89_WIFI_ROLE_STATION)
- return;
+ bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
+ if (bss_conf->nontransmitted)
+ bssid_index = bss_conf->bssid_index;
+ else
+ bssid_index = 0;
- val = FIELD_PREP(B_AX_TBTT_SHIFT_OFST_MAG, 1) |
- B_AX_TBTT_SHIFT_OFST_SIGN;
+ rcu_read_unlock();
- rtw89_write16_port_mask(rtwdev, rtwvif_link, p->tbtt_shift,
- B_AX_TBTT_SHIFT_OFST_MASK, val);
+ reg = rtw89_mac_reg_by_idx(rtwdev, p->bcn_psr_rpt + rtwvif_link->port * 4,
+ rtwvif_link->mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_AX_BCAID_P0_MASK, bssid_index);
}
void rtw89_mac_port_tsf_sync(struct rtw89_dev *rtwdev,
@@ -4820,13 +4825,13 @@ int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvi
rtw89_mac_port_cfg_bcn_hold_time(rtwdev, rtwvif_link);
rtw89_mac_port_cfg_bcn_mask_area(rtwdev, rtwvif_link);
rtw89_mac_port_cfg_tbtt_early(rtwdev, rtwvif_link);
- rtw89_mac_port_cfg_tbtt_shift(rtwdev, rtwvif_link);
rtw89_mac_port_cfg_bss_color(rtwdev, rtwvif_link);
rtw89_mac_port_cfg_mbssid(rtwdev, rtwvif_link);
rtw89_mac_port_cfg_func_en(rtwdev, rtwvif_link, true);
rtw89_mac_port_tsf_resync_all(rtwdev);
fsleep(BCN_ERLY_SET_DLY);
rtw89_mac_port_cfg_bcn_early(rtwdev, rtwvif_link);
+ rtw89_mac_port_cfg_bcn_psr_rpt(rtwdev, rtwvif_link);
return 0;
}
@@ -5041,6 +5046,8 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
if (op_chan) {
rtw89_mac_enable_aps_bcn_by_chan(rtwdev, op_chan, false);
ieee80211_stop_queues(rtwdev->hw);
+ } else {
+ rtw89_phy_nhm_get_result(rtwdev, band, chan);
}
return;
case RTW89_SCAN_END_SCAN_NOTIFY:
@@ -5071,6 +5078,7 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
RTW89_CHANNEL_WIDTH_20);
rtw89_assign_entity_chan(rtwdev, rtwvif_link->chanctx_idx,
&new);
+ rtw89_phy_nhm_trigger(rtwdev);
}
break;
default:
@@ -5236,6 +5244,11 @@ rtw89_mac_c2h_bcn_cnt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
}
static void
+rtw89_mac_c2h_bcn_upd_done(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
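+ /* nothing to do yet; registering a handler silences the "not support" log */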
+}
+
+static void
rtw89_mac_c2h_pkt_ofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h,
u32 len)
{
@@ -5258,6 +5271,11 @@ rtw89_mac_c2h_pkt_ofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h,
}
static void
+rtw89_mac_c2h_bcn_resend(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
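+ /* consume silently; no driver action on beacon resend yet */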
+}
+
+static void
rtw89_mac_c2h_tx_duty_rpt(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h, u32 len)
{
struct rtw89_c2h_tx_duty_rpt *c2h =
@@ -5646,7 +5664,7 @@ void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev,
[RTW89_MAC_C2H_FUNC_EFUSE_DUMP] = NULL,
[RTW89_MAC_C2H_FUNC_READ_RSP] = NULL,
[RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP] = rtw89_mac_c2h_pkt_ofld_rsp,
- [RTW89_MAC_C2H_FUNC_BCN_RESEND] = NULL,
+ [RTW89_MAC_C2H_FUNC_BCN_RESEND] = rtw89_mac_c2h_bcn_resend,
[RTW89_MAC_C2H_FUNC_MACID_PAUSE] = rtw89_mac_c2h_macid_pause,
[RTW89_MAC_C2H_FUNC_SCANOFLD_RSP] = rtw89_mac_c2h_scanofld_rsp,
[RTW89_MAC_C2H_FUNC_TX_DUTY_RPT] = rtw89_mac_c2h_tx_duty_rpt,
@@ -5661,6 +5679,7 @@ void (* const rtw89_mac_c2h_info_handler[])(struct rtw89_dev *rtwdev,
[RTW89_MAC_C2H_FUNC_DONE_ACK] = rtw89_mac_c2h_done_ack,
[RTW89_MAC_C2H_FUNC_C2H_LOG] = rtw89_mac_c2h_log,
[RTW89_MAC_C2H_FUNC_BCN_CNT] = rtw89_mac_c2h_bcn_cnt,
+ [RTW89_MAC_C2H_FUNC_BCN_UPD_DONE] = rtw89_mac_c2h_bcn_upd_done,
};
static
@@ -5813,12 +5832,11 @@ void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
case RTW89_MAC_C2H_CLASS_ROLE:
return;
default:
- rtw89_info(rtwdev, "MAC c2h class %d not support\n", class);
- return;
+ break;
}
if (!handler) {
- rtw89_info(rtwdev, "MAC c2h class %d func %d not support\n", class,
- func);
+ rtw89_info_once(rtwdev, "MAC c2h class %d func %d not support\n",
+ class, func);
return;
}
handler(rtwdev, skb, len);
@@ -6720,7 +6738,7 @@ int rtw89_mac_set_hw_muedca_ctrl(struct rtw89_dev *rtwdev,
u8 mac_idx = rtwvif_link->mac_idx;
u16 set = mac->muedca_ctrl.mask;
u32 reg;
- u32 ret;
+ int ret;
ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
if (ret)
@@ -6862,7 +6880,7 @@ int rtw89_mac_cpu_io_rx(struct rtw89_dev *rtwdev, bool wow_enable)
{
struct rtw89_mac_h2c_info h2c_info = {};
struct rtw89_mac_c2h_info c2h_info = {};
- u32 ret;
+ int ret;
if (RTW89_CHK_FW_FEATURE(NO_WOW_CPU_IO_RX, &rtwdev->fw))
return 0;
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index 241e89983c4a..25fe5e5c8a97 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -419,6 +419,7 @@ enum rtw89_mac_c2h_info_func {
RTW89_MAC_C2H_FUNC_DONE_ACK,
RTW89_MAC_C2H_FUNC_C2H_LOG,
RTW89_MAC_C2H_FUNC_BCN_CNT,
+ RTW89_MAC_C2H_FUNC_BCN_UPD_DONE = 0x06,
RTW89_MAC_C2H_FUNC_INFO_MAX,
};
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index c1ca6d741b32..7b04183a3a5d 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -1837,6 +1837,40 @@ static void rtw89_set_rekey_data(struct ieee80211_hw *hw,
}
#endif
+static int rtw89_ops_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct ieee80211_conf *conf = &hw->conf;
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_bb_ctx *bb;
+
+ if (idx == 0) {
+ survey->channel = conf->chandef.chan;
+ survey->filled = SURVEY_INFO_NOISE_DBM;
+ survey->noise = RTW89_NOISE_DEFAULT;
+
+ return 0;
+ }
+
+ rtw89_for_each_active_bb(rtwdev, bb) {
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
+ struct rtw89_nhm_report *rpt;
+
+ rpt = list_first_entry_or_null(&env->nhm_rpt_list, typeof(*rpt), list);
+ if (!rpt)
+ continue;
+
+ survey->filled = SURVEY_INFO_NOISE_DBM;
+ survey->noise = rpt->noise - MAX_RSSI;
+ survey->channel = rpt->channel;
+ list_del_init(&rpt->list);
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
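
For context: mac80211 invokes .get_survey with an increasing idx until the driver returns an error, so index 0 always reports the current channel with the driver's default noise floor, and each subsequent call drains one NHM report from nhm_rpt_list. The results are what nl80211's survey dump (iw dev <iface> survey dump) shows.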
+
static void rtw89_ops_rfkill_poll(struct ieee80211_hw *hw)
{
struct rtw89_dev *rtwdev = hw->priv;
@@ -1869,6 +1903,7 @@ const struct ieee80211_ops rtw89_ops = {
.sta_state = rtw89_ops_sta_state,
.set_key = rtw89_ops_set_key,
.ampdu_action = rtw89_ops_ampdu_action,
+ .get_survey = rtw89_ops_get_survey,
.set_rts_threshold = rtw89_ops_set_rts_threshold,
.sta_statistics = rtw89_ops_sta_statistics,
.flush = rtw89_ops_flush,
diff --git a/drivers/net/wireless/realtek/rtw89/mac_be.c b/drivers/net/wireless/realtek/rtw89/mac_be.c
index 0078080b3999..ef69672b6862 100644
--- a/drivers/net/wireless/realtek/rtw89/mac_be.c
+++ b/drivers/net/wireless/realtek/rtw89/mac_be.c
@@ -56,6 +56,7 @@ static const struct rtw89_port_reg rtw89_port_base_be = {
.ptcl_dbg = R_BE_PTCL_DBG,
.ptcl_dbg_info = R_BE_PTCL_DBG_INFO,
.bcn_drop_all = R_BE_BCN_DROP_ALL0,
+ .bcn_psr_rpt = R_BE_BCN_PSR_RPT_P0,
.hiq_win = {R_BE_P0MB_HGQ_WINDOW_CFG_0, R_BE_PORT_HGQ_WINDOW_CFG,
R_BE_PORT_HGQ_WINDOW_CFG + 1, R_BE_PORT_HGQ_WINDOW_CFG + 2,
R_BE_PORT_HGQ_WINDOW_CFG + 3},
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index a669f2f843aa..0ee5f8579447 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -134,7 +134,7 @@ static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev,
static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev,
struct rtw89_pci *rtwpci)
{
- struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
+ struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[RTW89_TXCH_CH12];
u32 cnt;
cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
@@ -440,7 +440,7 @@ static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev,
int countdown = rtwdev->napi_budget_countdown;
u32 cnt;
- rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ];
+ rx_ring = &rtwpci->rx.rings[RTW89_RXCH_RXQ];
cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
if (!cnt)
@@ -464,7 +464,8 @@ static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev,
struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
struct ieee80211_tx_info *info;
- rtw89_core_tx_wait_complete(rtwdev, skb_data, tx_status == RTW89_TX_DONE);
+ if (rtw89_core_tx_wait_complete(rtwdev, skb_data, tx_status == RTW89_TX_DONE))
+ return;
info = IEEE80211_SKB_CB(skb);
ieee80211_tx_info_clear_status(info);
@@ -568,31 +569,52 @@ static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev,
rtw89_pci_enqueue_txwd(tx_ring, txwd);
}
-static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev,
- struct rtw89_pci_rpp_fmt *rpp)
+void rtw89_pci_parse_rpp(struct rtw89_dev *rtwdev, void *_rpp,
+ struct rtw89_pci_rpp_info *rpp_info)
+{
+ const struct rtw89_pci_rpp_fmt *rpp = _rpp;
+
+ rpp_info->seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ);
+ rpp_info->qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL);
+ rpp_info->tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS);
+ rpp_info->txch = rtw89_chip_get_ch_dma(rtwdev, rpp_info->qsel);
+}
+EXPORT_SYMBOL(rtw89_pci_parse_rpp);
+
+void rtw89_pci_parse_rpp_v1(struct rtw89_dev *rtwdev, void *_rpp,
+ struct rtw89_pci_rpp_info *rpp_info)
+{
+ const struct rtw89_pci_rpp_fmt_v1 *rpp = _rpp;
+
+ rpp_info->seq = le32_get_bits(rpp->w0, RTW89_PCI_RPP_W0_PCIE_SEQ_V1_MASK);
+ rpp_info->qsel = le32_get_bits(rpp->w1, RTW89_PCI_RPP_W1_QSEL_V1_MASK);
+ rpp_info->tx_status = le32_get_bits(rpp->w0, RTW89_PCI_RPP_W0_TX_STATUS_V1_MASK);
+ rpp_info->txch = le32_get_bits(rpp->w0, RTW89_PCI_RPP_W0_DMA_CH_MASK);
+}
+EXPORT_SYMBOL(rtw89_pci_parse_rpp_v1);
+
+static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev, void *rpp)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
- struct rtw89_pci_tx_ring *tx_ring;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ struct rtw89_pci_rpp_info rpp_info = {};
struct rtw89_pci_tx_wd_ring *wd_ring;
+ struct rtw89_pci_tx_ring *tx_ring;
struct rtw89_pci_tx_wd *txwd;
- u16 seq;
- u8 qsel, tx_status, txch;
- seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ);
- qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL);
- tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS);
- txch = rtw89_core_get_ch_dma(rtwdev, qsel);
+ info->parse_rpp(rtwdev, rpp, &rpp_info);
- if (txch == RTW89_TXCH_CH12) {
+ if (rpp_info.txch == RTW89_TXCH_CH12) {
rtw89_warn(rtwdev, "should no fwcmd release report\n");
return;
}
- tx_ring = &rtwpci->tx_rings[txch];
+ tx_ring = &rtwpci->tx.rings[rpp_info.txch];
wd_ring = &tx_ring->wd_ring;
- txwd = &wd_ring->pages[seq];
+ txwd = &wd_ring->pages[rpp_info.seq];
- rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, seq, tx_status);
+ rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, rpp_info.seq,
+ rpp_info.tx_status);
}
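
The dispatch above is what lets the AX and BE-v1 report formats share one release path: a chip's rtw89_pci_info supplies a matching stride and decoder. A hedged sketch of the relevant initializer fields (the field names exist in this patch; the surrounding chip entry is illustrative only):

	static const struct rtw89_pci_info example_pci_info = {
		/* ... */
		.rpp_fmt_size	= sizeof(struct rtw89_pci_rpp_fmt_v1),
		.parse_rpp	= rtw89_pci_parse_rpp_v1,
		/* ... */
	};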
static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev,
@@ -617,13 +639,14 @@ static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev,
u32 max_cnt)
{
struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
- struct rtw89_pci_rx_info *rx_info;
- struct rtw89_pci_rpp_fmt *rpp;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
struct rtw89_rx_desc_info desc_info = {};
+ struct rtw89_pci_rx_info *rx_info;
struct sk_buff *skb;
- u32 cnt = 0;
- u32 rpp_size = sizeof(struct rtw89_pci_rpp_fmt);
+ void *rpp;
u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
+ u32 rpp_size = info->rpp_fmt_size;
+ u32 cnt = 0;
u32 skb_idx;
u32 offset;
int ret;
@@ -649,7 +672,7 @@ static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev,
/* first segment has RX desc */
offset = desc_info.offset + desc_info.rxd_len;
for (; offset + rpp_size <= rx_info->len; offset += rpp_size) {
- rpp = (struct rtw89_pci_rpp_fmt *)(skb->data + offset);
+ rpp = skb->data + offset;
rtw89_pci_release_rpp(rtwdev, rpp);
}
@@ -694,7 +717,7 @@ static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev,
u32 cnt;
int work_done;
- rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];
+ rx_ring = &rtwpci->rx.rings[RTW89_RXCH_RPQ];
spin_lock_bh(&rtwpci->trx_lock);
@@ -724,7 +747,7 @@ static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev,
int i;
for (i = 0; i < RTW89_RXCH_NUM; i++) {
- rx_ring = &rtwpci->rx_rings[i];
+ rx_ring = &rtwpci->rx.rings[i];
bd_ring = &rx_ring->bd_ring;
reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
@@ -797,6 +820,29 @@ void rtw89_pci_recognize_intrs_v2(struct rtw89_dev *rtwdev,
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v2);
+void rtw89_pci_recognize_intrs_v3(struct rtw89_dev *rtwdev,
+ struct rtw89_pci *rtwpci,
+ struct rtw89_pci_isrs *isrs)
+{
+ isrs->ind_isrs = rtw89_read32(rtwdev, R_BE_PCIE_HISR) & rtwpci->ind_intrs;
+ isrs->halt_c2h_isrs = isrs->ind_isrs & B_BE_HS0ISR_IND_INT ?
+ rtw89_read32(rtwdev, R_BE_HISR0) & rtwpci->halt_c2h_intrs : 0;
+ isrs->isrs[1] = rtw89_read32(rtwdev, R_BE_PCIE_DMA_ISR) & rtwpci->intrs[1];
+
+ /* isrs[0] is not used, so borrow it to store the RDU status and
+ * share the common flow in rtw89_pci_interrupt_threadfn().
+ */
+ isrs->isrs[0] = isrs->isrs[1] & (B_BE_PCIE_RDU_CH1_INT |
+ B_BE_PCIE_RDU_CH0_INT);
+
+ if (isrs->halt_c2h_isrs)
+ rtw89_write32(rtwdev, R_BE_HISR0, isrs->halt_c2h_isrs);
+ if (isrs->isrs[1])
+ rtw89_write32(rtwdev, R_BE_PCIE_DMA_ISR, isrs->isrs[1]);
+ rtw89_write32(rtwdev, R_BE_PCIE_HISR, isrs->ind_isrs);
+}
+EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v3);
+
void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
@@ -844,6 +890,21 @@ void rtw89_pci_disable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpc
}
EXPORT_SYMBOL(rtw89_pci_disable_intr_v2);
+void rtw89_pci_enable_intr_v3(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
+{
+ rtw89_write32(rtwdev, R_BE_HIMR0, rtwpci->halt_c2h_intrs);
+ rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, rtwpci->intrs[1]);
+ rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, rtwpci->ind_intrs);
+}
+EXPORT_SYMBOL(rtw89_pci_enable_intr_v3);
+
+void rtw89_pci_disable_intr_v3(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
+{
+ rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, 0);
+ rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, 0);
+}
+EXPORT_SYMBOL(rtw89_pci_disable_intr_v3);
+
static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
@@ -885,7 +946,7 @@ static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
struct rtw89_dev *rtwdev = dev;
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
const struct rtw89_pci_info *info = rtwdev->pci_info;
- const struct rtw89_pci_gen_def *gen_def = info->gen_def;
+ const struct rtw89_pci_isr_def *isr_def = info->isr_def;
struct rtw89_pci_isrs isrs;
unsigned long flags;
@@ -893,13 +954,13 @@ static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs);
spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
- if (unlikely(isrs.isrs[0] & gen_def->isr_rdu))
+ if (unlikely(isrs.isrs[0] & isr_def->isr_rdu))
rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci);
- if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_halt_c2h))
+ if (unlikely(isrs.halt_c2h_isrs & isr_def->isr_halt_c2h))
rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev));
- if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_wdt_timeout))
+ if (unlikely(isrs.halt_c2h_isrs & isr_def->isr_wdt_timeout))
rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT);
if (unlikely(rtwpci->under_recovery))
@@ -950,6 +1011,24 @@ exit:
return irqret;
}
+#define DEF_TXCHADDRS_TYPE3(gen, ch_idx, txch, v...) \
+ [RTW89_TXCH_##ch_idx] = { \
+ .num = R_##gen##_##txch##_TXBD_CFG, \
+ .idx = R_##gen##_##txch##_TXBD_IDX ##v, \
+ .bdram = 0, \
+ .desa_l = 0, \
+ .desa_h = 0, \
+ }
+
+#define DEF_TXCHADDRS_TYPE3_GRP_BASE(gen, ch_idx, txch, grp, v...) \
+ [RTW89_TXCH_##ch_idx] = { \
+ .num = R_##gen##_##txch##_TXBD_CFG, \
+ .idx = R_##gen##_##txch##_TXBD_IDX ##v, \
+ .bdram = 0, \
+ .desa_l = R_##gen##_##grp##_TXBD_DESA_L, \
+ .desa_h = R_##gen##_##grp##_TXBD_DESA_H, \
+ }
+
#define DEF_TXCHADDRS_TYPE2(gen, ch_idx, txch, v...) \
[RTW89_TXCH_##ch_idx] = { \
.num = R_##gen##_##txch##_TXBD_NUM ##v, \
@@ -977,6 +1056,22 @@ exit:
.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
}
+#define DEF_RXCHADDRS_TYPE3(gen, ch_idx, rxch, v...) \
+ [RTW89_RXCH_##ch_idx] = { \
+ .num = R_##gen##_RX_##rxch##_RXBD_CONFIG, \
+ .idx = R_##gen##_##ch_idx##0_RXBD_IDX ##v, \
+ .desa_l = 0, \
+ .desa_h = 0, \
+ }
+
+#define DEF_RXCHADDRS_TYPE3_GRP_BASE(gen, ch_idx, rxch, grp, v...) \
+ [RTW89_RXCH_##ch_idx] = { \
+ .num = R_##gen##_RX_##rxch##_RXBD_CONFIG, \
+ .idx = R_##gen##_##ch_idx##0_RXBD_IDX ##v, \
+ .desa_l = R_##gen##_##grp##_RXBD_DESA_L, \
+ .desa_h = R_##gen##_##grp##_RXBD_DESA_H, \
+ }
+
#define DEF_RXCHADDRS(gen, ch_idx, rxch, v...) \
[RTW89_RXCH_##ch_idx] = { \
.num = R_##gen##_##rxch##_RXBD_NUM ##v, \
@@ -1054,8 +1149,36 @@ const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be = {
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_be);
+const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be_v1 = {
+ .tx = {
+ DEF_TXCHADDRS_TYPE3_GRP_BASE(BE, ACH0, CH0, ACQ, _V1),
+ /* no CH1 */
+ DEF_TXCHADDRS_TYPE3(BE, ACH2, CH2, _V1),
+ /* no CH3 */
+ DEF_TXCHADDRS_TYPE3(BE, ACH4, CH4, _V1),
+ /* no CH5 */
+ DEF_TXCHADDRS_TYPE3(BE, ACH6, CH6, _V1),
+ /* no CH7 */
+ DEF_TXCHADDRS_TYPE3_GRP_BASE(BE, CH8, CH8, NACQ, _V1),
+ /* no CH9 */
+ DEF_TXCHADDRS_TYPE3(BE, CH10, CH10, _V1),
+ /* no CH11 */
+ DEF_TXCHADDRS_TYPE3(BE, CH12, CH12, _V1),
+ },
+ .rx = {
+ DEF_RXCHADDRS_TYPE3_GRP_BASE(BE, RXQ, CH0, HOST0, _V1),
+ DEF_RXCHADDRS_TYPE3(BE, RPQ, CH1, _V1),
+ },
+};
+EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_be_v1);
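
The TYPE3 macros encode the grouped-BD register layout: only the ring that anchors a group (ACQ/NACQ on TX, HOST0 on RX) carries descriptor-base registers, while the others leave desa_l/desa_h zero and are programmed purely through their CFG register. Mechanically expanding one entry from the table above:

	/* DEF_TXCHADDRS_TYPE3(BE, ACH2, CH2, _V1) expands to: */
	[RTW89_TXCH_ACH2] = {
		.num	= R_BE_CH2_TXBD_CFG,
		.idx	= R_BE_CH2_TXBD_IDX_V1,
		.bdram	= 0,
		.desa_l	= 0,
		.desa_h	= 0,
	},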
+
+#undef DEF_TXCHADDRS_TYPE3
+#undef DEF_TXCHADDRS_TYPE3_GRP_BASE
+#undef DEF_TXCHADDRS_TYPE2
#undef DEF_TXCHADDRS_TYPE1
#undef DEF_TXCHADDRS
+#undef DEF_RXCHADDRS_TYPE3
+#undef DEF_RXCHADDRS_TYPE3_GRP_BASE
#undef DEF_RXCHADDRS
static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev,
@@ -1101,7 +1224,7 @@ static
u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
- struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
+ struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[RTW89_TXCH_CH12];
u32 cnt;
spin_lock_bh(&rtwpci->trx_lock);
@@ -1117,7 +1240,7 @@ u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev,
u8 txch)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
- struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
+ struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[txch];
struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
u32 cnt;
@@ -1134,7 +1257,7 @@ static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
u8 txch)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
- struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
+ struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[txch];
struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
const struct rtw89_chip_info *chip = rtwdev->chip;
u32 bd_cnt, wd_cnt, min_cnt = 0;
@@ -1142,7 +1265,7 @@ static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
enum rtw89_debug_mask debug_mask;
u32 cnt;
- rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];
+ rx_ring = &rtwpci->rx.rings[RTW89_RXCH_RPQ];
spin_lock_bh(&rtwpci->trx_lock);
bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
@@ -1227,7 +1350,7 @@ static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_p
static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
- struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
+ struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[txch];
if (rtwdev->hci.paused) {
set_bit(txch, rtwpci->kick_map);
@@ -1247,7 +1370,7 @@ static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev)
if (!test_and_clear_bit(txch, rtwpci->kick_map))
continue;
- tx_ring = &rtwpci->tx_rings[txch];
+ tx_ring = &rtwpci->tx.rings[txch];
__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
}
}
@@ -1255,7 +1378,7 @@ static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev)
static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
- struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
+ struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[txch];
struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
u32 cur_idx, cur_rp;
u8 i;
@@ -1371,7 +1494,6 @@ static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
struct pci_dev *pdev = rtwpci->pdev;
struct sk_buff *skb = tx_req->skb;
struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
- struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
bool en_wd_info = desc_info->en_wd_info;
u32 txwd_len;
u32 txwp_len;
@@ -1387,7 +1509,6 @@ static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
}
tx_data->dma = dma;
- rcu_assign_pointer(skb_data->wait, NULL);
txwp_len = sizeof(*txwp_info);
txwd_len = chip->txwd_body_size;
@@ -1521,7 +1642,7 @@ static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_req
return -EINVAL;
}
- tx_ring = &rtwpci->tx_rings[txch];
+ tx_ring = &rtwpci->tx.rings[txch];
spin_lock_bh(&rtwpci->trx_lock);
n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring);
@@ -1607,6 +1728,41 @@ static void rtw89_pci_init_wp_16sel(struct rtw89_dev *rtwdev)
}
}
+static u16 rtw89_pci_enc_bd_cfg(struct rtw89_dev *rtwdev, u16 bd_num,
+ u32 dma_offset)
+{
+ u16 dma_offset_sel;
+ u16 num_sel;
+
+ /* B_BE_TX_NUM_SEL_MASK, B_BE_RX_NUM_SEL_MASK:
+ * 0 -> 0
+ * 1 -> 64 = 2^6
+ * 2 -> 128 = 2^7
+ * ...
+ * 7 -> 4096 = 2^12
+ */
+ num_sel = ilog2(bd_num) - 5;
+
+ if (hweight16(bd_num) != 1)
+ rtw89_warn(rtwdev, "bd_num %u is not power of 2\n", bd_num);
+
+ /* B_BE_TX_START_OFFSET_MASK, B_BE_RX_START_OFFSET_MASK:
+ * 0 -> 0 = 0 * 2^9
+ * 1 -> 512 = 1 * 2^9
+ * 2 -> 1024 = 2 * 2^9
+ * 3 -> 1536 = 3 * 2^9
+ * ...
+ * 255 -> 130560 = 255 * 2^9
+ */
+ dma_offset_sel = dma_offset >> 9;
+
+ if (dma_offset % 512)
+ rtw89_warn(rtwdev, "offset %u is not multiple of 512\n", dma_offset);
+
+ return u16_encode_bits(num_sel, B_BE_TX_NUM_SEL_MASK) |
+ u16_encode_bits(dma_offset_sel, B_BE_TX_START_OFFSET_MASK);
+}
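
A worked example of the encoding, with illustrative numbers only (the register access and field names are from this patch):

	/* Encode a 256-entry ring that starts 8192 bytes into its group's
	 * BD pool:
	 *   num_sel        = ilog2(256) - 5 = 3   (256 BDs)
	 *   dma_offset_sel = 8192 >> 9      = 16  (16 * 512 bytes)
	 */
	u16 cfg = rtw89_pci_enc_bd_cfg(rtwdev, 256, 8192);

	rtw89_write16(rtwdev, bd_ring->addr.num, cfg);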
+
static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
@@ -1616,10 +1772,12 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
struct rtw89_pci_rx_ring *rx_ring;
struct rtw89_pci_dma_ring *bd_ring;
const struct rtw89_pci_bd_ram *bd_ram;
+ dma_addr_t group_dma_base = 0;
+ u16 num_or_offset;
+ u32 addr_desa_l;
+ u32 addr_bdram;
u32 addr_num;
u32 addr_idx;
- u32 addr_bdram;
- u32 addr_desa_l;
u32 val32;
int i;
@@ -1627,7 +1785,7 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
if (info->tx_dma_ch_mask & BIT(i))
continue;
- tx_ring = &rtwpci->tx_rings[i];
+ tx_ring = &rtwpci->tx.rings[i];
bd_ring = &tx_ring->bd_ring;
bd_ram = bd_ram_table ? &bd_ram_table[i] : NULL;
addr_num = bd_ring->addr.num;
@@ -1636,7 +1794,18 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
bd_ring->wp = 0;
bd_ring->rp = 0;
- rtw89_write16(rtwdev, addr_num, bd_ring->len);
+ if (info->group_bd_addr) {
+ if (addr_desa_l)
+ group_dma_base = bd_ring->dma;
+
+ num_or_offset =
+ rtw89_pci_enc_bd_cfg(rtwdev, bd_ring->len,
+ bd_ring->dma - group_dma_base);
+ } else {
+ num_or_offset = bd_ring->len;
+ }
+ rtw89_write16(rtwdev, addr_num, num_or_offset);
+
if (addr_bdram && bd_ram) {
val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) |
FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) |
@@ -1644,12 +1813,14 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
rtw89_write32(rtwdev, addr_bdram, val32);
}
- rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
- rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma));
+ if (addr_desa_l) {
+ rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
+ rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma));
+ }
}
for (i = 0; i < RTW89_RXCH_NUM; i++) {
- rx_ring = &rtwpci->rx_rings[i];
+ rx_ring = &rtwpci->rx.rings[i];
bd_ring = &rx_ring->bd_ring;
addr_num = bd_ring->addr.num;
addr_idx = bd_ring->addr.idx;
@@ -1663,9 +1834,22 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
rx_ring->diliver_desc.ready = false;
rx_ring->target_rx_tag = 0;
- rtw89_write16(rtwdev, addr_num, bd_ring->len);
- rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
- rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma));
+ if (info->group_bd_addr) {
+ if (addr_desa_l)
+ group_dma_base = bd_ring->dma;
+
+ num_or_offset =
+ rtw89_pci_enc_bd_cfg(rtwdev, bd_ring->len,
+ bd_ring->dma - group_dma_base);
+ } else {
+ num_or_offset = bd_ring->len;
+ }
+ rtw89_write16(rtwdev, addr_num, num_or_offset);
+
+ if (addr_desa_l) {
+ rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
+ rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma));
+ }
if (info->rx_ring_eq_is_full)
rtw89_write16(rtwdev, addr_idx, bd_ring->wp);
@@ -1698,7 +1882,7 @@ void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev)
skb_queue_len(&rtwpci->h2c_queue), true);
continue;
}
- rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx_rings[txch]);
+ rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx.rings[txch]);
}
spin_unlock_bh(&rtwpci->trx_lock);
}
@@ -1774,14 +1958,14 @@ void rtw89_pci_switch_bd_idx_addr(struct rtw89_dev *rtwdev, bool low_power)
return;
for (i = 0; i < RTW89_TXCH_NUM; i++) {
- tx_ring = &rtwpci->tx_rings[i];
+ tx_ring = &rtwpci->tx.rings[i];
tx_ring->bd_ring.addr.idx = low_power ?
bd_idx_addr->tx_bd_addrs[i] :
dma_addr_set->tx[i].idx;
}
for (i = 0; i < RTW89_RXCH_NUM; i++) {
- rx_ring = &rtwpci->rx_rings[i];
+ rx_ring = &rtwpci->rx.rings[i];
rx_ring->bd_ring.addr.idx = low_power ?
bd_idx_addr->rx_bd_addrs[i] :
dma_addr_set->rx[i].idx;
@@ -2725,7 +2909,7 @@ static int rtw89_pci_poll_rxdma_ch_idle_ax(struct rtw89_dev *rtwdev)
static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev)
{
- u32 ret;
+ int ret;
ret = rtw89_pci_poll_txdma_ch_idle_ax(rtwdev);
if (ret) {
@@ -3211,15 +3395,6 @@ static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev,
struct pci_dev *pdev,
struct rtw89_pci_tx_ring *tx_ring)
{
- int ring_sz;
- u8 *head;
- dma_addr_t dma;
-
- head = tx_ring->bd_ring.head;
- dma = tx_ring->bd_ring.dma;
- ring_sz = tx_ring->bd_ring.desc_size * tx_ring->bd_ring.len;
- dma_free_coherent(&pdev->dev, ring_sz, head, dma);
-
tx_ring->bd_ring.head = NULL;
}
@@ -3227,6 +3402,7 @@ static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev,
struct pci_dev *pdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct rtw89_pci_dma_pool *bd_pool = &rtwpci->tx.bd_pool;
const struct rtw89_pci_info *info = rtwdev->pci_info;
struct rtw89_pci_tx_ring *tx_ring;
int i;
@@ -3234,10 +3410,12 @@ static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev,
for (i = 0; i < RTW89_TXCH_NUM; i++) {
if (info->tx_dma_ch_mask & BIT(i))
continue;
- tx_ring = &rtwpci->tx_rings[i];
+ tx_ring = &rtwpci->tx.rings[i];
rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring);
rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
}
+
+ dma_free_coherent(&pdev->dev, bd_pool->size, bd_pool->head, bd_pool->dma);
}
static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev,
@@ -3248,8 +3426,6 @@ static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev,
struct sk_buff *skb;
dma_addr_t dma;
u32 buf_sz;
- u8 *head;
- int ring_sz = rx_ring->bd_ring.desc_size * rx_ring->bd_ring.len;
int i;
buf_sz = rx_ring->buf_sz;
@@ -3265,10 +3441,6 @@ static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev,
rx_ring->buf[i] = NULL;
}
- head = rx_ring->bd_ring.head;
- dma = rx_ring->bd_ring.dma;
- dma_free_coherent(&pdev->dev, ring_sz, head, dma);
-
rx_ring->bd_ring.head = NULL;
}
@@ -3276,13 +3448,16 @@ static void rtw89_pci_free_rx_rings(struct rtw89_dev *rtwdev,
struct pci_dev *pdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct rtw89_pci_dma_pool *bd_pool = &rtwpci->rx.bd_pool;
struct rtw89_pci_rx_ring *rx_ring;
int i;
for (i = 0; i < RTW89_RXCH_NUM; i++) {
- rx_ring = &rtwpci->rx_rings[i];
+ rx_ring = &rtwpci->rx.rings[i];
rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring);
}
+
+ dma_free_coherent(&pdev->dev, bd_pool->size, bd_pool->head, bd_pool->dma);
}
static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev,
@@ -3374,12 +3549,10 @@ static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev,
struct pci_dev *pdev,
struct rtw89_pci_tx_ring *tx_ring,
u32 desc_size, u32 len,
- enum rtw89_tx_channel txch)
+ enum rtw89_tx_channel txch,
+ void *head, dma_addr_t dma)
{
const struct rtw89_pci_ch_dma_addr *txch_addr;
- int ring_sz = desc_size * len;
- u8 *head;
- dma_addr_t dma;
int ret;
ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch);
@@ -3394,12 +3567,6 @@ static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev,
goto err_free_wd_ring;
}
- head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
- if (!head) {
- ret = -ENOMEM;
- goto err_free_wd_ring;
- }
-
INIT_LIST_HEAD(&tx_ring->busy_pages);
tx_ring->bd_ring.head = head;
tx_ring->bd_ring.dma = dma;
@@ -3422,25 +3589,48 @@ static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev,
struct pci_dev *pdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct rtw89_pci_dma_pool *bd_pool = &rtwpci->tx.bd_pool;
const struct rtw89_pci_info *info = rtwdev->pci_info;
struct rtw89_pci_tx_ring *tx_ring;
+ u32 i, tx_allocated;
+ dma_addr_t dma;
u32 desc_size;
+ u32 ring_sz;
+ u32 pool_sz;
+ u32 ch_num;
+ void *head;
u32 len;
- u32 i, tx_allocated;
int ret;
+ BUILD_BUG_ON(RTW89_PCI_TXBD_NUM_MAX % 16);
+
+ desc_size = sizeof(struct rtw89_pci_tx_bd_32);
+ len = RTW89_PCI_TXBD_NUM_MAX;
+ ch_num = RTW89_TXCH_NUM - hweight32(info->tx_dma_ch_mask);
+ ring_sz = desc_size * len;
+ pool_sz = ring_sz * ch_num;
+
+ head = dma_alloc_coherent(&pdev->dev, pool_sz, &dma, GFP_KERNEL);
+ if (!head)
+ return -ENOMEM;
+
+ bd_pool->head = head;
+ bd_pool->dma = dma;
+ bd_pool->size = pool_sz;
+
for (i = 0; i < RTW89_TXCH_NUM; i++) {
if (info->tx_dma_ch_mask & BIT(i))
continue;
- tx_ring = &rtwpci->tx_rings[i];
- desc_size = sizeof(struct rtw89_pci_tx_bd_32);
- len = RTW89_PCI_TXBD_NUM_MAX;
+ tx_ring = &rtwpci->tx.rings[i];
ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring,
- desc_size, len, i);
+ desc_size, len, i, head, dma);
if (ret) {
rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i);
goto err_free;
}
+
+ head += ring_sz;
+ dma += ring_sz;
}
return 0;
@@ -3448,24 +3638,24 @@ static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev,
err_free:
tx_allocated = i;
for (i = 0; i < tx_allocated; i++) {
- tx_ring = &rtwpci->tx_rings[i];
+ tx_ring = &rtwpci->tx.rings[i];
rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
}
+ dma_free_coherent(&pdev->dev, bd_pool->size, bd_pool->head, bd_pool->dma);
+
return ret;
}
static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
struct pci_dev *pdev,
struct rtw89_pci_rx_ring *rx_ring,
- u32 desc_size, u32 len, u32 rxch)
+ u32 desc_size, u32 len, u32 rxch,
+ void *head, dma_addr_t dma)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
const struct rtw89_pci_ch_dma_addr *rxch_addr;
struct sk_buff *skb;
- u8 *head;
- dma_addr_t dma;
- int ring_sz = desc_size * len;
int buf_sz = RTW89_PCI_RX_BUF_SIZE;
int i, allocated;
int ret;
@@ -3476,12 +3666,6 @@ static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
return ret;
}
- head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
- if (!head) {
- ret = -ENOMEM;
- goto err;
- }
-
rx_ring->bd_ring.head = head;
rx_ring->bd_ring.dma = dma;
rx_ring->bd_ring.len = len;
@@ -3530,12 +3714,8 @@ err_free:
rx_ring->buf[i] = NULL;
}
- head = rx_ring->bd_ring.head;
- dma = rx_ring->bd_ring.dma;
- dma_free_coherent(&pdev->dev, ring_sz, head, dma);
-
rx_ring->bd_ring.head = NULL;
-err:
+
return ret;
}
@@ -3543,22 +3723,43 @@ static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev,
struct pci_dev *pdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct rtw89_pci_dma_pool *bd_pool = &rtwpci->rx.bd_pool;
struct rtw89_pci_rx_ring *rx_ring;
+ int i, rx_allocated;
+ dma_addr_t dma;
u32 desc_size;
+ u32 ring_sz;
+ u32 pool_sz;
+ void *head;
u32 len;
- int i, rx_allocated;
int ret;
+ desc_size = sizeof(struct rtw89_pci_rx_bd_32);
+ len = RTW89_PCI_RXBD_NUM_MAX;
+ ring_sz = desc_size * len;
+ pool_sz = ring_sz * RTW89_RXCH_NUM;
+
+ head = dma_alloc_coherent(&pdev->dev, pool_sz, &dma, GFP_KERNEL);
+ if (!head)
+ return -ENOMEM;
+
+ bd_pool->head = head;
+ bd_pool->dma = dma;
+ bd_pool->size = pool_sz;
+
for (i = 0; i < RTW89_RXCH_NUM; i++) {
- rx_ring = &rtwpci->rx_rings[i];
- desc_size = sizeof(struct rtw89_pci_rx_bd_32);
- len = RTW89_PCI_RXBD_NUM_MAX;
+ rx_ring = &rtwpci->rx.rings[i];
+
ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring,
- desc_size, len, i);
+ desc_size, len, i,
+ head, dma);
if (ret) {
rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i);
goto err_free;
}
+
+ head += ring_sz;
+ dma += ring_sz;
}
return 0;
@@ -3566,10 +3767,12 @@ static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev,
err_free:
rx_allocated = i;
for (i = 0; i < rx_allocated; i++) {
- rx_ring = &rtwpci->rx_rings[i];
+ rx_ring = &rtwpci->rx.rings[i];
rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring);
}
+ dma_free_coherent(&pdev->dev, bd_pool->size, bd_pool->head, bd_pool->dma);
+
return ret;
}
@@ -3776,6 +3979,40 @@ void rtw89_pci_config_intr_mask_v2(struct rtw89_dev *rtwdev)
}
EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v2);
+static void rtw89_pci_recovery_intr_mask_v3(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+
+ rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0;
+ rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
+ rtwpci->intrs[0] = 0;
+ rtwpci->intrs[1] = 0;
+}
+
+static void rtw89_pci_default_intr_mask_v3(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+
+ rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0;
+ rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
+ rtwpci->intrs[0] = 0;
+ rtwpci->intrs[1] = B_BE_PCIE_RDU_CH1_IMR |
+ B_BE_PCIE_RDU_CH0_IMR |
+ B_BE_PCIE_RX_RX0P2_IMR0_V1 |
+ B_BE_PCIE_RX_RPQ0_IMR0_V1;
+}
+
+void rtw89_pci_config_intr_mask_v3(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+
+ if (rtwpci->under_recovery)
+ rtw89_pci_recovery_intr_mask_v3(rtwdev);
+ else
+ rtw89_pci_default_intr_mask_v3(rtwdev);
+}
+EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v3);
+
static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev,
struct pci_dev *pdev)
{
@@ -4158,7 +4395,7 @@ static int rtw89_pci_lv1rst_stop_dma_ax(struct rtw89_dev *rtwdev)
static int rtw89_pci_lv1rst_start_dma_ax(struct rtw89_dev *rtwdev)
{
- u32 ret;
+ int ret;
if (rtwdev->chip->chip_id == RTL8852C)
return 0;
@@ -4172,7 +4409,7 @@ static int rtw89_pci_lv1rst_start_dma_ax(struct rtw89_dev *rtwdev)
return ret;
rtw89_pci_ctrl_dma_all(rtwdev, true);
- return ret;
+ return 0;
}
static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev,
@@ -4228,18 +4465,18 @@ static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget)
struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi);
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
const struct rtw89_pci_info *info = rtwdev->pci_info;
- const struct rtw89_pci_gen_def *gen_def = info->gen_def;
+ const struct rtw89_pci_isr_def *isr_def = info->isr_def;
unsigned long flags;
int work_done;
rtwdev->napi_budget_countdown = budget;
- rtw89_write32(rtwdev, gen_def->isr_clear_rpq.addr, gen_def->isr_clear_rpq.data);
+ rtw89_write32(rtwdev, isr_def->isr_clear_rpq.addr, isr_def->isr_clear_rpq.data);
work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
if (work_done == budget)
return budget;
- rtw89_write32(rtwdev, gen_def->isr_clear_rxq.addr, gen_def->isr_clear_rxq.data);
+ rtw89_write32(rtwdev, isr_def->isr_clear_rxq.addr, isr_def->isr_clear_rxq.data);
work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
if (work_done < budget && napi_complete_done(napi, work_done)) {
spin_lock_irqsave(&rtwpci->irq_lock, flags);
@@ -4394,14 +4631,17 @@ const struct pci_error_handlers rtw89_pci_err_handler = {
};
EXPORT_SYMBOL(rtw89_pci_err_handler);
-const struct rtw89_pci_gen_def rtw89_pci_gen_ax = {
+const struct rtw89_pci_isr_def rtw89_pci_isr_ax = {
.isr_rdu = B_AX_RDU_INT,
.isr_halt_c2h = B_AX_HALT_C2H_INT_EN,
.isr_wdt_timeout = B_AX_WDT_TIMEOUT_INT_EN,
.isr_clear_rpq = {R_AX_PCIE_HISR00, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT},
.isr_clear_rxq = {R_AX_PCIE_HISR00, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT |
B_AX_RDU_INT},
+};
+EXPORT_SYMBOL(rtw89_pci_isr_ax);
+const struct rtw89_pci_gen_def rtw89_pci_gen_ax = {
.mac_pre_init = rtw89_pci_ops_mac_pre_init_ax,
.mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit_ax,
.mac_post_init = rtw89_pci_ops_mac_post_init_ax,
diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
index 52f527069da6..cb05c83dfd56 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.h
+++ b/drivers/net/wireless/realtek/rtw89/pci.h
@@ -372,6 +372,14 @@
#define B_BE_HS0ISR_IND_INT BIT(0)
#define R_BE_PCIE_DMA_IMR_0_V1 0x30B8
+#define B_BE_PCIE_RDU_CH7_IMR BIT(31)
+#define B_BE_PCIE_RDU_CH6_IMR BIT(30)
+#define B_BE_PCIE_RDU_CH5_IMR BIT(29)
+#define B_BE_PCIE_RDU_CH4_IMR BIT(28)
+#define B_BE_PCIE_RDU_CH3_IMR BIT(27)
+#define B_BE_PCIE_RDU_CH2_IMR BIT(26)
+#define B_BE_PCIE_RDU_CH1_IMR BIT(25)
+#define B_BE_PCIE_RDU_CH0_IMR BIT(24)
#define B_BE_PCIE_RX_RX1P1_IMR0_V1 BIT(23)
#define B_BE_PCIE_RX_RX0P1_IMR0_V1 BIT(22)
#define B_BE_PCIE_RX_ROQ1_IMR0_V1 BIT(21)
@@ -397,6 +405,14 @@
#define B_BE_PCIE_TX_CH0_IMR0 BIT(0)
#define R_BE_PCIE_DMA_ISR 0x30BC
+#define B_BE_PCIE_RDU_CH7_INT BIT(31)
+#define B_BE_PCIE_RDU_CH6_INT BIT(30)
+#define B_BE_PCIE_RDU_CH5_INT BIT(29)
+#define B_BE_PCIE_RDU_CH4_INT BIT(28)
+#define B_BE_PCIE_RDU_CH3_INT BIT(27)
+#define B_BE_PCIE_RDU_CH2_INT BIT(26)
+#define B_BE_PCIE_RDU_CH1_INT BIT(25)
+#define B_BE_PCIE_RDU_CH0_INT BIT(24)
#define B_BE_PCIE_RX_RX1P1_ISR_V1 BIT(23)
#define B_BE_PCIE_RX_RX0P1_ISR_V1 BIT(22)
#define B_BE_PCIE_RX_ROQ1_ISR_V1 BIT(21)
@@ -426,9 +442,13 @@
#define B_BE_RDU_CH4_INT_IMR_V1 BIT(29)
#define B_BE_RDU_CH3_INT_IMR_V1 BIT(28)
#define B_BE_RDU_CH2_INT_IMR_V1 BIT(27)
+#define B_BE_RDU_CH1_INT_EN_V2 BIT(27)
#define B_BE_RDU_CH1_INT_IMR_V1 BIT(26)
+#define B_BE_RDU_CH0_INT_EN_V2 BIT(26)
#define B_BE_RDU_CH0_INT_IMR_V1 BIT(25)
+#define B_BE_RXDMA_STUCK_INT_EN_V2 BIT(25)
#define B_BE_RXDMA_STUCK_INT_EN_V1 BIT(24)
+#define B_BE_TXDMA_STUCK_INT_EN_V2 BIT(24)
#define B_BE_TXDMA_STUCK_INT_EN_V1 BIT(23)
#define B_BE_TXDMA_CH14_INT_EN_V1 BIT(22)
#define B_BE_TXDMA_CH13_INT_EN_V1 BIT(21)
@@ -459,9 +479,13 @@
#define B_BE_RDU_CH4_INT_V1 BIT(29)
#define B_BE_RDU_CH3_INT_V1 BIT(28)
#define B_BE_RDU_CH2_INT_V1 BIT(27)
+#define B_BE_RDU_CH1_INT_V2 BIT(27)
#define B_BE_RDU_CH1_INT_V1 BIT(26)
+#define B_BE_RDU_CH0_INT_V2 BIT(26)
#define B_BE_RDU_CH0_INT_V1 BIT(25)
+#define B_BE_RXDMA_STUCK_INT_V2 BIT(25)
#define B_BE_RXDMA_STUCK_INT_V1 BIT(24)
+#define B_BE_TXDMA_STUCK_INT_V2 BIT(24)
#define B_BE_TXDMA_STUCK_INT_V1 BIT(23)
#define B_BE_TXDMA_CH14_INT_V1 BIT(22)
#define B_BE_TXDMA_CH13_INT_V1 BIT(21)
@@ -784,9 +808,25 @@
#define R_BE_CH13_TXBD_NUM_V1 0xB04C
#define R_BE_CH14_TXBD_NUM_V1 0xB04E
+#define R_BE_CH0_TXBD_CFG 0xB030
+#define R_BE_CH2_TXBD_CFG 0xB034
+#define R_BE_CH4_TXBD_CFG 0xB038
+#define R_BE_CH6_TXBD_CFG 0xB03C
+#define R_BE_CH8_TXBD_CFG 0xB040
+#define R_BE_CH10_TXBD_CFG 0xB044
+#define R_BE_CH12_TXBD_CFG 0xB048
+#define B_BE_TX_FLAG BIT(14)
+#define B_BE_TX_START_OFFSET_MASK GENMASK(12, 4)
+#define B_BE_TX_NUM_SEL_MASK GENMASK(2, 0)
+
#define R_BE_RXQ0_RXBD_NUM_V1 0xB050
#define R_BE_RPQ0_RXBD_NUM_V1 0xB052
+#define R_BE_RX_CH0_RXBD_CONFIG 0xB050
+#define R_BE_RX_CH1_RXBD_CONFIG 0xB052
+#define B_BE_RX_START_OFFSET_MASK GENMASK(11, 4)
+#define B_BE_RX_NUM_SEL_MASK GENMASK(2, 0)
+
#define R_BE_CH0_TXBD_IDX_V1 0xB100
#define R_BE_CH1_TXBD_IDX_V1 0xB104
#define R_BE_CH2_TXBD_IDX_V1 0xB108
@@ -837,11 +877,25 @@
#define R_BE_CH14_TXBD_DESA_L_V1 0xB270
#define R_BE_CH14_TXBD_DESA_H_V1 0xB274
+#define R_BE_ACQ_TXBD_DESA_L 0xB200
+#define B_BE_TX_ACQ_DESA_L_MASK GENMASK(31, 3)
+#define R_BE_ACQ_TXBD_DESA_H 0xB204
+#define B_BE_TX_ACQ_DESA_H_MASK GENMASK(7, 0)
+#define R_BE_NACQ_TXBD_DESA_L 0xB240
+#define B_BE_TX_NACQ_DESA_L_MASK GENMASK(31, 3)
+#define R_BE_NACQ_TXBD_DESA_H 0xB244
+#define B_BE_TX_NACQ_DESA_H_MASK GENMASK(7, 0)
+
#define R_BE_RXQ0_RXBD_DESA_L_V1 0xB300
#define R_BE_RXQ0_RXBD_DESA_H_V1 0xB304
#define R_BE_RPQ0_RXBD_DESA_L_V1 0xB308
#define R_BE_RPQ0_RXBD_DESA_H_V1 0xB30C
+#define R_BE_HOST0_RXBD_DESA_L 0xB300
+#define B_BE_RX_HOST0_DESA_L_MASK GENMASK(31, 3)
+#define R_BE_HOST0_RXBD_DESA_H 0xB304
+#define B_BE_RX_HOST0_DESA_H_MASK GENMASK(7, 0)
+
#define R_BE_WP_ADDR_H_SEL0_3_V1 0xB420
#define R_BE_WP_ADDR_H_SEL4_7_V1 0xB424
#define R_BE_WP_ADDR_H_SEL8_11_V1 0xB428
@@ -1249,7 +1303,7 @@ struct rtw89_pci_bd_idx_addr {
};
struct rtw89_pci_ch_dma_addr {
- u32 num;
+ u32 num; /* also the 'offset' address for the group_bd_addr design */
u32 idx;
u32 bdram;
u32 desa_l;
@@ -1267,13 +1321,15 @@ struct rtw89_pci_bd_ram {
u8 min_num;
};
-struct rtw89_pci_gen_def {
+struct rtw89_pci_isr_def {
u32 isr_rdu;
u32 isr_halt_c2h;
u32 isr_wdt_timeout;
struct rtw89_reg2_def isr_clear_rpq;
struct rtw89_reg2_def isr_clear_rxq;
+};
+struct rtw89_pci_gen_def {
int (*mac_pre_init)(struct rtw89_dev *rtwdev);
int (*mac_pre_deinit)(struct rtw89_dev *rtwdev);
int (*mac_post_init)(struct rtw89_dev *rtwdev);
@@ -1309,8 +1365,16 @@ struct rtw89_pci_ssid_quirk {
unsigned long bitmap; /* bitmap of rtw89_quirks */
};
+struct rtw89_pci_rpp_info {
+ u16 seq;
+ u8 qsel;
+ u8 tx_status;
+ u8 txch;
+};
+
struct rtw89_pci_info {
const struct rtw89_pci_gen_def *gen_def;
+ const struct rtw89_pci_isr_def *isr_def;
enum mac_ax_bd_trunc_mode txbd_trunc_mode;
enum mac_ax_bd_trunc_mode rxbd_trunc_mode;
enum mac_ax_rxbd_mode rxbd_mode;
@@ -1328,6 +1392,8 @@ struct rtw89_pci_info {
bool rx_ring_eq_is_full;
bool check_rx_tag;
bool no_rxbd_fs;
+ bool group_bd_addr;
+ u32 rpp_fmt_size;
u32 init_cfg_reg;
u32 txhci_en_bit;
@@ -1357,6 +1423,8 @@ struct rtw89_pci_info {
u32 (*fill_txaddr_info)(struct rtw89_dev *rtwdev,
void *txaddr_info_addr, u32 total_len,
dma_addr_t dma, u8 *add_info_nr);
+ void (*parse_rpp)(struct rtw89_dev *rtwdev, void *rpp,
+ struct rtw89_pci_rpp_info *rpp_info);
void (*config_intr_mask)(struct rtw89_dev *rtwdev);
void (*enable_intr)(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
void (*disable_intr)(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
@@ -1430,6 +1498,19 @@ struct rtw89_pci_rpp_fmt {
__le32 dword;
} __packed;
+#define RTW89_PCI_RPP_W0_MACID_V1_MASK GENMASK(9, 0)
+#define RTW89_PCI_RPP_W0_DMA_CH_MASK GENMASK(13, 10)
+#define RTW89_PCI_RPP_W0_TX_STATUS_V1_MASK GENMASK(16, 14)
+#define RTW89_PCI_RPP_W0_PCIE_SEQ_V1_MASK GENMASK(31, 17)
+#define RTW89_PCI_RPP_W1_QSEL_V1_MASK GENMASK(5, 0)
+#define RTW89_PCI_RPP_W1_TID_IND BIT(6)
+#define RTW89_PCI_RPP_W1_CHANGE_LINK BIT(7)
+
+struct rtw89_pci_rpp_fmt_v1 {
+ __le32 w0;
+ __le32 w1;
+} __packed;
+
struct rtw89_pci_rx_bd_32 {
__le16 buf_size;
__le16 opt;
@@ -1468,6 +1549,12 @@ struct rtw89_pci_dma_ring {
u32 rp; /* hw idx */
};
+struct rtw89_pci_dma_pool {
+ void *head;
+ dma_addr_t dma;
+ u32 size;
+};
+
struct rtw89_pci_tx_wd_ring {
void *head;
dma_addr_t dma;
@@ -1497,6 +1584,11 @@ struct rtw89_pci_tx_ring {
u64 tx_mac_id_drop;
};
+struct rtw89_pci_tx_rings {
+ struct rtw89_pci_tx_ring rings[RTW89_TXCH_NUM];
+ struct rtw89_pci_dma_pool bd_pool;
+};
+
struct rtw89_pci_rx_ring {
struct rtw89_pci_dma_ring bd_ring;
struct sk_buff *buf[RTW89_PCI_RXBD_NUM_MAX];
@@ -1506,6 +1598,11 @@ struct rtw89_pci_rx_ring {
u32 target_rx_tag:13;
};
+struct rtw89_pci_rx_rings {
+ struct rtw89_pci_rx_ring rings[RTW89_RXCH_NUM];
+ struct rtw89_pci_dma_pool bd_pool;
+};
+
struct rtw89_pci_isrs {
u32 ind_isrs;
u32 halt_c2h_isrs;
@@ -1523,8 +1620,8 @@ struct rtw89_pci {
bool low_power;
bool under_recovery;
bool enable_dac;
- struct rtw89_pci_tx_ring tx_rings[RTW89_TXCH_NUM];
- struct rtw89_pci_rx_ring rx_rings[RTW89_RXCH_NUM];
+ struct rtw89_pci_tx_rings tx;
+ struct rtw89_pci_rx_rings rx;
struct sk_buff_head h2c_queue;
struct sk_buff_head h2c_release_queue;
DECLARE_BITMAP(kick_map, RTW89_TXCH_NUM);
@@ -1537,10 +1634,7 @@ struct rtw89_pci {
static inline struct rtw89_pci_rx_info *RTW89_PCI_RX_SKB_CB(struct sk_buff *skb)
{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
- BUILD_BUG_ON(sizeof(struct rtw89_pci_tx_data) >
- sizeof(info->status.status_driver_data));
+ BUILD_BUG_ON(sizeof(struct rtw89_pci_rx_info) > sizeof(skb->cb));
return (struct rtw89_pci_rx_info *)skb->cb;
}
@@ -1571,6 +1665,10 @@ static inline struct rtw89_pci_tx_data *RTW89_PCI_TX_SKB_CB(struct sk_buff *skb)
{
struct rtw89_tx_skb_data *data = RTW89_TX_SKB_CB(skb);
+ BUILD_BUG_ON(sizeof(struct rtw89_tx_skb_data) +
+ sizeof(struct rtw89_pci_tx_data) >
+ sizeof_field(struct ieee80211_tx_info, driver_data));
+
return (struct rtw89_pci_tx_data *)data->hci_priv;
}
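One reading of the combined-size assertion above, assuming the usual rtw89 layering: rtw89_tx_skb_data sits at the front of mac80211's fixed-size driver_data scratch space, and the PCI-specific rtw89_pci_tx_data lives in its trailing hci_priv area, so both must fit together:

	/* ieee80211_tx_info.driver_data (fixed-size scratch area)
	 * +--------------------------+------------------------------+
	 * | struct rtw89_tx_skb_data | hci_priv:                    |
	 * |                          |   struct rtw89_pci_tx_data   |
	 * +--------------------------+------------------------------+
	 */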
@@ -1626,8 +1724,12 @@ extern const struct pci_error_handlers rtw89_pci_err_handler;
extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set;
extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1;
extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be;
+extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be_v1;
extern const struct rtw89_pci_bd_ram rtw89_bd_ram_table_dual[RTW89_TXCH_NUM];
extern const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM];
+extern const struct rtw89_pci_isr_def rtw89_pci_isr_ax;
+extern const struct rtw89_pci_isr_def rtw89_pci_isr_be;
+extern const struct rtw89_pci_isr_def rtw89_pci_isr_be_v1;
extern const struct rtw89_pci_gen_def rtw89_pci_gen_ax;
extern const struct rtw89_pci_gen_def rtw89_pci_gen_be;
@@ -1646,16 +1748,23 @@ u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev,
u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev,
void *txaddr_info_addr, u32 total_len,
dma_addr_t dma, u8 *add_info_nr);
+void rtw89_pci_parse_rpp(struct rtw89_dev *rtwdev, void *_rpp,
+ struct rtw89_pci_rpp_info *rpp_info);
+void rtw89_pci_parse_rpp_v1(struct rtw89_dev *rtwdev, void *_rpp,
+ struct rtw89_pci_rpp_info *rpp_info);
void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable);
void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev);
void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev);
void rtw89_pci_config_intr_mask_v2(struct rtw89_dev *rtwdev);
+void rtw89_pci_config_intr_mask_v3(struct rtw89_dev *rtwdev);
void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
void rtw89_pci_enable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
void rtw89_pci_disable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
+void rtw89_pci_enable_intr_v3(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
+void rtw89_pci_disable_intr_v3(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci);
void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev,
struct rtw89_pci *rtwpci,
struct rtw89_pci_isrs *isrs);
@@ -1665,6 +1774,9 @@ void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev,
void rtw89_pci_recognize_intrs_v2(struct rtw89_dev *rtwdev,
struct rtw89_pci *rtwpci,
struct rtw89_pci_isrs *isrs);
+void rtw89_pci_recognize_intrs_v3(struct rtw89_dev *rtwdev,
+ struct rtw89_pci *rtwpci,
+ struct rtw89_pci_isrs *isrs);
static inline
u32 rtw89_chip_fill_txaddr_info(struct rtw89_dev *rtwdev,
diff --git a/drivers/net/wireless/realtek/rtw89/pci_be.c b/drivers/net/wireless/realtek/rtw89/pci_be.c
index 12e6a0cbb889..e4590879b800 100644
--- a/drivers/net/wireless/realtek/rtw89/pci_be.c
+++ b/drivers/net/wireless/realtek/rtw89/pci_be.c
@@ -175,10 +175,10 @@ static void rtw89_pci_clr_idx_all_be(struct rtw89_dev *rtwdev)
rtw89_write32(rtwdev, R_BE_RXBD_RWPTR_CLR1_V1,
B_BE_CLR_RXQ0_IDX | B_BE_CLR_RPQ0_IDX);
- rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ];
+ rx_ring = &rtwpci->rx.rings[RTW89_RXCH_RXQ];
rtw89_write16(rtwdev, R_BE_RXQ0_RXBD_IDX_V1, rx_ring->bd_ring.len - 1);
- rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];
+ rx_ring = &rtwpci->rx.rings[RTW89_RXCH_RPQ];
rtw89_write16(rtwdev, R_BE_RPQ0_RXBD_IDX_V1, rx_ring->bd_ring.len - 1);
}
@@ -665,13 +665,25 @@ static int __maybe_unused rtw89_pci_resume_be(struct device *dev)
SIMPLE_DEV_PM_OPS(rtw89_pm_ops_be, rtw89_pci_suspend_be, rtw89_pci_resume_be);
EXPORT_SYMBOL(rtw89_pm_ops_be);
-const struct rtw89_pci_gen_def rtw89_pci_gen_be = {
+const struct rtw89_pci_isr_def rtw89_pci_isr_be = {
.isr_rdu = B_BE_RDU_CH1_INT_V1 | B_BE_RDU_CH0_INT_V1,
.isr_halt_c2h = B_BE_HALT_C2H_INT,
.isr_wdt_timeout = B_BE_WDT_TIMEOUT_INT,
.isr_clear_rpq = {R_BE_PCIE_DMA_ISR, B_BE_PCIE_RX_RPQ0_ISR_V1},
.isr_clear_rxq = {R_BE_PCIE_DMA_ISR, B_BE_PCIE_RX_RX0P2_ISR_V1},
+};
+EXPORT_SYMBOL(rtw89_pci_isr_be);
+const struct rtw89_pci_isr_def rtw89_pci_isr_be_v1 = {
+ .isr_rdu = B_BE_PCIE_RDU_CH1_INT | B_BE_PCIE_RDU_CH0_INT,
+ .isr_halt_c2h = B_BE_HALT_C2H_INT,
+ .isr_wdt_timeout = B_BE_WDT_TIMEOUT_INT,
+ .isr_clear_rpq = {R_BE_PCIE_DMA_ISR, B_BE_PCIE_RX_RPQ0_ISR_V1},
+ .isr_clear_rxq = {R_BE_PCIE_DMA_ISR, B_BE_PCIE_RX_RX0P2_ISR_V1},
+};
+EXPORT_SYMBOL(rtw89_pci_isr_be_v1);
+
+const struct rtw89_pci_gen_def rtw89_pci_gen_be = {
.mac_pre_init = rtw89_pci_ops_mac_pre_init_be,
.mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit_be,
.mac_post_init = rtw89_pci_ops_mac_post_init_be,
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index d607577b353c..ba7feadd7582 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -1702,6 +1702,91 @@ void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev)
rtw89_phy_bb_reset(rtwdev);
}
+void rtw89_phy_init_bb_afe(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
+ const struct rtw89_fw_element_hdr *afe_elm = elm_info->afe;
+ const struct rtw89_phy_afe_info *info;
+ u32 action, cat, class;
+ u32 addr, mask, val;
+ u32 poll, rpt;
+ u32 n, i;
+
+ if (!afe_elm)
+ return;
+
+ n = le32_to_cpu(afe_elm->size) / sizeof(*info);
+
+ for (i = 0; i < n; i++) {
+ info = &afe_elm->u.afe.infos[i];
+
+ class = le32_to_cpu(info->class);
+ switch (class) {
+ case RTW89_FW_AFE_CLASS_P0:
+ case RTW89_FW_AFE_CLASS_P1:
+ case RTW89_FW_AFE_CLASS_CMN:
+ /* Currently only two paths are supported */
+ break;
+ case RTW89_FW_AFE_CLASS_P2:
+ case RTW89_FW_AFE_CLASS_P3:
+ case RTW89_FW_AFE_CLASS_P4:
+ default:
+ rtw89_warn(rtwdev, "unexpected AFE class %u\n", class);
+ continue;
+ }
+
+ addr = le32_to_cpu(info->addr);
+ mask = le32_to_cpu(info->mask);
+ val = le32_to_cpu(info->val);
+ cat = le32_to_cpu(info->cat);
+ action = le32_to_cpu(info->action);
+
+ switch (action) {
+ case RTW89_FW_AFE_ACTION_WRITE:
+ switch (cat) {
+ case RTW89_FW_AFE_CAT_MAC:
+ case RTW89_FW_AFE_CAT_MAC1:
+ rtw89_write32_mask(rtwdev, addr, mask, val);
+ break;
+ case RTW89_FW_AFE_CAT_AFEDIG:
+ case RTW89_FW_AFE_CAT_AFEDIG1:
+ rtw89_write32_mask(rtwdev, addr, mask, val);
+ break;
+ case RTW89_FW_AFE_CAT_BB:
+ rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_0);
+ break;
+ case RTW89_FW_AFE_CAT_BB1:
+ rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_1);
+ break;
+ default:
+ rtw89_warn(rtwdev,
+ "unexpected AFE writing action %u\n", action);
+ break;
+ }
+ break;
+ case RTW89_FW_AFE_ACTION_POLL:
+ for (poll = 0; poll <= 10; poll++) {
+ /* For CAT_BB, AFE reads registers with mcu_offset 0,
+ * so both CAT_MAC and CAT_BB use the same method.
+ */
+ rpt = rtw89_read32_mask(rtwdev, addr, mask);
+ if (rpt == val)
+ goto poll_done;
+
+ fsleep(1);
+ }
+ rtw89_warn(rtwdev, "failed to poll AFE cat=%u addr=0x%x mask=0x%x\n",
+ cat, addr, mask);
+poll_done:
+ break;
+ case RTW89_FW_AFE_ACTION_DELAY:
+ fsleep(addr);
+ break;
+ }
+ }
+}
+
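The loop above assumes each AFE table entry carries a class, a category, an action and an addr/mask/val triple. The layout below is only an illustration of that assumption; the real struct rtw89_phy_afe_info is defined elsewhere in the driver and its field order may differ:

	struct rtw89_phy_afe_info_sketch {
		__le32 class;	/* RTW89_FW_AFE_CLASS_* */
		__le32 cat;	/* RTW89_FW_AFE_CAT_* */
		__le32 action;	/* write / poll / delay */
		__le32 addr;	/* register, or delay in us for DELAY */
		__le32 mask;
		__le32 val;
	} __packed;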
static u32 rtw89_phy_nctl_poll(struct rtw89_dev *rtwdev)
{
rtw89_phy_write32(rtwdev, 0x8080, 0x4);
@@ -2943,7 +3028,7 @@ static void __rtw89_phy_c2h_ra_rpt_iter(struct rtw89_sta_link *rtwsta_link,
}
if (mode == RTW89_RA_RPT_MODE_LEGACY) {
- valid = rtw89_ra_report_to_bitrate(rtwdev, rate, &legacy_bitrate);
+ valid = rtw89_legacy_rate_to_bitrate(rtwdev, rate, &legacy_bitrate);
if (!valid)
return;
}
@@ -3087,6 +3172,34 @@ void (* const rtw89_phy_c2h_dm_handler[])(struct rtw89_dev *rtwdev,
[RTW89_PHY_C2H_DM_FUNC_FW_SCAN] = rtw89_phy_c2h_fw_scan_rpt,
};
+static
+void rtw89_phy_c2h_rfk_tas_pwr(struct rtw89_dev *rtwdev,
+ const struct rtw89_c2h_rf_tas_rpt_log *content)
+{
+ const enum rtw89_sar_sources src = rtwdev->sar.src;
+ struct rtw89_tas_info *tas = &rtwdev->tas;
+ u64 linear = 0;
+ u32 i, cur_idx;
+ s16 txpwr;
+
+ if (!tas->enable || src == RTW89_SAR_SOURCE_NONE)
+ return;
+
+ cur_idx = le32_to_cpu(content->cur_idx);
+ for (i = 0; i < cur_idx; i++) {
+ txpwr = le16_to_cpu(content->txpwr_history[i]);
+ linear += rtw89_db_quarter_to_linear(txpwr);
+
+ rtw89_debug(rtwdev, RTW89_DBG_SAR,
+ "tas: index: %u, txpwr: %d\n", i, txpwr);
+ }
+
+ if (cur_idx == 0)
+ tas->instant_txpwr = rtw89_db_to_linear(0);
+ else
+ tas->instant_txpwr = DIV_ROUND_DOWN_ULL(linear, cur_idx);
+}
+
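In other words, the helper averages the reported TX-power history in the linear domain rather than in dB. Assuming rtw89_db_quarter_to_linear() maps a quarter-dB value q to 10^(q / 40) in fixed point, the update is roughly:

	/* Hedged reading of the math above, not literal driver code:
	 *
	 *   instant_txpwr = (1 / cur_idx) * sum_{i < cur_idx} 10^(txpwr[i] / 40)
	 *
	 * with a 0 dB (linear 1.0) fallback when the history is empty.
	 */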
static void rtw89_phy_c2h_rfk_rpt_log(struct rtw89_dev *rtwdev,
enum rtw89_phy_c2h_rfk_log_func func,
void *content, u16 len)
@@ -3338,6 +3451,13 @@ static void rtw89_phy_c2h_rfk_rpt_log(struct rtw89_dev *rtwdev,
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt power_d[1] = %*ph\n",
(int)sizeof(txgapk->power_d[1]), txgapk->power_d[1]);
return;
+ case RTW89_PHY_C2H_RFK_LOG_FUNC_TAS_PWR:
+ if (len != sizeof(struct rtw89_c2h_rf_tas_rpt_log))
+ goto out;
+
+ rtw89_phy_c2h_rfk_tas_pwr(rtwdev, content);
+
+ return;
default:
break;
}
@@ -3390,9 +3510,6 @@ static void rtw89_phy_c2h_rfk_log(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
u16 chunk_len;
bool handled;
- if (!rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK))
- return;
-
log_ptr += sizeof(*c2h_hdr);
len -= sizeof(*c2h_hdr);
@@ -3469,6 +3586,13 @@ rtw89_phy_c2h_rfk_log_txgapk(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32
RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK, "TXGAPK");
}
+static void
+rtw89_phy_c2h_rfk_log_tas_pwr(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+ rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
+ RTW89_PHY_C2H_RFK_LOG_FUNC_TAS_PWR, "TAS");
+}
+
static
void (* const rtw89_phy_c2h_rfk_log_handler[])(struct rtw89_dev *rtwdev,
struct sk_buff *c2h, u32 len) = {
@@ -3478,6 +3602,7 @@ void (* const rtw89_phy_c2h_rfk_log_handler[])(struct rtw89_dev *rtwdev,
[RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK] = rtw89_phy_c2h_rfk_log_rxdck,
[RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI] = rtw89_phy_c2h_rfk_log_tssi,
[RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK] = rtw89_phy_c2h_rfk_log_txgapk,
+ [RTW89_PHY_C2H_RFK_LOG_FUNC_TAS_PWR] = rtw89_phy_c2h_rfk_log_tas_pwr,
};
static
@@ -3540,39 +3665,19 @@ rtw89_phy_c2h_rfk_report_state(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u3
}
static void
-rtw89_phy_c2h_rfk_log_tas_pwr(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+rtw89_phy_c2h_rfk_report_tas_pwr(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
- const struct rtw89_c2h_rf_tas_info *rf_tas =
+ const struct rtw89_c2h_rf_tas_info *report =
(const struct rtw89_c2h_rf_tas_info *)c2h->data;
- const enum rtw89_sar_sources src = rtwdev->sar.src;
- struct rtw89_tas_info *tas = &rtwdev->tas;
- u64 linear = 0;
- u32 i, cur_idx;
- s16 txpwr;
-
- if (!tas->enable || src == RTW89_SAR_SOURCE_NONE)
- return;
-
- cur_idx = le32_to_cpu(rf_tas->cur_idx);
- for (i = 0; i < cur_idx; i++) {
- txpwr = (s16)le16_to_cpu(rf_tas->txpwr_history[i]);
- linear += rtw89_db_quarter_to_linear(txpwr);
-
- rtw89_debug(rtwdev, RTW89_DBG_SAR,
- "tas: index: %u, txpwr: %d\n", i, txpwr);
- }
- if (cur_idx == 0)
- tas->instant_txpwr = rtw89_db_to_linear(0);
- else
- tas->instant_txpwr = DIV_ROUND_DOWN_ULL(linear, cur_idx);
+ rtw89_phy_c2h_rfk_tas_pwr(rtwdev, &report->content);
}
static
void (* const rtw89_phy_c2h_rfk_report_handler[])(struct rtw89_dev *rtwdev,
struct sk_buff *c2h, u32 len) = {
[RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE] = rtw89_phy_c2h_rfk_report_state,
- [RTW89_PHY_C2H_RFK_LOG_TAS_PWR] = rtw89_phy_c2h_rfk_log_tas_pwr,
+ [RTW89_PHY_C2H_RFK_REPORT_FUNC_TAS_PWR] = rtw89_phy_c2h_rfk_report_tas_pwr,
};
bool rtw89_phy_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func)
@@ -3626,12 +3731,11 @@ void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
handler = rtw89_phy_c2h_dm_handler[func];
break;
default:
- rtw89_info(rtwdev, "PHY c2h class %d not support\n", class);
- return;
+ break;
}
if (!handler) {
- rtw89_info(rtwdev, "PHY c2h class %d func %d not support\n", class,
- func);
+ rtw89_info_once(rtwdev, "PHY c2h class %d func %d not supported\n",
+ class, func);
return;
}
handler(rtwdev, skb, len);
@@ -5497,6 +5601,34 @@ static void rtw89_phy_ifs_clm_set_th_reg(struct rtw89_dev *rtwdev,
i + 1, env->ifs_clm_th_l[i], env->ifs_clm_th_h[i]);
}
+static void __rtw89_phy_nhm_setting_init(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
+{
+ const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
+ const struct rtw89_ccx_regs *ccx = phy->ccx;
+
+ env->nhm_include_cca = false;
+ env->nhm_mntr_time = 0;
+ env->nhm_sum = 0;
+
+ rtw89_phy_write32_idx_set(rtwdev, ccx->nhm_config, ccx->nhm_en_mask, bb->phy_idx);
+ rtw89_phy_write32_idx_set(rtwdev, ccx->nhm_method, ccx->nhm_pwr_method_msk,
+ bb->phy_idx);
+}
+
+void rtw89_phy_nhm_setting_init(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_bb_ctx *bb;
+
+ if (!chip->support_noise)
+ return;
+
+ rtw89_for_each_active_bb(rtwdev, bb)
+ __rtw89_phy_nhm_setting_init(rtwdev, bb);
+}
+
static void rtw89_phy_ifs_clm_setting_init(struct rtw89_dev *rtwdev,
struct rtw89_bb_ctx *bb)
{
@@ -5558,7 +5690,7 @@ static int rtw89_phy_ccx_racing_ctrl(struct rtw89_dev *rtwdev,
}
static void rtw89_phy_ccx_trigger(struct rtw89_dev *rtwdev,
- struct rtw89_bb_ctx *bb)
+ struct rtw89_bb_ctx *bb, u8 sel)
{
const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
struct rtw89_env_monitor_info *env = &bb->env_monitor;
@@ -5568,10 +5700,17 @@ static void rtw89_phy_ccx_trigger(struct rtw89_dev *rtwdev,
bb->phy_idx);
rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 0,
bb->phy_idx);
+ if (sel & RTW89_PHY_ENV_MON_NHM)
+ rtw89_phy_write32_idx_clr(rtwdev, ccx->nhm_config,
+ ccx->nhm_en_mask, bb->phy_idx);
+
rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 1,
bb->phy_idx);
rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1,
bb->phy_idx);
+ if (sel & RTW89_PHY_ENV_MON_NHM)
+ rtw89_phy_write32_idx_set(rtwdev, ccx->nhm_config,
+ ccx->nhm_en_mask, bb->phy_idx);
env->ccx_ongoing = true;
}
@@ -5642,6 +5781,125 @@ static void rtw89_phy_ifs_clm_get_utility(struct rtw89_dev *rtwdev,
env->ifs_clm_cca_avg[i]);
}
+static u8 rtw89_nhm_weighted_avg(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
+{
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
+ u8 nhm_weight[RTW89_NHM_RPT_NUM];
+ u32 nhm_weighted_sum = 0;
+ u8 weight_zero;
+ u8 i;
+
+ if (env->nhm_sum == 0)
+ return 0;
+
+ weight_zero = clamp_t(u16, env->nhm_th[0] - RTW89_NHM_WEIGHT_OFFSET, 0, U8_MAX);
+
+ for (i = 0; i < RTW89_NHM_RPT_NUM; i++) {
+ if (i == 0)
+ nhm_weight[i] = weight_zero;
+ else if (i == (RTW89_NHM_RPT_NUM - 1))
+ nhm_weight[i] = env->nhm_th[i - 1] + RTW89_NHM_WEIGHT_OFFSET;
+ else
+ nhm_weight[i] = (env->nhm_th[i - 1] + env->nhm_th[i]) / 2;
+ }
+
+ if (rtwdev->chip->chip_id == RTL8852A || rtwdev->chip->chip_id == RTL8852B ||
+ rtwdev->chip->chip_id == RTL8852C) {
+ if (env->nhm_th[RTW89_NHM_TH_NUM - 1] == RTW89_NHM_WA_TH) {
+ nhm_weight[RTW89_NHM_RPT_NUM - 1] =
+ env->nhm_th[RTW89_NHM_TH_NUM - 2] +
+ RTW89_NHM_WEIGHT_OFFSET;
+ nhm_weight[RTW89_NHM_RPT_NUM - 2] =
+ nhm_weight[RTW89_NHM_RPT_NUM - 1];
+ }
+
+ env->nhm_result[0] += env->nhm_result[RTW89_NHM_RPT_NUM - 1];
+ env->nhm_result[RTW89_NHM_RPT_NUM - 1] = 0;
+ }
+
+ for (i = 0; i < RTW89_NHM_RPT_NUM; i++)
+ nhm_weighted_sum += env->nhm_result[i] * nhm_weight[i];
+
+ return (nhm_weighted_sum / env->nhm_sum) >> RTW89_NHM_TH_FACTOR;
+}
+
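A worked example may help, using the 11k threshold table programmed by rtw89_phy_nhm_th_update() below (values already shifted by RTW89_NHM_TH_FACTOR, so 18 dB becomes 36); the numbers are illustrative only:

	/* nhm_th[] = {36, 42, 48, 54, 60, 70, 80, 90, 100, 110, 120}
	 *
	 * weight[0]  = 36 - 2        = 34   (below the first threshold)
	 * weight[1]  = (36 + 42) / 2 = 39   (midpoint of bin 1)
	 * ...
	 * weight[11] = 120 + 2       = 122  (above the last threshold)
	 *
	 * result = (sum(result[i] * weight[i]) / nhm_sum) >> RTW89_NHM_TH_FACTOR
	 */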
+static void __rtw89_phy_nhm_get_result(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb, enum rtw89_band hw_band,
+ u16 ch_hw_value)
+{
+ const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_ccx_regs *ccx = phy->ccx;
+ struct ieee80211_supported_band *sband;
+ const struct rtw89_reg_def *nhm_rpt;
+ enum nl80211_band band;
+ u32 sum = 0;
+ u8 chan_idx;
+ u8 nhm_pwr;
+ u8 i;
+
+ if (!rtw89_phy_read32_idx(rtwdev, ccx->nhm, ccx->nhm_ready, bb->phy_idx)) {
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "[NHM] failed to get NHM report\n");
+ return;
+ }
+
+ for (i = 0; i < RTW89_NHM_RPT_NUM; i++) {
+ nhm_rpt = &(*chip->nhm_report)[i];
+
+ env->nhm_result[i] =
+ rtw89_phy_read32_idx(rtwdev, nhm_rpt->addr,
+ nhm_rpt->mask, bb->phy_idx);
+ sum += env->nhm_result[i];
+ }
+ env->nhm_sum = sum;
+ nhm_pwr = rtw89_nhm_weighted_avg(rtwdev, bb);
+
+ if (!ch_hw_value)
+ return;
+
+ band = rtw89_hw_to_nl80211_band(hw_band);
+ sband = rtwdev->hw->wiphy->bands[band];
+ if (!sband)
+ return;
+
+ for (chan_idx = 0; chan_idx < sband->n_channels; chan_idx++) {
+ struct ieee80211_channel *channel;
+ struct rtw89_nhm_report *rpt;
+ struct list_head *nhm_list;
+
+ channel = &sband->channels[chan_idx];
+ if (channel->hw_value != ch_hw_value)
+ continue;
+
+ rpt = &env->nhm_his[hw_band][chan_idx];
+ nhm_list = &env->nhm_rpt_list;
+
+ rpt->channel = channel;
+ rpt->noise = nhm_pwr;
+
+ if (list_empty(&rpt->list))
+ list_add_tail(&rpt->list, nhm_list);
+
+ return;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "[NHM] channel not found\n");
+}
+
+void rtw89_phy_nhm_get_result(struct rtw89_dev *rtwdev, enum rtw89_band hw_band,
+ u16 ch_hw_value)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_bb_ctx *bb;
+
+ if (!chip->support_noise)
+ return;
+
+ rtw89_for_each_active_bb(rtwdev, bb)
+ __rtw89_phy_nhm_get_result(rtwdev, bb, hw_band, ch_hw_value);
+}
+
static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev,
struct rtw89_bb_ctx *bb)
{
@@ -5742,6 +6000,107 @@ static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev,
return true;
}
+static void rtw89_phy_nhm_th_update(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb)
+{
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
+ static const u8 nhm_th_11k[RTW89_NHM_TH_NUM] = {
+ 18, 21, 24, 27, 30, 35, 40, 45, 50, 55, 60
+ };
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_reg_def *nhm_th;
+ u8 i;
+
+ for (i = 0; i < RTW89_NHM_TH_NUM; i++)
+ env->nhm_th[i] = nhm_th_11k[i] << RTW89_NHM_TH_FACTOR;
+
+ if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B ||
+ chip->chip_id == RTL8852C)
+ env->nhm_th[RTW89_NHM_TH_NUM - 1] = RTW89_NHM_WA_TH;
+
+ for (i = 0; i < RTW89_NHM_TH_NUM; i++) {
+ nhm_th = &(*chip->nhm_th)[i];
+
+ rtw89_phy_write32_idx(rtwdev, nhm_th->addr, nhm_th->mask,
+ env->nhm_th[i], bb->phy_idx);
+ }
+}
+
+static int rtw89_phy_nhm_set(struct rtw89_dev *rtwdev,
+ struct rtw89_bb_ctx *bb,
+ struct rtw89_ccx_para_info *para)
+{
+ const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
+ const struct rtw89_ccx_regs *ccx = phy->ccx;
+ u32 unit_idx = 0;
+ u32 period = 0;
+
+ if (para->mntr_time == 0) {
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
+ "[NHM] MNTR_TIME is 0\n");
+ return -EINVAL;
+ }
+
+ if (rtw89_phy_ccx_racing_ctrl(rtwdev, bb, para->rac_lv))
+ return -EINVAL;
+
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
+ "[NHM]nhm_incld_cca=%d, mntr_time=%d ms\n",
+ para->nhm_incld_cca, para->mntr_time);
+
+ if (para->mntr_time != env->nhm_mntr_time) {
+ rtw89_phy_ccx_ms_to_period_unit(rtwdev, para->mntr_time,
+ &period, &unit_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->nhm_config,
+ ccx->nhm_period_mask, period, bb->phy_idx);
+ rtw89_phy_write32_idx(rtwdev, ccx->nhm_config,
+ ccx->nhm_unit_mask, unit_idx, bb->phy_idx);
+
+ env->nhm_mntr_time = para->mntr_time;
+ env->ccx_period = period;
+ env->ccx_unit_idx = unit_idx;
+ }
+
+ if (para->nhm_incld_cca != env->nhm_include_cca) {
+ rtw89_phy_write32_idx(rtwdev, ccx->nhm_config,
+ ccx->nhm_include_cca_mask, para->nhm_incld_cca,
+ bb->phy_idx);
+
+ env->nhm_include_cca = para->nhm_incld_cca;
+ }
+
+ rtw89_phy_nhm_th_update(rtwdev, bb);
+
+ return 0;
+}
+
+static void __rtw89_phy_nhm_trigger(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
+{
+ struct rtw89_ccx_para_info para = {
+ .mntr_time = RTW89_NHM_MNTR_TIME,
+ .rac_lv = RTW89_RAC_LV_1,
+ .nhm_incld_cca = true,
+ };
+
+ rtw89_phy_ccx_racing_release(rtwdev, bb);
+
+ rtw89_phy_nhm_set(rtwdev, bb, &para);
+ rtw89_phy_ccx_trigger(rtwdev, bb, RTW89_PHY_ENV_MON_NHM);
+}
+
+void rtw89_phy_nhm_trigger(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_bb_ctx *bb;
+
+ if (!chip->support_noise)
+ return;
+
+ rtw89_for_each_active_bb(rtwdev, bb)
+ __rtw89_phy_nhm_trigger(rtwdev, bb);
+}
+
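Putting the two entry points together, a hedged sketch of one measurement round; the actual call sites are outside this hunk, and chan stands for the caller's current struct rtw89_chan:

	/* Kick off NHM on every active BB... */
	rtw89_phy_nhm_trigger(rtwdev);

	/* ...then, once at least RTW89_NHM_MNTR_TIME ms have elapsed,
	 * harvest the per-channel noise figure.
	 */
	rtw89_phy_nhm_get_result(rtwdev, chan->band_type, chan->channel);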
static int rtw89_phy_ifs_clm_set(struct rtw89_dev *rtwdev,
struct rtw89_bb_ctx *bb,
struct rtw89_ccx_para_info *para)
@@ -5816,7 +6175,7 @@ static void __rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev,
if (rtw89_phy_ifs_clm_set(rtwdev, bb, &para) == 0)
chk_result |= RTW89_PHY_ENV_MON_IFS_CLM;
if (chk_result)
- rtw89_phy_ccx_trigger(rtwdev, bb);
+ rtw89_phy_ccx_trigger(rtwdev, bb, chk_result);
rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
"get_result=0x%x, chk_result:0x%x\n",
@@ -5930,8 +6289,6 @@ static void __rtw89_physts_parsing_init(struct rtw89_dev *rtwdev,
val |= BIT(RTW89_PHYSTS_IE13_DL_MU_DEF) |
BIT(RTW89_PHYSTS_IE01_CMN_OFDM);
} else if (i >= RTW89_CCK_PKT) {
- val |= BIT(RTW89_PHYSTS_IE09_FTR_0);
-
val &= ~(GENMASK(RTW89_PHYSTS_IE07_CMN_EXT_PATH_D,
RTW89_PHYSTS_IE04_CMN_EXT_PATH_A));
@@ -6910,6 +7267,7 @@ void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
rtw89_chip_bb_sethw(rtwdev);
rtw89_phy_env_monitor_init(rtwdev);
+ rtw89_phy_nhm_setting_init(rtwdev);
rtw89_physts_parsing_init(rtwdev);
rtw89_phy_dig_init(rtwdev);
rtw89_phy_cfo_init(rtwdev);
@@ -6935,6 +7293,43 @@ void rtw89_phy_dm_reinit(struct rtw89_dev *rtwdev)
rtw89_physts_parsing_init(rtwdev);
}
+static void __rtw89_phy_dm_init_data(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
+{
+ struct rtw89_env_monitor_info *env = &bb->env_monitor;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct ieee80211_supported_band *sband;
+ enum rtw89_band hw_band;
+ enum nl80211_band band;
+ u8 idx;
+
+ if (!chip->support_noise)
+ return;
+
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ sband = rtwdev->hw->wiphy->bands[band];
+ if (!sband)
+ continue;
+
+ hw_band = rtw89_nl80211_to_hw_band(band);
+ env->nhm_his[hw_band] =
+ devm_kcalloc(rtwdev->dev, sband->n_channels,
+ sizeof(*env->nhm_his[0]), GFP_KERNEL);
+ if (!env->nhm_his[hw_band])
+ continue;
+
+ for (idx = 0; idx < sband->n_channels; idx++)
+ INIT_LIST_HEAD(&env->nhm_his[hw_band][idx].list);
+ }
+
+ INIT_LIST_HEAD(&env->nhm_rpt_list);
+}
+
+void rtw89_phy_dm_init_data(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_bb_ctx *bb;
+
+ rtw89_for_each_capab_bb(rtwdev, bb)
+ __rtw89_phy_dm_init_data(rtwdev, bb);
+}
+
void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev,
struct rtw89_vif_link *rtwvif_link)
{
@@ -7590,6 +7985,15 @@ static const struct rtw89_ccx_regs rtw89_ccx_regs_ax = {
.ifs_total_addr = R_IFSCNT,
.ifs_cnt_done_mask = B_IFSCNT_DONE_MSK,
.ifs_total_mask = B_IFSCNT_TOTAL_CNT_MSK,
+ .nhm = R_NHM_AX,
+ .nhm_ready = B_NHM_READY_MSK,
+ .nhm_config = R_NHM_CFG,
+ .nhm_period_mask = B_NHM_PERIOD_MSK,
+ .nhm_unit_mask = B_NHM_COUNTER_MSK,
+ .nhm_include_cca_mask = B_NHM_INCLUDE_CCA_MSK,
+ .nhm_en_mask = B_NHM_EN_MSK,
+ .nhm_method = R_NHM_TH9,
+ .nhm_pwr_method_msk = B_NHM_PWDB_METHOD_MSK,
};
static const struct rtw89_physts_regs rtw89_physts_regs_ax = {
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index dc156376d951..9caacffd0af8 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -149,13 +149,14 @@ enum rtw89_phy_c2h_rfk_log_func {
RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK = 3,
RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI = 4,
RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK = 5,
+ RTW89_PHY_C2H_RFK_LOG_FUNC_TAS_PWR = 9,
RTW89_PHY_C2H_RFK_LOG_FUNC_NUM,
};
enum rtw89_phy_c2h_rfk_report_func {
RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE = 0,
- RTW89_PHY_C2H_RFK_LOG_TAS_PWR = 6,
+ RTW89_PHY_C2H_RFK_REPORT_FUNC_TAS_PWR = 6,
};
enum rtw89_phy_c2h_dm_func {
@@ -188,6 +189,12 @@ enum rtw89_env_monitor_result_level {
RTW89_PHY_ENV_MON_EDCCA_CLM = BIT(4),
};
+#define RTW89_NHM_WEIGHT_OFFSET 2
+#define RTW89_NHM_WA_TH (109 << 1)
+#define RTW89_NOISE_DEFAULT (-96)
+#define RTW89_NHM_MNTR_TIME 40
+#define RTW89_NHM_TH_FACTOR 1
+
#define CCX_US_BASE_RATIO 4
enum rtw89_ccx_unit {
RTW89_CCX_4_US = 0,
@@ -428,6 +435,15 @@ struct rtw89_ccx_regs {
u32 ifs_total_addr;
u32 ifs_cnt_done_mask;
u32 ifs_total_mask;
+ u32 nhm;
+ u32 nhm_ready;
+ u32 nhm_config;
+ u32 nhm_period_mask;
+ u32 nhm_unit_mask;
+ u32 nhm_include_cca_mask;
+ u32 nhm_en_mask;
+ u32 nhm_method;
+ u32 nhm_pwr_method_msk;
};
struct rtw89_physts_regs {
@@ -814,6 +830,7 @@ bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
bool rtw89_phy_write_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask, u32 data);
void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev);
+void rtw89_phy_init_bb_afe(struct rtw89_dev *rtwdev);
void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev, bool noio);
void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
const struct rtw89_reg2_def *reg,
@@ -821,6 +838,7 @@ void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
void *extra_data);
void rtw89_phy_dm_init(struct rtw89_dev *rtwdev);
void rtw89_phy_dm_reinit(struct rtw89_dev *rtwdev);
+void rtw89_phy_dm_init_data(struct rtw89_dev *rtwdev);
void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
u32 data, enum rtw89_phy_idx phy_idx);
void rtw89_phy_write32_idx_set(struct rtw89_dev *rtwdev, u32 addr, u32 bits,
@@ -1038,5 +1056,9 @@ enum rtw89_rf_path rtw89_phy_get_syn_sel(struct rtw89_dev *rtwdev,
u8 rtw89_rfk_chan_lookup(struct rtw89_dev *rtwdev,
const struct rtw89_rfk_chan_desc *desc, u8 desc_nr,
const struct rtw89_chan *target_chan);
+void rtw89_phy_nhm_setting_init(struct rtw89_dev *rtwdev);
+void rtw89_phy_nhm_get_result(struct rtw89_dev *rtwdev, enum rtw89_band hw_band,
+ u16 ch_hw_value);
+void rtw89_phy_nhm_trigger(struct rtw89_dev *rtwdev);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/phy_be.c b/drivers/net/wireless/realtek/rtw89/phy_be.c
index d321cf1fc485..3316a38a62d0 100644
--- a/drivers/net/wireless/realtek/rtw89/phy_be.c
+++ b/drivers/net/wireless/realtek/rtw89/phy_be.c
@@ -63,6 +63,15 @@ static const struct rtw89_ccx_regs rtw89_ccx_regs_be = {
.ifs_total_addr = R_IFSCNT_V1,
.ifs_cnt_done_mask = B_IFSCNT_DONE_MSK,
.ifs_total_mask = B_IFSCNT_TOTAL_CNT_MSK,
+ .nhm = R_NHM_BE,
+ .nhm_ready = B_NHM_READY_BE_MSK,
+ .nhm_config = R_NHM_CFG,
+ .nhm_period_mask = B_NHM_PERIOD_MSK,
+ .nhm_unit_mask = B_NHM_COUNTER_MSK,
+ .nhm_include_cca_mask = B_NHM_INCLUDE_CCA_MSK,
+ .nhm_en_mask = B_NHM_EN_MSK,
+ .nhm_method = R_NHM_TH9,
+ .nhm_pwr_method_msk = B_NHM_PWDB_METHOD_MSK,
};
static const struct rtw89_physts_regs rtw89_physts_regs_be = {
diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c
index 652f8fc81b79..cf58121eb541 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.c
+++ b/drivers/net/wireless/realtek/rtw89/ps.c
@@ -119,6 +119,9 @@ static void __rtw89_enter_lps_link(struct rtw89_dev *rtwdev,
rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_FW_CTRL);
rtw89_fw_h2c_lps_parm(rtwdev, &lps_param);
+
+ if (RTW89_CHK_FW_FEATURE(BEACON_TRACKING, &rtwdev->fw))
+ rtw89_fw_h2c_pwr_lvl(rtwdev, rtwvif_link);
}
static void __rtw89_leave_lps(struct rtw89_dev *rtwdev,
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index de81103a072f..ed1d958bc49e 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -3370,6 +3370,10 @@
#define B_AX_CSIPRT_HESU_AID_EN BIT(25)
#define B_AX_CSIPRT_VHTSU_AID_EN BIT(24)
+#define R_AX_BCN_PSR_RPT_P0 0xCE84
+#define R_AX_BCN_PSR_RPT_P0_C1 0xEE84
+#define B_AX_BCAID_P0_MASK GENMASK(10, 0)
+
#define R_AX_RX_STATE_MONITOR 0xCEF0
#define R_AX_RX_STATE_MONITOR_C1 0xEEF0
#define B_AX_RX_STATE_MONITOR_MASK GENMASK(31, 0)
@@ -6258,6 +6262,11 @@
#define B_BE_PTCL_TOP_ERR_IND BIT(1)
#define B_BE_SCHEDULE_TOP_ERR_IND BIT(0)
+#define R_BE_CMAC_FW_TRIGGER_IDCT_ISR 0x10168
+#define R_BE_CMAC_FW_TRIGGER_IDCT_ISR_C1 0x14168
+#define B_BE_CMAC_FW_ERR_IDCT_IMR BIT(31)
+#define B_BE_CMAC_FW_TRIG_IDCT BIT(0)
+
#define R_BE_SER_L0_DBG_CNT 0x10170
#define R_BE_SER_L0_DBG_CNT_C1 0x14170
#define B_BE_SER_L0_PHYINTF_CNT_MASK GENMASK(31, 24)
@@ -7494,6 +7503,10 @@
#define R_BE_DRV_INFO_OPTION_C1 0x15470
#define B_BE_DRV_INFO_PHYRPT_EN BIT(0)
+#define R_BE_BCN_PSR_RPT_P0 0x11484
+#define R_BE_BCN_PSR_RPT_P0_C1 0x15484
+#define B_BE_BCAID_P0_MASK GENMASK(10, 0)
+
#define R_BE_RX_ERR_ISR 0x114F4
#define R_BE_RX_ERR_ISR_C1 0x154F4
#define B_BE_RX_ERR_TRIG_ACT_TO BIT(9)
@@ -8092,6 +8105,26 @@
#define B_MEASUREMENT_TRIG_MSK BIT(2)
#define B_CCX_TRIG_OPT_MSK BIT(1)
#define B_CCX_EN_MSK BIT(0)
+#define R_NHM_CFG 0x0C08
+#define B_NHM_PERIOD_MSK GENMASK(15, 0)
+#define B_NHM_COUNTER_MSK GENMASK(17, 16)
+#define B_NHM_EN_MSK BIT(18)
+#define B_NHM_INCLUDE_CCA_MSK BIT(19)
+#define B_NHM_TH0_MSK GENMASK(31, 24)
+#define R_NHM_TH1 0x0C0C
+#define B_NHM_TH1_MSK GENMASK(7, 0)
+#define B_NHM_TH2_MSK GENMASK(15, 8)
+#define B_NHM_TH3_MSK GENMASK(23, 16)
+#define B_NHM_TH4_MSK GENMASK(31, 24)
+#define R_NHM_TH5 0x0C10
+#define B_NHM_TH5_MSK GENMASK(7, 0)
+#define B_NHM_TH6_MSK GENMASK(15, 8)
+#define B_NHM_TH7_MSK GENMASK(23, 16)
+#define B_NHM_TH8_MSK GENMASK(31, 24)
+#define R_NHM_TH9 0x0C14
+#define B_NHM_TH9_MSK GENMASK(7, 0)
+#define B_NHM_TH10_MSK GENMASK(15, 8)
+#define B_NHM_PWDB_METHOD_MSK GENMASK(17, 16)
#define R_FAHM 0x0C1C
#define B_RXTD_CKEN BIT(2)
#define R_IFS_COUNTER 0x0C28
@@ -8161,6 +8194,8 @@
#define R_BRK_ASYNC_RST_EN_1 0x0DC0
#define R_BRK_ASYNC_RST_EN_2 0x0DC4
#define R_BRK_ASYNC_RST_EN_3 0x0DC8
+#define R_NHM_BE 0x0EA4
+#define B_NHM_READY_BE_MSK BIT(16)
#define R_CTLTOP 0x1008
#define B_CTLTOP_ON BIT(23)
#define B_CTLTOP_VAL GENMASK(15, 12)
@@ -8216,6 +8251,26 @@
#define B_SWSI_R_BUSY_V1 BIT(25)
#define B_SWSI_R_DATA_DONE_V1 BIT(26)
#define R_TX_COUNTER 0x1A40
+#define R_NHM_CNT0 0x1A88
+#define B_NHM_CNT0_MSK GENMASK(15, 0)
+#define B_NHM_CNT1_MSK GENMASK(31, 16)
+#define R_NHM_CNT2 0x1A8C
+#define B_NHM_CNT2_MSK GENMASK(15, 0)
+#define B_NHM_CNT3_MSK GENMASK(31, 16)
+#define R_NHM_CNT4 0x1A90
+#define B_NHM_CNT4_MSK GENMASK(15, 0)
+#define B_NHM_CNT5_MSK GENMASK(31, 16)
+#define R_NHM_CNT6 0x1A94
+#define B_NHM_CNT6_MSK GENMASK(15, 0)
+#define B_NHM_CNT7_MSK GENMASK(31, 16)
+#define R_NHM_CNT8 0x1A98
+#define B_NHM_CNT8_MSK GENMASK(15, 0)
+#define B_NHM_CNT9_MSK GENMASK(31, 16)
+#define R_NHM_CNT10 0x1A9C
+#define B_NHM_CNT10_MSK GENMASK(15, 0)
+#define B_NHM_CNT11_MSK GENMASK(31, 16)
+#define R_NHM_AX 0x1AA4
+#define B_NHM_READY_MSK BIT(16)
#define R_IFS_CLM_TX_CNT 0x1ACC
#define R_IFS_CLM_TX_CNT_V1 0x0ECC
#define B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK GENMASK(31, 16)
@@ -9126,6 +9181,7 @@
#define B_COEF_SEL_MDPD BIT(8)
#define B_COEF_SEL_MDPD_V1 GENMASK(9, 8)
#define B_COEF_SEL_EN BIT(31)
+#define R_CFIR_COEF 0x810c
#define R_CFIR_SYS 0x8120
#define R_IQK_RES 0x8124
#define B_IQK_RES_K BIT(28)
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
index 393df2b0dcae..edcbda124916 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
@@ -2537,6 +2537,7 @@ static const struct rtw89_chip_ops rtw8851b_chip_ops = {
.query_rxdesc = rtw89_core_query_rxdesc,
.fill_txdesc = rtw89_core_fill_txdesc,
.fill_txdesc_fwcmd = rtw89_core_fill_txdesc,
+ .get_ch_dma = rtw89_core_get_ch_dma,
.cfg_ctrl_path = rtw89_mac_cfg_ctrl_path,
.mac_cfg_gnt = rtw89_mac_cfg_gnt,
.stop_sch_tx = rtw89_mac_stop_sch_tx,
@@ -2628,6 +2629,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.support_ant_gain = false,
.support_tas = false,
.support_sar_by_ant = false,
+ .support_noise = false,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
.rx_freq_frome_ie = true,
@@ -2689,6 +2691,8 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.cfo_hw_comp = true,
.dcfo_comp = &rtw8851b_dcfo_comp,
.dcfo_comp_sft = 12,
+ .nhm_report = NULL,
+ .nhm_th = NULL,
.imr_info = &rtw8851b_imr_info,
.imr_dmac_table = NULL,
.imr_cmac_table = NULL,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
index 7a319a6c838a..84c46d2f4d85 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
@@ -17,8 +17,9 @@
#define DPK_RF_REG_NUM_8851B 4
#define DPK_KSET_NUM 4
#define RTW8851B_RXK_GROUP_NR 4
-#define RTW8851B_RXK_GROUP_IDX_NR 2
-#define RTW8851B_TXK_GROUP_NR 1
+#define RTW8851B_RXK_GROUP_IDX_NR 4
+#define RTW8851B_A_TXK_GROUP_NR 2
+#define RTW8851B_G_TXK_GROUP_NR 1
#define RTW8851B_IQK_VER 0x14
#define RTW8851B_IQK_SS 1
#define RTW8851B_LOK_GRAM 10
@@ -114,19 +115,21 @@ static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8851B] = {0x5830};
static const u32 g_idxrxgain[RTW8851B_RXK_GROUP_NR] = {0x10e, 0x116, 0x28e, 0x296};
static const u32 g_idxattc2[RTW8851B_RXK_GROUP_NR] = {0x0, 0xf, 0x0, 0xf};
static const u32 g_idxrxagc[RTW8851B_RXK_GROUP_NR] = {0x0, 0x1, 0x2, 0x3};
-static const u32 a_idxrxgain[RTW8851B_RXK_GROUP_IDX_NR] = {0x10C, 0x28c};
-static const u32 a_idxattc2[RTW8851B_RXK_GROUP_IDX_NR] = {0xf, 0xf};
-static const u32 a_idxrxagc[RTW8851B_RXK_GROUP_IDX_NR] = {0x4, 0x6};
-static const u32 a_power_range[RTW8851B_TXK_GROUP_NR] = {0x0};
-static const u32 a_track_range[RTW8851B_TXK_GROUP_NR] = {0x6};
-static const u32 a_gain_bb[RTW8851B_TXK_GROUP_NR] = {0x0a};
-static const u32 a_itqt[RTW8851B_TXK_GROUP_NR] = {0x12};
-static const u32 g_power_range[RTW8851B_TXK_GROUP_NR] = {0x0};
-static const u32 g_track_range[RTW8851B_TXK_GROUP_NR] = {0x6};
-static const u32 g_gain_bb[RTW8851B_TXK_GROUP_NR] = {0x10};
-static const u32 g_itqt[RTW8851B_TXK_GROUP_NR] = {0x12};
-
-static const u32 rtw8851b_backup_bb_regs[] = {0xc0d4, 0xc0d8, 0xc0c4, 0xc0ec, 0xc0e8};
+static const u32 a_idxrxgain[RTW8851B_RXK_GROUP_IDX_NR] = {0x10C, 0x112, 0x28c, 0x292};
+static const u32 a_idxattc2[RTW8851B_RXK_GROUP_IDX_NR] = {0xf, 0xf, 0xf, 0xf};
+static const u32 a_idxrxagc[RTW8851B_RXK_GROUP_IDX_NR] = {0x4, 0x5, 0x6, 0x7};
+static const u32 a_power_range[RTW8851B_A_TXK_GROUP_NR] = {0x0, 0x0};
+static const u32 a_track_range[RTW8851B_A_TXK_GROUP_NR] = {0x7, 0x7};
+static const u32 a_gain_bb[RTW8851B_A_TXK_GROUP_NR] = {0x08, 0x0d};
+static const u32 a_itqt[RTW8851B_A_TXK_GROUP_NR] = {0x12, 0x12};
+static const u32 a_att_smxr[RTW8851B_A_TXK_GROUP_NR] = {0x0, 0x2};
+static const u32 g_power_range[RTW8851B_G_TXK_GROUP_NR] = {0x0};
+static const u32 g_track_range[RTW8851B_G_TXK_GROUP_NR] = {0x6};
+static const u32 g_gain_bb[RTW8851B_G_TXK_GROUP_NR] = {0x10};
+static const u32 g_itqt[RTW8851B_G_TXK_GROUP_NR] = {0x12};
+
+static const u32 rtw8851b_backup_bb_regs[] = {
+ 0xc0d4, 0xc0d8, 0xc0c4, 0xc0ec, 0xc0e8, 0x12a0, 0xc0f0};
static const u32 rtw8851b_backup_rf_regs[] = {
0xef, 0xde, 0x0, 0x1e, 0x2, 0x85, 0x90, 0x5};
@@ -139,17 +142,6 @@ static const u32 dpk_rf_reg[DPK_RF_REG_NUM_8851B] = {0xde, 0x8f, 0x5, 0x10005};
static void _set_ch(struct rtw89_dev *rtwdev, u32 val);
-static u8 _rxk_5ghz_group_from_idx(u8 idx)
-{
- /* There are four RXK groups (RTW8851B_RXK_GROUP_NR), but only group 0
- * and 2 are used in 5 GHz band, so reduce elements to 2.
- */
- if (idx < RTW8851B_RXK_GROUP_IDX_NR)
- return idx * 2;
-
- return 0;
-}
-
static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
return RF_A;
@@ -196,7 +188,7 @@ static void _txck_force(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
static void _rxck_force(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
bool force, enum adc_ck ck)
{
- static const u32 ck960_8851b[] = {0x8, 0x2, 0x2, 0x4, 0xf, 0xa, 0x93};
+ static const u32 ck960_8851b[] = {0x8, 0x2, 0x2, 0x4, 0xf, 0xa, 0x92};
static const u32 ck1920_8851b[] = {0x9, 0x0, 0x0, 0x3, 0xf, 0xa, 0x49};
const u32 *data;
@@ -800,7 +792,7 @@ static bool _iqk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
"[IQK]============ S%d ID_NBTXK ============\n", path);
rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x0);
rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT,
- 0x00b);
+ 0x11);
iqk_cmd = 0x408 | (1 << (4 + path));
break;
case ID_NBRXK:
@@ -818,7 +810,7 @@ static bool _iqk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1);
notready = _iqk_check_cal(rtwdev, path);
if (iqk_info->iqk_sram_en &&
- (ktype == ID_NBRXK || ktype == ID_RXK))
+ (ktype == ID_NBRXK || ktype == ID_RXK || ktype == ID_NBTXK))
_iqk_sram(rtwdev, path);
rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x0);
@@ -905,18 +897,27 @@ static bool _rxk_5g_group_sel(struct rtw89_dev *rtwdev,
bool kfail = false;
bool notready;
u32 rf_0;
- u8 idx;
+ u32 val;
u8 gp;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
- for (idx = 0; idx < RTW8851B_RXK_GROUP_IDX_NR; idx++) {
- gp = _rxk_5ghz_group_from_idx(idx);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x1000);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x4);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x17);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x5);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x27);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x0);
+
+ val = rtw89_read_rf(rtwdev, RF_PATH_A, RR_RXA2, 0x20);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_MASK, 0xc);
+ for (gp = 0; gp < RTW8851B_RXK_GROUP_IDX_NR; gp++) {
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, gp = %x\n", path, gp);
- rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_RGM, a_idxrxgain[idx]);
- rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, RR_RXA2_ATT, a_idxattc2[idx]);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_RGM, a_idxrxgain[gp]);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, RR_RXA2_ATT, a_idxattc2[gp]);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, 0x20, 0x1);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x0);
@@ -926,7 +927,7 @@ static bool _rxk_5g_group_sel(struct rtw89_dev *rtwdev,
fsleep(100);
rf_0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, rf_0);
- rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, a_idxrxagc[idx]);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, a_idxrxagc[gp]);
rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);
notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC);
@@ -959,6 +960,7 @@ static bool _rxk_5g_group_sel(struct rtw89_dev *rtwdev,
_iqk_sram(rtwdev, path);
if (kfail) {
+ rtw89_phy_write32_mask(rtwdev, R_IQK_RES, B_IQK_RES_RXCFIR, 0x0);
rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD,
iqk_info->nb_rxcfir[path] | 0x2);
iqk_info->is_wb_txiqk[path] = false;
@@ -968,6 +970,14 @@ static bool _rxk_5g_group_sel(struct rtw89_dev *rtwdev,
iqk_info->is_wb_txiqk[path] = true;
}
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, 0x20, val);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x1000);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x4);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x37);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x5);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x27);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x0);
+
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[IQK]S%x, kfail = 0x%x, 0x8%x3c = 0x%x\n", path, kfail,
1 << path, iqk_info->nb_rxcfir[path]);
@@ -980,17 +990,26 @@ static bool _iqk_5g_nbrxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
bool kfail = false;
bool notready;
- u8 idx = 0x1;
+ u8 gp = 2;
u32 rf_0;
- u8 gp;
-
- gp = _rxk_5ghz_group_from_idx(idx);
+ u32 val;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, gp = %x\n", path, gp);
- rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_RGM, a_idxrxgain[idx]);
- rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, RR_RXA2_ATT, a_idxattc2[idx]);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x1000);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x4);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x17);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x5);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x27);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x0);
+
+ val = rtw89_read_rf(rtwdev, RF_PATH_A, RR_RXA2, 0x20);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_MASK, 0xc);
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_RGM, a_idxrxgain[gp]);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, RR_RXA2_ATT, a_idxattc2[gp]);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, 0x20, 0x1);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x0);
@@ -1000,7 +1019,7 @@ static bool _iqk_5g_nbrxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
fsleep(100);
rf_0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, rf_0);
- rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, a_idxrxagc[idx]);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, a_idxrxagc[gp]);
rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);
notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC);
@@ -1026,6 +1045,7 @@ static bool _iqk_5g_nbrxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
if (kfail) {
+ rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), 0xf, 0x0);
rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
MASKDWORD, 0x40000002);
iqk_info->is_wb_rxiqk[path] = false;
@@ -1033,6 +1053,14 @@ static bool _iqk_5g_nbrxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
iqk_info->is_wb_rxiqk[path] = false;
}
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, 0x20, val);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x1000);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x4);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x37);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x5);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x27);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x0);
+
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[IQK]S%x, kfail = 0x%x, 0x8%x3c = 0x%x\n", path, kfail,
1 << path, iqk_info->nb_rxcfir[path]);
@@ -1149,6 +1177,7 @@ static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path)
static bool _txk_5g_group_sel(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx, u8 path)
{
+ static const u8 a_idx[RTW8851B_A_TXK_GROUP_NR] = {2, 3};
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
bool kfail = false;
bool notready;
@@ -1156,16 +1185,20 @@ static bool _txk_5g_group_sel(struct rtw89_dev *rtwdev,
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
- for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) {
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_COEF, MASKDWORD, 0x33332222);
+
+ for (gp = 0x0; gp < RTW8851B_A_TXK_GROUP_NR; gp++) {
rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, a_power_range[gp]);
rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, a_track_range[gp]);
rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, a_gain_bb[gp]);
+ rtw89_write_rf(rtwdev, path, RR_BIASA, RR_BIASA_A, a_att_smxr[gp]);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x1);
rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, gp);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, a_idx[gp]);
rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x11);
rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, a_itqt[gp]);
notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
@@ -1206,7 +1239,9 @@ static bool _txk_2g_group_sel(struct rtw89_dev *rtwdev,
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
- for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) {
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_COEF, MASKDWORD, 0x0);
+
+ for (gp = 0x0; gp < RTW8851B_G_TXK_GROUP_NR; gp++) {
rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, g_power_range[gp]);
rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, g_track_range[gp]);
rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, g_gain_bb[gp]);
@@ -1249,29 +1284,29 @@ static bool _txk_2g_group_sel(struct rtw89_dev *rtwdev,
static bool _iqk_5g_nbtxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
u8 path)
{
+ static const u8 a_idx[RTW8851B_A_TXK_GROUP_NR] = {2, 3};
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
bool kfail = false;
bool notready;
- u8 gp;
+ u8 gp = 0;
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
- for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) {
- rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, a_power_range[gp]);
- rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, a_track_range[gp]);
- rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, a_gain_bb[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, a_power_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, a_track_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, a_gain_bb[gp]);
+ rtw89_write_rf(rtwdev, path, RR_BIASA, RR_BIASA_A, a_att_smxr[gp]);
- rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, gp);
- rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
- rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, a_itqt[gp]);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, a_idx[gp]);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, a_itqt[gp]);
- notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
- iqk_info->nb_txcfir[path] =
- rtw89_phy_read32_mask(rtwdev, R_TXIQC, MASKDWORD) | 0x2;
- }
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
+ iqk_info->nb_txcfir[path] =
+ rtw89_phy_read32_mask(rtwdev, R_TXIQC, MASKDWORD) | 0x2;
if (!notready)
kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
@@ -1300,7 +1335,7 @@ static bool _iqk_2g_nbtxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
- for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) {
+ for (gp = 0x0; gp < RTW8851B_G_TXK_GROUP_NR; gp++) {
rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, g_power_range[gp]);
rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, g_track_range[gp]);
rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, g_gain_bb[gp]);
@@ -1664,8 +1699,6 @@ static void _iqk_init(struct rtw89_dev *rtwdev)
struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
u8 idx, path;
- rtw89_phy_write32_mask(rtwdev, R_IQKINF, MASKDWORD, 0x0);
-
if (iqk_info->is_iqk_init)
return;
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851be.c b/drivers/net/wireless/realtek/rtw89/rtw8851be.c
index 598730831707..ce59ac9f56ba 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851be.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851be.c
@@ -11,6 +11,7 @@
static const struct rtw89_pci_info rtw8851b_pci_info = {
.gen_def = &rtw89_pci_gen_ax,
+ .isr_def = &rtw89_pci_isr_ax,
.txbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_mode = MAC_AX_RXBD_PKT,
@@ -28,6 +29,8 @@ static const struct rtw89_pci_info rtw8851b_pci_info = {
.rx_ring_eq_is_full = false,
.check_rx_tag = false,
.no_rxbd_fs = false,
+ .group_bd_addr = false,
+ .rpp_fmt_size = sizeof(struct rtw89_pci_rpp_fmt),
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
@@ -57,6 +60,7 @@ static const struct rtw89_pci_info rtw8851b_pci_info = {
.ltr_set = rtw89_pci_ltr_set,
.fill_txaddr_info = rtw89_pci_fill_txaddr_info,
+ .parse_rpp = rtw89_pci_parse_rpp,
.config_intr_mask = rtw89_pci_config_intr_mask,
.enable_intr = rtw89_pci_enable_intr,
.disable_intr = rtw89_pci_disable_intr,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851bu.c b/drivers/net/wireless/realtek/rtw89/rtw8851bu.c
index c3722547c6b0..04e1ab13b753 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851bu.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851bu.c
@@ -16,6 +16,9 @@ static const struct rtw89_driver_info rtw89_8851bu_info = {
static const struct usb_device_id rtw_8851bu_id_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x0bda, 0xb851, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&rtw89_8851bu_info },
+ /* D-Link AX9U rev. A1 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x332a, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&rtw89_8851bu_info },
/* TP-Link Archer TX10UB Nano */
{ USB_DEVICE_AND_INTERFACE_INFO(0x3625, 0x010b, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&rtw89_8851bu_info },
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index 3bbe2a808844..232f4c1bee1b 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -426,6 +426,35 @@ static const struct rtw89_reg_def rtw8852a_dcfo_comp = {
R_DCFO_COMP_S0, B_DCFO_COMP_S0_MSK
};
+static const struct rtw89_reg_def rtw8852a_nhm_th[RTW89_NHM_TH_NUM] = {
+ {R_NHM_CFG, B_NHM_TH0_MSK},
+ {R_NHM_TH1, B_NHM_TH1_MSK},
+ {R_NHM_TH1, B_NHM_TH2_MSK},
+ {R_NHM_TH1, B_NHM_TH3_MSK},
+ {R_NHM_TH1, B_NHM_TH4_MSK},
+ {R_NHM_TH5, B_NHM_TH5_MSK},
+ {R_NHM_TH5, B_NHM_TH6_MSK},
+ {R_NHM_TH5, B_NHM_TH7_MSK},
+ {R_NHM_TH5, B_NHM_TH8_MSK},
+ {R_NHM_TH9, B_NHM_TH9_MSK},
+ {R_NHM_TH9, B_NHM_TH10_MSK},
+};
+
+static const struct rtw89_reg_def rtw8852a_nhm_rpt[RTW89_NHM_RPT_NUM] = {
+ {R_NHM_CNT0, B_NHM_CNT0_MSK},
+ {R_NHM_CNT0, B_NHM_CNT1_MSK},
+ {R_NHM_CNT2, B_NHM_CNT2_MSK},
+ {R_NHM_CNT2, B_NHM_CNT3_MSK},
+ {R_NHM_CNT4, B_NHM_CNT4_MSK},
+ {R_NHM_CNT4, B_NHM_CNT5_MSK},
+ {R_NHM_CNT6, B_NHM_CNT6_MSK},
+ {R_NHM_CNT6, B_NHM_CNT7_MSK},
+ {R_NHM_CNT8, B_NHM_CNT8_MSK},
+ {R_NHM_CNT8, B_NHM_CNT9_MSK},
+ {R_NHM_CNT10, B_NHM_CNT10_MSK},
+ {R_NHM_CNT10, B_NHM_CNT11_MSK},
+};
+
static const struct rtw89_imr_info rtw8852a_imr_info = {
.wdrls_imr_set = B_AX_WDRLS_IMR_SET,
.wsec_imr_reg = R_AX_SEC_DEBUG,
@@ -2080,10 +2109,17 @@ static void rtw8852a_query_ppdu(struct rtw89_dev *rtwdev,
{
u8 path;
u8 *rx_power = phy_ppdu->rssi;
+ u8 raw;
+
+ if (!status->signal) {
+ if (phy_ppdu->to_self)
+ raw = ewma_rssi_read(&rtwdev->phystat.bcn_rssi);
+ else
+ raw = max(rx_power[RF_PATH_A], rx_power[RF_PATH_B]);
+
+ status->signal = RTW89_RSSI_RAW_TO_DBM(raw);
+ }
- if (!status->signal)
- status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A],
- rx_power[RF_PATH_B]));
for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
status->chains |= BIT(path);
status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]);
@@ -2142,6 +2178,7 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.query_rxdesc = rtw89_core_query_rxdesc,
.fill_txdesc = rtw89_core_fill_txdesc,
.fill_txdesc_fwcmd = rtw89_core_fill_txdesc,
+ .get_ch_dma = rtw89_core_get_ch_dma,
.cfg_ctrl_path = rtw89_mac_cfg_ctrl_path,
.mac_cfg_gnt = rtw89_mac_cfg_gnt,
.stop_sch_tx = rtw89_mac_stop_sch_tx,
@@ -2220,6 +2257,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.support_ant_gain = false,
.support_tas = false,
.support_sar_by_ant = false,
+ .support_noise = true,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
.rx_freq_frome_ie = true,
@@ -2282,6 +2320,8 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.cfo_hw_comp = false,
.dcfo_comp = &rtw8852a_dcfo_comp,
.dcfo_comp_sft = 10,
+ .nhm_report = &rtw8852a_nhm_rpt,
+ .nhm_th = &rtw8852a_nhm_th,
.imr_info = &rtw8852a_imr_info,
.imr_dmac_table = NULL,
.imr_cmac_table = NULL,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
index 90ffaf9f4f6a..9e05e831569d 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
@@ -11,6 +11,7 @@
static const struct rtw89_pci_info rtw8852a_pci_info = {
.gen_def = &rtw89_pci_gen_ax,
+ .isr_def = &rtw89_pci_isr_ax,
.txbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_mode = MAC_AX_RXBD_PKT,
@@ -28,6 +29,8 @@ static const struct rtw89_pci_info rtw8852a_pci_info = {
.rx_ring_eq_is_full = false,
.check_rx_tag = false,
.no_rxbd_fs = false,
+ .group_bd_addr = false,
+ .rpp_fmt_size = sizeof(struct rtw89_pci_rpp_fmt),
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
@@ -55,6 +58,7 @@ static const struct rtw89_pci_info rtw8852a_pci_info = {
.ltr_set = rtw89_pci_ltr_set,
.fill_txaddr_info = rtw89_pci_fill_txaddr_info,
+ .parse_rpp = rtw89_pci_parse_rpp,
.config_intr_mask = rtw89_pci_config_intr_mask,
.enable_intr = rtw89_pci_enable_intr,
.disable_intr = rtw89_pci_disable_intr,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
index 7ede07f7b1eb..0777e336aaa1 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
@@ -842,6 +842,7 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.query_rxdesc = rtw89_core_query_rxdesc,
.fill_txdesc = rtw89_core_fill_txdesc,
.fill_txdesc_fwcmd = rtw89_core_fill_txdesc,
+ .get_ch_dma = rtw89_core_get_ch_dma,
.cfg_ctrl_path = rtw89_mac_cfg_ctrl_path,
.mac_cfg_gnt = rtw89_mac_cfg_gnt,
.stop_sch_tx = rtw89_mac_stop_sch_tx,
@@ -939,6 +940,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.support_ant_gain = true,
.support_tas = false,
.support_sar_by_ant = true,
+ .support_noise = false,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
.rx_freq_frome_ie = true,
@@ -1001,6 +1003,8 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.cfo_hw_comp = true,
.dcfo_comp = &rtw8852b_dcfo_comp,
.dcfo_comp_sft = 10,
+ .nhm_report = NULL,
+ .nhm_th = NULL,
.imr_info = &rtw8852b_imr_info,
.imr_dmac_table = NULL,
.imr_cmac_table = NULL,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852be.c b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
index b0726f590ca2..12db0d0be547 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852be.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
@@ -11,6 +11,7 @@
static const struct rtw89_pci_info rtw8852b_pci_info = {
.gen_def = &rtw89_pci_gen_ax,
+ .isr_def = &rtw89_pci_isr_ax,
.txbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_mode = MAC_AX_RXBD_PKT,
@@ -28,6 +29,8 @@ static const struct rtw89_pci_info rtw8852b_pci_info = {
.rx_ring_eq_is_full = false,
.check_rx_tag = false,
.no_rxbd_fs = false,
+ .group_bd_addr = false,
+ .rpp_fmt_size = sizeof(struct rtw89_pci_rpp_fmt),
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
@@ -57,6 +60,7 @@ static const struct rtw89_pci_info rtw8852b_pci_info = {
.ltr_set = rtw89_pci_ltr_set,
.fill_txaddr_info = rtw89_pci_fill_txaddr_info,
+ .parse_rpp = rtw89_pci_parse_rpp,
.config_intr_mask = rtw89_pci_config_intr_mask,
.enable_intr = rtw89_pci_enable_intr,
.disable_intr = rtw89_pci_disable_intr,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
index 9427823aca2f..b3a79ebc7e75 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c
@@ -708,6 +708,7 @@ static const struct rtw89_chip_ops rtw8852bt_chip_ops = {
.query_rxdesc = rtw89_core_query_rxdesc,
.fill_txdesc = rtw89_core_fill_txdesc,
.fill_txdesc_fwcmd = rtw89_core_fill_txdesc,
+ .get_ch_dma = rtw89_core_get_ch_dma,
.cfg_ctrl_path = rtw89_mac_cfg_ctrl_path,
.mac_cfg_gnt = rtw89_mac_cfg_gnt,
.stop_sch_tx = rtw89_mac_stop_sch_tx,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c
index d0e299803225..961b26ba2d3c 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c
@@ -1799,22 +1799,14 @@ static void _dpk_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool o
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
u8 val, kidx = dpk->cur_idx[path];
- bool off_reverse;
val = dpk->is_dpk_enable && !off && dpk->bp[path][kidx].path_ok;
- if (off)
- off_reverse = false;
- else
- off_reverse = true;
-
- val = dpk->is_dpk_enable & off_reverse & dpk->bp[path][kidx].path_ok;
-
rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
BIT(24), val);
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path,
- kidx, str_enable_disable(dpk->is_dpk_enable & off_reverse));
+ kidx, str_enable_disable(dpk->is_dpk_enable && !off));
}
static void _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
@@ -1883,8 +1875,8 @@ static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
path, dpk->cur_idx[path], phy,
- rtwdev->is_tssi_mode[path] ? "on" : "off",
- rtwdev->dbcc_en ? "on" : "off",
+ str_on_off(rtwdev->is_tssi_mode[path]),
+ str_on_off(rtwdev->dbcc_en),
dpk->bp[path][kidx].band == 0 ? "2G" :
dpk->bp[path][kidx].band == 1 ? "5G" : "6G",
dpk->bp[path][kidx].ch,
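
The two rtw8852bt_rfk.c hunks above replace hand-rolled boolean-to-string code with the kernel's string-choice helpers. A minimal sketch of the converted style, assuming the usual driver context (the demo function is hypothetical; str_on_off() and str_enable_disable() are the helpers from <linux/string_choices.h>):

#include <linux/string_choices.h>

/* Hypothetical debug print in the style the patch converts to:
 * the helpers map a bool to the "on"/"off" and "enable"/"disable"
 * string literals, avoiding open-coded ternaries.
 */
static void demo_report(struct rtw89_dev *rtwdev, bool tssi, bool dpk_en)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "TSSI %s / DPK %s\n",
		    str_on_off(tssi), str_enable_disable(dpk_en));
}
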
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bte.c b/drivers/net/wireless/realtek/rtw89/rtw8852bte.c
index a584c75b801d..8c995aa95325 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bte.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bte.c
@@ -17,6 +17,7 @@ static const struct rtw89_pci_ssid_quirk rtw8852bt_pci_ssid_quirks[] = {
static const struct rtw89_pci_info rtw8852bt_pci_info = {
.gen_def = &rtw89_pci_gen_ax,
+ .isr_def = &rtw89_pci_isr_ax,
.txbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_mode = MAC_AX_RXBD_PKT,
@@ -34,6 +35,8 @@ static const struct rtw89_pci_info rtw8852bt_pci_info = {
.rx_ring_eq_is_full = false,
.check_rx_tag = false,
.no_rxbd_fs = false,
+ .group_bd_addr = false,
+ .rpp_fmt_size = sizeof(struct rtw89_pci_rpp_fmt),
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
@@ -63,6 +66,7 @@ static const struct rtw89_pci_info rtw8852bt_pci_info = {
.ltr_set = rtw89_pci_ltr_set,
.fill_txaddr_info = rtw89_pci_fill_txaddr_info,
+ .parse_rpp = rtw89_pci_parse_rpp,
.config_intr_mask = rtw89_pci_config_intr_mask,
.enable_intr = rtw89_pci_enable_intr,
.disable_intr = rtw89_pci_disable_intr,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bu.c b/drivers/net/wireless/realtek/rtw89/rtw8852bu.c
index b315cb997758..0694272f7ffa 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852bu.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bu.c
@@ -30,6 +30,8 @@ static const struct usb_device_id rtw_8852bu_id_table[] = {
.driver_info = (kernel_ulong_t)&rtw89_8852bu_info },
{ USB_DEVICE_AND_INTERFACE_INFO(0x0db0, 0x6931, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&rtw89_8852bu_info },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3327, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&rtw89_8852bu_info },
{ USB_DEVICE_AND_INTERFACE_INFO(0x3574, 0x6121, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&rtw89_8852bu_info },
{ USB_DEVICE_AND_INTERFACE_INFO(0x35bc, 0x0100, 0xff, 0xff, 0xff),
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index 88cf8ea13e7c..440801d63343 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -2962,6 +2962,7 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.query_rxdesc = rtw89_core_query_rxdesc,
.fill_txdesc = rtw89_core_fill_txdesc_v1,
.fill_txdesc_fwcmd = rtw89_core_fill_txdesc_fwcmd_v1,
+ .get_ch_dma = rtw89_core_get_ch_dma,
.cfg_ctrl_path = rtw89_mac_cfg_ctrl_path_v1,
.mac_cfg_gnt = rtw89_mac_cfg_gnt_v1,
.stop_sch_tx = rtw89_mac_stop_sch_tx_v1,
@@ -3043,6 +3044,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.support_ant_gain = true,
.support_tas = true,
.support_sar_by_ant = true,
+ .support_noise = false,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = true,
.rx_freq_frome_ie = false,
@@ -3106,6 +3108,8 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.cfo_hw_comp = false,
.dcfo_comp = &rtw8852c_dcfo_comp,
.dcfo_comp_sft = 12,
+ .nhm_report = NULL,
+ .nhm_th = NULL,
.imr_info = &rtw8852c_imr_info,
.imr_dmac_table = NULL,
.imr_cmac_table = NULL,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
index db01d3966c27..150fed189414 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
@@ -20,6 +20,7 @@ static const struct rtw89_pci_bd_idx_addr rtw8852c_bd_idx_addr_low_power = {
static const struct rtw89_pci_info rtw8852c_pci_info = {
.gen_def = &rtw89_pci_gen_ax,
+ .isr_def = &rtw89_pci_isr_ax,
.txbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_mode = MAC_AX_RXBD_PKT,
@@ -37,6 +38,8 @@ static const struct rtw89_pci_info rtw8852c_pci_info = {
.rx_ring_eq_is_full = false,
.check_rx_tag = false,
.no_rxbd_fs = false,
+ .group_bd_addr = false,
+ .rpp_fmt_size = sizeof(struct rtw89_pci_rpp_fmt),
.init_cfg_reg = R_AX_HAXI_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN_V1,
@@ -64,6 +67,7 @@ static const struct rtw89_pci_info rtw8852c_pci_info = {
.ltr_set = rtw89_pci_ltr_set_v1,
.fill_txaddr_info = rtw89_pci_fill_txaddr_info_v1,
+ .parse_rpp = rtw89_pci_parse_rpp,
.config_intr_mask = rtw89_pci_config_intr_mask_v1,
.enable_intr = rtw89_pci_enable_intr_v1,
.disable_intr = rtw89_pci_disable_intr_v1,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
index 36c641e3bc13..6aa19ad259ac 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
@@ -2765,6 +2765,10 @@ static int rtw8922a_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
return 0;
}
+static const struct rtw89_chanctx_listener rtw8922a_chanctx_listener = {
+ .callbacks[RTW89_CHANCTX_CALLBACK_TAS] = rtw89_tas_chanctx_cb,
+};
+
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support rtw_wowlan_stub_8922a = {
.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT |
@@ -2817,6 +2821,7 @@ static const struct rtw89_chip_ops rtw8922a_chip_ops = {
.query_rxdesc = rtw89_core_query_rxdesc_v2,
.fill_txdesc = rtw89_core_fill_txdesc_v2,
.fill_txdesc_fwcmd = rtw89_core_fill_txdesc_fwcmd_v2,
+ .get_ch_dma = rtw89_core_get_ch_dma,
.cfg_ctrl_path = rtw89_mac_cfg_ctrl_path_v2,
.mac_cfg_gnt = rtw89_mac_cfg_gnt_v2,
.stop_sch_tx = rtw89_mac_stop_sch_tx_v2,
@@ -2875,6 +2880,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.nctl_post_table = NULL,
.dflt_parms = NULL, /* load parm from fw */
.rfe_parms_conf = NULL, /* load parm from fw */
+ .chanctx_listener = &rtw8922a_chanctx_listener,
.txpwr_factor_bb = 3,
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
@@ -2894,8 +2900,9 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
BIT(NL80211_CHAN_WIDTH_160),
.support_unii4 = true,
.support_ant_gain = true,
- .support_tas = false,
+ .support_tas = true,
.support_sar_by_ant = true,
+ .support_noise = false,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
.rx_freq_frome_ie = false,
@@ -2958,6 +2965,8 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.cfo_hw_comp = true,
.dcfo_comp = NULL,
.dcfo_comp_sft = 0,
+ .nhm_report = NULL,
+ .nhm_th = NULL,
.imr_info = NULL,
.imr_dmac_table = &rtw8922a_imr_dmac_table,
.imr_cmac_table = &rtw8922a_imr_cmac_table,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
index b730d79edd10..90c62b757c57 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
@@ -17,6 +17,7 @@ static const struct rtw89_pci_ssid_quirk rtw8922a_pci_ssid_quirks[] = {
static const struct rtw89_pci_info rtw8922a_pci_info = {
.gen_def = &rtw89_pci_gen_be,
+ .isr_def = &rtw89_pci_isr_be,
.txbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_trunc_mode = MAC_AX_BD_TRUNC,
.rxbd_mode = MAC_AX_RXBD_PKT,
@@ -34,6 +35,8 @@ static const struct rtw89_pci_info rtw8922a_pci_info = {
.rx_ring_eq_is_full = true,
.check_rx_tag = true,
.no_rxbd_fs = true,
+ .group_bd_addr = false,
+ .rpp_fmt_size = sizeof(struct rtw89_pci_rpp_fmt),
.init_cfg_reg = R_BE_HAXI_INIT_CFG1,
.txhci_en_bit = B_BE_TXDMA_EN,
@@ -61,6 +64,7 @@ static const struct rtw89_pci_info rtw8922a_pci_info = {
.ltr_set = rtw89_pci_ltr_set_v2,
.fill_txaddr_info = rtw89_pci_fill_txaddr_info_v1,
+ .parse_rpp = rtw89_pci_parse_rpp,
.config_intr_mask = rtw89_pci_config_intr_mask_v2,
.enable_intr = rtw89_pci_enable_intr_v2,
.disable_intr = rtw89_pci_disable_intr_v2,
diff --git a/drivers/net/wireless/realtek/rtw89/sar.c b/drivers/net/wireless/realtek/rtw89/sar.c
index 7f568ffb3766..ef7feccccd5e 100644
--- a/drivers/net/wireless/realtek/rtw89/sar.c
+++ b/drivers/net/wireless/realtek/rtw89/sar.c
@@ -4,6 +4,7 @@
#include "acpi.h"
#include "debug.h"
+#include "fw.h"
#include "phy.h"
#include "reg.h"
#include "sar.h"
@@ -843,6 +844,20 @@ void rtw89_tas_chanctx_cb(struct rtw89_dev *rtwdev,
}
EXPORT_SYMBOL(rtw89_tas_chanctx_cb);
+void rtw89_tas_fw_timer_enable(struct rtw89_dev *rtwdev, bool enable)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_tas_info *tas = &rtwdev->tas;
+
+ if (!tas->enable)
+ return;
+
+ if (chip->chip_gen == RTW89_CHIP_AX)
+ return;
+
+ rtw89_fw_h2c_rf_tas_trigger(rtwdev, enable);
+}
+
void rtw89_sar_init(struct rtw89_dev *rtwdev)
{
rtw89_set_sar_from_acpi(rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw89/sar.h b/drivers/net/wireless/realtek/rtw89/sar.h
index 4b7f3d44f57b..60b3aec5b3be 100644
--- a/drivers/net/wireless/realtek/rtw89/sar.h
+++ b/drivers/net/wireless/realtek/rtw89/sar.h
@@ -37,6 +37,7 @@ void rtw89_tas_reset(struct rtw89_dev *rtwdev, bool force);
void rtw89_tas_scan(struct rtw89_dev *rtwdev, bool start);
void rtw89_tas_chanctx_cb(struct rtw89_dev *rtwdev,
enum rtw89_chanctx_state state);
+void rtw89_tas_fw_timer_enable(struct rtw89_dev *rtwdev, bool enable);
void rtw89_sar_init(struct rtw89_dev *rtwdev);
void rtw89_sar_track(struct rtw89_dev *rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c
index bb39fdbcba0d..f99e179f7ff9 100644
--- a/drivers/net/wireless/realtek/rtw89/ser.c
+++ b/drivers/net/wireless/realtek/rtw89/ser.c
@@ -205,7 +205,6 @@ static void rtw89_ser_hdl_work(struct work_struct *work)
static int ser_send_msg(struct rtw89_ser *ser, u8 event)
{
- struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
struct ser_msg *msg = NULL;
if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags))
@@ -221,7 +220,7 @@ static int ser_send_msg(struct rtw89_ser *ser, u8 event)
list_add(&msg->list, &ser->msg_q);
spin_unlock_irq(&ser->msg_q_lock);
- ieee80211_queue_work(rtwdev->hw, &ser->ser_hdl_work);
+ schedule_work(&ser->ser_hdl_work);
return 0;
}
@@ -502,7 +501,9 @@ static void ser_reset_trx_st_hdl(struct rtw89_ser *ser, u8 evt)
}
drv_stop_rx(ser);
+ wiphy_lock(wiphy);
drv_trx_reset(ser);
+ wiphy_unlock(wiphy);
/* wait m3 */
hal_send_m2_event(ser);
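
The ser.c changes above do two related things: SER events now run from the system workqueue via schedule_work(), and the TRX reset is wrapped in the wiphy mutex. A sketch of the resulting shape, assuming 'wiphy' is taken from the device's ieee80211_hw (rtwdev->hw->wiphy); the wrapper is hypothetical:

/* Hold the wiphy mutex across the TRX reset so it cannot race with
 * cfg80211/mac80211 callbacks. Moving the handler off mac80211's
 * workqueue (schedule_work() above) presumably avoids a deadlock,
 * since that queue can be flushed while the wiphy mutex is held.
 */
static void ser_locked_trx_reset(struct wiphy *wiphy, struct rtw89_ser *ser)
{
	wiphy_lock(wiphy);
	drv_trx_reset(ser);
	wiphy_unlock(wiphy);
}
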
diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h
index ec01bfc363da..984c9fdbb018 100644
--- a/drivers/net/wireless/realtek/rtw89/txrx.h
+++ b/drivers/net/wireless/realtek/rtw89/txrx.h
@@ -572,6 +572,7 @@ struct rtw89_phy_sts_ie00 {
} __packed;
#define RTW89_PHY_STS_IE00_W0_RPL GENMASK(15, 7)
+#define RTW89_PHY_STS_IE00_W3_RX_PATH_EN GENMASK(31, 28)
struct rtw89_phy_sts_ie00_v2 {
__le32 w0;
@@ -732,43 +733,6 @@ rtw89_core_get_qsel_mgmt(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request
return RTW89_TX_QSEL_B0_MGMT;
}
-static inline u8 rtw89_core_get_ch_dma(struct rtw89_dev *rtwdev, u8 qsel)
-{
- switch (qsel) {
- default:
- rtw89_warn(rtwdev, "Cannot map qsel to dma: %d\n", qsel);
- fallthrough;
- case RTW89_TX_QSEL_BE_0:
- case RTW89_TX_QSEL_BE_1:
- case RTW89_TX_QSEL_BE_2:
- case RTW89_TX_QSEL_BE_3:
- return RTW89_TXCH_ACH0;
- case RTW89_TX_QSEL_BK_0:
- case RTW89_TX_QSEL_BK_1:
- case RTW89_TX_QSEL_BK_2:
- case RTW89_TX_QSEL_BK_3:
- return RTW89_TXCH_ACH1;
- case RTW89_TX_QSEL_VI_0:
- case RTW89_TX_QSEL_VI_1:
- case RTW89_TX_QSEL_VI_2:
- case RTW89_TX_QSEL_VI_3:
- return RTW89_TXCH_ACH2;
- case RTW89_TX_QSEL_VO_0:
- case RTW89_TX_QSEL_VO_1:
- case RTW89_TX_QSEL_VO_2:
- case RTW89_TX_QSEL_VO_3:
- return RTW89_TXCH_ACH3;
- case RTW89_TX_QSEL_B0_MGMT:
- return RTW89_TXCH_CH8;
- case RTW89_TX_QSEL_B0_HI:
- return RTW89_TXCH_CH9;
- case RTW89_TX_QSEL_B1_MGMT:
- return RTW89_TXCH_CH10;
- case RTW89_TX_QSEL_B1_HI:
- return RTW89_TXCH_CH11;
- }
-}
-
static inline u8 rtw89_core_get_tid_indicate(struct rtw89_dev *rtwdev, u8 tid)
{
switch (tid) {
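
With the shared static inline removed here, the qsel-to-DMA-channel mapping is reached through the new .get_ch_dma chip op wired up earlier in this series. A sketch of the assumed call-site shape (the wrapper is illustrative, not the actual caller):

/* Callers now dispatch through the per-chip ops table, which lets
 * future chip generations override the qsel-to-DMA mapping instead
 * of sharing one inline switch statement.
 */
static u8 example_get_txch(struct rtw89_dev *rtwdev, u8 qsel)
{
	return rtwdev->chip->ops->get_ch_dma(rtwdev, qsel);
}
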
diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c
index 5bb7c1a42f1d..5faa51ad896a 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.c
+++ b/drivers/net/wireless/realtek/rtw89/wow.c
@@ -99,13 +99,26 @@ static int rtw89_rx_pn_to_iv(struct rtw89_dev *rtwdev,
ieee80211_get_key_rx_seq(key, 0, &seq);
- /* seq.ccmp.pn[] is BE order array */
- pn = u64_encode_bits(seq.ccmp.pn[0], RTW89_KEY_PN_5) |
- u64_encode_bits(seq.ccmp.pn[1], RTW89_KEY_PN_4) |
- u64_encode_bits(seq.ccmp.pn[2], RTW89_KEY_PN_3) |
- u64_encode_bits(seq.ccmp.pn[3], RTW89_KEY_PN_2) |
- u64_encode_bits(seq.ccmp.pn[4], RTW89_KEY_PN_1) |
- u64_encode_bits(seq.ccmp.pn[5], RTW89_KEY_PN_0);
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ pn = u64_encode_bits(seq.tkip.iv32, RTW89_KEY_TKIP_PN_IV32) |
+ u64_encode_bits(seq.tkip.iv16, RTW89_KEY_TKIP_PN_IV16);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ /* seq.ccmp.pn[] is BE order array */
+ pn = u64_encode_bits(seq.ccmp.pn[0], RTW89_KEY_PN_5) |
+ u64_encode_bits(seq.ccmp.pn[1], RTW89_KEY_PN_4) |
+ u64_encode_bits(seq.ccmp.pn[2], RTW89_KEY_PN_3) |
+ u64_encode_bits(seq.ccmp.pn[3], RTW89_KEY_PN_2) |
+ u64_encode_bits(seq.ccmp.pn[4], RTW89_KEY_PN_1) |
+ u64_encode_bits(seq.ccmp.pn[5], RTW89_KEY_PN_0);
+ break;
+ default:
+ return -EINVAL;
+ }
err = _pn_to_iv(rtwdev, key, iv, pn, key->keyidx);
if (err)
@@ -177,13 +190,26 @@ static int rtw89_rx_iv_to_pn(struct rtw89_dev *rtwdev,
if (err)
return err;
- /* seq.ccmp.pn[] is BE order array */
- seq.ccmp.pn[0] = u64_get_bits(pn, RTW89_KEY_PN_5);
- seq.ccmp.pn[1] = u64_get_bits(pn, RTW89_KEY_PN_4);
- seq.ccmp.pn[2] = u64_get_bits(pn, RTW89_KEY_PN_3);
- seq.ccmp.pn[3] = u64_get_bits(pn, RTW89_KEY_PN_2);
- seq.ccmp.pn[4] = u64_get_bits(pn, RTW89_KEY_PN_1);
- seq.ccmp.pn[5] = u64_get_bits(pn, RTW89_KEY_PN_0);
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ seq.tkip.iv32 = u64_get_bits(pn, RTW89_KEY_TKIP_PN_IV32);
+ seq.tkip.iv16 = u64_get_bits(pn, RTW89_KEY_TKIP_PN_IV16);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ /* seq.ccmp.pn[] is BE order array */
+ seq.ccmp.pn[0] = u64_get_bits(pn, RTW89_KEY_PN_5);
+ seq.ccmp.pn[1] = u64_get_bits(pn, RTW89_KEY_PN_4);
+ seq.ccmp.pn[2] = u64_get_bits(pn, RTW89_KEY_PN_3);
+ seq.ccmp.pn[3] = u64_get_bits(pn, RTW89_KEY_PN_2);
+ seq.ccmp.pn[4] = u64_get_bits(pn, RTW89_KEY_PN_1);
+ seq.ccmp.pn[5] = u64_get_bits(pn, RTW89_KEY_PN_0);
+ break;
+ default:
+ return -EINVAL;
+ }
ieee80211_set_key_rx_seq(key, 0, &seq);
rtw89_debug(rtwdev, RTW89_DBG_WOW, "%s key %d iv-%*ph to pn-%*ph\n",
@@ -285,6 +311,11 @@ static void rtw89_wow_get_key_info_iter(struct ieee80211_hw *hw,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
+ if (sta)
+ memcpy(gtk_info->txmickey,
+ key->key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY,
+ sizeof(gtk_info->txmickey));
+ fallthrough;
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
@@ -348,10 +379,27 @@ static void rtw89_wow_set_key_info_iter(struct ieee80211_hw *hw,
struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt;
struct rtw89_set_key_info_iter_data *iter_data = data;
bool update_tx_key_info = iter_data->rx_ready;
+ u8 tmp[RTW89_MIC_KEY_LEN];
int ret;
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
+ /*
+ * TX MIC KEY and RX MIC KEY are swapped (opposite) in FW,
+ * so swap them before sending to mac80211.
+ */
+ if (!sta && update_tx_key_info && aoac_rpt->rekey_ok &&
+ !iter_data->tkip_gtk_swapped) {
+ memcpy(tmp, &aoac_rpt->gtk[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
+ RTW89_MIC_KEY_LEN);
+ memcpy(&aoac_rpt->gtk[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
+ &aoac_rpt->gtk[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
+ RTW89_MIC_KEY_LEN);
+ memcpy(&aoac_rpt->gtk[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
+ tmp, RTW89_MIC_KEY_LEN);
+ iter_data->tkip_gtk_swapped = true;
+ }
+ fallthrough;
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
@@ -642,7 +690,8 @@ static void rtw89_wow_update_key_info(struct rtw89_dev *rtwdev, bool rx_ready)
struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt;
struct rtw89_set_key_info_iter_data data = {.error = false,
- .rx_ready = rx_ready};
+ .rx_ready = rx_ready,
+ .tkip_gtk_swapped = false};
struct ieee80211_bss_conf *bss_conf;
struct ieee80211_key_conf *key;
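
The WoWLAN hunks above split PN handling by cipher: CCMP/GCMP keeps the big-endian byte-array packing, while TKIP folds iv32/iv16 into the same u64 using the GENMASK_ULL() masks added in wow.h below. The TKIP side in isolation (helper names are illustrative):

#include <linux/bitfield.h>

/* Pack a TKIP receive sequence (iv32:iv16) into the u64 PN layout,
 * and unpack it again, using the field masks from wow.h.
 */
static u64 tkip_seq_to_pn(u32 iv32, u16 iv16)
{
	return u64_encode_bits(iv32, RTW89_KEY_TKIP_PN_IV32) |
	       u64_encode_bits(iv16, RTW89_KEY_TKIP_PN_IV16);
}

static void pn_to_tkip_seq(u64 pn, u32 *iv32, u16 *iv16)
{
	*iv32 = u64_get_bits(pn, RTW89_KEY_TKIP_PN_IV32);
	*iv16 = u64_get_bits(pn, RTW89_KEY_TKIP_PN_IV16);
}
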
diff --git a/drivers/net/wireless/realtek/rtw89/wow.h b/drivers/net/wireless/realtek/rtw89/wow.h
index 6606528d31c7..d2ba6cebc2a6 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.h
+++ b/drivers/net/wireless/realtek/rtw89/wow.h
@@ -5,6 +5,9 @@
#ifndef __RTW89_WOW_H__
#define __RTW89_WOW_H__
+#define RTW89_KEY_TKIP_PN_IV16 GENMASK_ULL(15, 0)
+#define RTW89_KEY_TKIP_PN_IV32 GENMASK_ULL(47, 16)
+
#define RTW89_KEY_PN_0 GENMASK_ULL(7, 0)
#define RTW89_KEY_PN_1 GENMASK_ULL(15, 8)
#define RTW89_KEY_PN_2 GENMASK_ULL(23, 16)
@@ -25,6 +28,8 @@
#define RTW89_WOW_SYMBOL_CHK_PTK BIT(0)
#define RTW89_WOW_SYMBOL_CHK_GTK BIT(1)
+#define RTW89_MIC_KEY_LEN 8
+
enum rtw89_wake_reason {
RTW89_WOW_RSN_RX_PTK_REKEY = 0x1,
RTW89_WOW_RSN_RX_GTK_REKEY = 0x2,
@@ -73,6 +78,7 @@ struct rtw89_set_key_info_iter_data {
u32 igtk_cipher;
bool rx_ready;
bool error;
+ bool tkip_gtk_swapped;
};
static inline int rtw89_wow_get_sec_hdr_len(struct rtw89_dev *rtwdev)
diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c
index b1dd76e8aecb..5d8eaa700779 100644
--- a/drivers/net/wireless/st/cw1200/sta.c
+++ b/drivers/net/wireless/st/cw1200/sta.c
@@ -1291,7 +1291,7 @@ static void cw1200_do_join(struct cw1200_common *priv)
rcu_read_lock();
ssidie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
if (ssidie) {
- join.ssid_len = ssidie[1];
+ join.ssid_len = min(ssidie[1], IEEE80211_MAX_SSID_LEN);
memcpy(join.ssid, &ssidie[2], join.ssid_len);
}
rcu_read_unlock();
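
The one-line cw1200 fix above is a classic IE-length clamp: ssidie[1] is a length byte read off the air, so it must be bounded by the destination buffer before the memcpy(). The same pattern in isolation (hypothetical helper):

#include <linux/minmax.h>
#include <linux/string.h>

/* Copy an SSID element payload into a fixed-size buffer, trusting
 * the IE's own length byte only after clamping it.
 */
static size_t copy_ssid_ie(u8 *dst, size_t dst_len, const u8 *ie)
{
	size_t len = min_t(size_t, ie[1], dst_len);

	memcpy(dst, &ie[2], len);
	return len;
}
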
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index 3789d46d5614..9f856042a67a 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -645,6 +645,7 @@ static LIST_HEAD(hwsim_radios);
static struct rhashtable hwsim_radios_rht;
static int hwsim_radio_idx;
static int hwsim_radios_generation = 1;
+static u8 hwsim_nan_cluster_id[ETH_ALEN];
static struct platform_driver mac80211_hwsim_driver = {
.driver = {
@@ -670,7 +671,7 @@ struct mac80211_hwsim_data {
struct ieee80211_channel channels_s1g[ARRAY_SIZE(hwsim_channels_s1g)];
struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)];
struct ieee80211_iface_combination if_combination;
- struct ieee80211_iface_limit if_limits[3];
+ struct ieee80211_iface_limit if_limits[4];
int n_if_limits;
struct ieee80211_iface_combination if_combination_radio;
@@ -679,7 +680,7 @@ struct mac80211_hwsim_data {
u32 ciphers[ARRAY_SIZE(hwsim_ciphers)];
- struct mac_address addresses[2];
+ struct mac_address addresses[3];
int channels, idx;
bool use_chanctx;
bool destroy_on_close;
@@ -752,6 +753,14 @@ struct mac80211_hwsim_data {
struct wireless_dev *pmsr_request_wdev;
struct mac80211_hwsim_link_data link_data[IEEE80211_MLD_MAX_NUM_LINKS];
+
+ struct ieee80211_vif *nan_device_vif;
+ u8 nan_bands;
+
+ enum nl80211_band nan_curr_dw_band;
+ struct hrtimer nan_timer;
+ bool notify_dw;
+ struct ieee80211_vif *nan_vif;
};
static const struct rhashtable_params hwsim_rht_params = {
@@ -926,6 +935,7 @@ static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
[HWSIM_ATTR_PMSR_SUPPORT] = NLA_POLICY_NESTED(hwsim_pmsr_capa_policy),
[HWSIM_ATTR_PMSR_RESULT] = NLA_POLICY_NESTED(hwsim_pmsr_peers_result_policy),
[HWSIM_ATTR_MULTI_RADIO] = { .type = NLA_FLAG },
+ [HWSIM_ATTR_SUPPORT_NAN_DEVICE] = { .type = NLA_FLAG },
};
#if IS_REACHABLE(CONFIG_VIRTIO)
@@ -1644,6 +1654,16 @@ static void mac80211_hwsim_tx_iter(void *_data, u8 *addr,
struct tx_iter_data *data = _data;
int i;
+ /* For NAN Device simulation purposes, assume that NAN is always
+ * on channel 6 or channel 149.
+ */
+ if (vif->type == NL80211_IFTYPE_NAN) {
+ data->receive = (data->channel &&
+ (data->channel->center_freq == 2437 ||
+ data->channel->center_freq == 5745));
+ return;
+ }
+
for (i = 0; i < ARRAY_SIZE(vif->link_conf); i++) {
struct ieee80211_bss_conf *conf;
struct ieee80211_chanctx_conf *chanctx;
@@ -1944,6 +1964,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr = (void *)skb->data;
struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_channel *channel;
+ struct ieee80211_vif *vif = txi->control.vif;
bool ack;
enum nl80211_chan_width confbw = NL80211_CHAN_WIDTH_20_NOHT;
u32 _portid, i;
@@ -1954,7 +1975,23 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
return;
}
- if (!data->use_chanctx) {
+ if (vif && vif->type == NL80211_IFTYPE_NAN && !data->tmp_chan) {
+ /* For NAN Device simulation purposes, assume that NAN is always
+ * on channel 6 or channel 149, unless a ROC is in progress (for
+ * USD use cases).
+ */
+ if (data->nan_curr_dw_band == NL80211_BAND_2GHZ)
+ channel = ieee80211_get_channel(hw->wiphy, 2437);
+ else if (data->nan_curr_dw_band == NL80211_BAND_5GHZ)
+ channel = ieee80211_get_channel(hw->wiphy, 5745);
+ else
+ channel = NULL;
+
+ if (WARN_ON(!channel)) {
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
+ } else if (!data->use_chanctx) {
channel = data->channel;
confbw = data->bw;
} else if (txi->hw_queue == 4) {
@@ -1962,7 +1999,6 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
} else {
u8 link = u32_get_bits(IEEE80211_SKB_CB(skb)->control.flags,
IEEE80211_TX_CTRL_MLO_LINK);
- struct ieee80211_vif *vif = txi->control.vif;
struct ieee80211_link_sta *link_sta = NULL;
struct ieee80211_sta *sta = control->sta;
struct ieee80211_bss_conf *bss_conf;
@@ -3950,6 +3986,168 @@ out:
return err;
}
+static enum hrtimer_restart
+mac80211_hwsim_nan_dw_start(struct hrtimer *timer)
+{
+ struct mac80211_hwsim_data *data =
+ container_of(timer, struct mac80211_hwsim_data,
+ nan_timer);
+ struct ieee80211_hw *hw = data->hw;
+ u64 orig_tsf = mac80211_hwsim_get_tsf(hw, NULL), tsf = orig_tsf;
+ u32 dw_int = 512 * 1024;
+ u64 until_dw;
+
+ if (!data->nan_device_vif)
+ return HRTIMER_NORESTART;
+
+ if (data->nan_bands & BIT(NL80211_BAND_5GHZ)) {
+ if (data->nan_curr_dw_band == NL80211_BAND_2GHZ) {
+ dw_int = 128 * 1024;
+ data->nan_curr_dw_band = NL80211_BAND_5GHZ;
+ } else if (data->nan_curr_dw_band == NL80211_BAND_5GHZ) {
+ data->nan_curr_dw_band = NL80211_BAND_2GHZ;
+ }
+ }
+
+ until_dw = dw_int - do_div(tsf, dw_int);
+
+ /* The timer might fire just before the actual DW, in which case
+ * update the timeout to the actual next DW
+ */
+ if (until_dw < dw_int / 2)
+ until_dw += dw_int;
+
+ /* The do_div() call above modifies 'tsf' in place, so print the
+ * saved copy to keep the original TSF in the debug output.
+ */
+ wiphy_debug(hw->wiphy,
+ "%s: tsf=%llx, curr_dw_band=%u, next_dw=%llu\n",
+ __func__, orig_tsf, data->nan_curr_dw_band,
+ until_dw);
+
+ hrtimer_forward_now(&data->nan_timer,
+ ns_to_ktime(until_dw * NSEC_PER_USEC));
+
+ if (data->notify_dw) {
+ struct ieee80211_channel *ch;
+ struct wireless_dev *wdev =
+ ieee80211_vif_to_wdev(data->nan_device_vif);
+
+ if (data->nan_curr_dw_band == NL80211_BAND_5GHZ)
+ ch = ieee80211_get_channel(hw->wiphy, 5745);
+ else
+ ch = ieee80211_get_channel(hw->wiphy, 2437);
+
+ cfg80211_next_nan_dw_notif(wdev, ch, GFP_ATOMIC);
+ }
+
+ return HRTIMER_RESTART;
+}
+
+static int mac80211_hwsim_start_nan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_nan_conf *conf)
+{
+ struct mac80211_hwsim_data *data = hw->priv;
+ u64 tsf = mac80211_hwsim_get_tsf(hw, NULL);
+ u32 dw_int = 512 * 1000;
+ u64 until_dw = dw_int - do_div(tsf, dw_int);
+ struct wireless_dev *wdev = ieee80211_vif_to_wdev(vif);
+
+ if (vif->type != NL80211_IFTYPE_NAN)
+ return -EINVAL;
+
+ if (data->nan_device_vif)
+ return -EALREADY;
+
+ /* set this before starting the timer, as preemption might occur */
+ data->nan_device_vif = vif;
+ data->nan_bands = conf->bands;
+ data->nan_curr_dw_band = NL80211_BAND_2GHZ;
+
+ wiphy_debug(hw->wiphy, "nan_started, next_dw=%llu\n",
+ until_dw);
+
+ hrtimer_start(&data->nan_timer,
+ ns_to_ktime(until_dw * NSEC_PER_USEC),
+ HRTIMER_MODE_REL_SOFT);
+
+ if (conf->cluster_id && !is_zero_ether_addr(conf->cluster_id) &&
+ is_zero_ether_addr(hwsim_nan_cluster_id)) {
+ memcpy(hwsim_nan_cluster_id, conf->cluster_id, ETH_ALEN);
+ } else if (is_zero_ether_addr(hwsim_nan_cluster_id)) {
+ hwsim_nan_cluster_id[0] = 0x50;
+ hwsim_nan_cluster_id[1] = 0x6f;
+ hwsim_nan_cluster_id[2] = 0x9a;
+ hwsim_nan_cluster_id[3] = 0x01;
+ hwsim_nan_cluster_id[4] = get_random_u8();
+ hwsim_nan_cluster_id[5] = get_random_u8();
+ }
+
+ data->notify_dw = conf->enable_dw_notification;
+
+ cfg80211_nan_cluster_joined(wdev, hwsim_nan_cluster_id, true,
+ GFP_KERNEL);
+
+ return 0;
+}
+
+static int mac80211_hwsim_stop_nan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mac80211_hwsim_data *data = hw->priv;
+ struct mac80211_hwsim_data *data2;
+ bool nan_cluster_running = false;
+
+ if (vif->type != NL80211_IFTYPE_NAN || !data->nan_device_vif ||
+ data->nan_device_vif != vif)
+ return -EINVAL;
+
+ hrtimer_cancel(&data->nan_timer);
+ data->nan_device_vif = NULL;
+
+ spin_lock(&hwsim_radio_lock);
+ list_for_each_entry(data2, &hwsim_radios, list) {
+ if (data2->nan_device_vif) {
+ nan_cluster_running = true;
+ break;
+ }
+ }
+ spin_unlock(&hwsim_radio_lock);
+
+ if (!nan_cluster_running)
+ memset(hwsim_nan_cluster_id, 0, ETH_ALEN);
+
+ return 0;
+}
+
+static int mac80211_hwsim_change_nan_config(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_nan_conf *conf,
+ u32 changes)
+{
+ struct mac80211_hwsim_data *data = hw->priv;
+
+ if (vif->type != NL80211_IFTYPE_NAN)
+ return -EINVAL;
+
+ if (!data->nan_device_vif)
+ return -EINVAL;
+
+ wiphy_debug(hw->wiphy, "nan_config_changed: changes=0x%x\n", changes);
+
+ /* Handle only the changes we care about for simulation purposes */
+ if (changes & CFG80211_NAN_CONF_CHANGED_BANDS) {
+ data->nan_bands = conf->bands;
+ data->nan_curr_dw_band = NL80211_BAND_2GHZ;
+ }
+
+ if (changes & CFG80211_NAN_CONF_CHANGED_CONFIG)
+ data->notify_dw = conf->enable_dw_notification;
+
+ return 0;
+}
+
#ifdef CONFIG_MAC80211_DEBUGFS
#define HWSIM_DEBUGFS_OPS \
.link_add_debugfs = mac80211_hwsim_link_add_debugfs,
@@ -3982,6 +4180,9 @@ out:
.get_et_strings = mac80211_hwsim_get_et_strings, \
.start_pmsr = mac80211_hwsim_start_pmsr, \
.abort_pmsr = mac80211_hwsim_abort_pmsr, \
+ .start_nan = mac80211_hwsim_start_nan, \
+ .stop_nan = mac80211_hwsim_stop_nan, \
+ .nan_change_conf = mac80211_hwsim_change_nan_config, \
HWSIM_DEBUGFS_OPS
#define HWSIM_NON_MLO_OPS \
@@ -4047,6 +4248,7 @@ struct hwsim_new_radio_params {
u8 n_ciphers;
bool mlo;
const struct cfg80211_pmsr_capabilities *pmsr_capa;
+ bool nan_device;
};
static void hwsim_mcast_config_msg(struct sk_buff *mcast_skb,
@@ -4127,6 +4329,11 @@ static int append_radio_msg(struct sk_buff *skb, int id,
return ret;
}
+ if (param->nan_device) {
+ ret = nla_put_flag(skb, HWSIM_ATTR_SUPPORT_NAN_DEVICE);
+ if (ret < 0)
+ return ret;
+ }
return 0;
}
@@ -5239,14 +5446,18 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
/* Why is a second address needed here? */
memcpy(data->addresses[1].addr, addr, ETH_ALEN);
data->addresses[1].addr[0] |= 0x40;
- hw->wiphy->n_addresses = 2;
+ memcpy(data->addresses[2].addr, addr, ETH_ALEN);
+ data->addresses[2].addr[0] |= 0x50;
+
+ hw->wiphy->n_addresses = 3;
hw->wiphy->addresses = data->addresses;
/* possible address clash is checked at hash table insertion */
} else {
memcpy(data->addresses[0].addr, param->perm_addr, ETH_ALEN);
/* compatibility with automatically generated mac addr */
memcpy(data->addresses[1].addr, param->perm_addr, ETH_ALEN);
- hw->wiphy->n_addresses = 2;
+ memcpy(data->addresses[2].addr, param->perm_addr, ETH_ALEN);
+ hw->wiphy->n_addresses = 3;
hw->wiphy->addresses = data->addresses;
}
@@ -5283,6 +5494,30 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
n_limits++;
}
+ if (param->iftypes & BIT(NL80211_IFTYPE_NAN)) {
+ data->if_limits[n_limits].max = 1;
+ data->if_limits[n_limits].types = BIT(NL80211_IFTYPE_NAN);
+ n_limits++;
+
+ hw->wiphy->nan_supported_bands = BIT(NL80211_BAND_2GHZ) |
+ BIT(NL80211_BAND_5GHZ);
+
+ hw->wiphy->nan_capa.flags = WIPHY_NAN_FLAGS_CONFIGURABLE_SYNC |
+ WIPHY_NAN_FLAGS_USERSPACE_DE;
+ hw->wiphy->nan_capa.op_mode = NAN_OP_MODE_PHY_MODE_MASK |
+ NAN_OP_MODE_80P80MHZ |
+ NAN_OP_MODE_160MHZ;
+
+ hw->wiphy->nan_capa.n_antennas = 0x22;
+ hw->wiphy->nan_capa.max_channel_switch_time = 0;
+ hw->wiphy->nan_capa.dev_capabilities =
+ NAN_DEV_CAPA_EXT_KEY_ID_SUPPORTED |
+ NAN_DEV_CAPA_NDPE_SUPPORTED;
+
+ hrtimer_setup(&data->nan_timer, mac80211_hwsim_nan_dw_start,
+ CLOCK_MONOTONIC, HRTIMER_MODE_ABS_SOFT);
+ }
+
data->if_combination.radar_detect_widths =
BIT(NL80211_CHAN_WIDTH_5) |
BIT(NL80211_CHAN_WIDTH_10) |
@@ -5701,6 +5936,8 @@ static int mac80211_hwsim_get_radio(struct sk_buff *skb,
REGULATORY_STRICT_REG);
param.p2p_device = !!(data->hw->wiphy->interface_modes &
BIT(NL80211_IFTYPE_P2P_DEVICE));
+ param.nan_device = !!(data->hw->wiphy->interface_modes &
+ BIT(NL80211_IFTYPE_NAN));
param.use_chanctx = data->use_chanctx;
param.regd = data->regd;
param.channels = data->channels;
@@ -6119,6 +6356,7 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
+ param.nan_device = info->attrs[HWSIM_ATTR_SUPPORT_NAN_DEVICE];
param.channels = channels;
param.destroy_on_close =
info->attrs[HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE];
@@ -6190,6 +6428,13 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
param.p2p_device = true;
}
+ /* honor NAN whether requested via the flag or the iftype bit */
+ if (param.nan_device ||
+ param.iftypes & BIT(NL80211_IFTYPE_NAN)) {
+ param.iftypes |= BIT(NL80211_IFTYPE_NAN);
+ param.nan_device = true;
+ }
+
if (info->attrs[HWSIM_ATTR_CIPHER_SUPPORT]) {
u32 len = nla_len(info->attrs[HWSIM_ATTR_CIPHER_SUPPORT]);
@@ -6910,12 +7155,14 @@ static int __init init_mac80211_hwsim(void)
}
param.p2p_device = support_p2p_device;
+ param.nan_device = true;
param.mlo = mlo;
param.multi_radio = multi_radio;
param.use_chanctx = channels > 1 || mlo || multi_radio;
param.iftypes = HWSIM_IFTYPE_SUPPORT_MASK;
if (param.p2p_device)
param.iftypes |= BIT(NL80211_IFTYPE_P2P_DEVICE);
+ param.iftypes |= BIT(NL80211_IFTYPE_NAN);
err = mac80211_hwsim_new_radio(NULL, &param);
if (err < 0)
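
The hwsim NAN support above centers on scheduling the next Discovery Window from the simulated TSF. The key arithmetic is the do_div() idiom: do_div() divides its u64 argument in place and returns the remainder, so the interval minus the remainder is the time left until the next window boundary. In isolation (hypothetical helper; 512 TU = 512 * 1024 usec):

#include <asm/div64.h>

/* Microseconds until the next DW boundary. Note do_div() mutates
 * 'tsf' (it holds the quotient afterwards), which is why the driver
 * above keeps orig_tsf for its debug print.
 */
static u64 usecs_until_next_dw(u64 tsf)
{
	u32 dw_int = 512 * 1024;

	return dw_int - do_div(tsf, dw_int);
}
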
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.h b/drivers/net/wireless/virtual/mac80211_hwsim.h
index fa157c883f7f..c2d06cf852a5 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.h
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.h
@@ -3,7 +3,7 @@
* mac80211_hwsim - software simulator of 802.11 radio(s) for mac80211
* Copyright (c) 2008, Jouni Malinen <j@w1.fi>
* Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
- * Copyright (C) 2020, 2022-2024 Intel Corporation
+ * Copyright (C) 2020, 2022-2025 Intel Corporation
*/
#ifndef __MAC80211_HWSIM_H
@@ -160,6 +160,7 @@ enum hwsim_commands {
* @HWSIM_ATTR_MULTI_RADIO: Register multiple wiphy radios (flag).
* Adds one radio for each band. Number of supported channels will be set for
* each radio instead of for the wiphy.
+ * @HWSIM_ATTR_SUPPORT_NAN_DEVICE: support NAN Device virtual interface (flag)
* @__HWSIM_ATTR_MAX: enum limit
*/
enum hwsim_attrs {
@@ -193,6 +194,7 @@ enum hwsim_attrs {
HWSIM_ATTR_PMSR_REQUEST,
HWSIM_ATTR_PMSR_RESULT,
HWSIM_ATTR_MULTI_RADIO,
+ HWSIM_ATTR_SUPPORT_NAN_DEVICE,
__HWSIM_ATTR_MAX,
};
#define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1)
diff --git a/drivers/net/wireless/virtual/virt_wifi.c b/drivers/net/wireless/virtual/virt_wifi.c
index 1fffeff2190c..4eae89376feb 100644
--- a/drivers/net/wireless/virtual/virt_wifi.c
+++ b/drivers/net/wireless/virtual/virt_wifi.c
@@ -277,7 +277,9 @@ static void virt_wifi_connect_complete(struct work_struct *work)
priv->is_connected = true;
/* Schedules an event that acquires the rtnl lock. */
- cfg80211_connect_result(priv->upperdev, requested_bss, NULL, 0, NULL, 0,
+ cfg80211_connect_result(priv->upperdev,
+ priv->is_connected ? fake_router_bssid : NULL,
+ NULL, 0, NULL, 0,
status, GFP_KERNEL);
netif_carrier_on(priv->upperdev);
}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.c b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
index a066977af0be..08ff0d6ccfab 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_pcie.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
@@ -69,7 +69,7 @@ static int ipc_pcie_resources_request(struct iosm_pcie *ipc_pcie)
{
struct pci_dev *pci = ipc_pcie->pci;
u32 cap = 0;
- u32 ret;
+ int ret;
/* Reserved PCI I/O and memory resources.
* Mark all PCI regions associated with PCI device pci as
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
index 6a7a26085fc7..2310493203d3 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
@@ -1085,7 +1085,8 @@ static void t7xx_dpmaif_bat_release_work(struct work_struct *work)
int t7xx_dpmaif_bat_rel_wq_alloc(struct dpmaif_ctrl *dpmaif_ctrl)
{
dpmaif_ctrl->bat_release_wq = alloc_workqueue("dpmaif_bat_release_work_queue",
- WQ_MEM_RECLAIM, 1);
+ WQ_MEM_RECLAIM | WQ_PERCPU,
+ 1);
if (!dpmaif_ctrl->bat_release_wq)
return -ENOMEM;
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c
index 8bf63f2dcbbf..eb137e078423 100644
--- a/drivers/net/wwan/t7xx/t7xx_pci.c
+++ b/drivers/net/wwan/t7xx/t7xx_pci.c
@@ -939,6 +939,7 @@ static void t7xx_pci_remove(struct pci_dev *pdev)
static const struct pci_device_id t7xx_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x4d75) },
+ { PCI_DEVICE(0x03f0, 0x09c8) }, // HP DRMR-H01
{ PCI_DEVICE(0x14c0, 0x4d75) }, // Dell DW5933e
{ }
};
diff --git a/drivers/net/wwan/wwan_hwsim.c b/drivers/net/wwan/wwan_hwsim.c
index b02befd1b6fb..733688cd4607 100644
--- a/drivers/net/wwan/wwan_hwsim.c
+++ b/drivers/net/wwan/wwan_hwsim.c
@@ -509,7 +509,7 @@ static int __init wwan_hwsim_init(void)
if (wwan_hwsim_devsnum < 0 || wwan_hwsim_devsnum > 128)
return -EINVAL;
- wwan_wq = alloc_workqueue("wwan_wq", 0, 0);
+ wwan_wq = alloc_workqueue("wwan_wq", WQ_PERCPU, 0);
if (!wwan_wq)
return -ENOMEM;
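
Both WWAN hunks above pass WQ_PERCPU explicitly. The motivation is presumably the workqueue transition that makes unbound queues the implicit default, so queues relying on per-CPU execution now state it, alongside any existing flags such as WQ_MEM_RECLAIM:

#include <linux/workqueue.h>

/* Sketch, assuming WQ_PERCPU as used in the hunks above: request
 * per-CPU execution explicitly rather than relying on the old
 * default.
 */
static struct workqueue_struct *example_alloc_wq(void)
{
	return alloc_workqueue("example_wq", WQ_PERCPU, 0);
}
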
diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
index 14661249c690..2b043a9f9533 100644
--- a/drivers/nfc/pn533/pn533.c
+++ b/drivers/nfc/pn533/pn533.c
@@ -1412,11 +1412,9 @@ static int pn533_autopoll_complete(struct pn533 *dev, void *arg,
if (dev->poll_mod_count != 0)
return rc;
goto stop_poll;
- } else if (rc < 0) {
- nfc_err(dev->dev,
- "Error %d when running autopoll\n", rc);
- goto stop_poll;
}
+ nfc_err(dev->dev, "Error %d when running autopoll\n", rc);
+ goto stop_poll;
}
nbtg = resp->data[0];
@@ -1505,11 +1503,9 @@ static int pn533_poll_complete(struct pn533 *dev, void *arg,
if (dev->poll_mod_count != 0)
return rc;
goto stop_poll;
- } else if (rc < 0) {
- nfc_err(dev->dev,
- "Error %d when running poll\n", rc);
- goto stop_poll;
}
+ nfc_err(dev->dev, "Error %d when running poll\n", rc);
+ goto stop_poll;
}
cur_mod = dev->poll_mod_active[dev->poll_mod_curr];
diff --git a/drivers/nfc/s3fwrn5/Kconfig b/drivers/nfc/s3fwrn5/Kconfig
index 8a6b1a79de25..96386b73fa2b 100644
--- a/drivers/nfc/s3fwrn5/Kconfig
+++ b/drivers/nfc/s3fwrn5/Kconfig
@@ -1,8 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config NFC_S3FWRN5
tristate
- select CRYPTO
- select CRYPTO_HASH
+ select CRYPTO_LIB_SHA1
help
Core driver for Samsung S3FWRN5 NFC chip. Contains core utilities
of chip. It's intended to be used by PHYs to avoid duplicating lots
diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c
index 781cdbcac104..64d61b2a715a 100644
--- a/drivers/nfc/s3fwrn5/firmware.c
+++ b/drivers/nfc/s3fwrn5/firmware.c
@@ -8,7 +8,6 @@
#include <linux/completion.h>
#include <linux/firmware.h>
-#include <crypto/hash.h>
#include <crypto/sha1.h>
#include "s3fwrn5.h"
@@ -411,27 +410,13 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
struct device *dev = &fw_info->ndev->nfc_dev->dev;
struct s3fwrn5_fw_image *fw = &fw_info->fw;
u8 hash_data[SHA1_DIGEST_SIZE];
- struct crypto_shash *tfm;
u32 image_size, off;
int ret;
image_size = fw_info->sector_size * fw->image_sectors;
/* Compute SHA of firmware data */
-
- tfm = crypto_alloc_shash("sha1", 0, 0);
- if (IS_ERR(tfm)) {
- dev_err(dev, "Cannot allocate shash (code=%pe)\n", tfm);
- return PTR_ERR(tfm);
- }
-
- ret = crypto_shash_tfm_digest(tfm, fw->image, image_size, hash_data);
-
- crypto_free_shash(tfm);
- if (ret) {
- dev_err(dev, "Cannot compute hash (code=%d)\n", ret);
- return ret;
- }
+ sha1(fw->image, image_size, hash_data);
/* Firmware update process */
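
The s3fwrn5 conversion above drops the crypto_shash allocate/digest/free dance for the one-shot SHA-1 library helper, which needs no transform object and cannot fail. Minimal usage sketch (hypothetical wrapper around the same call):

#include <crypto/sha1.h>

/* One-shot digest of a firmware image into a stack buffer. */
static void digest_image(const u8 *image, size_t len,
			 u8 out[SHA1_DIGEST_SIZE])
{
	sha1(image, len, out);
}
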
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
index 63ceed89b62e..1a163596ddf5 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
@@ -197,13 +197,22 @@ static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
static int amd_ntb_get_link_status(struct amd_ntb_dev *ndev)
{
- struct pci_dev *pdev = NULL;
+ struct pci_dev *pdev = ndev->ntb.pdev;
struct pci_dev *pci_swds = NULL;
struct pci_dev *pci_swus = NULL;
u32 stat;
int rc;
if (ndev->ntb.topo == NTB_TOPO_SEC) {
+ if (ndev->dev_data->is_endpoint) {
+ rc = pcie_capability_read_dword(pdev, PCI_EXP_LNKCTL, &stat);
+ if (rc)
+ return rc;
+
+ ndev->lnk_sta = stat;
+ return 0;
+ }
+
/* Locate the pointer to Downstream Switch for this device */
pci_swds = pci_upstream_bridge(ndev->ntb.pdev);
if (pci_swds) {
@@ -1311,6 +1320,11 @@ static const struct ntb_dev_data dev_data[] = {
.mw_count = 2,
.mw_idx = 2,
},
+ { /* for device 0x17d7 */
+ .mw_count = 2,
+ .mw_idx = 2,
+ .is_endpoint = true,
+ },
};
static const struct pci_device_id amd_ntb_pci_tbl[] = {
@@ -1319,6 +1333,8 @@ static const struct pci_device_id amd_ntb_pci_tbl[] = {
{ PCI_VDEVICE(AMD, 0x14c0), (kernel_ulong_t)&dev_data[1] },
{ PCI_VDEVICE(AMD, 0x14c3), (kernel_ulong_t)&dev_data[1] },
{ PCI_VDEVICE(AMD, 0x155a), (kernel_ulong_t)&dev_data[1] },
+ { PCI_VDEVICE(AMD, 0x17d4), (kernel_ulong_t)&dev_data[1] },
+ { PCI_VDEVICE(AMD, 0x17d7), (kernel_ulong_t)&dev_data[2] },
{ PCI_VDEVICE(HYGON, 0x145b), (kernel_ulong_t)&dev_data[0] },
{ 0, }
};
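
For the new endpoint-mode part (device 0x17d7), link status above is read from the device's own PCIe capability rather than by walking up to the switch ports. One detail worth noting: the dword read at PCI_EXP_LNKCTL returns Link Control in the low 16 bits and Link Status in the high 16 bits, which appears to be why lnk_sta is stored from the whole dword. Sketch (hypothetical helper):

#include <linux/pci.h>

/* Read Link Control + Link Status in one dword access. */
static int read_ep_link_state(struct pci_dev *pdev, u32 *lnk_sta)
{
	u32 stat;
	int rc;

	rc = pcie_capability_read_dword(pdev, PCI_EXP_LNKCTL, &stat);
	if (rc)
		return rc;

	*lnk_sta = stat;
	return 0;
}
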
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.h b/drivers/ntb/hw/amd/ntb_hw_amd.h
index 5f337b1572a0..e8c3165fa38b 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.h
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.h
@@ -168,6 +168,7 @@ enum {
struct ntb_dev_data {
const unsigned char mw_count;
const unsigned int mw_idx;
+ const bool is_endpoint;
};
struct amd_ntb_dev;
diff --git a/drivers/ntb/hw/epf/ntb_hw_epf.c b/drivers/ntb/hw/epf/ntb_hw_epf.c
index 00f0e78f685b..d3ecf25a5162 100644
--- a/drivers/ntb/hw/epf/ntb_hw_epf.c
+++ b/drivers/ntb/hw/epf/ntb_hw_epf.c
@@ -49,6 +49,7 @@
#define NTB_EPF_COMMAND_TIMEOUT 1000 /* 1 Sec */
enum pci_barno {
+ NO_BAR = -1,
BAR_0,
BAR_1,
BAR_2,
@@ -57,16 +58,26 @@ enum pci_barno {
BAR_5,
};
+enum epf_ntb_bar {
+ BAR_CONFIG,
+ BAR_PEER_SPAD,
+ BAR_DB,
+ BAR_MW1,
+ BAR_MW2,
+ BAR_MW3,
+ BAR_MW4,
+ NTB_BAR_NUM,
+};
+
+#define NTB_EPF_MAX_MW_COUNT (NTB_BAR_NUM - BAR_MW1)
+
struct ntb_epf_dev {
struct ntb_dev ntb;
struct device *dev;
/* Mutex to protect providing commands to NTB EPF */
struct mutex cmd_lock;
- enum pci_barno ctrl_reg_bar;
- enum pci_barno peer_spad_reg_bar;
- enum pci_barno db_reg_bar;
- enum pci_barno mw_bar;
+ const enum pci_barno *barno_map;
unsigned int mw_count;
unsigned int spad_count;
@@ -85,17 +96,6 @@ struct ntb_epf_dev {
#define ntb_ndev(__ntb) container_of(__ntb, struct ntb_epf_dev, ntb)
-struct ntb_epf_data {
- /* BAR that contains both control region and self spad region */
- enum pci_barno ctrl_reg_bar;
- /* BAR that contains peer spad region */
- enum pci_barno peer_spad_reg_bar;
- /* BAR that contains Doorbell region and Memory window '1' */
- enum pci_barno db_reg_bar;
- /* BAR that contains memory windows*/
- enum pci_barno mw_bar;
-};
-
static int ntb_epf_send_command(struct ntb_epf_dev *ndev, u32 command,
u32 argument)
{
@@ -144,7 +144,7 @@ static int ntb_epf_mw_to_bar(struct ntb_epf_dev *ndev, int idx)
return -EINVAL;
}
- return idx + 2;
+ return ndev->barno_map[BAR_MW1 + idx];
}
static int ntb_epf_mw_count(struct ntb_dev *ntb, int pidx)
@@ -413,7 +413,9 @@ static int ntb_epf_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
return -EINVAL;
}
- bar = idx + ndev->mw_bar;
+ bar = ntb_epf_mw_to_bar(ndev, idx);
+ if (bar < 0)
+ return bar;
mw_size = pci_resource_len(ntb->pdev, bar);
@@ -455,7 +457,9 @@ static int ntb_epf_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
if (idx == 0)
offset = readl(ndev->ctrl_reg + NTB_EPF_MW1_OFFSET);
- bar = idx + ndev->mw_bar;
+ bar = ntb_epf_mw_to_bar(ndev, idx);
+ if (bar < 0)
+ return bar;
if (base)
*base = pci_resource_start(ndev->ntb.pdev, bar) + offset;
@@ -560,6 +564,11 @@ static int ntb_epf_init_dev(struct ntb_epf_dev *ndev)
ndev->mw_count = readl(ndev->ctrl_reg + NTB_EPF_MW_COUNT);
ndev->spad_count = readl(ndev->ctrl_reg + NTB_EPF_SPAD_COUNT);
+ if (ndev->mw_count > NTB_EPF_MAX_MW_COUNT) {
+ dev_err(dev, "Unsupported MW count: %u\n", ndev->mw_count);
+ return -EINVAL;
+ }
+
return 0;
}
@@ -596,14 +605,15 @@ static int ntb_epf_init_pci(struct ntb_epf_dev *ndev,
dev_warn(&pdev->dev, "Cannot DMA highmem\n");
}
- ndev->ctrl_reg = pci_iomap(pdev, ndev->ctrl_reg_bar, 0);
+ ndev->ctrl_reg = pci_iomap(pdev, ndev->barno_map[BAR_CONFIG], 0);
if (!ndev->ctrl_reg) {
ret = -EIO;
goto err_pci_regions;
}
- if (ndev->peer_spad_reg_bar) {
- ndev->peer_spad_reg = pci_iomap(pdev, ndev->peer_spad_reg_bar, 0);
+ if (ndev->barno_map[BAR_PEER_SPAD] != ndev->barno_map[BAR_CONFIG]) {
+ ndev->peer_spad_reg = pci_iomap(pdev,
+ ndev->barno_map[BAR_PEER_SPAD], 0);
if (!ndev->peer_spad_reg) {
ret = -EIO;
goto err_pci_regions;
@@ -614,7 +624,7 @@ static int ntb_epf_init_pci(struct ntb_epf_dev *ndev,
ndev->peer_spad_reg = ndev->ctrl_reg + spad_off + spad_sz;
}
- ndev->db_reg = pci_iomap(pdev, ndev->db_reg_bar, 0);
+ ndev->db_reg = pci_iomap(pdev, ndev->barno_map[BAR_DB], 0);
if (!ndev->db_reg) {
ret = -EIO;
goto err_pci_regions;
@@ -659,12 +669,7 @@ static void ntb_epf_cleanup_isr(struct ntb_epf_dev *ndev)
static int ntb_epf_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- enum pci_barno peer_spad_reg_bar = BAR_1;
- enum pci_barno ctrl_reg_bar = BAR_0;
- enum pci_barno db_reg_bar = BAR_2;
- enum pci_barno mw_bar = BAR_2;
struct device *dev = &pdev->dev;
- struct ntb_epf_data *data;
struct ntb_epf_dev *ndev;
int ret;
@@ -675,18 +680,10 @@ static int ntb_epf_pci_probe(struct pci_dev *pdev,
if (!ndev)
return -ENOMEM;
- data = (struct ntb_epf_data *)id->driver_data;
- if (data) {
- peer_spad_reg_bar = data->peer_spad_reg_bar;
- ctrl_reg_bar = data->ctrl_reg_bar;
- db_reg_bar = data->db_reg_bar;
- mw_bar = data->mw_bar;
- }
+ ndev->barno_map = (const enum pci_barno *)id->driver_data;
+ if (!ndev->barno_map)
+ return -EINVAL;
- ndev->peer_spad_reg_bar = peer_spad_reg_bar;
- ndev->ctrl_reg_bar = ctrl_reg_bar;
- ndev->db_reg_bar = db_reg_bar;
- ndev->mw_bar = mw_bar;
ndev->dev = dev;
ntb_epf_init_struct(ndev, pdev);
@@ -730,30 +727,51 @@ static void ntb_epf_pci_remove(struct pci_dev *pdev)
ntb_epf_deinit_pci(ndev);
}
-static const struct ntb_epf_data j721e_data = {
- .ctrl_reg_bar = BAR_0,
- .peer_spad_reg_bar = BAR_1,
- .db_reg_bar = BAR_2,
- .mw_bar = BAR_2,
+static const enum pci_barno j721e_map[NTB_BAR_NUM] = {
+ [BAR_CONFIG] = BAR_0,
+ [BAR_PEER_SPAD] = BAR_1,
+ [BAR_DB] = BAR_2,
+ [BAR_MW1] = BAR_2,
+ [BAR_MW2] = BAR_3,
+ [BAR_MW3] = BAR_4,
+ [BAR_MW4] = BAR_5
};
-static const struct ntb_epf_data mx8_data = {
- .ctrl_reg_bar = BAR_0,
- .peer_spad_reg_bar = BAR_0,
- .db_reg_bar = BAR_2,
- .mw_bar = BAR_4,
+static const enum pci_barno mx8_map[NTB_BAR_NUM] = {
+ [BAR_CONFIG] = BAR_0,
+ [BAR_PEER_SPAD] = BAR_0,
+ [BAR_DB] = BAR_2,
+ [BAR_MW1] = BAR_4,
+ [BAR_MW2] = BAR_5,
+ [BAR_MW3] = NO_BAR,
+ [BAR_MW4] = NO_BAR
+};
+
+static const enum pci_barno rcar_barno[NTB_BAR_NUM] = {
+ [BAR_CONFIG] = BAR_0,
+ [BAR_PEER_SPAD] = BAR_0,
+ [BAR_DB] = BAR_4,
+ [BAR_MW1] = BAR_2,
+ [BAR_MW2] = NO_BAR,
+ [BAR_MW3] = NO_BAR,
+ [BAR_MW4] = NO_BAR,
};
static const struct pci_device_id ntb_epf_pci_tbl[] = {
{
PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
.class = PCI_CLASS_MEMORY_RAM << 8, .class_mask = 0xffff00,
- .driver_data = (kernel_ulong_t)&j721e_data,
+ .driver_data = (kernel_ulong_t)j721e_map,
},
{
PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x0809),
.class = PCI_CLASS_MEMORY_RAM << 8, .class_mask = 0xffff00,
- .driver_data = (kernel_ulong_t)&mx8_data,
+ .driver_data = (kernel_ulong_t)mx8_map,
+ },
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0030),
+ .class = PCI_CLASS_MEMORY_RAM << 8, .class_mask = 0xffff00,
+ .driver_data = (kernel_ulong_t)rcar_barno,
},
{ },
};
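
The ntb_hw_epf rework above replaces four discrete BAR fields with a per-platform table indexed by logical role, using NO_BAR (-1) as a sentinel for roles a platform lacks; that is why the updated ntb_epf_mw_set_trans() and ntb_epf_peer_mw_get_addr() now check for a negative BAR. The lookup in isolation (hypothetical helper mirroring ntb_epf_mw_to_bar()):

/* Map a memory-window index to a physical BAR via the role table.
 * May return NO_BAR (-1); callers must treat negatives as errors.
 */
static int mw_idx_to_bar(const enum pci_barno *map, int idx, int mw_count)
{
	if (idx < 0 || idx >= mw_count)
		return -EINVAL;

	return map[BAR_MW1 + idx];
}
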
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 4f775c3e218f..eb875e3db2e3 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -59,6 +59,7 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
+#include <linux/mutex.h>
#include "linux/ntb.h"
#include "linux/ntb_transport.h"
@@ -241,6 +242,9 @@ struct ntb_transport_ctx {
struct work_struct link_cleanup;
struct dentry *debugfs_node_dir;
+
+ /* Ensure the link event work items are executed serially */
+ struct mutex link_event_lock;
};
enum {
@@ -1024,6 +1028,7 @@ static void ntb_transport_link_cleanup_work(struct work_struct *work)
struct ntb_transport_ctx *nt =
container_of(work, struct ntb_transport_ctx, link_cleanup);
+ guard(mutex)(&nt->link_event_lock);
ntb_transport_link_cleanup(nt);
}
@@ -1047,6 +1052,8 @@ static void ntb_transport_link_work(struct work_struct *work)
u32 val;
int rc = 0, i, spad;
+ guard(mutex)(&nt->link_event_lock);
+
/* send the local info, in the opposite order of the way we read it */
if (nt->use_msi) {
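
The ntb_transport hunks serialize the two link-event work handlers with a mutex taken via guard(), the scope-based lock helper from <linux/cleanup.h>: the mutex is released automatically when the enclosing scope exits. A self-contained sketch of the idiom (the handler is hypothetical):

#include <linux/cleanup.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(example_link_event_lock);

/* The guard acquires the mutex here and drops it on any return
 * path out of the function body.
 */
static void example_link_work(struct work_struct *work)
{
	guard(mutex)(&example_link_event_lock);
	/* ... link bring-up runs with the mutex held ... */
}
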
diff --git a/drivers/nvdimm/badrange.c b/drivers/nvdimm/badrange.c
index ee478ccde7c6..36c626db459a 100644
--- a/drivers/nvdimm/badrange.c
+++ b/drivers/nvdimm/badrange.c
@@ -278,8 +278,7 @@ void nvdimm_badblocks_populate(struct nd_region *nd_region,
}
nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
- nvdimm_bus_lock(&nvdimm_bus->dev);
+ guard(nvdimm_bus)(&nvdimm_bus->dev);
badblocks_populate(&nvdimm_bus->badrange, bb, range);
- nvdimm_bus_unlock(&nvdimm_bus->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 2a1aa32e6693..a933db961ed7 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1478,12 +1478,12 @@ static void btt_submit_bio(struct bio *bio)
bio_endio(bio);
}
-static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
+static int btt_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
/* some standard values */
geo->heads = 1 << 6;
geo->sectors = 1 << 5;
- geo->cylinders = get_capacity(bd->bd_disk) >> 11;
+ geo->cylinders = get_capacity(disk) >> 11;
return 0;
}
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index 497fd434a6a1..b3279b86bbfd 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -50,14 +50,12 @@ static ssize_t sector_size_store(struct device *dev,
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
rc = nd_size_select_store(dev, buf, &nd_btt->lbasize,
btt_lbasize_supported);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
return rc ? rc : len;
}
@@ -93,13 +91,10 @@ static ssize_t namespace_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_btt *nd_btt = to_nd_btt(dev);
- ssize_t rc;
- nvdimm_bus_lock(dev);
- rc = sprintf(buf, "%s\n", nd_btt->ndns
+ guard(nvdimm_bus)(dev);
+ return sprintf(buf, "%s\n", nd_btt->ndns
? dev_name(&nd_btt->ndns->dev) : "");
- nvdimm_bus_unlock(dev);
- return rc;
}
static ssize_t namespace_store(struct device *dev,
@@ -108,13 +103,11 @@ static ssize_t namespace_store(struct device *dev,
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
return rc;
}
@@ -351,9 +344,8 @@ int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns)
return -ENODEV;
}
- nvdimm_bus_lock(&ndns->dev);
- btt_dev = __nd_btt_create(nd_region, 0, NULL, ndns);
- nvdimm_bus_unlock(&ndns->dev);
+ scoped_guard(nvdimm_bus, &ndns->dev)
+ btt_dev = __nd_btt_create(nd_region, 0, NULL, ndns);
if (!btt_dev)
return -ENOMEM;
btt_sb = devm_kzalloc(dev, sizeof(*btt_sb), GFP_KERNEL);
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 0ccf4a9e523a..87178a53ff9c 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -5,7 +5,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/libnvdimm.h>
#include <linux/sched/mm.h>
-#include <linux/vmalloc.h>
+#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/blkdev.h>
@@ -13,7 +13,6 @@
#include <linux/async.h>
#include <linux/ndctl.h>
#include <linux/sched.h>
-#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/fs.h>
#include <linux/io.h>
@@ -64,17 +63,15 @@ static struct module *to_bus_provider(struct device *dev)
static void nvdimm_bus_probe_start(struct nvdimm_bus *nvdimm_bus)
{
- nvdimm_bus_lock(&nvdimm_bus->dev);
+ guard(nvdimm_bus)(&nvdimm_bus->dev);
nvdimm_bus->probe_active++;
- nvdimm_bus_unlock(&nvdimm_bus->dev);
}
static void nvdimm_bus_probe_end(struct nvdimm_bus *nvdimm_bus)
{
- nvdimm_bus_lock(&nvdimm_bus->dev);
+ guard(nvdimm_bus)(&nvdimm_bus->dev);
if (--nvdimm_bus->probe_active == 0)
wake_up(&nvdimm_bus->wait);
- nvdimm_bus_unlock(&nvdimm_bus->dev);
}
static int nvdimm_bus_probe(struct device *dev)
@@ -1031,14 +1028,12 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
unsigned int cmd = _IOC_NR(ioctl_cmd);
struct device *dev = &nvdimm_bus->dev;
void __user *p = (void __user *) arg;
- char *out_env = NULL, *in_env = NULL;
const char *cmd_name, *dimm_name;
u32 in_len = 0, out_len = 0;
unsigned int func = cmd;
unsigned long cmd_mask;
struct nd_cmd_pkg pkg;
int rc, i, cmd_rc;
- void *buf = NULL;
u64 buf_len = 0;
if (nvdimm) {
@@ -1097,7 +1092,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
}
/* process an input envelope */
- in_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
+ char *in_env __free(kfree) = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
if (!in_env)
return -ENOMEM;
for (i = 0; i < desc->in_num; i++) {
@@ -1107,17 +1102,14 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
if (in_size == UINT_MAX) {
dev_err(dev, "%s:%s unknown input size cmd: %s field: %d\n",
__func__, dimm_name, cmd_name, i);
- rc = -ENXIO;
- goto out;
+ return -ENXIO;
}
if (in_len < ND_CMD_MAX_ENVELOPE)
copy = min_t(u32, ND_CMD_MAX_ENVELOPE - in_len, in_size);
else
copy = 0;
- if (copy && copy_from_user(&in_env[in_len], p + in_len, copy)) {
- rc = -EFAULT;
- goto out;
- }
+ if (copy && copy_from_user(&in_env[in_len], p + in_len, copy))
+ return -EFAULT;
in_len += in_size;
}
@@ -1129,11 +1121,9 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
}
/* process an output envelope */
- out_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
- if (!out_env) {
- rc = -ENOMEM;
- goto out;
- }
+ char *out_env __free(kfree) = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
+ if (!out_env)
+ return -ENOMEM;
for (i = 0; i < desc->out_num; i++) {
u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i,
@@ -1143,8 +1133,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
if (out_size == UINT_MAX) {
dev_dbg(dev, "%s unknown output size cmd: %s field: %d\n",
dimm_name, cmd_name, i);
- rc = -EFAULT;
- goto out;
+ return -EFAULT;
}
if (out_len < ND_CMD_MAX_ENVELOPE)
copy = min_t(u32, ND_CMD_MAX_ENVELOPE - out_len, out_size);
@@ -1152,8 +1141,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
copy = 0;
if (copy && copy_from_user(&out_env[out_len],
p + in_len + out_len, copy)) {
- rc = -EFAULT;
- goto out;
+ return -EFAULT;
}
out_len += out_size;
}
@@ -1162,30 +1150,25 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
if (buf_len > ND_IOCTL_MAX_BUFLEN) {
dev_dbg(dev, "%s cmd: %s buf_len: %llu > %d\n", dimm_name,
cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN);
- rc = -EINVAL;
- goto out;
+ return -EINVAL;
}
- buf = vmalloc(buf_len);
- if (!buf) {
- rc = -ENOMEM;
- goto out;
- }
+ void *buf __free(kvfree) = kvzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- if (copy_from_user(buf, p, buf_len)) {
- rc = -EFAULT;
- goto out;
- }
+ if (copy_from_user(buf, p, buf_len))
+ return -EFAULT;
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf);
if (rc)
- goto out_unlock;
+ return rc;
rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, &cmd_rc);
if (rc < 0)
- goto out_unlock;
+ return rc;
if (!nvdimm && cmd == ND_CMD_CLEAR_ERROR && cmd_rc >= 0) {
struct nd_cmd_clear_error *clear_err = buf;
@@ -1195,16 +1178,9 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
}
if (copy_to_user(p, buf, buf_len))
- rc = -EFAULT;
+ return -EFAULT;
-out_unlock:
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
-out:
- kfree(in_env);
- kfree(out_env);
- vfree(buf);
- return rc;
+ return 0;
}
enum nd_ioctl_mode {
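
The __nd_ioctl() rework above is a systematic conversion to scope-based cleanup: __free(kfree) and __free(kvfree) tie each buffer's lifetime to its variable, so every early return frees it without goto unwinding, and guard(device)/guard(nvdimm_bus) do the same for the two locks. The allocation half of the pattern in isolation (hypothetical function):

#include <linux/cleanup.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

/* The buffer is freed automatically on every return path, including
 * the success path, unless ownership is handed off via no_free_ptr().
 */
static int example_parse(const void __user *p, size_t len)
{
	char *env __free(kfree) = kzalloc(len, GFP_KERNEL);

	if (!env)
		return -ENOMEM;
	if (copy_from_user(env, p, len))
		return -EFAULT;

	return 0;
}
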
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index 51614651d2e7..309cd2cddb0e 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -34,11 +34,10 @@ void nd_detach_ndns(struct device *dev,
if (!ndns)
return;
- get_device(&ndns->dev);
- nvdimm_bus_lock(&ndns->dev);
+
+ struct device *ndev __free(put_device) = get_device(&ndns->dev);
+ guard(nvdimm_bus)(ndev);
__nd_detach_ndns(dev, _ndns);
- nvdimm_bus_unlock(&ndns->dev);
- put_device(&ndns->dev);
}
bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index eaa796629c27..5ba204113fe1 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -141,9 +141,8 @@ static void nvdimm_map_put(void *data)
struct nvdimm_map *nvdimm_map = data;
struct nvdimm_bus *nvdimm_bus = nvdimm_map->nvdimm_bus;
- nvdimm_bus_lock(&nvdimm_bus->dev);
+ guard(nvdimm_bus)(&nvdimm_bus->dev);
kref_put(&nvdimm_map->kref, nvdimm_map_release);
- nvdimm_bus_unlock(&nvdimm_bus->dev);
}
/**
@@ -158,13 +157,13 @@ void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
{
struct nvdimm_map *nvdimm_map;
- nvdimm_bus_lock(dev);
- nvdimm_map = find_nvdimm_map(dev, offset);
- if (!nvdimm_map)
- nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags);
- else
- kref_get(&nvdimm_map->kref);
- nvdimm_bus_unlock(dev);
+ scoped_guard(nvdimm_bus, dev) {
+ nvdimm_map = find_nvdimm_map(dev, offset);
+ if (!nvdimm_map)
+ nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags);
+ else
+ kref_get(&nvdimm_map->kref);
+ }
if (!nvdimm_map)
return NULL;
diff --git a/drivers/nvdimm/dax_devs.c b/drivers/nvdimm/dax_devs.c
index 37b743acbb7b..ba4c409ede65 100644
--- a/drivers/nvdimm/dax_devs.c
+++ b/drivers/nvdimm/dax_devs.c
@@ -104,12 +104,12 @@ int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns)
return -ENODEV;
}
- nvdimm_bus_lock(&ndns->dev);
- nd_dax = nd_dax_alloc(nd_region);
- dax_dev = nd_dax_devinit(nd_dax, ndns);
- nvdimm_bus_unlock(&ndns->dev);
- if (!dax_dev)
- return -ENOMEM;
+ scoped_guard(nvdimm_bus, &ndns->dev) {
+ nd_dax = nd_dax_alloc(nd_region);
+ dax_dev = nd_dax_devinit(nd_dax, ndns);
+ if (!dax_dev)
+ return -ENOMEM;
+ }
pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
nd_pfn = &nd_dax->nd_pfn;
nd_pfn->pfn_sb = pfn_sb;
diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c
index 91d9163ee303..2f6c26cc6a3e 100644
--- a/drivers/nvdimm/dimm.c
+++ b/drivers/nvdimm/dimm.c
@@ -117,9 +117,8 @@ static void nvdimm_remove(struct device *dev)
{
struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
- nvdimm_bus_lock(dev);
- dev_set_drvdata(dev, NULL);
- nvdimm_bus_unlock(dev);
+ scoped_guard(nvdimm_bus, dev)
+ dev_set_drvdata(dev, NULL);
put_ndd(ndd);
}
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 21498d461fde..e1349ef5f8fd 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -226,10 +226,10 @@ void nvdimm_drvdata_release(struct kref *kref)
struct resource *res, *_r;
dev_dbg(dev, "trace\n");
- nvdimm_bus_lock(dev);
- for_each_dpa_resource_safe(ndd, res, _r)
- nvdimm_free_dpa(ndd, res);
- nvdimm_bus_unlock(dev);
+ scoped_guard(nvdimm_bus, dev) {
+ for_each_dpa_resource_safe(ndd, res, _r)
+ nvdimm_free_dpa(ndd, res);
+ }
kvfree(ndd->data);
kfree(ndd);
@@ -319,23 +319,20 @@ static DEVICE_ATTR_RO(state);
static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf)
{
struct device *dev;
- ssize_t rc;
u32 nfree;
if (!ndd)
return -ENXIO;
dev = ndd->dev;
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
nfree = nd_label_nfree(ndd);
if (nfree - 1 > nfree) {
dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
nfree = 0;
} else
nfree--;
- rc = sprintf(buf, "%d\n", nfree);
- nvdimm_bus_unlock(dev);
- return rc;
+ return sprintf(buf, "%d\n", nfree);
}
static ssize_t available_slots_show(struct device *dev,
@@ -388,21 +385,15 @@ static ssize_t security_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
- ssize_t rc;
-
/*
* Require all userspace triggered security management to be
* done while probing is idle and the DIMM is not in active use
* in any region.
*/
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
wait_nvdimm_bus_probe_idle(dev);
- rc = nvdimm_security_store(dev, buf, len);
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
-
- return rc;
+ return nvdimm_security_store(dev, buf, len);
}
static DEVICE_ATTR_RW(security);
@@ -454,9 +445,8 @@ static ssize_t result_show(struct device *dev, struct device_attribute *attr, ch
if (!nvdimm->fw_ops)
return -EOPNOTSUPP;
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
result = nvdimm->fw_ops->activate_result(nvdimm);
- nvdimm_bus_unlock(dev);
switch (result) {
case NVDIMM_FWA_RESULT_NONE:
@@ -483,9 +473,8 @@ static ssize_t activate_show(struct device *dev, struct device_attribute *attr,
if (!nvdimm->fw_ops)
return -EOPNOTSUPP;
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
state = nvdimm->fw_ops->activate_state(nvdimm);
- nvdimm_bus_unlock(dev);
switch (state) {
case NVDIMM_FWA_IDLE:
@@ -516,9 +505,8 @@ static ssize_t activate_store(struct device *dev, struct device_attribute *attr,
else
return -EINVAL;
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
rc = nvdimm->fw_ops->arm(nvdimm, arg);
- nvdimm_bus_unlock(dev);
if (rc < 0)
return rc;
@@ -545,9 +533,8 @@ static umode_t nvdimm_firmware_visible(struct kobject *kobj, struct attribute *a
if (!nvdimm->fw_ops)
return 0;
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
cap = nd_desc->fw_ops->capability(nd_desc);
- nvdimm_bus_unlock(dev);
if (cap < NVDIMM_FWA_CAP_QUIESCE)
return 0;
@@ -641,11 +628,10 @@ void nvdimm_delete(struct nvdimm *nvdimm)
bool dev_put = false;
/* We are shutting down. Make state frozen artificially. */
- nvdimm_bus_lock(dev);
- set_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags);
- if (test_and_clear_bit(NDD_WORK_PENDING, &nvdimm->flags))
- dev_put = true;
- nvdimm_bus_unlock(dev);
+ scoped_guard(nvdimm_bus, dev) {
+ set_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags);
+ dev_put = test_and_clear_bit(NDD_WORK_PENDING, &nvdimm->flags);
+ }
cancel_delayed_work_sync(&nvdimm->dwork);
if (dev_put)
put_device(dev);
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 55cfbf1e0a95..a5edcacfe46d 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -264,15 +264,13 @@ static ssize_t alt_name_store(struct device *dev,
struct nd_region *nd_region = to_nd_region(dev->parent);
ssize_t rc;
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
wait_nvdimm_bus_probe_idle(dev);
rc = __alt_name_store(dev, buf, len);
if (rc >= 0)
rc = nd_namespace_label_update(nd_region, dev);
dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
return rc < 0 ? rc : len;
}
@@ -849,8 +847,8 @@ static ssize_t size_store(struct device *dev,
if (rc)
return rc;
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
wait_nvdimm_bus_probe_idle(dev);
rc = __size_store(dev, val);
if (rc >= 0)
@@ -866,9 +864,6 @@ static ssize_t size_store(struct device *dev,
dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc);
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
-
return rc < 0 ? rc : len;
}
@@ -891,13 +886,8 @@ resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
- resource_size_t size;
-
- nvdimm_bus_lock(&ndns->dev);
- size = __nvdimm_namespace_capacity(ndns);
- nvdimm_bus_unlock(&ndns->dev);
-
- return size;
+ guard(nvdimm_bus)(&ndns->dev);
+ return __nvdimm_namespace_capacity(ndns);
}
EXPORT_SYMBOL(nvdimm_namespace_capacity);
@@ -1044,8 +1034,8 @@ static ssize_t uuid_store(struct device *dev,
} else
return -ENXIO;
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
wait_nvdimm_bus_probe_idle(dev);
if (to_ndns(dev)->claim)
rc = -EBUSY;
@@ -1059,8 +1049,6 @@ static ssize_t uuid_store(struct device *dev,
kfree(uuid);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
return rc < 0 ? rc : len;
}
@@ -1119,20 +1107,30 @@ static ssize_t sector_size_store(struct device *dev,
} else
return -ENXIO;
- device_lock(dev);
- nvdimm_bus_lock(dev);
- if (to_ndns(dev)->claim)
- rc = -EBUSY;
- if (rc >= 0)
- rc = nd_size_select_store(dev, buf, lbasize, supported);
- if (rc >= 0)
- rc = nd_namespace_label_update(nd_region, dev);
- dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote",
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
+ if (to_ndns(dev)->claim) {
+ dev_dbg(dev, "namespace %s already claimed\n", dev_name(dev));
+ return -EBUSY;
+ }
+
+ rc = nd_size_select_store(dev, buf, lbasize, supported);
+ if (rc < 0) {
+ dev_dbg(dev, "size select fail: %zd tried: %s%s", rc,
buf, buf[len - 1] == '\n' ? "" : "\n");
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ return rc;
+ }
+
+ rc = nd_namespace_label_update(nd_region, dev);
+ if (rc < 0) {
+ dev_dbg(dev, "label update fail: %zd tried: %s%s",
+ rc, buf, buf[len - 1] == '\n' ? "" : "\n");
+ return rc;
+ }
+
+ dev_dbg(dev, "wrote: %s%s", buf, buf[len - 1] == '\n' ? "" : "\n");
- return rc ? rc : len;
+ return len;
}
static DEVICE_ATTR_RW(sector_size);
@@ -1145,7 +1143,7 @@ static ssize_t dpa_extents_show(struct device *dev,
int count = 0, i;
u32 flags = 0;
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
if (is_namespace_pmem(dev)) {
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
@@ -1154,7 +1152,7 @@ static ssize_t dpa_extents_show(struct device *dev,
}
if (!uuid)
- goto out;
+ return sprintf(buf, "%d\n", count);
nd_label_gen_id(&label_id, uuid, flags);
for (i = 0; i < nd_region->ndr_mappings; i++) {
@@ -1166,8 +1164,6 @@ static ssize_t dpa_extents_show(struct device *dev,
if (strcmp(res->name, label_id.id) == 0)
count++;
}
- out:
- nvdimm_bus_unlock(dev);
return sprintf(buf, "%d\n", count);
}
@@ -1279,15 +1275,13 @@ static ssize_t holder_class_store(struct device *dev,
struct nd_region *nd_region = to_nd_region(dev->parent);
int rc;
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
wait_nvdimm_bus_probe_idle(dev);
rc = __holder_class_store(dev, buf);
if (rc >= 0)
rc = nd_namespace_label_update(nd_region, dev);
dev_dbg(dev, "%s(%d)\n", rc < 0 ? "fail " : "", rc);
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
return rc < 0 ? rc : len;
}
@@ -1983,7 +1977,7 @@ static struct device **scan_labels(struct nd_region *nd_region)
}
dev_dbg(&nd_region->dev, "discovered %d namespace%s\n", count,
- count == 1 ? "" : "s");
+ str_plural(count));
if (count == 0) {
struct nd_namespace_pmem *nspm;
@@ -2152,31 +2146,38 @@ out:
nd_region);
}
-int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
+static int create_relevant_namespaces(struct nd_region *nd_region, int *type,
+ struct device ***devs)
{
- struct device **devs = NULL;
- int i, rc = 0, type;
+ int rc;
- *err = 0;
- nvdimm_bus_lock(&nd_region->dev);
+ guard(nvdimm_bus)(&nd_region->dev);
rc = init_active_labels(nd_region);
- if (rc) {
- nvdimm_bus_unlock(&nd_region->dev);
+ if (rc)
return rc;
- }
- type = nd_region_to_nstype(nd_region);
- switch (type) {
+ *type = nd_region_to_nstype(nd_region);
+ switch (*type) {
case ND_DEVICE_NAMESPACE_IO:
- devs = create_namespace_io(nd_region);
+ *devs = create_namespace_io(nd_region);
break;
case ND_DEVICE_NAMESPACE_PMEM:
- devs = create_namespaces(nd_region);
- break;
- default:
+ *devs = create_namespaces(nd_region);
break;
}
- nvdimm_bus_unlock(&nd_region->dev);
+
+ return 0;
+}
+
+int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
+{
+ struct device **devs = NULL;
+ int i, rc = 0, type;
+
+ *err = 0;
+ rc = create_relevant_namespaces(nd_region, &type, &devs);
+ if (rc)
+ return rc;
if (!devs)
return -ENODEV;
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index cc5c8f3f81e8..b199eea3260e 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -632,6 +632,9 @@ u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region);
void nvdimm_bus_lock(struct device *dev);
void nvdimm_bus_unlock(struct device *dev);
+DEFINE_GUARD(nvdimm_bus, struct device *,
+ if (_T) nvdimm_bus_lock(_T), if (_T) nvdimm_bus_unlock(_T));
+
bool is_nvdimm_bus_locked(struct device *dev);
void nvdimm_check_and_set_ro(struct gendisk *disk);
void nvdimm_drvdata_release(struct kref *kref);
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 8f3e816e805d..42b172fc5576 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -56,30 +56,26 @@ static ssize_t mode_store(struct device *dev,
{
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc = 0;
+ size_t n = len - 1;
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
if (dev->driver)
- rc = -EBUSY;
- else {
- size_t n = len - 1;
-
- if (strncmp(buf, "pmem\n", n) == 0
- || strncmp(buf, "pmem", n) == 0) {
- nd_pfn->mode = PFN_MODE_PMEM;
- } else if (strncmp(buf, "ram\n", n) == 0
- || strncmp(buf, "ram", n) == 0)
- nd_pfn->mode = PFN_MODE_RAM;
- else if (strncmp(buf, "none\n", n) == 0
- || strncmp(buf, "none", n) == 0)
- nd_pfn->mode = PFN_MODE_NONE;
- else
- rc = -EINVAL;
- }
+ return -EBUSY;
+
+ if (strncmp(buf, "pmem\n", n) == 0
+ || strncmp(buf, "pmem", n) == 0) {
+ nd_pfn->mode = PFN_MODE_PMEM;
+ } else if (strncmp(buf, "ram\n", n) == 0
+ || strncmp(buf, "ram", n) == 0)
+ nd_pfn->mode = PFN_MODE_RAM;
+ else if (strncmp(buf, "none\n", n) == 0
+ || strncmp(buf, "none", n) == 0)
+ nd_pfn->mode = PFN_MODE_NONE;
+ else
+ rc = -EINVAL;
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
return rc ? rc : len;
}
@@ -125,14 +121,12 @@ static ssize_t align_store(struct device *dev,
unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, };
ssize_t rc;
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
rc = nd_size_select_store(dev, buf, &nd_pfn->align,
nd_pfn_supported_alignments(aligns));
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
return rc ? rc : len;
}
@@ -168,13 +162,10 @@ static ssize_t namespace_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
- ssize_t rc;
- nvdimm_bus_lock(dev);
- rc = sprintf(buf, "%s\n", nd_pfn->ndns
+ guard(nvdimm_bus)(dev);
+ return sprintf(buf, "%s\n", nd_pfn->ndns
? dev_name(&nd_pfn->ndns->dev) : "");
- nvdimm_bus_unlock(dev);
- return rc;
}
static ssize_t namespace_store(struct device *dev,
@@ -183,13 +174,11 @@ static ssize_t namespace_store(struct device *dev,
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
return rc;
}
@@ -639,10 +628,10 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
return -ENODEV;
}
- nvdimm_bus_lock(&ndns->dev);
- nd_pfn = nd_pfn_alloc(nd_region);
- pfn_dev = nd_pfn_devinit(nd_pfn, ndns);
- nvdimm_bus_unlock(&ndns->dev);
+ scoped_guard(nvdimm_bus, &ndns->dev) {
+ nd_pfn = nd_pfn_alloc(nd_region);
+ pfn_dev = nd_pfn_devinit(nd_pfn, ndns);
+ }
if (!pfn_dev)
return -ENOMEM;
pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index 88dc062af5f8..cd9b52040d7b 100644
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -70,7 +70,7 @@ static int nd_region_probe(struct device *dev)
* "<async-registered>/<total>" namespace count.
*/
dev_err(dev, "failed to register %d namespace%s, continuing...\n",
- err, err == 1 ? "" : "s");
+ err, str_plural(err));
return 0;
}
@@ -87,13 +87,13 @@ static void nd_region_remove(struct device *dev)
device_for_each_child(dev, NULL, child_unregister);
/* flush attribute readers and disable */
- nvdimm_bus_lock(dev);
- nd_region->ns_seed = NULL;
- nd_region->btt_seed = NULL;
- nd_region->pfn_seed = NULL;
- nd_region->dax_seed = NULL;
- dev_set_drvdata(dev, NULL);
- nvdimm_bus_unlock(dev);
+ scoped_guard(nvdimm_bus, dev) {
+ nd_region->ns_seed = NULL;
+ nd_region->btt_seed = NULL;
+ nd_region->pfn_seed = NULL;
+ nd_region->dax_seed = NULL;
+ dev_set_drvdata(dev, NULL);
+ }
/*
* Note, this assumes device_lock() context to not race
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index de1ee5ebc851..a5ceaf5db595 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -102,31 +102,44 @@ out:
return 0;
}
-int nd_region_activate(struct nd_region *nd_region)
+static int get_flush_data(struct nd_region *nd_region, size_t *size, int *num_flush)
{
- int i, j, rc, num_flush = 0;
- struct nd_region_data *ndrd;
- struct device *dev = &nd_region->dev;
size_t flush_data_size = sizeof(void *);
+ int _num_flush = 0;
+ int i;
- nvdimm_bus_lock(&nd_region->dev);
+ guard(nvdimm_bus)(&nd_region->dev);
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm *nvdimm = nd_mapping->nvdimm;
- if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
- nvdimm_bus_unlock(&nd_region->dev);
+ if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags))
return -EBUSY;
- }
/* at least one null hint slot per-dimm for the "no-hint" case */
flush_data_size += sizeof(void *);
- num_flush = min_not_zero(num_flush, nvdimm->num_flush);
+ _num_flush = min_not_zero(_num_flush, nvdimm->num_flush);
if (!nvdimm->num_flush)
continue;
flush_data_size += nvdimm->num_flush * sizeof(void *);
}
- nvdimm_bus_unlock(&nd_region->dev);
+
+ *size = flush_data_size;
+ *num_flush = _num_flush;
+
+ return 0;
+}
+
+int nd_region_activate(struct nd_region *nd_region)
+{
+ int i, j, rc, num_flush;
+ struct nd_region_data *ndrd;
+ struct device *dev = &nd_region->dev;
+ size_t flush_data_size;
+
+ rc = get_flush_data(nd_region, &flush_data_size, &num_flush);
+ if (rc)
+ return rc;
rc = nd_region_invalidate_memregion(nd_region);
if (rc)
@@ -327,8 +340,8 @@ static ssize_t set_cookie_show(struct device *dev,
* the v1.1 namespace label cookie definition. To read all this
* data we need to wait for probing to settle.
*/
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
wait_nvdimm_bus_probe_idle(dev);
if (nd_region->ndr_mappings) {
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
@@ -343,8 +356,6 @@ static ssize_t set_cookie_show(struct device *dev,
nsindex));
}
}
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
if (rc)
return rc;
@@ -393,7 +404,6 @@ static ssize_t available_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
- unsigned long long available = 0;
/*
* Flush in-flight updates and grab a snapshot of the available
@@ -401,14 +411,11 @@ static ssize_t available_size_show(struct device *dev,
* memory nvdimm_bus_lock() is dropped, but that's userspace's
* problem to not race itself.
*/
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
wait_nvdimm_bus_probe_idle(dev);
- available = nd_region_available_dpa(nd_region);
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
- return sprintf(buf, "%llu\n", available);
+ return sprintf(buf, "%llu\n", nd_region_available_dpa(nd_region));
}
static DEVICE_ATTR_RO(available_size);
@@ -416,16 +423,12 @@ static ssize_t max_available_extent_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
- unsigned long long available = 0;
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
wait_nvdimm_bus_probe_idle(dev);
- available = nd_region_allocatable_dpa(nd_region);
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
- return sprintf(buf, "%llu\n", available);
+ return sprintf(buf, "%llu\n", nd_region_allocatable_dpa(nd_region));
}
static DEVICE_ATTR_RO(max_available_extent);
@@ -433,16 +436,12 @@ static ssize_t init_namespaces_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region_data *ndrd = dev_get_drvdata(dev);
- ssize_t rc;
- nvdimm_bus_lock(dev);
- if (ndrd)
- rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
- else
- rc = -ENXIO;
- nvdimm_bus_unlock(dev);
+ guard(nvdimm_bus)(dev);
+ if (!ndrd)
+ return -ENXIO;
- return rc;
+ return sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
}
static DEVICE_ATTR_RO(init_namespaces);
@@ -450,15 +449,12 @@ static ssize_t namespace_seed_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
- ssize_t rc;
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
if (nd_region->ns_seed)
- rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
- else
- rc = sprintf(buf, "\n");
- nvdimm_bus_unlock(dev);
- return rc;
+ return sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
+
+ return sprintf(buf, "\n");
}
static DEVICE_ATTR_RO(namespace_seed);
@@ -466,16 +462,12 @@ static ssize_t btt_seed_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
- ssize_t rc;
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
if (nd_region->btt_seed)
- rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
- else
- rc = sprintf(buf, "\n");
- nvdimm_bus_unlock(dev);
+ return sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
- return rc;
+ return sprintf(buf, "\n");
}
static DEVICE_ATTR_RO(btt_seed);
@@ -483,16 +475,12 @@ static ssize_t pfn_seed_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
- ssize_t rc;
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
if (nd_region->pfn_seed)
- rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
- else
- rc = sprintf(buf, "\n");
- nvdimm_bus_unlock(dev);
+ return sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
- return rc;
+ return sprintf(buf, "\n");
}
static DEVICE_ATTR_RO(pfn_seed);
@@ -500,16 +488,12 @@ static ssize_t dax_seed_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
- ssize_t rc;
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
if (nd_region->dax_seed)
- rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
- else
- rc = sprintf(buf, "\n");
- nvdimm_bus_unlock(dev);
+ return sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
- return rc;
+ return sprintf(buf, "\n");
}
static DEVICE_ATTR_RO(dax_seed);
@@ -581,9 +565,8 @@ static ssize_t align_store(struct device *dev,
* times ensure it does not change for the duration of the
* allocation.
*/
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
nd_region->align = val;
- nvdimm_bus_unlock(dev);
return len;
}
@@ -890,7 +873,7 @@ void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
*/
void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev)
{
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
if (nd_region->ns_seed == dev) {
nd_region_create_ns_seed(nd_region);
} else if (is_nd_btt(dev)) {
@@ -915,7 +898,6 @@ void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev)
if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
nd_region_create_ns_seed(nd_region);
}
- nvdimm_bus_unlock(dev);
}
/**
diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c
index a03e3c45f297..4adce8c38870 100644
--- a/drivers/nvdimm/security.c
+++ b/drivers/nvdimm/security.c
@@ -219,12 +219,9 @@ static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
int nvdimm_security_unlock(struct device *dev)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
- int rc;
- nvdimm_bus_lock(dev);
- rc = __nvdimm_security_unlock(nvdimm);
- nvdimm_bus_unlock(dev);
- return rc;
+ guard(nvdimm_bus)(dev);
+ return __nvdimm_security_unlock(nvdimm);
}
static int check_security_state(struct nvdimm *nvdimm)
@@ -490,9 +487,8 @@ void nvdimm_security_overwrite_query(struct work_struct *work)
struct nvdimm *nvdimm =
container_of(work, typeof(*nvdimm), dwork.work);
- nvdimm_bus_lock(&nvdimm->dev);
+ guard(nvdimm_bus)(&nvdimm->dev);
__nvdimm_security_overwrite_query(nvdimm);
- nvdimm_bus_unlock(&nvdimm->dev);
}
#define OPS \
diff --git a/drivers/nvme/common/auth.c b/drivers/nvme/common/auth.c
index 91e273b89fea..1f51fbebd9fa 100644
--- a/drivers/nvme/common/auth.c
+++ b/drivers/nvme/common/auth.c
@@ -684,6 +684,59 @@ out_free_enc:
EXPORT_SYMBOL_GPL(nvme_auth_generate_digest);
/**
+ * hkdf_expand_label - HKDF-Expand-Label (RFC 8446 section 7.1)
+ * @hmac_tfm: hash context keyed with pseudorandom key
+ * @label: ASCII label without "tls13 " prefix
+ * @labellen: length of @label
+ * @context: context bytes
+ * @contextlen: length of @context
+ * @okm: output keying material
+ * @okmlen: length of @okm
+ *
+ * Build the TLS 1.3 HkdfLabel structure and invoke hkdf_expand().
+ *
+ * Returns 0 on success with output keying material stored in @okm,
+ * or a negative errno value otherwise.
+ */
+static int hkdf_expand_label(struct crypto_shash *hmac_tfm,
+ const u8 *label, unsigned int labellen,
+ const u8 *context, unsigned int contextlen,
+ u8 *okm, unsigned int okmlen)
+{
+ int err;
+ u8 *info;
+ unsigned int infolen;
+ const char *tls13_prefix = "tls13 ";
+ unsigned int prefixlen = strlen(tls13_prefix);
+
+ if (WARN_ON(labellen > (255 - prefixlen)))
+ return -EINVAL;
+ if (WARN_ON(contextlen > 255))
+ return -EINVAL;
+
+ infolen = 2 + (1 + prefixlen + labellen) + (1 + contextlen);
+ info = kzalloc(infolen, GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ /* HkdfLabel.Length */
+ put_unaligned_be16(okmlen, info);
+
+ /* HkdfLabel.Label */
+ info[2] = prefixlen + labellen;
+ memcpy(info + 3, tls13_prefix, prefixlen);
+ memcpy(info + 3 + prefixlen, label, labellen);
+
+ /* HkdfLabel.Context */
+ info[3 + prefixlen + labellen] = contextlen;
+ memcpy(info + 4 + prefixlen + labellen, context, contextlen);
+
+ err = hkdf_expand(hmac_tfm, info, infolen, okm, okmlen);
+ kfree_sensitive(info);
+ return err;
+}
+
+/**
* nvme_auth_derive_tls_psk - Derive TLS PSK
* @hmac_id: Hash function identifier
* @psk: generated input PSK
@@ -715,10 +768,10 @@ int nvme_auth_derive_tls_psk(int hmac_id, u8 *psk, size_t psk_len,
{
struct crypto_shash *hmac_tfm;
const char *hmac_name;
- const char *psk_prefix = "tls13 nvme-tls-psk";
+ const char *label = "nvme-tls-psk";
static const char default_salt[HKDF_MAX_HASHLEN];
- size_t info_len, prk_len;
- char *info;
+ size_t prk_len;
+ const char *ctx;
unsigned char *prk, *tls_key;
int ret;
@@ -758,36 +811,29 @@ int nvme_auth_derive_tls_psk(int hmac_id, u8 *psk, size_t psk_len,
if (ret)
goto out_free_prk;
- /*
- * 2 additional bytes for the length field from HDKF-Expand-Label,
- * 2 additional bytes for the HMAC ID, and one byte for the space
- * separator.
- */
- info_len = strlen(psk_digest) + strlen(psk_prefix) + 5;
- info = kzalloc(info_len + 1, GFP_KERNEL);
- if (!info) {
+ ctx = kasprintf(GFP_KERNEL, "%02d %s", hmac_id, psk_digest);
+ if (!ctx) {
ret = -ENOMEM;
goto out_free_prk;
}
- put_unaligned_be16(psk_len, info);
- memcpy(info + 2, psk_prefix, strlen(psk_prefix));
- sprintf(info + 2 + strlen(psk_prefix), "%02d %s", hmac_id, psk_digest);
-
tls_key = kzalloc(psk_len, GFP_KERNEL);
if (!tls_key) {
ret = -ENOMEM;
- goto out_free_info;
+ goto out_free_ctx;
}
- ret = hkdf_expand(hmac_tfm, info, info_len, tls_key, psk_len);
+ ret = hkdf_expand_label(hmac_tfm,
+ label, strlen(label),
+ ctx, strlen(ctx),
+ tls_key, psk_len);
if (ret) {
kfree(tls_key);
- goto out_free_info;
+ goto out_free_ctx;
}
*ret_psk = tls_key;
-out_free_info:
- kfree(info);
+out_free_ctx:
+ kfree(ctx);
out_free_prk:
kfree(prk);
out_free_shash:
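
To make the hkdf_expand_label() kernel-doc concrete, here is the info buffer it would serialize for a hypothetical call with label "nvme-tls-psk", context "01 sha256-digest" and okmlen = 32; the layout follows the HkdfLabel definition in RFC 8446 section 7.1 (the context string here is made up for illustration):

/*
 * infolen = 2 + (1 + 6 + 12) + (1 + 16) = 38 bytes
 *
 *   info[0..1]    00 20                  HkdfLabel.Length = okmlen (BE16)
 *   info[2]       0x12 (18)              length of "tls13 " + label
 *   info[3..20]   "tls13 nvme-tls-psk"   prefix + label
 *   info[21]      0x10 (16)              length of context
 *   info[22..37]  "01 sha256-digest"     context bytes
 */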
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index 1286c31320e6..f35d3f71d14f 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -35,7 +35,6 @@
#include "nvme.h"
#define APPLE_ANS_BOOT_TIMEOUT USEC_PER_SEC
-#define APPLE_ANS_MAX_QUEUE_DEPTH 64
#define APPLE_ANS_COPROC_CPU_CONTROL 0x44
#define APPLE_ANS_COPROC_CPU_CONTROL_RUN BIT(4)
@@ -75,6 +74,8 @@
#define APPLE_NVME_AQ_DEPTH 2
#define APPLE_NVME_AQ_MQ_TAG_DEPTH (APPLE_NVME_AQ_DEPTH - 1)
+#define APPLE_NVME_IOSQES 7
+
/*
* These can be higher, but we need to ensure that any command doesn't
* require an sg allocation that needs more than a page of data.
@@ -142,6 +143,7 @@ struct apple_nvme_queue {
u32 __iomem *sq_db;
u32 __iomem *cq_db;
+ u16 sq_tail;
u16 cq_head;
u8 cq_phase;
@@ -166,11 +168,17 @@ struct apple_nvme_iod {
struct scatterlist *sg;
};
+struct apple_nvme_hw {
+ bool has_lsq_nvmmu;
+ u32 max_queue_depth;
+};
+
struct apple_nvme {
struct device *dev;
void __iomem *mmio_coproc;
void __iomem *mmio_nvme;
+ const struct apple_nvme_hw *hw;
struct device **pd_dev;
struct device_link **pd_link;
@@ -215,10 +223,12 @@ static inline struct apple_nvme *queue_to_apple_nvme(struct apple_nvme_queue *q)
static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q)
{
- if (q->is_adminq)
+ struct apple_nvme *anv = queue_to_apple_nvme(q);
+
+ if (q->is_adminq && anv->hw->has_lsq_nvmmu)
return APPLE_NVME_AQ_DEPTH;
- return APPLE_ANS_MAX_QUEUE_DEPTH;
+ return anv->hw->max_queue_depth;
}
static void apple_nvme_rtkit_crashed(void *cookie, const void *crashlog, size_t crashlog_size)
@@ -280,7 +290,28 @@ static void apple_nvmmu_inval(struct apple_nvme_queue *q, unsigned int tag)
"NVMMU TCB invalidation failed\n");
}
-static void apple_nvme_submit_cmd(struct apple_nvme_queue *q,
+static void apple_nvme_submit_cmd_t8015(struct apple_nvme_queue *q,
+ struct nvme_command *cmd)
+{
+ struct apple_nvme *anv = queue_to_apple_nvme(q);
+
+ spin_lock_irq(&anv->lock);
+
+ if (q->is_adminq)
+ memcpy(&q->sqes[q->sq_tail], cmd, sizeof(*cmd));
+ else
+ memcpy((void *)q->sqes + (q->sq_tail << APPLE_NVME_IOSQES),
+ cmd, sizeof(*cmd));
+
+ if (++q->sq_tail == anv->hw->max_queue_depth)
+ q->sq_tail = 0;
+
+ writel(q->sq_tail, q->sq_db);
+ spin_unlock_irq(&anv->lock);
+}
+
+static void apple_nvme_submit_cmd_t8103(struct apple_nvme_queue *q,
struct nvme_command *cmd)
{
struct apple_nvme *anv = queue_to_apple_nvme(q);
@@ -590,7 +621,8 @@ static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q,
__u16 command_id = READ_ONCE(cqe->command_id);
struct request *req;
- apple_nvmmu_inval(q, command_id);
+ if (anv->hw->has_lsq_nvmmu)
+ apple_nvmmu_inval(q, command_id);
req = nvme_find_rq(apple_nvme_queue_tagset(anv, q), command_id);
if (unlikely(!req)) {
@@ -685,7 +717,7 @@ static int apple_nvme_create_cq(struct apple_nvme *anv)
c.create_cq.opcode = nvme_admin_create_cq;
c.create_cq.prp1 = cpu_to_le64(anv->ioq.cq_dma_addr);
c.create_cq.cqid = cpu_to_le16(1);
- c.create_cq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1);
+ c.create_cq.qsize = cpu_to_le16(anv->hw->max_queue_depth - 1);
c.create_cq.cq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED);
c.create_cq.irq_vector = cpu_to_le16(0);
@@ -713,7 +745,7 @@ static int apple_nvme_create_sq(struct apple_nvme *anv)
c.create_sq.opcode = nvme_admin_create_sq;
c.create_sq.prp1 = cpu_to_le64(anv->ioq.sq_dma_addr);
c.create_sq.sqid = cpu_to_le16(1);
- c.create_sq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1);
+ c.create_sq.qsize = cpu_to_le16(anv->hw->max_queue_depth - 1);
c.create_sq.sq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG);
c.create_sq.cqid = cpu_to_le16(1);
@@ -765,7 +797,12 @@ static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
}
nvme_start_request(req);
- apple_nvme_submit_cmd(q, cmnd);
+
+ if (anv->hw->has_lsq_nvmmu)
+ apple_nvme_submit_cmd_t8103(q, cmnd);
+ else
+ apple_nvme_submit_cmd_t8015(q, cmnd);
+
return BLK_STS_OK;
out_free_cmd:
@@ -970,11 +1007,13 @@ static const struct blk_mq_ops apple_nvme_mq_ops = {
static void apple_nvme_init_queue(struct apple_nvme_queue *q)
{
unsigned int depth = apple_nvme_queue_depth(q);
+ struct apple_nvme *anv = queue_to_apple_nvme(q);
q->cq_head = 0;
q->cq_phase = 1;
- memset(q->tcbs, 0,
- APPLE_ANS_MAX_QUEUE_DEPTH * sizeof(struct apple_nvmmu_tcb));
+ if (anv->hw->has_lsq_nvmmu)
+ memset(q->tcbs, 0, anv->hw->max_queue_depth
+ * sizeof(struct apple_nvmmu_tcb));
memset(q->cqes, 0, depth * sizeof(struct nvme_completion));
WRITE_ONCE(q->enabled, true);
wmb(); /* ensure the first interrupt sees the initialization */
@@ -1069,49 +1108,55 @@ static void apple_nvme_reset_work(struct work_struct *work)
dma_set_max_seg_size(anv->dev, 0xffffffff);
- /*
- * Enable NVMMU and linear submission queues.
- * While we could keep those disabled and pretend this is slightly
- * more common NVMe controller we'd still need some quirks (e.g.
- * sq entries will be 128 bytes) and Apple might drop support for
- * that mode in the future.
- */
- writel(APPLE_ANS_LINEAR_SQ_EN,
- anv->mmio_nvme + APPLE_ANS_LINEAR_SQ_CTRL);
+ if (anv->hw->has_lsq_nvmmu) {
+ /*
+		 * Enable NVMMU and linear submission queues, which is required
+ * since T6000.
+ */
+ writel(APPLE_ANS_LINEAR_SQ_EN,
+ anv->mmio_nvme + APPLE_ANS_LINEAR_SQ_CTRL);
- /* Allow as many pending command as possible for both queues */
- writel(APPLE_ANS_MAX_QUEUE_DEPTH | (APPLE_ANS_MAX_QUEUE_DEPTH << 16),
- anv->mmio_nvme + APPLE_ANS_MAX_PEND_CMDS_CTRL);
+		/* Allow as many pending commands as possible for both queues */
+ writel(anv->hw->max_queue_depth
+ | (anv->hw->max_queue_depth << 16), anv->mmio_nvme
+ + APPLE_ANS_MAX_PEND_CMDS_CTRL);
- /* Setup the NVMMU for the maximum admin and IO queue depth */
- writel(APPLE_ANS_MAX_QUEUE_DEPTH - 1,
- anv->mmio_nvme + APPLE_NVMMU_NUM_TCBS);
+ /* Setup the NVMMU for the maximum admin and IO queue depth */
+ writel(anv->hw->max_queue_depth - 1,
+ anv->mmio_nvme + APPLE_NVMMU_NUM_TCBS);
- /*
- * This is probably a chicken bit: without it all commands where any PRP
- * is set to zero (including those that don't use that field) fail and
- * the co-processor complains about "completed with err BAD_CMD-" or
- * a "NULL_PRP_PTR_ERR" in the syslog
- */
- writel(readl(anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL) &
- ~APPLE_ANS_PRP_NULL_CHECK,
- anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL);
+ /*
+ * This is probably a chicken bit: without it all commands
+ * where any PRP is set to zero (including those that don't use
+ * that field) fail and the co-processor complains about
+ * "completed with err BAD_CMD-" or a "NULL_PRP_PTR_ERR" in the
+ * syslog
+ */
+ writel(readl(anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL) &
+ ~APPLE_ANS_PRP_NULL_CHECK,
+ anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL);
+ }
/* Setup the admin queue */
- aqa = APPLE_NVME_AQ_DEPTH - 1;
+ if (anv->hw->has_lsq_nvmmu)
+ aqa = APPLE_NVME_AQ_DEPTH - 1;
+ else
+ aqa = anv->hw->max_queue_depth - 1;
aqa |= aqa << 16;
writel(aqa, anv->mmio_nvme + NVME_REG_AQA);
writeq(anv->adminq.sq_dma_addr, anv->mmio_nvme + NVME_REG_ASQ);
writeq(anv->adminq.cq_dma_addr, anv->mmio_nvme + NVME_REG_ACQ);
- /* Setup NVMMU for both queues */
- writeq(anv->adminq.tcb_dma_addr,
- anv->mmio_nvme + APPLE_NVMMU_ASQ_TCB_BASE);
- writeq(anv->ioq.tcb_dma_addr,
- anv->mmio_nvme + APPLE_NVMMU_IOSQ_TCB_BASE);
+ if (anv->hw->has_lsq_nvmmu) {
+ /* Setup NVMMU for both queues */
+ writeq(anv->adminq.tcb_dma_addr,
+ anv->mmio_nvme + APPLE_NVMMU_ASQ_TCB_BASE);
+ writeq(anv->ioq.tcb_dma_addr,
+ anv->mmio_nvme + APPLE_NVMMU_IOSQ_TCB_BASE);
+ }
anv->ctrl.sqsize =
- APPLE_ANS_MAX_QUEUE_DEPTH - 1; /* 0's based queue depth */
+ anv->hw->max_queue_depth - 1; /* 0's based queue depth */
anv->ctrl.cap = readq(anv->mmio_nvme + NVME_REG_CAP);
dev_dbg(anv->dev, "Enabling controller now");
@@ -1282,8 +1327,9 @@ static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
* both queues. The admin queue gets the first APPLE_NVME_AQ_DEPTH which
* must be marked as reserved in the IO queue.
*/
- anv->tagset.reserved_tags = APPLE_NVME_AQ_DEPTH;
- anv->tagset.queue_depth = APPLE_ANS_MAX_QUEUE_DEPTH - 1;
+ if (anv->hw->has_lsq_nvmmu)
+ anv->tagset.reserved_tags = APPLE_NVME_AQ_DEPTH;
+ anv->tagset.queue_depth = anv->hw->max_queue_depth - 1;
anv->tagset.timeout = NVME_IO_TIMEOUT;
anv->tagset.numa_node = NUMA_NO_NODE;
anv->tagset.cmd_size = sizeof(struct apple_nvme_iod);
@@ -1307,6 +1353,7 @@ static int apple_nvme_queue_alloc(struct apple_nvme *anv,
struct apple_nvme_queue *q)
{
unsigned int depth = apple_nvme_queue_depth(q);
+ size_t iosq_size;
q->cqes = dmam_alloc_coherent(anv->dev,
depth * sizeof(struct nvme_completion),
@@ -1314,22 +1361,28 @@ static int apple_nvme_queue_alloc(struct apple_nvme *anv,
if (!q->cqes)
return -ENOMEM;
- q->sqes = dmam_alloc_coherent(anv->dev,
- depth * sizeof(struct nvme_command),
+ if (anv->hw->has_lsq_nvmmu)
+ iosq_size = depth * sizeof(struct nvme_command);
+ else
+ iosq_size = depth << APPLE_NVME_IOSQES;
+
+ q->sqes = dmam_alloc_coherent(anv->dev, iosq_size,
&q->sq_dma_addr, GFP_KERNEL);
if (!q->sqes)
return -ENOMEM;
- /*
- * We need the maximum queue depth here because the NVMMU only has a
- * single depth configuration shared between both queues.
- */
- q->tcbs = dmam_alloc_coherent(anv->dev,
- APPLE_ANS_MAX_QUEUE_DEPTH *
- sizeof(struct apple_nvmmu_tcb),
- &q->tcb_dma_addr, GFP_KERNEL);
- if (!q->tcbs)
- return -ENOMEM;
+ if (anv->hw->has_lsq_nvmmu) {
+ /*
+ * We need the maximum queue depth here because the NVMMU only
+ * has a single depth configuration shared between both queues.
+ */
+ q->tcbs = dmam_alloc_coherent(anv->dev,
+ anv->hw->max_queue_depth *
+ sizeof(struct apple_nvmmu_tcb),
+ &q->tcb_dma_addr, GFP_KERNEL);
+ if (!q->tcbs)
+ return -ENOMEM;
+ }
/*
* initialize phase to make sure the allocated and empty memory
@@ -1413,6 +1466,12 @@ static struct apple_nvme *apple_nvme_alloc(struct platform_device *pdev)
anv->adminq.is_adminq = true;
platform_set_drvdata(pdev, anv);
+ anv->hw = of_device_get_match_data(&pdev->dev);
+ if (!anv->hw) {
+ ret = -ENODEV;
+ goto put_dev;
+ }
+
ret = apple_nvme_attach_genpd(anv);
if (ret < 0) {
dev_err_probe(dev, ret, "Failed to attach power domains");
@@ -1444,10 +1503,17 @@ static struct apple_nvme *apple_nvme_alloc(struct platform_device *pdev)
goto put_dev;
}
- anv->adminq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_ASQ_DB;
- anv->adminq.cq_db = anv->mmio_nvme + APPLE_ANS_ACQ_DB;
- anv->ioq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_IOSQ_DB;
- anv->ioq.cq_db = anv->mmio_nvme + APPLE_ANS_IOCQ_DB;
+ if (anv->hw->has_lsq_nvmmu) {
+ anv->adminq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_ASQ_DB;
+ anv->adminq.cq_db = anv->mmio_nvme + APPLE_ANS_ACQ_DB;
+ anv->ioq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_IOSQ_DB;
+ anv->ioq.cq_db = anv->mmio_nvme + APPLE_ANS_IOCQ_DB;
+ } else {
+ anv->adminq.sq_db = anv->mmio_nvme + NVME_REG_DBS;
+ anv->adminq.cq_db = anv->mmio_nvme + APPLE_ANS_ACQ_DB;
+ anv->ioq.sq_db = anv->mmio_nvme + NVME_REG_DBS + 8;
+ anv->ioq.cq_db = anv->mmio_nvme + APPLE_ANS_IOCQ_DB;
+ }
anv->sart = devm_apple_sart_get(dev);
if (IS_ERR(anv->sart)) {
@@ -1625,8 +1691,19 @@ static int apple_nvme_suspend(struct device *dev)
static DEFINE_SIMPLE_DEV_PM_OPS(apple_nvme_pm_ops, apple_nvme_suspend,
apple_nvme_resume);
+static const struct apple_nvme_hw apple_nvme_t8015_hw = {
+ .has_lsq_nvmmu = false,
+ .max_queue_depth = 16,
+};
+
+static const struct apple_nvme_hw apple_nvme_t8103_hw = {
+ .has_lsq_nvmmu = true,
+ .max_queue_depth = 64,
+};
+
static const struct of_device_id apple_nvme_of_match[] = {
- { .compatible = "apple,nvme-ans2" },
+ { .compatible = "apple,t8015-nvme-ans2", .data = &apple_nvme_t8015_hw },
+ { .compatible = "apple,nvme-ans2", .data = &apple_nvme_t8103_hw },
{},
};
MODULE_DEVICE_TABLE(of, apple_nvme_of_match);
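
A detail worth spelling out from the T8015 path above: the older controllers use 128-byte submission queue slots even though struct nvme_command is 64 bytes, so the tail offset is computed by shifting with APPLE_NVME_IOSQES (2^7 = 128) rather than multiplying by sizeof(struct nvme_command). A small sketch of the arithmetic, reusing the driver's names (assumptions, not verbatim driver code):

/* T8015-style I/O SQ: 128-byte slots holding 64-byte commands */
void *slot = (void *)q->sqes + (q->sq_tail << APPLE_NVME_IOSQES);

memcpy(slot, cmd, sizeof(*cmd));		/* sizeof(*cmd) == 64 */

/* matching allocation: depth slots of 1 << APPLE_NVME_IOSQES bytes */
size_t iosq_size = depth << APPLE_NVME_IOSQES;	/* depth * 128 */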
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index f6ddbe553289..012fcfc79a73 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -331,9 +331,10 @@ static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
} else {
memset(chap->c2, 0, chap->hash_len);
}
- if (ctrl->opts->concat)
+ if (ctrl->opts->concat) {
chap->s2 = 0;
- else
+ chap->bi_directional = false;
+ } else
chap->s2 = nvme_auth_get_seqnum();
data->seqnum = cpu_to_le32(chap->s2);
if (chap->host_key_len) {
@@ -742,7 +743,7 @@ static int nvme_auth_secure_concat(struct nvme_ctrl *ctrl,
"%s: qid %d failed to generate digest, error %d\n",
__func__, chap->qid, ret);
goto out_free_psk;
- };
+ }
dev_dbg(ctrl->device, "%s: generated digest %s\n",
__func__, digest);
ret = nvme_auth_derive_tls_psk(chap->hash_id, psk, psk_len,
@@ -752,7 +753,7 @@ static int nvme_auth_secure_concat(struct nvme_ctrl *ctrl,
"%s: qid %d failed to derive TLS psk, error %d\n",
__func__, chap->qid, ret);
goto out_free_digest;
- };
+ }
tls_key = nvme_tls_psk_refresh(ctrl->opts->keyring,
ctrl->opts->host->nqn,
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 9d988f4cb87a..fa4181d7de73 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -903,6 +903,15 @@ static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd,
u32 upper, lower;
u64 ref48;
+ /* only type1 and type 2 PI formats have a reftag */
+ switch (ns->head->pi_type) {
+ case NVME_NS_DPS_PI_TYPE1:
+ case NVME_NS_DPS_PI_TYPE2:
+ break;
+ default:
+ return;
+ }
+
/* both rw and write zeroes share the same reftag format */
switch (ns->head->guard_type) {
case NVME_NVM_NS_16B_GUARD:
@@ -942,13 +951,7 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
if (nvme_ns_has_pi(ns->head)) {
cmnd->write_zeroes.control |= cpu_to_le16(NVME_RW_PRINFO_PRACT);
-
- switch (ns->head->pi_type) {
- case NVME_NS_DPS_PI_TYPE1:
- case NVME_NS_DPS_PI_TYPE2:
- nvme_set_ref_tag(ns, cmnd, req);
- break;
- }
+ nvme_set_ref_tag(ns, cmnd, req);
}
return BLK_STS_OK;
@@ -1039,6 +1042,7 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
if (WARN_ON_ONCE(!nvme_ns_has_pi(ns->head)))
return BLK_STS_NOTSUPP;
control |= NVME_RW_PRINFO_PRACT;
+ nvme_set_ref_tag(ns, cmnd, req);
}
if (bio_integrity_flagged(req->bio, BIP_CHECK_GUARD))
@@ -1803,12 +1807,12 @@ static void nvme_release(struct gendisk *disk)
nvme_ns_release(disk->private_data);
}
-int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+int nvme_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
/* some standard values */
geo->heads = 1 << 6;
geo->sectors = 1 << 5;
- geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
+ geo->cylinders = get_capacity(disk) >> 11;
return 0;
}
@@ -3158,6 +3162,16 @@ static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl)
return ctrl->opts && ctrl->opts->discovery_nqn;
}
+static inline bool nvme_admin_ctrl(struct nvme_ctrl *ctrl)
+{
+ return ctrl->cntrltype == NVME_CTRL_ADMIN;
+}
+
+static inline bool nvme_is_io_ctrl(struct nvme_ctrl *ctrl)
+{
+ return !nvme_discovery_ctrl(ctrl) && !nvme_admin_ctrl(ctrl);
+}
+
static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
@@ -3360,7 +3374,7 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
else
ctrl->max_zeroes_sectors = 0;
- if (ctrl->subsys->subtype != NVME_NQN_NVME ||
+ if (!nvme_is_io_ctrl(ctrl) ||
!nvme_id_cns_ok(ctrl, NVME_ID_CNS_CS_CTRL) ||
test_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags))
return 0;
@@ -3482,14 +3496,14 @@ static int nvme_check_ctrl_fabric_info(struct nvme_ctrl *ctrl, struct nvme_id_ct
return -EINVAL;
}
- if (!nvme_discovery_ctrl(ctrl) && ctrl->ioccsz < 4) {
+ if (nvme_is_io_ctrl(ctrl) && ctrl->ioccsz < 4) {
dev_err(ctrl->device,
"I/O queue command capsule supported size %d < 4\n",
ctrl->ioccsz);
return -EINVAL;
}
- if (!nvme_discovery_ctrl(ctrl) && ctrl->iorcsz < 1) {
+ if (nvme_is_io_ctrl(ctrl) && ctrl->iorcsz < 1) {
dev_err(ctrl->device,
"I/O queue response capsule supported size %d < 1\n",
ctrl->iorcsz);
@@ -3670,6 +3684,17 @@ int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended)
if (ret)
return ret;
+ if (nvme_admin_ctrl(ctrl)) {
+ /*
+ * An admin controller has one admin queue, but no I/O queues.
+ * Override queue_count so it only creates an admin queue.
+ */
+ dev_dbg(ctrl->device,
+ "Subsystem %s is an administrative controller",
+ ctrl->subsys->subnqn);
+ ctrl->queue_count = 1;
+ }
+
ret = nvme_configure_apst(ctrl);
if (ret < 0)
return ret;
@@ -4970,8 +4995,14 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
* checking that they started once before, hence are reconnecting back.
*/
if (test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) &&
- nvme_discovery_ctrl(ctrl))
+ nvme_discovery_ctrl(ctrl)) {
+ if (!ctrl->kato) {
+ nvme_stop_keep_alive(ctrl);
+ ctrl->kato = NVME_DEFAULT_KATO;
+ nvme_start_keep_alive(ctrl);
+ }
nvme_change_uevent(ctrl, "NVME_EVENT=rediscover");
+ }
if (ctrl->queue_count > 1) {
nvme_queue_scan(ctrl);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 08a5ea3e9383..03987f497a5b 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1363,7 +1363,7 @@ nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
* down, and the related FC-NVME Association ID and Connection IDs
* become invalid.
*
- * The behavior of the fc-nvme initiator is such that it's
+ * The behavior of the fc-nvme initiator is such that its
* understanding of the association and connections will implicitly
* be torn down. The action is implicit as it may be due to a loss of
* connectivity with the fc-nvme target, so you may never get a
@@ -2777,7 +2777,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
* as WRITE ZEROES will return a non-zero rq payload_bytes yet
* there is no actual payload to be transferred.
* To get it right, key data transmission on there being 1 or
- * more physical segments in the sg list. If there is no
+ * more physical segments in the sg list. If there are no
* physical segments, there is no payload.
*/
if (blk_rq_nr_phys_segments(rq)) {
@@ -3032,11 +3032,17 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
++ctrl->ctrl.nr_reconnects;
- if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
+ spin_lock_irqsave(&ctrl->rport->lock, flags);
+ if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) {
+ spin_unlock_irqrestore(&ctrl->rport->lock, flags);
return -ENODEV;
+ }
- if (nvme_fc_ctlr_active_on_rport(ctrl))
+ if (nvme_fc_ctlr_active_on_rport(ctrl)) {
+ spin_unlock_irqrestore(&ctrl->rport->lock, flags);
return -ENOTUNIQ;
+ }
+ spin_unlock_irqrestore(&ctrl->rport->lock, flags);
dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: create association : host wwpn 0x%016llx "
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 6b3ac8ae3f34..c212fa952c0f 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -142,14 +142,9 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
bufflen, GFP_KERNEL, flags & NVME_IOCTL_VEC, 0,
0, rq_data_dir(req));
-
if (ret)
return ret;
- bio = req->bio;
- if (bdev)
- bio_set_dev(bio, bdev);
-
if (has_metadata) {
ret = blk_rq_integrity_map_user(req, meta_buffer, meta_len);
if (ret)
@@ -410,7 +405,7 @@ static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
if (pdu->bio)
blk_rq_unmap_user(pdu->bio);
- io_uring_cmd_done(ioucmd, pdu->status, pdu->result, issue_flags);
+ io_uring_cmd_done32(ioucmd, pdu->status, pdu->result, issue_flags);
}
static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index cfd2b5b90b91..102fae6a231c 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -936,7 +936,7 @@ int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
unsigned int issue_flags);
int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
struct nvme_id_ns **id);
-int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo);
+int nvme_getgeo(struct gendisk *disk, struct hd_geometry *geo);
int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags);
extern const struct attribute_group *nvme_ns_attr_groups[];
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 071efec25346..c916176bd9f0 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -172,9 +172,7 @@ struct nvme_dev {
u32 last_ps;
bool hmb;
struct sg_table *hmb_sgt;
-
mempool_t *dmavec_mempool;
- mempool_t *iod_meta_mempool;
/* shadow doorbell buffer support: */
__le32 *dbbuf_dbs;
@@ -261,6 +259,9 @@ enum nvme_iod_flags {
/* single segment dma mapping */
IOD_SINGLE_SEGMENT = 1U << 2,
+
+ /* Metadata using non-coalesced MPTR */
+ IOD_SINGLE_META_SEGMENT = 1U << 5,
};
struct nvme_dma_vec {
@@ -284,7 +285,8 @@ struct nvme_iod {
unsigned int nr_dma_vecs;
dma_addr_t meta_dma;
- struct sg_table meta_sgt;
+ unsigned int meta_total_len;
+ struct dma_iova_state meta_dma_state;
struct nvme_sgl_desc *meta_descriptor;
};
@@ -641,6 +643,11 @@ static inline struct dma_pool *nvme_dma_pool(struct nvme_queue *nvmeq,
return nvmeq->descriptor_pools.large;
}
+static inline bool nvme_pci_cmd_use_meta_sgl(struct nvme_command *cmd)
+{
+ return (cmd->common.flags & NVME_CMD_SGL_ALL) == NVME_CMD_SGL_METASEG;
+}
+
static inline bool nvme_pci_cmd_use_sgl(struct nvme_command *cmd)
{
return cmd->common.flags &
@@ -690,25 +697,52 @@ static void nvme_free_prps(struct request *req)
mempool_free(iod->dma_vecs, nvmeq->dev->dmavec_mempool);
}
-static void nvme_free_sgls(struct request *req)
+static void nvme_free_sgls(struct request *req, struct nvme_sgl_desc *sge,
+ struct nvme_sgl_desc *sg_list)
{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+ enum dma_data_direction dir = rq_dma_dir(req);
+ unsigned int len = le32_to_cpu(sge->length);
struct device *dma_dev = nvmeq->dev->dev;
- dma_addr_t sqe_dma_addr = le64_to_cpu(iod->cmd.common.dptr.sgl.addr);
- unsigned int sqe_dma_len = le32_to_cpu(iod->cmd.common.dptr.sgl.length);
- struct nvme_sgl_desc *sg_list = iod->descriptors[0];
+ unsigned int i;
+
+ if (sge->type == (NVME_SGL_FMT_DATA_DESC << 4)) {
+ dma_unmap_page(dma_dev, le64_to_cpu(sge->addr), len, dir);
+ return;
+ }
+
+ for (i = 0; i < len / sizeof(*sg_list); i++)
+ dma_unmap_page(dma_dev, le64_to_cpu(sg_list[i].addr),
+ le32_to_cpu(sg_list[i].length), dir);
+}
+
+static void nvme_unmap_metadata(struct request *req)
+{
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
enum dma_data_direction dir = rq_dma_dir(req);
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct device *dma_dev = nvmeq->dev->dev;
+ struct nvme_sgl_desc *sge = iod->meta_descriptor;
- if (iod->nr_descriptors) {
- unsigned int nr_entries = sqe_dma_len / sizeof(*sg_list), i;
+ if (iod->flags & IOD_SINGLE_META_SEGMENT) {
+ dma_unmap_page(dma_dev, iod->meta_dma,
+ rq_integrity_vec(req).bv_len,
+ rq_dma_dir(req));
+ return;
+ }
- for (i = 0; i < nr_entries; i++)
- dma_unmap_page(dma_dev, le64_to_cpu(sg_list[i].addr),
- le32_to_cpu(sg_list[i].length), dir);
- } else {
- dma_unmap_page(dma_dev, sqe_dma_addr, sqe_dma_len, dir);
+ if (!blk_rq_integrity_dma_unmap(req, dma_dev, &iod->meta_dma_state,
+ iod->meta_total_len)) {
+ if (nvme_pci_cmd_use_meta_sgl(&iod->cmd))
+ nvme_free_sgls(req, sge, &sge[1]);
+ else
+ dma_unmap_page(dma_dev, iod->meta_dma,
+ iod->meta_total_len, dir);
}
+
+ if (iod->meta_descriptor)
+ dma_pool_free(nvmeq->descriptor_pools.small,
+ iod->meta_descriptor, iod->meta_dma);
}
static void nvme_unmap_data(struct request *req)
@@ -727,7 +761,8 @@ static void nvme_unmap_data(struct request *req)
if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len)) {
if (nvme_pci_cmd_use_sgl(&iod->cmd))
- nvme_free_sgls(req);
+ nvme_free_sgls(req, iod->descriptors[0],
+ &iod->cmd.common.dptr.sgl);
else
nvme_free_prps(req);
}
@@ -935,7 +970,7 @@ static blk_status_t nvme_pci_setup_data_sgl(struct request *req,
nvme_pci_sgl_set_seg(&iod->cmd.common.dptr.sgl, sgl_dma, mapped);
if (unlikely(iter->status))
- nvme_free_sgls(req);
+ nvme_unmap_data(req);
return iter->status;
}
@@ -1007,70 +1042,70 @@ static blk_status_t nvme_map_data(struct request *req)
return nvme_pci_setup_data_prp(req, &iter);
}
-static void nvme_pci_sgl_set_data_sg(struct nvme_sgl_desc *sge,
- struct scatterlist *sg)
-{
- sge->addr = cpu_to_le64(sg_dma_address(sg));
- sge->length = cpu_to_le32(sg_dma_len(sg));
- sge->type = NVME_SGL_FMT_DATA_DESC << 4;
-}
-
static blk_status_t nvme_pci_setup_meta_sgls(struct request *req)
{
struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
- struct nvme_dev *dev = nvmeq->dev;
+ unsigned int entries = req->nr_integrity_segments;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_dev *dev = nvmeq->dev;
struct nvme_sgl_desc *sg_list;
- struct scatterlist *sgl, *sg;
- unsigned int entries;
+ struct blk_dma_iter iter;
dma_addr_t sgl_dma;
- int rc, i;
+ int i = 0;
- iod->meta_sgt.sgl = mempool_alloc(dev->iod_meta_mempool, GFP_ATOMIC);
- if (!iod->meta_sgt.sgl)
- return BLK_STS_RESOURCE;
+ if (!blk_rq_integrity_dma_map_iter_start(req, dev->dev,
+ &iod->meta_dma_state, &iter))
+ return iter.status;
- sg_init_table(iod->meta_sgt.sgl, req->nr_integrity_segments);
- iod->meta_sgt.orig_nents = blk_rq_map_integrity_sg(req,
- iod->meta_sgt.sgl);
- if (!iod->meta_sgt.orig_nents)
- goto out_free_sg;
+ if (blk_rq_dma_map_coalesce(&iod->meta_dma_state))
+ entries = 1;
- rc = dma_map_sgtable(dev->dev, &iod->meta_sgt, rq_dma_dir(req),
- DMA_ATTR_NO_WARN);
- if (rc)
- goto out_free_sg;
+ /*
+ * The NVMe MPTR descriptor has an implicit length that the host and
+ * device must agree on to avoid data/memory corruption. We trust the
+ * kernel allocated correctly based on the format's parameters, so use
+ * the more efficient MPTR to avoid extra dma pool allocations for the
+ * SGL indirection.
+ *
+ * But for user commands, we don't necessarily know what they do, so
+ * the driver can't validate the metadata buffer size. The SGL
+ * descriptor provides an explicit length, so we're relying on that
+ * mechanism to catch any misunderstandings between the application and
+ * device.
+ */
+ if (entries == 1 && !(nvme_req(req)->flags & NVME_REQ_USERCMD)) {
+ iod->cmd.common.metadata = cpu_to_le64(iter.addr);
+ iod->meta_total_len = iter.len;
+ iod->meta_dma = iter.addr;
+ iod->meta_descriptor = NULL;
+ return BLK_STS_OK;
+ }
sg_list = dma_pool_alloc(nvmeq->descriptor_pools.small, GFP_ATOMIC,
&sgl_dma);
if (!sg_list)
- goto out_unmap_sg;
+ return BLK_STS_RESOURCE;
- entries = iod->meta_sgt.nents;
iod->meta_descriptor = sg_list;
iod->meta_dma = sgl_dma;
-
iod->cmd.common.flags = NVME_CMD_SGL_METASEG;
iod->cmd.common.metadata = cpu_to_le64(sgl_dma);
-
- sgl = iod->meta_sgt.sgl;
if (entries == 1) {
- nvme_pci_sgl_set_data_sg(sg_list, sgl);
+ iod->meta_total_len = iter.len;
+ nvme_pci_sgl_set_data(sg_list, &iter);
return BLK_STS_OK;
}
sgl_dma += sizeof(*sg_list);
- nvme_pci_sgl_set_seg(sg_list, sgl_dma, entries);
- for_each_sg(sgl, sg, entries, i)
- nvme_pci_sgl_set_data_sg(&sg_list[i + 1], sg);
-
- return BLK_STS_OK;
+ do {
+ nvme_pci_sgl_set_data(&sg_list[++i], &iter);
+ iod->meta_total_len += iter.len;
+ } while (blk_rq_integrity_dma_map_iter_next(req, dev->dev, &iter));
-out_unmap_sg:
- dma_unmap_sgtable(dev->dev, &iod->meta_sgt, rq_dma_dir(req), 0);
-out_free_sg:
- mempool_free(iod->meta_sgt.sgl, dev->iod_meta_mempool);
- return BLK_STS_RESOURCE;
+ nvme_pci_sgl_set_seg(sg_list, sgl_dma, i);
+ if (unlikely(iter.status))
+ nvme_unmap_metadata(req);
+ return iter.status;
}
static blk_status_t nvme_pci_setup_meta_mptr(struct request *req)
@@ -1083,6 +1118,7 @@ static blk_status_t nvme_pci_setup_meta_mptr(struct request *req)
if (dma_mapping_error(nvmeq->dev->dev, iod->meta_dma))
return BLK_STS_IOERR;
iod->cmd.common.metadata = cpu_to_le64(iod->meta_dma);
+ iod->flags |= IOD_SINGLE_META_SEGMENT;
return BLK_STS_OK;
}
@@ -1104,7 +1140,7 @@ static blk_status_t nvme_prep_rq(struct request *req)
iod->flags = 0;
iod->nr_descriptors = 0;
iod->total_len = 0;
- iod->meta_sgt.nents = 0;
+ iod->meta_total_len = 0;
ret = nvme_setup_cmd(req->q->queuedata, req);
if (ret)
@@ -1215,25 +1251,6 @@ static void nvme_queue_rqs(struct rq_list *rqlist)
*rqlist = requeue_list;
}
-static __always_inline void nvme_unmap_metadata(struct request *req)
-{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
- struct nvme_dev *dev = nvmeq->dev;
-
- if (!iod->meta_sgt.nents) {
- dma_unmap_page(dev->dev, iod->meta_dma,
- rq_integrity_vec(req).bv_len,
- rq_dma_dir(req));
- return;
- }
-
- dma_pool_free(nvmeq->descriptor_pools.small, iod->meta_descriptor,
- iod->meta_dma);
- dma_unmap_sgtable(dev->dev, &iod->meta_sgt, rq_dma_dir(req), 0);
- mempool_free(iod->meta_sgt.sgl, dev->iod_meta_mempool);
-}
-
static __always_inline void nvme_pci_unmap_rq(struct request *req)
{
if (blk_integrity_rq(req))
@@ -3039,7 +3056,6 @@ static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)
static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev)
{
- size_t meta_size = sizeof(struct scatterlist) * (NVME_MAX_META_SEGS + 1);
size_t alloc_size = sizeof(struct nvme_dma_vec) * NVME_MAX_SEGS;
dev->dmavec_mempool = mempool_create_node(1,
@@ -3048,17 +3064,7 @@ static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev)
dev_to_node(dev->dev));
if (!dev->dmavec_mempool)
return -ENOMEM;
-
- dev->iod_meta_mempool = mempool_create_node(1,
- mempool_kmalloc, mempool_kfree,
- (void *)meta_size, GFP_KERNEL,
- dev_to_node(dev->dev));
- if (!dev->iod_meta_mempool)
- goto free;
return 0;
-free:
- mempool_destroy(dev->dmavec_mempool);
- return -ENOMEM;
}
static void nvme_free_tagset(struct nvme_dev *dev)
@@ -3324,10 +3330,12 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
* Exclude Samsung 990 Evo from NVME_QUIRK_SIMPLE_SUSPEND
* because of high power consumption (> 2 Watt) in s2idle
* sleep. Only some boards with Intel CPU are affected.
+	 * (Note for testing: Samsung 990 Evo Plus has the same PCI ID)
*/
if (dmi_match(DMI_BOARD_NAME, "DN50Z-140HC-YD") ||
dmi_match(DMI_BOARD_NAME, "GMxPXxx") ||
dmi_match(DMI_BOARD_NAME, "GXxMRXx") ||
+ dmi_match(DMI_BOARD_NAME, "NS5X_NS7XAU") ||
dmi_match(DMI_BOARD_NAME, "PH4PG31") ||
dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1") ||
dmi_match(DMI_BOARD_NAME, "PH6PG01_PH6PG71"))
@@ -3508,7 +3516,6 @@ out_disable:
nvme_free_queues(dev, 0);
out_release_iod_mempool:
mempool_destroy(dev->dmavec_mempool);
- mempool_destroy(dev->iod_meta_mempool);
out_dev_unmap:
nvme_dev_unmap(dev);
out_uninit_ctrl:
@@ -3572,7 +3579,6 @@ static void nvme_remove(struct pci_dev *pdev)
nvme_dbbuf_dma_free(dev);
nvme_free_queues(dev, 0);
mempool_destroy(dev->dmavec_mempool);
- mempool_destroy(dev->iod_meta_mempool);
nvme_release_descriptor_pools(dev);
nvme_dev_unmap(dev);
nvme_uninit_ctrl(&dev->ctrl);
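
The metadata-mapping rework in pci.c reduces to one decision, restated here as a sketch (build_meta_sgl() is a hypothetical stand-in for the SGL construction shown above): kernel-originated requests whose integrity buffer maps to a single DMA range use the implicit-length MPTR, while user passthrough or multi-range mappings get an explicit-length metadata SGL so the device can detect size mismatches.

if (entries == 1 && !(nvme_req(req)->flags & NVME_REQ_USERCMD)) {
	/* trusted length: point MPTR straight at the mapped range */
	iod->cmd.common.metadata = cpu_to_le64(iter.addr);
} else {
	/* untrusted or fragmented: explicit lengths via metadata SGL */
	build_meta_sgl(req);		/* hypothetical helper */
}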
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 9233f088fac8..1413788ca7d5 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2179,7 +2179,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
/*
* Only start IO queues for which we have allocated the tagset
- * and limitted it to the available queues. On reconnects, the
+ * and limited it to the available queues. On reconnects, the
* queue number might have changed.
*/
nr_queues = min(ctrl->tagset->nr_hw_queues + 1, ctrl->queue_count);
@@ -2250,6 +2250,9 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
if (error)
goto out_cleanup_tagset;
+ if (ctrl->opts->concat && !ctrl->tls_pskid)
+ return 0;
+
error = nvme_enable_ctrl(ctrl);
if (error)
goto out_stop_queue;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 884286f90688..5d7d483bfbe3 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -513,9 +513,6 @@ static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
return 0;
}
-/*
- * Note: ctrl->subsys->lock should be held when calling this function
- */
static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
struct nvmet_ns *ns)
{
@@ -523,6 +520,8 @@ static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
struct pci_dev *p2p_dev;
int ret;
+ lockdep_assert_held(&ctrl->subsys->lock);
+
if (!ctrl->p2p_client || !ns->use_p2pmem)
return;
@@ -1539,15 +1538,14 @@ bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
return false;
}
-/*
- * Note: ctrl->subsys->lock should be held when calling this function
- */
static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
struct device *p2p_client)
{
struct nvmet_ns *ns;
unsigned long idx;
+ lockdep_assert_held(&ctrl->subsys->lock);
+
if (!p2p_client)
return;
@@ -1557,14 +1555,13 @@ static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
nvmet_p2pmem_ns_add_p2p(ctrl, ns);
}
-/*
- * Note: ctrl->subsys->lock should be held when calling this function
- */
static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
{
struct radix_tree_iter iter;
void __rcu **slot;
+ lockdep_assert_held(&ctrl->subsys->lock);
+
radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
pci_dev_put(radix_tree_deref_slot(slot));
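
The three hunks above replace "lock should be held" comments with lockdep_assert_held(), turning a documentation-only contract into a runtime check under CONFIG_PROVE_LOCKING (and a no-op otherwise). The pattern in isolation, as a minimal sketch:

    #include <linux/lockdep.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(example_lock);

    /* Must be called with example_lock held; the assertion enforces
     * what a comment could only state. */
    static void example_locked_helper(void)
    {
            lockdep_assert_held(&example_lock);
            /* ... touch state protected by example_lock ... */
    }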
@@ -1960,24 +1957,24 @@ static int __init nvmet_init(void)
if (!nvmet_wq)
goto out_free_buffered_work_queue;
- error = nvmet_init_discovery();
+ error = nvmet_init_debugfs();
if (error)
goto out_free_nvmet_work_queue;
- error = nvmet_init_debugfs();
+ error = nvmet_init_discovery();
if (error)
- goto out_exit_discovery;
+ goto out_exit_debugfs;
error = nvmet_init_configfs();
if (error)
- goto out_exit_debugfs;
+ goto out_exit_discovery;
return 0;
-out_exit_debugfs:
- nvmet_exit_debugfs();
out_exit_discovery:
nvmet_exit_discovery();
+out_exit_debugfs:
+ nvmet_exit_debugfs();
out_free_nvmet_work_queue:
destroy_workqueue(nvmet_wq);
out_free_buffered_work_queue:
@@ -1992,8 +1989,8 @@ out_destroy_bvec_cache:
static void __exit nvmet_exit(void)
{
nvmet_exit_configfs();
- nvmet_exit_debugfs();
nvmet_exit_discovery();
+ nvmet_exit_debugfs();
ida_destroy(&cntlid_ida);
destroy_workqueue(nvmet_wq);
destroy_workqueue(buffered_io_wq);
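
The reordering above makes teardown strictly last-in, first-out with respect to initialization, both in nvmet_init()'s error unwind and in nvmet_exit(). Reduced to its skeleton (all names illustrative), the idiom looks like this:

    int init_a(void); void exit_a(void);    /* illustrative stubs */
    int init_b(void); void exit_b(void);
    int init_c(void); void exit_c(void);

    static int __init example_init(void)
    {
            int err;

            err = init_a();
            if (err)
                    return err;
            err = init_b();
            if (err)
                    goto out_exit_a;
            err = init_c();
            if (err)
                    goto out_exit_b;
            return 0;

    out_exit_b:
            exit_b();       /* unwind in reverse order of setup */
    out_exit_a:
            exit_a();
            return err;
    }

    static void __exit example_exit(void)
    {
            exit_c();       /* module exit mirrors the same LIFO order */
            exit_b();
            exit_a();
    }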
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 25598a46bf0d..7d84527d5a43 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -54,6 +54,8 @@ struct nvmet_fc_ls_req_op { /* for an LS RQST XMT */
int ls_error;
struct list_head lsreq_list; /* tgtport->ls_req_list */
bool req_queued;
+
+ struct work_struct put_work;
};
@@ -111,8 +113,6 @@ struct nvmet_fc_tgtport {
struct nvmet_fc_port_entry *pe;
struct kref ref;
u32 max_sg_cnt;
-
- struct work_struct put_work;
};
struct nvmet_fc_port_entry {
@@ -235,12 +235,13 @@ static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
-static void nvmet_fc_put_tgtport_work(struct work_struct *work)
+static void nvmet_fc_put_lsop_work(struct work_struct *work)
{
- struct nvmet_fc_tgtport *tgtport =
- container_of(work, struct nvmet_fc_tgtport, put_work);
+ struct nvmet_fc_ls_req_op *lsop =
+ container_of(work, struct nvmet_fc_ls_req_op, put_work);
- nvmet_fc_tgtport_put(tgtport);
+ nvmet_fc_tgtport_put(lsop->tgtport);
+ kfree(lsop);
}
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
@@ -367,7 +368,7 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
DMA_BIDIRECTIONAL);
out_putwork:
- queue_work(nvmet_wq, &tgtport->put_work);
+ queue_work(nvmet_wq, &lsop->put_work);
}
static int
@@ -388,6 +389,7 @@ __nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport,
lsreq->done = done;
lsop->req_queued = false;
INIT_LIST_HEAD(&lsop->lsreq_list);
+ INIT_WORK(&lsop->put_work, nvmet_fc_put_lsop_work);
lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr,
lsreq->rqstlen + lsreq->rsplen,
@@ -447,8 +449,6 @@ nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
__nvmet_fc_finish_ls_req(lsop);
/* fc-nvme target doesn't care about success or failure of cmd */
-
- kfree(lsop);
}
/*
@@ -459,7 +459,7 @@ nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
* down, and the related FC-NVME Association ID and Connection IDs
* become invalid.
*
- * The behavior of the fc-nvme target is such that it's
+ * The behavior of the fc-nvme target is such that its
* understanding of the association and connections will implicitly
* be torn down. The action is implicit as it may be due to a loss of
* connectivity with the fc-nvme host, so the target may never get a
@@ -1075,6 +1075,14 @@ nvmet_fc_delete_assoc_work(struct work_struct *work)
static void
nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
+ int terminating;
+
+ terminating = atomic_xchg(&assoc->terminating, 1);
+
+ /* if already terminating, do nothing */
+ if (terminating)
+ return;
+
nvmet_fc_tgtport_get(assoc->tgtport);
if (!queue_work(nvmet_wq, &assoc->del_work))
nvmet_fc_tgtport_put(assoc->tgtport);
@@ -1202,13 +1210,7 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
unsigned long flags;
- int i, terminating;
-
- terminating = atomic_xchg(&assoc->terminating, 1);
-
- /* if already terminating, do nothing */
- if (terminating)
- return;
+ int i;
spin_lock_irqsave(&tgtport->lock, flags);
list_del_rcu(&assoc->a_list);
@@ -1410,7 +1412,6 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
kref_init(&newrec->ref);
ida_init(&newrec->assoc_cnt);
newrec->max_sg_cnt = template->max_sgl_segments;
- INIT_WORK(&newrec->put_work, nvmet_fc_put_tgtport_work);
ret = nvmet_fc_alloc_ls_iodlist(newrec);
if (ret) {
@@ -2313,7 +2314,7 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
if (ret) {
/*
- * should be ok to set w/o lock as its in the thread of
+ * should be ok to set w/o lock as it's in the thread of
* execution (not an async timer routine) and doesn't
* contend with any clearing action
*/
@@ -2629,7 +2630,7 @@ transport_error:
* and the api of the FC LLDD which may issue a hw command to send the
* response, but the LLDD may not get the hw completion for that command
* and upcall the nvmet_fc layer before a new command may be
- * asynchronously received - its possible for a command to be received
+ * asynchronously received - it's possible for a command to be received
* before the LLDD and nvmet_fc have recycled the job structure. It gives
* the appearance of more commands received than fits in the sq.
* To alleviate this scenario, a temporary queue is maintained in the
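
Two structural moves dominate the fc.c hunks: the tgtport put work becomes per-LS-request (so each lsop can be freed from its own work item), and the assoc->terminating check moves into nvmet_fc_schedule_delete_assoc(), so the delete work can never be queued twice. The latter relies on the standard atomic_xchg() once-only guard, sketched here:

    #include <linux/atomic.h>

    /* Returns true exactly once per flag; every later caller reads back 1
     * and bails. This is the idiom applied to assoc->terminating above. */
    static bool example_claim_once(atomic_t *flag)
    {
            return atomic_xchg(flag, 1) == 0;
    }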
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 257b497d515a..5dffcc5becae 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -496,13 +496,15 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
if (!targetport) {
/*
* The target port is gone. The target doesn't expect any
- * response anymore and the ->done call is not valid
- * because the resources have been freed by
- * nvmet_fc_free_pending_reqs.
+ * response anymore, and thus lsreq can no longer be accessed.
*
* We end up here from delete association exchange:
* nvmet_fc_xmt_disconnect_assoc sends an async request.
+ *
+ * Return success because this is what LLDDs do; silently
+ * drop the response.
*/
+ lsrsp->done(lsrsp);
kmem_cache_free(lsreq_cache, tls_req);
return 0;
}
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 3b4b0df8f879..0c361b1e3566 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -533,6 +533,8 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
case NVME_FEAT_HOST_ID:
req->execute = nvmet_execute_get_features;
return NVME_SC_SUCCESS;
+ case NVME_FEAT_FDP:
+ return nvmet_setup_passthru_command(req);
default:
return nvmet_passthru_get_set_features(req);
}
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 67f61c67c167..0485e25ab797 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1731,7 +1731,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
* We registered an ib_client to handle device removal for queues,
* so we only need to handle the listening port cm_ids. In this case
* we nullify the priv to prevent double cm_id destruction and destroying
- * the cm_id implicitely by returning a non-zero rc to the callout.
+ * the cm_id implicitly by returning a non-zero rc to the callout.
*/
static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
struct nvmet_rdma_queue *queue)
@@ -1742,7 +1742,7 @@ static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
/*
* This is a queue cm_id. We have registered
* an ib_client to handle queue removal
- * so don't interfear and just return.
+ * so don't interfere and just return.
*/
return 0;
}
@@ -1760,7 +1760,7 @@ static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
/*
* We need to return 1 so that the core will destroy
- * it's own ID. What a great API design..
+ * its own ID. What a great API design..
*/
return 1;
}
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index edd811444ce5..e0d88d3199c1 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -28,6 +28,17 @@ source "drivers/nvmem/layouts/Kconfig"
# Devices
+config NVMEM_AN8855_EFUSE
+ tristate "Airoha AN8855 eFuse support"
+ depends on MFD_AIROHA_AN8855 || COMPILE_TEST
+ help
+	  Say Y here to enable support for reading eFuses on the Airoha
+	  AN8855 switch. These are used, for example, to store
+	  factory-programmed calibration data required for the PHY.
+
+ This driver can also be built as a module. If so, the module will
+ be called nvmem-an8855-efuse.
+
config NVMEM_APPLE_EFUSES
tristate "Apple eFuse support"
depends on ARCH_APPLE || COMPILE_TEST
@@ -240,6 +251,16 @@ config NVMEM_NINTENDO_OTP
This driver can also be built as a module. If so, the module
will be called nvmem-nintendo-otp.
+config NVMEM_S32G_OCOTP
+ tristate "S32G SoC OCOTP support"
+ depends on ARCH_S32
+ help
+ This is a driver for the 'OCOTP' peripheral available on S32G
+ platforms.
+
+ If you say Y here, you will get support for the One Time
+ Programmable memory pages.
+
config NVMEM_QCOM_QFPROM
tristate "QCOM QFPROM Support"
depends on ARCH_QCOM || COMPILE_TEST
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 2021d59688db..70a4464dcb1e 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -10,6 +10,8 @@ nvmem_layouts-y := layouts.o
obj-y += layouts/
# Devices
+obj-$(CONFIG_NVMEM_AN8855_EFUSE) += nvmem-an8855-efuse.o
+nvmem-an8855-efuse-y := an8855-efuse.o
obj-$(CONFIG_NVMEM_APPLE_EFUSES) += nvmem-apple-efuses.o
nvmem-apple-efuses-y := apple-efuses.o
obj-$(CONFIG_NVMEM_APPLE_SPMI) += apple_nvmem_spmi.o
@@ -79,6 +81,8 @@ obj-$(CONFIG_NVMEM_SUNPLUS_OCOTP) += nvmem_sunplus_ocotp.o
nvmem_sunplus_ocotp-y := sunplus-ocotp.o
obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o
nvmem_sunxi_sid-y := sunxi_sid.o
+obj-$(CONFIG_NVMEM_S32G_OCOTP) += nvmem-s32g-ocotp-nvmem.o
+nvmem-s32g-ocotp-nvmem-y := s32g-ocotp-nvmem.o
obj-$(CONFIG_NVMEM_U_BOOT_ENV) += nvmem_u-boot-env.o
nvmem_u-boot-env-y := u-boot-env.o
obj-$(CONFIG_NVMEM_UNIPHIER_EFUSE) += nvmem-uniphier-efuse.o
diff --git a/drivers/nvmem/an8855-efuse.c b/drivers/nvmem/an8855-efuse.c
new file mode 100644
index 000000000000..d1afde6f623f
--- /dev/null
+++ b/drivers/nvmem/an8855-efuse.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Airoha AN8855 Switch EFUSE Driver
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define AN8855_EFUSE_CELL 50
+
+#define AN8855_EFUSE_DATA0 0x1000a500
+#define AN8855_EFUSE_R50O GENMASK(30, 24)
+
+static int an8855_efuse_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct regmap *regmap = context;
+
+ return regmap_bulk_read(regmap, AN8855_EFUSE_DATA0 + offset,
+ val, bytes / sizeof(u32));
+}
+
+static int an8855_efuse_probe(struct platform_device *pdev)
+{
+ struct nvmem_config an8855_nvmem_config = {
+ .name = "an8855-efuse",
+ .size = AN8855_EFUSE_CELL * sizeof(u32),
+ .stride = sizeof(u32),
+ .word_size = sizeof(u32),
+ .reg_read = an8855_efuse_read,
+ };
+ struct device *dev = &pdev->dev;
+ struct nvmem_device *nvmem;
+ struct regmap *regmap;
+
+ /* Assign NVMEM priv to MFD regmap */
+ regmap = dev_get_regmap(dev->parent, NULL);
+ if (!regmap)
+ return -ENOENT;
+
+ an8855_nvmem_config.priv = regmap;
+ an8855_nvmem_config.dev = dev;
+ nvmem = devm_nvmem_register(dev, &an8855_nvmem_config);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static const struct of_device_id an8855_efuse_of_match[] = {
+ { .compatible = "airoha,an8855-efuse", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, an8855_efuse_of_match);
+
+static struct platform_driver an8855_efuse_driver = {
+ .probe = an8855_efuse_probe,
+ .driver = {
+ .name = "an8855-efuse",
+ .of_match_table = an8855_efuse_of_match,
+ },
+};
+module_platform_driver(an8855_efuse_driver);
+
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_DESCRIPTION("Driver for AN8855 Switch EFUSE");
+MODULE_LICENSE("GPL");
diff --git a/drivers/nvmem/layouts.c b/drivers/nvmem/layouts.c
index 65d39e19f6ec..f381ce1e84bd 100644
--- a/drivers/nvmem/layouts.c
+++ b/drivers/nvmem/layouts.c
@@ -45,11 +45,24 @@ static void nvmem_layout_bus_remove(struct device *dev)
return drv->remove(layout);
}
+static int nvmem_layout_bus_uevent(const struct device *dev,
+ struct kobj_uevent_env *env)
+{
+ int ret;
+
+ ret = of_device_uevent_modalias(dev, env);
+	if (ret != -ENODEV)
+ return ret;
+
+ return 0;
+}
+
static const struct bus_type nvmem_layout_bus_type = {
.name = "nvmem-layout",
.match = nvmem_layout_bus_match,
.probe = nvmem_layout_bus_probe,
.remove = nvmem_layout_bus_remove,
+ .uevent = nvmem_layout_bus_uevent,
};
int __nvmem_layout_driver_register(struct nvmem_layout_driver *drv,
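
With a .uevent callback wired up, the nvmem-layout bus now emits a MODALIAS value for each layout device, which is what lets udev autoload layout driver modules; -ENODEV from of_device_uevent_modalias() (a device without an OF node) is deliberately swallowed. Schematically, the emitted environment looks like this (a sketch; concrete values depend on the device tree):

    /*
     * ACTION=add
     * MODALIAS=of:N<node-name>T<device-type>C<compatible>
     *
     * udev matches MODALIAS against the aliases generated from the
     * driver's MODULE_DEVICE_TABLE(of, ...) entry and modprobes it.
     */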
diff --git a/drivers/nvmem/s32g-ocotp-nvmem.c b/drivers/nvmem/s32g-ocotp-nvmem.c
new file mode 100644
index 000000000000..119871ab3a94
--- /dev/null
+++ b/drivers/nvmem/s32g-ocotp-nvmem.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2023-2025 NXP
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+struct s32g_ocotp_priv {
+ struct device *dev;
+ void __iomem *base;
+};
+
+static int s32g_ocotp_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct s32g_ocotp_priv *s32g_data = context;
+ u32 *dst = val;
+
+ while (bytes >= sizeof(u32)) {
+ *dst++ = ioread32(s32g_data->base + offset);
+
+ bytes -= sizeof(u32);
+ offset += sizeof(u32);
+ }
+
+ return 0;
+}
+
+static struct nvmem_keepout s32g_keepouts[] = {
+ { .start = 0, .end = 520 },
+ { .start = 540, .end = 564 },
+ { .start = 596, .end = 664 },
+ { .start = 668, .end = 676 },
+ { .start = 684, .end = 732 },
+ { .start = 744, .end = 864 },
+ { .start = 908, .end = 924 },
+ { .start = 928, .end = 936 },
+ { .start = 948, .end = 964 },
+ { .start = 968, .end = 976 },
+ { .start = 984, .end = 1012 },
+};
+
+static struct nvmem_config s32g_ocotp_nvmem_config = {
+ .name = "s32g-ocotp",
+ .add_legacy_fixed_of_cells = true,
+ .read_only = true,
+ .word_size = 4,
+ .reg_read = s32g_ocotp_read,
+ .keepout = s32g_keepouts,
+ .nkeepout = ARRAY_SIZE(s32g_keepouts),
+};
+
+static const struct of_device_id ocotp_of_match[] = {
+ { .compatible = "nxp,s32g2-ocotp" },
+ { /* sentinel */ }
+};
+
+static int s32g_ocotp_probe(struct platform_device *pdev)
+{
+ struct s32g_ocotp_priv *s32g_data;
+ struct device *dev = &pdev->dev;
+ struct nvmem_device *nvmem;
+ struct resource *res;
+
+ s32g_data = devm_kzalloc(dev, sizeof(*s32g_data), GFP_KERNEL);
+ if (!s32g_data)
+ return -ENOMEM;
+
+ s32g_data->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(s32g_data->base))
+ return dev_err_probe(dev, PTR_ERR(s32g_data->base),
+ "Cannot map OCOTP device.\n");
+
+ s32g_data->dev = dev;
+ s32g_ocotp_nvmem_config.dev = dev;
+ s32g_ocotp_nvmem_config.priv = s32g_data;
+ s32g_ocotp_nvmem_config.size = resource_size(res);
+
+ nvmem = devm_nvmem_register(dev, &s32g_ocotp_nvmem_config);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static struct platform_driver s32g_ocotp_driver = {
+ .probe = s32g_ocotp_probe,
+ .driver = {
+ .name = "s32g-ocotp",
+ .of_match_table = ocotp_of_match,
+ },
+};
+module_platform_driver(s32g_ocotp_driver);
+MODULE_AUTHOR("NXP");
+MODULE_DESCRIPTION("S32G OCOTP driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/of/device.c b/drivers/of/device.c
index c80426510ec2..f7e75e527667 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -17,8 +17,8 @@
/**
* of_match_device - Tell if a struct device matches an of_device_id list
- * @matches: array of of device match structures to search in
- * @dev: the of device structure to match against
+ * @matches: array of of_device_id match structures to search in
+ * @dev: the OF device structure to match against
*
* Used by a driver to check whether a platform_device present in the
* system is in its list of supported devices.
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index 0aba760f7577..2eaaddcb0ec4 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -935,10 +935,15 @@ static int of_changeset_add_prop_helper(struct of_changeset *ocs,
return -ENOMEM;
ret = of_changeset_add_property(ocs, np, new_pp);
- if (ret)
+ if (ret) {
__of_prop_free(new_pp);
+ return ret;
+ }
- return ret;
+ new_pp->next = np->deadprops;
+ np->deadprops = new_pp;
+
+ return 0;
}
/**
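
The dynamic.c fix above parks the new property on np->deadprops once the changeset entry owns it, so the allocation is reclaimed together with the node instead of leaking. For orientation, this helper sits underneath the public changeset API, used roughly like so (a sketch; the caller owns the changeset and later reverts/destroys it):

    #include <linux/of.h>

    static int example_add_prop(struct of_changeset *ocs,
                                struct device_node *np)
    {
            int ret;

            of_changeset_init(ocs);
            ret = of_changeset_add_prop_string(ocs, np, "status", "okay");
            if (ret)
                    return ret;
            return of_changeset_apply(ocs); /* publishes the property */
    }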
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 74aaea61de13..65c3c23255b7 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -163,7 +163,7 @@ const __be32 *of_irq_parse_imap_parent(const __be32 *imap, int len, struct of_ph
* @out_irq: structure of_phandle_args updated by this function
*
* This function is a low-level interrupt tree walking function. It
- * can be used to do a partial walk with synthetized reg and interrupts
+ * can be used to do a partial walk with synthesized reg and interrupts
* properties, for example when resolving PCI interrupts when no device
* node exist for the parent. It takes an interrupt specifier structure as
* input, walks the tree looking for any interrupt-map properties, translates
@@ -519,6 +519,7 @@ int of_irq_count(struct device_node *dev)
return nr;
}
+EXPORT_SYMBOL_GPL(of_irq_count);
/**
* of_irq_to_resource_table - Fill in resource table with node's IRQ info
@@ -673,13 +674,14 @@ err:
/**
* of_msi_xlate - map a MSI ID and find relevant MSI controller node
* @dev: device for which the mapping is to be done.
- * @msi_np: Pointer to store the MSI controller node
+ * @msi_np: Pointer to target MSI controller node
* @id_in: Device ID.
*
* Walk up the device hierarchy looking for devices with a "msi-map"
- * property. If found, apply the mapping to @id_in. @msi_np pointed
- * value must be NULL on entry, if an MSI controller is found @msi_np is
- * initialized to the MSI controller node with a reference held.
+ * property. If found, apply the mapping to @id_in.
+ * If @msi_np points to a non-NULL device node pointer, only entries targeting
+ * that node will be matched; if it points to a NULL value, it will receive the
+ * device node of the first matching target phandle, with a reference held.
*
* Returns: The mapped MSI id.
*/
@@ -700,22 +702,6 @@ u32 of_msi_xlate(struct device *dev, struct device_node **msi_np, u32 id_in)
}
/**
- * of_msi_map_id - Map a MSI ID for a device.
- * @dev: device for which the mapping is to be done.
- * @msi_np: device node of the expected msi controller.
- * @id_in: unmapped MSI ID for the device.
- *
- * Walk up the device hierarchy looking for devices with a "msi-map"
- * property. If found, apply the mapping to @id_in.
- *
- * Return: The mapped MSI ID.
- */
-u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in)
-{
- return of_msi_xlate(dev, &msi_np, id_in);
-}
-
-/**
* of_msi_map_get_device_domain - Use msi-map to find the relevant MSI domain
* @dev: device for which the mapping is to be done.
* @id: Device ID.
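
The reworked kernel-doc makes of_msi_xlate() cover both calling modes, which is why the thin of_msi_map_id() wrapper could be deleted. A sketch of a caller (device and RID are illustrative):

    #include <linux/of.h>
    #include <linux/of_irq.h>

    static u32 example_map_rid(struct device *dev, u32 rid)
    {
            struct device_node *msi_np = NULL;
            u32 mapped;

            /*
             * Discovery mode: *msi_np is NULL on entry and, on a match,
             * comes back holding a reference on the controller node.
             * Passing a pointer to a specific node instead constrains the
             * walk to msi-map entries targeting that node (what the
             * deleted of_msi_map_id() wrapper used to do).
             */
            mapped = of_msi_xlate(dev, &msi_np, rid);
            of_node_put(msi_np);    /* drop the reference taken on a match */

            return mapped;
    }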
diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c
index 230d5f628c1b..cd2dc8e825c9 100644
--- a/drivers/of/of_numa.c
+++ b/drivers/of/of_numa.c
@@ -59,8 +59,11 @@ static int __init of_numa_parse_memory_nodes(void)
r = -EINVAL;
}
- for (i = 0; !r && !of_address_to_resource(np, i, &rsrc); i++)
+ for (i = 0; !r && !of_address_to_resource(np, i, &rsrc); i++) {
r = numa_add_memblk(nid, rsrc.start, rsrc.end + 1);
+ if (!r)
+ node_set(nid, numa_nodes_parsed);
+ }
if (!i || r) {
of_node_put(np);
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 77016c0cc296..2e9ea751ed2d 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -25,6 +25,7 @@
#include <linux/memblock.h>
#include <linux/kmemleak.h>
#include <linux/cma.h>
+#include <linux/dma-map-ops.h>
#include "of_private.h"
@@ -175,13 +176,17 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
base = dt_mem_next_cell(dt_root_addr_cells, &prop);
size = dt_mem_next_cell(dt_root_size_cells, &prop);
- if (size &&
- early_init_dt_reserve_memory(base, size, nomap) == 0)
+ if (size && early_init_dt_reserve_memory(base, size, nomap) == 0) {
+ /* Architecture specific contiguous memory fixup. */
+ if (of_flat_dt_is_compatible(node, "shared-dma-pool") &&
+ of_get_flat_dt_prop(node, "reusable", NULL))
+ dma_contiguous_early_fixup(base, size);
pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
uname, &base, (unsigned long)(size / SZ_1M));
- else
+ } else {
pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
uname, &base, (unsigned long)(size / SZ_1M));
+ }
len -= t_len;
}
@@ -472,7 +477,10 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam
uname, (unsigned long)(size / SZ_1M));
return -ENOMEM;
}
-
+ /* Architecture specific contiguous memory fixup. */
+ if (of_flat_dt_is_compatible(node, "shared-dma-pool") &&
+ of_get_flat_dt_prop(node, "reusable", NULL))
+ dma_contiguous_early_fixup(base, size);
/* Save region in the reserved_mem array */
fdt_reserved_mem_save_node(node, uname, base, size);
return 0;
@@ -771,6 +779,7 @@ int of_reserved_mem_region_to_resource(const struct device_node *np,
return -EINVAL;
resource_set_range(res, rmem->base, rmem->size);
+ res->flags = IORESOURCE_MEM;
res->name = rmem->name;
return 0;
}
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 1af6f52d0708..255e8362f600 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -135,7 +135,7 @@ static BLOCKING_NOTIFIER_HEAD(overlay_notify_chain);
* @nb: Notifier block to register
*
* Register for notification on overlay operations on device tree nodes. The
- * reported actions definied by @of_reconfig_change. The notifier callback
+ * reported actions defined by @of_reconfig_change. The notifier callback
* furthermore receives a pointer to the affected device tree node.
*
* Note that a notifier callback is not supposed to store pointers to a device
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index e3503ec20f6c..388e9ec2cccf 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -4300,6 +4300,7 @@ static int of_unittest_pci_node_verify(struct pci_dev *pdev, bool add)
unittest(!np, "Child device tree node is not removed\n");
child_dev = device_find_any_child(&pdev->dev);
unittest(!child_dev, "Child device is not removed\n");
+ put_device(child_dev);
}
failed:
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index edbd60501cf0..bba4f7daff8c 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -476,6 +476,16 @@ static unsigned long _read_bw(struct dev_pm_opp *opp, int index)
return opp->bandwidth[index].peak;
}
+static unsigned long _read_opp_key(struct dev_pm_opp *opp, int index,
+ struct dev_pm_opp_key *key)
+{
+ key->bw = opp->bandwidth ? opp->bandwidth[index].peak : 0;
+ key->freq = opp->rates[index];
+ key->level = opp->level;
+
+ return true;
+}
+
/* Generic comparison helpers */
static bool _compare_exact(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
unsigned long opp_key, unsigned long key)
@@ -509,6 +519,22 @@ static bool _compare_floor(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
return false;
}
+static bool _compare_opp_key_exact(struct dev_pm_opp **opp,
+ struct dev_pm_opp *temp_opp, struct dev_pm_opp_key *opp_key,
+ struct dev_pm_opp_key *key)
+{
+ bool level_match = (key->level == OPP_LEVEL_UNSET || opp_key->level == key->level);
+ bool freq_match = (key->freq == 0 || opp_key->freq == key->freq);
+ bool bw_match = (key->bw == 0 || opp_key->bw == key->bw);
+
+ if (freq_match && level_match && bw_match) {
+ *opp = temp_opp;
+ return true;
+ }
+
+ return false;
+}
+
/* Generic key finding helpers */
static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table,
unsigned long *key, int index, bool available,
@@ -541,6 +567,37 @@ static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table,
return opp;
}
+static struct dev_pm_opp *_opp_table_find_opp_key(struct opp_table *opp_table,
+ struct dev_pm_opp_key *key, bool available,
+ unsigned long (*read)(struct dev_pm_opp *opp, int index,
+ struct dev_pm_opp_key *key),
+ bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
+ struct dev_pm_opp_key *opp_key, struct dev_pm_opp_key *key),
+ bool (*assert)(struct opp_table *opp_table, unsigned int index))
+{
+ struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+ struct dev_pm_opp_key temp_key;
+
+ /* Assert that the requirement is met */
+ if (!assert(opp_table, 0))
+ return ERR_PTR(-EINVAL);
+
+ guard(mutex)(&opp_table->lock);
+
+ list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
+ if (temp_opp->available == available) {
+ read(temp_opp, 0, &temp_key);
+ if (compare(&opp, temp_opp, &temp_key, key)) {
+ /* Increment the reference count of OPP */
+ dev_pm_opp_get(opp);
+ break;
+ }
+ }
+ }
+
+ return opp;
+}
+
static struct dev_pm_opp *
_find_key(struct device *dev, unsigned long *key, int index, bool available,
unsigned long (*read)(struct dev_pm_opp *opp, int index),
@@ -633,6 +690,48 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
/**
+ * dev_pm_opp_find_key_exact() - Search for an OPP with exact key set
+ * @dev: Device for which the OPP is being searched
+ * @key: OPP key set to match
+ * @available: true/false - match for available OPP
+ *
+ * Search for an exact match of the key set in the OPP table.
+ *
+ * Return: A matching opp on success, else ERR_PTR in case of error.
+ * Possible error values:
+ * EINVAL: for bad pointers
+ * ERANGE: no match found for search
+ * ENODEV: if device not found in list of registered devices
+ *
+ * Note: 'available' is a modifier for the search. If 'available' == true,
+ * then the match is for exact matching key and is available in the stored
+ * OPP table. If false, the match is for exact key which is not available.
+ *
+ * This provides a mechanism to enable an OPP which is not available currently
+ * or the opposite as well.
+ *
+ * The callers are required to call dev_pm_opp_put() for the returned OPP after
+ * use.
+ */
+struct dev_pm_opp *dev_pm_opp_find_key_exact(struct device *dev,
+ struct dev_pm_opp_key *key,
+ bool available)
+{
+ struct opp_table *opp_table __free(put_opp_table) = _find_opp_table(dev);
+
+ if (IS_ERR(opp_table)) {
+ dev_err(dev, "%s: OPP table not found (%ld)\n", __func__,
+ PTR_ERR(opp_table));
+ return ERR_CAST(opp_table);
+ }
+
+ return _opp_table_find_opp_key(opp_table, key, available,
+ _read_opp_key, _compare_opp_key_exact,
+ assert_single_clk);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_key_exact);
+
+/**
* dev_pm_opp_find_freq_exact_indexed() - Search for an exact freq for the
* clock corresponding to the index
* @dev: Device for which we do this operation
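
A caller of the new lookup fills in only the key fields it cares about (freq 0, bw 0 and level OPP_LEVEL_UNSET act as wildcards, per _compare_opp_key_exact() above) and must drop the reference when done. A usage sketch, with an illustrative frequency:

    #include <linux/err.h>
    #include <linux/pm_opp.h>

    static int example_opp_lookup(struct device *dev)
    {
            struct dev_pm_opp_key key = {
                    .freq = 800000000,              /* 800 MHz, illustrative */
                    .level = OPP_LEVEL_UNSET,       /* wildcard */
                    .bw = 0,                        /* wildcard */
            };
            struct dev_pm_opp *opp;

            opp = dev_pm_opp_find_key_exact(dev, &key, true);
            if (IS_ERR(opp))
                    return PTR_ERR(opp);

            /* ... program the device from the OPP ... */
            dev_pm_opp_put(opp);    /* required, per the kernel-doc above */
            return 0;
    }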
diff --git a/drivers/parisc/eisa_eeprom.c b/drivers/parisc/eisa_eeprom.c
index 443b15422fc1..601cbb22574f 100644
--- a/drivers/parisc/eisa_eeprom.c
+++ b/drivers/parisc/eisa_eeprom.c
@@ -15,8 +15,6 @@
#include <linux/uaccess.h>
#include <asm/eisa_eeprom.h>
-#define EISA_EEPROM_MINOR 241
-
static loff_t eisa_eeprom_llseek(struct file *file, loff_t offset, int origin)
{
return fixed_size_llseek(file, offset, origin, HPEE_MAX_LENGTH);
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 9a249c65aedc..7065a8e5f9b1 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -221,7 +221,7 @@ config PCI_LABEL
config PCI_HYPERV
tristate "Hyper-V PCI Frontend"
- depends on ((X86 && X86_64) || ARM64) && HYPERV && PCI_MSI && SYSFS
+ depends on ((X86 && X86_64) || ARM64) && HYPERV_VMBUS && PCI_MSI && SYSFS
select PCI_HYPERV_INTERFACE
select IRQ_MSI_LIB
help
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index b77fd30bbfd9..f26aec6ff588 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -204,6 +204,9 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
if (!r)
continue;
+ if (r->flags & (IORESOURCE_UNSET|IORESOURCE_DISABLED))
+ continue;
+
/* type_mask must match */
if ((res->flags ^ r->flags) & type_mask)
continue;
@@ -361,11 +364,15 @@ void pci_bus_add_device(struct pci_dev *dev)
* before PCI client drivers.
*/
pdev = of_find_device_by_node(dn);
- if (pdev && of_pci_supply_present(dn)) {
- if (!device_link_add(&dev->dev, &pdev->dev,
- DL_FLAG_AUTOREMOVE_CONSUMER))
- pci_err(dev, "failed to add device link to power control device %s\n",
- pdev->name);
+ if (pdev) {
+ if (of_pci_supply_present(dn)) {
+ if (!device_link_add(&dev->dev, &pdev->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER)) {
+ pci_err(dev, "failed to add device link to power control device %s\n",
+ pdev->name);
+ }
+ }
+ put_device(&pdev->dev);
}
if (!dn || of_device_is_available(dn))
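
of_find_device_by_node() returns the platform device with a reference held; the old code leaked it, and the restructured block now drops it on every path. The required pairing in isolation (a sketch):

    #include <linux/of_platform.h>

    static void example_use_of_pdev(struct device_node *dn)
    {
            struct platform_device *pdev = of_find_device_by_node(dn);

            if (!pdev)
                    return;
            /* ... use pdev, e.g. add a device link ... */
            put_device(&pdev->dev); /* balance the lookup's get */
    }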
diff --git a/drivers/pci/controller/cadence/Kconfig b/drivers/pci/controller/cadence/Kconfig
index 666e16b6367f..02a639e55fd8 100644
--- a/drivers/pci/controller/cadence/Kconfig
+++ b/drivers/pci/controller/cadence/Kconfig
@@ -42,6 +42,15 @@ config PCIE_CADENCE_PLAT_EP
endpoint mode. This PCIe controller may be embedded into many
different vendors SoCs.
+config PCIE_SG2042_HOST
+ tristate "Sophgo SG2042 PCIe controller (host mode)"
+ depends on OF && (ARCH_SOPHGO || COMPILE_TEST)
+ select PCIE_CADENCE_HOST
+ help
+ Say Y here if you want to support the Sophgo SG2042 PCIe platform
+ controller in host mode. Sophgo SG2042 PCIe controller uses Cadence
+ PCIe core.
+
config PCI_J721E
tristate
select PCIE_CADENCE_HOST if PCI_J721E_HOST != n
@@ -67,4 +76,5 @@ config PCI_J721E_EP
Say Y here if you want to support the TI J721E PCIe platform
controller in endpoint mode. TI J721E PCIe controller uses Cadence PCIe
core.
+
endmenu
diff --git a/drivers/pci/controller/cadence/Makefile b/drivers/pci/controller/cadence/Makefile
index 9bac5fb2f13d..5e23f8539ecc 100644
--- a/drivers/pci/controller/cadence/Makefile
+++ b/drivers/pci/controller/cadence/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host.o
obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep.o
obj-$(CONFIG_PCIE_CADENCE_PLAT) += pcie-cadence-plat.o
obj-$(CONFIG_PCI_J721E) += pci-j721e.o
+obj-$(CONFIG_PCIE_SG2042_HOST) += pcie-sg2042.o
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index 6c93f39d0288..5bc5ab20aa6d 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -284,6 +284,25 @@ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
if (!ret)
offset = args.args[0];
+ /*
+ * The PCIe Controller's registers have different "reset-values"
+ * depending on the "strap" settings programmed into the PCIEn_CTRL
+ * register within the CTRL_MMR memory-mapped register space.
+ * The registers latch onto a "reset-value" based on the "strap"
+ * settings sampled after the PCIe Controller is powered on.
+ * To ensure that the "reset-values" are sampled accurately, power
+ * off the PCIe Controller before programming the "strap" settings
+	 * and power it on after that. The runtime PM APIs, namely
+	 * pm_runtime_put_sync() and pm_runtime_get_sync(), decrement and
+	 * increment the usage counter respectively, causing GENPD to power
+	 * the PCIe Controller off and back on.
+ */
+ ret = pm_runtime_put_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to power off PCIe Controller\n");
+ return ret;
+ }
+
ret = j721e_pcie_set_mode(pcie, syscon, offset);
if (ret < 0) {
dev_err(dev, "Failed to set pci mode\n");
@@ -302,6 +321,12 @@ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
return ret;
}
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to power on PCIe Controller\n");
+ return ret;
+ }
+
/* Enable ACSPCIE refclk output if the optional property exists */
syscon = syscon_regmap_lookup_by_phandle_optional(node,
"ti,syscon-acspcie-proxy-ctrl");
@@ -440,6 +465,7 @@ static const struct of_device_id of_j721e_pcie_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, of_j721e_pcie_match);
static int j721e_pcie_probe(struct platform_device *pdev)
{
@@ -549,7 +575,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
ret = j721e_pcie_ctrl_init(pcie);
if (ret < 0) {
- dev_err_probe(dev, ret, "pm_runtime_get_sync failed\n");
+ dev_err_probe(dev, ret, "j721e_pcie_ctrl_init failed\n");
goto err_get_sync;
}
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 77c5a19b2ab1..1eac012a8226 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -21,12 +21,13 @@
static u8 cdns_pcie_get_fn_from_vfn(struct cdns_pcie *pcie, u8 fn, u8 vfn)
{
- u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
u32 first_vf_offset, stride;
+ u16 cap;
if (vfn == 0)
return fn;
+ cap = cdns_pcie_find_ext_capability(pcie, PCI_EXT_CAP_ID_SRIOV);
first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_OFFSET);
stride = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_STRIDE);
fn = fn + first_vf_offset + ((vfn - 1) * stride);
@@ -38,10 +39,11 @@ static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_header *hdr)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
- u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
struct cdns_pcie *pcie = &ep->pcie;
u32 reg;
+ u16 cap;
+ cap = cdns_pcie_find_ext_capability(pcie, PCI_EXT_CAP_ID_SRIOV);
if (vfn > 1) {
dev_err(&epc->dev, "Only Virtual Function #1 has deviceID\n");
return -EINVAL;
@@ -227,9 +229,10 @@ static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 nr_irqs)
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
u8 mmc = order_base_2(nr_irqs);
- u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
u16 flags;
+ u8 cap;
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSI);
fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
/*
@@ -249,9 +252,10 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
- u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
u16 flags, mme;
+ u8 cap;
+	cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSI);
fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
/* Validate that the MSI feature is actually enabled. */
@@ -272,9 +276,10 @@ static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
- u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
u32 val, reg;
+ u8 cap;
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSIX);
func_no = cdns_pcie_get_fn_from_vfn(pcie, func_no, vfunc_no);
reg = cap + PCI_MSIX_FLAGS;
@@ -292,9 +297,10 @@ static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
- u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
u32 val, reg;
+ u8 cap;
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSIX);
fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
reg = cap + PCI_MSIX_FLAGS;
@@ -380,11 +386,11 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
u8 interrupt_num)
{
struct cdns_pcie *pcie = &ep->pcie;
- u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
u16 flags, mme, data, data_mask;
- u8 msi_count;
u64 pci_addr, pci_addr_mask = 0xff;
+ u8 msi_count, cap;
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSI);
fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
/* Check whether the MSI feature has been enabled by the PCI host. */
@@ -432,14 +438,14 @@ static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn,
u32 *msi_addr_offset)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
- u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
struct cdns_pcie *pcie = &ep->pcie;
u64 pci_addr, pci_addr_mask = 0xff;
u16 flags, mme, data, data_mask;
- u8 msi_count;
+ u8 msi_count, cap;
int ret;
int i;
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSI);
fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
/* Check whether the MSI feature has been enabled by the PCI host. */
@@ -482,16 +488,16 @@ static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn,
static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
u16 interrupt_num)
{
- u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
u32 tbl_offset, msg_data, reg;
struct cdns_pcie *pcie = &ep->pcie;
struct pci_epf_msix_tbl *msix_tbl;
struct cdns_pcie_epf *epf;
u64 pci_addr_mask = 0xff;
u64 msg_addr;
+ u8 bir, cap;
u16 flags;
- u8 bir;
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSIX);
epf = &ep->epf[fn];
if (vfn > 0)
epf = &epf->epf[vfn - 1];
@@ -565,7 +571,9 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
int max_epfs = sizeof(epc->function_num_map) * 8;
int ret, epf, last_fn;
u32 reg, value;
+ u8 cap;
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_EXP);
/*
* BIT(0) is hardwired to 1, hence function 0 is always enabled
* and can't be disabled anyway.
@@ -589,12 +597,10 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
continue;
value = cdns_pcie_ep_fn_readl(pcie, epf,
- CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
- PCI_EXP_DEVCAP);
+ cap + PCI_EXP_DEVCAP);
value &= ~PCI_EXP_DEVCAP_FLR;
cdns_pcie_ep_fn_writel(pcie, epf,
- CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
- PCI_EXP_DEVCAP, value);
+ cap + PCI_EXP_DEVCAP, value);
}
}
@@ -608,14 +614,12 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
}
static const struct pci_epc_features cdns_pcie_epc_vf_features = {
- .linkup_notifier = false,
.msi_capable = true,
.msix_capable = true,
.align = 65536,
};
static const struct pci_epc_features cdns_pcie_epc_features = {
- .linkup_notifier = false,
.msi_capable = true,
.msix_capable = true,
.align = 256,
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
index 59a4631de79f..fffd63d6665e 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
@@ -531,7 +531,7 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(0), addr1);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(0), desc1);
- if (pcie->ops->cpu_addr_fixup)
+ if (pcie->ops && pcie->ops->cpu_addr_fixup)
cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);
addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(12) |
diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c
index 70a19573440e..bd683d0fecb2 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.c
+++ b/drivers/pci/controller/cadence/pcie-cadence.c
@@ -8,6 +8,20 @@
#include <linux/of.h>
#include "pcie-cadence.h"
+#include "../../pci.h"
+
+u8 cdns_pcie_find_capability(struct cdns_pcie *pcie, u8 cap)
+{
+ return PCI_FIND_NEXT_CAP(cdns_pcie_read_cfg, PCI_CAPABILITY_LIST,
+ cap, pcie);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_find_capability);
+
+u16 cdns_pcie_find_ext_capability(struct cdns_pcie *pcie, u8 cap)
+{
+ return PCI_FIND_NEXT_EXT_CAP(cdns_pcie_read_cfg, 0, cap, pcie);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_find_ext_capability);
void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie)
{
@@ -92,7 +106,7 @@ void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);
/* Set the CPU address */
- if (pcie->ops->cpu_addr_fixup)
+ if (pcie->ops && pcie->ops->cpu_addr_fixup)
cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);
addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) |
@@ -123,7 +137,7 @@ void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
}
/* Set the CPU address */
- if (pcie->ops->cpu_addr_fixup)
+ if (pcie->ops && pcie->ops->cpu_addr_fixup)
cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);
addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(17) |
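
cdns_pcie_find_capability() and its extended sibling replace the hard-coded per-configuration offsets (0x90, 0xb0, 0xc0 and 0x200, deleted from the header below) with a walk of the standard capability lists via the shared PCI_FIND_NEXT_CAP()/PCI_FIND_NEXT_EXT_CAP() helpers. The classic walk those helpers implement, as a self-contained sketch over a config-space byte reader:

    #include <linux/pci_regs.h>
    #include <linux/types.h>

    /* Follow the byte-sized next pointers from PCI_CAPABILITY_LIST until
     * the wanted capability ID turns up; ttl bounds the walk against
     * malformed lists. read_byte() stands in for a controller-specific
     * config-space accessor. */
    static u8 example_find_cap(void *priv, u8 want,
                               u8 (*read_byte)(void *priv, int where))
    {
            u8 pos = read_byte(priv, PCI_CAPABILITY_LIST);
            int ttl = 48;

            while (pos && ttl--) {
                    if (read_byte(priv, pos + PCI_CAP_LIST_ID) == want)
                            return pos;
                    pos = read_byte(priv, pos + PCI_CAP_LIST_NEXT);
            }
            return 0;
    }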
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index 1d81c4bf6c6d..e2a853d2c0ab 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -125,11 +125,6 @@
*/
#define CDNS_PCIE_EP_FUNC_BASE(fn) (((fn) << 12) & GENMASK(19, 12))
-#define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET 0x90
-#define CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET 0xb0
-#define CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET 0xc0
-#define CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET 0x200
-
/*
* Endpoint PF Registers
*/
@@ -367,6 +362,37 @@ static inline u32 cdns_pcie_readl(struct cdns_pcie *pcie, u32 reg)
return readl(pcie->reg_base + reg);
}
+static inline u16 cdns_pcie_readw(struct cdns_pcie *pcie, u32 reg)
+{
+ return readw(pcie->reg_base + reg);
+}
+
+static inline u8 cdns_pcie_readb(struct cdns_pcie *pcie, u32 reg)
+{
+ return readb(pcie->reg_base + reg);
+}
+
+static inline int cdns_pcie_read_cfg_byte(struct cdns_pcie *pcie, int where,
+ u8 *val)
+{
+ *val = cdns_pcie_readb(pcie, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline int cdns_pcie_read_cfg_word(struct cdns_pcie *pcie, int where,
+ u16 *val)
+{
+ *val = cdns_pcie_readw(pcie, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline int cdns_pcie_read_cfg_dword(struct cdns_pcie *pcie, int where,
+ u32 *val)
+{
+ *val = cdns_pcie_readl(pcie, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
static inline u32 cdns_pcie_read_sz(void __iomem *addr, int size)
{
void __iomem *aligned_addr = PTR_ALIGN_DOWN(addr, 0x4);
@@ -468,7 +494,7 @@ static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg)
static inline int cdns_pcie_start_link(struct cdns_pcie *pcie)
{
- if (pcie->ops->start_link)
+ if (pcie->ops && pcie->ops->start_link)
return pcie->ops->start_link(pcie);
return 0;
@@ -476,13 +502,13 @@ static inline int cdns_pcie_start_link(struct cdns_pcie *pcie)
static inline void cdns_pcie_stop_link(struct cdns_pcie *pcie)
{
- if (pcie->ops->stop_link)
+ if (pcie->ops && pcie->ops->stop_link)
pcie->ops->stop_link(pcie);
}
static inline bool cdns_pcie_link_up(struct cdns_pcie *pcie)
{
- if (pcie->ops->link_up)
+ if (pcie->ops && pcie->ops->link_up)
return pcie->ops->link_up(pcie);
return true;
@@ -536,6 +562,9 @@ static inline void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep)
}
#endif
+u8 cdns_pcie_find_capability(struct cdns_pcie *pcie, u8 cap);
+u16 cdns_pcie_find_ext_capability(struct cdns_pcie *pcie, u8 cap);
+
void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie);
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
diff --git a/drivers/pci/controller/cadence/pcie-sg2042.c b/drivers/pci/controller/cadence/pcie-sg2042.c
new file mode 100644
index 000000000000..a077b28d4894
--- /dev/null
+++ b/drivers/pci/controller/cadence/pcie-sg2042.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * pcie-sg2042 - PCIe controller driver for Sophgo SG2042 SoC
+ *
+ * Copyright (C) 2025 Sophgo Technology Inc.
+ * Copyright (C) 2025 Chen Wang <unicorn_wang@outlook.com>
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "pcie-cadence.h"
+
+/*
+ * SG2042 only supports 4-byte aligned access on the root bus (i.e. when
+ * reading/writing the Root Port itself), so read32/write32 is required
+ * there. Non-root buses (i.e. the PCIe peripherals' registers) support
+ * 1/2/4-byte aligned access, so the plain read/write accessors are fine.
+ */
+
+static struct pci_ops sg2042_pcie_root_ops = {
+ .map_bus = cdns_pci_map_bus,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write32,
+};
+
+static struct pci_ops sg2042_pcie_child_ops = {
+ .map_bus = cdns_pci_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+static int sg2042_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pci_host_bridge *bridge;
+ struct cdns_pcie *pcie;
+ struct cdns_pcie_rc *rc;
+ int ret;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
+ if (!bridge)
+ return dev_err_probe(dev, -ENOMEM, "Failed to alloc host bridge!\n");
+
+ bridge->ops = &sg2042_pcie_root_ops;
+ bridge->child_ops = &sg2042_pcie_child_ops;
+
+ rc = pci_host_bridge_priv(bridge);
+ pcie = &rc->pcie;
+ pcie->dev = dev;
+
+ platform_set_drvdata(pdev, pcie);
+
+ pm_runtime_set_active(dev);
+ pm_runtime_no_callbacks(dev);
+ devm_pm_runtime_enable(dev);
+
+ ret = cdns_pcie_init_phy(dev, pcie);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init phy!\n");
+
+ ret = cdns_pcie_host_setup(rc);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to setup host!\n");
+ cdns_pcie_disable_phy(pcie);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sg2042_pcie_remove(struct platform_device *pdev)
+{
+ struct cdns_pcie *pcie = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ struct cdns_pcie_rc *rc;
+
+ rc = container_of(pcie, struct cdns_pcie_rc, pcie);
+ cdns_pcie_host_disable(rc);
+
+ cdns_pcie_disable_phy(pcie);
+
+ pm_runtime_disable(dev);
+}
+
+static int sg2042_pcie_suspend_noirq(struct device *dev)
+{
+ struct cdns_pcie *pcie = dev_get_drvdata(dev);
+
+ cdns_pcie_disable_phy(pcie);
+
+ return 0;
+}
+
+static int sg2042_pcie_resume_noirq(struct device *dev)
+{
+ struct cdns_pcie *pcie = dev_get_drvdata(dev);
+ int ret;
+
+ ret = cdns_pcie_enable_phy(pcie);
+ if (ret) {
+ dev_err(dev, "failed to enable PHY\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static DEFINE_NOIRQ_DEV_PM_OPS(sg2042_pcie_pm_ops,
+ sg2042_pcie_suspend_noirq,
+ sg2042_pcie_resume_noirq);
+
+static const struct of_device_id sg2042_pcie_of_match[] = {
+ { .compatible = "sophgo,sg2042-pcie-host" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sg2042_pcie_of_match);
+
+static struct platform_driver sg2042_pcie_driver = {
+ .driver = {
+ .name = "sg2042-pcie",
+ .of_match_table = sg2042_pcie_of_match,
+ .pm = pm_sleep_ptr(&sg2042_pcie_pm_ops),
+ },
+ .probe = sg2042_pcie_probe,
+ .remove = sg2042_pcie_remove,
+};
+module_platform_driver(sg2042_pcie_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PCIe controller driver for SG2042 SoCs");
+MODULE_AUTHOR("Chen Wang <unicorn_wang@outlook.com>");
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index ff6b6d9e18ec..349d4657393c 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -20,6 +20,7 @@ config PCIE_DW_HOST
bool
select PCIE_DW
select IRQ_MSI_LIB
+ select PCI_HOST_COMMON
config PCIE_DW_EP
bool
@@ -298,6 +299,7 @@ config PCIE_QCOM
select CRC8
select PCIE_QCOM_COMMON
select PCI_HOST_COMMON
+ select PCI_PWRCTRL_SLOT
help
Say Y here to enable PCIe controller support on Qualcomm SoCs. The
PCIe controller uses the DesignWare core plus Qualcomm-specific
@@ -422,6 +424,30 @@ config PCIE_SPEAR13XX
help
Say Y here if you want PCIe support on SPEAr13XX SoCs.
+config PCIE_STM32_HOST
+ tristate "STMicroelectronics STM32MP25 PCIe Controller (host mode)"
+ depends on ARCH_STM32 || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Enables Root Complex (RC) support for the DesignWare core based PCIe
+ controller found in STM32MP25 SoC.
+
+ This driver can also be built as a module. If so, the module
+ will be called pcie-stm32.
+
+config PCIE_STM32_EP
+ tristate "STMicroelectronics STM32MP25 PCIe Controller (endpoint mode)"
+ depends on ARCH_STM32 || COMPILE_TEST
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ help
+ Enables Endpoint (EP) support for the DesignWare core based PCIe
+ controller found in STM32MP25 SoC.
+
+ This driver can also be built as a module. If so, the module
+ will be called pcie-stm32-ep.
+
config PCI_DRA7XX
tristate
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index 6919d27798d1..7ae28f3b0fb3 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -31,6 +31,8 @@ obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
obj-$(CONFIG_PCIE_UNIPHIER_EP) += pcie-uniphier-ep.o
obj-$(CONFIG_PCIE_VISCONTI_HOST) += pcie-visconti.o
obj-$(CONFIG_PCIE_RCAR_GEN4) += pcie-rcar-gen4.o
+obj-$(CONFIG_PCIE_STM32_HOST) += pcie-stm32.o
+obj-$(CONFIG_PCIE_STM32_EP) += pcie-stm32-ep.o
# The following drivers are for devices that use the generic ACPI
# pci_root.c driver but don't support standard ECAM config access.
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index f97f5266d196..01cfd9aeb0b8 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -426,7 +426,6 @@ static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
static const struct pci_epc_features dra7xx_pcie_epc_features = {
.linkup_notifier = true,
.msi_capable = true,
- .msix_capable = false,
};
static const struct pci_epc_features*
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index 1f0e98d07109..0bb7d4f5d784 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -53,7 +53,6 @@
struct exynos_pcie {
struct dw_pcie pci;
- void __iomem *elbi_base;
struct clk_bulk_data *clks;
struct phy *phy;
struct regulator_bulk_data supplies[2];
@@ -71,73 +70,78 @@ static u32 exynos_pcie_readl(void __iomem *base, u32 reg)
static void exynos_pcie_sideband_dbi_w_mode(struct exynos_pcie *ep, bool on)
{
+ struct dw_pcie *pci = &ep->pci;
u32 val;
- val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_SLV_AWMISC);
+ val = exynos_pcie_readl(pci->elbi_base, PCIE_ELBI_SLV_AWMISC);
if (on)
val |= PCIE_ELBI_SLV_DBI_ENABLE;
else
val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
- exynos_pcie_writel(ep->elbi_base, val, PCIE_ELBI_SLV_AWMISC);
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_ELBI_SLV_AWMISC);
}
static void exynos_pcie_sideband_dbi_r_mode(struct exynos_pcie *ep, bool on)
{
+ struct dw_pcie *pci = &ep->pci;
u32 val;
- val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_SLV_ARMISC);
+ val = exynos_pcie_readl(pci->elbi_base, PCIE_ELBI_SLV_ARMISC);
if (on)
val |= PCIE_ELBI_SLV_DBI_ENABLE;
else
val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
- exynos_pcie_writel(ep->elbi_base, val, PCIE_ELBI_SLV_ARMISC);
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_ELBI_SLV_ARMISC);
}
static void exynos_pcie_assert_core_reset(struct exynos_pcie *ep)
{
+ struct dw_pcie *pci = &ep->pci;
u32 val;
- val = exynos_pcie_readl(ep->elbi_base, PCIE_CORE_RESET);
+ val = exynos_pcie_readl(pci->elbi_base, PCIE_CORE_RESET);
val &= ~PCIE_CORE_RESET_ENABLE;
- exynos_pcie_writel(ep->elbi_base, val, PCIE_CORE_RESET);
- exynos_pcie_writel(ep->elbi_base, 0, PCIE_STICKY_RESET);
- exynos_pcie_writel(ep->elbi_base, 0, PCIE_NONSTICKY_RESET);
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_CORE_RESET);
+ exynos_pcie_writel(pci->elbi_base, 0, PCIE_STICKY_RESET);
+ exynos_pcie_writel(pci->elbi_base, 0, PCIE_NONSTICKY_RESET);
}
static void exynos_pcie_deassert_core_reset(struct exynos_pcie *ep)
{
+ struct dw_pcie *pci = &ep->pci;
u32 val;
- val = exynos_pcie_readl(ep->elbi_base, PCIE_CORE_RESET);
+ val = exynos_pcie_readl(pci->elbi_base, PCIE_CORE_RESET);
val |= PCIE_CORE_RESET_ENABLE;
- exynos_pcie_writel(ep->elbi_base, val, PCIE_CORE_RESET);
- exynos_pcie_writel(ep->elbi_base, 1, PCIE_STICKY_RESET);
- exynos_pcie_writel(ep->elbi_base, 1, PCIE_NONSTICKY_RESET);
- exynos_pcie_writel(ep->elbi_base, 1, PCIE_APP_INIT_RESET);
- exynos_pcie_writel(ep->elbi_base, 0, PCIE_APP_INIT_RESET);
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_CORE_RESET);
+ exynos_pcie_writel(pci->elbi_base, 1, PCIE_STICKY_RESET);
+ exynos_pcie_writel(pci->elbi_base, 1, PCIE_NONSTICKY_RESET);
+ exynos_pcie_writel(pci->elbi_base, 1, PCIE_APP_INIT_RESET);
+ exynos_pcie_writel(pci->elbi_base, 0, PCIE_APP_INIT_RESET);
}
static int exynos_pcie_start_link(struct dw_pcie *pci)
{
- struct exynos_pcie *ep = to_exynos_pcie(pci);
u32 val;
- val = exynos_pcie_readl(ep->elbi_base, PCIE_SW_WAKE);
+ val = exynos_pcie_readl(pci->elbi_base, PCIE_SW_WAKE);
val &= ~PCIE_BUS_EN;
- exynos_pcie_writel(ep->elbi_base, val, PCIE_SW_WAKE);
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_SW_WAKE);
/* assert LTSSM enable */
- exynos_pcie_writel(ep->elbi_base, PCIE_ELBI_LTSSM_ENABLE,
+ exynos_pcie_writel(pci->elbi_base, PCIE_ELBI_LTSSM_ENABLE,
PCIE_APP_LTSSM_ENABLE);
return 0;
}
static void exynos_pcie_clear_irq_pulse(struct exynos_pcie *ep)
{
- u32 val = exynos_pcie_readl(ep->elbi_base, PCIE_IRQ_PULSE);
+ struct dw_pcie *pci = &ep->pci;
- exynos_pcie_writel(ep->elbi_base, val, PCIE_IRQ_PULSE);
+ u32 val = exynos_pcie_readl(pci->elbi_base, PCIE_IRQ_PULSE);
+
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_IRQ_PULSE);
}
static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
@@ -150,12 +154,14 @@ static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *ep)
{
+ struct dw_pcie *pci = &ep->pci;
+
u32 val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
IRQ_INTC_ASSERT | IRQ_INTD_ASSERT;
- exynos_pcie_writel(ep->elbi_base, val, PCIE_IRQ_EN_PULSE);
- exynos_pcie_writel(ep->elbi_base, 0, PCIE_IRQ_EN_LEVEL);
- exynos_pcie_writel(ep->elbi_base, 0, PCIE_IRQ_EN_SPECIAL);
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_IRQ_EN_PULSE);
+ exynos_pcie_writel(pci->elbi_base, 0, PCIE_IRQ_EN_LEVEL);
+ exynos_pcie_writel(pci->elbi_base, 0, PCIE_IRQ_EN_SPECIAL);
}
static u32 exynos_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
@@ -211,8 +217,7 @@ static struct pci_ops exynos_pci_ops = {
static bool exynos_pcie_link_up(struct dw_pcie *pci)
{
- struct exynos_pcie *ep = to_exynos_pcie(pci);
- u32 val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_RDLH_LINKUP);
+ u32 val = exynos_pcie_readl(pci->elbi_base, PCIE_ELBI_RDLH_LINKUP);
return val & PCIE_ELBI_XMLH_LINKUP;
}
@@ -295,11 +300,6 @@ static int exynos_pcie_probe(struct platform_device *pdev)
if (IS_ERR(ep->phy))
return PTR_ERR(ep->phy);
- /* External Local Bus interface (ELBI) registers */
- ep->elbi_base = devm_platform_ioremap_resource_byname(pdev, "elbi");
- if (IS_ERR(ep->elbi_base))
- return PTR_ERR(ep->elbi_base);
-
ret = devm_clk_bulk_get_all_enabled(dev, &ep->clks);
if (ret < 0)
return ret;
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 80e48746bbaf..4668fc9648bf 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -1387,9 +1387,7 @@ static int imx_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
}
static const struct pci_epc_features imx8m_pcie_epc_features = {
- .linkup_notifier = false,
.msi_capable = true,
- .msix_capable = false,
.bar[BAR_1] = { .type = BAR_RESERVED, },
.bar[BAR_3] = { .type = BAR_RESERVED, },
.bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = SZ_256, },
@@ -1398,9 +1396,7 @@ static const struct pci_epc_features imx8m_pcie_epc_features = {
};
static const struct pci_epc_features imx8q_pcie_epc_features = {
- .linkup_notifier = false,
.msi_capable = true,
- .msix_capable = false,
.bar[BAR_1] = { .type = BAR_RESERVED, },
.bar[BAR_3] = { .type = BAR_RESERVED, },
.bar[BAR_5] = { .type = BAR_RESERVED, },
@@ -1745,6 +1741,10 @@ static int imx_pcie_probe(struct platform_device *pdev)
pci->max_link_speed = 1;
of_property_read_u32(node, "fsl,max-link-speed", &pci->max_link_speed);
+ ret = devm_regulator_get_enable_optional(&pdev->dev, "vpcie3v3aux");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "failed to enable Vaux supply\n");
+
imx_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
if (IS_ERR(imx_pcie->vpcie)) {
if (PTR_ERR(imx_pcie->vpcie) != -ENODEV)
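
For the Vaux hunk above: devm_regulator_get_enable_optional() gets the supply, enables it, and registers managed cleanup in one call, and -ENODEV simply means the supply is not described in DT. The idiom in isolation (supply name reused from the patch):

static int probe_aux_supply(struct device *dev)
{
	int ret;

	ret = devm_regulator_get_enable_optional(dev, "vpcie3v3aux");
	if (ret < 0 && ret != -ENODEV)	/* an absent supply is not an error */
		return dev_err_probe(dev, ret, "failed to enable Vaux supply\n");

	return 0;
}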
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 2b2632e513b5..eb00aa380722 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -960,7 +960,6 @@ static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
}
static const struct pci_epc_features ks_pcie_am654_epc_features = {
- .linkup_notifier = false,
.msi_capable = true,
.msix_capable = true,
.bar[BAR_0] = { .type = BAR_RESERVED, },
@@ -1201,8 +1200,8 @@ static int ks_pcie_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED,
- "ks-pcie-error-irq", ks_pcie);
+ ret = devm_request_irq(dev, irq, ks_pcie_err_irq_handler, IRQF_SHARED,
+ "ks-pcie-error-irq", ks_pcie);
if (ret < 0) {
dev_err(dev, "failed to request error IRQ %d\n",
irq);
@@ -1213,11 +1212,11 @@ static int ks_pcie_probe(struct platform_device *pdev)
if (ret)
num_lanes = 1;
- phy = devm_kzalloc(dev, sizeof(*phy) * num_lanes, GFP_KERNEL);
+ phy = devm_kcalloc(dev, num_lanes, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
- link = devm_kzalloc(dev, sizeof(*link) * num_lanes, GFP_KERNEL);
+ link = devm_kcalloc(dev, num_lanes, sizeof(*link), GFP_KERNEL);
if (!link)
return -ENOMEM;
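
The devm_kcalloc() conversions above are not cosmetic: kcalloc-style allocators reject a count * size product that overflows, where the open-coded multiplication would silently wrap and under-allocate. Roughly what the helper does internally, assuming linux/overflow.h semantics:

#include <linux/overflow.h>

static void *lane_array_alloc(struct device *dev, size_t num_lanes,
			      size_t elem_size)
{
	size_t bytes;

	/* devm_kcalloc() performs this overflow check for the caller */
	if (check_mul_overflow(num_lanes, elem_size, &bytes))
		return NULL;

	return devm_kzalloc(dev, bytes, GFP_KERNEL);
}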
diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c
index 643115f74092..345c281c74fe 100644
--- a/drivers/pci/controller/dwc/pcie-al.c
+++ b/drivers/pci/controller/dwc/pcie-al.c
@@ -352,6 +352,7 @@ static int al_pcie_probe(struct platform_device *pdev)
return -ENOENT;
}
al_pcie->ecam_size = resource_size(ecam_res);
+ pci->pp.native_ecam = true;
controller_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"controller");
diff --git a/drivers/pci/controller/dwc/pcie-amd-mdb.c b/drivers/pci/controller/dwc/pcie-amd-mdb.c
index 9f7251a16d32..3c6e837465bb 100644
--- a/drivers/pci/controller/dwc/pcie-amd-mdb.c
+++ b/drivers/pci/controller/dwc/pcie-amd-mdb.c
@@ -18,6 +18,7 @@
#include <linux/resource.h>
#include <linux/types.h>
+#include "../../pci.h"
#include "pcie-designware.h"
#define AMD_MDB_TLP_IR_STATUS_MISC 0x4C0
@@ -56,6 +57,7 @@
* @slcr: MDB System Level Control and Status Register (SLCR) base
* @intx_domain: INTx IRQ domain pointer
* @mdb_domain: MDB IRQ domain pointer
+ * @perst_gpio: GPIO descriptor for PERST# signal handling
* @intx_irq: INTx IRQ interrupt number
*/
struct amd_mdb_pcie {
@@ -63,6 +65,7 @@ struct amd_mdb_pcie {
void __iomem *slcr;
struct irq_domain *intx_domain;
struct irq_domain *mdb_domain;
+ struct gpio_desc *perst_gpio;
int intx_irq;
};
@@ -284,7 +287,7 @@ static int amd_mdb_pcie_init_irq_domains(struct amd_mdb_pcie *pcie,
struct device_node *pcie_intc_node;
int err;
- pcie_intc_node = of_get_next_child(node, NULL);
+ pcie_intc_node = of_get_child_by_name(node, "interrupt-controller");
if (!pcie_intc_node) {
dev_err(dev, "No PCIe Intc node found\n");
return -ENODEV;
@@ -402,6 +405,28 @@ static int amd_mdb_setup_irq(struct amd_mdb_pcie *pcie,
return 0;
}
+static int amd_mdb_parse_pcie_port(struct amd_mdb_pcie *pcie)
+{
+ struct device *dev = pcie->pci.dev;
+ struct device_node *pcie_port_node __maybe_unused;
+
+ /*
+ * This platform currently supports only one Root Port, so the loop
+ * will execute only once.
+ * TODO: Enhance the driver to handle multiple Root Ports in the future.
+ */
+ for_each_child_of_node_with_prefix(dev->of_node, pcie_port_node, "pcie") {
+ pcie->perst_gpio = devm_fwnode_gpiod_get(dev, of_fwnode_handle(pcie_port_node),
+ "reset", GPIOD_OUT_HIGH, NULL);
+ if (IS_ERR(pcie->perst_gpio))
+ return dev_err_probe(dev, PTR_ERR(pcie->perst_gpio),
+ "Failed to request reset GPIO\n");
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
static int amd_mdb_add_pcie_port(struct amd_mdb_pcie *pcie,
struct platform_device *pdev)
{
@@ -426,6 +451,12 @@ static int amd_mdb_add_pcie_port(struct amd_mdb_pcie *pcie,
pp->ops = &amd_mdb_pcie_host_ops;
+ if (pcie->perst_gpio) {
+ mdelay(PCIE_T_PVPERL_MS);
+ gpiod_set_value_cansleep(pcie->perst_gpio, 0);
+ mdelay(PCIE_RESET_CONFIG_WAIT_MS);
+ }
+
err = dw_pcie_host_init(pp);
if (err) {
dev_err(dev, "Failed to initialize host, err=%d\n", err);
@@ -444,6 +475,7 @@ static int amd_mdb_pcie_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct amd_mdb_pcie *pcie;
struct dw_pcie *pci;
+ int ret;
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
@@ -454,6 +486,24 @@ static int amd_mdb_pcie_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcie);
+ ret = amd_mdb_parse_pcie_port(pcie);
+ /*
+ * If amd_mdb_parse_pcie_port returns -ENODEV, it indicates that the
+ * PCIe Bridge node was not found in the device tree. This is not
+ * considered a fatal error and will trigger a fallback where the
+ * reset GPIO is acquired directly from the PCIe Host Bridge node.
+ */
+ if (ret) {
+ if (ret != -ENODEV)
+ return ret;
+
+ pcie->perst_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(pcie->perst_gpio))
+ return dev_err_probe(dev, PTR_ERR(pcie->perst_gpio),
+ "Failed to request reset GPIO\n");
+ }
+
return amd_mdb_add_pcie_port(pcie, pdev);
}
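
The PERST# handling added above compresses to the sequence below. PCIE_T_PVPERL_MS is the 100 ms Tpvperl minimum (power valid to PERST# inactive) from the PCIe CEM spec, and PCIE_RESET_CONFIG_WAIT_MS covers the post-reset configuration delay; both come from the private drivers/pci header pulled in by the new #include. A condensed sketch:

static void perst_release_sketch(struct gpio_desc *perst_gpio)
{
	/* GPIO was requested with GPIOD_OUT_HIGH, i.e. PERST# asserted */
	mdelay(PCIE_T_PVPERL_MS);			/* let power rails settle */
	gpiod_set_value_cansleep(perst_gpio, 0);	/* release PERST# */
	mdelay(PCIE_RESET_CONFIG_WAIT_MS);		/* wait before config access */
}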
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index 234c8cbcae3a..f4a136ee2daf 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -370,9 +370,7 @@ static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
}
static const struct pci_epc_features artpec6_pcie_epc_features = {
- .linkup_notifier = false,
.msi_capable = true,
- .msix_capable = false,
};
static const struct pci_epc_features *
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 0ae54a94809b..7f2112c2fb21 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -69,37 +69,10 @@ void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_reset_bar);
-static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie_ep *ep, u8 func_no,
- u8 cap_ptr, u8 cap)
-{
- u8 cap_id, next_cap_ptr;
- u16 reg;
-
- if (!cap_ptr)
- return 0;
-
- reg = dw_pcie_ep_readw_dbi(ep, func_no, cap_ptr);
- cap_id = (reg & 0x00ff);
-
- if (cap_id > PCI_CAP_ID_MAX)
- return 0;
-
- if (cap_id == cap)
- return cap_ptr;
-
- next_cap_ptr = (reg & 0xff00) >> 8;
- return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
-}
-
static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
{
- u8 next_cap_ptr;
- u16 reg;
-
- reg = dw_pcie_ep_readw_dbi(ep, func_no, PCI_CAPABILITY_LIST);
- next_cap_ptr = (reg & 0x00ff);
-
- return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
+ return PCI_FIND_NEXT_CAP(dw_pcie_ep_read_cfg, PCI_CAPABILITY_LIST,
+ cap, ep, func_no);
}
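
Both this EP helper and the host-side helper later in the series now defer to PCI_FIND_NEXT_CAP(), parameterized by a config-read accessor. Written out, the walk it replaces looks like this (the ttl bound is my addition; the removed recursive helper bounded itself via the cap_id > PCI_CAP_ID_MAX check instead):

static u8 find_cap_sketch(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
{
	u16 reg = dw_pcie_ep_readw_dbi(ep, func_no, PCI_CAPABILITY_LIST);
	u8 pos = reg & 0x00ff;
	int ttl = 48;	/* bound the walk against a looping cap list */

	while (pos && ttl--) {
		/* low byte: capability ID, high byte: next pointer */
		reg = dw_pcie_ep_readw_dbi(ep, func_no, pos);
		if ((reg & 0x00ff) == cap)
			return pos;
		pos = (reg & 0xff00) >> 8;
	}

	return 0;
}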
/**
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 952f8594b501..20c9333bcb1c 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -8,6 +8,7 @@
* Author: Jingoo Han <jg1.han@samsung.com>
*/
+#include <linux/align.h>
#include <linux/iopoll.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/irq-msi-lib.h>
@@ -32,6 +33,8 @@ static struct pci_ops dw_child_pcie_ops;
MSI_FLAG_PCI_MSIX | \
MSI_GENERIC_FLAGS_MASK)
+#define IS_256MB_ALIGNED(x) IS_ALIGNED(x, SZ_256M)
+
static const struct msi_parent_ops dw_pcie_msi_parent_ops = {
.required_flags = DW_PCIE_MSI_FLAGS_REQUIRED,
.supported_flags = DW_PCIE_MSI_FLAGS_SUPPORTED,
@@ -413,6 +416,95 @@ static void dw_pcie_host_request_msg_tlp_res(struct dw_pcie_rp *pp)
}
}
+static int dw_pcie_config_ecam_iatu(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct dw_pcie_ob_atu_cfg atu = {0};
+ resource_size_t bus_range_max;
+ struct resource_entry *bus;
+ int ret;
+
+ bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS);
+
+ /*
+ * The root bus under the host bridge doesn't require any iATU
+ * configuration, as the DBI region is used to access its config space.
+ * The bus immediately below the root bus needs a type 0 iATU
+ * configuration, and the remaining buses need type 1.
+ */
+ atu.index = 0;
+ atu.type = PCIE_ATU_TYPE_CFG0;
+ atu.parent_bus_addr = pp->cfg0_base + SZ_1M;
+ /* 1 MiB covers one bus: 32 (devices) * 8 (functions) * 4 KiB (config) */
+ atu.size = SZ_1M;
+ atu.ctrl2 = PCIE_ATU_CFG_SHIFT_MODE_ENABLE;
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
+ if (ret)
+ return ret;
+
+ bus_range_max = resource_size(bus->res);
+
+ if (bus_range_max < 2)
+ return 0;
+
+ /* Configure remaining buses in type 1 iATU configuration */
+ atu.index = 1;
+ atu.type = PCIE_ATU_TYPE_CFG1;
+ atu.parent_bus_addr = pp->cfg0_base + SZ_2M;
+ atu.size = (SZ_1M * bus_range_max) - SZ_2M;
+ atu.ctrl2 = PCIE_ATU_CFG_SHIFT_MODE_ENABLE;
+
+ return dw_pcie_prog_outbound_atu(pci, &atu);
+}
+
+static int dw_pcie_create_ecam_window(struct dw_pcie_rp *pp, struct resource *res)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct resource_entry *bus;
+
+ bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS);
+ if (!bus)
+ return -ENODEV;
+
+ pp->cfg = pci_ecam_create(dev, res, bus->res, &pci_generic_ecam_ops);
+ if (IS_ERR(pp->cfg))
+ return PTR_ERR(pp->cfg);
+
+ pci->dbi_base = pp->cfg->win;
+ pci->dbi_phys_addr = res->start;
+
+ return 0;
+}
+
+static bool dw_pcie_ecam_enabled(struct dw_pcie_rp *pp, struct resource *config_res)
+{
+ struct resource *bus_range;
+ u64 nr_buses;
+
+ /* Vendor glue drivers may implement their own ECAM mechanism */
+ if (pp->native_ecam)
+ return false;
+
+ /*
+ * PCIe spec r6.0, sec 7.2.2 mandates the base address used for ECAM to
+ * be aligned on a 2^(n+20) byte boundary, where n is the number of bits
+ * used for representing 'bus' in BDF. Since the DWC cores always use 8
+ * bits for representing 'bus', the base address has to be aligned to
+ * 2^28 byte boundary, which is 256 MiB.
+ */
+ if (!IS_256MB_ALIGNED(config_res->start))
+ return false;
+
+ bus_range = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS)->res;
+ if (!bus_range)
+ return false;
+
+ nr_buses = resource_size(config_res) >> PCIE_ECAM_BUS_SHIFT;
+
+ return nr_buses >= resource_size(bus_range);
+}
+
static int dw_pcie_host_get_resources(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -422,10 +514,6 @@ static int dw_pcie_host_get_resources(struct dw_pcie_rp *pp)
struct resource *res;
int ret;
- ret = dw_pcie_get_resources(pci);
- if (ret)
- return ret;
-
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
if (!res) {
dev_err(dev, "Missing \"config\" reg space\n");
@@ -435,9 +523,32 @@ static int dw_pcie_host_get_resources(struct dw_pcie_rp *pp)
pp->cfg0_size = resource_size(res);
pp->cfg0_base = res->start;
- pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pp->va_cfg0_base))
- return PTR_ERR(pp->va_cfg0_base);
+ pp->ecam_enabled = dw_pcie_ecam_enabled(pp, res);
+ if (pp->ecam_enabled) {
+ ret = dw_pcie_create_ecam_window(pp, res);
+ if (ret)
+ return ret;
+
+ pp->bridge->ops = (struct pci_ops *)&pci_generic_ecam_ops.pci_ops;
+ pp->bridge->sysdata = pp->cfg;
+ pp->cfg->priv = pp;
+ } else {
+ pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pp->va_cfg0_base))
+ return PTR_ERR(pp->va_cfg0_base);
+
+ /* Set default bus ops */
+ pp->bridge->ops = &dw_pcie_ops;
+ pp->bridge->child_ops = &dw_child_pcie_ops;
+ pp->bridge->sysdata = pp;
+ }
+
+ ret = dw_pcie_get_resources(pci);
+ if (ret) {
+ if (pp->cfg)
+ pci_ecam_free(pp->cfg);
+ return ret;
+ }
/* Get the I/O range from DT */
win = resource_list_first_type(&pp->bridge->windows, IORESOURCE_IO);
@@ -476,14 +587,10 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
if (ret)
return ret;
- /* Set default bus ops */
- bridge->ops = &dw_pcie_ops;
- bridge->child_ops = &dw_child_pcie_ops;
-
if (pp->ops->init) {
ret = pp->ops->init(pp);
if (ret)
- return ret;
+ goto err_free_ecam;
}
if (pci_msi_enabled()) {
@@ -525,6 +632,14 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
if (ret)
goto err_free_msi;
+ if (pp->ecam_enabled) {
+ ret = dw_pcie_config_ecam_iatu(pp);
+ if (ret) {
+ dev_err(dev, "Failed to configure iATU in ECAM mode\n");
+ goto err_free_msi;
+ }
+ }
+
/*
* Allocate the resource for MSG TLP before programming the iATU
* outbound window in dw_pcie_setup_rc(). Since the allocation depends
@@ -560,8 +675,6 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
/* Ignore errors, the link may come up later */
dw_pcie_wait_for_link(pci);
- bridge->sysdata = pp;
-
ret = pci_host_probe(bridge);
if (ret)
goto err_stop_link;
@@ -587,6 +700,10 @@ err_deinit_host:
if (pp->ops->deinit)
pp->ops->deinit(pp);
+err_free_ecam:
+ if (pp->cfg)
+ pci_ecam_free(pp->cfg);
+
return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_host_init);
@@ -609,6 +726,9 @@ void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
if (pp->ops->deinit)
pp->ops->deinit(pp);
+
+ if (pp->cfg)
+ pci_ecam_free(pp->cfg);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
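
The ECAM autodetection above boils down to two arithmetic checks: each bus consumes 2^PCIE_ECAM_BUS_SHIFT = 1 MiB of ECAM space (32 devices * 8 functions * 4 KiB), and since the DWC core always decodes 8 bus-number bits, the window base must sit on a 2^(8+20) = 256 MiB boundary. As a standalone predicate:

static bool ecam_window_usable(u64 base, u64 size, u64 buses_needed)
{
	if (!IS_ALIGNED(base, SZ_256M))	/* PCIe r6.0 sec 7.2.2 alignment */
		return false;

	/* one 1 MiB slice per bus in the bridge's bus range */
	return (size >> PCIE_ECAM_BUS_SHIFT) >= buses_needed;
}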
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index 771b9d9be077..12f41886c65d 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -61,7 +61,6 @@ static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
}
static const struct pci_epc_features dw_plat_pcie_epc_features = {
- .linkup_notifier = false,
.msi_capable = true,
.msix_capable = true,
};
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 89aad5a08928..c644216995f6 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -167,6 +167,14 @@ int dw_pcie_get_resources(struct dw_pcie *pci)
}
}
+ /* ELBI is an optional resource */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
+ if (res) {
+ pci->elbi_base = devm_ioremap_resource(pci->dev, res);
+ if (IS_ERR(pci->elbi_base))
+ return PTR_ERR(pci->elbi_base);
+ }
+
/* LLDD is supposed to manually switch the clocks and resets state */
if (dw_pcie_cap_is(pci, REQ_RES)) {
ret = dw_pcie_get_clocks(pci);
@@ -213,83 +221,16 @@ void dw_pcie_version_detect(struct dw_pcie *pci)
pci->type = ver;
}
-/*
- * These interfaces resemble the pci_find_*capability() interfaces, but these
- * are for configuring host controllers, which are bridges *to* PCI devices but
- * are not PCI devices themselves.
- */
-static u8 __dw_pcie_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
- u8 cap)
-{
- u8 cap_id, next_cap_ptr;
- u16 reg;
-
- if (!cap_ptr)
- return 0;
-
- reg = dw_pcie_readw_dbi(pci, cap_ptr);
- cap_id = (reg & 0x00ff);
-
- if (cap_id > PCI_CAP_ID_MAX)
- return 0;
-
- if (cap_id == cap)
- return cap_ptr;
-
- next_cap_ptr = (reg & 0xff00) >> 8;
- return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
-}
-
u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap)
{
- u8 next_cap_ptr;
- u16 reg;
-
- reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
- next_cap_ptr = (reg & 0x00ff);
-
- return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
+ return PCI_FIND_NEXT_CAP(dw_pcie_read_cfg, PCI_CAPABILITY_LIST, cap,
+ pci);
}
EXPORT_SYMBOL_GPL(dw_pcie_find_capability);
-static u16 dw_pcie_find_next_ext_capability(struct dw_pcie *pci, u16 start,
- u8 cap)
-{
- u32 header;
- int ttl;
- int pos = PCI_CFG_SPACE_SIZE;
-
- /* minimum 8 bytes per capability */
- ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
-
- if (start)
- pos = start;
-
- header = dw_pcie_readl_dbi(pci, pos);
- /*
- * If we have no capabilities, this is indicated by cap ID,
- * cap version and next pointer all being 0.
- */
- if (header == 0)
- return 0;
-
- while (ttl-- > 0) {
- if (PCI_EXT_CAP_ID(header) == cap && pos != start)
- return pos;
-
- pos = PCI_EXT_CAP_NEXT(header);
- if (pos < PCI_CFG_SPACE_SIZE)
- break;
-
- header = dw_pcie_readl_dbi(pci, pos);
- }
-
- return 0;
-}
-
u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap)
{
- return dw_pcie_find_next_ext_capability(pci, 0, cap);
+ return PCI_FIND_NEXT_EXT_CAP(dw_pcie_read_cfg, 0, cap, pci);
}
EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability);
@@ -302,8 +243,8 @@ static u16 __dw_pcie_find_vsec_capability(struct dw_pcie *pci, u16 vendor_id,
if (vendor_id != dw_pcie_readw_dbi(pci, PCI_VENDOR_ID))
return 0;
- while ((vsec = dw_pcie_find_next_ext_capability(pci, vsec,
- PCI_EXT_CAP_ID_VNDR))) {
+ while ((vsec = PCI_FIND_NEXT_EXT_CAP(dw_pcie_read_cfg, vsec,
+ PCI_EXT_CAP_ID_VNDR, pci))) {
header = dw_pcie_readl_dbi(pci, vsec + PCI_VNDR_HEADER);
if (PCI_VNDR_HEADER_ID(header) == vsec_id)
return vsec;
@@ -567,7 +508,7 @@ int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
val = dw_pcie_enable_ecrc(val);
dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_REGION_CTRL1, val);
- val = PCIE_ATU_ENABLE;
+ val = PCIE_ATU_ENABLE | atu->ctrl2;
if (atu->type == PCIE_ATU_TYPE_MSG) {
/* The data-less messages only for now */
val |= PCIE_ATU_INHIBIT_PAYLOAD | atu->code;
@@ -841,6 +782,9 @@ static void dw_pcie_link_set_max_link_width(struct dw_pcie *pci, u32 num_lanes)
case 8:
plc |= PORT_LINK_MODE_8_LANES;
break;
+ case 16:
+ plc |= PORT_LINK_MODE_16_LANES;
+ break;
default:
dev_err(pci->dev, "num-lanes %u: invalid value\n", num_lanes);
return;
@@ -1045,9 +989,7 @@ static int dw_pcie_edma_irq_verify(struct dw_pcie *pci)
char name[15];
int ret;
- if (pci->edma.nr_irqs == 1)
- return 0;
- else if (pci->edma.nr_irqs > 1)
+ if (pci->edma.nr_irqs > 1)
return pci->edma.nr_irqs != ch_cnt ? -EINVAL : 0;
ret = platform_get_irq_byname_optional(pdev, "dma");
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 00f52d472dcd..e995f692a1ec 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -20,6 +20,7 @@
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/pci.h>
+#include <linux/pci-ecam.h>
#include <linux/reset.h>
#include <linux/pci-epc.h>
@@ -90,6 +91,7 @@
#define PORT_LINK_MODE_2_LANES PORT_LINK_MODE(0x3)
#define PORT_LINK_MODE_4_LANES PORT_LINK_MODE(0x7)
#define PORT_LINK_MODE_8_LANES PORT_LINK_MODE(0xf)
+#define PORT_LINK_MODE_16_LANES PORT_LINK_MODE(0x1f)
#define PCIE_PORT_LANE_SKEW 0x714
#define PORT_LANE_SKEW_INSERT_MASK GENMASK(23, 0)
@@ -123,7 +125,6 @@
#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE BIT(16)
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT 24
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK GENMASK(25, 24)
-#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_16_0GT 0x1
#define GEN3_EQ_CONTROL_OFF 0x8A8
#define GEN3_EQ_CONTROL_OFF_FB_MODE GENMASK(3, 0)
@@ -134,8 +135,8 @@
#define GEN3_EQ_FB_MODE_DIR_CHANGE_OFF 0x8AC
#define GEN3_EQ_FMDC_T_MIN_PHASE23 GENMASK(4, 0)
#define GEN3_EQ_FMDC_N_EVALS GENMASK(9, 5)
-#define GEN3_EQ_FMDC_MAX_PRE_CUSROR_DELTA GENMASK(13, 10)
-#define GEN3_EQ_FMDC_MAX_POST_CUSROR_DELTA GENMASK(17, 14)
+#define GEN3_EQ_FMDC_MAX_PRE_CURSOR_DELTA GENMASK(13, 10)
+#define GEN3_EQ_FMDC_MAX_POST_CURSOR_DELTA GENMASK(17, 14)
#define PCIE_PORT_MULTI_LANE_CTRL 0x8C0
#define PORT_MLTI_UPCFG_SUPPORT BIT(7)
@@ -169,6 +170,7 @@
#define PCIE_ATU_REGION_CTRL2 0x004
#define PCIE_ATU_ENABLE BIT(31)
#define PCIE_ATU_BAR_MODE_ENABLE BIT(30)
+#define PCIE_ATU_CFG_SHIFT_MODE_ENABLE BIT(28)
#define PCIE_ATU_INHIBIT_PAYLOAD BIT(22)
#define PCIE_ATU_FUNC_NUM_MATCH_EN BIT(19)
#define PCIE_ATU_LOWER_BASE 0x008
@@ -387,6 +389,7 @@ struct dw_pcie_ob_atu_cfg {
u8 func_no;
u8 code;
u8 routing;
+ u32 ctrl2;
u64 parent_bus_addr;
u64 pci_addr;
u64 size;
@@ -425,6 +428,9 @@ struct dw_pcie_rp {
struct resource *msg_res;
bool use_linkup_irq;
struct pci_eq_presets presets;
+ struct pci_config_window *cfg;
+ bool ecam_enabled;
+ bool native_ecam;
};
struct dw_pcie_ep_ops {
@@ -492,6 +498,7 @@ struct dw_pcie {
resource_size_t dbi_phys_addr;
void __iomem *dbi_base2;
void __iomem *atu_base;
+ void __iomem *elbi_base;
resource_size_t atu_phys_addr;
size_t atu_size;
resource_size_t parent_bus_offset;
@@ -609,6 +616,27 @@ static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val)
dw_pcie_write_dbi2(pci, reg, 0x4, val);
}
+static inline int dw_pcie_read_cfg_byte(struct dw_pcie *pci, int where,
+ u8 *val)
+{
+ *val = dw_pcie_readb_dbi(pci, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline int dw_pcie_read_cfg_word(struct dw_pcie *pci, int where,
+ u16 *val)
+{
+ *val = dw_pcie_readw_dbi(pci, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline int dw_pcie_read_cfg_dword(struct dw_pcie *pci, int where,
+ u32 *val)
+{
+ *val = dw_pcie_readl_dbi(pci, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
static inline unsigned int dw_pcie_ep_get_dbi_offset(struct dw_pcie_ep *ep,
u8 func_no)
{
@@ -674,6 +702,27 @@ static inline u8 dw_pcie_ep_readb_dbi(struct dw_pcie_ep *ep, u8 func_no,
return dw_pcie_ep_read_dbi(ep, func_no, reg, 0x1);
}
+static inline int dw_pcie_ep_read_cfg_byte(struct dw_pcie_ep *ep, u8 func_no,
+ int where, u8 *val)
+{
+ *val = dw_pcie_ep_readb_dbi(ep, func_no, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline int dw_pcie_ep_read_cfg_word(struct dw_pcie_ep *ep, u8 func_no,
+ int where, u16 *val)
+{
+ *val = dw_pcie_ep_readw_dbi(ep, func_no, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline int dw_pcie_ep_read_cfg_dword(struct dw_pcie_ep *ep, u8 func_no,
+ int where, u32 *val)
+{
+ *val = dw_pcie_ep_readl_dbi(ep, func_no, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
static inline unsigned int dw_pcie_ep_get_dbi2_offset(struct dw_pcie_ep *ep,
u8 func_no)
{
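
The dw_pcie_read_cfg_{byte,word,dword}() and dw_pcie_ep_read_cfg_*() wrappers above exist only to adapt the always-succeeding DBI reads to the status-returning accessor shape that PCI_FIND_NEXT_CAP() expects; my reading of the macro's contract is that it appends the byte/word/dword suffix to the name it is given. A typical call, mirroring the dw_pcie_find_capability() hunk earlier:

static u8 find_msi_cap(struct dw_pcie *pci)
{
	/* expands to calls through dw_pcie_read_cfg_byte()/..._word() */
	return PCI_FIND_NEXT_CAP(dw_pcie_read_cfg, PCI_CAPABILITY_LIST,
				 PCI_CAP_ID_MSI, pci);
}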
diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
index b5f5eee5a50e..3e2752c7dd09 100644
--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
@@ -11,6 +11,7 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
+#include <linux/hw_bitfield.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/mfd/syscon.h>
@@ -29,18 +30,18 @@
* The upper 16 bits of PCIE_CLIENT_CONFIG are a write
* mask for the lower 16 bits.
*/
-#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val))
-#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val)
-#define HIWORD_DISABLE_BIT(val) HIWORD_UPDATE(val, ~val)
#define to_rockchip_pcie(x) dev_get_drvdata((x)->dev)
/* General Control Register */
#define PCIE_CLIENT_GENERAL_CON 0x0
-#define PCIE_CLIENT_RC_MODE HIWORD_UPDATE_BIT(0x40)
-#define PCIE_CLIENT_EP_MODE HIWORD_UPDATE(0xf0, 0x0)
-#define PCIE_CLIENT_ENABLE_LTSSM HIWORD_UPDATE_BIT(0xc)
-#define PCIE_CLIENT_DISABLE_LTSSM HIWORD_UPDATE(0x0c, 0x8)
+#define PCIE_CLIENT_MODE_MASK GENMASK(7, 4)
+#define PCIE_CLIENT_MODE_EP 0x0UL
+#define PCIE_CLIENT_MODE_RC 0x4UL
+#define PCIE_CLIENT_SET_MODE(x) FIELD_PREP_WM16(PCIE_CLIENT_MODE_MASK, (x))
+#define PCIE_CLIENT_LD_RQ_RST_GRT FIELD_PREP_WM16(BIT(3), 1)
+#define PCIE_CLIENT_ENABLE_LTSSM FIELD_PREP_WM16(BIT(2), 1)
+#define PCIE_CLIENT_DISABLE_LTSSM FIELD_PREP_WM16(BIT(2), 0)
/* Interrupt Status Register Related to Legacy Interrupt */
#define PCIE_CLIENT_INTR_STATUS_LEGACY 0x8
@@ -52,6 +53,11 @@
/* Interrupt Mask Register Related to Legacy Interrupt */
#define PCIE_CLIENT_INTR_MASK_LEGACY 0x1c
+#define PCIE_INTR_MASK GENMASK(7, 0)
+#define PCIE_INTR_CLAMP(_x) ((BIT((_x)) & PCIE_INTR_MASK))
+#define PCIE_INTR_LEGACY_MASK(x) (PCIE_INTR_CLAMP((x)) | \
+ (PCIE_INTR_CLAMP((x)) << 16))
+#define PCIE_INTR_LEGACY_UNMASK(x) (PCIE_INTR_CLAMP((x)) << 16)
/* Interrupt Mask Register Related to Miscellaneous Operation */
#define PCIE_CLIENT_INTR_MASK_MISC 0x24
@@ -116,14 +122,14 @@ static void rockchip_pcie_intx_handler(struct irq_desc *desc)
static void rockchip_intx_mask(struct irq_data *data)
{
rockchip_pcie_writel_apb(irq_data_get_irq_chip_data(data),
- HIWORD_UPDATE_BIT(BIT(data->hwirq)),
+ PCIE_INTR_LEGACY_MASK(data->hwirq),
PCIE_CLIENT_INTR_MASK_LEGACY);
};
static void rockchip_intx_unmask(struct irq_data *data)
{
rockchip_pcie_writel_apb(irq_data_get_irq_chip_data(data),
- HIWORD_DISABLE_BIT(BIT(data->hwirq)),
+ PCIE_INTR_LEGACY_UNMASK(data->hwirq),
PCIE_CLIENT_INTR_MASK_LEGACY);
};
@@ -325,7 +331,6 @@ static const struct pci_epc_features rockchip_pcie_epc_features_rk3568 = {
.linkup_notifier = true,
.msi_capable = true,
.msix_capable = true,
- .intx_capable = false,
.align = SZ_64K,
.bar[BAR_0] = { .type = BAR_RESIZABLE, },
.bar[BAR_1] = { .type = BAR_RESIZABLE, },
@@ -346,7 +351,6 @@ static const struct pci_epc_features rockchip_pcie_epc_features_rk3588 = {
.linkup_notifier = true,
.msi_capable = true,
.msix_capable = true,
- .intx_capable = false,
.align = SZ_64K,
.bar[BAR_0] = { .type = BAR_RESIZABLE, },
.bar[BAR_1] = { .type = BAR_RESIZABLE, },
@@ -489,7 +493,7 @@ static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg)
dev_dbg(dev, "hot reset or link-down reset\n");
dw_pcie_ep_linkdown(&pci->ep);
/* Stop delaying link training. */
- val = HIWORD_UPDATE_BIT(PCIE_LTSSM_APP_DLY2_DONE);
+ val = FIELD_PREP_WM16(PCIE_LTSSM_APP_DLY2_DONE, 1);
rockchip_pcie_writel_apb(rockchip, val,
PCIE_CLIENT_HOT_RESET_CTRL);
}
@@ -528,10 +532,11 @@ static int rockchip_pcie_configure_rc(struct platform_device *pdev,
}
/* LTSSM enable control mode */
- val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE);
+ val = FIELD_PREP_WM16(PCIE_LTSSM_ENABLE_ENHANCE, 1);
rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
- rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_RC_MODE,
+ rockchip_pcie_writel_apb(rockchip,
+ PCIE_CLIENT_SET_MODE(PCIE_CLIENT_MODE_RC),
PCIE_CLIENT_GENERAL_CON);
pp = &rockchip->pci.pp;
@@ -545,7 +550,7 @@ static int rockchip_pcie_configure_rc(struct platform_device *pdev,
}
/* unmask DLL up/down indicator */
- val = HIWORD_UPDATE(PCIE_RDLH_LINK_UP_CHGED, 0);
+ val = FIELD_PREP_WM16(PCIE_RDLH_LINK_UP_CHGED, 0);
rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_INTR_MASK_MISC);
return ret;
@@ -577,10 +582,12 @@ static int rockchip_pcie_configure_ep(struct platform_device *pdev,
* LTSSM enable control mode, and automatically delay link training on
* hot reset/link-down reset.
*/
- val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE | PCIE_LTSSM_APP_DLY2_EN);
+ val = FIELD_PREP_WM16(PCIE_LTSSM_ENABLE_ENHANCE, 1) |
+ FIELD_PREP_WM16(PCIE_LTSSM_APP_DLY2_EN, 1);
rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
- rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_EP_MODE,
+ rockchip_pcie_writel_apb(rockchip,
+ PCIE_CLIENT_SET_MODE(PCIE_CLIENT_MODE_EP),
PCIE_CLIENT_GENERAL_CON);
rockchip->pci.ep.ops = &rockchip_pcie_ep_ops;
@@ -604,7 +611,8 @@ static int rockchip_pcie_configure_ep(struct platform_device *pdev,
pci_epc_init_notify(rockchip->pci.ep.epc);
/* unmask DLL up/down indicator and hot reset/link-down reset */
- val = HIWORD_UPDATE(PCIE_RDLH_LINK_UP_CHGED | PCIE_LINK_REQ_RST_NOT_INT, 0);
+ val = FIELD_PREP_WM16(PCIE_RDLH_LINK_UP_CHGED, 0) |
+ FIELD_PREP_WM16(PCIE_LINK_REQ_RST_NOT_INT, 0);
rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_INTR_MASK_MISC);
return ret;
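
Rockchip's APB registers use a "write mask in the upper halfword" scheme: bit n of a written value only takes effect when bit n+16 is also set. FIELD_PREP_WM16() packs both halves at once; open-coded, the old HIWORD_UPDATE() and the new macro both amount to:

#include <linux/bitfield.h>

static u32 hiword_update(u32 mask, u32 val)
{
	/* upper 16 bits: write-enable mask; lower 16 bits: field value */
	return (mask << 16) | FIELD_PREP(mask, val);
}

/* hiword_update(PCIE_CLIENT_MODE_MASK, PCIE_CLIENT_MODE_RC) == 0x00f00040 */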
diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c
index 67dd3337b447..60e74ac782af 100644
--- a/drivers/pci/controller/dwc/pcie-keembay.c
+++ b/drivers/pci/controller/dwc/pcie-keembay.c
@@ -309,7 +309,6 @@ static int keembay_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
}
static const struct pci_epc_features keembay_pcie_epc_features = {
- .linkup_notifier = false,
.msi_capable = true,
.msix_capable = true,
.bar[BAR_0] = { .only_64bit = true, },
diff --git a/drivers/pci/controller/dwc/pcie-qcom-common.c b/drivers/pci/controller/dwc/pcie-qcom-common.c
index 3aad19b56da8..01c5387e53bf 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-common.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-common.c
@@ -8,9 +8,11 @@
#include "pcie-designware.h"
#include "pcie-qcom-common.h"
-void qcom_pcie_common_set_16gt_equalization(struct dw_pcie *pci)
+void qcom_pcie_common_set_equalization(struct dw_pcie *pci)
{
+ struct device *dev = pci->dev;
u32 reg;
+ u16 speed;
/*
* GEN3_RELATED_OFF register is repurposed to apply equalization
@@ -19,32 +21,40 @@ void qcom_pcie_common_set_16gt_equalization(struct dw_pcie *pci)
* determines the data rate for which these equalization settings are
* applied.
*/
- reg = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
- reg &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
- reg &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
- reg |= FIELD_PREP(GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK,
- GEN3_RELATED_OFF_RATE_SHADOW_SEL_16_0GT);
- dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, reg);
- reg = dw_pcie_readl_dbi(pci, GEN3_EQ_FB_MODE_DIR_CHANGE_OFF);
- reg &= ~(GEN3_EQ_FMDC_T_MIN_PHASE23 |
- GEN3_EQ_FMDC_N_EVALS |
- GEN3_EQ_FMDC_MAX_PRE_CUSROR_DELTA |
- GEN3_EQ_FMDC_MAX_POST_CUSROR_DELTA);
- reg |= FIELD_PREP(GEN3_EQ_FMDC_T_MIN_PHASE23, 0x1) |
- FIELD_PREP(GEN3_EQ_FMDC_N_EVALS, 0xd) |
- FIELD_PREP(GEN3_EQ_FMDC_MAX_PRE_CUSROR_DELTA, 0x5) |
- FIELD_PREP(GEN3_EQ_FMDC_MAX_POST_CUSROR_DELTA, 0x5);
- dw_pcie_writel_dbi(pci, GEN3_EQ_FB_MODE_DIR_CHANGE_OFF, reg);
+ for (speed = PCIE_SPEED_8_0GT; speed <= pcie_link_speed[pci->max_link_speed]; speed++) {
+ if (speed > PCIE_SPEED_32_0GT) {
+ dev_warn(dev, "Skipped equalization settings for unsupported data rate\n");
+ break;
+ }
- reg = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
- reg &= ~(GEN3_EQ_CONTROL_OFF_FB_MODE |
- GEN3_EQ_CONTROL_OFF_PHASE23_EXIT_MODE |
- GEN3_EQ_CONTROL_OFF_FOM_INC_INITIAL_EVAL |
- GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC);
- dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, reg);
+ reg = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ reg &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+ reg &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
+ reg |= FIELD_PREP(GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK,
+ speed - PCIE_SPEED_8_0GT);
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, reg);
+
+ reg = dw_pcie_readl_dbi(pci, GEN3_EQ_FB_MODE_DIR_CHANGE_OFF);
+ reg &= ~(GEN3_EQ_FMDC_T_MIN_PHASE23 |
+ GEN3_EQ_FMDC_N_EVALS |
+ GEN3_EQ_FMDC_MAX_PRE_CURSOR_DELTA |
+ GEN3_EQ_FMDC_MAX_POST_CURSOR_DELTA);
+ reg |= FIELD_PREP(GEN3_EQ_FMDC_T_MIN_PHASE23, 0x1) |
+ FIELD_PREP(GEN3_EQ_FMDC_N_EVALS, 0xd) |
+ FIELD_PREP(GEN3_EQ_FMDC_MAX_PRE_CURSOR_DELTA, 0x5) |
+ FIELD_PREP(GEN3_EQ_FMDC_MAX_POST_CURSOR_DELTA, 0x5);
+ dw_pcie_writel_dbi(pci, GEN3_EQ_FB_MODE_DIR_CHANGE_OFF, reg);
+
+ reg = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
+ reg &= ~(GEN3_EQ_CONTROL_OFF_FB_MODE |
+ GEN3_EQ_CONTROL_OFF_PHASE23_EXIT_MODE |
+ GEN3_EQ_CONTROL_OFF_FOM_INC_INITIAL_EVAL |
+ GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC);
+ dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, reg);
+ }
}
-EXPORT_SYMBOL_GPL(qcom_pcie_common_set_16gt_equalization);
+EXPORT_SYMBOL_GPL(qcom_pcie_common_set_equalization);
void qcom_pcie_common_set_16gt_lane_margining(struct dw_pcie *pci)
{
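
The loop in qcom_pcie_common_set_equalization() leans on the RATE_SHADOW_SEL encoding being zero-based at 8 GT/s, so the same register writes can be replayed once per data rate. The assumed mapping, derived from the `speed - PCIE_SPEED_8_0GT` expression above:

static u32 rate_shadow_sel(enum pci_bus_speed speed)
{
	/* 8 GT/s -> 0, 16 GT/s -> 1, 32 GT/s -> 2 (enum values are consecutive) */
	return speed - PCIE_SPEED_8_0GT;
}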
diff --git a/drivers/pci/controller/dwc/pcie-qcom-common.h b/drivers/pci/controller/dwc/pcie-qcom-common.h
index 7d88d29e4766..7f5ca2fd9a72 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-common.h
+++ b/drivers/pci/controller/dwc/pcie-qcom-common.h
@@ -8,7 +8,7 @@
struct dw_pcie;
-void qcom_pcie_common_set_16gt_equalization(struct dw_pcie *pci);
+void qcom_pcie_common_set_equalization(struct dw_pcie *pci);
void qcom_pcie_common_set_16gt_lane_margining(struct dw_pcie *pci);
#endif
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
index bf7c6ac0f3e3..f1bc0ac81a92 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -179,7 +179,6 @@ struct qcom_pcie_ep_cfg {
* struct qcom_pcie_ep - Qualcomm PCIe Endpoint Controller
* @pci: Designware PCIe controller struct
* @parf: Qualcomm PCIe specific PARF register base
- * @elbi: Designware PCIe specific ELBI register base
* @mmio: MMIO register base
* @perst_map: PERST regmap
* @mmio_res: MMIO region resource
@@ -202,7 +201,6 @@ struct qcom_pcie_ep {
struct dw_pcie pci;
void __iomem *parf;
- void __iomem *elbi;
void __iomem *mmio;
struct regmap *perst_map;
struct resource *mmio_res;
@@ -267,10 +265,9 @@ static void qcom_pcie_ep_configure_tcsr(struct qcom_pcie_ep *pcie_ep)
static bool qcom_pcie_dw_link_up(struct dw_pcie *pci)
{
- struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
u32 reg;
- reg = readl_relaxed(pcie_ep->elbi + ELBI_SYS_STTS);
+ reg = readl_relaxed(pci->elbi_base + ELBI_SYS_STTS);
return reg & XMLH_LINK_UP;
}
@@ -294,16 +291,15 @@ static void qcom_pcie_dw_stop_link(struct dw_pcie *pci)
static void qcom_pcie_dw_write_dbi2(struct dw_pcie *pci, void __iomem *base,
u32 reg, size_t size, u32 val)
{
- struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
int ret;
- writel(1, pcie_ep->elbi + ELBI_CS2_ENABLE);
+ writel(1, pci->elbi_base + ELBI_CS2_ENABLE);
ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
if (ret)
dev_err(pci->dev, "Failed to write DBI2 register (0x%x): %d\n", reg, ret);
- writel(0, pcie_ep->elbi + ELBI_CS2_ENABLE);
+ writel(0, pci->elbi_base + ELBI_CS2_ENABLE);
}
static void qcom_pcie_ep_icc_update(struct qcom_pcie_ep *pcie_ep)
@@ -511,10 +507,10 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
goto err_disable_resources;
}
- if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT) {
- qcom_pcie_common_set_16gt_equalization(pci);
+ qcom_pcie_common_set_equalization(pci);
+
+ if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT)
qcom_pcie_common_set_16gt_lane_margining(pci);
- }
/*
* The physical address of the MMIO region which is exposed as the BAR
@@ -583,11 +579,6 @@ static int qcom_pcie_ep_get_io_resources(struct platform_device *pdev,
return PTR_ERR(pci->dbi_base);
pci->dbi_base2 = pci->dbi_base;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
- pcie_ep->elbi = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pcie_ep->elbi))
- return PTR_ERR(pcie_ep->elbi);
-
pcie_ep->mmio_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"mmio");
if (!pcie_ep->mmio_res) {
@@ -831,7 +822,6 @@ static void qcom_pcie_ep_init_debugfs(struct qcom_pcie_ep *pcie_ep)
static const struct pci_epc_features qcom_pcie_epc_features = {
.linkup_notifier = true,
.msi_capable = true,
- .msix_capable = false,
.align = SZ_4K,
.bar[BAR_0] = { .only_64bit = true, },
.bar[BAR_1] = { .type = BAR_RESERVED, },
@@ -874,7 +864,6 @@ static int qcom_pcie_ep_probe(struct platform_device *pdev)
pcie_ep->pci.dev = dev;
pcie_ep->pci.ops = &pci_ops;
pcie_ep->pci.ep.ops = &pci_ep_ops;
- pcie_ep->pci.edma.nr_irqs = 1;
pcie_ep->cfg = of_device_get_match_data(dev);
if (pcie_ep->cfg && pcie_ep->cfg->hdma_support) {
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 294babe1816e..805edbbfe7eb 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -55,6 +55,7 @@
#define PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1a8
#define PARF_Q2A_FLUSH 0x1ac
#define PARF_LTSSM 0x1b0
+#define PARF_SLV_DBI_ELBI 0x1b4
#define PARF_INT_ALL_STATUS 0x224
#define PARF_INT_ALL_CLEAR 0x228
#define PARF_INT_ALL_MASK 0x22c
@@ -64,6 +65,16 @@
#define PARF_DBI_BASE_ADDR_V2_HI 0x354
#define PARF_SLV_ADDR_SPACE_SIZE_V2 0x358
#define PARF_SLV_ADDR_SPACE_SIZE_V2_HI 0x35c
+#define PARF_BLOCK_SLV_AXI_WR_BASE 0x360
+#define PARF_BLOCK_SLV_AXI_WR_BASE_HI 0x364
+#define PARF_BLOCK_SLV_AXI_WR_LIMIT 0x368
+#define PARF_BLOCK_SLV_AXI_WR_LIMIT_HI 0x36c
+#define PARF_BLOCK_SLV_AXI_RD_BASE 0x370
+#define PARF_BLOCK_SLV_AXI_RD_BASE_HI 0x374
+#define PARF_BLOCK_SLV_AXI_RD_LIMIT 0x378
+#define PARF_BLOCK_SLV_AXI_RD_LIMIT_HI 0x37c
+#define PARF_ECAM_BASE 0x380
+#define PARF_ECAM_BASE_HI 0x384
#define PARF_NO_SNOOP_OVERRIDE 0x3d4
#define PARF_ATU_BASE_ADDR 0x634
#define PARF_ATU_BASE_ADDR_HI 0x638
@@ -87,6 +98,7 @@
/* PARF_SYS_CTRL register fields */
#define MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN BIT(29)
+#define PCIE_ECAM_BLOCKER_EN BIT(26)
#define MST_WAKEUP_EN BIT(13)
#define SLV_WAKEUP_EN BIT(12)
#define MSTR_ACLK_CGC_DIS BIT(10)
@@ -134,6 +146,9 @@
/* PARF_LTSSM register fields */
#define LTSSM_EN BIT(8)
+/* PARF_SLV_DBI_ELBI */
+#define SLV_DBI_ELBI_ADDR_BASE GENMASK(11, 0)
+
/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
#define PARF_INT_ALL_LINK_UP BIT(13)
#define PARF_INT_MSI_DEV_0_7 GENMASK(30, 23)
@@ -247,7 +262,6 @@ struct qcom_pcie_ops {
int (*get_resources)(struct qcom_pcie *pcie);
int (*init)(struct qcom_pcie *pcie);
int (*post_init)(struct qcom_pcie *pcie);
- void (*host_post_init)(struct qcom_pcie *pcie);
void (*deinit)(struct qcom_pcie *pcie);
void (*ltssm_enable)(struct qcom_pcie *pcie);
int (*config_sid)(struct qcom_pcie *pcie);
@@ -276,11 +290,8 @@ struct qcom_pcie_port {
struct qcom_pcie {
struct dw_pcie *pci;
void __iomem *parf; /* DT parf */
- void __iomem *elbi; /* DT elbi */
void __iomem *mhi;
union qcom_pcie_resources res;
- struct phy *phy;
- struct gpio_desc *reset;
struct icc_path *icc_mem;
struct icc_path *icc_cpu;
const struct qcom_pcie_cfg *cfg;
@@ -297,11 +308,8 @@ static void qcom_perst_assert(struct qcom_pcie *pcie, bool assert)
struct qcom_pcie_port *port;
int val = assert ? 1 : 0;
- if (list_empty(&pcie->ports))
- gpiod_set_value_cansleep(pcie->reset, val);
- else
- list_for_each_entry(port, &pcie->ports, list)
- gpiod_set_value_cansleep(port->reset, val);
+ list_for_each_entry(port, &pcie->ports, list)
+ gpiod_set_value_cansleep(port->reset, val);
usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
@@ -318,14 +326,55 @@ static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
qcom_perst_assert(pcie, false);
}
+static void qcom_pci_config_ecam(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
+ u64 addr, addr_end;
+ u32 val;
+
+ writel_relaxed(lower_32_bits(pci->dbi_phys_addr), pcie->parf + PARF_ECAM_BASE);
+ writel_relaxed(upper_32_bits(pci->dbi_phys_addr), pcie->parf + PARF_ECAM_BASE_HI);
+
+ /*
+ * The only device on the root bus is a single Root Port. If we try to
+ * access any devices other than Device/Function 00.0 on Bus 0, the TLP
+ * will go outside of the controller to the PCI bus. But with CFG Shift
+ * Feature (ECAM) enabled in iATU, there is no guarantee that the
+ * response is going to be all F's. Hence, to make sure that the
+ * requester gets all F's response for accesses other than the Root
+ * Port, configure iATU to block the transactions starting from
+ * function 1 of the root bus to the end of the root bus (i.e., from
+ * dbi_base + 4KB to dbi_base + 1MB).
+ */
+ addr = pci->dbi_phys_addr + SZ_4K;
+ writel_relaxed(lower_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_WR_BASE);
+ writel_relaxed(upper_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_WR_BASE_HI);
+
+ writel_relaxed(lower_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_RD_BASE);
+ writel_relaxed(upper_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_RD_BASE_HI);
+
+ addr_end = pci->dbi_phys_addr + SZ_1M - 1;
+
+ writel_relaxed(lower_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_WR_LIMIT);
+ writel_relaxed(upper_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_WR_LIMIT_HI);
+
+ writel_relaxed(lower_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_RD_LIMIT);
+ writel_relaxed(upper_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_RD_LIMIT_HI);
+
+ val = readl_relaxed(pcie->parf + PARF_SYS_CTRL);
+ val |= PCIE_ECAM_BLOCKER_EN;
+ writel_relaxed(val, pcie->parf + PARF_SYS_CTRL);
+}
+
static int qcom_pcie_start_link(struct dw_pcie *pci)
{
struct qcom_pcie *pcie = to_qcom_pcie(pci);
- if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT) {
- qcom_pcie_common_set_16gt_equalization(pci);
+ qcom_pcie_common_set_equalization(pci);
+
+ if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT)
qcom_pcie_common_set_16gt_lane_margining(pci);
- }
/* Enable Link Training state machine */
if (pcie->cfg->ops->ltssm_enable)
@@ -414,12 +463,17 @@ static void qcom_pcie_configure_dbi_atu_base(struct qcom_pcie *pcie)
static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
+ struct dw_pcie *pci = pcie->pci;
u32 val;
+ if (!pci->elbi_base) {
+ dev_err(pci->dev, "ELBI is not present\n");
+ return;
+ }
/* enable link training */
- val = readl(pcie->elbi + ELBI_SYS_CTRL);
+ val = readl(pci->elbi_base + ELBI_SYS_CTRL);
val |= ELBI_SYS_CTRL_LT_ENABLE;
- writel(val, pcie->elbi + ELBI_SYS_CTRL);
+ writel(val, pci->elbi_base + ELBI_SYS_CTRL);
}
static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
@@ -1040,25 +1094,6 @@ static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
return 0;
}
-static int qcom_pcie_enable_aspm(struct pci_dev *pdev, void *userdata)
-{
- /*
- * Downstream devices need to be in D0 state before enabling PCI PM
- * substates.
- */
- pci_set_power_state_locked(pdev, PCI_D0);
- pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
-
- return 0;
-}
-
-static void qcom_pcie_host_post_init_2_7_0(struct qcom_pcie *pcie)
-{
- struct dw_pcie_rp *pp = &pcie->pci->pp;
-
- pci_walk_bus(pp->bridge->bus, qcom_pcie_enable_aspm, NULL);
-}
-
static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
@@ -1253,63 +1288,39 @@ static bool qcom_pcie_link_up(struct dw_pcie *pci)
return val & PCI_EXP_LNKSTA_DLLLA;
}
-static void qcom_pcie_phy_exit(struct qcom_pcie *pcie)
-{
- struct qcom_pcie_port *port;
-
- if (list_empty(&pcie->ports))
- phy_exit(pcie->phy);
- else
- list_for_each_entry(port, &pcie->ports, list)
- phy_exit(port->phy);
-}
-
static void qcom_pcie_phy_power_off(struct qcom_pcie *pcie)
{
struct qcom_pcie_port *port;
- if (list_empty(&pcie->ports)) {
- phy_power_off(pcie->phy);
- } else {
- list_for_each_entry(port, &pcie->ports, list)
- phy_power_off(port->phy);
- }
+ list_for_each_entry(port, &pcie->ports, list)
+ phy_power_off(port->phy);
}
static int qcom_pcie_phy_power_on(struct qcom_pcie *pcie)
{
struct qcom_pcie_port *port;
- int ret = 0;
+ int ret;
- if (list_empty(&pcie->ports)) {
- ret = phy_set_mode_ext(pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
+ list_for_each_entry(port, &pcie->ports, list) {
+ ret = phy_set_mode_ext(port->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
if (ret)
return ret;
- ret = phy_power_on(pcie->phy);
- if (ret)
+ ret = phy_power_on(port->phy);
+ if (ret) {
+ qcom_pcie_phy_power_off(pcie);
return ret;
- } else {
- list_for_each_entry(port, &pcie->ports, list) {
- ret = phy_set_mode_ext(port->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
- if (ret)
- return ret;
-
- ret = phy_power_on(port->phy);
- if (ret) {
- qcom_pcie_phy_power_off(pcie);
- return ret;
- }
}
}
- return ret;
+ return 0;
}
static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct qcom_pcie *pcie = to_qcom_pcie(pci);
+ u16 offset;
int ret;
qcom_ep_reset_assert(pcie);
@@ -1318,6 +1329,17 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
if (ret)
return ret;
+ if (pp->ecam_enabled) {
+ /*
+ * Override ELBI when ECAM is enabled, since ELBI then moves
+ * under the 'config' space.
+ */
+ offset = FIELD_GET(SLV_DBI_ELBI_ADDR_BASE, readl(pcie->parf + PARF_SLV_DBI_ELBI));
+ pci->elbi_base = pci->dbi_base + offset;
+
+ qcom_pci_config_ecam(pp);
+ }
+
ret = qcom_pcie_phy_power_on(pcie);
if (ret)
goto err_deinit;
@@ -1358,19 +1380,9 @@ static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp)
pcie->cfg->ops->deinit(pcie);
}
-static void qcom_pcie_host_post_init(struct dw_pcie_rp *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct qcom_pcie *pcie = to_qcom_pcie(pci);
-
- if (pcie->cfg->ops->host_post_init)
- pcie->cfg->ops->host_post_init(pcie);
-}
-
static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
.init = qcom_pcie_host_init,
.deinit = qcom_pcie_host_deinit,
- .post_init = qcom_pcie_host_post_init,
};
/* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */
@@ -1432,7 +1444,6 @@ static const struct qcom_pcie_ops ops_1_9_0 = {
.get_resources = qcom_pcie_get_resources_2_7_0,
.init = qcom_pcie_init_2_7_0,
.post_init = qcom_pcie_post_init_2_7_0,
- .host_post_init = qcom_pcie_host_post_init_2_7_0,
.deinit = qcom_pcie_deinit_2_7_0,
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
.config_sid = qcom_pcie_config_sid_1_9_0,
@@ -1443,7 +1454,6 @@ static const struct qcom_pcie_ops ops_1_21_0 = {
.get_resources = qcom_pcie_get_resources_2_7_0,
.init = qcom_pcie_init_2_7_0,
.post_init = qcom_pcie_post_init_2_7_0,
- .host_post_init = qcom_pcie_host_post_init_2_7_0,
.deinit = qcom_pcie_deinit_2_7_0,
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
@@ -1740,6 +1750,8 @@ static int qcom_pcie_parse_ports(struct qcom_pcie *pcie)
int ret = -ENOENT;
for_each_available_child_of_node_scoped(dev->of_node, of_port) {
+ if (!of_node_is_type(of_port, "pci"))
+ continue;
ret = qcom_pcie_parse_port(pcie, of_port);
if (ret)
goto err_port_del;
@@ -1748,8 +1760,10 @@ static int qcom_pcie_parse_ports(struct qcom_pcie *pcie)
return ret;
err_port_del:
- list_for_each_entry_safe(port, tmp, &pcie->ports, list)
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
+ phy_exit(port->phy);
list_del(&port->list);
+ }
return ret;
}
@@ -1757,20 +1771,32 @@ err_port_del:
static int qcom_pcie_parse_legacy_binding(struct qcom_pcie *pcie)
{
struct device *dev = pcie->pci->dev;
+ struct qcom_pcie_port *port;
+ struct gpio_desc *reset;
+ struct phy *phy;
int ret;
- pcie->phy = devm_phy_optional_get(dev, "pciephy");
- if (IS_ERR(pcie->phy))
- return PTR_ERR(pcie->phy);
+ phy = devm_phy_optional_get(dev, "pciephy");
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
- pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
- if (IS_ERR(pcie->reset))
- return PTR_ERR(pcie->reset);
+ reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
+ if (IS_ERR(reset))
+ return PTR_ERR(reset);
- ret = phy_init(pcie->phy);
+ ret = phy_init(phy);
if (ret)
return ret;
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->reset = reset;
+ port->phy = phy;
+ INIT_LIST_HEAD(&port->list);
+ list_add_tail(&port->list, &pcie->ports);
+
return 0;
}
@@ -1861,12 +1887,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
goto err_pm_runtime_put;
}
- pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
- if (IS_ERR(pcie->elbi)) {
- ret = PTR_ERR(pcie->elbi);
- goto err_pm_runtime_put;
- }
-
/* MHI region is optional */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mhi");
if (res) {
@@ -1984,9 +2004,10 @@ static int qcom_pcie_probe(struct platform_device *pdev)
err_host_deinit:
dw_pcie_host_deinit(pp);
err_phy_exit:
- qcom_pcie_phy_exit(pcie);
- list_for_each_entry_safe(port, tmp, &pcie->ports, list)
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
+ phy_exit(port->phy);
list_del(&port->list);
+ }
err_pm_runtime_put:
pm_runtime_put(dev);
pm_runtime_disable(dev);
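
The address windows programmed by qcom_pci_config_ecam() follow directly from the ECAM layout: bus 0 owns the first 1 MiB above the DBI base, and the Root Port is only Dev/Fn 00.0, i.e. the first 4 KiB of it. Everything in between is fenced so stray config accesses read back as all F's:

static void ecam_blocker_window(u64 dbi_base, u64 *base, u64 *limit)
{
	*base = dbi_base + SZ_4K;	/* first function after the Root Port */
	*limit = dbi_base + SZ_1M - 1;	/* last byte of bus 0's ECAM slice */
}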
diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
index 18055807a4f5..80778917d2dd 100644
--- a/drivers/pci/controller/dwc/pcie-rcar-gen4.c
+++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
@@ -182,8 +182,17 @@ static int rcar_gen4_pcie_common_init(struct rcar_gen4_pcie *rcar)
return ret;
}
- if (!reset_control_status(dw->core_rsts[DW_PCIE_PWR_RST].rstc))
+ if (!reset_control_status(dw->core_rsts[DW_PCIE_PWR_RST].rstc)) {
reset_control_assert(dw->core_rsts[DW_PCIE_PWR_RST].rstc);
+ /*
+ * R-Car V4H Reference Manual R19UH0186EJ0130 Rev.1.30 Apr.
+ * 21, 2025 page 585 Figure 9.3.2 Software Reset flow (B)
+ * indicates that for peripherals in the HSC domain, after
+ * reset has been asserted by writing the matching reset bit
+ * into the SRCR register, it is mandatory to wait 1 ms.
+ */
+ fsleep(1000);
+ }
val = readl(rcar->base + PCIEMSR0);
if (rcar->drvdata->mode == DW_PCIE_RC_TYPE) {
@@ -204,6 +213,19 @@ static int rcar_gen4_pcie_common_init(struct rcar_gen4_pcie *rcar)
if (ret)
goto err_unprepare;
+ /*
+ * Ensure the reset is latched and the core is ready for DBI access.
+ * On R-Car V4H, the PCIe reset is asynchronous and does not take
+ * effect immediately, but needs a short time to complete. If a DBI
+ * access happens within that window, it generates an SError. To make
+ * sure that condition can never happen, read back the state of the
+ * reset, which should turn the asynchronous reset into a synchronous
+ * one, and wait a little over 1 ms to add additional safety margin.
+ */
+ reset_control_status(dw->core_rsts[DW_PCIE_PWR_RST].rstc);
+ fsleep(1000);
+
if (rcar->drvdata->additional_common_init)
rcar->drvdata->additional_common_init(rcar);
@@ -398,9 +420,7 @@ static int rcar_gen4_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
}
static const struct pci_epc_features rcar_gen4_pcie_epc_features = {
- .linkup_notifier = false,
.msi_capable = true,
- .msix_capable = false,
.bar[BAR_1] = { .type = BAR_RESERVED, },
.bar[BAR_3] = { .type = BAR_RESERVED, },
.bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256 },
@@ -701,7 +721,7 @@ static int rcar_gen4_pcie_ltssm_control(struct rcar_gen4_pcie *rcar, bool enable
rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(23, 22), BIT(22));
rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(18, 16), GENMASK(17, 16));
rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(7, 6), BIT(6));
- rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(2, 0), GENMASK(11, 0));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(2, 0), GENMASK(1, 0));
rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x1d4, GENMASK(16, 15), GENMASK(16, 15));
rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x514, BIT(26), BIT(26));
rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x0f8, BIT(16), 0);
@@ -711,7 +731,7 @@ static int rcar_gen4_pcie_ltssm_control(struct rcar_gen4_pcie *rcar, bool enable
val &= ~APP_HOLD_PHY_RST;
writel(val, rcar->base + PCIERSTCTRL1);
- ret = readl_poll_timeout(rcar->phy_base + 0x0f8, val, !(val & BIT(18)), 100, 10000);
+ ret = readl_poll_timeout(rcar->phy_base + 0x0f8, val, val & BIT(18), 100, 10000);
if (ret < 0)
return ret;
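
Both R-Car hunks above add the same two-step idiom: read back on the reset interface so the asynchronous reset assertion has demonstrably been observed, then sleep out the documented settling time. In isolation:

static void settle_reset(struct reset_control *rstc)
{
	reset_control_status(rstc);	/* read back: orders the reset write */
	fsleep(1000);			/* >= 1 ms settling per the manual */
}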
diff --git a/drivers/pci/controller/dwc/pcie-stm32-ep.c b/drivers/pci/controller/dwc/pcie-stm32-ep.c
new file mode 100644
index 000000000000..3400c7cd2d88
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-stm32-ep.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * STMicroelectronics STM32MP25 PCIe endpoint driver.
+ *
+ * Copyright (C) 2025 STMicroelectronics
+ * Author: Christian Bruel <christian.bruel@foss.st.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include "pcie-designware.h"
+#include "pcie-stm32.h"
+
+struct stm32_pcie {
+ struct dw_pcie pci;
+ struct regmap *regmap;
+ struct reset_control *rst;
+ struct phy *phy;
+ struct clk *clk;
+ struct gpio_desc *perst_gpio;
+ unsigned int perst_irq;
+};
+
+static void stm32_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int stm32_pcie_enable_link(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+
+ regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR,
+ STM32MP25_PCIECR_LTSSM_EN,
+ STM32MP25_PCIECR_LTSSM_EN);
+
+ return dw_pcie_wait_for_link(pci);
+}
+
+static void stm32_pcie_disable_link(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+
+ regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR, STM32MP25_PCIECR_LTSSM_EN, 0);
+}
+
+static int stm32_pcie_start_link(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+ int ret;
+
+ dev_dbg(pci->dev, "Enable link\n");
+
+ ret = stm32_pcie_enable_link(pci);
+ if (ret) {
+ dev_err(pci->dev, "PCIe cannot establish link: %d\n", ret);
+ return ret;
+ }
+
+ enable_irq(stm32_pcie->perst_irq);
+
+ return 0;
+}
+
+static void stm32_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+
+ dev_dbg(pci->dev, "Disable link\n");
+
+ disable_irq(stm32_pcie->perst_irq);
+
+ stm32_pcie_disable_link(pci);
+}
+
+static int stm32_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ unsigned int type, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_IRQ_INTX:
+ return dw_pcie_ep_raise_intx_irq(ep, func_no);
+ case PCI_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ return -EINVAL;
+ }
+}
+
+static const struct pci_epc_features stm32_pcie_epc_features = {
+ .msi_capable = true,
+ .align = SZ_64K,
+};
+
+static const struct pci_epc_features *
+stm32_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ return &stm32_pcie_epc_features;
+}
+
+static const struct dw_pcie_ep_ops stm32_pcie_ep_ops = {
+ .init = stm32_pcie_ep_init,
+ .raise_irq = stm32_pcie_raise_irq,
+ .get_features = stm32_pcie_get_features,
+};
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = stm32_pcie_start_link,
+ .stop_link = stm32_pcie_stop_link,
+};
+
+static int stm32_pcie_enable_resources(struct stm32_pcie *stm32_pcie)
+{
+ int ret;
+
+ ret = phy_init(stm32_pcie->phy);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(stm32_pcie->clk);
+ if (ret)
+ phy_exit(stm32_pcie->phy);
+
+ return ret;
+}
+
+static void stm32_pcie_disable_resources(struct stm32_pcie *stm32_pcie)
+{
+ clk_disable_unprepare(stm32_pcie->clk);
+
+ phy_exit(stm32_pcie->phy);
+}
+
+static void stm32_pcie_perst_assert(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+ struct dw_pcie_ep *ep = &stm32_pcie->pci.ep;
+ struct device *dev = pci->dev;
+
+ dev_dbg(dev, "PERST asserted by host\n");
+
+ pci_epc_deinit_notify(ep->epc);
+
+ stm32_pcie_disable_resources(stm32_pcie);
+
+ pm_runtime_put_sync(dev);
+}
+
+static void stm32_pcie_perst_deassert(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+ struct device *dev = pci->dev;
+ struct dw_pcie_ep *ep = &pci->ep;
+ int ret;
+
+ dev_dbg(dev, "PERST de-asserted by host\n");
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to resume runtime PM: %d\n", ret);
+ return;
+ }
+
+ ret = stm32_pcie_enable_resources(stm32_pcie);
+ if (ret) {
+ dev_err(dev, "Failed to enable resources: %d\n", ret);
+ goto err_pm_put_sync;
+ }
+
+ /*
+ * Reprogram the configuration space registers here because the DBI
+ * registers were reset by the PHY RCC during phy_init().
+ */
+ ret = dw_pcie_ep_init_registers(ep);
+ if (ret) {
+ dev_err(dev, "Failed to complete initialization: %d\n", ret);
+ goto err_disable_resources;
+ }
+
+ pci_epc_init_notify(ep->epc);
+
+ return;
+
+err_disable_resources:
+ stm32_pcie_disable_resources(stm32_pcie);
+
+err_pm_put_sync:
+ pm_runtime_put_sync(dev);
+}
+
+static irqreturn_t stm32_pcie_ep_perst_irq_thread(int irq, void *data)
+{
+ struct stm32_pcie *stm32_pcie = data;
+ struct dw_pcie *pci = &stm32_pcie->pci;
+ u32 perst;
+
+ perst = gpiod_get_value(stm32_pcie->perst_gpio);
+ if (perst)
+ stm32_pcie_perst_assert(pci);
+ else
+ stm32_pcie_perst_deassert(pci);
+
+ irq_set_irq_type(gpiod_to_irq(stm32_pcie->perst_gpio),
+ (perst ? IRQF_TRIGGER_HIGH : IRQF_TRIGGER_LOW));
+
+ return IRQ_HANDLED;
+}
+
+static int stm32_add_pcie_ep(struct stm32_pcie *stm32_pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie_ep *ep = &stm32_pcie->pci.ep;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR,
+ STM32MP25_PCIECR_TYPE_MASK,
+ STM32MP25_PCIECR_EP);
+ if (ret)
+ return ret;
+
+ reset_control_assert(stm32_pcie->rst);
+ reset_control_deassert(stm32_pcie->rst);
+
+ ep->ops = &stm32_pcie_ep_ops;
+
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize ep: %d\n", ret);
+ return ret;
+ }
+
+ ret = stm32_pcie_enable_resources(stm32_pcie);
+ if (ret) {
+ dev_err(dev, "Failed to enable resources: %d\n", ret);
+ dw_pcie_ep_deinit(ep);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int stm32_pcie_probe(struct platform_device *pdev)
+{
+ struct stm32_pcie *stm32_pcie;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ stm32_pcie = devm_kzalloc(dev, sizeof(*stm32_pcie), GFP_KERNEL);
+ if (!stm32_pcie)
+ return -ENOMEM;
+
+ stm32_pcie->pci.dev = dev;
+ stm32_pcie->pci.ops = &dw_pcie_ops;
+
+ stm32_pcie->regmap = syscon_regmap_lookup_by_compatible("st,stm32mp25-syscfg");
+ if (IS_ERR(stm32_pcie->regmap))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->regmap),
+ "No syscfg specified\n");
+
+ stm32_pcie->phy = devm_phy_get(dev, NULL);
+ if (IS_ERR(stm32_pcie->phy))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->phy),
+ "failed to get pcie-phy\n");
+
+ stm32_pcie->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(stm32_pcie->clk))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->clk),
+ "Failed to get PCIe clock source\n");
+
+ stm32_pcie->rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(stm32_pcie->rst))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->rst),
+ "Failed to get PCIe reset\n");
+
+ stm32_pcie->perst_gpio = devm_gpiod_get(dev, "reset", GPIOD_IN);
+ if (IS_ERR(stm32_pcie->perst_gpio))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->perst_gpio),
+ "Failed to get reset GPIO\n");
+
+ ret = phy_set_mode(stm32_pcie->phy, PHY_MODE_PCIE);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, stm32_pcie);
+
+ pm_runtime_get_noresume(dev);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ return dev_err_probe(dev, ret, "Failed to enable runtime PM\n");
+ }
+
+ stm32_pcie->perst_irq = gpiod_to_irq(stm32_pcie->perst_gpio);
+
+ /* Will be enabled in start_link when device is initialized. */
+ irq_set_status_flags(stm32_pcie->perst_irq, IRQ_NOAUTOEN);
+
+ ret = devm_request_threaded_irq(dev, stm32_pcie->perst_irq, NULL,
+ stm32_pcie_ep_perst_irq_thread,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "perst_irq", stm32_pcie);
+ if (ret) {
+ pm_runtime_put_noidle(dev);
+ return dev_err_probe(dev, ret, "Failed to request PERST IRQ\n");
+ }
+
+ ret = stm32_add_pcie_ep(stm32_pcie, pdev);
+ if (ret)
+ pm_runtime_put_noidle(dev);
+
+ return ret;
+}
+
+static void stm32_pcie_remove(struct platform_device *pdev)
+{
+ struct stm32_pcie *stm32_pcie = platform_get_drvdata(pdev);
+ struct dw_pcie *pci = &stm32_pcie->pci;
+ struct dw_pcie_ep *ep = &pci->ep;
+
+ dw_pcie_stop_link(pci);
+
+ pci_epc_deinit_notify(ep->epc);
+ dw_pcie_ep_deinit(ep);
+
+ stm32_pcie_disable_resources(stm32_pcie);
+
+ pm_runtime_put_sync(&pdev->dev);
+}
+
+static const struct of_device_id stm32_pcie_ep_of_match[] = {
+ { .compatible = "st,stm32mp25-pcie-ep" },
+ {},
+};
+
+static struct platform_driver stm32_pcie_ep_driver = {
+ .probe = stm32_pcie_probe,
+ .remove = stm32_pcie_remove,
+ .driver = {
+ .name = "stm32-ep-pcie",
+ .of_match_table = stm32_pcie_ep_of_match,
+ },
+};
+
+module_platform_driver(stm32_pcie_ep_driver);
+
+MODULE_AUTHOR("Christian Bruel <christian.bruel@foss.st.com>");
+MODULE_DESCRIPTION("STM32MP25 PCIe Endpoint Controller driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, stm32_pcie_ep_of_match);
diff --git a/drivers/pci/controller/dwc/pcie-stm32.c b/drivers/pci/controller/dwc/pcie-stm32.c
new file mode 100644
index 000000000000..96a5fb893af4
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-stm32.c
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * STMicroelectronics STM32MP25 PCIe root complex driver.
+ *
+ * Copyright (C) 2025 STMicroelectronics
+ * Author: Christian Bruel <christian.bruel@foss.st.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include "pcie-designware.h"
+#include "pcie-stm32.h"
+#include "../../pci.h"
+
+struct stm32_pcie {
+ struct dw_pcie pci;
+ struct regmap *regmap;
+ struct reset_control *rst;
+ struct phy *phy;
+ struct clk *clk;
+ struct gpio_desc *perst_gpio;
+ struct gpio_desc *wake_gpio;
+};
+
+static void stm32_pcie_deassert_perst(struct stm32_pcie *stm32_pcie)
+{
+ if (stm32_pcie->perst_gpio) {
+ msleep(PCIE_T_PVPERL_MS);
+ gpiod_set_value(stm32_pcie->perst_gpio, 0);
+ }
+
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
+}
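
/*
 * Timing note: PCIE_T_PVPERL_MS is T_PVPERL from the PCIe CEM spec,
 * the minimum 100 ms between power becoming stable and PERST#
 * deassertion. The second delay gives the downstream device time to
 * become configuration-ready after reset before config accesses start.
 */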
+
+static void stm32_pcie_assert_perst(struct stm32_pcie *stm32_pcie)
+{
+ gpiod_set_value(stm32_pcie->perst_gpio, 1);
+}
+
+static int stm32_pcie_start_link(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+
+ return regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR,
+ STM32MP25_PCIECR_LTSSM_EN,
+ STM32MP25_PCIECR_LTSSM_EN);
+}
+
+static void stm32_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+
+ regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR,
+ STM32MP25_PCIECR_LTSSM_EN, 0);
+}
+
+static int stm32_pcie_suspend_noirq(struct device *dev)
+{
+ struct stm32_pcie *stm32_pcie = dev_get_drvdata(dev);
+ int ret;
+
+ ret = dw_pcie_suspend_noirq(&stm32_pcie->pci);
+ if (ret)
+ return ret;
+
+ stm32_pcie_assert_perst(stm32_pcie);
+
+ clk_disable_unprepare(stm32_pcie->clk);
+
+ if (!device_wakeup_path(dev))
+ phy_exit(stm32_pcie->phy);
+
+ return pinctrl_pm_select_sleep_state(dev);
+}
+
+static int stm32_pcie_resume_noirq(struct device *dev)
+{
+ struct stm32_pcie *stm32_pcie = dev_get_drvdata(dev);
+ int ret;
+
+ /*
+ * The core clock is gated with CLKREQ# from the COMBOPHY REFCLK, so
+ * if no device is present we must deassert it with a GPIO from the
+ * pinctrl pinmux before accessing the DBI registers.
+ */
+ ret = pinctrl_pm_select_init_state(dev);
+ if (ret) {
+ dev_err(dev, "Failed to activate pinctrl pm state: %d\n", ret);
+ return ret;
+ }
+
+ if (!device_wakeup_path(dev)) {
+ ret = phy_init(stm32_pcie->phy);
+ if (ret) {
+ pinctrl_pm_select_default_state(dev);
+ return ret;
+ }
+ }
+
+ ret = clk_prepare_enable(stm32_pcie->clk);
+ if (ret)
+ goto err_phy_exit;
+
+ stm32_pcie_deassert_perst(stm32_pcie);
+
+ ret = dw_pcie_resume_noirq(&stm32_pcie->pci);
+ if (ret)
+ goto err_disable_clk;
+
+ pinctrl_pm_select_default_state(dev);
+
+ return 0;
+
+err_disable_clk:
+ stm32_pcie_assert_perst(stm32_pcie);
+ clk_disable_unprepare(stm32_pcie->clk);
+
+err_phy_exit:
+ phy_exit(stm32_pcie->phy);
+ pinctrl_pm_select_default_state(dev);
+
+ return ret;
+}
+
+static const struct dev_pm_ops stm32_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(stm32_pcie_suspend_noirq,
+ stm32_pcie_resume_noirq)
+};
+
+static const struct dw_pcie_host_ops stm32_pcie_host_ops = {
+};
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = stm32_pcie_start_link,
+ .stop_link = stm32_pcie_stop_link
+};
+
+static int stm32_add_pcie_port(struct stm32_pcie *stm32_pcie)
+{
+ struct device *dev = stm32_pcie->pci.dev;
+ unsigned int wake_irq;
+ int ret;
+
+ ret = phy_set_mode(stm32_pcie->phy, PHY_MODE_PCIE);
+ if (ret)
+ return ret;
+
+ ret = phy_init(stm32_pcie->phy);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR,
+ STM32MP25_PCIECR_TYPE_MASK,
+ STM32MP25_PCIECR_RC);
+ if (ret)
+ goto err_phy_exit;
+
+ stm32_pcie_deassert_perst(stm32_pcie);
+
+ if (stm32_pcie->wake_gpio) {
+ wake_irq = gpiod_to_irq(stm32_pcie->wake_gpio);
+ ret = dev_pm_set_dedicated_wake_irq(dev, wake_irq);
+ if (ret) {
+ dev_err(dev, "Failed to enable wakeup irq %d\n", ret);
+ goto err_assert_perst;
+ }
+ irq_set_irq_type(wake_irq, IRQ_TYPE_EDGE_FALLING);
+ }
+
+ return 0;
+
+err_assert_perst:
+ stm32_pcie_assert_perst(stm32_pcie);
+
+err_phy_exit:
+ phy_exit(stm32_pcie->phy);
+
+ return ret;
+}
+
+static void stm32_remove_pcie_port(struct stm32_pcie *stm32_pcie)
+{
+ dev_pm_clear_wake_irq(stm32_pcie->pci.dev);
+
+ stm32_pcie_assert_perst(stm32_pcie);
+
+ phy_exit(stm32_pcie->phy);
+}
+
+static int stm32_pcie_parse_port(struct stm32_pcie *stm32_pcie)
+{
+ struct device *dev = stm32_pcie->pci.dev;
+ struct device_node *root_port;
+
+ root_port = of_get_next_available_child(dev->of_node, NULL);
+
+ stm32_pcie->phy = devm_of_phy_get(dev, root_port, NULL);
+ if (IS_ERR(stm32_pcie->phy)) {
+ of_node_put(root_port);
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->phy),
+ "Failed to get pcie-phy\n");
+ }
+
+ stm32_pcie->perst_gpio = devm_fwnode_gpiod_get(dev, of_fwnode_handle(root_port),
+ "reset", GPIOD_OUT_HIGH, NULL);
+ if (IS_ERR(stm32_pcie->perst_gpio)) {
+ if (PTR_ERR(stm32_pcie->perst_gpio) != -ENOENT) {
+ of_node_put(root_port);
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->perst_gpio),
+ "Failed to get reset GPIO\n");
+ }
+ stm32_pcie->perst_gpio = NULL;
+ }
+
+ stm32_pcie->wake_gpio = devm_fwnode_gpiod_get(dev, of_fwnode_handle(root_port),
+ "wake", GPIOD_IN, NULL);
+ if (IS_ERR(stm32_pcie->wake_gpio)) {
+ if (PTR_ERR(stm32_pcie->wake_gpio) != -ENOENT) {
+ of_node_put(root_port);
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->wake_gpio),
+ "Failed to get wake GPIO\n");
+ }
+ stm32_pcie->wake_gpio = NULL;
+ }
+
+ of_node_put(root_port);
+
+ return 0;
+}
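
/*
 * Idiom sketch (not part of the patch): the -ENOENT filtering above
 * exists because devm_fwnode_gpiod_get() has no *_optional variant, so
 * an absent property must be mapped to NULL by hand. The helper name
 * below is hypothetical.
 */
static struct gpio_desc *example_optional_gpiod(struct device *dev,
						struct fwnode_handle *fwnode,
						const char *con_id,
						enum gpiod_flags flags)
{
	struct gpio_desc *desc;

	desc = devm_fwnode_gpiod_get(dev, fwnode, con_id, flags, NULL);
	if (desc == ERR_PTR(-ENOENT))	/* property absent: optional GPIO */
		return NULL;

	return desc;	/* valid descriptor, or a real ERR_PTR() to propagate */
}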
+
+static int stm32_pcie_probe(struct platform_device *pdev)
+{
+ struct stm32_pcie *stm32_pcie;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ stm32_pcie = devm_kzalloc(dev, sizeof(*stm32_pcie), GFP_KERNEL);
+ if (!stm32_pcie)
+ return -ENOMEM;
+
+ stm32_pcie->pci.dev = dev;
+ stm32_pcie->pci.ops = &dw_pcie_ops;
+ stm32_pcie->pci.pp.ops = &stm32_pcie_host_ops;
+
+ stm32_pcie->regmap = syscon_regmap_lookup_by_compatible("st,stm32mp25-syscfg");
+ if (IS_ERR(stm32_pcie->regmap))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->regmap),
+ "No syscfg specified\n");
+
+ stm32_pcie->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(stm32_pcie->clk))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->clk),
+ "Failed to get PCIe clock source\n");
+
+ stm32_pcie->rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(stm32_pcie->rst))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->rst),
+ "Failed to get PCIe reset\n");
+
+ ret = stm32_pcie_parse_port(stm32_pcie);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, stm32_pcie);
+
+ ret = stm32_add_pcie_port(stm32_pcie);
+ if (ret)
+ return ret;
+
+ reset_control_assert(stm32_pcie->rst);
+ reset_control_deassert(stm32_pcie->rst);
+
+ ret = clk_prepare_enable(stm32_pcie->clk);
+ if (ret) {
+ dev_err(dev, "Core clock enable failed %d\n", ret);
+ goto err_remove_port;
+ }
+
+ ret = pm_runtime_set_active(dev);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "Failed to activate runtime PM\n");
+ goto err_disable_clk;
+ }
+
+ pm_runtime_no_callbacks(dev);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "Failed to enable runtime PM\n");
+ goto err_disable_clk;
+ }
+
+ ret = dw_pcie_host_init(&stm32_pcie->pci.pp);
+ if (ret)
+ goto err_disable_clk;
+
+ if (stm32_pcie->wake_gpio)
+ device_init_wakeup(dev, true);
+
+ return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(stm32_pcie->clk);
+
+err_remove_port:
+ stm32_remove_pcie_port(stm32_pcie);
+
+ return ret;
+}
+
+static void stm32_pcie_remove(struct platform_device *pdev)
+{
+ struct stm32_pcie *stm32_pcie = platform_get_drvdata(pdev);
+ struct dw_pcie_rp *pp = &stm32_pcie->pci.pp;
+
+ if (stm32_pcie->wake_gpio)
+ device_init_wakeup(&pdev->dev, false);
+
+ dw_pcie_host_deinit(pp);
+
+ clk_disable_unprepare(stm32_pcie->clk);
+
+ stm32_remove_pcie_port(stm32_pcie);
+
+ pm_runtime_put_noidle(&pdev->dev);
+}
+
+static const struct of_device_id stm32_pcie_of_match[] = {
+ { .compatible = "st,stm32mp25-pcie-rc" },
+ {},
+};
+
+static struct platform_driver stm32_pcie_driver = {
+ .probe = stm32_pcie_probe,
+ .remove = stm32_pcie_remove,
+ .driver = {
+ .name = "stm32-pcie",
+ .of_match_table = stm32_pcie_of_match,
+ .pm = &stm32_pcie_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+module_platform_driver(stm32_pcie_driver);
+
+MODULE_AUTHOR("Christian Bruel <christian.bruel@foss.st.com>");
+MODULE_DESCRIPTION("STM32MP25 PCIe Controller driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, stm32_pcie_of_match);
diff --git a/drivers/pci/controller/dwc/pcie-stm32.h b/drivers/pci/controller/dwc/pcie-stm32.h
new file mode 100644
index 000000000000..09d39f04e469
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-stm32.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ST PCIe driver definitions for STM32MP25 SoC
+ *
+ * Copyright (C) 2025 STMicroelectronics - All Rights Reserved
+ * Author: Christian Bruel <christian.bruel@foss.st.com>
+ */
+
+#define to_stm32_pcie(x) dev_get_drvdata((x)->dev)
+
+#define STM32MP25_PCIECR_TYPE_MASK GENMASK(11, 8)
+#define STM32MP25_PCIECR_EP 0
+#define STM32MP25_PCIECR_LTSSM_EN BIT(2)
+#define STM32MP25_PCIECR_RC BIT(10)
+
+#define SYSCFG_PCIECR 0x6000
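
/*
 * Field reference for SYSCFG_PCIECR as defined above:
 *   STM32MP25_PCIECR_TYPE_MASK = GENMASK(11, 8) = 0x00000f00
 *   STM32MP25_PCIECR_RC        = BIT(10)        = 0x00000400
 *   STM32MP25_PCIECR_EP        = 0              (TYPE field cleared)
 *   STM32MP25_PCIECR_LTSSM_EN  = BIT(2)         = 0x00000004
 * A regmap_update_bits(regmap, SYSCFG_PCIECR, TYPE_MASK, RC) call, as
 * both drivers issue, clears bits 11:8 and sets bit 10 in a single
 * read-modify-write.
 */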
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index 4f26086f25da..10e74458e667 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -1214,6 +1214,7 @@ static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
struct mrq_uphy_response resp;
struct tegra_bpmp_message msg;
struct mrq_uphy_request req;
+ int err;
/*
* Controller-5 doesn't need to have its state set by BPMP-FW in
@@ -1236,7 +1237,13 @@ static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
msg.rx.data = &resp;
msg.rx.size = sizeof(resp);
- return tegra_bpmp_transfer(pcie->bpmp, &msg);
+ err = tegra_bpmp_transfer(pcie->bpmp, &msg);
+ if (err)
+ return err;
+ if (msg.rx.ret)
+ return -EINVAL;
+
+ return 0;
}
static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
@@ -1245,6 +1252,7 @@ static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
struct mrq_uphy_response resp;
struct tegra_bpmp_message msg;
struct mrq_uphy_request req;
+ int err;
memset(&req, 0, sizeof(req));
memset(&resp, 0, sizeof(resp));
@@ -1264,13 +1272,19 @@ static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
msg.rx.data = &resp;
msg.rx.size = sizeof(resp);
- return tegra_bpmp_transfer(pcie->bpmp, &msg);
+ err = tegra_bpmp_transfer(pcie->bpmp, &msg);
+ if (err)
+ return err;
+ if (msg.rx.ret)
+ return -EINVAL;
+
+ return 0;
}
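
/*
 * Shape of the fix (sketch with illustrative names): an IPC transfer
 * can fail in transport (nonzero return from the transfer call) or
 * succeed in transport while the firmware reports a command error in
 * the response (msg.rx.ret != 0). Both must be checked.
 */
static int example_bpmp_call(struct tegra_bpmp *bpmp,
			     struct tegra_bpmp_message *msg)
{
	int err = tegra_bpmp_transfer(bpmp, msg);

	if (err)		/* transport failure */
		return err;
	if (msg->rx.ret)	/* firmware rejected the request */
		return -EINVAL;

	return 0;
}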
static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
{
struct dw_pcie_rp *pp = &pcie->pci.pp;
- struct pci_bus *child, *root_bus = NULL;
+ struct pci_bus *child, *root_port_bus = NULL;
struct pci_dev *pdev;
/*
@@ -1283,19 +1297,19 @@ static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
*/
list_for_each_entry(child, &pp->bridge->bus->children, node) {
- /* Bring downstream devices to D0 if they are not already in */
if (child->parent == pp->bridge->bus) {
- root_bus = child;
+ root_port_bus = child;
break;
}
}
- if (!root_bus) {
- dev_err(pcie->dev, "Failed to find downstream devices\n");
+ if (!root_port_bus) {
+ dev_err(pcie->dev, "Failed to find downstream bus of Root Port\n");
return;
}
- list_for_each_entry(pdev, &root_bus->devices, bus_list) {
+ /* Bring downstream devices to D0 if they are not already in */
+ list_for_each_entry(pdev, &root_port_bus->devices, bus_list) {
if (PCI_SLOT(pdev->devfn) == 0) {
if (pci_set_power_state(pdev, PCI_D0))
dev_err(pcie->dev,
@@ -1722,9 +1736,9 @@ static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
ret);
}
- ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
+ ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
if (ret)
- dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret);
+ dev_err(pcie->dev, "Failed to disable controller: %d\n", ret);
pcie->ep_state = EP_STATE_DISABLED;
dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n");
@@ -1941,6 +1955,15 @@ static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
return IRQ_HANDLED;
}
+static void tegra_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
static int tegra_pcie_ep_raise_intx_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
/* Tegra194 supports only INTA */
@@ -1955,10 +1978,10 @@ static int tegra_pcie_ep_raise_intx_irq(struct tegra_pcie_dw *pcie, u16 irq)
static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
- if (unlikely(irq > 31))
+ if (unlikely(irq > 32))
return -EINVAL;
- appl_writel(pcie, BIT(irq), APPL_MSI_CTRL_1);
+ appl_writel(pcie, BIT(irq - 1), APPL_MSI_CTRL_1);
return 0;
}
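
/*
 * Worked example of the corrected mapping: EPC MSI interrupt numbers
 * are 1-based while the APPL_MSI_CTRL_1 bit index is 0-based, so
 *   interrupt_num  1 -> BIT(0)
 *   interrupt_num 32 -> BIT(31)
 * The old code both rejected the valid vector 32 and raised the wrong
 * vector by writing BIT(irq) instead of BIT(irq - 1).
 */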
@@ -1998,8 +2021,7 @@ static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
static const struct pci_epc_features tegra_pcie_epc_features = {
.linkup_notifier = true,
- .msi_capable = false,
- .msix_capable = false,
+ .msi_capable = true,
.bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = SZ_1M,
.only_64bit = true, },
.bar[BAR_1] = { .type = BAR_RESERVED, },
@@ -2017,6 +2039,7 @@ tegra_pcie_ep_get_features(struct dw_pcie_ep *ep)
}
static const struct dw_pcie_ep_ops pcie_ep_ops = {
+ .init = tegra_pcie_ep_init,
.raise_irq = tegra_pcie_ep_raise_irq,
.get_features = tegra_pcie_ep_get_features,
};
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index d2b7e8ea710b..146b43981b27 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -1680,7 +1680,6 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev,
/**
* hv_msi_free() - Free the MSI.
* @domain: The interrupt domain pointer
- * @info: Extra MSI-related context
* @irq: Identifies the IRQ.
*
* The Hyper-V parent partition and hypervisor are tracking the
@@ -1688,8 +1687,7 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev,
* table up to date. This callback sends a message that frees
* the IRT entry and related tracking nonsense.
*/
-static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
- unsigned int irq)
+static void hv_msi_free(struct irq_domain *domain, unsigned int irq)
{
struct hv_pcibus_device *hbus;
struct hv_pci_dev *hpdev;
@@ -2181,10 +2179,8 @@ static int hv_pcie_domain_alloc(struct irq_domain *d, unsigned int virq, unsigne
static void hv_pcie_domain_free(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs)
{
- struct msi_domain_info *info = d->host_data;
-
for (int i = 0; i < nr_irqs; i++)
- hv_msi_free(d, info, virq + i);
+ hv_msi_free(d, virq + i);
irq_domain_free_irqs_top(d, virq, nr_irqs);
}
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index 755651f33811..a72aa57591c0 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -1168,12 +1168,6 @@ static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
return devm_ioremap_resource(&pdev->dev, &port->regs);
}
-#define DT_FLAGS_TO_TYPE(flags) (((flags) >> 24) & 0x03)
-#define DT_TYPE_IO 0x1
-#define DT_TYPE_MEM32 0x2
-#define DT_CPUADDR_TO_TARGET(cpuaddr) (((cpuaddr) >> 56) & 0xFF)
-#define DT_CPUADDR_TO_ATTR(cpuaddr) (((cpuaddr) >> 48) & 0xFF)
-
static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
unsigned long type,
unsigned int *tgt,
@@ -1189,19 +1183,12 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
return -EINVAL;
for_each_of_range(&parser, &range) {
- unsigned long rtype;
u32 slot = upper_32_bits(range.bus_addr);
- if (DT_FLAGS_TO_TYPE(range.flags) == DT_TYPE_IO)
- rtype = IORESOURCE_IO;
- else if (DT_FLAGS_TO_TYPE(range.flags) == DT_TYPE_MEM32)
- rtype = IORESOURCE_MEM;
- else
- continue;
-
- if (slot == PCI_SLOT(devfn) && type == rtype) {
- *tgt = DT_CPUADDR_TO_TARGET(range.cpu_addr);
- *attr = DT_CPUADDR_TO_ATTR(range.cpu_addr);
+ if (slot == PCI_SLOT(devfn) &&
+ type == (range.flags & IORESOURCE_TYPE_BITS)) {
+ *tgt = (range.parent_bus_addr >> 56) & 0xFF;
+ *attr = (range.parent_bus_addr >> 48) & 0xFF;
return 0;
}
}
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index 467ddc701adc..942ddfca3bf6 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -14,6 +14,7 @@
*/
#include <linux/clk.h>
+#include <linux/cleanup.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/export.h>
@@ -270,7 +271,7 @@ struct tegra_msi {
DECLARE_BITMAP(used, INT_PCI_MSI_NR);
struct irq_domain *domain;
struct mutex map_lock;
- spinlock_t mask_lock;
+ raw_spinlock_t mask_lock;
void *virt;
dma_addr_t phys;
int irq;
@@ -1344,7 +1345,7 @@ static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
unsigned int i;
int err;
- port->phys = devm_kcalloc(dev, sizeof(phy), port->lanes, GFP_KERNEL);
+ port->phys = devm_kcalloc(dev, port->lanes, sizeof(phy), GFP_KERNEL);
if (!port->phys)
return -ENOMEM;
@@ -1581,14 +1582,13 @@ static void tegra_msi_irq_mask(struct irq_data *d)
struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
struct tegra_pcie *pcie = msi_to_pcie(msi);
unsigned int index = d->hwirq / 32;
- unsigned long flags;
u32 value;
- spin_lock_irqsave(&msi->mask_lock, flags);
- value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
- value &= ~BIT(d->hwirq % 32);
- afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
- spin_unlock_irqrestore(&msi->mask_lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) {
+ value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
+ value &= ~BIT(d->hwirq % 32);
+ afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
+ }
}
static void tegra_msi_irq_unmask(struct irq_data *d)
@@ -1596,14 +1596,13 @@ static void tegra_msi_irq_unmask(struct irq_data *d)
struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
struct tegra_pcie *pcie = msi_to_pcie(msi);
unsigned int index = d->hwirq / 32;
- unsigned long flags;
u32 value;
- spin_lock_irqsave(&msi->mask_lock, flags);
- value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
- value |= BIT(d->hwirq % 32);
- afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
- spin_unlock_irqrestore(&msi->mask_lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) {
+ value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
+ value |= BIT(d->hwirq % 32);
+ afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
+ }
}
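
/*
 * Pattern sketch (illustrative, not part of the patch): scoped_guard()
 * from <linux/cleanup.h> holds the lock for exactly the braced block
 * and releases it on every exit path, replacing the explicit
 * spin_lock_irqsave()/spin_unlock_irqrestore() pair. The raw_ lock is
 * used because irqchip mask/unmask callbacks run in hard-IRQ context
 * even on PREEMPT_RT.
 */
static void example_set_bit_locked(raw_spinlock_t *lock,
				   void __iomem *reg, u32 bit)
{
	scoped_guard(raw_spinlock_irqsave, lock) {
		u32 val = readl(reg);

		writel(val | bit, reg);
	}
}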
static void tegra_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
@@ -1711,7 +1710,7 @@ static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
int err;
mutex_init(&msi->map_lock);
- spin_lock_init(&msi->mask_lock);
+ raw_spin_lock_init(&msi->mask_lock);
if (IS_ENABLED(CONFIG_PCI_MSI)) {
err = tegra_allocate_domains(msi);
diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c
index 0a37a3f1809c..654639bccd10 100644
--- a/drivers/pci/controller/pci-xgene-msi.c
+++ b/drivers/pci/controller/pci-xgene-msi.c
@@ -311,7 +311,7 @@ static int xgene_msi_handler_setup(struct platform_device *pdev)
msi_val = xgene_msi_int_read(xgene_msi, i);
if (msi_val) {
dev_err(&pdev->dev, "Failed to clear spurious IRQ\n");
- return EINVAL;
+ return -EINVAL;
}
irq = platform_get_irq(pdev, i);
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index 97147f43e41c..75ddb8bee168 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -102,6 +102,9 @@
#define PCIE_MSI_SET_ADDR_HI_BASE 0xc80
#define PCIE_MSI_SET_ADDR_HI_OFFSET 0x04
+#define PCIE_RESOURCE_CTRL_REG 0xd2c
+#define PCIE_RSRC_SYS_CLK_RDY_TIME_MASK GENMASK(7, 0)
+
#define PCIE_ICMD_PM_REG 0x198
#define PCIE_TURN_OFF_LINK BIT(4)
@@ -149,6 +152,7 @@ enum mtk_gen3_pcie_flags {
* struct mtk_gen3_pcie_pdata - differentiate between host generations
* @power_up: pcie power_up callback
* @phy_resets: phy reset lines SoC data.
+ * @sys_clk_rdy_time_us: System clock ready time override (microseconds)
* @flags: pcie device flags.
*/
struct mtk_gen3_pcie_pdata {
@@ -157,6 +161,7 @@ struct mtk_gen3_pcie_pdata {
const char *id[MAX_NUM_PHY_RESETS];
int num_resets;
} phy_resets;
+ u8 sys_clk_rdy_time_us;
u32 flags;
};
@@ -435,6 +440,14 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
writel_relaxed(val, pcie->base + PCIE_CONF_LINK2_CTL_STS);
}
+ /* If parameter is present, adjust SYS_CLK_RDY_TIME to avoid glitching */
+ if (pcie->soc->sys_clk_rdy_time_us) {
+ val = readl_relaxed(pcie->base + PCIE_RESOURCE_CTRL_REG);
+ FIELD_MODIFY(PCIE_RSRC_SYS_CLK_RDY_TIME_MASK, &val,
+ pcie->soc->sys_clk_rdy_time_us);
+ writel_relaxed(val, pcie->base + PCIE_RESOURCE_CTRL_REG);
+ }
+
/* Set class code */
val = readl_relaxed(pcie->base + PCIE_PCI_IDS_1);
val &= ~GENMASK(31, 8);
@@ -1327,6 +1340,15 @@ static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_mt8192 = {
},
};
+static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_mt8196 = {
+ .power_up = mtk_pcie_power_up,
+ .phy_resets = {
+ .id[0] = "phy",
+ .num_resets = 1,
+ },
+ .sys_clk_rdy_time_us = 10,
+};
+
static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_en7581 = {
.power_up = mtk_pcie_en7581_power_up,
.phy_resets = {
@@ -1341,6 +1363,7 @@ static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_en7581 = {
static const struct of_device_id mtk_pcie_of_match[] = {
{ .compatible = "airoha,en7581-pcie", .data = &mtk_pcie_soc_en7581 },
{ .compatible = "mediatek,mt8192-pcie", .data = &mtk_pcie_soc_mt8192 },
+ { .compatible = "mediatek,mt8196-pcie", .data = &mtk_pcie_soc_mt8196 },
{},
};
MODULE_DEVICE_TABLE(of, mtk_pcie_of_match);
diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c
index a8a966844cf3..657875ef4657 100644
--- a/drivers/pci/controller/pcie-rcar-ep.c
+++ b/drivers/pci/controller/pcie-rcar-ep.c
@@ -436,9 +436,7 @@ static void rcar_pcie_ep_stop(struct pci_epc *epc)
}
static const struct pci_epc_features rcar_pcie_epc_features = {
- .linkup_notifier = false,
.msi_capable = true,
- .msix_capable = false,
/* use 64-bit BARs so mark BAR[1,3,5] as reserved */
.bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = 128,
.only_64bit = true, },
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
index fe288fd770c4..213028052aa5 100644
--- a/drivers/pci/controller/pcie-rcar-host.c
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -12,6 +12,7 @@
*/
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
@@ -38,7 +39,7 @@ struct rcar_msi {
DECLARE_BITMAP(used, INT_PCI_MSI_NR);
struct irq_domain *domain;
struct mutex map_lock;
- spinlock_t mask_lock;
+ raw_spinlock_t mask_lock;
int irq1;
int irq2;
};
@@ -52,20 +53,13 @@ struct rcar_pcie_host {
int (*phy_init_fn)(struct rcar_pcie_host *host);
};
-static DEFINE_SPINLOCK(pmsr_lock);
-
static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base)
{
- unsigned long flags;
u32 pmsr, val;
int ret = 0;
- spin_lock_irqsave(&pmsr_lock, flags);
-
- if (!pcie_base || pm_runtime_suspended(pcie_dev)) {
- ret = -EINVAL;
- goto unlock_exit;
- }
+ if (!pcie_base || pm_runtime_suspended(pcie_dev))
+ return -EINVAL;
pmsr = readl(pcie_base + PMSR);
@@ -87,8 +81,6 @@ static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base)
writel(L1FAEG | PMEL1RX, pcie_base + PMSR);
}
-unlock_exit:
- spin_unlock_irqrestore(&pmsr_lock, flags);
return ret;
}
@@ -584,7 +576,7 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
unsigned int index = find_first_bit(&reg, 32);
int ret;
- ret = generic_handle_domain_irq(msi->domain->parent, index);
+ ret = generic_handle_domain_irq(msi->domain, index);
if (ret) {
/* Unknown MSI, just clear it */
dev_dbg(dev, "unexpected MSI\n");
@@ -611,28 +603,26 @@ static void rcar_msi_irq_mask(struct irq_data *d)
{
struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
- unsigned long flags;
u32 value;
- spin_lock_irqsave(&msi->mask_lock, flags);
- value = rcar_pci_read_reg(pcie, PCIEMSIIER);
- value &= ~BIT(d->hwirq);
- rcar_pci_write_reg(pcie, value, PCIEMSIIER);
- spin_unlock_irqrestore(&msi->mask_lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) {
+ value = rcar_pci_read_reg(pcie, PCIEMSIIER);
+ value &= ~BIT(d->hwirq);
+ rcar_pci_write_reg(pcie, value, PCIEMSIIER);
+ }
}
static void rcar_msi_irq_unmask(struct irq_data *d)
{
struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
- unsigned long flags;
u32 value;
- spin_lock_irqsave(&msi->mask_lock, flags);
- value = rcar_pci_read_reg(pcie, PCIEMSIIER);
- value |= BIT(d->hwirq);
- rcar_pci_write_reg(pcie, value, PCIEMSIIER);
- spin_unlock_irqrestore(&msi->mask_lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) {
+ value = rcar_pci_read_reg(pcie, PCIEMSIIER);
+ value |= BIT(d->hwirq);
+ rcar_pci_write_reg(pcie, value, PCIEMSIIER);
+ }
}
static void rcar_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
@@ -745,7 +735,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
int err;
mutex_init(&msi->map_lock);
- spin_lock_init(&msi->mask_lock);
+ raw_spin_lock_init(&msi->mask_lock);
err = of_address_to_resource(dev->of_node, 0, &res);
if (err)
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index 300cd85fa035..799461335762 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -694,7 +694,6 @@ static int rockchip_pcie_ep_setup_irq(struct pci_epc *epc)
static const struct pci_epc_features rockchip_pcie_epc_features = {
.linkup_notifier = true,
.msi_capable = true,
- .msix_capable = false,
.intx_capable = true,
.align = ROCKCHIP_PCIE_AT_SIZE_ALIGN,
};
diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h
index 72a2c045f6fe..3e82a69b9c00 100644
--- a/drivers/pci/controller/pcie-rockchip.h
+++ b/drivers/pci/controller/pcie-rockchip.h
@@ -12,6 +12,7 @@
#define _PCIE_ROCKCHIP_H
#include <linux/clk.h>
+#include <linux/hw_bitfield.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
@@ -21,10 +22,10 @@
* The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16
* bits. This allows atomic updates of the register without locking.
*/
-#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val))
-#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val)
+#define HWORD_SET_BIT(val) (FIELD_PREP_WM16_CONST((val), 1))
+#define HWORD_CLR_BIT(val) (FIELD_PREP_WM16_CONST((val), 0))
-#define ENCODE_LANES(x) ((((x) >> 1) & 3) << 4)
+#define ENCODE_LANES(x) ((((x) >> 1) & 3))
#define MAX_LANE_NUM 4
#define MAX_REGION_LIMIT 32
#define MIN_EP_APERTURE 28
@@ -32,21 +33,21 @@
#define PCIE_CLIENT_BASE 0x0
#define PCIE_CLIENT_CONFIG (PCIE_CLIENT_BASE + 0x00)
-#define PCIE_CLIENT_CONF_ENABLE HIWORD_UPDATE_BIT(0x0001)
-#define PCIE_CLIENT_CONF_DISABLE HIWORD_UPDATE(0x0001, 0)
-#define PCIE_CLIENT_LINK_TRAIN_ENABLE HIWORD_UPDATE_BIT(0x0002)
-#define PCIE_CLIENT_LINK_TRAIN_DISABLE HIWORD_UPDATE(0x0002, 0)
-#define PCIE_CLIENT_ARI_ENABLE HIWORD_UPDATE_BIT(0x0008)
-#define PCIE_CLIENT_CONF_LANE_NUM(x) HIWORD_UPDATE(0x0030, ENCODE_LANES(x))
-#define PCIE_CLIENT_MODE_RC HIWORD_UPDATE_BIT(0x0040)
-#define PCIE_CLIENT_MODE_EP HIWORD_UPDATE(0x0040, 0)
-#define PCIE_CLIENT_GEN_SEL_1 HIWORD_UPDATE(0x0080, 0)
-#define PCIE_CLIENT_GEN_SEL_2 HIWORD_UPDATE_BIT(0x0080)
+#define PCIE_CLIENT_CONF_ENABLE HWORD_SET_BIT(0x0001)
+#define PCIE_CLIENT_CONF_DISABLE HWORD_CLR_BIT(0x0001)
+#define PCIE_CLIENT_LINK_TRAIN_ENABLE HWORD_SET_BIT(0x0002)
+#define PCIE_CLIENT_LINK_TRAIN_DISABLE HWORD_CLR_BIT(0x0002)
+#define PCIE_CLIENT_ARI_ENABLE HWORD_SET_BIT(0x0008)
+#define PCIE_CLIENT_CONF_LANE_NUM(x) FIELD_PREP_WM16(0x0030, ENCODE_LANES(x))
+#define PCIE_CLIENT_MODE_RC HWORD_SET_BIT(0x0040)
+#define PCIE_CLIENT_MODE_EP HWORD_CLR_BIT(0x0040)
+#define PCIE_CLIENT_GEN_SEL_1 HWORD_CLR_BIT(0x0080)
+#define PCIE_CLIENT_GEN_SEL_2 HWORD_SET_BIT(0x0080)
#define PCIE_CLIENT_LEGACY_INT_CTRL (PCIE_CLIENT_BASE + 0x0c)
-#define PCIE_CLIENT_INT_IN_ASSERT HIWORD_UPDATE_BIT(0x0002)
-#define PCIE_CLIENT_INT_IN_DEASSERT HIWORD_UPDATE(0x0002, 0)
-#define PCIE_CLIENT_INT_PEND_ST_PEND HIWORD_UPDATE_BIT(0x0001)
-#define PCIE_CLIENT_INT_PEND_ST_NORMAL HIWORD_UPDATE(0x0001, 0)
+#define PCIE_CLIENT_INT_IN_ASSERT HWORD_SET_BIT(0x0002)
+#define PCIE_CLIENT_INT_IN_DEASSERT HWORD_CLR_BIT(0x0002)
+#define PCIE_CLIENT_INT_PEND_ST_PEND HWORD_SET_BIT(0x0001)
+#define PCIE_CLIENT_INT_PEND_ST_NORMAL HWORD_CLR_BIT(0x0001)
#define PCIE_CLIENT_SIDE_BAND_STATUS (PCIE_CLIENT_BASE + 0x20)
#define PCIE_CLIENT_PHY_ST BIT(12)
#define PCIE_CLIENT_DEBUG_OUT_0 (PCIE_CLIENT_BASE + 0x3c)
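
/*
 * Background on the "hiword mask" convention used by these registers:
 * bits [31:16] of the written value select which of bits [15:0] the
 * hardware actually updates, so read-modify-write (and therefore
 * locking) is never needed. The macro below is the long-standing
 * open-coded form that FIELD_PREP_WM16() now standardizes:
 */
#define EXAMPLE_HIWORD_UPDATE(mask, val)	(((mask) << 16) | (val))

/* e.g. set only bit 1 (LINK_TRAIN), leaving all other bits untouched:
 *	writel(EXAMPLE_HIWORD_UPDATE(0x0002, 0x0002), base + PCIE_CLIENT_CONFIG);
 */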
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index 05b8c205493c..7db2c96c6cec 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -718,9 +718,10 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) |
E_ECAM_CR_ENABLE, E_ECAM_CONTROL);
- nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) |
- (NWL_ECAM_MAX_SIZE << E_ECAM_SIZE_SHIFT),
- E_ECAM_CONTROL);
+ ecam_val = nwl_bridge_readl(pcie, E_ECAM_CONTROL);
+ ecam_val &= ~E_ECAM_SIZE_LOC;
+ ecam_val |= NWL_ECAM_MAX_SIZE << E_ECAM_SIZE_SHIFT;
+ nwl_bridge_writel(pcie, ecam_val, E_ECAM_CONTROL);
nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_ecam_base),
E_ECAM_BASE_LO);
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
index f121836c3cf4..937ea6ae1ac4 100644
--- a/drivers/pci/controller/pcie-xilinx.c
+++ b/drivers/pci/controller/pcie-xilinx.c
@@ -400,7 +400,7 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
if (val & XILINX_PCIE_RPIFR1_MSI_INTR) {
val = pcie_read(pcie, XILINX_PCIE_REG_RPIFR2) &
XILINX_PCIE_RPIFR2_MSG_DATA;
- domain = pcie->msi_domain->parent;
+ domain = pcie->msi_domain;
} else {
val = (val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
XILINX_PCIE_RPIFR1_INTR_SHIFT;
diff --git a/drivers/pci/controller/plda/pcie-plda-host.c b/drivers/pci/controller/plda/pcie-plda-host.c
index 8e2db2e5b64b..3c2f68383010 100644
--- a/drivers/pci/controller/plda/pcie-plda-host.c
+++ b/drivers/pci/controller/plda/pcie-plda-host.c
@@ -599,8 +599,7 @@ int plda_pcie_host_init(struct plda_pcie_rp *port, struct pci_ops *ops,
bridge = devm_pci_alloc_host_bridge(dev, 0);
if (!bridge)
- return dev_err_probe(dev, -ENOMEM,
- "failed to alloc bridge\n");
+ return -ENOMEM;
if (port->host_ops && port->host_ops->host_init) {
ret = port->host_ops->host_init(port);
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 9bbb0ff4cc15..1bd5bf4a6097 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -280,10 +280,12 @@ static int vmd_msi_alloc(struct irq_domain *domain, unsigned int virq,
static void vmd_msi_free(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs)
{
+ struct irq_data *irq_data;
struct vmd_irq *vmdirq;
for (int i = 0; i < nr_irqs; ++i) {
- vmdirq = irq_get_chip_data(virq + i);
+ irq_data = irq_domain_get_irq_data(domain, virq + i);
+ vmdirq = irq_data->chip_data;
synchronize_srcu(&vmdirq->irq->srcu);
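
/*
 * Why the lookup changed: irq_get_chip_data() returns the chip data of
 * the topmost domain in the interrupt hierarchy, whereas
 * irq_domain_get_irq_data() resolves the irq_data at this specific
 * domain's level, so vmd_msi_free() reliably sees its own struct
 * vmd_irq even when the VMD domain is stacked beneath others.
 */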
@@ -304,9 +306,6 @@ static bool vmd_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
struct irq_domain *real_parent,
struct msi_domain_info *info)
{
- if (WARN_ON_ONCE(info->bus_token != DOMAIN_BUS_PCI_DEVICE_MSIX))
- return false;
-
if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
return false;
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index e091193bd8a8..31617772ad51 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -301,15 +301,20 @@ static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
if (!epf_test->dma_supported)
return;
- dma_release_channel(epf_test->dma_chan_tx);
- if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
+ if (epf_test->dma_chan_tx) {
+ dma_release_channel(epf_test->dma_chan_tx);
+ if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
+ epf_test->dma_chan_tx = NULL;
+ epf_test->dma_chan_rx = NULL;
+ return;
+ }
epf_test->dma_chan_tx = NULL;
- epf_test->dma_chan_rx = NULL;
- return;
}
- dma_release_channel(epf_test->dma_chan_rx);
- epf_test->dma_chan_rx = NULL;
+ if (epf_test->dma_chan_rx) {
+ dma_release_channel(epf_test->dma_chan_rx);
+ epf_test->dma_chan_rx = NULL;
+ }
}
static void pci_epf_test_print_rate(struct pci_epf_test *epf_test,
@@ -772,12 +777,24 @@ static void pci_epf_test_disable_doorbell(struct pci_epf_test *epf_test,
u32 status = le32_to_cpu(reg->status);
struct pci_epf *epf = epf_test->epf;
struct pci_epc *epc = epf->epc;
+ int ret;
if (bar < BAR_0)
goto set_status_err;
pci_epf_test_doorbell_cleanup(epf_test);
- pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, &epf_test->db_bar);
+
+ /*
+ * The doorbell feature temporarily overrides the inbound translation
+ * to point to the address stored in epf_test->db_bar.phys_addr, i.e.,
+ * it calls set_bar() twice without ever calling clear_bar(), as
+ * calling clear_bar() would clear the BAR's PCI address assigned by
+ * the host. Thus, when disabling the doorbell, restore the inbound
+ * translation to point to the memory allocated for the BAR.
+ */
+ ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, &epf->bar[bar]);
+ if (ret)
+ goto set_status_err;
status |= STATUS_DOORBELL_DISABLE_SUCCESS;
reg->status = cpu_to_le32(status);
@@ -1050,7 +1067,12 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
if (bar == test_reg_bar)
continue;
- base = pci_epf_alloc_space(epf, bar_size[bar], bar,
+ if (epc_features->bar[bar].type == BAR_FIXED)
+ test_reg_size = epc_features->bar[bar].fixed_size;
+ else
+ test_reg_size = bar_size[bar];
+
+ base = pci_epf_alloc_space(epf, test_reg_size, bar,
epc_features, PRIMARY_INTERFACE);
if (!base)
dev_err(dev, "Failed to allocate space for BAR%d\n",
diff --git a/drivers/pci/endpoint/pci-ep-msi.c b/drivers/pci/endpoint/pci-ep-msi.c
index 9ca89cbfec15..1b58357b905f 100644
--- a/drivers/pci/endpoint/pci-ep-msi.c
+++ b/drivers/pci/endpoint/pci-ep-msi.c
@@ -24,7 +24,7 @@ static void pci_epf_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
struct pci_epf *epf;
epc = pci_epc_get(dev_name(msi_desc_to_dev(desc)));
- if (!epc)
+ if (IS_ERR(epc))
return;
epf = list_first_entry_or_null(&epc->pci_epf, struct pci_epf, list);
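
/*
 * Convention behind the fix (sketch, hypothetical function name):
 * pci_epc_get() returns an ERR_PTR()-encoded errno on failure, never
 * NULL, so a NULL check passes even when the lookup failed.
 */
static int example_get_epc(const char *name)
{
	struct pci_epc *epc = pci_epc_get(name);

	if (IS_ERR(epc))
		return PTR_ERR(epc);	/* the errno travels in the pointer */

	pci_epc_put(epc);
	return 0;
}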
diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c
index ef7534a3ca40..88929360fe77 100644
--- a/drivers/pci/hotplug/cpqphp_pci.c
+++ b/drivers/pci/hotplug/cpqphp_pci.c
@@ -1302,7 +1302,7 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
dbg("found io_node(base, length) = %x, %x\n",
io_node->base, io_node->length);
- dbg("populated slot =%d \n", populated_slot);
+ dbg("populated slot = %d\n", populated_slot);
if (!populated_slot) {
io_node->next = ctrl->io_head;
ctrl->io_head = io_node;
@@ -1325,7 +1325,7 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
dbg("found mem_node(base, length) = %x, %x\n",
mem_node->base, mem_node->length);
- dbg("populated slot =%d \n", populated_slot);
+ dbg("populated slot = %d\n", populated_slot);
if (!populated_slot) {
mem_node->next = ctrl->mem_head;
ctrl->mem_head = mem_node;
@@ -1349,7 +1349,7 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
p_mem_node->length = pre_mem_length << 16;
dbg("found p_mem_node(base, length) = %x, %x\n",
p_mem_node->base, p_mem_node->length);
- dbg("populated slot =%d \n", populated_slot);
+ dbg("populated slot = %d\n", populated_slot);
if (!populated_slot) {
p_mem_node->next = ctrl->p_mem_head;
@@ -1373,7 +1373,7 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
bus_node->length = max_bus - secondary_bus + 1;
dbg("found bus_node(base, length) = %x, %x\n",
bus_node->base, bus_node->length);
- dbg("populated slot =%d \n", populated_slot);
+ dbg("populated slot = %d\n", populated_slot);
if (!populated_slot) {
bus_node->next = ctrl->bus_head;
ctrl->bus_head = bus_node;
diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c
index a5720d12e573..2324167656a6 100644
--- a/drivers/pci/hotplug/ibmphp_hpc.c
+++ b/drivers/pci/hotplug/ibmphp_hpc.c
@@ -124,7 +124,7 @@ static u8 i2c_ctrl_read(struct controller *ctlr_ptr, void __iomem *WPGBbar, u8 i
unsigned long ultemp;
unsigned long data; // actual data HILO format
- debug_polling("%s - Entry WPGBbar[%p] index[%x] \n", __func__, WPGBbar, index);
+ debug_polling("%s - Entry WPGBbar[%p] index[%x]\n", __func__, WPGBbar, index);
//--------------------------------------------------------------------
// READ - step 1
@@ -147,7 +147,7 @@ static u8 i2c_ctrl_read(struct controller *ctlr_ptr, void __iomem *WPGBbar, u8 i
ultemp = ultemp << 8;
data |= ultemp;
} else {
- err("this controller type is not supported \n");
+ err("this controller type is not supported\n");
return HPC_ERROR;
}
@@ -258,7 +258,7 @@ static u8 i2c_ctrl_write(struct controller *ctlr_ptr, void __iomem *WPGBbar, u8
ultemp = ultemp << 8;
data |= ultemp;
} else {
- err("this controller type is not supported \n");
+ err("this controller type is not supported\n");
return HPC_ERROR;
}
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index ac4375954c94..77dee43b7858 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -629,15 +629,18 @@ static int sriov_add_vfs(struct pci_dev *dev, u16 num_vfs)
if (dev->no_vf_scan)
return 0;
+ pci_lock_rescan_remove();
for (i = 0; i < num_vfs; i++) {
rc = pci_iov_add_virtfn(dev, i);
if (rc)
goto failed;
}
+ pci_unlock_rescan_remove();
return 0;
failed:
while (i--)
pci_iov_remove_virtfn(dev, i);
+ pci_unlock_rescan_remove();
return rc;
}
@@ -762,8 +765,10 @@ static void sriov_del_vfs(struct pci_dev *dev)
struct pci_sriov *iov = dev->sriov;
int i;
+ pci_lock_rescan_remove();
for (i = 0; i < iov->num_VFs; i++)
pci_iov_remove_virtfn(dev, i);
+ pci_unlock_rescan_remove();
}
static void sriov_disable(struct pci_dev *dev)
diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c
index 0938ef7ebabf..ce741ed9dc3f 100644
--- a/drivers/pci/msi/irqdomain.c
+++ b/drivers/pci/msi/irqdomain.c
@@ -148,20 +148,43 @@ static void pci_device_domain_set_desc(msi_alloc_info_t *arg, struct msi_desc *d
arg->hwirq = desc->msi_index;
}
-static __always_inline void cond_mask_parent(struct irq_data *data)
+static void cond_shutdown_parent(struct irq_data *data)
{
struct msi_domain_info *info = data->domain->host_data;
- if (unlikely(info->flags & MSI_FLAG_PCI_MSI_MASK_PARENT))
+ if (unlikely(info->flags & MSI_FLAG_PCI_MSI_STARTUP_PARENT))
+ irq_chip_shutdown_parent(data);
+ else if (unlikely(info->flags & MSI_FLAG_PCI_MSI_MASK_PARENT))
irq_chip_mask_parent(data);
}
-static __always_inline void cond_unmask_parent(struct irq_data *data)
+static unsigned int cond_startup_parent(struct irq_data *data)
{
struct msi_domain_info *info = data->domain->host_data;
- if (unlikely(info->flags & MSI_FLAG_PCI_MSI_MASK_PARENT))
+ if (unlikely(info->flags & MSI_FLAG_PCI_MSI_STARTUP_PARENT))
+ return irq_chip_startup_parent(data);
+ else if (unlikely(info->flags & MSI_FLAG_PCI_MSI_MASK_PARENT))
irq_chip_unmask_parent(data);
+
+ return 0;
+}
+
+static void pci_irq_shutdown_msi(struct irq_data *data)
+{
+ struct msi_desc *desc = irq_data_get_msi_desc(data);
+
+ pci_msi_mask(desc, BIT(data->irq - desc->irq));
+ cond_shutdown_parent(data);
+}
+
+static unsigned int pci_irq_startup_msi(struct irq_data *data)
+{
+ struct msi_desc *desc = irq_data_get_msi_desc(data);
+ unsigned int ret = cond_startup_parent(data);
+
+ pci_msi_unmask(desc, BIT(data->irq - desc->irq));
+ return ret;
}
static void pci_irq_mask_msi(struct irq_data *data)
@@ -169,14 +192,12 @@ static void pci_irq_mask_msi(struct irq_data *data)
struct msi_desc *desc = irq_data_get_msi_desc(data);
pci_msi_mask(desc, BIT(data->irq - desc->irq));
- cond_mask_parent(data);
}
static void pci_irq_unmask_msi(struct irq_data *data)
{
struct msi_desc *desc = irq_data_get_msi_desc(data);
- cond_unmask_parent(data);
pci_msi_unmask(desc, BIT(data->irq - desc->irq));
}
@@ -194,6 +215,8 @@ static void pci_irq_unmask_msi(struct irq_data *data)
static const struct msi_domain_template pci_msi_template = {
.chip = {
.name = "PCI-MSI",
+ .irq_startup = pci_irq_startup_msi,
+ .irq_shutdown = pci_irq_shutdown_msi,
.irq_mask = pci_irq_mask_msi,
.irq_unmask = pci_irq_unmask_msi,
.irq_write_msi_msg = pci_msi_domain_write_msg,
@@ -210,15 +233,27 @@ static const struct msi_domain_template pci_msi_template = {
},
};
+static void pci_irq_shutdown_msix(struct irq_data *data)
+{
+ pci_msix_mask(irq_data_get_msi_desc(data));
+ cond_shutdown_parent(data);
+}
+
+static unsigned int pci_irq_startup_msix(struct irq_data *data)
+{
+ unsigned int ret = cond_startup_parent(data);
+
+ pci_msix_unmask(irq_data_get_msi_desc(data));
+ return ret;
+}
+
static void pci_irq_mask_msix(struct irq_data *data)
{
pci_msix_mask(irq_data_get_msi_desc(data));
- cond_mask_parent(data);
}
static void pci_irq_unmask_msix(struct irq_data *data)
{
- cond_unmask_parent(data);
pci_msix_unmask(irq_data_get_msi_desc(data));
}
@@ -234,6 +269,8 @@ EXPORT_SYMBOL_GPL(pci_msix_prepare_desc);
static const struct msi_domain_template pci_msix_template = {
.chip = {
.name = "PCI-MSIX",
+ .irq_startup = pci_irq_startup_msix,
+ .irq_shutdown = pci_irq_shutdown_msix,
.irq_mask = pci_irq_mask_msix,
.irq_unmask = pci_irq_unmask_msix,
.irq_write_msi_msg = pci_msi_domain_write_msg,
@@ -422,7 +459,7 @@ u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
of_node = irq_domain_get_of_node(domain);
- rid = of_node ? of_msi_map_id(&pdev->dev, of_node, rid) :
+ rid = of_node ? of_msi_xlate(&pdev->dev, &of_node, rid) :
iort_msi_map_id(&pdev->dev, rid);
return rid;
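
/*
 * Resulting call flow for a PCI MSI/MSI-X interrupt with a parent
 * domain, as reshaped by the hunks above (a summary, not new code):
 *
 *   request_irq()                 free_irq()
 *     irq_startup()                 irq_shutdown()
 *       cond_startup_parent()         pci_msi_mask()/pci_msix_mask()
 *       pci_msi_unmask()/...          cond_shutdown_parent()
 *
 * Plain irq_mask()/irq_unmask() now touch only the PCI-side mask bit;
 * the parent domain is involved only at startup and shutdown.
 */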
diff --git a/drivers/pci/of_property.c b/drivers/pci/of_property.c
index 506fcd507113..7aae46f333d9 100644
--- a/drivers/pci/of_property.c
+++ b/drivers/pci/of_property.c
@@ -279,13 +279,21 @@ static int of_pci_prop_intr_map(struct pci_dev *pdev, struct of_changeset *ocs,
mapp++;
*mapp = out_irq[i].np->phandle;
mapp++;
- if (addr_sz[i]) {
- ret = of_property_read_u32_array(out_irq[i].np,
- "reg", mapp,
- addr_sz[i]);
- if (ret)
- goto failed;
- }
+
+ /*
+ * On modern interrupt controllers, a device address does not
+ * affect the device <-> interrupt-controller HW connection;
+ * moreover, the kernel (i.e., of_irq_parse_raw()) ignores the
+ * values in the parent unit address cells while parsing the
+ * interrupt-map property, because they are irrelevant for
+ * interrupt mapping on such systems.
+ *
+ * Leave the parent unit address initialized to 0 --
+ * just take into account the #address-cells size
+ * to build the property properly.
+ */
mapp += addr_sz[i];
memcpy(mapp, out_irq[i].args,
out_irq[i].args_count * sizeof(u32));
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index da5657a02007..78e108e47254 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -360,7 +360,7 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
pages_free:
devm_memunmap_pages(&pdev->dev, pgmap);
pgmap_free:
- devm_kfree(&pdev->dev, pgmap);
+ devm_kfree(&pdev->dev, p2p_pgmap);
return error;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_add_resource);
@@ -738,7 +738,7 @@ EXPORT_SYMBOL_GPL(pci_p2pdma_distance_many);
* pci_has_p2pmem - check if a given PCI device has published any p2pmem
* @pdev: PCI device to check
*/
-bool pci_has_p2pmem(struct pci_dev *pdev)
+static bool pci_has_p2pmem(struct pci_dev *pdev)
{
struct pci_p2pdma *p2pdma;
bool res;
@@ -750,7 +750,6 @@ bool pci_has_p2pmem(struct pci_dev *pdev)
return res;
}
-EXPORT_SYMBOL_GPL(pci_has_p2pmem);
/**
* pci_p2pmem_find_many - find a peer-to-peer DMA memory device compatible with
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index ddb25960ea47..9369377725fa 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -122,6 +122,8 @@ phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
bool pci_acpi_preserve_config(struct pci_host_bridge *host_bridge)
{
+ bool ret = false;
+
if (ACPI_HANDLE(&host_bridge->dev)) {
union acpi_object *obj;
@@ -135,11 +137,11 @@ bool pci_acpi_preserve_config(struct pci_host_bridge *host_bridge)
1, DSM_PCI_PRESERVE_BOOT_CONFIG,
NULL, ACPI_TYPE_INTEGER);
if (obj && obj->integer.value == 0)
- return true;
+ ret = true;
ACPI_FREE(obj);
}
- return false;
+ return ret;
}
/* _HPX PCI Setting Record (Type 0); same as _HPP */
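
/*
 * Shape of the leak being fixed (sketch): the early return skipped
 * ACPI_FREE(), leaking the object returned by every successful _DSM
 * evaluation:
 *
 *	obj = acpi_evaluate_dsm_typed(...);
 *	if (obj && obj->integer.value == 0)
 *		return true;		// 'obj' leaked here
 *	ACPI_FREE(obj);
 *
 * Routing every path through the single ACPI_FREE(), as above, fixes it.
 */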
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 63665240ae87..302d61783f6c 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1582,7 +1582,7 @@ static int pci_uevent(const struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-#if defined(CONFIG_PCIEAER) || defined(CONFIG_EEH)
+#if defined(CONFIG_PCIEAER) || defined(CONFIG_EEH) || defined(CONFIG_S390)
/**
* pci_uevent_ers - emit a uevent during recovery path of PCI device
* @pdev: PCI device undergoing error recovery
@@ -1596,6 +1596,7 @@ void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type)
switch (err_type) {
case PCI_ERS_RESULT_NONE:
case PCI_ERS_RESULT_CAN_RECOVER:
+ case PCI_ERS_RESULT_NEED_RESET:
envp[idx++] = "ERROR_EVENT=BEGIN_RECOVERY";
envp[idx++] = "DEVICE_ONLINE=0";
break;
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 5eea14c1f7f5..9d6f74bd95f8 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -30,6 +30,7 @@
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/aperture.h>
+#include <linux/unaligned.h>
#include "pci.h"
#ifndef ARCH_PCI_DEV_GROUPS
@@ -177,6 +178,13 @@ static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
for (i = 0; i < max; i++) {
struct resource *res = &pci_dev->resource[i];
+ struct resource zerores = {};
+
+ /* For backwards compatibility */
+ if (i >= PCI_BRIDGE_RESOURCES && i <= PCI_BRIDGE_RESOURCE_END &&
+ res->flags & (IORESOURCE_UNSET | IORESOURCE_DISABLED))
+ res = &zerores;
+
pci_resource_to_user(pci_dev, i, res, &start, &end);
len += sysfs_emit_at(buf, len, "0x%016llx 0x%016llx 0x%016llx\n",
(unsigned long long)start,
@@ -201,8 +209,14 @@ static ssize_t max_link_width_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ ssize_t ret;
- return sysfs_emit(buf, "%u\n", pcie_get_width_cap(pdev));
+ /* We read PCI_EXP_LNKCAP, so we need the device to be accessible. */
+ pci_config_pm_runtime_get(pdev);
+ ret = sysfs_emit(buf, "%u\n", pcie_get_width_cap(pdev));
+ pci_config_pm_runtime_put(pdev);
+
+ return ret;
}
static DEVICE_ATTR_RO(max_link_width);
@@ -214,7 +228,10 @@ static ssize_t current_link_speed_show(struct device *dev,
int err;
enum pci_bus_speed speed;
+ pci_config_pm_runtime_get(pci_dev);
err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
+ pci_config_pm_runtime_put(pci_dev);
+
if (err)
return -EINVAL;
@@ -231,7 +248,10 @@ static ssize_t current_link_width_show(struct device *dev,
u16 linkstat;
int err;
+ pci_config_pm_runtime_get(pci_dev);
err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
+ pci_config_pm_runtime_put(pci_dev);
+
if (err)
return -EINVAL;
@@ -247,7 +267,10 @@ static ssize_t secondary_bus_number_show(struct device *dev,
u8 sec_bus;
int err;
+ pci_config_pm_runtime_get(pci_dev);
err = pci_read_config_byte(pci_dev, PCI_SECONDARY_BUS, &sec_bus);
+ pci_config_pm_runtime_put(pci_dev);
+
if (err)
return -EINVAL;
@@ -263,7 +286,10 @@ static ssize_t subordinate_bus_number_show(struct device *dev,
u8 sub_bus;
int err;
+ pci_config_pm_runtime_get(pci_dev);
err = pci_read_config_byte(pci_dev, PCI_SUBORDINATE_BUS, &sub_bus);
+ pci_config_pm_runtime_put(pci_dev);
+
if (err)
return -EINVAL;
@@ -694,6 +720,22 @@ static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(boot_vga);
+static ssize_t serial_number_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ u64 dsn;
+ u8 bytes[8];
+
+ dsn = pci_get_dsn(pci_dev);
+ if (!dsn)
+ return -EIO;
+
+ put_unaligned_be64(dsn, bytes);
+ return sysfs_emit(buf, "%8phD\n", bytes);
+}
+static DEVICE_ATTR_ADMIN_RO(serial_number);
+
static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
@@ -1475,8 +1517,9 @@ static ssize_t reset_method_store(struct device *dev,
return count;
}
- pm_runtime_get_sync(dev);
- struct device *pmdev __free(pm_runtime_put) = dev;
+ ACQUIRE(pm_runtime_active_try, pm)(dev);
+ if (ACQUIRE_ERR(pm_runtime_active_try, &pm))
+ return -ENXIO;
if (sysfs_streq(buf, "default")) {
pci_init_reset_methods(pdev);
@@ -1555,13 +1598,19 @@ static ssize_t __resource_resize_store(struct device *dev, int n,
const char *buf, size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
- unsigned long size, flags;
+ struct pci_bus *bus = pdev->bus;
+ struct resource *b_win, *res;
+ unsigned long size;
int ret, i;
u16 cmd;
if (kstrtoul(buf, 0, &size) < 0)
return -EINVAL;
+ b_win = pbus_select_window(bus, pci_resource_n(pdev, n));
+ if (!b_win)
+ return -EINVAL;
+
device_lock(dev);
if (dev->driver || pci_num_vf(pdev)) {
ret = -EBUSY;
@@ -1581,19 +1630,19 @@ static ssize_t __resource_resize_store(struct device *dev, int n,
pci_write_config_word(pdev, PCI_COMMAND,
cmd & ~PCI_COMMAND_MEMORY);
- flags = pci_resource_flags(pdev, n);
-
pci_remove_resource_files(pdev);
- for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
- if (pci_resource_len(pdev, i) &&
- pci_resource_flags(pdev, i) == flags)
+ pci_dev_for_each_resource(pdev, res, i) {
+ if (i >= PCI_BRIDGE_RESOURCES)
+ break;
+
+ if (b_win == pbus_select_window(bus, res))
pci_release_resource(pdev, i);
}
ret = pci_resize_resource(pdev, n, size);
- pci_assign_unassigned_bus_resources(pdev->bus);
+ pci_assign_unassigned_bus_resources(bus);
if (pci_create_resource_files(pdev))
pci_warn(pdev, "Failed to recreate resource files after BAR resizing\n");
@@ -1698,6 +1747,7 @@ late_initcall(pci_sysfs_init);
static struct attribute *pci_dev_dev_attrs[] = {
&dev_attr_boot_vga.attr,
+ &dev_attr_serial_number.attr,
NULL,
};
@@ -1710,6 +1760,9 @@ static umode_t pci_dev_attrs_are_visible(struct kobject *kobj,
if (a == &dev_attr_boot_vga.attr && pci_is_vga(pdev))
return a->mode;
+ if (a == &dev_attr_serial_number.attr && pci_get_dsn(pdev))
+ return a->mode;
+
return 0;
}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index b0f4d98036cd..b14dd064006c 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -423,36 +423,10 @@ found:
return 1;
}
-static u8 __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
- u8 pos, int cap, int *ttl)
-{
- u8 id;
- u16 ent;
-
- pci_bus_read_config_byte(bus, devfn, pos, &pos);
-
- while ((*ttl)--) {
- if (pos < 0x40)
- break;
- pos &= ~3;
- pci_bus_read_config_word(bus, devfn, pos, &ent);
-
- id = ent & 0xff;
- if (id == 0xff)
- break;
- if (id == cap)
- return pos;
- pos = (ent >> 8);
- }
- return 0;
-}
-
static u8 __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
u8 pos, int cap)
{
- int ttl = PCI_FIND_CAP_TTL;
-
- return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
+ return PCI_FIND_NEXT_CAP(pci_bus_read_config, pos, cap, bus, devfn);
}
u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
@@ -553,42 +527,11 @@ EXPORT_SYMBOL(pci_bus_find_capability);
*/
u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 start, int cap)
{
- u32 header;
- int ttl;
- u16 pos = PCI_CFG_SPACE_SIZE;
-
- /* minimum 8 bytes per capability */
- ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
-
if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
return 0;
- if (start)
- pos = start;
-
- if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
- return 0;
-
- /*
- * If we have no capabilities, this is indicated by cap ID,
- * cap version and next pointer all being 0.
- */
- if (header == 0)
- return 0;
-
- while (ttl-- > 0) {
- if (PCI_EXT_CAP_ID(header) == cap && pos != start)
- return pos;
-
- pos = PCI_EXT_CAP_NEXT(header);
- if (pos < PCI_CFG_SPACE_SIZE)
- break;
-
- if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
- break;
- }
-
- return 0;
+ return PCI_FIND_NEXT_EXT_CAP(pci_bus_read_config, start, cap,
+ dev->bus, dev->devfn);
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
@@ -648,7 +591,7 @@ EXPORT_SYMBOL_GPL(pci_get_dsn);
static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
{
- int rc, ttl = PCI_FIND_CAP_TTL;
+ int rc;
u8 cap, mask;
if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
@@ -656,8 +599,8 @@ static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
else
mask = HT_5BIT_CAP_MASK;
- pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
- PCI_CAP_ID_HT, &ttl);
+ pos = PCI_FIND_NEXT_CAP(pci_bus_read_config, pos,
+ PCI_CAP_ID_HT, dev->bus, dev->devfn);
while (pos) {
rc = pci_read_config_byte(dev, pos + 3, &cap);
if (rc != PCIBIOS_SUCCESSFUL)
@@ -666,9 +609,10 @@ static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
if ((cap & mask) == ht_cap)
return pos;
- pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
- pos + PCI_CAP_LIST_NEXT,
- PCI_CAP_ID_HT, &ttl);
+ pos = PCI_FIND_NEXT_CAP(pci_bus_read_config,
+ pos + PCI_CAP_LIST_NEXT,
+ PCI_CAP_ID_HT, dev->bus,
+ dev->devfn);
}
return 0;
@@ -1374,6 +1318,11 @@ int pci_power_up(struct pci_dev *dev)
return -EIO;
}
+ if (pci_dev_is_disconnected(dev)) {
+ dev->current_state = PCI_D3cold;
+ return -EIO;
+ }
+
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
if (PCI_POSSIBLE_ERROR(pmcsr)) {
pci_err(dev, "Unable to change power state from %s to D0, device inaccessible\n",
@@ -5932,6 +5881,7 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
{
u16 v;
int ret;
+ unsigned int firstbit;
struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
@@ -5949,7 +5899,10 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
rq = mps;
}
- v = FIELD_PREP(PCI_EXP_DEVCTL_READRQ, ffs(rq) - 8);
+ firstbit = ffs(rq);
+ if (firstbit < 8)
+ return -EINVAL;
+ v = FIELD_PREP(PCI_EXP_DEVCTL_READRQ, firstbit - 8);
if (bridge->no_inc_mrrs) {
int max_mrrs = pcie_get_readrq(dev);
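
The READRQ field encodes the request size as log2(rq) - 7, i.e. 128 -> 0 through 4096 -> 5, which is what ffs(rq) - 8 computes for a power of two; the new guard rejects any value whose first set bit sits below bit 7 before it can underflow the field. Quick check:

#include <stdio.h>
#include <strings.h>	/* POSIX ffs() */

int main(void)
{
	for (int rq = 64; rq <= 4096; rq *= 2) {
		int firstbit = ffs(rq);

		if (firstbit < 8)
			printf("rq=%d rejected\n", rq);
		else
			printf("rq=%d field=%d\n", rq, firstbit - 8);
	}
	return 0;
}
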
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 34f65d69662e..4492b809094b 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -2,12 +2,15 @@
#ifndef DRIVERS_PCI_H
#define DRIVERS_PCI_H
+#include <linux/align.h>
+#include <linux/bitfield.h>
#include <linux/pci.h>
struct pcie_tlp_log;
/* Number of possible devfns: 0.0 to 1f.7 inclusive */
#define MAX_NR_DEVFNS 256
+#define PCI_MAX_NR_DEVS 32
#define MAX_NR_LANES 16
@@ -81,13 +84,102 @@ struct pcie_tlp_log;
#define PCIE_MSG_CODE_DEASSERT_INTC 0x26
#define PCIE_MSG_CODE_DEASSERT_INTD 0x27
+#define PCI_BUS_BRIDGE_IO_WINDOW 0
+#define PCI_BUS_BRIDGE_MEM_WINDOW 1
+#define PCI_BUS_BRIDGE_PREF_MEM_WINDOW 2
+
extern const unsigned char pcie_link_speed[];
extern bool pci_early_dump;
+extern struct mutex pci_rescan_remove_lock;
+
bool pcie_cap_has_lnkctl(const struct pci_dev *dev);
bool pcie_cap_has_lnkctl2(const struct pci_dev *dev);
bool pcie_cap_has_rtctl(const struct pci_dev *dev);
+/* Standard Capability finder */
+/**
+ * PCI_FIND_NEXT_CAP - Find a PCI standard capability
+ * @read_cfg: Config space read accessor; token-pasted with _byte/_word suffixes
+ * @start: Starting position to begin search
+ * @cap: Capability ID to find
+ * @args: Arguments to pass to read_cfg function
+ *
+ * Search the capability list in PCI config space to find @cap.
+ * Implements TTL (time-to-live) protection against infinite loops.
+ *
+ * Return: Position of the capability if found, 0 otherwise.
+ */
+#define PCI_FIND_NEXT_CAP(read_cfg, start, cap, args...) \
+({ \
+ int __ttl = PCI_FIND_CAP_TTL; \
+ u8 __id, __found_pos = 0; \
+ u8 __pos = (start); \
+ u16 __ent; \
+ \
+ read_cfg##_byte(args, __pos, &__pos); \
+ \
+ while (__ttl--) { \
+ if (__pos < PCI_STD_HEADER_SIZEOF) \
+ break; \
+ \
+ __pos = ALIGN_DOWN(__pos, 4); \
+ read_cfg##_word(args, __pos, &__ent); \
+ \
+ __id = FIELD_GET(PCI_CAP_ID_MASK, __ent); \
+ if (__id == 0xff) \
+ break; \
+ \
+ if (__id == (cap)) { \
+ __found_pos = __pos; \
+ break; \
+ } \
+ \
+ __pos = FIELD_GET(PCI_CAP_LIST_NEXT_MASK, __ent); \
+ } \
+ __found_pos; \
+})
+
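
A stand-alone sketch of the bounded list walk the macro performs, against a mock 256-byte config space (offsets and capability IDs below are invented for illustration; the real macro reads a 16-bit word and splits it with FIELD_GET()):

#include <stdint.h>
#include <stdio.h>

#define TTL 48			/* assumed value of PCI_FIND_CAP_TTL */

static uint8_t cfg[256];	/* mock config space */

static uint8_t find_cap(uint8_t pos, uint8_t cap)
{
	pos = cfg[pos];		/* first read fetches the list head */

	for (int ttl = TTL; ttl-- && pos >= 0x40; pos = cfg[(pos & ~3) + 1]) {
		uint8_t id = cfg[pos & ~3];

		if (id == 0xff)
			break;
		if (id == cap)
			return pos & ~3;
	}
	return 0;
}

int main(void)
{
	cfg[0x34] = 0x50;			/* capabilities pointer */
	cfg[0x50] = 0x01; cfg[0x51] = 0x60;	/* PM cap, next at 0x60 */
	cfg[0x60] = 0x10; cfg[0x61] = 0x00;	/* PCIe cap, list end */

	printf("PCIe cap at %#x\n", find_cap(0x34, 0x10));	/* 0x60 */
	return 0;
}
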
+/* Extended Capability finder */
+/**
+ * PCI_FIND_NEXT_EXT_CAP - Find a PCI extended capability
+ * @read_cfg: Config space read accessor; token-pasted with a _dword suffix
+ * @start: Starting position to begin search (0 for initial search)
+ * @cap: Extended capability ID to find
+ * @args: Arguments to pass to read_cfg function
+ *
+ * Search the extended capability list in PCI config space to find @cap.
+ * Implements TTL protection against infinite loops using a calculated
+ * maximum search count.
+ *
+ * Return: Position of the capability if found, 0 otherwise.
+ */
+#define PCI_FIND_NEXT_EXT_CAP(read_cfg, start, cap, args...) \
+({ \
+ u16 __pos = (start) ?: PCI_CFG_SPACE_SIZE; \
+ u16 __found_pos = 0; \
+ int __ttl, __ret; \
+ u32 __header; \
+ \
+ __ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8; \
+ while (__ttl-- > 0 && __pos >= PCI_CFG_SPACE_SIZE) { \
+ __ret = read_cfg##_dword(args, __pos, &__header); \
+ if (__ret != PCIBIOS_SUCCESSFUL) \
+ break; \
+ \
+ if (__header == 0) \
+ break; \
+ \
+ if (PCI_EXT_CAP_ID(__header) == (cap) && __pos != start) {\
+ __found_pos = __pos; \
+ break; \
+ } \
+ \
+ __pos = PCI_EXT_CAP_NEXT(__header); \
+ } \
+ __found_pos; \
+})
+
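
The TTL here bounds the walk at (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8 = (4096 - 256) / 8 = 480 entries, since an extended capability occupies at least 8 bytes. A small sketch of the header decode, on an invented header value:

#include <stdint.h>
#include <stdio.h>

/* Same field layout as PCI_EXT_CAP_ID()/PCI_EXT_CAP_NEXT() */
#define EXT_CAP_ID(h)	((h) & 0xffff)
#define EXT_CAP_NEXT(h)	(((h) >> 20) & 0xffc)

int main(void)
{
	/* hypothetical header: AER (ID 0x0001), version 1, next at 0x148 */
	uint32_t header = 0x14810001;

	printf("ttl=%d id=%#x next=%#x\n", (4096 - 256) / 8,
	       EXT_CAP_ID(header), EXT_CAP_NEXT(header));
	return 0;
}
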
/* Functions internal to the PCI core code */
#ifdef CONFIG_DMI
@@ -330,7 +422,7 @@ struct device *pci_get_host_bridge_device(struct pci_dev *dev);
void pci_put_host_bridge_device(struct device *dev);
unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge);
-int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type);
+int pbus_reassign_bridge_resources(struct pci_bus *bus, struct resource *res);
int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
int pci_configure_extended_tags(struct pci_dev *dev, void *ign);
@@ -381,6 +473,8 @@ static inline int pci_resource_num(const struct pci_dev *dev,
return resno;
}
+struct resource *pbus_select_window(struct pci_bus *bus,
+ const struct resource *res);
void pci_reassigndev_resource_alignment(struct pci_dev *dev);
void pci_disable_bridge_window(struct pci_dev *dev);
struct pci_bus *pci_bus_get(struct pci_bus *bus);
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index e286c197d716..0b5ed4722ac3 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -43,7 +43,7 @@
#define AER_ERROR_SOURCES_MAX 128
#define AER_MAX_TYPEOF_COR_ERRS 16 /* as per PCI_ERR_COR_STATUS */
-#define AER_MAX_TYPEOF_UNCOR_ERRS 27 /* as per PCI_ERR_UNCOR_STATUS*/
+#define AER_MAX_TYPEOF_UNCOR_ERRS 32 /* as per PCI_ERR_UNCOR_STATUS */
struct aer_err_source {
u32 status; /* PCI_ERR_ROOT_STATUS */
@@ -96,11 +96,21 @@ struct aer_info {
};
#define AER_LOG_TLP_MASKS (PCI_ERR_UNC_POISON_TLP| \
+ PCI_ERR_UNC_POISON_BLK | \
PCI_ERR_UNC_ECRC| \
PCI_ERR_UNC_UNSUP| \
PCI_ERR_UNC_COMP_ABORT| \
PCI_ERR_UNC_UNX_COMP| \
- PCI_ERR_UNC_MALF_TLP)
+ PCI_ERR_UNC_ACSV | \
+ PCI_ERR_UNC_MCBTLP | \
+ PCI_ERR_UNC_ATOMEG | \
+ PCI_ERR_UNC_DMWR_BLK | \
+ PCI_ERR_UNC_XLAT_BLK | \
+ PCI_ERR_UNC_TLPPRE | \
+ PCI_ERR_UNC_MALF_TLP | \
+ PCI_ERR_UNC_IDE_CHECK | \
+ PCI_ERR_UNC_MISR_IDE | \
+ PCI_ERR_UNC_PCRC_CHECK)
#define SYSTEM_ERROR_INTR_ON_MESG_MASK (PCI_EXP_RTCTL_SECEE| \
PCI_EXP_RTCTL_SENFEE| \
@@ -383,6 +393,10 @@ void pci_aer_init(struct pci_dev *dev)
return;
dev->aer_info = kzalloc(sizeof(*dev->aer_info), GFP_KERNEL);
+ if (!dev->aer_info) {
+ dev->aer_cap = 0;
+ return;
+ }
ratelimit_state_init(&dev->aer_info->correctable_ratelimit,
DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
@@ -525,11 +539,11 @@ static const char *aer_uncorrectable_error_string[] = {
"AtomicOpBlocked", /* Bit Position 24 */
"TLPBlockedErr", /* Bit Position 25 */
"PoisonTLPBlocked", /* Bit Position 26 */
- NULL, /* Bit Position 27 */
- NULL, /* Bit Position 28 */
- NULL, /* Bit Position 29 */
- NULL, /* Bit Position 30 */
- NULL, /* Bit Position 31 */
+ "DMWrReqBlocked", /* Bit Position 27 */
+ "IDECheck", /* Bit Position 28 */
+ "MisIDETLP", /* Bit Position 29 */
+ "PCRC_CHECK", /* Bit Position 30 */
+ "TLPXlatBlocked", /* Bit Position 31 */
};
static const char *aer_agent_string[] = {
@@ -786,6 +800,9 @@ static void pci_rootport_aer_stats_incr(struct pci_dev *pdev,
static int aer_ratelimit(struct pci_dev *dev, unsigned int severity)
{
+ if (!dev->aer_info)
+ return 1;
+
switch (severity) {
case AER_NONFATAL:
return __ratelimit(&dev->aer_info->nonfatal_ratelimit);
@@ -796,6 +813,20 @@ static int aer_ratelimit(struct pci_dev *dev, unsigned int severity)
}
}
+static bool tlp_header_logged(u32 status, u32 capctl)
+{
+ /* Errors for which a header is always logged (PCIe r7.0 sec 6.2.7) */
+ if (status & AER_LOG_TLP_MASKS)
+ return true;
+
+ /* Completion Timeout header is only logged on capable devices */
+ if (status & PCI_ERR_UNC_COMP_TIME &&
+ capctl & PCI_ERR_CAP_COMP_TIME_LOG)
+ return true;
+
+ return false;
+}
+
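
Worked example for tlp_header_logged(): a Completion Timeout only carries a logged header when the device advertises that capability in the AER Capabilities and Control register. A stand-alone sketch, assuming the usual bit positions (status bit 14 for PCI_ERR_UNC_COMP_TIME, cap/control bit 12 for PCI_ERR_CAP_COMP_TIME_LOG):

#include <stdbool.h>
#include <stdio.h>

#define UNC_COMP_TIME	0x00004000u	/* assumed PCI_ERR_UNC_COMP_TIME */
#define CAP_COMP_LOG	0x00001000u	/* assumed PCI_ERR_CAP_COMP_TIME_LOG */

static bool header_logged(unsigned int status, unsigned int capctl)
{
	return (status & UNC_COMP_TIME) && (capctl & CAP_COMP_LOG);
}

int main(void)
{
	printf("capable: %d\n", header_logged(UNC_COMP_TIME, CAP_COMP_LOG));
	printf("not capable: %d\n", header_logged(UNC_COMP_TIME, 0));
	return 0;
}
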
static void __aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
{
const char **strings;
@@ -910,7 +941,7 @@ void pci_print_aer(struct pci_dev *dev, int aer_severity,
status = aer->uncor_status;
mask = aer->uncor_mask;
info.level = KERN_ERR;
- tlp_header_valid = status & AER_LOG_TLP_MASKS;
+ tlp_header_valid = tlp_header_logged(status, aer->cap_control);
}
info.status = status;
@@ -1401,7 +1432,7 @@ int aer_get_device_error_info(struct aer_err_info *info, int i)
pci_read_config_dword(dev, aer + PCI_ERR_CAP, &aercc);
info->first_error = PCI_ERR_CAP_FEP(aercc);
- if (info->status & AER_LOG_TLP_MASKS) {
+ if (tlp_header_logged(info->status, aercc)) {
info->tlp_header_valid = 1;
pcie_read_tlp_log(dev, aer + PCI_ERR_HEADER_LOG,
aer + PCI_ERR_PREFIX_LOG,
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 919a05b97647..7cc8281e7011 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -15,6 +15,7 @@
#include <linux/math.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/of.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/errno.h>
@@ -235,13 +236,15 @@ struct pcie_link_state {
u32 aspm_support:7; /* Supported ASPM state */
u32 aspm_enabled:7; /* Enabled ASPM state */
u32 aspm_capable:7; /* Capable ASPM state with latency */
- u32 aspm_default:7; /* Default ASPM state by BIOS */
+ u32 aspm_default:7; /* Default ASPM state by BIOS or
+ override */
u32 aspm_disable:7; /* Disabled ASPM state */
/* Clock PM state */
u32 clkpm_capable:1; /* Clock PM capable? */
u32 clkpm_enabled:1; /* Current Clock PM state */
- u32 clkpm_default:1; /* Default Clock PM state by BIOS */
+ u32 clkpm_default:1; /* Default Clock PM state by BIOS or
+ override */
u32 clkpm_disable:1; /* Clock PM disabled */
};
@@ -373,6 +376,18 @@ static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
pcie_set_clkpm_nocheck(link, enable);
}
+static void pcie_clkpm_override_default_link_state(struct pcie_link_state *link,
+ int enabled)
+{
+ struct pci_dev *pdev = link->downstream;
+
+ /* For devicetree platforms, enable ClockPM by default */
+ if (of_have_populated_dt() && !enabled) {
+ link->clkpm_default = 1;
+ pci_info(pdev, "ASPM: DT platform, enabling ClockPM\n");
+ }
+}
+
static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
{
int capable = 1, enabled = 1;
@@ -395,6 +410,7 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
}
link->clkpm_enabled = enabled;
link->clkpm_default = enabled;
+ pcie_clkpm_override_default_link_state(link, enabled);
link->clkpm_capable = capable;
link->clkpm_disable = blacklist ? 1 : 0;
}
@@ -788,6 +804,29 @@ static void aspm_l1ss_init(struct pcie_link_state *link)
aspm_calc_l12_info(link, parent_l1ss_cap, child_l1ss_cap);
}
+#define FLAG(x, y, d) (((x) & (PCIE_LINK_STATE_##y)) ? d : "")
+
+static void pcie_aspm_override_default_link_state(struct pcie_link_state *link)
+{
+ struct pci_dev *pdev = link->downstream;
+ u32 override;
+
+ /* For devicetree platforms, enable all ASPM states by default */
+ if (of_have_populated_dt()) {
+ link->aspm_default = PCIE_LINK_STATE_ASPM_ALL;
+ override = link->aspm_default & ~link->aspm_enabled;
+ if (override)
+ pci_info(pdev, "ASPM: DT platform, enabling%s%s%s%s%s%s%s\n",
+ FLAG(override, L0S_UP, " L0s-up"),
+ FLAG(override, L0S_DW, " L0s-dw"),
+ FLAG(override, L1, " L1"),
+ FLAG(override, L1_1, " ASPM-L1.1"),
+ FLAG(override, L1_2, " ASPM-L1.2"),
+ FLAG(override, L1_1_PCIPM, " PCI-PM-L1.1"),
+ FLAG(override, L1_2_PCIPM, " PCI-PM-L1.2"));
+ }
+}
+
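
FLAG() expands to the label when the state bit is newly enabled and to an empty string otherwise, so unset states simply vanish from the pci_info() line. Sketch with invented bit values mirroring the PCIE_LINK_STATE_## pasting:

#include <stdio.h>

#define ST_L0S	0x1			/* invented state bits */
#define ST_L1	0x4
#define FLAG(x, y, d)	(((x) & (ST_##y)) ? (d) : "")

int main(void)
{
	unsigned int override = ST_L1;	/* only L1 newly enabled */

	printf("enabling%s%s\n", FLAG(override, L0S, " L0s"),
	       FLAG(override, L1, " L1"));	/* prints "enabling L1" */
	return 0;
}
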
static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
{
struct pci_dev *child = link->downstream, *parent = link->pdev;
@@ -868,6 +907,8 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
/* Save default state */
link->aspm_default = link->aspm_enabled;
+ pcie_aspm_override_default_link_state(link);
+
/* Setup initial capable state. Will be updated later */
link->aspm_capable = link->aspm_support;
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index de6381c690f5..bebe4bc111d7 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -108,6 +108,24 @@ static int report_normal_detected(struct pci_dev *dev, void *data)
return report_error_detected(dev, pci_channel_io_normal, data);
}
+static int report_perm_failure_detected(struct pci_dev *dev, void *data)
+{
+ struct pci_driver *pdrv;
+ const struct pci_error_handlers *err_handler;
+
+ device_lock(&dev->dev);
+ pdrv = dev->driver;
+ if (!pdrv || !pdrv->err_handler || !pdrv->err_handler->error_detected)
+ goto out;
+
+ err_handler = pdrv->err_handler;
+ err_handler->error_detected(dev, pci_channel_io_perm_failure);
+out:
+ pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
+ device_unlock(&dev->dev);
+ return 0;
+}
+
static int report_mmio_enabled(struct pci_dev *dev, void *data)
{
struct pci_driver *pdrv;
@@ -135,7 +153,8 @@ static int report_slot_reset(struct pci_dev *dev, void *data)
device_lock(&dev->dev);
pdrv = dev->driver;
- if (!pdrv || !pdrv->err_handler || !pdrv->err_handler->slot_reset)
+ if (!pci_dev_set_io_state(dev, pci_channel_io_normal) ||
+ !pdrv || !pdrv->err_handler || !pdrv->err_handler->slot_reset)
goto out;
err_handler = pdrv->err_handler;
@@ -217,15 +236,10 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
pci_walk_bridge(bridge, pci_pm_runtime_get_sync, NULL);
pci_dbg(bridge, "broadcast error_detected message\n");
- if (state == pci_channel_io_frozen) {
+ if (state == pci_channel_io_frozen)
pci_walk_bridge(bridge, report_frozen_detected, &status);
- if (reset_subordinates(bridge) != PCI_ERS_RESULT_RECOVERED) {
- pci_warn(bridge, "subordinate device reset failed\n");
- goto failed;
- }
- } else {
+ else
pci_walk_bridge(bridge, report_normal_detected, &status);
- }
if (status == PCI_ERS_RESULT_CAN_RECOVER) {
status = PCI_ERS_RESULT_RECOVERED;
@@ -233,6 +247,14 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
pci_walk_bridge(bridge, report_mmio_enabled, &status);
}
+ if (status == PCI_ERS_RESULT_NEED_RESET ||
+ state == pci_channel_io_frozen) {
+ if (reset_subordinates(bridge) != PCI_ERS_RESULT_RECOVERED) {
+ pci_warn(bridge, "subordinate device reset failed\n");
+ goto failed;
+ }
+ }
+
if (status == PCI_ERS_RESULT_NEED_RESET) {
/*
* TODO: Should call platform-specific
@@ -269,7 +291,7 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
failed:
pci_walk_bridge(bridge, pci_pm_runtime_put, NULL);
- pci_uevent_ers(bridge, PCI_ERS_RESULT_DISCONNECT);
+ pci_walk_bridge(bridge, report_perm_failure_detected, NULL);
pci_info(bridge, "device recovery failed\n");
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index f41128f91ca7..c83e75a0ec12 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -3,6 +3,7 @@
* PCI detection and setup code
*/
+#include <linux/array_size.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
@@ -419,13 +420,17 @@ static void pci_read_bridge_io(struct pci_dev *dev, struct resource *res,
limit |= ((unsigned long) io_limit_hi << 16);
}
+ res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
+
if (base <= limit) {
- res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
region.start = base;
region.end = limit + io_granularity - 1;
pcibios_bus_to_resource(dev->bus, res, &region);
if (log)
pci_info(dev, " bridge window %pR\n", res);
+ } else {
+ resource_set_range(res, 0, 0);
+ res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
}
}
@@ -440,13 +445,18 @@ static void pci_read_bridge_mmio(struct pci_dev *dev, struct resource *res,
pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
+
+ res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
+
if (base <= limit) {
- res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
region.start = base;
region.end = limit + 0xfffff;
pcibios_bus_to_resource(dev->bus, res, &region);
if (log)
pci_info(dev, " bridge window %pR\n", res);
+ } else {
+ resource_set_range(res, 0, 0);
+ res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
}
}
@@ -489,16 +499,20 @@ static void pci_read_bridge_mmio_pref(struct pci_dev *dev, struct resource *res,
return;
}
+ res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) | IORESOURCE_MEM |
+ IORESOURCE_PREFETCH;
+ if (res->flags & PCI_PREF_RANGE_TYPE_64)
+ res->flags |= IORESOURCE_MEM_64;
+
if (base <= limit) {
- res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
- IORESOURCE_MEM | IORESOURCE_PREFETCH;
- if (res->flags & PCI_PREF_RANGE_TYPE_64)
- res->flags |= IORESOURCE_MEM_64;
region.start = base;
region.end = limit + 0xfffff;
pcibios_bus_to_resource(dev->bus, res, &region);
if (log)
pci_info(dev, " bridge window %pR\n", res);
+ } else {
+ resource_set_range(res, 0, 0);
+ res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
}
}
@@ -524,10 +538,14 @@ static void pci_read_bridge_windows(struct pci_dev *bridge)
}
if (io) {
bridge->io_window = 1;
- pci_read_bridge_io(bridge, &res, true);
+ pci_read_bridge_io(bridge,
+ pci_resource_n(bridge, PCI_BRIDGE_IO_WINDOW),
+ true);
}
- pci_read_bridge_mmio(bridge, &res, true);
+ pci_read_bridge_mmio(bridge,
+ pci_resource_n(bridge, PCI_BRIDGE_MEM_WINDOW),
+ true);
/*
* DECchip 21050 pass 2 errata: the bridge may miss an address
@@ -565,7 +583,10 @@ static void pci_read_bridge_windows(struct pci_dev *bridge)
bridge->pref_64_window = 1;
}
- pci_read_bridge_mmio_pref(bridge, &res, true);
+ pci_read_bridge_mmio_pref(bridge,
+ pci_resource_n(bridge,
+ PCI_BRIDGE_PREF_MEM_WINDOW),
+ true);
}
void pci_read_bridge_bases(struct pci_bus *child)
@@ -585,9 +606,13 @@ void pci_read_bridge_bases(struct pci_bus *child)
for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
- pci_read_bridge_io(child->self, child->resource[0], false);
- pci_read_bridge_mmio(child->self, child->resource[1], false);
- pci_read_bridge_mmio_pref(child->self, child->resource[2], false);
+ pci_read_bridge_io(child->self,
+ child->resource[PCI_BUS_BRIDGE_IO_WINDOW], false);
+ pci_read_bridge_mmio(child->self,
+ child->resource[PCI_BUS_BRIDGE_MEM_WINDOW], false);
+ pci_read_bridge_mmio_pref(child->self,
+ child->resource[PCI_BUS_BRIDGE_PREF_MEM_WINDOW],
+ false);
if (!dev->transparent)
return;
@@ -1912,16 +1937,16 @@ static int pci_intx_mask_broken(struct pci_dev *dev)
static void early_dump_pci_device(struct pci_dev *pdev)
{
- u32 value[256 / 4];
+ u32 value[PCI_CFG_SPACE_SIZE / sizeof(u32)];
int i;
pci_info(pdev, "config space:\n");
- for (i = 0; i < 256; i += 4)
- pci_read_config_dword(pdev, i, &value[i / 4]);
+ for (i = 0; i < ARRAY_SIZE(value); i++)
+ pci_read_config_dword(pdev, i * sizeof(u32), &value[i]);
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
- value, 256, false);
+ value, ARRAY_SIZE(value) * sizeof(u32), false);
}
static const char *pci_type_str(struct pci_dev *dev)
@@ -1985,8 +2010,8 @@ int pci_setup_device(struct pci_dev *dev)
dev->sysdata = dev->bus->sysdata;
dev->dev.parent = dev->bus->bridge;
dev->dev.bus = &pci_bus_type;
- dev->hdr_type = hdr_type & 0x7f;
- dev->multifunction = !!(hdr_type & 0x80);
+ dev->hdr_type = FIELD_GET(PCI_HEADER_TYPE_MASK, hdr_type);
+ dev->multifunction = FIELD_GET(PCI_HEADER_TYPE_MFD, hdr_type);
dev->error_state = pci_channel_io_normal;
set_pcie_port_type(dev);
@@ -2516,9 +2541,15 @@ static struct platform_device *pci_pwrctrl_create_device(struct pci_bus *bus, in
struct device_node *np;
np = of_pci_find_child_device(dev_of_node(&bus->dev), devfn);
- if (!np || of_find_device_by_node(np))
+ if (!np)
return NULL;
+ pdev = of_find_device_by_node(np);
+ if (pdev) {
+ put_device(&pdev->dev);
+ goto err_put_of_node;
+ }
+
/*
* First check whether the pwrctrl device really needs to be created or
* not. This is decided based on at least one of the power supplies
@@ -2526,17 +2557,24 @@ static struct platform_device *pci_pwrctrl_create_device(struct pci_bus *bus, in
*/
if (!of_pci_supply_present(np)) {
pr_debug("PCI/pwrctrl: Skipping OF node: %s\n", np->name);
- return NULL;
+ goto err_put_of_node;
}
/* Now create the pwrctrl device */
pdev = of_platform_device_create(np, NULL, &host->dev);
if (!pdev) {
pr_err("PCI/pwrctrl: Failed to create pwrctrl device for node: %s\n", np->name);
- return NULL;
+ goto err_put_of_node;
}
+ of_node_put(np);
+
return pdev;
+
+err_put_of_node:
+ of_node_put(np);
+
+ return NULL;
}
#else
static struct platform_device *pci_pwrctrl_create_device(struct pci_bus *bus, int devfn)
@@ -3045,14 +3083,14 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
{
unsigned int used_buses, normal_bridges = 0, hotplug_bridges = 0;
unsigned int start = bus->busn_res.start;
- unsigned int devfn, cmax, max = start;
+ unsigned int devnr, cmax, max = start;
struct pci_dev *dev;
dev_dbg(&bus->dev, "scanning bus\n");
/* Go find them, Rover! */
- for (devfn = 0; devfn < 256; devfn += 8)
- pci_scan_slot(bus, devfn);
+ for (devnr = 0; devnr < PCI_MAX_NR_DEVS; devnr++)
+ pci_scan_slot(bus, PCI_DEVFN(devnr, 0));
/* Reserve buses for SR-IOV capability */
used_buses = pci_iov_bus_range(bus);
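
The rewritten scan loop is equivalent to the old one stepping devfn by 8: PCI_DEVFN() packs the device number into bits 7:3 and the function into bits 2:0, so devnr 0..31 with function 0 yields devfn 0, 8, ..., 248. Quick check:

#include <stdio.h>

#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))

int main(void)
{
	for (unsigned int devnr = 0; devnr < 32; devnr++)
		printf("%u%c", PCI_DEVFN(devnr, 0),
		       devnr == 31 ? '\n' : ' ');
	return 0;
}
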
@@ -3469,7 +3507,7 @@ EXPORT_SYMBOL_GPL(pci_rescan_bus);
* pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
* routines should always be executed under this mutex.
*/
-static DEFINE_MUTEX(pci_rescan_remove_lock);
+DEFINE_MUTEX(pci_rescan_remove_lock);
void pci_lock_rescan_remove(void)
{
diff --git a/drivers/pci/pwrctrl/slot.c b/drivers/pci/pwrctrl/slot.c
index 6e138310b45b..3320494b62d8 100644
--- a/drivers/pci/pwrctrl/slot.c
+++ b/drivers/pci/pwrctrl/slot.c
@@ -49,13 +49,14 @@ static int pci_pwrctrl_slot_probe(struct platform_device *pdev)
ret = regulator_bulk_enable(slot->num_supplies, slot->supplies);
if (ret < 0) {
dev_err_probe(dev, ret, "Failed to enable slot regulators\n");
- goto err_regulator_free;
+ regulator_bulk_free(slot->num_supplies, slot->supplies);
+ return ret;
}
ret = devm_add_action_or_reset(dev, devm_pci_pwrctrl_slot_power_off,
slot);
if (ret)
- goto err_regulator_disable;
+ return ret;
clk = devm_clk_get_optional_enabled(dev, NULL);
if (IS_ERR(clk)) {
@@ -70,13 +71,6 @@ static int pci_pwrctrl_slot_probe(struct platform_device *pdev)
return dev_err_probe(dev, ret, "Failed to register pwrctrl driver\n");
return 0;
-
-err_regulator_disable:
- regulator_bulk_disable(slot->num_supplies, slot->supplies);
-err_regulator_free:
- regulator_bulk_free(slot->num_supplies, slot->supplies);
-
- return ret;
}
static const struct of_device_id pci_pwrctrl_slot_of_match[] = {
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index d97335a40193..214ed060ca1b 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2717,6 +2717,7 @@ static void quirk_disable_msi(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x5a3f, quirk_disable_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_RDC, 0x1031, quirk_disable_msi);
/*
* The APC bridge device in AMD 780 family northbridges has some random
@@ -3829,7 +3830,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, 0xcf80, quirk_no_pm_reset);
*/
static void quirk_thunderbolt_hotplug_msi(struct pci_dev *pdev)
{
- if (pdev->is_hotplug_bridge &&
+ if (pdev->is_pciehp &&
(pdev->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C ||
pdev->revision <= 1))
pdev->no_msi = 1;
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 445afdfa6498..ce5c25adef55 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -31,6 +31,8 @@ static void pci_pwrctrl_unregister(struct device *dev)
return;
of_device_unregister(pdev);
+ put_device(&pdev->dev);
+
of_node_clear_flag(np, OF_POPULATED);
}
@@ -138,6 +140,7 @@ static void pci_remove_bus_device(struct pci_dev *dev)
*/
void pci_stop_and_remove_bus_device(struct pci_dev *dev)
{
+ lockdep_assert_held(&pci_rescan_remove_lock);
pci_stop_bus_device(dev);
pci_remove_bus_device(dev);
}
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 7853ac6999e2..4a8735b275e4 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -28,6 +28,10 @@
#include <linux/acpi.h>
#include "pci.h"
+#define PCI_RES_TYPE_MASK \
+ (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH |\
+ IORESOURCE_MEM_64)
+
unsigned int pci_flags;
EXPORT_SYMBOL_GPL(pci_flags);
@@ -136,6 +140,139 @@ static void restore_dev_resource(struct pci_dev_resource *dev_res)
res->flags = dev_res->flags;
}
+/*
+ * Helper function for sizing routines. Assigned resources have non-NULL
+ * parent resource.
+ *
+ * Return first unassigned resource of the correct type. If there is none,
+ * return first assigned resource of the correct type. If none of the
+ * above, return NULL.
+ *
+ * Returning an assigned resource of the correct type allows the caller to
+ * distinguish between already assigned and no resource of the correct type.
+ */
+static struct resource *find_bus_resource_of_type(struct pci_bus *bus,
+ unsigned long type_mask,
+ unsigned long type)
+{
+ struct resource *r, *r_assigned = NULL;
+
+ pci_bus_for_each_resource(bus, r) {
+ if (!r || r == &ioport_resource || r == &iomem_resource)
+ continue;
+
+ if ((r->flags & type_mask) != type)
+ continue;
+
+ if (!r->parent)
+ return r;
+ if (!r_assigned)
+ r_assigned = r;
+ }
+ return r_assigned;
+}
+
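
A sketch of the selection rule in the comment above, modelling "assigned" as a boolean instead of a non-NULL parent pointer:

#include <stdbool.h>
#include <stdio.h>

struct res {
	unsigned long flags;
	bool assigned;
};

static struct res *pick(struct res *r, int n, unsigned long type)
{
	struct res *fallback = NULL;

	for (int i = 0; i < n; i++) {
		if (r[i].flags != type)
			continue;
		if (!r[i].assigned)
			return &r[i];		/* first unassigned wins */
		if (!fallback)
			fallback = &r[i];	/* remember first assigned */
	}
	return fallback;	/* assigned one, or NULL if none matched */
}

int main(void)
{
	struct res r[] = { { 1, true }, { 2, true }, { 1, false } };

	printf("picked index %td\n", pick(r, 3, 1UL) - r);	/* 2 */
	return 0;
}
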
+/**
+ * pbus_select_window_for_type - Select bridge window for a resource type
+ * @bus: PCI bus
+ * @type: Resource type (resource flags can be passed as is)
+ *
+ * Select the bridge window based on a resource @type.
+ *
+ * For memory resources, the selection is done as follows:
+ *
+ * Any non-prefetchable resource is put into the non-prefetchable window.
+ *
+ * If there is no prefetchable MMIO window, put all memory resources into the
+ * non-prefetchable window.
+ *
+ * If there's a 64-bit prefetchable MMIO window, put all 64-bit prefetchable
+ * resources into it and place 32-bit prefetchable memory into the
+ * non-prefetchable window.
+ *
+ * Otherwise, put all prefetchable resources into the prefetchable window.
+ *
+ * Return: the bridge window resource or NULL if no bridge window is found.
+ */
+static struct resource *pbus_select_window_for_type(struct pci_bus *bus,
+ unsigned long type)
+{
+ int iores_type = type & IORESOURCE_TYPE_BITS; /* w/o 64bit & pref */
+ struct resource *mmio, *mmio_pref, *win;
+
+ type &= PCI_RES_TYPE_MASK; /* with 64bit & pref */
+
+ if ((iores_type != IORESOURCE_IO) && (iores_type != IORESOURCE_MEM))
+ return NULL;
+
+ if (pci_is_root_bus(bus)) {
+ win = find_bus_resource_of_type(bus, type, type);
+ if (win)
+ return win;
+
+ type &= ~IORESOURCE_MEM_64;
+ win = find_bus_resource_of_type(bus, type, type);
+ if (win)
+ return win;
+
+ type &= ~IORESOURCE_PREFETCH;
+ return find_bus_resource_of_type(bus, type, type);
+ }
+
+ switch (iores_type) {
+ case IORESOURCE_IO:
+ return pci_bus_resource_n(bus, PCI_BUS_BRIDGE_IO_WINDOW);
+
+ case IORESOURCE_MEM:
+ mmio = pci_bus_resource_n(bus, PCI_BUS_BRIDGE_MEM_WINDOW);
+ mmio_pref = pci_bus_resource_n(bus, PCI_BUS_BRIDGE_PREF_MEM_WINDOW);
+
+ if (!(type & IORESOURCE_PREFETCH) ||
+ !(mmio_pref->flags & IORESOURCE_MEM))
+ return mmio;
+
+ if ((type & IORESOURCE_MEM_64) ||
+ !(mmio_pref->flags & IORESOURCE_MEM_64))
+ return mmio_pref;
+
+ return mmio;
+ default:
+ return NULL;
+ }
+}
+
+/**
+ * pbus_select_window - Select bridge window for a resource
+ * @bus: PCI bus
+ * @res: Resource
+ *
+ * Select the bridge window for @res. If the resource is already assigned,
+ * return the current bridge window.
+ *
+ * For memory resources, the selection is done as follows:
+ *
+ * Any non-prefetchable resource is put into the non-prefetchable window.
+ *
+ * If there is no prefetchable MMIO window, put all memory resources into the
+ * non-prefetchable window.
+ *
+ * If there's a 64-bit prefetchable MMIO window, put all 64-bit prefetchable
+ * resources into it and place 32-bit prefetchable memory into the
+ * non-prefetchable window.
+ *
+ * Otherwise, put all prefetchable resources into the prefetchable window.
+ *
+ * Return: the bridge window resource or NULL if no bridge window is found.
+ */
+struct resource *pbus_select_window(struct pci_bus *bus,
+ const struct resource *res)
+{
+ if (res->parent)
+ return res->parent;
+
+ return pbus_select_window_for_type(bus, res->flags);
+}
+
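
The memory-window rules documented above reduce to a small truth function: 1 means the prefetchable window, 0 the non-prefetchable one. Stand-alone restatement:

#include <stdbool.h>
#include <stdio.h>

static int pick_window(bool res_pref, bool res_64,
		       bool pref_win_present, bool pref_win_64)
{
	if (!res_pref || !pref_win_present)
		return 0;	/* non-prefetchable window */
	if (res_64 || !pref_win_64)
		return 1;	/* prefetchable window */
	return 0;		/* 32-bit pref res under a 64-bit pref window */
}

int main(void)
{
	/* 32-bit prefetchable BAR under a 64-bit prefetchable window */
	printf("%d\n", pick_window(true, false, true, true));	/* 0 */
	/* 64-bit prefetchable BAR under the same window */
	printf("%d\n", pick_window(true, true, true, true));	/* 1 */
	return 0;
}
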
static bool pdev_resources_assignable(struct pci_dev *dev)
{
u16 class = dev->class >> 8, command;
@@ -154,6 +291,31 @@ static bool pdev_resources_assignable(struct pci_dev *dev)
return true;
}
+static bool pdev_resource_assignable(struct pci_dev *dev, struct resource *res)
+{
+ int idx = pci_resource_num(dev, res);
+
+ if (!res->flags)
+ return false;
+
+ if (idx >= PCI_BRIDGE_RESOURCES && idx <= PCI_BRIDGE_RESOURCE_END &&
+ res->flags & IORESOURCE_DISABLED)
+ return false;
+
+ return true;
+}
+
+static bool pdev_resource_should_fit(struct pci_dev *dev, struct resource *res)
+{
+ if (res->parent)
+ return false;
+
+ if (res->flags & IORESOURCE_PCI_FIXED)
+ return false;
+
+ return pdev_resource_assignable(dev, res);
+}
+
/* Sort resources by alignment */
static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
{
@@ -169,10 +331,7 @@ static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
resource_size_t r_align;
struct list_head *n;
- if (r->flags & IORESOURCE_PCI_FIXED)
- continue;
-
- if (!(r->flags) || r->parent)
+ if (!pdev_resource_should_fit(dev, r))
continue;
r_align = pci_resource_alignment(dev, r);
@@ -221,8 +380,15 @@ bool pci_resource_is_optional(const struct pci_dev *dev, int resno)
return false;
}
-static inline void reset_resource(struct resource *res)
+static inline void reset_resource(struct pci_dev *dev, struct resource *res)
{
+ int idx = pci_resource_num(dev, res);
+
+ if (idx >= PCI_BRIDGE_RESOURCES && idx <= PCI_BRIDGE_RESOURCE_END) {
+ res->flags |= IORESOURCE_UNSET;
+ return;
+ }
+
res->start = 0;
res->end = 0;
res->flags = 0;
@@ -384,13 +550,19 @@ static bool pci_need_to_release(unsigned long mask, struct resource *res)
}
/* Return: @true if assignment of a required resource failed. */
-static bool pci_required_resource_failed(struct list_head *fail_head)
+static bool pci_required_resource_failed(struct list_head *fail_head,
+ unsigned long type)
{
struct pci_dev_resource *fail_res;
+ type &= PCI_RES_TYPE_MASK;
+
list_for_each_entry(fail_res, fail_head, list) {
int idx = pci_resource_num(fail_res->dev, fail_res->res);
+ if (type && (fail_res->flags & PCI_RES_TYPE_MASK) != type)
+ continue;
+
if (!pci_resource_is_optional(fail_res->dev, idx))
return true;
}
@@ -431,8 +603,6 @@ static void __assign_resources_sorted(struct list_head *head,
struct pci_dev_resource *dev_res, *tmp_res, *dev_res2;
struct resource *res;
struct pci_dev *dev;
- const char *res_name;
- int idx;
unsigned long fail_type;
resource_size_t add_align, align;
@@ -504,7 +674,7 @@ assign:
}
/* Without realloc_head and only optional fails, nothing more to do. */
- if (!pci_required_resource_failed(&local_fail_head) &&
+ if (!pci_required_resource_failed(&local_fail_head, 0) &&
list_empty(realloc_head)) {
list_for_each_entry(save_res, &save_head, list) {
struct resource *res = save_res->res;
@@ -540,14 +710,7 @@ assign:
res = dev_res->res;
dev = dev_res->dev;
- if (!res->parent)
- continue;
-
- idx = pci_resource_num(dev, res);
- res_name = pci_resource_name(dev, idx);
- pci_dbg(dev, "%s %pR: releasing\n", res_name, res);
-
- release_resource(res);
+ pci_release_resource(dev, pci_resource_num(dev, res));
restore_dev_resource(dev_res);
}
/* Restore start/end/flags from saved list */
@@ -577,7 +740,7 @@ out:
0 /* don't care */);
}
- reset_resource(res);
+ reset_resource(dev, res);
}
free_list(head);
@@ -618,7 +781,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
res = bus->resource[0];
pcibios_resource_to_bus(bridge->bus, &region, res);
- if (res->flags & IORESOURCE_IO) {
+ if (res->parent && res->flags & IORESOURCE_IO) {
/*
* The IO resource is allocated a range twice as large as it
* would normally need. This allows us to set both IO regs.
@@ -632,7 +795,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
res = bus->resource[1];
pcibios_resource_to_bus(bridge->bus, &region, res);
- if (res->flags & IORESOURCE_IO) {
+ if (res->parent && res->flags & IORESOURCE_IO) {
pci_info(bridge, " bridge window %pR\n", res);
pci_write_config_dword(bridge, PCI_CB_IO_BASE_1,
region.start);
@@ -642,7 +805,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
res = bus->resource[2];
pcibios_resource_to_bus(bridge->bus, &region, res);
- if (res->flags & IORESOURCE_MEM) {
+ if (res->parent && res->flags & IORESOURCE_MEM) {
pci_info(bridge, " bridge window %pR\n", res);
pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0,
region.start);
@@ -652,7 +815,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
res = bus->resource[3];
pcibios_resource_to_bus(bridge->bus, &region, res);
- if (res->flags & IORESOURCE_MEM) {
+ if (res->parent && res->flags & IORESOURCE_MEM) {
pci_info(bridge, " bridge window %pR\n", res);
pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1,
region.start);
@@ -693,7 +856,7 @@ static void pci_setup_bridge_io(struct pci_dev *bridge)
res = &bridge->resource[PCI_BRIDGE_IO_WINDOW];
res_name = pci_resource_name(bridge, PCI_BRIDGE_IO_WINDOW);
pcibios_resource_to_bus(bridge->bus, &region, res);
- if (res->flags & IORESOURCE_IO) {
+ if (res->parent && res->flags & IORESOURCE_IO) {
pci_read_config_word(bridge, PCI_IO_BASE, &l);
io_base_lo = (region.start >> 8) & io_mask;
io_limit_lo = (region.end >> 8) & io_mask;
@@ -725,7 +888,7 @@ static void pci_setup_bridge_mmio(struct pci_dev *bridge)
res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW];
res_name = pci_resource_name(bridge, PCI_BRIDGE_MEM_WINDOW);
pcibios_resource_to_bus(bridge->bus, &region, res);
- if (res->flags & IORESOURCE_MEM) {
+ if (res->parent && res->flags & IORESOURCE_MEM) {
l = (region.start >> 16) & 0xfff0;
l |= region.end & 0xfff00000;
pci_info(bridge, " %s %pR\n", res_name, res);
@@ -754,7 +917,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
res = &bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
res_name = pci_resource_name(bridge, PCI_BRIDGE_PREF_MEM_WINDOW);
pcibios_resource_to_bus(bridge->bus, &region, res);
- if (res->flags & IORESOURCE_PREFETCH) {
+ if (res->parent && res->flags & IORESOURCE_PREFETCH) {
l = (region.start >> 16) & 0xfff0;
l |= region.end & 0xfff00000;
if (res->flags & IORESOURCE_MEM_64) {
@@ -790,6 +953,23 @@ static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
}
+static void pci_setup_one_bridge_window(struct pci_dev *bridge, int resno)
+{
+ switch (resno) {
+ case PCI_BRIDGE_IO_WINDOW:
+ pci_setup_bridge_io(bridge);
+ break;
+ case PCI_BRIDGE_MEM_WINDOW:
+ pci_setup_bridge_mmio(bridge);
+ break;
+ case PCI_BRIDGE_PREF_MEM_WINDOW:
+ pci_setup_bridge_mmio_pref(bridge);
+ break;
+ default:
+ return;
+ }
+}
+
void __weak pcibios_setup_bridge(struct pci_bus *bus, unsigned long type)
{
}
@@ -806,6 +986,8 @@ static void pci_setup_bridge(struct pci_bus *bus)
int pci_claim_bridge_resource(struct pci_dev *bridge, int i)
{
+ int ret = -EINVAL;
+
if (i < PCI_BRIDGE_RESOURCES || i > PCI_BRIDGE_RESOURCE_END)
return 0;
@@ -815,27 +997,16 @@ int pci_claim_bridge_resource(struct pci_dev *bridge, int i)
if ((bridge->class >> 8) != PCI_CLASS_BRIDGE_PCI)
return 0;
- if (!pci_bus_clip_resource(bridge, i))
- return -EINVAL; /* Clipping didn't change anything */
-
- switch (i) {
- case PCI_BRIDGE_IO_WINDOW:
- pci_setup_bridge_io(bridge);
- break;
- case PCI_BRIDGE_MEM_WINDOW:
- pci_setup_bridge_mmio(bridge);
- break;
- case PCI_BRIDGE_PREF_MEM_WINDOW:
- pci_setup_bridge_mmio_pref(bridge);
- break;
- default:
+ if (i > PCI_BRIDGE_PREF_MEM_WINDOW)
return -EINVAL;
- }
- if (pci_claim_resource(bridge, i) == 0)
- return 0; /* Claimed a smaller window */
+ /* Try to clip the resource and claim the smaller window */
+ if (pci_bus_clip_resource(bridge, i))
+ ret = pci_claim_resource(bridge, i);
+
+ pci_setup_one_bridge_window(bridge, i);
- return -EINVAL;
+ return ret;
}
/*
@@ -866,34 +1037,6 @@ static void pci_bridge_check_ranges(struct pci_bus *bus)
}
}
-/*
- * Helper function for sizing routines. Assigned resources have non-NULL
- * parent resource.
- *
- * Return first unassigned resource of the correct type. If there is none,
- * return first assigned resource of the correct type. If none of the
- * above, return NULL.
- *
- * Returning an assigned resource of the correct type allows the caller to
- * distinguish between already assigned and no resource of the correct type.
- */
-static struct resource *find_bus_resource_of_type(struct pci_bus *bus,
- unsigned long type_mask,
- unsigned long type)
-{
- struct resource *r, *r_assigned = NULL;
-
- pci_bus_for_each_resource(bus, r) {
- if (r == &ioport_resource || r == &iomem_resource)
- continue;
- if (r && (r->flags & type_mask) == type && !r->parent)
- return r;
- if (r && (r->flags & type_mask) == type && !r_assigned)
- r_assigned = r;
- }
- return r_assigned;
-}
-
static resource_size_t calculate_iosize(resource_size_t size,
resource_size_t min_size,
resource_size_t size1,
@@ -984,8 +1127,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
struct list_head *realloc_head)
{
struct pci_dev *dev;
- struct resource *b_res = find_bus_resource_of_type(bus, IORESOURCE_IO,
- IORESOURCE_IO);
+ struct resource *b_res = pbus_select_window_for_type(bus, IORESOURCE_IO);
resource_size_t size = 0, size0 = 0, size1 = 0;
resource_size_t children_add_size = 0;
resource_size_t min_align, align;
@@ -1006,8 +1148,11 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
if (r->parent || !(r->flags & IORESOURCE_IO))
continue;
- r_size = resource_size(r);
+ if (!pdev_resource_assignable(dev, r))
+ continue;
+
+ r_size = resource_size(r);
if (r_size < SZ_1K)
/* Might be re-aligned for ISA */
size += r_size;
@@ -1026,6 +1171,9 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
size0 = calculate_iosize(size, min_size, size1, 0, 0,
resource_size(b_res), min_align);
+ if (size0)
+ b_res->flags &= ~IORESOURCE_DISABLED;
+
size1 = size0;
if (realloc_head && (add_size > 0 || children_add_size > 0)) {
size1 = calculate_iosize(size, min_size, size1, add_size,
@@ -1037,13 +1185,14 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
if (bus->self && (b_res->start || b_res->end))
pci_info(bus->self, "disabling bridge window %pR to %pR (unused)\n",
b_res, &bus->busn_res);
- b_res->flags = 0;
+ b_res->flags |= IORESOURCE_DISABLED;
return;
}
resource_set_range(b_res, min_align, size0);
b_res->flags |= IORESOURCE_STARTALIGN;
if (bus->self && size1 > size0 && realloc_head) {
+ b_res->flags &= ~IORESOURCE_DISABLED;
add_to_list(realloc_head, bus->self, b_res, size1-size0,
min_align);
pci_info(bus->self, "bridge window %pR to %pR add_size %llx\n",
@@ -1077,19 +1226,20 @@ static inline resource_size_t calculate_mem_align(resource_size_t *aligns,
/**
* pbus_upstream_space_available - Check that no upstream resource limits the allocation
* @bus: The bus
- * @mask: Mask the resource flag, then compare it with type
- * @type: The type of resource from bridge
+ * @res: The resource to help select the correct bridge window
* @size: The size required from the bridge window
* @align: Required alignment for the resource
*
- * Checks that @size can fit inside the upstream bridge resources that are
- * already assigned.
+ * Check that @size can fit inside the upstream bridge resources that are
+ * already assigned. Select the upstream bridge window based on the type of
+ * @res.
*
* Return: %true if enough space is available on all assigned upstream
* resources.
*/
-static bool pbus_upstream_space_available(struct pci_bus *bus, unsigned long mask,
- unsigned long type, resource_size_t size,
+static bool pbus_upstream_space_available(struct pci_bus *bus,
+ struct resource *res,
+ resource_size_t size,
resource_size_t align)
{
struct resource_constraint constraint = {
@@ -1097,39 +1247,39 @@ static bool pbus_upstream_space_available(struct pci_bus *bus, unsigned long mas
.align = align,
};
struct pci_bus *downstream = bus;
- struct resource *r;
while ((bus = bus->parent)) {
if (pci_is_root_bus(bus))
break;
- pci_bus_for_each_resource(bus, r) {
- if (!r || !r->parent || (r->flags & mask) != type)
- continue;
-
- if (resource_size(r) >= size) {
- struct resource gap = {};
+ res = pbus_select_window(bus, res);
+ if (!res)
+ return false;
+ if (!res->parent)
+ continue;
- if (find_resource_space(r, &gap, size, &constraint) == 0) {
- gap.flags = type;
- pci_dbg(bus->self,
- "Assigned bridge window %pR to %pR free space at %pR\n",
- r, &bus->busn_res, &gap);
- return true;
- }
- }
+ if (resource_size(res) >= size) {
+ struct resource gap = {};
- if (bus->self) {
- pci_info(bus->self,
- "Assigned bridge window %pR to %pR cannot fit 0x%llx required for %s bridging to %pR\n",
- r, &bus->busn_res,
- (unsigned long long)size,
- pci_name(downstream->self),
- &downstream->busn_res);
+ if (find_resource_space(res, &gap, size, &constraint) == 0) {
+ gap.flags = res->flags;
+ pci_dbg(bus->self,
+ "Assigned bridge window %pR to %pR free space at %pR\n",
+ res, &bus->busn_res, &gap);
+ return true;
}
+ }
- return false;
+ if (bus->self) {
+ pci_info(bus->self,
+ "Assigned bridge window %pR to %pR cannot fit 0x%llx required for %s bridging to %pR\n",
+ res, &bus->busn_res,
+ (unsigned long long)size,
+ pci_name(downstream->self),
+ &downstream->busn_res);
}
+
+ return false;
}
return true;
@@ -1139,24 +1289,22 @@ static bool pbus_upstream_space_available(struct pci_bus *bus, unsigned long mas
* pbus_size_mem() - Size the memory window of a given bus
*
* @bus: The bus
- * @mask: Mask the resource flag, then compare it with type
- * @type: The type of free resource from bridge
- * @type2: Second match type
- * @type3: Third match type
+ * @type: The type of bridge resource
* @min_size: The minimum memory window that must be allocated
* @add_size: Additional optional memory window
* @realloc_head: Track the additional memory window on this list
*
- * Calculate the size of the bus and minimal alignment which guarantees
- * that all child resources fit in this size.
+ * Calculate the size of the bus resource for @type and minimal alignment
+ * which guarantees that all child resources fit in this size.
+ *
+ * Set the bus resource start/end to indicate the required size if there is
+ * an available unassigned bus resource of the desired @type.
*
- * Return -ENOSPC if there's no available bus resource of the desired
- * type. Otherwise, set the bus resource start/end to indicate the
- * required size, add things to realloc_head (if supplied), and return 0.
+ * Add optional resource requests to the @realloc_head list if it is
+ * supplied.
*/
-static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
- unsigned long type, unsigned long type2,
- unsigned long type3, resource_size_t min_size,
+static void pbus_size_mem(struct pci_bus *bus, unsigned long type,
+ resource_size_t min_size,
resource_size_t add_size,
struct list_head *realloc_head)
{
@@ -1164,18 +1312,19 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
resource_size_t min_align, win_align, align, size, size0, size1 = 0;
resource_size_t aligns[28]; /* Alignments from 1MB to 128TB */
int order, max_order;
- struct resource *b_res = find_bus_resource_of_type(bus,
- mask | IORESOURCE_PREFETCH, type);
+ struct resource *b_res = pbus_select_window_for_type(bus, type);
resource_size_t children_add_size = 0;
resource_size_t children_add_align = 0;
resource_size_t add_align = 0;
+ resource_size_t relaxed_align;
+ resource_size_t old_size;
if (!b_res)
- return -ENOSPC;
+ return;
/* If resource is already assigned, nothing more to do */
if (b_res->parent)
- return 0;
+ return;
memset(aligns, 0, sizeof(aligns));
max_order = 0;
@@ -1189,11 +1338,12 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
const char *r_name = pci_resource_name(dev, i);
resource_size_t r_size;
- if (r->parent || (r->flags & IORESOURCE_PCI_FIXED) ||
- ((r->flags & mask) != type &&
- (r->flags & mask) != type2 &&
- (r->flags & mask) != type3))
+ if (!pdev_resources_assignable(dev) ||
+ !pdev_resource_should_fit(dev, r))
continue;
+ if (b_res != pbus_select_window(bus, r))
+ continue;
+
r_size = resource_size(r);
/* Put SRIOV requested res to the optional list */
@@ -1238,17 +1388,24 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
}
}
+ old_size = resource_size(b_res);
win_align = window_alignment(bus, b_res->flags);
min_align = calculate_mem_align(aligns, max_order);
min_align = max(min_align, win_align);
- size0 = calculate_memsize(size, min_size, 0, 0, resource_size(b_res), min_align);
+ size0 = calculate_memsize(size, min_size, 0, 0, old_size, min_align);
+
+ if (size0) {
+ resource_set_range(b_res, min_align, size0);
+ b_res->flags &= ~IORESOURCE_DISABLED;
+ }
if (bus->self && size0 &&
- !pbus_upstream_space_available(bus, mask | IORESOURCE_PREFETCH, type,
- size0, min_align)) {
- min_align = 1ULL << (max_order + __ffs(SZ_1M));
- min_align = max(min_align, win_align);
- size0 = calculate_memsize(size, min_size, 0, 0, resource_size(b_res), win_align);
+ !pbus_upstream_space_available(bus, b_res, size0, min_align)) {
+ relaxed_align = 1ULL << (max_order + __ffs(SZ_1M));
+ relaxed_align = max(relaxed_align, win_align);
+ min_align = min(min_align, relaxed_align);
+ size0 = calculate_memsize(size, min_size, 0, 0, old_size, win_align);
+ resource_set_range(b_res, min_align, size0);
pci_info(bus->self, "bridge window %pR to %pR requires relaxed alignment rules\n",
b_res, &bus->busn_res);
}
@@ -1256,15 +1413,15 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
if (realloc_head && (add_size > 0 || children_add_size > 0)) {
add_align = max(min_align, add_align);
size1 = calculate_memsize(size, min_size, add_size, children_add_size,
- resource_size(b_res), add_align);
+ old_size, add_align);
if (bus->self && size1 &&
- !pbus_upstream_space_available(bus, mask | IORESOURCE_PREFETCH, type,
- size1, add_align)) {
- min_align = 1ULL << (max_order + __ffs(SZ_1M));
- min_align = max(min_align, win_align);
+ !pbus_upstream_space_available(bus, b_res, size1, add_align)) {
+ relaxed_align = 1ULL << (max_order + __ffs(SZ_1M));
+ relaxed_align = max(relaxed_align, win_align);
+ min_align = min(min_align, relaxed_align);
size1 = calculate_memsize(size, min_size, add_size, children_add_size,
- resource_size(b_res), win_align);
+ old_size, win_align);
pci_info(bus->self,
"bridge window %pR to %pR requires relaxed alignment rules\n",
b_res, &bus->busn_res);
@@ -1275,20 +1432,20 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
if (bus->self && (b_res->start || b_res->end))
pci_info(bus->self, "disabling bridge window %pR to %pR (unused)\n",
b_res, &bus->busn_res);
- b_res->flags = 0;
- return 0;
+ b_res->flags |= IORESOURCE_DISABLED;
+ return;
}
resource_set_range(b_res, min_align, size0);
b_res->flags |= IORESOURCE_STARTALIGN;
if (bus->self && size1 > size0 && realloc_head) {
+ b_res->flags &= ~IORESOURCE_DISABLED;
add_to_list(realloc_head, bus->self, b_res, size1-size0, add_align);
pci_info(bus->self, "bridge window %pR to %pR add_size %llx add_align %llx\n",
b_res, &bus->busn_res,
(unsigned long long) (size1 - size0),
(unsigned long long) add_align);
}
- return 0;
}
unsigned long pci_cardbus_resource_alignment(struct resource *res)
@@ -1393,12 +1550,11 @@ handle_done:
void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
{
struct pci_dev *dev;
- unsigned long mask, prefmask, type2 = 0, type3 = 0;
resource_size_t additional_io_size = 0, additional_mmio_size = 0,
additional_mmio_pref_size = 0;
struct resource *pref;
struct pci_host_bridge *host;
- int hdr_type, ret;
+ int hdr_type;
list_for_each_entry(dev, &bus->devices, bus_list) {
struct pci_bus *b = dev->subordinate;
@@ -1448,71 +1604,15 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
pbus_size_io(bus, realloc_head ? 0 : additional_io_size,
additional_io_size, realloc_head);
- /*
- * If there's a 64-bit prefetchable MMIO window, compute
- * the size required to put all 64-bit prefetchable
- * resources in it.
- */
- mask = IORESOURCE_MEM;
- prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
- if (pref && (pref->flags & IORESOURCE_MEM_64)) {
- prefmask |= IORESOURCE_MEM_64;
- ret = pbus_size_mem(bus, prefmask, prefmask,
- prefmask, prefmask,
- realloc_head ? 0 : additional_mmio_pref_size,
- additional_mmio_pref_size, realloc_head);
-
- /*
- * If successful, all non-prefetchable resources
- * and any 32-bit prefetchable resources will go in
- * the non-prefetchable window.
- */
- if (ret == 0) {
- mask = prefmask;
- type2 = prefmask & ~IORESOURCE_MEM_64;
- type3 = prefmask & ~IORESOURCE_PREFETCH;
- }
- }
-
- /*
- * If there is no 64-bit prefetchable window, compute the
- * size required to put all prefetchable resources in the
- * 32-bit prefetchable window (if there is one).
- */
- if (!type2) {
- prefmask &= ~IORESOURCE_MEM_64;
- ret = pbus_size_mem(bus, prefmask, prefmask,
- prefmask, prefmask,
- realloc_head ? 0 : additional_mmio_pref_size,
- additional_mmio_pref_size, realloc_head);
-
- /*
- * If successful, only non-prefetchable resources
- * will go in the non-prefetchable window.
- */
- if (ret == 0)
- mask = prefmask;
- else
- additional_mmio_size += additional_mmio_pref_size;
-
- type2 = type3 = IORESOURCE_MEM;
+ if (pref) {
+ pbus_size_mem(bus,
+ IORESOURCE_MEM | IORESOURCE_PREFETCH |
+ (pref->flags & IORESOURCE_MEM_64),
+ realloc_head ? 0 : additional_mmio_pref_size,
+ additional_mmio_pref_size, realloc_head);
}
- /*
- * Compute the size required to put everything else in the
- * non-prefetchable window. This includes:
- *
- * - all non-prefetchable resources
- * - 32-bit prefetchable resources if there's a 64-bit
- * prefetchable window or no prefetchable window at all
- * - 64-bit prefetchable resources if there's no prefetchable
- * window at all
- *
- * Note that the strategy in __pci_assign_resource() must match
- * that used here. Specifically, we cannot put a 32-bit
- * prefetchable resource in a 64-bit prefetchable window.
- */
- pbus_size_mem(bus, mask, IORESOURCE_MEM, type2, type3,
+ pbus_size_mem(bus, IORESOURCE_MEM,
realloc_head ? 0 : additional_mmio_size,
additional_mmio_size, realloc_head);
break;
@@ -1704,66 +1804,25 @@ static void __pci_bridge_assign_resources(const struct pci_dev *bridge,
}
}
-#define PCI_RES_TYPE_MASK \
- (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH |\
- IORESOURCE_MEM_64)
-
static void pci_bridge_release_resources(struct pci_bus *bus,
- unsigned long type)
+ struct resource *b_win)
{
struct pci_dev *dev = bus->self;
- struct resource *r;
- unsigned int old_flags;
- struct resource *b_res;
- int idx = 1;
+ int idx, ret;
- b_res = &dev->resource[PCI_BRIDGE_RESOURCES];
-
- /*
- * 1. If IO port assignment fails, release bridge IO port.
- * 2. If non pref MMIO assignment fails, release bridge nonpref MMIO.
- * 3. If 64bit pref MMIO assignment fails, and bridge pref is 64bit,
- * release bridge pref MMIO.
- * 4. If pref MMIO assignment fails, and bridge pref is 32bit,
- * release bridge pref MMIO.
- * 5. If pref MMIO assignment fails, and bridge pref is not
- * assigned, release bridge nonpref MMIO.
- */
- if (type & IORESOURCE_IO)
- idx = 0;
- else if (!(type & IORESOURCE_PREFETCH))
- idx = 1;
- else if ((type & IORESOURCE_MEM_64) &&
- (b_res[2].flags & IORESOURCE_MEM_64))
- idx = 2;
- else if (!(b_res[2].flags & IORESOURCE_MEM_64) &&
- (b_res[2].flags & IORESOURCE_PREFETCH))
- idx = 2;
- else
- idx = 1;
-
- r = &b_res[idx];
-
- if (!r->parent)
+ if (!b_win->parent)
return;
+ idx = pci_resource_num(dev, b_win);
+
/* If there are children, release them all */
- release_child_resources(r);
- if (!release_resource(r)) {
- type = old_flags = r->flags & PCI_RES_TYPE_MASK;
- pci_info(dev, "resource %d %pR released\n",
- PCI_BRIDGE_RESOURCES + idx, r);
- /* Keep the old size */
- resource_set_range(r, 0, resource_size(r));
- r->flags = 0;
+ release_child_resources(b_win);
- /* Avoiding touch the one without PREF */
- if (type & IORESOURCE_PREFETCH)
- type = IORESOURCE_PREFETCH;
- __pci_setup_bridge(bus, type);
- /* For next child res under same bridge */
- r->flags = old_flags;
- }
+ ret = pci_release_resource(dev, idx);
+ if (ret)
+ return;
+
+ pci_setup_one_bridge_window(dev, idx);
}
enum release_type {
@@ -1776,7 +1835,7 @@ enum release_type {
* a larger window later.
*/
static void pci_bus_release_bridge_resources(struct pci_bus *bus,
- unsigned long type,
+ struct resource *b_win,
enum release_type rel_type)
{
struct pci_dev *dev;
@@ -1784,6 +1843,8 @@ static void pci_bus_release_bridge_resources(struct pci_bus *bus,
list_for_each_entry(dev, &bus->devices, bus_list) {
struct pci_bus *b = dev->subordinate;
+ struct resource *res;
+
if (!b)
continue;
@@ -1792,9 +1853,15 @@ static void pci_bus_release_bridge_resources(struct pci_bus *bus,
if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
continue;
- if (rel_type == whole_subtree)
- pci_bus_release_bridge_resources(b, type,
- whole_subtree);
+ if (rel_type != whole_subtree)
+ continue;
+
+ pci_bus_for_each_resource(b, res) {
+ if (res->parent != b_win)
+ continue;
+
+ pci_bus_release_bridge_resources(b, res, rel_type);
+ }
}
if (pci_is_root_bus(bus))
@@ -1804,7 +1871,7 @@ static void pci_bus_release_bridge_resources(struct pci_bus *bus,
return;
if ((rel_type == whole_subtree) || is_leaf_bridge)
- pci_bridge_release_resources(bus, type);
+ pci_bridge_release_resources(bus, b_win);
}
static void pci_bus_dump_res(struct pci_bus *bus)
@@ -1979,33 +2046,21 @@ static void remove_dev_resource(struct resource *avail, struct pci_dev *dev,
avail->start = min(avail->start + tmp, avail->end + 1);
}
-static void remove_dev_resources(struct pci_dev *dev, struct resource *io,
- struct resource *mmio,
- struct resource *mmio_pref)
+static void remove_dev_resources(struct pci_dev *dev,
+ struct resource available[PCI_P2P_BRIDGE_RESOURCE_NUM])
{
- struct resource *res;
+ struct resource *res, *b_win;
+ int idx;
pci_dev_for_each_resource(dev, res) {
- if (resource_type(res) == IORESOURCE_IO) {
- remove_dev_resource(io, dev, res);
- } else if (resource_type(res) == IORESOURCE_MEM) {
+ b_win = pbus_select_window(dev->bus, res);
+ if (!b_win)
+ continue;
- /*
- * Make sure prefetchable memory is reduced from
- * the correct resource. Specifically we put 32-bit
- * prefetchable memory in non-prefetchable window
- * if there is a 64-bit prefetchable window.
- *
- * See comments in __pci_bus_size_bridges() for
- * more information.
- */
- if ((res->flags & IORESOURCE_PREFETCH) &&
- ((res->flags & IORESOURCE_MEM_64) ==
- (mmio_pref->flags & IORESOURCE_MEM_64)))
- remove_dev_resource(mmio_pref, dev, res);
- else
- remove_dev_resource(mmio, dev, res);
- }
+ idx = pci_resource_num(dev->bus->self, b_win);
+ idx -= PCI_BRIDGE_RESOURCES;
+
+ remove_dev_resource(&available[idx], dev, res);
}
}
@@ -2019,45 +2074,40 @@ static void remove_dev_resources(struct pci_dev *dev, struct resource *io,
* shared with the bridges.
*/
static void pci_bus_distribute_available_resources(struct pci_bus *bus,
- struct list_head *add_list,
- struct resource io,
- struct resource mmio,
- struct resource mmio_pref)
+ struct list_head *add_list,
+ struct resource available_in[PCI_P2P_BRIDGE_RESOURCE_NUM])
{
+ struct resource available[PCI_P2P_BRIDGE_RESOURCE_NUM];
unsigned int normal_bridges = 0, hotplug_bridges = 0;
- struct resource *io_res, *mmio_res, *mmio_pref_res;
struct pci_dev *dev, *bridge = bus->self;
- resource_size_t io_per_b, mmio_per_b, mmio_pref_per_b, align;
-
- io_res = &bridge->resource[PCI_BRIDGE_IO_WINDOW];
- mmio_res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW];
- mmio_pref_res = &bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
+ resource_size_t per_bridge[PCI_P2P_BRIDGE_RESOURCE_NUM];
+ resource_size_t align;
+ int i;
- /*
- * The alignment of this bridge is yet to be considered, hence it must
- * be done now before extending its bridge window.
- */
- align = pci_resource_alignment(bridge, io_res);
- if (!io_res->parent && align)
- io.start = min(ALIGN(io.start, align), io.end + 1);
+ for (i = 0; i < PCI_P2P_BRIDGE_RESOURCE_NUM; i++) {
+ struct resource *res =
+ pci_resource_n(bridge, PCI_BRIDGE_RESOURCES + i);
- align = pci_resource_alignment(bridge, mmio_res);
- if (!mmio_res->parent && align)
- mmio.start = min(ALIGN(mmio.start, align), mmio.end + 1);
+ available[i] = available_in[i];
- align = pci_resource_alignment(bridge, mmio_pref_res);
- if (!mmio_pref_res->parent && align)
- mmio_pref.start = min(ALIGN(mmio_pref.start, align),
- mmio_pref.end + 1);
+ /*
+ * The alignment of this bridge is yet to be considered,
+ * hence it must be done now before extending its bridge
+ * window.
+ */
+ align = pci_resource_alignment(bridge, res);
+ if (!res->parent && align)
+ available[i].start = min(ALIGN(available[i].start, align),
+ available[i].end + 1);
- /*
- * Now that we have adjusted for alignment, update the bridge window
- * resources to fill as much remaining resource space as possible.
- */
- adjust_bridge_window(bridge, io_res, add_list, resource_size(&io));
- adjust_bridge_window(bridge, mmio_res, add_list, resource_size(&mmio));
- adjust_bridge_window(bridge, mmio_pref_res, add_list,
- resource_size(&mmio_pref));
+ /*
+ * Now that we have adjusted for alignment, update the
+ * bridge window resources to fill as much remaining
+ * resource space as possible.
+ */
+ adjust_bridge_window(bridge, res, add_list,
+ resource_size(&available[i]));
+ }
/*
* Calculate how many hotplug bridges and normal bridges there
@@ -2081,7 +2131,7 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
*/
list_for_each_entry(dev, &bus->devices, bus_list) {
if (!dev->is_virtfn)
- remove_dev_resources(dev, &io, &mmio, &mmio_pref);
+ remove_dev_resources(dev, available);
}
/*
@@ -2093,16 +2143,9 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
* split between non-hotplug bridges. This is to allow possible
* hotplug bridges below them to get the extra space as well.
*/
- if (hotplug_bridges) {
- io_per_b = div64_ul(resource_size(&io), hotplug_bridges);
- mmio_per_b = div64_ul(resource_size(&mmio), hotplug_bridges);
- mmio_pref_per_b = div64_ul(resource_size(&mmio_pref),
- hotplug_bridges);
- } else {
- io_per_b = div64_ul(resource_size(&io), normal_bridges);
- mmio_per_b = div64_ul(resource_size(&mmio), normal_bridges);
- mmio_pref_per_b = div64_ul(resource_size(&mmio_pref),
- normal_bridges);
+ for (i = 0; i < PCI_P2P_BRIDGE_RESOURCE_NUM; i++) {
+ per_bridge[i] = div64_ul(resource_size(&available[i]),
+ hotplug_bridges ?: normal_bridges);
}
for_each_pci_bridge(dev, bus) {
@@ -2115,49 +2158,41 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
if (hotplug_bridges && !dev->is_hotplug_bridge)
continue;
- res = &dev->resource[PCI_BRIDGE_IO_WINDOW];
+ for (i = 0; i < PCI_P2P_BRIDGE_RESOURCE_NUM; i++) {
+ res = pci_resource_n(dev, PCI_BRIDGE_RESOURCES + i);
- /*
- * Make sure the split resource space is properly aligned
- * for bridge windows (align it down to avoid going above
- * what is available).
- */
- align = pci_resource_alignment(dev, res);
- resource_set_size(&io, ALIGN_DOWN_IF_NONZERO(io_per_b, align));
-
- /*
- * The x_per_b holds the extra resource space that can be
- * added for each bridge but there is the minimal already
- * reserved as well so adjust x.start down accordingly to
- * cover the whole space.
- */
- io.start -= resource_size(res);
-
- res = &dev->resource[PCI_BRIDGE_MEM_WINDOW];
- align = pci_resource_alignment(dev, res);
- resource_set_size(&mmio,
- ALIGN_DOWN_IF_NONZERO(mmio_per_b, align));
- mmio.start -= resource_size(res);
+ /*
+ * Make sure the split resource space is properly
+ * aligned for bridge windows (align it down to
+ * avoid going above what is available).
+ */
+ align = pci_resource_alignment(dev, res);
+ resource_set_size(&available[i],
+ ALIGN_DOWN_IF_NONZERO(per_bridge[i],
+ align));
- res = &dev->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
- align = pci_resource_alignment(dev, res);
- resource_set_size(&mmio_pref,
- ALIGN_DOWN_IF_NONZERO(mmio_pref_per_b, align));
- mmio_pref.start -= resource_size(res);
+ /*
+ * per_bridge[] holds the extra resource space that
+ * can be added for each bridge, but the minimum is
+ * already reserved as well, so adjust
+ * available[i].start down accordingly to cover the
+ * whole space.
+ */
+ available[i].start -= resource_size(res);
+ }
- pci_bus_distribute_available_resources(b, add_list, io, mmio,
- mmio_pref);
+ pci_bus_distribute_available_resources(b, add_list, available);
- io.start += io.end + 1;
- mmio.start += mmio.end + 1;
- mmio_pref.start += mmio_pref.end + 1;
+ for (i = 0; i < PCI_P2P_BRIDGE_RESOURCE_NUM; i++)
+ available[i].start += available[i].end + 1;
}
}
static void pci_bridge_distribute_available_resources(struct pci_dev *bridge,
struct list_head *add_list)
{
- struct resource available_io, available_mmio, available_mmio_pref;
+ struct resource *res, available[PCI_P2P_BRIDGE_RESOURCE_NUM];
+ unsigned int i;
if (!bridge->is_hotplug_bridge)
return;
@@ -2165,14 +2200,13 @@ static void pci_bridge_distribute_available_resources(struct pci_dev *bridge,
pci_dbg(bridge, "distributing available resources\n");
/* Take the initial extra resources from the hotplug port */
- available_io = bridge->resource[PCI_BRIDGE_IO_WINDOW];
- available_mmio = bridge->resource[PCI_BRIDGE_MEM_WINDOW];
- available_mmio_pref = bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
+ for (i = 0; i < PCI_P2P_BRIDGE_RESOURCE_NUM; i++) {
+ res = pci_resource_n(bridge, PCI_BRIDGE_RESOURCES + i);
+ available[i] = *res;
+ }
pci_bus_distribute_available_resources(bridge->subordinate,
- add_list, available_io,
- available_mmio,
- available_mmio_pref);
+ add_list, available);
}
static bool pci_bridge_resources_not_assigned(struct pci_dev *dev)
@@ -2235,27 +2269,19 @@ static void pci_prepare_next_assign_round(struct list_head *fail_head,
* enough to contain child device resources.
*/
list_for_each_entry(fail_res, fail_head, list) {
- pci_bus_release_bridge_resources(fail_res->dev->bus,
- fail_res->flags & PCI_RES_TYPE_MASK,
- rel_type);
+ struct pci_bus *bus = fail_res->dev->bus;
+ struct resource *b_win;
+
+ b_win = pbus_select_window_for_type(bus, fail_res->flags);
+ if (!b_win)
+ continue;
+ pci_bus_release_bridge_resources(bus, b_win, rel_type);
}
/* Restore size and flags */
- list_for_each_entry(fail_res, fail_head, list) {
- struct resource *res = fail_res->res;
- struct pci_dev *dev = fail_res->dev;
- int idx = pci_resource_num(dev, res);
-
+ list_for_each_entry(fail_res, fail_head, list)
restore_dev_resource(fail_res);
- if (!pci_is_bridge(dev))
- continue;
-
- if (idx >= PCI_BRIDGE_RESOURCES &&
- idx <= PCI_BRIDGE_RESOURCE_END)
- res->flags = 0;
- }
-
free_list(fail_head);
}
@@ -2389,10 +2415,16 @@ void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
}
EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);
-int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
+/*
+ * Walk to the root bus, find the bridge window relevant for @res and
+ * release it when possible. If the bridge window contains assigned
+ * resources, it cannot be released.
+ */
+int pbus_reassign_bridge_resources(struct pci_bus *bus, struct resource *res)
{
+ unsigned long type = res->flags;
struct pci_dev_resource *dev_res;
- struct pci_dev *next;
+ struct pci_dev *bridge;
LIST_HEAD(saved);
LIST_HEAD(added);
LIST_HEAD(failed);
@@ -2401,39 +2433,31 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
down_read(&pci_bus_sem);
- /* Walk to the root hub, releasing bridge BARs when possible */
- next = bridge;
- do {
- bridge = next;
- for (i = PCI_BRIDGE_RESOURCES; i < PCI_BRIDGE_RESOURCE_END;
- i++) {
- struct resource *res = &bridge->resource[i];
- const char *res_name = pci_resource_name(bridge, i);
-
- if ((res->flags ^ type) & PCI_RES_TYPE_MASK)
- continue;
+ while (!pci_is_root_bus(bus)) {
+ bridge = bus->self;
+ res = pbus_select_window(bus, res);
+ if (!res)
+ break;
- /* Ignore BARs which are still in use */
- if (res->child)
- continue;
+ i = pci_resource_num(bridge, res);
+ /* Ignore BARs which are still in use */
+ if (!res->child) {
ret = add_to_list(&saved, bridge, res, 0, 0);
if (ret)
goto cleanup;
- pci_info(bridge, "%s %pR: releasing\n", res_name, res);
+ pci_release_resource(bridge, i);
+ } else {
+ const char *res_name = pci_resource_name(bridge, i);
- if (res->parent)
- release_resource(res);
- res->start = 0;
- res->end = 0;
- break;
+ pci_warn(bridge,
+ "%s %pR: was not released (still contains assigned resources)\n",
+ res_name, res);
}
- if (i == PCI_BRIDGE_RESOURCE_END)
- break;
- next = bridge->bus ? bridge->bus->self : NULL;
- } while (next);
+ bus = bus->parent;
+ }
if (list_empty(&saved)) {
up_read(&pci_bus_sem);
@@ -2446,8 +2470,12 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
free_list(&added);
if (!list_empty(&failed)) {
- ret = -ENOSPC;
- goto cleanup;
+ if (pci_required_resource_failed(&failed, type)) {
+ ret = -ENOSPC;
+ goto cleanup;
+ }
+ /* Only resources with unrelated types failed (again) */
+ free_list(&failed);
}
list_for_each_entry(dev_res, &saved, list) {
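
One construct in the per-bridge split above is easy to misread: it uses GCC's `?:` extension, where `a ?: b` yields `a` when `a` is non-zero and `b` otherwise, without evaluating `a` twice. A standalone sketch of the same split, with made-up bridge counts:

#include <stdio.h>

/*
 * Hypothetical numbers: split 96 MB of spare window space between
 * hotplug bridges if any exist, otherwise between normal bridges.
 */
int main(void)
{
	unsigned long long avail = 96ULL << 20;	/* 96 MB */
	unsigned int hotplug_bridges = 0, normal_bridges = 3;
	unsigned long long per_bridge;

	/* GNU extension: x ?: y means x ? x : y, evaluating x once */
	per_bridge = avail / (hotplug_bridges ?: normal_bridges);

	printf("%llu MB per bridge\n", per_bridge >> 20);	/* prints 32 */
	return 0;
}
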
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index d2b3ed51e880..c3ba4ccecd43 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -359,6 +359,9 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
res->flags &= ~IORESOURCE_UNSET;
res->flags &= ~IORESOURCE_STARTALIGN;
+ if (resno >= PCI_BRIDGE_RESOURCES && resno <= PCI_BRIDGE_RESOURCE_END)
+ res->flags &= ~IORESOURCE_DISABLED;
+
pci_info(dev, "%s %pR: assigned\n", res_name, res);
if (resno < PCI_BRIDGE_RESOURCES)
pci_update_resource(dev, resno);
@@ -406,20 +409,25 @@ int pci_reassign_resource(struct pci_dev *dev, int resno,
return 0;
}
-void pci_release_resource(struct pci_dev *dev, int resno)
+int pci_release_resource(struct pci_dev *dev, int resno)
{
struct resource *res = pci_resource_n(dev, resno);
const char *res_name = pci_resource_name(dev, resno);
+ int ret;
if (!res->parent)
- return;
+ return 0;
pci_info(dev, "%s %pR: releasing\n", res_name, res);
- release_resource(res);
+ ret = release_resource(res);
+ if (ret)
+ return ret;
res->end = resource_size(res) - 1;
res->start = 0;
res->flags |= IORESOURCE_UNSET;
+
+ return 0;
}
EXPORT_SYMBOL(pci_release_resource);
@@ -488,7 +496,7 @@ int pci_resize_resource(struct pci_dev *dev, int resno, int size)
/* Check if the new config works by trying to assign everything. */
if (dev->bus->self) {
- ret = pci_reassign_bridge_resources(dev->bus->self, res->flags);
+ ret = pbus_reassign_bridge_resources(dev->bus, res);
if (ret)
goto error_resize;
}
@@ -522,22 +530,26 @@ int pci_enable_resources(struct pci_dev *dev, int mask)
if (pci_resource_is_optional(dev, i))
continue;
- if (r->flags & IORESOURCE_UNSET) {
- pci_err(dev, "%s %pR: not assigned; can't enable device\n",
- r_name, r);
- return -EINVAL;
+ if (i < PCI_BRIDGE_RESOURCES) {
+ if (r->flags & IORESOURCE_UNSET) {
+ pci_err(dev, "%s %pR: not assigned; can't enable device\n",
+ r_name, r);
+ return -EINVAL;
+ }
+
+ if (!r->parent) {
+ pci_err(dev, "%s %pR: not claimed; can't enable device\n",
+ r_name, r);
+ return -EINVAL;
+ }
}
- if (!r->parent) {
- pci_err(dev, "%s %pR: not claimed; can't enable device\n",
- r_name, r);
- return -EINVAL;
+ if (r->parent) {
+ if (r->flags & IORESOURCE_IO)
+ cmd |= PCI_COMMAND_IO;
+ if (r->flags & IORESOURCE_MEM)
+ cmd |= PCI_COMMAND_MEMORY;
}
-
- if (r->flags & IORESOURCE_IO)
- cmd |= PCI_COMMAND_IO;
- if (r->flags & IORESOURCE_MEM)
- cmd |= PCI_COMMAND_MEMORY;
}
if (cmd != old_cmd) {
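
A detail of pci_release_resource() worth spelling out: after a successful release the window is marked unassigned, but its size is preserved by rebasing it at zero (end = size - 1, start = 0), so a later assignment pass can re-request the same size. A toy illustration of that bookkeeping:

#include <assert.h>

struct res {
	unsigned long long start, end;	/* inclusive, like struct resource */
};

static unsigned long long res_size(const struct res *r)
{
	return r->end - r->start + 1;
}

int main(void)
{
	struct res r = { .start = 0xe0000000ULL, .end = 0xe00fffffULL };
	unsigned long long size = res_size(&r);	/* 1 MB window */

	/* Mark unassigned but keep the size encoded as [0, size - 1]. */
	r.end = size - 1;
	r.start = 0;

	assert(res_size(&r) == size);	/* size survives the release */
	return 0;
}
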
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index b14dfab04d84..5ff84fb8fb0f 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -269,10 +269,9 @@ static void mrpc_event_work(struct work_struct *work)
dev_dbg(&stdev->dev, "%s\n", __func__);
- mutex_lock(&stdev->mrpc_mutex);
+ guard(mutex)(&stdev->mrpc_mutex);
cancel_delayed_work(&stdev->mrpc_timeout);
mrpc_complete_cmd(stdev);
- mutex_unlock(&stdev->mrpc_mutex);
}
static void mrpc_error_complete_cmd(struct switchtec_dev *stdev)
@@ -1322,18 +1321,18 @@ static void stdev_kill(struct switchtec_dev *stdev)
cancel_delayed_work_sync(&stdev->mrpc_timeout);
/* Mark the hardware as unavailable and complete all completions */
- mutex_lock(&stdev->mrpc_mutex);
- stdev->alive = false;
-
- /* Wake up and kill any users waiting on an MRPC request */
- list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
- stuser->cmd_done = true;
- wake_up_interruptible(&stuser->cmd_comp);
- list_del_init(&stuser->list);
- stuser_put(stuser);
- }
+ scoped_guard (mutex, &stdev->mrpc_mutex) {
+ stdev->alive = false;
+
+ /* Wake up and kill any users waiting on an MRPC request */
+ list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
+ stuser->cmd_done = true;
+ wake_up_interruptible(&stuser->cmd_comp);
+ list_del_init(&stuser->list);
+ stuser_put(stuser);
+ }
- mutex_unlock(&stdev->mrpc_mutex);
+ }
/* Wake up any users waiting on event_wq */
wake_up_interruptible(&stdev->event_wq);
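
The switchtec conversion above relies on the scope-based cleanup helpers from <linux/cleanup.h>: guard(mutex)(&m) takes the mutex and drops it automatically when the enclosing scope ends, and scoped_guard(mutex, &m) { ... } confines that to an explicit block. A minimal sketch of both forms (the demo names are made up):

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static int demo_count;

static int demo_get(void)
{
	guard(mutex)(&demo_lock);	/* dropped on every return path */
	return demo_count;
}

static void demo_put(void)
{
	scoped_guard(mutex, &demo_lock) {
		demo_count--;
	}	/* mutex released here */
	/* ...work that must not run under the lock... */
}
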
diff --git a/drivers/pci/vgaarb.c b/drivers/pci/vgaarb.c
index 78748e8d2dba..b58f94ee4891 100644
--- a/drivers/pci/vgaarb.c
+++ b/drivers/pci/vgaarb.c
@@ -556,34 +556,13 @@ EXPORT_SYMBOL(vga_put);
static bool vga_is_firmware_default(struct pci_dev *pdev)
{
-#if defined(CONFIG_X86)
- u64 base = screen_info.lfb_base;
- u64 size = screen_info.lfb_size;
- struct resource *r;
- u64 limit;
+#ifdef CONFIG_SCREEN_INFO
+ struct screen_info *si = &screen_info;
- /* Select the device owning the boot framebuffer if there is one */
-
- if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
- base |= (u64)screen_info.ext_lfb_base << 32;
-
- limit = base + size;
-
- /* Does firmware framebuffer belong to us? */
- pci_dev_for_each_resource(pdev, r) {
- if (resource_type(r) != IORESOURCE_MEM)
- continue;
-
- if (!r->start || !r->end)
- continue;
-
- if (base < r->start || limit >= r->end)
- continue;
-
- return true;
- }
-#endif
+ return pdev == screen_info_pci_dev(si);
+#else
return false;
+#endif
}
static bool vga_arb_integrated_gpu(struct device *dev)
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index dddb235dd020..660a95805524 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -250,7 +250,4 @@ config ELECTRA_CF
config PCCARD_NONSTATIC
bool
-config PCCARD_IODYN
- bool
-
endif # PCCARD
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index c9d51b150682..d16a0317ce43 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -12,7 +12,6 @@ obj-$(CONFIG_PCMCIA) += pcmcia.o
pcmcia_rsrc-y += rsrc_mgr.o
pcmcia_rsrc-$(CONFIG_PCCARD_NONSTATIC) += rsrc_nonstatic.o
-pcmcia_rsrc-$(CONFIG_PCCARD_IODYN) += rsrc_iodyn.o
obj-$(CONFIG_PCCARD) += pcmcia_rsrc.o
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index c75f55e1250a..adbc486af2ea 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -229,23 +229,6 @@ void pcmcia_unregister_socket(struct pcmcia_socket *socket)
EXPORT_SYMBOL(pcmcia_unregister_socket);
-struct pcmcia_socket *pcmcia_get_socket_by_nr(unsigned int nr)
-{
- struct pcmcia_socket *s;
-
- down_read(&pcmcia_socket_list_rwsem);
- list_for_each_entry(s, &pcmcia_socket_list, socket_list)
- if (s->sock == nr) {
- up_read(&pcmcia_socket_list_rwsem);
- return s;
- }
- up_read(&pcmcia_socket_list_rwsem);
-
- return NULL;
-
-}
-EXPORT_SYMBOL(pcmcia_get_socket_by_nr);
-
static int socket_reset(struct pcmcia_socket *skt)
{
int status, i;
diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h
index 02a83ca44e77..5ac810ffda31 100644
--- a/drivers/pcmcia/cs_internal.h
+++ b/drivers/pcmcia/cs_internal.h
@@ -116,7 +116,6 @@ extern struct list_head pcmcia_socket_list;
extern const struct class pcmcia_socket_class;
int pccard_register_pcmcia(struct pcmcia_socket *s, struct pcmcia_callback *c);
-struct pcmcia_socket *pcmcia_get_socket_by_nr(unsigned int nr);
void pcmcia_parse_uevents(struct pcmcia_socket *socket, unsigned int events);
#define PCMCIA_UEVENT_EJECT 0x0001
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index da6f66f357cc..18f4eef28dbc 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -1308,7 +1308,7 @@ static int pcmcia_bus_early_resume(struct pcmcia_socket *skt)
* physically present, even if the call to this function returns
* non-NULL. Furthermore, the device driver most likely is unbound
* almost immediately, so the timeframe where pcmcia_dev_present
- * returns NULL is probably really really small.
+ * returns NULL is probably really, really small.
*/
struct pcmcia_device *pcmcia_dev_present(struct pcmcia_device *_p_dev)
{
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index 1b1dff56ec7b..d6f24c7d1562 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -215,6 +215,8 @@ static int __init omap_cf_probe(struct platform_device *pdev)
return -EINVAL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
cf = kzalloc(sizeof *cf, GFP_KERNEL);
if (!cf)
@@ -302,7 +304,13 @@ static void __exit omap_cf_remove(struct platform_device *pdev)
kfree(cf);
}
-static struct platform_driver omap_cf_driver = {
+/*
+ * omap_cf_remove() lives in .exit.text. For drivers registered via
+ * platform_driver_probe() this is ok because they cannot get unbound at
+ * runtime. So mark the driver struct with __refdata to prevent modpost
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver omap_cf_driver __refdata = {
.driver = {
.name = driver_name,
},
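
For context on the __refdata annotation above: platform_driver_probe() registers a driver that can never be re-bound after init, so a remove callback living in .exit.text is safe in practice, but modpost cannot prove that and would emit a section-mismatch warning. A condensed sketch of the pattern, with hypothetical names:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __init foo_probe(struct platform_device *pdev)
{
	return 0;
}

static void __exit foo_remove(struct platform_device *pdev)
{
}

/*
 * Pointing driver data at an .exit.text function normally trips a
 * modpost section-mismatch warning; __refdata suppresses it, which is
 * safe here because platform_driver_probe() forbids runtime unbind.
 */
static struct platform_driver foo_driver __refdata = {
	.remove = __exit_p(foo_remove),
	.driver = {
		.name = "foo",
	},
};
module_platform_driver_probe(foo_driver, foo_probe);
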
diff --git a/drivers/pcmcia/rsrc_iodyn.c b/drivers/pcmcia/rsrc_iodyn.c
deleted file mode 100644
index b04b16496b0c..000000000000
--- a/drivers/pcmcia/rsrc_iodyn.c
+++ /dev/null
@@ -1,168 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * rsrc_iodyn.c -- Resource management routines for MEM-static sockets.
- *
- * The initial developer of the original code is David A. Hinds
- * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
- * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
- *
- * (C) 1999 David A. Hinds
- */
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-
-#include <pcmcia/ss.h>
-#include <pcmcia/cistpl.h>
-#include "cs_internal.h"
-
-
-struct pcmcia_align_data {
- unsigned long mask;
- unsigned long offset;
-};
-
-static resource_size_t pcmcia_align(void *align_data,
- const struct resource *res,
- resource_size_t size, resource_size_t align)
-{
- struct pcmcia_align_data *data = align_data;
- resource_size_t start;
-
- start = (res->start & ~data->mask) + data->offset;
- if (start < res->start)
- start += data->mask + 1;
-
-#ifdef CONFIG_X86
- if (res->flags & IORESOURCE_IO) {
- if (start & 0x300)
- start = (start + 0x3ff) & ~0x3ff;
- }
-#endif
-
-#ifdef CONFIG_M68K
- if (res->flags & IORESOURCE_IO) {
- if ((res->start + size - 1) >= 1024)
- start = res->end;
- }
-#endif
-
- return start;
-}
-
-
-static struct resource *__iodyn_find_io_region(struct pcmcia_socket *s,
- unsigned long base, int num,
- unsigned long align)
-{
- struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_IO,
- dev_name(&s->dev));
- struct pcmcia_align_data data;
- unsigned long min = base;
- int ret;
-
- data.mask = align - 1;
- data.offset = base & data.mask;
-
-#ifdef CONFIG_PCI
- if (s->cb_dev) {
- ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num, 1,
- min, 0, pcmcia_align, &data);
- } else
-#endif
- ret = allocate_resource(&ioport_resource, res, num, min, ~0UL,
- 1, pcmcia_align, &data);
-
- if (ret != 0) {
- kfree(res);
- res = NULL;
- }
- return res;
-}
-
-static int iodyn_find_io(struct pcmcia_socket *s, unsigned int attr,
- unsigned int *base, unsigned int num,
- unsigned int align, struct resource **parent)
-{
- int i, ret = 0;
-
- /* Check for an already-allocated window that must conflict with
- * what was asked for. It is a hack because it does not catch all
- * potential conflicts, just the most obvious ones.
- */
- for (i = 0; i < MAX_IO_WIN; i++) {
- if (!s->io[i].res)
- continue;
-
- if (!*base)
- continue;
-
- if ((s->io[i].res->start & (align-1)) == *base)
- return -EBUSY;
- }
-
- for (i = 0; i < MAX_IO_WIN; i++) {
- struct resource *res = s->io[i].res;
- unsigned int try;
-
- if (res && (res->flags & IORESOURCE_BITS) !=
- (attr & IORESOURCE_BITS))
- continue;
-
- if (!res) {
- if (align == 0)
- align = 0x10000;
-
- res = s->io[i].res = __iodyn_find_io_region(s, *base,
- num, align);
- if (!res)
- return -EINVAL;
-
- *base = res->start;
- s->io[i].res->flags =
- ((res->flags & ~IORESOURCE_BITS) |
- (attr & IORESOURCE_BITS));
- s->io[i].InUse = num;
- *parent = res;
- return 0;
- }
-
- /* Try to extend top of window */
- try = res->end + 1;
- if ((*base == 0) || (*base == try)) {
- if (adjust_resource(s->io[i].res, res->start,
- resource_size(res) + num))
- continue;
- *base = try;
- s->io[i].InUse += num;
- *parent = res;
- return 0;
- }
-
- /* Try to extend bottom of window */
- try = res->start - num;
- if ((*base == 0) || (*base == try)) {
- if (adjust_resource(s->io[i].res,
- res->start - num,
- resource_size(res) + num))
- continue;
- *base = try;
- s->io[i].InUse += num;
- *parent = res;
- return 0;
- }
- }
-
- return -EINVAL;
-}
-
-
-struct pccard_resource_ops pccard_iodyn_ops = {
- .validate_mem = NULL,
- .find_io = iodyn_find_io,
- .find_mem = NULL,
- .init = static_init,
- .exit = NULL,
-};
-EXPORT_SYMBOL(pccard_iodyn_ops);
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index bf9d070a4496..da494fe451ba 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -375,7 +375,9 @@ static int do_validate_mem(struct pcmcia_socket *s,
if (validate && !s->fake_cis) {
/* move it to the validated data set */
- add_interval(&s_data->mem_db_valid, base, size);
+ ret = add_interval(&s_data->mem_db_valid, base, size);
+ if (ret)
+ return ret;
sub_interval(&s_data->mem_db, base, size);
}
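
The rsrc_nonstatic fix above follows a simple move-with-check discipline: a range only leaves the unvalidated set once it has actually been recorded in the validated one. Sketched generically (the struct and helpers are stand-ins for the driver's static add_interval()/sub_interval()):

/* Hypothetical interval-set API mirroring rsrc_nonstatic.c's helpers. */
struct interval_set;
int add_interval(struct interval_set *set, unsigned long base, unsigned long num);
int sub_interval(struct interval_set *set, unsigned long base, unsigned long num);

/* Move [base, base + num) from @src to @dst, or change nothing. */
static int move_interval(struct interval_set *dst, struct interval_set *src,
			 unsigned long base, unsigned long num)
{
	int ret = add_interval(dst, base, num);

	if (ret)
		return ret;	/* src is untouched on failure */
	return sub_interval(src, base, num);
}
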
diff --git a/drivers/pcmcia/socket_sysfs.c b/drivers/pcmcia/socket_sysfs.c
index c7a906664c36..4eadd0485066 100644
--- a/drivers/pcmcia/socket_sysfs.c
+++ b/drivers/pcmcia/socket_sysfs.c
@@ -10,6 +10,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/mm.h>
@@ -98,7 +99,7 @@ static ssize_t pccard_show_card_pm_state(struct device *dev,
char *buf)
{
struct pcmcia_socket *s = to_socket(dev);
- return sysfs_emit(buf, "%s\n", s->state & SOCKET_SUSPEND ? "off" : "on");
+ return sysfs_emit(buf, "%s\n", str_off_on(s->state & SOCKET_SUSPEND));
}
static ssize_t pccard_store_card_pm_state(struct device *dev,
@@ -177,7 +178,7 @@ static ssize_t pccard_show_resource(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pcmcia_socket *s = to_socket(dev);
- return sysfs_emit(buf, "%s\n", s->resource_setup_done ? "yes" : "no");
+ return sysfs_emit(buf, "%s\n", str_yes_no(s->resource_setup_done));
}
static ssize_t pccard_store_resource(struct device *dev,
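
The str_off_on()/str_yes_no() helpers used above come from <linux/string_choices.h>; they map a boolean to a constant string, with the true case being the first word in the helper's name. A small usage sketch (demo_report() is made up):

#include <linux/kernel.h>
#include <linux/string_choices.h>

static void demo_report(bool suspended, bool setup_done)
{
	/* True picks the first word of the helper's name. */
	pr_info("card power: %s\n", str_off_on(suspended));	/* true -> "off" */
	pr_info("resources: %s\n", str_yes_no(setup_done));	/* true -> "yes" */
}
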
diff --git a/drivers/peci/controller/peci-npcm.c b/drivers/peci/controller/peci-npcm.c
index c77591ca583d..931868991241 100644
--- a/drivers/peci/controller/peci-npcm.c
+++ b/drivers/peci/controller/peci-npcm.c
@@ -221,7 +221,6 @@ static const struct regmap_config npcm_peci_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = NPCM_PECI_MAX_REG,
- .fast_io = true,
};
static const struct peci_controller_ops npcm_ops = {
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index a9188dec36fe..638321fc9800 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -178,6 +178,15 @@ config FSL_IMX9_DDR_PMU
can give information about memory throughput and other related
events.
+config FUJITSU_UNCORE_PMU
+ tristate "Fujitsu Uncore PMU"
+ depends on (ARM64 && ACPI) || (COMPILE_TEST && 64BIT)
+ help
+ Provides support for the Uncore performance monitor unit (PMU)
+ in Fujitsu processors.
+ Adds the Uncore PMU into the perf events subsystem for
+ monitoring Uncore events.
+
config QCOM_L2_PMU
bool "Qualcomm Technologies L2-cache PMU"
depends on ARCH_QCOM && ARM64 && ACPI
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index 192fc8b16204..ea52711a87e3 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_ARM_XSCALE_PMU) += arm_xscale_pmu.o
obj-$(CONFIG_ARM_SMMU_V3_PMU) += arm_smmuv3_pmu.o
obj-$(CONFIG_FSL_IMX8_DDR_PMU) += fsl_imx8_ddr_perf.o
obj-$(CONFIG_FSL_IMX9_DDR_PMU) += fsl_imx9_ddr_perf.o
+obj-$(CONFIG_FUJITSU_UNCORE_PMU) += fujitsu_uncore_pmu.o
obj-$(CONFIG_HISI_PMU) += hisilicon/
obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o
obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index 1a0d0e1a2263..8af3563fdf60 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -565,7 +565,7 @@ module_param_named(pmu_poll_period_us, arm_ccn_pmu_poll_period_us, uint,
static ktime_t arm_ccn_pmu_timer_period(void)
{
- return ns_to_ktime((u64)arm_ccn_pmu_poll_period_us * 1000);
+ return us_to_ktime((u64)arm_ccn_pmu_poll_period_us);
}
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index 11fb2234b10f..23245352a3fc 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -65,7 +65,7 @@
/* PMU registers occupy the 3rd 4KB page of each node's region */
#define CMN_PMU_OFFSET 0x2000
/* ...except when they don't :( */
-#define CMN_S3_DTM_OFFSET 0xa000
+#define CMN_S3_R1_DTM_OFFSET 0xa000
#define CMN_S3_PMU_OFFSET 0xd900
/* For most nodes, this is all there is */
@@ -233,6 +233,9 @@ enum cmn_revision {
REV_CMN700_R1P0,
REV_CMN700_R2P0,
REV_CMN700_R3P0,
+ REV_CMNS3_R0P0 = 0,
+ REV_CMNS3_R0P1,
+ REV_CMNS3_R1P0,
REV_CI700_R0P0 = 0,
REV_CI700_R1P0,
REV_CI700_R2P0,
@@ -425,8 +428,8 @@ static enum cmn_model arm_cmn_model(const struct arm_cmn *cmn)
static int arm_cmn_pmu_offset(const struct arm_cmn *cmn, const struct arm_cmn_node *dn)
{
if (cmn->part == PART_CMN_S3) {
- if (dn->type == CMN_TYPE_XP)
- return CMN_S3_DTM_OFFSET;
+ if (cmn->rev >= REV_CMNS3_R1P0 && dn->type == CMN_TYPE_XP)
+ return CMN_S3_R1_DTM_OFFSET;
return CMN_S3_PMU_OFFSET;
}
return CMN_PMU_OFFSET;
diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
index f6d7bab5d555..69c5cc8f5606 100644
--- a/drivers/perf/arm_pmuv3.c
+++ b/drivers/perf/arm_pmuv3.c
@@ -978,6 +978,32 @@ static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc,
return -EAGAIN;
}
+static bool armv8pmu_can_use_pmccntr(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;
+
+ if (evtype != ARMV8_PMUV3_PERFCTR_CPU_CYCLES)
+ return false;
+
+ /*
+ * A CPU_CYCLES event with threshold counting cannot use PMCCNTR_EL0
+ * since it lacks threshold support.
+ */
+ if (armv8pmu_event_get_threshold(&event->attr))
+ return false;
+
+ /*
+ * PMCCNTR_EL0 is not affected by BRBE controls like BRBCR_ELx.FZP.
+ * So don't use it for branch events.
+ */
+ if (has_branch_stack(event))
+ return false;
+
+ return true;
+}
+
static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
struct perf_event *event)
{
@@ -986,8 +1012,7 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;
/* Always prefer to place a cycle counter into the cycle counter. */
- if ((evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) &&
- !armv8pmu_event_get_threshold(&event->attr) && !has_branch_stack(event)) {
+ if (armv8pmu_can_use_pmccntr(cpuc, event)) {
if (!test_and_set_bit(ARMV8_PMU_CYCLE_IDX, cpuc->used_mask))
return ARMV8_PMU_CYCLE_IDX;
else if (armv8pmu_event_is_64bit(event) &&
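
The refactored placement logic above keeps the long-standing policy: a cycles event that is compatible with the dedicated cycle counter claims it first and only then competes for the general-purpose counters. A toy version of that policy (TOY_CYCLE_IDX and the pool size are illustrative, not the PMUv3 values):

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/types.h>

#define TOY_CYCLE_IDX	31	/* illustrative dedicated slot */

/*
 * Toy placement policy: a cycles event that may use the dedicated
 * cycle counter claims it first, then falls back to the shared pool.
 */
static int toy_get_idx(unsigned long *used_mask, int nr_counters,
		       bool can_use_cycle_counter)
{
	int idx;

	if (can_use_cycle_counter &&
	    !test_and_set_bit(TOY_CYCLE_IDX, used_mask))
		return TOY_CYCLE_IDX;

	for (idx = 0; idx < nr_counters; idx++)
		if (!test_and_set_bit(idx, used_mask))
			return idx;

	return -EAGAIN;
}
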
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index 369e77ad5f13..fa50645fedda 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -86,9 +86,11 @@ struct arm_spe_pmu {
#define SPE_PMU_FEAT_ERND (1UL << 5)
#define SPE_PMU_FEAT_INV_FILT_EVT (1UL << 6)
#define SPE_PMU_FEAT_DISCARD (1UL << 7)
+#define SPE_PMU_FEAT_EFT (1UL << 8)
#define SPE_PMU_FEAT_DEV_PROBED (1UL << 63)
u64 features;
+ u64 pmsevfr_res0;
u16 max_record_sz;
u16 align;
struct perf_output_handle __percpu *handle;
@@ -97,7 +99,8 @@ struct arm_spe_pmu {
#define to_spe_pmu(p) (container_of(p, struct arm_spe_pmu, pmu))
/* Convert a free-running index from perf into an SPE buffer offset */
-#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT))
+#define PERF_IDX2OFF(idx, buf) \
+ ((idx) % ((unsigned long)(buf)->nr_pages << PAGE_SHIFT))
/* Keep track of our dynamic hotplug state */
static enum cpuhp_state arm_spe_pmu_online;
@@ -115,6 +118,7 @@ enum arm_spe_pmu_capabilities {
SPE_PMU_CAP_FEAT_MAX,
SPE_PMU_CAP_CNT_SZ = SPE_PMU_CAP_FEAT_MAX,
SPE_PMU_CAP_MIN_IVAL,
+ SPE_PMU_CAP_EVENT_FILTER,
};
static int arm_spe_pmu_feat_caps[SPE_PMU_CAP_FEAT_MAX] = {
@@ -122,7 +126,7 @@ static int arm_spe_pmu_feat_caps[SPE_PMU_CAP_FEAT_MAX] = {
[SPE_PMU_CAP_ERND] = SPE_PMU_FEAT_ERND,
};
-static u32 arm_spe_pmu_cap_get(struct arm_spe_pmu *spe_pmu, int cap)
+static u64 arm_spe_pmu_cap_get(struct arm_spe_pmu *spe_pmu, int cap)
{
if (cap < SPE_PMU_CAP_FEAT_MAX)
return !!(spe_pmu->features & arm_spe_pmu_feat_caps[cap]);
@@ -132,6 +136,8 @@ static u32 arm_spe_pmu_cap_get(struct arm_spe_pmu *spe_pmu, int cap)
return spe_pmu->counter_sz;
case SPE_PMU_CAP_MIN_IVAL:
return spe_pmu->min_period;
+ case SPE_PMU_CAP_EVENT_FILTER:
+ return ~spe_pmu->pmsevfr_res0;
default:
WARN(1, "unknown cap %d\n", cap);
}
@@ -148,7 +154,19 @@ static ssize_t arm_spe_pmu_cap_show(struct device *dev,
container_of(attr, struct dev_ext_attribute, attr);
int cap = (long)ea->var;
- return sysfs_emit(buf, "%u\n", arm_spe_pmu_cap_get(spe_pmu, cap));
+ return sysfs_emit(buf, "%llu\n", arm_spe_pmu_cap_get(spe_pmu, cap));
+}
+
+static ssize_t arm_spe_pmu_cap_show_hex(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);
+ struct dev_ext_attribute *ea =
+ container_of(attr, struct dev_ext_attribute, attr);
+ int cap = (long)ea->var;
+
+ return sysfs_emit(buf, "0x%llx\n", arm_spe_pmu_cap_get(spe_pmu, cap));
}
#define SPE_EXT_ATTR_ENTRY(_name, _func, _var) \
@@ -158,12 +176,15 @@ static ssize_t arm_spe_pmu_cap_show(struct device *dev,
#define SPE_CAP_EXT_ATTR_ENTRY(_name, _var) \
SPE_EXT_ATTR_ENTRY(_name, arm_spe_pmu_cap_show, _var)
+#define SPE_CAP_EXT_ATTR_ENTRY_HEX(_name, _var) \
+ SPE_EXT_ATTR_ENTRY(_name, arm_spe_pmu_cap_show_hex, _var)
static struct attribute *arm_spe_pmu_cap_attr[] = {
SPE_CAP_EXT_ATTR_ENTRY(arch_inst, SPE_PMU_CAP_ARCH_INST),
SPE_CAP_EXT_ATTR_ENTRY(ernd, SPE_PMU_CAP_ERND),
SPE_CAP_EXT_ATTR_ENTRY(count_size, SPE_PMU_CAP_CNT_SZ),
SPE_CAP_EXT_ATTR_ENTRY(min_interval, SPE_PMU_CAP_MIN_IVAL),
+ SPE_CAP_EXT_ATTR_ENTRY_HEX(event_filter, SPE_PMU_CAP_EVENT_FILTER),
NULL,
};
@@ -197,6 +218,27 @@ static const struct attribute_group arm_spe_pmu_cap_group = {
#define ATTR_CFG_FLD_discard_CFG config /* PMBLIMITR_EL1.FM = DISCARD */
#define ATTR_CFG_FLD_discard_LO 35
#define ATTR_CFG_FLD_discard_HI 35
+#define ATTR_CFG_FLD_branch_filter_mask_CFG config /* PMSFCR_EL1.Bm */
+#define ATTR_CFG_FLD_branch_filter_mask_LO 36
+#define ATTR_CFG_FLD_branch_filter_mask_HI 36
+#define ATTR_CFG_FLD_load_filter_mask_CFG config /* PMSFCR_EL1.LDm */
+#define ATTR_CFG_FLD_load_filter_mask_LO 37
+#define ATTR_CFG_FLD_load_filter_mask_HI 37
+#define ATTR_CFG_FLD_store_filter_mask_CFG config /* PMSFCR_EL1.STm */
+#define ATTR_CFG_FLD_store_filter_mask_LO 38
+#define ATTR_CFG_FLD_store_filter_mask_HI 38
+#define ATTR_CFG_FLD_simd_filter_CFG config /* PMSFCR_EL1.SIMD */
+#define ATTR_CFG_FLD_simd_filter_LO 39
+#define ATTR_CFG_FLD_simd_filter_HI 39
+#define ATTR_CFG_FLD_simd_filter_mask_CFG config /* PMSFCR_EL1.SIMDm */
+#define ATTR_CFG_FLD_simd_filter_mask_LO 40
+#define ATTR_CFG_FLD_simd_filter_mask_HI 40
+#define ATTR_CFG_FLD_float_filter_CFG config /* PMSFCR_EL1.FP */
+#define ATTR_CFG_FLD_float_filter_LO 41
+#define ATTR_CFG_FLD_float_filter_HI 41
+#define ATTR_CFG_FLD_float_filter_mask_CFG config /* PMSFCR_EL1.FPm */
+#define ATTR_CFG_FLD_float_filter_mask_LO 42
+#define ATTR_CFG_FLD_float_filter_mask_HI 42
#define ATTR_CFG_FLD_event_filter_CFG config1 /* PMSEVFR_EL1 */
#define ATTR_CFG_FLD_event_filter_LO 0
@@ -215,8 +257,15 @@ GEN_PMU_FORMAT_ATTR(pa_enable);
GEN_PMU_FORMAT_ATTR(pct_enable);
GEN_PMU_FORMAT_ATTR(jitter);
GEN_PMU_FORMAT_ATTR(branch_filter);
+GEN_PMU_FORMAT_ATTR(branch_filter_mask);
GEN_PMU_FORMAT_ATTR(load_filter);
+GEN_PMU_FORMAT_ATTR(load_filter_mask);
GEN_PMU_FORMAT_ATTR(store_filter);
+GEN_PMU_FORMAT_ATTR(store_filter_mask);
+GEN_PMU_FORMAT_ATTR(simd_filter);
+GEN_PMU_FORMAT_ATTR(simd_filter_mask);
+GEN_PMU_FORMAT_ATTR(float_filter);
+GEN_PMU_FORMAT_ATTR(float_filter_mask);
GEN_PMU_FORMAT_ATTR(event_filter);
GEN_PMU_FORMAT_ATTR(inv_event_filter);
GEN_PMU_FORMAT_ATTR(min_latency);
@@ -228,8 +277,15 @@ static struct attribute *arm_spe_pmu_formats_attr[] = {
&format_attr_pct_enable.attr,
&format_attr_jitter.attr,
&format_attr_branch_filter.attr,
+ &format_attr_branch_filter_mask.attr,
&format_attr_load_filter.attr,
+ &format_attr_load_filter_mask.attr,
&format_attr_store_filter.attr,
+ &format_attr_store_filter_mask.attr,
+ &format_attr_simd_filter.attr,
+ &format_attr_simd_filter_mask.attr,
+ &format_attr_float_filter.attr,
+ &format_attr_float_filter_mask.attr,
&format_attr_event_filter.attr,
&format_attr_inv_event_filter.attr,
&format_attr_min_latency.attr,
@@ -250,6 +306,16 @@ static umode_t arm_spe_pmu_format_attr_is_visible(struct kobject *kobj,
if (attr == &format_attr_inv_event_filter.attr && !(spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT))
return 0;
+ if ((attr == &format_attr_branch_filter_mask.attr ||
+ attr == &format_attr_load_filter_mask.attr ||
+ attr == &format_attr_store_filter_mask.attr ||
+ attr == &format_attr_simd_filter.attr ||
+ attr == &format_attr_simd_filter_mask.attr ||
+ attr == &format_attr_float_filter.attr ||
+ attr == &format_attr_float_filter_mask.attr) &&
+ !(spe_pmu->features & SPE_PMU_FEAT_EFT))
+ return 0;
+
return attr->mode;
}
@@ -345,8 +411,15 @@ static u64 arm_spe_event_to_pmsfcr(struct perf_event *event)
u64 reg = 0;
reg |= FIELD_PREP(PMSFCR_EL1_LD, ATTR_CFG_GET_FLD(attr, load_filter));
+ reg |= FIELD_PREP(PMSFCR_EL1_LDm, ATTR_CFG_GET_FLD(attr, load_filter_mask));
reg |= FIELD_PREP(PMSFCR_EL1_ST, ATTR_CFG_GET_FLD(attr, store_filter));
+ reg |= FIELD_PREP(PMSFCR_EL1_STm, ATTR_CFG_GET_FLD(attr, store_filter_mask));
reg |= FIELD_PREP(PMSFCR_EL1_B, ATTR_CFG_GET_FLD(attr, branch_filter));
+ reg |= FIELD_PREP(PMSFCR_EL1_Bm, ATTR_CFG_GET_FLD(attr, branch_filter_mask));
+ reg |= FIELD_PREP(PMSFCR_EL1_SIMD, ATTR_CFG_GET_FLD(attr, simd_filter));
+ reg |= FIELD_PREP(PMSFCR_EL1_SIMDm, ATTR_CFG_GET_FLD(attr, simd_filter_mask));
+ reg |= FIELD_PREP(PMSFCR_EL1_FP, ATTR_CFG_GET_FLD(attr, float_filter));
+ reg |= FIELD_PREP(PMSFCR_EL1_FPm, ATTR_CFG_GET_FLD(attr, float_filter_mask));
if (reg)
reg |= PMSFCR_EL1_FT;
@@ -697,20 +770,6 @@ static irqreturn_t arm_spe_pmu_irq_handler(int irq, void *dev)
return IRQ_HANDLED;
}
-static u64 arm_spe_pmsevfr_res0(u16 pmsver)
-{
- switch (pmsver) {
- case ID_AA64DFR0_EL1_PMSVer_IMP:
- return PMSEVFR_EL1_RES0_IMP;
- case ID_AA64DFR0_EL1_PMSVer_V1P1:
- return PMSEVFR_EL1_RES0_V1P1;
- case ID_AA64DFR0_EL1_PMSVer_V1P2:
- /* Return the highest version we support in default */
- default:
- return PMSEVFR_EL1_RES0_V1P2;
- }
-}
-
/* Perf callbacks */
static int arm_spe_pmu_event_init(struct perf_event *event)
{
@@ -726,10 +785,10 @@ static int arm_spe_pmu_event_init(struct perf_event *event)
!cpumask_test_cpu(event->cpu, &spe_pmu->supported_cpus))
return -ENOENT;
- if (arm_spe_event_to_pmsevfr(event) & arm_spe_pmsevfr_res0(spe_pmu->pmsver))
+ if (arm_spe_event_to_pmsevfr(event) & spe_pmu->pmsevfr_res0)
return -EOPNOTSUPP;
- if (arm_spe_event_to_pmsnevfr(event) & arm_spe_pmsevfr_res0(spe_pmu->pmsver))
+ if (arm_spe_event_to_pmsnevfr(event) & spe_pmu->pmsevfr_res0)
return -EOPNOTSUPP;
if (attr->exclude_idle)
@@ -762,6 +821,16 @@ static int arm_spe_pmu_event_init(struct perf_event *event)
!(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT))
return -EOPNOTSUPP;
+ if ((FIELD_GET(PMSFCR_EL1_LDm, reg) ||
+ FIELD_GET(PMSFCR_EL1_STm, reg) ||
+ FIELD_GET(PMSFCR_EL1_Bm, reg) ||
+ FIELD_GET(PMSFCR_EL1_SIMD, reg) ||
+ FIELD_GET(PMSFCR_EL1_SIMDm, reg) ||
+ FIELD_GET(PMSFCR_EL1_FP, reg) ||
+ FIELD_GET(PMSFCR_EL1_FPm, reg)) &&
+ !(spe_pmu->features & SPE_PMU_FEAT_EFT))
+ return -EOPNOTSUPP;
+
if (ATTR_CFG_GET_FLD(&event->attr, discard) &&
!(spe_pmu->features & SPE_PMU_FEAT_DISCARD))
return -EOPNOTSUPP;
@@ -1053,6 +1122,9 @@ static void __arm_spe_pmu_dev_probe(void *info)
if (spe_pmu->pmsver >= ID_AA64DFR0_EL1_PMSVer_V1P2)
spe_pmu->features |= SPE_PMU_FEAT_DISCARD;
+ if (FIELD_GET(PMSIDR_EL1_EFT, reg))
+ spe_pmu->features |= SPE_PMU_FEAT_EFT;
+
/* This field has a spaced out encoding, so just use a look-up */
fld = FIELD_GET(PMSIDR_EL1_INTERVAL, reg);
switch (fld) {
@@ -1107,6 +1179,10 @@ static void __arm_spe_pmu_dev_probe(void *info)
spe_pmu->counter_sz = 16;
}
+ /* Write all 1s and then read back. Unsupported filter bits are RAZ/WI. */
+ write_sysreg_s(U64_MAX, SYS_PMSEVFR_EL1);
+ spe_pmu->pmsevfr_res0 = ~read_sysreg_s(SYS_PMSEVFR_EL1);
+
dev_info(dev,
"probed SPEv1.%d for CPUs %*pbl [max_record_sz %u, align %u, features 0x%llx]\n",
spe_pmu->pmsver - 1, cpumask_pr_args(&spe_pmu->supported_cpus),
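
The PMSEVFR_EL1 probe above uses a common discovery idiom: write all 1s to a register whose unimplemented bits are RAZ/WI (read-as-zero, writes ignored), read it back, and the bits that stuck are the supported ones; their complement is the effective RES0 mask. A generic sketch of the idiom against a hypothetical MMIO register (the SPE code itself uses write_sysreg_s()/read_sysreg_s() on a system register):

#include <linux/io.h>
#include <linux/limits.h>

/*
 * For a register whose unimplemented bits are RAZ/WI, writing all 1s
 * and reading back leaves exactly the supported bits set.
 */
static u64 probe_res0_mask(void __iomem *reg)
{
	writeq(U64_MAX, reg);
	return ~readq(reg);
}
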
diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c
index 146ff57813fb..22f73ac894e9 100644
--- a/drivers/perf/dwc_pcie_pmu.c
+++ b/drivers/perf/dwc_pcie_pmu.c
@@ -39,6 +39,10 @@
#define DWC_PCIE_EVENT_CLEAR GENMASK(1, 0)
#define DWC_PCIE_EVENT_PER_CLEAR 0x1
+/* Event Selection Field has two subfields */
+#define DWC_PCIE_CNT_EVENT_SEL_GROUP GENMASK(11, 8)
+#define DWC_PCIE_CNT_EVENT_SEL_EVID GENMASK(7, 0)
+
#define DWC_PCIE_EVENT_CNT_DATA 0xC
#define DWC_PCIE_TIME_BASED_ANAL_CTL 0x10
@@ -73,6 +77,10 @@ enum dwc_pcie_event_type {
DWC_PCIE_EVENT_TYPE_MAX,
};
+#define DWC_PCIE_LANE_GROUP_6 6
+#define DWC_PCIE_LANE_GROUP_7 7
+#define DWC_PCIE_LANE_MAX_EVENTS_PER_GROUP 256
+
#define DWC_PCIE_LANE_EVENT_MAX_PERIOD GENMASK_ULL(31, 0)
#define DWC_PCIE_MAX_PERIOD GENMASK_ULL(63, 0)
@@ -82,8 +90,11 @@ struct dwc_pcie_pmu {
u16 ras_des_offset;
u32 nr_lanes;
+ /* Groups #6 and #7 */
+ DECLARE_BITMAP(lane_events, 2 * DWC_PCIE_LANE_MAX_EVENTS_PER_GROUP);
+ struct perf_event *time_based_event;
+
struct hlist_node cpuhp_node;
- struct perf_event *event[DWC_PCIE_EVENT_TYPE_MAX];
int on_cpu;
};
@@ -246,19 +257,26 @@ static const struct attribute_group *dwc_pcie_attr_groups[] = {
};
static void dwc_pcie_pmu_lane_event_enable(struct dwc_pcie_pmu *pcie_pmu,
+ struct perf_event *event,
bool enable)
{
struct pci_dev *pdev = pcie_pmu->pdev;
u16 ras_des_offset = pcie_pmu->ras_des_offset;
+ int event_id = DWC_PCIE_EVENT_ID(event);
+ int lane = DWC_PCIE_EVENT_LANE(event);
+ u32 ctrl;
+
+ ctrl = FIELD_PREP(DWC_PCIE_CNT_EVENT_SEL, event_id) |
+ FIELD_PREP(DWC_PCIE_CNT_LANE_SEL, lane) |
+ FIELD_PREP(DWC_PCIE_EVENT_CLEAR, DWC_PCIE_EVENT_PER_CLEAR);
if (enable)
- pci_clear_and_set_config_dword(pdev,
- ras_des_offset + DWC_PCIE_EVENT_CNT_CTL,
- DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_ON);
+ ctrl |= FIELD_PREP(DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_ON);
else
- pci_clear_and_set_config_dword(pdev,
- ras_des_offset + DWC_PCIE_EVENT_CNT_CTL,
- DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_OFF);
+ ctrl |= FIELD_PREP(DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_OFF);
+
+ pci_write_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_CTL,
+ ctrl);
}
static void dwc_pcie_pmu_time_based_event_enable(struct dwc_pcie_pmu *pcie_pmu,
@@ -276,11 +294,22 @@ static u64 dwc_pcie_pmu_read_lane_event_counter(struct perf_event *event)
{
struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
struct pci_dev *pdev = pcie_pmu->pdev;
+ int event_id = DWC_PCIE_EVENT_ID(event);
+ int lane = DWC_PCIE_EVENT_LANE(event);
u16 ras_des_offset = pcie_pmu->ras_des_offset;
- u32 val;
+ u32 val, ctrl;
+ ctrl = FIELD_PREP(DWC_PCIE_CNT_EVENT_SEL, event_id) |
+ FIELD_PREP(DWC_PCIE_CNT_LANE_SEL, lane) |
+ FIELD_PREP(DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_ON);
+ pci_write_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_CTL,
+ ctrl);
pci_read_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_DATA, &val);
+ ctrl |= FIELD_PREP(DWC_PCIE_EVENT_CLEAR, DWC_PCIE_EVENT_PER_CLEAR);
+ pci_write_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_CTL,
+ ctrl);
+
return val;
}
@@ -329,26 +358,77 @@ static void dwc_pcie_pmu_event_update(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
- u64 delta, prev, now = 0;
+ u64 delta, prev, now;
+
+ if (type == DWC_PCIE_LANE_EVENT) {
+ now = dwc_pcie_pmu_read_lane_event_counter(event) &
+ DWC_PCIE_LANE_EVENT_MAX_PERIOD;
+ local64_add(now, &event->count);
+ return;
+ }
do {
prev = local64_read(&hwc->prev_count);
-
- if (type == DWC_PCIE_LANE_EVENT)
- now = dwc_pcie_pmu_read_lane_event_counter(event);
- else if (type == DWC_PCIE_TIME_BASE_EVENT)
- now = dwc_pcie_pmu_read_time_based_counter(event);
+ now = dwc_pcie_pmu_read_time_based_counter(event);
} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);
delta = (now - prev) & DWC_PCIE_MAX_PERIOD;
- /* 32-bit counter for Lane Event Counting */
- if (type == DWC_PCIE_LANE_EVENT)
- delta &= DWC_PCIE_LANE_EVENT_MAX_PERIOD;
-
local64_add(delta, &event->count);
}
+static int dwc_pcie_pmu_validate_add_lane_event(struct perf_event *event,
+ unsigned long val_lane_events[])
+{
+ int event_id, event_nr, group;
+
+ event_id = DWC_PCIE_EVENT_ID(event);
+ event_nr = FIELD_GET(DWC_PCIE_CNT_EVENT_SEL_EVID, event_id);
+ group = FIELD_GET(DWC_PCIE_CNT_EVENT_SEL_GROUP, event_id);
+
+ if (group != DWC_PCIE_LANE_GROUP_6 && group != DWC_PCIE_LANE_GROUP_7)
+ return -EINVAL;
+
+ group -= DWC_PCIE_LANE_GROUP_6;
+
+ if (test_and_set_bit(group * DWC_PCIE_LANE_MAX_EVENTS_PER_GROUP + event_nr,
+ val_lane_events))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int dwc_pcie_pmu_validate_group(struct perf_event *event)
+{
+ struct perf_event *sibling, *leader = event->group_leader;
+ DECLARE_BITMAP(val_lane_events, 2 * DWC_PCIE_LANE_MAX_EVENTS_PER_GROUP);
+ bool time_event = false;
+ int type;
+
+ type = DWC_PCIE_EVENT_TYPE(leader);
+ if (type == DWC_PCIE_TIME_BASE_EVENT)
+ time_event = true;
+ else if (dwc_pcie_pmu_validate_add_lane_event(leader, val_lane_events))
+ return -ENOSPC;
+
+ for_each_sibling_event(sibling, leader) {
+ type = DWC_PCIE_EVENT_TYPE(sibling);
+ if (type == DWC_PCIE_TIME_BASE_EVENT) {
+ if (time_event)
+ return -ENOSPC;
+
+ time_event = true;
+ continue;
+ }
+
+ if (dwc_pcie_pmu_validate_add_lane_event(sibling, val_lane_events))
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
static int dwc_pcie_pmu_event_init(struct perf_event *event)
{
struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
@@ -367,10 +447,6 @@ static int dwc_pcie_pmu_event_init(struct perf_event *event)
if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK)
return -EINVAL;
- if (event->group_leader != event &&
- !is_software_event(event->group_leader))
- return -EINVAL;
-
for_each_sibling_event(sibling, event->group_leader) {
if (sibling->pmu != event->pmu && !is_software_event(sibling))
return -EINVAL;
@@ -385,6 +461,9 @@ static int dwc_pcie_pmu_event_init(struct perf_event *event)
return -EINVAL;
}
+ if (dwc_pcie_pmu_validate_group(event))
+ return -ENOSPC;
+
event->cpu = pcie_pmu->on_cpu;
return 0;
@@ -400,7 +479,7 @@ static void dwc_pcie_pmu_event_start(struct perf_event *event, int flags)
local64_set(&hwc->prev_count, 0);
if (type == DWC_PCIE_LANE_EVENT)
- dwc_pcie_pmu_lane_event_enable(pcie_pmu, true);
+ dwc_pcie_pmu_lane_event_enable(pcie_pmu, event, true);
else if (type == DWC_PCIE_TIME_BASE_EVENT)
dwc_pcie_pmu_time_based_event_enable(pcie_pmu, true);
}
@@ -414,12 +493,13 @@ static void dwc_pcie_pmu_event_stop(struct perf_event *event, int flags)
if (event->hw.state & PERF_HES_STOPPED)
return;
+ dwc_pcie_pmu_event_update(event);
+
if (type == DWC_PCIE_LANE_EVENT)
- dwc_pcie_pmu_lane_event_enable(pcie_pmu, false);
+ dwc_pcie_pmu_lane_event_enable(pcie_pmu, event, false);
else if (type == DWC_PCIE_TIME_BASE_EVENT)
dwc_pcie_pmu_time_based_event_enable(pcie_pmu, false);
- dwc_pcie_pmu_event_update(event);
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
@@ -434,14 +514,17 @@ static int dwc_pcie_pmu_event_add(struct perf_event *event, int flags)
u16 ras_des_offset = pcie_pmu->ras_des_offset;
u32 ctrl;
- /* one counter for each type and it is in use */
- if (pcie_pmu->event[type])
- return -ENOSPC;
-
- pcie_pmu->event[type] = event;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
if (type == DWC_PCIE_LANE_EVENT) {
+ int event_nr = FIELD_GET(DWC_PCIE_CNT_EVENT_SEL_EVID, event_id);
+ int group = FIELD_GET(DWC_PCIE_CNT_EVENT_SEL_GROUP, event_id) -
+ DWC_PCIE_LANE_GROUP_6;
+
+ if (test_and_set_bit(group * DWC_PCIE_LANE_MAX_EVENTS_PER_GROUP + event_nr,
+ pcie_pmu->lane_events))
+ return -ENOSPC;
+
/* EVENT_COUNTER_DATA_REG needs clear manually */
ctrl = FIELD_PREP(DWC_PCIE_CNT_EVENT_SEL, event_id) |
FIELD_PREP(DWC_PCIE_CNT_LANE_SEL, lane) |
@@ -450,6 +533,11 @@ static int dwc_pcie_pmu_event_add(struct perf_event *event, int flags)
pci_write_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_CTL,
ctrl);
} else if (type == DWC_PCIE_TIME_BASE_EVENT) {
+ if (pcie_pmu->time_based_event)
+ return -ENOSPC;
+
+ pcie_pmu->time_based_event = event;
+
/*
* TIME_BASED_ANAL_DATA_REG is a 64 bit register, we can safely
* use it with any manually controlled duration. And it is
@@ -478,7 +566,18 @@ static void dwc_pcie_pmu_event_del(struct perf_event *event, int flags)
dwc_pcie_pmu_event_stop(event, flags | PERF_EF_UPDATE);
perf_event_update_userpage(event);
- pcie_pmu->event[type] = NULL;
+
+ if (type == DWC_PCIE_TIME_BASE_EVENT) {
+ pcie_pmu->time_based_event = NULL;
+ } else {
+ int event_id = DWC_PCIE_EVENT_ID(event);
+ int event_nr = FIELD_GET(DWC_PCIE_CNT_EVENT_SEL_EVID, event_id);
+ int group = FIELD_GET(DWC_PCIE_CNT_EVENT_SEL_GROUP, event_id) -
+ DWC_PCIE_LANE_GROUP_6;
+
+ clear_bit(group * DWC_PCIE_LANE_MAX_EVENTS_PER_GROUP + event_nr,
+ pcie_pmu->lane_events);
+ }
}
static void dwc_pcie_pmu_remove_cpuhp_instance(void *hotplug_node)
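
The lane-event accounting above packs the two supported event groups (#6 and #7), each holding up to 256 event IDs, into one 512-bit bitmap; a selector maps to bit (group - 6) * 256 + event_nr. A sketch of just the index math, reusing the field layout from the patch:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define SEL_GROUP	GENMASK(11, 8)	/* DWC_PCIE_CNT_EVENT_SEL_GROUP */
#define SEL_EVID	GENMASK(7, 0)	/* DWC_PCIE_CNT_EVENT_SEL_EVID */

/* Map a group-6/7 lane-event selector to its slot in a 512-bit bitmap. */
static unsigned int lane_event_bit(u32 event_id)
{
	unsigned int group = FIELD_GET(SEL_GROUP, event_id) - 6;
	unsigned int evid = FIELD_GET(SEL_EVID, event_id);

	return group * 256 + evid;	/* 0..511 */
}
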
diff --git a/drivers/perf/fsl_imx9_ddr_perf.c b/drivers/perf/fsl_imx9_ddr_perf.c
index 267754fdf581..7050b48c0467 100644
--- a/drivers/perf/fsl_imx9_ddr_perf.c
+++ b/drivers/perf/fsl_imx9_ddr_perf.c
@@ -104,6 +104,11 @@ static const struct imx_ddr_devtype_data imx93_devtype_data = {
.filter_ver = DDR_PERF_AXI_FILTER_V1
};
+static const struct imx_ddr_devtype_data imx94_devtype_data = {
+ .identifier = "imx94",
+ .filter_ver = DDR_PERF_AXI_FILTER_V2
+};
+
static const struct imx_ddr_devtype_data imx95_devtype_data = {
.identifier = "imx95",
.filter_ver = DDR_PERF_AXI_FILTER_V2
@@ -122,6 +127,7 @@ static inline bool axi_filter_v2(struct ddr_pmu *pmu)
static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
{ .compatible = "fsl,imx91-ddr-pmu", .data = &imx91_devtype_data },
{ .compatible = "fsl,imx93-ddr-pmu", .data = &imx93_devtype_data },
+ { .compatible = "fsl,imx94-ddr-pmu", .data = &imx94_devtype_data },
{ .compatible = "fsl,imx95-ddr-pmu", .data = &imx95_devtype_data },
{ /* sentinel */ }
};
diff --git a/drivers/perf/fujitsu_uncore_pmu.c b/drivers/perf/fujitsu_uncore_pmu.c
new file mode 100644
index 000000000000..c3c6f56474ad
--- /dev/null
+++ b/drivers/perf/fujitsu_uncore_pmu.c
@@ -0,0 +1,613 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for the Uncore PMUs in Fujitsu chips.
+ *
+ * See Documentation/admin-guide/perf/fujitsu_uncore_pmu.rst for more details.
+ *
+ * Copyright (c) 2025 Fujitsu. All rights reserved.
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+
+/* Number of counters on each PMU */
+#define MAC_NUM_COUNTERS 8
+#define PCI_NUM_COUNTERS 8
+/* Mask for the event type field within perf_event_attr.config and EVTYPE reg */
+#define UNCORE_EVTYPE_MASK 0xFF
+
+/* Perfmon registers */
+#define PM_EVCNTR(__cntr) (0x000 + (__cntr) * 8)
+#define PM_CNTCTL(__cntr) (0x100 + (__cntr) * 8)
+#define PM_CNTCTL_RESET 0
+#define PM_EVTYPE(__cntr) (0x200 + (__cntr) * 8)
+#define PM_EVTYPE_EVSEL(__val) FIELD_GET(UNCORE_EVTYPE_MASK, __val)
+#define PM_CR 0x400
+#define PM_CR_RESET BIT(1)
+#define PM_CR_ENABLE BIT(0)
+#define PM_CNTENSET 0x410
+#define PM_CNTENSET_IDX(__cntr) BIT(__cntr)
+#define PM_CNTENCLR 0x418
+#define PM_CNTENCLR_IDX(__cntr) BIT(__cntr)
+#define PM_CNTENCLR_RESET 0xFF
+#define PM_INTENSET 0x420
+#define PM_INTENSET_IDX(__cntr) BIT(__cntr)
+#define PM_INTENCLR 0x428
+#define PM_INTENCLR_IDX(__cntr) BIT(__cntr)
+#define PM_INTENCLR_RESET 0xFF
+#define PM_OVSR 0x440
+#define PM_OVSR_OVSRCLR_RESET 0xFF
+
+enum fujitsu_uncore_pmu {
+ FUJITSU_UNCORE_PMU_MAC = 1,
+ FUJITSU_UNCORE_PMU_PCI = 2,
+};
+
+struct uncore_pmu {
+ int num_counters;
+ struct pmu pmu;
+ struct hlist_node node;
+ void __iomem *regs;
+ struct perf_event **events;
+ unsigned long *used_mask;
+ int cpu;
+ int irq;
+ struct device *dev;
+};
+
+#define to_uncore_pmu(p) (container_of(p, struct uncore_pmu, pmu))
+
+static int uncore_pmu_cpuhp_state;
+
+static void fujitsu_uncore_counter_start(struct perf_event *event)
+{
+ struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
+ int idx = event->hw.idx;
+
+ /* Initialize the hardware counter and reset prev_count */
+ local64_set(&event->hw.prev_count, 0);
+ writeq_relaxed(0, uncorepmu->regs + PM_EVCNTR(idx));
+
+ /* Set the event type */
+ writeq_relaxed(PM_EVTYPE_EVSEL(event->attr.config), uncorepmu->regs + PM_EVTYPE(idx));
+
+ /* Enable interrupt generation by this counter */
+ writeq_relaxed(PM_INTENSET_IDX(idx), uncorepmu->regs + PM_INTENSET);
+
+ /* Finally, enable the counter */
+ writeq_relaxed(PM_CNTCTL_RESET, uncorepmu->regs + PM_CNTCTL(idx));
+ writeq_relaxed(PM_CNTENSET_IDX(idx), uncorepmu->regs + PM_CNTENSET);
+}
+
+static void fujitsu_uncore_counter_stop(struct perf_event *event)
+{
+ struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
+ int idx = event->hw.idx;
+
+ /* Disable the counter */
+ writeq_relaxed(PM_CNTENCLR_IDX(idx), uncorepmu->regs + PM_CNTENCLR);
+
+ /* Disable interrupt generation by this counter */
+ writeq_relaxed(PM_INTENCLR_IDX(idx), uncorepmu->regs + PM_INTENCLR);
+}
+
+static void fujitsu_uncore_counter_update(struct perf_event *event)
+{
+ struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
+ int idx = event->hw.idx;
+ u64 prev, new;
+
+ do {
+ prev = local64_read(&event->hw.prev_count);
+ new = readq_relaxed(uncorepmu->regs + PM_EVCNTR(idx));
+ } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);
+
+ local64_add(new - prev, &event->count);
+}
+
+static inline void fujitsu_uncore_init(struct uncore_pmu *uncorepmu)
+{
+ int i;
+
+ writeq_relaxed(PM_CR_RESET, uncorepmu->regs + PM_CR);
+
+ writeq_relaxed(PM_CNTENCLR_RESET, uncorepmu->regs + PM_CNTENCLR);
+ writeq_relaxed(PM_INTENCLR_RESET, uncorepmu->regs + PM_INTENCLR);
+ writeq_relaxed(PM_OVSR_OVSRCLR_RESET, uncorepmu->regs + PM_OVSR);
+
+ for (i = 0; i < uncorepmu->num_counters; ++i) {
+ writeq_relaxed(PM_CNTCTL_RESET, uncorepmu->regs + PM_CNTCTL(i));
+ writeq_relaxed(PM_EVTYPE_EVSEL(0), uncorepmu->regs + PM_EVTYPE(i));
+ }
+ writeq_relaxed(PM_CR_ENABLE, uncorepmu->regs + PM_CR);
+}
+
+static irqreturn_t fujitsu_uncore_handle_irq(int irq_num, void *data)
+{
+ struct uncore_pmu *uncorepmu = data;
+ /* Read the overflow status register */
+ long status = readq_relaxed(uncorepmu->regs + PM_OVSR);
+ int idx;
+
+ if (status == 0)
+ return IRQ_NONE;
+
+ /* Clear the bits we read on the overflow status register */
+ writeq_relaxed(status, uncorepmu->regs + PM_OVSR);
+
+ for_each_set_bit(idx, &status, uncorepmu->num_counters) {
+ struct perf_event *event;
+
+ event = uncorepmu->events[idx];
+ if (!event)
+ continue;
+
+ fujitsu_uncore_counter_update(event);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void fujitsu_uncore_pmu_enable(struct pmu *pmu)
+{
+ writeq_relaxed(PM_CR_ENABLE, to_uncore_pmu(pmu)->regs + PM_CR);
+}
+
+static void fujitsu_uncore_pmu_disable(struct pmu *pmu)
+{
+ writeq_relaxed(0, to_uncore_pmu(pmu)->regs + PM_CR);
+}
+
+static bool fujitsu_uncore_validate_event_group(struct perf_event *event)
+{
+ struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
+ struct perf_event *leader = event->group_leader;
+ struct perf_event *sibling;
+ int counters = 1;
+
+ if (leader == event)
+ return true;
+
+ if (leader->pmu == event->pmu)
+ counters++;
+
+ for_each_sibling_event(sibling, leader) {
+ if (sibling->pmu == event->pmu)
+ counters++;
+ }
+
+ /*
+ * If the group requires more counters than the HW has, it
+ * cannot ever be scheduled.
+ */
+ return counters <= uncorepmu->num_counters;
+}
+
+static int fujitsu_uncore_event_init(struct perf_event *event)
+{
+ struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ /* Is the event for this PMU? */
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /*
+ * Sampling not supported since these events are not
+ * core-attributable.
+ */
+ if (is_sampling_event(event))
+ return -EINVAL;
+
+ /*
+ * Task mode is not available; we run the counters as socket counters,
+ * not attributable to any CPU, and therefore cannot attribute per-task.
+ */
+ if (event->cpu < 0)
+ return -EINVAL;
+
+ /* Validate the group */
+ if (!fujitsu_uncore_validate_event_group(event))
+ return -EINVAL;
+
+ hwc->idx = -1;
+
+ event->cpu = uncorepmu->cpu;
+
+ return 0;
+}
+
+static void fujitsu_uncore_event_start(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ hwc->state = 0;
+ fujitsu_uncore_counter_start(event);
+}
+
+static void fujitsu_uncore_event_stop(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (hwc->state & PERF_HES_STOPPED)
+ return;
+
+ fujitsu_uncore_counter_stop(event);
+ if (flags & PERF_EF_UPDATE)
+ fujitsu_uncore_counter_update(event);
+ hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+}
+
+static int fujitsu_uncore_event_add(struct perf_event *event, int flags)
+{
+ struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx;
+
+ /* Try to allocate a counter. */
+ idx = bitmap_find_free_region(uncorepmu->used_mask, uncorepmu->num_counters, 0);
+ if (idx < 0)
+ /* The counters are all in use. */
+ return -EAGAIN;
+
+ hwc->idx = idx;
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ uncorepmu->events[idx] = event;
+
+ if (flags & PERF_EF_START)
+ fujitsu_uncore_event_start(event, 0);
+
+ /* Propagate changes to the userspace mapping. */
+ perf_event_update_userpage(event);
+
+ return 0;
+}
+
+static void fujitsu_uncore_event_del(struct perf_event *event, int flags)
+{
+ struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ /* Stop and clean up */
+ fujitsu_uncore_event_stop(event, flags | PERF_EF_UPDATE);
+ uncorepmu->events[hwc->idx] = NULL;
+ bitmap_release_region(uncorepmu->used_mask, hwc->idx, 0);
+
+ /* Propagate changes to the userspace mapping. */
+ perf_event_update_userpage(event);
+}
+
+static void fujitsu_uncore_event_read(struct perf_event *event)
+{
+ fujitsu_uncore_counter_update(event);
+}
+
+#define UNCORE_PMU_FORMAT_ATTR(_name, _config) \
+ (&((struct dev_ext_attribute[]) { \
+ { .attr = __ATTR(_name, 0444, device_show_string, NULL), \
+ .var = (void *)_config, } \
+ })[0].attr.attr)
+
+static struct attribute *fujitsu_uncore_pmu_formats[] = {
+ UNCORE_PMU_FORMAT_ATTR(event, "config:0-7"),
+ NULL
+};
+
+static const struct attribute_group fujitsu_uncore_pmu_format_group = {
+ .name = "format",
+ .attrs = fujitsu_uncore_pmu_formats,
+};
+
+static ssize_t fujitsu_uncore_pmu_event_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+ return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
+}
+
+#define MAC_EVENT_ATTR(_name, _id) \
+ PMU_EVENT_ATTR_ID(_name, fujitsu_uncore_pmu_event_show, _id)
+
+static struct attribute *fujitsu_uncore_mac_pmu_events[] = {
+ MAC_EVENT_ATTR(cycles, 0x00),
+ MAC_EVENT_ATTR(read-count, 0x10),
+ MAC_EVENT_ATTR(read-count-request, 0x11),
+ MAC_EVENT_ATTR(read-count-return, 0x12),
+ MAC_EVENT_ATTR(read-count-request-pftgt, 0x13),
+ MAC_EVENT_ATTR(read-count-request-normal, 0x14),
+ MAC_EVENT_ATTR(read-count-return-pftgt-hit, 0x15),
+ MAC_EVENT_ATTR(read-count-return-pftgt-miss, 0x16),
+ MAC_EVENT_ATTR(read-wait, 0x17),
+ MAC_EVENT_ATTR(write-count, 0x20),
+ MAC_EVENT_ATTR(write-count-write, 0x21),
+ MAC_EVENT_ATTR(write-count-pwrite, 0x22),
+ MAC_EVENT_ATTR(memory-read-count, 0x40),
+ MAC_EVENT_ATTR(memory-write-count, 0x50),
+ MAC_EVENT_ATTR(memory-pwrite-count, 0x60),
+ MAC_EVENT_ATTR(ea-mac, 0x80),
+ MAC_EVENT_ATTR(ea-memory, 0x90),
+ MAC_EVENT_ATTR(ea-memory-mac-write, 0x92),
+ MAC_EVENT_ATTR(ea-ha, 0xa0),
+ NULL
+};
+
+#define PCI_EVENT_ATTR(_name, _id) \
+ PMU_EVENT_ATTR_ID(_name, fujitsu_uncore_pmu_event_show, _id)
+
+static struct attribute *fujitsu_uncore_pci_pmu_events[] = {
+ PCI_EVENT_ATTR(pci-port0-cycles, 0x00),
+ PCI_EVENT_ATTR(pci-port0-read-count, 0x10),
+ PCI_EVENT_ATTR(pci-port0-read-count-bus, 0x14),
+ PCI_EVENT_ATTR(pci-port0-write-count, 0x20),
+ PCI_EVENT_ATTR(pci-port0-write-count-bus, 0x24),
+ PCI_EVENT_ATTR(pci-port1-cycles, 0x40),
+ PCI_EVENT_ATTR(pci-port1-read-count, 0x50),
+ PCI_EVENT_ATTR(pci-port1-read-count-bus, 0x54),
+ PCI_EVENT_ATTR(pci-port1-write-count, 0x60),
+ PCI_EVENT_ATTR(pci-port1-write-count-bus, 0x64),
+ PCI_EVENT_ATTR(ea-pci, 0x80),
+ NULL
+};
+
+static const struct attribute_group fujitsu_uncore_mac_pmu_events_group = {
+ .name = "events",
+ .attrs = fujitsu_uncore_mac_pmu_events,
+};
+
+static const struct attribute_group fujitsu_uncore_pci_pmu_events_group = {
+ .name = "events",
+ .attrs = fujitsu_uncore_pci_pmu_events,
+};
+
+static ssize_t cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct uncore_pmu *uncorepmu = to_uncore_pmu(dev_get_drvdata(dev));
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(uncorepmu->cpu));
+}
+static DEVICE_ATTR_RO(cpumask);
+
+static struct attribute *fujitsu_uncore_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL
+};
+
+static const struct attribute_group fujitsu_uncore_pmu_cpumask_attr_group = {
+ .attrs = fujitsu_uncore_pmu_cpumask_attrs,
+};
+
+static const struct attribute_group *fujitsu_uncore_mac_pmu_attr_grps[] = {
+ &fujitsu_uncore_pmu_format_group,
+ &fujitsu_uncore_mac_pmu_events_group,
+ &fujitsu_uncore_pmu_cpumask_attr_group,
+ NULL
+};
+
+static const struct attribute_group *fujitsu_uncore_pci_pmu_attr_grps[] = {
+ &fujitsu_uncore_pmu_format_group,
+ &fujitsu_uncore_pci_pmu_events_group,
+ &fujitsu_uncore_pmu_cpumask_attr_group,
+ NULL
+};
+
+static void fujitsu_uncore_pmu_migrate(struct uncore_pmu *uncorepmu, unsigned int cpu)
+{
+ perf_pmu_migrate_context(&uncorepmu->pmu, uncorepmu->cpu, cpu);
+ irq_set_affinity(uncorepmu->irq, cpumask_of(cpu));
+ uncorepmu->cpu = cpu;
+}
+
+static int fujitsu_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
+{
+ struct uncore_pmu *uncorepmu;
+ int node;
+
+ uncorepmu = hlist_entry_safe(cpuhp_node, struct uncore_pmu, node);
+ node = dev_to_node(uncorepmu->dev);
+ if (cpu_to_node(uncorepmu->cpu) != node && cpu_to_node(cpu) == node)
+ fujitsu_uncore_pmu_migrate(uncorepmu, cpu);
+
+ return 0;
+}
+
+static int fujitsu_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
+{
+ struct uncore_pmu *uncorepmu;
+ unsigned int target;
+ int node;
+
+ uncorepmu = hlist_entry_safe(cpuhp_node, struct uncore_pmu, node);
+ if (cpu != uncorepmu->cpu)
+ return 0;
+
+ node = dev_to_node(uncorepmu->dev);
+ target = cpumask_any_and_but(cpumask_of_node(node), cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ target = cpumask_any_but(cpu_online_mask, cpu);
+
+ if (target < nr_cpu_ids)
+ fujitsu_uncore_pmu_migrate(uncorepmu, target);
+
+ return 0;
+}
+
+static int fujitsu_uncore_pmu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ unsigned long device_type = (unsigned long)device_get_match_data(dev);
+ const struct attribute_group **attr_groups;
+ struct uncore_pmu *uncorepmu;
+ struct resource *memrc;
+ size_t alloc_size;
+ char *name;
+ int ret;
+ int irq;
+ u64 uid;
+
+ ret = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &uid);
+ if (ret)
+ return dev_err_probe(dev, ret, "unable to read ACPI uid\n");
+
+ uncorepmu = devm_kzalloc(dev, sizeof(*uncorepmu), GFP_KERNEL);
+ if (!uncorepmu)
+ return -ENOMEM;
+ uncorepmu->dev = dev;
+ uncorepmu->cpu = cpumask_local_spread(0, dev_to_node(dev));
+ platform_set_drvdata(pdev, uncorepmu);
+
+ switch (device_type) {
+ case FUJITSU_UNCORE_PMU_MAC:
+ uncorepmu->num_counters = MAC_NUM_COUNTERS;
+ attr_groups = fujitsu_uncore_mac_pmu_attr_grps;
+ name = devm_kasprintf(dev, GFP_KERNEL, "mac_iod%llu_mac%llu_ch%llu",
+ (uid >> 8) & 0xF, (uid >> 4) & 0xF, uid & 0xF);
+ break;
+ case FUJITSU_UNCORE_PMU_PCI:
+ uncorepmu->num_counters = PCI_NUM_COUNTERS;
+ attr_groups = fujitsu_uncore_pci_pmu_attr_grps;
+ name = devm_kasprintf(dev, GFP_KERNEL, "pci_iod%llu_pci%llu",
+ (uid >> 4) & 0xF, uid & 0xF);
+ break;
+ default:
+ return dev_err_probe(dev, -EINVAL, "illegal device type: %lu\n", device_type);
+ }
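+ /*
+ * Illustrative: an ACPI _UID of 0x123 yields the name "mac_iod1_mac2_ch3"
+ * for a MAC PMU; a PCI PMU with _UID 0x23 becomes "pci_iod2_pci3".
+ */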
+ if (!name)
+ return -ENOMEM;
+
+ uncorepmu->pmu = (struct pmu) {
+ .parent = dev,
+ .task_ctx_nr = perf_invalid_context,
+
+ .attr_groups = attr_groups,
+
+ .pmu_enable = fujitsu_uncore_pmu_enable,
+ .pmu_disable = fujitsu_uncore_pmu_disable,
+ .event_init = fujitsu_uncore_event_init,
+ .add = fujitsu_uncore_event_add,
+ .del = fujitsu_uncore_event_del,
+ .start = fujitsu_uncore_event_start,
+ .stop = fujitsu_uncore_event_stop,
+ .read = fujitsu_uncore_event_read,
+
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
+ };
+
+ alloc_size = sizeof(uncorepmu->events[0]) * uncorepmu->num_counters;
+ uncorepmu->events = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
+ if (!uncorepmu->events)
+ return -ENOMEM;
+
+ alloc_size = sizeof(uncorepmu->used_mask[0]) * BITS_TO_LONGS(uncorepmu->num_counters);
+ uncorepmu->used_mask = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
+ if (!uncorepmu->used_mask)
+ return -ENOMEM;
+
+ uncorepmu->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &memrc);
+ if (IS_ERR(uncorepmu->regs))
+ return PTR_ERR(uncorepmu->regs);
+
+ fujitsu_uncore_init(uncorepmu);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, fujitsu_uncore_handle_irq,
+ IRQF_NOBALANCING | IRQF_NO_THREAD,
+ name, uncorepmu);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request IRQ:%d\n", irq);
+
+ ret = irq_set_affinity(irq, cpumask_of(uncorepmu->cpu));
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to set irq affinity:%d\n", irq);
+
+ uncorepmu->irq = irq;
+
+ /* Add this instance to the list used by the offline callback */
+ ret = cpuhp_state_add_instance(uncore_pmu_cpuhp_state, &uncorepmu->node);
+ if (ret)
+ return dev_err_probe(dev, ret, "Error registering hotplug");
+
+ ret = perf_pmu_register(&uncorepmu->pmu, name, -1);
+ if (ret < 0) {
+ cpuhp_state_remove_instance_nocalls(uncore_pmu_cpuhp_state, &uncorepmu->node);
+ return dev_err_probe(dev, ret, "Failed to register %s PMU\n", name);
+ }
+
+ dev_dbg(dev, "Registered %s, type: %d\n", name, uncorepmu->pmu.type);
+
+ return 0;
+}
+
+static void fujitsu_uncore_pmu_remove(struct platform_device *pdev)
+{
+ struct uncore_pmu *uncorepmu = platform_get_drvdata(pdev);
+
+ writeq_relaxed(0, uncorepmu->regs + PM_CR);
+
+ perf_pmu_unregister(&uncorepmu->pmu);
+ cpuhp_state_remove_instance_nocalls(uncore_pmu_cpuhp_state, &uncorepmu->node);
+}
+
+static const struct acpi_device_id fujitsu_uncore_pmu_acpi_match[] = {
+ { "FUJI200C", FUJITSU_UNCORE_PMU_MAC },
+ { "FUJI200D", FUJITSU_UNCORE_PMU_PCI },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, fujitsu_uncore_pmu_acpi_match);
+
+static struct platform_driver fujitsu_uncore_pmu_driver = {
+ .driver = {
+ .name = "fujitsu-uncore-pmu",
+ .acpi_match_table = fujitsu_uncore_pmu_acpi_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = fujitsu_uncore_pmu_probe,
+ .remove = fujitsu_uncore_pmu_remove,
+};
+
+static int __init fujitsu_uncore_pmu_init(void)
+{
+ int ret;
+
+ /* Install a hook to update the reader CPU in case it goes offline */
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "perf/fujitsu/uncore:online",
+ fujitsu_uncore_pmu_online_cpu,
+ fujitsu_uncore_pmu_offline_cpu);
+ if (ret < 0)
+ return ret;
+
+ uncore_pmu_cpuhp_state = ret;
+
+ ret = platform_driver_register(&fujitsu_uncore_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(uncore_pmu_cpuhp_state);
+
+ return ret;
+}
+
+static void __exit fujitsu_uncore_pmu_exit(void)
+{
+ platform_driver_unregister(&fujitsu_uncore_pmu_driver);
+ cpuhp_remove_multi_state(uncore_pmu_cpuhp_state);
+}
+
+module_init(fujitsu_uncore_pmu_init);
+module_exit(fujitsu_uncore_pmu_exit);
+
+MODULE_AUTHOR("Koichi Okuno <fj2767dz@fujitsu.com>");
+MODULE_DESCRIPTION("Fujitsu Uncore PMU driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/perf/hisilicon/Makefile b/drivers/perf/hisilicon/Makefile
index 48dcc8381ea7..186be3d02238 100644
--- a/drivers/perf/hisilicon/Makefile
+++ b/drivers/perf/hisilicon/Makefile
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o \
hisi_uncore_hha_pmu.o hisi_uncore_ddrc_pmu.o hisi_uncore_sllc_pmu.o \
- hisi_uncore_pa_pmu.o hisi_uncore_cpa_pmu.o hisi_uncore_uc_pmu.o
+ hisi_uncore_pa_pmu.o hisi_uncore_cpa_pmu.o hisi_uncore_uc_pmu.o \
+ hisi_uncore_noc_pmu.o hisi_uncore_mn_pmu.o
obj-$(CONFIG_HISI_PCIE_PMU) += hisi_pcie_pmu.o
obj-$(CONFIG_HNS3_PMU) += hns3_pmu.o
diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
index 412fc3a97963..f963e4f9e552 100644
--- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
@@ -39,6 +39,7 @@
/* L3C has 8-counters */
#define L3C_NR_COUNTERS 0x8
+#define L3C_MAX_EXT 2
#define L3C_PERF_CTRL_EN 0x10000
#define L3C_TRACETAG_EN BIT(31)
@@ -55,59 +56,172 @@
#define L3C_V1_NR_EVENTS 0x59
#define L3C_V2_NR_EVENTS 0xFF
-HISI_PMU_EVENT_ATTR_EXTRACTOR(tt_core, config1, 7, 0);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(ext, config, 17, 16);
+/*
+ * Keep config1:0-7 for backward compatibility, in case existing users
+ * hardcode config1:0-7 directly instead of parsing the sysfs format
+ * attribute.
+ */
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tt_core_deprecated, config1, 7, 0);
HISI_PMU_EVENT_ATTR_EXTRACTOR(tt_req, config1, 10, 8);
HISI_PMU_EVENT_ATTR_EXTRACTOR(datasrc_cfg, config1, 15, 11);
HISI_PMU_EVENT_ATTR_EXTRACTOR(datasrc_skt, config1, 16, 16);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tt_core, config2, 15, 0);
-static void hisi_l3c_pmu_config_req_tracetag(struct perf_event *event)
+struct hisi_l3c_pmu {
+ struct hisi_pmu l3c_pmu;
+
+ /* MMIO and IRQ resources for extension events */
+ void __iomem *ext_base[L3C_MAX_EXT];
+ int ext_irq[L3C_MAX_EXT];
+ int ext_num;
+};
+
+#define to_hisi_l3c_pmu(_l3c_pmu) \
+ container_of(_l3c_pmu, struct hisi_l3c_pmu, l3c_pmu)
+
+/*
+ * The hardware counter idx used in counter enable/disable,
+ * interrupt enable/disable and status check, etc.
+ */
+#define L3C_HW_IDX(_cntr_idx) ((_cntr_idx) % L3C_NR_COUNTERS)
+
+/* Range of ext counters in used mask. */
+#define L3C_CNTR_EXT_L(_ext) (((_ext) + 1) * L3C_NR_COUNTERS)
+#define L3C_CNTR_EXT_H(_ext) (((_ext) + 2) * L3C_NR_COUNTERS)
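+
+/*
+ * Worked example, assuming L3C_NR_COUNTERS == 8: normal events allocate
+ * idx 0..7, ext == 1 events idx 8..15 and ext == 2 events idx 16..23
+ * (see hisi_l3c_pmu_get_event_idx()). L3C_HW_IDX() folds any of these
+ * back to the per-region hardware index, e.g. idx 13 -> 5.
+ */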
+
+struct hisi_l3c_pmu_ext {
+ bool support_ext;
+};
+
+static bool support_ext(struct hisi_l3c_pmu *pmu)
+{
+ struct hisi_l3c_pmu_ext *l3c_pmu_ext = pmu->l3c_pmu.dev_info->private;
+
+ return l3c_pmu_ext->support_ext;
+}
+
+/*
+ * tt_core was extended to cover all the CPUs sharing the L3 and was moved
+ * from config1:0-7 to config2:0-15. Try the new field first and fall back
+ * to tt_core_deprecated if the user is still using the deprecated one.
+ */
+static u32 hisi_l3c_pmu_get_tt_core(struct perf_event *event)
+{
+ u32 core = hisi_get_tt_core(event);
+
+ if (core)
+ return core;
+
+ return hisi_get_tt_core_deprecated(event);
+}
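+
+/*
+ * For example (illustrative): perf stat -e <l3c_pmu>/event=0x19,tt_core=0x3/
+ * programs the new config2 field, while an old tool that writes
+ * config1:0-7 keeps working through the deprecated fallback above.
+ */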
+
+static int hisi_l3c_pmu_get_event_idx(struct perf_event *event)
{
struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+ struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
+ unsigned long *used_mask = l3c_pmu->pmu_events.used_mask;
+ int ext = hisi_get_ext(event);
+ int idx;
+
+ /*
+ * An L3C PMU that supports extension events can monitor between
+ * 2 * num_counters and 3 * num_counters events, depending on the
+ * number of ext regions the hardware provides. Bits
+ * [0, num_counters - 1] are used for normal events and bits
+ * [ext * num_counters, (ext + 1) * num_counters - 1] for extension
+ * events. The idx allocation stays unchanged for normal events, and
+ * the idx also tells whether an event is an extension event or not.
+ *
+ * Since normal and extension events live in different address
+ * spaces, save the base address in event->hw.event_base.
+ */
+ if (ext && !support_ext(hisi_l3c_pmu))
+ return -EOPNOTSUPP;
+
+ if (ext)
+ event->hw.event_base = (unsigned long)hisi_l3c_pmu->ext_base[ext - 1];
+ else
+ event->hw.event_base = (unsigned long)l3c_pmu->base;
+
+ ext -= 1;
+ idx = find_next_zero_bit(used_mask, L3C_CNTR_EXT_H(ext), L3C_CNTR_EXT_L(ext));
+
+ if (idx >= L3C_CNTR_EXT_H(ext))
+ return -EAGAIN;
+
+ set_bit(idx, used_mask);
+
+ return idx;
+}
+
+static u32 hisi_l3c_pmu_event_readl(struct hw_perf_event *hwc, u32 reg)
+{
+ return readl((void __iomem *)hwc->event_base + reg);
+}
+
+static void hisi_l3c_pmu_event_writel(struct hw_perf_event *hwc, u32 reg, u32 val)
+{
+ writel(val, (void __iomem *)hwc->event_base + reg);
+}
+
+static u64 hisi_l3c_pmu_event_readq(struct hw_perf_event *hwc, u32 reg)
+{
+ return readq((void __iomem *)hwc->event_base + reg);
+}
+
+static void hisi_l3c_pmu_event_writeq(struct hw_perf_event *hwc, u32 reg, u64 val)
+{
+ writeq(val, (void __iomem *)hwc->event_base + reg);
+}
+
+static void hisi_l3c_pmu_config_req_tracetag(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
u32 tt_req = hisi_get_tt_req(event);
if (tt_req) {
u32 val;
/* Set request-type for tracetag */
- val = readl(l3c_pmu->base + L3C_TRACETAG_CTRL);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_TRACETAG_CTRL);
val |= tt_req << L3C_TRACETAG_REQ_SHIFT;
val |= L3C_TRACETAG_REQ_EN;
- writel(val, l3c_pmu->base + L3C_TRACETAG_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_TRACETAG_CTRL, val);
/* Enable request-tracetag statistics */
- val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_PERF_CTRL);
val |= L3C_TRACETAG_EN;
- writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_PERF_CTRL, val);
}
}
static void hisi_l3c_pmu_clear_req_tracetag(struct perf_event *event)
{
- struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
u32 tt_req = hisi_get_tt_req(event);
if (tt_req) {
u32 val;
/* Clear request-type */
- val = readl(l3c_pmu->base + L3C_TRACETAG_CTRL);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_TRACETAG_CTRL);
val &= ~(tt_req << L3C_TRACETAG_REQ_SHIFT);
val &= ~L3C_TRACETAG_REQ_EN;
- writel(val, l3c_pmu->base + L3C_TRACETAG_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_TRACETAG_CTRL, val);
/* Disable request-tracetag statistics */
- val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_PERF_CTRL);
val &= ~L3C_TRACETAG_EN;
- writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_PERF_CTRL, val);
}
}
static void hisi_l3c_pmu_write_ds(struct perf_event *event, u32 ds_cfg)
{
- struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
u32 reg, reg_idx, shift, val;
- int idx = hwc->idx;
+ int idx = L3C_HW_IDX(hwc->idx);
/*
* Select the appropriate datasource register(L3C_DATSRC_TYPE0/1).
@@ -120,15 +234,15 @@ static void hisi_l3c_pmu_write_ds(struct perf_event *event, u32 ds_cfg)
reg_idx = idx % 4;
shift = 8 * reg_idx;
- val = readl(l3c_pmu->base + reg);
+ val = hisi_l3c_pmu_event_readl(hwc, reg);
val &= ~(L3C_DATSRC_MASK << shift);
val |= ds_cfg << shift;
- writel(val, l3c_pmu->base + reg);
+ hisi_l3c_pmu_event_writel(hwc, reg, val);
}
static void hisi_l3c_pmu_config_ds(struct perf_event *event)
{
- struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
u32 ds_cfg = hisi_get_datasrc_cfg(event);
u32 ds_skt = hisi_get_datasrc_skt(event);
@@ -138,15 +252,15 @@ static void hisi_l3c_pmu_config_ds(struct perf_event *event)
if (ds_skt) {
u32 val;
- val = readl(l3c_pmu->base + L3C_DATSRC_CTRL);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_DATSRC_CTRL);
val |= L3C_DATSRC_SKT_EN;
- writel(val, l3c_pmu->base + L3C_DATSRC_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_DATSRC_CTRL, val);
}
}
static void hisi_l3c_pmu_clear_ds(struct perf_event *event)
{
- struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
u32 ds_cfg = hisi_get_datasrc_cfg(event);
u32 ds_skt = hisi_get_datasrc_skt(event);
@@ -156,57 +270,63 @@ static void hisi_l3c_pmu_clear_ds(struct perf_event *event)
if (ds_skt) {
u32 val;
- val = readl(l3c_pmu->base + L3C_DATSRC_CTRL);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_DATSRC_CTRL);
val &= ~L3C_DATSRC_SKT_EN;
- writel(val, l3c_pmu->base + L3C_DATSRC_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_DATSRC_CTRL, val);
}
}
static void hisi_l3c_pmu_config_core_tracetag(struct perf_event *event)
{
- struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
- u32 core = hisi_get_tt_core(event);
+ struct hw_perf_event *hwc = &event->hw;
+ u32 core = hisi_l3c_pmu_get_tt_core(event);
if (core) {
u32 val;
/* Config and enable core information */
- writel(core, l3c_pmu->base + L3C_CORE_CTRL);
- val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_CORE_CTRL, core);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_PERF_CTRL);
val |= L3C_CORE_EN;
- writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_PERF_CTRL, val);
/* Enable core-tracetag statistics */
- val = readl(l3c_pmu->base + L3C_TRACETAG_CTRL);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_TRACETAG_CTRL);
val |= L3C_TRACETAG_CORE_EN;
- writel(val, l3c_pmu->base + L3C_TRACETAG_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_TRACETAG_CTRL, val);
}
}
static void hisi_l3c_pmu_clear_core_tracetag(struct perf_event *event)
{
- struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
- u32 core = hisi_get_tt_core(event);
+ struct hw_perf_event *hwc = &event->hw;
+ u32 core = hisi_l3c_pmu_get_tt_core(event);
if (core) {
u32 val;
/* Clear core information */
- writel(L3C_COER_NONE, l3c_pmu->base + L3C_CORE_CTRL);
- val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_CORE_CTRL, L3C_COER_NONE);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_PERF_CTRL);
val &= ~L3C_CORE_EN;
- writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_PERF_CTRL, val);
/* Disable core-tracetag statistics */
- val = readl(l3c_pmu->base + L3C_TRACETAG_CTRL);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_TRACETAG_CTRL);
val &= ~L3C_TRACETAG_CORE_EN;
- writel(val, l3c_pmu->base + L3C_TRACETAG_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_TRACETAG_CTRL, val);
}
}
+static bool hisi_l3c_pmu_have_filter(struct perf_event *event)
+{
+ return hisi_get_tt_req(event) || hisi_l3c_pmu_get_tt_core(event) ||
+ hisi_get_datasrc_cfg(event) || hisi_get_datasrc_skt(event);
+}
+
static void hisi_l3c_pmu_enable_filter(struct perf_event *event)
{
- if (event->attr.config1 != 0x0) {
+ if (hisi_l3c_pmu_have_filter(event)) {
hisi_l3c_pmu_config_req_tracetag(event);
hisi_l3c_pmu_config_core_tracetag(event);
hisi_l3c_pmu_config_ds(event);
@@ -215,38 +335,56 @@ static void hisi_l3c_pmu_enable_filter(struct perf_event *event)
static void hisi_l3c_pmu_disable_filter(struct perf_event *event)
{
- if (event->attr.config1 != 0x0) {
+ if (hisi_l3c_pmu_have_filter(event)) {
hisi_l3c_pmu_clear_ds(event);
hisi_l3c_pmu_clear_core_tracetag(event);
hisi_l3c_pmu_clear_req_tracetag(event);
}
}
+static int hisi_l3c_pmu_check_filter(struct perf_event *event)
+{
+ struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+ struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
+ int ext = hisi_get_ext(event);
+
+ if (ext < 0 || ext > hisi_l3c_pmu->ext_num)
+ return -EINVAL;
+
+ if (hisi_get_tt_core(event) && hisi_get_tt_core_deprecated(event))
+ return -EINVAL;
+
+ return 0;
+}
+
/*
* Select the counter register offset using the counter index
*/
static u32 hisi_l3c_pmu_get_counter_offset(int cntr_idx)
{
- return (L3C_CNTR0_LOWER + (cntr_idx * 8));
+ return L3C_CNTR0_LOWER + L3C_HW_IDX(cntr_idx) * 8;
}
static u64 hisi_l3c_pmu_read_counter(struct hisi_pmu *l3c_pmu,
struct hw_perf_event *hwc)
{
- return readq(l3c_pmu->base + hisi_l3c_pmu_get_counter_offset(hwc->idx));
+ return hisi_l3c_pmu_event_readq(hwc, hisi_l3c_pmu_get_counter_offset(hwc->idx));
}
static void hisi_l3c_pmu_write_counter(struct hisi_pmu *l3c_pmu,
struct hw_perf_event *hwc, u64 val)
{
- writeq(val, l3c_pmu->base + hisi_l3c_pmu_get_counter_offset(hwc->idx));
+ hisi_l3c_pmu_event_writeq(hwc, hisi_l3c_pmu_get_counter_offset(hwc->idx), val);
}
static void hisi_l3c_pmu_write_evtype(struct hisi_pmu *l3c_pmu, int idx,
u32 type)
{
+ struct hw_perf_event *hwc = &l3c_pmu->pmu_events.hw_events[idx]->hw;
u32 reg, reg_idx, shift, val;
+ idx = L3C_HW_IDX(idx);
+
/*
* Select the appropriate event select register(L3C_EVENT_TYPE0/1).
* There are 2 event select registers for the 8 hardware counters.
@@ -259,36 +397,72 @@ static void hisi_l3c_pmu_write_evtype(struct hisi_pmu *l3c_pmu, int idx,
shift = 8 * reg_idx;
/* Write event code to L3C_EVENT_TYPEx Register */
- val = readl(l3c_pmu->base + reg);
+ val = hisi_l3c_pmu_event_readl(hwc, reg);
val &= ~(L3C_EVTYPE_NONE << shift);
- val |= (type << shift);
- writel(val, l3c_pmu->base + reg);
+ val |= type << shift;
+ hisi_l3c_pmu_event_writel(hwc, reg, val);
}
static void hisi_l3c_pmu_start_counters(struct hisi_pmu *l3c_pmu)
{
+ struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
+ unsigned long *used_mask = l3c_pmu->pmu_events.used_mask;
+ unsigned long used_cntr = find_first_bit(used_mask, l3c_pmu->num_counters);
u32 val;
+ int i;
/*
- * Set perf_enable bit in L3C_PERF_CTRL register to start counting
- * for all enabled counters.
+ * Check whether any counter in use belongs to the normal range (as
+ * opposed to an ext range) and, if so, enable the normal region.
*/
- val = readl(l3c_pmu->base + L3C_PERF_CTRL);
- val |= L3C_PERF_CTRL_EN;
- writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+ if (used_cntr < L3C_NR_COUNTERS) {
+ val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+ val |= L3C_PERF_CTRL_EN;
+ writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+ }
+
+ /* Then enable each ext range that has a counter in use. */
+ for (i = 0; i < hisi_l3c_pmu->ext_num; i++) {
+ /* Skip this ext range if none of its counters is in use. */
+ used_cntr = find_next_bit(used_mask, L3C_CNTR_EXT_H(i), L3C_CNTR_EXT_L(i));
+ if (used_cntr >= L3C_CNTR_EXT_H(i))
+ continue;
+
+ val = readl(hisi_l3c_pmu->ext_base[i] + L3C_PERF_CTRL);
+ val |= L3C_PERF_CTRL_EN;
+ writel(val, hisi_l3c_pmu->ext_base[i] + L3C_PERF_CTRL);
+ }
}
static void hisi_l3c_pmu_stop_counters(struct hisi_pmu *l3c_pmu)
{
+ struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
+ unsigned long *used_mask = l3c_pmu->pmu_events.used_mask;
+ unsigned long used_cntr = find_first_bit(used_mask, l3c_pmu->num_counters);
u32 val;
+ int i;
/*
- * Clear perf_enable bit in L3C_PERF_CTRL register to stop counting
- * for all enabled counters.
+ * Check whether any counter in use belongs to the normal range (as
+ * opposed to an ext range) and, if so, stop the normal region.
*/
- val = readl(l3c_pmu->base + L3C_PERF_CTRL);
- val &= ~(L3C_PERF_CTRL_EN);
- writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+ if (used_cntr < L3C_NR_COUNTERS) {
+ val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+ val &= ~L3C_PERF_CTRL_EN;
+ writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+ }
+
+ /* Then stop each ext range that has a counter in use. */
+ for (i = 0; i < hisi_l3c_pmu->ext_num; i++) {
+ /* Skip this ext range if none of its counters is in use. */
+ used_cntr = find_next_bit(used_mask, L3C_CNTR_EXT_H(i), L3C_CNTR_EXT_L(i));
+ if (used_cntr >= L3C_CNTR_EXT_H(i))
+ continue;
+
+ val = readl(hisi_l3c_pmu->ext_base[i] + L3C_PERF_CTRL);
+ val &= ~L3C_PERF_CTRL_EN;
+ writel(val, hisi_l3c_pmu->ext_base[i] + L3C_PERF_CTRL);
+ }
}
static void hisi_l3c_pmu_enable_counter(struct hisi_pmu *l3c_pmu,
@@ -297,9 +471,9 @@ static void hisi_l3c_pmu_enable_counter(struct hisi_pmu *l3c_pmu,
u32 val;
/* Enable counter index in L3C_EVENT_CTRL register */
- val = readl(l3c_pmu->base + L3C_EVENT_CTRL);
- val |= (1 << hwc->idx);
- writel(val, l3c_pmu->base + L3C_EVENT_CTRL);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_EVENT_CTRL);
+ val |= 1 << L3C_HW_IDX(hwc->idx);
+ hisi_l3c_pmu_event_writel(hwc, L3C_EVENT_CTRL, val);
}
static void hisi_l3c_pmu_disable_counter(struct hisi_pmu *l3c_pmu,
@@ -308,9 +482,9 @@ static void hisi_l3c_pmu_disable_counter(struct hisi_pmu *l3c_pmu,
u32 val;
/* Clear counter index in L3C_EVENT_CTRL register */
- val = readl(l3c_pmu->base + L3C_EVENT_CTRL);
- val &= ~(1 << hwc->idx);
- writel(val, l3c_pmu->base + L3C_EVENT_CTRL);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_EVENT_CTRL);
+ val &= ~(1 << L3C_HW_IDX(hwc->idx));
+ hisi_l3c_pmu_event_writel(hwc, L3C_EVENT_CTRL, val);
}
static void hisi_l3c_pmu_enable_counter_int(struct hisi_pmu *l3c_pmu,
@@ -318,10 +492,10 @@ static void hisi_l3c_pmu_enable_counter_int(struct hisi_pmu *l3c_pmu,
{
u32 val;
- val = readl(l3c_pmu->base + L3C_INT_MASK);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_INT_MASK);
/* Write 0 to enable interrupt */
- val &= ~(1 << hwc->idx);
- writel(val, l3c_pmu->base + L3C_INT_MASK);
+ val &= ~(1 << L3C_HW_IDX(hwc->idx));
+ hisi_l3c_pmu_event_writel(hwc, L3C_INT_MASK, val);
}
static void hisi_l3c_pmu_disable_counter_int(struct hisi_pmu *l3c_pmu,
@@ -329,28 +503,37 @@ static void hisi_l3c_pmu_disable_counter_int(struct hisi_pmu *l3c_pmu,
{
u32 val;
- val = readl(l3c_pmu->base + L3C_INT_MASK);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_INT_MASK);
/* Write 1 to mask interrupt */
- val |= (1 << hwc->idx);
- writel(val, l3c_pmu->base + L3C_INT_MASK);
+ val |= 1 << L3C_HW_IDX(hwc->idx);
+ hisi_l3c_pmu_event_writel(hwc, L3C_INT_MASK, val);
}
static u32 hisi_l3c_pmu_get_int_status(struct hisi_pmu *l3c_pmu)
{
- return readl(l3c_pmu->base + L3C_INT_STATUS);
+ struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
+ u32 ext_int, status, status_ext = 0;
+ int i;
+
+ status = readl(l3c_pmu->base + L3C_INT_STATUS);
+
+ if (!support_ext(hisi_l3c_pmu))
+ return status;
+
+ for (i = 0; i < hisi_l3c_pmu->ext_num; i++) {
+ ext_int = readl(hisi_l3c_pmu->ext_base[i] + L3C_INT_STATUS);
+ status_ext |= ext_int << (L3C_NR_COUNTERS * i);
+ }
+
+ return status | (status_ext << L3C_NR_COUNTERS);
}
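+
+/*
+ * Illustrative layout of the combined status word, assuming 8 counters
+ * per region: bits [7:0] carry the normal interrupts, bits [15:8] ext
+ * region 0 and bits [23:16] ext region 1, matching the counter index
+ * ranges used by hisi_l3c_pmu_get_event_idx().
+ */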
static void hisi_l3c_pmu_clear_int_status(struct hisi_pmu *l3c_pmu, int idx)
{
- writel(1 << idx, l3c_pmu->base + L3C_INT_CLEAR);
-}
+ struct hw_perf_event *hwc = &l3c_pmu->pmu_events.hw_events[idx]->hw;
-static const struct acpi_device_id hisi_l3c_pmu_acpi_match[] = {
- { "HISI0213", },
- { "HISI0214", },
- {}
-};
-MODULE_DEVICE_TABLE(acpi, hisi_l3c_pmu_acpi_match);
+ hisi_l3c_pmu_event_writel(hwc, L3C_INT_CLEAR, 1 << L3C_HW_IDX(idx));
+}
static int hisi_l3c_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *l3c_pmu)
@@ -371,6 +554,10 @@ static int hisi_l3c_pmu_init_data(struct platform_device *pdev,
return -EINVAL;
}
+ l3c_pmu->dev_info = device_get_match_data(&pdev->dev);
+ if (!l3c_pmu->dev_info)
+ return -ENODEV;
+
l3c_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(l3c_pmu->base)) {
dev_err(&pdev->dev, "ioremap failed for l3c_pmu resource\n");
@@ -382,6 +569,50 @@ static int hisi_l3c_pmu_init_data(struct platform_device *pdev,
return 0;
}
+static int hisi_l3c_pmu_init_ext(struct hisi_pmu *l3c_pmu, struct platform_device *pdev)
+{
+ struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
+ int ret, irq, ext_num, i;
+ char *irqname;
+
+ /* A HiSilicon L3C PMU supporting ext should have more than one IRQ resource. */
+ ext_num = platform_irq_count(pdev);
+ if (ext_num < L3C_MAX_EXT)
+ return -ENODEV;
+
+ /*
+ * The number of ext regions supported equals the number of IRQs minus
+ * one, since one of the IRQs belongs to the normal part of the PMU.
+ */
+ hisi_l3c_pmu->ext_num = ext_num - 1;
+
+ for (i = 0; i < hisi_l3c_pmu->ext_num; i++) {
+ hisi_l3c_pmu->ext_base[i] = devm_platform_ioremap_resource(pdev, i + 1);
+ if (IS_ERR(hisi_l3c_pmu->ext_base[i]))
+ return PTR_ERR(hisi_l3c_pmu->ext_base[i]);
+
+ irq = platform_get_irq(pdev, i + 1);
+ if (irq < 0)
+ return irq;
+
+ irqname = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s ext%d",
+ dev_name(&pdev->dev), i + 1);
+ if (!irqname)
+ return -ENOMEM;
+
+ ret = devm_request_irq(&pdev->dev, irq, hisi_uncore_pmu_isr,
+ IRQF_NOBALANCING | IRQF_NO_THREAD,
+ irqname, l3c_pmu);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "Fail to request EXT IRQ: %d.\n", irq);
+
+ hisi_l3c_pmu->ext_irq[i] = irq;
+ }
+
+ return 0;
+}
+
static struct attribute *hisi_l3c_pmu_v1_format_attr[] = {
HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
NULL,
@@ -394,10 +625,11 @@ static const struct attribute_group hisi_l3c_pmu_v1_format_group = {
static struct attribute *hisi_l3c_pmu_v2_format_attr[] = {
HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
- HISI_PMU_FORMAT_ATTR(tt_core, "config1:0-7"),
+ HISI_PMU_FORMAT_ATTR(tt_core_deprecated, "config1:0-7"),
HISI_PMU_FORMAT_ATTR(tt_req, "config1:8-10"),
HISI_PMU_FORMAT_ATTR(datasrc_cfg, "config1:11-15"),
HISI_PMU_FORMAT_ATTR(datasrc_skt, "config1:16"),
+ HISI_PMU_FORMAT_ATTR(tt_core, "config2:0-15"),
NULL
};
@@ -406,6 +638,20 @@ static const struct attribute_group hisi_l3c_pmu_v2_format_group = {
.attrs = hisi_l3c_pmu_v2_format_attr,
};
+static struct attribute *hisi_l3c_pmu_v3_format_attr[] = {
+ HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
+ HISI_PMU_FORMAT_ATTR(ext, "config:16-17"),
+ HISI_PMU_FORMAT_ATTR(tt_core_deprecated, "config1:0-7"),
+ HISI_PMU_FORMAT_ATTR(tt_req, "config1:8-10"),
+ HISI_PMU_FORMAT_ATTR(tt_core, "config2:0-15"),
+ NULL
+};
+
+static const struct attribute_group hisi_l3c_pmu_v3_format_group = {
+ .name = "format",
+ .attrs = hisi_l3c_pmu_v3_format_attr,
+};
+
static struct attribute *hisi_l3c_pmu_v1_events_attr[] = {
HISI_PMU_EVENT_ATTR(rd_cpipe, 0x00),
HISI_PMU_EVENT_ATTR(wr_cpipe, 0x01),
@@ -441,6 +687,26 @@ static const struct attribute_group hisi_l3c_pmu_v2_events_group = {
.attrs = hisi_l3c_pmu_v2_events_attr,
};
+static struct attribute *hisi_l3c_pmu_v3_events_attr[] = {
+ HISI_PMU_EVENT_ATTR(rd_spipe, 0x18),
+ HISI_PMU_EVENT_ATTR(rd_hit_spipe, 0x19),
+ HISI_PMU_EVENT_ATTR(wr_spipe, 0x1a),
+ HISI_PMU_EVENT_ATTR(wr_hit_spipe, 0x1b),
+ HISI_PMU_EVENT_ATTR(io_rd_spipe, 0x1c),
+ HISI_PMU_EVENT_ATTR(io_rd_hit_spipe, 0x1d),
+ HISI_PMU_EVENT_ATTR(io_wr_spipe, 0x1e),
+ HISI_PMU_EVENT_ATTR(io_wr_hit_spipe, 0x1f),
+ HISI_PMU_EVENT_ATTR(cycles, 0x7f),
+ HISI_PMU_EVENT_ATTR(l3c_ref, 0xbc),
+ HISI_PMU_EVENT_ATTR(l3c2ring, 0xbd),
+ NULL
+};
+
+static const struct attribute_group hisi_l3c_pmu_v3_events_group = {
+ .name = "events",
+ .attrs = hisi_l3c_pmu_v3_events_attr,
+};
+
static const struct attribute_group *hisi_l3c_pmu_v1_attr_groups[] = {
&hisi_l3c_pmu_v1_format_group,
&hisi_l3c_pmu_v1_events_group,
@@ -457,9 +723,46 @@ static const struct attribute_group *hisi_l3c_pmu_v2_attr_groups[] = {
NULL
};
+static const struct attribute_group *hisi_l3c_pmu_v3_attr_groups[] = {
+ &hisi_l3c_pmu_v3_format_group,
+ &hisi_l3c_pmu_v3_events_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
+ NULL
+};
+
+static struct hisi_l3c_pmu_ext hisi_l3c_pmu_support_ext = {
+ .support_ext = true,
+};
+
+static struct hisi_l3c_pmu_ext hisi_l3c_pmu_not_support_ext = {
+ .support_ext = false,
+};
+
+static const struct hisi_pmu_dev_info hisi_l3c_pmu_v1 = {
+ .attr_groups = hisi_l3c_pmu_v1_attr_groups,
+ .counter_bits = 48,
+ .check_event = L3C_V1_NR_EVENTS,
+ .private = &hisi_l3c_pmu_not_support_ext,
+};
+
+static const struct hisi_pmu_dev_info hisi_l3c_pmu_v2 = {
+ .attr_groups = hisi_l3c_pmu_v2_attr_groups,
+ .counter_bits = 64,
+ .check_event = L3C_V2_NR_EVENTS,
+ .private = &hisi_l3c_pmu_not_support_ext,
+};
+
+static const struct hisi_pmu_dev_info hisi_l3c_pmu_v3 = {
+ .attr_groups = hisi_l3c_pmu_v3_attr_groups,
+ .counter_bits = 64,
+ .check_event = L3C_V2_NR_EVENTS,
+ .private = &hisi_l3c_pmu_support_ext,
+};
+
static const struct hisi_uncore_ops hisi_uncore_l3c_ops = {
.write_evtype = hisi_l3c_pmu_write_evtype,
- .get_event_idx = hisi_uncore_pmu_get_event_idx,
+ .get_event_idx = hisi_l3c_pmu_get_event_idx,
.start_counters = hisi_l3c_pmu_start_counters,
.stop_counters = hisi_l3c_pmu_stop_counters,
.enable_counter = hisi_l3c_pmu_enable_counter,
@@ -472,11 +775,14 @@ static const struct hisi_uncore_ops hisi_uncore_l3c_ops = {
.clear_int_status = hisi_l3c_pmu_clear_int_status,
.enable_filter = hisi_l3c_pmu_enable_filter,
.disable_filter = hisi_l3c_pmu_disable_filter,
+ .check_filter = hisi_l3c_pmu_check_filter,
};
static int hisi_l3c_pmu_dev_probe(struct platform_device *pdev,
struct hisi_pmu *l3c_pmu)
{
+ struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
+ struct hisi_l3c_pmu_ext *l3c_pmu_dev_ext;
int ret;
ret = hisi_l3c_pmu_init_data(pdev, l3c_pmu);
@@ -487,42 +793,55 @@ static int hisi_l3c_pmu_dev_probe(struct platform_device *pdev,
if (ret)
return ret;
- if (l3c_pmu->identifier >= HISI_PMU_V2) {
- l3c_pmu->counter_bits = 64;
- l3c_pmu->check_event = L3C_V2_NR_EVENTS;
- l3c_pmu->pmu_events.attr_groups = hisi_l3c_pmu_v2_attr_groups;
- } else {
- l3c_pmu->counter_bits = 48;
- l3c_pmu->check_event = L3C_V1_NR_EVENTS;
- l3c_pmu->pmu_events.attr_groups = hisi_l3c_pmu_v1_attr_groups;
- }
-
+ l3c_pmu->pmu_events.attr_groups = l3c_pmu->dev_info->attr_groups;
+ l3c_pmu->counter_bits = l3c_pmu->dev_info->counter_bits;
+ l3c_pmu->check_event = l3c_pmu->dev_info->check_event;
l3c_pmu->num_counters = L3C_NR_COUNTERS;
l3c_pmu->ops = &hisi_uncore_l3c_ops;
l3c_pmu->dev = &pdev->dev;
l3c_pmu->on_cpu = -1;
+ l3c_pmu_dev_ext = l3c_pmu->dev_info->private;
+ if (l3c_pmu_dev_ext->support_ext) {
+ ret = hisi_l3c_pmu_init_ext(l3c_pmu, pdev);
+ if (ret)
+ return ret;
+ /*
+ * The extension events have their own counters with the
+ * same number of the normal events counters. So we can
+ * have at maximum num_counters * ext events monitored.
+ */
+ l3c_pmu->num_counters += hisi_l3c_pmu->ext_num * L3C_NR_COUNTERS;
+ }
+
return 0;
}
static int hisi_l3c_pmu_probe(struct platform_device *pdev)
{
+ struct hisi_l3c_pmu *hisi_l3c_pmu;
struct hisi_pmu *l3c_pmu;
char *name;
int ret;
- l3c_pmu = devm_kzalloc(&pdev->dev, sizeof(*l3c_pmu), GFP_KERNEL);
- if (!l3c_pmu)
+ hisi_l3c_pmu = devm_kzalloc(&pdev->dev, sizeof(*hisi_l3c_pmu), GFP_KERNEL);
+ if (!hisi_l3c_pmu)
return -ENOMEM;
+ l3c_pmu = &hisi_l3c_pmu->l3c_pmu;
platform_set_drvdata(pdev, l3c_pmu);
ret = hisi_l3c_pmu_dev_probe(pdev, l3c_pmu);
if (ret)
return ret;
- name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_l3c%d",
- l3c_pmu->topo.sccl_id, l3c_pmu->topo.ccl_id);
+ if (l3c_pmu->topo.sub_id >= 0)
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_l3c%d_%d",
+ l3c_pmu->topo.sccl_id, l3c_pmu->topo.ccl_id,
+ l3c_pmu->topo.sub_id);
+ else
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_l3c%d",
+ l3c_pmu->topo.sccl_id, l3c_pmu->topo.ccl_id);
if (!name)
return -ENOMEM;
@@ -554,6 +873,14 @@ static void hisi_l3c_pmu_remove(struct platform_device *pdev)
&l3c_pmu->node);
}
+static const struct acpi_device_id hisi_l3c_pmu_acpi_match[] = {
+ { "HISI0213", (kernel_ulong_t)&hisi_l3c_pmu_v1 },
+ { "HISI0214", (kernel_ulong_t)&hisi_l3c_pmu_v2 },
+ { "HISI0215", (kernel_ulong_t)&hisi_l3c_pmu_v3 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, hisi_l3c_pmu_acpi_match);
+
static struct platform_driver hisi_l3c_pmu_driver = {
.driver = {
.name = "hisi_l3c_pmu",
@@ -564,14 +891,60 @@ static struct platform_driver hisi_l3c_pmu_driver = {
.remove = hisi_l3c_pmu_remove,
};
+static int hisi_l3c_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct hisi_pmu *l3c_pmu = hlist_entry_safe(node, struct hisi_pmu, node);
+ struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
+ int ret, i;
+
+ ret = hisi_uncore_pmu_online_cpu(cpu, node);
+ if (ret)
+ return ret;
+
+ /* Skip ext IRQ migration for L3C PMUs that don't support ext. */
+ if (!support_ext(hisi_l3c_pmu))
+ return 0;
+
+ for (i = 0; i < hisi_l3c_pmu->ext_num; i++)
+ WARN_ON(irq_set_affinity(hisi_l3c_pmu->ext_irq[i],
+ cpumask_of(l3c_pmu->on_cpu)));
+
+ return 0;
+}
+
+static int hisi_l3c_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct hisi_pmu *l3c_pmu = hlist_entry_safe(node, struct hisi_pmu, node);
+ struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
+ int ret, i;
+
+ ret = hisi_uncore_pmu_offline_cpu(cpu, node);
+ if (ret)
+ return ret;
+
+ /* If no available CPU was found, skip IRQ migration. */
+ if (l3c_pmu->on_cpu < 0)
+ return 0;
+
+ /* Skip ext IRQ migration for L3C PMUs that don't support ext. */
+ if (!support_ext(hisi_l3c_pmu))
+ return 0;
+
+ for (i = 0; i < hisi_l3c_pmu->ext_num; i++)
+ WARN_ON(irq_set_affinity(hisi_l3c_pmu->ext_irq[i],
+ cpumask_of(l3c_pmu->on_cpu)));
+
+ return 0;
+}
+
static int __init hisi_l3c_pmu_module_init(void)
{
int ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
"AP_PERF_ARM_HISI_L3_ONLINE",
- hisi_uncore_pmu_online_cpu,
- hisi_uncore_pmu_offline_cpu);
+ hisi_l3c_pmu_online_cpu,
+ hisi_l3c_pmu_offline_cpu);
if (ret) {
pr_err("L3C PMU: Error setup hotplug, ret = %d\n", ret);
return ret;
diff --git a/drivers/perf/hisilicon/hisi_uncore_mn_pmu.c b/drivers/perf/hisilicon/hisi_uncore_mn_pmu.c
new file mode 100644
index 000000000000..4df4eebe243e
--- /dev/null
+++ b/drivers/perf/hisilicon/hisi_uncore_mn_pmu.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * HiSilicon SoC MN uncore Hardware event counters support
+ *
+ * Copyright (c) 2025 HiSilicon Technologies Co., Ltd.
+ */
+#include <linux/cpuhotplug.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
+
+#include "hisi_uncore_pmu.h"
+
+/* Dynamic CPU hotplug state used by MN PMU */
+static enum cpuhp_state hisi_mn_pmu_online;
+
+/* MN register definition */
+#define HISI_MN_DYNAMIC_CTRL_REG 0x400
+#define HISI_MN_DYNAMIC_CTRL_EN BIT(0)
+#define HISI_MN_PERF_CTRL_REG 0x408
+#define HISI_MN_PERF_CTRL_EN BIT(6)
+#define HISI_MN_INT_MASK_REG 0x800
+#define HISI_MN_INT_STATUS_REG 0x808
+#define HISI_MN_INT_CLEAR_REG 0x80C
+#define HISI_MN_EVENT_CTRL_REG 0x1C00
+#define HISI_MN_VERSION_REG 0x1C04
+#define HISI_MN_EVTYPE0_REG 0x1d00
+#define HISI_MN_EVTYPE_MASK GENMASK(7, 0)
+#define HISI_MN_CNTR0_REG 0x1e00
+#define HISI_MN_EVTYPE_REGn(evtype0, n) ((evtype0) + (n) * 4)
+#define HISI_MN_CNTR_REGn(cntr0, n) ((cntr0) + (n) * 8)
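+
+/*
+ * For example (illustrative): counter 3's 8-bit event code lives in
+ * HISI_MN_EVTYPE_REGn(0x1d00, 3 / 4) == 0x1d00, and its 64-bit counter
+ * register is HISI_MN_CNTR_REGn(0x1e00, 3) == 0x1e18.
+ */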
+
+#define HISI_MN_NR_COUNTERS 4
+#define HISI_MN_TIMEOUT_US 500U
+
+struct hisi_mn_pmu_regs {
+ u32 version;
+ u32 dyn_ctrl;
+ u32 perf_ctrl;
+ u32 int_mask;
+ u32 int_clear;
+ u32 int_status;
+ u32 event_ctrl;
+ u32 event_type0;
+ u32 event_cntr0;
+};
+
+/*
+ * Each event request takes a certain amount of time to complete. When
+ * counting a latency-related event, we must wait for all outstanding
+ * requests to complete; otherwise the counter value is slightly
+ * inflated.
+ */
+static void hisi_mn_pmu_counter_flush(struct hisi_pmu *mn_pmu)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+ int ret;
+ u32 val;
+
+ val = readl(mn_pmu->base + reg_info->dyn_ctrl);
+ val |= HISI_MN_DYNAMIC_CTRL_EN;
+ writel(val, mn_pmu->base + reg_info->dyn_ctrl);
+
+ ret = readl_poll_timeout_atomic(mn_pmu->base + reg_info->dyn_ctrl,
+ val, !(val & HISI_MN_DYNAMIC_CTRL_EN),
+ 1, HISI_MN_TIMEOUT_US);
+ if (ret)
+ dev_warn(mn_pmu->dev, "Counter flush timeout\n");
+}
+
+static u64 hisi_mn_pmu_read_counter(struct hisi_pmu *mn_pmu,
+ struct hw_perf_event *hwc)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+
+ return readq(mn_pmu->base + HISI_MN_CNTR_REGn(reg_info->event_cntr0, hwc->idx));
+}
+
+static void hisi_mn_pmu_write_counter(struct hisi_pmu *mn_pmu,
+ struct hw_perf_event *hwc, u64 val)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+
+ writeq(val, mn_pmu->base + HISI_MN_CNTR_REGn(reg_info->event_cntr0, hwc->idx));
+}
+
+static void hisi_mn_pmu_write_evtype(struct hisi_pmu *mn_pmu, int idx, u32 type)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+ u32 val;
+
+ /*
+ * Select the appropriate event select register.
+ * Each 32-bit register holds the 8-bit event codes of four
+ * counters, so counter idx lives in register idx / 4.
+ */
+ val = readl(mn_pmu->base + HISI_MN_EVTYPE_REGn(reg_info->event_type0, idx / 4));
+ val &= ~(HISI_MN_EVTYPE_MASK << HISI_PMU_EVTYPE_SHIFT(idx));
+ val |= (type << HISI_PMU_EVTYPE_SHIFT(idx));
+ writel(val, mn_pmu->base + HISI_MN_EVTYPE_REGn(reg_info->event_type0, idx / 4));
+}
+
+static void hisi_mn_pmu_start_counters(struct hisi_pmu *mn_pmu)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+ u32 val;
+
+ val = readl(mn_pmu->base + reg_info->perf_ctrl);
+ val |= HISI_MN_PERF_CTRL_EN;
+ writel(val, mn_pmu->base + reg_info->perf_ctrl);
+}
+
+static void hisi_mn_pmu_stop_counters(struct hisi_pmu *mn_pmu)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+ u32 val;
+
+ val = readl(mn_pmu->base + reg_info->perf_ctrl);
+ val &= ~HISI_MN_PERF_CTRL_EN;
+ writel(val, mn_pmu->base + reg_info->perf_ctrl);
+
+ hisi_mn_pmu_counter_flush(mn_pmu);
+}
+
+static void hisi_mn_pmu_enable_counter(struct hisi_pmu *mn_pmu,
+ struct hw_perf_event *hwc)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+ u32 val;
+
+ val = readl(mn_pmu->base + reg_info->event_ctrl);
+ val |= BIT(hwc->idx);
+ writel(val, mn_pmu->base + reg_info->event_ctrl);
+}
+
+static void hisi_mn_pmu_disable_counter(struct hisi_pmu *mn_pmu,
+ struct hw_perf_event *hwc)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+ u32 val;
+
+ val = readl(mn_pmu->base + reg_info->event_ctrl);
+ val &= ~BIT(hwc->idx);
+ writel(val, mn_pmu->base + reg_info->event_ctrl);
+}
+
+static void hisi_mn_pmu_enable_counter_int(struct hisi_pmu *mn_pmu,
+ struct hw_perf_event *hwc)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+ u32 val;
+
+ val = readl(mn_pmu->base + reg_info->int_mask);
+ val &= ~BIT(hwc->idx);
+ writel(val, mn_pmu->base + reg_info->int_mask);
+}
+
+static void hisi_mn_pmu_disable_counter_int(struct hisi_pmu *mn_pmu,
+ struct hw_perf_event *hwc)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+ u32 val;
+
+ val = readl(mn_pmu->base + reg_info->int_mask);
+ val |= BIT(hwc->idx);
+ writel(val, mn_pmu->base + reg_info->int_mask);
+}
+
+static u32 hisi_mn_pmu_get_int_status(struct hisi_pmu *mn_pmu)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+
+ return readl(mn_pmu->base + reg_info->int_status);
+}
+
+static void hisi_mn_pmu_clear_int_status(struct hisi_pmu *mn_pmu, int idx)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+
+ writel(BIT(idx), mn_pmu->base + reg_info->int_clear);
+}
+
+static struct attribute *hisi_mn_pmu_format_attr[] = {
+ HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
+ NULL
+};
+
+static const struct attribute_group hisi_mn_pmu_format_group = {
+ .name = "format",
+ .attrs = hisi_mn_pmu_format_attr,
+};
+
+static struct attribute *hisi_mn_pmu_events_attr[] = {
+ HISI_PMU_EVENT_ATTR(req_eobarrier_num, 0x00),
+ HISI_PMU_EVENT_ATTR(req_ecbarrier_num, 0x01),
+ HISI_PMU_EVENT_ATTR(req_dvmop_num, 0x02),
+ HISI_PMU_EVENT_ATTR(req_dvmsync_num, 0x03),
+ HISI_PMU_EVENT_ATTR(req_retry_num, 0x04),
+ HISI_PMU_EVENT_ATTR(req_writenosnp_num, 0x05),
+ HISI_PMU_EVENT_ATTR(req_readnosnp_num, 0x06),
+ HISI_PMU_EVENT_ATTR(snp_dvm_num, 0x07),
+ HISI_PMU_EVENT_ATTR(snp_dvmsync_num, 0x08),
+ HISI_PMU_EVENT_ATTR(l3t_req_dvm_num, 0x09),
+ HISI_PMU_EVENT_ATTR(l3t_req_dvmsync_num, 0x0A),
+ HISI_PMU_EVENT_ATTR(mn_req_dvm_num, 0x0B),
+ HISI_PMU_EVENT_ATTR(mn_req_dvmsync_num, 0x0C),
+ HISI_PMU_EVENT_ATTR(pa_req_dvm_num, 0x0D),
+ HISI_PMU_EVENT_ATTR(pa_req_dvmsync_num, 0x0E),
+ HISI_PMU_EVENT_ATTR(snp_dvm_latency, 0x80),
+ HISI_PMU_EVENT_ATTR(snp_dvmsync_latency, 0x81),
+ HISI_PMU_EVENT_ATTR(l3t_req_dvm_latency, 0x82),
+ HISI_PMU_EVENT_ATTR(l3t_req_dvmsync_latency, 0x83),
+ HISI_PMU_EVENT_ATTR(mn_req_dvm_latency, 0x84),
+ HISI_PMU_EVENT_ATTR(mn_req_dvmsync_latency, 0x85),
+ HISI_PMU_EVENT_ATTR(pa_req_dvm_latency, 0x86),
+ HISI_PMU_EVENT_ATTR(pa_req_dvmsync_latency, 0x87),
+ NULL
+};
+
+static const struct attribute_group hisi_mn_pmu_events_group = {
+ .name = "events",
+ .attrs = hisi_mn_pmu_events_attr,
+};
+
+static const struct attribute_group *hisi_mn_pmu_attr_groups[] = {
+ &hisi_mn_pmu_format_group,
+ &hisi_mn_pmu_events_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
+ NULL
+};
+
+static const struct hisi_uncore_ops hisi_uncore_mn_ops = {
+ .write_evtype = hisi_mn_pmu_write_evtype,
+ .get_event_idx = hisi_uncore_pmu_get_event_idx,
+ .start_counters = hisi_mn_pmu_start_counters,
+ .stop_counters = hisi_mn_pmu_stop_counters,
+ .enable_counter = hisi_mn_pmu_enable_counter,
+ .disable_counter = hisi_mn_pmu_disable_counter,
+ .enable_counter_int = hisi_mn_pmu_enable_counter_int,
+ .disable_counter_int = hisi_mn_pmu_disable_counter_int,
+ .write_counter = hisi_mn_pmu_write_counter,
+ .read_counter = hisi_mn_pmu_read_counter,
+ .get_int_status = hisi_mn_pmu_get_int_status,
+ .clear_int_status = hisi_mn_pmu_clear_int_status,
+};
+
+static int hisi_mn_pmu_dev_init(struct platform_device *pdev,
+ struct hisi_pmu *mn_pmu)
+{
+ struct hisi_mn_pmu_regs *reg_info;
+ int ret;
+
+ hisi_uncore_pmu_init_topology(mn_pmu, &pdev->dev);
+
+ if (mn_pmu->topo.scl_id < 0)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "Failed to read MN scl id\n");
+
+ if (mn_pmu->topo.index_id < 0)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "Failed to read MN index id\n");
+
+ mn_pmu->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mn_pmu->base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(mn_pmu->base),
+ "Failed to ioremap resource\n");
+
+ ret = hisi_uncore_pmu_init_irq(mn_pmu, pdev);
+ if (ret)
+ return ret;
+
+ mn_pmu->dev_info = device_get_match_data(&pdev->dev);
+ if (!mn_pmu->dev_info)
+ return -ENODEV;
+
+ mn_pmu->pmu_events.attr_groups = mn_pmu->dev_info->attr_groups;
+ mn_pmu->counter_bits = mn_pmu->dev_info->counter_bits;
+ mn_pmu->check_event = mn_pmu->dev_info->check_event;
+ mn_pmu->num_counters = HISI_MN_NR_COUNTERS;
+ mn_pmu->ops = &hisi_uncore_mn_ops;
+ mn_pmu->dev = &pdev->dev;
+ mn_pmu->on_cpu = -1;
+
+ reg_info = mn_pmu->dev_info->private;
+ mn_pmu->identifier = readl(mn_pmu->base + reg_info->version);
+
+ return 0;
+}
+
+static void hisi_mn_pmu_remove_cpuhp(void *hotplug_node)
+{
+ cpuhp_state_remove_instance_nocalls(hisi_mn_pmu_online, hotplug_node);
+}
+
+static void hisi_mn_pmu_unregister(void *pmu)
+{
+ perf_pmu_unregister(pmu);
+}
+
+static int hisi_mn_pmu_probe(struct platform_device *pdev)
+{
+ struct hisi_pmu *mn_pmu;
+ char *name;
+ int ret;
+
+ mn_pmu = devm_kzalloc(&pdev->dev, sizeof(*mn_pmu), GFP_KERNEL);
+ if (!mn_pmu)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, mn_pmu);
+
+ ret = hisi_mn_pmu_dev_init(pdev, mn_pmu);
+ if (ret)
+ return ret;
+
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_scl%d_mn%d",
+ mn_pmu->topo.scl_id, mn_pmu->topo.index_id);
+ if (!name)
+ return -ENOMEM;
+
+ ret = cpuhp_state_add_instance(hisi_mn_pmu_online, &mn_pmu->node);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to register cpu hotplug\n");
+
+ ret = devm_add_action_or_reset(&pdev->dev, hisi_mn_pmu_remove_cpuhp, &mn_pmu->node);
+ if (ret)
+ return ret;
+
+ hisi_pmu_init(mn_pmu, THIS_MODULE);
+
+ ret = perf_pmu_register(&mn_pmu->pmu, name, -1);
+ if (ret)
+ return dev_err_probe(mn_pmu->dev, ret, "Failed to register MN PMU\n");
+
+ return devm_add_action_or_reset(&pdev->dev, hisi_mn_pmu_unregister, &mn_pmu->pmu);
+}
+
+static struct hisi_mn_pmu_regs hisi_mn_v1_pmu_regs = {
+ .version = HISI_MN_VERSION_REG,
+ .dyn_ctrl = HISI_MN_DYNAMIC_CTRL_REG,
+ .perf_ctrl = HISI_MN_PERF_CTRL_REG,
+ .int_mask = HISI_MN_INT_MASK_REG,
+ .int_clear = HISI_MN_INT_CLEAR_REG,
+ .int_status = HISI_MN_INT_STATUS_REG,
+ .event_ctrl = HISI_MN_EVENT_CTRL_REG,
+ .event_type0 = HISI_MN_EVTYPE0_REG,
+ .event_cntr0 = HISI_MN_CNTR0_REG,
+};
+
+static const struct hisi_pmu_dev_info hisi_mn_v1 = {
+ .attr_groups = hisi_mn_pmu_attr_groups,
+ .counter_bits = 48,
+ .check_event = HISI_MN_EVTYPE_MASK,
+ .private = &hisi_mn_v1_pmu_regs,
+};
+
+static const struct acpi_device_id hisi_mn_pmu_acpi_match[] = {
+ { "HISI0222", (kernel_ulong_t) &hisi_mn_v1 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, hisi_mn_pmu_acpi_match);
+
+static struct platform_driver hisi_mn_pmu_driver = {
+ .driver = {
+ .name = "hisi_mn_pmu",
+ .acpi_match_table = hisi_mn_pmu_acpi_match,
+ /*
+ * We have not worked out a safe bind/unbind process;
+ * forcefully unbinding while counting is active will
+ * lead to a kernel panic, so this is not supported yet.
+ */
+ .suppress_bind_attrs = true,
+ },
+ .probe = hisi_mn_pmu_probe,
+};
+
+static int __init hisi_mn_pmu_module_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "perf/hisi/mn:online",
+ hisi_uncore_pmu_online_cpu,
+ hisi_uncore_pmu_offline_cpu);
+ if (ret < 0) {
+ pr_err("hisi_mn_pmu: Failed to setup MN PMU hotplug: %d\n", ret);
+ return ret;
+ }
+ hisi_mn_pmu_online = ret;
+
+ ret = platform_driver_register(&hisi_mn_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(hisi_mn_pmu_online);
+
+ return ret;
+}
+module_init(hisi_mn_pmu_module_init);
+
+static void __exit hisi_mn_pmu_module_exit(void)
+{
+ platform_driver_unregister(&hisi_mn_pmu_driver);
+ cpuhp_remove_multi_state(hisi_mn_pmu_online);
+}
+module_exit(hisi_mn_pmu_module_exit);
+
+MODULE_IMPORT_NS("HISI_PMU");
+MODULE_DESCRIPTION("HiSilicon SoC MN uncore PMU driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Junhao He <hejunhao3@huawei.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_noc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_noc_pmu.c
new file mode 100644
index 000000000000..de3b9cc7aada
--- /dev/null
+++ b/drivers/perf/hisilicon/hisi_uncore_noc_pmu.c
@@ -0,0 +1,443 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for HiSilicon Uncore NoC (Network on Chip) PMU device
+ *
+ * Copyright (c) 2025 HiSilicon Technologies Co., Ltd.
+ * Author: Yicong Yang <yangyicong@hisilicon.com>
+ */
+#include <linux/bitops.h>
+#include <linux/cpuhotplug.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/sysfs.h>
+
+#include "hisi_uncore_pmu.h"
+
+#define NOC_PMU_VERSION 0x1e00
+#define NOC_PMU_GLOBAL_CTRL 0x1e04
+#define NOC_PMU_GLOBAL_CTRL_PMU_EN BIT(0)
+#define NOC_PMU_GLOBAL_CTRL_TT_EN BIT(1)
+#define NOC_PMU_CNT_INFO 0x1e08
+#define NOC_PMU_CNT_INFO_OVERFLOW(n) BIT(n)
+#define NOC_PMU_EVENT_CTRL0 0x1e20
+#define NOC_PMU_EVENT_CTRL_TYPE GENMASK(4, 0)
+/*
+ * Note that a channel value of 0x0 resets the counter, so never write
+ * it before the counter has been read out.
+ */
+#define NOC_PMU_EVENT_CTRL_CHANNEL GENMASK(10, 8)
+#define NOC_PMU_EVENT_CTRL_EN BIT(11)
+#define NOC_PMU_EVENT_COUNTER0 0x1e80
+
+#define NOC_PMU_NR_COUNTERS 4
+#define NOC_PMU_CH_DEFAULT 0x7
+
+#define NOC_PMU_EVENT_CTRLn(ctrl0, n) ((ctrl0) + 4 * (n))
+#define NOC_PMU_EVENT_CNTRn(cntr0, n) ((cntr0) + 8 * (n))
+
+HISI_PMU_EVENT_ATTR_EXTRACTOR(ch, config1, 2, 0);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tt_en, config1, 3, 3);
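+
+/*
+ * These decode the "ch" and "tt_en" format fields exposed in sysfs below,
+ * e.g. (illustrative name) perf stat -e <noc_pmu>/cycles,ch=0x4/ limits
+ * counting by channel; 0x7, the default, appears to select all channels.
+ */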
+
+/* Dynamic CPU hotplug state used by this PMU driver */
+static enum cpuhp_state hisi_noc_pmu_cpuhp_state;
+
+struct hisi_noc_pmu_regs {
+ u32 version;
+ u32 pmu_ctrl;
+ u32 event_ctrl0;
+ u32 event_cntr0;
+ u32 overflow_status;
+};
+
+/*
+ * Tracetag filtering is not per-event, so all events must use a
+ * consistent configuration. Return true only if the newcomer matches
+ * the tracetag filtering configuration of the currently scheduled
+ * events.
+ */
+static bool hisi_noc_pmu_check_global_filter(struct perf_event *curr,
+ struct perf_event *new)
+{
+ return hisi_get_tt_en(curr) == hisi_get_tt_en(new);
+}
+
+static void hisi_noc_pmu_write_evtype(struct hisi_pmu *noc_pmu, int idx, u32 type)
+{
+ struct hisi_noc_pmu_regs *reg_info = noc_pmu->dev_info->private;
+ u32 reg;
+
+ reg = readl(noc_pmu->base + NOC_PMU_EVENT_CTRLn(reg_info->event_ctrl0, idx));
+ reg &= ~NOC_PMU_EVENT_CTRL_TYPE;
+ reg |= FIELD_PREP(NOC_PMU_EVENT_CTRL_TYPE, type);
+ writel(reg, noc_pmu->base + NOC_PMU_EVENT_CTRLn(reg_info->event_ctrl0, idx));
+}
+
+static int hisi_noc_pmu_get_event_idx(struct perf_event *event)
+{
+ struct hisi_pmu *noc_pmu = to_hisi_pmu(event->pmu);
+ struct hisi_pmu_hwevents *pmu_events = &noc_pmu->pmu_events;
+ int cur_idx;
+
+ cur_idx = find_first_bit(pmu_events->used_mask, noc_pmu->num_counters);
+ if (cur_idx != noc_pmu->num_counters &&
+ !hisi_noc_pmu_check_global_filter(pmu_events->hw_events[cur_idx], event))
+ return -EAGAIN;
+
+ return hisi_uncore_pmu_get_event_idx(event);
+}
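+
+/*
+ * Consequence of the check above: once a tt_en=1 event is counting,
+ * scheduling a tt_en=0 event (or vice versa) fails with -EAGAIN until
+ * the conflicting event is removed.
+ */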
+
+static u64 hisi_noc_pmu_read_counter(struct hisi_pmu *noc_pmu,
+ struct hw_perf_event *hwc)
+{
+ struct hisi_noc_pmu_regs *reg_info = noc_pmu->dev_info->private;
+
+ return readq(noc_pmu->base + NOC_PMU_EVENT_CNTRn(reg_info->event_cntr0, hwc->idx));
+}
+
+static void hisi_noc_pmu_write_counter(struct hisi_pmu *noc_pmu,
+ struct hw_perf_event *hwc, u64 val)
+{
+ struct hisi_noc_pmu_regs *reg_info = noc_pmu->dev_info->private;
+
+ writeq(val, noc_pmu->base + NOC_PMU_EVENT_CNTRn(reg_info->event_cntr0, hwc->idx));
+}
+
+static void hisi_noc_pmu_enable_counter(struct hisi_pmu *noc_pmu,
+ struct hw_perf_event *hwc)
+{
+ struct hisi_noc_pmu_regs *reg_info = noc_pmu->dev_info->private;
+ u32 reg;
+
+ reg = readl(noc_pmu->base + NOC_PMU_EVENT_CTRLn(reg_info->event_ctrl0, hwc->idx));
+ reg |= NOC_PMU_EVENT_CTRL_EN;
+ writel(reg, noc_pmu->base + NOC_PMU_EVENT_CTRLn(reg_info->event_ctrl0, hwc->idx));
+}
+
+static void hisi_noc_pmu_disable_counter(struct hisi_pmu *noc_pmu,
+ struct hw_perf_event *hwc)
+{
+ struct hisi_noc_pmu_regs *reg_info = noc_pmu->dev_info->private;
+ u32 reg;
+
+ reg = readl(noc_pmu->base + NOC_PMU_EVENT_CTRLn(reg_info->event_ctrl0, hwc->idx));
+ reg &= ~NOC_PMU_EVENT_CTRL_EN;
+ writel(reg, noc_pmu->base + NOC_PMU_EVENT_CTRLn(reg_info->event_ctrl0, hwc->idx));
+}
+
+static void hisi_noc_pmu_enable_counter_int(struct hisi_pmu *noc_pmu,
+ struct hw_perf_event *hwc)
+{
+ /* Interrupts are not supported, so this is a stub. */
+}
+
+static void hisi_noc_pmu_disable_counter_int(struct hisi_pmu *noc_pmu,
+ struct hw_perf_event *hwc)
+{
+}
+
+static void hisi_noc_pmu_start_counters(struct hisi_pmu *noc_pmu)
+{
+ struct hisi_noc_pmu_regs *reg_info = noc_pmu->dev_info->private;
+ u32 reg;
+
+ reg = readl(noc_pmu->base + reg_info->pmu_ctrl);
+ reg |= NOC_PMU_GLOBAL_CTRL_PMU_EN;
+ writel(reg, noc_pmu->base + reg_info->pmu_ctrl);
+}
+
+static void hisi_noc_pmu_stop_counters(struct hisi_pmu *noc_pmu)
+{
+ struct hisi_noc_pmu_regs *reg_info = noc_pmu->dev_info->private;
+ u32 reg;
+
+ reg = readl(noc_pmu->base + reg_info->pmu_ctrl);
+ reg &= ~NOC_PMU_GLOBAL_CTRL_PMU_EN;
+ writel(reg, noc_pmu->base + reg_info->pmu_ctrl);
+}
+
+static u32 hisi_noc_pmu_get_int_status(struct hisi_pmu *noc_pmu)
+{
+ struct hisi_noc_pmu_regs *reg_info = noc_pmu->dev_info->private;
+
+ return readl(noc_pmu->base + reg_info->overflow_status);
+}
+
+static void hisi_noc_pmu_clear_int_status(struct hisi_pmu *noc_pmu, int idx)
+{
+ struct hisi_noc_pmu_regs *reg_info = noc_pmu->dev_info->private;
+ u32 reg;
+
+ reg = readl(noc_pmu->base + reg_info->overflow_status);
+ reg &= ~NOC_PMU_CNT_INFO_OVERFLOW(idx);
+ writel(reg, noc_pmu->base + reg_info->overflow_status);
+}
+
+static void hisi_noc_pmu_enable_filter(struct perf_event *event)
+{
+ struct hisi_pmu *noc_pmu = to_hisi_pmu(event->pmu);
+ struct hisi_noc_pmu_regs *reg_info = noc_pmu->dev_info->private;
+ struct hw_perf_event *hwc = &event->hw;
+ u32 tt_en = hisi_get_tt_en(event);
+ u32 ch = hisi_get_ch(event);
+ u32 reg;
+
+ if (!ch)
+ ch = NOC_PMU_CH_DEFAULT;
+
+ reg = readl(noc_pmu->base + NOC_PMU_EVENT_CTRLn(reg_info->event_ctrl0, hwc->idx));
+ reg &= ~NOC_PMU_EVENT_CTRL_CHANNEL;
+ reg |= FIELD_PREP(NOC_PMU_EVENT_CTRL_CHANNEL, ch);
+ writel(reg, noc_pmu->base + NOC_PMU_EVENT_CTRLn(reg_info->event_ctrl0, hwc->idx));
+
+ /*
+ * Since the tracetag filter applies to all the counters, don't touch
+ * it unless the user specifies it explicitly.
+ */
+ if (tt_en) {
+ reg = readl(noc_pmu->base + reg_info->pmu_ctrl);
+ reg |= NOC_PMU_GLOBAL_CTRL_TT_EN;
+ writel(reg, noc_pmu->base + reg_info->pmu_ctrl);
+ }
+}
+
+static void hisi_noc_pmu_disable_filter(struct perf_event *event)
+{
+ struct hisi_pmu *noc_pmu = to_hisi_pmu(event->pmu);
+ struct hisi_noc_pmu_regs *reg_info = noc_pmu->dev_info->private;
+ u32 tt_en = hisi_get_tt_en(event);
+ u32 reg;
+
+ /*
+ * If we're not the last active counter, don't touch the global
+ * tracetag configuration.
+ */
+ if (bitmap_weight(noc_pmu->pmu_events.used_mask, noc_pmu->num_counters) > 1)
+ return;
+
+ if (tt_en) {
+ reg = readl(noc_pmu->base + reg_info->pmu_ctrl);
+ reg &= ~NOC_PMU_GLOBAL_CTRL_TT_EN;
+ writel(reg, noc_pmu->base + reg_info->pmu_ctrl);
+ }
+}
+
+static const struct hisi_uncore_ops hisi_uncore_noc_ops = {
+ .write_evtype = hisi_noc_pmu_write_evtype,
+ .get_event_idx = hisi_noc_pmu_get_event_idx,
+ .read_counter = hisi_noc_pmu_read_counter,
+ .write_counter = hisi_noc_pmu_write_counter,
+ .enable_counter = hisi_noc_pmu_enable_counter,
+ .disable_counter = hisi_noc_pmu_disable_counter,
+ .enable_counter_int = hisi_noc_pmu_enable_counter_int,
+ .disable_counter_int = hisi_noc_pmu_disable_counter_int,
+ .start_counters = hisi_noc_pmu_start_counters,
+ .stop_counters = hisi_noc_pmu_stop_counters,
+ .get_int_status = hisi_noc_pmu_get_int_status,
+ .clear_int_status = hisi_noc_pmu_clear_int_status,
+ .enable_filter = hisi_noc_pmu_enable_filter,
+ .disable_filter = hisi_noc_pmu_disable_filter,
+};
+
+static struct attribute *hisi_noc_pmu_format_attrs[] = {
+ HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
+ HISI_PMU_FORMAT_ATTR(ch, "config1:0-2"),
+ HISI_PMU_FORMAT_ATTR(tt_en, "config1:3"),
+ NULL
+};
+
+static const struct attribute_group hisi_noc_pmu_format_group = {
+ .name = "format",
+ .attrs = hisi_noc_pmu_format_attrs,
+};
+
+static struct attribute *hisi_noc_pmu_events_attrs[] = {
+ HISI_PMU_EVENT_ATTR(cycles, 0x0e),
+ /* Flux on/off the ring */
+ HISI_PMU_EVENT_ATTR(ingress_flow_sum, 0x1a),
+ HISI_PMU_EVENT_ATTR(egress_flow_sum, 0x17),
+ /* Buffer full duration on/off the ring */
+ HISI_PMU_EVENT_ATTR(ingress_buf_full, 0x19),
+ HISI_PMU_EVENT_ATTR(egress_buf_full, 0x12),
+ /* Failure packets count on/off the ring */
+ HISI_PMU_EVENT_ATTR(cw_ingress_fail, 0x01),
+ HISI_PMU_EVENT_ATTR(cc_ingress_fail, 0x09),
+ HISI_PMU_EVENT_ATTR(cw_egress_fail, 0x03),
+ HISI_PMU_EVENT_ATTR(cc_egress_fail, 0x0b),
+ /* Flux of the ring */
+ HISI_PMU_EVENT_ATTR(cw_main_flow_sum, 0x05),
+ HISI_PMU_EVENT_ATTR(cc_main_flow_sum, 0x0d),
+ NULL
+};
+
+static const struct attribute_group hisi_noc_pmu_events_group = {
+ .name = "events",
+ .attrs = hisi_noc_pmu_events_attrs,
+};
+
+static const struct attribute_group *hisi_noc_pmu_attr_groups[] = {
+ &hisi_noc_pmu_format_group,
+ &hisi_noc_pmu_events_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
+ NULL
+};
+
+static int hisi_noc_pmu_dev_init(struct platform_device *pdev, struct hisi_pmu *noc_pmu)
+{
+ struct hisi_noc_pmu_regs *reg_info;
+
+ hisi_uncore_pmu_init_topology(noc_pmu, &pdev->dev);
+
+ if (noc_pmu->topo.scl_id < 0)
+ return dev_err_probe(&pdev->dev, -EINVAL, "failed to get scl-id\n");
+
+ if (noc_pmu->topo.index_id < 0)
+ return dev_err_probe(&pdev->dev, -EINVAL, "failed to get idx-id\n");
+
+ if (noc_pmu->topo.sub_id < 0)
+ return dev_err_probe(&pdev->dev, -EINVAL, "failed to get sub-id\n");
+
+ noc_pmu->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(noc_pmu->base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(noc_pmu->base),
+ "fail to remap io memory\n");
+
+ noc_pmu->dev_info = device_get_match_data(&pdev->dev);
+ if (!noc_pmu->dev_info)
+ return -ENODEV;
+
+ noc_pmu->pmu_events.attr_groups = noc_pmu->dev_info->attr_groups;
+ noc_pmu->counter_bits = noc_pmu->dev_info->counter_bits;
+ noc_pmu->check_event = noc_pmu->dev_info->check_event;
+ noc_pmu->num_counters = NOC_PMU_NR_COUNTERS;
+ noc_pmu->ops = &hisi_uncore_noc_ops;
+ noc_pmu->dev = &pdev->dev;
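+ /* No associated CPU yet; the cpuhp online callback picks one. */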
+ noc_pmu->on_cpu = -1;
+
+ reg_info = noc_pmu->dev_info->private;
+ noc_pmu->identifier = readl(noc_pmu->base + reg_info->version);
+
+ return 0;
+}
+
+static void hisi_noc_pmu_remove_cpuhp_instance(void *hotplug_node)
+{
+ cpuhp_state_remove_instance_nocalls(hisi_noc_pmu_cpuhp_state, hotplug_node);
+}
+
+static void hisi_noc_pmu_unregister_pmu(void *pmu)
+{
+ perf_pmu_unregister(pmu);
+}
+
+static int hisi_noc_pmu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hisi_pmu *noc_pmu;
+ char *name;
+ int ret;
+
+ noc_pmu = devm_kzalloc(dev, sizeof(*noc_pmu), GFP_KERNEL);
+ if (!noc_pmu)
+ return -ENOMEM;
+
+ /*
+ * The HiSilicon uncore PMU framework retrieves the common hisi_pmu
+ * device from the device's drvdata.
+ */
+ platform_set_drvdata(pdev, noc_pmu);
+
+ ret = hisi_noc_pmu_dev_init(pdev, noc_pmu);
+ if (ret)
+ return ret;
+
+ ret = cpuhp_state_add_instance(hisi_noc_pmu_cpuhp_state, &noc_pmu->node);
+ if (ret)
+ return dev_err_probe(dev, ret, "Fail to register cpuhp instance\n");
+
+ ret = devm_add_action_or_reset(dev, hisi_noc_pmu_remove_cpuhp_instance,
+ &noc_pmu->node);
+ if (ret)
+ return ret;
+
+ hisi_pmu_init(noc_pmu, THIS_MODULE);
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "hisi_scl%d_noc%d_%d",
+ noc_pmu->topo.scl_id, noc_pmu->topo.index_id,
+ noc_pmu->topo.sub_id);
+ if (!name)
+ return -ENOMEM;
+
+ ret = perf_pmu_register(&noc_pmu->pmu, name, -1);
+ if (ret)
+ return dev_err_probe(dev, ret, "Fail to register PMU\n");
+
+ return devm_add_action_or_reset(dev, hisi_noc_pmu_unregister_pmu,
+ &noc_pmu->pmu);
+}
+
+static struct hisi_noc_pmu_regs hisi_noc_v1_pmu_regs = {
+ .version = NOC_PMU_VERSION,
+ .pmu_ctrl = NOC_PMU_GLOBAL_CTRL,
+ .event_ctrl0 = NOC_PMU_EVENT_CTRL0,
+ .event_cntr0 = NOC_PMU_EVENT_COUNTER0,
+ .overflow_status = NOC_PMU_CNT_INFO,
+};
+
+static const struct hisi_pmu_dev_info hisi_noc_v1 = {
+ .attr_groups = hisi_noc_pmu_attr_groups,
+ .counter_bits = 64,
+ .check_event = NOC_PMU_EVENT_CTRL_TYPE,
+ .private = &hisi_noc_v1_pmu_regs,
+};
+
+static const struct acpi_device_id hisi_noc_pmu_ids[] = {
+ { "HISI04E0", (kernel_ulong_t) &hisi_noc_v1 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, hisi_noc_pmu_ids);
+
+static struct platform_driver hisi_noc_pmu_driver = {
+ .driver = {
+ .name = "hisi_noc_pmu",
+ .acpi_match_table = hisi_noc_pmu_ids,
+ .suppress_bind_attrs = true,
+ },
+ .probe = hisi_noc_pmu_probe,
+};
+
+static int __init hisi_noc_pmu_module_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "perf/hisi/noc:online",
+ hisi_uncore_pmu_online_cpu,
+ hisi_uncore_pmu_offline_cpu);
+ if (ret < 0) {
+ pr_err("hisi_noc_pmu: Fail to setup cpuhp callbacks, ret = %d\n", ret);
+ return ret;
+ }
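+ /* For CPUHP_AP_ONLINE_DYN, the allocated state is returned in ret; keep it for instance add/remove. */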
+ hisi_noc_pmu_cpuhp_state = ret;
+
+ ret = platform_driver_register(&hisi_noc_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(hisi_noc_pmu_cpuhp_state);
+
+ return ret;
+}
+module_init(hisi_noc_pmu_module_init);
+
+static void __exit hisi_noc_pmu_module_exit(void)
+{
+ platform_driver_unregister(&hisi_noc_pmu_driver);
+ cpuhp_remove_multi_state(hisi_noc_pmu_cpuhp_state);
+}
+module_exit(hisi_noc_pmu_module_exit);
+
+MODULE_IMPORT_NS("HISI_PMU");
+MODULE_DESCRIPTION("HiSilicon SoC Uncore NoC PMU driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yicong Yang <yangyicong@hisilicon.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index a449651f79c9..de71dcf11653 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -149,7 +149,7 @@ static void hisi_uncore_pmu_clear_event_idx(struct hisi_pmu *hisi_pmu, int idx)
clear_bit(idx, hisi_pmu->pmu_events.used_mask);
}
-static irqreturn_t hisi_uncore_pmu_isr(int irq, void *data)
+irqreturn_t hisi_uncore_pmu_isr(int irq, void *data)
{
struct hisi_pmu *hisi_pmu = data;
struct perf_event *event;
@@ -178,6 +178,7 @@ static irqreturn_t hisi_uncore_pmu_isr(int irq, void *data)
return IRQ_HANDLED;
}
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_isr, "HISI_PMU");
int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu,
struct platform_device *pdev)
@@ -234,7 +235,7 @@ int hisi_uncore_pmu_event_init(struct perf_event *event)
return -EINVAL;
hisi_pmu = to_hisi_pmu(event->pmu);
- if (event->attr.config > hisi_pmu->check_event)
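+ /* Bounds-check only the event ID field; higher config bits may carry filter options. */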
+ if ((event->attr.config & HISI_EVENTID_MASK) > hisi_pmu->check_event)
return -EINVAL;
if (hisi_pmu->on_cpu == -1)
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.h b/drivers/perf/hisilicon/hisi_uncore_pmu.h
index 777675838b80..3ffe6acda653 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.h
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.h
@@ -24,7 +24,7 @@
#define pr_fmt(fmt) "hisi_pmu: " fmt
#define HISI_PMU_V2 0x30
-#define HISI_MAX_COUNTERS 0x10
+#define HISI_MAX_COUNTERS 0x18
#define to_hisi_pmu(p) (container_of(p, struct hisi_pmu, pmu))
#define HISI_PMU_ATTR(_name, _func, _config) \
@@ -43,7 +43,8 @@
return FIELD_GET(GENMASK_ULL(hi, lo), event->attr.config); \
}
-#define HISI_GET_EVENTID(ev) (ev->hw.config_base & 0xff)
+#define HISI_EVENTID_MASK GENMASK(7, 0)
+#define HISI_GET_EVENTID(ev) ((ev)->hw.config_base & HISI_EVENTID_MASK)
#define HISI_PMU_EVTYPE_BITS 8
#define HISI_PMU_EVTYPE_SHIFT(idx) ((idx) % 4 * HISI_PMU_EVTYPE_BITS)
@@ -164,6 +165,7 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node);
ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
struct device_attribute *attr,
char *page);
+irqreturn_t hisi_uncore_pmu_isr(int irq, void *data);
int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu,
struct platform_device *pdev);
void hisi_uncore_pmu_init_topology(struct hisi_pmu *hisi_pmu, struct device *dev);
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 698de8ddf895..e255c1b069ec 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -59,10 +59,11 @@ asm volatile(ALTERNATIVE( \
#define PERF_EVENT_FLAG_USER_ACCESS BIT(SYSCTL_USER_ACCESS)
#define PERF_EVENT_FLAG_LEGACY BIT(SYSCTL_LEGACY)
-PMU_FORMAT_ATTR(event, "config:0-47");
+PMU_FORMAT_ATTR(event, "config:0-55");
PMU_FORMAT_ATTR(firmware, "config:62-63");
static bool sbi_v2_available;
+static bool sbi_v3_available;
static DEFINE_STATIC_KEY_FALSE(sbi_pmu_snapshot_available);
#define sbi_pmu_snapshot_available() \
static_branch_unlikely(&sbi_pmu_snapshot_available)
@@ -99,6 +100,7 @@ static unsigned int riscv_pmu_irq;
/* Cache the available counters in a bitmask */
static unsigned long cmask;
+static int pmu_event_find_cache(u64 config);
struct sbi_pmu_event_data {
union {
union {
@@ -298,6 +300,66 @@ static struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
},
};
+static int pmu_sbi_check_event_info(void)
+{
+ int num_events = ARRAY_SIZE(pmu_hw_event_map) + PERF_COUNT_HW_CACHE_MAX *
+ PERF_COUNT_HW_CACHE_OP_MAX * PERF_COUNT_HW_CACHE_RESULT_MAX;
+ struct riscv_pmu_event_info *event_info_shmem;
+ phys_addr_t base_addr;
+ int i, j, k, result = 0, count = 0;
+ struct sbiret ret;
+
+ event_info_shmem = kcalloc(num_events, sizeof(*event_info_shmem), GFP_KERNEL);
+ if (!event_info_shmem)
+ return -ENOMEM;
+
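+ /* Pack the standard hw events first, then the flattened cache map; the read-back below relies on the same ordering. */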
+ for (i = 0; i < ARRAY_SIZE(pmu_hw_event_map); i++)
+ event_info_shmem[count++].event_idx = pmu_hw_event_map[i].event_idx;
+
+ for (i = 0; i < ARRAY_SIZE(pmu_cache_event_map); i++) {
+ for (j = 0; j < ARRAY_SIZE(pmu_cache_event_map[i]); j++) {
+ for (k = 0; k < ARRAY_SIZE(pmu_cache_event_map[i][j]); k++)
+ event_info_shmem[count++].event_idx =
+ pmu_cache_event_map[i][j][k].event_idx;
+ }
+ }
+
+ base_addr = __pa(event_info_shmem);
+ if (IS_ENABLED(CONFIG_32BIT))
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_EVENT_GET_INFO, lower_32_bits(base_addr),
+ upper_32_bits(base_addr), count, 0, 0, 0);
+ else
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_EVENT_GET_INFO, base_addr, 0,
+ count, 0, 0, 0);
+ if (ret.error) {
+ result = -EOPNOTSUPP;
+ goto free_mem;
+ }
+
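+ /* Events whose info lacks the OUTPUT capability bit are unsupported; mark them -ENOENT. */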
+ for (i = 0; i < ARRAY_SIZE(pmu_hw_event_map); i++) {
+ if (!(event_info_shmem[i].output & RISCV_PMU_EVENT_INFO_OUTPUT_MASK))
+ pmu_hw_event_map[i].event_idx = -ENOENT;
+ }
+
+ count = ARRAY_SIZE(pmu_hw_event_map);
+
+ for (i = 0; i < ARRAY_SIZE(pmu_cache_event_map); i++) {
+ for (j = 0; j < ARRAY_SIZE(pmu_cache_event_map[i]); j++) {
+ for (k = 0; k < ARRAY_SIZE(pmu_cache_event_map[i][j]); k++) {
+ if (!(event_info_shmem[count].output &
+ RISCV_PMU_EVENT_INFO_OUTPUT_MASK))
+ pmu_cache_event_map[i][j][k].event_idx = -ENOENT;
+ count++;
+ }
+ }
+ }
+
+free_mem:
+ kfree(event_info_shmem);
+
+ return result;
+}
+
static void pmu_sbi_check_event(struct sbi_pmu_event_data *edata)
{
struct sbiret ret;
@@ -315,6 +377,15 @@ static void pmu_sbi_check_event(struct sbi_pmu_event_data *edata)
static void pmu_sbi_check_std_events(struct work_struct *work)
{
+ int ret;
+
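+ /* With SBI v3, a single shared-memory query validates all the standard events at once. */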
+ if (sbi_v3_available) {
+ ret = pmu_sbi_check_event_info();
+ if (ret)
+ pr_err("pmu_sbi_check_event_info failed with error %d\n", ret);
+ return;
+ }
+
for (int i = 0; i < ARRAY_SIZE(pmu_hw_event_map); i++)
pmu_sbi_check_event(&pmu_hw_event_map[i]);
@@ -339,9 +410,74 @@ static bool pmu_sbi_ctr_is_fw(int cidx)
if (!info)
return false;
- return (info->type == SBI_PMU_CTR_TYPE_FW) ? true : false;
+ return info->type == SBI_PMU_CTR_TYPE_FW;
}
+int riscv_pmu_get_event_info(u32 type, u64 config, u64 *econfig)
+{
+ int ret = -ENOENT;
+
+ switch (type) {
+ case PERF_TYPE_HARDWARE:
+ if (config >= PERF_COUNT_HW_MAX)
+ return -EINVAL;
+ ret = pmu_hw_event_map[config].event_idx;
+ break;
+ case PERF_TYPE_HW_CACHE:
+ ret = pmu_event_find_cache(config);
+ break;
+ case PERF_TYPE_RAW:
+ /*
+ * As per SBI v0.3 specification,
+ * -- the upper 16 bits must be unused for a hardware raw event.
+ * As per SBI v2.0 specification,
+ * -- the upper 8 bits must be unused for a hardware raw event.
+ * Bits 63:62 distinguish between the raw event types:
+ * 00 - Hardware raw event
+ * 10 - SBI firmware event
+ * 11 - RISC-V platform-specific firmware event
+ */
+ switch (config >> 62) {
+ case 0:
+ if (sbi_v3_available) {
+ /* Return an error if any of bits [56-63] are set, as the spec does not allow it */
+ if (!(config & ~RISCV_PMU_RAW_EVENT_V2_MASK)) {
+ if (econfig)
+ *econfig = config & RISCV_PMU_RAW_EVENT_V2_MASK;
+ ret = RISCV_PMU_RAW_EVENT_V2_IDX;
+ }
+ /* Return an error if any of bits [48-63] are set, as the spec does not allow it */
+ } else if (!(config & ~RISCV_PMU_RAW_EVENT_MASK)) {
+ if (econfig)
+ *econfig = config & RISCV_PMU_RAW_EVENT_MASK;
+ ret = RISCV_PMU_RAW_EVENT_IDX;
+ }
+ break;
+ case 2:
+ ret = (config & 0xFFFF) | (SBI_PMU_EVENT_TYPE_FW << 16);
+ break;
+ case 3:
+ /*
+ * For RISC-V platform-specific firmware events:
+ * Event code - 0xFFFF
+ * Event data - raw event encoding
+ */
+ ret = SBI_PMU_EVENT_TYPE_FW << 16 | RISCV_PLAT_FW_EVENT;
+ if (econfig)
+ *econfig = config & RISCV_PMU_PLAT_FW_EVENT_MASK;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(riscv_pmu_get_event_info);
+
/*
* Returns the counter width of a programmable counter and number of hardware
* counters. As we don't support heterogeneous CPUs yet, it is okay to just
@@ -507,7 +643,6 @@ static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig)
{
u32 type = event->attr.type;
u64 config = event->attr.config;
- int ret = -ENOENT;
/*
* Ensure we are finished checking standard hardware events for
@@ -515,54 +650,7 @@ static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig)
*/
flush_work(&check_std_events_work);
- switch (type) {
- case PERF_TYPE_HARDWARE:
- if (config >= PERF_COUNT_HW_MAX)
- return -EINVAL;
- ret = pmu_hw_event_map[event->attr.config].event_idx;
- break;
- case PERF_TYPE_HW_CACHE:
- ret = pmu_event_find_cache(config);
- break;
- case PERF_TYPE_RAW:
- /*
- * As per SBI specification, the upper 16 bits must be unused
- * for a hardware raw event.
- * Bits 63:62 are used to distinguish between raw events
- * 00 - Hardware raw event
- * 10 - SBI firmware events
- * 11 - Risc-V platform specific firmware event
- */
-
- switch (config >> 62) {
- case 0:
- /* Return error any bits [48-63] is set as it is not allowed by the spec */
- if (!(config & ~RISCV_PMU_RAW_EVENT_MASK)) {
- *econfig = config & RISCV_PMU_RAW_EVENT_MASK;
- ret = RISCV_PMU_RAW_EVENT_IDX;
- }
- break;
- case 2:
- ret = (config & 0xFFFF) | (SBI_PMU_EVENT_TYPE_FW << 16);
- break;
- case 3:
- /*
- * For Risc-V platform specific firmware events
- * Event code - 0xFFFF
- * Event data - raw event encoding
- */
- ret = SBI_PMU_EVENT_TYPE_FW << 16 | RISCV_PLAT_FW_EVENT;
- *econfig = config & RISCV_PMU_PLAT_FW_EVENT_MASK;
- break;
- default:
- break;
- }
- break;
- default:
- break;
- }
-
- return ret;
+ return riscv_pmu_get_event_info(type, config, econfig);
}
static void pmu_sbi_snapshot_free(struct riscv_pmu *pmu)
@@ -877,8 +965,10 @@ static inline void pmu_sbi_start_ovf_ctrs_sbi(struct cpu_hw_events *cpu_hw_evt,
for (i = 0; i < BITS_TO_LONGS(RISCV_MAX_COUNTERS); i++) {
ctr_start_mask = cpu_hw_evt->used_hw_ctrs[i] & ~ctr_ovf_mask;
/* Start all the counters that did not overflow in a single shot */
- sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, i * BITS_PER_LONG, ctr_start_mask,
- 0, 0, 0, 0);
+ if (ctr_start_mask) {
+ sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, i * BITS_PER_LONG,
+ ctr_start_mask, 0, 0, 0, 0);
+ }
}
/* Reinitialize and start all the counter that overflowed */
@@ -1452,6 +1542,9 @@ static int __init pmu_sbi_devinit(void)
if (sbi_spec_version >= sbi_mk_version(2, 0))
sbi_v2_available = true;
+ if (sbi_spec_version >= sbi_mk_version(3, 0))
+ sbi_v3_available = true;
+
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_RISCV_STARTING,
"perf/riscv/pmu:starting",
pmu_sbi_starting_cpu, pmu_sbi_dying_cpu);
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 58c911e1b2d2..678dd0452f0a 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -122,6 +122,7 @@ source "drivers/phy/renesas/Kconfig"
source "drivers/phy/rockchip/Kconfig"
source "drivers/phy/samsung/Kconfig"
source "drivers/phy/socionext/Kconfig"
+source "drivers/phy/sophgo/Kconfig"
source "drivers/phy/st/Kconfig"
source "drivers/phy/starfive/Kconfig"
source "drivers/phy/sunplus/Kconfig"
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index c670a8dac468..bfb27fb5a494 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -35,6 +35,7 @@ obj-y += allwinner/ \
rockchip/ \
samsung/ \
socionext/ \
+ sophgo/ \
st/ \
starfive/ \
sunplus/ \
diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
index 8873aed3a52a..59d38d88efb0 100644
--- a/drivers/phy/allwinner/phy-sun4i-usb.c
+++ b/drivers/phy/allwinner/phy-sun4i-usb.c
@@ -97,7 +97,6 @@
#define POLL_TIME msecs_to_jiffies(250)
struct sun4i_usb_phy_cfg {
- int num_phys;
int hsic_index;
u32 disc_thresh;
u32 hci_phy_ctl_clear;
@@ -115,6 +114,7 @@ struct sun4i_usb_phy_data {
const struct sun4i_usb_phy_cfg *cfg;
enum usb_dr_mode dr_mode;
spinlock_t reg_lock; /* guard access to phyctl reg */
+ int num_phys;
struct sun4i_usb_phy {
struct phy *phy;
void __iomem *pmu;
@@ -686,7 +686,7 @@ static struct phy *sun4i_usb_phy_xlate(struct device *dev,
{
struct sun4i_usb_phy_data *data = dev_get_drvdata(dev);
- if (args->args[0] >= data->cfg->num_phys)
+ if (args->args[0] >= data->num_phys)
return ERR_PTR(-ENODEV);
if (data->cfg->missing_phys & BIT(args->args[0]))
@@ -779,13 +779,22 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
return ret;
}
- for (i = 0; i < data->cfg->num_phys; i++) {
+ for (i = 0; i < MAX_PHYS; i++) {
struct sun4i_usb_phy *phy = data->phys + i;
char name[32];
if (data->cfg->missing_phys & BIT(i))
continue;
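+ /* The first missing reset control marks the end of the populated PHYs. */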
+ snprintf(name, sizeof(name), "usb%d_reset", i);
+ phy->reset = devm_reset_control_get(dev, name);
+ if (IS_ERR(phy->reset)) {
+ if (PTR_ERR(phy->reset) == -ENOENT)
+ break;
+ dev_err(dev, "failed to get reset %s\n", name);
+ return PTR_ERR(phy->reset);
+ }
+
snprintf(name, sizeof(name), "usb%d_vbus", i);
phy->vbus = devm_regulator_get_optional(dev, name);
if (IS_ERR(phy->vbus)) {
@@ -828,13 +837,6 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
}
}
- snprintf(name, sizeof(name), "usb%d_reset", i);
- phy->reset = devm_reset_control_get(dev, name);
- if (IS_ERR(phy->reset)) {
- dev_err(dev, "failed to get reset %s\n", name);
- return PTR_ERR(phy->reset);
- }
-
if (i || data->cfg->phy0_dual_route) { /* No pmu for musb */
snprintf(name, sizeof(name), "pmu%d", i);
phy->pmu = devm_platform_ioremap_resource_byname(pdev, name);
@@ -851,6 +853,7 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
phy->index = i;
phy_set_drvdata(phy->phy, &data->phys[i]);
}
+ data->num_phys = i;
data->id_det_irq = gpiod_to_irq(data->id_det_gpio);
if (data->id_det_irq > 0) {
@@ -901,28 +904,24 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
}
static const struct sun4i_usb_phy_cfg suniv_f1c100s_cfg = {
- .num_phys = 1,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A10,
.dedicated_clocks = true,
};
static const struct sun4i_usb_phy_cfg sun4i_a10_cfg = {
- .num_phys = 3,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A10,
.dedicated_clocks = false,
};
static const struct sun4i_usb_phy_cfg sun5i_a13_cfg = {
- .num_phys = 2,
.disc_thresh = 2,
.phyctl_offset = REG_PHYCTL_A10,
.dedicated_clocks = false,
};
static const struct sun4i_usb_phy_cfg sun6i_a31_cfg = {
- .num_phys = 3,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A10,
.dedicated_clocks = true,
@@ -930,14 +929,12 @@ static const struct sun4i_usb_phy_cfg sun6i_a31_cfg = {
};
static const struct sun4i_usb_phy_cfg sun7i_a20_cfg = {
- .num_phys = 3,
.disc_thresh = 2,
.phyctl_offset = REG_PHYCTL_A10,
.dedicated_clocks = false,
};
static const struct sun4i_usb_phy_cfg sun8i_a23_cfg = {
- .num_phys = 2,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A10,
.dedicated_clocks = true,
@@ -945,7 +942,6 @@ static const struct sun4i_usb_phy_cfg sun8i_a23_cfg = {
};
static const struct sun4i_usb_phy_cfg sun8i_a33_cfg = {
- .num_phys = 2,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
@@ -953,7 +949,6 @@ static const struct sun4i_usb_phy_cfg sun8i_a33_cfg = {
};
static const struct sun4i_usb_phy_cfg sun8i_a83t_cfg = {
- .num_phys = 3,
.hsic_index = 2,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
@@ -961,7 +956,6 @@ static const struct sun4i_usb_phy_cfg sun8i_a83t_cfg = {
};
static const struct sun4i_usb_phy_cfg sun8i_h3_cfg = {
- .num_phys = 4,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
@@ -970,7 +964,6 @@ static const struct sun4i_usb_phy_cfg sun8i_h3_cfg = {
};
static const struct sun4i_usb_phy_cfg sun8i_r40_cfg = {
- .num_phys = 3,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
@@ -979,7 +972,6 @@ static const struct sun4i_usb_phy_cfg sun8i_r40_cfg = {
};
static const struct sun4i_usb_phy_cfg sun8i_v3s_cfg = {
- .num_phys = 1,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
@@ -988,7 +980,6 @@ static const struct sun4i_usb_phy_cfg sun8i_v3s_cfg = {
};
static const struct sun4i_usb_phy_cfg sun20i_d1_cfg = {
- .num_phys = 2,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
.hci_phy_ctl_clear = PHY_CTL_SIDDQ,
@@ -997,7 +988,6 @@ static const struct sun4i_usb_phy_cfg sun20i_d1_cfg = {
};
static const struct sun4i_usb_phy_cfg sun50i_a64_cfg = {
- .num_phys = 2,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
@@ -1006,7 +996,6 @@ static const struct sun4i_usb_phy_cfg sun50i_a64_cfg = {
};
static const struct sun4i_usb_phy_cfg sun50i_h6_cfg = {
- .num_phys = 4,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
.phy0_dual_route = true,
@@ -1015,7 +1004,6 @@ static const struct sun4i_usb_phy_cfg sun50i_h6_cfg = {
};
static const struct sun4i_usb_phy_cfg sun50i_h616_cfg = {
- .num_phys = 4,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
diff --git a/drivers/phy/broadcom/phy-brcm-sata.c b/drivers/phy/broadcom/phy-brcm-sata.c
index d52dd065e862..fb69e21a0292 100644
--- a/drivers/phy/broadcom/phy-brcm-sata.c
+++ b/drivers/phy/broadcom/phy-brcm-sata.c
@@ -850,4 +850,3 @@ MODULE_DESCRIPTION("Broadcom SATA PHY driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marc Carino");
MODULE_AUTHOR("Brian Norris");
-MODULE_ALIAS("platform:phy-brcm-sata");
diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c
index 0666864c2f77..59d756a10d6c 100644
--- a/drivers/phy/broadcom/phy-brcm-usb.c
+++ b/drivers/phy/broadcom/phy-brcm-usb.c
@@ -691,7 +691,6 @@ static struct platform_driver brcm_usb_driver = {
module_platform_driver(brcm_usb_driver);
-MODULE_ALIAS("platform:brcmstb-usb-phy");
MODULE_AUTHOR("Al Cooper <acooper@broadcom.com>");
MODULE_DESCRIPTION("BRCM USB PHY driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/cadence/cdns-dphy-rx.c b/drivers/phy/cadence/cdns-dphy-rx.c
index 7729cf80a9bd..3ac80141189c 100644
--- a/drivers/phy/cadence/cdns-dphy-rx.c
+++ b/drivers/phy/cadence/cdns-dphy-rx.c
@@ -12,6 +12,7 @@
#include <linux/phy/phy.h>
#include <linux/phy/phy-mipi-dphy.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/sys_soc.h>
#define DPHY_PMA_CMN(reg) (reg)
@@ -265,7 +266,7 @@ static int cdns_dphy_rx_probe(struct platform_device *pdev)
return PTR_ERR(provider);
}
- return 0;
+ return devm_pm_runtime_enable(dev);
}
static const struct of_device_id cdns_dphy_rx_of_match[] = {
diff --git a/drivers/phy/cadence/cdns-dphy.c b/drivers/phy/cadence/cdns-dphy.c
index ed87a3970f83..d5b0e516b93c 100644
--- a/drivers/phy/cadence/cdns-dphy.c
+++ b/drivers/phy/cadence/cdns-dphy.c
@@ -30,6 +30,7 @@
#define DPHY_CMN_SSM DPHY_PMA_CMN(0x20)
#define DPHY_CMN_SSM_EN BIT(0)
+#define DPHY_CMN_SSM_CAL_WAIT_TIME GENMASK(8, 1)
#define DPHY_CMN_TX_MODE_EN BIT(9)
#define DPHY_CMN_PWM DPHY_PMA_CMN(0x40)
@@ -55,14 +56,6 @@
#define DPHY_PSM_CFG_FROM_REG BIT(0)
#define DPHY_PSM_CLK_DIV(x) ((x) << 1)
-#define DSI_HBP_FRAME_OVERHEAD 12
-#define DSI_HSA_FRAME_OVERHEAD 14
-#define DSI_HFP_FRAME_OVERHEAD 6
-#define DSI_HSS_VSS_VSE_FRAME_OVERHEAD 4
-#define DSI_BLANKING_FRAME_OVERHEAD 6
-#define DSI_NULL_FRAME_OVERHEAD 6
-#define DSI_EOT_PKT_SIZE 4
-
#define DPHY_TX_J721E_WIZ_PLL_CTRL 0xF04
#define DPHY_TX_J721E_WIZ_STATUS 0xF08
#define DPHY_TX_J721E_WIZ_RST_CTRL 0xF0C
@@ -79,6 +72,7 @@ struct cdns_dphy_cfg {
u8 pll_ipdiv;
u8 pll_opdiv;
u16 pll_fbdiv;
+ u32 hs_clk_rate;
unsigned int nlanes;
};
@@ -99,6 +93,8 @@ struct cdns_dphy_ops {
void (*set_pll_cfg)(struct cdns_dphy *dphy,
const struct cdns_dphy_cfg *cfg);
unsigned long (*get_wakeup_time_ns)(struct cdns_dphy *dphy);
+ int (*wait_for_pll_lock)(struct cdns_dphy *dphy);
+ int (*wait_for_cmn_ready)(struct cdns_dphy *dphy);
};
struct cdns_dphy {
@@ -108,6 +104,8 @@ struct cdns_dphy {
struct clk *pll_ref_clk;
const struct cdns_dphy_ops *ops;
struct phy *phy;
+ bool is_configured;
+ bool is_powered;
};
/* Order of bands is important since the index is the band number. */
@@ -116,10 +114,9 @@ static const unsigned int tx_bands[] = {
870, 950, 1000, 1200, 1400, 1600, 1800, 2000, 2200, 2500
};
-static int cdns_dsi_get_dphy_pll_cfg(struct cdns_dphy *dphy,
- struct cdns_dphy_cfg *cfg,
- struct phy_configure_opts_mipi_dphy *opts,
- unsigned int *dsi_hfp_ext)
+static int cdns_dphy_get_pll_cfg(struct cdns_dphy *dphy,
+ struct cdns_dphy_cfg *cfg,
+ struct phy_configure_opts_mipi_dphy *opts)
{
unsigned long pll_ref_hz = clk_get_rate(dphy->pll_ref_clk);
u64 dlane_bps;
@@ -139,7 +136,7 @@ static int cdns_dsi_get_dphy_pll_cfg(struct cdns_dphy *dphy,
dlane_bps = opts->hs_clk_rate;
- if (dlane_bps > 2500000000UL || dlane_bps < 160000000UL)
+ if (dlane_bps > 2500000000UL || dlane_bps < 80000000UL)
return -EINVAL;
else if (dlane_bps >= 1250000000)
cfg->pll_opdiv = 1;
@@ -149,11 +146,16 @@ static int cdns_dsi_get_dphy_pll_cfg(struct cdns_dphy *dphy,
cfg->pll_opdiv = 4;
else if (dlane_bps >= 160000000)
cfg->pll_opdiv = 8;
+ else if (dlane_bps >= 80000000)
+ cfg->pll_opdiv = 16;
cfg->pll_fbdiv = DIV_ROUND_UP_ULL(dlane_bps * 2 * cfg->pll_opdiv *
cfg->pll_ipdiv,
pll_ref_hz);
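+ /* Record the rate the PLL will actually generate with these dividers. */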
+ cfg->hs_clk_rate = div_u64((u64)pll_ref_hz * cfg->pll_fbdiv,
+ 2 * cfg->pll_opdiv * cfg->pll_ipdiv);
+
return 0;
}
@@ -191,6 +193,16 @@ static unsigned long cdns_dphy_get_wakeup_time_ns(struct cdns_dphy *dphy)
return dphy->ops->get_wakeup_time_ns(dphy);
}
+static int cdns_dphy_wait_for_pll_lock(struct cdns_dphy *dphy)
+{
+ return dphy->ops->wait_for_pll_lock ? dphy->ops->wait_for_pll_lock(dphy) : 0;
+}
+
+static int cdns_dphy_wait_for_cmn_ready(struct cdns_dphy *dphy)
+{
+ return dphy->ops->wait_for_cmn_ready ? dphy->ops->wait_for_cmn_ready(dphy) : 0;
+}
+
static unsigned long cdns_dphy_ref_get_wakeup_time_ns(struct cdns_dphy *dphy)
{
/* Default wakeup time is 800 ns (in a simulated environment). */
@@ -232,7 +244,6 @@ static unsigned long cdns_dphy_j721e_get_wakeup_time_ns(struct cdns_dphy *dphy)
static void cdns_dphy_j721e_set_pll_cfg(struct cdns_dphy *dphy,
const struct cdns_dphy_cfg *cfg)
{
- u32 status;
/*
* set the PWM and PLL Byteclk divider settings to recommended values
@@ -249,13 +260,6 @@ static void cdns_dphy_j721e_set_pll_cfg(struct cdns_dphy *dphy,
writel(DPHY_TX_J721E_WIZ_LANE_RSTB,
dphy->regs + DPHY_TX_J721E_WIZ_RST_CTRL);
-
- readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_PLL_CTRL, status,
- (status & DPHY_TX_WIZ_PLL_LOCK), 0, POLL_TIMEOUT_US);
-
- readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_STATUS, status,
- (status & DPHY_TX_WIZ_O_CMN_READY), 0,
- POLL_TIMEOUT_US);
}
static void cdns_dphy_j721e_set_psm_div(struct cdns_dphy *dphy, u8 div)
@@ -263,6 +267,23 @@ static void cdns_dphy_j721e_set_psm_div(struct cdns_dphy *dphy, u8 div)
writel(div, dphy->regs + DPHY_TX_J721E_WIZ_PSM_FREQ);
}
+static int cdns_dphy_j721e_wait_for_pll_lock(struct cdns_dphy *dphy)
+{
+ u32 status;
+
+ return readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_PLL_CTRL, status,
+ status & DPHY_TX_WIZ_PLL_LOCK, 0, POLL_TIMEOUT_US);
+}
+
+static int cdns_dphy_j721e_wait_for_cmn_ready(struct cdns_dphy *dphy)
+{
+ u32 status;
+
+ return readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_STATUS, status,
+ status & DPHY_TX_WIZ_O_CMN_READY, 0,
+ POLL_TIMEOUT_US);
+}
+
/*
* This is the reference implementation of DPHY hooks. Specific integration of
* this IP may have to re-implement some of them depending on how they decided
@@ -278,6 +299,8 @@ static const struct cdns_dphy_ops j721e_dphy_ops = {
.get_wakeup_time_ns = cdns_dphy_j721e_get_wakeup_time_ns,
.set_pll_cfg = cdns_dphy_j721e_set_pll_cfg,
.set_psm_div = cdns_dphy_j721e_set_psm_div,
+ .wait_for_pll_lock = cdns_dphy_j721e_wait_for_pll_lock,
+ .wait_for_cmn_ready = cdns_dphy_j721e_wait_for_cmn_ready,
};
static int cdns_dphy_config_from_opts(struct phy *phy,
@@ -285,18 +308,17 @@ static int cdns_dphy_config_from_opts(struct phy *phy,
struct cdns_dphy_cfg *cfg)
{
struct cdns_dphy *dphy = phy_get_drvdata(phy);
- unsigned int dsi_hfp_ext = 0;
int ret;
ret = phy_mipi_dphy_config_validate(opts);
if (ret)
return ret;
- ret = cdns_dsi_get_dphy_pll_cfg(dphy, cfg,
- opts, &dsi_hfp_ext);
+ ret = cdns_dphy_get_pll_cfg(dphy, cfg, opts);
if (ret)
return ret;
+ opts->hs_clk_rate = cfg->hs_clk_rate;
opts->wakeup = cdns_dphy_get_wakeup_time_ns(dphy) / 1000;
return 0;
@@ -334,21 +356,36 @@ static int cdns_dphy_validate(struct phy *phy, enum phy_mode mode, int submode,
static int cdns_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
{
struct cdns_dphy *dphy = phy_get_drvdata(phy);
- struct cdns_dphy_cfg cfg = { 0 };
- int ret, band_ctrl;
- unsigned int reg;
+ int ret;
- ret = cdns_dphy_config_from_opts(phy, &opts->mipi_dphy, &cfg);
- if (ret)
- return ret;
+ ret = cdns_dphy_config_from_opts(phy, &opts->mipi_dphy, &dphy->cfg);
+ if (!ret)
+ dphy->is_configured = true;
+
+ return ret;
+}
+
+static int cdns_dphy_power_on(struct phy *phy)
+{
+ struct cdns_dphy *dphy = phy_get_drvdata(phy);
+ int ret;
+ u32 reg;
+
+ if (!dphy->is_configured || dphy->is_powered)
+ return -EINVAL;
+
+ clk_prepare_enable(dphy->psm_clk);
+ clk_prepare_enable(dphy->pll_ref_clk);
/*
* Configure the internal PSM clk divider so that the DPHY has a
* 1MHz clk (or something close).
*/
ret = cdns_dphy_setup_psm(dphy);
- if (ret)
- return ret;
+ if (ret) {
+ dev_err(&dphy->phy->dev, "Failed to setup PSM with error %d\n", ret);
+ goto err_power_on;
+ }
/*
* Configure attach clk lanes to data lanes: the DPHY has 2 clk lanes
@@ -363,40 +400,61 @@ static int cdns_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
* Configure the DPHY PLL that will be used to generate the TX byte
* clk.
*/
- cdns_dphy_set_pll_cfg(dphy, &cfg);
+ cdns_dphy_set_pll_cfg(dphy, &dphy->cfg);
- band_ctrl = cdns_dphy_tx_get_band_ctrl(opts->mipi_dphy.hs_clk_rate);
- if (band_ctrl < 0)
- return band_ctrl;
+ ret = cdns_dphy_tx_get_band_ctrl(dphy->cfg.hs_clk_rate);
+ if (ret < 0) {
+ dev_err(&dphy->phy->dev, "Failed to get band control value with error %d\n", ret);
+ goto err_power_on;
+ }
- reg = FIELD_PREP(DPHY_BAND_CFG_LEFT_BAND, band_ctrl) |
- FIELD_PREP(DPHY_BAND_CFG_RIGHT_BAND, band_ctrl);
+ reg = FIELD_PREP(DPHY_BAND_CFG_LEFT_BAND, ret) |
+ FIELD_PREP(DPHY_BAND_CFG_RIGHT_BAND, ret);
writel(reg, dphy->regs + DPHY_BAND_CFG);
- return 0;
-}
+ /* Start TX state machine. */
+ reg = readl(dphy->regs + DPHY_CMN_SSM);
+ writel((reg & DPHY_CMN_SSM_CAL_WAIT_TIME) | DPHY_CMN_SSM_EN | DPHY_CMN_TX_MODE_EN,
+ dphy->regs + DPHY_CMN_SSM);
-static int cdns_dphy_power_on(struct phy *phy)
-{
- struct cdns_dphy *dphy = phy_get_drvdata(phy);
+ ret = cdns_dphy_wait_for_pll_lock(dphy);
+ if (ret) {
+ dev_err(&dphy->phy->dev, "Failed to lock PLL with error %d\n", ret);
+ goto err_power_on;
+ }
- clk_prepare_enable(dphy->psm_clk);
- clk_prepare_enable(dphy->pll_ref_clk);
+ ret = cdns_dphy_wait_for_cmn_ready(dphy);
+ if (ret) {
+ dev_err(&dphy->phy->dev, "O_CMN_READY signal failed to assert with error %d\n",
+ ret);
+ goto err_power_on;
+ }
- /* Start TX state machine. */
- writel(DPHY_CMN_SSM_EN | DPHY_CMN_TX_MODE_EN,
- dphy->regs + DPHY_CMN_SSM);
+ dphy->is_powered = true;
return 0;
+
+err_power_on:
+ clk_disable_unprepare(dphy->pll_ref_clk);
+ clk_disable_unprepare(dphy->psm_clk);
+
+ return ret;
}
static int cdns_dphy_power_off(struct phy *phy)
{
struct cdns_dphy *dphy = phy_get_drvdata(phy);
+ u32 reg;
clk_disable_unprepare(dphy->pll_ref_clk);
clk_disable_unprepare(dphy->psm_clk);
+ /* Stop TX state machine. */
+ reg = readl(dphy->regs + DPHY_CMN_SSM);
+ writel(reg & ~DPHY_CMN_SSM_EN, dphy->regs + DPHY_CMN_SSM);
+
+ dphy->is_powered = false;
+
return 0;
}
diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
index 74613382ccb0..92ab1a31646a 100644
--- a/drivers/phy/cadence/phy-cadence-sierra.c
+++ b/drivers/phy/cadence/phy-cadence-sierra.c
@@ -2919,7 +2919,6 @@ static struct platform_driver cdns_sierra_driver = {
};
module_platform_driver(cdns_sierra_driver);
-MODULE_ALIAS("platform:cdns_sierra");
MODULE_AUTHOR("Cadence Design Systems");
MODULE_DESCRIPTION("CDNS sierra phy driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/freescale/phy-fsl-lynx-28g.c b/drivers/phy/freescale/phy-fsl-lynx-28g.c
index f7994e8983c8..c20d2636c5e9 100644
--- a/drivers/phy/freescale/phy-fsl-lynx-28g.c
+++ b/drivers/phy/freescale/phy-fsl-lynx-28g.c
@@ -188,6 +188,10 @@ static struct lynx_28g_pll *lynx_28g_pll_get(struct lynx_28g_priv *priv,
return pll;
}
+ /* No PLL supports the requested mode; either the caller forgot to
+ * check lynx_28g_supports_lane_mode(), or this is a bug.
+ */
+ dev_WARN_ONCE(priv->dev, 1, "no pll for interface %s\n", phy_modes(intf));
return NULL;
}
@@ -276,8 +280,12 @@ static void lynx_28g_lane_set_sgmii(struct lynx_28g_lane *lane)
lynx_28g_lane_rmw(lane, LNaGCR0, PROTO_SEL_SGMII, PROTO_SEL_MSK);
lynx_28g_lane_rmw(lane, LNaGCR0, IF_WIDTH_10_BIT, IF_WIDTH_MSK);
- /* Switch to the PLL that works with this interface type */
+ /* Find the PLL that works with this interface type */
pll = lynx_28g_pll_get(priv, PHY_INTERFACE_MODE_SGMII);
+ if (unlikely(pll == NULL))
+ return;
+
+ /* Switch to the PLL that works with this interface type */
lynx_28g_lane_set_pll(lane, pll);
/* Choose the portion of clock net to be used on this lane */
@@ -312,8 +320,12 @@ static void lynx_28g_lane_set_10gbaser(struct lynx_28g_lane *lane)
lynx_28g_lane_rmw(lane, LNaGCR0, PROTO_SEL_XFI, PROTO_SEL_MSK);
lynx_28g_lane_rmw(lane, LNaGCR0, IF_WIDTH_20_BIT, IF_WIDTH_MSK);
- /* Switch to the PLL that works with this interface type */
+ /* Find the PLL that works with this interface type */
pll = lynx_28g_pll_get(priv, PHY_INTERFACE_MODE_10GBASER);
+ if (unlikely(pll == NULL))
+ return;
+
+ /* Switch to the PLL that works with this interface type */
lynx_28g_lane_set_pll(lane, pll);
/* Choose the portion of clock net to be used on this lane */
diff --git a/drivers/phy/hisilicon/phy-hi6220-usb.c b/drivers/phy/hisilicon/phy-hi6220-usb.c
index 97bd363dfe87..22d8d8a8dabe 100644
--- a/drivers/phy/hisilicon/phy-hi6220-usb.c
+++ b/drivers/phy/hisilicon/phy-hi6220-usb.c
@@ -161,5 +161,4 @@ static struct platform_driver hi6220_phy_driver = {
module_platform_driver(hi6220_phy_driver);
MODULE_DESCRIPTION("HISILICON HI6220 USB PHY driver");
-MODULE_ALIAS("platform:hi6220-usb-phy");
MODULE_LICENSE("GPL");
diff --git a/drivers/phy/hisilicon/phy-histb-combphy.c b/drivers/phy/hisilicon/phy-histb-combphy.c
index 7436dcae3981..9dd0bd00b4e4 100644
--- a/drivers/phy/hisilicon/phy-histb-combphy.c
+++ b/drivers/phy/hisilicon/phy-histb-combphy.c
@@ -73,7 +73,7 @@ static void nano_register_write(struct histb_combphy_priv *priv,
static int is_mode_fixed(struct histb_combphy_mode *mode)
{
- return (mode->fixed != PHY_NONE) ? true : false;
+ return mode->fixed != PHY_NONE;
}
static int histb_combphy_set_mode(struct histb_combphy_priv *priv)
diff --git a/drivers/phy/ingenic/phy-ingenic-usb.c b/drivers/phy/ingenic/phy-ingenic-usb.c
index eb2721f72a4c..7e62d46850fd 100644
--- a/drivers/phy/ingenic/phy-ingenic-usb.c
+++ b/drivers/phy/ingenic/phy-ingenic-usb.c
@@ -339,17 +339,13 @@ static int ingenic_usb_phy_probe(struct platform_device *pdev)
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk)) {
err = PTR_ERR(priv->clk);
- if (err != -EPROBE_DEFER)
- dev_err(dev, "Failed to get clock\n");
- return err;
+ return dev_err_probe(dev, err, "Failed to get clock\n");
}
priv->vcc_supply = devm_regulator_get(dev, "vcc");
if (IS_ERR(priv->vcc_supply)) {
err = PTR_ERR(priv->vcc_supply);
- if (err != -EPROBE_DEFER)
- dev_err(dev, "Failed to get regulator\n");
- return err;
+ return dev_err_probe(dev, err, "Failed to get regulator\n");
}
priv->phy = devm_phy_create(dev, NULL, &ingenic_usb_phy_ops);
diff --git a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
index e0f2acc8109c..651a12b59bc8 100644
--- a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
+++ b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
@@ -82,6 +82,14 @@ static const struct eusb2_repeater_cfg pm8550b_eusb2_cfg = {
.num_vregs = ARRAY_SIZE(pm8550b_vreg_l),
};
+static const struct eusb2_repeater_cfg pmiv0104_eusb2_cfg = {
+ /* No PMIC-specific init sequence, only board-level tuning via DT */
+ .init_tbl = (struct eusb2_repeater_init_tbl_reg[]) {},
+ .init_tbl_num = 0,
+ .vreg_list = pm8550b_vreg_l,
+ .num_vregs = ARRAY_SIZE(pm8550b_vreg_l),
+};
+
static const struct eusb2_repeater_cfg smb2360_eusb2_cfg = {
.init_tbl = smb2360_init_tbl,
.init_tbl_num = ARRAY_SIZE(smb2360_init_tbl),
@@ -127,15 +135,18 @@ static int eusb2_repeater_init(struct phy *phy)
rptr->cfg->init_tbl[i].value);
/* Override registers from devicetree values */
- if (!of_property_read_u8(np, "qcom,tune-usb2-amplitude", &val))
+ if (!of_property_read_u8(np, "qcom,tune-usb2-preem", &val))
regmap_write(regmap, base + EUSB2_TUNE_USB2_PREEM, val);
if (!of_property_read_u8(np, "qcom,tune-usb2-disc-thres", &val))
regmap_write(regmap, base + EUSB2_TUNE_HSDISC, val);
- if (!of_property_read_u8(np, "qcom,tune-usb2-preem", &val))
+ if (!of_property_read_u8(np, "qcom,tune-usb2-amplitude", &val))
regmap_write(regmap, base + EUSB2_TUNE_IUSB2, val);
+ if (!of_property_read_u8(np, "qcom,tune-res-fsdif", &val))
+ regmap_write(regmap, base + EUSB2_TUNE_RES_FSDIF, val);
+
/* Wait for status OK */
ret = regmap_read_poll_timeout(regmap, base + EUSB2_RPTR_STATUS, poll_val,
poll_val & RPTR_OK, 10, 5);
@@ -260,6 +271,10 @@ static const struct of_device_id eusb2_repeater_of_match_table[] = {
.data = &pm8550b_eusb2_cfg,
},
{
+ .compatible = "qcom,pmiv0104-eusb2-repeater",
+ .data = &pmiv0104_eusb2_cfg,
+ },
+ {
.compatible = "qcom,smb2360-eusb2-repeater",
.data = &smb2360_eusb2_cfg,
},
diff --git a/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c b/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
index 06392ed7c91b..f22c0000479f 100644
--- a/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
@@ -559,7 +559,6 @@ static struct platform_driver qcom_ipq806x_usb_phy_driver = {
module_platform_driver(qcom_ipq806x_usb_phy_driver);
-MODULE_ALIAS("platform:phy-qcom-ipq806x-usb");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
MODULE_AUTHOR("Ivan T. Ivanov <iivanov@mm-sol.com>");
diff --git a/drivers/phy/qualcomm/phy-qcom-m31-eusb2.c b/drivers/phy/qualcomm/phy-qcom-m31-eusb2.c
index bf32572566c4..0a0d2d9fc846 100644
--- a/drivers/phy/qualcomm/phy-qcom-m31-eusb2.c
+++ b/drivers/phy/qualcomm/phy-qcom-m31-eusb2.c
@@ -196,7 +196,7 @@ static int m31eusb2_phy_init(struct phy *uphy)
ret = clk_prepare_enable(phy->clk);
if (ret) {
- dev_err(&uphy->dev, "failed to enable cfg ahb clock, %d\n", ret);
+ dev_err(&uphy->dev, "failed to enable ref clock, %d\n", ret);
goto disable_repeater;
}
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index f07d097b129f..7b5af30f1d02 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -19,6 +19,7 @@
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/usb/typec.h>
+#include <linux/usb/typec_dp.h>
#include <linux/usb/typec_mux.h>
#include <drm/bridge/aux-bridge.h>
@@ -62,6 +63,12 @@
#define PHY_INIT_COMPLETE_TIMEOUT 10000
+enum qmpphy_mode {
+ QMPPHY_MODE_USB3DP = 0,
+ QMPPHY_MODE_DP_ONLY,
+ QMPPHY_MODE_USB3_ONLY,
+};
+
/* set of registers with offsets different per-PHY */
enum qphy_reg_layout {
/* PCS registers */
@@ -1844,15 +1851,17 @@ struct qmp_combo {
struct mutex phy_mutex;
int init_count;
+ enum qmpphy_mode qmpphy_mode;
struct phy *usb_phy;
- enum phy_mode mode;
+ enum phy_mode phy_mode;
unsigned int usb_init_count;
struct phy *dp_phy;
unsigned int dp_aux_cfg;
struct phy_configure_opts_dp dp_opts;
unsigned int dp_init_count;
+ bool dp_powered_on;
struct clk_fixed_rate pipe_clk_fixed;
struct clk_hw dp_link_hw;
@@ -1860,6 +1869,8 @@ struct qmp_combo {
struct typec_switch_dev *sw;
enum typec_orientation orientation;
+
+ struct typec_mux_dev *mux;
};
static void qmp_v3_dp_aux_init(struct qmp_combo *qmp);
@@ -3036,12 +3047,33 @@ static int qmp_combo_com_init(struct qmp_combo *qmp, bool force)
if (qmp->orientation == TYPEC_ORIENTATION_REVERSE)
val |= SW_PORTSELECT_VAL;
writel(val, com + QPHY_V3_DP_COM_TYPEC_CTRL);
- writel(USB3_MODE | DP_MODE, com + QPHY_V3_DP_COM_PHY_MODE_CTRL);
- /* bring both QMP USB and QMP DP PHYs PCS block out of reset */
- qphy_clrbits(com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
- SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
- SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
+ switch (qmp->qmpphy_mode) {
+ case QMPPHY_MODE_USB3DP:
+ writel(USB3_MODE | DP_MODE, com + QPHY_V3_DP_COM_PHY_MODE_CTRL);
+
+ /* bring both QMP USB and QMP DP PHYs PCS block out of reset */
+ qphy_clrbits(com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
+ SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
+ SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
+ break;
+
+ case QMPPHY_MODE_DP_ONLY:
+ writel(DP_MODE, com + QPHY_V3_DP_COM_PHY_MODE_CTRL);
+
+ /* bring QMP DP PHY PCS block out of reset */
+ qphy_clrbits(com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
+ SW_DPPHY_RESET_MUX | SW_DPPHY_RESET);
+ break;
+
+ case QMPPHY_MODE_USB3_ONLY:
+ writel(USB3_MODE, com + QPHY_V3_DP_COM_PHY_MODE_CTRL);
+
+ /* bring QMP USB PHY PCS block out of reset */
+ qphy_clrbits(com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
+ SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
+ break;
+ }
qphy_clrbits(com, QPHY_V3_DP_COM_SWI_CTRL, 0x03);
qphy_clrbits(com, QPHY_V3_DP_COM_SW_RESET, SW_RESET);
@@ -3133,6 +3165,8 @@ static int qmp_combo_dp_power_on(struct phy *phy)
/* Configure link rate, swing, etc. */
cfg->configure_dp_phy(qmp);
+ qmp->dp_powered_on = true;
+
mutex_unlock(&qmp->phy_mutex);
return 0;
@@ -3147,6 +3181,8 @@ static int qmp_combo_dp_power_off(struct phy *phy)
/* Assert DP PHY power down */
writel(DP_PHY_PD_CTL_PSR_PWRDN, qmp->dp_dp_phy + QSERDES_DP_PHY_PD_CTL);
+ qmp->dp_powered_on = false;
+
mutex_unlock(&qmp->phy_mutex);
return 0;
@@ -3282,7 +3318,7 @@ static int qmp_combo_usb_set_mode(struct phy *phy, enum phy_mode mode, int submo
{
struct qmp_combo *qmp = phy_get_drvdata(phy);
- qmp->mode = mode;
+ qmp->phy_mode = mode;
return 0;
}
@@ -3311,8 +3347,8 @@ static void qmp_combo_enable_autonomous_mode(struct qmp_combo *qmp)
void __iomem *pcs_misc = qmp->pcs_misc;
u32 intr_mask;
- if (qmp->mode == PHY_MODE_USB_HOST_SS ||
- qmp->mode == PHY_MODE_USB_DEVICE_SS)
+ if (qmp->phy_mode == PHY_MODE_USB_HOST_SS ||
+ qmp->phy_mode == PHY_MODE_USB_DEVICE_SS)
intr_mask = ARCVR_DTCT_EN | ALFPS_DTCT_EN;
else
intr_mask = ARCVR_DTCT_EN | ARCVR_DTCT_EVENT_SEL;
@@ -3355,7 +3391,7 @@ static int __maybe_unused qmp_combo_runtime_suspend(struct device *dev)
{
struct qmp_combo *qmp = dev_get_drvdata(dev);
- dev_vdbg(dev, "Suspending QMP phy, mode:%d\n", qmp->mode);
+ dev_vdbg(dev, "Suspending QMP phy, mode:%d\n", qmp->phy_mode);
if (!qmp->init_count) {
dev_vdbg(dev, "PHY not initialized, bailing out\n");
@@ -3375,7 +3411,7 @@ static int __maybe_unused qmp_combo_runtime_resume(struct device *dev)
struct qmp_combo *qmp = dev_get_drvdata(dev);
int ret = 0;
- dev_vdbg(dev, "Resuming QMP phy, mode:%d\n", qmp->mode);
+ dev_vdbg(dev, "Resuming QMP phy, mode:%d\n", qmp->phy_mode);
if (!qmp->init_count) {
dev_vdbg(dev, "PHY not initialized, bailing out\n");
@@ -3769,17 +3805,109 @@ static int qmp_combo_typec_switch_set(struct typec_switch_dev *sw,
return 0;
}
-static void qmp_combo_typec_unregister(void *data)
+static int qmp_combo_typec_mux_set(struct typec_mux_dev *mux, struct typec_mux_state *state)
+{
+ struct qmp_combo *qmp = typec_mux_get_drvdata(mux);
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ enum qmpphy_mode new_mode;
+ unsigned int svid;
+
+ guard(mutex)(&qmp->phy_mutex);
+
+ if (state->alt)
+ svid = state->alt->svid;
+ else
+ svid = 0;
+
+ if (svid == USB_TYPEC_DP_SID) {
+ switch (state->mode) {
+ /* DP Only */
+ case TYPEC_DP_STATE_C:
+ case TYPEC_DP_STATE_E:
+ new_mode = QMPPHY_MODE_DP_ONLY;
+ break;
+
+ /* DP + USB */
+ case TYPEC_DP_STATE_D:
+ case TYPEC_DP_STATE_F:
+
+ /* Safe fallback... */
+ default:
+ new_mode = QMPPHY_MODE_USB3DP;
+ break;
+ }
+ } else {
+ /* No DP SVID => don't care, assume it's just USB3 */
+ new_mode = QMPPHY_MODE_USB3_ONLY;
+ }
+
+ if (new_mode == qmp->qmpphy_mode) {
+ dev_dbg(qmp->dev, "typec_mux_set: same qmpphy mode, bail out\n");
+ return 0;
+ }
+
+ if (qmp->qmpphy_mode != QMPPHY_MODE_USB3_ONLY && qmp->dp_powered_on) {
+ dev_dbg(qmp->dev, "typec_mux_set: DP PHY is still in use, delaying switch\n");
+ return 0;
+ }
+
+ dev_dbg(qmp->dev, "typec_mux_set: switching from qmpphy mode %d to %d\n",
+ qmp->qmpphy_mode, new_mode);
+
+ qmp->qmpphy_mode = new_mode;
+
+ if (qmp->init_count) {
+ if (qmp->usb_init_count)
+ qmp_combo_usb_power_off(qmp->usb_phy);
+
+ if (qmp->dp_init_count)
+ writel(DP_PHY_PD_CTL_PSR_PWRDN, qmp->dp_dp_phy + QSERDES_DP_PHY_PD_CTL);
+
+ qmp_combo_com_exit(qmp, true);
+
+ /* Now everything's powered down, power up the right PHYs */
+ qmp_combo_com_init(qmp, true);
+
+ if (new_mode == QMPPHY_MODE_DP_ONLY) {
+ if (qmp->usb_init_count)
+ qmp->usb_init_count--;
+ }
+
+ if (new_mode == QMPPHY_MODE_USB3DP || new_mode == QMPPHY_MODE_USB3_ONLY) {
+ qmp_combo_usb_power_on(qmp->usb_phy);
+ if (!qmp->usb_init_count)
+ qmp->usb_init_count++;
+ }
+
+ if (new_mode == QMPPHY_MODE_DP_ONLY || new_mode == QMPPHY_MODE_USB3DP) {
+ if (qmp->dp_init_count)
+ cfg->dp_aux_init(qmp);
+ }
+ }
+
+ return 0;
+}
+
+static void qmp_combo_typec_switch_unregister(void *data)
{
struct qmp_combo *qmp = data;
typec_switch_unregister(qmp->sw);
}
-static int qmp_combo_typec_switch_register(struct qmp_combo *qmp)
+static void qmp_combo_typec_mux_unregister(void *data)
+{
+ struct qmp_combo *qmp = data;
+
+ typec_mux_unregister(qmp->mux);
+}
+
+static int qmp_combo_typec_register(struct qmp_combo *qmp)
{
struct typec_switch_desc sw_desc = {};
+ struct typec_mux_desc mux_desc = { };
struct device *dev = qmp->dev;
+ int ret;
sw_desc.drvdata = qmp;
sw_desc.fwnode = dev->fwnode;
@@ -3790,10 +3918,23 @@ static int qmp_combo_typec_switch_register(struct qmp_combo *qmp)
return PTR_ERR(qmp->sw);
}
- return devm_add_action_or_reset(dev, qmp_combo_typec_unregister, qmp);
+ ret = devm_add_action_or_reset(dev, qmp_combo_typec_switch_unregister, qmp);
+ if (ret)
+ return ret;
+
+ mux_desc.drvdata = qmp;
+ mux_desc.fwnode = dev->fwnode;
+ mux_desc.set = qmp_combo_typec_mux_set;
+ qmp->mux = typec_mux_register(dev, &mux_desc);
+ if (IS_ERR(qmp->mux)) {
+ dev_err(dev, "Unable to register typec mux: %pe\n", qmp->mux);
+ return PTR_ERR(qmp->mux);
+ }
+
+ return devm_add_action_or_reset(dev, qmp_combo_typec_mux_unregister, qmp);
}
#else
-static int qmp_combo_typec_switch_register(struct qmp_combo *qmp)
+static int qmp_combo_typec_register(struct qmp_combo *qmp)
{
return 0;
}
@@ -4026,7 +4167,7 @@ static int qmp_combo_probe(struct platform_device *pdev)
if (ret)
goto err_node_put;
- ret = qmp_combo_typec_switch_register(qmp);
+ ret = qmp_combo_typec_register(qmp);
if (ret)
goto err_node_put;
@@ -4048,6 +4189,12 @@ static int qmp_combo_probe(struct platform_device *pdev)
if (ret)
goto err_node_put;
+ /*
+ * The hardware default is USB3-only, but defaulting to USB3+DP mode
+ * exercises both sub-blocks' init tables at probe time, making
+ * misconfigurations easier to catch.
+ */
+ qmp->qmpphy_mode = QMPPHY_MODE_USB3DP;
+
qmp->usb_phy = devm_phy_create(dev, usb_np, &qmp_combo_usb_phy_ops);
if (IS_ERR(qmp->usb_phy)) {
ret = PTR_ERR(qmp->usb_phy);
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
index 95830dcfdec9..62b1c845b627 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
@@ -93,6 +93,13 @@ static const unsigned int pciephy_v6_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V6_PCS_POWER_DOWN_CONTROL,
};
+static const unsigned int pciephy_v7_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = QPHY_V7_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V7_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V7_PCS_PCS_STATUS1,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V7_PCS_POWER_DOWN_CONTROL,
+};
+
static const struct qmp_phy_init_tbl msm8998_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x14),
QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
@@ -2590,6 +2597,108 @@ static const struct qmp_phy_init_tbl sm8650_qmp_gen4x2_pcie_rx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B6, 0xff),
};
+static const struct qmp_phy_init_tbl sm8750_qmp_gen3x2_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_SSC_EN_CENTER, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_SSC_PER1, 0x62),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_SSC_PER2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_SSC_STEP_SIZE1_MODE0, 0xf8),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_SSC_STEP_SIZE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_SSC_STEP_SIZE1_MODE1, 0x93),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_SSC_STEP_SIZE2_MODE1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_CLK_ENABLE1, 0x90),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_SYS_CLK_CTRL, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_CP_CTRL_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_CP_CTRL_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_SYSCLK_EN_SEL, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_LOCK_CMP_EN, 0x42),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_LOCK_CMP1_MODE0, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_LOCK_CMP2_MODE0, 0x0d),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_LOCK_CMP1_MODE1, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_LOCK_CMP2_MODE1, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_DEC_START_MODE0, 0x41),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_DEC_START_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_DIV_FRAC_START2_MODE0, 0xaa),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_DIV_FRAC_START3_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_DIV_FRAC_START1_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_DIV_FRAC_START2_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_DIV_FRAC_START3_MODE1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_VCO_TUNE_MAP, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_CLK_SELECT, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_HSCLK_SEL_1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_CORECLK_DIV_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_CMN_CONFIG_1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_ADDITIONAL_MISC_3, 0x0F),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_CORE_CLK_EN, 0xA0),
+};
+
+static const struct qmp_phy_init_tbl sm8750_qmp_gen3x2_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_GM_CAL, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_00_HIGH, 0xBF),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_00_HIGH2, 0xBF),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_00_HIGH3, 0xB7),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_00_HIGH4, 0xEA),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_00_LOW, 0x3F),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_01_HIGH, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_01_HIGH2, 0x49),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_01_HIGH3, 0x1B),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_01_HIGH4, 0x9C),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_01_LOW, 0xD1),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_10_HIGH, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_10_HIGH2, 0x49),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_10_HIGH3, 0x1B),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_10_HIGH4, 0x9C),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_10_LOW, 0xD1),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_TX_ADAPT_PRE_THRESH1, 0x3E),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_TX_ADAPT_PRE_THRESH2, 0x1E),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_TX_ADAPT_POST_THRESH, 0xD2),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_UCDR_FO_GAIN, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_UCDR_SO_GAIN, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_UCDR_SB2_THRESH1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_VGA_CAL_CNTRL2, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_SIGDET_ENABLES, 0x1C),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_SIGDET_CNTRL, 0x60),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_IDAC_TSETTLE_LOW, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_SIGDET_CAL_TRIM, 0x08),
+};
+
+static const struct qmp_phy_init_tbl sm8750_qmp_gen3x2_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V7_TX_LANE_MODE_1, 0x35),
+ QMP_PHY_INIT_CFG(QSERDES_V7_TX_LANE_MODE_3, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V7_TX_LANE_MODE_4, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V7_TX_LANE_MODE_5, 0x7F),
+ QMP_PHY_INIT_CFG(QSERDES_V7_TX_PI_QEC_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V7_TX_RES_CODE_LANE_OFFSET_RX, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V7_TX_RES_CODE_LANE_OFFSET_TX, 0x14),
+};
+
+static const struct qmp_phy_init_tbl sm8750_qmp_gen3x2_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V7_PCS_REFGEN_REQ_CONFIG1, 0x05),
+ QMP_PHY_INIT_CFG(QPHY_V7_PCS_RX_SIGDET_LVL, 0x77),
+ QMP_PHY_INIT_CFG(QPHY_V7_PCS_RATE_SLEW_CNTRL1, 0x0B),
+ QMP_PHY_INIT_CFG(QPHY_V7_PCS_EQ_CONFIG2, 0x0F),
+ QMP_PHY_INIT_CFG(QPHY_V7_PCS_PCS_TX_RX_CONFIG, 0x8C),
+ QMP_PHY_INIT_CFG(QPHY_V7_PCS_G12S1_TXDEEMPH_M6DB, 0x17),
+ QMP_PHY_INIT_CFG(QPHY_V7_PCS_G3S2_PRE_GAIN, 0x2E),
+};
+
+static const struct qmp_phy_init_tbl sm8750_qmp_gen3x2_pcie_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_PCS_PCIE_EQ_CONFIG1, 0x1E),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_PCS_PCIE_RXEQEVAL_TIME, 0x27),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_PCS_PCIE_POWER_STATE_CONFIG2, 0x1D),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_PCS_PCIE_POWER_STATE_CONFIG4, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xC1),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
+};
+
static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x2_pcie_serdes_alt_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN, 0x14),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f),
@@ -3067,6 +3176,14 @@ struct qmp_pcie {
struct clk_fixed_rate aux_clk_fixed;
};
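+/* Return true if all bits in @val are set in the register at @offset. */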
+static bool qphy_checkbits(const void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ return (reg & val) == val;
+}
+
static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val)
{
u32 reg;
@@ -3207,6 +3324,16 @@ static const struct qmp_pcie_offsets qmp_pcie_offsets_v5_30 = {
.rx2 = 0x3a00,
};
+static const struct qmp_pcie_offsets qmp_pcie_offsets_v7 = {
+ .serdes = 0x0,
+ .pcs = 0x400,
+ .pcs_misc = 0x800,
+ .tx = 0x1000,
+ .rx = 0x1200,
+ .tx2 = 0x1800,
+ .rx2 = 0x1a00,
+};
+
static const struct qmp_pcie_offsets qmp_pcie_offsets_v6_20 = {
.serdes = 0x1000,
.pcs = 0x1200,
@@ -3996,6 +4123,33 @@ static const struct qmp_phy_cfg sm8550_qmp_gen3x2_pciephy_cfg = {
.phy_status = PHYSTATUS,
};
+static const struct qmp_phy_cfg sm8750_qmp_gen3x2_pciephy_cfg = {
+ .lanes = 2,
+
+ .offsets = &qmp_pcie_offsets_v7,
+
+ .tbls = {
+ .serdes = sm8750_qmp_gen3x2_pcie_serdes_tbl,
+ .serdes_num = ARRAY_SIZE(sm8750_qmp_gen3x2_pcie_serdes_tbl),
+ .tx = sm8750_qmp_gen3x2_pcie_tx_tbl,
+ .tx_num = ARRAY_SIZE(sm8750_qmp_gen3x2_pcie_tx_tbl),
+ .rx = sm8750_qmp_gen3x2_pcie_rx_tbl,
+ .rx_num = ARRAY_SIZE(sm8750_qmp_gen3x2_pcie_rx_tbl),
+ .pcs = sm8750_qmp_gen3x2_pcie_pcs_tbl,
+ .pcs_num = ARRAY_SIZE(sm8750_qmp_gen3x2_pcie_pcs_tbl),
+ .pcs_misc = sm8750_qmp_gen3x2_pcie_pcs_misc_tbl,
+ .pcs_misc_num = ARRAY_SIZE(sm8750_qmp_gen3x2_pcie_pcs_misc_tbl),
+ },
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = pciephy_v7_regs_layout,
+
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
+};
+
static const struct qmp_phy_cfg sm8550_qmp_gen4x2_pciephy_cfg = {
.lanes = 2,
@@ -4339,16 +4493,21 @@ static int qmp_pcie_init(struct phy *phy)
struct qmp_pcie *qmp = phy_get_drvdata(phy);
const struct qmp_phy_cfg *cfg = qmp->cfg;
void __iomem *pcs = qmp->pcs;
- bool phy_initialized = !!(readl(pcs + cfg->regs[QPHY_START_CTRL]));
int ret;
- qmp->skip_init = qmp->nocsr_reset && phy_initialized;
/*
- * We need to check the existence of init sequences in two cases:
- * 1. The PHY doesn't support no_csr reset.
- * 2. The PHY supports no_csr reset but isn't initialized by bootloader.
- * As we can't skip init in these two cases.
+ * We can skip PHY initialization if all of the following conditions
+ * are met:
+ * 1. The PHY supports the nocsr_reset that preserves the PHY config.
+ * 2. The PHY was started (and not powered down again) by the
+ * bootloader, with all of the expected bits set correctly.
+ * In that case we can continue without the init sequences defined
+ * in the driver.
*/
+ qmp->skip_init = qmp->nocsr_reset &&
+ qphy_checkbits(pcs, cfg->regs[QPHY_START_CTRL], SERDES_START | PCS_START) &&
+ qphy_checkbits(pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], cfg->pwrdn_ctrl);
+
if (!qmp->skip_init && !cfg->tbls.serdes_num) {
dev_err(qmp->dev, "Init sequence not available\n");
return -ENODATA;
@@ -5100,6 +5259,9 @@ static const struct of_device_id qmp_pcie_of_match_table[] = {
.compatible = "qcom,sm8650-qmp-gen4x2-pcie-phy",
.data = &sm8650_qmp_gen4x2_pciephy_cfg,
}, {
+ .compatible = "qcom,sm8750-qmp-gen3x2-pcie-phy",
+ .data = &sm8750_qmp_gen3x2_pciephy_cfg,
+ }, {
.compatible = "qcom,x1e80100-qmp-gen3x2-pcie-phy",
.data = &sm8550_qmp_gen3x2_pciephy_cfg,
}, {
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v7.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v7.h
index c7759892ed2e..4b7fcaa6a374 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v7.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v7.h
@@ -17,6 +17,8 @@
#define QPHY_V7_PCS_LOCK_DETECT_CONFIG3 0x0cc
#define QPHY_V7_PCS_LOCK_DETECT_CONFIG6 0x0d8
#define QPHY_V7_PCS_REFGEN_REQ_CONFIG1 0x0dc
+#define QPHY_V7_PCS_G12S1_TXDEEMPH_M6DB 0x168
+#define QPHY_V7_PCS_G3S2_PRE_GAIN 0x170
#define QPHY_V7_PCS_RX_SIGDET_LVL 0x188
#define QPHY_V7_PCS_RCVR_DTCT_DLY_P1U2_L 0x190
#define QPHY_V7_PCS_RCVR_DTCT_DLY_P1U2_H 0x194
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v7.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v7.h
index 91f865b11347..6ab943ff57ff 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v7.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v7.h
@@ -40,6 +40,8 @@
#define QSERDES_V7_RX_UCDR_SB2_GAIN1 0x54
#define QSERDES_V7_RX_UCDR_SB2_GAIN2 0x58
#define QSERDES_V7_RX_AUX_DATA_TCOARSE_TFINE 0x60
+#define QSERDES_V7_RX_TX_ADAPT_PRE_THRESH1 0xc4
+#define QSERDES_V7_RX_TX_ADAPT_PRE_THRESH2 0xc8
#define QSERDES_V7_RX_TX_ADAPT_POST_THRESH 0xcc
#define QSERDES_V7_RX_VGA_CAL_CNTRL1 0xd4
#define QSERDES_V7_RX_VGA_CAL_CNTRL2 0xd8
@@ -50,7 +52,7 @@
#define QSERDES_V7_RX_RX_IDAC_TSETTLE_LOW 0xf8
#define QSERDES_V7_RX_RX_IDAC_TSETTLE_HIGH 0xfc
#define QSERDES_V7_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x110
-#define QSERDES_V7_RX_SIDGET_ENABLES 0x118
+#define QSERDES_V7_RX_SIGDET_ENABLES 0x118
#define QSERDES_V7_RX_SIGDET_CNTRL 0x11c
#define QSERDES_V7_RX_SIGDET_DEGLITCH_CNTRL 0x124
#define QSERDES_V7_RX_RX_MODE_00_LOW 0x15c
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
index 9c69c77d10c8..8a280433a42b 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
@@ -1107,7 +1107,7 @@ struct qmp_phy_cfg {
const struct qmp_phy_cfg_tbls tbls_hs_overlay[NUM_OVERLAY];
/* regulators to be requested */
- const char * const *vreg_list;
+ const struct regulator_bulk_data *vreg_list;
int num_vregs;
/* array of registers with different offsets */
@@ -1164,9 +1164,80 @@ static inline void qphy_clrbits(void __iomem *base, u32 offset, u32 val)
readl(base + offset);
}
-/* list of regulators */
-static const char * const qmp_phy_vreg_l[] = {
- "vdda-phy", "vdda-pll",
+/* Regulator bulk data with load values for specific configurations */
+static const struct regulator_bulk_data msm8996_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 51400 },
+ { .supply = "vdda-pll", .init_load_uA = 14600 },
+};
+
+static const struct regulator_bulk_data sa8775p_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 137000 },
+ { .supply = "vdda-pll", .init_load_uA = 18300 },
+};
+
+static const struct regulator_bulk_data sc7280_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 97500 },
+ { .supply = "vdda-pll", .init_load_uA = 18400 },
+};
+
+static const struct regulator_bulk_data sc8280xp_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 85700 },
+ { .supply = "vdda-pll", .init_load_uA = 18300 },
+};
+
+static const struct regulator_bulk_data sdm845_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 51400 },
+ { .supply = "vdda-pll", .init_load_uA = 14600 },
+};
+
+static const struct regulator_bulk_data sm6115_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 51400 },
+ { .supply = "vdda-pll", .init_load_uA = 14200 },
+};
+
+static const struct regulator_bulk_data sm7150_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 62900 },
+ { .supply = "vdda-pll", .init_load_uA = 18300 },
+};
+
+static const struct regulator_bulk_data sm8150_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 90200 },
+ { .supply = "vdda-pll", .init_load_uA = 19000 },
+};
+
+static const struct regulator_bulk_data sm8250_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 89900 },
+ { .supply = "vdda-pll", .init_load_uA = 18800 },
+};
+
+static const struct regulator_bulk_data sm8350_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 91600 },
+ { .supply = "vdda-pll", .init_load_uA = 19000 },
+};
+
+static const struct regulator_bulk_data sm8450_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 173000 },
+ { .supply = "vdda-pll", .init_load_uA = 24900 },
+};
+
+static const struct regulator_bulk_data sm8475_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 213030 },
+ { .supply = "vdda-pll", .init_load_uA = 18340 },
+};
+
+static const struct regulator_bulk_data sm8550_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 188000 },
+ { .supply = "vdda-pll", .init_load_uA = 18300 },
+};
+
+static const struct regulator_bulk_data sm8650_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 205000 },
+ { .supply = "vdda-pll", .init_load_uA = 17500 },
+};
+
+static const struct regulator_bulk_data sm8750_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 213000 },
+ { .supply = "vdda-pll", .init_load_uA = 18300 },
};
static const struct qmp_ufs_offsets qmp_ufs_offsets = {
@@ -1202,8 +1273,8 @@ static const struct qmp_phy_cfg msm8996_ufsphy_cfg = {
.rx_num = ARRAY_SIZE(msm8996_ufsphy_rx),
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = msm8996_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(msm8996_ufsphy_vreg_l),
.regs = ufsphy_v2_regs_layout,
@@ -1239,8 +1310,8 @@ static const struct qmp_phy_cfg sa8775p_ufsphy_cfg = {
.pcs_num = ARRAY_SIZE(sm8350_ufsphy_g4_pcs),
.max_gear = UFS_HS_G4,
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sa8775p_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sa8775p_ufsphy_vreg_l),
.regs = ufsphy_v5_regs_layout,
};
@@ -1273,8 +1344,8 @@ static const struct qmp_phy_cfg sc7280_ufsphy_cfg = {
.pcs_num = ARRAY_SIZE(sm8150_ufsphy_hs_g4_pcs),
.max_gear = UFS_HS_G4,
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sc7280_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sc7280_ufsphy_vreg_l),
.regs = ufsphy_v4_regs_layout,
};
@@ -1307,8 +1378,8 @@ static const struct qmp_phy_cfg sc8280xp_ufsphy_cfg = {
.pcs_num = ARRAY_SIZE(sm8350_ufsphy_g4_pcs),
.max_gear = UFS_HS_G4,
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sc8280xp_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sc8280xp_ufsphy_vreg_l),
.regs = ufsphy_v5_regs_layout,
};
@@ -1332,8 +1403,8 @@ static const struct qmp_phy_cfg sdm845_ufsphy_cfg = {
.serdes = sdm845_ufsphy_hs_b_serdes,
.serdes_num = ARRAY_SIZE(sdm845_ufsphy_hs_b_serdes),
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sdm845_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sdm845_ufsphy_vreg_l),
.regs = ufsphy_v3_regs_layout,
.no_pcs_sw_reset = true,
@@ -1359,8 +1430,8 @@ static const struct qmp_phy_cfg sm6115_ufsphy_cfg = {
.serdes = sm6115_ufsphy_hs_b_serdes,
.serdes_num = ARRAY_SIZE(sm6115_ufsphy_hs_b_serdes),
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sm6115_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm6115_ufsphy_vreg_l),
.regs = ufsphy_v2_regs_layout,
.no_pcs_sw_reset = true,
@@ -1386,8 +1457,8 @@ static const struct qmp_phy_cfg sm7150_ufsphy_cfg = {
.serdes = sdm845_ufsphy_hs_b_serdes,
.serdes_num = ARRAY_SIZE(sdm845_ufsphy_hs_b_serdes),
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sm7150_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm7150_ufsphy_vreg_l),
.regs = ufsphy_v3_regs_layout,
.no_pcs_sw_reset = true,
@@ -1422,8 +1493,8 @@ static const struct qmp_phy_cfg sm8150_ufsphy_cfg = {
.pcs_num = ARRAY_SIZE(sm8150_ufsphy_hs_g4_pcs),
.max_gear = UFS_HS_G4,
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sm8150_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm8150_ufsphy_vreg_l),
.regs = ufsphy_v4_regs_layout,
};
@@ -1456,8 +1527,8 @@ static const struct qmp_phy_cfg sm8250_ufsphy_cfg = {
.pcs_num = ARRAY_SIZE(sm8150_ufsphy_hs_g4_pcs),
.max_gear = UFS_HS_G4,
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sm8250_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm8250_ufsphy_vreg_l),
.regs = ufsphy_v4_regs_layout,
};
@@ -1490,8 +1561,8 @@ static const struct qmp_phy_cfg sm8350_ufsphy_cfg = {
.pcs_num = ARRAY_SIZE(sm8350_ufsphy_g4_pcs),
.max_gear = UFS_HS_G4,
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sm8350_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm8350_ufsphy_vreg_l),
.regs = ufsphy_v5_regs_layout,
};
@@ -1524,8 +1595,8 @@ static const struct qmp_phy_cfg sm8450_ufsphy_cfg = {
.pcs_num = ARRAY_SIZE(sm8350_ufsphy_g4_pcs),
.max_gear = UFS_HS_G4,
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sm8450_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm8450_ufsphy_vreg_l),
.regs = ufsphy_v5_regs_layout,
};
@@ -1560,8 +1631,8 @@ static const struct qmp_phy_cfg sm8475_ufsphy_cfg = {
.pcs_num = ARRAY_SIZE(sm8475_ufsphy_g4_pcs),
.max_gear = UFS_HS_G4,
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sm8475_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm8475_ufsphy_vreg_l),
.regs = ufsphy_v6_regs_layout,
};
@@ -1605,8 +1676,8 @@ static const struct qmp_phy_cfg sm8550_ufsphy_cfg = {
.pcs_num = ARRAY_SIZE(sm8550_ufsphy_g5_pcs),
.max_gear = UFS_HS_G5,
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sm8550_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm8550_ufsphy_vreg_l),
.regs = ufsphy_v6_regs_layout,
};
@@ -1637,8 +1708,8 @@ static const struct qmp_phy_cfg sm8650_ufsphy_cfg = {
.max_gear = UFS_HS_G5,
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sm8650_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm8650_ufsphy_vreg_l),
.regs = ufsphy_v6_regs_layout,
};
@@ -1675,8 +1746,8 @@ static const struct qmp_phy_cfg sm8750_ufsphy_cfg = {
.max_gear = UFS_HS_G5,
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sm8750_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm8750_ufsphy_vreg_l),
.regs = ufsphy_v6_regs_layout,
};
@@ -1890,22 +1961,6 @@ static const struct phy_ops qcom_qmp_ufs_phy_ops = {
.owner = THIS_MODULE,
};
-static int qmp_ufs_vreg_init(struct qmp_ufs *qmp)
-{
- const struct qmp_phy_cfg *cfg = qmp->cfg;
- struct device *dev = qmp->dev;
- int num = cfg->num_vregs;
- int i;
-
- qmp->vregs = devm_kcalloc(dev, num, sizeof(*qmp->vregs), GFP_KERNEL);
- if (!qmp->vregs)
- return -ENOMEM;
-
- for (i = 0; i < num; i++)
- qmp->vregs[i].supply = cfg->vreg_list[i];
-
- return devm_regulator_bulk_get(dev, num, qmp->vregs);
-}
static int qmp_ufs_clk_init(struct qmp_ufs *qmp)
{
@@ -2068,7 +2123,9 @@ static int qmp_ufs_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = qmp_ufs_vreg_init(qmp);
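+ /*
+ * cfg->vreg_list now carries struct regulator_bulk_data with
+ * per-SoC init_load_uA values, so the const bulk-get helper can
+ * request the supplies directly.
+ */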
+ ret = devm_regulator_bulk_get_const(dev, qmp->cfg->num_vregs,
+ qmp->cfg->vreg_list,
+ &qmp->vregs);
if (ret)
return ret;
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
index 47beb94cd424..3f6b480e1092 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
@@ -9,6 +9,8 @@
* Copyright (C) 2014 Cogent Embedded, Inc.
*/
+#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/extcon-provider.h>
#include <linux/interrupt.h>
@@ -69,14 +71,20 @@
#define USB2_COMMCTRL_OTG_PERI BIT(31) /* 1 = Peripheral mode */
/* OBINTSTA and OBINTEN */
+#define USB2_OBINTSTA_CLEAR GENMASK(31, 0)
#define USB2_OBINT_SESSVLDCHG BIT(12)
#define USB2_OBINT_IDDIGCHG BIT(11)
-#define USB2_OBINT_BITS (USB2_OBINT_SESSVLDCHG | \
- USB2_OBINT_IDDIGCHG)
+#define USB2_OBINT_VBSTAINT BIT(3)
+#define USB2_OBINT_IDCHG_EN BIT(0) /* RZ/G2L specific */
/* VBCTRL */
+#define USB2_VBCTRL_VBSTA_MASK GENMASK(31, 28)
+#define USB2_VBCTRL_VBSTA_DEFAULT 2
+#define USB2_VBCTRL_VBLVL_MASK GENMASK(23, 20)
+#define USB2_VBCTRL_VBLVL(m) FIELD_PREP_CONST(USB2_VBCTRL_VBLVL_MASK, (m))
#define USB2_VBCTRL_OCCLREN BIT(16)
#define USB2_VBCTRL_DRVVBUSSEL BIT(8)
+#define USB2_VBCTRL_SIDDQREL BIT(2)
#define USB2_VBCTRL_VBOUT BIT(0)
/* LINECTRL1 */
@@ -89,11 +97,11 @@
/* ADPCTRL */
#define USB2_ADPCTRL_OTGSESSVLD BIT(20)
#define USB2_ADPCTRL_IDDIG BIT(19)
+#define USB2_ADPCTRL_VBUSVALID BIT(18)
#define USB2_ADPCTRL_IDPULLUP BIT(5) /* 1 = ID sampling is enabled */
#define USB2_ADPCTRL_DRVVBUS BIT(4)
/* RZ/G2L specific */
-#define USB2_OBINT_IDCHG_EN BIT(0)
#define USB2_LINECTRL1_USB2_IDMON BIT(0)
#define NUM_OF_PHYS 4
@@ -122,6 +130,7 @@ struct rcar_gen3_phy {
struct rcar_gen3_chan {
void __iomem *base;
struct device *dev; /* platform_device's device */
+ const struct rcar_gen3_phy_drv_data *phy_data;
struct extcon_dev *extcon;
struct rcar_gen3_phy rphys[NUM_OF_PHYS];
struct regulator *vbus;
@@ -129,12 +138,9 @@ struct rcar_gen3_chan {
struct work_struct work;
spinlock_t lock; /* protects access to hardware and driver data structure. */
enum usb_dr_mode dr_mode;
- u32 obint_enable_bits;
bool extcon_host;
bool is_otg_channel;
bool uses_otg_pins;
- bool soc_no_adp_ctrl;
- bool utmi_ctrl;
};
struct rcar_gen3_phy_drv_data {
@@ -142,6 +148,8 @@ struct rcar_gen3_phy_drv_data {
bool no_adp_ctrl;
bool init_bus;
bool utmi_ctrl;
+ bool vblvl_ctrl;
+ u32 obint_enable_bits;
};
/*
@@ -203,8 +211,7 @@ static void rcar_gen3_enable_vbus_ctrl(struct rcar_gen3_chan *ch, int vbus)
u32 vbus_ctrl_val = USB2_ADPCTRL_DRVVBUS;
u32 val;
- dev_vdbg(ch->dev, "%s: %08x, %d\n", __func__, val, vbus);
- if (ch->soc_no_adp_ctrl) {
+ if (ch->phy_data->no_adp_ctrl || ch->phy_data->vblvl_ctrl) {
if (ch->vbus)
regulator_hardware_enable(ch->vbus, vbus);
@@ -217,6 +224,7 @@ static void rcar_gen3_enable_vbus_ctrl(struct rcar_gen3_chan *ch, int vbus)
val |= vbus_ctrl_val;
else
val &= ~vbus_ctrl_val;
+ dev_vdbg(ch->dev, "%s: %08x, %d\n", __func__, val, vbus);
writel(val, usb2_base + vbus_ctrl_reg);
}
@@ -226,9 +234,9 @@ static void rcar_gen3_control_otg_irq(struct rcar_gen3_chan *ch, int enable)
u32 val = readl(usb2_base + USB2_OBINTEN);
if (ch->uses_otg_pins && enable)
- val |= ch->obint_enable_bits;
+ val |= ch->phy_data->obint_enable_bits;
else
- val &= ~ch->obint_enable_bits;
+ val &= ~ch->phy_data->obint_enable_bits;
writel(val, usb2_base + USB2_OBINTEN);
}
@@ -287,10 +295,20 @@ static void rcar_gen3_init_from_a_peri_to_a_host(struct rcar_gen3_chan *ch)
static bool rcar_gen3_check_id(struct rcar_gen3_chan *ch)
{
+ if (ch->phy_data->vblvl_ctrl) {
+ bool vbus_valid;
+ bool device;
+
+ device = !!(readl(ch->base + USB2_ADPCTRL) & USB2_ADPCTRL_IDDIG);
+ vbus_valid = !!(readl(ch->base + USB2_ADPCTRL) & USB2_ADPCTRL_VBUSVALID);
+
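+ /*
+ * While VBUS is valid the IDDIG level is reported as-is;
+ * without a valid VBUS the level is inverted.
+ */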
+ return vbus_valid ? device : !device;
+ }
+
if (!ch->uses_otg_pins)
- return (ch->dr_mode == USB_DR_MODE_HOST) ? false : true;
+ return ch->dr_mode != USB_DR_MODE_HOST;
- if (ch->soc_no_adp_ctrl)
+ if (ch->phy_data->no_adp_ctrl)
return !!(readl(ch->base + USB2_LINECTRL1) & USB2_LINECTRL1_USB2_IDMON);
return !!(readl(ch->base + USB2_ADPCTRL) & USB2_ADPCTRL_IDDIG);
@@ -421,21 +439,47 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
USB2_LINECTRL1_DMRPD_EN | USB2_LINECTRL1_DM_RPD;
writel(val, usb2_base + USB2_LINECTRL1);
- if (!ch->soc_no_adp_ctrl) {
- val = readl(usb2_base + USB2_VBCTRL);
- val &= ~USB2_VBCTRL_OCCLREN;
- writel(val | USB2_VBCTRL_DRVVBUSSEL, usb2_base + USB2_VBCTRL);
- val = readl(usb2_base + USB2_ADPCTRL);
- writel(val | USB2_ADPCTRL_IDPULLUP, usb2_base + USB2_ADPCTRL);
+ if (!ch->phy_data->no_adp_ctrl) {
+ if (ch->phy_data->vblvl_ctrl) {
+ val = readl(usb2_base + USB2_VBCTRL);
+ val = (val & ~USB2_VBCTRL_VBLVL_MASK) | USB2_VBCTRL_VBLVL(2);
+ writel(val, usb2_base + USB2_VBCTRL);
+ val = readl(usb2_base + USB2_ADPCTRL);
+ writel(val | USB2_ADPCTRL_IDPULLUP | USB2_ADPCTRL_DRVVBUS,
+ usb2_base + USB2_ADPCTRL);
+ } else {
+ val = readl(usb2_base + USB2_VBCTRL);
+ val &= ~USB2_VBCTRL_OCCLREN;
+ writel(val | USB2_VBCTRL_DRVVBUSSEL, usb2_base + USB2_VBCTRL);
+ val = readl(usb2_base + USB2_ADPCTRL);
+ writel(val | USB2_ADPCTRL_IDPULLUP, usb2_base + USB2_ADPCTRL);
+ }
}
mdelay(20);
writel(0xffffffff, usb2_base + USB2_OBINTSTA);
- writel(ch->obint_enable_bits, usb2_base + USB2_OBINTEN);
+ writel(ch->phy_data->obint_enable_bits, usb2_base + USB2_OBINTEN);
rcar_gen3_device_recognition(ch);
}
+static void rcar_gen3_configure_vblvl_ctrl(struct rcar_gen3_chan *ch)
+{
+ void __iomem *usb2_base = ch->base;
+ u32 val;
+
+ if (!ch->phy_data->vblvl_ctrl)
+ return;
+
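+ /*
+ * Follow the VBUS state: clear VBLVL when VBSTA reports the
+ * default level, otherwise program VBLVL back to the default.
+ */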
+ val = readl(usb2_base + USB2_VBCTRL);
+ if ((val & USB2_VBCTRL_VBSTA_MASK) ==
+ FIELD_PREP_CONST(USB2_VBCTRL_VBSTA_MASK, USB2_VBCTRL_VBSTA_DEFAULT))
+ val &= ~USB2_VBCTRL_VBLVL_MASK;
+ else
+ val |= USB2_VBCTRL_VBLVL(USB2_VBCTRL_VBSTA_DEFAULT);
+ writel(val, usb2_base + USB2_VBCTRL);
+}
+
static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch)
{
struct rcar_gen3_chan *ch = _ch;
@@ -451,10 +495,14 @@ static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch)
scoped_guard(spinlock, &ch->lock) {
status = readl(usb2_base + USB2_OBINTSTA);
- if (status & ch->obint_enable_bits) {
+ if (status & ch->phy_data->obint_enable_bits) {
dev_vdbg(dev, "%s: %08x\n", __func__, status);
- writel(ch->obint_enable_bits, usb2_base + USB2_OBINTSTA);
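+ /*
+ * vblvl-capable PHYs (RZ/T2H) clear all OBINTSTA bits,
+ * not just the enabled ones.
+ */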
+ if (ch->phy_data->vblvl_ctrl)
+ writel(USB2_OBINTSTA_CLEAR, usb2_base + USB2_OBINTSTA);
+ else
+ writel(ch->phy_data->obint_enable_bits, usb2_base + USB2_OBINTSTA);
rcar_gen3_device_recognition(ch);
+ rcar_gen3_configure_vblvl_ctrl(ch);
ret = IRQ_HANDLED;
}
}
@@ -487,7 +535,14 @@ static int rcar_gen3_phy_usb2_init(struct phy *p)
if (rphy->int_enable_bits)
rcar_gen3_init_otg(channel);
- if (channel->utmi_ctrl) {
+ if (channel->phy_data->vblvl_ctrl) {
+ /* SIDDQ mode release */
+ writel(readl(usb2_base + USB2_VBCTRL) | USB2_VBCTRL_SIDDQREL,
+ usb2_base + USB2_VBCTRL);
+ udelay(250);
+ }
+
+ if (channel->phy_data->utmi_ctrl) {
val = readl(usb2_base + USB2_REGEN_CG_CTRL) | USB2_REGEN_CG_CTRL_UPHY_WEN;
writel(val, usb2_base + USB2_REGEN_CG_CTRL);
@@ -592,28 +647,41 @@ static const struct phy_ops rz_g1c_phy_usb2_ops = {
static const struct rcar_gen3_phy_drv_data rcar_gen3_phy_usb2_data = {
.phy_usb2_ops = &rcar_gen3_phy_usb2_ops,
.no_adp_ctrl = false,
+ .obint_enable_bits = USB2_OBINT_SESSVLDCHG |
+ USB2_OBINT_IDDIGCHG,
};
static const struct rcar_gen3_phy_drv_data rz_g1c_phy_usb2_data = {
.phy_usb2_ops = &rz_g1c_phy_usb2_ops,
.no_adp_ctrl = false,
+ .obint_enable_bits = USB2_OBINT_SESSVLDCHG |
+ USB2_OBINT_IDDIGCHG,
};
static const struct rcar_gen3_phy_drv_data rz_g2l_phy_usb2_data = {
.phy_usb2_ops = &rcar_gen3_phy_usb2_ops,
.no_adp_ctrl = true,
+ .obint_enable_bits = USB2_OBINT_IDCHG_EN,
};
static const struct rcar_gen3_phy_drv_data rz_g3s_phy_usb2_data = {
.phy_usb2_ops = &rcar_gen3_phy_usb2_ops,
.no_adp_ctrl = true,
.init_bus = true,
+ .obint_enable_bits = USB2_OBINT_IDCHG_EN,
+};
+
+static const struct rcar_gen3_phy_drv_data rz_t2h_phy_usb2_data = {
+ .phy_usb2_ops = &rcar_gen3_phy_usb2_ops,
+ .vblvl_ctrl = true,
+ .obint_enable_bits = USB2_OBINT_IDCHG_EN | USB2_OBINT_VBSTAINT,
};
static const struct rcar_gen3_phy_drv_data rz_v2h_phy_usb2_data = {
.phy_usb2_ops = &rcar_gen3_phy_usb2_ops,
.no_adp_ctrl = true,
.utmi_ctrl = true,
+ .obint_enable_bits = USB2_OBINT_IDCHG_EN,
};
static const struct of_device_id rcar_gen3_phy_usb2_match_table[] = {
@@ -642,6 +710,10 @@ static const struct of_device_id rcar_gen3_phy_usb2_match_table[] = {
.data = &rz_v2h_phy_usb2_data,
},
{
+ .compatible = "renesas,usb2-phy-r9a09g077",
+ .data = &rz_t2h_phy_usb2_data,
+ },
+ {
.compatible = "renesas,rzg2l-usb2-phy",
.data = &rz_g2l_phy_usb2_data,
},
@@ -730,7 +802,6 @@ rpm_put:
static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
{
- const struct rcar_gen3_phy_drv_data *phy_data;
struct device *dev = &pdev->dev;
struct rcar_gen3_chan *channel;
struct phy_provider *provider;
@@ -749,7 +820,6 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
if (IS_ERR(channel->base))
return PTR_ERR(channel->base);
- channel->obint_enable_bits = USB2_OBINT_BITS;
channel->dr_mode = rcar_gen3_get_dr_mode(dev->of_node);
if (channel->dr_mode != USB_DR_MODE_UNKNOWN) {
channel->is_otg_channel = true;
@@ -773,8 +843,8 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
*/
pm_runtime_enable(dev);
- phy_data = of_device_get_match_data(dev);
- if (!phy_data) {
+ channel->phy_data = of_device_get_match_data(dev);
+ if (!channel->phy_data) {
ret = -EINVAL;
goto error;
}
@@ -782,22 +852,16 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, channel);
channel->dev = dev;
- if (phy_data->init_bus) {
+ if (channel->phy_data->init_bus) {
ret = rcar_gen3_phy_usb2_init_bus(channel);
if (ret)
goto error;
}
- channel->soc_no_adp_ctrl = phy_data->no_adp_ctrl;
- if (phy_data->no_adp_ctrl)
- channel->obint_enable_bits = USB2_OBINT_IDCHG_EN;
-
- channel->utmi_ctrl = phy_data->utmi_ctrl;
-
spin_lock_init(&channel->lock);
for (i = 0; i < NUM_OF_PHYS; i++) {
channel->rphys[i].phy = devm_phy_create(dev, NULL,
- phy_data->phy_usb2_ops);
+ channel->phy_data->phy_usb2_ops);
if (IS_ERR(channel->rphys[i].phy)) {
dev_err(dev, "Failed to create USB2 PHY\n");
ret = PTR_ERR(channel->rphys[i].phy);
@@ -808,7 +872,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
phy_set_drvdata(channel->rphys[i].phy, &channel->rphys[i]);
}
- if (channel->soc_no_adp_ctrl && channel->is_otg_channel)
+ if (channel->phy_data->no_adp_ctrl && channel->is_otg_channel)
channel->vbus = devm_regulator_get_exclusive(dev, "vbus");
else
channel->vbus = devm_regulator_get_optional(dev, "vbus");
diff --git a/drivers/phy/renesas/r8a779f0-ether-serdes.c b/drivers/phy/renesas/r8a779f0-ether-serdes.c
index 3b2d8cef75e5..8a6b6f366fe3 100644
--- a/drivers/phy/renesas/r8a779f0-ether-serdes.c
+++ b/drivers/phy/renesas/r8a779f0-ether-serdes.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet SERDES device driver
*
- * Copyright (C) 2022 Renesas Electronics Corporation
+ * Copyright (C) 2022-2025 Renesas Electronics Corporation
*/
#include <linux/delay.h>
@@ -49,6 +49,13 @@ static void r8a779f0_eth_serdes_write32(void __iomem *addr, u32 offs, u32 bank,
iowrite32(data, addr + offs);
}
+static u32 r8a779f0_eth_serdes_read32(void __iomem *addr, u32 offs, u32 bank)
+{
+ iowrite32(bank, addr + R8A779F0_ETH_SERDES_BANK_SELECT);
+
+ return ioread32(addr + offs);
+}
+
static int
r8a779f0_eth_serdes_reg_wait(struct r8a779f0_eth_serdes_channel *channel,
u32 offs, u32 bank, u32 mask, u32 expected)
@@ -92,17 +99,18 @@ r8a779f0_eth_serdes_common_setting(struct r8a779f0_eth_serdes_channel *channel)
{
struct r8a779f0_eth_serdes_drv_data *dd = channel->dd;
- switch (channel->phy_interface) {
- case PHY_INTERFACE_MODE_SGMII:
- r8a779f0_eth_serdes_write32(dd->addr, 0x0244, 0x180, 0x0097);
- r8a779f0_eth_serdes_write32(dd->addr, 0x01d0, 0x180, 0x0060);
- r8a779f0_eth_serdes_write32(dd->addr, 0x01d8, 0x180, 0x2200);
- r8a779f0_eth_serdes_write32(dd->addr, 0x01d4, 0x180, 0x0000);
- r8a779f0_eth_serdes_write32(dd->addr, 0x01e0, 0x180, 0x003d);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
+ /* Set combination mode */
+ r8a779f0_eth_serdes_write32(dd->addr, 0x0244, 0x180, 0x00d7);
+ r8a779f0_eth_serdes_write32(dd->addr, 0x01cc, 0x180, 0xc200);
+ r8a779f0_eth_serdes_write32(dd->addr, 0x01c4, 0x180, 0x0042);
+ r8a779f0_eth_serdes_write32(dd->addr, 0x01c8, 0x180, 0x0000);
+ r8a779f0_eth_serdes_write32(dd->addr, 0x01dc, 0x180, 0x002f);
+ r8a779f0_eth_serdes_write32(dd->addr, 0x01d0, 0x180, 0x0060);
+ r8a779f0_eth_serdes_write32(dd->addr, 0x01d8, 0x180, 0x2200);
+ r8a779f0_eth_serdes_write32(dd->addr, 0x01d4, 0x180, 0x0000);
+ r8a779f0_eth_serdes_write32(dd->addr, 0x01e0, 0x180, 0x003d);
+
+ return 0;
}
static int
@@ -155,6 +163,42 @@ r8a779f0_eth_serdes_chan_setting(struct r8a779f0_eth_serdes_channel *channel)
r8a779f0_eth_serdes_write32(channel->addr, 0x0028, 0x1f80, 0x07a1);
r8a779f0_eth_serdes_write32(channel->addr, 0x0000, 0x1f80, 0x0208);
break;
+
+ case PHY_INTERFACE_MODE_USXGMII:
+ r8a779f0_eth_serdes_write32(channel->addr, 0x001c, 0x300, 0x0000);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x0014, 0x380, 0x0050);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x0000, 0x380, 0x2200);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x001c, 0x380, 0x0400);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x01c0, 0x180, 0x0001);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x0248, 0x180, 0x056a);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x0258, 0x180, 0x0015);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x0144, 0x180, 0x1100);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x01a0, 0x180, 0x0001);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x00d0, 0x180, 0x0001);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x0150, 0x180, 0x0001);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x00c8, 0x180, 0x0300);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x0148, 0x180, 0x0300);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x0174, 0x180, 0x0000);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x0160, 0x180, 0x0004);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x01ac, 0x180, 0x0000);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x00c4, 0x180, 0x0310);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x00c8, 0x180, 0x0301);
+ ret = r8a779f0_eth_serdes_reg_wait(channel, 0x00c8, 0x180, BIT(0), 0);
+ if (ret)
+ return ret;
+ r8a779f0_eth_serdes_write32(channel->addr, 0x0148, 0x180, 0x0301);
+ ret = r8a779f0_eth_serdes_reg_wait(channel, 0x0148, 0x180, BIT(0), 0);
+ if (ret)
+ return ret;
+ r8a779f0_eth_serdes_write32(channel->addr, 0x00c4, 0x180, 0x1310);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x00d8, 0x180, 0x1800);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x00dc, 0x180, 0x0000);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x0000, 0x380, 0x2300);
+ ret = r8a779f0_eth_serdes_reg_wait(channel, 0x0000, 0x380, BIT(8), 0);
+ if (ret)
+ return ret;
+ break;
+
default:
return -EOPNOTSUPP;
}
@@ -179,6 +223,14 @@ r8a779f0_eth_serdes_chan_speed(struct r8a779f0_eth_serdes_channel *channel)
return ret;
r8a779f0_eth_serdes_write32(channel->addr, 0x0008, 0x1f80, 0x0000);
break;
+ case PHY_INTERFACE_MODE_USXGMII:
+ r8a779f0_eth_serdes_write32(channel->addr, 0x0000, 0x1f00, 0x0120);
+ usleep_range(10, 20);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x0000, 0x380, 0x2600);
+ ret = r8a779f0_eth_serdes_reg_wait(channel, 0x0000, 0x380, BIT(10), 0);
+ if (ret)
+ return ret;
+ break;
default:
return -EOPNOTSUPP;
}
@@ -274,6 +326,7 @@ static int r8a779f0_eth_serdes_hw_init_late(struct r8a779f0_eth_serdes_channel
*channel)
{
int ret;
+ u32 val;
ret = r8a779f0_eth_serdes_chan_setting(channel);
if (ret)
@@ -287,6 +340,26 @@ static int r8a779f0_eth_serdes_hw_init_late(struct r8a779f0_eth_serdes_channel
r8a779f0_eth_serdes_write32(channel->addr, 0x03d0, 0x380, 0x0000);
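+ /*
+ * Toggle BIT(8) of reg 0x00c0 and BIT(4) of reg 0x0144, each time
+ * waiting for the matching status bit (reg 0x0100 / 0x0180, bit 0)
+ * to assert and then deassert again. The register names are not
+ * documented here, so only the raw offsets are given.
+ */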
+ val = r8a779f0_eth_serdes_read32(channel->addr, 0x00c0, 0x180);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x00c0, 0x180, val | BIT(8));
+ ret = r8a779f0_eth_serdes_reg_wait(channel, 0x0100, 0x180, BIT(0), 1);
+ if (ret)
+ return ret;
+ r8a779f0_eth_serdes_write32(channel->addr, 0x00c0, 0x180, val & ~BIT(8));
+ ret = r8a779f0_eth_serdes_reg_wait(channel, 0x0100, 0x180, BIT(0), 0);
+ if (ret)
+ return ret;
+
+ val = r8a779f0_eth_serdes_read32(channel->addr, 0x0144, 0x180);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x0144, 0x180, val | BIT(4));
+ ret = r8a779f0_eth_serdes_reg_wait(channel, 0x0180, 0x180, BIT(0), 1);
+ if (ret)
+ return ret;
+ r8a779f0_eth_serdes_write32(channel->addr, 0x0144, 0x180, val & ~BIT(4));
+ ret = r8a779f0_eth_serdes_reg_wait(channel, 0x0180, 0x180, BIT(0), 0);
+ if (ret)
+ return ret;
+
return r8a779f0_eth_serdes_monitor_linkup(channel);
}
diff --git a/drivers/phy/rockchip/phy-rockchip-emmc.c b/drivers/phy/rockchip/phy-rockchip-emmc.c
index 20023f6eb994..5187983c58e5 100644
--- a/drivers/phy/rockchip/phy-rockchip-emmc.c
+++ b/drivers/phy/rockchip/phy-rockchip-emmc.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/hw_bitfield.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -21,7 +22,7 @@
 * only if BIT(x + 16) is set to 1 can BIT(x) be written.
*/
#define HIWORD_UPDATE(val, mask, shift) \
- ((val) << (shift) | (mask) << ((shift) + 16))
+ (FIELD_PREP_WM16((mask) << (shift), (val)))
/* Register definition */
#define GRF_EMMCPHY_CON0 0x0
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-csidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-csidphy.c
index 2ab99e1d47eb..c79fb53d8ee5 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-csidphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-csidphy.c
@@ -30,6 +30,8 @@
#define RK3568_GRF_VI_CON0 0x0340
#define RK3568_GRF_VI_CON1 0x0344
+#define RK3588_CSIDPHY_GRF_CON0 0x0000
+
/* PHY */
#define CSIDPHY_CTRL_LANE_ENABLE 0x00
#define CSIDPHY_CTRL_LANE_ENABLE_CK BIT(6)
@@ -67,6 +69,8 @@
#define RK1808_CSIDPHY_CLK_CALIB_EN 0x168
#define RK3568_CSIDPHY_CLK_CALIB_EN 0x168
+#define RESETS_MAX 2
+
/*
 * The higher 16 bits of this register are used for write protection;
 * only if BIT(x + 16) is set to 1 can BIT(x) be written.
@@ -87,10 +91,11 @@ struct dphy_reg {
u32 offset;
u32 mask;
u32 shift;
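+ /* set for initialized entries (RK3588 GRF con0 is at offset 0) */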
+ u8 valid;
};
#define PHY_REG(_offset, _width, _shift) \
- { .offset = _offset, .mask = BIT(_width) - 1, .shift = _shift, }
+ { .offset = _offset, .mask = BIT(_width) - 1, .shift = _shift, .valid = 1, }
static const struct dphy_reg rk1808_grf_dphy_regs[] = {
[GRF_DPHY_CSIPHY_FORCERXMODE] = PHY_REG(RK1808_GRF_PD_VI_CON_OFFSET, 4, 0),
@@ -114,6 +119,12 @@ static const struct dphy_reg rk3568_grf_dphy_regs[] = {
[GRF_DPHY_CSIPHY_CLKLANE_EN] = PHY_REG(RK3568_GRF_VI_CON0, 1, 8),
};
+static const struct dphy_reg rk3588_grf_dphy_regs[] = {
+ [GRF_DPHY_CSIPHY_FORCERXMODE] = PHY_REG(RK3588_CSIDPHY_GRF_CON0, 4, 0),
+ [GRF_DPHY_CSIPHY_DATALANE_EN] = PHY_REG(RK3588_CSIDPHY_GRF_CON0, 4, 4),
+ [GRF_DPHY_CSIPHY_CLKLANE_EN] = PHY_REG(RK3588_CSIDPHY_GRF_CON0, 1, 8),
+};
+
struct hsfreq_range {
u32 range_h;
u8 cfg_bit;
@@ -126,6 +137,8 @@ struct dphy_drv_data {
const struct hsfreq_range *hsfreq_ranges;
int num_hsfreq_ranges;
const struct dphy_reg *grf_regs;
+ const char *const *resets;
+ unsigned int resets_num;
};
struct rockchip_inno_csidphy {
@@ -133,7 +146,8 @@ struct rockchip_inno_csidphy {
void __iomem *phy_base;
struct clk *pclk;
struct regmap *grf;
- struct reset_control *rst;
+ struct reset_control_bulk_data resets[RESETS_MAX];
+ unsigned int resets_num;
const struct dphy_drv_data *drv_data;
struct phy_configure_opts_mipi_dphy config;
u8 hsfreq;
@@ -145,7 +159,7 @@ static inline void write_grf_reg(struct rockchip_inno_csidphy *priv,
const struct dphy_drv_data *drv_data = priv->drv_data;
const struct dphy_reg *reg = &drv_data->grf_regs[index];
- if (reg->offset)
+ if (reg->valid)
regmap_write(priv->grf, reg->offset,
HIWORD_UPDATE(value, reg->mask, reg->shift));
}
@@ -173,6 +187,15 @@ static const struct hsfreq_range rk3368_mipidphy_hsfreq_ranges[] = {
{1249, 0x0c}, {1349, 0x0d}, {1500, 0x0e}
};
+static const char *const rk3368_reset_names[] = {
+ "apb"
+};
+
+static const char *const rk3588_reset_names[] = {
+ "apb",
+ "phy"
+};
+
static void rockchip_inno_csidphy_ths_settle(struct rockchip_inno_csidphy *priv,
int hsfreq, int offset)
{
@@ -343,6 +366,8 @@ static const struct dphy_drv_data rk1808_mipidphy_drv_data = {
.hsfreq_ranges = rk1808_mipidphy_hsfreq_ranges,
.num_hsfreq_ranges = ARRAY_SIZE(rk1808_mipidphy_hsfreq_ranges),
.grf_regs = rk1808_grf_dphy_regs,
+ .resets = rk3368_reset_names,
+ .resets_num = ARRAY_SIZE(rk3368_reset_names),
};
static const struct dphy_drv_data rk3326_mipidphy_drv_data = {
@@ -352,6 +377,8 @@ static const struct dphy_drv_data rk3326_mipidphy_drv_data = {
.hsfreq_ranges = rk3326_mipidphy_hsfreq_ranges,
.num_hsfreq_ranges = ARRAY_SIZE(rk3326_mipidphy_hsfreq_ranges),
.grf_regs = rk3326_grf_dphy_regs,
+ .resets = rk3368_reset_names,
+ .resets_num = ARRAY_SIZE(rk3368_reset_names),
};
static const struct dphy_drv_data rk3368_mipidphy_drv_data = {
@@ -361,6 +388,8 @@ static const struct dphy_drv_data rk3368_mipidphy_drv_data = {
.hsfreq_ranges = rk3368_mipidphy_hsfreq_ranges,
.num_hsfreq_ranges = ARRAY_SIZE(rk3368_mipidphy_hsfreq_ranges),
.grf_regs = rk3368_grf_dphy_regs,
+ .resets = rk3368_reset_names,
+ .resets_num = ARRAY_SIZE(rk3368_reset_names),
};
static const struct dphy_drv_data rk3568_mipidphy_drv_data = {
@@ -370,6 +399,19 @@ static const struct dphy_drv_data rk3568_mipidphy_drv_data = {
.hsfreq_ranges = rk1808_mipidphy_hsfreq_ranges,
.num_hsfreq_ranges = ARRAY_SIZE(rk1808_mipidphy_hsfreq_ranges),
.grf_regs = rk3568_grf_dphy_regs,
+ .resets = rk3368_reset_names,
+ .resets_num = ARRAY_SIZE(rk3368_reset_names),
+};
+
+static const struct dphy_drv_data rk3588_mipidphy_drv_data = {
+ .pwrctl_offset = -1,
+ .ths_settle_offset = RK3568_CSIDPHY_CLK_WR_THS_SETTLE,
+ .calib_offset = RK3568_CSIDPHY_CLK_CALIB_EN,
+ .hsfreq_ranges = rk1808_mipidphy_hsfreq_ranges,
+ .num_hsfreq_ranges = ARRAY_SIZE(rk1808_mipidphy_hsfreq_ranges),
+ .grf_regs = rk3588_grf_dphy_regs,
+ .resets = rk3588_reset_names,
+ .resets_num = ARRAY_SIZE(rk3588_reset_names),
};
static const struct of_device_id rockchip_inno_csidphy_match_id[] = {
@@ -393,6 +435,10 @@ static const struct of_device_id rockchip_inno_csidphy_match_id[] = {
.compatible = "rockchip,rk3568-csi-dphy",
.data = &rk3568_mipidphy_drv_data,
},
+ {
+ .compatible = "rockchip,rk3588-csi-dphy",
+ .data = &rk3588_mipidphy_drv_data,
+ },
{}
};
MODULE_DEVICE_TABLE(of, rockchip_inno_csidphy_match_id);
@@ -403,6 +449,7 @@ static int rockchip_inno_csidphy_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct phy_provider *phy_provider;
struct phy *phy;
+ int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -434,10 +481,18 @@ static int rockchip_inno_csidphy_probe(struct platform_device *pdev)
return PTR_ERR(priv->pclk);
}
- priv->rst = devm_reset_control_get(dev, "apb");
- if (IS_ERR(priv->rst)) {
+ if (priv->drv_data->resets_num > RESETS_MAX) {
+ dev_err(dev, "invalid number of resets\n");
+ return -EINVAL;
+ }
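+ /* build the reset_control_bulk list from the per-SoC reset names */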
+ priv->resets_num = priv->drv_data->resets_num;
+ for (unsigned int i = 0; i < priv->resets_num; i++)
+ priv->resets[i].id = priv->drv_data->resets[i];
+ ret = devm_reset_control_bulk_get_exclusive(dev, priv->resets_num,
+ priv->resets);
+ if (ret) {
dev_err(dev, "failed to get system reset control\n");
- return PTR_ERR(priv->rst);
+ return ret;
}
phy = devm_phy_create(dev, NULL, &rockchip_inno_csidphy_ops);
diff --git a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
index ce91fb1d5167..a3ef19807b9e 100644
--- a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
@@ -20,79 +20,120 @@
#define REF_CLOCK_25MHz (25 * HZ_PER_MHZ)
#define REF_CLOCK_100MHz (100 * HZ_PER_MHZ)
-/* COMBO PHY REG */
-#define PHYREG6 0x14
-#define PHYREG6_PLL_DIV_MASK GENMASK(7, 6)
-#define PHYREG6_PLL_DIV_SHIFT 6
-#define PHYREG6_PLL_DIV_2 1
-
-#define PHYREG7 0x18
-#define PHYREG7_TX_RTERM_MASK GENMASK(7, 4)
-#define PHYREG7_TX_RTERM_SHIFT 4
-#define PHYREG7_TX_RTERM_50OHM 8
-#define PHYREG7_RX_RTERM_MASK GENMASK(3, 0)
-#define PHYREG7_RX_RTERM_SHIFT 0
-#define PHYREG7_RX_RTERM_44OHM 15
-
-#define PHYREG8 0x1C
-#define PHYREG8_SSC_EN BIT(4)
-
-#define PHYREG10 0x24
-#define PHYREG10_SSC_PCM_MASK GENMASK(3, 0)
-#define PHYREG10_SSC_PCM_3500PPM 7
-
-#define PHYREG11 0x28
-#define PHYREG11_SU_TRIM_0_7 0xF0
-
-#define PHYREG12 0x2C
-#define PHYREG12_PLL_LPF_ADJ_VALUE 4
-
-#define PHYREG13 0x30
-#define PHYREG13_RESISTER_MASK GENMASK(5, 4)
-#define PHYREG13_RESISTER_SHIFT 0x4
-#define PHYREG13_RESISTER_HIGH_Z 3
-#define PHYREG13_CKRCV_AMP0 BIT(7)
-
-#define PHYREG14 0x34
-#define PHYREG14_CKRCV_AMP1 BIT(0)
-
-#define PHYREG15 0x38
-#define PHYREG15_CTLE_EN BIT(0)
-#define PHYREG15_SSC_CNT_MASK GENMASK(7, 6)
-#define PHYREG15_SSC_CNT_SHIFT 6
-#define PHYREG15_SSC_CNT_VALUE 1
-
-#define PHYREG16 0x3C
-#define PHYREG16_SSC_CNT_VALUE 0x5f
-
-#define PHYREG17 0x40
-
-#define PHYREG18 0x44
-#define PHYREG18_PLL_LOOP 0x32
-
-#define PHYREG21 0x50
-#define PHYREG21_RX_SQUELCH_VAL 0x0D
-
-#define PHYREG27 0x6C
-#define PHYREG27_RX_TRIM_RK3588 0x4C
-
-#define PHYREG30 0x74
-
-#define PHYREG32 0x7C
-#define PHYREG32_SSC_MASK GENMASK(7, 4)
-#define PHYREG32_SSC_DIR_MASK GENMASK(5, 4)
-#define PHYREG32_SSC_DIR_SHIFT 4
-#define PHYREG32_SSC_UPWARD 0
-#define PHYREG32_SSC_DOWNWARD 1
-#define PHYREG32_SSC_OFFSET_MASK GENMASK(7, 6)
-#define PHYREG32_SSC_OFFSET_SHIFT 6
-#define PHYREG32_SSC_OFFSET_500PPM 1
-
-#define PHYREG33 0x80
-#define PHYREG33_PLL_KVCO_MASK GENMASK(4, 2)
-#define PHYREG33_PLL_KVCO_SHIFT 2
-#define PHYREG33_PLL_KVCO_VALUE 2
-#define PHYREG33_PLL_KVCO_VALUE_RK3576 4
+/* RK3528 COMBO PHY REG */
+#define RK3528_PHYREG6 0x18
+#define RK3528_PHYREG6_PLL_KVCO GENMASK(12, 10)
+#define RK3528_PHYREG6_PLL_KVCO_VALUE 0x2
+#define RK3528_PHYREG6_SSC_DIR GENMASK(5, 4)
+#define RK3528_PHYREG6_SSC_UPWARD 0
+#define RK3528_PHYREG6_SSC_DOWNWARD 1
+
+#define RK3528_PHYREG40 0x100
+#define RK3528_PHYREG40_SSC_EN BIT(20)
+#define RK3528_PHYREG40_SSC_CNT GENMASK(10, 0)
+#define RK3528_PHYREG40_SSC_CNT_VALUE 0x17d
+
+#define RK3528_PHYREG42 0x108
+#define RK3528_PHYREG42_CKDRV_CLK_SEL BIT(29)
+#define RK3528_PHYREG42_CKDRV_CLK_PLL 0
+#define RK3528_PHYREG42_CKDRV_CLK_CKRCV 1
+#define RK3528_PHYREG42_PLL_LPF_R1_ADJ GENMASK(10, 7)
+#define RK3528_PHYREG42_PLL_LPF_R1_ADJ_VALUE 0x9
+#define RK3528_PHYREG42_PLL_CHGPUMP_CUR_ADJ GENMASK(6, 4)
+#define RK3528_PHYREG42_PLL_CHGPUMP_CUR_ADJ_VALUE 0x7
+#define RK3528_PHYREG42_PLL_KVCO_ADJ GENMASK(2, 0)
+#define RK3528_PHYREG42_PLL_KVCO_ADJ_VALUE 0x0
+
+#define RK3528_PHYREG80 0x200
+#define RK3528_PHYREG80_CTLE_EN BIT(17)
+
+#define RK3528_PHYREG81 0x204
+#define RK3528_PHYREG81_CDR_PHASE_PATH_GAIN_2X BIT(5)
+#define RK3528_PHYREG81_SLEW_RATE_CTRL GENMASK(2, 0)
+#define RK3528_PHYREG81_SLEW_RATE_CTRL_SLOW 0x7
+
+#define RK3528_PHYREG83 0x20c
+#define RK3528_PHYREG83_RX_SQUELCH GENMASK(2, 0)
+#define RK3528_PHYREG83_RX_SQUELCH_VALUE 0x6
+
+#define RK3528_PHYREG86 0x218
+#define RK3528_PHYREG86_RTERM_DET_CLK_EN BIT(14)
+
+/* RK3568 COMBO PHY REG */
+#define RK3568_PHYREG6 0x14
+#define RK3568_PHYREG6_PLL_DIV_MASK GENMASK(7, 6)
+#define RK3568_PHYREG6_PLL_DIV_SHIFT 6
+#define RK3568_PHYREG6_PLL_DIV_2 1
+
+#define RK3568_PHYREG7 0x18
+#define RK3568_PHYREG7_TX_RTERM_MASK GENMASK(7, 4)
+#define RK3568_PHYREG7_TX_RTERM_SHIFT 4
+#define RK3568_PHYREG7_TX_RTERM_50OHM 8
+#define RK3568_PHYREG7_RX_RTERM_MASK GENMASK(3, 0)
+#define RK3568_PHYREG7_RX_RTERM_SHIFT 0
+#define RK3568_PHYREG7_RX_RTERM_44OHM 15
+
+#define RK3568_PHYREG8 0x1C
+#define RK3568_PHYREG8_SSC_EN BIT(4)
+
+#define RK3568_PHYREG11 0x28
+#define RK3568_PHYREG11_SU_TRIM_0_7 0xF0
+
+#define RK3568_PHYREG12 0x2C
+#define RK3568_PHYREG12_PLL_LPF_ADJ_VALUE 4
+
+#define RK3568_PHYREG13 0x30
+#define RK3568_PHYREG13_RESISTER_MASK GENMASK(5, 4)
+#define RK3568_PHYREG13_RESISTER_SHIFT 0x4
+#define RK3568_PHYREG13_RESISTER_HIGH_Z 3
+#define RK3568_PHYREG13_CKRCV_AMP0 BIT(7)
+
+#define RK3568_PHYREG14 0x34
+#define RK3568_PHYREG14_CKRCV_AMP1 BIT(0)
+
+#define RK3568_PHYREG15 0x38
+#define RK3568_PHYREG15_CTLE_EN BIT(0)
+#define RK3568_PHYREG15_SSC_CNT_MASK GENMASK(7, 6)
+#define RK3568_PHYREG15_SSC_CNT_SHIFT 6
+#define RK3568_PHYREG15_SSC_CNT_VALUE 1
+
+#define RK3568_PHYREG16 0x3C
+#define RK3568_PHYREG16_SSC_CNT_VALUE 0x5f
+
+#define RK3568_PHYREG18 0x44
+#define RK3568_PHYREG18_PLL_LOOP 0x32
+
+#define RK3568_PHYREG32 0x7C
+#define RK3568_PHYREG32_SSC_MASK GENMASK(7, 4)
+#define RK3568_PHYREG32_SSC_DIR_MASK GENMASK(5, 4)
+#define RK3568_PHYREG32_SSC_DIR_SHIFT 4
+#define RK3568_PHYREG32_SSC_UPWARD 0
+#define RK3568_PHYREG32_SSC_DOWNWARD 1
+#define RK3568_PHYREG32_SSC_OFFSET_MASK GENMASK(7, 6)
+#define RK3568_PHYREG32_SSC_OFFSET_SHIFT 6
+#define RK3568_PHYREG32_SSC_OFFSET_500PPM 1
+
+#define RK3568_PHYREG33 0x80
+#define RK3568_PHYREG33_PLL_KVCO_MASK GENMASK(4, 2)
+#define RK3568_PHYREG33_PLL_KVCO_SHIFT 2
+#define RK3568_PHYREG33_PLL_KVCO_VALUE 2
+#define RK3576_PHYREG33_PLL_KVCO_VALUE 4
+
+/* RK3588 COMBO PHY registers */
+#define RK3588_PHYREG27 0x6C
+#define RK3588_PHYREG27_RX_TRIM 0x4C
+
+/* RK3576 COMBO PHY registers */
+#define RK3576_PHYREG10 0x24
+#define RK3576_PHYREG10_SSC_PCM_MASK GENMASK(3, 0)
+#define RK3576_PHYREG10_SSC_PCM_3500PPM 7
+
+#define RK3576_PHYREG17 0x40
+
+#define RK3576_PHYREG21 0x50
+#define RK3576_PHYREG21_RX_SQUELCH_VAL 0x0D
+
+#define RK3576_PHYREG30 0x74
struct rockchip_combphy_priv;
@@ -137,6 +178,8 @@ struct rockchip_combphy_grfcfg {
struct combphy_reg pipe_xpcs_phy_ready;
struct combphy_reg pipe_pcie1l0_sel;
struct combphy_reg pipe_pcie1l1_sel;
+ struct combphy_reg u3otg0_port_en;
+ struct combphy_reg u3otg1_port_en;
};
struct rockchip_combphy_cfg {
@@ -396,6 +439,150 @@ static int rockchip_combphy_probe(struct platform_device *pdev)
return PTR_ERR_OR_ZERO(phy_provider);
}
+static int rk3528_combphy_cfg(struct rockchip_combphy_priv *priv)
+{
+ const struct rockchip_combphy_grfcfg *cfg = priv->cfg->grfcfg;
+ unsigned long rate;
+ u32 val;
+
+ /* Set SSC downward spread spectrum */
+ val = FIELD_PREP(RK3528_PHYREG6_SSC_DIR, RK3528_PHYREG6_SSC_DOWNWARD);
+ rockchip_combphy_updatel(priv, RK3528_PHYREG6_SSC_DIR, val, RK3528_PHYREG6);
+
+ switch (priv->type) {
+ case PHY_TYPE_PCIE:
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->con0_for_pcie, true);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->con1_for_pcie, true);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->con2_for_pcie, true);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->con3_for_pcie, true);
+ break;
+ case PHY_TYPE_USB3:
+ /* Enable adaptive CTLE for USB3.0 Rx */
+ rockchip_combphy_updatel(priv, RK3528_PHYREG80_CTLE_EN, RK3528_PHYREG80_CTLE_EN,
+ RK3528_PHYREG80);
+
+ /* Set slow slew rate control for PI */
+ val = FIELD_PREP(RK3528_PHYREG81_SLEW_RATE_CTRL,
+ RK3528_PHYREG81_SLEW_RATE_CTRL_SLOW);
+ rockchip_combphy_updatel(priv, RK3528_PHYREG81_SLEW_RATE_CTRL, val,
+ RK3528_PHYREG81);
+
+ /* Set CDR phase path with 2x gain */
+ rockchip_combphy_updatel(priv, RK3528_PHYREG81_CDR_PHASE_PATH_GAIN_2X,
+ RK3528_PHYREG81_CDR_PHASE_PATH_GAIN_2X, RK3528_PHYREG81);
+
+ /* Set Rx squelch input filter bandwidth */
+ val = FIELD_PREP(RK3528_PHYREG83_RX_SQUELCH, RK3528_PHYREG83_RX_SQUELCH_VALUE);
+ rockchip_combphy_updatel(priv, RK3528_PHYREG83_RX_SQUELCH, val, RK3528_PHYREG83);
+
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_txcomp_sel, false);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_txelec_sel, false);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->usb_mode_set, true);
+ rockchip_combphy_param_write(priv->pipe_grf, &cfg->u3otg0_port_en, true);
+ break;
+ default:
+ dev_err(priv->dev, "incompatible PHY type\n");
+ return -EINVAL;
+ }
+
+ rate = clk_get_rate(priv->refclk);
+
+ switch (rate) {
+ case REF_CLOCK_24MHz:
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_24m, true);
+ if (priv->type == PHY_TYPE_USB3) {
+ /* Set ssc_cnt[10:0]=00101111101 & 31.5KHz */
+ val = FIELD_PREP(RK3528_PHYREG40_SSC_CNT, RK3528_PHYREG40_SSC_CNT_VALUE);
+ rockchip_combphy_updatel(priv, RK3528_PHYREG40_SSC_CNT, val,
+ RK3528_PHYREG40);
+ } else if (priv->type == PHY_TYPE_PCIE) {
+ /* tx_trim[14]=1, Enable the counting clock of the rterm detect */
+ rockchip_combphy_updatel(priv, RK3528_PHYREG86_RTERM_DET_CLK_EN,
+ RK3528_PHYREG86_RTERM_DET_CLK_EN, RK3528_PHYREG86);
+ }
+ break;
+ case REF_CLOCK_100MHz:
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_100m, true);
+ if (priv->type == PHY_TYPE_PCIE) {
+ /* PLL KVCO tuning fine */
+ val = FIELD_PREP(RK3528_PHYREG6_PLL_KVCO, RK3528_PHYREG6_PLL_KVCO_VALUE);
+ rockchip_combphy_updatel(priv, RK3528_PHYREG6_PLL_KVCO, val,
+ RK3528_PHYREG6);
+
+ /* su_trim[6:4]=111, [10:7]=1001, [2:0]=000, swing 650mv */
+ writel(0x570804f0, priv->mmio + RK3528_PHYREG42);
+ }
+ break;
+ default:
+ dev_err(priv->dev, "Unsupported rate: %lu\n", rate);
+ return -EINVAL;
+ }
+
+ if (device_property_read_bool(priv->dev, "rockchip,ext-refclk")) {
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_ext, true);
+
+ if (priv->type == PHY_TYPE_PCIE && rate == REF_CLOCK_100MHz) {
+ val = FIELD_PREP(RK3528_PHYREG42_CKDRV_CLK_SEL,
+ RK3528_PHYREG42_CKDRV_CLK_CKRCV);
+ val |= FIELD_PREP(RK3528_PHYREG42_PLL_LPF_R1_ADJ,
+ RK3528_PHYREG42_PLL_LPF_R1_ADJ_VALUE);
+ val |= FIELD_PREP(RK3528_PHYREG42_PLL_CHGPUMP_CUR_ADJ,
+ RK3528_PHYREG42_PLL_CHGPUMP_CUR_ADJ_VALUE);
+ val |= FIELD_PREP(RK3528_PHYREG42_PLL_KVCO_ADJ,
+ RK3528_PHYREG42_PLL_KVCO_ADJ_VALUE);
+ rockchip_combphy_updatel(priv,
+ RK3528_PHYREG42_CKDRV_CLK_SEL |
+ RK3528_PHYREG42_PLL_LPF_R1_ADJ |
+ RK3528_PHYREG42_PLL_CHGPUMP_CUR_ADJ |
+ RK3528_PHYREG42_PLL_KVCO_ADJ,
+ val, RK3528_PHYREG42);
+
+ val = FIELD_PREP(RK3528_PHYREG6_PLL_KVCO, RK3528_PHYREG6_PLL_KVCO_VALUE);
+ rockchip_combphy_updatel(priv, RK3528_PHYREG6_PLL_KVCO, val,
+ RK3528_PHYREG6);
+ }
+ }
+
+ if (priv->type == PHY_TYPE_PCIE) {
+ if (device_property_read_bool(priv->dev, "rockchip,enable-ssc"))
+ rockchip_combphy_updatel(priv, RK3528_PHYREG40_SSC_EN,
+ RK3528_PHYREG40_SSC_EN, RK3528_PHYREG40);
+ }
+
+ return 0;
+}
+
+static const struct rockchip_combphy_grfcfg rk3528_combphy_grfcfgs = {
+ /* pipe-phy-grf */
+ .pcie_mode_set = { 0x0000, 5, 0, 0x00, 0x11 },
+ .usb_mode_set = { 0x0000, 5, 0, 0x00, 0x04 },
+ .pipe_rxterm_set = { 0x0000, 12, 12, 0x00, 0x01 },
+ .pipe_txelec_set = { 0x0004, 1, 1, 0x00, 0x01 },
+ .pipe_txcomp_set = { 0x0004, 4, 4, 0x00, 0x01 },
+ .pipe_clk_24m = { 0x0004, 14, 13, 0x00, 0x00 },
+ .pipe_clk_100m = { 0x0004, 14, 13, 0x00, 0x02 },
+ .pipe_rxterm_sel = { 0x0008, 8, 8, 0x00, 0x01 },
+ .pipe_txelec_sel = { 0x0008, 12, 12, 0x00, 0x01 },
+ .pipe_txcomp_sel = { 0x0008, 15, 15, 0x00, 0x01 },
+ .pipe_clk_ext = { 0x000c, 9, 8, 0x02, 0x01 },
+ .pipe_phy_status = { 0x0034, 6, 6, 0x01, 0x00 },
+ .con0_for_pcie = { 0x0000, 15, 0, 0x00, 0x110 },
+ .con1_for_pcie = { 0x0004, 15, 0, 0x00, 0x00 },
+ .con2_for_pcie = { 0x0008, 15, 0, 0x00, 0x101 },
+ .con3_for_pcie = { 0x000c, 15, 0, 0x00, 0x0200 },
+ /* pipe-grf */
+ .u3otg0_port_en = { 0x0044, 15, 0, 0x0181, 0x1100 },
+};
+
+static const struct rockchip_combphy_cfg rk3528_combphy_cfgs = {
+ .num_phys = 1,
+ .phy_ids = {
+ 0xffdc0000,
+ },
+ .grfcfg = &rk3528_combphy_grfcfgs,
+ .combphy_cfg = rk3528_combphy_cfg,
+};
+
static int rk3562_combphy_cfg(struct rockchip_combphy_priv *priv)
{
const struct rockchip_combphy_grfcfg *cfg = priv->cfg->grfcfg;
@@ -405,9 +592,8 @@ static int rk3562_combphy_cfg(struct rockchip_combphy_priv *priv)
switch (priv->type) {
case PHY_TYPE_PCIE:
/* Set SSC downward spread spectrum */
- rockchip_combphy_updatel(priv, PHYREG32_SSC_MASK,
- PHYREG32_SSC_DOWNWARD << PHYREG32_SSC_DIR_SHIFT,
- PHYREG32);
+ val = RK3568_PHYREG32_SSC_DOWNWARD << RK3568_PHYREG32_SSC_DIR_SHIFT;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG32_SSC_MASK, val, RK3568_PHYREG32);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con0_for_pcie, true);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con1_for_pcie, true);
@@ -416,29 +602,30 @@ static int rk3562_combphy_cfg(struct rockchip_combphy_priv *priv)
break;
case PHY_TYPE_USB3:
/* Set SSC downward spread spectrum */
- rockchip_combphy_updatel(priv, PHYREG32_SSC_MASK,
- PHYREG32_SSC_DOWNWARD << PHYREG32_SSC_DIR_SHIFT,
- PHYREG32);
+ val = RK3568_PHYREG32_SSC_DOWNWARD << RK3568_PHYREG32_SSC_DIR_SHIFT;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG32_SSC_MASK, val,
+ RK3568_PHYREG32);
/* Enable adaptive CTLE for USB3.0 Rx */
- rockchip_combphy_updatel(priv, PHYREG15_CTLE_EN,
- PHYREG15_CTLE_EN, PHYREG15);
+ rockchip_combphy_updatel(priv, RK3568_PHYREG15_CTLE_EN,
+ RK3568_PHYREG15_CTLE_EN, RK3568_PHYREG15);
/* Set PLL KVCO fine tuning signals */
- rockchip_combphy_updatel(priv, PHYREG33_PLL_KVCO_MASK, BIT(3), PHYREG33);
+ rockchip_combphy_updatel(priv, RK3568_PHYREG33_PLL_KVCO_MASK,
+ BIT(3), RK3568_PHYREG33);
/* Set PLL LPF R1 to su_trim[10:7]=1001 */
- writel(PHYREG12_PLL_LPF_ADJ_VALUE, priv->mmio + PHYREG12);
+ writel(RK3568_PHYREG12_PLL_LPF_ADJ_VALUE, priv->mmio + RK3568_PHYREG12);
/* Set PLL input clock divider 1/2 */
- val = FIELD_PREP(PHYREG6_PLL_DIV_MASK, PHYREG6_PLL_DIV_2);
- rockchip_combphy_updatel(priv, PHYREG6_PLL_DIV_MASK, val, PHYREG6);
+ val = FIELD_PREP(RK3568_PHYREG6_PLL_DIV_MASK, RK3568_PHYREG6_PLL_DIV_2);
+ rockchip_combphy_updatel(priv, RK3568_PHYREG6_PLL_DIV_MASK, val, RK3568_PHYREG6);
/* Set PLL loop divider */
- writel(PHYREG18_PLL_LOOP, priv->mmio + PHYREG18);
+ writel(RK3568_PHYREG18_PLL_LOOP, priv->mmio + RK3568_PHYREG18);
/* Set PLL KVCO to min and set PLL charge pump current to max */
- writel(PHYREG11_SU_TRIM_0_7, priv->mmio + PHYREG11);
+ writel(RK3568_PHYREG11_SU_TRIM_0_7, priv->mmio + RK3568_PHYREG11);
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_sel_usb, true);
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_txcomp_sel, false);
@@ -456,11 +643,12 @@ static int rk3562_combphy_cfg(struct rockchip_combphy_priv *priv)
case REF_CLOCK_24MHz:
if (priv->type == PHY_TYPE_USB3) {
/* Set ssc_cnt[9:0]=0101111101 & 31.5KHz */
- val = FIELD_PREP(PHYREG15_SSC_CNT_MASK, PHYREG15_SSC_CNT_VALUE);
- rockchip_combphy_updatel(priv, PHYREG15_SSC_CNT_MASK,
- val, PHYREG15);
+ val = FIELD_PREP(RK3568_PHYREG15_SSC_CNT_MASK,
+ RK3568_PHYREG15_SSC_CNT_VALUE);
+ rockchip_combphy_updatel(priv, RK3568_PHYREG15_SSC_CNT_MASK,
+ val, RK3568_PHYREG15);
- writel(PHYREG16_SSC_CNT_VALUE, priv->mmio + PHYREG16);
+ writel(RK3568_PHYREG16_SSC_CNT_VALUE, priv->mmio + RK3568_PHYREG16);
}
break;
case REF_CLOCK_25MHz:
@@ -470,19 +658,20 @@ static int rk3562_combphy_cfg(struct rockchip_combphy_priv *priv)
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_100m, true);
if (priv->type == PHY_TYPE_PCIE) {
/* PLL KVCO tuning fine */
- val = FIELD_PREP(PHYREG33_PLL_KVCO_MASK, PHYREG33_PLL_KVCO_VALUE);
- rockchip_combphy_updatel(priv, PHYREG33_PLL_KVCO_MASK,
- val, PHYREG33);
+ val = FIELD_PREP(RK3568_PHYREG33_PLL_KVCO_MASK,
+ RK3568_PHYREG33_PLL_KVCO_VALUE);
+ rockchip_combphy_updatel(priv, RK3568_PHYREG33_PLL_KVCO_MASK,
+ val, RK3568_PHYREG33);
/* Enable controlling random jitter, aka RMJ */
- writel(0x4, priv->mmio + PHYREG12);
+ writel(0x4, priv->mmio + RK3568_PHYREG12);
- val = PHYREG6_PLL_DIV_2 << PHYREG6_PLL_DIV_SHIFT;
- rockchip_combphy_updatel(priv, PHYREG6_PLL_DIV_MASK,
- val, PHYREG6);
+ val = RK3568_PHYREG6_PLL_DIV_2 << RK3568_PHYREG6_PLL_DIV_SHIFT;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG6_PLL_DIV_MASK,
+ val, RK3568_PHYREG6);
- writel(0x32, priv->mmio + PHYREG18);
- writel(0xf0, priv->mmio + PHYREG11);
+ writel(0x32, priv->mmio + RK3568_PHYREG18);
+ writel(0xf0, priv->mmio + RK3568_PHYREG11);
}
break;
default:
@@ -493,20 +682,21 @@ static int rk3562_combphy_cfg(struct rockchip_combphy_priv *priv)
if (priv->ext_refclk) {
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_ext, true);
if (priv->type == PHY_TYPE_PCIE && rate == REF_CLOCK_100MHz) {
- val = PHYREG13_RESISTER_HIGH_Z << PHYREG13_RESISTER_SHIFT;
- val |= PHYREG13_CKRCV_AMP0;
- rockchip_combphy_updatel(priv, PHYREG13_RESISTER_MASK, val, PHYREG13);
-
- val = readl(priv->mmio + PHYREG14);
- val |= PHYREG14_CKRCV_AMP1;
- writel(val, priv->mmio + PHYREG14);
+ val = RK3568_PHYREG13_RESISTER_HIGH_Z << RK3568_PHYREG13_RESISTER_SHIFT;
+ val |= RK3568_PHYREG13_CKRCV_AMP0;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG13_RESISTER_MASK, val,
+ RK3568_PHYREG13);
+
+ val = readl(priv->mmio + RK3568_PHYREG14);
+ val |= RK3568_PHYREG14_CKRCV_AMP1;
+ writel(val, priv->mmio + RK3568_PHYREG14);
}
}
if (priv->enable_ssc) {
- val = readl(priv->mmio + PHYREG8);
- val |= PHYREG8_SSC_EN;
- writel(val, priv->mmio + PHYREG8);
+ val = readl(priv->mmio + RK3568_PHYREG8);
+ val |= RK3568_PHYREG8_SSC_EN;
+ writel(val, priv->mmio + RK3568_PHYREG8);
}
return 0;
@@ -553,9 +743,9 @@ static int rk3568_combphy_cfg(struct rockchip_combphy_priv *priv)
switch (priv->type) {
case PHY_TYPE_PCIE:
/* Set SSC downward spread spectrum. */
- rockchip_combphy_updatel(priv, PHYREG32_SSC_MASK,
- PHYREG32_SSC_DOWNWARD << PHYREG32_SSC_DIR_SHIFT,
- PHYREG32);
+ val = RK3568_PHYREG32_SSC_DOWNWARD << RK3568_PHYREG32_SSC_DIR_SHIFT;
+
+ rockchip_combphy_updatel(priv, RK3568_PHYREG32_SSC_MASK, val, RK3568_PHYREG32);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con0_for_pcie, true);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con1_for_pcie, true);
@@ -565,49 +755,55 @@ static int rk3568_combphy_cfg(struct rockchip_combphy_priv *priv)
case PHY_TYPE_USB3:
/* Set SSC downward spread spectrum. */
- rockchip_combphy_updatel(priv, PHYREG32_SSC_MASK,
- PHYREG32_SSC_DOWNWARD << PHYREG32_SSC_DIR_SHIFT,
- PHYREG32);
+ val = RK3568_PHYREG32_SSC_DOWNWARD << RK3568_PHYREG32_SSC_DIR_SHIFT;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG32_SSC_MASK, val, RK3568_PHYREG32);
/* Enable adaptive CTLE for USB3.0 Rx. */
- val = readl(priv->mmio + PHYREG15);
- val |= PHYREG15_CTLE_EN;
- writel(val, priv->mmio + PHYREG15);
+ val = readl(priv->mmio + RK3568_PHYREG15);
+ val |= RK3568_PHYREG15_CTLE_EN;
+ writel(val, priv->mmio + RK3568_PHYREG15);
/* Set PLL KVCO fine tuning signals. */
- rockchip_combphy_updatel(priv, PHYREG33_PLL_KVCO_MASK,
- PHYREG33_PLL_KVCO_VALUE << PHYREG33_PLL_KVCO_SHIFT,
- PHYREG33);
+ val = RK3568_PHYREG33_PLL_KVCO_VALUE << RK3568_PHYREG33_PLL_KVCO_SHIFT;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG33_PLL_KVCO_MASK, val, RK3568_PHYREG33);
/* Enable controlling random jitter. */
- writel(PHYREG12_PLL_LPF_ADJ_VALUE, priv->mmio + PHYREG12);
+ writel(RK3568_PHYREG12_PLL_LPF_ADJ_VALUE, priv->mmio + RK3568_PHYREG12);
/* Set PLL input clock divider 1/2. */
- rockchip_combphy_updatel(priv, PHYREG6_PLL_DIV_MASK,
- PHYREG6_PLL_DIV_2 << PHYREG6_PLL_DIV_SHIFT,
- PHYREG6);
+ rockchip_combphy_updatel(priv, RK3568_PHYREG6_PLL_DIV_MASK,
+ RK3568_PHYREG6_PLL_DIV_2 << RK3568_PHYREG6_PLL_DIV_SHIFT,
+ RK3568_PHYREG6);
- writel(PHYREG18_PLL_LOOP, priv->mmio + PHYREG18);
- writel(PHYREG11_SU_TRIM_0_7, priv->mmio + PHYREG11);
+ writel(RK3568_PHYREG18_PLL_LOOP, priv->mmio + RK3568_PHYREG18);
+ writel(RK3568_PHYREG11_SU_TRIM_0_7, priv->mmio + RK3568_PHYREG11);
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_sel_usb, true);
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_txcomp_sel, false);
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_txelec_sel, false);
rockchip_combphy_param_write(priv->phy_grf, &cfg->usb_mode_set, true);
+ switch (priv->id) {
+ case 0:
+ rockchip_combphy_param_write(priv->pipe_grf, &cfg->u3otg0_port_en, true);
+ break;
+ case 1:
+ rockchip_combphy_param_write(priv->pipe_grf, &cfg->u3otg1_port_en, true);
+ break;
+ }
break;
case PHY_TYPE_SATA:
/* Enable adaptive CTLE for SATA Rx. */
- val = readl(priv->mmio + PHYREG15);
- val |= PHYREG15_CTLE_EN;
- writel(val, priv->mmio + PHYREG15);
+ val = readl(priv->mmio + RK3568_PHYREG15);
+ val |= RK3568_PHYREG15_CTLE_EN;
+ writel(val, priv->mmio + RK3568_PHYREG15);
/*
* Set tx_rterm=50ohm and rx_rterm=44ohm for SATA.
* 0: 60ohm, 8: 50ohm, 15: 44ohm (in steps of about 1ohm)
*/
- val = PHYREG7_TX_RTERM_50OHM << PHYREG7_TX_RTERM_SHIFT;
- val |= PHYREG7_RX_RTERM_44OHM << PHYREG7_RX_RTERM_SHIFT;
- writel(val, priv->mmio + PHYREG7);
+ val = RK3568_PHYREG7_TX_RTERM_50OHM << RK3568_PHYREG7_TX_RTERM_SHIFT;
+ val |= RK3568_PHYREG7_RX_RTERM_44OHM << RK3568_PHYREG7_RX_RTERM_SHIFT;
+ writel(val, priv->mmio + RK3568_PHYREG7);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con0_for_sata, true);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con1_for_sata, true);
@@ -642,11 +838,11 @@ static int rk3568_combphy_cfg(struct rockchip_combphy_priv *priv)
case REF_CLOCK_24MHz:
if (priv->type == PHY_TYPE_USB3 || priv->type == PHY_TYPE_SATA) {
/* Set ssc_cnt[9:0]=0101111101 & 31.5KHz. */
- val = PHYREG15_SSC_CNT_VALUE << PHYREG15_SSC_CNT_SHIFT;
- rockchip_combphy_updatel(priv, PHYREG15_SSC_CNT_MASK,
- val, PHYREG15);
+ val = RK3568_PHYREG15_SSC_CNT_VALUE << RK3568_PHYREG15_SSC_CNT_SHIFT;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG15_SSC_CNT_MASK,
+ val, RK3568_PHYREG15);
- writel(PHYREG16_SSC_CNT_VALUE, priv->mmio + PHYREG16);
+ writel(RK3568_PHYREG16_SSC_CNT_VALUE, priv->mmio + RK3568_PHYREG16);
}
break;
@@ -658,24 +854,26 @@ static int rk3568_combphy_cfg(struct rockchip_combphy_priv *priv)
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_100m, true);
if (priv->type == PHY_TYPE_PCIE) {
/* PLL KVCO fine tuning. */
- val = PHYREG33_PLL_KVCO_VALUE << PHYREG33_PLL_KVCO_SHIFT;
- rockchip_combphy_updatel(priv, PHYREG33_PLL_KVCO_MASK,
- val, PHYREG33);
+ val = RK3568_PHYREG33_PLL_KVCO_VALUE << RK3568_PHYREG33_PLL_KVCO_SHIFT;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG33_PLL_KVCO_MASK,
+ val, RK3568_PHYREG33);
/* Enable controlling random jitter. */
- writel(PHYREG12_PLL_LPF_ADJ_VALUE, priv->mmio + PHYREG12);
+ writel(RK3568_PHYREG12_PLL_LPF_ADJ_VALUE, priv->mmio + RK3568_PHYREG12);
- val = PHYREG6_PLL_DIV_2 << PHYREG6_PLL_DIV_SHIFT;
- rockchip_combphy_updatel(priv, PHYREG6_PLL_DIV_MASK,
- val, PHYREG6);
+ val = RK3568_PHYREG6_PLL_DIV_2 << RK3568_PHYREG6_PLL_DIV_SHIFT;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG6_PLL_DIV_MASK,
+ val, RK3568_PHYREG6);
- writel(PHYREG18_PLL_LOOP, priv->mmio + PHYREG18);
- writel(PHYREG11_SU_TRIM_0_7, priv->mmio + PHYREG11);
+ writel(RK3568_PHYREG18_PLL_LOOP, priv->mmio + RK3568_PHYREG18);
+ writel(RK3568_PHYREG11_SU_TRIM_0_7, priv->mmio + RK3568_PHYREG11);
} else if (priv->type == PHY_TYPE_SATA) {
/* downward spread spectrum +500ppm */
- val = PHYREG32_SSC_DOWNWARD << PHYREG32_SSC_DIR_SHIFT;
- val |= PHYREG32_SSC_OFFSET_500PPM << PHYREG32_SSC_OFFSET_SHIFT;
- rockchip_combphy_updatel(priv, PHYREG32_SSC_MASK, val, PHYREG32);
+ val = RK3568_PHYREG32_SSC_DOWNWARD << RK3568_PHYREG32_SSC_DIR_SHIFT;
+ val |= RK3568_PHYREG32_SSC_OFFSET_500PPM <<
+ RK3568_PHYREG32_SSC_OFFSET_SHIFT;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG32_SSC_MASK, val,
+ RK3568_PHYREG32);
}
break;
@@ -687,20 +885,21 @@ static int rk3568_combphy_cfg(struct rockchip_combphy_priv *priv)
if (priv->ext_refclk) {
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_ext, true);
if (priv->type == PHY_TYPE_PCIE && rate == REF_CLOCK_100MHz) {
- val = PHYREG13_RESISTER_HIGH_Z << PHYREG13_RESISTER_SHIFT;
- val |= PHYREG13_CKRCV_AMP0;
- rockchip_combphy_updatel(priv, PHYREG13_RESISTER_MASK, val, PHYREG13);
-
- val = readl(priv->mmio + PHYREG14);
- val |= PHYREG14_CKRCV_AMP1;
- writel(val, priv->mmio + PHYREG14);
+ val = RK3568_PHYREG13_RESISTER_HIGH_Z << RK3568_PHYREG13_RESISTER_SHIFT;
+ val |= RK3568_PHYREG13_CKRCV_AMP0;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG13_RESISTER_MASK, val,
+ RK3568_PHYREG13);
+
+ val = readl(priv->mmio + RK3568_PHYREG14);
+ val |= RK3568_PHYREG14_CKRCV_AMP1;
+ writel(val, priv->mmio + RK3568_PHYREG14);
}
}
if (priv->enable_ssc) {
- val = readl(priv->mmio + PHYREG8);
- val |= PHYREG8_SSC_EN;
- writel(val, priv->mmio + PHYREG8);
+ val = readl(priv->mmio + RK3568_PHYREG8);
+ val |= RK3568_PHYREG8_SSC_EN;
+ writel(val, priv->mmio + RK3568_PHYREG8);
}
return 0;
@@ -737,6 +936,8 @@ static const struct rockchip_combphy_grfcfg rk3568_combphy_grfcfgs = {
/* pipe-grf */
.pipe_con0_for_sata = { 0x0000, 15, 0, 0x00, 0x2220 },
.pipe_xpcs_phy_ready = { 0x0040, 2, 2, 0x00, 0x01 },
+ .u3otg0_port_en = { 0x0104, 15, 0, 0x0181, 0x1100 },
+ .u3otg1_port_en = { 0x0144, 15, 0, 0x0181, 0x1100 },
};
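To make the new table entries easier to read: each five-number initializer is assumed to follow the driver's existing struct combphy_reg layout, { offset, bitend, bitstart, disable, enable } -- a reading based on the surrounding driver, not restated in this hunk.

/* Assumed interpretation of the entries added above:
 * .u3otg0_port_en = { 0x0104, 15, 0, 0x0181, 0x1100 }
 *   -> GRF register 0x0104, field bits [15:0],
 *      write 0x0181 to disable the port, 0x1100 to enable it;
 * rockchip_combphy_param_write() derives the hiword write mask
 * from the bitend/bitstart pair.
 */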
static const struct rockchip_combphy_cfg rk3568_combphy_cfgs = {
@@ -759,8 +960,8 @@ static int rk3576_combphy_cfg(struct rockchip_combphy_priv *priv)
switch (priv->type) {
case PHY_TYPE_PCIE:
/* Set SSC downward spread spectrum */
- val = FIELD_PREP(PHYREG32_SSC_MASK, PHYREG32_SSC_DOWNWARD);
- rockchip_combphy_updatel(priv, PHYREG32_SSC_MASK, val, PHYREG32);
+ val = FIELD_PREP(RK3568_PHYREG32_SSC_MASK, RK3568_PHYREG32_SSC_DOWNWARD);
+ rockchip_combphy_updatel(priv, RK3568_PHYREG32_SSC_MASK, val, RK3568_PHYREG32);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con0_for_pcie, true);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con1_for_pcie, true);
@@ -770,32 +971,33 @@ static int rk3576_combphy_cfg(struct rockchip_combphy_priv *priv)
case PHY_TYPE_USB3:
/* Set SSC downward spread spectrum */
- val = FIELD_PREP(PHYREG32_SSC_MASK, PHYREG32_SSC_DOWNWARD);
- rockchip_combphy_updatel(priv, PHYREG32_SSC_MASK, val, PHYREG32);
+ val = FIELD_PREP(RK3568_PHYREG32_SSC_MASK, RK3568_PHYREG32_SSC_DOWNWARD);
+ rockchip_combphy_updatel(priv, RK3568_PHYREG32_SSC_MASK, val, RK3568_PHYREG32);
/* Enable adaptive CTLE for USB3.0 Rx */
- val = readl(priv->mmio + PHYREG15);
- val |= PHYREG15_CTLE_EN;
- writel(val, priv->mmio + PHYREG15);
+ val = readl(priv->mmio + RK3568_PHYREG15);
+ val |= RK3568_PHYREG15_CTLE_EN;
+ writel(val, priv->mmio + RK3568_PHYREG15);
/* Set PLL KVCO fine tuning signals */
- rockchip_combphy_updatel(priv, PHYREG33_PLL_KVCO_MASK, BIT(3), PHYREG33);
+ rockchip_combphy_updatel(priv, RK3568_PHYREG33_PLL_KVCO_MASK, BIT(3),
+ RK3568_PHYREG33);
/* Set PLL LPF R1 to su_trim[10:7]=1001 */
- writel(PHYREG12_PLL_LPF_ADJ_VALUE, priv->mmio + PHYREG12);
+ writel(RK3568_PHYREG12_PLL_LPF_ADJ_VALUE, priv->mmio + RK3568_PHYREG12);
/* Set PLL input clock divider 1/2 */
- val = FIELD_PREP(PHYREG6_PLL_DIV_MASK, PHYREG6_PLL_DIV_2);
- rockchip_combphy_updatel(priv, PHYREG6_PLL_DIV_MASK, val, PHYREG6);
+ val = FIELD_PREP(RK3568_PHYREG6_PLL_DIV_MASK, RK3568_PHYREG6_PLL_DIV_2);
+ rockchip_combphy_updatel(priv, RK3568_PHYREG6_PLL_DIV_MASK, val, RK3568_PHYREG6);
/* Set PLL loop divider */
- writel(PHYREG18_PLL_LOOP, priv->mmio + PHYREG18);
+ writel(RK3568_PHYREG18_PLL_LOOP, priv->mmio + RK3568_PHYREG18);
/* Set PLL KVCO to min and set PLL charge pump current to max */
- writel(PHYREG11_SU_TRIM_0_7, priv->mmio + PHYREG11);
+ writel(RK3568_PHYREG11_SU_TRIM_0_7, priv->mmio + RK3568_PHYREG11);
/* Set Rx squelch input filter bandwidth */
- writel(PHYREG21_RX_SQUELCH_VAL, priv->mmio + PHYREG21);
+ writel(RK3576_PHYREG21_RX_SQUELCH_VAL, priv->mmio + RK3576_PHYREG21);
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_txcomp_sel, false);
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_txelec_sel, false);
@@ -804,14 +1006,14 @@ static int rk3576_combphy_cfg(struct rockchip_combphy_priv *priv)
case PHY_TYPE_SATA:
/* Enable adaptive CTLE for SATA Rx */
- val = readl(priv->mmio + PHYREG15);
- val |= PHYREG15_CTLE_EN;
- writel(val, priv->mmio + PHYREG15);
+ val = readl(priv->mmio + RK3568_PHYREG15);
+ val |= RK3568_PHYREG15_CTLE_EN;
+ writel(val, priv->mmio + RK3568_PHYREG15);
/* Set tx_rterm = 50 ohm and rx_rterm = 43.5 ohm */
- val = PHYREG7_TX_RTERM_50OHM << PHYREG7_TX_RTERM_SHIFT;
- val |= PHYREG7_RX_RTERM_44OHM << PHYREG7_RX_RTERM_SHIFT;
- writel(val, priv->mmio + PHYREG7);
+ val = RK3568_PHYREG7_TX_RTERM_50OHM << RK3568_PHYREG7_TX_RTERM_SHIFT;
+ val |= RK3568_PHYREG7_RX_RTERM_44OHM << RK3568_PHYREG7_RX_RTERM_SHIFT;
+ writel(val, priv->mmio + RK3568_PHYREG7);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con0_for_sata, true);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con1_for_sata, true);
@@ -833,19 +1035,21 @@ static int rk3576_combphy_cfg(struct rockchip_combphy_priv *priv)
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_24m, true);
if (priv->type == PHY_TYPE_USB3 || priv->type == PHY_TYPE_SATA) {
/* Set ssc_cnt[9:0]=0101111101 & 31.5KHz */
- val = FIELD_PREP(PHYREG15_SSC_CNT_MASK, PHYREG15_SSC_CNT_VALUE);
- rockchip_combphy_updatel(priv, PHYREG15_SSC_CNT_MASK,
- val, PHYREG15);
+ val = FIELD_PREP(RK3568_PHYREG15_SSC_CNT_MASK,
+ RK3568_PHYREG15_SSC_CNT_VALUE);
+ rockchip_combphy_updatel(priv, RK3568_PHYREG15_SSC_CNT_MASK,
+ val, RK3568_PHYREG15);
- writel(PHYREG16_SSC_CNT_VALUE, priv->mmio + PHYREG16);
+ writel(RK3568_PHYREG16_SSC_CNT_VALUE, priv->mmio + RK3568_PHYREG16);
} else if (priv->type == PHY_TYPE_PCIE) {
/* PLL KVCO tuning fine */
- val = FIELD_PREP(PHYREG33_PLL_KVCO_MASK, PHYREG33_PLL_KVCO_VALUE_RK3576);
- rockchip_combphy_updatel(priv, PHYREG33_PLL_KVCO_MASK,
- val, PHYREG33);
+ val = FIELD_PREP(RK3568_PHYREG33_PLL_KVCO_MASK,
+ RK3576_PHYREG33_PLL_KVCO_VALUE);
+ rockchip_combphy_updatel(priv, RK3568_PHYREG33_PLL_KVCO_MASK,
+ val, RK3568_PHYREG33);
/* Set up rx_pck invert and rx msb to disable */
- writel(0x00, priv->mmio + PHYREG27);
+ writel(0x00, priv->mmio + RK3588_PHYREG27);
/*
* Set up SU adjust signal:
@@ -853,11 +1057,11 @@ static int rk3576_combphy_cfg(struct rockchip_combphy_priv *priv)
* su_trim[15:8], PLL LPF R1 adjust bits[9:7]=3'b011
* su_trim[31:24], CKDRV adjust
*/
- writel(0x90, priv->mmio + PHYREG11);
- writel(0x02, priv->mmio + PHYREG12);
- writel(0x57, priv->mmio + PHYREG14);
+ writel(0x90, priv->mmio + RK3568_PHYREG11);
+ writel(0x02, priv->mmio + RK3568_PHYREG12);
+ writel(0x57, priv->mmio + RK3568_PHYREG14);
- writel(PHYREG16_SSC_CNT_VALUE, priv->mmio + PHYREG16);
+ writel(RK3568_PHYREG16_SSC_CNT_VALUE, priv->mmio + RK3568_PHYREG16);
}
break;
@@ -869,15 +1073,16 @@ static int rk3576_combphy_cfg(struct rockchip_combphy_priv *priv)
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_100m, true);
if (priv->type == PHY_TYPE_PCIE) {
/* gate_tx_pck_sel length select work for L1SS */
- writel(0xc0, priv->mmio + PHYREG30);
+ writel(0xc0, priv->mmio + RK3576_PHYREG30);
/* PLL KVCO tuning fine */
- val = FIELD_PREP(PHYREG33_PLL_KVCO_MASK, PHYREG33_PLL_KVCO_VALUE_RK3576);
- rockchip_combphy_updatel(priv, PHYREG33_PLL_KVCO_MASK,
- val, PHYREG33);
+ val = FIELD_PREP(RK3568_PHYREG33_PLL_KVCO_MASK,
+ RK3576_PHYREG33_PLL_KVCO_VALUE);
+ rockchip_combphy_updatel(priv, RK3568_PHYREG33_PLL_KVCO_MASK,
+ val, RK3568_PHYREG33);
/* Set up rx_trim: PLL LPF C1 85pf R1 1.25kohm */
- writel(0x4c, priv->mmio + PHYREG27);
+ writel(0x4c, priv->mmio + RK3588_PHYREG27);
/*
* Set up SU adjust signal:
@@ -887,20 +1092,23 @@ static int rk3576_combphy_cfg(struct rockchip_combphy_priv *priv)
* su_trim[23:16], CKRCV adjust
* su_trim[31:24], CKDRV adjust
*/
- writel(0x90, priv->mmio + PHYREG11);
- writel(0x43, priv->mmio + PHYREG12);
- writel(0x88, priv->mmio + PHYREG13);
- writel(0x56, priv->mmio + PHYREG14);
+ writel(0x90, priv->mmio + RK3568_PHYREG11);
+ writel(0x43, priv->mmio + RK3568_PHYREG12);
+ writel(0x88, priv->mmio + RK3568_PHYREG13);
+ writel(0x56, priv->mmio + RK3568_PHYREG14);
} else if (priv->type == PHY_TYPE_SATA) {
/* downward spread spectrum +500ppm */
- val = FIELD_PREP(PHYREG32_SSC_DIR_MASK, PHYREG32_SSC_DOWNWARD);
- val |= FIELD_PREP(PHYREG32_SSC_OFFSET_MASK, PHYREG32_SSC_OFFSET_500PPM);
- rockchip_combphy_updatel(priv, PHYREG32_SSC_MASK, val, PHYREG32);
+ val = FIELD_PREP(RK3568_PHYREG32_SSC_DIR_MASK,
+ RK3568_PHYREG32_SSC_DOWNWARD);
+ val |= FIELD_PREP(RK3568_PHYREG32_SSC_OFFSET_MASK,
+ RK3568_PHYREG32_SSC_OFFSET_500PPM);
+ rockchip_combphy_updatel(priv, RK3568_PHYREG32_SSC_MASK, val,
+ RK3568_PHYREG32);
/* ssc ppm adjust to 3500ppm */
- rockchip_combphy_updatel(priv, PHYREG10_SSC_PCM_MASK,
- PHYREG10_SSC_PCM_3500PPM,
- PHYREG10);
+ rockchip_combphy_updatel(priv, RK3576_PHYREG10_SSC_PCM_MASK,
+ RK3576_PHYREG10_SSC_PCM_3500PPM,
+ RK3576_PHYREG10);
}
break;
@@ -912,12 +1120,13 @@ static int rk3576_combphy_cfg(struct rockchip_combphy_priv *priv)
if (priv->ext_refclk) {
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_ext, true);
if (priv->type == PHY_TYPE_PCIE && rate == REF_CLOCK_100MHz) {
- val = FIELD_PREP(PHYREG33_PLL_KVCO_MASK, PHYREG33_PLL_KVCO_VALUE_RK3576);
- rockchip_combphy_updatel(priv, PHYREG33_PLL_KVCO_MASK,
- val, PHYREG33);
+ val = FIELD_PREP(RK3568_PHYREG33_PLL_KVCO_MASK,
+ RK3576_PHYREG33_PLL_KVCO_VALUE);
+ rockchip_combphy_updatel(priv, RK3568_PHYREG33_PLL_KVCO_MASK,
+ val, RK3568_PHYREG33);
/* Set up rx_trim: PLL LPF C1 85pf R1 2.5kohm */
- writel(0x0c, priv->mmio + PHYREG27);
+ writel(0x0c, priv->mmio + RK3588_PHYREG27);
/*
* Set up SU adjust signal:
@@ -927,25 +1136,25 @@ static int rk3576_combphy_cfg(struct rockchip_combphy_priv *priv)
* su_trim[23:16], CKRCV adjust
* su_trim[31:24], CKDRV adjust
*/
- writel(0x90, priv->mmio + PHYREG11);
- writel(0x43, priv->mmio + PHYREG12);
- writel(0x88, priv->mmio + PHYREG13);
- writel(0x56, priv->mmio + PHYREG14);
+ writel(0x90, priv->mmio + RK3568_PHYREG11);
+ writel(0x43, priv->mmio + RK3568_PHYREG12);
+ writel(0x88, priv->mmio + RK3568_PHYREG13);
+ writel(0x56, priv->mmio + RK3568_PHYREG14);
}
}
if (priv->enable_ssc) {
- val = readl(priv->mmio + PHYREG8);
- val |= PHYREG8_SSC_EN;
- writel(val, priv->mmio + PHYREG8);
+ val = readl(priv->mmio + RK3568_PHYREG8);
+ val |= RK3568_PHYREG8_SSC_EN;
+ writel(val, priv->mmio + RK3568_PHYREG8);
if (priv->type == PHY_TYPE_PCIE && rate == REF_CLOCK_24MHz) {
/* Set PLL loop divider */
- writel(0x00, priv->mmio + PHYREG17);
- writel(PHYREG18_PLL_LOOP, priv->mmio + PHYREG18);
+ writel(0x00, priv->mmio + RK3576_PHYREG17);
+ writel(RK3568_PHYREG18_PLL_LOOP, priv->mmio + RK3568_PHYREG18);
/* Set up rx_pck invert and rx msb to disable */
- writel(0x00, priv->mmio + PHYREG27);
+ writel(0x00, priv->mmio + RK3588_PHYREG27);
/*
* Set up SU adjust signal:
@@ -954,16 +1163,17 @@ static int rk3576_combphy_cfg(struct rockchip_combphy_priv *priv)
* su_trim[23:16], CKRCV adjust
* su_trim[31:24], CKDRV adjust
*/
- writel(0x90, priv->mmio + PHYREG11);
- writel(0x02, priv->mmio + PHYREG12);
- writel(0x08, priv->mmio + PHYREG13);
- writel(0x57, priv->mmio + PHYREG14);
- writel(0x40, priv->mmio + PHYREG15);
+ writel(0x90, priv->mmio + RK3568_PHYREG11);
+ writel(0x02, priv->mmio + RK3568_PHYREG12);
+ writel(0x08, priv->mmio + RK3568_PHYREG13);
+ writel(0x57, priv->mmio + RK3568_PHYREG14);
+ writel(0x40, priv->mmio + RK3568_PHYREG15);
- writel(PHYREG16_SSC_CNT_VALUE, priv->mmio + PHYREG16);
+ writel(RK3568_PHYREG16_SSC_CNT_VALUE, priv->mmio + RK3568_PHYREG16);
- val = FIELD_PREP(PHYREG33_PLL_KVCO_MASK, PHYREG33_PLL_KVCO_VALUE_RK3576);
- writel(val, priv->mmio + PHYREG33);
+ val = FIELD_PREP(RK3568_PHYREG33_PLL_KVCO_MASK,
+ RK3576_PHYREG33_PLL_KVCO_VALUE);
+ writel(val, priv->mmio + RK3568_PHYREG33);
}
}
@@ -1033,30 +1243,28 @@ static int rk3588_combphy_cfg(struct rockchip_combphy_priv *priv)
break;
case PHY_TYPE_USB3:
/* Set SSC downward spread spectrum */
- rockchip_combphy_updatel(priv, PHYREG32_SSC_MASK,
- PHYREG32_SSC_DOWNWARD << PHYREG32_SSC_DIR_SHIFT,
- PHYREG32);
+ val = RK3568_PHYREG32_SSC_DOWNWARD << RK3568_PHYREG32_SSC_DIR_SHIFT;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG32_SSC_MASK, val, RK3568_PHYREG32);
/* Enable adaptive CTLE for USB3.0 Rx. */
- val = readl(priv->mmio + PHYREG15);
- val |= PHYREG15_CTLE_EN;
- writel(val, priv->mmio + PHYREG15);
+ val = readl(priv->mmio + RK3568_PHYREG15);
+ val |= RK3568_PHYREG15_CTLE_EN;
+ writel(val, priv->mmio + RK3568_PHYREG15);
/* Set PLL KVCO fine tuning signals. */
- rockchip_combphy_updatel(priv, PHYREG33_PLL_KVCO_MASK,
- PHYREG33_PLL_KVCO_VALUE << PHYREG33_PLL_KVCO_SHIFT,
- PHYREG33);
+ val = RK3568_PHYREG33_PLL_KVCO_VALUE << RK3568_PHYREG33_PLL_KVCO_SHIFT;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG33_PLL_KVCO_MASK, val, RK3568_PHYREG33);
/* Enable controlling random jitter. */
- writel(PHYREG12_PLL_LPF_ADJ_VALUE, priv->mmio + PHYREG12);
+ writel(RK3568_PHYREG12_PLL_LPF_ADJ_VALUE, priv->mmio + RK3568_PHYREG12);
/* Set PLL input clock divider 1/2. */
- rockchip_combphy_updatel(priv, PHYREG6_PLL_DIV_MASK,
- PHYREG6_PLL_DIV_2 << PHYREG6_PLL_DIV_SHIFT,
- PHYREG6);
+ rockchip_combphy_updatel(priv, RK3568_PHYREG6_PLL_DIV_MASK,
+ RK3568_PHYREG6_PLL_DIV_2 << RK3568_PHYREG6_PLL_DIV_SHIFT,
+ RK3568_PHYREG6);
- writel(PHYREG18_PLL_LOOP, priv->mmio + PHYREG18);
- writel(PHYREG11_SU_TRIM_0_7, priv->mmio + PHYREG11);
+ writel(RK3568_PHYREG18_PLL_LOOP, priv->mmio + RK3568_PHYREG18);
+ writel(RK3568_PHYREG11_SU_TRIM_0_7, priv->mmio + RK3568_PHYREG11);
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_txcomp_sel, false);
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_txelec_sel, false);
@@ -1064,16 +1272,16 @@ static int rk3588_combphy_cfg(struct rockchip_combphy_priv *priv)
break;
case PHY_TYPE_SATA:
/* Enable adaptive CTLE for SATA Rx. */
- val = readl(priv->mmio + PHYREG15);
- val |= PHYREG15_CTLE_EN;
- writel(val, priv->mmio + PHYREG15);
+ val = readl(priv->mmio + RK3568_PHYREG15);
+ val |= RK3568_PHYREG15_CTLE_EN;
+ writel(val, priv->mmio + RK3568_PHYREG15);
/*
* Set tx_rterm=50ohm and rx_rterm=44ohm for SATA.
* 0: 60ohm, 8: 50ohm, 15: 44ohm (in steps of about 1ohm)
*/
- val = PHYREG7_TX_RTERM_50OHM << PHYREG7_TX_RTERM_SHIFT;
- val |= PHYREG7_RX_RTERM_44OHM << PHYREG7_RX_RTERM_SHIFT;
- writel(val, priv->mmio + PHYREG7);
+ val = RK3568_PHYREG7_TX_RTERM_50OHM << RK3568_PHYREG7_TX_RTERM_SHIFT;
+ val |= RK3568_PHYREG7_RX_RTERM_44OHM << RK3568_PHYREG7_RX_RTERM_SHIFT;
+ writel(val, priv->mmio + RK3568_PHYREG7);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con0_for_sata, true);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con1_for_sata, true);
@@ -1095,11 +1303,11 @@ static int rk3588_combphy_cfg(struct rockchip_combphy_priv *priv)
case REF_CLOCK_24MHz:
if (priv->type == PHY_TYPE_USB3 || priv->type == PHY_TYPE_SATA) {
/* Set ssc_cnt[9:0]=0101111101 & 31.5KHz. */
- val = PHYREG15_SSC_CNT_VALUE << PHYREG15_SSC_CNT_SHIFT;
- rockchip_combphy_updatel(priv, PHYREG15_SSC_CNT_MASK,
- val, PHYREG15);
+ val = RK3568_PHYREG15_SSC_CNT_VALUE << RK3568_PHYREG15_SSC_CNT_SHIFT;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG15_SSC_CNT_MASK,
+ val, RK3568_PHYREG15);
- writel(PHYREG16_SSC_CNT_VALUE, priv->mmio + PHYREG16);
+ writel(RK3568_PHYREG16_SSC_CNT_VALUE, priv->mmio + RK3568_PHYREG16);
}
break;
@@ -1110,23 +1318,25 @@ static int rk3588_combphy_cfg(struct rockchip_combphy_priv *priv)
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_100m, true);
if (priv->type == PHY_TYPE_PCIE) {
/* PLL KVCO fine tuning. */
- val = 4 << PHYREG33_PLL_KVCO_SHIFT;
- rockchip_combphy_updatel(priv, PHYREG33_PLL_KVCO_MASK,
- val, PHYREG33);
+ val = 4 << RK3568_PHYREG33_PLL_KVCO_SHIFT;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG33_PLL_KVCO_MASK,
+ val, RK3568_PHYREG33);
/* Enable controlling random jitter. */
- writel(PHYREG12_PLL_LPF_ADJ_VALUE, priv->mmio + PHYREG12);
+ writel(RK3568_PHYREG12_PLL_LPF_ADJ_VALUE, priv->mmio + RK3568_PHYREG12);
/* Set up rx_trim: PLL LPF C1 85pf R1 1.25kohm */
- writel(PHYREG27_RX_TRIM_RK3588, priv->mmio + PHYREG27);
+ writel(RK3588_PHYREG27_RX_TRIM, priv->mmio + RK3588_PHYREG27);
/* Set up su_trim: */
- writel(PHYREG11_SU_TRIM_0_7, priv->mmio + PHYREG11);
+ writel(RK3568_PHYREG11_SU_TRIM_0_7, priv->mmio + RK3568_PHYREG11);
} else if (priv->type == PHY_TYPE_SATA) {
/* downward spread spectrum +500ppm */
- val = PHYREG32_SSC_DOWNWARD << PHYREG32_SSC_DIR_SHIFT;
- val |= PHYREG32_SSC_OFFSET_500PPM << PHYREG32_SSC_OFFSET_SHIFT;
- rockchip_combphy_updatel(priv, PHYREG32_SSC_MASK, val, PHYREG32);
+ val = RK3568_PHYREG32_SSC_DOWNWARD << RK3568_PHYREG32_SSC_DIR_SHIFT;
+ val |= RK3568_PHYREG32_SSC_OFFSET_500PPM <<
+ RK3568_PHYREG32_SSC_OFFSET_SHIFT;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG32_SSC_MASK, val,
+ RK3568_PHYREG32);
}
break;
default:
@@ -1137,20 +1347,21 @@ static int rk3588_combphy_cfg(struct rockchip_combphy_priv *priv)
if (priv->ext_refclk) {
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_ext, true);
if (priv->type == PHY_TYPE_PCIE && rate == REF_CLOCK_100MHz) {
- val = PHYREG13_RESISTER_HIGH_Z << PHYREG13_RESISTER_SHIFT;
- val |= PHYREG13_CKRCV_AMP0;
- rockchip_combphy_updatel(priv, PHYREG13_RESISTER_MASK, val, PHYREG13);
-
- val = readl(priv->mmio + PHYREG14);
- val |= PHYREG14_CKRCV_AMP1;
- writel(val, priv->mmio + PHYREG14);
+ val = RK3568_PHYREG13_RESISTER_HIGH_Z << RK3568_PHYREG13_RESISTER_SHIFT;
+ val |= RK3568_PHYREG13_CKRCV_AMP0;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG13_RESISTER_MASK, val,
+ RK3568_PHYREG13);
+
+ val = readl(priv->mmio + RK3568_PHYREG14);
+ val |= RK3568_PHYREG14_CKRCV_AMP1;
+ writel(val, priv->mmio + RK3568_PHYREG14);
}
}
if (priv->enable_ssc) {
- val = readl(priv->mmio + PHYREG8);
- val |= PHYREG8_SSC_EN;
- writel(val, priv->mmio + PHYREG8);
+ val = readl(priv->mmio + RK3568_PHYREG8);
+ val |= RK3568_PHYREG8_SSC_EN;
+ writel(val, priv->mmio + RK3568_PHYREG8);
}
return 0;
@@ -1198,6 +1409,10 @@ static const struct rockchip_combphy_cfg rk3588_combphy_cfgs = {
static const struct of_device_id rockchip_combphy_of_match[] = {
{
+ .compatible = "rockchip,rk3528-naneng-combphy",
+ .data = &rk3528_combphy_cfgs,
+ },
+ {
.compatible = "rockchip,rk3562-naneng-combphy",
.data = &rk3562_combphy_cfgs,
},
diff --git a/drivers/phy/rockchip/phy-rockchip-pcie.c b/drivers/phy/rockchip/phy-rockchip-pcie.c
index 4e2dfd01adf2..126306c01454 100644
--- a/drivers/phy/rockchip/phy-rockchip-pcie.c
+++ b/drivers/phy/rockchip/phy-rockchip-pcie.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/hw_bitfield.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
@@ -18,22 +19,13 @@
#include <linux/regmap.h>
#include <linux/reset.h>
-/*
- * The higher 16-bit of this register is used for write protection
- * only if BIT(x + 16) set to 1 the BIT(x) can be written.
- */
-#define HIWORD_UPDATE(val, mask, shift) \
- ((val) << (shift) | (mask) << ((shift) + 16))
#define PHY_MAX_LANE_NUM 4
-#define PHY_CFG_DATA_SHIFT 7
-#define PHY_CFG_ADDR_SHIFT 1
-#define PHY_CFG_DATA_MASK 0xf
-#define PHY_CFG_ADDR_MASK 0x3f
+#define PHY_CFG_DATA_MASK GENMASK(10, 7)
+#define PHY_CFG_ADDR_MASK GENMASK(6, 1)
#define PHY_CFG_WR_ENABLE 1
#define PHY_CFG_WR_DISABLE 0
-#define PHY_CFG_WR_SHIFT 0
-#define PHY_CFG_WR_MASK 1
+#define PHY_CFG_WR_MASK BIT(0)
#define PHY_CFG_PLL_LOCK 0x10
#define PHY_CFG_CLK_TEST 0x10
#define PHY_CFG_CLK_SCC 0x12
@@ -48,11 +40,7 @@
#define PHY_LANE_RX_DET_SHIFT 11
#define PHY_LANE_RX_DET_TH 0x1
#define PHY_LANE_IDLE_OFF 0x1
-#define PHY_LANE_IDLE_MASK 0x1
-#define PHY_LANE_IDLE_A_SHIFT 3
-#define PHY_LANE_IDLE_B_SHIFT 4
-#define PHY_LANE_IDLE_C_SHIFT 5
-#define PHY_LANE_IDLE_D_SHIFT 6
+#define PHY_LANE_IDLE_MASK BIT(3)
struct rockchip_pcie_data {
unsigned int pcie_conf;
@@ -99,22 +87,14 @@ static inline void phy_wr_cfg(struct rockchip_pcie_phy *rk_phy,
u32 addr, u32 data)
{
regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
- HIWORD_UPDATE(data,
- PHY_CFG_DATA_MASK,
- PHY_CFG_DATA_SHIFT) |
- HIWORD_UPDATE(addr,
- PHY_CFG_ADDR_MASK,
- PHY_CFG_ADDR_SHIFT));
+ FIELD_PREP_WM16(PHY_CFG_DATA_MASK, data) |
+ FIELD_PREP_WM16(PHY_CFG_ADDR_MASK, addr));
udelay(1);
regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
- HIWORD_UPDATE(PHY_CFG_WR_ENABLE,
- PHY_CFG_WR_MASK,
- PHY_CFG_WR_SHIFT));
+ FIELD_PREP_WM16(PHY_CFG_WR_MASK, PHY_CFG_WR_ENABLE));
udelay(1);
regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
- HIWORD_UPDATE(PHY_CFG_WR_DISABLE,
- PHY_CFG_WR_MASK,
- PHY_CFG_WR_SHIFT));
+ FIELD_PREP_WM16(PHY_CFG_WR_MASK, PHY_CFG_WR_DISABLE));
}
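For readers who have not met the helper yet: FIELD_PREP_WM16() is assumed here to behave like the FIELD_PREP_HIWORD macro this series removes from the Samsung DCPHY driver below, i.e. it packs the field value into the low half-word and raises the matching write-enable bits in the high half-word of these write-maskable registers. A minimal sketch:

#include <linux/bitfield.h>

/* Sketch only -- the in-tree helper comes from <linux/hw_bitfield.h>.
 * Bit (x + 16) must be set for bit x to be latched by the register.
 */
#define HIWORD_FIELD_PREP(mask, val) \
	(FIELD_PREP((mask), (val)) | ((mask) << 16))

/* e.g. HIWORD_FIELD_PREP(PHY_CFG_ADDR_MASK, PHY_CFG_PLL_LOCK) places the
 * PLL-lock address in bits [6:1] and unlocks exactly those bits via
 * bits [22:17].
 */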
static int rockchip_pcie_phy_power_off(struct phy *phy)
@@ -125,11 +105,9 @@ static int rockchip_pcie_phy_power_off(struct phy *phy)
guard(mutex)(&rk_phy->pcie_mutex);
- regmap_write(rk_phy->reg_base,
- rk_phy->phy_data->pcie_laneoff,
- HIWORD_UPDATE(PHY_LANE_IDLE_OFF,
- PHY_LANE_IDLE_MASK,
- PHY_LANE_IDLE_A_SHIFT + inst->index));
+ regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_laneoff,
+ FIELD_PREP_WM16(PHY_LANE_IDLE_MASK,
+ PHY_LANE_IDLE_OFF) << inst->index);
if (--rk_phy->pwr_cnt) {
return 0;
@@ -139,11 +117,9 @@ static int rockchip_pcie_phy_power_off(struct phy *phy)
if (err) {
dev_err(&phy->dev, "assert phy_rst err %d\n", err);
rk_phy->pwr_cnt++;
- regmap_write(rk_phy->reg_base,
- rk_phy->phy_data->pcie_laneoff,
- HIWORD_UPDATE(!PHY_LANE_IDLE_OFF,
- PHY_LANE_IDLE_MASK,
- PHY_LANE_IDLE_A_SHIFT + inst->index));
+ regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_laneoff,
+ FIELD_PREP_WM16(PHY_LANE_IDLE_MASK,
+ !PHY_LANE_IDLE_OFF) << inst->index);
return err;
}
@@ -159,11 +135,9 @@ static int rockchip_pcie_phy_power_on(struct phy *phy)
guard(mutex)(&rk_phy->pcie_mutex);
- regmap_write(rk_phy->reg_base,
- rk_phy->phy_data->pcie_laneoff,
- HIWORD_UPDATE(!PHY_LANE_IDLE_OFF,
- PHY_LANE_IDLE_MASK,
- PHY_LANE_IDLE_A_SHIFT + inst->index));
+ regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_laneoff,
+ FIELD_PREP_WM16(PHY_LANE_IDLE_MASK,
+ !PHY_LANE_IDLE_OFF) << inst->index);
if (rk_phy->pwr_cnt++) {
return 0;
@@ -177,9 +151,7 @@ static int rockchip_pcie_phy_power_on(struct phy *phy)
}
regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
- HIWORD_UPDATE(PHY_CFG_PLL_LOCK,
- PHY_CFG_ADDR_MASK,
- PHY_CFG_ADDR_SHIFT));
+ FIELD_PREP_WM16(PHY_CFG_ADDR_MASK, PHY_CFG_PLL_LOCK));
/*
* No documented timeout value for phy operation below,
@@ -210,9 +182,7 @@ static int rockchip_pcie_phy_power_on(struct phy *phy)
}
regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
- HIWORD_UPDATE(PHY_CFG_PLL_LOCK,
- PHY_CFG_ADDR_MASK,
- PHY_CFG_ADDR_SHIFT));
+ FIELD_PREP_WM16(PHY_CFG_ADDR_MASK, PHY_CFG_PLL_LOCK));
err = regmap_read_poll_timeout(rk_phy->reg_base,
rk_phy->phy_data->pcie_status,
diff --git a/drivers/phy/rockchip/phy-rockchip-samsung-dcphy.c b/drivers/phy/rockchip/phy-rockchip-samsung-dcphy.c
index 28a052e17366..4508a3147272 100644
--- a/drivers/phy/rockchip/phy-rockchip-samsung-dcphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-samsung-dcphy.c
@@ -8,6 +8,7 @@
#include <dt-bindings/phy/phy.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/hw_bitfield.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
@@ -20,12 +21,6 @@
#include <linux/regmap.h>
#include <linux/reset.h>
-#define FIELD_PREP_HIWORD(_mask, _val) \
- ( \
- FIELD_PREP((_mask), (_val)) | \
- ((_mask) << 16) \
- )
-
#define BIAS_CON0 0x0000
#define I_RES_CNTL_MASK GENMASK(6, 4)
#define I_RES_CNTL(x) FIELD_PREP(I_RES_CNTL_MASK, x)
@@ -252,8 +247,8 @@
/* MIPI_CDPHY_GRF registers */
#define MIPI_DCPHY_GRF_CON0 0x0000
-#define S_CPHY_MODE FIELD_PREP_HIWORD(BIT(3), 1)
-#define M_CPHY_MODE FIELD_PREP_HIWORD(BIT(0), 1)
+#define S_CPHY_MODE FIELD_PREP_WM16(BIT(3), 1)
+#define M_CPHY_MODE FIELD_PREP_WM16(BIT(0), 1)
enum hs_drv_res_ohm {
STRENGTH_30_OHM = 0x8,
diff --git a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
index 79db57ee90d1..01bbf668e05e 100644
--- a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
+++ b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
@@ -795,7 +795,6 @@ static const struct regmap_config rk_hdptx_phy_regmap_config = {
.val_bits = 32,
.writeable_reg = rk_hdptx_phy_is_rw_reg,
.readable_reg = rk_hdptx_phy_is_rw_reg,
- .fast_io = true,
.max_register = 0x18b4,
};
diff --git a/drivers/phy/rockchip/phy-rockchip-usb.c b/drivers/phy/rockchip/phy-rockchip-usb.c
index 666a896c8f0a..c3c30df29c3e 100644
--- a/drivers/phy/rockchip/phy-rockchip-usb.c
+++ b/drivers/phy/rockchip/phy-rockchip-usb.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/hw_bitfield.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -24,9 +25,6 @@
static int enable_usb_uart;
-#define HIWORD_UPDATE(val, mask) \
- ((val) | (mask) << 16)
-
#define UOC_CON0 0x00
#define UOC_CON0_SIDDQ BIT(13)
#define UOC_CON0_DISABLE BIT(4)
@@ -38,10 +36,10 @@ static int enable_usb_uart;
#define UOC_CON3 0x0c
/* bits present on rk3188 and rk3288 phys */
#define UOC_CON3_UTMI_TERMSEL_FULLSPEED BIT(5)
-#define UOC_CON3_UTMI_XCVRSEELCT_FSTRANSC (1 << 3)
-#define UOC_CON3_UTMI_XCVRSEELCT_MASK (3 << 3)
-#define UOC_CON3_UTMI_OPMODE_NODRIVING (1 << 1)
-#define UOC_CON3_UTMI_OPMODE_MASK (3 << 1)
+#define UOC_CON3_UTMI_XCVRSEELCT_FSTRANSC 1UL
+#define UOC_CON3_UTMI_XCVRSEELCT_MASK GENMASK(4, 3)
+#define UOC_CON3_UTMI_OPMODE_NODRIVING 1UL
+#define UOC_CON3_UTMI_OPMODE_MASK GENMASK(2, 1)
#define UOC_CON3_UTMI_SUSPENDN BIT(0)
struct rockchip_usb_phys {
@@ -79,7 +77,7 @@ struct rockchip_usb_phy {
static int rockchip_usb_phy_power(struct rockchip_usb_phy *phy,
bool siddq)
{
- u32 val = HIWORD_UPDATE(siddq ? UOC_CON0_SIDDQ : 0, UOC_CON0_SIDDQ);
+ u32 val = FIELD_PREP_WM16(UOC_CON0_SIDDQ, siddq);
return regmap_write(phy->base->reg_base, phy->reg_offset, val);
}
@@ -332,29 +330,24 @@ static int __init rockchip_init_usb_uart_common(struct regmap *grf,
* but were not present in the original code.
* Also disable the analog phy components to save power.
*/
- val = HIWORD_UPDATE(UOC_CON0_COMMON_ON_N
- | UOC_CON0_DISABLE
- | UOC_CON0_SIDDQ,
- UOC_CON0_COMMON_ON_N
- | UOC_CON0_DISABLE
- | UOC_CON0_SIDDQ);
+ val = FIELD_PREP_WM16(UOC_CON0_COMMON_ON_N, 1) |
+ FIELD_PREP_WM16(UOC_CON0_DISABLE, 1) |
+ FIELD_PREP_WM16(UOC_CON0_SIDDQ, 1);
ret = regmap_write(grf, regoffs + UOC_CON0, val);
if (ret)
return ret;
- val = HIWORD_UPDATE(UOC_CON2_SOFT_CON_SEL,
- UOC_CON2_SOFT_CON_SEL);
+ val = FIELD_PREP_WM16(UOC_CON2_SOFT_CON_SEL, 1);
ret = regmap_write(grf, regoffs + UOC_CON2, val);
if (ret)
return ret;
- val = HIWORD_UPDATE(UOC_CON3_UTMI_OPMODE_NODRIVING
- | UOC_CON3_UTMI_XCVRSEELCT_FSTRANSC
- | UOC_CON3_UTMI_TERMSEL_FULLSPEED,
- UOC_CON3_UTMI_SUSPENDN
- | UOC_CON3_UTMI_OPMODE_MASK
- | UOC_CON3_UTMI_XCVRSEELCT_MASK
- | UOC_CON3_UTMI_TERMSEL_FULLSPEED);
+ val = FIELD_PREP_WM16(UOC_CON3_UTMI_SUSPENDN, 0) |
+ FIELD_PREP_WM16(UOC_CON3_UTMI_OPMODE_MASK,
+ UOC_CON3_UTMI_OPMODE_NODRIVING) |
+ FIELD_PREP_WM16(UOC_CON3_UTMI_XCVRSEELCT_MASK,
+ UOC_CON3_UTMI_XCVRSEELCT_FSTRANSC) |
+ FIELD_PREP_WM16(UOC_CON3_UTMI_TERMSEL_FULLSPEED, 1);
ret = regmap_write(grf, UOC_CON3, val);
if (ret)
return ret;
@@ -380,10 +373,8 @@ static int __init rk3188_init_usb_uart(struct regmap *grf,
if (ret)
return ret;
- val = HIWORD_UPDATE(RK3188_UOC0_CON0_BYPASSSEL
- | RK3188_UOC0_CON0_BYPASSDMEN,
- RK3188_UOC0_CON0_BYPASSSEL
- | RK3188_UOC0_CON0_BYPASSDMEN);
+ val = FIELD_PREP_WM16(RK3188_UOC0_CON0_BYPASSSEL, 1) |
+ FIELD_PREP_WM16(RK3188_UOC0_CON0_BYPASSDMEN, 1);
ret = regmap_write(grf, RK3188_UOC0_CON0, val);
if (ret)
return ret;
@@ -430,10 +421,8 @@ static int __init rk3288_init_usb_uart(struct regmap *grf,
if (ret)
return ret;
- val = HIWORD_UPDATE(RK3288_UOC0_CON3_BYPASSSEL
- | RK3288_UOC0_CON3_BYPASSDMEN,
- RK3288_UOC0_CON3_BYPASSSEL
- | RK3288_UOC0_CON3_BYPASSDMEN);
+ val = FIELD_PREP_WM16(RK3288_UOC0_CON3_BYPASSSEL, 1) |
+ FIELD_PREP_WM16(RK3288_UOC0_CON3_BYPASSDMEN, 1);
ret = regmap_write(grf, RK3288_UOC0_CON3, val);
if (ret)
return ret;
diff --git a/drivers/phy/rockchip/phy-rockchip-usbdp.c b/drivers/phy/rockchip/phy-rockchip-usbdp.c
index c066cc0a7b4f..fba35510d88c 100644
--- a/drivers/phy/rockchip/phy-rockchip-usbdp.c
+++ b/drivers/phy/rockchip/phy-rockchip-usbdp.c
@@ -666,7 +666,7 @@ static int rk_udphy_orien_sw_set(struct typec_switch_dev *sw,
goto unlock_ret;
}
- udphy->flip = (orien == TYPEC_ORIENTATION_REVERSE) ? true : false;
+ udphy->flip = orien == TYPEC_ORIENTATION_REVERSE;
rk_udphy_set_typec_default_mapping(udphy);
rk_udphy_usb_bvalid_enable(udphy, true);
@@ -1430,7 +1430,6 @@ static const struct regmap_config rk_udphy_pma_regmap_cfg = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .fast_io = true,
.max_register = 0x20dc,
};
diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c
index dd660ebe8045..a88ba95bdc8f 100644
--- a/drivers/phy/samsung/phy-exynos5-usbdrd.c
+++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c
@@ -2417,4 +2417,3 @@ module_platform_driver(exynos5_usb3drd_phy);
MODULE_DESCRIPTION("Samsung Exynos5 SoCs USB 3.0 DRD controller PHY driver");
MODULE_AUTHOR("Vivek Gautam <gautam.vivek@samsung.com>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:exynos5_usb3drd_phy");
diff --git a/drivers/phy/samsung/phy-samsung-usb2.c b/drivers/phy/samsung/phy-samsung-usb2.c
index 9de744cd6f39..d2749b67cf8f 100644
--- a/drivers/phy/samsung/phy-samsung-usb2.c
+++ b/drivers/phy/samsung/phy-samsung-usb2.c
@@ -258,4 +258,3 @@ module_platform_driver(samsung_usb2_phy_driver);
MODULE_DESCRIPTION("Samsung S5P/Exynos SoC USB PHY driver");
MODULE_AUTHOR("Kamil Debski <k.debski@samsung.com>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:samsung-usb2-phy");
diff --git a/drivers/phy/sophgo/Kconfig b/drivers/phy/sophgo/Kconfig
new file mode 100644
index 000000000000..2c943bbe1f81
--- /dev/null
+++ b/drivers/phy/sophgo/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Phy drivers for Sophgo platforms
+#
+
+if ARCH_SOPHGO || COMPILE_TEST
+
+config PHY_SOPHGO_CV1800_USB2
+ tristate "Sophgo CV18XX/SG200X USB 2.0 PHY support"
+ depends on MFD_SYSCON
+ depends on USB_SUPPORT
+ select GENERIC_PHY
+ help
+ Enable this to support the USB 2.0 PHY used with
+ the DWC2 USB controller in Sophgo CV18XX/SG200X
+ series SoCs.
+ If unsure, say N.
+
+endif # ARCH_SOPHGO || COMPILE_TEST
diff --git a/drivers/phy/sophgo/Makefile b/drivers/phy/sophgo/Makefile
new file mode 100644
index 000000000000..318060661759
--- /dev/null
+++ b/drivers/phy/sophgo/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PHY_SOPHGO_CV1800_USB2) += phy-cv1800-usb2.o
diff --git a/drivers/phy/sophgo/phy-cv1800-usb2.c b/drivers/phy/sophgo/phy-cv1800-usb2.c
new file mode 100644
index 000000000000..64f8e37b4b52
--- /dev/null
+++ b/drivers/phy/sophgo/phy-cv1800-usb2.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 Inochi Amaoto <inochiama@outlook.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+
+#define REG_USB_PHY_CTRL 0x048
+
+#define PHY_VBUS_POWER_EN BIT(0)
+#define PHY_VBUS_POWER BIT(1)
+#define PHY_ID_OVERWRITE_EN BIT(6)
+#define PHY_ID_OVERWRITE_MODE BIT(7)
+#define PHY_ID_OVERWRITE_MODE_HOST FIELD_PREP(BIT(7), 0)
+#define PHY_ID_OVERWRITE_MODE_DEVICE FIELD_PREP(BIT(7), 1)
+
+#define PHY_APP_CLK_RATE 125000000
+#define PHY_LPM_CLK_RATE 12000000
+#define PHY_STB_CLK_RATE 333334
+
+struct cv1800_usb_phy {
+ struct phy *phy;
+ struct regmap *syscon;
+ spinlock_t lock;
+ struct clk *usb_app_clk;
+ struct clk *usb_lpm_clk;
+ struct clk *usb_stb_clk;
+ bool support_otg;
+};
+
+static int cv1800_usb_phy_set_mode(struct phy *_phy,
+ enum phy_mode mode, int submode)
+{
+ struct cv1800_usb_phy *phy = phy_get_drvdata(_phy);
+ unsigned int regval = 0;
+ int ret;
+
+ dev_dbg(&phy->phy->dev, "set mode %d\n", (int)mode);
+
+ switch (mode) {
+ case PHY_MODE_USB_DEVICE:
+ regval = PHY_ID_OVERWRITE_EN | PHY_ID_OVERWRITE_MODE_DEVICE;
+ regmap_clear_bits(phy->syscon, REG_USB_PHY_CTRL, PHY_VBUS_POWER);
+ break;
+ case PHY_MODE_USB_HOST:
+ regval = PHY_ID_OVERWRITE_EN | PHY_ID_OVERWRITE_MODE_HOST;
+ regmap_set_bits(phy->syscon, REG_USB_PHY_CTRL, PHY_VBUS_POWER);
+ break;
+ case PHY_MODE_USB_OTG:
+ if (!phy->support_otg)
+ return 0;
+
+ ret = regmap_read(phy->syscon, REG_USB_PHY_CTRL, &regval);
+ if (ret)
+ return ret;
+
+ regval = FIELD_GET(PHY_ID_OVERWRITE_MODE, regval);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(phy->syscon, REG_USB_PHY_CTRL,
+ PHY_ID_OVERWRITE_EN | PHY_ID_OVERWRITE_MODE,
+ regval);
+}
+
+static int cv1800_usb_phy_set_clock(struct cv1800_usb_phy *phy)
+{
+ int ret;
+
+ ret = clk_set_rate(phy->usb_app_clk, PHY_APP_CLK_RATE);
+ if (ret)
+ return ret;
+
+ ret = clk_set_rate(phy->usb_lpm_clk, PHY_LPM_CLK_RATE);
+ if (ret)
+ return ret;
+
+ return clk_set_rate(phy->usb_stb_clk, PHY_STB_CLK_RATE);
+}
+
+static const struct phy_ops cv1800_usb_phy_ops = {
+ .set_mode = cv1800_usb_phy_set_mode,
+ .owner = THIS_MODULE,
+};
+
+static int cv1800_usb_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device *parent = dev->parent;
+ struct cv1800_usb_phy *phy;
+ struct phy_provider *phy_provider;
+ int ret;
+
+ if (!parent)
+ return -ENODEV;
+
+ phy = devm_kmalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->syscon = syscon_node_to_regmap(parent->of_node);
+ if (IS_ERR_OR_NULL(phy->syscon))
+ return -ENODEV;
+
+ phy->support_otg = false;
+
+ spin_lock_init(&phy->lock);
+
+ phy->usb_app_clk = devm_clk_get_enabled(dev, "app");
+ if (IS_ERR(phy->usb_app_clk))
+ return dev_err_probe(dev, PTR_ERR(phy->usb_app_clk),
+ "Failed to get app clock\n");
+
+ phy->usb_lpm_clk = devm_clk_get_enabled(dev, "lpm");
+ if (IS_ERR(phy->usb_lpm_clk))
+ return dev_err_probe(dev, PTR_ERR(phy->usb_lpm_clk),
+ "Failed to get lpm clock\n");
+
+ phy->usb_stb_clk = devm_clk_get_enabled(dev, "stb");
+ if (IS_ERR(phy->usb_stb_clk))
+ return dev_err_probe(dev, PTR_ERR(phy->usb_stb_clk),
+ "Failed to get stb clock\n");
+
+ phy->phy = devm_phy_create(dev, NULL, &cv1800_usb_phy_ops);
+ if (IS_ERR(phy->phy))
+ return dev_err_probe(dev, PTR_ERR(phy->phy),
+ "Failed to create phy\n");
+
+ ret = cv1800_usb_phy_set_clock(phy);
+ if (ret)
+ return ret;
+
+ phy_set_drvdata(phy->phy, phy);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id cv1800_usb_phy_ids[] = {
+ { .compatible = "sophgo,cv1800b-usb2-phy" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, cv1800_usb_phy_ids);
+
+static struct platform_driver cv1800_usb_phy_driver = {
+ .probe = cv1800_usb_phy_probe,
+ .driver = {
+ .name = "cv1800-usb2-phy",
+ .of_match_table = cv1800_usb_phy_ids,
+ },
+};
+module_platform_driver(cv1800_usb_phy_driver);
+
+MODULE_AUTHOR("Inochi Amaoto <inochiama@outlook.com>");
+MODULE_DESCRIPTION("CV1800/SG2000 SoC USB 2.0 PHY driver");
+MODULE_LICENSE("GPL");
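For context, a consumer reaches cv1800_usb_phy_set_mode() through the generic PHY API; a sketch (the handle name and glue function are hypothetical):

#include <linux/phy/phy.h>

/* e.g. from the DWC2 glue when switching roles: */
static int example_set_host_role(struct phy *usb2_phy)
{
	/* Ends up in cv1800_usb_phy_set_mode() with submode 0. */
	return phy_set_mode(usb2_phy, PHY_MODE_USB_HOST);
}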
diff --git a/drivers/phy/tegra/xusb-tegra210.c b/drivers/phy/tegra/xusb-tegra210.c
index ebc8a7e21a31..3409924498e9 100644
--- a/drivers/phy/tegra/xusb-tegra210.c
+++ b/drivers/phy/tegra/xusb-tegra210.c
@@ -3164,18 +3164,22 @@ tegra210_xusb_padctl_probe(struct device *dev,
}
pdev = of_find_device_by_node(np);
+ of_node_put(np);
if (!pdev) {
dev_warn(dev, "PMC device is not available\n");
goto out;
}
- if (!platform_get_drvdata(pdev))
+ if (!platform_get_drvdata(pdev)) {
+ put_device(&pdev->dev);
return ERR_PTR(-EPROBE_DEFER);
+ }
padctl->regmap = dev_get_regmap(&pdev->dev, "usb_sleepwalk");
if (!padctl->regmap)
dev_info(dev, "failed to find PMC regmap\n");
+ put_device(&pdev->dev);
out:
return &padctl->base;
}
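The refcount rules behind the hunk above, pulled out into a minimal sketch (function name hypothetical; the EPROBE_DEFER path is trimmed for brevity):

#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

/* of_find_device_by_node() takes a reference on the returned device,
 * so every path after it must end in put_device(); the OF node itself
 * can be dropped as soon as the lookup is done.
 */
static struct regmap *lookup_pmc_regmap(struct device_node *np)
{
	struct platform_device *pdev = of_find_device_by_node(np);
	struct regmap *regmap;

	of_node_put(np);		/* done with the node itself */
	if (!pdev)
		return NULL;

	regmap = dev_get_regmap(&pdev->dev, "usb_sleepwalk");
	put_device(&pdev->dev);	/* balances of_find_device_by_node() */
	return regmap;
}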
diff --git a/drivers/phy/ti/Kconfig b/drivers/phy/ti/Kconfig
index b905902d5750..b40f28019131 100644
--- a/drivers/phy/ti/Kconfig
+++ b/drivers/phy/ti/Kconfig
@@ -62,7 +62,7 @@ config OMAP_CONTROL_PHY
config OMAP_USB2
tristate "OMAP USB2 PHY Driver"
- depends on ARCH_OMAP2PLUS || ARCH_K3
+ depends on ARCH_OMAP2PLUS || ARCH_K3 || COMPILE_TEST
depends on USB_SUPPORT
select GENERIC_PHY
select USB_PHY
diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c
index 431b223996e0..5b6c27aa7e8b 100644
--- a/drivers/phy/ti/phy-am654-serdes.c
+++ b/drivers/phy/ti/phy-am654-serdes.c
@@ -99,7 +99,6 @@ static const struct regmap_config serdes_am654_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .fast_io = true,
.max_register = 0x1ffc,
};
diff --git a/drivers/phy/ti/phy-dm816x-usb.c b/drivers/phy/ti/phy-dm816x-usb.c
index e8f842d4e841..d274831b731c 100644
--- a/drivers/phy/ti/phy-dm816x-usb.c
+++ b/drivers/phy/ti/phy-dm816x-usb.c
@@ -269,7 +269,6 @@ static struct platform_driver dm816x_usb_phy_driver = {
module_platform_driver(dm816x_usb_phy_driver);
-MODULE_ALIAS("platform:dm816x_usb");
MODULE_AUTHOR("Tony Lindgren <tony@atomide.com>");
MODULE_DESCRIPTION("dm816x usb phy driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c
index ff5d5e29629f..50adabb867cb 100644
--- a/drivers/phy/ti/phy-gmii-sel.c
+++ b/drivers/phy/ti/phy-gmii-sel.c
@@ -34,6 +34,7 @@ enum {
PHY_GMII_SEL_PORT_MODE = 0,
PHY_GMII_SEL_RGMII_ID_MODE,
PHY_GMII_SEL_RMII_IO_CLK_EN,
+ PHY_GMII_SEL_FIXED_TX_DELAY,
PHY_GMII_SEL_LAST,
};
@@ -127,6 +128,11 @@ static int phy_gmii_sel_mode(struct phy *phy, enum phy_mode mode, int submode)
goto unsupported;
}
+ /* With a fixed delay, some modes are not supported at all. */
+ if (soc_data->features & BIT(PHY_GMII_SEL_FIXED_TX_DELAY) &&
+ rgmii_id != 0)
+ return -EINVAL;
+
if_phy->phy_if_mode = submode;
dev_dbg(dev, "%s id:%u mode:%u rgmii_id:%d rmii_clk_ext:%d\n",
@@ -210,25 +216,46 @@ struct phy_gmii_sel_soc_data phy_gmii_sel_soc_dm814 = {
static const
struct reg_field phy_gmii_sel_fields_am654[][PHY_GMII_SEL_LAST] = {
- { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x0, 0, 2), },
- { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x4, 0, 2), },
- { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x8, 0, 2), },
- { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0xC, 0, 2), },
- { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x10, 0, 2), },
- { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x14, 0, 2), },
- { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x18, 0, 2), },
- { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x1C, 0, 2), },
+ {
+ [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x0, 0, 2),
+ [PHY_GMII_SEL_RGMII_ID_MODE] = REG_FIELD(0x0, 4, 4),
+ }, {
+ [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x4, 0, 2),
+ [PHY_GMII_SEL_RGMII_ID_MODE] = REG_FIELD(0x4, 4, 4),
+ }, {
+ [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x8, 0, 2),
+ [PHY_GMII_SEL_RGMII_ID_MODE] = REG_FIELD(0x8, 4, 4),
+ }, {
+ [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0xC, 0, 2),
+ [PHY_GMII_SEL_RGMII_ID_MODE] = REG_FIELD(0xC, 4, 4),
+ }, {
+ [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x10, 0, 2),
+ [PHY_GMII_SEL_RGMII_ID_MODE] = REG_FIELD(0x10, 4, 4),
+ }, {
+ [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x14, 0, 2),
+ [PHY_GMII_SEL_RGMII_ID_MODE] = REG_FIELD(0x14, 4, 4),
+ }, {
+ [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x18, 0, 2),
+ [PHY_GMII_SEL_RGMII_ID_MODE] = REG_FIELD(0x18, 4, 4),
+ }, {
+ [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x1C, 0, 2),
+ [PHY_GMII_SEL_RGMII_ID_MODE] = REG_FIELD(0x1C, 4, 4),
+ },
};
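For context on the table shape: REG_FIELD(reg, lsb, msb) describes one regmap bitfield, so each port above now carries both a 3-bit port-mode field and a single-bit RGMII-ID field at bit 4. A hedged sketch of driving such a field (function and variable names hypothetical):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regmap.h>

static int example_set_rgmii_id(struct device *dev, struct regmap *map,
				bool enable)
{
	static const struct reg_field rf = REG_FIELD(0x0, 4, 4);
	struct regmap_field *field;

	field = devm_regmap_field_alloc(dev, map, rf);
	if (IS_ERR(field))
		return PTR_ERR(field);

	/* Writes bit 4 of register 0x0, i.e. port 0's RGMII-ID bit. */
	return regmap_field_write(field, enable);
}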
static const
struct phy_gmii_sel_soc_data phy_gmii_sel_soc_am654 = {
.use_of_data = true,
+ .features = BIT(PHY_GMII_SEL_RGMII_ID_MODE) |
+ BIT(PHY_GMII_SEL_FIXED_TX_DELAY),
.regfields = phy_gmii_sel_fields_am654,
};
static const
struct phy_gmii_sel_soc_data phy_gmii_sel_cpsw5g_soc_j7200 = {
.use_of_data = true,
+ .features = BIT(PHY_GMII_SEL_RGMII_ID_MODE) |
+ BIT(PHY_GMII_SEL_FIXED_TX_DELAY),
.regfields = phy_gmii_sel_fields_am654,
.extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII) |
BIT(PHY_INTERFACE_MODE_USXGMII),
@@ -239,6 +266,8 @@ struct phy_gmii_sel_soc_data phy_gmii_sel_cpsw5g_soc_j7200 = {
static const
struct phy_gmii_sel_soc_data phy_gmii_sel_cpsw9g_soc_j721e = {
.use_of_data = true,
+ .features = BIT(PHY_GMII_SEL_RGMII_ID_MODE) |
+ BIT(PHY_GMII_SEL_FIXED_TX_DELAY),
.regfields = phy_gmii_sel_fields_am654,
.extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII),
.num_ports = 8,
@@ -248,6 +277,8 @@ struct phy_gmii_sel_soc_data phy_gmii_sel_cpsw9g_soc_j721e = {
static const
struct phy_gmii_sel_soc_data phy_gmii_sel_cpsw9g_soc_j784s4 = {
.use_of_data = true,
+ .features = BIT(PHY_GMII_SEL_RGMII_ID_MODE) |
+ BIT(PHY_GMII_SEL_FIXED_TX_DELAY),
.regfields = phy_gmii_sel_fields_am654,
.extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII) |
BIT(PHY_INTERFACE_MODE_USXGMII),
diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
index ab2a4f2c0a5b..a8b440c6c46b 100644
--- a/drivers/phy/ti/phy-j721e-wiz.c
+++ b/drivers/phy/ti/phy-j721e-wiz.c
@@ -1319,7 +1319,6 @@ static const struct regmap_config wiz_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .fast_io = true,
};
static struct wiz_data j721e_16g_data = {
diff --git a/drivers/phy/ti/phy-omap-control.c b/drivers/phy/ti/phy-omap-control.c
index 2fdb8f4241c7..4968434312f8 100644
--- a/drivers/phy/ti/phy-omap-control.c
+++ b/drivers/phy/ti/phy-omap-control.c
@@ -334,7 +334,6 @@ static void __exit omap_control_phy_exit(void)
}
module_exit(omap_control_phy_exit);
-MODULE_ALIAS("platform:omap_control_phy");
MODULE_AUTHOR("Texas Instruments Inc.");
MODULE_DESCRIPTION("OMAP Control Module PHY Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c
index c1a0ef979142..1eb252604441 100644
--- a/drivers/phy/ti/phy-omap-usb2.c
+++ b/drivers/phy/ti/phy-omap-usb2.c
@@ -363,6 +363,13 @@ static void omap_usb2_init_errata(struct omap_usb *phy)
phy->flags |= OMAP_USB2_DISABLE_CHRG_DET;
}
+static void omap_usb2_put_device(void *_dev)
+{
+ struct device *dev = _dev;
+
+ put_device(dev);
+}
+
static int omap_usb2_probe(struct platform_device *pdev)
{
struct omap_usb *phy;
@@ -373,6 +380,7 @@ static int omap_usb2_probe(struct platform_device *pdev)
struct device_node *control_node;
struct platform_device *control_pdev;
const struct usb_phy_data *phy_data;
+ int ret;
phy_data = device_get_match_data(&pdev->dev);
if (!phy_data)
@@ -423,6 +431,11 @@ static int omap_usb2_probe(struct platform_device *pdev)
return -EINVAL;
}
phy->control_dev = &control_pdev->dev;
+
+ ret = devm_add_action_or_reset(&pdev->dev, omap_usb2_put_device,
+ phy->control_dev);
+ if (ret)
+ return ret;
} else {
if (of_property_read_u32_index(node,
"syscon-phy-power", 1,
@@ -520,7 +533,6 @@ static struct platform_driver omap_usb2_driver = {
module_platform_driver(omap_usb2_driver);
-MODULE_ALIAS("platform:omap_usb2");
MODULE_AUTHOR("Texas Instruments Inc.");
MODULE_DESCRIPTION("OMAP USB2 phy driver");
MODULE_LICENSE("GPL v2");
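Both this driver and phy-ti-pipe3.c below adopt the same device-managed release; the pattern in isolation (a sketch of a probe fragment, not the full driver):

#include <linux/device.h>

static void example_put_device(void *data)
{
	put_device(data);
}

static int example_bind_control_dev(struct device *dev,
				    struct device *control_dev)
{
	/* Runs put_device() on probe failure and again at unbind,
	 * so the reference taken during lookup cannot leak.
	 */
	return devm_add_action_or_reset(dev, example_put_device, control_dev);
}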
diff --git a/drivers/phy/ti/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c
index da2cbacb982c..b5543b5c674c 100644
--- a/drivers/phy/ti/phy-ti-pipe3.c
+++ b/drivers/phy/ti/phy-ti-pipe3.c
@@ -667,12 +667,20 @@ static int ti_pipe3_get_clk(struct ti_pipe3 *phy)
return 0;
}
+static void ti_pipe3_put_device(void *_dev)
+{
+ struct device *dev = _dev;
+
+ put_device(dev);
+}
+
static int ti_pipe3_get_sysctrl(struct ti_pipe3 *phy)
{
struct device *dev = phy->dev;
struct device_node *node = dev->of_node;
struct device_node *control_node;
struct platform_device *control_pdev;
+ int ret;
phy->phy_power_syscon = syscon_regmap_lookup_by_phandle(node,
"syscon-phy-power");
@@ -704,6 +712,11 @@ static int ti_pipe3_get_sysctrl(struct ti_pipe3 *phy)
}
phy->control_dev = &control_pdev->dev;
+
+ ret = devm_add_action_or_reset(dev, ti_pipe3_put_device,
+ phy->control_dev);
+ if (ret)
+ return ret;
}
if (phy->mode == PIPE3_MODE_PCIE) {
@@ -929,7 +942,6 @@ static struct platform_driver ti_pipe3_driver = {
module_platform_driver(ti_pipe3_driver);
-MODULE_ALIAS("platform:ti_pipe3");
MODULE_AUTHOR("Texas Instruments Inc.");
MODULE_DESCRIPTION("TI PIPE3 phy driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index ddd11668457c..4f8507ebbdac 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -211,6 +211,8 @@ config PINCTRL_EIC7700
depends on ARCH_ESWIN || COMPILE_TEST
select PINMUX
select GENERIC_PINCONF
+ select REGULATOR
+ select REGULATOR_FIXED_VOLTAGE
help
This driver supports the pin controller in ESWIN's EIC7700 SoC,
which provides pin multiplexing, pin configuration, and rgmii voltage
@@ -358,6 +360,17 @@ config PINCTRL_LPC18XX
help
Pinctrl driver for NXP LPC18xx/43xx System Control Unit (SCU).
+config PINCTRL_MAX7360
+ tristate "MAX7360 Pincontrol support"
+ depends on MFD_MAX7360
+ select PINMUX
+ select GENERIC_PINCONF
+ help
+ Say Y here to enable pin control support for the Maxim MAX7360 keypad
+ controller.
+ This keypad controller has 8 GPIO pins that can operate in GPIO, PWM,
+ or rotary encoder alternate modes.
+
config PINCTRL_MAX77620
tristate "MAX77620/MAX20024 Pincontrol support"
depends on MFD_MAX77620 && OF
@@ -539,6 +552,7 @@ config PINCTRL_STMFX
tristate "STMicroelectronics STMFX GPIO expander pinctrl driver"
depends on I2C
depends on OF_GPIO
+ depends on HAS_IOMEM
select GENERIC_PINCONF
select GPIOLIB_IRQCHIP
select MFD_STMFX
@@ -550,7 +564,7 @@ config PINCTRL_STMFX
interrupt-controller.
config PINCTRL_SX150X
- bool "Semtech SX150x I2C GPIO expander pinctrl driver"
+ tristate "Semtech SX150x I2C GPIO expander pinctrl driver"
depends on I2C=y
select PINMUX
select PINCONF
@@ -600,6 +614,25 @@ config PINCTRL_TH1520
This driver is needed for RISC-V development boards like
the BeagleV Ahead and the LicheePi 4A.
+config PINCTRL_UPBOARD
+ tristate "AAeon UP board FPGA pin controller"
+ depends on MFD_UPBOARD_FPGA
+ select PINMUX
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ select GPIOLIB
+ select GPIO_AGGREGATOR
+ help
+ Pin controller for the FPGA GPIO lines on UP boards. Due to the
+ hardware layout, the driver controls the FPGA pins in tandem with
+ their corresponding Intel SoC GPIOs.
+
+ Currently supported:
+ - UP Squared
+
+ To compile this driver as a module, choose M here: the module
+ will be called pinctrl-upboard.
+
config PINCTRL_ZYNQ
bool "Pinctrl driver for Xilinx Zynq"
depends on ARCH_ZYNQ || COMPILE_TEST
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 909ab89a56d2..e0cfb9b7c99b 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_PINCTRL_FALCON) += pinctrl-falcon.o
obj-$(CONFIG_PINCTRL_LOONGSON2) += pinctrl-loongson2.o
obj-$(CONFIG_PINCTRL_XWAY) += pinctrl-xway.o
obj-$(CONFIG_PINCTRL_LPC18XX) += pinctrl-lpc18xx.o
+obj-$(CONFIG_PINCTRL_MAX7360) += pinctrl-max7360.o
obj-$(CONFIG_PINCTRL_MAX77620) += pinctrl-max77620.o
obj-$(CONFIG_PINCTRL_MCP23S08_I2C) += pinctrl-mcp23s08_i2c.o
obj-$(CONFIG_PINCTRL_MCP23S08_SPI) += pinctrl-mcp23s08_spi.o
@@ -59,6 +60,7 @@ obj-$(CONFIG_PINCTRL_SX150X) += pinctrl-sx150x.o
obj-$(CONFIG_PINCTRL_TB10X) += pinctrl-tb10x.o
obj-$(CONFIG_PINCTRL_TPS6594) += pinctrl-tps6594.o
obj-$(CONFIG_PINCTRL_TH1520) += pinctrl-th1520.o
+obj-$(CONFIG_PINCTRL_UPBOARD) += pinctrl-upboard.o
obj-$(CONFIG_PINCTRL_ZYNQMP) += pinctrl-zynqmp.o
obj-$(CONFIG_PINCTRL_ZYNQ) += pinctrl-zynq.o
diff --git a/drivers/pinctrl/actions/pinctrl-owl.c b/drivers/pinctrl/actions/pinctrl-owl.c
index 86f3d5c69e36..1f0ef4727ba7 100644
--- a/drivers/pinctrl/actions/pinctrl-owl.c
+++ b/drivers/pinctrl/actions/pinctrl-owl.c
@@ -962,7 +962,7 @@ int owl_pinctrl_probe(struct platform_device *pdev,
pctrl->chip.direction_input = owl_gpio_direction_input;
pctrl->chip.direction_output = owl_gpio_direction_output;
pctrl->chip.get = owl_gpio_get;
- pctrl->chip.set_rv = owl_gpio_set;
+ pctrl->chip.set = owl_gpio_set;
pctrl->chip.request = owl_gpio_request;
pctrl->chip.free = owl_gpio_free;
diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig
index 35b51ce4298e..096d0778427e 100644
--- a/drivers/pinctrl/bcm/Kconfig
+++ b/drivers/pinctrl/bcm/Kconfig
@@ -106,6 +106,18 @@ config PINCTRL_BCM63268
help
Say Y here to enable the Broadcom BCM63268 GPIO driver.
+config PINCTRL_BRCMSTB
+ tristate "Broadcom STB product line pin controller driver"
+ depends on OF && (ARCH_BCM2835 || ARCH_BRCMSTB || COMPILE_TEST)
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ help
+ Enable pin muxing and configuration functionality
+ for Broadcom STB product line chipsets.
+
+source "drivers/pinctrl/bcm/Kconfig.stb"
+
config PINCTRL_IPROC_GPIO
bool "Broadcom iProc GPIO (with PINCONF) driver"
depends on OF_GPIO && (ARCH_BCM_IPROC || COMPILE_TEST)
diff --git a/drivers/pinctrl/bcm/Kconfig.stb b/drivers/pinctrl/bcm/Kconfig.stb
new file mode 100644
index 000000000000..c1ba361e1d78
--- /dev/null
+++ b/drivers/pinctrl/bcm/Kconfig.stb
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+if PINCTRL_BRCMSTB
+
+config PINCTRL_BCM2712
+ tristate "BCM2712 SoC pin controller driver"
+ help
+ Driver for the BCM2712 integrated pin controller,
+ as found on the Raspberry Pi 5.
+
+endif
diff --git a/drivers/pinctrl/bcm/Makefile b/drivers/pinctrl/bcm/Makefile
index 82b868ec1471..482d769b1a81 100644
--- a/drivers/pinctrl/bcm/Makefile
+++ b/drivers/pinctrl/bcm/Makefile
@@ -11,6 +11,8 @@ obj-$(CONFIG_PINCTRL_BCM6358) += pinctrl-bcm6358.o
obj-$(CONFIG_PINCTRL_BCM6362) += pinctrl-bcm6362.o
obj-$(CONFIG_PINCTRL_BCM6368) += pinctrl-bcm6368.o
obj-$(CONFIG_PINCTRL_BCM63268) += pinctrl-bcm63268.o
+obj-$(CONFIG_PINCTRL_BRCMSTB) += pinctrl-brcmstb.o
+obj-$(CONFIG_PINCTRL_BCM2712) += pinctrl-brcmstb-bcm2712.o
obj-$(CONFIG_PINCTRL_IPROC_GPIO) += pinctrl-iproc-gpio.o
obj-$(CONFIG_PINCTRL_CYGNUS_MUX) += pinctrl-cygnus-mux.o
obj-$(CONFIG_PINCTRL_NS) += pinctrl-ns.o
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 826827800474..c165674c5b4d 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -397,7 +397,7 @@ static const struct gpio_chip bcm2835_gpio_chip = {
.direction_output = bcm2835_gpio_direction_output,
.get_direction = bcm2835_gpio_get_direction,
.get = bcm2835_gpio_get,
- .set_rv = bcm2835_gpio_set,
+ .set = bcm2835_gpio_set,
.set_config = gpiochip_generic_config,
.base = -1,
.ngpio = BCM2835_NUM_GPIOS,
@@ -414,7 +414,7 @@ static const struct gpio_chip bcm2711_gpio_chip = {
.direction_output = bcm2835_gpio_direction_output,
.get_direction = bcm2835_gpio_get_direction,
.get = bcm2835_gpio_get,
- .set_rv = bcm2835_gpio_set,
+ .set = bcm2835_gpio_set,
.set_config = gpiochip_generic_config,
.base = -1,
.ngpio = BCM2711_NUM_GPIOS,
@@ -1023,7 +1023,7 @@ static int bcm2835_pinconf_get(struct pinctrl_dev *pctldev,
/* No way to read back bias config in HW */
switch (param) {
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
if (fsel != BCM2835_FSEL_GPIO_OUT)
return -EINVAL;
@@ -1091,7 +1091,7 @@ static int bcm2835_pinconf_set(struct pinctrl_dev *pctldev,
break;
/* Set output-high or output-low */
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
bcm2835_gpio_set_bit(pc, arg ? GPSET0 : GPCLR0, pin);
break;
@@ -1202,7 +1202,7 @@ static int bcm2711_pinconf_set(struct pinctrl_dev *pctldev,
break;
/* Set output-high or output-low */
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
bcm2835_gpio_set_bit(pc, arg ? GPSET0 : GPCLR0, pin);
break;
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm6358.c b/drivers/pinctrl/bcm/pinctrl-bcm6358.c
index 891de49d76e7..4c8cd65fc31e 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm6358.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm6358.c
@@ -343,10 +343,8 @@ static int bcm6358_pinctrl_probe(struct platform_device *pdev)
pc = platform_get_drvdata(pdev);
priv->overlays = devm_regmap_field_alloc(dev, pc->regs, overlays);
- if (IS_ERR(priv->overlays))
- return PTR_ERR(priv->overlays);
- return 0;
+ return PTR_ERR_OR_ZERO(priv->overlays);
}
static const struct of_device_id bcm6358_pinctrl_match[] = {
diff --git a/drivers/pinctrl/bcm/pinctrl-brcmstb-bcm2712.c b/drivers/pinctrl/bcm/pinctrl-brcmstb-bcm2712.c
new file mode 100644
index 000000000000..752b78e2c0d8
--- /dev/null
+++ b/drivers/pinctrl/bcm/pinctrl-brcmstb-bcm2712.c
@@ -0,0 +1,747 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for Broadcom brcmstb GPIO units (pinctrl only)
+ *
+ * Copyright (C) 2024-2025 Ivan T. Ivanov, Andrea della Porta
+ * Copyright (C) 2021-3 Raspberry Pi Ltd.
+ * Copyright (C) 2012 Chris Boot, Simon Arlott, Stephen Warren
+ *
+ * Based heavily on the BCM2835 GPIO & pinctrl driver, which was inspired by:
+ * pinctrl-nomadik.c, please see original file for copyright information
+ * pinctrl-tegra.c, please see original file for copyright information
+ */
+
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/of.h>
+#include "pinctrl-brcmstb.h"
+
+#define BRCMSTB_FSEL_COUNT 8
+#define BRCMSTB_FSEL_MASK 0xf
+
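+/*
+ * Each muxable pin has a 4-bit function-select field (BRCMSTB_FSEL_MASK).
+ * BRCMSTB_PIN() records the BRCMSTB_FSEL_COUNT alternate functions a pin
+ * can take, with '_' (func__) filling the unused encodings.
+ */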
+#define BRCMSTB_PIN(i, f1, f2, f3, f4, f5, f6, f7, f8) \
+ [i] = { \
+ .funcs = (u8[]) { \
+ func_##f1, \
+ func_##f2, \
+ func_##f3, \
+ func_##f4, \
+ func_##f5, \
+ func_##f6, \
+ func_##f7, \
+ func_##f8, \
+ }, \
+ .n_funcs = BRCMSTB_FSEL_COUNT, \
+ .func_mask = BRCMSTB_FSEL_MASK, \
+ }
+
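+/*
+ * The first BRCMSTB_FSEL_COUNT + 1 entries (gpio, alt1..alt8) are used
+ * verbatim as raw function-select values by the common brcmstb code; the
+ * named functions that follow are translated to a selector through each
+ * pin's funcs[] table (see brcmstb_pinctrl_fsel_set()).
+ */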
+enum bcm2712_funcs {
+ func_gpio,
+ func_alt1,
+ func_alt2,
+ func_alt3,
+ func_alt4,
+ func_alt5,
+ func_alt6,
+ func_alt7,
+ func_alt8,
+ func_aon_cpu_standbyb,
+ func_aon_fp_4sec_resetb,
+ func_aon_gpclk,
+ func_aon_pwm,
+ func_arm_jtag,
+ func_aud_fs_clk0,
+ func_avs_pmu_bsc,
+ func_bsc_m0,
+ func_bsc_m1,
+ func_bsc_m2,
+ func_bsc_m3,
+ func_clk_observe,
+ func_ctl_hdmi_5v,
+ func_enet0,
+ func_enet0_mii,
+ func_enet0_rgmii,
+ func_ext_sc_clk,
+ func_fl0,
+ func_fl1,
+ func_gpclk0,
+ func_gpclk1,
+ func_gpclk2,
+ func_hdmi_tx0_auto_i2c,
+ func_hdmi_tx0_bsc,
+ func_hdmi_tx1_auto_i2c,
+ func_hdmi_tx1_bsc,
+ func_i2s_in,
+ func_i2s_out,
+ func_ir_in,
+ func_mtsif,
+ func_mtsif_alt,
+ func_mtsif_alt1,
+ func_pdm,
+ func_pkt,
+ func_pm_led_out,
+ func_sc0,
+ func_sd0,
+ func_sd2,
+ func_sd_card_a,
+ func_sd_card_b,
+ func_sd_card_c,
+ func_sd_card_d,
+ func_sd_card_e,
+ func_sd_card_f,
+ func_sd_card_g,
+ func_spdif_out,
+ func_spi_m,
+ func_spi_s,
+ func_sr_edm_sense,
+ func_te0,
+ func_te1,
+ func_tsio,
+ func_uart0,
+ func_uart1,
+ func_uart2,
+ func_usb_pwr,
+ func_usb_vbus,
+ func_uui,
+ func_vc_i2c0,
+ func_vc_i2c3,
+ func_vc_i2c4,
+ func_vc_i2c5,
+ func_vc_i2csl,
+ func_vc_pcm,
+ func_vc_pwm0,
+ func_vc_pwm1,
+ func_vc_spi0,
+ func_vc_spi3,
+ func_vc_spi4,
+ func_vc_spi5,
+ func_vc_uart0,
+ func_vc_uart2,
+ func_vc_uart3,
+ func_vc_uart4,
+ func__,
+ func_count = func__
+};
+
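+/*
+ * Register layout tables. GPIO_REGS(n, muxreg, muxshift, padreg, padshift)
+ * places pin n's function selector in field <muxshift> of mux register
+ * <muxreg>, and its pull control in field <padshift> of pad register
+ * <padreg>. EMMC_REGS() entries have pad control only.
+ */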
+static const struct pin_regs bcm2712_c0_gpio_pin_regs[] = {
+ GPIO_REGS(0, 0, 0, 7, 7),
+ GPIO_REGS(1, 0, 1, 7, 8),
+ GPIO_REGS(2, 0, 2, 7, 9),
+ GPIO_REGS(3, 0, 3, 7, 10),
+ GPIO_REGS(4, 0, 4, 7, 11),
+ GPIO_REGS(5, 0, 5, 7, 12),
+ GPIO_REGS(6, 0, 6, 7, 13),
+ GPIO_REGS(7, 0, 7, 7, 14),
+ GPIO_REGS(8, 1, 0, 8, 0),
+ GPIO_REGS(9, 1, 1, 8, 1),
+ GPIO_REGS(10, 1, 2, 8, 2),
+ GPIO_REGS(11, 1, 3, 8, 3),
+ GPIO_REGS(12, 1, 4, 8, 4),
+ GPIO_REGS(13, 1, 5, 8, 5),
+ GPIO_REGS(14, 1, 6, 8, 6),
+ GPIO_REGS(15, 1, 7, 8, 7),
+ GPIO_REGS(16, 2, 0, 8, 8),
+ GPIO_REGS(17, 2, 1, 8, 9),
+ GPIO_REGS(18, 2, 2, 8, 10),
+ GPIO_REGS(19, 2, 3, 8, 11),
+ GPIO_REGS(20, 2, 4, 8, 12),
+ GPIO_REGS(21, 2, 5, 8, 13),
+ GPIO_REGS(22, 2, 6, 8, 14),
+ GPIO_REGS(23, 2, 7, 9, 0),
+ GPIO_REGS(24, 3, 0, 9, 1),
+ GPIO_REGS(25, 3, 1, 9, 2),
+ GPIO_REGS(26, 3, 2, 9, 3),
+ GPIO_REGS(27, 3, 3, 9, 4),
+ GPIO_REGS(28, 3, 4, 9, 5),
+ GPIO_REGS(29, 3, 5, 9, 6),
+ GPIO_REGS(30, 3, 6, 9, 7),
+ GPIO_REGS(31, 3, 7, 9, 8),
+ GPIO_REGS(32, 4, 0, 9, 9),
+ GPIO_REGS(33, 4, 1, 9, 10),
+ GPIO_REGS(34, 4, 2, 9, 11),
+ GPIO_REGS(35, 4, 3, 9, 12),
+ GPIO_REGS(36, 4, 4, 9, 13),
+ GPIO_REGS(37, 4, 5, 9, 14),
+ GPIO_REGS(38, 4, 6, 10, 0),
+ GPIO_REGS(39, 4, 7, 10, 1),
+ GPIO_REGS(40, 5, 0, 10, 2),
+ GPIO_REGS(41, 5, 1, 10, 3),
+ GPIO_REGS(42, 5, 2, 10, 4),
+ GPIO_REGS(43, 5, 3, 10, 5),
+ GPIO_REGS(44, 5, 4, 10, 6),
+ GPIO_REGS(45, 5, 5, 10, 7),
+ GPIO_REGS(46, 5, 6, 10, 8),
+ GPIO_REGS(47, 5, 7, 10, 9),
+ GPIO_REGS(48, 6, 0, 10, 10),
+ GPIO_REGS(49, 6, 1, 10, 11),
+ GPIO_REGS(50, 6, 2, 10, 12),
+ GPIO_REGS(51, 6, 3, 10, 13),
+ GPIO_REGS(52, 6, 4, 10, 14),
+ GPIO_REGS(53, 6, 5, 11, 0),
+ EMMC_REGS(54, 11, 1), /* EMMC_CMD */
+ EMMC_REGS(55, 11, 2), /* EMMC_DS */
+ EMMC_REGS(56, 11, 3), /* EMMC_CLK */
+ EMMC_REGS(57, 11, 4), /* EMMC_DAT0 */
+ EMMC_REGS(58, 11, 5), /* EMMC_DAT1 */
+ EMMC_REGS(59, 11, 6), /* EMMC_DAT2 */
+ EMMC_REGS(60, 11, 7), /* EMMC_DAT3 */
+ EMMC_REGS(61, 11, 8), /* EMMC_DAT4 */
+ EMMC_REGS(62, 11, 9), /* EMMC_DAT5 */
+ EMMC_REGS(63, 11, 10), /* EMMC_DAT6 */
+ EMMC_REGS(64, 11, 11), /* EMMC_DAT7 */
+};
+
+static struct pin_regs bcm2712_c0_aon_gpio_pin_regs[] = {
+ AON_GPIO_REGS(0, 3, 0, 6, 10),
+ AON_GPIO_REGS(1, 3, 1, 6, 11),
+ AON_GPIO_REGS(2, 3, 2, 6, 12),
+ AON_GPIO_REGS(3, 3, 3, 6, 13),
+ AON_GPIO_REGS(4, 3, 4, 6, 14),
+ AON_GPIO_REGS(5, 3, 5, 7, 0),
+ AON_GPIO_REGS(6, 3, 6, 7, 1),
+ AON_GPIO_REGS(7, 3, 7, 7, 2),
+ AON_GPIO_REGS(8, 4, 0, 7, 3),
+ AON_GPIO_REGS(9, 4, 1, 7, 4),
+ AON_GPIO_REGS(10, 4, 2, 7, 5),
+ AON_GPIO_REGS(11, 4, 3, 7, 6),
+ AON_GPIO_REGS(12, 4, 4, 7, 7),
+ AON_GPIO_REGS(13, 4, 5, 7, 8),
+ AON_GPIO_REGS(14, 4, 6, 7, 9),
+ AON_GPIO_REGS(15, 4, 7, 7, 10),
+ AON_GPIO_REGS(16, 5, 0, 7, 11),
+ AON_SGPIO_REGS(0, 0, 0),
+ AON_SGPIO_REGS(1, 0, 1),
+ AON_SGPIO_REGS(2, 0, 2),
+ AON_SGPIO_REGS(3, 0, 3),
+ AON_SGPIO_REGS(4, 1, 0),
+ AON_SGPIO_REGS(5, 2, 0),
+};
+
+static const struct pinctrl_pin_desc bcm2712_c0_gpio_pins[] = {
+ GPIO_PIN(0),
+ GPIO_PIN(1),
+ GPIO_PIN(2),
+ GPIO_PIN(3),
+ GPIO_PIN(4),
+ GPIO_PIN(5),
+ GPIO_PIN(6),
+ GPIO_PIN(7),
+ GPIO_PIN(8),
+ GPIO_PIN(9),
+ GPIO_PIN(10),
+ GPIO_PIN(11),
+ GPIO_PIN(12),
+ GPIO_PIN(13),
+ GPIO_PIN(14),
+ GPIO_PIN(15),
+ GPIO_PIN(16),
+ GPIO_PIN(17),
+ GPIO_PIN(18),
+ GPIO_PIN(19),
+ GPIO_PIN(20),
+ GPIO_PIN(21),
+ GPIO_PIN(22),
+ GPIO_PIN(23),
+ GPIO_PIN(24),
+ GPIO_PIN(25),
+ GPIO_PIN(26),
+ GPIO_PIN(27),
+ GPIO_PIN(28),
+ GPIO_PIN(29),
+ GPIO_PIN(30),
+ GPIO_PIN(31),
+ GPIO_PIN(32),
+ GPIO_PIN(33),
+ GPIO_PIN(34),
+ GPIO_PIN(35),
+ GPIO_PIN(36),
+ GPIO_PIN(37),
+ GPIO_PIN(38),
+ GPIO_PIN(39),
+ GPIO_PIN(40),
+ GPIO_PIN(41),
+ GPIO_PIN(42),
+ GPIO_PIN(43),
+ GPIO_PIN(44),
+ GPIO_PIN(45),
+ GPIO_PIN(46),
+ GPIO_PIN(47),
+ GPIO_PIN(48),
+ GPIO_PIN(49),
+ GPIO_PIN(50),
+ GPIO_PIN(51),
+ GPIO_PIN(52),
+ GPIO_PIN(53),
+ PINCTRL_PIN(54, "emmc_cmd"),
+ PINCTRL_PIN(55, "emmc_ds"),
+ PINCTRL_PIN(56, "emmc_clk"),
+ PINCTRL_PIN(57, "emmc_dat0"),
+ PINCTRL_PIN(58, "emmc_dat1"),
+ PINCTRL_PIN(59, "emmc_dat2"),
+ PINCTRL_PIN(60, "emmc_dat3"),
+ PINCTRL_PIN(61, "emmc_dat4"),
+ PINCTRL_PIN(62, "emmc_dat5"),
+ PINCTRL_PIN(63, "emmc_dat6"),
+ PINCTRL_PIN(64, "emmc_dat7"),
+};
+
+static struct pinctrl_pin_desc bcm2712_c0_aon_gpio_pins[] = {
+ AON_GPIO_PIN(0), AON_GPIO_PIN(1), AON_GPIO_PIN(2), AON_GPIO_PIN(3),
+ AON_GPIO_PIN(4), AON_GPIO_PIN(5), AON_GPIO_PIN(6), AON_GPIO_PIN(7),
+ AON_GPIO_PIN(8), AON_GPIO_PIN(9), AON_GPIO_PIN(10), AON_GPIO_PIN(11),
+ AON_GPIO_PIN(12), AON_GPIO_PIN(13), AON_GPIO_PIN(14), AON_GPIO_PIN(15),
+ AON_GPIO_PIN(16), AON_SGPIO_PIN(0), AON_SGPIO_PIN(1), AON_SGPIO_PIN(2),
+ AON_SGPIO_PIN(3), AON_SGPIO_PIN(4), AON_SGPIO_PIN(5),
+};
+
+static const struct pin_regs bcm2712_d0_gpio_pin_regs[] = {
+ GPIO_REGS(1, 0, 0, 4, 5),
+ GPIO_REGS(2, 0, 1, 4, 6),
+ GPIO_REGS(3, 0, 2, 4, 7),
+ GPIO_REGS(4, 0, 3, 4, 8),
+ GPIO_REGS(10, 0, 4, 4, 9),
+ GPIO_REGS(11, 0, 5, 4, 10),
+ GPIO_REGS(12, 0, 6, 4, 11),
+ GPIO_REGS(13, 0, 7, 4, 12),
+ GPIO_REGS(14, 1, 0, 4, 13),
+ GPIO_REGS(15, 1, 1, 4, 14),
+ GPIO_REGS(18, 1, 2, 5, 0),
+ GPIO_REGS(19, 1, 3, 5, 1),
+ GPIO_REGS(20, 1, 4, 5, 2),
+ GPIO_REGS(21, 1, 5, 5, 3),
+ GPIO_REGS(22, 1, 6, 5, 4),
+ GPIO_REGS(23, 1, 7, 5, 5),
+ GPIO_REGS(24, 2, 0, 5, 6),
+ GPIO_REGS(25, 2, 1, 5, 7),
+ GPIO_REGS(26, 2, 2, 5, 8),
+ GPIO_REGS(27, 2, 3, 5, 9),
+ GPIO_REGS(28, 2, 4, 5, 10),
+ GPIO_REGS(29, 2, 5, 5, 11),
+ GPIO_REGS(30, 2, 6, 5, 12),
+ GPIO_REGS(31, 2, 7, 5, 13),
+ GPIO_REGS(32, 3, 0, 5, 14),
+ GPIO_REGS(33, 3, 1, 6, 0),
+ GPIO_REGS(34, 3, 2, 6, 1),
+ GPIO_REGS(35, 3, 3, 6, 2),
+ EMMC_REGS(36, 6, 3), /* EMMC_CMD */
+ EMMC_REGS(37, 6, 4), /* EMMC_DS */
+ EMMC_REGS(38, 6, 5), /* EMMC_CLK */
+ EMMC_REGS(39, 6, 6), /* EMMC_DAT0 */
+ EMMC_REGS(40, 6, 7), /* EMMC_DAT1 */
+ EMMC_REGS(41, 6, 8), /* EMMC_DAT2 */
+ EMMC_REGS(42, 6, 9), /* EMMC_DAT3 */
+ EMMC_REGS(43, 6, 10), /* EMMC_DAT4 */
+ EMMC_REGS(44, 6, 11), /* EMMC_DAT5 */
+ EMMC_REGS(45, 6, 12), /* EMMC_DAT6 */
+ EMMC_REGS(46, 6, 13), /* EMMC_DAT7 */
+};
+
+static struct pin_regs bcm2712_d0_aon_gpio_pin_regs[] = {
+ AON_GPIO_REGS(0, 3, 0, 5, 9),
+ AON_GPIO_REGS(1, 3, 1, 5, 10),
+ AON_GPIO_REGS(2, 3, 2, 5, 11),
+ AON_GPIO_REGS(3, 3, 3, 5, 12),
+ AON_GPIO_REGS(4, 3, 4, 5, 13),
+ AON_GPIO_REGS(5, 3, 5, 5, 14),
+ AON_GPIO_REGS(6, 3, 6, 6, 0),
+ AON_GPIO_REGS(8, 3, 7, 6, 1),
+ AON_GPIO_REGS(9, 4, 0, 6, 2),
+ AON_GPIO_REGS(12, 4, 1, 6, 3),
+ AON_GPIO_REGS(13, 4, 2, 6, 4),
+ AON_GPIO_REGS(14, 4, 3, 6, 5),
+ AON_SGPIO_REGS(0, 0, 0),
+ AON_SGPIO_REGS(1, 0, 1),
+ AON_SGPIO_REGS(2, 0, 2),
+ AON_SGPIO_REGS(3, 0, 3),
+ AON_SGPIO_REGS(4, 1, 0),
+ AON_SGPIO_REGS(5, 2, 0),
+};
+
+static const struct pinctrl_pin_desc bcm2712_d0_gpio_pins[] = {
+ GPIO_PIN(1),
+ GPIO_PIN(2),
+ GPIO_PIN(3),
+ GPIO_PIN(4),
+ GPIO_PIN(10),
+ GPIO_PIN(11),
+ GPIO_PIN(12),
+ GPIO_PIN(13),
+ GPIO_PIN(14),
+ GPIO_PIN(15),
+ GPIO_PIN(18),
+ GPIO_PIN(19),
+ GPIO_PIN(20),
+ GPIO_PIN(21),
+ GPIO_PIN(22),
+ GPIO_PIN(23),
+ GPIO_PIN(24),
+ GPIO_PIN(25),
+ GPIO_PIN(26),
+ GPIO_PIN(27),
+ GPIO_PIN(28),
+ GPIO_PIN(29),
+ GPIO_PIN(30),
+ GPIO_PIN(31),
+ GPIO_PIN(32),
+ GPIO_PIN(33),
+ GPIO_PIN(34),
+ GPIO_PIN(35),
+ PINCTRL_PIN(36, "emmc_cmd"),
+ PINCTRL_PIN(37, "emmc_ds"),
+ PINCTRL_PIN(38, "emmc_clk"),
+ PINCTRL_PIN(39, "emmc_dat0"),
+ PINCTRL_PIN(40, "emmc_dat1"),
+ PINCTRL_PIN(41, "emmc_dat2"),
+ PINCTRL_PIN(42, "emmc_dat3"),
+ PINCTRL_PIN(43, "emmc_dat4"),
+ PINCTRL_PIN(44, "emmc_dat5"),
+ PINCTRL_PIN(45, "emmc_dat6"),
+ PINCTRL_PIN(46, "emmc_dat7"),
+};
+
+static struct pinctrl_pin_desc bcm2712_d0_aon_gpio_pins[] = {
+ AON_GPIO_PIN(0), AON_GPIO_PIN(1), AON_GPIO_PIN(2), AON_GPIO_PIN(3),
+ AON_GPIO_PIN(4), AON_GPIO_PIN(5), AON_GPIO_PIN(6), AON_GPIO_PIN(8),
+ AON_GPIO_PIN(9), AON_GPIO_PIN(12), AON_GPIO_PIN(13), AON_GPIO_PIN(14),
+ AON_SGPIO_PIN(0), AON_SGPIO_PIN(1), AON_SGPIO_PIN(2),
+ AON_SGPIO_PIN(3), AON_SGPIO_PIN(4), AON_SGPIO_PIN(5),
+};
+
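+/* Names for enum bcm2712_funcs, indexed by function id via BRCMSTB_FUNC(). */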
+static const char * const bcm2712_func_names[] = {
+ BRCMSTB_FUNC(gpio),
+ BRCMSTB_FUNC(alt1),
+ BRCMSTB_FUNC(alt2),
+ BRCMSTB_FUNC(alt3),
+ BRCMSTB_FUNC(alt4),
+ BRCMSTB_FUNC(alt5),
+ BRCMSTB_FUNC(alt6),
+ BRCMSTB_FUNC(alt7),
+ BRCMSTB_FUNC(alt8),
+ BRCMSTB_FUNC(aon_cpu_standbyb),
+ BRCMSTB_FUNC(aon_fp_4sec_resetb),
+ BRCMSTB_FUNC(aon_gpclk),
+ BRCMSTB_FUNC(aon_pwm),
+ BRCMSTB_FUNC(arm_jtag),
+ BRCMSTB_FUNC(aud_fs_clk0),
+ BRCMSTB_FUNC(avs_pmu_bsc),
+ BRCMSTB_FUNC(bsc_m0),
+ BRCMSTB_FUNC(bsc_m1),
+ BRCMSTB_FUNC(bsc_m2),
+ BRCMSTB_FUNC(bsc_m3),
+ BRCMSTB_FUNC(clk_observe),
+ BRCMSTB_FUNC(ctl_hdmi_5v),
+ BRCMSTB_FUNC(enet0),
+ BRCMSTB_FUNC(enet0_mii),
+ BRCMSTB_FUNC(enet0_rgmii),
+ BRCMSTB_FUNC(ext_sc_clk),
+ BRCMSTB_FUNC(fl0),
+ BRCMSTB_FUNC(fl1),
+ BRCMSTB_FUNC(gpclk0),
+ BRCMSTB_FUNC(gpclk1),
+ BRCMSTB_FUNC(gpclk2),
+ BRCMSTB_FUNC(hdmi_tx0_auto_i2c),
+ BRCMSTB_FUNC(hdmi_tx0_bsc),
+ BRCMSTB_FUNC(hdmi_tx1_auto_i2c),
+ BRCMSTB_FUNC(hdmi_tx1_bsc),
+ BRCMSTB_FUNC(i2s_in),
+ BRCMSTB_FUNC(i2s_out),
+ BRCMSTB_FUNC(ir_in),
+ BRCMSTB_FUNC(mtsif),
+ BRCMSTB_FUNC(mtsif_alt),
+ BRCMSTB_FUNC(mtsif_alt1),
+ BRCMSTB_FUNC(pdm),
+ BRCMSTB_FUNC(pkt),
+ BRCMSTB_FUNC(pm_led_out),
+ BRCMSTB_FUNC(sc0),
+ BRCMSTB_FUNC(sd0),
+ BRCMSTB_FUNC(sd2),
+ BRCMSTB_FUNC(sd_card_a),
+ BRCMSTB_FUNC(sd_card_b),
+ BRCMSTB_FUNC(sd_card_c),
+ BRCMSTB_FUNC(sd_card_d),
+ BRCMSTB_FUNC(sd_card_e),
+ BRCMSTB_FUNC(sd_card_f),
+ BRCMSTB_FUNC(sd_card_g),
+ BRCMSTB_FUNC(spdif_out),
+ BRCMSTB_FUNC(spi_m),
+ BRCMSTB_FUNC(spi_s),
+ BRCMSTB_FUNC(sr_edm_sense),
+ BRCMSTB_FUNC(te0),
+ BRCMSTB_FUNC(te1),
+ BRCMSTB_FUNC(tsio),
+ BRCMSTB_FUNC(uart0),
+ BRCMSTB_FUNC(uart1),
+ BRCMSTB_FUNC(uart2),
+ BRCMSTB_FUNC(usb_pwr),
+ BRCMSTB_FUNC(usb_vbus),
+ BRCMSTB_FUNC(uui),
+ BRCMSTB_FUNC(vc_i2c0),
+ BRCMSTB_FUNC(vc_i2c3),
+ BRCMSTB_FUNC(vc_i2c4),
+ BRCMSTB_FUNC(vc_i2c5),
+ BRCMSTB_FUNC(vc_i2csl),
+ BRCMSTB_FUNC(vc_pcm),
+ BRCMSTB_FUNC(vc_pwm0),
+ BRCMSTB_FUNC(vc_pwm1),
+ BRCMSTB_FUNC(vc_spi0),
+ BRCMSTB_FUNC(vc_spi3),
+ BRCMSTB_FUNC(vc_spi4),
+ BRCMSTB_FUNC(vc_spi5),
+ BRCMSTB_FUNC(vc_uart0),
+ BRCMSTB_FUNC(vc_uart2),
+ BRCMSTB_FUNC(vc_uart3),
+ BRCMSTB_FUNC(vc_uart4),
+};
+
+static const struct brcmstb_pin_funcs bcm2712_c0_aon_gpio_pin_funcs[] = {
+ BRCMSTB_PIN(0, ir_in, vc_spi0, vc_uart3, vc_i2c3, te0, vc_i2c0, _, _),
+ BRCMSTB_PIN(1, vc_pwm0, vc_spi0, vc_uart3, vc_i2c3, te1, aon_pwm, vc_i2c0, vc_pwm1),
+ BRCMSTB_PIN(2, vc_pwm0, vc_spi0, vc_uart3, ctl_hdmi_5v, fl0, aon_pwm, ir_in, vc_pwm1),
+ BRCMSTB_PIN(3, ir_in, vc_spi0, vc_uart3, aon_fp_4sec_resetb, fl1, sd_card_g, aon_gpclk, _),
+ BRCMSTB_PIN(4, gpclk0, vc_spi0, vc_i2csl, aon_gpclk, pm_led_out, aon_pwm, sd_card_g, vc_pwm0),
+ BRCMSTB_PIN(5, gpclk1, ir_in, vc_i2csl, clk_observe, aon_pwm, sd_card_g, vc_pwm0, _),
+ BRCMSTB_PIN(6, uart1, vc_uart4, gpclk2, ctl_hdmi_5v, vc_uart0, vc_spi3, _, _),
+ BRCMSTB_PIN(7, uart1, vc_uart4, gpclk0, aon_pwm, vc_uart0, vc_spi3, _, _),
+ BRCMSTB_PIN(8, uart1, vc_uart4, vc_i2csl, ctl_hdmi_5v, vc_uart0, vc_spi3, _, _),
+ BRCMSTB_PIN(9, uart1, vc_uart4, vc_i2csl, aon_pwm, vc_uart0, vc_spi3, _, _),
+ BRCMSTB_PIN(10, tsio, ctl_hdmi_5v, sc0, spdif_out, vc_spi5, usb_pwr, aon_gpclk, sd_card_f),
+ BRCMSTB_PIN(11, tsio, uart0, sc0, aud_fs_clk0, vc_spi5, usb_vbus, vc_uart2, sd_card_f),
+ BRCMSTB_PIN(12, tsio, uart0, vc_uart0, tsio, vc_spi5, usb_pwr, vc_uart2, sd_card_f),
+ BRCMSTB_PIN(13, bsc_m1, uart0, vc_uart0, uui, vc_spi5, arm_jtag, vc_uart2, vc_i2c3),
+ BRCMSTB_PIN(14, bsc_m1, uart0, vc_uart0, uui, vc_spi5, arm_jtag, vc_uart2, vc_i2c3),
+ BRCMSTB_PIN(15, ir_in, aon_fp_4sec_resetb, vc_uart0, pm_led_out, ctl_hdmi_5v, aon_pwm, aon_gpclk, _),
+ BRCMSTB_PIN(16, aon_cpu_standbyb, gpclk0, pm_led_out, ctl_hdmi_5v, vc_pwm0, usb_pwr, aud_fs_clk0, _),
+};
+
+static const struct brcmstb_pin_funcs bcm2712_c0_gpio_pin_funcs[] = {
+ BRCMSTB_PIN(0, bsc_m3, vc_i2c0, gpclk0, enet0, vc_pwm1, vc_spi0, ir_in, _),
+ BRCMSTB_PIN(1, bsc_m3, vc_i2c0, gpclk1, enet0, vc_pwm1, sr_edm_sense, vc_spi0, vc_uart3),
+ BRCMSTB_PIN(2, pdm, i2s_in, gpclk2, vc_spi4, pkt, vc_spi0, vc_uart3, _),
+ BRCMSTB_PIN(3, pdm, i2s_in, vc_spi4, pkt, vc_spi0, vc_uart3, _, _),
+ BRCMSTB_PIN(4, pdm, i2s_in, arm_jtag, vc_spi4, pkt, vc_spi0, vc_uart3, _),
+ BRCMSTB_PIN(5, pdm, vc_i2c3, arm_jtag, sd_card_e, vc_spi4, pkt, vc_pcm, vc_i2c5),
+ BRCMSTB_PIN(6, pdm, vc_i2c3, arm_jtag, sd_card_e, vc_spi4, pkt, vc_pcm, vc_i2c5),
+ BRCMSTB_PIN(7, i2s_out, spdif_out, arm_jtag, sd_card_e, vc_i2c3, enet0_rgmii, vc_pcm, vc_spi4),
+ BRCMSTB_PIN(8, i2s_out, aud_fs_clk0, arm_jtag, sd_card_e, vc_i2c3, enet0_mii, vc_pcm, vc_spi4),
+ BRCMSTB_PIN(9, i2s_out, aud_fs_clk0, arm_jtag, sd_card_e, enet0_mii, sd_card_c, vc_spi4, _),
+ BRCMSTB_PIN(10, bsc_m3, mtsif_alt1, i2s_in, i2s_out, vc_spi5, enet0_mii, sd_card_c, vc_spi4),
+ BRCMSTB_PIN(11, bsc_m3, mtsif_alt1, i2s_in, i2s_out, vc_spi5, enet0_mii, sd_card_c, vc_spi4),
+ BRCMSTB_PIN(12, spi_s, mtsif_alt1, i2s_in, i2s_out, vc_spi5, vc_i2csl, sd0, sd_card_d),
+ BRCMSTB_PIN(13, spi_s, mtsif_alt1, i2s_out, usb_vbus, vc_spi5, vc_i2csl, sd0, sd_card_d),
+ BRCMSTB_PIN(14, spi_s, vc_i2csl, enet0_rgmii, arm_jtag, vc_spi5, vc_pwm0, vc_i2c4, sd_card_d),
+ BRCMSTB_PIN(15, spi_s, vc_i2csl, vc_spi3, arm_jtag, vc_pwm0, vc_i2c4, gpclk0, _),
+ BRCMSTB_PIN(16, sd_card_b, i2s_out, vc_spi3, i2s_in, sd0, enet0_rgmii, gpclk1, _),
+ BRCMSTB_PIN(17, sd_card_b, i2s_out, vc_spi3, i2s_in, ext_sc_clk, sd0, enet0_rgmii, gpclk2),
+ BRCMSTB_PIN(18, sd_card_b, i2s_out, vc_spi3, i2s_in, sd0, enet0_rgmii, vc_pwm1, _),
+ BRCMSTB_PIN(19, sd_card_b, usb_pwr, vc_spi3, pkt, spdif_out, sd0, ir_in, vc_pwm1),
+ BRCMSTB_PIN(20, sd_card_b, uui, vc_uart0, arm_jtag, uart2, usb_pwr, vc_pcm, vc_uart4),
+ BRCMSTB_PIN(21, usb_pwr, uui, vc_uart0, arm_jtag, uart2, sd_card_b, vc_pcm, vc_uart4),
+ BRCMSTB_PIN(22, usb_pwr, enet0, vc_uart0, mtsif, uart2, usb_vbus, vc_pcm, vc_i2c5),
+ BRCMSTB_PIN(23, usb_vbus, enet0, vc_uart0, mtsif, uart2, i2s_out, vc_pcm, vc_i2c5),
+ BRCMSTB_PIN(24, mtsif, pkt, uart0, enet0_rgmii, enet0_rgmii, vc_i2c4, vc_uart3, _),
+ BRCMSTB_PIN(25, mtsif, pkt, sc0, uart0, enet0_rgmii, enet0_rgmii, vc_i2c4, vc_uart3),
+ BRCMSTB_PIN(26, mtsif, pkt, sc0, uart0, enet0_rgmii, vc_uart4, vc_spi5, _),
+ BRCMSTB_PIN(27, mtsif, pkt, sc0, uart0, enet0_rgmii, vc_uart4, vc_spi5, _),
+ BRCMSTB_PIN(28, mtsif, pkt, sc0, enet0_rgmii, vc_uart4, vc_spi5, _, _),
+ BRCMSTB_PIN(29, mtsif, pkt, sc0, enet0_rgmii, vc_uart4, vc_spi5, _, _),
+ BRCMSTB_PIN(30, mtsif, pkt, sc0, sd2, enet0_rgmii, gpclk0, vc_pwm0, _),
+ BRCMSTB_PIN(31, mtsif, pkt, sc0, sd2, enet0_rgmii, vc_spi3, vc_pwm0, _),
+ BRCMSTB_PIN(32, mtsif, pkt, sc0, sd2, enet0_rgmii, vc_spi3, vc_uart3, _),
+ BRCMSTB_PIN(33, mtsif, pkt, sd2, enet0_rgmii, vc_spi3, vc_uart3, _, _),
+ BRCMSTB_PIN(34, mtsif, pkt, ext_sc_clk, sd2, enet0_rgmii, vc_spi3, vc_i2c5, _),
+ BRCMSTB_PIN(35, mtsif, pkt, sd2, enet0_rgmii, vc_spi3, vc_i2c5, _, _),
+ BRCMSTB_PIN(36, sd0, mtsif, sc0, i2s_in, vc_uart3, vc_uart2, _, _),
+ BRCMSTB_PIN(37, sd0, mtsif, sc0, vc_spi0, i2s_in, vc_uart3, vc_uart2, _),
+ BRCMSTB_PIN(38, sd0, mtsif_alt, sc0, vc_spi0, i2s_in, vc_uart3, vc_uart2, _),
+ BRCMSTB_PIN(39, sd0, mtsif_alt, sc0, vc_spi0, vc_uart3, vc_uart2, _, _),
+ BRCMSTB_PIN(40, sd0, mtsif_alt, sc0, vc_spi0, bsc_m3, _, _, _),
+ BRCMSTB_PIN(41, sd0, mtsif_alt, sc0, vc_spi0, bsc_m3, _, _, _),
+ BRCMSTB_PIN(42, vc_spi0, mtsif_alt, vc_i2c0, sd_card_a, mtsif_alt1, arm_jtag, pdm, spi_m),
+ BRCMSTB_PIN(43, vc_spi0, mtsif_alt, vc_i2c0, sd_card_a, mtsif_alt1, arm_jtag, pdm, spi_m),
+ BRCMSTB_PIN(44, vc_spi0, mtsif_alt, enet0, sd_card_a, mtsif_alt1, arm_jtag, pdm, spi_m),
+ BRCMSTB_PIN(45, vc_spi0, mtsif_alt, enet0, sd_card_a, mtsif_alt1, arm_jtag, pdm, spi_m),
+ BRCMSTB_PIN(46, vc_spi0, mtsif_alt, sd_card_a, mtsif_alt1, arm_jtag, pdm, spi_m, _),
+ BRCMSTB_PIN(47, enet0, mtsif_alt, i2s_out, mtsif_alt1, arm_jtag, _, _, _),
+ BRCMSTB_PIN(48, sc0, usb_pwr, spdif_out, mtsif, _, _, _, _),
+ BRCMSTB_PIN(49, sc0, usb_pwr, aud_fs_clk0, mtsif, _, _, _, _),
+ BRCMSTB_PIN(50, sc0, usb_vbus, sc0, _, _, _, _, _),
+ BRCMSTB_PIN(51, sc0, enet0, sc0, sr_edm_sense, _, _, _, _),
+ BRCMSTB_PIN(52, sc0, enet0, vc_pwm1, _, _, _, _, _),
+ BRCMSTB_PIN(53, sc0, enet0_rgmii, ext_sc_clk, _, _, _, _, _),
+};
+
+static const struct brcmstb_pin_funcs bcm2712_d0_aon_gpio_pin_funcs[] = {
+ BRCMSTB_PIN(0, ir_in, vc_spi0, vc_uart0, vc_i2c3, uart0, vc_i2c0, _, _),
+ BRCMSTB_PIN(1, vc_pwm0, vc_spi0, vc_uart0, vc_i2c3, uart0, aon_pwm, vc_i2c0, vc_pwm1),
+ BRCMSTB_PIN(2, vc_pwm0, vc_spi0, vc_uart0, ctl_hdmi_5v, uart0, aon_pwm, ir_in, vc_pwm1),
+ BRCMSTB_PIN(3, ir_in, vc_spi0, vc_uart0, uart0, sd_card_g, aon_gpclk, _, _),
+ BRCMSTB_PIN(4, gpclk0, vc_spi0, pm_led_out, aon_pwm, sd_card_g, vc_pwm0, _, _),
+ BRCMSTB_PIN(5, gpclk1, ir_in, aon_pwm, sd_card_g, vc_pwm0, _, _, _),
+ BRCMSTB_PIN(6, uart1, vc_uart2, ctl_hdmi_5v, gpclk2, vc_spi3, _, _, _),
+ BRCMSTB_PIN(7, _, _, _, _, _, _, _, _), /* non-existent on D0 silicon */
+ BRCMSTB_PIN(8, uart1, vc_uart2, ctl_hdmi_5v, vc_spi0, vc_spi3, _, _, _),
+ BRCMSTB_PIN(9, uart1, vc_uart2, vc_uart0, aon_pwm, vc_spi0, vc_uart2, vc_spi3, _),
+ BRCMSTB_PIN(10, _, _, _, _, _, _, _, _), /* non-existent on D0 silicon */
+ BRCMSTB_PIN(11, _, _, _, _, _, _, _, _), /* non-existent on D0 silicon */
+ BRCMSTB_PIN(12, uart1, vc_uart2, vc_uart0, vc_spi0, usb_pwr, vc_uart2, vc_spi3, _),
+ BRCMSTB_PIN(13, bsc_m1, vc_uart0, uui, vc_spi0, arm_jtag, vc_uart2, vc_i2c3, _),
+ BRCMSTB_PIN(14, bsc_m1, aon_gpclk, vc_uart0, uui, vc_spi0, arm_jtag, vc_uart2, vc_i2c3),
+};
+
+static const struct brcmstb_pin_funcs bcm2712_d0_gpio_pin_funcs[] = {
+ BRCMSTB_PIN(1, vc_i2c0, usb_pwr, gpclk0, sd_card_e, vc_spi3, sr_edm_sense, vc_spi0, vc_uart0),
+ BRCMSTB_PIN(2, vc_i2c0, usb_pwr, gpclk1, sd_card_e, vc_spi3, clk_observe, vc_spi0, vc_uart0),
+ BRCMSTB_PIN(3, vc_i2c3, usb_vbus, gpclk2, sd_card_e, vc_spi3, vc_spi0, vc_uart0, _),
+ BRCMSTB_PIN(4, vc_i2c3, vc_pwm1, vc_spi3, sd_card_e, vc_spi3, vc_spi0, vc_uart0, _),
+ BRCMSTB_PIN(10, bsc_m3, vc_pwm1, vc_spi3, sd_card_e, vc_spi3, gpclk0, _, _),
+ BRCMSTB_PIN(11, bsc_m3, vc_spi3, clk_observe, sd_card_c, gpclk1, _, _, _),
+ BRCMSTB_PIN(12, spi_s, vc_spi3, sd_card_c, sd_card_d, _, _, _, _),
+ BRCMSTB_PIN(13, spi_s, vc_spi3, sd_card_c, sd_card_d, _, _, _, _),
+ BRCMSTB_PIN(14, spi_s, uui, arm_jtag, vc_pwm0, vc_i2c0, sd_card_d, _, _),
+ BRCMSTB_PIN(15, spi_s, uui, arm_jtag, vc_pwm0, vc_i2c0, gpclk0, _, _),
+ BRCMSTB_PIN(18, sd_card_f, vc_pwm1, _, _, _, _, _, _),
+ BRCMSTB_PIN(19, sd_card_f, usb_pwr, vc_pwm1, _, _, _, _, _),
+ BRCMSTB_PIN(20, vc_i2c3, uui, vc_uart0, arm_jtag, vc_uart2, _, _, _),
+ BRCMSTB_PIN(21, vc_i2c3, uui, vc_uart0, arm_jtag, vc_uart2, _, _, _),
+ BRCMSTB_PIN(22, sd_card_f, vc_uart0, vc_i2c3, _, _, _, _, _),
+ BRCMSTB_PIN(23, vc_uart0, vc_i2c3, _, _, _, _, _, _),
+ BRCMSTB_PIN(24, sd_card_b, vc_spi0, arm_jtag, uart0, usb_pwr, vc_uart2, vc_uart0, _),
+ BRCMSTB_PIN(25, sd_card_b, vc_spi0, arm_jtag, uart0, usb_pwr, vc_uart2, vc_uart0, _),
+ BRCMSTB_PIN(26, sd_card_b, vc_spi0, arm_jtag, uart0, usb_vbus, vc_uart2, vc_spi0, _),
+ BRCMSTB_PIN(27, sd_card_b, vc_spi0, arm_jtag, uart0, vc_uart2, vc_spi0, _, _),
+ BRCMSTB_PIN(28, sd_card_b, vc_spi0, arm_jtag, vc_i2c0, vc_spi0, _, _, _),
+ BRCMSTB_PIN(29, arm_jtag, vc_i2c0, vc_spi0, _, _, _, _, _),
+ BRCMSTB_PIN(30, sd2, gpclk0, vc_pwm0, _, _, _, _, _),
+ BRCMSTB_PIN(31, sd2, vc_spi3, vc_pwm0, _, _, _, _, _),
+ BRCMSTB_PIN(32, sd2, vc_spi3, vc_uart3, _, _, _, _, _),
+ BRCMSTB_PIN(33, sd2, vc_spi3, vc_uart3, _, _, _, _, _),
+ BRCMSTB_PIN(34, sd2, vc_spi3, vc_i2c5, _, _, _, _, _),
+ BRCMSTB_PIN(35, sd2, vc_spi3, vc_i2c5, _, _, _, _, _),
+};
+
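+/*
+ * C0 and D0 are different steppings of the BCM2712 silicon with different
+ * pin populations, hence the separate descriptors and tables for each.
+ */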
+static const struct pinctrl_desc bcm2712_c0_pinctrl_desc = {
+ .name = "pinctrl-bcm2712",
+ .pins = bcm2712_c0_gpio_pins,
+ .npins = ARRAY_SIZE(bcm2712_c0_gpio_pins),
+};
+
+static const struct pinctrl_desc bcm2712_c0_aon_pinctrl_desc = {
+ .name = "aon-pinctrl-bcm2712",
+ .pins = bcm2712_c0_aon_gpio_pins,
+ .npins = ARRAY_SIZE(bcm2712_c0_aon_gpio_pins),
+};
+
+static const struct pinctrl_desc bcm2712_d0_pinctrl_desc = {
+ .name = "pinctrl-bcm2712",
+ .pins = bcm2712_d0_gpio_pins,
+ .npins = ARRAY_SIZE(bcm2712_d0_gpio_pins),
+};
+
+static const struct pinctrl_desc bcm2712_d0_aon_pinctrl_desc = {
+ .name = "aon-pinctrl-bcm2712",
+ .pins = bcm2712_d0_aon_gpio_pins,
+ .npins = ARRAY_SIZE(bcm2712_d0_aon_gpio_pins),
+};
+
+static const struct pinctrl_gpio_range bcm2712_c0_pinctrl_gpio_range = {
+ .name = "pinctrl-bcm2712",
+ .npins = ARRAY_SIZE(bcm2712_c0_gpio_pins),
+};
+
+static const struct pinctrl_gpio_range bcm2712_c0_aon_pinctrl_gpio_range = {
+ .name = "aon-pinctrl-bcm2712",
+ .npins = ARRAY_SIZE(bcm2712_c0_aon_gpio_pins),
+};
+
+static const struct pinctrl_gpio_range bcm2712_d0_pinctrl_gpio_range = {
+ .name = "pinctrl-bcm2712",
+ .npins = ARRAY_SIZE(bcm2712_d0_gpio_pins),
+};
+
+static const struct pinctrl_gpio_range bcm2712_d0_aon_pinctrl_gpio_range = {
+ .name = "aon-pinctrl-bcm2712",
+ .npins = ARRAY_SIZE(bcm2712_d0_aon_gpio_pins),
+};
+
+static const struct brcmstb_pdata bcm2712_c0_pdata = {
+ .pctl_desc = &bcm2712_c0_pinctrl_desc,
+ .gpio_range = &bcm2712_c0_pinctrl_gpio_range,
+ .pin_regs = bcm2712_c0_gpio_pin_regs,
+ .pin_funcs = bcm2712_c0_gpio_pin_funcs,
+ .func_count = func_count,
+ .func_gpio = func_gpio,
+ .func_names = bcm2712_func_names,
+};
+
+static const struct brcmstb_pdata bcm2712_c0_aon_pdata = {
+ .pctl_desc = &bcm2712_c0_aon_pinctrl_desc,
+ .gpio_range = &bcm2712_c0_aon_pinctrl_gpio_range,
+ .pin_regs = bcm2712_c0_aon_gpio_pin_regs,
+ .pin_funcs = bcm2712_c0_aon_gpio_pin_funcs,
+ .func_count = func_count,
+ .func_gpio = func_gpio,
+ .func_names = bcm2712_func_names,
+};
+
+static const struct brcmstb_pdata bcm2712_d0_pdata = {
+ .pctl_desc = &bcm2712_d0_pinctrl_desc,
+ .gpio_range = &bcm2712_d0_pinctrl_gpio_range,
+ .pin_regs = bcm2712_d0_gpio_pin_regs,
+ .pin_funcs = bcm2712_d0_gpio_pin_funcs,
+ .func_count = func_count,
+ .func_gpio = func_gpio,
+ .func_names = bcm2712_func_names,
+};
+
+static const struct brcmstb_pdata bcm2712_d0_aon_pdata = {
+ .pctl_desc = &bcm2712_d0_aon_pinctrl_desc,
+ .gpio_range = &bcm2712_d0_aon_pinctrl_gpio_range,
+ .pin_regs = bcm2712_d0_aon_gpio_pin_regs,
+ .pin_funcs = bcm2712_d0_aon_gpio_pin_funcs,
+ .func_count = func_count,
+ .func_gpio = func_gpio,
+ .func_names = bcm2712_func_names,
+};
+
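+/*
+ * The shared brcmstb core does all of the work; the per-variant tables
+ * above reach it through the OF match data below.
+ */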
+static int bcm2712_pinctrl_probe(struct platform_device *pdev)
+{
+ return brcmstb_pinctrl_probe(pdev);
+}
+
+static const struct of_device_id bcm2712_pinctrl_match[] = {
+ {
+ .compatible = "brcm,bcm2712c0-pinctrl",
+ .data = &bcm2712_c0_pdata
+ },
+ {
+ .compatible = "brcm,bcm2712c0-aon-pinctrl",
+ .data = &bcm2712_c0_aon_pdata
+ },
+
+ {
+ .compatible = "brcm,bcm2712d0-pinctrl",
+ .data = &bcm2712_d0_pdata
+ },
+ {
+ .compatible = "brcm,bcm2712d0-aon-pinctrl",
+ .data = &bcm2712_d0_aon_pdata
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, bcm2712_pinctrl_match);
+
+static struct platform_driver bcm2712_pinctrl_driver = {
+ .probe = bcm2712_pinctrl_probe,
+ .driver = {
+ .name = "pinctrl-bcm2712",
+ .of_match_table = bcm2712_pinctrl_match,
+ .suppress_bind_attrs = true,
+ },
+};
+module_platform_driver(bcm2712_pinctrl_driver);
+
+MODULE_AUTHOR("Phil Elwell");
+MODULE_AUTHOR("Jonathan Bell");
+MODULE_AUTHOR("Ivan T. Ivanov");
+MODULE_AUTHOR("Andrea della Porta");
+MODULE_DESCRIPTION("Broadcom BCM2712 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/bcm/pinctrl-brcmstb.c b/drivers/pinctrl/bcm/pinctrl-brcmstb.c
new file mode 100644
index 000000000000..f46b27155c3c
--- /dev/null
+++ b/drivers/pinctrl/bcm/pinctrl-brcmstb.c
@@ -0,0 +1,442 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for Broadcom brcmstb GPIO units (pinctrl only)
+ *
+ * Copyright (C) 2024-2025 Ivan T. Ivanov, Andrea della Porta
+ * Copyright (C) 2021-3 Raspberry Pi Ltd.
+ * Copyright (C) 2012 Chris Boot, Simon Arlott, Stephen Warren
+ *
+ * Based heavily on the BCM2835 GPIO & pinctrl driver, which was inspired by:
+ * pinctrl-nomadik.c, please see original file for copyright information
+ * pinctrl-tegra.c, please see original file for copyright information
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/cleanup.h>
+
+#include "pinctrl-brcmstb.h"
+
+#define BRCMSTB_PULL_NONE 0
+#define BRCMSTB_PULL_DOWN 1
+#define BRCMSTB_PULL_UP 2
+#define BRCMSTB_PULL_MASK 0x3
+
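+/*
+ * mux_bit/pad_bit values are flat bit indices: bit b lives at bit (b % 32)
+ * of the 32-bit register at byte offset (b / 32) * 4.
+ */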
+#define BIT_TO_REG(b) (((b) >> 5) << 2)
+#define BIT_TO_SHIFT(b) ((b) & 0x1f)
+
+struct brcmstb_pinctrl {
+ struct device *dev;
+ void __iomem *base;
+ struct pinctrl_dev *pctl_dev;
+ struct pinctrl_desc pctl_desc;
+ const struct pin_regs *pin_regs;
+ const struct brcmstb_pin_funcs *pin_funcs;
+ const char * const *func_names;
+ unsigned int func_count;
+ unsigned int func_gpio;
+ const char *const *gpio_groups;
+ struct pinctrl_gpio_range gpio_range;
+ /* Protect FSEL registers */
+ spinlock_t fsel_lock;
+};
+
+static unsigned int brcmstb_pinctrl_fsel_get(struct brcmstb_pinctrl *pc,
+ unsigned int pin)
+{
+ u32 bit = pc->pin_regs[pin].mux_bit;
+ unsigned int func;
+ int fsel;
+ u32 val;
+
+ if (!bit)
+ return pc->func_gpio;
+
+ bit &= ~MUX_BIT_VALID;
+
+ val = readl(pc->base + BIT_TO_REG(bit));
+ fsel = (val >> BIT_TO_SHIFT(bit)) & pc->pin_funcs[pin].func_mask;
+ func = pc->pin_funcs[pin].funcs[fsel];
+
+ if (func >= pc->func_count)
+ func = fsel;
+
+ dev_dbg(pc->dev, "get %04x: %08x (%u => %s)\n",
+ BIT_TO_REG(bit), val, pin,
+ pc->func_names[func]);
+
+ return func;
+}
+
+static int brcmstb_pinctrl_fsel_set(struct brcmstb_pinctrl *pc,
+ unsigned int pin, unsigned int func)
+{
+ u32 bit = pc->pin_regs[pin].mux_bit, val, fsel_mask;
+ const u8 *pin_funcs;
+ int fsel;
+ int cur;
+ int i;
+
+ if (!bit || func >= pc->func_count)
+ return -EINVAL;
+
+ bit &= ~MUX_BIT_VALID;
+
+ fsel = pc->pin_funcs[pin].n_funcs + 1;
+ fsel_mask = pc->pin_funcs[pin].func_mask;
+
+ if (func >= fsel) {
+ /* Convert to an fsel number */
+ pin_funcs = pc->pin_funcs[pin].funcs;
+ for (i = 1; i < fsel; i++) {
+ if (pin_funcs[i - 1] == func) {
+ fsel = i;
+ break;
+ }
+ }
+ } else {
+ fsel = func;
+ }
+
+ if (fsel >= pc->pin_funcs[pin].n_funcs + 1)
+ return -EINVAL;
+
+ guard(spinlock_irqsave)(&pc->fsel_lock);
+
+ val = readl(pc->base + BIT_TO_REG(bit));
+ cur = (val >> BIT_TO_SHIFT(bit)) & fsel_mask;
+
+ dev_dbg(pc->dev, "read %04x: %08x (%u => %s)\n",
+ BIT_TO_REG(bit), val, pin,
+ pc->func_names[cur]);
+
+ if (cur != fsel) {
+ val &= ~(fsel_mask << BIT_TO_SHIFT(bit));
+ val |= fsel << BIT_TO_SHIFT(bit);
+
+ dev_dbg(pc->dev, "write %04x: %08x (%u <= %s)\n",
+ BIT_TO_REG(bit), val, pin,
+ pc->func_names[fsel]);
+ writel(val, pc->base + BIT_TO_REG(bit));
+ }
+
+ return 0;
+}
+
+static int brcmstb_pctl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct brcmstb_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ return pc->pctl_desc.npins;
+}
+
+static const char *brcmstb_pctl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct brcmstb_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ return pc->gpio_groups[selector];
+}
+
+static int brcmstb_pctl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ struct brcmstb_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = &pc->pctl_desc.pins[selector].number;
+ *num_pins = 1;
+
+ return 0;
+}
+
+static void brcmstb_pctl_pin_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s, unsigned int offset)
+{
+ struct brcmstb_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+ unsigned int fsel = brcmstb_pinctrl_fsel_get(pc, offset);
+ const char *fname = pc->func_names[fsel];
+
+ seq_printf(s, "function %s", fname);
+}
+
+static void brcmstb_pctl_dt_free_map(struct pinctrl_dev *pctldev,
+ struct pinctrl_map *maps,
+ unsigned int num_maps)
+{
+ int i;
+
+ for (i = 0; i < num_maps; i++)
+ if (maps[i].type == PIN_MAP_TYPE_CONFIGS_PIN)
+ kfree(maps[i].data.configs.configs);
+
+ kfree(maps);
+}
+
+static const struct pinctrl_ops brcmstb_pctl_ops = {
+ .get_groups_count = brcmstb_pctl_get_groups_count,
+ .get_group_name = brcmstb_pctl_get_group_name,
+ .get_group_pins = brcmstb_pctl_get_group_pins,
+ .pin_dbg_show = brcmstb_pctl_pin_dbg_show,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = brcmstb_pctl_dt_free_map,
+};
+
+static int brcmstb_pmx_free(struct pinctrl_dev *pctldev, unsigned int offset)
+{
+ struct brcmstb_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ /* disable by setting to GPIO */
+ return brcmstb_pinctrl_fsel_set(pc, offset, pc->func_gpio);
+}
+
+static int brcmstb_pmx_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ struct brcmstb_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ return pc->func_count;
+}
+
+static const char *brcmstb_pmx_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct brcmstb_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ return (selector < pc->func_count) ? pc->func_names[selector] : NULL;
+}
+
+static int brcmstb_pmx_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const char *const **groups,
+ unsigned *const num_groups)
+{
+ struct brcmstb_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pc->gpio_groups;
+ *num_groups = pc->pctl_desc.npins;
+
+ return 0;
+}
+
+static int brcmstb_pmx_set(struct pinctrl_dev *pctldev,
+ unsigned int func_selector,
+ unsigned int group_selector)
+{
+ struct brcmstb_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+ const struct pinctrl_desc *pctldesc = &pc->pctl_desc;
+ const struct pinctrl_pin_desc *pindesc;
+
+ if (group_selector >= pctldesc->npins)
+ return -EINVAL;
+
+ pindesc = &pctldesc->pins[group_selector];
+ return brcmstb_pinctrl_fsel_set(pc, pindesc->number, func_selector);
+}
+
+static int brcmstb_pmx_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin)
+{
+ struct brcmstb_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ return brcmstb_pinctrl_fsel_set(pc, pin, pc->func_gpio);
+}
+
+static void brcmstb_pmx_gpio_disable_free(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset)
+{
+ struct brcmstb_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ /* disable by setting to GPIO */
+ (void)brcmstb_pinctrl_fsel_set(pc, offset, pc->func_gpio);
+}
+
+static bool brcmstb_pmx_function_is_gpio(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct brcmstb_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+
+ return pc->func_gpio == selector;
+}
+
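+/* strict: a pin muxed to a function cannot also be requested as a GPIO */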
+static const struct pinmux_ops brcmstb_pmx_ops = {
+ .free = brcmstb_pmx_free,
+ .get_functions_count = brcmstb_pmx_get_functions_count,
+ .get_function_name = brcmstb_pmx_get_function_name,
+ .get_function_groups = brcmstb_pmx_get_function_groups,
+ .set_mux = brcmstb_pmx_set,
+ .gpio_request_enable = brcmstb_pmx_gpio_request_enable,
+ .gpio_disable_free = brcmstb_pmx_gpio_disable_free,
+ .function_is_gpio = brcmstb_pmx_function_is_gpio,
+ .strict = true,
+};
+
+static unsigned int brcmstb_pull_config_get(struct brcmstb_pinctrl *pc,
+ unsigned int pin)
+{
+ u32 bit = pc->pin_regs[pin].pad_bit, val;
+
+ if (bit == PAD_BIT_INVALID)
+ return BRCMSTB_PULL_NONE;
+
+ val = readl(pc->base + BIT_TO_REG(bit));
+ return (val >> BIT_TO_SHIFT(bit)) & BRCMSTB_PULL_MASK;
+}
+
+static int brcmstb_pull_config_set(struct brcmstb_pinctrl *pc,
+ unsigned int pin, unsigned int arg)
+{
+ u32 bit = pc->pin_regs[pin].pad_bit, val;
+
+ if (bit == PAD_BIT_INVALID) {
+ dev_warn(pc->dev, "Can't set pulls for %s\n",
+ pc->gpio_groups[pin]);
+ return -EINVAL;
+ }
+
+ guard(spinlock_irqsave)(&pc->fsel_lock);
+
+ val = readl(pc->base + BIT_TO_REG(bit));
+ val &= ~(BRCMSTB_PULL_MASK << BIT_TO_SHIFT(bit));
+ val |= (arg << BIT_TO_SHIFT(bit));
+ writel(val, pc->base + BIT_TO_REG(bit));
+
+ return 0;
+}
+
+static int brcmstb_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *config)
+{
+ struct brcmstb_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ u32 arg;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ arg = (brcmstb_pull_config_get(pc, pin) == BRCMSTB_PULL_NONE);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ arg = (brcmstb_pull_config_get(pc, pin) == BRCMSTB_PULL_DOWN);
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ arg = (brcmstb_pull_config_get(pc, pin) == BRCMSTB_PULL_UP);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return 0;
+}
+
+static int brcmstb_pinconf_set(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *configs,
+ unsigned int num_configs)
+{
+ struct brcmstb_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
+ int ret = 0;
+ u32 param;
+ int i;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ ret = brcmstb_pull_config_set(pc, pin, BRCMSTB_PULL_NONE);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ret = brcmstb_pull_config_set(pc, pin, BRCMSTB_PULL_DOWN);
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ ret = brcmstb_pull_config_set(pc, pin, BRCMSTB_PULL_UP);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+ }
+
+ return ret;
+}
+
+static const struct pinconf_ops brcmstb_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = brcmstb_pinconf_get,
+ .pin_config_set = brcmstb_pinconf_set,
+};
+
+int brcmstb_pinctrl_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const struct brcmstb_pdata *pdata;
+ struct brcmstb_pinctrl *pc;
+ const char **names;
+ int num_pins, i;
+
+ pdata = of_device_get_match_data(dev);
+
+ pc = devm_kzalloc(dev, sizeof(*pc), GFP_KERNEL);
+ if (!pc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, pc);
+ pc->dev = dev;
+ spin_lock_init(&pc->fsel_lock);
+
+ pc->base = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(pc->base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(pc->base),
+ "Could not get IO memory\n");
+
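+	/* Start from the SoC-specific template and attach the common ops */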
+ pc->pctl_desc = *pdata->pctl_desc;
+ pc->pctl_desc.pctlops = &brcmstb_pctl_ops;
+ pc->pctl_desc.pmxops = &brcmstb_pmx_ops;
+ pc->pctl_desc.confops = &brcmstb_pinconf_ops;
+ pc->pctl_desc.owner = THIS_MODULE;
+ num_pins = pc->pctl_desc.npins;
+ names = devm_kmalloc_array(dev, num_pins, sizeof(const char *),
+ GFP_KERNEL);
+ if (!names)
+ return -ENOMEM;
+
+ for (i = 0; i < num_pins; i++)
+ names[i] = pc->pctl_desc.pins[i].name;
+
+ pc->gpio_groups = names;
+ pc->pin_regs = pdata->pin_regs;
+ pc->pin_funcs = pdata->pin_funcs;
+ pc->func_count = pdata->func_count;
+ pc->func_names = pdata->func_names;
+
+ pc->pctl_dev = devm_pinctrl_register(dev, &pc->pctl_desc, pc);
+ if (IS_ERR(pc->pctl_dev))
+ return dev_err_probe(&pdev->dev, PTR_ERR(pc->pctl_dev),
+ "Failed to register pinctrl device\n");
+
+ pc->gpio_range = *pdata->gpio_range;
+ pinctrl_add_gpio_range(pc->pctl_dev, &pc->gpio_range);
+
+ return 0;
+}
+EXPORT_SYMBOL(brcmstb_pinctrl_probe);
+
+MODULE_AUTHOR("Phil Elwell");
+MODULE_AUTHOR("Jonathan Bell");
+MODULE_AUTHOR("Ivan T. Ivanov");
+MODULE_AUTHOR("Andrea della Porta");
+MODULE_DESCRIPTION("Broadcom brcmstb pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/bcm/pinctrl-brcmstb.h b/drivers/pinctrl/bcm/pinctrl-brcmstb.h
new file mode 100644
index 000000000000..c3459103e056
--- /dev/null
+++ b/drivers/pinctrl/bcm/pinctrl-brcmstb.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Header for Broadcom brcmstb GPIO based drivers
+ *
+ * Copyright (C) 2024-2025 Ivan T. Ivanov, Andrea della Porta
+ * Copyright (C) 2021-3 Raspberry Pi Ltd.
+ * Copyright (C) 2012 Chris Boot, Simon Arlott, Stephen Warren
+ *
+ * Based heavily on the BCM2835 GPIO & pinctrl driver, which was inspired by:
+ * pinctrl-nomadik.c, please see original file for copyright information
+ * pinctrl-tegra.c, please see original file for copyright information
+ */
+
+#ifndef __PINCTRL_BRCMSTB_H__
+#define __PINCTRL_BRCMSTB_H__
+
+#include <linux/types.h>
+#include <linux/platform_device.h>
+
+#define BRCMSTB_FUNC(f) \
+ [func_##f] = #f
+
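+/*
+ * MUX_BIT() and PAD_BIT() encode a register index and a field index as a
+ * flat bit number: each register holds 32 bits, mux fields are 4 bits wide
+ * (shift << 2) and pad fields 2 bits wide (shift << 1). MUX_BIT_VALID
+ * marks pins that have a function selector at all; EMMC_REGS() leaves
+ * mux_bit as 0 for pads without one.
+ */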
+#define MUX_BIT_VALID 0x8000
+#define PAD_BIT_INVALID 0xffff
+
+#define MUX_BIT(muxreg, muxshift) \
+ (MUX_BIT_VALID + ((muxreg) << 5) + ((muxshift) << 2))
+#define PAD_BIT(padreg, padshift) \
+ (((padreg) << 5) + ((padshift) << 1))
+
+#define GPIO_REGS(n, muxreg, muxshift, padreg, padshift) \
+ [n] = { MUX_BIT(muxreg, muxshift), PAD_BIT(padreg, padshift) }
+
+#define EMMC_REGS(n, padreg, padshift) \
+ [n] = { 0, PAD_BIT(padreg, padshift) }
+
+#define AON_GPIO_REGS(n, muxreg, muxshift, padreg, padshift) \
+ GPIO_REGS(n, muxreg, muxshift, padreg, padshift)
+
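+/* SGPIOs sit at pin numbers 32 and up so they can share the AON tables. */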
+#define AON_SGPIO_REGS(n, muxreg, muxshift) \
+ [(n) + 32] = { MUX_BIT(muxreg, muxshift), PAD_BIT_INVALID }
+
+#define GPIO_PIN(n) PINCTRL_PIN(n, "gpio" #n)
+/*
+ * AON pins are in the Always-On power domain. SGPIOs are also 'Safe':
+ * they are 5V tolerant (necessary for the HDMI I2C pins) and can be
+ * driven while the power is off.
+ */
+#define AON_GPIO_PIN(n) PINCTRL_PIN(n, "aon_gpio" #n)
+#define AON_SGPIO_PIN(n) PINCTRL_PIN((n) + 32, "aon_sgpio" #n)
+
+struct pin_regs {
+ u16 mux_bit;
+ u16 pad_bit;
+};
+
+/**
+ * struct brcmstb_pin_funcs - primary and alternate functions available
+ *                            on a pin
+ * @func_mask: mask of the valid function-selector bits in the mux
+ *             registers
+ * @funcs: array of function identifiers
+ * @n_funcs: number of identifiers in the @funcs array
+ */
+struct brcmstb_pin_funcs {
+ const u32 func_mask;
+ const u8 *funcs;
+ const unsigned int n_funcs;
+};
+
+/**
+ * struct brcmstb_pdata - specific data for a pinctrl chip implementation
+ * @pctl_desc: pin controller descriptor for this implementation
+ * @gpio_range: range of GPIOs served by this controller
+ * @pin_regs: array of register descriptors for each pin
+ * @pin_funcs: array of the assignable functions for each pin
+ * @func_count: total number of functions
+ * @func_gpio: which function number is GPIO (usually 0)
+ * @func_names: an array listing all function names
+ */
+struct brcmstb_pdata {
+ const struct pinctrl_desc *pctl_desc;
+ const struct pinctrl_gpio_range *gpio_range;
+ const struct pin_regs *pin_regs;
+ const struct brcmstb_pin_funcs *pin_funcs;
+ const unsigned int func_count;
+ const unsigned int func_gpio;
+ const char * const *func_names;
+};
+
+int brcmstb_pinctrl_probe(struct platform_device *pdev);
+
+#endif
diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
index 1d08b8d4cdd7..8c353676f2af 100644
--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
@@ -865,7 +865,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
gc->direction_input = iproc_gpio_direction_input;
gc->direction_output = iproc_gpio_direction_output;
gc->get_direction = iproc_gpio_get_direction;
- gc->set_rv = iproc_gpio_set;
+ gc->set = iproc_gpio_set;
gc->get = iproc_gpio_get;
chip->pinmux_is_supported = of_property_read_bool(dev->of_node,
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
index b08f8480ddc6..b425ecacd1b0 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
@@ -656,7 +656,7 @@ static int nsp_gpio_probe(struct platform_device *pdev)
gc->direction_input = nsp_gpio_direction_input;
gc->direction_output = nsp_gpio_direction_output;
gc->get_direction = nsp_gpio_get_direction;
- gc->set_rv = nsp_gpio_set;
+ gc->set = nsp_gpio_set;
gc->get = nsp_gpio_get;
/* optional GPIO interrupt support */
diff --git a/drivers/pinctrl/cirrus/pinctrl-cs42l43.c b/drivers/pinctrl/cirrus/pinctrl-cs42l43.c
index 4e47710eb3d5..68abb6d6cecd 100644
--- a/drivers/pinctrl/cirrus/pinctrl-cs42l43.c
+++ b/drivers/pinctrl/cirrus/pinctrl-cs42l43.c
@@ -555,7 +555,7 @@ static int cs42l43_pin_probe(struct platform_device *pdev)
priv->gpio_chip.direction_output = cs42l43_gpio_direction_out;
priv->gpio_chip.add_pin_ranges = cs42l43_gpio_add_pin_ranges;
priv->gpio_chip.get = cs42l43_gpio_get;
- priv->gpio_chip.set_rv = cs42l43_gpio_set;
+ priv->gpio_chip.set = cs42l43_gpio_set;
priv->gpio_chip.label = dev_name(priv->dev);
priv->gpio_chip.parent = priv->dev;
priv->gpio_chip.can_sleep = true;
diff --git a/drivers/pinctrl/cirrus/pinctrl-lochnagar.c b/drivers/pinctrl/cirrus/pinctrl-lochnagar.c
index dcc0a2f3c7dd..ca6ae566082b 100644
--- a/drivers/pinctrl/cirrus/pinctrl-lochnagar.c
+++ b/drivers/pinctrl/cirrus/pinctrl-lochnagar.c
@@ -1161,7 +1161,7 @@ static int lochnagar_pin_probe(struct platform_device *pdev)
priv->gpio_chip.request = gpiochip_generic_request;
priv->gpio_chip.free = gpiochip_generic_free;
priv->gpio_chip.direction_output = lochnagar_gpio_direction_out;
- priv->gpio_chip.set_rv = lochnagar_gpio_set;
+ priv->gpio_chip.set = lochnagar_gpio_set;
priv->gpio_chip.can_sleep = true;
priv->gpio_chip.parent = dev;
priv->gpio_chip.base = -1;
diff --git a/drivers/pinctrl/cirrus/pinctrl-madera-core.c b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
index d19ef13224cc..1d9481b17091 100644
--- a/drivers/pinctrl/cirrus/pinctrl-madera-core.c
+++ b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
@@ -804,7 +804,7 @@ static int madera_pin_conf_get(struct pinctrl_dev *pctldev, unsigned int pin,
if (conf[0] & MADERA_GP1_IP_CFG_MASK)
result = 1;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
if ((conf[1] & MADERA_GP1_DIR_MASK) &&
(conf[0] & MADERA_GP1_LVL_MASK))
result = 1;
@@ -902,7 +902,7 @@ static int madera_pin_conf_set(struct pinctrl_dev *pctldev, unsigned int pin,
mask[1] |= MADERA_GP1_DIR_MASK;
conf[1] |= MADERA_GP1_DIR;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
val = pinconf_to_config_argument(*configs);
mask[0] |= MADERA_GP1_LVL_MASK;
if (val)
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 73b78d6eac67..c5dbf4e9db84 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1656,6 +1656,19 @@ int pinctrl_pm_select_default_state(struct device *dev)
EXPORT_SYMBOL_GPL(pinctrl_pm_select_default_state);
/**
+ * pinctrl_pm_select_init_state() - select init pinctrl state for PM
+ * @dev: device to select init state for
+ */
+int pinctrl_pm_select_init_state(struct device *dev)
+{
+ if (!dev->pins)
+ return 0;
+
+ return pinctrl_select_bound_state(dev, dev->pins->init_state);
+}
+EXPORT_SYMBOL_GPL(pinctrl_pm_select_init_state);
+
+/**
* pinctrl_pm_select_sleep_state() - select sleep pinctrl state for PM
* @dev: device to select sleep state for
*/
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 18de31328540..731c58ad43ee 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -245,7 +245,7 @@ static int imx_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
{
struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
const struct imx_pinctrl_soc_info *info = ipctl->info;
- struct function_desc *func;
+ const struct function_desc *func;
struct group_desc *grp;
struct imx_pin *pin;
unsigned int npins;
@@ -266,7 +266,7 @@ static int imx_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
npins = grp->grp.npins;
dev_dbg(ipctl->dev, "enable function %s group %s\n",
- func->func.name, grp->grp.name);
+ func->func->name, grp->grp.name);
for (i = 0; i < npins; i++) {
/*
@@ -580,33 +580,38 @@ static int imx_pinctrl_parse_functions(struct device_node *np,
u32 index)
{
struct pinctrl_dev *pctl = ipctl->pctl;
- struct function_desc *func;
+ struct pinfunction *func;
struct group_desc *grp;
const char **group_names;
+ int ret;
u32 i;
dev_dbg(pctl->dev, "parse function(%d): %pOFn\n", index, np);
- func = pinmux_generic_get_function(pctl, index);
+ func = devm_kzalloc(ipctl->dev, sizeof(*func), GFP_KERNEL);
if (!func)
- return -EINVAL;
+ return -ENOMEM;
/* Initialise function */
- func->func.name = np->name;
- func->func.ngroups = of_get_child_count(np);
- if (func->func.ngroups == 0) {
+ func->name = np->name;
+ func->ngroups = of_get_child_count(np);
+ if (func->ngroups == 0) {
dev_info(ipctl->dev, "no groups defined in %pOF\n", np);
return -EINVAL;
}
- group_names = devm_kcalloc(ipctl->dev, func->func.ngroups,
- sizeof(*func->func.groups), GFP_KERNEL);
+ group_names = devm_kcalloc(ipctl->dev, func->ngroups,
+ sizeof(*func->groups), GFP_KERNEL);
if (!group_names)
return -ENOMEM;
i = 0;
for_each_child_of_node_scoped(np, child)
group_names[i++] = child->name;
- func->func.groups = group_names;
+ func->groups = group_names;
+
+ ret = pinmux_generic_add_pinfunction(pctl, func, NULL);
+ if (ret < 0)
+ return ret;
i = 0;
for_each_child_of_node_scoped(np, child) {
@@ -615,6 +620,10 @@ static int imx_pinctrl_parse_functions(struct device_node *np,
return -ENOMEM;
mutex_lock(&ipctl->mutex);
+ /*
+ * FIXME: This should use pinctrl_generic_add_group() and not
+ * access the private radix tree directly.
+ */
radix_tree_insert(&pctl->pin_group_tree,
ipctl->group_index++, grp);
mutex_unlock(&ipctl->mutex);
@@ -669,20 +678,6 @@ static int imx_pinctrl_probe_dt(struct platform_device *pdev,
}
}
- for (i = 0; i < nfuncs; i++) {
- struct function_desc *function;
-
- function = devm_kzalloc(&pdev->dev, sizeof(*function),
- GFP_KERNEL);
- if (!function)
- return -ENOMEM;
-
- mutex_lock(&ipctl->mutex);
- radix_tree_insert(&pctl->pin_function_tree, i, function);
- mutex_unlock(&ipctl->mutex);
- }
- pctl->num_functions = nfuncs;
-
ipctl->group_index = 0;
if (flat_funcs) {
pctl->num_groups = of_get_child_count(np);
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 6eb649f1ffd6..5fd107a00ef8 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1231,7 +1231,7 @@ static const struct gpio_chip byt_gpio_chip = {
.direction_input = byt_gpio_direction_input,
.direction_output = byt_gpio_direction_output,
.get = byt_gpio_get,
- .set_rv = byt_gpio_set,
+ .set = byt_gpio_set,
.set_config = gpiochip_generic_config,
.dbg_show = byt_gpio_dbg_show,
};
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 769e8c4102a5..f81f7929cd3b 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1168,7 +1168,7 @@ static const struct gpio_chip chv_gpio_chip = {
.direction_input = chv_gpio_direction_input,
.direction_output = chv_gpio_direction_output,
.get = chv_gpio_get,
- .set_rv = chv_gpio_set,
+ .set = chv_gpio_set,
};
static void chv_gpio_irq_ack(struct irq_data *d)
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index f2ff71e5ea6f..d68cef4ec52a 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -1114,7 +1114,7 @@ static const struct gpio_chip intel_gpio_chip = {
.direction_input = intel_gpio_direction_input,
.direction_output = intel_gpio_direction_output,
.get = intel_gpio_get,
- .set_rv = intel_gpio_set,
+ .set = intel_gpio_set,
.set_config = gpiochip_generic_config,
};
diff --git a/drivers/pinctrl/intel/pinctrl-lynxpoint.c b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
index 5d4a5dd493d1..3fb628309fb2 100644
--- a/drivers/pinctrl/intel/pinctrl-lynxpoint.c
+++ b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
@@ -777,7 +777,7 @@ static int lp_gpio_probe(struct platform_device *pdev)
gc->direction_input = lp_gpio_direction_input;
gc->direction_output = lp_gpio_direction_output;
gc->get = lp_gpio_get;
- gc->set_rv = lp_gpio_set;
+ gc->set = lp_gpio_set;
gc->set_config = gpiochip_generic_config;
gc->get_direction = lp_gpio_get_direction;
gc->base = -1;
diff --git a/drivers/pinctrl/mediatek/pinctrl-airoha.c b/drivers/pinctrl/mediatek/pinctrl-airoha.c
index 1737b88530c3..f1cf2578fe42 100644
--- a/drivers/pinctrl/mediatek/pinctrl-airoha.c
+++ b/drivers/pinctrl/mediatek/pinctrl-airoha.c
@@ -35,13 +35,8 @@
#define PINCTRL_FUNC_DESC(id) \
{ \
- .desc = { \
- .func = { \
- .name = #id, \
- .groups = id##_groups, \
- .ngroups = ARRAY_SIZE(id##_groups), \
- } \
- }, \
+ .desc = PINCTRL_PINFUNCTION(#id, id##_groups, \
+ ARRAY_SIZE(id##_groups)), \
.groups = id##_func_group, \
.group_size = ARRAY_SIZE(id##_func_group), \
}
@@ -108,6 +103,9 @@
#define JTAG_UDI_EN_MASK BIT(4)
#define JTAG_DFD_EN_MASK BIT(3)
+#define REG_FORCE_GPIO_EN 0x0228
+#define FORCE_GPIO_EN(n) BIT(n)
+
/* LED MAP */
#define REG_LAN_LED0_MAPPING 0x027c
#define REG_LAN_LED1_MAPPING 0x0280
@@ -334,7 +332,7 @@ struct airoha_pinctrl_func_group {
};
struct airoha_pinctrl_func {
- const struct function_desc desc;
+ const struct pinfunction desc;
const struct airoha_pinctrl_func_group *groups;
u8 group_size;
};
@@ -719,16 +717,16 @@ static const struct airoha_pinctrl_func_group mdio_func_group[] = {
.name = "mdio",
.regmap[0] = {
AIROHA_FUNC_MUX,
- REG_GPIO_PON_MODE,
- GPIO_SGMII_MDIO_MODE_MASK,
- GPIO_SGMII_MDIO_MODE_MASK
- },
- .regmap[1] = {
- AIROHA_FUNC_MUX,
REG_GPIO_2ND_I2C_MODE,
GPIO_MDC_IO_MASTER_MODE_MODE,
GPIO_MDC_IO_MASTER_MODE_MODE
},
+ .regmap[1] = {
+ AIROHA_FUNC_MUX,
+ REG_FORCE_GPIO_EN,
+ FORCE_GPIO_EN(1) | FORCE_GPIO_EN(2),
+ FORCE_GPIO_EN(1) | FORCE_GPIO_EN(2)
+ },
.regmap_size = 2,
},
};
@@ -1752,8 +1750,8 @@ static const struct airoha_pinctrl_func_group phy1_led1_func_group[] = {
.regmap[0] = {
AIROHA_FUNC_MUX,
REG_GPIO_2ND_I2C_MODE,
- GPIO_LAN3_LED0_MODE_MASK,
- GPIO_LAN3_LED0_MODE_MASK
+ GPIO_LAN3_LED1_MODE_MASK,
+ GPIO_LAN3_LED1_MODE_MASK
},
.regmap[1] = {
AIROHA_FUNC_MUX,
@@ -1816,8 +1814,8 @@ static const struct airoha_pinctrl_func_group phy2_led1_func_group[] = {
.regmap[0] = {
AIROHA_FUNC_MUX,
REG_GPIO_2ND_I2C_MODE,
- GPIO_LAN3_LED0_MODE_MASK,
- GPIO_LAN3_LED0_MODE_MASK
+ GPIO_LAN3_LED1_MODE_MASK,
+ GPIO_LAN3_LED1_MODE_MASK
},
.regmap[1] = {
AIROHA_FUNC_MUX,
@@ -1880,8 +1878,8 @@ static const struct airoha_pinctrl_func_group phy3_led1_func_group[] = {
.regmap[0] = {
AIROHA_FUNC_MUX,
REG_GPIO_2ND_I2C_MODE,
- GPIO_LAN3_LED0_MODE_MASK,
- GPIO_LAN3_LED0_MODE_MASK
+ GPIO_LAN3_LED1_MODE_MASK,
+ GPIO_LAN3_LED1_MODE_MASK
},
.regmap[1] = {
AIROHA_FUNC_MUX,
@@ -1944,8 +1942,8 @@ static const struct airoha_pinctrl_func_group phy4_led1_func_group[] = {
.regmap[0] = {
AIROHA_FUNC_MUX,
REG_GPIO_2ND_I2C_MODE,
- GPIO_LAN3_LED0_MODE_MASK,
- GPIO_LAN3_LED0_MODE_MASK
+ GPIO_LAN3_LED1_MODE_MASK,
+ GPIO_LAN3_LED1_MODE_MASK
},
.regmap[1] = {
AIROHA_FUNC_MUX,
@@ -2418,7 +2416,7 @@ static int airoha_pinctrl_add_gpiochip(struct airoha_pinctrl *pinctrl,
gc->free = gpiochip_generic_free;
gc->direction_input = pinctrl_gpio_direction_input;
gc->direction_output = airoha_gpio_direction_output;
- gc->set_rv = airoha_gpio_set;
+ gc->set = airoha_gpio_set;
gc->get = airoha_gpio_get;
gc->base = -1;
gc->ngpio = AIROHA_NUM_PINS;
@@ -2448,7 +2446,7 @@ static int airoha_pinmux_set_mux(struct pinctrl_dev *pctrl_dev,
{
struct airoha_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
const struct airoha_pinctrl_func *func;
- struct function_desc *desc;
+ const struct function_desc *desc;
struct group_desc *grp;
int i;
@@ -2461,7 +2459,7 @@ static int airoha_pinmux_set_mux(struct pinctrl_dev *pctrl_dev,
return -EINVAL;
dev_dbg(pctrl_dev->dev, "enable function %s group %s\n",
- desc->func.name, grp->grp.name);
+ desc->func->name, grp->grp.name);
func = desc->data;
for (i = 0; i < func->group_size; i++) {
@@ -2696,7 +2694,7 @@ static int airoha_pinconf_get(struct pinctrl_dev *pctrl_dev,
arg = 1;
break;
default:
- return -EOPNOTSUPP;
+ return -ENOTSUPP;
}
*config = pinconf_to_config_packed(param, arg);
@@ -2770,7 +2768,7 @@ static int airoha_pinconf_set(struct pinctrl_dev *pctrl_dev,
break;
case PIN_CONFIG_OUTPUT_ENABLE:
case PIN_CONFIG_INPUT_ENABLE:
- case PIN_CONFIG_OUTPUT: {
+ case PIN_CONFIG_LEVEL: {
bool input = param == PIN_CONFIG_INPUT_ENABLE;
int err;
@@ -2779,7 +2777,7 @@ static int airoha_pinconf_set(struct pinctrl_dev *pctrl_dev,
if (err)
return err;
- if (param == PIN_CONFIG_OUTPUT) {
+ if (param == PIN_CONFIG_LEVEL) {
err = airoha_pinconf_set_pin_value(pctrl_dev,
pin, !!arg);
if (err)
@@ -2788,7 +2786,7 @@ static int airoha_pinconf_set(struct pinctrl_dev *pctrl_dev,
break;
}
default:
- return -EOPNOTSUPP;
+ return -ENOTSUPP;
}
}
@@ -2805,10 +2803,10 @@ static int airoha_pinconf_group_get(struct pinctrl_dev *pctrl_dev,
if (airoha_pinconf_get(pctrl_dev,
airoha_pinctrl_groups[group].pins[i],
config))
- return -EOPNOTSUPP;
+ return -ENOTSUPP;
if (i && cur_config != *config)
- return -EOPNOTSUPP;
+ return -ENOTSUPP;
cur_config = *config;
}
@@ -2908,11 +2906,11 @@ static int airoha_pinctrl_probe(struct platform_device *pdev)
func = &airoha_pinctrl_funcs[i];
err = pinmux_generic_add_pinfunction(pinctrl->ctrl,
- &func->desc.func,
+ &func->desc,
(void *)func);
if (err < 0) {
dev_err(dev, "Failed to register function %s\n",
- func->desc.func.name);
+ func->desc.name);
return err;
}
}
diff --git a/drivers/pinctrl/mediatek/pinctrl-moore.c b/drivers/pinctrl/mediatek/pinctrl-moore.c
index ba0d6f880c6e..70f608347a5f 100644
--- a/drivers/pinctrl/mediatek/pinctrl-moore.c
+++ b/drivers/pinctrl/mediatek/pinctrl-moore.c
@@ -43,7 +43,7 @@ static int mtk_pinmux_set_mux(struct pinctrl_dev *pctldev,
unsigned int selector, unsigned int group)
{
struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
- struct function_desc *func;
+ const struct function_desc *func;
struct group_desc *grp;
int i, err;
@@ -56,7 +56,7 @@ static int mtk_pinmux_set_mux(struct pinctrl_dev *pctldev,
return -EINVAL;
dev_dbg(pctldev->dev, "enable function %s group %s\n",
- func->func.name, grp->grp.name);
+ func->func->name, grp->grp.name);
for (i = 0; i < grp->grp.npins; i++) {
const struct mtk_pin_desc *desc;
@@ -332,7 +332,7 @@ static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
goto err;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
MTK_OUTPUT);
if (err)
@@ -569,7 +569,7 @@ static int mtk_build_gpiochip(struct mtk_pinctrl *hw)
chip->direction_input = pinctrl_gpio_direction_input;
chip->direction_output = mtk_gpio_direction_output;
chip->get = mtk_gpio_get;
- chip->set_rv = mtk_gpio_set;
+ chip->set = mtk_gpio_set;
chip->to_irq = mtk_gpio_to_irq;
chip->set_config = mtk_gpio_set_config;
chip->base = -1;
@@ -622,11 +622,9 @@ static int mtk_build_functions(struct mtk_pinctrl *hw)
int i, err;
for (i = 0; i < hw->soc->nfuncs ; i++) {
- const struct function_desc *function = hw->soc->funcs + i;
- const struct pinfunction *func = &function->func;
+ const struct pinfunction *func = hw->soc->funcs + i;
- err = pinmux_generic_add_pinfunction(hw->pctrl, func,
- function->data);
+ err = pinmux_generic_add_pinfunction(hw->pctrl, func, NULL);
if (err < 0) {
dev_err(hw->dev, "Failed to register function %s\n",
func->name);
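The .set_rv line in this file is part of a tree-wide rename rather than a behavioural change: .set_rv was the transitional field name used while gpio_chip setters were converted to return int, and once the old void-returning .set was gone the field could take its original name back. The callback shape being assigned is, per the current gpio_chip prototype (stated as background, not quoted from this series):

	int (*set)(struct gpio_chip *gc, unsigned int offset, int value);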
diff --git a/drivers/pinctrl/mediatek/pinctrl-moore.h b/drivers/pinctrl/mediatek/pinctrl-moore.h
index 229d19561e22..fe1f087cacd0 100644
--- a/drivers/pinctrl/mediatek/pinctrl-moore.h
+++ b/drivers/pinctrl/mediatek/pinctrl-moore.h
@@ -43,11 +43,8 @@
.data = id##_funcs, \
}
-#define PINCTRL_PIN_FUNCTION(_name_, id) \
- { \
- .func = PINCTRL_PINFUNCTION(_name_, id##_groups, ARRAY_SIZE(id##_groups)), \
- .data = NULL, \
- }
+#define PINCTRL_PIN_FUNCTION(_name_, id) \
+ PINCTRL_PINFUNCTION(_name_, id##_groups, ARRAY_SIZE(id##_groups))
int mtk_moore_pinctrl_probe(struct platform_device *pdev,
const struct mtk_pin_soc *soc);
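With the .data member dropped, PINCTRL_PIN_FUNCTION reduces to the generic PINCTRL_PINFUNCTION() helper from linux/pinctrl/pinctrl.h, which builds a struct pinfunction compound literal. As a rough illustration, an entry such as PINCTRL_PIN_FUNCTION("emmc", mt7622_emmc) in the tables below now expands to approximately:

	(struct pinfunction) {
		.name	 = "emmc",
		.groups	 = mt7622_emmc_groups,
		.ngroups = ARRAY_SIZE(mt7622_emmc_groups),
	}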
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7622.c b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
index 2dc101991066..d5777889448a 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7622.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
@@ -822,7 +822,7 @@ static const char *mt7622_uart_groups[] = { "uart0_0_tx_rx",
"uart4_2_rts_cts",};
static const char *mt7622_wdt_groups[] = { "watchdog", };
-static const struct function_desc mt7622_functions[] = {
+static const struct pinfunction mt7622_functions[] = {
PINCTRL_PIN_FUNCTION("antsel", mt7622_antsel),
PINCTRL_PIN_FUNCTION("emmc", mt7622_emmc),
PINCTRL_PIN_FUNCTION("eth", mt7622_ethernet),
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7623.c b/drivers/pinctrl/mediatek/pinctrl-mt7623.c
index 3e59eada2825..69c06c2c0e21 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7623.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7623.c
@@ -1340,7 +1340,7 @@ static const char *mt7623_uart_groups[] = { "uart0_0_txd_rxd",
"uart3_rts_cts", };
static const char *mt7623_wdt_groups[] = { "watchdog_0", "watchdog_1", };
-static const struct function_desc mt7623_functions[] = {
+static const struct pinfunction mt7623_functions[] = {
PINCTRL_PIN_FUNCTION("audck", mt7623_aud_clk),
PINCTRL_PIN_FUNCTION("disp", mt7623_disp_pwm),
PINCTRL_PIN_FUNCTION("eth", mt7623_ethernet),
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7629.c b/drivers/pinctrl/mediatek/pinctrl-mt7629.c
index 98142e8c9801..cc0694881ac9 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7629.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7629.c
@@ -384,7 +384,7 @@ static const char *mt7629_wdt_groups[] = { "watchdog", };
static const char *mt7629_wifi_groups[] = { "wf0_5g", "wf0_2g", };
static const char *mt7629_flash_groups[] = { "snfi", "spi_nor" };
-static const struct function_desc mt7629_functions[] = {
+static const struct pinfunction mt7629_functions[] = {
PINCTRL_PIN_FUNCTION("eth", mt7629_ethernet),
PINCTRL_PIN_FUNCTION("i2c", mt7629_i2c),
PINCTRL_PIN_FUNCTION("led", mt7629_led),
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7981.c b/drivers/pinctrl/mediatek/pinctrl-mt7981.c
index 83092be5b614..6216c2e057f6 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7981.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7981.c
@@ -977,7 +977,7 @@ static const char *mt7981_ethernet_groups[] = { "smi_mdc_mdio", "gbe_ext_mdc_mdi
"wf0_mode1", "wf0_mode3", "mt7531_int", };
static const char *mt7981_ant_groups[] = { "ant_sel", };
-static const struct function_desc mt7981_functions[] = {
+static const struct pinfunction mt7981_functions[] = {
PINCTRL_PIN_FUNCTION("wa_aice", mt7981_wa_aice),
PINCTRL_PIN_FUNCTION("dfd", mt7981_dfd),
PINCTRL_PIN_FUNCTION("jtag", mt7981_jtag),
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7986.c b/drivers/pinctrl/mediatek/pinctrl-mt7986.c
index 5816b5fdb7ca..2a762ade9c35 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7986.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7986.c
@@ -878,7 +878,7 @@ static const char *mt7986_uart_groups[] = {
static const char *mt7986_wdt_groups[] = { "watchdog", };
static const char *mt7986_wf_groups[] = { "wf_2g", "wf_5g", "wf_dbdc", };
-static const struct function_desc mt7986_functions[] = {
+static const struct pinfunction mt7986_functions[] = {
PINCTRL_PIN_FUNCTION("audio", mt7986_audio),
PINCTRL_PIN_FUNCTION("emmc", mt7986_emmc),
PINCTRL_PIN_FUNCTION("eth", mt7986_ethernet),
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7988.c b/drivers/pinctrl/mediatek/pinctrl-mt7988.c
index 68b4097792b8..9569e8c0cec1 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7988.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7988.c
@@ -1464,33 +1464,23 @@ static const char * const mt7988_usb_groups[] = {
"drv_vbus_p1",
};
-static const struct function_desc mt7988_functions[] = {
- { { "audio", mt7988_audio_groups, ARRAY_SIZE(mt7988_audio_groups) },
- NULL },
- { { "jtag", mt7988_jtag_groups, ARRAY_SIZE(mt7988_jtag_groups) },
- NULL },
- { { "int_usxgmii", mt7988_int_usxgmii_groups,
- ARRAY_SIZE(mt7988_int_usxgmii_groups) },
- NULL },
- { { "pwm", mt7988_pwm_groups, ARRAY_SIZE(mt7988_pwm_groups) }, NULL },
- { { "dfd", mt7988_dfd_groups, ARRAY_SIZE(mt7988_dfd_groups) }, NULL },
- { { "i2c", mt7988_i2c_groups, ARRAY_SIZE(mt7988_i2c_groups) }, NULL },
- { { "eth", mt7988_ethernet_groups, ARRAY_SIZE(mt7988_ethernet_groups) },
- NULL },
- { { "pcie", mt7988_pcie_groups, ARRAY_SIZE(mt7988_pcie_groups) },
- NULL },
- { { "pmic", mt7988_pmic_groups, ARRAY_SIZE(mt7988_pmic_groups) },
- NULL },
- { { "watchdog", mt7988_wdt_groups, ARRAY_SIZE(mt7988_wdt_groups) },
- NULL },
- { { "spi", mt7988_spi_groups, ARRAY_SIZE(mt7988_spi_groups) }, NULL },
- { { "flash", mt7988_flash_groups, ARRAY_SIZE(mt7988_flash_groups) },
- NULL },
- { { "uart", mt7988_uart_groups, ARRAY_SIZE(mt7988_uart_groups) },
- NULL },
- { { "udi", mt7988_udi_groups, ARRAY_SIZE(mt7988_udi_groups) }, NULL },
- { { "usb", mt7988_usb_groups, ARRAY_SIZE(mt7988_usb_groups) }, NULL },
- { { "led", mt7988_led_groups, ARRAY_SIZE(mt7988_led_groups) }, NULL },
+static const struct pinfunction mt7988_functions[] = {
+ PINCTRL_PIN_FUNCTION("audio", mt7988_audio),
+ PINCTRL_PIN_FUNCTION("jtag", mt7988_jtag),
+ PINCTRL_PIN_FUNCTION("int_usxgmii", mt7988_int_usxgmii),
+ PINCTRL_PIN_FUNCTION("pwm", mt7988_pwm),
+ PINCTRL_PIN_FUNCTION("dfd", mt7988_dfd),
+ PINCTRL_PIN_FUNCTION("i2c", mt7988_i2c),
+ PINCTRL_PIN_FUNCTION("eth", mt7988_ethernet),
+ PINCTRL_PIN_FUNCTION("pcie", mt7988_pcie),
+ PINCTRL_PIN_FUNCTION("pmic", mt7988_pmic),
+ PINCTRL_PIN_FUNCTION("watchdog", mt7988_wdt),
+ PINCTRL_PIN_FUNCTION("spi", mt7988_spi),
+ PINCTRL_PIN_FUNCTION("flash", mt7988_flash),
+ PINCTRL_PIN_FUNCTION("uart", mt7988_uart),
+ PINCTRL_PIN_FUNCTION("udi", mt7988_udi),
+ PINCTRL_PIN_FUNCTION("usb", mt7988_usb),
+ PINCTRL_PIN_FUNCTION("led", mt7988_led),
};
static const struct mtk_eint_hw mt7988_eint_hw = {
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
index 36d2898037dd..fa7c0ed49346 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h
@@ -238,7 +238,7 @@ struct mtk_pin_soc {
unsigned int npins;
const struct group_desc *grps;
unsigned int ngrps;
- const struct function_desc *funcs;
+ const struct pinfunction *funcs;
unsigned int nfuncs;
const struct mtk_eint_regs *eint_regs;
const struct mtk_eint_hw *eint_hw;
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index a4cb6d511fcd..d6a46fe0cda8 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -384,7 +384,7 @@ static int mtk_pconf_parse_conf(struct pinctrl_dev *pctldev,
mtk_pmx_gpio_set_direction(pctldev, NULL, pin, true);
ret = mtk_pconf_set_ies_smt(pctl, pin, arg, param);
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
mtk_gpio_set(pctl->chip, pin, arg);
ret = mtk_pmx_gpio_set_direction(pctldev, NULL, pin, false);
break;
@@ -898,7 +898,7 @@ static const struct gpio_chip mtk_gpio_chip = {
.direction_input = pinctrl_gpio_direction_input,
.direction_output = mtk_gpio_direction_output,
.get = mtk_gpio_get,
- .set_rv = mtk_gpio_set,
+ .set = mtk_gpio_set,
.to_irq = mtk_gpio_to_irq,
.set_config = mtk_gpio_set_config,
};
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
index 89ef4e530fcc..6bf37d8085fa 100644
--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
@@ -169,7 +169,7 @@ static int mtk_pinconf_get(struct pinctrl_dev *pctldev,
if (!ret)
err = -EINVAL;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &ret);
if (err)
break;
@@ -292,7 +292,7 @@ static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
/* regard all non-zero value as enable */
err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SR, !!arg);
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DO,
arg);
if (err)
@@ -949,7 +949,7 @@ static int mtk_build_gpiochip(struct mtk_pinctrl *hw)
chip->direction_input = mtk_gpio_direction_input;
chip->direction_output = mtk_gpio_direction_output;
chip->get = mtk_gpio_get;
- chip->set_rv = mtk_gpio_set;
+ chip->set = mtk_gpio_set;
chip->to_irq = mtk_gpio_to_irq;
chip->set_config = mtk_gpio_set_config;
chip->base = -1;
diff --git a/drivers/pinctrl/meson/pinctrl-amlogic-a4.c b/drivers/pinctrl/meson/pinctrl-amlogic-a4.c
index c8958222df8c..d9e3a8d5932a 100644
--- a/drivers/pinctrl/meson/pinctrl-amlogic-a4.c
+++ b/drivers/pinctrl/meson/pinctrl-amlogic-a4.c
@@ -422,7 +422,7 @@ static int aml_pinconf_get(struct pinctrl_dev *pcdev, unsigned int pin,
return -EINVAL;
arg = 1;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
ret = aml_pinconf_get_output(info, pin);
if (ret <= 0)
return -EINVAL;
@@ -568,7 +568,7 @@ static int aml_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
switch (param) {
case PIN_CONFIG_DRIVE_STRENGTH_UA:
case PIN_CONFIG_OUTPUT_ENABLE:
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
arg = pinconf_to_config_argument(configs[i]);
break;
@@ -592,7 +592,7 @@ static int aml_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
case PIN_CONFIG_OUTPUT_ENABLE:
ret = aml_pinconf_set_output(info, pin, arg);
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
ret = aml_pinconf_set_output_drive(info, pin, arg);
break;
default:
@@ -888,7 +888,7 @@ static const struct gpio_chip aml_gpio_template = {
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
.set_config = gpiochip_generic_config,
- .set_rv = aml_gpio_set,
+ .set = aml_gpio_set,
.get = aml_gpio_get,
.direction_input = aml_gpio_direction_input,
.direction_output = aml_gpio_direction_output,
@@ -1093,7 +1093,7 @@ static const struct of_device_id aml_pctl_of_match[] = {
{ .compatible = "amlogic,pinctrl-s6", .data = &s6_priv_data, },
{ /* sentinel */ }
};
-MODULE_DEVICE_TABLE(of, aml_pctl_dt_match);
+MODULE_DEVICE_TABLE(of, aml_pctl_of_match);
static struct platform_driver aml_pctl_driver = {
.driver = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson-g12a.c b/drivers/pinctrl/meson/pinctrl-meson-g12a.c
index 8b9130c6e170..117e72b4ffcb 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-g12a.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-g12a.c
@@ -442,6 +442,8 @@ static const unsigned int tdm_c_dout1_z_pins[] = { GPIOZ_3 };
static const unsigned int tdm_c_dout2_z_pins[] = { GPIOZ_4 };
static const unsigned int tdm_c_dout3_z_pins[] = { GPIOZ_5 };
+static const unsigned int pcie_clkreqn_pins[] = { GPIOC_7 };
+
static const struct meson_pmx_group meson_g12a_periphs_groups[] = {
GPIO_GROUP(GPIOZ_0),
GPIO_GROUP(GPIOZ_1),
@@ -721,6 +723,7 @@ static const struct meson_pmx_group meson_g12a_periphs_groups[] = {
GROUP(pdm_din2_c, 4),
GROUP(pdm_din3_c, 4),
GROUP(pdm_dclk_c, 4),
+ GROUP(pcie_clkreqn, 1),
/* bank GPIOH */
GROUP(spi1_mosi, 3),
@@ -1183,6 +1186,10 @@ static const char * const tdm_c_groups[] = {
"tdm_c_dout2_z", "tdm_c_dout3_z",
};
+static const char * const pcie_clkreqn_groups[] = {
+ "pcie_clkreqn"
+};
+
static const char * const gpio_aobus_groups[] = {
"GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3", "GPIOAO_4",
"GPIOAO_5", "GPIOAO_6", "GPIOAO_7", "GPIOAO_8", "GPIOAO_9",
@@ -1309,6 +1316,7 @@ static const struct meson_pmx_func meson_g12a_periphs_functions[] = {
FUNCTION(tdm_a),
FUNCTION(tdm_b),
FUNCTION(tdm_c),
+ FUNCTION(pcie_clkreqn),
};
static const struct meson_pmx_func meson_g12a_aobus_functions[] = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
index 9171de657f97..a75762e4d264 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
@@ -187,6 +187,9 @@ static const unsigned int i2c_sda_c_pins[] = { GPIODV_28 };
static const unsigned int i2c_sck_c_dv19_pins[] = { GPIODV_19 };
static const unsigned int i2c_sda_c_dv18_pins[] = { GPIODV_18 };
+static const unsigned int i2c_sck_d_pins[] = { GPIOX_11 };
+static const unsigned int i2c_sda_d_pins[] = { GPIOX_10 };
+
static const unsigned int eth_mdio_pins[] = { GPIOZ_0 };
static const unsigned int eth_mdc_pins[] = { GPIOZ_1 };
static const unsigned int eth_clk_rx_clk_pins[] = { GPIOZ_2 };
@@ -411,6 +414,8 @@ static const struct meson_pmx_group meson_gxl_periphs_groups[] = {
GPIO_GROUP(GPIO_TEST_N),
/* Bank X */
+ GROUP(i2c_sda_d, 5, 5),
+ GROUP(i2c_sck_d, 5, 4),
GROUP(sdio_d0, 5, 31),
GROUP(sdio_d1, 5, 30),
GROUP(sdio_d2, 5, 29),
@@ -651,6 +656,10 @@ static const char * const i2c_c_groups[] = {
"i2c_sck_c", "i2c_sda_c", "i2c_sda_c_dv18", "i2c_sck_c_dv19",
};
+static const char * const i2c_d_groups[] = {
+ "i2c_sck_d", "i2c_sda_d",
+};
+
static const char * const eth_groups[] = {
"eth_mdio", "eth_mdc", "eth_clk_rx_clk", "eth_rx_dv",
"eth_rxd0", "eth_rxd1", "eth_rxd2", "eth_rxd3",
@@ -777,6 +786,7 @@ static const struct meson_pmx_func meson_gxl_periphs_functions[] = {
FUNCTION(i2c_a),
FUNCTION(i2c_b),
FUNCTION(i2c_c),
+ FUNCTION(i2c_d),
FUNCTION(eth),
FUNCTION(pwm_a),
FUNCTION(pwm_b),
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index f5be61f2ede4..18295b15ecd9 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -360,7 +360,7 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
switch (param) {
case PIN_CONFIG_DRIVE_STRENGTH_UA:
case PIN_CONFIG_OUTPUT_ENABLE:
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
arg = pinconf_to_config_argument(configs[i]);
break;
@@ -384,7 +384,7 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
case PIN_CONFIG_OUTPUT_ENABLE:
ret = meson_pinconf_set_output(pc, pin, arg);
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
ret = meson_pinconf_set_output_drive(pc, pin, arg);
break;
default:
@@ -502,7 +502,7 @@ static int meson_pinconf_get(struct pinctrl_dev *pcdev, unsigned int pin,
return -EINVAL;
arg = 1;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
ret = meson_pinconf_get_output(pc, pin);
if (ret <= 0)
return -EINVAL;
@@ -616,7 +616,7 @@ static int meson_gpiolib_register(struct meson_pinctrl *pc)
pc->chip.direction_input = meson_gpio_direction_input;
pc->chip.direction_output = meson_gpio_direction_output;
pc->chip.get = meson_gpio_get;
- pc->chip.set_rv = meson_gpio_set;
+ pc->chip.set = meson_gpio_set;
pc->chip.base = -1;
pc->chip.ngpio = pc->data->num_pins;
pc->chip.can_sleep = false;
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index a6b106984e12..81dfbd5e7f07 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -420,7 +420,8 @@ static int armada_37xx_gpio_direction_output(struct gpio_chip *chip,
struct armada_37xx_pinctrl *info = gpiochip_get_data(chip);
unsigned int en_offset = offset;
unsigned int reg = OUTPUT_VAL;
- unsigned int mask, val, ret;
+ unsigned int mask, val;
+ int ret;
armada_37xx_update_reg(&reg, &offset);
mask = BIT(offset);
@@ -518,7 +519,7 @@ static const struct pinmux_ops armada_37xx_pmx_ops = {
static const struct gpio_chip armada_37xx_gpiolib_chip = {
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
- .set_rv = armada_37xx_gpio_set,
+ .set = armada_37xx_gpio_set,
.get = armada_37xx_gpio_get,
.get_direction = armada_37xx_gpio_get_direction,
.direction_input = armada_37xx_gpio_direction_input,
@@ -634,8 +635,9 @@ static int armada_37xx_edge_both_irq_swap_pol(struct armada_37xx_pinctrl *info,
{
u32 reg_idx = pin_idx / GPIO_PER_REG;
u32 bit_num = pin_idx % GPIO_PER_REG;
- u32 p, l, ret;
unsigned long flags;
+ u32 p, l;
+ int ret;
regmap_read(info->regmap, INPUT_VAL + 4*reg_idx, &l);
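Both armada-37xx hunks cure the same latent type bug: ret was declared unsigned int although it receives negative error codes, so any "if (ret < 0)" test on it is dead code. A minimal sketch of the failure mode (hypothetical callee name):

	unsigned int ret;

	ret = some_call_returning_errno();	/* say it returns -EIO */
	if (ret < 0)				/* always false: ret is unsigned */
		return ret;			/* unreachable error path */

Declaring ret as int, as the hunks do, makes the error path reachable again.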
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 2f55f83127cf..fc7ebeda8440 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -536,7 +536,7 @@ static const struct gpio_chip abx500gpio_chip = {
.direction_input = abx500_gpio_direction_input,
.get = abx500_gpio_get,
.direction_output = abx500_gpio_direction_output,
- .set_rv = abx500_gpio_set,
+ .set = abx500_gpio_set,
.to_irq = abx500_gpio_to_irq,
.dbg_show = abx500_gpio_dbg_show,
};
@@ -860,8 +860,8 @@ static int abx500_pin_config_set(struct pinctrl_dev *pctldev,
dev_dbg(chip->parent, "pin %d [%#lx]: %s %s\n",
pin, configs[i],
- (param == PIN_CONFIG_OUTPUT) ? "output " : "input",
- (param == PIN_CONFIG_OUTPUT) ?
+ (param == PIN_CONFIG_LEVEL) ? "output " : "input",
+ (param == PIN_CONFIG_LEVEL) ?
str_high_low(argument) :
(argument ? "pull up" : "pull down"));
@@ -907,7 +907,7 @@ static int abx500_pin_config_set(struct pinctrl_dev *pctldev,
ret = abx500_gpio_direction_input(chip, offset);
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
ret = abx500_gpio_direction_output(chip, offset,
argument);
break;
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 8940e04fcf4c..db0311b14132 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -584,7 +584,7 @@ static void nmk_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
seq_printf(s, "invalid pin offset");
return;
}
- nmk_gpio_dbg_show_one(s, pctldev, chip, offset - chip->base, offset);
+ nmk_gpio_dbg_show_one(s, pctldev, chip, offset - chip->base);
}
static int nmk_dt_add_map_mux(struct pinctrl_map **map, unsigned int *reserved_maps,
diff --git a/drivers/pinctrl/nuvoton/pinctrl-ma35.c b/drivers/pinctrl/nuvoton/pinctrl-ma35.c
index da5220da5149..cdad01d68a37 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-ma35.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-ma35.c
@@ -526,7 +526,7 @@ static int ma35_gpiolib_register(struct platform_device *pdev, struct ma35_pinct
bank->chip.direction_input = ma35_gpio_core_direction_in;
bank->chip.direction_output = ma35_gpio_core_direction_out;
bank->chip.get = ma35_gpio_core_get;
- bank->chip.set_rv = ma35_gpio_core_set;
+ bank->chip.set = ma35_gpio_core_set;
bank->chip.base = -1;
bank->chip.ngpio = bank->nr_pins;
bank->chip.can_sleep = false;
@@ -1038,7 +1038,8 @@ static int ma35_pinctrl_parse_functions(struct fwnode_handle *fwnode, struct ma3
struct group_desc *grp;
static u32 grp_index;
const char **groups;
- u32 ret, i = 0;
+ u32 i = 0;
+ int ret;
dev_dbg(npctl->dev, "parse function(%d): %s\n", index, np->name);
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
index b8872d8f5930..13ed87d5d30c 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
@@ -4,6 +4,7 @@
#include <linux/device.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mfd/syscon.h>
@@ -77,7 +78,7 @@
/* Structure for register banks */
struct npcm7xx_gpio {
void __iomem *base;
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
int irqbase;
int irq;
u32 pinctrl_id;
@@ -99,32 +100,26 @@ struct npcm7xx_pinctrl {
};
/* GPIO handling in the pinctrl driver */
-static void npcm_gpio_set(struct gpio_chip *gc, void __iomem *reg,
+static void npcm_gpio_set(struct gpio_generic_chip *chip, void __iomem *reg,
unsigned int pinmask)
{
- unsigned long flags;
unsigned long val;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(chip);
val = ioread32(reg) | pinmask;
iowrite32(val, reg);
-
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
-static void npcm_gpio_clr(struct gpio_chip *gc, void __iomem *reg,
+static void npcm_gpio_clr(struct gpio_generic_chip *chip, void __iomem *reg,
unsigned int pinmask)
{
- unsigned long flags;
unsigned long val;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(chip);
val = ioread32(reg) & ~pinmask;
iowrite32(val, reg);
-
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
static void npcmgpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
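The npcm_gpio_set()/npcm_gpio_clr() rewrite above swaps the explicit raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() pair for scope-based locking, which is why the flags local disappears. A minimal sketch of the pattern, assuming the gpio_generic_lock_irqsave guard class that linux/gpio/generic.h appears to declare for this purpose:

	static void example_rmw_set(struct gpio_generic_chip *chip,
				    void __iomem *reg, unsigned int mask)
	{
		guard(gpio_generic_lock_irqsave)(chip);

		/* read-modify-write under the chip lock; the guard drops
		 * the lock automatically when this scope ends
		 */
		iowrite32(ioread32(reg) | mask, reg);
	}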
@@ -132,9 +127,9 @@ static void npcmgpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
struct npcm7xx_gpio *bank = gpiochip_get_data(chip);
seq_printf(s, "-- module %d [gpio%d - %d]\n",
- bank->gc.base / bank->gc.ngpio,
- bank->gc.base,
- bank->gc.base + bank->gc.ngpio);
+ bank->chip.gc.base / bank->chip.gc.ngpio,
+ bank->chip.gc.base,
+ bank->chip.gc.base + bank->chip.gc.ngpio);
seq_printf(s, "DIN :%.8x DOUT:%.8x IE :%.8x OE :%.8x\n",
ioread32(bank->base + NPCM7XX_GP_N_DIN),
ioread32(bank->base + NPCM7XX_GP_N_DOUT),
@@ -220,7 +215,7 @@ static void npcmgpio_irq_handler(struct irq_desc *desc)
chained_irq_enter(chip, desc);
sts = ioread32(bank->base + NPCM7XX_GP_N_EVST);
en = ioread32(bank->base + NPCM7XX_GP_N_EVEN);
- dev_dbg(bank->gc.parent, "==> got irq sts %.8lx %.8lx\n", sts,
+ dev_dbg(bank->chip.gc.parent, "==> got irq sts %.8lx %.8lx\n", sts,
en);
sts &= en;
@@ -235,42 +230,42 @@ static int npcmgpio_set_irq_type(struct irq_data *d, unsigned int type)
struct npcm7xx_gpio *bank = gpiochip_get_data(gc);
unsigned int gpio = BIT(irqd_to_hwirq(d));
- dev_dbg(bank->gc.parent, "setirqtype: %u.%u = %u\n", gpio,
+ dev_dbg(bank->chip.gc.parent, "setirqtype: %u.%u = %u\n", gpio,
d->irq, type);
switch (type) {
case IRQ_TYPE_EDGE_RISING:
- dev_dbg(bank->gc.parent, "edge.rising\n");
- npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio);
- npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio);
+ dev_dbg(bank->chip.gc.parent, "edge.rising\n");
+ npcm_gpio_clr(&bank->chip, bank->base + NPCM7XX_GP_N_EVBE, gpio);
+ npcm_gpio_clr(&bank->chip, bank->base + NPCM7XX_GP_N_POL, gpio);
break;
case IRQ_TYPE_EDGE_FALLING:
- dev_dbg(bank->gc.parent, "edge.falling\n");
- npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio);
- npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio);
+ dev_dbg(bank->chip.gc.parent, "edge.falling\n");
+ npcm_gpio_clr(&bank->chip, bank->base + NPCM7XX_GP_N_EVBE, gpio);
+ npcm_gpio_set(&bank->chip, bank->base + NPCM7XX_GP_N_POL, gpio);
break;
case IRQ_TYPE_EDGE_BOTH:
- dev_dbg(bank->gc.parent, "edge.both\n");
- npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio);
+ dev_dbg(bank->chip.gc.parent, "edge.both\n");
+ npcm_gpio_set(&bank->chip, bank->base + NPCM7XX_GP_N_EVBE, gpio);
break;
case IRQ_TYPE_LEVEL_LOW:
- dev_dbg(bank->gc.parent, "level.low\n");
- npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio);
+ dev_dbg(bank->chip.gc.parent, "level.low\n");
+ npcm_gpio_set(&bank->chip, bank->base + NPCM7XX_GP_N_POL, gpio);
break;
case IRQ_TYPE_LEVEL_HIGH:
- dev_dbg(bank->gc.parent, "level.high\n");
- npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio);
+ dev_dbg(bank->chip.gc.parent, "level.high\n");
+ npcm_gpio_clr(&bank->chip, bank->base + NPCM7XX_GP_N_POL, gpio);
break;
default:
- dev_dbg(bank->gc.parent, "invalid irq type\n");
+ dev_dbg(bank->chip.gc.parent, "invalid irq type\n");
return -EINVAL;
}
if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
- npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_EVTYP, gpio);
+ npcm_gpio_clr(&bank->chip, bank->base + NPCM7XX_GP_N_EVTYP, gpio);
irq_set_handler_locked(d, handle_level_irq);
} else if (type & (IRQ_TYPE_EDGE_BOTH | IRQ_TYPE_EDGE_RISING
| IRQ_TYPE_EDGE_FALLING)) {
- npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_EVTYP, gpio);
+ npcm_gpio_set(&bank->chip, bank->base + NPCM7XX_GP_N_EVTYP, gpio);
irq_set_handler_locked(d, handle_edge_irq);
}
@@ -283,7 +278,7 @@ static void npcmgpio_irq_ack(struct irq_data *d)
struct npcm7xx_gpio *bank = gpiochip_get_data(gc);
unsigned int gpio = irqd_to_hwirq(d);
- dev_dbg(bank->gc.parent, "irq_ack: %u.%u\n", gpio, d->irq);
+ dev_dbg(bank->chip.gc.parent, "irq_ack: %u.%u\n", gpio, d->irq);
iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVST);
}
@@ -295,7 +290,7 @@ static void npcmgpio_irq_mask(struct irq_data *d)
unsigned int gpio = irqd_to_hwirq(d);
/* Clear events */
- dev_dbg(bank->gc.parent, "irq_mask: %u.%u\n", gpio, d->irq);
+ dev_dbg(bank->chip.gc.parent, "irq_mask: %u.%u\n", gpio, d->irq);
iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVENC);
gpiochip_disable_irq(gc, gpio);
}
@@ -309,7 +304,7 @@ static void npcmgpio_irq_unmask(struct irq_data *d)
/* Enable events */
gpiochip_enable_irq(gc, gpio);
- dev_dbg(bank->gc.parent, "irq_unmask: %u.%u\n", gpio, d->irq);
+ dev_dbg(bank->chip.gc.parent, "irq_unmask: %u.%u\n", gpio, d->irq);
iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVENS);
}
@@ -1423,7 +1418,7 @@ static int npcm7xx_get_slew_rate(struct npcm7xx_gpio *bank,
struct regmap *gcr_regmap, unsigned int pin)
{
u32 val;
- int gpio = (pin % bank->gc.ngpio);
+ int gpio = (pin % bank->chip.gc.ngpio);
unsigned long pinmask = BIT(gpio);
if (pincfg[pin].flag & SLEW)
@@ -1443,16 +1438,16 @@ static int npcm7xx_set_slew_rate(struct npcm7xx_gpio *bank,
struct regmap *gcr_regmap, unsigned int pin,
int arg)
{
- int gpio = BIT(pin % bank->gc.ngpio);
+ int gpio = BIT(pin % bank->chip.gc.ngpio);
if (pincfg[pin].flag & SLEW) {
switch (arg) {
case 0:
- npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_OSRC,
+ npcm_gpio_clr(&bank->chip, bank->base + NPCM7XX_GP_N_OSRC,
gpio);
return 0;
case 1:
- npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_OSRC,
+ npcm_gpio_set(&bank->chip, bank->base + NPCM7XX_GP_N_OSRC,
gpio);
return 0;
default:
@@ -1485,7 +1480,7 @@ static int npcm7xx_get_drive_strength(struct pinctrl_dev *pctldev,
struct npcm7xx_pinctrl *npcm = pinctrl_dev_get_drvdata(pctldev);
struct npcm7xx_gpio *bank =
&npcm->gpio_bank[pin / NPCM7XX_GPIO_PER_BANK];
- int gpio = (pin % bank->gc.ngpio);
+ int gpio = (pin % bank->chip.gc.ngpio);
unsigned long pinmask = BIT(gpio);
u32 ds = 0;
int flg, val;
@@ -1496,7 +1491,7 @@ static int npcm7xx_get_drive_strength(struct pinctrl_dev *pctldev,
val = ioread32(bank->base + NPCM7XX_GP_N_ODSC)
& pinmask;
ds = val ? DSHI(flg) : DSLO(flg);
- dev_dbg(bank->gc.parent,
+ dev_dbg(bank->chip.gc.parent,
"pin %d strength %d = %d\n", pin, val, ds);
return ds;
}
@@ -1511,20 +1506,20 @@ static int npcm7xx_set_drive_strength(struct npcm7xx_pinctrl *npcm,
int v;
struct npcm7xx_gpio *bank =
&npcm->gpio_bank[pin / NPCM7XX_GPIO_PER_BANK];
- int gpio = BIT(pin % bank->gc.ngpio);
+ int gpio = BIT(pin % bank->chip.gc.ngpio);
v = (pincfg[pin].flag & DRIVE_STRENGTH_MASK);
if (!nval || !v)
return -ENOTSUPP;
if (DSLO(v) == nval) {
- dev_dbg(bank->gc.parent,
+ dev_dbg(bank->chip.gc.parent,
"setting pin %d to low strength [%d]\n", pin, nval);
- npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_ODSC, gpio);
+ npcm_gpio_clr(&bank->chip, bank->base + NPCM7XX_GP_N_ODSC, gpio);
return 0;
} else if (DSHI(v) == nval) {
- dev_dbg(bank->gc.parent,
+ dev_dbg(bank->chip.gc.parent,
"setting pin %d to high strength [%d]\n", pin, nval);
- npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_ODSC, gpio);
+ npcm_gpio_set(&bank->chip, bank->base + NPCM7XX_GP_N_ODSC, gpio);
return 0;
}
@@ -1657,9 +1652,9 @@ static int npcm_gpio_set_direction(struct pinctrl_dev *pctldev,
struct npcm7xx_pinctrl *npcm = pinctrl_dev_get_drvdata(pctldev);
struct npcm7xx_gpio *bank =
&npcm->gpio_bank[offset / NPCM7XX_GPIO_PER_BANK];
- int gpio = BIT(offset % bank->gc.ngpio);
+ int gpio = BIT(offset % bank->chip.gc.ngpio);
- dev_dbg(bank->gc.parent, "GPIO Set Direction: %d = %d\n", offset,
+ dev_dbg(bank->chip.gc.parent, "GPIO Set Direction: %d = %d\n", offset,
input);
if (input)
iowrite32(gpio, bank->base + NPCM7XX_GP_N_OEC);
@@ -1687,7 +1682,7 @@ static int npcm7xx_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
struct npcm7xx_pinctrl *npcm = pinctrl_dev_get_drvdata(pctldev);
struct npcm7xx_gpio *bank =
&npcm->gpio_bank[pin / NPCM7XX_GPIO_PER_BANK];
- int gpio = (pin % bank->gc.ngpio);
+ int gpio = (pin % bank->chip.gc.ngpio);
unsigned long pinmask = BIT(gpio);
u32 ie, oe, pu, pd;
int rc = 0;
@@ -1705,13 +1700,13 @@ static int npcm7xx_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
else if (param == PIN_CONFIG_BIAS_PULL_DOWN)
rc = (!pu && pd);
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
case PIN_CONFIG_INPUT_ENABLE:
ie = ioread32(bank->base + NPCM7XX_GP_N_IEM) & pinmask;
oe = ioread32(bank->base + NPCM7XX_GP_N_OE) & pinmask;
if (param == PIN_CONFIG_INPUT_ENABLE)
rc = (ie && !oe);
- else if (param == PIN_CONFIG_OUTPUT)
+ else if (param == PIN_CONFIG_LEVEL)
rc = (!ie && oe);
break;
case PIN_CONFIG_DRIVE_PUSH_PULL:
@@ -1750,38 +1745,38 @@ static int npcm7xx_config_set_one(struct npcm7xx_pinctrl *npcm,
u16 arg = pinconf_to_config_argument(config);
struct npcm7xx_gpio *bank =
&npcm->gpio_bank[pin / NPCM7XX_GPIO_PER_BANK];
- int gpio = BIT(pin % bank->gc.ngpio);
+ int gpio = BIT(pin % bank->chip.gc.ngpio);
- dev_dbg(bank->gc.parent, "param=%d %d[GPIO]\n", param, pin);
+ dev_dbg(bank->chip.gc.parent, "param=%d %d[GPIO]\n", param, pin);
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
- npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_PU, gpio);
- npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_PD, gpio);
+ npcm_gpio_clr(&bank->chip, bank->base + NPCM7XX_GP_N_PU, gpio);
+ npcm_gpio_clr(&bank->chip, bank->base + NPCM7XX_GP_N_PD, gpio);
break;
case PIN_CONFIG_BIAS_PULL_DOWN:
- npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_PU, gpio);
- npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_PD, gpio);
+ npcm_gpio_clr(&bank->chip, bank->base + NPCM7XX_GP_N_PU, gpio);
+ npcm_gpio_set(&bank->chip, bank->base + NPCM7XX_GP_N_PD, gpio);
break;
case PIN_CONFIG_BIAS_PULL_UP:
- npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_PD, gpio);
- npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_PU, gpio);
+ npcm_gpio_clr(&bank->chip, bank->base + NPCM7XX_GP_N_PD, gpio);
+ npcm_gpio_set(&bank->chip, bank->base + NPCM7XX_GP_N_PU, gpio);
break;
case PIN_CONFIG_INPUT_ENABLE:
iowrite32(gpio, bank->base + NPCM7XX_GP_N_OEC);
- bank->direction_input(&bank->gc, pin % bank->gc.ngpio);
+ bank->direction_input(&bank->chip.gc, pin % bank->chip.gc.ngpio);
break;
- case PIN_CONFIG_OUTPUT:
- bank->direction_output(&bank->gc, pin % bank->gc.ngpio, arg);
+ case PIN_CONFIG_LEVEL:
+ bank->direction_output(&bank->chip.gc, pin % bank->chip.gc.ngpio, arg);
iowrite32(gpio, bank->base + NPCM7XX_GP_N_OES);
break;
case PIN_CONFIG_DRIVE_PUSH_PULL:
- npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_OTYP, gpio);
+ npcm_gpio_clr(&bank->chip, bank->base + NPCM7XX_GP_N_OTYP, gpio);
break;
case PIN_CONFIG_DRIVE_OPEN_DRAIN:
- npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_OTYP, gpio);
+ npcm_gpio_set(&bank->chip, bank->base + NPCM7XX_GP_N_OTYP, gpio);
break;
case PIN_CONFIG_INPUT_DEBOUNCE:
- npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_DBNC, gpio);
+ npcm_gpio_set(&bank->chip, bank->base + NPCM7XX_GP_N_DBNC, gpio);
break;
case PIN_CONFIG_SLEW_RATE:
return npcm7xx_set_slew_rate(bank, npcm->gcr_regmap, pin, arg);
@@ -1829,6 +1824,7 @@ static const struct pinctrl_desc npcm7xx_pinctrl_desc = {
static int npcm7xx_gpio_of(struct npcm7xx_pinctrl *pctrl)
{
+ struct gpio_generic_chip_config config;
int ret = -ENXIO;
struct device *dev = pctrl->dev;
struct fwnode_reference_args args;
@@ -1840,15 +1836,18 @@ static int npcm7xx_gpio_of(struct npcm7xx_pinctrl *pctrl)
if (!pctrl->gpio_bank[id].base)
return -EINVAL;
- ret = bgpio_init(&pctrl->gpio_bank[id].gc, dev, 4,
- pctrl->gpio_bank[id].base + NPCM7XX_GP_N_DIN,
- pctrl->gpio_bank[id].base + NPCM7XX_GP_N_DOUT,
- NULL,
- NULL,
- pctrl->gpio_bank[id].base + NPCM7XX_GP_N_IEM,
- BGPIOF_READ_OUTPUT_REG_SET);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = pctrl->gpio_bank[id].base + NPCM7XX_GP_N_DIN,
+ .set = pctrl->gpio_bank[id].base + NPCM7XX_GP_N_DOUT,
+ .dirin = pctrl->gpio_bank[id].base + NPCM7XX_GP_N_IEM,
+ .flags = GPIO_GENERIC_READ_OUTPUT_REG_SET,
+ };
+
+ ret = gpio_generic_chip_init(&pctrl->gpio_bank[id].chip, &config);
if (ret) {
- dev_err(dev, "bgpio_init() failed\n");
+ dev_err(dev, "failed to initialize the generic GPIO chip\n");
return ret;
}
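This is the shape of the bgpio_init() retirement repeated across the series: the long positional argument list becomes named fields in a struct gpio_generic_chip_config handed to gpio_generic_chip_init(). Condensed from the hunk above (the REG_* offsets are placeholders for the NPCM register names):

	struct gpio_generic_chip_config config = {
		.dev	= dev,			/* parent device */
		.sz	= 4,			/* register width in bytes */
		.dat	= base + REG_DIN,	/* data-in register */
		.set	= base + REG_DOUT,	/* data-out register */
		.dirin	= base + REG_IEM,	/* input-enable direction reg */
		.flags	= GPIO_GENERIC_READ_OUTPUT_REG_SET,
	};

	ret = gpio_generic_chip_init(&bank->chip, &config);
	if (ret)
		return ret;

Unused registers (clr and dirout here) are simply left out of the config instead of being passed as NULL arguments.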
@@ -1866,23 +1865,23 @@ static int npcm7xx_gpio_of(struct npcm7xx_pinctrl *pctrl)
pctrl->gpio_bank[id].irq = ret;
pctrl->gpio_bank[id].irqbase = id * NPCM7XX_GPIO_PER_BANK;
pctrl->gpio_bank[id].pinctrl_id = args.args[0];
- pctrl->gpio_bank[id].gc.base = args.args[1];
- pctrl->gpio_bank[id].gc.ngpio = args.args[2];
- pctrl->gpio_bank[id].gc.owner = THIS_MODULE;
- pctrl->gpio_bank[id].gc.parent = dev;
- pctrl->gpio_bank[id].gc.fwnode = child;
- pctrl->gpio_bank[id].gc.label = devm_kasprintf(dev, GFP_KERNEL, "%pfw", child);
- if (pctrl->gpio_bank[id].gc.label == NULL)
+ pctrl->gpio_bank[id].chip.gc.base = args.args[1];
+ pctrl->gpio_bank[id].chip.gc.ngpio = args.args[2];
+ pctrl->gpio_bank[id].chip.gc.owner = THIS_MODULE;
+ pctrl->gpio_bank[id].chip.gc.parent = dev;
+ pctrl->gpio_bank[id].chip.gc.fwnode = child;
+ pctrl->gpio_bank[id].chip.gc.label = devm_kasprintf(dev, GFP_KERNEL, "%pfw", child);
+ if (pctrl->gpio_bank[id].chip.gc.label == NULL)
return -ENOMEM;
- pctrl->gpio_bank[id].gc.dbg_show = npcmgpio_dbg_show;
- pctrl->gpio_bank[id].direction_input = pctrl->gpio_bank[id].gc.direction_input;
- pctrl->gpio_bank[id].gc.direction_input = npcmgpio_direction_input;
- pctrl->gpio_bank[id].direction_output = pctrl->gpio_bank[id].gc.direction_output;
- pctrl->gpio_bank[id].gc.direction_output = npcmgpio_direction_output;
- pctrl->gpio_bank[id].request = pctrl->gpio_bank[id].gc.request;
- pctrl->gpio_bank[id].gc.request = npcmgpio_gpio_request;
- pctrl->gpio_bank[id].gc.free = pinctrl_gpio_free;
+ pctrl->gpio_bank[id].chip.gc.dbg_show = npcmgpio_dbg_show;
+ pctrl->gpio_bank[id].direction_input = pctrl->gpio_bank[id].chip.gc.direction_input;
+ pctrl->gpio_bank[id].chip.gc.direction_input = npcmgpio_direction_input;
+ pctrl->gpio_bank[id].direction_output = pctrl->gpio_bank[id].chip.gc.direction_output;
+ pctrl->gpio_bank[id].chip.gc.direction_output = npcmgpio_direction_output;
+ pctrl->gpio_bank[id].request = pctrl->gpio_bank[id].chip.gc.request;
+ pctrl->gpio_bank[id].chip.gc.request = npcmgpio_gpio_request;
+ pctrl->gpio_bank[id].chip.gc.free = pinctrl_gpio_free;
id++;
}
@@ -1897,7 +1896,7 @@ static int npcm7xx_gpio_register(struct npcm7xx_pinctrl *pctrl)
for (id = 0 ; id < pctrl->bank_num ; id++) {
struct gpio_irq_chip *girq;
- girq = &pctrl->gpio_bank[id].gc.irq;
+ girq = &pctrl->gpio_bank[id].chip.gc.irq;
gpio_irq_chip_set_chip(girq, &npcmgpio_irqchip);
girq->parent_handler = npcmgpio_irq_handler;
girq->num_parents = 1;
@@ -1912,21 +1911,21 @@ static int npcm7xx_gpio_register(struct npcm7xx_pinctrl *pctrl)
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_level_irq;
ret = devm_gpiochip_add_data(pctrl->dev,
- &pctrl->gpio_bank[id].gc,
+ &pctrl->gpio_bank[id].chip.gc,
&pctrl->gpio_bank[id]);
if (ret) {
dev_err(pctrl->dev, "Failed to add GPIO chip %u\n", id);
goto err_register;
}
- ret = gpiochip_add_pin_range(&pctrl->gpio_bank[id].gc,
+ ret = gpiochip_add_pin_range(&pctrl->gpio_bank[id].chip.gc,
dev_name(pctrl->dev),
pctrl->gpio_bank[id].pinctrl_id,
- pctrl->gpio_bank[id].gc.base,
- pctrl->gpio_bank[id].gc.ngpio);
+ pctrl->gpio_bank[id].chip.gc.base,
+ pctrl->gpio_bank[id].chip.gc.ngpio);
if (ret < 0) {
dev_err(pctrl->dev, "Failed to add GPIO bank %u\n", id);
- gpiochip_remove(&pctrl->gpio_bank[id].gc);
+ gpiochip_remove(&pctrl->gpio_bank[id].chip.gc);
goto err_register;
}
}
@@ -1935,7 +1934,7 @@ static int npcm7xx_gpio_register(struct npcm7xx_pinctrl *pctrl)
err_register:
for (; id > 0; id--)
- gpiochip_remove(&pctrl->gpio_bank[id - 1].gc);
+ gpiochip_remove(&pctrl->gpio_bank[id - 1].chip.gc);
return ret;
}
diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
index 3c3b9d8d3681..0aae1a253459 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
@@ -4,6 +4,7 @@
#include <linux/bits.h>
#include <linux/device.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mfd/syscon.h>
@@ -90,7 +91,7 @@ struct debounce_time {
};
struct npcm8xx_gpio {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *base;
struct debounce_time debounce;
int irqbase;
@@ -115,24 +116,20 @@ struct npcm8xx_pinctrl {
};
/* GPIO handling in the pinctrl driver */
-static void npcm_gpio_set(struct gpio_chip *gc, void __iomem *reg,
+static void npcm_gpio_set(struct gpio_generic_chip *chip, void __iomem *reg,
unsigned int pinmask)
{
- unsigned long flags;
+ guard(gpio_generic_lock_irqsave)(chip);
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
iowrite32(ioread32(reg) | pinmask, reg);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
-static void npcm_gpio_clr(struct gpio_chip *gc, void __iomem *reg,
+static void npcm_gpio_clr(struct gpio_generic_chip *chip, void __iomem *reg,
unsigned int pinmask)
{
- unsigned long flags;
+ guard(gpio_generic_lock_irqsave)(chip);
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
iowrite32(ioread32(reg) & ~pinmask, reg);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
static void npcmgpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
@@ -233,32 +230,32 @@ static int npcmgpio_set_irq_type(struct irq_data *d, unsigned int type)
switch (type) {
case IRQ_TYPE_EDGE_RISING:
- npcm_gpio_clr(&bank->gc, bank->base + NPCM8XX_GP_N_EVBE, gpio);
- npcm_gpio_clr(&bank->gc, bank->base + NPCM8XX_GP_N_POL, gpio);
+ npcm_gpio_clr(&bank->chip, bank->base + NPCM8XX_GP_N_EVBE, gpio);
+ npcm_gpio_clr(&bank->chip, bank->base + NPCM8XX_GP_N_POL, gpio);
break;
case IRQ_TYPE_EDGE_FALLING:
- npcm_gpio_clr(&bank->gc, bank->base + NPCM8XX_GP_N_EVBE, gpio);
- npcm_gpio_set(&bank->gc, bank->base + NPCM8XX_GP_N_POL, gpio);
+ npcm_gpio_clr(&bank->chip, bank->base + NPCM8XX_GP_N_EVBE, gpio);
+ npcm_gpio_set(&bank->chip, bank->base + NPCM8XX_GP_N_POL, gpio);
break;
case IRQ_TYPE_EDGE_BOTH:
- npcm_gpio_clr(&bank->gc, bank->base + NPCM8XX_GP_N_POL, gpio);
- npcm_gpio_set(&bank->gc, bank->base + NPCM8XX_GP_N_EVBE, gpio);
+ npcm_gpio_clr(&bank->chip, bank->base + NPCM8XX_GP_N_POL, gpio);
+ npcm_gpio_set(&bank->chip, bank->base + NPCM8XX_GP_N_EVBE, gpio);
break;
case IRQ_TYPE_LEVEL_LOW:
- npcm_gpio_set(&bank->gc, bank->base + NPCM8XX_GP_N_POL, gpio);
+ npcm_gpio_set(&bank->chip, bank->base + NPCM8XX_GP_N_POL, gpio);
break;
case IRQ_TYPE_LEVEL_HIGH:
- npcm_gpio_clr(&bank->gc, bank->base + NPCM8XX_GP_N_POL, gpio);
+ npcm_gpio_clr(&bank->chip, bank->base + NPCM8XX_GP_N_POL, gpio);
break;
default:
return -EINVAL;
}
if (type & IRQ_TYPE_LEVEL_MASK) {
- npcm_gpio_clr(&bank->gc, bank->base + NPCM8XX_GP_N_EVTYP, gpio);
+ npcm_gpio_clr(&bank->chip, bank->base + NPCM8XX_GP_N_EVTYP, gpio);
irq_set_handler_locked(d, handle_level_irq);
} else if (type & IRQ_TYPE_EDGE_BOTH) {
- npcm_gpio_set(&bank->gc, bank->base + NPCM8XX_GP_N_EVTYP, gpio);
+ npcm_gpio_set(&bank->chip, bank->base + NPCM8XX_GP_N_EVTYP, gpio);
irq_set_handler_locked(d, handle_edge_irq);
}
@@ -1842,7 +1839,7 @@ static void npcm8xx_setfunc(struct regmap *gcr_regmap, const unsigned int *pin,
static int npcm8xx_get_slew_rate(struct npcm8xx_gpio *bank,
struct regmap *gcr_regmap, unsigned int pin)
{
- int gpio = pin % bank->gc.ngpio;
+ int gpio = pin % bank->chip.gc.ngpio;
unsigned long pinmask = BIT(gpio);
u32 val;
@@ -1862,15 +1859,15 @@ static int npcm8xx_set_slew_rate(struct npcm8xx_gpio *bank,
int arg)
{
void __iomem *OSRC_Offset = bank->base + NPCM8XX_GP_N_OSRC;
- int gpio = BIT(pin % bank->gc.ngpio);
+ int gpio = BIT(pin % bank->chip.gc.ngpio);
if (pincfg[pin].flag & SLEW) {
switch (arg) {
case 0:
- npcm_gpio_clr(&bank->gc, OSRC_Offset, gpio);
+ npcm_gpio_clr(&bank->chip, OSRC_Offset, gpio);
return 0;
case 1:
- npcm_gpio_set(&bank->gc, OSRC_Offset, gpio);
+ npcm_gpio_set(&bank->chip, OSRC_Offset, gpio);
return 0;
default:
return -EINVAL;
@@ -1902,7 +1899,7 @@ static int npcm8xx_get_drive_strength(struct pinctrl_dev *pctldev,
struct npcm8xx_pinctrl *npcm = pinctrl_dev_get_drvdata(pctldev);
struct npcm8xx_gpio *bank =
&npcm->gpio_bank[pin / NPCM8XX_GPIO_PER_BANK];
- int gpio = pin % bank->gc.ngpio;
+ int gpio = pin % bank->chip.gc.ngpio;
unsigned long pinmask = BIT(gpio);
int flg, val;
u32 ds = 0;
@@ -1913,7 +1910,7 @@ static int npcm8xx_get_drive_strength(struct pinctrl_dev *pctldev,
val = ioread32(bank->base + NPCM8XX_GP_N_ODSC) & pinmask;
ds = val ? DSHI(flg) : DSLO(flg);
- dev_dbg(bank->gc.parent, "pin %d strength %d = %d\n", pin, val, ds);
+ dev_dbg(bank->chip.gc.parent, "pin %d strength %d = %d\n", pin, val, ds);
return ds;
}
@@ -1923,15 +1920,15 @@ static int npcm8xx_set_drive_strength(struct npcm8xx_pinctrl *npcm,
{
struct npcm8xx_gpio *bank =
&npcm->gpio_bank[pin / NPCM8XX_GPIO_PER_BANK];
- int gpio = BIT(pin % bank->gc.ngpio);
+ int gpio = BIT(pin % bank->chip.gc.ngpio);
int v;
v = pincfg[pin].flag & DRIVE_STRENGTH_MASK;
if (DSLO(v) == nval)
- npcm_gpio_clr(&bank->gc, bank->base + NPCM8XX_GP_N_ODSC, gpio);
+ npcm_gpio_clr(&bank->chip, bank->base + NPCM8XX_GP_N_ODSC, gpio);
else if (DSHI(v) == nval)
- npcm_gpio_set(&bank->gc, bank->base + NPCM8XX_GP_N_ODSC, gpio);
+ npcm_gpio_set(&bank->chip, bank->base + NPCM8XX_GP_N_ODSC, gpio);
else
return -ENOTSUPP;
@@ -2054,7 +2051,7 @@ static int npcm_gpio_set_direction(struct pinctrl_dev *pctldev,
struct npcm8xx_pinctrl *npcm = pinctrl_dev_get_drvdata(pctldev);
struct npcm8xx_gpio *bank =
&npcm->gpio_bank[offset / NPCM8XX_GPIO_PER_BANK];
- int gpio = BIT(offset % bank->gc.ngpio);
+ int gpio = BIT(offset % bank->chip.gc.ngpio);
if (input)
iowrite32(gpio, bank->base + NPCM8XX_GP_N_OEC);
@@ -2085,7 +2082,7 @@ static int debounce_timing_setting(struct npcm8xx_gpio *bank, u32 gpio,
if (bank->debounce.set_val[i]) {
if (bank->debounce.nanosec_val[i] == nanosecs) {
debounce_select = i << gpio_debounce;
- npcm_gpio_set(&bank->gc, DBNCS_offset,
+ npcm_gpio_set(&bank->chip, DBNCS_offset,
debounce_select);
break;
}
@@ -2093,7 +2090,7 @@ static int debounce_timing_setting(struct npcm8xx_gpio *bank, u32 gpio,
bank->debounce.set_val[i] = true;
bank->debounce.nanosec_val[i] = nanosecs;
debounce_select = i << gpio_debounce;
- npcm_gpio_set(&bank->gc, DBNCS_offset, debounce_select);
+ npcm_gpio_set(&bank->chip, DBNCS_offset, debounce_select);
switch (nanosecs) {
case 1 ... 1040:
iowrite32(0, bank->base + NPCM8XX_GP_N_DBNCP0 + (i * 4));
@@ -2145,21 +2142,21 @@ static int npcm_set_debounce(struct npcm8xx_pinctrl *npcm, unsigned int pin,
{
struct npcm8xx_gpio *bank =
&npcm->gpio_bank[pin / NPCM8XX_GPIO_PER_BANK];
- int gpio = BIT(pin % bank->gc.ngpio);
+ int gpio = BIT(pin % bank->chip.gc.ngpio);
int ret;
if (nanosecs) {
- ret = debounce_timing_setting(bank, pin % bank->gc.ngpio,
+ ret = debounce_timing_setting(bank, pin % bank->chip.gc.ngpio,
nanosecs);
if (ret)
dev_err(npcm->dev, "Pin %d, all four debounce timing values are used, please use one of the existing debounce values\n", pin);
else
- npcm_gpio_set(&bank->gc, bank->base + NPCM8XX_GP_N_DBNC,
+ npcm_gpio_set(&bank->chip, bank->base + NPCM8XX_GP_N_DBNC,
gpio);
return ret;
}
- npcm_gpio_clr(&bank->gc, bank->base + NPCM8XX_GP_N_DBNC, gpio);
+ npcm_gpio_clr(&bank->chip, bank->base + NPCM8XX_GP_N_DBNC, gpio);
return 0;
}
@@ -2172,7 +2169,7 @@ static int npcm8xx_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
struct npcm8xx_pinctrl *npcm = pinctrl_dev_get_drvdata(pctldev);
struct npcm8xx_gpio *bank =
&npcm->gpio_bank[pin / NPCM8XX_GPIO_PER_BANK];
- int gpio = pin % bank->gc.ngpio;
+ int gpio = pin % bank->chip.gc.ngpio;
unsigned long pinmask = BIT(gpio);
u32 ie, oe, pu, pd;
int rc = 0;
@@ -2190,13 +2187,13 @@ static int npcm8xx_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
else if (param == PIN_CONFIG_BIAS_PULL_DOWN)
rc = !pu && pd;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
case PIN_CONFIG_INPUT_ENABLE:
ie = ioread32(bank->base + NPCM8XX_GP_N_IEM) & pinmask;
oe = ioread32(bank->base + NPCM8XX_GP_N_OE) & pinmask;
if (param == PIN_CONFIG_INPUT_ENABLE)
rc = (ie && !oe);
- else if (param == PIN_CONFIG_OUTPUT)
+ else if (param == PIN_CONFIG_LEVEL)
rc = (!ie && oe);
break;
case PIN_CONFIG_DRIVE_PUSH_PULL:
@@ -2235,34 +2232,34 @@ static int npcm8xx_config_set_one(struct npcm8xx_pinctrl *npcm,
struct npcm8xx_gpio *bank =
&npcm->gpio_bank[pin / NPCM8XX_GPIO_PER_BANK];
u32 arg = pinconf_to_config_argument(config);
- int gpio = BIT(pin % bank->gc.ngpio);
+ int gpio = BIT(pin % bank->chip.gc.ngpio);
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
- npcm_gpio_clr(&bank->gc, bank->base + NPCM8XX_GP_N_PU, gpio);
- npcm_gpio_clr(&bank->gc, bank->base + NPCM8XX_GP_N_PD, gpio);
+ npcm_gpio_clr(&bank->chip, bank->base + NPCM8XX_GP_N_PU, gpio);
+ npcm_gpio_clr(&bank->chip, bank->base + NPCM8XX_GP_N_PD, gpio);
break;
case PIN_CONFIG_BIAS_PULL_DOWN:
- npcm_gpio_clr(&bank->gc, bank->base + NPCM8XX_GP_N_PU, gpio);
- npcm_gpio_set(&bank->gc, bank->base + NPCM8XX_GP_N_PD, gpio);
+ npcm_gpio_clr(&bank->chip, bank->base + NPCM8XX_GP_N_PU, gpio);
+ npcm_gpio_set(&bank->chip, bank->base + NPCM8XX_GP_N_PD, gpio);
break;
case PIN_CONFIG_BIAS_PULL_UP:
- npcm_gpio_clr(&bank->gc, bank->base + NPCM8XX_GP_N_PD, gpio);
- npcm_gpio_set(&bank->gc, bank->base + NPCM8XX_GP_N_PU, gpio);
+ npcm_gpio_clr(&bank->chip, bank->base + NPCM8XX_GP_N_PD, gpio);
+ npcm_gpio_set(&bank->chip, bank->base + NPCM8XX_GP_N_PU, gpio);
break;
case PIN_CONFIG_INPUT_ENABLE:
iowrite32(gpio, bank->base + NPCM8XX_GP_N_OEC);
- bank->direction_input(&bank->gc, pin % bank->gc.ngpio);
+ bank->direction_input(&bank->chip.gc, pin % bank->chip.gc.ngpio);
break;
- case PIN_CONFIG_OUTPUT:
- bank->direction_output(&bank->gc, pin % bank->gc.ngpio, arg);
+ case PIN_CONFIG_LEVEL:
+ bank->direction_output(&bank->chip.gc, pin % bank->chip.gc.ngpio, arg);
iowrite32(gpio, bank->base + NPCM8XX_GP_N_OES);
break;
case PIN_CONFIG_DRIVE_PUSH_PULL:
- npcm_gpio_clr(&bank->gc, bank->base + NPCM8XX_GP_N_OTYP, gpio);
+ npcm_gpio_clr(&bank->chip, bank->base + NPCM8XX_GP_N_OTYP, gpio);
break;
case PIN_CONFIG_DRIVE_OPEN_DRAIN:
- npcm_gpio_set(&bank->gc, bank->base + NPCM8XX_GP_N_OTYP, gpio);
+ npcm_gpio_set(&bank->chip, bank->base + NPCM8XX_GP_N_OTYP, gpio);
break;
case PIN_CONFIG_INPUT_DEBOUNCE:
return npcm_set_debounce(npcm, pin, arg * 1000);
@@ -2313,13 +2310,14 @@ static int npcmgpio_add_pin_ranges(struct gpio_chip *chip)
{
struct npcm8xx_gpio *bank = gpiochip_get_data(chip);
- return gpiochip_add_pin_range(&bank->gc, dev_name(chip->parent),
- bank->pinctrl_id, bank->gc.base,
- bank->gc.ngpio);
+ return gpiochip_add_pin_range(&bank->chip.gc, dev_name(chip->parent),
+ bank->pinctrl_id, bank->chip.gc.base,
+ bank->chip.gc.ngpio);
}
static int npcm8xx_gpio_fw(struct npcm8xx_pinctrl *pctrl)
{
+ struct gpio_generic_chip_config config;
struct fwnode_reference_args args;
struct device *dev = pctrl->dev;
struct fwnode_handle *child;
@@ -2331,15 +2329,19 @@ static int npcm8xx_gpio_fw(struct npcm8xx_pinctrl *pctrl)
if (!pctrl->gpio_bank[id].base)
return dev_err_probe(dev, -ENXIO, "fwnode_iomap id %d failed\n", id);
- ret = bgpio_init(&pctrl->gpio_bank[id].gc, dev, 4,
- pctrl->gpio_bank[id].base + NPCM8XX_GP_N_DIN,
- pctrl->gpio_bank[id].base + NPCM8XX_GP_N_DOUT,
- NULL,
- NULL,
- pctrl->gpio_bank[id].base + NPCM8XX_GP_N_IEM,
- BGPIOF_READ_OUTPUT_REG_SET);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = pctrl->gpio_bank[id].base + NPCM8XX_GP_N_DIN,
+ .set = pctrl->gpio_bank[id].base + NPCM8XX_GP_N_DOUT,
+ .dirin = pctrl->gpio_bank[id].base + NPCM8XX_GP_N_IEM,
+ .flags = GPIO_GENERIC_READ_OUTPUT_REG_SET,
+ };
+
+ ret = gpio_generic_chip_init(&pctrl->gpio_bank[id].chip, &config);
if (ret)
- return dev_err_probe(dev, ret, "bgpio_init() failed\n");
+ return dev_err_probe(dev, ret,
+ "failed to initialize the generic GPIO chip\n");
ret = fwnode_property_get_reference_args(child, "gpio-ranges", NULL, 3, 0, &args);
if (ret < 0)
@@ -2353,26 +2355,26 @@ static int npcm8xx_gpio_fw(struct npcm8xx_pinctrl *pctrl)
pctrl->gpio_bank[id].irq_chip = npcmgpio_irqchip;
pctrl->gpio_bank[id].irqbase = id * NPCM8XX_GPIO_PER_BANK;
pctrl->gpio_bank[id].pinctrl_id = args.args[0];
- pctrl->gpio_bank[id].gc.base = -1;
- pctrl->gpio_bank[id].gc.ngpio = args.args[2];
- pctrl->gpio_bank[id].gc.owner = THIS_MODULE;
- pctrl->gpio_bank[id].gc.parent = dev;
- pctrl->gpio_bank[id].gc.fwnode = child;
- pctrl->gpio_bank[id].gc.label = devm_kasprintf(dev, GFP_KERNEL, "%pfw", child);
- if (pctrl->gpio_bank[id].gc.label == NULL)
+ pctrl->gpio_bank[id].chip.gc.base = -1;
+ pctrl->gpio_bank[id].chip.gc.ngpio = args.args[2];
+ pctrl->gpio_bank[id].chip.gc.owner = THIS_MODULE;
+ pctrl->gpio_bank[id].chip.gc.parent = dev;
+ pctrl->gpio_bank[id].chip.gc.fwnode = child;
+ pctrl->gpio_bank[id].chip.gc.label = devm_kasprintf(dev, GFP_KERNEL, "%pfw", child);
+ if (pctrl->gpio_bank[id].chip.gc.label == NULL)
return -ENOMEM;
- pctrl->gpio_bank[id].gc.dbg_show = npcmgpio_dbg_show;
- pctrl->gpio_bank[id].direction_input = pctrl->gpio_bank[id].gc.direction_input;
- pctrl->gpio_bank[id].gc.direction_input = npcmgpio_direction_input;
- pctrl->gpio_bank[id].direction_output = pctrl->gpio_bank[id].gc.direction_output;
- pctrl->gpio_bank[id].gc.direction_output = npcmgpio_direction_output;
- pctrl->gpio_bank[id].request = pctrl->gpio_bank[id].gc.request;
- pctrl->gpio_bank[id].gc.request = npcmgpio_gpio_request;
- pctrl->gpio_bank[id].gc.free = pinctrl_gpio_free;
+ pctrl->gpio_bank[id].chip.gc.dbg_show = npcmgpio_dbg_show;
+ pctrl->gpio_bank[id].direction_input = pctrl->gpio_bank[id].chip.gc.direction_input;
+ pctrl->gpio_bank[id].chip.gc.direction_input = npcmgpio_direction_input;
+ pctrl->gpio_bank[id].direction_output = pctrl->gpio_bank[id].chip.gc.direction_output;
+ pctrl->gpio_bank[id].chip.gc.direction_output = npcmgpio_direction_output;
+ pctrl->gpio_bank[id].request = pctrl->gpio_bank[id].chip.gc.request;
+ pctrl->gpio_bank[id].chip.gc.request = npcmgpio_gpio_request;
+ pctrl->gpio_bank[id].chip.gc.free = pinctrl_gpio_free;
for (i = 0 ; i < NPCM8XX_DEBOUNCE_MAX ; i++)
pctrl->gpio_bank[id].debounce.set_val[i] = false;
- pctrl->gpio_bank[id].gc.add_pin_ranges = npcmgpio_add_pin_ranges;
+ pctrl->gpio_bank[id].chip.gc.add_pin_ranges = npcmgpio_add_pin_ranges;
id++;
}
@@ -2387,7 +2389,7 @@ static int npcm8xx_gpio_register(struct npcm8xx_pinctrl *pctrl)
for (id = 0 ; id < pctrl->bank_num ; id++) {
struct gpio_irq_chip *girq;
- girq = &pctrl->gpio_bank[id].gc.irq;
+ girq = &pctrl->gpio_bank[id].chip.gc.irq;
girq->chip = &pctrl->gpio_bank[id].irq_chip;
girq->parent_handler = npcmgpio_irq_handler;
girq->num_parents = 1;
@@ -2401,7 +2403,7 @@ static int npcm8xx_gpio_register(struct npcm8xx_pinctrl *pctrl)
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_level_irq;
ret = devm_gpiochip_add_data(pctrl->dev,
- &pctrl->gpio_bank[id].gc,
+ &pctrl->gpio_bank[id].chip.gc,
&pctrl->gpio_bank[id]);
if (ret)
return dev_err_probe(pctrl->dev, ret, "Failed to add GPIO chip %u\n", id);
diff --git a/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c b/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
index 8d8314ba0e4c..d624a4d302a8 100644
--- a/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
+++ b/drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
@@ -11,6 +11,7 @@
#include <linux/device.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mfd/syscon.h>
@@ -47,7 +48,7 @@ struct wpcm450_pinctrl;
struct wpcm450_bank;
struct wpcm450_gpio {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
struct wpcm450_pinctrl *pctrl;
const struct wpcm450_bank *bank;
};
@@ -184,11 +185,12 @@ static void wpcm450_gpio_irq_unmask(struct irq_data *d)
}
/*
- * This is an implementation of the gpio_chip->get() function, for use in
- * wpcm450_gpio_fix_evpol. Unfortunately, we can't use the bgpio-provided
- * implementation there, because it would require taking gpio_chip->bgpio_lock,
- * which is a spin lock, but wpcm450_gpio_fix_evpol must work in contexts where
- * a raw spin lock is held.
+ * FIXME: This is an implementation of the gpio_chip->get() function, for use
+ * in wpcm450_gpio_fix_evpol(). It was implemented back when gpio-mmio used a
+ * regular spinlock internally, while wpcm450_gpio_fix_evpol() needed to work
+ * in contexts with a raw spinlock held. Since then, the gpio generic chip has
+ * been switched to using a raw spinlock, so this should be converted to use
+ * the locking interfaces provided in linux/gpio/generic.h.
*/
static int wpcm450_gpio_get(struct wpcm450_gpio *gpio, int offset)
{
@@ -329,7 +331,7 @@ static void wpcm450_gpio_irqhandler(struct irq_desc *desc)
for_each_set_bit(bit, &pending, 32) {
int offset = wpcm450_irq_bitnum_to_gpio(gpio, bit);
- generic_handle_domain_irq(gpio->gc.irq.domain, offset);
+ generic_handle_domain_irq(gpio->chip.gc.irq.domain, offset);
}
chained_irq_exit(chip, desc);
}
@@ -1012,7 +1014,7 @@ static int wpcm450_gpio_add_pin_ranges(struct gpio_chip *chip)
struct wpcm450_gpio *gpio = gpiochip_get_data(chip);
const struct wpcm450_bank *bank = gpio->bank;
- return gpiochip_add_pin_range(&gpio->gc, dev_name(gpio->pctrl->dev),
+ return gpiochip_add_pin_range(&gpio->chip.gc, dev_name(gpio->pctrl->dev),
0, bank->base, bank->length);
}
@@ -1029,6 +1031,7 @@ static int wpcm450_gpio_register(struct platform_device *pdev,
"Resource fail for GPIO controller\n");
for_each_gpiochip_node(dev, child) {
+ struct gpio_generic_chip_config config;
void __iomem *dat = NULL;
void __iomem *set = NULL;
void __iomem *dirout = NULL;
@@ -1058,19 +1061,28 @@ static int wpcm450_gpio_register(struct platform_device *pdev,
set = pctrl->gpio_base + bank->dataout;
dirout = pctrl->gpio_base + bank->cfg0;
} else {
- flags = BGPIOF_NO_OUTPUT;
+ flags = GPIO_GENERIC_NO_OUTPUT;
}
- ret = bgpio_init(&gpio->gc, dev, 4,
- dat, set, NULL, dirout, NULL, flags);
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = dat,
+ .set = set,
+ .dirout = dirout,
+ .flags = flags,
+ };
+
+ ret = gpio_generic_chip_init(&gpio->chip, &config);
if (ret < 0)
return dev_err_probe(dev, ret, "GPIO initialization failed\n");
- gpio->gc.ngpio = bank->length;
- gpio->gc.set_config = wpcm450_gpio_set_config;
- gpio->gc.fwnode = child;
- gpio->gc.add_pin_ranges = wpcm450_gpio_add_pin_ranges;
+ gpio->chip.gc.ngpio = bank->length;
+ gpio->chip.gc.set_config = wpcm450_gpio_set_config;
+ gpio->chip.gc.fwnode = child;
+ gpio->chip.gc.add_pin_ranges = wpcm450_gpio_add_pin_ranges;
- girq = &gpio->gc.irq;
+ girq = &gpio->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &wpcm450_gpio_irqchip);
girq->parent_handler = wpcm450_gpio_irqhandler;
girq->parents = devm_kcalloc(dev, WPCM450_NUM_GPIO_IRQS,
@@ -1094,7 +1106,7 @@ static int wpcm450_gpio_register(struct platform_device *pdev,
girq->num_parents++;
}
- ret = devm_gpiochip_add_data(dev, &gpio->gc, gpio);
+ ret = devm_gpiochip_add_data(dev, &gpio->chip.gc, gpio);
if (ret)
return dev_err_probe(dev, ret, "Failed to add GPIO chip\n");
}
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index d67838afb085..5de6ff62c69b 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -48,7 +48,7 @@ static const struct pin_config_item conf_items[] = {
PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT_ENABLE, "input schmitt enabled", NULL, false),
PCONFDUMP(PIN_CONFIG_MODE_LOW_POWER, "pin low power", "mode", true),
PCONFDUMP(PIN_CONFIG_OUTPUT_ENABLE, "output enabled", NULL, false),
- PCONFDUMP(PIN_CONFIG_OUTPUT, "pin output", "level", true),
+ PCONFDUMP(PIN_CONFIG_LEVEL, "pin output", "level", true),
PCONFDUMP(PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS, "output impedance", "ohms", true),
PCONFDUMP(PIN_CONFIG_POWER_SOURCE, "pin power source", "selector", true),
PCONFDUMP(PIN_CONFIG_SLEEP_HARDWARE_STATE, "sleep hardware state", NULL, false),
@@ -183,9 +183,9 @@ static const struct pinconf_generic_params dt_params[] = {
{ "low-power-enable", PIN_CONFIG_MODE_LOW_POWER, 1 },
{ "output-disable", PIN_CONFIG_OUTPUT_ENABLE, 0 },
{ "output-enable", PIN_CONFIG_OUTPUT_ENABLE, 1 },
- { "output-high", PIN_CONFIG_OUTPUT, 1, },
+ { "output-high", PIN_CONFIG_LEVEL, 1, },
{ "output-impedance-ohms", PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS, 0 },
- { "output-low", PIN_CONFIG_OUTPUT, 0, },
+ { "output-low", PIN_CONFIG_LEVEL, 0, },
{ "power-source", PIN_CONFIG_POWER_SOURCE, 0 },
{ "sleep-hardware-state", PIN_CONFIG_SLEEP_HARDWARE_STATE, 0 },
{ "slew-rate", PIN_CONFIG_SLEW_RATE, 0 },
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index b90ef3a26ae8..2dac5c71eb00 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -383,14 +383,15 @@ static void amd_gpio_irq_enable(struct irq_data *d)
unsigned long flags;
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
- gpiochip_enable_irq(gc, d->hwirq);
+ gpiochip_enable_irq(gc, hwirq);
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
- pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
+ pin_reg = readl(gpio_dev->base + hwirq * 4);
pin_reg |= BIT(INTERRUPT_ENABLE_OFF);
pin_reg |= BIT(INTERRUPT_MASK_OFF);
- writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
+ writel(pin_reg, gpio_dev->base + hwirq * 4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
@@ -400,15 +401,16 @@ static void amd_gpio_irq_disable(struct irq_data *d)
unsigned long flags;
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
- pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
+ pin_reg = readl(gpio_dev->base + hwirq * 4);
pin_reg &= ~BIT(INTERRUPT_ENABLE_OFF);
pin_reg &= ~BIT(INTERRUPT_MASK_OFF);
- writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
+ writel(pin_reg, gpio_dev->base + hwirq * 4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
- gpiochip_disable_irq(gc, d->hwirq);
+ gpiochip_disable_irq(gc, hwirq);
}
static void amd_gpio_irq_mask(struct irq_data *d)
@@ -417,11 +419,12 @@ static void amd_gpio_irq_mask(struct irq_data *d)
unsigned long flags;
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
- pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
+ pin_reg = readl(gpio_dev->base + hwirq * 4);
pin_reg &= ~BIT(INTERRUPT_MASK_OFF);
- writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
+ writel(pin_reg, gpio_dev->base + hwirq * 4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
@@ -431,11 +434,12 @@ static void amd_gpio_irq_unmask(struct irq_data *d)
unsigned long flags;
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
- pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
+ pin_reg = readl(gpio_dev->base + hwirq * 4);
pin_reg |= BIT(INTERRUPT_MASK_OFF);
- writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
+ writel(pin_reg, gpio_dev->base + hwirq * 4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
@@ -446,17 +450,21 @@ static int amd_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
u32 wake_mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
int err;
+ pm_pr_dbg("Setting wake for GPIO %lu to %s\n",
+ hwirq, str_enable_disable(on));
+
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
- pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
+ pin_reg = readl(gpio_dev->base + hwirq * 4);
if (on)
pin_reg |= wake_mask;
else
pin_reg &= ~wake_mask;
- writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
+ writel(pin_reg, gpio_dev->base + hwirq * 4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
if (on)
@@ -492,9 +500,10 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
unsigned long flags;
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
- pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
+ pin_reg = readl(gpio_dev->base + hwirq * 4);
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_RISING:
@@ -560,10 +569,10 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
pin_reg_irq_en = pin_reg;
pin_reg_irq_en |= mask;
pin_reg_irq_en &= ~BIT(INTERRUPT_MASK_OFF);
- writel(pin_reg_irq_en, gpio_dev->base + (d->hwirq)*4);
- while ((readl(gpio_dev->base + (d->hwirq)*4) & mask) != mask)
+ writel(pin_reg_irq_en, gpio_dev->base + hwirq * 4);
+ while ((readl(gpio_dev->base + hwirq * 4) & mask) != mask)
continue;
- writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
+ writel(pin_reg, gpio_dev->base + hwirq * 4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
return ret;
@@ -1187,7 +1196,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
gpio_dev->gc.direction_input = amd_gpio_direction_input;
gpio_dev->gc.direction_output = amd_gpio_direction_output;
gpio_dev->gc.get = amd_gpio_get_value;
- gpio_dev->gc.set_rv = amd_gpio_set_value;
+ gpio_dev->gc.set = amd_gpio_set_value;
gpio_dev->gc.set_config = amd_gpio_set_config;
gpio_dev->gc.dbg_show = amd_gpio_dbg_show;
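
All of the pinctrl-amd hunks apply one pattern: cache irqd_to_hwirq(d) in a local instead of dereferencing d->hwirq at every register access, which also tidies the 4-bytes-per-pin address arithmetic. A condensed sketch (my_dev and MY_MASK_BIT are stand-ins; the locking and register layout mirror the driver):

#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/irq.h>

#define MY_MASK_BIT	5	/* hypothetical interrupt-mask bit */

struct my_dev {
	raw_spinlock_t lock;
	void __iomem *base;
};

static void my_irq_mask(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct my_dev *priv = gpiochip_get_data(gc);
	irq_hw_number_t hwirq = irqd_to_hwirq(d);	/* cached once */
	unsigned long flags;
	u32 reg;

	raw_spin_lock_irqsave(&priv->lock, flags);
	reg = readl(priv->base + hwirq * 4);	/* one register per pin */
	reg &= ~BIT(MY_MASK_BIT);
	writel(reg, priv->base + hwirq * 4);
	raw_spin_unlock_irqrestore(&priv->lock, flags);
}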
diff --git a/drivers/pinctrl/pinctrl-amdisp.c b/drivers/pinctrl/pinctrl-amdisp.c
index 2e706bf8bcde..efbf40c776ea 100644
--- a/drivers/pinctrl/pinctrl-amdisp.c
+++ b/drivers/pinctrl/pinctrl-amdisp.c
@@ -151,7 +151,7 @@ static int amdisp_gpiochip_add(struct platform_device *pdev,
gc->direction_input = amdisp_gpio_direction_input;
gc->direction_output = amdisp_gpio_direction_output;
gc->get = amdisp_gpio_get;
- gc->set_rv = amdisp_gpio_set;
+ gc->set = amdisp_gpio_set;
gc->base = -1;
gc->ngpio = ARRAY_SIZE(amdisp_range_pins);
diff --git a/drivers/pinctrl/pinctrl-apple-gpio.c b/drivers/pinctrl/pinctrl-apple-gpio.c
index dcf3a921b4df..e1a7bc8cf765 100644
--- a/drivers/pinctrl/pinctrl-apple-gpio.c
+++ b/drivers/pinctrl/pinctrl-apple-gpio.c
@@ -378,7 +378,7 @@ static int apple_gpio_register(struct apple_gpio_pinctrl *pctl)
pctl->gpio_chip.direction_input = apple_gpio_direction_input;
pctl->gpio_chip.direction_output = apple_gpio_direction_output;
pctl->gpio_chip.get = apple_gpio_get;
- pctl->gpio_chip.set_rv = apple_gpio_set;
+ pctl->gpio_chip.set = apple_gpio_set;
pctl->gpio_chip.base = -1;
pctl->gpio_chip.ngpio = pctl->pinctrl_desc.npins;
pctl->gpio_chip.parent = pctl->dev;
@@ -515,6 +515,7 @@ static int apple_gpio_pinctrl_probe(struct platform_device *pdev)
}
static const struct of_device_id apple_gpio_pinctrl_of_match[] = {
+ { .compatible = "apple,t8103-pinctrl", },
{ .compatible = "apple,pinctrl", },
{ }
};
diff --git a/drivers/pinctrl/pinctrl-as3722.c b/drivers/pinctrl/pinctrl-as3722.c
index 30ed758bbe9d..e713dea98aa8 100644
--- a/drivers/pinctrl/pinctrl-as3722.c
+++ b/drivers/pinctrl/pinctrl-as3722.c
@@ -529,7 +529,7 @@ static const struct gpio_chip as3722_gpio_chip = {
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
.get = as3722_gpio_get,
- .set_rv = as3722_gpio_set,
+ .set = as3722_gpio_set,
.direction_input = pinctrl_gpio_direction_input,
.direction_output = as3722_gpio_direction_output,
.to_irq = as3722_gpio_to_irq,
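
The .set_rv -> .set renames repeated across these drivers track gpiolib's switch of gpio_chip::set to the int-returning prototype under its original name, retiring the transitional set_rv field; the driver callbacks keep their signatures and are simply reassigned. A sketch, assuming a hypothetical regmap-backed chip (MY_OUT_REG and my_gpio_get are stand-ins):

#define MY_OUT_REG	0x04	/* hypothetical output register */

static int my_gpio_get(struct gpio_chip *gc, unsigned int offset);

static int my_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
	struct regmap *map = gpiochip_get_data(gc);

	/* Bus errors now propagate to gpiolib instead of being dropped. */
	return regmap_update_bits(map, MY_OUT_REG, BIT(offset),
				  value ? BIT(offset) : 0);
}

static const struct gpio_chip my_chip_template = {
	.label	= "my-chip",
	.get	= my_gpio_get,
	.set	= my_gpio_set,	/* was .set_rv before the rename */
};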
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index 57f105ac962d..ec5351fc282e 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -442,8 +442,8 @@ static struct gpio_chip atmel_gpio_chip = {
.get = atmel_gpio_get,
.get_multiple = atmel_gpio_get_multiple,
.direction_output = atmel_gpio_direction_output,
- .set_rv = atmel_gpio_set,
- .set_multiple_rv = atmel_gpio_set_multiple,
+ .set = atmel_gpio_set,
+ .set_multiple = atmel_gpio_set_multiple,
.to_irq = atmel_gpio_to_irq,
.base = 0,
};
@@ -862,7 +862,7 @@ static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
conf |= ATMEL_PIO_IFSCEN_MASK;
}
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
conf |= ATMEL_PIO_DIR_MASK;
bank = ATMEL_PIO_BANK(pin_id);
pin = ATMEL_PIO_LINE(pin_id);
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 6c2727bd55bc..0a57ed51d4c9 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1801,8 +1801,8 @@ static const struct gpio_chip at91_gpio_template = {
.direction_input = at91_gpio_direction_input,
.get = at91_gpio_get,
.direction_output = at91_gpio_direction_output,
- .set_rv = at91_gpio_set,
- .set_multiple_rv = at91_gpio_set_multiple,
+ .set = at91_gpio_set,
+ .set_multiple = at91_gpio_set_multiple,
.dbg_show = at91_gpio_dbg_show,
.can_sleep = false,
.ngpio = MAX_NB_GPIO_PER_BANK,
diff --git a/drivers/pinctrl/pinctrl-aw9523.c b/drivers/pinctrl/pinctrl-aw9523.c
index 9570ef346af6..479553a79216 100644
--- a/drivers/pinctrl/pinctrl-aw9523.c
+++ b/drivers/pinctrl/pinctrl-aw9523.c
@@ -215,7 +215,7 @@ static int aw9523_pcfg_param_to_reg(enum pin_config_param pcp, int pin, u8 *r)
case PIN_CONFIG_OUTPUT_ENABLE:
reg = AW9523_REG_CONF_STATE(pin);
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
reg = AW9523_REG_OUT_STATE(pin);
break;
default:
@@ -249,7 +249,7 @@ static int aw9523_pconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
switch (param) {
case PIN_CONFIG_BIAS_PULL_UP:
case PIN_CONFIG_INPUT_ENABLE:
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
val &= BIT(regbit);
break;
case PIN_CONFIG_BIAS_PULL_DOWN:
@@ -301,7 +301,7 @@ static int aw9523_pconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
goto end;
switch (param) {
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
/* First, enable pin output */
rc = regmap_update_bits(awi->regmap,
AW9523_REG_CONF_STATE(pin),
@@ -785,8 +785,8 @@ static int aw9523_init_gpiochip(struct aw9523 *awi, unsigned int npins)
gc->direction_output = aw9523_direction_output;
gc->get = aw9523_gpio_get;
gc->get_multiple = aw9523_gpio_get_multiple;
- gc->set_rv = aw9523_gpio_set;
- gc->set_multiple_rv = aw9523_gpio_set_multiple;
+ gc->set = aw9523_gpio_set;
+ gc->set_multiple = aw9523_gpio_set_multiple;
gc->set_config = gpiochip_generic_config;
gc->parent = dev;
gc->owner = THIS_MODULE;
diff --git a/drivers/pinctrl/pinctrl-axp209.c b/drivers/pinctrl/pinctrl-axp209.c
index fff408b60c4a..2bd8487484a8 100644
--- a/drivers/pinctrl/pinctrl-axp209.c
+++ b/drivers/pinctrl/pinctrl-axp209.c
@@ -192,7 +192,7 @@ static int axp20x_gpio_get_direction(struct gpio_chip *chip,
static int axp20x_gpio_output(struct gpio_chip *chip, unsigned int offset,
int value)
{
- return chip->set_rv(chip, offset, value);
+ return chip->set(chip, offset, value);
}
static int axp20x_gpio_set(struct gpio_chip *chip, unsigned int offset,
@@ -463,7 +463,7 @@ static int axp20x_pctl_probe(struct platform_device *pdev)
pctl->chip.owner = THIS_MODULE;
pctl->chip.get = axp20x_gpio_get;
pctl->chip.get_direction = axp20x_gpio_get_direction;
- pctl->chip.set_rv = axp20x_gpio_set;
+ pctl->chip.set = axp20x_gpio_set;
pctl->chip.direction_input = pinctrl_gpio_direction_input;
pctl->chip.direction_output = axp20x_gpio_output;
diff --git a/drivers/pinctrl/pinctrl-cy8c95x0.c b/drivers/pinctrl/pinctrl-cy8c95x0.c
index 8a2fd632bdd4..a4b04bf6d081 100644
--- a/drivers/pinctrl/pinctrl-cy8c95x0.c
+++ b/drivers/pinctrl/pinctrl-cy8c95x0.c
@@ -808,7 +808,7 @@ static int cy8c95x0_gpio_get_pincfg(struct cy8c95x0_pinctrl *chip,
case PIN_CONFIG_MODE_PWM:
reg = CY8C95X0_SELPWM;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
reg = CY8C95X0_OUTPUT;
break;
case PIN_CONFIG_OUTPUT_ENABLE:
@@ -939,10 +939,10 @@ static int cy8c95x0_setup_gpiochip(struct cy8c95x0_pinctrl *chip)
gc->direction_input = cy8c95x0_gpio_direction_input;
gc->direction_output = cy8c95x0_gpio_direction_output;
gc->get = cy8c95x0_gpio_get_value;
- gc->set_rv = cy8c95x0_gpio_set_value;
+ gc->set = cy8c95x0_gpio_set_value;
gc->get_direction = cy8c95x0_gpio_get_direction;
gc->get_multiple = cy8c95x0_gpio_get_multiple;
- gc->set_multiple_rv = cy8c95x0_gpio_set_multiple;
+ gc->set_multiple = cy8c95x0_gpio_set_multiple;
gc->set_config = gpiochip_generic_config;
gc->can_sleep = true;
gc->add_pin_ranges = cy8c95x0_add_pin_ranges;
diff --git a/drivers/pinctrl/pinctrl-da9062.c b/drivers/pinctrl/pinctrl-da9062.c
index 3295b09dfc3d..53298cbcc5cf 100644
--- a/drivers/pinctrl/pinctrl-da9062.c
+++ b/drivers/pinctrl/pinctrl-da9062.c
@@ -233,7 +233,7 @@ static int da9062_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
static const struct gpio_chip reference_gc = {
.owner = THIS_MODULE,
.get = da9062_gpio_get,
- .set_rv = da9062_gpio_set,
+ .set = da9062_gpio_set,
.get_direction = da9062_gpio_get_direction,
.direction_input = da9062_gpio_direction_input,
.direction_output = da9062_gpio_direction_output,
diff --git a/drivers/pinctrl/pinctrl-digicolor.c b/drivers/pinctrl/pinctrl-digicolor.c
index 1676cb3cc4c9..2e16f09aeb47 100644
--- a/drivers/pinctrl/pinctrl-digicolor.c
+++ b/drivers/pinctrl/pinctrl-digicolor.c
@@ -248,7 +248,7 @@ static int dc_gpiochip_add(struct dc_pinmap *pmap)
chip->direction_input = dc_gpio_direction_input;
chip->direction_output = dc_gpio_direction_output;
chip->get = dc_gpio_get;
- chip->set_rv = dc_gpio_set;
+ chip->set = dc_gpio_set;
chip->base = -1;
chip->ngpio = PINS_COUNT;
diff --git a/drivers/pinctrl/pinctrl-eic7700.c b/drivers/pinctrl/pinctrl-eic7700.c
index 4874b5532343..ffcd0ec5c2dc 100644
--- a/drivers/pinctrl/pinctrl-eic7700.c
+++ b/drivers/pinctrl/pinctrl-eic7700.c
@@ -634,7 +634,7 @@ static int eic7700_pinctrl_probe(struct platform_device *pdev)
return PTR_ERR(pc->base);
regulator = devm_regulator_get(dev, "vrgmii");
- if (IS_ERR_OR_NULL(regulator)) {
+ if (IS_ERR(regulator)) {
return dev_err_probe(dev, PTR_ERR(regulator),
"failed to get vrgmii regulator\n");
}
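
A note on the eic7700 one-liner above: devm_regulator_get() returns either a valid regulator (a dummy one if the supply is absent) or an ERR_PTR(), never NULL, so IS_ERR() is the complete check. The IS_ERR_OR_NULL() form was not only redundant: had a NULL ever reached it, PTR_ERR(NULL) == 0 would have made dev_err_probe() return success from a failure path. The corrected shape, as a sketch:

static int my_get_vrgmii(struct device *dev)
{
	struct regulator *reg = devm_regulator_get(dev, "vrgmii");

	/* Never NULL: a missing supply yields a dummy regulator. */
	if (IS_ERR(reg))
		return dev_err_probe(dev, PTR_ERR(reg),
				     "failed to get vrgmii regulator\n");

	return 0;
}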
diff --git a/drivers/pinctrl/pinctrl-equilibrium.c b/drivers/pinctrl/pinctrl-equilibrium.c
index fce804d42e7d..2d04829b29c9 100644
--- a/drivers/pinctrl/pinctrl-equilibrium.c
+++ b/drivers/pinctrl/pinctrl-equilibrium.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2019 Intel Corporation */
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -179,7 +180,7 @@ static int gpiochip_setup(struct device *dev, struct eqbr_gpio_ctrl *gctrl)
struct gpio_irq_chip *girq;
struct gpio_chip *gc;
- gc = &gctrl->chip;
+ gc = &gctrl->chip.gc;
gc->label = gctrl->name;
gc->fwnode = gctrl->fwnode;
gc->request = gpiochip_generic_request;
@@ -191,7 +192,7 @@ static int gpiochip_setup(struct device *dev, struct eqbr_gpio_ctrl *gctrl)
return 0;
}
- girq = &gctrl->chip.irq;
+ girq = &gctrl->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &eqbr_irq_chip);
girq->parent_handler = eqbr_irq_handler;
girq->num_parents = 1;
@@ -208,6 +209,7 @@ static int gpiochip_setup(struct device *dev, struct eqbr_gpio_ctrl *gctrl)
static int gpiolib_reg(struct eqbr_pinctrl_drv_data *drvdata)
{
+ struct gpio_generic_chip_config config;
struct device *dev = drvdata->dev;
struct eqbr_gpio_ctrl *gctrl;
struct device_node *np;
@@ -239,12 +241,16 @@ static int gpiolib_reg(struct eqbr_pinctrl_drv_data *drvdata)
}
raw_spin_lock_init(&gctrl->lock);
- ret = bgpio_init(&gctrl->chip, dev, gctrl->bank->nr_pins / 8,
- gctrl->membase + GPIO_IN,
- gctrl->membase + GPIO_OUTSET,
- gctrl->membase + GPIO_OUTCLR,
- gctrl->membase + GPIO_DIR,
- NULL, 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = gctrl->bank->nr_pins / 8,
+ .dat = gctrl->membase + GPIO_IN,
+ .set = gctrl->membase + GPIO_OUTSET,
+ .clr = gctrl->membase + GPIO_OUTCLR,
+ .dirout = gctrl->membase + GPIO_DIR,
+ };
+
+ ret = gpio_generic_chip_init(&gctrl->chip, &config);
if (ret) {
dev_err(dev, "unable to init generic GPIO\n");
return ret;
@@ -254,7 +260,7 @@ static int gpiolib_reg(struct eqbr_pinctrl_drv_data *drvdata)
if (ret)
return ret;
- ret = devm_gpiochip_add_data(dev, &gctrl->chip, gctrl);
+ ret = devm_gpiochip_add_data(dev, &gctrl->chip.gc, gctrl);
if (ret)
return ret;
}
@@ -319,7 +325,7 @@ static int eqbr_pinmux_set_mux(struct pinctrl_dev *pctldev,
unsigned int selector, unsigned int group)
{
struct eqbr_pinctrl_drv_data *pctl = pinctrl_dev_get_drvdata(pctldev);
- struct function_desc *func;
+ const struct function_desc *func;
struct group_desc *grp;
unsigned int *pinmux;
int i;
@@ -439,7 +445,7 @@ static int eqbr_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
}
raw_spin_unlock_irqrestore(&pctl->lock, flags);
*config = pinconf_to_config_packed(param, val);
-;
+
return 0;
}
@@ -499,7 +505,7 @@ static int eqbr_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
bank->pin_base, pin);
return -ENODEV;
}
- gc = &gctrl->chip;
+ gc = &gctrl->chip.gc;
gc->direction_output(gc, offset, 0);
continue;
default:
diff --git a/drivers/pinctrl/pinctrl-equilibrium.h b/drivers/pinctrl/pinctrl-equilibrium.h
index b4d149bde39d..b56124d7fe91 100644
--- a/drivers/pinctrl/pinctrl-equilibrium.h
+++ b/drivers/pinctrl/pinctrl-equilibrium.h
@@ -96,7 +96,7 @@ struct fwnode_handle;
* @lock: spin lock to protect gpio register write.
*/
struct eqbr_gpio_ctrl {
- struct gpio_chip chip;
+ struct gpio_generic_chip chip;
struct fwnode_handle *fwnode;
struct eqbr_pin_bank *bank;
void __iomem *membase;
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index 79119cf20efc..c7f14546de05 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -96,11 +96,8 @@
.data = (void *)func, \
}
-#define INGENIC_PIN_FUNCTION(_name_, id) \
- { \
- .func = PINCTRL_PINFUNCTION(_name_, id##_groups, ARRAY_SIZE(id##_groups)), \
- .data = NULL, \
- }
+#define INGENIC_PIN_FUNCTION(_name_, id) \
+ PINCTRL_PINFUNCTION(_name_, id##_groups, ARRAY_SIZE(id##_groups))
enum jz_version {
ID_JZ4730,
@@ -128,7 +125,7 @@ struct ingenic_chip_info {
const struct group_desc *groups;
unsigned int num_groups;
- const struct function_desc *functions;
+ const struct pinfunction *functions;
unsigned int num_functions;
const u32 *pull_ups, *pull_downs;
@@ -263,7 +260,7 @@ static const char *jz4730_pwm1_groups[] = { "pwm1", };
static const char *jz4730_mii_groups[] = { "mii", };
static const char *jz4730_i2s_groups[] = { "i2s-data", "i2s-master", "i2s-slave", };
-static const struct function_desc jz4730_functions[] = {
+static const struct pinfunction jz4730_functions[] = {
INGENIC_PIN_FUNCTION("mmc", jz4730_mmc),
INGENIC_PIN_FUNCTION("uart0", jz4730_uart0),
INGENIC_PIN_FUNCTION("uart1", jz4730_uart1),
@@ -370,7 +367,7 @@ static const char *jz4740_pwm5_groups[] = { "pwm5", };
static const char *jz4740_pwm6_groups[] = { "pwm6", };
static const char *jz4740_pwm7_groups[] = { "pwm7", };
-static const struct function_desc jz4740_functions[] = {
+static const struct pinfunction jz4740_functions[] = {
INGENIC_PIN_FUNCTION("mmc", jz4740_mmc),
INGENIC_PIN_FUNCTION("uart0", jz4740_uart0),
INGENIC_PIN_FUNCTION("uart1", jz4740_uart1),
@@ -474,7 +471,7 @@ static const char *jz4725b_pwm3_groups[] = { "pwm3", };
static const char *jz4725b_pwm4_groups[] = { "pwm4", };
static const char *jz4725b_pwm5_groups[] = { "pwm5", };
-static const struct function_desc jz4725b_functions[] = {
+static const struct pinfunction jz4725b_functions[] = {
INGENIC_PIN_FUNCTION("mmc0", jz4725b_mmc0),
INGENIC_PIN_FUNCTION("mmc1", jz4725b_mmc1),
INGENIC_PIN_FUNCTION("uart", jz4725b_uart),
@@ -606,7 +603,7 @@ static const char *jz4750_pwm3_groups[] = { "pwm3", };
static const char *jz4750_pwm4_groups[] = { "pwm4", };
static const char *jz4750_pwm5_groups[] = { "pwm5", };
-static const struct function_desc jz4750_functions[] = {
+static const struct pinfunction jz4750_functions[] = {
INGENIC_PIN_FUNCTION("uart0", jz4750_uart0),
INGENIC_PIN_FUNCTION("uart1", jz4750_uart1),
INGENIC_PIN_FUNCTION("uart2", jz4750_uart2),
@@ -771,7 +768,7 @@ static const char *jz4755_pwm3_groups[] = { "pwm3", };
static const char *jz4755_pwm4_groups[] = { "pwm4", };
static const char *jz4755_pwm5_groups[] = { "pwm5", };
-static const struct function_desc jz4755_functions[] = {
+static const struct pinfunction jz4755_functions[] = {
INGENIC_PIN_FUNCTION("uart0", jz4755_uart0),
INGENIC_PIN_FUNCTION("uart1", jz4755_uart1),
INGENIC_PIN_FUNCTION("uart2", jz4755_uart2),
@@ -1106,7 +1103,7 @@ static const char *jz4760_pwm6_groups[] = { "pwm6", };
static const char *jz4760_pwm7_groups[] = { "pwm7", };
static const char *jz4760_otg_groups[] = { "otg-vbus", };
-static const struct function_desc jz4760_functions[] = {
+static const struct pinfunction jz4760_functions[] = {
INGENIC_PIN_FUNCTION("uart0", jz4760_uart0),
INGENIC_PIN_FUNCTION("uart1", jz4760_uart1),
INGENIC_PIN_FUNCTION("uart2", jz4760_uart2),
@@ -1444,7 +1441,7 @@ static const char *jz4770_pwm6_groups[] = { "pwm6", };
static const char *jz4770_pwm7_groups[] = { "pwm7", };
static const char *jz4770_mac_groups[] = { "mac-rmii", "mac-mii", };
-static const struct function_desc jz4770_functions[] = {
+static const struct pinfunction jz4770_functions[] = {
INGENIC_PIN_FUNCTION("uart0", jz4770_uart0),
INGENIC_PIN_FUNCTION("uart1", jz4770_uart1),
INGENIC_PIN_FUNCTION("uart2", jz4770_uart2),
@@ -1723,7 +1720,7 @@ static const char *jz4775_mac_groups[] = {
};
static const char *jz4775_otg_groups[] = { "otg-vbus", };
-static const struct function_desc jz4775_functions[] = {
+static const struct pinfunction jz4775_functions[] = {
INGENIC_PIN_FUNCTION("uart0", jz4775_uart0),
INGENIC_PIN_FUNCTION("uart1", jz4775_uart1),
INGENIC_PIN_FUNCTION("uart2", jz4775_uart2),
@@ -1976,7 +1973,7 @@ static const char *jz4780_dmic_groups[] = { "dmic", };
static const char *jz4780_cim_groups[] = { "cim-data", };
static const char *jz4780_hdmi_ddc_groups[] = { "hdmi-ddc", };
-static const struct function_desc jz4780_functions[] = {
+static const struct pinfunction jz4780_functions[] = {
INGENIC_PIN_FUNCTION("uart0", jz4770_uart0),
INGENIC_PIN_FUNCTION("uart1", jz4770_uart1),
INGENIC_PIN_FUNCTION("uart2", jz4780_uart2),
@@ -2211,7 +2208,7 @@ static const char *x1000_pwm3_groups[] = { "pwm3", };
static const char *x1000_pwm4_groups[] = { "pwm4", };
static const char *x1000_mac_groups[] = { "mac", };
-static const struct function_desc x1000_functions[] = {
+static const struct pinfunction x1000_functions[] = {
INGENIC_PIN_FUNCTION("uart0", x1000_uart0),
INGENIC_PIN_FUNCTION("uart1", x1000_uart1),
INGENIC_PIN_FUNCTION("uart2", x1000_uart2),
@@ -2341,7 +2338,7 @@ static const char *x1500_pwm2_groups[] = { "pwm2", };
static const char *x1500_pwm3_groups[] = { "pwm3", };
static const char *x1500_pwm4_groups[] = { "pwm4", };
-static const struct function_desc x1500_functions[] = {
+static const struct pinfunction x1500_functions[] = {
INGENIC_PIN_FUNCTION("uart0", x1500_uart0),
INGENIC_PIN_FUNCTION("uart1", x1500_uart1),
INGENIC_PIN_FUNCTION("uart2", x1500_uart2),
@@ -2562,7 +2559,7 @@ static const char * const x1600_pwm7_groups[] = { "pwm7-b10", "pwm7-b21", };
static const char * const x1600_mac_groups[] = { "mac", };
-static const struct function_desc x1600_functions[] = {
+static const struct pinfunction x1600_functions[] = {
INGENIC_PIN_FUNCTION("uart0", x1600_uart0),
INGENIC_PIN_FUNCTION("uart1", x1600_uart1),
INGENIC_PIN_FUNCTION("uart2", x1600_uart2),
@@ -2779,7 +2776,7 @@ static const char *x1830_pwm6_groups[] = { "pwm6-c-17", "pwm6-c-27", };
static const char *x1830_pwm7_groups[] = { "pwm7-c-18", "pwm7-c-28", };
static const char *x1830_mac_groups[] = { "mac", };
-static const struct function_desc x1830_functions[] = {
+static const struct pinfunction x1830_functions[] = {
INGENIC_PIN_FUNCTION("uart0", x1830_uart0),
INGENIC_PIN_FUNCTION("uart1", x1830_uart1),
INGENIC_PIN_FUNCTION("sfc", x1830_sfc),
@@ -3225,7 +3222,7 @@ static const char *x2000_mac0_groups[] = { "mac0-rmii", "mac0-rgmii", };
static const char *x2000_mac1_groups[] = { "mac1-rmii", "mac1-rgmii", };
static const char *x2000_otg_groups[] = { "otg-vbus", };
-static const struct function_desc x2000_functions[] = {
+static const struct pinfunction x2000_functions[] = {
INGENIC_PIN_FUNCTION("uart0", x2000_uart0),
INGENIC_PIN_FUNCTION("uart1", x2000_uart1),
INGENIC_PIN_FUNCTION("uart2", x2000_uart2),
@@ -3449,7 +3446,7 @@ static const struct group_desc x2100_groups[] = {
static const char *x2100_mac_groups[] = { "mac", };
-static const struct function_desc x2100_functions[] = {
+static const struct pinfunction x2100_functions[] = {
INGENIC_PIN_FUNCTION("uart0", x2000_uart0),
INGENIC_PIN_FUNCTION("uart1", x2000_uart1),
INGENIC_PIN_FUNCTION("uart2", x2000_uart2),
@@ -4003,7 +4000,7 @@ static int ingenic_pinmux_set_mux(struct pinctrl_dev *pctldev,
unsigned int selector, unsigned int group)
{
struct ingenic_pinctrl *jzpc = pinctrl_dev_get_drvdata(pctldev);
- struct function_desc *func;
+ const struct function_desc *func;
struct group_desc *grp;
unsigned int i;
uintptr_t mode;
@@ -4018,7 +4015,7 @@ static int ingenic_pinmux_set_mux(struct pinctrl_dev *pctldev,
return -EINVAL;
dev_dbg(pctldev->dev, "enable function %s group %s\n",
- func->func.name, grp->grp.name);
+ func->func->name, grp->grp.name);
mode = (uintptr_t)grp->data;
if (mode <= 3) {
@@ -4267,7 +4264,7 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
case PIN_CONFIG_BIAS_PULL_UP:
case PIN_CONFIG_BIAS_PULL_DOWN:
case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
case PIN_CONFIG_SLEW_RATE:
continue;
default:
@@ -4308,7 +4305,7 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
ingenic_set_schmitt_trigger(jzpc, pin, arg);
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
ret = pinctrl_gpio_direction_output(jzpc->gc,
pin - jzpc->gc->base);
if (ret)
@@ -4451,7 +4448,7 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
jzgc->gc.fwnode = fwnode;
jzgc->gc.owner = THIS_MODULE;
- jzgc->gc.set_rv = ingenic_gpio_set;
+ jzgc->gc.set = ingenic_gpio_set;
jzgc->gc.get = ingenic_gpio_get;
jzgc->gc.direction_input = pinctrl_gpio_direction_input;
jzgc->gc.direction_output = ingenic_gpio_direction_output;
@@ -4571,11 +4568,9 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev)
}
for (i = 0; i < chip_info->num_functions; i++) {
- const struct function_desc *function = &chip_info->functions[i];
- const struct pinfunction *func = &function->func;
+ const struct pinfunction *func = &chip_info->functions[i];
- err = pinmux_generic_add_pinfunction(jzpc->pctl, func,
- function->data);
+ err = pinmux_generic_add_pinfunction(jzpc->pctl, func, NULL);
if (err < 0) {
dev_err(dev, "Failed to register function %s\n", func->name);
return err;
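
The ingenic conversion drops function_desc in favor of plain pinfunction tables because the driver carries no per-function data; INGENIC_PIN_FUNCTION() collapses to PINCTRL_PINFUNCTION() and registration passes NULL. A reduced sketch with hypothetical group names:

static const char * const my_uart_groups[] = { "uart0-data", };

static const struct pinfunction my_functions[] = {
	PINCTRL_PINFUNCTION("uart0", my_uart_groups,
			    ARRAY_SIZE(my_uart_groups)),
};

static int my_add_functions(struct pinctrl_dev *pctl)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(my_functions); i++) {
		/* No driver data: the third argument is simply NULL. */
		err = pinmux_generic_add_pinfunction(pctl, &my_functions[i],
						     NULL);
		if (err < 0)
			return err;
	}

	return 0;
}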
diff --git a/drivers/pinctrl/pinctrl-k210.c b/drivers/pinctrl/pinctrl-k210.c
index 66c04120c29d..ddd6d6bfd513 100644
--- a/drivers/pinctrl/pinctrl-k210.c
+++ b/drivers/pinctrl/pinctrl-k210.c
@@ -551,7 +551,7 @@ static int k210_pinconf_set_param(struct pinctrl_dev *pctldev,
else
val &= ~K210_PC_ST;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
k210_pinmux_set_pin_function(pctldev, pin, K210_PCF_CONSTANT);
val = readl(&pdata->fpioa->pins[pin]);
val |= K210_PC_MODE_OUT;
diff --git a/drivers/pinctrl/pinctrl-keembay.c b/drivers/pinctrl/pinctrl-keembay.c
index 30e641571cfe..3241d3ae6219 100644
--- a/drivers/pinctrl/pinctrl-keembay.c
+++ b/drivers/pinctrl/pinctrl-keembay.c
@@ -135,6 +135,11 @@ struct keembay_pin_soc {
const struct pinctrl_pin_desc *pins;
};
+struct keembay_pinfunction {
+ struct pinfunction func;
+ u8 mux_mode;
+};
+
static const struct pinctrl_pin_desc keembay_pins[] = {
KEEMBAY_PIN_DESC(0, "GPIO0",
KEEMBAY_MUX(0x0, "I2S0_M0"),
@@ -930,7 +935,7 @@ static int keembay_set_mux(struct pinctrl_dev *pctldev, unsigned int fun_sel,
unsigned int grp_sel)
{
struct keembay_pinctrl *kpc = pinctrl_dev_get_drvdata(pctldev);
- struct function_desc *func;
+ const struct function_desc *func;
struct group_desc *grp;
unsigned int val;
u8 pin_mode;
@@ -1481,7 +1486,7 @@ static int keembay_gpiochip_probe(struct keembay_pinctrl *kpc,
gc->direction_input = keembay_gpio_set_direction_in;
gc->direction_output = keembay_gpio_set_direction_out;
gc->get = keembay_gpio_get;
- gc->set_rv = keembay_gpio_set;
+ gc->set = keembay_gpio_set;
gc->set_config = gpiochip_generic_config;
gc->base = -1;
gc->ngpio = kpc->npins;
@@ -1556,13 +1561,13 @@ static int keembay_pinctrl_reg(struct keembay_pinctrl *kpc, struct device *dev)
}
static int keembay_add_functions(struct keembay_pinctrl *kpc,
- struct function_desc *functions)
+ struct keembay_pinfunction *functions)
{
unsigned int i;
/* Assign the groups for each function */
for (i = 0; i < kpc->nfuncs; i++) {
- struct function_desc *func = &functions[i];
+ struct keembay_pinfunction *func = &functions[i];
const char **group_names;
unsigned int grp_idx = 0;
int j;
@@ -1588,14 +1593,14 @@ static int keembay_add_functions(struct keembay_pinctrl *kpc,
/* Add all functions */
for (i = 0; i < kpc->nfuncs; i++)
pinmux_generic_add_pinfunction(kpc->pctrl, &functions[i].func,
- functions[i].data);
+ &functions[i].mux_mode);
return 0;
}
static int keembay_build_functions(struct keembay_pinctrl *kpc)
{
- struct function_desc *keembay_funcs, *new_funcs;
+ struct keembay_pinfunction *keembay_funcs, *new_funcs;
int i;
/*
@@ -1603,7 +1608,8 @@ static int keembay_build_functions(struct keembay_pinctrl *kpc)
* being part of 8 (hw maximum) globally unique muxes.
*/
kpc->nfuncs = 0;
- keembay_funcs = kcalloc(kpc->npins * 8, sizeof(*keembay_funcs), GFP_KERNEL);
+ keembay_funcs = devm_kcalloc(kpc->dev, kpc->npins * 8,
+ sizeof(*keembay_funcs), GFP_KERNEL);
if (!keembay_funcs)
return -ENOMEM;
@@ -1613,7 +1619,7 @@ static int keembay_build_functions(struct keembay_pinctrl *kpc)
struct keembay_mux_desc *mux;
for (mux = pdesc->drv_data; mux->name; mux++) {
- struct function_desc *fdesc;
+ struct keembay_pinfunction *fdesc;
/* Check if we already have function for this mux */
for (fdesc = keembay_funcs; fdesc->func.name; fdesc++) {
@@ -1627,18 +1633,18 @@ static int keembay_build_functions(struct keembay_pinctrl *kpc)
if (!fdesc->func.name) {
fdesc->func.name = mux->name;
fdesc->func.ngroups = 1;
- fdesc->data = &mux->mode;
+ fdesc->mux_mode = mux->mode;
kpc->nfuncs++;
}
}
}
/* Reallocate memory based on actual number of functions */
- new_funcs = krealloc(keembay_funcs, kpc->nfuncs * sizeof(*new_funcs), GFP_KERNEL);
- if (!new_funcs) {
- kfree(keembay_funcs);
+ new_funcs = devm_krealloc_array(kpc->dev, keembay_funcs,
+ kpc->nfuncs, sizeof(*new_funcs),
+ GFP_KERNEL);
+ if (!new_funcs)
return -ENOMEM;
- }
return keembay_add_functions(kpc, new_funcs);
}
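
Keembay goes the other way from ingenic: it does need per-function data (the mux mode), so it wraps pinfunction in its own keembay_pinfunction type rather than keeping function_desc, and moves both allocations to devres so the krealloc() error path no longer needs a manual kfree(). A sketch of the wrapper-plus-shrink pattern, with stand-in names:

struct my_pinfunction {
	struct pinfunction func;
	u8 mux_mode;	/* per-function driver data, stored by value */
};

static struct my_pinfunction *my_build_functions(struct device *dev,
						 unsigned int max_funcs,
						 unsigned int *nfuncs)
{
	struct my_pinfunction *funcs;

	/* Worst-case sizing; devres frees it if probe later fails. */
	funcs = devm_kcalloc(dev, max_funcs, sizeof(*funcs), GFP_KERNEL);
	if (!funcs)
		return NULL;

	/* ... discover functions, filling funcs[] and *nfuncs ... */

	/* Shrink to the discovered count, with overflow-checked sizing. */
	return devm_krealloc_array(dev, funcs, *nfuncs, sizeof(*funcs),
				   GFP_KERNEL);
}

Registration then hands pinmux_generic_add_pinfunction() the embedded &funcs[i].func and &funcs[i].mux_mode, as in the hunk above.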
diff --git a/drivers/pinctrl/pinctrl-max7360.c b/drivers/pinctrl/pinctrl-max7360.c
new file mode 100644
index 000000000000..abfaff468bad
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-max7360.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2025 Bootlin
+ *
+ * Author: Mathieu Dubois-Briand <mathieu.dubois-briand@bootlin.com>
+ */
+
+#include <linux/array_size.h>
+#include <linux/dev_printk.h>
+#include <linux/device.h>
+#include <linux/device/devres.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/mfd/max7360.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/stddef.h>
+
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include "core.h"
+#include "pinmux.h"
+
+struct max7360_pinctrl {
+ struct pinctrl_dev *pctldev;
+ struct pinctrl_desc pinctrl_desc;
+};
+
+static const struct pinctrl_pin_desc max7360_pins[] = {
+ PINCTRL_PIN(0, "PORT0"),
+ PINCTRL_PIN(1, "PORT1"),
+ PINCTRL_PIN(2, "PORT2"),
+ PINCTRL_PIN(3, "PORT3"),
+ PINCTRL_PIN(4, "PORT4"),
+ PINCTRL_PIN(5, "PORT5"),
+ PINCTRL_PIN(6, "PORT6"),
+ PINCTRL_PIN(7, "PORT7"),
+};
+
+static const unsigned int port0_pins[] = {0};
+static const unsigned int port1_pins[] = {1};
+static const unsigned int port2_pins[] = {2};
+static const unsigned int port3_pins[] = {3};
+static const unsigned int port4_pins[] = {4};
+static const unsigned int port5_pins[] = {5};
+static const unsigned int port6_pins[] = {6};
+static const unsigned int port7_pins[] = {7};
+static const unsigned int rotary_pins[] = {6, 7};
+
+static const struct pingroup max7360_groups[] = {
+ PINCTRL_PINGROUP("PORT0", port0_pins, ARRAY_SIZE(port0_pins)),
+ PINCTRL_PINGROUP("PORT1", port1_pins, ARRAY_SIZE(port1_pins)),
+ PINCTRL_PINGROUP("PORT2", port2_pins, ARRAY_SIZE(port2_pins)),
+ PINCTRL_PINGROUP("PORT3", port3_pins, ARRAY_SIZE(port3_pins)),
+ PINCTRL_PINGROUP("PORT4", port4_pins, ARRAY_SIZE(port4_pins)),
+ PINCTRL_PINGROUP("PORT5", port5_pins, ARRAY_SIZE(port5_pins)),
+ PINCTRL_PINGROUP("PORT6", port6_pins, ARRAY_SIZE(port6_pins)),
+ PINCTRL_PINGROUP("PORT7", port7_pins, ARRAY_SIZE(port7_pins)),
+ PINCTRL_PINGROUP("ROTARY", rotary_pins, ARRAY_SIZE(rotary_pins)),
+};
+
+static int max7360_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(max7360_groups);
+}
+
+static const char *max7360_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int group)
+{
+ return max7360_groups[group].name;
+}
+
+static int max7360_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int group,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ *pins = max7360_groups[group].pins;
+ *num_pins = max7360_groups[group].npins;
+ return 0;
+}
+
+static const struct pinctrl_ops max7360_pinctrl_ops = {
+ .get_groups_count = max7360_pinctrl_get_groups_count,
+ .get_group_name = max7360_pinctrl_get_group_name,
+ .get_group_pins = max7360_pinctrl_get_group_pins,
+#ifdef CONFIG_OF
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .dt_free_map = pinconf_generic_dt_free_map,
+#endif
+};
+
+static const char * const simple_groups[] = {
+ "PORT0", "PORT1", "PORT2", "PORT3",
+ "PORT4", "PORT5", "PORT6", "PORT7",
+};
+
+static const char * const rotary_groups[] = { "ROTARY" };
+
+#define MAX7360_PINCTRL_FN_GPIO 0
+#define MAX7360_PINCTRL_FN_PWM 1
+#define MAX7360_PINCTRL_FN_ROTARY 2
+static const struct pinfunction max7360_functions[] = {
+ [MAX7360_PINCTRL_FN_GPIO] = PINCTRL_PINFUNCTION("gpio", simple_groups,
+ ARRAY_SIZE(simple_groups)),
+ [MAX7360_PINCTRL_FN_PWM] = PINCTRL_PINFUNCTION("pwm", simple_groups,
+ ARRAY_SIZE(simple_groups)),
+ [MAX7360_PINCTRL_FN_ROTARY] = PINCTRL_PINFUNCTION("rotary", rotary_groups,
+ ARRAY_SIZE(rotary_groups)),
+};
+
+static int max7360_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(max7360_functions);
+}
+
+static const char *max7360_get_function_name(struct pinctrl_dev *pctldev, unsigned int selector)
+{
+ return max7360_functions[selector].name;
+}
+
+static int max7360_get_function_groups(struct pinctrl_dev *pctldev, unsigned int selector,
+ const char * const **groups,
+ unsigned int * const num_groups)
+{
+ *groups = max7360_functions[selector].groups;
+ *num_groups = max7360_functions[selector].ngroups;
+
+ return 0;
+}
+
+static int max7360_set_mux(struct pinctrl_dev *pctldev, unsigned int selector,
+ unsigned int group)
+{
+ struct regmap *regmap = dev_get_regmap(pctldev->dev->parent, NULL);
+ int val;
+
+ /*
+ * GPIO and PWM functions are the same: we only need to handle the
+ * rotary encoder function, on pins 6 and 7.
+ */
+ if (max7360_groups[group].pins[0] >= 6) {
+ if (selector == MAX7360_PINCTRL_FN_ROTARY)
+ val = MAX7360_GPIO_CFG_RTR_EN;
+ else
+ val = 0;
+
+ return regmap_write_bits(regmap, MAX7360_REG_GPIOCFG, MAX7360_GPIO_CFG_RTR_EN, val);
+ }
+
+ return 0;
+}
+
+static const struct pinmux_ops max7360_pmxops = {
+ .get_functions_count = max7360_get_functions_count,
+ .get_function_name = max7360_get_function_name,
+ .get_function_groups = max7360_get_function_groups,
+ .set_mux = max7360_set_mux,
+ .strict = true,
+};
+
+static int max7360_pinctrl_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ struct pinctrl_desc *pd;
+ struct max7360_pinctrl *chip;
+ struct device *dev = &pdev->dev;
+
+ regmap = dev_get_regmap(dev->parent, NULL);
+ if (!regmap)
+ return dev_err_probe(dev, -ENODEV, "Could not get parent regmap\n");
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ pd = &chip->pinctrl_desc;
+
+ pd->pctlops = &max7360_pinctrl_ops;
+ pd->pmxops = &max7360_pmxops;
+ pd->name = dev_name(dev);
+ pd->pins = max7360_pins;
+ pd->npins = MAX7360_MAX_GPIO;
+ pd->owner = THIS_MODULE;
+
+ /*
+ * This MFD sub-device does not have any associated device tree node:
+ * properties are stored in the device node of the parent (MFD) device
+ * and this same node is used in phandles of client devices.
+ * Reuse this device tree node here, as otherwise the pinctrl subsystem
+ * would be confused by this topology.
+ */
+ device_set_of_node_from_dev(dev, dev->parent);
+
+ chip->pctldev = devm_pinctrl_register(dev, pd, chip);
+ if (IS_ERR(chip->pctldev))
+ return dev_err_probe(dev, PTR_ERR(chip->pctldev), "can't register controller\n");
+
+ return 0;
+}
+
+static struct platform_driver max7360_pinctrl_driver = {
+ .driver = {
+ .name = "max7360-pinctrl",
+ },
+ .probe = max7360_pinctrl_probe,
+};
+module_platform_driver(max7360_pinctrl_driver);
+
+MODULE_DESCRIPTION("MAX7360 pinctrl driver");
+MODULE_AUTHOR("Mathieu Dubois-Briand <mathieu.dubois-briand@bootlin.com>");
+MODULE_LICENSE("GPL");
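
Two details of the new max7360 driver are worth calling out for reviewers: MFD cells like this one own neither a regmap nor an OF node, so the driver pulls both from the parent. A minimal sketch of that probe preamble (real APIs used above, stand-in driver name):

static int my_cell_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct regmap *regmap;

	/* The register map belongs to the parent MFD device. */
	regmap = dev_get_regmap(dev->parent, NULL);
	if (!regmap)
		return dev_err_probe(dev, -ENODEV,
				     "Could not get parent regmap\n");

	/* Share the parent's OF node so consumer phandles resolve. */
	device_set_of_node_from_dev(dev, dev->parent);

	return 0;
}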
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index c8027ef03ecc..a17fcaddf490 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -632,8 +632,8 @@ int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
mcp->chip.get = mcp23s08_get;
mcp->chip.get_multiple = mcp23s08_get_multiple;
mcp->chip.direction_output = mcp23s08_direction_output;
- mcp->chip.set_rv = mcp23s08_set;
- mcp->chip.set_multiple_rv = mcp23s08_set_multiple;
+ mcp->chip.set = mcp23s08_set;
+ mcp->chip.set_multiple = mcp23s08_set_multiple;
mcp->chip.base = base;
mcp->chip.can_sleep = true;
diff --git a/drivers/pinctrl/pinctrl-microchip-sgpio.c b/drivers/pinctrl/pinctrl-microchip-sgpio.c
index 88c2f14cfc6b..b6363f3cdce9 100644
--- a/drivers/pinctrl/pinctrl-microchip-sgpio.c
+++ b/drivers/pinctrl/pinctrl-microchip-sgpio.c
@@ -371,7 +371,7 @@ static int sgpio_pinconf_get(struct pinctrl_dev *pctldev,
val = !bank->is_input;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
if (bank->is_input)
return -EINVAL;
val = sgpio_output_get(priv, &addr);
@@ -402,7 +402,7 @@ static int sgpio_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
arg = pinconf_to_config_argument(configs[cfg]);
switch (param) {
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
if (bank->is_input)
return -EINVAL;
err = sgpio_output_set(priv, &addr, arg);
@@ -824,7 +824,7 @@ static int microchip_sgpio_register_bank(struct device *dev,
pctl_desc->confops = &sgpio_confops;
pctl_desc->owner = THIS_MODULE;
- pins = devm_kzalloc(dev, sizeof(*pins)*ngpios, GFP_KERNEL);
+ pins = devm_kcalloc(dev, ngpios, sizeof(*pins), GFP_KERNEL);
if (!pins)
return -ENOMEM;
@@ -858,7 +858,7 @@ static int microchip_sgpio_register_bank(struct device *dev,
gc->direction_input = microchip_sgpio_direction_input;
gc->direction_output = microchip_sgpio_direction_output;
gc->get = microchip_sgpio_get_value;
- gc->set_rv = microchip_sgpio_set_value;
+ gc->set = microchip_sgpio_set_value;
gc->request = gpiochip_generic_request;
gc->free = gpiochip_generic_free;
gc->of_xlate = microchip_sgpio_of_xlate;
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index fbb3d43746bb..70da3f37567a 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -1656,7 +1656,7 @@ static int ocelot_pinconf_get(struct pinctrl_dev *pctldev,
return err;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
err = regmap_read(info->map, REG(OCELOT_GPIO_OUT, info, pin),
&val);
if (err)
@@ -1735,7 +1735,7 @@ static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
case PIN_CONFIG_OUTPUT_ENABLE:
case PIN_CONFIG_INPUT_ENABLE:
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
p = pin % 32;
if (arg)
regmap_write(info->map,
@@ -1997,7 +1997,7 @@ static int ocelot_gpio_direction_output(struct gpio_chip *chip,
static const struct gpio_chip ocelot_gpiolib_chip = {
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
- .set_rv = ocelot_gpio_set,
+ .set = ocelot_gpio_set,
.get = ocelot_gpio_get,
.get_direction = ocelot_gpio_get_direction,
.direction_input = pinctrl_gpio_direction_input,
diff --git a/drivers/pinctrl/pinctrl-pic32.c b/drivers/pinctrl/pinctrl-pic32.c
index 6d64cab97e81..e8b481e87c77 100644
--- a/drivers/pinctrl/pinctrl-pic32.c
+++ b/drivers/pinctrl/pinctrl-pic32.c
@@ -1905,7 +1905,7 @@ static int pic32_pinconf_get(struct pinctrl_dev *pctldev, unsigned pin,
case PIN_CONFIG_INPUT_ENABLE:
arg = !!(readl(bank->reg_base + TRIS_REG) & mask);
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
arg = !(readl(bank->reg_base + TRIS_REG) & mask);
break;
default:
@@ -1960,7 +1960,7 @@ static int pic32_pinconf_set(struct pinctrl_dev *pctldev, unsigned pin,
case PIN_CONFIG_INPUT_ENABLE:
pic32_gpio_direction_input(&bank->gpio_chip, offset);
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
pic32_gpio_direction_output(&bank->gpio_chip,
offset, arg);
break;
@@ -2120,7 +2120,7 @@ static void pic32_gpio_irq_handler(struct irq_desc *desc)
.direction_input = pic32_gpio_direction_input, \
.direction_output = pic32_gpio_direction_output, \
.get = pic32_gpio_get, \
- .set_rv = pic32_gpio_set, \
+ .set = pic32_gpio_set, \
.ngpio = _npins, \
.base = GPIO_BANK_START(_bank), \
.owner = THIS_MODULE, \
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 7f8b562c81c9..0b33b01dbaad 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -1331,7 +1331,7 @@ static void pistachio_gpio_irq_handler(struct irq_desc *desc)
.direction_input = pistachio_gpio_direction_input, \
.direction_output = pistachio_gpio_direction_output, \
.get = pistachio_gpio_get, \
- .set_rv = pistachio_gpio_set, \
+ .set = pistachio_gpio_set, \
.base = _pin_base, \
.ngpio = _npins, \
}, \
diff --git a/drivers/pinctrl/pinctrl-rk805.c b/drivers/pinctrl/pinctrl-rk805.c
index fc0e330b1d11..22f576337faa 100644
--- a/drivers/pinctrl/pinctrl-rk805.c
+++ b/drivers/pinctrl/pinctrl-rk805.c
@@ -378,7 +378,7 @@ static const struct gpio_chip rk805_gpio_chip = {
.free = gpiochip_generic_free,
.get_direction = rk805_gpio_get_direction,
.get = rk805_gpio_get,
- .set_rv = rk805_gpio_set,
+ .set = rk805_gpio_set,
.direction_input = pinctrl_gpio_direction_input,
.direction_output = rk805_gpio_direction_output,
.can_sleep = true,
@@ -541,7 +541,7 @@ static int rk805_pinconf_get(struct pinctrl_dev *pctldev,
u32 arg = 0;
switch (param) {
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
case PIN_CONFIG_INPUT_ENABLE:
arg = rk805_gpio_get(&pci->gpio_chip, pin);
break;
@@ -568,7 +568,7 @@ static int rk805_pinconf_set(struct pinctrl_dev *pctldev,
arg = pinconf_to_config_argument(configs[i]);
switch (param) {
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
rk805_gpio_set(&pci->gpio_chip, pin, arg);
rk805_pmx_gpio_set_direction(pctldev, NULL, pin, false);
break;
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 930c454e0cec..7a68a6237649 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -3272,7 +3272,7 @@ static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
param = pinconf_to_config_param(configs[i]);
arg = pinconf_to_config_argument(configs[i]);
- if (param == PIN_CONFIG_OUTPUT || param == PIN_CONFIG_INPUT_ENABLE) {
+ if (param == PIN_CONFIG_LEVEL || param == PIN_CONFIG_INPUT_ENABLE) {
/*
* Check for gpio driver not being probed yet.
* The lock makes sure that either gpio-probe has completed
@@ -3313,7 +3313,7 @@ static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
if (rc)
return rc;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
rc = rockchip_set_mux(bank, pin - bank->pin_base,
RK_FUNC_GPIO);
if (rc != RK_FUNC_GPIO)
@@ -3392,7 +3392,7 @@ static int rockchip_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
arg = 1;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
rc = rockchip_get_mux(bank, pin - bank->pin_base);
if (rc != RK_FUNC_GPIO)
return -EINVAL;
diff --git a/drivers/pinctrl/pinctrl-rp1.c b/drivers/pinctrl/pinctrl-rp1.c
index 6080b57a5d87..ffc2f0b460a6 100644
--- a/drivers/pinctrl/pinctrl-rp1.c
+++ b/drivers/pinctrl/pinctrl-rp1.c
@@ -851,7 +851,7 @@ static const struct gpio_chip rp1_gpio_chip = {
.direction_output = rp1_gpio_direction_output,
.get_direction = rp1_gpio_get_direction,
.get = rp1_gpio_get,
- .set_rv = rp1_gpio_set,
+ .set = rp1_gpio_set,
.base = -1,
.set_config = rp1_gpio_set_config,
.ngpio = RP1_NUM_GPIOS,
@@ -1440,7 +1440,7 @@ static int rp1_pinconf_set(struct pinctrl_dev *pctldev, unsigned int offset,
rp1_output_enable(pin, arg);
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
rp1_set_value(pin, arg);
rp1_set_dir(pin, RP1_DIR_OUTPUT);
rp1_set_fsel(pin, RP1_FSEL_GPIO);
@@ -1623,12 +1623,94 @@ MODULE_DEVICE_TABLE(of, rp1_pinctrl_match);
static struct rp1_pinctrl rp1_pinctrl_data = {};
-static const struct regmap_config rp1_pinctrl_regmap_cfg = {
+static const struct regmap_range rp1_gpio_reg_ranges[] = {
+ /* BANK 0 */
+ regmap_reg_range(0x2004, 0x20dc),
+ regmap_reg_range(0x3004, 0x30dc),
+ regmap_reg_range(0x0004, 0x00dc),
+ regmap_reg_range(0x0124, 0x0124),
+ regmap_reg_range(0x211c, 0x211c),
+ regmap_reg_range(0x311c, 0x311c),
+ /* BANK 1 */
+ regmap_reg_range(0x6004, 0x602c),
+ regmap_reg_range(0x7004, 0x702c),
+ regmap_reg_range(0x4004, 0x402c),
+ regmap_reg_range(0x4124, 0x4124),
+ regmap_reg_range(0x611c, 0x611c),
+ regmap_reg_range(0x711c, 0x711c),
+ /* BANK 2 */
+ regmap_reg_range(0xa004, 0xa09c),
+ regmap_reg_range(0xb004, 0xb09c),
+ regmap_reg_range(0x8004, 0x809c),
+ regmap_reg_range(0x8124, 0x8124),
+ regmap_reg_range(0xa11c, 0xa11c),
+ regmap_reg_range(0xb11c, 0xb11c),
+};
+
+static const struct regmap_range rp1_rio_reg_ranges[] = {
+ /* BANK 0 */
+ regmap_reg_range(0x2000, 0x2004),
+ regmap_reg_range(0x3000, 0x3004),
+ regmap_reg_range(0x0004, 0x0008),
+ /* BANK 1 */
+ regmap_reg_range(0x6000, 0x6004),
+ regmap_reg_range(0x7000, 0x7004),
+ regmap_reg_range(0x4004, 0x4008),
+ /* BANK 2 */
+ regmap_reg_range(0xa000, 0xa004),
+ regmap_reg_range(0xb000, 0xb004),
+ regmap_reg_range(0x8004, 0x8008),
+};
+
+static const struct regmap_range rp1_pads_reg_ranges[] = {
+ /* BANK 0 */
+ regmap_reg_range(0x0004, 0x0070),
+ /* BANK 1 */
+ regmap_reg_range(0x4004, 0x4018),
+ /* BANK 2 */
+ regmap_reg_range(0x8004, 0x8050),
+};
+
+static const struct regmap_access_table rp1_gpio_reg_table = {
+ .yes_ranges = rp1_gpio_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(rp1_gpio_reg_ranges),
+};
+
+static const struct regmap_access_table rp1_rio_reg_table = {
+ .yes_ranges = rp1_rio_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(rp1_rio_reg_ranges),
+};
+
+static const struct regmap_access_table rp1_pads_reg_table = {
+ .yes_ranges = rp1_pads_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(rp1_pads_reg_ranges),
+};
+
+static const struct regmap_config rp1_pinctrl_gpio_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .rd_table = &rp1_gpio_reg_table,
+ .name = "rp1-gpio",
+ .max_register = 0xb11c,
+};
+
+static const struct regmap_config rp1_pinctrl_rio_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .rd_table = &rp1_rio_reg_table,
+ .name = "rp1-rio",
+ .max_register = 0xb004,
+};
+
+static const struct regmap_config rp1_pinctrl_pads_regmap_cfg = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .fast_io = true,
- .name = "rp1-pinctrl",
+ .rd_table = &rp1_pads_reg_table,
+ .name = "rp1-pads",
+ .max_register = 0x8050,
};
static int rp1_gen_regfield(struct device *dev,
@@ -1685,17 +1767,17 @@ static int rp1_pinctrl_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(pc->pads_base), "could not get PADS IO memory\n");
gpio_regmap = devm_regmap_init_mmio(dev, pc->gpio_base,
- &rp1_pinctrl_regmap_cfg);
+ &rp1_pinctrl_gpio_regmap_cfg);
if (IS_ERR(gpio_regmap))
return dev_err_probe(dev, PTR_ERR(gpio_regmap), "could not init GPIO regmap\n");
rio_regmap = devm_regmap_init_mmio(dev, pc->rio_base,
- &rp1_pinctrl_regmap_cfg);
+ &rp1_pinctrl_rio_regmap_cfg);
if (IS_ERR(rio_regmap))
return dev_err_probe(dev, PTR_ERR(rio_regmap), "could not init RIO regmap\n");
pads_regmap = devm_regmap_init_mmio(dev, pc->pads_base,
- &rp1_pinctrl_regmap_cfg);
+ &rp1_pinctrl_pads_regmap_cfg);
if (IS_ERR(pads_regmap))
return dev_err_probe(dev, PTR_ERR(pads_regmap), "could not init PADS regmap\n");
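
The single shared rp1-pinctrl regmap config is split into three above, each with a read-access table so that only documented registers are readable; regmap rejects accesses outside the listed ranges, and its debugfs dump is limited accordingly. A compact sketch of the mechanism with a hypothetical range:

static const struct regmap_range my_rd_ranges[] = {
	regmap_reg_range(0x0004, 0x00dc),	/* hypothetical bank */
};

static const struct regmap_access_table my_rd_table = {
	.yes_ranges	= my_rd_ranges,
	.n_yes_ranges	= ARRAY_SIZE(my_rd_ranges),
};

static const struct regmap_config my_regmap_cfg = {
	.reg_bits	= 32,
	.val_bits	= 32,
	.reg_stride	= 4,
	.rd_table	= &my_rd_table,	/* reads outside the table fail */
	.max_register	= 0x00dc,
	.name		= "my-bank",
};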
diff --git a/drivers/pinctrl/pinctrl-scmi.c b/drivers/pinctrl/pinctrl-scmi.c
index 383681041e4c..d14528b9aa31 100644
--- a/drivers/pinctrl/pinctrl-scmi.c
+++ b/drivers/pinctrl/pinctrl-scmi.c
@@ -253,7 +253,7 @@ static int pinctrl_scmi_map_pinconf_type(enum pin_config_param param,
case PIN_CONFIG_MODE_LOW_POWER:
*type = SCMI_PIN_LOW_POWER_MODE;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
*type = SCMI_PIN_OUTPUT_VALUE;
break;
case PIN_CONFIG_OUTPUT_ENABLE:
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 5cda6201b60f..6d580aa282ec 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -336,7 +336,7 @@ static int pcs_get_function(struct pinctrl_dev *pctldev, unsigned pin,
struct pcs_device *pcs = pinctrl_dev_get_drvdata(pctldev);
struct pin_desc *pdesc = pin_desc_get(pctldev, pin);
const struct pinctrl_setting_mux *setting;
- struct function_desc *function;
+ const struct function_desc *function;
unsigned fselector;
/* If pin is not described in DTS & enabled, mux_setting is NULL. */
@@ -360,7 +360,7 @@ static int pcs_set_mux(struct pinctrl_dev *pctldev, unsigned fselector,
unsigned group)
{
struct pcs_device *pcs;
- struct function_desc *function;
+ const struct function_desc *function;
struct pcs_function *func;
int i;
@@ -589,8 +589,10 @@ static int pcs_pinconf_set(struct pinctrl_dev *pctldev,
/* 4 parameters */
case PIN_CONFIG_BIAS_PULL_DOWN:
case PIN_CONFIG_BIAS_PULL_UP:
- if (arg)
+ if (arg) {
pcs_pinconf_clear_bias(pctldev, pin);
+ data = pcs->read(pcs->base + offset);
+ }
fallthrough;
case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
data &= ~func->conf[i].mask;
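
The pinctrl-single hunk fixes a stale read-modify-write: pcs_pinconf_clear_bias() writes the register, so a value read before it would write the old bias bits straight back. A distilled sketch of the bug class (my_* names are stand-ins; the read/write function pointers mirror the driver's accessors):

struct my_pcs {
	void __iomem *base;
	unsigned int (*read)(const void __iomem *reg);
	void (*write)(unsigned int val, void __iomem *reg);
};

void my_clear_bias(struct my_pcs *pcs, unsigned int offset);	/* writes reg */

static void my_set_bits(struct my_pcs *pcs, unsigned int offset,
			unsigned int mask, unsigned int bits)
{
	unsigned int data = pcs->read(pcs->base + offset);

	my_clear_bias(pcs, offset);		/* modifies the register... */
	data = pcs->read(pcs->base + offset);	/* ...so refresh the cache */

	data &= ~mask;
	data |= bits & mask;
	pcs->write(data, pcs->base + offset);
}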
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 574fe2cbfbec..d3cea3437d7f 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1467,7 +1467,7 @@ static const struct gpio_chip st_gpio_template = {
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
.get = st_gpio_get,
- .set_rv = st_gpio_set,
+ .set = st_gpio_set,
.direction_input = pinctrl_gpio_direction_input,
.direction_output = st_gpio_direction_output,
.get_direction = st_gpio_get_direction,
diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
index f4fdcaa043e6..03ee13844b50 100644
--- a/drivers/pinctrl/pinctrl-stmfx.c
+++ b/drivers/pinctrl/pinctrl-stmfx.c
@@ -267,7 +267,7 @@ static int stmfx_pinconf_get(struct pinctrl_dev *pctldev,
if ((!dir && !type) || (dir && type))
arg = 1;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
if (dir)
return -EINVAL;
@@ -334,7 +334,7 @@ static int stmfx_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
if (ret)
return ret;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
ret = stmfx_gpio_direction_output(&pctl->gpio_chip,
pin, arg);
if (ret)
@@ -697,7 +697,7 @@ static int stmfx_pinctrl_probe(struct platform_device *pdev)
pctl->gpio_chip.direction_input = stmfx_gpio_direction_input;
pctl->gpio_chip.direction_output = stmfx_gpio_direction_output;
pctl->gpio_chip.get = stmfx_gpio_get;
- pctl->gpio_chip.set_rv = stmfx_gpio_set;
+ pctl->gpio_chip.set = stmfx_gpio_set;
pctl->gpio_chip.set_config = gpiochip_generic_config;
pctl->gpio_chip.base = -1;
pctl->gpio_chip.ngpio = pctl->pctl_desc.npins;
diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c
index d3a12c1c0de2..1d6760ffe809 100644
--- a/drivers/pinctrl/pinctrl-sx150x.c
+++ b/drivers/pinctrl/pinctrl-sx150x.c
@@ -611,7 +611,7 @@ static int sx150x_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
if (sx150x_pin_is_oscio(pctl, pin)) {
switch (param) {
case PIN_CONFIG_DRIVE_PUSH_PULL:
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
ret = regmap_read(pctl->regmap,
pctl->data->pri.x789.reg_clock,
&data);
@@ -705,7 +705,7 @@ static int sx150x_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
}
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
ret = sx150x_gpio_get_direction(&pctl->gpio, pin);
if (ret < 0)
return ret;
@@ -744,7 +744,7 @@ static int sx150x_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
arg = pinconf_to_config_argument(configs[i]);
if (sx150x_pin_is_oscio(pctl, pin)) {
- if (param == PIN_CONFIG_OUTPUT) {
+ if (param == PIN_CONFIG_LEVEL) {
ret = sx150x_gpio_direction_output(&pctl->gpio,
pin, arg);
if (ret < 0)
@@ -816,7 +816,7 @@ static int sx150x_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
ret = sx150x_gpio_direction_output(&pctl->gpio,
pin, arg);
if (ret < 0)
@@ -863,6 +863,7 @@ static const struct of_device_id sx150x_of_match[] = {
{ .compatible = "semtech,sx1509q", .data = &sx1509q_device_data },
{},
};
+MODULE_DEVICE_TABLE(of, sx150x_of_match);
static int sx150x_reset(struct sx150x_pinctrl *pctl)
{
@@ -1176,7 +1177,7 @@ static int sx150x_probe(struct i2c_client *client)
pctl->gpio.direction_input = sx150x_gpio_direction_input;
pctl->gpio.direction_output = sx150x_gpio_direction_output;
pctl->gpio.get = sx150x_gpio_get;
- pctl->gpio.set_rv = sx150x_gpio_set;
+ pctl->gpio.set = sx150x_gpio_set;
pctl->gpio.set_config = gpiochip_generic_config;
pctl->gpio.parent = dev;
pctl->gpio.can_sleep = true;
@@ -1191,7 +1192,7 @@ static int sx150x_probe(struct i2c_client *client)
* would require locking that is not in place at this time.
*/
if (pctl->data->model != SX150X_789)
- pctl->gpio.set_multiple_rv = sx150x_gpio_set_multiple;
+ pctl->gpio.set_multiple = sx150x_gpio_set_multiple;
/* Add Interrupt support if an irq is specified */
if (client->irq > 0) {
@@ -1266,3 +1267,6 @@ static int __init sx150x_init(void)
return i2c_add_driver(&sx150x_driver);
}
subsys_initcall(sx150x_init);
+
+MODULE_DESCRIPTION("Semtech SX150x I2C GPIO expander pinctrl driver");
+MODULE_LICENSE("GPL");
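
The sx150x additions look like pure metadata, but MODULE_DEVICE_TABLE() is functional: without it the OF match table is not emitted into the module's alias list, so udev/modprobe cannot autoload the driver from a compatible string. The pattern in isolation (compatible string is hypothetical):

static const struct of_device_id my_of_match[] = {
	{ .compatible = "vendor,my-expander" },
	{ }
};
MODULE_DEVICE_TABLE(of, my_of_match);	/* emits modalias entries */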
diff --git a/drivers/pinctrl/pinctrl-upboard.c b/drivers/pinctrl/pinctrl-upboard.c
new file mode 100644
index 000000000000..f8c8b9d84990
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-upboard.c
@@ -0,0 +1,1070 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * UP board pin control driver.
+ *
+ * Copyright (C) 2025 Bootlin
+ *
+ * Author: Thomas Richard <thomas.richard@bootlin.com>
+ */
+
+#include <linux/array_size.h>
+#include <linux/container_of.h>
+#include <linux/device.h>
+#include <linux/dmi.h>
+#include <linux/err.h>
+#include <linux/gpio/forwarder.h>
+#include <linux/mfd/upboard-fpga.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/seq_file.h>
+#include <linux/stddef.h>
+#include <linux/string_choices.h>
+#include <linux/types.h>
+
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include <linux/gpio/driver.h>
+#include <linux/gpio/consumer.h>
+
+#include "core.h"
+#include "pinmux.h"
+
+enum upboard_pin_mode {
+ UPBOARD_PIN_MODE_FUNCTION,
+ UPBOARD_PIN_MODE_GPIO_IN,
+ UPBOARD_PIN_MODE_GPIO_OUT,
+ UPBOARD_PIN_MODE_DISABLED,
+};
+
+struct upboard_pin {
+ struct regmap_field *funcbit;
+ struct regmap_field *enbit;
+ struct regmap_field *dirbit;
+};
+
+struct upboard_pingroup {
+ struct pingroup grp;
+ enum upboard_pin_mode mode;
+ const enum upboard_pin_mode *modes;
+};
+
+struct upboard_pinctrl_data {
+ const struct upboard_pingroup *groups;
+ size_t ngroups;
+ const struct pinfunction *funcs;
+ size_t nfuncs;
+ const unsigned int *pin_header;
+ size_t ngpio;
+};
+
+struct upboard_pinctrl {
+ struct device *dev;
+ struct pinctrl_dev *pctldev;
+ const struct upboard_pinctrl_data *pctrl_data;
+ struct gpio_pin_range pin_range;
+ struct upboard_pin *pins;
+};
+
+struct upboard_pinctrl_map {
+ const struct pinctrl_map *maps;
+ size_t nmaps;
+};
+
+enum upboard_func0_fpgabit {
+ UPBOARD_FUNC_I2C0_EN = 8,
+ UPBOARD_FUNC_I2C1_EN = 9,
+ UPBOARD_FUNC_CEC0_EN = 12,
+ UPBOARD_FUNC_ADC0_EN = 14,
+};
+
+static const struct reg_field upboard_i2c0_reg =
+ REG_FIELD(UPBOARD_REG_FUNC_EN0, UPBOARD_FUNC_I2C0_EN, UPBOARD_FUNC_I2C0_EN);
+
+static const struct reg_field upboard_i2c1_reg =
+ REG_FIELD(UPBOARD_REG_FUNC_EN0, UPBOARD_FUNC_I2C1_EN, UPBOARD_FUNC_I2C1_EN);
+
+static const struct reg_field upboard_adc0_reg =
+ REG_FIELD(UPBOARD_REG_FUNC_EN0, UPBOARD_FUNC_ADC0_EN, UPBOARD_FUNC_ADC0_EN);
+
+#define UPBOARD_UP_BIT_TO_PIN(bit) UPBOARD_UP_BIT_##bit
+
+#define UPBOARD_UP_PIN_NAME(id) \
+ { \
+ .number = UPBOARD_UP_BIT_##id, \
+ .name = #id, \
+ }
+
+#define UPBOARD_UP_PIN_MUX(bit, data) \
+ { \
+ .number = UPBOARD_UP_BIT_##bit, \
+ .name = "PINMUX_"#bit, \
+ .drv_data = (void *)(data), \
+ }
+
+#define UPBOARD_UP_PIN_FUNC(id, data) \
+ { \
+ .number = UPBOARD_UP_BIT_##id, \
+ .name = #id, \
+ .drv_data = (void *)(data), \
+ }
+
+enum upboard_up_fpgabit {
+ UPBOARD_UP_BIT_I2C1_SDA,
+ UPBOARD_UP_BIT_I2C1_SCL,
+ UPBOARD_UP_BIT_ADC0,
+ UPBOARD_UP_BIT_UART1_RTS,
+ UPBOARD_UP_BIT_GPIO27,
+ UPBOARD_UP_BIT_GPIO22,
+ UPBOARD_UP_BIT_SPI_MOSI,
+ UPBOARD_UP_BIT_SPI_MISO,
+ UPBOARD_UP_BIT_SPI_CLK,
+ UPBOARD_UP_BIT_I2C0_SDA,
+ UPBOARD_UP_BIT_GPIO5,
+ UPBOARD_UP_BIT_GPIO6,
+ UPBOARD_UP_BIT_PWM1,
+ UPBOARD_UP_BIT_I2S_FRM,
+ UPBOARD_UP_BIT_GPIO26,
+ UPBOARD_UP_BIT_UART1_TX,
+ UPBOARD_UP_BIT_UART1_RX,
+ UPBOARD_UP_BIT_I2S_CLK,
+ UPBOARD_UP_BIT_GPIO23,
+ UPBOARD_UP_BIT_GPIO24,
+ UPBOARD_UP_BIT_GPIO25,
+ UPBOARD_UP_BIT_SPI_CS0,
+ UPBOARD_UP_BIT_SPI_CS1,
+ UPBOARD_UP_BIT_I2C0_SCL,
+ UPBOARD_UP_BIT_PWM0,
+ UPBOARD_UP_BIT_UART1_CTS,
+ UPBOARD_UP_BIT_I2S_DIN,
+ UPBOARD_UP_BIT_I2S_DOUT,
+};
+
+static const struct pinctrl_pin_desc upboard_up_pins[] = {
+ UPBOARD_UP_PIN_FUNC(I2C1_SDA, &upboard_i2c1_reg),
+ UPBOARD_UP_PIN_FUNC(I2C1_SCL, &upboard_i2c1_reg),
+ UPBOARD_UP_PIN_FUNC(ADC0, &upboard_adc0_reg),
+ UPBOARD_UP_PIN_NAME(UART1_RTS),
+ UPBOARD_UP_PIN_NAME(GPIO27),
+ UPBOARD_UP_PIN_NAME(GPIO22),
+ UPBOARD_UP_PIN_NAME(SPI_MOSI),
+ UPBOARD_UP_PIN_NAME(SPI_MISO),
+ UPBOARD_UP_PIN_NAME(SPI_CLK),
+ UPBOARD_UP_PIN_FUNC(I2C0_SDA, &upboard_i2c0_reg),
+ UPBOARD_UP_PIN_NAME(GPIO5),
+ UPBOARD_UP_PIN_NAME(GPIO6),
+ UPBOARD_UP_PIN_NAME(PWM1),
+ UPBOARD_UP_PIN_NAME(I2S_FRM),
+ UPBOARD_UP_PIN_NAME(GPIO26),
+ UPBOARD_UP_PIN_NAME(UART1_TX),
+ UPBOARD_UP_PIN_NAME(UART1_RX),
+ UPBOARD_UP_PIN_NAME(I2S_CLK),
+ UPBOARD_UP_PIN_NAME(GPIO23),
+ UPBOARD_UP_PIN_NAME(GPIO24),
+ UPBOARD_UP_PIN_NAME(GPIO25),
+ UPBOARD_UP_PIN_NAME(SPI_CS0),
+ UPBOARD_UP_PIN_NAME(SPI_CS1),
+ UPBOARD_UP_PIN_FUNC(I2C0_SCL, &upboard_i2c0_reg),
+ UPBOARD_UP_PIN_NAME(PWM0),
+ UPBOARD_UP_PIN_NAME(UART1_CTS),
+ UPBOARD_UP_PIN_NAME(I2S_DIN),
+ UPBOARD_UP_PIN_NAME(I2S_DOUT),
+};
+
+static const unsigned int upboard_up_pin_header[] = {
+ UPBOARD_UP_BIT_TO_PIN(I2C0_SDA),
+ UPBOARD_UP_BIT_TO_PIN(I2C0_SCL),
+ UPBOARD_UP_BIT_TO_PIN(I2C1_SDA),
+ UPBOARD_UP_BIT_TO_PIN(I2C1_SCL),
+ UPBOARD_UP_BIT_TO_PIN(ADC0),
+ UPBOARD_UP_BIT_TO_PIN(GPIO5),
+ UPBOARD_UP_BIT_TO_PIN(GPIO6),
+ UPBOARD_UP_BIT_TO_PIN(SPI_CS1),
+ UPBOARD_UP_BIT_TO_PIN(SPI_CS0),
+ UPBOARD_UP_BIT_TO_PIN(SPI_MISO),
+ UPBOARD_UP_BIT_TO_PIN(SPI_MOSI),
+ UPBOARD_UP_BIT_TO_PIN(SPI_CLK),
+ UPBOARD_UP_BIT_TO_PIN(PWM0),
+ UPBOARD_UP_BIT_TO_PIN(PWM1),
+ UPBOARD_UP_BIT_TO_PIN(UART1_TX),
+ UPBOARD_UP_BIT_TO_PIN(UART1_RX),
+ UPBOARD_UP_BIT_TO_PIN(UART1_CTS),
+ UPBOARD_UP_BIT_TO_PIN(UART1_RTS),
+ UPBOARD_UP_BIT_TO_PIN(I2S_CLK),
+ UPBOARD_UP_BIT_TO_PIN(I2S_FRM),
+ UPBOARD_UP_BIT_TO_PIN(I2S_DIN),
+ UPBOARD_UP_BIT_TO_PIN(I2S_DOUT),
+ UPBOARD_UP_BIT_TO_PIN(GPIO22),
+ UPBOARD_UP_BIT_TO_PIN(GPIO23),
+ UPBOARD_UP_BIT_TO_PIN(GPIO24),
+ UPBOARD_UP_BIT_TO_PIN(GPIO25),
+ UPBOARD_UP_BIT_TO_PIN(GPIO26),
+ UPBOARD_UP_BIT_TO_PIN(GPIO27),
+};
+
+static const unsigned int upboard_up_uart1_pins[] = {
+ UPBOARD_UP_BIT_TO_PIN(UART1_TX),
+ UPBOARD_UP_BIT_TO_PIN(UART1_RX),
+ UPBOARD_UP_BIT_TO_PIN(UART1_RTS),
+ UPBOARD_UP_BIT_TO_PIN(UART1_CTS),
+};
+
+static const enum upboard_pin_mode upboard_up_uart1_modes[] = {
+ UPBOARD_PIN_MODE_GPIO_OUT,
+ UPBOARD_PIN_MODE_GPIO_IN,
+ UPBOARD_PIN_MODE_GPIO_OUT,
+ UPBOARD_PIN_MODE_GPIO_IN,
+};
+
+static_assert(ARRAY_SIZE(upboard_up_uart1_modes) == ARRAY_SIZE(upboard_up_uart1_pins));
+
+static const unsigned int upboard_up_i2c0_pins[] = {
+ UPBOARD_UP_BIT_TO_PIN(I2C0_SCL),
+ UPBOARD_UP_BIT_TO_PIN(I2C0_SDA),
+};
+
+static const unsigned int upboard_up_i2c1_pins[] = {
+ UPBOARD_UP_BIT_TO_PIN(I2C1_SCL),
+ UPBOARD_UP_BIT_TO_PIN(I2C1_SDA),
+};
+
+static const unsigned int upboard_up_spi2_pins[] = {
+ UPBOARD_UP_BIT_TO_PIN(SPI_MOSI),
+ UPBOARD_UP_BIT_TO_PIN(SPI_MISO),
+ UPBOARD_UP_BIT_TO_PIN(SPI_CLK),
+ UPBOARD_UP_BIT_TO_PIN(SPI_CS0),
+ UPBOARD_UP_BIT_TO_PIN(SPI_CS1),
+};
+
+static const enum upboard_pin_mode upboard_up_spi2_modes[] = {
+ UPBOARD_PIN_MODE_GPIO_OUT,
+ UPBOARD_PIN_MODE_GPIO_IN,
+ UPBOARD_PIN_MODE_GPIO_OUT,
+ UPBOARD_PIN_MODE_GPIO_OUT,
+ UPBOARD_PIN_MODE_GPIO_OUT,
+};
+
+static_assert(ARRAY_SIZE(upboard_up_spi2_modes) == ARRAY_SIZE(upboard_up_spi2_pins));
+
+static const unsigned int upboard_up_i2s0_pins[] = {
+ UPBOARD_UP_BIT_TO_PIN(I2S_FRM),
+ UPBOARD_UP_BIT_TO_PIN(I2S_CLK),
+ UPBOARD_UP_BIT_TO_PIN(I2S_DIN),
+ UPBOARD_UP_BIT_TO_PIN(I2S_DOUT),
+};
+
+static const enum upboard_pin_mode upboard_up_i2s0_modes[] = {
+ UPBOARD_PIN_MODE_GPIO_OUT,
+ UPBOARD_PIN_MODE_GPIO_OUT,
+ UPBOARD_PIN_MODE_GPIO_IN,
+ UPBOARD_PIN_MODE_GPIO_OUT,
+};
+
+static_assert(ARRAY_SIZE(upboard_up_i2s0_pins) == ARRAY_SIZE(upboard_up_i2s0_modes));
+
+static const unsigned int upboard_up_pwm0_pins[] = {
+ UPBOARD_UP_BIT_TO_PIN(PWM0),
+};
+
+static const unsigned int upboard_up_pwm1_pins[] = {
+ UPBOARD_UP_BIT_TO_PIN(PWM1),
+};
+
+static const unsigned int upboard_up_adc0_pins[] = {
+ UPBOARD_UP_BIT_TO_PIN(ADC0),
+};
+
+#define UPBOARD_PINGROUP(n, p, m) \
+{ \
+ .grp = PINCTRL_PINGROUP(n, p, ARRAY_SIZE(p)), \
+ .mode = __builtin_choose_expr( \
+ __builtin_types_compatible_p(typeof(m), const enum upboard_pin_mode *), \
+ 0, m), \
+ .modes = __builtin_choose_expr( \
+ __builtin_types_compatible_p(typeof(m), const enum upboard_pin_mode *), \
+ m, NULL), \
+}
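The UPBOARD_PINGROUP macro above uses GCC's __builtin_types_compatible_p/__builtin_choose_expr pair so a group can be declared with either one mode for all pins or a per-pin mode array, dispatched at compile time. A minimal standalone sketch of the same trick (illustrative names only, not driver code):

#include <stdio.h>
#include <stddef.h>

enum mode { MODE_IN = 1, MODE_OUT };

struct group {
	enum mode mode;		/* one mode for the whole group, or 0 */
	const enum mode *modes;	/* one mode per pin, or NULL */
};

/*
 * If 'm' is a 'const enum mode *', it lands in .modes and .mode stays 0;
 * otherwise the scalar lands in .mode and .modes stays NULL.  Both arms
 * of __builtin_choose_expr must be valid expressions, but only the
 * chosen one contributes a value.
 */
#define GROUP(m)							\
{									\
	.mode = __builtin_choose_expr(					\
		__builtin_types_compatible_p(typeof(m), const enum mode *), \
		0, m),							\
	.modes = __builtin_choose_expr(					\
		__builtin_types_compatible_p(typeof(m), const enum mode *), \
		m, NULL),						\
}

static const enum mode per_pin[] = { MODE_OUT, MODE_IN };

int main(void)
{
	struct group scalar = GROUP(MODE_OUT);
	struct group array = GROUP(&per_pin[0]);

	printf("scalar: mode=%d modes=%p\n", scalar.mode, (void *)scalar.modes);
	printf("array: modes[1]=%d\n", array.modes[1]);
	return 0;
}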
+
+static const struct upboard_pingroup upboard_up_pin_groups[] = {
+ UPBOARD_PINGROUP("uart1_grp", upboard_up_uart1_pins, &upboard_up_uart1_modes[0]),
+ UPBOARD_PINGROUP("i2c0_grp", upboard_up_i2c0_pins, UPBOARD_PIN_MODE_GPIO_OUT),
+ UPBOARD_PINGROUP("i2c1_grp", upboard_up_i2c1_pins, UPBOARD_PIN_MODE_GPIO_OUT),
+ UPBOARD_PINGROUP("spi2_grp", upboard_up_spi2_pins, &upboard_up_spi2_modes[0]),
+ UPBOARD_PINGROUP("i2s0_grp", upboard_up_i2s0_pins, &upboard_up_i2s0_modes[0]),
+ UPBOARD_PINGROUP("pwm0_grp", upboard_up_pwm0_pins, UPBOARD_PIN_MODE_GPIO_OUT),
+ UPBOARD_PINGROUP("pwm1_grp", upboard_up_pwm1_pins, UPBOARD_PIN_MODE_GPIO_OUT),
+ UPBOARD_PINGROUP("adc0_grp", upboard_up_adc0_pins, UPBOARD_PIN_MODE_GPIO_IN),
+};
+
+static const char * const upboard_up_uart1_groups[] = { "uart1_grp" };
+static const char * const upboard_up_i2c0_groups[] = { "i2c0_grp" };
+static const char * const upboard_up_i2c1_groups[] = { "i2c1_grp" };
+static const char * const upboard_up_spi2_groups[] = { "spi2_grp" };
+static const char * const upboard_up_i2s0_groups[] = { "i2s0_grp" };
+static const char * const upboard_up_pwm0_groups[] = { "pwm0_grp" };
+static const char * const upboard_up_pwm1_groups[] = { "pwm1_grp" };
+static const char * const upboard_up_adc0_groups[] = { "adc0_grp" };
+
+#define UPBOARD_FUNCTION(func, groups) PINCTRL_PINFUNCTION(func, groups, ARRAY_SIZE(groups))
+
+static const struct pinfunction upboard_up_pin_functions[] = {
+ UPBOARD_FUNCTION("uart1", upboard_up_uart1_groups),
+ UPBOARD_FUNCTION("i2c0", upboard_up_i2c0_groups),
+ UPBOARD_FUNCTION("i2c1", upboard_up_i2c1_groups),
+ UPBOARD_FUNCTION("spi2", upboard_up_spi2_groups),
+ UPBOARD_FUNCTION("i2s0", upboard_up_i2s0_groups),
+ UPBOARD_FUNCTION("pwm0", upboard_up_pwm0_groups),
+ UPBOARD_FUNCTION("pwm1", upboard_up_pwm1_groups),
+ UPBOARD_FUNCTION("adc0", upboard_up_adc0_groups),
+};
+
+static const struct upboard_pinctrl_data upboard_up_pinctrl_data = {
+ .groups = &upboard_up_pin_groups[0],
+ .ngroups = ARRAY_SIZE(upboard_up_pin_groups),
+ .funcs = &upboard_up_pin_functions[0],
+ .nfuncs = ARRAY_SIZE(upboard_up_pin_functions),
+ .pin_header = &upboard_up_pin_header[0],
+ .ngpio = ARRAY_SIZE(upboard_up_pin_header),
+};
+
+#define UPBOARD_UP2_BIT_TO_PIN(bit) UPBOARD_UP2_BIT_##bit
+
+#define UPBOARD_UP2_PIN_NAME(id) \
+ { \
+ .number = UPBOARD_UP2_BIT_##id, \
+ .name = #id, \
+ }
+
+#define UPBOARD_UP2_PIN_MUX(bit, data) \
+ { \
+ .number = UPBOARD_UP2_BIT_##bit, \
+ .name = "PINMUX_"#bit, \
+ .drv_data = (void *)(data), \
+ }
+
+#define UPBOARD_UP2_PIN_FUNC(id, data) \
+ { \
+ .number = UPBOARD_UP2_BIT_##id, \
+ .name = #id, \
+ .drv_data = (void *)(data), \
+ }
+
+enum upboard_up2_fpgabit {
+ UPBOARD_UP2_BIT_UART1_TXD,
+ UPBOARD_UP2_BIT_UART1_RXD,
+ UPBOARD_UP2_BIT_UART1_RTS,
+ UPBOARD_UP2_BIT_UART1_CTS,
+ UPBOARD_UP2_BIT_GPIO3_ADC0,
+ UPBOARD_UP2_BIT_GPIO5_ADC2,
+ UPBOARD_UP2_BIT_GPIO6_ADC3,
+ UPBOARD_UP2_BIT_GPIO11,
+ UPBOARD_UP2_BIT_EXHAT_LVDS1n,
+ UPBOARD_UP2_BIT_EXHAT_LVDS1p,
+ UPBOARD_UP2_BIT_SPI2_TXD,
+ UPBOARD_UP2_BIT_SPI2_RXD,
+ UPBOARD_UP2_BIT_SPI2_FS1,
+ UPBOARD_UP2_BIT_SPI2_FS0,
+ UPBOARD_UP2_BIT_SPI2_CLK,
+ UPBOARD_UP2_BIT_SPI1_TXD,
+ UPBOARD_UP2_BIT_SPI1_RXD,
+ UPBOARD_UP2_BIT_SPI1_FS1,
+ UPBOARD_UP2_BIT_SPI1_FS0,
+ UPBOARD_UP2_BIT_SPI1_CLK,
+ UPBOARD_UP2_BIT_I2C0_SCL,
+ UPBOARD_UP2_BIT_I2C0_SDA,
+ UPBOARD_UP2_BIT_I2C1_SCL,
+ UPBOARD_UP2_BIT_I2C1_SDA,
+ UPBOARD_UP2_BIT_PWM1,
+ UPBOARD_UP2_BIT_PWM0,
+ UPBOARD_UP2_BIT_EXHAT_LVDS0n,
+ UPBOARD_UP2_BIT_EXHAT_LVDS0p,
+ UPBOARD_UP2_BIT_GPIO24,
+ UPBOARD_UP2_BIT_GPIO10,
+ UPBOARD_UP2_BIT_GPIO2,
+ UPBOARD_UP2_BIT_GPIO1,
+ UPBOARD_UP2_BIT_EXHAT_LVDS3n,
+ UPBOARD_UP2_BIT_EXHAT_LVDS3p,
+ UPBOARD_UP2_BIT_EXHAT_LVDS4n,
+ UPBOARD_UP2_BIT_EXHAT_LVDS4p,
+ UPBOARD_UP2_BIT_EXHAT_LVDS5n,
+ UPBOARD_UP2_BIT_EXHAT_LVDS5p,
+ UPBOARD_UP2_BIT_I2S_SDO,
+ UPBOARD_UP2_BIT_I2S_SDI,
+ UPBOARD_UP2_BIT_I2S_WS_SYNC,
+ UPBOARD_UP2_BIT_I2S_BCLK,
+ UPBOARD_UP2_BIT_EXHAT_LVDS6n,
+ UPBOARD_UP2_BIT_EXHAT_LVDS6p,
+ UPBOARD_UP2_BIT_EXHAT_LVDS7n,
+ UPBOARD_UP2_BIT_EXHAT_LVDS7p,
+ UPBOARD_UP2_BIT_EXHAT_LVDS2n,
+ UPBOARD_UP2_BIT_EXHAT_LVDS2p,
+};
+
+static const struct pinctrl_pin_desc upboard_up2_pins[] = {
+ UPBOARD_UP2_PIN_NAME(UART1_TXD),
+ UPBOARD_UP2_PIN_NAME(UART1_RXD),
+ UPBOARD_UP2_PIN_NAME(UART1_RTS),
+ UPBOARD_UP2_PIN_NAME(UART1_CTS),
+ UPBOARD_UP2_PIN_NAME(GPIO3_ADC0),
+ UPBOARD_UP2_PIN_NAME(GPIO5_ADC2),
+ UPBOARD_UP2_PIN_NAME(GPIO6_ADC3),
+ UPBOARD_UP2_PIN_NAME(GPIO11),
+ UPBOARD_UP2_PIN_NAME(EXHAT_LVDS1n),
+ UPBOARD_UP2_PIN_NAME(EXHAT_LVDS1p),
+ UPBOARD_UP2_PIN_NAME(SPI2_TXD),
+ UPBOARD_UP2_PIN_NAME(SPI2_RXD),
+ UPBOARD_UP2_PIN_NAME(SPI2_FS1),
+ UPBOARD_UP2_PIN_NAME(SPI2_FS0),
+ UPBOARD_UP2_PIN_NAME(SPI2_CLK),
+ UPBOARD_UP2_PIN_NAME(SPI1_TXD),
+ UPBOARD_UP2_PIN_NAME(SPI1_RXD),
+ UPBOARD_UP2_PIN_NAME(SPI1_FS1),
+ UPBOARD_UP2_PIN_NAME(SPI1_FS0),
+ UPBOARD_UP2_PIN_NAME(SPI1_CLK),
+ UPBOARD_UP2_PIN_MUX(I2C0_SCL, &upboard_i2c0_reg),
+ UPBOARD_UP2_PIN_MUX(I2C0_SDA, &upboard_i2c0_reg),
+ UPBOARD_UP2_PIN_MUX(I2C1_SCL, &upboard_i2c1_reg),
+ UPBOARD_UP2_PIN_MUX(I2C1_SDA, &upboard_i2c1_reg),
+ UPBOARD_UP2_PIN_NAME(PWM1),
+ UPBOARD_UP2_PIN_NAME(PWM0),
+ UPBOARD_UP2_PIN_NAME(EXHAT_LVDS0n),
+ UPBOARD_UP2_PIN_NAME(EXHAT_LVDS0p),
+ UPBOARD_UP2_PIN_MUX(GPIO24, &upboard_i2c0_reg),
+ UPBOARD_UP2_PIN_MUX(GPIO10, &upboard_i2c0_reg),
+ UPBOARD_UP2_PIN_MUX(GPIO2, &upboard_i2c1_reg),
+ UPBOARD_UP2_PIN_MUX(GPIO1, &upboard_i2c1_reg),
+ UPBOARD_UP2_PIN_NAME(EXHAT_LVDS3n),
+ UPBOARD_UP2_PIN_NAME(EXHAT_LVDS3p),
+ UPBOARD_UP2_PIN_NAME(EXHAT_LVDS4n),
+ UPBOARD_UP2_PIN_NAME(EXHAT_LVDS4p),
+ UPBOARD_UP2_PIN_NAME(EXHAT_LVDS5n),
+ UPBOARD_UP2_PIN_NAME(EXHAT_LVDS5p),
+ UPBOARD_UP2_PIN_NAME(I2S_SDO),
+ UPBOARD_UP2_PIN_NAME(I2S_SDI),
+ UPBOARD_UP2_PIN_NAME(I2S_WS_SYNC),
+ UPBOARD_UP2_PIN_NAME(I2S_BCLK),
+ UPBOARD_UP2_PIN_NAME(EXHAT_LVDS6n),
+ UPBOARD_UP2_PIN_NAME(EXHAT_LVDS6p),
+ UPBOARD_UP2_PIN_NAME(EXHAT_LVDS7n),
+ UPBOARD_UP2_PIN_NAME(EXHAT_LVDS7p),
+ UPBOARD_UP2_PIN_NAME(EXHAT_LVDS2n),
+ UPBOARD_UP2_PIN_NAME(EXHAT_LVDS2p),
+};
+
+static const unsigned int upboard_up2_pin_header[] = {
+ UPBOARD_UP2_BIT_TO_PIN(GPIO10),
+ UPBOARD_UP2_BIT_TO_PIN(GPIO24),
+ UPBOARD_UP2_BIT_TO_PIN(GPIO1),
+ UPBOARD_UP2_BIT_TO_PIN(GPIO2),
+ UPBOARD_UP2_BIT_TO_PIN(GPIO3_ADC0),
+ UPBOARD_UP2_BIT_TO_PIN(GPIO11),
+ UPBOARD_UP2_BIT_TO_PIN(SPI2_CLK),
+ UPBOARD_UP2_BIT_TO_PIN(SPI1_FS1),
+ UPBOARD_UP2_BIT_TO_PIN(SPI1_FS0),
+ UPBOARD_UP2_BIT_TO_PIN(SPI1_RXD),
+ UPBOARD_UP2_BIT_TO_PIN(SPI1_TXD),
+ UPBOARD_UP2_BIT_TO_PIN(SPI1_CLK),
+ UPBOARD_UP2_BIT_TO_PIN(PWM0),
+ UPBOARD_UP2_BIT_TO_PIN(PWM1),
+ UPBOARD_UP2_BIT_TO_PIN(UART1_TXD),
+ UPBOARD_UP2_BIT_TO_PIN(UART1_RXD),
+ UPBOARD_UP2_BIT_TO_PIN(UART1_CTS),
+ UPBOARD_UP2_BIT_TO_PIN(UART1_RTS),
+ UPBOARD_UP2_BIT_TO_PIN(I2S_BCLK),
+ UPBOARD_UP2_BIT_TO_PIN(I2S_WS_SYNC),
+ UPBOARD_UP2_BIT_TO_PIN(I2S_SDI),
+ UPBOARD_UP2_BIT_TO_PIN(I2S_SDO),
+ UPBOARD_UP2_BIT_TO_PIN(GPIO6_ADC3),
+ UPBOARD_UP2_BIT_TO_PIN(SPI2_FS1),
+ UPBOARD_UP2_BIT_TO_PIN(SPI2_RXD),
+ UPBOARD_UP2_BIT_TO_PIN(SPI2_TXD),
+ UPBOARD_UP2_BIT_TO_PIN(SPI2_FS0),
+ UPBOARD_UP2_BIT_TO_PIN(GPIO5_ADC2),
+};
+
+static const unsigned int upboard_up2_uart1_pins[] = {
+ UPBOARD_UP2_BIT_TO_PIN(UART1_TXD),
+ UPBOARD_UP2_BIT_TO_PIN(UART1_RXD),
+ UPBOARD_UP2_BIT_TO_PIN(UART1_RTS),
+ UPBOARD_UP2_BIT_TO_PIN(UART1_CTS),
+};
+
+static const enum upboard_pin_mode upboard_up2_uart1_modes[] = {
+ UPBOARD_PIN_MODE_GPIO_OUT,
+ UPBOARD_PIN_MODE_GPIO_IN,
+ UPBOARD_PIN_MODE_GPIO_OUT,
+ UPBOARD_PIN_MODE_GPIO_IN,
+};
+
+static_assert(ARRAY_SIZE(upboard_up2_uart1_modes) == ARRAY_SIZE(upboard_up2_uart1_pins));
+
+static const unsigned int upboard_up2_i2c0_pins[] = {
+ UPBOARD_UP2_BIT_TO_PIN(I2C0_SCL),
+ UPBOARD_UP2_BIT_TO_PIN(I2C0_SDA),
+ UPBOARD_UP2_BIT_TO_PIN(GPIO24),
+ UPBOARD_UP2_BIT_TO_PIN(GPIO10),
+};
+
+static const unsigned int upboard_up2_i2c1_pins[] = {
+ UPBOARD_UP2_BIT_TO_PIN(I2C1_SCL),
+ UPBOARD_UP2_BIT_TO_PIN(I2C1_SDA),
+ UPBOARD_UP2_BIT_TO_PIN(GPIO2),
+ UPBOARD_UP2_BIT_TO_PIN(GPIO1),
+};
+
+static const unsigned int upboard_up2_spi1_pins[] = {
+ UPBOARD_UP2_BIT_TO_PIN(SPI1_TXD),
+ UPBOARD_UP2_BIT_TO_PIN(SPI1_RXD),
+ UPBOARD_UP2_BIT_TO_PIN(SPI1_FS1),
+ UPBOARD_UP2_BIT_TO_PIN(SPI1_FS0),
+ UPBOARD_UP2_BIT_TO_PIN(SPI1_CLK),
+};
+
+static const unsigned int upboard_up2_spi2_pins[] = {
+ UPBOARD_UP2_BIT_TO_PIN(SPI2_TXD),
+ UPBOARD_UP2_BIT_TO_PIN(SPI2_RXD),
+ UPBOARD_UP2_BIT_TO_PIN(SPI2_FS1),
+ UPBOARD_UP2_BIT_TO_PIN(SPI2_FS0),
+ UPBOARD_UP2_BIT_TO_PIN(SPI2_CLK),
+};
+
+static const enum upboard_pin_mode upboard_up2_spi_modes[] = {
+ UPBOARD_PIN_MODE_GPIO_OUT,
+ UPBOARD_PIN_MODE_GPIO_IN,
+ UPBOARD_PIN_MODE_GPIO_OUT,
+ UPBOARD_PIN_MODE_GPIO_OUT,
+ UPBOARD_PIN_MODE_GPIO_OUT,
+};
+
+static_assert(ARRAY_SIZE(upboard_up2_spi_modes) == ARRAY_SIZE(upboard_up2_spi1_pins));
+
+static_assert(ARRAY_SIZE(upboard_up2_spi_modes) == ARRAY_SIZE(upboard_up2_spi2_pins));
+
+static const unsigned int upboard_up2_i2s0_pins[] = {
+ UPBOARD_UP2_BIT_TO_PIN(I2S_BCLK),
+ UPBOARD_UP2_BIT_TO_PIN(I2S_WS_SYNC),
+ UPBOARD_UP2_BIT_TO_PIN(I2S_SDI),
+ UPBOARD_UP2_BIT_TO_PIN(I2S_SDO),
+};
+
+static const enum upboard_pin_mode upboard_up2_i2s0_modes[] = {
+ UPBOARD_PIN_MODE_GPIO_OUT,
+ UPBOARD_PIN_MODE_GPIO_OUT,
+ UPBOARD_PIN_MODE_GPIO_IN,
+ UPBOARD_PIN_MODE_GPIO_OUT,
+};
+
+static_assert(ARRAY_SIZE(upboard_up2_i2s0_modes) == ARRAY_SIZE(upboard_up2_i2s0_pins));
+
+static const unsigned int upboard_up2_pwm0_pins[] = {
+ UPBOARD_UP2_BIT_TO_PIN(PWM0),
+};
+
+static const unsigned int upboard_up2_pwm1_pins[] = {
+ UPBOARD_UP2_BIT_TO_PIN(PWM1),
+};
+
+static const unsigned int upboard_up2_adc0_pins[] = {
+ UPBOARD_UP2_BIT_TO_PIN(GPIO3_ADC0),
+};
+
+static const unsigned int upboard_up2_adc2_pins[] = {
+ UPBOARD_UP2_BIT_TO_PIN(GPIO5_ADC2),
+};
+
+static const unsigned int upboard_up2_adc3_pins[] = {
+ UPBOARD_UP2_BIT_TO_PIN(GPIO6_ADC3),
+};
+
+static const struct upboard_pingroup upboard_up2_pin_groups[] = {
+ UPBOARD_PINGROUP("uart1_grp", upboard_up2_uart1_pins, &upboard_up2_uart1_modes[0]),
+ UPBOARD_PINGROUP("i2c0_grp", upboard_up2_i2c0_pins, UPBOARD_PIN_MODE_FUNCTION),
+ UPBOARD_PINGROUP("i2c1_grp", upboard_up2_i2c1_pins, UPBOARD_PIN_MODE_FUNCTION),
+ UPBOARD_PINGROUP("spi1_grp", upboard_up2_spi1_pins, &upboard_up2_spi_modes[0]),
+ UPBOARD_PINGROUP("spi2_grp", upboard_up2_spi2_pins, &upboard_up2_spi_modes[0]),
+ UPBOARD_PINGROUP("i2s0_grp", upboard_up2_i2s0_pins, &upboard_up2_i2s0_modes[0]),
+ UPBOARD_PINGROUP("pwm0_grp", upboard_up2_pwm0_pins, UPBOARD_PIN_MODE_GPIO_OUT),
+ UPBOARD_PINGROUP("pwm1_grp", upboard_up2_pwm1_pins, UPBOARD_PIN_MODE_GPIO_OUT),
+ UPBOARD_PINGROUP("adc0_grp", upboard_up2_adc0_pins, UPBOARD_PIN_MODE_GPIO_IN),
+ UPBOARD_PINGROUP("adc2_grp", upboard_up2_adc2_pins, UPBOARD_PIN_MODE_GPIO_IN),
+ UPBOARD_PINGROUP("adc3_grp", upboard_up2_adc3_pins, UPBOARD_PIN_MODE_GPIO_IN),
+};
+
+static const char * const upboard_up2_uart1_groups[] = { "uart1_grp" };
+static const char * const upboard_up2_i2c0_groups[] = { "i2c0_grp" };
+static const char * const upboard_up2_i2c1_groups[] = { "i2c1_grp" };
+static const char * const upboard_up2_spi1_groups[] = { "spi1_grp" };
+static const char * const upboard_up2_spi2_groups[] = { "spi2_grp" };
+static const char * const upboard_up2_i2s0_groups[] = { "i2s0_grp" };
+static const char * const upboard_up2_pwm0_groups[] = { "pwm0_grp" };
+static const char * const upboard_up2_pwm1_groups[] = { "pwm1_grp" };
+static const char * const upboard_up2_adc0_groups[] = { "adc0_grp" };
+static const char * const upboard_up2_adc2_groups[] = { "adc2_grp" };
+static const char * const upboard_up2_adc3_groups[] = { "adc3_grp" };
+
+static const struct pinfunction upboard_up2_pin_functions[] = {
+ UPBOARD_FUNCTION("uart1", upboard_up2_uart1_groups),
+ UPBOARD_FUNCTION("i2c0", upboard_up2_i2c0_groups),
+ UPBOARD_FUNCTION("i2c1", upboard_up2_i2c1_groups),
+ UPBOARD_FUNCTION("spi1", upboard_up2_spi1_groups),
+ UPBOARD_FUNCTION("spi2", upboard_up2_spi2_groups),
+ UPBOARD_FUNCTION("i2s0", upboard_up2_i2s0_groups),
+ UPBOARD_FUNCTION("pwm0", upboard_up2_pwm0_groups),
+ UPBOARD_FUNCTION("pwm1", upboard_up2_pwm1_groups),
+ UPBOARD_FUNCTION("adc0", upboard_up2_adc0_groups),
+ UPBOARD_FUNCTION("adc2", upboard_up2_adc2_groups),
+ UPBOARD_FUNCTION("adc3", upboard_up2_adc3_groups),
+};
+
+static const struct upboard_pinctrl_data upboard_up2_pinctrl_data = {
+ .groups = &upboard_up2_pin_groups[0],
+ .ngroups = ARRAY_SIZE(upboard_up2_pin_groups),
+ .funcs = &upboard_up2_pin_functions[0],
+ .nfuncs = ARRAY_SIZE(upboard_up2_pin_functions),
+ .pin_header = &upboard_up2_pin_header[0],
+ .ngpio = ARRAY_SIZE(upboard_up2_pin_header),
+};
+
+static int upboard_pinctrl_set_function(struct pinctrl_dev *pctldev, unsigned int offset)
+{
+ struct upboard_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct upboard_pin *p = &pctrl->pins[offset];
+ int ret;
+
+ if (!p->funcbit)
+ return -EPERM;
+
+ ret = regmap_field_write(p->enbit, 0);
+ if (ret)
+ return ret;
+
+ return regmap_field_write(p->funcbit, 1);
+}
+
+static int upboard_pinctrl_gpio_commit_enable(struct pinctrl_dev *pctldev, unsigned int offset)
+{
+ struct upboard_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct upboard_pin *p = &pctrl->pins[offset];
+ int ret;
+
+ if (p->funcbit) {
+ ret = regmap_field_write(p->funcbit, 0);
+ if (ret)
+ return ret;
+ }
+
+ return regmap_field_write(p->enbit, 1);
+}
+
+static int upboard_pinctrl_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset)
+{
+ return upboard_pinctrl_gpio_commit_enable(pctldev, offset);
+}
+
+static void upboard_pinctrl_gpio_commit_disable(struct pinctrl_dev *pctldev, unsigned int offset)
+{
+ struct upboard_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct upboard_pin *p = &pctrl->pins[offset];
+
+ regmap_field_write(p->enbit, 0);
+}
+
+static void upboard_pinctrl_gpio_disable_free(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range, unsigned int offset)
+{
+	upboard_pinctrl_gpio_commit_disable(pctldev, offset);
+}
+
+static int upboard_pinctrl_gpio_commit_direction(struct pinctrl_dev *pctldev, unsigned int offset,
+ bool input)
+{
+ struct upboard_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct upboard_pin *p = &pctrl->pins[offset];
+
+ return regmap_field_write(p->dirbit, input);
+}
+
+static int upboard_pinctrl_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset, bool input)
+{
+ return upboard_pinctrl_gpio_commit_direction(pctldev, offset, input);
+}
+
+static int upboard_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
+ unsigned int group_selector)
+{
+ struct upboard_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct upboard_pinctrl_data *pctrl_data = pctrl->pctrl_data;
+ const struct upboard_pingroup *upgroups = pctrl_data->groups;
+ struct group_desc *grp;
+ unsigned int mode, i;
+ int ret;
+
+ grp = pinctrl_generic_get_group(pctldev, group_selector);
+ if (!grp)
+ return -EINVAL;
+
+ for (i = 0; i < grp->grp.npins; i++) {
+		/* Groups declared with a single mode leave .modes NULL */
+		mode = upgroups[group_selector].modes ?
+		       upgroups[group_selector].modes[i] : upgroups[group_selector].mode;
+ if (mode == UPBOARD_PIN_MODE_FUNCTION) {
+ ret = upboard_pinctrl_set_function(pctldev, grp->grp.pins[i]);
+ if (ret)
+ return ret;
+
+ continue;
+ }
+
+ ret = upboard_pinctrl_gpio_commit_enable(pctldev, grp->grp.pins[i]);
+ if (ret)
+ return ret;
+
+ ret = upboard_pinctrl_gpio_commit_direction(pctldev, grp->grp.pins[i],
+ mode == UPBOARD_PIN_MODE_GPIO_IN);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct pinmux_ops upboard_pinmux_ops = {
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
+ .set_mux = upboard_pinctrl_set_mux,
+ .gpio_request_enable = upboard_pinctrl_gpio_request_enable,
+ .gpio_disable_free = upboard_pinctrl_gpio_disable_free,
+ .gpio_set_direction = upboard_pinctrl_gpio_set_direction,
+};
+
+static int upboard_pinctrl_pin_get_mode(struct pinctrl_dev *pctldev, unsigned int pin)
+{
+ struct upboard_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct upboard_pin *p = &pctrl->pins[pin];
+ unsigned int val;
+ int ret;
+
+ if (p->funcbit) {
+ ret = regmap_field_read(p->funcbit, &val);
+ if (ret)
+ return ret;
+ if (val)
+ return UPBOARD_PIN_MODE_FUNCTION;
+ }
+
+ ret = regmap_field_read(p->enbit, &val);
+ if (ret)
+ return ret;
+ if (!val)
+ return UPBOARD_PIN_MODE_DISABLED;
+
+ ret = regmap_field_read(p->dirbit, &val);
+ if (ret)
+ return ret;
+
+ return val ? UPBOARD_PIN_MODE_GPIO_IN : UPBOARD_PIN_MODE_GPIO_OUT;
+}
+
+static void upboard_pinctrl_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
+ unsigned int offset)
+{
+ int ret;
+
+ ret = upboard_pinctrl_pin_get_mode(pctldev, offset);
+ if (ret == UPBOARD_PIN_MODE_FUNCTION)
+ seq_puts(s, "mode function ");
+ else if (ret == UPBOARD_PIN_MODE_DISABLED)
+ seq_puts(s, "HIGH-Z ");
+ else if (ret < 0)
+ seq_puts(s, "N/A ");
+ else
+ seq_printf(s, "GPIO (%s) ", str_input_output(ret == UPBOARD_PIN_MODE_GPIO_IN));
+}
+
+static const struct pinctrl_ops upboard_pinctrl_ops = {
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
+ .pin_dbg_show = upboard_pinctrl_dbg_show,
+};
+
+static int upboard_gpio_request(struct gpio_chip *gc, unsigned int offset)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(gc);
+ struct upboard_pinctrl *pctrl = gpiochip_fwd_get_data(fwd);
+ unsigned int pin = pctrl->pctrl_data->pin_header[offset];
+ struct gpio_desc *desc;
+ int ret;
+
+ ret = pinctrl_gpio_request(gc, offset);
+ if (ret)
+ return ret;
+
+ desc = gpiod_get_index(pctrl->dev, "external", pin, 0);
+ if (IS_ERR(desc)) {
+ pinctrl_gpio_free(gc, offset);
+ return PTR_ERR(desc);
+ }
+
+ return gpiochip_fwd_desc_add(fwd, desc, offset);
+}
+
+static void upboard_gpio_free(struct gpio_chip *gc, unsigned int offset)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(gc);
+
+ gpiochip_fwd_desc_free(fwd, offset);
+ pinctrl_gpio_free(gc, offset);
+}
+
+static int upboard_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(gc);
+ struct upboard_pinctrl *pctrl = gpiochip_fwd_get_data(fwd);
+ unsigned int pin = pctrl->pctrl_data->pin_header[offset];
+ int mode;
+
+	/* Report input direction if the pin is in function mode or high-Z */
+ mode = upboard_pinctrl_pin_get_mode(pctrl->pctldev, pin);
+ if (mode < 0)
+ return mode;
+
+ if (mode == UPBOARD_PIN_MODE_GPIO_OUT)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
+}
+
+static int upboard_gpio_direction_input(struct gpio_chip *gc, unsigned int offset)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(gc);
+ int ret;
+
+ ret = pinctrl_gpio_direction_input(gc, offset);
+ if (ret)
+ return ret;
+
+ return gpiochip_fwd_gpio_direction_input(fwd, offset);
+}
+
+static int upboard_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(gc);
+ int ret;
+
+ ret = pinctrl_gpio_direction_output(gc, offset);
+ if (ret)
+ return ret;
+
+ return gpiochip_fwd_gpio_direction_output(fwd, offset, value);
+}
+
+static int upboard_pinctrl_register_groups(struct upboard_pinctrl *pctrl)
+{
+ const struct upboard_pingroup *groups = pctrl->pctrl_data->groups;
+ size_t ngroups = pctrl->pctrl_data->ngroups;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ngroups; i++) {
+ ret = pinctrl_generic_add_group(pctrl->pctldev, groups[i].grp.name,
+ groups[i].grp.pins, groups[i].grp.npins, pctrl);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int upboard_pinctrl_register_functions(struct upboard_pinctrl *pctrl)
+{
+ const struct pinfunction *funcs = pctrl->pctrl_data->funcs;
+ size_t nfuncs = pctrl->pctrl_data->nfuncs;
+ unsigned int i;
+ int ret;
+
+	for (i = 0; i < nfuncs; i++) {
+ ret = pinmux_generic_add_function(pctrl->pctldev, funcs[i].name,
+ funcs[i].groups, funcs[i].ngroups, NULL);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct pinctrl_map pinctrl_map_apl01[] = {
+ PIN_MAP_MUX_GROUP_DEFAULT("upboard-pinctrl", "INT3452:00", "pwm0_grp", "pwm0"),
+ PIN_MAP_MUX_GROUP_DEFAULT("upboard-pinctrl", "INT3452:00", "pwm1_grp", "pwm1"),
+ PIN_MAP_MUX_GROUP_DEFAULT("upboard-pinctrl", "INT3452:00", "uart1_grp", "uart1"),
+ PIN_MAP_MUX_GROUP_DEFAULT("upboard-pinctrl", "INT3452:02", "i2c0_grp", "i2c0"),
+ PIN_MAP_MUX_GROUP_DEFAULT("upboard-pinctrl", "INT3452:02", "i2c1_grp", "i2c1"),
+ PIN_MAP_MUX_GROUP_DEFAULT("upboard-pinctrl", "INT3452:01", "ssp0_grp", "ssp0"),
+};
+
+static const struct upboard_pinctrl_map upboard_pinctrl_map_apl01 = {
+ .maps = &pinctrl_map_apl01[0],
+ .nmaps = ARRAY_SIZE(pinctrl_map_apl01),
+};
+
+static const struct dmi_system_id dmi_platform_info[] = {
+ {
+ /* UP Squared */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AAEON"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "UP-APL01"),
+ },
+ .driver_data = (void *)&upboard_pinctrl_map_apl01,
+ },
+ { }
+};
+
+static int upboard_pinctrl_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct upboard_fpga *fpga = dev_get_drvdata(dev->parent);
+ const struct upboard_pinctrl_map *board_map;
+ const struct dmi_system_id *dmi_id;
+ struct pinctrl_desc *pctldesc;
+ struct upboard_pinctrl *pctrl;
+ struct upboard_pin *pins;
+ struct gpiochip_fwd *fwd;
+ struct pinctrl *pinctrl;
+ struct gpio_chip *chip;
+ unsigned int i;
+ int ret;
+
+ pctldesc = devm_kzalloc(dev, sizeof(*pctldesc), GFP_KERNEL);
+ if (!pctldesc)
+ return -ENOMEM;
+
+ pctrl = devm_kzalloc(dev, sizeof(*pctrl), GFP_KERNEL);
+ if (!pctrl)
+ return -ENOMEM;
+
+ switch (fpga->fpga_data->type) {
+ case UPBOARD_UP_FPGA:
+ pctldesc->pins = upboard_up_pins;
+ pctldesc->npins = ARRAY_SIZE(upboard_up_pins);
+ pctrl->pctrl_data = &upboard_up_pinctrl_data;
+ break;
+ case UPBOARD_UP2_FPGA:
+ pctldesc->pins = upboard_up2_pins;
+ pctldesc->npins = ARRAY_SIZE(upboard_up2_pins);
+ pctrl->pctrl_data = &upboard_up2_pinctrl_data;
+ break;
+ default:
+ return dev_err_probe(dev, -ENODEV, "Unsupported device type %d\n",
+ fpga->fpga_data->type);
+ }
+
+ dmi_id = dmi_first_match(dmi_platform_info);
+ if (!dmi_id)
+ return dev_err_probe(dev, -ENODEV, "Unsupported board\n");
+
+ board_map = (const struct upboard_pinctrl_map *)dmi_id->driver_data;
+
+ pctldesc->name = dev_name(dev);
+ pctldesc->owner = THIS_MODULE;
+ pctldesc->pctlops = &upboard_pinctrl_ops;
+ pctldesc->pmxops = &upboard_pinmux_ops;
+
+ pctrl->dev = dev;
+
+ pins = devm_kcalloc(dev, pctldesc->npins, sizeof(*pins), GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ /* Initialize pins */
+ for (i = 0; i < pctldesc->npins; i++) {
+ const struct pinctrl_pin_desc *pin_desc = &pctldesc->pins[i];
+ unsigned int regoff = pin_desc->number / UPBOARD_REGISTER_SIZE;
+ unsigned int lsb = pin_desc->number % UPBOARD_REGISTER_SIZE;
+ struct reg_field * const fld_func = pin_desc->drv_data;
+ struct upboard_pin *pin = &pins[i];
+ struct reg_field fldconf = {};
+
+ if (fld_func) {
+ pin->funcbit = devm_regmap_field_alloc(dev, fpga->regmap, *fld_func);
+ if (IS_ERR(pin->funcbit))
+ return PTR_ERR(pin->funcbit);
+ }
+
+ fldconf.reg = UPBOARD_REG_GPIO_EN0 + regoff;
+ fldconf.lsb = lsb;
+ fldconf.msb = lsb;
+ pin->enbit = devm_regmap_field_alloc(dev, fpga->regmap, fldconf);
+ if (IS_ERR(pin->enbit))
+ return PTR_ERR(pin->enbit);
+
+ fldconf.reg = UPBOARD_REG_GPIO_DIR0 + regoff;
+ fldconf.lsb = lsb;
+ fldconf.msb = lsb;
+ pin->dirbit = devm_regmap_field_alloc(dev, fpga->regmap, fldconf);
+ if (IS_ERR(pin->dirbit))
+ return PTR_ERR(pin->dirbit);
+ }
+
+ pctrl->pins = pins;
+
+ ret = devm_pinctrl_register_and_init(dev, pctldesc, pctrl, &pctrl->pctldev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register pinctrl\n");
+
+ ret = upboard_pinctrl_register_groups(pctrl);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register groups\n");
+
+ ret = upboard_pinctrl_register_functions(pctrl);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register functions\n");
+
+ ret = devm_pinctrl_register_mappings(dev, board_map->maps, board_map->nmaps);
+ if (ret)
+ return ret;
+
+ pinctrl = devm_pinctrl_get_select_default(dev);
+ if (IS_ERR(pinctrl))
+ return dev_err_probe(dev, PTR_ERR(pinctrl), "Failed to select pinctrl\n");
+
+ ret = pinctrl_enable(pctrl->pctldev);
+ if (ret)
+ return ret;
+
+ fwd = devm_gpiochip_fwd_alloc(dev, pctrl->pctrl_data->ngpio);
+ if (IS_ERR(fwd))
+ return dev_err_probe(dev, PTR_ERR(fwd), "Failed to allocate the gpiochip forwarder\n");
+
+ chip = gpiochip_fwd_get_gpiochip(fwd);
+ chip->request = upboard_gpio_request;
+ chip->free = upboard_gpio_free;
+ chip->get_direction = upboard_gpio_get_direction;
+ chip->direction_output = upboard_gpio_direction_output;
+ chip->direction_input = upboard_gpio_direction_input;
+
+ ret = gpiochip_fwd_register(fwd, pctrl);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register the gpiochip forwarder\n");
+
+ return gpiochip_add_sparse_pin_range(chip, dev_name(dev), 0, pctrl->pctrl_data->pin_header,
+ pctrl->pctrl_data->ngpio);
+}
+
+static struct platform_driver upboard_pinctrl_driver = {
+ .driver = {
+ .name = "upboard-pinctrl",
+ },
+ .probe = upboard_pinctrl_probe,
+};
+module_platform_driver(upboard_pinctrl_driver);
+
+MODULE_AUTHOR("Thomas Richard <thomas.richard@bootlin.com");
+MODULE_DESCRIPTION("UP Board HAT pin controller driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:upboard-pinctrl");
+MODULE_IMPORT_NS("GPIO_FORWARDER");
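gpiochip_add_sparse_pin_range() registers the pin_header table as the GPIO-offset-to-pin mapping, so line 0 of the exported chip is the first 40-pin header pin rather than FPGA bit 0. A quick standalone illustration of the lookup, using the first entries of upboard_up_pin_header above (a sketch, not driver code):

#include <stdio.h>

/* First entries of upboard_up_pin_header, by FPGA bit number (see above) */
static const unsigned int pin_header[] = {
	9,	/* I2C0_SDA */
	23,	/* I2C0_SCL */
	0,	/* I2C1_SDA */
	1,	/* I2C1_SCL */
	2,	/* ADC0 */
};

int main(void)
{
	unsigned int offset;

	for (offset = 0; offset < sizeof(pin_header) / sizeof(pin_header[0]); offset++)
		printf("gpio offset %u -> FPGA pin %u\n", offset, pin_header[offset]);
	return 0;
}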
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index 53c6c22ff24d..3d4ad61d0da9 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -1354,7 +1354,7 @@ static struct gpio_chip xway_chip = {
.direction_input = xway_gpio_dir_in,
.direction_output = xway_gpio_dir_out,
.get = xway_gpio_get,
- .set_rv = xway_gpio_set,
+ .set = xway_gpio_set,
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
.to_irq = xway_gpio_to_irq,
diff --git a/drivers/pinctrl/pinctrl-zynqmp.c b/drivers/pinctrl/pinctrl-zynqmp.c
index fddf0fef4b13..585fe1661e17 100644
--- a/drivers/pinctrl/pinctrl-zynqmp.c
+++ b/drivers/pinctrl/pinctrl-zynqmp.c
@@ -919,7 +919,7 @@ static int versal_pinctrl_prepare_pin_desc(struct device *dev,
if (ret)
return ret;
- pins = devm_kzalloc(dev, sizeof(*pins) * *npins, GFP_KERNEL);
+ pins = devm_kcalloc(dev, *npins, sizeof(*pins), GFP_KERNEL);
if (!pins)
return -ENOMEM;
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index 79814758a084..3a8dd184ba3d 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -89,13 +89,20 @@ bool pinmux_can_be_used_for_gpio(struct pinctrl_dev *pctldev, unsigned int pin)
{
struct pin_desc *desc = pin_desc_get(pctldev, pin);
const struct pinmux_ops *ops = pctldev->desc->pmxops;
+ const struct pinctrl_setting_mux *mux_setting;
+ bool func_is_gpio = false;
/* Can't inspect pin, assume it can be used */
if (!desc || !ops)
return true;
+ mux_setting = desc->mux_setting;
+
guard(mutex)(&desc->mux_lock);
- if (ops->strict && desc->mux_usecount)
+ if (mux_setting && ops->function_is_gpio)
+ func_is_gpio = ops->function_is_gpio(pctldev, mux_setting->func);
+
+ if (ops->strict && desc->mux_usecount && !func_is_gpio)
return false;
return !(ops->strict && !!desc->gpio_owner);
@@ -116,7 +123,9 @@ static int pin_request(struct pinctrl_dev *pctldev,
{
struct pin_desc *desc;
const struct pinmux_ops *ops = pctldev->desc->pmxops;
+ const struct pinctrl_setting_mux *mux_setting;
int status = -EINVAL;
+ bool gpio_ok = false;
desc = pin_desc_get(pctldev, pin);
if (desc == NULL) {
@@ -126,11 +135,21 @@ static int pin_request(struct pinctrl_dev *pctldev,
goto out;
}
+ mux_setting = desc->mux_setting;
+
dev_dbg(pctldev->dev, "request pin %d (%s) for %s\n",
pin, desc->name, owner);
scoped_guard(mutex, &desc->mux_lock) {
- if ((!gpio_range || ops->strict) &&
+ if (mux_setting) {
+ if (ops->function_is_gpio)
+ gpio_ok = ops->function_is_gpio(pctldev,
+ mux_setting->func);
+ } else {
+ gpio_ok = true;
+ }
+
+ if ((!gpio_range || ops->strict) && !gpio_ok &&
desc->mux_usecount && strcmp(desc->mux_owner, owner)) {
dev_err(pctldev->dev,
"pin %s already requested by %s; cannot claim for %s\n",
@@ -138,7 +157,7 @@ static int pin_request(struct pinctrl_dev *pctldev,
goto out;
}
- if ((gpio_range || ops->strict) && desc->gpio_owner) {
+ if ((gpio_range || ops->strict) && !gpio_ok && desc->gpio_owner) {
dev_err(pctldev->dev,
"pin %s already requested by %s; cannot claim for %s\n",
desc->name, desc->gpio_owner, owner);
@@ -337,7 +356,7 @@ static int pinmux_func_name_to_selector(struct pinctrl_dev *pctldev,
while (selector < nfuncs) {
const char *fname = ops->get_function_name(pctldev, selector);
- if (!strcmp(function, fname))
+ if (fname && !strcmp(function, fname))
return selector;
selector++;
@@ -810,7 +829,7 @@ pinmux_generic_get_function_name(struct pinctrl_dev *pctldev,
if (!function)
return NULL;
- return function->func.name;
+ return function->func->name;
}
EXPORT_SYMBOL_GPL(pinmux_generic_get_function_name);
@@ -835,8 +854,8 @@ int pinmux_generic_get_function_groups(struct pinctrl_dev *pctldev,
__func__, selector);
return -EINVAL;
}
- *groups = function->func.groups;
- *ngroups = function->func.ngroups;
+ *groups = function->func->groups;
+ *ngroups = function->func->ngroups;
return 0;
}
@@ -847,8 +866,8 @@ EXPORT_SYMBOL_GPL(pinmux_generic_get_function_groups);
* @pctldev: pin controller device
* @selector: function number
*/
-struct function_desc *pinmux_generic_get_function(struct pinctrl_dev *pctldev,
- unsigned int selector)
+const struct function_desc *
+pinmux_generic_get_function(struct pinctrl_dev *pctldev, unsigned int selector)
{
struct function_desc *function;
@@ -862,6 +881,27 @@ struct function_desc *pinmux_generic_get_function(struct pinctrl_dev *pctldev,
EXPORT_SYMBOL_GPL(pinmux_generic_get_function);
/**
+ * pinmux_generic_function_is_gpio() - returns true if given function is a GPIO
+ * @pctldev: pin controller device
+ * @selector: function number
+ *
+ * Returns:
+ * True if the given function is a GPIO function, false otherwise.
+ */
+bool pinmux_generic_function_is_gpio(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct function_desc *function;
+
+ function = radix_tree_lookup(&pctldev->pin_function_tree, selector);
+ if (!function)
+ return false;
+
+ return function->func->flags & PINFUNCTION_FLAG_GPIO;
+}
+EXPORT_SYMBOL_GPL(pinmux_generic_function_is_gpio);
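With the new hook, a driver using the generic pinmux helpers can tag its GPIO function so the strict-mode checks in pin_request() and pinmux_can_be_used_for_gpio() stop refusing GPIO requests on pins already muxed to that function. A hedged sketch of the wiring (PINFUNCTION_FLAG_GPIO, the pinfunction flags member, and the .function_is_gpio op come from this patch; the rest of the initializer is assumed boilerplate):

#include <linux/array_size.h>
#include <linux/pinctrl/pinmux.h>

#include "pinmux.h"

static const char * const foo_gpio_groups[] = { "gpio_grp" };

static const struct pinfunction foo_gpio_func = {
	.name = "gpio",
	.groups = foo_gpio_groups,
	.ngroups = ARRAY_SIZE(foo_gpio_groups),
	.flags = PINFUNCTION_FLAG_GPIO,	/* marks this function as GPIO */
};

static int foo_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
		       unsigned int group_selector)
{
	/* program the hardware mux here */
	return 0;
}

static const struct pinmux_ops foo_pmxops = {
	.get_functions_count = pinmux_generic_get_function_count,
	.get_function_name = pinmux_generic_get_function_name,
	.get_function_groups = pinmux_generic_get_function_groups,
	.function_is_gpio = pinmux_generic_function_is_gpio,
	.set_mux = foo_set_mux,
	.strict = true,
};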
+
+/**
* pinmux_generic_add_function() - adds a function group
* @pctldev: pin controller device
* @name: name of the function
@@ -903,7 +943,17 @@ int pinmux_generic_add_pinfunction(struct pinctrl_dev *pctldev,
if (!function)
return -ENOMEM;
- function->func = *func;
+ /*
+ * FIXME: It's generally a bad idea to use devres in subsystem core
+ * code - managed interfaces are aimed at drivers - but pinctrl already
+ * uses it all over the place so it's a larger piece of technical debt
+ * to fix.
+ */
+ function->func = devm_kmemdup_const(pctldev->dev, func,
+ sizeof(*func), GFP_KERNEL);
+ if (!function->func)
+ return -ENOMEM;
+
function->data = data;
error = radix_tree_insert(&pctldev->pin_function_tree, selector, function);
diff --git a/drivers/pinctrl/pinmux.h b/drivers/pinctrl/pinmux.h
index bdb5be1a636e..4e826c1a5246 100644
--- a/drivers/pinctrl/pinmux.h
+++ b/drivers/pinctrl/pinmux.h
@@ -137,7 +137,7 @@ static inline void pinmux_init_device_debugfs(struct dentry *devroot,
* @data: pin controller driver specific data
*/
struct function_desc {
- struct pinfunction func;
+ const struct pinfunction *func;
void *data;
};
@@ -152,8 +152,8 @@ int pinmux_generic_get_function_groups(struct pinctrl_dev *pctldev,
const char * const **groups,
unsigned int * const ngroups);
-struct function_desc *pinmux_generic_get_function(struct pinctrl_dev *pctldev,
- unsigned int selector);
+const struct function_desc *
+pinmux_generic_get_function(struct pinctrl_dev *pctldev, unsigned int selector);
int pinmux_generic_add_function(struct pinctrl_dev *pctldev,
const char *name,
@@ -169,6 +169,9 @@ int pinmux_generic_remove_function(struct pinctrl_dev *pctldev,
void pinmux_generic_free_functions(struct pinctrl_dev *pctldev);
+bool pinmux_generic_function_is_gpio(struct pinctrl_dev *pctldev,
+ unsigned int selector);
+
#else
static inline void pinmux_generic_free_functions(struct pinctrl_dev *pctldev)
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index dd9bbe8f3e11..c480e8b78503 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -8,6 +8,7 @@ config PINCTRL_MSM
depends on OF
select QCOM_SCM
select PINMUX
+ select GENERIC_PINMUX_FUNCTIONS
select PINCONF
select GENERIC_PINCONF
select GPIOLIB_IRQCHIP
@@ -68,6 +69,16 @@ config PINCTRL_SC7280_LPASS_LPI
Qualcomm Technologies Inc LPASS (Low Power Audio SubSystem) LPI
(Low Power Island) found on the Qualcomm Technologies Inc SC7280 platform.
+config PINCTRL_SDM660_LPASS_LPI
+ tristate "Qualcomm Technologies Inc SDM660 LPASS LPI pin controller driver"
+ depends on GPIOLIB
+ depends on ARM64 || COMPILE_TEST
+ depends on PINCTRL_LPASS_LPI
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc LPASS (Low Power Audio SubSystem) LPI
+ (Low Power Island) found on the Qualcomm Technologies Inc SDM660 platform.
+
config PINCTRL_SM4250_LPASS_LPI
tristate "Qualcomm Technologies Inc SM4250 LPASS LPI pin controller driver"
depends on ARM64 || COMPILE_TEST
diff --git a/drivers/pinctrl/qcom/Kconfig.msm b/drivers/pinctrl/qcom/Kconfig.msm
index 6dad942b00a3..69a5b47adedc 100644
--- a/drivers/pinctrl/qcom/Kconfig.msm
+++ b/drivers/pinctrl/qcom/Kconfig.msm
@@ -15,6 +15,16 @@ config PINCTRL_APQ8084
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found in the Qualcomm APQ8084 platform.
+config PINCTRL_GLYMUR
+ tristate "Qualcomm Technologies Inc Glymur pin controller driver"
+ depends on ARM64 || COMPILE_TEST
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+	  Qualcomm Technologies Inc Top Level Mode Multiplexer (TLMM) block
+	  found on the Qualcomm Technologies Inc Glymur platform.
+ Say Y here to compile statically, or M here to compile it as a module.
+ If unsure, say N.
+
config PINCTRL_IPQ4019
tristate "Qualcomm IPQ4019 pin controller driver"
depends on ARM || COMPILE_TEST
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 2acff520a285..567d3051e760 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -3,6 +3,7 @@
obj-$(CONFIG_PINCTRL_MSM) += pinctrl-msm.o
obj-$(CONFIG_PINCTRL_APQ8064) += pinctrl-apq8064.o
obj-$(CONFIG_PINCTRL_APQ8084) += pinctrl-apq8084.o
+obj-$(CONFIG_PINCTRL_GLYMUR) += pinctrl-glymur.o
obj-$(CONFIG_PINCTRL_IPQ4019) += pinctrl-ipq4019.o
obj-$(CONFIG_PINCTRL_IPQ5018) += pinctrl-ipq5018.o
obj-$(CONFIG_PINCTRL_IPQ8064) += pinctrl-ipq8064.o
@@ -44,6 +45,7 @@ obj-$(CONFIG_PINCTRL_SC7280_LPASS_LPI) += pinctrl-sc7280-lpass-lpi.o
obj-$(CONFIG_PINCTRL_SC8180X) += pinctrl-sc8180x.o
obj-$(CONFIG_PINCTRL_SC8280XP) += pinctrl-sc8280xp.o
obj-$(CONFIG_PINCTRL_SDM660) += pinctrl-sdm660.o
+obj-$(CONFIG_PINCTRL_SDM660_LPASS_LPI) += pinctrl-sdm660-lpass-lpi.o
obj-$(CONFIG_PINCTRL_SDM670) += pinctrl-sdm670.o
obj-$(CONFIG_PINCTRL_SDM845) += pinctrl-sdm845.o
obj-$(CONFIG_PINCTRL_SDX55) += pinctrl-sdx55.o
diff --git a/drivers/pinctrl/qcom/pinctrl-glymur.c b/drivers/pinctrl/qcom/pinctrl-glymur.c
new file mode 100644
index 000000000000..9913f98e9531
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-glymur.c
@@ -0,0 +1,1777 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define REG_SIZE 0x1000
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11) \
+ { \
+ .grp = PINCTRL_PINGROUP("gpio" #id, \
+ gpio##id##_pins, \
+ ARRAY_SIZE(gpio##id##_pins)), \
+ .ctl_reg = REG_SIZE * id, \
+ .io_reg = 0x4 + REG_SIZE * id, \
+ .intr_cfg_reg = 0x8 + REG_SIZE * id, \
+ .intr_status_reg = 0xc + REG_SIZE * id, \
+ .intr_target_reg = 0x8 + REG_SIZE * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .egpio_enable = 12, \
+ .egpio_present = 11, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 3, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9, \
+ msm_mux_##f10, \
+ msm_mux_##f11 /* egpio mode */ \
+ }, \
+ .nfuncs = 12, \
+ }
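Each TLMM GPIO owns a 0x1000-byte register window, so every per-pin register offset in the macro above follows directly from the pin id. A quick standalone check of the arithmetic (not driver code):

#include <stdio.h>

#define REG_SIZE 0x1000

int main(void)
{
	unsigned int id = 5;	/* gpio5, arbitrary example */

	printf("ctl_reg         = %#x\n", REG_SIZE * id);	/* 0x5000 */
	printf("io_reg          = %#x\n", 0x4 + REG_SIZE * id);	/* 0x5004 */
	printf("intr_cfg_reg    = %#x\n", 0x8 + REG_SIZE * id);	/* 0x5008 */
	printf("intr_status_reg = %#x\n", 0xc + REG_SIZE * id);	/* 0x500c */
	return 0;
}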
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .grp = PINCTRL_PINGROUP(#pg_name, \
+ pg_name##_pins, \
+ ARRAY_SIZE(pg_name##_pins)), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+#define UFS_RESET(pg_name, ctl, io) \
+ { \
+ .grp = PINCTRL_PINGROUP(#pg_name, \
+ pg_name##_pins, \
+ ARRAY_SIZE(pg_name##_pins)), \
+ .ctl_reg = ctl, \
+ .io_reg = io, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = 3, \
+ .drv_bit = 0, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = 0, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+static const struct pinctrl_pin_desc glymur_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "GPIO_134"),
+ PINCTRL_PIN(135, "GPIO_135"),
+ PINCTRL_PIN(136, "GPIO_136"),
+ PINCTRL_PIN(137, "GPIO_137"),
+ PINCTRL_PIN(138, "GPIO_138"),
+ PINCTRL_PIN(139, "GPIO_139"),
+ PINCTRL_PIN(140, "GPIO_140"),
+ PINCTRL_PIN(141, "GPIO_141"),
+ PINCTRL_PIN(142, "GPIO_142"),
+ PINCTRL_PIN(143, "GPIO_143"),
+ PINCTRL_PIN(144, "GPIO_144"),
+ PINCTRL_PIN(145, "GPIO_145"),
+ PINCTRL_PIN(146, "GPIO_146"),
+ PINCTRL_PIN(147, "GPIO_147"),
+ PINCTRL_PIN(148, "GPIO_148"),
+ PINCTRL_PIN(149, "GPIO_149"),
+ PINCTRL_PIN(150, "GPIO_150"),
+ PINCTRL_PIN(151, "GPIO_151"),
+ PINCTRL_PIN(152, "GPIO_152"),
+ PINCTRL_PIN(153, "GPIO_153"),
+ PINCTRL_PIN(154, "GPIO_154"),
+ PINCTRL_PIN(155, "GPIO_155"),
+ PINCTRL_PIN(156, "GPIO_156"),
+ PINCTRL_PIN(157, "GPIO_157"),
+ PINCTRL_PIN(158, "GPIO_158"),
+ PINCTRL_PIN(159, "GPIO_159"),
+ PINCTRL_PIN(160, "GPIO_160"),
+ PINCTRL_PIN(161, "GPIO_161"),
+ PINCTRL_PIN(162, "GPIO_162"),
+ PINCTRL_PIN(163, "GPIO_163"),
+ PINCTRL_PIN(164, "GPIO_164"),
+ PINCTRL_PIN(165, "GPIO_165"),
+ PINCTRL_PIN(166, "GPIO_166"),
+ PINCTRL_PIN(167, "GPIO_167"),
+ PINCTRL_PIN(168, "GPIO_168"),
+ PINCTRL_PIN(169, "GPIO_169"),
+ PINCTRL_PIN(170, "GPIO_170"),
+ PINCTRL_PIN(171, "GPIO_171"),
+ PINCTRL_PIN(172, "GPIO_172"),
+ PINCTRL_PIN(173, "GPIO_173"),
+ PINCTRL_PIN(174, "GPIO_174"),
+ PINCTRL_PIN(175, "GPIO_175"),
+ PINCTRL_PIN(176, "GPIO_176"),
+ PINCTRL_PIN(177, "GPIO_177"),
+ PINCTRL_PIN(178, "GPIO_178"),
+ PINCTRL_PIN(179, "GPIO_179"),
+ PINCTRL_PIN(180, "GPIO_180"),
+ PINCTRL_PIN(181, "GPIO_181"),
+ PINCTRL_PIN(182, "GPIO_182"),
+ PINCTRL_PIN(183, "GPIO_183"),
+ PINCTRL_PIN(184, "GPIO_184"),
+ PINCTRL_PIN(185, "GPIO_185"),
+ PINCTRL_PIN(186, "GPIO_186"),
+ PINCTRL_PIN(187, "GPIO_187"),
+ PINCTRL_PIN(188, "GPIO_188"),
+ PINCTRL_PIN(189, "GPIO_189"),
+ PINCTRL_PIN(190, "GPIO_190"),
+ PINCTRL_PIN(191, "GPIO_191"),
+ PINCTRL_PIN(192, "GPIO_192"),
+ PINCTRL_PIN(193, "GPIO_193"),
+ PINCTRL_PIN(194, "GPIO_194"),
+ PINCTRL_PIN(195, "GPIO_195"),
+ PINCTRL_PIN(196, "GPIO_196"),
+ PINCTRL_PIN(197, "GPIO_197"),
+ PINCTRL_PIN(198, "GPIO_198"),
+ PINCTRL_PIN(199, "GPIO_199"),
+ PINCTRL_PIN(200, "GPIO_200"),
+ PINCTRL_PIN(201, "GPIO_201"),
+ PINCTRL_PIN(202, "GPIO_202"),
+ PINCTRL_PIN(203, "GPIO_203"),
+ PINCTRL_PIN(204, "GPIO_204"),
+ PINCTRL_PIN(205, "GPIO_205"),
+ PINCTRL_PIN(206, "GPIO_206"),
+ PINCTRL_PIN(207, "GPIO_207"),
+ PINCTRL_PIN(208, "GPIO_208"),
+ PINCTRL_PIN(209, "GPIO_209"),
+ PINCTRL_PIN(210, "GPIO_210"),
+ PINCTRL_PIN(211, "GPIO_211"),
+ PINCTRL_PIN(212, "GPIO_212"),
+ PINCTRL_PIN(213, "GPIO_213"),
+ PINCTRL_PIN(214, "GPIO_214"),
+ PINCTRL_PIN(215, "GPIO_215"),
+ PINCTRL_PIN(216, "GPIO_216"),
+ PINCTRL_PIN(217, "GPIO_217"),
+ PINCTRL_PIN(218, "GPIO_218"),
+ PINCTRL_PIN(219, "GPIO_219"),
+ PINCTRL_PIN(220, "GPIO_220"),
+ PINCTRL_PIN(221, "GPIO_221"),
+ PINCTRL_PIN(222, "GPIO_222"),
+ PINCTRL_PIN(223, "GPIO_223"),
+ PINCTRL_PIN(224, "GPIO_224"),
+ PINCTRL_PIN(225, "GPIO_225"),
+ PINCTRL_PIN(226, "GPIO_226"),
+ PINCTRL_PIN(227, "GPIO_227"),
+ PINCTRL_PIN(228, "GPIO_228"),
+ PINCTRL_PIN(229, "GPIO_229"),
+ PINCTRL_PIN(230, "GPIO_230"),
+ PINCTRL_PIN(231, "GPIO_231"),
+ PINCTRL_PIN(232, "GPIO_232"),
+ PINCTRL_PIN(233, "GPIO_233"),
+ PINCTRL_PIN(234, "GPIO_234"),
+ PINCTRL_PIN(235, "GPIO_235"),
+ PINCTRL_PIN(236, "GPIO_236"),
+ PINCTRL_PIN(237, "GPIO_237"),
+ PINCTRL_PIN(238, "GPIO_238"),
+ PINCTRL_PIN(239, "GPIO_239"),
+ PINCTRL_PIN(240, "GPIO_240"),
+ PINCTRL_PIN(241, "GPIO_241"),
+ PINCTRL_PIN(242, "GPIO_242"),
+ PINCTRL_PIN(243, "GPIO_243"),
+ PINCTRL_PIN(244, "GPIO_244"),
+ PINCTRL_PIN(245, "GPIO_245"),
+ PINCTRL_PIN(246, "GPIO_246"),
+ PINCTRL_PIN(247, "GPIO_247"),
+ PINCTRL_PIN(248, "GPIO_248"),
+ PINCTRL_PIN(249, "GPIO_249"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+DECLARE_MSM_GPIO_PINS(145);
+DECLARE_MSM_GPIO_PINS(146);
+DECLARE_MSM_GPIO_PINS(147);
+DECLARE_MSM_GPIO_PINS(148);
+DECLARE_MSM_GPIO_PINS(149);
+DECLARE_MSM_GPIO_PINS(150);
+DECLARE_MSM_GPIO_PINS(151);
+DECLARE_MSM_GPIO_PINS(152);
+DECLARE_MSM_GPIO_PINS(153);
+DECLARE_MSM_GPIO_PINS(154);
+DECLARE_MSM_GPIO_PINS(155);
+DECLARE_MSM_GPIO_PINS(156);
+DECLARE_MSM_GPIO_PINS(157);
+DECLARE_MSM_GPIO_PINS(158);
+DECLARE_MSM_GPIO_PINS(159);
+DECLARE_MSM_GPIO_PINS(160);
+DECLARE_MSM_GPIO_PINS(161);
+DECLARE_MSM_GPIO_PINS(162);
+DECLARE_MSM_GPIO_PINS(163);
+DECLARE_MSM_GPIO_PINS(164);
+DECLARE_MSM_GPIO_PINS(165);
+DECLARE_MSM_GPIO_PINS(166);
+DECLARE_MSM_GPIO_PINS(167);
+DECLARE_MSM_GPIO_PINS(168);
+DECLARE_MSM_GPIO_PINS(169);
+DECLARE_MSM_GPIO_PINS(170);
+DECLARE_MSM_GPIO_PINS(171);
+DECLARE_MSM_GPIO_PINS(172);
+DECLARE_MSM_GPIO_PINS(173);
+DECLARE_MSM_GPIO_PINS(174);
+DECLARE_MSM_GPIO_PINS(175);
+DECLARE_MSM_GPIO_PINS(176);
+DECLARE_MSM_GPIO_PINS(177);
+DECLARE_MSM_GPIO_PINS(178);
+DECLARE_MSM_GPIO_PINS(179);
+DECLARE_MSM_GPIO_PINS(180);
+DECLARE_MSM_GPIO_PINS(181);
+DECLARE_MSM_GPIO_PINS(182);
+DECLARE_MSM_GPIO_PINS(183);
+DECLARE_MSM_GPIO_PINS(184);
+DECLARE_MSM_GPIO_PINS(185);
+DECLARE_MSM_GPIO_PINS(186);
+DECLARE_MSM_GPIO_PINS(187);
+DECLARE_MSM_GPIO_PINS(188);
+DECLARE_MSM_GPIO_PINS(189);
+DECLARE_MSM_GPIO_PINS(190);
+DECLARE_MSM_GPIO_PINS(191);
+DECLARE_MSM_GPIO_PINS(192);
+DECLARE_MSM_GPIO_PINS(193);
+DECLARE_MSM_GPIO_PINS(194);
+DECLARE_MSM_GPIO_PINS(195);
+DECLARE_MSM_GPIO_PINS(196);
+DECLARE_MSM_GPIO_PINS(197);
+DECLARE_MSM_GPIO_PINS(198);
+DECLARE_MSM_GPIO_PINS(199);
+DECLARE_MSM_GPIO_PINS(200);
+DECLARE_MSM_GPIO_PINS(201);
+DECLARE_MSM_GPIO_PINS(202);
+DECLARE_MSM_GPIO_PINS(203);
+DECLARE_MSM_GPIO_PINS(204);
+DECLARE_MSM_GPIO_PINS(205);
+DECLARE_MSM_GPIO_PINS(206);
+DECLARE_MSM_GPIO_PINS(207);
+DECLARE_MSM_GPIO_PINS(208);
+DECLARE_MSM_GPIO_PINS(209);
+DECLARE_MSM_GPIO_PINS(210);
+DECLARE_MSM_GPIO_PINS(211);
+DECLARE_MSM_GPIO_PINS(212);
+DECLARE_MSM_GPIO_PINS(213);
+DECLARE_MSM_GPIO_PINS(214);
+DECLARE_MSM_GPIO_PINS(215);
+DECLARE_MSM_GPIO_PINS(216);
+DECLARE_MSM_GPIO_PINS(217);
+DECLARE_MSM_GPIO_PINS(218);
+DECLARE_MSM_GPIO_PINS(219);
+DECLARE_MSM_GPIO_PINS(220);
+DECLARE_MSM_GPIO_PINS(221);
+DECLARE_MSM_GPIO_PINS(222);
+DECLARE_MSM_GPIO_PINS(223);
+DECLARE_MSM_GPIO_PINS(224);
+DECLARE_MSM_GPIO_PINS(225);
+DECLARE_MSM_GPIO_PINS(226);
+DECLARE_MSM_GPIO_PINS(227);
+DECLARE_MSM_GPIO_PINS(228);
+DECLARE_MSM_GPIO_PINS(229);
+DECLARE_MSM_GPIO_PINS(230);
+DECLARE_MSM_GPIO_PINS(231);
+DECLARE_MSM_GPIO_PINS(232);
+DECLARE_MSM_GPIO_PINS(233);
+DECLARE_MSM_GPIO_PINS(234);
+DECLARE_MSM_GPIO_PINS(235);
+DECLARE_MSM_GPIO_PINS(236);
+DECLARE_MSM_GPIO_PINS(237);
+DECLARE_MSM_GPIO_PINS(238);
+DECLARE_MSM_GPIO_PINS(239);
+DECLARE_MSM_GPIO_PINS(240);
+DECLARE_MSM_GPIO_PINS(241);
+DECLARE_MSM_GPIO_PINS(242);
+DECLARE_MSM_GPIO_PINS(243);
+DECLARE_MSM_GPIO_PINS(244);
+DECLARE_MSM_GPIO_PINS(245);
+DECLARE_MSM_GPIO_PINS(246);
+DECLARE_MSM_GPIO_PINS(247);
+DECLARE_MSM_GPIO_PINS(248);
+DECLARE_MSM_GPIO_PINS(249);
+
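+/* Dedicated pads without TLMM mux slots: UFS reset and the SDC2 card interface. */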
+static const unsigned int ufs_reset_pins[] = { 250 };
+static const unsigned int sdc2_clk_pins[] = { 251 };
+static const unsigned int sdc2_cmd_pins[] = { 252 };
+static const unsigned int sdc2_data_pins[] = { 253 };
+
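+/* Mux function selectors; msm_mux__ is the sentinel used for empty PINGROUP slots. */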
+enum glymur_functions {
+ msm_mux_gpio,
+ msm_mux_resout_gpio_n,
+ msm_mux_aoss_cti,
+ msm_mux_asc_cci,
+ msm_mux_atest_char,
+ msm_mux_atest_usb,
+ msm_mux_audio_ext_mclk0,
+ msm_mux_audio_ext_mclk1,
+ msm_mux_audio_ref_clk,
+ msm_mux_cam_asc_mclk4,
+ msm_mux_cam_mclk,
+ msm_mux_cci_async_in,
+ msm_mux_cci_i2c_scl,
+ msm_mux_cci_i2c_sda,
+ msm_mux_cci_timer,
+ msm_mux_cmu_rng,
+ msm_mux_cri_trng,
+ msm_mux_dbg_out_clk,
+ msm_mux_ddr_bist_complete,
+ msm_mux_ddr_bist_fail,
+ msm_mux_ddr_bist_start,
+ msm_mux_ddr_bist_stop,
+ msm_mux_ddr_pxi,
+ msm_mux_edp0_hot,
+ msm_mux_edp0_lcd,
+ msm_mux_edp1_lcd,
+ msm_mux_egpio,
+ msm_mux_eusb_ac_en,
+ msm_mux_gcc_gp1,
+ msm_mux_gcc_gp2,
+ msm_mux_gcc_gp3,
+ msm_mux_host2wlan_sol,
+ msm_mux_i2c0_s_scl,
+ msm_mux_i2c0_s_sda,
+ msm_mux_i2s0_data,
+ msm_mux_i2s0_sck,
+ msm_mux_i2s0_ws,
+ msm_mux_i2s1_data,
+ msm_mux_i2s1_sck,
+ msm_mux_i2s1_ws,
+ msm_mux_ibi_i3c,
+ msm_mux_jitter_bist,
+ msm_mux_mdp_vsync_out,
+ msm_mux_mdp_vsync_e,
+ msm_mux_mdp_vsync_p,
+ msm_mux_mdp_vsync_s,
+ msm_mux_pcie3a_clk,
+ msm_mux_pcie3a_rst_n,
+ msm_mux_pcie3b_clk,
+ msm_mux_pcie4_clk_req_n,
+ msm_mux_pcie5_clk_req_n,
+ msm_mux_pcie6_clk_req_n,
+ msm_mux_phase_flag,
+ msm_mux_pll_bist_sync,
+ msm_mux_pll_clk_aux,
+ msm_mux_pmc_oca_n,
+ msm_mux_pmc_uva_n,
+ msm_mux_prng_rosc,
+ msm_mux_qdss_cti,
+ msm_mux_qdss_gpio,
+ msm_mux_qspi0,
+ msm_mux_qup0_se0,
+ msm_mux_qup0_se1,
+ msm_mux_qup0_se2,
+ msm_mux_qup0_se3,
+ msm_mux_qup0_se4,
+ msm_mux_qup0_se5,
+ msm_mux_qup0_se6,
+ msm_mux_qup0_se7,
+ msm_mux_qup1_se0,
+ msm_mux_qup1_se1,
+ msm_mux_qup1_se2,
+ msm_mux_qup1_se3,
+ msm_mux_qup1_se4,
+ msm_mux_qup1_se5,
+ msm_mux_qup1_se6,
+ msm_mux_qup1_se7,
+ msm_mux_qup2_se0,
+ msm_mux_qup2_se1,
+ msm_mux_qup2_se2,
+ msm_mux_qup2_se3,
+ msm_mux_qup2_se4,
+ msm_mux_qup2_se5,
+ msm_mux_qup2_se6,
+ msm_mux_qup2_se7,
+ msm_mux_qup3_se0,
+ msm_mux_qup3_se1,
+ msm_mux_sd_write_protect,
+ msm_mux_sdc4_clk,
+ msm_mux_sdc4_cmd,
+ msm_mux_sdc4_data,
+ msm_mux_smb_acok_n,
+ msm_mux_sys_throttle,
+ msm_mux_tb_trig_sdc2,
+ msm_mux_tb_trig_sdc4,
+ msm_mux_tmess_prng,
+ msm_mux_tsense_pwm,
+ msm_mux_tsense_therm,
+ msm_mux_usb0_dp,
+ msm_mux_usb0_phy_ps,
+ msm_mux_usb0_sbrx,
+ msm_mux_usb0_sbtx,
+ msm_mux_usb0_tmu,
+ msm_mux_usb1_dbg,
+ msm_mux_usb1_dp,
+ msm_mux_usb1_phy_ps,
+ msm_mux_usb1_sbrx,
+ msm_mux_usb1_sbtx,
+ msm_mux_usb1_tmu,
+ msm_mux_usb2_dp,
+ msm_mux_usb2_phy_ps,
+ msm_mux_usb2_sbrx,
+ msm_mux_usb2_sbtx,
+ msm_mux_usb2_tmu,
+ msm_mux_vsense_trigger_mirnat,
+ msm_mux_wcn_sw,
+ msm_mux_wcn_sw_ctrl,
+ msm_mux__,
+};
+
+static const char *const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5",
+ "gpio6", "gpio7", "gpio8", "gpio9", "gpio10", "gpio11",
+ "gpio12", "gpio13", "gpio14", "gpio15", "gpio16", "gpio17",
+ "gpio18", "gpio19", "gpio20", "gpio21", "gpio22", "gpio23",
+ "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", "gpio29",
+ "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41",
+ "gpio42", "gpio43", "gpio44", "gpio45", "gpio46", "gpio47",
+ "gpio48", "gpio49", "gpio50", "gpio51", "gpio52", "gpio53",
+ "gpio54", "gpio55", "gpio56", "gpio57", "gpio58", "gpio59",
+ "gpio60", "gpio61", "gpio62", "gpio63", "gpio64", "gpio65",
+ "gpio66", "gpio67", "gpio68", "gpio69", "gpio70", "gpio71",
+ "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83",
+ "gpio84", "gpio85", "gpio86", "gpio87", "gpio88", "gpio89",
+ "gpio90", "gpio91", "gpio92", "gpio93", "gpio94", "gpio95",
+ "gpio96", "gpio97", "gpio98", "gpio99", "gpio100", "gpio101",
+ "gpio102", "gpio103", "gpio104", "gpio105", "gpio106", "gpio107",
+ "gpio108", "gpio109", "gpio110", "gpio111", "gpio112", "gpio113",
+ "gpio114", "gpio115", "gpio116", "gpio117", "gpio118", "gpio119",
+ "gpio120", "gpio121", "gpio122", "gpio123", "gpio124", "gpio125",
+ "gpio126", "gpio127", "gpio128", "gpio129", "gpio130", "gpio131",
+ "gpio132", "gpio133", "gpio134", "gpio135", "gpio136", "gpio137",
+ "gpio138", "gpio139", "gpio140", "gpio141", "gpio142", "gpio143",
+ "gpio144", "gpio145", "gpio146", "gpio147", "gpio148", "gpio149",
+ "gpio150", "gpio151", "gpio152", "gpio153", "gpio154", "gpio155",
+ "gpio156", "gpio157", "gpio158", "gpio159", "gpio160", "gpio161",
+ "gpio162", "gpio163", "gpio164", "gpio165", "gpio166", "gpio167",
+ "gpio168", "gpio169", "gpio170", "gpio171", "gpio172", "gpio173",
+ "gpio174", "gpio175", "gpio176", "gpio177", "gpio178", "gpio179",
+ "gpio180", "gpio181", "gpio182", "gpio183", "gpio184", "gpio185",
+ "gpio186", "gpio187", "gpio188", "gpio189", "gpio190", "gpio191",
+ "gpio192", "gpio193", "gpio194", "gpio195", "gpio196", "gpio197",
+ "gpio198", "gpio199", "gpio200", "gpio201", "gpio202", "gpio203",
+ "gpio204", "gpio205", "gpio206", "gpio207", "gpio208", "gpio209",
+ "gpio210", "gpio211", "gpio212", "gpio213", "gpio214", "gpio215",
+ "gpio216", "gpio217", "gpio218", "gpio219", "gpio220", "gpio221",
+ "gpio222", "gpio223", "gpio224", "gpio225", "gpio226", "gpio227",
+ "gpio228", "gpio229", "gpio230", "gpio231", "gpio232", "gpio233",
+ "gpio234", "gpio235", "gpio236", "gpio237", "gpio238", "gpio239",
+ "gpio240", "gpio241", "gpio242", "gpio243", "gpio244", "gpio245",
+ "gpio246", "gpio247", "gpio248", "gpio249",
+};
+
+static const char *const resout_gpio_n_groups[] = {
+ "gpio160",
+};
+
+static const char *const aoss_cti_groups[] = {
+ "gpio60",
+ "gpio61",
+ "gpio62",
+ "gpio63",
+};
+
+static const char *const asc_cci_groups[] = {
+ "gpio235",
+ "gpio236",
+};
+
+static const char *const atest_char_groups[] = {
+ "gpio172", "gpio184", "gpio188", "gpio164",
+ "gpio163",
+};
+
+static const char *const atest_usb_groups[] = {
+ "gpio39", "gpio40", "gpio41", "gpio38",
+ "gpio44", "gpio45", "gpio42", "gpio43",
+ "gpio49", "gpio50", "gpio51", "gpio48",
+ "gpio54", "gpio55", "gpio52", "gpio53",
+ "gpio65", "gpio66", "gpio46", "gpio47",
+ "gpio72", "gpio73", "gpio80", "gpio81",
+};
+
+static const char *const audio_ext_mclk0_groups[] = {
+ "gpio134",
+};
+
+static const char *const audio_ext_mclk1_groups[] = {
+ "gpio142",
+};
+
+static const char *const audio_ref_clk_groups[] = {
+ "gpio142",
+};
+
+static const char *const cam_asc_mclk4_groups[] = {
+ "gpio100",
+};
+
+static const char *const cam_mclk_groups[] = {
+ "gpio96",
+ "gpio97",
+ "gpio98",
+ "gpio99",
+};
+
+static const char *const cci_async_in_groups[] = {
+ "gpio113", "gpio112", "gpio111",
+};
+
+static const char *const cci_i2c_scl_groups[] = {
+ "gpio102", "gpio104", "gpio106",
+};
+
+static const char *const cci_i2c_sda_groups[] = {
+ "gpio101", "gpio103", "gpio105",
+};
+
+static const char *const cci_timer_groups[] = {
+ "gpio109", "gpio110", "gpio111", "gpio112",
+ "gpio113",
+};
+
+static const char *const cmu_rng_groups[] = {
+ "gpio48", "gpio47", "gpio46", "gpio45",
+};
+
+static const char *const cri_trng_groups[] = {
+ "gpio173",
+};
+
+static const char *const dbg_out_clk_groups[] = {
+ "gpio51",
+};
+
+static const char *const ddr_bist_complete_groups[] = {
+ "gpio57",
+};
+
+static const char *const ddr_bist_fail_groups[] = {
+ "gpio56",
+};
+
+static const char *const ddr_bist_start_groups[] = {
+ "gpio54",
+};
+
+static const char *const ddr_bist_stop_groups[] = {
+ "gpio55",
+};
+
+static const char *const ddr_pxi_groups[] = {
+ "gpio38", "gpio39", "gpio40", "gpio41",
+ "gpio72", "gpio73", "gpio80", "gpio81",
+ "gpio42", "gpio43", "gpio44", "gpio45",
+ "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53",
+ "gpio54", "gpio55", "gpio65", "gpio66",
+};
+
+static const char *const edp0_hot_groups[] = {
+ "gpio119",
+};
+
+static const char *const edp0_lcd_groups[] = {
+ "gpio120",
+};
+
+static const char *const edp1_lcd_groups[] = {
+ "gpio115",
+ "gpio119",
+};
+
+static const char *const egpio_groups[] = {
+ "gpio192", "gpio193", "gpio194", "gpio195", "gpio196", "gpio197",
+ "gpio198", "gpio199", "gpio200", "gpio201", "gpio202", "gpio203",
+ "gpio204", "gpio205", "gpio206", "gpio207", "gpio208", "gpio209",
+ "gpio210", "gpio211", "gpio212", "gpio213", "gpio214", "gpio215",
+ "gpio216", "gpio217", "gpio218", "gpio219", "gpio220", "gpio221",
+ "gpio222", "gpio223", "gpio224", "gpio225", "gpio226", "gpio227",
+ "gpio228", "gpio229", "gpio230", "gpio231", "gpio232", "gpio233",
+ "gpio234", "gpio235", "gpio236", "gpio237", "gpio238", "gpio239",
+ "gpio240", "gpio241", "gpio242", "gpio243", "gpio244",
+};
+
+static const char *const eusb_ac_en_groups[] = {
+ "gpio168", "gpio177", "gpio186", "gpio69",
+ "gpio187", "gpio178",
+};
+
+static const char *const gcc_gp1_groups[] = {
+ "gpio71",
+ "gpio72",
+};
+
+static const char *const gcc_gp2_groups[] = {
+ "gpio64",
+ "gpio73",
+};
+
+static const char *const gcc_gp3_groups[] = {
+ "gpio74",
+ "gpio82",
+};
+
+static const char *const host2wlan_sol_groups[] = {
+ "gpio118",
+};
+
+static const char *const i2c0_s_scl_groups[] = {
+ "gpio7",
+};
+
+static const char *const i2c0_s_sda_groups[] = {
+ "gpio6",
+};
+
+static const char *const i2s0_data_groups[] = {
+ "gpio136", "gpio137",
+};
+
+static const char *const i2s0_sck_groups[] = {
+ "gpio135",
+};
+
+static const char *const i2s0_ws_groups[] = {
+ "gpio138",
+};
+
+static const char *const i2s1_data_groups[] = {
+ "gpio140", "gpio142",
+};
+
+static const char *const i2s1_sck_groups[] = {
+ "gpio139",
+};
+
+static const char *const i2s1_ws_groups[] = {
+ "gpio141",
+};
+
+static const char *const ibi_i3c_groups[] = {
+ "gpio0", "gpio1", "gpio4", "gpio5", "gpio32", "gpio33",
+ "gpio36", "gpio37", "gpio64", "gpio65", "gpio68", "gpio69",
+};
+
+static const char *const jitter_bist_groups[] = {
+ "gpio52",
+};
+
+static const char *const mdp_vsync_out_groups[] = {
+ "gpio114", "gpio114", "gpio115", "gpio115",
+ "gpio109", "gpio110", "gpio111", "gpio112",
+ "gpio113",
+};
+
+static const char *const mdp_vsync_e_groups[] = {
+ "gpio106",
+};
+
+static const char *const mdp_vsync_p_groups[] = {
+ "gpio98",
+};
+
+static const char *const mdp_vsync_s_groups[] = {
+ "gpio105",
+};
+
+static const char *const pcie3a_clk_groups[] = {
+ "gpio144",
+};
+
+static const char *const pcie3a_rst_n_groups[] = {
+ "gpio143",
+};
+
+static const char *const pcie3b_clk_groups[] = {
+ "gpio156",
+};
+
+static const char *const pcie4_clk_req_n_groups[] = {
+ "gpio147",
+};
+
+static const char *const pcie5_clk_req_n_groups[] = {
+ "gpio153",
+};
+
+static const char *const pcie6_clk_req_n_groups[] = {
+ "gpio150",
+};
+
+static const char *const phase_flag_groups[] = {
+ "gpio6", "gpio7", "gpio16", "gpio17",
+ "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25",
+ "gpio8", "gpio26", "gpio27", "gpio163",
+ "gpio164", "gpio188", "gpio184", "gpio172",
+ "gpio186", "gpio173", "gpio76", "gpio9",
+ "gpio77", "gpio78", "gpio10", "gpio11",
+ "gpio12", "gpio13", "gpio14", "gpio15",
+};
+
+static const char *const pll_bist_sync_groups[] = {
+ "gpio28",
+};
+
+static const char *const pll_clk_aux_groups[] = {
+ "gpio35",
+};
+
+static const char *const pmc_oca_n_groups[] = {
+ "gpio249",
+};
+
+static const char *const pmc_uva_n_groups[] = {
+ "gpio248",
+};
+
+static const char *const prng_rosc_groups[] = {
+ "gpio186", "gpio188", "gpio164", "gpio163",
+};
+
+static const char *const qdss_cti_groups[] = {
+ "gpio18", "gpio19", "gpio23", "gpio27",
+ "gpio161", "gpio162", "gpio215", "gpio217",
+};
+
+static const char *const qdss_gpio_groups[] = {
+ "gpio104", "gpio151", "gpio227", "gpio228",
+ "gpio96", "gpio219", "gpio97", "gpio220",
+ "gpio108", "gpio231", "gpio109", "gpio232",
+ "gpio110", "gpio233", "gpio111", "gpio234",
+ "gpio112", "gpio235", "gpio113", "gpio236",
+ "gpio149", "gpio221", "gpio99", "gpio222",
+ "gpio100", "gpio223", "gpio101", "gpio224",
+ "gpio102", "gpio225", "gpio103", "gpio226",
+ "gpio152", "gpio237", "gpio107", "gpio238",
+};
+
+static const char *const qspi0_groups[] = {
+ "gpio127", "gpio132", "gpio133", "gpio128",
+ "gpio129", "gpio130", "gpio131",
+};
+
+static const char *const qup0_se0_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+
+static const char *const qup0_se1_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+
+static const char *const qup0_se2_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+ "gpio17", "gpio18", "gpio19",
+};
+
+static const char *const qup0_se3_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15",
+ "gpio21", "gpio22", "gpio23",
+};
+
+static const char *const qup0_se4_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19",
+};
+
+static const char *const qup0_se5_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23",
+};
+
+static const char *const qup0_se6_groups[] = {
+ "gpio6", "gpio7", "gpio4", "gpio5",
+};
+
+static const char *const qup0_se7_groups[] = {
+ "gpio14", "gpio15", "gpio12", "gpio13",
+};
+
+static const char *const qup1_se0_groups[] = {
+ "gpio32", "gpio33", "gpio34", "gpio35",
+};
+
+static const char *const qup1_se1_groups[] = {
+ "gpio36", "gpio37", "gpio38", "gpio39",
+};
+
+static const char *const qup1_se2_groups[] = {
+ "gpio40", "gpio41", "gpio42", "gpio43",
+ "gpio49", "gpio50", "gpio51",
+};
+
+static const char *const qup1_se3_groups[] = {
+ "gpio44", "gpio45", "gpio46", "gpio47",
+ "gpio33", "gpio34", "gpio35",
+};
+
+static const char *const qup1_se4_groups[] = {
+ "gpio48", "gpio49", "gpio50", "gpio51",
+};
+
+static const char *const qup1_se5_groups[] = {
+ "gpio52", "gpio53", "gpio54", "gpio55",
+};
+
+static const char *const qup1_se6_groups[] = {
+ "gpio56", "gpio57", "gpio58", "gpio59",
+};
+
+static const char *const qup1_se7_groups[] = {
+ "gpio54", "gpio55", "gpio52", "gpio53",
+};
+
+static const char *const qup2_se0_groups[] = {
+ "gpio64", "gpio65", "gpio66", "gpio67",
+};
+
+static const char *const qup2_se1_groups[] = {
+ "gpio68", "gpio69", "gpio70", "gpio71",
+};
+
+static const char *const qup2_se2_groups[] = {
+ "gpio72", "gpio73", "gpio74", "gpio75",
+ "gpio81", "gpio82", "gpio83",
+};
+
+static const char *const qup2_se3_groups[] = {
+ "gpio76", "gpio77", "gpio78", "gpio79",
+ "gpio65", "gpio66", "gpio67",
+};
+
+static const char *const qup2_se4_groups[] = {
+ "gpio80", "gpio81", "gpio82", "gpio83",
+};
+
+static const char *const qup2_se5_groups[] = {
+ "gpio84", "gpio85", "gpio86", "gpio87",
+};
+
+static const char *const qup2_se6_groups[] = {
+ "gpio88", "gpio89", "gpio90", "gpio91",
+};
+
+static const char *const qup2_se7_groups[] = {
+ "gpio80", "gpio81", "gpio82", "gpio83",
+};
+
+static const char *const qup3_se0_groups[] = {
+ "gpio128", "gpio129", "gpio127", "gpio132",
+ "gpio130", "gpio131", "gpio133", "gpio247",
+};
+
+static const char *const qup3_se1_groups[] = {
+ "gpio40", "gpio41", "gpio42", "gpio43",
+ "gpio49", "gpio50", "gpio51", "gpio48",
+};
+
+static const char *const sd_write_protect_groups[] = {
+ "gpio162",
+};
+
+static const char *const sdc4_clk_groups[] = {
+ "gpio127",
+};
+
+static const char *const sdc4_cmd_groups[] = {
+ "gpio132",
+};
+
+static const char *const sdc4_data_groups[] = {
+ "gpio128",
+ "gpio129",
+ "gpio130",
+ "gpio131",
+};
+
+static const char *const smb_acok_n_groups[] = {
+ "gpio245",
+};
+
+static const char *const sys_throttle_groups[] = {
+ "gpio39",
+ "gpio94",
+};
+
+static const char *const tb_trig_sdc2_groups[] = {
+ "gpio137",
+};
+
+static const char *const tb_trig_sdc4_groups[] = {
+ "gpio133",
+};
+
+static const char *const tmess_prng_groups[] = {
+ "gpio92", "gpio93", "gpio94", "gpio95",
+};
+
+static const char *const tsense_pwm_groups[] = {
+ "gpio28", "gpio29", "gpio30", "gpio31",
+ "gpio34", "gpio138", "gpio139", "gpio140",
+};
+
+static const char *const tsense_therm_groups[] = {
+ "gpio141",
+};
+
+static const char *const usb0_dp_groups[] = {
+ "gpio122",
+};
+
+static const char *const usb0_phy_ps_groups[] = {
+ "gpio121",
+};
+
+static const char *const usb0_sbrx_groups[] = {
+ "gpio163",
+};
+
+static const char *const usb0_sbtx_groups[] = {
+ "gpio164",
+ "gpio165",
+};
+
+static const char *const usb0_tmu_groups[] = {
+ "gpio98",
+};
+
+static const char *const usb1_dbg_groups[] = {
+ "gpio105",
+ "gpio106",
+};
+
+static const char *const usb1_dp_groups[] = {
+ "gpio124",
+};
+
+static const char *const usb1_phy_ps_groups[] = {
+ "gpio123",
+};
+
+static const char *const usb1_sbrx_groups[] = {
+ "gpio172",
+};
+
+static const char *const usb1_sbtx_groups[] = {
+ "gpio173",
+ "gpio174",
+};
+
+static const char *const usb1_tmu_groups[] = {
+ "gpio98",
+};
+
+static const char *const usb2_dp_groups[] = {
+ "gpio126",
+};
+
+static const char *const usb2_phy_ps_groups[] = {
+ "gpio125",
+};
+
+static const char *const usb2_sbrx_groups[] = {
+ "gpio181",
+};
+
+static const char *const usb2_sbtx_groups[] = {
+ "gpio182",
+ "gpio183",
+};
+
+static const char *const usb2_tmu_groups[] = {
+ "gpio98",
+};
+
+static const char *const vsense_trigger_mirnat_groups[] = {
+ "gpio38",
+};
+
+static const char *const wcn_sw_groups[] = {
+ "gpio221",
+};
+
+static const char *const wcn_sw_ctrl_groups[] = {
+ "gpio214",
+};
+
+static const struct pinfunction glymur_functions[] = {
+	MSM_GPIO_PIN_FUNCTION(gpio),
+ MSM_PIN_FUNCTION(resout_gpio_n),
+ MSM_PIN_FUNCTION(aoss_cti),
+ MSM_PIN_FUNCTION(asc_cci),
+ MSM_PIN_FUNCTION(atest_char),
+ MSM_PIN_FUNCTION(atest_usb),
+ MSM_PIN_FUNCTION(audio_ext_mclk0),
+ MSM_PIN_FUNCTION(audio_ext_mclk1),
+ MSM_PIN_FUNCTION(audio_ref_clk),
+ MSM_PIN_FUNCTION(cam_asc_mclk4),
+ MSM_PIN_FUNCTION(cam_mclk),
+ MSM_PIN_FUNCTION(cci_async_in),
+ MSM_PIN_FUNCTION(cci_i2c_scl),
+ MSM_PIN_FUNCTION(cci_i2c_sda),
+ MSM_PIN_FUNCTION(cci_timer),
+ MSM_PIN_FUNCTION(cmu_rng),
+ MSM_PIN_FUNCTION(cri_trng),
+ MSM_PIN_FUNCTION(dbg_out_clk),
+ MSM_PIN_FUNCTION(ddr_bist_complete),
+ MSM_PIN_FUNCTION(ddr_bist_fail),
+ MSM_PIN_FUNCTION(ddr_bist_start),
+ MSM_PIN_FUNCTION(ddr_bist_stop),
+ MSM_PIN_FUNCTION(ddr_pxi),
+ MSM_PIN_FUNCTION(edp0_hot),
+ MSM_PIN_FUNCTION(edp0_lcd),
+ MSM_PIN_FUNCTION(edp1_lcd),
+ MSM_PIN_FUNCTION(egpio),
+ MSM_PIN_FUNCTION(eusb_ac_en),
+ MSM_PIN_FUNCTION(gcc_gp1),
+ MSM_PIN_FUNCTION(gcc_gp2),
+ MSM_PIN_FUNCTION(gcc_gp3),
+ MSM_PIN_FUNCTION(host2wlan_sol),
+ MSM_PIN_FUNCTION(i2c0_s_scl),
+ MSM_PIN_FUNCTION(i2c0_s_sda),
+ MSM_PIN_FUNCTION(i2s0_data),
+ MSM_PIN_FUNCTION(i2s0_sck),
+ MSM_PIN_FUNCTION(i2s0_ws),
+ MSM_PIN_FUNCTION(i2s1_data),
+ MSM_PIN_FUNCTION(i2s1_sck),
+ MSM_PIN_FUNCTION(i2s1_ws),
+ MSM_PIN_FUNCTION(ibi_i3c),
+ MSM_PIN_FUNCTION(jitter_bist),
+ MSM_PIN_FUNCTION(mdp_vsync_out),
+ MSM_PIN_FUNCTION(mdp_vsync_e),
+ MSM_PIN_FUNCTION(mdp_vsync_p),
+ MSM_PIN_FUNCTION(mdp_vsync_s),
+ MSM_PIN_FUNCTION(pcie3a_clk),
+ MSM_PIN_FUNCTION(pcie3a_rst_n),
+ MSM_PIN_FUNCTION(pcie3b_clk),
+ MSM_PIN_FUNCTION(pcie4_clk_req_n),
+ MSM_PIN_FUNCTION(pcie5_clk_req_n),
+ MSM_PIN_FUNCTION(pcie6_clk_req_n),
+ MSM_PIN_FUNCTION(phase_flag),
+ MSM_PIN_FUNCTION(pll_bist_sync),
+ MSM_PIN_FUNCTION(pll_clk_aux),
+ MSM_PIN_FUNCTION(pmc_oca_n),
+ MSM_PIN_FUNCTION(pmc_uva_n),
+ MSM_PIN_FUNCTION(prng_rosc),
+ MSM_PIN_FUNCTION(qdss_cti),
+ MSM_PIN_FUNCTION(qdss_gpio),
+ MSM_PIN_FUNCTION(qspi0),
+ MSM_PIN_FUNCTION(qup0_se0),
+ MSM_PIN_FUNCTION(qup0_se1),
+ MSM_PIN_FUNCTION(qup0_se2),
+ MSM_PIN_FUNCTION(qup0_se3),
+ MSM_PIN_FUNCTION(qup0_se4),
+ MSM_PIN_FUNCTION(qup0_se5),
+ MSM_PIN_FUNCTION(qup0_se6),
+ MSM_PIN_FUNCTION(qup0_se7),
+ MSM_PIN_FUNCTION(qup1_se0),
+ MSM_PIN_FUNCTION(qup1_se1),
+ MSM_PIN_FUNCTION(qup1_se2),
+ MSM_PIN_FUNCTION(qup1_se3),
+ MSM_PIN_FUNCTION(qup1_se4),
+ MSM_PIN_FUNCTION(qup1_se5),
+ MSM_PIN_FUNCTION(qup1_se6),
+ MSM_PIN_FUNCTION(qup1_se7),
+ MSM_PIN_FUNCTION(qup2_se0),
+ MSM_PIN_FUNCTION(qup2_se1),
+ MSM_PIN_FUNCTION(qup2_se2),
+ MSM_PIN_FUNCTION(qup2_se3),
+ MSM_PIN_FUNCTION(qup2_se4),
+ MSM_PIN_FUNCTION(qup2_se5),
+ MSM_PIN_FUNCTION(qup2_se6),
+ MSM_PIN_FUNCTION(qup2_se7),
+ MSM_PIN_FUNCTION(qup3_se0),
+ MSM_PIN_FUNCTION(qup3_se1),
+ MSM_PIN_FUNCTION(sd_write_protect),
+ MSM_PIN_FUNCTION(sdc4_clk),
+ MSM_PIN_FUNCTION(sdc4_cmd),
+ MSM_PIN_FUNCTION(sdc4_data),
+ MSM_PIN_FUNCTION(smb_acok_n),
+ MSM_PIN_FUNCTION(sys_throttle),
+ MSM_PIN_FUNCTION(tb_trig_sdc2),
+ MSM_PIN_FUNCTION(tb_trig_sdc4),
+ MSM_PIN_FUNCTION(tmess_prng),
+ MSM_PIN_FUNCTION(tsense_pwm),
+ MSM_PIN_FUNCTION(tsense_therm),
+ MSM_PIN_FUNCTION(usb0_dp),
+ MSM_PIN_FUNCTION(usb0_phy_ps),
+ MSM_PIN_FUNCTION(usb0_sbrx),
+ MSM_PIN_FUNCTION(usb0_sbtx),
+ MSM_PIN_FUNCTION(usb0_tmu),
+ MSM_PIN_FUNCTION(usb1_dbg),
+ MSM_PIN_FUNCTION(usb1_dp),
+ MSM_PIN_FUNCTION(usb1_phy_ps),
+ MSM_PIN_FUNCTION(usb1_sbrx),
+ MSM_PIN_FUNCTION(usb1_sbtx),
+ MSM_PIN_FUNCTION(usb1_tmu),
+ MSM_PIN_FUNCTION(usb2_dp),
+ MSM_PIN_FUNCTION(usb2_phy_ps),
+ MSM_PIN_FUNCTION(usb2_sbrx),
+ MSM_PIN_FUNCTION(usb2_sbtx),
+ MSM_PIN_FUNCTION(usb2_tmu),
+ MSM_PIN_FUNCTION(vsense_trigger_mirnat),
+ MSM_PIN_FUNCTION(wcn_sw),
+ MSM_PIN_FUNCTION(wcn_sw_ctrl),
+};
+
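+/* Per-pin mux table: GPIO is always function 0, followed by up to 11 alternates; '_' marks an unused slot. */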
+static const struct msm_pingroup glymur_groups[] = {
+ [0] = PINGROUP(0, qup0_se0, ibi_i3c, _, _, _, _, _, _, _, _, _),
+ [1] = PINGROUP(1, qup0_se0, ibi_i3c, _, _, _, _, _, _, _, _, _),
+ [2] = PINGROUP(2, qup0_se0, _, _, _, _, _, _, _, _, _, _),
+ [3] = PINGROUP(3, qup0_se0, _, _, _, _, _, _, _, _, _, _),
+ [4] = PINGROUP(4, qup0_se1, qup0_se6, ibi_i3c, _, _, _, _, _, _, _, _),
+ [5] = PINGROUP(5, qup0_se1, qup0_se6, ibi_i3c, _, _, _, _, _, _, _, _),
+ [6] = PINGROUP(6, qup0_se1, qup0_se6, i2c0_s_sda, phase_flag, _, _, _, _, _, _, _),
+ [7] = PINGROUP(7, qup0_se1, qup0_se6, i2c0_s_scl, phase_flag, _, _, _, _, _, _, _),
+ [8] = PINGROUP(8, qup0_se2, phase_flag, _, _, _, _, _, _, _, _, _),
+ [9] = PINGROUP(9, qup0_se2, phase_flag, _, _, _, _, _, _, _, _, _),
+ [10] = PINGROUP(10, qup0_se2, phase_flag, _, _, _, _, _, _, _, _, _),
+ [11] = PINGROUP(11, qup0_se2, phase_flag, _, _, _, _, _, _, _, _, _),
+ [12] = PINGROUP(12, qup0_se3, qup0_se7, phase_flag, _, _, _, _, _, _, _, _),
+ [13] = PINGROUP(13, qup0_se3, qup0_se7, phase_flag, _, _, _, _, _, _, _, _),
+ [14] = PINGROUP(14, qup0_se3, qup0_se7, phase_flag, _, _, _, _, _, _, _, _),
+ [15] = PINGROUP(15, qup0_se3, qup0_se7, phase_flag, _, _, _, _, _, _, _, _),
+ [16] = PINGROUP(16, qup0_se4, phase_flag, _, _, _, _, _, _, _, _, _),
+ [17] = PINGROUP(17, qup0_se4, qup0_se2, phase_flag, _, _, _, _, _, _, _, _),
+ [18] = PINGROUP(18, qup0_se4, qup0_se2, phase_flag, _, qdss_cti, _, _, _, _, _, _),
+ [19] = PINGROUP(19, qup0_se4, qup0_se2, phase_flag, _, qdss_cti, _, _, _, _, _, _),
+ [20] = PINGROUP(20, qup0_se5, _, phase_flag, _, _, _, _, _, _, _, _),
+ [21] = PINGROUP(21, qup0_se5, qup0_se3, _, phase_flag, _, _, _, _, _, _, _),
+ [22] = PINGROUP(22, qup0_se5, qup0_se3, _, phase_flag, _, _, _, _, _, _, _),
+ [23] = PINGROUP(23, qup0_se5, qup0_se3, phase_flag, _, qdss_cti, _, _, _, _, _, _),
+ [24] = PINGROUP(24, phase_flag, _, _, _, _, _, _, _, _, _, _),
+ [25] = PINGROUP(25, phase_flag, _, _, _, _, _, _, _, _, _, _),
+ [26] = PINGROUP(26, phase_flag, _, _, _, _, _, _, _, _, _, _),
+ [27] = PINGROUP(27, phase_flag, _, qdss_cti, _, _, _, _, _, _, _, _),
+ [28] = PINGROUP(28, pll_bist_sync, tsense_pwm, _, _, _, _, _, _, _, _, _),
+ [29] = PINGROUP(29, tsense_pwm, _, _, _, _, _, _, _, _, _, _),
+ [30] = PINGROUP(30, tsense_pwm, _, _, _, _, _, _, _, _, _, _),
+ [31] = PINGROUP(31, tsense_pwm, _, _, _, _, _, _, _, _, _, _),
+ [32] = PINGROUP(32, qup1_se0, ibi_i3c, _, _, _, _, _, _, _, _, _),
+ [33] = PINGROUP(33, qup1_se0, ibi_i3c, qup1_se3, _, _, _, _, _, _, _, _),
+ [34] = PINGROUP(34, qup1_se0, qup1_se3, tsense_pwm, _, _, _, _, _, _, _, _),
+ [35] = PINGROUP(35, qup1_se0, qup1_se3, pll_clk_aux, _, _, _, _, _, _, _, _),
+ [36] = PINGROUP(36, qup1_se1, ibi_i3c, _, _, _, _, _, _, _, _, _),
+ [37] = PINGROUP(37, qup1_se1, ibi_i3c, _, _, _, _, _, _, _, _, _),
+ [38] = PINGROUP(38, qup1_se1, atest_usb, ddr_pxi, vsense_trigger_mirnat, _, _, _, _,
+ _, _, _),
+ [39] = PINGROUP(39, qup1_se1, sys_throttle, _, atest_usb, ddr_pxi, _, _, _, _, _, _),
+ [40] = PINGROUP(40, qup1_se2, qup3_se1, _, atest_usb, ddr_pxi, _, _, _, _, _, _),
+ [41] = PINGROUP(41, qup1_se2, qup3_se1, qup3_se0, atest_usb, ddr_pxi, _, _, _, _,
+ _, _),
+ [42] = PINGROUP(42, qup1_se2, qup3_se1, qup0_se1, atest_usb, ddr_pxi, _, _, _, _,
+ _, _),
+ [43] = PINGROUP(43, qup1_se2, qup3_se1, _, atest_usb, ddr_pxi, _, _, _, _, _, _),
+ [44] = PINGROUP(44, qup1_se3, _, atest_usb, ddr_pxi, _, _, _, _, _, _, _),
+ [45] = PINGROUP(45, qup1_se3, cmu_rng, _, atest_usb, ddr_pxi, _, _, _, _, _, _),
+ [46] = PINGROUP(46, qup1_se3, cmu_rng, _, atest_usb, ddr_pxi, _, _, _, _, _, _),
+ [47] = PINGROUP(47, qup1_se3, cmu_rng, _, atest_usb, ddr_pxi, _, _, _, _, _, _),
+ [48] = PINGROUP(48, qup1_se4, qup3_se1, cmu_rng, _, atest_usb, ddr_pxi, _, _, _,
+ _, _),
+ [49] = PINGROUP(49, qup1_se4, qup1_se2, qup3_se1, _, atest_usb, ddr_pxi, _, _,
+ _, _, _),
+ [50] = PINGROUP(50, qup1_se4, qup1_se2, qup3_se1, _, atest_usb, ddr_pxi, _, _,
+ _, _, _),
+ [51] = PINGROUP(51, qup1_se4, qup1_se2, qup3_se1, dbg_out_clk, atest_usb,
+ ddr_pxi, _, _, _, _, _),
+ [52] = PINGROUP(52, qup1_se5, qup1_se7, jitter_bist, atest_usb, ddr_pxi, _, _, _,
+ _, _, _),
+ [53] = PINGROUP(53, qup1_se5, qup1_se7, _, atest_usb, ddr_pxi, _, _, _, _, _, _),
+ [54] = PINGROUP(54, qup1_se5, qup1_se7, ddr_bist_start, atest_usb, ddr_pxi, _, _,
+ _, _, _, _),
+ [55] = PINGROUP(55, qup1_se5, qup1_se7, ddr_bist_stop, atest_usb, ddr_pxi, _, _,
+ _, _, _, _),
+ [56] = PINGROUP(56, qup1_se6, ddr_bist_fail, _, _, _, _, _, _, _, _, _),
+ [57] = PINGROUP(57, qup1_se6, ddr_bist_complete, _, _, _, _, _, _, _, _, _),
+ [58] = PINGROUP(58, qup1_se6, _, _, _, _, _, _, _, _, _, _),
+ [59] = PINGROUP(59, qup1_se6, _, _, _, _, _, _, _, _, _, _),
+ [60] = PINGROUP(60, aoss_cti, _, _, _, _, _, _, _, _, _, _),
+ [61] = PINGROUP(61, aoss_cti, _, _, _, _, _, _, _, _, _, _),
+ [62] = PINGROUP(62, aoss_cti, _, _, _, _, _, _, _, _, _, _),
+ [63] = PINGROUP(63, aoss_cti, _, _, _, _, _, _, _, _, _, _),
+ [64] = PINGROUP(64, qup2_se0, ibi_i3c, gcc_gp2, _, _, _, _, _, _, _, _),
+ [65] = PINGROUP(65, qup2_se0, qup2_se3, ibi_i3c, atest_usb, ddr_pxi, _, _, _, _,
+ _, _),
+ [66] = PINGROUP(66, qup2_se0, qup2_se3, atest_usb, ddr_pxi, _, _, _, _, _, _, _),
+ [67] = PINGROUP(67, qup2_se0, qup2_se3, _, _, _, _, _, _, _, _, _),
+ [68] = PINGROUP(68, qup2_se1, ibi_i3c, _, _, _, _, _, _, _, _, _),
+ [69] = PINGROUP(69, qup2_se1, ibi_i3c, _, _, _, _, _, _, _, _, _),
+ [70] = PINGROUP(70, qup2_se1, _, _, _, _, _, _, _, _, _, _),
+ [71] = PINGROUP(71, qup2_se1, gcc_gp1, _, _, _, _, _, _, _, _, _),
+ [72] = PINGROUP(72, qup2_se2, gcc_gp1, atest_usb, ddr_pxi, _, _, _, _, _, _, _),
+ [73] = PINGROUP(73, qup2_se2, gcc_gp2, atest_usb, ddr_pxi, _, _, _, _, _, _, _),
+ [74] = PINGROUP(74, qup2_se2, gcc_gp3, _, _, _, _, _, _, _, _, _),
+ [75] = PINGROUP(75, qup2_se2, _, _, _, _, _, _, _, _, _, _),
+ [76] = PINGROUP(76, qup2_se3, phase_flag, _, _, _, _, _, _, _, _, _),
+ [77] = PINGROUP(77, qup2_se3, phase_flag, _, _, _, _, _, _, _, _, _),
+ [78] = PINGROUP(78, qup2_se3, phase_flag, _, _, _, _, _, _, _, _, _),
+ [79] = PINGROUP(79, qup2_se3, _, _, _, _, _, _, _, _, _, _),
+ [80] = PINGROUP(80, qup2_se4, qup2_se7, atest_usb, ddr_pxi, _, _, _, _, _, _, _),
+ [81] = PINGROUP(81, qup2_se4, qup2_se2, qup2_se7, atest_usb, ddr_pxi, _, _, _,
+ _, _, _),
+ [82] = PINGROUP(82, qup2_se4, qup2_se2, qup2_se7, gcc_gp3, _, _, _, _, _, _, _),
+ [83] = PINGROUP(83, qup2_se4, qup2_se2, qup2_se7, _, _, _, _, _, _, _, _),
+ [84] = PINGROUP(84, qup2_se5, _, _, _, _, _, _, _, _, _, _),
+ [85] = PINGROUP(85, qup2_se5, _, _, _, _, _, _, _, _, _, _),
+ [86] = PINGROUP(86, qup2_se5, _, _, _, _, _, _, _, _, _, _),
+ [87] = PINGROUP(87, qup2_se5, _, _, _, _, _, _, _, _, _, _),
+ [88] = PINGROUP(88, qup2_se6, _, _, _, _, _, _, _, _, _, _),
+ [89] = PINGROUP(89, qup2_se6, _, _, _, _, _, _, _, _, _, _),
+ [90] = PINGROUP(90, qup2_se6, _, _, _, _, _, _, _, _, _, _),
+ [91] = PINGROUP(91, qup2_se6, _, _, _, _, _, _, _, _, _, _),
+ [92] = PINGROUP(92, tmess_prng, _, _, _, _, _, _, _, _, _, _),
+ [93] = PINGROUP(93, tmess_prng, _, _, _, _, _, _, _, _, _, _),
+ [94] = PINGROUP(94, sys_throttle, tmess_prng, _, _, _, _, _, _, _, _, _),
+ [95] = PINGROUP(95, tmess_prng, _, _, _, _, _, _, _, _, _, _),
+ [96] = PINGROUP(96, cam_mclk, qdss_gpio, _, _, _, _, _, _, _, _, _),
+ [97] = PINGROUP(97, cam_mclk, qdss_gpio, _, _, _, _, _, _, _, _, _),
+ [98] = PINGROUP(98, cam_mclk, mdp_vsync_p, usb0_tmu, usb1_tmu, usb2_tmu, _, _, _, _, _, _),
+ [99] = PINGROUP(99, cam_mclk, qdss_gpio, _, _, _, _, _, _, _, _, _),
+ [100] = PINGROUP(100, cam_asc_mclk4, qdss_gpio, _, _, _, _, _, _, _, _, _),
+ [101] = PINGROUP(101, cci_i2c_sda, qdss_gpio, _, _, _, _, _, _, _, _, _),
+ [102] = PINGROUP(102, cci_i2c_scl, qdss_gpio, _, _, _, _, _, _, _, _, _),
+ [103] = PINGROUP(103, cci_i2c_sda, qdss_gpio, _, _, _, _, _, _, _, _, _),
+ [104] = PINGROUP(104, cci_i2c_scl, qdss_gpio, _, _, _, _, _, _, _, _, _),
+ [105] = PINGROUP(105, cci_i2c_sda, mdp_vsync_s, usb1_dbg, _, _, _, _, _, _, _, _),
+ [106] = PINGROUP(106, cci_i2c_scl, mdp_vsync_e, usb1_dbg, _, _, _, _, _, _, _, _),
+ [107] = PINGROUP(107, qdss_gpio, _, _, _, _, _, _, _, _, _, _),
+ [108] = PINGROUP(108, qdss_gpio, _, _, _, _, _, _, _, _, _, _),
+ [109] = PINGROUP(109, cci_timer, mdp_vsync_out, qdss_gpio, _, _, _, _, _, _, _, _),
+ [110] = PINGROUP(110, cci_timer, mdp_vsync_out, qdss_gpio, _, _, _, _, _, _, _, _),
+ [111] = PINGROUP(111, cci_timer, cci_async_in, mdp_vsync_out, qdss_gpio, _, _, _, _,
+ _, _, _),
+ [112] = PINGROUP(112, cci_timer, cci_async_in, mdp_vsync_out, qdss_gpio, _, _, _, _,
+ _, _, _),
+ [113] = PINGROUP(113, cci_timer, cci_async_in, mdp_vsync_out, qdss_gpio, _, _, _, _,
+ _, _, _),
+ [114] = PINGROUP(114, mdp_vsync_out, mdp_vsync_out, _, _, _, _, _, _, _, _, _),
+ [115] = PINGROUP(115, mdp_vsync_out, mdp_vsync_out, edp1_lcd, _, _, _, _, _, _, _, _),
+ [116] = PINGROUP(116, _, _, _, _, _, _, _, _, _, _, _),
+ [117] = PINGROUP(117, _, _, _, _, _, _, _, _, _, _, _),
+ [118] = PINGROUP(118, host2wlan_sol, _, _, _, _, _, _, _, _, _, _),
+ [119] = PINGROUP(119, edp0_hot, edp1_lcd, _, _, _, _, _, _, _, _, _),
+ [120] = PINGROUP(120, edp0_lcd, _, _, _, _, _, _, _, _, _, _),
+ [121] = PINGROUP(121, usb0_phy_ps, _, _, _, _, _, _, _, _, _, _),
+ [122] = PINGROUP(122, usb0_dp, _, _, _, _, _, _, _, _, _, _),
+ [123] = PINGROUP(123, usb1_phy_ps, _, _, _, _, _, _, _, _, _, _),
+ [124] = PINGROUP(124, usb1_dp, _, _, _, _, _, _, _, _, _, _),
+ [125] = PINGROUP(125, usb2_phy_ps, _, _, _, _, _, _, _, _, _, _),
+ [126] = PINGROUP(126, usb2_dp, _, _, _, _, _, _, _, _, _, _),
+ [127] = PINGROUP(127, qspi0, sdc4_clk, qup3_se0, _, _, _, _, _, _, _, _),
+ [128] = PINGROUP(128, qspi0, sdc4_data, qup3_se0, _, _, _, _, _, _, _, _),
+ [129] = PINGROUP(129, qspi0, sdc4_data, qup3_se0, _, _, _, _, _, _, _, _),
+ [130] = PINGROUP(130, qspi0, sdc4_data, qup3_se0, _, _, _, _, _, _, _, _),
+ [131] = PINGROUP(131, qspi0, sdc4_data, qup3_se0, _, _, _, _, _, _, _, _),
+ [132] = PINGROUP(132, qspi0, sdc4_cmd, qup3_se0, _, _, _, _, _, _, _, _),
+ [133] = PINGROUP(133, qspi0, tb_trig_sdc4, qup3_se0, _, _, _, _, _, _, _, _),
+ [134] = PINGROUP(134, audio_ext_mclk0, _, _, _, _, _, _, _, _, _, _),
+ [135] = PINGROUP(135, i2s0_sck, _, _, _, _, _, _, _, _, _, _),
+ [136] = PINGROUP(136, i2s0_data, _, _, _, _, _, _, _, _, _, _),
+ [137] = PINGROUP(137, i2s0_data, tb_trig_sdc2, _, _, _, _, _, _, _, _, _),
+ [138] = PINGROUP(138, i2s0_ws, tsense_pwm, _, _, _, _, _, _, _, _, _),
+ [139] = PINGROUP(139, i2s1_sck, tsense_pwm, _, _, _, _, _, _, _, _, _),
+ [140] = PINGROUP(140, i2s1_data, tsense_pwm, _, _, _, _, _, _, _, _, _),
+ [141] = PINGROUP(141, i2s1_ws, tsense_therm, _, _, _, _, _, _, _, _, _),
+ [142] = PINGROUP(142, i2s1_data, audio_ext_mclk1, audio_ref_clk, _, _, _, _, _, _, _, _),
+ [143] = PINGROUP(143, pcie3a_rst_n, _, _, _, _, _, _, _, _, _, _),
+ [144] = PINGROUP(144, pcie3a_clk, _, _, _, _, _, _, _, _, _, _),
+ [145] = PINGROUP(145, _, _, _, _, _, _, _, _, _, _, _),
+ [146] = PINGROUP(146, _, _, _, _, _, _, _, _, _, _, _),
+ [147] = PINGROUP(147, pcie4_clk_req_n, _, _, _, _, _, _, _, _, _, _),
+ [148] = PINGROUP(148, _, _, _, _, _, _, _, _, _, _, _),
+ [149] = PINGROUP(149, qdss_gpio, _, _, _, _, _, _, _, _, _, _),
+ [150] = PINGROUP(150, pcie6_clk_req_n, _, _, _, _, _, _, _, _, _, _),
+ [151] = PINGROUP(151, qdss_gpio, _, _, _, _, _, _, _, _, _, _),
+ [152] = PINGROUP(152, qdss_gpio, _, _, _, _, _, _, _, _, _, _),
+ [153] = PINGROUP(153, pcie5_clk_req_n, _, _, _, _, _, _, _, _, _, _),
+ [154] = PINGROUP(154, _, _, _, _, _, _, _, _, _, _, _),
+ [155] = PINGROUP(155, _, _, _, _, _, _, _, _, _, _, _),
+ [156] = PINGROUP(156, pcie3b_clk, _, _, _, _, _, _, _, _, _, _),
+ [157] = PINGROUP(157, _, _, _, _, _, _, _, _, _, _, _),
+ [158] = PINGROUP(158, _, _, _, _, _, _, _, _, _, _, _),
+ [159] = PINGROUP(159, _, _, _, _, _, _, _, _, _, _, _),
+ [160] = PINGROUP(160, resout_gpio_n, _, _, _, _, _, _, _, _, _, _),
+ [161] = PINGROUP(161, qdss_cti, _, _, _, _, _, _, _, _, _, _),
+ [162] = PINGROUP(162, sd_write_protect, qdss_cti, _, _, _, _, _, _, _, _, _),
+ [163] = PINGROUP(163, usb0_sbrx, prng_rosc, phase_flag, _, atest_char, _, _, _,
+ _, _, _),
+ [164] = PINGROUP(164, usb0_sbtx, prng_rosc, phase_flag, _, atest_char, _, _, _, _, _,
+ _),
+ [165] = PINGROUP(165, usb0_sbtx, _, _, _, _, _, _, _, _, _, _),
+ [166] = PINGROUP(166, _, _, _, _, _, _, _, _, _, _, _),
+ [167] = PINGROUP(167, _, _, _, _, _, _, _, _, _, _, _),
+ [168] = PINGROUP(168, eusb_ac_en, _, _, _, _, _, _, _, _, _, _),
+ [169] = PINGROUP(169, eusb_ac_en, _, _, _, _, _, _, _, _, _, _),
+ [170] = PINGROUP(170, _, _, _, _, _, _, _, _, _, _, _),
+ [171] = PINGROUP(171, _, _, _, _, _, _, _, _, _, _, _),
+ [172] = PINGROUP(172, usb1_sbrx, phase_flag, _, atest_char, _, _, _, _, _, _, _),
+ [173] = PINGROUP(173, usb1_sbtx, cri_trng, phase_flag, _, _, _, _, _, _, _, _),
+ [174] = PINGROUP(174, usb1_sbtx, _, _, _, _, _, _, _, _, _, _),
+ [175] = PINGROUP(175, _, _, _, _, _, _, _, _, _, _, _),
+ [176] = PINGROUP(176, _, _, _, _, _, _, _, _, _, _, _),
+ [177] = PINGROUP(177, eusb_ac_en, _, _, _, _, _, _, _, _, _, _),
+ [178] = PINGROUP(178, eusb_ac_en, _, _, _, _, _, _, _, _, _, _),
+ [179] = PINGROUP(179, _, _, _, _, _, _, _, _, _, _, _),
+ [180] = PINGROUP(180, _, _, _, _, _, _, _, _, _, _, _),
+ [181] = PINGROUP(181, usb2_sbrx, _, _, _, _, _, _, _, _, _, _),
+ [182] = PINGROUP(182, usb2_sbtx, _, _, _, _, _, _, _, _, _, _),
+ [183] = PINGROUP(183, usb2_sbtx, _, _, _, _, _, _, _, _, _, _),
+ [184] = PINGROUP(184, phase_flag, _, atest_char, _, _, _, _, _, _, _, _),
+ [185] = PINGROUP(185, _, _, _, _, _, _, _, _, _, _, _),
+ [186] = PINGROUP(186, eusb_ac_en, prng_rosc, phase_flag, _, _, _, _, _, _, _, _),
+ [187] = PINGROUP(187, eusb_ac_en, _, _, _, _, _, _, _, _, _, _),
+ [188] = PINGROUP(188, prng_rosc, phase_flag, _, atest_char, _, _, _, _, _, _, _),
+ [189] = PINGROUP(189, _, _, _, _, _, _, _, _, _, _, _),
+ [190] = PINGROUP(190, _, _, _, _, _, _, _, _, _, _, _),
+ [191] = PINGROUP(191, _, _, _, _, _, _, _, _, _, _, _),
+ [192] = PINGROUP(192, _, _, _, _, _, _, _, _, _, _, egpio),
+ [193] = PINGROUP(193, _, _, _, _, _, _, _, _, _, _, egpio),
+ [194] = PINGROUP(194, _, _, _, _, _, _, _, _, _, _, egpio),
+ [195] = PINGROUP(195, _, _, _, _, _, _, _, _, _, _, egpio),
+ [196] = PINGROUP(196, _, _, _, _, _, _, _, _, _, _, egpio),
+ [197] = PINGROUP(197, _, _, _, _, _, _, _, _, _, _, egpio),
+ [198] = PINGROUP(198, _, _, _, _, _, _, _, _, _, _, egpio),
+ [199] = PINGROUP(199, _, _, _, _, _, _, _, _, _, _, egpio),
+ [200] = PINGROUP(200, _, _, _, _, _, _, _, _, _, _, egpio),
+ [201] = PINGROUP(201, _, _, _, _, _, _, _, _, _, _, egpio),
+ [202] = PINGROUP(202, _, _, _, _, _, _, _, _, _, _, egpio),
+ [203] = PINGROUP(203, _, _, _, _, _, _, _, _, _, _, egpio),
+ [204] = PINGROUP(204, _, _, _, _, _, _, _, _, _, _, egpio),
+ [205] = PINGROUP(205, _, _, _, _, _, _, _, _, _, _, egpio),
+ [206] = PINGROUP(206, _, _, _, _, _, _, _, _, _, _, egpio),
+ [207] = PINGROUP(207, _, _, _, _, _, _, _, _, _, _, egpio),
+ [208] = PINGROUP(208, _, _, _, _, _, _, _, _, _, _, egpio),
+ [209] = PINGROUP(209, _, _, _, _, _, _, _, _, _, _, egpio),
+ [210] = PINGROUP(210, _, _, _, _, _, _, _, _, _, _, egpio),
+ [211] = PINGROUP(211, _, _, _, _, _, _, _, _, _, _, egpio),
+ [212] = PINGROUP(212, _, _, _, _, _, _, _, _, _, _, egpio),
+ [213] = PINGROUP(213, _, _, _, _, _, _, _, _, _, _, egpio),
+ [214] = PINGROUP(214, wcn_sw_ctrl, _, _, _, _, _, _, _, _, _, egpio),
+ [215] = PINGROUP(215, _, qdss_cti, _, _, _, _, _, _, _, _, egpio),
+ [216] = PINGROUP(216, _, _, _, _, _, _, _, _, _, _, egpio),
+ [217] = PINGROUP(217, _, qdss_cti, _, _, _, _, _, _, _, _, egpio),
+ [218] = PINGROUP(218, _, _, _, _, _, _, _, _, _, _, egpio),
+ [219] = PINGROUP(219, _, qdss_gpio, _, _, _, _, _, _, _, _, egpio),
+ [220] = PINGROUP(220, _, qdss_gpio, _, _, _, _, _, _, _, _, egpio),
+ [221] = PINGROUP(221, wcn_sw, _, qdss_gpio, _, _, _, _, _, _, _, egpio),
+ [222] = PINGROUP(222, _, qdss_gpio, _, _, _, _, _, _, _, _, egpio),
+ [223] = PINGROUP(223, _, qdss_gpio, _, _, _, _, _, _, _, _, egpio),
+ [224] = PINGROUP(224, _, qdss_gpio, _, _, _, _, _, _, _, _, egpio),
+ [225] = PINGROUP(225, _, qdss_gpio, _, _, _, _, _, _, _, _, egpio),
+ [226] = PINGROUP(226, _, qdss_gpio, _, _, _, _, _, _, _, _, egpio),
+ [227] = PINGROUP(227, _, qdss_gpio, _, _, _, _, _, _, _, _, egpio),
+ [228] = PINGROUP(228, _, qdss_gpio, _, _, _, _, _, _, _, _, egpio),
+ [229] = PINGROUP(229, _, _, _, _, _, _, _, _, _, _, egpio),
+ [230] = PINGROUP(230, _, _, _, _, _, _, _, _, _, _, egpio),
+ [231] = PINGROUP(231, qdss_gpio, _, _, _, _, _, _, _, _, _, egpio),
+ [232] = PINGROUP(232, qdss_gpio, _, _, _, _, _, _, _, _, _, egpio),
+ [233] = PINGROUP(233, qdss_gpio, _, _, _, _, _, _, _, _, _, egpio),
+ [234] = PINGROUP(234, qdss_gpio, _, _, _, _, _, _, _, _, _, egpio),
+ [235] = PINGROUP(235, asc_cci, qdss_gpio, _, _, _, _, _, _, _, _, egpio),
+ [236] = PINGROUP(236, asc_cci, qdss_gpio, _, _, _, _, _, _, _, _, egpio),
+ [237] = PINGROUP(237, qdss_gpio, _, _, _, _, _, _, _, _, _, egpio),
+ [238] = PINGROUP(238, qdss_gpio, _, _, _, _, _, _, _, _, _, egpio),
+ [239] = PINGROUP(239, _, _, _, _, _, _, _, _, _, _, egpio),
+ [240] = PINGROUP(240, _, _, _, _, _, _, _, _, _, _, egpio),
+ [241] = PINGROUP(241, _, _, _, _, _, _, _, _, _, _, egpio),
+ [242] = PINGROUP(242, _, _, _, _, _, _, _, _, _, _, egpio),
+ [243] = PINGROUP(243, _, _, _, _, _, _, _, _, _, _, egpio),
+ [244] = PINGROUP(244, _, _, _, _, _, _, _, _, _, _, egpio),
+ [245] = PINGROUP(245, smb_acok_n, _, _, _, _, _, _, _, _, _, _),
+ [246] = PINGROUP(246, _, _, _, _, _, _, _, _, _, _, _),
+ [247] = PINGROUP(247, qup3_se0, _, _, _, _, _, _, _, _, _, _),
+ [248] = PINGROUP(248, pmc_uva_n, _, _, _, _, _, _, _, _, _, _),
+ [249] = PINGROUP(249, pmc_oca_n, _, _, _, _, _, _, _, _, _, _),
+ [250] = UFS_RESET(ufs_reset, 0x104004, 0x105000),
+ [251] = SDC_QDSD_PINGROUP(sdc2_clk, 0xff000, 14, 6),
+ [252] = SDC_QDSD_PINGROUP(sdc2_cmd, 0xff000, 11, 3),
+ [253] = SDC_QDSD_PINGROUP(sdc2_data, 0xff000, 9, 0),
+};
+
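+/* Wakeup routing: { TLMM GPIO, PDC interrupt } pairs for wake-capable pins. */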
+static const struct msm_gpio_wakeirq_map glymur_pdc_map[] = {
+ { 0, 116 }, { 2, 114 }, { 3, 115 }, { 4, 175 }, { 5, 176 },
+ { 7, 111 }, { 11, 129 }, { 13, 130 }, { 15, 112 }, { 19, 113 },
+ { 23, 187 }, { 27, 188 }, { 28, 121 }, { 29, 122 }, { 30, 136 },
+ { 31, 203 }, { 32, 189 }, { 34, 174 }, { 35, 190 }, { 36, 191 },
+ { 39, 124 }, { 43, 192 }, { 47, 193 }, { 51, 123 }, { 53, 133 },
+ { 55, 125 }, { 59, 131 }, { 64, 134 }, { 65, 150 }, { 66, 186 },
+ { 67, 132 }, { 68, 195 }, { 71, 135 }, { 75, 196 }, { 79, 197 },
+ { 83, 198 }, { 84, 181 }, { 85, 199 }, { 87, 200 }, { 91, 201 },
+ { 92, 182 }, { 93, 183 }, { 94, 184 }, { 95, 185 }, { 98, 202 },
+ { 105, 157 }, { 113, 128 }, { 121, 117 }, { 123, 118 }, { 125, 119 },
+ { 129, 120 }, { 131, 126 }, { 132, 160 }, { 133, 194 }, { 134, 127 },
+ { 141, 137 }, { 143, 159 }, { 144, 138 }, { 145, 139 }, { 147, 140 },
+ { 148, 141 }, { 150, 146 }, { 151, 147 }, { 153, 148 }, { 154, 144 },
+ { 156, 149 }, { 157, 151 }, { 163, 142 }, { 172, 143 }, { 181, 145 },
+ { 193, 161 }, { 196, 152 }, { 203, 177 }, { 208, 178 }, { 215, 162 },
+ { 217, 153 }, { 220, 154 }, { 221, 155 }, { 228, 179 }, { 230, 180 },
+ { 232, 206 }, { 234, 172 }, { 235, 173 }, { 242, 158 }, { 244, 156 },
+};
+
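+/* ngpios covers gpio0-249 plus the UFS reset line; mux slot 11 selects the eGPIO path. */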
+static const struct msm_pinctrl_soc_data glymur_tlmm = {
+ .pins = glymur_pins,
+ .npins = ARRAY_SIZE(glymur_pins),
+ .functions = glymur_functions,
+ .nfunctions = ARRAY_SIZE(glymur_functions),
+ .groups = glymur_groups,
+ .ngroups = ARRAY_SIZE(glymur_groups),
+ .ngpios = 251,
+ .wakeirq_map = glymur_pdc_map,
+ .nwakeirq_map = ARRAY_SIZE(glymur_pdc_map),
+ .egpio_func = 11,
+};
+
+static const struct of_device_id glymur_tlmm_of_match[] = {
+ { .compatible = "qcom,glymur-tlmm", .data = &glymur_tlmm },
+ { }
+};
+
+static int glymur_tlmm_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &glymur_tlmm);
+}
+
+static struct platform_driver glymur_tlmm_driver = {
+ .driver = {
+ .name = "glymur-tlmm",
+ .of_match_table = glymur_tlmm_of_match,
+ },
+ .probe = glymur_tlmm_probe,
+};
+
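+/* Register at arch_initcall() time so TLMM consumers can probe early. */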
+static int __init glymur_tlmm_init(void)
+{
+ return platform_driver_register(&glymur_tlmm_driver);
+}
+arch_initcall(glymur_tlmm_init);
+
+static void __exit glymur_tlmm_exit(void)
+{
+ platform_driver_unregister(&glymur_tlmm_driver);
+}
+module_exit(glymur_tlmm_exit);
+
+MODULE_DESCRIPTION("QTI GLYMUR TLMM driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, glymur_tlmm_of_match);
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq5018.c b/drivers/pinctrl/qcom/pinctrl-ipq5018.c
index 10b99d5d8a11..cbf34854f882 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq5018.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq5018.c
@@ -630,7 +630,7 @@ static const struct pinfunction ipq5018_functions[] = {
MSM_PIN_FUNCTION(eud_gpio),
MSM_PIN_FUNCTION(gcc_plltest),
MSM_PIN_FUNCTION(gcc_tlmm),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(led0),
MSM_PIN_FUNCTION(led2),
MSM_PIN_FUNCTION(mac0),
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq5332.c b/drivers/pinctrl/qcom/pinctrl-ipq5332.c
index 1ac2fc09c119..239cbe75f198 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq5332.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq5332.c
@@ -692,7 +692,7 @@ static const struct pinfunction ipq5332_functions[] = {
MSM_PIN_FUNCTION(dbg_out),
MSM_PIN_FUNCTION(gcc_plltest),
MSM_PIN_FUNCTION(gcc_tlmm),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(lock_det),
MSM_PIN_FUNCTION(mac0),
MSM_PIN_FUNCTION(mac1),
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq5424.c b/drivers/pinctrl/qcom/pinctrl-ipq5424.c
index 7ff1f8acc1a3..67b452a033d6 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq5424.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq5424.c
@@ -641,7 +641,7 @@ static const struct pinfunction ipq5424_functions[] = {
MSM_PIN_FUNCTION(dbg_out),
MSM_PIN_FUNCTION(gcc_plltest),
MSM_PIN_FUNCTION(gcc_tlmm),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(i2c0_scl),
MSM_PIN_FUNCTION(i2c0_sda),
MSM_PIN_FUNCTION(i2c1_scl),
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq6018.c b/drivers/pinctrl/qcom/pinctrl-ipq6018.c
index a4ba980252e1..be177fb0a92d 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq6018.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq6018.c
@@ -891,7 +891,7 @@ static const struct pinfunction ipq6018_functions[] = {
MSM_PIN_FUNCTION(dbg_out),
MSM_PIN_FUNCTION(gcc_plltest),
MSM_PIN_FUNCTION(gcc_tlmm),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(lpass_aud),
MSM_PIN_FUNCTION(lpass_aud0),
MSM_PIN_FUNCTION(lpass_aud1),
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq8074.c b/drivers/pinctrl/qcom/pinctrl-ipq8074.c
index 482f13282fc2..e94de9083314 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq8074.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq8074.c
@@ -838,7 +838,7 @@ static const struct pinfunction ipq8074_functions[] = {
MSM_PIN_FUNCTION(dbg_out),
MSM_PIN_FUNCTION(gcc_plltest),
MSM_PIN_FUNCTION(gcc_tlmm),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(ldo_en),
MSM_PIN_FUNCTION(ldo_update),
MSM_PIN_FUNCTION(led0),
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq9574.c b/drivers/pinctrl/qcom/pinctrl-ipq9574.c
index 89c05d8eb550..3ed093ea8eb9 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq9574.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq9574.c
@@ -651,7 +651,7 @@ static const struct pinfunction ipq9574_functions[] = {
MSM_PIN_FUNCTION(dwc_ddrphy),
MSM_PIN_FUNCTION(gcc_plltest),
MSM_PIN_FUNCTION(gcc_tlmm),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(mac),
MSM_PIN_FUNCTION(mdc),
MSM_PIN_FUNCTION(mdio),
diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
index 57fefeb603f0..1c97ec44aa5f 100644
--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
@@ -41,13 +41,27 @@ struct lpi_pinctrl {
static int lpi_gpio_read(struct lpi_pinctrl *state, unsigned int pin,
unsigned int addr)
{
- return ioread32(state->tlmm_base + LPI_TLMM_REG_OFFSET * pin + addr);
+ u32 pin_offset;
+
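+	/* LPI instances with irregular pad spacing carry a per-group offset instead of a fixed stride. */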
+ if (state->data->flags & LPI_FLAG_USE_PREDEFINED_PIN_OFFSET)
+ pin_offset = state->data->groups[pin].pin_offset;
+ else
+ pin_offset = LPI_TLMM_REG_OFFSET * pin;
+
+ return ioread32(state->tlmm_base + pin_offset + addr);
}
static int lpi_gpio_write(struct lpi_pinctrl *state, unsigned int pin,
unsigned int addr, unsigned int val)
{
- iowrite32(val, state->tlmm_base + LPI_TLMM_REG_OFFSET * pin + addr);
+ u32 pin_offset;
+
+ if (state->data->flags & LPI_FLAG_USE_PREDEFINED_PIN_OFFSET)
+ pin_offset = state->data->groups[pin].pin_offset;
+ else
+ pin_offset = LPI_TLMM_REG_OFFSET * pin;
+
+ iowrite32(val, state->tlmm_base + pin_offset + addr);
return 0;
}
@@ -174,7 +188,7 @@ static int lpi_config_get(struct pinctrl_dev *pctldev,
arg = 1;
break;
case PIN_CONFIG_INPUT_ENABLE:
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
if (is_out)
arg = 1;
break;
@@ -252,7 +266,7 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
case PIN_CONFIG_INPUT_ENABLE:
output_enabled = false;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
output_enabled = true;
value = arg;
break;
@@ -314,7 +328,7 @@ static int lpi_gpio_direction_output(struct gpio_chip *chip,
struct lpi_pinctrl *state = gpiochip_get_data(chip);
unsigned long config;
- config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, val);
+ config = pinconf_to_config_packed(PIN_CONFIG_LEVEL, val);
return lpi_config_set(state->ctrl, pin, &config, 1);
}
@@ -332,7 +346,7 @@ static int lpi_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
struct lpi_pinctrl *state = gpiochip_get_data(chip);
unsigned long config;
- config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, value);
+ config = pinconf_to_config_packed(PIN_CONFIG_LEVEL, value);
return lpi_config_set(state->ctrl, pin, &config, 1);
}
@@ -398,7 +412,7 @@ static const struct gpio_chip lpi_gpio_template = {
.direction_input = lpi_gpio_direction_input,
.direction_output = lpi_gpio_direction_output,
.get = lpi_gpio_get,
- .set_rv = lpi_gpio_set,
+ .set = lpi_gpio_set,
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
.dbg_show = lpi_gpio_dbg_show,
diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h
index a9b2f65c1ebe..f48368492861 100644
--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h
+++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h
@@ -55,6 +55,22 @@ struct pinctrl_pin_desc;
LPI_MUX_##f4, \
}, \
.nfuncs = 5, \
+ .pin_offset = 0, \
+ }
+
+#define LPI_PINGROUP_OFFSET(id, soff, f1, f2, f3, f4, poff) \
+ { \
+ .pin = id, \
+ .slew_offset = soff, \
+ .funcs = (int[]){ \
+ LPI_MUX_gpio, \
+ LPI_MUX_##f1, \
+ LPI_MUX_##f2, \
+ LPI_MUX_##f3, \
+ LPI_MUX_##f4, \
+ }, \
+ .nfuncs = 5, \
+ .pin_offset = poff, \
}
/*
@@ -62,6 +78,7 @@ struct pinctrl_pin_desc;
* pin configuration.
*/
#define LPI_FLAG_SLEW_RATE_SAME_REG BIT(0)
+#define LPI_FLAG_USE_PREDEFINED_PIN_OFFSET BIT(1)
struct lpi_pingroup {
unsigned int pin;
@@ -69,6 +86,7 @@ struct lpi_pingroup {
int slew_offset;
unsigned int *funcs;
unsigned int nfuncs;
+ unsigned int pin_offset;
};
struct lpi_function {
diff --git a/drivers/pinctrl/qcom/pinctrl-mdm9607.c b/drivers/pinctrl/qcom/pinctrl-mdm9607.c
index 3e18ba124fed..cef330547ce7 100644
--- a/drivers/pinctrl/qcom/pinctrl-mdm9607.c
+++ b/drivers/pinctrl/qcom/pinctrl-mdm9607.c
@@ -861,7 +861,7 @@ static const struct pinfunction mdm9607_functions[] = {
MSM_PIN_FUNCTION(gcc_plltest),
MSM_PIN_FUNCTION(gcc_tlmm),
MSM_PIN_FUNCTION(gmac_mdio),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(gsm0_tx),
MSM_PIN_FUNCTION(lcd_rst),
MSM_PIN_FUNCTION(ldo_en),
diff --git a/drivers/pinctrl/qcom/pinctrl-mdm9615.c b/drivers/pinctrl/qcom/pinctrl-mdm9615.c
index bea1ca3d1b7f..729fe3d7e14e 100644
--- a/drivers/pinctrl/qcom/pinctrl-mdm9615.c
+++ b/drivers/pinctrl/qcom/pinctrl-mdm9615.c
@@ -313,7 +313,7 @@ static const char * const cdc_mclk_groups[] = {
};
static const struct pinfunction mdm9615_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(gsbi2_i2c),
MSM_PIN_FUNCTION(gsbi3),
MSM_PIN_FUNCTION(gsbi4),
diff --git a/drivers/pinctrl/qcom/pinctrl-milos.c b/drivers/pinctrl/qcom/pinctrl-milos.c
index d11a7bbcd733..19abd5233a2c 100644
--- a/drivers/pinctrl/qcom/pinctrl-milos.c
+++ b/drivers/pinctrl/qcom/pinctrl-milos.c
@@ -974,7 +974,7 @@ static const char *const wcn_sw_ctrl_groups[] = {
};
static const struct pinfunction milos_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(aoss_cti),
MSM_PIN_FUNCTION(atest_char),
MSM_PIN_FUNCTION(atest_usb),
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index f713c80d7f3e..67525d542c5b 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -31,6 +31,7 @@
#include "../core.h"
#include "../pinconf.h"
#include "../pinctrl-utils.h"
+#include "../pinmux.h"
#include "pinctrl-msm.h"
@@ -150,33 +151,6 @@ static int msm_pinmux_request(struct pinctrl_dev *pctldev, unsigned offset)
return gpiochip_line_is_valid(chip, offset) ? 0 : -EINVAL;
}
-static int msm_get_functions_count(struct pinctrl_dev *pctldev)
-{
- struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
-
- return pctrl->soc->nfunctions;
-}
-
-static const char *msm_get_function_name(struct pinctrl_dev *pctldev,
- unsigned function)
-{
- struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
-
- return pctrl->soc->functions[function].name;
-}
-
-static int msm_get_function_groups(struct pinctrl_dev *pctldev,
- unsigned function,
- const char * const **groups,
- unsigned * const num_groups)
-{
- struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
-
- *groups = pctrl->soc->functions[function].groups;
- *num_groups = pctrl->soc->functions[function].ngroups;
- return 0;
-}
-
static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
unsigned function,
unsigned group)
@@ -288,11 +262,13 @@ static int msm_pinmux_request_gpio(struct pinctrl_dev *pctldev,
static const struct pinmux_ops msm_pinmux_ops = {
.request = msm_pinmux_request,
- .get_functions_count = msm_get_functions_count,
- .get_function_name = msm_get_function_name,
- .get_function_groups = msm_get_function_groups,
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
+ .function_is_gpio = pinmux_generic_function_is_gpio,
.gpio_request_enable = msm_pinmux_request_gpio,
.set_mux = msm_pinmux_set_mux,
+ .strict = true,
};
static int msm_config_reg(struct msm_pinctrl *pctrl,
@@ -319,7 +295,7 @@ static int msm_config_reg(struct msm_pinctrl *pctrl,
*bit = g->drv_bit;
*mask = 7;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
case PIN_CONFIG_INPUT_ENABLE:
case PIN_CONFIG_OUTPUT_ENABLE:
*bit = g->oe_bit;
@@ -409,7 +385,7 @@ static int msm_config_group_get(struct pinctrl_dev *pctldev,
case PIN_CONFIG_DRIVE_STRENGTH:
arg = msm_regval_to_drive(arg);
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
/* Pin is not output */
if (!arg)
return -EINVAL;
@@ -488,7 +464,7 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
else
arg = (arg / 2) - 1;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
/* set output value */
raw_spin_lock_irqsave(&pctrl->lock, flags);
val = msm_readl_io(pctrl, g);
@@ -792,7 +768,7 @@ static const struct gpio_chip msm_gpio_template = {
.direction_output = msm_gpio_direction_output,
.get_direction = msm_gpio_get_direction,
.get = msm_gpio_get,
- .set_rv = msm_gpio_set,
+ .set = msm_gpio_set,
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
.dbg_show = msm_gpio_dbg_show,
@@ -1552,6 +1528,7 @@ EXPORT_SYMBOL(msm_pinctrl_dev_pm_ops);
int msm_pinctrl_probe(struct platform_device *pdev,
const struct msm_pinctrl_soc_data *soc_data)
{
+ const struct pinfunction *func;
struct msm_pinctrl *pctrl;
struct resource *res;
int ret;
@@ -1606,6 +1583,14 @@ int msm_pinctrl_probe(struct platform_device *pdev,
return PTR_ERR(pctrl->pctrl);
}
+ for (i = 0; i < soc_data->nfunctions; i++) {
+ func = &soc_data->functions[i];
+
+ ret = pinmux_generic_add_pinfunction(pctrl->pctrl, func, NULL);
+ if (ret < 0)
+ return ret;
+ }
+
ret = msm_gpio_init(pctrl);
if (ret)
return ret;
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index d7dc0947bb16..4625fa5320a9 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
@@ -29,6 +29,11 @@ struct pinctrl_pin_desc;
fname##_groups, \
ARRAY_SIZE(fname##_groups))
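+/* As MSM_PIN_FUNCTION(), but flags the function as GPIO for strict pinmux checks. */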
+#define MSM_GPIO_PIN_FUNCTION(fname) \
+ [msm_mux_##fname] = PINCTRL_GPIO_PINFUNCTION(#fname, \
+ fname##_groups, \
+ ARRAY_SIZE(fname##_groups))
+
#define QCA_PIN_FUNCTION(fname) \
[qca_mux_##fname] = PINCTRL_PINFUNCTION(#fname, \
fname##_groups, \
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8226.c b/drivers/pinctrl/qcom/pinctrl-msm8226.c
index f9a957347340..a81aa092ef12 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8226.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8226.c
@@ -483,7 +483,7 @@ static const struct pinfunction msm8226_functions[] = {
MSM_PIN_FUNCTION(cci_i2c0),
MSM_PIN_FUNCTION(gp0_clk),
MSM_PIN_FUNCTION(gp1_clk),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(sdc3),
MSM_PIN_FUNCTION(wlan),
};
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8660.c b/drivers/pinctrl/qcom/pinctrl-msm8660.c
index 4dbc19ffd80e..5ded00396cd9 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8660.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8660.c
@@ -714,7 +714,7 @@ static const char * const ebi2_groups[] = {
};
static const struct pinfunction msm8660_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(cam_mclk),
MSM_PIN_FUNCTION(dsub),
MSM_PIN_FUNCTION(ext_gps),
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8909.c b/drivers/pinctrl/qcom/pinctrl-msm8909.c
index 0aa4f77b774f..544a52fb8f3d 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8909.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8909.c
@@ -696,7 +696,7 @@ static const struct pinfunction msm8909_functions[] = {
MSM_PIN_FUNCTION(gcc_gp3_clk_a),
MSM_PIN_FUNCTION(gcc_gp3_clk_b),
MSM_PIN_FUNCTION(gcc_plltest),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(gsm0_tx),
MSM_PIN_FUNCTION(ldo_en),
MSM_PIN_FUNCTION(ldo_update),
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8916.c b/drivers/pinctrl/qcom/pinctrl-msm8916.c
index 0dfc6dd33d58..b1b6934bb4b6 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8916.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8916.c
@@ -743,7 +743,7 @@ static const struct pinfunction msm8916_functions[] = {
MSM_PIN_FUNCTION(gcc_gp2_clk_b),
MSM_PIN_FUNCTION(gcc_gp3_clk_a),
MSM_PIN_FUNCTION(gcc_gp3_clk_b),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(gsm0_tx0),
MSM_PIN_FUNCTION(gsm0_tx1),
MSM_PIN_FUNCTION(gsm1_tx0),
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8917.c b/drivers/pinctrl/qcom/pinctrl-msm8917.c
index 2e1a94ab18b2..f23d92d6615b 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8917.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8917.c
@@ -1302,7 +1302,7 @@ static const struct pinfunction msm8917_functions[] = {
MSM_PIN_FUNCTION(gcc_gp3_clk_b),
MSM_PIN_FUNCTION(gcc_plltest),
MSM_PIN_FUNCTION(gcc_tlmm),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(gsm0_tx),
MSM_PIN_FUNCTION(key_focus),
MSM_PIN_FUNCTION(key_snapshot),
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8953.c b/drivers/pinctrl/qcom/pinctrl-msm8953.c
index 956383341a7a..67db062fdf56 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8953.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8953.c
@@ -1533,7 +1533,7 @@ static const struct pinfunction msm8953_functions[] = {
MSM_PIN_FUNCTION(gcc_gp3_clk_b),
MSM_PIN_FUNCTION(gcc_plltest),
MSM_PIN_FUNCTION(gcc_tlmm),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(gsm0_tx),
MSM_PIN_FUNCTION(gsm1_tx),
MSM_PIN_FUNCTION(gyro_int),
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8960.c b/drivers/pinctrl/qcom/pinctrl-msm8960.c
index a937ea867de7..2fb15208aba0 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8960.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8960.c
@@ -974,7 +974,7 @@ static const struct pinfunction msm8960_functions[] = {
MSM_PIN_FUNCTION(gp_pdm_1b),
MSM_PIN_FUNCTION(gp_pdm_2a),
MSM_PIN_FUNCTION(gp_pdm_2b),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(gsbi1),
MSM_PIN_FUNCTION(gsbi1_spi_cs1_n),
MSM_PIN_FUNCTION(gsbi1_spi_cs2a_n),
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8976.c b/drivers/pinctrl/qcom/pinctrl-msm8976.c
index 3bcb03387781..345539b9e696 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8976.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8976.c
@@ -812,7 +812,7 @@ static const char * const ss_switch_groups[] = {
};
static const struct pinfunction msm8976_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(blsp_spi1),
MSM_PIN_FUNCTION(smb_int),
MSM_PIN_FUNCTION(blsp_i2c1),
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8994.c b/drivers/pinctrl/qcom/pinctrl-msm8994.c
index 7a3b6cbccb68..94e042d1f4b2 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8994.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8994.c
@@ -1071,7 +1071,7 @@ static const struct pinfunction msm8994_functions[] = {
MSM_PIN_FUNCTION(uim2),
MSM_PIN_FUNCTION(uim3),
MSM_PIN_FUNCTION(uim4),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
};
static const struct msm_pingroup msm8994_groups[] = {
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8996.c b/drivers/pinctrl/qcom/pinctrl-msm8996.c
index d86d83106d3b..e5b55693d023 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8996.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8996.c
@@ -1532,7 +1532,7 @@ static const struct pinfunction msm8996_functions[] = {
MSM_PIN_FUNCTION(gcc_gp2_clk_b),
MSM_PIN_FUNCTION(gcc_gp3_clk_a),
MSM_PIN_FUNCTION(gcc_gp3_clk_b),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(gsm_tx),
MSM_PIN_FUNCTION(hdmi_cec),
MSM_PIN_FUNCTION(hdmi_ddc),
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8998.c b/drivers/pinctrl/qcom/pinctrl-msm8998.c
index 1daee815888f..b727593af34a 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8998.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8998.c
@@ -1160,7 +1160,7 @@ static const char * const mss_lte_groups[] = {
};
static const struct pinfunction msm8998_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(adsp_ext),
MSM_PIN_FUNCTION(agera_pll),
MSM_PIN_FUNCTION(atest_char),
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8x74.c b/drivers/pinctrl/qcom/pinctrl-msm8x74.c
index 8253aa25775b..202bec003e96 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8x74.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8x74.c
@@ -778,7 +778,7 @@ static const char * const slimbus_groups[] = { "gpio70", "gpio71" };
static const char * const hsic_ctl_groups[] = { "hsic_strobe", "hsic_data" };
static const struct pinfunction msm8x74_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(cci_i2c0),
MSM_PIN_FUNCTION(cci_i2c1),
MSM_PIN_FUNCTION(uim1),
diff --git a/drivers/pinctrl/qcom/pinctrl-qcm2290.c b/drivers/pinctrl/qcom/pinctrl-qcm2290.c
index eeeec6434f6a..38200957451e 100644
--- a/drivers/pinctrl/qcom/pinctrl-qcm2290.c
+++ b/drivers/pinctrl/qcom/pinctrl-qcm2290.c
@@ -870,11 +870,11 @@ static const struct pinfunction qcm2290_functions[] = {
MSM_PIN_FUNCTION(ddr_pxi1),
MSM_PIN_FUNCTION(ddr_pxi2),
MSM_PIN_FUNCTION(ddr_pxi3),
- MSM_PIN_FUNCTION(egpio),
+ MSM_GPIO_PIN_FUNCTION(egpio),
MSM_PIN_FUNCTION(gcc_gp1),
MSM_PIN_FUNCTION(gcc_gp2),
MSM_PIN_FUNCTION(gcc_gp3),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(gp_pdm0),
MSM_PIN_FUNCTION(gp_pdm1),
MSM_PIN_FUNCTION(gp_pdm2),
diff --git a/drivers/pinctrl/qcom/pinctrl-qcs404.c b/drivers/pinctrl/qcom/pinctrl-qcs404.c
index 54e3b4435349..0b8db2c7e58a 100644
--- a/drivers/pinctrl/qcom/pinctrl-qcs404.c
+++ b/drivers/pinctrl/qcom/pinctrl-qcs404.c
@@ -1296,7 +1296,7 @@ static const char * const i2s_3_ws_a_groups[] = {
};
static const struct pinfunction qcs404_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(hdmi_tx),
MSM_PIN_FUNCTION(hdmi_ddc),
MSM_PIN_FUNCTION(blsp_uart_tx_a2),
diff --git a/drivers/pinctrl/qcom/pinctrl-qcs615.c b/drivers/pinctrl/qcom/pinctrl-qcs615.c
index 2a943bc46a62..4dfa820d4e77 100644
--- a/drivers/pinctrl/qcom/pinctrl-qcs615.c
+++ b/drivers/pinctrl/qcom/pinctrl-qcs615.c
@@ -819,7 +819,7 @@ static const char *const wsa_data_groups[] = {
};
static const struct pinfunction qcs615_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(adsp_ext),
MSM_PIN_FUNCTION(agera_pll),
MSM_PIN_FUNCTION(aoss_cti),
diff --git a/drivers/pinctrl/qcom/pinctrl-qcs8300.c b/drivers/pinctrl/qcom/pinctrl-qcs8300.c
index d6437e26392b..f1af1a620684 100644
--- a/drivers/pinctrl/qcom/pinctrl-qcs8300.c
+++ b/drivers/pinctrl/qcom/pinctrl-qcs8300.c
@@ -929,7 +929,7 @@ static const char *const vsense_trigger_groups[] = {
};
static const struct pinfunction qcs8300_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(aoss_cti),
MSM_PIN_FUNCTION(atest_char),
MSM_PIN_FUNCTION(atest_usb2),
@@ -949,7 +949,7 @@ static const struct pinfunction qcs8300_functions[] = {
MSM_PIN_FUNCTION(edp0_hot),
MSM_PIN_FUNCTION(edp0_lcd),
MSM_PIN_FUNCTION(edp1_lcd),
- MSM_PIN_FUNCTION(egpio),
+ MSM_GPIO_PIN_FUNCTION(egpio),
MSM_PIN_FUNCTION(emac0_mcg0),
MSM_PIN_FUNCTION(emac0_mcg1),
MSM_PIN_FUNCTION(emac0_mcg2),
diff --git a/drivers/pinctrl/qcom/pinctrl-qdu1000.c b/drivers/pinctrl/qcom/pinctrl-qdu1000.c
index eacb89fa3888..7c535698a780 100644
--- a/drivers/pinctrl/qcom/pinctrl-qdu1000.c
+++ b/drivers/pinctrl/qcom/pinctrl-qdu1000.c
@@ -904,7 +904,7 @@ static const char * const vsense_trigger_groups[] = {
};
static const struct pinfunction qdu1000_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(cmo_pri),
MSM_PIN_FUNCTION(si5518_int),
MSM_PIN_FUNCTION(atest_char),
diff --git a/drivers/pinctrl/qcom/pinctrl-sa8775p.c b/drivers/pinctrl/qcom/pinctrl-sa8775p.c
index 1b62eb3e6620..53f28b9c49ba 100644
--- a/drivers/pinctrl/qcom/pinctrl-sa8775p.c
+++ b/drivers/pinctrl/qcom/pinctrl-sa8775p.c
@@ -1181,7 +1181,7 @@ static const char * const vsense_trigger_groups[] = {
};
static const struct pinfunction sa8775p_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(atest_char),
MSM_PIN_FUNCTION(atest_usb2),
MSM_PIN_FUNCTION(audio_ref),
@@ -1217,7 +1217,7 @@ static const struct pinfunction sa8775p_functions[] = {
MSM_PIN_FUNCTION(edp2_lcd),
MSM_PIN_FUNCTION(edp3_hot),
MSM_PIN_FUNCTION(edp3_lcd),
- MSM_PIN_FUNCTION(egpio),
+ MSM_GPIO_PIN_FUNCTION(egpio),
MSM_PIN_FUNCTION(emac0_mcg0),
MSM_PIN_FUNCTION(emac0_mcg1),
MSM_PIN_FUNCTION(emac0_mcg2),
diff --git a/drivers/pinctrl/qcom/pinctrl-sar2130p.c b/drivers/pinctrl/qcom/pinctrl-sar2130p.c
index 3dd1b5e5cfee..4a53f4ee2041 100644
--- a/drivers/pinctrl/qcom/pinctrl-sar2130p.c
+++ b/drivers/pinctrl/qcom/pinctrl-sar2130p.c
@@ -1128,7 +1128,7 @@ static const char * const vsense_trigger_groups[] = {
};
static const struct pinfunction sar2130p_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(qup0),
MSM_PIN_FUNCTION(ibi_i3c),
MSM_PIN_FUNCTION(jitter_bist),
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7180.c b/drivers/pinctrl/qcom/pinctrl-sc7180.c
index c43fe10b71ad..3eae51472b13 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7180.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7180.c
@@ -903,7 +903,7 @@ static const struct pinfunction sc7180_functions[] = {
MSM_PIN_FUNCTION(gcc_gp1),
MSM_PIN_FUNCTION(gcc_gp2),
MSM_PIN_FUNCTION(gcc_gp3),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(gp_pdm0),
MSM_PIN_FUNCTION(gp_pdm1),
MSM_PIN_FUNCTION(gp_pdm2),
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7280.c b/drivers/pinctrl/qcom/pinctrl-sc7280.c
index 1b070e9d41f5..44e09608aad0 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7280.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7280.c
@@ -1153,11 +1153,11 @@ static const struct pinfunction sc7280_functions[] = {
MSM_PIN_FUNCTION(dp_lcd),
MSM_PIN_FUNCTION(edp_hot),
MSM_PIN_FUNCTION(edp_lcd),
- MSM_PIN_FUNCTION(egpio),
+ MSM_GPIO_PIN_FUNCTION(egpio),
MSM_PIN_FUNCTION(gcc_gp1),
MSM_PIN_FUNCTION(gcc_gp2),
MSM_PIN_FUNCTION(gcc_gp3),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(host2wlan_sol),
MSM_PIN_FUNCTION(ibi_i3c),
MSM_PIN_FUNCTION(jitter_bist),
diff --git a/drivers/pinctrl/qcom/pinctrl-sc8180x.c b/drivers/pinctrl/qcom/pinctrl-sc8180x.c
index 26dd165d1543..d9f9e3dd9dd1 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc8180x.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc8180x.c
@@ -1272,7 +1272,7 @@ static const struct pinfunction sc8180x_functions[] = {
MSM_PIN_FUNCTION(gcc_gp3),
MSM_PIN_FUNCTION(gcc_gp4),
MSM_PIN_FUNCTION(gcc_gp5),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(gps),
MSM_PIN_FUNCTION(grfc),
MSM_PIN_FUNCTION(hs1_mi2s),
@@ -1634,7 +1634,7 @@ static int sc8180x_pinctrl_add_tile_resources(struct platform_device *pdev)
return 0;
/* Allocate for new resources */
- nres = devm_kzalloc(&pdev->dev, sizeof(*nres) * nres_num, GFP_KERNEL);
+ nres = devm_kcalloc(&pdev->dev, nres_num, sizeof(*nres), GFP_KERNEL);
if (!nres)
return -ENOMEM;
diff --git a/drivers/pinctrl/qcom/pinctrl-sc8280xp.c b/drivers/pinctrl/qcom/pinctrl-sc8280xp.c
index 6ccd7e5648d4..cf8297e8b8f8 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc8280xp.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc8280xp.c
@@ -1506,7 +1506,7 @@ static const struct pinfunction sc8280xp_functions[] = {
MSM_PIN_FUNCTION(edp2_lcd),
MSM_PIN_FUNCTION(edp3_lcd),
MSM_PIN_FUNCTION(edp_hot),
- MSM_PIN_FUNCTION(egpio),
+ MSM_GPIO_PIN_FUNCTION(egpio),
MSM_PIN_FUNCTION(emac0_dll),
MSM_PIN_FUNCTION(emac0_mcg0),
MSM_PIN_FUNCTION(emac0_mcg1),
@@ -1527,7 +1527,7 @@ static const struct pinfunction sc8280xp_functions[] = {
MSM_PIN_FUNCTION(gcc_gp3),
MSM_PIN_FUNCTION(gcc_gp4),
MSM_PIN_FUNCTION(gcc_gp5),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(hs1_mi2s),
MSM_PIN_FUNCTION(hs2_mi2s),
MSM_PIN_FUNCTION(hs3_mi2s),
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm660-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sdm660-lpass-lpi.c
new file mode 100644
index 000000000000..d93af5f0e8d3
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sdm660-lpass-lpi.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This driver is solely based on the limited information in downstream code.
+ * Any verification with schematics would be greatly appreciated.
+ *
+ * Copyright (c) 2023, Richard Acayan. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-lpass-lpi.h"
+
+enum lpass_lpi_functions {
+ LPI_MUX_comp_rx,
+ LPI_MUX_dmic1_clk,
+ LPI_MUX_dmic1_data,
+ LPI_MUX_dmic2_clk,
+ LPI_MUX_dmic2_data,
+ LPI_MUX_mclk0,
+ LPI_MUX_pdm_tx,
+ LPI_MUX_pdm_clk,
+ LPI_MUX_pdm_rx,
+ LPI_MUX_pdm_sync,
+
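+	/* the gpio mux and the "_" (unused) sentinel always terminate the list */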
+ LPI_MUX_gpio,
+ LPI_MUX__,
+};
+
+static const struct pinctrl_pin_desc sdm660_lpi_pinctrl_pins[] = {
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ PINCTRL_PIN(2, "gpio2"),
+ PINCTRL_PIN(3, "gpio3"),
+ PINCTRL_PIN(4, "gpio4"),
+ PINCTRL_PIN(5, "gpio5"),
+ PINCTRL_PIN(6, "gpio6"),
+ PINCTRL_PIN(7, "gpio7"),
+ PINCTRL_PIN(8, "gpio8"),
+ PINCTRL_PIN(9, "gpio9"),
+ PINCTRL_PIN(10, "gpio10"),
+ PINCTRL_PIN(11, "gpio11"),
+ PINCTRL_PIN(12, "gpio12"),
+ PINCTRL_PIN(13, "gpio13"),
+ PINCTRL_PIN(14, "gpio14"),
+ PINCTRL_PIN(15, "gpio15"),
+ PINCTRL_PIN(16, "gpio16"),
+ PINCTRL_PIN(17, "gpio17"),
+ PINCTRL_PIN(18, "gpio18"),
+ PINCTRL_PIN(19, "gpio19"),
+ PINCTRL_PIN(20, "gpio20"),
+ PINCTRL_PIN(21, "gpio21"),
+ PINCTRL_PIN(22, "gpio22"),
+ PINCTRL_PIN(23, "gpio23"),
+ PINCTRL_PIN(24, "gpio24"),
+ PINCTRL_PIN(25, "gpio25"),
+ PINCTRL_PIN(26, "gpio26"),
+ PINCTRL_PIN(27, "gpio27"),
+ PINCTRL_PIN(28, "gpio28"),
+ PINCTRL_PIN(29, "gpio29"),
+ PINCTRL_PIN(30, "gpio30"),
+ PINCTRL_PIN(31, "gpio31"),
+};
+
+static const char * const comp_rx_groups[] = { "gpio22", "gpio24" };
+static const char * const dmic1_clk_groups[] = { "gpio26" };
+static const char * const dmic1_data_groups[] = { "gpio27" };
+static const char * const dmic2_clk_groups[] = { "gpio28" };
+static const char * const dmic2_data_groups[] = { "gpio29" };
+static const char * const mclk0_groups[] = { "gpio18" };
+static const char * const pdm_tx_groups[] = { "gpio20" };
+static const char * const pdm_clk_groups[] = { "gpio18" };
+static const char * const pdm_rx_groups[] = { "gpio21", "gpio23", "gpio25" };
+static const char * const pdm_sync_groups[] = { "gpio19" };
+
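+/*
+ * Per-pin register offsets come from the downstream sources; pins 0..17 and
+ * 30..31 carry no known audio functions, so only the gpio mux is populated.
+ */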
+static const struct lpi_pingroup sdm660_lpi_pinctrl_groups[] = {
+ LPI_PINGROUP_OFFSET(0, LPI_NO_SLEW, _, _, _, _, 0x0000),
+ LPI_PINGROUP_OFFSET(1, LPI_NO_SLEW, _, _, _, _, 0x1000),
+ LPI_PINGROUP_OFFSET(2, LPI_NO_SLEW, _, _, _, _, 0x2000),
+ LPI_PINGROUP_OFFSET(3, LPI_NO_SLEW, _, _, _, _, 0x2010),
+ LPI_PINGROUP_OFFSET(4, LPI_NO_SLEW, _, _, _, _, 0x3000),
+ LPI_PINGROUP_OFFSET(5, LPI_NO_SLEW, _, _, _, _, 0x3010),
+ LPI_PINGROUP_OFFSET(6, LPI_NO_SLEW, _, _, _, _, 0x4000),
+ LPI_PINGROUP_OFFSET(7, LPI_NO_SLEW, _, _, _, _, 0x4010),
+ LPI_PINGROUP_OFFSET(8, LPI_NO_SLEW, _, _, _, _, 0x5000),
+ LPI_PINGROUP_OFFSET(9, LPI_NO_SLEW, _, _, _, _, 0x5010),
+ LPI_PINGROUP_OFFSET(10, LPI_NO_SLEW, _, _, _, _, 0x5020),
+ LPI_PINGROUP_OFFSET(11, LPI_NO_SLEW, _, _, _, _, 0x5030),
+ LPI_PINGROUP_OFFSET(12, LPI_NO_SLEW, _, _, _, _, 0x6000),
+ LPI_PINGROUP_OFFSET(13, LPI_NO_SLEW, _, _, _, _, 0x6010),
+ LPI_PINGROUP_OFFSET(14, LPI_NO_SLEW, _, _, _, _, 0x7000),
+ LPI_PINGROUP_OFFSET(15, LPI_NO_SLEW, _, _, _, _, 0x7010),
+ LPI_PINGROUP_OFFSET(16, LPI_NO_SLEW, _, _, _, _, 0x5040),
+ LPI_PINGROUP_OFFSET(17, LPI_NO_SLEW, _, _, _, _, 0x5050),
+
+ LPI_PINGROUP_OFFSET(18, LPI_NO_SLEW, pdm_clk, mclk0, _, _, 0x8000),
+ LPI_PINGROUP_OFFSET(19, LPI_NO_SLEW, pdm_sync, _, _, _, 0x8010),
+ LPI_PINGROUP_OFFSET(20, LPI_NO_SLEW, pdm_tx, _, _, _, 0x8020),
+ LPI_PINGROUP_OFFSET(21, LPI_NO_SLEW, pdm_rx, _, _, _, 0x8030),
+ LPI_PINGROUP_OFFSET(22, LPI_NO_SLEW, comp_rx, _, _, _, 0x8040),
+ LPI_PINGROUP_OFFSET(23, LPI_NO_SLEW, pdm_rx, _, _, _, 0x8050),
+ LPI_PINGROUP_OFFSET(24, LPI_NO_SLEW, comp_rx, _, _, _, 0x8060),
+ LPI_PINGROUP_OFFSET(25, LPI_NO_SLEW, pdm_rx, _, _, _, 0x8070),
+ LPI_PINGROUP_OFFSET(26, LPI_NO_SLEW, dmic1_clk, _, _, _, 0x9000),
+ LPI_PINGROUP_OFFSET(27, LPI_NO_SLEW, dmic1_data, _, _, _, 0x9010),
+ LPI_PINGROUP_OFFSET(28, LPI_NO_SLEW, dmic2_clk, _, _, _, 0xa000),
+ LPI_PINGROUP_OFFSET(29, LPI_NO_SLEW, dmic2_data, _, _, _, 0xa010),
+
+ LPI_PINGROUP_OFFSET(30, LPI_NO_SLEW, _, _, _, _, 0xb000),
+ LPI_PINGROUP_OFFSET(31, LPI_NO_SLEW, _, _, _, _, 0xb010),
+};
+
+static const struct lpi_function sdm660_lpi_pinctrl_functions[] = {
+ LPI_FUNCTION(comp_rx),
+ LPI_FUNCTION(dmic1_clk),
+ LPI_FUNCTION(dmic1_data),
+ LPI_FUNCTION(dmic2_clk),
+ LPI_FUNCTION(dmic2_data),
+ LPI_FUNCTION(mclk0),
+ LPI_FUNCTION(pdm_tx),
+ LPI_FUNCTION(pdm_clk),
+ LPI_FUNCTION(pdm_rx),
+ LPI_FUNCTION(pdm_sync),
+};
+
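+/* slew bits share the pin config register; per-pin offsets presumably come from the group table above */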
+static const struct lpi_pinctrl_variant_data sdm660_lpi_pinctrl_data = {
+ .pins = sdm660_lpi_pinctrl_pins,
+ .npins = ARRAY_SIZE(sdm660_lpi_pinctrl_pins),
+ .groups = sdm660_lpi_pinctrl_groups,
+ .ngroups = ARRAY_SIZE(sdm660_lpi_pinctrl_groups),
+ .functions = sdm660_lpi_pinctrl_functions,
+ .nfunctions = ARRAY_SIZE(sdm660_lpi_pinctrl_functions),
+ .flags = LPI_FLAG_SLEW_RATE_SAME_REG | LPI_FLAG_USE_PREDEFINED_PIN_OFFSET,
+};
+
+static const struct of_device_id sdm660_lpi_pinctrl_of_match[] = {
+ {
+ .compatible = "qcom,sdm660-lpass-lpi-pinctrl",
+ .data = &sdm660_lpi_pinctrl_data,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sdm660_lpi_pinctrl_of_match);
+
+static struct platform_driver sdm660_lpi_pinctrl_driver = {
+ .driver = {
+ .name = "qcom-sdm660-lpass-lpi-pinctrl",
+ .of_match_table = sdm660_lpi_pinctrl_of_match,
+ },
+ .probe = lpi_pinctrl_probe,
+ .remove = lpi_pinctrl_remove,
+};
+module_platform_driver(sdm660_lpi_pinctrl_driver);
+
+MODULE_AUTHOR("Richard Acayan <mailingradian@gmail.com>");
+MODULE_DESCRIPTION("QTI SDM660 LPI GPIO pin control driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm660.c b/drivers/pinctrl/qcom/pinctrl-sdm660.c
index 1a78288f1bc8..687d986de75c 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm660.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm660.c
@@ -1157,7 +1157,7 @@ static const struct pinfunction sdm660_functions[] = {
MSM_PIN_FUNCTION(gcc_gp1),
MSM_PIN_FUNCTION(gcc_gp2),
MSM_PIN_FUNCTION(gcc_gp3),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(gps_tx_a),
MSM_PIN_FUNCTION(gps_tx_b),
MSM_PIN_FUNCTION(gps_tx_c),
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm670.c b/drivers/pinctrl/qcom/pinctrl-sdm670.c
index 0fe1fa94cd6d..486b72edf7b4 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm670.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm670.c
@@ -991,7 +991,7 @@ static const char * const mss_lte_groups[] = {
};
static const struct pinfunction sdm670_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(adsp_ext),
MSM_PIN_FUNCTION(agera_pll),
MSM_PIN_FUNCTION(atest_char),
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm845.c b/drivers/pinctrl/qcom/pinctrl-sdm845.c
index 0446e291aa48..4cf8575797a0 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm845.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm845.c
@@ -976,7 +976,7 @@ static const char * const tsif1_sync_groups[] = {
};
static const struct pinfunction sdm845_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(adsp_ext),
MSM_PIN_FUNCTION(agera_pll),
MSM_PIN_FUNCTION(atest_char),
diff --git a/drivers/pinctrl/qcom/pinctrl-sdx55.c b/drivers/pinctrl/qcom/pinctrl-sdx55.c
index 2c17bf889146..79a7010b73f1 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdx55.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdx55.c
@@ -796,7 +796,7 @@ static const struct pinfunction sdx55_functions[] = {
MSM_PIN_FUNCTION(gcc_gp2),
MSM_PIN_FUNCTION(gcc_gp3),
MSM_PIN_FUNCTION(gcc_plltest),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(i2s_mclk),
MSM_PIN_FUNCTION(jitter_bist),
MSM_PIN_FUNCTION(ldo_en),
diff --git a/drivers/pinctrl/qcom/pinctrl-sdx65.c b/drivers/pinctrl/qcom/pinctrl-sdx65.c
index 85b5c0206dbd..cc8a99a6a91e 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdx65.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdx65.c
@@ -732,7 +732,7 @@ static const struct pinfunction sdx65_functions[] = {
MSM_PIN_FUNCTION(gcc_gp2),
MSM_PIN_FUNCTION(gcc_gp3),
MSM_PIN_FUNCTION(gcc_plltest),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(i2s_mclk),
MSM_PIN_FUNCTION(jitter_bist),
MSM_PIN_FUNCTION(ldo_en),
diff --git a/drivers/pinctrl/qcom/pinctrl-sdx75.c b/drivers/pinctrl/qcom/pinctrl-sdx75.c
index ab13a3a57a83..4078d83d818c 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdx75.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdx75.c
@@ -852,7 +852,7 @@ static const struct pinfunction sdx75_functions[] = {
MSM_PIN_FUNCTION(gcc_gp2_clk),
MSM_PIN_FUNCTION(gcc_gp3_clk),
MSM_PIN_FUNCTION(gcc_plltest),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(i2s_mclk),
MSM_PIN_FUNCTION(jitter_bist),
MSM_PIN_FUNCTION(ldo_en),
diff --git a/drivers/pinctrl/qcom/pinctrl-sm4450.c b/drivers/pinctrl/qcom/pinctrl-sm4450.c
index 1ecdf1ab4f27..d51e271e3361 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm4450.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm4450.c
@@ -722,7 +722,7 @@ static const char * const wlan1_adc_dtest1_groups[] = {
};
static const struct pinfunction sm4450_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(atest_char),
MSM_PIN_FUNCTION(atest_usb0),
MSM_PIN_FUNCTION(audio_ref_clk),
diff --git a/drivers/pinctrl/qcom/pinctrl-sm6115.c b/drivers/pinctrl/qcom/pinctrl-sm6115.c
index c273efa43996..06700685ea2a 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm6115.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm6115.c
@@ -687,7 +687,7 @@ static const struct pinfunction sm6115_functions[] = {
MSM_PIN_FUNCTION(gcc_gp1),
MSM_PIN_FUNCTION(gcc_gp2),
MSM_PIN_FUNCTION(gcc_gp3),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(gp_pdm0),
MSM_PIN_FUNCTION(gp_pdm1),
MSM_PIN_FUNCTION(gp_pdm2),
diff --git a/drivers/pinctrl/qcom/pinctrl-sm6125.c b/drivers/pinctrl/qcom/pinctrl-sm6125.c
index 5092f20e0c1b..5d3d1e402345 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm6125.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm6125.c
@@ -943,7 +943,7 @@ static const char * const dmic1_data_groups[] = {
static const struct pinfunction sm6125_functions[] = {
MSM_PIN_FUNCTION(qup00),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(qdss),
MSM_PIN_FUNCTION(qup01),
MSM_PIN_FUNCTION(qup02),
diff --git a/drivers/pinctrl/qcom/pinctrl-sm6350.c b/drivers/pinctrl/qcom/pinctrl-sm6350.c
index ba4686c86c54..220fb582cac9 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm6350.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm6350.c
@@ -1048,7 +1048,7 @@ static const struct pinfunction sm6350_functions[] = {
MSM_PIN_FUNCTION(gp_pdm0),
MSM_PIN_FUNCTION(gp_pdm1),
MSM_PIN_FUNCTION(gp_pdm2),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(gps_tx),
MSM_PIN_FUNCTION(ibi_i3c),
MSM_PIN_FUNCTION(jitter_bist),
diff --git a/drivers/pinctrl/qcom/pinctrl-sm6375.c b/drivers/pinctrl/qcom/pinctrl-sm6375.c
index 49031571e65e..08b8ef6efaf0 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm6375.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm6375.c
@@ -1172,7 +1172,7 @@ static const struct pinfunction sm6375_functions[] = {
MSM_PIN_FUNCTION(gp_pdm0),
MSM_PIN_FUNCTION(gp_pdm1),
MSM_PIN_FUNCTION(gp_pdm2),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(gps_tx),
MSM_PIN_FUNCTION(ibi_i3c),
MSM_PIN_FUNCTION(jitter_bist),
diff --git a/drivers/pinctrl/qcom/pinctrl-sm7150.c b/drivers/pinctrl/qcom/pinctrl-sm7150.c
index 6e89966cd70e..78dd8153a4d4 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm7150.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm7150.c
@@ -960,7 +960,7 @@ static const char * const wsa_data_groups[] = {
};
static const struct pinfunction sm7150_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(adsp_ext),
MSM_PIN_FUNCTION(agera_pll),
MSM_PIN_FUNCTION(aoss_cti),
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8150.c b/drivers/pinctrl/qcom/pinctrl-sm8150.c
index 794ed99463f7..ad861cd66958 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8150.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8150.c
@@ -1217,7 +1217,7 @@ static const struct pinfunction sm8150_functions[] = {
MSM_PIN_FUNCTION(gcc_gp1),
MSM_PIN_FUNCTION(gcc_gp2),
MSM_PIN_FUNCTION(gcc_gp3),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(hs1_mi2s),
MSM_PIN_FUNCTION(hs2_mi2s),
MSM_PIN_FUNCTION(hs3_mi2s),
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8250.c b/drivers/pinctrl/qcom/pinctrl-sm8250.c
index fb6f005d64f5..f05361f3100d 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8250.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8250.c
@@ -49,6 +49,8 @@ enum {
.mux_bit = 2, \
.pull_bit = 0, \
.drv_bit = 6, \
+ .egpio_enable = 12, \
+ .egpio_present = 11, \
.oe_bit = 9, \
.in_bit = 0, \
.out_bit = 1, \
@@ -511,6 +513,7 @@ enum sm8250_functions {
msm_mux_ddr_pxi2,
msm_mux_ddr_pxi3,
msm_mux_dp_hot,
+ msm_mux_egpio,
msm_mux_dp_lcd,
msm_mux_gcc_gp1,
msm_mux_gcc_gp2,
@@ -830,6 +833,14 @@ static const char * const gpio_groups[] = {
"gpio171", "gpio172", "gpio173", "gpio174", "gpio175", "gpio176",
"gpio177", "gpio178", "gpio179",
};
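+/* gpio146..gpio179 can alternatively be routed to the LPASS eGPIO block */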
+static const char * const egpio_groups[] = {
+ "gpio146", "gpio147", "gpio148", "gpio149", "gpio150", "gpio151",
+ "gpio152", "gpio153", "gpio154", "gpio155", "gpio156", "gpio157",
+ "gpio158", "gpio159", "gpio160", "gpio161", "gpio162", "gpio163",
+ "gpio164", "gpio165", "gpio166", "gpio167", "gpio168", "gpio169",
+ "gpio170", "gpio171", "gpio172", "gpio173", "gpio174", "gpio175",
+ "gpio176", "gpio177", "gpio178", "gpio179",
+};
static const char * const qdss_cti_groups[] = {
"gpio0", "gpio2", "gpio2", "gpio44", "gpio45", "gpio46", "gpio92",
"gpio93",
@@ -1018,10 +1029,11 @@ static const struct pinfunction sm8250_functions[] = {
MSM_PIN_FUNCTION(ddr_pxi3),
MSM_PIN_FUNCTION(dp_hot),
MSM_PIN_FUNCTION(dp_lcd),
+ MSM_PIN_FUNCTION(egpio),
MSM_PIN_FUNCTION(gcc_gp1),
MSM_PIN_FUNCTION(gcc_gp2),
MSM_PIN_FUNCTION(gcc_gp3),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(ibi_i3c),
MSM_PIN_FUNCTION(jitter_bist),
MSM_PIN_FUNCTION(lpass_slimbus),
@@ -1265,40 +1277,40 @@ static const struct msm_pingroup sm8250_groups[] = {
[143] = PINGROUP(143, WEST, lpass_slimbus, mi2s1_data0, ddr_bist, _, _, _, _, _, _),
[144] = PINGROUP(144, WEST, lpass_slimbus, mi2s1_data1, ddr_bist, _, _, _, _, _, _),
[145] = PINGROUP(145, WEST, lpass_slimbus, mi2s1_ws, _, _, _, _, _, _, _),
- [146] = PINGROUP(146, WEST, _, _, _, _, _, _, _, _, _),
- [147] = PINGROUP(147, WEST, _, _, _, _, _, _, _, _, _),
- [148] = PINGROUP(148, WEST, _, _, _, _, _, _, _, _, _),
- [149] = PINGROUP(149, WEST, _, _, _, _, _, _, _, _, _),
- [150] = PINGROUP(150, WEST, _, _, _, _, _, _, _, _, _),
- [151] = PINGROUP(151, WEST, _, _, _, _, _, _, _, _, _),
- [152] = PINGROUP(152, WEST, _, _, _, _, _, _, _, _, _),
- [153] = PINGROUP(153, WEST, _, _, _, _, _, _, _, _, _),
- [154] = PINGROUP(154, WEST, _, _, _, _, _, _, _, _, _),
- [155] = PINGROUP(155, WEST, _, _, _, _, _, _, _, _, _),
- [156] = PINGROUP(156, WEST, _, _, _, _, _, _, _, _, _),
- [157] = PINGROUP(157, WEST, _, _, _, _, _, _, _, _, _),
- [158] = PINGROUP(158, WEST, _, _, _, _, _, _, _, _, _),
- [159] = PINGROUP(159, WEST, cri_trng0, _, _, _, _, _, _, _, _),
- [160] = PINGROUP(160, WEST, cri_trng1, qdss_gpio, _, _, _, _, _, _, _),
- [161] = PINGROUP(161, WEST, cri_trng, qdss_gpio, _, _, _, _, _, _, _),
- [162] = PINGROUP(162, WEST, sp_cmu, qdss_gpio, _, _, _, _, _, _, _),
- [163] = PINGROUP(163, WEST, prng_rosc, qdss_gpio, _, _, _, _, _, _, _),
- [164] = PINGROUP(164, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
- [165] = PINGROUP(165, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
- [166] = PINGROUP(166, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
- [167] = PINGROUP(167, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
- [168] = PINGROUP(168, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
- [169] = PINGROUP(169, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
- [170] = PINGROUP(170, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
- [171] = PINGROUP(171, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
- [172] = PINGROUP(172, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
- [173] = PINGROUP(173, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
- [174] = PINGROUP(174, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
- [175] = PINGROUP(175, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
- [176] = PINGROUP(176, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
- [177] = PINGROUP(177, WEST, qdss_gpio, _, _, _, _, _, _, _, _),
- [178] = PINGROUP(178, WEST, _, _, _, _, _, _, _, _, _),
- [179] = PINGROUP(179, WEST, _, _, _, _, _, _, _, _, _),
+ [146] = PINGROUP(146, WEST, _, _, _, _, _, _, _, _, egpio),
+ [147] = PINGROUP(147, WEST, _, _, _, _, _, _, _, _, egpio),
+ [148] = PINGROUP(148, WEST, _, _, _, _, _, _, _, _, egpio),
+ [149] = PINGROUP(149, WEST, _, _, _, _, _, _, _, _, egpio),
+ [150] = PINGROUP(150, WEST, _, _, _, _, _, _, _, _, egpio),
+ [151] = PINGROUP(151, WEST, _, _, _, _, _, _, _, _, egpio),
+ [152] = PINGROUP(152, WEST, _, _, _, _, _, _, _, _, egpio),
+ [153] = PINGROUP(153, WEST, _, _, _, _, _, _, _, _, egpio),
+ [154] = PINGROUP(154, WEST, _, _, _, _, _, _, _, _, egpio),
+ [155] = PINGROUP(155, WEST, _, _, _, _, _, _, _, _, egpio),
+ [156] = PINGROUP(156, WEST, _, _, _, _, _, _, _, _, egpio),
+ [157] = PINGROUP(157, WEST, _, _, _, _, _, _, _, _, egpio),
+ [158] = PINGROUP(158, WEST, _, _, _, _, _, _, _, _, egpio),
+ [159] = PINGROUP(159, WEST, cri_trng0, _, _, _, _, _, _, _, egpio),
+ [160] = PINGROUP(160, WEST, cri_trng1, qdss_gpio, _, _, _, _, _, _, egpio),
+ [161] = PINGROUP(161, WEST, cri_trng, qdss_gpio, _, _, _, _, _, _, egpio),
+ [162] = PINGROUP(162, WEST, sp_cmu, qdss_gpio, _, _, _, _, _, _, egpio),
+ [163] = PINGROUP(163, WEST, prng_rosc, qdss_gpio, _, _, _, _, _, _, egpio),
+ [164] = PINGROUP(164, WEST, qdss_gpio, _, _, _, _, _, _, _, egpio),
+ [165] = PINGROUP(165, WEST, qdss_gpio, _, _, _, _, _, _, _, egpio),
+ [166] = PINGROUP(166, WEST, qdss_gpio, _, _, _, _, _, _, _, egpio),
+ [167] = PINGROUP(167, WEST, qdss_gpio, _, _, _, _, _, _, _, egpio),
+ [168] = PINGROUP(168, WEST, qdss_gpio, _, _, _, _, _, _, _, egpio),
+ [169] = PINGROUP(169, WEST, qdss_gpio, _, _, _, _, _, _, _, egpio),
+ [170] = PINGROUP(170, WEST, qdss_gpio, _, _, _, _, _, _, _, egpio),
+ [171] = PINGROUP(171, WEST, qdss_gpio, _, _, _, _, _, _, _, egpio),
+ [172] = PINGROUP(172, WEST, qdss_gpio, _, _, _, _, _, _, _, egpio),
+ [173] = PINGROUP(173, WEST, qdss_gpio, _, _, _, _, _, _, _, egpio),
+ [174] = PINGROUP(174, WEST, qdss_gpio, _, _, _, _, _, _, _, egpio),
+ [175] = PINGROUP(175, WEST, qdss_gpio, _, _, _, _, _, _, _, egpio),
+ [176] = PINGROUP(176, WEST, qdss_gpio, _, _, _, _, _, _, _, egpio),
+ [177] = PINGROUP(177, WEST, qdss_gpio, _, _, _, _, _, _, _, egpio),
+ [178] = PINGROUP(178, WEST, _, _, _, _, _, _, _, _, egpio),
+ [179] = PINGROUP(179, WEST, _, _, _, _, _, _, _, _, egpio),
[180] = UFS_RESET(ufs_reset, 0xb8000),
[181] = SDC_PINGROUP(sdc2_clk, 0xb7000, 14, 6),
[182] = SDC_PINGROUP(sdc2_cmd, 0xb7000, 11, 3),
@@ -1333,6 +1345,7 @@ static const struct msm_pinctrl_soc_data sm8250_pinctrl = {
.ntiles = ARRAY_SIZE(sm8250_tiles),
.wakeirq_map = sm8250_pdc_map,
.nwakeirq_map = ARRAY_SIZE(sm8250_pdc_map),
+ .egpio_func = 9,
};
static int sm8250_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8350.c b/drivers/pinctrl/qcom/pinctrl-sm8350.c
index c8a3f39ce6f1..99949b552021 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8350.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8350.c
@@ -1267,7 +1267,7 @@ static const struct pinfunction sm8350_functions[] = {
MSM_PIN_FUNCTION(gcc_gp1),
MSM_PIN_FUNCTION(gcc_gp2),
MSM_PIN_FUNCTION(gcc_gp3),
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(ibi_i3c),
MSM_PIN_FUNCTION(jitter_bist),
MSM_PIN_FUNCTION(lpass_slimbus),
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8450.c b/drivers/pinctrl/qcom/pinctrl-sm8450.c
index f2e52d5a0f93..9889fc5dc2cd 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8450.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8450.c
@@ -1269,7 +1269,7 @@ static const char * const vsense_trigger_groups[] = {
};
static const struct pinfunction sm8450_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(aon_cam),
MSM_PIN_FUNCTION(atest_char),
MSM_PIN_FUNCTION(atest_usb),
@@ -1291,7 +1291,7 @@ static const struct pinfunction sm8450_functions[] = {
MSM_PIN_FUNCTION(ddr_pxi2),
MSM_PIN_FUNCTION(ddr_pxi3),
MSM_PIN_FUNCTION(dp_hot),
- MSM_PIN_FUNCTION(egpio),
+ MSM_GPIO_PIN_FUNCTION(egpio),
MSM_PIN_FUNCTION(gcc_gp1),
MSM_PIN_FUNCTION(gcc_gp2),
MSM_PIN_FUNCTION(gcc_gp3),
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8550.c b/drivers/pinctrl/qcom/pinctrl-sm8550.c
index 1b4496cb39eb..10a62031fdfd 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8550.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8550.c
@@ -1340,7 +1340,7 @@ static const char *const vsense_trigger_mirnat_groups[] = {
};
static const struct pinfunction sm8550_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(aon_cci),
MSM_PIN_FUNCTION(aoss_cti),
MSM_PIN_FUNCTION(atest_char),
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8650.c b/drivers/pinctrl/qcom/pinctrl-sm8650.c
index 449a0077f4b1..e2ae03800206 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8650.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8650.c
@@ -1328,7 +1328,7 @@ static const char *const vsense_trigger_mirnat_groups[] = {
};
static const struct pinfunction sm8650_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(aoss_cti),
MSM_PIN_FUNCTION(atest_char),
MSM_PIN_FUNCTION(atest_usb),
@@ -1359,7 +1359,7 @@ static const struct pinfunction sm8650_functions[] = {
MSM_PIN_FUNCTION(ddr_pxi3),
MSM_PIN_FUNCTION(do_not),
MSM_PIN_FUNCTION(dp_hot),
- MSM_PIN_FUNCTION(egpio),
+ MSM_GPIO_PIN_FUNCTION(egpio),
MSM_PIN_FUNCTION(gcc_gp1),
MSM_PIN_FUNCTION(gcc_gp2),
MSM_PIN_FUNCTION(gcc_gp3),
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8750.c b/drivers/pinctrl/qcom/pinctrl-sm8750.c
index 8516693d1db5..6f92f176edd4 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8750.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8750.c
@@ -1290,7 +1290,7 @@ static const char *const wcn_sw_ctrl_groups[] = {
};
static const struct pinfunction sm8750_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(aoss_cti),
MSM_PIN_FUNCTION(atest_char),
MSM_PIN_FUNCTION(atest_usb),
@@ -1319,7 +1319,7 @@ static const struct pinfunction sm8750_functions[] = {
MSM_PIN_FUNCTION(ddr_pxi2),
MSM_PIN_FUNCTION(ddr_pxi3),
MSM_PIN_FUNCTION(dp_hot),
- MSM_PIN_FUNCTION(egpio),
+ MSM_GPIO_PIN_FUNCTION(egpio),
MSM_PIN_FUNCTION(gcc_gp1),
MSM_PIN_FUNCTION(gcc_gp2),
MSM_PIN_FUNCTION(gcc_gp3),
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 606becc160eb..485b68cc93f8 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -438,7 +438,7 @@ static int pmic_gpio_config_get(struct pinctrl_dev *pctldev,
case PIN_CONFIG_OUTPUT_ENABLE:
arg = pad->output_enabled;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
arg = pad->out_value;
break;
case PMIC_GPIO_CONF_PULL_UP:
@@ -530,7 +530,7 @@ static int pmic_gpio_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
case PIN_CONFIG_OUTPUT_ENABLE:
pad->output_enabled = arg ? true : false;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
pad->output_enabled = true;
pad->out_value = arg;
break;
@@ -737,7 +737,7 @@ static int pmic_gpio_direction_output(struct gpio_chip *chip,
struct pmic_gpio_state *state = gpiochip_get_data(chip);
unsigned long config;
- config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, val);
+ config = pinconf_to_config_packed(PIN_CONFIG_LEVEL, val);
return pmic_gpio_config_set(state->ctrl, pin, &config, 1);
}
@@ -769,7 +769,7 @@ static int pmic_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
struct pmic_gpio_state *state = gpiochip_get_data(chip);
unsigned long config;
- config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, value);
+ config = pinconf_to_config_packed(PIN_CONFIG_LEVEL, value);
return pmic_gpio_config_set(state->ctrl, pin, &config, 1);
}
@@ -802,7 +802,7 @@ static const struct gpio_chip pmic_gpio_gpio_template = {
.direction_input = pmic_gpio_direction_input,
.direction_output = pmic_gpio_direction_output,
.get = pmic_gpio_get,
- .set_rv = pmic_gpio_set,
+ .set = pmic_gpio_set,
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
.of_xlate = pmic_gpio_of_xlate,
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
index ba9084978f90..64f8024a865c 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -370,7 +370,7 @@ static int pmic_mpp_config_get(struct pinctrl_dev *pctldev,
return -EINVAL;
arg = 1;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
arg = pad->out_value;
break;
case PMIC_MPP_CONF_DTEST_SELECTOR:
@@ -447,7 +447,7 @@ static int pmic_mpp_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
case PIN_CONFIG_INPUT_ENABLE:
pad->input_enabled = arg ? true : false;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
pad->output_enabled = true;
pad->out_value = arg;
break;
@@ -576,7 +576,7 @@ static int pmic_mpp_direction_output(struct gpio_chip *chip,
struct pmic_mpp_state *state = gpiochip_get_data(chip);
unsigned long config;
- config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, val);
+ config = pinconf_to_config_packed(PIN_CONFIG_LEVEL, val);
return pmic_mpp_config_set(state->ctrl, pin, &config, 1);
}
@@ -605,7 +605,7 @@ static int pmic_mpp_set(struct gpio_chip *chip, unsigned int pin, int value)
struct pmic_mpp_state *state = gpiochip_get_data(chip);
unsigned long config;
- config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, value);
+ config = pinconf_to_config_packed(PIN_CONFIG_LEVEL, value);
return pmic_mpp_config_set(state->ctrl, pin, &config, 1);
}
@@ -638,7 +638,7 @@ static const struct gpio_chip pmic_mpp_gpio_template = {
.direction_input = pmic_mpp_direction_input,
.direction_output = pmic_mpp_direction_output,
.get = pmic_mpp_get,
- .set_rv = pmic_mpp_set,
+ .set = pmic_mpp_set,
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
.of_xlate = pmic_mpp_of_xlate,
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index 3a8014ebf064..5c966d51eda7 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -282,7 +282,7 @@ static int pm8xxx_pin_config_get(struct pinctrl_dev *pctldev,
return -EINVAL;
arg = 1;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
if (pin->mode & PM8XXX_GPIO_MODE_OUTPUT)
arg = pin->output_value;
else
@@ -364,7 +364,7 @@ static int pm8xxx_pin_config_set(struct pinctrl_dev *pctldev,
pin->mode = PM8XXX_GPIO_MODE_INPUT;
banks |= BIT(0) | BIT(1);
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
pin->mode = PM8XXX_GPIO_MODE_OUTPUT;
pin->output_value = !!arg;
banks |= BIT(0) | BIT(1);
@@ -597,7 +597,7 @@ static const struct gpio_chip pm8xxx_gpio_template = {
.direction_input = pm8xxx_gpio_direction_input,
.direction_output = pm8xxx_gpio_direction_output,
.get = pm8xxx_gpio_get,
- .set_rv = pm8xxx_gpio_set,
+ .set = pm8xxx_gpio_set,
.of_xlate = pm8xxx_gpio_of_xlate,
.dbg_show = pm8xxx_gpio_dbg_show,
.owner = THIS_MODULE,
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
index 087c37d304fc..7970fa6e1557 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
@@ -337,7 +337,7 @@ static int pm8xxx_pin_config_get(struct pinctrl_dev *pctldev,
case PIN_CONFIG_INPUT_ENABLE:
arg = pin->input;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
arg = pin->output_value;
break;
case PIN_CONFIG_POWER_SOURCE:
@@ -392,7 +392,7 @@ static int pm8xxx_pin_config_set(struct pinctrl_dev *pctldev,
case PIN_CONFIG_INPUT_ENABLE:
pin->input = true;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
pin->output = true;
pin->output_value = !!arg;
break;
@@ -634,7 +634,7 @@ static const struct gpio_chip pm8xxx_mpp_template = {
.direction_input = pm8xxx_mpp_direction_input,
.direction_output = pm8xxx_mpp_direction_output,
.get = pm8xxx_mpp_get,
- .set_rv = pm8xxx_mpp_set,
+ .set = pm8xxx_mpp_set,
.of_xlate = pm8xxx_mpp_of_xlate,
.dbg_show = pm8xxx_mpp_dbg_show,
.owner = THIS_MODULE,
diff --git a/drivers/pinctrl/qcom/pinctrl-x1e80100.c b/drivers/pinctrl/qcom/pinctrl-x1e80100.c
index d4b215f34c39..bb36f40b19fa 100644
--- a/drivers/pinctrl/qcom/pinctrl-x1e80100.c
+++ b/drivers/pinctrl/qcom/pinctrl-x1e80100.c
@@ -1407,7 +1407,7 @@ static const char * const vsense_trigger_groups[] = {
};
static const struct pinfunction x1e80100_functions[] = {
- MSM_PIN_FUNCTION(gpio),
+ MSM_GPIO_PIN_FUNCTION(gpio),
MSM_PIN_FUNCTION(RESOUT_GPIO),
MSM_PIN_FUNCTION(aon_cci),
MSM_PIN_FUNCTION(aoss_cti),
diff --git a/drivers/pinctrl/renesas/Kconfig b/drivers/pinctrl/renesas/Kconfig
index 99ae34a56871..8cbd79a13414 100644
--- a/drivers/pinctrl/renesas/Kconfig
+++ b/drivers/pinctrl/renesas/Kconfig
@@ -44,6 +44,8 @@ config PINCTRL_RENESAS
select PINCTRL_RZG2L if ARCH_R9A09G047
select PINCTRL_RZG2L if ARCH_R9A09G056
select PINCTRL_RZG2L if ARCH_R9A09G057
+ select PINCTRL_RZT2H if ARCH_R9A09G077
+ select PINCTRL_RZT2H if ARCH_R9A09G087
select PINCTRL_PFC_SH7203 if CPU_SUBTYPE_SH7203
select PINCTRL_PFC_SH7264 if CPU_SUBTYPE_SH7264
select PINCTRL_PFC_SH7269 if CPU_SUBTYPE_SH7269
@@ -302,6 +304,17 @@ config PINCTRL_RZN1
help
This selects pinctrl driver for Renesas RZ/N1 devices.
+config PINCTRL_RZT2H
+ bool "pin control support for RZ/N2H and RZ/T2H" if COMPILE_TEST
+ depends on 64BIT && OF
+ select GPIOLIB
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ select GENERIC_PINCONF
+ help
+ This selects the GPIO and pinctrl driver for Renesas RZ/N2H and
+ RZ/T2H platforms.
+
config PINCTRL_RZV2M
bool "pin control support for RZ/V2M" if COMPILE_TEST
depends on OF
diff --git a/drivers/pinctrl/renesas/Makefile b/drivers/pinctrl/renesas/Makefile
index 2ba623e04bf8..1c5144a1c4b8 100644
--- a/drivers/pinctrl/renesas/Makefile
+++ b/drivers/pinctrl/renesas/Makefile
@@ -50,6 +50,7 @@ obj-$(CONFIG_PINCTRL_RZA1) += pinctrl-rza1.o
obj-$(CONFIG_PINCTRL_RZA2) += pinctrl-rza2.o
obj-$(CONFIG_PINCTRL_RZG2L) += pinctrl-rzg2l.o
obj-$(CONFIG_PINCTRL_RZN1) += pinctrl-rzn1.o
+obj-$(CONFIG_PINCTRL_RZT2H) += pinctrl-rzt2h.o
obj-$(CONFIG_PINCTRL_RZV2M) += pinctrl-rzv2m.o
ifeq ($(CONFIG_COMPILE_TEST),y)
diff --git a/drivers/pinctrl/renesas/gpio.c b/drivers/pinctrl/renesas/gpio.c
index 8efbdc1b0078..2293af642849 100644
--- a/drivers/pinctrl/renesas/gpio.c
+++ b/drivers/pinctrl/renesas/gpio.c
@@ -234,7 +234,7 @@ static int gpio_pin_setup(struct sh_pfc_chip *chip)
gc->direction_input = gpio_pin_direction_input;
gc->get = gpio_pin_get;
gc->direction_output = gpio_pin_direction_output;
- gc->set_rv = gpio_pin_set;
+ gc->set = gpio_pin_set;
gc->to_irq = gpio_pin_to_irq;
gc->label = pfc->info->name;
diff --git a/drivers/pinctrl/renesas/pfc-r8a779g0.c b/drivers/pinctrl/renesas/pfc-r8a779g0.c
index cae3e6553499..218c5eff9b67 100644
--- a/drivers/pinctrl/renesas/pfc-r8a779g0.c
+++ b/drivers/pinctrl/renesas/pfc-r8a779g0.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * R8A779A0 processor support - PFC hardware block.
+ * R8A779G0 processor support - PFC hardware block.
*
* Copyright (C) 2021 Renesas Electronics Corp.
*
diff --git a/drivers/pinctrl/renesas/pinctrl-rza1.c b/drivers/pinctrl/renesas/pinctrl-rza1.c
index 3d8492c91710..f24e5915cbe4 100644
--- a/drivers/pinctrl/renesas/pinctrl-rza1.c
+++ b/drivers/pinctrl/renesas/pinctrl-rza1.c
@@ -846,7 +846,7 @@ static const struct gpio_chip rza1_gpiochip_template = {
.direction_input = rza1_gpio_direction_input,
.direction_output = rza1_gpio_direction_output,
.get = rza1_gpio_get,
- .set_rv = rza1_gpio_set,
+ .set = rza1_gpio_set,
};
/* ----------------------------------------------------------------------------
* pinctrl operations
@@ -933,7 +933,7 @@ static int rza1_parse_pinmux_node(struct rza1_pinctrl *rza1_pctl,
case PIN_CONFIG_INPUT_ENABLE:
pinmux_flags |= MUX_FLAGS_SWIO_INPUT;
break;
- case PIN_CONFIG_OUTPUT: /* for DT backwards compatibility */
+ case PIN_CONFIG_LEVEL: /* for DT backwards compatibility */
case PIN_CONFIG_OUTPUT_ENABLE:
pinmux_flags |= MUX_FLAGS_SWIO_OUTPUT;
break;
@@ -1120,7 +1120,7 @@ static int rza1_set_mux(struct pinctrl_dev *pctldev, unsigned int selector,
{
struct rza1_pinctrl *rza1_pctl = pinctrl_dev_get_drvdata(pctldev);
struct rza1_mux_conf *mux_confs;
- struct function_desc *func;
+ const struct function_desc *func;
struct group_desc *grp;
int i;
diff --git a/drivers/pinctrl/renesas/pinctrl-rza2.c b/drivers/pinctrl/renesas/pinctrl-rza2.c
index 7a0b268d3eb9..29a9db197599 100644
--- a/drivers/pinctrl/renesas/pinctrl-rza2.c
+++ b/drivers/pinctrl/renesas/pinctrl-rza2.c
@@ -237,7 +237,7 @@ static struct gpio_chip chip = {
.direction_input = rza2_chip_direction_input,
.direction_output = rza2_chip_direction_output,
.get = rza2_chip_get,
- .set_rv = rza2_chip_set,
+ .set = rza2_chip_set,
};
static int rza2_gpio_register(struct rza2_pinctrl_priv *priv)
@@ -442,7 +442,7 @@ static int rza2_set_mux(struct pinctrl_dev *pctldev, unsigned int selector,
unsigned int group)
{
struct rza2_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
- struct function_desc *func;
+ const struct function_desc *func;
unsigned int i, *psel_val;
struct group_desc *grp;
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index 2a10ae0bf5bd..f524af6f586f 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -146,8 +146,6 @@
#define SD_CH(off, ch) ((off) + (ch) * 4)
#define ETH_POC(off, ch) ((off) + (ch) * 4)
#define QSPI (0x3008)
-#define ETH_MODE (0x3018)
-#define PFC_OEN (0x3C40) /* known on RZ/V2H(P) only */
#define PVDD_2500 2 /* I/O domain voltage 2.5V */
#define PVDD_1800 1 /* I/O domain voltage <= 1.8V */
@@ -221,11 +219,13 @@ static const struct pin_config_item renesas_rzv2h_conf_items[] = {
* @pwpr: PWPR register offset
* @sd_ch: SD_CH register offset
* @eth_poc: ETH_POC register offset
+ * @oen: OEN register offset
*/
struct rzg2l_register_offsets {
u16 pwpr;
u16 sd_ch;
u16 eth_poc;
+ u16 oen;
};
/**
@@ -254,6 +254,7 @@ enum rzg2l_iolh_index {
* @iolh_groupb_oi: IOLH group B output impedance specific values
* @tint_start_index: the start index for the TINT interrupts
* @drive_strength_ua: drive strength in uA is supported (otherwise mA is supported)
+ * @oen_pwpr_lock: flag indicating if the OEN register is locked by PWPR
* @func_base: base number for port function (see register PFC)
* @oen_max_pin: the maximum pin number supporting output enable
* @oen_max_port: the maximum port number supporting output enable
@@ -266,6 +267,7 @@ struct rzg2l_hwcfg {
u16 iolh_groupb_oi[4];
u16 tint_start_index;
bool drive_strength_ua;
+ bool oen_pwpr_lock;
u8 func_base;
u8 oen_max_pin;
u8 oen_max_port;
@@ -295,8 +297,7 @@ struct rzg2l_pinctrl_data {
#endif
void (*pwpr_pfc_lock_unlock)(struct rzg2l_pinctrl *pctrl, bool lock);
void (*pmc_writeb)(struct rzg2l_pinctrl *pctrl, u8 val, u16 offset);
- u32 (*oen_read)(struct rzg2l_pinctrl *pctrl, unsigned int _pin);
- int (*oen_write)(struct rzg2l_pinctrl *pctrl, unsigned int _pin, u8 oen);
+ int (*pin_to_oen_bit)(struct rzg2l_pinctrl *pctrl, unsigned int _pin);
int (*hw_to_bias_param)(unsigned int val);
int (*bias_param_to_hw)(enum pin_config_param param);
};
@@ -320,9 +321,10 @@ struct rzg2l_pinctrl_pin_settings {
* @iolh: IOLH registers cache
* @pupd: PUPD registers cache
* @ien: IEN registers cache
+ * @smt: SMT registers cache
* @sd_ch: SD_CH registers cache
* @eth_poc: ETH_POC registers cache
- * @eth_mode: ETH_MODE register cache
+ * @oen: Output Enable register cache
* @qspi: QSPI registers cache
*/
struct rzg2l_pinctrl_reg_cache {
@@ -333,9 +335,10 @@ struct rzg2l_pinctrl_reg_cache {
u32 *iolh[2];
u32 *ien[2];
u32 *pupd[2];
+ u32 *smt;
u8 sd_ch[2];
u8 eth_poc[2];
- u8 eth_mode;
+ u8 oen;
u8 qspi;
};
@@ -394,6 +397,14 @@ static const u64 r9a09g047_variable_pin_cfg[] = {
RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PA, 5, RZV2H_MPXED_PIN_FUNCS),
RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PA, 6, RZV2H_MPXED_PIN_FUNCS),
RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PA, 7, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PB, 0, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PB, 1, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_OEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PB, 2, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PB, 3, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PB, 4, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PB, 5, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PB, 6, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PB, 7, RZV2H_MPXED_PIN_FUNCS),
RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 0, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 1, RZV2H_MPXED_PIN_FUNCS),
RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 2, RZV2H_MPXED_PIN_FUNCS),
@@ -402,6 +413,14 @@ static const u64 r9a09g047_variable_pin_cfg[] = {
RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 5, RZV2H_MPXED_PIN_FUNCS),
RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 6, RZV2H_MPXED_PIN_FUNCS),
RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PD, 7, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PE, 0, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PE, 1, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_OEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PE, 2, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PE, 3, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PE, 4, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PE, 5, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PE, 6, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PE, 7, RZV2H_MPXED_PIN_FUNCS),
RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PG, 0, RZV2H_MPXED_PIN_FUNCS),
RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PG, 1, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PG, 2, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_IEN),
@@ -421,6 +440,14 @@ static const u64 r9a09g047_variable_pin_cfg[] = {
RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PJ, 2, RZV2H_MPXED_PIN_FUNCS),
RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PJ, 3, RZV2H_MPXED_PIN_FUNCS),
RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PJ, 4, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PL, 0, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_OEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PL, 1, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_OEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PL, 2, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_OEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PL, 3, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PL, 4, RZV2H_MPXED_PIN_FUNCS | PIN_CFG_OEN),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PL, 5, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PL, 6, RZV2H_MPXED_PIN_FUNCS),
+ RZG2L_VARIABLE_PIN_CFG_PACK(RZG3E_PL, 7, RZV2H_MPXED_PIN_FUNCS),
};
static const u64 r9a09g057_variable_pin_cfg[] = {
@@ -549,7 +576,7 @@ static int rzg2l_pinctrl_set_mux(struct pinctrl_dev *pctldev,
{
struct rzg2l_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
- struct function_desc *func;
+ const struct function_desc *func;
unsigned int i, *psel_val;
struct group_desc *group;
const unsigned int *pins;
@@ -1065,34 +1092,48 @@ static int rzg2l_pin_to_oen_bit(struct rzg2l_pinctrl *pctrl, unsigned int _pin)
return -EINVAL;
}
-static u32 rzg2l_read_oen(struct rzg2l_pinctrl *pctrl, unsigned int _pin)
+static int rzg2l_read_oen(struct rzg2l_pinctrl *pctrl, unsigned int _pin)
{
int bit;
- bit = rzg2l_pin_to_oen_bit(pctrl, _pin);
+ if (!pctrl->data->pin_to_oen_bit)
+ return -EOPNOTSUPP;
+
+ bit = pctrl->data->pin_to_oen_bit(pctrl, _pin);
if (bit < 0)
- return 0;
+ return -EINVAL;
- return !(readb(pctrl->base + ETH_MODE) & BIT(bit));
+ return !(readb(pctrl->base + pctrl->data->hwcfg->regs.oen) & BIT(bit));
}
static int rzg2l_write_oen(struct rzg2l_pinctrl *pctrl, unsigned int _pin, u8 oen)
{
+ const struct rzg2l_register_offsets *regs = &pctrl->data->hwcfg->regs;
+ u16 oen_offset = pctrl->data->hwcfg->regs.oen;
unsigned long flags;
+ u8 val, pwpr;
int bit;
- u8 val;
- bit = rzg2l_pin_to_oen_bit(pctrl, _pin);
+ if (!pctrl->data->pin_to_oen_bit)
+ return -EOPNOTSUPP;
+
+ bit = pctrl->data->pin_to_oen_bit(pctrl, _pin);
if (bit < 0)
- return bit;
+ return -EINVAL;
spin_lock_irqsave(&pctrl->lock, flags);
- val = readb(pctrl->base + ETH_MODE);
+ val = readb(pctrl->base + oen_offset);
if (oen)
val &= ~BIT(bit);
else
val |= BIT(bit);
- writeb(val, pctrl->base + ETH_MODE);
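+	/* SoCs that write-protect OEN need the PWPR write-enable window opened around the update */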
+ if (pctrl->data->hwcfg->oen_pwpr_lock) {
+ pwpr = readb(pctrl->base + regs->pwpr);
+ writeb(pwpr | PWPR_REGWE_B, pctrl->base + regs->pwpr);
+ }
+ writeb(val, pctrl->base + oen_offset);
+ if (pctrl->data->hwcfg->oen_pwpr_lock)
+ writeb(pwpr & ~PWPR_REGWE_B, pctrl->base + regs->pwpr);
spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
@@ -1118,39 +1159,6 @@ static int rzg3s_pin_to_oen_bit(struct rzg2l_pinctrl *pctrl, unsigned int _pin)
return bit;
}
-static u32 rzg3s_oen_read(struct rzg2l_pinctrl *pctrl, unsigned int _pin)
-{
- int bit;
-
- bit = rzg3s_pin_to_oen_bit(pctrl, _pin);
- if (bit < 0)
- return bit;
-
- return !(readb(pctrl->base + ETH_MODE) & BIT(bit));
-}
-
-static int rzg3s_oen_write(struct rzg2l_pinctrl *pctrl, unsigned int _pin, u8 oen)
-{
- unsigned long flags;
- int bit;
- u8 val;
-
- bit = rzg3s_pin_to_oen_bit(pctrl, _pin);
- if (bit < 0)
- return bit;
-
- spin_lock_irqsave(&pctrl->lock, flags);
- val = readb(pctrl->base + ETH_MODE);
- if (oen)
- val &= ~BIT(bit);
- else
- val |= BIT(bit);
- writeb(val, pctrl->base + ETH_MODE);
- spin_unlock_irqrestore(&pctrl->lock, flags);
-
- return 0;
-}
-
static int rzg2l_hw_to_bias_param(unsigned int bias)
{
switch (bias) {
@@ -1216,55 +1224,37 @@ static int rzv2h_bias_param_to_hw(enum pin_config_param param)
return -EINVAL;
}
-static u8 rzv2h_pin_to_oen_bit(struct rzg2l_pinctrl *pctrl, unsigned int _pin)
+static int rzg2l_pin_names_to_oen_bit(struct rzg2l_pinctrl *pctrl, unsigned int _pin,
+ const char * const pin_names[], unsigned int count)
{
- static const char * const pin_names[] = { "ET0_TXC_TXCLK", "ET1_TXC_TXCLK",
- "XSPI0_RESET0N", "XSPI0_CS0N",
- "XSPI0_CKN", "XSPI0_CKP" };
const struct pinctrl_pin_desc *pin_desc = &pctrl->desc.pins[_pin];
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(pin_names); i++) {
+ for (i = 0; i < count; i++) {
if (!strcmp(pin_desc->name, pin_names[i]))
return i;
}
- /* Should not happen. */
- return 0;
+ return -EINVAL;
}
-static u32 rzv2h_oen_read(struct rzg2l_pinctrl *pctrl, unsigned int _pin)
+static int rzv2h_pin_to_oen_bit(struct rzg2l_pinctrl *pctrl, unsigned int _pin)
{
- u8 bit;
-
- bit = rzv2h_pin_to_oen_bit(pctrl, _pin);
+ static const char * const pin_names[] = {
+ "ET0_TXC_TXCLK", "ET1_TXC_TXCLK", "XSPI0_RESET0N",
+ "XSPI0_CS0N", "XSPI0_CKN", "XSPI0_CKP"
+ };
- return !(readb(pctrl->base + PFC_OEN) & BIT(bit));
+ return rzg2l_pin_names_to_oen_bit(pctrl, _pin, pin_names, ARRAY_SIZE(pin_names));
}
-static int rzv2h_oen_write(struct rzg2l_pinctrl *pctrl, unsigned int _pin, u8 oen)
+static int rzg3e_pin_to_oen_bit(struct rzg2l_pinctrl *pctrl, unsigned int _pin)
{
- const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
- const struct rzg2l_register_offsets *regs = &hwcfg->regs;
- unsigned long flags;
- u8 val, bit;
- u8 pwpr;
-
- bit = rzv2h_pin_to_oen_bit(pctrl, _pin);
- spin_lock_irqsave(&pctrl->lock, flags);
- val = readb(pctrl->base + PFC_OEN);
- if (oen)
- val &= ~BIT(bit);
- else
- val |= BIT(bit);
-
- pwpr = readb(pctrl->base + regs->pwpr);
- writeb(pwpr | PWPR_REGWE_B, pctrl->base + regs->pwpr);
- writeb(val, pctrl->base + PFC_OEN);
- writeb(pwpr & ~PWPR_REGWE_B, pctrl->base + regs->pwpr);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ static const char * const pin_names[] = {
+ "PB1", "PE1", "PL4", "PL1", "PL2", "PL0"
+ };
- return 0;
+ return rzg2l_pin_names_to_oen_bit(pctrl, _pin, pin_names, ARRAY_SIZE(pin_names));
}
static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
@@ -1308,11 +1298,10 @@ static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
case PIN_CONFIG_OUTPUT_ENABLE:
if (!(cfg & PIN_CFG_OEN))
return -EINVAL;
- if (!pctrl->data->oen_read)
- return -EOPNOTSUPP;
- arg = pctrl->data->oen_read(pctrl, _pin);
- if (!arg)
- return -EINVAL;
+ ret = rzg2l_read_oen(pctrl, _pin);
+ if (ret < 0)
+ return ret;
+ arg = ret;
break;
case PIN_CONFIG_POWER_SOURCE:
@@ -1471,9 +1460,7 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
case PIN_CONFIG_OUTPUT_ENABLE:
if (!(cfg & PIN_CFG_OEN))
return -EINVAL;
- if (!pctrl->data->oen_write)
- return -EOPNOTSUPP;
- ret = pctrl->data->oen_write(pctrl, _pin, !!arg);
+ ret = rzg2l_write_oen(pctrl, _pin, !!arg);
if (ret)
return ret;
break;
@@ -2058,17 +2045,17 @@ static const u64 r9a09g047_gpio_configs[] = {
RZG2L_GPIO_PORT_PACK(6, 0x28, RZV2H_MPXED_PIN_FUNCS), /* P8 */
0x0,
RZG2L_GPIO_PORT_PACK_VARIABLE(8, 0x2a), /* PA */
- RZG2L_GPIO_PORT_PACK(8, 0x2b, RZV2H_MPXED_PIN_FUNCS), /* PB */
+ RZG2L_GPIO_PORT_PACK_VARIABLE(8, 0x2b), /* PB */
RZG2L_GPIO_PORT_PACK(3, 0x2c, RZV2H_MPXED_PIN_FUNCS), /* PC */
RZG2L_GPIO_PORT_PACK_VARIABLE(8, 0x2d), /* PD */
- RZG2L_GPIO_PORT_PACK(8, 0x2e, RZV2H_MPXED_PIN_FUNCS), /* PE */
+ RZG2L_GPIO_PORT_PACK_VARIABLE(8, 0x2e), /* PE */
RZG2L_GPIO_PORT_PACK(3, 0x2f, RZV2H_MPXED_PIN_FUNCS), /* PF */
RZG2L_GPIO_PORT_PACK_VARIABLE(8, 0x30), /* PG */
RZG2L_GPIO_PORT_PACK_VARIABLE(6, 0x31), /* PH */
0x0,
RZG2L_GPIO_PORT_PACK_VARIABLE(5, 0x33), /* PJ */
RZG2L_GPIO_PORT_PACK(4, 0x34, RZV2H_MPXED_PIN_FUNCS), /* PK */
- RZG2L_GPIO_PORT_PACK(8, 0x35, RZV2H_MPXED_PIN_FUNCS), /* PL */
+ RZG2L_GPIO_PORT_PACK_VARIABLE(8, 0x35), /* PL */
RZG2L_GPIO_PORT_PACK(8, 0x36, RZV2H_MPXED_PIN_FUNCS), /* PM */
0x0,
0x0,
@@ -2719,6 +2706,10 @@ static int rzg2l_pinctrl_reg_cache_alloc(struct rzg2l_pinctrl *pctrl)
if (!cache->pfc)
return -ENOMEM;
+ cache->smt = devm_kcalloc(pctrl->dev, nports, sizeof(*cache->smt), GFP_KERNEL);
+ if (!cache->smt)
+ return -ENOMEM;
+
for (u8 i = 0; i < 2; i++) {
u32 n_dedicated_pins = pctrl->data->n_dedicated_pins;
@@ -2795,7 +2786,7 @@ static int rzg2l_gpio_register(struct rzg2l_pinctrl *pctrl)
chip->direction_input = rzg2l_gpio_direction_input;
chip->direction_output = rzg2l_gpio_direction_output;
chip->get = rzg2l_gpio_get;
- chip->set_rv = rzg2l_gpio_set;
+ chip->set = rzg2l_gpio_set;
chip->label = name;
chip->parent = pctrl->dev;
chip->owner = THIS_MODULE;
@@ -2980,7 +2971,7 @@ static void rzg2l_pinctrl_pm_setup_regs(struct rzg2l_pinctrl *pctrl, bool suspen
struct rzg2l_pinctrl_reg_cache *cache = pctrl->cache;
for (u32 port = 0; port < nports; port++) {
- bool has_iolh, has_ien, has_pupd;
+ bool has_iolh, has_ien, has_pupd, has_smt;
u32 off, caps;
u8 pincnt;
u64 cfg;
@@ -2993,6 +2984,7 @@ static void rzg2l_pinctrl_pm_setup_regs(struct rzg2l_pinctrl *pctrl, bool suspen
has_iolh = !!(caps & (PIN_CFG_IOLH_A | PIN_CFG_IOLH_B | PIN_CFG_IOLH_C));
has_ien = !!(caps & PIN_CFG_IEN);
has_pupd = !!(caps & PIN_CFG_PUPD);
+ has_smt = !!(caps & PIN_CFG_SMT);
if (suspend)
RZG2L_PCTRL_REG_ACCESS32(suspend, pctrl->base + PFC(off), cache->pfc[port]);
@@ -3031,6 +3023,9 @@ static void rzg2l_pinctrl_pm_setup_regs(struct rzg2l_pinctrl *pctrl, bool suspen
cache->ien[1][port]);
}
}
+
+ if (has_smt)
+ RZG2L_PCTRL_REG_ACCESS32(suspend, pctrl->base + SMT(off), cache->smt[port]);
}
}
@@ -3164,7 +3159,7 @@ static int rzg2l_pinctrl_suspend_noirq(struct device *dev)
}
cache->qspi = readb(pctrl->base + QSPI);
- cache->eth_mode = readb(pctrl->base + ETH_MODE);
+ cache->oen = readb(pctrl->base + pctrl->data->hwcfg->regs.oen);
if (!atomic_read(&pctrl->wakeup_path))
clk_disable_unprepare(pctrl->clk);
@@ -3180,6 +3175,8 @@ static int rzg2l_pinctrl_resume_noirq(struct device *dev)
const struct rzg2l_hwcfg *hwcfg = pctrl->data->hwcfg;
const struct rzg2l_register_offsets *regs = &hwcfg->regs;
struct rzg2l_pinctrl_reg_cache *cache = pctrl->cache;
+ unsigned long flags;
+ u8 pwpr;
int ret;
if (!atomic_read(&pctrl->wakeup_path)) {
@@ -3189,7 +3186,16 @@ static int rzg2l_pinctrl_resume_noirq(struct device *dev)
}
writeb(cache->qspi, pctrl->base + QSPI);
- writeb(cache->eth_mode, pctrl->base + ETH_MODE);
+ if (pctrl->data->hwcfg->oen_pwpr_lock) {
+ spin_lock_irqsave(&pctrl->lock, flags);
+ pwpr = readb(pctrl->base + regs->pwpr);
+ writeb(pwpr | PWPR_REGWE_B, pctrl->base + regs->pwpr);
+ }
+ writeb(cache->oen, pctrl->base + pctrl->data->hwcfg->regs.oen);
+ if (pctrl->data->hwcfg->oen_pwpr_lock) {
+ writeb(pwpr & ~PWPR_REGWE_B, pctrl->base + regs->pwpr);
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+ }
for (u8 i = 0; i < 2; i++) {
if (regs->sd_ch)
writeb(cache->sd_ch[i], pctrl->base + SD_CH(regs->sd_ch, i));
@@ -3241,6 +3247,7 @@ static const struct rzg2l_hwcfg rzg2l_hwcfg = {
.pwpr = 0x3014,
.sd_ch = 0x3000,
.eth_poc = 0x300c,
+ .oen = 0x3018,
},
.iolh_groupa_ua = {
/* 3v3 power source */
@@ -3256,6 +3263,7 @@ static const struct rzg2l_hwcfg rzg3s_hwcfg = {
.pwpr = 0x3000,
.sd_ch = 0x3004,
.eth_poc = 0x3010,
+ .oen = 0x3018,
},
.iolh_groupa_ua = {
/* 1v8 power source */
@@ -3287,8 +3295,10 @@ static const struct rzg2l_hwcfg rzg3s_hwcfg = {
static const struct rzg2l_hwcfg rzv2h_hwcfg = {
.regs = {
.pwpr = 0x3c04,
+ .oen = 0x3c40,
},
.tint_start_index = 17,
+ .oen_pwpr_lock = true,
};
static struct rzg2l_pinctrl_data r9a07g043_data = {
@@ -3305,8 +3315,7 @@ static struct rzg2l_pinctrl_data r9a07g043_data = {
#endif
.pwpr_pfc_lock_unlock = &rzg2l_pwpr_pfc_lock_unlock,
.pmc_writeb = &rzg2l_pmc_writeb,
- .oen_read = &rzg2l_read_oen,
- .oen_write = &rzg2l_write_oen,
+ .pin_to_oen_bit = &rzg2l_pin_to_oen_bit,
.hw_to_bias_param = &rzg2l_hw_to_bias_param,
.bias_param_to_hw = &rzg2l_bias_param_to_hw,
};
@@ -3322,8 +3331,7 @@ static struct rzg2l_pinctrl_data r9a07g044_data = {
.hwcfg = &rzg2l_hwcfg,
.pwpr_pfc_lock_unlock = &rzg2l_pwpr_pfc_lock_unlock,
.pmc_writeb = &rzg2l_pmc_writeb,
- .oen_read = &rzg2l_read_oen,
- .oen_write = &rzg2l_write_oen,
+ .pin_to_oen_bit = &rzg2l_pin_to_oen_bit,
.hw_to_bias_param = &rzg2l_hw_to_bias_param,
.bias_param_to_hw = &rzg2l_bias_param_to_hw,
};
@@ -3338,8 +3346,7 @@ static struct rzg2l_pinctrl_data r9a08g045_data = {
.hwcfg = &rzg3s_hwcfg,
.pwpr_pfc_lock_unlock = &rzg2l_pwpr_pfc_lock_unlock,
.pmc_writeb = &rzg2l_pmc_writeb,
- .oen_read = &rzg3s_oen_read,
- .oen_write = &rzg3s_oen_write,
+ .pin_to_oen_bit = &rzg3s_pin_to_oen_bit,
.hw_to_bias_param = &rzg2l_hw_to_bias_param,
.bias_param_to_hw = &rzg2l_bias_param_to_hw,
};
@@ -3361,8 +3368,7 @@ static struct rzg2l_pinctrl_data r9a09g047_data = {
#endif
.pwpr_pfc_lock_unlock = &rzv2h_pwpr_pfc_lock_unlock,
.pmc_writeb = &rzv2h_pmc_writeb,
- .oen_read = &rzv2h_oen_read,
- .oen_write = &rzv2h_oen_write,
+ .pin_to_oen_bit = &rzg3e_pin_to_oen_bit,
.hw_to_bias_param = &rzv2h_hw_to_bias_param,
.bias_param_to_hw = &rzv2h_bias_param_to_hw,
};
@@ -3384,8 +3390,7 @@ static struct rzg2l_pinctrl_data r9a09g056_data = {
#endif
.pwpr_pfc_lock_unlock = &rzv2h_pwpr_pfc_lock_unlock,
.pmc_writeb = &rzv2h_pmc_writeb,
- .oen_read = &rzv2h_oen_read,
- .oen_write = &rzv2h_oen_write,
+ .pin_to_oen_bit = &rzv2h_pin_to_oen_bit,
.hw_to_bias_param = &rzv2h_hw_to_bias_param,
.bias_param_to_hw = &rzv2h_bias_param_to_hw,
};
@@ -3408,8 +3413,7 @@ static struct rzg2l_pinctrl_data r9a09g057_data = {
#endif
.pwpr_pfc_lock_unlock = &rzv2h_pwpr_pfc_lock_unlock,
.pmc_writeb = &rzv2h_pmc_writeb,
- .oen_read = &rzv2h_oen_read,
- .oen_write = &rzv2h_oen_write,
+ .pin_to_oen_bit = &rzv2h_pin_to_oen_bit,
.hw_to_bias_param = &rzv2h_hw_to_bias_param,
.bias_param_to_hw = &rzv2h_bias_param_to_hw,
};
diff --git a/drivers/pinctrl/renesas/pinctrl-rzt2h.c b/drivers/pinctrl/renesas/pinctrl-rzt2h.c
new file mode 100644
index 000000000000..3872638f5ebb
--- /dev/null
+++ b/drivers/pinctrl/renesas/pinctrl-rzt2h.c
@@ -0,0 +1,813 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/T2H Pin Control and GPIO driver core
+ *
+ * Based on drivers/pinctrl/renesas/pinctrl-rzg2l.c
+ *
+ * Copyright (C) 2025 Renesas Electronics Corporation.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/clk.h>
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include <dt-bindings/pinctrl/renesas,r9a09g077-pinctrl.h>
+
+#include "../core.h"
+#include "../pinconf.h"
+#include "../pinmux.h"
+
+#define DRV_NAME "pinctrl-rzt2h"
+
+#define P(m) (0x001 * (m))
+#define PM(m) (0x200 + 2 * (m))
+#define PMC(m) (0x400 + (m))
+#define PFC(m) (0x600 + 8 * (m))
+#define PIN(m) (0x800 + (m))
+#define RSELP(m) (0xc00 + (m))
+
+#define PM_MASK GENMASK(1, 0)
+#define PM_PIN_MASK(pin) (PM_MASK << ((pin) * 2))
+#define PM_INPUT BIT(0)
+#define PM_OUTPUT BIT(1)
+
+#define PFC_MASK GENMASK_ULL(5, 0)
+#define PFC_PIN_MASK(pin) (PFC_MASK << ((pin) * 8))
+
+/*
+ * Use 16 lower bits [15:0] for pin identifier
+ * Use 8 higher bits [23:16] for pin mux function
+ */
+#define MUX_PIN_ID_MASK GENMASK(15, 0)
+#define MUX_FUNC_MASK GENMASK(23, 16)
+
+#define RZT2H_PIN_ID_TO_PORT(id) ((id) / RZT2H_PINS_PER_PORT)
+#define RZT2H_PIN_ID_TO_PIN(id) ((id) % RZT2H_PINS_PER_PORT)
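+
+/*
+ * Worked example (illustrative values, not taken from the bindings header):
+ * a pinmux cell of 0x00020083 selects function 2 (bits [23:16]) on pin
+ * identifier 0x83 (bits [15:0]); assuming RZT2H_PINS_PER_PORT is 8, as the
+ * eight-names-per-port GPIO table below suggests, identifier 0x83 maps to
+ * port 16, pin 3.
+ */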
+
+#define RZT2H_MAX_SAFETY_PORTS 12
+
+struct rzt2h_pinctrl_data {
+ unsigned int n_port_pins;
+ const u8 *port_pin_configs;
+ unsigned int n_ports;
+};
+
+struct rzt2h_pinctrl {
+ struct pinctrl_dev *pctl;
+ struct pinctrl_desc desc;
+ struct pinctrl_pin_desc *pins;
+ const struct rzt2h_pinctrl_data *data;
+ void __iomem *base0, *base1;
+ struct device *dev;
+ struct gpio_chip gpio_chip;
+ struct pinctrl_gpio_range gpio_range;
+ spinlock_t lock; /* lock read/write registers */
+ struct mutex mutex; /* serialize adding groups and functions */
+ bool safety_port_enabled;
+};
+
+#define RZT2H_GET_BASE(pctrl, port) \
+ ((port) > RZT2H_MAX_SAFETY_PORTS ? (pctrl)->base0 : (pctrl)->base1)
+
+#define RZT2H_PINCTRL_REG_ACCESS(size, type) \
+static inline void rzt2h_pinctrl_write##size(struct rzt2h_pinctrl *pctrl, u8 port, \
+ type val, unsigned int offset) \
+{ \
+ write##size(val, RZT2H_GET_BASE(pctrl, port) + offset); \
+} \
+static inline type rzt2h_pinctrl_read##size(struct rzt2h_pinctrl *pctrl, u8 port, \
+ unsigned int offset) \
+{ \
+ return read##size(RZT2H_GET_BASE(pctrl, port) + offset); \
+}
+
+RZT2H_PINCTRL_REG_ACCESS(b, u8)
+RZT2H_PINCTRL_REG_ACCESS(w, u16)
+RZT2H_PINCTRL_REG_ACCESS(q, u64)
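+
+/*
+ * The macro above generates rzt2h_pinctrl_{read,write}{b,w,q}() accessors;
+ * each one resolves the safety ("srs") or non-safety ("nsr") register block
+ * for the given port via RZT2H_GET_BASE() and performs an 8-, 16- or 64-bit
+ * MMIO access at the supplied offset.
+ */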
+
+static int rzt2h_validate_pin(struct rzt2h_pinctrl *pctrl, unsigned int offset)
+{
+ u8 port = RZT2H_PIN_ID_TO_PORT(offset);
+ u8 pin = RZT2H_PIN_ID_TO_PIN(offset);
+ u8 pincfg;
+
+ if (offset >= pctrl->data->n_port_pins || port >= pctrl->data->n_ports)
+ return -EINVAL;
+
+ if (!pctrl->safety_port_enabled && port <= RZT2H_MAX_SAFETY_PORTS)
+ return -EINVAL;
+
+ pincfg = pctrl->data->port_pin_configs[port];
+ return (pincfg & BIT(pin)) ? 0 : -EINVAL;
+}
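+
+/*
+ * port_pin_configs[] holds one bitmap per port marking which pins exist;
+ * e.g. the trailing 0x7f entry in r9a09g077_gpio_configs below means pins
+ * 0-6 of the last port are usable while pin 7 is absent.
+ */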
+
+static void rzt2h_pinctrl_set_pfc_mode(struct rzt2h_pinctrl *pctrl,
+ u8 port, u8 pin, u8 func)
+{
+ u64 reg64;
+ u16 reg16;
+
+ guard(spinlock_irqsave)(&pctrl->lock);
+
+ /* Set pin to 'Non-use (Hi-Z input protection)' */
+ reg16 = rzt2h_pinctrl_readw(pctrl, port, PM(port));
+ reg16 &= ~PM_PIN_MASK(pin);
+ rzt2h_pinctrl_writew(pctrl, port, reg16, PM(port));
+
+ /* Temporarily switch to GPIO mode with PMC register */
+ reg16 = rzt2h_pinctrl_readb(pctrl, port, PMC(port));
+ rzt2h_pinctrl_writeb(pctrl, port, reg16 & ~BIT(pin), PMC(port));
+
+ /* Select Pin function mode with PFC register */
+ reg64 = rzt2h_pinctrl_readq(pctrl, port, PFC(port));
+ reg64 &= ~PFC_PIN_MASK(pin);
+ rzt2h_pinctrl_writeq(pctrl, port, reg64 | ((u64)func << (pin * 8)), PFC(port));
+
+ /* Switch to Peripheral pin function with PMC register */
+ reg16 = rzt2h_pinctrl_readb(pctrl, port, PMC(port));
+ rzt2h_pinctrl_writeb(pctrl, port, reg16 | BIT(pin), PMC(port));
+}
+
+static int rzt2h_pinctrl_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int func_selector,
+ unsigned int group_selector)
+{
+ struct rzt2h_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ const struct function_desc *func;
+ struct group_desc *group;
+ const unsigned int *pins;
+ unsigned int i;
+ u8 *psel_val;
+ int ret;
+
+ func = pinmux_generic_get_function(pctldev, func_selector);
+ if (!func)
+ return -EINVAL;
+
+ group = pinctrl_generic_get_group(pctldev, group_selector);
+ if (!group)
+ return -EINVAL;
+
+ psel_val = func->data;
+ pins = group->grp.pins;
+
+ for (i = 0; i < group->grp.npins; i++) {
+ dev_dbg(pctrl->dev, "port:%u pin:%u PSEL:%u\n",
+ RZT2H_PIN_ID_TO_PORT(pins[i]), RZT2H_PIN_ID_TO_PIN(pins[i]),
+ psel_val[i]);
+ ret = rzt2h_validate_pin(pctrl, pins[i]);
+ if (ret)
+ return ret;
+
+ rzt2h_pinctrl_set_pfc_mode(pctrl, RZT2H_PIN_ID_TO_PORT(pins[i]),
+ RZT2H_PIN_ID_TO_PIN(pins[i]), psel_val[i]);
+ }
+
+ return 0;
+}
+
+static int rzt2h_map_add_config(struct pinctrl_map *map,
+ const char *group_or_pin,
+ enum pinctrl_map_type type,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ unsigned long *cfgs;
+
+ cfgs = kmemdup_array(configs, num_configs, sizeof(*cfgs), GFP_KERNEL);
+ if (!cfgs)
+ return -ENOMEM;
+
+ map->type = type;
+ map->data.configs.group_or_pin = group_or_pin;
+ map->data.configs.configs = cfgs;
+ map->data.configs.num_configs = num_configs;
+
+ return 0;
+}
+
+static int rzt2h_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct device_node *parent,
+ struct pinctrl_map **map,
+ unsigned int *num_maps,
+ unsigned int *index)
+{
+ struct rzt2h_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct pinctrl_map *maps = *map;
+ unsigned int nmaps = *num_maps;
+ unsigned long *configs = NULL;
+ unsigned int num_pinmux = 0;
+ unsigned int idx = *index;
+ unsigned int num_pins, i;
+ unsigned int num_configs;
+ struct property *pinmux;
+ struct property *prop;
+ int ret, gsel, fsel;
+ const char **pin_fn;
+ unsigned int *pins;
+ const char *name;
+ const char *pin;
+ u8 *psel_val;
+
+ pinmux = of_find_property(np, "pinmux", NULL);
+ if (pinmux)
+ num_pinmux = pinmux->length / sizeof(u32);
+
+ ret = of_property_count_strings(np, "pins");
+ if (ret == -EINVAL) {
+ num_pins = 0;
+ } else if (ret < 0) {
+ dev_err(pctrl->dev, "Invalid pins list in DT\n");
+ return ret;
+ } else {
+ num_pins = ret;
+ }
+
+ if (!num_pinmux && !num_pins)
+ return 0;
+
+ if (num_pinmux && num_pins) {
+ dev_err(pctrl->dev,
+ "DT node must contain either a pinmux or pins and not both\n");
+ return -EINVAL;
+ }
+
+ ret = pinconf_generic_parse_dt_config(np, pctldev, &configs, &num_configs);
+ if (ret < 0)
+ return ret;
+
+ if (num_pins && !num_configs) {
+ dev_err(pctrl->dev, "DT node must contain a config\n");
+ ret = -ENODEV;
+ goto done;
+ }
+
+ if (num_pinmux) {
+ nmaps += 1;
+ if (num_configs)
+ nmaps += 1;
+ }
+
+ if (num_pins)
+ nmaps += num_pins;
+
+ maps = krealloc_array(maps, nmaps, sizeof(*maps), GFP_KERNEL);
+ if (!maps) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ *map = maps;
+ *num_maps = nmaps;
+ if (num_pins) {
+ of_property_for_each_string(np, "pins", prop, pin) {
+ ret = rzt2h_map_add_config(&maps[idx], pin,
+ PIN_MAP_TYPE_CONFIGS_PIN,
+ configs, num_configs);
+ if (ret < 0)
+ goto done;
+
+ idx++;
+ }
+ ret = 0;
+ goto done;
+ }
+
+ pins = devm_kcalloc(pctrl->dev, num_pinmux, sizeof(*pins), GFP_KERNEL);
+ psel_val = devm_kcalloc(pctrl->dev, num_pinmux, sizeof(*psel_val),
+ GFP_KERNEL);
+ pin_fn = devm_kzalloc(pctrl->dev, sizeof(*pin_fn), GFP_KERNEL);
+ if (!pins || !psel_val || !pin_fn) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ /* Collect pin locations and mux settings from DT properties */
+ for (i = 0; i < num_pinmux; ++i) {
+ u32 value;
+
+ ret = of_property_read_u32_index(np, "pinmux", i, &value);
+ if (ret)
+ goto done;
+ pins[i] = FIELD_GET(MUX_PIN_ID_MASK, value);
+ psel_val[i] = FIELD_GET(MUX_FUNC_MASK, value);
+ }
+
+ if (parent) {
+ name = devm_kasprintf(pctrl->dev, GFP_KERNEL, "%pOFn.%pOFn",
+ parent, np);
+ if (!name) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ } else {
+ name = np->name;
+ }
+
+ if (num_configs) {
+ ret = rzt2h_map_add_config(&maps[idx], name,
+ PIN_MAP_TYPE_CONFIGS_GROUP,
+ configs, num_configs);
+ if (ret < 0)
+ goto done;
+
+ idx++;
+ }
+
+ scoped_guard(mutex, &pctrl->mutex) {
+ /* Register a single pin group listing all the pins we read from DT */
+ gsel = pinctrl_generic_add_group(pctldev, name, pins, num_pinmux, NULL);
+ if (gsel < 0) {
+ ret = gsel;
+ goto done;
+ }
+
+ /*
+	 * Register a single group function where the 'data' is an array of PSEL
+ * register values read from DT.
+ */
+ pin_fn[0] = name;
+ fsel = pinmux_generic_add_function(pctldev, name, pin_fn, 1, psel_val);
+ if (fsel < 0) {
+ ret = fsel;
+ goto remove_group;
+ }
+ }
+
+ maps[idx].type = PIN_MAP_TYPE_MUX_GROUP;
+ maps[idx].data.mux.group = name;
+ maps[idx].data.mux.function = name;
+ idx++;
+
+ dev_dbg(pctrl->dev, "Parsed %pOF with %d pins\n", np, num_pinmux);
+ ret = 0;
+ goto done;
+
+remove_group:
+ pinctrl_generic_remove_group(pctldev, gsel);
+done:
+ *index = idx;
+ kfree(configs);
+ return ret;
+}
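+
+/*
+ * Illustrative DT fragment accepted by the parser above (the node name and
+ * cell values are hypothetical, not taken from the bindings):
+ *
+ *	serial0-pins {
+ *		pinmux = <0x00010083>,	// function 1 on pin ID 0x83
+ *			 <0x00010084>;	// function 1 on pin ID 0x84
+ *	};
+ *
+ * Such a node becomes one generic pin group plus one generic function whose
+ * 'data' is the array of PSEL values; a node using "pins" plus a config
+ * instead yields per-pin config maps.
+ */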
+
+static void rzt2h_dt_free_map(struct pinctrl_dev *pctldev,
+ struct pinctrl_map *map,
+ unsigned int num_maps)
+{
+ unsigned int i;
+
+ if (!map)
+ return;
+
+ for (i = 0; i < num_maps; ++i) {
+ if (map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP ||
+ map[i].type == PIN_MAP_TYPE_CONFIGS_PIN)
+ kfree(map[i].data.configs.configs);
+ }
+ kfree(map);
+}
+
+static int rzt2h_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **map,
+ unsigned int *num_maps)
+{
+ struct rzt2h_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ unsigned int index;
+ int ret;
+
+ *map = NULL;
+ *num_maps = 0;
+ index = 0;
+
+ for_each_child_of_node_scoped(np, child) {
+ ret = rzt2h_dt_subnode_to_map(pctldev, child, np, map,
+ num_maps, &index);
+ if (ret < 0)
+ goto done;
+ }
+
+ if (*num_maps == 0) {
+ ret = rzt2h_dt_subnode_to_map(pctldev, np, NULL, map,
+ num_maps, &index);
+ if (ret < 0)
+ goto done;
+ }
+
+ if (*num_maps)
+ return 0;
+
+ dev_err(pctrl->dev, "no mapping found in node %pOF\n", np);
+ ret = -EINVAL;
+
+done:
+ rzt2h_dt_free_map(pctldev, *map, *num_maps);
+ return ret;
+}
+
+static const struct pinctrl_ops rzt2h_pinctrl_pctlops = {
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
+ .dt_node_to_map = rzt2h_dt_node_to_map,
+ .dt_free_map = rzt2h_dt_free_map,
+};
+
+static const struct pinmux_ops rzt2h_pinctrl_pmxops = {
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
+ .set_mux = rzt2h_pinctrl_set_mux,
+ .strict = true,
+};
+
+static int rzt2h_gpio_request(struct gpio_chip *chip, unsigned int offset)
+{
+ struct rzt2h_pinctrl *pctrl = gpiochip_get_data(chip);
+ u8 port = RZT2H_PIN_ID_TO_PORT(offset);
+ u8 bit = RZT2H_PIN_ID_TO_PIN(offset);
+ int ret;
+ u8 reg;
+
+ ret = rzt2h_validate_pin(pctrl, offset);
+ if (ret)
+ return ret;
+
+ ret = pinctrl_gpio_request(chip, offset);
+ if (ret)
+ return ret;
+
+ guard(spinlock_irqsave)(&pctrl->lock);
+
+	/* Select GPIO mode in PMC register */
+ reg = rzt2h_pinctrl_readb(pctrl, port, PMC(port));
+ reg &= ~BIT(bit);
+ rzt2h_pinctrl_writeb(pctrl, port, reg, PMC(port));
+
+ return 0;
+}
+
+static void rzt2h_gpio_set_direction(struct rzt2h_pinctrl *pctrl, u32 port,
+ u8 bit, bool output)
+{
+ u16 reg;
+
+ guard(spinlock_irqsave)(&pctrl->lock);
+
+ reg = rzt2h_pinctrl_readw(pctrl, port, PM(port));
+ reg &= ~PM_PIN_MASK(bit);
+
+ reg |= (output ? PM_OUTPUT : PM_INPUT) << (bit * 2);
+ rzt2h_pinctrl_writew(pctrl, port, reg, PM(port));
+}
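+
+/*
+ * PM allocates two bits per pin: PM_INPUT (0b01) enables the input buffer,
+ * PM_OUTPUT (0b10) the output buffer, and 0b00 leaves the pin Hi-Z. E.g.
+ * for pin 3, PM_PIN_MASK(3) clears bits [7:6] before the new mode is
+ * shifted in; the 0b11 combination (presumably both buffers enabled) is
+ * never programmed by this driver.
+ */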
+
+static int rzt2h_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ struct rzt2h_pinctrl *pctrl = gpiochip_get_data(chip);
+ u8 port = RZT2H_PIN_ID_TO_PORT(offset);
+ u8 bit = RZT2H_PIN_ID_TO_PIN(offset);
+ u16 reg;
+ int ret;
+
+ ret = rzt2h_validate_pin(pctrl, offset);
+ if (ret)
+ return ret;
+
+ if (rzt2h_pinctrl_readb(pctrl, port, PMC(port)) & BIT(bit))
+ return -EINVAL;
+
+ reg = rzt2h_pinctrl_readw(pctrl, port, PM(port));
+ reg = (reg >> (bit * 2)) & PM_MASK;
+ if (reg & PM_OUTPUT)
+ return GPIO_LINE_DIRECTION_OUT;
+ if (reg & PM_INPUT)
+ return GPIO_LINE_DIRECTION_IN;
+
+ return -EINVAL;
+}
+
+static int rzt2h_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct rzt2h_pinctrl *pctrl = gpiochip_get_data(chip);
+ u8 port = RZT2H_PIN_ID_TO_PORT(offset);
+ u8 bit = RZT2H_PIN_ID_TO_PIN(offset);
+ u8 reg;
+
+ guard(spinlock_irqsave)(&pctrl->lock);
+
+ reg = rzt2h_pinctrl_readb(pctrl, port, P(port));
+ if (value)
+ rzt2h_pinctrl_writeb(pctrl, port, reg | BIT(bit), P(port));
+ else
+ rzt2h_pinctrl_writeb(pctrl, port, reg & ~BIT(bit), P(port));
+
+ return 0;
+}
+
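+/*
+ * Read back the pin level: input pins are sampled from the PIN state
+ * register, while output pins return the value last driven through the P
+ * output data register (the input buffer presumably cannot be relied on to
+ * reflect the pad while the pin is in output mode).
+ */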
+static int rzt2h_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct rzt2h_pinctrl *pctrl = gpiochip_get_data(chip);
+ u8 port = RZT2H_PIN_ID_TO_PORT(offset);
+ u8 bit = RZT2H_PIN_ID_TO_PIN(offset);
+ u16 reg;
+
+ reg = rzt2h_pinctrl_readw(pctrl, port, PM(port));
+ reg = (reg >> (bit * 2)) & PM_MASK;
+ if (reg & PM_INPUT)
+ return !!(rzt2h_pinctrl_readb(pctrl, port, PIN(port)) & BIT(bit));
+ if (reg & PM_OUTPUT)
+ return !!(rzt2h_pinctrl_readb(pctrl, port, P(port)) & BIT(bit));
+
+ return -EINVAL;
+}
+
+static int rzt2h_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct rzt2h_pinctrl *pctrl = gpiochip_get_data(chip);
+ u8 port = RZT2H_PIN_ID_TO_PORT(offset);
+ u8 bit = RZT2H_PIN_ID_TO_PIN(offset);
+
+ rzt2h_gpio_set_direction(pctrl, port, bit, false);
+
+ return 0;
+}
+
+static int rzt2h_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct rzt2h_pinctrl *pctrl = gpiochip_get_data(chip);
+ u8 port = RZT2H_PIN_ID_TO_PORT(offset);
+ u8 bit = RZT2H_PIN_ID_TO_PIN(offset);
+
+ rzt2h_gpio_set(chip, offset, value);
+ rzt2h_gpio_set_direction(pctrl, port, bit, true);
+
+ return 0;
+}
+
+static void rzt2h_gpio_free(struct gpio_chip *chip, unsigned int offset)
+{
+ pinctrl_gpio_free(chip, offset);
+
+ /*
+ * Set the GPIO as an input to ensure that the next GPIO request won't
+ * drive the GPIO pin as an output.
+ */
+ rzt2h_gpio_direction_input(chip, offset);
+}
+
+static const char * const rzt2h_gpio_names[] = {
+ "P00_0", "P00_1", "P00_2", "P00_3", "P00_4", "P00_5", "P00_6", "P00_7",
+ "P01_0", "P01_1", "P01_2", "P01_3", "P01_4", "P01_5", "P01_6", "P01_7",
+ "P02_0", "P02_1", "P02_2", "P02_3", "P02_4", "P02_5", "P02_6", "P02_7",
+ "P03_0", "P03_1", "P03_2", "P03_3", "P03_4", "P03_5", "P03_6", "P03_7",
+ "P04_0", "P04_1", "P04_2", "P04_3", "P04_4", "P04_5", "P04_6", "P04_7",
+ "P05_0", "P05_1", "P05_2", "P05_3", "P05_4", "P05_5", "P05_6", "P05_7",
+ "P06_0", "P06_1", "P06_2", "P06_3", "P06_4", "P06_5", "P06_6", "P06_7",
+ "P07_0", "P07_1", "P07_2", "P07_3", "P07_4", "P07_5", "P07_6", "P07_7",
+ "P08_0", "P08_1", "P08_2", "P08_3", "P08_4", "P08_5", "P08_6", "P08_7",
+ "P09_0", "P09_1", "P09_2", "P09_3", "P09_4", "P09_5", "P09_6", "P09_7",
+ "P10_0", "P10_1", "P10_2", "P10_3", "P10_4", "P10_5", "P10_6", "P10_7",
+ "P11_0", "P11_1", "P11_2", "P11_3", "P11_4", "P11_5", "P11_6", "P11_7",
+ "P12_0", "P12_1", "P12_2", "P12_3", "P12_4", "P12_5", "P12_6", "P12_7",
+ "P13_0", "P13_1", "P13_2", "P13_3", "P13_4", "P13_5", "P13_6", "P13_7",
+ "P14_0", "P14_1", "P14_2", "P14_3", "P14_4", "P14_5", "P14_6", "P14_7",
+ "P15_0", "P15_1", "P15_2", "P15_3", "P15_4", "P15_5", "P15_6", "P15_7",
+ "P16_0", "P16_1", "P16_2", "P16_3", "P16_4", "P16_5", "P16_6", "P16_7",
+ "P17_0", "P17_1", "P17_2", "P17_3", "P17_4", "P17_5", "P17_6", "P17_7",
+ "P18_0", "P18_1", "P18_2", "P18_3", "P18_4", "P18_5", "P18_6", "P18_7",
+ "P19_0", "P19_1", "P19_2", "P19_3", "P19_4", "P19_5", "P19_6", "P19_7",
+ "P20_0", "P20_1", "P20_2", "P20_3", "P20_4", "P20_5", "P20_6", "P20_7",
+ "P21_0", "P21_1", "P21_2", "P21_3", "P21_4", "P21_5", "P21_6", "P21_7",
+ "P22_0", "P22_1", "P22_2", "P22_3", "P22_4", "P22_5", "P22_6", "P22_7",
+ "P23_0", "P23_1", "P23_2", "P23_3", "P23_4", "P23_5", "P23_6", "P23_7",
+ "P24_0", "P24_1", "P24_2", "P24_3", "P24_4", "P24_5", "P24_6", "P24_7",
+ "P25_0", "P25_1", "P25_2", "P25_3", "P25_4", "P25_5", "P25_6", "P25_7",
+ "P26_0", "P26_1", "P26_2", "P26_3", "P26_4", "P26_5", "P26_6", "P26_7",
+ "P27_0", "P27_1", "P27_2", "P27_3", "P27_4", "P27_5", "P27_6", "P27_7",
+ "P28_0", "P28_1", "P28_2", "P28_3", "P28_4", "P28_5", "P28_6", "P28_7",
+ "P29_0", "P29_1", "P29_2", "P29_3", "P29_4", "P29_5", "P29_6", "P29_7",
+ "P30_0", "P30_1", "P30_2", "P30_3", "P30_4", "P30_5", "P30_6", "P30_7",
+ "P31_0", "P31_1", "P31_2", "P31_3", "P31_4", "P31_5", "P31_6", "P31_7",
+ "P32_0", "P32_1", "P32_2", "P32_3", "P32_4", "P32_5", "P32_6", "P32_7",
+ "P33_0", "P33_1", "P33_2", "P33_3", "P33_4", "P33_5", "P33_6", "P33_7",
+ "P34_0", "P34_1", "P34_2", "P34_3", "P34_4", "P34_5", "P34_6", "P34_7",
+ "P35_0", "P35_1", "P35_2", "P35_3", "P35_4", "P35_5", "P35_6", "P35_7",
+};
+
+static int rzt2h_gpio_register(struct rzt2h_pinctrl *pctrl)
+{
+ struct pinctrl_gpio_range *range = &pctrl->gpio_range;
+ struct gpio_chip *chip = &pctrl->gpio_chip;
+ struct device *dev = pctrl->dev;
+ struct of_phandle_args of_args;
+ int ret;
+
+ ret = of_parse_phandle_with_fixed_args(dev->of_node, "gpio-ranges", 3, 0, &of_args);
+ if (ret)
+ return dev_err_probe(dev, ret, "Unable to parse gpio-ranges\n");
+
+ if (of_args.args[0] != 0 || of_args.args[1] != 0 ||
+ of_args.args[2] != pctrl->data->n_port_pins)
+ return dev_err_probe(dev, -EINVAL,
+ "gpio-ranges does not match selected SOC\n");
+
+ chip->base = -1;
+ chip->parent = dev;
+ chip->owner = THIS_MODULE;
+ chip->ngpio = of_args.args[2];
+ chip->names = rzt2h_gpio_names;
+ chip->request = rzt2h_gpio_request;
+ chip->free = rzt2h_gpio_free;
+ chip->get_direction = rzt2h_gpio_get_direction;
+ chip->direction_input = rzt2h_gpio_direction_input;
+ chip->direction_output = rzt2h_gpio_direction_output;
+ chip->get = rzt2h_gpio_get;
+ chip->set = rzt2h_gpio_set;
+ chip->label = dev_name(dev);
+
+ range->id = 0;
+ range->pin_base = 0;
+ range->base = 0;
+ range->npins = chip->ngpio;
+ range->name = chip->label;
+ range->gc = chip;
+
+ ret = devm_gpiochip_add_data(dev, chip, pctrl);
+ if (ret)
+ return dev_err_probe(dev, ret, "gpiochip registration failed\n");
+
+ return ret;
+}
+
+static int rzt2h_pinctrl_register(struct rzt2h_pinctrl *pctrl)
+{
+ struct pinctrl_desc *desc = &pctrl->desc;
+ struct device *dev = pctrl->dev;
+ struct pinctrl_pin_desc *pins;
+	unsigned int i;
+ int ret;
+
+ desc->name = DRV_NAME;
+ desc->npins = pctrl->data->n_port_pins;
+ desc->pctlops = &rzt2h_pinctrl_pctlops;
+ desc->pmxops = &rzt2h_pinctrl_pmxops;
+ desc->owner = THIS_MODULE;
+
+ pins = devm_kcalloc(dev, desc->npins, sizeof(*pins), GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ pctrl->pins = pins;
+ desc->pins = pins;
+
+	for (i = 0; i < pctrl->data->n_port_pins; i++) {
+		pins[i].number = i;
+		pins[i].name = rzt2h_gpio_names[i];
+	}
+
+ ret = devm_pinctrl_register_and_init(dev, desc, pctrl, &pctrl->pctl);
+ if (ret)
+ return dev_err_probe(dev, ret, "pinctrl registration failed\n");
+
+ ret = pinctrl_enable(pctrl->pctl);
+ if (ret)
+ return dev_err_probe(dev, ret, "pinctrl enable failed\n");
+
+ return rzt2h_gpio_register(pctrl);
+}
+
+static int rzt2h_pinctrl_cfg_regions(struct platform_device *pdev,
+ struct rzt2h_pinctrl *pctrl)
+{
+ struct resource *res;
+
+ pctrl->base0 = devm_platform_ioremap_resource_byname(pdev, "nsr");
+ if (IS_ERR(pctrl->base0))
+ return PTR_ERR(pctrl->base0);
+
+ /*
+ * Open-coded instead of using devm_platform_ioremap_resource_byname()
+ * because the "srs" region is optional.
+ */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "srs");
+ if (res) {
+ u8 port;
+
+ pctrl->base1 = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pctrl->base1))
+ return PTR_ERR(pctrl->base1);
+
+ pctrl->safety_port_enabled = true;
+
+		/* Configure each safety port to select the safety region 0x812c0xxx */
+ for (port = 0; port <= RZT2H_MAX_SAFETY_PORTS; port++)
+ writeb(0x0, pctrl->base1 + RSELP(port));
+ }
+
+ return 0;
+}
+
+static int rzt2h_pinctrl_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rzt2h_pinctrl *pctrl;
+ int ret;
+
+ pctrl = devm_kzalloc(dev, sizeof(*pctrl), GFP_KERNEL);
+ if (!pctrl)
+ return -ENOMEM;
+
+ pctrl->dev = dev;
+ pctrl->data = of_device_get_match_data(dev);
+
+ ret = rzt2h_pinctrl_cfg_regions(pdev, pctrl);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&pctrl->lock);
+ mutex_init(&pctrl->mutex);
+ platform_set_drvdata(pdev, pctrl);
+
+ return rzt2h_pinctrl_register(pctrl);
+}
+
+static const u8 r9a09g077_gpio_configs[] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f,
+};
+
+static const u8 r9a09g087_gpio_configs[] = {
+ 0x1f, 0xff, 0xff, 0x1f, 0x00, 0xfe, 0xff, 0x00, 0x7e, 0xf0, 0xff, 0x01,
+ 0xff, 0xff, 0xff, 0x00, 0xe0, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x01,
+ 0xe0, 0xff, 0xff, 0x7f, 0x00, 0xfe, 0xff, 0x7f, 0x00, 0xfc, 0x7f,
+};
+
+static struct rzt2h_pinctrl_data r9a09g077_data = {
+ .n_port_pins = ARRAY_SIZE(r9a09g077_gpio_configs) * RZT2H_PINS_PER_PORT,
+ .port_pin_configs = r9a09g077_gpio_configs,
+ .n_ports = ARRAY_SIZE(r9a09g077_gpio_configs),
+};
+
+static struct rzt2h_pinctrl_data r9a09g087_data = {
+ .n_port_pins = ARRAY_SIZE(r9a09g087_gpio_configs) * RZT2H_PINS_PER_PORT,
+ .port_pin_configs = r9a09g087_gpio_configs,
+ .n_ports = ARRAY_SIZE(r9a09g087_gpio_configs),
+};
+
+static const struct of_device_id rzt2h_pinctrl_of_table[] = {
+ {
+ .compatible = "renesas,r9a09g077-pinctrl",
+ .data = &r9a09g077_data,
+ },
+ {
+ .compatible = "renesas,r9a09g087-pinctrl",
+ .data = &r9a09g087_data,
+ },
+ { /* sentinel */ }
+};
+
+static struct platform_driver rzt2h_pinctrl_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(rzt2h_pinctrl_of_table),
+ .suppress_bind_attrs = true,
+ },
+ .probe = rzt2h_pinctrl_probe,
+};
+
+static int __init rzt2h_pinctrl_init(void)
+{
+ return platform_driver_register(&rzt2h_pinctrl_driver);
+}
+core_initcall(rzt2h_pinctrl_init);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Thierry Bultel <thierry.bultel.yh@bp.renesas.com>");
+MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>");
+MODULE_DESCRIPTION("Pin and gpio controller driver for the RZ/T2H family");
diff --git a/drivers/pinctrl/renesas/pinctrl-rzv2m.c b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
index a17b68b4c466..dce68f93d2d5 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzv2m.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
@@ -162,7 +162,7 @@ static int rzv2m_pinctrl_set_mux(struct pinctrl_dev *pctldev,
unsigned int group_selector)
{
struct rzv2m_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- struct function_desc *func;
+ const struct function_desc *func;
unsigned int i, *psel_val;
struct group_desc *group;
const unsigned int *pins;
@@ -957,7 +957,7 @@ static int rzv2m_gpio_register(struct rzv2m_pinctrl *pctrl)
chip->direction_input = rzv2m_gpio_direction_input;
chip->direction_output = rzv2m_gpio_direction_output;
chip->get = rzv2m_gpio_get;
- chip->set_rv = rzv2m_gpio_set;
+ chip->set = rzv2m_gpio_set;
chip->label = name;
chip->parent = pctrl->dev;
chip->owner = THIS_MODULE;
diff --git a/drivers/pinctrl/renesas/pinctrl.c b/drivers/pinctrl/renesas/pinctrl.c
index 29d16c9c1bd1..3a742f74ecd1 100644
--- a/drivers/pinctrl/renesas/pinctrl.c
+++ b/drivers/pinctrl/renesas/pinctrl.c
@@ -726,7 +726,8 @@ static int sh_pfc_pinconf_group_set(struct pinctrl_dev *pctldev, unsigned group,
struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
const unsigned int *pins;
unsigned int num_pins;
- unsigned int i, ret;
+ unsigned int i;
+ int ret;
pins = pmx->pfc->info->groups[group].pins;
num_pins = pmx->pfc->info->groups[group].nr_pins;
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
index 5fe7c4b9f7bd..323487dfa8c2 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
@@ -76,6 +76,15 @@ static const struct samsung_pin_bank_type exynos8895_bank_type_off = {
.reg_offset = { 0x00, 0x04, 0x08, 0x0c, 0x10, 0x14, },
};
+/*
+ * Bank type for non-alive type. Bit fields:
+ * CON: 4, DAT: 1, PUD: 4, DRV: 4
+ */
+static const struct samsung_pin_bank_type artpec_bank_type_off = {
+ .fld_width = { 4, 1, 4, 4, },
+ .reg_offset = { 0x00, 0x04, 0x08, 0x0c, },
+};
+
/* Pad retention control code for accessing PMU regmap */
static atomic_t exynos_shared_retention_refcnt;
@@ -1816,3 +1825,44 @@ const struct samsung_pinctrl_of_match_data gs101_of_data __initconst = {
.ctrl = gs101_pin_ctrl,
.num_ctrl = ARRAY_SIZE(gs101_pin_ctrl),
};
+
+/* pin banks of artpec8 pin-controller (FSYS0) */
+static const struct samsung_pin_bank_data artpec8_pin_banks0[] __initconst = {
+ ARTPEC_PIN_BANK_EINTG(5, 0x000, "gpf0", 0x00),
+ ARTPEC_PIN_BANK_EINTG(4, 0x020, "gpf1", 0x04),
+ ARTPEC_PIN_BANK_EINTG(8, 0x040, "gpf2", 0x08),
+ ARTPEC_PIN_BANK_EINTG(4, 0x060, "gpf3", 0x0c),
+ ARTPEC_PIN_BANK_EINTG(7, 0x080, "gpf4", 0x10),
+ ARTPEC_PIN_BANK_EINTG(8, 0x0a0, "gpe0", 0x14),
+ ARTPEC_PIN_BANK_EINTG(8, 0x0c0, "gpe1", 0x18),
+ ARTPEC_PIN_BANK_EINTG(6, 0x0e0, "gpe2", 0x1c),
+ ARTPEC_PIN_BANK_EINTG(8, 0x100, "gps0", 0x20),
+ ARTPEC_PIN_BANK_EINTG(8, 0x120, "gps1", 0x24),
+};
+
+/* pin banks of artpec8 pin-controller (PERIC) */
+static const struct samsung_pin_bank_data artpec8_pin_banks1[] __initconst = {
+ ARTPEC_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
+ ARTPEC_PIN_BANK_EINTG(8, 0x020, "gpa1", 0x04),
+ ARTPEC_PIN_BANK_EINTG(8, 0x040, "gpa2", 0x08),
+ ARTPEC_PIN_BANK_EINTG(2, 0x060, "gpk0", 0x0c),
+};
+
+static const struct samsung_pin_ctrl artpec8_pin_ctrl[] __initconst = {
+ {
+ /* pin-controller instance 0 FSYS data */
+ .pin_banks = artpec8_pin_banks0,
+ .nr_banks = ARRAY_SIZE(artpec8_pin_banks0),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ }, {
+ /* pin-controller instance 1 PERIC data */
+ .pin_banks = artpec8_pin_banks1,
+ .nr_banks = ARRAY_SIZE(artpec8_pin_banks1),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ },
+};
+
+const struct samsung_pinctrl_of_match_data artpec8_of_data __initconst = {
+ .ctrl = artpec8_pin_ctrl,
+ .num_ctrl = ARRAY_SIZE(artpec8_pin_ctrl),
+};
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h
index 362dc533186f..c9c38f8988dd 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.h
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.h
@@ -236,6 +236,16 @@
.name = id \
}
+#define ARTPEC_PIN_BANK_EINTG(pins, reg, id, offs) \
+ { \
+ .type = &artpec_bank_type_off, \
+ .pctl_offset = reg, \
+ .nr_pins = pins, \
+ .eint_type = EINT_TYPE_GPIO, \
+ .eint_offset = offs, \
+ .name = id \
+ }
+
/**
* struct exynos_weint_data: irq specific data for all the wakeup interrupts
* generated by the external wakeup interrupt controller.
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index fe1ac82b9d79..c099195fc464 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -1067,7 +1067,7 @@ static int samsung_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
static const struct gpio_chip samsung_gpiolib_chip = {
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
- .set_rv = samsung_gpio_set,
+ .set = samsung_gpio_set,
.get = samsung_gpio_get,
.direction_input = samsung_gpio_direction_input,
.direction_output = samsung_gpio_direction_output,
@@ -1482,6 +1482,8 @@ static const struct of_device_id samsung_pinctrl_dt_match[] = {
.data = &s5pv210_of_data },
#endif
#ifdef CONFIG_PINCTRL_EXYNOS_ARM64
+ { .compatible = "axis,artpec8-pinctrl",
+ .data = &artpec8_of_data },
{ .compatible = "google,gs101-pinctrl",
.data = &gs101_of_data },
{ .compatible = "samsung,exynos2200-pinctrl",
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
index 1cabcbe1401a..3e8ef91d94a3 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
@@ -381,6 +381,7 @@ struct samsung_pmx_func {
};
/* list of all exported SoC specific data */
+extern const struct samsung_pinctrl_of_match_data artpec8_of_data;
extern const struct samsung_pinctrl_of_match_data exynos2200_of_data;
extern const struct samsung_pinctrl_of_match_data exynos3250_of_data;
extern const struct samsung_pinctrl_of_match_data exynos4210_of_data;
@@ -402,10 +403,6 @@ extern const struct samsung_pinctrl_of_match_data exynosautov920_of_data;
extern const struct samsung_pinctrl_of_match_data fsd_of_data;
extern const struct samsung_pinctrl_of_match_data gs101_of_data;
extern const struct samsung_pinctrl_of_match_data s3c64xx_of_data;
-extern const struct samsung_pinctrl_of_match_data s3c2412_of_data;
-extern const struct samsung_pinctrl_of_match_data s3c2416_of_data;
-extern const struct samsung_pinctrl_of_match_data s3c2440_of_data;
-extern const struct samsung_pinctrl_of_match_data s3c2450_of_data;
extern const struct samsung_pinctrl_of_match_data s5pv210_of_data;
#endif /* __PINCTRL_SAMSUNG_H */
diff --git a/drivers/pinctrl/spacemit/pinctrl-k1.c b/drivers/pinctrl/spacemit/pinctrl-k1.c
index 9996b1c4a07e..33af9b5791c1 100644
--- a/drivers/pinctrl/spacemit/pinctrl-k1.c
+++ b/drivers/pinctrl/spacemit/pinctrl-k1.c
@@ -707,7 +707,7 @@ static void spacemit_pinconf_dbg_show(struct pinctrl_dev *pctldev,
spacemit_get_drive_strength_mA(IO_TYPE_1V8, tmp),
spacemit_get_drive_strength_mA(IO_TYPE_3V3, tmp));
- seq_printf(seq, ", register (0x%04x)\n", value);
+ seq_printf(seq, ", register (0x%04x)", value);
}
static const struct pinconf_ops spacemit_pinconf_ops = {
@@ -847,7 +847,7 @@ static const struct pinctrl_pin_desc k1_pin_desc[] = {
PINCTRL_PIN(67, "GPIO_67"),
PINCTRL_PIN(68, "GPIO_68"),
PINCTRL_PIN(69, "GPIO_69"),
- PINCTRL_PIN(70, "GPIO_70/PRI_DTI"),
+ PINCTRL_PIN(70, "GPIO_70/PRI_TDI"),
PINCTRL_PIN(71, "GPIO_71/PRI_TMS"),
PINCTRL_PIN(72, "GPIO_72/PRI_TCK"),
PINCTRL_PIN(73, "GPIO_73/PRI_TDO"),
diff --git a/drivers/pinctrl/spear/pinctrl-plgpio.c b/drivers/pinctrl/spear/pinctrl-plgpio.c
index e8234d2156da..1ec22010a3f9 100644
--- a/drivers/pinctrl/spear/pinctrl-plgpio.c
+++ b/drivers/pinctrl/spear/pinctrl-plgpio.c
@@ -582,7 +582,7 @@ static int plgpio_probe(struct platform_device *pdev)
plgpio->chip.direction_input = plgpio_direction_input;
plgpio->chip.direction_output = plgpio_direction_output;
plgpio->chip.get = plgpio_get_value;
- plgpio->chip.set_rv = plgpio_set_value;
+ plgpio->chip.set = plgpio_set_value;
plgpio->chip.label = dev_name(&pdev->dev);
plgpio->chip.parent = &pdev->dev;
plgpio->chip.owner = THIS_MODULE;
diff --git a/drivers/pinctrl/sprd/pinctrl-sprd.c b/drivers/pinctrl/sprd/pinctrl-sprd.c
index c4a1d99dfed0..16cf9d15f247 100644
--- a/drivers/pinctrl/sprd/pinctrl-sprd.c
+++ b/drivers/pinctrl/sprd/pinctrl-sprd.c
@@ -258,8 +258,7 @@ static int sprd_dt_node_to_map(struct pinctrl_dev *pctldev,
grp = sprd_pinctrl_find_group_by_name(pctl, np->name);
if (!grp) {
- dev_err(pctl->dev, "unable to find group for node %s\n",
- of_node_full_name(np));
+ dev_err(pctl->dev, "unable to find group for node %pOF\n", np);
return -EINVAL;
}
@@ -276,16 +275,14 @@ static int sprd_dt_node_to_map(struct pinctrl_dev *pctldev,
if (ret < 0) {
if (ret != -EINVAL)
dev_err(pctl->dev,
- "%s: could not parse property function\n",
- of_node_full_name(np));
+ "%pOF: could not parse property function\n", np);
function = NULL;
}
ret = pinconf_generic_parse_dt_config(np, pctldev, &configs,
&num_configs);
if (ret < 0) {
- dev_err(pctl->dev, "%s: could not parse node property\n",
- of_node_full_name(np));
+ dev_err(pctl->dev, "%pOF: could not parse node property\n", np);
return ret;
}
diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
index b729ca4de422..7fa13f282b85 100644
--- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
+++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
@@ -1302,7 +1302,7 @@ static int starfive_probe(struct platform_device *pdev)
sfp->gc.direction_input = starfive_gpio_direction_input;
sfp->gc.direction_output = starfive_gpio_direction_output;
sfp->gc.get = starfive_gpio_get;
- sfp->gc.set_rv = starfive_gpio_set;
+ sfp->gc.set = starfive_gpio_set;
sfp->gc.set_config = starfive_gpio_set_config;
sfp->gc.add_pin_ranges = starfive_gpio_add_pin_ranges;
sfp->gc.base = -1;
diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
index 082bb1c6cea9..05e3af75b09f 100644
--- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
+++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
@@ -935,7 +935,7 @@ int jh7110_pinctrl_probe(struct platform_device *pdev)
sfp->gc.direction_input = jh7110_gpio_direction_input;
sfp->gc.direction_output = jh7110_gpio_direction_output;
sfp->gc.get = jh7110_gpio_get;
- sfp->gc.set_rv = jh7110_gpio_set;
+ sfp->gc.set = jh7110_gpio_set;
sfp->gc.set_config = jh7110_gpio_set_config;
sfp->gc.add_pin_ranges = jh7110_gpio_add_pin_ranges;
sfp->gc.base = info->gc_base;
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32-hdp.c b/drivers/pinctrl/stm32/pinctrl-stm32-hdp.c
index e91442eb566b..0b1dff01e04c 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32-hdp.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32-hdp.c
@@ -6,6 +6,7 @@
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -45,7 +46,7 @@ struct stm32_hdp {
void __iomem *base;
struct clk *clk;
struct pinctrl_dev *pctl_dev;
- struct gpio_chip gpio_chip;
+ struct gpio_generic_chip gpio_chip;
u32 mux_conf;
u32 gposet_conf;
const char * const *func_name;
@@ -575,7 +576,7 @@ static const struct pinmux_ops stm32_hdp_pinmux_ops = {
.gpio_set_direction = NULL,
};
-static struct pinctrl_desc stm32_hdp_pdesc = {
+static const struct pinctrl_desc stm32_hdp_pdesc = {
.name = DRIVER_NAME,
.pins = stm32_hdp_pins,
.npins = ARRAY_SIZE(stm32_hdp_pins),
@@ -603,6 +604,7 @@ MODULE_DEVICE_TABLE(of, stm32_hdp_of_match);
static int stm32_hdp_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct stm32_hdp *hdp;
u8 version;
@@ -635,21 +637,25 @@ static int stm32_hdp_probe(struct platform_device *pdev)
if (err)
return dev_err_probe(dev, err, "Failed to enable pinctrl\n");
- hdp->gpio_chip.get_direction = stm32_hdp_gpio_get_direction;
- hdp->gpio_chip.ngpio = ARRAY_SIZE(stm32_hdp_pins);
- hdp->gpio_chip.can_sleep = true;
- hdp->gpio_chip.names = stm32_hdp_pins_group;
-
- err = bgpio_init(&hdp->gpio_chip, dev, 4,
- hdp->base + HDP_GPOVAL,
- hdp->base + HDP_GPOSET,
- hdp->base + HDP_GPOCLR,
- NULL, NULL, BGPIOF_NO_INPUT);
+ hdp->gpio_chip.gc.get_direction = stm32_hdp_gpio_get_direction;
+ hdp->gpio_chip.gc.ngpio = ARRAY_SIZE(stm32_hdp_pins);
+ hdp->gpio_chip.gc.can_sleep = true;
+ hdp->gpio_chip.gc.names = stm32_hdp_pins_group;
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = hdp->base + HDP_GPOVAL,
+ .set = hdp->base + HDP_GPOSET,
+ .clr = hdp->base + HDP_GPOCLR,
+ .flags = GPIO_GENERIC_NO_INPUT,
+ };
+
+ err = gpio_generic_chip_init(&hdp->gpio_chip, &config);
if (err)
- return dev_err_probe(dev, err, "Failed to init bgpio\n");
-
+ return dev_err_probe(dev, err, "Failed to init the generic GPIO chip\n");
- err = devm_gpiochip_add_data(dev, &hdp->gpio_chip, hdp);
+ err = devm_gpiochip_add_data(dev, &hdp->gpio_chip.gc, hdp);
if (err)
return dev_err_probe(dev, err, "Failed to add gpiochip\n");
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index f47c4e6f12b4..3ebb468de830 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -433,7 +433,7 @@ static const struct gpio_chip stm32_gpio_template = {
.request = stm32_gpio_request,
.free = stm32_gpio_free,
.get = stm32_gpio_get,
- .set_rv = stm32_gpio_set,
+ .set = stm32_gpio_set,
.direction_input = pinctrl_gpio_direction_input,
.direction_output = stm32_gpio_direction_output,
.to_irq = stm32_gpio_to_irq,
@@ -1236,7 +1236,7 @@ static int stm32_pconf_parse_conf(struct pinctrl_dev *pctldev,
case PIN_CONFIG_BIAS_PULL_DOWN:
ret = stm32_pconf_set_bias(bank, offset, 2);
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
__stm32_gpio_set(bank, offset, arg);
ret = stm32_pmx_gpio_set_direction(pctldev, range, pin, false);
break;
diff --git a/drivers/pinctrl/sunplus/sppctl.c b/drivers/pinctrl/sunplus/sppctl.c
index 3c3357f80889..fabe7efaa837 100644
--- a/drivers/pinctrl/sunplus/sppctl.c
+++ b/drivers/pinctrl/sunplus/sppctl.c
@@ -488,7 +488,7 @@ static int sppctl_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
case PIN_CONFIG_INPUT_ENABLE:
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
return sppctl_gpio_direction_output(chip, offset, 0);
case PIN_CONFIG_PERSIST_STATE:
@@ -547,7 +547,7 @@ static int sppctl_gpio_new(struct platform_device *pdev, struct sppctl_pdata *pc
gchip->direction_input = sppctl_gpio_direction_input;
gchip->direction_output = sppctl_gpio_direction_output;
gchip->get = sppctl_gpio_get;
- gchip->set_rv = sppctl_gpio_set;
+ gchip->set = sppctl_gpio_set;
gchip->set_config = sppctl_gpio_set_config;
gchip->dbg_show = IS_ENABLED(CONFIG_DEBUG_FS) ?
sppctl_gpio_dbg_show : NULL;
@@ -580,7 +580,7 @@ static int sppctl_pin_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
arg = 0;
break;
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
if (!sppctl_first_get(&pctl->spp_gchip->chip, pin))
return -EINVAL;
if (!sppctl_master_get(&pctl->spp_gchip->chip, pin))
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c b/drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c
index 4e34b0cd3b73..50a16f3bd131 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c
@@ -103,7 +103,7 @@ static struct sunxi_desc_pin *init_pins_table(struct device *dev,
return ERR_PTR(-EINVAL);
}
- pins = devm_kzalloc(dev, desc->npins * sizeof(*pins), GFP_KERNEL);
+ pins = devm_kcalloc(dev, desc->npins, sizeof(*pins), GFP_KERNEL);
if (!pins)
return ERR_PTR(-ENOMEM);
@@ -199,7 +199,7 @@ static int prepare_function_table(struct device *dev, struct device_node *pnode,
* Allocate the memory needed for the functions in one table.
* We later use pointers into this table to mark each pin.
*/
- func = devm_kzalloc(dev, num_funcs * sizeof(*func), GFP_KERNEL);
+ func = devm_kcalloc(dev, num_funcs, sizeof(*func), GFP_KERNEL);
if (!func)
return -ENOMEM;
@@ -274,8 +274,7 @@ static void fill_pin_function(struct device *dev, struct device_node *node,
if (!strcmp(pins[pin].pin.name, name))
break;
if (pin == npins) {
- dev_warn(dev, "%s: cannot find pin %s\n",
- of_node_full_name(node), name);
+ dev_warn(dev, "%pOF: cannot find pin %s\n", node, name);
index++;
continue;
}
@@ -283,8 +282,8 @@ static void fill_pin_function(struct device *dev, struct device_node *node,
/* Read the associated mux value. */
muxval = sunxi_pinctrl_dt_read_pinmux(node, index);
if (muxval == INVALID_MUX) {
- dev_warn(dev, "%s: invalid mux value for pin %s\n",
- of_node_full_name(node), name);
+ dev_warn(dev, "%pOF: invalid mux value for pin %s\n",
+ node, name);
index++;
continue;
}
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 0db8429a013f..0fb057a07dcc 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -1604,7 +1604,7 @@ int sunxi_pinctrl_init_with_flags(struct platform_device *pdev,
pctl->chip->direction_input = sunxi_pinctrl_gpio_direction_input;
pctl->chip->direction_output = sunxi_pinctrl_gpio_direction_output;
pctl->chip->get = sunxi_pinctrl_gpio_get;
- pctl->chip->set_rv = sunxi_pinctrl_gpio_set;
+ pctl->chip->set = sunxi_pinctrl_gpio_set;
pctl->chip->of_xlate = sunxi_pinctrl_gpio_of_xlate;
pctl->chip->to_irq = sunxi_pinctrl_gpio_to_irq;
pctl->chip->of_gpio_n_cells = 3;
diff --git a/drivers/pinctrl/tegra/Kconfig b/drivers/pinctrl/tegra/Kconfig
index 4e87d19323ba..660d101ea367 100644
--- a/drivers/pinctrl/tegra/Kconfig
+++ b/drivers/pinctrl/tegra/Kconfig
@@ -24,6 +24,10 @@ config PINCTRL_TEGRA210
bool
select PINCTRL_TEGRA
+config PINCTRL_TEGRA186
+ bool
+ select PINCTRL_TEGRA
+
config PINCTRL_TEGRA194
bool
select PINCTRL_TEGRA
diff --git a/drivers/pinctrl/tegra/Makefile b/drivers/pinctrl/tegra/Makefile
index a93973701d4c..82176526549e 100644
--- a/drivers/pinctrl/tegra/Makefile
+++ b/drivers/pinctrl/tegra/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_PINCTRL_TEGRA30) += pinctrl-tegra30.o
obj-$(CONFIG_PINCTRL_TEGRA114) += pinctrl-tegra114.o
obj-$(CONFIG_PINCTRL_TEGRA124) += pinctrl-tegra124.o
obj-$(CONFIG_PINCTRL_TEGRA210) += pinctrl-tegra210.o
+obj-$(CONFIG_PINCTRL_TEGRA186) += pinctrl-tegra186.o
obj-$(CONFIG_PINCTRL_TEGRA194) += pinctrl-tegra194.o
obj-$(CONFIG_PINCTRL_TEGRA234) += pinctrl-tegra234.o
obj-$(CONFIG_PINCTRL_TEGRA_XUSB) += pinctrl-tegra-xusb.o
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra186.c b/drivers/pinctrl/tegra/pinctrl-tegra186.c
new file mode 100644
index 000000000000..4a1d6476af9b
--- /dev/null
+++ b/drivers/pinctrl/tegra/pinctrl-tegra186.c
@@ -0,0 +1,1979 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Pinctrl data for the NVIDIA Tegra186 pinmux
+ *
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include "pinctrl-tegra.h"
+
+/* Define a unique ID for each pin */
+enum {
+ TEGRA_PIN_PEX_L0_RST_N_PA0,
+ TEGRA_PIN_PEX_L0_CLKREQ_N_PA1,
+ TEGRA_PIN_PEX_WAKE_N_PA2,
+ TEGRA_PIN_PEX_L1_RST_N_PA3,
+ TEGRA_PIN_PEX_L1_CLKREQ_N_PA4,
+ TEGRA_PIN_PEX_L2_RST_N_PA5,
+ TEGRA_PIN_PEX_L2_CLKREQ_N_PA6,
+ TEGRA_PIN_UART4_TX_PB0,
+ TEGRA_PIN_UART4_RX_PB1,
+ TEGRA_PIN_UART4_RTS_PB2,
+ TEGRA_PIN_UART4_CTS_PB3,
+ TEGRA_PIN_GPIO_WAN1_PB4,
+ TEGRA_PIN_GPIO_WAN2_PB5,
+ TEGRA_PIN_GPIO_WAN3_PB6,
+ TEGRA_PIN_GPIO_WAN4_PC0,
+ TEGRA_PIN_DAP2_SCLK_PC1,
+ TEGRA_PIN_DAP2_DOUT_PC2,
+ TEGRA_PIN_DAP2_DIN_PC3,
+ TEGRA_PIN_DAP2_FS_PC4,
+ TEGRA_PIN_GEN1_I2C_SCL_PC5,
+ TEGRA_PIN_GEN1_I2C_SDA_PC6,
+ TEGRA_PIN_SDMMC1_CLK_PD0,
+ TEGRA_PIN_SDMMC1_CMD_PD1,
+ TEGRA_PIN_SDMMC1_DAT0_PD2,
+ TEGRA_PIN_SDMMC1_DAT1_PD3,
+ TEGRA_PIN_SDMMC1_DAT2_PD4,
+ TEGRA_PIN_SDMMC1_DAT3_PD5,
+ TEGRA_PIN_EQOS_TXC_PE0,
+ TEGRA_PIN_EQOS_TD0_PE1,
+ TEGRA_PIN_EQOS_TD1_PE2,
+ TEGRA_PIN_EQOS_TD2_PE3,
+ TEGRA_PIN_EQOS_TD3_PE4,
+ TEGRA_PIN_EQOS_TX_CTL_PE5,
+ TEGRA_PIN_EQOS_RD0_PE6,
+ TEGRA_PIN_EQOS_RD1_PE7,
+ TEGRA_PIN_EQOS_RD2_PF0,
+ TEGRA_PIN_EQOS_RD3_PF1,
+ TEGRA_PIN_EQOS_RX_CTL_PF2,
+ TEGRA_PIN_EQOS_RXC_PF3,
+ TEGRA_PIN_EQOS_MDIO_PF4,
+ TEGRA_PIN_EQOS_MDC_PF5,
+ TEGRA_PIN_SDMMC3_CLK_PG0,
+ TEGRA_PIN_SDMMC3_CMD_PG1,
+ TEGRA_PIN_SDMMC3_DAT0_PG2,
+ TEGRA_PIN_SDMMC3_DAT1_PG3,
+ TEGRA_PIN_SDMMC3_DAT2_PG4,
+ TEGRA_PIN_SDMMC3_DAT3_PG5,
+ TEGRA_PIN_GPIO_WAN5_PH0,
+ TEGRA_PIN_GPIO_WAN6_PH1,
+ TEGRA_PIN_GPIO_WAN7_PH2,
+ TEGRA_PIN_GPIO_WAN8_PH3,
+ TEGRA_PIN_BCPU_PWR_REQ_PH4,
+ TEGRA_PIN_MCPU_PWR_REQ_PH5,
+ TEGRA_PIN_GPU_PWR_REQ_PH6,
+ TEGRA_PIN_GPIO_PQ0_PI0,
+ TEGRA_PIN_GPIO_PQ1_PI1,
+ TEGRA_PIN_GPIO_PQ2_PI2,
+ TEGRA_PIN_GPIO_PQ3_PI3,
+ TEGRA_PIN_GPIO_PQ4_PI4,
+ TEGRA_PIN_GPIO_PQ5_PI5,
+ TEGRA_PIN_GPIO_PQ6_PI6,
+ TEGRA_PIN_GPIO_PQ7_PI7,
+ TEGRA_PIN_DAP1_SCLK_PJ0,
+ TEGRA_PIN_DAP1_DOUT_PJ1,
+ TEGRA_PIN_DAP1_DIN_PJ2,
+ TEGRA_PIN_DAP1_FS_PJ3,
+ TEGRA_PIN_AUD_MCLK_PJ4,
+ TEGRA_PIN_GPIO_AUD0_PJ5,
+ TEGRA_PIN_GPIO_AUD1_PJ6,
+ TEGRA_PIN_GPIO_AUD2_PJ7,
+ TEGRA_PIN_GPIO_AUD3_PK0,
+ TEGRA_PIN_GEN7_I2C_SCL_PL0,
+ TEGRA_PIN_GEN7_I2C_SDA_PL1,
+ TEGRA_PIN_GEN9_I2C_SCL_PL2,
+ TEGRA_PIN_GEN9_I2C_SDA_PL3,
+ TEGRA_PIN_USB_VBUS_EN0_PL4,
+ TEGRA_PIN_USB_VBUS_EN1_PL5,
+ TEGRA_PIN_GP_PWM6_PL6,
+ TEGRA_PIN_GP_PWM7_PL7,
+ TEGRA_PIN_DMIC1_DAT_PM0,
+ TEGRA_PIN_DMIC1_CLK_PM1,
+ TEGRA_PIN_DMIC2_DAT_PM2,
+ TEGRA_PIN_DMIC2_CLK_PM3,
+ TEGRA_PIN_DMIC4_DAT_PM4,
+ TEGRA_PIN_DMIC4_CLK_PM5,
+ TEGRA_PIN_GPIO_CAM1_PN0,
+ TEGRA_PIN_GPIO_CAM2_PN1,
+ TEGRA_PIN_GPIO_CAM3_PN2,
+ TEGRA_PIN_GPIO_CAM4_PN3,
+ TEGRA_PIN_GPIO_CAM5_PN4,
+ TEGRA_PIN_GPIO_CAM6_PN5,
+ TEGRA_PIN_GPIO_CAM7_PN6,
+ TEGRA_PIN_EXTPERIPH1_CLK_PO0,
+ TEGRA_PIN_EXTPERIPH2_CLK_PO1,
+ TEGRA_PIN_CAM_I2C_SCL_PO2,
+ TEGRA_PIN_CAM_I2C_SDA_PO3,
+ TEGRA_PIN_DP_AUX_CH0_HPD_PP0,
+ TEGRA_PIN_DP_AUX_CH1_HPD_PP1,
+ TEGRA_PIN_HDMI_CEC_PP2,
+ TEGRA_PIN_GPIO_EDP0_PP3,
+ TEGRA_PIN_GPIO_EDP1_PP4,
+ TEGRA_PIN_GPIO_EDP2_PP5,
+ TEGRA_PIN_GPIO_EDP3_PP6,
+ TEGRA_PIN_DIRECTDC1_CLK_PQ0,
+ TEGRA_PIN_DIRECTDC1_IN_PQ1,
+ TEGRA_PIN_DIRECTDC1_OUT0_PQ2,
+ TEGRA_PIN_DIRECTDC1_OUT1_PQ3,
+ TEGRA_PIN_DIRECTDC1_OUT2_PQ4,
+ TEGRA_PIN_DIRECTDC1_OUT3_PQ5,
+ TEGRA_PIN_QSPI_SCK_PR0,
+ TEGRA_PIN_QSPI_IO0_PR1,
+ TEGRA_PIN_QSPI_IO1_PR2,
+ TEGRA_PIN_QSPI_IO2_PR3,
+ TEGRA_PIN_QSPI_IO3_PR4,
+ TEGRA_PIN_QSPI_CS_N_PR5,
+ TEGRA_PIN_UART1_TX_PT0,
+ TEGRA_PIN_UART1_RX_PT1,
+ TEGRA_PIN_UART1_RTS_PT2,
+ TEGRA_PIN_UART1_CTS_PT3,
+ TEGRA_PIN_UART2_TX_PX0,
+ TEGRA_PIN_UART2_RX_PX1,
+ TEGRA_PIN_UART2_RTS_PX2,
+ TEGRA_PIN_UART2_CTS_PX3,
+ TEGRA_PIN_UART5_TX_PX4,
+ TEGRA_PIN_UART5_RX_PX5,
+ TEGRA_PIN_UART5_RTS_PX6,
+ TEGRA_PIN_UART5_CTS_PX7,
+ TEGRA_PIN_GPIO_MDM1_PY0,
+ TEGRA_PIN_GPIO_MDM2_PY1,
+ TEGRA_PIN_GPIO_MDM3_PY2,
+ TEGRA_PIN_GPIO_MDM4_PY3,
+ TEGRA_PIN_GPIO_MDM5_PY4,
+ TEGRA_PIN_GPIO_MDM6_PY5,
+ TEGRA_PIN_GPIO_MDM7_PY6,
+ TEGRA_PIN_UFS0_REF_CLK_PBB0,
+ TEGRA_PIN_UFS0_RST_PBB1,
+ TEGRA_PIN_DAP4_SCLK_PCC0,
+ TEGRA_PIN_DAP4_DOUT_PCC1,
+ TEGRA_PIN_DAP4_DIN_PCC2,
+ TEGRA_PIN_DAP4_FS_PCC3,
+ TEGRA_PIN_DIRECTDC_COMP,
+ TEGRA_PIN_SDMMC1_COMP,
+ TEGRA_PIN_EQOS_COMP,
+ TEGRA_PIN_SDMMC3_COMP,
+ TEGRA_PIN_QSPI_COMP,
+};
+
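+/* Pin IDs for the always-on (AON) domain, inferred from the pin names */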
+enum {
+ TEGRA_PIN_PWR_I2C_SCL_PS0,
+ TEGRA_PIN_PWR_I2C_SDA_PS1,
+ TEGRA_PIN_BATT_OC_PS2,
+ TEGRA_PIN_SAFE_STATE_PS3,
+ TEGRA_PIN_VCOMP_ALERT_PS4,
+ TEGRA_PIN_GPIO_DIS0_PU0,
+ TEGRA_PIN_GPIO_DIS1_PU1,
+ TEGRA_PIN_GPIO_DIS2_PU2,
+ TEGRA_PIN_GPIO_DIS3_PU3,
+ TEGRA_PIN_GPIO_DIS4_PU4,
+ TEGRA_PIN_GPIO_DIS5_PU5,
+ TEGRA_PIN_GPIO_SEN0_PV0,
+ TEGRA_PIN_GPIO_SEN1_PV1,
+ TEGRA_PIN_GPIO_SEN2_PV2,
+ TEGRA_PIN_GPIO_SEN3_PV3,
+ TEGRA_PIN_GPIO_SEN4_PV4,
+ TEGRA_PIN_GPIO_SEN5_PV5,
+ TEGRA_PIN_GPIO_SEN6_PV6,
+ TEGRA_PIN_GPIO_SEN7_PV7,
+ TEGRA_PIN_GEN8_I2C_SCL_PW0,
+ TEGRA_PIN_GEN8_I2C_SDA_PW1,
+ TEGRA_PIN_UART3_TX_PW2,
+ TEGRA_PIN_UART3_RX_PW3,
+ TEGRA_PIN_UART3_RTS_PW4,
+ TEGRA_PIN_UART3_CTS_PW5,
+ TEGRA_PIN_UART7_TX_PW6,
+ TEGRA_PIN_UART7_RX_PW7,
+ TEGRA_PIN_CAN1_DOUT_PZ0,
+ TEGRA_PIN_CAN1_DIN_PZ1,
+ TEGRA_PIN_CAN0_DOUT_PZ2,
+ TEGRA_PIN_CAN0_DIN_PZ3,
+ TEGRA_PIN_CAN_GPIO0_PAA0,
+ TEGRA_PIN_CAN_GPIO1_PAA1,
+ TEGRA_PIN_CAN_GPIO2_PAA2,
+ TEGRA_PIN_CAN_GPIO3_PAA3,
+ TEGRA_PIN_CAN_GPIO4_PAA4,
+ TEGRA_PIN_CAN_GPIO5_PAA5,
+ TEGRA_PIN_CAN_GPIO6_PAA6,
+ TEGRA_PIN_CAN_GPIO7_PAA7,
+ TEGRA_PIN_GPIO_SEN8_PEE0,
+ TEGRA_PIN_GPIO_SEN9_PEE1,
+ TEGRA_PIN_TOUCH_CLK_PEE2,
+ TEGRA_PIN_POWER_ON_PFF0,
+ TEGRA_PIN_GPIO_SW1_PFF1,
+ TEGRA_PIN_GPIO_SW2_PFF2,
+ TEGRA_PIN_GPIO_SW3_PFF3,
+ TEGRA_PIN_GPIO_SW4_PFF4,
+ TEGRA_PIN_SHUTDOWN,
+ TEGRA_PIN_PMU_INT,
+ TEGRA_PIN_SOC_PWR_REQ,
+ TEGRA_PIN_CLK_32K_IN,
+};
+
+/* Table of pin descriptors */
+static const struct pinctrl_pin_desc tegra186_pins[] = {
+ PINCTRL_PIN(TEGRA_PIN_PEX_L0_RST_N_PA0, "PEX_L0_RST_N_PA0"),
+ PINCTRL_PIN(TEGRA_PIN_PEX_L0_CLKREQ_N_PA1, "PEX_L0_CLKREQ_N_PA1"),
+ PINCTRL_PIN(TEGRA_PIN_PEX_WAKE_N_PA2, "PEX_WAKE_N_PA2"),
+ PINCTRL_PIN(TEGRA_PIN_PEX_L1_RST_N_PA3, "PEX_L1_RST_N_PA3"),
+ PINCTRL_PIN(TEGRA_PIN_PEX_L1_CLKREQ_N_PA4, "PEX_L1_CLKREQ_N_PA4"),
+ PINCTRL_PIN(TEGRA_PIN_PEX_L2_RST_N_PA5, "PEX_L2_RST_N_PA5"),
+ PINCTRL_PIN(TEGRA_PIN_PEX_L2_CLKREQ_N_PA6, "PEX_L2_CLKREQ_N_PA6"),
+ PINCTRL_PIN(TEGRA_PIN_UART4_TX_PB0, "UART4_TX_PB0"),
+ PINCTRL_PIN(TEGRA_PIN_UART4_RX_PB1, "UART4_RX_PB1"),
+ PINCTRL_PIN(TEGRA_PIN_UART4_RTS_PB2, "UART4_RTS_PB2"),
+ PINCTRL_PIN(TEGRA_PIN_UART4_CTS_PB3, "UART4_CTS_PB3"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_WAN1_PB4, "GPIO_WAN1_PB4"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_WAN2_PB5, "GPIO_WAN2_PB5"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_WAN3_PB6, "GPIO_WAN3_PB6"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_WAN4_PC0, "GPIO_WAN4_PC0"),
+ PINCTRL_PIN(TEGRA_PIN_DAP2_SCLK_PC1, "DAP2_SCLK_PC1"),
+ PINCTRL_PIN(TEGRA_PIN_DAP2_DOUT_PC2, "DAP2_DOUT_PC2"),
+ PINCTRL_PIN(TEGRA_PIN_DAP2_DIN_PC3, "DAP2_DIN_PC3"),
+ PINCTRL_PIN(TEGRA_PIN_DAP2_FS_PC4, "DAP2_FS_PC4"),
+ PINCTRL_PIN(TEGRA_PIN_GEN1_I2C_SCL_PC5, "GEN1_I2C_SCL_PC5"),
+ PINCTRL_PIN(TEGRA_PIN_GEN1_I2C_SDA_PC6, "GEN1_I2C_SDA_PC6"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_CLK_PD0, "SDMMC1_CLK_PD0"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_CMD_PD1, "SDMMC1_CMD_PD1"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_DAT0_PD2, "SDMMC1_DAT0_PD2"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_DAT1_PD3, "SDMMC1_DAT1_PD3"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_DAT2_PD4, "SDMMC1_DAT2_PD4"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_DAT3_PD5, "SDMMC1_DAT3_PD5"),
+ PINCTRL_PIN(TEGRA_PIN_EQOS_TXC_PE0, "EQOS_TXC_PE0"),
+ PINCTRL_PIN(TEGRA_PIN_EQOS_TD0_PE1, "EQOS_TD0_PE1"),
+ PINCTRL_PIN(TEGRA_PIN_EQOS_TD1_PE2, "EQOS_TD1_PE2"),
+ PINCTRL_PIN(TEGRA_PIN_EQOS_TD2_PE3, "EQOS_TD2_PE3"),
+ PINCTRL_PIN(TEGRA_PIN_EQOS_TD3_PE4, "EQOS_TD3_PE4"),
+ PINCTRL_PIN(TEGRA_PIN_EQOS_TX_CTL_PE5, "EQOS_TX_CTL_PE5"),
+ PINCTRL_PIN(TEGRA_PIN_EQOS_RD0_PE6, "EQOS_RD0_PE6"),
+ PINCTRL_PIN(TEGRA_PIN_EQOS_RD1_PE7, "EQOS_RD1_PE7"),
+ PINCTRL_PIN(TEGRA_PIN_EQOS_RD2_PF0, "EQOS_RD2_PF0"),
+ PINCTRL_PIN(TEGRA_PIN_EQOS_RD3_PF1, "EQOS_RD3_PF1"),
+ PINCTRL_PIN(TEGRA_PIN_EQOS_RX_CTL_PF2, "EQOS_RX_CTL_PF2"),
+ PINCTRL_PIN(TEGRA_PIN_EQOS_RXC_PF3, "EQOS_RXC_PF3"),
+ PINCTRL_PIN(TEGRA_PIN_EQOS_MDIO_PF4, "EQOS_MDIO_PF4"),
+ PINCTRL_PIN(TEGRA_PIN_EQOS_MDC_PF5, "EQOS_MDC_PF5"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_CLK_PG0, "SDMMC3_CLK_PG0"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_CMD_PG1, "SDMMC3_CMD_PG1"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT0_PG2, "SDMMC3_DAT0_PG2"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT1_PG3, "SDMMC3_DAT1_PG3"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT2_PG4, "SDMMC3_DAT2_PG4"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT3_PG5, "SDMMC3_DAT3_PG5"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_WAN5_PH0, "GPIO_WAN5_PH0"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_WAN6_PH1, "GPIO_WAN6_PH1"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_WAN7_PH2, "GPIO_WAN7_PH2"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_WAN8_PH3, "GPIO_WAN8_PH3"),
+ PINCTRL_PIN(TEGRA_PIN_BCPU_PWR_REQ_PH4, "BCPU_PWR_REQ_PH4"),
+ PINCTRL_PIN(TEGRA_PIN_MCPU_PWR_REQ_PH5, "MCPU_PWR_REQ_PH5"),
+ PINCTRL_PIN(TEGRA_PIN_GPU_PWR_REQ_PH6, "GPU_PWR_REQ_PH6"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_PQ0_PI0, "GPIO_PQ0_PI0"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_PQ1_PI1, "GPIO_PQ1_PI1"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_PQ2_PI2, "GPIO_PQ2_PI2"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_PQ3_PI3, "GPIO_PQ3_PI3"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_PQ4_PI4, "GPIO_PQ4_PI4"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_PQ5_PI5, "GPIO_PQ5_PI5"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_PQ6_PI6, "GPIO_PQ6_PI6"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_PQ7_PI7, "GPIO_PQ7_PI7"),
+ PINCTRL_PIN(TEGRA_PIN_DAP1_SCLK_PJ0, "DAP1_SCLK_PJ0"),
+ PINCTRL_PIN(TEGRA_PIN_DAP1_DOUT_PJ1, "DAP1_DOUT_PJ1"),
+ PINCTRL_PIN(TEGRA_PIN_DAP1_DIN_PJ2, "DAP1_DIN_PJ2"),
+ PINCTRL_PIN(TEGRA_PIN_DAP1_FS_PJ3, "DAP1_FS_PJ3"),
+ PINCTRL_PIN(TEGRA_PIN_AUD_MCLK_PJ4, "AUD_MCLK_PJ4"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_AUD0_PJ5, "GPIO_AUD0_PJ5"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_AUD1_PJ6, "GPIO_AUD1_PJ6"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_AUD2_PJ7, "GPIO_AUD2_PJ7"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_AUD3_PK0, "GPIO_AUD3_PK0"),
+ PINCTRL_PIN(TEGRA_PIN_GEN7_I2C_SCL_PL0, "GEN7_I2C_SCL_PL0"),
+ PINCTRL_PIN(TEGRA_PIN_GEN7_I2C_SDA_PL1, "GEN7_I2C_SDA_PL1"),
+ PINCTRL_PIN(TEGRA_PIN_GEN9_I2C_SCL_PL2, "GEN9_I2C_SCL_PL2"),
+ PINCTRL_PIN(TEGRA_PIN_GEN9_I2C_SDA_PL3, "GEN9_I2C_SDA_PL3"),
+ PINCTRL_PIN(TEGRA_PIN_USB_VBUS_EN0_PL4, "USB_VBUS_EN0_PL4"),
+ PINCTRL_PIN(TEGRA_PIN_USB_VBUS_EN1_PL5, "USB_VBUS_EN1_PL5"),
+ PINCTRL_PIN(TEGRA_PIN_GP_PWM6_PL6, "GP_PWM6_PL6"),
+ PINCTRL_PIN(TEGRA_PIN_GP_PWM7_PL7, "GP_PWM7_PL7"),
+ PINCTRL_PIN(TEGRA_PIN_DMIC1_DAT_PM0, "DMIC1_DAT_PM0"),
+ PINCTRL_PIN(TEGRA_PIN_DMIC1_CLK_PM1, "DMIC1_CLK_PM1"),
+ PINCTRL_PIN(TEGRA_PIN_DMIC2_DAT_PM2, "DMIC2_DAT_PM2"),
+ PINCTRL_PIN(TEGRA_PIN_DMIC2_CLK_PM3, "DMIC2_CLK_PM3"),
+ PINCTRL_PIN(TEGRA_PIN_DMIC4_DAT_PM4, "DMIC4_DAT_PM4"),
+ PINCTRL_PIN(TEGRA_PIN_DMIC4_CLK_PM5, "DMIC4_CLK_PM5"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_CAM1_PN0, "GPIO_CAM1_PN0"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_CAM2_PN1, "GPIO_CAM2_PN1"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_CAM3_PN2, "GPIO_CAM3_PN2"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_CAM4_PN3, "GPIO_CAM4_PN3"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_CAM5_PN4, "GPIO_CAM5_PN4"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_CAM6_PN5, "GPIO_CAM6_PN5"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_CAM7_PN6, "GPIO_CAM7_PN6"),
+ PINCTRL_PIN(TEGRA_PIN_EXTPERIPH1_CLK_PO0, "EXTPERIPH1_CLK_PO0"),
+ PINCTRL_PIN(TEGRA_PIN_EXTPERIPH2_CLK_PO1, "EXTPERIPH2_CLK_PO1"),
+ PINCTRL_PIN(TEGRA_PIN_CAM_I2C_SCL_PO2, "CAM_I2C_SCL_PO2"),
+ PINCTRL_PIN(TEGRA_PIN_CAM_I2C_SDA_PO3, "CAM_I2C_SDA_PO3"),
+ PINCTRL_PIN(TEGRA_PIN_DP_AUX_CH0_HPD_PP0, "DP_AUX_CH0_HPD_PP0"),
+ PINCTRL_PIN(TEGRA_PIN_DP_AUX_CH1_HPD_PP1, "DP_AUX_CH1_HPD_PP1"),
+ PINCTRL_PIN(TEGRA_PIN_HDMI_CEC_PP2, "HDMI_CEC_PP2"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_EDP0_PP3, "GPIO_EDP0_PP3"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_EDP1_PP4, "GPIO_EDP1_PP4"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_EDP2_PP5, "GPIO_EDP2_PP5"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_EDP3_PP6, "GPIO_EDP3_PP6"),
+ PINCTRL_PIN(TEGRA_PIN_DIRECTDC1_CLK_PQ0, "DIRECTDC1_CLK_PQ0"),
+ PINCTRL_PIN(TEGRA_PIN_DIRECTDC1_IN_PQ1, "DIRECTDC1_IN_PQ1"),
+ PINCTRL_PIN(TEGRA_PIN_DIRECTDC1_OUT0_PQ2, "DIRECTDC1_OUT0_PQ2"),
+ PINCTRL_PIN(TEGRA_PIN_DIRECTDC1_OUT1_PQ3, "DIRECTDC1_OUT1_PQ3"),
+ PINCTRL_PIN(TEGRA_PIN_DIRECTDC1_OUT2_PQ4, "DIRECTDC1_OUT2_PQ4"),
+ PINCTRL_PIN(TEGRA_PIN_DIRECTDC1_OUT3_PQ5, "DIRECTDC1_OUT3_PQ5"),
+ PINCTRL_PIN(TEGRA_PIN_QSPI_SCK_PR0, "QSPI_SCK_PR0"),
+ PINCTRL_PIN(TEGRA_PIN_QSPI_IO0_PR1, "QSPI_IO0_PR1"),
+ PINCTRL_PIN(TEGRA_PIN_QSPI_IO1_PR2, "QSPI_IO1_PR2"),
+ PINCTRL_PIN(TEGRA_PIN_QSPI_IO2_PR3, "QSPI_IO2_PR3"),
+ PINCTRL_PIN(TEGRA_PIN_QSPI_IO3_PR4, "QSPI_IO3_PR4"),
+ PINCTRL_PIN(TEGRA_PIN_QSPI_CS_N_PR5, "QSPI_CS_N_PR5"),
+ PINCTRL_PIN(TEGRA_PIN_UART1_TX_PT0, "UART1_TX_PT0"),
+ PINCTRL_PIN(TEGRA_PIN_UART1_RX_PT1, "UART1_RX_PT1"),
+ PINCTRL_PIN(TEGRA_PIN_UART1_RTS_PT2, "UART1_RTS_PT2"),
+ PINCTRL_PIN(TEGRA_PIN_UART1_CTS_PT3, "UART1_CTS_PT3"),
+ PINCTRL_PIN(TEGRA_PIN_UART2_TX_PX0, "UART2_TX_PX0"),
+ PINCTRL_PIN(TEGRA_PIN_UART2_RX_PX1, "UART2_RX_PX1"),
+ PINCTRL_PIN(TEGRA_PIN_UART2_RTS_PX2, "UART2_RTS_PX2"),
+ PINCTRL_PIN(TEGRA_PIN_UART2_CTS_PX3, "UART2_CTS_PX3"),
+ PINCTRL_PIN(TEGRA_PIN_UART5_TX_PX4, "UART5_TX_PX4"),
+ PINCTRL_PIN(TEGRA_PIN_UART5_RX_PX5, "UART5_RX_PX5"),
+ PINCTRL_PIN(TEGRA_PIN_UART5_RTS_PX6, "UART5_RTS_PX6"),
+ PINCTRL_PIN(TEGRA_PIN_UART5_CTS_PX7, "UART5_CTS_PX7"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_MDM1_PY0, "GPIO_MDM1_PY0"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_MDM2_PY1, "GPIO_MDM2_PY1"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_MDM3_PY2, "GPIO_MDM3_PY2"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_MDM4_PY3, "GPIO_MDM4_PY3"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_MDM5_PY4, "GPIO_MDM5_PY4"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_MDM6_PY5, "GPIO_MDM6_PY5"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_MDM7_PY6, "GPIO_MDM7_PY6"),
+ PINCTRL_PIN(TEGRA_PIN_UFS0_REF_CLK_PBB0, "UFS0_REF_CLK_PBB0"),
+ PINCTRL_PIN(TEGRA_PIN_UFS0_RST_PBB1, "UFS0_RST_PBB1"),
+ PINCTRL_PIN(TEGRA_PIN_DAP4_SCLK_PCC0, "DAP4_SCLK_PCC0"),
+ PINCTRL_PIN(TEGRA_PIN_DAP4_DOUT_PCC1, "DAP4_DOUT_PCC1"),
+ PINCTRL_PIN(TEGRA_PIN_DAP4_DIN_PCC2, "DAP4_DIN_PCC2"),
+ PINCTRL_PIN(TEGRA_PIN_DAP4_FS_PCC3, "DAP4_FS_PCC3"),
+ PINCTRL_PIN(TEGRA_PIN_DIRECTDC_COMP, "DIRECTDC_COMP"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC1_COMP, "SDMMC1_COMP"),
+ PINCTRL_PIN(TEGRA_PIN_EQOS_COMP, "EQOS_COMP"),
+ PINCTRL_PIN(TEGRA_PIN_SDMMC3_COMP, "SDMMC3_COMP"),
+ PINCTRL_PIN(TEGRA_PIN_QSPI_COMP, "QSPI_COMP"),
+};
+
+static const unsigned int pex_l0_rst_n_pa0_pins[] = {
+ TEGRA_PIN_PEX_L0_RST_N_PA0,
+};
+
+static const unsigned int pex_l0_clkreq_n_pa1_pins[] = {
+ TEGRA_PIN_PEX_L0_CLKREQ_N_PA1,
+};
+
+static const unsigned int pex_wake_n_pa2_pins[] = {
+ TEGRA_PIN_PEX_WAKE_N_PA2,
+};
+
+static const unsigned int pex_l1_rst_n_pa3_pins[] = {
+ TEGRA_PIN_PEX_L1_RST_N_PA3,
+};
+
+static const unsigned int pex_l1_clkreq_n_pa4_pins[] = {
+ TEGRA_PIN_PEX_L1_CLKREQ_N_PA4,
+};
+
+static const unsigned int pex_l2_rst_n_pa5_pins[] = {
+ TEGRA_PIN_PEX_L2_RST_N_PA5,
+};
+
+static const unsigned int pex_l2_clkreq_n_pa6_pins[] = {
+ TEGRA_PIN_PEX_L2_CLKREQ_N_PA6,
+};
+
+static const unsigned int uart4_tx_pb0_pins[] = {
+ TEGRA_PIN_UART4_TX_PB0,
+};
+
+static const unsigned int uart4_rx_pb1_pins[] = {
+ TEGRA_PIN_UART4_RX_PB1,
+};
+
+static const unsigned int uart4_rts_pb2_pins[] = {
+ TEGRA_PIN_UART4_RTS_PB2,
+};
+
+static const unsigned int uart4_cts_pb3_pins[] = {
+ TEGRA_PIN_UART4_CTS_PB3,
+};
+
+static const unsigned int gpio_wan1_pb4_pins[] = {
+ TEGRA_PIN_GPIO_WAN1_PB4,
+};
+
+static const unsigned int gpio_wan2_pb5_pins[] = {
+ TEGRA_PIN_GPIO_WAN2_PB5,
+};
+
+static const unsigned int gpio_wan3_pb6_pins[] = {
+ TEGRA_PIN_GPIO_WAN3_PB6,
+};
+
+static const unsigned int gpio_wan4_pc0_pins[] = {
+ TEGRA_PIN_GPIO_WAN4_PC0,
+};
+
+static const unsigned int dap2_sclk_pc1_pins[] = {
+ TEGRA_PIN_DAP2_SCLK_PC1,
+};
+
+static const unsigned int dap2_dout_pc2_pins[] = {
+ TEGRA_PIN_DAP2_DOUT_PC2,
+};
+
+static const unsigned int dap2_din_pc3_pins[] = {
+ TEGRA_PIN_DAP2_DIN_PC3,
+};
+
+static const unsigned int dap2_fs_pc4_pins[] = {
+ TEGRA_PIN_DAP2_FS_PC4,
+};
+
+static const unsigned int gen1_i2c_scl_pc5_pins[] = {
+ TEGRA_PIN_GEN1_I2C_SCL_PC5,
+};
+
+static const unsigned int gen1_i2c_sda_pc6_pins[] = {
+ TEGRA_PIN_GEN1_I2C_SDA_PC6,
+};
+
+static const unsigned int sdmmc1_clk_pd0_pins[] = {
+ TEGRA_PIN_SDMMC1_CLK_PD0,
+};
+
+static const unsigned int sdmmc1_cmd_pd1_pins[] = {
+ TEGRA_PIN_SDMMC1_CMD_PD1,
+};
+
+static const unsigned int sdmmc1_dat0_pd2_pins[] = {
+ TEGRA_PIN_SDMMC1_DAT0_PD2,
+};
+
+static const unsigned int sdmmc1_dat1_pd3_pins[] = {
+ TEGRA_PIN_SDMMC1_DAT1_PD3,
+};
+
+static const unsigned int sdmmc1_dat2_pd4_pins[] = {
+ TEGRA_PIN_SDMMC1_DAT2_PD4,
+};
+
+static const unsigned int sdmmc1_dat3_pd5_pins[] = {
+ TEGRA_PIN_SDMMC1_DAT3_PD5,
+};
+
+static const unsigned int eqos_txc_pe0_pins[] = {
+ TEGRA_PIN_EQOS_TXC_PE0,
+};
+
+static const unsigned int eqos_td0_pe1_pins[] = {
+ TEGRA_PIN_EQOS_TD0_PE1,
+};
+
+static const unsigned int eqos_td1_pe2_pins[] = {
+ TEGRA_PIN_EQOS_TD1_PE2,
+};
+
+static const unsigned int eqos_td2_pe3_pins[] = {
+ TEGRA_PIN_EQOS_TD2_PE3,
+};
+
+static const unsigned int eqos_td3_pe4_pins[] = {
+ TEGRA_PIN_EQOS_TD3_PE4,
+};
+
+static const unsigned int eqos_tx_ctl_pe5_pins[] = {
+ TEGRA_PIN_EQOS_TX_CTL_PE5,
+};
+
+static const unsigned int eqos_rd0_pe6_pins[] = {
+ TEGRA_PIN_EQOS_RD0_PE6,
+};
+
+static const unsigned int eqos_rd1_pe7_pins[] = {
+ TEGRA_PIN_EQOS_RD1_PE7,
+};
+
+static const unsigned int eqos_rd2_pf0_pins[] = {
+ TEGRA_PIN_EQOS_RD2_PF0,
+};
+
+static const unsigned int eqos_rd3_pf1_pins[] = {
+ TEGRA_PIN_EQOS_RD3_PF1,
+};
+
+static const unsigned int eqos_rx_ctl_pf2_pins[] = {
+ TEGRA_PIN_EQOS_RX_CTL_PF2,
+};
+
+static const unsigned int eqos_rxc_pf3_pins[] = {
+ TEGRA_PIN_EQOS_RXC_PF3,
+};
+
+static const unsigned int eqos_mdio_pf4_pins[] = {
+ TEGRA_PIN_EQOS_MDIO_PF4,
+};
+
+static const unsigned int eqos_mdc_pf5_pins[] = {
+ TEGRA_PIN_EQOS_MDC_PF5,
+};
+
+static const unsigned int sdmmc3_clk_pg0_pins[] = {
+ TEGRA_PIN_SDMMC3_CLK_PG0,
+};
+
+static const unsigned int sdmmc3_cmd_pg1_pins[] = {
+ TEGRA_PIN_SDMMC3_CMD_PG1,
+};
+
+static const unsigned int sdmmc3_dat0_pg2_pins[] = {
+ TEGRA_PIN_SDMMC3_DAT0_PG2,
+};
+
+static const unsigned int sdmmc3_dat1_pg3_pins[] = {
+ TEGRA_PIN_SDMMC3_DAT1_PG3,
+};
+
+static const unsigned int sdmmc3_dat2_pg4_pins[] = {
+ TEGRA_PIN_SDMMC3_DAT2_PG4,
+};
+
+static const unsigned int sdmmc3_dat3_pg5_pins[] = {
+ TEGRA_PIN_SDMMC3_DAT3_PG5,
+};
+
+static const unsigned int gpio_wan5_ph0_pins[] = {
+ TEGRA_PIN_GPIO_WAN5_PH0,
+};
+
+static const unsigned int gpio_wan6_ph1_pins[] = {
+ TEGRA_PIN_GPIO_WAN6_PH1,
+};
+
+static const unsigned int gpio_wan7_ph2_pins[] = {
+ TEGRA_PIN_GPIO_WAN7_PH2,
+};
+
+static const unsigned int gpio_wan8_ph3_pins[] = {
+ TEGRA_PIN_GPIO_WAN8_PH3,
+};
+
+static const unsigned int bcpu_pwr_req_ph4_pins[] = {
+ TEGRA_PIN_BCPU_PWR_REQ_PH4,
+};
+
+static const unsigned int mcpu_pwr_req_ph5_pins[] = {
+ TEGRA_PIN_MCPU_PWR_REQ_PH5,
+};
+
+static const unsigned int gpu_pwr_req_ph6_pins[] = {
+ TEGRA_PIN_GPU_PWR_REQ_PH6,
+};
+
+static const unsigned int gpio_pq0_pi0_pins[] = {
+ TEGRA_PIN_GPIO_PQ0_PI0,
+};
+
+static const unsigned int gpio_pq1_pi1_pins[] = {
+ TEGRA_PIN_GPIO_PQ1_PI1,
+};
+
+static const unsigned int gpio_pq2_pi2_pins[] = {
+ TEGRA_PIN_GPIO_PQ2_PI2,
+};
+
+static const unsigned int gpio_pq3_pi3_pins[] = {
+ TEGRA_PIN_GPIO_PQ3_PI3,
+};
+
+static const unsigned int gpio_pq4_pi4_pins[] = {
+ TEGRA_PIN_GPIO_PQ4_PI4,
+};
+
+static const unsigned int gpio_pq5_pi5_pins[] = {
+ TEGRA_PIN_GPIO_PQ5_PI5,
+};
+
+static const unsigned int gpio_pq6_pi6_pins[] = {
+ TEGRA_PIN_GPIO_PQ6_PI6,
+};
+
+static const unsigned int gpio_pq7_pi7_pins[] = {
+ TEGRA_PIN_GPIO_PQ7_PI7,
+};
+
+static const unsigned int dap1_sclk_pj0_pins[] = {
+ TEGRA_PIN_DAP1_SCLK_PJ0,
+};
+
+static const unsigned int dap1_dout_pj1_pins[] = {
+ TEGRA_PIN_DAP1_DOUT_PJ1,
+};
+
+static const unsigned int dap1_din_pj2_pins[] = {
+ TEGRA_PIN_DAP1_DIN_PJ2,
+};
+
+static const unsigned int dap1_fs_pj3_pins[] = {
+ TEGRA_PIN_DAP1_FS_PJ3,
+};
+
+static const unsigned int aud_mclk_pj4_pins[] = {
+ TEGRA_PIN_AUD_MCLK_PJ4,
+};
+
+static const unsigned int gpio_aud0_pj5_pins[] = {
+ TEGRA_PIN_GPIO_AUD0_PJ5,
+};
+
+static const unsigned int gpio_aud1_pj6_pins[] = {
+ TEGRA_PIN_GPIO_AUD1_PJ6,
+};
+
+static const unsigned int gpio_aud2_pj7_pins[] = {
+ TEGRA_PIN_GPIO_AUD2_PJ7,
+};
+
+static const unsigned int gpio_aud3_pk0_pins[] = {
+ TEGRA_PIN_GPIO_AUD3_PK0,
+};
+
+static const unsigned int gen7_i2c_scl_pl0_pins[] = {
+ TEGRA_PIN_GEN7_I2C_SCL_PL0,
+};
+
+static const unsigned int gen7_i2c_sda_pl1_pins[] = {
+ TEGRA_PIN_GEN7_I2C_SDA_PL1,
+};
+
+static const unsigned int gen9_i2c_scl_pl2_pins[] = {
+ TEGRA_PIN_GEN9_I2C_SCL_PL2,
+};
+
+static const unsigned int gen9_i2c_sda_pl3_pins[] = {
+ TEGRA_PIN_GEN9_I2C_SDA_PL3,
+};
+
+static const unsigned int usb_vbus_en0_pl4_pins[] = {
+ TEGRA_PIN_USB_VBUS_EN0_PL4,
+};
+
+static const unsigned int usb_vbus_en1_pl5_pins[] = {
+ TEGRA_PIN_USB_VBUS_EN1_PL5,
+};
+
+static const unsigned int gp_pwm6_pl6_pins[] = {
+ TEGRA_PIN_GP_PWM6_PL6,
+};
+
+static const unsigned int gp_pwm7_pl7_pins[] = {
+ TEGRA_PIN_GP_PWM7_PL7,
+};
+
+static const unsigned int dmic1_dat_pm0_pins[] = {
+ TEGRA_PIN_DMIC1_DAT_PM0,
+};
+
+static const unsigned int dmic1_clk_pm1_pins[] = {
+ TEGRA_PIN_DMIC1_CLK_PM1,
+};
+
+static const unsigned int dmic2_dat_pm2_pins[] = {
+ TEGRA_PIN_DMIC2_DAT_PM2,
+};
+
+static const unsigned int dmic2_clk_pm3_pins[] = {
+ TEGRA_PIN_DMIC2_CLK_PM3,
+};
+
+static const unsigned int dmic4_dat_pm4_pins[] = {
+ TEGRA_PIN_DMIC4_DAT_PM4,
+};
+
+static const unsigned int dmic4_clk_pm5_pins[] = {
+ TEGRA_PIN_DMIC4_CLK_PM5,
+};
+
+static const unsigned int gpio_cam1_pn0_pins[] = {
+ TEGRA_PIN_GPIO_CAM1_PN0,
+};
+
+static const unsigned int gpio_cam2_pn1_pins[] = {
+ TEGRA_PIN_GPIO_CAM2_PN1,
+};
+
+static const unsigned int gpio_cam3_pn2_pins[] = {
+ TEGRA_PIN_GPIO_CAM3_PN2,
+};
+
+static const unsigned int gpio_cam4_pn3_pins[] = {
+ TEGRA_PIN_GPIO_CAM4_PN3,
+};
+
+static const unsigned int gpio_cam5_pn4_pins[] = {
+ TEGRA_PIN_GPIO_CAM5_PN4,
+};
+
+static const unsigned int gpio_cam6_pn5_pins[] = {
+ TEGRA_PIN_GPIO_CAM6_PN5,
+};
+
+static const unsigned int gpio_cam7_pn6_pins[] = {
+ TEGRA_PIN_GPIO_CAM7_PN6,
+};
+
+static const unsigned int extperiph1_clk_po0_pins[] = {
+ TEGRA_PIN_EXTPERIPH1_CLK_PO0,
+};
+
+static const unsigned int extperiph2_clk_po1_pins[] = {
+ TEGRA_PIN_EXTPERIPH2_CLK_PO1,
+};
+
+static const unsigned int cam_i2c_scl_po2_pins[] = {
+ TEGRA_PIN_CAM_I2C_SCL_PO2,
+};
+
+static const unsigned int cam_i2c_sda_po3_pins[] = {
+ TEGRA_PIN_CAM_I2C_SDA_PO3,
+};
+
+static const unsigned int dp_aux_ch0_hpd_pp0_pins[] = {
+ TEGRA_PIN_DP_AUX_CH0_HPD_PP0,
+};
+
+static const unsigned int dp_aux_ch1_hpd_pp1_pins[] = {
+ TEGRA_PIN_DP_AUX_CH1_HPD_PP1,
+};
+
+static const unsigned int hdmi_cec_pp2_pins[] = {
+ TEGRA_PIN_HDMI_CEC_PP2,
+};
+
+static const unsigned int gpio_edp0_pp3_pins[] = {
+ TEGRA_PIN_GPIO_EDP0_PP3,
+};
+
+static const unsigned int gpio_edp1_pp4_pins[] = {
+ TEGRA_PIN_GPIO_EDP1_PP4,
+};
+
+static const unsigned int gpio_edp2_pp5_pins[] = {
+ TEGRA_PIN_GPIO_EDP2_PP5,
+};
+
+static const unsigned int gpio_edp3_pp6_pins[] = {
+ TEGRA_PIN_GPIO_EDP3_PP6,
+};
+
+static const unsigned int directdc1_clk_pq0_pins[] = {
+ TEGRA_PIN_DIRECTDC1_CLK_PQ0,
+};
+
+static const unsigned int directdc1_in_pq1_pins[] = {
+ TEGRA_PIN_DIRECTDC1_IN_PQ1,
+};
+
+static const unsigned int directdc1_out0_pq2_pins[] = {
+ TEGRA_PIN_DIRECTDC1_OUT0_PQ2,
+};
+
+static const unsigned int directdc1_out1_pq3_pins[] = {
+ TEGRA_PIN_DIRECTDC1_OUT1_PQ3,
+};
+
+static const unsigned int directdc1_out2_pq4_pins[] = {
+ TEGRA_PIN_DIRECTDC1_OUT2_PQ4,
+};
+
+static const unsigned int directdc1_out3_pq5_pins[] = {
+ TEGRA_PIN_DIRECTDC1_OUT3_PQ5,
+};
+
+static const unsigned int qspi_sck_pr0_pins[] = {
+ TEGRA_PIN_QSPI_SCK_PR0,
+};
+
+static const unsigned int qspi_io0_pr1_pins[] = {
+ TEGRA_PIN_QSPI_IO0_PR1,
+};
+
+static const unsigned int qspi_io1_pr2_pins[] = {
+ TEGRA_PIN_QSPI_IO1_PR2,
+};
+
+static const unsigned int qspi_io2_pr3_pins[] = {
+ TEGRA_PIN_QSPI_IO2_PR3,
+};
+
+static const unsigned int qspi_io3_pr4_pins[] = {
+ TEGRA_PIN_QSPI_IO3_PR4,
+};
+
+static const unsigned int qspi_cs_n_pr5_pins[] = {
+ TEGRA_PIN_QSPI_CS_N_PR5,
+};
+
+static const unsigned int pwr_i2c_scl_ps0_pins[] = {
+ TEGRA_PIN_PWR_I2C_SCL_PS0,
+};
+
+static const unsigned int pwr_i2c_sda_ps1_pins[] = {
+ TEGRA_PIN_PWR_I2C_SDA_PS1,
+};
+
+static const unsigned int batt_oc_ps2_pins[] = {
+ TEGRA_PIN_BATT_OC_PS2,
+};
+
+static const unsigned int safe_state_ps3_pins[] = {
+ TEGRA_PIN_SAFE_STATE_PS3,
+};
+
+static const unsigned int vcomp_alert_ps4_pins[] = {
+ TEGRA_PIN_VCOMP_ALERT_PS4,
+};
+
+static const unsigned int uart1_tx_pt0_pins[] = {
+ TEGRA_PIN_UART1_TX_PT0,
+};
+
+static const unsigned int uart1_rx_pt1_pins[] = {
+ TEGRA_PIN_UART1_RX_PT1,
+};
+
+static const unsigned int uart1_rts_pt2_pins[] = {
+ TEGRA_PIN_UART1_RTS_PT2,
+};
+
+static const unsigned int uart1_cts_pt3_pins[] = {
+ TEGRA_PIN_UART1_CTS_PT3,
+};
+
+static const unsigned int gpio_dis0_pu0_pins[] = {
+ TEGRA_PIN_GPIO_DIS0_PU0,
+};
+
+static const unsigned int gpio_dis1_pu1_pins[] = {
+ TEGRA_PIN_GPIO_DIS1_PU1,
+};
+
+static const unsigned int gpio_dis2_pu2_pins[] = {
+ TEGRA_PIN_GPIO_DIS2_PU2,
+};
+
+static const unsigned int gpio_dis3_pu3_pins[] = {
+ TEGRA_PIN_GPIO_DIS3_PU3,
+};
+
+static const unsigned int gpio_dis4_pu4_pins[] = {
+ TEGRA_PIN_GPIO_DIS4_PU4,
+};
+
+static const unsigned int gpio_dis5_pu5_pins[] = {
+ TEGRA_PIN_GPIO_DIS5_PU5,
+};
+
+static const unsigned int gpio_sen0_pv0_pins[] = {
+ TEGRA_PIN_GPIO_SEN0_PV0,
+};
+
+static const unsigned int gpio_sen1_pv1_pins[] = {
+ TEGRA_PIN_GPIO_SEN1_PV1,
+};
+
+static const unsigned int gpio_sen2_pv2_pins[] = {
+ TEGRA_PIN_GPIO_SEN2_PV2,
+};
+
+static const unsigned int gpio_sen3_pv3_pins[] = {
+ TEGRA_PIN_GPIO_SEN3_PV3,
+};
+
+static const unsigned int gpio_sen4_pv4_pins[] = {
+ TEGRA_PIN_GPIO_SEN4_PV4,
+};
+
+static const unsigned int gpio_sen5_pv5_pins[] = {
+ TEGRA_PIN_GPIO_SEN5_PV5,
+};
+
+static const unsigned int gpio_sen6_pv6_pins[] = {
+ TEGRA_PIN_GPIO_SEN6_PV6,
+};
+
+static const unsigned int gpio_sen7_pv7_pins[] = {
+ TEGRA_PIN_GPIO_SEN7_PV7,
+};
+
+static const unsigned int gen8_i2c_scl_pw0_pins[] = {
+ TEGRA_PIN_GEN8_I2C_SCL_PW0,
+};
+
+static const unsigned int gen8_i2c_sda_pw1_pins[] = {
+ TEGRA_PIN_GEN8_I2C_SDA_PW1,
+};
+
+static const unsigned int uart3_tx_pw2_pins[] = {
+ TEGRA_PIN_UART3_TX_PW2,
+};
+
+static const unsigned int uart3_rx_pw3_pins[] = {
+ TEGRA_PIN_UART3_RX_PW3,
+};
+
+static const unsigned int uart3_rts_pw4_pins[] = {
+ TEGRA_PIN_UART3_RTS_PW4,
+};
+
+static const unsigned int uart3_cts_pw5_pins[] = {
+ TEGRA_PIN_UART3_CTS_PW5,
+};
+
+static const unsigned int uart7_tx_pw6_pins[] = {
+ TEGRA_PIN_UART7_TX_PW6,
+};
+
+static const unsigned int uart7_rx_pw7_pins[] = {
+ TEGRA_PIN_UART7_RX_PW7,
+};
+
+static const unsigned int uart2_tx_px0_pins[] = {
+ TEGRA_PIN_UART2_TX_PX0,
+};
+
+static const unsigned int uart2_rx_px1_pins[] = {
+ TEGRA_PIN_UART2_RX_PX1,
+};
+
+static const unsigned int uart2_rts_px2_pins[] = {
+ TEGRA_PIN_UART2_RTS_PX2,
+};
+
+static const unsigned int uart2_cts_px3_pins[] = {
+ TEGRA_PIN_UART2_CTS_PX3,
+};
+
+static const unsigned int uart5_tx_px4_pins[] = {
+ TEGRA_PIN_UART5_TX_PX4,
+};
+
+static const unsigned int uart5_rx_px5_pins[] = {
+ TEGRA_PIN_UART5_RX_PX5,
+};
+
+static const unsigned int uart5_rts_px6_pins[] = {
+ TEGRA_PIN_UART5_RTS_PX6,
+};
+
+static const unsigned int uart5_cts_px7_pins[] = {
+ TEGRA_PIN_UART5_CTS_PX7,
+};
+
+static const unsigned int gpio_mdm1_py0_pins[] = {
+ TEGRA_PIN_GPIO_MDM1_PY0,
+};
+
+static const unsigned int gpio_mdm2_py1_pins[] = {
+ TEGRA_PIN_GPIO_MDM2_PY1,
+};
+
+static const unsigned int gpio_mdm3_py2_pins[] = {
+ TEGRA_PIN_GPIO_MDM3_PY2,
+};
+
+static const unsigned int gpio_mdm4_py3_pins[] = {
+ TEGRA_PIN_GPIO_MDM4_PY3,
+};
+
+static const unsigned int gpio_mdm5_py4_pins[] = {
+ TEGRA_PIN_GPIO_MDM5_PY4,
+};
+
+static const unsigned int gpio_mdm6_py5_pins[] = {
+ TEGRA_PIN_GPIO_MDM6_PY5,
+};
+
+static const unsigned int gpio_mdm7_py6_pins[] = {
+ TEGRA_PIN_GPIO_MDM7_PY6,
+};
+
+static const unsigned int can1_dout_pz0_pins[] = {
+ TEGRA_PIN_CAN1_DOUT_PZ0,
+};
+
+static const unsigned int can1_din_pz1_pins[] = {
+ TEGRA_PIN_CAN1_DIN_PZ1,
+};
+
+static const unsigned int can0_dout_pz2_pins[] = {
+ TEGRA_PIN_CAN0_DOUT_PZ2,
+};
+
+static const unsigned int can0_din_pz3_pins[] = {
+ TEGRA_PIN_CAN0_DIN_PZ3,
+};
+
+static const unsigned int can_gpio0_paa0_pins[] = {
+ TEGRA_PIN_CAN_GPIO0_PAA0,
+};
+
+static const unsigned int can_gpio1_paa1_pins[] = {
+ TEGRA_PIN_CAN_GPIO1_PAA1,
+};
+
+static const unsigned int can_gpio2_paa2_pins[] = {
+ TEGRA_PIN_CAN_GPIO2_PAA2,
+};
+
+static const unsigned int can_gpio3_paa3_pins[] = {
+ TEGRA_PIN_CAN_GPIO3_PAA3,
+};
+
+static const unsigned int can_gpio4_paa4_pins[] = {
+ TEGRA_PIN_CAN_GPIO4_PAA4,
+};
+
+static const unsigned int can_gpio5_paa5_pins[] = {
+ TEGRA_PIN_CAN_GPIO5_PAA5,
+};
+
+static const unsigned int can_gpio6_paa6_pins[] = {
+ TEGRA_PIN_CAN_GPIO6_PAA6,
+};
+
+static const unsigned int can_gpio7_paa7_pins[] = {
+ TEGRA_PIN_CAN_GPIO7_PAA7,
+};
+
+static const unsigned int ufs0_ref_clk_pbb0_pins[] = {
+ TEGRA_PIN_UFS0_REF_CLK_PBB0,
+};
+
+static const unsigned int ufs0_rst_pbb1_pins[] = {
+ TEGRA_PIN_UFS0_RST_PBB1,
+};
+
+static const unsigned int dap4_sclk_pcc0_pins[] = {
+ TEGRA_PIN_DAP4_SCLK_PCC0,
+};
+
+static const unsigned int dap4_dout_pcc1_pins[] = {
+ TEGRA_PIN_DAP4_DOUT_PCC1,
+};
+
+static const unsigned int dap4_din_pcc2_pins[] = {
+ TEGRA_PIN_DAP4_DIN_PCC2,
+};
+
+static const unsigned int dap4_fs_pcc3_pins[] = {
+ TEGRA_PIN_DAP4_FS_PCC3,
+};
+
+static const unsigned int gpio_sen8_pee0_pins[] = {
+ TEGRA_PIN_GPIO_SEN8_PEE0,
+};
+
+static const unsigned int gpio_sen9_pee1_pins[] = {
+ TEGRA_PIN_GPIO_SEN9_PEE1,
+};
+
+static const unsigned int touch_clk_pee2_pins[] = {
+ TEGRA_PIN_TOUCH_CLK_PEE2,
+};
+
+static const unsigned int power_on_pff0_pins[] = {
+ TEGRA_PIN_POWER_ON_PFF0,
+};
+
+static const unsigned int gpio_sw1_pff1_pins[] = {
+ TEGRA_PIN_GPIO_SW1_PFF1,
+};
+
+static const unsigned int gpio_sw2_pff2_pins[] = {
+ TEGRA_PIN_GPIO_SW2_PFF2,
+};
+
+static const unsigned int gpio_sw3_pff3_pins[] = {
+ TEGRA_PIN_GPIO_SW3_PFF3,
+};
+
+static const unsigned int gpio_sw4_pff4_pins[] = {
+ TEGRA_PIN_GPIO_SW4_PFF4,
+};
+
+static const unsigned int directdc_comp_pins[] = {
+ TEGRA_PIN_DIRECTDC_COMP,
+};
+
+static const unsigned int sdmmc1_comp_pins[] = {
+ TEGRA_PIN_SDMMC1_COMP,
+};
+
+static const unsigned int eqos_comp_pins[] = {
+ TEGRA_PIN_EQOS_COMP,
+};
+
+static const unsigned int sdmmc3_comp_pins[] = {
+ TEGRA_PIN_SDMMC3_COMP,
+};
+
+static const unsigned int qspi_comp_pins[] = {
+ TEGRA_PIN_QSPI_COMP,
+};
+
+static const unsigned int shutdown_pins[] = {
+ TEGRA_PIN_SHUTDOWN,
+};
+
+static const unsigned int pmu_int_pins[] = {
+ TEGRA_PIN_PMU_INT,
+};
+
+static const unsigned int soc_pwr_req_pins[] = {
+ TEGRA_PIN_SOC_PWR_REQ,
+};
+
+static const unsigned int clk_32k_in_pins[] = {
+ TEGRA_PIN_CLK_32K_IN,
+};
+
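+/*
+ * The SDMMC4 groups carry no muxable pins in this driver; the empty
+ * arrays keep npins at 0 while still satisfying the pg_name##_pins
+ * references generated by the PINGROUP() machinery below.
+ */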
+static const unsigned int sdmmc4_clk_pins[] = {};
+
+static const unsigned int sdmmc4_cmd_pins[] = {};
+
+static const unsigned int sdmmc4_dqs_pins[] = {};
+
+static const unsigned int sdmmc4_dat7_pins[] = {};
+
+static const unsigned int sdmmc4_dat6_pins[] = {};
+
+static const unsigned int sdmmc4_dat5_pins[] = {};
+
+static const unsigned int sdmmc4_dat4_pins[] = {};
+
+static const unsigned int sdmmc4_dat3_pins[] = {};
+
+static const unsigned int sdmmc4_dat2_pins[] = {};
+
+static const unsigned int sdmmc4_dat1_pins[] = {};
+
+static const unsigned int sdmmc4_dat0_pins[] = {};
+
+/* Define a unique ID for each function */
+enum tegra_mux_dt {
+ TEGRA_MUX_RSVD0,
+ TEGRA_MUX_RSVD1,
+ TEGRA_MUX_RSVD2,
+ TEGRA_MUX_RSVD3,
+ TEGRA_MUX_TOUCH,
+ TEGRA_MUX_UARTC,
+ TEGRA_MUX_I2C8,
+ TEGRA_MUX_UARTG,
+ TEGRA_MUX_SPI2,
+ TEGRA_MUX_GP,
+ TEGRA_MUX_DCA,
+ TEGRA_MUX_WDT,
+ TEGRA_MUX_I2C2,
+ TEGRA_MUX_CAN1,
+ TEGRA_MUX_CAN0,
+ TEGRA_MUX_DMIC3,
+ TEGRA_MUX_DMIC5,
+ TEGRA_MUX_GPIO,
+ TEGRA_MUX_DSPK1,
+ TEGRA_MUX_DSPK0,
+ TEGRA_MUX_SPDIF,
+ TEGRA_MUX_AUD,
+ TEGRA_MUX_I2S1,
+ TEGRA_MUX_DMIC1,
+ TEGRA_MUX_DMIC2,
+ TEGRA_MUX_I2S3,
+ TEGRA_MUX_DMIC4,
+ TEGRA_MUX_I2S4,
+ TEGRA_MUX_EXTPERIPH2,
+ TEGRA_MUX_EXTPERIPH1,
+ TEGRA_MUX_I2C3,
+ TEGRA_MUX_VGP1,
+ TEGRA_MUX_VGP2,
+ TEGRA_MUX_VGP3,
+ TEGRA_MUX_VGP4,
+ TEGRA_MUX_VGP5,
+ TEGRA_MUX_VGP6,
+ TEGRA_MUX_EXTPERIPH3,
+ TEGRA_MUX_EXTPERIPH4,
+ TEGRA_MUX_SPI4,
+ TEGRA_MUX_I2S2,
+ TEGRA_MUX_UARTD,
+ TEGRA_MUX_I2C1,
+ TEGRA_MUX_UARTA,
+ TEGRA_MUX_DIRECTDC1,
+ TEGRA_MUX_DIRECTDC,
+ TEGRA_MUX_IQC0,
+ TEGRA_MUX_IQC1,
+ TEGRA_MUX_I2S6,
+ TEGRA_MUX_DTV,
+ TEGRA_MUX_UARTF,
+ TEGRA_MUX_SDMMC3,
+ TEGRA_MUX_SDMMC4,
+ TEGRA_MUX_SDMMC1,
+ TEGRA_MUX_DP,
+ TEGRA_MUX_HDMI,
+ TEGRA_MUX_PE2,
+ TEGRA_MUX_SATA,
+ TEGRA_MUX_PE,
+ TEGRA_MUX_PE1,
+ TEGRA_MUX_PE0,
+ TEGRA_MUX_SOC,
+ TEGRA_MUX_EQOS,
+ TEGRA_MUX_SDMMC2,
+ TEGRA_MUX_QSPI,
+ TEGRA_MUX_SCE,
+ TEGRA_MUX_I2C5,
+ TEGRA_MUX_DISPLAYA,
+ TEGRA_MUX_DISPLAYB,
+ TEGRA_MUX_DCC,
+ TEGRA_MUX_DCB,
+ TEGRA_MUX_SPI1,
+ TEGRA_MUX_UARTB,
+ TEGRA_MUX_UARTE,
+ TEGRA_MUX_SPI3,
+ TEGRA_MUX_NV,
+ TEGRA_MUX_CCLA,
+ TEGRA_MUX_I2C7,
+ TEGRA_MUX_I2C9,
+ TEGRA_MUX_I2S5,
+ TEGRA_MUX_USB,
+ TEGRA_MUX_UFS0,
+};
+
+/* List of function names; order must match enum tegra_mux_dt above */
+#define TEGRA_PIN_FUNCTION(lid) #lid
+
+static const char * const tegra186_functions[] = {
+ TEGRA_PIN_FUNCTION(rsvd0),
+ TEGRA_PIN_FUNCTION(rsvd1),
+ TEGRA_PIN_FUNCTION(rsvd2),
+ TEGRA_PIN_FUNCTION(rsvd3),
+ TEGRA_PIN_FUNCTION(touch),
+ TEGRA_PIN_FUNCTION(uartc),
+ TEGRA_PIN_FUNCTION(i2c8),
+ TEGRA_PIN_FUNCTION(uartg),
+ TEGRA_PIN_FUNCTION(spi2),
+ TEGRA_PIN_FUNCTION(gp),
+ TEGRA_PIN_FUNCTION(dca),
+ TEGRA_PIN_FUNCTION(wdt),
+ TEGRA_PIN_FUNCTION(i2c2),
+ TEGRA_PIN_FUNCTION(can1),
+ TEGRA_PIN_FUNCTION(can0),
+ TEGRA_PIN_FUNCTION(dmic3),
+ TEGRA_PIN_FUNCTION(dmic5),
+ TEGRA_PIN_FUNCTION(gpio),
+ TEGRA_PIN_FUNCTION(dspk1),
+ TEGRA_PIN_FUNCTION(dspk0),
+ TEGRA_PIN_FUNCTION(spdif),
+ TEGRA_PIN_FUNCTION(aud),
+ TEGRA_PIN_FUNCTION(i2s1),
+ TEGRA_PIN_FUNCTION(dmic1),
+ TEGRA_PIN_FUNCTION(dmic2),
+ TEGRA_PIN_FUNCTION(i2s3),
+ TEGRA_PIN_FUNCTION(dmic4),
+ TEGRA_PIN_FUNCTION(i2s4),
+ TEGRA_PIN_FUNCTION(extperiph2),
+ TEGRA_PIN_FUNCTION(extperiph1),
+ TEGRA_PIN_FUNCTION(i2c3),
+ TEGRA_PIN_FUNCTION(vgp1),
+ TEGRA_PIN_FUNCTION(vgp2),
+ TEGRA_PIN_FUNCTION(vgp3),
+ TEGRA_PIN_FUNCTION(vgp4),
+ TEGRA_PIN_FUNCTION(vgp5),
+ TEGRA_PIN_FUNCTION(vgp6),
+ TEGRA_PIN_FUNCTION(extperiph3),
+ TEGRA_PIN_FUNCTION(extperiph4),
+ TEGRA_PIN_FUNCTION(spi4),
+ TEGRA_PIN_FUNCTION(i2s2),
+ TEGRA_PIN_FUNCTION(uartd),
+ TEGRA_PIN_FUNCTION(i2c1),
+ TEGRA_PIN_FUNCTION(uarta),
+ TEGRA_PIN_FUNCTION(directdc1),
+ TEGRA_PIN_FUNCTION(directdc),
+ TEGRA_PIN_FUNCTION(iqc0),
+ TEGRA_PIN_FUNCTION(iqc1),
+ TEGRA_PIN_FUNCTION(i2s6),
+ TEGRA_PIN_FUNCTION(dtv),
+ TEGRA_PIN_FUNCTION(uartf),
+ TEGRA_PIN_FUNCTION(sdmmc3),
+ TEGRA_PIN_FUNCTION(sdmmc4),
+ TEGRA_PIN_FUNCTION(sdmmc1),
+ TEGRA_PIN_FUNCTION(dp),
+ TEGRA_PIN_FUNCTION(hdmi),
+ TEGRA_PIN_FUNCTION(pe2),
+ TEGRA_PIN_FUNCTION(sata),
+ TEGRA_PIN_FUNCTION(pe),
+ TEGRA_PIN_FUNCTION(pe1),
+ TEGRA_PIN_FUNCTION(pe0),
+ TEGRA_PIN_FUNCTION(soc),
+ TEGRA_PIN_FUNCTION(eqos),
+ TEGRA_PIN_FUNCTION(sdmmc2),
+ TEGRA_PIN_FUNCTION(qspi),
+ TEGRA_PIN_FUNCTION(sce),
+ TEGRA_PIN_FUNCTION(i2c5),
+ TEGRA_PIN_FUNCTION(displaya),
+ TEGRA_PIN_FUNCTION(displayb),
+ TEGRA_PIN_FUNCTION(dcc),
+ TEGRA_PIN_FUNCTION(dcb),
+ TEGRA_PIN_FUNCTION(spi1),
+ TEGRA_PIN_FUNCTION(uartb),
+ TEGRA_PIN_FUNCTION(uarte),
+ TEGRA_PIN_FUNCTION(spi3),
+ TEGRA_PIN_FUNCTION(nv),
+ TEGRA_PIN_FUNCTION(ccla),
+ TEGRA_PIN_FUNCTION(i2c7),
+ TEGRA_PIN_FUNCTION(i2c9),
+ TEGRA_PIN_FUNCTION(i2s5),
+ TEGRA_PIN_FUNCTION(usb),
+ TEGRA_PIN_FUNCTION(ufs0),
+};
+
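+/*
+ * _Y/_N suffix selectors: callers paste one of these in via a macro
+ * argument (e.g. PINGROUP_REG_##pupd(r)) to mark a register as present
+ * (its offset) or absent (-1).
+ */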
+#define PINGROUP_REG_Y(r) ((r))
+#define PINGROUP_REG_N(r) -1
+
+#define DRV_PINGROUP_Y(r) ((r))
+#define DRV_PINGROUP_N(r) -1
+
+#define DRV_PINGROUP_ENTRY_N(pg_name) \
+ .drv_reg = -1, \
+ .drv_bank = -1, \
+ .drvdn_bit = -1, \
+ .drvdn_width = -1, \
+ .drvup_bit = -1, \
+ .drvup_width = -1, \
+ .slwr_bit = -1, \
+ .slwr_width = -1, \
+ .slwf_bit = -1, \
+ .slwf_width = -1
+
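+/*
+ * Describes where a group's pad-control fields sit in its drive register:
+ * drvdn_b/drvdn_w and drvup_b/drvup_w are the bit offset and width of the
+ * drive-down and drive-up strength fields, slwr_b/slwr_w and slwf_b/slwf_w
+ * those of the rising and falling slew-rate fields; -1 marks a field the
+ * group does not implement.
+ */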
+#define DRV_PINGROUP_ENTRY_Y(r, drvdn_b, drvdn_w, drvup_b, \
+ drvup_w, slwr_b, slwr_w, slwf_b, \
+ slwf_w, bank) \
+ .drv_reg = ((r)), \
+ .drv_bank = bank, \
+ .drvdn_bit = drvdn_b, \
+ .drvdn_width = drvdn_w, \
+ .drvup_bit = drvup_b, \
+ .drvup_width = drvup_w, \
+ .slwr_bit = slwr_b, \
+ .slwr_width = slwr_w, \
+ .slwf_bit = slwf_b, \
+ .slwf_width = slwf_w
+
+#define PIN_PINGROUP_ENTRY_N(pg_name) \
+ .mux_reg = -1, \
+ .pupd_reg = -1, \
+ .tri_reg = -1, \
+ .einput_bit = -1, \
+ .e_io_hv_bit = -1, \
+ .odrain_bit = -1, \
+ .lock_bit = -1, \
+ .parked_bit = -1, \
+ .lpmd_bit = -1, \
+ .drvtype_bit = -1, \
+ .lpdr_bit = -1, \
+ .pbias_buf_bit = -1, \
+ .preemp_bit = -1, \
+ .rfu_in_bit = -1
+
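+/*
+ * All pin registers share one layout here: the mux field at bit 0, the
+ * pull-up/down field at bit 2, tristate at bit 4 and drive type at bit 13
+ * are hardcoded below; the remaining bit positions vary per group and are
+ * passed in as arguments.
+ */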
+#define PIN_PINGROUP_ENTRY_Y(r, bank, pupd, e_io_hv, e_lpbk, e_input, \
+ e_lpdr, e_pbias_buf, gpio_sfio_sel, \
+ e_od, schmitt_b, drvtype, epreemp, \
+ io_reset, rfu_in) \
+ .mux_reg = PINGROUP_REG_Y(r), \
+ .lpmd_bit = -1, \
+ .lock_bit = -1, \
+ .hsm_bit = -1, \
+ .mux_bank = bank, \
+ .mux_bit = 0, \
+ .pupd_reg = PINGROUP_REG_##pupd(r), \
+ .pupd_bank = bank, \
+ .pupd_bit = 2, \
+ .tri_reg = PINGROUP_REG_Y(r), \
+ .tri_bank = bank, \
+ .tri_bit = 4, \
+ .einput_bit = e_input, \
+ .sfsel_bit = gpio_sfio_sel, \
+ .odrain_bit = e_od, \
+ .schmitt_bit = schmitt_b, \
+ .drvtype_bit = 13, \
+ .lpdr_bit = e_lpdr,
+
+/* main drive pin groups */
+#define drive_gpio_aud3_pk0 DRV_PINGROUP_ENTRY_Y(0x1004, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_aud2_pj7 DRV_PINGROUP_ENTRY_Y(0x100c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_aud1_pj6 DRV_PINGROUP_ENTRY_Y(0x1014, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_aud0_pj5 DRV_PINGROUP_ENTRY_Y(0x101c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_aud_mclk_pj4 DRV_PINGROUP_ENTRY_Y(0x1024, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_dap1_fs_pj3 DRV_PINGROUP_ENTRY_Y(0x102c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_dap1_din_pj2 DRV_PINGROUP_ENTRY_Y(0x1034, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_dap1_dout_pj1 DRV_PINGROUP_ENTRY_Y(0x103c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_dap1_sclk_pj0 DRV_PINGROUP_ENTRY_Y(0x1044, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_dmic1_clk_pm1 DRV_PINGROUP_ENTRY_Y(0x2004, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_dmic1_dat_pm0 DRV_PINGROUP_ENTRY_Y(0x200c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_dmic2_dat_pm2 DRV_PINGROUP_ENTRY_Y(0x2014, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_dmic2_clk_pm3 DRV_PINGROUP_ENTRY_Y(0x201c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_dmic4_dat_pm4 DRV_PINGROUP_ENTRY_Y(0x2024, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_dmic4_clk_pm5 DRV_PINGROUP_ENTRY_Y(0x202c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_dap4_fs_pcc3 DRV_PINGROUP_ENTRY_Y(0x2034, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_dap4_din_pcc2 DRV_PINGROUP_ENTRY_Y(0x203c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_dap4_dout_pcc1 DRV_PINGROUP_ENTRY_Y(0x2044, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_dap4_sclk_pcc0 DRV_PINGROUP_ENTRY_Y(0x204c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_extperiph2_clk_po1 DRV_PINGROUP_ENTRY_Y(0x0004, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_extperiph1_clk_po0 DRV_PINGROUP_ENTRY_Y(0x000c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_cam_i2c_sda_po3 DRV_PINGROUP_ENTRY_Y(0x0014, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_cam_i2c_scl_po2 DRV_PINGROUP_ENTRY_Y(0x001c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_cam1_pn0 DRV_PINGROUP_ENTRY_Y(0x0024, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_cam2_pn1 DRV_PINGROUP_ENTRY_Y(0x002c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_cam3_pn2 DRV_PINGROUP_ENTRY_Y(0x0034, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_cam4_pn3 DRV_PINGROUP_ENTRY_Y(0x003c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_cam5_pn4 DRV_PINGROUP_ENTRY_Y(0x0044, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_cam6_pn5 DRV_PINGROUP_ENTRY_Y(0x004c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_cam7_pn6 DRV_PINGROUP_ENTRY_Y(0x0054, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_dap2_din_pc3 DRV_PINGROUP_ENTRY_Y(0x4004, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_dap2_dout_pc2 DRV_PINGROUP_ENTRY_Y(0x400c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_dap2_fs_pc4 DRV_PINGROUP_ENTRY_Y(0x4014, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_dap2_sclk_pc1 DRV_PINGROUP_ENTRY_Y(0x401c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_uart4_cts_pb3 DRV_PINGROUP_ENTRY_Y(0x4024, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_uart4_rts_pb2 DRV_PINGROUP_ENTRY_Y(0x402c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_uart4_rx_pb1 DRV_PINGROUP_ENTRY_Y(0x4034, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_uart4_tx_pb0 DRV_PINGROUP_ENTRY_Y(0x403c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_wan4_pc0 DRV_PINGROUP_ENTRY_Y(0x4044, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_wan3_pb6 DRV_PINGROUP_ENTRY_Y(0x404c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_wan2_pb5 DRV_PINGROUP_ENTRY_Y(0x4054, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_wan1_pb4 DRV_PINGROUP_ENTRY_Y(0x405c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gen1_i2c_scl_pc5 DRV_PINGROUP_ENTRY_Y(0x4064, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gen1_i2c_sda_pc6 DRV_PINGROUP_ENTRY_Y(0x406c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_uart1_cts_pt3 DRV_PINGROUP_ENTRY_Y(0x5004, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_uart1_rts_pt2 DRV_PINGROUP_ENTRY_Y(0x500c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_uart1_rx_pt1 DRV_PINGROUP_ENTRY_Y(0x5014, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_uart1_tx_pt0 DRV_PINGROUP_ENTRY_Y(0x501c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_directdc1_out3_pq5 DRV_PINGROUP_ENTRY_Y(0x502c, 12, 9, 24, 8, -1, -1, -1, -1, 0)
+#define drive_directdc1_out2_pq4 DRV_PINGROUP_ENTRY_Y(0x5034, 12, 9, 24, 8, -1, -1, -1, -1, 0)
+#define drive_directdc1_out1_pq3 DRV_PINGROUP_ENTRY_Y(0x503c, 12, 9, 24, 8, -1, -1, -1, -1, 0)
+#define drive_directdc1_out0_pq2 DRV_PINGROUP_ENTRY_Y(0x5044, 12, 9, 24, 8, -1, -1, -1, -1, 0)
+#define drive_directdc1_in_pq1 DRV_PINGROUP_ENTRY_Y(0x504c, 12, 9, 24, 8, -1, -1, -1, -1, 0)
+#define drive_directdc1_clk_pq0 DRV_PINGROUP_ENTRY_Y(0x5054, 12, 9, 24, 8, -1, -1, -1, -1, 0)
+#define drive_gpio_pq0_pi0 DRV_PINGROUP_ENTRY_Y(0x3004, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_gpio_pq1_pi1 DRV_PINGROUP_ENTRY_Y(0x300c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_gpio_pq2_pi2 DRV_PINGROUP_ENTRY_Y(0x3014, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_gpio_pq3_pi3 DRV_PINGROUP_ENTRY_Y(0x301c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_gpio_pq4_pi4 DRV_PINGROUP_ENTRY_Y(0x3024, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_gpio_pq5_pi5 DRV_PINGROUP_ENTRY_Y(0x302c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_gpio_pq6_pi6 DRV_PINGROUP_ENTRY_Y(0x3034, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_gpio_pq7_pi7 DRV_PINGROUP_ENTRY_Y(0x303c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_gpio_edp2_pp5 DRV_PINGROUP_ENTRY_Y(0x10004, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_edp3_pp6 DRV_PINGROUP_ENTRY_Y(0x1000c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_edp0_pp3 DRV_PINGROUP_ENTRY_Y(0x10014, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_edp1_pp4 DRV_PINGROUP_ENTRY_Y(0x1001c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_dp_aux_ch0_hpd_pp0 DRV_PINGROUP_ENTRY_Y(0x10024, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_dp_aux_ch1_hpd_pp1 DRV_PINGROUP_ENTRY_Y(0x1002c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_hdmi_cec_pp2 DRV_PINGROUP_ENTRY_Y(0x10034, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_pex_l2_clkreq_n_pa6 DRV_PINGROUP_ENTRY_Y(0x7004, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_pex_wake_n_pa2 DRV_PINGROUP_ENTRY_Y(0x700c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_pex_l1_clkreq_n_pa4 DRV_PINGROUP_ENTRY_Y(0x7014, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_pex_l1_rst_n_pa3 DRV_PINGROUP_ENTRY_Y(0x701c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_pex_l0_clkreq_n_pa1 DRV_PINGROUP_ENTRY_Y(0x7024, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_pex_l0_rst_n_pa0 DRV_PINGROUP_ENTRY_Y(0x702c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_pex_l2_rst_n_pa5 DRV_PINGROUP_ENTRY_Y(0x7034, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_sdmmc1_clk_pd0 DRV_PINGROUP_ENTRY_Y(0x8004, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_sdmmc1_cmd_pd1 DRV_PINGROUP_ENTRY_Y(0x800c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_sdmmc1_dat3_pd5 DRV_PINGROUP_ENTRY_Y(0x8018, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_sdmmc1_dat2_pd4 DRV_PINGROUP_ENTRY_Y(0x8020, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_sdmmc1_dat1_pd3 DRV_PINGROUP_ENTRY_Y(0x8028, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_sdmmc1_dat0_pd2 DRV_PINGROUP_ENTRY_Y(0x8030, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_eqos_td3_pe4 DRV_PINGROUP_ENTRY_Y(0x9004, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_eqos_td2_pe3 DRV_PINGROUP_ENTRY_Y(0x900c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_eqos_td1_pe2 DRV_PINGROUP_ENTRY_Y(0x9014, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_eqos_td0_pe1 DRV_PINGROUP_ENTRY_Y(0x901c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_eqos_rd3_pf1 DRV_PINGROUP_ENTRY_Y(0x9024, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_eqos_rd2_pf0 DRV_PINGROUP_ENTRY_Y(0x902c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_eqos_rd1_pe7 DRV_PINGROUP_ENTRY_Y(0x9034, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_eqos_mdio_pf4 DRV_PINGROUP_ENTRY_Y(0x903c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_eqos_rd0_pe6 DRV_PINGROUP_ENTRY_Y(0x9044, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_eqos_mdc_pf5 DRV_PINGROUP_ENTRY_Y(0x904c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_eqos_txc_pe0 DRV_PINGROUP_ENTRY_Y(0x9058, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_eqos_rxc_pf3 DRV_PINGROUP_ENTRY_Y(0x9060, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_eqos_tx_ctl_pe5 DRV_PINGROUP_ENTRY_Y(0x9068, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_eqos_rx_ctl_pf2 DRV_PINGROUP_ENTRY_Y(0x9070, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_sdmmc3_dat3_pg5 DRV_PINGROUP_ENTRY_Y(0xa004, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_sdmmc3_dat2_pg4 DRV_PINGROUP_ENTRY_Y(0xa00c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_sdmmc3_dat1_pg3 DRV_PINGROUP_ENTRY_Y(0xa014, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_sdmmc3_dat0_pg2 DRV_PINGROUP_ENTRY_Y(0xa01c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_sdmmc3_cmd_pg1 DRV_PINGROUP_ENTRY_Y(0xa028, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_sdmmc3_clk_pg0 DRV_PINGROUP_ENTRY_Y(0xa030, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_qspi_io3_pr4 DRV_PINGROUP_ENTRY_Y(0xb004, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_qspi_io2_pr3 DRV_PINGROUP_ENTRY_Y(0xb00c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_qspi_io1_pr2 DRV_PINGROUP_ENTRY_Y(0xb014, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_qspi_io0_pr1 DRV_PINGROUP_ENTRY_Y(0xb01c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_qspi_sck_pr0 DRV_PINGROUP_ENTRY_Y(0xb024, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_qspi_cs_n_pr5 DRV_PINGROUP_ENTRY_Y(0xb02c, -1, -1, -1, -1, 28, 2, 30, 2, 0)
+#define drive_gpio_wan8_ph3 DRV_PINGROUP_ENTRY_Y(0xd004, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_wan7_ph2 DRV_PINGROUP_ENTRY_Y(0xd00c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_wan6_ph1 DRV_PINGROUP_ENTRY_Y(0xd014, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_wan5_ph0 DRV_PINGROUP_ENTRY_Y(0xd01c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_uart2_tx_px0 DRV_PINGROUP_ENTRY_Y(0xd024, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_uart2_rx_px1 DRV_PINGROUP_ENTRY_Y(0xd02c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_uart2_rts_px2 DRV_PINGROUP_ENTRY_Y(0xd034, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_uart2_cts_px3 DRV_PINGROUP_ENTRY_Y(0xd03c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_uart5_rx_px5 DRV_PINGROUP_ENTRY_Y(0xd044, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_uart5_tx_px4 DRV_PINGROUP_ENTRY_Y(0xd04c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_uart5_rts_px6 DRV_PINGROUP_ENTRY_Y(0xd054, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_uart5_cts_px7 DRV_PINGROUP_ENTRY_Y(0xd05c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_mdm1_py0 DRV_PINGROUP_ENTRY_Y(0xd064, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_mdm2_py1 DRV_PINGROUP_ENTRY_Y(0xd06c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_mdm3_py2 DRV_PINGROUP_ENTRY_Y(0xd074, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_mdm4_py3 DRV_PINGROUP_ENTRY_Y(0xd07c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_mdm5_py4 DRV_PINGROUP_ENTRY_Y(0xd084, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_mdm6_py5 DRV_PINGROUP_ENTRY_Y(0xd08c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpio_mdm7_py6 DRV_PINGROUP_ENTRY_Y(0xd094, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_bcpu_pwr_req_ph4 DRV_PINGROUP_ENTRY_Y(0xd09c, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_mcpu_pwr_req_ph5 DRV_PINGROUP_ENTRY_Y(0xd0a4, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gpu_pwr_req_ph6 DRV_PINGROUP_ENTRY_Y(0xd0ac, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gen7_i2c_scl_pl0 DRV_PINGROUP_ENTRY_Y(0xd0b4, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gen7_i2c_sda_pl1 DRV_PINGROUP_ENTRY_Y(0xd0bc, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gen9_i2c_sda_pl3 DRV_PINGROUP_ENTRY_Y(0xd0c4, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gen9_i2c_scl_pl2 DRV_PINGROUP_ENTRY_Y(0xd0cc, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_usb_vbus_en0_pl4 DRV_PINGROUP_ENTRY_Y(0xd0d4, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_usb_vbus_en1_pl5 DRV_PINGROUP_ENTRY_Y(0xd0dc, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gp_pwm7_pl7 DRV_PINGROUP_ENTRY_Y(0xd0e4, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_gp_pwm6_pl6 DRV_PINGROUP_ENTRY_Y(0xd0ec, 12, 5, 20, 5, -1, -1, -1, -1, 0)
+#define drive_ufs0_rst_pbb1 DRV_PINGROUP_ENTRY_Y(0x11004, 12, 9, 24, 8, -1, -1, -1, -1, 0)
+#define drive_ufs0_ref_clk_pbb0 DRV_PINGROUP_ENTRY_Y(0x1100c, 12, 9, 24, 8, -1, -1, -1, -1, 0)
+
+#define drive_directdc_comp DRV_PINGROUP_ENTRY_N(no_entry)
+#define drive_sdmmc1_comp DRV_PINGROUP_ENTRY_N(no_entry)
+#define drive_eqos_comp DRV_PINGROUP_ENTRY_N(no_entry)
+#define drive_sdmmc3_comp DRV_PINGROUP_ENTRY_N(no_entry)
+#define drive_sdmmc4_clk DRV_PINGROUP_ENTRY_N(no_entry)
+#define drive_sdmmc4_cmd DRV_PINGROUP_ENTRY_N(no_entry)
+#define drive_sdmmc4_dqs DRV_PINGROUP_ENTRY_N(no_entry)
+#define drive_sdmmc4_dat7 DRV_PINGROUP_ENTRY_N(no_entry)
+#define drive_sdmmc4_dat6 DRV_PINGROUP_ENTRY_N(no_entry)
+#define drive_sdmmc4_dat5 DRV_PINGROUP_ENTRY_N(no_entry)
+#define drive_sdmmc4_dat4 DRV_PINGROUP_ENTRY_N(no_entry)
+#define drive_sdmmc4_dat3 DRV_PINGROUP_ENTRY_N(no_entry)
+#define drive_sdmmc4_dat2 DRV_PINGROUP_ENTRY_N(no_entry)
+#define drive_sdmmc4_dat1 DRV_PINGROUP_ENTRY_N(no_entry)
+#define drive_sdmmc4_dat0 DRV_PINGROUP_ENTRY_N(no_entry)
+#define drive_qspi_comp DRV_PINGROUP_ENTRY_N(no_entry)
+
+/* AON drive pin groups */
+#define drive_touch_clk_pee2 DRV_PINGROUP_ENTRY_Y(0x2004, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_uart3_cts_pw5 DRV_PINGROUP_ENTRY_Y(0x200c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_uart3_rts_pw4 DRV_PINGROUP_ENTRY_Y(0x2014, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_uart3_rx_pw3 DRV_PINGROUP_ENTRY_Y(0x201c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_uart3_tx_pw2 DRV_PINGROUP_ENTRY_Y(0x2024, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gen8_i2c_sda_pw1 DRV_PINGROUP_ENTRY_Y(0x202c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gen8_i2c_scl_pw0 DRV_PINGROUP_ENTRY_Y(0x2034, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_uart7_rx_pw7 DRV_PINGROUP_ENTRY_Y(0x203c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_uart7_tx_pw6 DRV_PINGROUP_ENTRY_Y(0x2044, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_sen0_pv0 DRV_PINGROUP_ENTRY_Y(0x204c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_sen1_pv1 DRV_PINGROUP_ENTRY_Y(0x2054, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_sen2_pv2 DRV_PINGROUP_ENTRY_Y(0x205c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_sen3_pv3 DRV_PINGROUP_ENTRY_Y(0x2064, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_sen4_pv4 DRV_PINGROUP_ENTRY_Y(0x206c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_sen5_pv5 DRV_PINGROUP_ENTRY_Y(0x2074, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_sen6_pv6 DRV_PINGROUP_ENTRY_Y(0x207c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_sen7_pv7 DRV_PINGROUP_ENTRY_Y(0x2084, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_sen8_pee0 DRV_PINGROUP_ENTRY_Y(0x208c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_sen9_pee1 DRV_PINGROUP_ENTRY_Y(0x2094, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_can_gpio7_paa7 DRV_PINGROUP_ENTRY_Y(0x3004, -1, -1, -1, -1, 28, 2, 30, 2, 1)
+#define drive_can1_dout_pz0 DRV_PINGROUP_ENTRY_Y(0x300c, -1, -1, -1, -1, 28, 2, 30, 2, 1)
+#define drive_can1_din_pz1 DRV_PINGROUP_ENTRY_Y(0x3014, -1, -1, -1, -1, 28, 2, 30, 2, 1)
+#define drive_can0_dout_pz2 DRV_PINGROUP_ENTRY_Y(0x301c, -1, -1, -1, -1, 28, 2, 30, 2, 1)
+#define drive_can0_din_pz3 DRV_PINGROUP_ENTRY_Y(0x3024, -1, -1, -1, -1, 28, 2, 30, 2, 1)
+#define drive_can_gpio0_paa0 DRV_PINGROUP_ENTRY_Y(0x302c, -1, -1, -1, -1, 28, 2, 30, 2, 1)
+#define drive_can_gpio1_paa1 DRV_PINGROUP_ENTRY_Y(0x3034, -1, -1, -1, -1, 28, 2, 30, 2, 1)
+#define drive_can_gpio2_paa2 DRV_PINGROUP_ENTRY_Y(0x303c, -1, -1, -1, -1, 28, 2, 30, 2, 1)
+#define drive_can_gpio3_paa3 DRV_PINGROUP_ENTRY_Y(0x3044, -1, -1, -1, -1, 28, 2, 30, 2, 1)
+#define drive_can_gpio4_paa4 DRV_PINGROUP_ENTRY_Y(0x304c, -1, -1, -1, -1, 28, 2, 30, 2, 1)
+#define drive_can_gpio5_paa5 DRV_PINGROUP_ENTRY_Y(0x3054, -1, -1, -1, -1, 28, 2, 30, 2, 1)
+#define drive_can_gpio6_paa6 DRV_PINGROUP_ENTRY_Y(0x305c, -1, -1, -1, -1, 28, 2, 30, 2, 1)
+#define drive_gpio_sw1_pff1 DRV_PINGROUP_ENTRY_Y(0x1004, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_sw2_pff2 DRV_PINGROUP_ENTRY_Y(0x100c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_sw3_pff3 DRV_PINGROUP_ENTRY_Y(0x1014, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_sw4_pff4 DRV_PINGROUP_ENTRY_Y(0x101c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_shutdown DRV_PINGROUP_ENTRY_Y(0x1024, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_pmu_int DRV_PINGROUP_ENTRY_Y(0x102c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_safe_state_ps3 DRV_PINGROUP_ENTRY_Y(0x1034, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_vcomp_alert_ps4 DRV_PINGROUP_ENTRY_Y(0x103c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_soc_pwr_req DRV_PINGROUP_ENTRY_Y(0x1044, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_batt_oc_ps2 DRV_PINGROUP_ENTRY_Y(0x104c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_clk_32k_in DRV_PINGROUP_ENTRY_Y(0x1054, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_power_on_pff0 DRV_PINGROUP_ENTRY_Y(0x105c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_pwr_i2c_scl_ps0 DRV_PINGROUP_ENTRY_Y(0x1064, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_pwr_i2c_sda_ps1 DRV_PINGROUP_ENTRY_Y(0x106c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_dis0_pu0 DRV_PINGROUP_ENTRY_Y(0x1084, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_dis1_pu1 DRV_PINGROUP_ENTRY_Y(0x108c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_dis2_pu2 DRV_PINGROUP_ENTRY_Y(0x1094, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_dis3_pu3 DRV_PINGROUP_ENTRY_Y(0x109c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_dis4_pu4 DRV_PINGROUP_ENTRY_Y(0x10a4, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+#define drive_gpio_dis5_pu5 DRV_PINGROUP_ENTRY_Y(0x10ac, 12, 5, 20, 5, -1, -1, -1, -1, 1)
+
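+/*
+ * Ties a group together: its name, pin list, the four selectable
+ * functions (TEGRA_MUX_ IDs indexing tegra186_functions) and the
+ * register description, with drive_##pg_name pulling in the matching
+ * drive entry defined above.
+ */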
+#define PINGROUP(pg_name, f0, f1, f2, f3, r, bank, pupd, e_io_hv, e_lpbk, e_input, e_lpdr, e_pbias_buf, \
+ gpio_sfio_sel, e_od, schmitt_b, drvtype, epreemp, io_reset, rfu_in) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = ARRAY_SIZE(pg_name##_pins), \
+ .funcs = { \
+ TEGRA_MUX_##f0, \
+ TEGRA_MUX_##f1, \
+ TEGRA_MUX_##f2, \
+ TEGRA_MUX_##f3, \
+ }, \
+ PIN_PINGROUP_ENTRY_Y(r, bank, pupd, e_io_hv, e_lpbk, \
+ e_input, e_lpdr, e_pbias_buf, \
+ gpio_sfio_sel, e_od, \
+ schmitt_b, drvtype, \
+ epreemp, io_reset, \
+ rfu_in) \
+ drive_##pg_name, \
+ }
+
+static const struct tegra_pingroup tegra186_groups[] = {
+ PINGROUP(gpio_aud3_pk0, RSVD0, DSPK1, SPDIF, RSVD3, 0x1000, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_aud2_pj7, RSVD0, DSPK1, SPDIF, RSVD3, 0x1008, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_aud1_pj6, RSVD0, RSVD1, RSVD2, RSVD3, 0x1010, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_aud0_pj5, RSVD0, RSVD1, RSVD2, RSVD3, 0x1018, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(aud_mclk_pj4, AUD, RSVD1, RSVD2, RSVD3, 0x1020, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(dap1_fs_pj3, I2S1, RSVD1, RSVD2, RSVD3, 0x1028, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(dap1_din_pj2, I2S1, RSVD1, RSVD2, RSVD3, 0x1030, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(dap1_dout_pj1, I2S1, RSVD1, RSVD2, RSVD3, 0x1038, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(dap1_sclk_pj0, I2S1, RSVD1, RSVD2, RSVD3, 0x1040, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(dmic1_clk_pm1, DMIC1, I2S3, RSVD2, RSVD3, 0x2000, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(dmic1_dat_pm0, DMIC1, I2S3, RSVD2, RSVD3, 0x2008, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(dmic2_dat_pm2, DMIC2, I2S3, RSVD2, RSVD3, 0x2010, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(dmic2_clk_pm3, DMIC2, I2S3, RSVD2, RSVD3, 0x2018, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(dmic4_dat_pm4, DMIC4, DSPK0, RSVD2, RSVD3, 0x2020, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(dmic4_clk_pm5, DMIC4, DSPK0, RSVD2, RSVD3, 0x2028, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(dap4_fs_pcc3, I2S4, RSVD1, RSVD2, RSVD3, 0x2030, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(dap4_din_pcc2, I2S4, RSVD1, RSVD2, RSVD3, 0x2038, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(dap4_dout_pcc1, I2S4, RSVD1, RSVD2, RSVD3, 0x2040, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(dap4_sclk_pcc0, I2S4, RSVD1, RSVD2, RSVD3, 0x2048, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(extperiph2_clk_po1, EXTPERIPH2, RSVD1, RSVD2, RSVD3, 0x0000, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(extperiph1_clk_po0, EXTPERIPH1, RSVD1, RSVD2, RSVD3, 0x0008, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(cam_i2c_sda_po3, I2C3, RSVD1, RSVD2, RSVD3, 0x0010, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(cam_i2c_scl_po2, I2C3, RSVD1, RSVD2, RSVD3, 0x0018, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_cam1_pn0, VGP1, RSVD1, RSVD2, RSVD3, 0x0020, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_cam2_pn1, VGP2, EXTPERIPH3, RSVD2, RSVD3, 0x0028, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_cam3_pn2, VGP3, EXTPERIPH4, RSVD2, RSVD3, 0x0030, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_cam4_pn3, VGP4, SPI4, RSVD2, RSVD3, 0x0038, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_cam5_pn4, VGP5, SPI4, RSVD2, RSVD3, 0x0040, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_cam6_pn5, VGP6, SPI4, RSVD2, RSVD3, 0x0048, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_cam7_pn6, RSVD0, SPI4, RSVD2, RSVD3, 0x0050, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(dap2_din_pc3, I2S2, RSVD1, RSVD2, RSVD3, 0x4000, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(dap2_dout_pc2, I2S2, RSVD1, RSVD2, RSVD3, 0x4008, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(dap2_fs_pc4, I2S2, RSVD1, RSVD2, RSVD3, 0x4010, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(dap2_sclk_pc1, I2S2, RSVD1, RSVD2, RSVD3, 0x4018, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart4_cts_pb3, UARTD, RSVD1, RSVD2, RSVD3, 0x4020, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart4_rts_pb2, UARTD, RSVD1, RSVD2, RSVD3, 0x4028, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart4_rx_pb1, UARTD, RSVD1, RSVD2, RSVD3, 0x4030, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart4_tx_pb0, UARTD, RSVD1, RSVD2, RSVD3, 0x4038, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_wan4_pc0, RSVD0, RSVD1, RSVD2, RSVD3, 0x4040, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_wan3_pb6, RSVD0, RSVD1, RSVD2, RSVD3, 0x4048, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_wan2_pb5, RSVD0, RSVD1, RSVD2, RSVD3, 0x4050, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_wan1_pb4, RSVD0, RSVD1, RSVD2, RSVD3, 0x4058, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gen1_i2c_scl_pc5, I2C1, RSVD1, RSVD2, RSVD3, 0x4060, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gen1_i2c_sda_pc6, I2C1, RSVD1, RSVD2, RSVD3, 0x4068, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart1_cts_pt3, UARTA, RSVD1, RSVD2, RSVD3, 0x5000, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart1_rts_pt2, UARTA, RSVD1, RSVD2, RSVD3, 0x5008, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart1_rx_pt1, UARTA, RSVD1, RSVD2, RSVD3, 0x5010, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart1_tx_pt0, UARTA, RSVD1, RSVD2, RSVD3, 0x5018, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(directdc1_out3_pq5, DIRECTDC1, RSVD1, RSVD2, RSVD3, 0x5028, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, 15, 17, Y),
+ PINGROUP(directdc1_out2_pq4, DIRECTDC1, RSVD1, RSVD2, RSVD3, 0x5030, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, 15, 17, Y),
+ PINGROUP(directdc1_out1_pq3, DIRECTDC1, RSVD1, RSVD2, RSVD3, 0x5038, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, 15, 17, Y),
+ PINGROUP(directdc1_out0_pq2, DIRECTDC1, RSVD1, RSVD2, RSVD3, 0x5040, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, 15, 17, Y),
+ PINGROUP(directdc1_in_pq1, DIRECTDC1, RSVD1, RSVD2, RSVD3, 0x5048, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, 15, 17, Y),
+ PINGROUP(directdc1_clk_pq0, DIRECTDC1, RSVD1, RSVD2, RSVD3, 0x5050, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, 15, 17, Y),
+ PINGROUP(directdc_comp, DIRECTDC, RSVD1, RSVD2, RSVD3, 0x5058, 0, Y, -1, -1, -1, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(gpio_pq0_pi0, RSVD0, IQC0, I2S6, RSVD3, 0x3000, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(gpio_pq1_pi1, RSVD0, IQC0, I2S6, RSVD3, 0x3008, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(gpio_pq2_pi2, RSVD0, IQC0, I2S6, RSVD3, 0x3010, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(gpio_pq3_pi3, RSVD0, IQC0, I2S6, RSVD3, 0x3018, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(gpio_pq4_pi4, RSVD0, IQC1, DTV, RSVD3, 0x3020, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(gpio_pq5_pi5, RSVD0, IQC1, DTV, RSVD3, 0x3028, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(gpio_pq6_pi6, RSVD0, IQC1, DTV, RSVD3, 0x3030, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(gpio_pq7_pi7, RSVD0, IQC1, DTV, RSVD3, 0x3038, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(gpio_edp2_pp5, RSVD0, UARTF, SDMMC3, RSVD3, 0x10000, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_edp3_pp6, RSVD0, UARTF, SDMMC1, RSVD3, 0x10008, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_edp0_pp3, RSVD0, UARTF, SDMMC3, RSVD3, 0x10010, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_edp1_pp4, RSVD0, UARTF, SDMMC1, RSVD3, 0x10018, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(dp_aux_ch0_hpd_pp0, DP, RSVD1, RSVD2, RSVD3, 0x10020, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(dp_aux_ch1_hpd_pp1, DP, RSVD1, RSVD2, RSVD3, 0x10028, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(hdmi_cec_pp2, HDMI, RSVD1, RSVD2, RSVD3, 0x10030, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(pex_l2_clkreq_n_pa6, PE2, GP, SATA, RSVD3, 0x7000, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(pex_wake_n_pa2, PE, RSVD1, RSVD2, RSVD3, 0x7008, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(pex_l1_clkreq_n_pa4, PE1, RSVD1, RSVD2, RSVD3, 0x7010, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(pex_l1_rst_n_pa3, PE1, RSVD1, RSVD2, RSVD3, 0x7018, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(pex_l0_clkreq_n_pa1, PE0, RSVD1, RSVD2, RSVD3, 0x7020, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(pex_l0_rst_n_pa0, PE0, RSVD1, RSVD2, RSVD3, 0x7028, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(pex_l2_rst_n_pa5, PE2, SOC, SATA, RSVD3, 0x7030, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(sdmmc1_clk_pd0, SDMMC1, RSVD1, RSVD2, RSVD3, 0x8000, 0, Y, 5, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc1_cmd_pd1, SDMMC1, RSVD1, RSVD2, RSVD3, 0x8008, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc1_comp, SDMMC1, RSVD1, RSVD2, RSVD3, 0x8010, 0, Y, -1, -1, 6, -1, -1, -1, -1, -1, N, -1, -1, N),
+ PINGROUP(sdmmc1_dat3_pd5, SDMMC1, RSVD1, RSVD2, RSVD3, 0x8014, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc1_dat2_pd4, SDMMC1, RSVD1, RSVD2, RSVD3, 0x801c, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc1_dat1_pd3, SDMMC1, RSVD1, RSVD2, RSVD3, 0x8024, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc1_dat0_pd2, SDMMC1, RSVD1, RSVD2, RSVD3, 0x802c, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(eqos_td3_pe4, EQOS, SDMMC2, RSVD2, RSVD3, 0x9000, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(eqos_td2_pe3, EQOS, SDMMC2, RSVD2, RSVD3, 0x9008, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(eqos_td1_pe2, EQOS, SDMMC2, RSVD2, RSVD3, 0x9010, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(eqos_td0_pe1, EQOS, SDMMC2, RSVD2, RSVD3, 0x9018, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(eqos_rd3_pf1, EQOS, SDMMC2, RSVD2, RSVD3, 0x9020, 0, Y, -1, 5, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(eqos_rd2_pf0, EQOS, SDMMC2, RSVD2, RSVD3, 0x9028, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(eqos_rd1_pe7, EQOS, SDMMC2, RSVD2, RSVD3, 0x9030, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(eqos_mdio_pf4, EQOS, SOC, RSVD2, RSVD3, 0x9038, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(eqos_rd0_pe6, EQOS, SDMMC2, RSVD2, RSVD3, 0x9040, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(eqos_mdc_pf5, EQOS, RSVD1, RSVD2, RSVD3, 0x9048, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(eqos_comp, EQOS, SDMMC2, RSVD2, RSVD3, 0x9050, 0, Y, -1, -1, -1, -1, -1, -1, -1, -1, N, -1, -1, N),
+ PINGROUP(eqos_txc_pe0, EQOS, SDMMC2, RSVD2, RSVD3, 0x9054, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(eqos_rxc_pf3, EQOS, SDMMC2, RSVD2, RSVD3, 0x905c, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(eqos_tx_ctl_pe5, EQOS, SDMMC2, RSVD2, RSVD3, 0x9064, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(eqos_rx_ctl_pf2, EQOS, SDMMC2, RSVD2, RSVD3, 0x906c, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc3_dat3_pg5, SDMMC3, RSVD1, RSVD2, RSVD3, 0xa000, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc3_dat2_pg4, SDMMC3, RSVD1, RSVD2, RSVD3, 0xa008, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc3_dat1_pg3, SDMMC3, RSVD1, RSVD2, RSVD3, 0xa010, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc3_dat0_pg2, SDMMC3, RSVD1, RSVD2, RSVD3, 0xa018, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc3_comp, SDMMC3, RSVD1, RSVD2, RSVD3, 0xa020, 0, Y, -1, -1, -1, -1, -1, -1, -1, -1, N, -1, -1, N),
+ PINGROUP(sdmmc3_cmd_pg1, SDMMC3, RSVD1, RSVD2, RSVD3, 0xa024, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc3_clk_pg0, SDMMC3, RSVD1, RSVD2, RSVD3, 0xa02c, 0, Y, -1, 5, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc4_clk, SDMMC4, RSVD1, RSVD2, RSVD3, 0x6004, 0, Y, -1, 5, 6, -1, 9, -1, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc4_cmd, SDMMC4, RSVD1, RSVD2, RSVD3, 0x6008, 0, Y, -1, -1, 6, -1, 9, -1, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc4_dqs, SDMMC4, RSVD1, RSVD2, RSVD3, 0x600c, 0, Y, -1, -1, 6, -1, 9, -1, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc4_dat7, SDMMC4, RSVD1, RSVD2, RSVD3, 0x6010, 0, Y, -1, -1, 6, -1, 9, -1, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc4_dat6, SDMMC4, RSVD1, RSVD2, RSVD3, 0x6014, 0, Y, -1, -1, 6, -1, 9, -1, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc4_dat5, SDMMC4, RSVD1, RSVD2, RSVD3, 0x6018, 0, Y, -1, -1, 6, -1, 9, -1, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc4_dat4, SDMMC4, RSVD1, RSVD2, RSVD3, 0x601c, 0, Y, -1, -1, 6, -1, 9, -1, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc4_dat3, SDMMC4, RSVD1, RSVD2, RSVD3, 0x6020, 0, Y, -1, -1, 6, -1, 9, -1, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc4_dat2, SDMMC4, RSVD1, RSVD2, RSVD3, 0x6024, 0, Y, -1, -1, 6, -1, 9, -1, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc4_dat1, SDMMC4, RSVD1, RSVD2, RSVD3, 0x6028, 0, Y, -1, -1, 6, -1, 9, -1, -1, 12, Y, -1, -1, Y),
+ PINGROUP(sdmmc4_dat0, SDMMC4, RSVD1, RSVD2, RSVD3, 0x602c, 0, Y, -1, -1, 6, -1, 9, -1, -1, 12, Y, -1, -1, Y),
+ PINGROUP(qspi_io3_pr4, QSPI, RSVD1, RSVD2, RSVD3, 0xB000, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, 15, 17, Y),
+ PINGROUP(qspi_io2_pr3, QSPI, RSVD1, RSVD2, RSVD3, 0xB008, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, 15, 17, Y),
+ PINGROUP(qspi_io1_pr2, QSPI, RSVD1, RSVD2, RSVD3, 0xB010, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, 15, 17, Y),
+ PINGROUP(qspi_io0_pr1, QSPI, RSVD1, RSVD2, RSVD3, 0xB018, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, 15, 17, Y),
+ PINGROUP(qspi_sck_pr0, QSPI, RSVD1, RSVD2, RSVD3, 0xB020, 0, Y, -1, 5, 6, -1, 9, 10, -1, 12, Y, 15, 17, Y),
+ PINGROUP(qspi_cs_n_pr5, QSPI, RSVD1, RSVD2, RSVD3, 0xB028, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, 15, 17, Y),
+ PINGROUP(qspi_comp, QSPI, RSVD1, RSVD2, RSVD3, 0xB030, 0, Y, -1, -1, -1, -1, -1, -1, -1, -1, Y, -1, -1, Y),
+ PINGROUP(gpio_wan8_ph3, RSVD0, RSVD1, SPI1, RSVD3, 0xd000, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_wan7_ph2, RSVD0, RSVD1, SPI1, RSVD3, 0xd008, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_wan6_ph1, RSVD0, RSVD1, SPI1, RSVD3, 0xd010, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_wan5_ph0, RSVD0, RSVD1, SPI1, RSVD3, 0xd018, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart2_tx_px0, UARTB, RSVD1, RSVD2, RSVD3, 0xd020, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart2_rx_px1, UARTB, RSVD1, RSVD2, RSVD3, 0xd028, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart2_rts_px2, UARTB, RSVD1, RSVD2, RSVD3, 0xd030, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart2_cts_px3, UARTB, RSVD1, RSVD2, RSVD3, 0xd038, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart5_rx_px5, UARTE, SPI3, GP, RSVD3, 0xd040, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart5_tx_px4, UARTE, SPI3, NV, RSVD3, 0xd048, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart5_rts_px6, UARTE, SPI3, RSVD2, RSVD3, 0xd050, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart5_cts_px7, UARTE, SPI3, RSVD2, RSVD3, 0xd058, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_mdm1_py0, RSVD0, RSVD1, RSVD2, RSVD3, 0xd060, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_mdm2_py1, RSVD0, RSVD1, RSVD2, RSVD3, 0xd068, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_mdm3_py2, RSVD0, RSVD1, RSVD2, RSVD3, 0xd070, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_mdm4_py3, RSVD0, SPI1, CCLA, RSVD3, 0xd078, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_mdm5_py4, RSVD0, SPI1, RSVD2, RSVD3, 0xd080, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_mdm6_py5, SOC, RSVD1, RSVD2, RSVD3, 0xd088, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_mdm7_py6, RSVD0, RSVD1, RSVD2, RSVD3, 0xd090, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(bcpu_pwr_req_ph4, RSVD0, RSVD1, RSVD2, RSVD3, 0xd098, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(mcpu_pwr_req_ph5, RSVD0, RSVD1, RSVD2, RSVD3, 0xd0a0, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpu_pwr_req_ph6, RSVD0, RSVD1, RSVD2, RSVD3, 0xd0a8, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gen7_i2c_scl_pl0, I2C7, I2S5, RSVD2, RSVD3, 0xd0b0, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gen7_i2c_sda_pl1, I2C7, I2S5, RSVD2, RSVD3, 0xd0b8, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gen9_i2c_sda_pl3, I2C9, I2S5, RSVD2, RSVD3, 0xd0c0, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gen9_i2c_scl_pl2, I2C9, I2S5, RSVD2, RSVD3, 0xd0c8, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(usb_vbus_en0_pl4, USB, RSVD1, RSVD2, RSVD3, 0xd0d0, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(usb_vbus_en1_pl5, USB, RSVD1, RSVD2, RSVD3, 0xd0d8, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gp_pwm7_pl7, GP, RSVD1, RSVD2, RSVD3, 0xd0e0, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gp_pwm6_pl6, GP, RSVD1, RSVD2, RSVD3, 0xd0e8, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(ufs0_rst_pbb1, UFS0, RSVD1, RSVD2, RSVD3, 0x11000, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, 15, 17, Y),
+ PINGROUP(ufs0_ref_clk_pbb0, UFS0, RSVD1, RSVD2, RSVD3, 0x11008, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, 15, 17, Y),
+};
+
+static const struct tegra_pinctrl_soc_data tegra186_pinctrl = {
+ .pins = tegra186_pins,
+ .npins = ARRAY_SIZE(tegra186_pins),
+ .functions = tegra186_functions,
+ .nfunctions = ARRAY_SIZE(tegra186_functions),
+ .groups = tegra186_groups,
+ .ngroups = ARRAY_SIZE(tegra186_groups),
+ .hsm_in_mux = false,
+ .schmitt_in_mux = true,
+ .drvtype_in_mux = true,
+ .sfsel_in_mux = true,
+};
+
+static const struct pinctrl_pin_desc tegra186_aon_pins[] = {
+ PINCTRL_PIN(TEGRA_PIN_PWR_I2C_SCL_PS0, "PWR_I2C_SCL_PS0"),
+ PINCTRL_PIN(TEGRA_PIN_PWR_I2C_SDA_PS1, "PWR_I2C_SDA_PS1"),
+ PINCTRL_PIN(TEGRA_PIN_BATT_OC_PS2, "BATT_OC_PS2"),
+ PINCTRL_PIN(TEGRA_PIN_SAFE_STATE_PS3, "SAFE_STATE_PS3"),
+ PINCTRL_PIN(TEGRA_PIN_VCOMP_ALERT_PS4, "VCOMP_ALERT_PS4"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_DIS0_PU0, "GPIO_DIS0_PU0"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_DIS1_PU1, "GPIO_DIS1_PU1"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_DIS2_PU2, "GPIO_DIS2_PU2"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_DIS3_PU3, "GPIO_DIS3_PU3"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_DIS4_PU4, "GPIO_DIS4_PU4"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_DIS5_PU5, "GPIO_DIS5_PU5"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_SEN0_PV0, "GPIO_SEN0_PV0"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_SEN1_PV1, "GPIO_SEN1_PV1"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_SEN2_PV2, "GPIO_SEN2_PV2"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_SEN3_PV3, "GPIO_SEN3_PV3"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_SEN4_PV4, "GPIO_SEN4_PV4"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_SEN5_PV5, "GPIO_SEN5_PV5"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_SEN6_PV6, "GPIO_SEN6_PV6"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_SEN7_PV7, "GPIO_SEN7_PV7"),
+ PINCTRL_PIN(TEGRA_PIN_GEN8_I2C_SCL_PW0, "GEN8_I2C_SCL_PW0"),
+ PINCTRL_PIN(TEGRA_PIN_GEN8_I2C_SDA_PW1, "GEN8_I2C_SDA_PW1"),
+ PINCTRL_PIN(TEGRA_PIN_UART3_TX_PW2, "UART3_TX_PW2"),
+ PINCTRL_PIN(TEGRA_PIN_UART3_RX_PW3, "UART3_RX_PW3"),
+ PINCTRL_PIN(TEGRA_PIN_UART3_RTS_PW4, "UART3_RTS_PW4"),
+ PINCTRL_PIN(TEGRA_PIN_UART3_CTS_PW5, "UART3_CTS_PW5"),
+ PINCTRL_PIN(TEGRA_PIN_UART7_TX_PW6, "UART7_TX_PW6"),
+ PINCTRL_PIN(TEGRA_PIN_UART7_RX_PW7, "UART7_RX_PW7"),
+ PINCTRL_PIN(TEGRA_PIN_CAN1_DOUT_PZ0, "CAN1_DOUT_PZ0"),
+ PINCTRL_PIN(TEGRA_PIN_CAN1_DIN_PZ1, "CAN1_DIN_PZ1"),
+ PINCTRL_PIN(TEGRA_PIN_CAN0_DOUT_PZ2, "CAN0_DOUT_PZ2"),
+ PINCTRL_PIN(TEGRA_PIN_CAN0_DIN_PZ3, "CAN0_DIN_PZ3"),
+ PINCTRL_PIN(TEGRA_PIN_CAN_GPIO0_PAA0, "CAN_GPIO0_PAA0"),
+ PINCTRL_PIN(TEGRA_PIN_CAN_GPIO1_PAA1, "CAN_GPIO1_PAA1"),
+ PINCTRL_PIN(TEGRA_PIN_CAN_GPIO2_PAA2, "CAN_GPIO2_PAA2"),
+ PINCTRL_PIN(TEGRA_PIN_CAN_GPIO3_PAA3, "CAN_GPIO3_PAA3"),
+ PINCTRL_PIN(TEGRA_PIN_CAN_GPIO4_PAA4, "CAN_GPIO4_PAA4"),
+ PINCTRL_PIN(TEGRA_PIN_CAN_GPIO5_PAA5, "CAN_GPIO5_PAA5"),
+ PINCTRL_PIN(TEGRA_PIN_CAN_GPIO6_PAA6, "CAN_GPIO6_PAA6"),
+ PINCTRL_PIN(TEGRA_PIN_CAN_GPIO7_PAA7, "CAN_GPIO7_PAA7"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_SEN8_PEE0, "GPIO_SEN8_PEE0"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_SEN9_PEE1, "GPIO_SEN9_PEE1"),
+ PINCTRL_PIN(TEGRA_PIN_TOUCH_CLK_PEE2, "TOUCH_CLK_PEE2"),
+ PINCTRL_PIN(TEGRA_PIN_POWER_ON_PFF0, "POWER_ON_PFF0"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_SW1_PFF1, "GPIO_SW1_PFF1"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_SW2_PFF2, "GPIO_SW2_PFF2"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_SW3_PFF3, "GPIO_SW3_PFF3"),
+ PINCTRL_PIN(TEGRA_PIN_GPIO_SW4_PFF4, "GPIO_SW4_PFF4"),
+ PINCTRL_PIN(TEGRA_PIN_SHUTDOWN, "SHUTDOWN"),
+ PINCTRL_PIN(TEGRA_PIN_PMU_INT, "PMU_INT"),
+ PINCTRL_PIN(TEGRA_PIN_SOC_PWR_REQ, "SOC_PWR_REQ"),
+ PINCTRL_PIN(TEGRA_PIN_CLK_32K_IN, "CLK_32K_IN"),
+};
+
+static const struct tegra_pingroup tegra186_aon_groups[] = {
+ PINGROUP(touch_clk_pee2, TOUCH, RSVD1, RSVD2, RSVD3, 0x2000, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart3_cts_pw5, UARTC, RSVD1, RSVD2, RSVD3, 0x2008, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart3_rts_pw4, UARTC, RSVD1, RSVD2, RSVD3, 0x2010, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart3_rx_pw3, UARTC, RSVD1, RSVD2, RSVD3, 0x2018, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart3_tx_pw2, UARTC, RSVD1, RSVD2, RSVD3, 0x2020, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gen8_i2c_sda_pw1, I2C8, RSVD1, RSVD2, RSVD3, 0x2028, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gen8_i2c_scl_pw0, I2C8, RSVD1, RSVD2, RSVD3, 0x2030, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart7_rx_pw7, UARTG, RSVD1, RSVD2, RSVD3, 0x2038, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(uart7_tx_pw6, UARTG, RSVD1, RSVD2, RSVD3, 0x2040, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_sen0_pv0, RSVD0, RSVD1, RSVD2, RSVD3, 0x2048, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_sen1_pv1, SPI2, RSVD1, RSVD2, RSVD3, 0x2050, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_sen2_pv2, SPI2, RSVD1, RSVD2, RSVD3, 0x2058, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_sen3_pv3, SPI2, RSVD1, RSVD2, RSVD3, 0x2060, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_sen4_pv4, SPI2, RSVD1, RSVD2, RSVD3, 0x2068, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_sen5_pv5, RSVD0, RSVD1, RSVD2, RSVD3, 0x2070, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_sen6_pv6, RSVD0, GP, RSVD2, RSVD3, 0x2078, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_sen7_pv7, RSVD0, WDT, RSVD2, RSVD3, 0x2080, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_sen8_pee0, RSVD0, I2C2, RSVD2, RSVD3, 0x2088, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_sen9_pee1, RSVD0, I2C2, RSVD2, RSVD3, 0x2090, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(can_gpio7_paa7, RSVD0, WDT, RSVD2, RSVD3, 0x3000, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(can1_dout_pz0, CAN1, RSVD1, RSVD2, RSVD3, 0x3008, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(can1_din_pz1, CAN1, RSVD1, RSVD2, RSVD3, 0x3010, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(can0_dout_pz2, CAN0, RSVD1, RSVD2, RSVD3, 0x3018, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(can0_din_pz3, CAN0, RSVD1, RSVD2, RSVD3, 0x3020, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(can_gpio0_paa0, RSVD0, DMIC3, DMIC5, RSVD3, 0x3028, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(can_gpio1_paa1, RSVD0, DMIC3, DMIC5, RSVD3, 0x3030, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(can_gpio2_paa2, GPIO, RSVD1, RSVD2, RSVD3, 0x3038, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(can_gpio3_paa3, RSVD0, RSVD1, RSVD2, RSVD3, 0x3040, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(can_gpio4_paa4, RSVD0, RSVD1, RSVD2, RSVD3, 0x3048, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(can_gpio5_paa5, RSVD0, RSVD1, RSVD2, RSVD3, 0x3050, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(can_gpio6_paa6, RSVD0, RSVD1, RSVD2, RSVD3, 0x3058, 0, Y, -1, -1, 6, -1, 9, 10, -1, 12, Y, -1, -1, Y),
+ PINGROUP(gpio_sw1_pff1, RSVD0, RSVD1, RSVD2, RSVD3, 0x1000, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_sw2_pff2, RSVD0, RSVD1, RSVD2, RSVD3, 0x1008, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_sw3_pff3, RSVD0, RSVD1, RSVD2, RSVD3, 0x1010, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_sw4_pff4, RSVD0, RSVD1, RSVD2, RSVD3, 0x1018, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(shutdown, RSVD0, RSVD1, RSVD2, RSVD3, 0x1020, 0, Y, -1, -1, 6, 8, -1, -1, -1, 12, N, -1, -1, N),
+ PINGROUP(pmu_int, RSVD0, RSVD1, RSVD2, RSVD3, 0x1028, 0, Y, -1, -1, 6, 8, -1, -1, -1, 12, N, -1, -1, N),
+ PINGROUP(safe_state_ps3, SCE, RSVD1, RSVD2, RSVD3, 0x1030, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(vcomp_alert_ps4, SOC, RSVD1, RSVD2, RSVD3, 0x1038, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(soc_pwr_req, RSVD0, RSVD1, RSVD2, RSVD3, 0x1040, 0, Y, -1, -1, 6, 8, -1, -1, -1, 12, N, -1, -1, N),
+ PINGROUP(batt_oc_ps2, SOC, RSVD1, RSVD2, RSVD3, 0x1048, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(clk_32k_in, RSVD0, RSVD1, RSVD2, RSVD3, 0x1050, 0, Y, -1, -1, 6, 8, -1, -1, -1, -1, N, -1, -1, N),
+ PINGROUP(power_on_pff0, RSVD0, RSVD1, RSVD2, RSVD3, 0x1058, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(pwr_i2c_scl_ps0, I2C5, RSVD1, RSVD2, RSVD3, 0x1060, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(pwr_i2c_sda_ps1, I2C5, RSVD1, RSVD2, RSVD3, 0x1068, 0, Y, 5, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_dis0_pu0, RSVD0, GP, DCB, DCC, 0x1080, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_dis1_pu1, RSVD0, RSVD1, DISPLAYA, RSVD3, 0x1088, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_dis2_pu2, RSVD0, GP, DCA, RSVD3, 0x1090, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_dis3_pu3, RSVD0, RSVD1, DISPLAYB, DCC, 0x1098, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_dis4_pu4, RSVD0, SOC, DCA, RSVD3, 0x10a0, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+ PINGROUP(gpio_dis5_pu5, RSVD0, GP, DCC, DCB, 0x10a8, 0, Y, -1, -1, 6, 8, -1, 10, 11, 12, N, -1, -1, N),
+};
+
+static const struct tegra_pinctrl_soc_data tegra186_pinctrl_aon = {
+ .pins = tegra186_aon_pins,
+ .npins = ARRAY_SIZE(tegra186_aon_pins),
+ .functions = tegra186_functions,
+ .nfunctions = ARRAY_SIZE(tegra186_functions),
+ .groups = tegra186_aon_groups,
+ .ngroups = ARRAY_SIZE(tegra186_aon_groups),
+ .hsm_in_mux = false,
+ .schmitt_in_mux = true,
+ .drvtype_in_mux = true,
+ .sfsel_in_mux = true,
+};
+
+static int tegra186_pinctrl_probe(struct platform_device *pdev)
+{
+ const struct tegra_pinctrl_soc_data *soc = of_device_get_match_data(&pdev->dev);
+
+ return tegra_pinctrl_probe(pdev, soc);
+}
+
+static const struct of_device_id tegra186_pinctrl_of_match[] = {
+ { .compatible = "nvidia,tegra186-pinmux", .data = &tegra186_pinctrl },
+ { .compatible = "nvidia,tegra186-pinmux-aon", .data = &tegra186_pinctrl_aon },
+ { },
+};
+
+static struct platform_driver tegra186_pinctrl_driver = {
+ .driver = {
+ .name = "tegra186-pinctrl",
+ .of_match_table = tegra186_pinctrl_of_match,
+ },
+ .probe = tegra186_pinctrl_probe,
+};
+
+static int __init tegra186_pinctrl_init(void)
+{
+ return platform_driver_register(&tegra186_pinctrl_driver);
+}
+arch_initcall(tegra186_pinctrl_init);
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index 767c6808a463..7213a8d4bf09 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -549,7 +549,7 @@ static const struct gpio_chip wmt_gpio_chip = {
.direction_input = pinctrl_gpio_direction_input,
.direction_output = wmt_gpio_direction_output,
.get = wmt_gpio_get_value,
- .set_rv = wmt_gpio_set_value,
+ .set = wmt_gpio_set_value,
.can_sleep = false,
};
diff --git a/drivers/platform/arm64/Kconfig b/drivers/platform/arm64/Kconfig
index 06288aebc559..10f905d7d6bf 100644
--- a/drivers/platform/arm64/Kconfig
+++ b/drivers/platform/arm64/Kconfig
@@ -70,4 +70,24 @@ config EC_LENOVO_YOGA_C630
Say M or Y here to include this support.
+config EC_LENOVO_THINKPAD_T14S
+ tristate "Lenovo Thinkpad T14s Embedded Controller driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on I2C
+ depends on INPUT
+ select INPUT_SPARSEKMAP
+ select LEDS_CLASS
+ select NEW_LEDS
+ select SND_CTL_LED if SND
+ help
+ Driver for the Embedded Controller in the Qualcomm Snapdragon-based
+ Lenovo Thinkpad T14s, which provides access to the keyboard backlight
+ and status LEDs.
+
+ This driver provides support for this laptop, where these controls
+ are not properly exposed via the standard Qualcomm devices.
+
+ Say M or Y here to include this support.
+
endif # ARM64_PLATFORM_DEVICES
diff --git a/drivers/platform/arm64/Makefile b/drivers/platform/arm64/Makefile
index 46a99eba3264..60c131cff6a1 100644
--- a/drivers/platform/arm64/Makefile
+++ b/drivers/platform/arm64/Makefile
@@ -8,3 +8,4 @@
obj-$(CONFIG_EC_ACER_ASPIRE1) += acer-aspire1-ec.o
obj-$(CONFIG_EC_HUAWEI_GAOKUN) += huawei-gaokun-ec.o
obj-$(CONFIG_EC_LENOVO_YOGA_C630) += lenovo-yoga-c630.o
+obj-$(CONFIG_EC_LENOVO_THINKPAD_T14S) += lenovo-thinkpad-t14s.o
diff --git a/drivers/platform/arm64/lenovo-thinkpad-t14s.c b/drivers/platform/arm64/lenovo-thinkpad-t14s.c
new file mode 100644
index 000000000000..1d5d11adaf32
--- /dev/null
+++ b/drivers/platform/arm64/lenovo-thinkpad-t14s.c
@@ -0,0 +1,616 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025, Sebastian Reichel
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/container_of.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/interrupt.h>
+#include <linux/leds.h>
+#include <linux/lockdep.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define T14S_EC_CMD_ECRD 0x02
+#define T14S_EC_CMD_ECWR 0x03
+#define T14S_EC_CMD_EVT 0xf0
+
+#define T14S_EC_REG_LED 0x0c
+#define T14S_EC_REG_KBD_BL1 0x0d
+#define T14S_EC_REG_KBD_BL2 0xe1
+#define T14S_EC_KBD_BL1_MASK GENMASK_U8(7, 6)
+#define T14S_EC_KBD_BL2_MASK GENMASK_U8(3, 2)
+#define T14S_EC_REG_AUD 0x30
+#define T14S_EC_MIC_MUTE_LED BIT(5)
+#define T14S_EC_SPK_MUTE_LED BIT(6)
+
+#define T14S_EC_EVT_NONE 0x00
+#define T14S_EC_EVT_KEY_FN_4 0x13
+#define T14S_EC_EVT_KEY_FN_F7 0x16
+#define T14S_EC_EVT_KEY_FN_SPACE 0x1f
+#define T14S_EC_EVT_KEY_TP_DOUBLE_TAP 0x20
+#define T14S_EC_EVT_AC_CONNECTED 0x26
+#define T14S_EC_EVT_AC_DISCONNECTED 0x27
+#define T14S_EC_EVT_KEY_POWER 0x28
+#define T14S_EC_EVT_LID_OPEN 0x2a
+#define T14S_EC_EVT_LID_CLOSED 0x2b
+#define T14S_EC_EVT_THERMAL_TZ40 0x5c
+#define T14S_EC_EVT_THERMAL_TZ42 0x5d
+#define T14S_EC_EVT_THERMAL_TZ39 0x5e
+#define T14S_EC_EVT_KEY_FN_F12 0x62
+#define T14S_EC_EVT_KEY_FN_TAB 0x63
+#define T14S_EC_EVT_KEY_FN_F8 0x64
+#define T14S_EC_EVT_KEY_FN_F10 0x65
+#define T14S_EC_EVT_KEY_FN_F4 0x6a
+#define T14S_EC_EVT_KEY_FN_D 0x6b
+#define T14S_EC_EVT_KEY_FN_T 0x6c
+#define T14S_EC_EVT_KEY_FN_H 0x6d
+#define T14S_EC_EVT_KEY_FN_M 0x6e
+#define T14S_EC_EVT_KEY_FN_L 0x6f
+#define T14S_EC_EVT_KEY_FN_RIGHT_SHIFT 0x71
+#define T14S_EC_EVT_KEY_FN_ESC 0x74
+#define T14S_EC_EVT_KEY_FN_N 0x79
+#define T14S_EC_EVT_KEY_FN_F11 0x7a
+#define T14S_EC_EVT_KEY_FN_G 0x7e
+
+/* Hardware LED blink rate is 1 Hz (500ms off, 500ms on) */
+#define T14S_EC_BLINK_RATE_ON_OFF_MS 500
+
+/*
+ * Add a virtual offset to all key event codes for sparse keymap handling,
+ * since the sparse keymap infrastructure does not map some of the raw key
+ * event codes used by the EC correctly. For example, 0x16
+ * (T14S_EC_EVT_KEY_FN_F7) would be mapped to KEY_MUTE if no offset were
+ * applied.
+ */
+#define T14S_EC_KEY_EVT_OFFSET 0x1000
+#define T14S_EC_KEY_ENTRY(key, value) \
+ { KE_KEY, T14S_EC_KEY_EVT_OFFSET + T14S_EC_EVT_KEY_##key, { value } }
+
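+/* LED states are encoded in the two top bits of the LED control byte */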
+enum t14s_ec_led_status_t {
+ T14S_EC_LED_OFF = 0x00,
+ T14S_EC_LED_ON = 0x80,
+ T14S_EC_LED_BLINK = 0xc0,
+};
+
+struct t14s_ec_led_classdev {
+ struct led_classdev led_classdev;
+ int led;
+ enum t14s_ec_led_status_t cache;
+ struct t14s_ec *ec;
+};
+
+struct t14s_ec {
+ struct regmap *regmap;
+ struct device *dev;
+ struct t14s_ec_led_classdev led_pwr_btn;
+ struct t14s_ec_led_classdev led_chrg_orange;
+ struct t14s_ec_led_classdev led_chrg_white;
+ struct t14s_ec_led_classdev led_lid_logo_dot;
+ struct led_classdev kbd_backlight;
+ struct led_classdev led_mic_mute;
+ struct led_classdev led_spk_mute;
+ struct input_dev *inputdev;
+};
+
+static const struct regmap_config t14s_ec_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+};
+
+static int t14s_ec_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct t14s_ec *ec = context;
+ struct i2c_client *client = to_i2c_client(ec->dev);
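+ /* ECWR request: command, register address low/high, byte count, value (layout inferred from usage) */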
+ u8 buf[5] = {T14S_EC_CMD_ECWR, reg, 0x00, 0x01, val};
+ int ret;
+
+ ret = i2c_master_send(client, buf, sizeof(buf));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int t14s_ec_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct t14s_ec *ec = context;
+ struct i2c_client *client = to_i2c_client(ec->dev);
+ u8 buf[4] = {T14S_EC_CMD_ECRD, reg, 0x00, 0x01};
+ struct i2c_msg request, response;
+ u8 result;
+ int ret;
+
+ request.addr = client->addr;
+ request.flags = I2C_M_STOP;
+ request.len = sizeof(buf);
+ request.buf = buf;
+ response.addr = client->addr;
+ response.flags = I2C_M_RD;
+ response.len = 1;
+ response.buf = &result;
+
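+ /*
+ * The EC apparently needs a STOP between the register-select request
+ * and the read (hence I2C_M_STOP), so do two raw transfers under a
+ * manual bus lock instead of one combined i2c_transfer() call.
+ */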
+ i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
+
+ ret = __i2c_transfer(client->adapter, &request, 1);
+ if (ret < 0)
+ goto out;
+
+ ret = __i2c_transfer(client->adapter, &response, 1);
+ if (ret < 0)
+ goto out;
+
+ *val = result;
+ ret = 0;
+
+out:
+ i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
+ return ret;
+}
+
+static const struct regmap_bus t14s_ec_regmap_bus = {
+ .reg_write = t14s_ec_write,
+ .reg_read = t14s_ec_read,
+};
+
+static int t14s_ec_read_evt(struct t14s_ec *ec, u8 *val)
+{
+ struct i2c_client *client = to_i2c_client(ec->dev);
+ u8 buf[4] = {T14S_EC_CMD_EVT, 0x00, 0x00, 0x01};
+ struct i2c_msg request, response;
+ int ret;
+
+ request.addr = client->addr;
+ request.flags = I2C_M_STOP;
+ request.len = sizeof(buf);
+ request.buf = buf;
+ response.addr = client->addr;
+ response.flags = I2C_M_RD;
+ response.len = 1;
+ response.buf = val;
+
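+ /* Same split write-then-read sequence as t14s_ec_read(), but with the event command */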
+ i2c_lock_bus(client->adapter, I2C_LOCK_SEGMENT);
+
+ ret = __i2c_transfer(client->adapter, &request, 1);
+ if (ret < 0)
+ goto out;
+
+ ret = __i2c_transfer(client->adapter, &response, 1);
+ if (ret < 0)
+ goto out;
+
+ ret = 0;
+
+out:
+ i2c_unlock_bus(client->adapter, I2C_LOCK_SEGMENT);
+ return ret;
+}
+
+static int t14s_led_set_status(struct t14s_ec *ec,
+ struct t14s_ec_led_classdev *led,
+ const enum t14s_ec_led_status_t ledstatus)
+{
+ int ret;
+
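+ /* The control byte combines the LED index (low bits) with the requested state (top bits) */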
+ ret = regmap_write(ec->regmap, T14S_EC_REG_LED,
+ led->led | ledstatus);
+ if (ret < 0)
+ return ret;
+
+ led->cache = ledstatus;
+ return 0;
+}
+
+static int t14s_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct t14s_ec_led_classdev *led = container_of(led_cdev,
+ struct t14s_ec_led_classdev, led_classdev);
+ enum t14s_ec_led_status_t new_state;
+
+ if (brightness == LED_OFF)
+ new_state = T14S_EC_LED_OFF;
+ else if (led->cache == T14S_EC_LED_BLINK)
+ new_state = T14S_EC_LED_BLINK;
+ else
+ new_state = T14S_EC_LED_ON;
+
+ return t14s_led_set_status(led->ec, led, new_state);
+}
+
+static int t14s_led_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct t14s_ec_led_classdev *led = container_of(led_cdev,
+ struct t14s_ec_led_classdev, led_classdev);
+
+ if (*delay_on == 0 && *delay_off == 0) {
+ /* Userspace does not provide a blink rate; we can choose it */
+ *delay_on = T14S_EC_BLINK_RATE_ON_OFF_MS;
+ *delay_off = T14S_EC_BLINK_RATE_ON_OFF_MS;
+ } else if ((*delay_on != T14S_EC_BLINK_RATE_ON_OFF_MS) ||
+ (*delay_off != T14S_EC_BLINK_RATE_ON_OFF_MS))
+ return -EINVAL;
+
+ return t14s_led_set_status(led->ec, led, T14S_EC_LED_BLINK);
+}
+
+static int t14s_init_led(struct t14s_ec *ec, struct t14s_ec_led_classdev *led,
+ u8 id, const char *name)
+{
+ led->led_classdev.name = name;
+ led->led_classdev.flags = LED_RETAIN_AT_SHUTDOWN;
+ led->led_classdev.max_brightness = 1;
+ led->led_classdev.brightness_set_blocking = t14s_led_brightness_set;
+ led->led_classdev.blink_set = t14s_led_blink_set;
+ led->ec = ec;
+ led->led = id;
+
+ return devm_led_classdev_register(ec->dev, &led->led_classdev);
+}
+
+static int t14s_leds_probe(struct t14s_ec *ec)
+{
+ int ret;
+
+ ret = t14s_init_led(ec, &ec->led_pwr_btn, 0, "platform::power");
+ if (ret)
+ return ret;
+
+ ret = t14s_init_led(ec, &ec->led_chrg_orange, 1,
+ "platform:amber:battery-charging");
+ if (ret)
+ return ret;
+
+ ret = t14s_init_led(ec, &ec->led_chrg_white, 2,
+ "platform:white:battery-full");
+ if (ret)
+ return ret;
+
+ ret = t14s_init_led(ec, &ec->led_lid_logo_dot, 10,
+ "platform::lid_logo_dot");
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int t14s_kbd_bl_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct t14s_ec *ec = container_of(led_cdev, struct t14s_ec,
+ kbd_backlight);
+ int ret;
+ u8 val;
+
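+ /* The selected level has to be mirrored into both backlight registers */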
+ val = FIELD_PREP(T14S_EC_KBD_BL1_MASK, brightness);
+ ret = regmap_update_bits(ec->regmap, T14S_EC_REG_KBD_BL1,
+ T14S_EC_KBD_BL1_MASK, val);
+ if (ret < 0)
+ return ret;
+
+ val = FIELD_PREP(T14S_EC_KBD_BL2_MASK, brightness);
+ ret = regmap_update_bits(ec->regmap, T14S_EC_REG_KBD_BL2,
+ T14S_EC_KBD_BL2_MASK, val);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static enum led_brightness t14s_kbd_bl_get(struct led_classdev *led_cdev)
+{
+ struct t14s_ec *ec = container_of(led_cdev, struct t14s_ec,
+ kbd_backlight);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(ec->regmap, T14S_EC_REG_KBD_BL1, &val);
+ if (ret < 0)
+ return ret;
+
+ return FIELD_GET(T14S_EC_KBD_BL1_MASK, val);
+}
+
+static void t14s_kbd_bl_update(struct t14s_ec *ec)
+{
+ enum led_brightness brightness = t14s_kbd_bl_get(&ec->kbd_backlight);
+
+ led_classdev_notify_brightness_hw_changed(&ec->kbd_backlight, brightness);
+}
+
+static int t14s_kbd_backlight_probe(struct t14s_ec *ec)
+{
+ ec->kbd_backlight.name = "platform::kbd_backlight";
+ ec->kbd_backlight.flags = LED_BRIGHT_HW_CHANGED;
+ ec->kbd_backlight.max_brightness = 2;
+ ec->kbd_backlight.brightness_set_blocking = t14s_kbd_bl_set;
+ ec->kbd_backlight.brightness_get = t14s_kbd_bl_get;
+
+ return devm_led_classdev_register(ec->dev, &ec->kbd_backlight);
+}
+
+static enum led_brightness t14s_audio_led_get(struct t14s_ec *ec, u8 led_bit)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(ec->regmap, T14S_EC_REG_AUD, &val);
+ if (ret < 0)
+ return ret;
+
+ return (val & led_bit) ? LED_ON : LED_OFF;
+}
+
+static int t14s_audio_led_set(struct t14s_ec *ec, u8 led_mask,
+ enum led_brightness brightness)
+{
+ return regmap_assign_bits(ec->regmap, T14S_EC_REG_AUD, led_mask, brightness > 0);
+}
+
+static enum led_brightness t14s_mic_mute_led_get(struct led_classdev *led_cdev)
+{
+ struct t14s_ec *ec = container_of(led_cdev, struct t14s_ec,
+ led_mic_mute);
+
+ return t14s_audio_led_get(ec, T14S_EC_MIC_MUTE_LED);
+}
+
+static int t14s_mic_mute_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct t14s_ec *ec = container_of(led_cdev, struct t14s_ec,
+ led_mic_mute);
+
+ return t14s_audio_led_set(ec, T14S_EC_MIC_MUTE_LED, brightness);
+}
+
+static enum led_brightness t14s_spk_mute_led_get(struct led_classdev *led_cdev)
+{
+ struct t14s_ec *ec = container_of(led_cdev, struct t14s_ec,
+ led_spk_mute);
+
+ return t14s_audio_led_get(ec, T14S_EC_SPK_MUTE_LED);
+}
+
+static int t14s_spk_mute_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct t14s_ec *ec = container_of(led_cdev, struct t14s_ec,
+ led_spk_mute);
+
+ return t14s_audio_led_set(ec, T14S_EC_SPK_MUTE_LED, brightness);
+}
+
+static int t14s_kbd_audio_led_probe(struct t14s_ec *ec)
+{
+ int ret;
+
+ ec->led_mic_mute.name = "platform::micmute";
+ ec->led_mic_mute.max_brightness = 1;
+ ec->led_mic_mute.default_trigger = "audio-micmute";
+ ec->led_mic_mute.brightness_set_blocking = t14s_mic_mute_led_set;
+ ec->led_mic_mute.brightness_get = t14s_mic_mute_led_get;
+
+ ec->led_spk_mute.name = "platform::mute";
+ ec->led_spk_mute.max_brightness = 1;
+ ec->led_spk_mute.default_trigger = "audio-mute";
+ ec->led_spk_mute.brightness_set_blocking = t14s_spk_mute_led_set;
+ ec->led_spk_mute.brightness_get = t14s_spk_mute_led_get;
+
+ ret = devm_led_classdev_register(ec->dev, &ec->led_mic_mute);
+ if (ret)
+ return ret;
+
+ return devm_led_classdev_register(ec->dev, &ec->led_spk_mute);
+}
+
+static const struct key_entry t14s_keymap[] = {
+ T14S_EC_KEY_ENTRY(FN_4, KEY_SLEEP),
+ T14S_EC_KEY_ENTRY(FN_N, KEY_VENDOR),
+ T14S_EC_KEY_ENTRY(FN_F4, KEY_MICMUTE),
+ T14S_EC_KEY_ENTRY(FN_F7, KEY_SWITCHVIDEOMODE),
+ T14S_EC_KEY_ENTRY(FN_F8, KEY_PERFORMANCE),
+ T14S_EC_KEY_ENTRY(FN_F10, KEY_SELECTIVE_SCREENSHOT),
+ T14S_EC_KEY_ENTRY(FN_F11, KEY_LINK_PHONE),
+ T14S_EC_KEY_ENTRY(FN_F12, KEY_BOOKMARKS),
+ T14S_EC_KEY_ENTRY(FN_SPACE, KEY_KBDILLUMTOGGLE),
+ T14S_EC_KEY_ENTRY(FN_ESC, KEY_FN_ESC),
+ T14S_EC_KEY_ENTRY(FN_TAB, KEY_ZOOM),
+ T14S_EC_KEY_ENTRY(FN_RIGHT_SHIFT, KEY_FN_RIGHT_SHIFT),
+ T14S_EC_KEY_ENTRY(TP_DOUBLE_TAP, KEY_PROG4),
+ { KE_END }
+};
+
+static int t14s_input_probe(struct t14s_ec *ec)
+{
+ int ret;
+
+ ec->inputdev = devm_input_allocate_device(ec->dev);
+ if (!ec->inputdev)
+ return -ENOMEM;
+
+ ec->inputdev->name = "ThinkPad Extra Buttons";
+ ec->inputdev->phys = "thinkpad/input0";
+ ec->inputdev->id.bustype = BUS_HOST;
+ ec->inputdev->dev.parent = ec->dev;
+
+ ret = sparse_keymap_setup(ec->inputdev, t14s_keymap, NULL);
+ if (ret)
+ return ret;
+
+ return input_register_device(ec->inputdev);
+}
+
+static irqreturn_t t14s_ec_irq_handler(int irq, void *data)
+{
+ struct t14s_ec *ec = data;
+ int ret;
+ u8 val;
+
+ ret = t14s_ec_read_evt(ec, &val);
+ if (ret < 0) {
+ dev_err(ec->dev, "Failed to read event\n");
+ return IRQ_HANDLED;
+ }
+
+ switch (val) {
+ case T14S_EC_EVT_NONE:
+ break;
+ case T14S_EC_EVT_KEY_FN_SPACE:
+ t14s_kbd_bl_update(ec);
+ fallthrough;
+ case T14S_EC_EVT_KEY_FN_F4:
+ case T14S_EC_EVT_KEY_FN_F7:
+ case T14S_EC_EVT_KEY_FN_4:
+ case T14S_EC_EVT_KEY_FN_F8:
+ case T14S_EC_EVT_KEY_FN_F12:
+ case T14S_EC_EVT_KEY_FN_TAB:
+ case T14S_EC_EVT_KEY_FN_F10:
+ case T14S_EC_EVT_KEY_FN_N:
+ case T14S_EC_EVT_KEY_FN_F11:
+ case T14S_EC_EVT_KEY_FN_ESC:
+ case T14S_EC_EVT_KEY_FN_RIGHT_SHIFT:
+ case T14S_EC_EVT_KEY_TP_DOUBLE_TAP:
+ sparse_keymap_report_event(ec->inputdev,
+ T14S_EC_KEY_EVT_OFFSET + val, 1, true);
+ break;
+ case T14S_EC_EVT_AC_CONNECTED:
+ dev_dbg(ec->dev, "AC connected\n");
+ break;
+ case T14S_EC_EVT_AC_DISCONNECTED:
+ dev_dbg(ec->dev, "AC disconnected\n");
+ break;
+ case T14S_EC_EVT_KEY_POWER:
+ dev_dbg(ec->dev, "power button\n");
+ break;
+ case T14S_EC_EVT_LID_OPEN:
+ dev_dbg(ec->dev, "LID open\n");
+ break;
+ case T14S_EC_EVT_LID_CLOSED:
+ dev_dbg(ec->dev, "LID closed\n");
+ break;
+ case T14S_EC_EVT_THERMAL_TZ40:
+ dev_dbg(ec->dev, "Thermal Zone 40 Status Change Event (CPU/GPU)\n");
+ break;
+ case T14S_EC_EVT_THERMAL_TZ42:
+ dev_dbg(ec->dev, "Thermal Zone 42 Status Change Event (Battery)\n");
+ break;
+ case T14S_EC_EVT_THERMAL_TZ39:
+ dev_dbg(ec->dev, "Thermal Zone 39 Status Change Event (CPU/GPU)\n");
+ break;
+ case T14S_EC_EVT_KEY_FN_G:
+ dev_dbg(ec->dev, "FN + G - toggle double-tapping\n");
+ break;
+ case T14S_EC_EVT_KEY_FN_L:
+ dev_dbg(ec->dev, "FN + L - low performance mode\n");
+ break;
+ case T14S_EC_EVT_KEY_FN_M:
+ dev_dbg(ec->dev, "FN + M - medium performance mode\n");
+ break;
+ case T14S_EC_EVT_KEY_FN_H:
+ dev_dbg(ec->dev, "FN + H - high performance mode\n");
+ break;
+ case T14S_EC_EVT_KEY_FN_T:
+ dev_dbg(ec->dev, "FN + T - toggle intelligent cooling mode\n");
+ break;
+ case T14S_EC_EVT_KEY_FN_D:
+ dev_dbg(ec->dev, "FN + D - toggle privacy guard mode\n");
+ break;
+ default:
+ dev_info(ec->dev, "Unknown EC event: 0x%02x\n", val);
+ break;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int t14s_ec_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct t14s_ec *ec;
+ int ret;
+
+ ec = devm_kzalloc(dev, sizeof(*ec), GFP_KERNEL);
+ if (!ec)
+ return -ENOMEM;
+
+ ec->dev = dev;
+
+ ec->regmap = devm_regmap_init(dev, &t14s_ec_regmap_bus,
+ ec, &t14s_ec_regmap_config);
+ if (IS_ERR(ec->regmap))
+ return dev_err_probe(dev, PTR_ERR(ec->regmap),
+ "Failed to init regmap\n");
+
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ t14s_ec_irq_handler,
+ IRQF_ONESHOT, dev_name(dev), ec);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get IRQ\n");
+
+ ret = t14s_leds_probe(ec);
+ if (ret < 0)
+ return ret;
+
+ ret = t14s_kbd_backlight_probe(ec);
+ if (ret < 0)
+ return ret;
+
+ ret = t14s_kbd_audio_led_probe(ec);
+ if (ret < 0)
+ return ret;
+
+ ret = t14s_input_probe(ec);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Disable wakeup support by default, because the driver currently does
+ * not support masking any events and the laptop should not wake up when
+ * the LID is closed.
+ */
+ device_wakeup_disable(dev);
+
+ return 0;
+}
+
+static const struct of_device_id t14s_ec_of_match[] = {
+ { .compatible = "lenovo,thinkpad-t14s-ec" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, t14s_ec_of_match);
+
+static const struct i2c_device_id t14s_ec_i2c_id_table[] = {
+ { "thinkpad-t14s-ec", },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, t14s_ec_i2c_id_table);
+
+static struct i2c_driver t14s_ec_i2c_driver = {
+ .driver = {
+ .name = "thinkpad-t14s-ec",
+ .of_match_table = t14s_ec_of_match,
+ },
+ .probe = t14s_ec_probe,
+ .id_table = t14s_ec_i2c_id_table,
+};
+module_i2c_driver(t14s_ec_i2c_driver);
+
+MODULE_AUTHOR("Sebastian Reichel <sre@kernel.org>");
+MODULE_DESCRIPTION("Lenovo Thinkpad T14s Embedded Controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
index fd58781a2fb7..1da79e3d215b 100644
--- a/drivers/platform/chrome/cros_ec.c
+++ b/drivers/platform/chrome/cros_ec.c
@@ -9,6 +9,7 @@
* battery charging and regulator control, firmware update.
*/
+#include <linux/cleanup.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_platform.h>
@@ -30,6 +31,56 @@ static struct cros_ec_platform pd_p = {
.cmd_offset = EC_CMD_PASSTHRU_OFFSET(CROS_EC_DEV_PD_INDEX),
};
+static void cros_ec_device_free(void *data)
+{
+ struct cros_ec_device *ec_dev = data;
+
+ mutex_destroy(&ec_dev->lock);
+ lockdep_unregister_key(&ec_dev->lockdep_key);
+}
+
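+/**
+ * cros_ec_device_alloc() - Allocate and pre-initialize a cros_ec_device.
+ * @dev: Parent device.
+ *
+ * Allocates the device together with its din/dout buffers and initializes
+ * the notifier heads and the command mutex, so transport drivers only need
+ * to fill in their transport-specific fields before cros_ec_register().
+ *
+ * Return: pointer to the new device, or NULL on allocation failure.
+ */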
+struct cros_ec_device *cros_ec_device_alloc(struct device *dev)
+{
+ struct cros_ec_device *ec_dev;
+
+ ec_dev = devm_kzalloc(dev, sizeof(*ec_dev), GFP_KERNEL);
+ if (!ec_dev)
+ return NULL;
+
+ ec_dev->din_size = sizeof(struct ec_host_response) +
+ sizeof(struct ec_response_get_protocol_info) +
+ EC_MAX_RESPONSE_OVERHEAD;
+ ec_dev->dout_size = sizeof(struct ec_host_request) +
+ sizeof(struct ec_params_rwsig_action) +
+ EC_MAX_REQUEST_OVERHEAD;
+
+ ec_dev->din = devm_kzalloc(dev, ec_dev->din_size, GFP_KERNEL);
+ if (!ec_dev->din)
+ return NULL;
+
+ ec_dev->dout = devm_kzalloc(dev, ec_dev->dout_size, GFP_KERNEL);
+ if (!ec_dev->dout)
+ return NULL;
+
+ ec_dev->dev = dev;
+ ec_dev->max_response = sizeof(struct ec_response_get_protocol_info);
+ ec_dev->max_request = sizeof(struct ec_params_rwsig_action);
+ ec_dev->suspend_timeout_ms = EC_HOST_SLEEP_TIMEOUT_DEFAULT;
+
+ BLOCKING_INIT_NOTIFIER_HEAD(&ec_dev->event_notifier);
+ BLOCKING_INIT_NOTIFIER_HEAD(&ec_dev->panic_notifier);
+
+ lockdep_register_key(&ec_dev->lockdep_key);
+ mutex_init(&ec_dev->lock);
+ lockdep_set_class(&ec_dev->lock, &ec_dev->lockdep_key);
+
+ if (devm_add_action_or_reset(dev, cros_ec_device_free, ec_dev))
+ return NULL;
+
+ return ec_dev;
+}
+EXPORT_SYMBOL(cros_ec_device_alloc);
+
/**
* cros_ec_irq_handler() - top half part of the interrupt handler
* @irq: IRQ id
@@ -102,14 +153,13 @@ EXPORT_SYMBOL(cros_ec_irq_thread);
static int cros_ec_sleep_event(struct cros_ec_device *ec_dev, u8 sleep_event)
{
int ret;
- struct {
- struct cros_ec_command msg;
+ TRAILING_OVERLAP(struct cros_ec_command, msg, data,
union {
struct ec_params_host_sleep_event req0;
struct ec_params_host_sleep_event_v1 req1;
struct ec_response_host_sleep_event_v1 resp1;
} u;
- } __packed buf;
+ ) __packed buf;
memset(&buf, 0, sizeof(buf));
@@ -180,29 +230,7 @@ static int cros_ec_ready_event(struct notifier_block *nb,
int cros_ec_register(struct cros_ec_device *ec_dev)
{
struct device *dev = ec_dev->dev;
- int err = 0;
-
- BLOCKING_INIT_NOTIFIER_HEAD(&ec_dev->event_notifier);
- BLOCKING_INIT_NOTIFIER_HEAD(&ec_dev->panic_notifier);
-
- ec_dev->max_request = sizeof(struct ec_params_hello);
- ec_dev->max_response = sizeof(struct ec_response_get_protocol_info);
- ec_dev->max_passthru = 0;
- ec_dev->ec = NULL;
- ec_dev->pd = NULL;
- ec_dev->suspend_timeout_ms = EC_HOST_SLEEP_TIMEOUT_DEFAULT;
-
- ec_dev->din = devm_kzalloc(dev, ec_dev->din_size, GFP_KERNEL);
- if (!ec_dev->din)
- return -ENOMEM;
-
- ec_dev->dout = devm_kzalloc(dev, ec_dev->dout_size, GFP_KERNEL);
- if (!ec_dev->dout)
- return -ENOMEM;
-
- lockdep_register_key(&ec_dev->lockdep_key);
- mutex_init(&ec_dev->lock);
- lockdep_set_class(&ec_dev->lock, &ec_dev->lockdep_key);
+ int err;
/* Send RWSIG continue to jump to RW for devices using RWSIG. */
err = cros_ec_rwsig_continue(ec_dev);
@@ -289,6 +317,9 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
goto exit;
}
+ scoped_guard(mutex, &ec_dev->lock)
+ ec_dev->registered = true;
+
dev_info(dev, "Chrome EC device registered\n");
/*
@@ -302,8 +333,6 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
exit:
platform_device_unregister(ec_dev->ec);
platform_device_unregister(ec_dev->pd);
- mutex_destroy(&ec_dev->lock);
- lockdep_unregister_key(&ec_dev->lockdep_key);
return err;
}
EXPORT_SYMBOL(cros_ec_register);
@@ -318,13 +347,14 @@ EXPORT_SYMBOL(cros_ec_register);
*/
void cros_ec_unregister(struct cros_ec_device *ec_dev)
{
+ scoped_guard(mutex, &ec_dev->lock)
+ ec_dev->registered = false;
+
if (ec_dev->mkbp_event_supported)
blocking_notifier_chain_unregister(&ec_dev->event_notifier,
&ec_dev->notifier_ready);
platform_device_unregister(ec_dev->pd);
platform_device_unregister(ec_dev->ec);
- mutex_destroy(&ec_dev->lock);
- lockdep_unregister_key(&ec_dev->lockdep_key);
}
EXPORT_SYMBOL(cros_ec_unregister);
diff --git a/drivers/platform/chrome/cros_ec.h b/drivers/platform/chrome/cros_ec.h
index 6b95f1e0bace..cd4643bc5367 100644
--- a/drivers/platform/chrome/cros_ec.h
+++ b/drivers/platform/chrome/cros_ec.h
@@ -11,6 +11,9 @@
#include <linux/interrupt.h>
struct cros_ec_device;
+struct device;
+
+struct cros_ec_device *cros_ec_device_alloc(struct device *dev);
int cros_ec_register(struct cros_ec_device *ec_dev);
void cros_ec_unregister(struct cros_ec_device *ec_dev);
diff --git a/drivers/platform/chrome/cros_ec_chardev.c b/drivers/platform/chrome/cros_ec_chardev.c
index 21a484385fc5..c9d80ad5b57e 100644
--- a/drivers/platform/chrome/cros_ec_chardev.c
+++ b/drivers/platform/chrome/cros_ec_chardev.c
@@ -31,18 +31,14 @@
/* Arbitrary bounded size for the event queue */
#define CROS_MAX_EVENT_LEN PAGE_SIZE
-struct chardev_data {
- struct cros_ec_dev *ec_dev;
- struct miscdevice misc;
-};
-
struct chardev_priv {
- struct cros_ec_dev *ec_dev;
+ struct cros_ec_device *ec_dev;
struct notifier_block notifier;
wait_queue_head_t wait_event;
unsigned long event_mask;
struct list_head events;
size_t event_len;
+ u16 cmd_offset;
};
struct ec_event {
@@ -52,7 +48,7 @@ struct ec_event {
u8 data[];
};
-static int ec_get_version(struct cros_ec_dev *ec, char *str, int maxlen)
+static int ec_get_version(struct chardev_priv *priv, char *str, int maxlen)
{
static const char * const current_image_name[] = {
"unknown", "read-only", "read-write", "invalid",
@@ -65,10 +61,10 @@ static int ec_get_version(struct cros_ec_dev *ec, char *str, int maxlen)
if (!msg)
return -ENOMEM;
- msg->command = EC_CMD_GET_VERSION + ec->cmd_offset;
+ msg->command = EC_CMD_GET_VERSION + priv->cmd_offset;
msg->insize = sizeof(*resp);
- ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ ret = cros_ec_cmd_xfer_status(priv->ec_dev, msg);
if (ret < 0) {
snprintf(str, maxlen,
"Unknown EC version, returned error: %d\n",
@@ -96,7 +92,7 @@ static int cros_ec_chardev_mkbp_event(struct notifier_block *nb,
{
struct chardev_priv *priv = container_of(nb, struct chardev_priv,
notifier);
- struct cros_ec_device *ec_dev = priv->ec_dev->ec_dev;
+ struct cros_ec_device *ec_dev = priv->ec_dev;
struct ec_event *event;
unsigned long event_bit = 1 << ec_dev->event_data.event_type;
int total_size = sizeof(*event) + ec_dev->event_size;
@@ -161,7 +157,8 @@ out:
static int cros_ec_chardev_open(struct inode *inode, struct file *filp)
{
struct miscdevice *mdev = filp->private_data;
- struct cros_ec_dev *ec_dev = dev_get_drvdata(mdev->parent);
+ struct cros_ec_dev *ec = dev_get_drvdata(mdev->parent);
+ struct cros_ec_device *ec_dev = ec->ec_dev;
struct chardev_priv *priv;
int ret;
@@ -170,13 +167,14 @@ static int cros_ec_chardev_open(struct inode *inode, struct file *filp)
return -ENOMEM;
priv->ec_dev = ec_dev;
+ priv->cmd_offset = ec->cmd_offset;
filp->private_data = priv;
INIT_LIST_HEAD(&priv->events);
init_waitqueue_head(&priv->wait_event);
nonseekable_open(inode, filp);
priv->notifier.notifier_call = cros_ec_chardev_mkbp_event;
- ret = blocking_notifier_chain_register(&ec_dev->ec_dev->event_notifier,
+ ret = blocking_notifier_chain_register(&ec_dev->event_notifier,
&priv->notifier);
if (ret) {
dev_err(ec_dev->dev, "failed to register event notifier\n");
@@ -204,7 +202,6 @@ static ssize_t cros_ec_chardev_read(struct file *filp, char __user *buffer,
char msg[sizeof(struct ec_response_get_version) +
sizeof(CROS_EC_DEV_VERSION)];
struct chardev_priv *priv = filp->private_data;
- struct cros_ec_dev *ec_dev = priv->ec_dev;
size_t count;
int ret;
@@ -238,7 +235,7 @@ static ssize_t cros_ec_chardev_read(struct file *filp, char __user *buffer,
if (*offset != 0)
return 0;
- ret = ec_get_version(ec_dev, msg, sizeof(msg));
+ ret = ec_get_version(priv, msg, sizeof(msg));
if (ret)
return ret;
@@ -254,10 +251,10 @@ static ssize_t cros_ec_chardev_read(struct file *filp, char __user *buffer,
static int cros_ec_chardev_release(struct inode *inode, struct file *filp)
{
struct chardev_priv *priv = filp->private_data;
- struct cros_ec_dev *ec_dev = priv->ec_dev;
+ struct cros_ec_device *ec_dev = priv->ec_dev;
struct ec_event *event, *e;
- blocking_notifier_chain_unregister(&ec_dev->ec_dev->event_notifier,
+ blocking_notifier_chain_unregister(&ec_dev->event_notifier,
&priv->notifier);
list_for_each_entry_safe(event, e, &priv->events, node) {
@@ -272,7 +269,7 @@ static int cros_ec_chardev_release(struct inode *inode, struct file *filp)
/*
* Ioctls
*/
-static long cros_ec_chardev_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
+static long cros_ec_chardev_ioctl_xcmd(struct chardev_priv *priv, void __user *arg)
{
struct cros_ec_command *s_cmd;
struct cros_ec_command u_cmd;
@@ -301,8 +298,8 @@ static long cros_ec_chardev_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
goto exit;
}
- s_cmd->command += ec->cmd_offset;
- ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd);
+ s_cmd->command += priv->cmd_offset;
+ ret = cros_ec_cmd_xfer(priv->ec_dev, s_cmd);
/* Only copy data to userland if data was received. */
if (ret < 0)
goto exit;
@@ -314,10 +311,9 @@ exit:
return ret;
}
-static long cros_ec_chardev_ioctl_readmem(struct cros_ec_dev *ec,
- void __user *arg)
+static long cros_ec_chardev_ioctl_readmem(struct chardev_priv *priv, void __user *arg)
{
- struct cros_ec_device *ec_dev = ec->ec_dev;
+ struct cros_ec_device *ec_dev = priv->ec_dev;
struct cros_ec_readmem s_mem = { };
long num;
@@ -346,16 +342,15 @@ static long cros_ec_chardev_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
struct chardev_priv *priv = filp->private_data;
- struct cros_ec_dev *ec = priv->ec_dev;
if (_IOC_TYPE(cmd) != CROS_EC_DEV_IOC)
return -ENOTTY;
switch (cmd) {
case CROS_EC_DEV_IOCXCMD:
- return cros_ec_chardev_ioctl_xcmd(ec, (void __user *)arg);
+ return cros_ec_chardev_ioctl_xcmd(priv, (void __user *)arg);
case CROS_EC_DEV_IOCRDMEM:
- return cros_ec_chardev_ioctl_readmem(ec, (void __user *)arg);
+ return cros_ec_chardev_ioctl_readmem(priv, (void __user *)arg);
case CROS_EC_DEV_IOCEVENTMASK:
priv->event_mask = arg;
return 0;
@@ -377,31 +372,30 @@ static const struct file_operations chardev_fops = {
static int cros_ec_chardev_probe(struct platform_device *pdev)
{
- struct cros_ec_dev *ec_dev = dev_get_drvdata(pdev->dev.parent);
- struct cros_ec_platform *ec_platform = dev_get_platdata(ec_dev->dev);
- struct chardev_data *data;
+ struct cros_ec_dev *ec = dev_get_drvdata(pdev->dev.parent);
+ struct cros_ec_platform *ec_platform = dev_get_platdata(ec->dev);
+ struct miscdevice *misc;
/* Create a char device: we want to create it anew */
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
+ misc = devm_kzalloc(&pdev->dev, sizeof(*misc), GFP_KERNEL);
+ if (!misc)
return -ENOMEM;
- data->ec_dev = ec_dev;
- data->misc.minor = MISC_DYNAMIC_MINOR;
- data->misc.fops = &chardev_fops;
- data->misc.name = ec_platform->ec_name;
- data->misc.parent = pdev->dev.parent;
+ misc->minor = MISC_DYNAMIC_MINOR;
+ misc->fops = &chardev_fops;
+ misc->name = ec_platform->ec_name;
+ misc->parent = pdev->dev.parent;
- dev_set_drvdata(&pdev->dev, data);
+ dev_set_drvdata(&pdev->dev, misc);
- return misc_register(&data->misc);
+ return misc_register(misc);
}
static void cros_ec_chardev_remove(struct platform_device *pdev)
{
- struct chardev_data *data = dev_get_drvdata(&pdev->dev);
+ struct miscdevice *misc = dev_get_drvdata(&pdev->dev);
- misc_deregister(&data->misc);
+ misc_deregister(misc);
}
static const struct platform_device_id cros_ec_chardev_id[] = {
diff --git a/drivers/platform/chrome/cros_ec_i2c.c b/drivers/platform/chrome/cros_ec_i2c.c
index 38af97cdaab2..def1144a077e 100644
--- a/drivers/platform/chrome/cros_ec_i2c.c
+++ b/drivers/platform/chrome/cros_ec_i2c.c
@@ -289,24 +289,19 @@ done:
static int cros_ec_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
- struct cros_ec_device *ec_dev = NULL;
+ struct cros_ec_device *ec_dev;
int err;
- ec_dev = devm_kzalloc(dev, sizeof(*ec_dev), GFP_KERNEL);
+ ec_dev = cros_ec_device_alloc(dev);
if (!ec_dev)
return -ENOMEM;
i2c_set_clientdata(client, ec_dev);
- ec_dev->dev = dev;
ec_dev->priv = client;
ec_dev->irq = client->irq;
ec_dev->cmd_xfer = cros_ec_cmd_xfer_i2c;
ec_dev->pkt_xfer = cros_ec_pkt_xfer_i2c;
ec_dev->phys_name = client->adapter->name;
- ec_dev->din_size = sizeof(struct ec_host_response_i2c) +
- sizeof(struct ec_response_get_protocol_info);
- ec_dev->dout_size = sizeof(struct ec_host_request_i2c) +
- sizeof(struct ec_params_rwsig_action);
err = cros_ec_register(ec_dev);
if (err) {
diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c
index 7e7190b30cbb..4e74e702c5a2 100644
--- a/drivers/platform/chrome/cros_ec_ishtp.c
+++ b/drivers/platform/chrome/cros_ec_ishtp.c
@@ -543,21 +543,17 @@ static int cros_ec_dev_init(struct ishtp_cl_data *client_data)
struct cros_ec_device *ec_dev;
struct device *dev = cl_data_to_dev(client_data);
- ec_dev = devm_kzalloc(dev, sizeof(*ec_dev), GFP_KERNEL);
+ ec_dev = cros_ec_device_alloc(dev);
if (!ec_dev)
return -ENOMEM;
client_data->ec_dev = ec_dev;
dev->driver_data = ec_dev;
- ec_dev->dev = dev;
ec_dev->priv = client_data->cros_ish_cl;
ec_dev->cmd_xfer = NULL;
ec_dev->pkt_xfer = cros_ec_pkt_xfer_ish;
ec_dev->phys_name = dev_name(dev);
- ec_dev->din_size = sizeof(struct cros_ish_in_msg) +
- sizeof(struct ec_response_get_protocol_info);
- ec_dev->dout_size = sizeof(struct cros_ish_out_msg) + sizeof(struct ec_params_rwsig_action);
return cros_ec_register(ec_dev);
}
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
index 7d9a78289c96..78cfff80cdea 100644
--- a/drivers/platform/chrome/cros_ec_lpc.c
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -637,19 +637,15 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
}
}
- ec_dev = devm_kzalloc(dev, sizeof(*ec_dev), GFP_KERNEL);
+ ec_dev = cros_ec_device_alloc(dev);
if (!ec_dev)
return -ENOMEM;
platform_set_drvdata(pdev, ec_dev);
- ec_dev->dev = dev;
ec_dev->phys_name = dev_name(dev);
ec_dev->cmd_xfer = cros_ec_cmd_xfer_lpc;
ec_dev->pkt_xfer = cros_ec_pkt_xfer_lpc;
ec_dev->cmd_readmem = cros_ec_lpc_readmem;
- ec_dev->din_size = sizeof(struct ec_host_response) +
- sizeof(struct ec_response_get_protocol_info);
- ec_dev->dout_size = sizeof(struct ec_host_request) + sizeof(struct ec_params_rwsig_action);
ec_dev->priv = ec_lpc;
/*
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 3e94a0a82173..1d8d9168ec1a 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -3,6 +3,7 @@
//
// Copyright (C) 2015 Google, Inc
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/limits.h>
@@ -1153,5 +1154,19 @@ int cros_ec_get_cmd_versions(struct cros_ec_device *ec_dev, u16 cmd)
}
EXPORT_SYMBOL_GPL(cros_ec_get_cmd_versions);
+/**
+ * cros_ec_device_registered() - Return whether the ec_dev is registered.
+ *
+ * @ec_dev: EC device
+ *
+ * Return: true if registered. Otherwise, false.
+ */
+bool cros_ec_device_registered(struct cros_ec_device *ec_dev)
+{
+ guard(mutex)(&ec_dev->lock);
+ return ec_dev->registered;
+}
+EXPORT_SYMBOL_GPL(cros_ec_device_registered);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ChromeOS EC communication protocol helpers");
diff --git a/drivers/platform/chrome/cros_ec_rpmsg.c b/drivers/platform/chrome/cros_ec_rpmsg.c
index bc2666491db1..09bd9e49464e 100644
--- a/drivers/platform/chrome/cros_ec_rpmsg.c
+++ b/drivers/platform/chrome/cros_ec_rpmsg.c
@@ -216,7 +216,7 @@ static int cros_ec_rpmsg_probe(struct rpmsg_device *rpdev)
struct cros_ec_device *ec_dev;
int ret;
- ec_dev = devm_kzalloc(dev, sizeof(*ec_dev), GFP_KERNEL);
+ ec_dev = cros_ec_device_alloc(dev);
if (!ec_dev)
return -ENOMEM;
@@ -224,14 +224,10 @@ static int cros_ec_rpmsg_probe(struct rpmsg_device *rpdev)
if (!ec_rpmsg)
return -ENOMEM;
- ec_dev->dev = dev;
ec_dev->priv = ec_rpmsg;
ec_dev->cmd_xfer = cros_ec_cmd_xfer_rpmsg;
ec_dev->pkt_xfer = cros_ec_pkt_xfer_rpmsg;
ec_dev->phys_name = dev_name(&rpdev->dev);
- ec_dev->din_size = sizeof(struct ec_host_response) +
- sizeof(struct ec_response_get_protocol_info);
- ec_dev->dout_size = sizeof(struct ec_host_request) + sizeof(struct ec_params_rwsig_action);
dev_set_drvdata(dev, ec_dev);
ec_rpmsg->rpdev = rpdev;
diff --git a/drivers/platform/chrome/cros_ec_spi.c b/drivers/platform/chrome/cros_ec_spi.c
index 8ca0f854e7ac..28fa82f8cb07 100644
--- a/drivers/platform/chrome/cros_ec_spi.c
+++ b/drivers/platform/chrome/cros_ec_spi.c
@@ -749,7 +749,7 @@ static int cros_ec_spi_probe(struct spi_device *spi)
if (ec_spi == NULL)
return -ENOMEM;
ec_spi->spi = spi;
- ec_dev = devm_kzalloc(dev, sizeof(*ec_dev), GFP_KERNEL);
+ ec_dev = cros_ec_device_alloc(dev);
if (!ec_dev)
return -ENOMEM;
@@ -757,16 +757,11 @@ static int cros_ec_spi_probe(struct spi_device *spi)
cros_ec_spi_dt_probe(ec_spi, dev);
spi_set_drvdata(spi, ec_dev);
- ec_dev->dev = dev;
ec_dev->priv = ec_spi;
ec_dev->irq = spi->irq;
ec_dev->cmd_xfer = cros_ec_cmd_xfer_spi;
ec_dev->pkt_xfer = cros_ec_pkt_xfer_spi;
ec_dev->phys_name = dev_name(&ec_spi->spi->dev);
- ec_dev->din_size = EC_MSG_PREAMBLE_COUNT +
- sizeof(struct ec_host_response) +
- sizeof(struct ec_response_get_protocol_info);
- ec_dev->dout_size = sizeof(struct ec_host_request) + sizeof(struct ec_params_rwsig_action);
ec_spi->last_transfer_ns = ktime_get_ns();
diff --git a/drivers/platform/chrome/cros_ec_uart.c b/drivers/platform/chrome/cros_ec_uart.c
index 19c179d49c90..d5b37414ff12 100644
--- a/drivers/platform/chrome/cros_ec_uart.c
+++ b/drivers/platform/chrome/cros_ec_uart.c
@@ -259,7 +259,7 @@ static int cros_ec_uart_probe(struct serdev_device *serdev)
if (!ec_uart)
return -ENOMEM;
- ec_dev = devm_kzalloc(dev, sizeof(*ec_dev), GFP_KERNEL);
+ ec_dev = cros_ec_device_alloc(dev);
if (!ec_dev)
return -ENOMEM;
@@ -276,14 +276,10 @@ static int cros_ec_uart_probe(struct serdev_device *serdev)
/* Initialize ec_dev for cros_ec */
ec_dev->phys_name = dev_name(dev);
- ec_dev->dev = dev;
ec_dev->priv = ec_uart;
ec_dev->irq = ec_uart->irq;
ec_dev->cmd_xfer = NULL;
ec_dev->pkt_xfer = cros_ec_uart_pkt_xfer;
- ec_dev->din_size = sizeof(struct ec_host_response) +
- sizeof(struct ec_response_get_protocol_info);
- ec_dev->dout_size = sizeof(struct ec_host_request) + sizeof(struct ec_params_rwsig_action);
serdev_device_set_client_ops(serdev, &cros_ec_uart_client_ops);
diff --git a/drivers/platform/chrome/wilco_ec/telemetry.c b/drivers/platform/chrome/wilco_ec/telemetry.c
index 7d8ae2cbf72f..b18043e31ae4 100644
--- a/drivers/platform/chrome/wilco_ec/telemetry.c
+++ b/drivers/platform/chrome/wilco_ec/telemetry.c
@@ -388,7 +388,7 @@ static int telem_device_probe(struct platform_device *pdev)
dev_set_name(&dev_data->dev, TELEM_DEV_NAME_FMT, minor);
device_initialize(&dev_data->dev);
- /* Initialize the character device and add it to userspace */;
+ /* Initialize the character device and add it to userspace */
cdev_init(&dev_data->cdev, &telem_fops);
error = cdev_device_add(&dev_data->cdev, &dev_data->dev);
if (error) {
diff --git a/drivers/platform/cznic/turris-omnia-mcu-gpio.c b/drivers/platform/cznic/turris-omnia-mcu-gpio.c
index 77184c8b42ea..7f0ada4fa606 100644
--- a/drivers/platform/cznic/turris-omnia-mcu-gpio.c
+++ b/drivers/platform/cznic/turris-omnia-mcu-gpio.c
@@ -1024,8 +1024,8 @@ int omnia_mcu_register_gpiochip(struct omnia_mcu *mcu)
mcu->gc.direction_output = omnia_gpio_direction_output;
mcu->gc.get = omnia_gpio_get;
mcu->gc.get_multiple = omnia_gpio_get_multiple;
- mcu->gc.set_rv = omnia_gpio_set;
- mcu->gc.set_multiple_rv = omnia_gpio_set_multiple;
+ mcu->gc.set = omnia_gpio_set;
+ mcu->gc.set_multiple = omnia_gpio_set_multiple;
mcu->gc.init_valid_mask = omnia_gpio_init_valid_mask;
mcu->gc.can_sleep = true;
mcu->gc.names = omnia_mcu_gpio_names;
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 6d238e120dce..46e62feeda3c 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -118,6 +118,18 @@ config XIAOMI_WMI
To compile this driver as a module, choose M here: the module will
be called xiaomi-wmi.
+config REDMI_WMI
+ tristate "Redmibook WMI key driver"
+ depends on ACPI_WMI
+ depends on INPUT
+ select INPUT_SPARSEKMAP
+ help
+ Say Y here if you want support for WMI-based hotkey events on
+ Xiaomi Redmibook devices.
+
+ To compile this driver as a module, choose M here: the module will
+ be called redmi-wmi.
+
config GIGABYTE_WMI
tristate "Gigabyte WMI temperature driver"
depends on ACPI_WMI
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index a0c5848513e3..c7db2a88c11a 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_HUAWEI_WMI) += huawei-wmi.o
obj-$(CONFIG_MXM_WMI) += mxm-wmi.o
obj-$(CONFIG_NVIDIA_WMI_EC_BACKLIGHT) += nvidia-wmi-ec-backlight.o
obj-$(CONFIG_XIAOMI_WMI) += xiaomi-wmi.o
+obj-$(CONFIG_REDMI_WMI) += redmi-wmi.o
obj-$(CONFIG_GIGABYTE_WMI) += gigabyte-wmi.o
# Acer
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 69336bd778ee..13eb22b35aa8 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -129,6 +129,7 @@ enum acer_wmi_predator_v4_oc {
enum acer_wmi_gaming_misc_setting {
ACER_WMID_MISC_SETTING_OC_1 = 0x0005,
ACER_WMID_MISC_SETTING_OC_2 = 0x0007,
+ /* Unreliable on some models */
ACER_WMID_MISC_SETTING_SUPPORTED_PROFILES = 0x000A,
ACER_WMID_MISC_SETTING_PLATFORM_PROFILE = 0x000B,
};
@@ -794,9 +795,6 @@ static bool platform_profile_support;
*/
static int last_non_turbo_profile = INT_MIN;
-/* The most performant supported profile */
-static int acer_predator_v4_max_perf;
-
enum acer_predator_v4_thermal_profile {
ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET = 0x00,
ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED = 0x01,
@@ -2014,7 +2012,7 @@ acer_predator_v4_platform_profile_set(struct device *dev,
if (err)
return err;
- if (tp != acer_predator_v4_max_perf)
+ if (tp != ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO)
last_non_turbo_profile = tp;
return 0;
@@ -2023,55 +2021,14 @@ acer_predator_v4_platform_profile_set(struct device *dev,
static int
acer_predator_v4_platform_profile_probe(void *drvdata, unsigned long *choices)
{
- unsigned long supported_profiles;
- int err;
+ set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+ set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, choices);
+ set_bit(PLATFORM_PROFILE_BALANCED, choices);
+ set_bit(PLATFORM_PROFILE_QUIET, choices);
+ set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
- err = WMID_gaming_get_misc_setting(ACER_WMID_MISC_SETTING_SUPPORTED_PROFILES,
- (u8 *)&supported_profiles);
- if (err)
- return err;
-
- /* Iterate through supported profiles in order of increasing performance */
- if (test_bit(ACER_PREDATOR_V4_THERMAL_PROFILE_ECO, &supported_profiles)) {
- set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
- acer_predator_v4_max_perf = ACER_PREDATOR_V4_THERMAL_PROFILE_ECO;
- last_non_turbo_profile = ACER_PREDATOR_V4_THERMAL_PROFILE_ECO;
- }
-
- if (test_bit(ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET, &supported_profiles)) {
- set_bit(PLATFORM_PROFILE_QUIET, choices);
- acer_predator_v4_max_perf = ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET;
- last_non_turbo_profile = ACER_PREDATOR_V4_THERMAL_PROFILE_QUIET;
- }
-
- if (test_bit(ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED, &supported_profiles)) {
- set_bit(PLATFORM_PROFILE_BALANCED, choices);
- acer_predator_v4_max_perf = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED;
- last_non_turbo_profile = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED;
- }
-
- if (test_bit(ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE, &supported_profiles)) {
- set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, choices);
- acer_predator_v4_max_perf = ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE;
-
- /* We only use this profile as a fallback option in case no prior
- * profile is supported.
- */
- if (last_non_turbo_profile < 0)
- last_non_turbo_profile = ACER_PREDATOR_V4_THERMAL_PROFILE_PERFORMANCE;
- }
-
- if (test_bit(ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO, &supported_profiles)) {
- set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
- acer_predator_v4_max_perf = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO;
-
- /* We need to handle the hypothetical case where only the turbo profile
- * is supported. In this case the turbo toggle will essentially be a
- * no-op.
- */
- if (last_non_turbo_profile < 0)
- last_non_turbo_profile = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO;
- }
+ /* Set default non-turbo profile */
+ last_non_turbo_profile = ACER_PREDATOR_V4_THERMAL_PROFILE_BALANCED;
return 0;
}
@@ -2108,19 +2065,15 @@ static int acer_thermal_profile_change(void)
if (cycle_gaming_thermal_profile) {
platform_profile_cycle();
} else {
- /* Do nothing if no suitable platform profiles where found */
- if (last_non_turbo_profile < 0)
- return 0;
-
err = WMID_gaming_get_misc_setting(
ACER_WMID_MISC_SETTING_PLATFORM_PROFILE, &current_tp);
if (err)
return err;
- if (current_tp == acer_predator_v4_max_perf)
+ if (current_tp == ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO)
tp = last_non_turbo_profile;
else
- tp = acer_predator_v4_max_perf;
+ tp = ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO;
err = WMID_gaming_set_misc_setting(
ACER_WMID_MISC_SETTING_PLATFORM_PROFILE, tp);
@@ -2128,7 +2081,7 @@ static int acer_thermal_profile_change(void)
return err;
/* Store last profile for toggle */
- if (current_tp != acer_predator_v4_max_perf)
+ if (current_tp != ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO)
last_non_turbo_profile = current_tp;
platform_profile_notify(platform_profile_device);
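
With the unreliable SUPPORTED_PROFILES query gone, the toggle no longer needs the computed acer_predator_v4_max_perf and pivots on the TURBO profile directly. The decision, distilled into a hypothetical helper using the constants from the hunk above:

	/* Toggle between turbo and the last profile used outside turbo. */
	static int next_thermal_profile(int current_tp, int last_non_turbo_profile)
	{
		if (current_tp == ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO)
			return last_non_turbo_profile;
		return ACER_PREDATOR_V4_THERMAL_PROFILE_TURBO;
	}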
diff --git a/drivers/platform/x86/amd/hfi/hfi.c b/drivers/platform/x86/amd/hfi/hfi.c
index 4f56149b3774..a465ac6f607e 100644
--- a/drivers/platform/x86/amd/hfi/hfi.c
+++ b/drivers/platform/x86/amd/hfi/hfi.c
@@ -385,12 +385,16 @@ static int amd_hfi_metadata_parser(struct platform_device *pdev,
amd_hfi_data->pcct_entry = pcct_entry;
pcct_ext = (struct acpi_pcct_ext_pcc_slave *)pcct_entry;
- if (pcct_ext->length <= 0)
- return -EINVAL;
+ if (pcct_ext->length <= 0) {
+ ret = -EINVAL;
+ goto out;
+ }
amd_hfi_data->shmem = devm_kzalloc(amd_hfi_data->dev, pcct_ext->length, GFP_KERNEL);
- if (!amd_hfi_data->shmem)
- return -ENOMEM;
+ if (!amd_hfi_data->shmem) {
+ ret = -ENOMEM;
+ goto out;
+ }
pcc_chan->shmem_base_addr = pcct_ext->base_address;
pcc_chan->shmem_size = pcct_ext->length;
@@ -398,6 +402,8 @@ static int amd_hfi_metadata_parser(struct platform_device *pdev,
/* parse the shared memory info from the PCCT table */
ret = amd_hfi_fill_metadata(amd_hfi_data);
+out:
+ /* Don't leak any ACPI memory */
acpi_put_table(pcct_tbl);
return ret;
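
The amd_hfi hunk converts two early returns into goto out so that acpi_put_table() runs on every exit once the PCCT table is held. A minimal sketch of the idiom; table_is_valid() is a hypothetical stand-in for the real checks:

	static int parse_with_cleanup(void)
	{
		struct acpi_table_header *tbl;
		int ret = 0;

		if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_PCCT, 0, &tbl)))
			return -ENODEV;		/* nothing held yet, plain return is fine */

		if (!table_is_valid(tbl)) {	/* hypothetical validation */
			ret = -EINVAL;
			goto out;		/* returning here directly would leak tbl */
		}

	out:
		acpi_put_table(tbl);		/* runs on success and failure alike */
		return ret;
	}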
diff --git a/drivers/platform/x86/amd/hsmp/acpi.c b/drivers/platform/x86/amd/hsmp/acpi.c
index 54986a752f7d..d0b74d243ce4 100644
--- a/drivers/platform/x86/amd/hsmp/acpi.c
+++ b/drivers/platform/x86/amd/hsmp/acpi.c
@@ -495,16 +495,16 @@ static int init_acpi(struct device *dev)
if (hsmp_pdev->proto_ver == HSMP_PROTO_VER6) {
ret = hsmp_get_tbl_dram_base(sock_ind);
if (ret)
- dev_err(dev, "Failed to init metric table\n");
+ dev_info(dev, "Failed to init metric table\n");
}
ret = hsmp_create_sensor(dev, sock_ind);
if (ret)
- dev_err(dev, "Failed to register HSMP sensors with hwmon\n");
+ dev_info(dev, "Failed to register HSMP sensors with hwmon\n");
dev_set_drvdata(dev, &hsmp_pdev->sock[sock_ind]);
- return ret;
+ return 0;
}
static const struct bin_attribute hsmp_metric_tbl_attr = {
diff --git a/drivers/platform/x86/amd/hsmp/hsmp.c b/drivers/platform/x86/amd/hsmp/hsmp.c
index 885e2f8136fd..19f82c1d3090 100644
--- a/drivers/platform/x86/amd/hsmp/hsmp.c
+++ b/drivers/platform/x86/amd/hsmp/hsmp.c
@@ -356,6 +356,11 @@ ssize_t hsmp_metric_tbl_read(struct hsmp_socket *sock, char *buf, size_t size)
if (!sock || !buf)
return -EINVAL;
+ if (!sock->metric_tbl_addr) {
+ dev_err(sock->dev, "Metrics table address not available\n");
+ return -ENOMEM;
+ }
+
/* Do not support lseek(), also don't allow more than the size of metric table */
if (size != sizeof(struct hsmp_metric_table)) {
dev_err(sock->dev, "Wrong buffer size\n");
diff --git a/drivers/platform/x86/amd/hsmp/plat.c b/drivers/platform/x86/amd/hsmp/plat.c
index 22f50b6235d6..e07f68575055 100644
--- a/drivers/platform/x86/amd/hsmp/plat.c
+++ b/drivers/platform/x86/amd/hsmp/plat.c
@@ -189,13 +189,13 @@ static int init_platform_device(struct device *dev)
if (hsmp_pdev->proto_ver == HSMP_PROTO_VER6) {
ret = hsmp_get_tbl_dram_base(i);
if (ret)
- dev_err(dev, "Failed to init metric table\n");
+ dev_info(dev, "Failed to init metric table\n");
}
/* Register with hwmon interface for reporting power */
ret = hsmp_create_sensor(dev, i);
if (ret)
- dev_err(dev, "Failed to register HSMP sensors with hwmon\n");
+ dev_info(dev, "Failed to register HSMP sensors with hwmon\n");
}
return 0;
diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c
index ded4c84f5ed1..d63aaad7ef59 100644
--- a/drivers/platform/x86/amd/pmc/pmc-quirks.c
+++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c
@@ -28,10 +28,15 @@ static struct quirk_entry quirk_spurious_8042 = {
.spurious_8042 = true,
};
+static struct quirk_entry quirk_s2idle_spurious_8042 = {
+ .s2idle_bug_mmio = FCH_PM_BASE + FCH_PM_SCRATCH,
+ .spurious_8042 = true,
+};
+
static const struct dmi_system_id fwbug_list[] = {
{
.ident = "L14 Gen2 AMD",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "20X5"),
@@ -39,7 +44,7 @@ static const struct dmi_system_id fwbug_list[] = {
},
{
.ident = "T14s Gen2 AMD",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "20XF"),
@@ -47,7 +52,7 @@ static const struct dmi_system_id fwbug_list[] = {
},
{
.ident = "X13 Gen2 AMD",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "20XH"),
@@ -55,7 +60,7 @@ static const struct dmi_system_id fwbug_list[] = {
},
{
.ident = "T14 Gen2 AMD",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "20XK"),
@@ -63,7 +68,7 @@ static const struct dmi_system_id fwbug_list[] = {
},
{
.ident = "T14 Gen1 AMD",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "20UD"),
@@ -71,7 +76,7 @@ static const struct dmi_system_id fwbug_list[] = {
},
{
.ident = "T14 Gen1 AMD",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "20UE"),
@@ -79,7 +84,7 @@ static const struct dmi_system_id fwbug_list[] = {
},
{
.ident = "T14s Gen1 AMD",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "20UH"),
@@ -87,7 +92,7 @@ static const struct dmi_system_id fwbug_list[] = {
},
{
.ident = "T14s Gen1 AMD",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "20UJ"),
@@ -95,7 +100,7 @@ static const struct dmi_system_id fwbug_list[] = {
},
{
.ident = "P14s Gen1 AMD",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "20Y1"),
@@ -103,7 +108,7 @@ static const struct dmi_system_id fwbug_list[] = {
},
{
.ident = "P14s Gen2 AMD",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "21A0"),
@@ -111,7 +116,7 @@ static const struct dmi_system_id fwbug_list[] = {
},
{
.ident = "P14s Gen2 AMD",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "21A1"),
@@ -152,7 +157,7 @@ static const struct dmi_system_id fwbug_list[] = {
},
{
.ident = "IdeaPad 1 14AMN7",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "82VF"),
@@ -160,7 +165,7 @@ static const struct dmi_system_id fwbug_list[] = {
},
{
.ident = "IdeaPad 1 15AMN7",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "82VG"),
@@ -168,7 +173,7 @@ static const struct dmi_system_id fwbug_list[] = {
},
{
.ident = "IdeaPad 1 15AMN7",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "82X5"),
@@ -176,7 +181,7 @@ static const struct dmi_system_id fwbug_list[] = {
},
{
.ident = "IdeaPad Slim 3 14AMN8",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "82XN"),
@@ -184,7 +189,7 @@ static const struct dmi_system_id fwbug_list[] = {
},
{
.ident = "IdeaPad Slim 3 15AMN8",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "82XQ"),
@@ -193,7 +198,7 @@ static const struct dmi_system_id fwbug_list[] = {
/* https://gitlab.freedesktop.org/drm/amd/-/issues/4434 */
{
.ident = "Lenovo Yoga 6 13ALC6",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "82ND"),
@@ -202,7 +207,7 @@ static const struct dmi_system_id fwbug_list[] = {
/* https://gitlab.freedesktop.org/drm/amd/-/issues/2684 */
{
.ident = "HP Laptop 15s-eq2xxx",
- .driver_data = &quirk_s2idle_bug,
+ .driver_data = &quirk_s2idle_spurious_8042,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Laptop 15s-eq2xxx"),
@@ -234,6 +239,14 @@ static const struct dmi_system_id fwbug_list[] = {
DMI_MATCH(DMI_BOARD_NAME, "WUJIE14-GX4HRXL"),
}
},
+ {
+ .ident = "MECHREVO Yilong15Pro Series GM5HG7A",
+ .driver_data = &quirk_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MECHREVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Yilong15Pro Series GM5HG7A"),
+ }
+ },
/* https://bugzilla.kernel.org/show_bug.cgi?id=220116 */
{
.ident = "PCSpecialist Lafite Pro V 14M",
@@ -243,6 +256,27 @@ static const struct dmi_system_id fwbug_list[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Lafite Pro V 14M"),
}
},
+ {
+ .ident = "TUXEDO Stellaris Slim 15 AMD Gen6",
+ .driver_data = &quirk_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GMxHGxx"),
+ }
+ },
+ {
+ .ident = "TUXEDO InfinityBook Pro 14/15 AMD Gen10",
+ .driver_data = &quirk_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "XxHP4NAx"),
+ }
+ },
+ {
+ .ident = "TUXEDO InfinityBook Pro 14/15 AMD Gen10",
+ .driver_data = &quirk_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "XxKK4NAx_XxSP4NAx"),
+ }
+ },
{}
};
@@ -285,6 +319,16 @@ void amd_pmc_quirks_init(struct amd_pmc_dev *dev)
{
const struct dmi_system_id *dmi_id;
+ /*
+ * IRQ1 may cause an interrupt during resume even without a keyboard
+ * press.
+ *
+ * Affects Renoir, Cezanne and Barcelo SoCs
+ *
+ * A solution is available in PMFW 64.66.0, but it must be activated by
+ * SBIOS. If the SBIOS is known to have the fix, a quirk can be added for
+ * a given system to avoid the workaround.
+ */
if (dev->cpu_id == AMD_CPU_ID_CZN)
dev->disable_8042_wakeup = true;
@@ -295,6 +339,5 @@ void amd_pmc_quirks_init(struct amd_pmc_dev *dev)
if (dev->quirks->s2idle_bug_mmio)
pr_info("Using s2idle quirk to avoid %s platform firmware bug\n",
dmi_id->ident);
- if (dev->quirks->spurious_8042)
- dev->disable_8042_wakeup = true;
+ dev->disable_8042_wakeup = dev->quirks->spurious_8042;
}
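
For context, fwbug_list[] is consumed through the standard DMI helpers; the assignment of dev->quirks sits outside this hunk, so the following is an assumption about its shape rather than a quote:

	dmi_id = dmi_first_match(fwbug_list);
	if (!dmi_id)
		return;
	dev->quirks = dmi_id->driver_data;	/* e.g. &quirk_s2idle_spurious_8042 */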
diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c
index 0b9b23eb7c2c..bd318fd02ccf 100644
--- a/drivers/platform/x86/amd/pmc/pmc.c
+++ b/drivers/platform/x86/amd/pmc/pmc.c
@@ -530,19 +530,6 @@ static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
static int amd_pmc_wa_irq1(struct amd_pmc_dev *pdev)
{
struct device *d;
- int rc;
-
- /* cezanne platform firmware has a fix in 64.66.0 */
- if (pdev->cpu_id == AMD_CPU_ID_CZN) {
- if (!pdev->major) {
- rc = amd_pmc_get_smu_version(pdev);
- if (rc)
- return rc;
- }
-
- if (pdev->major > 64 || (pdev->major == 64 && pdev->minor > 65))
- return 0;
- }
d = bus_find_device_by_name(&serio_bus, NULL, "serio0");
if (!d)
diff --git a/drivers/platform/x86/amd/pmf/acpi.c b/drivers/platform/x86/amd/pmf/acpi.c
index f75f7ecd8cd9..13c4fec2c7ef 100644
--- a/drivers/platform/x86/amd/pmf/acpi.c
+++ b/drivers/platform/x86/amd/pmf/acpi.c
@@ -161,6 +161,11 @@ int is_apmf_func_supported(struct amd_pmf_dev *pdev, unsigned long index)
return !!(pdev->supported_func & BIT(index - 1));
}
+int is_apmf_bios_input_notifications_supported(struct amd_pmf_dev *pdev)
+{
+ return !!(pdev->notifications & CUSTOM_BIOS_INPUT_BITS);
+}
+
int apts_get_static_slider_granular_v2(struct amd_pmf_dev *pdev,
struct amd_pmf_apts_granular_output *data, u32 apts_idx)
{
@@ -315,12 +320,26 @@ int apmf_get_sbios_requests_v2(struct amd_pmf_dev *pdev, struct apmf_sbios_req_v
return apmf_if_call_store_buffer(pdev, APMF_FUNC_SBIOS_REQUESTS, req, sizeof(*req));
}
+int apmf_get_sbios_requests_v1(struct amd_pmf_dev *pdev, struct apmf_sbios_req_v1 *req)
+{
+ return apmf_if_call_store_buffer(pdev, APMF_FUNC_SBIOS_REQUESTS, req, sizeof(*req));
+}
+
int apmf_get_sbios_requests(struct amd_pmf_dev *pdev, struct apmf_sbios_req *req)
{
return apmf_if_call_store_buffer(pdev, APMF_FUNC_SBIOS_REQUESTS,
req, sizeof(*req));
}
+static void amd_pmf_handle_early_preq(struct amd_pmf_dev *pdev)
+{
+ if (!pdev->cb_flag)
+ return;
+
+ amd_pmf_invoke_cmd_enact(pdev);
+ pdev->cb_flag = false;
+}
+
static void apmf_event_handler_v2(acpi_handle handle, u32 event, void *data)
{
struct amd_pmf_dev *pmf_dev = data;
@@ -329,8 +348,32 @@ static void apmf_event_handler_v2(acpi_handle handle, u32 event, void *data)
guard(mutex)(&pmf_dev->cb_mutex);
ret = apmf_get_sbios_requests_v2(pmf_dev, &pmf_dev->req);
- if (ret)
+ if (ret) {
dev_err(pmf_dev->dev, "Failed to get v2 SBIOS requests: %d\n", ret);
+ return;
+ }
+
+ dev_dbg(pmf_dev->dev, "Pending request (preq): 0x%x\n", pmf_dev->req.pending_req);
+
+ amd_pmf_handle_early_preq(pmf_dev);
+}
+
+static void apmf_event_handler_v1(acpi_handle handle, u32 event, void *data)
+{
+ struct amd_pmf_dev *pmf_dev = data;
+ int ret;
+
+ guard(mutex)(&pmf_dev->cb_mutex);
+
+ ret = apmf_get_sbios_requests_v1(pmf_dev, &pmf_dev->req1);
+ if (ret) {
+ dev_err(pmf_dev->dev, "Failed to get v1 SBIOS requests: %d\n", ret);
+ return;
+ }
+
+ dev_dbg(pmf_dev->dev, "Pending request (preq1): 0x%x\n", pmf_dev->req1.pending_req);
+
+ amd_pmf_handle_early_preq(pmf_dev);
}
static void apmf_event_handler(acpi_handle handle, u32 event, void *data)
@@ -385,6 +428,7 @@ static int apmf_if_verify_interface(struct amd_pmf_dev *pdev)
pdev->pmf_if_version = output.version;
+ pdev->notifications = output.notification_mask;
return 0;
}
@@ -421,6 +465,11 @@ int apmf_get_dyn_slider_def_dc(struct amd_pmf_dev *pdev, struct apmf_dyn_slider_
return apmf_if_call_store_buffer(pdev, APMF_FUNC_DYN_SLIDER_DC, data, sizeof(*data));
}
+static apmf_event_handler_t apmf_event_handlers[] = {
+ [PMF_IF_V1] = apmf_event_handler_v1,
+ [PMF_IF_V2] = apmf_event_handler_v2,
+};
+
int apmf_install_handler(struct amd_pmf_dev *pmf_dev)
{
acpi_handle ahandle = ACPI_HANDLE(pmf_dev->dev);
@@ -440,13 +489,26 @@ int apmf_install_handler(struct amd_pmf_dev *pmf_dev)
apmf_event_handler(ahandle, 0, pmf_dev);
}
- if (pmf_dev->smart_pc_enabled && pmf_dev->pmf_if_version == PMF_IF_V2) {
+ if (!pmf_dev->smart_pc_enabled)
+ return -EINVAL;
+
+ switch (pmf_dev->pmf_if_version) {
+ case PMF_IF_V1:
+ if (!is_apmf_bios_input_notifications_supported(pmf_dev))
+ break;
+ fallthrough;
+ case PMF_IF_V2:
status = acpi_install_notify_handler(ahandle, ACPI_ALL_NOTIFY,
- apmf_event_handler_v2, pmf_dev);
+ apmf_event_handlers[pmf_dev->pmf_if_version], pmf_dev);
if (ACPI_FAILURE(status)) {
- dev_err(pmf_dev->dev, "failed to install notify handler for custom BIOS inputs\n");
+ dev_err(pmf_dev->dev,
+ "failed to install notify handler v%d for custom BIOS inputs\n",
+ pmf_dev->pmf_if_version);
return -ENODEV;
}
+ break;
+ default:
+ break;
}
return 0;
@@ -500,8 +562,21 @@ void apmf_acpi_deinit(struct amd_pmf_dev *pmf_dev)
is_apmf_func_supported(pmf_dev, APMF_FUNC_SBIOS_REQUESTS))
acpi_remove_notify_handler(ahandle, ACPI_ALL_NOTIFY, apmf_event_handler);
- if (pmf_dev->smart_pc_enabled && pmf_dev->pmf_if_version == PMF_IF_V2)
- acpi_remove_notify_handler(ahandle, ACPI_ALL_NOTIFY, apmf_event_handler_v2);
+ if (!pmf_dev->smart_pc_enabled)
+ return;
+
+ switch (pmf_dev->pmf_if_version) {
+ case PMF_IF_V1:
+ if (!is_apmf_bios_input_notifications_supported(pmf_dev))
+ break;
+ fallthrough;
+ case PMF_IF_V2:
+ acpi_remove_notify_handler(ahandle, ACPI_ALL_NOTIFY,
+ apmf_event_handlers[pmf_dev->pmf_if_version]);
+ break;
+ default:
+ break;
+ }
}
int apmf_acpi_init(struct amd_pmf_dev *pmf_dev)
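
apmf_event_handlers[] is a designated-initializer dispatch table indexed by interface version, so install and remove share one lookup; the V1 case only reaches it when the BIOS advertises the custom-input notification bits, then falls through into the common path. The pattern, reduced to a sketch:

	static apmf_event_handler_t handlers[] = {
		[PMF_IF_V1] = apmf_event_handler_v1,	/* index 1 */
		[PMF_IF_V2] = apmf_event_handler_v2,	/* index 2; index 0 stays NULL */
	};

	/* Both install and remove select the handler the same way: */
	apmf_event_handler_t h = handlers[pmf_dev->pmf_if_version];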
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
index ef988605c4da..bc544a4a5266 100644
--- a/drivers/platform/x86/amd/pmf/core.c
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -403,6 +403,7 @@ static const struct acpi_device_id amd_pmf_acpi_ids[] = {
{"AMDI0103", 0},
{"AMDI0105", 0},
{"AMDI0107", 0},
+ {"AMDI0108", 0},
{ }
};
MODULE_DEVICE_TABLE(acpi, amd_pmf_acpi_ids);
diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
index 45b60238d527..bd19f2a6bc78 100644
--- a/drivers/platform/x86/amd/pmf/pmf.h
+++ b/drivers/platform/x86/amd/pmf/pmf.h
@@ -93,6 +93,8 @@ struct cookie_header {
#define PMF_POLICY_BIOS_OUTPUT_1 10
#define PMF_POLICY_BIOS_OUTPUT_2 11
#define PMF_POLICY_P3T 38
+#define PMF_POLICY_PMF_PPT 54
+#define PMF_POLICY_PMF_PPT_APU_ONLY 55
#define PMF_POLICY_BIOS_OUTPUT_3 57
#define PMF_POLICY_BIOS_OUTPUT_4 58
#define PMF_POLICY_BIOS_OUTPUT_5 59
@@ -116,6 +118,9 @@ struct cookie_header {
#define PMF_IF_V2 2
#define APTS_MAX_STATES 16
+#define CUSTOM_BIOS_INPUT_BITS GENMASK(16, 7)
+
+typedef void (*apmf_event_handler_t)(acpi_handle handle, u32 event, void *data);
/* APTS PMF BIOS Interface */
struct amd_pmf_apts_output {
@@ -184,6 +189,24 @@ struct apmf_sbios_req {
u8 skin_temp_hs2;
} __packed;
+/* As per APMF spec 1.3 */
+struct apmf_sbios_req_v1 {
+ u16 size;
+ u32 pending_req;
+ u8 rsvd;
+ u8 cql_event;
+ u8 amt_event;
+ u32 fppt;
+ u32 sppt;
+ u32 sppt_apu_only;
+ u32 spl;
+ u32 stt_min_limit;
+ u8 skin_temp_apu;
+ u8 skin_temp_hs2;
+ u8 enable_cnqf;
+ u32 custom_policy[10];
+} __packed;
+
struct apmf_sbios_req_v2 {
u16 size;
u32 pending_req;
@@ -331,6 +354,10 @@ enum power_modes_v2 {
POWER_MODE_V2_MAX,
};
+struct pmf_bios_inputs_prev {
+ u32 custom_bios_inputs[10];
+};
+
struct amd_pmf_dev {
void __iomem *regbase;
void __iomem *smu_virt_addr;
@@ -375,6 +402,10 @@ struct amd_pmf_dev {
struct resource *res;
struct apmf_sbios_req_v2 req; /* To get custom bios pending request */
struct mutex cb_mutex;
+ u32 notifications;
+ struct apmf_sbios_req_v1 req1;
+ struct pmf_bios_inputs_prev cb_prev; /* To preserve custom BIOS inputs */
+ bool cb_flag; /* To handle first custom BIOS input */
};
struct apmf_sps_prop_granular_v2 {
@@ -621,14 +652,35 @@ enum ta_slider {
TA_MAX,
};
-enum apmf_smartpc_custom_bios_inputs {
- APMF_SMARTPC_CUSTOM_BIOS_INPUT1,
- APMF_SMARTPC_CUSTOM_BIOS_INPUT2,
+struct amd_pmf_pb_bitmap {
+ const char *name;
+ u32 bit_mask;
+};
+
+static const struct amd_pmf_pb_bitmap custom_bios_inputs[] __used = {
+ {"NOTIFY_CUSTOM_BIOS_INPUT1", BIT(5)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT2", BIT(6)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT3", BIT(7)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT4", BIT(8)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT5", BIT(9)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT6", BIT(10)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT7", BIT(11)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT8", BIT(12)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT9", BIT(13)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT10", BIT(14)},
};
-enum apmf_preq_smartpc {
- NOTIFY_CUSTOM_BIOS_INPUT1 = 5,
- NOTIFY_CUSTOM_BIOS_INPUT2,
+static const struct amd_pmf_pb_bitmap custom_bios_inputs_v1[] __used = {
+ {"NOTIFY_CUSTOM_BIOS_INPUT1", BIT(7)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT2", BIT(8)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT3", BIT(9)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT4", BIT(10)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT5", BIT(11)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT6", BIT(12)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT7", BIT(13)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT8", BIT(14)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT9", BIT(15)},
+ {"NOTIFY_CUSTOM_BIOS_INPUT10", BIT(16)},
};
enum platform_type {
@@ -677,6 +729,8 @@ struct pmf_action_table {
u32 stt_skintemp_apu; /* in C */
u32 stt_skintemp_hs2; /* in C */
u32 p3t_limit; /* in mW */
+ u32 pmf_ppt; /* in mW */
+ u32 pmf_ppt_apu_only; /* in mW */
};
/* Input conditions */
@@ -686,8 +740,7 @@ struct ta_pmf_condition_info {
u32 power_slider;
u32 lid_state;
bool user_present;
- u32 bios_input1;
- u32 bios_input2;
+ u32 bios_input_1[2];
u32 monitor_count;
u32 rsvd2[2];
u32 bat_design;
@@ -711,7 +764,9 @@ struct ta_pmf_condition_info {
u32 workload_type;
u32 display_type;
u32 display_state;
- u32 rsvd5[150];
+ u32 rsvd5_1[17];
+ u32 bios_input_2[8];
+ u32 rsvd5[125];
};
struct ta_pmf_load_policy_table {
@@ -737,6 +792,7 @@ struct ta_pmf_enact_table {
struct ta_pmf_action {
u32 action_index;
u32 value;
+ u32 spl_arg;
};
/* Output actions from TA */
@@ -778,6 +834,7 @@ int apmf_os_power_slider_update(struct amd_pmf_dev *dev, u8 flag);
int amd_pmf_set_dram_addr(struct amd_pmf_dev *dev, bool alloc_buffer);
int amd_pmf_notify_sbios_heartbeat_event_v2(struct amd_pmf_dev *dev, u8 flag);
u32 fixp_q88_fromint(u32 val);
+int is_apmf_bios_input_notifications_supported(struct amd_pmf_dev *pdev);
/* SPS Layer */
int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf);
@@ -805,6 +862,7 @@ void amd_pmf_init_auto_mode(struct amd_pmf_dev *dev);
void amd_pmf_deinit_auto_mode(struct amd_pmf_dev *dev);
void amd_pmf_trans_automode(struct amd_pmf_dev *dev, int socket_power, ktime_t time_elapsed_ms);
int apmf_get_sbios_requests(struct amd_pmf_dev *pdev, struct apmf_sbios_req *req);
+int apmf_get_sbios_requests_v1(struct amd_pmf_dev *pdev, struct apmf_sbios_req_v1 *req);
int apmf_get_sbios_requests_v2(struct amd_pmf_dev *pdev, struct apmf_sbios_req_v2 *req);
void amd_pmf_update_2_cql(struct amd_pmf_dev *dev, bool is_cql_event);
@@ -828,5 +886,6 @@ int amd_pmf_smartpc_apply_bios_output(struct amd_pmf_dev *dev, u32 val, u32 preq
/* Smart PC - TA interfaces */
void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in);
void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in);
+int amd_pmf_invoke_cmd_enact(struct amd_pmf_dev *dev);
#endif /* PMF_H */
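
Note how the masks line up: CUSTOM_BIOS_INPUT_BITS is GENMASK(16, 7), i.e. bits 7 through 16, matching custom_bios_inputs_v1[] (BIT(7)..BIT(16)), while the v2 table occupies BIT(5)..BIT(14). A minimal check mirroring is_apmf_bios_input_notifications_supported():

	#include <linux/bits.h>

	/* True if the BIOS advertises any of the ten v1 custom-input notifications. */
	static bool has_custom_bios_inputs(u32 notification_mask)
	{
		return !!(notification_mask & GENMASK(16, 7));
	}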
diff --git a/drivers/platform/x86/amd/pmf/spc.c b/drivers/platform/x86/amd/pmf/spc.c
index 1d90f9382024..85192c7536b8 100644
--- a/drivers/platform/x86/amd/pmf/spc.c
+++ b/drivers/platform/x86/amd/pmf/spc.c
@@ -70,8 +70,22 @@ static const char *ta_slider_as_str(unsigned int state)
}
}
+static u32 amd_pmf_get_ta_custom_bios_inputs(struct ta_pmf_enact_table *in, int index)
+{
+ switch (index) {
+ case 0 ... 1:
+ return in->ev_info.bios_input_1[index];
+ case 2 ... 9:
+ return in->ev_info.bios_input_2[index - 2];
+ default:
+ return 0;
+ }
+}
+
void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
{
+ int i;
+
dev_dbg(dev->dev, "==== TA inputs START ====\n");
dev_dbg(dev->dev, "Slider State: %s\n", ta_slider_as_str(in->ev_info.power_slider));
dev_dbg(dev->dev, "Power Source: %s\n", amd_pmf_source_as_str(in->ev_info.power_source));
@@ -90,33 +104,81 @@ void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *
dev_dbg(dev->dev, "Platform type: %s\n", platform_type_as_str(in->ev_info.platform_type));
dev_dbg(dev->dev, "Laptop placement: %s\n",
laptop_placement_as_str(in->ev_info.device_state));
- dev_dbg(dev->dev, "Custom BIOS input1: %u\n", in->ev_info.bios_input1);
- dev_dbg(dev->dev, "Custom BIOS input2: %u\n", in->ev_info.bios_input2);
+ for (i = 0; i < ARRAY_SIZE(custom_bios_inputs); i++)
+ dev_dbg(dev->dev, "Custom BIOS input%d: %u\n", i + 1,
+ amd_pmf_get_ta_custom_bios_inputs(in, i));
dev_dbg(dev->dev, "==== TA inputs END ====\n");
}
#else
void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in) {}
#endif
+/*
+ * This helper function sets the appropriate BIOS input value in the TA enact
+ * table based on the provided index. We need this approach because the custom
+ * BIOS input array is not contiguous, due to the existing TA structure layout.
+ */
+static void amd_pmf_set_ta_custom_bios_input(struct ta_pmf_enact_table *in, int index, u32 value)
+{
+ switch (index) {
+ case 0 ... 1:
+ in->ev_info.bios_input_1[index] = value;
+ break;
+ case 2 ... 9:
+ in->ev_info.bios_input_2[index - 2] = value;
+ break;
+ default:
+ return;
+ }
+}
+
+static void amd_pmf_update_bios_inputs(struct amd_pmf_dev *pdev, u32 pending_req,
+ const struct amd_pmf_pb_bitmap *inputs,
+ const u32 *custom_policy, struct ta_pmf_enact_table *in)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(custom_bios_inputs); i++) {
+ if (!(pending_req & inputs[i].bit_mask))
+ continue;
+ amd_pmf_set_ta_custom_bios_input(in, i, custom_policy[i]);
+ pdev->cb_prev.custom_bios_inputs[i] = custom_policy[i];
+ dev_dbg(pdev->dev, "Custom BIOS Input[%d]: %u\n", i, custom_policy[i]);
+ }
+}
+
static void amd_pmf_get_custom_bios_inputs(struct amd_pmf_dev *pdev,
struct ta_pmf_enact_table *in)
{
- if (!pdev->req.pending_req)
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(custom_bios_inputs); i++)
+ amd_pmf_set_ta_custom_bios_input(in, i, pdev->cb_prev.custom_bios_inputs[i]);
+
+ if (!(pdev->req.pending_req || pdev->req1.pending_req))
return;
- switch (pdev->req.pending_req) {
- case BIT(NOTIFY_CUSTOM_BIOS_INPUT1):
- in->ev_info.bios_input1 = pdev->req.custom_policy[APMF_SMARTPC_CUSTOM_BIOS_INPUT1];
+ if (!pdev->smart_pc_enabled)
+ return;
+
+ switch (pdev->pmf_if_version) {
+ case PMF_IF_V1:
+ if (!is_apmf_bios_input_notifications_supported(pdev))
+ return;
+ amd_pmf_update_bios_inputs(pdev, pdev->req1.pending_req, custom_bios_inputs_v1,
+ pdev->req1.custom_policy, in);
break;
- case BIT(NOTIFY_CUSTOM_BIOS_INPUT2):
- in->ev_info.bios_input2 = pdev->req.custom_policy[APMF_SMARTPC_CUSTOM_BIOS_INPUT2];
+ case PMF_IF_V2:
+ amd_pmf_update_bios_inputs(pdev, pdev->req.pending_req, custom_bios_inputs,
+ pdev->req.custom_policy, in);
break;
default:
- dev_dbg(pdev->dev, "Invalid preq for BIOS input: 0x%x\n", pdev->req.pending_req);
+ break;
}
/* Clear pending requests after handling */
memset(&pdev->req, 0, sizeof(pdev->req));
+ memset(&pdev->req1, 0, sizeof(pdev->req1));
}
static void amd_pmf_get_c0_residency(u16 *core_res, size_t size, struct ta_pmf_enact_table *in)
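
The 0...1 and 2...9 case ranges exist because the ten inputs are split across two non-adjacent fields of struct ta_pmf_condition_info: bios_input_1[2] in the old slot and bios_input_2[8] carved out of rsvd5 (17 + 8 + 125 = 150 words, so the structure size is unchanged). The index arithmetic as a hypothetical accessor:

	/* Map logical input index 0..9 onto the split storage. */
	static u32 *bios_input_slot(struct ta_pmf_condition_info *ev, int i)
	{
		return i < 2 ? &ev->bios_input_1[i] : &ev->bios_input_2[i - 2];
	}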
diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c
index 49e14ca94a9e..c28f3c5744c2 100644
--- a/drivers/platform/x86/amd/pmf/sps.c
+++ b/drivers/platform/x86/amd/pmf/sps.c
@@ -283,7 +283,7 @@ int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf)
bool is_pprof_balanced(struct amd_pmf_dev *pmf)
{
- return (pmf->current_profile == PLATFORM_PROFILE_BALANCED) ? true : false;
+ return pmf->current_profile == PLATFORM_PROFILE_BALANCED;
}
static int amd_pmf_profile_get(struct device *dev,
diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c
index 4f626ebcb619..6e8116bef4f6 100644
--- a/drivers/platform/x86/amd/pmf/tee-if.c
+++ b/drivers/platform/x86/amd/pmf/tee-if.c
@@ -147,6 +147,22 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_
}
break;
+ case PMF_POLICY_PMF_PPT:
+ if (dev->prev_data->pmf_ppt != val) {
+ amd_pmf_send_cmd(dev, SET_PMF_PPT, false, val, NULL);
+ dev_dbg(dev->dev, "update PMF PPT: %u\n", val);
+ dev->prev_data->pmf_ppt = val;
+ }
+ break;
+
+ case PMF_POLICY_PMF_PPT_APU_ONLY:
+ if (dev->prev_data->pmf_ppt_apu_only != val) {
+ amd_pmf_send_cmd(dev, SET_PMF_PPT_APU_ONLY, false, val, NULL);
+ dev_dbg(dev->dev, "update PMF PPT APU ONLY: %u\n", val);
+ dev->prev_data->pmf_ppt_apu_only = val;
+ }
+ break;
+
case PMF_POLICY_SYSTEM_STATE:
switch (val) {
case 0:
@@ -209,7 +225,7 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_
}
}
-static int amd_pmf_invoke_cmd_enact(struct amd_pmf_dev *dev)
+int amd_pmf_invoke_cmd_enact(struct amd_pmf_dev *dev)
{
struct ta_pmf_shared_memory *ta_sm = NULL;
struct ta_pmf_enact_result *out = NULL;
@@ -561,8 +577,10 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
ret = amd_pmf_start_policy_engine(dev);
dev_dbg(dev->dev, "start policy engine ret: %d\n", ret);
status = ret == TA_PMF_TYPE_SUCCESS;
- if (status)
+ if (status) {
+ dev->cb_flag = true;
break;
+ }
amd_pmf_tee_deinit(dev);
}
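
Both new policy cases follow the driver's existing write-only-on-change pattern: remember the value last sent to firmware and skip the SMU command when the policy engine re-emits it. Factored into a hypothetical helper (parameter types approximated from the call sites):

	static void apply_cached_policy(struct amd_pmf_dev *dev, int cmd,
					u32 *cached, u32 val)
	{
		if (*cached == val)
			return;			/* firmware already has this value */

		amd_pmf_send_cmd(dev, cmd, false, val, NULL);
		*cached = val;
	}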
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index f84c3d03c1de..6a62bc5b02fd 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -147,7 +147,12 @@ static struct quirk_entry quirk_asus_ignore_fan = {
};
static struct quirk_entry quirk_asus_zenbook_duo_kbd = {
- .ignore_key_wlan = true,
+ .key_wlan_event = ASUS_WMI_KEY_IGNORE,
+};
+
+static struct quirk_entry quirk_asus_z13 = {
+ .key_wlan_event = ASUS_WMI_KEY_ARMOURY,
+ .tablet_switch_mode = asus_wmi_kbd_dock_devid,
};
static int dmi_matched(const struct dmi_system_id *dmi)
@@ -539,6 +544,15 @@ static const struct dmi_system_id asus_quirks[] = {
},
.driver_data = &quirk_asus_zenbook_duo_kbd,
},
+ {
+ .callback = dmi_matched,
+ .ident = "ASUS ROG Z13",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ROG Flow Z13"),
+ },
+ .driver_data = &quirk_asus_z13,
+ },
{},
};
@@ -618,6 +632,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0x93, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV + DVI */
{ KE_KEY, 0x95, { KEY_MEDIA } },
{ KE_KEY, 0x99, { KEY_PHONE } }, /* Conflicts with fan mode switch */
+ { KE_KEY, 0x9D, { KEY_FN_F } },
{ KE_KEY, 0xA0, { KEY_SWITCHVIDEOMODE } }, /* SDSP HDMI only */
{ KE_KEY, 0xA1, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + HDMI */
{ KE_KEY, 0xA2, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + HDMI */
@@ -632,10 +647,13 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_IGNORE, 0xC0, }, /* External display connect/disconnect notification */
{ KE_KEY, 0xC4, { KEY_KBDILLUMUP } },
{ KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } },
+ { KE_KEY, 0xCA, { KEY_F13 } }, /* Noise cancelling on Expertbook B9 */
+ { KE_KEY, 0xCB, { KEY_F14 } }, /* Fn+noise-cancel */
{ KE_IGNORE, 0xC6, }, /* Ambient Light Sensor notification */
{ KE_IGNORE, 0xCF, }, /* AC mode */
{ KE_KEY, 0xFA, { KEY_PROG2 } }, /* Lid flip action */
{ KE_KEY, 0xBD, { KEY_PROG2 } }, /* Lid flip action on ROG xflow laptops */
+ { KE_KEY, ASUS_WMI_KEY_ARMOURY, { KEY_PROG3 } },
{ KE_END, 0},
};
@@ -656,10 +674,10 @@ static void asus_nb_wmi_key_filter(struct asus_wmi_driver *asus_wmi, int *code,
*code = ASUS_WMI_KEY_IGNORE;
break;
case 0x5D: /* Wireless console Toggle */
- case 0x5E: /* Wireless console Enable */
- case 0x5F: /* Wireless console Disable */
- if (quirks->ignore_key_wlan)
- *code = ASUS_WMI_KEY_IGNORE;
+ case 0x5E: /* Wireless console Enable / Keyboard Attach, Detach */
+ case 0x5F: /* Wireless console Disable / Special Key */
+ if (quirks->key_wlan_event)
+ *code = quirks->key_wlan_event;
break;
}
}
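
Turning ignore_key_wlan (a bool) into key_wlan_event (an int) lets a model either swallow the 0x5D-0x5F wireless-console events or remap them: the Zenbook Duo keeps ignoring them via ASUS_WMI_KEY_IGNORE (-1), while the ROG Flow Z13 remaps them to ASUS_WMI_KEY_ARMOURY, which the keymap above turns into KEY_PROG3. Both quirk shapes in brief:

	.key_wlan_event = ASUS_WMI_KEY_IGNORE,	/* drop the event entirely */
	.key_wlan_event = ASUS_WMI_KEY_ARMOURY,	/* re-emit as 0xffff01 -> KEY_PROG3 */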
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index f7191fdded14..e72a2b5d158e 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -5088,16 +5088,22 @@ static int asus_wmi_probe(struct platform_device *pdev)
asus_s2idle_check_register();
- return asus_wmi_add(pdev);
+ ret = asus_wmi_add(pdev);
+ if (ret)
+ asus_s2idle_check_unregister();
+
+ return ret;
}
static bool used;
+static DEFINE_MUTEX(register_mutex);
int __init_or_module asus_wmi_register_driver(struct asus_wmi_driver *driver)
{
struct platform_driver *platform_driver;
struct platform_device *platform_device;
+ guard(mutex)(&register_mutex);
if (used)
return -EBUSY;
@@ -5120,6 +5126,7 @@ EXPORT_SYMBOL_GPL(asus_wmi_register_driver);
void asus_wmi_unregister_driver(struct asus_wmi_driver *driver)
{
+ guard(mutex)(&register_mutex);
asus_s2idle_check_unregister();
platform_device_unregister(driver->platform_device);
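
The new register_mutex closes a race on the single-owner used flag, and guard(mutex)() keeps the lock held across every return path, including the early -EBUSY. Reduced to its essentials as a sketch:

	static DEFINE_MUTEX(register_mutex);
	static bool used;

	static int claim_single_slot(void)
	{
		guard(mutex)(&register_mutex);	/* released on each return below */

		if (used)
			return -EBUSY;
		used = true;
		return 0;
	}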
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index 018dfde4025e..5cd4392b964e 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -18,6 +18,7 @@
#include <linux/i8042.h>
#define ASUS_WMI_KEY_IGNORE (-1)
+#define ASUS_WMI_KEY_ARMOURY 0xffff01
#define ASUS_WMI_BRN_DOWN 0x2e
#define ASUS_WMI_BRN_UP 0x2f
@@ -40,7 +41,7 @@ struct quirk_entry {
bool wmi_force_als_set;
bool wmi_ignore_fan;
bool filter_i8042_e1_extended_codes;
- bool ignore_key_wlan;
+ int key_wlan_event;
enum asus_wmi_tablet_switch_mode tablet_switch_mode;
int wapf;
/*
diff --git a/drivers/platform/x86/barco-p50-gpio.c b/drivers/platform/x86/barco-p50-gpio.c
index bb3393bbfb89..6f13e81f98fb 100644
--- a/drivers/platform/x86/barco-p50-gpio.c
+++ b/drivers/platform/x86/barco-p50-gpio.c
@@ -11,6 +11,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/delay.h>
+#include <linux/dev_printk.h>
#include <linux/dmi.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -18,10 +19,11 @@
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/gpio_keys.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/machine.h>
-#include <linux/input.h>
+#include <linux/gpio/property.h>
+#include <linux/input-event-codes.h>
+#include <linux/property.h>
#define DRIVER_NAME "barco-p50-gpio"
@@ -78,44 +80,57 @@ static const char * const gpio_names[] = {
[P50_GPIO_LINE_BTN] = "identify-button",
};
-
-static struct gpiod_lookup_table p50_gpio_led_table = {
- .dev_id = "leds-gpio",
- .table = {
- GPIO_LOOKUP_IDX(DRIVER_NAME, P50_GPIO_LINE_LED, NULL, 0, GPIO_ACTIVE_HIGH),
- {}
- }
+static const struct software_node gpiochip_node = {
+ .name = DRIVER_NAME,
};
/* GPIO LEDs */
-static struct gpio_led leds[] = {
- { .name = "identify" }
+static const struct software_node gpio_leds_node = {
+ .name = "gpio-leds-identify",
};
-static struct gpio_led_platform_data leds_pdata = {
- .num_leds = ARRAY_SIZE(leds),
- .leds = leds,
+static const struct property_entry identify_led_props[] = {
+ PROPERTY_ENTRY_GPIO("gpios", &gpiochip_node, P50_GPIO_LINE_LED, GPIO_ACTIVE_HIGH),
+ { }
+};
+
+static const struct software_node identify_led_node = {
+ .parent = &gpio_leds_node,
+ .name = "identify",
+ .properties = identify_led_props,
};
/* GPIO keyboard */
-static struct gpio_keys_button buttons[] = {
- {
- .code = KEY_VENDOR,
- .gpio = P50_GPIO_LINE_BTN,
- .active_low = 1,
- .type = EV_KEY,
- .value = 1,
- },
+static const struct property_entry gpio_keys_props[] = {
+ PROPERTY_ENTRY_STRING("label", "identify"),
+ PROPERTY_ENTRY_U32("poll-interval", 100),
+ { }
};
-static struct gpio_keys_platform_data keys_pdata = {
- .buttons = buttons,
- .nbuttons = ARRAY_SIZE(buttons),
- .poll_interval = 100,
- .rep = 0,
- .name = "identify",
+static const struct software_node gpio_keys_node = {
+ .name = "gpio-keys-identify",
+ .properties = gpio_keys_props,
};
+static struct property_entry vendor_key_props[] = {
+ PROPERTY_ENTRY_U32("linux,code", KEY_VENDOR),
+ PROPERTY_ENTRY_GPIO("gpios", &gpiochip_node, P50_GPIO_LINE_BTN, GPIO_ACTIVE_LOW),
+ { }
+};
+
+static const struct software_node vendor_key_node = {
+ .parent = &gpio_keys_node,
+ .properties = vendor_key_props,
+};
+
+static const struct software_node *p50_swnodes[] = {
+ &gpiochip_node,
+ &gpio_leds_node,
+ &identify_led_node,
+ &gpio_keys_node,
+ &vendor_key_node,
+ NULL
+};
/* low level access routines */
@@ -285,6 +300,16 @@ static int p50_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
static int p50_gpio_probe(struct platform_device *pdev)
{
+ struct platform_device_info key_info = {
+ .name = "gpio-keys-polled",
+ .id = PLATFORM_DEVID_NONE,
+ .parent = &pdev->dev,
+ };
+ struct platform_device_info led_info = {
+ .name = "leds-gpio",
+ .id = PLATFORM_DEVID_NONE,
+ .parent = &pdev->dev,
+ };
struct p50_gpio *p50;
struct resource *res;
int ret;
@@ -316,7 +341,7 @@ static int p50_gpio_probe(struct platform_device *pdev)
p50->gc.base = -1;
p50->gc.get_direction = p50_gpio_get_direction;
p50->gc.get = p50_gpio_get;
- p50->gc.set_rv = p50_gpio_set;
+ p50->gc.set = p50_gpio_set;
/* reset mbox */
@@ -339,25 +364,20 @@ static int p50_gpio_probe(struct platform_device *pdev)
return ret;
}
- gpiod_add_lookup_table(&p50_gpio_led_table);
-
- p50->leds_pdev = platform_device_register_data(&pdev->dev,
- "leds-gpio", PLATFORM_DEVID_NONE, &leds_pdata, sizeof(leds_pdata));
+ ret = software_node_register_node_group(p50_swnodes);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "failed to register software nodes");
+ led_info.fwnode = software_node_fwnode(&gpio_leds_node);
+ p50->leds_pdev = platform_device_register_full(&led_info);
if (IS_ERR(p50->leds_pdev)) {
ret = PTR_ERR(p50->leds_pdev);
dev_err(&pdev->dev, "Could not register leds-gpio: %d\n", ret);
goto err_leds;
}
- /* gpio-keys-polled uses old-style gpio interface, pass the right identifier */
- buttons[0].gpio += p50->gc.base;
-
- p50->keys_pdev =
- platform_device_register_data(&pdev->dev, "gpio-keys-polled",
- PLATFORM_DEVID_NONE,
- &keys_pdata, sizeof(keys_pdata));
-
+ key_info.fwnode = software_node_fwnode(&gpio_keys_node);
+ p50->keys_pdev = platform_device_register_full(&key_info);
if (IS_ERR(p50->keys_pdev)) {
ret = PTR_ERR(p50->keys_pdev);
dev_err(&pdev->dev, "Could not register gpio-keys-polled: %d\n", ret);
@@ -369,7 +389,7 @@ static int p50_gpio_probe(struct platform_device *pdev)
err_keys:
platform_device_unregister(p50->leds_pdev);
err_leds:
- gpiod_remove_lookup_table(&p50_gpio_led_table);
+ software_node_unregister_node_group(p50_swnodes);
return ret;
}
@@ -381,7 +401,7 @@ static void p50_gpio_remove(struct platform_device *pdev)
platform_device_unregister(p50->keys_pdev);
platform_device_unregister(p50->leds_pdev);
- gpiod_remove_lookup_table(&p50_gpio_led_table);
+ software_node_unregister_node_group(p50_swnodes);
}
static struct platform_driver p50_gpio_driver = {
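
The conversion's main win is that software nodes reference the GPIO chip by node and offset rather than by global GPIO number, which is why the old buttons[0].gpio += p50->gc.base fixup disappears. The key line is the property that ties the button to the driver's own chip node:

	PROPERTY_ENTRY_GPIO("gpios", &gpiochip_node, P50_GPIO_LINE_BTN, GPIO_ACTIVE_LOW),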
diff --git a/drivers/platform/x86/dell/dell-lis3lv02d.c b/drivers/platform/x86/dell/dell-lis3lv02d.c
index 732de5f556f8..77905a9ddde9 100644
--- a/drivers/platform/x86/dell/dell-lis3lv02d.c
+++ b/drivers/platform/x86/dell/dell-lis3lv02d.c
@@ -48,6 +48,7 @@ static const struct dmi_system_id lis3lv02d_devices[] __initconst = {
DELL_LIS3LV02D_DMI_ENTRY("Latitude 5500", 0x29),
DELL_LIS3LV02D_DMI_ENTRY("Latitude E6330", 0x29),
DELL_LIS3LV02D_DMI_ENTRY("Latitude E6430", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Latitude E6530", 0x29),
DELL_LIS3LV02D_DMI_ENTRY("Precision 3540", 0x29),
DELL_LIS3LV02D_DMI_ENTRY("Precision 3551", 0x29),
DELL_LIS3LV02D_DMI_ENTRY("Precision M6800", 0x29),
diff --git a/drivers/platform/x86/dell/dell-pc.c b/drivers/platform/x86/dell/dell-pc.c
index 48cc7511905a..becdd9aaef29 100644
--- a/drivers/platform/x86/dell/dell-pc.c
+++ b/drivers/platform/x86/dell/dell-pc.c
@@ -228,6 +228,8 @@ static int thermal_platform_profile_get(struct device *dev,
static int thermal_platform_profile_probe(void *drvdata, unsigned long *choices)
{
+ int current_mode;
+
if (supported_modes & DELL_QUIET)
__set_bit(PLATFORM_PROFILE_QUIET, choices);
if (supported_modes & DELL_COOL_BOTTOM)
@@ -237,6 +239,13 @@ static int thermal_platform_profile_probe(void *drvdata, unsigned long *choices)
if (supported_modes & DELL_PERFORMANCE)
__set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+ /* Make sure that ACPI is in sync with the profile set by USTT */
+ current_mode = thermal_get_mode();
+ if (current_mode < 0)
+ return current_mode;
+
+ thermal_set_mode(current_mode);
+
return 0;
}
diff --git a/drivers/platform/x86/dell/dell-smbios-base.c b/drivers/platform/x86/dell/dell-smbios-base.c
index 01c72b91a50d..444786102f02 100644
--- a/drivers/platform/x86/dell/dell-smbios-base.c
+++ b/drivers/platform/x86/dell/dell-smbios-base.c
@@ -39,6 +39,7 @@ struct token_sysfs_data {
struct smbios_device {
struct list_head list;
struct device *device;
+ int priority;
int (*call_fn)(struct calling_interface_buffer *arg);
};
@@ -145,7 +146,7 @@ int dell_smbios_error(int value)
}
EXPORT_SYMBOL_GPL(dell_smbios_error);
-int dell_smbios_register_device(struct device *d, void *call_fn)
+int dell_smbios_register_device(struct device *d, int priority, void *call_fn)
{
struct smbios_device *priv;
@@ -154,6 +155,7 @@ int dell_smbios_register_device(struct device *d, void *call_fn)
return -ENOMEM;
get_device(d);
priv->device = d;
+ priv->priority = priority;
priv->call_fn = call_fn;
mutex_lock(&smbios_mutex);
list_add_tail(&priv->list, &smbios_device_list);
@@ -292,28 +294,25 @@ EXPORT_SYMBOL_GPL(dell_smbios_call_filter);
int dell_smbios_call(struct calling_interface_buffer *buffer)
{
- int (*call_fn)(struct calling_interface_buffer *) = NULL;
- struct device *selected_dev = NULL;
+ struct smbios_device *selected = NULL;
struct smbios_device *priv;
int ret;
mutex_lock(&smbios_mutex);
list_for_each_entry(priv, &smbios_device_list, list) {
- if (!selected_dev || priv->device->id >= selected_dev->id) {
- dev_dbg(priv->device, "Trying device ID: %d\n",
- priv->device->id);
- call_fn = priv->call_fn;
- selected_dev = priv->device;
+ if (!selected || priv->priority >= selected->priority) {
+ dev_dbg(priv->device, "Trying device ID: %d\n", priv->priority);
+ selected = priv;
}
}
- if (!selected_dev) {
+ if (!selected) {
ret = -ENODEV;
pr_err("No dell-smbios drivers are loaded\n");
goto out_smbios_call;
}
- ret = call_fn(buffer);
+ ret = selected->call_fn(buffer);
out_smbios_call:
mutex_unlock(&smbios_mutex);
diff --git a/drivers/platform/x86/dell/dell-smbios-smm.c b/drivers/platform/x86/dell/dell-smbios-smm.c
index 4d375985c85f..7055e2c40f34 100644
--- a/drivers/platform/x86/dell/dell-smbios-smm.c
+++ b/drivers/platform/x86/dell/dell-smbios-smm.c
@@ -125,8 +125,7 @@ int init_dell_smbios_smm(void)
if (ret)
goto fail_platform_device_add;
- ret = dell_smbios_register_device(&platform_device->dev,
- &dell_smbios_smm_call);
+ ret = dell_smbios_register_device(&platform_device->dev, 0, &dell_smbios_smm_call);
if (ret)
goto fail_register;
diff --git a/drivers/platform/x86/dell/dell-smbios-wmi.c b/drivers/platform/x86/dell/dell-smbios-wmi.c
index ae9012549560..a7dca8c59d60 100644
--- a/drivers/platform/x86/dell/dell-smbios-wmi.c
+++ b/drivers/platform/x86/dell/dell-smbios-wmi.c
@@ -264,9 +264,7 @@ static int dell_smbios_wmi_probe(struct wmi_device *wdev, const void *context)
if (ret)
return ret;
- /* ID is used by dell-smbios to set priority of drivers */
- wdev->dev.id = 1;
- ret = dell_smbios_register_device(&wdev->dev, &dell_smbios_wmi_call);
+ ret = dell_smbios_register_device(&wdev->dev, 1, &dell_smbios_wmi_call);
if (ret)
return ret;
diff --git a/drivers/platform/x86/dell/dell-smbios.h b/drivers/platform/x86/dell/dell-smbios.h
index 77baa15eb523..f421b8533a9e 100644
--- a/drivers/platform/x86/dell/dell-smbios.h
+++ b/drivers/platform/x86/dell/dell-smbios.h
@@ -64,7 +64,7 @@ struct calling_interface_structure {
struct calling_interface_token tokens[];
} __packed;
-int dell_smbios_register_device(struct device *d, void *call_fn);
+int dell_smbios_register_device(struct device *d, int priority, void *call_fn);
void dell_smbios_unregister_device(struct device *d);
int dell_smbios_error(int value);
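
Backends previously smuggled their rank through dev->id; the explicit priority argument makes it visible at the call sites (SMM registers at 0, WMI at 1), and the >= comparison in dell_smbios_call() picks the highest priority, with later registrations winning ties. A hypothetical third backend that should beat both would simply register higher:

	ret = dell_smbios_register_device(&pdev->dev, 2, &my_backend_call);	/* hypothetical */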
diff --git a/drivers/platform/x86/dell/dell_rbu.c b/drivers/platform/x86/dell/dell_rbu.c
index 2a140d1c656a..403df9bd9522 100644
--- a/drivers/platform/x86/dell/dell_rbu.c
+++ b/drivers/platform/x86/dell/dell_rbu.c
@@ -232,7 +232,8 @@ static int packetize_data(const u8 *data, size_t length)
done = 1;
}
- if ((rc = create_packet(temp, packet_length)))
+ rc = create_packet(temp, packet_length);
+ if (rc)
return rc;
pr_debug("%p:%td\n", temp, (end - temp));
@@ -276,7 +277,7 @@ static int do_packet_read(char *data, struct packet_data *newpacket,
return bytes_copied;
}
-static int packet_read_list(char *data, size_t * pread_length)
+static int packet_read_list(char *data, size_t *pread_length)
{
struct packet_data *newpacket;
int temp_count = 0;
@@ -445,7 +446,8 @@ static ssize_t read_packet_data(char *buffer, loff_t pos, size_t count)
bytes_left = rbu_data.imagesize - pos;
data_length = min(bytes_left, count);
- if ((retval = packet_read_list(ptempBuf, &data_length)) < 0)
+ retval = packet_read_list(ptempBuf, &data_length);
+ if (retval < 0)
goto read_rbu_data_exit;
if ((pos + count) > rbu_data.imagesize) {
diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c
index db5fdee2109c..8b3533d6ba09 100644
--- a/drivers/platform/x86/hp/hp-wmi.c
+++ b/drivers/platform/x86/hp/hp-wmi.c
@@ -92,9 +92,9 @@ static const char * const victus_thermal_profile_boards[] = {
"8A25"
};
-/* DMI Board names of Victus 16-s1000 laptops */
+/* DMI Board names of Victus 16-r1000 and Victus 16-s1000 laptops */
static const char * const victus_s_thermal_profile_boards[] = {
- "8C9C"
+ "8C99", "8C9C"
};
enum hp_wmi_radio {
@@ -122,6 +122,7 @@ enum hp_wmi_event_ids {
HPWMI_BATTERY_CHARGE_PERIOD = 0x10,
HPWMI_SANITIZATION_MODE = 0x17,
HPWMI_CAMERA_TOGGLE = 0x1A,
+ HPWMI_FN_P_HOTKEY = 0x1B,
HPWMI_OMEN_KEY = 0x1D,
HPWMI_SMART_EXPERIENCE_APP = 0x21,
};
@@ -981,6 +982,9 @@ static void hp_wmi_notify(union acpi_object *obj, void *context)
key_code, 1, true))
pr_info("Unknown key code - 0x%x\n", key_code);
break;
+ case HPWMI_FN_P_HOTKEY:
+ platform_profile_cycle();
+ break;
case HPWMI_OMEN_KEY:
if (event_data) /* Only should be true for HP Omen */
key_code = event_data;
diff --git a/drivers/platform/x86/intel/int0002_vgpio.c b/drivers/platform/x86/intel/int0002_vgpio.c
index 9bc24ed19c64..6f5629dc3f8d 100644
--- a/drivers/platform/x86/intel/int0002_vgpio.c
+++ b/drivers/platform/x86/intel/int0002_vgpio.c
@@ -193,7 +193,7 @@ static int int0002_probe(struct platform_device *pdev)
chip->parent = dev;
chip->owner = THIS_MODULE;
chip->get = int0002_gpio_get;
- chip->set_rv = int0002_gpio_set;
+ chip->set = int0002_gpio_set;
chip->direction_input = int0002_gpio_get;
chip->direction_output = int0002_gpio_direction_output;
chip->base = -1;
diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c
index 4c0aed6e626f..1505fc3ef7a8 100644
--- a/drivers/platform/x86/intel/int3472/discrete.c
+++ b/drivers/platform/x86/intel/int3472/discrete.c
@@ -129,6 +129,7 @@ skl_int3472_gpiod_get_from_temp_lookup(struct int3472_discrete_device *int3472,
* @hid: The ACPI HID of the device without the instance number e.g. INT347E
* @type_from: The GPIO type from ACPI ?SDT
* @type_to: The assigned GPIO type, typically same as @type_from
+ * @enable_time_us: Enable time in usec for GPIOs mapped to regulators
* @con_id: The name of the GPIO for the device
* @polarity_low: GPIO_ACTIVE_LOW true if the @polarity_low is true,
* GPIO_ACTIVE_HIGH otherwise
@@ -138,18 +139,36 @@ struct int3472_gpio_map {
u8 type_from;
u8 type_to;
bool polarity_low;
+ unsigned int enable_time_us;
const char *con_id;
};
static const struct int3472_gpio_map int3472_gpio_map[] = {
- /* mt9m114 designs declare a powerdown pin which controls the regulators */
- { "INT33F0", INT3472_GPIO_TYPE_POWERDOWN, INT3472_GPIO_TYPE_POWER_ENABLE, false, "vdd" },
- /* ov7251 driver / DT-bindings expect "enable" as con_id for reset */
- { "INT347E", INT3472_GPIO_TYPE_RESET, INT3472_GPIO_TYPE_RESET, false, "enable" },
+ { /* mt9m114 designs declare a powerdown pin which controls the regulators */
+ .hid = "INT33F0",
+ .type_from = INT3472_GPIO_TYPE_POWERDOWN,
+ .type_to = INT3472_GPIO_TYPE_POWER_ENABLE,
+ .con_id = "vdd",
+ .enable_time_us = GPIO_REGULATOR_ENABLE_TIME,
+ },
+ { /* ov7251 driver / DT-bindings expect "enable" as con_id for reset */
+ .hid = "INT347E",
+ .type_from = INT3472_GPIO_TYPE_RESET,
+ .type_to = INT3472_GPIO_TYPE_RESET,
+ .con_id = "enable",
+ },
+ { /* ov08x40's handshake pin needs a 45 ms delay on some HP laptops */
+ .hid = "OVTI08F4",
+ .type_from = INT3472_GPIO_TYPE_HANDSHAKE,
+ .type_to = INT3472_GPIO_TYPE_HANDSHAKE,
+ .con_id = "dvdd",
+ .enable_time_us = 45 * USEC_PER_MSEC,
+ },
};
static void int3472_get_con_id_and_polarity(struct int3472_discrete_device *int3472, u8 *type,
- const char **con_id, unsigned long *gpio_flags)
+ const char **con_id, unsigned long *gpio_flags,
+ unsigned int *enable_time_us)
{
struct acpi_device *adev = int3472->sensor;
unsigned int i;
@@ -173,9 +192,12 @@ static void int3472_get_con_id_and_polarity(struct int3472_discrete_device *int3
*gpio_flags = int3472_gpio_map[i].polarity_low ?
GPIO_ACTIVE_LOW : GPIO_ACTIVE_HIGH;
*con_id = int3472_gpio_map[i].con_id;
+ *enable_time_us = int3472_gpio_map[i].enable_time_us;
return;
}
+ *enable_time_us = GPIO_REGULATOR_ENABLE_TIME;
+
switch (*type) {
case INT3472_GPIO_TYPE_RESET:
*con_id = "reset";
@@ -193,6 +215,10 @@ static void int3472_get_con_id_and_polarity(struct int3472_discrete_device *int3
*con_id = "privacy-led";
*gpio_flags = GPIO_ACTIVE_HIGH;
break;
+ case INT3472_GPIO_TYPE_HOTPLUG_DETECT:
+ *con_id = "hpd";
+ *gpio_flags = GPIO_ACTIVE_HIGH;
+ break;
case INT3472_GPIO_TYPE_POWER_ENABLE:
*con_id = "avdd";
*gpio_flags = GPIO_ACTIVE_HIGH;
@@ -200,6 +226,8 @@ static void int3472_get_con_id_and_polarity(struct int3472_discrete_device *int3
case INT3472_GPIO_TYPE_HANDSHAKE:
*con_id = "dvdd";
*gpio_flags = GPIO_ACTIVE_HIGH;
+ /* Setups using a handshake pin need a 25 ms enable delay */
+ *enable_time_us = 25 * USEC_PER_MSEC;
break;
default:
*con_id = "unknown";
@@ -223,6 +251,7 @@ static void int3472_get_con_id_and_polarity(struct int3472_discrete_device *int3
* 0x0b Power enable
* 0x0c Clock enable
* 0x0d Privacy LED
+ * 0x13 Hotplug detect
*
* There are some known platform specific quirks where that does not quite
* hold up; for example where a pin with type 0x01 (Power down) is mapped to
@@ -244,13 +273,15 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
void *data)
{
struct int3472_discrete_device *int3472 = data;
+ const char *second_sensor = NULL;
struct acpi_resource_gpio *agpio;
+ unsigned int enable_time_us;
u8 active_value, pin, type;
+ unsigned long gpio_flags;
union acpi_object *obj;
struct gpio_desc *gpio;
const char *err_msg;
const char *con_id;
- unsigned long gpio_flags;
int ret;
if (!acpi_gpio_get_io_resource(ares, &agpio))
@@ -273,7 +304,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
type = FIELD_GET(INT3472_GPIO_DSM_TYPE, obj->integer.value);
- int3472_get_con_id_and_polarity(int3472, &type, &con_id, &gpio_flags);
+ int3472_get_con_id_and_polarity(int3472, &type, &con_id, &gpio_flags, &enable_time_us);
pin = FIELD_GET(INT3472_GPIO_DSM_PIN, obj->integer.value);
/* Pin field is not really used under Windows and wraps around at 8 bits */
@@ -292,6 +323,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
switch (type) {
case INT3472_GPIO_TYPE_RESET:
case INT3472_GPIO_TYPE_POWERDOWN:
+ case INT3472_GPIO_TYPE_HOTPLUG_DETECT:
ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, con_id, gpio_flags);
if (ret)
err_msg = "Failed to map GPIO pin to sensor\n";
@@ -322,21 +354,13 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
break;
case INT3472_GPIO_TYPE_POWER_ENABLE:
- ret = skl_int3472_register_regulator(int3472, gpio,
- GPIO_REGULATOR_ENABLE_TIME,
- con_id,
- int3472->quirks.avdd_second_sensor);
- if (ret)
- err_msg = "Failed to map power-enable to sensor\n";
-
- break;
+ second_sensor = int3472->quirks.avdd_second_sensor;
+ fallthrough;
case INT3472_GPIO_TYPE_HANDSHAKE:
- /* Setups using a handshake pin need 25 ms enable delay */
- ret = skl_int3472_register_regulator(int3472, gpio,
- 25 * USEC_PER_MSEC,
- con_id, NULL);
+ ret = skl_int3472_register_regulator(int3472, gpio, enable_time_us,
+ con_id, second_sensor);
if (ret)
- err_msg = "Failed to map handshake to sensor\n";
+ err_msg = "Failed to register regulator\n";
break;
default: /* Never reached */
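The consolidated switch above leans on the kernel's fallthrough pseudo-keyword: the power-enable case only seeds second_sensor, then shares a single regulator-registration call with the handshake case, using the per-entry enable_time_us resolved earlier instead of two hard-coded delays. A minimal sketch of the pattern, with illustrative names rather than the driver's own:

	switch (type) {
	case TYPE_POWER_ENABLE:
		second_sensor = quirks_second_sensor;	/* may be NULL */
		fallthrough;				/* share the registration below */
	case TYPE_HANDSHAKE:
		ret = register_regulator(gpio, enable_time_us, con_id, second_sensor);
		break;
	}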
diff --git a/drivers/platform/x86/intel/pmc/Makefile b/drivers/platform/x86/intel/pmc/Makefile
index 5f68c8503a56..bb960c8721d7 100644
--- a/drivers/platform/x86/intel/pmc/Makefile
+++ b/drivers/platform/x86/intel/pmc/Makefile
@@ -4,7 +4,7 @@
#
intel_pmc_core-y := core.o spt.o cnp.o icl.o \
- tgl.o adl.o mtl.o arl.o lnl.o ptl.o
+ tgl.o adl.o mtl.o arl.o lnl.o ptl.o wcl.o
obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core.o
intel_pmc_core_pltdrv-y := pltdrv.o
obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core_pltdrv.o
diff --git a/drivers/platform/x86/intel/pmc/arl.c b/drivers/platform/x86/intel/pmc/arl.c
index 9d66d65e7577..17ad87b392ab 100644
--- a/drivers/platform/x86/intel/pmc/arl.c
+++ b/drivers/platform/x86/intel/pmc/arl.c
@@ -725,9 +725,11 @@ struct pmc_dev_info arl_pmc_dev = {
.dmu_guid = ARL_PMT_DMU_GUID,
.regmap_list = arl_pmc_info_list,
.map = &arl_socs_reg_map,
+ .sub_req_show = &pmc_core_substate_req_regs_fops,
.suspend = cnl_suspend,
.resume = arl_resume,
.init = arl_core_init,
+ .sub_req = pmc_core_pmt_get_lpm_req,
};
struct pmc_dev_info arl_h_pmc_dev = {
@@ -735,7 +737,9 @@ struct pmc_dev_info arl_h_pmc_dev = {
.dmu_guid = ARL_PMT_DMU_GUID,
.regmap_list = arl_pmc_info_list,
.map = &mtl_socm_reg_map,
+ .sub_req_show = &pmc_core_substate_req_regs_fops,
.suspend = cnl_suspend,
.resume = arl_h_resume,
.init = arl_h_core_init,
+ .sub_req = pmc_core_pmt_get_lpm_req,
};
diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c
index 540cd2fb0673..ac3d19ae8c56 100644
--- a/drivers/platform/x86/intel/pmc/core.c
+++ b/drivers/platform/x86/intel/pmc/core.c
@@ -11,6 +11,11 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+enum header_type {
+ HEADER_STATUS,
+ HEADER_VALUE,
+};
+
#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
@@ -828,19 +833,86 @@ static int pmc_core_substate_l_sts_regs_show(struct seq_file *s, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_l_sts_regs);
-static void pmc_core_substate_req_header_show(struct seq_file *s, int pmc_index)
+static void pmc_core_substate_req_header_show(struct seq_file *s, int pmc_index,
+ enum header_type type)
{
struct pmc_dev *pmcdev = s->private;
int mode;
- seq_printf(s, "%30s |", "Element");
+ seq_printf(s, "%40s |", "Element");
pmc_for_each_mode(mode, pmcdev)
seq_printf(s, " %9s |", pmc_lpm_modes[mode]);
- seq_printf(s, " %9s |", "Status");
- seq_printf(s, " %11s |\n", "Live Status");
+ if (type == HEADER_STATUS) {
+ seq_printf(s, " %9s |", "Status");
+ seq_printf(s, " %11s |\n", "Live Status");
+ } else {
+ seq_printf(s, " %9s |\n", "Value");
+ }
}
+static int pmc_core_substate_blk_req_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ unsigned int pmc_idx;
+
+ for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); pmc_idx++) {
+ const struct pmc_bit_map **maps;
+ unsigned int arr_size, r_idx;
+ u32 offset, counter;
+ u32 *lpm_req_regs;
+ struct pmc *pmc;
+
+ pmc = pmcdev->pmcs[pmc_idx];
+ if (!pmc || !pmc->lpm_req_regs)
+ continue;
+
+ lpm_req_regs = pmc->lpm_req_regs;
+ maps = pmc->map->s0ix_blocker_maps;
+ offset = pmc->map->s0ix_blocker_offset;
+ arr_size = pmc_core_lpm_get_arr_size(maps);
+
+ /* Display the header */
+ pmc_core_substate_req_header_show(s, pmc_idx, HEADER_VALUE);
+
+ for (r_idx = 0; r_idx < arr_size; r_idx++) {
+ const struct pmc_bit_map *map;
+
+ for (map = maps[r_idx]; map->name; map++) {
+ int mode;
+
+ if (!map->blk)
+ continue;
+
+ counter = pmc_core_reg_read(pmc, offset);
+ seq_printf(s, "pmc%u: %34s |", pmc_idx, map->name);
+ pmc_for_each_mode(mode, pmcdev) {
+ bool required = *lpm_req_regs & BIT(mode);
+
+ seq_printf(s, " %9s |", required ? "Required" : " ");
+ }
+ seq_printf(s, " %9u |\n", counter);
+ offset += map->blk * S0IX_BLK_SIZE;
+ lpm_req_regs++;
+ }
+ }
+ }
+ return 0;
+}
+
+static int pmc_core_substate_blk_req_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pmc_core_substate_blk_req_show, inode->i_private);
+}
+
+const struct file_operations pmc_core_substate_blk_req_fops = {
+ .owner = THIS_MODULE,
+ .open = pmc_core_substate_blk_req_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
@@ -872,7 +944,7 @@ static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
continue;
/* Display the header */
- pmc_core_substate_req_header_show(s, pmc_index);
+ pmc_core_substate_req_header_show(s, pmc_index, HEADER_STATUS);
/* Loop over maps */
for (mp = 0; mp < num_maps; mp++) {
@@ -910,7 +982,7 @@ static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
}
/* Display the element name in the first column */
- seq_printf(s, "pmc%d: %26s |", pmc_index, map[i].name);
+ seq_printf(s, "pmc%d: %34s |", pmc_index, map[i].name);
/* Loop over the enabled states and display if required */
pmc_for_each_mode(mode, pmcdev) {
@@ -931,7 +1003,19 @@ static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
}
return 0;
}
-DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_req_regs);
+
+static int pmc_core_substate_req_regs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pmc_core_substate_req_regs_show, inode->i_private);
+}
+
+const struct file_operations pmc_core_substate_req_regs_fops = {
+ .owner = THIS_MODULE,
+ .open = pmc_core_substate_req_regs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
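DEFINE_SHOW_ATTRIBUTE(), which this hunk replaces, expands to a static open helper plus a static file_operations, so the resulting symbol is confined to one translation unit. Open-coding a non-static equivalent here is what lets core.h declare pmc_core_substate_req_regs_fops extern, so that the per-platform pmc_dev_info structures can point at it through sub_req_show.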
static unsigned int pmc_core_get_crystal_freq(void)
{
@@ -1160,7 +1244,7 @@ void pmc_core_get_low_power_modes(struct pmc_dev *pmcdev)
for (mode = 0; mode < LPM_MAX_NUM_MODES; mode++)
pri_order[mode_order[mode]] = mode;
else
- dev_warn(&pmcdev->pdev->dev,
+ dev_dbg(&pmcdev->pdev->dev,
"Assuming a default substate order for this platform\n");
/*
@@ -1264,7 +1348,7 @@ static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
debugfs_remove_recursive(pmcdev->dbgfs_dir);
}
-static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
+static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info)
{
struct pmc *primary_pmc = pmcdev->pmcs[PMC_IDX_MAIN];
struct dentry *dir;
@@ -1331,7 +1415,7 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
if (primary_pmc->lpm_req_regs) {
debugfs_create_file("substate_requirements", 0444,
pmcdev->dbgfs_dir, pmcdev,
- &pmc_core_substate_req_regs_fops);
+ pmc_dev_info->sub_req_show);
}
if (primary_pmc->map->pson_residency_offset && pmc_core_is_pson_residency_enabled(pmcdev)) {
@@ -1399,36 +1483,22 @@ static u32 pmc_core_find_guid(struct pmc_info *list, const struct pmc_reg_map *m
* +----+---------------------------------------------------------+
*
*/
-static int pmc_core_get_lpm_req(struct pmc_dev *pmcdev, struct pmc *pmc, struct pci_dev *pcidev)
+int pmc_core_pmt_get_lpm_req(struct pmc_dev *pmcdev, struct pmc *pmc, struct telem_endpoint *ep)
{
- struct telem_endpoint *ep;
const u8 *lpm_indices;
int num_maps, mode_offset = 0;
int ret, mode;
int lpm_size;
- u32 guid;
lpm_indices = pmc->map->lpm_reg_index;
num_maps = pmc->map->lpm_num_maps;
lpm_size = LPM_MAX_NUM_MODES * num_maps;
- guid = pmc_core_find_guid(pmcdev->regmap_list, pmc->map);
- if (!guid)
- return -ENXIO;
-
- ep = pmt_telem_find_and_register_endpoint(pcidev, guid, 0);
- if (IS_ERR(ep)) {
- dev_dbg(&pmcdev->pdev->dev, "couldn't get telem endpoint %pe", ep);
- return -EPROBE_DEFER;
- }
-
pmc->lpm_req_regs = devm_kzalloc(&pmcdev->pdev->dev,
lpm_size * sizeof(u32),
GFP_KERNEL);
- if (!pmc->lpm_req_regs) {
- ret = -ENOMEM;
- goto unregister_ep;
- }
+ if (!pmc->lpm_req_regs)
+ return -ENOMEM;
mode_offset = LPM_HEADER_OFFSET + LPM_MODE_OFFSET;
pmc_for_each_mode(mode, pmcdev) {
@@ -1442,34 +1512,74 @@ static int pmc_core_get_lpm_req(struct pmc_dev *pmcdev, struct pmc *pmc, struct
if (ret) {
dev_err(&pmcdev->pdev->dev,
"couldn't read Low Power Mode requirements: %d\n", ret);
- goto unregister_ep;
+ return ret;
}
++req_offset;
}
mode_offset += LPM_REG_COUNT + LPM_MODE_OFFSET;
}
+ return ret;
+}
+
+int pmc_core_pmt_get_blk_sub_req(struct pmc_dev *pmcdev, struct pmc *pmc,
+ struct telem_endpoint *ep)
+{
+ u32 num_blocker, sample_offset;
+ unsigned int index;
+ u32 *req_offset;
+ int ret;
-unregister_ep:
- pmt_telem_unregister_endpoint(ep);
+ num_blocker = pmc->map->num_s0ix_blocker;
+ sample_offset = pmc->map->blocker_req_offset;
- return ret;
+ pmc->lpm_req_regs = devm_kcalloc(&pmcdev->pdev->dev, num_blocker,
+ sizeof(u32), GFP_KERNEL);
+ if (!pmc->lpm_req_regs)
+ return -ENOMEM;
+
+ req_offset = pmc->lpm_req_regs;
+ for (index = 0; index < num_blocker; index++, req_offset++) {
+ ret = pmt_telem_read32(ep, index + sample_offset, req_offset, 1);
+ if (ret) {
+ dev_err(&pmcdev->pdev->dev,
+ "couldn't read Low Power Mode requirements: %d\n", ret);
+ return ret;
+ }
+ }
+ return 0;
}
-static int pmc_core_ssram_get_lpm_reqs(struct pmc_dev *pmcdev, int func)
+static int pmc_core_get_telem_info(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info)
{
struct pci_dev *pcidev __free(pci_dev_put) = NULL;
+ struct telem_endpoint *ep;
unsigned int i;
+ u32 guid;
int ret;
- pcidev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(20, func));
+ pcidev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(20, pmc_dev_info->pci_func));
if (!pcidev)
return -ENODEV;
for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
- if (!pmcdev->pmcs[i])
+ struct pmc *pmc;
+
+ pmc = pmcdev->pmcs[i];
+ if (!pmc)
continue;
- ret = pmc_core_get_lpm_req(pmcdev, pmcdev->pmcs[i], pcidev);
+ guid = pmc_core_find_guid(pmcdev->regmap_list, pmc->map);
+ if (!guid)
+ return -ENXIO;
+
+ ep = pmt_telem_find_and_register_endpoint(pcidev, guid, 0);
+ if (IS_ERR(ep)) {
+ dev_dbg(&pmcdev->pdev->dev, "couldn't get telem endpoint %pe", ep);
+ return -EPROBE_DEFER;
+ }
+
+ ret = pmc_dev_info->sub_req(pmcdev, pmc, ep);
+ pmt_telem_unregister_endpoint(ep);
if (ret)
return ret;
}
@@ -1583,7 +1693,7 @@ int generic_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info)
pmc_core_punit_pmt_init(pmcdev, pmc_dev_info->dmu_guid);
if (ssram) {
- ret = pmc_core_ssram_get_lpm_reqs(pmcdev, pmc_dev_info->pci_func);
+ ret = pmc_core_get_telem_info(pmcdev, pmc_dev_info);
if (ret)
goto unmap_regbase;
}
@@ -1625,12 +1735,14 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
X86_MATCH_VFM(INTEL_RAPTORLAKE_P, &tgl_l_pmc_dev),
X86_MATCH_VFM(INTEL_RAPTORLAKE, &adl_pmc_dev),
X86_MATCH_VFM(INTEL_RAPTORLAKE_S, &adl_pmc_dev),
+ X86_MATCH_VFM(INTEL_BARTLETTLAKE, &adl_pmc_dev),
X86_MATCH_VFM(INTEL_METEORLAKE_L, &mtl_pmc_dev),
X86_MATCH_VFM(INTEL_ARROWLAKE, &arl_pmc_dev),
X86_MATCH_VFM(INTEL_ARROWLAKE_H, &arl_h_pmc_dev),
X86_MATCH_VFM(INTEL_ARROWLAKE_U, &arl_h_pmc_dev),
X86_MATCH_VFM(INTEL_LUNARLAKE_M, &lnl_pmc_dev),
X86_MATCH_VFM(INTEL_PANTHERLAKE_L, &ptl_pmc_dev),
+ X86_MATCH_VFM(INTEL_WILDCATLAKE_L, &wcl_pmc_dev),
{}
};
@@ -1757,7 +1869,7 @@ static int pmc_core_probe(struct platform_device *pdev)
pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit(primary_pmc);
pmc_core_do_dmi_quirks(primary_pmc);
- pmc_core_dbgfs_register(pmcdev);
+ pmc_core_dbgfs_register(pmcdev, pmc_dev_info);
pm_report_max_hw_sleep(FIELD_MAX(SLP_S0_RES_COUNTER_MASK) *
pmc_core_adjust_slp_s0_step(primary_pmc, 1));
diff --git a/drivers/platform/x86/intel/pmc/core.h b/drivers/platform/x86/intel/pmc/core.h
index 4a94a4ee031e..f4dadb696a31 100644
--- a/drivers/platform/x86/intel/pmc/core.h
+++ b/drivers/platform/x86/intel/pmc/core.h
@@ -297,6 +297,12 @@ enum ppfear_regs {
#define PTL_PMC_LTR_CUR_ASLT 0x1C28
#define PTL_PMC_LTR_CUR_PLT 0x1C2C
#define PTL_PCD_PMC_MMIO_REG_LEN 0x31A8
+#define PTL_NUM_S0IX_BLOCKER 106
+#define PTL_BLK_REQ_OFFSET 55
+
+/* Wildcat Lake */
+#define WCL_PMC_LTR_RESERVED 0x1B64
+#define WCL_PCD_PMC_MMIO_REG_LEN 0x3178
/* SSRAM PMC Device ID */
/* LNL */
@@ -306,6 +312,9 @@ enum ppfear_regs {
#define PMC_DEVID_PTL_PCDH 0xe37f
#define PMC_DEVID_PTL_PCDP 0xe47f
+/* WCL */
+#define PMC_DEVID_WCL_PCDN 0x4d7f
+
/* ARL */
#define PMC_DEVID_ARL_SOCM 0x777f
#define PMC_DEVID_ARL_SOCS 0xae7f
@@ -344,6 +353,8 @@ struct pmc_bit_map {
* @pm_read_disable_bit: Bit index to read PMC_READ_DISABLE
* @slps0_dbg_offset: PWRMBASE offset to SLP_S0_DEBUG_REG*
* @s0ix_blocker_offset: PWRMBASE offset to S0ix blocker counter
+ * @num_s0ix_blocker: Number of S0ix blockers
+ * @blocker_req_offset: Telemetry offset to S0ix blocker low power mode substate requirement table
*
* Each PCH has unique set of register offsets and bit indexes. This structure
* captures them to have a common implementation.
@@ -369,6 +380,8 @@ struct pmc_reg_map {
const u32 ltr_ignore_max;
const u32 pm_vric1_offset;
const u32 s0ix_blocker_offset;
+ const u32 num_s0ix_blocker;
+ const u32 blocker_req_offset;
/* Low Power Mode registers */
const int lpm_num_maps;
const int lpm_num_modes;
@@ -474,18 +487,22 @@ enum pmc_index {
* SSRAM support.
* @map: Pointer to a pmc_reg_map struct that contains platform
* specific attributes of the primary PMC
+ * @sub_req_show: File operations to show substate requirements
* @suspend: Function to perform platform specific suspend
* @resume: Function to perform platform specific resume
* @init: Function to perform platform specific init action
+ * @sub_req: Function to retrieve low power mode substate requirements
*/
struct pmc_dev_info {
u8 pci_func;
u32 dmu_guid;
struct pmc_info *regmap_list;
const struct pmc_reg_map *map;
+ const struct file_operations *sub_req_show;
void (*suspend)(struct pmc_dev *pmcdev);
int (*resume)(struct pmc_dev *pmcdev);
int (*init)(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info);
+ int (*sub_req)(struct pmc_dev *pmcdev, struct pmc *pmc, struct telem_endpoint *ep);
};
extern const struct pmc_bit_map msr_map[];
@@ -505,6 +522,9 @@ extern const struct pmc_bit_map mtl_socm_vnn_misc_status_map[];
extern const struct pmc_bit_map mtl_socm_signal_status_map[];
extern const struct pmc_reg_map mtl_socm_reg_map;
extern const struct pmc_reg_map mtl_ioep_reg_map;
+extern const struct pmc_bit_map ptl_pcdp_clocksource_status_map[];
+extern const struct pmc_bit_map ptl_pcdp_vnn_req_status_3_map[];
+extern const struct pmc_bit_map ptl_pcdp_signal_status_map[];
void pmc_core_get_tgl_lpm_reqs(struct platform_device *pdev);
int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore);
@@ -528,9 +548,16 @@ extern struct pmc_dev_info arl_pmc_dev;
extern struct pmc_dev_info arl_h_pmc_dev;
extern struct pmc_dev_info lnl_pmc_dev;
extern struct pmc_dev_info ptl_pmc_dev;
+extern struct pmc_dev_info wcl_pmc_dev;
void cnl_suspend(struct pmc_dev *pmcdev);
int cnl_resume(struct pmc_dev *pmcdev);
+int pmc_core_pmt_get_lpm_req(struct pmc_dev *pmcdev, struct pmc *pmc, struct telem_endpoint *ep);
+int pmc_core_pmt_get_blk_sub_req(struct pmc_dev *pmcdev, struct pmc *pmc,
+ struct telem_endpoint *ep);
+
+extern const struct file_operations pmc_core_substate_req_regs_fops;
+extern const struct file_operations pmc_core_substate_blk_req_fops;
#define pmc_for_each_mode(mode, pmcdev) \
for (unsigned int __i = 0, __cond; \
diff --git a/drivers/platform/x86/intel/pmc/lnl.c b/drivers/platform/x86/intel/pmc/lnl.c
index da513c234714..6fa027e7071f 100644
--- a/drivers/platform/x86/intel/pmc/lnl.c
+++ b/drivers/platform/x86/intel/pmc/lnl.c
@@ -13,6 +13,10 @@
#include "core.h"
+#define SOCM_LPM_REQ_GUID 0x15099748
+
+static const u8 LNL_LPM_REG_INDEX[] = {0, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20};
+
static const struct pmc_bit_map lnl_ltr_show_map[] = {
{"SOUTHPORT_A", CNP_PMC_LTR_SPA},
{"SOUTHPORT_B", CNP_PMC_LTR_SPB},
@@ -528,6 +532,16 @@ static const struct pmc_reg_map lnl_socm_reg_map = {
.lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET,
.s0ix_blocker_maps = lnl_blk_maps,
.s0ix_blocker_offset = LNL_S0IX_BLOCKER_OFFSET,
+ .lpm_reg_index = LNL_LPM_REG_INDEX,
+};
+
+static struct pmc_info lnl_pmc_info_list[] = {
+ {
+ .guid = SOCM_LPM_REQ_GUID,
+ .devid = PMC_DEVID_LNL_SOCM,
+ .map = &lnl_socm_reg_map,
+ },
+ {}
};
#define LNL_NPU_PCI_DEV 0x643e
@@ -557,8 +571,12 @@ static int lnl_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_in
}
struct pmc_dev_info lnl_pmc_dev = {
+ .pci_func = 2,
+ .regmap_list = lnl_pmc_info_list,
.map = &lnl_socm_reg_map,
+ .sub_req_show = &pmc_core_substate_req_regs_fops,
.suspend = cnl_suspend,
.resume = lnl_resume,
.init = lnl_core_init,
+ .sub_req = pmc_core_pmt_get_lpm_req,
};
diff --git a/drivers/platform/x86/intel/pmc/mtl.c b/drivers/platform/x86/intel/pmc/mtl.c
index faa13a7ee688..0b87e10f864e 100644
--- a/drivers/platform/x86/intel/pmc/mtl.c
+++ b/drivers/platform/x86/intel/pmc/mtl.c
@@ -997,7 +997,9 @@ struct pmc_dev_info mtl_pmc_dev = {
.dmu_guid = MTL_PMT_DMU_GUID,
.regmap_list = mtl_pmc_info_list,
.map = &mtl_socm_reg_map,
+ .sub_req_show = &pmc_core_substate_req_regs_fops,
.suspend = cnl_suspend,
.resume = mtl_resume,
.init = mtl_core_init,
+ .sub_req = pmc_core_pmt_get_lpm_req,
};
diff --git a/drivers/platform/x86/intel/pmc/ptl.c b/drivers/platform/x86/intel/pmc/ptl.c
index 394515af60d6..1b35b84e06fa 100644
--- a/drivers/platform/x86/intel/pmc/ptl.c
+++ b/drivers/platform/x86/intel/pmc/ptl.c
@@ -10,6 +10,17 @@
#include "core.h"
+/* PMC SSRAM PMT Telemetry GUIDS */
+#define PCDP_LPM_REQ_GUID 0x47179370
+
+/*
+ * Die Mapping to Product.
+ * Product PCDDie
+ * PTL-H PCD-H
+ * PTL-P PCD-P
+ * PTL-U PCD-P
+ */
+
static const struct pmc_bit_map ptl_pcdp_pfear_map[] = {
{"PMC_0", BIT(0)},
{"FUSE_OSSE", BIT(1)},
@@ -162,7 +173,7 @@ static const struct pmc_bit_map ptl_pcdp_ltr_show_map[] = {
{}
};
-static const struct pmc_bit_map ptl_pcdp_clocksource_status_map[] = {
+const struct pmc_bit_map ptl_pcdp_clocksource_status_map[] = {
{"AON2_OFF_STS", BIT(0), 1},
{"AON3_OFF_STS", BIT(1), 0},
{"AON4_OFF_STS", BIT(2), 1},
@@ -382,7 +393,7 @@ static const struct pmc_bit_map ptl_pcdp_vnn_req_status_2_map[] = {
{}
};
-static const struct pmc_bit_map ptl_pcdp_vnn_req_status_3_map[] = {
+const struct pmc_bit_map ptl_pcdp_vnn_req_status_3_map[] = {
{"DTS0_VNN_REQ_STS", BIT(7), 0},
{"GPIOCOM5_VNN_REQ_STS", BIT(11), 1},
{}
@@ -421,7 +432,7 @@ static const struct pmc_bit_map ptl_pcdp_vnn_misc_status_map[] = {
{}
};
-static const struct pmc_bit_map ptl_pcdp_signal_status_map[] = {
+const struct pmc_bit_map ptl_pcdp_signal_status_map[] = {
{"LSX_Wake0_STS", BIT(0), 0},
{"LSX_Wake1_STS", BIT(1), 0},
{"LSX_Wake2_STS", BIT(2), 0},
@@ -515,6 +526,22 @@ static const struct pmc_reg_map ptl_pcdp_reg_map = {
.lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET,
.s0ix_blocker_maps = ptl_pcdp_blk_maps,
.s0ix_blocker_offset = LNL_S0IX_BLOCKER_OFFSET,
+ .num_s0ix_blocker = PTL_NUM_S0IX_BLOCKER,
+ .blocker_req_offset = PTL_BLK_REQ_OFFSET,
+};
+
+static struct pmc_info ptl_pmc_info_list[] = {
+ {
+ .guid = PCDP_LPM_REQ_GUID,
+ .devid = PMC_DEVID_PTL_PCDH,
+ .map = &ptl_pcdp_reg_map,
+ },
+ {
+ .guid = PCDP_LPM_REQ_GUID,
+ .devid = PMC_DEVID_PTL_PCDP,
+ .map = &ptl_pcdp_reg_map,
+ },
+ {}
};
#define PTL_NPU_PCI_DEV 0xb03e
@@ -543,8 +570,12 @@ static int ptl_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_in
}
struct pmc_dev_info ptl_pmc_dev = {
+ .pci_func = 2,
+ .regmap_list = ptl_pmc_info_list,
.map = &ptl_pcdp_reg_map,
+ .sub_req_show = &pmc_core_substate_blk_req_fops,
.suspend = cnl_suspend,
.resume = ptl_resume,
.init = ptl_core_init,
+ .sub_req = pmc_core_pmt_get_blk_sub_req,
};
diff --git a/drivers/platform/x86/intel/pmc/ssram_telemetry.c b/drivers/platform/x86/intel/pmc/ssram_telemetry.c
index 93579152188e..03fad9331fc0 100644
--- a/drivers/platform/x86/intel/pmc/ssram_telemetry.c
+++ b/drivers/platform/x86/intel/pmc/ssram_telemetry.c
@@ -190,6 +190,7 @@ static const struct pci_device_id intel_pmc_ssram_telemetry_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PMC_DEVID_LNL_SOCM) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PMC_DEVID_PTL_PCDH) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PMC_DEVID_PTL_PCDP) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PMC_DEVID_WCL_PCDN) },
{ }
};
MODULE_DEVICE_TABLE(pci, intel_pmc_ssram_telemetry_pci_ids);
diff --git a/drivers/platform/x86/intel/pmc/tgl.c b/drivers/platform/x86/intel/pmc/tgl.c
index 02e731ed3391..fc5b4cacc1c6 100644
--- a/drivers/platform/x86/intel/pmc/tgl.c
+++ b/drivers/platform/x86/intel/pmc/tgl.c
@@ -273,8 +273,8 @@ void pmc_core_get_tgl_lpm_reqs(struct platform_device *pdev)
addr = (u32 *)out_obj->buffer.pointer;
- lpm_req_regs = devm_kzalloc(&pdev->dev, lpm_size * sizeof(u32),
- GFP_KERNEL);
+ lpm_req_regs = devm_kcalloc(&pdev->dev, lpm_size, sizeof(u32),
+ GFP_KERNEL);
if (!lpm_req_regs)
goto free_acpi_obj;
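The devm_kzalloc() to devm_kcalloc() change above is the standard hardening idiom: calloc-style allocators verify the count * size multiplication and return NULL on overflow instead of quietly allocating a short buffer. A minimal before/after sketch:

	/* open-coded multiply: can wrap for a large lpm_size */
	lpm_req_regs = devm_kzalloc(&pdev->dev, lpm_size * sizeof(u32), GFP_KERNEL);

	/* overflow-checked: fails cleanly rather than wrapping */
	lpm_req_regs = devm_kcalloc(&pdev->dev, lpm_size, sizeof(u32), GFP_KERNEL);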
diff --git a/drivers/platform/x86/intel/pmc/wcl.c b/drivers/platform/x86/intel/pmc/wcl.c
new file mode 100644
index 000000000000..85e90a639e65
--- /dev/null
+++ b/drivers/platform/x86/intel/pmc/wcl.c
@@ -0,0 +1,486 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file contains platform specific structure definitions
+ * and init function used by Wildcat Lake PCH.
+ *
+ * Copyright (c) 2025, Intel Corporation.
+ */
+
+#include <linux/bits.h>
+#include <linux/pci.h>
+
+#include "core.h"
+
+static const struct pmc_bit_map wcl_pcdn_pfear_map[] = {
+ {"PMC_0", BIT(0)},
+ {"FUSE_OSSE", BIT(1)},
+ {"ESPISPI", BIT(2)},
+ {"XHCI", BIT(3)},
+ {"SPA", BIT(4)},
+ {"RSVD", BIT(5)},
+ {"MPFPW2", BIT(6)},
+ {"GBE", BIT(7)},
+
+ {"SBR16B21", BIT(0)},
+ {"SBR16B5", BIT(1)},
+ {"SBR8B1", BIT(2)},
+ {"SBR8B0", BIT(3)},
+ {"P2SB0", BIT(4)},
+ {"D2D_DISP_1", BIT(5)},
+ {"LPSS", BIT(6)},
+ {"LPC", BIT(7)},
+
+ {"SMB", BIT(0)},
+ {"ISH", BIT(1)},
+ {"DBG_SBR16B", BIT(2)},
+ {"NPK_0", BIT(3)},
+ {"D2D_NOC_1", BIT(4)},
+ {"FIA_P", BIT(5)},
+ {"FUSE", BIT(6)},
+ {"DBG_PSF", BIT(7)},
+
+ {"DISP_PGA1", BIT(0)},
+ {"XDCI", BIT(1)},
+ {"EXI", BIT(2)},
+ {"CSE", BIT(3)},
+ {"KVMCC", BIT(4)},
+ {"PMT", BIT(5)},
+ {"CLINK", BIT(6)},
+ {"PTIO", BIT(7)},
+
+ {"USBR0", BIT(0)},
+ {"SBR16B22", BIT(1)},
+ {"SMT1", BIT(2)},
+ {"MPFPW1", BIT(3)},
+ {"SMS2", BIT(4)},
+ {"SMS1", BIT(5)},
+ {"CSMERTC", BIT(6)},
+ {"CSMEPSF", BIT(7)},
+
+ {"D2D_NOC_0", BIT(0)},
+ {"ESE", BIT(1)},
+ {"FIACPCB_P", BIT(2)},
+ {"RSVD", BIT(3)},
+ {"SBR8B2", BIT(4)},
+ {"OSSE_SMT1", BIT(5)},
+ {"D2D_DISP", BIT(6)},
+ {"P2SB1", BIT(7)},
+
+ {"U3FPW1", BIT(0)},
+ {"SBR16B3", BIT(1)},
+ {"PSF4", BIT(2)},
+ {"CNVI", BIT(3)},
+ {"UFSX2", BIT(4)},
+ {"ENDBG", BIT(5)},
+ {"DBC", BIT(6)},
+ {"SBRG", BIT(7)},
+
+ {"RSVD", BIT(0)},
+ {"NPK1", BIT(1)},
+ {"SBR16B7", BIT(2)},
+ {"SBR16B4", BIT(3)},
+ {"FIA_XG", BIT(4)},
+ {"PSF6", BIT(5)},
+ {"UFSPW1", BIT(6)},
+ {"FIA_U", BIT(7)},
+
+ {"PSF8", BIT(0)},
+ {"PSF0", BIT(1)},
+ {"RSVD", BIT(2)},
+ {"FIACPCB_U", BIT(3)},
+ {"TAM", BIT(4)},
+ {"SBR16B0", BIT(5)},
+ {"TBTLSX", BIT(6)},
+ {"THC0", BIT(7)},
+
+ {"THC1", BIT(0)},
+ {"PMC_1", BIT(1)},
+ {"FIACPCB_XG", BIT(2)},
+ {"TCSS", BIT(3)},
+ {"DISP_PGA", BIT(4)},
+ {"SBR16B20", BIT(5)},
+ {"SBR8B20", BIT(6)},
+ {"DBG_SBR", BIT(7)},
+
+ {"SPC", BIT(0)},
+ {"ACE_0", BIT(1)},
+ {"ACE_1", BIT(2)},
+ {"ACE_2", BIT(3)},
+ {"ACE_3", BIT(4)},
+ {"ACE_4", BIT(5)},
+ {"ACE_5", BIT(6)},
+ {"ACE_6", BIT(7)},
+
+ {"ACE_7", BIT(0)},
+ {"ACE_8", BIT(1)},
+ {"ACE_9", BIT(2)},
+ {"ACE_10", BIT(3)},
+ {"SBR16B2", BIT(4)},
+ {"SBR8B4", BIT(5)},
+ {"OSSE", BIT(6)},
+ {"SBR16B1", BIT(7)},
+ {}
+};
+
+static const struct pmc_bit_map *ext_wcl_pcdn_pfear_map[] = {
+ wcl_pcdn_pfear_map,
+ NULL
+};
+
+static const struct pmc_bit_map wcl_pcdn_ltr_show_map[] = {
+ {"SOUTHPORT_A", CNP_PMC_LTR_SPA},
+ {"RSVD", WCL_PMC_LTR_RESERVED},
+ {"SATA", CNP_PMC_LTR_SATA},
+ {"GIGABIT_ETHERNET", CNP_PMC_LTR_GBE},
+ {"XHCI", CNP_PMC_LTR_XHCI},
+ {"SOUTHPORT_F", ADL_PMC_LTR_SPF},
+ {"ME", CNP_PMC_LTR_ME},
+ {"SATA1", CNP_PMC_LTR_EVA},
+ {"SOUTHPORT_C", CNP_PMC_LTR_SPC},
+ {"HD_AUDIO", CNP_PMC_LTR_AZ},
+ {"CNV", CNP_PMC_LTR_CNV},
+ {"LPSS", CNP_PMC_LTR_LPSS},
+ {"SOUTHPORT_D", CNP_PMC_LTR_SPD},
+ {"SOUTHPORT_E", CNP_PMC_LTR_SPE},
+ {"SATA2", PTL_PMC_LTR_SATA2},
+ {"ESPI", CNP_PMC_LTR_ESPI},
+ {"SCC", CNP_PMC_LTR_SCC},
+ {"ISH", CNP_PMC_LTR_ISH},
+ {"UFSX2", CNP_PMC_LTR_UFSX2},
+ {"EMMC", CNP_PMC_LTR_EMMC},
+ {"WIGIG", ICL_PMC_LTR_WIGIG},
+ {"THC0", TGL_PMC_LTR_THC0},
+ {"THC1", TGL_PMC_LTR_THC1},
+ {"SOUTHPORT_G", MTL_PMC_LTR_SPG},
+ {"ESE", MTL_PMC_LTR_ESE},
+ {"IOE_PMC", MTL_PMC_LTR_IOE_PMC},
+ {"DMI3", ARL_PMC_LTR_DMI3},
+ {"OSSE", LNL_PMC_LTR_OSSE},
+
+ /* Below two cannot be used for LTR_IGNORE */
+ {"CURRENT_PLATFORM", PTL_PMC_LTR_CUR_PLT},
+ {"AGGREGATED_SYSTEM", PTL_PMC_LTR_CUR_ASLT},
+ {}
+};
+
+static const struct pmc_bit_map wcl_pcdn_power_gating_status_0_map[] = {
+ {"PMC_PGD0_PG_STS", BIT(0), 0},
+ {"FUSE_OSSE_PGD0_PG_STS", BIT(1), 0},
+ {"ESPISPI_PGD0_PG_STS", BIT(2), 0},
+ {"XHCI_PGD0_PG_STS", BIT(3), 1},
+ {"SPA_PGD0_PG_STS", BIT(4), 1},
+ {"RSVD_5", BIT(5), 0},
+ {"MPFPW2_PGD0_PG_STS", BIT(6), 0},
+ {"GBE_PGD0_PG_STS", BIT(7), 1},
+ {"SBR16B21_PGD0_PG_STS", BIT(8), 0},
+ {"SBR16B5_PGD0_PG_STS", BIT(9), 0},
+ {"SBR8B1_PGD0_PG_STS", BIT(10), 0},
+ {"SBR8B0_PGD0_PG_STS", BIT(11), 0},
+ {"P2SB0_PG_STS", BIT(12), 1},
+ {"D2D_DISP_PGD1_PG_STS", BIT(13), 0},
+ {"LPSS_PGD0_PG_STS", BIT(14), 1},
+ {"LPC_PGD0_PG_STS", BIT(15), 0},
+ {"SMB_PGD0_PG_STS", BIT(16), 0},
+ {"ISH_PGD0_PG_STS", BIT(17), 0},
+ {"DBG_SBR16B_PGD0_PG_STS", BIT(18), 0},
+ {"NPK_PGD0_PG_STS", BIT(19), 0},
+ {"D2D_NOC_PGD1_PG_STS", BIT(20), 0},
+ {"FIA_P_PGD0_PG_STS", BIT(21), 0},
+ {"FUSE_PGD0_PG_STS", BIT(22), 0},
+ {"DBG_PSF_PGD0_PG_STS", BIT(23), 0},
+ {"DISP_PGA1_PGD0_PG_STS", BIT(24), 0},
+ {"XDCI_PGD0_PG_STS", BIT(25), 1},
+ {"EXI_PGD0_PG_STS", BIT(26), 0},
+ {"CSE_PGD0_PG_STS", BIT(27), 1},
+ {"KVMCC_PGD0_PG_STS", BIT(28), 1},
+ {"PMT_PGD0_PG_STS", BIT(29), 1},
+ {"CLINK_PGD0_PG_STS", BIT(30), 1},
+ {"PTIO_PGD0_PG_STS", BIT(31), 1},
+ {}
+};
+
+static const struct pmc_bit_map wcl_pcdn_power_gating_status_1_map[] = {
+ {"USBR0_PGD0_PG_STS", BIT(0), 1},
+ {"SBR16B22_PGD0_PG_STS", BIT(1), 0},
+ {"SMT1_PGD0_PG_STS", BIT(2), 1},
+ {"MPFPW1_PGD0_PG_STS", BIT(3), 0},
+ {"SMS2_PGD0_PG_STS", BIT(4), 1},
+ {"SMS1_PGD0_PG_STS", BIT(5), 1},
+ {"CSMERTC_PGD0_PG_STS", BIT(6), 0},
+ {"CSMEPSF_PGD0_PG_STS", BIT(7), 0},
+ {"D2D_NOC_PGD0_PG_STS", BIT(8), 0},
+ {"ESE_PGD0_PG_STS", BIT(9), 1},
+ {"FIACPCB_P_PGD0_PG_STS", BIT(10), 0},
+ {"SBR8B2_PGD0_PG_STS", BIT(12), 0},
+ {"OSSE_SMT1_PGD0_PG_STS", BIT(13), 1},
+ {"D2D_DISP_PGD0_PG_STS", BIT(14), 0},
+ {"P2SB1_PGD0_PG_STS", BIT(15), 1},
+ {"U3FPW1_PGD0_PG_STS", BIT(16), 0},
+ {"SBR16B3_PGD0_PG_STS", BIT(17), 0},
+ {"PSF4_PGD0_PG_STS", BIT(18), 0},
+ {"CNVI_PGD0_PG_STS", BIT(19), 0},
+ {"UFSX2_PGD0_PG_STS", BIT(20), 1},
+ {"ENDBG_PGD0_PG_STS", BIT(21), 0},
+ {"DBC_PGD0_PG_STS", BIT(22), 0},
+ {"SBRG_PGD0_PG_STS", BIT(23), 0},
+ {"NPK_PGD1_PG_STS", BIT(25), 0},
+ {"SBR16B7_PGD0_PG_STS", BIT(26), 0},
+ {"SBR16B4_PGD0_PG_STS", BIT(27), 0},
+ {"FIA_XG_PSF_PGD0_PG_STS", BIT(28), 0},
+ {"PSF6_PGD0_PG_STS", BIT(29), 0},
+ {"UFSPW1_PGD0_PG_STS", BIT(30), 0},
+ {"FIA_U_PGD0_PG_STS", BIT(31), 0},
+ {}
+};
+
+static const struct pmc_bit_map wcl_pcdn_power_gating_status_2_map[] = {
+ {"PSF8_PGD0_PG_STS", BIT(0), 0},
+ {"PSF0_PGD0_PG_STS", BIT(1), 0},
+ {"FIACPCB_U_PGD0_PG_STS", BIT(3), 0},
+ {"TAM_PGD0_PG_STS", BIT(4), 1},
+ {"SBR16B0_PGD0_PG_STS", BIT(5), 0},
+ {"TBTLSX_PGD0_PG_STS", BIT(6), 1},
+ {"THC0_PGD0_PG_STS", BIT(7), 1},
+ {"THC1_PGD0_PG_STS", BIT(8), 1},
+ {"PMC_PGD1_PG_STS", BIT(9), 0},
+ {"FIACPCB_XG_PGD0_PG_STS", BIT(10), 0},
+ {"TCSS_PGD0_PG_STS", BIT(11), 0},
+ {"DISP_PGA_PGD0_PG_STS", BIT(12), 0},
+ {"SBR8B4_PGD0_PG_STS", BIT(13), 0},
+ {"SBR8B20_PGD0_PG_STS", BIT(14), 0},
+ {"DBG_PGD0_PG_STS", BIT(15), 0},
+ {"SPC_PGD0_PG_STS", BIT(16), 1},
+ {"ACE_PGD0_PG_STS", BIT(17), 0},
+ {"ACE_PGD1_PG_STS", BIT(18), 0},
+ {"ACE_PGD2_PG_STS", BIT(19), 0},
+ {"ACE_PGD3_PG_STS", BIT(20), 0},
+ {"ACE_PGD4_PG_STS", BIT(21), 0},
+ {"ACE_PGD5_PG_STS", BIT(22), 0},
+ {"ACE_PGD6_PG_STS", BIT(23), 0},
+ {"ACE_PGD7_PG_STS", BIT(24), 0},
+ {"ACE_PGD8_PG_STS", BIT(25), 0},
+ {"ACE_PGD9_PG_STS", BIT(26), 0},
+ {"ACE_PGD10_PG_STS", BIT(27), 0},
+ {"SBR16B2_PG_PGD0_PG_STS", BIT(28), 0},
+ {"SBR16B20_PGD0_PG_STS", BIT(29), 0},
+ {"OSSE_PGD0_PG_STS", BIT(30), 1},
+ {"SBR16B1_PGD0_PG_STS", BIT(31), 0},
+ {}
+};
+
+static const struct pmc_bit_map wcl_pcdn_d3_status_0_map[] = {
+ {"LPSS_D3_STS", BIT(3), 1},
+ {"XDCI_D3_STS", BIT(4), 1},
+ {"XHCI_D3_STS", BIT(5), 1},
+ {"SPA_D3_STS", BIT(12), 0},
+ {"SPC_D3_STS", BIT(14), 0},
+ {"OSSE_D3_STS", BIT(15), 0},
+ {"ESPISPI_D3_STS", BIT(18), 0},
+ {"PSTH_D3_STS", BIT(21), 0},
+ {}
+};
+
+static const struct pmc_bit_map wcl_pcdn_d3_status_1_map[] = {
+ {"OSSE_SMT1_D3_STS", BIT(16), 0},
+ {"GBE_D3_STS", BIT(19), 0},
+ {"ITSS_D3_STS", BIT(23), 0},
+ {"CNVI_D3_STS", BIT(27), 0},
+ {"UFSX2_D3_STS", BIT(28), 0},
+ {}
+};
+
+static const struct pmc_bit_map wcl_pcdn_d3_status_2_map[] = {
+ {"CSMERTC_D3_STS", BIT(1), 0},
+ {"ESE_D3_STS", BIT(2), 0},
+ {"CSE_D3_STS", BIT(4), 0},
+ {"KVMCC_D3_STS", BIT(5), 0},
+ {"USBR0_D3_STS", BIT(6), 0},
+ {"ISH_D3_STS", BIT(7), 0},
+ {"SMT1_D3_STS", BIT(8), 0},
+ {"SMT2_D3_STS", BIT(9), 0},
+ {"SMT3_D3_STS", BIT(10), 0},
+ {"CLINK_D3_STS", BIT(14), 0},
+ {"PTIO_D3_STS", BIT(16), 0},
+ {"PMT_D3_STS", BIT(17), 0},
+ {"SMS1_D3_STS", BIT(18), 0},
+ {"SMS2_D3_STS", BIT(19), 0},
+ {"OSSE_SMT2_D3_STS", BIT(22), 0},
+ {}
+};
+
+static const struct pmc_bit_map wcl_pcdn_d3_status_3_map[] = {
+ {"THC0_D3_STS", BIT(14), 1},
+ {"THC1_D3_STS", BIT(15), 1},
+ {"OSSE_SMT3_D3_STS", BIT(16), 0},
+ {"ACE_D3_STS", BIT(23), 0},
+ {}
+};
+
+static const struct pmc_bit_map wcl_pcdn_vnn_req_status_0_map[] = {
+ {"LPSS_VNN_REQ_STS", BIT(3), 1},
+ {"OSSE_VNN_REQ_STS", BIT(15), 1},
+ {"ESPISPI_VNN_REQ_STS", BIT(18), 1},
+ {}
+};
+
+static const struct pmc_bit_map wcl_pcdn_vnn_req_status_1_map[] = {
+ {"NPK_VNN_REQ_STS", BIT(4), 1},
+ {"DFXAGG_VNN_REQ_STS", BIT(8), 0},
+ {"EXI_VNN_REQ_STS", BIT(9), 1},
+ {"OSSE_SMT1_VNN_REQ_STS", BIT(16), 1},
+ {"P2D_VNN_REQ_STS", BIT(18), 1},
+ {"GBE_VNN_REQ_STS", BIT(19), 1},
+ {"SMB_VNN_REQ_STS", BIT(25), 1},
+ {"LPC_VNN_REQ_STS", BIT(26), 0},
+ {}
+};
+
+static const struct pmc_bit_map wcl_pcdn_vnn_req_status_2_map[] = {
+ {"CSMERTC_VNN_REQ_STS", BIT(1), 1},
+ {"ESE_VNN_REQ_STS", BIT(2), 1},
+ {"CSE_VNN_REQ_STS", BIT(4), 1},
+ {"ISH_VNN_REQ_STS", BIT(7), 1},
+ {"SMT1_VNN_REQ_STS", BIT(8), 1},
+ {"CLINK_VNN_REQ_STS", BIT(14), 1},
+ {"SMS1_VNN_REQ_STS", BIT(18), 1},
+ {"SMS2_VNN_REQ_STS", BIT(19), 1},
+ {"GPIOCOM4_VNN_REQ_STS", BIT(20), 1},
+ {"GPIOCOM3_VNN_REQ_STS", BIT(21), 1},
+ {"GPIOCOM1_VNN_REQ_STS", BIT(23), 1},
+ {"GPIOCOM0_VNN_REQ_STS", BIT(24), 1},
+ {"DISP_SHIM_VNN_REQ_STS", BIT(31), 1},
+ {}
+};
+
+static const struct pmc_bit_map wcl_pcdn_vnn_misc_status_map[] = {
+ {"CPU_C10_REQ_STS", BIT(0), 0},
+ {"TS_OFF_REQ_STS", BIT(1), 0},
+ {"PNDE_MET_REQ_STS", BIT(2), 1},
+ {"FW_THROTTLE_ALLOWED_REQ_STS", BIT(4), 0},
+ {"VNN_SOC_REQ_STS", BIT(6), 1},
+ {"ISH_VNNAON_REQ_STS", BIT(7), 0},
+ {"D2D_NOC_CFI_QACTIVE_REQ_STS", BIT(8), 1},
+ {"D2D_NOC_GPSB_QACTIVE_REQ_STS", BIT(9), 1},
+ {"PLT_GREATER_REQ_STS", BIT(11), 1},
+ {"ALL_SBR_IDLE_REQ_STS", BIT(12), 0},
+ {"PMC_IDLE_FB_OCP_REQ_STS", BIT(13), 0},
+ {"PM_SYNC_STATES_REQ_STS", BIT(14), 0},
+ {"EA_REQ_STS", BIT(15), 0},
+ {"MPHY_CORE_OFF_REQ_STS", BIT(16), 0},
+ {"BRK_EV_EN_REQ_STS", BIT(17), 0},
+ {"AUTO_DEMO_EN_REQ_STS", BIT(18), 0},
+ {"ITSS_CLK_SRC_REQ_STS", BIT(19), 1},
+ {"ARC_IDLE_REQ_STS", BIT(21), 0},
+ {"FIA_DEEP_PM_REQ_STS", BIT(23), 0},
+ {"XDCI_ATTACHED_REQ_STS", BIT(24), 1},
+ {"ARC_INTERRUPT_WAKE_REQ_STS", BIT(25), 0},
+ {"D2D_DISP_DDI_QACTIVE_REQ_STS", BIT(26), 1},
+ {"PRE_WAKE0_REQ_STS", BIT(27), 1},
+ {"PRE_WAKE1_REQ_STS", BIT(28), 1},
+ {"PRE_WAKE2_REQ_STS", BIT(29), 1},
+ {}
+};
+
+static const struct pmc_bit_map wcl_pcdn_rsc_status_map[] = {
+ {"Memory", 0, 1},
+ {"PSF0", 0, 1},
+ {"PSF6", 0, 1},
+ {"PSF8", 0, 1},
+ {"SAF_CFI_LINK", 0, 1},
+ {"SB", 0, 1},
+ {}
+};
+
+static const struct pmc_bit_map *wcl_pcdn_lpm_maps[] = {
+ ptl_pcdp_clocksource_status_map,
+ wcl_pcdn_power_gating_status_0_map,
+ wcl_pcdn_power_gating_status_1_map,
+ wcl_pcdn_power_gating_status_2_map,
+ wcl_pcdn_d3_status_0_map,
+ wcl_pcdn_d3_status_1_map,
+ wcl_pcdn_d3_status_2_map,
+ wcl_pcdn_d3_status_3_map,
+ wcl_pcdn_vnn_req_status_0_map,
+ wcl_pcdn_vnn_req_status_1_map,
+ wcl_pcdn_vnn_req_status_2_map,
+ ptl_pcdp_vnn_req_status_3_map,
+ wcl_pcdn_vnn_misc_status_map,
+ ptl_pcdp_signal_status_map,
+ NULL
+};
+
+static const struct pmc_bit_map *wcl_pcdn_blk_maps[] = {
+ wcl_pcdn_power_gating_status_0_map,
+ wcl_pcdn_power_gating_status_1_map,
+ wcl_pcdn_power_gating_status_2_map,
+ wcl_pcdn_rsc_status_map,
+ wcl_pcdn_vnn_req_status_0_map,
+ wcl_pcdn_vnn_req_status_1_map,
+ wcl_pcdn_vnn_req_status_2_map,
+ ptl_pcdp_vnn_req_status_3_map,
+ wcl_pcdn_d3_status_0_map,
+ wcl_pcdn_d3_status_1_map,
+ wcl_pcdn_d3_status_2_map,
+ wcl_pcdn_d3_status_3_map,
+ ptl_pcdp_clocksource_status_map,
+ wcl_pcdn_vnn_misc_status_map,
+ ptl_pcdp_signal_status_map,
+ NULL
+};
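Both map arrays above intentionally interleave Wildcat Lake specific tables with the ptl_pcdp_* tables shared from ptl.c; that reuse is why the earlier hunks drop the static qualifier from those three PTL maps and export them through core.h.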
+
+static const struct pmc_reg_map wcl_pcdn_reg_map = {
+ .pfear_sts = ext_wcl_pcdn_pfear_map,
+ .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
+ .slp_s0_res_counter_step = TGL_PMC_SLP_S0_RES_COUNTER_STEP,
+ .ltr_show_sts = wcl_pcdn_ltr_show_map,
+ .msr_sts = msr_map,
+ .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
+ .regmap_length = WCL_PCD_PMC_MMIO_REG_LEN,
+ .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
+ .ppfear_buckets = LNL_PPFEAR_NUM_ENTRIES,
+ .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
+ .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+ .lpm_num_maps = PTL_LPM_NUM_MAPS,
+ .ltr_ignore_max = LNL_NUM_IP_IGN_ALLOWED,
+ .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2,
+ .etr3_offset = ETR3_OFFSET,
+ .lpm_sts_latch_en_offset = MTL_LPM_STATUS_LATCH_EN_OFFSET,
+ .lpm_priority_offset = MTL_LPM_PRI_OFFSET,
+ .lpm_en_offset = MTL_LPM_EN_OFFSET,
+ .lpm_residency_offset = MTL_LPM_RESIDENCY_OFFSET,
+ .lpm_sts = wcl_pcdn_lpm_maps,
+ .lpm_status_offset = MTL_LPM_STATUS_OFFSET,
+ .lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET,
+ .s0ix_blocker_maps = wcl_pcdn_blk_maps,
+ .s0ix_blocker_offset = LNL_S0IX_BLOCKER_OFFSET,
+};
+
+#define WCL_NPU_PCI_DEV 0xfd3e
+
+/*
+ * Set power state of select devices that do not have drivers to D3
+ * so that they do not block Package C entry.
+ */
+static void wcl_d3_fixup(void)
+{
+ pmc_core_set_device_d3(WCL_NPU_PCI_DEV);
+}
+
+static int wcl_resume(struct pmc_dev *pmcdev)
+{
+ wcl_d3_fixup();
+ return cnl_resume(pmcdev);
+}
+
+static int wcl_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info)
+{
+ wcl_d3_fixup();
+ return generic_core_init(pmcdev, pmc_dev_info);
+}
+
+struct pmc_dev_info wcl_pmc_dev = {
+ .map = &wcl_pcdn_reg_map,
+ .suspend = cnl_suspend,
+ .resume = wcl_resume,
+ .init = wcl_core_init,
+};
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
index 71e104a068e9..7449873c3d40 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -790,7 +790,7 @@ static const struct x86_cpu_id isst_cpu_ids[] = {
X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, SST_HPM_SUPPORTED),
X86_MATCH_VFM(INTEL_ICELAKE_D, 0),
X86_MATCH_VFM(INTEL_ICELAKE_X, 0),
- X86_MATCH_VFM(INTEL_PANTHERCOVE_X, SST_HPM_SUPPORTED),
+ X86_MATCH_VFM(INTEL_DIAMONDRAPIDS_X, SST_HPM_SUPPORTED),
X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, 0),
X86_MATCH_VFM(INTEL_SKYLAKE_X, SST_MBOX_SUPPORTED),
{}
diff --git a/drivers/platform/x86/intel/tpmi_power_domains.c b/drivers/platform/x86/intel/tpmi_power_domains.c
index 9d8247bb9cfa..7d93119a4c30 100644
--- a/drivers/platform/x86/intel/tpmi_power_domains.c
+++ b/drivers/platform/x86/intel/tpmi_power_domains.c
@@ -85,7 +85,7 @@ static const struct x86_cpu_id tpmi_cpu_ids[] = {
X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, NULL),
X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X, NULL),
X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, NULL),
- X86_MATCH_VFM(INTEL_PANTHERCOVE_X, NULL),
+ X86_MATCH_VFM(INTEL_DIAMONDRAPIDS_X, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, tpmi_cpu_ids);
@@ -178,7 +178,7 @@ static int tpmi_get_logical_id(unsigned int cpu, struct tpmi_cpu_info *info)
info->punit_thread_id = FIELD_GET(LP_ID_MASK, data);
info->punit_core_id = FIELD_GET(MODULE_ID_MASK, data);
- info->pkg_id = topology_physical_package_id(cpu);
+ info->pkg_id = topology_logical_package_id(cpu);
info->linux_cpu = cpu;
return 0;
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
index 6df55c8e16b7..1237d9570886 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
@@ -192,9 +192,14 @@ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *valu
static int write_eff_lat_ctrl(struct uncore_data *data, unsigned int val, enum uncore_index index)
{
struct tpmi_uncore_cluster_info *cluster_info;
+ struct tpmi_uncore_struct *uncore_root;
u64 control;
cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data);
+ uncore_root = cluster_info->uncore_root;
+
+ if (uncore_root->write_blocked)
+ return -EPERM;
if (cluster_info->root_domain)
return -ENODATA;
@@ -369,6 +374,77 @@ static void uncore_set_agent_type(struct tpmi_uncore_cluster_info *cluster_info)
cluster_info->uncore_data.agent_type_mask = FIELD_GET(UNCORE_AGENT_TYPES, status);
}
+#define MAX_PARTITIONS 2
+
+/* IO domain ID start index for a partition */
+static u8 io_die_start[MAX_PARTITIONS];
+
+/* Next IO domain ID index after the current partition IO die IDs */
+static u8 io_die_index_next;
+
+/* Lock to protect io_die_start, io_die_index_next */
+static DEFINE_MUTEX(domain_lock);
+
+static void set_domain_id(int id, int num_resources,
+ struct oobmsm_plat_info *plat_info,
+ struct tpmi_uncore_cluster_info *cluster_info)
+{
+ u8 part_io_index, cdie_range, pkg_io_index, max_dies;
+
+ if (plat_info->partition >= MAX_PARTITIONS) {
+ cluster_info->uncore_data.domain_id = id;
+ return;
+ }
+
+ if (cluster_info->uncore_data.agent_type_mask & AGENT_TYPE_CORE) {
+ cluster_info->uncore_data.domain_id = cluster_info->cdie_id;
+ return;
+ }
+
+ /* Unlikely but cdie_mask may have holes, so take range */
+ cdie_range = fls(plat_info->cdie_mask) - ffs(plat_info->cdie_mask) + 1;
+ max_dies = topology_max_dies_per_package();
+
+ /*
+ * If the CPU doesn't enumerate dies, then use current cdie range
+ * as the max.
+ */
+ if (cdie_range > max_dies)
+ max_dies = cdie_range;
+
+ guard(mutex)(&domain_lock);
+
+ if (!io_die_index_next)
+ io_die_index_next = max_dies;
+
+ if (!io_die_start[plat_info->partition]) {
+ io_die_start[plat_info->partition] = io_die_index_next;
+ /*
+ * The number of IO dies = num_resources - cdie_range, so the
+ * next partition's io_die_index_next starts after the IO dies
+ * of the current partition.
+ */
+ io_die_index_next += (num_resources - cdie_range);
+ }
+
+ /*
+ * Index from the IO die start within the partition: this is the
+ * first valid domain after the cdies. For example, if the current
+ * resource index is 5 and the cdies end at index 3 (cdie_range = 4),
+ * then the IO-only index is 5 - 4 = 1.
+ */
+ part_io_index = id - cdie_range;
+
+ /*
+ * Add the IO die start index of this partition within this package
+ * so that the resulting domain ID is unique in the package.
+ */
+ pkg_io_index = io_die_start[plat_info->partition] + part_io_index;
+
+ /* Assign this to domain ID */
+ cluster_info->uncore_data.domain_id = pkg_io_index;
+}
+
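A worked example of the mapping above, with illustrative numbers: for cdie_mask = 0xf, cdie_range = fls(0xf) - ffs(0xf) + 1 = 4. With num_resources = 7 the partition has 7 - 4 = 3 IO dies; the first partition's io_die_start is set to max_dies (say 4) and io_die_index_next advances to 7 for the next partition. Resource index 5 then yields part_io_index = 5 - 4 = 1 and domain_id = 4 + 1 = 5, which can collide neither with the cdie-derived IDs 0..3 nor with the second partition's IO range starting at 7.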
/* Callback for sysfs read for TPMI uncore values. Called under mutex locks. */
static int uncore_read(struct uncore_data *data, unsigned int *value, enum uncore_index index)
{
@@ -605,11 +681,12 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
cluster_info->uncore_data.package_id = pkg;
/* There are no dies like Cascade Lake */
cluster_info->uncore_data.die_id = 0;
- cluster_info->uncore_data.domain_id = i;
cluster_info->uncore_data.cluster_id = j;
set_cdie_id(i, cluster_info, plat_info);
+ set_domain_id(i, num_resources, plat_info, cluster_info);
+
cluster_info->uncore_root = tpmi_uncore;
if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) >= UNCORE_ELC_SUPPORTED_VERSION)
@@ -633,7 +710,7 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
auxiliary_set_drvdata(auxdev, tpmi_uncore);
- if (topology_max_dies_per_package() > 1)
+ if (topology_max_dies_per_package() > 1 || plat_info->partition)
return 0;
tpmi_uncore->root_cluster.root_domain = true;
diff --git a/drivers/platform/x86/lenovo/think-lmi.c b/drivers/platform/x86/lenovo/think-lmi.c
index 0992b41b6221..540b472b1bf3 100644
--- a/drivers/platform/x86/lenovo/think-lmi.c
+++ b/drivers/platform/x86/lenovo/think-lmi.c
@@ -119,6 +119,7 @@ MODULE_PARM_DESC(debug_support, "Enable debug command support");
* You must reboot the computer before the changes will take effect.
*/
#define LENOVO_SET_BIOS_CERT_GUID "26861C9F-47E9-44C4-BD8B-DFE7FA2610FE"
+#define LENOVO_TC_SET_BIOS_CERT_GUID "955aaf7d-8bc4-4f04-90aa-97469512f167"
/*
* Name: UpdateBiosCert
@@ -128,6 +129,7 @@ MODULE_PARM_DESC(debug_support, "Enable debug command support");
* You must reboot the computer before the changes will take effect.
*/
#define LENOVO_UPDATE_BIOS_CERT_GUID "9AA3180A-9750-41F7-B9F7-D5D3B1BAC3CE"
+#define LENOVO_TC_UPDATE_BIOS_CERT_GUID "5f5bbbb2-c72f-4fb8-8129-228eef4fdbed"
/*
* Name: ClearBiosCert
@@ -137,6 +139,8 @@ MODULE_PARM_DESC(debug_support, "Enable debug command support");
* You must reboot the computer before the changes will take effect.
*/
#define LENOVO_CLEAR_BIOS_CERT_GUID "B2BC39A7-78DD-4D71-B059-A510DEC44890"
+#define LENOVO_TC_CLEAR_BIOS_CERT_GUID "97849cb6-cb44-42d1-a750-26a596a9eec4"
+
/*
* Name: CertToPassword
* Description: Switch from certificate to password authentication.
@@ -145,6 +149,7 @@ MODULE_PARM_DESC(debug_support, "Enable debug command support");
* You must reboot the computer before the changes will take effect.
*/
#define LENOVO_CERT_TO_PASSWORD_GUID "0DE8590D-5510-4044-9621-77C227F5A70D"
+#define LENOVO_TC_CERT_TO_PASSWORD_GUID "ef65480d-38c9-420d-b700-ab3d6c8ebaca"
/*
* Name: SetBiosSettingCert
@@ -153,6 +158,7 @@ MODULE_PARM_DESC(debug_support, "Enable debug command support");
* Format: "Item,Value,Signature"
*/
#define LENOVO_SET_BIOS_SETTING_CERT_GUID "34A008CC-D205-4B62-9E67-31DFA8B90003"
+#define LENOVO_TC_SET_BIOS_SETTING_CERT_GUID "19ecba3b-b318-4192-a89b-43d94bc60cea"
/*
* Name: SaveBiosSettingCert
@@ -161,6 +167,7 @@ MODULE_PARM_DESC(debug_support, "Enable debug command support");
* Format: "Signature"
*/
#define LENOVO_SAVE_BIOS_SETTING_CERT_GUID "C050FB9D-DF5F-4606-B066-9EFC401B2551"
+#define LENOVO_TC_SAVE_BIOS_SETTING_CERT_GUID "0afaf46f-7cca-450a-b455-a826a0bf1af5"
/*
* Name: CertThumbprint
@@ -177,12 +184,43 @@ MODULE_PARM_DESC(debug_support, "Enable debug command support");
#define TLMI_CERT_SVC BIT(7) /* Admin Certificate Based */
#define TLMI_CERT_SMC BIT(8) /* System Certificate Based */
+static const struct tlmi_cert_guids thinkpad_cert_guid = {
+ .thumbprint = LENOVO_CERT_THUMBPRINT_GUID,
+ .set_bios_setting = LENOVO_SET_BIOS_SETTING_CERT_GUID,
+ .save_bios_setting = LENOVO_SAVE_BIOS_SETTING_CERT_GUID,
+ .cert_to_password = LENOVO_CERT_TO_PASSWORD_GUID,
+ .clear_bios_cert = LENOVO_CLEAR_BIOS_CERT_GUID,
+ .update_bios_cert = LENOVO_UPDATE_BIOS_CERT_GUID,
+ .set_bios_cert = LENOVO_SET_BIOS_CERT_GUID,
+};
+
+static const struct tlmi_cert_guids thinkcenter_cert_guid = {
+ .thumbprint = NULL,
+ .set_bios_setting = LENOVO_TC_SET_BIOS_SETTING_CERT_GUID,
+ .save_bios_setting = LENOVO_TC_SAVE_BIOS_SETTING_CERT_GUID,
+ .cert_to_password = LENOVO_TC_CERT_TO_PASSWORD_GUID,
+ .clear_bios_cert = LENOVO_TC_CLEAR_BIOS_CERT_GUID,
+ .update_bios_cert = LENOVO_TC_UPDATE_BIOS_CERT_GUID,
+ .set_bios_cert = LENOVO_TC_SET_BIOS_CERT_GUID,
+};
+
static const struct tlmi_err_codes tlmi_errs[] = {
{"Success", 0},
+ {"Set Certificate operation was successful.", 0},
{"Not Supported", -EOPNOTSUPP},
{"Invalid Parameter", -EINVAL},
{"Access Denied", -EACCES},
{"System Busy", -EBUSY},
+ {"Set Certificate operation failed with status:Invalid Parameter.", -EINVAL},
+ {"Set Certificate operation failed with status:Invalid certificate type.", -EINVAL},
+ {"Set Certificate operation failed with status:Invalid password format.", -EINVAL},
+ {"Set Certificate operation failed with status:Password retry count exceeded.", -EACCES},
+ {"Set Certificate operation failed with status:Password Invalid.", -EACCES},
+ {"Set Certificate operation failed with status:Operation aborted.", -EBUSY},
+ {"Set Certificate operation failed with status:No free slots to write.", -ENOSPC},
+ {"Set Certificate operation failed with status:Certificate not found.", -EEXIST},
+ {"Set Certificate operation failed with status:Internal error.", -EFAULT},
+ {"Set Certificate operation failed with status:Certificate too large.", -EFBIG},
};
static const char * const encoding_options[] = {
@@ -668,7 +706,10 @@ static ssize_t cert_thumbprint(char *buf, const char *arg, int count)
const union acpi_object *obj;
acpi_status status;
- status = wmi_evaluate_method(LENOVO_CERT_THUMBPRINT_GUID, 0, 0, &input, &output);
+ if (!tlmi_priv.cert_guid->thumbprint)
+ return -EOPNOTSUPP;
+
+ status = wmi_evaluate_method(tlmi_priv.cert_guid->thumbprint, 0, 0, &input, &output);
if (ACPI_FAILURE(status)) {
kfree(output.pointer);
return -EIO;
@@ -751,7 +792,7 @@ static ssize_t cert_to_password_store(struct kobject *kobj,
kfree_sensitive(passwd);
return -ENOMEM;
}
- ret = tlmi_simple_call(LENOVO_CERT_TO_PASSWORD_GUID, auth_str);
+ ret = tlmi_simple_call(tlmi_priv.cert_guid->cert_to_password, auth_str);
kfree(auth_str);
kfree_sensitive(passwd);
@@ -774,7 +815,7 @@ static ssize_t certificate_store(struct kobject *kobj,
char *auth_str, *new_cert;
const char *serial;
char *signature;
- char *guid;
+ const char *guid;
int ret;
if (!capable(CAP_SYS_ADMIN))
@@ -797,7 +838,7 @@ static ssize_t certificate_store(struct kobject *kobj,
if (!auth_str)
return -ENOMEM;
- ret = tlmi_simple_call(LENOVO_CLEAR_BIOS_CERT_GUID, auth_str);
+ ret = tlmi_simple_call(tlmi_priv.cert_guid->clear_bios_cert, auth_str);
kfree(auth_str);
return ret ?: count;
@@ -834,7 +875,7 @@ static ssize_t certificate_store(struct kobject *kobj,
kfree(new_cert);
return -EACCES;
}
- guid = LENOVO_UPDATE_BIOS_CERT_GUID;
+ guid = tlmi_priv.cert_guid->update_bios_cert;
/* Format: 'Certificate,Signature' */
auth_str = cert_command(setting, new_cert, signature);
} else {
@@ -845,9 +886,17 @@ static ssize_t certificate_store(struct kobject *kobj,
kfree(new_cert);
return -EACCES;
}
- guid = LENOVO_SET_BIOS_CERT_GUID;
- /* Format: 'Certificate, password' */
- auth_str = cert_command(setting, new_cert, setting->password);
+ guid = tlmi_priv.cert_guid->set_bios_cert;
+ if (tlmi_priv.thinkcenter_mode) {
+ /* Format: 'Certificate, password, encoding, kbdlang' */
+ auth_str = kasprintf(GFP_KERNEL, "%s,%s,%s,%s", new_cert,
+ setting->password,
+ encoding_options[setting->encoding],
+ setting->kbdlang);
+ } else {
+ /* Format: 'Certificate, password' */
+ auth_str = cert_command(setting, new_cert, setting->password);
+ }
}
kfree(new_cert);
if (!auth_str)
@@ -1071,13 +1120,13 @@ static ssize_t current_value_store(struct kobject *kobj,
goto out;
}
- ret = tlmi_simple_call(LENOVO_SET_BIOS_SETTING_CERT_GUID, set_str);
+ ret = tlmi_simple_call(tlmi_priv.cert_guid->set_bios_setting, set_str);
if (ret)
goto out;
if (tlmi_priv.save_mode == TLMI_SAVE_BULK)
tlmi_priv.save_required = true;
else
- ret = tlmi_simple_call(LENOVO_SAVE_BIOS_SETTING_CERT_GUID,
+ ret = tlmi_simple_call(tlmi_priv.cert_guid->save_bios_setting,
tlmi_priv.pwd_admin->save_signature);
} else if (tlmi_priv.opcode_support) {
/*
@@ -1282,7 +1331,7 @@ static ssize_t save_settings_store(struct kobject *kobj, struct kobj_attribute *
ret = -EINVAL;
goto out;
}
- ret = tlmi_simple_call(LENOVO_SAVE_BIOS_SETTING_CERT_GUID,
+ ret = tlmi_simple_call(tlmi_priv.cert_guid->save_bios_setting,
tlmi_priv.pwd_admin->save_signature);
if (ret)
goto out;
@@ -1583,6 +1632,15 @@ static int tlmi_analyze(struct wmi_device *wdev)
wmi_has_guid(LENOVO_SAVE_BIOS_SETTING_CERT_GUID))
tlmi_priv.certificate_support = true;
+ /* ThinkCenter uses different GUIDs for certificate support */
+ if (wmi_has_guid(LENOVO_TC_SET_BIOS_CERT_GUID) &&
+ wmi_has_guid(LENOVO_TC_SET_BIOS_SETTING_CERT_GUID) &&
+ wmi_has_guid(LENOVO_TC_SAVE_BIOS_SETTING_CERT_GUID)) {
+ tlmi_priv.certificate_support = true;
+ tlmi_priv.thinkcenter_mode = true;
+ pr_info("ThinkCenter modified support being used\n");
+ }
+
/*
* Try to find the number of valid settings of this machine
* and use it to create sysfs attributes.
@@ -1728,10 +1786,16 @@ static int tlmi_analyze(struct wmi_device *wdev)
}
if (tlmi_priv.certificate_support) {
- tlmi_priv.pwd_admin->cert_installed =
- tlmi_priv.pwdcfg.core.password_state & TLMI_CERT_SVC;
- tlmi_priv.pwd_system->cert_installed =
- tlmi_priv.pwdcfg.core.password_state & TLMI_CERT_SMC;
+ if (tlmi_priv.thinkcenter_mode) {
+ tlmi_priv.cert_guid = &thinkcenter_cert_guid;
+ tlmi_priv.pwd_admin->cert_installed = tlmi_priv.pwdcfg.core.password_mode;
+ } else {
+ tlmi_priv.cert_guid = &thinkpad_cert_guid;
+ tlmi_priv.pwd_admin->cert_installed =
+ tlmi_priv.pwdcfg.core.password_state & TLMI_CERT_SVC;
+ tlmi_priv.pwd_system->cert_installed =
+ tlmi_priv.pwdcfg.core.password_state & TLMI_CERT_SMC;
+ }
}
return 0;
diff --git a/drivers/platform/x86/lenovo/think-lmi.h b/drivers/platform/x86/lenovo/think-lmi.h
index 9b014644d316..017644323d46 100644
--- a/drivers/platform/x86/lenovo/think-lmi.h
+++ b/drivers/platform/x86/lenovo/think-lmi.h
@@ -41,6 +41,17 @@ enum save_mode {
TLMI_SAVE_SAVE,
};
+/* GUIDs can differ between platforms */
+struct tlmi_cert_guids {
+ const char *thumbprint;
+ const char *set_bios_setting;
+ const char *save_bios_setting;
+ const char *cert_to_password;
+ const char *clear_bios_cert;
+ const char *update_bios_cert;
+ const char *set_bios_cert;
+};
+
/* password configuration details */
#define TLMI_PWDCFG_MODE_LEGACY 0
#define TLMI_PWDCFG_MODE_PASSWORD 1
@@ -109,6 +120,7 @@ struct think_lmi {
enum save_mode save_mode;
bool save_required;
bool reboot_required;
+ bool thinkcenter_mode;
struct tlmi_attr_setting *setting[TLMI_SETTINGS_COUNT];
struct device *class_dev;
@@ -121,6 +133,8 @@ struct think_lmi {
struct tlmi_pwd_setting *pwd_system;
struct tlmi_pwd_setting *pwd_hdd;
struct tlmi_pwd_setting *pwd_nvme;
+
+ const struct tlmi_cert_guids *cert_guid;
};
#endif /* !_THINK_LMI_H_ */
diff --git a/drivers/platform/x86/lenovo/wmi-capdata01.c b/drivers/platform/x86/lenovo/wmi-capdata01.c
index c922680b3cba..fc7e3454e71d 100644
--- a/drivers/platform/x86/lenovo/wmi-capdata01.c
+++ b/drivers/platform/x86/lenovo/wmi-capdata01.c
@@ -93,7 +93,7 @@ int lwmi_cd01_get_data(struct cd01_list *list, u32 attribute_id, struct capdata0
continue;
memcpy(output, &list->data[idx], sizeof(list->data[idx]));
return 0;
- };
+ }
return -EINVAL;
}
diff --git a/drivers/platform/x86/lenovo/yoga-tab2-pro-1380-fastcharger.c b/drivers/platform/x86/lenovo/yoga-tab2-pro-1380-fastcharger.c
index 1b33c977f6d7..8551ab4d2c7d 100644
--- a/drivers/platform/x86/lenovo/yoga-tab2-pro-1380-fastcharger.c
+++ b/drivers/platform/x86/lenovo/yoga-tab2-pro-1380-fastcharger.c
@@ -255,6 +255,11 @@ static int yt2_1380_fc_pdev_probe(struct platform_device *pdev)
if (!serdev)
return -ENOMEM;
+ /* Propagate pdev-fwnode set by x86-android-tablets to serdev */
+ device_set_node(&serdev->dev, dev_fwnode(&pdev->dev));
+ /* The fwnode is a managed node, so it will be auto-put on serdev_device_put() */
+ fwnode_handle_get(dev_fwnode(&serdev->dev));
+
ret = serdev_device_add(serdev);
if (ret) {
serdev_device_put(serdev);
diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c
index 4b57102c7f62..6af6cf477c5b 100644
--- a/drivers/platform/x86/lg-laptop.c
+++ b/drivers/platform/x86/lg-laptop.c
@@ -8,6 +8,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
+#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/device.h>
#include <linux/dev_printk.h>
@@ -75,6 +76,9 @@ MODULE_PARM_DESC(fw_debug, "Enable printing of firmware debug messages");
#define WMBB_USB_CHARGE 0x10B
#define WMBB_BATT_LIMIT 0x10C
+#define FAN_MODE_LOWER GENMASK(1, 0)
+#define FAN_MODE_UPPER GENMASK(5, 4)
+
#define PLATFORM_NAME "lg-laptop"
MODULE_ALIAS("wmi:" WMI_EVENT_GUID0);
@@ -274,29 +278,19 @@ static ssize_t fan_mode_store(struct device *dev,
struct device_attribute *attr,
const char *buffer, size_t count)
{
- bool value;
+ unsigned long value;
union acpi_object *r;
- u32 m;
int ret;
- ret = kstrtobool(buffer, &value);
+ ret = kstrtoul(buffer, 10, &value);
if (ret)
return ret;
+ if (value >= 3)
+ return -EINVAL;
- r = lg_wmab(dev, WM_FAN_MODE, WM_GET, 0);
- if (!r)
- return -EIO;
-
- if (r->type != ACPI_TYPE_INTEGER) {
- kfree(r);
- return -EIO;
- }
-
- m = r->integer.value;
- kfree(r);
- r = lg_wmab(dev, WM_FAN_MODE, WM_SET, (m & 0xffffff0f) | (value << 4));
- kfree(r);
- r = lg_wmab(dev, WM_FAN_MODE, WM_SET, (m & 0xfffffff0) | value);
+ r = lg_wmab(dev, WM_FAN_MODE, WM_SET,
+ FIELD_PREP(FAN_MODE_LOWER, value) |
+ FIELD_PREP(FAN_MODE_UPPER, value));
kfree(r);
return count;
@@ -305,7 +299,7 @@ static ssize_t fan_mode_store(struct device *dev,
static ssize_t fan_mode_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
- unsigned int status;
+ unsigned int mode;
union acpi_object *r;
r = lg_wmab(dev, WM_FAN_MODE, WM_GET, 0);
@@ -317,10 +311,10 @@ static ssize_t fan_mode_show(struct device *dev,
return -EIO;
}
- status = r->integer.value & 0x01;
+ mode = FIELD_GET(FAN_MODE_LOWER, r->integer.value);
kfree(r);
- return sysfs_emit(buffer, "%d\n", status);
+ return sysfs_emit(buffer, "%d\n", mode);
}
static ssize_t usb_charge_store(struct device *dev,
diff --git a/drivers/platform/x86/meraki-mx100.c b/drivers/platform/x86/meraki-mx100.c
index 3751ed36a980..8c5276d98512 100644
--- a/drivers/platform/x86/meraki-mx100.c
+++ b/drivers/platform/x86/meraki-mx100.c
@@ -15,135 +15,256 @@
#include <linux/dmi.h>
#include <linux/err.h>
-#include <linux/gpio_keys.h>
#include <linux/gpio/machine.h>
-#include <linux/input.h>
+#include <linux/gpio/property.h>
+#include <linux/input-event-codes.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/leds.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#define TINK_GPIO_DRIVER_NAME "gpio_ich"
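+/*
+ * Software node whose name matches the gpio_ich chip label; gpiolib uses
+ * the name to resolve the PROPERTY_ENTRY_GPIO() references below.
+ */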
+static const struct software_node gpio_ich_node = {
+ .name = TINK_GPIO_DRIVER_NAME,
+};
+
/* LEDs */
-static const struct gpio_led tink_leds[] = {
- {
- .name = "mx100:green:internet",
- .default_trigger = "default-on",
- },
- {
- .name = "mx100:green:lan2",
- },
- {
- .name = "mx100:green:lan3",
- },
- {
- .name = "mx100:green:lan4",
- },
- {
- .name = "mx100:green:lan5",
- },
- {
- .name = "mx100:green:lan6",
- },
- {
- .name = "mx100:green:lan7",
- },
- {
- .name = "mx100:green:lan8",
- },
- {
- .name = "mx100:green:lan9",
- },
- {
- .name = "mx100:green:lan10",
- },
- {
- .name = "mx100:green:lan11",
- },
- {
- .name = "mx100:green:ha",
- },
- {
- .name = "mx100:orange:ha",
- },
- {
- .name = "mx100:green:usb",
- },
- {
- .name = "mx100:orange:usb",
- },
+static const struct software_node tink_gpio_leds_node = {
+ .name = "meraki-mx100-leds",
};
-static const struct gpio_led_platform_data tink_leds_pdata = {
- .num_leds = ARRAY_SIZE(tink_leds),
- .leds = tink_leds,
-};
-
-static struct gpiod_lookup_table tink_leds_table = {
- .dev_id = "leds-gpio",
- .table = {
- GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 11,
- NULL, 0, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 18,
- NULL, 1, GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 20,
- NULL, 2, GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 22,
- NULL, 3, GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 23,
- NULL, 4, GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 32,
- NULL, 5, GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 34,
- NULL, 6, GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 35,
- NULL, 7, GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 36,
- NULL, 8, GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 37,
- NULL, 9, GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 48,
- NULL, 10, GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 16,
- NULL, 11, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 7,
- NULL, 12, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 21,
- NULL, 13, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 19,
- NULL, 14, GPIO_ACTIVE_LOW),
- {} /* Terminating entry */
- }
+static const struct property_entry tink_internet_led_props[] = {
+ PROPERTY_ENTRY_STRING("label", "mx100:green:internet"),
+ PROPERTY_ENTRY_STRING("linux,default-trigger", "default-on"),
+ PROPERTY_ENTRY_GPIO("gpios", &gpio_ich_node, 11, GPIO_ACTIVE_LOW),
+ { }
+};
+
+static const struct software_node tink_internet_led_node = {
+ .name = "internet-led",
+ .parent = &tink_gpio_leds_node,
+ .properties = tink_internet_led_props,
+};
+
+static const struct property_entry tink_lan2_led_props[] = {
+ PROPERTY_ENTRY_STRING("label", "mx100:green:lan2"),
+ PROPERTY_ENTRY_GPIO("gpios", &gpio_ich_node, 18, GPIO_ACTIVE_HIGH),
+ { }
+};
+
+static const struct software_node tink_lan2_led_node = {
+ .name = "lan2-led",
+ .parent = &tink_gpio_leds_node,
+ .properties = tink_lan2_led_props,
+};
+
+static const struct property_entry tink_lan3_led_props[] = {
+ PROPERTY_ENTRY_STRING("label", "mx100:green:lan3"),
+ PROPERTY_ENTRY_GPIO("gpios", &gpio_ich_node, 20, GPIO_ACTIVE_HIGH),
+ { }
+};
+
+static const struct software_node tink_lan3_led_node = {
+ .name = "lan3-led",
+ .parent = &tink_gpio_leds_node,
+ .properties = tink_lan3_led_props,
+};
+
+static const struct property_entry tink_lan4_led_props[] = {
+ PROPERTY_ENTRY_STRING("label", "mx100:green:lan4"),
+ PROPERTY_ENTRY_GPIO("gpios", &gpio_ich_node, 22, GPIO_ACTIVE_HIGH),
+ { }
+};
+
+static const struct software_node tink_lan4_led_node = {
+ .name = "lan4-led",
+ .parent = &tink_gpio_leds_node,
+ .properties = tink_lan4_led_props,
+};
+
+static const struct property_entry tink_lan5_led_props[] = {
+ PROPERTY_ENTRY_STRING("label", "mx100:green:lan5"),
+ PROPERTY_ENTRY_GPIO("gpios", &gpio_ich_node, 23, GPIO_ACTIVE_HIGH),
+ { }
+};
+
+static const struct software_node tink_lan5_led_node = {
+ .name = "lan5-led",
+ .parent = &tink_gpio_leds_node,
+ .properties = tink_lan5_led_props,
+};
+
+static const struct property_entry tink_lan6_led_props[] = {
+ PROPERTY_ENTRY_STRING("label", "mx100:green:lan6"),
+ PROPERTY_ENTRY_GPIO("gpios", &gpio_ich_node, 32, GPIO_ACTIVE_HIGH),
+ { }
+};
+
+static const struct software_node tink_lan6_led_node = {
+ .name = "lan6-led",
+ .parent = &tink_gpio_leds_node,
+ .properties = tink_lan6_led_props,
+};
+
+static const struct property_entry tink_lan7_led_props[] = {
+ PROPERTY_ENTRY_STRING("label", "mx100:green:lan7"),
+ PROPERTY_ENTRY_GPIO("gpios", &gpio_ich_node, 34, GPIO_ACTIVE_HIGH),
+ { }
+};
+
+static const struct software_node tink_lan7_led_node = {
+ .name = "lan7-led",
+ .parent = &tink_gpio_leds_node,
+ .properties = tink_lan7_led_props,
+};
+
+static const struct property_entry tink_lan8_led_props[] = {
+ PROPERTY_ENTRY_STRING("label", "mx100:green:lan8"),
+ PROPERTY_ENTRY_GPIO("gpios", &gpio_ich_node, 35, GPIO_ACTIVE_HIGH),
+ { }
+};
+
+static const struct software_node tink_lan8_led_node = {
+ .name = "lan8-led",
+ .parent = &tink_gpio_leds_node,
+ .properties = tink_lan8_led_props,
+};
+
+static const struct property_entry tink_lan9_led_props[] = {
+ PROPERTY_ENTRY_STRING("label", "mx100:green:lan9"),
+ PROPERTY_ENTRY_GPIO("gpios", &gpio_ich_node, 36, GPIO_ACTIVE_HIGH),
+ { }
+};
+
+static const struct software_node tink_lan9_led_node = {
+ .name = "lan9-led",
+ .parent = &tink_gpio_leds_node,
+ .properties = tink_lan9_led_props,
+};
+
+static const struct property_entry tink_lan10_led_props[] = {
+ PROPERTY_ENTRY_STRING("label", "mx100:green:lan10"),
+ PROPERTY_ENTRY_GPIO("gpios", &gpio_ich_node, 37, GPIO_ACTIVE_HIGH),
+ { }
+};
+
+static const struct software_node tink_lan10_led_node = {
+ .name = "lan10-led",
+ .parent = &tink_gpio_leds_node,
+ .properties = tink_lan10_led_props,
+};
+
+static const struct property_entry tink_lan11_led_props[] = {
+ PROPERTY_ENTRY_STRING("label", "mx100:green:lan11"),
+ PROPERTY_ENTRY_GPIO("gpios", &gpio_ich_node, 48, GPIO_ACTIVE_HIGH),
+ { }
+};
+
+static const struct software_node tink_lan11_led_node = {
+ .name = "lan11-led",
+ .parent = &tink_gpio_leds_node,
+ .properties = tink_lan11_led_props,
+};
+
+static const struct property_entry tink_ha_green_led_props[] = {
+ PROPERTY_ENTRY_STRING("label", "mx100:green:ha"),
+ PROPERTY_ENTRY_GPIO("gpios", &gpio_ich_node, 16, GPIO_ACTIVE_LOW),
+ { }
+};
+
+static const struct software_node tink_ha_green_led_node = {
+ .name = "ha-green-led",
+ .parent = &tink_gpio_leds_node,
+ .properties = tink_ha_green_led_props,
+};
+
+static const struct property_entry tink_ha_orange_led_props[] = {
+ PROPERTY_ENTRY_STRING("label", "mx100:orange:ha"),
+ PROPERTY_ENTRY_GPIO("gpios", &gpio_ich_node, 7, GPIO_ACTIVE_LOW),
+ { }
+};
+
+static const struct software_node tink_ha_orange_led_node = {
+ .name = "ha-orange-led",
+ .parent = &tink_gpio_leds_node,
+ .properties = tink_ha_orange_led_props,
+};
+
+static const struct property_entry tink_usb_green_led_props[] = {
+ PROPERTY_ENTRY_STRING("label", "mx100:green:usb"),
+ PROPERTY_ENTRY_GPIO("gpios", &gpio_ich_node, 21, GPIO_ACTIVE_LOW),
+ { }
+};
+
+static const struct software_node tink_usb_green_led_node = {
+ .name = "usb-green-led",
+ .parent = &tink_gpio_leds_node,
+ .properties = tink_usb_green_led_props,
+};
+
+static const struct property_entry tink_usb_orange_led_props[] = {
+ PROPERTY_ENTRY_STRING("label", "mx100:orange:usb"),
+ PROPERTY_ENTRY_GPIO("gpios", &gpio_ich_node, 19, GPIO_ACTIVE_LOW),
+ { }
+};
+
+static const struct software_node tink_usb_orange_led_node = {
+ .name = "usb-orange-led",
+ .parent = &tink_gpio_leds_node,
+ .properties = tink_usb_orange_led_props,
};
/* Reset Button */
-static struct gpio_keys_button tink_buttons[] = {
- {
- .desc = "Reset",
- .type = EV_KEY,
- .code = KEY_RESTART,
- .active_low = 1,
- .debounce_interval = 100,
- },
+static const struct property_entry tink_gpio_keys_props[] = {
+ PROPERTY_ENTRY_U32("poll-interval", 20),
+ { }
};
-static const struct gpio_keys_platform_data tink_buttons_pdata = {
- .buttons = tink_buttons,
- .nbuttons = ARRAY_SIZE(tink_buttons),
- .poll_interval = 20,
- .rep = 0,
- .name = "mx100-keys",
+static const struct software_node tink_gpio_keys_node = {
+ .name = "mx100-keys",
+ .properties = tink_gpio_keys_props,
};
-static struct gpiod_lookup_table tink_keys_table = {
- .dev_id = "gpio-keys-polled",
- .table = {
- GPIO_LOOKUP_IDX(TINK_GPIO_DRIVER_NAME, 60,
- NULL, 0, GPIO_ACTIVE_LOW),
- {} /* Terminating entry */
- }
+static const struct property_entry tink_reset_key_props[] = {
+ PROPERTY_ENTRY_U32("linux,code", KEY_RESTART),
+ PROPERTY_ENTRY_STRING("label", "Reset"),
+ PROPERTY_ENTRY_GPIO("gpios", &gpio_ich_node, 60, GPIO_ACTIVE_LOW),
+ PROPERTY_ENTRY_U32("linux,input-type", EV_KEY),
+ PROPERTY_ENTRY_U32("debounce-interval", 100),
+ { }
+};
+
+static const struct software_node tink_reset_key_node = {
+ .name = "reset",
+ .parent = &tink_gpio_keys_node,
+ .properties = tink_reset_key_props,
+};
+
+static const struct software_node *tink_swnodes[] = {
+ &gpio_ich_node,
+ /* LEDs nodes */
+ &tink_gpio_leds_node,
+ &tink_internet_led_node,
+ &tink_lan2_led_node,
+ &tink_lan3_led_node,
+ &tink_lan4_led_node,
+ &tink_lan5_led_node,
+ &tink_lan6_led_node,
+ &tink_lan7_led_node,
+ &tink_lan8_led_node,
+ &tink_lan9_led_node,
+ &tink_lan10_led_node,
+ &tink_lan11_led_node,
+ &tink_ha_green_led_node,
+ &tink_ha_orange_led_node,
+ &tink_usb_green_led_node,
+ &tink_usb_orange_led_node,
+ /* Keys nodes */
+ &tink_gpio_keys_node,
+ &tink_reset_key_node,
+ NULL
};
/* Board setup */
@@ -161,22 +282,17 @@ MODULE_DEVICE_TABLE(dmi, tink_systems);
static struct platform_device *tink_leds_pdev;
static struct platform_device *tink_keys_pdev;
-static struct platform_device * __init tink_create_dev(
- const char *name, const void *pdata, size_t sz)
-{
- struct platform_device *pdev;
-
- pdev = platform_device_register_data(NULL,
- name, PLATFORM_DEVID_NONE, pdata, sz);
- if (IS_ERR(pdev))
- pr_err("failed registering %s: %ld\n", name, PTR_ERR(pdev));
-
- return pdev;
-}
-
static int __init tink_board_init(void)
{
- int ret;
+ struct platform_device_info keys_info = {
+ .name = "gpio-keys-polled",
+ .id = PLATFORM_DEVID_NONE,
+ };
+ struct platform_device_info leds_info = {
+ .name = "leds-gpio",
+ .id = PLATFORM_DEVID_NONE,
+ };
+ int err;
if (!dmi_first_match(tink_systems))
return -ENODEV;
@@ -188,30 +304,35 @@ static int __init tink_board_init(void)
*/
outl(inl(0x530) | BIT(28), 0x530);
- gpiod_add_lookup_table(&tink_leds_table);
- gpiod_add_lookup_table(&tink_keys_table);
+ err = software_node_register_node_group(tink_swnodes);
+ if (err) {
+ pr_err("failed to register software nodes: %d\n", err);
+ return err;
+ }
- tink_leds_pdev = tink_create_dev("leds-gpio",
- &tink_leds_pdata, sizeof(tink_leds_pdata));
+ leds_info.fwnode = software_node_fwnode(&tink_gpio_leds_node);
+ tink_leds_pdev = platform_device_register_full(&leds_info);
if (IS_ERR(tink_leds_pdev)) {
- ret = PTR_ERR(tink_leds_pdev);
- goto err;
+ err = PTR_ERR(tink_leds_pdev);
+ pr_err("failed to create LED device: %d\n", err);
+ goto err_unregister_swnodes;
}
- tink_keys_pdev = tink_create_dev("gpio-keys-polled",
- &tink_buttons_pdata, sizeof(tink_buttons_pdata));
+ keys_info.fwnode = software_node_fwnode(&tink_gpio_keys_node);
+ tink_keys_pdev = platform_device_register_full(&keys_info);
if (IS_ERR(tink_keys_pdev)) {
- ret = PTR_ERR(tink_keys_pdev);
- platform_device_unregister(tink_leds_pdev);
- goto err;
+ err = PTR_ERR(tink_keys_pdev);
+ pr_err("failed to create key device: %d\n", err);
+ goto err_unregister_leds;
}
return 0;
-err:
- gpiod_remove_lookup_table(&tink_keys_table);
- gpiod_remove_lookup_table(&tink_leds_table);
- return ret;
+err_unregister_leds:
+ platform_device_unregister(tink_leds_pdev);
+err_unregister_swnodes:
+ software_node_unregister_node_group(tink_swnodes);
+ return err;
}
module_init(tink_board_init);
@@ -219,8 +340,7 @@ static void __exit tink_board_exit(void)
{
platform_device_unregister(tink_keys_pdev);
platform_device_unregister(tink_leds_pdev);
- gpiod_remove_lookup_table(&tink_keys_table);
- gpiod_remove_lookup_table(&tink_leds_table);
+ software_node_unregister_node_group(tink_swnodes);
}
module_exit(tink_board_exit);
diff --git a/drivers/platform/x86/oxpec.c b/drivers/platform/x86/oxpec.c
index eb076bb4099b..54377b282ff8 100644
--- a/drivers/platform/x86/oxpec.c
+++ b/drivers/platform/x86/oxpec.c
@@ -126,6 +126,13 @@ static const struct dmi_system_id dmi_table[] = {
},
{
.matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AOKZOE"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "AOKZOE A1X"),
+ },
+ .driver_data = (void *)oxp_fly,
+ },
+ {
+ .matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
DMI_MATCH(DMI_BOARD_NAME, "AYANEO 2"),
},
@@ -306,6 +313,13 @@ static const struct dmi_system_id dmi_table[] = {
},
.driver_data = (void *)oxp_x1,
},
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER X1Pro EVA-02"),
+ },
+ .driver_data = (void *)oxp_x1,
+ },
{},
};
diff --git a/drivers/platform/x86/pcengines-apuv2.c b/drivers/platform/x86/pcengines-apuv2.c
index 3aa63b18a2e1..3b086863c6ac 100644
--- a/drivers/platform/x86/pcengines-apuv2.c
+++ b/drivers/platform/x86/pcengines-apuv2.c
@@ -12,13 +12,13 @@
#include <linux/dmi.h>
#include <linux/err.h>
+#include <linux/gpio/machine.h>
+#include <linux/gpio/property.h>
+#include <linux/input-event-codes.h>
#include <linux/kernel.h>
-#include <linux/leds.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/gpio_keys.h>
-#include <linux/gpio/machine.h>
-#include <linux/input.h>
+#include <linux/property.h>
#include <linux/platform_data/gpio/gpio-amd-fch.h>
/*
@@ -72,60 +72,91 @@ static const struct amd_fch_gpio_pdata board_apu2 = {
.gpio_names = apu2_gpio_names,
};
+static const struct software_node apu2_gpiochip_node = {
+ .name = AMD_FCH_GPIO_DRIVER_NAME,
+};
+
/* GPIO LEDs device */
+static const struct software_node apu2_leds_node = {
+ .name = "apu2-leds",
+};
-static const struct gpio_led apu2_leds[] = {
- { .name = "apu:green:1" },
- { .name = "apu:green:2" },
- { .name = "apu:green:3" },
+static const struct property_entry apu2_led1_props[] = {
+ PROPERTY_ENTRY_STRING("label", "apu:green:1"),
+ PROPERTY_ENTRY_GPIO("gpios", &apu2_gpiochip_node,
+ APU2_GPIO_LINE_LED1, GPIO_ACTIVE_LOW),
+ { }
};
-static const struct gpio_led_platform_data apu2_leds_pdata = {
- .num_leds = ARRAY_SIZE(apu2_leds),
- .leds = apu2_leds,
+static const struct software_node apu2_led1_swnode = {
+ .name = "led-1",
+ .parent = &apu2_leds_node,
+ .properties = apu2_led1_props,
};
-static struct gpiod_lookup_table gpios_led_table = {
- .dev_id = "leds-gpio",
- .table = {
- GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_LED1,
- NULL, 0, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_LED2,
- NULL, 1, GPIO_ACTIVE_LOW),
- GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_LED3,
- NULL, 2, GPIO_ACTIVE_LOW),
- {} /* Terminating entry */
- }
+static const struct property_entry apu2_led2_props[] = {
+ PROPERTY_ENTRY_STRING("label", "apu:green:2"),
+ PROPERTY_ENTRY_GPIO("gpios", &apu2_gpiochip_node,
+ APU2_GPIO_LINE_LED2, GPIO_ACTIVE_LOW),
+ { }
+};
+
+static const struct software_node apu2_led2_swnode = {
+ .name = "led-2",
+ .parent = &apu2_leds_node,
+ .properties = apu2_led2_props,
+};
+
+static const struct property_entry apu2_led3_props[] = {
+ PROPERTY_ENTRY_STRING("label", "apu:green:3"),
+ PROPERTY_ENTRY_GPIO("gpios", &apu2_gpiochip_node,
+ APU2_GPIO_LINE_LED3, GPIO_ACTIVE_LOW),
+ { }
+};
+
+static const struct software_node apu2_led3_swnode = {
+ .name = "led-3",
+ .parent = &apu2_leds_node,
+ .properties = apu2_led3_props,
};
/* GPIO keyboard device */
+static const struct property_entry apu2_keys_props[] = {
+ PROPERTY_ENTRY_U32("poll-interval", 100),
+ { }
+};
-static struct gpio_keys_button apu2_keys_buttons[] = {
- {
- .code = KEY_RESTART,
- .active_low = 1,
- .desc = "front button",
- .type = EV_KEY,
- .debounce_interval = 10,
- .value = 1,
- },
+static const struct software_node apu2_keys_node = {
+ .name = "apu2-keys",
+ .properties = apu2_keys_props,
};
-static const struct gpio_keys_platform_data apu2_keys_pdata = {
- .buttons = apu2_keys_buttons,
- .nbuttons = ARRAY_SIZE(apu2_keys_buttons),
- .poll_interval = 100,
- .rep = 0,
- .name = "apu2-keys",
+static const struct property_entry apu2_front_button_props[] = {
+ PROPERTY_ENTRY_STRING("label", "front button"),
+ PROPERTY_ENTRY_U32("linux,code", KEY_RESTART),
+ PROPERTY_ENTRY_GPIO("gpios", &apu2_gpiochip_node,
+ APU2_GPIO_LINE_MODESW, GPIO_ACTIVE_LOW),
+ PROPERTY_ENTRY_U32("debounce-interval", 10),
+ { }
};
-static struct gpiod_lookup_table gpios_key_table = {
- .dev_id = "gpio-keys-polled",
- .table = {
- GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_MODESW,
- NULL, 0, GPIO_ACTIVE_LOW),
- {} /* Terminating entry */
- }
+static const struct software_node apu2_front_button_swnode = {
+ .name = "front-button",
+ .parent = &apu2_keys_node,
+ .properties = apu2_front_button_props,
+};
+
+static const struct software_node *apu2_swnodes[] = {
+ &apu2_gpiochip_node,
+ /* LEDs nodes */
+ &apu2_leds_node,
+ &apu2_led1_swnode,
+ &apu2_led2_swnode,
+ &apu2_led3_swnode,
+ /* Keys nodes */
+ &apu2_keys_node,
+ &apu2_front_button_swnode,
+ NULL
};
/* Board setup */
@@ -222,23 +253,25 @@ static struct platform_device *apu_gpio_pdev;
static struct platform_device *apu_leds_pdev;
static struct platform_device *apu_keys_pdev;
-static struct platform_device * __init apu_create_pdev(
- const char *name,
- const void *pdata,
- size_t sz)
+static struct platform_device * __init apu_create_pdev(const char *name,
+ const void *data, size_t size,
+ const struct software_node *swnode)
{
+ struct platform_device_info pdev_info = {
+ .name = name,
+ .id = PLATFORM_DEVID_NONE,
+ .data = data,
+ .size_data = size,
+ .fwnode = software_node_fwnode(swnode),
+ };
struct platform_device *pdev;
+ int err;
- pdev = platform_device_register_resndata(NULL,
- name,
- PLATFORM_DEVID_NONE,
- NULL,
- 0,
- pdata,
- sz);
+ pdev = platform_device_register_full(&pdev_info);
- if (IS_ERR(pdev))
- pr_err("failed registering %s: %ld\n", name, PTR_ERR(pdev));
+ err = PTR_ERR_OR_ZERO(pdev);
+ if (err)
+ pr_err("failed registering %s: %d\n", name, err);
return pdev;
}
@@ -246,6 +279,7 @@ static struct platform_device * __init apu_create_pdev(
static int __init apu_board_init(void)
{
const struct dmi_system_id *id;
+ int err;
id = dmi_first_match(apu_gpio_dmi_table);
if (!id) {
@@ -253,35 +287,45 @@ static int __init apu_board_init(void)
return -ENODEV;
}
- gpiod_add_lookup_table(&gpios_led_table);
- gpiod_add_lookup_table(&gpios_key_table);
+ err = software_node_register_node_group(apu2_swnodes);
+ if (err) {
+ pr_err("failed to register software nodes: %d\n", err);
+ return err;
+ }
- apu_gpio_pdev = apu_create_pdev(
- AMD_FCH_GPIO_DRIVER_NAME,
- id->driver_data,
- sizeof(struct amd_fch_gpio_pdata));
+ apu_gpio_pdev = apu_create_pdev(AMD_FCH_GPIO_DRIVER_NAME,
+ id->driver_data, sizeof(struct amd_fch_gpio_pdata), NULL);
+ err = PTR_ERR_OR_ZERO(apu_gpio_pdev);
+ if (err)
+ goto err_unregister_swnodes;
- apu_leds_pdev = apu_create_pdev(
- "leds-gpio",
- &apu2_leds_pdata,
- sizeof(apu2_leds_pdata));
+ apu_leds_pdev = apu_create_pdev("leds-gpio", NULL, 0, &apu2_leds_node);
+ err = PTR_ERR_OR_ZERO(apu_leds_pdev);
+ if (err)
+ goto err_unregister_gpio;
- apu_keys_pdev = apu_create_pdev(
- "gpio-keys-polled",
- &apu2_keys_pdata,
- sizeof(apu2_keys_pdata));
+ apu_keys_pdev = apu_create_pdev("gpio-keys-polled", NULL, 0, &apu2_keys_node);
+ err = PTR_ERR_OR_ZERO(apu_keys_pdev);
+ if (err)
+ goto err_unregister_leds;
return 0;
+
+err_unregister_leds:
+ platform_device_unregister(apu_leds_pdev);
+err_unregister_gpio:
+ platform_device_unregister(apu_gpio_pdev);
+err_unregister_swnodes:
+ software_node_unregister_node_group(apu2_swnodes);
+ return err;
}
static void __exit apu_board_exit(void)
{
- gpiod_remove_lookup_table(&gpios_led_table);
- gpiod_remove_lookup_table(&gpios_key_table);
-
platform_device_unregister(apu_keys_pdev);
platform_device_unregister(apu_leds_pdev);
platform_device_unregister(apu_gpio_pdev);
+ software_node_unregister_node_group(apu2_swnodes);
}
module_init(apu_board_init);
diff --git a/drivers/platform/x86/portwell-ec.c b/drivers/platform/x86/portwell-ec.c
index 3e019c51913e..ac506ea40eff 100644
--- a/drivers/platform/x86/portwell-ec.c
+++ b/drivers/platform/x86/portwell-ec.c
@@ -5,15 +5,13 @@
* Tested on:
* - Portwell NANO-6064
*
- * This driver provides support for GPIO and Watchdog Timer
- * functionalities of the Portwell boards with ITE embedded controller (EC).
+ * This driver supports Portwell boards with an ITE embedded controller (EC).
* The EC is accessed through I/O ports and provides:
+ * - Temperature and voltage readings (hwmon)
* - 8 GPIO pins for control and monitoring
* - Hardware watchdog with 1-15300 second timeout range
*
- * It integrates with the Linux GPIO and Watchdog subsystems, allowing
- * userspace interaction with EC GPIO pins and watchdog control,
- * ensuring system stability and configurability.
+ * It integrates with the Linux hwmon, GPIO and Watchdog subsystems.
*
* (C) Copyright 2025 Portwell, Inc.
* Author: Yen-Chi Huang (jesse.huang@portwell.com.tw)
@@ -22,16 +20,20 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
+#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/dmi.h>
#include <linux/gpio/driver.h>
+#include <linux/hwmon.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/sizes.h>
#include <linux/string.h>
+#include <linux/units.h>
#include <linux/watchdog.h>
#define PORTWELL_EC_IOSPACE 0xe300
@@ -41,6 +43,9 @@
#define PORTWELL_GPIO_DIR_REG 0x2b
#define PORTWELL_GPIO_VAL_REG 0x2c
+#define PORTWELL_HWMON_TEMP_NUM 3
+#define PORTWELL_HWMON_VOLT_NUM 5
+
#define PORTWELL_WDT_EC_CONFIG_ADDR 0x06
#define PORTWELL_WDT_CONFIG_ENABLE 0x1
#define PORTWELL_WDT_CONFIG_DISABLE 0x0
@@ -52,16 +57,60 @@
#define PORTWELL_EC_FW_VENDOR_LENGTH 3
#define PORTWELL_EC_FW_VENDOR_NAME "PWG"
+#define PORTWELL_EC_ADC_MAX 1023
+
static bool force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "Force loading EC driver without checking DMI boardname");
+/* A sensor's metadata (label, scale, and register) */
+struct pwec_sensor_prop {
+ const char *label;
+ u8 reg;
+ u32 scale;
+};
+
+/* Master configuration with properties for all possible sensors */
+static const struct {
+ const struct pwec_sensor_prop temp_props[PORTWELL_HWMON_TEMP_NUM];
+ const struct pwec_sensor_prop in_props[PORTWELL_HWMON_VOLT_NUM];
+} pwec_master_data = {
+ .temp_props = {
+ { "CPU Temperature", 0x00, 0 },
+ { "System Temperature", 0x02, 0 },
+ { "Aux Temperature", 0x04, 0 },
+ },
+ .in_props = {
+ { "Vcore", 0x20, 3000 },
+ { "3.3V", 0x22, 6000 },
+ { "5V", 0x24, 9600 },
+ { "12V", 0x30, 19800 },
+ { "VDIMM", 0x32, 3000 },
+ },
+};
+
+struct pwec_board_info {
+ u32 temp_mask; /* bit N = temperature channel N */
+ u32 in_mask; /* bit N = voltage channel N */
+};
+
+static const struct pwec_board_info pwec_board_info_default = {
+ .temp_mask = GENMASK(PORTWELL_HWMON_TEMP_NUM - 1, 0),
+ .in_mask = GENMASK(PORTWELL_HWMON_VOLT_NUM - 1, 0),
+};
+
+static const struct pwec_board_info pwec_board_info_nano = {
+ .temp_mask = BIT(0) | BIT(1),
+ .in_mask = GENMASK(4, 0),
+};
+
static const struct dmi_system_id pwec_dmi_table[] = {
{
.ident = "NANO-6064 series",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "NANO-6064"),
},
+ .driver_data = (void *)&pwec_board_info_nano,
},
{ }
};
@@ -79,6 +128,20 @@ static u8 pwec_read(u8 address)
return inb(PORTWELL_EC_IOSPACE + address);
}
+/* Ensure consistent 16-bit read across potential MSB rollover. */
+static u16 pwec_read16_stable(u8 lsb_reg)
+{
+ u8 lsb, msb, old_msb;
+
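+ /*
+ * Sample the MSB before and after the LSB and retry on mismatch so
+ * a carry between the two one-byte port reads cannot be observed.
+ */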
+ do {
+ old_msb = pwec_read(lsb_reg + 1);
+ lsb = pwec_read(lsb_reg);
+ msb = pwec_read(lsb_reg + 1);
+ } while (msb != old_msb);
+
+ return (msb << 8) | lsb;
+}
+
/* GPIO functions */
static int pwec_gpio_get(struct gpio_chip *chip, unsigned int offset)
@@ -86,7 +149,7 @@ static int pwec_gpio_get(struct gpio_chip *chip, unsigned int offset)
return pwec_read(PORTWELL_GPIO_VAL_REG) & BIT(offset) ? 1 : 0;
}
-static int pwec_gpio_set_rv(struct gpio_chip *chip, unsigned int offset, int val)
+static int pwec_gpio_set(struct gpio_chip *chip, unsigned int offset, int val)
{
u8 tmp = pwec_read(PORTWELL_GPIO_VAL_REG);
@@ -130,7 +193,7 @@ static struct gpio_chip pwec_gpio_chip = {
.direction_input = pwec_gpio_direction_input,
.direction_output = pwec_gpio_direction_output,
.get = pwec_gpio_get,
- .set_rv = pwec_gpio_set_rv,
+ .set = pwec_gpio_set,
.base = -1,
.ngpio = PORTWELL_GPIO_PINS,
};
@@ -204,6 +267,81 @@ static struct watchdog_device ec_wdt_dev = {
.max_timeout = PORTWELL_WDT_EC_MAX_COUNT_SECOND,
};
+/* HWMON functions */
+
+static umode_t pwec_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct pwec_board_info *info = drvdata;
+
+ switch (type) {
+ case hwmon_temp:
+ return (info->temp_mask & BIT(channel)) ? 0444 : 0;
+ case hwmon_in:
+ return (info->in_mask & BIT(channel)) ? 0444 : 0;
+ default:
+ return 0;
+ }
+}
+
+static int pwec_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ u16 tmp16;
+
+ switch (type) {
+ case hwmon_temp:
+ *val = pwec_read(pwec_master_data.temp_props[channel].reg) * MILLIDEGREE_PER_DEGREE;
+ return 0;
+ case hwmon_in:
+ tmp16 = pwec_read16_stable(pwec_master_data.in_props[channel].reg);
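+ /* "scale" is the channel's full-scale value in millivolts */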
+ *val = (tmp16 * pwec_master_data.in_props[channel].scale) / PORTWELL_EC_ADC_MAX;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int pwec_hwmon_read_string(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ switch (type) {
+ case hwmon_temp:
+ *str = pwec_master_data.temp_props[channel].label;
+ return 0;
+ case hwmon_in:
+ *str = pwec_master_data.in_props[channel].label;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct hwmon_channel_info *pwec_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL),
+ NULL
+};
+
+static const struct hwmon_ops pwec_hwmon_ops = {
+ .is_visible = pwec_hwmon_is_visible,
+ .read = pwec_hwmon_read,
+ .read_string = pwec_hwmon_read_string,
+};
+
+static const struct hwmon_chip_info pwec_chip_info = {
+ .ops = &pwec_hwmon_ops,
+ .info = pwec_hwmon_info,
+};
+
static int pwec_firmware_vendor_check(void)
{
u8 buf[PORTWELL_EC_FW_VENDOR_LENGTH + 1];
@@ -218,6 +356,8 @@ static int pwec_firmware_vendor_check(void)
static int pwec_probe(struct platform_device *pdev)
{
+ struct device *hwmon_dev;
+ void *drvdata = dev_get_platdata(&pdev->dev);
int ret;
if (!devm_request_region(&pdev->dev, PORTWELL_EC_IOSPACE,
@@ -236,19 +376,40 @@ static int pwec_probe(struct platform_device *pdev)
return ret;
}
- ec_wdt_dev.parent = &pdev->dev;
- ret = devm_watchdog_register_device(&pdev->dev, &ec_wdt_dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to register Portwell EC Watchdog\n");
- return ret;
+ if (IS_REACHABLE(CONFIG_HWMON)) {
+ hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev,
+ "portwell_ec", drvdata, &pwec_chip_info, NULL);
+ ret = PTR_ERR_OR_ZERO(hwmon_dev);
+ if (ret)
+ return ret;
}
+ ec_wdt_dev.parent = &pdev->dev;
+ return devm_watchdog_register_device(&pdev->dev, &ec_wdt_dev);
+}
+
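+/*
+ * Stop an active watchdog for the duration of system sleep and restart
+ * it on resume.
+ */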
+static int pwec_suspend(struct device *dev)
+{
+ if (watchdog_active(&ec_wdt_dev))
+ return pwec_wdt_stop(&ec_wdt_dev);
+
return 0;
}
+static int pwec_resume(struct device *dev)
+{
+ if (watchdog_active(&ec_wdt_dev))
+ return pwec_wdt_start(&ec_wdt_dev);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(pwec_dev_pm_ops, pwec_suspend, pwec_resume);
+
static struct platform_driver pwec_driver = {
.driver = {
.name = "portwell-ec",
+ .pm = pm_sleep_ptr(&pwec_dev_pm_ops),
},
.probe = pwec_probe,
};
@@ -257,19 +418,26 @@ static struct platform_device *pwec_dev;
static int __init pwec_init(void)
{
+ const struct dmi_system_id *match;
+ const struct pwec_board_info *hwmon_data;
int ret;
- if (!dmi_check_system(pwec_dmi_table)) {
+ match = dmi_first_match(pwec_dmi_table);
+ if (!match) {
if (!force)
return -ENODEV;
- pr_warn("force load portwell-ec without DMI check\n");
+ hwmon_data = &pwec_board_info_default;
+ pr_warn("force load portwell-ec without DMI check, using full sensor config\n");
+ } else {
+ hwmon_data = match->driver_data;
}
ret = platform_driver_register(&pwec_driver);
if (ret)
return ret;
- pwec_dev = platform_device_register_simple("portwell-ec", -1, NULL, 0);
+ pwec_dev = platform_device_register_data(NULL, "portwell-ec", PLATFORM_DEVID_NONE,
+ hwmon_data, sizeof(*hwmon_data));
if (IS_ERR(pwec_dev)) {
platform_driver_unregister(&pwec_driver);
return PTR_ERR(pwec_dev);
diff --git a/drivers/platform/x86/quickstart.c b/drivers/platform/x86/quickstart.c
index c332c7cdaff5..acb58518be37 100644
--- a/drivers/platform/x86/quickstart.c
+++ b/drivers/platform/x86/quickstart.c
@@ -154,13 +154,6 @@ static void quickstart_notify_remove(void *context)
acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, quickstart_notify);
}
-static void quickstart_mutex_destroy(void *data)
-{
- struct mutex *lock = data;
-
- mutex_destroy(lock);
-}
-
static int quickstart_probe(struct platform_device *pdev)
{
struct quickstart_data *data;
@@ -179,8 +172,7 @@ static int quickstart_probe(struct platform_device *pdev)
data->dev = &pdev->dev;
dev_set_drvdata(&pdev->dev, data);
- mutex_init(&data->input_lock);
- ret = devm_add_action_or_reset(&pdev->dev, quickstart_mutex_destroy, &data->input_lock);
+ ret = devm_mutex_init(&pdev->dev, &data->input_lock);
if (ret < 0)
return ret;
diff --git a/drivers/platform/x86/redmi-wmi.c b/drivers/platform/x86/redmi-wmi.c
new file mode 100644
index 000000000000..949236b93a32
--- /dev/null
+++ b/drivers/platform/x86/redmi-wmi.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/* WMI driver for Xiaomi Redmibooks */
+
+#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/unaligned.h>
+#include <linux/wmi.h>
+
+#include <uapi/linux/input-event-codes.h>
+
+#define WMI_REDMIBOOK_KEYBOARD_EVENT_GUID "46C93E13-EE9B-4262-8488-563BCA757FEF"
+
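+/* Bit 8 of the 32-bit event payload encodes the AI key position */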
+#define AI_KEY_VALUE_MASK BIT(8)
+
+static const struct key_entry redmi_wmi_keymap[] = {
+ {KE_KEY, 0x00000201, {KEY_SELECTIVE_SCREENSHOT}},
+ {KE_KEY, 0x00000301, {KEY_ALL_APPLICATIONS}},
+ {KE_KEY, 0x00001b01, {KEY_SETUP}},
+
+ /* The AI button reports a separate scancode for each position */
+ {KE_KEY, 0x00011801, {KEY_ASSISTANT}},
+ {KE_KEY, 0x00011901, {KEY_ASSISTANT}},
+
+ /* Keyboard backlight */
+ {KE_IGNORE, 0x00000501, {}},
+ {KE_IGNORE, 0x00800501, {}},
+ {KE_IGNORE, 0x00050501, {}},
+ {KE_IGNORE, 0x000a0501, {}},
+
+ {KE_END}
+};
+
+struct redmi_wmi {
+ struct input_dev *input_dev;
+ /* Protects the key event sequence */
+ struct mutex key_lock;
+};
+
+static int redmi_wmi_probe(struct wmi_device *wdev, const void *context)
+{
+ struct redmi_wmi *data;
+ int err;
+
+ /* Init dev */
+ data = devm_kzalloc(&wdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ dev_set_drvdata(&wdev->dev, data);
+
+ err = devm_mutex_init(&wdev->dev, &data->key_lock);
+ if (err)
+ return err;
+
+ data->input_dev = devm_input_allocate_device(&wdev->dev);
+ if (!data->input_dev)
+ return -ENOMEM;
+
+ data->input_dev->name = "Redmibook WMI keys";
+ data->input_dev->phys = "wmi/input0";
+
+ err = sparse_keymap_setup(data->input_dev, redmi_wmi_keymap, NULL);
+ if (err)
+ return err;
+
+ return input_register_device(data->input_dev);
+}
+
+static void redmi_wmi_notify(struct wmi_device *wdev, union acpi_object *obj)
+{
+ struct key_entry *entry;
+ struct redmi_wmi *data = dev_get_drvdata(&wdev->dev);
+ bool autorelease = true;
+ u32 payload;
+ int value = 1;
+
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ dev_err(&wdev->dev, "Bad response type %u\n", obj->type);
+ return;
+ }
+
+ if (obj->buffer.length < 32) {
+ dev_err(&wdev->dev, "Invalid buffer length %u\n", obj->buffer.length);
+ return;
+ }
+
+ payload = get_unaligned_le32(obj->buffer.pointer);
+ entry = sparse_keymap_entry_from_scancode(data->input_dev, payload);
+
+ if (!entry) {
+ dev_dbg(&wdev->dev, "Unknown WMI event with payload %u\n", payload);
+ return;
+ }
+
+ /*
+ * AI key quirk: report press/release explicitly, taking the state
+ * from the position bit, instead of autoreleasing.
+ */
+ if (entry->keycode == KEY_ASSISTANT) {
+ value = !(payload & AI_KEY_VALUE_MASK);
+ autorelease = false;
+ }
+
+ guard(mutex)(&data->key_lock);
+ sparse_keymap_report_entry(data->input_dev, entry, value, autorelease);
+}
+
+static const struct wmi_device_id redmi_wmi_id_table[] = {
+ { WMI_REDMIBOOK_KEYBOARD_EVENT_GUID, NULL },
+ { }
+};
+
+static struct wmi_driver redmi_wmi_driver = {
+ .driver = {
+ .name = "redmi-wmi",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .id_table = redmi_wmi_id_table,
+ .probe = redmi_wmi_probe,
+ .notify = redmi_wmi_notify,
+ .no_singleton = true,
+};
+module_wmi_driver(redmi_wmi_driver);
+
+MODULE_DEVICE_TABLE(wmi, redmi_wmi_id_table);
+MODULE_AUTHOR("Gladyshev Ilya <foxido@foxido.dev>");
+MODULE_DESCRIPTION("Redmibook WMI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/silicom-platform.c b/drivers/platform/x86/silicom-platform.c
index 63b5da410ed5..266f7bc5e416 100644
--- a/drivers/platform/x86/silicom-platform.c
+++ b/drivers/platform/x86/silicom-platform.c
@@ -466,7 +466,7 @@ static struct gpio_chip silicom_gpio_chip = {
.direction_input = silicom_gpio_direction_input,
.direction_output = silicom_gpio_direction_output,
.get = silicom_gpio_get,
- .set_rv = silicom_gpio_set,
+ .set = silicom_gpio_set,
.base = -1,
.ngpio = ARRAY_SIZE(plat_0222_gpio_channels),
.names = plat_0222_gpio_names,
diff --git a/drivers/platform/x86/x86-android-tablets/Makefile b/drivers/platform/x86/x86-android-tablets/Makefile
index 313be30548bc..a2cf8cbdb351 100644
--- a/drivers/platform/x86/x86-android-tablets/Makefile
+++ b/drivers/platform/x86/x86-android-tablets/Makefile
@@ -6,4 +6,4 @@
obj-$(CONFIG_X86_ANDROID_TABLETS) += vexia_atla10_ec.o
obj-$(CONFIG_X86_ANDROID_TABLETS) += x86-android-tablets.o
x86-android-tablets-y := core.o dmi.o shared-psy-info.o \
- asus.o lenovo.o other.o
+ acer.o asus.o lenovo.o other.o
diff --git a/drivers/platform/x86/x86-android-tablets/acer.c b/drivers/platform/x86/x86-android-tablets/acer.c
new file mode 100644
index 000000000000..d48c70ffd992
--- /dev/null
+++ b/drivers/platform/x86/x86-android-tablets/acer.c
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Board info for Acer X86 tablets which ship with Android as the factory image
+ * and which have broken DSDT tables. The factory kernels shipped on these
+ * devices typically have a bunch of things hardcoded, rather than specified
+ * in their DSDT.
+ *
+ * Copyright (C) 2021-2025 Hans de Goede <hansg@kernel.org>
+ */
+
+#include <linux/gpio/machine.h>
+#include <linux/gpio/property.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+#include "shared-psy-info.h"
+#include "x86-android-tablets.h"
+
+/* Acer Iconia One 8 A1-840 (non FHD version) */
+static const struct property_entry acer_a1_840_bq24190_props[] = {
+ PROPERTY_ENTRY_REF("monitored-battery", &generic_lipo_4v2_battery_node),
+ PROPERTY_ENTRY_BOOL("omit-battery-class"),
+ PROPERTY_ENTRY_BOOL("disable-reset"),
+ { }
+};
+
+static const struct software_node acer_a1_840_bq24190_node = {
+ .properties = acer_a1_840_bq24190_props,
+};
+
+static const struct property_entry acer_a1_840_touchscreen_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 800),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
+ PROPERTY_ENTRY_GPIO("reset-gpios", &baytrail_gpiochip_nodes[1], 26, GPIO_ACTIVE_LOW),
+ { }
+};
+
+static const struct software_node acer_a1_840_touchscreen_node = {
+ .properties = acer_a1_840_touchscreen_props,
+};
+
+static const struct x86_i2c_client_info acer_a1_840_i2c_clients[] __initconst = {
+ {
+ /* BQ24297 charger IC */
+ .board_info = {
+ .type = "bq24297",
+ .addr = 0x6b,
+ .dev_name = "bq24297",
+ .swnode = &acer_a1_840_bq24190_node,
+ .platform_data = &bq24190_pdata,
+ },
+ .adapter_path = "\\_SB_.I2C1",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_GPIOINT,
+ .chip = "INT33FC:02",
+ .index = 2,
+ .trigger = ACPI_EDGE_SENSITIVE,
+ .polarity = ACPI_ACTIVE_LOW,
+ .con_id = "bq24297_irq",
+ },
+ }, {
+ /* MPU6515 sensors */
+ .board_info = {
+ .type = "mpu6515",
+ .addr = 0x69,
+ .dev_name = "mpu6515",
+ },
+ .adapter_path = "\\_SB_.I2C3",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_APIC,
+ .index = 0x47,
+ .trigger = ACPI_EDGE_SENSITIVE,
+ .polarity = ACPI_ACTIVE_HIGH,
+ },
+ }, {
+ /* FT5416 touchscreen controller */
+ .board_info = {
+ .type = "edt-ft5x06",
+ .addr = 0x38,
+ .dev_name = "ft5416",
+ .swnode = &acer_a1_840_touchscreen_node,
+ },
+ .adapter_path = "\\_SB_.I2C4",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_APIC,
+ .index = 0x45,
+ .trigger = ACPI_EDGE_SENSITIVE,
+ .polarity = ACPI_ACTIVE_HIGH,
+ },
+ }
+};
+
+static const struct property_entry acer_a1_840_int3496_props[] __initconst = {
+ PROPERTY_ENTRY_GPIO("mux-gpios", &baytrail_gpiochip_nodes[2], 1, GPIO_ACTIVE_HIGH),
+ PROPERTY_ENTRY_GPIO("id-gpios", &baytrail_gpiochip_nodes[2], 18, GPIO_ACTIVE_HIGH),
+ { }
+};
+
+static const struct platform_device_info acer_a1_840_pdevs[] __initconst = {
+ {
+ /* For micro USB ID pin handling */
+ .name = "intel-int3496",
+ .id = PLATFORM_DEVID_NONE,
+ .properties = acer_a1_840_int3496_props,
+ },
+};
+
+/* Properties for the Dollar Cove TI PMIC battery MFD child used as fuel-gauge */
+static const struct property_entry acer_a1_840_fg_props[] = {
+ PROPERTY_ENTRY_REF("monitored-battery", &generic_lipo_4v2_battery_node),
+ PROPERTY_ENTRY_STRING_ARRAY_LEN("supplied-from", bq24190_psy, 1),
+ PROPERTY_ENTRY_GPIO("charged-gpios", &baytrail_gpiochip_nodes[2], 10, GPIO_ACTIVE_HIGH),
+ { }
+};
+
+static struct device *acer_a1_840_fg_dev;
+static struct fwnode_handle *acer_a1_840_fg_node;
+
+static int __init acer_a1_840_init(struct device *dev)
+{
+ int ret;
+
+ acer_a1_840_fg_dev = bus_find_device_by_name(&platform_bus_type, NULL, "chtdc_ti_battery");
+ if (!acer_a1_840_fg_dev)
+ return dev_err_probe(dev, -EPROBE_DEFER, "getting chtdc_ti_battery dev\n");
+
+ acer_a1_840_fg_node = fwnode_create_software_node(acer_a1_840_fg_props, NULL);
+ if (IS_ERR(acer_a1_840_fg_node)) {
+ ret = PTR_ERR(acer_a1_840_fg_node);
+ goto err_put;
+ }
+
+ ret = device_add_software_node(acer_a1_840_fg_dev,
+ to_software_node(acer_a1_840_fg_node));
+ if (ret)
+ goto err_put;
+
+ return 0;
+
+err_put:
+ fwnode_handle_put(acer_a1_840_fg_node);
+ acer_a1_840_fg_node = NULL;
+ put_device(acer_a1_840_fg_dev);
+ acer_a1_840_fg_dev = NULL;
+ return ret;
+}
+
+static void acer_a1_840_exit(void)
+{
+ device_remove_software_node(acer_a1_840_fg_dev);
+ /*
+ * Skip fwnode_handle_put(acer_a1_840_fg_node) and instead leak the
+ * node. The intel_dc_ti_battery driver may still reference the
+ * strdup-ed "supplied-from" string, which would be freed if the
+ * node were released.
+ */
+ acer_a1_840_fg_node = NULL;
+ put_device(acer_a1_840_fg_dev);
+ acer_a1_840_fg_dev = NULL;
+}
+
+static const char * const acer_a1_840_modules[] __initconst = {
+ "bq24190_charger", /* For the Vbus regulator for intel-int3496 */
+ NULL
+};
+
+const struct x86_dev_info acer_a1_840_info __initconst = {
+ .i2c_client_info = acer_a1_840_i2c_clients,
+ .i2c_client_count = ARRAY_SIZE(acer_a1_840_i2c_clients),
+ .pdev_info = acer_a1_840_pdevs,
+ .pdev_count = ARRAY_SIZE(acer_a1_840_pdevs),
+ .gpiochip_type = X86_GPIOCHIP_BAYTRAIL,
+ .swnode_group = generic_lipo_4v2_battery_swnodes,
+ .modules = acer_a1_840_modules,
+ .init = acer_a1_840_init,
+ .exit = acer_a1_840_exit,
+};
+
+/* Acer Iconia One 7 B1-750 has an Android factory image with everything hardcoded */
+static const char * const acer_b1_750_mount_matrix[] = {
+ "-1", "0", "0",
+ "0", "1", "0",
+ "0", "0", "1"
+};
+
+static const struct property_entry acer_b1_750_bma250e_props[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("mount-matrix", acer_b1_750_mount_matrix),
+ { }
+};
+
+static const struct software_node acer_b1_750_bma250e_node = {
+ .properties = acer_b1_750_bma250e_props,
+};
+
+static const struct property_entry acer_b1_750_novatek_props[] = {
+ PROPERTY_ENTRY_GPIO("reset-gpios", &baytrail_gpiochip_nodes[1], 26, GPIO_ACTIVE_LOW),
+ { }
+};
+
+static const struct software_node acer_b1_750_novatek_node = {
+ .properties = acer_b1_750_novatek_props,
+};
+
+static const struct x86_i2c_client_info acer_b1_750_i2c_clients[] __initconst = {
+ {
+ /* Novatek NVT-ts touchscreen */
+ .board_info = {
+ .type = "nt11205-ts",
+ .addr = 0x34,
+ .dev_name = "NVT-ts",
+ .swnode = &acer_b1_750_novatek_node,
+ },
+ .adapter_path = "\\_SB_.I2C4",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_GPIOINT,
+ .chip = "INT33FC:02",
+ .index = 3,
+ .trigger = ACPI_EDGE_SENSITIVE,
+ .polarity = ACPI_ACTIVE_LOW,
+ .con_id = "NVT-ts_irq",
+ },
+ }, {
+ /* BMA250E accelerometer */
+ .board_info = {
+ .type = "bma250e",
+ .addr = 0x18,
+ .swnode = &acer_b1_750_bma250e_node,
+ },
+ .adapter_path = "\\_SB_.I2C3",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_GPIOINT,
+ .chip = "INT33FC:02",
+ .index = 25,
+ .trigger = ACPI_LEVEL_SENSITIVE,
+ .polarity = ACPI_ACTIVE_HIGH,
+ .con_id = "bma250e_irq",
+ },
+ },
+};
+
+const struct x86_dev_info acer_b1_750_info __initconst = {
+ .i2c_client_info = acer_b1_750_i2c_clients,
+ .i2c_client_count = ARRAY_SIZE(acer_b1_750_i2c_clients),
+ .pdev_info = int3496_pdevs,
+ .pdev_count = 1,
+ .gpiochip_type = X86_GPIOCHIP_BAYTRAIL,
+};
diff --git a/drivers/platform/x86/x86-android-tablets/asus.c b/drivers/platform/x86/x86-android-tablets/asus.c
index 97cd14c1fd23..7d29c7654d21 100644
--- a/drivers/platform/x86/x86-android-tablets/asus.c
+++ b/drivers/platform/x86/x86-android-tablets/asus.c
@@ -5,36 +5,55 @@
* devices typically have a bunch of things hardcoded, rather than specified
* in their DSDT.
*
- * Copyright (C) 2021-2023 Hans de Goede <hdegoede@redhat.com>
+ * Copyright (C) 2021-2023 Hans de Goede <hansg@kernel.org>
*/
#include <linux/gpio/machine.h>
-#include <linux/input.h>
+#include <linux/gpio/property.h>
+#include <linux/input-event-codes.h>
#include <linux/platform_device.h>
#include "shared-psy-info.h"
#include "x86-android-tablets.h"
/* Asus ME176C and TF103C tablets shared data */
-static struct gpiod_lookup_table int3496_gpo2_pin22_gpios = {
- .dev_id = "intel-int3496",
- .table = {
- GPIO_LOOKUP("INT33FC:02", 22, "id", GPIO_ACTIVE_HIGH),
- { }
- },
+static const struct property_entry asus_me176c_tf103c_int3496_props[] __initconst = {
+ PROPERTY_ENTRY_GPIO("id-gpios", &baytrail_gpiochip_nodes[2], 22, GPIO_ACTIVE_HIGH),
+ { }
};
-static const struct x86_gpio_button asus_me176c_tf103c_lid __initconst = {
- .button = {
- .code = SW_LID,
- .active_low = true,
- .desc = "lid_sw",
- .type = EV_SW,
- .wakeup = true,
- .debounce_interval = 50,
+static const struct platform_device_info asus_me176c_tf103c_pdevs[] __initconst = {
+ {
+ /* For micro USB ID pin handling */
+ .name = "intel-int3496",
+ .id = PLATFORM_DEVID_NONE,
+ .properties = asus_me176c_tf103c_int3496_props,
},
- .chip = "INT33FC:02",
- .pin = 12,
+};
+
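+/*
+ * Software nodes for the gpio-keys device: the parent node represents
+ * the device itself and the child node the LID switch, as required by
+ * the gpio-keys device tree binding.
+ */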
+static const struct software_node asus_me176c_tf103c_gpio_keys_node = {
+ .name = "lid_sw",
+};
+
+static const struct property_entry asus_me176c_tf103c_lid_props[] = {
+ PROPERTY_ENTRY_U32("linux,input-type", EV_SW),
+ PROPERTY_ENTRY_U32("linux,code", SW_LID),
+ PROPERTY_ENTRY_STRING("label", "lid_sw"),
+ PROPERTY_ENTRY_GPIO("gpios", &baytrail_gpiochip_nodes[2], 12, GPIO_ACTIVE_LOW),
+ PROPERTY_ENTRY_U32("debounce-interval", 50),
+ PROPERTY_ENTRY_BOOL("wakeup-source"),
+ { }
+};
+
+static const struct software_node asus_me176c_tf103c_lid_node = {
+ .parent = &asus_me176c_tf103c_gpio_keys_node,
+ .properties = asus_me176c_tf103c_lid_props,
+};
+
+static const struct software_node *asus_me176c_tf103c_lid_swnodes[] = {
+ &asus_me176c_tf103c_gpio_keys_node,
+ &asus_me176c_tf103c_lid_node,
+ NULL
};
/* Asus ME176C tablets have an Android factory image with everything hardcoded */
@@ -77,6 +96,16 @@ static const struct software_node asus_me176c_ug3105_node = {
.properties = asus_me176c_ug3105_props,
};
+static const struct property_entry asus_me176c_touchscreen_props[] = {
+ PROPERTY_ENTRY_GPIO("reset-gpios", &baytrail_gpiochip_nodes[0], 60, GPIO_ACTIVE_HIGH),
+ PROPERTY_ENTRY_GPIO("irq-gpios", &baytrail_gpiochip_nodes[2], 28, GPIO_ACTIVE_HIGH),
+ { }
+};
+
+static const struct software_node asus_me176c_touchscreen_node = {
+ .properties = asus_me176c_touchscreen_props,
+};
+
static const struct x86_i2c_client_info asus_me176c_i2c_clients[] __initconst = {
{
/* bq24297 battery charger */
@@ -132,6 +161,7 @@ static const struct x86_i2c_client_info asus_me176c_i2c_clients[] __initconst =
.type = "GDIX1001:00",
.addr = 0x14,
.dev_name = "goodix_ts",
+ .swnode = &asus_me176c_touchscreen_node,
},
.adapter_path = "\\_SB_.I2C6",
.irq_data = {
@@ -152,33 +182,17 @@ static const struct x86_serdev_info asus_me176c_serdevs[] __initconst = {
},
};
-static struct gpiod_lookup_table asus_me176c_goodix_gpios = {
- .dev_id = "i2c-goodix_ts",
- .table = {
- GPIO_LOOKUP("INT33FC:00", 60, "reset", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("INT33FC:02", 28, "irq", GPIO_ACTIVE_HIGH),
- { }
- },
-};
-
-static struct gpiod_lookup_table * const asus_me176c_gpios[] = {
- &int3496_gpo2_pin22_gpios,
- &asus_me176c_goodix_gpios,
- NULL
-};
-
const struct x86_dev_info asus_me176c_info __initconst = {
.i2c_client_info = asus_me176c_i2c_clients,
.i2c_client_count = ARRAY_SIZE(asus_me176c_i2c_clients),
- .pdev_info = int3496_pdevs,
- .pdev_count = 1,
+ .pdev_info = asus_me176c_tf103c_pdevs,
+ .pdev_count = ARRAY_SIZE(asus_me176c_tf103c_pdevs),
.serdev_info = asus_me176c_serdevs,
.serdev_count = ARRAY_SIZE(asus_me176c_serdevs),
- .gpio_button = &asus_me176c_tf103c_lid,
- .gpio_button_count = 1,
- .gpiod_lookup_tables = asus_me176c_gpios,
- .bat_swnode = &generic_lipo_hv_4v35_battery_node,
+ .gpio_button_swnodes = asus_me176c_tf103c_lid_swnodes,
+ .swnode_group = generic_lipo_hv_4v35_battery_swnodes,
.modules = bq24190_modules,
+ .gpiochip_type = X86_GPIOCHIP_BAYTRAIL,
};
/* Asus TF103C tablets have an Android factory image with everything hardcoded */
@@ -293,19 +307,13 @@ static const struct x86_i2c_client_info asus_tf103c_i2c_clients[] __initconst =
},
};
-static struct gpiod_lookup_table * const asus_tf103c_gpios[] = {
- &int3496_gpo2_pin22_gpios,
- NULL
-};
-
const struct x86_dev_info asus_tf103c_info __initconst = {
.i2c_client_info = asus_tf103c_i2c_clients,
.i2c_client_count = ARRAY_SIZE(asus_tf103c_i2c_clients),
- .pdev_info = int3496_pdevs,
- .pdev_count = 1,
- .gpio_button = &asus_me176c_tf103c_lid,
- .gpio_button_count = 1,
- .gpiod_lookup_tables = asus_tf103c_gpios,
- .bat_swnode = &generic_lipo_4v2_battery_node,
+ .pdev_info = asus_me176c_tf103c_pdevs,
+ .pdev_count = ARRAY_SIZE(asus_me176c_tf103c_pdevs),
+ .gpio_button_swnodes = asus_me176c_tf103c_lid_swnodes,
+ .swnode_group = generic_lipo_4v2_battery_swnodes,
.modules = bq24190_modules,
+ .gpiochip_type = X86_GPIOCHIP_BAYTRAIL,
};
diff --git a/drivers/platform/x86/x86-android-tablets/core.c b/drivers/platform/x86/x86-android-tablets/core.c
index 2a9c47178505..6588fae30356 100644
--- a/drivers/platform/x86/x86-android-tablets/core.c
+++ b/drivers/platform/x86/x86-android-tablets/core.c
@@ -5,7 +5,7 @@
* devices typically have a bunch of things hardcoded, rather than specified
* in their DSDT.
*
- * Copyright (C) 2021-2023 Hans de Goede <hdegoede@redhat.com>
+ * Copyright (C) 2021-2023 Hans de Goede <hansg@kernel.org>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -152,9 +152,9 @@ static struct i2c_client **i2c_clients;
static struct spi_device **spi_devs;
static struct platform_device **pdevs;
static struct serdev_device **serdevs;
-static struct gpio_keys_button *buttons;
-static struct gpiod_lookup_table * const *gpiod_lookup_tables;
-static const struct software_node *bat_swnode;
+static const struct software_node **gpio_button_swnodes;
+static const struct software_node **swnode_group;
+static const struct software_node **gpiochip_node_group;
static void (*exit_handler)(void);
static __init struct i2c_adapter *
@@ -265,8 +265,7 @@ static __init int x86_instantiate_spi_dev(const struct x86_dev_info *dev_info, i
spi_devs[idx] = spi_new_device(controller, &board_info);
put_device(&controller->dev);
if (!spi_devs[idx])
- return dev_err_probe(&controller->dev, -ENOMEM,
- "creating SPI-device %d\n", idx);
+ return -ENOMEM;
return 0;
}
@@ -277,8 +276,10 @@ get_serdev_controller_by_pci_parent(const struct x86_serdev_info *info)
struct pci_dev *pdev;
pdev = pci_get_domain_bus_and_slot(0, 0, info->ctrl.pci.devfn);
- if (!pdev)
- return ERR_PTR(-EPROBE_DEFER);
+ if (!pdev) {
+ pr_err("could not get PCI serdev at devfn 0x%02x\n", info->ctrl.pci.devfn);
+ return ERR_PTR(-ENODEV);
+ }
/* This puts our reference on pdev and returns a ref on the ctrl */
return get_serdev_controller_from_parent(&pdev->dev, 0, info->ctrl_devname);
@@ -331,6 +332,34 @@ put_ctrl_dev:
return ret;
}
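+/*
+ * Software nodes named after the Bay Trail GPIO controllers; gpiolib
+ * matches these names against the gpiochip labels when resolving
+ * PROPERTY_ENTRY_GPIO() references in the board files.
+ */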
+const struct software_node baytrail_gpiochip_nodes[] = {
+ { .name = "INT33FC:00" },
+ { .name = "INT33FC:01" },
+ { .name = "INT33FC:02" },
+};
+
+static const struct software_node *baytrail_gpiochip_node_group[] = {
+ &baytrail_gpiochip_nodes[0],
+ &baytrail_gpiochip_nodes[1],
+ &baytrail_gpiochip_nodes[2],
+ NULL
+};
+
+const struct software_node cherryview_gpiochip_nodes[] = {
+ { .name = "INT33FF:00" },
+ { .name = "INT33FF:01" },
+ { .name = "INT33FF:02" },
+ { .name = "INT33FF:03" },
+};
+
+static const struct software_node *cherryview_gpiochip_node_group[] = {
+ &cherryview_gpiochip_nodes[0],
+ &cherryview_gpiochip_nodes[1],
+ &cherryview_gpiochip_nodes[2],
+ &cherryview_gpiochip_nodes[3],
+ NULL
+};
+
static void x86_android_tablet_remove(struct platform_device *pdev)
{
int i;
@@ -346,7 +375,6 @@ static void x86_android_tablet_remove(struct platform_device *pdev)
platform_device_unregister(pdevs[i]);
kfree(pdevs);
- kfree(buttons);
for (i = spi_dev_count - 1; i >= 0; i--)
spi_unregister_device(spi_devs[i]);
@@ -361,10 +389,9 @@ static void x86_android_tablet_remove(struct platform_device *pdev)
if (exit_handler)
exit_handler();
- for (i = 0; gpiod_lookup_tables && gpiod_lookup_tables[i]; i++)
- gpiod_remove_lookup_table(gpiod_lookup_tables[i]);
-
- software_node_unregister(bat_swnode);
+ software_node_unregister_node_group(gpio_button_swnodes);
+ software_node_unregister_node_group(swnode_group);
+ software_node_unregister_node_group(gpiochip_node_group);
}
static __init int x86_android_tablet_probe(struct platform_device *pdev)
@@ -388,16 +415,28 @@ static __init int x86_android_tablet_probe(struct platform_device *pdev)
for (i = 0; dev_info->modules && dev_info->modules[i]; i++)
request_module(dev_info->modules[i]);
- bat_swnode = dev_info->bat_swnode;
- if (bat_swnode) {
- ret = software_node_register(bat_swnode);
- if (ret)
- return ret;
+ switch (dev_info->gpiochip_type) {
+ case X86_GPIOCHIP_BAYTRAIL:
+ gpiochip_node_group = baytrail_gpiochip_node_group;
+ break;
+ case X86_GPIOCHIP_CHERRYVIEW:
+ gpiochip_node_group = cherryview_gpiochip_node_group;
+ break;
+ case X86_GPIOCHIP_UNSPECIFIED:
+ gpiochip_node_group = NULL;
+ break;
}
- gpiod_lookup_tables = dev_info->gpiod_lookup_tables;
- for (i = 0; gpiod_lookup_tables && gpiod_lookup_tables[i]; i++)
- gpiod_add_lookup_table(gpiod_lookup_tables[i]);
+ ret = software_node_register_node_group(gpiochip_node_group);
+ if (ret)
+ return ret;
+
+ ret = software_node_register_node_group(dev_info->swnode_group);
+ if (ret) {
+ x86_android_tablet_remove(pdev);
+ return ret;
+ }
+ swnode_group = dev_info->swnode_group;
if (dev_info->init) {
ret = dev_info->init(&pdev->dev);
@@ -470,38 +509,22 @@ static __init int x86_android_tablet_probe(struct platform_device *pdev)
}
}
- if (dev_info->gpio_button_count) {
- struct gpio_keys_platform_data pdata = { };
- struct gpio_desc *gpiod;
+ if (dev_info->gpio_button_swnodes) {
+ struct platform_device_info button_info = {
+ .name = "gpio-keys",
+ .id = PLATFORM_DEVID_AUTO,
+ };
- buttons = kcalloc(dev_info->gpio_button_count, sizeof(*buttons), GFP_KERNEL);
- if (!buttons) {
+ ret = software_node_register_node_group(dev_info->gpio_button_swnodes);
+ if (ret < 0) {
x86_android_tablet_remove(pdev);
- return -ENOMEM;
- }
-
- for (i = 0; i < dev_info->gpio_button_count; i++) {
- ret = x86_android_tablet_get_gpiod(dev_info->gpio_button[i].chip,
- dev_info->gpio_button[i].pin,
- dev_info->gpio_button[i].button.desc,
- false, GPIOD_IN, &gpiod);
- if (ret < 0) {
- x86_android_tablet_remove(pdev);
- return ret;
- }
-
- buttons[i] = dev_info->gpio_button[i].button;
- buttons[i].gpio = desc_to_gpio(gpiod);
- /* Release GPIO descriptor so that gpio-keys can request it */
- devm_gpiod_put(&x86_android_tablet_device->dev, gpiod);
+ return ret;
}
- pdata.buttons = buttons;
- pdata.nbuttons = dev_info->gpio_button_count;
+ gpio_button_swnodes = dev_info->gpio_button_swnodes;
- pdevs[pdev_count] = platform_device_register_data(&pdev->dev, "gpio-keys",
- PLATFORM_DEVID_AUTO,
- &pdata, sizeof(pdata));
+ button_info.fwnode = software_node_fwnode(dev_info->gpio_button_swnodes[0]);
+ pdevs[pdev_count] = platform_device_register_full(&button_info);
if (IS_ERR(pdevs[pdev_count])) {
ret = PTR_ERR(pdevs[pdev_count]);
x86_android_tablet_remove(pdev);
@@ -537,6 +560,6 @@ static void __exit x86_android_tablet_exit(void)
}
module_exit(x86_android_tablet_exit);
-MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_AUTHOR("Hans de Goede <hansg@kernel.org>");
MODULE_DESCRIPTION("X86 Android tablets DSDT fixups driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/x86-android-tablets/dmi.c b/drivers/platform/x86/x86-android-tablets/dmi.c
index 278c6d151dc4..4a5720d6fc1d 100644
--- a/drivers/platform/x86/x86-android-tablets/dmi.c
+++ b/drivers/platform/x86/x86-android-tablets/dmi.c
@@ -5,7 +5,7 @@
* devices typically have a bunch of things hardcoded, rather than specified
* in their DSDT.
*
- * Copyright (C) 2021-2023 Hans de Goede <hdegoede@redhat.com>
+ * Copyright (C) 2021-2023 Hans de Goede <hansg@kernel.org>
*/
#include <linux/dmi.h>
@@ -17,6 +17,16 @@
const struct dmi_system_id x86_android_tablet_ids[] __initconst = {
{
+ /* Acer Iconia One 8 A1-840 (non FHD version) */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "BayTrail"),
+ /* Above strings are too generic, so also match on the BIOS date */
+ DMI_MATCH(DMI_BIOS_DATE, "04/01/2014"),
+ },
+ .driver_data = (void *)&acer_a1_840_info,
+ },
+ {
/* Acer Iconia One 7 B1-750 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
diff --git a/drivers/platform/x86/x86-android-tablets/lenovo.c b/drivers/platform/x86/x86-android-tablets/lenovo.c
index 1241a97cda39..e3d3a8290949 100644
--- a/drivers/platform/x86/x86-android-tablets/lenovo.c
+++ b/drivers/platform/x86/x86-android-tablets/lenovo.c
@@ -5,13 +5,15 @@
* devices typically have a bunch of things hardcoded, rather than specified
* in their DSDT.
*
- * Copyright (C) 2021-2023 Hans de Goede <hdegoede@redhat.com>
+ * Copyright (C) 2021-2023 Hans de Goede <hansg@kernel.org>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/efi.h>
#include <linux/gpio/machine.h>
+#include <linux/gpio/property.h>
+#include <linux/input-event-codes.h>
#include <linux/mfd/arizona/pdata.h>
#include <linux/mfd/arizona/registers.h>
#include <linux/mfd/intel_soc_pmic.h>
@@ -59,11 +61,30 @@ static struct lp855x_platform_data lenovo_lp8557_reg_only_pdata = {
.initial_brightness = 128,
};
+static const struct software_node arizona_gpiochip_node = {
+ .name = "arizona",
+};
+
+static const struct software_node crystalcove_gpiochip_node = {
+ .name = "gpio_crystalcove",
+};
+
/* Lenovo Yoga Book X90F / X90L's Android factory image has everything hardcoded */
+static const struct property_entry lenovo_yb1_x90_goodix_props[] = {
+ PROPERTY_ENTRY_GPIO("reset-gpios", &cherryview_gpiochip_nodes[1], 53, GPIO_ACTIVE_HIGH),
+ PROPERTY_ENTRY_GPIO("irq-gpios", &cherryview_gpiochip_nodes[1], 56, GPIO_ACTIVE_HIGH),
+ { }
+};
+
+static const struct software_node lenovo_yb1_x90_goodix_node = {
+ .properties = lenovo_yb1_x90_goodix_props,
+};
+
static const struct property_entry lenovo_yb1_x90_wacom_props[] = {
PROPERTY_ENTRY_U32("hid-descr-addr", 0x0001),
PROPERTY_ENTRY_U32("post-reset-deassert-delay-ms", 150),
+ PROPERTY_ENTRY_GPIO("reset-gpios", &cherryview_gpiochip_nodes[0], 82, GPIO_ACTIVE_LOW),
{ }
};
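PROPERTY_ENTRY_GPIO() (from <linux/gpio/property.h>) stores a reference to a gpiochip software node plus a pin index and polarity; gpiolib's software-node backend resolves it by matching the referenced node's .name against the registered gpio_chip label. A hedged sketch of both ends, with demo_* names standing in for the real nodes:

static const struct software_node demo_chip_node = {
	.name = "INT33FF:01",	/* must equal the gpio_chip label */
};

static const struct property_entry demo_ts_props[] = {
	PROPERTY_ENTRY_GPIO("reset-gpios", &demo_chip_node, 53, GPIO_ACTIVE_HIGH),
	{ }
};

/* Consumer side: a plain gpiod_get() with con_id "reset" now resolves */
static int demo_probe(struct i2c_client *client)
{
	struct gpio_desc *reset = devm_gpiod_get(&client->dev, "reset",
						 GPIOD_OUT_LOW);
	return PTR_ERR_OR_ZERO(reset);
}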
@@ -85,6 +106,7 @@ static const struct property_entry lenovo_yb1_x90_hideep_ts_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-y", 1920),
PROPERTY_ENTRY_U32("touchscreen-max-pressure", 16384),
PROPERTY_ENTRY_BOOL("hideep,force-native-protocol"),
+ PROPERTY_ENTRY_GPIO("reset-gpios", &cherryview_gpiochip_nodes[0], 7, GPIO_ACTIVE_LOW),
{ }
};
@@ -108,6 +130,7 @@ static const struct x86_i2c_client_info lenovo_yb1_x90_i2c_clients[] __initconst
.type = "GDIX1001:00",
.addr = 0x14,
.dev_name = "goodix_ts",
+ .swnode = &lenovo_yb1_x90_goodix_node,
},
.adapter_path = "\\_SB_.PCI0.I2C2",
.irq_data = {
@@ -185,48 +208,33 @@ static const struct x86_serdev_info lenovo_yb1_x90_serdevs[] __initconst = {
},
};
-static const struct x86_gpio_button lenovo_yb1_x90_lid __initconst = {
- .button = {
- .code = SW_LID,
- .active_low = true,
- .desc = "lid_sw",
- .type = EV_SW,
- .wakeup = true,
- .debounce_interval = 50,
- },
- .chip = "INT33FF:02",
- .pin = 19,
-};
-
-static struct gpiod_lookup_table lenovo_yb1_x90_goodix_gpios = {
- .dev_id = "i2c-goodix_ts",
- .table = {
- GPIO_LOOKUP("INT33FF:01", 53, "reset", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("INT33FF:01", 56, "irq", GPIO_ACTIVE_HIGH),
- { }
- },
+/*
+ * Software node attached to gpio-keys device representing the LID and
+ * serving as a parent to software nodes representing individual keys/buttons
+ * as required by the device tree binding.
+ */
+static const struct software_node lenovo_lid_gpio_keys_node = {
+ .name = "lid_sw",
};
-static struct gpiod_lookup_table lenovo_yb1_x90_hideep_gpios = {
- .dev_id = "i2c-hideep_ts",
- .table = {
- GPIO_LOOKUP("INT33FF:00", 7, "reset", GPIO_ACTIVE_LOW),
- { }
- },
+static const struct property_entry lenovo_yb1_x90_lid_props[] = {
+ PROPERTY_ENTRY_U32("linux,input-type", EV_SW),
+ PROPERTY_ENTRY_U32("linux,code", SW_LID),
+ PROPERTY_ENTRY_STRING("label", "lid_sw"),
+ PROPERTY_ENTRY_GPIO("gpios", &cherryview_gpiochip_nodes[2], 19, GPIO_ACTIVE_LOW),
+ PROPERTY_ENTRY_U32("debounce-interval", 50),
+ PROPERTY_ENTRY_BOOL("wakeup-source"),
+ { }
};
-static struct gpiod_lookup_table lenovo_yb1_x90_wacom_gpios = {
- .dev_id = "i2c-wacom",
- .table = {
- GPIO_LOOKUP("INT33FF:00", 82, "reset", GPIO_ACTIVE_LOW),
- { }
- },
+static const struct software_node lenovo_yb1_x90_lid_node = {
+ .parent = &lenovo_lid_gpio_keys_node,
+ .properties = lenovo_yb1_x90_lid_props,
};
-static struct gpiod_lookup_table * const lenovo_yb1_x90_gpios[] = {
- &lenovo_yb1_x90_hideep_gpios,
- &lenovo_yb1_x90_goodix_gpios,
- &lenovo_yb1_x90_wacom_gpios,
+static const struct software_node *lenovo_yb1_x90_lid_swnodes[] = {
+ &lenovo_lid_gpio_keys_node,
+ &lenovo_yb1_x90_lid_node,
NULL
};
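On the consumer side, gpio-keys walks the child nodes of its fwnode and reads back the same properties set above, which is why the binding needs the parent/child split. Roughly (heavily simplified from drivers/input/keyboard/gpio_keys.c, error paths trimmed):

static int demo_parse_buttons(struct device *dev)
{
	struct fwnode_handle *child;
	struct gpio_desc *gpiod;
	u32 code;

	device_for_each_child_node(dev, child) {
		if (fwnode_property_read_u32(child, "linux,code", &code)) {
			fwnode_handle_put(child);
			return -EINVAL;
		}
		gpiod = devm_fwnode_gpiod_get(dev, child, NULL, GPIOD_IN,
					      fwnode_get_name(child));
		if (IS_ERR(gpiod)) {
			fwnode_handle_put(child);
			return PTR_ERR(gpiod);
		}
		/* register the input event for "code" here */
	}
	return 0;
}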
@@ -256,9 +264,8 @@ const struct x86_dev_info lenovo_yogabook_x90_info __initconst = {
.pdev_count = ARRAY_SIZE(lenovo_yb1_x90_pdevs),
.serdev_info = lenovo_yb1_x90_serdevs,
.serdev_count = ARRAY_SIZE(lenovo_yb1_x90_serdevs),
- .gpio_button = &lenovo_yb1_x90_lid,
- .gpio_button_count = 1,
- .gpiod_lookup_tables = lenovo_yb1_x90_gpios,
+ .gpio_button_swnodes = lenovo_yb1_x90_lid_swnodes,
+ .gpiochip_type = X86_GPIOCHIP_CHERRYVIEW,
.init = lenovo_yb1_x90_init,
};
@@ -294,17 +301,25 @@ static const struct software_node lenovo_yoga_tab2_830_1050_bq24190_node = {
.properties = lenovo_yoga_tab2_830_1050_bq24190_props,
};
-static const struct x86_gpio_button lenovo_yoga_tab2_830_1050_lid __initconst = {
- .button = {
- .code = SW_LID,
- .active_low = true,
- .desc = "lid_sw",
- .type = EV_SW,
- .wakeup = true,
- .debounce_interval = 50,
- },
- .chip = "INT33FC:02",
- .pin = 26,
+static const struct property_entry lenovo_yoga_tab2_830_1050_lid_props[] = {
+ PROPERTY_ENTRY_U32("linux,input-type", EV_SW),
+ PROPERTY_ENTRY_U32("linux,code", SW_LID),
+ PROPERTY_ENTRY_STRING("label", "lid_sw"),
+ PROPERTY_ENTRY_GPIO("gpios", &baytrail_gpiochip_nodes[2], 26, GPIO_ACTIVE_LOW),
+ PROPERTY_ENTRY_U32("debounce-interval", 50),
+ PROPERTY_ENTRY_BOOL("wakeup-source"),
+ { }
+};
+
+static const struct software_node lenovo_yoga_tab2_830_1050_lid_node = {
+ .parent = &lenovo_lid_gpio_keys_node,
+ .properties = lenovo_yoga_tab2_830_1050_lid_props,
+};
+
+static const struct software_node *lenovo_yoga_tab2_830_1050_lid_swnodes[] = {
+ &lenovo_lid_gpio_keys_node,
+ &lenovo_yoga_tab2_830_1050_lid_node,
+ NULL
};
/* This gets filled by lenovo_yoga_tab2_830_1050_init() */
@@ -384,47 +399,65 @@ static struct x86_i2c_client_info lenovo_yoga_tab2_830_1050_i2c_clients[] __init
},
};
-static struct gpiod_lookup_table lenovo_yoga_tab2_830_1050_int3496_gpios = {
- .dev_id = "intel-int3496",
- .table = {
- GPIO_LOOKUP("INT33FC:02", 1, "mux", GPIO_ACTIVE_LOW),
- GPIO_LOOKUP("INT33FC:02", 24, "id", GPIO_ACTIVE_HIGH),
- { }
+static const struct property_entry lenovo_yoga_tab2_830_1050_int3496_props[] __initconst = {
+ PROPERTY_ENTRY_GPIO("mux-gpios", &baytrail_gpiochip_nodes[2], 1, GPIO_ACTIVE_LOW),
+ PROPERTY_ENTRY_GPIO("id-gpios", &baytrail_gpiochip_nodes[2], 24, GPIO_ACTIVE_HIGH),
+ { }
+};
+
+static const struct platform_device_info lenovo_yoga_tab2_830_1050_pdevs[] __initconst = {
+ {
+ /* For micro USB ID pin handling */
+ .name = "intel-int3496",
+ .id = PLATFORM_DEVID_NONE,
+ .properties = lenovo_yoga_tab2_830_1050_int3496_props,
},
};
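Unlike a gpiod_lookup_table, which had to persist and be keyed by dev_id, a property array attached through platform_device_info is copied onto the new device by platform_device_register_full() (via a managed software node), which is why the arrays above can live in __initconst memory. Sketch, reusing the demo_chip_node from the earlier example:

static const struct property_entry demo_int3496_props[] __initconst = {
	PROPERTY_ENTRY_GPIO("id-gpios", &demo_chip_node, 24, GPIO_ACTIVE_HIGH),
	{ }
};

static const struct platform_device_info demo_int3496_pdev __initconst = {
	.name = "intel-int3496",
	.id = PLATFORM_DEVID_NONE,
	.properties = demo_int3496_props,	/* copied at registration */
};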
#define LENOVO_YOGA_TAB2_830_1050_CODEC_NAME "spi-10WM5102:00"
-static struct gpiod_lookup_table lenovo_yoga_tab2_830_1050_codec_gpios = {
- .dev_id = LENOVO_YOGA_TAB2_830_1050_CODEC_NAME,
- .table = {
- GPIO_LOOKUP("gpio_crystalcove", 3, "reset", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("INT33FC:01", 23, "wlf,ldoena", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("arizona", 2, "wlf,spkvdd-ena", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("arizona", 4, "wlf,micd-pol", GPIO_ACTIVE_LOW),
- { }
- },
+static const struct property_entry lenovo_yoga_tab2_830_1050_wm5102_props[] = {
+ PROPERTY_ENTRY_GPIO("reset-gpios",
+ &crystalcove_gpiochip_node, 3, GPIO_ACTIVE_HIGH),
+ PROPERTY_ENTRY_GPIO("wlf,ldoena-gpios",
+ &baytrail_gpiochip_nodes[1], 23, GPIO_ACTIVE_HIGH),
+ PROPERTY_ENTRY_GPIO("wlf,spkvdd-ena-gpios",
+ &arizona_gpiochip_node, 2, GPIO_ACTIVE_HIGH),
+ PROPERTY_ENTRY_GPIO("wlf,micd-pol-gpios",
+ &arizona_gpiochip_node, 4, GPIO_ACTIVE_LOW),
+ { }
+};
+
+static const struct software_node lenovo_yoga_tab2_830_1050_wm5102 = {
+ .properties = lenovo_yoga_tab2_830_1050_wm5102_props,
};
-static struct gpiod_lookup_table * const lenovo_yoga_tab2_830_1050_gpios[] = {
- &lenovo_yoga_tab2_830_1050_int3496_gpios,
- &lenovo_yoga_tab2_830_1050_codec_gpios,
+static const struct software_node *lenovo_yoga_tab2_830_1050_swnodes[] = {
+ &crystalcove_gpiochip_node,
+ &arizona_gpiochip_node,
+ &lenovo_yoga_tab2_830_1050_wm5102,
+ &generic_lipo_hv_4v35_battery_node,
NULL
};
static int __init lenovo_yoga_tab2_830_1050_init(struct device *dev);
static void lenovo_yoga_tab2_830_1050_exit(void);
+static const char * const lenovo_yoga_tab2_modules[] __initconst = {
+ "spi_pxa2xx_platform", /* For the SPI codec device */
+ "bq24190_charger", /* For the Vbus regulator for int3496/lc824206xa */
+ NULL
+};
+
const struct x86_dev_info lenovo_yoga_tab2_830_1050_info __initconst = {
.i2c_client_info = lenovo_yoga_tab2_830_1050_i2c_clients,
.i2c_client_count = ARRAY_SIZE(lenovo_yoga_tab2_830_1050_i2c_clients),
- .pdev_info = int3496_pdevs,
- .pdev_count = 1,
- .gpio_button = &lenovo_yoga_tab2_830_1050_lid,
- .gpio_button_count = 1,
- .gpiod_lookup_tables = lenovo_yoga_tab2_830_1050_gpios,
- .bat_swnode = &generic_lipo_hv_4v35_battery_node,
- .modules = bq24190_modules,
+ .pdev_info = lenovo_yoga_tab2_830_1050_pdevs,
+ .pdev_count = ARRAY_SIZE(lenovo_yoga_tab2_830_1050_pdevs),
+ .gpio_button_swnodes = lenovo_yoga_tab2_830_1050_lid_swnodes,
+ .swnode_group = lenovo_yoga_tab2_830_1050_swnodes,
+ .modules = lenovo_yoga_tab2_modules,
+ .gpiochip_type = X86_GPIOCHIP_BAYTRAIL,
.init = lenovo_yoga_tab2_830_1050_init,
.exit = lenovo_yoga_tab2_830_1050_exit,
};
@@ -481,6 +514,7 @@ static const struct pinctrl_map lenovo_yoga_tab2_830_1050_codec_pinctrl_map =
PIN_MAP_MUX_GROUP(LENOVO_YOGA_TAB2_830_1050_CODEC_NAME, "codec_32khz_clk",
"INT33FC:02", "pmu_clk2_grp", "pmu_clk");
+static struct device *lenovo_yoga_tab2_830_1050_codec_dev;
static struct pinctrl *lenovo_yoga_tab2_830_1050_codec_pinctrl;
static struct sys_off_handler *lenovo_yoga_tab2_830_1050_sys_off_handler;
@@ -507,12 +541,18 @@ static int __init lenovo_yoga_tab2_830_1050_init_codec(void)
goto err_unregister_mappings;
}
- /* We're done with the codec_dev now */
- put_device(codec_dev);
+ ret = device_add_software_node(codec_dev, &lenovo_yoga_tab2_830_1050_wm5102);
+ if (ret) {
+ ret = dev_err_probe(codec_dev, ret, "adding software node\n");
+ goto err_put_pinctrl;
+ }
+ lenovo_yoga_tab2_830_1050_codec_dev = codec_dev;
lenovo_yoga_tab2_830_1050_codec_pinctrl = pinctrl;
return 0;
+err_put_pinctrl:
+ pinctrl_put(lenovo_yoga_tab2_830_1050_codec_pinctrl);
err_unregister_mappings:
pinctrl_unregister_mappings(&lenovo_yoga_tab2_830_1050_codec_pinctrl_map);
err_put_device:
@@ -560,10 +600,10 @@ static void lenovo_yoga_tab2_830_1050_exit(void)
{
unregister_sys_off_handler(lenovo_yoga_tab2_830_1050_sys_off_handler);
- if (lenovo_yoga_tab2_830_1050_codec_pinctrl) {
- pinctrl_put(lenovo_yoga_tab2_830_1050_codec_pinctrl);
- pinctrl_unregister_mappings(&lenovo_yoga_tab2_830_1050_codec_pinctrl_map);
- }
+ device_remove_software_node(lenovo_yoga_tab2_830_1050_codec_dev);
+ pinctrl_put(lenovo_yoga_tab2_830_1050_codec_pinctrl);
+ pinctrl_unregister_mappings(&lenovo_yoga_tab2_830_1050_codec_pinctrl_map);
+ put_device(lenovo_yoga_tab2_830_1050_codec_dev);
}
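The exit path mirrors init in reverse: the software node is detached before the pinctrl and mapping teardown, and the device reference (codec_dev is now stashed instead of dropped at the end of init) is released last. A condensed sketch of the pairing, with demo_* names:

static struct device *demo_codec_dev;

static int demo_codec_init(struct device *codec_dev,
			   const struct software_node *node)
{
	int ret = device_add_software_node(codec_dev, node);

	if (ret)
		return ret;

	demo_codec_dev = get_device(codec_dev);	/* keep for exit() */
	return 0;
}

static void demo_codec_exit(void)
{
	device_remove_software_node(demo_codec_dev);	/* undo init() */
	put_device(demo_codec_dev);			/* drop our ref */
}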
/*
@@ -718,19 +758,21 @@ static const struct x86_i2c_client_info lenovo_yoga_tab2_1380_i2c_clients[] __in
}
};
+static const struct property_entry lenovo_yoga_tab2_1380_fc_props[] __initconst = {
+ PROPERTY_ENTRY_GPIO("uart3_txd-gpios", &baytrail_gpiochip_nodes[0], 57, GPIO_ACTIVE_HIGH),
+ PROPERTY_ENTRY_GPIO("uart3_rxd-gpios", &baytrail_gpiochip_nodes[0], 61, GPIO_ACTIVE_HIGH),
+ { }
+};
+
static const struct platform_device_info lenovo_yoga_tab2_1380_pdevs[] __initconst = {
{
/* For the Tablet 2 Pro 1380's custom fast charging driver */
.name = "lenovo-yoga-tab2-pro-1380-fastcharger",
.id = PLATFORM_DEVID_NONE,
+ .properties = lenovo_yoga_tab2_1380_fc_props,
},
};
-static const char * const lenovo_yoga_tab2_1380_modules[] __initconst = {
- "bq24190_charger", /* For the Vbus regulator for lc824206xa */
- NULL
-};
-
static int __init lenovo_yoga_tab2_1380_init(struct device *dev)
{
int ret;
@@ -752,31 +794,15 @@ static int __init lenovo_yoga_tab2_1380_init(struct device *dev)
return 0;
}
-static struct gpiod_lookup_table lenovo_yoga_tab2_1380_fc_gpios = {
- .dev_id = "serial0-0",
- .table = {
- GPIO_LOOKUP("INT33FC:00", 57, "uart3_txd", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("INT33FC:00", 61, "uart3_rxd", GPIO_ACTIVE_HIGH),
- { }
- },
-};
-
-static struct gpiod_lookup_table * const lenovo_yoga_tab2_1380_gpios[] = {
- &lenovo_yoga_tab2_830_1050_codec_gpios,
- &lenovo_yoga_tab2_1380_fc_gpios,
- NULL
-};
-
const struct x86_dev_info lenovo_yoga_tab2_1380_info __initconst = {
.i2c_client_info = lenovo_yoga_tab2_1380_i2c_clients,
.i2c_client_count = ARRAY_SIZE(lenovo_yoga_tab2_1380_i2c_clients),
.pdev_info = lenovo_yoga_tab2_1380_pdevs,
.pdev_count = ARRAY_SIZE(lenovo_yoga_tab2_1380_pdevs),
- .gpio_button = &lenovo_yoga_tab2_830_1050_lid,
- .gpio_button_count = 1,
- .gpiod_lookup_tables = lenovo_yoga_tab2_1380_gpios,
- .bat_swnode = &generic_lipo_hv_4v35_battery_node,
- .modules = lenovo_yoga_tab2_1380_modules,
+ .gpio_button_swnodes = lenovo_yoga_tab2_830_1050_lid_swnodes,
+ .swnode_group = lenovo_yoga_tab2_830_1050_swnodes,
+ .modules = lenovo_yoga_tab2_modules,
+ .gpiochip_type = X86_GPIOCHIP_BAYTRAIL,
.init = lenovo_yoga_tab2_1380_init,
.exit = lenovo_yoga_tab2_830_1050_exit,
};
@@ -824,6 +850,7 @@ static const struct property_entry lenovo_yt3_hideep_ts_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1600),
PROPERTY_ENTRY_U32("touchscreen-size-y", 2560),
PROPERTY_ENTRY_U32("touchscreen-max-pressure", 255),
+ PROPERTY_ENTRY_GPIO("reset-gpios", &cherryview_gpiochip_nodes[0], 7, GPIO_ACTIVE_LOW),
{ }
};
@@ -958,12 +985,34 @@ static struct arizona_pdata lenovo_yt3_wm5102_pdata = {
},
};
+static const struct property_entry lenovo_yt3_wm5102_props[] = {
+ PROPERTY_ENTRY_GPIO("wlf,spkvdd-ena-gpios",
+ &cherryview_gpiochip_nodes[0], 75, GPIO_ACTIVE_HIGH),
+ PROPERTY_ENTRY_GPIO("wlf,ldoena-gpios",
+ &cherryview_gpiochip_nodes[0], 81, GPIO_ACTIVE_HIGH),
+ PROPERTY_ENTRY_GPIO("reset-gpios", &cherryview_gpiochip_nodes[0], 82, GPIO_ACTIVE_HIGH),
+ PROPERTY_ENTRY_GPIO("wlf,micd-pol-gpios", &arizona_gpiochip_node, 2, GPIO_ACTIVE_HIGH),
+ { }
+};
+
+static const struct software_node lenovo_yt3_wm5102 = {
+ .properties = lenovo_yt3_wm5102_props,
+ .name = "wm5102",
+};
+
+static const struct software_node *lenovo_yt3_swnodes[] = {
+ &arizona_gpiochip_node,
+ &lenovo_yt3_wm5102,
+ NULL
+};
+
static const struct x86_spi_dev_info lenovo_yt3_spi_devs[] __initconst = {
{
/* WM5102 codec */
.board_info = {
.modalias = "wm5102",
.platform_data = &lenovo_yt3_wm5102_pdata,
+ .swnode = &lenovo_yt3_wm5102,
.max_speed_hz = 5000000,
},
.ctrl_path = "\\_SB_.PCI0.SPI1",
@@ -1013,28 +1062,8 @@ static int __init lenovo_yt3_init(struct device *dev)
return 0;
}
-static struct gpiod_lookup_table lenovo_yt3_hideep_gpios = {
- .dev_id = "i2c-hideep_ts",
- .table = {
- GPIO_LOOKUP("INT33FF:00", 7, "reset", GPIO_ACTIVE_LOW),
- { }
- },
-};
-
-static struct gpiod_lookup_table lenovo_yt3_wm5102_gpios = {
- .dev_id = "spi1.0",
- .table = {
- GPIO_LOOKUP("INT33FF:00", 75, "wlf,spkvdd-ena", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("INT33FF:00", 81, "wlf,ldoena", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("INT33FF:00", 82, "reset", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("arizona", 2, "wlf,micd-pol", GPIO_ACTIVE_HIGH),
- { }
- },
-};
-
-static struct gpiod_lookup_table * const lenovo_yt3_gpios[] = {
- &lenovo_yt3_hideep_gpios,
- &lenovo_yt3_wm5102_gpios,
+static const char * const lenovo_yt3_modules[] __initconst = {
+ "spi_pxa2xx_platform", /* For the SPI codec device */
NULL
};
@@ -1043,6 +1072,8 @@ const struct x86_dev_info lenovo_yt3_info __initconst = {
.i2c_client_count = ARRAY_SIZE(lenovo_yt3_i2c_clients),
.spi_dev_info = lenovo_yt3_spi_devs,
.spi_dev_count = ARRAY_SIZE(lenovo_yt3_spi_devs),
- .gpiod_lookup_tables = lenovo_yt3_gpios,
+ .swnode_group = lenovo_yt3_swnodes,
+ .modules = lenovo_yt3_modules,
+ .gpiochip_type = X86_GPIOCHIP_CHERRYVIEW,
.init = lenovo_yt3_init,
};
diff --git a/drivers/platform/x86/x86-android-tablets/other.c b/drivers/platform/x86/x86-android-tablets/other.c
index f7bd9f863c85..7532af2d72d1 100644
--- a/drivers/platform/x86/x86-android-tablets/other.c
+++ b/drivers/platform/x86/x86-android-tablets/other.c
@@ -5,12 +5,13 @@
* devices typically have a bunch of things hardcoded, rather than specified
* in their DSDT.
*
- * Copyright (C) 2021-2023 Hans de Goede <hdegoede@redhat.com>
+ * Copyright (C) 2021-2023 Hans de Goede <hansg@kernel.org>
*/
#include <linux/acpi.h>
#include <linux/gpio/machine.h>
-#include <linux/input.h>
+#include <linux/gpio/property.h>
+#include <linux/input-event-codes.h>
#include <linux/leds.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
@@ -21,102 +22,38 @@
#include "shared-psy-info.h"
#include "x86-android-tablets.h"
-/* Acer Iconia One 7 B1-750 has an Android factory image with everything hardcoded */
-static const char * const acer_b1_750_mount_matrix[] = {
- "-1", "0", "0",
- "0", "1", "0",
- "0", "0", "1"
+/*
+ * Advantech MICA-071
+ * This is a standard Windows tablet, but it has an extra "quick launch" button
+ * which is not described in the ACPI tables in any way.
+ * Use the x86-android-tablets infra to create a gpio-keys device for this.
+ */
+static const struct software_node advantech_mica_071_gpio_keys_node = {
+ .name = "prog1_key",
};
-static const struct property_entry acer_b1_750_bma250e_props[] = {
- PROPERTY_ENTRY_STRING_ARRAY("mount-matrix", acer_b1_750_mount_matrix),
+static const struct property_entry advantech_mica_071_prog1_key_props[] = {
+ PROPERTY_ENTRY_U32("linux,code", KEY_PROG1),
+ PROPERTY_ENTRY_STRING("label", "prog1_key"),
+ PROPERTY_ENTRY_GPIO("gpios", &baytrail_gpiochip_nodes[0], 2, GPIO_ACTIVE_LOW),
+ PROPERTY_ENTRY_U32("debounce-interval", 50),
{ }
};
-static const struct software_node acer_b1_750_bma250e_node = {
- .properties = acer_b1_750_bma250e_props,
+static const struct software_node advantech_mica_071_prog1_key_node = {
+ .parent = &advantech_mica_071_gpio_keys_node,
+ .properties = advantech_mica_071_prog1_key_props,
};
-static const struct x86_i2c_client_info acer_b1_750_i2c_clients[] __initconst = {
- {
- /* Novatek NVT-ts touchscreen */
- .board_info = {
- .type = "nt11205-ts",
- .addr = 0x34,
- .dev_name = "NVT-ts",
- },
- .adapter_path = "\\_SB_.I2C4",
- .irq_data = {
- .type = X86_ACPI_IRQ_TYPE_GPIOINT,
- .chip = "INT33FC:02",
- .index = 3,
- .trigger = ACPI_EDGE_SENSITIVE,
- .polarity = ACPI_ACTIVE_LOW,
- .con_id = "NVT-ts_irq",
- },
- }, {
- /* BMA250E accelerometer */
- .board_info = {
- .type = "bma250e",
- .addr = 0x18,
- .swnode = &acer_b1_750_bma250e_node,
- },
- .adapter_path = "\\_SB_.I2C3",
- .irq_data = {
- .type = X86_ACPI_IRQ_TYPE_GPIOINT,
- .chip = "INT33FC:02",
- .index = 25,
- .trigger = ACPI_LEVEL_SENSITIVE,
- .polarity = ACPI_ACTIVE_HIGH,
- .con_id = "bma250e_irq",
- },
- },
-};
-
-static struct gpiod_lookup_table acer_b1_750_nvt_ts_gpios = {
- .dev_id = "i2c-NVT-ts",
- .table = {
- GPIO_LOOKUP("INT33FC:01", 26, "reset", GPIO_ACTIVE_LOW),
- { }
- },
-};
-
-static struct gpiod_lookup_table * const acer_b1_750_gpios[] = {
- &acer_b1_750_nvt_ts_gpios,
- &int3496_reference_gpios,
+static const struct software_node *advantech_mica_071_button_swnodes[] = {
+ &advantech_mica_071_gpio_keys_node,
+ &advantech_mica_071_prog1_key_node,
NULL
};
-const struct x86_dev_info acer_b1_750_info __initconst = {
- .i2c_client_info = acer_b1_750_i2c_clients,
- .i2c_client_count = ARRAY_SIZE(acer_b1_750_i2c_clients),
- .pdev_info = int3496_pdevs,
- .pdev_count = 1,
- .gpiod_lookup_tables = acer_b1_750_gpios,
-};
-
-/*
- * Advantech MICA-071
- * This is a standard Windows tablet, but it has an extra "quick launch" button
- * which is not described in the ACPI tables in anyway.
- * Use the x86-android-tablets infra to create a gpio-keys device for this.
- */
-static const struct x86_gpio_button advantech_mica_071_button __initconst = {
- .button = {
- .code = KEY_PROG1,
- .active_low = true,
- .desc = "prog1_key",
- .type = EV_KEY,
- .wakeup = false,
- .debounce_interval = 50,
- },
- .chip = "INT33FC:00",
- .pin = 2,
-};
-
const struct x86_dev_info advantech_mica_071_info __initconst = {
- .gpio_button = &advantech_mica_071_button,
- .gpio_button_count = 1,
+ .gpio_button_swnodes = advantech_mica_071_button_swnodes,
+ .gpiochip_type = X86_GPIOCHIP_BAYTRAIL,
};
/*
@@ -212,36 +149,46 @@ const struct x86_dev_info chuwi_hi8_info __initconst = {
* in the button row with the power + volume-buttons labeled P and F.
* Use the x86-android-tablets infra to create a gpio-keys device for these.
*/
-static const struct x86_gpio_button cyberbook_t116_buttons[] __initconst = {
- {
- .button = {
- .code = KEY_PROG1,
- .active_low = true,
- .desc = "prog1_key",
- .type = EV_KEY,
- .wakeup = false,
- .debounce_interval = 50,
- },
- .chip = "INT33FF:00",
- .pin = 30,
- },
- {
- .button = {
- .code = KEY_PROG2,
- .active_low = true,
- .desc = "prog2_key",
- .type = EV_KEY,
- .wakeup = false,
- .debounce_interval = 50,
- },
- .chip = "INT33FF:03",
- .pin = 48,
- },
+static const struct software_node cyberbook_t116_gpio_keys_node = {
+ .name = "prog_keys",
+};
+
+static const struct property_entry cyberbook_t116_prog1_key_props[] = {
+ PROPERTY_ENTRY_U32("linux,code", KEY_PROG1),
+ PROPERTY_ENTRY_STRING("label", "prog1_key"),
+ PROPERTY_ENTRY_GPIO("gpios", &cherryview_gpiochip_nodes[0], 30, GPIO_ACTIVE_LOW),
+ PROPERTY_ENTRY_U32("debounce-interval", 50),
+ { }
+};
+
+static const struct software_node cyberbook_t116_prog1_key_node = {
+ .parent = &cyberbook_t116_gpio_keys_node,
+ .properties = cyberbook_t116_prog1_key_props,
+};
+
+static const struct property_entry cyberbook_t116_prog2_key_props[] = {
+ PROPERTY_ENTRY_U32("linux,code", KEY_PROG2),
+ PROPERTY_ENTRY_STRING("label", "prog2_key"),
+ PROPERTY_ENTRY_GPIO("gpios", &cherryview_gpiochip_nodes[3], 48, GPIO_ACTIVE_LOW),
+ PROPERTY_ENTRY_U32("debounce-interval", 50),
+ { }
+};
+
+static const struct software_node cyberbook_t116_prog2_key_node = {
+ .parent = &cyberbook_t116_gpio_keys_node,
+ .properties = cyberbook_t116_prog2_key_props,
+};
+
+static const struct software_node *cyberbook_t116_buttons_swnodes[] = {
+ &cyberbook_t116_gpio_keys_node,
+ &cyberbook_t116_prog1_key_node,
+ &cyberbook_t116_prog2_key_node,
+ NULL
};
const struct x86_dev_info cyberbook_t116_info __initconst = {
- .gpio_button = cyberbook_t116_buttons,
- .gpio_button_count = ARRAY_SIZE(cyberbook_t116_buttons),
+ .gpio_button_swnodes = cyberbook_t116_buttons_swnodes,
+ .gpiochip_type = X86_GPIOCHIP_CHERRYVIEW,
};
#define CZC_EC_EXTRA_PORT 0x68
@@ -297,6 +244,8 @@ static const struct software_node medion_lifetab_s10346_accel_node = {
static const struct property_entry medion_lifetab_s10346_touchscreen_props[] = {
PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_GPIO("reset-gpios", &baytrail_gpiochip_nodes[1], 26, GPIO_ACTIVE_HIGH),
+ PROPERTY_ENTRY_GPIO("irq-gpios", &baytrail_gpiochip_nodes[2], 3, GPIO_ACTIVE_HIGH),
{ }
};
@@ -340,24 +289,10 @@ static const struct x86_i2c_client_info medion_lifetab_s10346_i2c_clients[] __in
},
};
-static struct gpiod_lookup_table medion_lifetab_s10346_goodix_gpios = {
- .dev_id = "i2c-goodix_ts",
- .table = {
- GPIO_LOOKUP("INT33FC:01", 26, "reset", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("INT33FC:02", 3, "irq", GPIO_ACTIVE_HIGH),
- { }
- },
-};
-
-static struct gpiod_lookup_table * const medion_lifetab_s10346_gpios[] = {
- &medion_lifetab_s10346_goodix_gpios,
- NULL
-};
-
const struct x86_dev_info medion_lifetab_s10346_info __initconst = {
.i2c_client_info = medion_lifetab_s10346_i2c_clients,
.i2c_client_count = ARRAY_SIZE(medion_lifetab_s10346_i2c_clients),
- .gpiod_lookup_tables = medion_lifetab_s10346_gpios,
+ .gpiochip_type = X86_GPIOCHIP_BAYTRAIL,
};
/* Nextbook Ares 8 (BYT) tablets have an Android factory image with everything hardcoded */
@@ -416,17 +351,12 @@ static const struct x86_i2c_client_info nextbook_ares8_i2c_clients[] __initconst
},
};
-static struct gpiod_lookup_table * const nextbook_ares8_gpios[] = {
- &int3496_reference_gpios,
- NULL
-};
-
const struct x86_dev_info nextbook_ares8_info __initconst = {
.i2c_client_info = nextbook_ares8_i2c_clients,
.i2c_client_count = ARRAY_SIZE(nextbook_ares8_i2c_clients),
.pdev_info = int3496_pdevs,
.pdev_count = 1,
- .gpiod_lookup_tables = nextbook_ares8_gpios,
+ .gpiochip_type = X86_GPIOCHIP_BAYTRAIL,
};
/* Nextbook Ares 8A (CHT) tablets have an Android factory image with everything hardcoded */
@@ -445,6 +375,17 @@ static const struct software_node nextbook_ares8a_accel_node = {
.properties = nextbook_ares8a_accel_props,
};
+static const struct property_entry nextbook_ares8a_ft5416_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 800),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
+ PROPERTY_ENTRY_GPIO("reset-gpios", &cherryview_gpiochip_nodes[1], 25, GPIO_ACTIVE_LOW),
+ { }
+};
+
+static const struct software_node nextbook_ares8a_ft5416_node = {
+ .properties = nextbook_ares8a_ft5416_props,
+};
+
static const struct x86_i2c_client_info nextbook_ares8a_i2c_clients[] __initconst = {
{
/* Freescale MMA8653FC accelerometer */
@@ -461,7 +402,7 @@ static const struct x86_i2c_client_info nextbook_ares8a_i2c_clients[] __initcons
.type = "edt-ft5x06",
.addr = 0x38,
.dev_name = "ft5416",
- .swnode = &nextbook_ares8_touchscreen_node,
+ .swnode = &nextbook_ares8a_ft5416_node,
},
.adapter_path = "\\_SB_.PCI0.I2C6",
.irq_data = {
@@ -475,23 +416,10 @@ static const struct x86_i2c_client_info nextbook_ares8a_i2c_clients[] __initcons
},
};
-static struct gpiod_lookup_table nextbook_ares8a_ft5416_gpios = {
- .dev_id = "i2c-ft5416",
- .table = {
- GPIO_LOOKUP("INT33FF:01", 25, "reset", GPIO_ACTIVE_LOW),
- { }
- },
-};
-
-static struct gpiod_lookup_table * const nextbook_ares8a_gpios[] = {
- &nextbook_ares8a_ft5416_gpios,
- NULL
-};
-
const struct x86_dev_info nextbook_ares8a_info __initconst = {
.i2c_client_info = nextbook_ares8a_i2c_clients,
.i2c_client_count = ARRAY_SIZE(nextbook_ares8a_i2c_clients),
- .gpiod_lookup_tables = nextbook_ares8a_gpios,
+ .gpiochip_type = X86_GPIOCHIP_CHERRYVIEW,
};
/*
@@ -500,22 +428,32 @@ const struct x86_dev_info nextbook_ares8a_info __initconst = {
* This button has a WMI interface, but that is broken. Instead of trying to
* use the broken WMI interface, instantiate a gpio-keys device for this.
*/
-static const struct x86_gpio_button peaq_c1010_button __initconst = {
- .button = {
- .code = KEY_SOUND,
- .active_low = true,
- .desc = "dolby_key",
- .type = EV_KEY,
- .wakeup = false,
- .debounce_interval = 50,
- },
- .chip = "INT33FC:00",
- .pin = 3,
+static const struct software_node peaq_c1010_gpio_keys_node = {
+ .name = "gpio_keys",
+};
+
+static const struct property_entry peaq_c1010_dolby_key_props[] = {
+ PROPERTY_ENTRY_U32("linux,code", KEY_SOUND),
+ PROPERTY_ENTRY_STRING("label", "dolby_key"),
+ PROPERTY_ENTRY_GPIO("gpios", &baytrail_gpiochip_nodes[0], 3, GPIO_ACTIVE_LOW),
+ PROPERTY_ENTRY_U32("debounce-interval", 50),
+ { }
+};
+
+static const struct software_node peaq_c1010_dolby_key_node = {
+ .parent = &peaq_c1010_gpio_keys_node,
+ .properties = peaq_c1010_dolby_key_props,
+};
+
+static const struct software_node *peaq_c1010_button_swnodes[] = {
+ &peaq_c1010_gpio_keys_node,
+ &peaq_c1010_dolby_key_node,
+ NULL
};
const struct x86_dev_info peaq_c1010_info __initconst = {
- .gpio_button = &peaq_c1010_button,
- .gpio_button_count = 1,
+ .gpio_button_swnodes = peaq_c1010_button_swnodes,
+ .gpiochip_type = X86_GPIOCHIP_BAYTRAIL,
};
/*
@@ -543,6 +481,8 @@ static const struct property_entry whitelabel_tm800a550l_goodix_props[] = {
PROPERTY_ENTRY_STRING("firmware-name", "gt912-tm800a550l.fw"),
PROPERTY_ENTRY_STRING("goodix,config-name", "gt912-tm800a550l.cfg"),
PROPERTY_ENTRY_U32("goodix,main-clk", 54),
+ PROPERTY_ENTRY_GPIO("reset-gpios", &baytrail_gpiochip_nodes[1], 26, GPIO_ACTIVE_HIGH),
+ PROPERTY_ENTRY_GPIO("irq-gpios", &baytrail_gpiochip_nodes[2], 3, GPIO_ACTIVE_HIGH),
{ }
};
@@ -578,24 +518,10 @@ static const struct x86_i2c_client_info whitelabel_tm800a550l_i2c_clients[] __in
},
};
-static struct gpiod_lookup_table whitelabel_tm800a550l_goodix_gpios = {
- .dev_id = "i2c-goodix_ts",
- .table = {
- GPIO_LOOKUP("INT33FC:01", 26, "reset", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("INT33FC:02", 3, "irq", GPIO_ACTIVE_HIGH),
- { }
- },
-};
-
-static struct gpiod_lookup_table * const whitelabel_tm800a550l_gpios[] = {
- &whitelabel_tm800a550l_goodix_gpios,
- NULL
-};
-
const struct x86_dev_info whitelabel_tm800a550l_info __initconst = {
.i2c_client_info = whitelabel_tm800a550l_i2c_clients,
.i2c_client_count = ARRAY_SIZE(whitelabel_tm800a550l_i2c_clients),
- .gpiod_lookup_tables = whitelabel_tm800a550l_gpios,
+ .gpiochip_type = X86_GPIOCHIP_BAYTRAIL,
};
/*
@@ -605,6 +531,7 @@ const struct x86_dev_info whitelabel_tm800a550l_info __initconst = {
static const struct property_entry vexia_edu_atla10_5v_touchscreen_props[] = {
PROPERTY_ENTRY_U32("hid-descr-addr", 0x0000),
PROPERTY_ENTRY_U32("post-reset-deassert-delay-ms", 120),
+ PROPERTY_ENTRY_GPIO("reset-gpios", &baytrail_gpiochip_nodes[1], 26, GPIO_ACTIVE_LOW),
{ }
};
@@ -639,23 +566,10 @@ static const struct x86_i2c_client_info vexia_edu_atla10_5v_i2c_clients[] __init
}
};
-static struct gpiod_lookup_table vexia_edu_atla10_5v_ft5416_gpios = {
- .dev_id = "i2c-FTSC1000",
- .table = {
- GPIO_LOOKUP("INT33FC:01", 26, "reset", GPIO_ACTIVE_LOW),
- { }
- },
-};
-
-static struct gpiod_lookup_table * const vexia_edu_atla10_5v_gpios[] = {
- &vexia_edu_atla10_5v_ft5416_gpios,
- NULL
-};
-
const struct x86_dev_info vexia_edu_atla10_5v_info __initconst = {
.i2c_client_info = vexia_edu_atla10_5v_i2c_clients,
.i2c_client_count = ARRAY_SIZE(vexia_edu_atla10_5v_i2c_clients),
- .gpiod_lookup_tables = vexia_edu_atla10_5v_gpios,
+ .gpiochip_type = X86_GPIOCHIP_BAYTRAIL,
};
/*
@@ -691,6 +605,7 @@ static const struct software_node vexia_edu_atla10_9v_accel_node = {
static const struct property_entry vexia_edu_atla10_9v_touchscreen_props[] = {
PROPERTY_ENTRY_U32("hid-descr-addr", 0x0000),
PROPERTY_ENTRY_U32("post-reset-deassert-delay-ms", 120),
+ PROPERTY_ENTRY_GPIO("reset-gpios", &baytrail_gpiochip_nodes[0], 60, GPIO_ACTIVE_LOW),
{ }
};
@@ -783,19 +698,6 @@ static const struct x86_serdev_info vexia_edu_atla10_9v_serdevs[] __initconst =
},
};
-static struct gpiod_lookup_table vexia_edu_atla10_9v_ft5416_gpios = {
- .dev_id = "i2c-FTSC1000",
- .table = {
- GPIO_LOOKUP("INT33FC:00", 60, "reset", GPIO_ACTIVE_LOW),
- { }
- },
-};
-
-static struct gpiod_lookup_table * const vexia_edu_atla10_9v_gpios[] = {
- &vexia_edu_atla10_9v_ft5416_gpios,
- NULL
-};
-
static int __init vexia_edu_atla10_9v_init(struct device *dev)
{
struct pci_dev *pdev;
@@ -809,8 +711,10 @@ static int __init vexia_edu_atla10_9v_init(struct device *dev)
/* Reprobe the SDIO controller to enumerate the now enabled Wifi module */
pdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x11, 0));
- if (!pdev)
- return -EPROBE_DEFER;
+ if (!pdev) {
+ pr_warn("Could not get PCI SDIO at devfn 0x%02x\n", PCI_DEVFN(0x11, 0));
+ return 0;
+ }
ret = device_reprobe(&pdev->dev);
if (ret)
@@ -825,9 +729,9 @@ const struct x86_dev_info vexia_edu_atla10_9v_info __initconst = {
.i2c_client_count = ARRAY_SIZE(vexia_edu_atla10_9v_i2c_clients),
.serdev_info = vexia_edu_atla10_9v_serdevs,
.serdev_count = ARRAY_SIZE(vexia_edu_atla10_9v_serdevs),
- .gpiod_lookup_tables = vexia_edu_atla10_9v_gpios,
.init = vexia_edu_atla10_9v_init,
.use_pci = true,
+ .gpiochip_type = X86_GPIOCHIP_BAYTRAIL,
};
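The init hunk above demotes a missing SDIO controller from -EPROBE_DEFER (which would retry the whole platform probe indefinitely) to a warning, since the tablet remains usable without Wifi. The reprobe pattern itself, sketched with the same devfn and a balancing pci_dev_put() (illustrative, not the full function):

static int demo_reprobe_sdio(void)
{
	struct pci_dev *pdev;
	int ret;

	pdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x11, 0));
	if (!pdev)
		return 0;	/* no controller found; not fatal */

	ret = device_reprobe(&pdev->dev);
	pci_dev_put(pdev);
	return ret;
}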
/*
@@ -923,7 +827,6 @@ static int xiaomi_mipad2_brightness_set(struct led_classdev *led_cdev,
static int __init xiaomi_mipad2_init(struct device *dev)
{
struct led_classdev *led_cdev;
- int ret;
xiaomi_mipad2_led_pwm = devm_pwm_get(dev, "pwm_soc_lpss_2");
if (IS_ERR(xiaomi_mipad2_led_pwm))
@@ -940,16 +843,7 @@ static int __init xiaomi_mipad2_init(struct device *dev)
/* Turn LED off during suspend */
led_cdev->flags = LED_CORE_SUSPENDRESUME;
- ret = devm_led_classdev_register(dev, led_cdev);
- if (ret)
- return dev_err_probe(dev, ret, "registering LED\n");
-
- return software_node_register_node_group(ktd2026_node_group);
-}
-
-static void xiaomi_mipad2_exit(void)
-{
- software_node_unregister_node_group(ktd2026_node_group);
+ return devm_led_classdev_register(dev, led_cdev);
}
/*
@@ -984,6 +878,6 @@ static const struct x86_i2c_client_info xiaomi_mipad2_i2c_clients[] __initconst
const struct x86_dev_info xiaomi_mipad2_info __initconst = {
.i2c_client_info = xiaomi_mipad2_i2c_clients,
.i2c_client_count = ARRAY_SIZE(xiaomi_mipad2_i2c_clients),
+ .swnode_group = ktd2026_node_group,
.init = xiaomi_mipad2_init,
- .exit = xiaomi_mipad2_exit,
};
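xiaomi_mipad2_exit() could be dropped because both of its duties are now managed elsewhere: the KTD2026 node group presumably gets registered and unregistered by the core through the new .swnode_group field (an assumption based on how the field is used above), and the LED is devres-managed. The devm pattern in isolation:

static int demo_led_init(struct device *dev, struct led_classdev *led_cdev)
{
	/* Turned off over suspend, removed automatically with "dev" */
	led_cdev->flags = LED_CORE_SUSPENDRESUME;
	return devm_led_classdev_register(dev, led_cdev);
}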
diff --git a/drivers/platform/x86/x86-android-tablets/shared-psy-info.c b/drivers/platform/x86/x86-android-tablets/shared-psy-info.c
index fe34cedb6257..29fc466f76fe 100644
--- a/drivers/platform/x86/x86-android-tablets/shared-psy-info.c
+++ b/drivers/platform/x86/x86-android-tablets/shared-psy-info.c
@@ -5,16 +5,18 @@
* devices typically have a bunch of things hardcoded, rather than specified
* in their DSDT.
*
- * Copyright (C) 2021-2023 Hans de Goede <hdegoede@redhat.com>
+ * Copyright (C) 2021-2023 Hans de Goede <hansg@kernel.org>
*/
#include <linux/gpio/machine.h>
+#include <linux/gpio/property.h>
#include <linux/platform_device.h>
#include <linux/power/bq24190_charger.h>
#include <linux/property.h>
#include <linux/regulator/machine.h>
#include "shared-psy-info.h"
+#include "x86-android-tablets.h"
/* Generic / shared charger / battery settings */
const char * const tusb1211_chg_det_psy[] = { "tusb1211-charger-detect" };
@@ -111,6 +113,11 @@ const struct software_node generic_lipo_4v2_battery_node = {
.properties = generic_lipo_4v2_battery_props,
};
+const struct software_node *generic_lipo_4v2_battery_swnodes[] = {
+ &generic_lipo_4v2_battery_node,
+ NULL
+};
+
/* LiPo HighVoltage (max 4.35V) settings used by most devs with a HV battery */
static const struct property_entry generic_lipo_hv_4v35_battery_props[] = {
PROPERTY_ENTRY_STRING("compatible", "simple-battery"),
@@ -131,6 +138,11 @@ const struct software_node generic_lipo_hv_4v35_battery_node = {
.properties = generic_lipo_hv_4v35_battery_props,
};
+const struct software_node *generic_lipo_hv_4v35_battery_swnodes[] = {
+ &generic_lipo_hv_4v35_battery_node,
+ NULL
+};
+
/* For enabling the bq24190 5V boost based on id-pin */
static struct regulator_consumer_supply intel_int3496_consumer = {
.supply = "vbus",
@@ -156,21 +168,19 @@ const char * const bq24190_modules[] __initconst = {
NULL
};
-/* Generic platform device array and GPIO lookup table for micro USB ID pin handling */
+static const struct property_entry int3496_reference_props[] __initconst = {
+ PROPERTY_ENTRY_GPIO("vbus-gpios", &baytrail_gpiochip_nodes[1], 15, GPIO_ACTIVE_HIGH),
+ PROPERTY_ENTRY_GPIO("mux-gpios", &baytrail_gpiochip_nodes[2], 1, GPIO_ACTIVE_HIGH),
+ PROPERTY_ENTRY_GPIO("id-gpios", &baytrail_gpiochip_nodes[2], 18, GPIO_ACTIVE_HIGH),
+ { }
+};
+
+/* Generic platform device array for micro USB ID pin handling */
const struct platform_device_info int3496_pdevs[] __initconst = {
{
/* For micro USB ID pin handling */
.name = "intel-int3496",
.id = PLATFORM_DEVID_NONE,
- },
-};
-
-struct gpiod_lookup_table int3496_reference_gpios = {
- .dev_id = "intel-int3496",
- .table = {
- GPIO_LOOKUP("INT33FC:01", 15, "vbus", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("INT33FC:02", 1, "mux", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("INT33FC:02", 18, "id", GPIO_ACTIVE_HIGH),
- { }
+ .properties = int3496_reference_props,
},
};
diff --git a/drivers/platform/x86/x86-android-tablets/shared-psy-info.h b/drivers/platform/x86/x86-android-tablets/shared-psy-info.h
index bcf9845ad275..149befba3330 100644
--- a/drivers/platform/x86/x86-android-tablets/shared-psy-info.h
+++ b/drivers/platform/x86/x86-android-tablets/shared-psy-info.h
@@ -5,13 +5,12 @@
* devices typically have a bunch of things hardcoded, rather than specified
* in their DSDT.
*
- * Copyright (C) 2021-2023 Hans de Goede <hdegoede@redhat.com>
+ * Copyright (C) 2021-2023 Hans de Goede <hansg@kernel.org>
*/
#ifndef __PDX86_SHARED_PSY_INFO_H
#define __PDX86_SHARED_PSY_INFO_H
struct bq24190_platform_data;
-struct gpiod_lookup_table;
struct platform_device_info;
struct software_node;
@@ -21,13 +20,16 @@ extern const char * const bq25890_psy[];
extern const struct software_node fg_bq24190_supply_node;
extern const struct software_node fg_bq25890_supply_node;
+
extern const struct software_node generic_lipo_4v2_battery_node;
+extern const struct software_node *generic_lipo_4v2_battery_swnodes[];
+
extern const struct software_node generic_lipo_hv_4v35_battery_node;
+extern const struct software_node *generic_lipo_hv_4v35_battery_swnodes[];
extern struct bq24190_platform_data bq24190_pdata;
extern const char * const bq24190_modules[];
extern const struct platform_device_info int3496_pdevs[];
-extern struct gpiod_lookup_table int3496_reference_gpios;
#endif
diff --git a/drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c b/drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c
index 5d02af1c5aaa..2f8cd8d9e0ab 100644
--- a/drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c
+++ b/drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c
@@ -256,6 +256,6 @@ static struct i2c_driver atla10_ec_driver = {
};
module_i2c_driver(atla10_ec_driver);
-MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_AUTHOR("Hans de Goede <hansg@kernel.org>");
MODULE_DESCRIPTION("Battery driver for Vexia EDU ATLA 10 tablet EC");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
index dcf8d49e3b5f..2498390958ad 100644
--- a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
+++ b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
@@ -5,19 +5,17 @@
* devices typically have a bunch of things hardcoded, rather than specified
* in their DSDT.
*
- * Copyright (C) 2021-2023 Hans de Goede <hdegoede@redhat.com>
+ * Copyright (C) 2021-2023 Hans de Goede <hansg@kernel.org>
*/
#ifndef __PDX86_X86_ANDROID_TABLETS_H
#define __PDX86_X86_ANDROID_TABLETS_H
#include <linux/gpio/consumer.h>
-#include <linux/gpio_keys.h>
#include <linux/i2c.h>
#include <linux/irqdomain_defs.h>
#include <linux/spi/spi.h>
struct gpio_desc;
-struct gpiod_lookup_table;
struct platform_device_info;
struct software_node;
@@ -32,6 +30,12 @@ enum x86_acpi_irq_type {
X86_ACPI_IRQ_TYPE_PMIC,
};
+enum x86_gpiochip_type {
+ X86_GPIOCHIP_UNSPECIFIED = 0,
+ X86_GPIOCHIP_BAYTRAIL,
+ X86_GPIOCHIP_CHERRYVIEW,
+};
+
struct x86_acpi_irq_data {
char *chip; /* GPIO chip label (GPIOINT) or PMIC ACPI path (PMIC) */
enum x86_acpi_irq_type type;
@@ -76,29 +80,22 @@ struct x86_serdev_info {
const char *serdev_hid;
};
-struct x86_gpio_button {
- struct gpio_keys_button button;
- const char *chip;
- int pin;
-};
-
struct x86_dev_info {
const char * const *modules;
- const struct software_node *bat_swnode;
- struct gpiod_lookup_table * const *gpiod_lookup_tables;
+ const struct software_node **swnode_group;
const struct x86_i2c_client_info *i2c_client_info;
const struct x86_spi_dev_info *spi_dev_info;
const struct platform_device_info *pdev_info;
const struct x86_serdev_info *serdev_info;
- const struct x86_gpio_button *gpio_button;
+ const struct software_node **gpio_button_swnodes;
int i2c_client_count;
int spi_dev_count;
int pdev_count;
int serdev_count;
- int gpio_button_count;
int (*init)(struct device *dev);
void (*exit)(void);
bool use_pci;
+ enum x86_gpiochip_type gpiochip_type;
};
int x86_android_tablet_get_gpiod(const char *chip, int pin, const char *con_id,
@@ -106,10 +103,15 @@ int x86_android_tablet_get_gpiod(const char *chip, int pin, const char *con_id,
struct gpio_desc **desc);
int x86_acpi_irq_helper_get(const struct x86_acpi_irq_data *data);
+/* Software nodes representing GPIO chips used by various tablets */
+extern const struct software_node baytrail_gpiochip_nodes[];
+extern const struct software_node cherryview_gpiochip_nodes[];
+
/*
* Extern declarations of x86_dev_info structs so there can be a single
* MODULE_DEVICE_TABLE(dmi, ...), while splitting the board descriptions.
*/
+extern const struct x86_dev_info acer_a1_840_info;
extern const struct x86_dev_info acer_b1_750_info;
extern const struct x86_dev_info advantech_mica_071_info;
extern const struct x86_dev_info asus_me176c_info;
diff --git a/drivers/platform/x86/xiaomi-wmi.c b/drivers/platform/x86/xiaomi-wmi.c
index cbed29ca502a..b892007b9863 100644
--- a/drivers/platform/x86/xiaomi-wmi.c
+++ b/drivers/platform/x86/xiaomi-wmi.c
@@ -26,13 +26,6 @@ struct xiaomi_wmi {
unsigned int key_code;
};
-static void xiaomi_mutex_destroy(void *data)
-{
- struct mutex *lock = data;
-
- mutex_destroy(lock);
-}
-
static int xiaomi_wmi_probe(struct wmi_device *wdev, const void *context)
{
struct xiaomi_wmi *data;
@@ -46,8 +39,7 @@ static int xiaomi_wmi_probe(struct wmi_device *wdev, const void *context)
return -ENOMEM;
dev_set_drvdata(&wdev->dev, data);
- mutex_init(&data->key_lock);
- ret = devm_add_action_or_reset(&wdev->dev, xiaomi_mutex_destroy, &data->key_lock);
+ ret = devm_mutex_init(&wdev->dev, &data->key_lock);
if (ret < 0)
return ret;
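devm_mutex_init() folds the mutex_init() plus devm_add_action_or_reset(mutex_destroy) dance into one call; the destroy action only has an effect with CONFIG_DEBUG_MUTEXES, but the devres hookup keeps the debug case correct. Minimal sketch:

struct demo_data {
	struct mutex lock;
};

static int demo_probe(struct device *dev, struct demo_data *data)
{
	int ret = devm_mutex_init(dev, &data->lock);

	if (ret)
		return ret;

	/* data->lock is now valid for the lifetime of dev */
	return 0;
}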
diff --git a/drivers/pmdomain/Kconfig b/drivers/pmdomain/Kconfig
index 91f04ace35d4..23076ae90e66 100644
--- a/drivers/pmdomain/Kconfig
+++ b/drivers/pmdomain/Kconfig
@@ -7,6 +7,7 @@ source "drivers/pmdomain/apple/Kconfig"
source "drivers/pmdomain/arm/Kconfig"
source "drivers/pmdomain/bcm/Kconfig"
source "drivers/pmdomain/imx/Kconfig"
+source "drivers/pmdomain/marvell/Kconfig"
source "drivers/pmdomain/mediatek/Kconfig"
source "drivers/pmdomain/qcom/Kconfig"
source "drivers/pmdomain/renesas/Kconfig"
diff --git a/drivers/pmdomain/Makefile b/drivers/pmdomain/Makefile
index 7030f44a49df..ebc802f13eb9 100644
--- a/drivers/pmdomain/Makefile
+++ b/drivers/pmdomain/Makefile
@@ -5,6 +5,7 @@ obj-y += apple/
obj-y += arm/
obj-y += bcm/
obj-y += imx/
+obj-y += marvell/
obj-y += mediatek/
obj-y += qcom/
obj-y += renesas/
diff --git a/drivers/pmdomain/amlogic/meson-secure-pwrc.c b/drivers/pmdomain/amlogic/meson-secure-pwrc.c
index e8bda60078c4..1d2f371d2d7f 100644
--- a/drivers/pmdomain/amlogic/meson-secure-pwrc.c
+++ b/drivers/pmdomain/amlogic/meson-secure-pwrc.c
@@ -16,6 +16,9 @@
#include <dt-bindings/power/amlogic,t7-pwrc.h>
#include <dt-bindings/power/amlogic,a4-pwrc.h>
#include <dt-bindings/power/amlogic,a5-pwrc.h>
+#include <dt-bindings/power/amlogic,s6-pwrc.h>
+#include <dt-bindings/power/amlogic,s7-pwrc.h>
+#include <dt-bindings/power/amlogic,s7d-pwrc.h>
#include <linux/arm-smccc.h>
#include <linux/firmware/meson/meson_sm.h>
#include <linux/module.h>
@@ -201,6 +204,71 @@ static const struct meson_secure_pwrc_domain_desc s4_pwrc_domains[] = {
SEC_PD(S4_AUDIO, 0),
};
+static const struct meson_secure_pwrc_domain_desc s6_pwrc_domains[] = {
+ SEC_PD(S6_DSPA, 0),
+ SEC_PD(S6_DOS_HEVC, 0),
+ SEC_PD(S6_DOS_VDEC, 0),
+ SEC_PD(S6_VPU_HDMI, 0),
+ SEC_PD(S6_U2DRD, 0),
+ SEC_PD(S6_U3DRD, 0),
+ SEC_PD(S6_SD_EMMC_C, 0),
+ SEC_PD(S6_GE2D, 0),
+ SEC_PD(S6_AMFC, 0),
+ SEC_PD(S6_VC9000E, 0),
+ SEC_PD(S6_DEWARP, 0),
+ SEC_PD(S6_VICP, 0),
+ SEC_PD(S6_SD_EMMC_A, 0),
+ SEC_PD(S6_SD_EMMC_B, 0),
+ /* ETH is for ethernet online wakeup, and should be always on */
+ SEC_PD(S6_ETH, GENPD_FLAG_ALWAYS_ON),
+ SEC_PD(S6_PCIE, 0),
+ SEC_PD(S6_NNA_4T, 0),
+ SEC_PD(S6_AUDIO, 0),
+ SEC_PD(S6_AUCPU, 0),
+ SEC_PD(S6_ADAPT, 0),
+};
+
+static const struct meson_secure_pwrc_domain_desc s7_pwrc_domains[] = {
+ SEC_PD(S7_DOS_HEVC, 0),
+ SEC_PD(S7_DOS_VDEC, 0),
+ SEC_PD(S7_VPU_HDMI, 0),
+ SEC_PD(S7_USB_COMB, 0),
+ SEC_PD(S7_SD_EMMC_C, 0),
+ SEC_PD(S7_GE2D, 0),
+ SEC_PD(S7_SD_EMMC_A, 0),
+ SEC_PD(S7_SD_EMMC_B, 0),
+ /* ETH is for ethernet online wakeup, and should be always on */
+ SEC_PD(S7_ETH, GENPD_FLAG_ALWAYS_ON),
+ SEC_PD(S7_AUCPU, 0),
+ SEC_PD(S7_AUDIO, 0),
+};
+
+static const struct meson_secure_pwrc_domain_desc s7d_pwrc_domains[] = {
+ SEC_PD(S7D_DOS_HCODEC, 0),
+ SEC_PD(S7D_DOS_HEVC, 0),
+ SEC_PD(S7D_DOS_VDEC, 0),
+ SEC_PD(S7D_VPU_HDMI, 0),
+ SEC_PD(S7D_USB_U2DRD, 0),
+ SEC_PD(S7D_USB_U2H, 0),
+ SEC_PD(S7D_SSD_EMMC_C, 0),
+ SEC_PD(S7D_GE2D, 0),
+ SEC_PD(S7D_AMFC, 0),
+ SEC_PD(S7D_EMMC_A, 0),
+ SEC_PD(S7D_EMMC_B, 0),
+ /* ETH is for ethernet online wakeup, and should be always on */
+ SEC_PD(S7D_ETH, GENPD_FLAG_ALWAYS_ON),
+ SEC_PD(S7D_AUCPU, 0),
+ SEC_PD(S7D_AUDIO, 0),
+ /* SRAMA is used as ATF runtime memory, and should be always on */
+ SEC_PD(S7D_SRAMA, GENPD_FLAG_ALWAYS_ON),
+ /* DMC0 is for DDR PHY ana/dig and DMC, and should be always on */
+ SEC_PD(S7D_DMC0, GENPD_FLAG_ALWAYS_ON),
+ /* DMC1 is for DDR PHY ana/dig and DMC, and should be always on */
+ SEC_PD(S7D_DMC1, GENPD_FLAG_ALWAYS_ON),
+ /* DDR should be always on */
+ SEC_PD(S7D_DDR, GENPD_FLAG_ALWAYS_ON),
+};
+
static const struct meson_secure_pwrc_domain_desc t7_pwrc_domains[] = {
SEC_PD(T7_DSPA, 0),
SEC_PD(T7_DSPB, 0),
@@ -367,6 +435,21 @@ static const struct meson_secure_pwrc_domain_data meson_secure_s4_pwrc_data = {
.count = ARRAY_SIZE(s4_pwrc_domains),
};
+static const struct meson_secure_pwrc_domain_data amlogic_secure_s6_pwrc_data = {
+ .domains = s6_pwrc_domains,
+ .count = ARRAY_SIZE(s6_pwrc_domains),
+};
+
+static const struct meson_secure_pwrc_domain_data amlogic_secure_s7_pwrc_data = {
+ .domains = s7_pwrc_domains,
+ .count = ARRAY_SIZE(s7_pwrc_domains),
+};
+
+static const struct meson_secure_pwrc_domain_data amlogic_secure_s7d_pwrc_data = {
+ .domains = s7d_pwrc_domains,
+ .count = ARRAY_SIZE(s7d_pwrc_domains),
+};
+
static const struct meson_secure_pwrc_domain_data amlogic_secure_t7_pwrc_data = {
.domains = t7_pwrc_domains,
.count = ARRAY_SIZE(t7_pwrc_domains),
@@ -394,6 +477,18 @@ static const struct of_device_id meson_secure_pwrc_match_table[] = {
.data = &meson_secure_s4_pwrc_data,
},
{
+ .compatible = "amlogic,s6-pwrc",
+ .data = &amlogic_secure_s6_pwrc_data,
+ },
+ {
+ .compatible = "amlogic,s7-pwrc",
+ .data = &amlogic_secure_s7_pwrc_data,
+ },
+ {
+ .compatible = "amlogic,s7d-pwrc",
+ .data = &amlogic_secure_s7d_pwrc_data,
+ },
+ {
.compatible = "amlogic,t7-pwrc",
.data = &amlogic_secure_t7_pwrc_data,
},
diff --git a/drivers/pmdomain/apple/pmgr-pwrstate.c b/drivers/pmdomain/apple/pmgr-pwrstate.c
index 9467235110f4..82c33cf727a8 100644
--- a/drivers/pmdomain/apple/pmgr-pwrstate.c
+++ b/drivers/pmdomain/apple/pmgr-pwrstate.c
@@ -306,6 +306,7 @@ err_remove:
}
static const struct of_device_id apple_pmgr_ps_of_match[] = {
+ { .compatible = "apple,t8103-pmgr-pwrstate" },
{ .compatible = "apple,pmgr-pwrstate" },
{}
};
diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
index 0006ab3d0789..61c2277c9ce3 100644
--- a/drivers/pmdomain/core.c
+++ b/drivers/pmdomain/core.c
@@ -187,6 +187,7 @@ static const struct genpd_lock_ops genpd_raw_spin_ops = {
#define genpd_is_opp_table_fw(genpd) (genpd->flags & GENPD_FLAG_OPP_TABLE_FW)
#define genpd_is_dev_name_fw(genpd) (genpd->flags & GENPD_FLAG_DEV_NAME_FW)
#define genpd_is_no_sync_state(genpd) (genpd->flags & GENPD_FLAG_NO_SYNC_STATE)
+#define genpd_is_no_stay_on(genpd) (genpd->flags & GENPD_FLAG_NO_STAY_ON)
static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
const struct generic_pm_domain *genpd)
@@ -1357,7 +1358,6 @@ err_poweroff:
return ret;
}
-#ifndef CONFIG_PM_GENERIC_DOMAINS_OF
static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
@@ -1382,9 +1382,6 @@ static int __init genpd_power_off_unused(void)
mutex_lock(&gpd_list_lock);
list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
- genpd_lock(genpd);
- genpd->stay_on = false;
- genpd_unlock(genpd);
genpd_queue_power_off_work(genpd);
}
@@ -1393,7 +1390,6 @@ static int __init genpd_power_off_unused(void)
return 0;
}
late_initcall_sync(genpd_power_off_unused);
-#endif
#ifdef CONFIG_PM_SLEEP
@@ -2367,6 +2363,18 @@ static void genpd_lock_init(struct generic_pm_domain *genpd)
}
}
+#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
+static void genpd_set_stay_on(struct generic_pm_domain *genpd, bool is_off)
+{
+ genpd->stay_on = !genpd_is_no_stay_on(genpd) && !is_off;
+}
+#else
+static void genpd_set_stay_on(struct generic_pm_domain *genpd, bool is_off)
+{
+ genpd->stay_on = false;
+}
+#endif
+
/**
* pm_genpd_init - Initialize a generic I/O PM domain object.
* @genpd: PM domain object to initialize.
@@ -2392,7 +2400,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
atomic_set(&genpd->sd_count, 0);
genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
- genpd->stay_on = !is_off;
+ genpd_set_stay_on(genpd, is_off);
genpd->sync_state = GENPD_SYNC_STATE_OFF;
genpd->device_count = 0;
genpd->provider = NULL;
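With genpd_power_off_unused() now built unconditionally, the stay-on decision moves to init time: genpd_set_stay_on() keeps an initially-on domain powered until sync_state, unless the provider opts out with GENPD_FLAG_NO_STAY_ON (a flag introduced by this series). Sketch of a provider opting out:

static struct generic_pm_domain demo_pd = {
	.name = "demo",
	.flags = GENPD_FLAG_NO_STAY_ON,	/* may power off before sync_state */
};

static int demo_pd_setup(void)
{
	/* is_off == false: starts ON, but stay_on is forced false */
	return pm_genpd_init(&demo_pd, NULL, false);
}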
diff --git a/drivers/pmdomain/imx/gpc.c b/drivers/pmdomain/imx/gpc.c
index f18c7e6e75dd..33991f3c6b55 100644
--- a/drivers/pmdomain/imx/gpc.c
+++ b/drivers/pmdomain/imx/gpc.c
@@ -343,7 +343,6 @@ static const struct regmap_config imx_gpc_regmap_config = {
.rd_table = &access_table,
.wr_table = &access_table,
.max_register = 0x2ac,
- .fast_io = true,
};
static struct generic_pm_domain *imx_gpc_onecell_domains[] = {
diff --git a/drivers/pmdomain/imx/imx93-blk-ctrl.c b/drivers/pmdomain/imx/imx93-blk-ctrl.c
index 0e2ba8ec55d7..e094fe5a42bf 100644
--- a/drivers/pmdomain/imx/imx93-blk-ctrl.c
+++ b/drivers/pmdomain/imx/imx93-blk-ctrl.c
@@ -86,6 +86,7 @@ struct imx93_blk_ctrl_domain {
struct imx93_blk_ctrl_data {
const struct imx93_blk_ctrl_domain_data *domains;
+ u32 skip_mask;
int num_domains;
const char * const *clk_names;
int num_clks;
@@ -250,6 +251,8 @@ static int imx93_blk_ctrl_probe(struct platform_device *pdev)
int j;
domain->data = data;
+ if (bc_data->skip_mask & BIT(i))
+ continue;
for (j = 0; j < data->num_clks; j++)
domain->clks[j].id = data->clk_names[j];
@@ -418,16 +421,32 @@ static const struct regmap_access_table imx93_media_blk_ctl_access_table = {
.n_yes_ranges = ARRAY_SIZE(imx93_media_blk_ctl_yes_ranges),
};
+static const char * const media_blk_clk_names[] = {
+ "axi", "apb", "nic"
+};
+
+static const struct imx93_blk_ctrl_data imx91_media_blk_ctl_dev_data = {
+ .domains = imx93_media_blk_ctl_domain_data,
+ .skip_mask = BIT(IMX93_MEDIABLK_PD_MIPI_DSI) | BIT(IMX93_MEDIABLK_PD_PXP),
+ .num_domains = ARRAY_SIZE(imx93_media_blk_ctl_domain_data),
+ .clk_names = media_blk_clk_names,
+ .num_clks = ARRAY_SIZE(media_blk_clk_names),
+ .reg_access_table = &imx93_media_blk_ctl_access_table,
+};
+
static const struct imx93_blk_ctrl_data imx93_media_blk_ctl_dev_data = {
.domains = imx93_media_blk_ctl_domain_data,
.num_domains = ARRAY_SIZE(imx93_media_blk_ctl_domain_data),
- .clk_names = (const char *[]){ "axi", "apb", "nic", },
- .num_clks = 3,
+ .clk_names = media_blk_clk_names,
+ .num_clks = ARRAY_SIZE(media_blk_clk_names),
.reg_access_table = &imx93_media_blk_ctl_access_table,
};
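The skip_mask addition lets the i.MX91 entry reuse the full i.MX93 domain table while masking out the MIPI DSI and PXP domains the smaller SoC lacks; probe simply skips masked indices (the `continue` added earlier). The loop shape, as an illustrative fragment rather than the full probe function:

static void demo_init_domains(const struct imx93_blk_ctrl_data *bc_data)
{
	int i;

	for (i = 0; i < bc_data->num_domains; i++) {
		if (bc_data->skip_mask & BIT(i))
			continue;	/* domain absent on this SoC variant */
		/* clock and genpd setup for domain i goes here */
	}
}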
static const struct of_device_id imx93_blk_ctrl_of_match[] = {
{
+ .compatible = "fsl,imx91-media-blk-ctrl",
+ .data = &imx91_media_blk_ctl_dev_data
+ }, {
.compatible = "fsl,imx93-media-blk-ctrl",
.data = &imx93_media_blk_ctl_dev_data
}, {
diff --git a/drivers/pmdomain/marvell/Kconfig b/drivers/pmdomain/marvell/Kconfig
new file mode 100644
index 000000000000..6c4084c82667
--- /dev/null
+++ b/drivers/pmdomain/marvell/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+menu "Marvell PM Domains"
+ depends on ARCH_MMP || COMPILE_TEST
+
+config PXA1908_PM_DOMAINS
+ tristate "Marvell PXA1908 power domains"
+ depends on OF
+ depends on PM
+ default y if ARCH_MMP && ARM64
+ select AUXILIARY_BUS
+ select MFD_SYSCON
+ select PM_GENERIC_DOMAINS
+ select PM_GENERIC_DOMAINS_OF
+ help
+ Say Y here to enable support for Marvell PXA1908's power domains.
+
+endmenu
diff --git a/drivers/pmdomain/marvell/Makefile b/drivers/pmdomain/marvell/Makefile
new file mode 100644
index 000000000000..22c25013f6c8
--- /dev/null
+++ b/drivers/pmdomain/marvell/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_PXA1908_PM_DOMAINS) += pxa1908-power-controller.o
diff --git a/drivers/pmdomain/marvell/pxa1908-power-controller.c b/drivers/pmdomain/marvell/pxa1908-power-controller.c
new file mode 100644
index 000000000000..ff5e6e82d3f8
--- /dev/null
+++ b/drivers/pmdomain/marvell/pxa1908-power-controller.c
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2025 Duje Mihanović <duje@dujemihanovic.xyz>
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/container_of.h>
+#include <linux/dev_printk.h>
+#include <linux/device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/pm_domain.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/power/marvell,pxa1908-power.h>
+
+/* VPU, GPU, ISP */
+#define APMU_PWR_CTRL_REG 0xd8
+#define APMU_PWR_BLK_TMR_REG 0xdc
+#define APMU_PWR_STATUS_REG 0xf0
+
+/* DSI */
+#define APMU_DEBUG 0x88
+#define DSI_PHY_DVM_MASK BIT(31)
+
+#define POWER_ON_LATENCY_US 300
+#define POWER_OFF_LATENCY_US 20
+#define POWER_POLL_TIMEOUT_US (25 * USEC_PER_MSEC)
+#define POWER_POLL_SLEEP_US 6
+
+#define NR_DOMAINS 5
+
+#define to_pxa1908_pd(_genpd) container_of(_genpd, struct pxa1908_pd, genpd)
+
+struct pxa1908_pd_ctrl {
+ struct generic_pm_domain *domains[NR_DOMAINS];
+ struct genpd_onecell_data onecell_data;
+ struct regmap *base;
+ struct device *dev;
+};
+
+struct pxa1908_pd_data {
+ u32 reg_clk_res_ctrl;
+ u32 pwr_state;
+ u32 hw_mode;
+ bool keep_on;
+ int id;
+};
+
+struct pxa1908_pd {
+ const struct pxa1908_pd_data data;
+ struct pxa1908_pd_ctrl *ctrl;
+ struct generic_pm_domain genpd;
+ bool initialized;
+};
+
+static inline bool pxa1908_pd_is_on(struct pxa1908_pd *pd)
+{
+ struct pxa1908_pd_ctrl *ctrl = pd->ctrl;
+
+ return pd->data.id != PXA1908_POWER_DOMAIN_DSI
+ ? regmap_test_bits(ctrl->base, APMU_PWR_STATUS_REG, pd->data.pwr_state)
+ : regmap_test_bits(ctrl->base, APMU_DEBUG, DSI_PHY_DVM_MASK);
+}
+
+static int pxa1908_pd_power_on(struct generic_pm_domain *genpd)
+{
+ struct pxa1908_pd *pd = to_pxa1908_pd(genpd);
+ const struct pxa1908_pd_data *data = &pd->data;
+ struct pxa1908_pd_ctrl *ctrl = pd->ctrl;
+ unsigned int status;
+ int ret = 0;
+
+ regmap_set_bits(ctrl->base, data->reg_clk_res_ctrl, data->hw_mode);
+ if (data->id != PXA1908_POWER_DOMAIN_ISP)
+ regmap_write(ctrl->base, APMU_PWR_BLK_TMR_REG, 0x20001fff);
+ regmap_set_bits(ctrl->base, APMU_PWR_CTRL_REG, data->pwr_state);
+
+ ret = regmap_read_poll_timeout(ctrl->base, APMU_PWR_STATUS_REG, status,
+ status & data->pwr_state, POWER_POLL_SLEEP_US,
+ POWER_ON_LATENCY_US + POWER_POLL_TIMEOUT_US);
+ if (ret == -ETIMEDOUT)
+ dev_err(ctrl->dev, "timed out powering on domain '%s'\n", pd->genpd.name);
+
+ return ret;
+}
+
+static int pxa1908_pd_power_off(struct generic_pm_domain *genpd)
+{
+ struct pxa1908_pd *pd = to_pxa1908_pd(genpd);
+ const struct pxa1908_pd_data *data = &pd->data;
+ struct pxa1908_pd_ctrl *ctrl = pd->ctrl;
+ unsigned int status;
+ int ret;
+
+ regmap_clear_bits(ctrl->base, APMU_PWR_CTRL_REG, data->pwr_state);
+
+ ret = regmap_read_poll_timeout(ctrl->base, APMU_PWR_STATUS_REG, status,
+ !(status & data->pwr_state), POWER_POLL_SLEEP_US,
+ POWER_OFF_LATENCY_US + POWER_POLL_TIMEOUT_US);
+ if (ret == -ETIMEDOUT) {
+ dev_err(ctrl->dev, "timed out powering off domain '%s'\n", pd->genpd.name);
+ return ret;
+ }
+
+ return regmap_clear_bits(ctrl->base, data->reg_clk_res_ctrl, data->hw_mode);
+}
+
+static inline int pxa1908_dsi_power_on(struct generic_pm_domain *genpd)
+{
+ struct pxa1908_pd *pd = to_pxa1908_pd(genpd);
+ struct pxa1908_pd_ctrl *ctrl = pd->ctrl;
+
+ return regmap_set_bits(ctrl->base, APMU_DEBUG, DSI_PHY_DVM_MASK);
+}
+
+static inline int pxa1908_dsi_power_off(struct generic_pm_domain *genpd)
+{
+ struct pxa1908_pd *pd = to_pxa1908_pd(genpd);
+ struct pxa1908_pd_ctrl *ctrl = pd->ctrl;
+
+ return regmap_clear_bits(ctrl->base, APMU_DEBUG, DSI_PHY_DVM_MASK);
+}
+
+#define DOMAIN(_id, _name, ctrl, mode, state) \
+ [_id] = { \
+ .data = { \
+ .reg_clk_res_ctrl = ctrl, \
+ .hw_mode = BIT(mode), \
+ .pwr_state = BIT(state), \
+ .id = _id, \
+ }, \
+ .genpd = { \
+ .name = _name, \
+ .power_on = pxa1908_pd_power_on, \
+ .power_off = pxa1908_pd_power_off, \
+ }, \
+ }
+
+static struct pxa1908_pd domains[NR_DOMAINS] = {
+ DOMAIN(PXA1908_POWER_DOMAIN_VPU, "vpu", 0xa4, 19, 2),
+ DOMAIN(PXA1908_POWER_DOMAIN_GPU, "gpu", 0xcc, 11, 0),
+ DOMAIN(PXA1908_POWER_DOMAIN_GPU2D, "gpu2d", 0xf4, 11, 6),
+ DOMAIN(PXA1908_POWER_DOMAIN_ISP, "isp", 0x38, 15, 4),
+ [PXA1908_POWER_DOMAIN_DSI] = {
+ .genpd = {
+ .name = "dsi",
+ .power_on = pxa1908_dsi_power_on,
+ .power_off = pxa1908_dsi_power_off,
+ /*
+ * TODO: There is no DSI driver written yet and until then we probably
+ * don't want to power off the DSI PHY ever.
+ */
+ .flags = GENPD_FLAG_ALWAYS_ON,
+ },
+ .data = {
+ /* See above. */
+ .keep_on = true,
+ },
+ },
+};
+
+static void pxa1908_pd_remove(struct auxiliary_device *auxdev)
+{
+ struct pxa1908_pd *pd;
+ int ret;
+
+ for (int i = NR_DOMAINS - 1; i >= 0; i--) {
+ pd = &domains[i];
+
+ if (!pd->initialized)
+ continue;
+
+ if (pxa1908_pd_is_on(pd) && !pd->data.keep_on)
+ pxa1908_pd_power_off(&pd->genpd);
+
+ ret = pm_genpd_remove(&pd->genpd);
+ if (ret)
+ dev_err(&auxdev->dev, "failed to remove domain '%s': %d\n",
+ pd->genpd.name, ret);
+ }
+}
+
+static int
+pxa1908_pd_init(struct pxa1908_pd_ctrl *ctrl, int id, struct device *dev)
+{
+ struct pxa1908_pd *pd = &domains[id];
+ int ret;
+
+ ctrl->domains[id] = &pd->genpd;
+
+ pd->ctrl = ctrl;
+
+ /* Make sure the state of the hardware is synced with the domain table above. */
+ if (pd->data.keep_on) {
+ ret = pd->genpd.power_on(&pd->genpd);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to power on domain '%s'\n",
+ pd->genpd.name);
+ } else {
+ if (pxa1908_pd_is_on(pd)) {
+ dev_warn(dev,
+ "domain '%s' is on despite being default off; powering off\n",
+ pd->genpd.name);
+
+ ret = pd->genpd.power_off(&pd->genpd);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to power off domain '%s'\n",
+ pd->genpd.name);
+ }
+ }
+
+ ret = pm_genpd_init(&pd->genpd, NULL, !pd->data.keep_on);
+ if (ret)
+ return dev_err_probe(dev, ret, "domain '%s' failed to initialize\n",
+ pd->genpd.name);
+
+ pd->initialized = true;
+
+ return 0;
+}
+
+static int
+pxa1908_pd_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *aux_id)
+{
+ struct pxa1908_pd_ctrl *ctrl;
+ struct device *dev = &auxdev->dev;
+ int ret;
+
+ ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ auxiliary_set_drvdata(auxdev, ctrl);
+
+ ctrl->base = syscon_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR(ctrl->base))
+ return dev_err_probe(dev, PTR_ERR(ctrl->base), "no regmap available\n");
+
+ ctrl->dev = dev;
+ ctrl->onecell_data.domains = ctrl->domains;
+ ctrl->onecell_data.num_domains = NR_DOMAINS;
+
+ for (int i = 0; i < NR_DOMAINS; i++) {
+ ret = pxa1908_pd_init(ctrl, i, dev);
+ if (ret)
+ goto err;
+ }
+
+ return of_genpd_add_provider_onecell(dev->parent->of_node, &ctrl->onecell_data);
+
+err:
+ pxa1908_pd_remove(auxdev);
+ return ret;
+}
+
+static const struct auxiliary_device_id pxa1908_pd_id[] = {
+ { .name = "clk_pxa1908_apmu.power" },
+ { }
+};
+MODULE_DEVICE_TABLE(auxiliary, pxa1908_pd_id);
+
+static struct auxiliary_driver pxa1908_pd_driver = {
+ .probe = pxa1908_pd_probe,
+ .remove = pxa1908_pd_remove,
+ .id_table = pxa1908_pd_id,
+};
+module_auxiliary_driver(pxa1908_pd_driver);
+
+MODULE_AUTHOR("Duje Mihanović <duje@dujemihanovic.xyz>");
+MODULE_DESCRIPTION("Marvell PXA1908 power domain driver");
+MODULE_LICENSE("GPL");
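The id_table above matches the auxiliary device "clk_pxa1908_apmu.power", which is expected to be created by the PXA1908 APMU clock driver. A minimal sketch of that producer side, assuming the three-argument devm_auxiliary_device_create() helper used later in this series (the function name below is hypothetical):

/* Hypothetical registration in the APMU clock driver; KBUILD_MODNAME
 * ("clk_pxa1908_apmu") supplies the prefix of the matched device name. */
static int pxa1908_apmu_register_power(struct device *apmu_dev)
{
	struct auxiliary_device *adev;

	/* Creates "clk_pxa1908_apmu.power"; pxa1908_pd_probe() then picks
	 * up the regmap from apmu_dev->of_node via syscon_node_to_regmap(). */
	adev = devm_auxiliary_device_create(apmu_dev, "power", NULL);
	if (!adev)
		return -ENODEV;

	return 0;
}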
diff --git a/drivers/pmdomain/mediatek/airoha-cpu-pmdomain.c b/drivers/pmdomain/mediatek/airoha-cpu-pmdomain.c
index 0fd88d2f9ac2..3b1d202f89dc 100644
--- a/drivers/pmdomain/mediatek/airoha-cpu-pmdomain.c
+++ b/drivers/pmdomain/mediatek/airoha-cpu-pmdomain.c
@@ -21,10 +21,10 @@ struct airoha_cpu_pmdomain_priv {
struct generic_pm_domain pd;
};
-static long airoha_cpu_pmdomain_clk_round(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int airoha_cpu_pmdomain_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return rate;
+ return 0;
}
static unsigned long airoha_cpu_pmdomain_clk_get(struct clk_hw *hw,
@@ -48,7 +48,7 @@ static int airoha_cpu_pmdomain_clk_is_enabled(struct clk_hw *hw)
static const struct clk_ops airoha_cpu_pmdomain_clk_ops = {
.recalc_rate = airoha_cpu_pmdomain_clk_get,
.is_enabled = airoha_cpu_pmdomain_clk_is_enabled,
- .round_rate = airoha_cpu_pmdomain_clk_round,
+ .determine_rate = airoha_cpu_pmdomain_clk_determine_rate,
};
static int airoha_cpu_pmdomain_set_performance_state(struct generic_pm_domain *domain,
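The conversion above is behavior-preserving: the old .round_rate callback accepted any requested rate unchanged, and a .determine_rate callback achieves the same by leaving req->rate untouched and returning 0. A minimal sketch of the general migration pattern (names are illustrative; struct clk_rate_request comes from include/linux/clk-provider.h):

/* Old contract: return the rate the hardware would actually run at. */
static long example_round_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long *parent_rate)
{
	return rate;				/* accept the rate as-is */
}

/* New contract: adjust req->rate in place and return 0 on success. */
static int example_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	/* req->rate already holds the requested rate; leave it unchanged. */
	return 0;
}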
diff --git a/drivers/pmdomain/mediatek/mt6795-pm-domains.h b/drivers/pmdomain/mediatek/mt6795-pm-domains.h
index a3f7785b04bd..dc8e9f8877ad 100644
--- a/drivers/pmdomain/mediatek/mt6795-pm-domains.h
+++ b/drivers/pmdomain/mediatek/mt6795-pm-domains.h
@@ -9,6 +9,9 @@
/*
* MT6795 power domain support
*/
+static enum scpsys_bus_prot_block scpsys_bus_prot_blocks_mt6795[] = {
+ BUS_PROT_BLOCK_INFRA
+};
static const struct scpsys_domain_data scpsys_domain_data_mt6795[] = {
[MT6795_POWER_DOMAIN_VDEC] = {
@@ -107,6 +110,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt6795[] = {
static const struct scpsys_soc_data mt6795_scpsys_data = {
.domains_data = scpsys_domain_data_mt6795,
.num_domains = ARRAY_SIZE(scpsys_domain_data_mt6795),
+ .bus_prot_blocks = scpsys_bus_prot_blocks_mt6795,
+ .num_bus_prot_blocks = ARRAY_SIZE(scpsys_bus_prot_blocks_mt6795),
};
#endif /* __SOC_MEDIATEK_MT6795_PM_DOMAINS_H */
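The same pattern repeats in each per-SoC header that follows: the SoC advertises which bus-protection regmap blocks it needs, and with the new binding the DT "access-controllers" phandle list must carry exactly that many entries, in the same order (enforced in scpsys_get_bus_protection() further below). A hypothetical SoC needing both INFRA and SMI protection would declare:

/* Hypothetical two-block example: "access-controllers" must then list
 * exactly two phandles, the infracfg syscon first and the SMI second. */
static enum scpsys_bus_prot_block scpsys_bus_prot_blocks_mtXXXX[] = {
	BUS_PROT_BLOCK_INFRA, BUS_PROT_BLOCK_SMI
};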
diff --git a/drivers/pmdomain/mediatek/mt8167-pm-domains.h b/drivers/pmdomain/mediatek/mt8167-pm-domains.h
index 8a0e898b79ab..f6ee48a711a1 100644
--- a/drivers/pmdomain/mediatek/mt8167-pm-domains.h
+++ b/drivers/pmdomain/mediatek/mt8167-pm-domains.h
@@ -12,6 +12,9 @@
/*
* MT8167 power domain support
*/
+static enum scpsys_bus_prot_block scpsys_bus_prot_blocks_mt8167[] = {
+ BUS_PROT_BLOCK_INFRA
+};
static const struct scpsys_domain_data scpsys_domain_data_mt8167[] = {
[MT8167_POWER_DOMAIN_MM] = {
@@ -99,6 +102,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8167[] = {
static const struct scpsys_soc_data mt8167_scpsys_data = {
.domains_data = scpsys_domain_data_mt8167,
.num_domains = ARRAY_SIZE(scpsys_domain_data_mt8167),
+ .bus_prot_blocks = scpsys_bus_prot_blocks_mt8167,
+ .num_bus_prot_blocks = ARRAY_SIZE(scpsys_bus_prot_blocks_mt8167),
};
#endif /* __SOC_MEDIATEK_MT8167_PM_DOMAINS_H */
diff --git a/drivers/pmdomain/mediatek/mt8173-pm-domains.h b/drivers/pmdomain/mediatek/mt8173-pm-domains.h
index 7be0f47f5214..561a644b5d1c 100644
--- a/drivers/pmdomain/mediatek/mt8173-pm-domains.h
+++ b/drivers/pmdomain/mediatek/mt8173-pm-domains.h
@@ -9,6 +9,9 @@
/*
* MT8173 power domain support
*/
+static enum scpsys_bus_prot_block scpsys_bus_prot_blocks_mt8173[] = {
+ BUS_PROT_BLOCK_INFRA
+};
static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
[MT8173_POWER_DOMAIN_VDEC] = {
@@ -118,6 +121,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
static const struct scpsys_soc_data mt8173_scpsys_data = {
.domains_data = scpsys_domain_data_mt8173,
.num_domains = ARRAY_SIZE(scpsys_domain_data_mt8173),
+ .bus_prot_blocks = scpsys_bus_prot_blocks_mt8173,
+ .num_bus_prot_blocks = ARRAY_SIZE(scpsys_bus_prot_blocks_mt8173),
};
#endif /* __SOC_MEDIATEK_MT8173_PM_DOMAINS_H */
diff --git a/drivers/pmdomain/mediatek/mt8183-pm-domains.h b/drivers/pmdomain/mediatek/mt8183-pm-domains.h
index c4c1b63d85b1..3742782a2702 100644
--- a/drivers/pmdomain/mediatek/mt8183-pm-domains.h
+++ b/drivers/pmdomain/mediatek/mt8183-pm-domains.h
@@ -9,6 +9,9 @@
/*
* MT8183 power domain support
*/
+static enum scpsys_bus_prot_block scpsys_bus_prot_blocks_mt8183[] = {
+ BUS_PROT_BLOCK_INFRA, BUS_PROT_BLOCK_SMI
+};
static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
[MT8183_POWER_DOMAIN_AUDIO] = {
@@ -290,6 +293,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
static const struct scpsys_soc_data mt8183_scpsys_data = {
.domains_data = scpsys_domain_data_mt8183,
.num_domains = ARRAY_SIZE(scpsys_domain_data_mt8183),
+ .bus_prot_blocks = scpsys_bus_prot_blocks_mt8183,
+ .num_bus_prot_blocks = ARRAY_SIZE(scpsys_bus_prot_blocks_mt8183),
};
#endif /* __SOC_MEDIATEK_MT8183_PM_DOMAINS_H */
diff --git a/drivers/pmdomain/mediatek/mt8186-pm-domains.h b/drivers/pmdomain/mediatek/mt8186-pm-domains.h
index cbac715c38fa..00b9861af7c9 100644
--- a/drivers/pmdomain/mediatek/mt8186-pm-domains.h
+++ b/drivers/pmdomain/mediatek/mt8186-pm-domains.h
@@ -13,6 +13,9 @@
/*
* MT8186 power domain support
*/
+static enum scpsys_bus_prot_block scpsys_bus_prot_blocks_mt8186[] = {
+ BUS_PROT_BLOCK_INFRA
+};
static const struct scpsys_domain_data scpsys_domain_data_mt8186[] = {
[MT8186_POWER_DOMAIN_MFG0] = {
@@ -361,6 +364,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8186[] = {
static const struct scpsys_soc_data mt8186_scpsys_data = {
.domains_data = scpsys_domain_data_mt8186,
.num_domains = ARRAY_SIZE(scpsys_domain_data_mt8186),
+ .bus_prot_blocks = scpsys_bus_prot_blocks_mt8186,
+ .num_bus_prot_blocks = ARRAY_SIZE(scpsys_bus_prot_blocks_mt8186),
};
#endif /* __SOC_MEDIATEK_MT8186_PM_DOMAINS_H */
diff --git a/drivers/pmdomain/mediatek/mt8188-pm-domains.h b/drivers/pmdomain/mediatek/mt8188-pm-domains.h
index 007235be9efe..3a989e83e9b7 100644
--- a/drivers/pmdomain/mediatek/mt8188-pm-domains.h
+++ b/drivers/pmdomain/mediatek/mt8188-pm-domains.h
@@ -14,6 +14,10 @@
* MT8188 power domain support
*/
+static enum scpsys_bus_prot_block scpsys_bus_prot_blocks_mt8188[] = {
+ BUS_PROT_BLOCK_INFRA
+};
+
static const struct scpsys_domain_data scpsys_domain_data_mt8188[] = {
[MT8188_POWER_DOMAIN_MFG0] = {
.name = "mfg0",
@@ -685,6 +689,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8188[] = {
static const struct scpsys_soc_data mt8188_scpsys_data = {
.domains_data = scpsys_domain_data_mt8188,
.num_domains = ARRAY_SIZE(scpsys_domain_data_mt8188),
+ .bus_prot_blocks = scpsys_bus_prot_blocks_mt8188,
+ .num_bus_prot_blocks = ARRAY_SIZE(scpsys_bus_prot_blocks_mt8188),
};
#endif /* __SOC_MEDIATEK_MT8188_PM_DOMAINS_H */
diff --git a/drivers/pmdomain/mediatek/mt8192-pm-domains.h b/drivers/pmdomain/mediatek/mt8192-pm-domains.h
index 6f139eed3769..5d62fac5f682 100644
--- a/drivers/pmdomain/mediatek/mt8192-pm-domains.h
+++ b/drivers/pmdomain/mediatek/mt8192-pm-domains.h
@@ -9,6 +9,9 @@
/*
* MT8192 power domain support
*/
+static enum scpsys_bus_prot_block scpsys_bus_prot_blocks_mt8192[] = {
+ BUS_PROT_BLOCK_INFRA
+};
static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
[MT8192_POWER_DOMAIN_AUDIO] = {
@@ -380,6 +383,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
static const struct scpsys_soc_data mt8192_scpsys_data = {
.domains_data = scpsys_domain_data_mt8192,
.num_domains = ARRAY_SIZE(scpsys_domain_data_mt8192),
+ .bus_prot_blocks = scpsys_bus_prot_blocks_mt8192,
+ .num_bus_prot_blocks = ARRAY_SIZE(scpsys_bus_prot_blocks_mt8192),
};
#endif /* __SOC_MEDIATEK_MT8192_PM_DOMAINS_H */
diff --git a/drivers/pmdomain/mediatek/mt8195-pm-domains.h b/drivers/pmdomain/mediatek/mt8195-pm-domains.h
index 59aa031ae632..1d3ca195ac75 100644
--- a/drivers/pmdomain/mediatek/mt8195-pm-domains.h
+++ b/drivers/pmdomain/mediatek/mt8195-pm-domains.h
@@ -13,6 +13,9 @@
/*
* MT8195 power domain support
*/
+static enum scpsys_bus_prot_block scpsys_bus_prot_blocks_mt8195[] = {
+ BUS_PROT_BLOCK_INFRA
+};
static const struct scpsys_domain_data scpsys_domain_data_mt8195[] = {
[MT8195_POWER_DOMAIN_PCIE_MAC_P0] = {
@@ -123,6 +126,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8195[] = {
MT8195_TOP_AXI_PROT_EN_2_CLR,
MT8195_TOP_AXI_PROT_EN_2_STA1),
},
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_ACTIVE_WAKEUP,
},
[MT8195_POWER_DOMAIN_MFG0] = {
.name = "mfg0",
@@ -661,6 +665,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8195[] = {
static const struct scpsys_soc_data mt8195_scpsys_data = {
.domains_data = scpsys_domain_data_mt8195,
.num_domains = ARRAY_SIZE(scpsys_domain_data_mt8195),
+ .bus_prot_blocks = scpsys_bus_prot_blocks_mt8195,
+ .num_bus_prot_blocks = ARRAY_SIZE(scpsys_bus_prot_blocks_mt8195),
};
#endif /* __SOC_MEDIATEK_MT8195_PM_DOMAINS_H */
diff --git a/drivers/pmdomain/mediatek/mt8365-pm-domains.h b/drivers/pmdomain/mediatek/mt8365-pm-domains.h
index 3d83d49eaa7c..33265ab8ce76 100644
--- a/drivers/pmdomain/mediatek/mt8365-pm-domains.h
+++ b/drivers/pmdomain/mediatek/mt8365-pm-domains.h
@@ -29,11 +29,13 @@
MT8365_SMI_COMMON_CLAMP_EN)
#define MT8365_BUS_PROT_WAY_EN(_set_mask, _set, _sta_mask, _sta) \
- _BUS_PROT(_set_mask, _set, _set, _sta_mask, _sta, \
- BUS_PROT_COMPONENT_INFRA | \
- BUS_PROT_STA_COMPONENT_INFRA_NAO | \
- BUS_PROT_INVERTED | \
- BUS_PROT_REG_UPDATE)
+ _BUS_PROT_STA(INFRA, INFRA_NAO, _set_mask, _set, _set, \
+ _sta_mask, _sta, \
+ BUS_PROT_INVERTED | BUS_PROT_REG_UPDATE)
+
+static enum scpsys_bus_prot_block scpsys_bus_prot_blocks_mt8365[] = {
+ BUS_PROT_BLOCK_INFRA, BUS_PROT_BLOCK_INFRA_NAO, BUS_PROT_BLOCK_SMI
+};
static const struct scpsys_domain_data scpsys_domain_data_mt8365[] = {
[MT8365_POWER_DOMAIN_MM] = {
@@ -192,6 +194,8 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8365[] = {
static const struct scpsys_soc_data mt8365_scpsys_data = {
.domains_data = scpsys_domain_data_mt8365,
.num_domains = ARRAY_SIZE(scpsys_domain_data_mt8365),
+ .bus_prot_blocks = scpsys_bus_prot_blocks_mt8365,
+ .num_bus_prot_blocks = ARRAY_SIZE(scpsys_bus_prot_blocks_mt8365),
};
#endif /* __SOC_MEDIATEK_MT8365_PM_DOMAINS_H */
diff --git a/drivers/pmdomain/mediatek/mtk-pm-domains.c b/drivers/pmdomain/mediatek/mtk-pm-domains.c
index a58ed7e2d9a4..0ebe7379b94e 100644
--- a/drivers/pmdomain/mediatek/mtk-pm-domains.c
+++ b/drivers/pmdomain/mediatek/mtk-pm-domains.c
@@ -39,6 +39,12 @@
#define PWR_SRAM_CLKISO_BIT BIT(5)
#define PWR_SRAM_ISOINT_B_BIT BIT(6)
+#define PWR_RTFF_SAVE BIT(24)
+#define PWR_RTFF_NRESTORE BIT(25)
+#define PWR_RTFF_CLK_DIS BIT(26)
+#define PWR_RTFF_SAVE_FLAG BIT(27)
+#define PWR_RTFF_UFS_CLK_DIS BIT(28)
+
struct scpsys_domain {
struct generic_pm_domain genpd;
const struct scpsys_domain_data *data;
@@ -47,9 +53,6 @@ struct scpsys_domain {
struct clk_bulk_data *clks;
int num_subsys_clks;
struct clk_bulk_data *subsys_clks;
- struct regmap *infracfg_nao;
- struct regmap *infracfg;
- struct regmap *smi;
struct regulator *supply;
};
@@ -57,6 +60,8 @@ struct scpsys {
struct device *dev;
struct regmap *base;
const struct scpsys_soc_data *soc_data;
+ u8 bus_prot_index[BUS_PROT_BLOCK_COUNT];
+ struct regmap **bus_prot;
struct genpd_onecell_data pd_data;
struct generic_pm_domain *domains[];
};
@@ -80,16 +85,23 @@ static bool scpsys_domain_is_on(struct scpsys_domain *pd)
static int scpsys_sram_enable(struct scpsys_domain *pd)
{
- u32 pdn_ack = pd->data->sram_pdn_ack_bits;
+ u32 expected_ack, pdn_ack = pd->data->sram_pdn_ack_bits;
struct scpsys *scpsys = pd->scpsys;
unsigned int tmp;
int ret;
- regmap_clear_bits(scpsys->base, pd->data->ctl_offs, pd->data->sram_pdn_bits);
+ if (MTK_SCPD_CAPS(pd, MTK_SCPD_SRAM_PDN_INVERTED)) {
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, pd->data->sram_pdn_bits);
+ expected_ack = pdn_ack;
+ } else {
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, pd->data->sram_pdn_bits);
+ expected_ack = 0;
+ }
/* Either wait until SRAM_PDN_ACK all 1 or 0 */
ret = regmap_read_poll_timeout(scpsys->base, pd->data->ctl_offs, tmp,
- (tmp & pdn_ack) == 0, MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
+ (tmp & pdn_ack) == expected_ack,
+ MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
if (ret < 0)
return ret;
@@ -104,7 +116,7 @@ static int scpsys_sram_enable(struct scpsys_domain *pd)
static int scpsys_sram_disable(struct scpsys_domain *pd)
{
- u32 pdn_ack = pd->data->sram_pdn_ack_bits;
+ u32 expected_ack, pdn_ack = pd->data->sram_pdn_ack_bits;
struct scpsys *scpsys = pd->scpsys;
unsigned int tmp;
@@ -114,30 +126,36 @@ static int scpsys_sram_disable(struct scpsys_domain *pd)
regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_SRAM_ISOINT_B_BIT);
}
- regmap_set_bits(scpsys->base, pd->data->ctl_offs, pd->data->sram_pdn_bits);
+ if (MTK_SCPD_CAPS(pd, MTK_SCPD_SRAM_PDN_INVERTED)) {
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, pd->data->sram_pdn_bits);
+ expected_ack = 0;
+ } else {
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, pd->data->sram_pdn_bits);
+ expected_ack = pdn_ack;
+ }
/* Either wait until SRAM_PDN_ACK all 1 or 0 */
return regmap_read_poll_timeout(scpsys->base, pd->data->ctl_offs, tmp,
- (tmp & pdn_ack) == pdn_ack, MTK_POLL_DELAY_US,
- MTK_POLL_TIMEOUT);
+ (tmp & pdn_ack) == expected_ack,
+ MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
}
static struct regmap *scpsys_bus_protect_get_regmap(struct scpsys_domain *pd,
const struct scpsys_bus_prot_data *bpd)
{
- if (bpd->flags & BUS_PROT_COMPONENT_SMI)
- return pd->smi;
- else
- return pd->infracfg;
+ struct scpsys *scpsys = pd->scpsys;
+ unsigned short block_idx = scpsys->bus_prot_index[bpd->bus_prot_block];
+
+ return scpsys->bus_prot[block_idx];
}
static struct regmap *scpsys_bus_protect_get_sta_regmap(struct scpsys_domain *pd,
const struct scpsys_bus_prot_data *bpd)
{
- if (bpd->flags & BUS_PROT_STA_COMPONENT_INFRA_NAO)
- return pd->infracfg_nao;
- else
- return scpsys_bus_protect_get_regmap(pd, bpd);
+ struct scpsys *scpsys = pd->scpsys;
+ int block_idx = scpsys->bus_prot_index[bpd->bus_prot_sta_block];
+
+ return scpsys->bus_prot[block_idx];
}
static int scpsys_bus_protect_clear(struct scpsys_domain *pd,
@@ -149,7 +167,7 @@ static int scpsys_bus_protect_clear(struct scpsys_domain *pd,
u32 expected_ack;
u32 val;
- expected_ack = (bpd->flags & BUS_PROT_STA_COMPONENT_INFRA_NAO ? sta_mask : 0);
+ expected_ack = (bpd->bus_prot_sta_block == BUS_PROT_BLOCK_INFRA_NAO ? sta_mask : 0);
if (bpd->flags & BUS_PROT_REG_UPDATE)
regmap_clear_bits(regmap, bpd->bus_prot_clr, bpd->bus_prot_set_clr_mask);
@@ -232,11 +250,161 @@ static int scpsys_regulator_disable(struct regulator *supply)
return supply ? regulator_disable(supply) : 0;
}
+static int scpsys_ctl_pwrseq_on(struct scpsys_domain *pd)
+{
+ struct scpsys *scpsys = pd->scpsys;
+ bool do_rtff_nrestore, tmp;
+ int ret;
+
+ /* subsys power on */
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT);
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_2ND_BIT);
+
+ /* wait until PWR_ACK = 1 */
+ ret = readx_poll_timeout(scpsys_domain_is_on, pd, tmp, tmp, MTK_POLL_DELAY_US,
+ MTK_POLL_TIMEOUT);
+ if (ret < 0)
+ return ret;
+
+ if (pd->data->rtff_type == SCPSYS_RTFF_TYPE_PCIE_PHY)
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_CLK_DIS);
+
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_CLK_DIS_BIT);
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ISO_BIT);
+
+ /* Wait for RTFF HW to sync buck isolation state if this is PCIe PHY RTFF */
+ if (pd->data->rtff_type == SCPSYS_RTFF_TYPE_PCIE_PHY)
+ udelay(5);
+
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);
+
+ /*
+ * RTFF HW state may be modified by the secure world or by remote processors.
+ *
+ * With the sole exception of STOR_UFS, which always needs save/restore,
+ * check whether this power domain's RTFF is already on before attempting
+ * the NRESTORE procedure; otherwise the system will lock up.
+ */
+ switch (pd->data->rtff_type) {
+ case SCPSYS_RTFF_TYPE_GENERIC:
+ case SCPSYS_RTFF_TYPE_PCIE_PHY:
+ {
+ u32 ctl_status;
+
+ regmap_read(scpsys->base, pd->data->ctl_offs, &ctl_status);
+ do_rtff_nrestore = ctl_status & PWR_RTFF_SAVE_FLAG;
+ break;
+ }
+ case SCPSYS_RTFF_TYPE_STOR_UFS:
+ /* STOR_UFS always needs NRESTORE */
+ do_rtff_nrestore = true;
+ break;
+ default:
+ do_rtff_nrestore = false;
+ break;
+ }
+
+ /* Return early if RTFF NRESTORE should not be done */
+ if (!do_rtff_nrestore)
+ return 0;
+
+ switch (pd->data->rtff_type) {
+ case SCPSYS_RTFF_TYPE_GENERIC:
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_SAVE_FLAG);
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_CLK_DIS);
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_NRESTORE);
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_NRESTORE);
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_CLK_DIS);
+ break;
+ case SCPSYS_RTFF_TYPE_PCIE_PHY:
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_SAVE_FLAG);
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_NRESTORE);
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_NRESTORE);
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_CLK_DIS);
+ break;
+ case SCPSYS_RTFF_TYPE_STOR_UFS:
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_UFS_CLK_DIS);
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_NRESTORE);
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_NRESTORE);
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_UFS_CLK_DIS);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static void scpsys_ctl_pwrseq_off(struct scpsys_domain *pd)
+{
+ struct scpsys *scpsys = pd->scpsys;
+
+ switch (pd->data->rtff_type) {
+ case SCPSYS_RTFF_TYPE_GENERIC:
+ case SCPSYS_RTFF_TYPE_PCIE_PHY:
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_CLK_DIS);
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_SAVE);
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_SAVE);
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_CLK_DIS);
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_SAVE_FLAG);
+ break;
+ case SCPSYS_RTFF_TYPE_STOR_UFS:
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_UFS_CLK_DIS);
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_SAVE);
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_SAVE);
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RTFF_UFS_CLK_DIS);
+ break;
+ default:
+ break;
+ }
+
+ /* subsys power off */
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ISO_BIT);
+
+ /* Wait for RTFF HW to sync buck isolation state if this is PCIe PHY RTFF */
+ if (pd->data->rtff_type == SCPSYS_RTFF_TYPE_PCIE_PHY)
+ udelay(1);
+
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_CLK_DIS_BIT);
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_2ND_BIT);
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT);
+}
+
+static int scpsys_modem_pwrseq_on(struct scpsys_domain *pd)
+{
+ struct scpsys *scpsys = pd->scpsys;
+ bool tmp;
+ int ret;
+
+ if (!MTK_SCPD_CAPS(pd, MTK_SCPD_SKIP_RESET_B))
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);
+
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT);
+
+ /* wait until PWR_ACK = 1 */
+ ret = readx_poll_timeout(scpsys_domain_is_on, pd, tmp, tmp, MTK_POLL_DELAY_US,
+ MTK_POLL_TIMEOUT);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void scpsys_modem_pwrseq_off(struct scpsys_domain *pd)
+{
+ struct scpsys *scpsys = pd->scpsys;
+
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT);
+
+ if (!MTK_SCPD_CAPS(pd, MTK_SCPD_SKIP_RESET_B))
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);
+}
+
static int scpsys_power_on(struct generic_pm_domain *genpd)
{
struct scpsys_domain *pd = container_of(genpd, struct scpsys_domain, genpd);
struct scpsys *scpsys = pd->scpsys;
- bool tmp;
int ret;
ret = scpsys_regulator_enable(pd->supply);
@@ -251,20 +419,14 @@ static int scpsys_power_on(struct generic_pm_domain *genpd)
regmap_clear_bits(scpsys->base, pd->data->ext_buck_iso_offs,
pd->data->ext_buck_iso_mask);
- /* subsys power on */
- regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT);
- regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_2ND_BIT);
+ if (MTK_SCPD_CAPS(pd, MTK_SCPD_MODEM_PWRSEQ))
+ ret = scpsys_modem_pwrseq_on(pd);
+ else
+ ret = scpsys_ctl_pwrseq_on(pd);
- /* wait until PWR_ACK = 1 */
- ret = readx_poll_timeout(scpsys_domain_is_on, pd, tmp, tmp, MTK_POLL_DELAY_US,
- MTK_POLL_TIMEOUT);
- if (ret < 0)
+ if (ret)
goto err_pwr_ack;
- regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_CLK_DIS_BIT);
- regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ISO_BIT);
- regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);
-
/*
 * On a few MediaTek platforms (e.g. MT6779), the bus protection policy is
 * stricter, which means the bus protection release must happen before bus
@@ -330,12 +492,10 @@ static int scpsys_power_off(struct generic_pm_domain *genpd)
clk_bulk_disable_unprepare(pd->num_subsys_clks, pd->subsys_clks);
- /* subsys power off */
- regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ISO_BIT);
- regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_CLK_DIS_BIT);
- regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);
- regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_2ND_BIT);
- regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT);
+ if (MTK_SCPD_CAPS(pd, MTK_SCPD_MODEM_PWRSEQ))
+ scpsys_modem_pwrseq_off(pd);
+ else
+ scpsys_ctl_pwrseq_off(pd);
/* wait until PWR_ACK = 0 */
ret = readx_poll_timeout(scpsys_domain_is_on, pd, tmp, !tmp, MTK_POLL_DELAY_US,
@@ -355,7 +515,6 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
{
const struct scpsys_domain_data *domain_data;
struct scpsys_domain *pd;
- struct device_node *smi_node;
struct property *prop;
const char *clk_name;
int i, ret, num_clks;
@@ -396,32 +555,6 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
node);
}
- pd->infracfg = syscon_regmap_lookup_by_phandle_optional(node, "mediatek,infracfg");
- if (IS_ERR(pd->infracfg))
- return dev_err_cast_probe(scpsys->dev, pd->infracfg,
- "%pOF: failed to get infracfg regmap\n",
- node);
-
- smi_node = of_parse_phandle(node, "mediatek,smi", 0);
- if (smi_node) {
- pd->smi = device_node_to_regmap(smi_node);
- of_node_put(smi_node);
- if (IS_ERR(pd->smi))
- return dev_err_cast_probe(scpsys->dev, pd->smi,
- "%pOF: failed to get SMI regmap\n",
- node);
- }
-
- if (MTK_SCPD_CAPS(pd, MTK_SCPD_HAS_INFRA_NAO)) {
- pd->infracfg_nao = syscon_regmap_lookup_by_phandle(node, "mediatek,infracfg-nao");
- if (IS_ERR(pd->infracfg_nao))
- return dev_err_cast_probe(scpsys->dev, pd->infracfg_nao,
- "%pOF: failed to get infracfg-nao regmap\n",
- node);
- } else {
- pd->infracfg_nao = NULL;
- }
-
num_clks = of_clk_get_parent_count(node);
if (num_clks > 0) {
/* Calculate number of subsys_clks */
@@ -615,6 +748,136 @@ static void scpsys_domain_cleanup(struct scpsys *scpsys)
}
}
+static int scpsys_get_bus_protection_legacy(struct device *dev, struct scpsys *scpsys)
+{
+ const u8 bp_blocks[3] = {
+ BUS_PROT_BLOCK_INFRA, BUS_PROT_BLOCK_SMI, BUS_PROT_BLOCK_INFRA_NAO
+ };
+ struct device_node *np = dev->of_node;
+ struct device_node *node, *smi_np;
+ int num_regmaps = 0, i, j;
+ struct regmap *regmap[3];
+
+ /*
+ * The legacy binding provides at most three bus protection handles,
+ * some of which are optional, so the array of bp blocks that is
+ * normally passed in as platform data must be built dynamically here.
+ *
+ * Try to retrieve every regmap that the legacy code supported and
+ * count how many are present; this then makes it possible to allocate
+ * the array of bus_prot regmaps and convert everything to the
+ * new-style handling.
+ */
+ node = of_find_node_with_property(np, "mediatek,infracfg");
+ if (node) {
+ regmap[0] = syscon_regmap_lookup_by_phandle(node, "mediatek,infracfg");
+ of_node_put(node);
+ num_regmaps++;
+ if (IS_ERR(regmap[0]))
+ return dev_err_probe(dev, PTR_ERR(regmap[0]),
+ "%pOF: failed to get infracfg regmap\n",
+ node);
+ } else {
+ regmap[0] = NULL;
+ }
+
+ node = of_find_node_with_property(np, "mediatek,smi");
+ if (node) {
+ smi_np = of_parse_phandle(node, "mediatek,smi", 0);
+ of_node_put(node);
+ if (!smi_np)
+ return -ENODEV;
+
+ regmap[1] = device_node_to_regmap(smi_np);
+ num_regmaps++;
+ of_node_put(smi_np);
+ if (IS_ERR(regmap[1]))
+ return dev_err_probe(dev, PTR_ERR(regmap[1]),
+ "%pOF: failed to get SMI regmap\n",
+ node);
+ } else {
+ regmap[1] = NULL;
+ }
+
+ node = of_find_node_with_property(np, "mediatek,infracfg-nao");
+ if (node) {
+ regmap[2] = syscon_regmap_lookup_by_phandle(node, "mediatek,infracfg-nao");
+ num_regmaps++;
+ of_node_put(node);
+ if (IS_ERR(regmap[2]))
+ return dev_err_probe(dev, PTR_ERR(regmap[2]),
+ "%pOF: failed to get infracfg regmap\n",
+ node);
+ } else {
+ regmap[2] = NULL;
+ }
+
+ scpsys->bus_prot = devm_kmalloc_array(dev, num_regmaps,
+ sizeof(*scpsys->bus_prot), GFP_KERNEL);
+ if (!scpsys->bus_prot)
+ return -ENOMEM;
+
+ for (i = 0, j = 0; i < ARRAY_SIZE(bp_blocks); i++) {
+ enum scpsys_bus_prot_block bp_type;
+
+ if (!regmap[i])
+ continue;
+
+ bp_type = bp_blocks[i];
+ scpsys->bus_prot_index[bp_type] = j;
+ scpsys->bus_prot[j] = regmap[i];
+
+ j++;
+ }
+
+ return 0;
+}
+
+static int scpsys_get_bus_protection(struct device *dev, struct scpsys *scpsys)
+{
+ const struct scpsys_soc_data *soc = scpsys->soc_data;
+ struct device_node *np = dev->of_node;
+ int i, num_handles;
+
+ num_handles = of_count_phandle_with_args(np, "access-controllers", NULL);
+ if (num_handles < 0 || num_handles != soc->num_bus_prot_blocks)
+ return dev_err_probe(dev, -EINVAL,
+ "Cannot get access controllers: expected %u, got %d\n",
+ soc->num_bus_prot_blocks, num_handles);
+
+ scpsys->bus_prot = devm_kmalloc_array(dev, soc->num_bus_prot_blocks,
+ sizeof(*scpsys->bus_prot), GFP_KERNEL);
+ if (!scpsys->bus_prot)
+ return -ENOMEM;
+
+ for (i = 0; i < soc->num_bus_prot_blocks; i++) {
+ enum scpsys_bus_prot_block bp_type;
+ struct device_node *node;
+
+ node = of_parse_phandle(np, "access-controllers", i);
+ if (!node)
+ return -EINVAL;
+
+ /*
+ * Index the bus protection regmaps by block type so that we don't
+ * have to find the right one with a loop every time a power
+ * sequence runs.
+ */
+ bp_type = soc->bus_prot_blocks[i];
+ scpsys->bus_prot_index[bp_type] = i;
+
+ scpsys->bus_prot[i] = device_node_to_regmap(node);
+ of_node_put(node);
+ if (IS_ERR_OR_NULL(scpsys->bus_prot[i]))
+ return dev_err_probe(dev, scpsys->bus_prot[i] ?
+ PTR_ERR(scpsys->bus_prot[i]) : -ENXIO,
+ "Cannot get regmap for access controller %d\n", i);
+ }
+
+ return 0;
+}
+
static const struct of_device_id scpsys_of_match[] = {
{
.compatible = "mediatek,mt6735-power-controller",
@@ -701,6 +964,14 @@ static int scpsys_probe(struct platform_device *pdev)
return PTR_ERR(scpsys->base);
}
+ if (of_find_property(np, "access-controllers", NULL))
+ ret = scpsys_get_bus_protection(dev, scpsys);
+ else
+ ret = scpsys_get_bus_protection_legacy(dev, scpsys);
+
+ if (ret)
+ return ret;
+
ret = -ENODEV;
for_each_available_child_of_node(np, node) {
struct generic_pm_domain *domain;
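A worked example of the new indexing scheme, using the MT8365 block list (INFRA, INFRA_NAO, SMI) from its header above: once scpsys_get_bus_protection() has run, the per-domain regmap pointers that were removed are replaced by a constant-time double lookup.

/* For MT8365 the index table reads:
 *   scpsys->bus_prot_index[BUS_PROT_BLOCK_INFRA]     == 0
 *   scpsys->bus_prot_index[BUS_PROT_BLOCK_INFRA_NAO] == 1
 *   scpsys->bus_prot_index[BUS_PROT_BLOCK_SMI]       == 2
 * so e.g. the INFRA_NAO status regmap resolves as: */
struct regmap *nao =
	scpsys->bus_prot[scpsys->bus_prot_index[BUS_PROT_BLOCK_INFRA_NAO]];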
diff --git a/drivers/pmdomain/mediatek/mtk-pm-domains.h b/drivers/pmdomain/mediatek/mtk-pm-domains.h
index 7085fa2976e9..b2e3dee03831 100644
--- a/drivers/pmdomain/mediatek/mtk-pm-domains.h
+++ b/drivers/pmdomain/mediatek/mtk-pm-domains.h
@@ -13,6 +13,9 @@
#define MTK_SCPD_EXT_BUCK_ISO BIT(6)
#define MTK_SCPD_HAS_INFRA_NAO BIT(7)
#define MTK_SCPD_STRICT_BUS_PROTECTION BIT(8)
+#define MTK_SCPD_SRAM_PDN_INVERTED BIT(9)
+#define MTK_SCPD_MODEM_PWRSEQ BIT(10)
+#define MTK_SCPD_SKIP_RESET_B BIT(11)
#define MTK_SCPD_CAPS(_scpd, _x) ((_scpd)->data->caps & (_x))
#define SPM_VDE_PWR_CON 0x0210
@@ -50,30 +53,43 @@ enum scpsys_bus_prot_flags {
BUS_PROT_REG_UPDATE = BIT(1),
BUS_PROT_IGNORE_CLR_ACK = BIT(2),
BUS_PROT_INVERTED = BIT(3),
- BUS_PROT_COMPONENT_INFRA = BIT(4),
- BUS_PROT_COMPONENT_SMI = BIT(5),
- BUS_PROT_STA_COMPONENT_INFRA_NAO = BIT(6),
};
-#define _BUS_PROT(_set_clr_mask, _set, _clr, _sta_mask, _sta, _flags) { \
- .bus_prot_set_clr_mask = (_set_clr_mask), \
- .bus_prot_set = _set, \
- .bus_prot_clr = _clr, \
- .bus_prot_sta_mask = (_sta_mask), \
- .bus_prot_sta = _sta, \
- .flags = _flags \
+enum scpsys_bus_prot_block {
+ BUS_PROT_BLOCK_INFRA,
+ BUS_PROT_BLOCK_INFRA_NAO,
+ BUS_PROT_BLOCK_SMI,
+ BUS_PROT_BLOCK_COUNT,
+};
+
+#define _BUS_PROT_STA(_hwip, _sta_hwip, _set_clr_mask, _set, _clr, \
+ _sta_mask, _sta, _flags) \
+ { \
+ .bus_prot_block = BUS_PROT_BLOCK_##_hwip, \
+ .bus_prot_sta_block = BUS_PROT_BLOCK_##_sta_hwip, \
+ .bus_prot_set_clr_mask = (_set_clr_mask), \
+ .bus_prot_set = _set, \
+ .bus_prot_clr = _clr, \
+ .bus_prot_sta_mask = (_sta_mask), \
+ .bus_prot_sta = _sta, \
+ .flags = _flags \
}
-#define BUS_PROT_WR(_hwip, _mask, _set, _clr, _sta) \
- _BUS_PROT(_mask, _set, _clr, _mask, _sta, BUS_PROT_COMPONENT_##_hwip)
+#define _BUS_PROT(_hwip, _set_clr_mask, _set, _clr, _sta_mask, \
+ _sta, _flags) \
+ _BUS_PROT_STA(_hwip, _hwip, _set_clr_mask, _set, _clr, \
+ _sta_mask, _sta, _flags)
-#define BUS_PROT_WR_IGN(_hwip, _mask, _set, _clr, _sta) \
- _BUS_PROT(_mask, _set, _clr, _mask, _sta, \
- BUS_PROT_COMPONENT_##_hwip | BUS_PROT_IGNORE_CLR_ACK)
+#define BUS_PROT_WR(_hwip, _mask, _set, _clr, _sta) \
+ _BUS_PROT(_hwip, _mask, _set, _clr, _mask, _sta, 0)
-#define BUS_PROT_UPDATE(_hwip, _mask, _set, _clr, _sta) \
- _BUS_PROT(_mask, _set, _clr, _mask, _sta, \
- BUS_PROT_COMPONENT_##_hwip | BUS_PROT_REG_UPDATE)
+#define BUS_PROT_WR_IGN(_hwip, _mask, _set, _clr, _sta) \
+ _BUS_PROT(_hwip, _mask, _set, _clr, _mask, _sta, \
+ BUS_PROT_IGNORE_CLR_ACK)
+
+#define BUS_PROT_UPDATE(_hwip, _mask, _set, _clr, _sta) \
+ _BUS_PROT(_hwip, _mask, _set, _clr, _mask, _sta, \
+ BUS_PROT_REG_UPDATE)
#define BUS_PROT_INFRA_UPDATE_TOPAXI(_mask) \
BUS_PROT_UPDATE(INFRA, _mask, \
@@ -82,6 +98,8 @@ enum scpsys_bus_prot_flags {
INFRA_TOPAXI_PROTECTSTA1)
struct scpsys_bus_prot_data {
+ u8 bus_prot_block;
+ u8 bus_prot_sta_block;
u32 bus_prot_set_clr_mask;
u32 bus_prot_set;
u32 bus_prot_clr;
@@ -91,6 +109,22 @@ struct scpsys_bus_prot_data {
};
/**
+ * enum scpsys_rtff_type - Type of RTFF Hardware for power domain
+ * @SCPSYS_RTFF_NONE: RTFF HW not present or domain not RTFF managed
+ * @SCPSYS_RTFF_TYPE_GENERIC: Non-CPU, peripheral-generic RTFF HW
+ * @SCPSYS_RTFF_TYPE_PCIE_PHY: PCI-Express PHY specific RTFF HW
+ * @SCPSYS_RTFF_TYPE_STOR_UFS: Storage (UFS) specific RTFF HW
+ * @SCPSYS_RTFF_TYPE_MAX: Number of supported RTFF HW Types
+ */
+enum scpsys_rtff_type {
+ SCPSYS_RTFF_NONE = 0,
+ SCPSYS_RTFF_TYPE_GENERIC,
+ SCPSYS_RTFF_TYPE_PCIE_PHY,
+ SCPSYS_RTFF_TYPE_STOR_UFS,
+ SCPSYS_RTFF_TYPE_MAX
+};
+
+/**
* struct scpsys_domain_data - scp domain data for power on/off flow
* @name: The name of the power domain.
* @sta_mask: The mask for power on/off status bit.
@@ -100,6 +134,7 @@ struct scpsys_bus_prot_data {
* @ext_buck_iso_offs: The offset for external buck isolation
* @ext_buck_iso_mask: The mask for external buck isolation
* @caps: The flag for active wake-up action.
+ * @rtff_type: The power domain RTFF HW type
* @bp_cfg: bus protection configuration for any subsystem
*/
struct scpsys_domain_data {
@@ -111,6 +146,7 @@ struct scpsys_domain_data {
int ext_buck_iso_offs;
u32 ext_buck_iso_mask;
u16 caps;
+ enum scpsys_rtff_type rtff_type;
const struct scpsys_bus_prot_data bp_cfg[SPM_MAX_BUS_PROT_DATA];
int pwr_sta_offs;
int pwr_sta2nd_offs;
@@ -119,6 +155,8 @@ struct scpsys_domain_data {
struct scpsys_soc_data {
const struct scpsys_domain_data *domains_data;
int num_domains;
+ enum scpsys_bus_prot_block *bus_prot_blocks;
+ int num_bus_prot_blocks;
};
#endif /* __SOC_MEDIATEK_MTK_PM_DOMAINS_H */
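To make the reworked macro layering concrete: BUS_PROT_WR(INFRA, _mask, _set, _clr, _sta) now goes through _BUS_PROT() and _BUS_PROT_STA() and expands, mechanically, to the initializer below.

{
	.bus_prot_block		= BUS_PROT_BLOCK_INFRA,
	.bus_prot_sta_block	= BUS_PROT_BLOCK_INFRA,
	.bus_prot_set_clr_mask	= (_mask),
	.bus_prot_set		= _set,
	.bus_prot_clr		= _clr,
	.bus_prot_sta_mask	= (_mask),
	.bus_prot_sta		= _sta,
	.flags			= 0
}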
diff --git a/drivers/pmdomain/qcom/rpmpd.c b/drivers/pmdomain/qcom/rpmpd.c
index 833c46944600..f8580ec0f737 100644
--- a/drivers/pmdomain/qcom/rpmpd.c
+++ b/drivers/pmdomain/qcom/rpmpd.c
@@ -631,12 +631,12 @@ static struct rpmpd ssc_mx_rwsm0_vfl = {
};
static struct rpmpd *mdm9607_rpmpds[] = {
- [MDM9607_VDDCX] = &cx_s3a_lvl,
- [MDM9607_VDDCX_AO] = &cx_s3a_lvl_ao,
- [MDM9607_VDDCX_VFL] = &cx_s3a_vfl,
- [MDM9607_VDDMX] = &mx_l12a_lvl,
- [MDM9607_VDDMX_AO] = &mx_l12a_lvl_ao,
- [MDM9607_VDDMX_VFL] = &mx_l12a_vfl,
+ [RPMPD_VDDCX] = &cx_s3a_lvl,
+ [RPMPD_VDDCX_AO] = &cx_s3a_lvl_ao,
+ [RPMPD_VDDCX_VFL] = &cx_s3a_vfl,
+ [RPMPD_VDDMX] = &mx_l12a_lvl,
+ [RPMPD_VDDMX_AO] = &mx_l12a_lvl_ao,
+ [RPMPD_VDDMX_VFL] = &mx_l12a_vfl,
};
static const struct rpmpd_desc mdm9607_desc = {
@@ -646,9 +646,9 @@ static const struct rpmpd_desc mdm9607_desc = {
};
static struct rpmpd *msm8226_rpmpds[] = {
- [MSM8226_VDDCX] = &cx_s1a_corner,
- [MSM8226_VDDCX_AO] = &cx_s1a_corner_ao,
- [MSM8226_VDDCX_VFC] = &cx_s1a_vfc,
+ [RPMPD_VDDCX] = &cx_s1a_corner,
+ [RPMPD_VDDCX_AO] = &cx_s1a_corner_ao,
+ [RPMPD_VDDCX_VFC] = &cx_s1a_vfc,
};
static const struct rpmpd_desc msm8226_desc = {
@@ -675,11 +675,11 @@ static const struct rpmpd_desc msm8939_desc = {
};
static struct rpmpd *msm8916_rpmpds[] = {
- [MSM8916_VDDCX] = &cx_s1a_corner,
- [MSM8916_VDDCX_AO] = &cx_s1a_corner_ao,
- [MSM8916_VDDCX_VFC] = &cx_s1a_vfc,
- [MSM8916_VDDMX] = &mx_l3a_corner,
- [MSM8916_VDDMX_AO] = &mx_l3a_corner_ao,
+ [RPMPD_VDDCX] = &cx_s1a_corner,
+ [RPMPD_VDDCX_AO] = &cx_s1a_corner_ao,
+ [RPMPD_VDDCX_VFC] = &cx_s1a_vfc,
+ [RPMPD_VDDMX] = &mx_l3a_corner,
+ [RPMPD_VDDMX_AO] = &mx_l3a_corner_ao,
};
static const struct rpmpd_desc msm8916_desc = {
@@ -689,11 +689,11 @@ static const struct rpmpd_desc msm8916_desc = {
};
static struct rpmpd *msm8917_rpmpds[] = {
- [MSM8917_VDDCX] = &cx_s2a_lvl,
- [MSM8917_VDDCX_AO] = &cx_s2a_lvl_ao,
- [MSM8917_VDDCX_VFL] = &cx_s2a_vfl,
- [MSM8917_VDDMX] = &mx_l3a_lvl,
- [MSM8917_VDDMX_AO] = &mx_l3a_lvl_ao,
+ [RPMPD_VDDCX] = &cx_s2a_lvl,
+ [RPMPD_VDDCX_AO] = &cx_s2a_lvl_ao,
+ [RPMPD_VDDCX_VFL] = &cx_s2a_vfl,
+ [RPMPD_VDDMX] = &mx_l3a_lvl,
+ [RPMPD_VDDMX_AO] = &mx_l3a_lvl_ao,
};
static const struct rpmpd_desc msm8917_desc = {
@@ -747,12 +747,12 @@ static const struct rpmpd_desc msm8974pro_pma8084_desc = {
};
static struct rpmpd *msm8976_rpmpds[] = {
- [MSM8976_VDDCX] = &cx_s2a_lvl,
- [MSM8976_VDDCX_AO] = &cx_s2a_lvl_ao,
- [MSM8976_VDDCX_VFL] = &cx_rwsc2_vfl,
- [MSM8976_VDDMX] = &mx_s6a_lvl,
- [MSM8976_VDDMX_AO] = &mx_s6a_lvl_ao,
- [MSM8976_VDDMX_VFL] = &mx_rwsm6_vfl,
+ [RPMPD_VDDCX] = &cx_s2a_lvl,
+ [RPMPD_VDDCX_AO] = &cx_s2a_lvl_ao,
+ [RPMPD_VDDCX_VFL] = &cx_rwsc2_vfl,
+ [RPMPD_VDDMX] = &mx_s6a_lvl,
+ [RPMPD_VDDMX_AO] = &mx_s6a_lvl_ao,
+ [RPMPD_VDDMX_VFL] = &mx_rwsm6_vfl,
};
static const struct rpmpd_desc msm8976_desc = {
@@ -796,16 +796,16 @@ static const struct rpmpd_desc msm8996_desc = {
};
static struct rpmpd *msm8998_rpmpds[] = {
- [MSM8998_VDDCX] = &cx_rwcx0_lvl,
- [MSM8998_VDDCX_AO] = &cx_rwcx0_lvl_ao,
- [MSM8998_VDDCX_VFL] = &cx_rwcx0_vfl,
- [MSM8998_VDDMX] = &mx_rwmx0_lvl,
- [MSM8998_VDDMX_AO] = &mx_rwmx0_lvl_ao,
- [MSM8998_VDDMX_VFL] = &mx_rwmx0_vfl,
- [MSM8998_SSCCX] = &ssc_cx_rwsc0_lvl,
- [MSM8998_SSCCX_VFL] = &ssc_cx_rwsc0_vfl,
- [MSM8998_SSCMX] = &ssc_mx_rwsm0_lvl,
- [MSM8998_SSCMX_VFL] = &ssc_mx_rwsm0_vfl,
+ [RPMPD_VDDCX] = &cx_rwcx0_lvl,
+ [RPMPD_VDDCX_AO] = &cx_rwcx0_lvl_ao,
+ [RPMPD_VDDCX_VFL] = &cx_rwcx0_vfl,
+ [RPMPD_VDDMX] = &mx_rwmx0_lvl,
+ [RPMPD_VDDMX_AO] = &mx_rwmx0_lvl_ao,
+ [RPMPD_VDDMX_VFL] = &mx_rwmx0_vfl,
+ [RPMPD_SSCCX] = &ssc_cx_rwsc0_lvl,
+ [RPMPD_SSCCX_VFL] = &ssc_cx_rwsc0_vfl,
+ [RPMPD_SSCMX] = &ssc_mx_rwsm0_lvl,
+ [RPMPD_SSCMX_VFL] = &ssc_mx_rwsm0_vfl,
};
static const struct rpmpd_desc msm8998_desc = {
@@ -831,11 +831,11 @@ static const struct rpmpd_desc qcs404_desc = {
};
static struct rpmpd *qm215_rpmpds[] = {
- [QM215_VDDCX] = &cx_s1a_lvl,
- [QM215_VDDCX_AO] = &cx_s1a_lvl_ao,
- [QM215_VDDCX_VFL] = &cx_s1a_vfl,
- [QM215_VDDMX] = &mx_l2a_lvl,
- [QM215_VDDMX_AO] = &mx_l2a_lvl_ao,
+ [RPMPD_VDDCX] = &cx_s1a_lvl,
+ [RPMPD_VDDCX_AO] = &cx_s1a_lvl_ao,
+ [RPMPD_VDDCX_VFL] = &cx_s1a_vfl,
+ [RPMPD_VDDMX] = &mx_l2a_lvl,
+ [RPMPD_VDDMX_AO] = &mx_l2a_lvl_ao,
};
static const struct rpmpd_desc qm215_desc = {
@@ -845,16 +845,16 @@ static const struct rpmpd_desc qm215_desc = {
};
static struct rpmpd *sdm660_rpmpds[] = {
- [SDM660_VDDCX] = &cx_rwcx0_lvl,
- [SDM660_VDDCX_AO] = &cx_rwcx0_lvl_ao,
- [SDM660_VDDCX_VFL] = &cx_rwcx0_vfl,
- [SDM660_VDDMX] = &mx_rwmx0_lvl,
- [SDM660_VDDMX_AO] = &mx_rwmx0_lvl_ao,
- [SDM660_VDDMX_VFL] = &mx_rwmx0_vfl,
- [SDM660_SSCCX] = &ssc_cx_rwlc0_lvl,
- [SDM660_SSCCX_VFL] = &ssc_cx_rwlc0_vfl,
- [SDM660_SSCMX] = &ssc_mx_rwlm0_lvl,
- [SDM660_SSCMX_VFL] = &ssc_mx_rwlm0_vfl,
+ [RPMPD_VDDCX] = &cx_rwcx0_lvl,
+ [RPMPD_VDDCX_AO] = &cx_rwcx0_lvl_ao,
+ [RPMPD_VDDCX_VFL] = &cx_rwcx0_vfl,
+ [RPMPD_VDDMX] = &mx_rwmx0_lvl,
+ [RPMPD_VDDMX_AO] = &mx_rwmx0_lvl_ao,
+ [RPMPD_VDDMX_VFL] = &mx_rwmx0_vfl,
+ [RPMPD_SSCCX] = &ssc_cx_rwlc0_lvl,
+ [RPMPD_SSCCX_VFL] = &ssc_cx_rwlc0_vfl,
+ [RPMPD_SSCMX] = &ssc_mx_rwlm0_lvl,
+ [RPMPD_SSCMX_VFL] = &ssc_mx_rwlm0_vfl,
};
static const struct rpmpd_desc sdm660_desc = {
@@ -881,12 +881,12 @@ static const struct rpmpd_desc sm6115_desc = {
};
static struct rpmpd *sm6125_rpmpds[] = {
- [SM6125_VDDCX] = &cx_rwcx0_lvl,
- [SM6125_VDDCX_AO] = &cx_rwcx0_lvl_ao,
- [SM6125_VDDCX_VFL] = &cx_rwcx0_vfl,
- [SM6125_VDDMX] = &mx_rwmx0_lvl,
- [SM6125_VDDMX_AO] = &mx_rwmx0_lvl_ao,
- [SM6125_VDDMX_VFL] = &mx_rwmx0_vfl,
+ [RPMPD_VDDCX] = &cx_rwcx0_lvl,
+ [RPMPD_VDDCX_AO] = &cx_rwcx0_lvl_ao,
+ [RPMPD_VDDCX_VFL] = &cx_rwcx0_vfl,
+ [RPMPD_VDDMX] = &mx_rwmx0_lvl,
+ [RPMPD_VDDMX_AO] = &mx_rwmx0_lvl_ao,
+ [RPMPD_VDDMX_VFL] = &mx_rwmx0_vfl,
};
static const struct rpmpd_desc sm6125_desc = {
diff --git a/drivers/pmdomain/renesas/rcar-gen4-sysc.c b/drivers/pmdomain/renesas/rcar-gen4-sysc.c
index 5aa7fa1df8fe..7434bf42d215 100644
--- a/drivers/pmdomain/renesas/rcar-gen4-sysc.c
+++ b/drivers/pmdomain/renesas/rcar-gen4-sysc.c
@@ -251,6 +251,7 @@ static int __init rcar_gen4_sysc_pd_setup(struct rcar_gen4_sysc_pd *pd)
genpd->detach_dev = cpg_mssr_detach_dev;
}
+ genpd->flags |= GENPD_FLAG_NO_STAY_ON;
genpd->power_off = rcar_gen4_sysc_pd_power_off;
genpd->power_on = rcar_gen4_sysc_pd_power_on;
diff --git a/drivers/pmdomain/renesas/rcar-sysc.c b/drivers/pmdomain/renesas/rcar-sysc.c
index 4b310c1d35fa..d8a8ffcde38d 100644
--- a/drivers/pmdomain/renesas/rcar-sysc.c
+++ b/drivers/pmdomain/renesas/rcar-sysc.c
@@ -241,6 +241,7 @@ static int __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd)
}
}
+ genpd->flags |= GENPD_FLAG_NO_STAY_ON;
genpd->power_off = rcar_sysc_pd_power_off;
genpd->power_on = rcar_sysc_pd_power_on;
@@ -342,7 +343,7 @@ struct rcar_pm_domains {
};
static struct genpd_onecell_data *rcar_sysc_onecell_data;
-static struct device_node *rcar_sysc_onecell_np;
+static struct device_node *rcar_sysc_onecell_np __initdata = NULL;
static int __init rcar_sysc_pd_init(void)
{
diff --git a/drivers/pmdomain/renesas/rmobile-sysc.c b/drivers/pmdomain/renesas/rmobile-sysc.c
index 8eedc9a1d825..a6bf7295e909 100644
--- a/drivers/pmdomain/renesas/rmobile-sysc.c
+++ b/drivers/pmdomain/renesas/rmobile-sysc.c
@@ -100,7 +100,8 @@ static void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd)
struct generic_pm_domain *genpd = &rmobile_pd->genpd;
struct dev_power_governor *gov = rmobile_pd->gov;
- genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
+ genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP |
+ GENPD_FLAG_NO_STAY_ON;
genpd->attach_dev = cpg_mstp_attach_dev;
genpd->detach_dev = cpg_mstp_detach_dev;
diff --git a/drivers/pmdomain/rockchip/Kconfig b/drivers/pmdomain/rockchip/Kconfig
index 218d43186e5b..17f2e6fe86b6 100644
--- a/drivers/pmdomain/rockchip/Kconfig
+++ b/drivers/pmdomain/rockchip/Kconfig
@@ -3,6 +3,7 @@ if ARCH_ROCKCHIP || COMPILE_TEST
config ROCKCHIP_PM_DOMAINS
bool "Rockchip generic power domain"
+ default ARCH_ROCKCHIP
depends on PM
depends on HAVE_ARM_SMCCC_DISCOVERY
depends on REGULATOR
diff --git a/drivers/pmdomain/rockchip/pm-domains.c b/drivers/pmdomain/rockchip/pm-domains.c
index 242570c505fb..1955c6d453e4 100644
--- a/drivers/pmdomain/rockchip/pm-domains.c
+++ b/drivers/pmdomain/rockchip/pm-domains.c
@@ -865,7 +865,7 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
pd->genpd.power_on = rockchip_pd_power_on;
pd->genpd.attach_dev = rockchip_pd_attach_dev;
pd->genpd.detach_dev = rockchip_pd_detach_dev;
- pd->genpd.flags = GENPD_FLAG_PM_CLK;
+ pd->genpd.flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_NO_STAY_ON;
if (pd_info->active_wakeup)
pd->genpd.flags |= GENPD_FLAG_ACTIVE_WAKEUP;
pm_genpd_init(&pd->genpd, NULL,
diff --git a/drivers/pmdomain/thead/th1520-pm-domains.c b/drivers/pmdomain/thead/th1520-pm-domains.c
index 9040b698e7f7..d7cb9633c7c8 100644
--- a/drivers/pmdomain/thead/th1520-pm-domains.c
+++ b/drivers/pmdomain/thead/th1520-pm-domains.c
@@ -173,6 +173,18 @@ static int th1520_pd_pwrseq_gpu_init(struct device *dev)
adev);
}
+static int th1520_pd_reboot_init(struct device *dev,
+ struct th1520_aon_chan *aon_chan)
+{
+ struct auxiliary_device *adev;
+
+ adev = devm_auxiliary_device_create(dev, "reboot", aon_chan);
+ if (!adev)
+ return -ENODEV;
+
+ return 0;
+}
+
static int th1520_pd_probe(struct platform_device *pdev)
{
struct generic_pm_domain **domains;
@@ -235,6 +247,10 @@ static int th1520_pd_probe(struct platform_device *pdev)
if (ret)
goto err_clean_provider;
+ ret = th1520_pd_reboot_init(dev, aon_chan);
+ if (ret)
+ goto err_clean_provider;
+
return 0;
err_clean_provider:
diff --git a/drivers/pmdomain/ti/ti_sci_pm_domains.c b/drivers/pmdomain/ti/ti_sci_pm_domains.c
index 82df7e44250b..e5d1934f78d9 100644
--- a/drivers/pmdomain/ti/ti_sci_pm_domains.c
+++ b/drivers/pmdomain/ti/ti_sci_pm_domains.c
@@ -200,6 +200,23 @@ static bool ti_sci_pm_idx_exists(struct ti_sci_genpd_provider *pd_provider, u32
return false;
}
+static bool ti_sci_pm_pd_is_on(struct ti_sci_genpd_provider *pd_provider,
+ int pd_idx)
+{
+ bool is_on;
+ int ret;
+
+ if (!pd_provider->ti_sci->ops.dev_ops.is_on)
+ return false;
+
+ ret = pd_provider->ti_sci->ops.dev_ops.is_on(pd_provider->ti_sci,
+ pd_idx, NULL, &is_on);
+ if (ret)
+ return false;
+
+ return is_on;
+}
+
static int ti_sci_pm_domain_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -231,6 +248,8 @@ static int ti_sci_pm_domain_probe(struct platform_device *pdev)
index, &args)) {
if (args.args_count >= 1 && args.np == dev->of_node) {
+ bool is_on;
+
of_node_put(args.np);
if (args.args[0] > max_id) {
max_id = args.args[0];
@@ -264,7 +283,10 @@ static int ti_sci_pm_domain_probe(struct platform_device *pdev)
pd_provider->ti_sci->ops.pm_ops.set_latency_constraint)
pd->pd.domain.ops.suspend = ti_sci_pd_suspend;
- pm_genpd_init(&pd->pd, NULL, true);
+ is_on = ti_sci_pm_pd_is_on(pd_provider,
+ pd->idx);
+
+ pm_genpd_init(&pd->pd, NULL, !is_on);
list_add(&pd->node, &pd_provider->pd_list);
} else {
diff --git a/drivers/pnp/isapnp/core.c b/drivers/pnp/isapnp/core.c
index d2ff76e74a05..219f96f2aaaf 100644
--- a/drivers/pnp/isapnp/core.c
+++ b/drivers/pnp/isapnp/core.c
@@ -27,6 +27,7 @@
#include <linux/init.h>
#include <linux/isapnp.h>
#include <linux/mutex.h>
+#include <linux/string_choices.h>
#include <asm/io.h>
#include "../base.h"
@@ -1037,7 +1038,7 @@ static int __init isapnp_init(void)
if (cards)
printk(KERN_INFO
"isapnp: %i Plug & Play card%s detected total\n", cards,
- cards > 1 ? "s" : "");
+ str_plural(cards));
else
printk(KERN_INFO "isapnp: No Plug & Play card found\n");
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 77ea3129c708..8248895ca903 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -225,6 +225,13 @@ config POWER_RESET_ST
help
Reset support for STMicroelectronics boards.
+config POWER_RESET_TH1520_AON
+ tristate "T-Head TH1520 AON firmware poweroff and reset driver"
+ depends on TH1520_PM_DOMAINS
+ help
+ This driver supports power-off and reset operations for T-Head
+ TH1520 SoCs running the AON firmware.
+
config POWER_RESET_TORADEX_EC
tristate "Toradex Embedded Controller power-off and reset driver"
depends on ARCH_MXC || COMPILE_TEST
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index b7c2b5940be9..51da87e05ce7 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o
obj-$(CONFIG_POWER_RESET_REGULATOR) += regulator-poweroff.o
obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o
obj-$(CONFIG_POWER_RESET_ST) += st-poweroff.o
+obj-$(CONFIG_POWER_RESET_TH1520_AON) += th1520-aon-reboot.o
obj-$(CONFIG_POWER_RESET_TORADEX_EC) += tdx-ec-poweroff.o
obj-$(CONFIG_POWER_RESET_TPS65086) += tps65086-restart.o
obj-$(CONFIG_POWER_RESET_VERSATILE) += arm-versatile-reboot.o
diff --git a/drivers/power/reset/th1520-aon-reboot.c b/drivers/power/reset/th1520-aon-reboot.c
new file mode 100644
index 000000000000..ec249667a0ff
--- /dev/null
+++ b/drivers/power/reset/th1520-aon-reboot.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * T-HEAD TH1520 AON Firmware Reboot Driver
+ *
+ * Copyright (c) 2025 Icenowy Zheng <uwu@icenowy.me>
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/firmware/thead/thead,th1520-aon.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+
+#define TH1520_AON_REBOOT_PRIORITY 200
+
+struct th1520_aon_msg_empty_body {
+ struct th1520_aon_rpc_msg_hdr hdr;
+ u16 reserved[12];
+} __packed __aligned(1);
+
+static int th1520_aon_pwroff_handler(struct sys_off_data *data)
+{
+ struct th1520_aon_chan *aon_chan = data->cb_data;
+ struct th1520_aon_msg_empty_body msg = {};
+
+ msg.hdr.svc = TH1520_AON_RPC_SVC_WDG;
+ msg.hdr.func = TH1520_AON_WDG_FUNC_POWER_OFF;
+ msg.hdr.size = TH1520_AON_RPC_MSG_NUM;
+
+ th1520_aon_call_rpc(aon_chan, &msg);
+
+ return NOTIFY_DONE;
+}
+
+static int th1520_aon_restart_handler(struct sys_off_data *data)
+{
+ struct th1520_aon_chan *aon_chan = data->cb_data;
+ struct th1520_aon_msg_empty_body msg = {};
+
+ msg.hdr.svc = TH1520_AON_RPC_SVC_WDG;
+ msg.hdr.func = TH1520_AON_WDG_FUNC_RESTART;
+ msg.hdr.size = TH1520_AON_RPC_MSG_NUM;
+
+ th1520_aon_call_rpc(aon_chan, &msg);
+
+ return NOTIFY_DONE;
+}
+
+static int th1520_aon_reboot_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct device *dev = &adev->dev;
+ int ret;
+
+ /* Expect struct th1520_aon_chan to be passed via platform_data */
+ ret = devm_register_sys_off_handler(dev, SYS_OFF_MODE_POWER_OFF,
+ TH1520_AON_REBOOT_PRIORITY,
+ th1520_aon_pwroff_handler,
+ adev->dev.platform_data);
+
+ if (ret) {
+ dev_err(dev, "Failed to register power off handler\n");
+ return ret;
+ }
+
+ ret = devm_register_sys_off_handler(dev, SYS_OFF_MODE_RESTART,
+ TH1520_AON_REBOOT_PRIORITY,
+ th1520_aon_restart_handler,
+ adev->dev.platform_data);
+
+ if (ret) {
+ dev_err(dev, "Failed to register restart handler\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct auxiliary_device_id th1520_aon_reboot_id_table[] = {
+ { .name = "th1520_pm_domains.reboot" },
+ {},
+};
+MODULE_DEVICE_TABLE(auxiliary, th1520_aon_reboot_id_table);
+
+static struct auxiliary_driver th1520_aon_reboot_driver = {
+ .driver = {
+ .name = "th1520-aon-reboot",
+ },
+ .probe = th1520_aon_reboot_probe,
+ .id_table = th1520_aon_reboot_id_table,
+};
+module_auxiliary_driver(th1520_aon_reboot_driver);
+
+MODULE_AUTHOR("Icenowy Zheng <uwu@icenowy.me>");
+MODULE_DESCRIPTION("T-HEAD TH1520 AON-firmware-based reboot driver");
+MODULE_LICENSE("GPL");
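This driver is the consumer half of the pairing established in the TH1520 pmdomain change earlier in this series: devm_auxiliary_device_create(dev, "reboot", aon_chan) there stores the AON mailbox channel as platform data, which the sys-off handlers here receive back as cb_data. In sketch form (dev_get_platdata() is the generic accessor; this driver passes adev->dev.platform_data directly):

/* Producer (th1520-pm-domains.c): the third argument becomes platform data. */
adev = devm_auxiliary_device_create(dev, "reboot", aon_chan);

/* Consumer (this driver): the same pointer, recovered at probe() time. */
struct th1520_aon_chan *aon_chan = dev_get_platdata(&adev->dev);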
diff --git a/drivers/power/supply/88pm860x_charger.c b/drivers/power/supply/88pm860x_charger.c
index 2b9fcb7e71d7..8d99c6ff72ed 100644
--- a/drivers/power/supply/88pm860x_charger.c
+++ b/drivers/power/supply/88pm860x_charger.c
@@ -284,8 +284,8 @@ static int set_charging_fsm(struct pm860x_charger_info *info)
{
struct power_supply *psy;
union power_supply_propval data;
- unsigned char fsm_state[][16] = { "init", "discharge", "precharge",
- "fastcharge",
+ static const unsigned char fsm_state[][16] = {
+ "init", "discharge", "precharge", "fastcharge",
};
int ret;
int vbatt;
@@ -313,7 +313,7 @@ static int set_charging_fsm(struct pm860x_charger_info *info)
dev_dbg(info->dev, "Entering FSM:%s, Charger:%s, Battery:%s, "
"Allowed:%d\n",
- &fsm_state[info->state][0],
+ fsm_state[info->state],
(info->online) ? "online" : "N/A",
(info->present) ? "present" : "N/A", info->allowed);
dev_dbg(info->dev, "set_charging_fsm:vbatt:%d(mV)\n", vbatt);
@@ -385,7 +385,7 @@ static int set_charging_fsm(struct pm860x_charger_info *info)
}
dev_dbg(info->dev,
"Out FSM:%s, Charger:%s, Battery:%s, Allowed:%d\n",
- &fsm_state[info->state][0],
+ fsm_state[info->state],
(info->online) ? "online" : "N/A",
(info->present) ? "present" : "N/A", info->allowed);
mutex_unlock(&info->lock);
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index 79ddb006e2da..dca4be23ee70 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -35,6 +35,9 @@ config APM_POWER
Say Y here to enable support APM status emulation using
battery class devices.
+config ADC_BATTERY_HELPER
+ tristate
+
config GENERIC_ADC_BATTERY
tristate "Generic battery support using IIO"
depends on IIO
@@ -244,6 +247,18 @@ config BATTERY_INGENIC
This driver can also be built as a module. If so, the module will be
called ingenic-battery.
+config BATTERY_INTEL_DC_TI
+ tristate "Intel Bay / Cherry Trail Dollar Cove TI battery driver"
+ depends on INTEL_SOC_PMIC_CHTDC_TI && INTEL_DC_TI_ADC && IIO && ACPI
+ select ADC_BATTERY_HELPER
+ help
+ Choose this option if you want to monitor battery status on Intel
+ Bay Trail / Cherry Trail tablets using the Dollar Cove TI PMIC's
+ coulomb-counter as a fuel-gauge.
+
+ To compile this driver as a module, choose M here: the module will be
+ called intel_dc_ti_battery.
+
config BATTERY_IPAQ_MICRO
tristate "iPAQ Atmel Micro ASIC battery driver"
depends on MFD_IPAQ_MICRO
@@ -767,6 +782,13 @@ config CHARGER_BQ2515X
rail, ADC for battery and system monitoring, and push-button
controller.
+config CHARGER_BQ257XX
+ tristate "TI BQ257XX battery charger family"
+ depends on MFD_BQ257XX
+ help
+ Say Y to enable support for the TI BQ257XX family of battery
+ charging integrated circuits.
+
config CHARGER_BQ25890
tristate "TI BQ25890 battery charger driver"
depends on I2C
@@ -1043,6 +1065,7 @@ config CHARGER_SURFACE
config BATTERY_UG3105
tristate "uPI uG3105 battery monitor driver"
depends on I2C
+ select ADC_BATTERY_HELPER
help
Battery monitor driver for the uPI uG3105 battery monitor.
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index f943c9150b32..99a820d38197 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -7,6 +7,7 @@ power_supply-$(CONFIG_LEDS_TRIGGERS) += power_supply_leds.o
obj-$(CONFIG_POWER_SUPPLY) += power_supply.o
obj-$(CONFIG_POWER_SUPPLY_HWMON) += power_supply_hwmon.o
+obj-$(CONFIG_ADC_BATTERY_HELPER) += adc-battery-helper.o
obj-$(CONFIG_GENERIC_ADC_BATTERY) += generic-adc-battery.o
obj-$(CONFIG_APM_POWER) += apm_power.o
@@ -41,6 +42,7 @@ obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o
obj-$(CONFIG_BATTERY_SAMSUNG_SDI) += samsung-sdi-battery.o
obj-$(CONFIG_BATTERY_COLLIE) += collie_battery.o
obj-$(CONFIG_BATTERY_INGENIC) += ingenic-battery.o
+obj-$(CONFIG_BATTERY_INTEL_DC_TI) += intel_dc_ti_battery.o
obj-$(CONFIG_BATTERY_IPAQ_MICRO) += ipaq_micro_battery.o
obj-$(CONFIG_BATTERY_WM97XX) += wm97xx_battery.o
obj-$(CONFIG_BATTERY_SBS) += sbs-battery.o
@@ -97,6 +99,7 @@ obj-$(CONFIG_CHARGER_BQ24190) += bq24190_charger.o
obj-$(CONFIG_CHARGER_BQ24257) += bq24257_charger.o
obj-$(CONFIG_CHARGER_BQ24735) += bq24735-charger.o
obj-$(CONFIG_CHARGER_BQ2515X) += bq2515x_charger.o
+obj-$(CONFIG_CHARGER_BQ257XX) += bq257xx_charger.o
obj-$(CONFIG_CHARGER_BQ25890) += bq25890_charger.o
obj-$(CONFIG_CHARGER_BQ25980) += bq25980_charger.o
obj-$(CONFIG_CHARGER_BQ256XX) += bq256xx_charger.o
diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c
index b00c84fbc33c..e5202a7b6209 100644
--- a/drivers/power/supply/ab8500_btemp.c
+++ b/drivers/power/supply/ab8500_btemp.c
@@ -667,7 +667,8 @@ static int ab8500_btemp_bind(struct device *dev, struct device *master,
/* Create a work queue for the btemp */
di->btemp_wq =
- alloc_workqueue("ab8500_btemp_wq", WQ_MEM_RECLAIM, 0);
+ alloc_workqueue("ab8500_btemp_wq", WQ_MEM_RECLAIM | WQ_PERCPU,
+ 0);
if (di->btemp_wq == NULL) {
dev_err(dev, "failed to create work queue\n");
return -ENOMEM;
diff --git a/drivers/power/supply/adc-battery-helper.c b/drivers/power/supply/adc-battery-helper.c
new file mode 100644
index 000000000000..6e0f5b6d73d7
--- /dev/null
+++ b/drivers/power/supply/adc-battery-helper.c
@@ -0,0 +1,327 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Helper for batteries with accurate current and voltage measurement, but
+ * without temperature measurement or without a "resistance-temp-table".
+ *
+ * Some fuel-gauges are not full-featured autonomous devices: they offer
+ * accurate current and voltage measurements, but their coulomb-counters
+ * are intended to work together with an always-on micro-controller
+ * monitoring the fuel-gauge.
+ *
+ * This adc-battery-helper code offers open-circuit-voltage (OCV) estimation
+ * and, through that, capacity estimation for devices where such
+ * limited-functionality fuel-gauges are exposed directly to Linux.
+ *
+ * This helper requires the hardware to provide accurate battery current_now
+ * and voltage_now measurements; on top of those readings it provides the
+ * following properties:
+ *
+ * POWER_SUPPLY_PROP_STATUS
+ * POWER_SUPPLY_PROP_VOLTAGE_OCV
+ * POWER_SUPPLY_PROP_VOLTAGE_NOW
+ * POWER_SUPPLY_PROP_CURRENT_NOW
+ * POWER_SUPPLY_PROP_CAPACITY
+ *
+ * It can also optionally provide the following properties, assuming an
+ * always-present system-scope battery, which allows direct use of
+ * adc_battery_helper_get_prop() in this common case:
+ * POWER_SUPPLY_PROP_PRESENT
+ * POWER_SUPPLY_PROP_SCOPE
+ *
+ * Using this helper is as simple as:
+ *
+ * 1. Embed a struct adc_battery_helper; it MUST be the first member of
+ * the battery driver's data struct.
+ * 2. Use adc_battery_helper_props[] or add the above properties to
+ * the list of properties in power_supply_desc
+ * 3. Call adc_battery_helper_init() after registering the power_supply and
+ * before returning from the probe() function
+ * 4. Use adc_battery_helper_get_prop() as the power-supply's get_property()
+ * method, or call it for the above properties.
+ * 5. Use adc_battery_helper_external_power_changed() as the power-supply's
+ * external_power_changed() method or call it from that method.
+ * 6. Use adc_battery_helper_[suspend|resume]() as suspend-resume methods or
+ * call them from the driver's suspend-resume methods.
+ *
+ * The provided get_voltage_and_current_now() method will be called by this
+ * helper at adc_battery_helper_init() time and periodically afterwards.
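+ *
+ * A minimal usage sketch (hypothetical driver; my_battery, my_fuel_gauge
+ * and my_fg_read_uv_ua() are illustrative names, not real API):
+ *
+ *   struct my_battery {
+ *       struct adc_battery_helper helper; // must be the first member
+ *       struct my_fuel_gauge *fg;
+ *   };
+ *
+ *   static int my_get_volt_and_curr(struct power_supply *psy,
+ *                                   int *volt_uv, int *curr_ua)
+ *   {
+ *       struct my_battery *bat = power_supply_get_drvdata(psy);
+ *
+ *       return my_fg_read_uv_ua(bat->fg, volt_uv, curr_ua);
+ *   }
+ *
+ *   and in probe(), after devm_power_supply_register():
+ *
+ *   return adc_battery_helper_init(&bat->helper, bat->psy,
+ *                                  my_get_volt_and_curr, NULL);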
+ *
+ * Copyright (c) 2021-2025 Hans de Goede <hansg@kernel.org>
+ */
+
+#include <linux/cleanup.h>
+#include <linux/devm-helpers.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mutex.h>
+#include <linux/power_supply.h>
+#include <linux/workqueue.h>
+
+#include "adc-battery-helper.h"
+
+#define MOV_AVG_WINDOW_SIZE ADC_BAT_HELPER_MOV_AVG_WINDOW_SIZE
+#define INIT_POLL_TIME (5 * HZ)
+#define POLL_TIME (30 * HZ)
+#define SETTLE_TIME (1 * HZ)
+
+#define INIT_POLL_COUNT 30
+
+#define CURR_HYST_UA 65000
+
+#define LOW_BAT_UV 3700000
+#define FULL_BAT_HYST_UV 38000
+
+#define AMBIENT_TEMP_CELSIUS 25
+
+static int adc_battery_helper_get_status(struct adc_battery_helper *help)
+{
+ int full_uv =
+ help->psy->battery_info->constant_charge_voltage_max_uv - FULL_BAT_HYST_UV;
+
+ if (help->curr_ua > CURR_HYST_UA)
+ return POWER_SUPPLY_STATUS_CHARGING;
+
+ if (help->curr_ua < -CURR_HYST_UA)
+ return POWER_SUPPLY_STATUS_DISCHARGING;
+
+ if (help->supplied) {
+ bool full;
+
+ if (help->charge_finished)
+ full = gpiod_get_value_cansleep(help->charge_finished);
+ else
+ full = help->ocv_avg_uv > full_uv;
+
+ if (full)
+ return POWER_SUPPLY_STATUS_FULL;
+ }
+
+ return POWER_SUPPLY_STATUS_NOT_CHARGING;
+}
+
+static void adc_battery_helper_work(struct work_struct *work)
+{
+ struct adc_battery_helper *help = container_of(work, struct adc_battery_helper,
+ work.work);
+ int i, curr_diff_ua, volt_diff_uv, res_mohm, ret, win_size;
+ struct device *dev = help->psy->dev.parent;
+ int volt_uv, prev_volt_uv = help->volt_uv;
+ int curr_ua, prev_curr_ua = help->curr_ua;
+ bool prev_supplied = help->supplied;
+ int prev_status = help->status;
+
+ guard(mutex)(&help->lock);
+
+ ret = help->get_voltage_and_current_now(help->psy, &volt_uv, &curr_ua);
+ if (ret)
+ goto out;
+
+ help->volt_uv = volt_uv;
+ help->curr_ua = curr_ua;
+
+ help->ocv_uv[help->ocv_avg_index] =
+ help->volt_uv - help->curr_ua * help->intern_res_avg_mohm / 1000;
+ dev_dbg(dev, "volt-now: %d, curr-now: %d, volt-ocv: %d\n",
+ help->volt_uv, help->curr_ua, help->ocv_uv[help->ocv_avg_index]);
+ help->ocv_avg_index = (help->ocv_avg_index + 1) % MOV_AVG_WINDOW_SIZE;
+ help->poll_count++;
+
+ help->ocv_avg_uv = 0;
+ win_size = min(help->poll_count, MOV_AVG_WINDOW_SIZE);
+ for (i = 0; i < win_size; i++)
+ help->ocv_avg_uv += help->ocv_uv[i];
+ help->ocv_avg_uv /= win_size;
+
+ help->supplied = power_supply_am_i_supplied(help->psy);
+ help->status = adc_battery_helper_get_status(help);
+ if (help->status == POWER_SUPPLY_STATUS_FULL)
+ help->capacity = 100;
+ else
+ help->capacity = power_supply_batinfo_ocv2cap(help->psy->battery_info,
+ help->ocv_avg_uv,
+ AMBIENT_TEMP_CELSIUS);
+
+ /*
+ * Skip internal resistance calc on charger [un]plug and
+ * when the battery is almost empty (voltage low).
+ */
+ if (help->supplied != prev_supplied ||
+ help->volt_uv < LOW_BAT_UV ||
+ help->poll_count < 2)
+ goto out;
+
+ /*
+ * Assuming that the OCV voltage does not change significantly
+ * between 2 polls, then we can calculate the internal resistance
+ * on a significant current change by attributing all voltage
+ * change between the 2 readings to the internal resistance.
+ */
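+ /*
+ * Worked example with illustrative numbers: a 500 mA current step
+ * (curr_diff_ua = 500000) combined with a 150 mV voltage step
+ * (volt_diff_uv = 150000) yields 150000 * 1000 / 500000 = 300 mOhm.
+ */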
+ curr_diff_ua = abs(help->curr_ua - prev_curr_ua);
+ if (curr_diff_ua < CURR_HYST_UA)
+ goto out;
+
+ volt_diff_uv = abs(help->volt_uv - prev_volt_uv);
+ res_mohm = volt_diff_uv * 1000 / curr_diff_ua;
+
+ if ((res_mohm < (help->intern_res_avg_mohm * 2 / 3)) ||
+ (res_mohm > (help->intern_res_avg_mohm * 4 / 3))) {
+ dev_dbg(dev, "Ignoring outlier internal resistance %d mOhm\n", res_mohm);
+ goto out;
+ }
+
+ dev_dbg(dev, "Internal resistance %d mOhm\n", res_mohm);
+
+ help->intern_res_mohm[help->intern_res_avg_index] = res_mohm;
+ help->intern_res_avg_index = (help->intern_res_avg_index + 1) % MOV_AVG_WINDOW_SIZE;
+ help->intern_res_poll_count++;
+
+ help->intern_res_avg_mohm = 0;
+ win_size = min(help->intern_res_poll_count, MOV_AVG_WINDOW_SIZE);
+ for (i = 0; i < win_size; i++)
+ help->intern_res_avg_mohm += help->intern_res_mohm[i];
+ help->intern_res_avg_mohm /= win_size;
+
+out:
+ queue_delayed_work(system_percpu_wq, &help->work,
+ (help->poll_count <= INIT_POLL_COUNT) ?
+ INIT_POLL_TIME : POLL_TIME);
+
+ if (help->status != prev_status)
+ power_supply_changed(help->psy);
+}
+
+const enum power_supply_property adc_battery_helper_properties[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_OCV,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_SCOPE,
+};
+EXPORT_SYMBOL_GPL(adc_battery_helper_properties);
+
+static_assert(ARRAY_SIZE(adc_battery_helper_properties) ==
+ ADC_HELPER_NUM_PROPERTIES);
+
+int adc_battery_helper_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct adc_battery_helper *help = power_supply_get_drvdata(psy);
+ int dummy, ret = 0;
+
+ /*
+ * Avoid racing with adc_battery_helper_work() while it is updating
+ * variables and avoid calling get_voltage_and_current_now() reentrantly.
+ */
+ guard(mutex)(&help->lock);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = help->status;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = help->get_voltage_and_current_now(psy, &val->intval, &dummy);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_OCV:
+ val->intval = help->ocv_avg_uv;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ ret = help->get_voltage_and_current_now(psy, &dummy, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = help->capacity;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_SCOPE:
+ val->intval = POWER_SUPPLY_SCOPE_SYSTEM;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adc_battery_helper_get_property);
+
+void adc_battery_helper_external_power_changed(struct power_supply *psy)
+{
+ struct adc_battery_helper *help = power_supply_get_drvdata(psy);
+
+ dev_dbg(help->psy->dev.parent, "external power changed\n");
+ mod_delayed_work(system_percpu_wq, &help->work, SETTLE_TIME);
+}
+EXPORT_SYMBOL_GPL(adc_battery_helper_external_power_changed);
+
+static void adc_battery_helper_start_work(struct adc_battery_helper *help)
+{
+ help->poll_count = 0;
+ help->ocv_avg_index = 0;
+
+ queue_delayed_work(system_percpu_wq, &help->work, 0);
+ flush_delayed_work(&help->work);
+}
+
+int adc_battery_helper_init(struct adc_battery_helper *help, struct power_supply *psy,
+ adc_battery_helper_get_func get_voltage_and_current_now,
+ struct gpio_desc *charge_finished_gpio)
+{
+ struct device *dev = psy->dev.parent;
+ int ret;
+
+ help->psy = psy;
+ help->get_voltage_and_current_now = get_voltage_and_current_now;
+ help->charge_finished = charge_finished_gpio;
+
+ ret = devm_mutex_init(dev, &help->lock);
+ if (ret)
+ return ret;
+
+ ret = devm_delayed_work_autocancel(dev, &help->work, adc_battery_helper_work);
+ if (ret)
+ return ret;
+
+ if (!help->psy->battery_info ||
+ help->psy->battery_info->factory_internal_resistance_uohm == -EINVAL ||
+ help->psy->battery_info->constant_charge_voltage_max_uv == -EINVAL ||
+ !psy->battery_info->ocv_table[0]) {
+ dev_err(dev, "error required properties are missing\n");
+ return -ENODEV;
+ }
+
+ /* Use the provided internal resistance as the starting point (in milli-ohm) */
+ help->intern_res_avg_mohm =
+ help->psy->battery_info->factory_internal_resistance_uohm / 1000;
+ /* Also add it to the internal resistance moving average window */
+ help->intern_res_mohm[0] = help->intern_res_avg_mohm;
+ help->intern_res_avg_index = 1;
+ help->intern_res_poll_count = 1;
+
+ adc_battery_helper_start_work(help);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adc_battery_helper_init);
+
+int adc_battery_helper_suspend(struct device *dev)
+{
+ struct adc_battery_helper *help = dev_get_drvdata(dev);
+
+ cancel_delayed_work_sync(&help->work);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adc_battery_helper_suspend);
+
+int adc_battery_helper_resume(struct device *dev)
+{
+ struct adc_battery_helper *help = dev_get_drvdata(dev);
+
+ adc_battery_helper_start_work(help);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adc_battery_helper_resume);
+
+MODULE_AUTHOR("Hans de Goede <hansg@kernel.org>");
+MODULE_DESCRIPTION("ADC battery capacity estimation helper");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/adc-battery-helper.h b/drivers/power/supply/adc-battery-helper.h
new file mode 100644
index 000000000000..4e42181c8983
--- /dev/null
+++ b/drivers/power/supply/adc-battery-helper.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Helper for batteries with accurate current and voltage measurement, but
+ * without temperature measurement or without a "resistance-temp-table".
+ * Copyright (c) 2021-2025 Hans de Goede <hansg@kernel.org>
+ */
+
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
+#define ADC_BAT_HELPER_MOV_AVG_WINDOW_SIZE 8
+
+struct power_supply;
+struct gpio_desc;
+
+/*
+ * The adc battery helper code needs voltage- and current-now to be sampled as
+ * close to each other (in sample-time) as possible. A single getter function is
+ * used to allow the battery driver to handle this in the best way possible.
+ */
+typedef int (*adc_battery_helper_get_func)(struct power_supply *psy, int *volt, int *curr);
+
+struct adc_battery_helper {
+ struct power_supply *psy;
+ struct gpio_desc *charge_finished;
+ struct delayed_work work;
+ struct mutex lock;
+ adc_battery_helper_get_func get_voltage_and_current_now;
+ int ocv_uv[ADC_BAT_HELPER_MOV_AVG_WINDOW_SIZE]; /* micro-volt */
+ int intern_res_mohm[ADC_BAT_HELPER_MOV_AVG_WINDOW_SIZE]; /* milli-ohm */
+ int poll_count;
+ int ocv_avg_index;
+ int ocv_avg_uv; /* micro-volt */
+ int intern_res_poll_count;
+ int intern_res_avg_index;
+ int intern_res_avg_mohm; /* milli-ohm */
+ int volt_uv; /* micro-volt */
+ int curr_ua; /* micro-ampere */
+ int capacity; /* percent */
+ int status;
+ bool supplied;
+};
+
+extern const enum power_supply_property adc_battery_helper_properties[];
+/* Must be a #define as ARRAY_SIZE() cannot be used on an extern array; size asserted in adc-battery-helper.c */
+#define ADC_HELPER_NUM_PROPERTIES 7
+
+int adc_battery_helper_init(struct adc_battery_helper *help, struct power_supply *psy,
+ adc_battery_helper_get_func get_voltage_and_current_now,
+ struct gpio_desc *charge_finished_gpio);
+/*
+ * The below functions can be directly used as power-supply / suspend-resume
+ * callbacks. They cast the power_supply_get_drvdata() / dev_get_drvdata() data
+ * directly to struct adc_battery_helper. Therefore struct adc_battery_helper
+ * MUST be the first member of the battery driver's data struct.
+ */
+int adc_battery_helper_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val);
+void adc_battery_helper_external_power_changed(struct power_supply *psy);
+int adc_battery_helper_suspend(struct device *dev);
+int adc_battery_helper_resume(struct device *dev);
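+
+/*
+ * Sketch of wiring these up directly (hypothetical pm-ops name; assumes
+ * struct adc_battery_helper is the first member of the drvdata struct):
+ *
+ *   static DEFINE_SIMPLE_DEV_PM_OPS(my_bat_pm_ops, adc_battery_helper_suspend,
+ *                                   adc_battery_helper_resume);
+ */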
diff --git a/drivers/power/supply/bq2415x_charger.c b/drivers/power/supply/bq2415x_charger.c
index 917c26ee56bc..b50a28b9dd38 100644
--- a/drivers/power/supply/bq2415x_charger.c
+++ b/drivers/power/supply/bq2415x_charger.c
@@ -842,7 +842,7 @@ static int bq2415x_notifier_call(struct notifier_block *nb,
if (bq->automode < 1)
return NOTIFY_OK;
- mod_delayed_work(system_wq, &bq->work, 0);
+ mod_delayed_work(system_percpu_wq, &bq->work, 0);
return NOTIFY_OK;
}
@@ -1516,7 +1516,7 @@ static int bq2415x_power_supply_init(struct bq2415x_device *bq)
ret = bq2415x_detect_revision(bq);
if (ret < 0)
- strcpy(revstr, "unknown");
+ strscpy(revstr, "unknown", sizeof(revstr));
else
sprintf(revstr, "1.%d", ret);
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index e1510c7fdab3..ed0ceae8d90b 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -1467,7 +1467,7 @@ static void bq24190_charger_external_power_changed(struct power_supply *psy)
* too low default 500mA iinlim. Delay setting the input-current-limit
* for 300ms to avoid this.
*/
- queue_delayed_work(system_wq, &bdi->input_current_limit_work,
+ queue_delayed_work(system_percpu_wq, &bdi->input_current_limit_work,
msecs_to_jiffies(300));
}
diff --git a/drivers/power/supply/bq257xx_charger.c b/drivers/power/supply/bq257xx_charger.c
new file mode 100644
index 000000000000..02c7d8b61e82
--- /dev/null
+++ b/drivers/power/supply/bq257xx_charger.c
@@ -0,0 +1,755 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * BQ257XX Battery Charger Driver
+ * Copyright (C) 2025 Chris Morgan <macromorgan@hotmail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/bq257xx.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+/* Forward declaration of driver data. */
+struct bq257xx_chg;
+
+/**
+ * struct bq257xx_chip_info - chip specific routines
+ * @bq257xx_hw_init: init function for hw
+ * @bq257xx_hw_shutdown: shutdown function for hw
+ * @bq257xx_get_state: get and update state of hardware
+ * @bq257xx_set_ichg: set maximum charge current (in uA)
+ * @bq257xx_set_vbatreg: set maximum charge voltage (in uV)
+ * @bq257xx_set_iindpm: set maximum input current (in uA)
+ */
+struct bq257xx_chip_info {
+ int (*bq257xx_hw_init)(struct bq257xx_chg *pdata);
+ void (*bq257xx_hw_shutdown)(struct bq257xx_chg *pdata);
+ int (*bq257xx_get_state)(struct bq257xx_chg *pdata);
+ int (*bq257xx_set_ichg)(struct bq257xx_chg *pdata, int ichg);
+ int (*bq257xx_set_vbatreg)(struct bq257xx_chg *pdata, int vbatreg);
+ int (*bq257xx_set_iindpm)(struct bq257xx_chg *pdata, int iindpm);
+};
+
+/**
+ * struct bq257xx_chg - driver data for charger
+ * @chip: hw specific functions
+ * @bq: parent MFD device
+ * @charger: power supply device
+ * @online: charger input is present
+ * @fast_charge: charger is in fast charge mode
+ * @pre_charge: charger is in pre-charge mode
+ * @ov_fault: charger reports over voltage fault
+ * @batoc_fault: charger reports battery over current fault
+ * @oc_fault: charger reports over current fault
+ * @usb_type: USB type reported from parent power supply
+ * @supplied: Status of parent power supply
+ * @iindpm_max: maximum input current limit (uA)
+ * @vbat_max: maximum charge voltage (uV)
+ * @ichg_max: maximum charge current (uA)
+ * @vsys_min: minimum system voltage (uV)
+ */
+struct bq257xx_chg {
+ const struct bq257xx_chip_info *chip;
+ struct bq257xx_device *bq;
+ struct power_supply *charger;
+ bool online;
+ bool fast_charge;
+ bool pre_charge;
+ bool ov_fault;
+ bool batoc_fault;
+ bool oc_fault;
+ int usb_type;
+ int supplied;
+ u32 iindpm_max;
+ u32 vbat_max;
+ u32 ichg_max;
+ u32 vsys_min;
+};
+
+/**
+ * bq25703_get_state() - Get the current state of the device
+ * @pdata: driver platform data
+ *
+ * Get the current state of the charger. Check if the charger is
+ * powered, what kind of charge state (if any) the device is in,
+ * and if there are any active faults.
+ *
+ * Return: Returns 0 on success, or error on failure to read device.
+ */
+static int bq25703_get_state(struct bq257xx_chg *pdata)
+{
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(pdata->bq->regmap, BQ25703_CHARGER_STATUS, &reg);
+ if (ret)
+ return ret;
+
+ pdata->online = reg & BQ25703_STS_AC_STAT;
+ pdata->fast_charge = reg & BQ25703_STS_IN_FCHRG;
+ pdata->pre_charge = reg & BQ25703_STS_IN_PCHRG;
+ pdata->ov_fault = reg & BQ25703_STS_FAULT_ACOV;
+ pdata->batoc_fault = reg & BQ25703_STS_FAULT_BATOC;
+ pdata->oc_fault = reg & BQ25703_STS_FAULT_ACOC;
+
+ return 0;
+}
+
+/**
+ * bq25703_get_min_vsys() - Get the minimum system voltage
+ * @pdata: driver platform data
+ * @intval: value for minimum voltage
+ *
+ * Return: Returns 0 on success or error on failure to read.
+ */
+static int bq25703_get_min_vsys(struct bq257xx_chg *pdata, int *intval)
+{
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(pdata->bq->regmap, BQ25703_MIN_VSYS,
+ &reg);
+ if (ret)
+ return ret;
+
+ reg = FIELD_GET(BQ25703_MINVSYS_MASK, reg);
+ *intval = (reg * BQ25703_MINVSYS_STEP_UV) + BQ25703_MINVSYS_MIN_UV;
+
+ return ret;
+}
+
+/**
+ * bq25703_set_min_vsys() - Set the minimum system voltage
+ * @pdata: driver platform data
+ * @vsys: voltage value to set in uV.
+ *
+ * This function takes a requested minimum system voltage value, clamps
+ * it between the charger's minimum supported value and the user-defined
+ * minimum system value, and then writes the result to the
+ * appropriate register.
+ *
+ * Return: Returns 0 on success or error if an error occurs.
+ */
+static int bq25703_set_min_vsys(struct bq257xx_chg *pdata, int vsys)
+{
+ unsigned int reg;
+ int vsys_min = pdata->vsys_min;
+
+ vsys = clamp(vsys, BQ25703_MINVSYS_MIN_UV, vsys_min);
+ reg = ((vsys - BQ25703_MINVSYS_MIN_UV) / BQ25703_MINVSYS_STEP_UV);
+ reg = FIELD_PREP(BQ25703_MINVSYS_MASK, reg);
+
+ return regmap_write(pdata->bq->regmap, BQ25703_MIN_VSYS,
+ reg);
+}
+
+/**
+ * bq25703_get_cur() - Get the reported current from the battery
+ * @pdata: driver platform data
+ * @intval: value of reported battery current
+ *
+ * Read the reported current from the battery. Since the raw value is
+ * always positive, the sign is set to negative when discharging.
+ *
+ * Return: Returns 0 on success or error if unable to read value.
+ */
+static int bq25703_get_cur(struct bq257xx_chg *pdata, int *intval)
+{
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(pdata->bq->regmap, BQ25703_ADCIBAT_CHG, &reg);
+ if (ret < 0)
+ return ret;
+
+ if (pdata->online)
+ *intval = FIELD_GET(BQ25703_ADCIBAT_CHG_MASK, reg) *
+ BQ25703_ADCIBAT_CHG_STEP_UA;
+ else
+ *intval = -(FIELD_GET(BQ25703_ADCIBAT_DISCHG_MASK, reg) *
+ BQ25703_ADCIBAT_DIS_STEP_UA);
+
+ return ret;
+}
+
+/**
+ * bq25703_get_ichg_cur() - Get the maximum reported charge current
+ * @pdata: driver platform data
+ * @intval: value of maximum reported charge current
+ *
+ * Get the programmed maximum charge current limit from the charger.
+ *
+ * Return: Returns 0 on success or error if unable to read value.
+ */
+static int bq25703_get_ichg_cur(struct bq257xx_chg *pdata, int *intval)
+{
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(pdata->bq->regmap, BQ25703_CHARGE_CURRENT, &reg);
+ if (ret)
+ return ret;
+
+ *intval = FIELD_GET(BQ25703_ICHG_MASK, reg) * BQ25703_ICHG_STEP_UA;
+
+ return ret;
+}
+
+/**
+ * bq25703_set_ichg_cur() - Set the maximum charge current
+ * @pdata: driver platform data
+ * @ichg: current value to set in uA.
+ *
+ * This function takes a requested maximum charge current value, clamps
+ * it between the charger's minimum supported value and the user-defined
+ * maximum charge current, and then writes the result to the
+ * appropriate register.
+ *
+ * Return: Returns 0 on success or error if an error occurs.
+ */
+static int bq25703_set_ichg_cur(struct bq257xx_chg *pdata, int ichg)
+{
+ unsigned int reg;
+ int ichg_max = pdata->ichg_max;
+
+ ichg = clamp(ichg, BQ25703_ICHG_MIN_UA, ichg_max);
+ reg = FIELD_PREP(BQ25703_ICHG_MASK, (ichg / BQ25703_ICHG_STEP_UA));
+
+ return regmap_write(pdata->bq->regmap, BQ25703_CHARGE_CURRENT,
+ reg);
+}
+
+/**
+ * bq25703_get_chrg_volt() - Get the maximum set charge voltage
+ * @pdata: driver platform data
+ * @intval: maximum charge voltage value
+ *
+ * Return: Returns 0 on success or error if unable to read value.
+ */
+static int bq25703_get_chrg_volt(struct bq257xx_chg *pdata, int *intval)
+{
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(pdata->bq->regmap, BQ25703_MAX_CHARGE_VOLT,
+ &reg);
+ if (ret)
+ return ret;
+
+ *intval = FIELD_GET(BQ25703_MAX_CHARGE_VOLT_MASK, reg) *
+ BQ25703_VBATREG_STEP_UV;
+
+ return ret;
+}
+
+/**
+ * bq25703_set_chrg_volt() - Set the maximum charge voltage
+ * @pdata: driver platform data
+ * @vbat: voltage value to set in uV.
+ *
+ * This function takes a requested maximum charge voltage value, clamps
+ * it between the charger's minimum supported value and the user-defined
+ * maximum charge voltage, and then writes the result to the
+ * appropriate register.
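+ *
+ * Illustrative example (the register step here is hypothetical): with a
+ * 16 mV (16000 uV) step, requesting 8400000 uV would program
+ * 8400000 / 16000 = 525 into the BQ25703_MAX_CHARGE_VOLT_MASK field.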
+ *
+ * Return: Returns 0 on success or error if an error occurs.
+ */
+static int bq25703_set_chrg_volt(struct bq257xx_chg *pdata, int vbat)
+{
+ unsigned int reg;
+ int vbat_max = pdata->vbat_max;
+
+ vbat = clamp(vbat, BQ25703_VBATREG_MIN_UV, vbat_max);
+
+ reg = FIELD_PREP(BQ25703_MAX_CHARGE_VOLT_MASK,
+ (vbat / BQ25703_VBATREG_STEP_UV));
+
+ return regmap_write(pdata->bq->regmap, BQ25703_MAX_CHARGE_VOLT,
+ reg);
+}
+
+/**
+ * bq25703_get_iindpm() - Get the maximum set input current
+ * @pdata: driver platform data
+ * @intval: maximum input current value
+ *
+ * Read the actual input current limit from the device into intval.
+ * This can differ from the programmed value because of autonomous
+ * functions that may adjust it (none are currently enabled), which is
+ * why a different register is read here than the one written.
+ *
+ * Return: Returns 0 on success or error if unable to read register
+ * value.
+ */
+static int bq25703_get_iindpm(struct bq257xx_chg *pdata, int *intval)
+{
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(pdata->bq->regmap, BQ25703_IIN_DPM, &reg);
+ if (ret)
+ return ret;
+
+ reg = FIELD_GET(BQ25703_IINDPM_MASK, reg);
+ *intval = (reg * BQ25703_IINDPM_STEP_UA) + BQ25703_IINDPM_OFFSET_UA;
+
+ return ret;
+}
+
+/**
+ * bq25703_set_iindpm() - Set the maximum input current
+ * @pdata: driver platform data
+ * @iindpm: current value in uA.
+ *
+ * This function takes a requested maximum input current value, clamps
+ * it between the charger's minimum supported value and the user-defined
+ * maximum input current, and then writes the result to the
+ * appropriate register.
+ *
+ * Return: Returns 0 on success or error if an error occurs.
+ */
+static int bq25703_set_iindpm(struct bq257xx_chg *pdata, int iindpm)
+{
+ unsigned int reg;
+ int iindpm_max = pdata->iindpm_max;
+
+ iindpm = clamp(iindpm, BQ25703_IINDPM_MIN_UA, iindpm_max);
+
+ reg = ((iindpm - BQ25703_IINDPM_OFFSET_UA) / BQ25703_IINDPM_STEP_UA);
+
+ return regmap_write(pdata->bq->regmap, BQ25703_IIN_HOST,
+ FIELD_PREP(BQ25703_IINDPM_MASK, reg));
+}
+
+/**
+ * bq25703_get_vbat() - Get the reported voltage from the battery
+ * @pdata: driver platform data
+ * @intval: value of reported battery voltage
+ *
+ * Read the battery voltage into intval.
+ *
+ * Return: Returns 0 on success or error if unable to read value.
+ */
+static int bq25703_get_vbat(struct bq257xx_chg *pdata, int *intval)
+{
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(pdata->bq->regmap, BQ25703_ADCVSYSVBAT, &reg);
+ if (ret)
+ return ret;
+
+ reg = FIELD_GET(BQ25703_ADCVBAT_MASK, reg);
+ *intval = (reg * BQ25703_ADCVSYSVBAT_STEP) + BQ25703_ADCVSYSVBAT_OFFSET_UV;
+
+ return ret;
+}
+
+/**
+ * bq25703_hw_init() - Set all the required registers to init the charger
+ * @pdata: driver platform data
+ *
+ * Initialize the BQ25703 by first disabling the watchdog timer (which
+ * shuts off the charger in the absence of periodic writes). Then, set
+ * the charge current, charge voltage, minimum system voltage, and
+ * input current limit. Disable low power mode to allow ADCs and
+ * interrupts. Enable the ADC, start the ADC, set the ADC scale to
+ * full, and enable each individual ADC channel.
+ *
+ * Return: Returns 0 on success or error code on error.
+ */
+static int bq25703_hw_init(struct bq257xx_chg *pdata)
+{
+ struct regmap *regmap = pdata->bq->regmap;
+ int ret = 0;
+
+ regmap_update_bits(regmap, BQ25703_CHARGE_OPTION_0,
+ BQ25703_WDTMR_ADJ_MASK,
+ FIELD_PREP(BQ25703_WDTMR_ADJ_MASK,
+ BQ25703_WDTMR_DISABLE));
+
+ ret = pdata->chip->bq257xx_set_ichg(pdata, pdata->ichg_max);
+ if (ret)
+ return ret;
+
+ ret = pdata->chip->bq257xx_set_vbatreg(pdata, pdata->vbat_max);
+ if (ret)
+ return ret;
+
+ ret = bq25703_set_min_vsys(pdata, pdata->vsys_min);
+ if (ret)
+ return ret;
+
+ ret = pdata->chip->bq257xx_set_iindpm(pdata, pdata->iindpm_max);
+ if (ret)
+ return ret;
+
+ /* Disable low power mode by writing 0 to the register. */
+ regmap_update_bits(regmap, BQ25703_CHARGE_OPTION_0,
+ BQ25703_EN_LWPWR, 0);
+
+ /* Enable the ADC. */
+ regmap_update_bits(regmap, BQ25703_ADC_OPTION,
+ BQ25703_ADC_CONV_EN, BQ25703_ADC_CONV_EN);
+
+ /* Start the ADC. */
+ regmap_update_bits(regmap, BQ25703_ADC_OPTION,
+ BQ25703_ADC_START, BQ25703_ADC_START);
+
+ /* Set the scale of the ADC. */
+ regmap_update_bits(regmap, BQ25703_ADC_OPTION,
+ BQ25703_ADC_FULL_SCALE, BQ25703_ADC_FULL_SCALE);
+
+ /* Enable each of the ADC channels available. */
+ regmap_update_bits(regmap, BQ25703_ADC_OPTION,
+ BQ25703_ADC_CH_MASK,
+ (BQ25703_ADC_CMPIN_EN | BQ25703_ADC_VBUS_EN |
+ BQ25703_ADC_PSYS_EN | BQ25703_ADC_IIN_EN |
+ BQ25703_ADC_IDCHG_EN | BQ25703_ADC_ICHG_EN |
+ BQ25703_ADC_VSYS_EN | BQ25703_ADC_VBAT_EN));
+
+ return ret;
+}
+
+/**
+ * bq25703_hw_shutdown() - Set registers for shutdown
+ * @pdata: driver platform data
+ *
+ * Enable low power mode for the device while in shutdown.
+ */
+static void bq25703_hw_shutdown(struct bq257xx_chg *pdata)
+{
+ regmap_update_bits(pdata->bq->regmap, BQ25703_CHARGE_OPTION_0,
+ BQ25703_EN_LWPWR, BQ25703_EN_LWPWR);
+}
+
+static int bq257xx_set_charger_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ const union power_supply_propval *val)
+{
+ struct bq257xx_chg *pdata = power_supply_get_drvdata(psy);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ return pdata->chip->bq257xx_set_iindpm(pdata, val->intval);
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+ return pdata->chip->bq257xx_set_vbatreg(pdata, val->intval);
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ return pdata->chip->bq257xx_set_ichg(pdata, val->intval);
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int bq257xx_get_charger_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct bq257xx_chg *pdata = power_supply_get_drvdata(psy);
+ int ret = 0;
+
+ ret = pdata->chip->bq257xx_get_state(pdata);
+ if (ret)
+ return ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ if (!pdata->online)
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ else if (pdata->fast_charge || pdata->pre_charge)
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+
+ case POWER_SUPPLY_PROP_HEALTH:
+ if (pdata->ov_fault || pdata->batoc_fault)
+ val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ else if (pdata->oc_fault)
+ val->intval = POWER_SUPPLY_HEALTH_OVERCURRENT;
+ else
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = "Texas Instruments";
+ break;
+
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = pdata->online;
+ break;
+
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ return bq25703_get_iindpm(pdata, &val->intval);
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+ return bq25703_get_chrg_volt(pdata, &val->intval);
+
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ return bq25703_get_cur(pdata, &val->intval);
+
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ return bq25703_get_vbat(pdata, &val->intval);
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ return bq25703_get_ichg_cur(pdata, &val->intval);
+
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+ return bq25703_get_min_vsys(pdata, &val->intval);
+
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ val->intval = pdata->usb_type;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static enum power_supply_property bq257xx_power_supply_props[] = {
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN,
+ POWER_SUPPLY_PROP_USB_TYPE,
+};
+
+static int bq257xx_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property prop)
+{
+ switch (prop) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * bq257xx_external_power_changed() - Handler for external power change
+ * @psy: Power supply data
+ *
+ * When the external power into the charger is changed, check the USB
+ * type so that it can be reported. Additionally, update the max input
+ * current and max charging current to the value reported if it is a
+ * USB PD charger, otherwise use the default value. Note that each time
+ * a charger is removed the max charge current register is erased, so
+ * it must be set again each time the input changes or the device will
+ * not charge.
+ */
+static void bq257xx_external_power_changed(struct power_supply *psy)
+{
+ struct bq257xx_chg *pdata = power_supply_get_drvdata(psy);
+ union power_supply_propval val;
+ int ret;
+ int imax = pdata->iindpm_max;
+
+ pdata->chip->bq257xx_get_state(pdata);
+
+ pdata->supplied = power_supply_am_i_supplied(pdata->charger);
+ if (pdata->supplied < 0)
+ return;
+
+ if (pdata->supplied == 0)
+ goto out;
+
+ ret = power_supply_get_property_from_supplier(psy,
+ POWER_SUPPLY_PROP_USB_TYPE,
+ &val);
+ if (ret)
+ return;
+
+ pdata->usb_type = val.intval;
+
+ if ((pdata->usb_type == POWER_SUPPLY_USB_TYPE_PD) ||
+ (pdata->usb_type == POWER_SUPPLY_USB_TYPE_PD_DRP) ||
+ (pdata->usb_type == POWER_SUPPLY_USB_TYPE_PD_PPS)) {
+ ret = power_supply_get_property_from_supplier(psy,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ &val);
+ if (ret)
+ return;
+
+ if (val.intval)
+ imax = val.intval;
+ }
+
+ if (pdata->supplied) {
+ pdata->chip->bq257xx_set_ichg(pdata, pdata->ichg_max);
+ pdata->chip->bq257xx_set_iindpm(pdata, imax);
+ pdata->chip->bq257xx_set_vbatreg(pdata, pdata->vbat_max);
+ }
+
+out:
+ power_supply_changed(psy);
+}
+
+static irqreturn_t bq257xx_irq_handler_thread(int irq, void *private)
+{
+ struct bq257xx_chg *pdata = private;
+
+ bq257xx_external_power_changed(pdata->charger);
+ return IRQ_HANDLED;
+}
+
+static const struct power_supply_desc bq257xx_power_supply_desc = {
+ .name = "bq257xx-charger",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .usb_types = BIT(POWER_SUPPLY_USB_TYPE_C) |
+ BIT(POWER_SUPPLY_USB_TYPE_PD) |
+ BIT(POWER_SUPPLY_USB_TYPE_PD_DRP) |
+ BIT(POWER_SUPPLY_USB_TYPE_PD_PPS) |
+ BIT(POWER_SUPPLY_USB_TYPE_UNKNOWN),
+ .properties = bq257xx_power_supply_props,
+ .num_properties = ARRAY_SIZE(bq257xx_power_supply_props),
+ .get_property = bq257xx_get_charger_property,
+ .set_property = bq257xx_set_charger_property,
+ .property_is_writeable = bq257xx_property_is_writeable,
+ .external_power_changed = bq257xx_external_power_changed,
+};
+
+static const struct bq257xx_chip_info bq25703_chip_info = {
+ .bq257xx_hw_init = &bq25703_hw_init,
+ .bq257xx_hw_shutdown = &bq25703_hw_shutdown,
+ .bq257xx_get_state = &bq25703_get_state,
+ .bq257xx_set_ichg = &bq25703_set_ichg_cur,
+ .bq257xx_set_vbatreg = &bq25703_set_chrg_volt,
+ .bq257xx_set_iindpm = &bq25703_set_iindpm,
+};
+
+/**
+ * bq257xx_parse_dt() - Parse the device tree for required properties
+ * @pdata: driver platform data
+ * @psy_cfg: power supply config data
+ * @dev: device struct
+ *
+ * Read the device tree to identify the minimum system voltage, the
+ * maximum charge current, the maximum charge voltage, and the maximum
+ * input current.
+ *
+ * Return: Returns 0 on success or error code on error.
+ */
+static int bq257xx_parse_dt(struct bq257xx_chg *pdata,
+ struct power_supply_config *psy_cfg, struct device *dev)
+{
+ struct power_supply_battery_info *bat_info;
+ int ret;
+
+ ret = power_supply_get_battery_info(pdata->charger,
+ &bat_info);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Unable to get battery info\n");
+
+ if ((bat_info->voltage_min_design_uv <= 0) ||
+ (bat_info->constant_charge_voltage_max_uv <= 0) ||
+ (bat_info->constant_charge_current_max_ua <= 0))
+ return dev_err_probe(dev, -EINVAL,
+ "Required bat info missing or invalid\n");
+
+ pdata->vsys_min = bat_info->voltage_min_design_uv;
+ pdata->vbat_max = bat_info->constant_charge_voltage_max_uv;
+ pdata->ichg_max = bat_info->constant_charge_current_max_ua;
+
+ power_supply_put_battery_info(pdata->charger, bat_info);
+
+ ret = device_property_read_u32(dev,
+ "input-current-limit-microamp",
+ &pdata->iindpm_max);
+ if (ret)
+ pdata->iindpm_max = BQ25703_IINDPM_DEFAULT_UA;
+
+ return 0;
+}
+
+static int bq257xx_charger_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bq257xx_device *bq = dev_get_drvdata(pdev->dev.parent);
+ struct bq257xx_chg *pdata;
+ struct power_supply_config psy_cfg = { };
+ int ret;
+
+ device_set_of_node_from_dev(dev, pdev->dev.parent);
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ pdata->bq = bq;
+ pdata->chip = &bq25703_chip_info;
+
+ platform_set_drvdata(pdev, pdata);
+
+ psy_cfg.drv_data = pdata;
+ psy_cfg.fwnode = dev_fwnode(dev);
+
+ pdata->charger = devm_power_supply_register(dev,
+ &bq257xx_power_supply_desc,
+ &psy_cfg);
+ if (IS_ERR(pdata->charger))
+ return dev_err_probe(dev, PTR_ERR(pdata->charger),
+ "Power supply register charger failed\n");
+
+ ret = bq257xx_parse_dt(pdata, &psy_cfg, dev);
+ if (ret)
+ return ret;
+
+ ret = pdata->chip->bq257xx_hw_init(pdata);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot initialize the charger\n");
+
+ if (bq->client->irq) {
+ ret = devm_request_threaded_irq(dev, bq->client->irq, NULL,
+ bq257xx_irq_handler_thread,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ dev_name(&bq->client->dev), pdata);
+ if (ret < 0)
+ dev_err_probe(dev, ret, "Charger get irq failed\n");
+ }
+
+ return ret;
+}
+
+static void bq257xx_charger_shutdown(struct platform_device *pdev)
+{
+ struct bq257xx_chg *pdata = platform_get_drvdata(pdev);
+
+ pdata->chip->bq257xx_hw_shutdown(pdata);
+}
+
+static struct platform_driver bq257xx_chg_driver = {
+ .driver = {
+ .name = "bq257xx-charger",
+ },
+ .probe = bq257xx_charger_probe,
+ .shutdown = bq257xx_charger_shutdown,
+};
+module_platform_driver(bq257xx_chg_driver);
+
+MODULE_DESCRIPTION("bq257xx charger driver");
+MODULE_AUTHOR("Chris Morgan <macromorgan@hotmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 93dcebbe1141..19445e39651c 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -1127,7 +1127,7 @@ static int poll_interval_param_set(const char *val, const struct kernel_param *k
mutex_lock(&bq27xxx_list_lock);
list_for_each_entry(di, &bq27xxx_battery_devices, list)
- mod_delayed_work(system_wq, &di->work, 0);
+ mod_delayed_work(system_percpu_wq, &di->work, 0);
mutex_unlock(&bq27xxx_list_lock);
return ret;
@@ -1919,8 +1919,8 @@ static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di)
bool has_singe_flag = di->opts & BQ27XXX_O_ZERO;
cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag);
- if ((cache.flags & 0xff) == 0xff)
- cache.flags = -1; /* read error */
+ if (di->chip == BQ27000 && (cache.flags & 0xff) == 0xff)
+ cache.flags = -ENODEV; /* bq27000 hdq read error */
if (cache.flags >= 0) {
cache.capacity = bq27xxx_battery_read_soc(di);
@@ -1945,7 +1945,7 @@ static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di)
di->last_update = jiffies;
if (!di->removed && poll_interval > 0)
- mod_delayed_work(system_wq, &di->work, poll_interval * HZ);
+ mod_delayed_work(system_percpu_wq, &di->work, poll_interval * HZ);
}
void bq27xxx_battery_update(struct bq27xxx_device_info *di)
@@ -2221,14 +2221,7 @@ static void bq27xxx_external_power_changed(struct power_supply *psy)
struct bq27xxx_device_info *di = power_supply_get_drvdata(psy);
/* After charger plug in/out wait 0.5s for things to stabilize */
- mod_delayed_work(system_wq, &di->work, HZ / 2);
-}
-
-static void bq27xxx_battery_mutex_destroy(void *data)
-{
- struct mutex *lock = data;
-
- mutex_destroy(lock);
+ mod_delayed_work(system_percpu_wq, &di->work, HZ / 2);
}
int bq27xxx_battery_setup(struct bq27xxx_device_info *di)
@@ -2242,9 +2235,7 @@ int bq27xxx_battery_setup(struct bq27xxx_device_info *di)
int ret;
INIT_DELAYED_WORK(&di->work, bq27xxx_battery_poll);
- mutex_init(&di->lock);
- ret = devm_add_action_or_reset(di->dev, bq27xxx_battery_mutex_destroy,
- &di->lock);
+ ret = devm_mutex_init(di->dev, &di->lock);
if (ret)
return ret;
diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c
index f63c3c410451..2263d5d3448f 100644
--- a/drivers/power/supply/cw2015_battery.c
+++ b/drivers/power/supply/cw2015_battery.c
@@ -506,10 +506,7 @@ static int cw_battery_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CHARGE_FULL:
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
- if (cw_bat->battery->charge_full_design_uah > 0)
- val->intval = cw_bat->battery->charge_full_design_uah;
- else
- val->intval = 0;
+ val->intval = max(cw_bat->battery->charge_full_design_uah, 0);
break;
case POWER_SUPPLY_PROP_CHARGE_NOW:
@@ -702,8 +699,7 @@ static int cw_bat_probe(struct i2c_client *client)
if (!cw_bat->battery_workqueue)
return -ENOMEM;
- devm_delayed_work_autocancel(&client->dev,
- &cw_bat->battery_delay_work, cw_bat_work);
+ devm_delayed_work_autocancel(&client->dev, &cw_bat->battery_delay_work, cw_bat_work);
queue_delayed_work(cw_bat->battery_workqueue,
&cw_bat->battery_delay_work, msecs_to_jiffies(10));
return 0;
diff --git a/drivers/power/supply/gpio-charger.c b/drivers/power/supply/gpio-charger.c
index 1b2da9b5fb65..2504190eba82 100644
--- a/drivers/power/supply/gpio-charger.c
+++ b/drivers/power/supply/gpio-charger.c
@@ -79,7 +79,8 @@ static int set_charge_current_limit(struct gpio_charger *gpio_charger, int val)
for (i = 0; i < ndescs; i++) {
bool val = (mapping.gpiodata >> i) & 1;
- gpiod_set_value_cansleep(gpios[ndescs-i-1], val);
+
+ gpiod_set_value_cansleep(gpios[ndescs - i - 1], val);
}
gpio_charger->charge_current_limit = mapping.limit_ua;
@@ -226,14 +227,14 @@ static int init_charge_current_limit(struct device *dev,
gpio_charger->current_limit_map_size = len / 2;
len = device_property_read_u32_array(dev, "charge-current-limit-mapping",
- (u32*) gpio_charger->current_limit_map, len);
+ (u32 *) gpio_charger->current_limit_map, len);
if (len < 0)
return len;
set_def_limit = !device_property_read_u32(dev,
"charge-current-limit-default-microamp",
&def_limit);
- for (i=0; i < gpio_charger->current_limit_map_size; i++) {
+ for (i = 0; i < gpio_charger->current_limit_map_size; i++) {
if (gpio_charger->current_limit_map[i].limit_ua > cur_limit) {
dev_err(dev, "charge-current-limit-mapping not sorted by current in descending order\n");
return -EINVAL;
diff --git a/drivers/power/supply/intel_dc_ti_battery.c b/drivers/power/supply/intel_dc_ti_battery.c
new file mode 100644
index 000000000000..56b0c92e9d28
--- /dev/null
+++ b/drivers/power/supply/intel_dc_ti_battery.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Battery driver for the coulomb-counter of the Intel Dollar Cove TI PMIC
+ *
+ * Note the Intel Dollar Cove TI PMIC coulomb-counter is not a full-featured
+ * autonomous fuel-gauge. It is intended to work together with an always on
+ * micro-controller monitoring it.
+ *
+ * Since Linux does not monitor coulomb-counter changes while the device
+ * is off or suspended, voltage-based capacity estimation from
+ * the adc-battery-helper code is used.
+ *
+ * Copyright (C) 2024 Hans de Goede <hansg@kernel.org>
+ *
+ * Register definitions and calibration code was taken from
+ * kernel/drivers/platform/x86/dc_ti_cc.c from the Acer A1-840 Android kernel
+ * which has the following copyright header:
+ *
+ * Copyright (C) 2014 Intel Corporation
+ * Author: Ramakrishna Pallala <ramakrishna.pallala@intel.com>
+ *
+ * dc_ti_cc.c is part of the Acer A1-840 Android kernel source-code archive
+ * named: "App. Guide_Acer_20151221_A_A.zip"
+ * which is distributed by Acer from the Acer A1-840 support page:
+ * https://www.acer.com/us-en/support/product-support/A1-840/downloads
+ */
+
+#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iio/consumer.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/power_supply.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/timekeeping.h>
+
+#include "adc-battery-helper.h"
+
+#define DC_TI_PMIC_VERSION_REG 0x00
+#define PMIC_VERSION_A0 0xC0
+#define PMIC_VERSION_A1 0xC1
+
+#define DC_TI_CC_CNTL_REG 0x60
+#define CC_CNTL_CC_CTR_EN BIT(0)
+#define CC_CNTL_CC_CLR_EN BIT(1)
+#define CC_CNTL_CC_CAL_EN BIT(2)
+#define CC_CNTL_CC_OFFSET_EN BIT(3)
+#define CC_CNTL_SMPL_INTVL GENMASK(5, 4)
+#define CC_CNTL_SMPL_INTVL_15MS FIELD_PREP(CC_CNTL_SMPL_INTVL, 0)
+#define CC_CNTL_SMPL_INTVL_62MS FIELD_PREP(CC_CNTL_SMPL_INTVL, 1)
+#define CC_CNTL_SMPL_INTVL_125MS FIELD_PREP(CC_CNTL_SMPL_INTVL, 2)
+#define CC_CNTL_SMPL_INTVL_250MS FIELD_PREP(CC_CNTL_SMPL_INTVL, 3)
+
+#define DC_TI_SMPL_CTR0_REG 0x69
+#define DC_TI_SMPL_CTR1_REG 0x68
+#define DC_TI_SMPL_CTR2_REG 0x67
+
+#define DC_TI_CC_OFFSET_HI_REG 0x61
+#define CC_OFFSET_HI_MASK 0x3F
+#define DC_TI_CC_OFFSET_LO_REG 0x62
+
+#define DC_TI_SW_OFFSET_REG 0x6C
+
+#define DC_TI_CC_ACC3_REG 0x63
+#define DC_TI_CC_ACC2_REG 0x64
+#define DC_TI_CC_ACC1_REG 0x65
+#define DC_TI_CC_ACC0_REG 0x66
+
+#define DC_TI_CC_INTG1_REG 0x6A
+#define DC_TI_CC_INTG1_MASK 0x3F
+#define DC_TI_CC_INTG0_REG 0x6B
+
+#define DC_TI_EEPROM_ACCESS_CONTROL 0x88
+#define EEPROM_UNLOCK 0xDA
+#define EEPROM_LOCK 0x00
+
+#define DC_TI_EEPROM_CC_GAIN_REG 0xF4
+#define CC_TRIM_REVISION GENMASK(3, 0)
+#define CC_GAIN_CORRECTION GENMASK(7, 4)
+
+#define PMIC_VERSION_A0_TRIM_REV 3
+#define PMIC_VERSION_A1_MIN_TRIM_REV 1
+
+#define DC_TI_EEPROM_CC_OFFSET_REG 0xFD
+
+#define DC_TI_EEPROM_CTRL 0xFE
+#define EEPROM_BANK0_SEL 0x01
+#define EEPROM_BANK1_SEL 0x02
+
+#define SMPL_INTVL_US 15000
+#define SMPL_INTVL_MS (SMPL_INTVL_US / USEC_PER_MSEC)
+#define CALIBRATION_TIME_US (10 * SMPL_INTVL_US)
+#define SLEEP_SLACK_US 2500
+
+/* CC gain correction is in 0.0025 increments */
+#define CC_GAIN_STEP 25
+#define CC_GAIN_DIV 10000
+
+/* CC offset is in 0.5 units per 250ms (default sample interval) */
+#define CC_OFFSET_DIV 2
+#define CC_OFFSET_SMPL_INTVL_MS 250
+
+/* CC accumulator scale is 366.2 µCoulomb / unit */
+#define CC_ACC_TO_UA(acc, smpl_ctr) \
+ ((acc) * (3662 * MSEC_PER_SEC / 10) / ((smpl_ctr) * SMPL_INTVL_MS))
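+/*
+ * Illustrative example: acc = 1000 units counted over smpl_ctr = 200
+ * samples of 15 ms gives 1000 * 366200 / (200 * 15) = 122066 uA,
+ * i.e. roughly 122 mA average current.
+ */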
+
+#define DEV_NAME "chtdc_ti_battery"
+
+struct dc_ti_battery_chip {
+ /* Must be the first member, see the adc-battery-helper documentation */
+ struct adc_battery_helper helper;
+ struct device *dev;
+ struct regmap *regmap;
+ struct iio_channel *vbat_channel;
+ struct power_supply *psy;
+ int cc_gain;
+ int cc_offset;
+};
+
+static int dc_ti_battery_get_voltage_and_current_now(struct power_supply *psy, int *volt, int *curr)
+{
+ struct dc_ti_battery_chip *chip = power_supply_get_drvdata(psy);
+ s64 cnt_start_usec, now_usec, sleep_usec;
+ unsigned int reg_val;
+ s32 acc, smpl_ctr;
+ int ret;
+
+ /*
+ * Enable coulomb-counter before reading Vbat from ADC, so that the CC
+ * samples are from the same time period as the Vbat reading.
+ */
+ ret = regmap_write(chip->regmap, DC_TI_CC_CNTL_REG,
+ CC_CNTL_SMPL_INTVL_15MS | CC_CNTL_CC_OFFSET_EN | CC_CNTL_CC_CTR_EN);
+ if (ret)
+ goto out_err;
+
+ cnt_start_usec = ktime_get_ns() / NSEC_PER_USEC;
+
+ /* Read Vbat, convert IIO mV to power-supply µV */
+ ret = iio_read_channel_processed_scale(chip->vbat_channel, volt, 1000);
+ if (ret < 0)
+ goto out_err;
+
+ /* Sleep at least 3 sample-times + slack to get 3+ CC samples */
+ now_usec = ktime_get_ns() / NSEC_PER_USEC;
+ sleep_usec = 3 * SMPL_INTVL_US + SLEEP_SLACK_US - (now_usec - cnt_start_usec);
+ if (sleep_usec > 0 && sleep_usec < 1000000)
+ usleep_range(sleep_usec, sleep_usec + SLEEP_SLACK_US);
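+ /*
+ * E.g. (illustrative) if reading Vbat took 10 ms, this sleeps roughly
+ * 3 * 15000 + 2500 - 10000 = 37500 us before latching the counters.
+ */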
+
+ /*
+ * The PMIC latches the coulomb- and sample-counters upon reading the
+ * CC_ACC0 register. Reading multiple registers at once is not supported.
+ *
+ * Step 1: Read CC_ACC0 - CC_ACC3
+ */
+ ret = regmap_read(chip->regmap, DC_TI_CC_ACC0_REG, &reg_val);
+ if (ret)
+ goto out_err;
+
+ acc = reg_val;
+
+ ret = regmap_read(chip->regmap, DC_TI_CC_ACC1_REG, &reg_val);
+ if (ret)
+ goto out_err;
+
+ acc |= reg_val << 8;
+
+ ret = regmap_read(chip->regmap, DC_TI_CC_ACC2_REG, &reg_val);
+ if (ret)
+ goto out_err;
+
+ acc |= reg_val << 16;
+
+ ret = regmap_read(chip->regmap, DC_TI_CC_ACC3_REG, &reg_val);
+ if (ret)
+ goto out_err;
+
+ acc |= reg_val << 24;
+
+ /* Step 2: Read SMPL_CTR0 - SMPL_CTR2 */
+ ret = regmap_read(chip->regmap, DC_TI_SMPL_CTR0_REG, &reg_val);
+ if (ret)
+ goto out_err;
+
+ smpl_ctr = reg_val;
+
+ ret = regmap_read(chip->regmap, DC_TI_SMPL_CTR1_REG, &reg_val);
+ if (ret)
+ goto out_err;
+
+ smpl_ctr |= reg_val << 8;
+
+ ret = regmap_read(chip->regmap, DC_TI_SMPL_CTR2_REG, &reg_val);
+ if (ret)
+ goto out_err;
+
+ smpl_ctr |= reg_val << 16;
+
+ /* Disable the coulomb-counter again */
+ ret = regmap_write(chip->regmap, DC_TI_CC_CNTL_REG,
+ CC_CNTL_SMPL_INTVL_15MS | CC_CNTL_CC_OFFSET_EN);
+ if (ret)
+ goto out_err;
+
+ /* Apply calibration */
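+ /*
+ * Illustrative numbers: cc_offset = 4 over 200 samples of 15 ms
+ * subtracts 4 * 200 * 15 / (250 * 2) = 24 accumulator units, after
+ * which cc_gain = -3 scales by (10000 + 3 * 25) / 10000.
+ */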
+ acc -= chip->cc_offset * smpl_ctr * SMPL_INTVL_MS /
+ (CC_OFFSET_SMPL_INTVL_MS * CC_OFFSET_DIV);
+ acc = acc * (CC_GAIN_DIV - chip->cc_gain * CC_GAIN_STEP) / CC_GAIN_DIV;
+ *curr = CC_ACC_TO_UA(acc, smpl_ctr);
+
+ return 0;
+
+out_err:
+ dev_err(chip->dev, "IO-error %d communicating with PMIC\n", ret);
+ return ret;
+}
+
+static const struct power_supply_desc dc_ti_battery_psy_desc = {
+ .name = "intel_dc_ti_battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .get_property = adc_battery_helper_get_property,
+ .external_power_changed = adc_battery_helper_external_power_changed,
+ .properties = adc_battery_helper_properties,
+ .num_properties = ADC_HELPER_NUM_PROPERTIES,
+};
+
+static int dc_ti_battery_hw_init(struct dc_ti_battery_chip *chip)
+{
+ u8 pmic_version, cc_trim_rev;
+ unsigned int reg_val;
+ int ret;
+
+ /* Set sample rate to 15 ms and calibrate the coulomb-counter */
+ ret = regmap_write(chip->regmap, DC_TI_CC_CNTL_REG,
+ CC_CNTL_SMPL_INTVL_15MS | CC_CNTL_CC_OFFSET_EN |
+ CC_CNTL_CC_CAL_EN | CC_CNTL_CC_CTR_EN);
+ if (ret)
+ goto out;
+
+ fsleep(CALIBRATION_TIME_US);
+
+ /* Disable the coulomb-counter; it is only used while getting the current */
+ ret = regmap_write(chip->regmap, DC_TI_CC_CNTL_REG,
+ CC_CNTL_SMPL_INTVL_15MS | CC_CNTL_CC_OFFSET_EN);
+ if (ret)
+ goto out;
+
+ ret = regmap_read(chip->regmap, DC_TI_PMIC_VERSION_REG, &reg_val);
+ if (ret)
+ goto out;
+
+ pmic_version = reg_val;
+
+ /*
+ * As per the PMIC vendor (TI), the calibration offset and gain error
+ * values are stored in EEPROM Bank 0 and Bank 1 of the PMIC. We need
+ * to read the stored offset and gain corrections and apply them to
+ * the raw coulomb-counter value.
+ */
+
+ /* Unlock the EEPROM Access */
+ ret = regmap_write(chip->regmap, DC_TI_EEPROM_ACCESS_CONTROL, EEPROM_UNLOCK);
+ if (ret)
+ goto out;
+
+ /* Select Bank 1 to read CC GAIN Err correction */
+ ret = regmap_write(chip->regmap, DC_TI_EEPROM_CTRL, EEPROM_BANK1_SEL);
+ if (ret)
+ goto out;
+
+ ret = regmap_read(chip->regmap, DC_TI_EEPROM_CC_GAIN_REG, &reg_val);
+ if (ret)
+ goto out;
+
+ cc_trim_rev = FIELD_GET(CC_TRIM_REVISION, reg_val);
+
+ dev_dbg(chip->dev, "pmic-ver 0x%02x trim-rev %d\n", pmic_version, cc_trim_rev);
+
+ if (!(pmic_version == PMIC_VERSION_A0 && cc_trim_rev == PMIC_VERSION_A0_TRIM_REV) &&
+ !(pmic_version == PMIC_VERSION_A1 && cc_trim_rev >= PMIC_VERSION_A1_MIN_TRIM_REV)) {
+ dev_dbg(chip->dev, "unsupported trim-revision, using uncalibrated CC values\n");
+ goto out_relock;
+ }
+
+ chip->cc_gain = 1 - (int)FIELD_GET(CC_GAIN_CORRECTION, reg_val);
+
+ /* Select Bank 0 to read CC OFFSET Correction */
+ ret = regmap_write(chip->regmap, DC_TI_EEPROM_CTRL, EEPROM_BANK0_SEL);
+ if (ret)
+ goto out_relock;
+
+ ret = regmap_read(chip->regmap, DC_TI_EEPROM_CC_OFFSET_REG, &reg_val);
+ if (ret)
+ goto out_relock;
+
+ chip->cc_offset = (s8)reg_val;
+
+ dev_dbg(chip->dev, "cc-offset %d cc-gain %d\n", chip->cc_offset, chip->cc_gain);
+
+out_relock:
+ /* Re-lock the EEPROM Access */
+ regmap_write(chip->regmap, DC_TI_EEPROM_ACCESS_CONTROL, EEPROM_LOCK);
+out:
+ if (ret)
+ dev_err(chip->dev, "IO-error %d initializing PMIC\n", ret);
+
+ return ret;
+}
+
+static int dc_ti_battery_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct intel_soc_pmic *pmic = dev_get_drvdata(dev->parent);
+ struct power_supply_config psy_cfg = {};
+ struct fwnode_reference_args args;
+ struct gpio_desc *charge_finished;
+ struct dc_ti_battery_chip *chip;
+ int ret;
+
+ /* On most devices with a Dollar Cove TI the battery is handled by ACPI */
+ if (!acpi_quirk_skip_acpi_ac_and_battery())
+ return -ENODEV;
+
+ /* ACPI glue code adds a "monitored-battery" fwnode; wait for it */
+ ret = fwnode_property_get_reference_args(dev_fwnode(dev), "monitored-battery",
+ NULL, 0, 0, &args);
+ if (ret) {
+ dev_dbg(dev, "fwnode_property_get_ref() ret %d\n", ret);
+ return dev_err_probe(dev, -EPROBE_DEFER, "Waiting for monitored-battery fwnode\n");
+ }
+
+ fwnode_handle_put(args.fwnode);
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->dev = dev;
+ chip->regmap = pmic->regmap;
+
+ chip->vbat_channel = devm_iio_channel_get(dev, "VBAT");
+ if (IS_ERR(chip->vbat_channel)) {
+ dev_dbg(dev, "devm_iio_channel_get() ret %ld\n", PTR_ERR(chip->vbat_channel));
+ return dev_err_probe(dev, -EPROBE_DEFER, "Waiting for VBAT IIO channel\n");
+ }
+
+ charge_finished = devm_gpiod_get_optional(dev, "charged", GPIOD_IN);
+ if (IS_ERR(charge_finished))
+ return dev_err_probe(dev, PTR_ERR(charge_finished), "Getting charged GPIO\n");
+
+ ret = dc_ti_battery_hw_init(chip);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, chip);
+
+ psy_cfg.drv_data = chip;
+ chip->psy = devm_power_supply_register(dev, &dc_ti_battery_psy_desc, &psy_cfg);
+ if (IS_ERR(chip->psy))
+ return PTR_ERR(chip->psy);
+
+ return adc_battery_helper_init(&chip->helper, chip->psy,
+ dc_ti_battery_get_voltage_and_current_now,
+ charge_finished);
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(dc_ti_battery_pm_ops, adc_battery_helper_suspend,
+ adc_battery_helper_resume, NULL);
+
+static struct platform_driver dc_ti_battery_driver = {
+ .driver = {
+ .name = DEV_NAME,
+ .pm = pm_sleep_ptr(&dc_ti_battery_pm_ops),
+ },
+ .probe = dc_ti_battery_probe,
+};
+module_platform_driver(dc_ti_battery_driver);
+
+MODULE_ALIAS("platform:" DEV_NAME);
+MODULE_AUTHOR("Hans de Goede <hansg@kernel.org>");
+MODULE_DESCRIPTION("Intel Dollar Cove (TI) battery driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/ipaq_micro_battery.c b/drivers/power/supply/ipaq_micro_battery.c
index 7e0568a5353f..ff8573a5ca6d 100644
--- a/drivers/power/supply/ipaq_micro_battery.c
+++ b/drivers/power/supply/ipaq_micro_battery.c
@@ -232,7 +232,8 @@ static int micro_batt_probe(struct platform_device *pdev)
return -ENOMEM;
mb->micro = dev_get_drvdata(pdev->dev.parent);
- mb->wq = alloc_workqueue("ipaq-battery-wq", WQ_MEM_RECLAIM, 0);
+ mb->wq = alloc_workqueue("ipaq-battery-wq",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!mb->wq)
return -ENOMEM;
diff --git a/drivers/power/supply/max77705_charger.c b/drivers/power/supply/max77705_charger.c
index 329b430d0e50..b1a227bf72e2 100644
--- a/drivers/power/supply/max77705_charger.c
+++ b/drivers/power/supply/max77705_charger.c
@@ -40,31 +40,30 @@ static enum power_supply_property max77705_charger_props[] = {
POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
};
-static int max77705_chgin_irq(void *irq_drv_data)
+static irqreturn_t max77705_chgin_irq(int irq, void *irq_drv_data)
{
- struct max77705_charger_data *charger = irq_drv_data;
+ struct max77705_charger_data *chg = irq_drv_data;
- queue_work(charger->wqueue, &charger->chgin_work);
+ queue_work(chg->wqueue, &chg->chgin_work);
- return 0;
+ return IRQ_HANDLED;
}
static const struct regmap_irq max77705_charger_irqs[] = {
- { .mask = MAX77705_BYP_IM, },
- { .mask = MAX77705_INP_LIMIT_IM, },
- { .mask = MAX77705_BATP_IM, },
- { .mask = MAX77705_BAT_IM, },
- { .mask = MAX77705_CHG_IM, },
- { .mask = MAX77705_WCIN_IM, },
- { .mask = MAX77705_CHGIN_IM, },
- { .mask = MAX77705_AICL_IM, },
+ REGMAP_IRQ_REG_LINE(MAX77705_BYP_I, BITS_PER_BYTE),
+ REGMAP_IRQ_REG_LINE(MAX77705_INP_LIMIT_I, BITS_PER_BYTE),
+ REGMAP_IRQ_REG_LINE(MAX77705_BATP_I, BITS_PER_BYTE),
+ REGMAP_IRQ_REG_LINE(MAX77705_BAT_I, BITS_PER_BYTE),
+ REGMAP_IRQ_REG_LINE(MAX77705_CHG_I, BITS_PER_BYTE),
+ REGMAP_IRQ_REG_LINE(MAX77705_WCIN_I, BITS_PER_BYTE),
+ REGMAP_IRQ_REG_LINE(MAX77705_CHGIN_I, BITS_PER_BYTE),
+ REGMAP_IRQ_REG_LINE(MAX77705_AICL_I, BITS_PER_BYTE),
};
static struct regmap_irq_chip max77705_charger_irq_chip = {
.name = "max77705-charger",
.status_base = MAX77705_CHG_REG_INT,
.mask_base = MAX77705_CHG_REG_INT_MASK,
- .handle_post_irq = max77705_chgin_irq,
.num_regs = 1,
.irqs = max77705_charger_irqs,
.num_irqs = ARRAY_SIZE(max77705_charger_irqs),
@@ -74,8 +73,7 @@ static int max77705_charger_enable(struct max77705_charger_data *chg)
{
int rv;
- rv = regmap_update_bits(chg->regmap, MAX77705_CHG_REG_CNFG_09,
- MAX77705_CHG_EN_MASK, MAX77705_CHG_EN_MASK);
+ rv = regmap_field_write(chg->rfield[MAX77705_CHG_EN], 1);
if (rv)
dev_err(chg->dev, "unable to enable the charger: %d\n", rv);
@@ -87,10 +85,7 @@ static void max77705_charger_disable(void *data)
struct max77705_charger_data *chg = data;
int rv;
- rv = regmap_update_bits(chg->regmap,
- MAX77705_CHG_REG_CNFG_09,
- MAX77705_CHG_EN_MASK,
- MAX77705_CHG_DISABLE);
+ rv = regmap_field_write(chg->rfield[MAX77705_CHG_EN], MAX77705_CHG_DISABLE);
if (rv)
dev_err(chg->dev, "unable to disable the charger: %d\n", rv);
}
@@ -109,19 +104,30 @@ static int max77705_get_online(struct regmap *regmap, int *val)
return 0;
}
-static int max77705_check_battery(struct max77705_charger_data *charger, int *val)
+static int max77705_set_integer(struct max77705_charger_data *chg, enum max77705_field_idx fidx,
+ unsigned int clamp_min, unsigned int clamp_max,
+ unsigned int div, int val)
+{
+ unsigned int regval;
+
+ regval = clamp_val(val, clamp_min, clamp_max) / div;
+
+ return regmap_field_write(chg->rfield[fidx], regval);
+}
+
+static int max77705_check_battery(struct max77705_charger_data *chg, int *val)
{
unsigned int reg_data;
unsigned int reg_data2;
- struct regmap *regmap = charger->regmap;
+ struct regmap *regmap = chg->regmap;
regmap_read(regmap, MAX77705_CHG_REG_INT_OK, &reg_data);
- dev_dbg(charger->dev, "CHG_INT_OK(0x%x)\n", reg_data);
+ dev_dbg(chg->dev, "CHG_INT_OK(0x%x)\n", reg_data);
regmap_read(regmap, MAX77705_CHG_REG_DETAILS_00, &reg_data2);
- dev_dbg(charger->dev, "CHG_DETAILS00(0x%x)\n", reg_data2);
+ dev_dbg(chg->dev, "CHG_DETAILS00(0x%x)\n", reg_data2);
if ((reg_data & MAX77705_BATP_OK) || !(reg_data2 & MAX77705_BATP_DTLS))
*val = true;
@@ -131,13 +137,13 @@ static int max77705_check_battery(struct max77705_charger_data *charger, int *va
return 0;
}
-static int max77705_get_charge_type(struct max77705_charger_data *charger, int *val)
+static int max77705_get_charge_type(struct max77705_charger_data *chg, int *val)
{
- struct regmap *regmap = charger->regmap;
- unsigned int reg_data;
+ struct regmap *regmap = chg->regmap;
+ unsigned int reg_data, chg_en;
- regmap_read(regmap, MAX77705_CHG_REG_CNFG_09, &reg_data);
- if (!MAX77705_CHARGER_CHG_CHARGING(reg_data)) {
+ regmap_field_read(chg->rfield[MAX77705_CHG_EN], &chg_en);
+ if (!chg_en) {
*val = POWER_SUPPLY_CHARGE_TYPE_NONE;
return 0;
}
@@ -159,13 +165,13 @@ static int max77705_get_charge_type(struct max77705_charger_data *charger, int *
return 0;
}
-static int max77705_get_status(struct max77705_charger_data *charger, int *val)
+static int max77705_get_status(struct max77705_charger_data *chg, int *val)
{
- struct regmap *regmap = charger->regmap;
- unsigned int reg_data;
+ struct regmap *regmap = chg->regmap;
+ unsigned int reg_data, chg_en;
- regmap_read(regmap, MAX77705_CHG_REG_CNFG_09, &reg_data);
- if (!MAX77705_CHARGER_CHG_CHARGING(reg_data)) {
+ regmap_field_read(chg->rfield[MAX77705_CHG_EN], &chg_en);
+ if (!chg_en) {
*val = POWER_SUPPLY_CHARGE_TYPE_NONE;
return 0;
}
@@ -234,10 +240,10 @@ static int max77705_get_vbus_state(struct regmap *regmap, int *value)
return 0;
}
-static int max77705_get_battery_health(struct max77705_charger_data *charger,
+static int max77705_get_battery_health(struct max77705_charger_data *chg,
int *value)
{
- struct regmap *regmap = charger->regmap;
+ struct regmap *regmap = chg->regmap;
unsigned int bat_dtls;
regmap_read(regmap, MAX77705_CHG_REG_DETAILS_01, &bat_dtls);
@@ -245,16 +251,16 @@ static int max77705_get_battery_health(struct max77705_charger_data *charger,
switch (bat_dtls) {
case MAX77705_BATTERY_NOBAT:
- dev_dbg(charger->dev, "%s: No battery and the charger is suspended\n",
+ dev_dbg(chg->dev, "%s: No battery and the charger is suspended\n",
__func__);
*value = POWER_SUPPLY_HEALTH_NO_BATTERY;
break;
case MAX77705_BATTERY_PREQUALIFICATION:
- dev_dbg(charger->dev, "%s: battery is okay but its voltage is low(~VPQLB)\n",
+ dev_dbg(chg->dev, "%s: battery is okay but its voltage is low(~VPQLB)\n",
__func__);
break;
case MAX77705_BATTERY_DEAD:
- dev_dbg(charger->dev, "%s: battery dead\n", __func__);
+ dev_dbg(chg->dev, "%s: battery dead\n", __func__);
*value = POWER_SUPPLY_HEALTH_DEAD;
break;
case MAX77705_BATTERY_GOOD:
@@ -262,11 +268,11 @@ static int max77705_get_battery_health(struct max77705_charger_data *charger,
*value = POWER_SUPPLY_HEALTH_GOOD;
break;
case MAX77705_BATTERY_OVERVOLTAGE:
- dev_dbg(charger->dev, "%s: battery ovp\n", __func__);
+ dev_dbg(chg->dev, "%s: battery ovp\n", __func__);
*value = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
break;
default:
- dev_dbg(charger->dev, "%s: battery unknown\n", __func__);
+ dev_dbg(chg->dev, "%s: battery unknown\n", __func__);
*value = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
break;
}
@@ -274,9 +280,9 @@ static int max77705_get_battery_health(struct max77705_charger_data *charger,
return 0;
}
-static int max77705_get_health(struct max77705_charger_data *charger, int *val)
+static int max77705_get_health(struct max77705_charger_data *chg, int *val)
{
- struct regmap *regmap = charger->regmap;
+ struct regmap *regmap = chg->regmap;
int ret, is_online = 0;
ret = max77705_get_online(regmap, &is_online);
@@ -287,24 +293,19 @@ static int max77705_get_health(struct max77705_charger_data *charger, int *val)
if (ret || (*val != POWER_SUPPLY_HEALTH_GOOD))
return ret;
}
- return max77705_get_battery_health(charger, val);
+ return max77705_get_battery_health(chg, val);
}
-static int max77705_get_input_current(struct max77705_charger_data *charger,
+static int max77705_get_input_current(struct max77705_charger_data *chg,
int *val)
{
unsigned int reg_data;
int get_current = 0;
- struct regmap *regmap = charger->regmap;
-
- regmap_read(regmap, MAX77705_CHG_REG_CNFG_09, &reg_data);
- reg_data &= MAX77705_CHG_CHGIN_LIM_MASK;
+ regmap_field_read(chg->rfield[MAX77705_CHG_CHGIN_LIM], &reg_data);
if (reg_data <= 3)
get_current = MAX77705_CURRENT_CHGIN_MIN;
- else if (reg_data >= MAX77705_CHG_CHGIN_LIM_MASK)
- get_current = MAX77705_CURRENT_CHGIN_MAX;
else
get_current = (reg_data + 1) * MAX77705_CURRENT_CHGIN_STEP;
@@ -313,26 +314,23 @@ static int max77705_get_input_current(struct max77705_charger_data *charger,
return 0;
}
-static int max77705_get_charge_current(struct max77705_charger_data *charger,
+static int max77705_get_charge_current(struct max77705_charger_data *chg,
int *val)
{
unsigned int reg_data;
- struct regmap *regmap = charger->regmap;
- regmap_read(regmap, MAX77705_CHG_REG_CNFG_02, &reg_data);
- reg_data &= MAX77705_CHG_CC;
+ regmap_field_read(chg->rfield[MAX77705_CHG_CC_LIM], &reg_data);
*val = reg_data <= 0x2 ? MAX77705_CURRENT_CHGIN_MIN : reg_data * MAX77705_CURRENT_CHG_STEP;
return 0;
}
-static int max77705_set_float_voltage(struct max77705_charger_data *charger,
+static int max77705_set_float_voltage(struct max77705_charger_data *chg,
int float_voltage)
{
int float_voltage_mv;
unsigned int reg_data = 0;
- struct regmap *regmap = charger->regmap;
float_voltage_mv = float_voltage / 1000;
reg_data = float_voltage_mv <= 4000 ? 0x0 :
@@ -340,20 +338,16 @@ static int max77705_set_float_voltage(struct max77705_charger_data *charger,
(float_voltage_mv <= 4200) ? (float_voltage_mv - 4000) / 50 :
(((float_voltage_mv - 4200) / 10) + 0x04);
- return regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_04,
- MAX77705_CHG_CV_PRM_MASK,
- (reg_data << MAX77705_CHG_CV_PRM_SHIFT));
+ return regmap_field_write(chg->rfield[MAX77705_CHG_CV_PRM], reg_data);
}
-static int max77705_get_float_voltage(struct max77705_charger_data *charger,
+static int max77705_get_float_voltage(struct max77705_charger_data *chg,
int *val)
{
unsigned int reg_data = 0;
int voltage_mv;
- struct regmap *regmap = charger->regmap;
- regmap_read(regmap, MAX77705_CHG_REG_CNFG_04, &reg_data);
- reg_data &= MAX77705_CHG_PRM_MASK;
+ regmap_field_read(chg->rfield[MAX77705_CHG_CV_PRM], &reg_data);
voltage_mv = reg_data <= 0x04 ? reg_data * 50 + 4000 :
(reg_data - 4) * 10 + 4200;
*val = voltage_mv * 1000;
@@ -365,28 +359,28 @@ static int max77705_chg_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
- struct max77705_charger_data *charger = power_supply_get_drvdata(psy);
- struct regmap *regmap = charger->regmap;
+ struct max77705_charger_data *chg = power_supply_get_drvdata(psy);
+ struct regmap *regmap = chg->regmap;
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
return max77705_get_online(regmap, &val->intval);
case POWER_SUPPLY_PROP_PRESENT:
- return max77705_check_battery(charger, &val->intval);
+ return max77705_check_battery(chg, &val->intval);
case POWER_SUPPLY_PROP_STATUS:
- return max77705_get_status(charger, &val->intval);
+ return max77705_get_status(chg, &val->intval);
case POWER_SUPPLY_PROP_CHARGE_TYPE:
- return max77705_get_charge_type(charger, &val->intval);
+ return max77705_get_charge_type(chg, &val->intval);
case POWER_SUPPLY_PROP_HEALTH:
- return max77705_get_health(charger, &val->intval);
+ return max77705_get_health(chg, &val->intval);
case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
- return max77705_get_input_current(charger, &val->intval);
+ return max77705_get_input_current(chg, &val->intval);
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
- return max77705_get_charge_current(charger, &val->intval);
+ return max77705_get_charge_current(chg, &val->intval);
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
- return max77705_get_float_voltage(charger, &val->intval);
+ return max77705_get_float_voltage(chg, &val->intval);
case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
- val->intval = charger->bat_info->voltage_max_design_uv;
+ val->intval = chg->bat_info->voltage_max_design_uv;
break;
case POWER_SUPPLY_PROP_MODEL_NAME:
val->strval = max77705_charger_model;
@@ -400,74 +394,131 @@ static int max77705_chg_get_property(struct power_supply *psy,
return 0;
}
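+/*
+ * Setters for the writable properties; both reuse max77705_set_integer() to
+ * clamp and scale the requested current before writing the regmap field.
+ */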
+static int max77705_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct max77705_charger_data *chg = power_supply_get_drvdata(psy);
+ int err = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ err = max77705_set_integer(chg, MAX77705_CHG_CC_LIM,
+ MAX77705_CURRENT_CHGIN_MIN,
+ MAX77705_CURRENT_CHGIN_MAX,
+ MAX77705_CURRENT_CHG_STEP,
+ val->intval);
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ err = max77705_set_integer(chg, MAX77705_CHG_CHGIN_LIM,
+ MAX77705_CURRENT_CHGIN_MIN,
+ MAX77705_CURRENT_CHGIN_MAX,
+ MAX77705_CURRENT_CHGIN_STEP,
+ val->intval);
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int max77705_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ return true;
+ default:
+ return false;
+ }
+}
+
static const struct power_supply_desc max77705_charger_psy_desc = {
.name = "max77705-charger",
- .type = POWER_SUPPLY_TYPE_USB,
+ .type = POWER_SUPPLY_TYPE_USB,
.properties = max77705_charger_props,
+ .property_is_writeable = max77705_property_is_writeable,
.num_properties = ARRAY_SIZE(max77705_charger_props),
.get_property = max77705_chg_get_property,
+ .set_property = max77705_set_property,
};
static void max77705_chgin_isr_work(struct work_struct *work)
{
- struct max77705_charger_data *charger =
+ struct max77705_charger_data *chg =
container_of(work, struct max77705_charger_data, chgin_work);
- power_supply_changed(charger->psy_chg);
+ power_supply_changed(chg->psy_chg);
}
-static void max77705_charger_initialize(struct max77705_charger_data *chg)
+static int max77705_charger_initialize(struct max77705_charger_data *chg)
{
- u8 reg_data;
struct power_supply_battery_info *info;
struct regmap *regmap = chg->regmap;
+ int err;
- if (power_supply_get_battery_info(chg->psy_chg, &info) < 0)
- return;
+ err = power_supply_get_battery_info(chg->psy_chg, &info);
+ if (err)
+ return dev_err_probe(chg->dev, err, "error on getting battery info");
chg->bat_info = info;
/* unlock charger setting protect */
/* slowest LX slope */
- reg_data = MAX77705_CHGPROT_MASK | MAX77705_SLOWEST_LX_SLOPE;
- regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_06, reg_data,
- reg_data);
+ err = regmap_field_write(chg->rfield[MAX77705_CHGPROT], MAX77705_CHGPROT_UNLOCKED);
+ if (err)
+ goto err;
+
+ err = regmap_field_write(chg->rfield[MAX77705_LX_SLOPE], MAX77705_SLOWEST_LX_SLOPE);
+ if (err)
+ goto err;
/* fast charge timer disable */
/* restart threshold disable */
/* pre-qual charge disable */
- reg_data = (MAX77705_FCHGTIME_DISABLE << MAX77705_FCHGTIME_SHIFT) |
- (MAX77705_CHG_RSTRT_DISABLE << MAX77705_CHG_RSTRT_SHIFT) |
- (MAX77705_CHG_PQEN_DISABLE << MAX77705_PQEN_SHIFT);
- regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_01,
- (MAX77705_FCHGTIME_MASK |
- MAX77705_CHG_RSTRT_MASK |
- MAX77705_PQEN_MASK),
- reg_data);
-
- /* OTG off(UNO on), boost off */
- regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_00,
- MAX77705_OTG_CTRL, 0);
+ err = regmap_field_write(chg->rfield[MAX77705_FCHGTIME], MAX77705_FCHGTIME_DISABLE);
+ if (err)
+ goto err;
+
+ err = regmap_field_write(chg->rfield[MAX77705_CHG_RSTRT], MAX77705_CHG_RSTRT_DISABLE);
+ if (err)
+ goto err;
+
+ err = regmap_field_write(chg->rfield[MAX77705_CHG_PQEN], MAX77705_CHG_PQEN_DISABLE);
+ if (err)
+ goto err;
+
+ err = regmap_field_write(chg->rfield[MAX77705_MODE],
+ MAX77705_CHG_MASK | MAX77705_BUCK_MASK);
+ if (err)
+ goto err;
/* charge current 450mA(default) */
/* otg current limit 900mA */
- regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_02,
- MAX77705_OTG_ILIM_MASK,
- MAX77705_OTG_ILIM_900 << MAX77705_OTG_ILIM_SHIFT);
+ err = regmap_field_write(chg->rfield[MAX77705_OTG_ILIM], MAX77705_OTG_ILIM_900);
+ if (err)
+ goto err;
/* BAT to SYS OCP 4.80A */
- regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_05,
- MAX77705_REG_B2SOVRC_MASK,
- MAX77705_B2SOVRC_4_8A << MAX77705_REG_B2SOVRC_SHIFT);
+ err = regmap_field_write(chg->rfield[MAX77705_REG_B2SOVRC], MAX77705_B2SOVRC_4_8A);
+ if (err)
+ goto err;
+
/* top off current 150mA */
/* top off timer 30min */
- reg_data = (MAX77705_TO_ITH_150MA << MAX77705_TO_ITH_SHIFT) |
- (MAX77705_TO_TIME_30M << MAX77705_TO_TIME_SHIFT) |
- (MAX77705_SYS_TRACK_DISABLE << MAX77705_SYS_TRACK_DIS_SHIFT);
- regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_03,
- (MAX77705_TO_ITH_MASK |
- MAX77705_TO_TIME_MASK |
- MAX77705_SYS_TRACK_DIS_MASK), reg_data);
+ err = regmap_field_write(chg->rfield[MAX77705_TO], MAX77705_TO_ITH_150MA);
+ if (err)
+ goto err;
+
+ err = regmap_field_write(chg->rfield[MAX77705_TO_TIME], MAX77705_TO_TIME_30M);
+ if (err)
+ goto err;
+
+ err = regmap_field_write(chg->rfield[MAX77705_SYS_TRACK], MAX77705_SYS_TRACK_DISABLE);
+ if (err)
+ goto err;
/* cv voltage 4.2V or 4.35V */
/* MINVSYS 3.6V(default) */
@@ -478,28 +529,38 @@ static void max77705_charger_initialize(struct max77705_charger_data *chg)
max77705_set_float_voltage(chg, info->voltage_max_design_uv);
}
- regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_12,
- MAX77705_VCHGIN_REG_MASK, MAX77705_VCHGIN_4_5);
- regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_12,
- MAX77705_WCIN_REG_MASK, MAX77705_WCIN_4_5);
+ err = regmap_field_write(chg->rfield[MAX77705_VCHGIN], MAX77705_VCHGIN_4_5);
+ if (err)
+ goto err;
+
+ err = regmap_field_write(chg->rfield[MAX77705_WCIN], MAX77705_WCIN_4_5);
+ if (err)
+ goto err;
/* Watchdog timer */
regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_00,
MAX77705_WDTEN_MASK, 0);
- /* Active Discharge Enable */
- regmap_update_bits(regmap, MAX77705_PMIC_REG_MAINCTRL1, 1, 1);
-
/* VBYPSET=5.0V */
- regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_11, MAX77705_VBYPSET_MASK, 0);
+ err = regmap_field_write(chg->rfield[MAX77705_VBYPSET], 0);
+ if (err)
+ goto err;
/* Switching Frequency : 1.5MHz */
- regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_08, MAX77705_REG_FSW_MASK,
- (MAX77705_CHG_FSW_1_5MHz << MAX77705_REG_FSW_SHIFT));
+ err = regmap_field_write(chg->rfield[MAX77705_REG_FSW], MAX77705_CHG_FSW_1_5MHz);
+ if (err)
+ goto err;
/* Auto skip mode */
- regmap_update_bits(regmap, MAX77705_CHG_REG_CNFG_12, MAX77705_REG_DISKIP_MASK,
- (MAX77705_AUTO_SKIP << MAX77705_REG_DISKIP_SHIFT));
+ err = regmap_field_write(chg->rfield[MAX77705_REG_DISKIP], MAX77705_AUTO_SKIP);
+ if (err)
+ goto err;
+
+ return 0;
+
+err:
+ return dev_err_probe(chg->dev, err, "failed to configure the charger\n");
}
static int max77705_charger_probe(struct i2c_client *i2c)
@@ -523,11 +584,13 @@ static int max77705_charger_probe(struct i2c_client *i2c)
if (IS_ERR(chg->regmap))
return PTR_ERR(chg->regmap);
- ret = regmap_update_bits(chg->regmap,
- MAX77705_CHG_REG_INT_MASK,
- MAX77705_CHGIN_IM, 0);
- if (ret)
- return ret;
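+ /*
+  * Allocate one regmap field per bitfield of interest; these replace
+  * the open-coded register/mask/shift updates used previously.
+  */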
+ for (int i = 0; i < MAX77705_N_REGMAP_FIELDS; i++) {
+ chg->rfield[i] = devm_regmap_field_alloc(dev, chg->regmap,
+ max77705_reg_field[i]);
+ if (IS_ERR(chg->rfield[i]))
+ return dev_err_probe(dev, PTR_ERR(chg->rfield[i]),
+ "cannot allocate regmap field\n");
+ }
pscfg.fwnode = dev_fwnode(dev);
pscfg.drv_data = chg;
@@ -538,7 +601,7 @@ static int max77705_charger_probe(struct i2c_client *i2c)
max77705_charger_irq_chip.irq_drv_data = chg;
ret = devm_regmap_add_irq_chip(chg->dev, chg->regmap, i2c->irq,
- IRQF_ONESHOT | IRQF_SHARED, 0,
+ IRQF_ONESHOT, 0,
&max77705_charger_irq_chip,
&irq_data);
if (ret)
@@ -546,7 +609,7 @@ static int max77705_charger_probe(struct i2c_client *i2c)
chg->wqueue = create_singlethread_workqueue(dev_name(dev));
if (!chg->wqueue)
- return dev_err_probe(dev, -ENOMEM, "failed to create workqueue\n");
+ return -ENOMEM;
ret = devm_work_autocancel(dev, &chg->chgin_work, max77705_chgin_isr_work);
if (ret) {
@@ -554,7 +617,20 @@ static int max77705_charger_probe(struct i2c_client *i2c)
goto destroy_wq;
}
- max77705_charger_initialize(chg);
+ ret = max77705_charger_initialize(chg);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to initialize charger IC\n");
+ goto destroy_wq;
+ }
+
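+ /*
+  * Request the CHGIN interrupt explicitly now that the irq_chip no longer
+  * dispatches it through a handle_post_irq hook.
+  */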
+ ret = devm_request_threaded_irq(dev, regmap_irq_get_virq(irq_data, MAX77705_CHGIN_I),
+ NULL, max77705_chgin_irq,
+ IRQF_TRIGGER_NONE,
+ "chgin-irq", chg);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to Request chgin IRQ\n");
+ goto destroy_wq;
+ }
ret = max77705_charger_enable(chg);
if (ret) {
diff --git a/drivers/power/supply/max77976_charger.c b/drivers/power/supply/max77976_charger.c
index e6fe68cebc32..3d6ff4005533 100644
--- a/drivers/power/supply/max77976_charger.c
+++ b/drivers/power/supply/max77976_charger.c
@@ -292,10 +292,10 @@ static int max77976_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_ONLINE:
err = max77976_get_online(chg, &val->intval);
break;
- case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
val->intval = MAX77976_CHG_CC_MAX;
break;
- case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
err = max77976_get_integer(chg, CHG_CC,
MAX77976_CHG_CC_MIN,
MAX77976_CHG_CC_MAX,
@@ -330,7 +330,7 @@ static int max77976_set_property(struct power_supply *psy,
int err = 0;
switch (psp) {
- case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
err = max77976_set_integer(chg, CHG_CC,
MAX77976_CHG_CC_MIN,
MAX77976_CHG_CC_MAX,
@@ -355,7 +355,7 @@ static int max77976_property_is_writeable(struct power_supply *psy,
enum power_supply_property psp)
{
switch (psp) {
- case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
return true;
default:
@@ -368,8 +368,8 @@ static enum power_supply_property max77976_psy_props[] = {
POWER_SUPPLY_PROP_CHARGE_TYPE,
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_ONLINE,
- POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT,
- POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
diff --git a/drivers/power/supply/mt6370-charger.c b/drivers/power/supply/mt6370-charger.c
index 98579998b300..e6db961d5818 100644
--- a/drivers/power/supply/mt6370-charger.c
+++ b/drivers/power/supply/mt6370-charger.c
@@ -761,13 +761,6 @@ static int mt6370_chg_init_psy(struct mt6370_priv *priv)
return PTR_ERR_OR_ZERO(priv->psy);
}
-static void mt6370_chg_destroy_attach_lock(void *data)
-{
- struct mutex *attach_lock = data;
-
- mutex_destroy(attach_lock);
-}
-
static void mt6370_chg_destroy_wq(void *data)
{
struct workqueue_struct *wq = data;
@@ -894,22 +887,19 @@ static int mt6370_chg_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "Failed to init psy\n");
- mutex_init(&priv->attach_lock);
- ret = devm_add_action_or_reset(dev, mt6370_chg_destroy_attach_lock,
- &priv->attach_lock);
+ ret = devm_mutex_init(dev, &priv->attach_lock);
if (ret)
- return dev_err_probe(dev, ret, "Failed to init attach lock\n");
+ return ret;
priv->attach = MT6370_ATTACH_STAT_DETACH;
priv->wq = create_singlethread_workqueue(dev_name(priv->dev));
if (!priv->wq)
- return dev_err_probe(dev, -ENOMEM,
- "Failed to create workqueue\n");
+ return -ENOMEM;
ret = devm_add_action_or_reset(dev, mt6370_chg_destroy_wq, priv->wq);
if (ret)
- return dev_err_probe(dev, ret, "Failed to init wq\n");
+ return ret;
ret = devm_work_autocancel(dev, &priv->bc12_work, mt6370_chg_bc12_work_func);
if (ret)
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 18e5e84a81c6..198405f7126f 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -223,6 +223,8 @@ static struct power_supply_attr power_supply_attrs[] __ro_after_init = {
POWER_SUPPLY_ATTR(MANUFACTURE_YEAR),
POWER_SUPPLY_ATTR(MANUFACTURE_MONTH),
POWER_SUPPLY_ATTR(MANUFACTURE_DAY),
+ POWER_SUPPLY_ATTR(INTERNAL_RESISTANCE),
+ POWER_SUPPLY_ATTR(STATE_OF_HEALTH),
/* Properties of type `const char *' */
POWER_SUPPLY_ATTR(MODEL_NAME),
POWER_SUPPLY_ATTR(MANUFACTURER),
diff --git a/drivers/power/supply/qcom_battmgr.c b/drivers/power/supply/qcom_battmgr.c
index 99808ea9851f..3c2837ef3461 100644
--- a/drivers/power/supply/qcom_battmgr.c
+++ b/drivers/power/supply/qcom_battmgr.c
@@ -2,10 +2,12 @@
/*
* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2022, Linaro Ltd
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/auxiliary_bus.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/nvmem-consumer.h>
#include <linux/of_device.h>
#include <linux/power_supply.h>
#include <linux/property.h>
@@ -18,8 +20,10 @@
#define BATTMGR_STRING_LEN 128
enum qcom_battmgr_variant {
- QCOM_BATTMGR_SM8350,
QCOM_BATTMGR_SC8280XP,
+ QCOM_BATTMGR_SM8350,
+ QCOM_BATTMGR_SM8550,
+ QCOM_BATTMGR_X1E80100,
};
#define BATTMGR_BAT_STATUS 0x1
@@ -30,8 +34,9 @@ enum qcom_battmgr_variant {
#define NOTIF_BAT_PROPERTY 0x30
#define NOTIF_USB_PROPERTY 0x32
#define NOTIF_WLS_PROPERTY 0x34
-#define NOTIF_BAT_INFO 0x81
#define NOTIF_BAT_STATUS 0x80
+#define NOTIF_BAT_INFO 0x81
+#define NOTIF_BAT_CHARGING_STATE 0x83
#define BATTMGR_BAT_INFO 0x9
@@ -65,6 +70,9 @@ enum qcom_battmgr_variant {
#define BATT_RESISTANCE 21
#define BATT_POWER_NOW 22
#define BATT_POWER_AVG 23
+#define BATT_CHG_CTRL_EN 24
+#define BATT_CHG_CTRL_START_THR 25
+#define BATT_CHG_CTRL_END_THR 26
#define BATTMGR_USB_PROPERTY_GET 0x32
#define BATTMGR_USB_PROPERTY_SET 0x33
@@ -89,6 +97,13 @@ enum qcom_battmgr_variant {
#define WLS_TYPE 5
#define WLS_BOOST_EN 6
+#define BATTMGR_CHG_CTRL_LIMIT_EN 0x48
+#define CHARGE_CTRL_START_THR_MIN 50
+#define CHARGE_CTRL_START_THR_MAX 95
+#define CHARGE_CTRL_END_THR_MIN 55
+#define CHARGE_CTRL_END_THR_MAX 100
+#define CHARGE_CTRL_DELTA_SOC 5
+
struct qcom_battmgr_enable_request {
struct pmic_glink_hdr hdr;
__le32 battery_id;
@@ -123,6 +138,13 @@ struct qcom_battmgr_discharge_time_request {
__le32 reserved;
};
+struct qcom_battmgr_charge_ctrl_request {
+ struct pmic_glink_hdr hdr;
+ __le32 enable;
+ __le32 target_soc;
+ __le32 delta_soc;
+};
+
struct qcom_battmgr_message {
struct pmic_glink_hdr hdr;
union {
@@ -235,6 +257,8 @@ struct qcom_battmgr_info {
unsigned int capacity_warning;
unsigned int cycle_count;
unsigned int charge_count;
+ unsigned int charge_ctrl_start;
+ unsigned int charge_ctrl_end;
char model_number[BATTMGR_STRING_LEN];
char serial_number[BATTMGR_STRING_LEN];
char oem_info[BATTMGR_STRING_LEN];
@@ -254,6 +278,8 @@ struct qcom_battmgr_status {
unsigned int voltage_now;
unsigned int voltage_ocv;
unsigned int temperature;
+ unsigned int resistance;
+ unsigned int soh_percent;
unsigned int discharge_time;
unsigned int charge_time;
@@ -418,7 +444,11 @@ static const u8 sm8350_bat_prop_map[] = {
[POWER_SUPPLY_PROP_MODEL_NAME] = BATT_MODEL_NAME,
[POWER_SUPPLY_PROP_TIME_TO_FULL_AVG] = BATT_TTF_AVG,
[POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG] = BATT_TTE_AVG,
+ [POWER_SUPPLY_PROP_INTERNAL_RESISTANCE] = BATT_RESISTANCE,
+ [POWER_SUPPLY_PROP_STATE_OF_HEALTH] = BATT_SOH,
[POWER_SUPPLY_PROP_POWER_NOW] = BATT_POWER_NOW,
+ [POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD] = BATT_CHG_CTRL_START_THR,
+ [POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD] = BATT_CHG_CTRL_END_THR,
};
static int qcom_battmgr_bat_sm8350_update(struct qcom_battmgr *battmgr,
@@ -489,7 +519,8 @@ static int qcom_battmgr_bat_get_property(struct power_supply *psy,
if (!battmgr->service_up)
return -EAGAIN;
- if (battmgr->variant == QCOM_BATTMGR_SC8280XP)
+ if (battmgr->variant == QCOM_BATTMGR_SC8280XP ||
+ battmgr->variant == QCOM_BATTMGR_X1E80100)
ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp);
else
ret = qcom_battmgr_bat_sm8350_update(battmgr, psp);
@@ -584,12 +615,24 @@ static int qcom_battmgr_bat_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_TEMP:
val->intval = battmgr->status.temperature;
break;
+ case POWER_SUPPLY_PROP_INTERNAL_RESISTANCE:
+ val->intval = battmgr->status.resistance;
+ break;
+ case POWER_SUPPLY_PROP_STATE_OF_HEALTH:
+ val->intval = battmgr->status.soh_percent;
+ break;
case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
val->intval = battmgr->status.discharge_time;
break;
case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
val->intval = battmgr->status.charge_time;
break;
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD:
+ val->intval = battmgr->info.charge_ctrl_start;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD:
+ val->intval = battmgr->info.charge_ctrl_end;
+ break;
case POWER_SUPPLY_PROP_MANUFACTURE_YEAR:
val->intval = battmgr->info.year;
break;
@@ -615,6 +658,149 @@ static int qcom_battmgr_bat_get_property(struct power_supply *psy,
return 0;
}
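+/*
+ * Ask the firmware to limit charging: charging stops at target_soc and is
+ * presumably resumed once the state of charge drops by delta_soc.
+ */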
+static int qcom_battmgr_set_charge_control(struct qcom_battmgr *battmgr,
+ u32 target_soc, u32 delta_soc)
+{
+ struct qcom_battmgr_charge_ctrl_request request = {
+ .hdr.owner = cpu_to_le32(PMIC_GLINK_OWNER_BATTMGR),
+ .hdr.type = cpu_to_le32(PMIC_GLINK_REQ_RESP),
+ .hdr.opcode = cpu_to_le32(BATTMGR_CHG_CTRL_LIMIT_EN),
+ .enable = cpu_to_le32(1),
+ .target_soc = cpu_to_le32(target_soc),
+ .delta_soc = cpu_to_le32(delta_soc),
+ };
+
+ return qcom_battmgr_request(battmgr, &request, sizeof(request));
+}
+
+static int qcom_battmgr_set_charge_start_threshold(struct qcom_battmgr *battmgr, int start_soc)
+{
+ u32 target_soc, delta_soc;
+ int ret;
+
+ if (start_soc < CHARGE_CTRL_START_THR_MIN ||
+ start_soc > CHARGE_CTRL_START_THR_MAX) {
+ dev_err(battmgr->dev, "charge control start threshold exceed range: [%u - %u]\n",
+ CHARGE_CTRL_START_THR_MIN, CHARGE_CTRL_START_THR_MAX);
+ return -EINVAL;
+ }
+
+ /*
+ * If the new start threshold is larger than the old end threshold,
+ * move the end threshold one step (DELTA_SOC) after the new start
+ * threshold.
+ */
+ if (start_soc > battmgr->info.charge_ctrl_end) {
+ target_soc = start_soc + CHARGE_CTRL_DELTA_SOC;
+ target_soc = min_t(u32, target_soc, CHARGE_CTRL_END_THR_MAX);
+ delta_soc = target_soc - start_soc;
+ delta_soc = min_t(u32, delta_soc, CHARGE_CTRL_DELTA_SOC);
+ } else {
+ target_soc = battmgr->info.charge_ctrl_end;
+ delta_soc = battmgr->info.charge_ctrl_end - start_soc;
+ }
+
+ mutex_lock(&battmgr->lock);
+ ret = qcom_battmgr_set_charge_control(battmgr, target_soc, delta_soc);
+ mutex_unlock(&battmgr->lock);
+ if (!ret) {
+ battmgr->info.charge_ctrl_start = start_soc;
+ battmgr->info.charge_ctrl_end = target_soc;
+ }
+
+ return ret;
+}
+
+static int qcom_battmgr_set_charge_end_threshold(struct qcom_battmgr *battmgr, int end_soc)
+{
+ u32 delta_soc = CHARGE_CTRL_DELTA_SOC;
+ int ret;
+
+ if (end_soc < CHARGE_CTRL_END_THR_MIN ||
+ end_soc > CHARGE_CTRL_END_THR_MAX) {
+ dev_err(battmgr->dev, "charge control end threshold exceed range: [%u - %u]\n",
+ CHARGE_CTRL_END_THR_MIN, CHARGE_CTRL_END_THR_MAX);
+ return -EINVAL;
+ }
+
+ if (battmgr->info.charge_ctrl_start && end_soc > battmgr->info.charge_ctrl_start)
+ delta_soc = end_soc - battmgr->info.charge_ctrl_start;
+
+ mutex_lock(&battmgr->lock);
+ ret = qcom_battmgr_set_charge_control(battmgr, end_soc, delta_soc);
+ mutex_unlock(&battmgr->lock);
+ if (!ret) {
+ battmgr->info.charge_ctrl_start = end_soc - delta_soc;
+ battmgr->info.charge_ctrl_end = end_soc;
+ }
+
+ return ret;
+}
+
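+/*
+ * Seed the cached thresholds from nvmem cells, assumed to be exposed by the
+ * parent pmic-glink device, so limits persisted by the firmware survive boot.
+ */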
+static int qcom_battmgr_charge_control_thresholds_init(struct qcom_battmgr *battmgr)
+{
+ int ret;
+ u8 en, end_soc, start_soc, delta_soc;
+
+ ret = nvmem_cell_read_u8(battmgr->dev->parent, "charge_limit_en", &en);
+ if (!ret && en != 0) {
+ ret = nvmem_cell_read_u8(battmgr->dev->parent, "charge_limit_end", &end_soc);
+ if (ret < 0)
+ return ret;
+
+ ret = nvmem_cell_read_u8(battmgr->dev->parent, "charge_limit_delta", &delta_soc);
+ if (ret < 0)
+ return ret;
+
+ if (delta_soc >= end_soc)
+ return -EINVAL;
+
+ start_soc = end_soc - delta_soc;
+ end_soc = clamp(end_soc, CHARGE_CTRL_END_THR_MIN, CHARGE_CTRL_END_THR_MAX);
+ start_soc = clamp(start_soc, CHARGE_CTRL_START_THR_MIN, CHARGE_CTRL_START_THR_MAX);
+
+ battmgr->info.charge_ctrl_start = start_soc;
+ battmgr->info.charge_ctrl_end = end_soc;
+ }
+
+ return 0;
+}
+
+static int qcom_battmgr_bat_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD:
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static int qcom_battmgr_bat_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *pval)
+{
+ struct qcom_battmgr *battmgr = power_supply_get_drvdata(psy);
+
+ if (!battmgr->service_up)
+ return -EAGAIN;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD:
+ return qcom_battmgr_set_charge_start_threshold(battmgr, pval->intval);
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD:
+ return qcom_battmgr_set_charge_end_threshold(battmgr, pval->intval);
+ default:
+ return -EINVAL;
+ }
+}
+
static const enum power_supply_property sc8280xp_bat_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
@@ -649,6 +835,43 @@ static const struct power_supply_desc sc8280xp_bat_psy_desc = {
.get_property = qcom_battmgr_bat_get_property,
};
+static const enum power_supply_property x1e80100_bat_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_POWER_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_EMPTY,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
+ POWER_SUPPLY_PROP_ENERGY_FULL,
+ POWER_SUPPLY_PROP_ENERGY_EMPTY,
+ POWER_SUPPLY_PROP_ENERGY_NOW,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_MANUFACTURE_YEAR,
+ POWER_SUPPLY_PROP_MANUFACTURE_MONTH,
+ POWER_SUPPLY_PROP_MANUFACTURE_DAY,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_SERIAL_NUMBER,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD,
+};
+
+static const struct power_supply_desc x1e80100_bat_psy_desc = {
+ .name = "qcom-battmgr-bat",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = x1e80100_bat_props,
+ .num_properties = ARRAY_SIZE(x1e80100_bat_props),
+ .get_property = qcom_battmgr_bat_get_property,
+ .set_property = qcom_battmgr_bat_set_property,
+ .property_is_writeable = qcom_battmgr_bat_is_writeable,
+};
+
static const enum power_supply_property sm8350_bat_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_HEALTH,
@@ -668,6 +891,8 @@ static const enum power_supply_property sm8350_bat_props[] = {
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
+ POWER_SUPPLY_PROP_INTERNAL_RESISTANCE,
+ POWER_SUPPLY_PROP_STATE_OF_HEALTH,
POWER_SUPPLY_PROP_POWER_NOW,
};
@@ -679,6 +904,42 @@ static const struct power_supply_desc sm8350_bat_psy_desc = {
.get_property = qcom_battmgr_bat_get_property,
};
+static const enum power_supply_property sm8550_bat_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_VOLTAGE_OCV,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CHARGE_COUNTER,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
+ POWER_SUPPLY_PROP_INTERNAL_RESISTANCE,
+ POWER_SUPPLY_PROP_STATE_OF_HEALTH,
+ POWER_SUPPLY_PROP_POWER_NOW,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD,
+};
+
+static const struct power_supply_desc sm8550_bat_psy_desc = {
+ .name = "qcom-battmgr-bat",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = sm8550_bat_props,
+ .num_properties = ARRAY_SIZE(sm8550_bat_props),
+ .get_property = qcom_battmgr_bat_get_property,
+ .set_property = qcom_battmgr_bat_set_property,
+ .property_is_writeable = qcom_battmgr_bat_is_writeable,
+};
+
static int qcom_battmgr_ac_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
@@ -754,7 +1015,8 @@ static int qcom_battmgr_usb_get_property(struct power_supply *psy,
if (!battmgr->service_up)
return -EAGAIN;
- if (battmgr->variant == QCOM_BATTMGR_SC8280XP)
+ if (battmgr->variant == QCOM_BATTMGR_SC8280XP ||
+ battmgr->variant == QCOM_BATTMGR_X1E80100)
ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp);
else
ret = qcom_battmgr_usb_sm8350_update(battmgr, psp);
@@ -876,7 +1138,8 @@ static int qcom_battmgr_wls_get_property(struct power_supply *psy,
if (!battmgr->service_up)
return -EAGAIN;
- if (battmgr->variant == QCOM_BATTMGR_SC8280XP)
+ if (battmgr->variant == QCOM_BATTMGR_SC8280XP ||
+ battmgr->variant == QCOM_BATTMGR_X1E80100)
ret = qcom_battmgr_bat_sc8280xp_update(battmgr, psp);
else
ret = qcom_battmgr_wls_sm8350_update(battmgr, psp);
@@ -947,12 +1210,14 @@ static void qcom_battmgr_notification(struct qcom_battmgr *battmgr,
}
notification = le32_to_cpu(msg->notification);
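+ /*
+  * Only the low byte carries the notification id; newer firmware appears
+  * to use the upper bytes for extra payload.
+  */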
+ notification &= 0xff;
switch (notification) {
case NOTIF_BAT_INFO:
battmgr->info.valid = false;
fallthrough;
case NOTIF_BAT_STATUS:
case NOTIF_BAT_PROPERTY:
+ case NOTIF_BAT_CHARGING_STATE:
power_supply_changed(battmgr->bat_psy);
break;
case NOTIF_USB_PROPERTY:
@@ -982,7 +1247,8 @@ static void qcom_battmgr_sc8280xp_strcpy(char *dest, const char *src)
static unsigned int qcom_battmgr_sc8280xp_parse_technology(const char *chemistry)
{
- if (!strncmp(chemistry, "LIO", BATTMGR_CHEMISTRY_LEN))
+ if ((!strncmp(chemistry, "LIO", BATTMGR_CHEMISTRY_LEN)) ||
+ (!strncmp(chemistry, "OOI", BATTMGR_CHEMISTRY_LEN)))
return POWER_SUPPLY_TECHNOLOGY_LION;
if (!strncmp(chemistry, "LIP", BATTMGR_CHEMISTRY_LEN))
return POWER_SUPPLY_TECHNOLOGY_LIPO;
@@ -1095,6 +1361,9 @@ static void qcom_battmgr_sc8280xp_callback(struct qcom_battmgr *battmgr,
case BATTMGR_BAT_CHARGE_TIME:
battmgr->status.charge_time = le32_to_cpu(resp->time);
break;
+ case BATTMGR_CHG_CTRL_LIMIT_EN:
+ battmgr->error = 0;
+ break;
default:
dev_warn(battmgr->dev, "unknown message %#x\n", opcode);
break;
@@ -1159,6 +1428,9 @@ static void qcom_battmgr_sm8350_callback(struct qcom_battmgr *battmgr,
case BATT_CAPACITY:
battmgr->status.percent = le32_to_cpu(resp->intval.value) / 100;
break;
+ case BATT_SOH:
+ battmgr->status.soh_percent = le32_to_cpu(resp->intval.value);
+ break;
case BATT_VOLT_OCV:
battmgr->status.voltage_ocv = le32_to_cpu(resp->intval.value);
break;
@@ -1199,9 +1471,18 @@ static void qcom_battmgr_sm8350_callback(struct qcom_battmgr *battmgr,
case BATT_TTE_AVG:
battmgr->status.discharge_time = le32_to_cpu(resp->intval.value);
break;
+ case BATT_RESISTANCE:
+ battmgr->status.resistance = le32_to_cpu(resp->intval.value);
+ break;
case BATT_POWER_NOW:
battmgr->status.power_now = le32_to_cpu(resp->intval.value);
break;
+ case BATT_CHG_CTRL_START_THR:
+ battmgr->info.charge_ctrl_start = le32_to_cpu(resp->intval.value);
+ break;
+ case BATT_CHG_CTRL_END_THR:
+ battmgr->info.charge_ctrl_end = le32_to_cpu(resp->intval.value);
+ break;
default:
dev_warn(battmgr->dev, "unknown property %#x\n", property);
break;
@@ -1284,6 +1565,7 @@ static void qcom_battmgr_sm8350_callback(struct qcom_battmgr *battmgr,
}
break;
case BATTMGR_REQUEST_NOTIFICATION:
+ case BATTMGR_CHG_CTRL_LIMIT_EN:
battmgr->error = 0;
break;
default:
@@ -1303,7 +1585,8 @@ static void qcom_battmgr_callback(const void *data, size_t len, void *priv)
if (opcode == BATTMGR_NOTIFICATION)
qcom_battmgr_notification(battmgr, data, len);
- else if (battmgr->variant == QCOM_BATTMGR_SC8280XP)
+ else if (battmgr->variant == QCOM_BATTMGR_SC8280XP ||
+ battmgr->variant == QCOM_BATTMGR_X1E80100)
qcom_battmgr_sc8280xp_callback(battmgr, data, len);
else
qcom_battmgr_sm8350_callback(battmgr, data, len);
@@ -1339,7 +1622,8 @@ static void qcom_battmgr_pdr_notify(void *priv, int state)
static const struct of_device_id qcom_battmgr_of_variants[] = {
{ .compatible = "qcom,sc8180x-pmic-glink", .data = (void *)QCOM_BATTMGR_SC8280XP },
{ .compatible = "qcom,sc8280xp-pmic-glink", .data = (void *)QCOM_BATTMGR_SC8280XP },
- { .compatible = "qcom,x1e80100-pmic-glink", .data = (void *)QCOM_BATTMGR_SC8280XP },
+ { .compatible = "qcom,sm8550-pmic-glink", .data = (void *)QCOM_BATTMGR_SM8550 },
+ { .compatible = "qcom,x1e80100-pmic-glink", .data = (void *)QCOM_BATTMGR_X1E80100 },
/* Unmatched devices falls back to QCOM_BATTMGR_SM8350 */
{}
};
@@ -1349,11 +1633,13 @@ static char *qcom_battmgr_battery[] = { "battery" };
static int qcom_battmgr_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
+ const struct power_supply_desc *psy_desc;
struct power_supply_config psy_cfg_supply = {};
struct power_supply_config psy_cfg = {};
const struct of_device_id *match;
struct qcom_battmgr *battmgr;
struct device *dev = &adev->dev;
+ int ret;
battmgr = devm_kzalloc(dev, sizeof(*battmgr), GFP_KERNEL);
if (!battmgr)
@@ -1379,8 +1665,19 @@ static int qcom_battmgr_probe(struct auxiliary_device *adev,
else
battmgr->variant = QCOM_BATTMGR_SM8350;
- if (battmgr->variant == QCOM_BATTMGR_SC8280XP) {
- battmgr->bat_psy = devm_power_supply_register(dev, &sc8280xp_bat_psy_desc, &psy_cfg);
+ ret = qcom_battmgr_charge_control_thresholds_init(battmgr);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "failed to init battery charge control thresholds\n");
+
+ if (battmgr->variant == QCOM_BATTMGR_SC8280XP ||
+ battmgr->variant == QCOM_BATTMGR_X1E80100) {
+ if (battmgr->variant == QCOM_BATTMGR_X1E80100)
+ psy_desc = &x1e80100_bat_psy_desc;
+ else
+ psy_desc = &sc8280xp_bat_psy_desc;
+
+ battmgr->bat_psy = devm_power_supply_register(dev, psy_desc, &psy_cfg);
if (IS_ERR(battmgr->bat_psy))
return dev_err_probe(dev, PTR_ERR(battmgr->bat_psy),
"failed to register battery power supply\n");
@@ -1400,7 +1697,12 @@ static int qcom_battmgr_probe(struct auxiliary_device *adev,
return dev_err_probe(dev, PTR_ERR(battmgr->wls_psy),
"failed to register wireless charing power supply\n");
} else {
- battmgr->bat_psy = devm_power_supply_register(dev, &sm8350_bat_psy_desc, &psy_cfg);
+ if (battmgr->variant == QCOM_BATTMGR_SM8550)
+ psy_desc = &sm8550_bat_psy_desc;
+ else
+ psy_desc = &sm8350_bat_psy_desc;
+
+ battmgr->bat_psy = devm_power_supply_register(dev, psy_desc, &psy_cfg);
if (IS_ERR(battmgr->bat_psy))
return dev_err_probe(dev, PTR_ERR(battmgr->bat_psy),
"failed to register battery power supply\n");
diff --git a/drivers/power/supply/rk817_charger.c b/drivers/power/supply/rk817_charger.c
index 1251022eb052..9436c6bbf51f 100644
--- a/drivers/power/supply/rk817_charger.c
+++ b/drivers/power/supply/rk817_charger.c
@@ -1046,7 +1046,7 @@ static void rk817_charging_monitor(struct work_struct *work)
rk817_read_props(charger);
/* Run every 8 seconds like the BSP driver did. */
- queue_delayed_work(system_wq, &charger->work, msecs_to_jiffies(8000));
+ queue_delayed_work(system_percpu_wq, &charger->work, msecs_to_jiffies(8000));
}
static void rk817_cleanup_node(void *data)
@@ -1206,7 +1206,7 @@ static int rk817_charger_probe(struct platform_device *pdev)
return ret;
/* Force the first update immediately. */
- mod_delayed_work(system_wq, &charger->work, 0);
+ mod_delayed_work(system_percpu_wq, &charger->work, 0);
return 0;
}
@@ -1226,7 +1226,7 @@ static int __maybe_unused rk817_resume(struct device *dev)
struct rk817_charger *charger = dev_get_drvdata(dev);
/* force an immediate update */
- mod_delayed_work(system_wq, &charger->work, 0);
+ mod_delayed_work(system_percpu_wq, &charger->work, 0);
return 0;
}
diff --git a/drivers/power/supply/rt9467-charger.c b/drivers/power/supply/rt9467-charger.c
index e9aba9ad393c..fe773dd8b404 100644
--- a/drivers/power/supply/rt9467-charger.c
+++ b/drivers/power/supply/rt9467-charger.c
@@ -633,7 +633,9 @@ out:
static const enum power_supply_property rt9467_chg_properties[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
@@ -656,6 +658,8 @@ static int rt9467_psy_get_property(struct power_supply *psy,
return rt9467_psy_get_status(data, &val->intval);
case POWER_SUPPLY_PROP_ONLINE:
return regmap_field_read(data->rm_field[F_PWR_RDY], &val->intval);
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ return rt9467_get_adc(data, RT9467_ADC_VBUS_DIV5, &val->intval);
case POWER_SUPPLY_PROP_CURRENT_MAX:
mutex_lock(&data->attach_lock);
if (data->psy_usb_type == POWER_SUPPLY_USB_TYPE_UNKNOWN ||
@@ -665,6 +669,8 @@ static int rt9467_psy_get_property(struct power_supply *psy,
val->intval = 1500000;
mutex_unlock(&data->attach_lock);
return 0;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ return rt9467_get_adc(data, RT9467_ADC_IBUS, &val->intval);
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
mutex_lock(&data->ichg_ieoc_lock);
val->intval = data->ichg_ua;
@@ -1141,27 +1147,6 @@ static int rt9467_reset_chip(struct rt9467_chg_data *data)
return regmap_field_write(data->rm_field[F_RST], 1);
}
-static void rt9467_chg_destroy_adc_lock(void *data)
-{
- struct mutex *adc_lock = data;
-
- mutex_destroy(adc_lock);
-}
-
-static void rt9467_chg_destroy_attach_lock(void *data)
-{
- struct mutex *attach_lock = data;
-
- mutex_destroy(attach_lock);
-}
-
-static void rt9467_chg_destroy_ichg_ieoc_lock(void *data)
-{
- struct mutex *ichg_ieoc_lock = data;
-
- mutex_destroy(ichg_ieoc_lock);
-}
-
static void rt9467_chg_complete_aicl_done(void *data)
{
struct completion *aicl_done = data;
@@ -1214,29 +1199,23 @@ static int rt9467_charger_probe(struct i2c_client *i2c)
if (ret)
return dev_err_probe(dev, ret, "Failed to add irq chip\n");
- mutex_init(&data->adc_lock);
- ret = devm_add_action_or_reset(dev, rt9467_chg_destroy_adc_lock,
- &data->adc_lock);
+ ret = devm_mutex_init(dev, &data->adc_lock);
if (ret)
- return dev_err_probe(dev, ret, "Failed to init ADC lock\n");
+ return ret;
- mutex_init(&data->attach_lock);
- ret = devm_add_action_or_reset(dev, rt9467_chg_destroy_attach_lock,
- &data->attach_lock);
+ ret = devm_mutex_init(dev, &data->attach_lock);
if (ret)
- return dev_err_probe(dev, ret, "Failed to init attach lock\n");
+ return ret;
- mutex_init(&data->ichg_ieoc_lock);
- ret = devm_add_action_or_reset(dev, rt9467_chg_destroy_ichg_ieoc_lock,
- &data->ichg_ieoc_lock);
+ ret = devm_mutex_init(dev, &data->ichg_ieoc_lock);
if (ret)
- return dev_err_probe(dev, ret, "Failed to init ICHG/IEOC lock\n");
+ return ret;
init_completion(&data->aicl_done);
ret = devm_add_action_or_reset(dev, rt9467_chg_complete_aicl_done,
&data->aicl_done);
if (ret)
- return dev_err_probe(dev, ret, "Failed to init AICL done completion\n");
+ return ret;
ret = rt9467_do_charger_init(data);
if (ret)
diff --git a/drivers/power/supply/rx51_battery.c b/drivers/power/supply/rx51_battery.c
index 7cdcd415e868..b0220ec2d926 100644
--- a/drivers/power/supply/rx51_battery.c
+++ b/drivers/power/supply/rx51_battery.c
@@ -116,7 +116,7 @@ static int rx51_battery_read_temperature(struct rx51_device_info *di)
int mid = (max + min) / 2;
if (rx51_temp_table2[mid] <= raw)
min = mid;
- else if (rx51_temp_table2[mid] > raw)
+ else
max = mid;
if (rx51_temp_table2[mid] == raw)
break;
diff --git a/drivers/power/supply/sbs-charger.c b/drivers/power/supply/sbs-charger.c
index 27764123b929..7d5e67620580 100644
--- a/drivers/power/supply/sbs-charger.c
+++ b/drivers/power/supply/sbs-charger.c
@@ -154,8 +154,7 @@ static const struct regmap_config sbs_regmap = {
.val_format_endian = REGMAP_ENDIAN_LITTLE, /* since based on SMBus */
};
-static const struct power_supply_desc sbs_desc = {
- .name = "sbs-charger",
+static const struct power_supply_desc sbs_default_desc = {
.type = POWER_SUPPLY_TYPE_MAINS,
.properties = sbs_properties,
.num_properties = ARRAY_SIZE(sbs_properties),
@@ -165,9 +164,20 @@ static const struct power_supply_desc sbs_desc = {
static int sbs_probe(struct i2c_client *client)
{
struct power_supply_config psy_cfg = {};
+ struct power_supply_desc *sbs_desc;
struct sbs_info *chip;
int ret, val;
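+ /*
+  * Duplicate the static descriptor so every charger instance gets a unique
+  * power-supply name derived from its device name.
+  */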
+ sbs_desc = devm_kmemdup(&client->dev, &sbs_default_desc,
+ sizeof(*sbs_desc), GFP_KERNEL);
+ if (!sbs_desc)
+ return -ENOMEM;
+
+ sbs_desc->name = devm_kasprintf(&client->dev, GFP_KERNEL, "sbs-%s",
+ dev_name(&client->dev));
+ if (!sbs_desc->name)
+ return -ENOMEM;
+
chip = devm_kzalloc(&client->dev, sizeof(struct sbs_info), GFP_KERNEL);
if (!chip)
return -ENOMEM;
@@ -191,7 +201,7 @@ static int sbs_probe(struct i2c_client *client)
return dev_err_probe(&client->dev, ret, "Failed to get device status\n");
chip->last_state = val;
- chip->power_supply = devm_power_supply_register(&client->dev, &sbs_desc, &psy_cfg);
+ chip->power_supply = devm_power_supply_register(&client->dev, sbs_desc, &psy_cfg);
if (IS_ERR(chip->power_supply))
return dev_err_probe(&client->dev, PTR_ERR(chip->power_supply),
"Failed to register power supply\n");
diff --git a/drivers/power/supply/sbs-manager.c b/drivers/power/supply/sbs-manager.c
index 869729dfcd66..6fe526222f7f 100644
--- a/drivers/power/supply/sbs-manager.c
+++ b/drivers/power/supply/sbs-manager.c
@@ -348,7 +348,7 @@ static int sbsm_probe(struct i2c_client *client)
data->muxc = i2c_mux_alloc(adapter, dev, SBSM_MAX_BATS, 0,
I2C_MUX_LOCKED, &sbsm_select, NULL);
if (!data->muxc)
- return dev_err_probe(dev, -ENOMEM, "failed to alloc i2c mux\n");
+ return -ENOMEM;
data->muxc->priv = data;
ret = devm_add_action_or_reset(dev, sbsm_del_mux_adapter, data);
diff --git a/drivers/power/supply/ucs1002_power.c b/drivers/power/supply/ucs1002_power.c
index d32a7633f9e7..fe94435340de 100644
--- a/drivers/power/supply/ucs1002_power.c
+++ b/drivers/power/supply/ucs1002_power.c
@@ -493,7 +493,7 @@ static irqreturn_t ucs1002_alert_irq(int irq, void *data)
{
struct ucs1002_info *info = data;
- mod_delayed_work(system_wq, &info->health_poll, 0);
+ mod_delayed_work(system_percpu_wq, &info->health_poll, 0);
return IRQ_HANDLED;
}
diff --git a/drivers/power/supply/ug3105_battery.c b/drivers/power/supply/ug3105_battery.c
index e8a1de7cade0..210e0f9aa5e0 100644
--- a/drivers/power/supply/ug3105_battery.c
+++ b/drivers/power/supply/ug3105_battery.c
@@ -10,7 +10,22 @@
* is off or suspended, the coulomb counter is not used atm.
*
* Possible improvements:
- * 1. Activate commented out total_coulomb_count code
+ * 1. Add coulomb counter reading, e.g. something like this:
+ * Read + reset coulomb counter every 10 polls (every 300 seconds)
+ *
+ * if ((chip->poll_count % 10) == 0) {
+ * val = ug3105_read_word(chip->client, UG3105_REG_COULOMB_CNT);
+ * if (val < 0)
+ * goto out;
+ *
+ * i2c_smbus_write_byte_data(chip->client, UG3105_REG_CTRL1,
+ * UG3105_CTRL1_RESET_COULOMB_CNT);
+ *
+ * chip->total_coulomb_count += (s16)val;
+ * dev_dbg(&chip->client->dev, "coulomb count %d total %d\n",
+ * (s16)val, chip->total_coulomb_count);
+ * }
+ *
* 2. Reset total_coulomb_count val to 0 when the battery is as good as empty
* and remember that we did this (and clear the flag for this on susp/resume)
* 3. When the battery is full check if the flag that we set total_coulomb_count
@@ -31,24 +46,16 @@
* has shown that an estimated 7404mWh increase of the battery's energy results
* in a total_coulomb_count increase of 3277 units with a 5 milli-ohm sense R.
*
- * Copyright (C) 2021 Hans de Goede <hdegoede@redhat.com>
+ * Copyright (C) 2021 - 2025 Hans de Goede <hansg@kernel.org>
*/
-#include <linux/devm-helpers.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/mod_devicetable.h>
#include <linux/power_supply.h>
-#include <linux/workqueue.h>
-
-#define UG3105_MOV_AVG_WINDOW 8
-#define UG3105_INIT_POLL_TIME (5 * HZ)
-#define UG3105_POLL_TIME (30 * HZ)
-#define UG3105_SETTLE_TIME (1 * HZ)
-#define UG3105_INIT_POLL_COUNT 30
+#include "adc-battery-helper.h"
#define UG3105_REG_MODE 0x00
#define UG3105_REG_CTRL1 0x01
@@ -61,34 +68,13 @@
#define UG3105_CTRL1_RESET_COULOMB_CNT 0x03
-#define UG3105_CURR_HYST_UA 65000
-
-#define UG3105_LOW_BAT_UV 3700000
-#define UG3105_FULL_BAT_HYST_UV 38000
-
-#define AMBIENT_TEMP_CELCIUS 25
-
struct ug3105_chip {
+ /* Must be the first member see adc-battery-helper documentation */
+ struct adc_battery_helper helper;
struct i2c_client *client;
struct power_supply *psy;
- struct delayed_work work;
- struct mutex lock;
- int ocv[UG3105_MOV_AVG_WINDOW]; /* micro-volt */
- int intern_res[UG3105_MOV_AVG_WINDOW]; /* milli-ohm */
- int poll_count;
- int ocv_avg_index;
- int ocv_avg; /* micro-volt */
- int intern_res_poll_count;
- int intern_res_avg_index;
- int intern_res_avg; /* milli-ohm */
- int volt; /* micro-volt */
- int curr; /* micro-ampere */
- int total_coulomb_count;
int uv_per_unit;
int ua_per_unit;
- int status;
- int capacity;
- bool supplied;
};
static int ug3105_read_word(struct i2c_client *client, u8 reg)
@@ -102,230 +88,43 @@ static int ug3105_read_word(struct i2c_client *client, u8 reg)
return val;
}
-static int ug3105_get_status(struct ug3105_chip *chip)
-{
- int full = chip->psy->battery_info->constant_charge_voltage_max_uv -
- UG3105_FULL_BAT_HYST_UV;
-
- if (chip->curr > UG3105_CURR_HYST_UA)
- return POWER_SUPPLY_STATUS_CHARGING;
-
- if (chip->curr < -UG3105_CURR_HYST_UA)
- return POWER_SUPPLY_STATUS_DISCHARGING;
-
- if (chip->supplied && chip->ocv_avg > full)
- return POWER_SUPPLY_STATUS_FULL;
-
- return POWER_SUPPLY_STATUS_NOT_CHARGING;
-}
-
-static void ug3105_work(struct work_struct *work)
-{
- struct ug3105_chip *chip = container_of(work, struct ug3105_chip,
- work.work);
- int i, val, curr_diff, volt_diff, res, win_size;
- bool prev_supplied = chip->supplied;
- int prev_status = chip->status;
- int prev_volt = chip->volt;
- int prev_curr = chip->curr;
- struct power_supply *psy;
-
- mutex_lock(&chip->lock);
-
- psy = chip->psy;
- if (!psy)
- goto out;
-
- val = ug3105_read_word(chip->client, UG3105_REG_BAT_VOLT);
- if (val < 0)
- goto out;
- chip->volt = val * chip->uv_per_unit;
-
- val = ug3105_read_word(chip->client, UG3105_REG_BAT_CURR);
- if (val < 0)
- goto out;
- chip->curr = (s16)val * chip->ua_per_unit;
-
- chip->ocv[chip->ocv_avg_index] =
- chip->volt - chip->curr * chip->intern_res_avg / 1000;
- chip->ocv_avg_index = (chip->ocv_avg_index + 1) % UG3105_MOV_AVG_WINDOW;
- chip->poll_count++;
-
- /*
- * See possible improvements comment above.
- *
- * Read + reset coulomb counter every 10 polls (every 300 seconds)
- * if ((chip->poll_count % 10) == 0) {
- * val = ug3105_read_word(chip->client, UG3105_REG_COULOMB_CNT);
- * if (val < 0)
- * goto out;
- *
- * i2c_smbus_write_byte_data(chip->client, UG3105_REG_CTRL1,
- * UG3105_CTRL1_RESET_COULOMB_CNT);
- *
- * chip->total_coulomb_count += (s16)val;
- * dev_dbg(&chip->client->dev, "coulomb count %d total %d\n",
- * (s16)val, chip->total_coulomb_count);
- * }
- */
-
- chip->ocv_avg = 0;
- win_size = min(chip->poll_count, UG3105_MOV_AVG_WINDOW);
- for (i = 0; i < win_size; i++)
- chip->ocv_avg += chip->ocv[i];
- chip->ocv_avg /= win_size;
-
- chip->supplied = power_supply_am_i_supplied(psy);
- chip->status = ug3105_get_status(chip);
- if (chip->status == POWER_SUPPLY_STATUS_FULL)
- chip->capacity = 100;
- else
- chip->capacity = power_supply_batinfo_ocv2cap(chip->psy->battery_info,
- chip->ocv_avg,
- AMBIENT_TEMP_CELCIUS);
-
- /*
- * Skip internal resistance calc on charger [un]plug and
- * when the battery is almost empty (voltage low).
- */
- if (chip->supplied != prev_supplied ||
- chip->volt < UG3105_LOW_BAT_UV ||
- chip->poll_count < 2)
- goto out;
-
- /*
- * Assuming that the OCV voltage does not change significantly
- * between 2 polls, then we can calculate the internal resistance
- * on a significant current change by attributing all voltage
- * change between the 2 readings to the internal resistance.
- */
- curr_diff = abs(chip->curr - prev_curr);
- if (curr_diff < UG3105_CURR_HYST_UA)
- goto out;
-
- volt_diff = abs(chip->volt - prev_volt);
- res = volt_diff * 1000 / curr_diff;
-
- if ((res < (chip->intern_res_avg * 2 / 3)) ||
- (res > (chip->intern_res_avg * 4 / 3))) {
- dev_dbg(&chip->client->dev, "Ignoring outlier internal resistance %d mOhm\n", res);
- goto out;
- }
-
- dev_dbg(&chip->client->dev, "Internal resistance %d mOhm\n", res);
-
- chip->intern_res[chip->intern_res_avg_index] = res;
- chip->intern_res_avg_index = (chip->intern_res_avg_index + 1) % UG3105_MOV_AVG_WINDOW;
- chip->intern_res_poll_count++;
-
- chip->intern_res_avg = 0;
- win_size = min(chip->intern_res_poll_count, UG3105_MOV_AVG_WINDOW);
- for (i = 0; i < win_size; i++)
- chip->intern_res_avg += chip->intern_res[i];
- chip->intern_res_avg /= win_size;
-
-out:
- mutex_unlock(&chip->lock);
-
- queue_delayed_work(system_wq, &chip->work,
- (chip->poll_count <= UG3105_INIT_POLL_COUNT) ?
- UG3105_INIT_POLL_TIME : UG3105_POLL_TIME);
-
- if (chip->status != prev_status && psy)
- power_supply_changed(psy);
-}
-
-static enum power_supply_property ug3105_battery_props[] = {
- POWER_SUPPLY_PROP_STATUS,
- POWER_SUPPLY_PROP_PRESENT,
- POWER_SUPPLY_PROP_SCOPE,
- POWER_SUPPLY_PROP_VOLTAGE_NOW,
- POWER_SUPPLY_PROP_VOLTAGE_OCV,
- POWER_SUPPLY_PROP_CURRENT_NOW,
- POWER_SUPPLY_PROP_CAPACITY,
-};
-
-static int ug3105_get_property(struct power_supply *psy,
- enum power_supply_property psp,
- union power_supply_propval *val)
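+/*
+ * Callback for the adc-battery-helper core: read the raw ADC words and scale
+ * them with the per-unit factors computed at probe time.
+ */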
+static int ug3105_get_voltage_and_current_now(struct power_supply *psy, int *volt, int *curr)
{
struct ug3105_chip *chip = power_supply_get_drvdata(psy);
- int ret = 0;
-
- mutex_lock(&chip->lock);
+ int ret;
- if (!chip->psy) {
- ret = -EAGAIN;
- goto out;
- }
+ ret = ug3105_read_word(chip->client, UG3105_REG_BAT_VOLT);
+ if (ret < 0)
+ return ret;
- switch (psp) {
- case POWER_SUPPLY_PROP_STATUS:
- val->intval = chip->status;
- break;
- case POWER_SUPPLY_PROP_PRESENT:
- val->intval = 1;
- break;
- case POWER_SUPPLY_PROP_SCOPE:
- val->intval = POWER_SUPPLY_SCOPE_SYSTEM;
- break;
- case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- ret = ug3105_read_word(chip->client, UG3105_REG_BAT_VOLT);
- if (ret < 0)
- break;
- val->intval = ret * chip->uv_per_unit;
- ret = 0;
- break;
- case POWER_SUPPLY_PROP_VOLTAGE_OCV:
- val->intval = chip->ocv_avg;
- break;
- case POWER_SUPPLY_PROP_CURRENT_NOW:
- ret = ug3105_read_word(chip->client, UG3105_REG_BAT_CURR);
- if (ret < 0)
- break;
- val->intval = (s16)ret * chip->ua_per_unit;
- ret = 0;
- break;
- case POWER_SUPPLY_PROP_CAPACITY:
- val->intval = chip->capacity;
- break;
- default:
- ret = -EINVAL;
- }
+ *volt = ret * chip->uv_per_unit;
-out:
- mutex_unlock(&chip->lock);
- return ret;
-}
-
-static void ug3105_external_power_changed(struct power_supply *psy)
-{
- struct ug3105_chip *chip = power_supply_get_drvdata(psy);
+ ret = ug3105_read_word(chip->client, UG3105_REG_BAT_CURR);
+ if (ret < 0)
+ return ret;
- dev_dbg(&chip->client->dev, "external power changed\n");
- mod_delayed_work(system_wq, &chip->work, UG3105_SETTLE_TIME);
+ *curr = (s16)ret * chip->ua_per_unit;
+ return 0;
}
static const struct power_supply_desc ug3105_psy_desc = {
.name = "ug3105_battery",
.type = POWER_SUPPLY_TYPE_BATTERY,
- .get_property = ug3105_get_property,
- .external_power_changed = ug3105_external_power_changed,
- .properties = ug3105_battery_props,
- .num_properties = ARRAY_SIZE(ug3105_battery_props),
+ .get_property = adc_battery_helper_get_property,
+ .external_power_changed = adc_battery_helper_external_power_changed,
+ .properties = adc_battery_helper_properties,
+ .num_properties = ADC_HELPER_NUM_PROPERTIES,
};
-static void ug3105_init(struct ug3105_chip *chip)
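+/* Put the chip in RUN mode and reset the coulomb counter. */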
+static void ug3105_start(struct i2c_client *client)
+{
+ i2c_smbus_write_byte_data(client, UG3105_REG_MODE, UG3105_MODE_RUN);
+ i2c_smbus_write_byte_data(client, UG3105_REG_CTRL1, UG3105_CTRL1_RESET_COULOMB_CNT);
+}
+
+static void ug3105_stop(struct i2c_client *client)
{
- chip->poll_count = 0;
- chip->ocv_avg_index = 0;
- chip->total_coulomb_count = 0;
- i2c_smbus_write_byte_data(chip->client, UG3105_REG_MODE,
- UG3105_MODE_RUN);
- i2c_smbus_write_byte_data(chip->client, UG3105_REG_CTRL1,
- UG3105_CTRL1_RESET_COULOMB_CNT);
- queue_delayed_work(system_wq, &chip->work, 0);
- flush_delayed_work(&chip->work);
+ i2c_smbus_write_byte_data(client, UG3105_REG_MODE, UG3105_MODE_STANDBY);
}
static int ug3105_probe(struct i2c_client *client)
@@ -333,7 +132,6 @@ static int ug3105_probe(struct i2c_client *client)
struct power_supply_config psy_cfg = {};
struct device *dev = &client->dev;
u32 curr_sense_res_uohm = 10000;
- struct power_supply *psy;
struct ug3105_chip *chip;
int ret;
@@ -342,23 +140,8 @@ static int ug3105_probe(struct i2c_client *client)
return -ENOMEM;
chip->client = client;
- mutex_init(&chip->lock);
- ret = devm_delayed_work_autocancel(dev, &chip->work, ug3105_work);
- if (ret)
- return ret;
- psy_cfg.drv_data = chip;
- psy = devm_power_supply_register(dev, &ug3105_psy_desc, &psy_cfg);
- if (IS_ERR(psy))
- return PTR_ERR(psy);
-
- if (!psy->battery_info ||
- psy->battery_info->factory_internal_resistance_uohm == -EINVAL ||
- psy->battery_info->constant_charge_voltage_max_uv == -EINVAL ||
- !psy->battery_info->ocv_table[0]) {
- dev_err(dev, "error required properties are missing\n");
- return -ENODEV;
- }
+ ug3105_start(client);
device_property_read_u32(dev, "upisemi,rsns-microohm", &curr_sense_res_uohm);
@@ -366,35 +149,36 @@ static int ug3105_probe(struct i2c_client *client)
* DAC maximum is 4.5V divided by 65536 steps + an unknown factor of 10
* coming from somewhere for some reason (verified with a volt-meter).
*/
- chip->uv_per_unit = 45000000/65536;
+ chip->uv_per_unit = 45000000 / 65536;
/* Datasheet says 8.1 uV per unit for the current ADC */
chip->ua_per_unit = 8100000 / curr_sense_res_uohm;
- /* Use provided internal resistance as start point (in milli-ohm) */
- chip->intern_res_avg = psy->battery_info->factory_internal_resistance_uohm / 1000;
- /* Also add it to the internal resistance moving average window */
- chip->intern_res[0] = chip->intern_res_avg;
- chip->intern_res_avg_index = 1;
- chip->intern_res_poll_count = 1;
-
- mutex_lock(&chip->lock);
- chip->psy = psy;
- mutex_unlock(&chip->lock);
+ psy_cfg.drv_data = chip;
+ chip->psy = devm_power_supply_register(dev, &ug3105_psy_desc, &psy_cfg);
+ if (IS_ERR(chip->psy)) {
+ ret = PTR_ERR(chip->psy);
+ goto stop;
+ }
- ug3105_init(chip);
+ ret = adc_battery_helper_init(&chip->helper, chip->psy,
+ ug3105_get_voltage_and_current_now, NULL);
+ if (ret)
+ goto stop;
i2c_set_clientdata(client, chip);
return 0;
+
+stop:
+ ug3105_stop(client);
+ return ret;
}
static int __maybe_unused ug3105_suspend(struct device *dev)
{
struct ug3105_chip *chip = dev_get_drvdata(dev);
- cancel_delayed_work_sync(&chip->work);
- i2c_smbus_write_byte_data(chip->client, UG3105_REG_MODE,
- UG3105_MODE_STANDBY);
-
+ adc_battery_helper_suspend(dev);
+ ug3105_stop(chip->client);
return 0;
}
@@ -402,8 +186,8 @@ static int __maybe_unused ug3105_resume(struct device *dev)
{
struct ug3105_chip *chip = dev_get_drvdata(dev);
- ug3105_init(chip);
-
+ ug3105_start(chip->client);
+ adc_battery_helper_resume(dev);
return 0;
}
@@ -422,10 +206,12 @@ static struct i2c_driver ug3105_i2c_driver = {
.pm = &ug3105_pm_ops,
},
.probe = ug3105_probe,
+ .remove = ug3105_stop,
+ .shutdown = ug3105_stop,
.id_table = ug3105_id,
};
module_i2c_driver(ug3105_i2c_driver);
-MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com");
+MODULE_AUTHOR("Hans de Goede <hansg@kernel.org");
MODULE_DESCRIPTION("uPI uG3105 battery monitor driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/powercap/idle_inject.c b/drivers/powercap/idle_inject.c
index 5ad7cc438068..a25eb2018acd 100644
--- a/drivers/powercap/idle_inject.c
+++ b/drivers/powercap/idle_inject.c
@@ -133,7 +133,7 @@ static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
duration_us = READ_ONCE(ii_dev->run_duration_us);
duration_us += READ_ONCE(ii_dev->idle_duration_us);
- hrtimer_forward_now(timer, ns_to_ktime(duration_us * NSEC_PER_USEC));
+ hrtimer_forward_now(timer, us_to_ktime(duration_us));
return HRTIMER_RESTART;
}
@@ -232,8 +232,7 @@ int idle_inject_start(struct idle_inject_device *ii_dev)
idle_inject_wakeup(ii_dev);
hrtimer_start(&ii_dev->timer,
- ns_to_ktime((idle_duration_us + run_duration_us) *
- NSEC_PER_USEC),
+ us_to_ktime(idle_duration_us + run_duration_us),
HRTIMER_MODE_REL);
return 0;
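The idle_inject hunks are purely mechanical: us_to_ktime() from
<linux/ktime.h> folds the NSEC_PER_USEC multiplication into the helper.
A minimal sketch of the equivalence:

	u64 total_us = idle_duration_us + run_duration_us;
	ktime_t before = ns_to_ktime(total_us * NSEC_PER_USEC);
	ktime_t after = us_to_ktime(total_us);
	/* before == after for any value whose nanosecond count fits in u64 */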
diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c
index 92d1b62ea239..e9389876229e 100644
--- a/drivers/pps/kapi.c
+++ b/drivers/pps/kapi.c
@@ -109,16 +109,13 @@ struct pps_device *pps_register_source(struct pps_source_info *info,
if (err < 0) {
pr_err("%s: unable to create char device\n",
info->name);
- goto kfree_pps;
+ goto pps_register_source_exit;
}
dev_dbg(&pps->dev, "new PPS source %s\n", info->name);
return pps;
-kfree_pps:
- kfree(pps);
-
pps_register_source_exit:
pr_err("%s: unable to register source\n", info->name);
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
index 9463232af8d2..c6b8b6478276 100644
--- a/drivers/pps/pps.c
+++ b/drivers/pps/pps.c
@@ -374,6 +374,7 @@ int pps_register_cdev(struct pps_device *pps)
pps->info.name);
err = -EBUSY;
}
+ kfree(pps);
goto out_unlock;
}
pps->id = err;
@@ -383,13 +384,11 @@ int pps_register_cdev(struct pps_device *pps)
pps->dev.devt = MKDEV(pps_major, pps->id);
dev_set_drvdata(&pps->dev, pps);
dev_set_name(&pps->dev, "pps%d", pps->id);
+ pps->dev.release = pps_device_destruct;
err = device_register(&pps->dev);
if (err)
goto free_idr;
- /* Override the release function with our own */
- pps->dev.release = pps_device_destruct;
-
pr_debug("source %s got cdev (%d:%d)\n", pps->info.name, pps_major,
pps->id);
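The reordering in pps_register_cdev() follows the driver-model rule that a
device's release callback must be set before device_register(): any failure
after that point is unwound with put_device(), and the final put is what
invokes the release callback to free the object. A hedged sketch of the
pattern (the error-path details beyond this hunk are assumed):

	pps->dev.release = pps_device_destruct;	/* set before registering */
	err = device_register(&pps->dev);
	if (err)
		goto free_idr;	/* unwind presumably ends in put_device(),
				 * which calls pps_device_destruct() */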
diff --git a/drivers/ps3/ps3stor_lib.c b/drivers/ps3/ps3stor_lib.c
index a12a1ad9b5fe..3d4d343ee0c8 100644
--- a/drivers/ps3/ps3stor_lib.c
+++ b/drivers/ps3/ps3stor_lib.c
@@ -8,6 +8,7 @@
#include <linux/dma-mapping.h>
#include <linux/module.h>
+#include <linux/string_choices.h>
#include <asm/lv1call.h>
#include <asm/ps3stor.h>
@@ -265,7 +266,7 @@ u64 ps3stor_read_write_sectors(struct ps3_storage_device *dev, u64 lpar,
u64 start_sector, u64 sectors, int write)
{
unsigned int region_id = dev->regions[dev->region_idx].id;
- const char *op = write ? "write" : "read";
+ const char *op = str_write_read(write);
int res;
dev_dbg(&dev->sbd.core, "%s:%u: %s %llu sectors starting at %llu\n",
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 204278eb215e..5f8ea34d11d6 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -67,7 +67,7 @@ config PTP_1588_CLOCK_QORIQ
packets using the SO_TIMESTAMPING API.
To compile this driver as a module, choose M here: the module
- will be called ptp-qoriq.
+ will be called ptp_qoriq.
comment "Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks."
depends on PHYLIB=n || NETWORK_PHY_TIMESTAMPING=n
@@ -252,4 +252,15 @@ config PTP_S390
driver provides the raw clock value without the delta to
userspace. That way userspace programs like chrony could steer
the kernel clock.
+
+config PTP_NETC_V4_TIMER
+ tristate "NXP NETC V4 Timer PTP Driver"
+ depends on PTP_1588_CLOCK
+ depends on PCI_MSI
+ help
+	  This driver adds support for using the NXP NETC V4 Timer as a PTP
+	  clock. The clock is used by the ENETC V4 or the NETC V4 switch for
+	  PTP time synchronization. It also supports periodic output signals
+	  (e.g. PPS) and external trigger timestamping.
+
endmenu
diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile
index 25f846fe48c9..bdc47e284f14 100644
--- a/drivers/ptp/Makefile
+++ b/drivers/ptp/Makefile
@@ -12,9 +12,7 @@ obj-$(CONFIG_PTP_1588_CLOCK_INES) += ptp_ines.o
obj-$(CONFIG_PTP_1588_CLOCK_PCH) += ptp_pch.o
obj-$(CONFIG_PTP_1588_CLOCK_KVM) += ptp_kvm.o
obj-$(CONFIG_PTP_1588_CLOCK_VMCLOCK) += ptp_vmclock.o
-obj-$(CONFIG_PTP_1588_CLOCK_QORIQ) += ptp-qoriq.o
-ptp-qoriq-y += ptp_qoriq.o
-ptp-qoriq-$(CONFIG_DEBUG_FS) += ptp_qoriq_debugfs.o
+obj-$(CONFIG_PTP_1588_CLOCK_QORIQ) += ptp_qoriq.o
obj-$(CONFIG_PTP_1588_CLOCK_IDTCM) += ptp_clockmatrix.o
obj-$(CONFIG_PTP_1588_CLOCK_FC3W) += ptp_fc3.o
obj-$(CONFIG_PTP_1588_CLOCK_IDT82P33) += ptp_idt82p33.o
@@ -23,3 +21,4 @@ obj-$(CONFIG_PTP_1588_CLOCK_VMW) += ptp_vmw.o
obj-$(CONFIG_PTP_1588_CLOCK_OCP) += ptp_ocp.o
obj-$(CONFIG_PTP_DFL_TOD) += ptp_dfl_tod.o
obj-$(CONFIG_PTP_S390) += ptp_s390.o
+obj-$(CONFIG_PTP_NETC_V4_TIMER) += ptp_netc.o
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 4ca5a464a46a..8106eb617c8c 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -47,6 +47,26 @@ static int ptp_disable_pinfunc(struct ptp_clock_info *ops,
return err;
}
+void ptp_disable_all_events(struct ptp_clock *ptp)
+{
+ struct ptp_clock_info *info = ptp->info;
+ unsigned int i;
+
+ mutex_lock(&ptp->pincfg_mux);
+ /* Disable any pins that may raise EXTTS events */
+ for (i = 0; i < info->n_pins; i++)
+ if (info->pin_config[i].func == PTP_PF_EXTTS)
+ ptp_disable_pinfunc(info, info->pin_config[i].func,
+ info->pin_config[i].chan);
+
+ /* Disable the PPS event if the driver has PPS support */
+ if (info->pps) {
+ struct ptp_clock_request req = { .type = PTP_CLK_REQ_PPS };
+ info->enable(info, &req, 0);
+ }
+ mutex_unlock(&ptp->pincfg_mux);
+}
+
int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan)
{
@@ -91,12 +111,18 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
return -EOPNOTSUPP;
}
- /* Disable whatever function was previously assigned. */
+ /* Disable whichever pin was previously assigned to this function and
+ * channel.
+ */
if (pin1) {
ptp_disable_pinfunc(info, func, chan);
pin1->func = PTP_PF_NONE;
pin1->chan = 0;
}
+
+ /* Disable whatever function was previously assigned to the requested
+ * pin.
+ */
ptp_disable_pinfunc(info, pin2->func, pin2->chan);
pin2->func = func;
pin2->chan = chan;
@@ -285,17 +311,21 @@ static long ptp_enable_pps(struct ptp_clock *ptp, bool enable)
return ops->enable(ops, &req, enable);
}
-static long ptp_sys_offset_precise(struct ptp_clock *ptp, void __user *arg)
+typedef int (*ptp_crosststamp_fn)(struct ptp_clock_info *,
+ struct system_device_crosststamp *);
+
+static long ptp_sys_offset_precise(struct ptp_clock *ptp, void __user *arg,
+ ptp_crosststamp_fn crosststamp_fn)
{
struct ptp_sys_offset_precise precise_offset;
struct system_device_crosststamp xtstamp;
struct timespec64 ts;
int err;
- if (!ptp->info->getcrosststamp)
+ if (!crosststamp_fn)
return -EOPNOTSUPP;
- err = ptp->info->getcrosststamp(ptp->info, &xtstamp);
+ err = crosststamp_fn(ptp->info, &xtstamp);
if (err)
return err;
@@ -313,12 +343,17 @@ static long ptp_sys_offset_precise(struct ptp_clock *ptp, void __user *arg)
return copy_to_user(arg, &precise_offset, sizeof(precise_offset)) ? -EFAULT : 0;
}
-static long ptp_sys_offset_extended(struct ptp_clock *ptp, void __user *arg)
+typedef int (*ptp_gettimex_fn)(struct ptp_clock_info *,
+ struct timespec64 *,
+ struct ptp_system_timestamp *);
+
+static long ptp_sys_offset_extended(struct ptp_clock *ptp, void __user *arg,
+ ptp_gettimex_fn gettimex_fn)
{
struct ptp_sys_offset_extended *extoff __free(kfree) = NULL;
struct ptp_system_timestamp sts;
- if (!ptp->info->gettimex64)
+ if (!gettimex_fn)
return -EOPNOTSUPP;
extoff = memdup_user(arg, sizeof(*extoff));
@@ -346,7 +381,7 @@ static long ptp_sys_offset_extended(struct ptp_clock *ptp, void __user *arg)
struct timespec64 ts;
int err;
- err = ptp->info->gettimex64(ptp->info, &ts, &sts);
+ err = gettimex_fn(ptp->info, &ts, &sts);
if (err)
return err;
@@ -497,11 +532,13 @@ long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd,
case PTP_SYS_OFFSET_PRECISE:
case PTP_SYS_OFFSET_PRECISE2:
- return ptp_sys_offset_precise(ptp, argptr);
+ return ptp_sys_offset_precise(ptp, argptr,
+ ptp->info->getcrosststamp);
case PTP_SYS_OFFSET_EXTENDED:
case PTP_SYS_OFFSET_EXTENDED2:
- return ptp_sys_offset_extended(ptp, argptr);
+ return ptp_sys_offset_extended(ptp, argptr,
+ ptp->info->gettimex64);
case PTP_SYS_OFFSET:
case PTP_SYS_OFFSET2:
@@ -523,6 +560,13 @@ long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd,
case PTP_MASK_EN_SINGLE:
return ptp_mask_en_single(pccontext->private_clkdata, argptr);
+ case PTP_SYS_OFFSET_PRECISE_CYCLES:
+ return ptp_sys_offset_precise(ptp, argptr,
+ ptp->info->getcrosscycles);
+
+ case PTP_SYS_OFFSET_EXTENDED_CYCLES:
+ return ptp_sys_offset_extended(ptp, argptr,
+ ptp->info->getcyclesx64);
default:
return -ENOTTY;
}
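The refactor turns both ioctl helpers into workers parameterized by a
ptp_clock_info callback, so the new *_CYCLES ioctls reuse them unchanged.
The resulting dispatch, condensed from the hunks above:

	switch (cmd) {
	case PTP_SYS_OFFSET_PRECISE:
		return ptp_sys_offset_precise(ptp, argptr, ptp->info->getcrosststamp);
	case PTP_SYS_OFFSET_PRECISE_CYCLES:
		return ptp_sys_offset_precise(ptp, argptr, ptp->info->getcrosscycles);
	case PTP_SYS_OFFSET_EXTENDED:
		return ptp_sys_offset_extended(ptp, argptr, ptp->info->gettimex64);
	case PTP_SYS_OFFSET_EXTENDED_CYCLES:
		return ptp_sys_offset_extended(ptp, argptr, ptp->info->getcyclesx64);
	}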
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 1cc06b7cb17e..ef020599b771 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/posix-clock.h>
#include <linux/pps_kernel.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
@@ -100,6 +101,9 @@ static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp
return -EBUSY;
}
+ if (!timespec64_valid_settod(tp))
+ return -EINVAL;
+
return ptp->info->settime64(ptp->info, tp);
}
@@ -130,7 +134,7 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
ops = ptp->info;
if (tx->modes & ADJ_SETOFFSET) {
- struct timespec64 ts;
+ struct timespec64 ts, ts2;
ktime_t kt;
s64 delta;
@@ -143,6 +147,14 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
if ((unsigned long) ts.tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
+ /* Make sure the offset is valid */
+ err = ptp_clock_gettime(pc, &ts2);
+ if (err)
+ return err;
+ ts2 = timespec64_add(ts2, ts);
+ if (!timespec64_valid_settod(&ts2))
+ return -EINVAL;
+
kt = timespec64_to_ktime(ts);
delta = ktime_to_ns(kt);
err = ops->adjtime(ops, delta);
@@ -236,6 +248,69 @@ static void ptp_aux_kworker(struct kthread_work *work)
kthread_queue_delayed_work(ptp->kworker, &ptp->aux_work, delay);
}
+static ssize_t ptp_n_perout_loopback_read(struct file *filep,
+ char __user *buffer,
+ size_t count, loff_t *pos)
+{
+ struct ptp_clock *ptp = filep->private_data;
+ char buf[12] = {};
+
+ snprintf(buf, sizeof(buf), "%d\n", ptp->info->n_per_lp);
+
+ return simple_read_from_buffer(buffer, count, pos, buf, strlen(buf));
+}
+
+static const struct file_operations ptp_n_perout_loopback_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = ptp_n_perout_loopback_read,
+};
+
+static ssize_t ptp_perout_loopback_write(struct file *filep,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct ptp_clock *ptp = filep->private_data;
+ struct ptp_clock_info *ops = ptp->info;
+ unsigned int index, enable;
+ int len, cnt, err;
+ char buf[32] = {};
+
+ if (*ppos || !count)
+ return -EINVAL;
+
+ if (count >= sizeof(buf))
+ return -ENOSPC;
+
+ len = simple_write_to_buffer(buf, sizeof(buf) - 1,
+ ppos, buffer, count);
+ if (len < 0)
+ return len;
+
+ buf[len] = '\0';
+ cnt = sscanf(buf, "%u %u", &index, &enable);
+ if (cnt != 2)
+ return -EINVAL;
+
+ if (index >= ops->n_per_lp)
+ return -EINVAL;
+
+ if (enable != 0 && enable != 1)
+ return -EINVAL;
+
+ err = ops->perout_loopback(ops, index, enable);
+ if (err)
+ return err;
+
+ return count;
+}
+
+static const struct file_operations ptp_perout_loopback_ops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = ptp_perout_loopback_write,
+};
+
/* public interface */
struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
@@ -377,6 +452,12 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
/* Debugfs initialization */
snprintf(debugfsname, sizeof(debugfsname), "ptp%d", ptp->index);
ptp->debugfs_root = debugfs_create_dir(debugfsname, NULL);
+ if (info->n_per_lp > 0 && info->perout_loopback) {
+ debugfs_create_file("n_perout_loopback", 0400, ptp->debugfs_root,
+ ptp, &ptp_n_perout_loopback_fops);
+ debugfs_create_file("perout_loopback", 0200, ptp->debugfs_root,
+ ptp, &ptp_perout_loopback_ops);
+ }
return ptp;
@@ -417,9 +498,21 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
device_for_each_child(&ptp->dev, NULL, unregister_vclock);
}
+	/* Take a reference on the device to stop posix_clock_unregister()
+	 * from doing the last put and freeing the structure(s).
+ */
+ get_device(&ptp->dev);
+
+ /* Wake up any userspace waiting for an event. */
ptp->defunct = 1;
wake_up_interruptible(&ptp->tsev_wq);
+ /* Tear down the POSIX clock, which removes the user interface. */
+ posix_clock_unregister(&ptp->clock);
+
+ /* Disable all sources of event generation. */
+ ptp_disable_all_events(ptp);
+
if (ptp->kworker) {
kthread_cancel_delayed_work_sync(&ptp->aux_work);
kthread_destroy_worker(ptp->kworker);
@@ -429,7 +522,8 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
if (ptp->pps_source)
pps_unregister_source(ptp->pps_source);
- posix_clock_unregister(&ptp->clock);
+ /* The final put, normally here, will invoke ptp_clock_release(). */
+ put_device(&ptp->dev);
return 0;
}
@@ -477,6 +571,58 @@ int ptp_clock_index(struct ptp_clock *ptp)
}
EXPORT_SYMBOL(ptp_clock_index);
+static int ptp_clock_of_node_match(struct device *dev, const void *data)
+{
+ const struct device_node *parent_np = data;
+
+ return (dev->parent && dev_of_node(dev->parent) == parent_np);
+}
+
+int ptp_clock_index_by_of_node(struct device_node *np)
+{
+ struct ptp_clock *ptp;
+ struct device *dev;
+ int phc_index;
+
+ dev = class_find_device(&ptp_class, NULL, np,
+ ptp_clock_of_node_match);
+ if (!dev)
+ return -1;
+
+ ptp = dev_get_drvdata(dev);
+ phc_index = ptp_clock_index(ptp);
+ put_device(dev);
+
+ return phc_index;
+}
+EXPORT_SYMBOL_GPL(ptp_clock_index_by_of_node);
+
+static int ptp_clock_dev_match(struct device *dev, const void *data)
+{
+ const struct device *parent = data;
+
+ return dev->parent == parent;
+}
+
+int ptp_clock_index_by_dev(struct device *parent)
+{
+ struct ptp_clock *ptp;
+ struct device *dev;
+ int phc_index;
+
+ dev = class_find_device(&ptp_class, NULL, parent,
+ ptp_clock_dev_match);
+ if (!dev)
+ return -1;
+
+ ptp = dev_get_drvdata(dev);
+ phc_index = ptp_clock_index(ptp);
+ put_device(dev);
+
+ return phc_index;
+}
+EXPORT_SYMBOL_GPL(ptp_clock_index_by_dev);
+
int ptp_find_pin(struct ptp_clock *ptp,
enum ptp_pin_function func, unsigned int chan)
{
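The two lookup helpers let a MAC driver report the PHC index of a timer it
references only by firmware node or by struct device, without keeping its
own pointer to the ptp_clock. A hedged usage sketch (the ethtool context
and the example_priv/timer_np names are hypothetical):

	static int example_get_ts_info(struct net_device *ndev,
				       struct kernel_ethtool_ts_info *info)
	{
		struct example_priv *priv = netdev_priv(ndev);

		/* returns -1 when no matching PTP clock is registered */
		info->phc_index = ptp_clock_index_by_of_node(priv->timer_np);

		return 0;
	}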
diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c
index b8d4df8c6da2..59cd6bbb33f3 100644
--- a/drivers/ptp/ptp_clockmatrix.c
+++ b/drivers/ptp/ptp_clockmatrix.c
@@ -1161,7 +1161,7 @@ static int set_pll_output_mask(struct idtcm *idtcm, u16 addr, u8 val)
SET_U16_MSB(idtcm->channel[3].output_mask, val);
break;
default:
- err = -EFAULT; /* Bad address */;
+ err = -EFAULT; /* Bad address */
break;
}
diff --git a/drivers/ptp/ptp_netc.c b/drivers/ptp/ptp_netc.c
new file mode 100644
index 000000000000..94e952ee6990
--- /dev/null
+++ b/drivers/ptp/ptp_netc.c
@@ -0,0 +1,1043 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * NXP NETC V4 Timer driver
+ * Copyright 2025 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/fsl/netc_global.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/ptp_clock_kernel.h>
+
+#define NETC_TMR_PCI_VENDOR_NXP 0x1131
+
+#define NETC_TMR_CTRL 0x0080
+#define TMR_CTRL_CK_SEL GENMASK(1, 0)
+#define TMR_CTRL_TE BIT(2)
+#define TMR_ETEP(i) BIT(8 + (i))
+#define TMR_COMP_MODE BIT(15)
+#define TMR_CTRL_TCLK_PERIOD GENMASK(25, 16)
+#define TMR_CTRL_PPL(i) BIT(27 - (i))
+#define TMR_CTRL_FS BIT(28)
+
+#define NETC_TMR_TEVENT 0x0084
+#define TMR_TEVNET_PPEN(i) BIT(7 - (i))
+#define TMR_TEVENT_PPEN_ALL GENMASK(7, 5)
+#define TMR_TEVENT_ALMEN(i) BIT(16 + (i))
+#define TMR_TEVENT_ETS_THREN(i) BIT(20 + (i))
+#define TMR_TEVENT_ETSEN(i) BIT(24 + (i))
+#define TMR_TEVENT_ETS_OVEN(i) BIT(28 + (i))
+#define TMR_TEVENT_ETS(i) (TMR_TEVENT_ETS_THREN(i) | \
+ TMR_TEVENT_ETSEN(i) | \
+ TMR_TEVENT_ETS_OVEN(i))
+
+#define NETC_TMR_TEMASK 0x0088
+#define NETC_TMR_STAT 0x0094
+#define TMR_STAT_ETS_VLD(i) BIT(24 + (i))
+
+#define NETC_TMR_CNT_L 0x0098
+#define NETC_TMR_CNT_H 0x009c
+#define NETC_TMR_ADD 0x00a0
+#define NETC_TMR_PRSC 0x00a8
+#define NETC_TMR_ECTRL 0x00ac
+#define NETC_TMR_OFF_L 0x00b0
+#define NETC_TMR_OFF_H 0x00b4
+
+/* i = 0, 1; i indicates the index of TMR_ALARM */
+#define NETC_TMR_ALARM_L(i) (0x00b8 + (i) * 8)
+#define NETC_TMR_ALARM_H(i) (0x00bc + (i) * 8)
+
+/* i = 0, 1, 2. i indicates the index of TMR_FIPER. */
+#define NETC_TMR_FIPER(i) (0x00d0 + (i) * 4)
+
+#define NETC_TMR_FIPER_CTRL 0x00dc
+#define FIPER_CTRL_DIS(i) (BIT(7) << (i) * 8)
+#define FIPER_CTRL_PG(i) (BIT(6) << (i) * 8)
+#define FIPER_CTRL_FS_ALARM(i) (BIT(5) << (i) * 8)
+#define FIPER_CTRL_PW(i) (GENMASK(4, 0) << (i) * 8)
+#define FIPER_CTRL_SET_PW(i, v) (((v) & GENMASK(4, 0)) << 8 * (i))
+
+/* i = 0, 1; i indicates the index of TMR_ETTS */
+#define NETC_TMR_ETTS_L(i) (0x00e0 + (i) * 8)
+#define NETC_TMR_ETTS_H(i) (0x00e4 + (i) * 8)
+#define NETC_TMR_CUR_TIME_L 0x00f0
+#define NETC_TMR_CUR_TIME_H 0x00f4
+
+#define NETC_TMR_REGS_BAR 0
+#define NETC_GLOBAL_OFFSET 0x10000
+#define NETC_GLOBAL_IPBRR0 0xbf8
+#define IPBRR0_IP_REV GENMASK(15, 0)
+#define NETC_REV_4_1 0x0401
+
+#define NETC_TMR_FIPER_NUM 3
+#define NETC_TMR_INVALID_CHANNEL NETC_TMR_FIPER_NUM
+#define NETC_TMR_DEFAULT_PRSC 2
+#define NETC_TMR_DEFAULT_ALARM GENMASK_ULL(63, 0)
+#define NETC_TMR_DEFAULT_FIPER GENMASK(31, 0)
+#define NETC_TMR_FIPER_MAX_PW GENMASK(4, 0)
+#define NETC_TMR_ALARM_NUM 2
+#define NETC_TMR_DEFAULT_ETTF_THR 7
+
+/* 1588 timer reference clock source select */
+#define NETC_TMR_CCM_TIMER1 0 /* enet_timer1_clk_root, from CCM */
+#define NETC_TMR_SYSTEM_CLK 1 /* enet_clk_root/2, from CCM */
+#define NETC_TMR_EXT_OSC 2 /* tmr_1588_clk, from IO pins */
+
+#define NETC_TMR_SYSCLK_333M 333333333U
+
+enum netc_pp_type {
+ NETC_PP_PPS = 1,
+ NETC_PP_PEROUT,
+};
+
+struct netc_pp {
+ enum netc_pp_type type;
+ bool enabled;
+ int alarm_id;
+ u32 period; /* pulse period, ns */
+ u64 stime; /* start time, ns */
+};
+
+struct netc_timer {
+ void __iomem *base;
+ struct pci_dev *pdev;
+ spinlock_t lock; /* Prevent concurrent access to registers */
+
+ struct ptp_clock *clock;
+ struct ptp_clock_info caps;
+ u32 clk_select;
+ u32 clk_freq;
+ u32 oclk_prsc;
+	/* High 32 bits are the integer part, low 32 bits the fractional part */
+ u64 period;
+
+ int irq;
+ char irq_name[24];
+ int revision;
+ u32 tmr_emask;
+ u8 pps_channel;
+ u8 fs_alarm_num;
+ u8 fs_alarm_bitmap;
+ struct netc_pp pp[NETC_TMR_FIPER_NUM]; /* periodic pulse */
+};
+
+#define netc_timer_rd(p, o) netc_read((p)->base + (o))
+#define netc_timer_wr(p, o, v) netc_write((p)->base + (o), v)
+#define ptp_to_netc_timer(ptp) container_of((ptp), struct netc_timer, caps)
+
+static const char *const timer_clk_src[] = {
+ "ccm",
+ "ext"
+};
+
+static void netc_timer_cnt_write(struct netc_timer *priv, u64 ns)
+{
+ u32 tmr_cnt_h = upper_32_bits(ns);
+ u32 tmr_cnt_l = lower_32_bits(ns);
+
+	/* A write to the TMR_CNT_L register copies the written value
+	 * into the shadow TMR_CNT_L register, and a write to the TMR_CNT_H
+	 * register copies the written value into the shadow TMR_CNT_H
+	 * register. The contents of the shadow registers are copied into
+	 * the TMR_CNT_L and TMR_CNT_H registers following a write to the
+	 * TMR_CNT_H register, so the user must write to the TMR_CNT_L
+	 * register first. The other H/L register pairs behave the same
+	 * way.
+ */
+ netc_timer_wr(priv, NETC_TMR_CNT_L, tmr_cnt_l);
+ netc_timer_wr(priv, NETC_TMR_CNT_H, tmr_cnt_h);
+}
+
+static u64 netc_timer_offset_read(struct netc_timer *priv)
+{
+ u32 tmr_off_l, tmr_off_h;
+ u64 offset;
+
+ tmr_off_l = netc_timer_rd(priv, NETC_TMR_OFF_L);
+ tmr_off_h = netc_timer_rd(priv, NETC_TMR_OFF_H);
+ offset = (((u64)tmr_off_h) << 32) | tmr_off_l;
+
+ return offset;
+}
+
+static void netc_timer_offset_write(struct netc_timer *priv, u64 offset)
+{
+ u32 tmr_off_h = upper_32_bits(offset);
+ u32 tmr_off_l = lower_32_bits(offset);
+
+ netc_timer_wr(priv, NETC_TMR_OFF_L, tmr_off_l);
+ netc_timer_wr(priv, NETC_TMR_OFF_H, tmr_off_h);
+}
+
+static u64 netc_timer_cur_time_read(struct netc_timer *priv)
+{
+ u32 time_h, time_l;
+ u64 ns;
+
+	/* The user should read NETC_TMR_CUR_TIME_L first to
+	 * get the correct current time.
+ */
+ time_l = netc_timer_rd(priv, NETC_TMR_CUR_TIME_L);
+ time_h = netc_timer_rd(priv, NETC_TMR_CUR_TIME_H);
+ ns = (u64)time_h << 32 | time_l;
+
+ return ns;
+}
+
+static void netc_timer_alarm_write(struct netc_timer *priv,
+ u64 alarm, int index)
+{
+ u32 alarm_h = upper_32_bits(alarm);
+ u32 alarm_l = lower_32_bits(alarm);
+
+ netc_timer_wr(priv, NETC_TMR_ALARM_L(index), alarm_l);
+ netc_timer_wr(priv, NETC_TMR_ALARM_H(index), alarm_h);
+}
+
+static u32 netc_timer_get_integral_period(struct netc_timer *priv)
+{
+ u32 tmr_ctrl, integral_period;
+
+ tmr_ctrl = netc_timer_rd(priv, NETC_TMR_CTRL);
+ integral_period = FIELD_GET(TMR_CTRL_TCLK_PERIOD, tmr_ctrl);
+
+ return integral_period;
+}
+
+static u32 netc_timer_calculate_fiper_pw(struct netc_timer *priv,
+ u32 fiper)
+{
+ u64 divisor, pulse_width;
+
+	/* Set the FIPER pulse width to half the FIPER interval by default.
+ * pulse_width = (fiper / 2) / TMR_GCLK_period,
+ * TMR_GCLK_period = NSEC_PER_SEC / TMR_GCLK_freq,
+ * TMR_GCLK_freq = (clk_freq / oclk_prsc) Hz,
+ * so pulse_width = fiper * clk_freq / (2 * NSEC_PER_SEC * oclk_prsc).
+ */
+ divisor = mul_u32_u32(2 * NSEC_PER_SEC, priv->oclk_prsc);
+ pulse_width = div64_u64(mul_u32_u32(fiper, priv->clk_freq), divisor);
+
+ /* The FIPER_PW field only has 5 bits, need to update oclk_prsc */
+ if (pulse_width > NETC_TMR_FIPER_MAX_PW)
+ pulse_width = NETC_TMR_FIPER_MAX_PW;
+
+ return pulse_width;
+}
+
+static void netc_timer_set_pps_alarm(struct netc_timer *priv, int channel,
+ u32 integral_period)
+{
+ struct netc_pp *pp = &priv->pp[channel];
+ u64 alarm;
+
+ /* Get the alarm value */
+ alarm = netc_timer_cur_time_read(priv) + NSEC_PER_MSEC;
+ alarm = roundup_u64(alarm, NSEC_PER_SEC);
+ alarm = roundup_u64(alarm, integral_period);
+
+ netc_timer_alarm_write(priv, alarm, pp->alarm_id);
+}
+
+static void netc_timer_set_perout_alarm(struct netc_timer *priv, int channel,
+ u32 integral_period)
+{
+ u64 cur_time = netc_timer_cur_time_read(priv);
+ struct netc_pp *pp = &priv->pp[channel];
+ u64 alarm, delta, min_time;
+ u32 period = pp->period;
+ u64 stime = pp->stime;
+
+ min_time = cur_time + NSEC_PER_MSEC + period;
+ if (stime < min_time) {
+ delta = min_time - stime;
+ stime += roundup_u64(delta, period);
+ }
+
+ alarm = roundup_u64(stime - period, integral_period);
+ netc_timer_alarm_write(priv, alarm, pp->alarm_id);
+}
+
+static int netc_timer_get_alarm_id(struct netc_timer *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->fs_alarm_num; i++) {
+ if (!(priv->fs_alarm_bitmap & BIT(i))) {
+ priv->fs_alarm_bitmap |= BIT(i);
+ break;
+ }
+ }
+
+ return i;
+}
+
+static u64 netc_timer_get_gclk_period(struct netc_timer *priv)
+{
+ /* TMR_GCLK_freq = (clk_freq / oclk_prsc) Hz.
+ * TMR_GCLK_period = NSEC_PER_SEC / TMR_GCLK_freq.
+ * TMR_GCLK_period = (NSEC_PER_SEC * oclk_prsc) / clk_freq
+ */
+
+ return div_u64(mul_u32_u32(NSEC_PER_SEC, priv->oclk_prsc),
+ priv->clk_freq);
+}
+
+static void netc_timer_enable_periodic_pulse(struct netc_timer *priv,
+ u8 channel)
+{
+ u32 fiper_pw, fiper, fiper_ctrl, integral_period;
+ struct netc_pp *pp = &priv->pp[channel];
+ int alarm_id = pp->alarm_id;
+
+ integral_period = netc_timer_get_integral_period(priv);
+	/* Set the register to the desired FIPER interval in ns, minus TCLK_PERIOD */
+ fiper = pp->period - integral_period;
+ fiper_pw = netc_timer_calculate_fiper_pw(priv, fiper);
+
+ fiper_ctrl = netc_timer_rd(priv, NETC_TMR_FIPER_CTRL);
+ fiper_ctrl &= ~(FIPER_CTRL_DIS(channel) | FIPER_CTRL_PW(channel) |
+ FIPER_CTRL_FS_ALARM(channel));
+ fiper_ctrl |= FIPER_CTRL_SET_PW(channel, fiper_pw);
+ fiper_ctrl |= alarm_id ? FIPER_CTRL_FS_ALARM(channel) : 0;
+
+ priv->tmr_emask |= TMR_TEVENT_ALMEN(alarm_id);
+
+ if (pp->type == NETC_PP_PPS) {
+ priv->tmr_emask |= TMR_TEVNET_PPEN(channel);
+ netc_timer_set_pps_alarm(priv, channel, integral_period);
+ } else {
+ netc_timer_set_perout_alarm(priv, channel, integral_period);
+ }
+
+ netc_timer_wr(priv, NETC_TMR_TEMASK, priv->tmr_emask);
+ netc_timer_wr(priv, NETC_TMR_FIPER(channel), fiper);
+ netc_timer_wr(priv, NETC_TMR_FIPER_CTRL, fiper_ctrl);
+}
+
+static void netc_timer_disable_periodic_pulse(struct netc_timer *priv,
+ u8 channel)
+{
+ struct netc_pp *pp = &priv->pp[channel];
+ int alarm_id = pp->alarm_id;
+ u32 fiper_ctrl;
+
+ if (!pp->enabled)
+ return;
+
+ priv->tmr_emask &= ~(TMR_TEVNET_PPEN(channel) |
+ TMR_TEVENT_ALMEN(alarm_id));
+
+ fiper_ctrl = netc_timer_rd(priv, NETC_TMR_FIPER_CTRL);
+ fiper_ctrl |= FIPER_CTRL_DIS(channel);
+
+ netc_timer_alarm_write(priv, NETC_TMR_DEFAULT_ALARM, alarm_id);
+ netc_timer_wr(priv, NETC_TMR_TEMASK, priv->tmr_emask);
+ netc_timer_wr(priv, NETC_TMR_FIPER(channel), NETC_TMR_DEFAULT_FIPER);
+ netc_timer_wr(priv, NETC_TMR_FIPER_CTRL, fiper_ctrl);
+}
+
+static u8 netc_timer_select_pps_channel(struct netc_timer *priv)
+{
+ int i;
+
+ for (i = 0; i < NETC_TMR_FIPER_NUM; i++) {
+ if (!priv->pp[i].enabled)
+ return i;
+ }
+
+ return NETC_TMR_INVALID_CHANNEL;
+}
+
+/* Note that users should not use this API to output a PPS signal on
+ * external pins, because PTP_CLK_REQ_PPS triggers an internal PPS event
+ * that is fed into the kernel PPS subsystem. See:
+ * https://lore.kernel.org/r/20201117213826.18235-1-a.fatoum@pengutronix.de
+ */
+static int netc_timer_enable_pps(struct netc_timer *priv,
+ struct ptp_clock_request *rq, int on)
+{
+ struct device *dev = &priv->pdev->dev;
+ unsigned long flags;
+ struct netc_pp *pp;
+ int err = 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (on) {
+ int alarm_id;
+ u8 channel;
+
+ if (priv->pps_channel < NETC_TMR_FIPER_NUM) {
+ channel = priv->pps_channel;
+ } else {
+ channel = netc_timer_select_pps_channel(priv);
+ if (channel == NETC_TMR_INVALID_CHANNEL) {
+ dev_err(dev, "No available FIPERs\n");
+ err = -EBUSY;
+ goto unlock_spinlock;
+ }
+ }
+
+ pp = &priv->pp[channel];
+ if (pp->enabled)
+ goto unlock_spinlock;
+
+ alarm_id = netc_timer_get_alarm_id(priv);
+ if (alarm_id == priv->fs_alarm_num) {
+ dev_err(dev, "No available ALARMs\n");
+ err = -EBUSY;
+ goto unlock_spinlock;
+ }
+
+ pp->enabled = true;
+ pp->type = NETC_PP_PPS;
+ pp->alarm_id = alarm_id;
+ pp->period = NSEC_PER_SEC;
+ priv->pps_channel = channel;
+
+ netc_timer_enable_periodic_pulse(priv, channel);
+ } else {
+ /* pps_channel is invalid if PPS is not enabled, so no
+ * processing is needed.
+ */
+ if (priv->pps_channel >= NETC_TMR_FIPER_NUM)
+ goto unlock_spinlock;
+
+ netc_timer_disable_periodic_pulse(priv, priv->pps_channel);
+ pp = &priv->pp[priv->pps_channel];
+ priv->fs_alarm_bitmap &= ~BIT(pp->alarm_id);
+ memset(pp, 0, sizeof(*pp));
+ priv->pps_channel = NETC_TMR_INVALID_CHANNEL;
+ }
+
+unlock_spinlock:
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return err;
+}
+
+static int net_timer_enable_perout(struct netc_timer *priv,
+ struct ptp_clock_request *rq, int on)
+{
+ struct device *dev = &priv->pdev->dev;
+ u32 channel = rq->perout.index;
+ unsigned long flags;
+ struct netc_pp *pp;
+ int err = 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ pp = &priv->pp[channel];
+ if (pp->type == NETC_PP_PPS) {
+ dev_err(dev, "FIPER%u is being used for PPS\n", channel);
+ err = -EBUSY;
+ goto unlock_spinlock;
+ }
+
+ if (on) {
+ u64 period_ns, gclk_period, max_period, min_period;
+ struct timespec64 period, stime;
+ u32 integral_period;
+ int alarm_id;
+
+ period.tv_sec = rq->perout.period.sec;
+ period.tv_nsec = rq->perout.period.nsec;
+ period_ns = timespec64_to_ns(&period);
+
+ integral_period = netc_timer_get_integral_period(priv);
+ max_period = (u64)NETC_TMR_DEFAULT_FIPER + integral_period;
+ gclk_period = netc_timer_get_gclk_period(priv);
+ min_period = gclk_period * 4 + integral_period;
+ if (period_ns > max_period || period_ns < min_period) {
+ dev_err(dev, "The period range is %llu ~ %llu\n",
+ min_period, max_period);
+ err = -EINVAL;
+ goto unlock_spinlock;
+ }
+
+ if (pp->enabled) {
+ alarm_id = pp->alarm_id;
+ } else {
+ alarm_id = netc_timer_get_alarm_id(priv);
+ if (alarm_id == priv->fs_alarm_num) {
+ dev_err(dev, "No available ALARMs\n");
+ err = -EBUSY;
+ goto unlock_spinlock;
+ }
+
+ pp->type = NETC_PP_PEROUT;
+ pp->enabled = true;
+ pp->alarm_id = alarm_id;
+ }
+
+ stime.tv_sec = rq->perout.start.sec;
+ stime.tv_nsec = rq->perout.start.nsec;
+ pp->stime = timespec64_to_ns(&stime);
+ pp->period = period_ns;
+
+ netc_timer_enable_periodic_pulse(priv, channel);
+ } else {
+ netc_timer_disable_periodic_pulse(priv, channel);
+ priv->fs_alarm_bitmap &= ~BIT(pp->alarm_id);
+ memset(pp, 0, sizeof(*pp));
+ }
+
+unlock_spinlock:
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return err;
+}
+
+static void netc_timer_handle_etts_event(struct netc_timer *priv, int index,
+ bool update_event)
+{
+ struct ptp_clock_event event;
+ u32 etts_l = 0, etts_h = 0;
+
+ while (netc_timer_rd(priv, NETC_TMR_STAT) & TMR_STAT_ETS_VLD(index)) {
+ etts_l = netc_timer_rd(priv, NETC_TMR_ETTS_L(index));
+ etts_h = netc_timer_rd(priv, NETC_TMR_ETTS_H(index));
+ }
+
+ /* Invalid time stamp */
+ if (!etts_l && !etts_h)
+ return;
+
+ if (update_event) {
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = index;
+ event.timestamp = (u64)etts_h << 32;
+ event.timestamp |= etts_l;
+ ptp_clock_event(priv->clock, &event);
+ }
+}
+
+static int netc_timer_enable_extts(struct netc_timer *priv,
+ struct ptp_clock_request *rq, int on)
+{
+ int index = rq->extts.index;
+ unsigned long flags;
+ u32 tmr_ctrl;
+
+ /* Reject requests to enable time stamping on both edges */
+ if ((rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
+ return -EOPNOTSUPP;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ netc_timer_handle_etts_event(priv, rq->extts.index, false);
+ if (on) {
+ tmr_ctrl = netc_timer_rd(priv, NETC_TMR_CTRL);
+ if (rq->extts.flags & PTP_FALLING_EDGE)
+ tmr_ctrl |= TMR_ETEP(index);
+ else
+ tmr_ctrl &= ~TMR_ETEP(index);
+
+ netc_timer_wr(priv, NETC_TMR_CTRL, tmr_ctrl);
+ priv->tmr_emask |= TMR_TEVENT_ETS(index);
+ } else {
+ priv->tmr_emask &= ~TMR_TEVENT_ETS(index);
+ }
+
+ netc_timer_wr(priv, NETC_TMR_TEMASK, priv->tmr_emask);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static void netc_timer_disable_fiper(struct netc_timer *priv)
+{
+ u32 fiper_ctrl = netc_timer_rd(priv, NETC_TMR_FIPER_CTRL);
+ int i;
+
+ for (i = 0; i < NETC_TMR_FIPER_NUM; i++) {
+ if (!priv->pp[i].enabled)
+ continue;
+
+ fiper_ctrl |= FIPER_CTRL_DIS(i);
+ netc_timer_wr(priv, NETC_TMR_FIPER(i), NETC_TMR_DEFAULT_FIPER);
+ }
+
+ netc_timer_wr(priv, NETC_TMR_FIPER_CTRL, fiper_ctrl);
+}
+
+static void netc_timer_enable_fiper(struct netc_timer *priv)
+{
+ u32 integral_period = netc_timer_get_integral_period(priv);
+ u32 fiper_ctrl = netc_timer_rd(priv, NETC_TMR_FIPER_CTRL);
+ int i;
+
+ for (i = 0; i < NETC_TMR_FIPER_NUM; i++) {
+ struct netc_pp *pp = &priv->pp[i];
+ u32 fiper;
+
+ if (!pp->enabled)
+ continue;
+
+ fiper_ctrl &= ~FIPER_CTRL_DIS(i);
+
+ if (pp->type == NETC_PP_PPS)
+ netc_timer_set_pps_alarm(priv, i, integral_period);
+ else if (pp->type == NETC_PP_PEROUT)
+ netc_timer_set_perout_alarm(priv, i, integral_period);
+
+ fiper = pp->period - integral_period;
+ netc_timer_wr(priv, NETC_TMR_FIPER(i), fiper);
+ }
+
+ netc_timer_wr(priv, NETC_TMR_FIPER_CTRL, fiper_ctrl);
+}
+
+static int netc_timer_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct netc_timer *priv = ptp_to_netc_timer(ptp);
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_PPS:
+ return netc_timer_enable_pps(priv, rq, on);
+ case PTP_CLK_REQ_PEROUT:
+ return net_timer_enable_perout(priv, rq, on);
+ case PTP_CLK_REQ_EXTTS:
+ return netc_timer_enable_extts(priv, rq, on);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int netc_timer_perout_loopback(struct ptp_clock_info *ptp,
+ unsigned int index, int on)
+{
+ struct netc_timer *priv = ptp_to_netc_timer(ptp);
+ unsigned long flags;
+ u32 tmr_ctrl;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ tmr_ctrl = netc_timer_rd(priv, NETC_TMR_CTRL);
+ if (on)
+ tmr_ctrl |= TMR_CTRL_PPL(index);
+ else
+ tmr_ctrl &= ~TMR_CTRL_PPL(index);
+
+ netc_timer_wr(priv, NETC_TMR_CTRL, tmr_ctrl);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static void netc_timer_adjust_period(struct netc_timer *priv, u64 period)
+{
+ u32 fractional_period = lower_32_bits(period);
+ u32 integral_period = upper_32_bits(period);
+ u32 tmr_ctrl, old_tmr_ctrl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ old_tmr_ctrl = netc_timer_rd(priv, NETC_TMR_CTRL);
+ tmr_ctrl = u32_replace_bits(old_tmr_ctrl, integral_period,
+ TMR_CTRL_TCLK_PERIOD);
+ if (tmr_ctrl != old_tmr_ctrl) {
+ netc_timer_disable_fiper(priv);
+ netc_timer_wr(priv, NETC_TMR_CTRL, tmr_ctrl);
+ netc_timer_enable_fiper(priv);
+ }
+
+ netc_timer_wr(priv, NETC_TMR_ADD, fractional_period);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static int netc_timer_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct netc_timer *priv = ptp_to_netc_timer(ptp);
+ u64 new_period;
+
+ new_period = adjust_by_scaled_ppm(priv->period, scaled_ppm);
+ netc_timer_adjust_period(priv, new_period);
+
+ return 0;
+}
+
+static int netc_timer_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct netc_timer *priv = ptp_to_netc_timer(ptp);
+ unsigned long flags;
+ s64 tmr_off;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ netc_timer_disable_fiper(priv);
+
+	/* Adjust TMROFF instead of TMR_CNT because the timer counter
+	 * keeps increasing while TMR_CNT is being read and written
+	 * back, which would introduce latency.
+ */
+ tmr_off = netc_timer_offset_read(priv);
+ tmr_off += delta;
+ netc_timer_offset_write(priv, tmr_off);
+
+ netc_timer_enable_fiper(priv);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static int netc_timer_gettimex64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct netc_timer *priv = ptp_to_netc_timer(ptp);
+ unsigned long flags;
+ u64 ns;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ ptp_read_system_prets(sts);
+ ns = netc_timer_cur_time_read(priv);
+ ptp_read_system_postts(sts);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+static int netc_timer_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct netc_timer *priv = ptp_to_netc_timer(ptp);
+ u64 ns = timespec64_to_ns(ts);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ netc_timer_disable_fiper(priv);
+ netc_timer_offset_write(priv, 0);
+ netc_timer_cnt_write(priv, ns);
+ netc_timer_enable_fiper(priv);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+static const struct ptp_clock_info netc_timer_ptp_caps = {
+ .owner = THIS_MODULE,
+ .name = "NETC Timer PTP clock",
+ .max_adj = 500000000,
+ .n_pins = 0,
+ .n_alarm = 2,
+ .pps = 1,
+ .n_per_out = 3,
+ .n_ext_ts = 2,
+ .n_per_lp = 2,
+ .supported_extts_flags = PTP_RISING_EDGE | PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS,
+ .adjfine = netc_timer_adjfine,
+ .adjtime = netc_timer_adjtime,
+ .gettimex64 = netc_timer_gettimex64,
+ .settime64 = netc_timer_settime64,
+ .enable = netc_timer_enable,
+ .perout_loopback = netc_timer_perout_loopback,
+};
+
+static void netc_timer_init(struct netc_timer *priv)
+{
+ u32 fractional_period = lower_32_bits(priv->period);
+ u32 integral_period = upper_32_bits(priv->period);
+ u32 tmr_ctrl, fiper_ctrl;
+ struct timespec64 now;
+ u64 ns;
+ int i;
+
+	/* Software must enable the timer first, and the selected clock
+	 * must be active; otherwise the registers in the timer clock
+	 * domain are not accessible.
+ */
+ tmr_ctrl = FIELD_PREP(TMR_CTRL_CK_SEL, priv->clk_select) |
+ TMR_CTRL_TE | TMR_CTRL_FS;
+ netc_timer_wr(priv, NETC_TMR_CTRL, tmr_ctrl);
+ netc_timer_wr(priv, NETC_TMR_PRSC, priv->oclk_prsc);
+
+ /* Disable FIPER by default */
+ fiper_ctrl = netc_timer_rd(priv, NETC_TMR_FIPER_CTRL);
+ for (i = 0; i < NETC_TMR_FIPER_NUM; i++) {
+ fiper_ctrl |= FIPER_CTRL_DIS(i);
+ fiper_ctrl &= ~FIPER_CTRL_PG(i);
+ }
+ netc_timer_wr(priv, NETC_TMR_FIPER_CTRL, fiper_ctrl);
+ netc_timer_wr(priv, NETC_TMR_ECTRL, NETC_TMR_DEFAULT_ETTF_THR);
+
+ ktime_get_real_ts64(&now);
+ ns = timespec64_to_ns(&now);
+ netc_timer_cnt_write(priv, ns);
+
+	/* Allow atomic writes to TCLK_PERIOD and TMR_ADD: an update to
+ * TCLK_PERIOD does not take effect until TMR_ADD is written.
+ */
+ tmr_ctrl |= FIELD_PREP(TMR_CTRL_TCLK_PERIOD, integral_period) |
+ TMR_COMP_MODE;
+ netc_timer_wr(priv, NETC_TMR_CTRL, tmr_ctrl);
+ netc_timer_wr(priv, NETC_TMR_ADD, fractional_period);
+}
+
+static int netc_timer_pci_probe(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct netc_timer *priv;
+ int err;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ pcie_flr(pdev);
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return dev_err_probe(dev, err, "Failed to enable device\n");
+
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ err = pci_request_mem_regions(pdev, KBUILD_MODNAME);
+ if (err) {
+ dev_err(dev, "pci_request_regions() failed, err:%pe\n",
+ ERR_PTR(err));
+ goto disable_dev;
+ }
+
+ pci_set_master(pdev);
+
+ priv->pdev = pdev;
+ priv->base = pci_ioremap_bar(pdev, NETC_TMR_REGS_BAR);
+ if (!priv->base) {
+ err = -ENOMEM;
+ goto release_mem_regions;
+ }
+
+ pci_set_drvdata(pdev, priv);
+
+ return 0;
+
+release_mem_regions:
+ pci_release_mem_regions(pdev);
+disable_dev:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void netc_timer_pci_remove(struct pci_dev *pdev)
+{
+ struct netc_timer *priv = pci_get_drvdata(pdev);
+
+ iounmap(priv->base);
+ pci_release_mem_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static int netc_timer_get_reference_clk_source(struct netc_timer *priv)
+{
+ struct device *dev = &priv->pdev->dev;
+ struct clk *clk;
+ int i;
+
+ /* Select NETC system clock as the reference clock by default */
+ priv->clk_select = NETC_TMR_SYSTEM_CLK;
+ priv->clk_freq = NETC_TMR_SYSCLK_333M;
+
+ /* Update the clock source of the reference clock if the clock
+	 * is specified in the DT node.
+ */
+ for (i = 0; i < ARRAY_SIZE(timer_clk_src); i++) {
+ clk = devm_clk_get_optional_enabled(dev, timer_clk_src[i]);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk),
+ "Failed to enable clock\n");
+
+ if (clk) {
+ priv->clk_freq = clk_get_rate(clk);
+ priv->clk_select = i ? NETC_TMR_EXT_OSC :
+ NETC_TMR_CCM_TIMER1;
+ break;
+ }
+ }
+
+	/* The period is a 64-bit number: the high 32 bits are the
+	 * integer part and the low 32 bits are the fractional part.
+	 * To get the desired 32.32 fixed-point format, multiply the
+	 * numerator by 2^32 before dividing.
+ */
+ priv->period = div_u64((u64)NSEC_PER_SEC << 32, priv->clk_freq);
+
+ return 0;
+}
+
+static int netc_timer_parse_dt(struct netc_timer *priv)
+{
+ return netc_timer_get_reference_clk_source(priv);
+}
+
+static irqreturn_t netc_timer_isr(int irq, void *data)
+{
+ struct netc_timer *priv = data;
+ struct ptp_clock_event event;
+ u32 tmr_event;
+
+ spin_lock(&priv->lock);
+
+ tmr_event = netc_timer_rd(priv, NETC_TMR_TEVENT);
+ tmr_event &= priv->tmr_emask;
+	/* Clear interrupt status */
+ netc_timer_wr(priv, NETC_TMR_TEVENT, tmr_event);
+
+ if (tmr_event & TMR_TEVENT_ALMEN(0))
+ netc_timer_alarm_write(priv, NETC_TMR_DEFAULT_ALARM, 0);
+
+ if (tmr_event & TMR_TEVENT_ALMEN(1))
+ netc_timer_alarm_write(priv, NETC_TMR_DEFAULT_ALARM, 1);
+
+ if (tmr_event & TMR_TEVENT_PPEN_ALL) {
+ event.type = PTP_CLOCK_PPS;
+ ptp_clock_event(priv->clock, &event);
+ }
+
+ if (tmr_event & TMR_TEVENT_ETS(0))
+ netc_timer_handle_etts_event(priv, 0, true);
+
+ if (tmr_event & TMR_TEVENT_ETS(1))
+ netc_timer_handle_etts_event(priv, 1, true);
+
+ spin_unlock(&priv->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int netc_timer_init_msix_irq(struct netc_timer *priv)
+{
+ struct pci_dev *pdev = priv->pdev;
+ int err, n;
+
+ n = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
+ if (n != 1) {
+ err = (n < 0) ? n : -EPERM;
+ dev_err(&pdev->dev, "pci_alloc_irq_vectors() failed\n");
+ return err;
+ }
+
+ priv->irq = pci_irq_vector(pdev, 0);
+ err = request_irq(priv->irq, netc_timer_isr, 0, priv->irq_name, priv);
+ if (err) {
+ dev_err(&pdev->dev, "request_irq() failed\n");
+ pci_free_irq_vectors(pdev);
+
+ return err;
+ }
+
+ return 0;
+}
+
+static void netc_timer_free_msix_irq(struct netc_timer *priv)
+{
+ struct pci_dev *pdev = priv->pdev;
+
+ disable_irq(priv->irq);
+ free_irq(priv->irq, priv);
+ pci_free_irq_vectors(pdev);
+}
+
+static int netc_timer_get_global_ip_rev(struct netc_timer *priv)
+{
+ u32 val;
+
+ val = netc_timer_rd(priv, NETC_GLOBAL_OFFSET + NETC_GLOBAL_IPBRR0);
+
+ return val & IPBRR0_IP_REV;
+}
+
+static int netc_timer_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct netc_timer *priv;
+ int err;
+
+ err = netc_timer_pci_probe(pdev);
+ if (err)
+ return err;
+
+ priv = pci_get_drvdata(pdev);
+ priv->revision = netc_timer_get_global_ip_rev(priv);
+ if (priv->revision == NETC_REV_4_1)
+ priv->fs_alarm_num = 1;
+ else
+ priv->fs_alarm_num = NETC_TMR_ALARM_NUM;
+
+ err = netc_timer_parse_dt(priv);
+ if (err)
+ goto timer_pci_remove;
+
+ priv->caps = netc_timer_ptp_caps;
+ priv->oclk_prsc = NETC_TMR_DEFAULT_PRSC;
+ priv->pps_channel = NETC_TMR_INVALID_CHANNEL;
+ spin_lock_init(&priv->lock);
+ snprintf(priv->irq_name, sizeof(priv->irq_name), "ptp-netc %s",
+ pci_name(pdev));
+
+ err = netc_timer_init_msix_irq(priv);
+ if (err)
+ goto timer_pci_remove;
+
+ netc_timer_init(priv);
+ priv->clock = ptp_clock_register(&priv->caps, dev);
+ if (IS_ERR(priv->clock)) {
+ err = PTR_ERR(priv->clock);
+ goto free_msix_irq;
+ }
+
+ return 0;
+
+free_msix_irq:
+ netc_timer_free_msix_irq(priv);
+timer_pci_remove:
+ netc_timer_pci_remove(pdev);
+
+ return err;
+}
+
+static void netc_timer_remove(struct pci_dev *pdev)
+{
+ struct netc_timer *priv = pci_get_drvdata(pdev);
+
+ netc_timer_wr(priv, NETC_TMR_TEMASK, 0);
+ netc_timer_wr(priv, NETC_TMR_CTRL, 0);
+ ptp_clock_unregister(priv->clock);
+ netc_timer_free_msix_irq(priv);
+ netc_timer_pci_remove(pdev);
+}
+
+static const struct pci_device_id netc_timer_id_table[] = {
+ { PCI_DEVICE(NETC_TMR_PCI_VENDOR_NXP, 0xee02) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, netc_timer_id_table);
+
+static struct pci_driver netc_timer_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = netc_timer_id_table,
+ .probe = netc_timer_probe,
+ .remove = netc_timer_remove,
+};
+module_pci_driver(netc_timer_driver);
+
+MODULE_DESCRIPTION("NXP NETC Timer PTP Driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index d39073dc4072..794ec6e71990 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -1485,6 +1485,8 @@ static const struct ptp_clock_info ptp_ocp_clock_info = {
.pps = true,
.n_ext_ts = 6,
.n_per_out = 5,
+ .supported_extts_flags = PTP_STRICT_FLAGS | PTP_RISING_EDGE,
+ .supported_perout_flags = PTP_PEROUT_DUTY_CYCLE | PTP_PEROUT_PHASE,
};
static void
@@ -2095,10 +2097,6 @@ ptp_ocp_signal_from_perout(struct ptp_ocp *bp, int gen,
{
struct ptp_ocp_signal s = { };
- if (req->flags & ~(PTP_PEROUT_DUTY_CYCLE |
- PTP_PEROUT_PHASE))
- return -EOPNOTSUPP;
-
s.polarity = bp->signal[gen].polarity;
s.period = ktime_set(req->period.sec, req->period.nsec);
if (!s.period)
@@ -4557,8 +4555,7 @@ ptp_ocp_detach(struct ptp_ocp *bp)
ptp_ocp_debugfs_remove_device(bp);
ptp_ocp_detach_sysfs(bp);
ptp_ocp_attr_group_del(bp);
- if (timer_pending(&bp->watchdog))
- timer_delete_sync(&bp->watchdog);
+ timer_delete_sync(&bp->watchdog);
if (bp->ts0)
ptp_ocp_unregister_ext(bp->ts0);
if (bp->ts1)
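Dropping the open-coded flags check in ptp_ocp_signal_from_perout() is safe
because a driver that declares supported_perout_flags/supported_extts_flags
in its ptp_clock_info lets the PTP core reject unsupported requests before
->enable() is called. A sketch of the core-side check this relies on
(assumed core logic, condensed):

	if (req->perout.flags & ~ops->supported_perout_flags)
		return -EOPNOTSUPP;	/* rejected centrally, per clock caps */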
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index a6aad743c282..db4039d642b4 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -22,8 +22,14 @@
#define PTP_MAX_TIMESTAMPS 128
#define PTP_BUF_TIMESTAMPS 30
#define PTP_DEFAULT_MAX_VCLOCKS 20
+#define PTP_MAX_VCLOCKS_LIMIT (KMALLOC_MAX_SIZE / sizeof(int))
#define PTP_MAX_CHANNELS 2048
+enum {
+ PTP_LOCK_PHYSICAL = 0,
+ PTP_LOCK_VIRTUAL,
+};
+
struct timestamp_event_queue {
struct ptp_extts_event buf[PTP_MAX_TIMESTAMPS];
int head;
@@ -136,6 +142,8 @@ extern const struct class ptp_class;
* see ptp_chardev.c
*/
+void ptp_disable_all_events(struct ptp_clock *ptp);
+
/* caller must hold pincfg_mux */
int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan);
diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c
index 4d488c1f1941..8da995e36aeb 100644
--- a/drivers/ptp/ptp_qoriq.c
+++ b/drivers/ptp/ptp_qoriq.c
@@ -465,6 +465,25 @@ static int ptp_qoriq_auto_config(struct ptp_qoriq *ptp_qoriq,
return 0;
}
+static int ptp_qoriq_perout_loopback(struct ptp_clock_info *ptp,
+ unsigned int index, int on)
+{
+ struct ptp_qoriq *ptp_qoriq = container_of(ptp, struct ptp_qoriq, caps);
+ struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
+ u32 loopback_bit = index ? PP2L : PP1L;
+ u32 tmr_ctrl;
+
+ tmr_ctrl = ptp_qoriq->read(&regs->ctrl_regs->tmr_ctrl);
+ if (on)
+ tmr_ctrl |= loopback_bit;
+ else
+ tmr_ctrl &= ~loopback_bit;
+
+ ptp_qoriq->write(&regs->ctrl_regs->tmr_ctrl, tmr_ctrl);
+
+ return 0;
+}
+
int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
const struct ptp_clock_info *caps)
{
@@ -479,6 +498,8 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
ptp_qoriq->base = base;
ptp_qoriq->caps = *caps;
+ ptp_qoriq->caps.n_per_lp = 2;
+ ptp_qoriq->caps.perout_loopback = ptp_qoriq_perout_loopback;
if (of_property_read_u32(node, "fsl,cksel", &ptp_qoriq->cksel))
ptp_qoriq->cksel = DEFAULT_CKSEL;
@@ -568,7 +589,7 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
return PTR_ERR(ptp_qoriq->clock);
ptp_qoriq->phc_index = ptp_clock_index(ptp_qoriq->clock);
- ptp_qoriq_create_debugfs(ptp_qoriq);
+
return 0;
}
EXPORT_SYMBOL_GPL(ptp_qoriq_init);
@@ -580,7 +601,6 @@ void ptp_qoriq_free(struct ptp_qoriq *ptp_qoriq)
ptp_qoriq->write(&regs->ctrl_regs->tmr_temask, 0);
ptp_qoriq->write(&regs->ctrl_regs->tmr_ctrl, 0);
- ptp_qoriq_remove_debugfs(ptp_qoriq);
ptp_clock_unregister(ptp_qoriq->clock);
iounmap(ptp_qoriq->base);
free_irq(ptp_qoriq->irq, ptp_qoriq);
diff --git a/drivers/ptp/ptp_qoriq_debugfs.c b/drivers/ptp/ptp_qoriq_debugfs.c
deleted file mode 100644
index e8dddcedf288..000000000000
--- a/drivers/ptp/ptp_qoriq_debugfs.c
+++ /dev/null
@@ -1,101 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/* Copyright 2019 NXP
- */
-#include <linux/device.h>
-#include <linux/debugfs.h>
-#include <linux/fsl/ptp_qoriq.h>
-
-static int ptp_qoriq_fiper1_lpbk_get(void *data, u64 *val)
-{
- struct ptp_qoriq *ptp_qoriq = data;
- struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
- u32 ctrl;
-
- ctrl = ptp_qoriq->read(&regs->ctrl_regs->tmr_ctrl);
- *val = ctrl & PP1L ? 1 : 0;
-
- return 0;
-}
-
-static int ptp_qoriq_fiper1_lpbk_set(void *data, u64 val)
-{
- struct ptp_qoriq *ptp_qoriq = data;
- struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
- u32 ctrl;
-
- ctrl = ptp_qoriq->read(&regs->ctrl_regs->tmr_ctrl);
- if (val == 0)
- ctrl &= ~PP1L;
- else
- ctrl |= PP1L;
-
- ptp_qoriq->write(&regs->ctrl_regs->tmr_ctrl, ctrl);
- return 0;
-}
-
-DEFINE_DEBUGFS_ATTRIBUTE(ptp_qoriq_fiper1_fops, ptp_qoriq_fiper1_lpbk_get,
- ptp_qoriq_fiper1_lpbk_set, "%llu\n");
-
-static int ptp_qoriq_fiper2_lpbk_get(void *data, u64 *val)
-{
- struct ptp_qoriq *ptp_qoriq = data;
- struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
- u32 ctrl;
-
- ctrl = ptp_qoriq->read(&regs->ctrl_regs->tmr_ctrl);
- *val = ctrl & PP2L ? 1 : 0;
-
- return 0;
-}
-
-static int ptp_qoriq_fiper2_lpbk_set(void *data, u64 val)
-{
- struct ptp_qoriq *ptp_qoriq = data;
- struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
- u32 ctrl;
-
- ctrl = ptp_qoriq->read(&regs->ctrl_regs->tmr_ctrl);
- if (val == 0)
- ctrl &= ~PP2L;
- else
- ctrl |= PP2L;
-
- ptp_qoriq->write(&regs->ctrl_regs->tmr_ctrl, ctrl);
- return 0;
-}
-
-DEFINE_DEBUGFS_ATTRIBUTE(ptp_qoriq_fiper2_fops, ptp_qoriq_fiper2_lpbk_get,
- ptp_qoriq_fiper2_lpbk_set, "%llu\n");
-
-void ptp_qoriq_create_debugfs(struct ptp_qoriq *ptp_qoriq)
-{
- struct dentry *root;
-
- root = debugfs_create_dir(dev_name(ptp_qoriq->dev), NULL);
- if (IS_ERR(root))
- return;
- if (!root)
- goto err_root;
-
- ptp_qoriq->debugfs_root = root;
-
- if (!debugfs_create_file_unsafe("fiper1-loopback", 0600, root,
- ptp_qoriq, &ptp_qoriq_fiper1_fops))
- goto err_node;
- if (!debugfs_create_file_unsafe("fiper2-loopback", 0600, root,
- ptp_qoriq, &ptp_qoriq_fiper2_fops))
- goto err_node;
- return;
-
-err_node:
- debugfs_remove_recursive(root);
- ptp_qoriq->debugfs_root = NULL;
-err_root:
- dev_err(ptp_qoriq->dev, "failed to initialize debugfs\n");
-}
-
-void ptp_qoriq_remove_debugfs(struct ptp_qoriq *ptp_qoriq)
-{
- debugfs_remove_recursive(ptp_qoriq->debugfs_root);
- ptp_qoriq->debugfs_root = NULL;
-}
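The per-driver fiper loopback knobs deleted above are superseded by the
generic debugfs files that ptp_clock_register() now creates for any driver
setting n_per_lp and perout_loopback (see the ptp_clock.c hunks earlier).
Assuming debugfs is mounted at /sys/kernel/debug, the interface looks like:

	/* /sys/kernel/debug/ptp0/n_perout_loopback: read-only channel count
	 * /sys/kernel/debug/ptp0/perout_loopback: write "<index> <0|1>"
	 * to disable/enable loopback on the given channel
	 */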
diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
index 6b1b8f57cd95..200eaf500696 100644
--- a/drivers/ptp/ptp_sysfs.c
+++ b/drivers/ptp/ptp_sysfs.c
@@ -284,7 +284,7 @@ static ssize_t max_vclocks_store(struct device *dev,
size_t size;
u32 max;
- if (kstrtou32(buf, 0, &max) || max == 0)
+ if (kstrtou32(buf, 0, &max) || max == 0 || max > PTP_MAX_VCLOCKS_LIMIT)
return -EINVAL;
if (max == ptp->max_vclocks)
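The new bound pairs with PTP_MAX_VCLOCKS_LIMIT from the ptp_private.h hunk
above: max_vclocks sizes an array of int vclock indices, so capping it at
KMALLOC_MAX_SIZE / sizeof(int) keeps that allocation within what kmalloc
can serve. A sketch of the allocation the limit presumably protects (the
actual allocation site is outside this hunk):

	int *vclock_index = kcalloc(max, sizeof(int), GFP_KERNEL);
	if (!vclock_index)
		return -ENOMEM;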
diff --git a/drivers/ptp/ptp_vclock.c b/drivers/ptp/ptp_vclock.c
index 2fdeedd60e21..64c950456517 100644
--- a/drivers/ptp/ptp_vclock.c
+++ b/drivers/ptp/ptp_vclock.c
@@ -154,6 +154,11 @@ static long ptp_vclock_refresh(struct ptp_clock_info *ptp)
return PTP_VCLOCK_REFRESH_INTERVAL;
}
+static void ptp_vclock_set_subclass(struct ptp_clock *ptp)
+{
+ lockdep_set_subclass(&ptp->clock.rwsem, PTP_LOCK_VIRTUAL);
+}
+
static const struct ptp_clock_info ptp_vclock_info = {
.owner = THIS_MODULE,
.name = "ptp virtual clock",
@@ -213,6 +218,8 @@ struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock)
return NULL;
}
+ ptp_vclock_set_subclass(vclock->clock);
+
timecounter_init(&vclock->tc, &vclock->cc, 0);
ptp_schedule_worker(vclock->clock, PTP_VCLOCK_REFRESH_INTERVAL);
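The subclass matters because a physical clock's rwsem and a virtual clock's
rwsem belong to the same lock class and can legitimately be held together;
without distinct subclasses, lockdep would flag that nesting as a possible
deadlock. The annotation pattern, restated from the hunks above:

	/* give the virtual clock's lock its own lockdep subclass */
	lockdep_set_subclass(&vclock->clock->clock.rwsem, PTP_LOCK_VIRTUAL);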
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index f00ce973dddf..c2fd3f4b62d9 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -38,6 +38,15 @@ config PWM_DEBUG
It is expected to introduce some runtime overhead and diagnostic
output to the kernel log, so only enable while working on a driver.
+config PWM_PROVIDE_GPIO
+ bool "Provide a GPIO chip for each PWM chip"
+ depends on GPIOLIB
+ help
+	  Most PWMs can emit both a constant active-high and a constant
+	  active-low signal, so they can be used as a GPIO. Say Y here to let
+	  each PWM chip also provide a GPIO chip, which can then be plugged
+	  into consumers that know how to handle GPIOs but not PWMs.
+
config PWM_AB8500
tristate "AB8500 PWM support"
depends on AB8500_CORE && ARCH_U8500
@@ -432,6 +441,16 @@ config PWM_LPSS_PLATFORM
To compile this driver as a module, choose M here: the module
will be called pwm-lpss-platform.
+config PWM_MAX7360
+ tristate "MAX7360 PWMs"
+ depends on MFD_MAX7360
+ help
+ PWM driver for Maxim Integrated MAX7360 multifunction device, with
+ support for up to 8 PWM outputs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-max7360.
+
config PWM_MC33XS2410
tristate "MC33XS2410 PWM support"
depends on OF
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index ff4f47e5fb7a..dfa8b4966ee1 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_PWM_LPC32XX) += pwm-lpc32xx.o
obj-$(CONFIG_PWM_LPSS) += pwm-lpss.o
obj-$(CONFIG_PWM_LPSS_PCI) += pwm-lpss-pci.o
obj-$(CONFIG_PWM_LPSS_PLATFORM) += pwm-lpss-platform.o
+obj-$(CONFIG_PWM_MAX7360) += pwm-max7360.o
obj-$(CONFIG_PWM_MC33XS2410) += pwm-mc33xs2410.o
obj-$(CONFIG_PWM_MEDIATEK) += pwm-mediatek.o
obj-$(CONFIG_PWM_MESON) += pwm-meson.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 0d66376a83ec..ea2ccf42e814 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -276,7 +276,7 @@ int pwm_round_waveform_might_sleep(struct pwm_device *pwm, struct pwm_waveform *
if (IS_ENABLED(CONFIG_PWM_DEBUG) && ret_fromhw > 0)
dev_err(&chip->dev, "Unexpected return value from __pwm_round_waveform_fromhw: requested %llu/%llu [+%llu], return value %d\n",
- wf_req.duty_length_ns, wf_req.period_length_ns, wf_req.duty_offset_ns, ret_tohw);
+ wf_req.duty_length_ns, wf_req.period_length_ns, wf_req.duty_offset_ns, ret_fromhw);
if (IS_ENABLED(CONFIG_PWM_DEBUG) &&
(ret_tohw == 0) != pwm_check_rounding(&wf_req, wf))
@@ -497,6 +497,13 @@ static void pwm_apply_debug(struct pwm_device *pwm,
return;
/*
+	 * If a disabled PWM was requested, the result is unspecified, so
+	 * there is nothing to check.
+ */
+ if (!state->enabled)
+ return;
+
+ /*
* *state was just applied. Read out the hardware state and do some
* checks.
*/
@@ -508,25 +515,31 @@ static void pwm_apply_debug(struct pwm_device *pwm,
return;
/*
+	 * If the PWM came back disabled, that is somewhat odd, but there is
+	 * nothing that can sensibly be checked then, so return early.
+ */
+ if (!s1.enabled)
+ return;
+
+ /*
* The lowlevel driver either ignored .polarity (which is a bug) or as
* best effort inverted .polarity and fixed .duty_cycle respectively.
* Undo this inversion and fixup for further tests.
*/
- if (s1.enabled && s1.polarity != state->polarity) {
+ if (s1.polarity != state->polarity) {
s2.polarity = state->polarity;
s2.duty_cycle = s1.period - s1.duty_cycle;
s2.period = s1.period;
- s2.enabled = s1.enabled;
+ s2.enabled = true;
} else {
s2 = s1;
}
if (s2.polarity != state->polarity &&
- state->duty_cycle < state->period)
+ s2.duty_cycle < s2.period)
dev_warn(pwmchip_parent(chip), ".apply ignored .polarity\n");
- if (state->enabled && s2.enabled &&
- last->polarity == state->polarity &&
+ if (last->polarity == state->polarity &&
last->period > s2.period &&
last->period <= state->period)
dev_warn(pwmchip_parent(chip),
@@ -537,13 +550,12 @@ static void pwm_apply_debug(struct pwm_device *pwm,
* Rounding period up is fine only if duty_cycle is 0 then, because a
* flat line doesn't have a characteristic period.
*/
- if (state->enabled && s2.enabled && state->period < s2.period && s2.duty_cycle)
+ if (state->period < s2.period && s2.duty_cycle)
dev_warn(pwmchip_parent(chip),
".apply is supposed to round down period (requested: %llu, applied: %llu)\n",
state->period, s2.period);
- if (state->enabled &&
- last->polarity == state->polarity &&
+ if (last->polarity == state->polarity &&
last->period == s2.period &&
last->duty_cycle > s2.duty_cycle &&
last->duty_cycle <= state->duty_cycle)
@@ -553,16 +565,12 @@ static void pwm_apply_debug(struct pwm_device *pwm,
s2.duty_cycle, s2.period,
last->duty_cycle, last->period);
- if (state->enabled && s2.enabled && state->duty_cycle < s2.duty_cycle)
+ if (state->duty_cycle < s2.duty_cycle)
dev_warn(pwmchip_parent(chip),
".apply is supposed to round down duty_cycle (requested: %llu/%llu, applied: %llu/%llu)\n",
state->duty_cycle, state->period,
s2.duty_cycle, s2.period);
- if (!state->enabled && s2.enabled && s2.duty_cycle > 0)
- dev_warn(pwmchip_parent(chip),
- "requested disabled, but yielded enabled with duty > 0\n");
-
/* reapply the state that the driver reported being configured. */
err = chip->ops->apply(chip, pwm, &s1);
trace_pwm_apply(pwm, &s1, err);
@@ -2383,6 +2391,51 @@ static const struct file_operations pwm_cdev_fileops = {
static dev_t pwm_devt;
+static int pwm_gpio_request(struct gpio_chip *gc, unsigned int offset)
+{
+ struct pwm_chip *chip = gpiochip_get_data(gc);
+ struct pwm_device *pwm;
+
+ pwm = pwm_request_from_chip(chip, offset, "pwm-gpio");
+ if (IS_ERR(pwm))
+ return PTR_ERR(pwm);
+
+ return 0;
+}
+
+static void pwm_gpio_free(struct gpio_chip *gc, unsigned int offset)
+{
+ struct pwm_chip *chip = gpiochip_get_data(gc);
+
+ pwm_put(&chip->pwms[offset]);
+}
+
+static int pwm_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
+static int pwm_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ struct pwm_chip *chip = gpiochip_get_data(gc);
+ struct pwm_device *pwm = &chip->pwms[offset];
+ int ret;
+ struct pwm_waveform wf = {
+ .period_length_ns = 1,
+ };
+
+ ret = pwm_round_waveform_might_sleep(pwm, &wf);
+ if (ret < 0)
+ return ret;
+
+ if (value)
+ wf.duty_length_ns = wf.period_length_ns;
+ else
+ wf.duty_length_ns = 0;
+
+ return pwm_set_waveform_might_sleep(pwm, &wf, true);
+}
+
/**
* __pwmchip_add() - register a new PWM chip
* @chip: the PWM chip to add
@@ -2449,9 +2502,33 @@ int __pwmchip_add(struct pwm_chip *chip, struct module *owner)
if (ret)
goto err_device_add;
+ if (IS_ENABLED(CONFIG_PWM_PROVIDE_GPIO) && chip->ops->write_waveform) {
+ struct device *parent = pwmchip_parent(chip);
+
+ chip->gpio = (typeof(chip->gpio)){
+ .label = dev_name(parent),
+ .parent = parent,
+ .request = pwm_gpio_request,
+ .free = pwm_gpio_free,
+ .get_direction = pwm_gpio_get_direction,
+ .set = pwm_gpio_set,
+ .base = -1,
+ .ngpio = chip->npwm,
+ .can_sleep = true,
+ };
+
+ ret = gpiochip_add_data(&chip->gpio, chip);
+ if (ret)
+ goto err_gpiochip_add;
+ }
+
return 0;
+err_gpiochip_add:
+
+ cdev_device_del(&chip->cdev, &chip->dev);
err_device_add:
+
scoped_guard(pwmchip, chip)
chip->operational = false;
@@ -2472,6 +2549,9 @@ EXPORT_SYMBOL_GPL(__pwmchip_add);
*/
void pwmchip_remove(struct pwm_chip *chip)
{
+ if (IS_ENABLED(CONFIG_PWM_PROVIDE_GPIO) && chip->ops->write_waveform)
+ gpiochip_remove(&chip->gpio);
+
pwmchip_sysfs_unexport(chip);
scoped_guard(mutex, &pwm_lock) {
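
As an aside, once a waveform-capable chip is registered with CONFIG_PWM_PROVIDE_GPIO enabled, each PWM channel shows up as an output-only GPIO line. Here is a minimal consumer-side sketch, not part of the patch; the "status" con_id and the function name are made up for illustration, and the line is assumed to be mapped to the PWM chip's gpio_chip via device tree or ACPI:

#include <linux/gpio/consumer.h>
#include <linux/err.h>

static int example_drive_pwm_gpio(struct device *dev)
{
	struct gpio_desc *line;

	/* Hypothetical con_id; resolves to one line of the PWM gpio_chip */
	line = devm_gpiod_get(dev, "status", GPIOD_OUT_LOW);
	if (IS_ERR(line))
		return PTR_ERR(line);

	/*
	 * The provider registers with .can_sleep = true, so the _cansleep
	 * variant is required. A value of 1 ends up as duty == period on
	 * the underlying PWM, a value of 0 as duty == 0.
	 */
	gpiod_set_value_cansleep(line, 1);

	return 0;
}
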
diff --git a/drivers/pwm/pwm-berlin.c b/drivers/pwm/pwm-berlin.c
index 831aed228caf..858d36991374 100644
--- a/drivers/pwm/pwm-berlin.c
+++ b/drivers/pwm/pwm-berlin.c
@@ -234,7 +234,7 @@ static int berlin_pwm_suspend(struct device *dev)
for (i = 0; i < chip->npwm; i++) {
struct berlin_pwm_channel *channel = &bpc->channel[i];
- channel->enable = berlin_pwm_readl(bpc, i, BERLIN_PWM_ENABLE);
+ channel->enable = berlin_pwm_readl(bpc, i, BERLIN_PWM_EN);
channel->ctrl = berlin_pwm_readl(bpc, i, BERLIN_PWM_CONTROL);
channel->duty = berlin_pwm_readl(bpc, i, BERLIN_PWM_DUTY);
channel->tcnt = berlin_pwm_readl(bpc, i, BERLIN_PWM_TCNT);
@@ -262,7 +262,7 @@ static int berlin_pwm_resume(struct device *dev)
berlin_pwm_writel(bpc, i, channel->ctrl, BERLIN_PWM_CONTROL);
berlin_pwm_writel(bpc, i, channel->duty, BERLIN_PWM_DUTY);
berlin_pwm_writel(bpc, i, channel->tcnt, BERLIN_PWM_TCNT);
- berlin_pwm_writel(bpc, i, channel->enable, BERLIN_PWM_ENABLE);
+ berlin_pwm_writel(bpc, i, channel->enable, BERLIN_PWM_EN);
}
return 0;
diff --git a/drivers/pwm/pwm-cros-ec.c b/drivers/pwm/pwm-cros-ec.c
index 189301dc395e..67cfa17f58e0 100644
--- a/drivers/pwm/pwm-cros-ec.c
+++ b/drivers/pwm/pwm-cros-ec.c
@@ -49,10 +49,9 @@ static int cros_ec_pwm_set_duty(struct cros_ec_pwm_device *ec_pwm, u8 index,
u16 duty)
{
struct cros_ec_device *ec = ec_pwm->ec;
- struct {
- struct cros_ec_command msg;
+ TRAILING_OVERLAP(struct cros_ec_command, msg, data,
struct ec_params_pwm_set_duty params;
- } __packed buf;
+ ) __packed buf;
struct ec_params_pwm_set_duty *params = &buf.params;
struct cros_ec_command *msg = &buf.msg;
int ret;
@@ -83,13 +82,12 @@ static int cros_ec_pwm_set_duty(struct cros_ec_pwm_device *ec_pwm, u8 index,
static int cros_ec_pwm_get_duty(struct cros_ec_device *ec, bool use_pwm_type, u8 index)
{
- struct {
- struct cros_ec_command msg;
+ TRAILING_OVERLAP(struct cros_ec_command, msg, data,
union {
struct ec_params_pwm_get_duty params;
struct ec_response_pwm_get_duty resp;
};
- } __packed buf;
+ ) __packed buf;
struct ec_params_pwm_get_duty *params = &buf.params;
struct ec_response_pwm_get_duty *resp = &buf.resp;
struct cros_ec_command *msg = &buf.msg;
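
TRAILING_OVERLAP() replaces the open-coded struct-in-struct pattern so that the payload overlays the flexible data[] array at the end of struct cros_ec_command rather than being placed after it. A rough, simplified sketch of the layout it stands in for; this is not the actual macro expansion (the real helper in the kernel headers is more general) and the union name is hypothetical:

/* Hedged sketch: the parameters share storage with the trailing
 * flexible array instead of following it. */
union cros_ec_pwm_set_duty_buf {
	struct cros_ec_command msg;	/* header ending in u8 data[] */
	struct {
		u8 _hdr[sizeof(struct cros_ec_command)];
		struct ec_params_pwm_set_duty params;	/* overlays msg.data */
	} __packed;
};
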
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index 6683931872fc..35406b2e1925 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -3,6 +3,7 @@
* Freescale FlexTimer Module (FTM) PWM Driver
*
* Copyright 2012-2013 Freescale Semiconductor, Inc.
+ * Copyright 2020-2025 NXP
*/
#include <linux/clk.h>
@@ -30,6 +31,8 @@ enum fsl_pwm_clk {
struct fsl_ftm_soc {
bool has_enable_bits;
+ bool has_flt_reg;
+ unsigned int npwm;
};
struct fsl_pwm_periodcfg {
@@ -374,6 +377,20 @@ static bool fsl_pwm_volatile_reg(struct device *dev, unsigned int reg)
return false;
}
+static bool fsl_pwm_is_reg(struct device *dev, unsigned int reg)
+{
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct fsl_pwm_chip *fpc = to_fsl_chip(chip);
+
+ if (reg >= FTM_CSC(fpc->soc->npwm) && reg < FTM_CNTIN)
+ return false;
+
+ if ((reg == FTM_FLTCTRL || reg == FTM_FLTPOL) && !fpc->soc->has_flt_reg)
+ return false;
+
+ return true;
+}
+
static const struct regmap_config fsl_pwm_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -382,21 +399,24 @@ static const struct regmap_config fsl_pwm_regmap_config = {
.max_register = FTM_PWMLOAD,
.volatile_reg = fsl_pwm_volatile_reg,
.cache_type = REGCACHE_FLAT,
+ .writeable_reg = fsl_pwm_is_reg,
+ .readable_reg = fsl_pwm_is_reg,
};
static int fsl_pwm_probe(struct platform_device *pdev)
{
+ const struct fsl_ftm_soc *soc = of_device_get_match_data(&pdev->dev);
struct pwm_chip *chip;
struct fsl_pwm_chip *fpc;
void __iomem *base;
int ret;
- chip = devm_pwmchip_alloc(&pdev->dev, 8, sizeof(*fpc));
+ chip = devm_pwmchip_alloc(&pdev->dev, soc->npwm, sizeof(*fpc));
if (IS_ERR(chip))
return PTR_ERR(chip);
fpc = to_fsl_chip(chip);
- fpc->soc = of_device_get_match_data(&pdev->dev);
+ fpc->soc = soc;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
@@ -512,15 +532,26 @@ static const struct dev_pm_ops fsl_pwm_pm_ops = {
static const struct fsl_ftm_soc vf610_ftm_pwm = {
.has_enable_bits = false,
+ .has_flt_reg = true,
+ .npwm = 8,
};
static const struct fsl_ftm_soc imx8qm_ftm_pwm = {
.has_enable_bits = true,
+ .has_flt_reg = true,
+ .npwm = 8,
+};
+
+static const struct fsl_ftm_soc s32g2_ftm_pwm = {
+ .has_enable_bits = true,
+ .has_flt_reg = false,
+ .npwm = 6,
};
static const struct of_device_id fsl_pwm_dt_ids[] = {
{ .compatible = "fsl,vf610-ftm-pwm", .data = &vf610_ftm_pwm },
{ .compatible = "fsl,imx8qm-ftm-pwm", .data = &imx8qm_ftm_pwm },
+ { .compatible = "nxp,s32g2-ftm-pwm", .data = &s32g2_ftm_pwm },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_pwm_dt_ids);
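
To make the register window concrete, a hypothetical walk-through of fsl_pwm_is_reg() for the S32G2 (npwm = 6, no fault registers), assuming the driver's usual FTM layout where FTM_CSC(n) is 0x0c + n * 8 and FTM_CNTIN is 0x4c; the offsets are quoted here only for illustration:

/* Hedged sketch, assuming the offsets named above:
 *
 *   fsl_pwm_is_reg(dev, FTM_CSC(5))  -> true   (last valid channel)
 *   fsl_pwm_is_reg(dev, FTM_CSC(6))  -> false  (0x3c..0x4b: no channels 6/7)
 *   fsl_pwm_is_reg(dev, FTM_CNTIN)   -> true   (window ends before CNTIN)
 *   fsl_pwm_is_reg(dev, FTM_FLTCTRL) -> false  (has_flt_reg == false)
 */
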
diff --git a/drivers/pwm/pwm-loongson.c b/drivers/pwm/pwm-loongson.c
index 1ba16168cbb4..31a57edecfd0 100644
--- a/drivers/pwm/pwm-loongson.c
+++ b/drivers/pwm/pwm-loongson.c
@@ -49,7 +49,7 @@
#define LOONGSON_PWM_CTRL_REG_DZONE BIT(10) /* Anti-dead Zone Enable Bit */
/* default input clk frequency for the ACPI case */
-#define LOONGSON_PWM_FREQ_DEFAULT 50000 /* Hz */
+#define LOONGSON_PWM_FREQ_DEFAULT 50000000 /* Hz */
struct pwm_loongson_ddata {
struct clk *clk;
diff --git a/drivers/pwm/pwm-max7360.c b/drivers/pwm/pwm-max7360.c
new file mode 100644
index 000000000000..ebf93a7aee5b
--- /dev/null
+++ b/drivers/pwm/pwm-max7360.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2025 Bootlin
+ *
+ * Author: Kamel BOUHARA <kamel.bouhara@bootlin.com>
+ * Author: Mathieu Dubois-Briand <mathieu.dubois-briand@bootlin.com>
+ *
+ * PWM functionality of the MAX7360 multi-function device.
+ * https://www.analog.com/media/en/technical-documentation/data-sheets/MAX7360.pdf
+ *
+ * Limitations:
+ * - Only supports normal polarity.
+ * - The period is fixed to 2 ms.
+ * - Only the duty cycle can be changed, new values are applied at the beginning
+ * of the next cycle.
+ * - When disabled, the output is put in Hi-Z immediately.
+ */
+#include <linux/bits.h>
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/math64.h>
+#include <linux/mfd/max7360.h>
+#include <linux/minmax.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
+#include <linux/time.h>
+#include <linux/types.h>
+
+#define MAX7360_NUM_PWMS 8
+#define MAX7360_PWM_MAX 255
+#define MAX7360_PWM_STEPS 256
+#define MAX7360_PWM_PERIOD_NS (2 * NSEC_PER_MSEC)
+
+struct max7360_pwm_waveform {
+ u8 duty_steps;
+ bool enabled;
+};
+
+static int max7360_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct regmap *regmap = pwmchip_get_drvdata(chip);
+
+ /*
+ * Make sure we use the individual PWM configuration register and not
+ * the global one.
+ * We never need to use the global one, so there is no need to revert
+ * that in the .free() callback.
+ */
+ return regmap_write_bits(regmap, MAX7360_REG_PWMCFG(pwm->hwpwm),
+ MAX7360_PORT_CFG_COMMON_PWM, 0);
+}
+
+static int max7360_pwm_round_waveform_tohw(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ const struct pwm_waveform *wf,
+ void *_wfhw)
+{
+ struct max7360_pwm_waveform *wfhw = _wfhw;
+ u64 duty_steps;
+
+ /*
+ * Ignore user provided values for period_length_ns and duty_offset_ns:
+	 * we only support a fixed period of MAX7360_PWM_PERIOD_NS and an offset of 0.
+ * Values from 0 to 254 as duty_steps will provide duty cycles of 0/256
+ * to 254/256, while value 255 will provide a duty cycle of 100%.
+ */
+ if (wf->duty_length_ns >= MAX7360_PWM_PERIOD_NS) {
+ duty_steps = MAX7360_PWM_MAX;
+ } else {
+ duty_steps = (u32)wf->duty_length_ns * MAX7360_PWM_STEPS / MAX7360_PWM_PERIOD_NS;
+ if (duty_steps == MAX7360_PWM_MAX)
+ duty_steps = MAX7360_PWM_MAX - 1;
+ }
+
+ wfhw->duty_steps = min(MAX7360_PWM_MAX, duty_steps);
+ wfhw->enabled = !!wf->period_length_ns;
+
+ if (wf->period_length_ns && wf->period_length_ns < MAX7360_PWM_PERIOD_NS)
+ return 1;
+ else
+ return 0;
+}
+
+static int max7360_pwm_round_waveform_fromhw(struct pwm_chip *chip, struct pwm_device *pwm,
+ const void *_wfhw, struct pwm_waveform *wf)
+{
+ const struct max7360_pwm_waveform *wfhw = _wfhw;
+
+ wf->period_length_ns = wfhw->enabled ? MAX7360_PWM_PERIOD_NS : 0;
+ wf->duty_offset_ns = 0;
+
+ if (wfhw->enabled) {
+ if (wfhw->duty_steps == MAX7360_PWM_MAX)
+ wf->duty_length_ns = MAX7360_PWM_PERIOD_NS;
+ else
+ wf->duty_length_ns = DIV_ROUND_UP(wfhw->duty_steps * MAX7360_PWM_PERIOD_NS,
+ MAX7360_PWM_STEPS);
+ } else {
+ wf->duty_length_ns = 0;
+ }
+
+ return 0;
+}
+
+static int max7360_pwm_write_waveform(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ const void *_wfhw)
+{
+ struct regmap *regmap = pwmchip_get_drvdata(chip);
+ const struct max7360_pwm_waveform *wfhw = _wfhw;
+ unsigned int val;
+ int ret;
+
+ if (wfhw->enabled) {
+ ret = regmap_write(regmap, MAX7360_REG_PWM(pwm->hwpwm), wfhw->duty_steps);
+ if (ret)
+ return ret;
+ }
+
+ val = wfhw->enabled ? BIT(pwm->hwpwm) : 0;
+ return regmap_write_bits(regmap, MAX7360_REG_GPIOCTRL, BIT(pwm->hwpwm), val);
+}
+
+static int max7360_pwm_read_waveform(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ void *_wfhw)
+{
+ struct regmap *regmap = pwmchip_get_drvdata(chip);
+ struct max7360_pwm_waveform *wfhw = _wfhw;
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(regmap, MAX7360_REG_GPIOCTRL, &val);
+ if (ret)
+ return ret;
+
+ if (val & BIT(pwm->hwpwm)) {
+ wfhw->enabled = true;
+ ret = regmap_read(regmap, MAX7360_REG_PWM(pwm->hwpwm), &val);
+ if (ret)
+ return ret;
+
+ wfhw->duty_steps = val;
+ } else {
+ wfhw->enabled = false;
+ wfhw->duty_steps = 0;
+ }
+
+ return 0;
+}
+
+static const struct pwm_ops max7360_pwm_ops = {
+ .request = max7360_pwm_request,
+ .round_waveform_tohw = max7360_pwm_round_waveform_tohw,
+ .round_waveform_fromhw = max7360_pwm_round_waveform_fromhw,
+ .read_waveform = max7360_pwm_read_waveform,
+ .write_waveform = max7360_pwm_write_waveform,
+};
+
+static int max7360_pwm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pwm_chip *chip;
+ struct regmap *regmap;
+ int ret;
+
+ regmap = dev_get_regmap(dev->parent, NULL);
+ if (!regmap)
+ return dev_err_probe(dev, -ENODEV, "Could not get parent regmap\n");
+
+ /*
+ * This MFD sub-device does not have any associated device tree node:
+ * properties are stored in the device node of the parent (MFD) device
+ * and this same node is used in phandles of client devices.
+ * Reuse this device tree node here, as otherwise the PWM subsystem
+ * would be confused by this topology.
+ */
+ device_set_of_node_from_dev(dev, dev->parent);
+
+ chip = devm_pwmchip_alloc(dev, MAX7360_NUM_PWMS, 0);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ chip->ops = &max7360_pwm_ops;
+
+ pwmchip_set_drvdata(chip, regmap);
+
+ ret = devm_pwmchip_add(dev, chip);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add PWM chip\n");
+
+ return 0;
+}
+
+static struct platform_driver max7360_pwm_driver = {
+ .driver = {
+ .name = "max7360-pwm",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .probe = max7360_pwm_probe,
+};
+module_platform_driver(max7360_pwm_driver);
+
+MODULE_DESCRIPTION("MAX7360 PWM driver");
+MODULE_AUTHOR("Kamel BOUHARA <kamel.bouhara@bootlin.com>");
+MODULE_AUTHOR("Mathieu Dubois-Briand <mathieu.dubois-briand@bootlin.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
index e4b595fc5a5e..4291072a13a7 100644
--- a/drivers/pwm/pwm-mediatek.c
+++ b/drivers/pwm/pwm-mediatek.c
@@ -7,6 +7,7 @@
*
*/
+#include <linux/bitfield.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
@@ -21,24 +22,26 @@
/* PWM registers and bits definitions */
#define PWMCON 0x00
+#define PWMCON_CLKDIV GENMASK(2, 0)
#define PWMHDUR 0x04
#define PWMLDUR 0x08
#define PWMGDUR 0x0c
#define PWMWAVENUM 0x28
#define PWMDWIDTH 0x2c
+#define PWMDWIDTH_PERIOD GENMASK(12, 0)
#define PWM45DWIDTH_FIXUP 0x30
#define PWMTHRES 0x30
+#define PWMTHRES_DUTY GENMASK(12, 0)
#define PWM45THRES_FIXUP 0x34
#define PWM_CK_26M_SEL_V3 0x74
#define PWM_CK_26M_SEL 0x210
-#define PWM_CLK_DIV_MAX 7
-
struct pwm_mediatek_of_data {
unsigned int num_pwms;
bool pwm45_fixup;
u16 pwm_ck_26m_sel_reg;
- const unsigned int *reg_offset;
+ unsigned int chanreg_base;
+ unsigned int chanreg_width;
};
/**
@@ -46,28 +49,18 @@ struct pwm_mediatek_of_data {
* @regs: base address of PWM chip
* @clk_top: the top clock generator
* @clk_main: the clock used by PWM core
- * @clk_pwms: the clock used by each PWM channel
* @soc: pointer to chip's platform data
+ * @clk_pwms: the clock and its cached rate used by each PWM channel
*/
struct pwm_mediatek_chip {
void __iomem *regs;
struct clk *clk_top;
struct clk *clk_main;
- struct clk **clk_pwms;
const struct pwm_mediatek_of_data *soc;
-};
-
-static const unsigned int mtk_pwm_reg_offset_v1[] = {
- 0x0010, 0x0050, 0x0090, 0x00d0, 0x0110, 0x0150, 0x0190, 0x0220
-};
-
-static const unsigned int mtk_pwm_reg_offset_v2[] = {
- 0x0080, 0x00c0, 0x0100, 0x0140, 0x0180, 0x01c0, 0x0200, 0x0240
-};
-
-/* PWM IP Version 3.0.2 */
-static const unsigned int mtk_pwm_reg_offset_v3[] = {
- 0x0100, 0x0200, 0x0300, 0x0400, 0x0500, 0x0600, 0x0700, 0x0800
+ struct {
+ struct clk *clk;
+ unsigned long rate;
+ } clk_pwms[];
};
static inline struct pwm_mediatek_chip *
@@ -76,10 +69,9 @@ to_pwm_mediatek_chip(struct pwm_chip *chip)
return pwmchip_get_drvdata(chip);
}
-static int pwm_mediatek_clk_enable(struct pwm_chip *chip,
- struct pwm_device *pwm)
+static int pwm_mediatek_clk_enable(struct pwm_mediatek_chip *pc,
+ unsigned int hwpwm)
{
- struct pwm_mediatek_chip *pc = to_pwm_mediatek_chip(chip);
int ret;
ret = clk_prepare_enable(pc->clk_top);
@@ -90,12 +82,28 @@ static int pwm_mediatek_clk_enable(struct pwm_chip *chip,
if (ret < 0)
goto disable_clk_top;
- ret = clk_prepare_enable(pc->clk_pwms[pwm->hwpwm]);
+ ret = clk_prepare_enable(pc->clk_pwms[hwpwm].clk);
if (ret < 0)
goto disable_clk_main;
+ if (!pc->clk_pwms[hwpwm].rate) {
+ pc->clk_pwms[hwpwm].rate = clk_get_rate(pc->clk_pwms[hwpwm].clk);
+
+ /*
+			 * With the clk running at no more than 1 GHz, the
+ * calculations in .apply() won't overflow.
+ */
+ if (!pc->clk_pwms[hwpwm].rate ||
+ pc->clk_pwms[hwpwm].rate > 1000000000) {
+ ret = -EINVAL;
+ goto disable_clk_hwpwm;
+ }
+ }
+
return 0;
+disable_clk_hwpwm:
+ clk_disable_unprepare(pc->clk_pwms[hwpwm].clk);
disable_clk_main:
clk_disable_unprepare(pc->clk_main);
disable_clk_top:
@@ -104,12 +112,10 @@ disable_clk_top:
return ret;
}
-static void pwm_mediatek_clk_disable(struct pwm_chip *chip,
- struct pwm_device *pwm)
+static void pwm_mediatek_clk_disable(struct pwm_mediatek_chip *pc,
+ unsigned int hwpwm)
{
- struct pwm_mediatek_chip *pc = to_pwm_mediatek_chip(chip);
-
- clk_disable_unprepare(pc->clk_pwms[pwm->hwpwm]);
+ clk_disable_unprepare(pc->clk_pwms[hwpwm].clk);
clk_disable_unprepare(pc->clk_main);
clk_disable_unprepare(pc->clk_top);
}
@@ -118,7 +124,15 @@ static inline void pwm_mediatek_writel(struct pwm_mediatek_chip *chip,
unsigned int num, unsigned int offset,
u32 value)
{
- writel(value, chip->regs + chip->soc->reg_offset[num] + offset);
+ writel(value, chip->regs + chip->soc->chanreg_base +
+ num * chip->soc->chanreg_width + offset);
+}
+
+static inline u32 pwm_mediatek_readl(struct pwm_mediatek_chip *chip,
+ unsigned int num, unsigned int offset)
+{
+ return readl(chip->regs + chip->soc->chanreg_base +
+ num * chip->soc->chanreg_width + offset);
}
static void pwm_mediatek_enable(struct pwm_chip *chip, struct pwm_device *pwm)
@@ -142,50 +156,59 @@ static void pwm_mediatek_disable(struct pwm_chip *chip, struct pwm_device *pwm)
}
static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns)
+ u64 duty_ns, u64 period_ns)
{
struct pwm_mediatek_chip *pc = to_pwm_mediatek_chip(chip);
- u32 clkdiv = 0, cnt_period, cnt_duty, reg_width = PWMDWIDTH,
- reg_thres = PWMTHRES;
+ u32 clkdiv, enable;
+ u32 reg_width = PWMDWIDTH, reg_thres = PWMTHRES;
+ u64 cnt_period, cnt_duty;
unsigned long clk_rate;
- u64 resolution;
int ret;
- ret = pwm_mediatek_clk_enable(chip, pwm);
+ ret = pwm_mediatek_clk_enable(pc, pwm->hwpwm);
if (ret < 0)
return ret;
- clk_rate = clk_get_rate(pc->clk_pwms[pwm->hwpwm]);
- if (!clk_rate) {
- ret = -EINVAL;
- goto out;
- }
+ clk_rate = pc->clk_pwms[pwm->hwpwm].rate;
/* Make sure we use the bus clock and not the 26MHz clock */
if (pc->soc->pwm_ck_26m_sel_reg)
writel(0, pc->regs + pc->soc->pwm_ck_26m_sel_reg);
- /* Using resolution in picosecond gets accuracy higher */
- resolution = (u64)NSEC_PER_SEC * 1000;
- do_div(resolution, clk_rate);
-
- cnt_period = DIV_ROUND_CLOSEST_ULL((u64)period_ns * 1000, resolution);
- if (!cnt_period)
- return -EINVAL;
+ cnt_period = mul_u64_u64_div_u64(period_ns, clk_rate, NSEC_PER_SEC);
+ if (cnt_period == 0) {
+ ret = -ERANGE;
+ goto out;
+ }
- while (cnt_period > 8192) {
- resolution *= 2;
- clkdiv++;
- cnt_period = DIV_ROUND_CLOSEST_ULL((u64)period_ns * 1000,
- resolution);
+ if (cnt_period > FIELD_MAX(PWMDWIDTH_PERIOD) + 1) {
+ if (cnt_period >= ((FIELD_MAX(PWMDWIDTH_PERIOD) + 1) << FIELD_MAX(PWMCON_CLKDIV))) {
+ clkdiv = FIELD_MAX(PWMCON_CLKDIV);
+ cnt_period = FIELD_MAX(PWMDWIDTH_PERIOD) + 1;
+ } else {
+ clkdiv = ilog2(cnt_period) - ilog2(FIELD_MAX(PWMDWIDTH_PERIOD));
+ cnt_period >>= clkdiv;
+ }
+ } else {
+ clkdiv = 0;
}
- if (clkdiv > PWM_CLK_DIV_MAX) {
- dev_err(pwmchip_parent(chip), "period of %d ns not supported\n", period_ns);
- ret = -EINVAL;
- goto out;
+ cnt_duty = mul_u64_u64_div_u64(duty_ns, clk_rate, NSEC_PER_SEC) >> clkdiv;
+ if (cnt_duty > cnt_period)
+ cnt_duty = cnt_period;
+
+ if (cnt_duty) {
+ cnt_duty -= 1;
+ enable = BIT(pwm->hwpwm);
+ } else {
+ enable = 0;
}
+ cnt_period -= 1;
+
+ dev_dbg(&chip->dev, "pwm#%u: %lld/%lld @%lu -> CON: %x, PERIOD: %llx, DUTY: %llx\n",
+ pwm->hwpwm, duty_ns, period_ns, clk_rate, clkdiv, cnt_period, cnt_duty);
+
if (pc->soc->pwm45_fixup && pwm->hwpwm > 2) {
/*
* PWM[4,5] has distinct offset for PWMDWIDTH and PWMTHRES
@@ -195,20 +218,18 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm,
reg_thres = PWM45THRES_FIXUP;
}
- cnt_duty = DIV_ROUND_CLOSEST_ULL((u64)duty_ns * 1000, resolution);
-
pwm_mediatek_writel(pc, pwm->hwpwm, PWMCON, BIT(15) | clkdiv);
- pwm_mediatek_writel(pc, pwm->hwpwm, reg_width, cnt_period - 1);
+ pwm_mediatek_writel(pc, pwm->hwpwm, reg_width, cnt_period);
- if (cnt_duty) {
- pwm_mediatek_writel(pc, pwm->hwpwm, reg_thres, cnt_duty - 1);
+ if (enable) {
+ pwm_mediatek_writel(pc, pwm->hwpwm, reg_thres, cnt_duty);
pwm_mediatek_enable(chip, pwm);
} else {
pwm_mediatek_disable(chip, pwm);
}
out:
- pwm_mediatek_clk_disable(chip, pwm);
+ pwm_mediatek_clk_disable(pc, pwm->hwpwm);
return ret;
}
@@ -216,6 +237,7 @@ out:
static int pwm_mediatek_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
+ struct pwm_mediatek_chip *pc = to_pwm_mediatek_chip(chip);
int err;
if (state->polarity != PWM_POLARITY_NORMAL)
@@ -224,7 +246,7 @@ static int pwm_mediatek_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (!state->enabled) {
if (pwm->state.enabled) {
pwm_mediatek_disable(chip, pwm);
- pwm_mediatek_clk_disable(chip, pwm);
+ pwm_mediatek_clk_disable(pc, pwm->hwpwm);
}
return 0;
@@ -235,15 +257,115 @@ static int pwm_mediatek_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return err;
if (!pwm->state.enabled)
- err = pwm_mediatek_clk_enable(chip, pwm);
+ err = pwm_mediatek_clk_enable(pc, pwm->hwpwm);
return err;
}
+static int pwm_mediatek_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct pwm_mediatek_chip *pc = to_pwm_mediatek_chip(chip);
+ int ret;
+ u32 enable;
+ u32 reg_width = PWMDWIDTH, reg_thres = PWMTHRES;
+
+ if (pc->soc->pwm45_fixup && pwm->hwpwm > 2) {
+ /*
+ * PWM[4,5] has distinct offset for PWMDWIDTH and PWMTHRES
+ * from the other PWMs on MT7623.
+ */
+ reg_width = PWM45DWIDTH_FIXUP;
+ reg_thres = PWM45THRES_FIXUP;
+ }
+
+ ret = pwm_mediatek_clk_enable(pc, pwm->hwpwm);
+ if (ret < 0)
+ return ret;
+
+ enable = readl(pc->regs);
+ if (enable & BIT(pwm->hwpwm)) {
+ u32 clkdiv, cnt_period, cnt_duty;
+ unsigned long clk_rate;
+
+ clk_rate = pc->clk_pwms[pwm->hwpwm].rate;
+
+ state->enabled = true;
+ state->polarity = PWM_POLARITY_NORMAL;
+
+ clkdiv = FIELD_GET(PWMCON_CLKDIV,
+ pwm_mediatek_readl(pc, pwm->hwpwm, PWMCON));
+ cnt_period = FIELD_GET(PWMDWIDTH_PERIOD,
+ pwm_mediatek_readl(pc, pwm->hwpwm, reg_width));
+ cnt_duty = FIELD_GET(PWMTHRES_DUTY,
+ pwm_mediatek_readl(pc, pwm->hwpwm, reg_thres));
+
+ /*
+		 * cnt_period is a 13-bit value, NSEC_PER_SEC is 30 bits wide
+		 * and clkdiv is less than 8, so the multiplication doesn't
+		 * overflow a u64.
+ */
+ state->period =
+ DIV_ROUND_UP_ULL((u64)cnt_period * NSEC_PER_SEC << clkdiv, clk_rate);
+ state->duty_cycle =
+ DIV_ROUND_UP_ULL((u64)cnt_duty * NSEC_PER_SEC << clkdiv, clk_rate);
+ } else {
+ state->enabled = false;
+ }
+
+ pwm_mediatek_clk_disable(pc, pwm->hwpwm);
+
+ return ret;
+}
+
static const struct pwm_ops pwm_mediatek_ops = {
.apply = pwm_mediatek_apply,
+ .get_state = pwm_mediatek_get_state,
};
+static int pwm_mediatek_init_used_clks(struct pwm_mediatek_chip *pc)
+{
+ const struct pwm_mediatek_of_data *soc = pc->soc;
+ unsigned int hwpwm;
+ u32 enabled, handled = 0;
+ int ret;
+
+ ret = clk_prepare_enable(pc->clk_top);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(pc->clk_main);
+ if (ret)
+ goto err_enable_main;
+
+ enabled = readl(pc->regs) & GENMASK(soc->num_pwms - 1, 0);
+
+ while (enabled & ~handled) {
+ hwpwm = ilog2(enabled & ~handled);
+
+ ret = pwm_mediatek_clk_enable(pc, hwpwm);
+ if (ret) {
+ while (handled) {
+ hwpwm = ilog2(handled);
+
+ pwm_mediatek_clk_disable(pc, hwpwm);
+ handled &= ~BIT(hwpwm);
+ }
+
+ break;
+ }
+
+ handled |= BIT(hwpwm);
+ }
+
+ clk_disable_unprepare(pc->clk_main);
+err_enable_main:
+
+ clk_disable_unprepare(pc->clk_top);
+
+ return ret;
+}
+
static int pwm_mediatek_probe(struct platform_device *pdev)
{
struct pwm_chip *chip;
@@ -254,7 +376,8 @@ static int pwm_mediatek_probe(struct platform_device *pdev)
soc = of_device_get_match_data(&pdev->dev);
- chip = devm_pwmchip_alloc(&pdev->dev, soc->num_pwms, sizeof(*pc));
+ chip = devm_pwmchip_alloc(&pdev->dev, soc->num_pwms,
+ sizeof(*pc) + soc->num_pwms * sizeof(*pc->clk_pwms));
if (IS_ERR(chip))
return PTR_ERR(chip);
pc = to_pwm_mediatek_chip(chip);
@@ -265,11 +388,6 @@ static int pwm_mediatek_probe(struct platform_device *pdev)
if (IS_ERR(pc->regs))
return PTR_ERR(pc->regs);
- pc->clk_pwms = devm_kmalloc_array(&pdev->dev, soc->num_pwms,
- sizeof(*pc->clk_pwms), GFP_KERNEL);
- if (!pc->clk_pwms)
- return -ENOMEM;
-
pc->clk_top = devm_clk_get(&pdev->dev, "top");
if (IS_ERR(pc->clk_top))
return dev_err_probe(&pdev->dev, PTR_ERR(pc->clk_top),
@@ -285,12 +403,21 @@ static int pwm_mediatek_probe(struct platform_device *pdev)
snprintf(name, sizeof(name), "pwm%d", i + 1);
- pc->clk_pwms[i] = devm_clk_get(&pdev->dev, name);
- if (IS_ERR(pc->clk_pwms[i]))
- return dev_err_probe(&pdev->dev, PTR_ERR(pc->clk_pwms[i]),
+ pc->clk_pwms[i].clk = devm_clk_get(&pdev->dev, name);
+ if (IS_ERR(pc->clk_pwms[i].clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(pc->clk_pwms[i].clk),
"Failed to get %s clock\n", name);
+
+ ret = devm_clk_rate_exclusive_get(&pdev->dev, pc->clk_pwms[i].clk);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to lock clock rate for %s\n", name);
}
+ ret = pwm_mediatek_init_used_clks(pc);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to initialize used clocks\n");
+
chip->ops = &pwm_mediatek_ops;
ret = devm_pwmchip_add(&pdev->dev, chip);
@@ -303,86 +430,99 @@ static int pwm_mediatek_probe(struct platform_device *pdev)
static const struct pwm_mediatek_of_data mt2712_pwm_data = {
.num_pwms = 8,
.pwm45_fixup = false,
- .reg_offset = mtk_pwm_reg_offset_v1,
+ .chanreg_base = 0x10,
+ .chanreg_width = 0x40,
};
static const struct pwm_mediatek_of_data mt6795_pwm_data = {
.num_pwms = 7,
.pwm45_fixup = false,
- .reg_offset = mtk_pwm_reg_offset_v1,
+ .chanreg_base = 0x10,
+ .chanreg_width = 0x40,
};
static const struct pwm_mediatek_of_data mt7622_pwm_data = {
.num_pwms = 6,
.pwm45_fixup = false,
.pwm_ck_26m_sel_reg = PWM_CK_26M_SEL,
- .reg_offset = mtk_pwm_reg_offset_v1,
+ .chanreg_base = 0x10,
+ .chanreg_width = 0x40,
};
static const struct pwm_mediatek_of_data mt7623_pwm_data = {
.num_pwms = 5,
.pwm45_fixup = true,
- .reg_offset = mtk_pwm_reg_offset_v1,
+ .chanreg_base = 0x10,
+ .chanreg_width = 0x40,
};
static const struct pwm_mediatek_of_data mt7628_pwm_data = {
.num_pwms = 4,
.pwm45_fixup = true,
- .reg_offset = mtk_pwm_reg_offset_v1,
+ .chanreg_base = 0x10,
+ .chanreg_width = 0x40,
};
static const struct pwm_mediatek_of_data mt7629_pwm_data = {
.num_pwms = 1,
.pwm45_fixup = false,
- .reg_offset = mtk_pwm_reg_offset_v1,
+ .chanreg_base = 0x10,
+ .chanreg_width = 0x40,
};
static const struct pwm_mediatek_of_data mt7981_pwm_data = {
.num_pwms = 3,
.pwm45_fixup = false,
.pwm_ck_26m_sel_reg = PWM_CK_26M_SEL,
- .reg_offset = mtk_pwm_reg_offset_v2,
+ .chanreg_base = 0x80,
+ .chanreg_width = 0x40,
};
static const struct pwm_mediatek_of_data mt7986_pwm_data = {
.num_pwms = 2,
.pwm45_fixup = false,
.pwm_ck_26m_sel_reg = PWM_CK_26M_SEL,
- .reg_offset = mtk_pwm_reg_offset_v1,
+ .chanreg_base = 0x10,
+ .chanreg_width = 0x40,
};
static const struct pwm_mediatek_of_data mt7988_pwm_data = {
.num_pwms = 8,
.pwm45_fixup = false,
- .reg_offset = mtk_pwm_reg_offset_v2,
+ .chanreg_base = 0x80,
+ .chanreg_width = 0x40,
};
static const struct pwm_mediatek_of_data mt8183_pwm_data = {
.num_pwms = 4,
.pwm45_fixup = false,
.pwm_ck_26m_sel_reg = PWM_CK_26M_SEL,
- .reg_offset = mtk_pwm_reg_offset_v1,
+ .chanreg_base = 0x10,
+ .chanreg_width = 0x40,
};
static const struct pwm_mediatek_of_data mt8365_pwm_data = {
.num_pwms = 3,
.pwm45_fixup = false,
.pwm_ck_26m_sel_reg = PWM_CK_26M_SEL,
- .reg_offset = mtk_pwm_reg_offset_v1,
+ .chanreg_base = 0x10,
+ .chanreg_width = 0x40,
};
static const struct pwm_mediatek_of_data mt8516_pwm_data = {
.num_pwms = 5,
.pwm45_fixup = false,
.pwm_ck_26m_sel_reg = PWM_CK_26M_SEL,
- .reg_offset = mtk_pwm_reg_offset_v1,
+ .chanreg_base = 0x10,
+ .chanreg_width = 0x40,
};
static const struct pwm_mediatek_of_data mt6991_pwm_data = {
.num_pwms = 4,
.pwm45_fixup = false,
.pwm_ck_26m_sel_reg = PWM_CK_26M_SEL_V3,
- .reg_offset = mtk_pwm_reg_offset_v3,
+ .chanreg_base = 0x100,
+ .chanreg_width = 0x100,
};
static const struct of_device_id pwm_mediatek_of_match[] = {
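
The new divider selection replaces the old multiply-and-retry loop with a closed form. Under the assumption of a 26 MHz bus clock, a requested 10 ms period gives cnt_period = 260000 ticks, which exceeds the 13-bit counter, so clkdiv = ilog2(260000) - ilog2(8191) = 17 - 12 = 5 and 260000 >> 5 = 8125 fits. A standalone sketch of that computation, with the field widths restated for illustration:

#include <stdint.h>
#include <stdio.h>

#define PERIOD_FIELD_MAX 8191U	/* FIELD_MAX(PWMDWIDTH_PERIOD), 13 bits */
#define CLKDIV_MAX       7U	/* FIELD_MAX(PWMCON_CLKDIV) */

static unsigned int ilog2_u64(uint64_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

/* Mirrors the clkdiv selection in pwm_mediatek_config() */
static void pick_clkdiv(uint64_t cnt_period)
{
	unsigned int clkdiv;

	if (cnt_period > PERIOD_FIELD_MAX + 1) {
		if (cnt_period >= (uint64_t)(PERIOD_FIELD_MAX + 1) << CLKDIV_MAX) {
			clkdiv = CLKDIV_MAX;
			cnt_period = PERIOD_FIELD_MAX + 1;
		} else {
			clkdiv = ilog2_u64(cnt_period) - ilog2_u64(PERIOD_FIELD_MAX);
			cnt_period >>= clkdiv;
		}
	} else {
		clkdiv = 0;
	}

	printf("clkdiv=%u cnt_period=%llu\n", clkdiv,
	       (unsigned long long)cnt_period);
}

int main(void)
{
	pick_clkdiv(260000);	/* 10 ms at 26 MHz -> clkdiv=5, 8125 ticks */
	return 0;
}
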
diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
index eb03ccd5b688..107bebec3546 100644
--- a/drivers/pwm/pwm-pca9685.c
+++ b/drivers/pwm/pwm-pca9685.c
@@ -26,7 +26,6 @@
* that is enabled is allowed to change the prescale register.
* PWM channels requested afterwards must use a period that results in the same
* prescale setting as the one set by the first requested channel.
- * GPIOs do not count as enabled PWMs as they are not using the prescaler.
*/
#define PCA9685_MODE1 0x00
@@ -50,7 +49,14 @@
#define PCA9685_PRESCALE_MAX 0xFF /* => min. frequency of 24 Hz */
#define PCA9685_COUNTER_RANGE 4096
-#define PCA9685_OSC_CLOCK_MHZ 25 /* Internal oscillator with 25 MHz */
+#define PCA9685_OSC_CLOCK_HZ 25000000 /* Internal oscillator with 25 MHz */
+
+/*
+ * The time value of one counter tick. Note that NSEC_PER_SEC is an integer
+ * multiple of PCA9685_OSC_CLOCK_HZ, so there is no rounding involved and we're
+ * not losing precision due to the early division.
+ */
+#define PCA9685_QUANTUM_NS(_prescale) ((NSEC_PER_SEC / PCA9685_OSC_CLOCK_HZ) * (_prescale + 1))
#define PCA9685_NUMREGS 0xFF
#define PCA9685_MAXCHAN 0x10
@@ -61,6 +67,8 @@
#define MODE1_SUB2 BIT(2)
#define MODE1_SUB1 BIT(3)
#define MODE1_SLEEP BIT(4)
+#define MODE1_AI BIT(5)
+
#define MODE2_INVRT BIT(4)
#define MODE2_OUTDRV BIT(2)
@@ -78,10 +86,6 @@ struct pca9685 {
struct regmap *regmap;
struct mutex lock;
DECLARE_BITMAP(pwms_enabled, PCA9685_MAXCHAN + 1);
-#if IS_ENABLED(CONFIG_GPIOLIB)
- struct gpio_chip gpio;
- DECLARE_BITMAP(pwms_inuse, PCA9685_MAXCHAN + 1);
-#endif
};
static inline struct pca9685 *to_pca(struct pwm_chip *chip)
@@ -131,355 +135,232 @@ static int pca9685_write_reg(struct pwm_chip *chip, unsigned int reg, unsigned i
return err;
}
-/* Helper function to set the duty cycle ratio to duty/4096 (e.g. duty=2048 -> 50%) */
-static void pca9685_pwm_set_duty(struct pwm_chip *chip, int channel, unsigned int duty)
+static int pca9685_write_4reg(struct pwm_chip *chip, unsigned int reg, u8 val[4])
{
- struct pwm_device *pwm = &chip->pwms[channel];
- unsigned int on, off;
-
- if (duty == 0) {
- /* Set the full OFF bit, which has the highest precedence */
- pca9685_write_reg(chip, REG_OFF_H(channel), LED_FULL);
- return;
- } else if (duty >= PCA9685_COUNTER_RANGE) {
- /* Set the full ON bit and clear the full OFF bit */
- pca9685_write_reg(chip, REG_ON_H(channel), LED_FULL);
- pca9685_write_reg(chip, REG_OFF_H(channel), 0);
- return;
- }
+ struct pca9685 *pca = to_pca(chip);
+ struct device *dev = pwmchip_parent(chip);
+ int err;
+ err = regmap_bulk_write(pca->regmap, reg, val, 4);
+ if (err)
+ dev_err(dev, "regmap_write to register 0x%x failed: %pe\n", reg, ERR_PTR(err));
- if (pwm->state.usage_power && channel < PCA9685_MAXCHAN) {
- /*
- * If usage_power is set, the pca9685 driver will phase shift
- * the individual channels relative to their channel number.
- * This improves EMI because the enabled channels no longer
- * turn on at the same time, while still maintaining the
- * configured duty cycle / power output.
- */
- on = channel * PCA9685_COUNTER_RANGE / PCA9685_MAXCHAN;
- } else
- on = 0;
-
- off = (on + duty) % PCA9685_COUNTER_RANGE;
-
- /* Set ON time (clears full ON bit) */
- pca9685_write_reg(chip, REG_ON_L(channel), on & 0xff);
- pca9685_write_reg(chip, REG_ON_H(channel), (on >> 8) & 0xf);
- /* Set OFF time (clears full OFF bit) */
- pca9685_write_reg(chip, REG_OFF_L(channel), off & 0xff);
- pca9685_write_reg(chip, REG_OFF_H(channel), (off >> 8) & 0xf);
+ return err;
}
-static unsigned int pca9685_pwm_get_duty(struct pwm_chip *chip, int channel)
+static int pca9685_set_sleep_mode(struct pwm_chip *chip, bool enable)
{
- struct pwm_device *pwm = &chip->pwms[channel];
- unsigned int off = 0, on = 0, val = 0;
-
- if (WARN_ON(channel >= PCA9685_MAXCHAN)) {
- /* HW does not support reading state of "all LEDs" channel */
- return 0;
- }
+ struct pca9685 *pca = to_pca(chip);
+ int err;
- pca9685_read_reg(chip, LED_N_OFF_H(channel), &off);
- if (off & LED_FULL) {
- /* Full OFF bit is set */
- return 0;
- }
+ err = regmap_update_bits(pca->regmap, PCA9685_MODE1,
+ MODE1_SLEEP, enable ? MODE1_SLEEP : 0);
+ if (err)
+ return err;
- pca9685_read_reg(chip, LED_N_ON_H(channel), &on);
- if (on & LED_FULL) {
- /* Full ON bit is set */
- return PCA9685_COUNTER_RANGE;
+ if (!enable) {
+ /* Wait 500us for the oscillator to be back up */
+ udelay(500);
}
- pca9685_read_reg(chip, LED_N_OFF_L(channel), &val);
- off = ((off & 0xf) << 8) | (val & 0xff);
- if (!pwm->state.usage_power)
- return off;
-
- /* Read ON register to calculate duty cycle of staggered output */
- if (pca9685_read_reg(chip, LED_N_ON_L(channel), &val)) {
- /* Reset val to 0 in case reading LED_N_ON_L failed */
- val = 0;
- }
- on = ((on & 0xf) << 8) | (val & 0xff);
- return (off - on) & (PCA9685_COUNTER_RANGE - 1);
+ return 0;
}
-#if IS_ENABLED(CONFIG_GPIOLIB)
-static bool pca9685_pwm_test_and_set_inuse(struct pca9685 *pca, int pwm_idx)
+struct pca9685_waveform {
+ u8 onoff[4];
+ u8 prescale;
+};
+
+static int pca9685_round_waveform_tohw(struct pwm_chip *chip, struct pwm_device *pwm, const struct pwm_waveform *wf, void *_wfhw)
{
- bool is_inuse;
+ struct pca9685_waveform *wfhw = _wfhw;
+ struct pca9685 *pca = to_pca(chip);
+ unsigned int best_prescale;
+ u8 prescale;
+ unsigned int period_ns, duty;
+ int ret_tohw = 0;
- mutex_lock(&pca->lock);
- if (pwm_idx >= PCA9685_MAXCHAN) {
- /*
- * "All LEDs" channel:
- * pretend already in use if any of the PWMs are requested
- */
- if (!bitmap_empty(pca->pwms_inuse, PCA9685_MAXCHAN)) {
- is_inuse = true;
- goto out;
- }
- } else {
- /*
- * Regular channel:
- * pretend already in use if the "all LEDs" channel is requested
- */
- if (test_bit(PCA9685_MAXCHAN, pca->pwms_inuse)) {
- is_inuse = true;
- goto out;
- }
+ if (!wf->period_length_ns) {
+ *wfhw = (typeof(*wfhw)){
+ .onoff = { 0, 0, 0, LED_FULL, },
+ .prescale = 0,
+ };
+
+ dev_dbg(&chip->dev, "pwm#%u: %lld/%lld [+%lld] -> [%hhx %hhx %hhx %hhx] PSC:%hhx\n",
+ pwm->hwpwm, wf->duty_length_ns, wf->period_length_ns, wf->duty_offset_ns,
+ wfhw->onoff[0], wfhw->onoff[1], wfhw->onoff[2], wfhw->onoff[3], wfhw->prescale);
+
+ return 0;
}
- is_inuse = test_and_set_bit(pwm_idx, pca->pwms_inuse);
-out:
- mutex_unlock(&pca->lock);
- return is_inuse;
-}
-static void pca9685_pwm_clear_inuse(struct pca9685 *pca, int pwm_idx)
-{
- mutex_lock(&pca->lock);
- clear_bit(pwm_idx, pca->pwms_inuse);
- mutex_unlock(&pca->lock);
-}
+ if (wf->period_length_ns >= PCA9685_COUNTER_RANGE * PCA9685_QUANTUM_NS(255)) {
+ best_prescale = 255;
+ } else if (wf->period_length_ns < PCA9685_COUNTER_RANGE * PCA9685_QUANTUM_NS(3)) {
+ best_prescale = 3;
+ ret_tohw = 1;
+ } else {
+ best_prescale = (unsigned int)wf->period_length_ns / (PCA9685_COUNTER_RANGE * (NSEC_PER_SEC / PCA9685_OSC_CLOCK_HZ)) - 1;
+ }
-static int pca9685_pwm_gpio_request(struct gpio_chip *gpio, unsigned int offset)
-{
- struct pwm_chip *chip = gpiochip_get_data(gpio);
- struct pca9685 *pca = to_pca(chip);
+ guard(mutex)(&pca->lock);
- if (pca9685_pwm_test_and_set_inuse(pca, offset))
- return -EBUSY;
- pm_runtime_get_sync(pwmchip_parent(chip));
- return 0;
-}
+ if (!pca9685_prescaler_can_change(pca, pwm->hwpwm)) {
+ unsigned int current_prescale;
+ int ret;
-static int pca9685_pwm_gpio_get(struct gpio_chip *gpio, unsigned int offset)
-{
- struct pwm_chip *chip = gpiochip_get_data(gpio);
+ ret = regmap_read(pca->regmap, PCA9685_PRESCALE, &current_prescale);
+ if (ret)
+ return ret;
- return pca9685_pwm_get_duty(chip, offset) != 0;
-}
+ if (current_prescale > best_prescale)
+ ret_tohw = 1;
-static int pca9685_pwm_gpio_set(struct gpio_chip *gpio, unsigned int offset,
- int value)
-{
- struct pwm_chip *chip = gpiochip_get_data(gpio);
+ prescale = current_prescale;
+ } else {
+ prescale = best_prescale;
+ }
- pca9685_pwm_set_duty(chip, offset, value ? PCA9685_COUNTER_RANGE : 0);
+ period_ns = PCA9685_COUNTER_RANGE * PCA9685_QUANTUM_NS(prescale);
- return 0;
-}
+ duty = (unsigned)min_t(u64, wf->duty_length_ns, period_ns) / PCA9685_QUANTUM_NS(prescale);
-static void pca9685_pwm_gpio_free(struct gpio_chip *gpio, unsigned int offset)
-{
- struct pwm_chip *chip = gpiochip_get_data(gpio);
- struct pca9685 *pca = to_pca(chip);
+ if (duty < PCA9685_COUNTER_RANGE) {
+ unsigned int on, off;
- pca9685_pwm_set_duty(chip, offset, 0);
- pm_runtime_put(pwmchip_parent(chip));
- pca9685_pwm_clear_inuse(pca, offset);
-}
+ on = (unsigned)min_t(u64, wf->duty_offset_ns, period_ns) / PCA9685_QUANTUM_NS(prescale);
+ off = (on + duty) % PCA9685_COUNTER_RANGE;
-static int pca9685_pwm_gpio_get_direction(struct gpio_chip *chip,
- unsigned int offset)
-{
- /* Always out */
- return GPIO_LINE_DIRECTION_OUT;
-}
+ /*
+		 * With a zero duty cycle, it doesn't matter if the period was
+		 * rounded up.
+ */
+ if (!duty)
+ ret_tohw = 0;
-static int pca9685_pwm_gpio_direction_input(struct gpio_chip *gpio,
- unsigned int offset)
-{
- return -EINVAL;
-}
+ *wfhw = (typeof(*wfhw)){
+ .onoff = { on & 0xff, (on >> 8) & 0xf, off & 0xff, (off >> 8) & 0xf },
+ .prescale = prescale,
+ };
+ } else {
+ *wfhw = (typeof(*wfhw)){
+ .onoff = { 0, LED_FULL, 0, 0, },
+ .prescale = prescale,
+ };
+ }
-static int pca9685_pwm_gpio_direction_output(struct gpio_chip *gpio,
- unsigned int offset, int value)
-{
- pca9685_pwm_gpio_set(gpio, offset, value);
+ dev_dbg(&chip->dev, "pwm#%u: %lld/%lld [+%lld] -> %s[%hhx %hhx %hhx %hhx] PSC:%hhx\n",
+ pwm->hwpwm, wf->duty_length_ns, wf->period_length_ns, wf->duty_offset_ns,
+ ret_tohw ? "#" : "", wfhw->onoff[0], wfhw->onoff[1], wfhw->onoff[2], wfhw->onoff[3], wfhw->prescale);
- return 0;
+ return ret_tohw;
}
-/*
- * The PCA9685 has a bit for turning the PWM output full off or on. Some
- * boards like Intel Galileo actually uses these as normal GPIOs so we
- * expose a GPIO chip here which can exclusively take over the underlying
- * PWM channel.
- */
-static int pca9685_pwm_gpio_probe(struct pwm_chip *chip)
+static int pca9685_round_waveform_fromhw(struct pwm_chip *chip, struct pwm_device *pwm,
+ const void *_wfhw, struct pwm_waveform *wf)
{
+ const struct pca9685_waveform *wfhw = _wfhw;
struct pca9685 *pca = to_pca(chip);
- struct device *dev = pwmchip_parent(chip);
+ unsigned int prescale;
- pca->gpio.label = dev_name(dev);
- pca->gpio.parent = dev;
- pca->gpio.request = pca9685_pwm_gpio_request;
- pca->gpio.free = pca9685_pwm_gpio_free;
- pca->gpio.get_direction = pca9685_pwm_gpio_get_direction;
- pca->gpio.direction_input = pca9685_pwm_gpio_direction_input;
- pca->gpio.direction_output = pca9685_pwm_gpio_direction_output;
- pca->gpio.get = pca9685_pwm_gpio_get;
- pca->gpio.set_rv = pca9685_pwm_gpio_set;
- pca->gpio.base = -1;
- pca->gpio.ngpio = PCA9685_MAXCHAN;
- pca->gpio.can_sleep = true;
-
- return devm_gpiochip_add_data(dev, &pca->gpio, chip);
-}
-#else
-static inline bool pca9685_pwm_test_and_set_inuse(struct pca9685 *pca,
- int pwm_idx)
-{
- return false;
-}
+ if (wfhw->prescale)
+ prescale = wfhw->prescale;
+ else
+ scoped_guard(mutex, &pca->lock) {
+ int ret;
-static inline void
-pca9685_pwm_clear_inuse(struct pca9685 *pca, int pwm_idx)
-{
-}
+ ret = regmap_read(pca->regmap, PCA9685_PRESCALE, &prescale);
+ if (ret)
+ return ret;
+ }
-static inline int pca9685_pwm_gpio_probe(struct pwm_chip *chip)
-{
- return 0;
-}
-#endif
+ wf->period_length_ns = PCA9685_COUNTER_RANGE * PCA9685_QUANTUM_NS(prescale);
-static void pca9685_set_sleep_mode(struct pwm_chip *chip, bool enable)
-{
- struct device *dev = pwmchip_parent(chip);
- struct pca9685 *pca = to_pca(chip);
- int err = regmap_update_bits(pca->regmap, PCA9685_MODE1,
- MODE1_SLEEP, enable ? MODE1_SLEEP : 0);
- if (err) {
- dev_err(dev, "regmap_update_bits of register 0x%x failed: %pe\n",
- PCA9685_MODE1, ERR_PTR(err));
- return;
- }
+ if (wfhw->onoff[3] & LED_FULL) {
+ wf->duty_length_ns = 0;
+ wf->duty_offset_ns = 0;
+ } else if (wfhw->onoff[1] & LED_FULL) {
+ wf->duty_length_ns = wf->period_length_ns;
+ wf->duty_offset_ns = 0;
+ } else {
+ unsigned int on = wfhw->onoff[0] | (wfhw->onoff[1] & 0xf) << 8;
+ unsigned int off = wfhw->onoff[2] | (wfhw->onoff[3] & 0xf) << 8;
- if (!enable) {
- /* Wait 500us for the oscillator to be back up */
- udelay(500);
+ wf->duty_length_ns = (off - on) % PCA9685_COUNTER_RANGE * PCA9685_QUANTUM_NS(prescale);
+ wf->duty_offset_ns = on * PCA9685_QUANTUM_NS(prescale);
}
+
+ dev_dbg(&chip->dev, "pwm#%u: [%hhx %hhx %hhx %hhx] PSC:%hhx -> %lld/%lld [+%lld]\n",
+ pwm->hwpwm,
+ wfhw->onoff[0], wfhw->onoff[1], wfhw->onoff[2], wfhw->onoff[3], wfhw->prescale,
+ wf->duty_length_ns, wf->period_length_ns, wf->duty_offset_ns);
+
+ return 0;
}
-static int __pca9685_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
- const struct pwm_state *state)
+static int pca9685_read_waveform(struct pwm_chip *chip, struct pwm_device *pwm, void *_wfhw)
{
+ struct pca9685_waveform *wfhw = _wfhw;
struct pca9685 *pca = to_pca(chip);
- unsigned long long duty, prescale;
- unsigned int val = 0;
-
- if (state->polarity != PWM_POLARITY_NORMAL)
- return -EINVAL;
-
- prescale = DIV_ROUND_CLOSEST_ULL(PCA9685_OSC_CLOCK_MHZ * state->period,
- PCA9685_COUNTER_RANGE * 1000) - 1;
- if (prescale < PCA9685_PRESCALE_MIN || prescale > PCA9685_PRESCALE_MAX) {
- dev_err(pwmchip_parent(chip), "pwm not changed: period out of bounds!\n");
- return -EINVAL;
- }
+ unsigned int prescale;
+ int ret;
- if (!state->enabled) {
- pca9685_pwm_set_duty(chip, pwm->hwpwm, 0);
- return 0;
- }
+ guard(mutex)(&pca->lock);
- pca9685_read_reg(chip, PCA9685_PRESCALE, &val);
- if (prescale != val) {
- if (!pca9685_prescaler_can_change(pca, pwm->hwpwm)) {
- dev_err(pwmchip_parent(chip),
- "pwm not changed: periods of enabled pwms must match!\n");
- return -EBUSY;
- }
+ ret = regmap_bulk_read(pca->regmap, REG_ON_L(pwm->hwpwm), &wfhw->onoff, 4);
+ if (ret)
+ return ret;
- /*
- * Putting the chip briefly into SLEEP mode
- * at this point won't interfere with the
- * pm_runtime framework, because the pm_runtime
- * state is guaranteed active here.
- */
- /* Put chip into sleep mode */
- pca9685_set_sleep_mode(chip, true);
+ ret = regmap_read(pca->regmap, PCA9685_PRESCALE, &prescale);
+ if (ret)
+ return ret;
- /* Change the chip-wide output frequency */
- pca9685_write_reg(chip, PCA9685_PRESCALE, prescale);
+ wfhw->prescale = prescale;
- /* Wake the chip up */
- pca9685_set_sleep_mode(chip, false);
- }
-
- duty = PCA9685_COUNTER_RANGE * state->duty_cycle;
- duty = DIV_ROUND_UP_ULL(duty, state->period);
- pca9685_pwm_set_duty(chip, pwm->hwpwm, duty);
return 0;
}
-static int pca9685_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
- const struct pwm_state *state)
+static int pca9685_write_waveform(struct pwm_chip *chip, struct pwm_device *pwm, const void *_wfhw)
{
+ const struct pca9685_waveform *wfhw = _wfhw;
struct pca9685 *pca = to_pca(chip);
+ unsigned int current_prescale;
int ret;
- mutex_lock(&pca->lock);
- ret = __pca9685_pwm_apply(chip, pwm, state);
- if (ret == 0) {
- if (state->enabled)
- set_bit(pwm->hwpwm, pca->pwms_enabled);
- else
- clear_bit(pwm->hwpwm, pca->pwms_enabled);
- }
- mutex_unlock(&pca->lock);
+ guard(mutex)(&pca->lock);
- return ret;
-}
+ if (wfhw->prescale) {
+ ret = regmap_read(pca->regmap, PCA9685_PRESCALE, &current_prescale);
+ if (ret)
+ return ret;
-static int pca9685_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
- struct pwm_state *state)
-{
- unsigned long long duty;
- unsigned int val = 0;
+ if (current_prescale != wfhw->prescale) {
+ if (!pca9685_prescaler_can_change(pca, pwm->hwpwm))
+ return -EBUSY;
- /* Calculate (chip-wide) period from prescale value */
- pca9685_read_reg(chip, PCA9685_PRESCALE, &val);
- /*
- * PCA9685_OSC_CLOCK_MHZ is 25, i.e. an integer divider of 1000.
- * The following calculation is therefore only a multiplication
- * and we are not losing precision.
- */
- state->period = (PCA9685_COUNTER_RANGE * 1000 / PCA9685_OSC_CLOCK_MHZ) *
- (val + 1);
+ /* Put chip into sleep mode */
+ ret = pca9685_set_sleep_mode(chip, true);
+ if (ret)
+ return ret;
- /* The (per-channel) polarity is fixed */
- state->polarity = PWM_POLARITY_NORMAL;
+ /* Change the chip-wide output frequency */
+ ret = regmap_write(pca->regmap, PCA9685_PRESCALE, wfhw->prescale);
+ if (ret)
+ return ret;
- if (pwm->hwpwm >= PCA9685_MAXCHAN) {
- /*
- * The "all LEDs" channel does not support HW readout
- * Return 0 and disabled for backwards compatibility
- */
- state->duty_cycle = 0;
- state->enabled = false;
- return 0;
+ /* Wake the chip up */
+ ret = pca9685_set_sleep_mode(chip, false);
+ if (ret)
+ return ret;
+ }
}
- state->enabled = true;
- duty = pca9685_pwm_get_duty(chip, pwm->hwpwm);
- state->duty_cycle = DIV_ROUND_DOWN_ULL(duty * state->period, PCA9685_COUNTER_RANGE);
-
- return 0;
+ return regmap_bulk_write(pca->regmap, REG_ON_L(pwm->hwpwm), &wfhw->onoff, 4);
}
static int pca9685_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct pca9685 *pca = to_pca(chip);
- if (pca9685_pwm_test_and_set_inuse(pca, pwm->hwpwm))
- return -EBUSY;
-
if (pwm->hwpwm < PCA9685_MAXCHAN) {
/* PWMs - except the "all LEDs" channel - default to enabled */
mutex_lock(&pca->lock);
@@ -497,26 +378,52 @@ static void pca9685_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
struct pca9685 *pca = to_pca(chip);
mutex_lock(&pca->lock);
- pca9685_pwm_set_duty(chip, pwm->hwpwm, 0);
clear_bit(pwm->hwpwm, pca->pwms_enabled);
mutex_unlock(&pca->lock);
pm_runtime_put(pwmchip_parent(chip));
- pca9685_pwm_clear_inuse(pca, pwm->hwpwm);
}
static const struct pwm_ops pca9685_pwm_ops = {
- .apply = pca9685_pwm_apply,
- .get_state = pca9685_pwm_get_state,
+ .sizeof_wfhw = sizeof(struct pca9685_waveform),
+ .round_waveform_tohw = pca9685_round_waveform_tohw,
+ .round_waveform_fromhw = pca9685_round_waveform_fromhw,
+ .read_waveform = pca9685_read_waveform,
+ .write_waveform = pca9685_write_waveform,
.request = pca9685_pwm_request,
.free = pca9685_pwm_free,
};
+static bool pca9685_readable_reg(struct device *dev, unsigned int reg)
+{
+ /* The ALL_LED registers are readable but read as zero */
+ return reg <= REG_OFF_H(15) || reg >= PCA9685_PRESCALE;
+}
+
+static bool pca9685_writeable_reg(struct device *dev, unsigned int reg)
+{
+ return reg <= REG_OFF_H(15) || reg >= PCA9685_ALL_LED_ON_L;
+}
+
+static bool pca9685_volatile_reg(struct device *dev, unsigned int reg)
+{
+ /*
+ * Writing to an ALL_LED register affects all LEDi registers, so they
+	 * are not cacheable. :-\
+ */
+ return reg < PCA9685_PRESCALE;
+}
+
static const struct regmap_config pca9685_regmap_i2c_config = {
.reg_bits = 8,
.val_bits = 8,
+
+ .readable_reg = pca9685_readable_reg,
+ .writeable_reg = pca9685_writeable_reg,
+ .volatile_reg = pca9685_volatile_reg,
+
.max_register = PCA9685_NUMREGS,
- .cache_type = REGCACHE_NONE,
+ .cache_type = REGCACHE_MAPLE,
};
static int pca9685_pwm_probe(struct i2c_client *client)
@@ -544,9 +451,8 @@ static int pca9685_pwm_probe(struct i2c_client *client)
mutex_init(&pca->lock);
- ret = pca9685_read_reg(chip, PCA9685_MODE2, &reg);
- if (ret)
- return ret;
+ /* clear MODE2_OCH */
+ reg = 0;
if (device_property_read_bool(&client->dev, "invert"))
reg |= MODE2_INVRT;
@@ -562,16 +468,19 @@ static int pca9685_pwm_probe(struct i2c_client *client)
if (ret)
return ret;
- /* Disable all LED ALLCALL and SUBx addresses to avoid bus collisions */
+ /*
+ * Disable all LED ALLCALL and SUBx addresses to avoid bus collisions,
+ * enable Auto-Increment.
+ */
pca9685_read_reg(chip, PCA9685_MODE1, &reg);
reg &= ~(MODE1_ALLCALL | MODE1_SUB1 | MODE1_SUB2 | MODE1_SUB3);
+ reg |= MODE1_AI;
pca9685_write_reg(chip, PCA9685_MODE1, reg);
/* Reset OFF/ON registers to POR default */
- pca9685_write_reg(chip, PCA9685_ALL_LED_OFF_L, 0);
- pca9685_write_reg(chip, PCA9685_ALL_LED_OFF_H, LED_FULL);
- pca9685_write_reg(chip, PCA9685_ALL_LED_ON_L, 0);
- pca9685_write_reg(chip, PCA9685_ALL_LED_ON_H, LED_FULL);
+ ret = pca9685_write_4reg(chip, PCA9685_ALL_LED_ON_L, (u8[]){ 0, LED_FULL, 0, LED_FULL });
+ if (ret < 0)
+ return dev_err_probe(&client->dev, ret, "Failed to reset ON/OFF registers\n");
chip->ops = &pca9685_pwm_ops;
@@ -579,12 +488,6 @@ static int pca9685_pwm_probe(struct i2c_client *client)
if (ret < 0)
return ret;
- ret = pca9685_pwm_gpio_probe(chip);
- if (ret < 0) {
- pwmchip_remove(chip);
- return ret;
- }
-
pm_runtime_enable(&client->dev);
if (pm_runtime_enabled(&client->dev)) {
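
For orientation, PCA9685_QUANTUM_NS(prescale) works out to 40 ns * (prescale + 1) with the 25 MHz internal oscillator, so a full 4096-tick cycle spans 163840 ns * (prescale + 1). A worked check of the prescale selection mirroring pca9685_round_waveform_tohw(), constants restated for illustration: a 20 ms servo period requests prescale 121, which in turn yields the rounded-down 19988480 ns period the hardware can actually produce.

#include <stdio.h>

#define OSC_HZ      25000000U	/* PCA9685_OSC_CLOCK_HZ */
#define RANGE       4096U	/* PCA9685_COUNTER_RANGE */
#define QUANTUM_NS(p) ((1000000000U / OSC_HZ) * ((p) + 1))

int main(void)
{
	/* 20 ms requested -> prescale as in pca9685_round_waveform_tohw() */
	unsigned int prescale = 20000000U / (RANGE * (1000000000U / OSC_HZ)) - 1;

	/* prescale=121, period=19988480 ns */
	printf("prescale=%u period=%u ns\n", prescale,
	       RANGE * QUANTUM_NS(prescale));
	return 0;
}
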
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index d91b2bdc88fc..67cc5e8bdb0e 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -3,6 +3,10 @@
* ECAP PWM driver
*
* Copyright (C) 2012 Texas Instruments, Inc. - https://www.ti.com/
+ *
+ * Hardware properties:
+ * - On disable the PWM pin becomes an input, so the behaviour depends on
+ * external wiring.
*/
#include <linux/module.h>
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index 0125e73b98df..7a86cb090f76 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -36,7 +36,7 @@
#define CLKDIV_MAX 7
#define HSPCLKDIV_MAX 7
-#define PERIOD_MAX 0xFFFF
+#define PERIOD_MAX 0x10000
/* compare module registers */
#define CMPA 0x12
@@ -65,14 +65,10 @@
#define AQCTL_ZRO_FRCHIGH BIT(1)
#define AQCTL_ZRO_FRCTOGGLE (BIT(1) | BIT(0))
-#define AQCTL_CHANA_POLNORMAL (AQCTL_CAU_FRCLOW | AQCTL_PRD_FRCHIGH | \
- AQCTL_ZRO_FRCHIGH)
-#define AQCTL_CHANA_POLINVERSED (AQCTL_CAU_FRCHIGH | AQCTL_PRD_FRCLOW | \
- AQCTL_ZRO_FRCLOW)
-#define AQCTL_CHANB_POLNORMAL (AQCTL_CBU_FRCLOW | AQCTL_PRD_FRCHIGH | \
- AQCTL_ZRO_FRCHIGH)
-#define AQCTL_CHANB_POLINVERSED (AQCTL_CBU_FRCHIGH | AQCTL_PRD_FRCLOW | \
- AQCTL_ZRO_FRCLOW)
+#define AQCTL_CHANA_POLNORMAL (AQCTL_CAU_FRCLOW | AQCTL_ZRO_FRCHIGH)
+#define AQCTL_CHANA_POLINVERSED (AQCTL_CAU_FRCHIGH | AQCTL_ZRO_FRCLOW)
+#define AQCTL_CHANB_POLNORMAL (AQCTL_CBU_FRCLOW | AQCTL_ZRO_FRCHIGH)
+#define AQCTL_CHANB_POLINVERSED (AQCTL_CBU_FRCHIGH | AQCTL_ZRO_FRCLOW)
#define AQSFRC_RLDCSF_MASK (BIT(7) | BIT(6))
#define AQSFRC_RLDCSF_ZRO 0
@@ -108,7 +104,6 @@ struct ehrpwm_pwm_chip {
unsigned long clk_rate;
void __iomem *mmio_base;
unsigned long period_cycles[NUM_PWM_CHANNEL];
- enum pwm_polarity polarity[NUM_PWM_CHANNEL];
struct clk *tbclk;
struct ehrpwm_context ctx;
};
@@ -166,7 +161,7 @@ static int set_prescale_div(unsigned long rqst_prescaler, u16 *prescale_div,
*prescale_div = (1 << clkdiv) *
(hspclkdiv ? (hspclkdiv * 2) : 1);
- if (*prescale_div > rqst_prescaler) {
+ if (*prescale_div >= rqst_prescaler) {
*tb_clk_div = (clkdiv << TBCTL_CLKDIV_SHIFT) |
(hspclkdiv << TBCTL_HSPCLKDIV_SHIFT);
return 0;
@@ -177,51 +172,20 @@ static int set_prescale_div(unsigned long rqst_prescaler, u16 *prescale_div,
return 1;
}
-static void configure_polarity(struct ehrpwm_pwm_chip *pc, int chan)
-{
- u16 aqctl_val, aqctl_mask;
- unsigned int aqctl_reg;
-
- /*
- * Configure PWM output to HIGH/LOW level on counter
- * reaches compare register value and LOW/HIGH level
- * on counter value reaches period register value and
- * zero value on counter
- */
- if (chan == 1) {
- aqctl_reg = AQCTLB;
- aqctl_mask = AQCTL_CBU_MASK;
-
- if (pc->polarity[chan] == PWM_POLARITY_INVERSED)
- aqctl_val = AQCTL_CHANB_POLINVERSED;
- else
- aqctl_val = AQCTL_CHANB_POLNORMAL;
- } else {
- aqctl_reg = AQCTLA;
- aqctl_mask = AQCTL_CAU_MASK;
-
- if (pc->polarity[chan] == PWM_POLARITY_INVERSED)
- aqctl_val = AQCTL_CHANA_POLINVERSED;
- else
- aqctl_val = AQCTL_CHANA_POLNORMAL;
- }
-
- aqctl_mask |= AQCTL_PRD_MASK | AQCTL_ZRO_MASK;
- ehrpwm_modify(pc->mmio_base, aqctl_reg, aqctl_mask, aqctl_val);
-}
-
/*
* period_ns = 10^9 * (ps_divval * period_cycles) / PWM_CLK_RATE
* duty_ns = 10^9 * (ps_divval * duty_cycles) / PWM_CLK_RATE
*/
static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- u64 duty_ns, u64 period_ns)
+ u64 duty_ns, u64 period_ns, enum pwm_polarity polarity)
{
struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip);
u32 period_cycles, duty_cycles;
u16 ps_divval, tb_divval;
unsigned int i, cmp_reg;
unsigned long long c;
+ u16 aqctl_val, aqctl_mask;
+ unsigned int aqctl_reg;
if (period_ns > NSEC_PER_SEC)
return -ERANGE;
@@ -231,15 +195,10 @@ static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
do_div(c, NSEC_PER_SEC);
period_cycles = (unsigned long)c;
- if (period_cycles < 1) {
- period_cycles = 1;
- duty_cycles = 1;
- } else {
- c = pc->clk_rate;
- c = c * duty_ns;
- do_div(c, NSEC_PER_SEC);
- duty_cycles = (unsigned long)c;
- }
+ c = pc->clk_rate;
+ c = c * duty_ns;
+ do_div(c, NSEC_PER_SEC);
+ duty_cycles = (unsigned long)c;
/*
* Period values should be same for multiple PWM channels as IP uses
@@ -265,52 +224,73 @@ static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
pc->period_cycles[pwm->hwpwm] = period_cycles;
/* Configure clock prescaler to support Low frequency PWM wave */
- if (set_prescale_div(period_cycles/PERIOD_MAX, &ps_divval,
+ if (set_prescale_div(DIV_ROUND_UP(period_cycles, PERIOD_MAX), &ps_divval,
&tb_divval)) {
dev_err(pwmchip_parent(chip), "Unsupported values\n");
return -EINVAL;
}
- pm_runtime_get_sync(pwmchip_parent(chip));
-
- /* Update clock prescaler values */
- ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_CLKDIV_MASK, tb_divval);
-
	/* Update period & duty cycle with prescaler division */
period_cycles = period_cycles / ps_divval;
duty_cycles = duty_cycles / ps_divval;
- /* Configure shadow loading on Period register */
- ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_PRDLD_MASK, TBCTL_PRDLD_SHDW);
+ if (period_cycles < 1)
+ period_cycles = 1;
- ehrpwm_write(pc->mmio_base, TBPRD, period_cycles);
+ pm_runtime_get_sync(pwmchip_parent(chip));
- /* Configure ehrpwm counter for up-count mode */
- ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_CTRMODE_MASK,
- TBCTL_CTRMODE_UP);
+ /* Update clock prescaler values */
+ ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_CLKDIV_MASK, tb_divval);
- if (pwm->hwpwm == 1)
+ if (pwm->hwpwm == 1) {
/* Channel 1 configured with compare B register */
cmp_reg = CMPB;
- else
+
+ aqctl_reg = AQCTLB;
+ aqctl_mask = AQCTL_CBU_MASK;
+
+ if (polarity == PWM_POLARITY_INVERSED)
+ aqctl_val = AQCTL_CHANB_POLINVERSED;
+ else
+ aqctl_val = AQCTL_CHANB_POLNORMAL;
+
+		/* if duty_cycle exceeds the period, don't toggle on CBU */
+ if (duty_cycles > period_cycles)
+ aqctl_val &= ~AQCTL_CBU_MASK;
+
+ } else {
/* Channel 0 configured with compare A register */
cmp_reg = CMPA;
- ehrpwm_write(pc->mmio_base, cmp_reg, duty_cycles);
+ aqctl_reg = AQCTLA;
+ aqctl_mask = AQCTL_CAU_MASK;
- pm_runtime_put_sync(pwmchip_parent(chip));
+ if (polarity == PWM_POLARITY_INVERSED)
+ aqctl_val = AQCTL_CHANA_POLINVERSED;
+ else
+ aqctl_val = AQCTL_CHANA_POLNORMAL;
- return 0;
-}
+		/* if duty_cycle exceeds the period, don't toggle on CAU */
+ if (duty_cycles > period_cycles)
+ aqctl_val &= ~AQCTL_CAU_MASK;
+ }
-static int ehrpwm_pwm_set_polarity(struct pwm_chip *chip,
- struct pwm_device *pwm,
- enum pwm_polarity polarity)
-{
- struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip);
+ aqctl_mask |= AQCTL_PRD_MASK | AQCTL_ZRO_MASK;
+ ehrpwm_modify(pc->mmio_base, aqctl_reg, aqctl_mask, aqctl_val);
+
+ /* Configure shadow loading on Period register */
+ ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_PRDLD_MASK, TBCTL_PRDLD_SHDW);
+
+ ehrpwm_write(pc->mmio_base, TBPRD, period_cycles - 1);
- /* Configuration of polarity in hardware delayed, do at enable */
- pc->polarity[pwm->hwpwm] = polarity;
+ /* Configure ehrpwm counter for up-count mode */
+ ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_CTRMODE_MASK,
+ TBCTL_CTRMODE_UP);
+
+ if (!(duty_cycles > period_cycles))
+ ehrpwm_write(pc->mmio_base, cmp_reg, duty_cycles);
+
+ pm_runtime_put_sync(pwmchip_parent(chip));
return 0;
}
@@ -339,9 +319,6 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val);
- /* Channels polarity can be configured from action qualifier module */
- configure_polarity(pc, pwm->hwpwm);
-
/* Enable TBCLK */
ret = clk_enable(pc->tbclk);
if (ret) {
@@ -391,12 +368,7 @@ static void ehrpwm_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip);
- if (pwm_is_enabled(pwm)) {
- dev_warn(pwmchip_parent(chip), "Removing PWM device without disabling\n");
- pm_runtime_put_sync(pwmchip_parent(chip));
- }
-
- /* set period value to zero on free */
+ /* Don't let a PWM without a consumer block requests to the other channel */
pc->period_cycles[pwm->hwpwm] = 0;
}
@@ -411,10 +383,6 @@ static int ehrpwm_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
ehrpwm_pwm_disable(chip, pwm);
enabled = false;
}
-
- err = ehrpwm_pwm_set_polarity(chip, pwm, state->polarity);
- if (err)
- return err;
}
if (!state->enabled) {
@@ -423,7 +391,7 @@ static int ehrpwm_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
}
- err = ehrpwm_pwm_config(chip, pwm, state->duty_cycle, state->period);
+ err = ehrpwm_pwm_config(chip, pwm, state->duty_cycle, state->period, state->polarity);
if (err)
return err;
diff --git a/drivers/ras/ras.c b/drivers/ras/ras.c
index a6e4792a1b2e..ac0e132ccc3e 100644
--- a/drivers/ras/ras.c
+++ b/drivers/ras/ras.c
@@ -51,6 +51,7 @@ void log_non_standard_event(const guid_t *sec_type, const guid_t *fru_id,
{
trace_non_standard_event(sec_type, fru_id, fru_text, sev, err, len);
}
+EXPORT_SYMBOL_GPL(log_non_standard_event);
void log_arm_hw_error(struct cper_sec_proc_arm *err)
{
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index eaa6df1c9f80..d84f3d054c59 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -297,6 +297,14 @@ config REGULATOR_BD96801
This driver can also be built as a module. If so, the module
will be called bd96801-regulator.
+config REGULATOR_BQ257XX
+ tristate "TI BQ257XX regulator family"
+ depends on MFD_BQ257XX
+ depends on GPIOLIB || COMPILE_TEST
+ help
+ Say Y to enable support for the boost regulator function of
+ the BQ257XX family of charger circuits.
+
config REGULATOR_CPCAP
tristate "Motorola CPCAP regulator"
depends on MFD_CPCAP
@@ -777,6 +785,15 @@ config REGULATOR_MAX77826
It includes support for control of output voltage. This
regulator is found on the Samsung Galaxy S5 (klte) smartphone.
+config REGULATOR_MAX77838
+ tristate "Maxim 77838 regulator"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ This driver controls a Maxim 77838 regulator via I2C bus.
+	  The regulator includes 4 LDOs and a buck regulator. It's
+ present on the Samsung Galaxy S7 lineup of smartphones.
+
config REGULATOR_MC13XXX_CORE
tristate
@@ -1006,6 +1023,26 @@ config REGULATOR_PCAP
This driver provides support for the voltage regulators of the
PCAP2 PMIC.
+config REGULATOR_PF0900
+ tristate "NXP PF0900/PF0901/PF09XX regulator driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+	  Say Y here to enable support for the NXP PF0900/PF0901/PF09XX
+	  PMIC regulator driver.
+
+config REGULATOR_PF530X
+ tristate "NXP PF5300/PF5301/PF5302 regulator driver"
+ depends on I2C && OF
+ select REGMAP_I2C
+ help
+ Say y here to support the regulators found on the NXP
+ PF5300/PF5301/PF5302 PMIC.
+
+	  Say M here to build the driver as a module. The module will be
+	  named "pf530x-regulator".
+
config REGULATOR_PF8X00
tristate "NXP PF8100/PF8121A/PF8200 regulator driver"
depends on I2C && OF
@@ -1240,6 +1277,18 @@ config REGULATOR_RT5120
600mV to 1395mV, per step 6.250mV. The others are all fixed voltage
by external hardware circuit.
+config REGULATOR_RT5133
+ tristate "Richtek RT5133 PMIC Regulators"
+ depends on I2C && GPIOLIB && OF
+ select REGMAP
+ select CRC8
+ select OF_GPIO
+ help
+ This driver adds support for RT5133 PMIC regulators.
+	  The RT5133 is an integrated chip with 8 LDOs and 3 GPOs that
+	  can drive their outputs high or low. The GPO block is supplied
+	  internally by the LDO1 voltage.
+
config REGULATOR_RT5190A
tristate "Richtek RT5190A PMIC"
depends on I2C
@@ -1344,6 +1393,14 @@ config REGULATOR_RTQ2208
and two ldos. It features wide output voltage range from 0.4V to 2.05V
and the capability to configure the corresponding power stages.
+config REGULATOR_S2DOS05
+ tristate "Samsung S2DOS05 voltage regulator"
+ depends on MFD_SEC_CORE || COMPILE_TEST
+ help
+ This driver provides support for the voltage regulators of the S2DOS05.
+	  The S2DOS05 is a companion power management IC for smartphones.
+	  It has 4 LDO outputs and 1 buck output.
+
config REGULATOR_S2MPA01
tristate "Samsung S2MPA01 voltage regulator"
depends on MFD_SEC_CORE || COMPILE_TEST
@@ -1395,6 +1452,19 @@ config REGULATOR_SLG51000
The SLG51000 is seven compact and customizable low dropout
regulators.
+config REGULATOR_SPACEMIT_P1
+ tristate "SpacemiT P1 regulators"
+ depends on ARCH_SPACEMIT || COMPILE_TEST
+ depends on I2C
+ select MFD_SPACEMIT_P1
+ default ARCH_SPACEMIT
+ help
+ Enable support for regulators implemented by the SpacemiT P1
+ power controller. The P1 implements 6 high-efficiency buck
+ converters and 12 programmable LDO regulators. To compile this
+ driver as a module, choose M here. The module will be called
+ "spacemit-pmic".
+
config REGULATOR_STM32_BOOSTER
tristate "STMicroelectronics STM32 BOOSTER"
depends on ARCH_STM32 || COMPILE_TEST
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index be98b29d6675..b3101376029d 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_REGULATOR_BD71828) += bd71828-regulator.o
obj-$(CONFIG_REGULATOR_BD718XX) += bd718x7-regulator.o
obj-$(CONFIG_REGULATOR_BD9571MWV) += bd9571mwv-regulator.o
obj-$(CONFIG_REGULATOR_BD957XMUF) += bd9576-regulator.o
+obj-$(CONFIG_REGULATOR_BQ257XX) += bq257xx-regulator.o
obj-$(CONFIG_REGULATOR_DA903X) += da903x-regulator.o
obj-$(CONFIG_REGULATOR_BD96801) += bd96801-regulator.o
obj-$(CONFIG_REGULATOR_DA9052) += da9052-regulator.o
@@ -92,6 +93,7 @@ obj-$(CONFIG_REGULATOR_MAX77686) += max77686-regulator.o
obj-$(CONFIG_REGULATOR_MAX77693) += max77693-regulator.o
obj-$(CONFIG_REGULATOR_MAX77802) += max77802-regulator.o
obj-$(CONFIG_REGULATOR_MAX77826) += max77826-regulator.o
+obj-$(CONFIG_REGULATOR_MAX77838) += max77838-regulator.o
obj-$(CONFIG_REGULATOR_MAX77857) += max77857-regulator.o
obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
@@ -124,7 +126,9 @@ obj-$(CONFIG_REGULATOR_QCOM_SPMI) += qcom_spmi-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_USB_VBUS) += qcom_usb_vbus-regulator.o
obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o
obj-$(CONFIG_REGULATOR_PCA9450) += pca9450-regulator.o
+obj-$(CONFIG_REGULATOR_PF0900) += pf0900-regulator.o
obj-$(CONFIG_REGULATOR_PF9453) += pf9453-regulator.o
+obj-$(CONFIG_REGULATOR_PF530X) += pf530x-regulator.o
obj-$(CONFIG_REGULATOR_PF8X00) += pf8x00-regulator.o
obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o
obj-$(CONFIG_REGULATOR_PV88060) += pv88060-regulator.o
@@ -146,6 +150,7 @@ obj-$(CONFIG_REGULATOR_RT4803) += rt4803.o
obj-$(CONFIG_REGULATOR_RT4831) += rt4831-regulator.o
obj-$(CONFIG_REGULATOR_RT5033) += rt5033-regulator.o
obj-$(CONFIG_REGULATOR_RT5120) += rt5120-regulator.o
+obj-$(CONFIG_REGULATOR_RT5133) += rt5133-regulator.o
obj-$(CONFIG_REGULATOR_RT5190A) += rt5190a-regulator.o
obj-$(CONFIG_REGULATOR_RT5739) += rt5739.o
obj-$(CONFIG_REGULATOR_RT5759) += rt5759-regulator.o
@@ -156,12 +161,14 @@ obj-$(CONFIG_REGULATOR_RTMV20) += rtmv20-regulator.o
obj-$(CONFIG_REGULATOR_RTQ2134) += rtq2134-regulator.o
obj-$(CONFIG_REGULATOR_RTQ6752) += rtq6752-regulator.o
obj-$(CONFIG_REGULATOR_RTQ2208) += rtq2208-regulator.o
+obj-$(CONFIG_REGULATOR_S2DOS05) += s2dos05-regulator.o
obj-$(CONFIG_REGULATOR_S2MPA01) += s2mpa01.o
obj-$(CONFIG_REGULATOR_S2MPS11) += s2mps11.o
obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o
obj-$(CONFIG_REGULATOR_SC2731) += sc2731-regulator.o
obj-$(CONFIG_REGULATOR_SKY81452) += sky81452-regulator.o
obj-$(CONFIG_REGULATOR_SLG51000) += slg51000-regulator.o
+obj-$(CONFIG_REGULATOR_SPACEMIT_P1) += spacemit-p1.o
obj-$(CONFIG_REGULATOR_STM32_BOOSTER) += stm32-booster.o
obj-$(CONFIG_REGULATOR_STM32_VREFBUF) += stm32-vrefbuf.o
obj-$(CONFIG_REGULATOR_STM32_PWR) += stm32-pwr.o
diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c
index e803cc59d68a..022d98f3c32a 100644
--- a/drivers/regulator/bd718x7-regulator.c
+++ b/drivers/regulator/bd718x7-regulator.c
@@ -1598,7 +1598,7 @@ static int setup_feedback_loop(struct device *dev, struct device_node *np,
if (desc->n_linear_ranges && desc->linear_ranges) {
struct linear_range *new;
- new = devm_kzalloc(dev, desc->n_linear_ranges *
+ new = devm_kcalloc(dev, desc->n_linear_ranges,
sizeof(struct linear_range),
GFP_KERNEL);
if (!new)
diff --git a/drivers/regulator/bq257xx-regulator.c b/drivers/regulator/bq257xx-regulator.c
new file mode 100644
index 000000000000..fc1ccede4468
--- /dev/null
+++ b/drivers/regulator/bq257xx-regulator.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * BQ257XX Battery Charger Driver
+ * Copyright (C) 2025 Chris Morgan <macromorgan@hotmail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mfd/bq257xx.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+
+struct bq257xx_reg_data {
+ struct bq257xx_device *bq;
+ struct regulator_dev *bq257xx_reg;
+ struct gpio_desc *otg_en_gpio;
+ struct regulator_desc desc;
+};
+
+static int bq25703_vbus_get_cur_limit(struct regulator_dev *rdev)
+{
+ struct bq257xx_reg_data *pdata = rdev_get_drvdata(rdev);
+ int ret;
+ unsigned int reg;
+
+ ret = regmap_read(pdata->bq->regmap, BQ25703_OTG_CURRENT, &reg);
+ if (ret)
+ return ret;
+ return FIELD_GET(BQ25703_OTG_CUR_MASK, reg) * BQ25703_OTG_CUR_STEP_UA;
+}
+
+/*
+ * Check if the minimum current and maximum current requested are
+ * sane values, then set the register accordingly.
+ */
+static int bq25703_vbus_set_cur_limit(struct regulator_dev *rdev,
+ int min_uA, int max_uA)
+{
+ struct bq257xx_reg_data *pdata = rdev_get_drvdata(rdev);
+ unsigned int reg;
+
+ if ((min_uA > BQ25703_OTG_CUR_MAX_UA) || (max_uA < 0))
+ return -EINVAL;
+
+ reg = (max_uA / BQ25703_OTG_CUR_STEP_UA);
+
+ /* Catch rounding errors since our step is 50000uA. */
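+	/* e.g. min_uA=120000, max_uA=140000 -> reg=2 -> 100000uA < min_uA -> -EINVAL */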
+ if ((reg * BQ25703_OTG_CUR_STEP_UA) < min_uA)
+ return -EINVAL;
+
+ return regmap_write(pdata->bq->regmap, BQ25703_OTG_CURRENT,
+ FIELD_PREP(BQ25703_OTG_CUR_MASK, reg));
+}
+
+static int bq25703_vbus_enable(struct regulator_dev *rdev)
+{
+ struct bq257xx_reg_data *pdata = rdev_get_drvdata(rdev);
+
+ if (pdata->otg_en_gpio)
+ gpiod_set_value_cansleep(pdata->otg_en_gpio, 1);
+ return regulator_enable_regmap(rdev);
+}
+
+static int bq25703_vbus_disable(struct regulator_dev *rdev)
+{
+ struct bq257xx_reg_data *pdata = rdev_get_drvdata(rdev);
+
+ if (pdata->otg_en_gpio)
+ gpiod_set_value_cansleep(pdata->otg_en_gpio, 0);
+ return regulator_disable_regmap(rdev);
+}
+
+static const struct regulator_ops bq25703_vbus_ops = {
+ .enable = bq25703_vbus_enable,
+ .disable = bq25703_vbus_disable,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_current_limit = bq25703_vbus_get_cur_limit,
+ .set_current_limit = bq25703_vbus_set_cur_limit,
+};
+
+static const struct regulator_desc bq25703_vbus_desc = {
+ .name = "vbus",
+ .of_match = of_match_ptr("vbus"),
+ .regulators_node = of_match_ptr("regulators"),
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .ops = &bq25703_vbus_ops,
+ .min_uV = BQ25703_OTG_VOLT_MIN_UV,
+ .uV_step = BQ25703_OTG_VOLT_STEP_UV,
+ .n_voltages = BQ25703_OTG_VOLT_NUM_VOLT,
+ .enable_mask = BQ25703_EN_OTG_MASK,
+ .enable_reg = BQ25703_CHARGE_OPTION_3,
+ .enable_val = BQ25703_EN_OTG_MASK,
+ .disable_val = 0,
+ .vsel_reg = BQ25703_OTG_VOLT,
+ .vsel_mask = BQ25703_OTG_VOLT_MASK,
+};
+
+/* Get optional GPIO for OTG regulator enable. */
+static void bq257xx_reg_dt_parse_gpio(struct platform_device *pdev)
+{
+ struct device_node *child, *subchild;
+ struct bq257xx_reg_data *pdata = platform_get_drvdata(pdev);
+
+ child = of_get_child_by_name(pdev->dev.of_node,
+ pdata->desc.regulators_node);
+ if (!child)
+ return;
+
+	subchild = of_get_child_by_name(child, pdata->desc.of_match);
+	of_node_put(child);
+	if (!subchild)
+		return;
+
+ pdata->otg_en_gpio = devm_fwnode_gpiod_get_index(&pdev->dev,
+ of_fwnode_handle(subchild),
+ "enable", 0,
+ GPIOD_OUT_LOW,
+ pdata->desc.of_match);
+
+ of_node_put(subchild);
+
+	if (IS_ERR(pdata->otg_en_gpio)) {
+		dev_err(&pdev->dev, "Error getting enable gpio: %ld\n",
+			PTR_ERR(pdata->otg_en_gpio));
+		pdata->otg_en_gpio = NULL;
+		return;
+	}
+}
+
+static int bq257xx_regulator_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bq257xx_device *bq = dev_get_drvdata(pdev->dev.parent);
+ struct bq257xx_reg_data *pdata;
+ struct device_node *np = dev->of_node;
+ struct regulator_config cfg = {};
+
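+	/* MFD child device: reuse the parent's OF node so of_match/regulators_node resolve */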
+ pdev->dev.of_node = pdev->dev.parent->of_node;
+ pdev->dev.of_node_reused = true;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(struct bq257xx_reg_data), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ pdata->bq = bq;
+ pdata->desc = bq25703_vbus_desc;
+
+ platform_set_drvdata(pdev, pdata);
+ bq257xx_reg_dt_parse_gpio(pdev);
+
+ cfg.dev = &pdev->dev;
+ cfg.driver_data = pdata;
+ cfg.of_node = np;
+ cfg.regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!cfg.regmap)
+ return -ENODEV;
+
+ pdata->bq257xx_reg = devm_regulator_register(dev, &pdata->desc, &cfg);
+ if (IS_ERR(pdata->bq257xx_reg)) {
+ return dev_err_probe(&pdev->dev, PTR_ERR(pdata->bq257xx_reg),
+ "error registering bq257xx regulator");
+ }
+
+ return 0;
+}
+
+static struct platform_driver bq257xx_reg_driver = {
+ .driver = {
+ .name = "bq257xx-regulator",
+ },
+ .probe = bq257xx_regulator_probe,
+};
+
+module_platform_driver(bq257xx_reg_driver);
+
+MODULE_DESCRIPTION("bq257xx regulator driver");
+MODULE_AUTHOR("Chris Morgan <macromorgan@hotmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 8ed9b96518cf..dd7b10e768c0 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1586,8 +1586,8 @@ static int set_machine_constraints(struct regulator_dev *rdev)
}
if (rdev->constraints->active_discharge && ops->set_active_discharge) {
- bool ad_state = (rdev->constraints->active_discharge ==
- REGULATOR_ACTIVE_DISCHARGE_ENABLE) ? true : false;
+ bool ad_state = rdev->constraints->active_discharge ==
+ REGULATOR_ACTIVE_DISCHARGE_ENABLE;
ret = ops->set_active_discharge(rdev, ad_state);
if (ret < 0) {
@@ -3884,7 +3884,7 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
new_delta = ret;
/* check that voltage is converging quickly enough */
- if (new_delta - delta > rdev->constraints->max_uV_step) {
+ if (delta - new_delta < rdev->constraints->max_uV_step) {
ret = -EWOULDBLOCK;
goto out;
}
diff --git a/drivers/regulator/max77838-regulator.c b/drivers/regulator/max77838-regulator.c
new file mode 100644
index 000000000000..9faddbfd25fd
--- /dev/null
+++ b/drivers/regulator/max77838-regulator.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+//
+// regulator driver for Maxim MAX77838
+//
+// based on max77826-regulator.c
+//
+// Copyright (c) 2025, Ivaylo Ivanov <ivo.ivanov.ivanov1@gmail.com>
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+
+enum max77838_registers {
+ MAX77838_REG_DEVICE_ID = 0x00,
+ MAX77838_REG_TOPSYS_STAT,
+ MAX77838_REG_STAT,
+ MAX77838_REG_EN,
+ MAX77838_REG_GPIO_PD_CTRL,
+ MAX77838_REG_UVLO_CFG1,
+ /* 0x06 - 0x0B: reserved */
+ MAX77838_REG_I2C_CFG = 0x0C,
+ /* 0x0D - 0x0F: reserved */
+ MAX77838_REG_LDO1_CFG = 0x10,
+ MAX77838_REG_LDO2_CFG,
+ MAX77838_REG_LDO3_CFG,
+ MAX77838_REG_LDO4_CFG,
+ /* 0x14 - 0x1F: reserved */
+ MAX77838_REG_BUCK_CFG1 = 0x20,
+ MAX77838_REG_BUCK_VOUT,
+};
+
+enum max77838_regulators {
+ MAX77838_LDO1 = 0,
+ MAX77838_LDO2,
+ MAX77838_LDO3,
+ MAX77838_LDO4,
+ MAX77838_BUCK,
+ MAX77838_MAX_REGULATORS,
+};
+
+#define MAX77838_MASK_LDO 0x7f
+#define MAX77838_MASK_BUCK 0xff
+
+#define MAX77838_LDO1_EN BIT(0)
+#define MAX77838_LDO2_EN BIT(1)
+#define MAX77838_LDO3_EN BIT(2)
+#define MAX77838_LDO4_EN BIT(3)
+#define MAX77838_BUCK_EN BIT(4)
+
+#define MAX77838_BUCK_AD BIT(3)
+#define MAX77838_LDO_AD BIT(7)
+
+#define MAX77838_LDO_VOLT_MIN 600000
+#define MAX77838_LDO_VOLT_MAX 3775000
+#define MAX77838_LDO_VOLT_STEP 25000
+
+#define MAX77838_BUCK_VOLT_MIN 500000
+#define MAX77838_BUCK_VOLT_MAX 2093750
+#define MAX77838_BUCK_VOLT_STEP 6250
+
+#define MAX77838_VOLT_RANGE(_type) \
+ ((MAX77838_ ## _type ## _VOLT_MAX - \
+ MAX77838_ ## _type ## _VOLT_MIN) / \
+ MAX77838_ ## _type ## _VOLT_STEP + 1)
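+/* e.g. LDO: (3775000 - 600000) / 25000 + 1 = 128 selectors (0x7f mask), */
+/* BUCK: (2093750 - 500000) / 6250 + 1 = 256 selectors (0xff mask) */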
+
+#define MAX77838_LDO(_id) \
+ [MAX77838_LDO ## _id] = { \
+ .id = MAX77838_LDO ## _id, \
+ .name = "ldo"#_id, \
+ .of_match = of_match_ptr("ldo"#_id), \
+ .regulators_node = "regulators", \
+ .ops = &max77838_regulator_ops, \
+ .min_uV = MAX77838_LDO_VOLT_MIN, \
+ .uV_step = MAX77838_LDO_VOLT_STEP, \
+ .n_voltages = MAX77838_VOLT_RANGE(LDO), \
+ .enable_reg = MAX77838_REG_EN, \
+ .enable_mask = MAX77838_LDO ## _id ## _EN, \
+ .vsel_reg = MAX77838_REG_LDO ## _id ## _CFG, \
+ .vsel_mask = MAX77838_MASK_LDO, \
+ .active_discharge_off = 0, \
+ .active_discharge_on = MAX77838_LDO_AD, \
+ .active_discharge_mask = MAX77838_LDO_AD, \
+ .active_discharge_reg = MAX77838_REG_LDO ## _id ## _CFG, \
+ .owner = THIS_MODULE, \
+ }
+
+#define MAX77838_BUCK_DESC \
+ [MAX77838_BUCK] = { \
+ .id = MAX77838_BUCK, \
+ .name = "buck", \
+ .of_match = of_match_ptr("buck"), \
+ .regulators_node = "regulators", \
+ .ops = &max77838_regulator_ops, \
+ .min_uV = MAX77838_BUCK_VOLT_MIN, \
+ .uV_step = MAX77838_BUCK_VOLT_STEP, \
+ .n_voltages = MAX77838_VOLT_RANGE(BUCK), \
+ .enable_reg = MAX77838_REG_EN, \
+ .enable_mask = MAX77838_BUCK_EN, \
+ .vsel_reg = MAX77838_REG_BUCK_VOUT, \
+ .vsel_mask = MAX77838_MASK_BUCK, \
+ .active_discharge_off = 0, \
+ .active_discharge_on = MAX77838_BUCK_AD, \
+ .active_discharge_mask = MAX77838_BUCK_AD, \
+ .active_discharge_reg = MAX77838_REG_BUCK_CFG1, \
+ .owner = THIS_MODULE, \
+ }
+
+struct max77838_regulator_info {
+ struct regmap *regmap;
+};
+
+static const struct regmap_config max77838_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX77838_REG_BUCK_VOUT,
+};
+
+static const struct regulator_ops max77838_regulator_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+};
+
+static const struct regulator_desc max77838_regulators_desc[] = {
+ MAX77838_LDO(1),
+ MAX77838_LDO(2),
+ MAX77838_LDO(3),
+ MAX77838_LDO(4),
+ MAX77838_BUCK_DESC,
+};
+
+static int max77838_read_device_id(struct regmap *regmap, struct device *dev)
+{
+ unsigned int device_id;
+ int ret;
+
+ ret = regmap_read(regmap, MAX77838_REG_DEVICE_ID, &device_id);
+ if (!ret)
+ dev_dbg(dev, "DEVICE_ID: 0x%x\n", device_id);
+
+ return ret;
+}
+
+static int max77838_i2c_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct max77838_regulator_info *info;
+ struct regulator_config config = {};
+ struct regulator_dev *rdev;
+ struct regmap *regmap;
+ int i;
+
+ info = devm_kzalloc(dev, sizeof(struct max77838_regulator_info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ regmap = devm_regmap_init_i2c(client, &max77838_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to allocate regmap!\n");
+ return PTR_ERR(regmap);
+ }
+
+ info->regmap = regmap;
+ i2c_set_clientdata(client, info);
+
+ config.dev = dev;
+ config.regmap = regmap;
+ config.driver_data = info;
+
+ for (i = 0; i < MAX77838_MAX_REGULATORS; i++) {
+ rdev = devm_regulator_register(dev,
+ &max77838_regulators_desc[i],
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(dev, "Failed to register regulator!\n");
+ return PTR_ERR(rdev);
+ }
+ }
+
+ return max77838_read_device_id(regmap, dev);
+}
+
+static const struct of_device_id __maybe_unused max77838_of_match[] = {
+ { .compatible = "maxim,max77838" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, max77838_of_match);
+
+static const struct i2c_device_id max77838_id[] = {
+ { "max77838-regulator" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, max77838_id);
+
+static struct i2c_driver max77838_regulator_driver = {
+ .driver = {
+ .name = "max77838",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .of_match_table = of_match_ptr(max77838_of_match),
+ },
+ .probe = max77838_i2c_probe,
+ .id_table = max77838_id,
+};
+module_i2c_driver(max77838_regulator_driver);
+
+MODULE_AUTHOR("Ivaylo Ivanov <ivo.ivanov.ivanov1@gmail.com>");
+MODULE_DESCRIPTION("MAX77838 PMIC regulator driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c
index feadb21a8f30..4be270f4d6c3 100644
--- a/drivers/regulator/pca9450-regulator.c
+++ b/drivers/regulator/pca9450-regulator.c
@@ -40,7 +40,6 @@ struct pca9450 {
struct device *dev;
struct regmap *regmap;
struct gpio_desc *sd_vsel_gpio;
- struct notifier_block restart_nb;
enum pca9450_chip_type type;
unsigned int rcnt;
int irq;
@@ -1100,10 +1099,9 @@ static irqreturn_t pca9450_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static int pca9450_i2c_restart_handler(struct notifier_block *nb,
- unsigned long action, void *data)
+static int pca9450_i2c_restart_handler(struct sys_off_data *data)
{
- struct pca9450 *pca9450 = container_of(nb, struct pca9450, restart_nb);
+ struct pca9450 *pca9450 = data->cb_data;
struct i2c_client *i2c = container_of(pca9450->dev, struct i2c_client, dev);
dev_dbg(&i2c->dev, "Restarting device..\n");
@@ -1261,10 +1259,9 @@ static int pca9450_i2c_probe(struct i2c_client *i2c)
pca9450->sd_vsel_fixed_low =
of_property_read_bool(ldo5->dev.of_node, "nxp,sd-vsel-fixed-low");
- pca9450->restart_nb.notifier_call = pca9450_i2c_restart_handler;
- pca9450->restart_nb.priority = PCA9450_RESTART_HANDLER_PRIORITY;
-
- if (register_restart_handler(&pca9450->restart_nb))
+ if (devm_register_sys_off_handler(&i2c->dev, SYS_OFF_MODE_RESTART,
+ PCA9450_RESTART_HANDLER_PRIORITY,
+ pca9450_i2c_restart_handler, pca9450))
dev_warn(&i2c->dev, "Failed to register restart handler\n");
dev_info(&i2c->dev, "%s probed.\n",
diff --git a/drivers/regulator/pf0900-regulator.c b/drivers/regulator/pf0900-regulator.c
new file mode 100644
index 000000000000..b5effee32917
--- /dev/null
+++ b/drivers/regulator/pf0900-regulator.c
@@ -0,0 +1,975 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright 2025 NXP.
+// NXP PF0900 PMIC driver
+
+#include <linux/bitfield.h>
+#include <linux/crc8.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+enum pf0900_regulators {
+ PF0900_SW1 = 0,
+ PF0900_SW2,
+ PF0900_SW3,
+ PF0900_SW4,
+ PF0900_SW5,
+ PF0900_LDO1,
+ PF0900_LDO2,
+ PF0900_LDO3,
+ PF0900_VAON,
+ PF0900_REGULATOR_CNT,
+};
+
+enum {
+ PF0900_DVS_LEVEL_RUN = 0,
+ PF0900_DVS_LEVEL_STANDBY,
+ PF0900_DVS_LEVEL_MAX,
+};
+
+#define PF0900_VAON_VOLTAGE_NUM 0x03
+#define PF0900_SW_VOLTAGE_NUM 0x100
+#define PF0900_LDO_VOLTAGE_NUM 0x20
+
+#define REGU_SW_CNT 0x5
+#define REGU_LDO_VAON_CNT 0x4
+
+enum {
+ PF0900_REG_DEV_ID = 0x00,
+ PF0900_REG_DEV_FAM = 0x01,
+ PF0900_REG_REV_ID = 0x02,
+ PF0900_REG_PROG_ID1 = 0x03,
+ PF0900_REG_PROG_ID2 = 0x04,
+ PF0900_REG_SYSTEM_INT = 0x05,
+ PF0900_REG_STATUS1_INT = 0x06,
+ PF0900_REG_STATUS1_MSK = 0x07,
+ PF0900_REG_STATUS1_SNS = 0x08,
+ PF0900_REG_STATUS2_INT = 0x09,
+ PF0900_REG_STATUS2_MSK = 0x0A,
+ PF0900_REG_STATUS2_SNS = 0x0B,
+ PF0900_REG_STATUS3_INT = 0x0C,
+ PF0900_REG_STATUS3_MSK = 0x0D,
+ PF0900_REG_SW_MODE_INT = 0x0E,
+ PF0900_REG_SW_MODE_MSK = 0x0F,
+ PF0900_REG_SW_ILIM_INT = 0x10,
+ PF0900_REG_SW_ILIM_MSK = 0x11,
+ PF0900_REG_SW_ILIM_SNS = 0x12,
+ PF0900_REG_LDO_ILIM_INT = 0x13,
+ PF0900_REG_LDO_ILIM_MSK = 0x14,
+ PF0900_REG_LDO_ILIM_SNS = 0x15,
+ PF0900_REG_SW_UV_INT = 0x16,
+ PF0900_REG_SW_UV_MSK = 0x17,
+ PF0900_REG_SW_UV_SNS = 0x18,
+ PF0900_REG_SW_OV_INT = 0x19,
+ PF0900_REG_SW_OV_MSK = 0x1A,
+ PF0900_REG_SW_OV_SNS = 0x1B,
+ PF0900_REG_LDO_UV_INT = 0x1C,
+ PF0900_REG_LDO_UV_MSK = 0x1D,
+ PF0900_REG_LDO_UV_SNS = 0x1E,
+ PF0900_REG_LDO_OV_INT = 0x1F,
+ PF0900_REG_LDO_OV_MSK = 0x20,
+ PF0900_REG_LDO_OV_SNS = 0x21,
+ PF0900_REG_PWRON_INT = 0x22,
+ PF0900_REG_IO_INT = 0x24,
+ PF0900_REG_IO_MSK = 0x25,
+ PF0900_REG_IO_SNS = 0x26,
+ PF0900_REG_IOSHORT_SNS = 0x27,
+ PF0900_REG_ABIST_OV1 = 0x28,
+ PF0900_REG_ABIST_OV2 = 0x29,
+ PF0900_REG_ABIST_UV1 = 0x2A,
+ PF0900_REG_ABIST_UV2 = 0x2B,
+ PF0900_REG_ABIST_IO = 0x2C,
+ PF0900_REG_TEST_FLAGS = 0x2D,
+ PF0900_REG_HFAULT_FLAGS = 0x2E,
+ PF0900_REG_FAULT_FLAGS = 0x2F,
+ PF0900_REG_FS0B_CFG = 0x30,
+ PF0900_REG_FCCU_CFG = 0x31,
+ PF0900_REG_RSTB_CFG1 = 0x32,
+ PF0900_REG_SYSTEM_CMD = 0x33,
+ PF0900_REG_FS0B_CMD = 0x34,
+ PF0900_REG_SECURE_WR1 = 0x35,
+ PF0900_REG_SECURE_WR2 = 0x36,
+ PF0900_REG_VMON_CFG1 = 0x37,
+ PF0900_REG_SYS_CFG1 = 0x38,
+ PF0900_REG_GPO_CFG = 0x39,
+ PF0900_REG_GPO_CTRL = 0x3A,
+ PF0900_REG_PWRUP_CFG = 0x3B,
+ PF0900_REG_RSTB_PWRUP = 0x3C,
+ PF0900_REG_GPIO1_PWRUP = 0x3D,
+ PF0900_REG_GPIO2_PWRUP = 0x3E,
+ PF0900_REG_GPIO3_PWRUP = 0x3F,
+ PF0900_REG_GPIO4_PWRUP = 0x40,
+ PF0900_REG_VMON1_PWRUP = 0x41,
+ PF0900_REG_VMON2_PWRUP = 0x42,
+ PF0900_REG_SW1_PWRUP = 0x43,
+ PF0900_REG_SW2_PWRUP = 0x44,
+ PF0900_REG_SW3_PWRUP = 0x45,
+ PF0900_REG_SW4_PWRUP = 0x46,
+ PF0900_REG_SW5_PWRUP = 0x47,
+ PF0900_REG_LDO1_PWRUP = 0x48,
+ PF0900_REG_LDO2_PWRUP = 0x49,
+ PF0900_REG_LDO3_PWRUP = 0x4A,
+ PF0900_REG_VAON_PWRUP = 0x4B,
+ PF0900_REG_FREQ_CTRL = 0x4C,
+ PF0900_REG_PWRON_CFG = 0x4D,
+ PF0900_REG_WD_CTRL1 = 0x4E,
+ PF0900_REG_WD_CTRL2 = 0x4F,
+ PF0900_REG_WD_CFG1 = 0x50,
+ PF0900_REG_WD_CFG2 = 0x51,
+ PF0900_REG_WD_CNT1 = 0x52,
+ PF0900_REG_WD_CNT2 = 0x53,
+ PF0900_REG_FAULT_CFG = 0x54,
+ PF0900_REG_FAULT_CNT = 0x55,
+ PF0900_REG_DFS_CNT = 0x56,
+ PF0900_REG_AMUX_CFG = 0x57,
+ PF0900_REG_VMON1_RUN_CFG = 0x58,
+ PF0900_REG_VMON1_STBY_CFG = 0x59,
+ PF0900_REG_VMON1_CTRL = 0x5A,
+ PF0900_REG_VMON2_RUN_CFG = 0x5B,
+ PF0900_REG_VMON2_STBY_CFG = 0x5C,
+ PF0900_REG_VMON2_CTRL = 0x5D,
+ PF0900_REG_SW1_VRUN = 0x5E,
+ PF0900_REG_SW1_VSTBY = 0x5F,
+ PF0900_REG_SW1_MODE = 0x60,
+ PF0900_REG_SW1_CFG1 = 0x61,
+ PF0900_REG_SW1_CFG2 = 0x62,
+ PF0900_REG_SW2_VRUN = 0x63,
+ PF0900_REG_SW2_VSTBY = 0x64,
+ PF0900_REG_SW2_MODE = 0x65,
+ PF0900_REG_SW2_CFG1 = 0x66,
+ PF0900_REG_SW2_CFG2 = 0x67,
+ PF0900_REG_SW3_VRUN = 0x68,
+ PF0900_REG_SW3_VSTBY = 0x69,
+ PF0900_REG_SW3_MODE = 0x6A,
+ PF0900_REG_SW3_CFG1 = 0x6B,
+ PF0900_REG_SW3_CFG2 = 0x6C,
+ PF0900_REG_SW4_VRUN = 0x6D,
+ PF0900_REG_SW4_VSTBY = 0x6E,
+ PF0900_REG_SW4_MODE = 0x6F,
+ PF0900_REG_SW4_CFG1 = 0x70,
+ PF0900_REG_SW4_CFG2 = 0x71,
+ PF0900_REG_SW5_VRUN = 0x72,
+ PF0900_REG_SW5_VSTBY = 0x73,
+ PF0900_REG_SW5_MODE = 0x74,
+ PF0900_REG_SW5_CFG1 = 0x75,
+ PF0900_REG_SW5_CFG2 = 0x76,
+ PF0900_REG_LDO1_RUN = 0x77,
+ PF0900_REG_LDO1_STBY = 0x78,
+ PF0900_REG_LDO1_CFG2 = 0x79,
+ PF0900_REG_LDO2_RUN = 0x7A,
+ PF0900_REG_LDO2_STBY = 0x7B,
+ PF0900_REG_LDO2_CFG2 = 0x7C,
+ PF0900_REG_LDO3_RUN = 0x7D,
+ PF0900_REG_LDO3_STBY = 0x7E,
+ PF0900_REG_LDO3_CFG2 = 0x7F,
+ PF0900_REG_VAON_CFG1 = 0x80,
+ PF0900_REG_VAON_CFG2 = 0x81,
+ PF0900_REG_SYS_DIAG = 0x82,
+ PF0900_MAX_REGISTER,
+};
+
+/* PF0900 SW MODE */
+#define SW_RUN_MODE_OFF 0x00
+#define SW_RUN_MODE_PWM 0x01
+#define SW_RUN_MODE_PFM 0x02
+#define SW_STBY_MODE_OFF 0x00
+#define SW_STBY_MODE_PWM 0x04
+#define SW_STBY_MODE_PFM 0x08
+
+/* PF0900 SW MODE MASK */
+#define SW_RUN_MODE_MASK GENMASK(1, 0)
+#define SW_STBY_MODE_MASK GENMASK(3, 2)
+
+/* PF0900 SW VRUN/VSTBY MASK */
+#define PF0900_SW_VOL_MASK GENMASK(7, 0)
+
+/* PF0900_REG_VAON_CFG1 bits */
+#define PF0900_VAON_1P8V 0x01
+
+#define PF0900_VAON_MASK GENMASK(1, 0)
+
+/* PF0900_REG_SWX_CFG1 MASK */
+#define PF0900_SW_DVS_MASK GENMASK(4, 3)
+
+/* PF0900_REG_LDO_RUN MASK */
+#define VLDO_RUN_MASK GENMASK(4, 0)
+#define LDO_RUN_EN_MASK BIT(5)
+
+/* PF0900_REG_STATUS1_INT bits */
+#define PF0900_IRQ_PWRUP BIT(3)
+
+/* PF0900_REG_ILIM_INT bits */
+#define PF0900_IRQ_SW1_IL BIT(0)
+#define PF0900_IRQ_SW2_IL BIT(1)
+#define PF0900_IRQ_SW3_IL BIT(2)
+#define PF0900_IRQ_SW4_IL BIT(3)
+#define PF0900_IRQ_SW5_IL BIT(4)
+
+#define PF0900_IRQ_LDO1_IL BIT(0)
+#define PF0900_IRQ_LDO2_IL BIT(1)
+#define PF0900_IRQ_LDO3_IL BIT(2)
+
+/* PF0900_REG_UV_INT bits */
+#define PF0900_IRQ_SW1_UV BIT(0)
+#define PF0900_IRQ_SW2_UV BIT(1)
+#define PF0900_IRQ_SW3_UV BIT(2)
+#define PF0900_IRQ_SW4_UV BIT(3)
+#define PF0900_IRQ_SW5_UV BIT(4)
+
+#define PF0900_IRQ_LDO1_UV BIT(0)
+#define PF0900_IRQ_LDO2_UV BIT(1)
+#define PF0900_IRQ_LDO3_UV BIT(2)
+#define PF0900_IRQ_VAON_UV BIT(3)
+
+/* PF0900_REG_OV_INT bits */
+#define PF0900_IRQ_SW1_OV BIT(0)
+#define PF0900_IRQ_SW2_OV BIT(1)
+#define PF0900_IRQ_SW3_OV BIT(2)
+#define PF0900_IRQ_SW4_OV BIT(3)
+#define PF0900_IRQ_SW5_OV BIT(4)
+
+#define PF0900_IRQ_LDO1_OV BIT(0)
+#define PF0900_IRQ_LDO2_OV BIT(1)
+#define PF0900_IRQ_LDO3_OV BIT(2)
+#define PF0900_IRQ_VAON_OV BIT(3)
+
+struct pf0900_regulator_desc {
+ struct regulator_desc desc;
+ unsigned int suspend_enable_mask;
+ unsigned int suspend_voltage_reg;
+ unsigned int suspend_voltage_cache;
+};
+
+struct pf0900_drvdata {
+ const struct pf0900_regulator_desc *desc;
+ unsigned int rcnt;
+};
+
+struct pf0900 {
+ struct device *dev;
+ struct regmap *regmap;
+ const struct pf0900_drvdata *drvdata;
+ struct regulator_dev *rdevs[PF0900_REGULATOR_CNT];
+ int irq;
+ unsigned short addr;
+ bool crc_en;
+};
+
+enum pf0900_regulator_type {
+ PF0900_SW = 0,
+ PF0900_LDO,
+};
+
+#define PF0900_REGU_IRQ(_reg, _type, _event) \
+ { \
+ .reg = _reg, \
+ .type = _type, \
+ .event = _event, \
+ }
+
+struct pf0900_regulator_irq {
+ unsigned int reg;
+ unsigned int type;
+ unsigned int event;
+};
+
+static const struct regmap_range pf0900_range = {
+ .range_min = PF0900_REG_DEV_ID,
+ .range_max = PF0900_REG_SYS_DIAG,
+};
+
+static const struct regmap_access_table pf0900_volatile_regs = {
+ .yes_ranges = &pf0900_range,
+ .n_yes_ranges = 1,
+};
+
+static const struct regmap_config pf0900_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .volatile_table = &pf0900_volatile_regs,
+ .max_register = PF0900_MAX_REGISTER - 1,
+ .cache_type = REGCACHE_MAPLE,
+};
+
+static uint8_t crc8_j1850(unsigned short addr, unsigned int reg,
+ unsigned int val)
+{
+ uint8_t crcBuf[3];
+ uint8_t t_crc;
+ uint8_t i, j;
+
+ crcBuf[0] = addr;
+ crcBuf[1] = reg;
+ crcBuf[2] = val;
+ t_crc = 0xFF;
+
+	/*
+	 * The CRC calculation is based on the standard CRC-8-SAE as
+	 * defined in the SAE-J1850 specification, with the following
+	 * characteristics:
+	 * Polynomial = 0x1D
+	 * Initial Value = 0xFF
+	 * The CRC byte is calculated by shifting the 24-bit data through
+	 * the CRC polynomial. The 24-bit package is built as follows:
+	 * DEVICE_ADDR[b8] + REGISTER_ADDR[b8] + DATA[b8]
+	 * The DEVICE_ADDR is calculated as the 7-bit slave address
+	 * shifted left one space plus the corresponding read/write bit:
+	 * (7-bit address [b7] << 1) + R/W = DEVICE_ADDR[b8]
+	 */
+ for (i = 0; i < sizeof(crcBuf); i++) {
+ t_crc ^= crcBuf[i];
+ for (j = 0; j < 8; j++) {
+ if ((t_crc & 0x80) != 0) {
+ t_crc <<= 1;
+ t_crc ^= 0x1D;
+ } else {
+ t_crc <<= 1;
+ }
+ }
+ }
+
+ return t_crc;
+}
+
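+/*
+ * A minimal sketch (not part of this driver): the same CRC-8/SAE-J1850
+ * byte could also be produced with the kernel's crc8 helper, assuming a
+ * table populated once with the 0x1D polynomial:
+ *
+ *	DECLARE_CRC8_TABLE(pf0900_crc_table);
+ *	crc8_populate_msb(pf0900_crc_table, 0x1D);
+ *	crc = crc8(pf0900_crc_table, crcBuf, sizeof(crcBuf), CRC8_INIT_VALUE);
+ */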
+static int pf0900_regmap_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ struct pf0900 *pf0900 = dev_get_drvdata(dev);
+ int ret;
+ u8 crc;
+
+ if (!pf0900 || !pf0900->dev)
+ return -EINVAL;
+
+ if (reg >= PF0900_MAX_REGISTER) {
+ dev_err(pf0900->dev, "Invalid register address: 0x%x\n", reg);
+ return -EINVAL;
+ }
+
+ if (pf0900->crc_en) {
+ ret = i2c_smbus_read_word_data(i2c, reg);
+ if (ret < 0) {
+ dev_err(pf0900->dev, "Read error at reg=0x%x: %d\n", reg, ret);
+ return ret;
+ }
+
+ *val = (u16)ret;
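+		/* reads are validated against the address byte with the R/W bit set */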
+ crc = crc8_j1850(pf0900->addr << 1 | 0x1, reg, FIELD_GET(GENMASK(7, 0), *val));
+ if (crc != FIELD_GET(GENMASK(15, 8), *val)) {
+			dev_err(pf0900->dev, "CRC check error!\n");
+ return -EINVAL;
+ }
+ *val = FIELD_GET(GENMASK(7, 0), *val);
+ } else {
+ ret = i2c_smbus_read_byte_data(i2c, reg);
+ if (ret < 0) {
+ dev_err(pf0900->dev, "Read error at reg=0x%x: %d\n", reg, ret);
+ return ret;
+ }
+ *val = ret;
+ }
+
+ return 0;
+}
+
+static int pf0900_regmap_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ struct pf0900 *pf0900 = dev_get_drvdata(dev);
+ uint8_t data[2];
+ int ret;
+
+ if (!pf0900 || !pf0900->dev)
+ return -EINVAL;
+
+ if (reg >= PF0900_MAX_REGISTER) {
+ dev_err(pf0900->dev, "Invalid register address: 0x%x\n", reg);
+ return -EINVAL;
+ }
+
+ data[0] = val;
+ if (pf0900->crc_en) {
+ /* Get CRC */
+ data[1] = crc8_j1850(pf0900->addr << 1, reg, data[0]);
+ val = FIELD_PREP(GENMASK(15, 8), data[1]) | data[0];
+ ret = i2c_smbus_write_word_data(i2c, reg, val);
+ } else {
+ ret = i2c_smbus_write_byte_data(i2c, reg, data[0]);
+ }
+
+ if (ret) {
+ dev_err(pf0900->dev, "Write reg=0x%x error!\n", reg);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int pf0900_suspend_enable(struct regulator_dev *rdev)
+{
+ struct pf0900_regulator_desc *rdata = rdev_get_drvdata(rdev);
+ struct regmap *rmap = rdev_get_regmap(rdev);
+
+ return regmap_update_bits(rmap, rdata->desc.enable_reg,
+ rdata->suspend_enable_mask, SW_STBY_MODE_PFM);
+}
+
+static int pf0900_suspend_disable(struct regulator_dev *rdev)
+{
+ struct pf0900_regulator_desc *rdata = rdev_get_drvdata(rdev);
+ struct regmap *rmap = rdev_get_regmap(rdev);
+
+ return regmap_update_bits(rmap, rdata->desc.enable_reg,
+ rdata->suspend_enable_mask, SW_STBY_MODE_OFF);
+}
+
+static int pf0900_set_suspend_voltage(struct regulator_dev *rdev, int uV)
+{
+ struct pf0900_regulator_desc *rdata = rdev_get_drvdata(rdev);
+ struct regmap *rmap = rdev_get_regmap(rdev);
+ int ret;
+
+ if (rdata->suspend_voltage_cache == uV)
+ return 0;
+
+ ret = regulator_map_voltage_iterate(rdev, uV, uV);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(rdev), "failed to map %i uV\n", uV);
+ return ret;
+ }
+
+ dev_dbg(rdev_get_dev(rdev), "uV: %i, reg: 0x%x, msk: 0x%x, val: 0x%x\n",
+ uV, rdata->suspend_voltage_reg, rdata->desc.vsel_mask, ret);
+ ret = regmap_update_bits(rmap, rdata->suspend_voltage_reg,
+ rdata->desc.vsel_mask, ret);
+ if (ret < 0) {
+ dev_err(rdev_get_dev(rdev), "failed to set %i uV\n", uV);
+ return ret;
+ }
+
+ rdata->suspend_voltage_cache = uV;
+
+ return 0;
+}
+
+static const struct regmap_bus pf0900_regmap_bus = {
+ .reg_read = pf0900_regmap_read,
+ .reg_write = pf0900_regmap_write,
+};
+
+static const struct regulator_ops pf0900_vaon_regulator_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+static const struct regulator_ops pf0900_dvs_sw_regulator_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
+ .set_suspend_enable = pf0900_suspend_enable,
+ .set_suspend_disable = pf0900_suspend_disable,
+ .set_suspend_voltage = pf0900_set_suspend_voltage,
+};
+
+static const struct regulator_ops pf0900_ldo_regulator_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+/*
+ * SW1/2/3/4/5
+ * SW1_DVS[1:0] SW1 DVS ramp rate setting
+ * 00: 15.6mV/8usec
+ * 01: 15.6mV/4usec
+ * 10: 15.6mV/2usec
+ * 11: 15.6mV/1usec
+ */
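+/* table entries are in uV/us: 15600 / 8 = 1950, / 4 = 3900, / 2 = 7800, / 1 = 15600 */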
+static const unsigned int pf0900_dvs_sw_ramp_table[] = {
+ 1950, 3900, 7800, 15600
+};
+
+/* VAON 1.8V, 3.0V, or 3.3V */
+static const int pf0900_vaon_voltages[] = {
+ 0, 1800000, 3000000, 3300000,
+};
+
+/*
+ * SW1 0.5V to 3.3V
+ * 0.5V to 1.35V (6.25mV step)
+ * 1.8V to 2.5V (12.5mV step)
+ * 2.8V to 3.3V (25mV step)
+ */
+static const struct linear_range pf0900_dvs_sw1_volts[] = {
+ REGULATOR_LINEAR_RANGE(0, 0x00, 0x08, 0),
+ REGULATOR_LINEAR_RANGE(500000, 0x09, 0x91, 6250),
+ REGULATOR_LINEAR_RANGE(0, 0x92, 0x9E, 0),
+ REGULATOR_LINEAR_RANGE(1500000, 0x9F, 0x9F, 0),
+ REGULATOR_LINEAR_RANGE(1800000, 0xA0, 0xD8, 12500),
+ REGULATOR_LINEAR_RANGE(0, 0xD9, 0xDF, 0),
+ REGULATOR_LINEAR_RANGE(2800000, 0xE0, 0xF4, 25000),
+ REGULATOR_LINEAR_RANGE(0, 0xF5, 0xFF, 0),
+};
+
+/*
+ * SW2/3/4/5 0.3V to 3.3V
+ * 0.45V to 1.35V (6.25mV step)
+ * 1.8V to 2.5V (12.5mV step)
+ * 2.8V to 3.3V (25mV step)
+ */
+static const struct linear_range pf0900_dvs_sw2345_volts[] = {
+ REGULATOR_LINEAR_RANGE(300000, 0x00, 0x00, 0),
+ REGULATOR_LINEAR_RANGE(450000, 0x01, 0x91, 6250),
+ REGULATOR_LINEAR_RANGE(0, 0x92, 0x9E, 0),
+ REGULATOR_LINEAR_RANGE(1500000, 0x9F, 0x9F, 0),
+ REGULATOR_LINEAR_RANGE(1800000, 0xA0, 0xD8, 12500),
+ REGULATOR_LINEAR_RANGE(0, 0xD9, 0xDF, 0),
+ REGULATOR_LINEAR_RANGE(2800000, 0xE0, 0xF4, 25000),
+ REGULATOR_LINEAR_RANGE(0, 0xF5, 0xFF, 0),
+};
+
+/*
+ * LDO1
+ * 0.75V to 3.3V
+ */
+static const struct linear_range pf0900_ldo1_volts[] = {
+ REGULATOR_LINEAR_RANGE(750000, 0x00, 0x0F, 50000),
+ REGULATOR_LINEAR_RANGE(1800000, 0x10, 0x1F, 100000),
+};
+
+/*
+ * LDO2/3
+ * 0.65V to 3.3V
+ */
+static const struct linear_range pf0900_ldo23_volts[] = {
+ REGULATOR_LINEAR_RANGE(650000, 0x00, 0x0D, 50000),
+ REGULATOR_LINEAR_RANGE(1400000, 0x0E, 0x0F, 100000),
+ REGULATOR_LINEAR_RANGE(1800000, 0x10, 0x1F, 100000),
+};
+
+static const struct pf0900_regulator_desc pf0900_regulators[] = {
+ {
+ .desc = {
+ .name = "sw1",
+ .of_match = of_match_ptr("sw1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PF0900_SW1,
+ .ops = &pf0900_dvs_sw_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PF0900_SW_VOLTAGE_NUM,
+ .linear_ranges = pf0900_dvs_sw1_volts,
+ .n_linear_ranges = ARRAY_SIZE(pf0900_dvs_sw1_volts),
+ .vsel_reg = PF0900_REG_SW1_VRUN,
+ .vsel_mask = PF0900_SW_VOL_MASK,
+ .enable_reg = PF0900_REG_SW1_MODE,
+ .enable_mask = SW_RUN_MODE_MASK,
+ .enable_val = SW_RUN_MODE_PWM,
+ .ramp_reg = PF0900_REG_SW1_CFG1,
+ .ramp_mask = PF0900_SW_DVS_MASK,
+ .ramp_delay_table = pf0900_dvs_sw_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(pf0900_dvs_sw_ramp_table),
+ .owner = THIS_MODULE,
+ },
+ .suspend_enable_mask = SW_STBY_MODE_MASK,
+ .suspend_voltage_reg = PF0900_REG_SW1_VSTBY,
+ },
+ {
+ .desc = {
+ .name = "sw2",
+ .of_match = of_match_ptr("sw2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PF0900_SW2,
+ .ops = &pf0900_dvs_sw_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PF0900_SW_VOLTAGE_NUM,
+ .linear_ranges = pf0900_dvs_sw2345_volts,
+ .n_linear_ranges = ARRAY_SIZE(pf0900_dvs_sw2345_volts),
+ .vsel_reg = PF0900_REG_SW2_VRUN,
+ .vsel_mask = PF0900_SW_VOL_MASK,
+ .enable_reg = PF0900_REG_SW2_MODE,
+ .enable_mask = SW_RUN_MODE_MASK,
+ .enable_val = SW_RUN_MODE_PWM,
+ .ramp_reg = PF0900_REG_SW2_CFG1,
+ .ramp_mask = PF0900_SW_DVS_MASK,
+ .ramp_delay_table = pf0900_dvs_sw_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(pf0900_dvs_sw_ramp_table),
+ .owner = THIS_MODULE,
+ },
+ .suspend_enable_mask = SW_STBY_MODE_MASK,
+ .suspend_voltage_reg = PF0900_REG_SW2_VSTBY,
+ },
+ {
+ .desc = {
+ .name = "sw3",
+ .of_match = of_match_ptr("sw3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PF0900_SW3,
+ .ops = &pf0900_dvs_sw_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PF0900_SW_VOLTAGE_NUM,
+ .linear_ranges = pf0900_dvs_sw2345_volts,
+ .n_linear_ranges = ARRAY_SIZE(pf0900_dvs_sw2345_volts),
+ .vsel_reg = PF0900_REG_SW3_VRUN,
+ .vsel_mask = PF0900_SW_VOL_MASK,
+ .enable_reg = PF0900_REG_SW3_MODE,
+ .enable_mask = SW_RUN_MODE_MASK,
+ .enable_val = SW_RUN_MODE_PWM,
+ .ramp_reg = PF0900_REG_SW3_CFG1,
+ .ramp_mask = PF0900_SW_DVS_MASK,
+ .ramp_delay_table = pf0900_dvs_sw_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(pf0900_dvs_sw_ramp_table),
+ .owner = THIS_MODULE,
+ },
+ .suspend_enable_mask = SW_STBY_MODE_MASK,
+ .suspend_voltage_reg = PF0900_REG_SW3_VSTBY,
+ },
+ {
+ .desc = {
+ .name = "sw4",
+ .of_match = of_match_ptr("sw4"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PF0900_SW4,
+ .ops = &pf0900_dvs_sw_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PF0900_SW_VOLTAGE_NUM,
+ .linear_ranges = pf0900_dvs_sw2345_volts,
+ .n_linear_ranges = ARRAY_SIZE(pf0900_dvs_sw2345_volts),
+ .vsel_reg = PF0900_REG_SW4_VRUN,
+ .vsel_mask = PF0900_SW_VOL_MASK,
+ .enable_reg = PF0900_REG_SW4_MODE,
+ .enable_mask = SW_RUN_MODE_MASK,
+ .enable_val = SW_RUN_MODE_PWM,
+ .ramp_reg = PF0900_REG_SW4_CFG1,
+ .ramp_mask = PF0900_SW_DVS_MASK,
+ .ramp_delay_table = pf0900_dvs_sw_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(pf0900_dvs_sw_ramp_table),
+ .owner = THIS_MODULE,
+ },
+ .suspend_enable_mask = SW_STBY_MODE_MASK,
+ .suspend_voltage_reg = PF0900_REG_SW4_VSTBY,
+ },
+ {
+ .desc = {
+ .name = "sw5",
+ .of_match = of_match_ptr("sw5"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PF0900_SW5,
+ .ops = &pf0900_dvs_sw_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PF0900_SW_VOLTAGE_NUM,
+ .linear_ranges = pf0900_dvs_sw2345_volts,
+ .n_linear_ranges = ARRAY_SIZE(pf0900_dvs_sw2345_volts),
+ .vsel_reg = PF0900_REG_SW5_VRUN,
+ .vsel_mask = PF0900_SW_VOL_MASK,
+ .enable_reg = PF0900_REG_SW5_MODE,
+ .enable_mask = SW_RUN_MODE_MASK,
+ .enable_val = SW_RUN_MODE_PWM,
+ .ramp_reg = PF0900_REG_SW5_CFG1,
+ .ramp_mask = PF0900_SW_DVS_MASK,
+ .ramp_delay_table = pf0900_dvs_sw_ramp_table,
+ .n_ramp_values = ARRAY_SIZE(pf0900_dvs_sw_ramp_table),
+ .owner = THIS_MODULE,
+ },
+ .suspend_enable_mask = SW_STBY_MODE_MASK,
+ .suspend_voltage_reg = PF0900_REG_SW5_VSTBY,
+ },
+ {
+ .desc = {
+ .name = "ldo1",
+ .of_match = of_match_ptr("ldo1"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PF0900_LDO1,
+ .ops = &pf0900_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PF0900_LDO_VOLTAGE_NUM,
+ .linear_ranges = pf0900_ldo1_volts,
+ .n_linear_ranges = ARRAY_SIZE(pf0900_ldo1_volts),
+ .vsel_reg = PF0900_REG_LDO1_RUN,
+ .vsel_mask = VLDO_RUN_MASK,
+ .enable_reg = PF0900_REG_LDO1_RUN,
+ .enable_mask = LDO_RUN_EN_MASK,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
+ .name = "ldo2",
+ .of_match = of_match_ptr("ldo2"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PF0900_LDO2,
+ .ops = &pf0900_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PF0900_LDO_VOLTAGE_NUM,
+ .linear_ranges = pf0900_ldo23_volts,
+ .n_linear_ranges = ARRAY_SIZE(pf0900_ldo23_volts),
+ .vsel_reg = PF0900_REG_LDO2_RUN,
+ .vsel_mask = VLDO_RUN_MASK,
+ .enable_reg = PF0900_REG_LDO2_RUN,
+ .enable_mask = LDO_RUN_EN_MASK,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
+ .name = "ldo3",
+ .of_match = of_match_ptr("ldo3"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PF0900_LDO3,
+ .ops = &pf0900_ldo_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PF0900_LDO_VOLTAGE_NUM,
+ .linear_ranges = pf0900_ldo23_volts,
+ .n_linear_ranges = ARRAY_SIZE(pf0900_ldo23_volts),
+ .vsel_reg = PF0900_REG_LDO3_RUN,
+ .vsel_mask = VLDO_RUN_MASK,
+ .enable_reg = PF0900_REG_LDO3_RUN,
+ .enable_mask = LDO_RUN_EN_MASK,
+ .owner = THIS_MODULE,
+ },
+ },
+ {
+ .desc = {
+ .name = "vaon",
+ .of_match = of_match_ptr("vaon"),
+ .regulators_node = of_match_ptr("regulators"),
+ .id = PF0900_VAON,
+			.ops = &pf0900_vaon_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = PF0900_VAON_VOLTAGE_NUM,
+ .volt_table = pf0900_vaon_voltages,
+ .enable_reg = PF0900_REG_VAON_CFG1,
+ .enable_mask = PF0900_VAON_MASK,
+ .enable_val = PF0900_VAON_1P8V,
+ .vsel_reg = PF0900_REG_VAON_CFG1,
+ .vsel_mask = PF0900_VAON_MASK,
+ .owner = THIS_MODULE,
+ },
+ },
+};
+
+static const struct pf0900_regulator_irq regu_irqs[] = {
+ PF0900_REGU_IRQ(PF0900_REG_SW_ILIM_INT, PF0900_SW, REGULATOR_ERROR_OVER_CURRENT_WARN),
+ PF0900_REGU_IRQ(PF0900_REG_LDO_ILIM_INT, PF0900_LDO, REGULATOR_ERROR_OVER_CURRENT_WARN),
+ PF0900_REGU_IRQ(PF0900_REG_SW_UV_INT, PF0900_SW, REGULATOR_ERROR_UNDER_VOLTAGE_WARN),
+ PF0900_REGU_IRQ(PF0900_REG_LDO_UV_INT, PF0900_LDO, REGULATOR_ERROR_UNDER_VOLTAGE_WARN),
+ PF0900_REGU_IRQ(PF0900_REG_SW_OV_INT, PF0900_SW, REGULATOR_ERROR_OVER_VOLTAGE_WARN),
+ PF0900_REGU_IRQ(PF0900_REG_LDO_OV_INT, PF0900_LDO, REGULATOR_ERROR_OVER_VOLTAGE_WARN),
+};
+
+static irqreturn_t pf0900_irq_handler(int irq, void *data)
+{
+ unsigned int val, regu, i, index;
+ struct pf0900 *pf0900 = data;
+ int ret;
+
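+	/*
+	 * Each INT register carries one bit per regulator: ack the bits that
+	 * are set, then forward the matching event to every affected rdev.
+	 */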
+ for (i = 0; i < ARRAY_SIZE(regu_irqs); i++) {
+ ret = regmap_read(pf0900->regmap, regu_irqs[i].reg, &val);
+ if (ret < 0) {
+ dev_err(pf0900->dev, "Failed to read %d\n", ret);
+ return IRQ_NONE;
+ }
+ if (val) {
+ ret = regmap_write_bits(pf0900->regmap, regu_irqs[i].reg, val, val);
+ if (ret < 0) {
+ dev_err(pf0900->dev, "Failed to update %d\n", ret);
+ return IRQ_NONE;
+ }
+
+ if (regu_irqs[i].type == PF0900_SW) {
+ for (index = 0; index < REGU_SW_CNT; index++) {
+ if (val & BIT(index)) {
+ regu = (enum pf0900_regulators)index;
+ regulator_notifier_call_chain(pf0900->rdevs[regu],
+ regu_irqs[i].event,
+ NULL);
+ }
+ }
+ } else if (regu_irqs[i].type == PF0900_LDO) {
+ for (index = 0; index < REGU_LDO_VAON_CNT; index++) {
+ if (val & BIT(index)) {
+ regu = (enum pf0900_regulators)index + PF0900_LDO1;
+ regulator_notifier_call_chain(pf0900->rdevs[regu],
+ regu_irqs[i].event,
+ NULL);
+ }
+ }
+ }
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int pf0900_i2c_probe(struct i2c_client *i2c)
+{
+ const struct pf0900_regulator_desc *regulator_desc;
+ const struct pf0900_drvdata *drvdata = NULL;
+ struct device_node *np = i2c->dev.of_node;
+ unsigned int device_id, device_fam, i;
+ struct regulator_config config = { };
+ struct pf0900 *pf0900;
+ int ret;
+
+ if (!i2c->irq)
+ return dev_err_probe(&i2c->dev, -EINVAL, "No IRQ configured?\n");
+
+ pf0900 = devm_kzalloc(&i2c->dev, sizeof(struct pf0900), GFP_KERNEL);
+ if (!pf0900)
+ return -ENOMEM;
+
+ drvdata = device_get_match_data(&i2c->dev);
+ if (!drvdata)
+ return dev_err_probe(&i2c->dev, -EINVAL, "unable to find driver data\n");
+
+ regulator_desc = drvdata->desc;
+ pf0900->drvdata = drvdata;
+ pf0900->crc_en = of_property_read_bool(np, "nxp,i2c-crc-enable");
+ pf0900->irq = i2c->irq;
+ pf0900->dev = &i2c->dev;
+ pf0900->addr = i2c->addr;
+
+ dev_set_drvdata(&i2c->dev, pf0900);
+
+ pf0900->regmap = devm_regmap_init(&i2c->dev, &pf0900_regmap_bus, &i2c->dev,
+ &pf0900_regmap_config);
+ if (IS_ERR(pf0900->regmap))
+ return dev_err_probe(&i2c->dev, PTR_ERR(pf0900->regmap),
+ "regmap initialization failed\n");
+ ret = regmap_read(pf0900->regmap, PF0900_REG_DEV_ID, &device_id);
+ if (ret)
+ return dev_err_probe(&i2c->dev, ret, "Read device id error\n");
+
+ ret = regmap_read(pf0900->regmap, PF0900_REG_DEV_FAM, &device_fam);
+ if (ret)
+ return dev_err_probe(&i2c->dev, ret, "Read device fam error\n");
+
+	/* Check that the board and DT match the right PMIC */
+ if (device_fam == 0x09 && (device_id & 0x1F) != 0x0)
+ return dev_err_probe(&i2c->dev, -EINVAL, "Device id(%x) mismatched\n",
+ device_id >> 4);
+
+ for (i = 0; i < drvdata->rcnt; i++) {
+ const struct regulator_desc *desc;
+ const struct pf0900_regulator_desc *r;
+
+ r = &regulator_desc[i];
+ desc = &r->desc;
+ config.regmap = pf0900->regmap;
+ config.driver_data = (void *)r;
+ config.dev = pf0900->dev;
+
+ pf0900->rdevs[i] = devm_regulator_register(pf0900->dev, desc, &config);
+ if (IS_ERR(pf0900->rdevs[i]))
+ return dev_err_probe(pf0900->dev, PTR_ERR(pf0900->rdevs[i]),
+ "Failed to register regulator(%s)\n", desc->name);
+ }
+
+ ret = devm_request_threaded_irq(pf0900->dev, pf0900->irq, NULL,
+ pf0900_irq_handler,
+ (IRQF_TRIGGER_FALLING | IRQF_ONESHOT),
+ "pf0900-irq", pf0900);
+
+ if (ret != 0)
+ return dev_err_probe(pf0900->dev, ret, "Failed to request IRQ: %d\n",
+ pf0900->irq);
+ /*
+	 * PWRUP_M is unmasked by default. When the device enters the RUN
+	 * state, it asserts the PWRUP_I interrupt and the INTB pin to
+	 * inform the MCU that it has finished the power-up sequence properly.
+ */
+ ret = regmap_write_bits(pf0900->regmap, PF0900_REG_STATUS1_INT, PF0900_IRQ_PWRUP,
+ PF0900_IRQ_PWRUP);
+ if (ret)
+		return dev_err_probe(&i2c->dev, ret, "Clear PWRUP_I error\n");
+
+ /* mask interrupt PWRUP */
+ ret = regmap_update_bits(pf0900->regmap, PF0900_REG_STATUS1_MSK, PF0900_IRQ_PWRUP,
+ PF0900_IRQ_PWRUP);
+ if (ret)
+		return dev_err_probe(&i2c->dev, ret, "Mask irq error\n");
+
+ ret = regmap_update_bits(pf0900->regmap, PF0900_REG_SW_ILIM_MSK, PF0900_IRQ_SW1_IL |
+ PF0900_IRQ_SW2_IL | PF0900_IRQ_SW3_IL | PF0900_IRQ_SW4_IL |
+ PF0900_IRQ_SW5_IL, 0);
+ if (ret)
+ return dev_err_probe(&i2c->dev, ret, "Unmask irq error\n");
+
+ ret = regmap_update_bits(pf0900->regmap, PF0900_REG_SW_UV_MSK, PF0900_IRQ_SW1_UV |
+ PF0900_IRQ_SW2_UV | PF0900_IRQ_SW3_UV | PF0900_IRQ_SW4_UV |
+ PF0900_IRQ_SW5_UV, 0);
+ if (ret)
+ return dev_err_probe(&i2c->dev, ret, "Unmask irq error\n");
+
+ ret = regmap_update_bits(pf0900->regmap, PF0900_REG_SW_OV_MSK, PF0900_IRQ_SW1_OV |
+ PF0900_IRQ_SW2_OV | PF0900_IRQ_SW3_OV | PF0900_IRQ_SW4_OV |
+ PF0900_IRQ_SW5_OV, 0);
+ if (ret)
+ return dev_err_probe(&i2c->dev, ret, "Unmask irq error\n");
+
+ ret = regmap_update_bits(pf0900->regmap, PF0900_REG_LDO_ILIM_MSK, PF0900_IRQ_LDO1_IL |
+ PF0900_IRQ_LDO2_IL | PF0900_IRQ_LDO3_IL, 0);
+ if (ret)
+ return dev_err_probe(&i2c->dev, ret, "Unmask irq error\n");
+
+ ret = regmap_update_bits(pf0900->regmap, PF0900_REG_LDO_UV_MSK, PF0900_IRQ_LDO1_UV |
+ PF0900_IRQ_LDO2_UV | PF0900_IRQ_LDO3_UV | PF0900_IRQ_VAON_UV, 0);
+ if (ret)
+ return dev_err_probe(&i2c->dev, ret, "Unmask irq error\n");
+
+ ret = regmap_update_bits(pf0900->regmap, PF0900_REG_LDO_OV_MSK, PF0900_IRQ_LDO1_OV |
+ PF0900_IRQ_LDO2_OV | PF0900_IRQ_LDO3_OV | PF0900_IRQ_VAON_OV, 0);
+ if (ret)
+ return dev_err_probe(&i2c->dev, ret, "Unmask irq error\n");
+
+ return 0;
+}
+
+static struct pf0900_drvdata pf0900_drvdata = {
+ .desc = pf0900_regulators,
+ .rcnt = ARRAY_SIZE(pf0900_regulators),
+};
+
+static const struct of_device_id pf0900_of_match[] = {
+ { .compatible = "nxp,pf0900", .data = &pf0900_drvdata},
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, pf0900_of_match);
+
+static struct i2c_driver pf0900_i2c_driver = {
+ .driver = {
+ .name = "nxp-pf0900",
+ .of_match_table = pf0900_of_match,
+ },
+ .probe = pf0900_i2c_probe,
+};
+
+module_i2c_driver(pf0900_i2c_driver);
+
+MODULE_AUTHOR("Joy Zou <joy.zou@nxp.com>");
+MODULE_DESCRIPTION("NXP PF0900 Power Management IC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/pf530x-regulator.c b/drivers/regulator/pf530x-regulator.c
new file mode 100644
index 000000000000..f789c4b6a499
--- /dev/null
+++ b/drivers/regulator/pf530x-regulator.c
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+// Documentation for this device is available at
+// https://www.nxp.com/docs/en/data-sheet/PF5300.pdf
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+/* registers */
+#define PF530X_DEVICEID 0x00
+#define PF530X_REV 0x01
+#define PF530X_EMREV 0x02
+#define PF530X_PROGID 0x03
+#define PF530X_CONFIG1 0x04
+#define PF530X_INT_STATUS1 0x05
+#define PF530X_INT_SENSE1 0x06
+#define PF530X_INT_STATUS2 0x07
+#define PF530X_INT_SENSE2 0x08
+#define PF530X_BIST_STAT1 0x09
+#define PF530X_BIST_CTRL 0x0a
+#define PF530X_STATE 0x0b
+#define PF530X_STATE_CTRL 0x0c
+#define PF530X_SW1_VOLT 0x0d
+#define PF530X_SW1_STBY_VOLT 0x0e
+#define PF530X_SW1_CTRL1 0x0f
+#define PF530X_SW1_CTRL2 0x10
+#define PF530X_CLK_CTRL 0x11
+#define PF530X_SEQ_CTRL1 0x12
+#define PF530X_SEQ_CTRL2 0x13
+#define PF530X_RANDOM_CHK 0x14
+#define PF530X_RANDOM_GEN 0x15
+#define PF530X_WD_CTRL1 0x16
+#define PF530X_WD_SEED 0x17
+#define PF530X_WD_ANSWER 0x18
+#define PF530X_FLT_CNT1 0x19
+#define PF530X_FLT_CNT2 0x1a
+#define PF530X_OTP_MODE 0x2f
+
+enum pf530x_states {
+ PF530X_STATE_POF,
+ PF530X_STATE_FUSE_LOAD,
+ PF530X_STATE_LP_OFF,
+ PF530X_STATE_SELF_TEST,
+ PF530X_STATE_POWER_UP,
+ PF530X_STATE_INIT,
+ PF530X_STATE_IO_RELEASE,
+ PF530X_STATE_RUN,
+ PF530X_STATE_STANDBY,
+ PF530X_STATE_FAULT,
+ PF530X_STATE_FAILSAFE,
+ PF530X_STATE_POWER_DOWN,
+ PF530X_STATE_2MS_SELFTEST_RETRY,
+ PF530X_STATE_OFF_DLY,
+};
+
+enum pf530x_devid {
+ PF5300 = 0x3,
+ PF5301 = 0x4,
+ PF5302 = 0x5,
+};
+
+#define PF530x_FAM 0x50
+#define PF530x_DEVICE_FAM_MASK GENMASK(7, 4)
+#define PF530x_DEVICE_ID_MASK GENMASK(3, 0)
+
+#define PF530x_STATE_MASK GENMASK(3, 0)
+#define PF530x_STATE_RUN 0x07
+#define PF530x_STATE_STANDBY 0x08
+#define PF530x_STATE_LP_OFF 0x02
+
+#define PF530X_OTP_STBY_MODE GENMASK(3, 2)
+#define PF530X_OTP_RUN_MODE GENMASK(1, 0)
+
+#define PF530X_INT_STATUS_OV BIT(1)
+#define PF530X_INT_STATUS_UV BIT(2)
+#define PF530X_INT_STATUS_ILIM BIT(3)
+
+#define SW1_ILIM_S BIT(0)
+#define VMON_UV_S BIT(1)
+#define VMON_OV_S BIT(2)
+#define VIN_OVLO_S BIT(3)
+#define BG_ERR_S BIT(6)
+
+#define THERM_155_S BIT(3)
+#define THERM_140_S BIT(2)
+#define THERM_125_S BIT(1)
+#define THERM_110_S BIT(0)
+
+struct pf530x_chip {
+ struct regmap *regmap;
+ struct device *dev;
+};
+
+static const struct regmap_config pf530x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = PF530X_OTP_MODE,
+ .cache_type = REGCACHE_MAPLE,
+};
+
+static int pf530x_get_status(struct regulator_dev *rdev)
+{
+ unsigned int state;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, PF530X_INT_SENSE1, &state);
+ if (ret != 0)
+ return ret;
+
+ if ((state & (BG_ERR_S | SW1_ILIM_S | VMON_UV_S | VMON_OV_S | VIN_OVLO_S))
+ != 0)
+ return REGULATOR_STATUS_ERROR;
+
+	// no errors; check which non-error state we're in
+ ret = regmap_read(rdev->regmap, PF530X_STATE, &state);
+ if (ret != 0)
+ return ret;
+
+ state &= PF530x_STATE_MASK;
+
+ switch (state) {
+ case PF530x_STATE_RUN:
+ ret = REGULATOR_STATUS_NORMAL;
+ break;
+ case PF530x_STATE_STANDBY:
+ ret = REGULATOR_STATUS_STANDBY;
+ break;
+ case PF530x_STATE_LP_OFF:
+ ret = REGULATOR_STATUS_OFF;
+ break;
+ default:
+ ret = REGULATOR_STATUS_ERROR;
+ break;
+ }
+ return ret;
+}
+
+static int pf530x_get_error_flags(struct regulator_dev *rdev, unsigned int *flags)
+{
+ unsigned int status;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, PF530X_INT_STATUS1, &status);
+
+ if (ret != 0)
+ return ret;
+
+ *flags = 0;
+
+ if (status & PF530X_INT_STATUS_OV)
+ *flags |= REGULATOR_ERROR_OVER_VOLTAGE_WARN;
+
+ if (status & PF530X_INT_STATUS_UV)
+ *flags |= REGULATOR_ERROR_UNDER_VOLTAGE;
+
+ if (status & PF530X_INT_STATUS_ILIM)
+ *flags |= REGULATOR_ERROR_OVER_CURRENT;
+
+ ret = regmap_read(rdev->regmap, PF530X_INT_SENSE2, &status);
+
+ if (ret != 0)
+ return ret;
+
+ if ((status & (THERM_155_S |
+ THERM_140_S |
+ THERM_125_S |
+ THERM_110_S)) != 0)
+ *flags |= REGULATOR_ERROR_OVER_TEMP_WARN;
+
+ return 0;
+}
+
+static const struct regulator_ops pf530x_regulator_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .get_status = pf530x_get_status,
+ .get_error_flags = pf530x_get_error_flags,
+ .set_bypass = regulator_set_bypass_regmap,
+ .get_bypass = regulator_get_bypass_regmap,
+};
+
+static const struct linear_range vrange = REGULATOR_LINEAR_RANGE(500000, 0, 140, 5000);
+
+static const struct regulator_desc pf530x_reg_desc = {
+ .name = "SW1",
+ .ops = &pf530x_regulator_ops,
+ .linear_ranges = &vrange,
+ .n_linear_ranges = 1,
+ .type = REGULATOR_VOLTAGE,
+ .id = 0,
+ .owner = THIS_MODULE,
+ .vsel_reg = PF530X_SW1_VOLT,
+ .vsel_mask = 0xFF,
+ .bypass_reg = PF530X_SW1_CTRL2,
+ .bypass_mask = 0x07,
+ .bypass_val_on = 0x07,
+ .bypass_val_off = 0x00,
+ .enable_reg = PF530X_SW1_CTRL1,
+ .enable_mask = GENMASK(5, 2),
+ .enable_val = GENMASK(5, 2),
+ .disable_val = 0,
+};
+
+static int pf530x_identify(struct pf530x_chip *chip)
+{
+ unsigned int value;
+ u8 dev_fam, dev_id, full_layer_rev, metal_layer_rev, prog_idh, prog_idl, emrev;
+ const char *name = NULL;
+ int ret;
+
+ ret = regmap_read(chip->regmap, PF530X_DEVICEID, &value);
+ if (ret) {
+ dev_err(chip->dev, "failed to read chip family\n");
+ return ret;
+ }
+
+ dev_fam = value & PF530x_DEVICE_FAM_MASK;
+ switch (dev_fam) {
+ case PF530x_FAM:
+ break;
+ default:
+ dev_err(chip->dev,
+ "Chip 0x%x is not from PF530X family\n", dev_fam);
+ return -ENODEV;
+ }
+
+ dev_id = value & PF530x_DEVICE_ID_MASK;
+ switch (dev_id) {
+ case PF5300:
+ name = "PF5300";
+ break;
+ case PF5301:
+ name = "PF5301";
+ break;
+ case PF5302:
+ name = "PF5302";
+ break;
+ default:
+ dev_err(chip->dev, "Unknown pf530x device id 0x%x\n", dev_id);
+ return -ENODEV;
+ }
+
+ ret = regmap_read(chip->regmap, PF530X_REV, &value);
+ if (ret) {
+ dev_err(chip->dev, "failed to read chip rev\n");
+ return ret;
+ }
+
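+ /* Upper nibble encodes the full-layer rev ('0', or 'A' onward for 1..); the lower nibble is the metal layer rev */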
+ full_layer_rev = ((value & 0xF0) == 0) ? '0' : ((((value & 0xF0) >> 4) - 1) + 'A');
+ metal_layer_rev = value & 0xF;
+
+ ret = regmap_read(chip->regmap, PF530X_EMREV, &value);
+ if (ret) {
+ dev_err(chip->dev, "failed to read chip emrev register\n");
+ return ret;
+ }
+
+ prog_idh = (value >> 4) + 'A';
+ // prog_idh skips 'O', per page 96 of the datasheet
+ if (prog_idh >= 'O')
+ prog_idh += 1;
+
+ emrev = value & 0x7;
+
+ ret = regmap_read(chip->regmap, PF530X_PROGID, &value);
+ if (ret) {
+ dev_err(chip->dev, "failed to read chip progid register\n");
+ return ret;
+ }
+
+ if (value >= 0x22) {
+ dev_err(chip->dev, "invalid value for progid register\n");
+ return -ENODEV;
+ } else if (value < 10) {
+ prog_idl = value + '0';
+ } else {
+ prog_idl = (value - 10) + 'A';
+ // prog_idl skips 'O', per page 97 of the datasheet
+ if (prog_idl >= 'O')
+ prog_idl += 1;
+ }
+
+ dev_info(chip->dev, "%s Regulator found (Rev %c%d ProgID %c%c EMREV %x).\n",
+ name, full_layer_rev, metal_layer_rev, prog_idh, prog_idl, emrev);
+
+ return 0;
+}
+
+static int pf530x_i2c_probe(struct i2c_client *client)
+{
+ struct regulator_config config = { };
+ struct pf530x_chip *chip;
+ int ret;
+ struct regulator_dev *rdev;
+ struct regulator_init_data *init_data;
+
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, chip);
+ chip->dev = &client->dev;
+
+ chip->regmap = devm_regmap_init_i2c(client, &pf530x_regmap_config);
+ if (IS_ERR(chip->regmap)) {
+ ret = PTR_ERR(chip->regmap);
+ dev_err(&client->dev,
+ "regmap allocation failed with err %d\n", ret);
+ return ret;
+ }
+
+ ret = pf530x_identify(chip);
+ if (ret)
+ return ret;
+
+ init_data = of_get_regulator_init_data(chip->dev, chip->dev->of_node, &pf530x_reg_desc);
+ if (!init_data)
+ return -ENODATA;
+
+ config.dev = chip->dev;
+ config.of_node = chip->dev->of_node;
+ config.regmap = chip->regmap;
+ config.init_data = init_data;
+
+ // the config parameter gets copied, it's ok to pass a pointer on the stack here
+ rdev = devm_regulator_register(&client->dev, &pf530x_reg_desc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&client->dev, "failed to register %s regulator\n", pf530x_reg_desc.name);
+ return PTR_ERR(rdev);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id pf530x_dt_ids[] = {
+ { .compatible = "nxp,pf5300",},
+ { }
+};
+MODULE_DEVICE_TABLE(of, pf530x_dt_ids);
+
+static const struct i2c_device_id pf530x_i2c_id[] = {
+ { "pf5300", 0 },
+ { "pf5301", 0 },
+ { "pf5302", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, pf530x_i2c_id);
+
+static struct i2c_driver pf530x_regulator_driver = {
+ .id_table = pf530x_i2c_id,
+ .driver = {
+ .name = "pf530x",
+ .of_match_table = pf530x_dt_ids,
+ },
+ .probe = pf530x_i2c_probe,
+};
+module_i2c_driver(pf530x_regulator_driver);
+
+MODULE_AUTHOR("Woodrow Douglass <wdouglass@carnegierobotics.com>");
+MODULE_DESCRIPTION("Regulator Driver for NXP's PF5300/PF5301/PF5302 PMIC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/qcom-pm8008-regulator.c b/drivers/regulator/qcom-pm8008-regulator.c
index da017c1969d0..90c78ee1c37b 100644
--- a/drivers/regulator/qcom-pm8008-regulator.c
+++ b/drivers/regulator/qcom-pm8008-regulator.c
@@ -96,7 +96,7 @@ static int pm8008_regulator_get_voltage_sel(struct regulator_dev *rdev)
uV = le16_to_cpu(val) * 1000;
- return (uV - preg->desc.min_uV) / preg->desc.uV_step;
+ return regulator_map_voltage_linear_range(rdev, uV, INT_MAX);
}
static const struct regulator_ops pm8008_regulator_ops = {
diff --git a/drivers/regulator/qcom-refgen-regulator.c b/drivers/regulator/qcom-refgen-regulator.c
index cfa72ce85bc8..299ac3c8c3bc 100644
--- a/drivers/regulator/qcom-refgen-regulator.c
+++ b/drivers/regulator/qcom-refgen-regulator.c
@@ -94,7 +94,6 @@ static const struct regmap_config qcom_refgen_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .fast_io = true,
};
static int qcom_refgen_probe(struct platform_device *pdev)
diff --git a/drivers/regulator/rpi-panel-attiny-regulator.c b/drivers/regulator/rpi-panel-attiny-regulator.c
index 58dbf8bffa5d..3020839b9ef1 100644
--- a/drivers/regulator/rpi-panel-attiny-regulator.c
+++ b/drivers/regulator/rpi-panel-attiny-regulator.c
@@ -351,7 +351,7 @@ static int attiny_i2c_probe(struct i2c_client *i2c)
state->gc.base = -1;
state->gc.ngpio = NUM_GPIO;
- state->gc.set_rv = attiny_gpio_set;
+ state->gc.set = attiny_gpio_set;
state->gc.get_direction = attiny_gpio_get_direction;
state->gc.can_sleep = true;
diff --git a/drivers/regulator/rt5133-regulator.c b/drivers/regulator/rt5133-regulator.c
new file mode 100644
index 000000000000..129b1f13c880
--- /dev/null
+++ b/drivers/regulator/rt5133-regulator.c
@@ -0,0 +1,642 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2025 Richtek Technology Corp.
+// Author: ChiYuan Huang <cy_huang@richtek.com>
+// Author: ShihChia Chang <jeff_chang@richtek.com>
+
+#include <linux/crc8.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+
+#define RT5133_REG_CHIP_INFO 0x00
+#define RT5133_REG_RST_CTRL 0x06
+#define RT5133_REG_BASE_CTRL 0x09
+#define RT5133_REG_GPIO_CTRL 0x0B
+#define RT5133_REG_BASE_EVT 0x10
+#define RT5133_REG_LDO_PGB_STAT 0x15
+#define RT5133_REG_BASE_MASK 0x16
+#define RT5133_REG_LDO_SHDN 0x19
+#define RT5133_REG_LDO_ON 0x1A
+#define RT5133_REG_LDO_OFF 0x1B
+#define RT5133_REG_LDO1_CTRL1 0x20
+#define RT5133_REG_LDO1_CTRL2 0x21
+#define RT5133_REG_LDO1_CTRL3 0x22
+#define RT5133_REG_LDO2_CTRL1 0x24
+#define RT5133_REG_LDO2_CTRL2 0x25
+#define RT5133_REG_LDO2_CTRL3 0x26
+#define RT5133_REG_LDO3_CTRL1 0x28
+#define RT5133_REG_LDO3_CTRL2 0x29
+#define RT5133_REG_LDO3_CTRL3 0x2A
+#define RT5133_REG_LDO4_CTRL1 0x2C
+#define RT5133_REG_LDO4_CTRL2 0x2D
+#define RT5133_REG_LDO4_CTRL3 0x2E
+#define RT5133_REG_LDO5_CTRL1 0x30
+#define RT5133_REG_LDO5_CTRL2 0x31
+#define RT5133_REG_LDO5_CTRL3 0x32
+#define RT5133_REG_LDO6_CTRL1 0x34
+#define RT5133_REG_LDO6_CTRL2 0x35
+#define RT5133_REG_LDO6_CTRL3 0x36
+#define RT5133_REG_LDO7_CTRL1 0x38
+#define RT5133_REG_LDO7_CTRL2 0x39
+#define RT5133_REG_LDO7_CTRL3 0x3A
+#define RT5133_REG_LDO8_CTRL1 0x3C
+#define RT5133_REG_LDO8_CTRL2 0x3D
+#define RT5133_REG_LDO8_CTRL3 0x3E
+#define RT5133_REG_LDO8_CTRL4 0x3F
+
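+/* Each LDO owns a four-register control block starting at 0x20; see the CTRLx defines above */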
+#define RT5133_LDO_REG_BASE(_id) (0x20 + ((_id) - 1) * 4)
+
+#define RT5133_VENDOR_ID_MASK GENMASK(7, 4)
+#define RT5133_RESET_CODE 0xB1
+
+#define RT5133_FOFF_BASE_MASK BIT(1)
+#define RT5133_OCSHDN_ALL_MASK BIT(7)
+#define RT5133_OCSHDN_ALL_SHIFT (7)
+#define RT5133_PGBSHDN_ALL_MASK BIT(6)
+#define RT5133_PGBSHDN_ALL_SHIFT (6)
+
+#define RT5133_OCPTSEL_MASK BIT(5)
+#define RT5133_PGBPTSEL_MASK BIT(4)
+#define RT5133_STBTDSEL_MASK GENMASK(1, 0)
+
+#define RT5133_LDO_ENABLE_MASK BIT(7)
+#define RT5133_LDO_VSEL_MASK GENMASK(7, 5)
+#define RT5133_LDO_AD_MASK BIT(2)
+#define RT5133_LDO_SOFT_START_MASK GENMASK(1, 0)
+
+#define RT5133_GPIO_NR 3
+
+#define RT5133_LDO_PGB_EVT_MASK GENMASK(23, 16)
+#define RT5133_LDO_PGB_EVT_SHIFT 16
+#define RT5133_LDO_OC_EVT_MASK GENMASK(15, 8)
+#define RT5133_LDO_OC_EVT_SHIFT 8
+#define RT5133_VREF_EVT_MASK BIT(6)
+#define RT5133_BASE_EVT_MASK GENMASK(7, 0)
+#define RT5133_INTR_CLR_MASK GENMASK(23, 0)
+#define RT5133_INTR_BYTE_NR 3
+
+#define RT5133_MAX_I2C_BLOCK_SIZE 1
+
+#define RT5133_CRC8_POLYNOMIAL 0x7
+
+#define RT5133_I2C_ADDR_LEN 1
+#define RT5133_PREDATA_LEN 2
+#define RT5133_I2C_CRC_LEN 1
+#define RT5133_REG_ADDR_LEN 1
+#define RT5133_I2C_DUMMY_LEN 1
+
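+/* Translate the 7-bit I2C address plus R/W bit into the 8-bit on-wire byte that the CRC is computed over */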
+#define I2C_ADDR_XLATE_8BIT(_addr, _rw) ((((_addr) & 0x7F) << 1) | (_rw))
+
+enum {
+ RT5133_REGULATOR_BASE = 0,
+ RT5133_REGULATOR_LDO1,
+ RT5133_REGULATOR_LDO2,
+ RT5133_REGULATOR_LDO3,
+ RT5133_REGULATOR_LDO4,
+ RT5133_REGULATOR_LDO5,
+ RT5133_REGULATOR_LDO6,
+ RT5133_REGULATOR_LDO7,
+ RT5133_REGULATOR_LDO8,
+ RT5133_REGULATOR_MAX
+};
+
+struct chip_data {
+ const struct regulator_desc *regulators;
+ const u8 vendor_id;
+};
+
+struct rt5133_priv {
+ struct device *dev;
+ struct regmap *regmap;
+ struct gpio_desc *enable_gpio;
+ struct regulator_dev *rdev[RT5133_REGULATOR_MAX];
+ struct gpio_chip gc;
+ const struct chip_data *cdata;
+ unsigned int gpio_output_flag;
+ u8 crc8_tbls[CRC8_TABLE_SIZE];
+};
+
+static const unsigned int vout_type1_tables[] = {
+ 1800000, 2500000, 2700000, 2800000, 2900000, 3000000, 3100000, 3200000
+};
+
+static const unsigned int vout_type2_tables[] = {
+ 1700000, 1800000, 1900000, 2500000, 2700000, 2800000, 2900000, 3000000
+};
+
+static const unsigned int vout_type3_tables[] = {
+ 900000, 950000, 1000000, 1050000, 1100000, 1150000, 1200000, 1800000
+};
+
+static const unsigned int vout_type4_tables[] = {
+ 855000, 900000, 950000, 1000000, 1040000, 1090000, 1140000, 1710000
+};
+
+static const struct regulator_ops rt5133_regulator_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+};
+
+static const struct regulator_ops rt5133_base_regulator_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
+#define RT5133_REGULATOR_DESC(_name, _node_name, vtables, _supply) \
+{\
+ .name = #_name,\
+ .id = RT5133_REGULATOR_##_name,\
+ .of_match = of_match_ptr(#_node_name),\
+ .regulators_node = of_match_ptr("regulators"),\
+ .supply_name = _supply,\
+ .type = REGULATOR_VOLTAGE,\
+ .owner = THIS_MODULE,\
+ .ops = &rt5133_regulator_ops,\
+ .n_voltages = ARRAY_SIZE(vtables),\
+ .volt_table = vtables,\
+ .enable_reg = RT5133_REG_##_name##_CTRL1,\
+ .enable_mask = RT5133_LDO_ENABLE_MASK,\
+ .vsel_reg = RT5133_REG_##_name##_CTRL2,\
+ .vsel_mask = RT5133_LDO_VSEL_MASK,\
+ .active_discharge_reg = RT5133_REG_##_name##_CTRL3,\
+ .active_discharge_mask = RT5133_LDO_AD_MASK,\
+}
+
+static const struct regulator_desc rt5133_regulators[] = {
+ /* For digital part, base current control */
+ {
+ .name = "base",
+ .id = RT5133_REGULATOR_BASE,
+ .of_match = of_match_ptr("base"),
+ .regulators_node = of_match_ptr("regulators"),
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .ops = &rt5133_base_regulator_ops,
+ .enable_reg = RT5133_REG_BASE_CTRL,
+ .enable_mask = RT5133_FOFF_BASE_MASK,
+ .enable_is_inverted = true,
+ },
+ RT5133_REGULATOR_DESC(LDO1, ldo1, vout_type1_tables, "base"),
+ RT5133_REGULATOR_DESC(LDO2, ldo2, vout_type1_tables, "base"),
+ RT5133_REGULATOR_DESC(LDO3, ldo3, vout_type2_tables, "base"),
+ RT5133_REGULATOR_DESC(LDO4, ldo4, vout_type2_tables, "base"),
+ RT5133_REGULATOR_DESC(LDO5, ldo5, vout_type2_tables, "base"),
+ RT5133_REGULATOR_DESC(LDO6, ldo6, vout_type2_tables, "base"),
+ RT5133_REGULATOR_DESC(LDO7, ldo7, vout_type3_tables, "vin"),
+ RT5133_REGULATOR_DESC(LDO8, ldo8, vout_type3_tables, "vin"),
+};
+
+static const struct regulator_desc rt5133a_regulators[] = {
+ /* For digital part, base current control */
+ {
+ .name = "base",
+ .id = RT5133_REGULATOR_BASE,
+ .of_match = of_match_ptr("base"),
+ .regulators_node = of_match_ptr("regulators"),
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .ops = &rt5133_base_regulator_ops,
+ .enable_reg = RT5133_REG_BASE_CTRL,
+ .enable_mask = RT5133_FOFF_BASE_MASK,
+ .enable_is_inverted = true,
+ },
+ RT5133_REGULATOR_DESC(LDO1, ldo1, vout_type1_tables, "base"),
+ RT5133_REGULATOR_DESC(LDO2, ldo2, vout_type1_tables, "base"),
+ RT5133_REGULATOR_DESC(LDO3, ldo3, vout_type2_tables, "base"),
+ RT5133_REGULATOR_DESC(LDO4, ldo4, vout_type2_tables, "base"),
+ RT5133_REGULATOR_DESC(LDO5, ldo5, vout_type2_tables, "base"),
+ RT5133_REGULATOR_DESC(LDO6, ldo6, vout_type2_tables, "base"),
+ RT5133_REGULATOR_DESC(LDO7, ldo7, vout_type3_tables, "vin"),
+ RT5133_REGULATOR_DESC(LDO8, ldo8, vout_type4_tables, "vin"),
+};
+
+static const struct chip_data regulator_data[] = {
+ { rt5133_regulators, 0x70},
+ { rt5133a_regulators, 0x80},
+};
+
+static int rt5133_gpio_direction_output(struct gpio_chip *gpio,
+ unsigned int offset, int value)
+{
+ struct rt5133_priv *priv = gpiochip_get_data(gpio);
+
+ if (offset >= RT5133_GPIO_NR)
+ return -EINVAL;
+
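+ /*
+ * Each GPIO is controlled by the bit pair BIT(7 - offset) and
+ * BIT(3 - offset) in GPIO_CTRL; both are set for output-high and
+ * cleared for low (the roles of the two bits are assumed from the
+ * datasheet).
+ */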
+ return regmap_update_bits(priv->regmap, RT5133_REG_GPIO_CTRL,
+ BIT(7 - offset) | BIT(3 - offset),
+ value ? BIT(7 - offset) | BIT(3 - offset) : 0);
+}
+
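+/* Reads return the cached output state; the GPIOs here are output-only (no direction_input op) */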
+static int rt5133_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct rt5133_priv *priv = gpiochip_get_data(chip);
+
+ return !!(priv->gpio_output_flag & BIT(offset));
+}
+
+static int rt5133_get_gpioen_mask(unsigned int offset, unsigned int *mask)
+{
+ if (offset >= RT5133_GPIO_NR)
+ return -EINVAL;
+
+ *mask = (BIT(7 - offset) | BIT(3 - offset));
+
+ return 0;
+}
+
+static int rt5133_gpio_set(struct gpio_chip *chip, unsigned int offset, int set_val)
+{
+ struct rt5133_priv *priv = gpiochip_get_data(chip);
+ unsigned int mask = 0, val = 0, next_flag = priv->gpio_output_flag;
+ int ret = 0;
+
+ ret = rt5133_get_gpioen_mask(offset, &mask);
+ if (ret) {
+ dev_err(priv->dev, "%s get gpion en mask failed, offset(%d)\n", __func__, offset);
+ return ret;
+ }
+
+ val = set_val ? mask : 0;
+
+ if (set_val)
+ next_flag |= BIT(offset);
+ else
+ next_flag &= ~BIT(offset);
+
+ ret = regmap_update_bits(priv->regmap, RT5133_REG_GPIO_CTRL, mask, val);
+ if (ret) {
+ dev_err(priv->dev, "Failed to set gpio [%d] val %d\n", offset,
+ set_val);
+ return ret;
+ }
+
+ priv->gpio_output_flag = next_flag;
+ return 0;
+}
+
+static irqreturn_t rt5133_intr_handler(int irq_number, void *data)
+{
+ struct rt5133_priv *priv = data;
+ u32 intr_evts = 0, handle_evts;
+ int i, ret;
+
+ ret = regmap_bulk_read(priv->regmap, RT5133_REG_BASE_EVT, &intr_evts,
+ RT5133_INTR_BYTE_NR);
+ if (ret) {
+ dev_err(priv->dev, "%s, read event failed\n", __func__);
+ return IRQ_NONE;
+ }
+
+ handle_evts = intr_evts & RT5133_BASE_EVT_MASK;
+ /*
+ * VREF_EVT is a special case: it also triggers when the base
+ * regulator is off, so skip it here.
+ */
+ if (handle_evts & ~RT5133_VREF_EVT_MASK)
+ dev_dbg(priv->dev, "base event occurred [0x%02x]\n",
+ handle_evts);
+
+ handle_evts = (intr_evts & RT5133_LDO_OC_EVT_MASK) >>
+ RT5133_LDO_OC_EVT_SHIFT;
+
+ for (i = RT5133_REGULATOR_LDO1; i < RT5133_REGULATOR_MAX && handle_evts; i++) {
+ if (!(handle_evts & BIT(i - 1)))
+ continue;
+ regulator_notifier_call_chain(priv->rdev[i],
+ REGULATOR_EVENT_OVER_CURRENT,
+ &i);
+ }
+
+ handle_evts = (intr_evts & RT5133_LDO_PGB_EVT_MASK) >>
+ RT5133_LDO_PGB_EVT_SHIFT;
+ for (i = RT5133_REGULATOR_LDO1; i < RT5133_REGULATOR_MAX && handle_evts; i++) {
+ if (!(handle_evts & BIT(i - 1)))
+ continue;
+ regulator_notifier_call_chain(priv->rdev[i],
+ REGULATOR_EVENT_FAIL, &i);
+ }
+
+ ret = regmap_bulk_write(priv->regmap, RT5133_REG_BASE_EVT, &intr_evts,
+ RT5133_INTR_BYTE_NR);
+ if (ret)
+ dev_err(priv->dev, "%s, clear event failed\n", __func__);
+
+ return IRQ_HANDLED;
+}
+
+static int rt5133_enable_interrupts(int irq_no, struct rt5133_priv *priv)
+{
+ u32 mask = RT5133_INTR_CLR_MASK;
+ int ret;
+
+ /* Write-clear all pending events */
+ ret = regmap_bulk_write(priv->regmap, RT5133_REG_BASE_EVT, &mask,
+ RT5133_INTR_BYTE_NR);
+ if (ret) {
+ dev_err(priv->dev, "Failed to clear all interrupts\n");
+ return ret;
+ }
+
+ /* Unmask all interrupts */
+ mask = 0;
+ ret = regmap_bulk_write(priv->regmap, RT5133_REG_BASE_MASK, &mask,
+ RT5133_INTR_BYTE_NR);
+ if (ret) {
+ dev_err(priv->dev, "Failed to unmask all interrupts\n");
+ return ret;
+ }
+
+ return devm_request_threaded_irq(priv->dev, irq_no, NULL,
+ rt5133_intr_handler, IRQF_ONESHOT,
+ dev_name(priv->dev), priv);
+}
+
+static int rt5133_regmap_hw_read(void *context, const void *reg_buf,
+ size_t reg_size, void *val_buf,
+ size_t val_size)
+{
+ struct rt5133_priv *priv = context;
+ struct i2c_client *client = to_i2c_client(priv->dev);
+ u8 reg = *(u8 *)reg_buf, crc;
+ u8 *buf;
+ int buf_len = RT5133_PREDATA_LEN + val_size + RT5133_I2C_CRC_LEN;
+ int read_len, ret;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf[0] = I2C_ADDR_XLATE_8BIT(client->addr, I2C_SMBUS_READ);
+ buf[1] = reg;
+
+ read_len = val_size + RT5133_I2C_CRC_LEN;
+ ret = i2c_smbus_read_i2c_block_data(client, reg, read_len,
+ buf + RT5133_PREDATA_LEN);
+
+ if (ret < 0)
+ goto out_read_err;
+
+ if (ret != read_len) {
+ ret = -EIO;
+ goto out_read_err;
+ }
+
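+ /*
+ * The CRC8 covers the translated address, register and payload; the
+ * device appends it after the data, so verify it before accepting.
+ */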
+ crc = crc8(priv->crc8_tbls, buf, RT5133_PREDATA_LEN + val_size, 0);
+ if (crc != buf[RT5133_PREDATA_LEN + val_size]) {
+ ret = -EIO;
+ goto out_read_err;
+ }
+
+ memcpy(val_buf, buf + RT5133_PREDATA_LEN, val_size);
+ dev_dbg(priv->dev, "%s, reg = 0x%02x, data = 0x%02x\n", __func__, reg, *(u8 *)val_buf);
+
+out_read_err:
+ kfree(buf);
+ return (ret < 0) ? ret : 0;
+}
+
+static int rt5133_regmap_hw_write(void *context, const void *data, size_t count)
+{
+ struct rt5133_priv *priv = context;
+ struct i2c_client *client = to_i2c_client(priv->dev);
+ u8 reg = *(u8 *)data, crc;
+ u8 *buf;
+ int buf_len = RT5133_I2C_ADDR_LEN + count + RT5133_I2C_CRC_LEN +
+ RT5133_I2C_DUMMY_LEN;
+ int write_len, ret;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
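+ /*
+ * Frame layout (from the length math above): 8-bit address with R/W
+ * bit, register, payload, CRC8, then one trailing dummy byte.
+ */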
+ buf[0] = I2C_ADDR_XLATE_8BIT(client->addr, I2C_SMBUS_WRITE);
+ buf[1] = reg;
+ memcpy(buf + RT5133_PREDATA_LEN, data + RT5133_REG_ADDR_LEN,
+ count - RT5133_REG_ADDR_LEN);
+
+ crc = crc8(priv->crc8_tbls, buf, RT5133_I2C_ADDR_LEN + count, 0);
+ buf[RT5133_I2C_ADDR_LEN + count] = crc;
+
+ write_len = count - RT5133_REG_ADDR_LEN + RT5133_I2C_CRC_LEN +
+ RT5133_I2C_DUMMY_LEN;
+ ret = i2c_smbus_write_i2c_block_data(client, reg, write_len,
+ buf + RT5133_PREDATA_LEN);
+
+ dev_dbg(priv->dev, "%s, reg = 0x%02x, data = 0x%02x\n", __func__, reg,
+ *(u8 *)(buf + RT5133_PREDATA_LEN));
+ kfree(buf);
+ return ret;
+}
+
+static const struct regmap_bus rt5133_regmap_bus = {
+ .read = rt5133_regmap_hw_read,
+ .write = rt5133_regmap_hw_write,
+ /* Due to the CRC, block read/write length is limited */
+ .max_raw_read = RT5133_MAX_I2C_BLOCK_SIZE,
+ .max_raw_write = RT5133_MAX_I2C_BLOCK_SIZE,
+};
+
+static bool rt5133_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case RT5133_REG_CHIP_INFO:
+ case RT5133_REG_BASE_EVT...RT5133_REG_LDO_PGB_STAT:
+ case RT5133_REG_LDO_ON...RT5133_REG_LDO_OFF:
+ case RT5133_REG_LDO1_CTRL1:
+ case RT5133_REG_LDO2_CTRL1:
+ case RT5133_REG_LDO3_CTRL1:
+ case RT5133_REG_LDO4_CTRL1:
+ case RT5133_REG_LDO5_CTRL1:
+ case RT5133_REG_LDO6_CTRL1:
+ case RT5133_REG_LDO7_CTRL1:
+ case RT5133_REG_LDO8_CTRL1:
+ return true;
+ default:
+ return false;
+ }
+}
+
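+/* Chip info, event/status and LDO on/off/enable registers are volatile and always bypass the flat cache */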
+static const struct regmap_config rt5133_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = RT5133_REG_LDO8_CTRL4,
+ .cache_type = REGCACHE_FLAT,
+ .num_reg_defaults_raw = RT5133_REG_LDO8_CTRL4 + 1,
+ .volatile_reg = rt5133_is_volatile_reg,
+};
+
+static int rt5133_chip_reset(struct rt5133_priv *priv)
+{
+ int ret;
+
+ ret = regmap_write(priv->regmap, RT5133_REG_RST_CTRL,
+ RT5133_RESET_CODE);
+ if (ret)
+ return ret;
+
+ /* Wait for register reset to take effect */
+ udelay(2);
+
+ return 0;
+}
+
+static int rt5133_validate_vendor_info(struct rt5133_priv *priv)
+{
+ unsigned int val = 0;
+ int i, ret;
+
+ ret = regmap_read(priv->regmap, RT5133_REG_CHIP_INFO, &val);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(regulator_data); i++) {
+ if ((val & RT5133_VENDOR_ID_MASK) ==
+ regulator_data[i].vendor_id) {
+ priv->cdata = &regulator_data[i];
+ break;
+ }
+ }
+ if (!priv->cdata) {
+ dev_err(priv->dev, "Failed to find regulator match version\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int rt5133_parse_dt(struct rt5133_priv *priv)
+{
+ unsigned int val = 0;
+ int ret = 0;
+
+ if (!device_property_read_bool(priv->dev, "richtek,oc-shutdown-all"))
+ val = 0;
+ else
+ val = 1 << RT5133_OCSHDN_ALL_SHIFT;
+ ret = regmap_update_bits(priv->regmap, RT5133_REG_LDO_SHDN,
+ RT5133_OCSHDN_ALL_MASK, val);
+ if (ret)
+ return ret;
+
+ if (!device_property_read_bool(priv->dev, "richtek,pgb-shutdown-all"))
+ val = 0;
+ else
+ val = 1 << RT5133_PGBSHDN_ALL_SHIFT;
+ return regmap_update_bits(priv->regmap, RT5133_REG_LDO_SHDN,
+ RT5133_PGBSHDN_ALL_MASK, val);
+}
+
+static int rt5133_probe(struct i2c_client *i2c)
+{
+ struct rt5133_priv *priv;
+ struct regulator_config config = {0};
+ int i, ret;
+
+ priv = devm_kzalloc(&i2c->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = &i2c->dev;
+ crc8_populate_msb(priv->crc8_tbls, RT5133_CRC8_POLYNOMIAL);
+
+ priv->enable_gpio = devm_gpiod_get_optional(&i2c->dev, "enable",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->enable_gpio))
+ dev_err(&i2c->dev, "Failed to request HWEN gpio, check if default en=high\n");
+
+ priv->regmap = devm_regmap_init(&i2c->dev, &rt5133_regmap_bus, priv,
+ &rt5133_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ dev_err(&i2c->dev, "Failed to register regmap\n");
+ return PTR_ERR(priv->regmap);
+ }
+
+ ret = rt5133_validate_vendor_info(priv);
+ if (ret) {
+ dev_err(&i2c->dev, "Failed to check vendor info [%d]\n", ret);
+ return ret;
+ }
+
+ ret = rt5133_chip_reset(priv);
+ if (ret) {
+ dev_err(&i2c->dev, "Failed to execute sw reset\n");
+ return ret;
+ }
+
+ config.dev = &i2c->dev;
+ config.driver_data = priv;
+ config.regmap = priv->regmap;
+
+ for (i = 0; i < RT5133_REGULATOR_MAX; i++) {
+ priv->rdev[i] = devm_regulator_register(&i2c->dev,
+ priv->cdata->regulators + i,
+ &config);
+ if (IS_ERR(priv->rdev[i])) {
+ dev_err(&i2c->dev,
+ "Failed to register [%d] regulator\n", i);
+ return PTR_ERR(priv->rdev[i]);
+ }
+ }
+
+ ret = rt5133_parse_dt(priv);
+ if (ret) {
+ dev_err(&i2c->dev, "%s, Failed to parse dt\n", __func__);
+ return ret;
+ }
+
+ priv->gc.label = dev_name(&i2c->dev);
+ priv->gc.parent = &i2c->dev;
+ priv->gc.base = -1;
+ priv->gc.ngpio = RT5133_GPIO_NR;
+ priv->gc.set = rt5133_gpio_set;
+ priv->gc.get = rt5133_gpio_get;
+ priv->gc.direction_output = rt5133_gpio_direction_output;
+ priv->gc.can_sleep = true;
+
+ ret = devm_gpiochip_add_data(&i2c->dev, &priv->gc, priv);
+ if (ret)
+ return ret;
+
+ ret = rt5133_enable_interrupts(i2c->irq, priv);
+ if (ret) {
+ dev_err(&i2c->dev, "enable interrupt failed\n");
+ return ret;
+ }
+
+ i2c_set_clientdata(i2c, priv);
+
+ return 0;
+}
+
+static const struct of_device_id __maybe_unused rt5133_of_match_table[] = {
+ { .compatible = "richtek,rt5133", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rt5133_of_match_table);
+
+static struct i2c_driver rt5133_driver = {
+ .driver = {
+ .name = "rt5133",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .of_match_table = rt5133_of_match_table,
+ },
+ .probe = rt5133_probe,
+};
+module_i2c_driver(rt5133_driver);
+
+MODULE_DESCRIPTION("RT5133 Regulator Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/s2dos05-regulator.c b/drivers/regulator/s2dos05-regulator.c
new file mode 100644
index 000000000000..1463585c4565
--- /dev/null
+++ b/drivers/regulator/s2dos05-regulator.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// s2dos05.c - Regulator driver for the Samsung s2dos05
+//
+// Copyright (C) 2025 Dzmitry Sankouski <dsankouski@gmail.com>
+
+#include <linux/bug.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/mfd/samsung/core.h>
+#include <linux/regulator/s2dos05.h>
+#include <linux/i2c.h>
+
+struct s2dos05_data {
+ struct regmap *regmap;
+ struct device *dev;
+};
+
+#define _BUCK(macro) S2DOS05_BUCK##macro
+#define _buck_ops(num) s2dos05_ops##num
+#define _LDO(macro) S2DOS05_LDO##macro
+#define _REG(ctrl) S2DOS05_REG##ctrl
+#define _ldo_ops(num) s2dos05_ops##num
+#define _MASK(macro) S2DOS05_ENABLE_MASK##macro
+#define _TIME(macro) S2DOS05_ENABLE_TIME##macro
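+
+/* Token-pasting helpers that keep the regulator_desc table below compact */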
+
+#define BUCK_DESC(_name, _id, _ops, m, s, v, e, em, t, a) { \
+ .name = _name, \
+ .id = _id, \
+ .ops = _ops, \
+ .of_match = of_match_ptr(_name), \
+ .of_match_full_name = true, \
+ .regulators_node = of_match_ptr("regulators"), \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = m, \
+ .uV_step = s, \
+ .n_voltages = S2DOS05_BUCK_N_VOLTAGES, \
+ .vsel_reg = v, \
+ .vsel_mask = S2DOS05_BUCK_VSEL_MASK, \
+ .enable_reg = e, \
+ .enable_mask = em, \
+ .enable_time = t, \
+ .active_discharge_off = 0, \
+ .active_discharge_on = S2DOS05_BUCK_FD_MASK, \
+ .active_discharge_reg = a, \
+ .active_discharge_mask = S2DOS05_BUCK_FD_MASK \
+}
+
+#define LDO_DESC(_name, _id, _ops, m, s, v, e, em, t, a) { \
+ .name = _name, \
+ .id = _id, \
+ .ops = _ops, \
+ .of_match = of_match_ptr(_name), \
+ .of_match_full_name = true, \
+ .regulators_node = of_match_ptr("regulators"), \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = m, \
+ .uV_step = s, \
+ .n_voltages = S2DOS05_LDO_N_VOLTAGES, \
+ .vsel_reg = v, \
+ .vsel_mask = S2DOS05_LDO_VSEL_MASK, \
+ .enable_reg = e, \
+ .enable_mask = em, \
+ .enable_time = t, \
+ .active_discharge_off = 0, \
+ .active_discharge_on = S2DOS05_LDO_FD_MASK, \
+ .active_discharge_reg = a, \
+ .active_discharge_mask = S2DOS05_LDO_FD_MASK \
+}
+
+static const struct regulator_ops s2dos05_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+};
+
+static const struct regulator_desc regulators[S2DOS05_REGULATOR_MAX] = {
+ // name, id, ops, min_uV, uV_step, vsel_reg, enable_reg, enable_mask, enable_time, active_discharge_reg
+ LDO_DESC("ldo1", _LDO(1), &_ldo_ops(), _LDO(_MIN1),
+ _LDO(_STEP1), _REG(_LDO1_CFG),
+ _REG(_EN), _MASK(_L1), _TIME(_LDO), _REG(_LDO1_CFG)),
+ LDO_DESC("ldo2", _LDO(2), &_ldo_ops(), _LDO(_MIN1),
+ _LDO(_STEP1), _REG(_LDO2_CFG),
+ _REG(_EN), _MASK(_L2), _TIME(_LDO), _REG(_LDO2_CFG)),
+ LDO_DESC("ldo3", _LDO(3), &_ldo_ops(), _LDO(_MIN2),
+ _LDO(_STEP1), _REG(_LDO3_CFG),
+ _REG(_EN), _MASK(_L3), _TIME(_LDO), _REG(_LDO3_CFG)),
+ LDO_DESC("ldo4", _LDO(4), &_ldo_ops(), _LDO(_MIN2),
+ _LDO(_STEP1), _REG(_LDO4_CFG),
+ _REG(_EN), _MASK(_L4), _TIME(_LDO), _REG(_LDO4_CFG)),
+ BUCK_DESC("buck", _BUCK(1), &_buck_ops(), _BUCK(_MIN1),
+ _BUCK(_STEP1), _REG(_BUCK_VOUT),
+ _REG(_EN), _MASK(_B1), _TIME(_BUCK), _REG(_BUCK_CFG)),
+};
+
+static int s2dos05_pmic_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct s2dos05_data *s2dos05;
+ struct regulator_config config = { };
+ unsigned int rdev_num = ARRAY_SIZE(regulators);
+
+ s2dos05 = devm_kzalloc(dev, sizeof(*s2dos05), GFP_KERNEL);
+ if (!s2dos05)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, s2dos05);
+
+ s2dos05->regmap = iodev->regmap_pmic;
+ s2dos05->dev = dev;
+ if (!dev->of_node)
+ dev->of_node = dev->parent->of_node;
+
+ config.dev = dev;
+ config.driver_data = s2dos05;
+
+ for (int i = 0; i < rdev_num; i++) {
+ struct regulator_dev *regulator;
+
+ regulator = devm_regulator_register(&pdev->dev,
+ &regulators[i], &config);
+ if (IS_ERR(regulator)) {
+ return dev_err_probe(&pdev->dev, PTR_ERR(regulator),
+ "regulator init failed for %d\n", i);
+ }
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id s2dos05_pmic_id[] = {
+ { "s2dos05-regulator" },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, s2dos05_pmic_id);
+
+static struct platform_driver s2dos05_platform_driver = {
+ .driver = {
+ .name = "s2dos05",
+ },
+ .probe = s2dos05_pmic_probe,
+ .id_table = s2dos05_pmic_id,
+};
+module_platform_driver(s2dos05_platform_driver);
+
+MODULE_AUTHOR("Dzmitry Sankouski <dsankouski@gmail.com>");
+MODULE_DESCRIPTION("Samsung S2DOS05 Regulator Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/scmi-regulator.c b/drivers/regulator/scmi-regulator.c
index 9df726f10ad1..6d609c42e479 100644
--- a/drivers/regulator/scmi-regulator.c
+++ b/drivers/regulator/scmi-regulator.c
@@ -257,7 +257,8 @@ static int process_scmi_regulator_of_node(struct scmi_device *sdev,
struct device_node *np,
struct scmi_regulator_info *rinfo)
{
- u32 dom, ret;
+ u32 dom;
+ int ret;
ret = of_property_read_u32(np, "reg", &dom);
if (ret)
diff --git a/drivers/regulator/spacemit-p1.c b/drivers/regulator/spacemit-p1.c
new file mode 100644
index 000000000000..d437e6738ea1
--- /dev/null
+++ b/drivers/regulator/spacemit-p1.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for regulators found in the SpacemiT P1 PMIC
+ *
+ * Copyright (C) 2025 by RISCstar Solutions Corporation. All rights reserved.
+ * Derived from code from SpacemiT.
+ * Copyright (c) 2023, SPACEMIT Co., Ltd
+ */
+
+#include <linux/array_size.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/linear_range.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+
+#define MOD_NAME "spacemit-p1-regulator"
+
+enum p1_regulator_id {
+ P1_BUCK1,
+ P1_BUCK2,
+ P1_BUCK3,
+ P1_BUCK4,
+ P1_BUCK5,
+ P1_BUCK6,
+
+ P1_ALDO1,
+ P1_ALDO2,
+ P1_ALDO3,
+ P1_ALDO4,
+
+ P1_DLDO1,
+ P1_DLDO2,
+ P1_DLDO3,
+ P1_DLDO4,
+ P1_DLDO5,
+ P1_DLDO6,
+ P1_DLDO7,
+};
+
+static const struct regulator_ops p1_regulator_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
+/* Selector value 255 can be used to disable the buck converter on sleep */
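+/* 0.5 V to 1.35 V in 5 mV steps, then 1.375 V to 3.45 V in 25 mV steps */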
+static const struct linear_range p1_buck_ranges[] = {
+ REGULATOR_LINEAR_RANGE(500000, 0, 170, 5000),
+ REGULATOR_LINEAR_RANGE(1375000, 171, 254, 25000),
+};
+
+/* Selector value 0 can be used for suspend */
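+/* 0.5 V at selector 11 up to 3.4 V at selector 127, in 25 mV steps */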
+static const struct linear_range p1_ldo_ranges[] = {
+ REGULATOR_LINEAR_RANGE(500000, 11, 127, 25000),
+};
+
+/* These define the voltage selector field for buck and LDO regulators */
+#define BUCK_MASK GENMASK(7, 0)
+#define LDO_MASK GENMASK(6, 0)
+
+#define P1_ID(_TYPE, _n) P1_ ## _TYPE ## _n
+#define P1_ENABLE_REG(_off, _n) ((_off) + 3 * ((_n) - 1))
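+/* Each regulator uses a 3-register block: enable at the base offset, voltage select at base + 1 */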
+
+#define P1_REG_DESC(_TYPE, _type, _n, _s, _off, _mask, _nv, _ranges) \
+ { \
+ .name = #_type #_n, \
+ .supply_name = _s, \
+ .of_match = of_match_ptr(#_type #_n), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .id = P1_ID(_TYPE, _n), \
+ .n_voltages = _nv, \
+ .ops = &p1_regulator_ops, \
+ .owner = THIS_MODULE, \
+ .linear_ranges = _ranges, \
+ .n_linear_ranges = ARRAY_SIZE(_ranges), \
+ .vsel_reg = P1_ENABLE_REG(_off, _n) + 1, \
+ .vsel_mask = _mask, \
+ .enable_reg = P1_ENABLE_REG(_off, _n), \
+ .enable_mask = BIT(0), \
+ }
+
+#define P1_BUCK_DESC(_n) \
+ P1_REG_DESC(BUCK, buck, _n, "vcc", 0x47, BUCK_MASK, 254, p1_buck_ranges)
+
+#define P1_ALDO_DESC(_n) \
+ P1_REG_DESC(ALDO, aldo, _n, "vcc", 0x5b, LDO_MASK, 117, p1_ldo_ranges)
+
+#define P1_DLDO_DESC(_n) \
+ P1_REG_DESC(DLDO, dldo, _n, "buck5", 0x67, LDO_MASK, 117, p1_ldo_ranges)
+
+static const struct regulator_desc p1_regulator_desc[] = {
+ P1_BUCK_DESC(1),
+ P1_BUCK_DESC(2),
+ P1_BUCK_DESC(3),
+ P1_BUCK_DESC(4),
+ P1_BUCK_DESC(5),
+ P1_BUCK_DESC(6),
+
+ P1_ALDO_DESC(1),
+ P1_ALDO_DESC(2),
+ P1_ALDO_DESC(3),
+ P1_ALDO_DESC(4),
+
+ P1_DLDO_DESC(1),
+ P1_DLDO_DESC(2),
+ P1_DLDO_DESC(3),
+ P1_DLDO_DESC(4),
+ P1_DLDO_DESC(5),
+ P1_DLDO_DESC(6),
+ P1_DLDO_DESC(7),
+};
+
+static int p1_regulator_probe(struct platform_device *pdev)
+{
+ struct regulator_config config = { };
+ struct device *dev = &pdev->dev;
+ u32 i;
+
+ /*
+ * The parent device (PMIC) owns the regmap. Since none is set in
+ * the config structure, the regulator core looks one up from
+ * config.dev, so point that at the parent.
+ */
+ config.dev = dev->parent;
+
+ for (i = 0; i < ARRAY_SIZE(p1_regulator_desc); i++) {
+ const struct regulator_desc *desc = &p1_regulator_desc[i];
+ struct regulator_dev *rdev;
+
+ rdev = devm_regulator_register(dev, desc, &config);
+ if (IS_ERR(rdev))
+ return dev_err_probe(dev, PTR_ERR(rdev),
+ "error registering regulator %s\n",
+ desc->name);
+ }
+
+ return 0;
+}
+
+static struct platform_driver p1_regulator_driver = {
+ .probe = p1_regulator_probe,
+ .driver = {
+ .name = MOD_NAME,
+ },
+};
+
+module_platform_driver(p1_regulator_driver);
+
+MODULE_DESCRIPTION("SpacemiT P1 regulator driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" MOD_NAME);
diff --git a/drivers/regulator/sy7636a-regulator.c b/drivers/regulator/sy7636a-regulator.c
index d1e7ba1fb3e1..27e3d939b7bb 100644
--- a/drivers/regulator/sy7636a-regulator.c
+++ b/drivers/regulator/sy7636a-regulator.c
@@ -83,9 +83,11 @@ static int sy7636a_regulator_probe(struct platform_device *pdev)
if (!regmap)
return -EPROBE_DEFER;
- gdp = devm_gpiod_get(pdev->dev.parent, "epd-pwr-good", GPIOD_IN);
+ device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
+
+ gdp = devm_gpiod_get(&pdev->dev, "epd-pwr-good", GPIOD_IN);
if (IS_ERR(gdp)) {
- dev_err(pdev->dev.parent, "Power good GPIO fault %ld\n", PTR_ERR(gdp));
+ dev_err(&pdev->dev, "Power good GPIO fault %ld\n", PTR_ERR(gdp));
return PTR_ERR(gdp);
}
@@ -105,7 +107,6 @@ static int sy7636a_regulator_probe(struct platform_device *pdev)
}
config.dev = &pdev->dev;
- config.dev->of_node = pdev->dev.parent->of_node;
config.regmap = regmap;
rdev = devm_regulator_register(&pdev->dev, &desc, &config);
diff --git a/drivers/regulator/tps65219-regulator.c b/drivers/regulator/tps65219-regulator.c
index 5e67fdc88f49..d77ca486879f 100644
--- a/drivers/regulator/tps65219-regulator.c
+++ b/drivers/regulator/tps65219-regulator.c
@@ -454,9 +454,9 @@ static int tps65219_regulator_probe(struct platform_device *pdev)
irq_type->irq_name,
irq_data);
if (error)
- return dev_err_probe(tps->dev, PTR_ERR(rdev),
- "Failed to request %s IRQ %d: %d\n",
- irq_type->irq_name, irq, error);
+ return dev_err_probe(tps->dev, error,
+ "Failed to request %s IRQ %d\n",
+ irq_type->irq_name, irq);
}
for (i = 0; i < pmic->dev_irq_size; ++i) {
@@ -477,9 +477,9 @@ static int tps65219_regulator_probe(struct platform_device *pdev)
irq_type->irq_name,
irq_data);
if (error)
- return dev_err_probe(tps->dev, PTR_ERR(rdev),
- "Failed to request %s IRQ %d: %d\n",
- irq_type->irq_name, irq, error);
+ return dev_err_probe(tps->dev, error,
+ "Failed to request %s IRQ %d\n",
+ irq_type->irq_name, irq);
}
return 0;
diff --git a/drivers/regulator/tps6524x-regulator.c b/drivers/regulator/tps6524x-regulator.c
index 3fee7e38c68b..6beb51293e8e 100644
--- a/drivers/regulator/tps6524x-regulator.c
+++ b/drivers/regulator/tps6524x-regulator.c
@@ -598,7 +598,6 @@ static int pmic_probe(struct spi_device *spi)
spi_set_drvdata(spi, hw);
- memset(hw, 0, sizeof(struct tps6524x));
hw->dev = dev;
hw->spi = spi;
mutex_init(&hw->lock);
diff --git a/drivers/regulator/tps6594-regulator.c b/drivers/regulator/tps6594-regulator.c
index ab882daec7c5..645e83462c64 100644
--- a/drivers/regulator/tps6594-regulator.c
+++ b/drivers/regulator/tps6594-regulator.c
@@ -647,7 +647,7 @@ static int tps6594_regulator_probe(struct platform_device *pdev)
default:
dev_err(tps->dev, "unknown chip_id %lu\n", tps->chip_id);
return -EINVAL;
- };
+ }
enum {
MULTI_BUCK12,
diff --git a/drivers/remoteproc/da8xx_remoteproc.c b/drivers/remoteproc/da8xx_remoteproc.c
index 93031f0867d1..e418a2bf5d2e 100644
--- a/drivers/remoteproc/da8xx_remoteproc.c
+++ b/drivers/remoteproc/da8xx_remoteproc.c
@@ -233,6 +233,13 @@ static int da8xx_rproc_get_internal_memories(struct platform_device *pdev,
return 0;
}
+static void da8xx_rproc_mem_release(void *data)
+{
+ struct device *dev = data;
+
+ of_reserved_mem_device_release(dev);
+}
+
static int da8xx_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -274,14 +281,13 @@ static int da8xx_rproc_probe(struct platform_device *pdev)
ret = of_reserved_mem_device_init(dev);
if (ret)
return dev_err_probe(dev, ret, "device does not have specific CMA pool\n");
+ ret = devm_add_action_or_reset(&pdev->dev, da8xx_rproc_mem_release, &pdev->dev);
+ if (ret)
+ return ret;
}
- rproc = rproc_alloc(dev, "dsp", &da8xx_rproc_ops, da8xx_fw_name,
- sizeof(*drproc));
- if (!rproc) {
- ret = -ENOMEM;
- goto free_mem;
- }
+ rproc = devm_rproc_alloc(dev, "dsp", &da8xx_rproc_ops, da8xx_fw_name,
+ sizeof(*drproc));
+ if (!rproc)
+ return -ENOMEM;
/* error recovery is not supported at present */
rproc->recovery_disabled = true;
@@ -294,9 +300,7 @@ static int da8xx_rproc_probe(struct platform_device *pdev)
ret = da8xx_rproc_get_internal_memories(pdev, drproc);
if (ret)
- goto free_rproc;
-
- platform_set_drvdata(pdev, rproc);
+ return ret;
/* everything the ISR needs is now setup, so hook it up */
ret = devm_request_threaded_irq(dev, irq, da8xx_rproc_callback,
@@ -304,7 +308,7 @@ static int da8xx_rproc_probe(struct platform_device *pdev)
rproc);
if (ret) {
dev_err(dev, "devm_request_threaded_irq error: %d\n", ret);
- goto free_rproc;
+ return ret;
}
/*
@@ -314,7 +318,7 @@ static int da8xx_rproc_probe(struct platform_device *pdev)
*/
ret = reset_control_assert(dsp_reset);
if (ret)
- goto free_rproc;
+ return ret;
drproc->chipsig = chipsig;
drproc->bootreg = bootreg;
@@ -322,39 +326,13 @@ static int da8xx_rproc_probe(struct platform_device *pdev)
drproc->irq_data = irq_data;
drproc->irq = irq;
- ret = rproc_add(rproc);
+ ret = devm_rproc_add(dev, rproc);
if (ret) {
dev_err(dev, "rproc_add failed: %d\n", ret);
- goto free_rproc;
+ return ret;
}
return 0;
-
-free_rproc:
- rproc_free(rproc);
-free_mem:
- if (dev->of_node)
- of_reserved_mem_device_release(dev);
- return ret;
-}
-
-static void da8xx_rproc_remove(struct platform_device *pdev)
-{
- struct rproc *rproc = platform_get_drvdata(pdev);
- struct da8xx_rproc *drproc = rproc->priv;
- struct device *dev = &pdev->dev;
-
- /*
- * The devm subsystem might end up releasing things before
- * freeing the irq, thus allowing an interrupt to sneak in while
- * the device is being removed. This should prevent that.
- */
- disable_irq(drproc->irq);
-
- rproc_del(rproc);
- rproc_free(rproc);
- if (dev->of_node)
- of_reserved_mem_device_release(dev);
}
static const struct of_device_id davinci_rproc_of_match[] __maybe_unused = {
@@ -365,7 +343,6 @@ MODULE_DEVICE_TABLE(of, davinci_rproc_of_match);
static struct platform_driver da8xx_rproc_driver = {
.probe = da8xx_rproc_probe,
- .remove = da8xx_rproc_remove,
.driver = {
.name = "davinci-rproc",
.of_match_table = of_match_ptr(davinci_rproc_of_match),
diff --git a/drivers/remoteproc/imx_dsp_rproc.c b/drivers/remoteproc/imx_dsp_rproc.c
index 5ee622bf5352..6e78a01755c7 100644
--- a/drivers/remoteproc/imx_dsp_rproc.c
+++ b/drivers/remoteproc/imx_dsp_rproc.c
@@ -774,7 +774,6 @@ static int imx_dsp_rproc_prepare(struct rproc *rproc)
{
struct imx_dsp_rproc *priv = rproc->priv;
struct device *dev = rproc->dev.parent;
- struct rproc_mem_entry *carveout;
int ret;
ret = imx_dsp_rproc_add_carveout(priv);
@@ -785,15 +784,6 @@ static int imx_dsp_rproc_prepare(struct rproc *rproc)
pm_runtime_get_sync(dev);
- /*
- * Clear buffers after pm rumtime for internal ocram is not
- * accessible if power and clock are not enabled.
- */
- list_for_each_entry(carveout, &rproc->carveouts, node) {
- if (carveout->va)
- memset(carveout->va, 0, carveout->len);
- }
-
return 0;
}
@@ -1022,13 +1012,39 @@ static int imx_dsp_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw
return 0;
}
+static int imx_dsp_rproc_load(struct rproc *rproc, const struct firmware *fw)
+{
+ struct imx_dsp_rproc *priv = rproc->priv;
+ const struct imx_dsp_rproc_dcfg *dsp_dcfg = priv->dsp_dcfg;
+ struct rproc_mem_entry *carveout;
+ int ret;
+
+ /* Reset DSP if needed */
+ if (dsp_dcfg->reset)
+ dsp_dcfg->reset(priv);
+
+ /*
+ * Clear the buffers after pm runtime resume; the internal OCRAM is
+ * not accessible while power and clocks are disabled.
+ */
+ list_for_each_entry(carveout, &rproc->carveouts, node) {
+ if (carveout->va)
+ memset(carveout->va, 0, carveout->len);
+ }
+
+ ret = imx_dsp_rproc_elf_load_segments(rproc, fw);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static const struct rproc_ops imx_dsp_rproc_ops = {
.prepare = imx_dsp_rproc_prepare,
.unprepare = imx_dsp_rproc_unprepare,
.start = imx_dsp_rproc_start,
.stop = imx_dsp_rproc_stop,
.kick = imx_dsp_rproc_kick,
- .load = imx_dsp_rproc_elf_load_segments,
+ .load = imx_dsp_rproc_load,
.parse_fw = imx_dsp_rproc_parse_fw,
.handle_rsc = imx_dsp_rproc_handle_rsc,
.find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
@@ -1189,6 +1205,8 @@ static int imx_dsp_rproc_probe(struct platform_device *pdev)
goto err_detach_domains;
}
+ rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_XTENSA);
+
pm_runtime_enable(dev);
return 0;
@@ -1214,7 +1232,6 @@ static int imx_dsp_runtime_resume(struct device *dev)
{
struct rproc *rproc = dev_get_drvdata(dev);
struct imx_dsp_rproc *priv = rproc->priv;
- const struct imx_dsp_rproc_dcfg *dsp_dcfg = priv->dsp_dcfg;
int ret;
/*
@@ -1235,10 +1252,6 @@ static int imx_dsp_runtime_resume(struct device *dev)
return ret;
}
- /* Reset DSP if needed */
- if (dsp_dcfg->reset)
- dsp_dcfg->reset(priv);
-
return 0;
}
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index a6eef0080ca9..bb25221a4a89 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -285,161 +285,101 @@ static const struct imx_rproc_att imx_rproc_att_imx6sx[] = {
{ 0x80000000, 0x80000000, 0x60000000, 0 },
};
-static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mn_mmio = {
- .src_reg = IMX7D_SRC_SCR,
- .src_mask = IMX7D_M4_RST_MASK,
- .src_start = IMX7D_M4_START,
- .src_stop = IMX8M_M7_STOP,
- .gpr_reg = IMX8M_GPR22,
- .gpr_wait = IMX8M_GPR22_CM7_CPUWAIT,
- .att = imx_rproc_att_imx8mn,
- .att_size = ARRAY_SIZE(imx_rproc_att_imx8mn),
- .method = IMX_RPROC_MMIO,
-};
-
-static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mn = {
- .att = imx_rproc_att_imx8mn,
- .att_size = ARRAY_SIZE(imx_rproc_att_imx8mn),
- .method = IMX_RPROC_SMC,
-};
-
-static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mq = {
- .src_reg = IMX7D_SRC_SCR,
- .src_mask = IMX7D_M4_RST_MASK,
- .src_start = IMX7D_M4_START,
- .src_stop = IMX7D_M4_STOP,
- .att = imx_rproc_att_imx8mq,
- .att_size = ARRAY_SIZE(imx_rproc_att_imx8mq),
- .method = IMX_RPROC_MMIO,
-};
+static int imx_rproc_arm_smc_start(struct rproc *rproc)
+{
+ struct arm_smccc_res res;
-static const struct imx_rproc_dcfg imx_rproc_cfg_imx8qm = {
- .att = imx_rproc_att_imx8qm,
- .att_size = ARRAY_SIZE(imx_rproc_att_imx8qm),
- .method = IMX_RPROC_SCU_API,
-};
+ arm_smccc_smc(IMX_SIP_RPROC, IMX_SIP_RPROC_START, 0, 0, 0, 0, 0, 0, &res);
-static const struct imx_rproc_dcfg imx_rproc_cfg_imx8qxp = {
- .att = imx_rproc_att_imx8qxp,
- .att_size = ARRAY_SIZE(imx_rproc_att_imx8qxp),
- .method = IMX_RPROC_SCU_API,
-};
+ return res.a0;
+}
-static const struct imx_rproc_dcfg imx_rproc_cfg_imx8ulp = {
- .att = imx_rproc_att_imx8ulp,
- .att_size = ARRAY_SIZE(imx_rproc_att_imx8ulp),
- .method = IMX_RPROC_NONE,
-};
+static int imx_rproc_mmio_start(struct rproc *rproc)
+{
+ struct imx_rproc *priv = rproc->priv;
+ const struct imx_rproc_dcfg *dcfg = priv->dcfg;
-static const struct imx_rproc_dcfg imx_rproc_cfg_imx7ulp = {
- .att = imx_rproc_att_imx7ulp,
- .att_size = ARRAY_SIZE(imx_rproc_att_imx7ulp),
- .method = IMX_RPROC_NONE,
- .flags = IMX_RPROC_NEED_SYSTEM_OFF,
-};
+ if (priv->gpr)
+ return regmap_clear_bits(priv->gpr, dcfg->gpr_reg, dcfg->gpr_wait);
-static const struct imx_rproc_dcfg imx_rproc_cfg_imx7d = {
- .src_reg = IMX7D_SRC_SCR,
- .src_mask = IMX7D_M4_RST_MASK,
- .src_start = IMX7D_M4_START,
- .src_stop = IMX7D_M4_STOP,
- .att = imx_rproc_att_imx7d,
- .att_size = ARRAY_SIZE(imx_rproc_att_imx7d),
- .method = IMX_RPROC_MMIO,
-};
+ return regmap_update_bits(priv->regmap, dcfg->src_reg, dcfg->src_mask, dcfg->src_start);
+}
-static const struct imx_rproc_dcfg imx_rproc_cfg_imx6sx = {
- .src_reg = IMX6SX_SRC_SCR,
- .src_mask = IMX6SX_M4_RST_MASK,
- .src_start = IMX6SX_M4_START,
- .src_stop = IMX6SX_M4_STOP,
- .att = imx_rproc_att_imx6sx,
- .att_size = ARRAY_SIZE(imx_rproc_att_imx6sx),
- .method = IMX_RPROC_MMIO,
-};
+static int imx_rproc_scu_api_start(struct rproc *rproc)
+{
+ struct imx_rproc *priv = rproc->priv;
-static const struct imx_rproc_dcfg imx_rproc_cfg_imx93 = {
- .att = imx_rproc_att_imx93,
- .att_size = ARRAY_SIZE(imx_rproc_att_imx93),
- .method = IMX_RPROC_SMC,
-};
+ return imx_sc_pm_cpu_start(priv->ipc_handle, priv->rsrc_id, true, priv->entry);
+}
static int imx_rproc_start(struct rproc *rproc)
{
struct imx_rproc *priv = rproc->priv;
const struct imx_rproc_dcfg *dcfg = priv->dcfg;
struct device *dev = priv->dev;
- struct arm_smccc_res res;
int ret;
ret = imx_rproc_xtr_mbox_init(rproc, true);
if (ret)
return ret;
- switch (dcfg->method) {
- case IMX_RPROC_MMIO:
- if (priv->gpr) {
- ret = regmap_clear_bits(priv->gpr, dcfg->gpr_reg,
- dcfg->gpr_wait);
- } else {
- ret = regmap_update_bits(priv->regmap, dcfg->src_reg,
- dcfg->src_mask,
- dcfg->src_start);
- }
- break;
- case IMX_RPROC_SMC:
- arm_smccc_smc(IMX_SIP_RPROC, IMX_SIP_RPROC_START, 0, 0, 0, 0, 0, 0, &res);
- ret = res.a0;
- break;
- case IMX_RPROC_SCU_API:
- ret = imx_sc_pm_cpu_start(priv->ipc_handle, priv->rsrc_id, true, priv->entry);
- break;
- default:
+ if (!dcfg->ops || !dcfg->ops->start)
return -EOPNOTSUPP;
- }
+ ret = dcfg->ops->start(rproc);
if (ret)
dev_err(dev, "Failed to enable remote core!\n");
return ret;
}
-static int imx_rproc_stop(struct rproc *rproc)
+static int imx_rproc_arm_smc_stop(struct rproc *rproc)
{
struct imx_rproc *priv = rproc->priv;
- const struct imx_rproc_dcfg *dcfg = priv->dcfg;
- struct device *dev = priv->dev;
struct arm_smccc_res res;
+
+ arm_smccc_smc(IMX_SIP_RPROC, IMX_SIP_RPROC_STOP, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a1)
+ dev_info(priv->dev, "Not in wfi, force stopped\n");
+
+ return res.a0;
+}
+
+static int imx_rproc_mmio_stop(struct rproc *rproc)
+{
+ struct imx_rproc *priv = rproc->priv;
+ const struct imx_rproc_dcfg *dcfg = priv->dcfg;
int ret;
- switch (dcfg->method) {
- case IMX_RPROC_MMIO:
- if (priv->gpr) {
- ret = regmap_set_bits(priv->gpr, dcfg->gpr_reg,
- dcfg->gpr_wait);
- if (ret) {
- dev_err(priv->dev,
- "Failed to quiescence M4 platform!\n");
- return ret;
- }
+ if (priv->gpr) {
+ ret = regmap_set_bits(priv->gpr, dcfg->gpr_reg, dcfg->gpr_wait);
+ if (ret) {
+ dev_err(priv->dev, "Failed to quiescence M4 platform!\n");
+ return ret;
}
+ }
+
+ return regmap_update_bits(priv->regmap, dcfg->src_reg, dcfg->src_mask, dcfg->src_stop);
+}
+
+static int imx_rproc_scu_api_stop(struct rproc *rproc)
+{
+ struct imx_rproc *priv = rproc->priv;
+
+ return imx_sc_pm_cpu_start(priv->ipc_handle, priv->rsrc_id, false, priv->entry);
+}
+
+static int imx_rproc_stop(struct rproc *rproc)
+{
+ struct imx_rproc *priv = rproc->priv;
+ const struct imx_rproc_dcfg *dcfg = priv->dcfg;
+ struct device *dev = priv->dev;
+ int ret;
- ret = regmap_update_bits(priv->regmap, dcfg->src_reg, dcfg->src_mask,
- dcfg->src_stop);
- break;
- case IMX_RPROC_SMC:
- arm_smccc_smc(IMX_SIP_RPROC, IMX_SIP_RPROC_STOP, 0, 0, 0, 0, 0, 0, &res);
- ret = res.a0;
- if (res.a1)
- dev_info(dev, "Not in wfi, force stopped\n");
- break;
- case IMX_RPROC_SCU_API:
- ret = imx_sc_pm_cpu_start(priv->ipc_handle, priv->rsrc_id, false, priv->entry);
- break;
- default:
+ if (!dcfg->ops || !dcfg->ops->stop)
return -EOPNOTSUPP;
- }
+ ret = dcfg->ops->stop(rproc);
if (ret)
dev_err(dev, "Failed to stop remote core\n");
else
@@ -922,84 +862,27 @@ static int imx_rproc_attach_pd(struct imx_rproc *priv)
return 0;
}
-static int imx_rproc_detect_mode(struct imx_rproc *priv)
+static int imx_rproc_arm_smc_detect_mode(struct rproc *rproc)
{
- struct regmap_config config = { .name = "imx-rproc" };
- const struct imx_rproc_dcfg *dcfg = priv->dcfg;
- struct device *dev = priv->dev;
- struct regmap *regmap;
+ struct imx_rproc *priv = rproc->priv;
struct arm_smccc_res res;
- int ret;
- u32 val;
- u8 pt;
- switch (dcfg->method) {
- case IMX_RPROC_NONE:
+ arm_smccc_smc(IMX_SIP_RPROC, IMX_SIP_RPROC_STARTED, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0)
priv->rproc->state = RPROC_DETACHED;
- return 0;
- case IMX_RPROC_SMC:
- arm_smccc_smc(IMX_SIP_RPROC, IMX_SIP_RPROC_STARTED, 0, 0, 0, 0, 0, 0, &res);
- if (res.a0)
- priv->rproc->state = RPROC_DETACHED;
- return 0;
- case IMX_RPROC_SCU_API:
- ret = imx_scu_get_handle(&priv->ipc_handle);
- if (ret)
- return ret;
- ret = of_property_read_u32(dev->of_node, "fsl,resource-id", &priv->rsrc_id);
- if (ret) {
- dev_err(dev, "No fsl,resource-id property\n");
- return ret;
- }
-
- if (priv->rsrc_id == IMX_SC_R_M4_1_PID0)
- priv->core_index = 1;
- else
- priv->core_index = 0;
- /*
- * If Mcore resource is not owned by Acore partition, It is kicked by ROM,
- * and Linux could only do IPC with Mcore and nothing else.
- */
- if (imx_sc_rm_is_resource_owned(priv->ipc_handle, priv->rsrc_id)) {
- if (of_property_read_u32(dev->of_node, "fsl,entry-address", &priv->entry))
- return -EINVAL;
-
- return imx_rproc_attach_pd(priv);
- }
-
- priv->rproc->state = RPROC_DETACHED;
- priv->rproc->recovery_disabled = false;
- rproc_set_feature(priv->rproc, RPROC_FEAT_ATTACH_ON_RECOVERY);
-
- /* Get partition id and enable irq in SCFW */
- ret = imx_sc_rm_get_resource_owner(priv->ipc_handle, priv->rsrc_id, &pt);
- if (ret) {
- dev_err(dev, "not able to get resource owner\n");
- return ret;
- }
-
- priv->rproc_pt = pt;
- priv->rproc_nb.notifier_call = imx_rproc_partition_notify;
-
- ret = imx_scu_irq_register_notifier(&priv->rproc_nb);
- if (ret) {
- dev_err(dev, "register scu notifier failed, %d\n", ret);
- return ret;
- }
-
- ret = imx_scu_irq_group_enable(IMX_SC_IRQ_GROUP_REBOOTED, BIT(priv->rproc_pt),
- true);
- if (ret) {
- imx_scu_irq_unregister_notifier(&priv->rproc_nb);
- dev_err(dev, "Enable irq failed, %d\n", ret);
- return ret;
- }
+ return 0;
+}
- return 0;
- default:
- break;
- }
+static int imx_rproc_mmio_detect_mode(struct rproc *rproc)
+{
+ const struct regmap_config config = { .name = "imx-rproc" };
+ struct imx_rproc *priv = rproc->priv;
+ const struct imx_rproc_dcfg *dcfg = priv->dcfg;
+ struct device *dev = priv->dev;
+ struct regmap *regmap;
+ u32 val;
+ int ret;
priv->gpr = syscon_regmap_lookup_by_phandle(dev->of_node, "fsl,iomuxc-gpr");
if (IS_ERR(priv->gpr))
@@ -1039,6 +922,85 @@ static int imx_rproc_detect_mode(struct imx_rproc *priv)
return 0;
}
+static int imx_rproc_scu_api_detect_mode(struct rproc *rproc)
+{
+ struct imx_rproc *priv = rproc->priv;
+ struct device *dev = priv->dev;
+ int ret;
+ u8 pt;
+
+ ret = imx_scu_get_handle(&priv->ipc_handle);
+ if (ret)
+ return ret;
+ ret = of_property_read_u32(dev->of_node, "fsl,resource-id", &priv->rsrc_id);
+ if (ret) {
+ dev_err(dev, "No fsl,resource-id property\n");
+ return ret;
+ }
+
+ if (priv->rsrc_id == IMX_SC_R_M4_1_PID0)
+ priv->core_index = 1;
+ else
+ priv->core_index = 0;
+
+ /*
+ * If Mcore resource is not owned by Acore partition, It is kicked by ROM,
+ * and Linux could only do IPC with Mcore and nothing else.
+ */
+ if (imx_sc_rm_is_resource_owned(priv->ipc_handle, priv->rsrc_id)) {
+ if (of_property_read_u32(dev->of_node, "fsl,entry-address", &priv->entry))
+ return -EINVAL;
+
+ return imx_rproc_attach_pd(priv);
+ }
+
+ priv->rproc->state = RPROC_DETACHED;
+ priv->rproc->recovery_disabled = false;
+ rproc_set_feature(priv->rproc, RPROC_FEAT_ATTACH_ON_RECOVERY);
+
+ /* Get partition id and enable irq in SCFW */
+ ret = imx_sc_rm_get_resource_owner(priv->ipc_handle, priv->rsrc_id, &pt);
+ if (ret) {
+ dev_err(dev, "not able to get resource owner\n");
+ return ret;
+ }
+
+ priv->rproc_pt = pt;
+ priv->rproc_nb.notifier_call = imx_rproc_partition_notify;
+
+ ret = imx_scu_irq_register_notifier(&priv->rproc_nb);
+ if (ret) {
+ dev_err(dev, "register scu notifier failed, %d\n", ret);
+ return ret;
+ }
+
+ ret = imx_scu_irq_group_enable(IMX_SC_IRQ_GROUP_REBOOTED, BIT(priv->rproc_pt),
+ true);
+ if (ret) {
+ imx_scu_irq_unregister_notifier(&priv->rproc_nb);
+ dev_err(dev, "Enable irq failed, %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int imx_rproc_detect_mode(struct imx_rproc *priv)
+{
+ const struct imx_rproc_dcfg *dcfg = priv->dcfg;
+
+ /*
+ * On i.MX7ULP/i.MX8ULP, Linux runs under the control of an RTOS, so
+ * no dcfg->ops or dcfg->ops->detect_mode is needed; the core is
+ * simply reported as RPROC_DETACHED.
+ */
+ if (!dcfg->ops || !dcfg->ops->detect_mode) {
+ priv->rproc->state = RPROC_DETACHED;
+ return 0;
+ }
+
+ return dcfg->ops->detect_mode(priv->rproc);
+}
+
static int imx_rproc_clk_enable(struct imx_rproc *priv)
{
const struct imx_rproc_dcfg *dcfg = priv->dcfg;
@@ -1207,6 +1169,111 @@ static void imx_rproc_remove(struct platform_device *pdev)
destroy_workqueue(priv->workqueue);
}
+static const struct imx_rproc_plat_ops imx_rproc_ops_arm_smc = {
+ .start = imx_rproc_arm_smc_start,
+ .stop = imx_rproc_arm_smc_stop,
+ .detect_mode = imx_rproc_arm_smc_detect_mode,
+};
+
+static const struct imx_rproc_plat_ops imx_rproc_ops_mmio = {
+ .start = imx_rproc_mmio_start,
+ .stop = imx_rproc_mmio_stop,
+ .detect_mode = imx_rproc_mmio_detect_mode,
+};
+
+static const struct imx_rproc_plat_ops imx_rproc_ops_scu_api = {
+ .start = imx_rproc_scu_api_start,
+ .stop = imx_rproc_scu_api_stop,
+ .detect_mode = imx_rproc_scu_api_detect_mode,
+};
+
+static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mn_mmio = {
+ .src_reg = IMX7D_SRC_SCR,
+ .src_mask = IMX7D_M4_RST_MASK,
+ .src_start = IMX7D_M4_START,
+ .src_stop = IMX8M_M7_STOP,
+ .gpr_reg = IMX8M_GPR22,
+ .gpr_wait = IMX8M_GPR22_CM7_CPUWAIT,
+ .att = imx_rproc_att_imx8mn,
+ .att_size = ARRAY_SIZE(imx_rproc_att_imx8mn),
+ .method = IMX_RPROC_MMIO,
+ .ops = &imx_rproc_ops_mmio,
+};
+
+static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mn = {
+ .att = imx_rproc_att_imx8mn,
+ .att_size = ARRAY_SIZE(imx_rproc_att_imx8mn),
+ .method = IMX_RPROC_SMC,
+ .ops = &imx_rproc_ops_arm_smc,
+};
+
+static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mq = {
+ .src_reg = IMX7D_SRC_SCR,
+ .src_mask = IMX7D_M4_RST_MASK,
+ .src_start = IMX7D_M4_START,
+ .src_stop = IMX7D_M4_STOP,
+ .att = imx_rproc_att_imx8mq,
+ .att_size = ARRAY_SIZE(imx_rproc_att_imx8mq),
+ .method = IMX_RPROC_MMIO,
+ .ops = &imx_rproc_ops_mmio,
+};
+
+static const struct imx_rproc_dcfg imx_rproc_cfg_imx8qm = {
+ .att = imx_rproc_att_imx8qm,
+ .att_size = ARRAY_SIZE(imx_rproc_att_imx8qm),
+ .method = IMX_RPROC_SCU_API,
+ .ops = &imx_rproc_ops_scu_api,
+};
+
+static const struct imx_rproc_dcfg imx_rproc_cfg_imx8qxp = {
+ .att = imx_rproc_att_imx8qxp,
+ .att_size = ARRAY_SIZE(imx_rproc_att_imx8qxp),
+ .method = IMX_RPROC_SCU_API,
+ .ops = &imx_rproc_ops_scu_api,
+};
+
+static const struct imx_rproc_dcfg imx_rproc_cfg_imx8ulp = {
+ .att = imx_rproc_att_imx8ulp,
+ .att_size = ARRAY_SIZE(imx_rproc_att_imx8ulp),
+ .method = IMX_RPROC_NONE,
+};
+
+static const struct imx_rproc_dcfg imx_rproc_cfg_imx7ulp = {
+ .att = imx_rproc_att_imx7ulp,
+ .att_size = ARRAY_SIZE(imx_rproc_att_imx7ulp),
+ .method = IMX_RPROC_NONE,
+ .flags = IMX_RPROC_NEED_SYSTEM_OFF,
+};
+
+static const struct imx_rproc_dcfg imx_rproc_cfg_imx7d = {
+ .src_reg = IMX7D_SRC_SCR,
+ .src_mask = IMX7D_M4_RST_MASK,
+ .src_start = IMX7D_M4_START,
+ .src_stop = IMX7D_M4_STOP,
+ .att = imx_rproc_att_imx7d,
+ .att_size = ARRAY_SIZE(imx_rproc_att_imx7d),
+ .method = IMX_RPROC_MMIO,
+ .ops = &imx_rproc_ops_mmio,
+};
+
+static const struct imx_rproc_dcfg imx_rproc_cfg_imx6sx = {
+ .src_reg = IMX6SX_SRC_SCR,
+ .src_mask = IMX6SX_M4_RST_MASK,
+ .src_start = IMX6SX_M4_START,
+ .src_stop = IMX6SX_M4_STOP,
+ .att = imx_rproc_att_imx6sx,
+ .att_size = ARRAY_SIZE(imx_rproc_att_imx6sx),
+ .method = IMX_RPROC_MMIO,
+ .ops = &imx_rproc_ops_mmio,
+};
+
+static const struct imx_rproc_dcfg imx_rproc_cfg_imx93 = {
+ .att = imx_rproc_att_imx93,
+ .att_size = ARRAY_SIZE(imx_rproc_att_imx93),
+ .method = IMX_RPROC_SMC,
+ .ops = &imx_rproc_ops_arm_smc,
+};
+
static const struct of_device_id imx_rproc_of_match[] = {
{ .compatible = "fsl,imx7ulp-cm4", .data = &imx_rproc_cfg_imx7ulp },
{ .compatible = "fsl,imx7d-cm4", .data = &imx_rproc_cfg_imx7d },
diff --git a/drivers/remoteproc/imx_rproc.h b/drivers/remoteproc/imx_rproc.h
index cfd38d37e146..3a9adaaf048b 100644
--- a/drivers/remoteproc/imx_rproc.h
+++ b/drivers/remoteproc/imx_rproc.h
@@ -31,6 +31,12 @@ enum imx_rproc_method {
/* dcfg flags */
#define IMX_RPROC_NEED_SYSTEM_OFF BIT(0)
+struct imx_rproc_plat_ops {
+ int (*start)(struct rproc *rproc);
+ int (*stop)(struct rproc *rproc);
+ int (*detect_mode)(struct rproc *rproc);
+};
+
struct imx_rproc_dcfg {
u32 src_reg;
u32 src_mask;
@@ -42,6 +48,7 @@ struct imx_rproc_dcfg {
size_t att_size;
enum imx_rproc_method method;
u32 flags;
+ const struct imx_rproc_plat_ops *ops;
};
#endif /* _IMX_RPROC_H */
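For context, a sketch of how a per-SoC ops table like this is typically dispatched from the core driver. The wrapper below is illustrative only, not the actual imx_rproc.c code; it assumes the driver's existing struct imx_rproc and its dcfg pointer:

static int imx_rproc_start_example(struct rproc *rproc)
{
	struct imx_rproc *priv = rproc->priv;
	const struct imx_rproc_dcfg *dcfg = priv->dcfg;

	/* Defer to the method-specific ops table when one is provided. */
	if (!dcfg->ops || !dcfg->ops->start)
		return -EOPNOTSUPP;

	return dcfg->ops->start(rproc);
}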
diff --git a/drivers/remoteproc/keystone_remoteproc.c b/drivers/remoteproc/keystone_remoteproc.c
index 7b41b4547fa8..4d6550b48567 100644
--- a/drivers/remoteproc/keystone_remoteproc.c
+++ b/drivers/remoteproc/keystone_remoteproc.c
@@ -349,6 +349,20 @@ static int keystone_rproc_of_get_dev_syscon(struct platform_device *pdev,
return 0;
}
+static void keystone_rproc_mem_release(void *data)
+{
+ struct device *dev = data;
+
+ of_reserved_mem_device_release(dev);
+}
+
+static void keystone_rproc_pm_runtime_put(void *data)
+{
+ struct device *dev = data;
+
+ pm_runtime_put_sync(dev);
+}
+
static int keystone_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -394,80 +408,58 @@ static int keystone_rproc_probe(struct platform_device *pdev)
return PTR_ERR(ksproc->reset);
/* enable clock for accessing DSP internal memories */
- pm_runtime_enable(dev);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to enable runtime PM\n");
+
ret = pm_runtime_resume_and_get(dev);
- if (ret < 0) {
- dev_err(dev, "failed to enable clock, status = %d\n", ret);
- goto disable_rpm;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to enable clock\n");
+
+ ret = devm_add_action_or_reset(dev, keystone_rproc_pm_runtime_put, dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add disable pm devm action\n");
ret = keystone_rproc_of_get_memories(pdev, ksproc);
if (ret)
- goto disable_clk;
+ return ret;
ksproc->irq_ring = platform_get_irq_byname(pdev, "vring");
- if (ksproc->irq_ring < 0) {
- ret = ksproc->irq_ring;
- goto disable_clk;
- }
+ if (ksproc->irq_ring < 0)
+ return ksproc->irq_ring;
ksproc->irq_fault = platform_get_irq_byname(pdev, "exception");
- if (ksproc->irq_fault < 0) {
- ret = ksproc->irq_fault;
- goto disable_clk;
- }
+ if (ksproc->irq_fault < 0)
+ return ksproc->irq_fault;
- ksproc->kick_gpio = gpiod_get(dev, "kick", GPIOD_ASIS);
+ ksproc->kick_gpio = devm_gpiod_get(dev, "kick", GPIOD_ASIS);
ret = PTR_ERR_OR_ZERO(ksproc->kick_gpio);
- if (ret) {
- dev_err(dev, "failed to get gpio for virtio kicks, status = %d\n",
- ret);
- goto disable_clk;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get gpio for virtio kicks\n");
- if (of_reserved_mem_device_init(dev))
+ ret = of_reserved_mem_device_init(dev);
+ if (ret) {
dev_warn(dev, "device does not have specific CMA pool\n");
+ } else {
+ ret = devm_add_action_or_reset(dev, keystone_rproc_mem_release, dev);
+ if (ret)
+ return ret;
+ }
/* ensure the DSP is in reset before loading firmware */
ret = reset_control_status(ksproc->reset);
if (ret < 0) {
- dev_err(dev, "failed to get reset status, status = %d\n", ret);
- goto release_mem;
+ return dev_err_probe(dev, ret, "failed to get reset status\n");
} else if (ret == 0) {
WARN(1, "device is not in reset\n");
keystone_rproc_dsp_reset(ksproc);
}
- ret = rproc_add(rproc);
- if (ret) {
- dev_err(dev, "failed to add register device with remoteproc core, status = %d\n",
- ret);
- goto release_mem;
- }
-
- platform_set_drvdata(pdev, ksproc);
+ ret = devm_rproc_add(dev, rproc);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register device with remoteproc core\n");
return 0;
-
-release_mem:
- of_reserved_mem_device_release(dev);
- gpiod_put(ksproc->kick_gpio);
-disable_clk:
- pm_runtime_put_sync(dev);
-disable_rpm:
- pm_runtime_disable(dev);
- return ret;
-}
-
-static void keystone_rproc_remove(struct platform_device *pdev)
-{
- struct keystone_rproc *ksproc = platform_get_drvdata(pdev);
-
- rproc_del(ksproc->rproc);
- gpiod_put(ksproc->kick_gpio);
- pm_runtime_put_sync(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
- of_reserved_mem_device_release(&pdev->dev);
}
static const struct of_device_id keystone_rproc_of_match[] = {
@@ -481,7 +473,6 @@ MODULE_DEVICE_TABLE(of, keystone_rproc_of_match);
static struct platform_driver keystone_rproc_driver = {
.probe = keystone_rproc_probe,
- .remove = keystone_rproc_remove,
.driver = {
.name = "keystone-rproc",
.of_match_table = keystone_rproc_of_match,
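The keystone conversion above is a wholesale switch to the devres action idiom: each acquired resource registers an undo callback that the driver core runs in reverse order on probe failure or unbind, which is what lets the goto ladder and the remove() callback be deleted. A minimal self-contained sketch of the idiom (the my_* names are hypothetical):

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static void my_runtime_put(void *data)
{
	struct device *dev = data;

	pm_runtime_put_sync(dev);
}

static int my_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	ret = devm_pm_runtime_enable(dev);
	if (ret)
		return ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* The undo callback runs automatically on probe failure or unbind. */
	return devm_add_action_or_reset(dev, my_runtime_put, dev);
}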
diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c
index 842e4b6cc5f9..5e3eb7b86a0e 100644
--- a/drivers/remoteproc/pru_rproc.c
+++ b/drivers/remoteproc/pru_rproc.c
@@ -340,7 +340,7 @@ EXPORT_SYMBOL_GPL(pru_rproc_put);
*/
int pru_rproc_set_ctable(struct rproc *rproc, enum pru_ctable_idx c, u32 addr)
{
- struct pru_rproc *pru = rproc->priv;
+ struct pru_rproc *pru;
unsigned int reg;
u32 mask, set;
u16 idx;
@@ -352,6 +352,7 @@ int pru_rproc_set_ctable(struct rproc *rproc, enum pru_ctable_idx c, u32 addr)
if (!rproc->dev.parent || !is_pru_rproc(rproc->dev.parent))
return -ENODEV;
+ pru = rproc->priv;
/* pointer is 16 bit and index is 8-bit so mask out the rest */
idx_mask = (c >= PRU_C28) ? 0xFFFF : 0xFF;
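The pru_rproc change above is a validate-before-use reordering: rproc->priv must not be picked up until the handle is confirmed to refer to a PRU device. The shape of the fix, as a hedged sketch reusing the driver's own is_pru_rproc() helper:

static int pru_op_example(struct rproc *rproc)
{
	struct pru_rproc *pru;

	/* Reject handles that are not PRU devices before touching priv. */
	if (!rproc->dev.parent || !is_pru_rproc(rproc->dev.parent))
		return -ENODEV;

	pru = rproc->priv;	/* meaningful only after the check above */

	return pru ? 0 : -EINVAL;
}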
diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c
index 4ee5e67a9f03..58d5b85e58cd 100644
--- a/drivers/remoteproc/qcom_q6v5.c
+++ b/drivers/remoteproc/qcom_q6v5.c
@@ -156,9 +156,6 @@ int qcom_q6v5_wait_for_start(struct qcom_q6v5 *q6v5, int timeout)
int ret;
ret = wait_for_completion_timeout(&q6v5->start_done, timeout);
- if (!ret)
- disable_irq(q6v5->handover_irq);
-
return !ret ? -ETIMEDOUT : 0;
}
EXPORT_SYMBOL_GPL(qcom_q6v5_wait_for_start);
@@ -167,6 +164,11 @@ static irqreturn_t q6v5_handover_interrupt(int irq, void *data)
{
struct qcom_q6v5 *q6v5 = data;
+ if (q6v5->handover_issued) {
+ dev_err(q6v5->dev, "Handover signaled, but it already happened\n");
+ return IRQ_HANDLED;
+ }
+
if (q6v5->handover)
q6v5->handover(q6v5);
diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
index 94af77baa7a1..e98b7e03162c 100644
--- a/drivers/remoteproc/qcom_q6v5_adsp.c
+++ b/drivers/remoteproc/qcom_q6v5_adsp.c
@@ -317,7 +317,7 @@ static int adsp_load(struct rproc *rproc, const struct firmware *fw)
struct qcom_adsp *adsp = rproc->priv;
int ret;
- ret = qcom_mdt_load_no_init(adsp->dev, fw, rproc->firmware, 0,
+ ret = qcom_mdt_load_no_init(adsp->dev, fw, rproc->firmware,
adsp->mem_region, adsp->mem_phys,
adsp->mem_size, &adsp->mem_reloc);
if (ret)
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index 0c0199fb0e68..3087d895b87f 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -498,6 +498,8 @@ static void q6v5_debug_policy_load(struct q6v5 *qproc, void *mba_region)
release_firmware(dp_fw);
}
+#define MSM8974_B00_OFFSET 0x1000
+
static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
{
struct q6v5 *qproc = rproc->priv;
@@ -516,7 +518,14 @@ static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
return -EBUSY;
}
- memcpy(mba_region, fw->data, fw->size);
+ if ((qproc->version == MSS_MSM8974 ||
+ qproc->version == MSS_MSM8226 ||
+ qproc->version == MSS_MSM8926) &&
+ fw->size > MSM8974_B00_OFFSET &&
+ !memcmp(fw->data, ELFMAG, SELFMAG))
+ memcpy(mba_region, fw->data + MSM8974_B00_OFFSET, fw->size - MSM8974_B00_OFFSET);
+ else
+ memcpy(mba_region, fw->data, fw->size);
q6v5_debug_policy_load(qproc, mba_region);
memunmap(mba_region);
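The MBA workaround keys off the standard ELF identification bytes at the start of the firmware image, and only applies the 0x1000-byte b00 offset to images that still carry the ELF header. In isolation, the magic check looks like this (a sketch built on the <linux/elf.h> definitions):

#include <linux/elf.h>
#include <linux/string.h>
#include <linux/types.h>

/* True if the buffer begins with the ELF magic "\177ELF". */
static bool fw_is_elf(const u8 *data, size_t size)
{
	return size >= SELFMAG && !memcmp(data, ELFMAG, SELFMAG);
}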
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index 02e29171cbbe..158bcd6cc85c 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -42,6 +42,7 @@ struct qcom_pas_data {
int pas_id;
int dtb_pas_id;
int lite_pas_id;
+ int lite_dtb_pas_id;
unsigned int minidump_id;
bool auto_boot;
bool decrypt_shutdown;
@@ -80,6 +81,7 @@ struct qcom_pas {
int pas_id;
int dtb_pas_id;
int lite_pas_id;
+ int lite_dtb_pas_id;
unsigned int minidump_id;
int crash_reason_smem;
unsigned int smem_host_id;
@@ -225,7 +227,9 @@ static int qcom_pas_load(struct rproc *rproc, const struct firmware *fw)
pas->firmware = fw;
if (pas->lite_pas_id)
- ret = qcom_scm_pas_shutdown(pas->lite_pas_id);
+ qcom_scm_pas_shutdown(pas->lite_pas_id);
+ if (pas->lite_dtb_pas_id)
+ qcom_scm_pas_shutdown(pas->lite_dtb_pas_id);
if (pas->dtb_pas_id) {
ret = request_firmware(&pas->dtb_firmware, pas->dtb_firmware_name, pas->dev);
@@ -242,9 +246,8 @@ static int qcom_pas_load(struct rproc *rproc, const struct firmware *fw)
goto release_dtb_firmware;
ret = qcom_mdt_load_no_init(pas->dev, pas->dtb_firmware, pas->dtb_firmware_name,
- pas->dtb_pas_id, pas->dtb_mem_region,
- pas->dtb_mem_phys, pas->dtb_mem_size,
- &pas->dtb_mem_reloc);
+ pas->dtb_mem_region, pas->dtb_mem_phys,
+ pas->dtb_mem_size, &pas->dtb_mem_reloc);
if (ret)
goto release_dtb_metadata;
}
@@ -307,7 +310,7 @@ static int qcom_pas_start(struct rproc *rproc)
if (ret)
goto disable_px_supply;
- ret = qcom_mdt_load_no_init(pas->dev, pas->firmware, rproc->firmware, pas->pas_id,
+ ret = qcom_mdt_load_no_init(pas->dev, pas->firmware, rproc->firmware,
pas->mem_region, pas->mem_phys, pas->mem_size,
&pas->mem_reloc);
if (ret)
@@ -722,6 +725,7 @@ static int qcom_pas_probe(struct platform_device *pdev)
pas->minidump_id = desc->minidump_id;
pas->pas_id = desc->pas_id;
pas->lite_pas_id = desc->lite_pas_id;
+ pas->lite_dtb_pas_id = desc->lite_dtb_pas_id;
pas->info_name = desc->sysmon_name;
pas->smem_host_id = desc->smem_host_id;
pas->decrypt_shutdown = desc->decrypt_shutdown;
@@ -1085,6 +1089,7 @@ static const struct qcom_pas_data x1e80100_adsp_resource = {
.pas_id = 1,
.dtb_pas_id = 0x24,
.lite_pas_id = 0x1f,
+ .lite_dtb_pas_id = 0x29,
.minidump_id = 5,
.auto_boot = true,
.proxy_pd_names = (char*[]){
@@ -1256,6 +1261,26 @@ static const struct qcom_pas_data sdx55_mpss_resource = {
.ssctl_id = 0x22,
};
+static const struct qcom_pas_data milos_cdsp_resource = {
+ .crash_reason_smem = 601,
+ .firmware_name = "cdsp.mbn",
+ .dtb_firmware_name = "cdsp_dtb.mbn",
+ .pas_id = 18,
+ .dtb_pas_id = 0x25,
+ .minidump_id = 7,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mx",
+ NULL
+ },
+ .load_state = "cdsp",
+ .ssr_name = "cdsp",
+ .sysmon_name = "cdsp",
+ .ssctl_id = 0x17,
+ .smem_host_id = 5,
+};
+
static const struct qcom_pas_data sm8450_mpss_resource = {
.crash_reason_smem = 421,
.firmware_name = "modem.mdt",
@@ -1430,6 +1455,10 @@ static const struct qcom_pas_data sm8750_mpss_resource = {
};
static const struct of_device_id qcom_pas_of_match[] = {
+ { .compatible = "qcom,milos-adsp-pas", .data = &sm8550_adsp_resource},
+ { .compatible = "qcom,milos-cdsp-pas", .data = &milos_cdsp_resource},
+ { .compatible = "qcom,milos-mpss-pas", .data = &sm8450_mpss_resource},
+ { .compatible = "qcom,milos-wpss-pas", .data = &sc7280_wpss_resource},
{ .compatible = "qcom,msm8226-adsp-pil", .data = &msm8996_adsp_resource},
{ .compatible = "qcom,msm8953-adsp-pil", .data = &msm8996_adsp_resource},
{ .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init},
diff --git a/drivers/remoteproc/qcom_q6v5_wcss.c b/drivers/remoteproc/qcom_q6v5_wcss.c
index 93648734a2f2..07c88623f597 100644
--- a/drivers/remoteproc/qcom_q6v5_wcss.c
+++ b/drivers/remoteproc/qcom_q6v5_wcss.c
@@ -757,7 +757,7 @@ static int q6v5_wcss_load(struct rproc *rproc, const struct firmware *fw)
int ret;
ret = qcom_mdt_load_no_init(wcss->dev, fw, rproc->firmware,
- 0, wcss->mem_region, wcss->mem_phys,
+ wcss->mem_region, wcss->mem_phys,
wcss->mem_size, &wcss->mem_reloc);
if (ret)
return ret;
diff --git a/drivers/remoteproc/ti_k3_common.c b/drivers/remoteproc/ti_k3_common.c
index d4f20900f33b..56b71652e449 100644
--- a/drivers/remoteproc/ti_k3_common.c
+++ b/drivers/remoteproc/ti_k3_common.c
@@ -155,6 +155,13 @@ int k3_rproc_release(struct k3_rproc *kproc)
}
EXPORT_SYMBOL_GPL(k3_rproc_release);
+static void k3_rproc_free_channel(void *data)
+{
+ struct k3_rproc *kproc = data;
+
+ mbox_free_channel(kproc->mbox);
+}
+
int k3_rproc_request_mbox(struct rproc *rproc)
{
struct k3_rproc *kproc = rproc->priv;
@@ -173,19 +180,9 @@ int k3_rproc_request_mbox(struct rproc *rproc)
return dev_err_probe(dev, PTR_ERR(kproc->mbox),
"mbox_request_channel failed\n");
- /*
- * Ping the remote processor, this is only for sanity-sake for now;
- * there is no functional effect whatsoever.
- *
- * Note that the reply will _not_ arrive immediately: this message
- * will wait in the mailbox fifo until the remote processor is booted.
- */
- ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
- if (ret < 0) {
- dev_err(dev, "mbox_send_message failed (%pe)\n", ERR_PTR(ret));
- mbox_free_channel(kproc->mbox);
+ ret = devm_add_action_or_reset(dev, k3_rproc_free_channel, kproc);
+ if (ret)
return ret;
- }
return 0;
}
diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
index 7a72933bd403..d6ceea6dc920 100644
--- a/drivers/remoteproc/ti_k3_dsp_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
@@ -175,8 +175,6 @@ static void k3_dsp_rproc_remove(struct platform_device *pdev)
if (ret)
dev_err(dev, "failed to detach proc (%pe)\n", ERR_PTR(ret));
}
-
- mbox_free_channel(kproc->mbox);
}
static const struct k3_rproc_mem_data c66_mems[] = {
diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
index ca5ff280d2dc..04f23295ffc1 100644
--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
@@ -1206,8 +1206,6 @@ static void k3_r5_cluster_rproc_exit(void *data)
return;
}
}
-
- mbox_free_channel(kproc->mbox);
}
}
diff --git a/drivers/remoteproc/wkup_m3_rproc.c b/drivers/remoteproc/wkup_m3_rproc.c
index d8be21e71721..2d5bfbefcacc 100644
--- a/drivers/remoteproc/wkup_m3_rproc.c
+++ b/drivers/remoteproc/wkup_m3_rproc.c
@@ -125,6 +125,13 @@ static const struct of_device_id wkup_m3_rproc_of_match[] = {
};
MODULE_DEVICE_TABLE(of, wkup_m3_rproc_of_match);
+static void wkup_m3_rproc_pm_runtime_put(void *data)
+{
+ struct device *dev = data;
+
+ pm_runtime_put_sync(dev);
+}
+
static int wkup_m3_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -148,19 +155,20 @@ static int wkup_m3_rproc_probe(struct platform_device *pdev)
return -ENODEV;
}
- pm_runtime_enable(&pdev->dev);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to enable runtime PM\n");
ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "pm_runtime_get_sync() failed\n");
- goto err;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "pm_runtime_get_sync() failed\n");
+ ret = devm_add_action_or_reset(dev, wkup_m3_rproc_pm_runtime_put, dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add disable pm devm action\n");
- rproc = rproc_alloc(dev, "wkup_m3", &wkup_m3_rproc_ops,
- fw_name, sizeof(*wkupm3));
- if (!rproc) {
- ret = -ENOMEM;
- goto err;
- }
+ rproc = devm_rproc_alloc(dev, "wkup_m3", &wkup_m3_rproc_ops,
+ fw_name, sizeof(*wkupm3));
+ if (!rproc)
+ return -ENOMEM;
rproc->auto_boot = false;
rproc->sysfs_read_only = true;
@@ -175,9 +183,7 @@ static int wkup_m3_rproc_probe(struct platform_device *pdev)
if (!wkupm3->rsts) {
if (!(pdata && pdata->deassert_reset && pdata->assert_reset &&
pdata->reset_name)) {
- dev_err(dev, "Platform data missing!\n");
- ret = -ENODEV;
- goto err_put_rproc;
+ return dev_err_probe(dev, -ENODEV, "Platform data missing!\n");
}
}
@@ -185,12 +191,9 @@ static int wkup_m3_rproc_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
mem_names[i]);
wkupm3->mem[i].cpu_addr = devm_ioremap_resource(dev, res);
- if (IS_ERR(wkupm3->mem[i].cpu_addr)) {
- dev_err(&pdev->dev, "devm_ioremap_resource failed for resource %d\n",
- i);
- ret = PTR_ERR(wkupm3->mem[i].cpu_addr);
- goto err_put_rproc;
- }
+ if (IS_ERR(wkupm3->mem[i].cpu_addr))
+ return dev_err_probe(dev, PTR_ERR(wkupm3->mem[i].cpu_addr),
+ "devm_ioremap_resource failed for resource %d\n", i);
wkupm3->mem[i].bus_addr = res->start;
wkupm3->mem[i].size = resource_size(res);
addrp = of_get_address(dev->of_node, i, &size, NULL);
@@ -207,30 +210,11 @@ static int wkup_m3_rproc_probe(struct platform_device *pdev)
dev_set_drvdata(dev, rproc);
- ret = rproc_add(rproc);
- if (ret) {
- dev_err(dev, "rproc_add failed\n");
- goto err_put_rproc;
- }
+ ret = devm_rproc_add(dev, rproc);
+ if (ret)
+ return dev_err_probe(dev, ret, "rproc_add failed\n");
return 0;
-
-err_put_rproc:
- rproc_free(rproc);
-err:
- pm_runtime_put_noidle(dev);
- pm_runtime_disable(dev);
- return ret;
-}
-
-static void wkup_m3_rproc_remove(struct platform_device *pdev)
-{
- struct rproc *rproc = platform_get_drvdata(pdev);
-
- rproc_del(rproc);
- rproc_free(rproc);
- pm_runtime_put_sync(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
}
#ifdef CONFIG_PM
@@ -251,7 +235,6 @@ static const struct dev_pm_ops wkup_m3_rproc_pm_ops = {
static struct platform_driver wkup_m3_rproc_driver = {
.probe = wkup_m3_rproc_probe,
- .remove = wkup_m3_rproc_remove,
.driver = {
.name = "wkup_m3_rproc",
.of_match_table = wkup_m3_rproc_of_match,
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 635eef469ab7..78b7078478d4 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -22,6 +22,13 @@ config RESET_A10SR
This option enables support for the external reset functions for
peripheral PHYs on the Altera Arria10 System Resource Chip.
+config RESET_ASPEED
+ tristate "ASPEED Reset Driver"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ select AUXILIARY_BUS
+ help
+ This enables the reset controller driver for the ASPEED AST2700 SoC.
+
config RESET_ATH79
bool "AR71xx Reset Driver" if COMPILE_TEST
default ATH79
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index a917d2522e8d..f7934f9fb90b 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -6,6 +6,7 @@ obj-y += starfive/
obj-y += sti/
obj-y += tegra/
obj-$(CONFIG_RESET_A10SR) += reset-a10sr.o
+obj-$(CONFIG_RESET_ASPEED) += reset-aspeed.o
obj-$(CONFIG_RESET_ATH79) += reset-ath79.o
obj-$(CONFIG_RESET_AXS10X) += reset-axs10x.o
obj-$(CONFIG_RESET_BCM6345) += reset-bcm6345.o
diff --git a/drivers/reset/reset-aspeed.c b/drivers/reset/reset-aspeed.c
new file mode 100644
index 000000000000..dd2f860a69d7
--- /dev/null
+++ b/drivers/reset/reset-aspeed.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2024 ASPEED Technology Inc.
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/reset-controller.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/reset/aspeed,ast2700-scu.h>
+
+#define SCU0_RESET_CTRL1 0x200
+#define SCU0_RESET_CTRL2 0x220
+#define SCU1_RESET_CTRL1 0x200
+#define SCU1_RESET_CTRL2 0x220
+#define SCU1_PCIE3_CTRL 0x908
+
+struct ast2700_reset_signal {
+ bool dedicated_clr; /* dedicated reset clr offset */
+ u32 offset, bit;
+};
+
+struct aspeed_reset_info {
+ unsigned int nr_resets;
+ const struct ast2700_reset_signal *signal;
+};
+
+struct aspeed_reset {
+ struct reset_controller_dev rcdev;
+ struct aspeed_reset_info *info;
+ spinlock_t lock; /* Protect read-modify-write cycle */
+ void __iomem *base;
+};
+
+static const struct ast2700_reset_signal ast2700_reset0_signals[] = {
+ [SCU0_RESET_SDRAM] = { true, SCU0_RESET_CTRL1, BIT(0) },
+ [SCU0_RESET_DDRPHY] = { true, SCU0_RESET_CTRL1, BIT(1) },
+ [SCU0_RESET_RSA] = { true, SCU0_RESET_CTRL1, BIT(2) },
+ [SCU0_RESET_SHA3] = { true, SCU0_RESET_CTRL1, BIT(3) },
+ [SCU0_RESET_HACE] = { true, SCU0_RESET_CTRL1, BIT(4) },
+ [SCU0_RESET_SOC] = { true, SCU0_RESET_CTRL1, BIT(5) },
+ [SCU0_RESET_VIDEO] = { true, SCU0_RESET_CTRL1, BIT(6) },
+ [SCU0_RESET_2D] = { true, SCU0_RESET_CTRL1, BIT(7) },
+ [SCU0_RESET_PCIS] = { true, SCU0_RESET_CTRL1, BIT(8) },
+ [SCU0_RESET_RVAS0] = { true, SCU0_RESET_CTRL1, BIT(9) },
+ [SCU0_RESET_RVAS1] = { true, SCU0_RESET_CTRL1, BIT(10) },
+ [SCU0_RESET_SM3] = { true, SCU0_RESET_CTRL1, BIT(11) },
+ [SCU0_RESET_SM4] = { true, SCU0_RESET_CTRL1, BIT(12) },
+ [SCU0_RESET_CRT0] = { true, SCU0_RESET_CTRL1, BIT(13) },
+ [SCU0_RESET_ECC] = { true, SCU0_RESET_CTRL1, BIT(14) },
+ [SCU0_RESET_DP_PCI] = { true, SCU0_RESET_CTRL1, BIT(15) },
+ [SCU0_RESET_UFS] = { true, SCU0_RESET_CTRL1, BIT(16) },
+ [SCU0_RESET_EMMC] = { true, SCU0_RESET_CTRL1, BIT(17) },
+ [SCU0_RESET_PCIE1RST] = { true, SCU0_RESET_CTRL1, BIT(18) },
+ [SCU0_RESET_PCIE1RSTOE] = { true, SCU0_RESET_CTRL1, BIT(19) },
+ [SCU0_RESET_PCIE0RST] = { true, SCU0_RESET_CTRL1, BIT(20) },
+ [SCU0_RESET_PCIE0RSTOE] = { true, SCU0_RESET_CTRL1, BIT(21) },
+ [SCU0_RESET_JTAG] = { true, SCU0_RESET_CTRL1, BIT(22) },
+ [SCU0_RESET_MCTP0] = { true, SCU0_RESET_CTRL1, BIT(23) },
+ [SCU0_RESET_MCTP1] = { true, SCU0_RESET_CTRL1, BIT(24) },
+ [SCU0_RESET_XDMA0] = { true, SCU0_RESET_CTRL1, BIT(25) },
+ [SCU0_RESET_XDMA1] = { true, SCU0_RESET_CTRL1, BIT(26) },
+ [SCU0_RESET_H2X1] = { true, SCU0_RESET_CTRL1, BIT(27) },
+ [SCU0_RESET_DP] = { true, SCU0_RESET_CTRL1, BIT(28) },
+ [SCU0_RESET_DP_MCU] = { true, SCU0_RESET_CTRL1, BIT(29) },
+ [SCU0_RESET_SSP] = { true, SCU0_RESET_CTRL1, BIT(30) },
+ [SCU0_RESET_H2X0] = { true, SCU0_RESET_CTRL1, BIT(31) },
+ [SCU0_RESET_PORTA_VHUB] = { true, SCU0_RESET_CTRL2, BIT(0) },
+ [SCU0_RESET_PORTA_PHY3] = { true, SCU0_RESET_CTRL2, BIT(1) },
+ [SCU0_RESET_PORTA_XHCI] = { true, SCU0_RESET_CTRL2, BIT(2) },
+ [SCU0_RESET_PORTB_VHUB] = { true, SCU0_RESET_CTRL2, BIT(3) },
+ [SCU0_RESET_PORTB_PHY3] = { true, SCU0_RESET_CTRL2, BIT(4) },
+ [SCU0_RESET_PORTB_XHCI] = { true, SCU0_RESET_CTRL2, BIT(5) },
+ [SCU0_RESET_PORTA_VHUB_EHCI] = { true, SCU0_RESET_CTRL2, BIT(6) },
+ [SCU0_RESET_PORTB_VHUB_EHCI] = { true, SCU0_RESET_CTRL2, BIT(7) },
+ [SCU0_RESET_UHCI] = { true, SCU0_RESET_CTRL2, BIT(8) },
+ [SCU0_RESET_TSP] = { true, SCU0_RESET_CTRL2, BIT(9) },
+ [SCU0_RESET_E2M0] = { true, SCU0_RESET_CTRL2, BIT(10) },
+ [SCU0_RESET_E2M1] = { true, SCU0_RESET_CTRL2, BIT(11) },
+ [SCU0_RESET_VLINK] = { true, SCU0_RESET_CTRL2, BIT(12) },
+};
+
+static const struct ast2700_reset_signal ast2700_reset1_signals[] = {
+ [SCU1_RESET_LPC0] = { true, SCU1_RESET_CTRL1, BIT(0) },
+ [SCU1_RESET_LPC1] = { true, SCU1_RESET_CTRL1, BIT(1) },
+ [SCU1_RESET_MII] = { true, SCU1_RESET_CTRL1, BIT(2) },
+ [SCU1_RESET_PECI] = { true, SCU1_RESET_CTRL1, BIT(3) },
+ [SCU1_RESET_PWM] = { true, SCU1_RESET_CTRL1, BIT(4) },
+ [SCU1_RESET_MAC0] = { true, SCU1_RESET_CTRL1, BIT(5) },
+ [SCU1_RESET_MAC1] = { true, SCU1_RESET_CTRL1, BIT(6) },
+ [SCU1_RESET_MAC2] = { true, SCU1_RESET_CTRL1, BIT(7) },
+ [SCU1_RESET_ADC] = { true, SCU1_RESET_CTRL1, BIT(8) },
+ [SCU1_RESET_SD] = { true, SCU1_RESET_CTRL1, BIT(9) },
+ [SCU1_RESET_ESPI0] = { true, SCU1_RESET_CTRL1, BIT(10) },
+ [SCU1_RESET_ESPI1] = { true, SCU1_RESET_CTRL1, BIT(11) },
+ [SCU1_RESET_JTAG1] = { true, SCU1_RESET_CTRL1, BIT(12) },
+ [SCU1_RESET_SPI0] = { true, SCU1_RESET_CTRL1, BIT(13) },
+ [SCU1_RESET_SPI1] = { true, SCU1_RESET_CTRL1, BIT(14) },
+ [SCU1_RESET_SPI2] = { true, SCU1_RESET_CTRL1, BIT(15) },
+ [SCU1_RESET_I3C0] = { true, SCU1_RESET_CTRL1, BIT(16) },
+ [SCU1_RESET_I3C1] = { true, SCU1_RESET_CTRL1, BIT(17) },
+ [SCU1_RESET_I3C2] = { true, SCU1_RESET_CTRL1, BIT(18) },
+ [SCU1_RESET_I3C3] = { true, SCU1_RESET_CTRL1, BIT(19) },
+ [SCU1_RESET_I3C4] = { true, SCU1_RESET_CTRL1, BIT(20) },
+ [SCU1_RESET_I3C5] = { true, SCU1_RESET_CTRL1, BIT(21) },
+ [SCU1_RESET_I3C6] = { true, SCU1_RESET_CTRL1, BIT(22) },
+ [SCU1_RESET_I3C7] = { true, SCU1_RESET_CTRL1, BIT(23) },
+ [SCU1_RESET_I3C8] = { true, SCU1_RESET_CTRL1, BIT(24) },
+ [SCU1_RESET_I3C9] = { true, SCU1_RESET_CTRL1, BIT(25) },
+ [SCU1_RESET_I3C10] = { true, SCU1_RESET_CTRL1, BIT(26) },
+ [SCU1_RESET_I3C11] = { true, SCU1_RESET_CTRL1, BIT(27) },
+ [SCU1_RESET_I3C12] = { true, SCU1_RESET_CTRL1, BIT(28) },
+ [SCU1_RESET_I3C13] = { true, SCU1_RESET_CTRL1, BIT(29) },
+ [SCU1_RESET_I3C14] = { true, SCU1_RESET_CTRL1, BIT(30) },
+ [SCU1_RESET_I3C15] = { true, SCU1_RESET_CTRL1, BIT(31) },
+ [SCU1_RESET_MCU0] = { true, SCU1_RESET_CTRL2, BIT(0) },
+ [SCU1_RESET_MCU1] = { true, SCU1_RESET_CTRL2, BIT(1) },
+ [SCU1_RESET_H2A_SPI1] = { true, SCU1_RESET_CTRL2, BIT(2) },
+ [SCU1_RESET_H2A_SPI2] = { true, SCU1_RESET_CTRL2, BIT(3) },
+ [SCU1_RESET_UART0] = { true, SCU1_RESET_CTRL2, BIT(4) },
+ [SCU1_RESET_UART1] = { true, SCU1_RESET_CTRL2, BIT(5) },
+ [SCU1_RESET_UART2] = { true, SCU1_RESET_CTRL2, BIT(6) },
+ [SCU1_RESET_UART3] = { true, SCU1_RESET_CTRL2, BIT(7) },
+ [SCU1_RESET_I2C_FILTER] = { true, SCU1_RESET_CTRL2, BIT(8) },
+ [SCU1_RESET_CALIPTRA] = { true, SCU1_RESET_CTRL2, BIT(9) },
+ [SCU1_RESET_XDMA] = { true, SCU1_RESET_CTRL2, BIT(10) },
+ [SCU1_RESET_FSI] = { true, SCU1_RESET_CTRL2, BIT(12) },
+ [SCU1_RESET_CAN] = { true, SCU1_RESET_CTRL2, BIT(13) },
+ [SCU1_RESET_MCTP] = { true, SCU1_RESET_CTRL2, BIT(14) },
+ [SCU1_RESET_I2C] = { true, SCU1_RESET_CTRL2, BIT(15) },
+ [SCU1_RESET_UART6] = { true, SCU1_RESET_CTRL2, BIT(16) },
+ [SCU1_RESET_UART7] = { true, SCU1_RESET_CTRL2, BIT(17) },
+ [SCU1_RESET_UART8] = { true, SCU1_RESET_CTRL2, BIT(18) },
+ [SCU1_RESET_UART9] = { true, SCU1_RESET_CTRL2, BIT(19) },
+ [SCU1_RESET_LTPI0] = { true, SCU1_RESET_CTRL2, BIT(20) },
+ [SCU1_RESET_VGAL] = { true, SCU1_RESET_CTRL2, BIT(21) },
+ [SCU1_RESET_LTPI1] = { true, SCU1_RESET_CTRL2, BIT(22) },
+ [SCU1_RESET_ACE] = { true, SCU1_RESET_CTRL2, BIT(23) },
+ [SCU1_RESET_E2M] = { true, SCU1_RESET_CTRL2, BIT(24) },
+ [SCU1_RESET_UHCI] = { true, SCU1_RESET_CTRL2, BIT(25) },
+ [SCU1_RESET_PORTC_USB2UART] = { true, SCU1_RESET_CTRL2, BIT(26) },
+ [SCU1_RESET_PORTC_VHUB_EHCI] = { true, SCU1_RESET_CTRL2, BIT(27) },
+ [SCU1_RESET_PORTD_USB2UART] = { true, SCU1_RESET_CTRL2, BIT(28) },
+ [SCU1_RESET_PORTD_VHUB_EHCI] = { true, SCU1_RESET_CTRL2, BIT(29) },
+ [SCU1_RESET_H2X] = { true, SCU1_RESET_CTRL2, BIT(30) },
+ [SCU1_RESET_I3CDMA] = { true, SCU1_RESET_CTRL2, BIT(31) },
+ [SCU1_RESET_PCIE2RST] = { false, SCU1_PCIE3_CTRL, BIT(0) },
+};
+
+static inline struct aspeed_reset *to_aspeed_reset(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct aspeed_reset, rcdev);
+}
+
+static int aspeed_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct aspeed_reset *rc = to_aspeed_reset(rcdev);
+ void __iomem *reg_offset = rc->base + rc->info->signal[id].offset;
+
+ if (rc->info->signal[id].dedicated_clr) {
+ writel(rc->info->signal[id].bit, reg_offset);
+ } else {
+ guard(spinlock_irqsave)(&rc->lock);
+ writel(readl(reg_offset) & ~rc->info->signal[id].bit, reg_offset);
+ }
+
+ return 0;
+}
+
+static int aspeed_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct aspeed_reset *rc = to_aspeed_reset(rcdev);
+ void __iomem *reg_offset = rc->base + rc->info->signal[id].offset;
+
+ if (rc->info->signal[id].dedicated_clr) {
+ writel(rc->info->signal[id].bit, reg_offset + 0x04);
+ } else {
+ guard(spinlock_irqsave)(&rc->lock);
+ writel(readl(reg_offset) | rc->info->signal[id].bit, reg_offset);
+ }
+
+ return 0;
+}
+
+static int aspeed_reset_status(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct aspeed_reset *rc = to_aspeed_reset(rcdev);
+ void __iomem *reg_offset = rc->base + rc->info->signal[id].offset;
+
+ return (readl(reg_offset) & rc->info->signal[id].bit) ? 1 : 0;
+}
+
+static const struct reset_control_ops aspeed_reset_ops = {
+ .assert = aspeed_reset_assert,
+ .deassert = aspeed_reset_deassert,
+ .status = aspeed_reset_status,
+};
+
+static int aspeed_reset_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct aspeed_reset *reset;
+ struct device *dev = &adev->dev;
+
+ reset = devm_kzalloc(dev, sizeof(*reset), GFP_KERNEL);
+ if (!reset)
+ return -ENOMEM;
+
+ spin_lock_init(&reset->lock);
+
+ reset->info = (struct aspeed_reset_info *)id->driver_data;
+ reset->rcdev.owner = THIS_MODULE;
+ reset->rcdev.nr_resets = reset->info->nr_resets;
+ reset->rcdev.ops = &aspeed_reset_ops;
+ reset->rcdev.of_node = dev->parent->of_node;
+ reset->rcdev.dev = dev;
+ reset->rcdev.of_reset_n_cells = 1;
+ reset->base = (void __iomem *)adev->dev.platform_data;
+
+ return devm_reset_controller_register(dev, &reset->rcdev);
+}
+
+static const struct aspeed_reset_info ast2700_reset0_info = {
+ .nr_resets = ARRAY_SIZE(ast2700_reset0_signals),
+ .signal = ast2700_reset0_signals,
+};
+
+static const struct aspeed_reset_info ast2700_reset1_info = {
+ .nr_resets = ARRAY_SIZE(ast2700_reset1_signals),
+ .signal = ast2700_reset1_signals,
+};
+
+static const struct auxiliary_device_id aspeed_reset_ids[] = {
+ { .name = "clk_ast2700.reset0", .driver_data = (kernel_ulong_t)&ast2700_reset0_info },
+ { .name = "clk_ast2700.reset1", .driver_data = (kernel_ulong_t)&ast2700_reset1_info },
+ { }
+};
+MODULE_DEVICE_TABLE(auxiliary, aspeed_reset_ids);
+
+static struct auxiliary_driver aspeed_reset_driver = {
+ .probe = aspeed_reset_probe,
+ .id_table = aspeed_reset_ids,
+};
+
+module_auxiliary_driver(aspeed_reset_driver);
+
+MODULE_AUTHOR("Ryan Chen <ryan_chen@aspeedtech.com>");
+MODULE_DESCRIPTION("ASPEED SoC Reset Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/reset/reset-bcm6345.c b/drivers/reset/reset-bcm6345.c
index aa9353439e70..56518f7bfbb3 100644
--- a/drivers/reset/reset-bcm6345.c
+++ b/drivers/reset/reset-bcm6345.c
@@ -119,6 +119,7 @@ static int bcm6345_reset_probe(struct platform_device *pdev)
static const struct of_device_id bcm6345_reset_of_match[] = {
{ .compatible = "brcm,bcm6345-reset" },
+ { .compatible = "brcm,bcm63xx-ephy-ctrl" },
{ /* sentinel */ },
};
diff --git a/drivers/reset/reset-eyeq.c b/drivers/reset/reset-eyeq.c
index 02d50041048b..2d3998368a1c 100644
--- a/drivers/reset/reset-eyeq.c
+++ b/drivers/reset/reset-eyeq.c
@@ -410,6 +410,13 @@ static int eqr_of_xlate_twocells(struct reset_controller_dev *rcdev,
return eqr_of_xlate_internal(rcdev, reset_spec->args[0], reset_spec->args[1]);
}
+static void eqr_of_node_put(void *_dev)
+{
+ struct device *dev = _dev;
+
+ of_node_put(dev->of_node);
+}
+
static int eqr_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
@@ -428,6 +435,10 @@ static int eqr_probe(struct auxiliary_device *adev,
if (!dev->of_node)
return -ENODEV;
+ ret = devm_add_action_or_reset(dev, eqr_of_node_put, dev);
+ if (ret)
+ return ret;
+
/*
* Using our newfound OF node, we can get match data. We cannot use
* device_get_match_data() because it does not match reused OF nodes.
diff --git a/drivers/reset/reset-intel-gw.c b/drivers/reset/reset-intel-gw.c
index a5a01388ae7f..a5ce3350cb5e 100644
--- a/drivers/reset/reset-intel-gw.c
+++ b/drivers/reset/reset-intel-gw.c
@@ -40,7 +40,6 @@ static const struct regmap_config intel_rcu_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .fast_io = true,
};
/*
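A note on the recurring .fast_io removals in this series (here and in reset-qcom-pdc, reset-th1520, and rtc-meson below): for MMIO regmaps the flag appears redundant, since the regmap-mmio bus itself declares fast_io and regmap therefore uses spinlock-based locking either way; on that assumption, dropping the line changes no behavior.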
diff --git a/drivers/reset/reset-qcom-pdc.c b/drivers/reset/reset-qcom-pdc.c
index dce1fc1a68ad..ae2b5aba7a59 100644
--- a/drivers/reset/reset-qcom-pdc.c
+++ b/drivers/reset/reset-qcom-pdc.c
@@ -36,7 +36,6 @@ static const struct regmap_config pdc_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x20000,
- .fast_io = true,
};
static const struct qcom_pdc_reset_map sdm845_pdc_resets[] = {
diff --git a/drivers/reset/reset-th1520.c b/drivers/reset/reset-th1520.c
index 7874f0693e1b..14d964a9c6b6 100644
--- a/drivers/reset/reset-th1520.c
+++ b/drivers/reset/reset-th1520.c
@@ -14,10 +14,20 @@
/* register offset in VOSYS_REGMAP */
#define TH1520_GPU_RST_CFG 0x0
#define TH1520_GPU_RST_CFG_MASK GENMASK(1, 0)
+#define TH1520_DPU_RST_CFG 0x4
+#define TH1520_DSI0_RST_CFG 0x8
+#define TH1520_DSI1_RST_CFG 0xc
+#define TH1520_HDMI_RST_CFG 0x14
/* register values */
#define TH1520_GPU_SW_GPU_RST BIT(0)
#define TH1520_GPU_SW_CLKGEN_RST BIT(1)
+#define TH1520_DPU_SW_DPU_HRST BIT(0)
+#define TH1520_DPU_SW_DPU_ARST BIT(1)
+#define TH1520_DPU_SW_DPU_CRST BIT(2)
+#define TH1520_DSI_SW_DSI_PRST BIT(0)
+#define TH1520_HDMI_SW_MAIN_RST BIT(0)
+#define TH1520_HDMI_SW_PRST BIT(1)
struct th1520_reset_priv {
struct reset_controller_dev rcdev;
@@ -37,7 +47,35 @@ static const struct th1520_reset_map th1520_resets[] = {
[TH1520_RESET_ID_GPU_CLKGEN] = {
.bit = TH1520_GPU_SW_CLKGEN_RST,
.reg = TH1520_GPU_RST_CFG,
- }
+ },
+ [TH1520_RESET_ID_DPU_AHB] = {
+ .bit = TH1520_DPU_SW_DPU_HRST,
+ .reg = TH1520_DPU_RST_CFG,
+ },
+ [TH1520_RESET_ID_DPU_AXI] = {
+ .bit = TH1520_DPU_SW_DPU_ARST,
+ .reg = TH1520_DPU_RST_CFG,
+ },
+ [TH1520_RESET_ID_DPU_CORE] = {
+ .bit = TH1520_DPU_SW_DPU_CRST,
+ .reg = TH1520_DPU_RST_CFG,
+ },
+ [TH1520_RESET_ID_DSI0_APB] = {
+ .bit = TH1520_DSI_SW_DSI_PRST,
+ .reg = TH1520_DSI0_RST_CFG,
+ },
+ [TH1520_RESET_ID_DSI1_APB] = {
+ .bit = TH1520_DSI_SW_DSI_PRST,
+ .reg = TH1520_DSI1_RST_CFG,
+ },
+ [TH1520_RESET_ID_HDMI] = {
+ .bit = TH1520_HDMI_SW_MAIN_RST,
+ .reg = TH1520_HDMI_RST_CFG,
+ },
+ [TH1520_RESET_ID_HDMI_APB] = {
+ .bit = TH1520_HDMI_SW_PRST,
+ .reg = TH1520_HDMI_RST_CFG,
+ },
};
static inline struct th1520_reset_priv *
@@ -78,7 +116,6 @@ static const struct regmap_config th1520_reset_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .fast_io = true,
};
static int th1520_reset_probe(struct platform_device *pdev)
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index a2f9d85c7156..820a6ca5b1d7 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -1663,7 +1663,7 @@ static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid,
}
rpdev->ept = &channel->ept;
- strscpy_pad(rpdev->id.name, name, RPMSG_NAME_SIZE);
+ strscpy(rpdev->id.name, name);
rpdev->src = RPMSG_ADDR_ANY;
rpdev->dst = RPMSG_ADDR_ANY;
rpdev->ops = &glink_device_ops;
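The two-argument strscpy() used here is a macro form that infers the destination size at compile time when the destination is a fixed-size array. Unlike the strscpy_pad() it replaces, it does not zero the bytes after the NUL terminator, which is sufficient when only the terminated string is ever read back. A minimal sketch:

#include <linux/string.h>

static void strscpy_example(void)
{
	char name[16];

	/*
	 * Equivalent to strscpy(name, "rpmsg-raw", sizeof(name)):
	 * always NUL-terminates and returns -E2BIG on truncation.
	 */
	strscpy(name, "rpmsg-raw");
}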
diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
index 87c944d4b4f3..42594f5ee438 100644
--- a/drivers/rpmsg/qcom_smd.c
+++ b/drivers/rpmsg/qcom_smd.c
@@ -1089,7 +1089,7 @@ static int qcom_smd_create_device(struct qcom_smd_channel *channel)
/* Assign public information to the rpmsg_device */
rpdev = &qsdev->rpdev;
- strscpy_pad(rpdev->id.name, channel->name, RPMSG_NAME_SIZE);
+ strscpy(rpdev->id.name, channel->name);
rpdev->src = RPMSG_ADDR_ANY;
rpdev->dst = RPMSG_ADDR_ANY;
@@ -1368,7 +1368,7 @@ static int qcom_smd_parse_edge(struct device *dev,
edge->mbox_client.knows_txdone = true;
edge->mbox_chan = mbox_request_channel(&edge->mbox_client, 0);
if (IS_ERR(edge->mbox_chan)) {
- if (PTR_ERR(edge->mbox_chan) != -ENODEV) {
+ if (PTR_ERR(edge->mbox_chan) != -ENOENT) {
ret = dev_err_probe(dev, PTR_ERR(edge->mbox_chan),
"failed to acquire IPC mailbox\n");
goto put_node;
diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
index eec7642d2686..96fcdd2d7093 100644
--- a/drivers/rpmsg/rpmsg_char.c
+++ b/drivers/rpmsg/rpmsg_char.c
@@ -522,8 +522,10 @@ static void rpmsg_chrdev_remove(struct rpmsg_device *rpdev)
static struct rpmsg_device_id rpmsg_chrdev_id_table[] = {
{ .name = "rpmsg-raw" },
+ { .name = "rpmsg_chrdev" },
{ },
};
+MODULE_DEVICE_TABLE(rpmsg, rpmsg_chrdev_id_table);
static struct rpmsg_driver rpmsg_chrdev_driver = {
.probe = rpmsg_chrdev_probe,
@@ -565,6 +567,5 @@ static void rpmsg_chrdev_exit(void)
}
module_exit(rpmsg_chrdev_exit);
-MODULE_ALIAS("rpmsg:rpmsg_chrdev");
MODULE_DESCRIPTION("RPMSG device interface");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index bece5e635ee9..5d661681a9b6 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -479,7 +479,8 @@ static int rpmsg_dev_probe(struct device *dev)
struct rpmsg_endpoint *ept = NULL;
int err;
- err = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON);
+ err = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON |
+ PD_FLAG_DETACH_POWER_OFF);
if (err)
goto out;
@@ -538,8 +539,6 @@ static void rpmsg_dev_remove(struct device *dev)
if (rpdrv->remove)
rpdrv->remove(rpdev);
- dev_pm_domain_detach(dev, true);
-
if (rpdev->ept)
rpmsg_destroy_ept(rpdev->ept);
}
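PD_FLAG_DETACH_POWER_OFF presumably asks the PM domain core to power off and detach the domain automatically when the device is unbound; on that reading, the explicit dev_pm_domain_detach() in rpmsg_dev_remove() becomes redundant, and the hunk above drops it.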
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 64f6e9756aff..2933c41c77c8 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -406,6 +406,16 @@ config RTC_DRV_MAX77686
This driver can also be built as a module. If so, the module
will be called rtc-max77686.
+config RTC_DRV_SPACEMIT_P1
+ tristate "SpacemiT P1 RTC"
+ depends on ARCH_SPACEMIT || COMPILE_TEST
+ select MFD_SPACEMIT_P1
+ default ARCH_SPACEMIT
+ help
+ Enable support for the RTC function in the SpacemiT P1 PMIC.
+ This driver can also be built as a module, which will be called
+ "spacemit-p1-rtc".
+
config RTC_DRV_NCT3018Y
tristate "Nuvoton NCT3018Y"
depends on OF
@@ -416,6 +426,16 @@ config RTC_DRV_NCT3018Y
This driver can also be built as a module, if so, the module will be
called "rtc-nct3018y".
+config RTC_DRV_NCT6694
+ tristate "Nuvoton NCT6694 RTC support"
+ depends on MFD_NCT6694
+ help
+ If you say yes to this option, support will be included for the
+ Nuvoton NCT6694, a USB device that provides RTC functionality.
+
+ This driver can also be built as a module. If so, the module will
+ be called rtc-nct6694.
+
config RTC_DRV_RK808
tristate "Rockchip RK805/RK808/RK809/RK817/RK818 RTC"
depends on MFD_RK8XX
@@ -2034,20 +2054,6 @@ config RTC_DRV_RENESAS_RTCA3
This driver can also be built as a module, if so, the module
will be called "rtc-rtca3".
-comment "HID Sensor RTC drivers"
-
-config RTC_DRV_HID_SENSOR_TIME
- tristate "HID Sensor Time"
- depends on USB_HID
- depends on HID_SENSOR_HUB && IIO
- select HID_SENSOR_IIO_COMMON
- help
- Say yes here to build support for the HID Sensors of type Time.
- This drivers makes such sensors available as RTCs.
-
- If this driver is compiled as a module, it will be named
- rtc-hid-sensor-time.
-
config RTC_DRV_GOLDFISH
tristate "Goldfish Real Time Clock"
depends on HAS_IOMEM
@@ -2122,4 +2128,18 @@ config RTC_DRV_S32G
This RTC module can be used as a wakeup source.
Please note that it is not battery-powered.
+comment "HID Sensor RTC drivers"
+
+config RTC_DRV_HID_SENSOR_TIME
+ tristate "HID Sensor Time"
+ depends on USB_HID
+ depends on HID_SENSOR_HUB && IIO
+ select HID_SENSOR_IIO_COMMON
+ help
+ Say yes here to build support for the HID Sensors of type Time.
+ This driver makes such sensors available as RTCs.
+
+ If this driver is compiled as a module, it will be named
+ rtc-hid-sensor-time.
+
endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 789bddfea99d..8221bda6e6dc 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -119,6 +119,7 @@ obj-$(CONFIG_RTC_DRV_MXC) += rtc-mxc.o
obj-$(CONFIG_RTC_DRV_MXC_V2) += rtc-mxc_v2.o
obj-$(CONFIG_RTC_DRV_GAMECUBE) += rtc-gamecube.o
obj-$(CONFIG_RTC_DRV_NCT3018Y) += rtc-nct3018y.o
+obj-$(CONFIG_RTC_DRV_NCT6694) += rtc-nct6694.o
obj-$(CONFIG_RTC_DRV_NTXEC) += rtc-ntxec.o
obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o
obj-$(CONFIG_RTC_DRV_OPAL) += rtc-opal.o
@@ -171,6 +172,7 @@ obj-$(CONFIG_RTC_DRV_SD2405AL) += rtc-sd2405al.o
obj-$(CONFIG_RTC_DRV_SD3078) += rtc-sd3078.o
obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o
obj-$(CONFIG_RTC_DRV_SNVS) += rtc-snvs.o
+obj-$(CONFIG_RTC_DRV_SPACEMIT_P1) += rtc-spacemit-p1.o
obj-$(CONFIG_RTC_DRV_SPEAR) += rtc-spear.o
obj-$(CONFIG_RTC_DRV_STARFIRE) += rtc-starfire.o
obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index dc741ba29fa3..b8b298efd9a9 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -443,6 +443,29 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
else
err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
+ /*
+ * Check for the potential race described above. If we are waiting for
+ * the next second and the second just ticked since the check above,
+ * then either
+ *
+ * 1) it ticked after the alarm was set, and an alarm irq should be
+ * generated.
+ *
+ * 2) it ticked before the alarm was set, and an alarm irq most likely
+ * will not be generated.
+ *
+ * While we cannot easily check which of these two scenarios we are
+ * in, we can return -ETIME to signal that the timer has already
+ * expired, which is true in both cases.
+ */
+ if ((scheduled - now) <= 1) {
+ err = __rtc_read_time(rtc, &tm);
+ if (err)
+ return err;
+ now = rtc_tm_to_time64(&tm);
+ if (scheduled <= now)
+ return -ETIME;
+ }
+
trace_rtc_set_alarm(rtc_tm_to_time64(&alarm->time), err);
return err;
}
@@ -594,6 +617,10 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
rtc->uie_rtctimer.node.expires = ktime_add(now, onesec);
rtc->uie_rtctimer.period = ktime_set(1, 0);
err = rtc_timer_enqueue(rtc, &rtc->uie_rtctimer);
+ if (!err && rtc->ops && rtc->ops->alarm_irq_enable)
+ err = rtc->ops->alarm_irq_enable(rtc->dev.parent, 1);
+ if (err)
+ goto out;
} else {
rtc_timer_remove(rtc, &rtc->uie_rtctimer);
}
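To make the alarm race concrete with numbers: say the time was read as now = 1000 s and the alarm is scheduled for 1001 s, so scheduled - now = 1 and the new re-check triggers. If the RTC ticked to 1001 while ops->set_alarm() was still programming the hardware, re-reading the time yields now = 1001 >= scheduled, and __rtc_set_alarm() returns -ETIME; the caller learns the alarm has effectively expired instead of waiting for an interrupt that may never arrive.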
diff --git a/drivers/rtc/rtc-amlogic-a4.c b/drivers/rtc/rtc-amlogic-a4.c
index 09d78c2cc691..1928b29c1045 100644
--- a/drivers/rtc/rtc-amlogic-a4.c
+++ b/drivers/rtc/rtc-amlogic-a4.c
@@ -72,13 +72,6 @@ struct aml_rtc_data {
const struct aml_rtc_config *config;
};
-static const struct regmap_config aml_rtc_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = RTC_REAL_TIME,
-};
-
static inline u32 gray_to_binary(u32 gray)
{
u32 bcd = gray;
@@ -328,6 +321,13 @@ static int aml_rtc_probe(struct platform_device *pdev)
void __iomem *base;
int ret = 0;
+ const struct regmap_config aml_rtc_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = RTC_REAL_TIME,
+ };
+
rtc = devm_kzalloc(dev, sizeof(*rtc), GFP_KERNEL);
if (!rtc)
return -ENOMEM;
diff --git a/drivers/rtc/rtc-cpcap.c b/drivers/rtc/rtc-cpcap.c
index c170345ac076..8b6b35716f53 100644
--- a/drivers/rtc/rtc-cpcap.c
+++ b/drivers/rtc/rtc-cpcap.c
@@ -268,6 +268,7 @@ static int cpcap_rtc_probe(struct platform_device *pdev)
return err;
rtc->alarm_irq = platform_get_irq(pdev, 0);
+ rtc->alarm_enabled = true;
err = devm_request_threaded_irq(dev, rtc->alarm_irq, NULL,
cpcap_rtc_alarm_irq,
IRQF_TRIGGER_NONE | IRQF_ONESHOT,
diff --git a/drivers/rtc/rtc-efi.c b/drivers/rtc/rtc-efi.c
index fa8bf82df948..b4f44999ef0f 100644
--- a/drivers/rtc/rtc-efi.c
+++ b/drivers/rtc/rtc-efi.c
@@ -112,48 +112,6 @@ convert_from_efi_time(efi_time_t *eft, struct rtc_time *wtime)
return true;
}
-static int efi_read_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
-{
- efi_time_t eft;
- efi_status_t status;
-
- /*
- * As of EFI v1.10, this call always returns an unsupported status
- */
- status = efi.get_wakeup_time((efi_bool_t *)&wkalrm->enabled,
- (efi_bool_t *)&wkalrm->pending, &eft);
-
- if (status != EFI_SUCCESS)
- return -EINVAL;
-
- if (!convert_from_efi_time(&eft, &wkalrm->time))
- return -EIO;
-
- return rtc_valid_tm(&wkalrm->time);
-}
-
-static int efi_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
-{
- efi_time_t eft;
- efi_status_t status;
-
- convert_to_efi_time(&wkalrm->time, &eft);
-
- /*
- * XXX Fixme:
- * As of EFI 0.92 with the firmware I have on my
- * machine this call does not seem to work quite
- * right
- *
- * As of v1.10, this call always returns an unsupported status
- */
- status = efi.set_wakeup_time((efi_bool_t)wkalrm->enabled, &eft);
-
- dev_warn(dev, "write status is %d\n", (int)status);
-
- return status == EFI_SUCCESS ? 0 : -EINVAL;
-}
-
static int efi_read_time(struct device *dev, struct rtc_time *tm)
{
efi_status_t status;
@@ -188,17 +146,13 @@ static int efi_set_time(struct device *dev, struct rtc_time *tm)
static int efi_procfs(struct device *dev, struct seq_file *seq)
{
- efi_time_t eft, alm;
+ efi_time_t eft;
efi_time_cap_t cap;
- efi_bool_t enabled, pending;
- struct rtc_device *rtc = dev_get_drvdata(dev);
memset(&eft, 0, sizeof(eft));
- memset(&alm, 0, sizeof(alm));
memset(&cap, 0, sizeof(cap));
efi.get_time(&eft, &cap);
- efi.get_wakeup_time(&enabled, &pending, &alm);
seq_printf(seq,
"Time\t\t: %u:%u:%u.%09u\n"
@@ -214,26 +168,6 @@ static int efi_procfs(struct device *dev, struct seq_file *seq)
/* XXX fixme: convert to string? */
seq_printf(seq, "Timezone\t: %u\n", eft.timezone);
- if (test_bit(RTC_FEATURE_ALARM, rtc->features)) {
- seq_printf(seq,
- "Alarm Time\t: %u:%u:%u.%09u\n"
- "Alarm Date\t: %u-%u-%u\n"
- "Alarm Daylight\t: %u\n"
- "Enabled\t\t: %s\n"
- "Pending\t\t: %s\n",
- alm.hour, alm.minute, alm.second, alm.nanosecond,
- alm.year, alm.month, alm.day,
- alm.daylight,
- enabled == 1 ? "yes" : "no",
- pending == 1 ? "yes" : "no");
-
- if (alm.timezone == EFI_UNSPECIFIED_TIMEZONE)
- seq_puts(seq, "Timezone\t: unspecified\n");
- else
- /* XXX fixme: convert to string? */
- seq_printf(seq, "Timezone\t: %u\n", alm.timezone);
- }
-
/*
* now prints the capabilities
*/
@@ -249,8 +183,6 @@ static int efi_procfs(struct device *dev, struct seq_file *seq)
static const struct rtc_class_ops efi_rtc_ops = {
.read_time = efi_read_time,
.set_time = efi_set_time,
- .read_alarm = efi_read_alarm,
- .set_alarm = efi_set_alarm,
.proc = efi_procfs,
};
@@ -271,11 +203,7 @@ static int __init efi_rtc_probe(struct platform_device *dev)
platform_set_drvdata(dev, rtc);
rtc->ops = &efi_rtc_ops;
- clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
- if (efi_rt_services_supported(EFI_RT_SUPPORTED_WAKEUP_SERVICES))
- set_bit(RTC_FEATURE_ALARM_WAKEUP_ONLY, rtc->features);
- else
- clear_bit(RTC_FEATURE_ALARM, rtc->features);
+ clear_bit(RTC_FEATURE_ALARM, rtc->features);
device_init_wakeup(&dev->dev, true);
diff --git a/drivers/rtc/rtc-isl12022.c b/drivers/rtc/rtc-isl12022.c
index 9b44839a7402..5fc52dc64213 100644
--- a/drivers/rtc/rtc-isl12022.c
+++ b/drivers/rtc/rtc-isl12022.c
@@ -413,6 +413,7 @@ static int isl12022_setup_irq(struct device *dev, int irq)
if (ret)
return ret;
+ isl12022->irq_enabled = true;
ret = devm_request_threaded_irq(dev, irq, NULL,
isl12022_rtc_interrupt,
IRQF_SHARED | IRQF_ONESHOT,
diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c
index e7b87130e624..2494d13fd767 100644
--- a/drivers/rtc/rtc-mc13xxx.c
+++ b/drivers/rtc/rtc-mc13xxx.c
@@ -137,10 +137,6 @@ static int mc13xxx_rtc_set_time(struct device *dev, struct rtc_time *tm)
}
if (!priv->valid) {
- ret = mc13xxx_irq_ack(priv->mc13xxx, MC13XXX_IRQ_RTCRST);
- if (unlikely(ret))
- goto out;
-
ret = mc13xxx_irq_unmask(priv->mc13xxx, MC13XXX_IRQ_RTCRST);
}
@@ -208,10 +204,6 @@ static int mc13xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
if (unlikely(ret))
goto out;
- ret = mc13xxx_irq_ack(priv->mc13xxx, MC13XXX_IRQ_TODA);
- if (unlikely(ret))
- goto out;
-
s1970 = rtc_tm_to_time64(&alarm->time);
dev_dbg(dev, "%s: %s %lld\n", __func__, alarm->enabled ? "on" : "off",
@@ -239,12 +231,9 @@ out:
static irqreturn_t mc13xxx_rtc_alarm_handler(int irq, void *dev)
{
struct mc13xxx_rtc *priv = dev;
- struct mc13xxx *mc13xxx = priv->mc13xxx;
rtc_update_irq(priv->rtc, 1, RTC_IRQF | RTC_AF);
- mc13xxx_irq_ack(mc13xxx, irq);
-
return IRQ_HANDLED;
}
@@ -293,8 +282,6 @@ static int __init mc13xxx_rtc_probe(struct platform_device *pdev)
mc13xxx_lock(mc13xxx);
- mc13xxx_irq_ack(mc13xxx, MC13XXX_IRQ_RTCRST);
-
ret = mc13xxx_irq_request(mc13xxx, MC13XXX_IRQ_RTCRST,
mc13xxx_rtc_reset_handler, DRIVER_NAME, priv);
if (ret)
diff --git a/drivers/rtc/rtc-meson.c b/drivers/rtc/rtc-meson.c
index 47e9ebf58ffc..21eceb9e2e13 100644
--- a/drivers/rtc/rtc-meson.c
+++ b/drivers/rtc/rtc-meson.c
@@ -72,7 +72,6 @@ static const struct regmap_config meson_rtc_peripheral_regmap_config = {
.val_bits = 32,
.reg_stride = 4,
.max_register = RTC_REG4,
- .fast_io = true,
};
/* RTC front-end serialiser controls */
diff --git a/drivers/rtc/rtc-nct6694.c b/drivers/rtc/rtc-nct6694.c
new file mode 100644
index 000000000000..35401a0d9cf5
--- /dev/null
+++ b/drivers/rtc/rtc-nct6694.c
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Nuvoton NCT6694 RTC driver based on USB interface.
+ *
+ * Copyright (C) 2025 Nuvoton Technology Corp.
+ */
+
+#include <linux/bcd.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/mfd/nct6694.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+
+/*
+ * USB command module type for the NCT6694 RTC controller, used when
+ * communicating with the controller over the USB interface.
+ */
+#define NCT6694_RTC_MOD 0x08
+
+/* Command 00h - RTC Time */
+#define NCT6694_RTC_TIME 0x00
+#define NCT6694_RTC_TIME_SEL 0x00
+
+/* Command 01h - RTC Alarm */
+#define NCT6694_RTC_ALARM 0x01
+#define NCT6694_RTC_ALARM_SEL 0x00
+
+/* Command 02h - RTC Status */
+#define NCT6694_RTC_STATUS 0x02
+#define NCT6694_RTC_STATUS_SEL 0x00
+
+#define NCT6694_RTC_IRQ_INT_EN BIT(0) /* Send a USB INT-in transfer when the RTC alarm fires */
+#define NCT6694_RTC_IRQ_GPO_EN BIT(5) /* Trigger a GPO low pulse when the RTC alarm fires */
+
+#define NCT6694_RTC_IRQ_EN (NCT6694_RTC_IRQ_INT_EN | NCT6694_RTC_IRQ_GPO_EN)
+#define NCT6694_RTC_IRQ_STS BIT(0) /* Write 1 to clear the IRQ status */
+
+struct __packed nct6694_rtc_time {
+ u8 sec;
+ u8 min;
+ u8 hour;
+ u8 week;
+ u8 day;
+ u8 month;
+ u8 year;
+};
+
+struct __packed nct6694_rtc_alarm {
+ u8 sec;
+ u8 min;
+ u8 hour;
+ u8 alarm_en;
+ u8 alarm_pend;
+};
+
+struct __packed nct6694_rtc_status {
+ u8 irq_en;
+ u8 irq_pend;
+};
+
+union __packed nct6694_rtc_msg {
+ struct nct6694_rtc_time time;
+ struct nct6694_rtc_alarm alarm;
+ struct nct6694_rtc_status sts;
+};
+
+struct nct6694_rtc_data {
+ struct nct6694 *nct6694;
+ struct rtc_device *rtc;
+ union nct6694_rtc_msg *msg;
+ int irq;
+};
+
+static int nct6694_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct nct6694_rtc_data *data = dev_get_drvdata(dev);
+ struct nct6694_rtc_time *time = &data->msg->time;
+ static const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_RTC_MOD,
+ .cmd = NCT6694_RTC_TIME,
+ .sel = NCT6694_RTC_TIME_SEL,
+ .len = cpu_to_le16(sizeof(*time))
+ };
+ int ret;
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, time);
+ if (ret)
+ return ret;
+
+ tm->tm_sec = bcd2bin(time->sec); /* tm_sec range: 0-59 */
+ tm->tm_min = bcd2bin(time->min); /* tm_min range: 0-59 */
+ tm->tm_hour = bcd2bin(time->hour); /* tm_hour range: 0-23 */
+ tm->tm_wday = bcd2bin(time->week) - 1; /* tm_wday range: 0-6 */
+ tm->tm_mday = bcd2bin(time->day); /* tm_mday range: 1-31 */
+ tm->tm_mon = bcd2bin(time->month) - 1; /* tm_mon range: 0-11 */
+ tm->tm_year = bcd2bin(time->year) + 100; /* tm_year: years since 1900 */
+
+ return ret;
+}
+
+static int nct6694_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct nct6694_rtc_data *data = dev_get_drvdata(dev);
+ struct nct6694_rtc_time *time = &data->msg->time;
+ static const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_RTC_MOD,
+ .cmd = NCT6694_RTC_TIME,
+ .sel = NCT6694_RTC_TIME_SEL,
+ .len = cpu_to_le16(sizeof(*time))
+ };
+
+ time->sec = bin2bcd(tm->tm_sec);
+ time->min = bin2bcd(tm->tm_min);
+ time->hour = bin2bcd(tm->tm_hour);
+ time->week = bin2bcd(tm->tm_wday + 1);
+ time->day = bin2bcd(tm->tm_mday);
+ time->month = bin2bcd(tm->tm_mon + 1);
+ time->year = bin2bcd(tm->tm_year - 100);
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd, time);
+}
+
+static int nct6694_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct nct6694_rtc_data *data = dev_get_drvdata(dev);
+ struct nct6694_rtc_alarm *alarm = &data->msg->alarm;
+ static const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_RTC_MOD,
+ .cmd = NCT6694_RTC_ALARM,
+ .sel = NCT6694_RTC_ALARM_SEL,
+ .len = cpu_to_le16(sizeof(*alarm))
+ };
+ int ret;
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, alarm);
+ if (ret)
+ return ret;
+
+ alrm->time.tm_sec = bcd2bin(alarm->sec);
+ alrm->time.tm_min = bcd2bin(alarm->min);
+ alrm->time.tm_hour = bcd2bin(alarm->hour);
+ alrm->enabled = alarm->alarm_en;
+ alrm->pending = alarm->alarm_pend;
+
+ return ret;
+}
+
+static int nct6694_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct nct6694_rtc_data *data = dev_get_drvdata(dev);
+ struct nct6694_rtc_alarm *alarm = &data->msg->alarm;
+ static const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_RTC_MOD,
+ .cmd = NCT6694_RTC_ALARM,
+ .sel = NCT6694_RTC_ALARM_SEL,
+ .len = cpu_to_le16(sizeof(*alarm))
+ };
+
+ alarm->sec = bin2bcd(alrm->time.tm_sec);
+ alarm->min = bin2bcd(alrm->time.tm_min);
+ alarm->hour = bin2bcd(alrm->time.tm_hour);
+ alarm->alarm_en = alrm->enabled ? NCT6694_RTC_IRQ_EN : 0;
+ alarm->alarm_pend = 0;
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd, alarm);
+}
+
+static int nct6694_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct nct6694_rtc_data *data = dev_get_drvdata(dev);
+ struct nct6694_rtc_status *sts = &data->msg->sts;
+ static const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_RTC_MOD,
+ .cmd = NCT6694_RTC_STATUS,
+ .sel = NCT6694_RTC_STATUS_SEL,
+ .len = cpu_to_le16(sizeof(*sts))
+ };
+
+ if (enabled)
+ sts->irq_en |= NCT6694_RTC_IRQ_EN;
+ else
+ sts->irq_en &= ~NCT6694_RTC_IRQ_EN;
+
+ sts->irq_pend = 0;
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd, sts);
+}
+
+static const struct rtc_class_ops nct6694_rtc_ops = {
+ .read_time = nct6694_rtc_read_time,
+ .set_time = nct6694_rtc_set_time,
+ .read_alarm = nct6694_rtc_read_alarm,
+ .set_alarm = nct6694_rtc_set_alarm,
+ .alarm_irq_enable = nct6694_rtc_alarm_irq_enable,
+};
+
+static irqreturn_t nct6694_irq(int irq, void *dev_id)
+{
+ struct nct6694_rtc_data *data = dev_id;
+ struct nct6694_rtc_status *sts = &data->msg->sts;
+ static const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_RTC_MOD,
+ .cmd = NCT6694_RTC_STATUS,
+ .sel = NCT6694_RTC_STATUS_SEL,
+ .len = cpu_to_le16(sizeof(*sts))
+ };
+ int ret;
+
+ rtc_lock(data->rtc);
+
+ sts->irq_en = NCT6694_RTC_IRQ_EN;
+ sts->irq_pend = NCT6694_RTC_IRQ_STS;
+ ret = nct6694_write_msg(data->nct6694, &cmd_hd, sts);
+ if (ret) {
+ rtc_unlock(data->rtc);
+ return IRQ_NONE;
+ }
+
+ rtc_update_irq(data->rtc, 1, RTC_IRQF | RTC_AF);
+
+ rtc_unlock(data->rtc);
+
+ return IRQ_HANDLED;
+}
+
+static void nct6694_irq_dispose_mapping(void *d)
+{
+ struct nct6694_rtc_data *data = d;
+
+ irq_dispose_mapping(data->irq);
+}
+
+static int nct6694_rtc_probe(struct platform_device *pdev)
+{
+ struct nct6694_rtc_data *data;
+ struct nct6694 *nct6694 = dev_get_drvdata(pdev->dev.parent);
+ int ret;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->msg = devm_kzalloc(&pdev->dev, sizeof(union nct6694_rtc_msg),
+ GFP_KERNEL);
+ if (!data->msg)
+ return -ENOMEM;
+
+ data->irq = irq_create_mapping(nct6694->domain, NCT6694_IRQ_RTC);
+ if (!data->irq)
+ return -EINVAL;
+
+ ret = devm_add_action_or_reset(&pdev->dev, nct6694_irq_dispose_mapping,
+ data);
+ if (ret)
+ return ret;
+
+ ret = devm_device_init_wakeup(&pdev->dev);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to init wakeup\n");
+
+ data->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(data->rtc))
+ return PTR_ERR(data->rtc);
+
+ data->nct6694 = nct6694;
+ data->rtc->ops = &nct6694_rtc_ops;
+ data->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ data->rtc->range_max = RTC_TIMESTAMP_END_2099;
+
+ platform_set_drvdata(pdev, data);
+
+ ret = devm_request_threaded_irq(&pdev->dev, data->irq, NULL,
+ nct6694_irq, IRQF_ONESHOT,
+ "rtc-nct6694", data);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Failed to request irq\n");
+
+ return devm_rtc_register_device(data->rtc);
+}
+
+static struct platform_driver nct6694_rtc_driver = {
+ .driver = {
+ .name = "nct6694-rtc",
+ },
+ .probe = nct6694_rtc_probe,
+};
+
+module_platform_driver(nct6694_rtc_driver);
+
+MODULE_DESCRIPTION("USB-RTC driver for NCT6694");
+MODULE_AUTHOR("Ming Yu <tmyu0@nuvoton.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:nct6694-rtc");
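
The nct6694 handlers above exchange every time field with the device in BCD,
via the bin2bcd()/bcd2bin() helpers from <linux/bcd.h>. As a quick reference,
here is a minimal userspace sketch of the arithmetic those helpers perform
(the encoding is standard packed BCD; everything outside the two one-line
conversions is illustrative):

#include <assert.h>
#include <stdio.h>

/* Same arithmetic as the kernel's bin2bcd(): one decimal digit per nibble. */
static unsigned char bin2bcd(unsigned char val)
{
	return ((val / 10) << 4) | (val % 10);		/* 59 -> 0x59 */
}

/* Inverse conversion, as the kernel's bcd2bin(). */
static unsigned char bcd2bin(unsigned char val)
{
	return (val & 0x0f) + (val >> 4) * 10;		/* 0x59 -> 59 */
}

int main(void)
{
	for (unsigned char v = 0; v < 100; v++)
		assert(bcd2bin(bin2bcd(v)) == v);	/* round-trips for 0..99 */
	printf("23:07:05 -> %02x:%02x:%02x\n",
	       bin2bcd(23), bin2bcd(7), bin2bcd(5));	/* prints 23:07:05 */
	return 0;
}
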
diff --git a/drivers/rtc/rtc-optee.c b/drivers/rtc/rtc-optee.c
index 9f8b5d4a8f6b..184c6d142801 100644
--- a/drivers/rtc/rtc-optee.c
+++ b/drivers/rtc/rtc-optee.c
@@ -5,19 +5,104 @@
#include <linux/device.h>
#include <linux/kernel.h>
+#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/tee_drv.h>
-#define RTC_INFO_VERSION 0x1
+#define RTC_INFO_VERSION 0x1
-#define TA_CMD_RTC_GET_INFO 0x0
-#define TA_CMD_RTC_GET_TIME 0x1
-#define TA_CMD_RTC_SET_TIME 0x2
-#define TA_CMD_RTC_GET_OFFSET 0x3
-#define TA_CMD_RTC_SET_OFFSET 0x4
+#define TA_RTC_FEATURE_CORRECTION BIT(0)
+#define TA_RTC_FEATURE_ALARM BIT(1)
+#define TA_RTC_FEATURE_WAKEUP_ALARM BIT(2)
-#define TA_RTC_FEATURE_CORRECTION BIT(0)
+enum rtc_optee_pta_cmd {
+	/*
+	 * PTA_CMD_RTC_GET_INFO - Get RTC information
+	 *
+	 * [out] memref[0] RTC buffer memory reference containing a struct pta_rtc_info
+	 */
+	PTA_CMD_RTC_GET_INFO = 0x0,
+
+ /*
+ * PTA_CMD_RTC_GET_TIME - Get time from RTC
+ *
+ * [out] memref[0] RTC buffer memory reference containing a struct pta_rtc_time
+ */
+ PTA_CMD_RTC_GET_TIME = 0x1,
+
+ /*
+	 * PTA_CMD_RTC_SET_TIME - Set RTC time
+ *
+ * [in] memref[0] RTC buffer memory reference containing a struct pta_rtc_time to be
+ * used as RTC time
+ */
+ PTA_CMD_RTC_SET_TIME = 0x2,
+
+ /*
+ * PTA_CMD_RTC_GET_OFFSET - Get RTC offset
+ *
+ * [out] value[0].a RTC offset (signed 32bit value)
+ */
+ PTA_CMD_RTC_GET_OFFSET = 0x3,
+
+ /*
+ * PTA_CMD_RTC_SET_OFFSET - Set RTC offset
+ *
+ * [in] value[0].a RTC offset to be set (signed 32bit value)
+ */
+ PTA_CMD_RTC_SET_OFFSET = 0x4,
+
+ /*
+ * PTA_CMD_RTC_READ_ALARM - Read RTC alarm
+ *
+ * [out] memref[0] RTC buffer memory reference containing a struct pta_rtc_alarm
+ */
+ PTA_CMD_RTC_READ_ALARM = 0x5,
+
+ /*
+ * PTA_CMD_RTC_SET_ALARM - Set RTC alarm
+ *
+ * [in] memref[0] RTC buffer memory reference containing a struct pta_rtc_alarm to be
+ * used as RTC alarm
+ */
+ PTA_CMD_RTC_SET_ALARM = 0x6,
+
+ /*
+ * PTA_CMD_RTC_ENABLE_ALARM - Enable Alarm
+ *
+ * [in] value[0].a RTC IRQ flag (uint32_t), 0 to disable the alarm, 1 to enable
+ */
+ PTA_CMD_RTC_ENABLE_ALARM = 0x7,
+
+ /*
+ * PTA_CMD_RTC_WAIT_ALARM - Get alarm event
+ *
+ * [out] value[0].a RTC wait alarm return status (uint32_t):
+ * - 0: No alarm event
+ * - 1: Alarm event occurred
+ * - 2: Alarm event canceled
+ */
+ PTA_CMD_RTC_WAIT_ALARM = 0x8,
+
+ /*
+ * PTA_CMD_RTC_CANCEL_WAIT - Cancel wait for alarm event
+ */
+ PTA_CMD_RTC_CANCEL_WAIT = 0x9,
+
+ /*
+ * PTA_CMD_RTC_SET_WAKE_ALARM_STATUS - Set RTC wake alarm status flag
+ *
+ * [in] value[0].a RTC IRQ wake alarm flag (uint32_t), 0 to disable the wake up
+ * capability, 1 to enable.
+ */
+ PTA_CMD_RTC_SET_WAKE_ALARM_STATUS = 0xA,
+};
+
+enum rtc_wait_alarm_status {
+ WAIT_ALARM_RESET = 0x0,
+ WAIT_ALARM_ALARM_OCCURRED = 0x1,
+ WAIT_ALARM_CANCELED = 0x2,
+};
struct optee_rtc_time {
u32 tm_sec;
@@ -29,6 +114,12 @@ struct optee_rtc_time {
u32 tm_wday;
};
+struct optee_rtc_alarm {
+ u8 enabled;
+ u8 pending;
+ struct optee_rtc_time time;
+};
+
struct optee_rtc_info {
u64 version;
u64 features;
@@ -41,15 +132,21 @@ struct optee_rtc_info {
* @dev: OP-TEE based RTC device.
* @ctx: OP-TEE context handler.
* @session_id: RTC TA session identifier.
+ * @session2_id: RTC wait alarm session identifier.
* @shm: Memory pool shared with RTC device.
* @features: Bitfield of RTC features
+ * @alarm_task: RTC wait alarm task.
+ * @rtc: RTC device.
*/
struct optee_rtc {
struct device *dev;
struct tee_context *ctx;
u32 session_id;
+ u32 session2_id;
struct tee_shm *shm;
u64 features;
+ struct task_struct *alarm_task;
+ struct rtc_device *rtc;
};
static int optee_rtc_readtime(struct device *dev, struct rtc_time *tm)
@@ -60,7 +157,7 @@ static int optee_rtc_readtime(struct device *dev, struct rtc_time *tm)
struct tee_param param[4] = {0};
int ret;
- inv_arg.func = TA_CMD_RTC_GET_TIME;
+ inv_arg.func = PTA_CMD_RTC_GET_TIME;
inv_arg.session = priv->session_id;
inv_arg.num_params = 4;
@@ -97,19 +194,10 @@ static int optee_rtc_settime(struct device *dev, struct rtc_time *tm)
struct optee_rtc *priv = dev_get_drvdata(dev);
struct tee_ioctl_invoke_arg inv_arg = {0};
struct tee_param param[4] = {0};
- struct optee_rtc_time optee_tm;
- void *rtc_data;
+ struct optee_rtc_time *optee_tm;
int ret;
- optee_tm.tm_sec = tm->tm_sec;
- optee_tm.tm_min = tm->tm_min;
- optee_tm.tm_hour = tm->tm_hour;
- optee_tm.tm_mday = tm->tm_mday;
- optee_tm.tm_mon = tm->tm_mon;
- optee_tm.tm_year = tm->tm_year + 1900;
- optee_tm.tm_wday = tm->tm_wday;
-
- inv_arg.func = TA_CMD_RTC_SET_TIME;
+ inv_arg.func = PTA_CMD_RTC_SET_TIME;
inv_arg.session = priv->session_id;
inv_arg.num_params = 4;
@@ -117,11 +205,17 @@ static int optee_rtc_settime(struct device *dev, struct rtc_time *tm)
param[0].u.memref.shm = priv->shm;
param[0].u.memref.size = sizeof(struct optee_rtc_time);
- rtc_data = tee_shm_get_va(priv->shm, 0);
- if (IS_ERR(rtc_data))
- return PTR_ERR(rtc_data);
+ optee_tm = tee_shm_get_va(priv->shm, 0);
+ if (IS_ERR(optee_tm))
+ return PTR_ERR(optee_tm);
- memcpy(rtc_data, &optee_tm, sizeof(struct optee_rtc_time));
+ optee_tm->tm_min = tm->tm_min;
+ optee_tm->tm_sec = tm->tm_sec;
+ optee_tm->tm_hour = tm->tm_hour;
+ optee_tm->tm_mday = tm->tm_mday;
+ optee_tm->tm_mon = tm->tm_mon;
+ optee_tm->tm_year = tm->tm_year + 1900;
+ optee_tm->tm_wday = tm->tm_wday;
ret = tee_client_invoke_func(priv->ctx, &inv_arg, param);
if (ret < 0 || inv_arg.ret != 0)
@@ -140,7 +234,7 @@ static int optee_rtc_readoffset(struct device *dev, long *offset)
if (!(priv->features & TA_RTC_FEATURE_CORRECTION))
return -EOPNOTSUPP;
- inv_arg.func = TA_CMD_RTC_GET_OFFSET;
+ inv_arg.func = PTA_CMD_RTC_GET_OFFSET;
inv_arg.session = priv->session_id;
inv_arg.num_params = 4;
@@ -165,7 +259,7 @@ static int optee_rtc_setoffset(struct device *dev, long offset)
if (!(priv->features & TA_RTC_FEATURE_CORRECTION))
return -EOPNOTSUPP;
- inv_arg.func = TA_CMD_RTC_SET_OFFSET;
+ inv_arg.func = PTA_CMD_RTC_SET_OFFSET;
inv_arg.session = priv->session_id;
inv_arg.num_params = 4;
@@ -179,13 +273,228 @@ static int optee_rtc_setoffset(struct device *dev, long offset)
return 0;
}
+static int optee_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct optee_rtc *priv = dev_get_drvdata(dev);
+ struct tee_ioctl_invoke_arg inv_arg = {0};
+ struct optee_rtc_alarm *optee_alarm;
+ struct tee_param param[1] = {0};
+ int ret;
+
+ if (!(priv->features & TA_RTC_FEATURE_ALARM))
+ return -EOPNOTSUPP;
+
+ inv_arg.func = PTA_CMD_RTC_READ_ALARM;
+ inv_arg.session = priv->session_id;
+ inv_arg.num_params = 1;
+
+ /* Fill invoke cmd params */
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+ param[0].u.memref.shm = priv->shm;
+ param[0].u.memref.size = sizeof(struct optee_rtc_alarm);
+
+ ret = tee_client_invoke_func(priv->ctx, &inv_arg, param);
+ if (ret < 0 || inv_arg.ret != 0)
+ return ret ? ret : -EPROTO;
+
+ optee_alarm = tee_shm_get_va(priv->shm, 0);
+ if (IS_ERR(optee_alarm))
+ return PTR_ERR(optee_alarm);
+
+ if (param[0].u.memref.size != sizeof(*optee_alarm))
+ return -EPROTO;
+
+ alarm->enabled = optee_alarm->enabled;
+ alarm->pending = optee_alarm->pending;
+ alarm->time.tm_sec = optee_alarm->time.tm_sec;
+ alarm->time.tm_min = optee_alarm->time.tm_min;
+ alarm->time.tm_hour = optee_alarm->time.tm_hour;
+ alarm->time.tm_mday = optee_alarm->time.tm_mday;
+ alarm->time.tm_mon = optee_alarm->time.tm_mon;
+ alarm->time.tm_year = optee_alarm->time.tm_year - 1900;
+ alarm->time.tm_wday = optee_alarm->time.tm_wday;
+ alarm->time.tm_yday = rtc_year_days(alarm->time.tm_mday,
+ alarm->time.tm_mon,
+ alarm->time.tm_year);
+
+ return 0;
+}
+
+static int optee_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ struct optee_rtc *priv = dev_get_drvdata(dev);
+ struct tee_ioctl_invoke_arg inv_arg = {0};
+ struct optee_rtc_alarm *optee_alarm;
+ struct tee_param param[1] = {0};
+ int ret;
+
+ if (!(priv->features & TA_RTC_FEATURE_ALARM))
+ return -EOPNOTSUPP;
+
+ inv_arg.func = PTA_CMD_RTC_SET_ALARM;
+ inv_arg.session = priv->session_id;
+ inv_arg.num_params = 1;
+
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+ param[0].u.memref.shm = priv->shm;
+ param[0].u.memref.size = sizeof(struct optee_rtc_alarm);
+
+ optee_alarm = tee_shm_get_va(priv->shm, 0);
+ if (IS_ERR(optee_alarm))
+ return PTR_ERR(optee_alarm);
+
+ optee_alarm->enabled = alarm->enabled;
+ optee_alarm->pending = alarm->pending;
+ optee_alarm->time.tm_sec = alarm->time.tm_sec;
+ optee_alarm->time.tm_min = alarm->time.tm_min;
+ optee_alarm->time.tm_hour = alarm->time.tm_hour;
+ optee_alarm->time.tm_mday = alarm->time.tm_mday;
+ optee_alarm->time.tm_mon = alarm->time.tm_mon;
+ optee_alarm->time.tm_year = alarm->time.tm_year + 1900;
+ optee_alarm->time.tm_wday = alarm->time.tm_wday;
+
+ ret = tee_client_invoke_func(priv->ctx, &inv_arg, param);
+ if (ret < 0 || inv_arg.ret != 0)
+ return ret ? ret : -EPROTO;
+
+ return 0;
+}
+
+static int optee_rtc_enable_alarm(struct device *dev, unsigned int enabled)
+{
+ struct optee_rtc *priv = dev_get_drvdata(dev);
+ struct tee_ioctl_invoke_arg inv_arg = {0};
+ struct tee_param param[1] = {0};
+ int ret;
+
+ if (!(priv->features & TA_RTC_FEATURE_ALARM))
+ return -EOPNOTSUPP;
+
+ inv_arg.func = PTA_CMD_RTC_ENABLE_ALARM;
+ inv_arg.session = priv->session_id;
+ inv_arg.num_params = 1;
+
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+ param[0].u.value.a = (bool)enabled;
+
+ ret = tee_client_invoke_func(priv->ctx, &inv_arg, param);
+ if (ret < 0 || inv_arg.ret != 0)
+ return ret ? ret : -EPROTO;
+
+ return 0;
+}
+
static const struct rtc_class_ops optee_rtc_ops = {
- .read_time = optee_rtc_readtime,
- .set_time = optee_rtc_settime,
- .set_offset = optee_rtc_setoffset,
- .read_offset = optee_rtc_readoffset,
+ .read_time = optee_rtc_readtime,
+ .set_time = optee_rtc_settime,
+ .set_offset = optee_rtc_setoffset,
+ .read_offset = optee_rtc_readoffset,
+ .read_alarm = optee_rtc_read_alarm,
+ .set_alarm = optee_rtc_set_alarm,
+ .alarm_irq_enable = optee_rtc_enable_alarm,
};
+static int optee_rtc_wait_alarm(struct device *dev, int *return_status)
+{
+ struct optee_rtc *priv = dev_get_drvdata(dev);
+ struct tee_ioctl_invoke_arg inv_arg = {0};
+ struct tee_param param[1] = {0};
+ int ret;
+
+ if (!(priv->features & TA_RTC_FEATURE_ALARM))
+ return -EOPNOTSUPP;
+
+ inv_arg.func = PTA_CMD_RTC_WAIT_ALARM;
+ inv_arg.session = priv->session2_id;
+ inv_arg.num_params = 1;
+
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT;
+
+ ret = tee_client_invoke_func(priv->ctx, &inv_arg, param);
+ if (ret < 0 || inv_arg.ret != 0)
+ return ret ? ret : -EPROTO;
+
+ *return_status = param[0].u.value.a;
+
+ return 0;
+}
+
+static int optee_rtc_cancel_wait_alarm(struct device *dev)
+{
+ struct optee_rtc *priv = dev_get_drvdata(dev);
+ struct tee_ioctl_invoke_arg inv_arg = {0};
+ struct tee_param param[1] = {0};
+ int ret;
+
+ if (!(priv->features & TA_RTC_FEATURE_ALARM))
+ return -EOPNOTSUPP;
+
+ inv_arg.func = PTA_CMD_RTC_CANCEL_WAIT;
+ inv_arg.session = priv->session_id;
+ inv_arg.num_params = 0;
+
+ ret = tee_client_invoke_func(priv->ctx, &inv_arg, param);
+ if (ret < 0 || inv_arg.ret != 0)
+ return ret ? ret : -EPROTO;
+
+ return 0;
+}
+
+static int optee_rtc_set_alarm_wake_status(struct device *dev, bool status)
+{
+ struct optee_rtc *priv = dev_get_drvdata(dev);
+ struct tee_ioctl_invoke_arg inv_arg = {0};
+ struct tee_param param[1] = {0};
+ int ret;
+
+ if (!(priv->features & TA_RTC_FEATURE_ALARM))
+ return -EOPNOTSUPP;
+
+ inv_arg.func = PTA_CMD_RTC_SET_WAKE_ALARM_STATUS;
+ inv_arg.session = priv->session_id;
+ inv_arg.num_params = 1;
+
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+ param[0].u.value.a = status;
+
+ ret = tee_client_invoke_func(priv->ctx, &inv_arg, param);
+
+ if (ret < 0 || inv_arg.ret != 0)
+ return ret ? ret : -EPROTO;
+
+ return 0;
+}
+
+static int optee_rtc_handle_alarm_event(void *data)
+{
+ struct optee_rtc *priv = (struct optee_rtc *)data;
+ int wait_alarm_return_status = 0;
+ int ret;
+
+ while (!kthread_should_stop()) {
+ ret = optee_rtc_wait_alarm(priv->dev, &wait_alarm_return_status);
+ if (ret) {
+ dev_err(priv->dev, "Failed to wait for alarm: %d\n", ret);
+ return ret;
+ }
+ switch (wait_alarm_return_status) {
+ case WAIT_ALARM_ALARM_OCCURRED:
+ dev_dbg(priv->dev, "Alarm occurred\n");
+ rtc_update_irq(priv->rtc, 1, RTC_IRQF | RTC_AF);
+ break;
+ case WAIT_ALARM_CANCELED:
+ dev_dbg(priv->dev, "Alarm canceled\n");
+ break;
+ default:
+ dev_warn(priv->dev, "Unknown return status: %d\n",
+ wait_alarm_return_status);
+ break;
+ }
+ }
+
+ return 0;
+}
+
static int optee_rtc_read_info(struct device *dev, struct rtc_device *rtc,
u64 *features)
{
@@ -196,7 +505,7 @@ static int optee_rtc_read_info(struct device *dev, struct rtc_device *rtc,
struct optee_rtc_time *tm;
int ret;
- inv_arg.func = TA_CMD_RTC_GET_INFO;
+	inv_arg.func = PTA_CMD_RTC_GET_INFO;
inv_arg.session = priv->session_id;
inv_arg.num_params = 4;
@@ -241,14 +550,13 @@ static int optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
static int optee_rtc_probe(struct device *dev)
{
struct tee_client_device *rtc_device = to_tee_client_device(dev);
- struct tee_ioctl_open_session_arg sess_arg;
+ struct tee_ioctl_open_session_arg sess2_arg = {0};
+ struct tee_ioctl_open_session_arg sess_arg = {0};
struct optee_rtc *priv;
struct rtc_device *rtc;
struct tee_shm *shm;
int ret, err;
- memset(&sess_arg, 0, sizeof(sess_arg));
-
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -257,12 +565,14 @@ static int optee_rtc_probe(struct device *dev)
if (IS_ERR(rtc))
return PTR_ERR(rtc);
+ priv->rtc = rtc;
+
/* Open context with TEE driver */
priv->ctx = tee_client_open_context(NULL, optee_ctx_match, NULL, NULL);
if (IS_ERR(priv->ctx))
return -ENODEV;
- /* Open session with rtc Trusted App */
+ /* Open first session with rtc Pseudo Trusted App */
export_uuid(sess_arg.uuid, &rtc_device->id.uuid);
sess_arg.clnt_login = TEE_IOCTL_LOGIN_REE_KERNEL;
@@ -274,6 +584,11 @@ static int optee_rtc_probe(struct device *dev)
}
priv->session_id = sess_arg.session;
+ /*
+	 * Shared memory is used for passing an instance of struct optee_rtc_info,
+	 * struct optee_rtc_time or struct optee_rtc_alarm to the OP-TEE service.
+	 * struct optee_rtc_info is by definition the largest, so sizing the
+	 * buffer to it covers all three cases.
+ */
shm = tee_shm_alloc_kernel_buf(priv->ctx, sizeof(struct optee_rtc_info));
if (IS_ERR(shm)) {
dev_err(priv->dev, "tee_shm_alloc_kernel_buf failed\n");
@@ -293,19 +608,70 @@ static int optee_rtc_probe(struct device *dev)
goto out_shm;
}
+	/* Handle feature-related setup before registering with the rtc framework */
+ if (priv->features & TA_RTC_FEATURE_ALARM) {
+ priv->alarm_task = kthread_create(optee_rtc_handle_alarm_event,
+ priv, "rtc_alarm_evt");
+ if (IS_ERR(priv->alarm_task)) {
+ dev_err(dev, "Failed to create alarm thread\n");
+ err = PTR_ERR(priv->alarm_task);
+ goto out_shm;
+ }
+
+ /*
+		 * If the alarm feature is supported on the optee side, we create a
+		 * kthread that, in a new optee session, calls the PTA interface
+		 * "rtc_wait_alarm". This call returns when an alarm occurs or when
+		 * the wait is canceled. A second optee session is needed because
+		 * the same session cannot be used for parallel calls to the optee
+		 * PTA: one session is reserved to wait for alarms, the other makes
+		 * standard calls to the RTC PTA.
+ */
+
+		/* Open second session with rtc Pseudo Trusted App */
+ export_uuid(sess2_arg.uuid, &rtc_device->id.uuid);
+ sess2_arg.clnt_login = TEE_IOCTL_LOGIN_REE_KERNEL;
+
+ ret = tee_client_open_session(priv->ctx, &sess2_arg, NULL);
+ if (ret < 0 || sess2_arg.ret != 0) {
+ dev_err(dev, "tee_client_open_session failed, err: %x\n", sess2_arg.ret);
+ err = -EINVAL;
+ goto out_thrd;
+ }
+ priv->session2_id = sess2_arg.session;
+
+ if (priv->features & TA_RTC_FEATURE_WAKEUP_ALARM)
+ device_init_wakeup(dev, true);
+ }
+
err = devm_rtc_register_device(rtc);
if (err)
- goto out_shm;
+ goto out_wk;
/*
- * We must clear this bit after registering because rtc_register_device
- * will set it if it sees that .set_offset is provided.
+	 * We must clear these bits after registering, because registering an
+	 * rtc_device sets them when it sees that .set_offset and .set_alarm are
+	 * provided.
*/
if (!(priv->features & TA_RTC_FEATURE_CORRECTION))
clear_bit(RTC_FEATURE_CORRECTION, rtc->features);
+ if (!(priv->features & TA_RTC_FEATURE_ALARM))
+ clear_bit(RTC_FEATURE_ALARM, rtc->features);
- return 0;
+	/* Start the thread after the rtc is set up */
+ if (priv->alarm_task) {
+ wake_up_process(priv->alarm_task);
+ dev_dbg(dev, "Wait alarm thread successfully started\n");
+ }
+ return 0;
+out_wk:
+ if (priv->features & TA_RTC_FEATURE_ALARM) {
+ device_init_wakeup(dev, false);
+ tee_client_close_session(priv->ctx, priv->session2_id);
+ }
+out_thrd:
+ if (priv->features & TA_RTC_FEATURE_ALARM)
+ kthread_stop(priv->alarm_task);
out_shm:
tee_shm_free(priv->shm);
out_sess:
@@ -320,12 +686,34 @@ static int optee_rtc_remove(struct device *dev)
{
struct optee_rtc *priv = dev_get_drvdata(dev);
+ if (priv->features & TA_RTC_FEATURE_ALARM) {
+ optee_rtc_cancel_wait_alarm(dev);
+ kthread_stop(priv->alarm_task);
+ device_init_wakeup(dev, false);
+ tee_client_close_session(priv->ctx, priv->session2_id);
+ }
+
+ tee_shm_free(priv->shm);
tee_client_close_session(priv->ctx, priv->session_id);
tee_client_close_context(priv->ctx);
return 0;
}
+static int optee_rtc_suspend(struct device *dev)
+{
+ int res = optee_rtc_set_alarm_wake_status(dev, device_may_wakeup(dev));
+
+ if (res) {
+ dev_err(dev, "Unable to transmit wakeup information to optee rtc\n");
+ return res;
+ }
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(optee_rtc_pm_ops, optee_rtc_suspend, NULL);
+
static const struct tee_client_device_id optee_rtc_id_table[] = {
{UUID_INIT(0xf389f8c8, 0x845f, 0x496c,
0x8b, 0xbe, 0xd6, 0x4b, 0xd2, 0x4c, 0x92, 0xfd)},
@@ -341,6 +729,7 @@ static struct tee_client_driver optee_rtc_driver = {
.bus = &tee_bus_type,
.probe = optee_rtc_probe,
.remove = optee_rtc_remove,
+ .pm = pm_sleep_ptr(&optee_rtc_pm_ops),
},
};
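
The probe comment above explains the design: PTA_CMD_RTC_WAIT_ALARM blocks
inside OP-TEE, so it gets a dedicated session, and teardown in
optee_rtc_remove() issues PTA_CMD_RTC_CANCEL_WAIT before kthread_stop() so
that the blocked call returns. A minimal userspace analogue of that
wait/cancel handshake, with a condition variable standing in for the PTA
call (the pthread code and all names below are illustrative, not the TEE
API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int event;	/* 0: none, 1: alarm occurred, 2: wait canceled */
static bool stop;	/* simplified: unsynchronized in this sketch */

/* Stand-in for the blocking PTA_CMD_RTC_WAIT_ALARM invocation. */
static int wait_alarm(void)
{
	int ev;

	pthread_mutex_lock(&lock);
	while (!event)
		pthread_cond_wait(&cond, &lock);
	ev = event;
	event = 0;
	pthread_mutex_unlock(&lock);
	return ev;
}

/* Stand-in for an alarm firing, or for PTA_CMD_RTC_CANCEL_WAIT. */
static void post_event(int ev)
{
	pthread_mutex_lock(&lock);
	event = ev;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

/* Mirrors optee_rtc_handle_alarm_event(): wait, dispatch, repeat. */
static void *alarm_thread(void *arg)
{
	while (!stop) {
		switch (wait_alarm()) {
		case 1:
			printf("alarm occurred\n");	/* rtc_update_irq() here */
			break;
		case 2:
			printf("wait canceled\n");
			break;
		}
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, alarm_thread, NULL);
	post_event(1);		/* an alarm fires */
	stop = true;		/* teardown: cancel first, then join */
	post_event(2);
	pthread_join(t, NULL);
	return 0;
}
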
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index 2e1ac0c42e93..bb4fe81d3d62 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -42,6 +42,7 @@
#define PCF2127_BIT_CTRL2_AF BIT(4)
#define PCF2127_BIT_CTRL2_TSF2 BIT(5)
#define PCF2127_BIT_CTRL2_WDTF BIT(6)
+#define PCF2127_BIT_CTRL2_MSF BIT(7)
/* Control register 3 */
#define PCF2127_REG_CTRL3 0x02
#define PCF2127_BIT_CTRL3_BLIE BIT(0)
@@ -96,7 +97,8 @@
#define PCF2127_CTRL2_IRQ_MASK ( \
PCF2127_BIT_CTRL2_AF | \
PCF2127_BIT_CTRL2_WDTF | \
- PCF2127_BIT_CTRL2_TSF2)
+ PCF2127_BIT_CTRL2_TSF2 | \
+ PCF2127_BIT_CTRL2_MSF)
#define PCF2127_MAX_TS_SUPPORTED 4
@@ -606,6 +608,21 @@ static int pcf2127_watchdog_init(struct device *dev, struct pcf2127 *pcf2127)
set_bit(WDOG_HW_RUNNING, &pcf2127->wdd.status);
}
+ /*
+ * When using interrupt pin (INT A) as watchdog output, only allow
+ * watchdog interrupt (PCF2131_BIT_INT_WD_CD) and disable (mask) all
+ * other interrupts.
+ */
+ if (pcf2127->cfg->type == PCF2131) {
+ ret = regmap_write(pcf2127->regmap,
+ PCF2131_REG_INT_A_MASK1,
+ PCF2131_BIT_INT_BLIE |
+ PCF2131_BIT_INT_BIE |
+ PCF2131_BIT_INT_AIE |
+ PCF2131_BIT_INT_SI |
+ PCF2131_BIT_INT_MI);
+ }
+
return devm_watchdog_register_device(dev, &pcf2127->wdd);
}
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 79b2a16f15ad..291c0ccb0acd 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -331,7 +331,7 @@ static const struct rtc_class_ops s3c_rtcops = {
.alarm_irq_enable = s3c_rtc_setaie,
};
-static void s3c24xx_rtc_enable(struct s3c_rtc *info)
+static void s3c6410_rtc_enable(struct s3c_rtc *info)
{
unsigned int con, tmp;
@@ -361,19 +361,6 @@ static void s3c24xx_rtc_enable(struct s3c_rtc *info)
}
}
-static void s3c24xx_rtc_disable(struct s3c_rtc *info)
-{
- unsigned int con;
-
- con = readw(info->base + S3C2410_RTCCON);
- con &= ~S3C2410_RTCCON_RTCEN;
- writew(con, info->base + S3C2410_RTCCON);
-
- con = readb(info->base + S3C2410_TICNT);
- con &= ~S3C2410_TICNT_ENABLE;
- writeb(con, info->base + S3C2410_TICNT);
-}
-
static void s3c6410_rtc_disable(struct s3c_rtc *info)
{
unsigned int con;
@@ -538,53 +525,21 @@ static int s3c_rtc_resume(struct device *dev)
#endif
static SIMPLE_DEV_PM_OPS(s3c_rtc_pm_ops, s3c_rtc_suspend, s3c_rtc_resume);
-static void s3c24xx_rtc_irq(struct s3c_rtc *info, int mask)
-{
- rtc_update_irq(info->rtc, 1, RTC_AF | RTC_IRQF);
-}
-
static void s3c6410_rtc_irq(struct s3c_rtc *info, int mask)
{
rtc_update_irq(info->rtc, 1, RTC_AF | RTC_IRQF);
writeb(mask, info->base + S3C2410_INTP);
}
-static const struct s3c_rtc_data s3c2410_rtc_data = {
- .irq_handler = s3c24xx_rtc_irq,
- .enable = s3c24xx_rtc_enable,
- .disable = s3c24xx_rtc_disable,
-};
-
-static const struct s3c_rtc_data s3c2416_rtc_data = {
- .irq_handler = s3c24xx_rtc_irq,
- .enable = s3c24xx_rtc_enable,
- .disable = s3c24xx_rtc_disable,
-};
-
-static const struct s3c_rtc_data s3c2443_rtc_data = {
- .irq_handler = s3c24xx_rtc_irq,
- .enable = s3c24xx_rtc_enable,
- .disable = s3c24xx_rtc_disable,
-};
-
static const struct s3c_rtc_data s3c6410_rtc_data = {
.needs_src_clk = true,
.irq_handler = s3c6410_rtc_irq,
- .enable = s3c24xx_rtc_enable,
+ .enable = s3c6410_rtc_enable,
.disable = s3c6410_rtc_disable,
};
static const __maybe_unused struct of_device_id s3c_rtc_dt_match[] = {
{
- .compatible = "samsung,s3c2410-rtc",
- .data = &s3c2410_rtc_data,
- }, {
- .compatible = "samsung,s3c2416-rtc",
- .data = &s3c2416_rtc_data,
- }, {
- .compatible = "samsung,s3c2443-rtc",
- .data = &s3c2443_rtc_data,
- }, {
.compatible = "samsung,s3c6410-rtc",
.data = &s3c6410_rtc_data,
}, {
diff --git a/drivers/rtc/rtc-s3c.h b/drivers/rtc/rtc-s3c.h
index 3552914aa611..11d7a1255ce4 100644
--- a/drivers/rtc/rtc-s3c.h
+++ b/drivers/rtc/rtc-s3c.h
@@ -21,25 +21,6 @@
#define S3C2443_RTCCON_TICSEL (1 << 4)
#define S3C64XX_RTCCON_TICEN (1 << 8)
-#define S3C2410_TICNT S3C2410_RTCREG(0x44)
-#define S3C2410_TICNT_ENABLE (1 << 7)
-
-/* S3C2443: tick count is 15 bit wide
- * TICNT[6:0] contains upper 7 bits
- * TICNT1[7:0] contains lower 8 bits
- */
-#define S3C2443_TICNT_PART(x) ((x & 0x7f00) >> 8)
-#define S3C2443_TICNT1 S3C2410_RTCREG(0x4C)
-#define S3C2443_TICNT1_PART(x) (x & 0xff)
-
-/* S3C2416: tick count is 32 bit wide
- * TICNT[6:0] contains bits [14:8]
- * TICNT1[7:0] contains lower 8 bits
- * TICNT2[16:0] contains upper 17 bits
- */
-#define S3C2416_TICNT2 S3C2410_RTCREG(0x48)
-#define S3C2416_TICNT2_PART(x) ((x & 0xffff8000) >> 15)
-
#define S3C2410_RTCALM S3C2410_RTCREG(0x50)
#define S3C2410_RTCALM_ALMEN (1 << 6)
#define S3C2410_RTCALM_YEAREN (1 << 5)
diff --git a/drivers/rtc/rtc-sd2405al.c b/drivers/rtc/rtc-sd2405al.c
index 00c3033e8079..708ea5d964de 100644
--- a/drivers/rtc/rtc-sd2405al.c
+++ b/drivers/rtc/rtc-sd2405al.c
@@ -5,7 +5,9 @@
* Datasheet:
* https://image.dfrobot.com/image/data/TOY0021/SD2405AL%20datasheet%20(Angelo%20v0.1).pdf
*
- * Copyright (C) 2024 Tóth János <gomba007@gmail.com>
+ * I2C slave address: 0x32
+ *
+ * Copyright (C) 2024-2025 Tóth János <gomba007@gmail.com>
*/
#include <linux/bcd.h>
diff --git a/drivers/rtc/rtc-spacemit-p1.c b/drivers/rtc/rtc-spacemit-p1.c
new file mode 100644
index 000000000000..43ab62494bb4
--- /dev/null
+++ b/drivers/rtc/rtc-spacemit-p1.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the RTC found in the SpacemiT P1 PMIC
+ *
+ * Copyright (C) 2025 by RISCstar Solutions Corporation. All rights reserved.
+ */
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/rtc.h>
+
+#define MOD_NAME "spacemit-p1-rtc"
+
+/*
+ * Six consecutive 1-byte registers hold the seconds, minutes, hours,
+ * day-of-month, month, and year (respectively).
+ *
+ * The range of values in these registers is:
+ * seconds 0-59
+ * minutes 0-59
+ * hours 0-59
+ *	hours	0-23
+ * month 0-11
+ * year years since 2000 (struct tm is since 1900)
+ *
+ * Note that the day and month must be converted after reading and
+ * before writing.
+ */
+#define RTC_TIME 0x0d /* Offset of the seconds register */
+
+#define RTC_CTRL 0x1d
+#define RTC_EN BIT(2)
+
+/* Number of attempts to read a consistent time stamp before giving up */
+#define RTC_READ_TRIES 20 /* At least 1 */
+
+struct p1_rtc {
+ struct regmap *regmap;
+ struct rtc_device *rtc;
+};
+
+/*
+ * The P1 hardware documentation states that the register values are
+ * latched to ensure a consistent time snapshot within the registers,
+ * but the values are in fact unstable due to a bug in the hardware
+ * design, so we loop until two successive reads return the same
+ * seconds value.
+ */
+static int p1_rtc_read_time(struct device *dev, struct rtc_time *t)
+{
+ struct p1_rtc *p1 = dev_get_drvdata(dev);
+ struct regmap *regmap = p1->regmap;
+ u32 count = RTC_READ_TRIES;
+ u8 seconds;
+ u8 time[6];
+ int ret;
+
+ if (!regmap_test_bits(regmap, RTC_CTRL, RTC_EN))
+ return -EINVAL; /* RTC is disabled */
+
+ ret = regmap_bulk_read(regmap, RTC_TIME, time, sizeof(time));
+ if (ret)
+ return ret;
+
+ do {
+ seconds = time[0];
+ ret = regmap_bulk_read(regmap, RTC_TIME, time, sizeof(time));
+ if (ret)
+ return ret;
+ } while (time[0] != seconds && --count);
+
+ if (!count)
+ return -EIO; /* Unable to get a consistent result */
+
+ t->tm_sec = time[0] & GENMASK(5, 0);
+ t->tm_min = time[1] & GENMASK(5, 0);
+ t->tm_hour = time[2] & GENMASK(4, 0);
+ t->tm_mday = (time[3] & GENMASK(4, 0)) + 1;
+ t->tm_mon = time[4] & GENMASK(3, 0);
+ t->tm_year = (time[5] & GENMASK(5, 0)) + 100;
+
+ return 0;
+}
+
+/*
+ * The P1 hardware documentation states that values in the registers are
+ * latched so when written they represent a consistent time snapshot.
+ * Nevertheless, this is not guaranteed by the implementation, so we must
+ * disable the RTC while updating it.
+ */
+static int p1_rtc_set_time(struct device *dev, struct rtc_time *t)
+{
+ struct p1_rtc *p1 = dev_get_drvdata(dev);
+ struct regmap *regmap = p1->regmap;
+ u8 time[6];
+ int ret;
+
+ time[0] = t->tm_sec;
+ time[1] = t->tm_min;
+ time[2] = t->tm_hour;
+ time[3] = t->tm_mday - 1;
+ time[4] = t->tm_mon;
+ time[5] = t->tm_year - 100;
+
+ /* Disable the RTC to update; re-enable again when done */
+ ret = regmap_clear_bits(regmap, RTC_CTRL, RTC_EN);
+ if (ret)
+ return ret;
+
+ /* If something goes wrong, leave the RTC disabled */
+ ret = regmap_bulk_write(regmap, RTC_TIME, time, sizeof(time));
+ if (ret)
+ return ret;
+
+ return regmap_set_bits(regmap, RTC_CTRL, RTC_EN);
+}
+
+static const struct rtc_class_ops p1_rtc_class_ops = {
+ .read_time = p1_rtc_read_time,
+ .set_time = p1_rtc_set_time,
+};
+
+static int p1_rtc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rtc_device *rtc;
+ struct p1_rtc *p1;
+
+ p1 = devm_kzalloc(dev, sizeof(*p1), GFP_KERNEL);
+ if (!p1)
+ return -ENOMEM;
+ dev_set_drvdata(dev, p1);
+
+ p1->regmap = dev_get_regmap(dev->parent, NULL);
+ if (!p1->regmap)
+ return dev_err_probe(dev, -ENODEV, "failed to get regmap\n");
+
+ rtc = devm_rtc_allocate_device(dev);
+ if (IS_ERR(rtc))
+ return dev_err_probe(dev, PTR_ERR(rtc),
+ "error allocating device\n");
+ p1->rtc = rtc;
+
+ rtc->ops = &p1_rtc_class_ops;
+ rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ rtc->range_max = RTC_TIMESTAMP_END_2063;
+
+ clear_bit(RTC_FEATURE_ALARM, rtc->features);
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
+
+ return devm_rtc_register_device(rtc);
+}
+
+static struct platform_driver p1_rtc_driver = {
+ .probe = p1_rtc_probe,
+ .driver = {
+ .name = MOD_NAME,
+ },
+};
+
+module_platform_driver(p1_rtc_driver);
+
+MODULE_DESCRIPTION("SpacemiT P1 RTC driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" MOD_NAME);
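
As a worked example of the register encoding described in the header comment
above, the following sketch packs a struct tm the way p1_rtc_set_time() does
and checks the two offset conversions (the layout follows the driver; the
standalone helper and the sample date are illustrative):

#include <assert.h>
#include <time.h>

/* Pack a struct tm into the six P1 time registers, as p1_rtc_set_time() does. */
static void tm_to_regs(const struct tm *t, unsigned char regs[6])
{
	regs[0] = t->tm_sec;
	regs[1] = t->tm_min;
	regs[2] = t->tm_hour;
	regs[3] = t->tm_mday - 1;	/* registers count the day from 0 */
	regs[4] = t->tm_mon;		/* struct tm months are already 0-11 */
	regs[5] = t->tm_year - 100;	/* registers count years from 2000 */
}

int main(void)
{
	/* 2025-09-18 23:07:05 */
	struct tm t = { .tm_sec = 5, .tm_min = 7, .tm_hour = 23,
			.tm_mday = 18, .tm_mon = 8, .tm_year = 125 };
	unsigned char regs[6];

	tm_to_regs(&t, regs);
	assert(regs[3] == 17);		/* day-of-month 18 is stored as 17 */
	assert(regs[5] == 25);		/* year 2025 is stored as 25 */
	return 0;
}
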
diff --git a/drivers/rtc/rtc-tps6586x.c b/drivers/rtc/rtc-tps6586x.c
index 54c8429b16bf..76ecf7b798f0 100644
--- a/drivers/rtc/rtc-tps6586x.c
+++ b/drivers/rtc/rtc-tps6586x.c
@@ -258,6 +258,7 @@ static int tps6586x_rtc_probe(struct platform_device *pdev)
irq_set_status_flags(rtc->irq, IRQ_NOAUTOEN);
+ rtc->irq_en = true;
ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
tps6586x_rtc_irq,
IRQF_ONESHOT,
diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c
index 4bcd7ca32f27..b8a0fccef14e 100644
--- a/drivers/rtc/rtc-x1205.c
+++ b/drivers/rtc/rtc-x1205.c
@@ -669,7 +669,7 @@ static const struct i2c_device_id x1205_id[] = {
MODULE_DEVICE_TABLE(i2c, x1205_id);
static const struct of_device_id x1205_dt_ids[] = {
- { .compatible = "xircom,x1205", },
+ { .compatible = "xicor,x1205", },
{},
};
MODULE_DEVICE_TABLE(of, x1205_dt_ids);
diff --git a/drivers/rtc/rtc-zynqmp.c b/drivers/rtc/rtc-zynqmp.c
index f39102b66eac..3baa2b481d9f 100644
--- a/drivers/rtc/rtc-zynqmp.c
+++ b/drivers/rtc/rtc-zynqmp.c
@@ -277,6 +277,10 @@ static irqreturn_t xlnx_rtc_interrupt(int irq, void *id)
static int xlnx_rtc_probe(struct platform_device *pdev)
{
struct xlnx_rtc_dev *xrtcdev;
+ bool is_alarm_set = false;
+ u32 pending_alrm_irq;
+ u32 current_time;
+ u32 alarm_time;
int ret;
xrtcdev = devm_kzalloc(&pdev->dev, sizeof(*xrtcdev), GFP_KERNEL);
@@ -296,6 +300,17 @@ static int xlnx_rtc_probe(struct platform_device *pdev)
if (IS_ERR(xrtcdev->reg_base))
return PTR_ERR(xrtcdev->reg_base);
+ /* Clear any pending alarm interrupts from previous kernel/boot */
+ pending_alrm_irq = readl(xrtcdev->reg_base + RTC_INT_STS) & RTC_INT_ALRM;
+ if (pending_alrm_irq)
+ writel(pending_alrm_irq, xrtcdev->reg_base + RTC_INT_STS);
+
+ /* Check if a valid alarm is already set from previous kernel/boot */
+ alarm_time = readl(xrtcdev->reg_base + RTC_ALRM);
+ current_time = readl(xrtcdev->reg_base + RTC_CUR_TM);
+ if (alarm_time > current_time && alarm_time != 0)
+ is_alarm_set = true;
+
xrtcdev->alarm_irq = platform_get_irq_byname(pdev, "alarm");
if (xrtcdev->alarm_irq < 0)
return xrtcdev->alarm_irq;
@@ -337,6 +352,10 @@ static int xlnx_rtc_probe(struct platform_device *pdev)
xlnx_init_rtc(xrtcdev);
+ /* Re-enable alarm interrupt if a valid alarm was found */
+ if (is_alarm_set)
+ writel(RTC_INT_ALRM, xrtcdev->reg_base + RTC_INT_EN);
+
device_init_wakeup(&pdev->dev, true);
return devm_rtc_register_device(xrtcdev->rtc);
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 8c1c908d2c6e..877a9bc7f04b 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -5,19 +5,11 @@ comment "S/390 block device drivers"
config DCSSBLK
def_tristate m
prompt "DCSSBLK support"
- depends on S390 && BLOCK && (DAX || DAX=n)
+ depends on S390 && BLOCK && ZONE_DEVICE
+ select FS_DAX
help
Support for dcss block device
-config DCSSBLK_DAX
- def_bool y
- depends on DCSSBLK
- # requires S390 ZONE_DEVICE support
- depends on BROKEN
- prompt "DCSSBLK DAX support"
- help
- Enable DAX operation for the dcss block device
-
config DASD
def_tristate y
prompt "Support for DASD devices"
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 506a947d00a5..7765e40f7cea 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -334,6 +334,11 @@ static int dasd_state_basic_to_ready(struct dasd_device *device)
lim.max_dev_sectors = device->discipline->max_sectors(block);
lim.max_hw_sectors = lim.max_dev_sectors;
lim.logical_block_size = block->bp_block;
+ /*
+	 * Set dma_alignment to logical_block_size - 1 so that the block
+	 * layer checks buffer alignment against the logical block size.
+ */
+ lim.dma_alignment = lim.logical_block_size - 1;
if (device->discipline->has_discard) {
unsigned int max_bytes;
@@ -3114,12 +3119,14 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
PTR_ERR(cqr) == -ENOMEM ||
PTR_ERR(cqr) == -EAGAIN) {
rc = BLK_STS_RESOURCE;
- goto out;
+ } else if (PTR_ERR(cqr) == -EINVAL) {
+ rc = BLK_STS_INVAL;
+ } else {
+ DBF_DEV_EVENT(DBF_ERR, basedev,
+ "CCW creation failed (rc=%ld) on request %p",
+ PTR_ERR(cqr), req);
+ rc = BLK_STS_IOERR;
}
- DBF_DEV_EVENT(DBF_ERR, basedev,
- "CCW creation failed (rc=%ld) on request %p",
- PTR_ERR(cqr), req);
- rc = BLK_STS_IOERR;
goto out;
}
/*
@@ -3317,11 +3324,11 @@ static void dasd_release(struct gendisk *disk)
/*
* Return disk geometry.
*/
-static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int dasd_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
struct dasd_device *base;
- base = dasd_device_from_gendisk(bdev->bd_disk);
+ base = dasd_device_from_gendisk(disk);
if (!base)
return -ENODEV;
@@ -3331,7 +3338,8 @@ static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return -EINVAL;
}
base->discipline->fill_geometry(base->block, geo);
- geo->start = get_start_sect(bdev) >> base->block->s2b_shift;
+	/* geo->start is left unchanged by the above */
+ geo->start >>= base->block->s2b_shift;
dasd_put_device(base);
return 0;
}
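
The dma_alignment limit set above is a bit mask: the block layer accepts a
data buffer when neither its address nor its length has any mask bits set,
so a mask of logical_block_size - 1 enforces alignment to the block size.
A small sketch of that mask check (the predicate follows the block layer's
mask-based alignment test; the helper and the values are illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* A buffer passes when neither address nor length has mask bits set. */
static bool dma_aligned(uintptr_t addr, size_t len, unsigned int mask)
{
	return ((addr | len) & mask) == 0;
}

int main(void)
{
	unsigned int mask = 4096 - 1;	/* logical_block_size - 1 */

	printf("%d\n", dma_aligned(0x10000, 8192, mask));	/* 1: aligned */
	printf("%d\n", dma_aligned(0x10200, 8192, mask));	/* 0: misaligned address */
	return 0;
}
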
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 94fa5edecadd..86fef4b15015 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -79,6 +79,8 @@ struct dcssblk_dev_info {
int num_of_segments;
struct list_head seg_list;
struct dax_device *dax_dev;
+ struct dev_pagemap pgmap;
+ void *pgmap_addr;
};
struct segment_info {
@@ -415,6 +417,8 @@ removeseg:
dax_remove_host(dev_info->gd);
kill_dax(dev_info->dax_dev);
put_dax(dev_info->dax_dev);
+ if (dev_info->pgmap_addr)
+ devm_memunmap_pages(&dev_info->dev, &dev_info->pgmap);
del_gendisk(dev_info->gd);
put_disk(dev_info->gd);
@@ -537,9 +541,6 @@ static int dcssblk_setup_dax(struct dcssblk_dev_info *dev_info)
{
struct dax_device *dax_dev;
- if (!IS_ENABLED(CONFIG_DCSSBLK_DAX))
- return 0;
-
dax_dev = alloc_dax(dev_info, &dcssblk_dax_ops);
if (IS_ERR(dax_dev))
return PTR_ERR(dax_dev);
@@ -562,6 +563,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
struct dcssblk_dev_info *dev_info;
struct segment_info *seg_info, *temp;
char *local_buf;
+ void *addr;
unsigned long seg_byte_size;
dev_info = NULL;
@@ -687,9 +689,26 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
if (rc)
goto put_dev;
- rc = dcssblk_setup_dax(dev_info);
- if (rc)
- goto out_dax;
+ if (!IS_ALIGNED(dev_info->start, SUBSECTION_SIZE) ||
+ !IS_ALIGNED(dev_info->end + 1, SUBSECTION_SIZE)) {
+ pr_info("DCSS %s is not aligned to %lu bytes, DAX support disabled\n",
+ local_buf, SUBSECTION_SIZE);
+ } else {
+ dev_info->pgmap.type = MEMORY_DEVICE_FS_DAX;
+ dev_info->pgmap.range.start = dev_info->start;
+ dev_info->pgmap.range.end = dev_info->end;
+ dev_info->pgmap.nr_range = 1;
+ addr = devm_memremap_pages(&dev_info->dev, &dev_info->pgmap);
+ if (IS_ERR(addr)) {
+ rc = PTR_ERR(addr);
+ goto put_dev;
+ }
+ dev_info->pgmap_addr = addr;
+ rc = dcssblk_setup_dax(dev_info);
+ if (rc)
+ goto out_dax;
+ pr_info("DAX support enabled for DCSS %s\n", local_buf);
+ }
get_device(&dev_info->dev);
rc = device_add_disk(&dev_info->dev, dev_info->gd, NULL);
@@ -716,6 +735,8 @@ out_dax_host:
out_dax:
kill_dax(dev_info->dax_dev);
put_dax(dev_info->dax_dev);
+ if (dev_info->pgmap_addr)
+ devm_memunmap_pages(&dev_info->dev, &dev_info->pgmap);
put_dev:
list_del(&dev_info->lh);
put_disk(dev_info->gd);
@@ -801,6 +822,8 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
dax_remove_host(dev_info->gd);
kill_dax(dev_info->dax_dev);
put_dax(dev_info->dax_dev);
+ if (dev_info->pgmap_addr)
+ devm_memunmap_pages(&dev_info->dev, &dev_info->pgmap);
del_gendisk(dev_info->gd);
put_disk(dev_info->gd);
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 81d6744e1861..dcbd51152ee3 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -21,6 +21,7 @@ obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
sclp_early.o sclp_early_core.o sclp_sd.o
+obj-$(CONFIG_MEMORY_HOTPLUG) += sclp_mem.o
obj-$(CONFIG_TN3270) += raw3270.o con3270.o
obj-$(CONFIG_TN3270_FS) += fs3270.o
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index b78b86e8f281..a367f95c7c53 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -970,8 +970,6 @@ static void tty3270_resize(struct raw3270_view *view,
char **old_rcl_lines, **new_rcl_lines;
char *old_prompt, *new_prompt;
char *old_input, *new_input;
- struct tty_struct *tty;
- struct winsize ws;
size_t prompt_sz;
int new_allocated, old_allocated = tp->allocated_lines;
@@ -1023,14 +1021,14 @@ static void tty3270_resize(struct raw3270_view *view,
kfree(old_prompt);
tty3270_free_recall(old_rcl_lines);
tty3270_set_timer(tp, 1);
- /* Informat tty layer about new size */
- tty = tty_port_tty_get(&tp->port);
- if (!tty)
- return;
- ws.ws_row = tty3270_tty_rows(tp);
- ws.ws_col = tp->view.cols;
- tty_do_resize(tty, &ws);
- tty_kref_put(tty);
+ /* Inform the tty layer about new size */
+ scoped_guard(tty_port_tty, &tp->port) {
+ struct winsize ws = {
+ .ws_row = tty3270_tty_rows(tp),
+ .ws_col = tp->view.cols,
+ };
+ tty_do_resize(scoped_tty(), &ws);
+ }
return;
out_screen:
tty3270_free_screen(screen, new_rows);
diff --git a/drivers/s390/char/hmcdrv_dev.c b/drivers/s390/char/hmcdrv_dev.c
index e069dd685899..b26fcf6849f2 100644
--- a/drivers/s390/char/hmcdrv_dev.c
+++ b/drivers/s390/char/hmcdrv_dev.c
@@ -244,24 +244,17 @@ static ssize_t hmcdrv_dev_write(struct file *fp, const char __user *ubuf,
size_t len, loff_t *pos)
{
ssize_t retlen;
+ void *pdata;
pr_debug("writing file '/dev/%pD' at pos. %lld with length %zd\n",
fp, (long long) *pos, len);
if (!fp->private_data) { /* first expect a cmd write */
- fp->private_data = kmalloc(len + 1, GFP_KERNEL);
-
- if (!fp->private_data)
- return -ENOMEM;
-
- if (!copy_from_user(fp->private_data, ubuf, len)) {
- ((char *)fp->private_data)[len] = '\0';
- return len;
- }
-
- kfree(fp->private_data);
- fp->private_data = NULL;
- return -EFAULT;
+ pdata = memdup_user_nul(ubuf, len);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ fp->private_data = pdata;
+ return len;
}
retlen = hmcdrv_dev_transfer((char *) fp->private_data,
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index f2e42c1d51aa..98e334724a62 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -77,6 +77,13 @@ unsigned long sclp_console_full;
/* The currently active SCLP command word. */
static sclp_cmdw_t active_cmd;
+static inline struct sccb_header *sclpint_to_sccb(u32 sccb_int)
+{
+ if (sccb_int)
+ return __va(sccb_int);
+ return NULL;
+}
+
static inline void sclp_trace(int prio, char *id, u32 a, u64 b, bool err)
{
struct sclp_trace_entry e;
@@ -620,7 +627,7 @@ __sclp_find_req(u32 sccb)
static bool ok_response(u32 sccb_int, sclp_cmdw_t cmd)
{
- struct sccb_header *sccb = (struct sccb_header *)__va(sccb_int);
+ struct sccb_header *sccb = sclpint_to_sccb(sccb_int);
struct evbuf_header *evbuf;
u16 response;
@@ -659,7 +666,7 @@ static void sclp_interrupt_handler(struct ext_code ext_code,
/* INT: Interrupt received (a=intparm, b=cmd) */
sclp_trace_sccb(0, "INT", param32, active_cmd, active_cmd,
- (struct sccb_header *)__va(finished_sccb),
+ sclpint_to_sccb(finished_sccb),
!ok_response(finished_sccb, active_cmd));
if (finished_sccb) {
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 16469678548f..3480198eac02 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -8,31 +8,46 @@
#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-#include <linux/cpufeature.h>
#include <linux/completion.h>
-#include <linux/init.h>
-#include <linux/errno.h>
#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/init.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/mmzone.h>
-#include <linux/memory.h>
-#include <linux/memory_hotplug.h>
-#include <linux/module.h>
-#include <asm/ctlreg.h>
#include <asm/chpid.h>
-#include <asm/setup.h>
-#include <asm/page.h>
+#include <asm/ctlreg.h>
#include <asm/sclp.h>
-#include <asm/numa.h>
-#include <asm/facility.h>
-#include <asm/page-states.h>
#include "sclp.h"
-#define SCLP_CMDW_ASSIGN_STORAGE 0x000d0001
-#define SCLP_CMDW_UNASSIGN_STORAGE 0x000c0001
+/* CPU configuration related functions */
+#define SCLP_CMDW_CONFIGURE_CPU 0x00110001
+#define SCLP_CMDW_DECONFIGURE_CPU 0x00100001
+/* Channel path configuration related functions */
+#define SCLP_CMDW_CONFIGURE_CHPATH 0x000f0001
+#define SCLP_CMDW_DECONFIGURE_CHPATH 0x000e0001
+#define SCLP_CMDW_READ_CHPATH_INFORMATION 0x00030001
+
+struct cpu_configure_sccb {
+ struct sccb_header header;
+} __packed __aligned(8);
+
+struct chp_cfg_sccb {
+ struct sccb_header header;
+ u8 ccm;
+ u8 reserved[6];
+ u8 cssid;
+} __packed;
+
+struct chp_info_sccb {
+ struct sccb_header header;
+ u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
+ u8 standby[SCLP_CHP_INFO_MASK_SIZE];
+ u8 configured[SCLP_CHP_INFO_MASK_SIZE];
+ u8 ccm;
+ u8 reserved[6];
+ u8 cssid;
+} __packed;
static void sclp_sync_callback(struct sclp_req *req, void *data)
{
@@ -64,13 +79,11 @@ int sclp_sync_request_timeout(sclp_cmdw_t cmd, void *sccb, int timeout)
request->callback_data = &completion;
init_completion(&completion);
- /* Perform sclp request. */
rc = sclp_add_request(request);
if (rc)
goto out;
wait_for_completion(&completion);
- /* Check response. */
if (request->status != SCLP_REQ_DONE) {
pr_warn("sync request failed (cmd=0x%08x, status=0x%02x)\n",
cmd, request->status);
@@ -81,22 +94,15 @@ out:
return rc;
}
-/*
- * CPU configuration related functions.
- */
-
-#define SCLP_CMDW_CONFIGURE_CPU 0x00110001
-#define SCLP_CMDW_DECONFIGURE_CPU 0x00100001
-
int _sclp_get_core_info(struct sclp_core_info *info)
{
- int rc;
- int length = test_facility(140) ? EXT_SCCB_READ_CPU : PAGE_SIZE;
struct read_cpu_info_sccb *sccb;
+ int rc, length;
if (!SCLP_HAS_CPU_INFO)
return -EOPNOTSUPP;
+ length = test_facility(140) ? EXT_SCCB_READ_CPU : PAGE_SIZE;
sccb = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA | __GFP_ZERO, get_order(length));
if (!sccb)
return -ENOMEM;
@@ -114,14 +120,10 @@ int _sclp_get_core_info(struct sclp_core_info *info)
}
sclp_fill_core_info(info, sccb);
out:
- free_pages((unsigned long) sccb, get_order(length));
+ free_pages((unsigned long)sccb, get_order(length));
return rc;
}
-struct cpu_configure_sccb {
- struct sccb_header header;
-} __attribute__((packed, aligned(8)));
-
static int do_core_configure(sclp_cmdw_t cmd)
{
struct cpu_configure_sccb *sccb;
@@ -130,8 +132,8 @@ static int do_core_configure(sclp_cmdw_t cmd)
if (!SCLP_HAS_CPU_RECONFIG)
return -EOPNOTSUPP;
/*
- * This is not going to cross a page boundary since we force
- * kmalloc to have a minimum alignment of 8 bytes on s390.
+	 * Use kmalloc so that the sccb gets a minimum alignment of 8 bytes
+	 * and therefore cannot cross a page boundary.
*/
sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
if (!sccb)
@@ -165,394 +167,6 @@ int sclp_core_deconfigure(u8 core)
return do_core_configure(SCLP_CMDW_DECONFIGURE_CPU | core << 8);
}
-#ifdef CONFIG_MEMORY_HOTPLUG
-
-static DEFINE_MUTEX(sclp_mem_mutex);
-static LIST_HEAD(sclp_mem_list);
-static u8 sclp_max_storage_id;
-static DECLARE_BITMAP(sclp_storage_ids, 256);
-
-struct memory_increment {
- struct list_head list;
- u16 rn;
- int standby;
-};
-
-struct assign_storage_sccb {
- struct sccb_header header;
- u16 rn;
-} __packed;
-
-int arch_get_memory_phys_device(unsigned long start_pfn)
-{
- if (!sclp.rzm)
- return 0;
- return PFN_PHYS(start_pfn) >> ilog2(sclp.rzm);
-}
-
-static unsigned long long rn2addr(u16 rn)
-{
- return (unsigned long long) (rn - 1) * sclp.rzm;
-}
-
-static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
-{
- struct assign_storage_sccb *sccb;
- int rc;
-
- sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!sccb)
- return -ENOMEM;
- sccb->header.length = PAGE_SIZE;
- sccb->rn = rn;
- rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
- if (rc)
- goto out;
- switch (sccb->header.response_code) {
- case 0x0020:
- case 0x0120:
- break;
- default:
- pr_warn("assign storage failed (cmd=0x%08x, response=0x%04x, rn=0x%04x)\n",
- cmd, sccb->header.response_code, rn);
- rc = -EIO;
- break;
- }
-out:
- free_page((unsigned long) sccb);
- return rc;
-}
-
-static int sclp_assign_storage(u16 rn)
-{
- unsigned long long start;
- int rc;
-
- rc = do_assign_storage(SCLP_CMDW_ASSIGN_STORAGE, rn);
- if (rc)
- return rc;
- start = rn2addr(rn);
- storage_key_init_range(start, start + sclp.rzm);
- return 0;
-}
-
-static int sclp_unassign_storage(u16 rn)
-{
- return do_assign_storage(SCLP_CMDW_UNASSIGN_STORAGE, rn);
-}
-
-struct attach_storage_sccb {
- struct sccb_header header;
- u16 :16;
- u16 assigned;
- u32 :32;
- u32 entries[];
-} __packed;
-
-static int sclp_attach_storage(u8 id)
-{
- struct attach_storage_sccb *sccb;
- int rc;
- int i;
-
- sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!sccb)
- return -ENOMEM;
- sccb->header.length = PAGE_SIZE;
- sccb->header.function_code = 0x40;
- rc = sclp_sync_request_timeout(0x00080001 | id << 8, sccb,
- SCLP_QUEUE_INTERVAL);
- if (rc)
- goto out;
- switch (sccb->header.response_code) {
- case 0x0020:
- set_bit(id, sclp_storage_ids);
- for (i = 0; i < sccb->assigned; i++) {
- if (sccb->entries[i])
- sclp_unassign_storage(sccb->entries[i] >> 16);
- }
- break;
- default:
- rc = -EIO;
- break;
- }
-out:
- free_page((unsigned long) sccb);
- return rc;
-}
-
-static int sclp_mem_change_state(unsigned long start, unsigned long size,
- int online)
-{
- struct memory_increment *incr;
- unsigned long long istart;
- int rc = 0;
-
- list_for_each_entry(incr, &sclp_mem_list, list) {
- istart = rn2addr(incr->rn);
- if (start + size - 1 < istart)
- break;
- if (start > istart + sclp.rzm - 1)
- continue;
- if (online)
- rc |= sclp_assign_storage(incr->rn);
- else
- sclp_unassign_storage(incr->rn);
- if (rc == 0)
- incr->standby = online ? 0 : 1;
- }
- return rc ? -EIO : 0;
-}
-
-static bool contains_standby_increment(unsigned long start, unsigned long end)
-{
- struct memory_increment *incr;
- unsigned long istart;
-
- list_for_each_entry(incr, &sclp_mem_list, list) {
- istart = rn2addr(incr->rn);
- if (end - 1 < istart)
- continue;
- if (start > istart + sclp.rzm - 1)
- continue;
- if (incr->standby)
- return true;
- }
- return false;
-}
-
-static int sclp_mem_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- unsigned long start, size;
- struct memory_notify *arg;
- unsigned char id;
- int rc = 0;
-
- arg = data;
- start = arg->start_pfn << PAGE_SHIFT;
- size = arg->nr_pages << PAGE_SHIFT;
- mutex_lock(&sclp_mem_mutex);
- for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
- sclp_attach_storage(id);
- switch (action) {
- case MEM_GOING_OFFLINE:
- /*
- * We do not allow to set memory blocks offline that contain
- * standby memory. This is done to simplify the "memory online"
- * case.
- */
- if (contains_standby_increment(start, start + size))
- rc = -EPERM;
- break;
- case MEM_PREPARE_ONLINE:
- /*
- * Access the altmap_start_pfn and altmap_nr_pages fields
- * within the struct memory_notify specifically when dealing
- * with only MEM_PREPARE_ONLINE/MEM_FINISH_OFFLINE notifiers.
- *
- * When altmap is in use, take the specified memory range
- * online, which includes the altmap.
- */
- if (arg->altmap_nr_pages) {
- start = PFN_PHYS(arg->altmap_start_pfn);
- size += PFN_PHYS(arg->altmap_nr_pages);
- }
- rc = sclp_mem_change_state(start, size, 1);
- if (rc || !arg->altmap_nr_pages)
- break;
- /*
- * Set CMMA state to nodat here, since the struct page memory
- * at the beginning of the memory block will not go through the
- * buddy allocator later.
- */
- __arch_set_page_nodat((void *)__va(start), arg->altmap_nr_pages);
- break;
- case MEM_FINISH_OFFLINE:
- /*
- * When altmap is in use, take the specified memory range
- * offline, which includes the altmap.
- */
- if (arg->altmap_nr_pages) {
- start = PFN_PHYS(arg->altmap_start_pfn);
- size += PFN_PHYS(arg->altmap_nr_pages);
- }
- sclp_mem_change_state(start, size, 0);
- break;
- default:
- break;
- }
- mutex_unlock(&sclp_mem_mutex);
- return rc ? NOTIFY_BAD : NOTIFY_OK;
-}
-
-static struct notifier_block sclp_mem_nb = {
- .notifier_call = sclp_mem_notifier,
-};
-
-static void __init align_to_block_size(unsigned long long *start,
- unsigned long long *size,
- unsigned long long alignment)
-{
- unsigned long long start_align, size_align;
-
- start_align = roundup(*start, alignment);
- size_align = rounddown(*start + *size, alignment) - start_align;
-
- pr_info("Standby memory at 0x%llx (%lluM of %lluM usable)\n",
- *start, size_align >> 20, *size >> 20);
- *start = start_align;
- *size = size_align;
-}
-
-static void __init add_memory_merged(u16 rn)
-{
- unsigned long long start, size, addr, block_size;
- static u16 first_rn, num;
-
- if (rn && first_rn && (first_rn + num == rn)) {
- num++;
- return;
- }
- if (!first_rn)
- goto skip_add;
- start = rn2addr(first_rn);
- size = (unsigned long long) num * sclp.rzm;
- if (start >= ident_map_size)
- goto skip_add;
- if (start + size > ident_map_size)
- size = ident_map_size - start;
- block_size = memory_block_size_bytes();
- align_to_block_size(&start, &size, block_size);
- if (!size)
- goto skip_add;
- for (addr = start; addr < start + size; addr += block_size)
- add_memory(0, addr, block_size,
- cpu_has_edat1() ?
- MHP_MEMMAP_ON_MEMORY | MHP_OFFLINE_INACCESSIBLE : MHP_NONE);
-skip_add:
- first_rn = rn;
- num = 1;
-}
-
-static void __init sclp_add_standby_memory(void)
-{
- struct memory_increment *incr;
-
- list_for_each_entry(incr, &sclp_mem_list, list)
- if (incr->standby)
- add_memory_merged(incr->rn);
- add_memory_merged(0);
-}
-
-static void __init insert_increment(u16 rn, int standby, int assigned)
-{
- struct memory_increment *incr, *new_incr;
- struct list_head *prev;
- u16 last_rn;
-
- new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
- if (!new_incr)
- return;
- new_incr->rn = rn;
- new_incr->standby = standby;
- last_rn = 0;
- prev = &sclp_mem_list;
- list_for_each_entry(incr, &sclp_mem_list, list) {
- if (assigned && incr->rn > rn)
- break;
- if (!assigned && incr->rn - last_rn > 1)
- break;
- last_rn = incr->rn;
- prev = &incr->list;
- }
- if (!assigned)
- new_incr->rn = last_rn + 1;
- if (new_incr->rn > sclp.rnmax) {
- kfree(new_incr);
- return;
- }
- list_add(&new_incr->list, prev);
-}
-
-static int __init sclp_detect_standby_memory(void)
-{
- struct read_storage_sccb *sccb;
- int i, id, assigned, rc;
-
- if (oldmem_data.start) /* No standby memory in kdump mode */
- return 0;
- if ((sclp.facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
- return 0;
- rc = -ENOMEM;
- sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
- if (!sccb)
- goto out;
- assigned = 0;
- for (id = 0; id <= sclp_max_storage_id; id++) {
- memset(sccb, 0, PAGE_SIZE);
- sccb->header.length = PAGE_SIZE;
- rc = sclp_sync_request(SCLP_CMDW_READ_STORAGE_INFO | id << 8, sccb);
- if (rc)
- goto out;
- switch (sccb->header.response_code) {
- case 0x0010:
- set_bit(id, sclp_storage_ids);
- for (i = 0; i < sccb->assigned; i++) {
- if (!sccb->entries[i])
- continue;
- assigned++;
- insert_increment(sccb->entries[i] >> 16, 0, 1);
- }
- break;
- case 0x0310:
- break;
- case 0x0410:
- for (i = 0; i < sccb->assigned; i++) {
- if (!sccb->entries[i])
- continue;
- assigned++;
- insert_increment(sccb->entries[i] >> 16, 1, 1);
- }
- break;
- default:
- rc = -EIO;
- break;
- }
- if (!rc)
- sclp_max_storage_id = sccb->max_id;
- }
- if (rc || list_empty(&sclp_mem_list))
- goto out;
- for (i = 1; i <= sclp.rnmax - assigned; i++)
- insert_increment(0, 1, 0);
- rc = register_memory_notifier(&sclp_mem_nb);
- if (rc)
- goto out;
- sclp_add_standby_memory();
-out:
- free_page((unsigned long) sccb);
- return rc;
-}
-__initcall(sclp_detect_standby_memory);
-
-#endif /* CONFIG_MEMORY_HOTPLUG */
-
-/*
- * Channel path configuration related functions.
- */
-
-#define SCLP_CMDW_CONFIGURE_CHPATH 0x000f0001
-#define SCLP_CMDW_DECONFIGURE_CHPATH 0x000e0001
-#define SCLP_CMDW_READ_CHPATH_INFORMATION 0x00030001
-
-struct chp_cfg_sccb {
- struct sccb_header header;
- u8 ccm;
- u8 reserved[6];
- u8 cssid;
-} __attribute__((packed));
-
static int do_chp_configure(sclp_cmdw_t cmd)
{
struct chp_cfg_sccb *sccb;
@@ -560,8 +174,7 @@ static int do_chp_configure(sclp_cmdw_t cmd)
if (!SCLP_HAS_CHP_RECONFIG)
return -EOPNOTSUPP;
- /* Prepare sccb. */
- sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ sccb = (struct chp_cfg_sccb *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = sizeof(*sccb);
@@ -581,7 +194,7 @@ static int do_chp_configure(sclp_cmdw_t cmd)
break;
}
out:
- free_page((unsigned long) sccb);
+ free_page((unsigned long)sccb);
return rc;
}
@@ -609,16 +222,6 @@ int sclp_chp_deconfigure(struct chp_id chpid)
return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
}
-struct chp_info_sccb {
- struct sccb_header header;
- u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
- u8 standby[SCLP_CHP_INFO_MASK_SIZE];
- u8 configured[SCLP_CHP_INFO_MASK_SIZE];
- u8 ccm;
- u8 reserved[6];
- u8 cssid;
-} __attribute__((packed));
-
/**
* sclp_chp_read_info - perform read channel-path information sclp command
* @info: resulting channel-path information data
@@ -634,8 +237,7 @@ int sclp_chp_read_info(struct sclp_chp_info *info)
if (!SCLP_HAS_CHP_INFO)
return -EOPNOTSUPP;
- /* Prepare sccb. */
- sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ sccb = (struct chp_info_sccb *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = sizeof(*sccb);
@@ -652,6 +254,6 @@ int sclp_chp_read_info(struct sclp_chp_info *info)
memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
out:
- free_page((unsigned long) sccb);
+ free_page((unsigned long)sccb);
return rc;
}
diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c
index b5bd40f13c75..55e50d428aab 100644
--- a/drivers/s390/char/sclp_early_core.c
+++ b/drivers/s390/char/sclp_early_core.c
@@ -51,7 +51,7 @@ void sclp_early_wait_irq(void)
" stg %[addr],%[psw_wait_addr]\n"
" stg %[addr],%[psw_ext_addr]\n"
" lpswe %[psw_wait]\n"
- "0:\n"
+ "0:"
: [addr] "=&d" (addr),
[psw_wait_addr] "=Q" (psw_wait.addr),
[psw_ext_addr] "=Q" (get_lowcore()->external_new_psw.addr)
diff --git a/drivers/s390/char/sclp_mem.c b/drivers/s390/char/sclp_mem.c
new file mode 100644
index 000000000000..27f49f5fd358
--- /dev/null
+++ b/drivers/s390/char/sclp_mem.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Memory hotplug support via sclp
+ *
+ * Copyright IBM Corp. 2025
+ */
+
+#define KMSG_COMPONENT "sclp_mem"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/cpufeature.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/memory.h>
+#include <linux/memory_hotplug.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <linux/slab.h>
+#include <asm/facility.h>
+#include <asm/page.h>
+#include <asm/page-states.h>
+#include <asm/sclp.h>
+
+#include "sclp.h"
+
+#define SCLP_CMDW_ASSIGN_STORAGE 0x000d0001
+#define SCLP_CMDW_UNASSIGN_STORAGE 0x000c0001
+
+static DEFINE_MUTEX(sclp_mem_mutex);
+static LIST_HEAD(sclp_mem_list);
+static u8 sclp_max_storage_id;
+static DECLARE_BITMAP(sclp_storage_ids, 256);
+
+struct memory_increment {
+ struct list_head list;
+ u16 rn;
+ int standby;
+};
+
+struct assign_storage_sccb {
+ struct sccb_header header;
+ u16 rn;
+} __packed;
+
+struct attach_storage_sccb {
+ struct sccb_header header;
+ u16 :16;
+ u16 assigned;
+ u32 :32;
+ u32 entries[];
+} __packed;
+
+int arch_get_memory_phys_device(unsigned long start_pfn)
+{
+ if (!sclp.rzm)
+ return 0;
+ return PFN_PHYS(start_pfn) >> ilog2(sclp.rzm);
+}
+
+static unsigned long rn2addr(u16 rn)
+{
+ return (unsigned long)(rn - 1) * sclp.rzm;
+}
+
+static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
+{
+ struct assign_storage_sccb *sccb;
+ int rc;
+
+ sccb = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!sccb)
+ return -ENOMEM;
+ sccb->header.length = PAGE_SIZE;
+ sccb->rn = rn;
+ rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
+ if (rc)
+ goto out;
+ switch (sccb->header.response_code) {
+ case 0x0020:
+ case 0x0120:
+ break;
+ default:
+ pr_warn("assign storage failed (cmd=0x%08x, response=0x%04x, rn=0x%04x)\n",
+ cmd, sccb->header.response_code, rn);
+ rc = -EIO;
+ break;
+ }
+out:
+ free_page((unsigned long)sccb);
+ return rc;
+}
+
+static int sclp_assign_storage(u16 rn)
+{
+ unsigned long start;
+ int rc;
+
+ rc = do_assign_storage(SCLP_CMDW_ASSIGN_STORAGE, rn);
+ if (rc)
+ return rc;
+ start = rn2addr(rn);
+ storage_key_init_range(start, start + sclp.rzm);
+ return 0;
+}
+
+static int sclp_unassign_storage(u16 rn)
+{
+ return do_assign_storage(SCLP_CMDW_UNASSIGN_STORAGE, rn);
+}
+
+static int sclp_attach_storage(u8 id)
+{
+ struct attach_storage_sccb *sccb;
+ int rc, i;
+
+ sccb = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!sccb)
+ return -ENOMEM;
+ sccb->header.length = PAGE_SIZE;
+ sccb->header.function_code = 0x40;
+ rc = sclp_sync_request_timeout(0x00080001 | id << 8, sccb,
+ SCLP_QUEUE_INTERVAL);
+ if (rc)
+ goto out;
+ switch (sccb->header.response_code) {
+ case 0x0020:
+ set_bit(id, sclp_storage_ids);
+ for (i = 0; i < sccb->assigned; i++) {
+ if (sccb->entries[i])
+ sclp_unassign_storage(sccb->entries[i] >> 16);
+ }
+ break;
+ default:
+ rc = -EIO;
+ break;
+ }
+out:
+ free_page((unsigned long)sccb);
+ return rc;
+}
+
+static int sclp_mem_change_state(unsigned long start, unsigned long size,
+ int online)
+{
+ struct memory_increment *incr;
+ unsigned long istart;
+ int rc = 0;
+
+ list_for_each_entry(incr, &sclp_mem_list, list) {
+ istart = rn2addr(incr->rn);
+ if (start + size - 1 < istart)
+ break;
+ if (start > istart + sclp.rzm - 1)
+ continue;
+ if (online)
+ rc |= sclp_assign_storage(incr->rn);
+ else
+ sclp_unassign_storage(incr->rn);
+ if (rc == 0)
+ incr->standby = online ? 0 : 1;
+ }
+ return rc ? -EIO : 0;
+}
+
+static bool contains_standby_increment(unsigned long start, unsigned long end)
+{
+ struct memory_increment *incr;
+ unsigned long istart;
+
+ list_for_each_entry(incr, &sclp_mem_list, list) {
+ istart = rn2addr(incr->rn);
+ if (end - 1 < istart)
+ continue;
+ if (start > istart + sclp.rzm - 1)
+ continue;
+ if (incr->standby)
+ return true;
+ }
+ return false;
+}
+
+static int sclp_mem_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ unsigned long start, size;
+ struct memory_notify *arg;
+ unsigned char id;
+ int rc = 0;
+
+ arg = data;
+ start = arg->start_pfn << PAGE_SHIFT;
+ size = arg->nr_pages << PAGE_SHIFT;
+ mutex_lock(&sclp_mem_mutex);
+ for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
+ sclp_attach_storage(id);
+ switch (action) {
+ case MEM_GOING_OFFLINE:
+ /*
+ * Do not allow setting memory blocks offline that contain
+ * standby memory. This is done to simplify the "memory online"
+ * case.
+ */
+ if (contains_standby_increment(start, start + size))
+ rc = -EPERM;
+ break;
+ case MEM_PREPARE_ONLINE:
+ /*
+ * The altmap_start_pfn and altmap_nr_pages fields in struct
+ * memory_notify are only valid for the MEM_PREPARE_ONLINE and
+ * MEM_FINISH_OFFLINE notifiers.
+ *
+ * When altmap is in use, take the specified memory range
+ * online, which includes the altmap.
+ */
+ if (arg->altmap_nr_pages) {
+ start = PFN_PHYS(arg->altmap_start_pfn);
+ size += PFN_PHYS(arg->altmap_nr_pages);
+ }
+ rc = sclp_mem_change_state(start, size, 1);
+ if (rc || !arg->altmap_nr_pages)
+ break;
+ /*
+ * Set CMMA state to nodat here, since the struct page memory
+ * at the beginning of the memory block will not go through the
+ * buddy allocator later.
+ */
+ __arch_set_page_nodat((void *)__va(start), arg->altmap_nr_pages);
+ break;
+ case MEM_FINISH_OFFLINE:
+ /*
+ * When altmap is in use, take the specified memory range
+ * offline, which includes the altmap.
+ */
+ if (arg->altmap_nr_pages) {
+ start = PFN_PHYS(arg->altmap_start_pfn);
+ size += PFN_PHYS(arg->altmap_nr_pages);
+ }
+ sclp_mem_change_state(start, size, 0);
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&sclp_mem_mutex);
+ return rc ? NOTIFY_BAD : NOTIFY_OK;
+}
+
+static struct notifier_block sclp_mem_nb = {
+ .notifier_call = sclp_mem_notifier,
+};
+
+static void __init align_to_block_size(unsigned long *start,
+ unsigned long *size,
+ unsigned long alignment)
+{
+ unsigned long start_align, size_align;
+
+ start_align = roundup(*start, alignment);
+ size_align = rounddown(*start + *size, alignment) - start_align;
+
+ pr_info("Standby memory at 0x%lx (%luM of %luM usable)\n",
+ *start, size_align >> 20, *size >> 20);
+ *start = start_align;
+ *size = size_align;
+}
+
+static void __init add_memory_merged(u16 rn)
+{
+ unsigned long start, size, addr, block_size;
+ static u16 first_rn, num;
+
+ if (rn && first_rn && (first_rn + num == rn)) {
+ num++;
+ return;
+ }
+ if (!first_rn)
+ goto skip_add;
+ start = rn2addr(first_rn);
+ size = (unsigned long)num * sclp.rzm;
+ if (start >= ident_map_size)
+ goto skip_add;
+ if (start + size > ident_map_size)
+ size = ident_map_size - start;
+ block_size = memory_block_size_bytes();
+ align_to_block_size(&start, &size, block_size);
+ if (!size)
+ goto skip_add;
+ for (addr = start; addr < start + size; addr += block_size) {
+ add_memory(0, addr, block_size,
+ cpu_has_edat1() ?
+ MHP_MEMMAP_ON_MEMORY | MHP_OFFLINE_INACCESSIBLE : MHP_NONE);
+ }
+skip_add:
+ first_rn = rn;
+ num = 1;
+}
+
+static void __init sclp_add_standby_memory(void)
+{
+ struct memory_increment *incr;
+
+ list_for_each_entry(incr, &sclp_mem_list, list) {
+ if (incr->standby)
+ add_memory_merged(incr->rn);
+ }
+ add_memory_merged(0);
+}
+
+static void __init insert_increment(u16 rn, int standby, int assigned)
+{
+ struct memory_increment *incr, *new_incr;
+ struct list_head *prev;
+ u16 last_rn;
+
+ new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
+ if (!new_incr)
+ return;
+ new_incr->rn = rn;
+ new_incr->standby = standby;
+ last_rn = 0;
+ prev = &sclp_mem_list;
+ list_for_each_entry(incr, &sclp_mem_list, list) {
+ if (assigned && incr->rn > rn)
+ break;
+ if (!assigned && incr->rn - last_rn > 1)
+ break;
+ last_rn = incr->rn;
+ prev = &incr->list;
+ }
+ if (!assigned)
+ new_incr->rn = last_rn + 1;
+ if (new_incr->rn > sclp.rnmax) {
+ kfree(new_incr);
+ return;
+ }
+ list_add(&new_incr->list, prev);
+}
+
+static int __init sclp_detect_standby_memory(void)
+{
+ struct read_storage_sccb *sccb;
+ int i, id, assigned, rc;
+
+ /* No standby memory in kdump mode */
+ if (oldmem_data.start)
+ return 0;
+ if ((sclp.facilities & 0xe00000000000UL) != 0xe00000000000UL)
+ return 0;
+ rc = -ENOMEM;
+ sccb = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
+ if (!sccb)
+ goto out;
+ assigned = 0;
+ for (id = 0; id <= sclp_max_storage_id; id++) {
+ memset(sccb, 0, PAGE_SIZE);
+ sccb->header.length = PAGE_SIZE;
+ rc = sclp_sync_request(SCLP_CMDW_READ_STORAGE_INFO | id << 8, sccb);
+ if (rc)
+ goto out;
+ switch (sccb->header.response_code) {
+ case 0x0010:
+ set_bit(id, sclp_storage_ids);
+ for (i = 0; i < sccb->assigned; i++) {
+ if (!sccb->entries[i])
+ continue;
+ assigned++;
+ insert_increment(sccb->entries[i] >> 16, 0, 1);
+ }
+ break;
+ case 0x0310:
+ break;
+ case 0x0410:
+ for (i = 0; i < sccb->assigned; i++) {
+ if (!sccb->entries[i])
+ continue;
+ assigned++;
+ insert_increment(sccb->entries[i] >> 16, 1, 1);
+ }
+ break;
+ default:
+ rc = -EIO;
+ break;
+ }
+ if (!rc)
+ sclp_max_storage_id = sccb->max_id;
+ }
+ if (rc || list_empty(&sclp_mem_list))
+ goto out;
+ for (i = 1; i <= sclp.rnmax - assigned; i++)
+ insert_increment(0, 1, 0);
+ rc = register_memory_notifier(&sclp_mem_nb);
+ if (rc)
+ goto out;
+ sclp_add_standby_memory();
+out:
+ free_page((unsigned long)sccb);
+ return rc;
+}
+__initcall(sclp_detect_standby_memory);
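The standby memory handling above is mostly increment arithmetic: rn2addr() turns a 1-based increment number (rn) into a physical address, and align_to_block_size() trims the candidate range to whole memory blocks before add_memory(). A minimal user-space sketch of that arithmetic, with hypothetical values standing in for the increment size (sclp.rzm) and the memory block size:

#include <stdio.h>

#define RZM        (256UL << 20)   /* hypothetical increment size: 256M */
#define BLOCK_SIZE (256UL << 20)   /* hypothetical memory block size: 256M */

#define ROUNDUP(x, a)   ((((x) + (a) - 1) / (a)) * (a))
#define ROUNDDOWN(x, a) (((x) / (a)) * (a))

static unsigned long rn2addr(unsigned short rn)
{
	/* increment numbers are 1-based, addresses are 0-based */
	return (unsigned long)(rn - 1) * RZM;
}

int main(void)
{
	unsigned long start = rn2addr(5);	/* increments 5 and 6 */
	unsigned long size = 2 * RZM;
	unsigned long start_align = ROUNDUP(start, BLOCK_SIZE);
	unsigned long size_align =
		ROUNDDOWN(start + size, BLOCK_SIZE) - start_align;

	printf("raw:     0x%lx + 0x%lx\n", start, size);
	printf("aligned: 0x%lx + 0x%lx\n", start_align, size_align);
	return 0;
}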
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index a1bafaf73f87..2a2931d303cb 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -1671,7 +1671,7 @@ tape_3590_init(void)
DBF_EVENT(3, "3590 init\n");
- tape_3590_wq = alloc_workqueue("tape_3590", 0, 0);
+ tape_3590_wq = alloc_workqueue("tape_3590", WQ_PERCPU, 0);
if (!tape_3590_wq)
return -ENOMEM;
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index fdab760f1f28..b7048f2b036e 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -167,7 +167,7 @@ static inline void cmf_activate(void *area, unsigned int onoff)
asm volatile(
" lgr 1,%[r1]\n"
" lgr 2,%[mbo]\n"
- " schm\n"
+ " schm"
:
: [r1] "d" ((unsigned long)onoff),
[mbo] "d" (virt_to_phys(area))
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index fb2c07cb4d3d..4b2dae6eb376 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1316,23 +1316,34 @@ void ccw_device_schedule_recovery(void)
spin_unlock_irqrestore(&recovery_lock, flags);
}
-static int purge_fn(struct device *dev, void *data)
+static int purge_fn(struct subchannel *sch, void *data)
{
- struct ccw_device *cdev = to_ccwdev(dev);
- struct ccw_dev_id *id = &cdev->private->dev_id;
- struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_device *cdev;
- spin_lock_irq(cdev->ccwlock);
- if (is_blacklisted(id->ssid, id->devno) &&
- (cdev->private->state == DEV_STATE_OFFLINE) &&
- (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
- CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
- id->devno);
+ spin_lock_irq(&sch->lock);
+ if (sch->st != SUBCHANNEL_TYPE_IO || !sch->schib.pmcw.dnv)
+ goto unlock;
+
+ if (!is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev))
+ goto unlock;
+
+ cdev = sch_get_cdev(sch);
+ if (cdev) {
+ if (cdev->private->state != DEV_STATE_OFFLINE)
+ goto unlock;
+
+ if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
+ goto unlock;
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
- css_sched_sch_todo(sch, SCH_TODO_UNREG);
atomic_set(&cdev->private->onoff, 0);
}
- spin_unlock_irq(cdev->ccwlock);
+
+ css_sched_sch_todo(sch, SCH_TODO_UNREG);
+ CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x%s\n", sch->schid.ssid,
+ sch->schib.pmcw.dev, cdev ? "" : " (no cdev)");
+
+unlock:
+ spin_unlock_irq(&sch->lock);
/* Abort loop in case of pending signal. */
if (signal_pending(current))
return -EINTR;
@@ -1348,7 +1359,7 @@ static int purge_fn(struct device *dev, void *data)
int ccw_purge_blacklisted(void)
{
CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
- bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
+ for_each_subchannel_staged(purge_fn, NULL, NULL);
return 0;
}
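purge_fn() now walks subchannels rather than ccw devices, and relies on the iterator aborting as soon as the callback returns nonzero (here -EINTR when a signal is pending, per the "Abort loop" comment). A hedged sketch of that callback contract, with hypothetical types standing in for the real for_each_subchannel_staged():

/* Hypothetical sketch of the iterator contract purge_fn() relies on:
 * the walk stops as soon as the callback returns nonzero. */
struct subchannel;

static int walk_subchannels(struct subchannel **schs, int n,
			    int (*fn)(struct subchannel *, void *),
			    void *data)
{
	int i, rc;

	for (i = 0; i < n; i++) {
		rc = fn(schs[i], data);
		if (rc)		/* callback asked to abort, e.g. -EINTR */
			return rc;
	}
	return 0;
}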
diff --git a/drivers/s390/cio/ioasm.c b/drivers/s390/cio/ioasm.c
index a540045b64a6..8b06b234e110 100644
--- a/drivers/s390/cio/ioasm.c
+++ b/drivers/s390/cio/ioasm.c
@@ -253,11 +253,10 @@ static inline int __xsch(struct subchannel_id schid)
asm volatile(
" lgr 1,%[r1]\n"
" xsch\n"
- " ipm %[cc]\n"
- " srl %[cc],28\n"
- : [cc] "=&d" (ccode)
+ CC_IPM(cc)
+ : CC_OUT(cc, ccode)
: [r1] "d" (r1)
- : "cc", "1");
+ : CC_CLOBBER_LIST("1"));
return CC_TRANSFORM(ccode);
}
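The CC_IPM()/CC_OUT()/CC_CLOBBER_LIST() helpers factor out the classic s390 idiom that the removed lines spell out by hand: ipm inserts the program mask (condition code in bits 28-29) into a register, and a right shift by 28 leaves the raw 0-3 condition code. For reference, the hand-written form being replaced:

static inline int xsch_cc(unsigned long r1)
{
	int ccode;

	asm volatile(
		"	lgr	1,%[r1]\n"
		"	xsch\n"
		"	ipm	%[cc]\n"	/* program mask -> bits 28-29 */
		"	srl	%[cc],28\n"	/* cc now in range 0..3 */
		: [cc] "=&d" (ccode)
		: [r1] "d" (r1)
		: "cc", "1");
	return ccode;
}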
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 88b625ba1978..4b7ffa840563 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -180,7 +180,7 @@ struct ap_card {
atomic64_t total_request_count; /* # requests ever for this AP device.*/
};
-#define TAPQ_CARD_HWINFO_MASK 0xFEFF0000FFFF0F0FUL
+#define TAPQ_CARD_HWINFO_MASK 0xFFFF0000FFFF0F0FUL
#define ASSOC_IDX_INVALID 0x10000
#define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device)
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 766557547f83..eb5ff49f6fe7 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -354,7 +354,7 @@ static int vfio_ap_validate_nib(struct kvm_vcpu *vcpu, dma_addr_t *nib)
if (!*nib)
return -EINVAL;
- if (kvm_is_error_hva(gfn_to_hva(vcpu->kvm, *nib >> PAGE_SHIFT)))
+ if (!kvm_s390_is_gpa_in_memslot(vcpu->kvm, *nib))
return -EINVAL;
return 0;
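kvm_s390_is_gpa_in_memslot() replaces the gfn_to_hva()/kvm_is_error_hva() round trip with a direct check that the NIB's guest physical address is backed by a memslot. A speculative sketch of what such a helper boils down to (the real implementation lives in the s390 KVM code and may differ):

/* Sketch only: assumes gfn_to_memslot() semantics; the actual
 * kvm_s390_is_gpa_in_memslot() may be implemented differently. */
static bool gpa_in_memslot(struct kvm *kvm, gpa_t gpa)
{
	return gfn_to_memslot(kvm, gpa >> PAGE_SHIFT) != NULL;
}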
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c
index 3bf09a89a089..e92e2fd8ce5d 100644
--- a/drivers/s390/crypto/zcrypt_ep11misc.c
+++ b/drivers/s390/crypto/zcrypt_ep11misc.c
@@ -1405,7 +1405,9 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
/* Step 3: import the encrypted key value as a new key */
rc = ep11_unwrapkey(card, domain, kek, keklen,
encbuf, encbuflen, 0, def_iv,
- keybitsize, 0, keybuf, keybufsize, keytype, xflags);
+ keybitsize, keygenflags,
+ keybuf, keybufsize,
+ keytype, xflags);
if (rc) {
ZCRYPT_DBF_ERR("%s importing key value as new key failed, rc=%d\n",
__func__, rc);
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 2b43f6f28362..0fd700c5745a 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -81,8 +81,7 @@ config CCWGROUP
config ISM
tristate "Support for ISM vPCI Adapter"
- depends on PCI
- imply SMC
+ depends on PCI && DIBS
default n
help
Select this option if you want to use the Internal Shared Memory
diff --git a/drivers/s390/net/ism.h b/drivers/s390/net/ism.h
index 047fa6101555..08d17956cb36 100644
--- a/drivers/s390/net/ism.h
+++ b/drivers/s390/net/ism.h
@@ -5,11 +5,13 @@
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/pci.h>
-#include <linux/ism.h>
-#include <net/smc.h>
+#include <linux/dibs.h>
#include <asm/pci_insn.h>
#define UTIL_STR_LEN 16
+#define ISM_ERROR 0xFFFF
+
+#define ISM_NR_DMBS 1920
/*
* Do not use the first word of the DMB bits to ensure 8 byte aligned access.
@@ -32,6 +34,23 @@
#define ISM_UNREG_SBA 0x11
#define ISM_UNREG_IEQ 0x12
+enum ism_event_type {
+ ISM_EVENT_BUF = 0x00,
+ ISM_EVENT_DEV = 0x01,
+ ISM_EVENT_SWR = 0x02
+};
+
+enum ism_event_code {
+ ISM_BUF_DMB_UNREGISTERED = 0x04,
+ ISM_BUF_USING_ISM_DEV_DISABLED = 0x08,
+ ISM_BUF_OWNING_ISM_DEV_IN_ERR_STATE = 0x02,
+ ISM_BUF_USING_ISM_DEV_IN_ERR_STATE = 0x03,
+ ISM_BUF_VLAN_MISMATCH_WITH_OWNER = 0x05,
+ ISM_BUF_VLAN_MISMATCH_WITH_USER = 0x06,
+ ISM_DEV_GID_DISABLED = 0x07,
+ ISM_DEV_GID_ERR_STATE = 0x01
+};
+
struct ism_req_hdr {
u32 cmd;
u16 : 16;
@@ -65,6 +84,15 @@ union ism_reg_ieq {
} response;
} __aligned(16);
+/* ISM-vPCI devices provide 64-bit GIDs.
+ * Map them to ISM UUID GIDs like this:
+ * _________________________________________
+ * | 64-bit ISM-vPCI GID | 00000000_00000000 |
+ * -----------------------------------------
+ * This will be interpreted as a UUID variant that is reserved
+ * for NCS backward compatibility. So it will not collide with
+ * proper UUIDs.
+ */
union ism_read_gid {
struct {
struct ism_req_hdr hdr;
@@ -174,6 +202,14 @@ struct ism_eq_header {
u64 : 64;
};
+struct ism_event {
+ u32 type;
+ u32 code;
+ u64 tok;
+ u64 time;
+ u64 info;
+};
+
struct ism_eq {
struct ism_eq_header header;
struct ism_event entry[15];
@@ -188,6 +224,19 @@ struct ism_sba {
u16 dmbe_mask[ISM_NR_DMBS];
};
+struct ism_dev {
+ spinlock_t cmd_lock; /* serializes cmds */
+ struct dibs_dev *dibs;
+ struct pci_dev *pdev;
+ struct ism_sba *sba;
+ dma_addr_t sba_dma_addr;
+ DECLARE_BITMAP(sba_bitmap, ISM_NR_DMBS);
+
+ struct ism_eq *ieq;
+ dma_addr_t ieq_dma_addr;
+ int ieq_idx;
+};
+
#define ISM_CREATE_REQ(dmb, idx, sf, offset) \
((dmb) | (idx) << 24 | (sf) << 23 | (offset))
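The GID-mapping comment above is implemented later in ism_read_local_gid() as a memset() followed by a memcpy(). A standalone sketch of the mapping it describes:

#include <stdint.h>
#include <string.h>

/* Sketch: place the 64-bit ISM-vPCI GID in the first 8 bytes of a
 * 16-byte UUID-style GID and zero the rest, as the comment above
 * describes. */
static void ism_gid_to_uuid(uint64_t ism_gid, uint8_t uuid[16])
{
	memset(uuid, 0, 16);
	memcpy(uuid, &ism_gid, sizeof(ism_gid));
}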
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index 6cd60b174315..f84aa2e676e9 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -31,101 +31,6 @@ MODULE_DEVICE_TABLE(pci, ism_device_table);
static debug_info_t *ism_debug_info;
-#define NO_CLIENT 0xff /* must be >= MAX_CLIENTS */
-static struct ism_client *clients[MAX_CLIENTS]; /* use an array rather than */
- /* a list for fast mapping */
-static u8 max_client;
-static DEFINE_MUTEX(clients_lock);
-static bool ism_v2_capable;
-struct ism_dev_list {
- struct list_head list;
- struct mutex mutex; /* protects ism device list */
-};
-
-static struct ism_dev_list ism_dev_list = {
- .list = LIST_HEAD_INIT(ism_dev_list.list),
- .mutex = __MUTEX_INITIALIZER(ism_dev_list.mutex),
-};
-
-static void ism_setup_forwarding(struct ism_client *client, struct ism_dev *ism)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ism->lock, flags);
- ism->subs[client->id] = client;
- spin_unlock_irqrestore(&ism->lock, flags);
-}
-
-int ism_register_client(struct ism_client *client)
-{
- struct ism_dev *ism;
- int i, rc = -ENOSPC;
-
- mutex_lock(&ism_dev_list.mutex);
- mutex_lock(&clients_lock);
- for (i = 0; i < MAX_CLIENTS; ++i) {
- if (!clients[i]) {
- clients[i] = client;
- client->id = i;
- if (i == max_client)
- max_client++;
- rc = 0;
- break;
- }
- }
- mutex_unlock(&clients_lock);
-
- if (i < MAX_CLIENTS) {
- /* initialize with all devices that we got so far */
- list_for_each_entry(ism, &ism_dev_list.list, list) {
- ism->priv[i] = NULL;
- client->add(ism);
- ism_setup_forwarding(client, ism);
- }
- }
- mutex_unlock(&ism_dev_list.mutex);
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(ism_register_client);
-
-int ism_unregister_client(struct ism_client *client)
-{
- struct ism_dev *ism;
- unsigned long flags;
- int rc = 0;
-
- mutex_lock(&ism_dev_list.mutex);
- list_for_each_entry(ism, &ism_dev_list.list, list) {
- spin_lock_irqsave(&ism->lock, flags);
- /* Stop forwarding IRQs and events */
- ism->subs[client->id] = NULL;
- for (int i = 0; i < ISM_NR_DMBS; ++i) {
- if (ism->sba_client_arr[i] == client->id) {
- WARN(1, "%s: attempt to unregister '%s' with registered dmb(s)\n",
- __func__, client->name);
- rc = -EBUSY;
- goto err_reg_dmb;
- }
- }
- spin_unlock_irqrestore(&ism->lock, flags);
- }
- mutex_unlock(&ism_dev_list.mutex);
-
- mutex_lock(&clients_lock);
- clients[client->id] = NULL;
- if (client->id + 1 == max_client)
- max_client--;
- mutex_unlock(&clients_lock);
- return rc;
-
-err_reg_dmb:
- spin_unlock_irqrestore(&ism->lock, flags);
- mutex_unlock(&ism_dev_list.mutex);
- return rc;
-}
-EXPORT_SYMBOL_GPL(ism_unregister_client);
-
static int ism_cmd(struct ism_dev *ism, void *cmd)
{
struct ism_req_hdr *req = cmd;
@@ -273,8 +178,9 @@ static int unregister_ieq(struct ism_dev *ism)
return 0;
}
-static int ism_read_local_gid(struct ism_dev *ism)
+static int ism_read_local_gid(struct dibs_dev *dibs)
{
+ struct ism_dev *ism = dibs->drv_priv;
union ism_read_gid cmd;
int ret;
@@ -286,20 +192,43 @@ static int ism_read_local_gid(struct ism_dev *ism)
if (ret)
goto out;
- ism->local_gid = cmd.response.gid;
+ memset(&dibs->gid, 0, sizeof(dibs->gid));
+ memcpy(&dibs->gid, &cmd.response.gid, sizeof(cmd.response.gid));
out:
return ret;
}
-static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
+static int ism_query_rgid(struct dibs_dev *dibs, const uuid_t *rgid,
+ u32 vid_valid, u32 vid)
{
- clear_bit(dmb->sba_idx, ism->sba_bitmap);
+ struct ism_dev *ism = dibs->drv_priv;
+ union ism_query_rgid cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.request.hdr.cmd = ISM_QUERY_RGID;
+ cmd.request.hdr.len = sizeof(cmd.request);
+
+ memcpy(&cmd.request.rgid, rgid, sizeof(cmd.request.rgid));
+ cmd.request.vlan_valid = vid_valid;
+ cmd.request.vlan_id = vid;
+
+ return ism_cmd(ism, &cmd);
+}
+
+static int ism_max_dmbs(void)
+{
+ return ISM_NR_DMBS;
+}
+
+static void ism_free_dmb(struct ism_dev *ism, struct dibs_dmb *dmb)
+{
+ clear_bit(dmb->idx, ism->sba_bitmap);
dma_unmap_page(&ism->pdev->dev, dmb->dma_addr, dmb->dmb_len,
DMA_FROM_DEVICE);
folio_put(virt_to_folio(dmb->cpu_addr));
}
-static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
+static int ism_alloc_dmb(struct ism_dev *ism, struct dibs_dmb *dmb)
{
struct folio *folio;
unsigned long bit;
@@ -308,16 +237,16 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
return -EINVAL;
- if (!dmb->sba_idx) {
+ if (!dmb->idx) {
bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS,
ISM_DMB_BIT_OFFSET);
if (bit == ISM_NR_DMBS)
return -ENOSPC;
- dmb->sba_idx = bit;
+ dmb->idx = bit;
}
- if (dmb->sba_idx < ISM_DMB_BIT_OFFSET ||
- test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
+ if (dmb->idx < ISM_DMB_BIT_OFFSET ||
+ test_and_set_bit(dmb->idx, ism->sba_bitmap))
return -EINVAL;
folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC |
@@ -342,13 +271,14 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
out_free:
kfree(dmb->cpu_addr);
out_bit:
- clear_bit(dmb->sba_idx, ism->sba_bitmap);
+ clear_bit(dmb->idx, ism->sba_bitmap);
return rc;
}
-int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
- struct ism_client *client)
+static int ism_register_dmb(struct dibs_dev *dibs, struct dibs_dmb *dmb,
+ struct dibs_client *client)
{
+ struct ism_dev *ism = dibs->drv_priv;
union ism_reg_dmb cmd;
unsigned long flags;
int ret;
@@ -363,10 +293,10 @@ int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
cmd.request.dmb = dmb->dma_addr;
cmd.request.dmb_len = dmb->dmb_len;
- cmd.request.sba_idx = dmb->sba_idx;
+ cmd.request.sba_idx = dmb->idx;
cmd.request.vlan_valid = dmb->vlan_valid;
cmd.request.vlan_id = dmb->vlan_id;
- cmd.request.rgid = dmb->rgid;
+ memcpy(&cmd.request.rgid, &dmb->rgid, sizeof(u64));
ret = ism_cmd(ism, &cmd);
if (ret) {
@@ -374,16 +304,16 @@ int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
goto out;
}
dmb->dmb_tok = cmd.response.dmb_tok;
- spin_lock_irqsave(&ism->lock, flags);
- ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = client->id;
- spin_unlock_irqrestore(&ism->lock, flags);
+ spin_lock_irqsave(&dibs->lock, flags);
+ dibs->dmb_clientid_arr[dmb->idx - ISM_DMB_BIT_OFFSET] = client->id;
+ spin_unlock_irqrestore(&dibs->lock, flags);
out:
return ret;
}
-EXPORT_SYMBOL_GPL(ism_register_dmb);
-int ism_unregister_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
+static int ism_unregister_dmb(struct dibs_dev *dibs, struct dibs_dmb *dmb)
{
+ struct ism_dev *ism = dibs->drv_priv;
union ism_unreg_dmb cmd;
unsigned long flags;
int ret;
@@ -394,9 +324,9 @@ int ism_unregister_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
cmd.request.dmb_tok = dmb->dmb_tok;
- spin_lock_irqsave(&ism->lock, flags);
- ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = NO_CLIENT;
- spin_unlock_irqrestore(&ism->lock, flags);
+ spin_lock_irqsave(&dibs->lock, flags);
+ dibs->dmb_clientid_arr[dmb->idx - ISM_DMB_BIT_OFFSET] = NO_DIBS_CLIENT;
+ spin_unlock_irqrestore(&dibs->lock, flags);
ret = ism_cmd(ism, &cmd);
if (ret && ret != ISM_ERROR)
@@ -406,10 +336,10 @@ int ism_unregister_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
out:
return ret;
}
-EXPORT_SYMBOL_GPL(ism_unregister_dmb);
-static int ism_add_vlan_id(struct ism_dev *ism, u64 vlan_id)
+static int ism_add_vlan_id(struct dibs_dev *dibs, u64 vlan_id)
{
+ struct ism_dev *ism = dibs->drv_priv;
union ism_set_vlan_id cmd;
memset(&cmd, 0, sizeof(cmd));
@@ -421,8 +351,9 @@ static int ism_add_vlan_id(struct ism_dev *ism, u64 vlan_id)
return ism_cmd(ism, &cmd);
}
-static int ism_del_vlan_id(struct ism_dev *ism, u64 vlan_id)
+static int ism_del_vlan_id(struct dibs_dev *dibs, u64 vlan_id)
{
+ struct ism_dev *ism = dibs->drv_priv;
union ism_set_vlan_id cmd;
memset(&cmd, 0, sizeof(cmd));
@@ -434,15 +365,35 @@ static int ism_del_vlan_id(struct ism_dev *ism, u64 vlan_id)
return ism_cmd(ism, &cmd);
}
+static int ism_signal_ieq(struct dibs_dev *dibs, const uuid_t *rgid,
+ u32 trigger_irq, u32 event_code, u64 info)
+{
+ struct ism_dev *ism = dibs->drv_priv;
+ union ism_sig_ieq cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
+ cmd.request.hdr.len = sizeof(cmd.request);
+
+ memcpy(&cmd.request.rgid, rgid, sizeof(cmd.request.rgid));
+ cmd.request.trigger_irq = trigger_irq;
+ cmd.request.event_code = event_code;
+ cmd.request.info = info;
+
+ return ism_cmd(ism, &cmd);
+}
+
static unsigned int max_bytes(unsigned int start, unsigned int len,
unsigned int boundary)
{
return min(boundary - (start & (boundary - 1)), len);
}
-int ism_move(struct ism_dev *ism, u64 dmb_tok, unsigned int idx, bool sf,
- unsigned int offset, void *data, unsigned int size)
+static int ism_move(struct dibs_dev *dibs, u64 dmb_tok, unsigned int idx,
+ bool sf, unsigned int offset, void *data,
+ unsigned int size)
{
+ struct ism_dev *ism = dibs->drv_priv;
unsigned int bytes;
u64 dmb_req;
int ret;
@@ -463,24 +414,79 @@ int ism_move(struct ism_dev *ism, u64 dmb_tok, unsigned int idx, bool sf,
return 0;
}
-EXPORT_SYMBOL_GPL(ism_move);
+
+static u16 ism_get_chid(struct dibs_dev *dibs)
+{
+ struct ism_dev *ism = dibs->drv_priv;
+
+ if (!ism || !ism->pdev)
+ return 0;
+
+ return to_zpci(ism->pdev)->pchid;
+}
+
+static int ism_match_event_type(u32 s390_event_type)
+{
+ switch (s390_event_type) {
+ case ISM_EVENT_BUF:
+ return DIBS_BUF_EVENT;
+ case ISM_EVENT_DEV:
+ return DIBS_DEV_EVENT;
+ case ISM_EVENT_SWR:
+ return DIBS_SW_EVENT;
+ default:
+ return DIBS_OTHER_TYPE;
+ }
+}
+
+static int ism_match_event_subtype(u32 s390_event_subtype)
+{
+ switch (s390_event_subtype) {
+ case ISM_BUF_DMB_UNREGISTERED:
+ return DIBS_BUF_UNREGISTERED;
+ case ISM_DEV_GID_DISABLED:
+ return DIBS_DEV_DISABLED;
+ case ISM_DEV_GID_ERR_STATE:
+ return DIBS_DEV_ERR_STATE;
+ default:
+ return DIBS_OTHER_SUBTYPE;
+ }
+}
static void ism_handle_event(struct ism_dev *ism)
{
+ struct dibs_dev *dibs = ism->dibs;
+ struct dibs_event event;
struct ism_event *entry;
- struct ism_client *clt;
+ struct dibs_client *clt;
int i;
while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
- if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry))
+ if (++ism->ieq_idx == ARRAY_SIZE(ism->ieq->entry))
ism->ieq_idx = 0;
entry = &ism->ieq->entry[ism->ieq_idx];
debug_event(ism_debug_info, 2, entry, sizeof(*entry));
- for (i = 0; i < max_client; ++i) {
- clt = ism->subs[i];
+ __memset(&event, 0, sizeof(event));
+ event.type = ism_match_event_type(entry->type);
+ if (event.type == DIBS_SW_EVENT)
+ event.subtype = entry->code;
+ else
+ event.subtype = ism_match_event_subtype(entry->code);
+ event.time = entry->time;
+ event.data = entry->info;
+ switch (event.type) {
+ case DIBS_BUF_EVENT:
+ event.buffer_tok = entry->tok;
+ break;
+ case DIBS_DEV_EVENT:
+ case DIBS_SW_EVENT:
+ memcpy(&event.gid, &entry->tok, sizeof(u64));
+ }
+ for (i = 0; i < MAX_DIBS_CLIENTS; ++i) {
+ clt = dibs->subs[i];
if (clt)
- clt->handle_event(ism, entry);
+ clt->ops->handle_event(dibs, &event);
}
}
}
@@ -489,14 +495,17 @@ static irqreturn_t ism_handle_irq(int irq, void *data)
{
struct ism_dev *ism = data;
unsigned long bit, end;
+ struct dibs_dev *dibs;
unsigned long *bv;
u16 dmbemask;
u8 client_id;
+ dibs = ism->dibs;
+
bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;
- spin_lock(&ism->lock);
+ spin_lock(&dibs->lock);
ism->sba->s = 0;
barrier();
for (bit = 0;;) {
@@ -508,10 +517,13 @@ static irqreturn_t ism_handle_irq(int irq, void *data)
dmbemask = ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET];
ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
barrier();
- client_id = ism->sba_client_arr[bit];
- if (unlikely(client_id == NO_CLIENT || !ism->subs[client_id]))
+ client_id = dibs->dmb_clientid_arr[bit];
+ if (unlikely(client_id == NO_DIBS_CLIENT ||
+ !dibs->subs[client_id]))
continue;
- ism->subs[client_id]->handle_irq(ism, bit + ISM_DMB_BIT_OFFSET, dmbemask);
+ dibs->subs[client_id]->ops->handle_irq(dibs,
+ bit + ISM_DMB_BIT_OFFSET,
+ dmbemask);
}
if (ism->sba->e) {
@@ -519,28 +531,35 @@ static irqreturn_t ism_handle_irq(int irq, void *data)
barrier();
ism_handle_event(ism);
}
- spin_unlock(&ism->lock);
+ spin_unlock(&dibs->lock);
return IRQ_HANDLED;
}
+static const struct dibs_dev_ops ism_ops = {
+ .get_fabric_id = ism_get_chid,
+ .query_remote_gid = ism_query_rgid,
+ .max_dmbs = ism_max_dmbs,
+ .register_dmb = ism_register_dmb,
+ .unregister_dmb = ism_unregister_dmb,
+ .move_data = ism_move,
+ .add_vlan_id = ism_add_vlan_id,
+ .del_vlan_id = ism_del_vlan_id,
+ .signal_event = ism_signal_ieq,
+};
+
static int ism_dev_init(struct ism_dev *ism)
{
struct pci_dev *pdev = ism->pdev;
- int i, ret;
+ int ret;
ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
if (ret <= 0)
goto out;
- ism->sba_client_arr = kzalloc(ISM_NR_DMBS, GFP_KERNEL);
- if (!ism->sba_client_arr)
- goto free_vectors;
- memset(ism->sba_client_arr, NO_CLIENT, ISM_NR_DMBS);
-
ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0,
pci_name(pdev), ism);
if (ret)
- goto free_client_arr;
+ goto free_vectors;
ret = register_sba(ism);
if (ret)
@@ -550,57 +569,33 @@ static int ism_dev_init(struct ism_dev *ism)
if (ret)
goto unreg_sba;
- ret = ism_read_local_gid(ism);
- if (ret)
- goto unreg_ieq;
-
- if (!ism_add_vlan_id(ism, ISM_RESERVED_VLANID))
- /* hardware is V2 capable */
- ism_v2_capable = true;
- else
- ism_v2_capable = false;
-
- mutex_lock(&ism_dev_list.mutex);
- mutex_lock(&clients_lock);
- for (i = 0; i < max_client; ++i) {
- if (clients[i]) {
- clients[i]->add(ism);
- ism_setup_forwarding(clients[i], ism);
- }
- }
- mutex_unlock(&clients_lock);
-
- list_add(&ism->list, &ism_dev_list.list);
- mutex_unlock(&ism_dev_list.mutex);
-
query_info(ism);
return 0;
-unreg_ieq:
- unregister_ieq(ism);
unreg_sba:
unregister_sba(ism);
free_irq:
free_irq(pci_irq_vector(pdev, 0), ism);
-free_client_arr:
- kfree(ism->sba_client_arr);
free_vectors:
pci_free_irq_vectors(pdev);
out:
return ret;
}
-static void ism_dev_release(struct device *dev)
+static void ism_dev_exit(struct ism_dev *ism)
{
- struct ism_dev *ism;
-
- ism = container_of(dev, struct ism_dev, dev);
+ struct pci_dev *pdev = ism->pdev;
- kfree(ism);
+ unregister_ieq(ism);
+ unregister_sba(ism);
+ free_irq(pci_irq_vector(pdev, 0), ism);
+ pci_free_irq_vectors(pdev);
}
static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ struct dibs_dev *dibs;
+ struct zpci_dev *zdev;
struct ism_dev *ism;
int ret;
@@ -608,21 +603,13 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!ism)
return -ENOMEM;
- spin_lock_init(&ism->lock);
spin_lock_init(&ism->cmd_lock);
dev_set_drvdata(&pdev->dev, ism);
ism->pdev = pdev;
- ism->dev.parent = &pdev->dev;
- ism->dev.release = ism_dev_release;
- device_initialize(&ism->dev);
- dev_set_name(&ism->dev, "%s", dev_name(&pdev->dev));
- ret = device_add(&ism->dev);
- if (ret)
- goto err_dev;
ret = pci_enable_device_mem(pdev);
if (ret)
- goto err;
+ goto err_dev;
ret = pci_request_mem_regions(pdev, DRV_NAME);
if (ret)
@@ -636,66 +623,69 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dma_set_max_seg_size(&pdev->dev, SZ_1M);
pci_set_master(pdev);
+ dibs = dibs_dev_alloc();
+ if (!dibs) {
+ ret = -ENOMEM;
+ goto err_resource;
+ }
+ /* set this up before we enable interrupts */
+ ism->dibs = dibs;
+ dibs->drv_priv = ism;
+ dibs->ops = &ism_ops;
+
+ /* Enable the ism device; any interrupts and events will be ignored
+ * until dibs_dev_add() adds it to any clients.
+ */
ret = ism_dev_init(ism);
if (ret)
- goto err_resource;
+ goto err_dibs;
+
+ /* after ism_dev_init() we can call ism functions to set the gid */
+ ret = ism_read_local_gid(dibs);
+ if (ret)
+ goto err_ism;
+
+ dibs->dev.parent = &pdev->dev;
+
+ zdev = to_zpci(pdev);
+ dev_set_name(&dibs->dev, "ism%x", zdev->uid ? zdev->uid : zdev->fid);
+
+ ret = dibs_dev_add(dibs);
+ if (ret)
+ goto err_ism;
return 0;
+err_ism:
+ ism_dev_exit(ism);
+err_dibs:
+ /* pairs with dibs_dev_alloc() */
+ put_device(&dibs->dev);
err_resource:
pci_release_mem_regions(pdev);
err_disable:
pci_disable_device(pdev);
-err:
- device_del(&ism->dev);
err_dev:
dev_set_drvdata(&pdev->dev, NULL);
- put_device(&ism->dev);
+ kfree(ism);
return ret;
}
-static void ism_dev_exit(struct ism_dev *ism)
-{
- struct pci_dev *pdev = ism->pdev;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&ism->lock, flags);
- for (i = 0; i < max_client; ++i)
- ism->subs[i] = NULL;
- spin_unlock_irqrestore(&ism->lock, flags);
-
- mutex_lock(&ism_dev_list.mutex);
- mutex_lock(&clients_lock);
- for (i = 0; i < max_client; ++i) {
- if (clients[i])
- clients[i]->remove(ism);
- }
- mutex_unlock(&clients_lock);
-
- if (ism_v2_capable)
- ism_del_vlan_id(ism, ISM_RESERVED_VLANID);
- unregister_ieq(ism);
- unregister_sba(ism);
- free_irq(pci_irq_vector(pdev, 0), ism);
- kfree(ism->sba_client_arr);
- pci_free_irq_vectors(pdev);
- list_del_init(&ism->list);
- mutex_unlock(&ism_dev_list.mutex);
-}
-
static void ism_remove(struct pci_dev *pdev)
{
struct ism_dev *ism = dev_get_drvdata(&pdev->dev);
+ struct dibs_dev *dibs = ism->dibs;
+ dibs_dev_del(dibs);
ism_dev_exit(ism);
+ /* pairs with dibs_dev_alloc() */
+ put_device(&dibs->dev);
pci_release_mem_regions(pdev);
pci_disable_device(pdev);
- device_del(&ism->dev);
dev_set_drvdata(&pdev->dev, NULL);
- put_device(&ism->dev);
+ kfree(ism);
}
static struct pci_driver ism_driver = {
@@ -713,8 +703,6 @@ static int __init ism_init(void)
if (!ism_debug_info)
return -ENODEV;
- memset(clients, 0, sizeof(clients));
- max_client = 0;
debug_register_view(ism_debug_info, &debug_hex_ascii_view);
ret = pci_register_driver(&ism_driver);
if (ret)
@@ -731,150 +719,3 @@ static void __exit ism_exit(void)
module_init(ism_init);
module_exit(ism_exit);
-
-/*************************** SMC-D Implementation *****************************/
-
-#if IS_ENABLED(CONFIG_SMC)
-static int ism_query_rgid(struct ism_dev *ism, u64 rgid, u32 vid_valid,
- u32 vid)
-{
- union ism_query_rgid cmd;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.request.hdr.cmd = ISM_QUERY_RGID;
- cmd.request.hdr.len = sizeof(cmd.request);
-
- cmd.request.rgid = rgid;
- cmd.request.vlan_valid = vid_valid;
- cmd.request.vlan_id = vid;
-
- return ism_cmd(ism, &cmd);
-}
-
-static int smcd_query_rgid(struct smcd_dev *smcd, struct smcd_gid *rgid,
- u32 vid_valid, u32 vid)
-{
- return ism_query_rgid(smcd->priv, rgid->gid, vid_valid, vid);
-}
-
-static int smcd_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb,
- void *client)
-{
- return ism_register_dmb(smcd->priv, (struct ism_dmb *)dmb, client);
-}
-
-static int smcd_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
-{
- return ism_unregister_dmb(smcd->priv, (struct ism_dmb *)dmb);
-}
-
-static int smcd_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
-{
- return ism_add_vlan_id(smcd->priv, vlan_id);
-}
-
-static int smcd_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
-{
- return ism_del_vlan_id(smcd->priv, vlan_id);
-}
-
-static int smcd_set_vlan_required(struct smcd_dev *smcd)
-{
- return ism_cmd_simple(smcd->priv, ISM_SET_VLAN);
-}
-
-static int smcd_reset_vlan_required(struct smcd_dev *smcd)
-{
- return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
-}
-
-static int ism_signal_ieq(struct ism_dev *ism, u64 rgid, u32 trigger_irq,
- u32 event_code, u64 info)
-{
- union ism_sig_ieq cmd;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
- cmd.request.hdr.len = sizeof(cmd.request);
-
- cmd.request.rgid = rgid;
- cmd.request.trigger_irq = trigger_irq;
- cmd.request.event_code = event_code;
- cmd.request.info = info;
-
- return ism_cmd(ism, &cmd);
-}
-
-static int smcd_signal_ieq(struct smcd_dev *smcd, struct smcd_gid *rgid,
- u32 trigger_irq, u32 event_code, u64 info)
-{
- return ism_signal_ieq(smcd->priv, rgid->gid,
- trigger_irq, event_code, info);
-}
-
-static int smcd_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
- bool sf, unsigned int offset, void *data,
- unsigned int size)
-{
- return ism_move(smcd->priv, dmb_tok, idx, sf, offset, data, size);
-}
-
-static int smcd_supports_v2(void)
-{
- return ism_v2_capable;
-}
-
-static u64 ism_get_local_gid(struct ism_dev *ism)
-{
- return ism->local_gid;
-}
-
-static void smcd_get_local_gid(struct smcd_dev *smcd,
- struct smcd_gid *smcd_gid)
-{
- smcd_gid->gid = ism_get_local_gid(smcd->priv);
- smcd_gid->gid_ext = 0;
-}
-
-static u16 ism_get_chid(struct ism_dev *ism)
-{
- if (!ism || !ism->pdev)
- return 0;
-
- return to_zpci(ism->pdev)->pchid;
-}
-
-static u16 smcd_get_chid(struct smcd_dev *smcd)
-{
- return ism_get_chid(smcd->priv);
-}
-
-static inline struct device *smcd_get_dev(struct smcd_dev *dev)
-{
- struct ism_dev *ism = dev->priv;
-
- return &ism->dev;
-}
-
-static const struct smcd_ops ism_ops = {
- .query_remote_gid = smcd_query_rgid,
- .register_dmb = smcd_register_dmb,
- .unregister_dmb = smcd_unregister_dmb,
- .add_vlan_id = smcd_add_vlan_id,
- .del_vlan_id = smcd_del_vlan_id,
- .set_vlan_required = smcd_set_vlan_required,
- .reset_vlan_required = smcd_reset_vlan_required,
- .signal_event = smcd_signal_ieq,
- .move_data = smcd_move,
- .supports_v2 = smcd_supports_v2,
- .get_local_gid = smcd_get_local_gid,
- .get_chid = smcd_get_chid,
- .get_dev = smcd_get_dev,
-};
-
-const struct smcd_ops *ism_get_smcd_ops(void)
-{
- return &ism_ops;
-}
-EXPORT_SYMBOL_GPL(ism_get_smcd_ops);
-#endif
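One detail in the retained ism_move() path that rewards a worked example is max_bytes(): boundary - (start & (boundary - 1)) is the distance from start to the next boundary, capped by the remaining length, so a single copy never crosses a hardware boundary. A self-contained sketch with a hypothetical 4 KiB boundary:

#include <stdio.h>

static unsigned int max_bytes(unsigned int start, unsigned int len,
			      unsigned int boundary)
{
	/* bytes left until the next boundary, but no more than len */
	unsigned int to_boundary = boundary - (start & (boundary - 1));

	return to_boundary < len ? to_boundary : len;
}

int main(void)
{
	/* 0x1F00 is 256 bytes below the next 4 KiB boundary */
	printf("%u\n", max_bytes(0x1F00, 1024, 4096));	/* prints 256 */
	printf("%u\n", max_bytes(0x2000, 1024, 4096));	/* prints 1024 */
	return 0;
}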
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 883d4a12a172..a377a6f6900a 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1695,7 +1695,7 @@ out:
} /* End twa_reset_sequence() */
/* This function returns unit geometry in cylinders/heads/sectors */
-static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
+static int twa_scsi_biosparam(struct scsi_device *sdev, struct gendisk *unused, sector_t capacity, int geom[])
{
int heads, sectors, cylinders;
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index 8d4174c7107e..e319be7d369c 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1404,7 +1404,7 @@ out:
} /* End twl_reset_device_extension() */
/* This function returns unit geometry in cylinders/heads/sectors */
-static int twl_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
+static int twl_scsi_biosparam(struct scsi_device *sdev, struct gendisk *unused, sector_t capacity, int geom[])
{
int heads, sectors;
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 89bd56f78ef9..0306a228c702 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1340,7 +1340,7 @@ static int tw_reset_device_extension(TW_Device_Extension *tw_dev)
} /* End tw_reset_device_extension() */
/* This function returns unit geometry in cylinders/heads/sectors */
-static int tw_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+static int tw_scsi_biosparam(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int geom[])
{
int heads, sectors, cylinders;
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 1f100270cd38..a86d780d1ba4 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -3240,7 +3240,7 @@ static int blogic_resetadapter(struct blogic_adapter *adapter, bool hard_reset)
the BIOS, and a warning may be displayed.
*/
-static int blogic_diskparam(struct scsi_device *sdev, struct block_device *dev,
+static int blogic_diskparam(struct scsi_device *sdev, struct gendisk *disk,
sector_t capacity, int *params)
{
struct blogic_adapter *adapter =
@@ -3261,7 +3261,7 @@ static int blogic_diskparam(struct scsi_device *sdev, struct block_device *dev,
diskparam->sectors = 32;
}
diskparam->cylinders = (unsigned long) capacity / (diskparam->heads * diskparam->sectors);
- buf = scsi_bios_ptable(dev);
+ buf = scsi_bios_ptable(disk);
if (buf == NULL)
return 0;
/*
@@ -3715,7 +3715,6 @@ static void __exit blogic_exit(void)
__setup("BusLogic=", blogic_setup);
-#ifdef MODULE
/*static const struct pci_device_id blogic_pci_tbl[] = {
{ PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
@@ -3725,13 +3724,12 @@ __setup("BusLogic=", blogic_setup);
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ }
};*/
-static const struct pci_device_id blogic_pci_tbl[] = {
+static const struct pci_device_id blogic_pci_tbl[] __maybe_unused = {
{PCI_DEVICE(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER)},
{PCI_DEVICE(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC)},
{PCI_DEVICE(PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT)},
{0, },
};
-#endif
MODULE_DEVICE_TABLE(pci, blogic_pci_tbl);
module_init(blogic_init);
diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h
index 61bf26d4fc10..79de815e33b0 100644
--- a/drivers/scsi/BusLogic.h
+++ b/drivers/scsi/BusLogic.h
@@ -1273,7 +1273,7 @@ static inline void blogic_incszbucket(unsigned int *cmdsz_buckets,
static const char *blogic_drvr_info(struct Scsi_Host *);
static int blogic_qcmd(struct Scsi_Host *h, struct scsi_cmnd *);
-static int blogic_diskparam(struct scsi_device *, struct block_device *, sector_t, int *);
+static int blogic_diskparam(struct scsi_device *, struct gendisk *, sector_t, int *);
static int blogic_sdev_configure(struct scsi_device *,
struct queue_limits *lim);
static void blogic_qcompleted_ccb(struct blogic_ccb *);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 5522310bab8d..19d0884479a2 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -589,7 +589,7 @@ config XEN_SCSI_FRONTEND
config HYPERV_STORAGE
tristate "Microsoft Hyper-V virtual storage driver"
- depends on SCSI && HYPERV
+ depends on SCSI && HYPERV_VMBUS
depends on m || SCSI_FC_ATTRS != m
default HYPERV
help
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 4b12e6dd8f07..ea66196ef7c7 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -273,7 +273,7 @@ struct aac_driver_ident* aac_get_driver_ident(int devtype)
/**
* aac_biosparm - return BIOS parameters for disk
* @sdev: The scsi device corresponding to the disk
- * @bdev: the block device corresponding to the disk
+ * @disk: the gendisk corresponding to the disk
* @capacity: the sector capacity of the disk
* @geom: geometry block to fill in
*
@@ -292,7 +292,7 @@ struct aac_driver_ident* aac_get_driver_ident(int devtype)
* be displayed.
*/
-static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
+static int aac_biosparm(struct scsi_device *sdev, struct gendisk *disk,
sector_t capacity, int *geom)
{
struct diskparm *param = (struct diskparm *)geom;
@@ -324,7 +324,7 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
* entry whose end_head matches one of the standard geometry
* translations ( 64/32, 128/32, 255/63 ).
*/
- buf = scsi_bios_ptable(bdev);
+ buf = scsi_bios_ptable(disk);
if (!buf)
return 0;
if (*(__le16 *)(buf + 0x40) == cpu_to_le16(MSDOS_LABEL_MAGIC)) {
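All of the biosparam conversions in this series keep the same job: report a CHS geometry, taken from the partition table (scsi_bios_ptable()/scsi_partsize()) when possible, otherwise computed from capacity with a standard translation. A hedged sketch of the usual fallback (the exact thresholds vary per driver):

/* Sketch of the common fallback: pick a standard heads/sectors
 * translation and derive cylinders from capacity. The 1 GiB
 * threshold is illustrative; individual drivers differ. */
static void chs_fallback(unsigned long long capacity, int geom[3])
{
	int heads = 64, sectors = 32;

	if (capacity >= 0x200000ULL)	/* >= 1 GiB of 512-byte sectors */
		heads = 255, sectors = 63;
	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = (int)(capacity / (heads * sectors));
}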
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 3a2c336307c0..063e1b5818d3 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -7096,7 +7096,7 @@ static int advansys_reset(struct scsi_cmnd *scp)
* ip[2]: cylinders
*/
static int
-advansys_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+advansys_biosparam(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int ip[])
{
struct asc_board *boardp = shost_priv(sdev->host);
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index e94c0a19c435..182aa80ec4c6 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -1246,7 +1246,7 @@ int aha152x_host_reset_host(struct Scsi_Host *shpnt)
* Return the "logical geometry"
*
*/
-static int aha152x_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+static int aha152x_biosparam(struct scsi_device *sdev, struct gendisk *disk,
sector_t capacity, int *info_array)
{
struct Scsi_Host *shpnt = sdev->host;
@@ -1261,7 +1261,7 @@ static int aha152x_biosparam(struct scsi_device *sdev, struct block_device *bdev
int info[3];
/* try to figure out the geometry from the partition table */
- if (scsicam_bios_param(bdev, capacity, info) < 0 ||
+ if (scsicam_bios_param(disk, capacity, info) < 0 ||
!((info[0] == 64 && info[1] == 32) || (info[0] == 255 && info[1] == 63))) {
if (EXT_TRANS) {
printk(KERN_NOTICE
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index 389499d3e00a..371e8300f029 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -992,7 +992,7 @@ static int aha1542_host_reset(struct scsi_cmnd *cmd)
}
static int aha1542_biosparam(struct scsi_device *sdev,
- struct block_device *bdev, sector_t capacity, int geom[])
+ struct gendisk *unused, sector_t capacity, int geom[])
{
struct aha1542_hostdata *aha1542 = shost_priv(sdev->host);
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index be7ebbbb9ba8..b234621f6b37 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -510,7 +510,7 @@ static void aha1740_getconfig(unsigned int base, unsigned int *irq_level,
}
static int aha1740_biosparam(struct scsi_device *sdev,
- struct block_device *dev,
+ struct gendisk *unused,
sector_t capacity, int* ip)
{
int size = capacity;
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 17dfc3c72110..c3d1b9dd24ae 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -720,7 +720,7 @@ ahd_linux_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
* Return the disk geometry for the given SCSI device.
*/
static int
-ahd_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+ahd_linux_biosparam(struct scsi_device *sdev, struct gendisk *disk,
sector_t capacity, int geom[])
{
int heads;
@@ -731,7 +731,7 @@ ahd_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
ahd = *((struct ahd_softc **)sdev->host->hostdata);
- if (scsi_partsize(bdev, capacity, geom))
+ if (scsi_partsize(disk, capacity, geom))
return 0;
heads = 64;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index cebf8c5d0caf..8b2b98666d61 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -683,7 +683,7 @@ ahc_linux_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
* Return the disk geometry for the given SCSI device.
*/
static int
-ahc_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+ahc_linux_biosparam(struct scsi_device *sdev, struct gendisk *disk,
sector_t capacity, int geom[])
{
int heads;
@@ -696,7 +696,7 @@ ahc_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
ahc = *((struct ahc_softc **)sdev->host->hostdata);
channel = sdev_channel(sdev);
- if (scsi_partsize(bdev, capacity, geom))
+ if (scsi_partsize(disk, capacity, geom))
return 0;
heads = 64;
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index 4bfd03724ad6..b26a468ddc98 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -488,7 +488,6 @@ static int asd_build_ssp_ascb(struct asd_ascb *ascb, struct sas_task *task,
scb->ssp_task.conn_handle = cpu_to_le16(
(u16)(unsigned long)dev->lldd_dev);
scb->ssp_task.data_dir = data_dir_flags[task->data_dir];
- scb->ssp_task.retry_count = scb->ssp_task.retry_count;
ascb->tasklet_complete = asd_task_tasklet_complete;
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index fb57343a97bd..f0c5a30ce51b 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -112,7 +112,7 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
static int arcmsr_abort(struct scsi_cmnd *);
static int arcmsr_bus_reset(struct scsi_cmnd *);
static int arcmsr_bios_param(struct scsi_device *sdev,
- struct block_device *bdev, sector_t capacity, int *info);
+ struct gendisk *disk, sector_t capacity, int *info);
static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static int arcmsr_probe(struct pci_dev *pdev,
const struct pci_device_id *id);
@@ -377,11 +377,11 @@ static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
}
static int arcmsr_bios_param(struct scsi_device *sdev,
- struct block_device *bdev, sector_t capacity, int *geom)
+ struct gendisk *disk, sector_t capacity, int *geom)
{
int heads, sectors, cylinders, total_capacity;
- if (scsi_partsize(bdev, capacity, geom))
+ if (scsi_partsize(disk, capacity, geom))
return 0;
total_capacity = capacity;
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index 401242912855..df6f40b51deb 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -1692,7 +1692,7 @@ static int atp870u_show_info(struct seq_file *m, struct Scsi_Host *HBAptr)
}
-static int atp870u_biosparam(struct scsi_device *disk, struct block_device *dev,
+static int atp870u_biosparam(struct scsi_device *disk, struct gendisk *unused,
sector_t capacity, int *ip)
{
int heads, sectors, cylinders;
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index a99a101b95ef..2559df8baa05 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -1282,7 +1282,6 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;
- fwcfg->num_cqs = fwcfg->num_cqs;
fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs);
fwcfg->num_fwtio_reqs = be16_to_cpu(fwcfg->num_fwtio_reqs);
fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
index a516df019c22..010a1df37f15 100644
--- a/drivers/scsi/csiostor/csio_wr.c
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -960,7 +960,7 @@ csio_wr_copy_to_wrp(void *data_buf, struct csio_wr_pair *wrp,
memcpy((uint8_t *) wrp->addr1 + wr_off, data_buf, nbytes);
data_len -= nbytes;
- /* Write the remaining data from the begining of circular buffer */
+ /* Write the remaining data from the beginning of circular buffer */
if (data_len) {
CSIO_DB_ASSERT(data_len <= wrp->size2);
CSIO_DB_ASSERT(wrp->addr2 != NULL);
@@ -1224,7 +1224,7 @@ csio_wr_process_iq(struct csio_hw *hw, struct csio_q *q,
/*
* We need to re-arm SGE interrupts in case we got a stray interrupt,
- * especially in msix mode. With INTx, this may be a common occurence.
+ * especially in msix mode. With INTx, this may be a common occurrence.
*/
if (unlikely(!q->inc_idx)) {
CSIO_INC_STATS(q, n_stray_comp);
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index 504c4e0c5d17..c0b2a980db34 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -469,10 +469,10 @@ static int fdomain_host_reset(struct scsi_cmnd *cmd)
}
static int fdomain_biosparam(struct scsi_device *sdev,
- struct block_device *bdev, sector_t capacity,
+ struct gendisk *disk, sector_t capacity,
int geom[])
{
- unsigned char *p = scsi_bios_ptable(bdev);
+ unsigned char *p = scsi_bios_ptable(disk);
if (p && p[65] == 0xaa && p[64] == 0x55 /* Partition table valid */
&& p[4]) { /* Partition type */
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index c2fdc6553e62..1199d701c3f5 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -323,8 +323,6 @@ enum fnic_state {
FNIC_IN_ETH_TRANS_FC_MODE,
};
-struct mempool;
-
enum fnic_role_e {
FNIC_ROLE_FCP_INITIATOR = 0,
};
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index d1a4cc69d408..30a9c6612651 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -876,7 +876,7 @@ static int hisi_sas_dev_found(struct domain_device *device)
device->lldd_dev = sas_dev;
hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
- if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
+ if (dev_parent_is_expander(device)) {
int phy_no;
phy_no = sas_find_attached_phy_id(&parent_dev->ex_dev, device);
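dev_parent_is_expander() folds the repeated open-coded test into one helper; judging by the lines it replaces, it is presumably equivalent to the following (the actual helper lives in the libsas headers):

/* Presumed equivalent of the open-coded checks this series replaces. */
static inline bool dev_parent_is_expander(struct domain_device *dev)
{
	return dev->parent && dev_is_expander(dev->parent->dev_type);
}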
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 4431698a5d78..f3516a0611dd 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -925,7 +925,6 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
struct device *dev = hisi_hba->dev;
u64 qw0, device_id = sas_dev->device_id;
struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
- struct domain_device *parent_dev = device->parent;
struct asd_sas_port *sas_port = device->port;
struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
u64 sas_addr;
@@ -942,7 +941,7 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
break;
case SAS_SATA_DEV:
case SAS_SATA_PENDING:
- if (parent_dev && dev_is_expander(parent_dev->dev_type))
+ if (dev_parent_is_expander(device))
qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF;
else
qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF;
@@ -2494,7 +2493,6 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
{
struct sas_task *task = slot->task;
struct domain_device *device = task->dev;
- struct domain_device *parent_dev = device->parent;
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
struct asd_sas_port *sas_port = device->port;
@@ -2509,7 +2507,7 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
/* create header */
/* dw0 */
dw0 = port->id << CMD_HDR_PORT_OFF;
- if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
+ if (dev_parent_is_expander(device)) {
dw0 |= 3 << CMD_HDR_CMD_OFF;
} else {
phy_id = device->phy->identify.phy_identifier;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 2f3d61abab3a..2f9e01717ef3 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -874,7 +874,6 @@ static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
struct device *dev = hisi_hba->dev;
u64 qw0, device_id = sas_dev->device_id;
struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
- struct domain_device *parent_dev = device->parent;
struct asd_sas_port *sas_port = device->port;
struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
u64 sas_addr;
@@ -891,7 +890,7 @@ static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
break;
case SAS_SATA_DEV:
case SAS_SATA_PENDING:
- if (parent_dev && dev_is_expander(parent_dev->dev_type))
+ if (dev_parent_is_expander(device))
qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF;
else
qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF;
@@ -1476,7 +1475,6 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
{
struct sas_task *task = slot->task;
struct domain_device *device = task->dev;
- struct domain_device *parent_dev = device->parent;
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
struct asd_sas_port *sas_port = device->port;
@@ -1487,7 +1485,7 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
u32 dw1 = 0, dw2 = 0;
hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
- if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
+ if (dev_parent_is_expander(device)) {
hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF);
} else {
phy_id = device->phy->identify.phy_identifier;
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index c73a71ac3c29..3654b12c5d5a 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -2662,10 +2662,8 @@ static void complete_scsi_command(struct CommandList *cp)
case CMD_TARGET_STATUS:
cmd->result |= ei->ScsiStatus;
/* copy the sense data */
- if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
- sense_data_size = SCSI_SENSE_BUFFERSIZE;
- else
- sense_data_size = sizeof(ei->SenseInfo);
+ sense_data_size = min_t(unsigned long, SCSI_SENSE_BUFFERSIZE,
+ sizeof(ei->SenseInfo));
if (ei->SenseLen < sense_data_size)
sense_data_size = ei->SenseLen;
memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
@@ -3628,10 +3626,7 @@ static bool hpsa_vpd_page_supported(struct ctlr_info *h,
if (rc != 0)
goto exit_unsupported;
pages = buf[3];
- if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
- bufsize = pages + HPSA_VPD_HEADER_SZ;
- else
- bufsize = 255;
+ bufsize = min(pages + HPSA_VPD_HEADER_SZ, 255);
/* Get the whole VPD page list */
rc = hpsa_scsi_do_inquiry(h, scsi3addr,
@@ -6407,18 +6402,14 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h,
return -EINVAL;
}
if (iocommand->buf_size > 0) {
- buff = kmalloc(iocommand->buf_size, GFP_KERNEL);
- if (buff == NULL)
- return -ENOMEM;
if (iocommand->Request.Type.Direction & XFER_WRITE) {
- /* Copy the data into the buffer we created */
- if (copy_from_user(buff, iocommand->buf,
- iocommand->buf_size)) {
- rc = -EFAULT;
- goto out_kfree;
- }
+ buff = memdup_user(iocommand->buf, iocommand->buf_size);
+ if (IS_ERR(buff))
+ return PTR_ERR(buff);
} else {
- memset(buff, 0, iocommand->buf_size);
+ buff = kzalloc(iocommand->buf_size, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
}
}
c = cmd_alloc(h);
@@ -6478,7 +6469,6 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h,
}
out:
cmd_free(h, c);
-out_kfree:
kfree(buff);
return rc;
}
@@ -6522,18 +6512,21 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h,
while (left) {
sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
buff_size[sg_used] = sz;
- buff[sg_used] = kmalloc(sz, GFP_KERNEL);
- if (buff[sg_used] == NULL) {
- status = -ENOMEM;
- goto cleanup1;
- }
+
if (ioc->Request.Type.Direction & XFER_WRITE) {
- if (copy_from_user(buff[sg_used], data_ptr, sz)) {
- status = -EFAULT;
+ buff[sg_used] = memdup_user(data_ptr, sz);
+ if (IS_ERR(buff[sg_used])) {
+ status = PTR_ERR(buff[sg_used]);
goto cleanup1;
}
- } else
- memset(buff[sg_used], 0, sz);
+ } else {
+ buff[sg_used] = kzalloc(sz, GFP_KERNEL);
+ if (!buff[sg_used]) {
+ status = -ENOMEM;
+ goto cleanup1;
+ }
+ }
+
left -= sz;
data_ptr += sz;
sg_used++;
@@ -7632,8 +7625,8 @@ static void hpsa_free_cfgtables(struct ctlr_info *h)
}
/* Find and map CISS config table and transfer table
-+ * several items must be unmapped (freed) later
-+ * */
+ * several items must be unmapped (freed) later
+ */
static int hpsa_find_cfgtables(struct ctlr_info *h)
{
u64 cfg_offset;
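The hpsa ioctl paths now pick the allocation primitive by transfer direction: memdup_user() for writes (allocation, copy_from_user() and ERR_PTR() error handling in one call), kzalloc() for reads. A minimal sketch of the pattern, with a hypothetical helper name:

/* Sketch of the direction-dependent allocation pattern; the helper
 * name is hypothetical. */
static void *setup_ioctl_buf(void __user *ubuf, size_t len, bool is_write)
{
	void *buf;

	if (is_write) {
		buf = memdup_user(ubuf, len);	/* alloc + copy, ERR_PTR on failure */
		if (IS_ERR(buf))
			return buf;
	} else {
		buf = kzalloc(len, GFP_KERNEL);	/* zeroed scratch for read-back */
		if (!buf)
			return ERR_PTR(-ENOMEM);
	}
	return buf;
}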
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 0821cf994b98..5c602c057798 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -954,7 +954,7 @@ static DEF_SCSI_QCMD(imm_queuecommand)
* be done in sd.c. Even if it gets fixed there, this will still
* work.
*/
-static int imm_biosparam(struct scsi_device *sdev, struct block_device *dev,
+static int imm_biosparam(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int ip[])
{
ip[0] = 0x40;
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 8648bd965287..ed34ad92c807 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -2645,7 +2645,7 @@ static int i91u_bus_reset(struct scsi_cmnd * cmnd)
/**
* i91u_biosparam - return the "logical geometry
* @sdev: SCSI device
- * @dev: Matching block device
+ * @unused: Matching gendisk
* @capacity: Sector size of drive
* @info_array: Return space for BIOS geometry
*
@@ -2655,7 +2655,7 @@ static int i91u_bus_reset(struct scsi_cmnd * cmnd)
* FIXME: limited to 2^32 sector devices.
*/
-static int i91u_biosparam(struct scsi_device *sdev, struct block_device *dev,
+static int i91u_biosparam(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int *info_array)
{
struct initio_host *host; /* Point to Host adapter control block */
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index d06b79f03538..44214884deaf 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -4281,11 +4281,11 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
}
if (ioa_cfg->sis64)
- ioa_data = vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES,
- sizeof(__be32 *)));
+ ioa_data = vmalloc_array(IPR_FMT3_MAX_NUM_DUMP_PAGES,
+ sizeof(__be32 *));
else
- ioa_data = vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES,
- sizeof(__be32 *)));
+ ioa_data = vmalloc_array(IPR_FMT2_MAX_NUM_DUMP_PAGES,
+ sizeof(__be32 *));
if (!ioa_data) {
ipr_err("Dump memory allocation failed\n");
@@ -4644,10 +4644,10 @@ ATTRIBUTE_GROUPS(ipr_dev);
/**
* ipr_biosparam - Return the HSC mapping
- * @sdev: scsi device struct
- * @block_device: block device pointer
+ * @sdev: scsi device struct
+ * @unused: gendisk pointer
* @capacity: capacity of the device
- * @parm: Array containing returned HSC values.
+ * @parm: Array containing returned HSC values.
*
* This function generates the HSC parms that fdisk uses.
* We want to make sure we return something that places partitions
@@ -4657,7 +4657,7 @@ ATTRIBUTE_GROUPS(ipr_dev);
* 0 on success
**/
static int ipr_biosparam(struct scsi_device *sdev,
- struct block_device *block_device,
+ struct gendisk *unused,
sector_t capacity, int *parm)
{
int heads, sectors;
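vmalloc_array() performs the same overflow-checked multiplication that array_size() provides, but inside the allocator, so the ipr call sites above lose the helper without losing the check. A minimal sketch under that assumption, with a hypothetical page count:

    #include <linux/types.h>
    #include <linux/vmalloc.h>

    #define EXAMPLE_NUM_PAGES 1024 /* hypothetical count */

    static __be32 **example_alloc_page_table(void)
    {
            /* Same as vmalloc(array_size(EXAMPLE_NUM_PAGES, sizeof(__be32 *)))
             * but the multiplication is overflow-checked by the allocator.
             */
            return vmalloc_array(EXAMPLE_NUM_PAGES, sizeof(__be32 *));
    }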
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 94adb6ac02a4..3393a288fd23 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -1123,7 +1123,7 @@ static DEF_SCSI_QCMD(ips_queue)
/* Set bios geometry for the controller */
/* */
/****************************************************************************/
-static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+static int ips_biosparam(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int geom[])
{
ips_ha_t *ha = (ips_ha_t *) sdev->host->hostdata;
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
index 8ac932ec4444..30a4d4a580e9 100644
--- a/drivers/scsi/ips.h
+++ b/drivers/scsi/ips.h
@@ -398,7 +398,7 @@
/*
* Scsi_Host Template
*/
- static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+ static int ips_biosparam(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int geom[]);
static int ips_sdev_configure(struct scsi_device *SDptr,
struct queue_limits *lim);
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index 82deb6a83a8c..4c7462965ea1 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -1434,7 +1434,7 @@ static enum sci_status isci_remote_device_construct(struct isci_port *iport,
struct domain_device *dev = idev->domain_dev;
enum sci_status status;
- if (dev->parent && dev_is_expander(dev->parent->dev_type))
+ if (dev_parent_is_expander(dev))
status = sci_remote_device_ea_construct(iport, idev);
else
status = sci_remote_device_da_construct(iport, idev);
diff --git a/drivers/scsi/libfc/fc_encode.h b/drivers/scsi/libfc/fc_encode.h
index 02e31db31d68..e046091a549a 100644
--- a/drivers/scsi/libfc/fc_encode.h
+++ b/drivers/scsi/libfc/fc_encode.h
@@ -356,7 +356,7 @@ static inline int fc_ct_ms_fill(struct fc_lport *lport,
put_unaligned_be16(len, &entry->len);
snprintf((char *)&entry->value,
FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN,
- "%s v%s",
+ "%.62s v%.62s",
init_utsname()->sysname,
init_utsname()->release);
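The "%.62s" precision caps each utsname field so the worst case, 62 + 2 + 62 bytes plus the terminating NUL, provably fits the destination and keeps the compiler's format-truncation warning quiet. A userspace-style sketch of the idiom, with a hypothetical buffer size:

    #include <stdio.h>

    #define ATTR_LEN 128 /* hypothetical destination size */

    static void fill_attr(char *dst, const char *sysname, const char *release)
    {
            /* "%.62s" emits at most 62 bytes per string:
             * 62 + strlen(" v") + 62 + 1 == 127 <= ATTR_LEN.
             */
            snprintf(dst, ATTR_LEN, "%.62s v%.62s", sysname, release);
    }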
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 869b5d4db44c..d953225f6cc2 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -1313,10 +1313,7 @@ static int sas_check_parent_topology(struct domain_device *child)
int i;
int res = 0;
- if (!child->parent)
- return 0;
-
- if (!dev_is_expander(child->parent->dev_type))
+ if (!dev_parent_is_expander(child))
return 0;
parent_ex = &child->parent->ex_dev;
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 928723c90b75..ffa5b49aaf08 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -845,7 +845,7 @@ int sas_change_queue_depth(struct scsi_device *sdev, int depth)
EXPORT_SYMBOL_GPL(sas_change_queue_depth);
int sas_bios_param(struct scsi_device *scsi_dev,
- struct block_device *bdev,
+ struct gendisk *unused,
sector_t capacity, int *hsc)
{
hsc[0] = 255;
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index fe4fb67eb50c..224edacf2d8e 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -661,15 +661,12 @@ struct lpfc_vport {
uint32_t num_disc_nodes; /* in addition to hba_state */
uint32_t gidft_inp; /* cnt of outstanding GID_FTs */
- uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */
uint32_t fc_rscn_id_cnt; /* count of RSCNs payloads in list */
uint32_t fc_rscn_flush; /* flag use of fc_rscn_id_list */
struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN];
struct lpfc_name fc_nodename; /* fc nodename */
struct lpfc_name fc_portname; /* fc portname */
- struct lpfc_work_evt disc_timeout_evt;
-
struct timer_list fc_disctmo; /* Discovery rescue timer */
uint8_t fc_ns_retry; /* retries for fabric nameserver */
uint32_t fc_prli_sent; /* cntr for outstanding PRLIs */
@@ -744,12 +741,6 @@ struct lpfc_vport {
struct lpfc_vmid_priority_info vmid_priority;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- struct dentry *debug_disc_trc;
- struct dentry *debug_nodelist;
- struct dentry *debug_nvmestat;
- struct dentry *debug_scsistat;
- struct dentry *debug_ioktime;
- struct dentry *debug_hdwqstat;
struct dentry *vport_debugfs_root;
struct lpfc_debugfs_trc *disc_trc;
atomic_t disc_trc_cnt;
@@ -767,7 +758,6 @@ struct lpfc_vport {
/* There is a single nvme instance per vport. */
struct nvme_fc_local_port *localport;
uint8_t nvmei_support; /* driver supports NVME Initiator */
- uint32_t last_fcp_wqidx;
uint32_t rcv_flogi_cnt; /* How many unsol FLOGIs ACK'd. */
};
@@ -1060,8 +1050,6 @@ struct lpfc_hba {
struct lpfc_dmabuf hbqslimp;
- uint16_t pci_cfg_value;
-
uint8_t fc_linkspeed; /* Link speed after last READ_LA */
uint32_t fc_eventTag; /* event tag for link attention */
@@ -1088,7 +1076,6 @@ struct lpfc_hba {
struct lpfc_stats fc_stat;
- struct lpfc_nodelist fc_fcpnodev; /* nodelist entry for no device */
uint32_t nport_event_cnt; /* timestamp for nlplist entry */
uint8_t wwnn[8];
@@ -1229,9 +1216,6 @@ struct lpfc_hba {
uint32_t hbq_count; /* Count of configured HBQs */
struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indices */
- atomic_t fcp_qidx; /* next FCP WQ (RR Policy) */
- atomic_t nvme_qidx; /* next NVME WQ (RR Policy) */
-
phys_addr_t pci_bar0_map; /* Physical address for PCI BAR0 */
phys_addr_t pci_bar1_map; /* Physical address for PCI BAR1 */
phys_addr_t pci_bar2_map; /* Physical address for PCI BAR2 */
@@ -1348,30 +1332,9 @@ struct lpfc_hba {
unsigned long last_ramp_down_time;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
struct dentry *hba_debugfs_root;
- atomic_t debugfs_vport_count;
- struct dentry *debug_multixri_pools;
- struct dentry *debug_hbqinfo;
- struct dentry *debug_dumpHostSlim;
- struct dentry *debug_dumpHBASlim;
- struct dentry *debug_InjErrLBA; /* LBA to inject errors at */
- struct dentry *debug_InjErrNPortID; /* NPortID to inject errors at */
- struct dentry *debug_InjErrWWPN; /* WWPN to inject errors at */
- struct dentry *debug_writeGuard; /* inject write guard_tag errors */
- struct dentry *debug_writeApp; /* inject write app_tag errors */
- struct dentry *debug_writeRef; /* inject write ref_tag errors */
- struct dentry *debug_readGuard; /* inject read guard_tag errors */
- struct dentry *debug_readApp; /* inject read app_tag errors */
- struct dentry *debug_readRef; /* inject read ref_tag errors */
-
- struct dentry *debug_nvmeio_trc;
+ unsigned int debugfs_vport_count;
+
struct lpfc_debugfs_nvmeio_trc *nvmeio_trc;
- struct dentry *debug_hdwqinfo;
-#ifdef LPFC_HDWQ_LOCK_STAT
- struct dentry *debug_lockstat;
-#endif
- struct dentry *debug_cgn_buffer;
- struct dentry *debug_rx_monitor;
- struct dentry *debug_ras_log;
atomic_t nvmeio_trc_cnt;
uint32_t nvmeio_trc_size;
uint32_t nvmeio_trc_output_idx;
@@ -1388,19 +1351,10 @@ struct lpfc_hba {
sector_t lpfc_injerr_lba;
#define LPFC_INJERR_LBA_OFF (sector_t)(-1)
- struct dentry *debug_slow_ring_trc;
struct lpfc_debugfs_trc *slow_ring_trc;
atomic_t slow_ring_trc_cnt;
/* iDiag debugfs sub-directory */
struct dentry *idiag_root;
- struct dentry *idiag_pci_cfg;
- struct dentry *idiag_bar_acc;
- struct dentry *idiag_que_info;
- struct dentry *idiag_que_acc;
- struct dentry *idiag_drb_acc;
- struct dentry *idiag_ctl_acc;
- struct dentry *idiag_mbx_acc;
- struct dentry *idiag_ext_acc;
uint8_t lpfc_idiag_last_eq;
#endif
uint16_t nvmeio_trc_on;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 2db8d9529b8f..92b5b2dbe847 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -2373,93 +2373,117 @@ out:
static ssize_t
lpfc_debugfs_dif_err_read(struct file *file, char __user *buf,
- size_t nbytes, loff_t *ppos)
+ size_t nbytes, loff_t *ppos)
{
struct lpfc_hba *phba = file->private_data;
int kind = debugfs_get_aux_num(file);
- char cbuf[32];
- uint64_t tmp = 0;
+ char cbuf[32] = {0};
int cnt = 0;
- if (kind == writeGuard)
- cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wgrd_cnt);
- else if (kind == writeApp)
- cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wapp_cnt);
- else if (kind == writeRef)
- cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wref_cnt);
- else if (kind == readGuard)
- cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rgrd_cnt);
- else if (kind == readApp)
- cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rapp_cnt);
- else if (kind == readRef)
- cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rref_cnt);
- else if (kind == InjErrNPortID)
- cnt = scnprintf(cbuf, 32, "0x%06x\n",
+ switch (kind) {
+ case writeGuard:
+ cnt = scnprintf(cbuf, sizeof(cbuf), "%u\n",
+ phba->lpfc_injerr_wgrd_cnt);
+ break;
+ case writeApp:
+ cnt = scnprintf(cbuf, sizeof(cbuf), "%u\n",
+ phba->lpfc_injerr_wapp_cnt);
+ break;
+ case writeRef:
+ cnt = scnprintf(cbuf, sizeof(cbuf), "%u\n",
+ phba->lpfc_injerr_wref_cnt);
+ break;
+ case readGuard:
+ cnt = scnprintf(cbuf, sizeof(cbuf), "%u\n",
+ phba->lpfc_injerr_rgrd_cnt);
+ break;
+ case readApp:
+ cnt = scnprintf(cbuf, sizeof(cbuf), "%u\n",
+ phba->lpfc_injerr_rapp_cnt);
+ break;
+ case readRef:
+ cnt = scnprintf(cbuf, sizeof(cbuf), "%u\n",
+ phba->lpfc_injerr_rref_cnt);
+ break;
+ case InjErrNPortID:
+ cnt = scnprintf(cbuf, sizeof(cbuf), "0x%06x\n",
phba->lpfc_injerr_nportid);
- else if (kind == InjErrWWPN) {
- memcpy(&tmp, &phba->lpfc_injerr_wwpn, sizeof(struct lpfc_name));
- tmp = cpu_to_be64(tmp);
- cnt = scnprintf(cbuf, 32, "0x%016llx\n", tmp);
- } else if (kind == InjErrLBA) {
- if (phba->lpfc_injerr_lba == (sector_t)(-1))
- cnt = scnprintf(cbuf, 32, "off\n");
+ break;
+ case InjErrWWPN:
+ cnt = scnprintf(cbuf, sizeof(cbuf), "0x%016llx\n",
+ be64_to_cpu(phba->lpfc_injerr_wwpn.u.wwn_be));
+ break;
+ case InjErrLBA:
+ if (phba->lpfc_injerr_lba == LPFC_INJERR_LBA_OFF)
+ cnt = scnprintf(cbuf, sizeof(cbuf), "off\n");
else
- cnt = scnprintf(cbuf, 32, "0x%llx\n",
- (uint64_t) phba->lpfc_injerr_lba);
- } else
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0547 Unknown debugfs error injection entry\n");
+ cnt = scnprintf(cbuf, sizeof(cbuf), "0x%llx\n",
+ (uint64_t)phba->lpfc_injerr_lba);
+ break;
+ default:
+ lpfc_log_msg(phba, KERN_WARNING, LOG_INIT,
+ "0547 Unknown debugfs error injection entry\n");
+ break;
+ }
return simple_read_from_buffer(buf, nbytes, ppos, &cbuf, cnt);
}
static ssize_t
lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
- size_t nbytes, loff_t *ppos)
+ size_t nbytes, loff_t *ppos)
{
struct lpfc_hba *phba = file->private_data;
int kind = debugfs_get_aux_num(file);
- char dstbuf[33];
- uint64_t tmp = 0;
- int size;
+ char dstbuf[33] = {0};
+ unsigned long long tmp;
+ unsigned long size;
- memset(dstbuf, 0, 33);
- size = (nbytes < 32) ? nbytes : 32;
+ size = (nbytes < (sizeof(dstbuf) - 1)) ? nbytes : (sizeof(dstbuf) - 1);
if (copy_from_user(dstbuf, buf, size))
return -EFAULT;
- if (kind == InjErrLBA) {
- if ((dstbuf[0] == 'o') && (dstbuf[1] == 'f') &&
- (dstbuf[2] == 'f'))
- tmp = (uint64_t)(-1);
+ if (kstrtoull(dstbuf, 0, &tmp)) {
+ if (kind != InjErrLBA || !strstr(dstbuf, "off"))
+ return -EINVAL;
}
- if ((tmp == 0) && (kstrtoull(dstbuf, 0, &tmp)))
- return -EINVAL;
-
- if (kind == writeGuard)
+ switch (kind) {
+ case writeGuard:
phba->lpfc_injerr_wgrd_cnt = (uint32_t)tmp;
- else if (kind == writeApp)
+ break;
+ case writeApp:
phba->lpfc_injerr_wapp_cnt = (uint32_t)tmp;
- else if (kind == writeRef)
+ break;
+ case writeRef:
phba->lpfc_injerr_wref_cnt = (uint32_t)tmp;
- else if (kind == readGuard)
+ break;
+ case readGuard:
phba->lpfc_injerr_rgrd_cnt = (uint32_t)tmp;
- else if (kind == readApp)
+ break;
+ case readApp:
phba->lpfc_injerr_rapp_cnt = (uint32_t)tmp;
- else if (kind == readRef)
+ break;
+ case readRef:
phba->lpfc_injerr_rref_cnt = (uint32_t)tmp;
- else if (kind == InjErrLBA)
- phba->lpfc_injerr_lba = (sector_t)tmp;
- else if (kind == InjErrNPortID)
+ break;
+ case InjErrLBA:
+ if (strstr(dstbuf, "off"))
+ phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+ else
+ phba->lpfc_injerr_lba = (sector_t)tmp;
+ break;
+ case InjErrNPortID:
phba->lpfc_injerr_nportid = (uint32_t)(tmp & Mask_DID);
- else if (kind == InjErrWWPN) {
- tmp = cpu_to_be64(tmp);
- memcpy(&phba->lpfc_injerr_wwpn, &tmp, sizeof(struct lpfc_name));
- } else
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0548 Unknown debugfs error injection entry\n");
-
+ break;
+ case InjErrWWPN:
+ phba->lpfc_injerr_wwpn.u.wwn_be = cpu_to_be64(tmp);
+ break;
+ default:
+ lpfc_log_msg(phba, KERN_WARNING, LOG_INIT,
+ "0548 Unknown debugfs error injection entry\n");
+ break;
+ }
return nbytes;
}
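The rewritten write handler parses the input once with kstrtoull() and tolerates a parse failure only for the InjErrLBA file, which may legitimately receive the literal "off". A minimal sketch of that parse-then-special-case shape, with hypothetical names:

    #include <linux/kernel.h>
    #include <linux/string.h>

    /* Numeric input is required unless this is the LBA knob, which also
     * accepts "off"; *off tells the caller which case was hit.
     */
    static int example_parse(const char *buf, bool is_lba_knob,
                             unsigned long long *val, bool *off)
    {
            *off = false;
            if (kstrtoull(buf, 0, val)) {
                    if (!is_lba_knob || !strstr(buf, "off"))
                            return -EINVAL;
                    *off = true;
            }
            return 0;
    }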
@@ -5728,7 +5752,7 @@ static const struct file_operations lpfc_debugfs_op_slow_ring_trc = {
};
static struct dentry *lpfc_debugfs_root = NULL;
-static atomic_t lpfc_debugfs_hba_count;
+static unsigned int lpfc_debugfs_hba_count;
/*
* File operations for the iDiag debugfs
@@ -6050,7 +6074,12 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
/* Setup lpfc root directory */
if (!lpfc_debugfs_root) {
lpfc_debugfs_root = debugfs_create_dir("lpfc", NULL);
- atomic_set(&lpfc_debugfs_hba_count, 0);
+ lpfc_debugfs_hba_count = 0;
+ if (IS_ERR(lpfc_debugfs_root)) {
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_INIT,
+ "0527 Cannot create debugfs lpfc\n");
+ return;
+ }
}
if (!lpfc_debugfs_start_time)
lpfc_debugfs_start_time = jiffies;
@@ -6061,150 +6090,96 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
pport_setup = true;
phba->hba_debugfs_root =
debugfs_create_dir(name, lpfc_debugfs_root);
- atomic_inc(&lpfc_debugfs_hba_count);
- atomic_set(&phba->debugfs_vport_count, 0);
+ phba->debugfs_vport_count = 0;
+ if (IS_ERR(phba->hba_debugfs_root)) {
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_INIT,
+ "0528 Cannot create debugfs %s\n", name);
+ return;
+ }
+ lpfc_debugfs_hba_count++;
/* Multi-XRI pools */
- snprintf(name, sizeof(name), "multixripools");
- phba->debug_multixri_pools =
- debugfs_create_file(name, S_IFREG | 0644,
- phba->hba_debugfs_root,
- phba,
- &lpfc_debugfs_op_multixripools);
- if (IS_ERR(phba->debug_multixri_pools)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0527 Cannot create debugfs multixripools\n");
- goto debug_failed;
- }
+ debugfs_create_file("multixripools", 0644,
+ phba->hba_debugfs_root, phba,
+ &lpfc_debugfs_op_multixripools);
/* Congestion Info Buffer */
- scnprintf(name, sizeof(name), "cgn_buffer");
- phba->debug_cgn_buffer =
- debugfs_create_file(name, S_IFREG | 0644,
- phba->hba_debugfs_root,
- phba, &lpfc_cgn_buffer_op);
- if (IS_ERR(phba->debug_cgn_buffer)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "6527 Cannot create debugfs "
- "cgn_buffer\n");
- goto debug_failed;
- }
+ debugfs_create_file("cgn_buffer", 0644, phba->hba_debugfs_root,
+ phba, &lpfc_cgn_buffer_op);
/* RX Monitor */
- scnprintf(name, sizeof(name), "rx_monitor");
- phba->debug_rx_monitor =
- debugfs_create_file(name, S_IFREG | 0644,
- phba->hba_debugfs_root,
- phba, &lpfc_rx_monitor_op);
- if (IS_ERR(phba->debug_rx_monitor)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "6528 Cannot create debugfs "
- "rx_monitor\n");
- goto debug_failed;
- }
+ debugfs_create_file("rx_monitor", 0644, phba->hba_debugfs_root,
+ phba, &lpfc_rx_monitor_op);
/* RAS log */
- snprintf(name, sizeof(name), "ras_log");
- phba->debug_ras_log =
- debugfs_create_file(name, 0644,
- phba->hba_debugfs_root,
- phba, &lpfc_debugfs_ras_log);
- if (IS_ERR(phba->debug_ras_log)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "6148 Cannot create debugfs"
- " ras_log\n");
- goto debug_failed;
- }
+ debugfs_create_file("ras_log", 0644, phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_ras_log);
/* Setup hbqinfo */
- snprintf(name, sizeof(name), "hbqinfo");
- phba->debug_hbqinfo =
- debugfs_create_file(name, S_IFREG | 0644,
- phba->hba_debugfs_root,
- phba, &lpfc_debugfs_op_hbqinfo);
+ debugfs_create_file("hbqinfo", 0644, phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_hbqinfo);
#ifdef LPFC_HDWQ_LOCK_STAT
/* Setup lockstat */
- snprintf(name, sizeof(name), "lockstat");
- phba->debug_lockstat =
- debugfs_create_file(name, S_IFREG | 0644,
- phba->hba_debugfs_root,
- phba, &lpfc_debugfs_op_lockstat);
- if (IS_ERR(phba->debug_lockstat)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "4610 Can't create debugfs lockstat\n");
- goto debug_failed;
- }
+ debugfs_create_file("lockstat", 0644, phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_lockstat);
#endif
-
- /* Setup dumpHBASlim */
if (phba->sli_rev < LPFC_SLI_REV4) {
- snprintf(name, sizeof(name), "dumpHBASlim");
- phba->debug_dumpHBASlim =
- debugfs_create_file(name,
- S_IFREG|S_IRUGO|S_IWUSR,
- phba->hba_debugfs_root,
- phba, &lpfc_debugfs_op_dumpHBASlim);
- } else
- phba->debug_dumpHBASlim = NULL;
+ /* Setup dumpHBASlim */
+ debugfs_create_file("dumpHBASlim", 0644,
+ phba->hba_debugfs_root, phba,
+ &lpfc_debugfs_op_dumpHBASlim);
+ }
- /* Setup dumpHostSlim */
if (phba->sli_rev < LPFC_SLI_REV4) {
- snprintf(name, sizeof(name), "dumpHostSlim");
- phba->debug_dumpHostSlim =
- debugfs_create_file(name,
- S_IFREG|S_IRUGO|S_IWUSR,
- phba->hba_debugfs_root,
- phba, &lpfc_debugfs_op_dumpHostSlim);
- } else
- phba->debug_dumpHostSlim = NULL;
+ /* Setup dumpHostSlim */
+ debugfs_create_file("dumpHostSlim", 0644,
+ phba->hba_debugfs_root, phba,
+ &lpfc_debugfs_op_dumpHostSlim);
+ }
/* Setup DIF Error Injections */
- phba->debug_InjErrLBA =
- debugfs_create_file_aux_num("InjErrLBA", 0644,
- phba->hba_debugfs_root,
- phba, InjErrLBA, &lpfc_debugfs_op_dif_err);
+ debugfs_create_file_aux_num("InjErrLBA", 0644,
+ phba->hba_debugfs_root, phba,
+ InjErrLBA,
+ &lpfc_debugfs_op_dif_err);
phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
- phba->debug_InjErrNPortID =
- debugfs_create_file_aux_num("InjErrNPortID", 0644,
- phba->hba_debugfs_root,
- phba, InjErrNPortID, &lpfc_debugfs_op_dif_err);
-
- phba->debug_InjErrWWPN =
- debugfs_create_file_aux_num("InjErrWWPN", 0644,
- phba->hba_debugfs_root,
- phba, InjErrWWPN, &lpfc_debugfs_op_dif_err);
-
- phba->debug_writeGuard =
- debugfs_create_file_aux_num("writeGuardInjErr", 0644,
- phba->hba_debugfs_root,
- phba, writeGuard, &lpfc_debugfs_op_dif_err);
-
- phba->debug_writeApp =
- debugfs_create_file_aux_num("writeAppInjErr", 0644,
- phba->hba_debugfs_root,
- phba, writeApp, &lpfc_debugfs_op_dif_err);
-
- phba->debug_writeRef =
- debugfs_create_file_aux_num("writeRefInjErr", 0644,
- phba->hba_debugfs_root,
- phba, writeRef, &lpfc_debugfs_op_dif_err);
-
- phba->debug_readGuard =
- debugfs_create_file_aux_num("readGuardInjErr", 0644,
- phba->hba_debugfs_root,
- phba, readGuard, &lpfc_debugfs_op_dif_err);
-
- phba->debug_readApp =
- debugfs_create_file_aux_num("readAppInjErr", 0644,
- phba->hba_debugfs_root,
- phba, readApp, &lpfc_debugfs_op_dif_err);
-
- phba->debug_readRef =
- debugfs_create_file_aux_num("readRefInjErr", 0644,
- phba->hba_debugfs_root,
- phba, readRef, &lpfc_debugfs_op_dif_err);
+ debugfs_create_file_aux_num("InjErrNPortID", 0644,
+ phba->hba_debugfs_root, phba,
+ InjErrNPortID,
+ &lpfc_debugfs_op_dif_err);
+
+ debugfs_create_file_aux_num("InjErrWWPN", 0644,
+ phba->hba_debugfs_root, phba,
+ InjErrWWPN,
+ &lpfc_debugfs_op_dif_err);
+
+ debugfs_create_file_aux_num("writeGuardInjErr", 0644,
+ phba->hba_debugfs_root, phba,
+ writeGuard,
+ &lpfc_debugfs_op_dif_err);
+
+ debugfs_create_file_aux_num("writeAppInjErr", 0644,
+ phba->hba_debugfs_root, phba,
+ writeApp, &lpfc_debugfs_op_dif_err);
+
+ debugfs_create_file_aux_num("writeRefInjErr", 0644,
+ phba->hba_debugfs_root, phba,
+ writeRef, &lpfc_debugfs_op_dif_err);
+
+ debugfs_create_file_aux_num("readGuardInjErr", 0644,
+ phba->hba_debugfs_root, phba,
+ readGuard,
+ &lpfc_debugfs_op_dif_err);
+
+ debugfs_create_file_aux_num("readAppInjErr", 0644,
+ phba->hba_debugfs_root, phba,
+ readApp, &lpfc_debugfs_op_dif_err);
+
+ debugfs_create_file_aux_num("readRefInjErr", 0644,
+ phba->hba_debugfs_root, phba,
+ readRef, &lpfc_debugfs_op_dif_err);
/* Setup slow ring trace */
if (lpfc_debugfs_max_slow_ring_trc) {
@@ -6224,11 +6199,9 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
}
}
- snprintf(name, sizeof(name), "slow_ring_trace");
- phba->debug_slow_ring_trc =
- debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
- phba->hba_debugfs_root,
- phba, &lpfc_debugfs_op_slow_ring_trc);
+ debugfs_create_file("slow_ring_trace", 0644,
+ phba->hba_debugfs_root, phba,
+ &lpfc_debugfs_op_slow_ring_trc);
if (!phba->slow_ring_trc) {
phba->slow_ring_trc = kcalloc(
lpfc_debugfs_max_slow_ring_trc,
@@ -6238,16 +6211,13 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0416 Cannot create debugfs "
"slow_ring buffer\n");
- goto debug_failed;
+ goto out;
}
atomic_set(&phba->slow_ring_trc_cnt, 0);
}
- snprintf(name, sizeof(name), "nvmeio_trc");
- phba->debug_nvmeio_trc =
- debugfs_create_file(name, 0644,
- phba->hba_debugfs_root,
- phba, &lpfc_debugfs_op_nvmeio_trc);
+ debugfs_create_file("nvmeio_trc", 0644, phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_nvmeio_trc);
atomic_set(&phba->nvmeio_trc_cnt, 0);
if (lpfc_debugfs_max_nvmeio_trc) {
@@ -6280,7 +6250,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
}
phba->nvmeio_trc_on = 1;
phba->nvmeio_trc_output_idx = 0;
- phba->nvmeio_trc = NULL;
} else {
nvmeio_off:
phba->nvmeio_trc_size = 0;
@@ -6294,7 +6263,12 @@ nvmeio_off:
if (!vport->vport_debugfs_root) {
vport->vport_debugfs_root =
debugfs_create_dir(name, phba->hba_debugfs_root);
- atomic_inc(&phba->debugfs_vport_count);
+ if (IS_ERR(vport->vport_debugfs_root)) {
+ lpfc_vlog_msg(vport, KERN_WARNING, LOG_INIT,
+ "0529 Cannot create debugfs %s\n", name);
+ return;
+ }
+ phba->debugfs_vport_count++;
}
if (lpfc_debugfs_max_disc_trc) {
@@ -6321,54 +6295,27 @@ nvmeio_off:
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0418 Cannot create debugfs disc trace "
"buffer\n");
- goto debug_failed;
+ goto out;
}
atomic_set(&vport->disc_trc_cnt, 0);
- snprintf(name, sizeof(name), "discovery_trace");
- vport->debug_disc_trc =
- debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
- vport->vport_debugfs_root,
- vport, &lpfc_debugfs_op_disc_trc);
- snprintf(name, sizeof(name), "nodelist");
- vport->debug_nodelist =
- debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
- vport->vport_debugfs_root,
- vport, &lpfc_debugfs_op_nodelist);
-
- snprintf(name, sizeof(name), "nvmestat");
- vport->debug_nvmestat =
- debugfs_create_file(name, 0644,
- vport->vport_debugfs_root,
- vport, &lpfc_debugfs_op_nvmestat);
-
- snprintf(name, sizeof(name), "scsistat");
- vport->debug_scsistat =
- debugfs_create_file(name, 0644,
- vport->vport_debugfs_root,
- vport, &lpfc_debugfs_op_scsistat);
- if (IS_ERR(vport->debug_scsistat)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "4611 Cannot create debugfs scsistat\n");
- goto debug_failed;
- }
+ debugfs_create_file("discovery_trace", 0644, vport->vport_debugfs_root,
+ vport, &lpfc_debugfs_op_disc_trc);
- snprintf(name, sizeof(name), "ioktime");
- vport->debug_ioktime =
- debugfs_create_file(name, 0644,
- vport->vport_debugfs_root,
- vport, &lpfc_debugfs_op_ioktime);
- if (IS_ERR(vport->debug_ioktime)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0815 Cannot create debugfs ioktime\n");
- goto debug_failed;
- }
+ debugfs_create_file("nodelist", 0644, vport->vport_debugfs_root, vport,
+ &lpfc_debugfs_op_nodelist);
+
+ debugfs_create_file("nvmestat", 0644, vport->vport_debugfs_root, vport,
+ &lpfc_debugfs_op_nvmestat);
- snprintf(name, sizeof(name), "hdwqstat");
- vport->debug_hdwqstat =
- debugfs_create_file(name, 0644,
- vport->vport_debugfs_root,
- vport, &lpfc_debugfs_op_hdwqstat);
+ debugfs_create_file("scsistat", 0644, vport->vport_debugfs_root, vport,
+ &lpfc_debugfs_op_scsistat);
+
+ debugfs_create_file("ioktime", 0644, vport->vport_debugfs_root, vport,
+ &lpfc_debugfs_op_ioktime);
+
+ debugfs_create_file("hdwqstat", 0644, vport->vport_debugfs_root, vport,
+ &lpfc_debugfs_op_hdwqstat);
/*
* The following section is for additional directories/files for the
@@ -6376,93 +6323,58 @@ nvmeio_off:
*/
if (!pport_setup)
- goto debug_failed;
+ return;
/*
* iDiag debugfs root entry points for SLI4 device only
*/
if (phba->sli_rev < LPFC_SLI_REV4)
- goto debug_failed;
+ return;
- snprintf(name, sizeof(name), "iDiag");
if (!phba->idiag_root) {
phba->idiag_root =
- debugfs_create_dir(name, phba->hba_debugfs_root);
+ debugfs_create_dir("iDiag", phba->hba_debugfs_root);
/* Initialize iDiag data structure */
memset(&idiag, 0, sizeof(idiag));
}
/* iDiag read PCI config space */
- snprintf(name, sizeof(name), "pciCfg");
- if (!phba->idiag_pci_cfg) {
- phba->idiag_pci_cfg =
- debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
- phba->idiag_root, phba, &lpfc_idiag_op_pciCfg);
- idiag.offset.last_rd = 0;
- }
+ debugfs_create_file("pciCfg", 0644, phba->idiag_root, phba,
+ &lpfc_idiag_op_pciCfg);
+ idiag.offset.last_rd = 0;
/* iDiag PCI BAR access */
- snprintf(name, sizeof(name), "barAcc");
- if (!phba->idiag_bar_acc) {
- phba->idiag_bar_acc =
- debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
- phba->idiag_root, phba, &lpfc_idiag_op_barAcc);
- idiag.offset.last_rd = 0;
- }
+ debugfs_create_file("barAcc", 0644, phba->idiag_root, phba,
+ &lpfc_idiag_op_barAcc);
+ idiag.offset.last_rd = 0;
/* iDiag get PCI function queue information */
- snprintf(name, sizeof(name), "queInfo");
- if (!phba->idiag_que_info) {
- phba->idiag_que_info =
- debugfs_create_file(name, S_IFREG|S_IRUGO,
- phba->idiag_root, phba, &lpfc_idiag_op_queInfo);
- }
+ debugfs_create_file("queInfo", 0444, phba->idiag_root, phba,
+ &lpfc_idiag_op_queInfo);
/* iDiag access PCI function queue */
- snprintf(name, sizeof(name), "queAcc");
- if (!phba->idiag_que_acc) {
- phba->idiag_que_acc =
- debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
- phba->idiag_root, phba, &lpfc_idiag_op_queAcc);
- }
+ debugfs_create_file("queAcc", 0644, phba->idiag_root, phba,
+ &lpfc_idiag_op_queAcc);
/* iDiag access PCI function doorbell registers */
- snprintf(name, sizeof(name), "drbAcc");
- if (!phba->idiag_drb_acc) {
- phba->idiag_drb_acc =
- debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
- phba->idiag_root, phba, &lpfc_idiag_op_drbAcc);
- }
+ debugfs_create_file("drbAcc", 0644, phba->idiag_root, phba,
+ &lpfc_idiag_op_drbAcc);
/* iDiag access PCI function control registers */
- snprintf(name, sizeof(name), "ctlAcc");
- if (!phba->idiag_ctl_acc) {
- phba->idiag_ctl_acc =
- debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
- phba->idiag_root, phba, &lpfc_idiag_op_ctlAcc);
- }
+ debugfs_create_file("ctlAcc", 0644, phba->idiag_root, phba,
+ &lpfc_idiag_op_ctlAcc);
/* iDiag access mbox commands */
- snprintf(name, sizeof(name), "mbxAcc");
- if (!phba->idiag_mbx_acc) {
- phba->idiag_mbx_acc =
- debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
- phba->idiag_root, phba, &lpfc_idiag_op_mbxAcc);
- }
+ debugfs_create_file("mbxAcc", 0644, phba->idiag_root, phba,
+ &lpfc_idiag_op_mbxAcc);
/* iDiag extents access commands */
if (phba->sli4_hba.extents_in_use) {
- snprintf(name, sizeof(name), "extAcc");
- if (!phba->idiag_ext_acc) {
- phba->idiag_ext_acc =
- debugfs_create_file(name,
- S_IFREG|S_IRUGO|S_IWUSR,
- phba->idiag_root, phba,
- &lpfc_idiag_op_extAcc);
- }
+ debugfs_create_file("extAcc", 0644, phba->idiag_root, phba,
+ &lpfc_idiag_op_extAcc);
}
-
-debug_failed:
+out:
+ /* alloc'ed items are kfree'd in lpfc_debugfs_terminate */
return;
#endif
}
@@ -6487,145 +6399,26 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
kfree(vport->disc_trc);
vport->disc_trc = NULL;
- debugfs_remove(vport->debug_disc_trc); /* discovery_trace */
- vport->debug_disc_trc = NULL;
-
- debugfs_remove(vport->debug_nodelist); /* nodelist */
- vport->debug_nodelist = NULL;
-
- debugfs_remove(vport->debug_nvmestat); /* nvmestat */
- vport->debug_nvmestat = NULL;
-
- debugfs_remove(vport->debug_scsistat); /* scsistat */
- vport->debug_scsistat = NULL;
-
- debugfs_remove(vport->debug_ioktime); /* ioktime */
- vport->debug_ioktime = NULL;
-
- debugfs_remove(vport->debug_hdwqstat); /* hdwqstat */
- vport->debug_hdwqstat = NULL;
-
if (vport->vport_debugfs_root) {
debugfs_remove(vport->vport_debugfs_root); /* vportX */
vport->vport_debugfs_root = NULL;
- atomic_dec(&phba->debugfs_vport_count);
+ phba->debugfs_vport_count--;
}
- if (atomic_read(&phba->debugfs_vport_count) == 0) {
-
- debugfs_remove(phba->debug_multixri_pools); /* multixripools*/
- phba->debug_multixri_pools = NULL;
-
- debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */
- phba->debug_hbqinfo = NULL;
-
- debugfs_remove(phba->debug_cgn_buffer);
- phba->debug_cgn_buffer = NULL;
-
- debugfs_remove(phba->debug_rx_monitor);
- phba->debug_rx_monitor = NULL;
-
- debugfs_remove(phba->debug_ras_log);
- phba->debug_ras_log = NULL;
-
-#ifdef LPFC_HDWQ_LOCK_STAT
- debugfs_remove(phba->debug_lockstat); /* lockstat */
- phba->debug_lockstat = NULL;
-#endif
- debugfs_remove(phba->debug_dumpHBASlim); /* HBASlim */
- phba->debug_dumpHBASlim = NULL;
-
- debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */
- phba->debug_dumpHostSlim = NULL;
-
- debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */
- phba->debug_InjErrLBA = NULL;
-
- debugfs_remove(phba->debug_InjErrNPortID);
- phba->debug_InjErrNPortID = NULL;
-
- debugfs_remove(phba->debug_InjErrWWPN); /* InjErrWWPN */
- phba->debug_InjErrWWPN = NULL;
-
- debugfs_remove(phba->debug_writeGuard); /* writeGuard */
- phba->debug_writeGuard = NULL;
-
- debugfs_remove(phba->debug_writeApp); /* writeApp */
- phba->debug_writeApp = NULL;
-
- debugfs_remove(phba->debug_writeRef); /* writeRef */
- phba->debug_writeRef = NULL;
-
- debugfs_remove(phba->debug_readGuard); /* readGuard */
- phba->debug_readGuard = NULL;
-
- debugfs_remove(phba->debug_readApp); /* readApp */
- phba->debug_readApp = NULL;
-
- debugfs_remove(phba->debug_readRef); /* readRef */
- phba->debug_readRef = NULL;
-
+ if (!phba->debugfs_vport_count) {
kfree(phba->slow_ring_trc);
phba->slow_ring_trc = NULL;
- /* slow_ring_trace */
- debugfs_remove(phba->debug_slow_ring_trc);
- phba->debug_slow_ring_trc = NULL;
-
- debugfs_remove(phba->debug_nvmeio_trc);
- phba->debug_nvmeio_trc = NULL;
-
kfree(phba->nvmeio_trc);
phba->nvmeio_trc = NULL;
- /*
- * iDiag release
- */
- if (phba->sli_rev == LPFC_SLI_REV4) {
- /* iDiag extAcc */
- debugfs_remove(phba->idiag_ext_acc);
- phba->idiag_ext_acc = NULL;
-
- /* iDiag mbxAcc */
- debugfs_remove(phba->idiag_mbx_acc);
- phba->idiag_mbx_acc = NULL;
-
- /* iDiag ctlAcc */
- debugfs_remove(phba->idiag_ctl_acc);
- phba->idiag_ctl_acc = NULL;
-
- /* iDiag drbAcc */
- debugfs_remove(phba->idiag_drb_acc);
- phba->idiag_drb_acc = NULL;
-
- /* iDiag queAcc */
- debugfs_remove(phba->idiag_que_acc);
- phba->idiag_que_acc = NULL;
-
- /* iDiag queInfo */
- debugfs_remove(phba->idiag_que_info);
- phba->idiag_que_info = NULL;
-
- /* iDiag barAcc */
- debugfs_remove(phba->idiag_bar_acc);
- phba->idiag_bar_acc = NULL;
-
- /* iDiag pciCfg */
- debugfs_remove(phba->idiag_pci_cfg);
- phba->idiag_pci_cfg = NULL;
-
- /* Finally remove the iDiag debugfs root */
- debugfs_remove(phba->idiag_root);
- phba->idiag_root = NULL;
- }
-
if (phba->hba_debugfs_root) {
debugfs_remove(phba->hba_debugfs_root); /* fnX */
phba->hba_debugfs_root = NULL;
- atomic_dec(&lpfc_debugfs_hba_count);
+ lpfc_debugfs_hba_count--;
}
- if (atomic_read(&lpfc_debugfs_hba_count) == 0) {
+ if (!lpfc_debugfs_hba_count) {
debugfs_remove(lpfc_debugfs_root); /* lpfc */
lpfc_debugfs_root = NULL;
}
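Most of the churn in lpfc_debugfs.c above follows the current debugfs convention: the dentry returned by debugfs_create_file() is neither checked nor stored, because debugfs failures are deliberately non-fatal and removing the parent directory cleans up recursively; only debugfs_create_dir() is still checked, since its dentry is reused as a parent. A condensed sketch of the convention, with hypothetical names:

    #include <linux/debugfs.h>
    #include <linux/err.h>

    static struct dentry *example_root;

    static void example_debugfs_init(void *priv,
                                     const struct file_operations *fops)
    {
            example_root = debugfs_create_dir("example", NULL);
            if (IS_ERR(example_root)) /* reused as a parent, so checked */
                    return;

            /* Return values intentionally ignored: nothing dereferences
             * them, and removing example_root tears the files down too.
             */
            debugfs_create_file("stats", 0444, example_root, priv, fops);
            debugfs_create_file("control", 0644, example_root, priv, fops);
    }

    static void example_debugfs_exit(void)
    {
            debugfs_remove(example_root); /* recursive; NULL/ERR_PTR safe */
            example_root = NULL;
    }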
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index f319f3af0400..a1464f8ac331 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2007-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -44,6 +44,9 @@
/* hbqinfo output buffer size */
#define LPFC_HBQINFO_SIZE 8192
+/* hdwqinfo output buffer size */
+#define LPFC_HDWQINFO_SIZE 8192
+
/* nvmestat output buffer size */
#define LPFC_NVMESTAT_SIZE 8192
#define LPFC_IOKTIME_SIZE 8192
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index fca81e0c7c2e..b71db7d7d747 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -3762,7 +3762,7 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
memset(prdf, 0, cmdsize);
prdf->rdf.fpin_cmd = ELS_RDF;
prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) -
- sizeof(struct fc_els_rdf));
+ sizeof(struct fc_els_rdf_hdr));
prdf->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER);
prdf->reg_d1.reg_desc.desc_len = cpu_to_be32(
FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1));
@@ -5339,12 +5339,12 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ulp_status, ulp_word4, did);
/* ELS response tag <ulpIoTag> completes */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "0110 ELS response tag x%x completes "
+ "0110 ELS response tag x%x completes fc_flag x%lx"
"Data: x%x x%x x%x x%x x%lx x%x x%x x%x %p %p\n",
- iotag, ulp_status, ulp_word4, tmo,
+ iotag, vport->fc_flag, ulp_status, ulp_word4, tmo,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox, ndlp);
- if (mbox) {
+ if (mbox && !test_bit(FC_PT2PT, &vport->fc_flag)) {
if (ulp_status == 0 &&
test_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag)) {
if (!lpfc_unreg_rpi(vport, ndlp) &&
@@ -5403,6 +5403,10 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
out_free_mbox:
lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
+ } else if (mbox && test_bit(FC_PT2PT, &vport->fc_flag) &&
+ test_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag)) {
+ lpfc_mbx_cmpl_reg_login(phba, mbox);
+ clear_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag);
}
out:
if (ndlp && shost) {
@@ -11259,6 +11263,11 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
"0126 FDISC cmpl status: x%x/x%x)\n",
ulp_status, ulp_word4);
+
+ /* drop initial reference */
+ if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag))
+ lpfc_nlp_put(ndlp);
+
goto fdisc_failed;
}
@@ -12008,7 +12017,11 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
sglq_entry->state = SGL_FREED;
spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock,
iflag);
-
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI |
+ LOG_DISCOVERY | LOG_NODE,
+ "0732 ELS XRI ABORT on Node: ndlp=x%px "
+ "xri=x%x\n",
+ ndlp, xri);
if (ndlp) {
lpfc_set_rrq_active(phba, ndlp,
sglq_entry->sli4_lxritag,
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 32298285ea5e..3bc0efa7453e 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -366,6 +366,7 @@ struct lpfc_name {
} s;
uint8_t wwn[8];
uint64_t name __packed __aligned(4);
+ __be64 wwn_be __packed __aligned(4);
} u;
};
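The new __be64 view of the name lets callers such as the dif_err read/write handlers above convert a WWPN with a single be64 accessor instead of a memcpy() through a u64 temporary. A reduced sketch, assuming a union shaped like the hunk above:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* Hypothetical stand-in for union lpfc_name's byte/be64 views. */
    union example_wwn {
            u8 wwn[8];
            __be64 wwn_be;
    };

    static u64 example_wwpn_read(const union example_wwn *name)
    {
            return be64_to_cpu(name->wwn_be); /* no memcpy, no temporary */
    }

    static void example_wwpn_write(union example_wwn *name, u64 wwpn)
    {
            name->wwn_be = cpu_to_be64(wwpn);
    }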
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index bc709786e6af..a7f7ed86d2b0 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -4909,18 +4909,18 @@ struct send_frame_wqe {
#define ELS_RDF_REG_TAG_CNT 4
struct lpfc_els_rdf_reg_desc {
- struct fc_df_desc_fpin_reg reg_desc; /* descriptor header */
+ struct fc_df_desc_fpin_reg_hdr reg_desc; /* descriptor header */
__be32 desc_tags[ELS_RDF_REG_TAG_CNT];
/* tags in reg_desc */
};
struct lpfc_els_rdf_req {
- struct fc_els_rdf rdf; /* hdr up to descriptors */
+ struct fc_els_rdf_hdr rdf; /* hdr up to descriptors */
struct lpfc_els_rdf_reg_desc reg_d1; /* 1st descriptor */
};
struct lpfc_els_rdf_rsp {
- struct fc_els_rdf_resp rdf_resp; /* hdr up to descriptors */
+ struct fc_els_rdf_resp_hdr rdf_resp; /* hdr up to descriptors */
struct lpfc_els_rdf_reg_desc reg_d1; /* 1st descriptor */
};
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 4081d2a358ee..f206267d9ecd 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3057,13 +3057,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
lpfc_vmid_vport_cleanup(vport);
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (vport->port_type != LPFC_PHYSICAL_PORT &&
- ndlp->nlp_DID == Fabric_DID) {
- /* Just free up ndlp with Fabric_DID for vports */
- lpfc_nlp_put(ndlp);
- continue;
- }
-
if (ndlp->nlp_DID == Fabric_Cntl_DID &&
ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
lpfc_nlp_put(ndlp);
@@ -8300,10 +8293,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
phba->cfg_nvme_seg_cnt);
- if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
- i = phba->cfg_sg_dma_buf_size;
- else
- i = SLI4_PAGE_SIZE;
+ i = min(phba->cfg_sg_dma_buf_size, SLI4_PAGE_SIZE);
phba->lpfc_sg_dma_buf_pool =
dma_pool_create("lpfc_sg_dma_buf_pool",
@@ -14377,7 +14367,7 @@ lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
* as desired.
*
* Return codes
- * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
+ * PCI_ERS_RESULT_CAN_RECOVER - can be recovered without reset
* PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
* PCI_ERS_RESULT_DISCONNECT - device could not be recovered
**/
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index a596b80d03d4..1e5ef93e67e3 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -326,8 +326,14 @@ lpfc_defer_plogi_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox)
/* Now that REG_RPI completed successfully,
* we can now proceed with sending the PLOGI ACC.
*/
- rc = lpfc_els_rsp_acc(login_mbox->vport, ELS_CMD_PLOGI,
- save_iocb, ndlp, NULL);
+ if (test_bit(FC_PT2PT, &ndlp->vport->fc_flag)) {
+ rc = lpfc_els_rsp_acc(login_mbox->vport, ELS_CMD_PLOGI,
+ save_iocb, ndlp, login_mbox);
+ } else {
+ rc = lpfc_els_rsp_acc(login_mbox->vport, ELS_CMD_PLOGI,
+ save_iocb, ndlp, NULL);
+ }
+
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"4576 PLOGI ACC fails pt2pt discovery: "
@@ -335,9 +341,16 @@ lpfc_defer_plogi_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox)
}
}
- /* Now process the REG_RPI cmpl */
- lpfc_mbx_cmpl_reg_login(phba, login_mbox);
- clear_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag);
+ /* If this is a fabric topology, complete the reg_rpi and prli now.
+ * For Pt2Pt, the reg_rpi and PRLI are deferred until after the LS_ACC
+ * completes. This ensures, in Pt2Pt, that the PLOGI LS_ACC is sent
+ * before the PRLI.
+ */
+ if (!test_bit(FC_PT2PT, &ndlp->vport->fc_flag)) {
+ /* Now process the REG_RPI cmpl */
+ lpfc_mbx_cmpl_reg_login(phba, login_mbox);
+ clear_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag);
+ }
kfree(save_iocb);
}
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index a6647dd360d1..e6f632521cff 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1234,12 +1234,8 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
if ((phba->cfg_nvme_enable_fb) &&
test_bit(NLP_FIRSTBURST, &pnode->nlp_flag)) {
req_len = lpfc_ncmd->nvmeCmd->payload_length;
- if (req_len < pnode->nvme_fb_size)
- wqe->fcp_iwrite.initial_xfer_len =
- req_len;
- else
- wqe->fcp_iwrite.initial_xfer_len =
- pnode->nvme_fb_size;
+ wqe->fcp_iwrite.initial_xfer_len = min(req_len,
+ pnode->nvme_fb_size);
} else {
wqe->fcp_iwrite.initial_xfer_len = 0;
}
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index fba2e62027b7..4cfc928bcf2d 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1243,7 +1243,7 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_async_xchg_ctx *ctxp =
container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
- struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
+ struct rqb_dmabuf *nvmebuf;
struct lpfc_hba *phba = ctxp->phba;
unsigned long iflag;
@@ -1251,13 +1251,18 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
ctxp->oxid, ctxp->size, raw_smp_processor_id());
+ spin_lock_irqsave(&ctxp->ctxlock, iflag);
+ nvmebuf = ctxp->rqb_buffer;
if (!nvmebuf) {
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
"6425 Defer rcv: no buffer oxid x%x: "
"flg %x ste %x\n",
ctxp->oxid, ctxp->flag, ctxp->state);
return;
}
+ ctxp->rqb_buffer = NULL;
+ spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
tgtp = phba->targetport->private;
if (tgtp)
@@ -1265,9 +1270,6 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
/* Free the nvmebuf since a new buffer already replaced it */
nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
- spin_lock_irqsave(&ctxp->ctxlock, iflag);
- ctxp->rqb_buffer = NULL;
- spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
}
/**
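The defer_rcv change above turns the buffer handoff into a read-and-clear under ctxp->ctxlock, so a racing completion can no longer observe (and free) rqb_buffer after this path has consumed it. A generic sketch of that single-claim pattern, with hypothetical types:

    #include <linux/spinlock.h>

    struct example_ctx {
            spinlock_t lock; /* init with spin_lock_init() */
            void *buf;       /* owned by whoever clears it under the lock */
    };

    /* Claim ctx->buf exactly once; a second caller gets NULL. */
    static void *example_claim_buffer(struct example_ctx *ctx)
    {
            unsigned long flags;
            void *buf;

            spin_lock_irqsave(&ctx->lock, flags);
            buf = ctx->buf;
            ctx->buf = NULL; /* clear before dropping the lock */
            spin_unlock_irqrestore(&ctx->lock, flags);
            return buf;
    }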
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 508ceeecf2d9..6d9d8c196936 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -5935,7 +5935,7 @@ lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct fc_rport *rport)
/**
* lpfc_reset_flush_io_context - flush I/O contexts at the given level
* @vport: The virtual port (scsi_host) for the flush context
- * @tgt_id: If aborting by Target contect - specifies the target id
+ * @tgt_id: If aborting by Target context - specifies the target id
* @lun_id: If aborting by Lun context - specifies the lun id
* @context: specifies the context level to flush at.
*
@@ -6109,8 +6109,14 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
spin_unlock_irqrestore(&pnode->lock, flags);
}
- lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
- LPFC_CTX_TGT);
+ status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
+ LPFC_CTX_TGT);
+ if (status != SUCCESS) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+ "0726 Target Reset flush status x%x\n",
+ status);
+ return status;
+ }
return FAST_IO_FAIL;
}
@@ -6202,7 +6208,7 @@ lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
int rc, ret = SUCCESS;
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "3172 SCSI layer issued Host Reset Data:\n");
+ "3172 SCSI layer issued Host Reset\n");
lpfc_offline_prep(phba, LPFC_MBX_WAIT);
lpfc_offline(phba);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index a8fbdf7119d8..7ea7c4245c69 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -8820,7 +8820,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0381 Error %d during queue setup.\n", rc);
- goto out_stop_timers;
+ goto out_destroy_queue;
}
/* Initialize the driver internal SLI layer lists. */
lpfc_sli4_setup(phba);
@@ -9103,7 +9103,6 @@ out_free_iocblist:
lpfc_free_iocb_list(phba);
out_destroy_queue:
lpfc_sli4_queue_destroy(phba);
-out_stop_timers:
lpfc_stop_hba_timers(phba);
out_free_mbox:
mempool_free(mboxq, phba->mbox_mem_pool);
@@ -12439,19 +12438,11 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
/*
- * If we're unloading, don't abort iocb on the ELS ring, but change
- * the callback so that nothing happens when it finishes.
+ * Always abort the outstanding WQE and set the IA bit correctly
+ * for the context. This is necessary for correctly removing
+ * outstanding ndlp reference counts when the CQE completes with
+ * the XB bit set.
*/
- if (test_bit(FC_UNLOADING, &vport->load_flag) &&
- pring->ringno == LPFC_ELS_RING) {
- if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
- cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
- else
- cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
- return retval;
- }
-
- /* issue ABTS for this IOCB based on iotag */
abtsiocbp = __lpfc_sli_get_iocbq(phba);
if (abtsiocbp == NULL)
return IOCB_NORESOURCE;
@@ -21373,7 +21364,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
struct lpfc_sglq *sglq;
struct lpfc_sli_ring *pring;
unsigned long iflags;
- uint32_t ret = 0;
+ int ret = 0;
/* NVME_LS and NVME_LS ABTS requests. */
if (pwqe->cmd_flag & LPFC_IO_NVME_LS) {
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 9ee3a3a4ec4d..31c3c5abdca6 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.4.0.10"
+#define LPFC_DRIVER_VERSION "14.4.0.11"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 2797aa75a689..aff6c9d5e7c2 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -666,7 +666,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
* Take early refcount for outstanding I/O requests we schedule during
* delete processing for unreg_vpi. Always keep this before
* scsi_remove_host() as we can no longer obtain a reference through
- * scsi_host_get() after scsi_host_remove as shost is set to SHOST_DEL.
+ * scsi_host_get() after scsi_remove_host as shost is set to SHOST_DEL.
*/
if (!scsi_host_get(shost))
return VPORT_INVAL;
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 2006094af418..a00622c0c526 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -2780,7 +2780,7 @@ static inline void mega_create_proc_entry(int index, struct proc_dir_entry *pare
* Return the disk geometry for a particular disk
*/
static int
-megaraid_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+megaraid_biosparam(struct scsi_device *sdev, struct gendisk *disk,
sector_t capacity, int geom[])
{
adapter_t *adapter;
@@ -2813,7 +2813,7 @@ megaraid_biosparam(struct scsi_device *sdev, struct block_device *bdev,
geom[2] = cylinders;
}
else {
- if (scsi_partsize(bdev, capacity, geom))
+ if (scsi_partsize(disk, capacity, geom))
return 0;
dev_info(&adapter->dev->dev,
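The ->bios_param() signature change that recurs through this series (struct block_device * becomes struct gendisk *) follows scsi_partsize(), which, as the megaraid hunk shows, now takes the disk directly. A sketch of a minimal implementation against the new prototype, with hypothetical fallback geometry:

    #include <linux/blkdev.h>
    #include <linux/kernel.h>
    #include <scsi/scsi_device.h>
    #include <scsi/scsicam.h>

    static int example_bios_param(struct scsi_device *sdev,
                                  struct gendisk *disk,
                                  sector_t capacity, int geom[])
    {
            /* Prefer geometry recoverable from an existing partition table. */
            if (scsi_partsize(disk, capacity, geom))
                    return 0;

            /* Otherwise fall back to a fixed 64/32 translation. */
            geom[0] = 64;                  /* heads */
            geom[1] = 32;                  /* sectors per track */
            sector_div(capacity, 64 * 32); /* capacity becomes cylinders */
            geom[2] = capacity;            /* real drivers clamp this */
            return 0;
    }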
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index 013fbfb911b9..d6bfd26a8843 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -975,7 +975,7 @@ static void mega_free_scb(adapter_t *, scb_t *);
static int megaraid_abort(struct scsi_cmnd *);
static int megaraid_reset(struct scsi_cmnd *);
static int megaraid_abort_and_reset(adapter_t *, struct scsi_cmnd *, int);
-static int megaraid_biosparam(struct scsi_device *, struct block_device *,
+static int megaraid_biosparam(struct scsi_device *, struct gendisk *,
sector_t, int []);
static int mega_build_sglist (adapter_t *adapter, scb_t *scb,
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 615e06fd4ee8..abbbc4b36cd1 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3137,12 +3137,12 @@ static int megasas_reset_target(struct scsi_cmnd *scmd)
/**
* megasas_bios_param - Returns disk geometry for a disk
* @sdev: device handle
- * @bdev: block device
+ * @unused: gendisk
* @capacity: drive capacity
* @geom: geometry parameters
*/
static int
-megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
+megasas_bios_param(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int geom[])
{
int heads;
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
index 96401eb7e231..8c8bfbbdd34e 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
@@ -322,6 +322,9 @@ struct mpi3_man6_gpio_entry {
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_MASK (0x01)
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_EDGE (0x00)
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_LEVEL (0x01)
+#define MPI3_MAN6_GPIO_OVER_TEMP_PARAM1_LEVEL_WARNING (0x00)
+#define MPI3_MAN6_GPIO_OVER_TEMP_PARAM1_LEVEL_CRITICAL (0x01)
+#define MPI3_MAN6_GPIO_OVER_TEMP_PARAM1_LEVEL_FATAL (0x02)
#define MPI3_MAN6_GPIO_PORT_GREEN_PARAM1_PHY_STATUS_ALL_UP (0x00)
#define MPI3_MAN6_GPIO_PORT_GREEN_PARAM1_PHY_STATUS_ONE_OR_MORE_UP (0x01)
#define MPI3_MAN6_GPIO_CABLE_MGMT_PARAM1_INTERFACE_MODULE_PRESENT (0x00)
@@ -1250,6 +1253,37 @@ struct mpi3_io_unit_page17 {
__le32 current_key[];
};
#define MPI3_IOUNIT17_PAGEVERSION (0x00)
+struct mpi3_io_unit_page18 {
+ struct mpi3_config_page_header header;
+ u8 flags;
+ u8 poll_interval;
+ __le16 reserved0a;
+ __le32 reserved0c;
+};
+
+#define MPI3_IOUNIT18_PAGEVERSION (0x00)
+#define MPI3_IOUNIT18_FLAGS_DIRECTATTACHED_ENABLE (0x01)
+#define MPI3_IOUNIT18_POLLINTERVAL_DISABLE (0x00)
+#ifndef MPI3_IOUNIT19_DEVICE_MAX
+#define MPI3_IOUNIT19_DEVICE_MAX (1)
+#endif
+struct mpi3_iounit19_device {
+ __le16 temperature;
+ __le16 dev_handle;
+ __le16 persistent_id;
+ __le16 reserved06;
+};
+
+#define MPI3_IOUNIT19_DEVICE_TEMPERATURE_UNAVAILABLE (0x8000)
+struct mpi3_io_unit_page19 {
+ struct mpi3_config_page_header header;
+ __le16 num_devices;
+ __le16 reserved0a;
+ __le32 reserved0c;
+ struct mpi3_iounit19_device device[MPI3_IOUNIT19_DEVICE_MAX];
+};
+
+#define MPI3_IOUNIT19_PAGEVERSION (0x00)
struct mpi3_ioc_page0 {
struct mpi3_config_page_header header;
__le32 reserved08;
@@ -2356,7 +2390,9 @@ struct mpi3_device0_vd_format {
__le16 io_throttle_group;
__le16 io_throttle_group_low;
__le16 io_throttle_group_high;
- __le32 reserved0c;
+ u8 vd_abort_to;
+ u8 vd_reset_to;
+ __le16 reserved0e;
};
#define MPI3_DEVICE0_VD_STATE_OFFLINE (0x00)
#define MPI3_DEVICE0_VD_STATE_PARTIALLY_DEGRADED (0x01)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_pci.h b/drivers/scsi/mpi3mr/mpi/mpi30_pci.h
index 7c15e5851ce4..4eeb11c3c73e 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_pci.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_pci.h
@@ -9,9 +9,11 @@
#define MPI3_NVME_ENCAP_CMD_MAX (1)
#endif
#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_MASK (0x0002)
+#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_SHIFT (1)
#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_FAIL_ONLY (0x0000)
#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_ALL (0x0002)
#define MPI3_NVME_FLAGS_SUBMISSIONQ_MASK (0x0001)
+#define MPI3_NVME_FLAGS_SUBMISSIONQ_SHIFT (0)
#define MPI3_NVME_FLAGS_SUBMISSIONQ_IO (0x0000)
#define MPI3_NVME_FLAGS_SUBMISSIONQ_ADMIN (0x0001)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_sas.h b/drivers/scsi/mpi3mr/mpi/mpi30_sas.h
index 4a93c67d335f..190b06508b00 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_sas.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_sas.h
@@ -11,6 +11,7 @@
#define MPI3_SAS_DEVICE_INFO_STP_INITIATOR (0x00000010)
#define MPI3_SAS_DEVICE_INFO_SMP_INITIATOR (0x00000008)
#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK (0x00000007)
+#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_SHIFT (0)
#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_NO_DEVICE (0x00000000)
#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE (0x00000001)
#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_EXPANDER (0x00000002)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
index 5c522e2531c3..28ab2efb3baa 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
@@ -18,7 +18,7 @@ union mpi3_version_union {
#define MPI3_VERSION_MAJOR (3)
#define MPI3_VERSION_MINOR (0)
-#define MPI3_VERSION_UNIT (35)
+#define MPI3_VERSION_UNIT (37)
#define MPI3_VERSION_DEV (0)
#define MPI3_DEVHANDLE_INVALID (0xffff)
struct mpi3_sysif_oper_queue_indexes {
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index 8d4ef49e04d1..6742684e2990 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -56,8 +56,8 @@ extern struct list_head mrioc_list;
extern int prot_mask;
extern atomic64_t event_counter;
-#define MPI3MR_DRIVER_VERSION "8.14.0.5.50"
-#define MPI3MR_DRIVER_RELDATE "27-June-2025"
+#define MPI3MR_DRIVER_VERSION "8.15.0.5.50"
+#define MPI3MR_DRIVER_RELDATE "12-August-2025"
#define MPI3MR_DRIVER_NAME "mpi3mr"
#define MPI3MR_DRIVER_LICENSE "GPL"
@@ -697,6 +697,8 @@ struct tgt_dev_vd {
u16 tg_id;
u32 tg_high;
u32 tg_low;
+ u8 abort_to;
+ u8 reset_to;
struct mpi3mr_throttle_group_info *tg;
};
@@ -738,6 +740,8 @@ enum mpi3mr_dev_state {
* @wwid: World wide ID
* @enclosure_logical_id: Enclosure logical identifier
* @dev_spec: Device type specific information
+ * @abort_to: Timeout for abort TM
+ * @reset_to: Timeout for Target/LUN reset TM
* @ref_count: Reference count
* @state: device state
*/
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index 0152d31d430a..8fe6e0bf342e 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -2353,6 +2353,8 @@ static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc)
{
int retval = 0;
u16 num_queues = 0, i = 0, msix_count_op_q = 1;
+ u32 ioc_status;
+ enum mpi3mr_iocstate ioc_state;
num_queues = min_t(int, mrioc->facts.max_op_reply_q,
mrioc->facts.max_op_req_q);
@@ -2408,6 +2410,14 @@ static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc)
retval = -1;
goto out_failed;
}
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
+ ioc_state != MRIOC_STATE_READY) {
+ mpi3mr_print_fault_info(mrioc);
+ retval = -1;
+ goto out_failed;
+ }
mrioc->num_op_reply_q = mrioc->num_op_req_q = i;
ioc_info(mrioc,
"successfully created %d operational queue pairs(default/polled) queue = (%d/%d)\n",
@@ -5420,6 +5430,7 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
mpi3mr_reset_rc_name(reset_reason));
mrioc->device_refresh_on = 0;
+ scsi_block_requests(mrioc->shost);
mrioc->reset_in_progress = 1;
mrioc->stop_bsgs = 1;
mrioc->prev_reset_result = -1;
@@ -5528,6 +5539,7 @@ out:
if (!retval) {
mrioc->diagsave_timeout = 0;
mrioc->reset_in_progress = 0;
+ scsi_unblock_requests(mrioc->shost);
mrioc->pel_abort_requested = 0;
if (mrioc->pel_enabled) {
mrioc->pel_cmds.retry_count = 0;
@@ -5552,6 +5564,7 @@ out:
mrioc->device_refresh_on = 0;
mrioc->unrecoverable = 1;
mrioc->reset_in_progress = 0;
+ scsi_unblock_requests(mrioc->shost);
mrioc->stop_bsgs = 0;
retval = -1;
mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);
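Two related hardening changes land in mpi3mr_fw.c above: operational queue creation now re-reads the IOC status afterwards and bails out if a reset was recorded or the IOC left the ready state, and the soft reset handler brackets the whole reset window with scsi_block_requests()/scsi_unblock_requests(). The pairing, condensed to its invariant (error handling elided; fields as in the hunks above):

	scsi_block_requests(mrioc->shost);	/* no new I/O during reset */
	mrioc->reset_in_progress = 1;

	/* ... issue the soft reset and re-initialize ... */

	mrioc->reset_in_progress = 0;
	scsi_unblock_requests(mrioc->shost);	/* on success and on failure */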
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index e467b56949e9..b88633e1efe2 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -1308,6 +1308,12 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE)
tgtdev->is_hidden = 1;
tgtdev->non_stl = 1;
+ tgtdev->dev_spec.vd_inf.reset_to =
+ max_t(u8, vdinf->vd_reset_to,
+ MPI3MR_INTADMCMD_TIMEOUT);
+ tgtdev->dev_spec.vd_inf.abort_to =
+ max_t(u8, vdinf->vd_abort_to,
+ MPI3MR_INTADMCMD_TIMEOUT);
tgtdev->dev_spec.vd_inf.tg_id = vdinf_io_throttle_group;
tgtdev->dev_spec.vd_inf.tg_high =
le16_to_cpu(vdinf->io_throttle_group_high) * 2048;
@@ -2049,8 +2055,8 @@ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
if (!fwevt->process_evt)
goto evt_ack;
- dprint_event_bh(mrioc, "processing event(0x%02x) in the bottom half handler\n",
- fwevt->event_id);
+ dprint_event_bh(mrioc, "processing event(0x%02x) - (0x%08x) in the bottom half handler\n",
+ fwevt->event_id, fwevt->evt_ctx);

switch (fwevt->event_id) {
case MPI3_EVENT_DEVICE_ADDED:
@@ -2866,12 +2872,14 @@ static void mpi3mr_preparereset_evt_th(struct mpi3mr_ioc *mrioc,
"prepare for reset event top half with rc=start\n");
if (mrioc->prepare_for_reset)
return;
+ scsi_block_requests(mrioc->shost);
mrioc->prepare_for_reset = 1;
mrioc->prepare_for_reset_timeout_counter = 0;
} else if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_ABORT) {
dprint_event_th(mrioc,
"prepare for reset top half with rc=abort\n");
mrioc->prepare_for_reset = 0;
+ scsi_unblock_requests(mrioc->shost);
mrioc->prepare_for_reset_timeout_counter = 0;
}
if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
@@ -3076,8 +3084,8 @@ void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
}
if (process_evt_bh || ack_req) {
dprint_event_th(mrioc,
- "scheduling bottom half handler for event(0x%02x),ack_required=%d\n",
- evt_type, ack_req);
+ "scheduling bottom half handler for event(0x%02x) - (0x%08x), ack_required=%d\n",
+ evt_type, le32_to_cpu(event_reply->event_context), ack_req);
sz = event_reply->event_data_length * 4;
fwevt = mpi3mr_alloc_fwevt(sz);
if (!fwevt) {
@@ -3915,11 +3923,13 @@ int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
if (scsi_tgt_priv_data)
atomic_inc(&scsi_tgt_priv_data->block_io);
- if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) {
- if (cmd_priv && tgtdev->dev_spec.pcie_inf.abort_to)
- timeout = tgtdev->dev_spec.pcie_inf.abort_to;
- else if (!cmd_priv && tgtdev->dev_spec.pcie_inf.reset_to)
- timeout = tgtdev->dev_spec.pcie_inf.reset_to;
+ if (tgtdev) {
+ if (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)
+ timeout = cmd_priv ? tgtdev->dev_spec.pcie_inf.abort_to
+ : tgtdev->dev_spec.pcie_inf.reset_to;
+ else if (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_VD)
+ timeout = cmd_priv ? tgtdev->dev_spec.vd_inf.abort_to
+ : tgtdev->dev_spec.vd_inf.reset_to;
}
init_completion(&drv_cmd->done);
@@ -4031,7 +4041,7 @@ out:
/**
* mpi3mr_bios_param - BIOS param callback
* @sdev: SCSI device reference
- * @bdev: Block device reference
+ * @unused: gendisk reference
* @capacity: Capacity in logical sectors
* @params: Parameter array
*
@@ -4040,7 +4050,7 @@ out:
* Return: 0 always
*/
static int mpi3mr_bios_param(struct scsi_device *sdev,
- struct block_device *bdev, sector_t capacity, int params[])
+ struct gendisk *unused, sector_t capacity, int params[])
{
int heads;
int sectors;
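For virtual disks, the firmware now supplies per-device abort and reset timeouts (the vd_abort_to/vd_reset_to fields added earlier in this patch), and mpi3mr_update_tgtdev() floors them at the driver's internal admin command timeout so a zero or implausibly small firmware value cannot shrink the task management wait. Restated as standalone lines, assuming both fields are u8 as in the hunks:

	tgtdev->dev_spec.vd_inf.abort_to =
		max_t(u8, vdinf->vd_abort_to, MPI3MR_INTADMCMD_TIMEOUT);
	tgtdev->dev_spec.vd_inf.reset_to =
		max_t(u8, vdinf->vd_reset_to, MPI3MR_INTADMCMD_TIMEOUT);

mpi3mr_issue_tm() then selects abort_to for task aborts (cmd_priv set) and reset_to for target/LUN resets, for VD devices as well as PCIe ones.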
diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
index c8d6ced5640e..d70f002d6487 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
@@ -413,9 +413,11 @@ static void mpi3mr_remove_device_by_sas_address(struct mpi3mr_ioc *mrioc,
sas_address, hba_port);
if (tgtdev) {
if (!list_empty(&tgtdev->list)) {
- list_del_init(&tgtdev->list);
was_on_tgtdev_list = 1;
- mpi3mr_tgtdev_put(tgtdev);
+ if (tgtdev->state == MPI3MR_DEV_REMOVE_HS_STARTED) {
+ list_del_init(&tgtdev->list);
+ mpi3mr_tgtdev_put(tgtdev);
+ }
}
}
spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
@@ -2079,6 +2081,8 @@ int mpi3mr_expander_add(struct mpi3mr_ioc *mrioc, u16 handle)
link_rate = (expander_pg1.negotiated_link_rate &
MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >>
MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT;
+ if (link_rate < MPI3_SAS_NEG_LINK_RATE_1_5)
+ link_rate = MPI3_SAS_NEG_LINK_RATE_1_5;
mpi3mr_update_links(mrioc, sas_address_parent,
handle, i, link_rate, hba_port);
}
@@ -2388,6 +2392,9 @@ int mpi3mr_report_tgtdev_to_sas_transport(struct mpi3mr_ioc *mrioc,
link_rate = mpi3mr_get_sas_negotiated_logical_linkrate(mrioc, tgtdev);
+ if (link_rate < MPI3_SAS_NEG_LINK_RATE_1_5)
+ link_rate = MPI3_SAS_NEG_LINK_RATE_1_5;
+
mpi3mr_update_links(mrioc, sas_address_parent, tgtdev->dev_handle,
parent_phy_number, link_rate, hba_port);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index bd3efa5b46c7..0d652db8fe24 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -1420,7 +1420,13 @@ _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
- _base_sas_log_info(ioc, loginfo);
+ if (ioc->logging_level & MPT_DEBUG_REPLY)
+ _base_sas_log_info(ioc, loginfo);
+ else {
+ if (!((ioc_status & MPI2_IOCSTATUS_MASK) &
+ MPI2_IOCSTATUS_CONFIG_INVALID_PAGE))
+ _base_sas_log_info(ioc, loginfo);
+ }
}
if (ioc_status || loginfo) {
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 939141cde3ca..e6a6f21d309b 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -77,8 +77,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "52.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 52
+#define MPT3SAS_DRIVER_VERSION "54.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 54
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 00
#define MPT3SAS_RELEASE_VERSION 00
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 967af259118e..7092d0debef3 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -2754,7 +2754,7 @@ scsih_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
/**
* scsih_bios_param - fetch head, sector, cylinder info for a disk
* @sdev: scsi device struct
- * @bdev: pointer to block device context
+ * @unused: pointer to gendisk
* @capacity: device size (in 512 byte sectors)
* @params: three element array to place output:
* params[0] number of heads (max 255)
@@ -2762,7 +2762,7 @@ scsih_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim)
* params[2] number of cylinders
*/
static int
-scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
+scsih_bios_param(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int params[])
{
int heads;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index dc74ebc6405a..f3400d01cc2a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -166,6 +166,9 @@ _transport_convert_phy_link_rate(u8 link_rate)
case MPI25_SAS_NEG_LINK_RATE_12_0:
rc = SAS_LINK_RATE_12_0_GBPS;
break;
+ case MPI26_SAS_NEG_LINK_RATE_22_5:
+ rc = SAS_LINK_RATE_22_5_GBPS;
+ break;
case MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED:
rc = SAS_PHY_DISABLED;
break;
@@ -987,11 +990,9 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
list_for_each_entry_safe(mpt3sas_phy, next_phy,
&mpt3sas_port->phy_list, port_siblings) {
if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
- dev_printk(KERN_INFO, &mpt3sas_port->port->dev,
- "remove: sas_addr(0x%016llx), phy(%d)\n",
- (unsigned long long)
- mpt3sas_port->remote_identify.sas_address,
- mpt3sas_phy->phy_id);
+ ioc_info(ioc, "remove: sas_addr(0x%016llx), phy(%d)\n",
+ (unsigned long long) mpt3sas_port->remote_identify.sas_address,
+ mpt3sas_phy->phy_id);
mpt3sas_phy->phy_belongs_to_port = 0;
if (!ioc->remove_host)
sas_port_delete_phy(mpt3sas_port->port,
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 2c72da6b8cf0..7f1ad305eee6 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -124,7 +124,7 @@ static void mvs_free(struct mvs_info *mvi)
if (mvi->shost)
scsi_host_put(mvi->shost);
list_for_each_entry(mwq, &mvi->wq_list, entry)
- cancel_delayed_work(&mwq->work_q);
+ cancel_delayed_work_sync(&mwq->work_q);
kfree(mvi->rsvd_tags);
kfree(mvi);
}
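A one-liner but consequential: cancel_delayed_work() only dequeues a pending work item and returns immediately if the item is already running, while cancel_delayed_work_sync() also waits for a running instance to finish. Since mvs_free() goes on to kfree() the structures the work item may touch, the synchronous variant closes a potential use-after-free window:

	list_for_each_entry(mwq, &mvi->wq_list, entry)
		cancel_delayed_work_sync(&mwq->work_q);	/* wait for running work */
	kfree(mvi->rsvd_tags);
	kfree(mvi);					/* now safe to free */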
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 15b3d9d55a4b..f2e7997d5b9d 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -1175,7 +1175,7 @@ static int mvs_dev_found_notify(struct domain_device *dev, int lock)
mvi_device->dev_type = dev->dev_type;
mvi_device->mvi_info = mvi;
mvi_device->sas_device = dev;
- if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
+ if (dev_parent_is_expander(dev)) {
int phy_id;
phy_id = sas_find_attached_phy_id(&parent_dev->ex_dev, dev);
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 96549e7f5705..bdc2f2f17753 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -2142,7 +2142,7 @@ static enum scsi_timeout_action mvumi_timed_out(struct scsi_cmnd *scmd)
}
static int
-mvumi_bios_param(struct scsi_device *sdev, struct block_device *bdev,
+mvumi_bios_param(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int geom[])
{
int heads, sectors;
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index 486db5b2f05d..b8453c0333dc 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -1745,7 +1745,7 @@ static void myrb_sdev_destroy(struct scsi_device *sdev)
kfree(sdev->hostdata);
}
-static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+static int myrb_biosparam(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int geom[])
{
struct myrb_hba *cb = shost_priv(sdev->host);
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
index 95af3bb03834..a58abd796603 100644
--- a/drivers/scsi/myrs.c
+++ b/drivers/scsi/myrs.c
@@ -498,14 +498,14 @@ static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
/* Temporary dma mapping, used only in the scope of this function */
mbox = dma_alloc_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
&mbox_addr, GFP_KERNEL);
- if (dma_mapping_error(&pdev->dev, mbox_addr))
+ if (!mbox)
return false;
/* These are the base addresses for the command memory mailbox array */
cs->cmd_mbox_size = MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox);
cmd_mbox = dma_alloc_coherent(&pdev->dev, cs->cmd_mbox_size,
&cs->cmd_mbox_addr, GFP_KERNEL);
- if (dma_mapping_error(&pdev->dev, cs->cmd_mbox_addr)) {
+ if (!cmd_mbox) {
dev_err(&pdev->dev, "Failed to map command mailbox\n");
goto out_free;
}
@@ -520,7 +520,7 @@ static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
cs->stat_mbox_size = MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox);
stat_mbox = dma_alloc_coherent(&pdev->dev, cs->stat_mbox_size,
&cs->stat_mbox_addr, GFP_KERNEL);
- if (dma_mapping_error(&pdev->dev, cs->stat_mbox_addr)) {
+ if (!stat_mbox) {
dev_err(&pdev->dev, "Failed to map status mailbox\n");
goto out_free;
}
@@ -533,7 +533,7 @@ static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
cs->fwstat_buf = dma_alloc_coherent(&pdev->dev,
sizeof(struct myrs_fwstat),
&cs->fwstat_addr, GFP_KERNEL);
- if (dma_mapping_error(&pdev->dev, cs->fwstat_addr)) {
+ if (!cs->fwstat_buf) {
dev_err(&pdev->dev, "Failed to map firmware health buffer\n");
cs->fwstat_buf = NULL;
goto out_free;
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index 278c78d066c4..a3b505240351 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -597,7 +597,7 @@ SYM53C500_host_reset(struct scsi_cmnd *SCpnt)
static int
SYM53C500_biosparm(struct scsi_device *disk,
- struct block_device *dev,
+ struct gendisk *unused,
sector_t capacity, int *info_array)
{
int size;
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index 7618f9cc9986..cbfda8c04e95 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -534,23 +534,25 @@ static ssize_t pm8001_ctl_iop_log_show(struct device *cdev,
char *str = buf;
u32 read_size =
pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size / 1024;
- static u32 start, end, count;
u32 max_read_times = 32;
u32 max_count = (read_size * 1024) / (max_read_times * 4);
u32 *temp = (u32 *)pm8001_ha->memoryMap.region[IOP].virt_ptr;
- if ((count % max_count) == 0) {
- start = 0;
- end = max_read_times;
- count = 0;
+ mutex_lock(&pm8001_ha->iop_log_lock);
+
+ if ((pm8001_ha->iop_log_count % max_count) == 0) {
+ pm8001_ha->iop_log_start = 0;
+ pm8001_ha->iop_log_end = max_read_times;
+ pm8001_ha->iop_log_count = 0;
} else {
- start = end;
- end = end + max_read_times;
+ pm8001_ha->iop_log_start = pm8001_ha->iop_log_end;
+ pm8001_ha->iop_log_end = pm8001_ha->iop_log_end + max_read_times;
}
- for (; start < end; start++)
- str += sprintf(str, "%08x ", *(temp+start));
- count++;
+ for (; pm8001_ha->iop_log_start < pm8001_ha->iop_log_end; pm8001_ha->iop_log_start++)
+ str += sprintf(str, "%08x ", *(temp+pm8001_ha->iop_log_start));
+ pm8001_ha->iop_log_count++;
+ mutex_unlock(&pm8001_ha->iop_log_lock);
return str - buf;
}
static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL);
@@ -680,7 +682,7 @@ static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha)
struct pm8001_ioctl_payload *payload;
DECLARE_COMPLETION_ONSTACK(completion);
u8 *ioctlbuffer;
- u32 ret;
+ int ret;
u32 length = 1024 * 5 + sizeof(*payload) - 1;
if (pm8001_ha->fw_image->size > 4096) {
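The iop_log cursors used to be function-scoped statics, i.e. a single copy shared by every pm8001 HBA in the system and unsynchronized across concurrent sysfs readers. The hunks above move them into struct pm8001_hba_info and serialize the read-out with a per-HBA mutex (initialized in pm8001_pci_alloc() further down). The shape of the fix, reduced to a pattern:

	/* before: static u32 start, end, count;  one copy for all HBAs */

	mutex_lock(&pm8001_ha->iop_log_lock);
	/* advance pm8001_ha->iop_log_start/end/count for this HBA only */
	mutex_unlock(&pm8001_ha->iop_log_lock);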
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 42a4eeac24c9..8005995a317c 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -2163,8 +2163,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW)) {
- if (!((t->dev->parent) &&
- (dev_is_expander(t->dev->parent->dev_type)))) {
+ if (!dev_parent_is_expander(t->dev)) {
for (i = 0, j = 4; j <= 7 && i <= 3; i++, j++)
sata_addr_low[i] = pm8001_ha->sas_addr[j];
for (i = 0, j = 0; j <= 3 && i <= 3; i++, j++)
@@ -4168,7 +4167,6 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
u16 firstBurstSize = 0;
u16 ITNT = 2000;
struct domain_device *dev = pm8001_dev->sas_device;
- struct domain_device *parent_dev = dev->parent;
struct pm8001_port *port = dev->port->lldd_port;
memset(&payload, 0, sizeof(payload));
@@ -4186,10 +4184,9 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
dev_is_expander(pm8001_dev->dev_type))
stp_sspsmp_sata = 0x01; /*ssp or smp*/
}
- if (parent_dev && dev_is_expander(parent_dev->dev_type))
- phy_id = parent_dev->ex_dev.ex_phy->phy_id;
- else
- phy_id = pm8001_dev->attached_phy;
+
+ phy_id = pm80xx_get_local_phy_id(dev);
+
opc = OPC_INB_REG_DEV;
linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ?
pm8001_dev->sas_device->linkrate : dev->port->linkrate;
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
index fc2127dcb58d..f1ce8df082b0 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.h
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -339,8 +339,10 @@ struct ssp_completion_resp {
__le32 status;
__le32 param;
__le32 ssptag_rescv_rescpad;
+
+ /* Must be last -- ends in a flexible-array member. */
struct ssp_response_iu ssp_resp_iu;
- __le32 residual_count;
+ /* __le32 residual_count; */
} __attribute__((packed, aligned(4)));
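struct ssp_response_iu ends in a flexible-array member, and C only permits a flexible array at the end of a structure (GCC's -Wflex-array-member-not-at-end flags embeddings elsewhere). That is why residual_count, which used to follow the IU, is retired and kept only as a comment documenting the wire layout. A generic illustration, with hypothetical struct names:

	struct ok_resp {
		__le32 status;
		struct ssp_response_iu iu;	/* flex array: must be last */
	};

	struct warned_resp {
		struct ssp_response_iu iu;	/* any member after this would
						 * overlay the array payload */
		__le32 residual_count;
	};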
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 599410bcdfea..8ff4b89ff81e 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -552,6 +552,7 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
pm8001_ha->id = pm8001_id++;
pm8001_ha->logging_level = logging_level;
pm8001_ha->non_fatal_count = 0;
+ mutex_init(&pm8001_ha->iop_log_lock);
if (link_rate >= 1 && link_rate <= 15)
pm8001_ha->link_rate = (link_rate << 8);
else {
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index f7067878b34f..6a8d35aea93a 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -130,6 +130,16 @@ static void pm80xx_get_tag_opcodes(struct sas_task *task, int *ata_op,
}
}
+u32 pm80xx_get_local_phy_id(struct domain_device *dev)
+{
+ struct pm8001_device *pm8001_dev = dev->lldd_dev;
+
+ if (dev_parent_is_expander(dev))
+ return dev->parent->ex_dev.ex_phy->phy_id;
+
+ return pm8001_dev->attached_phy;
+}
+
void pm80xx_show_pending_commands(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *target_pm8001_dev)
{
@@ -477,7 +487,7 @@ int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
struct pm8001_device *pm8001_dev = dev->lldd_dev;
bool internal_abort = sas_is_internal_abort(task);
struct pm8001_hba_info *pm8001_ha;
- struct pm8001_port *port = NULL;
+ struct pm8001_port *port;
struct pm8001_ccb_info *ccb;
unsigned long flags;
u32 n_elem = 0;
@@ -502,8 +512,7 @@ int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
spin_lock_irqsave(&pm8001_ha->lock, flags);
- pm8001_dev = dev->lldd_dev;
- port = pm8001_ha->phy[pm8001_dev->attached_phy].port;
+ port = dev->port->lldd_port;
if (!internal_abort &&
(DEV_IS_GONE(pm8001_dev) || !port || !port->port_attached)) {
@@ -701,7 +710,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
dev->lldd_dev = pm8001_device;
pm8001_device->dev_type = dev->dev_type;
pm8001_device->dcompletion = &completion;
- if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
+ if (dev_parent_is_expander(dev)) {
int phy_id;
phy_id = sas_find_attached_phy_id(&parent_dev->ex_dev, dev);
@@ -766,7 +775,16 @@ static void pm8001_dev_gone_notify(struct domain_device *dev)
spin_lock_irqsave(&pm8001_ha->lock, flags);
}
PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
- pm8001_ha->phy[pm8001_dev->attached_phy].phy_attached = 0;
+
+ /*
+ * The phy array only contains local phys. Thus, we cannot clear
+ * phy_attached for a device behind an expander.
+ */
+ if (!dev_parent_is_expander(dev)) {
+ u32 phy_id = pm80xx_get_local_phy_id(dev);
+
+ pm8001_ha->phy[phy_id].phy_attached = 0;
+ }
pm8001_free_dev(pm8001_dev);
} else {
pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n");
@@ -1048,7 +1066,7 @@ int pm8001_abort_task(struct sas_task *task)
struct pm8001_hba_info *pm8001_ha;
struct pm8001_device *pm8001_dev;
int rc = TMF_RESP_FUNC_FAILED, ret;
- u32 phy_id, port_id;
+ u32 port_id;
struct sas_task_slow slow_task;
if (!task->lldd_task || !task->dev)
@@ -1057,7 +1075,6 @@ int pm8001_abort_task(struct sas_task *task)
dev = task->dev;
pm8001_dev = dev->lldd_dev;
pm8001_ha = pm8001_find_ha_by_dev(dev);
- phy_id = pm8001_dev->attached_phy;
if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
// If the controller is seeing fatal errors
@@ -1089,7 +1106,8 @@ int pm8001_abort_task(struct sas_task *task)
if (pm8001_ha->chip_id == chip_8006) {
DECLARE_COMPLETION_ONSTACK(completion_reset);
DECLARE_COMPLETION_ONSTACK(completion);
- struct pm8001_phy *phy = pm8001_ha->phy + phy_id;
+ u32 phy_id = pm80xx_get_local_phy_id(dev);
+ struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
port_id = phy->port->port_id;
/* 1. Set Device state as Recovery */
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 334485bb2c12..b63b6ffcaaf5 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -547,6 +547,10 @@ struct pm8001_hba_info {
u32 ci_offset;
u32 pi_offset;
u32 max_memcnt;
+ u32 iop_log_start;
+ u32 iop_log_end;
+ u32 iop_log_count;
+ struct mutex iop_log_lock;
};
struct pm8001_work {
@@ -798,6 +802,7 @@ void pm8001_setds_completion(struct domain_device *dev);
void pm8001_tmf_aborted(struct sas_task *task);
void pm80xx_show_pending_commands(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *dev);
+u32 pm80xx_get_local_phy_id(struct domain_device *dev);
#endif
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index c1bae995a412..31960b72c1e9 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -2340,8 +2340,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW)) {
- if (!((t->dev->parent) &&
- (dev_is_expander(t->dev->parent->dev_type)))) {
+ if (!dev_parent_is_expander(t->dev)) {
for (i = 0, j = 4; i <= 3 && j <= 7; i++, j++)
sata_addr_low[i] = pm8001_ha->sas_addr[j];
for (i = 0, j = 0; i <= 3 && j <= 3; i++, j++)
@@ -4780,7 +4779,6 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
u16 firstBurstSize = 0;
u16 ITNT = 2000;
struct domain_device *dev = pm8001_dev->sas_device;
- struct domain_device *parent_dev = dev->parent;
struct pm8001_port *port = dev->port->lldd_port;
memset(&payload, 0, sizeof(payload));
@@ -4799,10 +4797,8 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
dev_is_expander(pm8001_dev->dev_type))
stp_sspsmp_sata = 0x01; /*ssp or smp*/
}
- if (parent_dev && dev_is_expander(parent_dev->dev_type))
- phy_id = parent_dev->ex_dev.ex_phy->phy_id;
- else
- phy_id = pm8001_dev->attached_phy;
+
+ phy_id = pm80xx_get_local_phy_id(dev);
opc = OPC_INB_REG_DEV;
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index eb8fd37b2066..d8a63b7fed6a 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -558,8 +558,10 @@ struct ssp_completion_resp {
__le32 status;
__le32 param;
__le32 ssptag_rescv_rescpad;
+
+ /* Must be last -- ends in a flexible-array member. */
struct ssp_response_iu ssp_resp_iu;
- __le32 residual_count;
+ /* __le32 residual_count; */
} __attribute__((packed, aligned(4)));
#define SSP_RESCV_BIT 0x00010000
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 1ed3171f1797..ea682f3044b6 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -845,7 +845,7 @@ static DEF_SCSI_QCMD(ppa_queuecommand)
* be done in sd.c. Even if it gets fixed there, this will still
* work.
*/
-static int ppa_biosparam(struct scsi_device *sdev, struct block_device *dev,
+static int ppa_biosparam(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int ip[])
{
ip[0] = 0x40;
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 6af018f1ca22..ef841f643171 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -1023,7 +1023,7 @@ qla1280_eh_adapter_reset(struct scsi_cmnd *cmd)
}
static int
-qla1280_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+qla1280_biosparam(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int geom[])
{
int heads, sectors, cylinders;
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 10431a67d202..ccfc2d26dd37 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -3106,8 +3106,8 @@ static bool qla_bsg_found(struct qla_qpair *qpair, struct bsg_job *bsg_job)
switch (rval) {
case QLA_SUCCESS:
/* Wait for the command completion. */
- ratov_j = ha->r_a_tov / 10 * 4 * 1000;
- ratov_j = msecs_to_jiffies(ratov_j);
+ ratov_j = ha->r_a_tov / 10 * 4;
+ ratov_j = secs_to_jiffies(ratov_j);
if (!wait_for_completion_timeout(&comp, ratov_j)) {
ql_log(ql_log_info, vha, 0x7089,
diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c
index 91bbd3b75bff..ccd4485087a1 100644
--- a/drivers/scsi/qla2xxx/qla_edif.c
+++ b/drivers/scsi/qla2xxx/qla_edif.c
@@ -1798,7 +1798,7 @@ retry:
switch (rval) {
case QLA_SUCCESS:
break;
- case EAGAIN:
+ case -EAGAIN:
msleep(EDIF_MSLEEP_INTERVAL);
cnt++;
if (cnt < EDIF_RETRY_COUNT)
@@ -3649,7 +3649,7 @@ retry:
p->e.extra_rx_xchg_address, p->e.extra_control_flags,
sp->handle, sp->remap.req.len, bsg_job);
break;
- case EAGAIN:
+ case -EAGAIN:
msleep(EDIF_MSLEEP_INTERVAL);
cnt++;
if (cnt < EDIF_RETRY_COUNT)
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index be211ff22acb..6a2e1c7fd125 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2059,11 +2059,11 @@ static void qla_marker_sp_done(srb_t *sp, int res)
int cnt = 5; \
do { \
if (_chip_gen != sp->vha->hw->chip_reset || _login_gen != sp->fcport->login_gen) {\
- _rval = EINVAL; \
+ _rval = -EINVAL; \
break; \
} \
_rval = qla2x00_start_sp(_sp); \
- if (_rval == EAGAIN) \
+ if (_rval == -EAGAIN) \
msleep(1); \
else \
break; \
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 8ee2e337c9e1..316594aa40cc 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -419,7 +419,7 @@ retry:
switch (rval) {
case QLA_SUCCESS:
break;
- case EAGAIN:
+ case -EAGAIN:
msleep(PURLS_MSLEEP_INTERVAL);
cnt++;
if (cnt < PURLS_RETRY_COUNT)
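The qla2xxx fixes in qla_edif.c, qla_init.c and qla_nvme.c correct a recurring errno mix-up: in-kernel APIs report failure as a negative errno, so comparing against the positive EAGAIN (or assigning positive EINVAL) could never match and the retry paths were dead code. The corrected idiom, condensed from the hunks above:

	retry:
		rval = qla2x00_start_sp(sp);
		if (rval == -EAGAIN) {	/* negative errno: kernel convention */
			msleep(EDIF_MSLEEP_INTERVAL);
			if (++cnt < EDIF_RETRY_COUNT)
				goto retry;
		}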
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index d4b484c0fd9d..5ffd94586652 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1291,8 +1291,8 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
"Abort command mbx cmd=%p, rval=%x.\n", cmd, rval);
/* Wait for the command completion. */
- ratov_j = ha->r_a_tov/10 * 4 * 1000;
- ratov_j = msecs_to_jiffies(ratov_j);
+ ratov_j = ha->r_a_tov / 10 * 4;
+ ratov_j = secs_to_jiffies(ratov_j);
switch (rval) {
case QLA_SUCCESS:
if (!wait_for_completion_timeout(&comp, ratov_j)) {
@@ -1806,8 +1806,8 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
rval = ha->isp_ops->abort_command(sp);
/* Wait for command completion. */
ret_cmd = false;
- ratov_j = ha->r_a_tov/10 * 4 * 1000;
- ratov_j = msecs_to_jiffies(ratov_j);
+ ratov_j = ha->r_a_tov / 10 * 4;
+ ratov_j = secs_to_jiffies(ratov_j);
switch (rval) {
case QLA_SUCCESS:
if (wait_for_completion_timeout(&comp, ratov_j)) {
@@ -7883,11 +7883,6 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
"Slot Reset.\n");
ha->pci_error_state = QLA_PCI_SLOT_RESET;
- /* Workaround: qla2xxx driver which access hardware earlier
- * needs error state to be pci_channel_io_online.
- * Otherwise mailbox command timesout.
- */
- pdev->error_state = pci_channel_io_normal;
pci_restore_state(pdev);
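The timeout conversions in qla_bsg.c and qla_os.c lean on secs_to_jiffies(): judging by the arithmetic in these hunks, ha->r_a_tov holds tenths of a second, so r_a_tov / 10 * 4 is four R_A_TOV periods in whole seconds, and the manual * 1000 step plus the msecs_to_jiffies() round-trip collapse into one call:

	/* wait up to 4 * R_A_TOV for the aborted command to complete */
	ratov_j = secs_to_jiffies(ha->r_a_tov / 10 * 4);
	if (!wait_for_completion_timeout(&comp, ratov_j))
		rval = FAILED;	/* illustrative error disposition */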
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index a39f1da4ce47..a761c0aa5127 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -6606,6 +6606,8 @@ static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0);
vfree(dst_addr);
+ if (IS_ERR(ep))
+ return NULL;
return ep;
}
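qla4xxx_ep_connect() returns an ERR_PTR-encoded pointer on failure, while the callers of qla4xxx_get_ep_fwdb() check only for NULL; without the new filter an error pointer would flow onward and eventually be dereferenced. The guard is the standard translation pattern:

	ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0);
	if (IS_ERR(ep))		/* callers understand only NULL */
		return NULL;
	return ep;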
diff --git a/drivers/scsi/qlogicfas408.c b/drivers/scsi/qlogicfas408.c
index 3e065d5fc80c..1ce469b7db99 100644
--- a/drivers/scsi/qlogicfas408.c
+++ b/drivers/scsi/qlogicfas408.c
@@ -492,7 +492,7 @@ DEF_SCSI_QCMD(qlogicfas408_queuecommand)
* Return bios parameters
*/
-int qlogicfas408_biosparam(struct scsi_device *disk, struct block_device *dev,
+int qlogicfas408_biosparam(struct scsi_device *disk, struct gendisk *unused,
sector_t capacity, int ip[])
{
/* This should mimic the DOS Qlogic driver's behavior exactly */
diff --git a/drivers/scsi/qlogicfas408.h b/drivers/scsi/qlogicfas408.h
index a971db11d293..83ef86c71f2f 100644
--- a/drivers/scsi/qlogicfas408.h
+++ b/drivers/scsi/qlogicfas408.h
@@ -106,7 +106,7 @@ struct qlogicfas408_priv {
irqreturn_t qlogicfas408_ihandl(int irq, void *dev_id);
int qlogicfas408_queuecommand(struct Scsi_Host *h, struct scsi_cmnd * cmd);
int qlogicfas408_biosparam(struct scsi_device * disk,
- struct block_device *dev,
+ struct gendisk *unused,
sector_t capacity, int ip[]);
int qlogicfas408_abort(struct scsi_cmnd * cmd);
extern int qlogicfas408_host_reset(struct scsi_cmnd *cmd);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 0847767d4d43..b2ab97be5db3 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1155,14 +1155,9 @@ static ssize_t sdebug_error_write(struct file *file, const char __user *ubuf,
struct sdebug_err_inject *inject;
struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private;
- buf = kzalloc(count + 1, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- if (copy_from_user(buf, ubuf, count)) {
- kfree(buf);
- return -EFAULT;
- }
+ buf = memdup_user_nul(ubuf, count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
if (buf[0] == '-')
return sdebug_err_remove(sdev, buf, count);
@@ -2674,8 +2669,10 @@ static int resp_rsup_tmfs(struct scsi_cmnd *scp,
static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{ /* Read-Write Error Recovery page for mode_sense */
- unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
- 5, 0, 0xff, 0xff};
+ static const unsigned char err_recov_pg[] = {
+ 0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
+ 5, 0, 0xff, 0xff
+ };
memcpy(p, err_recov_pg, sizeof(err_recov_pg));
if (1 == pcontrol)
@@ -2685,8 +2682,10 @@ static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{ /* Disconnect-Reconnect page for mode_sense */
- unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0};
+ static const unsigned char disconnect_pg[] = {
+ 0x2, 0xe, 128, 128, 0, 10, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0
+ };
memcpy(p, disconnect_pg, sizeof(disconnect_pg));
if (1 == pcontrol)
@@ -2696,9 +2695,11 @@ static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
static int resp_format_pg(unsigned char *p, int pcontrol, int target)
{ /* Format device page for mode_sense */
- unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0x40, 0, 0, 0};
+ static const unsigned char format_pg[] = {
+ 0x3, 0x16, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0x40, 0, 0, 0
+ };
memcpy(p, format_pg, sizeof(format_pg));
put_unaligned_be16(sdebug_sectors_per, p + 10);
@@ -2716,10 +2717,14 @@ static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
{ /* Caching page for mode_sense */
- unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
- unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
- 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
+ static const unsigned char ch_caching_pg[] = {
+ /* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ };
+ static const unsigned char d_caching_pg[] = {
+ 0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
+ 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0
+ };
if (SDEBUG_OPT_N_WCE & sdebug_opts)
caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
@@ -2738,8 +2743,10 @@ static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
{ /* Control mode page for mode_sense */
unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
0, 0, 0, 0};
- unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
- 0, 0, 0x2, 0x4b};
+ static const unsigned char d_ctrl_m_pg[] = {
+ 0xa, 10, 2, 0, 0, 0, 0, 0,
+ 0, 0, 0x2, 0x4b
+ };
if (sdebug_dsense)
ctrl_m_pg[2] |= 0x4;
@@ -2794,10 +2801,14 @@ static int resp_grouping_m_pg(unsigned char *p, int pcontrol, int target)
static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
{ /* Informational Exceptions control mode page for mode_sense */
- unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
- 0, 0, 0x0, 0x0};
- unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
- 0, 0, 0x0, 0x0};
+ static const unsigned char ch_iec_m_pg[] = {
+ /* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
+ 0, 0, 0x0, 0x0
+ };
+ static const unsigned char d_iec_m_pg[] = {
+ 0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
+ 0, 0, 0x0, 0x0
+ };
memcpy(p, iec_m_pg, sizeof(iec_m_pg));
if (1 == pcontrol)
@@ -2809,8 +2820,9 @@ static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{ /* SAS SSP mode page - short format for mode_sense */
- unsigned char sas_sf_m_pg[] = {0x19, 0x6,
- 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
+ static const unsigned char sas_sf_m_pg[] = {
+ 0x19, 0x6, 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0
+ };
memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
if (1 == pcontrol)
@@ -2854,9 +2866,10 @@ static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{ /* SAS SSP shared protocol specific port mode subpage */
- unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- };
+ static const unsigned char sas_sha_m_pg[] = {
+ 0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ };
memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
if (1 == pcontrol)
@@ -2923,8 +2936,10 @@ static int process_medium_part_m_pg(struct sdebug_dev_info *devip,
static int resp_compression_m_pg(unsigned char *p, int pcontrol, int target,
unsigned char dce)
{ /* Compression page for mode_sense (tape) */
- unsigned char compression_pg[] = {0x0f, 14, 0x40, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 00, 00};
+ static const unsigned char compression_pg[] = {
+ 0x0f, 14, 0x40, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0
+ };
memcpy(p, compression_pg, sizeof(compression_pg));
if (dce)
@@ -3282,9 +3297,10 @@ bad_pcode:
static int resp_temp_l_pg(unsigned char *arr)
{
- unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
- 0x0, 0x1, 0x3, 0x2, 0x0, 65,
- };
+ static const unsigned char temp_l_pg[] = {
+ 0x0, 0x0, 0x3, 0x2, 0x0, 38,
+ 0x0, 0x1, 0x3, 0x2, 0x0, 65,
+ };
memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
return sizeof(temp_l_pg);
@@ -3292,8 +3308,9 @@ static int resp_temp_l_pg(unsigned char *arr)
static int resp_ie_l_pg(unsigned char *arr)
{
- unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
- };
+ static const unsigned char ie_l_pg[] = {
+ 0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
+ };
memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
if (iec_m_pg[2] & 0x4) { /* TEST bit set */
@@ -3305,11 +3322,12 @@ static int resp_ie_l_pg(unsigned char *arr)
static int resp_env_rep_l_spg(unsigned char *arr)
{
- unsigned char env_rep_l_spg[] = {0x0, 0x0, 0x23, 0x8,
- 0x0, 40, 72, 0xff, 45, 18, 0, 0,
- 0x1, 0x0, 0x23, 0x8,
- 0x0, 55, 72, 35, 55, 45, 0, 0,
- };
+ static const unsigned char env_rep_l_spg[] = {
+ 0x0, 0x0, 0x23, 0x8,
+ 0x0, 40, 72, 0xff, 45, 18, 0, 0,
+ 0x1, 0x0, 0x23, 0x8,
+ 0x0, 55, 72, 35, 55, 45, 0, 0,
+ };
memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
return sizeof(env_rep_l_spg);
@@ -8782,8 +8800,8 @@ static int sdebug_add_store(void)
/* Logical Block Provisioning */
if (scsi_debug_lbp()) {
map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
- sip->map_storep = vmalloc(array_size(sizeof(long),
- BITS_TO_LONGS(map_size)));
+ sip->map_storep = vcalloc(BITS_TO_LONGS(map_size),
+ sizeof(long));
pr_info("%lu provisioning blocks\n", map_size);
@@ -8792,8 +8810,6 @@ static int sdebug_add_store(void)
goto err;
}
- bitmap_zero(sip->map_storep, map_size);
-
/* Map first 1KB for partition table */
if (sdebug_num_parts)
map_region(sip, 0, 2);
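Two simplifications in the scsi_debug hunks deserve a note. memdup_user_nul() folds the kzalloc() + copy_from_user() pair into one call and guarantees NUL termination of the copied user string, and vcalloc() returns zeroed memory, which is why the explicit bitmap_zero() disappears. Equivalent one-liners, reusing the variables from the hunks:

	buf = memdup_user_nul(ubuf, count);	/* alloc + copy + NUL */
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	sip->map_storep = vcalloc(BITS_TO_LONGS(map_size), sizeof(long));
	/* already zero-filled, no bitmap_zero() needed */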
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 0c65ecfedfbd..d7e42293b864 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -3148,8 +3148,7 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
/* Offset starting from the beginning of first page in this sg-entry */
*offset = *offset - len_complete + sg->offset;
- /* Assumption: contiguous pages can be accessed as "page + i" */
- page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
+ page = sg_page(sg) + (*offset >> PAGE_SHIFT);
*offset &= ~PAGE_MASK;
/* Bytes in this sg-entry from *offset to the end of the page */
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 169af7d47ce7..15ba493d2138 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -265,7 +265,7 @@ show_shost_supported_mode(struct device *dev, struct device_attribute *attr,
return show_shost_mode(supported_mode, buf);
}
-static DEVICE_ATTR(supported_mode, S_IRUGO | S_IWUSR, show_shost_supported_mode, NULL);
+static DEVICE_ATTR(supported_mode, S_IRUGO, show_shost_supported_mode, NULL);
static ssize_t
show_shost_active_mode(struct device *dev,
@@ -279,7 +279,7 @@ show_shost_active_mode(struct device *dev,
return show_shost_mode(shost->active_mode, buf);
}
-static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL);
+static DEVICE_ATTR(active_mode, S_IRUGO, show_shost_active_mode, NULL);
static int check_reset_type(const char *str)
{
diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c
index 19e6c3852d50..887de505bcf9 100644
--- a/drivers/scsi/scsicam.c
+++ b/drivers/scsi/scsicam.c
@@ -30,9 +30,9 @@
* starting at offset %0x1be.
* Returns: partition table in kmalloc(GFP_KERNEL) memory, or NULL on error.
*/
-unsigned char *scsi_bios_ptable(struct block_device *dev)
+unsigned char *scsi_bios_ptable(struct gendisk *dev)
{
- struct address_space *mapping = bdev_whole(dev)->bd_mapping;
+ struct address_space *mapping = dev->part0->bd_mapping;
unsigned char *res = NULL;
struct folio *folio;
@@ -48,7 +48,7 @@ EXPORT_SYMBOL(scsi_bios_ptable);
/**
* scsi_partsize - Parse cylinders/heads/sectors from PC partition table
- * @bdev: block device to parse
+ * @disk: gendisk of the disk to parse
* @capacity: size of the disk in sectors
* @geom: output in form of [hds, cylinders, sectors]
*
@@ -57,7 +57,7 @@ EXPORT_SYMBOL(scsi_bios_ptable);
*
* Returns: %false on failure, %true on success.
*/
-bool scsi_partsize(struct block_device *bdev, sector_t capacity, int geom[3])
+bool scsi_partsize(struct gendisk *disk, sector_t capacity, int geom[3])
{
int cyl, ext_cyl, end_head, end_cyl, end_sector;
unsigned int logical_end, physical_end, ext_physical_end;
@@ -65,7 +65,7 @@ bool scsi_partsize(struct block_device *bdev, sector_t capacity, int geom[3])
void *buf;
int ret = false;
- buf = scsi_bios_ptable(bdev);
+ buf = scsi_bios_ptable(disk);
if (!buf)
return false;
@@ -205,7 +205,7 @@ static int setsize(unsigned long capacity, unsigned int *cyls, unsigned int *hds
/**
* scsicam_bios_param - Determine geometry of a disk in cylinders/heads/sectors.
- * @bdev: which device
+ * @disk: which device
* @capacity: size of the disk in sectors
* @ip: return value: ip[0]=heads, ip[1]=sectors, ip[2]=cylinders
*
@@ -215,13 +215,13 @@ static int setsize(unsigned long capacity, unsigned int *cyls, unsigned int *hds
*
* Returns : -1 on failure, 0 on success.
*/
-int scsicam_bios_param(struct block_device *bdev, sector_t capacity, int *ip)
+int scsicam_bios_param(struct gendisk *disk, sector_t capacity, int *ip)
{
u64 capacity64 = capacity; /* Suppress gcc warning */
int ret = 0;
/* try to infer mapping from partition table */
- if (scsi_partsize(bdev, capacity, ip))
+ if (scsi_partsize(disk, capacity, ip))
return 0;
if (capacity64 < (1ULL << 32)) {
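Most of the mechanical churn across the drivers in this series follows from one interface change, visible in scsicam.c above: the host template's bios_param() callback and the scsicam helpers now take a struct gendisk instead of a struct block_device. Drivers that never used the device reference simply rename the parameter to 'unused'; the one real user, scsi_bios_ptable(), reaches the page cache through disk->part0->bd_mapping instead of bdev_whole(). A minimal updated callback for a hypothetical driver (names illustrative):

	static int demo_biosparam(struct scsi_device *sdev,
				  struct gendisk *unused,
				  sector_t capacity, int geom[])
	{
		geom[0] = 64;			/* heads */
		geom[1] = 32;			/* sectors per track */
		sector_div(capacity, 64 * 32);
		geom[2] = capacity;		/* cylinders */
		return 0;
	}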
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 5b8668accf8e..0252d3f6bed1 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -106,7 +106,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, struct queue_limits *lim,
unsigned int mode);
static void sd_config_write_same(struct scsi_disk *sdkp,
struct queue_limits *lim);
-static int sd_revalidate_disk(struct gendisk *);
+static void sd_revalidate_disk(struct gendisk *);
static void sd_unlock_native_capacity(struct gendisk *disk);
static void sd_shutdown(struct device *);
static void scsi_disk_release(struct device *cdev);
@@ -1599,9 +1599,9 @@ static void sd_release(struct gendisk *disk)
scsi_device_put(sdev);
}
-static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int sd_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
+ struct scsi_disk *sdkp = scsi_disk(disk);
struct scsi_device *sdp = sdkp->device;
struct Scsi_Host *host = sdp->host;
sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
@@ -1614,9 +1614,9 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
/* override with calculated, extended default, or driver values */
if (host->hostt->bios_param)
- host->hostt->bios_param(sdp, bdev, capacity, diskinfo);
+ host->hostt->bios_param(sdp, disk, capacity, diskinfo);
else
- scsicam_bios_param(bdev, capacity, diskinfo);
+ scsicam_bios_param(disk, capacity, diskinfo);
geo->heads = diskinfo[0];
geo->sectors = diskinfo[1];
@@ -3691,13 +3691,13 @@ static void sd_read_block_zero(struct scsi_disk *sdkp)
* performs disk spin up, read_capacity, etc.
* @disk: struct gendisk we care about
**/
-static int sd_revalidate_disk(struct gendisk *disk)
+static void sd_revalidate_disk(struct gendisk *disk)
{
struct scsi_disk *sdkp = scsi_disk(disk);
struct scsi_device *sdp = sdkp->device;
sector_t old_capacity = sdkp->capacity;
- struct queue_limits lim;
- unsigned char *buffer;
+ struct queue_limits *lim = NULL;
+ unsigned char *buffer = NULL;
unsigned int dev_max;
int err;
@@ -3709,25 +3709,26 @@ static int sd_revalidate_disk(struct gendisk *disk)
* of the other niceties.
*/
if (!scsi_device_online(sdp))
- goto out;
+ return;
+
+ lim = kmalloc(sizeof(*lim), GFP_KERNEL);
+ if (!lim)
+ return;
buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL);
- if (!buffer) {
- sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory "
- "allocation failure.\n");
+ if (!buffer)
goto out;
- }
sd_spinup_disk(sdkp);
- lim = queue_limits_start_update(sdkp->disk->queue);
+ *lim = queue_limits_start_update(sdkp->disk->queue);
/*
* Without media there is no reason to ask; moreover, some devices
* react badly if we do.
*/
if (sdkp->media_present) {
- sd_read_capacity(sdkp, &lim, buffer);
+ sd_read_capacity(sdkp, lim, buffer);
/*
* Some USB/UAS devices return generic values for mode pages
* until the media has been accessed. Trigger a READ operation
@@ -3741,17 +3742,17 @@ static int sd_revalidate_disk(struct gendisk *disk)
* cause this to be updated correctly and any device which
* doesn't support it should be treated as rotational.
*/
- lim.features |= (BLK_FEAT_ROTATIONAL | BLK_FEAT_ADD_RANDOM);
+ lim->features |= (BLK_FEAT_ROTATIONAL | BLK_FEAT_ADD_RANDOM);
if (scsi_device_supports_vpd(sdp)) {
sd_read_block_provisioning(sdkp);
- sd_read_block_limits(sdkp, &lim);
+ sd_read_block_limits(sdkp, lim);
sd_read_block_limits_ext(sdkp);
- sd_read_block_characteristics(sdkp, &lim);
- sd_zbc_read_zones(sdkp, &lim, buffer);
+ sd_read_block_characteristics(sdkp, lim);
+ sd_zbc_read_zones(sdkp, lim, buffer);
}
- sd_config_discard(sdkp, &lim, sd_discard_mode(sdkp));
+ sd_config_discard(sdkp, lim, sd_discard_mode(sdkp));
sd_print_capacity(sdkp, old_capacity);
@@ -3761,47 +3762,46 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_app_tag_own(sdkp, buffer);
sd_read_write_same(sdkp, buffer);
sd_read_security(sdkp, buffer);
- sd_config_protection(sdkp, &lim);
+ sd_config_protection(sdkp, lim);
}
/*
* We now have all cache related info, determine how we deal
* with flush requests.
*/
- sd_set_flush_flag(sdkp, &lim);
+ sd_set_flush_flag(sdkp, lim);
/* Initial block count limit based on CDB TRANSFER LENGTH field size. */
dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS;
/* Some devices report a maximum block count for READ/WRITE requests. */
dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
- lim.max_dev_sectors = logical_to_sectors(sdp, dev_max);
+ lim->max_dev_sectors = logical_to_sectors(sdp, dev_max);
if (sd_validate_min_xfer_size(sdkp))
- lim.io_min = logical_to_bytes(sdp, sdkp->min_xfer_blocks);
+ lim->io_min = logical_to_bytes(sdp, sdkp->min_xfer_blocks);
else
- lim.io_min = 0;
+ lim->io_min = 0;
/*
* Limit default to SCSI host optimal sector limit if set. There may be
* an impact on performance for when the size of a request exceeds this
* host limit.
*/
- lim.io_opt = sdp->host->opt_sectors << SECTOR_SHIFT;
+ lim->io_opt = sdp->host->opt_sectors << SECTOR_SHIFT;
if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
- lim.io_opt = min_not_zero(lim.io_opt,
+ lim->io_opt = min_not_zero(lim->io_opt,
logical_to_bytes(sdp, sdkp->opt_xfer_blocks));
}
sdkp->first_scan = 0;
set_capacity_and_notify(disk, logical_to_sectors(sdp, sdkp->capacity));
- sd_config_write_same(sdkp, &lim);
- kfree(buffer);
+ sd_config_write_same(sdkp, lim);
- err = queue_limits_commit_update_frozen(sdkp->disk->queue, &lim);
+ err = queue_limits_commit_update_frozen(sdkp->disk->queue, lim);
if (err)
- return err;
+ goto out;
/*
* Query concurrent positioning ranges after
@@ -3820,7 +3820,9 @@ static int sd_revalidate_disk(struct gendisk *disk)
set_capacity_and_notify(disk, 0);
out:
- return 0;
+ kfree(buffer);
+ kfree(lim);
+
}
/**
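sd_revalidate_disk() becomes void, apparently because its callers ignore the result, and struct queue_limits moves from the stack to the heap, presumably to keep the function's stack frame small; both buffers are then released on a single exit path. The allocation scaffolding, condensed (the limit-population logic between the allocations is elided):

	struct queue_limits *lim = kmalloc(sizeof(*lim), GFP_KERNEL);
	unsigned char *buffer = NULL;

	if (!lim)
		return;
	buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL);
	if (!buffer)
		goto out;

	/* ... populate *lim, then queue_limits_commit_update_frozen() ... */
	out:
		kfree(buffer);
		kfree(lim);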
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 3c02a5f7b5f3..4c62c597c7be 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1235,8 +1235,7 @@ sg_vma_fault(struct vm_fault *vmf)
len = vma->vm_end - sa;
len = (len < length) ? len : length;
if (offset < len) {
- struct page *page = nth_page(rsv_schp->pages[k],
- offset >> PAGE_SHIFT);
+ struct page *page = rsv_schp->pages[k] + (offset >> PAGE_SHIFT);
get_page(page); /* increment page count */
vmf->page = page;
return 0; /* success */
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 125944941601..03c97e60d36f 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -20,6 +20,7 @@
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/crash_dump.h>
+#include <linux/string.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -6774,17 +6775,15 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
}
if (iocommand.buf_size > 0) {
- kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
- if (!kernel_buffer)
- return -ENOMEM;
if (iocommand.Request.Type.Direction & XFER_WRITE) {
- if (copy_from_user(kernel_buffer, iocommand.buf,
- iocommand.buf_size)) {
- rc = -EFAULT;
- goto out;
- }
+ kernel_buffer = memdup_user(iocommand.buf,
+ iocommand.buf_size);
+ if (IS_ERR(kernel_buffer))
+ return PTR_ERR(kernel_buffer);
} else {
- memset(kernel_buffer, 0, iocommand.buf_size);
+ kernel_buffer = kzalloc(iocommand.buf_size, GFP_KERNEL);
+ if (!kernel_buffer)
+ return -ENOMEM;
}
}
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index b17796d5ee66..add13e306898 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -475,13 +475,21 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt)
static int sr_revalidate_disk(struct scsi_cd *cd)
{
+ struct request_queue *q = cd->device->request_queue;
struct scsi_sense_hdr sshdr;
+ struct queue_limits lim;
+ int sector_size;
/* if the unit is not ready, nothing more to do */
if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr))
return 0;
sr_cd_check(&cd->cdi);
- return get_sectorsize(cd);
+ sector_size = get_sectorsize(cd);
+
+ lim = queue_limits_start_update(q);
+ lim.logical_block_size = sector_size;
+ lim.features |= BLK_FEAT_ROTATIONAL;
+ return queue_limits_commit_update_frozen(q, &lim);
}
static int sr_block_open(struct gendisk *disk, blk_mode_t mode)
@@ -721,10 +729,8 @@ fail:
static int get_sectorsize(struct scsi_cd *cd)
{
- struct request_queue *q = cd->device->request_queue;
static const u8 cmd[10] = { READ_CAPACITY };
unsigned char buffer[8] = { };
- struct queue_limits lim;
int err;
int sector_size;
struct scsi_failure failure_defs[] = {
@@ -795,9 +801,7 @@ static int get_sectorsize(struct scsi_cd *cd)
set_capacity(cd->disk, cd->capacity);
}
- lim = queue_limits_start_update(q);
- lim.logical_block_size = sector_size;
- return queue_limits_commit_update_frozen(q, &lim);
+ return sector_size;
}
static int get_capabilities(struct scsi_cd *cd)
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 63ed7f9aaa93..d8ad02c29320 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -1457,7 +1457,7 @@ static void stex_reset_work(struct work_struct *work)
}
static int stex_biosparam(struct scsi_device *sdev,
- struct block_device *bdev, sector_t capacity, int geom[])
+ struct gendisk *unused, sector_t capacity, int geom[])
{
int heads = 255, sectors = 63;
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index d9e59204a9c3..567f9cd29102 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1615,7 +1615,7 @@ static int storvsc_sdev_configure(struct scsi_device *sdevice,
return 0;
}
-static int storvsc_get_chs(struct scsi_device *sdev, struct block_device * bdev,
+static int storvsc_get_chs(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int *info)
{
sector_t nsect = capacity;
@@ -1941,8 +1941,8 @@ static int storvsc_probe(struct hv_device *device,
int num_present_cpus = num_present_cpus();
struct Scsi_Host *host;
struct hv_host_device *host_dev;
- bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false);
- bool is_fc = ((dev_id->driver_data == SFC_GUID) ? true : false);
+ bool dev_is_ide = dev_id->driver_data == IDE_GUID;
+ bool is_fc = dev_id->driver_data == SFC_GUID;
int target = 0;
struct storvsc_device *stor_device;
int max_sub_channels = 0;
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
index 5a380eecfc75..0c9987828774 100644
--- a/drivers/scsi/wd719x.c
+++ b/drivers/scsi/wd719x.c
@@ -544,7 +544,7 @@ static int wd719x_host_reset(struct scsi_cmnd *cmd)
return wd719x_chip_init(wd) == 0 ? SUCCESS : FAILED;
}
-static int wd719x_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+static int wd719x_biosparam(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int geom[])
{
if (capacity >= 0x200000) {
diff --git a/drivers/siox/siox-bus-gpio.c b/drivers/siox/siox-bus-gpio.c
index d6f936464063..413d5f92311c 100644
--- a/drivers/siox/siox-bus-gpio.c
+++ b/drivers/siox/siox-bus-gpio.c
@@ -93,8 +93,7 @@ static int siox_gpio_probe(struct platform_device *pdev)
smaster = devm_siox_master_alloc(dev, sizeof(*ddata));
if (!smaster)
- return dev_err_probe(dev, -ENOMEM,
- "failed to allocate siox master\n");
+ return -ENOMEM;
platform_set_drvdata(pdev, smaster);
ddata = siox_master_get_devdata(smaster);
diff --git a/drivers/slimbus/Kconfig b/drivers/slimbus/Kconfig
index a0fdf9d792cb..60b0dcbc0ebb 100644
--- a/drivers/slimbus/Kconfig
+++ b/drivers/slimbus/Kconfig
@@ -13,13 +13,6 @@ menuconfig SLIMBUS
if SLIMBUS
# SLIMbus controllers
-config SLIM_QCOM_CTRL
- tristate "Qualcomm SLIMbus Manager Component"
- depends on HAS_IOMEM
- help
- Select driver if Qualcomm's SLIMbus Manager Component is
- programmed using Linux kernel.
-
config SLIM_QCOM_NGD_CTRL
tristate "Qualcomm SLIMbus Satellite Non-Generic Device Component"
depends on HAS_IOMEM && DMA_ENGINE && NET
diff --git a/drivers/slimbus/Makefile b/drivers/slimbus/Makefile
index d9aa011b6804..3cfb41c3b592 100644
--- a/drivers/slimbus/Makefile
+++ b/drivers/slimbus/Makefile
@@ -6,8 +6,5 @@ obj-$(CONFIG_SLIMBUS) += slimbus.o
slimbus-y := core.o messaging.o sched.o stream.o
#Controllers
-obj-$(CONFIG_SLIM_QCOM_CTRL) += slim-qcom-ctrl.o
-slim-qcom-ctrl-y := qcom-ctrl.o
-
obj-$(CONFIG_SLIM_QCOM_NGD_CTRL) += slim-qcom-ngd-ctrl.o
slim-qcom-ngd-ctrl-y := qcom-ngd-ctrl.o
diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c
index 6f01d944f9c6..e2dbe4a66b70 100644
--- a/drivers/slimbus/messaging.c
+++ b/drivers/slimbus/messaging.c
@@ -143,8 +143,6 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
if (!txn->msg->comp)
txn->comp = &done;
- else
- txn->comp = txn->comp;
}
ret = ctrl->xfer_msg(ctrl, txn);
@@ -224,7 +222,7 @@ static u16 slim_slicesize(int code)
/**
* slim_xfer_msg() - Transfer a value info message on slim device
*
- * @sbdev: slim device to which this msg has to be transfered
+ * @sbdev: slim device to which this msg has to be transferred
* @msg: value info message pointer
* @mc: message code of the message
*
diff --git a/drivers/slimbus/qcom-ctrl.c b/drivers/slimbus/qcom-ctrl.c
deleted file mode 100644
index ab344f7472f2..000000000000
--- a/drivers/slimbus/qcom-ctrl.c
+++ /dev/null
@@ -1,735 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2011-2017, The Linux Foundation
- */
-
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/of.h>
-#include <linux/pm_runtime.h>
-#include "slimbus.h"
-
-/* Manager registers */
-#define MGR_CFG 0x200
-#define MGR_STATUS 0x204
-#define MGR_INT_EN 0x210
-#define MGR_INT_STAT 0x214
-#define MGR_INT_CLR 0x218
-#define MGR_TX_MSG 0x230
-#define MGR_RX_MSG 0x270
-#define MGR_IE_STAT 0x2F0
-#define MGR_VE_STAT 0x300
-#define MGR_CFG_ENABLE 1
-
-/* Framer registers */
-#define FRM_CFG 0x400
-#define FRM_STAT 0x404
-#define FRM_INT_EN 0x410
-#define FRM_INT_STAT 0x414
-#define FRM_INT_CLR 0x418
-#define FRM_WAKEUP 0x41C
-#define FRM_CLKCTL_DONE 0x420
-#define FRM_IE_STAT 0x430
-#define FRM_VE_STAT 0x440
-
-/* Interface registers */
-#define INTF_CFG 0x600
-#define INTF_STAT 0x604
-#define INTF_INT_EN 0x610
-#define INTF_INT_STAT 0x614
-#define INTF_INT_CLR 0x618
-#define INTF_IE_STAT 0x630
-#define INTF_VE_STAT 0x640
-
-/* Interrupt status bits */
-#define MGR_INT_TX_NACKED_2 BIT(25)
-#define MGR_INT_MSG_BUF_CONTE BIT(26)
-#define MGR_INT_RX_MSG_RCVD BIT(30)
-#define MGR_INT_TX_MSG_SENT BIT(31)
-
-/* Framer config register settings */
-#define FRM_ACTIVE 1
-#define CLK_GEAR 7
-#define ROOT_FREQ 11
-#define REF_CLK_GEAR 15
-#define INTR_WAKE 19
-
-#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
- ((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))
-
-#define SLIM_ROOT_FREQ 24576000
-#define QCOM_SLIM_AUTOSUSPEND 1000
-
-/* MAX message size over control channel */
-#define SLIM_MSGQ_BUF_LEN 40
-#define QCOM_TX_MSGS 2
-#define QCOM_RX_MSGS 8
-#define QCOM_BUF_ALLOC_RETRIES 10
-
-#define CFG_PORT(r, v) ((v) ? CFG_PORT_V2(r) : CFG_PORT_V1(r))
-
-/* V2 Component registers */
-#define CFG_PORT_V2(r) ((r ## _V2))
-#define COMP_CFG_V2 4
-#define COMP_TRUST_CFG_V2 0x3000
-
-/* V1 Component registers */
-#define CFG_PORT_V1(r) ((r ## _V1))
-#define COMP_CFG_V1 0
-#define COMP_TRUST_CFG_V1 0x14
-
-/* Resource group info for manager, and non-ported generic device-components */
-#define EE_MGR_RSC_GRP (1 << 10)
-#define EE_NGD_2 (2 << 6)
-#define EE_NGD_1 0
-
-struct slim_ctrl_buf {
- void *base;
- spinlock_t lock;
- int head;
- int tail;
- int sl_sz;
- int n;
-};
-
-struct qcom_slim_ctrl {
- struct slim_controller ctrl;
- struct slim_framer framer;
- struct device *dev;
- void __iomem *base;
- void __iomem *slew_reg;
-
- struct slim_ctrl_buf rx;
- struct slim_ctrl_buf tx;
-
- struct completion **wr_comp;
- int irq;
- struct workqueue_struct *rxwq;
- struct work_struct wd;
- struct clk *rclk;
- struct clk *hclk;
-};
-
-static void qcom_slim_queue_tx(struct qcom_slim_ctrl *ctrl, void *buf,
- u8 len, u32 tx_reg)
-{
- int count = (len + 3) >> 2;
-
- __iowrite32_copy(ctrl->base + tx_reg, buf, count);
-
- /* Ensure Oder of subsequent writes */
- mb();
-}
-
-static void *slim_alloc_rxbuf(struct qcom_slim_ctrl *ctrl)
-{
- unsigned long flags;
- int idx;
-
- spin_lock_irqsave(&ctrl->rx.lock, flags);
- if ((ctrl->rx.tail + 1) % ctrl->rx.n == ctrl->rx.head) {
- spin_unlock_irqrestore(&ctrl->rx.lock, flags);
- dev_err(ctrl->dev, "RX QUEUE full!");
- return NULL;
- }
- idx = ctrl->rx.tail;
- ctrl->rx.tail = (ctrl->rx.tail + 1) % ctrl->rx.n;
- spin_unlock_irqrestore(&ctrl->rx.lock, flags);
-
- return ctrl->rx.base + (idx * ctrl->rx.sl_sz);
-}
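-
- The rx/tx queues use the classic one-slot-sacrificed ring convention: (tail + 1) % n == head means full, tail == head means empty, so an n-slot ring holds n - 1 messages. A minimal userspace sketch of just that convention (N is an arbitrary example size):
-
-     #include <stdio.h>
-
-     #define N 8 /* slot count; one slot is sacrificed to tell full from empty */
-
-     static int head, tail;
-
-     static int ring_full(void)  { return (tail + 1) % N == head; }
-     static int ring_empty(void) { return tail == head; }
-
-     int main(void)
-     {
-         int produced = 0;
-
-         while (!ring_full()) {
-             tail = (tail + 1) % N;
-             produced++;
-         }
-         printf("capacity: %d of %d slots, empty=%d\n",
-                produced, N, ring_empty()); /* 7 of 8 slots */
-         return 0;
-     }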
-
-static void slim_ack_txn(struct qcom_slim_ctrl *ctrl, int err)
-{
- struct completion *comp;
- unsigned long flags;
- int idx;
-
- spin_lock_irqsave(&ctrl->tx.lock, flags);
- idx = ctrl->tx.head;
- ctrl->tx.head = (ctrl->tx.head + 1) % ctrl->tx.n;
- spin_unlock_irqrestore(&ctrl->tx.lock, flags);
-
- comp = ctrl->wr_comp[idx];
- ctrl->wr_comp[idx] = NULL;
-
- complete(comp);
-}
-
-static irqreturn_t qcom_slim_handle_tx_irq(struct qcom_slim_ctrl *ctrl,
- u32 stat)
-{
- int err = 0;
-
- if (stat & MGR_INT_TX_MSG_SENT)
- writel_relaxed(MGR_INT_TX_MSG_SENT,
- ctrl->base + MGR_INT_CLR);
-
- if (stat & MGR_INT_TX_NACKED_2) {
- u32 mgr_stat = readl_relaxed(ctrl->base + MGR_STATUS);
- u32 mgr_ie_stat = readl_relaxed(ctrl->base + MGR_IE_STAT);
- u32 frm_stat = readl_relaxed(ctrl->base + FRM_STAT);
- u32 frm_cfg = readl_relaxed(ctrl->base + FRM_CFG);
- u32 frm_intr_stat = readl_relaxed(ctrl->base + FRM_INT_STAT);
- u32 frm_ie_stat = readl_relaxed(ctrl->base + FRM_IE_STAT);
- u32 intf_stat = readl_relaxed(ctrl->base + INTF_STAT);
- u32 intf_intr_stat = readl_relaxed(ctrl->base + INTF_INT_STAT);
- u32 intf_ie_stat = readl_relaxed(ctrl->base + INTF_IE_STAT);
-
- writel_relaxed(MGR_INT_TX_NACKED_2, ctrl->base + MGR_INT_CLR);
-
- dev_err(ctrl->dev, "TX Nack MGR:int:0x%x, stat:0x%x\n",
- stat, mgr_stat);
- dev_err(ctrl->dev, "TX Nack MGR:ie:0x%x\n", mgr_ie_stat);
- dev_err(ctrl->dev, "TX Nack FRM:int:0x%x, stat:0x%x\n",
- frm_intr_stat, frm_stat);
- dev_err(ctrl->dev, "TX Nack FRM:cfg:0x%x, ie:0x%x\n",
- frm_cfg, frm_ie_stat);
- dev_err(ctrl->dev, "TX Nack INTF:intr:0x%x, stat:0x%x\n",
- intf_intr_stat, intf_stat);
- dev_err(ctrl->dev, "TX Nack INTF:ie:0x%x\n",
- intf_ie_stat);
- err = -ENOTCONN;
- }
-
- slim_ack_txn(ctrl, err);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t qcom_slim_handle_rx_irq(struct qcom_slim_ctrl *ctrl,
- u32 stat)
-{
- u32 *rx_buf, pkt[10];
- bool q_rx = false;
- u8 mc, mt, len;
-
- pkt[0] = readl_relaxed(ctrl->base + MGR_RX_MSG);
- mt = SLIM_HEADER_GET_MT(pkt[0]);
- len = SLIM_HEADER_GET_RL(pkt[0]);
- mc = SLIM_HEADER_GET_MC(pkt[0]>>8);
-
-	/*
-	 * This message cannot be handled in the ISR, so
-	 * let the workqueue handle it.
-	 */
- if (mt == SLIM_MSG_MT_CORE && mc == SLIM_MSG_MC_REPORT_PRESENT) {
- rx_buf = (u32 *)slim_alloc_rxbuf(ctrl);
- if (!rx_buf) {
- dev_err(ctrl->dev, "dropping RX:0x%x due to RX full\n",
- pkt[0]);
- goto rx_ret_irq;
- }
- rx_buf[0] = pkt[0];
-
- } else {
- rx_buf = pkt;
- }
-
- __ioread32_copy(rx_buf + 1, ctrl->base + MGR_RX_MSG + 4,
- DIV_ROUND_UP(len, 4));
-
- switch (mc) {
-
- case SLIM_MSG_MC_REPORT_PRESENT:
- q_rx = true;
- break;
- case SLIM_MSG_MC_REPLY_INFORMATION:
- case SLIM_MSG_MC_REPLY_VALUE:
- slim_msg_response(&ctrl->ctrl, (u8 *)(rx_buf + 1),
- (u8)(*rx_buf >> 24), (len - 4));
- break;
- default:
- dev_err(ctrl->dev, "unsupported MC,%x MT:%x\n",
- mc, mt);
- break;
- }
-rx_ret_irq:
- writel(MGR_INT_RX_MSG_RCVD, ctrl->base +
- MGR_INT_CLR);
- if (q_rx)
- queue_work(ctrl->rxwq, &ctrl->wd);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t qcom_slim_interrupt(int irq, void *d)
-{
- struct qcom_slim_ctrl *ctrl = d;
- u32 stat = readl_relaxed(ctrl->base + MGR_INT_STAT);
- int ret = IRQ_NONE;
-
- if (stat & MGR_INT_TX_MSG_SENT || stat & MGR_INT_TX_NACKED_2)
- ret = qcom_slim_handle_tx_irq(ctrl, stat);
-
- if (stat & MGR_INT_RX_MSG_RCVD)
- ret = qcom_slim_handle_rx_irq(ctrl, stat);
-
- return ret;
-}
-
-static int qcom_clk_pause_wakeup(struct slim_controller *sctrl)
-{
- struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
-
- clk_prepare_enable(ctrl->hclk);
- clk_prepare_enable(ctrl->rclk);
- enable_irq(ctrl->irq);
-
- writel_relaxed(1, ctrl->base + FRM_WAKEUP);
- /* Make sure framer wakeup write goes through before ISR fires */
- mb();
-	/*
-	 * HW workaround: currently the slave reports lost-sync messages
-	 * after SLIMbus comes out of clock pause, and transactions with
-	 * the slave fail before that report arrives, so give the report
-	 * some time to come in.
-	 * SLIMbus wakes up in clock gear 10 at 24.576 MHz. With each
-	 * superframe being 250 usecs, we wait for 5-10 superframes here
-	 * (5 * 250 = 1250 to 10 * 250 = 2500 usecs) to ensure we get
-	 * the message.
-	 */
- usleep_range(1250, 2500);
- return 0;
-}
-
-static void *slim_alloc_txbuf(struct qcom_slim_ctrl *ctrl,
- struct slim_msg_txn *txn,
- struct completion *done)
-{
- unsigned long flags;
- int idx;
-
- spin_lock_irqsave(&ctrl->tx.lock, flags);
- if (((ctrl->tx.head + 1) % ctrl->tx.n) == ctrl->tx.tail) {
- spin_unlock_irqrestore(&ctrl->tx.lock, flags);
- dev_err(ctrl->dev, "controller TX buf unavailable");
- return NULL;
- }
- idx = ctrl->tx.tail;
- ctrl->wr_comp[idx] = done;
- ctrl->tx.tail = (ctrl->tx.tail + 1) % ctrl->tx.n;
-
- spin_unlock_irqrestore(&ctrl->tx.lock, flags);
-
- return ctrl->tx.base + (idx * ctrl->tx.sl_sz);
-}
-
-
-static int qcom_xfer_msg(struct slim_controller *sctrl,
- struct slim_msg_txn *txn)
-{
- struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
- DECLARE_COMPLETION_ONSTACK(done);
- void *pbuf = slim_alloc_txbuf(ctrl, txn, &done);
- unsigned long ms = txn->rl + HZ;
- u8 *puc;
- int ret = 0, retries = QCOM_BUF_ALLOC_RETRIES;
- unsigned long time_left;
- u8 la = txn->la;
- u32 *head;
- /* HW expects length field to be excluded */
- txn->rl--;
-
- /* spin till buffer is made available */
- if (!pbuf) {
- while (retries--) {
- usleep_range(10000, 15000);
- pbuf = slim_alloc_txbuf(ctrl, txn, &done);
- if (pbuf)
- break;
- }
- }
-
- if (retries < 0 && !pbuf)
- return -ENOMEM;
-
- puc = (u8 *)pbuf;
- head = (u32 *)pbuf;
-
- if (txn->dt == SLIM_MSG_DEST_LOGICALADDR) {
- *head = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt,
- txn->mc, 0, la);
- puc += 3;
- } else {
- *head = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt,
- txn->mc, 1, la);
- puc += 2;
- }
-
- if (slim_tid_txn(txn->mt, txn->mc))
- *(puc++) = txn->tid;
-
- if (slim_ec_txn(txn->mt, txn->mc)) {
- *(puc++) = (txn->ec & 0xFF);
- *(puc++) = (txn->ec >> 8) & 0xFF;
- }
-
- if (txn->msg && txn->msg->wbuf)
- memcpy(puc, txn->msg->wbuf, txn->msg->num_bytes);
-
- qcom_slim_queue_tx(ctrl, head, txn->rl, MGR_TX_MSG);
- time_left = wait_for_completion_timeout(&done, msecs_to_jiffies(ms));
-
- if (!time_left) {
- dev_err(ctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
- txn->mt);
- ret = -ETIMEDOUT;
- }
-
- return ret;
-
-}
-
-static int qcom_set_laddr(struct slim_controller *sctrl,
- struct slim_eaddr *ead, u8 laddr)
-{
- struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
- struct {
- __be16 manf_id;
- __be16 prod_code;
- u8 dev_index;
- u8 instance;
- u8 laddr;
- } __packed p;
- struct slim_val_inf msg = {0};
- DEFINE_SLIM_EDEST_TXN(txn, SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS,
- 10, laddr, &msg);
- int ret;
-
- p.manf_id = cpu_to_be16(ead->manf_id);
- p.prod_code = cpu_to_be16(ead->prod_code);
- p.dev_index = ead->dev_index;
- p.instance = ead->instance;
- p.laddr = laddr;
-
- msg.wbuf = (void *)&p;
- msg.num_bytes = 7;
- ret = slim_do_transfer(&ctrl->ctrl, &txn);
-
- if (ret)
- dev_err(ctrl->dev, "set LA:0x%x failed:ret:%d\n",
- laddr, ret);
- return ret;
-}
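-
- The payload above is a 7-byte wire image: two big-endian 16-bit fields followed by three bytes, which is why the struct is __packed and num_bytes is 7. A userspace sketch of the same serialization, with put_be16() standing in for cpu_to_be16() and all field values purely illustrative:
-
-     #include <stdint.h>
-     #include <stdio.h>
-
-     static void put_be16(uint8_t *p, uint16_t v)
-     {
-         p[0] = v >> 8;
-         p[1] = v & 0xff;
-     }
-
-     int main(void)
-     {
-         uint8_t buf[7];
-
-         put_be16(&buf[0], 0x0217); /* manf_id (example) */
-         put_be16(&buf[2], 0x00a0); /* prod_code (example) */
-         buf[4] = 0;                /* dev_index */
-         buf[5] = 1;                /* instance */
-         buf[6] = 0x3;              /* laddr being assigned */
-
-         for (size_t i = 0; i < sizeof(buf); i++)
-             printf("%02x ", buf[i]);
-         printf("\n");
-         return 0;
-     }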
-
-static int slim_get_current_rxbuf(struct qcom_slim_ctrl *ctrl, void *buf)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ctrl->rx.lock, flags);
- if (ctrl->rx.tail == ctrl->rx.head) {
- spin_unlock_irqrestore(&ctrl->rx.lock, flags);
- return -ENODATA;
- }
- memcpy(buf, ctrl->rx.base + (ctrl->rx.head * ctrl->rx.sl_sz),
- ctrl->rx.sl_sz);
-
- ctrl->rx.head = (ctrl->rx.head + 1) % ctrl->rx.n;
- spin_unlock_irqrestore(&ctrl->rx.lock, flags);
-
- return 0;
-}
-
-static void qcom_slim_rxwq(struct work_struct *work)
-{
- u8 buf[SLIM_MSGQ_BUF_LEN];
- u8 mc, mt;
- int ret;
- struct qcom_slim_ctrl *ctrl = container_of(work, struct qcom_slim_ctrl,
- wd);
-
- while ((slim_get_current_rxbuf(ctrl, buf)) != -ENODATA) {
- mt = SLIM_HEADER_GET_MT(buf[0]);
- mc = SLIM_HEADER_GET_MC(buf[1]);
- if (mt == SLIM_MSG_MT_CORE &&
- mc == SLIM_MSG_MC_REPORT_PRESENT) {
- struct slim_eaddr ea;
- u8 laddr;
-
- ea.manf_id = be16_to_cpup((__be16 *)&buf[2]);
- ea.prod_code = be16_to_cpup((__be16 *)&buf[4]);
- ea.dev_index = buf[6];
- ea.instance = buf[7];
-
- ret = slim_device_report_present(&ctrl->ctrl, &ea,
- &laddr);
- if (ret < 0)
- dev_err(ctrl->dev, "assign laddr failed:%d\n",
- ret);
- } else {
- dev_err(ctrl->dev, "unexpected message:mc:%x, mt:%x\n",
- mc, mt);
- }
- }
-}
-
-static void qcom_slim_prg_slew(struct platform_device *pdev,
- struct qcom_slim_ctrl *ctrl)
-{
- if (!ctrl->slew_reg) {
- /* SLEW RATE register for this SLIMbus */
- ctrl->slew_reg = devm_platform_ioremap_resource_byname(pdev, "slew");
- if (IS_ERR(ctrl->slew_reg))
- return;
- }
-
- writel_relaxed(1, ctrl->slew_reg);
- /* Make sure SLIMbus-slew rate enabling goes through */
- wmb();
-}
-
-static int qcom_slim_probe(struct platform_device *pdev)
-{
- struct qcom_slim_ctrl *ctrl;
- struct slim_controller *sctrl;
- int ret, ver;
-
- ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
- if (!ctrl)
- return -ENOMEM;
-
- ctrl->hclk = devm_clk_get(&pdev->dev, "iface");
- if (IS_ERR(ctrl->hclk))
- return PTR_ERR(ctrl->hclk);
-
- ctrl->rclk = devm_clk_get(&pdev->dev, "core");
- if (IS_ERR(ctrl->rclk))
- return PTR_ERR(ctrl->rclk);
-
- ret = clk_set_rate(ctrl->rclk, SLIM_ROOT_FREQ);
- if (ret) {
- dev_err(&pdev->dev, "ref-clock set-rate failed:%d\n", ret);
- return ret;
- }
-
- ctrl->irq = platform_get_irq(pdev, 0);
- if (ctrl->irq < 0)
- return ctrl->irq;
-
- sctrl = &ctrl->ctrl;
- sctrl->dev = &pdev->dev;
- ctrl->dev = &pdev->dev;
- platform_set_drvdata(pdev, ctrl);
- dev_set_drvdata(ctrl->dev, ctrl);
-
- ctrl->base = devm_platform_ioremap_resource_byname(pdev, "ctrl");
- if (IS_ERR(ctrl->base))
- return PTR_ERR(ctrl->base);
-
- sctrl->set_laddr = qcom_set_laddr;
- sctrl->xfer_msg = qcom_xfer_msg;
- sctrl->wakeup = qcom_clk_pause_wakeup;
- ctrl->tx.n = QCOM_TX_MSGS;
- ctrl->tx.sl_sz = SLIM_MSGQ_BUF_LEN;
- ctrl->rx.n = QCOM_RX_MSGS;
- ctrl->rx.sl_sz = SLIM_MSGQ_BUF_LEN;
- ctrl->wr_comp = kcalloc(QCOM_TX_MSGS, sizeof(struct completion *),
- GFP_KERNEL);
- if (!ctrl->wr_comp)
- return -ENOMEM;
-
- spin_lock_init(&ctrl->rx.lock);
- spin_lock_init(&ctrl->tx.lock);
- INIT_WORK(&ctrl->wd, qcom_slim_rxwq);
- ctrl->rxwq = create_singlethread_workqueue("qcom_slim_rx");
- if (!ctrl->rxwq) {
- dev_err(ctrl->dev, "Failed to start Rx WQ\n");
- return -ENOMEM;
- }
-
- ctrl->framer.rootfreq = SLIM_ROOT_FREQ / 8;
- ctrl->framer.superfreq =
- ctrl->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
- sctrl->a_framer = &ctrl->framer;
- sctrl->clkgear = SLIM_MAX_CLK_GEAR;
-
- qcom_slim_prg_slew(pdev, ctrl);
-
- ret = devm_request_irq(&pdev->dev, ctrl->irq, qcom_slim_interrupt,
- IRQF_TRIGGER_HIGH, "qcom_slim_irq", ctrl);
- if (ret) {
- dev_err(&pdev->dev, "request IRQ failed\n");
- goto err_request_irq_failed;
- }
-
- ret = clk_prepare_enable(ctrl->hclk);
- if (ret)
- goto err_hclk_enable_failed;
-
- ret = clk_prepare_enable(ctrl->rclk);
- if (ret)
- goto err_rclk_enable_failed;
-
- ctrl->tx.base = devm_kcalloc(&pdev->dev, ctrl->tx.n, ctrl->tx.sl_sz,
- GFP_KERNEL);
- if (!ctrl->tx.base) {
- ret = -ENOMEM;
- goto err;
- }
-
-	ctrl->rx.base = devm_kcalloc(&pdev->dev, ctrl->rx.n, ctrl->rx.sl_sz,
- GFP_KERNEL);
- if (!ctrl->rx.base) {
- ret = -ENOMEM;
- goto err;
- }
-
- /* Register with framework before enabling frame, clock */
- ret = slim_register_controller(&ctrl->ctrl);
- if (ret) {
- dev_err(ctrl->dev, "error adding controller\n");
- goto err;
- }
-
- ver = readl_relaxed(ctrl->base);
- /* Version info in 16 MSbits */
- ver >>= 16;
- /* Component register initialization */
- writel(1, ctrl->base + CFG_PORT(COMP_CFG, ver));
- writel((EE_MGR_RSC_GRP | EE_NGD_2 | EE_NGD_1),
- ctrl->base + CFG_PORT(COMP_TRUST_CFG, ver));
-
- writel((MGR_INT_TX_NACKED_2 |
- MGR_INT_MSG_BUF_CONTE | MGR_INT_RX_MSG_RCVD |
- MGR_INT_TX_MSG_SENT), ctrl->base + MGR_INT_EN);
- writel(1, ctrl->base + MGR_CFG);
- /* Framer register initialization */
- writel((1 << INTR_WAKE) | (0xA << REF_CLK_GEAR) |
- (0xA << CLK_GEAR) | (1 << ROOT_FREQ) | (1 << FRM_ACTIVE) | 1,
- ctrl->base + FRM_CFG);
- writel(MGR_CFG_ENABLE, ctrl->base + MGR_CFG);
- writel(1, ctrl->base + INTF_CFG);
- writel(1, ctrl->base + CFG_PORT(COMP_CFG, ver));
-
- pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_set_autosuspend_delay(&pdev->dev, QCOM_SLIM_AUTOSUSPEND);
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_mark_last_busy(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
-
- dev_dbg(ctrl->dev, "QCOM SB controller is up:ver:0x%x!\n", ver);
- return 0;
-
-err:
- clk_disable_unprepare(ctrl->rclk);
-err_rclk_enable_failed:
- clk_disable_unprepare(ctrl->hclk);
-err_hclk_enable_failed:
-err_request_irq_failed:
- destroy_workqueue(ctrl->rxwq);
- return ret;
-}
-
-static void qcom_slim_remove(struct platform_device *pdev)
-{
- struct qcom_slim_ctrl *ctrl = platform_get_drvdata(pdev);
-
- pm_runtime_disable(&pdev->dev);
- slim_unregister_controller(&ctrl->ctrl);
- clk_disable_unprepare(ctrl->rclk);
- clk_disable_unprepare(ctrl->hclk);
- destroy_workqueue(ctrl->rxwq);
-}
-
-/*
- * If PM_RUNTIME is not defined, these 2 functions become helper
- * functions to be called from system suspend/resume.
- */
-#ifdef CONFIG_PM
-static int qcom_slim_runtime_suspend(struct device *device)
-{
- struct qcom_slim_ctrl *ctrl = dev_get_drvdata(device);
- int ret;
-
- dev_dbg(device, "pm_runtime: suspending...\n");
- ret = slim_ctrl_clk_pause(&ctrl->ctrl, false, SLIM_CLK_UNSPECIFIED);
- if (ret) {
- dev_err(device, "clk pause not entered:%d", ret);
- } else {
- disable_irq(ctrl->irq);
- clk_disable_unprepare(ctrl->hclk);
- clk_disable_unprepare(ctrl->rclk);
- }
- return ret;
-}
-
-static int qcom_slim_runtime_resume(struct device *device)
-{
- struct qcom_slim_ctrl *ctrl = dev_get_drvdata(device);
- int ret = 0;
-
- dev_dbg(device, "pm_runtime: resuming...\n");
- ret = slim_ctrl_clk_pause(&ctrl->ctrl, true, 0);
- if (ret)
- dev_err(device, "clk pause not exited:%d", ret);
- return ret;
-}
-#endif
-
-#ifdef CONFIG_PM_SLEEP
-static int qcom_slim_suspend(struct device *dev)
-{
- int ret = 0;
-
- if (!pm_runtime_enabled(dev) ||
- (!pm_runtime_suspended(dev))) {
- dev_dbg(dev, "system suspend");
- ret = qcom_slim_runtime_suspend(dev);
- }
-
- return ret;
-}
-
-static int qcom_slim_resume(struct device *dev)
-{
- if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
- int ret;
-
- dev_dbg(dev, "system resume");
- ret = qcom_slim_runtime_resume(dev);
- if (!ret) {
- pm_runtime_mark_last_busy(dev);
- pm_request_autosuspend(dev);
- }
- return ret;
-
- }
- return 0;
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static const struct dev_pm_ops qcom_slim_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(qcom_slim_suspend, qcom_slim_resume)
- SET_RUNTIME_PM_OPS(
- qcom_slim_runtime_suspend,
- qcom_slim_runtime_resume,
- NULL
- )
-};
-
-static const struct of_device_id qcom_slim_dt_match[] = {
- { .compatible = "qcom,slim", },
- {}
-};
-MODULE_DEVICE_TABLE(of, qcom_slim_dt_match);
-
-static struct platform_driver qcom_slim_driver = {
- .probe = qcom_slim_probe,
- .remove = qcom_slim_remove,
- .driver = {
- .name = "qcom_slim_ctrl",
- .of_match_table = qcom_slim_dt_match,
- .pm = &qcom_slim_dev_pm_ops,
- },
-};
-module_platform_driver(qcom_slim_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Qualcomm SLIMbus Controller");
diff --git a/drivers/soc/apple/Kconfig b/drivers/soc/apple/Kconfig
index 6388cbe1e56b..ad6736889231 100644
--- a/drivers/soc/apple/Kconfig
+++ b/drivers/soc/apple/Kconfig
@@ -8,7 +8,6 @@ config APPLE_MAILBOX
tristate "Apple SoC mailboxes"
depends on PM
depends on ARCH_APPLE || (64BIT && COMPILE_TEST)
- default ARCH_APPLE
help
Apple SoCs have various co-processors required for certain
peripherals to work (NVMe, display controller, etc.). This
@@ -21,7 +20,6 @@ config APPLE_RTKIT
tristate "Apple RTKit co-processor IPC protocol"
depends on APPLE_MAILBOX
depends on ARCH_APPLE || COMPILE_TEST
- default ARCH_APPLE
help
Apple SoCs such as the M1 come with various co-processors running
their proprietary RTKit operating system. This option enables support
@@ -33,7 +31,6 @@ config APPLE_RTKIT
config APPLE_SART
tristate "Apple SART DMA address filter"
depends on ARCH_APPLE || COMPILE_TEST
- default ARCH_APPLE
help
Apple SART is a simple DMA address filter used on Apple SoCs such
as the M1. It is usually required for the NVMe coprocessor which does
diff --git a/drivers/soc/apple/mailbox.c b/drivers/soc/apple/mailbox.c
index 49a0955e82d6..8f29108dc69a 100644
--- a/drivers/soc/apple/mailbox.c
+++ b/drivers/soc/apple/mailbox.c
@@ -47,6 +47,9 @@
#define APPLE_ASC_MBOX_I2A_RECV0 0x830
#define APPLE_ASC_MBOX_I2A_RECV1 0x838
+#define APPLE_T8015_MBOX_A2I_CONTROL 0x108
+#define APPLE_T8015_MBOX_I2A_CONTROL 0x10c
+
#define APPLE_M3_MBOX_CONTROL_FULL BIT(16)
#define APPLE_M3_MBOX_CONTROL_EMPTY BIT(17)
@@ -382,6 +385,21 @@ static int apple_mbox_probe(struct platform_device *pdev)
return 0;
}
+static const struct apple_mbox_hw apple_mbox_t8015_hw = {
+ .control_full = APPLE_ASC_MBOX_CONTROL_FULL,
+ .control_empty = APPLE_ASC_MBOX_CONTROL_EMPTY,
+
+ .a2i_control = APPLE_T8015_MBOX_A2I_CONTROL,
+ .a2i_send0 = APPLE_ASC_MBOX_A2I_SEND0,
+ .a2i_send1 = APPLE_ASC_MBOX_A2I_SEND1,
+
+ .i2a_control = APPLE_T8015_MBOX_I2A_CONTROL,
+ .i2a_recv0 = APPLE_ASC_MBOX_I2A_RECV0,
+ .i2a_recv1 = APPLE_ASC_MBOX_I2A_RECV1,
+
+ .has_irq_controls = false,
+};
+
static const struct apple_mbox_hw apple_mbox_asc_hw = {
.control_full = APPLE_ASC_MBOX_CONTROL_FULL,
.control_empty = APPLE_ASC_MBOX_CONTROL_EMPTY,
@@ -418,6 +436,7 @@ static const struct apple_mbox_hw apple_mbox_m3_hw = {
static const struct of_device_id apple_mbox_of_match[] = {
{ .compatible = "apple,asc-mailbox-v4", .data = &apple_mbox_asc_hw },
+ { .compatible = "apple,t8015-asc-mailbox", .data = &apple_mbox_t8015_hw },
{ .compatible = "apple,m3-mailbox-v2", .data = &apple_mbox_m3_hw },
{}
};
diff --git a/drivers/soc/apple/sart.c b/drivers/soc/apple/sart.c
index afa111736899..4ff1942b82a7 100644
--- a/drivers/soc/apple/sart.c
+++ b/drivers/soc/apple/sart.c
@@ -25,8 +25,17 @@
#define APPLE_SART_MAX_ENTRIES 16
-/* This is probably a bitfield but the exact meaning of each bit is unknown. */
-#define APPLE_SART_FLAGS_ALLOW 0xff
+/* SARTv0 registers */
+#define APPLE_SART0_CONFIG(idx) (0x00 + 4 * (idx))
+#define APPLE_SART0_CONFIG_FLAGS GENMASK(28, 24)
+#define APPLE_SART0_CONFIG_SIZE GENMASK(18, 0)
+#define APPLE_SART0_CONFIG_SIZE_SHIFT 12
+#define APPLE_SART0_CONFIG_SIZE_MAX GENMASK(18, 0)
+
+#define APPLE_SART0_PADDR(idx) (0x40 + 4 * (idx))
+#define APPLE_SART0_PADDR_SHIFT 12
+
+#define APPLE_SART0_FLAGS_ALLOW 0xf
/* SARTv2 registers */
#define APPLE_SART2_CONFIG(idx) (0x00 + 4 * (idx))
@@ -38,6 +47,8 @@
#define APPLE_SART2_PADDR(idx) (0x40 + 4 * (idx))
#define APPLE_SART2_PADDR_SHIFT 12
+#define APPLE_SART2_FLAGS_ALLOW 0xff
+
/* SARTv3 registers */
#define APPLE_SART3_CONFIG(idx) (0x00 + 4 * (idx))
@@ -48,11 +59,15 @@
#define APPLE_SART3_SIZE_SHIFT 12
#define APPLE_SART3_SIZE_MAX GENMASK(29, 0)
+#define APPLE_SART3_FLAGS_ALLOW 0xff
+
struct apple_sart_ops {
void (*get_entry)(struct apple_sart *sart, int index, u8 *flags,
phys_addr_t *paddr, size_t *size);
void (*set_entry)(struct apple_sart *sart, int index, u8 flags,
phys_addr_t paddr_shifted, size_t size_shifted);
+ /* This is probably a bitfield but the exact meaning of each bit is unknown. */
+ unsigned int flags_allow;
unsigned int size_shift;
unsigned int paddr_shift;
size_t size_max;
@@ -68,6 +83,39 @@ struct apple_sart {
unsigned long used_entries;
};
+static void sart0_get_entry(struct apple_sart *sart, int index, u8 *flags,
+ phys_addr_t *paddr, size_t *size)
+{
+ u32 cfg = readl(sart->regs + APPLE_SART0_CONFIG(index));
+ phys_addr_t paddr_ = readl(sart->regs + APPLE_SART0_PADDR(index));
+ size_t size_ = FIELD_GET(APPLE_SART0_CONFIG_SIZE, cfg);
+
+ *flags = FIELD_GET(APPLE_SART0_CONFIG_FLAGS, cfg);
+ *size = size_ << APPLE_SART0_CONFIG_SIZE_SHIFT;
+ *paddr = paddr_ << APPLE_SART0_PADDR_SHIFT;
+}
+
+static void sart0_set_entry(struct apple_sart *sart, int index, u8 flags,
+ phys_addr_t paddr_shifted, size_t size_shifted)
+{
+ u32 cfg;
+
+ cfg = FIELD_PREP(APPLE_SART0_CONFIG_FLAGS, flags);
+ cfg |= FIELD_PREP(APPLE_SART0_CONFIG_SIZE, size_shifted);
+
+ writel(paddr_shifted, sart->regs + APPLE_SART0_PADDR(index));
+ writel(cfg, sart->regs + APPLE_SART0_CONFIG(index));
+}
+
+static struct apple_sart_ops sart_ops_v0 = {
+ .get_entry = sart0_get_entry,
+ .set_entry = sart0_set_entry,
+ .flags_allow = APPLE_SART0_FLAGS_ALLOW,
+ .size_shift = APPLE_SART0_CONFIG_SIZE_SHIFT,
+ .paddr_shift = APPLE_SART0_PADDR_SHIFT,
+ .size_max = APPLE_SART0_CONFIG_SIZE_MAX,
+};
+
static void sart2_get_entry(struct apple_sart *sart, int index, u8 *flags,
phys_addr_t *paddr, size_t *size)
{
@@ -95,6 +143,7 @@ static void sart2_set_entry(struct apple_sart *sart, int index, u8 flags,
static struct apple_sart_ops sart_ops_v2 = {
.get_entry = sart2_get_entry,
.set_entry = sart2_set_entry,
+ .flags_allow = APPLE_SART2_FLAGS_ALLOW,
.size_shift = APPLE_SART2_CONFIG_SIZE_SHIFT,
.paddr_shift = APPLE_SART2_PADDR_SHIFT,
.size_max = APPLE_SART2_CONFIG_SIZE_MAX,
@@ -122,6 +171,7 @@ static void sart3_set_entry(struct apple_sart *sart, int index, u8 flags,
static struct apple_sart_ops sart_ops_v3 = {
.get_entry = sart3_get_entry,
.set_entry = sart3_set_entry,
+ .flags_allow = APPLE_SART3_FLAGS_ALLOW,
.size_shift = APPLE_SART3_SIZE_SHIFT,
.paddr_shift = APPLE_SART3_PADDR_SHIFT,
.size_max = APPLE_SART3_SIZE_MAX,
@@ -233,7 +283,7 @@ int apple_sart_add_allowed_region(struct apple_sart *sart, phys_addr_t paddr,
if (test_and_set_bit(i, &sart->used_entries))
continue;
- ret = sart_set_entry(sart, i, APPLE_SART_FLAGS_ALLOW, paddr,
+ ret = sart_set_entry(sart, i, sart->ops->flags_allow, paddr,
size);
if (ret) {
dev_dbg(sart->dev,
@@ -314,6 +364,10 @@ static const struct of_device_id apple_sart_of_match[] = {
.compatible = "apple,t8103-sart",
.data = &sart_ops_v2,
},
+ {
+ .compatible = "apple,t8015-sart",
+ .data = &sart_ops_v0,
+ },
{}
};
MODULE_DEVICE_TABLE(of, apple_sart_of_match);
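
The SART change extends an ops-table pattern: each hardware revision supplies its own entry accessors plus, now, a per-version flags_allow value, so the common allow-region path no longer hardcodes v2's 0xff. A stripped-down sketch of the same pattern (accessor body and values are illustrative, not the real register programming):

    #include <stdint.h>
    #include <stdio.h>

    struct sart_ops {
        void (*set_entry)(int index, uint8_t flags);
        unsigned int flags_allow;
    };

    static void v0_set_entry(int index, uint8_t flags)
    {
        printf("v0: entry %d flags 0x%x\n", index, flags);
    }

    static const struct sart_ops sart_ops_v0 = {
        .set_entry = v0_set_entry,
        .flags_allow = 0xf, /* SARTv0 allow mask, per APPLE_SART0_FLAGS_ALLOW */
    };

    int main(void)
    {
        /* In the driver this pointer comes from the of_match .data field. */
        const struct sart_ops *ops = &sart_ops_v0;

        ops->set_entry(0, ops->flags_allow);
        return 0;
    }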
diff --git a/drivers/soc/aspeed/aspeed-lpc-ctrl.c b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
index ee58151bd69e..b7dbb12bd095 100644
--- a/drivers/soc/aspeed/aspeed-lpc-ctrl.c
+++ b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
@@ -10,6 +10,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/regmap.h>
@@ -254,17 +255,8 @@ static int aspeed_lpc_ctrl_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, lpc_ctrl);
/* If memory-region is described in device tree then store */
- node = of_parse_phandle(dev->of_node, "memory-region", 0);
- if (!node) {
- dev_dbg(dev, "Didn't find reserved memory\n");
- } else {
- rc = of_address_to_resource(node, 0, &resm);
- of_node_put(node);
- if (rc) {
-			dev_err(dev, "Couldn't convert address to resource for reserved memory\n");
- return -ENXIO;
- }
-
+ rc = of_reserved_mem_region_to_resource(dev->of_node, 0, &resm);
+ if (!rc) {
lpc_ctrl->mem_size = resource_size(&resm);
lpc_ctrl->mem_base = resm.start;
diff --git a/drivers/soc/aspeed/aspeed-p2a-ctrl.c b/drivers/soc/aspeed/aspeed-p2a-ctrl.c
index 6cc943744e12..3be2e1b1085b 100644
--- a/drivers/soc/aspeed/aspeed-p2a-ctrl.c
+++ b/drivers/soc/aspeed/aspeed-p2a-ctrl.c
@@ -19,7 +19,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -334,7 +334,6 @@ static int aspeed_p2a_ctrl_probe(struct platform_device *pdev)
struct aspeed_p2a_ctrl *misc_ctrl;
struct device *dev;
struct resource resm;
- struct device_node *node;
int rc = 0;
dev = &pdev->dev;
@@ -346,15 +345,8 @@ static int aspeed_p2a_ctrl_probe(struct platform_device *pdev)
mutex_init(&misc_ctrl->tracking);
/* optional. */
- node = of_parse_phandle(dev->of_node, "memory-region", 0);
- if (node) {
- rc = of_address_to_resource(node, 0, &resm);
- of_node_put(node);
- if (rc) {
-			dev_err(dev, "Couldn't convert address to resource for reserved memory\n");
- return -ENODEV;
- }
-
+ rc = of_reserved_mem_region_to_resource(dev->of_node, 0, &resm);
+ if (!rc) {
misc_ctrl->mem_size = resource_size(&resm);
misc_ctrl->mem_base = resm.start;
}
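
Both aspeed conversions collapse the parse-phandle/address-to-resource/node-put dance into one helper call whose nonzero return simply means "no region described", keeping the region optional. A stubbed sketch of the resulting control flow, with fake_region_to_resource() standing in for the real of_reserved_mem_region_to_resource():

    #include <stdio.h>

    struct resource { unsigned long start, end; };

    static int fake_region_to_resource(struct resource *res)
    {
        /* A real lookup returns a negative errno when the DT node has
         * no "memory-region"; this stub always succeeds for the demo. */
        res->start = 0x98000000;
        res->end   = 0x98ffffff;
        return 0;
    }

    int main(void)
    {
        struct resource resm;

        if (!fake_region_to_resource(&resm))
            printf("mem_base 0x%lx size 0x%lx\n",
                   resm.start, resm.end - resm.start + 1);
        else
            printf("no reserved region; feature stays disabled\n");
        return 0;
    }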
diff --git a/drivers/soc/aspeed/aspeed-socinfo.c b/drivers/soc/aspeed/aspeed-socinfo.c
index 3f759121dc00..67e9ac3d08ec 100644
--- a/drivers/soc/aspeed/aspeed-socinfo.c
+++ b/drivers/soc/aspeed/aspeed-socinfo.c
@@ -27,6 +27,10 @@ static struct {
{ "AST2620", 0x05010203 },
{ "AST2605", 0x05030103 },
{ "AST2625", 0x05030403 },
+ /* AST2700 */
+ { "AST2750", 0x06000003 },
+ { "AST2700", 0x06000103 },
+ { "AST2720", 0x06000203 },
};
static const char *siliconid_to_name(u32 siliconid)
diff --git a/drivers/soc/bcm/brcmstb/pm/pm.h b/drivers/soc/bcm/brcmstb/pm/pm.h
index 94a380470a2f..17f7a06a7a83 100644
--- a/drivers/soc/bcm/brcmstb/pm/pm.h
+++ b/drivers/soc/bcm/brcmstb/pm/pm.h
@@ -60,7 +60,7 @@
PM_DEEP_STANDBY | \
PM_PLL_PWRDOWN | PM_PWR_DOWN)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#ifndef CONFIG_MIPS
extern const unsigned long brcmstb_pm_do_s2_sz;
diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c
index f4d3c2146f4f..6f7597950aa3 100644
--- a/drivers/soc/fsl/qbman/qman_test_stash.c
+++ b/drivers/soc/fsl/qbman/qman_test_stash.c
@@ -103,7 +103,7 @@ static int on_all_cpus(int (*fn)(void))
{
int cpu;
- for_each_cpu(cpu, cpu_online_mask) {
+ for_each_online_cpu(cpu) {
struct bstrap bstrap = {
.fn = fn,
.started = ATOMIC_INIT(0)
diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c
index 710a3a03758b..c54154b404df 100644
--- a/drivers/soc/fsl/qe/gpio.c
+++ b/drivers/soc/fsl/qe/gpio.c
@@ -12,18 +12,19 @@
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/gpio/legacy-of-mm-gpiochip.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/slab.h>
#include <linux/export.h>
-#include <linux/property.h>
+#include <linux/platform_device.h>
#include <soc/fsl/qe/qe.h>
+#define PIN_MASK(gpio) (1UL << (QE_PIO_PINS - 1 - (gpio)))
+
struct qe_gpio_chip {
- struct of_mm_gpio_chip mm_gc;
+ struct gpio_chip gc;
+ void __iomem *regs;
spinlock_t lock;
/* shadowed data register to clear/set bits safely */
@@ -33,11 +34,9 @@ struct qe_gpio_chip {
struct qe_pio_regs saved_regs;
};
-static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
+static void qe_gpio_save_regs(struct qe_gpio_chip *qe_gc)
{
- struct qe_gpio_chip *qe_gc =
- container_of(mm_gc, struct qe_gpio_chip, mm_gc);
- struct qe_pio_regs __iomem *regs = mm_gc->regs;
+ struct qe_pio_regs __iomem *regs = qe_gc->regs;
qe_gc->cpdata = ioread32be(&regs->cpdata);
qe_gc->saved_regs.cpdata = qe_gc->cpdata;
@@ -50,20 +49,19 @@ static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct qe_pio_regs __iomem *regs = mm_gc->regs;
- u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
+ struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
+ struct qe_pio_regs __iomem *regs = qe_gc->regs;
+ u32 pin_mask = PIN_MASK(gpio);
return !!(ioread32be(&regs->cpdata) & pin_mask);
}
static int qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
- struct qe_pio_regs __iomem *regs = mm_gc->regs;
+ struct qe_pio_regs __iomem *regs = qe_gc->regs;
unsigned long flags;
- u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
+ u32 pin_mask = PIN_MASK(gpio);
spin_lock_irqsave(&qe_gc->lock, flags);
@@ -82,9 +80,8 @@ static int qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
static int qe_gpio_set_multiple(struct gpio_chip *gc,
unsigned long *mask, unsigned long *bits)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
- struct qe_pio_regs __iomem *regs = mm_gc->regs;
+ struct qe_pio_regs __iomem *regs = qe_gc->regs;
unsigned long flags;
int i;
@@ -95,9 +92,9 @@ static int qe_gpio_set_multiple(struct gpio_chip *gc,
break;
if (__test_and_clear_bit(i, mask)) {
if (test_bit(i, bits))
- qe_gc->cpdata |= (1U << (QE_PIO_PINS - 1 - i));
+ qe_gc->cpdata |= PIN_MASK(i);
else
- qe_gc->cpdata &= ~(1U << (QE_PIO_PINS - 1 - i));
+ qe_gc->cpdata &= ~PIN_MASK(i);
}
}
@@ -110,13 +107,12 @@ static int qe_gpio_set_multiple(struct gpio_chip *gc,
static int qe_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
unsigned long flags;
spin_lock_irqsave(&qe_gc->lock, flags);
- __par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_IN, 0, 0, 0);
+ __par_io_config_pin(qe_gc->regs, gpio, QE_PIO_DIR_IN, 0, 0, 0);
spin_unlock_irqrestore(&qe_gc->lock, flags);
@@ -125,7 +121,6 @@ static int qe_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
static int qe_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
unsigned long flags;
@@ -133,7 +128,7 @@ static int qe_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
spin_lock_irqsave(&qe_gc->lock, flags);
- __par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_OUT, 0, 0, 0);
+ __par_io_config_pin(qe_gc->regs, gpio, QE_PIO_DIR_OUT, 0, 0, 0);
spin_unlock_irqrestore(&qe_gc->lock, flags);
@@ -239,7 +234,7 @@ EXPORT_SYMBOL(qe_pin_free);
void qe_pin_set_dedicated(struct qe_pin *qe_pin)
{
struct qe_gpio_chip *qe_gc = qe_pin->controller;
- struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs;
+ struct qe_pio_regs __iomem *regs = qe_gc->regs;
struct qe_pio_regs *sregs = &qe_gc->saved_regs;
int pin = qe_pin->num;
u32 mask1 = 1 << (QE_PIO_PINS - (pin + 1));
@@ -268,7 +263,6 @@ void qe_pin_set_dedicated(struct qe_pin *qe_pin)
iowrite32be(qe_gc->cpdata, &regs->cpdata);
qe_clrsetbits_be32(&regs->cpodr, mask1, sregs->cpodr & mask1);
-
spin_unlock_irqrestore(&qe_gc->lock, flags);
}
EXPORT_SYMBOL(qe_pin_set_dedicated);
@@ -283,7 +277,7 @@ EXPORT_SYMBOL(qe_pin_set_dedicated);
void qe_pin_set_gpio(struct qe_pin *qe_pin)
{
struct qe_gpio_chip *qe_gc = qe_pin->controller;
- struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs;
+ struct qe_pio_regs __iomem *regs = qe_gc->regs;
unsigned long flags;
spin_lock_irqsave(&qe_gc->lock, flags);
@@ -295,45 +289,62 @@ void qe_pin_set_gpio(struct qe_pin *qe_pin)
}
EXPORT_SYMBOL(qe_pin_set_gpio);
-static int __init qe_add_gpiochips(void)
+static int qe_gpio_probe(struct platform_device *ofdev)
{
- struct device_node *np;
-
- for_each_compatible_node(np, NULL, "fsl,mpc8323-qe-pario-bank") {
- int ret;
- struct qe_gpio_chip *qe_gc;
- struct of_mm_gpio_chip *mm_gc;
- struct gpio_chip *gc;
-
- qe_gc = kzalloc(sizeof(*qe_gc), GFP_KERNEL);
- if (!qe_gc) {
- ret = -ENOMEM;
- goto err;
- }
+ struct device *dev = &ofdev->dev;
+ struct device_node *np = dev->of_node;
+ struct qe_gpio_chip *qe_gc;
+ struct gpio_chip *gc;
- spin_lock_init(&qe_gc->lock);
-
- mm_gc = &qe_gc->mm_gc;
- gc = &mm_gc->gc;
-
- mm_gc->save_regs = qe_gpio_save_regs;
- gc->ngpio = QE_PIO_PINS;
- gc->direction_input = qe_gpio_dir_in;
- gc->direction_output = qe_gpio_dir_out;
- gc->get = qe_gpio_get;
- gc->set_rv = qe_gpio_set;
- gc->set_multiple_rv = qe_gpio_set_multiple;
-
- ret = of_mm_gpiochip_add_data(np, mm_gc, qe_gc);
- if (ret)
- goto err;
- continue;
-err:
- pr_err("%pOF: registration failed with status %d\n",
- np, ret);
- kfree(qe_gc);
- /* try others anyway */
- }
- return 0;
+ qe_gc = devm_kzalloc(dev, sizeof(*qe_gc), GFP_KERNEL);
+ if (!qe_gc)
+ return -ENOMEM;
+
+ spin_lock_init(&qe_gc->lock);
+
+ gc = &qe_gc->gc;
+
+ gc->base = -1;
+ gc->ngpio = QE_PIO_PINS;
+ gc->direction_input = qe_gpio_dir_in;
+ gc->direction_output = qe_gpio_dir_out;
+ gc->get = qe_gpio_get;
+ gc->set = qe_gpio_set;
+ gc->set_multiple = qe_gpio_set_multiple;
+ gc->parent = dev;
+ gc->owner = THIS_MODULE;
+
+ gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np);
+ if (!gc->label)
+ return -ENOMEM;
+
+ qe_gc->regs = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(qe_gc->regs))
+ return PTR_ERR(qe_gc->regs);
+
+ qe_gpio_save_regs(qe_gc);
+
+ return devm_gpiochip_add_data(dev, gc, qe_gc);
+}
+
+static const struct of_device_id qe_gpio_match[] = {
+ {
+ .compatible = "fsl,mpc8323-qe-pario-bank",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, qe_gpio_match);
+
+static struct platform_driver qe_gpio_driver = {
+ .probe = qe_gpio_probe,
+ .driver = {
+ .name = "qe-gpio",
+ .of_match_table = qe_gpio_match,
+ },
+};
+
+static int __init qe_gpio_init(void)
+{
+ return platform_driver_register(&qe_gpio_driver);
}
-arch_initcall(qe_add_gpiochips);
+arch_initcall(qe_gpio_init);
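+
+ The new PIN_MASK() macro centralizes the QE bank's big-endian pin numbering: GPIO 0 is the most significant bit of the register, so the shift counts down from QE_PIO_PINS - 1. A tiny sketch of the mapping (QE_PIO_PINS = 32 is assumed here to match a full 32-bit bank):
+
+     #include <stdio.h>
+
+     #define QE_PIO_PINS 32
+     #define PIN_MASK(gpio) (1UL << (QE_PIO_PINS - 1 - (gpio)))
+
+     int main(void)
+     {
+         printf("pin 0  -> 0x%08lx\n", PIN_MASK(0));  /* MSB: 0x80000000 */
+         printf("pin 31 -> 0x%08lx\n", PIN_MASK(31)); /* LSB: 0x00000001 */
+         return 0;
+     }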
diff --git a/drivers/soc/fsl/qe/qmc.c b/drivers/soc/fsl/qe/qmc.c
index 36c0ccc06151..da5ea6d35618 100644
--- a/drivers/soc/fsl/qe/qmc.c
+++ b/drivers/soc/fsl/qe/qmc.c
@@ -461,9 +461,16 @@ int qmc_chan_write_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
ctrl = qmc_read16(&bd->cbd_sc);
if (ctrl & (QMC_BD_TX_R | QMC_BD_TX_UB)) {
- /* We are full ... */
- ret = -EBUSY;
- goto end;
+ if (!(ctrl & (QMC_BD_TX_R | QMC_BD_TX_I)) && bd == chan->txbd_done) {
+ if (ctrl & QMC_BD_TX_W)
+ chan->txbd_done = chan->txbds;
+ else
+ chan->txbd_done++;
+ } else {
+ /* We are full ... */
+ ret = -EBUSY;
+ goto end;
+ }
}
qmc_write16(&bd->cbd_datlen, length);
@@ -475,6 +482,10 @@ int qmc_chan_write_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
/* Activate the descriptor */
ctrl |= (QMC_BD_TX_R | QMC_BD_TX_UB);
+ if (complete)
+ ctrl |= QMC_BD_TX_I;
+ else
+ ctrl &= ~QMC_BD_TX_I;
wmb(); /* Be sure to flush the descriptor before control update */
qmc_write16(&bd->cbd_sc, ctrl);
@@ -569,9 +580,16 @@ int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
ctrl = qmc_read16(&bd->cbd_sc);
if (ctrl & (QMC_BD_RX_E | QMC_BD_RX_UB)) {
- /* We are full ... */
- ret = -EBUSY;
- goto end;
+ if (!(ctrl & (QMC_BD_RX_E | QMC_BD_RX_I)) && bd == chan->rxbd_done) {
+ if (ctrl & QMC_BD_RX_W)
+ chan->rxbd_done = chan->rxbds;
+ else
+ chan->rxbd_done++;
+ } else {
+ /* We are full ... */
+ ret = -EBUSY;
+ goto end;
+ }
}
qmc_write16(&bd->cbd_datlen, 0); /* data length is updated by the QMC */
@@ -587,6 +605,10 @@ int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
/* Activate the descriptor */
ctrl |= (QMC_BD_RX_E | QMC_BD_RX_UB);
+ if (complete)
+ ctrl |= QMC_BD_RX_I;
+ else
+ ctrl &= ~QMC_BD_RX_I;
wmb(); /* Be sure to flush data before descriptor activation */
qmc_write16(&bd->cbd_sc, ctrl);
@@ -1482,19 +1504,19 @@ static int qmc_setup_chan(struct qmc *qmc, struct qmc_chan *chan)
/* Init Rx BDs and set Wrap bit on last descriptor */
BUILD_BUG_ON(QMC_NB_RXBDS == 0);
- val = QMC_BD_RX_I;
for (i = 0; i < QMC_NB_RXBDS; i++) {
bd = chan->rxbds + i;
- qmc_write16(&bd->cbd_sc, val);
+ qmc_write16(&bd->cbd_sc, 0);
}
bd = chan->rxbds + QMC_NB_RXBDS - 1;
- qmc_write16(&bd->cbd_sc, val | QMC_BD_RX_W);
+ qmc_write16(&bd->cbd_sc, QMC_BD_RX_W);
/* Init Tx BDs and set Wrap bit on last descriptor */
BUILD_BUG_ON(QMC_NB_TXBDS == 0);
- val = QMC_BD_TX_I;
if (chan->mode == QMC_HDLC)
- val |= QMC_BD_TX_L | QMC_BD_TX_TC;
+ val = QMC_BD_TX_L | QMC_BD_TX_TC;
+ else
+ val = 0;
for (i = 0; i < QMC_NB_TXBDS; i++) {
bd = chan->txbds + i;
qmc_write16(&bd->cbd_sc, val);
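
The QMC hunks change the descriptor policy in two coupled ways: the completion-interrupt bit (_I) is now set per submission only when the caller passed a callback, and a descriptor that finished without requesting an interrupt is reclaimed on the next submit instead of reported as "queue full". A sketch of just the submit-side bit handling, with stand-in bit values rather than the real QMC encoding:

    #include <stdint.h>
    #include <stdio.h>

    #define BD_READY (1u << 0) /* QMC_BD_TX_R stand-in */
    #define BD_IRQ   (1u << 1) /* QMC_BD_TX_I stand-in */

    static uint16_t submit_ctrl(uint16_t ctrl, int want_completion)
    {
        if (want_completion)
            ctrl |= BD_IRQ;   /* fire an interrupt when this BD finishes */
        else
            ctrl &= ~BD_IRQ;  /* finish silently; reclaimed lazily later */
        return ctrl | BD_READY;
    }

    int main(void)
    {
        printf("with callback:    0x%04x\n", submit_ctrl(0, 1));
        printf("without callback: 0x%04x\n", submit_ctrl(0, 0));
        return 0;
    }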
diff --git a/drivers/soc/hisilicon/kunpeng_hccs.c b/drivers/soc/hisilicon/kunpeng_hccs.c
index 65ff45fdcac7..006fec47ea10 100644
--- a/drivers/soc/hisilicon/kunpeng_hccs.c
+++ b/drivers/soc/hisilicon/kunpeng_hccs.c
@@ -1464,7 +1464,7 @@ static ssize_t dec_lane_of_type_store(struct kobject *kobj, struct kobj_attribut
goto out;
if (!all_in_idle) {
ret = -EBUSY;
- dev_err(hdev->dev, "please don't decrese lanes on high load with %s, ret = %d.\n",
+ dev_err(hdev->dev, "please don't decrease lanes on high load with %s, ret = %d.\n",
hccs_port_type_to_name(hdev, port_type), ret);
goto out;
}
diff --git a/drivers/soc/mediatek/mtk-svs.c b/drivers/soc/mediatek/mtk-svs.c
index 7c349a94b45c..f45537546553 100644
--- a/drivers/soc/mediatek/mtk-svs.c
+++ b/drivers/soc/mediatek/mtk-svs.c
@@ -2165,10 +2165,18 @@ static struct device *svs_add_device_link(struct svs_platform *svsp,
return dev;
}
+static void svs_put_device(void *_dev)
+{
+ struct device *dev = _dev;
+
+ put_device(dev);
+}
+
static int svs_mt8192_platform_probe(struct svs_platform *svsp)
{
struct device *dev;
u32 idx;
+ int ret;
svsp->rst = devm_reset_control_get_optional(svsp->dev, "svs_rst");
if (IS_ERR(svsp->rst))
@@ -2179,6 +2187,7 @@ static int svs_mt8192_platform_probe(struct svs_platform *svsp)
if (IS_ERR(dev))
return dev_err_probe(svsp->dev, PTR_ERR(dev),
"failed to get lvts device\n");
+ put_device(dev);
for (idx = 0; idx < svsp->bank_max; idx++) {
struct svs_bank *svsb = &svsp->banks[idx];
@@ -2188,6 +2197,7 @@ static int svs_mt8192_platform_probe(struct svs_platform *svsp)
case SVSB_SWID_CPU_LITTLE:
case SVSB_SWID_CPU_BIG:
svsb->opp_dev = get_cpu_device(bdata->cpu_id);
+ get_device(svsb->opp_dev);
break;
case SVSB_SWID_CCI:
svsb->opp_dev = svs_add_device_link(svsp, "cci");
@@ -2207,6 +2217,11 @@ static int svs_mt8192_platform_probe(struct svs_platform *svsp)
return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev),
"failed to get OPP device for bank %d\n",
idx);
+
+ ret = devm_add_action_or_reset(svsp->dev, svs_put_device,
+ svsb->opp_dev);
+ if (ret)
+ return ret;
}
return 0;
@@ -2216,11 +2231,13 @@ static int svs_mt8183_platform_probe(struct svs_platform *svsp)
{
struct device *dev;
u32 idx;
+ int ret;
dev = svs_add_device_link(svsp, "thermal-sensor");
if (IS_ERR(dev))
return dev_err_probe(svsp->dev, PTR_ERR(dev),
"failed to get thermal device\n");
+ put_device(dev);
for (idx = 0; idx < svsp->bank_max; idx++) {
struct svs_bank *svsb = &svsp->banks[idx];
@@ -2230,6 +2247,7 @@ static int svs_mt8183_platform_probe(struct svs_platform *svsp)
case SVSB_SWID_CPU_LITTLE:
case SVSB_SWID_CPU_BIG:
svsb->opp_dev = get_cpu_device(bdata->cpu_id);
+ get_device(svsb->opp_dev);
break;
case SVSB_SWID_CCI:
svsb->opp_dev = svs_add_device_link(svsp, "cci");
@@ -2246,6 +2264,11 @@ static int svs_mt8183_platform_probe(struct svs_platform *svsp)
return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev),
"failed to get OPP device for bank %d\n",
idx);
+
+ ret = devm_add_action_or_reset(svsp->dev, svs_put_device,
+ svsb->opp_dev);
+ if (ret)
+ return ret;
}
return 0;
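
The mtk-svs fix pairs every device reference taken in probe (get_cpu_device() plus the explicit get_device(), and the put_device() after svs_add_device_link()) with a managed release via devm_add_action_or_reset(), so the refcount drops on both probe failure and unbind. A userspace sketch of the idea, with stubs standing in for the device refcounting and devm machinery:

    #include <stdio.h>

    static int refcount;

    static void fake_get(void)         { refcount++; }
    static void fake_put(void *unused) { (void)unused; refcount--; }

    static int fake_devm_add_action_or_reset(void (*release)(void *), void *data)
    {
        /* A real devm action runs at unbind or on probe error; run it
         * eagerly here just to show the count returning to zero. */
        release(data);
        return 0;
    }

    int main(void)
    {
        fake_get(); /* as after get_cpu_device() in the patch */
        fake_devm_add_action_or_reset(fake_put, NULL);
        printf("refcount after teardown: %d\n", refcount);
        return 0;
    }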
diff --git a/drivers/soc/qcom/icc-bwmon.c b/drivers/soc/qcom/icc-bwmon.c
index 3dfa448bf8cf..597f9025e422 100644
--- a/drivers/soc/qcom/icc-bwmon.c
+++ b/drivers/soc/qcom/icc-bwmon.c
@@ -656,6 +656,9 @@ static irqreturn_t bwmon_intr_thread(int irq, void *dev_id)
if (IS_ERR(target_opp) && PTR_ERR(target_opp) == -ERANGE)
target_opp = dev_pm_opp_find_bw_floor(bwmon->dev, &bw_kbps, 0);
+ if (IS_ERR(target_opp))
+ return IRQ_HANDLED;
+
bwmon->target_kbps = bw_kbps;
bw_kbps--;
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index 192edc3f64dc..857ead56b37d 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -4409,7 +4409,6 @@ static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev, u8 index
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .fast_io = true,
};
base = devm_platform_ioremap_resource(pdev, index);
diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c
index 0ca268bdf1f8..a5c80d4fcc36 100644
--- a/drivers/soc/qcom/mdt_loader.c
+++ b/drivers/soc/qcom/mdt_loader.c
@@ -39,12 +39,14 @@ static bool mdt_header_valid(const struct firmware *fw)
if (phend > fw->size)
return false;
- if (ehdr->e_shentsize != sizeof(struct elf32_shdr))
- return false;
+ if (ehdr->e_shentsize || ehdr->e_shnum) {
+ if (ehdr->e_shentsize != sizeof(struct elf32_shdr))
+ return false;
- shend = size_add(size_mul(sizeof(struct elf32_shdr), ehdr->e_shnum), ehdr->e_shoff);
- if (shend > fw->size)
- return false;
+ shend = size_add(size_mul(sizeof(struct elf32_shdr), ehdr->e_shnum), ehdr->e_shoff);
+ if (shend > fw->size)
+ return false;
+ }
return true;
}
@@ -302,7 +304,7 @@ out:
}
EXPORT_SYMBOL_GPL(qcom_mdt_pas_init);
-static bool qcom_mdt_bins_are_split(const struct firmware *fw, const char *fw_name)
+static bool qcom_mdt_bins_are_split(const struct firmware *fw)
{
const struct elf32_phdr *phdrs;
const struct elf32_hdr *ehdr;
@@ -331,9 +333,9 @@ static bool qcom_mdt_bins_are_split(const struct firmware *fw, const char *fw_na
}
static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
- const char *fw_name, int pas_id, void *mem_region,
+ const char *fw_name, void *mem_region,
phys_addr_t mem_phys, size_t mem_size,
- phys_addr_t *reloc_base, bool pas_init)
+ phys_addr_t *reloc_base)
{
const struct elf32_phdr *phdrs;
const struct elf32_phdr *phdr;
@@ -353,7 +355,7 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
if (!mdt_header_valid(fw))
return -EINVAL;
- is_split = qcom_mdt_bins_are_split(fw, fw_name);
+ is_split = qcom_mdt_bins_are_split(fw);
ehdr = (struct elf32_hdr *)fw->data;
phdrs = (struct elf32_phdr *)(fw->data + ehdr->e_phoff);
@@ -458,8 +460,8 @@ int qcom_mdt_load(struct device *dev, const struct firmware *fw,
if (ret)
return ret;
- return __qcom_mdt_load(dev, fw, firmware, pas_id, mem_region, mem_phys,
- mem_size, reloc_base, true);
+ return __qcom_mdt_load(dev, fw, firmware, mem_region, mem_phys,
+ mem_size, reloc_base);
}
EXPORT_SYMBOL_GPL(qcom_mdt_load);
@@ -468,7 +470,6 @@ EXPORT_SYMBOL_GPL(qcom_mdt_load);
* @dev: device handle to associate resources with
* @fw: firmware object for the mdt file
* @firmware: name of the firmware, for construction of segment file names
- * @pas_id: PAS identifier
* @mem_region: allocated memory region to load firmware into
* @mem_phys: physical address of allocated memory region
* @mem_size: size of the allocated memory region
@@ -477,12 +478,11 @@ EXPORT_SYMBOL_GPL(qcom_mdt_load);
* Returns 0 on success, negative errno otherwise.
*/
int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw,
- const char *firmware, int pas_id,
- void *mem_region, phys_addr_t mem_phys,
+ const char *firmware, void *mem_region, phys_addr_t mem_phys,
size_t mem_size, phys_addr_t *reloc_base)
{
- return __qcom_mdt_load(dev, fw, firmware, pas_id, mem_region, mem_phys,
- mem_size, reloc_base, false);
+ return __qcom_mdt_load(dev, fw, firmware, mem_region, mem_phys,
+ mem_size, reloc_base);
}
EXPORT_SYMBOL_GPL(qcom_mdt_load_no_init);
diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
index 3c3b796333a6..cd1779b6a91a 100644
--- a/drivers/soc/qcom/qcom-geni-se.c
+++ b/drivers/soc/qcom/qcom-geni-se.c
@@ -1,11 +1,16 @@
// SPDX-License-Identifier: GPL-2.0
-// Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
/* Disable MMIO tracing to prevent excessive logging of unwanted MMIO traces */
#define __DISABLE_TRACE_MMIO__
#include <linux/acpi.h>
+#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
@@ -110,22 +115,94 @@ struct geni_se_desc {
static const char * const icc_path_names[] = {"qup-core", "qup-config",
"qup-memory"};
-#define QUP_HW_VER_REG 0x4
+static const char * const protocol_name[] = { "None", "SPI", "UART", "I2C", "I3C", "SPI SLAVE" };
+
+/**
+ * struct se_fw_hdr - Serial Engine firmware configuration header
+ *
+ * This structure defines the SE firmware header, which together with the
+ * firmware payload is stored in individual ELF segments.
+ *
+ * @magic: Set to 'SEFW'.
+ * @version: Structure version number.
+ * @core_version: QUPV3 hardware version.
+ * @serial_protocol: Encoded in GENI_FW_REVISION.
+ * @fw_version: Firmware version, from GENI_FW_REVISION.
+ * @cfg_version: Configuration version, from GENI_INIT_CFG_REVISION.
+ * @fw_size_in_items: Number of 32-bit words in GENI_FW_RAM.
+ * @fw_offset: Byte offset to GENI_FW_RAM array.
+ * @cfg_size_in_items: Number of GENI_FW_CFG index/value pairs.
+ * @cfg_idx_offset: Byte offset to GENI_FW_CFG index array.
+ * @cfg_val_offset: Byte offset to GENI_FW_CFG values array.
+ */
+struct se_fw_hdr {
+ __le32 magic;
+ __le32 version;
+ __le32 core_version;
+ __le16 serial_protocol;
+ __le16 fw_version;
+ __le16 cfg_version;
+ __le16 fw_size_in_items;
+ __le16 fw_offset;
+ __le16 cfg_size_in_items;
+ __le16 cfg_idx_offset;
+ __le16 cfg_val_offset;
+};
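+
+ The header carries byte offsets, not pointers, so locating the three payload arrays is pure base-plus-offset arithmetic within the ELF segment. A host-side sketch with purely illustrative offsets (le16_to_cpu() is omitted since the values are already native here):
+
+     #include <stdint.h>
+     #include <stdio.h>
+     #include <inttypes.h>
+
+     struct hdr_host {
+         uint16_t fw_size_in_items, fw_offset;
+         uint16_t cfg_size_in_items, cfg_idx_offset, cfg_val_offset;
+     };
+
+     int main(void)
+     {
+         /* Illustrative layout: 4 firmware words at +64, 2 cfg pairs. */
+         struct hdr_host h = { 4, 64, 2, 80, 84 };
+         uintptr_t seg = 0x1000; /* example segment base inside the image */
+
+         printf("fw words:    0x%" PRIxPTR " .. 0x%" PRIxPTR "\n",
+                (uintptr_t)(seg + h.fw_offset),
+                (uintptr_t)(seg + h.fw_offset +
+                            h.fw_size_in_items * sizeof(uint32_t)));
+         printf("cfg indices: 0x%" PRIxPTR "\n",
+                (uintptr_t)(seg + h.cfg_idx_offset));
+         printf("cfg values:  0x%" PRIxPTR "\n",
+                (uintptr_t)(seg + h.cfg_val_offset));
+         return 0;
+     }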
+
+/* Magic numbers */
+#define SE_MAGIC_NUM 0x57464553
+
+#define MAX_GENI_CFG_RAMn_CNT 455
+
+#define MI_PBT_NON_PAGED_SEGMENT 0x0
+#define MI_PBT_HASH_SEGMENT 0x2
+#define MI_PBT_NOTUSED_SEGMENT 0x3
+#define MI_PBT_SHARED_SEGMENT 0x4
+
+#define MI_PBT_FLAG_PAGE_MODE BIT(20)
+#define MI_PBT_FLAG_SEGMENT_TYPE GENMASK(26, 24)
+#define MI_PBT_FLAG_ACCESS_TYPE GENMASK(23, 21)
+
+#define MI_PBT_PAGE_MODE_VALUE(x) FIELD_GET(MI_PBT_FLAG_PAGE_MODE, x)
+
+#define MI_PBT_SEGMENT_TYPE_VALUE(x) FIELD_GET(MI_PBT_FLAG_SEGMENT_TYPE, x)
+
+#define MI_PBT_ACCESS_TYPE_VALUE(x) FIELD_GET(MI_PBT_FLAG_ACCESS_TYPE, x)
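+
+ These wrappers use FIELD_GET() to pull the page-mode, access-type and segment-type fields out of an ELF program header's p_flags. A userspace rendering of the same extraction with explicit shifts (bit positions copied from the defines above, example flags illustrative):
+
+     #include <stdint.h>
+     #include <stdio.h>
+
+     #define PAGE_MODE(f)    (((f) >> 20) & 0x1)
+     #define ACCESS_TYPE(f)  (((f) >> 21) & 0x7)
+     #define SEGMENT_TYPE(f) (((f) >> 24) & 0x7)
+
+     int main(void)
+     {
+         uint32_t p_flags = (2u << 24) | (0u << 21); /* hash segment */
+
+         printf("page mode %u, access %u, segment %u\n",
+                PAGE_MODE(p_flags), ACCESS_TYPE(p_flags),
+                SEGMENT_TYPE(p_flags));
+         return 0;
+     }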
+
+#define M_COMMON_GENI_M_IRQ_EN (GENMASK(6, 1) | \
+ M_IO_DATA_DEASSERT_EN | \
+ M_IO_DATA_ASSERT_EN | M_RX_FIFO_RD_ERR_EN | \
+ M_RX_FIFO_WR_ERR_EN | M_TX_FIFO_RD_ERR_EN | \
+ M_TX_FIFO_WR_ERR_EN)
+
+/* Common QUPV3 registers */
+#define QUPV3_HW_VER_REG 0x4
+#define QUPV3_SE_AHB_M_CFG 0x118
+#define QUPV3_COMMON_CFG 0x120
+#define QUPV3_COMMON_CGC_CTRL 0x21c
+
+/* QUPV3_COMMON_CFG fields */
+#define FAST_SWITCH_TO_HIGH_DISABLE BIT(0)
+
+/* QUPV3_SE_AHB_M_CFG fields */
+#define AHB_M_CLK_CGC_ON BIT(0)
+
+/* QUPV3_COMMON_CGC_CTRL fields */
+#define COMMON_CSR_SLV_CLK_CGC_ON BIT(0)
/* Common SE registers */
-#define GENI_INIT_CFG_REVISION 0x0
-#define GENI_S_INIT_CFG_REVISION 0x4
-#define GENI_OUTPUT_CTRL 0x24
-#define GENI_CGC_CTRL 0x28
-#define GENI_CLK_CTRL_RO 0x60
-#define GENI_FW_S_REVISION_RO 0x6c
+#define SE_GENI_INIT_CFG_REVISION 0x0
+#define SE_GENI_S_INIT_CFG_REVISION 0x4
+#define SE_GENI_CGC_CTRL 0x28
+#define SE_GENI_CLK_CTRL_RO 0x60
+#define SE_GENI_FW_S_REVISION_RO 0x6c
+#define SE_GENI_CFG_REG0 0x100
#define SE_GENI_BYTE_GRAN 0x254
#define SE_GENI_TX_PACKING_CFG0 0x260
#define SE_GENI_TX_PACKING_CFG1 0x264
#define SE_GENI_RX_PACKING_CFG0 0x284
#define SE_GENI_RX_PACKING_CFG1 0x288
-#define SE_GENI_M_GP_LENGTH 0x910
-#define SE_GENI_S_GP_LENGTH 0x914
+#define SE_GENI_S_IRQ_ENABLE 0x644
#define SE_DMA_TX_PTR_L 0xc30
#define SE_DMA_TX_PTR_H 0xc34
#define SE_DMA_TX_ATTR 0xc38
@@ -142,12 +219,20 @@ static const char * const icc_path_names[] = {"qup-core", "qup-config",
#define SE_DMA_RX_IRQ_EN 0xd48
#define SE_DMA_RX_IRQ_EN_SET 0xd4c
#define SE_DMA_RX_IRQ_EN_CLR 0xd50
-#define SE_DMA_RX_LEN_IN 0xd54
#define SE_DMA_RX_MAX_BURST 0xd5c
#define SE_DMA_RX_FLUSH 0xd60
#define SE_GSI_EVENT_EN 0xe18
#define SE_IRQ_EN 0xe1c
#define SE_DMA_GENERAL_CFG 0xe30
+#define SE_GENI_FW_REVISION 0x1000
+#define SE_GENI_S_FW_REVISION 0x1004
+#define SE_GENI_CFG_RAMN 0x1010
+#define SE_GENI_CLK_CTRL 0x2000
+#define SE_DMA_IF_EN 0x2004
+#define SE_FIFO_IF_DISABLE 0x2008
+
+/* GENI_FW_REVISION_RO fields */
+#define FW_REV_VERSION_MSK GENMASK(7, 0)
/* GENI_OUTPUT_CTRL fields */
#define DEFAULT_IO_OUTPUT_CTRL_MSK GENMASK(6, 0)
@@ -179,13 +264,22 @@ static const char * const icc_path_names[] = {"qup-core", "qup-config",
/* SE_DMA_GENERAL_CFG */
#define DMA_RX_CLK_CGC_ON BIT(0)
#define DMA_TX_CLK_CGC_ON BIT(1)
-#define DMA_AHB_SLV_CFG_ON BIT(2)
+#define DMA_AHB_SLV_CLK_CGC_ON BIT(2)
#define AHB_SEC_SLV_CLK_CGC_ON BIT(3)
#define DUMMY_RX_NON_BUFFERABLE BIT(4)
#define RX_DMA_ZERO_PADDING_EN BIT(5)
#define RX_DMA_IRQ_DELAY_MSK GENMASK(8, 6)
#define RX_DMA_IRQ_DELAY_SHFT 6
+/* GENI_CLK_CTRL fields */
+#define SER_CLK_SEL BIT(0)
+
+/* GENI_DMA_IF_EN fields */
+#define DMA_IF_EN BIT(0)
+
+#define geni_setbits32(_addr, _v) writel(readl(_addr) | (_v), _addr)
+#define geni_clrbits32(_addr, _v) writel(readl(_addr) & ~(_v), _addr)
+
/**
* geni_se_get_qup_hw_version() - Read the QUP wrapper Hardware version
* @se: Pointer to the corresponding serial engine.
@@ -196,7 +290,7 @@ u32 geni_se_get_qup_hw_version(struct geni_se *se)
{
struct geni_wrapper *wrapper = se->wrapper;
- return readl_relaxed(wrapper->base + QUP_HW_VER_REG);
+ return readl_relaxed(wrapper->base + QUPV3_HW_VER_REG);
}
EXPORT_SYMBOL_GPL(geni_se_get_qup_hw_version);
@@ -220,12 +314,12 @@ static void geni_se_io_init(void __iomem *base)
{
u32 val;
- val = readl_relaxed(base + GENI_CGC_CTRL);
+ val = readl_relaxed(base + SE_GENI_CGC_CTRL);
val |= DEFAULT_CGC_EN;
- writel_relaxed(val, base + GENI_CGC_CTRL);
+ writel_relaxed(val, base + SE_GENI_CGC_CTRL);
val = readl_relaxed(base + SE_DMA_GENERAL_CFG);
- val |= AHB_SEC_SLV_CLK_CGC_ON | DMA_AHB_SLV_CFG_ON;
+ val |= AHB_SEC_SLV_CLK_CGC_ON | DMA_AHB_SLV_CLK_CGC_ON;
val |= DMA_TX_CLK_CGC_ON | DMA_RX_CLK_CGC_ON;
writel_relaxed(val, base + SE_DMA_GENERAL_CFG);
@@ -658,9 +752,12 @@ int geni_se_clk_freq_match(struct geni_se *se, unsigned long req_freq,
}
EXPORT_SYMBOL_GPL(geni_se_clk_freq_match);
-#define GENI_SE_DMA_DONE_EN BIT(0)
-#define GENI_SE_DMA_EOT_EN BIT(1)
-#define GENI_SE_DMA_AHB_ERR_EN BIT(2)
+#define GENI_SE_DMA_DONE_EN BIT(0)
+#define GENI_SE_DMA_EOT_EN BIT(1)
+#define GENI_SE_DMA_AHB_ERR_EN BIT(2)
+#define GENI_SE_DMA_RESET_DONE_EN BIT(3)
+#define GENI_SE_DMA_FLUSH_DONE BIT(4)
+
#define GENI_SE_DMA_EOT_BUF BIT(0)
/**
@@ -891,6 +988,377 @@ int geni_icc_disable(struct geni_se *se)
}
EXPORT_SYMBOL_GPL(geni_icc_disable);
+/**
+ * geni_find_protocol_fw() - Locate and validate SE firmware for a protocol.
+ * @dev: Pointer to the device structure.
+ * @fw: Pointer to the firmware image.
+ * @protocol: Expected serial engine protocol type.
+ *
+ * Identifies the appropriate firmware image or configuration required for a
+ * specific communication protocol instance running on a Qualcomm GENI
+ * controller.
+ *
+ * Return: pointer to a valid 'struct se_fw_hdr' if found, or NULL otherwise.
+ */
+static struct se_fw_hdr *geni_find_protocol_fw(struct device *dev, const struct firmware *fw,
+ enum geni_se_protocol_type protocol)
+{
+ const struct elf32_hdr *ehdr;
+ const struct elf32_phdr *phdrs;
+ const struct elf32_phdr *phdr;
+ struct se_fw_hdr *sefw;
+ u32 fw_end, cfg_idx_end, cfg_val_end;
+ u16 fw_size;
+ int i;
+
+ if (!fw || fw->size < sizeof(struct elf32_hdr))
+ return NULL;
+
+ ehdr = (const struct elf32_hdr *)fw->data;
+ phdrs = (const struct elf32_phdr *)(fw->data + ehdr->e_phoff);
+
+ /*
+ * The firmware is expected to have at least two program headers (segments).
+ * One for metadata and the other for the actual protocol-specific firmware.
+ */
+ if (ehdr->e_phnum < 2) {
+ dev_err(dev, "Invalid firmware: less than 2 program headers\n");
+ return NULL;
+ }
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &phdrs[i];
+
+ if (fw->size < phdr->p_offset + phdr->p_filesz) {
+ dev_err(dev, "Firmware size (%zu) < expected offset (%u) + size (%u)\n",
+ fw->size, phdr->p_offset, phdr->p_filesz);
+ return NULL;
+ }
+
+ if (phdr->p_type != PT_LOAD || !phdr->p_memsz)
+ continue;
+
+ if (MI_PBT_PAGE_MODE_VALUE(phdr->p_flags) != MI_PBT_NON_PAGED_SEGMENT ||
+ MI_PBT_SEGMENT_TYPE_VALUE(phdr->p_flags) == MI_PBT_HASH_SEGMENT ||
+ MI_PBT_ACCESS_TYPE_VALUE(phdr->p_flags) == MI_PBT_NOTUSED_SEGMENT ||
+ MI_PBT_ACCESS_TYPE_VALUE(phdr->p_flags) == MI_PBT_SHARED_SEGMENT)
+ continue;
+
+ if (phdr->p_filesz < sizeof(struct se_fw_hdr))
+ continue;
+
+ sefw = (struct se_fw_hdr *)(fw->data + phdr->p_offset);
+ fw_size = le16_to_cpu(sefw->fw_size_in_items);
+ fw_end = le16_to_cpu(sefw->fw_offset) + fw_size * sizeof(u32);
+ cfg_idx_end = le16_to_cpu(sefw->cfg_idx_offset) +
+ le16_to_cpu(sefw->cfg_size_in_items) * sizeof(u8);
+ cfg_val_end = le16_to_cpu(sefw->cfg_val_offset) +
+ le16_to_cpu(sefw->cfg_size_in_items) * sizeof(u32);
+
+ if (le32_to_cpu(sefw->magic) != SE_MAGIC_NUM || le32_to_cpu(sefw->version) != 1)
+ continue;
+
+ if (le32_to_cpu(sefw->serial_protocol) != protocol)
+ continue;
+
+ if (fw_size % 2 != 0) {
+ fw_size++;
+ sefw->fw_size_in_items = cpu_to_le16(fw_size);
+ }
+
+ if (fw_size >= MAX_GENI_CFG_RAMn_CNT) {
+ dev_err(dev,
+ "Firmware size (%u) exceeds max allowed RAMn count (%u)\n",
+ fw_size, MAX_GENI_CFG_RAMn_CNT);
+ continue;
+ }
+
+ if (fw_end > phdr->p_filesz || cfg_idx_end > phdr->p_filesz ||
+ cfg_val_end > phdr->p_filesz) {
+ dev_err(dev, "Truncated or corrupt SE FW segment found at index %d\n", i);
+ continue;
+ }
+
+ return sefw;
+ }
+
+ dev_err(dev, "Failed to get %s protocol firmware\n", protocol_name[protocol]);
+ return NULL;
+}
+
+/**
+ * geni_configure_xfer_mode() - Set the transfer mode.
+ * @se: Pointer to the concerned serial engine.
+ * @mode: SE data transfer mode.
+ *
+ * Set the transfer mode to either FIFO or DMA according to the mode specified
+ * by the protocol driver.
+ *
+ * Return: 0 if successful, otherwise return an error value.
+ */
+static int geni_configure_xfer_mode(struct geni_se *se, enum geni_se_xfer_mode mode)
+{
+ /* Configure SE FIFO, DMA or GSI mode. */
+ switch (mode) {
+ case GENI_GPI_DMA:
+ geni_setbits32(se->base + SE_GENI_DMA_MODE_EN, GENI_DMA_MODE_EN);
+ writel(0x0, se->base + SE_IRQ_EN);
+ writel(DMA_RX_EVENT_EN | DMA_TX_EVENT_EN | GENI_M_EVENT_EN | GENI_S_EVENT_EN,
+ se->base + SE_GSI_EVENT_EN);
+ break;
+
+ case GENI_SE_FIFO:
+ geni_clrbits32(se->base + SE_GENI_DMA_MODE_EN, GENI_DMA_MODE_EN);
+ writel(DMA_RX_IRQ_EN | DMA_TX_IRQ_EN | GENI_M_IRQ_EN | GENI_S_IRQ_EN,
+ se->base + SE_IRQ_EN);
+ writel(0x0, se->base + SE_GSI_EVENT_EN);
+ break;
+
+ case GENI_SE_DMA:
+ geni_setbits32(se->base + SE_GENI_DMA_MODE_EN, GENI_DMA_MODE_EN);
+ writel(DMA_RX_IRQ_EN | DMA_TX_IRQ_EN | GENI_M_IRQ_EN | GENI_S_IRQ_EN,
+ se->base + SE_IRQ_EN);
+ writel(0x0, se->base + SE_GSI_EVENT_EN);
+ break;
+
+ default:
+ dev_err(se->dev, "Invalid geni-se transfer mode: %d\n", mode);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * geni_enable_interrupts() - Enable interrupts.
+ * @se: Pointer to the concerned serial engine.
+ *
+ * Enable the required interrupts during the firmware load process.
+ */
+static void geni_enable_interrupts(struct geni_se *se)
+{
+ u32 val;
+
+ /* Enable required interrupts. */
+ writel(M_COMMON_GENI_M_IRQ_EN, se->base + SE_GENI_M_IRQ_EN);
+
+ val = S_CMD_OVERRUN_EN | S_ILLEGAL_CMD_EN | S_CMD_CANCEL_EN | S_CMD_ABORT_EN |
+ S_GP_IRQ_0_EN | S_GP_IRQ_1_EN | S_GP_IRQ_2_EN | S_GP_IRQ_3_EN |
+ S_RX_FIFO_WR_ERR_EN | S_RX_FIFO_RD_ERR_EN;
+ writel(val, se->base + SE_GENI_S_IRQ_ENABLE);
+
+ /* DMA mode configuration. */
+ val = GENI_SE_DMA_RESET_DONE_EN | GENI_SE_DMA_AHB_ERR_EN | GENI_SE_DMA_DONE_EN;
+ writel(val, se->base + SE_DMA_TX_IRQ_EN_SET);
+ val = GENI_SE_DMA_FLUSH_DONE | GENI_SE_DMA_RESET_DONE_EN | GENI_SE_DMA_AHB_ERR_EN |
+ GENI_SE_DMA_DONE_EN;
+ writel(val, se->base + SE_DMA_RX_IRQ_EN_SET);
+}
+
+/**
+ * geni_write_fw_revision() - Write the firmware revision.
+ * @se: Pointer to the concerned serial engine.
+ * @serial_protocol: serial protocol type.
+ * @fw_version: QUP firmware version.
+ *
+ * Write the firmware revision and protocol into the respective register.
+ */
+static void geni_write_fw_revision(struct geni_se *se, u16 serial_protocol, u16 fw_version)
+{
+ u32 reg;
+
+ reg = FIELD_PREP(FW_REV_PROTOCOL_MSK, serial_protocol);
+ reg |= FIELD_PREP(FW_REV_VERSION_MSK, fw_version);
+
+ writel(reg, se->base + SE_GENI_FW_REVISION);
+ writel(reg, se->base + SE_GENI_S_FW_REVISION);
+}
+
+/**
+ * geni_load_se_fw() - Load Serial Engine specific firmware.
+ * @se: Pointer to the concerned serial engine.
+ * @fw: Pointer to the firmware structure.
+ * @mode: SE data transfer mode.
+ * @protocol: Protocol type to be used with the SE (e.g., UART, SPI, I2C).
+ *
+ * Load the protocol firmware into the IRAM of the Serial Engine.
+ *
+ * Return: 0 if successful, otherwise return an error value.
+ */
+static int geni_load_se_fw(struct geni_se *se, const struct firmware *fw,
+ enum geni_se_xfer_mode mode, enum geni_se_protocol_type protocol)
+{
+ const u32 *fw_data, *cfg_val_arr;
+ const u8 *cfg_idx_arr;
+ u32 i, reg_value;
+ int ret;
+ struct se_fw_hdr *hdr;
+
+ hdr = geni_find_protocol_fw(se->dev, fw, protocol);
+ if (!hdr)
+ return -EINVAL;
+
+ fw_data = (const u32 *)((u8 *)hdr + le16_to_cpu(hdr->fw_offset));
+ cfg_idx_arr = (const u8 *)hdr + le16_to_cpu(hdr->cfg_idx_offset);
+ cfg_val_arr = (const u32 *)((u8 *)hdr + le16_to_cpu(hdr->cfg_val_offset));
+
+ ret = geni_icc_set_bw(se);
+ if (ret)
+ return ret;
+
+ ret = geni_icc_enable(se);
+ if (ret)
+ return ret;
+
+ ret = geni_se_resources_on(se);
+ if (ret)
+ goto out_icc_disable;
+
+ /*
+ * Disable high-priority interrupts until all currently executing
+ * low-priority interrupts have been fully handled.
+ */
+ geni_setbits32(se->wrapper->base + QUPV3_COMMON_CFG, FAST_SWITCH_TO_HIGH_DISABLE);
+
+ /* Set AHB_M_CLK_CGC_ON so that hardware controls the SE wrapper CGC clock. */
+ geni_setbits32(se->wrapper->base + QUPV3_SE_AHB_M_CFG, AHB_M_CLK_CGC_ON);
+
+ /* Let hardware control the common CGC. */
+ geni_setbits32(se->wrapper->base + QUPV3_COMMON_CGC_CTRL, COMMON_CSR_SLV_CLK_CGC_ON);
+
+ /*
+ * Clear GENI_OUTPUT_CTRL so no output lines are driven while the firmware
+ * is programmed; setting individual bits later activates the corresponding
+ * output lines.
+ */
+ writel(0x0, se->base + GENI_OUTPUT_CTRL);
+
+ /* Toggle SCLK and HCLK gating to allow programming of the RAM. */
+ geni_setbits32(se->base + SE_GENI_CGC_CTRL, PROG_RAM_SCLK_OFF | PROG_RAM_HCLK_OFF);
+ writel(0x0, se->base + SE_GENI_CLK_CTRL);
+ geni_clrbits32(se->base + SE_GENI_CGC_CTRL, PROG_RAM_SCLK_OFF | PROG_RAM_HCLK_OFF);
+
+ /* Enable required clocks for DMA CSR, TX and RX. */
+ reg_value = AHB_SEC_SLV_CLK_CGC_ON | DMA_AHB_SLV_CLK_CGC_ON |
+ DMA_TX_CLK_CGC_ON | DMA_RX_CLK_CGC_ON;
+ geni_setbits32(se->base + SE_DMA_GENERAL_CFG, reg_value);
+
+ /* Let hardware control CGC by default. */
+ writel(DEFAULT_CGC_EN, se->base + SE_GENI_CGC_CTRL);
+
+ /* Set the version of the configuration registers carried in the firmware. */
+ writel(le16_to_cpu(hdr->cfg_version), se->base + SE_GENI_INIT_CFG_REVISION);
+ writel(le16_to_cpu(hdr->cfg_version), se->base + SE_GENI_S_INIT_CFG_REVISION);
+
+ /* Configure GENI primitive table. */
+ for (i = 0; i < le16_to_cpu(hdr->cfg_size_in_items); i++)
+ writel(cfg_val_arr[i],
+ se->base + SE_GENI_CFG_REG0 + (cfg_idx_arr[i] * sizeof(u32)));
+
+ /* Configure the FIFO level at which the RX_RFR_WATERMARK condition asserts. */
+ reg_value = geni_se_get_rx_fifo_depth(se);
+ writel(reg_value - 2, se->base + SE_GENI_RX_RFR_WATERMARK_REG);
+
+ /* Enable the default IO output lines. */
+ geni_setbits32(se->base + GENI_OUTPUT_CTRL, DEFAULT_IO_OUTPUT_CTRL_MSK);
+
+ ret = geni_configure_xfer_mode(se, mode);
+ if (ret)
+ goto out_resources_off;
+
+ geni_enable_interrupts(se);
+
+ geni_write_fw_revision(se, le16_to_cpu(hdr->serial_protocol), le16_to_cpu(hdr->fw_version));
+
+ /* Copy the firmware into the CFG RAM. */
+ memcpy_toio(se->base + SE_GENI_CFG_RAMN, fw_data,
+ le16_to_cpu(hdr->fw_size_in_items) * sizeof(u32));
+
+ /* Put default values on GENI's output pads. */
+ writel_relaxed(0x1, se->base + GENI_FORCE_DEFAULT_REG);
+
+ /* Toggle SCLK/HCLK gating to finalize RAM programming and apply the configuration. */
+ geni_setbits32(se->base + SE_GENI_CGC_CTRL, PROG_RAM_SCLK_OFF | PROG_RAM_HCLK_OFF);
+ geni_setbits32(se->base + SE_GENI_CLK_CTRL, SER_CLK_SEL);
+ geni_clrbits32(se->base + SE_GENI_CGC_CTRL, PROG_RAM_SCLK_OFF | PROG_RAM_HCLK_OFF);
+
+ /* Enable the serial engine DMA interface. */
+ geni_setbits32(se->base + SE_DMA_IF_EN, DMA_IF_EN);
+
+ /* Enable or disable the FIFO interface of the serial engine. */
+ if (mode == GENI_SE_FIFO)
+ geni_clrbits32(se->base + SE_FIFO_IF_DISABLE, FIFO_IF_DISABLE);
+ else
+ geni_setbits32(se->base + SE_FIFO_IF_DISABLE, FIFO_IF_DISABLE);
+
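+ /* Success and error paths share the teardown below; resources and ICC bandwidth are released either way. */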
+out_resources_off:
+ geni_se_resources_off(se);
+
+out_icc_disable:
+ geni_icc_disable(se);
+ return ret;
+}
+
+/**
+ * geni_load_se_firmware() - Load firmware for SE based on protocol
+ * @se: Pointer to the concerned serial engine.
+ * @protocol: Protocol type to be used with the SE (e.g., UART, SPI, I2C).
+ *
+ * Retrieves the firmware name from device properties and sets the transfer mode
+ * (FIFO or GSI DMA) based on the device tree configuration. Enforces FIFO
+ * mode for the UART protocol, which lacks GSI DMA support. Requests the firmware and loads
+ * it into the SE.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int geni_load_se_firmware(struct geni_se *se, enum geni_se_protocol_type protocol)
+{
+ const char *fw_name;
+ const struct firmware *fw;
+ enum geni_se_xfer_mode mode = GENI_SE_FIFO;
+ int ret;
+
+ if (protocol >= ARRAY_SIZE(protocol_name)) {
+ dev_err(se->dev, "Invalid geni-se protocol: %d", protocol);
+ return -EINVAL;
+ }
+
+ ret = device_property_read_string(se->wrapper->dev, "firmware-name", &fw_name);
+ if (ret) {
+ dev_err(se->dev, "Failed to read firmware-name property: %d\n", ret);
+ return -EINVAL;
+ }
+
+ if (of_property_read_bool(se->dev->of_node, "qcom,enable-gsi-dma"))
+ mode = GENI_GPI_DMA;
+
+ /* GSI mode is not supported by the UART driver, so force FIFO mode. */
+ if (protocol == GENI_SE_UART)
+ mode = GENI_SE_FIFO;
+
+ ret = request_firmware(&fw, fw_name, se->dev);
+ if (ret) {
+ if (ret == -ENOENT)
+ return -EPROBE_DEFER;
+
+ dev_err(se->dev, "Failed to request firmware '%s' for protocol %d: ret: %d\n",
+ fw_name, protocol, ret);
+ return ret;
+ }
+
+ ret = geni_load_se_fw(se, fw, mode, protocol);
+ release_firmware(fw);
+
+ if (ret) {
+ dev_err(se->dev, "Failed to load SE firmware for protocol %d: ret: %d\n",
+ protocol, ret);
+ return ret;
+ }
+
+ dev_dbg(se->dev, "Firmware load for %s protocol is successful for xfer mode: %d\n",
+ protocol_name[protocol], mode);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(geni_load_se_firmware);
+
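For illustration, a minimal sketch of how a protocol driver's probe path might use this new export; the wrapper name and the GENI_SE_SPI protocol value are assumptions for the example, not part of this patch:

	/* Hypothetical caller: load the SE firmware before first use. */
	static int example_load_spi_fw(struct geni_se *se)
	{
		/*
		 * geni_load_se_firmware() already logs failures and returns
		 * -EPROBE_DEFER while the firmware image is unavailable, so
		 * the caller can simply propagate the return value.
		 */
		return geni_load_se_firmware(se, GENI_SE_SPI);
	}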
static int geni_se_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
diff --git a/drivers/soc/qcom/qcom_pd_mapper.c b/drivers/soc/qcom/qcom_pd_mapper.c
index 3abea241b1c4..6384f271953d 100644
--- a/drivers/soc/qcom/qcom_pd_mapper.c
+++ b/drivers/soc/qcom/qcom_pd_mapper.c
@@ -584,6 +584,7 @@ static const struct of_device_id qcom_pdm_domains[] __maybe_unused = {
{ .compatible = "qcom,sm8450", .data = sm8350_domains, },
{ .compatible = "qcom,sm8550", .data = sm8550_domains, },
{ .compatible = "qcom,sm8650", .data = sm8550_domains, },
+ { .compatible = "qcom,sm8750", .data = sm8550_domains, },
{ .compatible = "qcom,x1e80100", .data = x1e80100_domains, },
{ .compatible = "qcom,x1p42100", .data = x1e80100_domains, },
{},
diff --git a/drivers/soc/qcom/ramp_controller.c b/drivers/soc/qcom/ramp_controller.c
index 349bdfbc61ef..15782bed2925 100644
--- a/drivers/soc/qcom/ramp_controller.c
+++ b/drivers/soc/qcom/ramp_controller.c
@@ -229,7 +229,6 @@ static const struct regmap_config qrc_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x68,
- .fast_io = true,
};
static const struct reg_sequence msm8976_cfg_dfs_sid[] = {
diff --git a/drivers/soc/qcom/rpm_master_stats.c b/drivers/soc/qcom/rpm_master_stats.c
index 49e4f9457279..c7788337e164 100644
--- a/drivers/soc/qcom/rpm_master_stats.c
+++ b/drivers/soc/qcom/rpm_master_stats.c
@@ -78,7 +78,7 @@ static int master_stats_probe(struct platform_device *pdev)
if (count < 0)
return count;
- data = devm_kzalloc(dev, count * sizeof(*data), GFP_KERNEL);
+ data = devm_kcalloc(dev, count, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
index fdab2b1067db..c6f7d5c9c493 100644
--- a/drivers/soc/qcom/rpmh-rsc.c
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -453,13 +453,10 @@ static irqreturn_t tcs_tx_done(int irq, void *p)
trace_rpmh_tx_done(drv, i, req);
- /*
- * If wake tcs was re-purposed for sending active
- * votes, clear AMC trigger & enable modes and
+ /* Clear AMC trigger & enable modes and
* disable interrupt for this TCS
*/
- if (!drv->tcs[ACTIVE_TCS].num_tcs)
- __tcs_set_trigger(drv, i, false);
+ __tcs_set_trigger(drv, i, false);
skip:
/* Reclaim the TCS */
write_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], i, 0);
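With the conditional removed, the AMC trigger and enable modes are now cleared for every TCS that completes, rather than only when the wake TCS had been re-purposed for sending active votes.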
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index cf425930539e..c4c45f15dca4 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -898,7 +898,7 @@ static u32 qcom_smem_get_item_count(struct qcom_smem *smem)
if (IS_ERR_OR_NULL(ptable))
return SMEM_ITEM_COUNT;
- info = (struct smem_info *)&ptable->entry[ptable->num_entries];
+ info = (struct smem_info *)&ptable->entry[le32_to_cpu(ptable->num_entries)];
if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic)))
return SMEM_ITEM_COUNT;
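ptable->num_entries is stored little-endian in shared memory, so indexing entry[] with the raw value computes the wrong info address on big-endian kernels; converting with le32_to_cpu() yields the correct offset for either host endianness.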
diff --git a/drivers/soc/qcom/ubwc_config.c b/drivers/soc/qcom/ubwc_config.c
index bd0a98aad9f3..15d373bff231 100644
--- a/drivers/soc/qcom/ubwc_config.c
+++ b/drivers/soc/qcom/ubwc_config.c
@@ -12,6 +12,10 @@
#include <linux/soc/qcom/ubwc.h>
+static const struct qcom_ubwc_cfg_data no_ubwc_data = {
+ /* no UBWC, no HBB */
+};
+
static const struct qcom_ubwc_cfg_data msm8937_data = {
.ubwc_enc_version = UBWC_1_0,
.ubwc_dec_version = UBWC_1_0,
@@ -215,12 +219,20 @@ static const struct qcom_ubwc_cfg_data x1e80100_data = {
};
static const struct of_device_id qcom_ubwc_configs[] __maybe_unused = {
+ { .compatible = "qcom,apq8016", .data = &no_ubwc_data },
+ { .compatible = "qcom,apq8026", .data = &no_ubwc_data },
+ { .compatible = "qcom,apq8074", .data = &no_ubwc_data },
{ .compatible = "qcom,apq8096", .data = &msm8998_data },
- { .compatible = "qcom,msm8917", .data = &msm8937_data },
+ { .compatible = "qcom,msm8226", .data = &no_ubwc_data },
+ { .compatible = "qcom,msm8916", .data = &no_ubwc_data },
+ { .compatible = "qcom,msm8917", .data = &no_ubwc_data },
{ .compatible = "qcom,msm8937", .data = &msm8937_data },
+ { .compatible = "qcom,msm8929", .data = &no_ubwc_data },
+ { .compatible = "qcom,msm8939", .data = &no_ubwc_data },
{ .compatible = "qcom,msm8953", .data = &msm8937_data },
- { .compatible = "qcom,msm8956", .data = &msm8937_data },
- { .compatible = "qcom,msm8976", .data = &msm8937_data },
+ { .compatible = "qcom,msm8956", .data = &no_ubwc_data },
+ { .compatible = "qcom,msm8974", .data = &no_ubwc_data },
+ { .compatible = "qcom,msm8976", .data = &no_ubwc_data },
{ .compatible = "qcom,msm8996", .data = &msm8998_data },
{ .compatible = "qcom,msm8998", .data = &msm8998_data },
{ .compatible = "qcom,qcm2290", .data = &qcm2290_data, },
@@ -233,7 +245,10 @@ static const struct of_device_id qcom_ubwc_configs[] __maybe_unused = {
{ .compatible = "qcom,sc7280", .data = &sc7280_data, },
{ .compatible = "qcom,sc8180x", .data = &sc8180x_data, },
{ .compatible = "qcom,sc8280xp", .data = &sc8280xp_data, },
+ { .compatible = "qcom,sda660", .data = &msm8937_data },
+ { .compatible = "qcom,sdm450", .data = &msm8937_data },
{ .compatible = "qcom,sdm630", .data = &msm8937_data },
+ { .compatible = "qcom,sdm632", .data = &msm8937_data },
{ .compatible = "qcom,sdm636", .data = &msm8937_data },
{ .compatible = "qcom,sdm660", .data = &msm8937_data },
{ .compatible = "qcom,sdm670", .data = &sdm670_data, },
@@ -246,6 +261,8 @@ static const struct of_device_id qcom_ubwc_configs[] __maybe_unused = {
{ .compatible = "qcom,sm6375", .data = &sm6350_data, },
{ .compatible = "qcom,sm7125", .data = &sc7180_data },
{ .compatible = "qcom,sm7150", .data = &sm7150_data, },
+ { .compatible = "qcom,sm7225", .data = &sm6350_data, },
+ { .compatible = "qcom,sm7325", .data = &sc7280_data, },
{ .compatible = "qcom,sm8150", .data = &sm8150_data, },
{ .compatible = "qcom,sm8250", .data = &sm8250_data, },
{ .compatible = "qcom,sm8350", .data = &sm8350_data, },
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 719b7f4f376f..340a1ff7e92b 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -39,6 +39,10 @@ config ARCH_RCAR_GEN4
bool
select ARCH_RCAR_GEN3
+config ARCH_RCAR_GEN5
+ bool
+ select ARCH_RCAR_GEN4
+
config ARCH_RMOBILE
bool
select PM
@@ -348,6 +352,14 @@ config ARCH_R8A779H0
help
This enables support for the Renesas R-Car V4M SoC.
+config ARCH_R8A78000
+ bool "ARM64 Platform support for R8A78000 (R-Car X5H)"
+ default y if ARCH_RENESAS
+ select ARCH_RCAR_GEN5
+ help
+ This enables support for the Renesas R-Car X5H SoC.
+
config ARCH_R9A07G043
bool "ARM64 Platform support for R9A07G043U (RZ/G2UL)"
default y if ARCH_RENESAS
@@ -449,6 +461,7 @@ config RST_RCAR
config SYSC_RZ
bool "System controller for RZ SoCs" if COMPILE_TEST
+ select MFD_SYSCON
config SYSC_R9A08G045
bool "Renesas System controller support for R9A08G045 (RZ/G3S)" if COMPILE_TEST
diff --git a/drivers/soc/renesas/pwc-rzv2m.c b/drivers/soc/renesas/pwc-rzv2m.c
index 4dbcb3d4a90c..6209168b3734 100644
--- a/drivers/soc/renesas/pwc-rzv2m.c
+++ b/drivers/soc/renesas/pwc-rzv2m.c
@@ -64,7 +64,7 @@ static const struct gpio_chip rzv2m_pwc_gc = {
.label = "gpio_rzv2m_pwc",
.owner = THIS_MODULE,
.get = rzv2m_pwc_gpio_get,
- .set_rv = rzv2m_pwc_gpio_set,
+ .set = rzv2m_pwc_gpio_set,
.direction_output = rzv2m_pwc_gpio_direction_output,
.can_sleep = false,
.ngpio = 2,
diff --git a/drivers/soc/renesas/r9a08g045-sysc.c b/drivers/soc/renesas/r9a08g045-sysc.c
index f4db1431e036..0504d4e68761 100644
--- a/drivers/soc/renesas/r9a08g045-sysc.c
+++ b/drivers/soc/renesas/r9a08g045-sysc.c
@@ -20,4 +20,5 @@ static const struct rz_sysc_soc_id_init_data rzg3s_sysc_soc_id_init_data __initc
const struct rz_sysc_init_data rzg3s_sysc_init_data __initconst = {
.soc_id_init_data = &rzg3s_sysc_soc_id_init_data,
+ .max_register = 0xe20,
};
diff --git a/drivers/soc/renesas/r9a09g047-sys.c b/drivers/soc/renesas/r9a09g047-sys.c
index cd2eb7782cfe..2e8426c03050 100644
--- a/drivers/soc/renesas/r9a09g047-sys.c
+++ b/drivers/soc/renesas/r9a09g047-sys.c
@@ -64,4 +64,5 @@ static const struct rz_sysc_soc_id_init_data rzg3e_sys_soc_id_init_data __initco
const struct rz_sysc_init_data rzg3e_sys_init_data = {
.soc_id_init_data = &rzg3e_sys_soc_id_init_data,
+ .max_register = 0x170c,
};
diff --git a/drivers/soc/renesas/r9a09g057-sys.c b/drivers/soc/renesas/r9a09g057-sys.c
index 4c21cc29edbc..e3390e7c7fe5 100644
--- a/drivers/soc/renesas/r9a09g057-sys.c
+++ b/drivers/soc/renesas/r9a09g057-sys.c
@@ -64,4 +64,5 @@ static const struct rz_sysc_soc_id_init_data rzv2h_sys_soc_id_init_data __initco
const struct rz_sysc_init_data rzv2h_sys_init_data = {
.soc_id_init_data = &rzv2h_sys_soc_id_init_data,
+ .max_register = 0x170c,
};
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index df2b38417b80..1eb52356b996 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -36,6 +36,10 @@ static const struct renesas_family fam_rcar_gen4 __initconst __maybe_unused = {
.name = "R-Car Gen4",
};
+static const struct renesas_family fam_rcar_gen5 __initconst __maybe_unused = {
+ .name = "R-Car Gen5",
+};
+
static const struct renesas_family fam_rmobile __initconst __maybe_unused = {
.name = "R-Mobile",
.reg = 0xe600101c, /* CCCR (Common Chip Code Register) */
@@ -266,6 +270,11 @@ static const struct renesas_soc soc_rcar_v4m __initconst __maybe_unused = {
.id = 0x5d,
};
+static const struct renesas_soc soc_rcar_x5h __initconst __maybe_unused = {
+ .family = &fam_rcar_gen5,
+ .id = 0x60,
+};
+
static const struct renesas_soc soc_shmobile_ag5 __initconst __maybe_unused = {
.family = &fam_shmobile,
.id = 0x37,
@@ -378,6 +387,9 @@ static const struct of_device_id renesas_socs[] __initconst __maybe_unused = {
#ifdef CONFIG_ARCH_R8A779H0
{ .compatible = "renesas,r8a779h0", .data = &soc_rcar_v4m },
#endif
+#ifdef CONFIG_ARCH_R8A78000
+ { .compatible = "renesas,r8a78000", .data = &soc_rcar_x5h },
+#endif
#ifdef CONFIG_ARCH_R9A07G043
#ifdef CONFIG_RISCV
{ .compatible = "renesas,r9a07g043", .data = &soc_rz_five },
diff --git a/drivers/soc/renesas/rz-sysc.c b/drivers/soc/renesas/rz-sysc.c
index ffa65fb4dade..9f79e299e6f4 100644
--- a/drivers/soc/renesas/rz-sysc.c
+++ b/drivers/soc/renesas/rz-sysc.c
@@ -5,9 +5,13 @@
* Copyright (C) 2024 Renesas Electronics Corp.
*/
+#include <linux/cleanup.h>
#include <linux/io.h>
+#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
#include <linux/sys_soc.h>
#include "rz-sysc.h"
@@ -100,14 +104,23 @@ MODULE_DEVICE_TABLE(of, rz_sysc_match);
static int rz_sysc_probe(struct platform_device *pdev)
{
+ const struct rz_sysc_init_data *data;
const struct of_device_id *match;
struct device *dev = &pdev->dev;
+ struct regmap *regmap;
struct rz_sysc *sysc;
+ int ret;
+
+ struct regmap_config *regmap_cfg __free(kfree) = kzalloc(sizeof(*regmap_cfg), GFP_KERNEL);
+ if (!regmap_cfg)
+ return -ENOMEM;
match = of_match_node(rz_sysc_match, dev->of_node);
if (!match)
return -ENODEV;
+ data = match->data;
+
sysc = devm_kzalloc(dev, sizeof(*sysc), GFP_KERNEL);
if (!sysc)
return -ENOMEM;
@@ -117,7 +130,22 @@ static int rz_sysc_probe(struct platform_device *pdev)
return PTR_ERR(sysc->base);
sysc->dev = dev;
- return rz_sysc_soc_init(sysc, match);
+ ret = rz_sysc_soc_init(sysc, match);
+ if (ret)
+ return ret;
+
+ regmap_cfg->name = "rz_sysc_regs";
+ regmap_cfg->reg_bits = 32;
+ regmap_cfg->reg_stride = 4;
+ regmap_cfg->val_bits = 32;
+ regmap_cfg->fast_io = true;
+ regmap_cfg->max_register = data->max_register;
+
+ regmap = devm_regmap_init_mmio(dev, sysc->base, regmap_cfg);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return of_syscon_register_regmap(dev->of_node, regmap);
}
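Registering the MMIO regmap with of_syscon_register_regmap() lets subsequent syscon lookups of this node share the driver's own regmap, bounded by the per-SoC max_register, instead of syscon creating a second ad hoc mapping of the same register block.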
static struct platform_driver rz_sysc_driver = {
diff --git a/drivers/soc/renesas/rz-sysc.h b/drivers/soc/renesas/rz-sysc.h
index 56bc047a1bff..8eec355d5d56 100644
--- a/drivers/soc/renesas/rz-sysc.h
+++ b/drivers/soc/renesas/rz-sysc.h
@@ -34,9 +34,11 @@ struct rz_sysc_soc_id_init_data {
/**
* struct rz_sysc_init_data - RZ SYSC initialization data
* @soc_id_init_data: RZ SYSC SoC ID initialization data
+ * @max_register: Maximum SYSC register offset to be used by the regmap config
*/
struct rz_sysc_init_data {
const struct rz_sysc_soc_id_init_data *soc_id_init_data;
+ u32 max_register;
};
extern const struct rz_sysc_init_data rzg3e_sys_init_data;
diff --git a/drivers/soc/rockchip/grf.c b/drivers/soc/rockchip/grf.c
index 1eab4bb0eacf..344870da7675 100644
--- a/drivers/soc/rockchip/grf.c
+++ b/drivers/soc/rockchip/grf.c
@@ -6,13 +6,12 @@
*/
#include <linux/err.h>
+#include <linux/hw_bitfield.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#define HIWORD_UPDATE(val, mask, shift) \
- ((val) << (shift) | (mask) << ((shift) + 16))
struct rockchip_grf_value {
const char *desc;
@@ -32,7 +31,7 @@ static const struct rockchip_grf_value rk3036_defaults[] __initconst = {
* Disable auto jtag/sdmmc switching that causes issues with the
* clock-framework and the mmc controllers making them unreliable.
*/
- { "jtag switching", RK3036_GRF_SOC_CON0, HIWORD_UPDATE(0, 1, 11) },
+ { "jtag switching", RK3036_GRF_SOC_CON0, FIELD_PREP_WM16_CONST(BIT(11), 0) },
};
static const struct rockchip_grf_info rk3036_grf __initconst = {
@@ -44,8 +43,8 @@ static const struct rockchip_grf_info rk3036_grf __initconst = {
#define RK3128_GRF_SOC_CON1 0x144
static const struct rockchip_grf_value rk3128_defaults[] __initconst = {
- { "jtag switching", RK3128_GRF_SOC_CON0, HIWORD_UPDATE(0, 1, 8) },
- { "vpu main clock", RK3128_GRF_SOC_CON1, HIWORD_UPDATE(0, 1, 10) },
+ { "jtag switching", RK3128_GRF_SOC_CON0, FIELD_PREP_WM16_CONST(BIT(8), 0) },
+ { "vpu main clock", RK3128_GRF_SOC_CON1, FIELD_PREP_WM16_CONST(BIT(10), 0) },
};
static const struct rockchip_grf_info rk3128_grf __initconst = {
@@ -56,7 +55,7 @@ static const struct rockchip_grf_info rk3128_grf __initconst = {
#define RK3228_GRF_SOC_CON6 0x418
static const struct rockchip_grf_value rk3228_defaults[] __initconst = {
- { "jtag switching", RK3228_GRF_SOC_CON6, HIWORD_UPDATE(0, 1, 8) },
+ { "jtag switching", RK3228_GRF_SOC_CON6, FIELD_PREP_WM16_CONST(BIT(8), 0) },
};
static const struct rockchip_grf_info rk3228_grf __initconst = {
@@ -68,8 +67,8 @@ static const struct rockchip_grf_info rk3228_grf __initconst = {
#define RK3288_GRF_SOC_CON2 0x24c
static const struct rockchip_grf_value rk3288_defaults[] __initconst = {
- { "jtag switching", RK3288_GRF_SOC_CON0, HIWORD_UPDATE(0, 1, 12) },
- { "pwm select", RK3288_GRF_SOC_CON2, HIWORD_UPDATE(1, 1, 0) },
+ { "jtag switching", RK3288_GRF_SOC_CON0, FIELD_PREP_WM16_CONST(BIT(12), 0) },
+ { "pwm select", RK3288_GRF_SOC_CON2, FIELD_PREP_WM16_CONST(BIT(0), 1) },
};
static const struct rockchip_grf_info rk3288_grf __initconst = {
@@ -80,7 +79,7 @@ static const struct rockchip_grf_info rk3288_grf __initconst = {
#define RK3328_GRF_SOC_CON4 0x410
static const struct rockchip_grf_value rk3328_defaults[] __initconst = {
- { "jtag switching", RK3328_GRF_SOC_CON4, HIWORD_UPDATE(0, 1, 12) },
+ { "jtag switching", RK3328_GRF_SOC_CON4, FIELD_PREP_WM16_CONST(BIT(12), 0) },
};
static const struct rockchip_grf_info rk3328_grf __initconst = {
@@ -91,7 +90,7 @@ static const struct rockchip_grf_info rk3328_grf __initconst = {
#define RK3368_GRF_SOC_CON15 0x43c
static const struct rockchip_grf_value rk3368_defaults[] __initconst = {
- { "jtag switching", RK3368_GRF_SOC_CON15, HIWORD_UPDATE(0, 1, 13) },
+ { "jtag switching", RK3368_GRF_SOC_CON15, FIELD_PREP_WM16_CONST(BIT(13), 0) },
};
static const struct rockchip_grf_info rk3368_grf __initconst = {
@@ -102,7 +101,7 @@ static const struct rockchip_grf_info rk3368_grf __initconst = {
#define RK3399_GRF_SOC_CON7 0xe21c
static const struct rockchip_grf_value rk3399_defaults[] __initconst = {
- { "jtag switching", RK3399_GRF_SOC_CON7, HIWORD_UPDATE(0, 1, 12) },
+ { "jtag switching", RK3399_GRF_SOC_CON7, FIELD_PREP_WM16_CONST(BIT(12), 0) },
};
static const struct rockchip_grf_info rk3399_grf __initconst = {
@@ -113,9 +112,9 @@ static const struct rockchip_grf_info rk3399_grf __initconst = {
#define RK3566_GRF_USB3OTG0_CON1 0x0104
static const struct rockchip_grf_value rk3566_defaults[] __initconst = {
- { "usb3otg port switch", RK3566_GRF_USB3OTG0_CON1, HIWORD_UPDATE(0, 1, 12) },
- { "usb3otg clock switch", RK3566_GRF_USB3OTG0_CON1, HIWORD_UPDATE(1, 1, 7) },
- { "usb3otg disable usb3", RK3566_GRF_USB3OTG0_CON1, HIWORD_UPDATE(1, 1, 0) },
+ { "usb3otg port switch", RK3566_GRF_USB3OTG0_CON1, FIELD_PREP_WM16_CONST(BIT(12), 0) },
+ { "usb3otg clock switch", RK3566_GRF_USB3OTG0_CON1, FIELD_PREP_WM16_CONST(BIT(7), 1) },
+ { "usb3otg disable usb3", RK3566_GRF_USB3OTG0_CON1, FIELD_PREP_WM16_CONST(BIT(0), 1) },
};
static const struct rockchip_grf_info rk3566_pipegrf __initconst = {
@@ -126,8 +125,8 @@ static const struct rockchip_grf_info rk3566_pipegrf __initconst = {
#define RK3576_SYSGRF_SOC_CON1 0x0004
static const struct rockchip_grf_value rk3576_defaults_sys_grf[] __initconst = {
- { "i3c0 weakpull", RK3576_SYSGRF_SOC_CON1, HIWORD_UPDATE(3, 3, 6) },
- { "i3c1 weakpull", RK3576_SYSGRF_SOC_CON1, HIWORD_UPDATE(3, 3, 8) },
+ { "i3c0 weakpull", RK3576_SYSGRF_SOC_CON1, FIELD_PREP_WM16_CONST(GENMASK(7, 6), 3) },
+ { "i3c1 weakpull", RK3576_SYSGRF_SOC_CON1, FIELD_PREP_WM16_CONST(GENMASK(9, 8), 3) },
};
static const struct rockchip_grf_info rk3576_sysgrf __initconst = {
@@ -138,7 +137,7 @@ static const struct rockchip_grf_info rk3576_sysgrf __initconst = {
#define RK3576_IOCGRF_MISC_CON 0x04F0
static const struct rockchip_grf_value rk3576_defaults_ioc_grf[] __initconst = {
- { "jtag switching", RK3576_IOCGRF_MISC_CON, HIWORD_UPDATE(0, 1, 1) },
+ { "jtag switching", RK3576_IOCGRF_MISC_CON, FIELD_PREP_WM16_CONST(BIT(1), 0) },
};
static const struct rockchip_grf_info rk3576_iocgrf __initconst = {
@@ -149,7 +148,7 @@ static const struct rockchip_grf_info rk3576_iocgrf __initconst = {
#define RK3588_GRF_SOC_CON6 0x0318
static const struct rockchip_grf_value rk3588_defaults[] __initconst = {
- { "jtag switching", RK3588_GRF_SOC_CON6, HIWORD_UPDATE(0, 1, 14) },
+ { "jtag switching", RK3588_GRF_SOC_CON6, FIELD_PREP_WM16_CONST(BIT(14), 0) },
};
static const struct rockchip_grf_info rk3588_sysgrf __initconst = {
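Both helpers encode a high-word write mask: the field value occupies the low 16 bits and the same field mask, shifted up by 16, enables the write. A sketch of the equivalence for the rk3036 entry above, derived from the removed macro definition:

	/* Old: HIWORD_UPDATE(0, 1, 11) == (0 << 11) | (1 << (11 + 16)) == 0x08000000 */
	/* New: FIELD_PREP_WM16_CONST(BIT(11), 0) places value 0 in bit 11 and sets
	 * write-enable bit 27, yielding the same 0x08000000. */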
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
index a77288f49d24..22c50ca2aa79 100644
--- a/drivers/soc/samsung/exynos-pmu.c
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -7,7 +7,9 @@
#include <linux/array_size.h>
#include <linux/arm-smccc.h>
+#include <linux/bitmap.h>
#include <linux/cpuhotplug.h>
+#include <linux/cpu_pm.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/mfd/core.h>
@@ -15,6 +17,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
+#include <linux/reboot.h>
#include <linux/regmap.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
@@ -35,6 +38,15 @@ struct exynos_pmu_context {
const struct exynos_pmu_data *pmu_data;
struct regmap *pmureg;
struct regmap *pmuintrgen;
+ /*
+ * Serialization lock for CPU hot plug and cpuidle ACPM hint
+ * programming. Also protects in_cpuhp, sys_insuspend & sys_inreboot
+ * flags.
+ */
+ raw_spinlock_t cpupm_lock;
+ unsigned long *in_cpuhp;
+ bool sys_insuspend;
+ bool sys_inreboot;
};
void __iomem *pmu_base_addr;
@@ -221,6 +233,15 @@ static const struct regmap_config regmap_smccfg = {
.reg_read = tensor_sec_reg_read,
.reg_write = tensor_sec_reg_write,
.reg_update_bits = tensor_sec_update_bits,
+ .use_raw_spinlock = true,
+};
+
+static const struct regmap_config regmap_pmu_intr = {
+ .name = "pmu_intr_gen",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .use_raw_spinlock = true,
};
static const struct exynos_pmu_data gs101_pmu_data = {
@@ -330,13 +351,19 @@ struct regmap *exynos_get_pmu_regmap_by_phandle(struct device_node *np,
EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap_by_phandle);
/*
- * CPU_INFORM register hint values which are used by
- * EL3 firmware (el3mon).
+ * CPU_INFORM register "hint" values must be programmed in addition to the
+ * standard PSCI calls for CPU hotplug and CPU idle states to work. This
+ * works around limitations in the el3mon/ACPM firmware.
*/
#define CPU_INFORM_CLEAR 0
#define CPU_INFORM_C2 1
-static int gs101_cpuhp_pmu_online(unsigned int cpu)
+/*
+ * __gs101_cpu_pmu_ prefix functions are common code shared by CPU PM notifiers
+ * (CPUIdle) and CPU hotplug callbacks. Functions should be called with IRQs
+ * disabled and cpupm_lock held.
+ */
+static int __gs101_cpu_pmu_online(unsigned int cpu)
{
unsigned int cpuhint = smp_processor_id();
u32 reg, mask;
@@ -358,10 +385,48 @@ static int gs101_cpuhp_pmu_online(unsigned int cpu)
return 0;
}
-static int gs101_cpuhp_pmu_offline(unsigned int cpu)
+/* Called from CPU PM notifier (CPUIdle code path) with IRQs disabled */
+static int gs101_cpu_pmu_online(void)
+{
+ int cpu;
+
+ raw_spin_lock(&pmu_context->cpupm_lock);
+
+ if (pmu_context->sys_inreboot) {
+ raw_spin_unlock(&pmu_context->cpupm_lock);
+ return NOTIFY_OK;
+ }
+
+ cpu = smp_processor_id();
+ __gs101_cpu_pmu_online(cpu);
+ raw_spin_unlock(&pmu_context->cpupm_lock);
+
+ return NOTIFY_OK;
+}
+
+/* Called from CPU hot plug callback with IRQs enabled */
+static int gs101_cpuhp_pmu_online(unsigned int cpu)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&pmu_context->cpupm_lock, flags);
+
+ __gs101_cpu_pmu_online(cpu);
+ /*
+ * Mark this CPU as having finished hotplug; it may now enter the
+ * C2 idle state.
+ */
+ clear_bit(cpu, pmu_context->in_cpuhp);
+ raw_spin_unlock_irqrestore(&pmu_context->cpupm_lock, flags);
+
+ return 0;
+}
+
+/* Common function shared by both CPU hot plug and CPUIdle */
+static int __gs101_cpu_pmu_offline(unsigned int cpu)
{
- u32 reg, mask;
unsigned int cpuhint = smp_processor_id();
+ u32 reg, mask;
/* set cpu inform hint */
regmap_write(pmu_context->pmureg, GS101_CPU_INFORM(cpuhint),
@@ -379,6 +444,165 @@ static int gs101_cpuhp_pmu_offline(unsigned int cpu)
regmap_read(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_UPEND, &reg);
regmap_write(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_CLEAR,
reg & mask);
+
+ return 0;
+}
+
+/* Called from CPU PM notifier (CPUIdle code path) with IRQs disabled */
+static int gs101_cpu_pmu_offline(void)
+{
+ int cpu;
+
+ raw_spin_lock(&pmu_context->cpupm_lock);
+ cpu = smp_processor_id();
+
+ if (test_bit(cpu, pmu_context->in_cpuhp)) {
+ raw_spin_unlock(&pmu_context->cpupm_lock);
+ return NOTIFY_BAD;
+ }
+
+ /* Ignore CPU_PM_ENTER event in reboot or suspend sequence. */
+ if (pmu_context->sys_insuspend || pmu_context->sys_inreboot) {
+ raw_spin_unlock(&pmu_context->cpupm_lock);
+ return NOTIFY_OK;
+ }
+
+ __gs101_cpu_pmu_offline(cpu);
+ raw_spin_unlock(&pmu_context->cpupm_lock);
+
+ return NOTIFY_OK;
+}
+
+/* Called from CPU hot plug callback with IRQs enabled */
+static int gs101_cpuhp_pmu_offline(unsigned int cpu)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&pmu_context->cpupm_lock, flags);
+ /*
+ * Mark this CPU as entering hotplug; it must not enter the C2 idle
+ * state while hotplug is in progress, as that would confuse ACPM.
+ */
+ set_bit(cpu, pmu_context->in_cpuhp);
+ __gs101_cpu_pmu_offline(cpu);
+
+ raw_spin_unlock_irqrestore(&pmu_context->cpupm_lock, flags);
+
+ return 0;
+}
+
+static int gs101_cpu_pm_notify_callback(struct notifier_block *self,
+ unsigned long action, void *v)
+{
+ switch (action) {
+ case CPU_PM_ENTER:
+ return gs101_cpu_pmu_offline();
+
+ case CPU_PM_EXIT:
+ return gs101_cpu_pmu_online();
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block gs101_cpu_pm_notifier = {
+ .notifier_call = gs101_cpu_pm_notify_callback,
+ /*
+ * We want to be called first, as the ACPM hint and handshake are what
+ * put the CPU into C2.
+ */
+ .priority = INT_MAX
+};
+
+static int exynos_cpupm_reboot_notifier(struct notifier_block *nb,
+ unsigned long event, void *v)
+{
+ unsigned long flags;
+
+ switch (event) {
+ case SYS_POWER_OFF:
+ case SYS_RESTART:
+ raw_spin_lock_irqsave(&pmu_context->cpupm_lock, flags);
+ pmu_context->sys_inreboot = true;
+ raw_spin_unlock_irqrestore(&pmu_context->cpupm_lock, flags);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block exynos_cpupm_reboot_nb = {
+ .priority = INT_MAX,
+ .notifier_call = exynos_cpupm_reboot_notifier,
+};
+
+static int setup_cpuhp_and_cpuidle(struct device *dev)
+{
+ struct device_node *intr_gen_node;
+ struct resource intrgen_res;
+ void __iomem *virt_addr;
+ int ret, cpu;
+
+ intr_gen_node = of_parse_phandle(dev->of_node,
+ "google,pmu-intr-gen-syscon", 0);
+ if (!intr_gen_node) {
+ /*
+ * To maintain support for older DTs that didn't specify the syscon
+ * phandle, just issue a warning rather than failing to probe.
+ */
+ dev_warn(dev, "pmu-intr-gen syscon unavailable\n");
+ return 0;
+ }
+
+ /*
+ * To avoid lockdep issues (CPU PM notifiers use raw spinlocks), create
+ * an MMIO regmap for pmu-intr-gen that uses raw spinlocks instead of
+ * using the syscon-provided regmap.
+ */
+ ret = of_address_to_resource(intr_gen_node, 0, &intrgen_res);
+ of_node_put(intr_gen_node);
+ if (ret)
+ return ret;
+
+ virt_addr = devm_ioremap(dev, intrgen_res.start,
+ resource_size(&intrgen_res));
+ if (!virt_addr)
+ return -ENOMEM;
+
+ pmu_context->pmuintrgen = devm_regmap_init_mmio(dev, virt_addr,
+ &regmap_pmu_intr);
+ if (IS_ERR(pmu_context->pmuintrgen)) {
+ dev_err(dev, "failed to initialize pmu-intr-gen regmap\n");
+ return PTR_ERR(pmu_context->pmuintrgen);
+ }
+
+ /* register custom mmio regmap with syscon */
+ ret = of_syscon_register_regmap(intr_gen_node,
+ pmu_context->pmuintrgen);
+ if (ret)
+ return ret;
+
+ pmu_context->in_cpuhp = devm_bitmap_zalloc(dev, num_possible_cpus(),
+ GFP_KERNEL);
+ if (!pmu_context->in_cpuhp)
+ return -ENOMEM;
+
+ raw_spin_lock_init(&pmu_context->cpupm_lock);
+ pmu_context->sys_inreboot = false;
+ pmu_context->sys_insuspend = false;
+
+ /* Program the PMU online hint for CPUs that are already up. */
+ for_each_online_cpu(cpu)
+ gs101_cpuhp_pmu_online(cpu);
+
+ /* register CPU hotplug callbacks */
+ cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "soc/exynos-pmu:prepare",
+ gs101_cpuhp_pmu_online, NULL);
+
+ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "soc/exynos-pmu:online",
+ NULL, gs101_cpuhp_pmu_offline);
+
+ /* Register the CPU PM notifier for cpuidle and the reboot notifier. */
+ cpu_pm_register_notifier(&gs101_cpu_pm_notifier);
+ register_reboot_notifier(&exynos_cpupm_reboot_nb);
return 0;
}
@@ -435,23 +659,9 @@ static int exynos_pmu_probe(struct platform_device *pdev)
pmu_context->dev = dev;
if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_cpuhp) {
- pmu_context->pmuintrgen = syscon_regmap_lookup_by_phandle(dev->of_node,
- "google,pmu-intr-gen-syscon");
- if (IS_ERR(pmu_context->pmuintrgen)) {
- /*
- * To maintain support for older DTs that didn't specify syscon phandle
- * just issue a warning rather than fail to probe.
- */
- dev_warn(&pdev->dev, "pmu-intr-gen syscon unavailable\n");
- } else {
- cpuhp_setup_state(CPUHP_BP_PREPARE_DYN,
- "soc/exynos-pmu:prepare",
- gs101_cpuhp_pmu_online, NULL);
-
- cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
- "soc/exynos-pmu:online",
- NULL, gs101_cpuhp_pmu_offline);
- }
+ ret = setup_cpuhp_and_cpuidle(dev);
+ if (ret)
+ return ret;
}
if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_init)
@@ -471,10 +681,32 @@ static int exynos_pmu_probe(struct platform_device *pdev)
return 0;
}
+static int exynos_cpupm_suspend_noirq(struct device *dev)
+{
+ raw_spin_lock(&pmu_context->cpupm_lock);
+ pmu_context->sys_insuspend = true;
+ raw_spin_unlock(&pmu_context->cpupm_lock);
+ return 0;
+}
+
+static int exynos_cpupm_resume_noirq(struct device *dev)
+{
+ raw_spin_lock(&pmu_context->cpupm_lock);
+ pmu_context->sys_insuspend = false;
+ raw_spin_unlock(&pmu_context->cpupm_lock);
+ return 0;
+}
+
+static const struct dev_pm_ops cpupm_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos_cpupm_suspend_noirq,
+ exynos_cpupm_resume_noirq)
+};
+
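The noirq sleep phase brackets the deepest part of system suspend, so sys_insuspend is set before and cleared after the window in which late CPU_PM_ENTER events can arrive; as the check in gs101_cpu_pmu_offline() above shows, such events are ignored while the flag is set.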
static struct platform_driver exynos_pmu_driver = {
.driver = {
.name = "exynos-pmu",
.of_match_table = exynos_pmu_of_device_ids,
+ .pm = pm_sleep_ptr(&cpupm_pm_ops),
},
.probe = exynos_pmu_probe,
};
diff --git a/drivers/soc/sunxi/sunxi_sram.c b/drivers/soc/sunxi/sunxi_sram.c
index 2781a091a6a6..446b9fc1f175 100644
--- a/drivers/soc/sunxi/sunxi_sram.c
+++ b/drivers/soc/sunxi/sunxi_sram.c
@@ -12,6 +12,7 @@
#include <linux/debugfs.h>
#include <linux/io.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -310,6 +311,10 @@ static const struct sunxi_sramc_variant sun50i_h616_sramc_variant = {
.has_ths_offset = true,
};
+static const struct sunxi_sramc_variant sun55i_a523_sramc_variant = {
+ .num_emac_clocks = 2,
+};
+
#define SUNXI_SRAM_THS_OFFSET_REG 0x0
#define SUNXI_SRAM_EMAC_CLOCK_REG 0x30
#define SUNXI_SYS_LDO_CTRL_REG 0x150
@@ -363,6 +368,7 @@ static int __init sunxi_sram_probe(struct platform_device *pdev)
const struct sunxi_sramc_variant *variant;
struct device *dev = &pdev->dev;
struct regmap *regmap;
+ int ret;
sram_dev = &pdev->dev;
@@ -380,6 +386,10 @@ static int __init sunxi_sram_probe(struct platform_device *pdev)
regmap = devm_regmap_init_mmio(dev, base, &sunxi_sram_regmap_config);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
+
+ ret = of_syscon_register_regmap(dev->of_node, regmap);
+ if (ret)
+ return ret;
}
of_platform_populate(dev->of_node, NULL, NULL, dev);
@@ -430,6 +440,10 @@ static const struct of_device_id sunxi_sram_dt_match[] = {
.compatible = "allwinner,sun50i-h616-system-control",
.data = &sun50i_h616_sramc_variant,
},
+ {
+ .compatible = "allwinner,sun55i-a523-system-control",
+ .data = &sun55i_a523_sramc_variant,
+ },
{ },
};
MODULE_DEVICE_TABLE(of, sunxi_sram_dt_match);
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
index 9392c2c43cc8..c0fc54c3cd35 100644
--- a/drivers/soc/tegra/Kconfig
+++ b/drivers/soc/tegra/Kconfig
@@ -96,6 +96,7 @@ config ARCH_TEGRA_210_SOC
config ARCH_TEGRA_186_SOC
bool "NVIDIA Tegra186 SoC"
depends on !CPU_BIG_ENDIAN
+ select PINCTRL_TEGRA186
select MAILBOX
select SOC_TEGRA_PMC
help
diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
index e24ab5f7d2bf..524fa1b0cd3d 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra30.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
@@ -117,6 +117,124 @@ const struct tegra_fuse_soc tegra30_fuse_soc = {
#endif
#ifdef CONFIG_ARCH_TEGRA_114_SOC
+static const struct nvmem_cell_info tegra114_fuse_cells[] = {
+ {
+ .name = "tsensor-cpu1",
+ .offset = 0x084,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-cpu2",
+ .offset = 0x088,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-common",
+ .offset = 0x08c,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-cpu0",
+ .offset = 0x098,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "xusb-pad-calibration",
+ .offset = 0x0f0,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-cpu3",
+ .offset = 0x12c,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-gpu",
+ .offset = 0x154,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-mem0",
+ .offset = 0x158,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-mem1",
+ .offset = 0x15c,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ }, {
+ .name = "tsensor-pllx",
+ .offset = 0x160,
+ .bytes = 4,
+ .bit_offset = 0,
+ .nbits = 32,
+ },
+};
+
+static const struct nvmem_cell_lookup tegra114_fuse_lookups[] = {
+ {
+ .nvmem_name = "fuse",
+ .cell_name = "xusb-pad-calibration",
+ .dev_id = "7009f000.padctl",
+ .con_id = "calibration",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-common",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "common",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu0",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu0",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu1",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu1",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu2",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu2",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-cpu3",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "cpu3",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-mem0",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "mem0",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-mem1",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "mem1",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-gpu",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "gpu",
+ }, {
+ .nvmem_name = "fuse",
+ .cell_name = "tsensor-pllx",
+ .dev_id = "700e2000.thermal-sensor",
+ .con_id = "pllx",
+ },
+};
+
static const struct tegra_fuse_info tegra114_fuse_info = {
.read = tegra30_fuse_read,
.size = 0x2a0,
@@ -127,6 +245,10 @@ const struct tegra_fuse_soc tegra114_fuse_soc = {
.init = tegra30_fuse_init,
.speedo_init = tegra114_init_speedo_data,
.info = &tegra114_fuse_info,
+ .lookups = tegra114_fuse_lookups,
+ .num_lookups = ARRAY_SIZE(tegra114_fuse_lookups),
+ .cells = tegra114_fuse_cells,
+ .num_cells = ARRAY_SIZE(tegra114_fuse_cells),
.soc_attr_group = &tegra_soc_attr_group,
.clk_suspend_on = false,
};
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 2a5f24ee858c..034a2a535a1e 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/